Diffstat (limited to 'include')
-rw-r--r--include/Kbuild1
-rw-r--r--include/acpi/acbuffer.h12
-rw-r--r--include/acpi/acconfig.h7
-rw-r--r--include/acpi/acexcep.h12
-rw-r--r--include/acpi/acnames.h4
-rw-r--r--include/acpi/acoutput.h9
-rw-r--r--include/acpi/acpi.h2
-rw-r--r--include/acpi/acpi_bus.h409
-rw-r--r--include/acpi/acpi_drivers.h2
-rw-r--r--include/acpi/acpi_numa.h8
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/acpi/acpixf.h157
-rw-r--r--include/acpi/acrestyp.h39
-rw-r--r--include/acpi/actbl.h13
-rw-r--r--include/acpi/actbl1.h338
-rw-r--r--include/acpi/actbl2.h1656
-rw-r--r--include/acpi/actbl3.h53
-rw-r--r--include/acpi/actypes.h51
-rw-r--r--include/acpi/acuuid.h5
-rw-r--r--include/acpi/apei.h9
-rw-r--r--include/acpi/battery.h6
-rw-r--r--include/acpi/cppc_acpi.h113
-rw-r--r--include/acpi/ghes.h37
-rw-r--r--include/acpi/nhlt.h181
-rw-r--r--include/acpi/pcc.h64
-rw-r--r--include/acpi/pdc_intel.h36
-rw-r--r--include/acpi/platform/acenv.h15
-rw-r--r--include/acpi/platform/acenvex.h4
-rw-r--r--include/acpi/platform/acgcc.h37
-rw-r--r--include/acpi/platform/acgccex.h2
-rw-r--r--include/acpi/platform/acintel.h55
-rw-r--r--include/acpi/platform/aclinux.h15
-rw-r--r--include/acpi/platform/aclinuxex.h26
-rw-r--r--include/acpi/platform/aczephyr.h45
-rw-r--r--include/acpi/proc_cap_intel.h40
-rw-r--r--include/acpi/processor.h30
-rw-r--r--include/acpi/video.h32
-rw-r--r--include/asm-generic/Kbuild11
-rw-r--r--include/asm-generic/access_ok.h48
-rw-r--r--include/asm-generic/agp.h11
-rw-r--r--include/asm-generic/archrandom.h15
-rw-r--r--include/asm-generic/atomic-instrumented.h1833
-rw-r--r--include/asm-generic/atomic-long.h1014
-rw-r--r--include/asm-generic/atomic.h117
-rw-r--r--include/asm-generic/atomic64.h45
-rw-r--r--include/asm-generic/audit_change_attr.h6
-rw-r--r--include/asm-generic/barrier.h83
-rw-r--r--include/asm-generic/bitops.h1
-rw-r--r--include/asm-generic/bitops/__ffs.h10
-rw-r--r--include/asm-generic/bitops/__fls.h10
-rw-r--r--include/asm-generic/bitops/atomic.h38
-rw-r--r--include/asm-generic/bitops/builtin-__ffs.h2
-rw-r--r--include/asm-generic/bitops/builtin-__fls.h2
-rw-r--r--include/asm-generic/bitops/builtin-ffs.h2
-rw-r--r--include/asm-generic/bitops/builtin-fls.h2
-rw-r--r--include/asm-generic/bitops/ffs.h10
-rw-r--r--include/asm-generic/bitops/find.h188
-rw-r--r--include/asm-generic/bitops/fls.h8
-rw-r--r--include/asm-generic/bitops/fls64.h4
-rw-r--r--include/asm-generic/bitops/generic-non-atomic.h175
-rw-r--r--include/asm-generic/bitops/instrumented-atomic.h15
-rw-r--r--include/asm-generic/bitops/instrumented-lock.h31
-rw-r--r--include/asm-generic/bitops/instrumented-non-atomic.h49
-rw-r--r--include/asm-generic/bitops/le.h64
-rw-r--r--include/asm-generic/bitops/lock.h45
-rw-r--r--include/asm-generic/bitops/non-atomic.h109
-rw-r--r--include/asm-generic/bitops/non-instrumented-non-atomic.h17
-rw-r--r--include/asm-generic/bug.h157
-rw-r--r--include/asm-generic/bugs.h11
-rw-r--r--include/asm-generic/cacheflush.h29
-rw-r--r--include/asm-generic/cfi.h5
-rw-r--r--include/asm-generic/checksum.h6
-rw-r--r--include/asm-generic/cmpxchg-local.h16
-rw-r--r--include/asm-generic/cmpxchg.h48
-rw-r--r--include/asm-generic/codetag.lds.h33
-rw-r--r--include/asm-generic/compat.h135
-rw-r--r--include/asm-generic/current.h2
-rw-r--r--include/asm-generic/delay.h96
-rw-r--r--include/asm-generic/div64.h135
-rw-r--r--include/asm-generic/dma-mapping.h2
-rw-r--r--include/asm-generic/early_ioremap.h8
-rw-r--r--include/asm-generic/error-injection.h9
-rw-r--r--include/asm-generic/export.h94
-rw-r--r--include/asm-generic/fb.h13
-rw-r--r--include/asm-generic/fixmap.h3
-rw-r--r--include/asm-generic/fprobe.h46
-rw-r--r--include/asm-generic/futex.h31
-rw-r--r--include/asm-generic/gpio.h172
-rw-r--r--include/asm-generic/hugetlb.h77
-rw-r--r--include/asm-generic/hyperv-tlfs.h808
-rw-r--r--include/asm-generic/ide_iops.h39
-rw-r--r--include/asm-generic/io.h281
-rw-r--r--include/asm-generic/iomap.h52
-rw-r--r--include/asm-generic/local.h1
-rw-r--r--include/asm-generic/local64.h12
-rw-r--r--include/asm-generic/logic_io.h78
-rw-r--r--include/asm-generic/mcs_spinlock.h6
-rw-r--r--include/asm-generic/memory_model.h69
-rw-r--r--include/asm-generic/mm_hooks.h11
-rw-r--r--include/asm-generic/mmzone.h5
-rw-r--r--include/asm-generic/module.h8
-rw-r--r--include/asm-generic/mshyperv.h266
-rw-r--r--include/asm-generic/msi.h5
-rw-r--r--include/asm-generic/numa.h8
-rw-r--r--include/asm-generic/page.h97
-rw-r--r--include/asm-generic/param.h2
-rw-r--r--include/asm-generic/pci.h39
-rw-r--r--include/asm-generic/pci_iomap.h7
-rw-r--r--include/asm-generic/percpu.h235
-rw-r--r--include/asm-generic/pgalloc.h222
-rw-r--r--include/asm-generic/pgtable-nop4d.h3
-rw-r--r--include/asm-generic/pgtable-nopmd.h4
-rw-r--r--include/asm-generic/pgtable-nopud.h2
-rw-r--r--include/asm-generic/pgtable_uffd.h17
-rw-r--r--include/asm-generic/preempt.h16
-rw-r--r--include/asm-generic/qrwlock.h32
-rw-r--r--include/asm-generic/qrwlock_types.h2
-rw-r--r--include/asm-generic/qspinlock.h33
-rw-r--r--include/asm-generic/rqspinlock.h254
-rw-r--r--include/asm-generic/runtime-const.h15
-rw-r--r--include/asm-generic/rwonce.h10
-rw-r--r--include/asm-generic/sections.h120
-rw-r--r--include/asm-generic/signal.h2
-rw-r--r--include/asm-generic/simd.h9
-rw-r--r--include/asm-generic/softirq_stack.h2
-rw-r--r--include/asm-generic/spinlock.h11
-rw-r--r--include/asm-generic/spinlock_types.h9
-rw-r--r--include/asm-generic/syscall.h18
-rw-r--r--include/asm-generic/syscalls.h2
-rw-r--r--include/asm-generic/termios-base.h78
-rw-r--r--include/asm-generic/termios.h108
-rw-r--r--include/asm-generic/text-patching.h5
-rw-r--r--include/asm-generic/thread_info_tif.h51
-rw-r--r--include/asm-generic/ticket_spinlock.h105
-rw-r--r--include/asm-generic/tlb.h229
-rw-r--r--include/asm-generic/topology.h2
-rw-r--r--include/asm-generic/uaccess.h95
-rw-r--r--include/asm-generic/unaligned.h36
-rw-r--r--include/asm-generic/unwind_user.h5
-rw-r--r--include/asm-generic/vdso/vsyscall.h28
-rw-r--r--include/asm-generic/vga.h23
-rw-r--r--include/asm-generic/video.h136
-rw-r--r--include/asm-generic/vmlinux.lds.h617
-rw-r--r--include/asm-generic/vtime.h1
-rw-r--r--include/asm-generic/word-at-a-time.h5
-rw-r--r--include/asm-generic/xor.h84
-rw-r--r--include/clocksource/arm_arch_timer.h15
-rw-r--r--include/clocksource/hyperv_timer.h44
-rw-r--r--include/clocksource/samsung_pwm.h3
-rw-r--r--include/clocksource/timer-goldfish.h31
-rw-r--r--include/clocksource/timer-riscv.h16
-rw-r--r--include/clocksource/timer-ti-dm.h257
-rw-r--r--include/clocksource/timer-xilinx.h73
-rw-r--r--include/crypto/acompress.h409
-rw-r--r--include/crypto/aead.h126
-rw-r--r--include/crypto/aes.h5
-rw-r--r--include/crypto/akcipher.h120
-rw-r--r--include/crypto/algapi.h179
-rw-r--r--include/crypto/aria.h458
-rw-r--r--include/crypto/asym_tpm_subtype.h19
-rw-r--r--include/crypto/authenc.h2
-rw-r--r--include/crypto/b128ops.h14
-rw-r--r--include/crypto/blake2b.h129
-rw-r--r--include/crypto/blake2s.h133
-rw-r--r--include/crypto/chacha.h106
-rw-r--r--include/crypto/chacha20poly1305.h19
-rw-r--r--include/crypto/cryptd.h3
-rw-r--r--include/crypto/ctr.h50
-rw-r--r--include/crypto/curve25519.h58
-rw-r--r--include/crypto/df_sp80090a.h28
-rw-r--r--include/crypto/dh.h26
-rw-r--r--include/crypto/drbg.h36
-rw-r--r--include/crypto/ecc_curve.h2
-rw-r--r--include/crypto/ecdh.h1
-rw-r--r--include/crypto/engine.h122
-rw-r--r--include/crypto/gcm.h22
-rw-r--r--include/crypto/gf128mul.h6
-rw-r--r--include/crypto/ghash.h4
-rw-r--r--include/crypto/hash.h340
-rw-r--r--include/crypto/hash_info.h1
-rw-r--r--include/crypto/hkdf.h20
-rw-r--r--include/crypto/if_alg.h31
-rw-r--r--include/crypto/internal/acompress.h209
-rw-r--r--include/crypto/internal/aead.h49
-rw-r--r--include/crypto/internal/akcipher.h32
-rw-r--r--include/crypto/internal/blake2b.h115
-rw-r--r--include/crypto/internal/blake2s.h119
-rw-r--r--include/crypto/internal/blockhash.h52
-rw-r--r--include/crypto/internal/chacha.h43
-rw-r--r--include/crypto/internal/cipher.h2
-rw-r--r--include/crypto/internal/cryptouser.h16
-rw-r--r--include/crypto/internal/drbg.h54
-rw-r--r--include/crypto/internal/ecc.h310
-rw-r--r--include/crypto/internal/engine.h58
-rw-r--r--include/crypto/internal/geniv.h1
-rw-r--r--include/crypto/internal/hash.h180
-rw-r--r--include/crypto/internal/kdf_selftest.h71
-rw-r--r--include/crypto/internal/kpp.h190
-rw-r--r--include/crypto/internal/poly1305.h26
-rw-r--r--include/crypto/internal/rsa.h29
-rw-r--r--include/crypto/internal/scompress.h33
-rw-r--r--include/crypto/internal/sig.h97
-rw-r--r--include/crypto/internal/simd.h22
-rw-r--r--include/crypto/internal/skcipher.h201
-rw-r--r--include/crypto/kdf_sp800108.h61
-rw-r--r--include/crypto/kpp.h40
-rw-r--r--include/crypto/krb5.h165
-rw-r--r--include/crypto/md5.h189
-rw-r--r--include/crypto/null.h3
-rw-r--r--include/crypto/pcrypt.h2
-rw-r--r--include/crypto/poly1305.h64
-rw-r--r--include/crypto/polyval.h190
-rw-r--r--include/crypto/public_key.h52
-rw-r--r--include/crypto/rng.h37
-rw-r--r--include/crypto/scatterwalk.h237
-rw-r--r--include/crypto/sha1.h191
-rw-r--r--include/crypto/sha1_base.h109
-rw-r--r--include/crypto/sha2.h863
-rw-r--r--include/crypto/sha256_base.h113
-rw-r--r--include/crypto/sha3.h332
-rw-r--r--include/crypto/sha512_base.h134
-rw-r--r--include/crypto/sig.h265
-rw-r--r--include/crypto/skcipher.h416
-rw-r--r--include/crypto/sm2.h25
-rw-r--r--include/crypto/sm3.h34
-rw-r--r--include/crypto/sm3_base.h94
-rw-r--r--include/crypto/sm4.h29
-rw-r--r--include/crypto/streebog.h5
-rw-r--r--include/crypto/utils.h73
-rw-r--r--include/crypto/xts.h25
-rw-r--r--include/cxl/einj.h44
-rw-r--r--include/cxl/event.h323
-rw-r--r--include/cxl/features.h88
-rw-r--r--include/cxl/mailbox.h70
-rw-r--r--include/drm/Makefile18
-rw-r--r--include/drm/amd/isp.h51
-rw-r--r--include/drm/amd_asic_type.h24
-rw-r--r--include/drm/bridge/analogix_dp.h11
-rw-r--r--include/drm/bridge/aux-bridge.h52
-rw-r--r--include/drm/bridge/dw_dp.h20
-rw-r--r--include/drm/bridge/dw_hdmi.h26
-rw-r--r--include/drm/bridge/dw_hdmi_qp.h38
-rw-r--r--include/drm/bridge/dw_mipi_dsi.h20
-rw-r--r--include/drm/bridge/dw_mipi_dsi2.h95
-rw-r--r--include/drm/bridge/imx.h17
-rw-r--r--include/drm/bridge/samsung-dsim.h141
-rw-r--r--include/drm/clients/drm_client_setup.h26
-rw-r--r--include/drm/display/drm_dp.h (renamed from include/drm/drm_dp_helper.h)843
-rw-r--r--include/drm/display/drm_dp_aux_bus.h85
-rw-r--r--include/drm/display/drm_dp_dual_mode_helper.h (renamed from include/drm/drm_dp_dual_mode_helper.h)16
-rw-r--r--include/drm/display/drm_dp_helper.h1013
-rw-r--r--include/drm/display/drm_dp_mst_helper.h (renamed from include/drm/drm_dp_mst_helper.h)338
-rw-r--r--include/drm/display/drm_dp_tunnel.h248
-rw-r--r--include/drm/display/drm_dsc.h (renamed from include/drm/drm_dsc.h)11
-rw-r--r--include/drm/display/drm_dsc_helper.h36
-rw-r--r--include/drm/display/drm_hdcp.h (renamed from include/drm/drm_hdcp.h)14
-rw-r--r--include/drm/display/drm_hdcp_helper.h22
-rw-r--r--include/drm/display/drm_hdmi_audio_helper.h23
-rw-r--r--include/drm/display/drm_hdmi_cec_helper.h72
-rw-r--r--include/drm/display/drm_hdmi_helper.h37
-rw-r--r--include/drm/display/drm_hdmi_state_helper.h33
-rw-r--r--include/drm/display/drm_scdc.h (renamed from include/drm/drm_scdc_helper.h)52
-rw-r--r--include/drm/display/drm_scdc_helper.h80
-rw-r--r--include/drm/drm_accel.h85
-rw-r--r--include/drm/drm_agpsupport.h117
-rw-r--r--include/drm/drm_atomic.h461
-rw-r--r--include/drm/drm_atomic_helper.h42
-rw-r--r--include/drm/drm_atomic_state_helper.h4
-rw-r--r--include/drm/drm_atomic_uapi.h5
-rw-r--r--include/drm/drm_audio_component.h3
-rw-r--r--include/drm/drm_auth.h90
-rw-r--r--include/drm/drm_bridge.h962
-rw-r--r--include/drm/drm_bridge_connector.h2
-rw-r--r--include/drm/drm_bridge_helper.h12
-rw-r--r--include/drm/drm_buddy.h171
-rw-r--r--include/drm/drm_cache.h15
-rw-r--r--include/drm/drm_client.h96
-rw-r--r--include/drm/drm_client_event.h29
-rw-r--r--include/drm/drm_color_mgmt.h73
-rw-r--r--include/drm/drm_colorop.h464
-rw-r--r--include/drm/drm_connector.h897
-rw-r--r--include/drm/drm_crtc.h65
-rw-r--r--include/drm/drm_crtc_helper.h18
-rw-r--r--include/drm/drm_damage_helper.h20
-rw-r--r--include/drm/drm_debugfs.h102
-rw-r--r--include/drm/drm_debugfs_crc.h8
-rw-r--r--include/drm/drm_device.h185
-rw-r--r--include/drm/drm_displayid.h117
-rw-r--r--include/drm/drm_drv.h215
-rw-r--r--include/drm/drm_dumb_buffers.h14
-rw-r--r--include/drm/drm_edid.h358
-rw-r--r--include/drm/drm_eld.h164
-rw-r--r--include/drm/drm_encoder.h22
-rw-r--r--include/drm/drm_encoder_slave.h182
-rw-r--r--include/drm/drm_exec.h150
-rw-r--r--include/drm/drm_fb_cma_helper.h18
-rw-r--r--include/drm/drm_fb_dma_helper.h28
-rw-r--r--include/drm/drm_fb_helper.h248
-rw-r--r--include/drm/drm_fbdev_dma.h20
-rw-r--r--include/drm/drm_fbdev_shmem.h20
-rw-r--r--include/drm/drm_fbdev_ttm.h22
-rw-r--r--include/drm/drm_file.h166
-rw-r--r--include/drm/drm_fixed.h49
-rw-r--r--include/drm/drm_flip_work.h20
-rw-r--r--include/drm/drm_format_helper.h144
-rw-r--r--include/drm/drm_fourcc.h33
-rw-r--r--include/drm/drm_framebuffer.h36
-rw-r--r--include/drm/drm_gem.h286
-rw-r--r--include/drm/drm_gem_atomic_helper.h69
-rw-r--r--include/drm/drm_gem_cma_helper.h185
-rw-r--r--include/drm/drm_gem_dma_helper.h274
-rw-r--r--include/drm/drm_gem_framebuffer_helper.h15
-rw-r--r--include/drm/drm_gem_shmem_helper.h257
-rw-r--r--include/drm/drm_gem_ttm_helper.h16
-rw-r--r--include/drm/drm_gem_vram_helper.h67
-rw-r--r--include/drm/drm_gpusvm.h542
-rw-r--r--include/drm/drm_gpuvm.h1304
-rw-r--r--include/drm/drm_hashtab.h79
-rw-r--r--include/drm/drm_ioctl.h13
-rw-r--r--include/drm/drm_irq.h32
-rw-r--r--include/drm/drm_kunit_helpers.h136
-rw-r--r--include/drm/drm_lease.h2
-rw-r--r--include/drm/drm_legacy.h237
-rw-r--r--include/drm/drm_managed.h46
-rw-r--r--include/drm/drm_mipi_dbi.h76
-rw-r--r--include/drm/drm_mipi_dsi.h287
-rw-r--r--include/drm/drm_mm.h7
-rw-r--r--include/drm/drm_mode_config.h113
-rw-r--r--include/drm/drm_mode_object.h11
-rw-r--r--include/drm/drm_modes.h64
-rw-r--r--include/drm/drm_modeset_helper.h2
-rw-r--r--include/drm/drm_modeset_helper_vtables.h228
-rw-r--r--include/drm/drm_modeset_lock.h9
-rw-r--r--include/drm/drm_module.h125
-rw-r--r--include/drm/drm_of.h51
-rw-r--r--include/drm/drm_pagemap.h248
-rw-r--r--include/drm/drm_panel.h195
-rw-r--r--include/drm/drm_panic.h189
-rw-r--r--include/drm/drm_pciids.h112
-rw-r--r--include/drm/drm_plane.h187
-rw-r--r--include/drm/drm_plane_helper.h39
-rw-r--r--include/drm/drm_prime.h12
-rw-r--r--include/drm/drm_print.h434
-rw-r--r--include/drm/drm_privacy_screen_consumer.h65
-rw-r--r--include/drm/drm_privacy_screen_driver.h95
-rw-r--r--include/drm/drm_privacy_screen_machine.h46
-rw-r--r--include/drm/drm_probe_helper.h21
-rw-r--r--include/drm/drm_property.h15
-rw-r--r--include/drm/drm_rect.h38
-rw-r--r--include/drm/drm_simple_kms_helper.h54
-rw-r--r--include/drm/drm_suballoc.h108
-rw-r--r--include/drm/drm_syncobj.h6
-rw-r--r--include/drm/drm_sysfs.h5
-rw-r--r--include/drm/drm_util.h16
-rw-r--r--include/drm/drm_utils.h10
-rw-r--r--include/drm/drm_vblank.h71
-rw-r--r--include/drm/drm_vblank_helper.h56
-rw-r--r--include/drm/drm_vblank_work.h2
-rw-r--r--include/drm/drm_vma_manager.h3
-rw-r--r--include/drm/drm_writeback.h17
-rw-r--r--include/drm/gma_drm.h13
-rw-r--r--include/drm/gpu_scheduler.h580
-rw-r--r--include/drm/gud.h6
-rw-r--r--include/drm/i2c/ch7006.h86
-rw-r--r--include/drm/i2c/sil164.h63
-rw-r--r--include/drm/i2c/tda998x.h40
-rw-r--r--include/drm/i915_mei_hdcp_interface.h184
-rw-r--r--include/drm/i915_pciids.h648
-rw-r--r--include/drm/intel-gtt.h37
-rw-r--r--include/drm/intel/display_member.h42
-rw-r--r--include/drm/intel/display_parent_interface.h45
-rw-r--r--include/drm/intel/i915_component.h (renamed from include/drm/i915_component.h)5
-rw-r--r--include/drm/intel/i915_drm.h (renamed from include/drm/i915_drm.h)5
-rw-r--r--include/drm/intel/i915_gsc_proxy_mei_interface.h53
-rw-r--r--include/drm/intel/i915_hdcp_interface.h547
-rw-r--r--include/drm/intel/i915_pxp_tee_interface.h62
-rw-r--r--include/drm/intel/intel-gtt.h41
-rw-r--r--include/drm/intel/intel_lb_mei_interface.h70
-rw-r--r--include/drm/intel/intel_lpe_audio.h (renamed from include/drm/intel_lpe_audio.h)0
-rw-r--r--include/drm/intel/pciids.h903
-rw-r--r--include/drm/intel/xe_sriov_vfio.h143
-rw-r--r--include/drm/spsc_queue.h4
-rw-r--r--include/drm/task_barrier.h4
-rw-r--r--include/drm/ttm/ttm_allocation.h12
-rw-r--r--include/drm/ttm/ttm_backup.h72
-rw-r--r--include/drm/ttm/ttm_bo.h546
-rw-r--r--include/drm/ttm/ttm_bo_api.h640
-rw-r--r--include/drm/ttm/ttm_bo_driver.h335
-rw-r--r--include/drm/ttm/ttm_caching.h21
-rw-r--r--include/drm/ttm/ttm_device.h120
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h11
-rw-r--r--include/drm/ttm/ttm_kmap_iter.h61
-rw-r--r--include/drm/ttm/ttm_placement.h29
-rw-r--r--include/drm/ttm/ttm_pool.h30
-rw-r--r--include/drm/ttm/ttm_range_manager.h56
-rw-r--r--include/drm/ttm/ttm_resource.h378
-rw-r--r--include/drm/ttm/ttm_tt.h209
-rw-r--r--include/dt-bindings/arm/mhuv3-dt.h13
-rw-r--r--include/dt-bindings/arm/qcom,ids.h310
-rw-r--r--include/dt-bindings/ata/ahci.h20
-rw-r--r--include/dt-bindings/clock/actions,s500-cmu.h6
-rw-r--r--include/dt-bindings/clock/alphascale,asm9260.h2
-rw-r--r--include/dt-bindings/clock/am3.h93
-rw-r--r--include/dt-bindings/clock/am4.h99
-rw-r--r--include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h169
-rw-r--r--include/dt-bindings/clock/amlogic,a1-pll-clkc.h26
-rw-r--r--include/dt-bindings/clock/amlogic,c3-peripherals-clkc.h212
-rw-r--r--include/dt-bindings/clock/amlogic,c3-pll-clkc.h40
-rw-r--r--include/dt-bindings/clock/amlogic,c3-scmi-clkc.h27
-rw-r--r--include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h236
-rw-r--r--include/dt-bindings/clock/amlogic,s4-pll-clkc.h43
-rw-r--r--include/dt-bindings/clock/aspeed,ast2700-scu.h167
-rw-r--r--include/dt-bindings/clock/aspeed-clock.h1
-rw-r--r--include/dt-bindings/clock/ast2600-clock.h21
-rw-r--r--include/dt-bindings/clock/at91.h14
-rw-r--r--include/dt-bindings/clock/axg-audio-clkc.h72
-rw-r--r--include/dt-bindings/clock/axg-clkc.h48
-rw-r--r--include/dt-bindings/clock/axis,artpec6-clkctrl.h2
-rw-r--r--include/dt-bindings/clock/axis,artpec8-clk.h169
-rw-r--r--include/dt-bindings/clock/bcm21664.h10
-rw-r--r--include/dt-bindings/clock/bcm281xx.h10
-rw-r--r--include/dt-bindings/clock/bcm63268-clock.h13
-rw-r--r--include/dt-bindings/clock/boston-clock.h3
-rw-r--r--include/dt-bindings/clock/cirrus,cs2000-cp.h14
-rw-r--r--include/dt-bindings/clock/cirrus,ep9301-syscon.h46
-rw-r--r--include/dt-bindings/clock/cix,sky1.h279
-rw-r--r--include/dt-bindings/clock/dra7.h182
-rw-r--r--include/dt-bindings/clock/efm32-cmu.h43
-rw-r--r--include/dt-bindings/clock/en7523-clk.h17
-rw-r--r--include/dt-bindings/clock/exynos3250.h18
-rw-r--r--include/dt-bindings/clock/exynos4.h7
-rw-r--r--include/dt-bindings/clock/exynos5250.h5
-rw-r--r--include/dt-bindings/clock/exynos5260-clk.h25
-rw-r--r--include/dt-bindings/clock/exynos5410.h2
-rw-r--r--include/dt-bindings/clock/exynos5420.h3
-rw-r--r--include/dt-bindings/clock/exynos5433.h42
-rw-r--r--include/dt-bindings/clock/exynos7885.h157
-rw-r--r--include/dt-bindings/clock/exynos850.h394
-rw-r--r--include/dt-bindings/clock/fsd-clk.h156
-rw-r--r--include/dt-bindings/clock/g12a-aoclkc.h7
-rw-r--r--include/dt-bindings/clock/g12a-clkc.h138
-rw-r--r--include/dt-bindings/clock/google,gs101-acpm.h26
-rw-r--r--include/dt-bindings/clock/google,gs101.h637
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h65
-rw-r--r--include/dt-bindings/clock/hi3559av100-clock.h165
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h4
-rw-r--r--include/dt-bindings/clock/imx6sll-clock.h2
-rw-r--r--include/dt-bindings/clock/imx6ul-clock.h7
-rw-r--r--include/dt-bindings/clock/imx8-clock.h156
-rw-r--r--include/dt-bindings/clock/imx8mm-clock.h1
-rw-r--r--include/dt-bindings/clock/imx8mn-clock.h42
-rw-r--r--include/dt-bindings/clock/imx8mp-clock.h25
-rw-r--r--include/dt-bindings/clock/imx8mq-clock.h19
-rw-r--r--include/dt-bindings/clock/imx8ulp-clock.h263
-rw-r--r--include/dt-bindings/clock/imx93-clock.h214
-rw-r--r--include/dt-bindings/clock/imxrt1050-clock.h72
-rw-r--r--include/dt-bindings/clock/ingenic,jz4725b-cgu.h (renamed from include/dt-bindings/clock/jz4725b-cgu.h)0
-rw-r--r--include/dt-bindings/clock/ingenic,jz4740-cgu.h (renamed from include/dt-bindings/clock/jz4740-cgu.h)0
-rw-r--r--include/dt-bindings/clock/ingenic,jz4755-cgu.h49
-rw-r--r--include/dt-bindings/clock/ingenic,jz4760-cgu.h56
-rw-r--r--include/dt-bindings/clock/ingenic,jz4770-cgu.h (renamed from include/dt-bindings/clock/jz4770-cgu.h)1
-rw-r--r--include/dt-bindings/clock/ingenic,jz4780-cgu.h (renamed from include/dt-bindings/clock/jz4780-cgu.h)0
-rw-r--r--include/dt-bindings/clock/ingenic,sysost.h19
-rw-r--r--include/dt-bindings/clock/ingenic,x1000-cgu.h (renamed from include/dt-bindings/clock/x1000-cgu.h)4
-rw-r--r--include/dt-bindings/clock/ingenic,x1830-cgu.h (renamed from include/dt-bindings/clock/x1830-cgu.h)0
-rw-r--r--include/dt-bindings/clock/intel,agilex5-clkmgr.h100
-rw-r--r--include/dt-bindings/clock/lochnagar.h (renamed from include/dt-bindings/clk/lochnagar.h)0
-rw-r--r--include/dt-bindings/clock/loongson,ls1x-clk.h19
-rw-r--r--include/dt-bindings/clock/loongson,ls2k-clk.h82
-rw-r--r--include/dt-bindings/clock/marvell,mmp2-audio.h1
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h5
-rw-r--r--include/dt-bindings/clock/marvell,pxa168.h11
-rw-r--r--include/dt-bindings/clock/marvell,pxa1908.h88
-rw-r--r--include/dt-bindings/clock/marvell,pxa1928.h3
-rw-r--r--include/dt-bindings/clock/marvell,pxa910.h5
-rw-r--r--include/dt-bindings/clock/mediatek,mt6735-apmixedsys.h16
-rw-r--r--include/dt-bindings/clock/mediatek,mt6735-imgsys.h15
-rw-r--r--include/dt-bindings/clock/mediatek,mt6735-infracfg.h25
-rw-r--r--include/dt-bindings/clock/mediatek,mt6735-mfgcfg.h8
-rw-r--r--include/dt-bindings/clock/mediatek,mt6735-pericfg.h37
-rw-r--r--include/dt-bindings/clock/mediatek,mt6735-topckgen.h79
-rw-r--r--include/dt-bindings/clock/mediatek,mt6735-vdecsys.h9
-rw-r--r--include/dt-bindings/clock/mediatek,mt6735-vencsys.h11
-rw-r--r--include/dt-bindings/clock/mediatek,mt6795-clk.h275
-rw-r--r--include/dt-bindings/clock/mediatek,mt7981-clk.h215
-rw-r--r--include/dt-bindings/clock/mediatek,mt7988-clk.h280
-rw-r--r--include/dt-bindings/clock/mediatek,mt8188-clk.h726
-rw-r--r--include/dt-bindings/clock/mediatek,mt8196-clock.h803
-rw-r--r--include/dt-bindings/clock/mediatek,mt8365-clk.h373
-rw-r--r--include/dt-bindings/clock/mediatek,mtmips-sysc.h130
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h107
-rw-r--r--include/dt-bindings/clock/microchip,lan966x.h34
-rw-r--r--include/dt-bindings/clock/microchip,mpfs-clock.h76
-rw-r--r--include/dt-bindings/clock/mobileye,eyeq5-clk.h65
-rw-r--r--include/dt-bindings/clock/mt7622-clk.h2
-rw-r--r--include/dt-bindings/clock/mt7986-clk.h169
-rw-r--r--include/dt-bindings/clock/mt8173-clk.h1
-rw-r--r--include/dt-bindings/clock/mt8186-clk.h445
-rw-r--r--include/dt-bindings/clock/mt8192-clk.h585
-rw-r--r--include/dt-bindings/clock/mt8195-clk.h866
-rw-r--r--include/dt-bindings/clock/nuvoton,ma35d1-clk.h253
-rw-r--r--include/dt-bindings/clock/nuvoton,npcm7xx-clock.h2
-rw-r--r--include/dt-bindings/clock/nuvoton,npcm845-clk.h49
-rw-r--r--include/dt-bindings/clock/nvidia,tegra264.h466
-rw-r--r--include/dt-bindings/clock/nxp,imx94-clock.h13
-rw-r--r--include/dt-bindings/clock/nxp,imx95-clock.h31
-rw-r--r--include/dt-bindings/clock/px30-cru.h4
-rw-r--r--include/dt-bindings/clock/qcom,apss-ipq.h6
-rw-r--r--include/dt-bindings/clock/qcom,camcc-sc7280.h127
-rw-r--r--include/dt-bindings/clock/qcom,camcc-sm8250.h138
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-qcm2290.h38
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sc7280.h59
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sc8280xp.h100
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sm6125.h41
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sm6350.h52
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sm8250.h10
l---------include/dt-bindings/clock/qcom,dispcc-sm8350.h1
-rw-r--r--include/dt-bindings/clock/qcom,dsi-phy-28nm.h9
-rw-r--r--include/dt-bindings/clock/qcom,gcc-apq8084.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq4019.h6
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq5018.h183
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq806x.h5
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq8074.h18
-rw-r--r--include/dt-bindings/clock/qcom,gcc-mdm9607.h104
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8909.h218
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8917.h210
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8939.h7
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8953.h238
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8976.h241
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8994.h13
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8998.h12
-rw-r--r--include/dt-bindings/clock/qcom,gcc-qcm2290.h188
-rw-r--r--include/dt-bindings/clock/qcom,gcc-qcs404.h4
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc7280.h2
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc8180x.h17
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc8280xp.h508
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm660.h8
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm845.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdx65.h122
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm6115.h201
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm6125.h240
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm6350.h178
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm8150.h8
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm8350.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sm8450.h246
-rw-r--r--include/dt-bindings/clock/qcom,glymur-dispcc.h114
-rw-r--r--include/dt-bindings/clock/qcom,glymur-gcc.h578
-rw-r--r--include/dt-bindings/clock/qcom,glymur-tcsr.h24
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sc7280.h35
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sc8280xp.h35
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sm6350.h37
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sm8350.h52
-rw-r--r--include/dt-bindings/clock/qcom,ipq-cmn-pll.h22
-rw-r--r--include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h16
-rw-r--r--include/dt-bindings/clock/qcom,ipq5332-gcc.h336
-rw-r--r--include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h22
-rw-r--r--include/dt-bindings/clock/qcom,ipq5424-gcc.h157
-rw-r--r--include/dt-bindings/clock/qcom,ipq5424-nsscc.h65
-rw-r--r--include/dt-bindings/clock/qcom,ipq9574-gcc.h206
-rw-r--r--include/dt-bindings/clock/qcom,ipq9574-nsscc.h152
-rw-r--r--include/dt-bindings/clock/qcom,kaanapali-gcc.h241
-rw-r--r--include/dt-bindings/clock/qcom,lcc-ipq806x.h2
-rw-r--r--include/dt-bindings/clock/qcom,lcc-mdm9615.h44
-rw-r--r--include/dt-bindings/clock/qcom,lpass-sc7280.h16
-rw-r--r--include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h48
-rw-r--r--include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h28
-rw-r--r--include/dt-bindings/clock/qcom,milos-camcc.h131
-rw-r--r--include/dt-bindings/clock/qcom,milos-dispcc.h61
-rw-r--r--include/dt-bindings/clock/qcom,milos-gcc.h210
-rw-r--r--include/dt-bindings/clock/qcom,milos-gpucc.h56
-rw-r--r--include/dt-bindings/clock/qcom,milos-videocc.h36
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-msm8960.h2
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-msm8974.h1
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-msm8994.h155
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-sdm660.h1
-rw-r--r--include/dt-bindings/clock/qcom,qca8k-nsscc.h101
-rw-r--r--include/dt-bindings/clock/qcom,qcm2290-gpucc.h32
-rw-r--r--include/dt-bindings/clock/qcom,qcs615-camcc.h110
-rw-r--r--include/dt-bindings/clock/qcom,qcs615-dispcc.h52
-rw-r--r--include/dt-bindings/clock/qcom,qcs615-gcc.h211
-rw-r--r--include/dt-bindings/clock/qcom,qcs615-gpucc.h39
-rw-r--r--include/dt-bindings/clock/qcom,qcs615-videocc.h30
-rw-r--r--include/dt-bindings/clock/qcom,qcs8300-camcc.h16
-rw-r--r--include/dt-bindings/clock/qcom,qcs8300-gcc.h234
-rw-r--r--include/dt-bindings/clock/qcom,qcs8300-gpucc.h17
-rw-r--r--include/dt-bindings/clock/qcom,qdu1000-ecpricc.h147
-rw-r--r--include/dt-bindings/clock/qcom,qdu1000-gcc.h177
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h25
-rw-r--r--include/dt-bindings/clock/qcom,rpmh.h2
-rw-r--r--include/dt-bindings/clock/qcom,sa8775p-camcc.h108
-rw-r--r--include/dt-bindings/clock/qcom,sa8775p-dispcc.h87
-rw-r--r--include/dt-bindings/clock/qcom,sa8775p-gcc.h320
-rw-r--r--include/dt-bindings/clock/qcom,sa8775p-gpucc.h50
-rw-r--r--include/dt-bindings/clock/qcom,sa8775p-videocc.h47
-rw-r--r--include/dt-bindings/clock/qcom,sar2130p-gcc.h185
-rw-r--r--include/dt-bindings/clock/qcom,sar2130p-gpucc.h33
-rw-r--r--include/dt-bindings/clock/qcom,sc8180x-camcc.h181
-rw-r--r--include/dt-bindings/clock/qcom,sc8280xp-camcc.h179
-rw-r--r--include/dt-bindings/clock/qcom,sc8280xp-lpasscc.h17
-rw-r--r--include/dt-bindings/clock/qcom,sdx75-gcc.h193
-rw-r--r--include/dt-bindings/clock/qcom,sm4450-camcc.h106
-rw-r--r--include/dt-bindings/clock/qcom,sm4450-dispcc.h51
-rw-r--r--include/dt-bindings/clock/qcom,sm4450-gcc.h197
-rw-r--r--include/dt-bindings/clock/qcom,sm4450-gpucc.h62
-rw-r--r--include/dt-bindings/clock/qcom,sm6115-dispcc.h36
-rw-r--r--include/dt-bindings/clock/qcom,sm6115-gpucc.h36
-rw-r--r--include/dt-bindings/clock/qcom,sm6115-lpasscc.h15
-rw-r--r--include/dt-bindings/clock/qcom,sm6125-gpucc.h31
-rw-r--r--include/dt-bindings/clock/qcom,sm6350-camcc.h109
-rw-r--r--include/dt-bindings/clock/qcom,sm6350-videocc.h27
-rw-r--r--include/dt-bindings/clock/qcom,sm6375-dispcc.h42
-rw-r--r--include/dt-bindings/clock/qcom,sm6375-gcc.h234
-rw-r--r--include/dt-bindings/clock/qcom,sm6375-gpucc.h36
-rw-r--r--include/dt-bindings/clock/qcom,sm7150-camcc.h113
-rw-r--r--include/dt-bindings/clock/qcom,sm7150-dispcc.h62
-rw-r--r--include/dt-bindings/clock/qcom,sm7150-gcc.h186
-rw-r--r--include/dt-bindings/clock/qcom,sm7150-videocc.h28
-rw-r--r--include/dt-bindings/clock/qcom,sm8150-camcc.h135
-rw-r--r--include/dt-bindings/clock/qcom,sm8350-videocc.h35
-rw-r--r--include/dt-bindings/clock/qcom,sm8450-camcc.h159
-rw-r--r--include/dt-bindings/clock/qcom,sm8450-dispcc.h103
-rw-r--r--include/dt-bindings/clock/qcom,sm8450-gpucc.h48
-rw-r--r--include/dt-bindings/clock/qcom,sm8450-videocc.h38
-rw-r--r--include/dt-bindings/clock/qcom,sm8550-camcc.h187
-rw-r--r--include/dt-bindings/clock/qcom,sm8550-dispcc.h101
-rw-r--r--include/dt-bindings/clock/qcom,sm8550-gcc.h231
-rw-r--r--include/dt-bindings/clock/qcom,sm8550-gpucc.h48
-rw-r--r--include/dt-bindings/clock/qcom,sm8550-tcsr.h18
-rw-r--r--include/dt-bindings/clock/qcom,sm8650-camcc.h195
l---------include/dt-bindings/clock/qcom,sm8650-dispcc.h1
-rw-r--r--include/dt-bindings/clock/qcom,sm8650-gcc.h254
-rw-r--r--include/dt-bindings/clock/qcom,sm8650-gpucc.h43
-rw-r--r--include/dt-bindings/clock/qcom,sm8650-tcsr.h18
-rw-r--r--include/dt-bindings/clock/qcom,sm8650-videocc.h23
-rw-r--r--include/dt-bindings/clock/qcom,sm8750-dispcc.h112
-rw-r--r--include/dt-bindings/clock/qcom,sm8750-gcc.h226
-rw-r--r--include/dt-bindings/clock/qcom,sm8750-tcsr.h15
-rw-r--r--include/dt-bindings/clock/qcom,sm8750-videocc.h40
-rw-r--r--include/dt-bindings/clock/qcom,videocc-sc7280.h27
-rw-r--r--include/dt-bindings/clock/qcom,videocc-sm8150.h4
-rw-r--r--include/dt-bindings/clock/qcom,x1e80100-camcc.h135
-rw-r--r--include/dt-bindings/clock/qcom,x1e80100-dispcc.h101
-rw-r--r--include/dt-bindings/clock/qcom,x1e80100-gcc.h548
-rw-r--r--include/dt-bindings/clock/qcom,x1e80100-gpucc.h54
-rw-r--r--include/dt-bindings/clock/qcom,x1e80100-tcsr.h23
-rw-r--r--include/dt-bindings/clock/r8a73a4-clock.h4
-rw-r--r--include/dt-bindings/clock/r8a7779-clock.h2
-rw-r--r--include/dt-bindings/clock/r8a7790-clock.h158
-rw-r--r--include/dt-bindings/clock/r8a7791-clock.h161
-rw-r--r--include/dt-bindings/clock/r8a7792-clock.h98
-rw-r--r--include/dt-bindings/clock/r8a7793-clock.h159
-rw-r--r--include/dt-bindings/clock/r8a7794-clock.h137
-rw-r--r--include/dt-bindings/clock/r8a779a0-cpg-mssr.h1
-rw-r--r--include/dt-bindings/clock/r8a779f0-cpg-mssr.h64
-rw-r--r--include/dt-bindings/clock/r8a779g0-cpg-mssr.h91
-rw-r--r--include/dt-bindings/clock/r9a06g032-sysctrl.h1
-rw-r--r--include/dt-bindings/clock/r9a07g043-cpg.h203
-rw-r--r--include/dt-bindings/clock/r9a07g044-cpg.h220
-rw-r--r--include/dt-bindings/clock/r9a07g054-cpg.h229
-rw-r--r--include/dt-bindings/clock/r9a08g045-cpg.h242
-rw-r--r--include/dt-bindings/clock/r9a09g011-cpg.h352
-rw-r--r--include/dt-bindings/clock/raspberrypi,rp1-clocks.h65
-rw-r--r--include/dt-bindings/clock/renesas,r8a779h0-cpg-mssr.h96
-rw-r--r--include/dt-bindings/clock/renesas,r9a08g045-vbattb.h13
-rw-r--r--include/dt-bindings/clock/renesas,r9a09g047-cpg.h28
-rw-r--r--include/dt-bindings/clock/renesas,r9a09g056-cpg.h27
-rw-r--r--include/dt-bindings/clock/renesas,r9a09g057-cpg.h30
-rw-r--r--include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h35
-rw-r--r--include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h35
-rw-r--r--include/dt-bindings/clock/rk3036-cru.h4
-rw-r--r--include/dt-bindings/clock/rk3128-cru.h4
-rw-r--r--include/dt-bindings/clock/rk3188-cru-common.h4
-rw-r--r--include/dt-bindings/clock/rk3228-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3288-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3308-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3328-cru.h2
-rw-r--r--include/dt-bindings/clock/rk3368-cru.h3
-rw-r--r--include/dt-bindings/clock/rk3399-cru.h10
-rw-r--r--include/dt-bindings/clock/rk3568-cru.h7
-rw-r--r--include/dt-bindings/clock/rockchip,rk3506-cru.h285
-rw-r--r--include/dt-bindings/clock/rockchip,rk3528-cru.h459
-rw-r--r--include/dt-bindings/clock/rockchip,rk3562-cru.h379
-rw-r--r--include/dt-bindings/clock/rockchip,rk3576-cru.h607
-rw-r--r--include/dt-bindings/clock/rockchip,rk3588-cru.h765
-rw-r--r--include/dt-bindings/clock/rockchip,rv1126-cru.h632
-rw-r--r--include/dt-bindings/clock/rockchip,rv1126b-cru.h392
-rw-r--r--include/dt-bindings/clock/s3c2410.h59
-rw-r--r--include/dt-bindings/clock/s3c2412.h70
-rw-r--r--include/dt-bindings/clock/s3c2443.h91
-rw-r--r--include/dt-bindings/clock/samsung,exynos2200-cmu.h431
-rw-r--r--include/dt-bindings/clock/samsung,exynos7870-cmu.h324
-rw-r--r--include/dt-bindings/clock/samsung,exynos8895.h453
-rw-r--r--include/dt-bindings/clock/samsung,exynos990.h438
-rw-r--r--include/dt-bindings/clock/samsung,exynosautov9.h360
-rw-r--r--include/dt-bindings/clock/samsung,exynosautov920.h308
-rw-r--r--include/dt-bindings/clock/sifive-fu540-prci.h8
-rw-r--r--include/dt-bindings/clock/sifive-fu740-prci.h18
-rw-r--r--include/dt-bindings/clock/sophgo,cv1800.h176
-rw-r--r--include/dt-bindings/clock/sophgo,sg2042-clkgen.h111
-rw-r--r--include/dt-bindings/clock/sophgo,sg2042-pll.h14
-rw-r--r--include/dt-bindings/clock/sophgo,sg2042-rpgate.h58
-rw-r--r--include/dt-bindings/clock/sophgo,sg2044-clk.h153
-rw-r--r--include/dt-bindings/clock/sophgo,sg2044-pll.h27
-rw-r--r--include/dt-bindings/clock/spacemit,k1-syscon.h394
-rw-r--r--include/dt-bindings/clock/sprd,ums512-clk.h397
-rw-r--r--include/dt-bindings/clock/st,stm32mp21-rcc.h426
-rw-r--r--include/dt-bindings/clock/st,stm32mp25-rcc.h492
-rw-r--r--include/dt-bindings/clock/starfive,jh7110-crg.h301
-rw-r--r--include/dt-bindings/clock/starfive-jh7100-audio.h41
-rw-r--r--include/dt-bindings/clock/starfive-jh7100.h202
-rw-r--r--include/dt-bindings/clock/ste-db8500-clkout.h17
-rw-r--r--include/dt-bindings/clock/stih416-clks.h17
-rw-r--r--include/dt-bindings/clock/stm32fx-clock.h4
-rw-r--r--include/dt-bindings/clock/stm32h7-clks.h4
-rw-r--r--include/dt-bindings/clock/stm32mp1-clks.h25
-rw-r--r--include/dt-bindings/clock/stm32mp13-clks.h229
-rw-r--r--include/dt-bindings/clock/stratix10-clock.h2
-rw-r--r--include/dt-bindings/clock/sun20i-d1-ccu.h158
-rw-r--r--include/dt-bindings/clock/sun20i-d1-r-ccu.h19
-rw-r--r--include/dt-bindings/clock/sun50i-a100-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun50i-a64-ccu.h4
-rw-r--r--include/dt-bindings/clock/sun50i-h6-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun50i-h6-r-ccu.h1
-rw-r--r--include/dt-bindings/clock/sun50i-h616-ccu.h8
-rw-r--r--include/dt-bindings/clock/sun55i-a523-ccu.h190
-rw-r--r--include/dt-bindings/clock/sun55i-a523-mcu-ccu.h54
-rw-r--r--include/dt-bindings/clock/sun55i-a523-r-ccu.h37
-rw-r--r--include/dt-bindings/clock/sun6i-rtc.h10
-rw-r--r--include/dt-bindings/clock/sun8i-h3-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun8i-v3s-ccu.h2
-rw-r--r--include/dt-bindings/clock/suniv-ccu-f1c100s.h2
-rw-r--r--include/dt-bindings/clock/sunplus,sp7021-clkc.h88
-rw-r--r--include/dt-bindings/clock/tegra234-clock.h897
-rw-r--r--include/dt-bindings/clock/tegra30-car.h3
-rw-r--r--include/dt-bindings/clock/thead,th1520-clk-ap.h130
-rw-r--r--include/dt-bindings/clock/ti-dra7-atl.h10
-rw-r--r--include/dt-bindings/clock/toshiba,tmpv770x.h189
-rw-r--r--include/dt-bindings/clock/versaclock.h (renamed from include/dt-bindings/clk/versaclock.h)0
-rw-r--r--include/dt-bindings/clock/xlnx-zynqmp-clk.h7
-rw-r--r--include/dt-bindings/clock/zx296718-clock.h164
-rw-r--r--include/dt-bindings/display/sdtv-standards.h2
-rw-r--r--include/dt-bindings/dma/fsl-edma.h21
-rw-r--r--include/dt-bindings/firmware/imx/rsrc.h309
-rw-r--r--include/dt-bindings/firmware/qcom,scm.h39
-rw-r--r--include/dt-bindings/gce/mediatek,mt6795-gce.h123
-rw-r--r--include/dt-bindings/gce/mt8186-gce.h421
-rw-r--r--include/dt-bindings/gce/mt8192-gce.h335
-rw-r--r--include/dt-bindings/gce/mt8195-gce.h812
-rw-r--r--include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h179
-rw-r--r--include/dt-bindings/gpio/amlogic-c3-gpio.h72
-rw-r--r--include/dt-bindings/gpio/gpio.h5
-rw-r--r--include/dt-bindings/gpio/meson-g12a-gpio.h2
-rw-r--r--include/dt-bindings/gpio/meson-s4-gpio.h99
-rw-r--r--include/dt-bindings/gpio/msc313-gpio.h71
-rw-r--r--include/dt-bindings/gpio/tegra234-gpio.h59
-rw-r--r--include/dt-bindings/gpio/tegra241-gpio.h42
-rw-r--r--include/dt-bindings/gpio/tegra256-gpio.h28
-rw-r--r--include/dt-bindings/i3c/i3c.h16
-rw-r--r--include/dt-bindings/iio/adc/adi,ad4695.h16
-rw-r--r--include/dt-bindings/iio/adc/adi,ad7606.h9
-rw-r--r--include/dt-bindings/iio/adc/adi,ad7768-1.h10
-rw-r--r--include/dt-bindings/iio/adc/at91-sama5d2_adc.h3
-rw-r--r--include/dt-bindings/iio/adc/gehc,pmc-adc.h10
-rw-r--r--include/dt-bindings/iio/adc/ingenic,adc.h1
-rw-r--r--include/dt-bindings/iio/adc/mediatek,mt6357-auxadc.h21
-rw-r--r--include/dt-bindings/iio/adc/mediatek,mt6358-auxadc.h22
-rw-r--r--include/dt-bindings/iio/adc/mediatek,mt6359-auxadc.h22
-rw-r--r--include/dt-bindings/iio/adc/mediatek,mt6363-auxadc.h24
-rw-r--r--include/dt-bindings/iio/adc/mediatek,mt6370_adc.h18
-rw-r--r--include/dt-bindings/iio/adc/mediatek,mt6373-auxadc.h19
-rw-r--r--include/dt-bindings/iio/addac/adi,ad74413r.h21
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h69
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h88
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h124
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h50
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h22
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h22
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h19
-rw-r--r--include/dt-bindings/iio/qcom,spmi-vadc.h3
-rw-r--r--include/dt-bindings/input/cros-ec-keyboard.h104
-rw-r--r--include/dt-bindings/interconnect/fsl,imx8mp.h59
-rw-r--r--include/dt-bindings/interconnect/mediatek,mt8183.h23
-rw-r--r--include/dt-bindings/interconnect/mediatek,mt8195.h44
-rw-r--r--include/dt-bindings/interconnect/qcom,glymur-rpmh.h205
-rw-r--r--include/dt-bindings/interconnect/qcom,ipq5332.h46
-rw-r--r--include/dt-bindings/interconnect/qcom,ipq5424.h60
-rw-r--r--include/dt-bindings/interconnect/qcom,ipq9574.h59
-rw-r--r--include/dt-bindings/interconnect/qcom,kaanapali-rpmh.h149
-rw-r--r--include/dt-bindings/interconnect/qcom,milos-rpmh.h141
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8909.h93
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8937.h93
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8953.h93
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8976.h97
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8996-cbf.h12
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8996.h163
-rw-r--r--include/dt-bindings/interconnect/qcom,qcm2290.h94
-rw-r--r--include/dt-bindings/interconnect/qcom,qcs615-rpmh.h136
-rw-r--r--include/dt-bindings/interconnect/qcom,qcs8300-rpmh.h189
-rw-r--r--include/dt-bindings/interconnect/qcom,qdu1000-rpmh.h98
-rw-r--r--include/dt-bindings/interconnect/qcom,rpm-icc.h13
-rw-r--r--include/dt-bindings/interconnect/qcom,sa8775p-rpmh.h231
-rw-r--r--include/dt-bindings/interconnect/qcom,sar2130p-rpmh.h137
-rw-r--r--include/dt-bindings/interconnect/qcom,sc7180.h3
-rw-r--r--include/dt-bindings/interconnect/qcom,sc7280.h165
-rw-r--r--include/dt-bindings/interconnect/qcom,sc8180x.h189
-rw-r--r--include/dt-bindings/interconnect/qcom,sc8280xp.h232
-rw-r--r--include/dt-bindings/interconnect/qcom,sdm670-rpmh.h136
-rw-r--r--include/dt-bindings/interconnect/qcom,sdx55.h2
-rw-r--r--include/dt-bindings/interconnect/qcom,sdx65.h67
-rw-r--r--include/dt-bindings/interconnect/qcom,sdx75.h100
-rw-r--r--include/dt-bindings/interconnect/qcom,sm6115.h111
-rw-r--r--include/dt-bindings/interconnect/qcom,sm6350.h148
-rw-r--r--include/dt-bindings/interconnect/qcom,sm7150-rpmh.h150
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8150.h3
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8250.h10
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8350.h10
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8450.h171
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8550-rpmh.h189
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8650-rpmh.h155
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8750-rpmh.h143
-rw-r--r--include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h183
-rw-r--r--include/dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h126
-rw-r--r--include/dt-bindings/interrupt-controller/apple-aic.h2
-rw-r--r--include/dt-bindings/interrupt-controller/arm-gic.h2
-rw-r--r--include/dt-bindings/interrupt-controller/aspeed-scu-ic.h14
-rw-r--r--include/dt-bindings/interrupt-controller/irqc-rzg2l.h25
-rw-r--r--include/dt-bindings/leds/common.h22
-rw-r--r--include/dt-bindings/leds/leds-lp55xx.h10
-rw-r--r--include/dt-bindings/leds/rt4831-backlight.h23
-rw-r--r--include/dt-bindings/mailbox/mediatek,mt8188-gce.h967
-rw-r--r--include/dt-bindings/mailbox/qcom-ipcc.h6
-rw-r--r--include/dt-bindings/mailbox/tegra186-hsp.h5
-rw-r--r--include/dt-bindings/media/c8sectpfe.h13
-rw-r--r--include/dt-bindings/media/tvp5150.h2
-rw-r--r--include/dt-bindings/media/video-interfaces.h27
-rw-r--r--include/dt-bindings/memory/mediatek,mt6893-memory-port.h288
-rw-r--r--include/dt-bindings/memory/mediatek,mt8188-memory-port.h489
-rw-r--r--include/dt-bindings/memory/mediatek,mt8189-memory-port.h283
-rw-r--r--include/dt-bindings/memory/mediatek,mt8365-larb-port.h90
-rw-r--r--include/dt-bindings/memory/mt6795-larb-port.h95
-rw-r--r--include/dt-bindings/memory/mt8186-memory-port.h217
-rw-r--r--include/dt-bindings/memory/mt8195-memory-port.h408
-rw-r--r--include/dt-bindings/memory/mtk-memory-port.h2
-rw-r--r--include/dt-bindings/memory/nvidia,tegra264.h136
-rw-r--r--include/dt-bindings/memory/tegra210-mc.h74
-rw-r--r--include/dt-bindings/memory/tegra234-mc.h544
-rw-r--r--include/dt-bindings/mfd/cros_ec.h18
-rw-r--r--include/dt-bindings/mfd/st,stpmic1.h2
-rw-r--r--include/dt-bindings/mfd/stm32f4-rcc.h1
-rw-r--r--include/dt-bindings/mfd/stm32f7-rcc.h2
-rw-r--r--include/dt-bindings/mux/ti-serdes.h92
-rw-r--r--include/dt-bindings/net/pcs-rzn1-miic.h33
-rw-r--r--include/dt-bindings/net/renesas,r9a09g077-pcs-miic.h36
-rw-r--r--include/dt-bindings/net/ti-dp83867.h4
-rw-r--r--include/dt-bindings/net/ti-dp83869.h4
-rw-r--r--include/dt-bindings/nvmem/microchip,sama7g5-otpc.h12
-rw-r--r--include/dt-bindings/phy/phy-cadence.h11
-rw-r--r--include/dt-bindings/phy/phy-imx8-pcie.h14
-rw-r--r--include/dt-bindings/phy/phy-lan966x-serdes.h14
-rw-r--r--include/dt-bindings/phy/phy-qcom-qmp.h24
-rw-r--r--include/dt-bindings/phy/phy.h3
-rw-r--r--include/dt-bindings/pinctrl/amlogic,pinctrl.h46
-rw-r--r--include/dt-bindings/pinctrl/apple.h13
-rw-r--r--include/dt-bindings/pinctrl/hisi.h12
-rw-r--r--include/dt-bindings/pinctrl/k3.h41
-rw-r--r--include/dt-bindings/pinctrl/keystone.h10
-rw-r--r--include/dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h1280
-rw-r--r--include/dt-bindings/pinctrl/mt65xx.h9
-rw-r--r--include/dt-bindings/pinctrl/mt6795-pinfunc.h908
-rw-r--r--include/dt-bindings/pinctrl/mt8135-pinfunc.h1294
-rw-r--r--include/dt-bindings/pinctrl/mt8183-pinfunc.h1120
-rw-r--r--include/dt-bindings/pinctrl/mt8186-pinfunc.h1174
-rw-r--r--include/dt-bindings/pinctrl/mt8365-pinfunc.h858
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-cv1800b.h63
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-cv1812h.h127
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-cv18xx.h19
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-sg2000.h127
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-sg2002.h79
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-sg2042.h196
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-sg2044.h221
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-starfive-jh7100.h275
-rw-r--r--include/dt-bindings/pinctrl/pinctrl-zynq.h17
-rw-r--r--include/dt-bindings/pinctrl/r7s9210-pinctrl.h2
-rw-r--r--include/dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h41
-rw-r--r--include/dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h31
-rw-r--r--include/dt-bindings/pinctrl/renesas,r9a09g077-pinctrl.h22
-rw-r--r--include/dt-bindings/pinctrl/rzg2l-pinctrl.h23
-rw-r--r--include/dt-bindings/pinctrl/rzv2m-pinctrl.h23
-rw-r--r--include/dt-bindings/pinctrl/samsung.h77
-rw-r--r--include/dt-bindings/pinctrl/sppctl-sp7021.h179
-rw-r--r--include/dt-bindings/pinctrl/sppctl.h31
-rw-r--r--include/dt-bindings/pinctrl/starfive,jh7110-pinctrl.h137
-rw-r--r--include/dt-bindings/pinctrl/stm32-pinfunc.h4
-rw-r--r--include/dt-bindings/power/allwinner,sun20i-d1-ppu.h10
-rw-r--r--include/dt-bindings/power/allwinner,sun55i-a523-pck-600.h15
-rw-r--r--include/dt-bindings/power/allwinner,sun55i-a523-ppu.h12
-rw-r--r--include/dt-bindings/power/allwinner,sun8i-v853-ppu.h10
-rw-r--r--include/dt-bindings/power/amlogic,a4-pwrc.h21
-rw-r--r--include/dt-bindings/power/amlogic,a5-pwrc.h21
-rw-r--r--include/dt-bindings/power/amlogic,c3-pwrc.h25
-rw-r--r--include/dt-bindings/power/amlogic,s6-pwrc.h29
-rw-r--r--include/dt-bindings/power/amlogic,s7-pwrc.h20
-rw-r--r--include/dt-bindings/power/amlogic,s7d-pwrc.h27
-rw-r--r--include/dt-bindings/power/amlogic,t7-pwrc.h63
-rw-r--r--include/dt-bindings/power/fsl,imx93-power.h15
-rw-r--r--include/dt-bindings/power/imx8mm-power.h31
-rw-r--r--include/dt-bindings/power/imx8mn-power.h20
-rw-r--r--include/dt-bindings/power/imx8mp-power.h59
-rw-r--r--include/dt-bindings/power/imx8mq-power.h3
-rw-r--r--include/dt-bindings/power/imx8ulp-power.h26
-rw-r--r--include/dt-bindings/power/marvell,pxa1908-power.h17
-rw-r--r--include/dt-bindings/power/mediatek,mt6735-power-controller.h14
-rw-r--r--include/dt-bindings/power/mediatek,mt6893-power.h35
-rw-r--r--include/dt-bindings/power/mediatek,mt8188-power.h44
-rw-r--r--include/dt-bindings/power/mediatek,mt8196-power.h58
-rw-r--r--include/dt-bindings/power/mediatek,mt8365-power.h19
-rw-r--r--include/dt-bindings/power/meson-a1-power.h2
-rw-r--r--include/dt-bindings/power/meson-axg-power.h2
-rw-r--r--include/dt-bindings/power/meson-g12a-power.h4
-rw-r--r--include/dt-bindings/power/meson-gxbb-power.h2
-rw-r--r--include/dt-bindings/power/meson-s4-power.h19
-rw-r--r--include/dt-bindings/power/meson-sm1-power.h2
-rw-r--r--include/dt-bindings/power/meson8-power.h2
-rw-r--r--include/dt-bindings/power/mt6795-power.h16
-rw-r--r--include/dt-bindings/power/mt6797-power.h9
-rw-r--r--include/dt-bindings/power/mt8186-power.h32
-rw-r--r--include/dt-bindings/power/mt8195-power.h46
-rw-r--r--include/dt-bindings/power/nvidia,tegra264-bpmp.h24
-rw-r--r--include/dt-bindings/power/qcom,rpmhpd.h268
-rw-r--r--include/dt-bindings/power/qcom-aoss-qmp.h14
-rw-r--r--include/dt-bindings/power/qcom-rpmpd.h269
-rw-r--r--include/dt-bindings/power/r8a7795-sysc.h1
-rw-r--r--include/dt-bindings/power/r8a779f0-sysc.h30
-rw-r--r--include/dt-bindings/power/r8a779g0-sysc.h46
-rw-r--r--include/dt-bindings/power/renesas,r8a779h0-sysc.h49
-rw-r--r--include/dt-bindings/power/rk3568-power.h32
-rw-r--r--include/dt-bindings/power/rk3588-power.h69
-rw-r--r--include/dt-bindings/power/rockchip,rk3528-power.h19
-rw-r--r--include/dt-bindings/power/rockchip,rk3562-power.h35
-rw-r--r--include/dt-bindings/power/rockchip,rk3576-power.h30
-rw-r--r--include/dt-bindings/power/rockchip,rv1126-power.h35
-rw-r--r--include/dt-bindings/power/rockchip,rv1126b-power-controller.h17
-rw-r--r--include/dt-bindings/power/starfive,jh7110-pmu.h21
-rw-r--r--include/dt-bindings/power/summit,smb347-charger.h6
-rw-r--r--include/dt-bindings/power/tegra234-powergate.h39
-rw-r--r--include/dt-bindings/power/thead,th1520-power.h19
-rw-r--r--include/dt-bindings/power/xlnx-zynqmp-power.h6
-rw-r--r--include/dt-bindings/regulator/nxp,pca9450-regulator.h18
-rw-r--r--include/dt-bindings/regulator/richtek,rt5190a-regulator.h15
-rw-r--r--include/dt-bindings/regulator/st,stm32mp13-regulator.h42
-rw-r--r--include/dt-bindings/regulator/st,stm32mp15-regulator.h40
-rw-r--r--include/dt-bindings/regulator/st,stm32mp25-regulator.h48
-rw-r--r--include/dt-bindings/regulator/ti,tps62864.h9
-rw-r--r--include/dt-bindings/reset/airoha,en7523-reset.h61
-rw-r--r--include/dt-bindings/reset/airoha,en7581-reset.h66
-rw-r--r--include/dt-bindings/reset/altr,rst-mgr-s10.h5
-rw-r--r--include/dt-bindings/reset/amlogic,c3-reset.h119
-rw-r--r--include/dt-bindings/reset/amlogic,meson-a1-audio-reset.h36
-rw-r--r--include/dt-bindings/reset/amlogic,meson-g12a-reset.h4
-rw-r--r--include/dt-bindings/reset/amlogic,meson-s4-reset.h125
-rw-r--r--include/dt-bindings/reset/aspeed,ast2700-scu.h124
-rw-r--r--include/dt-bindings/reset/bcm63268-reset.h4
-rw-r--r--include/dt-bindings/reset/bt1-ccu.h9
-rw-r--r--include/dt-bindings/reset/canaan,k230-rst.h90
-rw-r--r--include/dt-bindings/reset/delta,tn48m-reset.h20
-rw-r--r--include/dt-bindings/reset/eswin,eic7700-reset.h298
-rw-r--r--include/dt-bindings/reset/fsl,imx8ulp-sim-lpav.h16
-rw-r--r--include/dt-bindings/reset/imx8mp-reset-audiomix.h13
-rw-r--r--include/dt-bindings/reset/imx8ulp-pcc-reset.h59
-rw-r--r--include/dt-bindings/reset/mediatek,mt6735-infracfg.h27
-rw-r--r--include/dt-bindings/reset/mediatek,mt6735-mfgcfg.h9
-rw-r--r--include/dt-bindings/reset/mediatek,mt6735-pericfg.h31
-rw-r--r--include/dt-bindings/reset/mediatek,mt6735-vdecsys.h9
-rw-r--r--include/dt-bindings/reset/mediatek,mt6735-wdt.h17
-rw-r--r--include/dt-bindings/reset/mediatek,mt6795-resets.h53
-rw-r--r--include/dt-bindings/reset/mediatek,mt7988-resets.h19
-rw-r--r--include/dt-bindings/reset/mediatek,mt8196-resets.h26
-rw-r--r--include/dt-bindings/reset/mt2712-resets.h (renamed from include/dt-bindings/reset-controller/mt2712-resets.h)0
-rw-r--r--include/dt-bindings/reset/mt7621-reset.h37
-rw-r--r--include/dt-bindings/reset/mt7986-resets.h55
-rw-r--r--include/dt-bindings/reset/mt8173-resets.h2
-rw-r--r--include/dt-bindings/reset/mt8183-resets.h (renamed from include/dt-bindings/reset-controller/mt8183-resets.h)3
-rw-r--r--include/dt-bindings/reset/mt8186-resets.h41
-rw-r--r--include/dt-bindings/reset/mt8188-resets.h116
-rw-r--r--include/dt-bindings/reset/mt8192-resets.h (renamed from include/dt-bindings/reset-controller/mt8192-resets.h)11
-rw-r--r--include/dt-bindings/reset/mt8195-resets.h83
-rw-r--r--include/dt-bindings/reset/nuvoton,ma35d1-reset.h108
-rw-r--r--include/dt-bindings/reset/nvidia,tegra114-car.h13
-rw-r--r--include/dt-bindings/reset/nvidia,tegra264.h92
-rw-r--r--include/dt-bindings/reset/qcom,gcc-ipq5018.h122
-rw-r--r--include/dt-bindings/reset/qcom,gcc-ipq806x.h5
-rw-r--r--include/dt-bindings/reset/qcom,ipq5424-gcc.h310
-rw-r--r--include/dt-bindings/reset/qcom,ipq5424-nsscc.h46
-rw-r--r--include/dt-bindings/reset/qcom,ipq9574-gcc.h165
-rw-r--r--include/dt-bindings/reset/qcom,ipq9574-nsscc.h134
-rw-r--r--include/dt-bindings/reset/qcom,qca8k-nsscc.h76
-rw-r--r--include/dt-bindings/reset/qcom,sar2130p-gpucc.h14
-rw-r--r--include/dt-bindings/reset/qcom,sdm845-pdc.h2
-rw-r--r--include/dt-bindings/reset/qcom,sm8350-videocc.h18
-rw-r--r--include/dt-bindings/reset/qcom,sm8450-gpucc.h20
-rw-r--r--include/dt-bindings/reset/qcom,sm8650-gpucc.h20
-rw-r--r--include/dt-bindings/reset/qcom,x1e80100-gpucc.h19
-rw-r--r--include/dt-bindings/reset/rockchip,rk3506-cru.h211
-rw-r--r--include/dt-bindings/reset/rockchip,rk3528-cru.h241
-rw-r--r--include/dt-bindings/reset/rockchip,rk3562-cru.h358
-rw-r--r--include/dt-bindings/reset/rockchip,rk3576-cru.h564
-rw-r--r--include/dt-bindings/reset/rockchip,rk3588-cru.h795
-rw-r--r--include/dt-bindings/reset/rockchip,rv1126b-cru.h405
-rw-r--r--include/dt-bindings/reset/sama7g5-reset.h10
-rw-r--r--include/dt-bindings/reset/sophgo,sg2042-reset.h87
-rw-r--r--include/dt-bindings/reset/st,stm32mp21-rcc.h138
-rw-r--r--include/dt-bindings/reset/st,stm32mp25-rcc.h167
-rw-r--r--  include/dt-bindings/reset/starfive,jh7110-crg.h  214
-rw-r--r--  include/dt-bindings/reset/starfive-jh7100.h  126
-rw-r--r--  include/dt-bindings/reset/stericsson,db8500-prcc-reset.h  51
-rw-r--r--  include/dt-bindings/reset/stih415-resets.h  28
-rw-r--r--  include/dt-bindings/reset/stih416-resets.h  52
-rw-r--r--  include/dt-bindings/reset/stm32mp1-resets.h  17
-rw-r--r--  include/dt-bindings/reset/stm32mp13-resets.h  100
-rw-r--r--  include/dt-bindings/reset/sun20i-d1-ccu.h  79
-rw-r--r--  include/dt-bindings/reset/sun20i-d1-r-ccu.h  16
-rw-r--r--  include/dt-bindings/reset/sun50i-a100-ccu.h  2
-rw-r--r--  include/dt-bindings/reset/sun50i-a100-r-ccu.h  2
-rw-r--r--  include/dt-bindings/reset/sun50i-h6-ccu.h  2
-rw-r--r--  include/dt-bindings/reset/sun50i-h6-r-ccu.h  2
-rw-r--r--  include/dt-bindings/reset/sun50i-h616-ccu.h  6
-rw-r--r--  include/dt-bindings/reset/sun55i-a523-ccu.h  88
-rw-r--r--  include/dt-bindings/reset/sun55i-a523-mcu-ccu.h  30
-rw-r--r--  include/dt-bindings/reset/sun55i-a523-r-ccu.h  26
-rw-r--r--  include/dt-bindings/reset/sunplus,sp7021-reset.h  87
-rw-r--r--  include/dt-bindings/reset/tegra234-reset.h  178
-rw-r--r--  include/dt-bindings/reset/thead,th1520-reset.h  236
-rw-r--r--  include/dt-bindings/reset/toshiba,tmpv770x.h  48
-rw-r--r--  include/dt-bindings/soc/cpm1-fsl,tsa.h  13
-rw-r--r--  include/dt-bindings/soc/qcom,gpr.h  19
-rw-r--r--  include/dt-bindings/soc/qe-fsl,tsa.h  13
-rw-r--r--  include/dt-bindings/soc/rockchip,vop2.h  18
-rw-r--r--  include/dt-bindings/soc/samsung,boot-mode.h  18
-rw-r--r--  include/dt-bindings/soc/samsung,exynos-usi.h  26
-rw-r--r--  include/dt-bindings/soc/zte,pm_domains.h  24
-rw-r--r--  include/dt-bindings/sound/audio-graph.h  26
-rw-r--r--  include/dt-bindings/sound/cs35l45.h  77
-rw-r--r--  include/dt-bindings/sound/cs48l32.h  20
-rw-r--r--  include/dt-bindings/sound/microchip,pdmc.h  13
-rw-r--r--  include/dt-bindings/sound/qcom,lpass.h  31
-rw-r--r--  include/dt-bindings/sound/qcom,q6afe.h  201
-rw-r--r--  include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h  235
-rw-r--r--  include/dt-bindings/sound/qcom,wcd9335.h  14
-rw-r--r--  include/dt-bindings/sound/qcom,wcd934x.h  16
-rw-r--r--  include/dt-bindings/sound/rt5640.h  1
-rw-r--r--  include/dt-bindings/sound/tlv320adc3xxx.h  28
-rw-r--r--  include/dt-bindings/sound/tlv320aic31xx-micbias.h  9
-rw-r--r--  include/dt-bindings/sound/tlv320aic31xx.h  14
-rw-r--r--  include/dt-bindings/thermal/mediatek,lvts-thermal.h  83
-rw-r--r--  include/dt-bindings/thermal/tegra114-soctherm.h  19
-rw-r--r--  include/dt-bindings/thermal/tegra234-bpmp-thermal.h  19
-rw-r--r--  include/dt-bindings/usb/pd.h  89
-rw-r--r--  include/dt-bindings/watchdog/aspeed-wdt.h  230
-rw-r--r--  include/hyperv/hvgdk.h  308
-rw-r--r--  include/hyperv/hvgdk_ext.h  46
-rw-r--r--  include/hyperv/hvgdk_mini.h  1528
-rw-r--r--  include/hyperv/hvhdk.h  899
-rw-r--r--  include/hyperv/hvhdk_mini.h  531
-rw-r--r--  include/keys/asymmetric-parser.h  2
-rw-r--r--  include/keys/asymmetric-type.h  8
-rw-r--r--  include/keys/dns_resolver-type.h  4
-rw-r--r--  include/keys/rxrpc-type.h  17
-rw-r--r--  include/keys/system_keyring.h  44
-rw-r--r--  include/keys/trusted-type.h  2
-rw-r--r--  include/keys/trusted_caam.h  11
-rw-r--r--  include/keys/trusted_dcp.h  11
-rw-r--r--  include/keys/trusted_tpm.h  81
-rw-r--r--  include/kunit/assert.h  269
-rw-r--r--  include/kunit/attributes.h  50
-rw-r--r--  include/kunit/clk.h  33
-rw-r--r--  include/kunit/device.h  80
-rw-r--r--  include/kunit/of.h  121
-rw-r--r--  include/kunit/platform_device.h  21
-rw-r--r--  include/kunit/resource.h  503
-rw-r--r--  include/kunit/run-in-irq-context.h  129
-rw-r--r--  include/kunit/skbuff.h  57
-rw-r--r--  include/kunit/static_stub.h  113
-rw-r--r--  include/kunit/test-bug.h  60
-rw-r--r--  include/kunit/test.h  1881
-rw-r--r--  include/kunit/try-catch.h  4
-rw-r--r--  include/kunit/visibility.h  33
-rw-r--r--  include/kvm/arm_arch_timer.h  108
-rw-r--r--  include/kvm/arm_hypercalls.h  14
-rw-r--r--  include/kvm/arm_pmu.h  124
-rw-r--r--  include/kvm/arm_psci.h  20
-rw-r--r--  include/kvm/arm_vgic.h  141
-rw-r--r--  include/kvm/iodev.h  6
-rw-r--r--  include/linux/a.out.h  18
-rw-r--r--  include/linux/acct.h  1
-rw-r--r--  include/linux/acpi.h  489
-rw-r--r--  include/linux/acpi_amd_wbrf.h  91
-rw-r--r--  include/linux/acpi_dma.h  9
-rw-r--r--  include/linux/acpi_iort.h  34
-rw-r--r--  include/linux/acpi_mdio.h  33
-rw-r--r--  include/linux/acpi_pmtmr.h  13
-rw-r--r--  include/linux/acpi_rimt.h  28
-rw-r--r--  include/linux/acpi_viot.h  21
-rw-r--r--  include/linux/adi-axi-common.h  77
-rw-r--r--  include/linux/adreno-smmu-priv.h  45
-rw-r--r--  include/linux/aer.h  44
-rw-r--r--  include/linux/ahci_platform.h  10
-rw-r--r--  include/linux/aio.h  4
-rw-r--r--  include/linux/alarmtimer.h  10
-rw-r--r--  include/linux/alcor_pci.h  8
-rw-r--r--  include/linux/align.h  10
-rw-r--r--  include/linux/alloc_tag.h  268
-rw-r--r--  include/linux/amba/bus.h  80
-rw-r--r--  include/linux/amba/clcd-regs.h  87
-rw-r--r--  include/linux/amba/clcd.h  290
-rw-r--r--  include/linux/amba/mmci.h  6
-rw-r--r--  include/linux/amba/pl022.h  4
-rw-r--r--  include/linux/amba/pl093.h  77
-rw-r--r--  include/linux/amba/serial.h  261
-rw-r--r--  include/linux/amd-iommu.h  158
-rw-r--r--  include/linux/amd-pmf-io.h  65
-rw-r--r--  include/linux/annotate.h  127
-rw-r--r--  include/linux/anon_inodes.h  11
-rw-r--r--  include/linux/aperture.h  62
-rw-r--r--  include/linux/apple-gmux.h  145
-rw-r--r--  include/linux/apple_bl.h  27
-rw-r--r--  include/linux/arch_topology.h  49
-rw-r--r--  include/linux/args.h  28
-rw-r--r--  include/linux/arm-cci.h  2
-rw-r--r--  include/linux/arm-smccc.h  369
-rw-r--r--  include/linux/arm_ffa.h  515
-rw-r--r--  include/linux/arm_mpam.h  66
-rw-r--r--  include/linux/arm_sdei.h  4
-rw-r--r--  include/linux/array_size.h  13
-rw-r--r--  include/linux/ascii85.h  3
-rw-r--r--  include/linux/asn1_decoder.h  1
-rw-r--r--  include/linux/asn1_encoder.h  1
-rw-r--r--  include/linux/async.h  3
-rw-r--r--  include/linux/async_tx.h  5
-rw-r--r--  include/linux/ata.h  157
-rw-r--r--  include/linux/ata_platform.h  2
-rw-r--r--  include/linux/atalk.h  2
-rw-r--r--  include/linux/ath9k_platform.h  51
-rw-r--r--  include/linux/atm_tcp.h  2
-rw-r--r--  include/linux/atmdev.h  7
-rw-r--r--  include/linux/atmel-mci.h  46
-rw-r--r--  include/linux/atomic-arch-fallback.h  2361
-rw-r--r--  include/linux/atomic-fallback.h  2595
-rw-r--r--  include/linux/atomic.h  11
-rw-r--r--  include/linux/atomic/atomic-arch-fallback.h  4693
-rw-r--r--  include/linux/atomic/atomic-instrumented.h  5053
-rw-r--r--  include/linux/atomic/atomic-long.h  1812
-rw-r--r--  include/linux/attribute_container.h  6
-rw-r--r--  include/linux/audit.h  82
-rw-r--r--  include/linux/audit_arch.h  26
-rw-r--r--  include/linux/auxiliary_bus.h  222
-rw-r--r--  include/linux/auxvec.h  2
-rw-r--r--  include/linux/avf/virtchnl.h  1108
-rw-r--r--  include/linux/backing-dev-defs.h  57
-rw-r--r--  include/linux/backing-dev.h  148
-rw-r--r--  include/linux/backing-file.h  44
-rw-r--r--  include/linux/backlight.h  112
-rw-r--r--  include/linux/badblocks.h  40
-rw-r--r--  include/linux/balloon_compaction.h  116
-rw-r--r--  include/linux/base64.h  22
-rw-r--r--  include/linux/bcd.h  4
-rw-r--r--  include/linux/bcm47xx_nvram.h  7
-rw-r--r--  include/linux/bcm47xx_sprom.h  2
-rw-r--r--  include/linux/bcm963xx_nvram.h  16
-rw-r--r--  include/linux/bcma/bcma_driver_chipcommon.h  8
-rw-r--r--  include/linux/bcma/bcma_driver_pci.h  2
-rw-r--r--  include/linux/binfmts.h  42
-rw-r--r--  include/linux/bio-integrity.h  148
-rw-r--r--  include/linux/bio.h  523
-rw-r--r--  include/linux/bit_spinlock.h  8
-rw-r--r--  include/linux/bitfield.h  171
-rw-r--r--  include/linux/bitmap-str.h  18
-rw-r--r--  include/linux/bitmap.h  549
-rw-r--r--  include/linux/bitops.h  224
-rw-r--r--  include/linux/bits.h  85
-rw-r--r--  include/linux/blk-cgroup.h  650
-rw-r--r--  include/linux/blk-crypto-profile.h  228
-rw-r--r--  include/linux/blk-crypto.h  89
-rw-r--r--  include/linux/blk-integrity.h  183
-rw-r--r--  include/linux/blk-mq-dma.h  76
-rw-r--r--  include/linux/blk-mq-pci.h  11
-rw-r--r--  include/linux/blk-mq-rdma.h  11
-rw-r--r--  include/linux/blk-mq-virtio.h  11
-rw-r--r--  include/linux/blk-mq.h  826
-rw-r--r--  include/linux/blk-pm.h  3
-rw-r--r--  include/linux/blk_types.h  406
-rw-r--r--  include/linux/blkdev.h  2327
-rw-r--r--  include/linux/blktrace_api.h  30
-rw-r--r--  include/linux/bma150.h  4
-rw-r--r--  include/linux/bnxt/hsi.h  11166
-rw-r--r--  include/linux/bootconfig.h  109
-rw-r--r--  include/linux/bootmem_info.h  94
-rw-r--r--  include/linux/bottom_half.h  1
-rw-r--r--  include/linux/bpf-cgroup-defs.h  85
-rw-r--r--  include/linux/bpf-cgroup.h  382
-rw-r--r--  include/linux/bpf-netns.h  8
-rw-r--r--  include/linux/bpf.h  2559
-rw-r--r--  include/linux/bpf_crypto.h  24
-rw-r--r--  include/linux/bpf_local_storage.h  81
-rw-r--r--  include/linux/bpf_lsm.h  39
-rw-r--r--  include/linux/bpf_mem_alloc.h  51
-rw-r--r--  include/linux/bpf_mprog.h  343
-rw-r--r--  include/linux/bpf_types.h  30
-rw-r--r--  include/linux/bpf_verifier.h  762
-rw-r--r--  include/linux/bpfilter.h  25
-rw-r--r--  include/linux/bpfptr.h  89
-rw-r--r--  include/linux/brcmphy.h  185
-rw-r--r--  include/linux/bsg-lib.h  5
-rw-r--r--  include/linux/bsg.h  38
-rw-r--r--  include/linux/btf.h  464
-rw-r--r--  include/linux/btf_ids.h  125
-rw-r--r--  include/linux/buffer_head.h  345
-rw-r--r--  include/linux/bug.h  18
-rw-r--r--  include/linux/build_bug.h  19
-rw-r--r--  include/linux/buildid.h  40
-rw-r--r--  include/linux/bus/stm32_firewall_device.h  145
-rw-r--r--  include/linux/bvec.h  132
-rw-r--r--  include/linux/byteorder/generic.h  36
-rw-r--r--  include/linux/cache.h  108
-rw-r--r--  include/linux/cache_coherency.h  61
-rw-r--r--  include/linux/cacheflush.h  29
-rw-r--r--  include/linux/cacheinfo.h  83
-rw-r--r--  include/linux/call_once.h  66
-rw-r--r--  include/linux/can/bittiming.h  233
-rw-r--r--  include/linux/can/dev.h  131
-rw-r--r--  include/linux/can/dev/peak_canfd.h  4
-rw-r--r--  include/linux/can/led.h  51
-rw-r--r--  include/linux/can/length.h  302
-rw-r--r--  include/linux/can/platform/flexcan.h  23
-rw-r--r--  include/linux/can/platform/sja1000.h  2
-rw-r--r--  include/linux/can/rx-offload.h  23
-rw-r--r--  include/linux/can/skb.h  99
-rw-r--r--  include/linux/capability.h  139
-rw-r--r--  include/linux/cc_platform.h  135
-rw-r--r--  include/linux/cdrom.h  23
-rw-r--r--  include/linux/cdx/bitfield.h  90
-rw-r--r--  include/linux/cdx/cdx_bus.h  291
-rw-r--r--  include/linux/cdx/edac_cdx_pcol.h  28
-rw-r--r--  include/linux/cdx/mcdi.h  199
-rw-r--r--  include/linux/ceph/auth.h  4
-rw-r--r--  include/linux/ceph/ceph_debug.h  42
-rw-r--r--  include/linux/ceph/ceph_fs.h  58
-rw-r--r--  include/linux/ceph/decode.h  2
-rw-r--r--  include/linux/ceph/libceph.h  27
-rw-r--r--  include/linux/ceph/mdsmap.h  71
-rw-r--r--  include/linux/ceph/messenger.h  61
-rw-r--r--  include/linux/ceph/mon_client.h  2
-rw-r--r--  include/linux/ceph/osd_client.h  136
-rw-r--r--  include/linux/ceph/pagelist.h  12
-rw-r--r--  include/linux/ceph/rados.h  4
-rw-r--r--  include/linux/cfag12864b.h  17
-rw-r--r--  include/linux/cfi.h  83
-rw-r--r--  include/linux/cfi_types.h  68
-rw-r--r--  include/linux/cgroup-defs.h  372
-rw-r--r--  include/linux/cgroup.h  285
-rw-r--r--  include/linux/cgroup_api.h  1
-rw-r--r--  include/linux/cgroup_dmem.h  66
-rw-r--r--  include/linux/cgroup_namespace.h  58
-rw-r--r--  include/linux/cgroup_refcnt.h  96
-rw-r--r--  include/linux/cgroup_subsys.h  4
-rw-r--r--  include/linux/cleancache.h  124
-rw-r--r--  include/linux/cleanup.h  534
-rw-r--r--  include/linux/clk-provider.h  337
-rw-r--r--  include/linux/clk.h  299
-rw-r--r--  include/linux/clk/at91_pmc.h  34
-rw-r--r--  include/linux/clk/davinci.h  23
-rw-r--r--  include/linux/clk/mmp.h  18
-rw-r--r--  include/linux/clk/pxa.h  16
-rw-r--r--  include/linux/clk/renesas.h  145
-rw-r--r--  include/linux/clk/samsung.h  32
-rw-r--r--  include/linux/clk/spear.h  14
-rw-r--r--  include/linux/clk/sunxi-ng.h  15
-rw-r--r--  include/linux/clk/tegra.h  124
-rw-r--r--  include/linux/clk/ti.h  43
-rw-r--r--  include/linux/clkdev.h  7
-rw-r--r--  include/linux/clockchips.h  6
-rw-r--r--  include/linux/clocksource.h  50
-rw-r--r--  include/linux/clocksource_ids.h  5
-rw-r--r--  include/linux/closure.h  492
-rw-r--r--  include/linux/cm4000_cs.h  11
-rw-r--r--  include/linux/cma.h  46
-rw-r--r--  include/linux/cmdline-parser.h  46
-rw-r--r--  include/linux/cmpxchg-emu.h  15
-rw-r--r--  include/linux/codetag.h  115
-rw-r--r--  include/linux/comedi/comedi_8254.h  161
-rw-r--r--  include/linux/comedi/comedi_8255.h  54
-rw-r--r--  include/linux/comedi/comedi_isadma.h  114
-rw-r--r--  include/linux/comedi/comedi_pci.h  56
-rw-r--r--  include/linux/comedi/comedi_pcmcia.h  48
-rw-r--r--  include/linux/comedi/comedi_usb.h  41
-rw-r--r--  include/linux/comedi/comedidev.h  1054
-rw-r--r--  include/linux/comedi/comedilib.h  56
-rw-r--r--  include/linux/compaction.h  122
-rw-r--r--  include/linux/compat.h  229
-rw-r--r--  include/linux/compiler-clang.h  129
-rw-r--r--  include/linux/compiler-gcc.h  108
-rw-r--r--  include/linux/compiler-intel.h  34
-rw-r--r--  include/linux/compiler-version.h  30
-rw-r--r--  include/linux/compiler.h  248
-rw-r--r--  include/linux/compiler_attributes.h  187
-rw-r--r--  include/linux/compiler_types.h  404
-rw-r--r--  include/linux/completion.h  2
-rw-r--r--  include/linux/component.h  28
-rw-r--r--  include/linux/configfs.h  18
-rw-r--r--  include/linux/connector.h  7
-rw-r--r--  include/linux/console.h  693
-rw-r--r--  include/linux/console_struct.h  14
-rw-r--r--  include/linux/consolemap.h  84
-rw-r--r--  include/linux/container.h  2
-rw-r--r--  include/linux/container_of.h  41
-rw-r--r--  include/linux/context_tracking.h  139
-rw-r--r--  include/linux/context_tracking_irq.h  21
-rw-r--r--  include/linux/context_tracking_state.h  153
-rw-r--r--  include/linux/coredump.h  59
-rw-r--r--  include/linux/coresight-pmu.h  45
-rw-r--r--  include/linux/coresight.h  442
-rw-r--r--  include/linux/counter.h  884
-rw-r--r--  include/linux/counter_enum.h  45
-rw-r--r--  include/linux/cper.h  52
-rw-r--r--  include/linux/cpu.h  148
-rw-r--r--  include/linux/cpu_cooling.h  1
-rw-r--r--  include/linux/cpu_rmap.h  7
-rw-r--r--  include/linux/cpu_smt.h  33
-rw-r--r--  include/linux/cpufreq.h  445
-rw-r--r--  include/linux/cpuhotplug.h  210
-rw-r--r--  include/linux/cpuhplock.h  49
-rw-r--r--  include/linux/cpuidle.h  58
-rw-r--r--  include/linux/cpumask.h  1058
-rw-r--r--  include/linux/cpumask_api.h  1
-rw-r--r--  include/linux/cpumask_types.h  66
-rw-r--r--  include/linux/cpuset.h  99
-rw-r--r--  include/linux/crash_core.h  160
-rw-r--r--  include/linux/crash_dump.h  103
-rw-r--r--  include/linux/crash_reserve.h  66
-rw-r--r--  include/linux/crc-ccitt.h  7
-rw-r--r--  include/linux/crc-itu-t.h  2
-rw-r--r--  include/linux/crc-t10dif.h  12
-rw-r--r--  include/linux/crc16.h  9
-rw-r--r--  include/linux/crc32.h  123
-rw-r--r--  include/linux/crc32c.h  8
-rw-r--r--  include/linux/crc32poly.h  16
-rw-r--r--  include/linux/crc64.h  25
-rw-r--r--  include/linux/crc7.h  7
-rw-r--r--  include/linux/cred.h  154
-rw-r--r--  include/linux/crypto.h  444
-rw-r--r--  include/linux/cuda.h  2
-rw-r--r--  include/linux/damon.h  975
-rw-r--r--  include/linux/dax.h  268
-rw-r--r--  include/linux/dcache.h  304
-rw-r--r--  include/linux/dccp.h  291
-rw-r--r--  include/linux/debug_locks.h  7
-rw-r--r--  include/linux/debugfs.h  169
-rw-r--r--  include/linux/debugobjects.h  14
-rw-r--r--  include/linux/decompress/mm.h  14
-rw-r--r--  include/linux/decompress/unxz.h  5
-rw-r--r--  include/linux/delay.h  94
-rw-r--r--  include/linux/delayacct.h  199
-rw-r--r--  include/linux/dev_printk.h  104
-rw-r--r--  include/linux/devcoredump.h  58
-rw-r--r--  include/linux/devfreq-governor.h  102
-rw-r--r--  include/linux/devfreq.h  43
-rw-r--r--  include/linux/device-mapper.h  212
-rw-r--r--  include/linux/device.h  665
-rw-r--r--  include/linux/device/bus.h  145
-rw-r--r--  include/linux/device/class.h  153
-rw-r--r--  include/linux/device/devres.h  189
-rw-r--r--  include/linux/device/driver.h  49
-rw-r--r--  include/linux/device/faux.h  69
-rw-r--r--  include/linux/device_cgroup.h  7
-rw-r--r--  include/linux/devm-helpers.h  27
-rw-r--r--  include/linux/dfl.h  11
-rw-r--r--  include/linux/dibs.h  464
-rw-r--r--  include/linux/dim.h  121
-rw-r--r--  include/linux/dio.h  4
-rw-r--r--  include/linux/dlm.h  58
-rw-r--r--  include/linux/dlm_plock.h  2
-rw-r--r--  include/linux/dm-bufio.h  36
-rw-r--r--  include/linux/dm-dirty-log.h  9
-rw-r--r--  include/linux/dm-io.h  14
-rw-r--r--  include/linux/dm-kcopyd.h  24
-rw-r--r--  include/linux/dm-region-hash.h  9
-rw-r--r--  include/linux/dm-verity-loadpin.h  27
-rw-r--r--  include/linux/dma-buf-map.h  266
-rw-r--r--  include/linux/dma-buf-mapping.h  17
-rw-r--r--  include/linux/dma-buf.h  239
-rw-r--r--  include/linux/dma-buf/heaps/cma.h  16
-rw-r--r--  include/linux/dma-direct.h  54
-rw-r--r--  include/linux/dma-fence-array.h  43
-rw-r--r--  include/linux/dma-fence-chain.h  74
-rw-r--r--  include/linux/dma-fence-unwrap.h  77
-rw-r--r--  include/linux/dma-fence.h  192
-rw-r--r--  include/linux/dma-heap.h  25
-rw-r--r--  include/linux/dma-iommu.h  87
-rw-r--r--  include/linux/dma-map-ops.h  179
-rw-r--r--  include/linux/dma-mapping.h  339
-rw-r--r--  include/linux/dma-resv.h  386
-rw-r--r--  include/linux/dma/amd_xdma.h  16
-rw-r--r--  include/linux/dma/edma.h  83
-rw-r--r--  include/linux/dma/hsu.h  6
-rw-r--r--  include/linux/dma/imx-dma.h (renamed from include/linux/platform_data/dma-imx.h)  41
-rw-r--r--  include/linux/dma/ipu-dma.h  174
-rw-r--r--  include/linux/dma/k3-udma-glue.h  15
-rw-r--r--  include/linux/dma/qcom-gpi-dma.h  2
-rw-r--r--  include/linux/dma/qcom_adm.h  12
-rw-r--r--  include/linux/dma/ti-cppi5.h  1
-rw-r--r--  include/linux/dma/xilinx_dpdma.h  11
-rw-r--r--  include/linux/dmaengine.h  117
-rw-r--r--  include/linux/dmapool.h  29
-rw-r--r--  include/linux/dmar.h  151
-rw-r--r--  include/linux/dnotify.h  7
-rw-r--r--  include/linux/dpll.h  229
-rw-r--r--  include/linux/drbd.h  7
-rw-r--r--  include/linux/drbd_config.h  16
-rw-r--r--  include/linux/drbd_genl_api.h  2
-rw-r--r--  include/linux/drbd_limits.h  204
-rw-r--r--  include/linux/dsa/8021q.h  138
-rw-r--r--  include/linux/dsa/ksz_common.h  53
-rw-r--r--  include/linux/dsa/lan9303.h  4
-rw-r--r--  include/linux/dsa/loop.h  1
-rw-r--r--  include/linux/dsa/mv88e6xxx.h  13
-rw-r--r--  include/linux/dsa/ocelot.h  112
-rw-r--r--  include/linux/dsa/sja1105.h  51
-rw-r--r--  include/linux/dsa/tag_qca.h  87
-rw-r--r--  include/linux/dtpm.h  42
-rw-r--r--  include/linux/dw_apb_timer.h  3
-rw-r--r--  include/linux/dynamic_debug.h  248
-rw-r--r--  include/linux/dynamic_queue_limits.h  59
-rw-r--r--  include/linux/edac.h  264
-rw-r--r--  include/linux/eeprom_93cx6.h  11
-rw-r--r--  include/linux/eeprom_93xx46.h  29
-rw-r--r--  include/linux/efi.h  499
-rw-r--r--  include/linux/ehl_pse_io_aux.h  24
-rw-r--r--  include/linux/eisa.h  9
-rw-r--r--  include/linux/elevator.h  179
-rw-r--r--  include/linux/elf-fdpic.h  14
-rw-r--r--  include/linux/elf.h  2
-rw-r--r--  include/linux/elfcore-compat.h  5
-rw-r--r--  include/linux/elfcore.h  39
-rw-r--r--  include/linux/elfnote.h  13
-rw-r--r--  include/linux/energy_model.h  297
-rw-r--r--  include/linux/entry-common.h  432
-rw-r--r--  include/linux/entry-virt.h (renamed from include/linux/entry-kvm.h)  37
-rw-r--r--  include/linux/err.h  68
-rw-r--r--  include/linux/errno.h  1
-rw-r--r--  include/linux/error-injection.h  3
-rw-r--r--  include/linux/etherdevice.h  149
-rw-r--r--  include/linux/ethtool.h  895
-rw-r--r--  include/linux/ethtool_netlink.h  87
-rw-r--r--  include/linux/eventfd.h  27
-rw-r--r--  include/linux/eventpoll.h  24
-rw-r--r--  include/linux/evm.h  74
-rw-r--r--  include/linux/execmem.h  207
-rw-r--r--  include/linux/export-internal.h  72
-rw-r--r--  include/linux/export.h  185
-rw-r--r--  include/linux/exportfs.h  147
-rw-r--r--  include/linux/extcon.h  16
-rw-r--r--  include/linux/f2fs_fs.h  174
-rw-r--r--  include/linux/falloc.h  19
-rw-r--r--  include/linux/fanotify.h  69
-rw-r--r--  include/linux/fault-inject.h  73
-rw-r--r--  include/linux/fb.h  357
-rw-r--r--  include/linux/fbcon.h  13
-rw-r--r--  include/linux/fcntl.h  4
-rw-r--r--  include/linux/fdtable.h  52
-rw-r--r--  include/linux/fiemap.h  20
-rw-r--r--  include/linux/file.h  235
-rw-r--r--  include/linux/file_ref.h  218
-rw-r--r--  include/linux/fileattr.h  40
-rw-r--r--  include/linux/filelock.h  584
-rw-r--r--  include/linux/filter.h  686
-rw-r--r--  include/linux/find.h  697
-rw-r--r--  include/linux/firewire.h  210
-rw-r--r--  include/linux/firmware.h  131
-rw-r--r--  include/linux/firmware/cirrus/cs_dsp.h  357
-rw-r--r--  include/linux/firmware/cirrus/cs_dsp_test_utils.h  159
-rw-r--r--  include/linux/firmware/cirrus/wmfw.h  203
-rw-r--r--  include/linux/firmware/imx/dsp.h  6
-rw-r--r--  include/linux/firmware/imx/s4.h  20
-rw-r--r--  include/linux/firmware/imx/sci.h  16
-rw-r--r--  include/linux/firmware/imx/sm.h  97
-rw-r--r--  include/linux/firmware/imx/svc/rm.h  5
-rw-r--r--  include/linux/firmware/intel/stratix10-smc.h  332
-rw-r--r--  include/linux/firmware/intel/stratix10-svc-client.h  166
-rw-r--r--  include/linux/firmware/mediatek/mtk-adsp-ipc.h  59
-rw-r--r--  include/linux/firmware/meson/meson_sm.h  2
-rw-r--r--  include/linux/firmware/qcom/qcom_qseecom.h  54
-rw-r--r--  include/linux/firmware/qcom/qcom_scm.h  184
-rw-r--r--  include/linux/firmware/qcom/qcom_tzmem.h  80
-rw-r--r--  include/linux/firmware/samsung/exynos-acpm-protocol.h  70
-rw-r--r--  include/linux/firmware/thead/thead,th1520-aon.h  200
-rw-r--r--  include/linux/firmware/trusted_foundations.h  8
-rw-r--r--  include/linux/firmware/xlnx-event-manager.h  46
-rw-r--r--  include/linux/firmware/xlnx-zynqmp-ufs.h  38
-rw-r--r--  include/linux/firmware/xlnx-zynqmp.h  401
-rw-r--r--  include/linux/fixp-arith.h  1
-rw-r--r--  include/linux/flex_proportions.h  41
-rw-r--r--  include/linux/folio_queue.h  282
-rw-r--r--  include/linux/font.h  7
-rw-r--r--  include/linux/fortify-string.h  799
-rw-r--r--  include/linux/fpga/adi-axi-common.h  23
-rw-r--r--  include/linux/fpga/altera-pr-ip-core.h  1
-rw-r--r--  include/linux/fpga/fpga-bridge.h  36
-rw-r--r--  include/linux/fpga/fpga-mgr.h  104
-rw-r--r--  include/linux/fpga/fpga-region.h  49
-rw-r--r--  include/linux/fprobe.h  156
-rw-r--r--  include/linux/fpu.h  12
-rw-r--r--  include/linux/framer/framer-provider.h  193
-rw-r--r--  include/linux/framer/framer.h  205
-rw-r--r--  include/linux/framer/pef2256.h  31
-rw-r--r--  include/linux/freelist.h  129
-rw-r--r--  include/linux/freezer.h  259
-rw-r--r--  include/linux/frontswap.h  122
-rw-r--r--  include/linux/fs.h  3144
-rw-r--r--  include/linux/fs/super.h  238
-rw-r--r--  include/linux/fs/super_types.h  336
-rw-r--r--  include/linux/fs_api.h  1
-rw-r--r--  include/linux/fs_context.h  63
-rw-r--r--  include/linux/fs_dirent.h (renamed from include/linux/fs_types.h)  11
-rw-r--r--  include/linux/fs_enet_pd.h  165
-rw-r--r--  include/linux/fs_parser.h  25
-rw-r--r--  include/linux/fs_stack.h  10
-rw-r--r--  include/linux/fs_struct.h  17
-rw-r--r--  include/linux/fs_uart_pd.h  71
-rw-r--r--  include/linux/fscache-cache.h  627
-rw-r--r--  include/linux/fscache.h  1020
-rw-r--r--  include/linux/fscrypt.h  396
-rw-r--r--  include/linux/fsi-occ.h  2
-rw-r--r--  include/linux/fsi.h  4
-rw-r--r--  include/linux/fsl/enetc_mdio.h  24
-rw-r--r--  include/linux/fsl/mc.h  77
-rw-r--r--  include/linux/fsl/netc_global.h  19
-rw-r--r--  include/linux/fsl/ntmp.h  121
-rw-r--r--  include/linux/fsl/ptp_qoriq.h  11
-rw-r--r--  include/linux/fsl_devices.h  1
-rw-r--r--  include/linux/fsnotify.h  292
-rw-r--r--  include/linux/fsnotify_backend.h  567
-rw-r--r--  include/linux/fsverity.h  177
-rw-r--r--  include/linux/ftrace.h  576
-rw-r--r--  include/linux/ftrace_irq.h  13
-rw-r--r--  include/linux/ftrace_regs.h  43
-rw-r--r--  include/linux/futex.h  31
-rw-r--r--  include/linux/fw_table.h  61
-rw-r--r--  include/linux/fwctl.h  135
-rw-r--r--  include/linux/fwnode.h  73
-rw-r--r--  include/linux/fwnode_mdio.h  35
-rw-r--r--  include/linux/gameport.h  17
-rw-r--r--  include/linux/gcd.h  3
-rw-r--r--  include/linux/generic-radix-tree.h  195
-rw-r--r--  include/linux/generic_pt/common.h  191
-rw-r--r--  include/linux/generic_pt/iommu.h  293
-rw-r--r--  include/linux/genetlink.h  42
-rw-r--r--  include/linux/genhd.h  321
-rw-r--r--  include/linux/genl_magic_func.h  28
-rw-r--r--  include/linux/genl_magic_struct.h  10
-rw-r--r--  include/linux/gfp.h  630
-rw-r--r--  include/linux/gfp_api.h  1
-rw-r--r--  include/linux/gfp_types.h  386
-rw-r--r--  include/linux/goldfish.h  15
-rw-r--r--  include/linux/gpio.h  186
-rw-r--r--  include/linux/gpio/aspeed.h  4
-rw-r--r--  include/linux/gpio/consumer.h  223
-rw-r--r--  include/linux/gpio/driver.h  521
-rw-r--r--  include/linux/gpio/forwarder.h  41
-rw-r--r--  include/linux/gpio/generic.h  190
-rw-r--r--  include/linux/gpio/gpio-nomadik.h  292
-rw-r--r--  include/linux/gpio/gpio-reg.h  4
-rw-r--r--  include/linux/gpio/machine.h  16
-rw-r--r--  include/linux/gpio/property.h  14
-rw-r--r--  include/linux/gpio/regmap.h  33
-rw-r--r--  include/linux/gpio_keys.h  2
-rw-r--r--  include/linux/greybus.h  46
-rw-r--r--  include/linux/greybus/greybus_manifest.h  4
-rw-r--r--  include/linux/greybus/greybus_protocols.h  8
-rw-r--r--  include/linux/greybus/hd.h  2
-rw-r--r--  include/linux/greybus/module.h  2
-rw-r--r--  include/linux/greybus/svc.h  3
-rw-r--r--  include/linux/group_cpus.h  14
-rw-r--r--  include/linux/habanalabs/cpucp_if.h  1437
-rw-r--r--  include/linux/habanalabs/hl_boot_if.h  807
-rw-r--r--  include/linux/hardirq.h  14
-rw-r--r--  include/linux/hash.h  5
-rw-r--r--  include/linux/hashtable_api.h  1
-rw-r--r--  include/linux/hdlc.h  4
-rw-r--r--  include/linux/hdlcdrv.h  2
-rw-r--r--  include/linux/hdmi.h  21
-rw-r--r--  include/linux/hex.h  35
-rw-r--r--  include/linux/hfs_common.h  653
-rw-r--r--  include/linux/hid-over-i2c.h  117
-rw-r--r--  include/linux/hid-over-spi.h  155
-rw-r--r--  include/linux/hid-roccat.h  2
-rw-r--r--  include/linux/hid-sensor-hub.h  19
-rw-r--r--  include/linux/hid-sensor-ids.h  7
-rw-r--r--  include/linux/hid.h  351
-rw-r--r--  include/linux/hid_bpf.h  236
-rw-r--r--  include/linux/hidraw.h  1
-rw-r--r--  include/linux/highmem-internal.h  145
-rw-r--r--  include/linux/highmem.h  541
-rw-r--r--  include/linux/hippidevice.h  4
-rw-r--r--  include/linux/hisi_acc_qm.h  604
-rw-r--r--  include/linux/hmm-dma.h  33
-rw-r--r--  include/linux/hmm.h  39
-rw-r--r--  include/linux/host1x.h  195
-rw-r--r--  include/linux/host1x_context_bus.h  15
-rw-r--r--  include/linux/hp_sdc.h  2
-rw-r--r--  include/linux/hpet.h  2
-rw-r--r--  include/linux/hrtimer.h  249
-rw-r--r--  include/linux/hrtimer_api.h  1
-rw-r--r--  include/linux/hrtimer_defs.h  103
-rw-r--r--  include/linux/hrtimer_types.h  50
-rw-r--r--  include/linux/hsi/ssi_protocol.h  1
-rw-r--r--  include/linux/htcpld.h  25
-rw-r--r--  include/linux/hte.h  271
-rw-r--r--  include/linux/huge_mm.h  782
-rw-r--r--  include/linux/hugetlb.h  811
-rw-r--r--  include/linux/hugetlb_cgroup.h  126
-rw-r--r--  include/linux/hugetlb_inline.h  15
-rw-r--r--  include/linux/hung_task.h  101
-rw-r--r--  include/linux/hw_bitfield.h  62
-rw-r--r--  include/linux/hw_breakpoint.h  17
-rw-r--r--  include/linux/hw_random.h  11
-rw-r--r--  include/linux/hwmon-sysfs.h  1
-rw-r--r--  include/linux/hwmon.h  58
-rw-r--r--  include/linux/hwspinlock.h  18
-rw-r--r--  include/linux/hyperv.h  351
-rw-r--r--  include/linux/hypervisor.h  11
-rw-r--r--  include/linux/i2c-algo-pca.h  2
-rw-r--r--  include/linux/i2c-atr.h  149
-rw-r--r--  include/linux/i2c-mux.h  3
-rw-r--r--  include/linux/i2c-of-prober.h  140
-rw-r--r--  include/linux/i2c-smbus.h  14
-rw-r--r--  include/linux/i2c.h  231
-rw-r--r--  include/linux/i3c/ccc.h  6
-rw-r--r--  include/linux/i3c/device.h  84
-rw-r--r--  include/linux/i3c/master.h  110
-rw-r--r--  include/linux/i8042.h  29
-rw-r--r--  include/linux/i8253.h  2
-rw-r--r--  include/linux/i8254.h  21
-rw-r--r--  include/linux/icmp.h  32
-rw-r--r--  include/linux/icmpv6.h  21
-rw-r--r--  include/linux/ide.h  1623
-rw-r--r--  include/linux/idle_inject.h  3
-rw-r--r--  include/linux/idr.h  42
-rw-r--r--  include/linux/ieee80211-eht.h  1182
-rw-r--r--  include/linux/ieee80211-he.h  825
-rw-r--r--  include/linux/ieee80211-ht.h  292
-rw-r--r--  include/linux/ieee80211-mesh.h  230
-rw-r--r--  include/linux/ieee80211-nan.h  35
-rw-r--r--  include/linux/ieee80211-p2p.h  71
-rw-r--r--  include/linux/ieee80211-s1g.h  575
-rw-r--r--  include/linux/ieee80211-vht.h  236
-rw-r--r--  include/linux/ieee80211.h  2096
-rw-r--r--  include/linux/ieee802154.h  112
-rw-r--r--  include/linux/if_arp.h  6
-rw-r--r--  include/linux/if_bridge.h  70
-rw-r--r--  include/linux/if_eql.h  2
-rw-r--r--  include/linux/if_ether.h  3
-rw-r--r--  include/linux/if_hsr.h  46
-rw-r--r--  include/linux/if_ltalk.h  8
-rw-r--r--  include/linux/if_macvlan.h  13
-rw-r--r--  include/linux/if_pppol2tp.h  2
-rw-r--r--  include/linux/if_pppox.h  4
-rw-r--r--  include/linux/if_rmnet.h  36
-rw-r--r--  include/linux/if_tap.h  11
-rw-r--r--  include/linux/if_team.h  18
-rw-r--r--  include/linux/if_tun.h  21
-rw-r--r--  include/linux/if_vlan.h  172
-rw-r--r--  include/linux/igmp.h  14
-rw-r--r--  include/linux/iio/adc-helpers.h  27
-rw-r--r--  include/linux/iio/adc/ad_sigma_delta.h  94
-rw-r--r--  include/linux/iio/adc/adi-axi-adc.h  64
-rw-r--r--  include/linux/iio/adc/qcom-vadc-common.h  44
-rw-r--r--  include/linux/iio/afe/rescale.h  36
-rw-r--r--  include/linux/iio/backend.h  270
-rw-r--r--  include/linux/iio/buffer-dma.h  48
-rw-r--r--  include/linux/iio/buffer-dmaengine.h  28
-rw-r--r--  include/linux/iio/buffer.h  47
-rw-r--r--  include/linux/iio/buffer_impl.h  51
-rw-r--r--  include/linux/iio/common/cros_ec_sensors_core.h  12
-rw-r--r--  include/linux/iio/common/inv_sensors_timestamp.h  94
-rw-r--r--  include/linux/iio/common/st_sensors.h  51
-rw-r--r--  include/linux/iio/consumer.h  98
-rw-r--r--  include/linux/iio/driver.h  17
-rw-r--r--  include/linux/iio/events.h  31
-rw-r--r--  include/linux/iio/frequency/adf4350.h  2
-rw-r--r--  include/linux/iio/gyro/itg3200.h  2
-rw-r--r--  include/linux/iio/iio-gts-helper.h  213
-rw-r--r--  include/linux/iio/iio-opaque.h  36
-rw-r--r--  include/linux/iio/iio.h  431
-rw-r--r--  include/linux/iio/imu/adis.h  235
-rw-r--r--  include/linux/iio/kfifo_buf.h  8
-rw-r--r--  include/linux/iio/sw_device.h  3
-rw-r--r--  include/linux/iio/sw_trigger.h  3
-rw-r--r--  include/linux/iio/sysfs.h  11
-rw-r--r--  include/linux/iio/timer/stm32-lptim-trigger.h  9
-rw-r--r--  include/linux/iio/timer/stm32-timer-trigger.h  6
-rw-r--r--  include/linux/iio/trigger.h  37
-rw-r--r--  include/linux/iio/triggered_buffer.h  17
-rw-r--r--  include/linux/iio/types.h  9
-rw-r--r--  include/linux/ima.h  167
-rw-r--r--  include/linux/in6.h  7
-rw-r--r--  include/linux/indirect_call_wrapper.h  4
-rw-r--r--  include/linux/inet.h  2
-rw-r--r--  include/linux/inet_diag.h  21
-rw-r--r--  include/linux/inetdevice.h  43
-rw-r--r--  include/linux/init.h  87
-rw-r--r--  include/linux/init_task.h  8
-rw-r--r--  include/linux/initrd.h  2
-rw-r--r--  include/linux/inotify.h  3
-rw-r--r--  include/linux/input.h  28
-rw-r--r--  include/linux/input/as5011.h  1
-rw-r--r--  include/linux/input/auo-pixcir-ts.h  44
-rw-r--r--  include/linux/input/cy8ctmg110_pdata.h  11
-rw-r--r--  include/linux/input/cyttsp.h  29
-rw-r--r--  include/linux/input/elan-i2c-ids.h  5
-rw-r--r--  include/linux/input/matrix_keypad.h  53
-rw-r--r--  include/linux/input/mt.h  3
-rw-r--r--  include/linux/input/navpoint.h  9
-rw-r--r--  include/linux/input/touch-overlay.h  25
-rw-r--r--  include/linux/input/vivaldi-fmap.h  27
-rw-r--r--  include/linux/instruction_pointer.h  13
-rw-r--r--  include/linux/instrumentation.h  29
-rw-r--r--  include/linux/instrumented.h  133
-rw-r--r--  include/linux/int_log.h (renamed from include/media/dvb_math.h)  18
-rw-r--r--  include/linux/integrity.h  50
-rw-r--r--  include/linux/intel-iommu.h  830
-rw-r--r--  include/linux/intel-ish-client-if.h  21
-rw-r--r--  include/linux/intel-svm.h  42
-rw-r--r--  include/linux/intel_dg_nvm_aux.h  32
-rw-r--r--  include/linux/intel_pmt_features.h  157
-rw-r--r--  include/linux/intel_rapl.h  98
-rw-r--r--  include/linux/intel_tcc.h  19
-rw-r--r--  include/linux/intel_tpmi.h  37
-rw-r--r--  include/linux/intel_vsec.h  239
-rw-r--r--  include/linux/interconnect-clk.h  26
-rw-r--r--  include/linux/interconnect-provider.h  48
-rw-r--r--  include/linux/interconnect.h  56
-rw-r--r--  include/linux/interrupt.h  224
-rw-r--r--  include/linux/interval_tree.h  62
-rw-r--r--  include/linux/interval_tree_generic.h  10
-rw-r--r--  include/linux/io-64-nonatomic-hi-lo.h  16
-rw-r--r--  include/linux/io-64-nonatomic-lo-hi.h  16
-rw-r--r--  include/linux/io-mapping.h  29
-rw-r--r--  include/linux/io-pgtable.h  109
-rw-r--r--  include/linux/io.h  54
-rw-r--r--  include/linux/io_uring.h  28
-rw-r--r--  include/linux/io_uring/cmd.h  184
-rw-r--r--  include/linux/io_uring/net.h  18
-rw-r--r--  include/linux/io_uring_types.h  744
-rw-r--r--  include/linux/ioam6.h  13
-rw-r--r--  include/linux/ioam6_genl.h  13
-rw-r--r--  include/linux/ioam6_iptunnel.h  13
-rw-r--r--  include/linux/ioasid.h  82
-rw-r--r--  include/linux/iocontext.h  49
-rw-r--r--  include/linux/iomap.h  498
-rw-r--r--  include/linux/iommu-dma.h  64
-rw-r--r--  include/linux/iommu.h  1351
-rw-r--r--  include/linux/iommufd.h  400
-rw-r--r--  include/linux/iopoll.h  190
-rw-r--r--  include/linux/ioport.h  126
-rw-r--r--  include/linux/ioprio.h  84
-rw-r--r--  include/linux/ioremap.h  31
-rw-r--r--  include/linux/iosys-map.h  511
-rw-r--r--  include/linux/iov_iter.h  380
-rw-r--r--  include/linux/iova.h  90
-rw-r--r--  include/linux/iova_bitmap.h  52
-rw-r--r--  include/linux/ip.h  21
-rw-r--r--  include/linux/ipack.h  23
-rw-r--r--  include/linux/ipc.h  2
-rw-r--r--  include/linux/ipc_namespace.h  68
-rw-r--r--  include/linux/ipmi.h  23
-rw-r--r--  include/linux/ipmi_smi.h  76
-rw-r--r--  include/linux/ipv6.h  144
-rw-r--r--  include/linux/irq-entry-common.h  458
-rw-r--r--  include/linux/irq.h  244
-rw-r--r--  include/linux/irq_sim.h  17
-rw-r--r--  include/linux/irq_work.h  16
-rw-r--r--  include/linux/irq_work_types.h  14
-rw-r--r--  include/linux/irqbypass.h  46
-rw-r--r--  include/linux/irqchip.h  30
-rw-r--r--  include/linux/irqchip/arm-gic-common.h  29
-rw-r--r--  include/linux/irqchip/arm-gic-v3-prio.h  52
-rw-r--r--  include/linux/irqchip/arm-gic-v3.h  8
-rw-r--r--  include/linux/irqchip/arm-gic-v4.h  14
-rw-r--r--  include/linux/irqchip/arm-gic-v5.h  394
-rw-r--r--  include/linux/irqchip/arm-gic.h  12
-rw-r--r--  include/linux/irqchip/arm-vgic-info.h  49
-rw-r--r--  include/linux/irqchip/irq-davinci-aintc.h  27
-rw-r--r--  include/linux/irqchip/irq-davinci-cp-intc.h  25
-rw-r--r--  include/linux/irqchip/irq-ixp4xx.h  12
-rw-r--r--  include/linux/irqchip/irq-msi-lib.h  28
-rw-r--r--  include/linux/irqchip/irq-partition-percpu.h  53
-rw-r--r--  include/linux/irqchip/irq-renesas-rzv2h.h  23
-rw-r--r--  include/linux/irqchip/mmp.h  7
-rw-r--r--  include/linux/irqchip/mxs.h  11
-rw-r--r--  include/linux/irqchip/riscv-aplic.h  145
-rw-r--r--  include/linux/irqchip/riscv-imsic.h  95
-rw-r--r--  include/linux/irqchip/versatile-fpga.h  14
-rw-r--r--  include/linux/irqdesc.h  48
-rw-r--r--  include/linux/irqdomain.h  837
-rw-r--r--  include/linux/irqdomain_defs.h  32
-rw-r--r--  include/linux/irqflags.h  56
-rw-r--r--  include/linux/irqflags_types.h  22
-rw-r--r--  include/linux/irqhandler.h  2
-rw-r--r--  include/linux/irqnr.h  36
-rw-r--r--  include/linux/irqreturn.h  8
-rw-r--r--  include/linux/isa-dma.h  14
-rw-r--r--  include/linux/isa.h  52
-rw-r--r--  include/linux/iscsi_ibft.h  28
-rw-r--r--  include/linux/ism.h  67
-rw-r--r--  include/linux/iversion.h  128
-rw-r--r--  include/linux/jbd2.h  262
-rw-r--r--  include/linux/jhash.h  14
-rw-r--r--  include/linux/jiffies.h  239
-rw-r--r--  include/linux/jump_label.h  69
-rw-r--r--  include/linux/kallsyms.h  67
-rw-r--r--  include/linux/kasan-enabled.h  49
-rw-r--r--  include/linux/kasan-tags.h  15
-rw-r--r--  include/linux/kasan.h  503
-rw-r--r--  include/linux/kconfig.h  6
-rw-r--r--  include/linux/kcore.h  16
-rw-r--r--  include/linux/kcov.h  25
-rw-r--r--  include/linux/kcsan-checks.h  86
-rw-r--r--  include/linux/kcsan.h  11
-rw-r--r--  include/linux/kdb.h  51
-rw-r--r--  include/linux/kernel-page-flags.h  4
-rw-r--r--  include/linux/kernel.h  446
-rw-r--r--  include/linux/kernel_read_file.h  33
-rw-r--r--  include/linux/kernel_stat.h  31
-rw-r--r--  include/linux/kernfs.h  146
-rw-r--r--  include/linux/kexec.h  248
-rw-r--r--  include/linux/kexec_handover.h  143
-rw-r--r--  include/linux/key-type.h  10
-rw-r--r--  include/linux/key.h  22
-rw-r--r--  include/linux/keyslot-manager.h  120
-rw-r--r--  include/linux/kfence.h  67
-rw-r--r--  include/linux/kfifo.h  187
-rw-r--r--  include/linux/kgdb.h  22
-rw-r--r--  include/linux/kho/abi/luo.h  166
-rw-r--r--  include/linux/kho/abi/memfd.h  77
-rw-r--r--  include/linux/khugepaged.h  78
-rw-r--r--  include/linux/kmemleak.h  16
-rw-r--r--  include/linux/kmod.h  3
-rw-r--r--  include/linux/kmsan-checks.h  98
-rw-r--r--  include/linux/kmsan.h  411
-rw-r--r--  include/linux/kmsan_string.h  21
-rw-r--r--  include/linux/kmsan_types.h  37
-rw-r--r--  include/linux/kmsg_dump.h  22
-rw-r--r--  include/linux/kobject.h  107
-rw-r--r--  include/linux/kobject_api.h  1
-rw-r--r--  include/linux/kobject_ns.h  6
-rw-r--r--  include/linux/kprobes.h  221
-rw-r--r--  include/linux/kref.h  48
-rw-r--r--  include/linux/kref_api.h  1
-rw-r--r--  include/linux/ksm.h  110
-rw-r--r--  include/linux/kstack_erase.h  89
-rw-r--r--  include/linux/kstrtox.h  151
-rw-r--r--  include/linux/kthread.h  117
-rw-r--r--  include/linux/ktime.h  13
-rw-r--r--  include/linux/ktime_api.h  1
-rw-r--r--  include/linux/kvm_dirty_ring.h  57
-rw-r--r--  include/linux/kvm_host.h  1675
-rw-r--r--  include/linux/kvm_irqfd.h  7
-rw-r--r--  include/linux/kvm_types.h  83
-rw-r--r--  include/linux/lapb.h  5
-rw-r--r--  include/linux/latencytop.h  3
-rw-r--r--  include/linux/lcd.h  56
-rw-r--r--  include/linux/leafops.h  619
-rw-r--r--  include/linux/led-class-flash.h  42
-rw-r--r--  include/linux/led-class-multicolor.h  29
-rw-r--r--  include/linux/leds-expresswire.h  38
-rw-r--r--  include/linux/leds.h  255
-rw-r--r--  include/linux/libata.h  697
-rw-r--r--  include/linux/libgcc.h  11
-rw-r--r--  include/linux/libnvdimm.h  55
-rw-r--r--  include/linux/libps2.h  62
-rw-r--r--  include/linux/lightnvm.h  697
-rw-r--r--  include/linux/limits.h  3
-rw-r--r--  include/linux/linear_range.h  13
-rw-r--r--  include/linux/linkage.h  72
-rw-r--r--  include/linux/linkmode.h  42
-rw-r--r--  include/linux/linux_logo.h  3
-rw-r--r--  include/linux/list.h  233
-rw-r--r--  include/linux/list_lru.h  163
-rw-r--r--  include/linux/list_nulls.h  1
-rw-r--r--  include/linux/litex.h  103
-rw-r--r--  include/linux/livepatch.h  34
-rw-r--r--  include/linux/livepatch_external.h  76
-rw-r--r--  include/linux/livepatch_helpers.h  77
-rw-r--r--  include/linux/livepatch_sched.h  25
-rw-r--r--  include/linux/liveupdate.h  138
-rw-r--r--  include/linux/llist.h  75
-rw-r--r--  include/linux/llist_api.h  1
-rw-r--r--  include/linux/local_lock.h  65
-rw-r--r--  include/linux/local_lock_internal.h  250
-rw-r--r--  include/linux/lockd/bind.h  5
-rw-r--r--  include/linux/lockd/lockd.h  77
-rw-r--r--  include/linux/lockd/xdr.h  34
-rw-r--r--  include/linux/lockd/xdr4.h  30
-rw-r--r--  include/linux/lockdep.h  213
-rw-r--r--  include/linux/lockdep_api.h  1
-rw-r--r--  include/linux/lockdep_types.h  75
-rw-r--r--  include/linux/lockref.h  30
-rw-r--r--  include/linux/log2.h  20
-rw-r--r--  include/linux/logic_iomem.h  62
-rw-r--r--  include/linux/logic_pio.h  9
-rw-r--r--  include/linux/lru_cache.h  15
-rw-r--r--  include/linux/lsm/apparmor.h  17
-rw-r--r--  include/linux/lsm/bpf.h  16
-rw-r--r--  include/linux/lsm/selinux.h  16
-rw-r--r--  include/linux/lsm/smack.h  17
-rw-r--r--  include/linux/lsm_audit.h  36
-rw-r--r--  include/linux/lsm_count.h  135
-rw-r--r--  include/linux/lsm_hook_defs.h  193
-rw-r--r--  include/linux/lsm_hooks.h  1710
-rw-r--r--  include/linux/lwq.h  124
-rw-r--r--  include/linux/lz4.h  6
-rw-r--r--  include/linux/lzo.h  8
-rw-r--r--  include/linux/mISDNif.h  3
-rw-r--r--  include/linux/mailbox/exynos-message.h  19
-rw-r--r--  include/linux/mailbox/mchp-ipc.h  33
-rw-r--r--  include/linux/mailbox/mtk-cmdq-mailbox.h  29
-rw-r--r--  include/linux/mailbox/riscv-rpmi-message.h  243
-rw-r--r--  include/linux/mailbox/zynqmp-ipi-message.h  2
-rw-r--r--  include/linux/mailbox_client.h  3
-rw-r--r--  include/linux/mailbox_controller.h  13
-rw-r--r--  include/linux/maple.h  3
-rw-r--r--  include/linux/maple_tree.h  903
-rw-r--r--  include/linux/marvell_phy.h  11
-rw-r--r--  include/linux/math.h  80
-rw-r--r--  include/linux/math64.h  157
-rw-r--r--  include/linux/max17040_battery.h  16
-rw-r--r--  include/linux/mbcache.h  42
-rw-r--r--  include/linux/mc146818rtc.h  9
-rw-r--r--  include/linux/mc33xs2410.h  16
-rw-r--r--  include/linux/mcb.h  13
-rw-r--r--  include/linux/mdev.h  175
-rw-r--r--  include/linux/mdio-bitbang.h  6
-rw-r--r--  include/linux/mdio.h  424
-rw-r--r--  include/linux/mdio/mdio-i2c.h  10
-rw-r--r--  include/linux/mdio/mdio-mscc-miim.h  19
-rw-r--r--  include/linux/mdio/mdio-regmap.h  26
-rw-r--r--  include/linux/mdio/mdio-xgene.h  4
-rw-r--r--  include/linux/mei_aux.h  31
-rw-r--r--  include/linux/mei_cl_bus.h  36
-rw-r--r--  include/linux/mem_encrypt.h  27
-rw-r--r--  include/linux/memblock.h  193
-rw-r--r--  include/linux/memcontrol.h  1493
-rw-r--r--  include/linux/memfd.h  24
-rw-r--r--  include/linux/memory-failure.h  17
-rw-r--r--  include/linux/memory-tiers.h  155
-rw-r--r--  include/linux/memory.h  164
-rw-r--r--  include/linux/memory/ti-aemif.h  32
-rw-r--r--  include/linux/memory_hotplug.h  174
-rw-r--r--  include/linux/mempolicy.h  93
-rw-r--r--  include/linux/mempool.h  114
-rw-r--r--  include/linux/memregion.h  50
-rw-r--r--  include/linux/memremap.h  182
-rw-r--r--  include/linux/memstick.h  2
-rw-r--r--  include/linux/mfd/88pm80x.h  2
-rw-r--r--  include/linux/mfd/88pm860x.h  6
-rw-r--r--  include/linux/mfd/88pm886.h  136
-rw-r--r--  include/linux/mfd/aat2870.h  3
-rw-r--r--  include/linux/mfd/abx500/ab8500.h  10
-rw-r--r--  include/linux/mfd/adp5585.h  226
-rw-r--r--  include/linux/mfd/arizona/pdata.h  6
-rw-r--r--  include/linux/mfd/asic3.h  313
-rw-r--r--  include/linux/mfd/atmel-hlcdc.h  10
-rw-r--r--  include/linux/mfd/axp20x.h  336
-rw-r--r--  include/linux/mfd/bcm2835-pm.h  1
-rw-r--r--  include/linux/mfd/bcm590xx.h  28
-rw-r--r--  include/linux/mfd/bq257xx.h  104
-rw-r--r--  include/linux/mfd/cgbc.h  44
-rw-r--r--  include/linux/mfd/core.h  26
-rw-r--r--  include/linux/mfd/cs40l50.h  137
-rw-r--r--  include/linux/mfd/cs42l43-regs.h  1184
-rw-r--r--  include/linux/mfd/cs42l43.h  103
-rw-r--r--  include/linux/mfd/da9052/da9052.h  2
-rw-r--r--  include/linux/mfd/da9055/pdata.h  13
-rw-r--r--  include/linux/mfd/da9063/core.h  2
-rw-r--r--  include/linux/mfd/da9063/registers.h  23
-rw-r--r--  include/linux/mfd/davinci_voicecodec.h  8
-rw-r--r--  include/linux/mfd/dbx500-prcmu.h  54
-rw-r--r--  include/linux/mfd/dm355evm_msp.h  79
-rw-r--r--  include/linux/mfd/ds1wm.h  29
-rw-r--r--  include/linux/mfd/ezx-pcap.h  1
-rw-r--r--  include/linux/mfd/hi6421-spmi-pmic.h  30
-rw-r--r--  include/linux/mfd/hi655x-pmic.h  7
-rw-r--r--  include/linux/mfd/htc-pasic3.h  54
-rw-r--r--  include/linux/mfd/idt82p33_reg.h  115
-rw-r--r--  include/linux/mfd/idt8a340_reg.h  768
-rw-r--r--  include/linux/mfd/idtRC38xxx_reg.h  273
-rw-r--r--  include/linux/mfd/intel-m10-bmc.h  231
-rw-r--r--  include/linux/mfd/intel_soc_pmic.h  9
-rw-r--r--  include/linux/mfd/ipaq-micro.h  4
-rw-r--r--  include/linux/mfd/iqs62x.h  7
-rw-r--r--  include/linux/mfd/lm3533.h  5
-rw-r--r--  include/linux/mfd/loongson-se.h  53
-rw-r--r--  include/linux/mfd/lp3943.h  1
-rw-r--r--  include/linux/mfd/lp873x.h  10
-rw-r--r--  include/linux/mfd/lp87565.h  40
-rw-r--r--  include/linux/mfd/lp8788.h  45
-rw-r--r--  include/linux/mfd/lpc_ich.h  9
-rw-r--r--  include/linux/mfd/macsmc.h  280
-rw-r--r--  include/linux/mfd/madera/pdata.h  5
-rw-r--r--  include/linux/mfd/max14577-private.h  2
-rw-r--r--  include/linux/mfd/max14577.h  2
-rw-r--r--  include/linux/mfd/max5970.h  84
-rw-r--r--  include/linux/mfd/max7360.h  109
-rw-r--r--  include/linux/mfd/max77541.h  91
-rw-r--r--  include/linux/mfd/max77686-private.h  34
-rw-r--r--  include/linux/mfd/max77686.h  2
-rw-r--r--  include/linux/mfd/max77693-common.h  4
-rw-r--r--  include/linux/mfd/max77693-private.h  20
-rw-r--r--  include/linux/mfd/max77693.h  2
-rw-r--r--  include/linux/mfd/max77705-private.h  195
-rw-r--r--  include/linux/mfd/max77714.h  60
-rw-r--r--  include/linux/mfd/max77759.h  165
-rw-r--r--  include/linux/mfd/max77843-private.h  2
-rw-r--r--  include/linux/mfd/max8997-private.h  3
-rw-r--r--  include/linux/mfd/max8997.h  6
-rw-r--r--  include/linux/mfd/max8998-private.h  2
-rw-r--r--  include/linux/mfd/max8998.h  8
-rw-r--r--  include/linux/mfd/mc13xxx.h  6
-rw-r--r--  include/linux/mfd/mt6328/core.h  53
-rw-r--r--  include/linux/mfd/mt6328/registers.h  822
-rw-r--r--  include/linux/mfd/mt6331/core.h  40
-rw-r--r--  include/linux/mfd/mt6331/registers.h  584
-rw-r--r--  include/linux/mfd/mt6332/core.h  65
-rw-r--r--  include/linux/mfd/mt6332/registers.h  642
-rw-r--r--  include/linux/mfd/mt6357/core.h  119
-rw-r--r--  include/linux/mfd/mt6357/registers.h  1574
-rw-r--r--  include/linux/mfd/mt6358/core.h  8
-rw-r--r--  include/linux/mfd/mt6358/registers.h  32
-rw-r--r--  include/linux/mfd/mt6359/core.h  133
-rw-r--r--  include/linux/mfd/mt6359/registers.h  531
-rw-r--r--  include/linux/mfd/mt6359p/registers.h  249
-rw-r--r--  include/linux/mfd/mt6360.h  240
-rw-r--r--  include/linux/mfd/mt6397/core.h  16
-rw-r--r--  include/linux/mfd/mt6397/rtc.h  6
-rw-r--r--  include/linux/mfd/nct6694.h  102
-rw-r--r--  include/linux/mfd/ntxec.h  4
-rw-r--r--  include/linux/mfd/ocelot.h  62
-rw-r--r--  include/linux/mfd/palmas.h  13
-rw-r--r--  include/linux/mfd/pcf50633/adc.h  69
-rw-r--r--  include/linux/mfd/pcf50633/backlight.h  42
-rw-r--r--  include/linux/mfd/pcf50633/core.h  234
-rw-r--r--  include/linux/mfd/pcf50633/gpio.h  48
-rw-r--r--  include/linux/mfd/pcf50633/mbc.h  130
-rw-r--r--  include/linux/mfd/pcf50633/pmic.h  68
-rw-r--r--  include/linux/mfd/pf1550.h  273
-rw-r--r--  include/linux/mfd/qnap-mcu.h  28
-rw-r--r--  include/linux/mfd/rk808.h  736
-rw-r--r--  include/linux/mfd/rn5t618.h  9
-rw-r--r--  include/linux/mfd/rohm-bd70528.h  391
-rw-r--r--  include/linux/mfd/rohm-bd71828.h  78
-rw-r--r--  include/linux/mfd/rohm-bd96801.h  217
-rw-r--r--  include/linux/mfd/rohm-bd96802.h  74
-rw-r--r--  include/linux/mfd/rohm-generic.h  15
-rw-r--r--  include/linux/mfd/rsmu.h  39
-rw-r--r--  include/linux/mfd/rt5033-private.h  81
-rw-r--r--  include/linux/mfd/rt5033.h  25
-rw-r--r--  include/linux/mfd/rz-mtu3.h  191
-rw-r--r--  include/linux/mfd/samsung/core.h  44
-rw-r--r--  include/linux/mfd/samsung/irq.h  203
-rw-r--r--  include/linux/mfd/samsung/rtc.h  37
-rw-r--r--  include/linux/mfd/samsung/s2mpg10.h  454
-rw-r--r--  include/linux/mfd/samsung/s2mpu05.h  183
-rw-r--r--  include/linux/mfd/samsung/s5m8763.h  90
-rw-r--r--  include/linux/mfd/si476x-platform.h  2
-rw-r--r--  include/linux/mfd/sta2x11-mfd.h  506
-rw-r--r--  include/linux/mfd/stm32-lptimer.h  42
-rw-r--r--  include/linux/mfd/stm32-timers.h  189
-rw-r--r--  include/linux/mfd/stmfx.h  2
-rw-r--r--  include/linux/mfd/stpmic1.h  12
-rw-r--r--  include/linux/mfd/sun4i-gpadc.h  4
-rw-r--r--  include/linux/mfd/sy7636a.h  34
-rw-r--r--  include/linux/mfd/syscon.h  33
-rw-r--r--  include/linux/mfd/syscon/atmel-smc.h  8
-rw-r--r--  include/linux/mfd/syscon/imx6q-iomuxc-gpr.h  6
-rw-r--r--  include/linux/mfd/t7l66xb.h  30
-rw-r--r--  include/linux/mfd/tc6387xb.h  20
-rw-r--r--  include/linux/mfd/tc6393xb.h  56
-rw-r--r--  include/linux/mfd/ti_am335x_tscadc.h  118
-rw-r--r--  include/linux/mfd/tmio.h  139
-rw-r--r--  include/linux/mfd/tps65010.h  11
-rw-r--r--  include/linux/mfd/tps65086.h  33
-rw-r--r--  include/linux/mfd/tps65217.h  10
-rw-r--r--  include/linux/mfd/tps65218.h  12
-rw-r--r--  include/linux/mfd/tps65219.h  449
-rw-r--r--  include/linux/mfd/tps65910.h  2
-rw-r--r--  include/linux/mfd/tps65912.h  11
-rw-r--r--  include/linux/mfd/tps6594.h  1346
-rw-r--r--  include/linux/mfd/tps68470.h  11
-rw-r--r--  include/linux/mfd/tps80031.h  637
-rw-r--r--  include/linux/mfd/twl.h  84
-rw-r--r--  include/linux/mfd/twl6040.h  32
-rw-r--r--  include/linux/mfd/ucb1x00.h  1
-rw-r--r--  include/linux/mfd/upboard-fpga.h  55
-rw-r--r--  include/linux/mfd/wcd934x/registers.h  57
-rw-r--r--  include/linux/mfd/wl1273-core.h  277
-rw-r--r--  include/linux/mfd/wm8350/core.h  10
-rw-r--r--  include/linux/mhi.h  125
-rw-r--r--  include/linux/mhi_ep.h  305
-rw-r--r--  include/linux/micrel_phy.h  29
-rw-r--r--  include/linux/migrate.h  182
-rw-r--r--  include/linux/migrate_mode.h  19
-rw-r--r--  include/linux/mii.h  94
-rw-r--r--  include/linux/mii_timestamper.h  15
-rw-r--r--  include/linux/min_heap.h  467
-rw-r--r--  include/linux/minmax.h  284
-rw-r--r--  include/linux/misc/keba.h  72
-rw-r--r--  include/linux/misc_cgroup.h  50
-rw-r--r--  include/linux/miscdevice.h  16
-rw-r--r--  include/linux/mlx4/device.h  26
-rw-r--r--  include/linux/mlx4/driver.h  64
-rw-r--r--  include/linux/mlx4/qp.h  2
-rw-r--r--  include/linux/mlx5/accel.h  156
-rw-r--r--  include/linux/mlx5/cq.h  11
-rw-r--r--  include/linux/mlx5/device.h  344
-rw-r--r--  include/linux/mlx5/driver.h  568
-rw-r--r--  include/linux/mlx5/eq.h  4
-rw-r--r--  include/linux/mlx5/eswitch.h  69
-rw-r--r--  include/linux/mlx5/fs.h  140
-rw-r--r--  include/linux/mlx5/fs_helpers.h  48
-rw-r--r--  include/linux/mlx5/macsec.h  32
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h  3209
-rw-r--r--  include/linux/mlx5/mlx5_ifc_fpga.h  235
-rw-r--r--  include/linux/mlx5/mlx5_ifc_vdpa.h  70
-rw-r--r--  include/linux/mlx5/mpfs.h  18
-rw-r--r--  include/linux/mlx5/port.h  85
-rw-r--r--  include/linux/mlx5/qp.h  55
-rw-r--r--  include/linux/mlx5/transobj.h  1
-rw-r--r--  include/linux/mlx5/vport.h  8
-rw-r--r--  include/linux/mm.h  3721
-rw-r--r--  include/linux/mm_api.h  1
-rw-r--r--  include/linux/mm_inline.h  639
-rw-r--r--  include/linux/mm_types.h  1653
-rw-r--r--  include/linux/mm_types_task.h  56
-rw-r--r--  include/linux/mman.h  85
-rw-r--r--  include/linux/mmap_lock.h  322
-rw-r--r--  include/linux/mmc/card.h  83
-rw-r--r--  include/linux/mmc/core.h  41
-rw-r--r--  include/linux/mmc/host.h  189
-rw-r--r--  include/linux/mmc/mmc.h  18
-rw-r--r--  include/linux/mmc/sd.h  8
-rw-r--r--  include/linux/mmc/sd_uhs2.h  240
-rw-r--r--  include/linux/mmc/sdhci-pci-data.h  18
-rw-r--r--  include/linux/mmc/sdio.h  5
-rw-r--r--  include/linux/mmc/sdio_func.h  5
-rw-r--r--  include/linux/mmc/sdio_ids.h  19
-rw-r--r--  include/linux/mmc/slot-gpio.h  10
-rw-r--r--  include/linux/mmdebug.h  79
-rw-r--r--  include/linux/mmu_context.h  31
-rw-r--r--  include/linux/mmu_notifier.h  204
-rw-r--r--  include/linux/mmzone.h  1181
-rw-r--r--  include/linux/mnt_idmapping.h  253
-rw-r--r--  include/linux/mnt_namespace.h  8
-rw-r--r--  include/linux/mod_devicetable.h  96
-rw-r--r--  include/linux/module.h  422
-rw-r--r--  include/linux/module_symbol.h  15
-rw-r--r--  include/linux/moduleloader.h  48
-rw-r--r--  include/linux/moduleparam.h  56
-rw-r--r--  include/linux/mount.h  127
-rw-r--r--  include/linux/moxtet.h  11
-rw-r--r--  include/linux/mpage.h  4
-rw-r--r--  include/linux/mpi.h  194
-rw-r--r--  include/linux/mroute.h  17
-rw-r--r--  include/linux/mroute6.h  42
-rw-r--r--  include/linux/mroute_base.h  28
-rw-r--r--  include/linux/msi.h  696
-rw-r--r--  include/linux/msi_api.h  72
-rw-r--r--  include/linux/mtd/blktrans.h  2
-rw-r--r--  include/linux/mtd/cfi.h  35
-rw-r--r--  include/linux/mtd/flashchip.h  1
-rw-r--r--  include/linux/mtd/hyperbus.h  4
-rw-r--r--  include/linux/mtd/jedec.h  3
-rw-r--r--  include/linux/mtd/latch-addr-flash.h  29
-rw-r--r--  include/linux/mtd/lpc32xx_mlc.h  2
-rw-r--r--  include/linux/mtd/lpc32xx_slc.h  2
-rw-r--r--  include/linux/mtd/map.h  14
-rw-r--r--  include/linux/mtd/mtd.h  32
-rw-r--r--  include/linux/mtd/nand-ecc-mtk.h  47
-rw-r--r--  include/linux/mtd/nand-ecc-mxic.h  49
-rw-r--r--  include/linux/mtd/nand-qpic-common.h  483
-rw-r--r--  include/linux/mtd/nand.h  149
-rw-r--r--  include/linux/mtd/onfi.h  42
-rw-r--r--  include/linux/mtd/qinfo.h  2
-rw-r--r--  include/linux/mtd/rawnand.h  211
-rw-r--r--  include/linux/mtd/spear_smi.h  19
-rw-r--r--  include/linux/mtd/spi-nor.h  53
-rw-r--r--  include/linux/mtd/spinand.h  360
-rw-r--r--  include/linux/mtd/ubi.h  4
-rw-r--r--  include/linux/mutex.h  219
-rw-r--r--  include/linux/mutex_api.h  1
-rw-r--r--  include/linux/mutex_types.h  71
-rw-r--r--  include/linux/mux/consumer.h  41
-rw-r--r--  include/linux/mux/driver.h  8
-rw-r--r--  include/linux/mv643xx.h  929
-rw-r--r--  include/linux/mv643xx_eth.h  2
-rw-r--r--  include/linux/n_r3964.h  175
-rw-r--r--  include/linux/namei.h  206
-rw-r--r--  include/linux/nd.h  86
-rw-r--r--  include/linux/net.h  91
-rw-r--r--  include/linux/net/intel/i40e_client.h  25
-rw-r--r--  include/linux/net/intel/iidc_rdma.h  68
-rw-r--r--  include/linux/net/intel/iidc_rdma_ice.h  70
-rw-r--r--  include/linux/net/intel/iidc_rdma_idpf.h  55
-rw-r--r--  include/linux/net/intel/libie/adminq.h  399
-rw-r--r--  include/linux/net/intel/libie/fwlog.h  97
-rw-r--r--  include/linux/net/intel/libie/pctype.h  41
-rw-r--r--  include/linux/net/intel/libie/rx.h  50
-rw-r--r--  include/linux/net_tstamp.h  96
-rw-r--r--  include/linux/netdev_features.h  53
-rw-r--r--  include/linux/netdevice.h  2715
-rw-r--r--  include/linux/netdevice_xmit.h  26
-rw-r--r--  include/linux/netfilter.h  77
-rw-r--r--  include/linux/netfilter/ipset/ip_set.h  17
-rw-r--r--  include/linux/netfilter/nf_conntrack_common.h  11
-rw-r--r--  include/linux/netfilter/nf_conntrack_dccp.h  38
-rw-r--r--  include/linux/netfilter/nf_conntrack_h323.h  113
-rw-r--r--  include/linux/netfilter/nf_conntrack_pptp.h  38
-rw-r--r--  include/linux/netfilter/nf_conntrack_proto_gre.h  1
-rw-r--r--  include/linux/netfilter/nf_conntrack_sctp.h  1
-rw-r--r--  include/linux/netfilter/nf_conntrack_sip.h  2
-rw-r--r--  include/linux/netfilter/nfnetlink.h  2
-rw-r--r--  include/linux/netfilter/x_tables.h  20
-rw-r--r--  include/linux/netfilter_arp/arp_tables.h  5
-rw-r--r--  include/linux/netfilter_bridge.h  6
-rw-r--r--  include/linux/netfilter_bridge/ebtables.h  11
-rw-r--r--  include/linux/netfilter_defs.h  8
-rw-r--r--  include/linux/netfilter_ingress.h  58
-rw-r--r--  include/linux/netfilter_ipv4/ip_tables.h  6
-rw-r--r--  include/linux/netfilter_ipv6.h  10
-rw-r--r--  include/linux/netfilter_ipv6/ip6_tables.h  5
-rw-r--r--  include/linux/netfilter_netdev.h  151
-rw-r--r--  include/linux/netfs.h  567
-rw-r--r--  include/linux/netlink.h  118
-rw-r--r--  include/linux/netpoll.h  28
-rw-r--r--  include/linux/nfs.h  30
-rw-r--r--  include/linux/nfs4.h  335
-rw-r--r--  include/linux/nfs_common.h  18
-rw-r--r--  include/linux/nfs_fs.h  266
-rw-r--r--  include/linux/nfs_fs_sb.h  74
-rw-r--r--  include/linux/nfs_iostat.h  12
-rw-r--r--  include/linux/nfs_page.h  95
-rw-r--r--  include/linux/nfs_ssc.h  4
-rw-r--r--  include/linux/nfs_xdr.h  199
-rw-r--r--  include/linux/nfslocalio.h  123
-rw-r--r--  include/linux/nl802154.h  2
-rw-r--r--  include/linux/nls.h  2
-rw-r--r--  include/linux/nmi.h  122
-rw-r--r--  include/linux/node.h  151
-rw-r--r--  include/linux/nodemask.h  165
-rw-r--r--  include/linux/nodemask_types.h  19
-rw-r--r--  include/linux/nospec.h  4
-rw-r--r--  include/linux/notifier.h  15
-rw-r--r--  include/linux/ns/ns_common_types.h  196
-rw-r--r--  include/linux/ns/nstree_types.h  55
-rw-r--r--  include/linux/ns_common.h  148
-rw-r--r--  include/linux/nsfs.h  43
-rw-r--r--  include/linux/nsproxy.h  22
-rw-r--r--  include/linux/nstree.h  96
-rw-r--r--  include/linux/nubus.h  5
-rw-r--r--  include/linux/numa.h  60
-rw-r--r--  include/linux/numa_memblks.h  62
-rw-r--r--  include/linux/nvme-auth.h  51
-rw-r--r--  include/linux/nvme-fc-driver.h  39
-rw-r--r--  include/linux/nvme-keyring.h  42
-rw-r--r--  include/linux/nvme-rdma.h  12
-rw-r--r--  include/linux/nvme-tcp.h  14
-rw-r--r--  include/linux/nvme.h  807
-rw-r--r--  include/linux/nvmem-consumer.h  32
-rw-r--r--  include/linux/nvmem-provider.h  138
-rw-r--r--  include/linux/oa_tc6.h  24
-rw-r--r--  include/linux/objagg.h  1
-rw-r--r--  include/linux/objpool.h  277
-rw-r--r--  include/linux/objtool.h  110
-rw-r--r--  include/linux/objtool_types.h  72
-rw-r--r--  include/linux/of.h  793
-rw-r--r--  include/linux/of_address.h  97
-rw-r--r--  include/linux/of_device.h  59
-rw-r--r--  include/linux/of_fdt.h  29
-rw-r--r--  include/linux/of_gpio.h  121
-rw-r--r--  include/linux/of_graph.h  53
-rw-r--r--  include/linux/of_iommu.h  30
-rw-r--r--  include/linux/of_irq.h  28
-rw-r--r--  include/linux/of_mdio.h  29
-rw-r--r--  include/linux/of_net.h  14
-rw-r--r--  include/linux/of_platform.h  44
-rw-r--r--  include/linux/of_reserved_mem.h  41
-rw-r--r--  include/linux/oid_registry.h  35
-rw-r--r--  include/linux/olpc-ec.h  2
-rw-r--r--  include/linux/omap-dma.h  35
-rw-r--r--  include/linux/omap-gpmc.h  17
-rw-r--r--  include/linux/omap-mailbox.h  13
-rw-r--r--  include/linux/once.h  38
-rw-r--r--  include/linux/once_lite.h  36
-rw-r--r--  include/linux/oom.h  18
-rw-r--r--  include/linux/osq_lock.h  5
-rw-r--r--  include/linux/overflow.h  641
-rw-r--r--  include/linux/packing.h  465
-rw-r--r--  include/linux/padata.h  21
-rw-r--r--  include/linux/page-flags-layout.h  27
-rw-r--r--  include/linux/page-flags.h  1026
-rw-r--r--  include/linux/page-isolation.h  71
-rw-r--r--  include/linux/page_counter.h  64
-rw-r--r--  include/linux/page_ext.h  147
-rw-r--r--  include/linux/page_frag_cache.h  61
-rw-r--r--  include/linux/page_idle.h  115
-rw-r--r--  include/linux/page_owner.h  46
-rw-r--r--  include/linux/page_ref.h  124
-rw-r--r--  include/linux/page_reporting.h  3
-rw-r--r--  include/linux/page_table_check.h  155
-rw-r--r--  include/linux/pageblock-flags.h  77
-rw-r--r--  include/linux/pagemap.h  1483
-rw-r--r--  include/linux/pagevec.h  109
-rw-r--r--  include/linux/pagewalk.h  119
-rw-r--r--  include/linux/panic.h  105
-rw-r--r--  include/linux/panic_notifier.h  12
-rw-r--r--  include/linux/papr_scm.h  49
-rw-r--r--  include/linux/parport.h  13
-rw-r--r--  include/linux/parport_pc.h  3
-rw-r--r--  include/linux/part_stat.h  13
-rw-r--r--  include/linux/path.h  13
-rw-r--r--  include/linux/pci-acpi.h  11
-rw-r--r--  include/linux/pci-ats.h  6
-rw-r--r--  include/linux/pci-bwctrl.h  28
-rw-r--r--  include/linux/pci-dma-compat.h  129
-rw-r--r--  include/linux/pci-doe.h  29
-rw-r--r--  include/linux/pci-ecam.h  12
-rw-r--r--  include/linux/pci-ep-cfs.h  2
-rw-r--r--  include/linux/pci-ep-msi.h  28
-rw-r--r--  include/linux/pci-epc.h  186
-rw-r--r--  include/linux/pci-epf.h  108
-rw-r--r--  include/linux/pci-ide.h  119
-rw-r--r--  include/linux/pci-p2pdma.h  131
-rw-r--r--  include/linux/pci-pwrctrl.h  54
-rw-r--r--  include/linux/pci-tph.h  46
-rw-r--r--  include/linux/pci-tsm.h  243
-rw-r--r--  include/linux/pci.h  865
-rw-r--r--  include/linux/pci_hotplug.h  7
-rw-r--r--  include/linux/pci_ids.h  272
-rw-r--r--  include/linux/pcie-dwc.h  38
-rw-r--r--  include/linux/pcs-lynx.h  10
-rw-r--r--  include/linux/pcs-rzn1-miic.h  18
-rw-r--r--  include/linux/pcs/pcs-mtk-lynxi.h  13
-rw-r--r--  include/linux/pcs/pcs-xpcs.h  65
-rw-r--r--  include/linux/pda_power.h  39
-rw-r--r--  include/linux/pds/pds_adminq.h  1545
-rw-r--r--  include/linux/pds/pds_auxbus.h  20
-rw-r--r--  include/linux/pds/pds_common.h  56
-rw-r--r--  include/linux/pds/pds_core_if.h  572
-rw-r--r--  include/linux/pds/pds_intr.h  163
-rw-r--r--  include/linux/pe.h  303
-rw-r--r--  include/linux/peci-cpu.h  64
-rw-r--r--  include/linux/peci.h  109
-rw-r--r--  include/linux/percpu-defs.h  101
-rw-r--r--  include/linux/percpu-refcount.h  37
-rw-r--r--  include/linux/percpu-rwsem.h  36
-rw-r--r--  include/linux/percpu.h  66
-rw-r--r--  include/linux/percpu_counter.h  110
-rw-r--r--  include/linux/perf/arm_pmu.h  86
-rw-r--r--  include/linux/perf/arm_pmuv3.h  318
-rw-r--r--  include/linux/perf/riscv_pmu.h  97
-rw-r--r--  include/linux/perf_event.h  992
-rw-r--r--  include/linux/perf_event_api.h  1
-rw-r--r--  include/linux/pfn.h  9
-rw-r--r--  include/linux/pfn_t.h  131
-rw-r--r--  include/linux/pgalloc.h  29
-rw-r--r--  include/linux/pgalloc_tag.h  214
-rw-r--r--  include/linux/pgtable.h  1312
-rw-r--r--  include/linux/pgtable_api.h  1
-rw-r--r--  include/linux/phy.h  1239
-rw-r--r--  include/linux/phy/pcie.h  12
-rw-r--r--  include/linux/phy/phy-dp.h  3
-rw-r--r--  include/linux/phy/phy-hdmi.h  21
-rw-r--r--  include/linux/phy/phy-lvds.h  32
-rw-r--r--  include/linux/phy/phy-mipi-dphy.h  3
-rw-r--r--  include/linux/phy/phy-sun4i-usb.h  2
-rw-r--r--  include/linux/phy/phy.h  99
-rw-r--r--  include/linux/phy/tegra/xusb.h  13
-rw-r--r--  include/linux/phy_fixed.h  49
-rw-r--r--  include/linux/phy_link_topology.h  82
-rw-r--r--  include/linux/phylib_stubs.h  110
-rw-r--r--  include/linux/phylink.h  535
-rw-r--r--  include/linux/pid.h  173
-rw-r--r--  include/linux/pid_namespace.h  66
-rw-r--r--  include/linux/pid_types.h  16
-rw-r--r--  include/linux/pidfs.h  19
-rw-r--r--  include/linux/pinctrl/consumer.h  98
-rw-r--r--  include/linux/pinctrl/devinfo.h  21
-rw-r--r--  include/linux/pinctrl/machine.h  27
-rw-r--r--  include/linux/pinctrl/pinconf-generic.h  88
-rw-r--r--  include/linux/pinctrl/pinconf.h  16
-rw-r--r--  include/linux/pinctrl/pinctrl.h  104
-rw-r--r--  include/linux/pinctrl/pinmux.h  39
-rw-r--r--  include/linux/pipe_fs_i.h  140
-rw-r--r--  include/linux/pkeys.h  6
-rw-r--r--  include/linux/pktcdvd.h  205
-rw-r--r--  include/linux/pl353-smc.h  30
-rw-r--r--  include/linux/platform_data/ad5449.h  39
-rw-r--r--  include/linux/platform_data/ad5755.h  102
-rw-r--r--  include/linux/platform_data/adp5588.h  171
-rw-r--r--  include/linux/platform_data/amd_qdma.h  38
-rw-r--r--  include/linux/platform_data/amd_xdma.h  34
-rw-r--r--  include/linux/platform_data/asoc-palm27x.h  9
-rw-r--r--  include/linux/platform_data/asoc-pxa.h  32
-rw-r--r--  include/linux/platform_data/asoc-s3c.h  2
-rw-r--r--  include/linux/platform_data/asoc-s3c24xx_simtec.h  30
-rw-r--r--  include/linux/platform_data/asoc-ux500-msp.h  20
-rw-r--r--  include/linux/platform_data/ata-samsung_cf.h  31
-rw-r--r--  include/linux/platform_data/bcm7038_wdt.h  8
-rw-r--r--  include/linux/platform_data/bcmgenet.h  19
-rw-r--r--  include/linux/platform_data/bd6107.h  2
-rw-r--r--  include/linux/platform_data/brcmfmac.h  6
-rw-r--r--  include/linux/platform_data/brcmnand.h  12
-rw-r--r--  include/linux/platform_data/clk-davinci-pll.h  21
-rw-r--r--  include/linux/platform_data/clk-fch.h  2
-rw-r--r--  include/linux/platform_data/clk-s3c2410.h  19
-rw-r--r--  include/linux/platform_data/cros_ec_commands.h  578
-rw-r--r--  include/linux/platform_data/cros_ec_proto.h  71
-rw-r--r--  include/linux/platform_data/cyttsp4.h  62
-rw-r--r--  include/linux/platform_data/davinci-cpufreq.h  6
-rw-r--r--  include/linux/platform_data/davinci_asp.h  26
-rw-r--r--  include/linux/platform_data/dma-dw.h  21
-rw-r--r--  include/linux/platform_data/dma-ep93xx.h  94
-rw-r--r--  include/linux/platform_data/dma-hsu.h  2
-rw-r--r--  include/linux/platform_data/dma-imx-sdma.h  60
-rw-r--r--  include/linux/platform_data/dma-mmp_tdma.h  36
-rw-r--r--  include/linux/platform_data/dma-s3c24xx.h  48
-rw-r--r--  include/linux/platform_data/dma-ste-dma40.h  209
-rw-r--r--  include/linux/platform_data/dmtimer-omap.h  4
-rw-r--r--  include/linux/platform_data/emc2305.h  28
-rw-r--r--  include/linux/platform_data/eth-ep93xx.h  10
-rw-r--r--  include/linux/platform_data/eth_ixp4xx.h  21
-rw-r--r--  include/linux/platform_data/fb-s3c2410.h  99
-rw-r--r--  include/linux/platform_data/gpio-ath79.h  16
-rw-r--r--  include/linux/platform_data/gpio-davinci.h  29
-rw-r--r--  include/linux/platform_data/gpio-dwapb.h  25
-rw-r--r--  include/linux/platform_data/gpio-omap.h  3
-rw-r--r--  include/linux/platform_data/gpio_backlight.h  2
-rw-r--r--  include/linux/platform_data/gpmc-omap.h  8
-rw-r--r--  include/linux/platform_data/gsc_hwmon.h  5
-rw-r--r--  include/linux/platform_data/hirschmann-hellcreek.h  2
-rw-r--r--  include/linux/platform_data/huawei-gaokun-ec.h  79
-rw-r--r--  include/linux/platform_data/hwmon-s3c.h  10
-rw-r--r--  include/linux/platform_data/i2c-davinci.h  26
-rw-r--r--  include/linux/platform_data/i2c-gpio.h  9
-rw-r--r--  include/linux/platform_data/i2c-mux-gpio.h  4
-rw-r--r--  include/linux/platform_data/i2c-mux-reg.h  2
-rw-r--r--  include/linux/platform_data/irda-pxaficp.h  26
-rw-r--r--  include/linux/platform_data/irda-sa11x0.h  17
-rw-r--r--  include/linux/platform_data/keyboard-pxa930_rotary.h  21
-rw-r--r--  include/linux/platform_data/keyboard-spear.h  164
-rw-r--r--  include/linux/platform_data/keypad-ep93xx.h  32
-rw-r--r--  include/linux/platform_data/keypad-nomadik-ske.h  50
-rw-r--r--  include/linux/platform_data/keypad-omap.h  3
-rw-r--r--  include/linux/platform_data/keypad-pxa27x.h  73
-rw-r--r--  include/linux/platform_data/keyscan-davinci.h  29
-rw-r--r--  include/linux/platform_data/lcd-mipid.h  2
-rw-r--r--  include/linux/platform_data/leds-lp55xx.h  3
-rw-r--r--  include/linux/platform_data/leds-omap.h  19
-rw-r--r--  include/linux/platform_data/leds-s3c24xx.h  18
-rw-r--r--  include/linux/platform_data/lenovo-yoga-c630.h  44
-rw-r--r--  include/linux/platform_data/lp855x.h  4
-rw-r--r--  include/linux/platform_data/lv5207lp.h  2
-rw-r--r--  include/linux/platform_data/max6639.h  15
-rw-r--r--  include/linux/platform_data/max6697.h  33
-rw-r--r--  include/linux/platform_data/max732x.h  12
-rw-r--r--  include/linux/platform_data/mcs.h  30
-rw-r--r--  include/linux/platform_data/mdio-bcm-unimac.h  3
-rw-r--r--  include/linux/platform_data/media/omap4iss.h  66
-rw-r--r--  include/linux/platform_data/media/s5p_hdmi.h  32
-rw-r--r--  include/linux/platform_data/microchip-ksz.h  28
-rw-r--r--  include/linux/platform_data/mlxreg.h  92
-rw-r--r--  include/linux/platform_data/mmc-esdhc-imx.h  42
-rw-r--r--  include/linux/platform_data/mmc-omap.h  2
-rw-r--r--  include/linux/platform_data/mmc-pxamci.h  4
-rw-r--r--  include/linux/platform_data/mmc-s3cmci.h  51
-rw-r--r--  include/linux/platform_data/mmp_audio.h  18
-rw-r--r--  include/linux/platform_data/mouse-pxa930_trkball.h  11
-rw-r--r--  include/linux/platform_data/mtd-davinci-aemif.h  36
-rw-r--r--  include/linux/platform_data/mtd-davinci.h  88
-rw-r--r--  include/linux/platform_data/mtd-nand-omap2.h  10
-rw-r--r--  include/linux/platform_data/mtd-nand-s3c2410.h  70
-rw-r--r--  include/linux/platform_data/net-cw1200.h  4
-rw-r--r--  include/linux/platform_data/nfcmrvl.h  48
-rw-r--r--  include/linux/platform_data/ntc_thermistor.h  50
-rw-r--r--  include/linux/platform_data/omap-twl4030.h  3
-rw-r--r--  include/linux/platform_data/omap1_bl.h  1
-rw-r--r--  include/linux/platform_data/pca953x.h  13
-rw-r--r--  include/linux/platform_data/pcf857x.h  45
-rw-r--r--  include/linux/platform_data/pcmcia-pxa2xx_viper.h  12
-rw-r--r--  include/linux/platform_data/pxa2xx_udc.h  6
-rw-r--r--  include/linux/platform_data/rtc-ds2404.h  20
-rw-r--r--  include/linux/platform_data/rtc-v3020.h  41
-rw-r--r--  include/linux/platform_data/s3c-hsudc.h  33
-rw-r--r--  include/linux/platform_data/sa11x0-serial.h  1
-rw-r--r--  include/linux/platform_data/sh_mmcif.h (renamed from include/linux/mmc/sh_mmcif.h)  2
-rw-r--r--  include/linux/platform_data/shmob_drm.h  57
-rw-r--r--  include/linux/platform_data/sht3x.h  15
-rw-r--r--  include/linux/platform_data/si5351.h  2
-rw-r--r--  include/linux/platform_data/simplefb.h  1
-rw-r--r--  include/linux/platform_data/spi-ath79.h  16
-rw-r--r--  include/linux/platform_data/spi-clps711x.h  17
-rw-r--r--  include/linux/platform_data/spi-davinci.h  73
-rw-r--r--  include/linux/platform_data/spi-ep93xx.h  15
-rw-r--r--  include/linux/platform_data/spi-mt65xx.h  1
-rw-r--r--  include/linux/platform_data/spi-omap2-mcspi.h  3
-rw-r--r--  include/linux/platform_data/spi-s3c64xx.h  16
-rw-r--r--  include/linux/platform_data/ssm2518.h  21
-rw-r--r--  include/linux/platform_data/st33zp24.h  16
-rw-r--r--  include/linux/platform_data/st_sensors_pdata.h  3
-rw-r--r--  include/linux/platform_data/syscon.h  9
-rw-r--r--  include/linux/platform_data/ti-aemif.h  45
-rw-r--r--  include/linux/platform_data/ti-sysc.h  5
-rw-r--r--  include/linux/platform_data/timer-ixp4xx.h  11
-rw-r--r--  include/linux/platform_data/tmio.h  65
-rw-r--r--  include/linux/platform_data/touchscreen-s3c2410.h  22
-rw-r--r--  include/linux/platform_data/tps68470.h  40
-rw-r--r--  include/linux/platform_data/tsl2563.h  9
-rw-r--r--  include/linux/platform_data/uio_dmem_genirq.h  10
-rw-r--r--  include/linux/platform_data/uio_pruss.h  26
-rw-r--r--  include/linux/platform_data/usb-davinci.h  22
-rw-r--r--  include/linux/platform_data/usb-omap.h  16
-rw-r--r--  include/linux/platform_data/usb-omap1.h  4
-rw-r--r--  include/linux/platform_data/usb-pxa3xx-ulpi.h  32
-rw-r--r--  include/linux/platform_data/usb-s3c2410_udc.h  39
-rw-r--r--  include/linux/platform_data/usb3503.h  1
-rw-r--r--  include/linux/platform_data/ux500_wdt.h  18
-rw-r--r--  include/linux/platform_data/video-imxfb.h  70
-rw-r--r--  include/linux/platform_data/video-mx3fb.h  50
-rw-r--r--  include/linux/platform_data/video-pxafb.h  23
-rw-r--r--  include/linux/platform_data/voltage-omap.h  1
-rw-r--r--  include/linux/platform_data/wan_ixp4xx_hss.h  17
-rw-r--r--include/linux/platform_data/x86/amd-fch.h13
-rw-r--r--include/linux/platform_data/x86/asus-wmi-leds-ids.h50
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h91
-rw-r--r--include/linux/platform_data/x86/clk-lpss.h2
-rw-r--r--include/linux/platform_data/x86/int3472.h166
-rw-r--r--include/linux/platform_data/x86/intel-mid_wdt.h (renamed from include/linux/platform_data/intel-mid_wdt.h)6
-rw-r--r--include/linux/platform_data/x86/intel_pmc_ipc.h98
-rw-r--r--include/linux/platform_data/x86/intel_scu_ipc.h72
-rw-r--r--include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h76
-rw-r--r--include/linux/platform_data/x86/p2sb.h28
-rw-r--r--include/linux/platform_data/x86/pmc_atom.h38
-rw-r--r--include/linux/platform_data/x86/pwm-lpss.h60
-rw-r--r--include/linux/platform_data/x86/simatic-ipc-base.h31
-rw-r--r--include/linux/platform_data/x86/simatic-ipc.h79
-rw-r--r--include/linux/platform_data/x86/soc.h70
-rw-r--r--include/linux/platform_data/x86/spi-intel.h (renamed from include/linux/platform_data/x86/intel-spi.h)12
-rw-r--r--include/linux/platform_data/zforce_ts.h15
-rw-r--r--include/linux/platform_device.h56
-rw-r--r--include/linux/platform_profile.h40
-rw-r--r--include/linux/pldmfw.h8
-rw-r--r--include/linux/plist.h13
-rw-r--r--include/linux/plist_types.h17
-rw-r--r--include/linux/pm.h220
-rw-r--r--include/linux/pm2301_charger.h48
-rw-r--r--include/linux/pm_clock.h14
-rw-r--r--include/linux/pm_domain.h234
-rw-r--r--include/linux/pm_opp.h492
-rw-r--r--include/linux/pm_qos.h9
-rw-r--r--include/linux/pm_runtime.h394
-rw-r--r--include/linux/pm_wakeirq.h29
-rw-r--r--include/linux/pm_wakeup.h102
-rw-r--r--include/linux/pmbus.h44
-rw-r--r--include/linux/pmu.h2
-rw-r--r--include/linux/pnfs_osd_xdr.h317
-rw-r--r--include/linux/pnp.h18
-rw-r--r--include/linux/poison.h37
-rw-r--r--include/linux/poll.h32
-rw-r--r--include/linux/polynomial.h35
-rw-r--r--include/linux/posix-clock.h39
-rw-r--r--include/linux/posix-timers.h199
-rw-r--r--include/linux/posix-timers_types.h80
-rw-r--r--include/linux/posix_acl.h73
-rw-r--r--include/linux/posix_acl_xattr.h44
-rw-r--r--include/linux/power/ab8500.h16
-rw-r--r--include/linux/power/bq25890_charger.h15
-rw-r--r--include/linux/power/bq27xxx_battery.h16
-rw-r--r--include/linux/power/generic-adc-battery.h23
-rw-r--r--include/linux/power/max17042_battery.h16
-rw-r--r--include/linux/power/max77705_charger.h193
-rw-r--r--include/linux/power/power_on_reason.h19
-rw-r--r--include/linux/power/smartreflex.h5
-rw-r--r--include/linux/power_supply.h635
-rw-r--r--include/linux/ppp-comp.h2
-rw-r--r--include/linux/ppp_channel.h5
-rw-r--r--include/linux/ppp_defs.h14
-rw-r--r--include/linux/pps_gen_kernel.h78
-rw-r--r--include/linux/pps_kernel.h4
-rw-r--r--include/linux/pr.h25
-rw-r--r--include/linux/prandom.h85
-rw-r--r--include/linux/preempt.h158
-rw-r--r--include/linux/prefetch.h7
-rw-r--r--include/linux/printk.h314
-rw-r--r--include/linux/prmt.h16
-rw-r--r--include/linux/proc_fs.h43
-rw-r--r--include/linux/proc_ns.h39
-rw-r--r--include/linux/profile.h52
-rw-r--r--include/linux/property.h329
-rw-r--r--include/linux/pruss_driver.h123
-rw-r--r--include/linux/pse-pd/pse.h421
-rw-r--r--include/linux/pseudo_fs.h4
-rw-r--r--include/linux/psi.h28
-rw-r--r--include/linux/psi_types.h95
-rw-r--r--include/linux/psp-platform-access.h72
-rw-r--r--include/linux/psp-sev.h457
-rw-r--r--include/linux/psp.h29
-rw-r--r--include/linux/pstore.h10
-rw-r--r--include/linux/pstore_blk.h27
-rw-r--r--include/linux/pstore_ram.h99
-rw-r--r--include/linux/ptdump.h25
-rw-r--r--include/linux/pti.h2
-rw-r--r--include/linux/ptp_classify.h92
-rw-r--r--include/linux/ptp_clock_kernel.h231
-rw-r--r--include/linux/ptp_kvm.h7
-rw-r--r--include/linux/ptp_mock.h38
-rw-r--r--include/linux/ptp_pch.h4
-rw-r--r--include/linux/ptr_ring.h79
-rw-r--r--include/linux/ptrace.h123
-rw-r--r--include/linux/ptrace_api.h1
-rw-r--r--include/linux/pwm.h390
-rw-r--r--include/linux/pwm_backlight.h2
-rw-r--r--include/linux/pwrseq/consumer.h56
-rw-r--r--include/linux/pwrseq/provider.h78
-rw-r--r--include/linux/pxa2xx_ssp.h54
-rw-r--r--include/linux/qat/qat_mig_dev.h31
-rw-r--r--include/linux/qcom_scm.h174
-rw-r--r--include/linux/qed/common_hsi.h143
-rw-r--r--include/linux/qed/eth_common.h1
-rw-r--r--include/linux/qed/fcoe_common.h362
-rw-r--r--include/linux/qed/iscsi_common.h360
-rw-r--r--include/linux/qed/nvmetcp_common.h531
-rw-r--r--include/linux/qed/qed_chain.h97
-rw-r--r--include/linux/qed/qed_eth_if.h23
-rw-r--r--include/linux/qed/qed_fcoe_if.h7
-rw-r--r--include/linux/qed/qed_if.h297
-rw-r--r--include/linux/qed/qed_iscsi_if.h6
-rw-r--r--include/linux/qed/qed_ll2_if.h46
-rw-r--r--include/linux/qed/qed_nvmetcp_if.h257
-rw-r--r--include/linux/qed/qed_rdma_if.h3
-rw-r--r--include/linux/qed/rdma_common.h1
-rw-r--r--include/linux/quota.h8
-rw-r--r--include/linux/quotaops.h34
-rw-r--r--include/linux/radix-tree.h4
-rw-r--r--include/linux/raid/pq.h33
-rw-r--r--include/linux/raid/xor.h21
-rw-r--r--include/linux/raid_class.h6
-rw-r--r--include/linux/ramfs.h1
-rw-r--r--include/linux/random.h203
-rw-r--r--include/linux/randomize_kstack.h68
-rw-r--r--include/linux/range.h26
-rw-r--r--include/linux/ras.h32
-rw-r--r--include/linux/raspberrypi/vchiq.h112
-rw-r--r--include/linux/raspberrypi/vchiq_arm.h164
-rw-r--r--include/linux/raspberrypi/vchiq_bus.h60
-rw-r--r--include/linux/raspberrypi/vchiq_cfg.h41
-rw-r--r--include/linux/raspberrypi/vchiq_core.h646
-rw-r--r--include/linux/raspberrypi/vchiq_debugfs.h22
-rw-r--r--include/linux/ratelimit.h37
-rw-r--r--include/linux/ratelimit_types.h21
-rw-r--r--include/linux/rbtree.h169
-rw-r--r--include/linux/rbtree_augmented.h30
-rw-r--r--include/linux/rbtree_latch.h18
-rw-r--r--include/linux/rbtree_types.h34
-rw-r--r--include/linux/rcu_notifier.h32
-rw-r--r--include/linux/rcu_segcblist.h109
-rw-r--r--include/linux/rcu_sync.h1
-rw-r--r--include/linux/rculist.h98
-rw-r--r--include/linux/rculist_nulls.h71
-rw-r--r--include/linux/rcupdate.h531
-rw-r--r--include/linux/rcupdate_trace.h13
-rw-r--r--include/linux/rcupdate_wait.h47
-rw-r--r--include/linux/rcuref.h178
-rw-r--r--include/linux/rcutiny.h102
-rw-r--r--include/linux/rcutree.h85
-rw-r--r--include/linux/rcuwait.h42
-rw-r--r--include/linux/rcuwait_api.h1
-rw-r--r--include/linux/reboot.h139
-rw-r--r--include/linux/ref_tracker.h142
-rw-r--r--include/linux/refcount.h157
-rw-r--r--include/linux/refcount_api.h1
-rw-r--r--include/linux/refcount_types.h19
-rw-r--r--include/linux/regmap.h522
-rw-r--r--include/linux/regset.h27
-rw-r--r--include/linux/regulator/consumer.h182
-rw-r--r--include/linux/regulator/coupler.h8
-rw-r--r--include/linux/regulator/db8500-prcmu.h6
-rw-r--r--include/linux/regulator/driver.h277
-rw-r--r--include/linux/regulator/gpio-regulator.h2
-rw-r--r--include/linux/regulator/lp872x.h17
-rw-r--r--include/linux/regulator/machine.h51
-rw-r--r--include/linux/regulator/max8952.h2
-rw-r--r--include/linux/regulator/max8973-regulator.h6
-rw-r--r--include/linux/regulator/mt6331-regulator.h46
-rw-r--r--include/linux/regulator/mt6332-regulator.h27
-rw-r--r--include/linux/regulator/mt6357-regulator.h51
-rw-r--r--include/linux/regulator/mt6358-regulator.h46
-rw-r--r--include/linux/regulator/mt6359-regulator.h59
-rw-r--r--include/linux/regulator/mt6363-regulator.h330
-rw-r--r--include/linux/regulator/pca9450.h50
-rw-r--r--include/linux/regulator/s2dos05.h73
-rw-r--r--include/linux/regulator/tps62360.h6
-rw-r--r--include/linux/regulator/userspace-consumer.h1
-rw-r--r--include/linux/relay.h27
-rw-r--r--include/linux/remoteproc.h100
-rw-r--r--include/linux/remoteproc/mtk_scp.h3
-rw-r--r--include/linux/remoteproc/pruss.h83
-rw-r--r--include/linux/resctrl.h684
-rw-r--r--include/linux/resctrl_types.h60
-rw-r--r--include/linux/reset-controller.h47
-rw-r--r--include/linux/reset.h299
-rw-r--r--include/linux/reset/bcm63xx_pmb.h10
-rw-r--r--include/linux/resource.h2
-rw-r--r--include/linux/restart_block.h6
-rw-r--r--include/linux/resume_user_mode.h65
-rw-r--r--include/linux/rethook.h98
-rw-r--r--include/linux/rfkill.h19
-rw-r--r--include/linux/rhashtable-types.h13
-rw-r--r--include/linux/rhashtable.h201
-rw-r--r--include/linux/ring_buffer.h64
-rw-r--r--include/linux/rio.h4
-rw-r--r--include/linux/rio_drv.h5
-rw-r--r--include/linux/rio_ids.h13
-rw-r--r--include/linux/rmap.h887
-rw-r--r--include/linux/rolling_buffer.h61
-rw-r--r--include/linux/root_dev.h9
-rw-r--r--include/linux/rpmb.h167
-rw-r--r--include/linux/rpmsg.h49
-rw-r--r--include/linux/rpmsg/qcom_glink.h12
-rw-r--r--include/linux/rpmsg/qcom_smd.h5
-rw-r--r--include/linux/rseq.h166
-rw-r--r--include/linux/rseq_entry.h616
-rw-r--r--include/linux/rseq_types.h164
-rw-r--r--include/linux/rslib.h1
-rw-r--r--include/linux/rtc.h26
-rw-r--r--include/linux/rtc/ds1685.h3
-rw-r--r--include/linux/rtc/m48t59.h3
-rw-r--r--include/linux/rtmutex.h84
-rw-r--r--include/linux/rtnetlink.h144
-rw-r--r--include/linux/rtsx_common.h1
-rw-r--r--include/linux/rtsx_pci.h22
-rw-r--r--include/linux/rtsx_usb.h17
-rw-r--r--include/linux/rv.h131
-rw-r--r--include/linux/rw_hint.h25
-rw-r--r--include/linux/rwbase_rt.h44
-rw-r--r--include/linux/rwlock.h25
-rw-r--r--include/linux/rwlock_api_smp.h16
-rw-r--r--include/linux/rwlock_rt.h150
-rw-r--r--include/linux/rwlock_types.h53
-rw-r--r--include/linux/rwsem.h138
-rw-r--r--include/linux/s3c_adc_battery.h39
-rw-r--r--include/linux/sbitmap.h148
-rw-r--r--include/linux/scatterlist.h245
-rw-r--r--include/linux/sched.h1316
-rw-r--r--include/linux/sched/affinity.h1
-rw-r--r--include/linux/sched/clock.h25
-rw-r--r--include/linux/sched/cond_resched.h1
-rw-r--r--include/linux/sched/coredump.h67
-rw-r--r--include/linux/sched/cpufreq.h2
-rw-r--r--include/linux/sched/cputime.h14
-rw-r--r--include/linux/sched/deadline.h24
-rw-r--r--include/linux/sched/debug.h4
-rw-r--r--include/linux/sched/ext.h257
-rw-r--r--include/linux/sched/hotplug.h4
-rw-r--r--include/linux/sched/idle.h65
-rw-r--r--include/linux/sched/isolation.h62
-rw-r--r--include/linux/sched/jobctl.h8
-rw-r--r--include/linux/sched/mm.h261
-rw-r--r--include/linux/sched/nohz.h4
-rw-r--r--include/linux/sched/numa_balancing.h16
-rw-r--r--include/linux/sched/posix-timers.h1
-rw-r--r--include/linux/sched/prio.h1
-rw-r--r--include/linux/sched/rseq_api.h1
-rw-r--r--include/linux/sched/rt.h45
-rw-r--r--include/linux/sched/sd_flags.h34
-rw-r--r--include/linux/sched/signal.h192
-rw-r--r--include/linux/sched/smt.h4
-rw-r--r--include/linux/sched/stat.h16
-rw-r--r--include/linux/sched/sysctl.h86
-rw-r--r--include/linux/sched/task.h88
-rw-r--r--include/linux/sched/task_flags.h1
-rw-r--r--include/linux/sched/task_stack.h37
-rw-r--r--include/linux/sched/thread_info_api.h1
-rw-r--r--include/linux/sched/topology.h111
-rw-r--r--include/linux/sched/types.h2
-rw-r--r--include/linux/sched/user.h13
-rw-r--r--include/linux/sched/vhost_task.h14
-rw-r--r--include/linux/sched/wake_q.h41
-rw-r--r--include/linux/sched_clock.h4
-rw-r--r--include/linux/scmi_imx_protocol.h102
-rw-r--r--include/linux/scmi_protocol.h384
-rw-r--r--include/linux/scpi_protocol.h8
-rw-r--r--include/linux/screen_info.h147
-rw-r--r--include/linux/scs.h18
-rw-r--r--include/linux/sctp.h30
-rw-r--r--include/linux/sdb.h160
-rw-r--r--include/linux/seccomp.h31
-rw-r--r--include/linux/seccomp_types.h35
-rw-r--r--include/linux/secretmem.h36
-rw-r--r--include/linux/security.h655
-rw-r--r--include/linux/sed-opal-key.h26
-rw-r--r--include/linux/sed-opal.h12
-rw-r--r--include/linux/selection.h56
-rw-r--r--include/linux/sem.h14
-rw-r--r--include/linux/sem_types.h13
-rw-r--r--include/linux/semaphore.h25
-rw-r--r--include/linux/seq_buf.h63
-rw-r--r--include/linux/seq_file.h71
-rw-r--r--include/linux/seq_file_net.h4
-rw-r--r--include/linux/seqlock.h412
-rw-r--r--include/linux/seqlock_api.h1
-rw-r--r--include/linux/seqlock_types.h93
-rw-r--r--include/linux/seqno-fence.h109
-rw-r--r--include/linux/serdev.h73
-rw-r--r--include/linux/serial.h27
-rw-r--r--include/linux/serial_8250.h118
-rw-r--r--include/linux/serial_core.h924
-rw-r--r--include/linux/serial_max3100.h48
-rw-r--r--include/linux/serial_s3c.h31
-rw-r--r--include/linux/serial_sci.h1
-rw-r--r--include/linux/serio.h7
-rw-r--r--include/linux/set_memory.h46
-rw-r--r--include/linux/sfp.h273
-rw-r--r--include/linux/sh_intc.h11
-rw-r--r--include/linux/shdma-base.h2
-rw-r--r--include/linux/shm.h9
-rw-r--r--include/linux/shmem_fs.h185
-rw-r--r--include/linux/shrinker.h88
-rw-r--r--include/linux/signal.h17
-rw-r--r--include/linux/signal_types.h9
-rw-r--r--include/linux/siphash.h49
-rw-r--r--include/linux/sizes.h20
-rw-r--r--include/linux/skb_array.h20
-rw-r--r--include/linux/skbuff.h1831
-rw-r--r--include/linux/skbuff_ref.h74
-rw-r--r--include/linux/skmsg.h133
-rw-r--r--include/linux/slab.h1072
-rw-r--r--include/linux/slab_def.h123
-rw-r--r--include/linux/slimbus.h4
-rw-r--r--include/linux/slub_def.h199
-rw-r--r--include/linux/sm501.h3
-rw-r--r--include/linux/smc911x.h14
-rw-r--r--include/linux/smp.h50
-rw-r--r--include/linux/smscphy.h44
-rw-r--r--include/linux/soc/airoha/airoha_offload.h317
-rw-r--r--include/linux/soc/amd/isp4_misc.h12
-rw-r--r--include/linux/soc/andes/irq.h18
-rw-r--r--include/linux/soc/apple/rtkit.h175
-rw-r--r--include/linux/soc/apple/sart.h53
-rw-r--r--include/linux/soc/cirrus/ep93xx.h47
-rw-r--r--include/linux/soc/ixp4xx/cpu.h120
-rw-r--r--include/linux/soc/ixp4xx/npe.h2
-rw-r--r--include/linux/soc/marvell/octeontx2/asm.h26
-rw-r--r--include/linux/soc/marvell/silicons.h25
-rw-r--r--include/linux/soc/mediatek/dvfsrc.h36
-rw-r--r--include/linux/soc/mediatek/infracfg.h300
-rw-r--r--include/linux/soc/mediatek/mtk-cmdq.h275
-rw-r--r--include/linux/soc/mediatek/mtk-mmsys.h64
-rw-r--r--include/linux/soc/mediatek/mtk-mutex.h64
-rw-r--r--include/linux/soc/mediatek/mtk_sip_svc.h6
-rw-r--r--include/linux/soc/mediatek/mtk_wed.h333
-rw-r--r--include/linux/soc/mmp/cputype.h24
-rw-r--r--include/linux/soc/pxa/cpu.h252
-rw-r--r--include/linux/soc/pxa/mfp.h470
-rw-r--r--include/linux/soc/pxa/smemc.h29
-rw-r--r--include/linux/soc/qcom/apr.h76
-rw-r--r--include/linux/soc/qcom/geni-se.h (renamed from include/linux/qcom-geni-se.h)93
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h101
-rw-r--r--include/linux/soc/qcom/mdt_loader.h24
-rw-r--r--include/linux/soc/qcom/pmic_glink.h33
-rw-r--r--include/linux/soc/qcom/qcom-pbs.h30
-rw-r--r--include/linux/soc/qcom/qcom_aoss.h38
-rw-r--r--include/linux/soc/qcom/qmi.h26
-rw-r--r--include/linux/soc/qcom/smd-rpm.h26
-rw-r--r--include/linux/soc/qcom/smem.h6
-rw-r--r--include/linux/soc/qcom/smem_state.h8
-rw-r--r--include/linux/soc/qcom/socinfo.h115
-rw-r--r--include/linux/soc/qcom/ubwc.h76
-rw-r--r--include/linux/soc/renesas/r9a06g032-sysctrl.h11
-rw-r--r--include/linux/soc/renesas/rcar-rst.h2
-rw-r--r--include/linux/soc/samsung/exynos-chipid.h6
-rw-r--r--include/linux/soc/samsung/exynos-pmu.h11
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h364
-rw-r--r--include/linux/soc/samsung/s3c-adc.h32
-rw-r--r--include/linux/soc/samsung/s3c-cpu-freq.h145
-rw-r--r--include/linux/soc/samsung/s3c-cpufreq-core.h299
-rw-r--r--include/linux/soc/samsung/s3c-pm.h58
-rw-r--r--include/linux/soc/sunxi/sunxi_sram.h2
-rw-r--r--include/linux/soc/ti/knav_dma.h10
-rw-r--r--include/linux/soc/ti/knav_qmss.h10
-rw-r--r--include/linux/soc/ti/omap1-io.h143
-rw-r--r--include/linux/soc/ti/omap1-mux.h311
-rw-r--r--include/linux/soc/ti/omap1-soc.h163
-rw-r--r--include/linux/soc/ti/omap1-usb.h116
-rw-r--r--include/linux/soc/ti/ti-msgmgr.h18
-rw-r--r--include/linux/soc/ti/ti_sci_inta_msi.h2
-rw-r--r--include/linux/soc/ti/ti_sci_protocol.h32
-rw-r--r--include/linux/sock_diag.h10
-rw-r--r--include/linux/socket.h84
-rw-r--r--include/linux/sockptr.h75
-rw-r--r--include/linux/softirq.h1
-rw-r--r--include/linux/sony-laptop.h39
-rw-r--r--include/linux/sort.h23
-rw-r--r--include/linux/soundwire/sdw.h549
-rw-r--r--include/linux/soundwire/sdw_amd.h174
-rw-r--r--include/linux/soundwire/sdw_intel.h305
-rw-r--r--include/linux/soundwire/sdw_registers.h34
-rw-r--r--include/linux/soundwire/sdw_type.h10
-rw-r--r--include/linux/spi/ads7846.h17
-rw-r--r--include/linux/spi/altera.h4
-rw-r--r--include/linux/spi/at86rf230.h20
-rw-r--r--include/linux/spi/cc2520.h21
-rw-r--r--include/linux/spi/corgi_lcd.h2
-rw-r--r--include/linux/spi/max7301.h4
-rw-r--r--include/linux/spi/offload/consumer.h39
-rw-r--r--include/linux/spi/offload/provider.h47
-rw-r--r--include/linux/spi/offload/types.h109
-rw-r--r--include/linux/spi/pxa2xx_spi.h52
-rw-r--r--include/linux/spi/rspi.h18
-rw-r--r--include/linux/spi/s3c24xx-fiq.h33
-rw-r--r--include/linux/spi/s3c24xx.h25
-rw-r--r--include/linux/spi/sh_msiof.h129
-rw-r--r--include/linux/spi/spi-mem.h112
-rw-r--r--include/linux/spi/spi.h828
-rw-r--r--include/linux/spi/spi_bitbang.h10
-rw-r--r--include/linux/spi/spi_gpio.h4
-rw-r--r--include/linux/spi/xilinx_spi.h15
-rw-r--r--include/linux/spinlock.h156
-rw-r--r--include/linux/spinlock_api.h1
-rw-r--r--include/linux/spinlock_api_smp.h16
-rw-r--r--include/linux/spinlock_api_up.h3
-rw-r--r--include/linux/spinlock_rt.h155
-rw-r--r--include/linux/spinlock_types.h89
-rw-r--r--include/linux/spinlock_types_raw.h73
-rw-r--r--include/linux/spinlock_types_up.h4
-rw-r--r--include/linux/spinlock_up.h5
-rw-r--r--include/linux/splice.h47
-rw-r--r--include/linux/spmi.h6
-rw-r--r--include/linux/sprintf.h31
-rw-r--r--include/linux/sram.h14
-rw-r--r--include/linux/srcu.h436
-rw-r--r--include/linux/srcutiny.h76
-rw-r--r--include/linux/srcutree.h289
-rw-r--r--include/linux/ssb/ssb.h14
-rw-r--r--include/linux/ssb/ssb_driver_extif.h2
-rw-r--r--include/linux/ssb/ssb_driver_gige.h2
-rw-r--r--include/linux/stackdepot.h248
-rw-r--r--include/linux/stackleak.h35
-rw-r--r--include/linux/stackprotector.h19
-rw-r--r--include/linux/stacktrace.h34
-rw-r--r--include/linux/start_kernel.h4
-rw-r--r--include/linux/stat.h21
-rw-r--r--include/linux/static_call.h89
-rw-r--r--include/linux/static_call_types.h4
-rw-r--r--include/linux/stdarg.h11
-rw-r--r--include/linux/stddef.h107
-rw-r--r--include/linux/stm.h14
-rw-r--r--include/linux/stmmac.h177
-rw-r--r--include/linux/stop_machine.h70
-rw-r--r--include/linux/string.h394
-rw-r--r--include/linux/string_choices.h97
-rw-r--r--include/linux/string_helpers.h59
-rw-r--r--include/linux/stringify.h2
-rw-r--r--include/linux/sungem_phy.h4
-rw-r--r--include/linux/sunrpc/auth.h3
-rw-r--r--include/linux/sunrpc/bc_xprt.h20
-rw-r--r--include/linux/sunrpc/cache.h38
-rw-r--r--include/linux/sunrpc/clnt.h35
-rw-r--r--include/linux/sunrpc/debug.h30
-rw-r--r--include/linux/sunrpc/gss_asn1.h81
-rw-r--r--include/linux/sunrpc/gss_krb5.h197
-rw-r--r--include/linux/sunrpc/gss_krb5_enctypes.h41
-rw-r--r--include/linux/sunrpc/msg_prot.h24
-rw-r--r--include/linux/sunrpc/rdma_rn.h27
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h11
-rw-r--r--include/linux/sunrpc/sched.h76
-rw-r--r--include/linux/sunrpc/stats.h23
-rw-r--r--include/linux/sunrpc/svc.h449
-rw-r--r--include/linux/sunrpc/svc_rdma.h135
-rw-r--r--include/linux/sunrpc/svc_xprt.h85
-rw-r--r--include/linux/sunrpc/svcauth.h63
-rw-r--r--include/linux/sunrpc/svcsock.h29
-rw-r--r--include/linux/sunrpc/xdr.h112
-rw-r--r--include/linux/sunrpc/xdrgen/_builtins.h243
-rw-r--r--include/linux/sunrpc/xdrgen/_defs.h35
-rw-r--r--include/linux/sunrpc/xdrgen/nfs4_1.h153
-rw-r--r--include/linux/sunrpc/xprt.h94
-rw-r--r--include/linux/sunrpc/xprtmultipath.h16
-rw-r--r--include/linux/sunrpc/xprtsock.h5
-rw-r--r--include/linux/superhyway.h107
-rw-r--r--include/linux/surface_aggregator/controller.h234
-rw-r--r--include/linux/surface_aggregator/device.h292
-rw-r--r--include/linux/surface_aggregator/serial_hub.h121
-rw-r--r--include/linux/suspend.h188
-rw-r--r--include/linux/swab.h25
-rw-r--r--include/linux/swait.h2
-rw-r--r--include/linux/swait_api.h1
-rw-r--r--include/linux/swap.h523
-rw-r--r--include/linux/swap_cgroup.h18
-rw-r--r--include/linux/swap_slots.h31
-rw-r--r--include/linux/swapfile.h15
-rw-r--r--include/linux/swapops.h355
-rw-r--r--include/linux/swiotlb.h291
-rw-r--r--include/linux/switchtec.h6
-rw-r--r--include/linux/sync_core.h16
-rw-r--r--include/linux/sys_info.h28
-rw-r--r--include/linux/syscall_user_dispatch.h27
-rw-r--r--include/linux/syscall_user_dispatch_types.h22
-rw-r--r--include/linux/syscalls.h270
-rw-r--r--include/linux/syscalls_api.h1
-rw-r--r--include/linux/syscore_ops.h15
-rw-r--r--include/linux/sysctl.h272
-rw-r--r--include/linux/sysfb.h125
-rw-r--r--include/linux/sysfs.h271
-rw-r--r--include/linux/syslog.h3
-rw-r--r--include/linux/sysrq.h18
-rw-r--r--include/linux/sysv_fs.h214
-rw-r--r--include/linux/t10-pi.h37
-rw-r--r--include/linux/task_work.h12
-rw-r--r--include/linux/tboot.h2
-rw-r--r--include/linux/tc.h4
-rw-r--r--include/linux/tca6416_keypad.h31
-rw-r--r--include/linux/tcp.h404
-rw-r--r--include/linux/tee_core.h427
-rw-r--r--include/linux/tee_drv.h400
-rw-r--r--include/linux/tegra-icc.h65
-rw-r--r--include/linux/termios_internal.h49
-rw-r--r--include/linux/text-patching.h15
-rw-r--r--include/linux/tfrc.h51
-rw-r--r--include/linux/thermal.h374
-rw-r--r--include/linux/thread_info.h91
-rw-r--r--include/linux/threads.h6
-rw-r--r--include/linux/thunderbolt.h93
-rw-r--r--include/linux/ti-emif-sram.h10
-rw-r--r--include/linux/ti_wilink_st.h2
-rw-r--r--include/linux/tick.h91
-rw-r--r--include/linux/time64.h19
-rw-r--r--include/linux/time_namespace.h28
-rw-r--r--include/linux/timecounter.h17
-rw-r--r--include/linux/timekeeper_internal.h164
-rw-r--r--include/linux/timekeeping.h91
-rw-r--r--include/linux/timer.h117
-rw-r--r--include/linux/timer_types.h23
-rw-r--r--include/linux/timerqueue.h20
-rw-r--r--include/linux/timerqueue_types.h17
-rw-r--r--include/linux/timex.h18
-rw-r--r--include/linux/tnum.h32
-rw-r--r--include/linux/topology.h136
-rw-r--r--include/linux/torture.h38
-rw-r--r--include/linux/tpm.h312
-rw-r--r--include/linux/tpm_eventlog.h6
-rw-r--r--include/linux/tpm_svsm.h149
-rw-r--r--include/linux/trace.h59
-rw-r--r--include/linux/trace_events.h286
-rw-r--r--include/linux/trace_recursion.h131
-rw-r--r--include/linux/trace_seq.h46
-rw-r--r--include/linux/tracefs.h69
-rw-r--r--include/linux/tracehook.h225
-rw-r--r--include/linux/tracepoint-defs.h15
-rw-r--r--include/linux/tracepoint.h282
-rw-r--r--include/linux/transport_class.h8
-rw-r--r--include/linux/tsm-mr.h89
-rw-r--r--include/linux/tsm.h129
-rw-r--r--include/linux/tty.h661
-rw-r--r--include/linux/tty_buffer.h57
-rw-r--r--include/linux/tty_driver.h711
-rw-r--r--include/linux/tty_flip.h93
-rw-r--r--include/linux/tty_ldisc.h377
-rw-r--r--include/linux/tty_port.h287
-rw-r--r--include/linux/turris-omnia-mcu-interface.h397
-rw-r--r--include/linux/turris-signing-key.h35
-rw-r--r--include/linux/typecheck.h9
-rw-r--r--include/linux/types.h39
-rw-r--r--include/linux/u64_stats_sync.h140
-rw-r--r--include/linux/u64_stats_sync_api.h1
-rw-r--r--include/linux/uacce.h22
-rw-r--r--include/linux/uaccess.h605
-rw-r--r--include/linux/ubsan.h14
-rw-r--r--include/linux/ucb1400.h162
-rw-r--r--include/linux/ucopysize.h63
-rw-r--r--include/linux/ucs2_string.h1
-rw-r--r--include/linux/udp.h160
-rw-r--r--include/linux/uidgid.h30
-rw-r--r--include/linux/uidgid_types.h15
-rw-r--r--include/linux/uio.h314
-rw-r--r--include/linux/uio_driver.h13
-rw-r--r--include/linux/ulpi/driver.h2
-rw-r--r--include/linux/umh.h11
-rw-r--r--include/linux/unaligned.h146
-rw-r--r--include/linux/unaligned/access_ok.h68
-rw-r--r--include/linux/unaligned/be_byteshift.h71
-rw-r--r--include/linux/unaligned/be_memmove.h37
-rw-r--r--include/linux/unaligned/be_struct.h37
-rw-r--r--include/linux/unaligned/generic.h115
-rw-r--r--include/linux/unaligned/le_byteshift.h71
-rw-r--r--include/linux/unaligned/le_memmove.h37
-rw-r--r--include/linux/unaligned/le_struct.h37
-rw-r--r--include/linux/unaligned/memmove.h46
-rw-r--r--include/linux/unaligned/packed_struct.h2
-rw-r--r--include/linux/unicode.h53
-rw-r--r--include/linux/union_find.h41
-rw-r--r--include/linux/units.h37
-rw-r--r--include/linux/unroll.h78
-rw-r--r--include/linux/unwind_deferred.h79
-rw-r--r--include/linux/unwind_deferred_types.h55
-rw-r--r--include/linux/unwind_user.h14
-rw-r--r--include/linux/unwind_user_types.h46
-rw-r--r--include/linux/uprobes.h171
-rw-r--r--include/linux/usb.h252
-rw-r--r--include/linux/usb/audio-v2.h21
-rw-r--r--include/linux/usb/audio.h3
-rw-r--r--include/linux/usb/c67x00.h15
-rw-r--r--include/linux/usb/cdc-wdm.h7
-rw-r--r--include/linux/usb/cdc.h4
-rw-r--r--include/linux/usb/cdc_ncm.h5
-rw-r--r--include/linux/usb/ch9.h8
-rw-r--r--include/linux/usb/chipidea.h6
-rw-r--r--include/linux/usb/composite.h74
-rw-r--r--include/linux/usb/ehci_def.h47
-rw-r--r--include/linux/usb/ehci_pdriver.h14
-rw-r--r--include/linux/usb/func_utils.h86
-rw-r--r--include/linux/usb/g_hid.h14
-rw-r--r--include/linux/usb/gadget.h114
-rw-r--r--include/linux/usb/gadget_configfs.h7
-rw-r--r--include/linux/usb/hcd.h73
-rw-r--r--include/linux/usb/input.h4
-rw-r--r--include/linux/usb/isp1301.h10
-rw-r--r--include/linux/usb/isp1760.h19
-rw-r--r--include/linux/usb/ljca.h145
-rw-r--r--include/linux/usb/m66592.h14
-rw-r--r--include/linux/usb/mctp-usb.h30
-rw-r--r--include/linux/usb/midi-v2.h94
-rw-r--r--include/linux/usb/musb-ux500.h10
-rw-r--r--include/linux/usb/musb.h17
-rw-r--r--include/linux/usb/net2280.h14
-rw-r--r--include/linux/usb/of.h9
-rw-r--r--include/linux/usb/ohci_pdriver.h14
-rw-r--r--include/linux/usb/onboard_dev.h18
-rw-r--r--include/linux/usb/otg-fsm.h24
-rw-r--r--include/linux/usb/otg.h3
-rw-r--r--include/linux/usb/pd.h133
-rw-r--r--include/linux/usb/pd_bdo.h2
-rw-r--r--include/linux/usb/pd_ext_sdb.h4
-rw-r--r--include/linux/usb/pd_vdo.h14
-rw-r--r--include/linux/usb/phy.h5
-rw-r--r--include/linux/usb/phy_companion.h10
-rw-r--r--include/linux/usb/quirks.h5
-rw-r--r--include/linux/usb/r8152.h3
-rw-r--r--include/linux/usb/r8a66597.h14
-rw-r--r--include/linux/usb/renesas_usbhs.h15
-rw-r--r--include/linux/usb/rndis_host.h15
-rw-r--r--include/linux/usb/role.h12
-rw-r--r--include/linux/usb/rzv2m_usb3drd.h20
-rw-r--r--include/linux/usb/serial.h30
-rw-r--r--include/linux/usb/storage.h12
-rw-r--r--include/linux/usb/tcpci.h255
-rw-r--r--include/linux/usb/tcpm.h34
-rw-r--r--include/linux/usb/tegra_usb_phy.h27
-rw-r--r--include/linux/usb/typec.h106
-rw-r--r--include/linux/usb/typec_altmode.h47
-rw-r--r--include/linux/usb/typec_dp.h43
-rw-r--r--include/linux/usb/typec_mux.h105
-rw-r--r--include/linux/usb/typec_retimer.h45
-rw-r--r--include/linux/usb/typec_tbt.h12
-rw-r--r--include/linux/usb/ulpi.h13
-rw-r--r--include/linux/usb/usb338x.h11
-rw-r--r--include/linux/usb/usbio.h177
-rw-r--r--include/linux/usb/usbnet.h41
-rw-r--r--include/linux/usb/uvc.h189
-rw-r--r--include/linux/usb/webusb.h80
-rw-r--r--include/linux/usb/xhci-dbgp.h6
-rw-r--r--include/linux/usb/xhci-sideband.h111
-rw-r--r--include/linux/user_events.h84
-rw-r--r--include/linux/user_namespace.h77
-rw-r--r--include/linux/userfaultfd_k.h302
-rw-r--r--include/linux/usermode_driver.h19
-rw-r--r--include/linux/util_macros.h152
-rw-r--r--include/linux/uts_namespace.h65
-rw-r--r--include/linux/utsname.h54
-rw-r--r--include/linux/uuid.h18
-rw-r--r--include/linux/vdpa.h347
-rw-r--r--include/linux/vdso_datastore.h10
-rw-r--r--include/linux/verification.h12
-rw-r--r--include/linux/vermagic.h15
-rw-r--r--include/linux/vfio.h429
-rw-r--r--include/linux/vfio_pci_core.h233
-rw-r--r--include/linux/vfsdebug.h45
-rw-r--r--include/linux/vgaarb.h145
-rw-r--r--include/linux/vhost_iotlb.h5
-rw-r--r--include/linux/via-gpio.h14
-rw-r--r--include/linux/videodev2.h2
-rw-r--r--include/linux/virtio.h201
-rw-r--r--include/linux/virtio_anchor.h19
-rw-r--r--include/linux/virtio_config.h263
-rw-r--r--include/linux/virtio_console.h38
-rw-r--r--include/linux/virtio_features.h89
-rw-r--r--include/linux/virtio_net.h278
-rw-r--r--include/linux/virtio_pci_admin.h34
-rw-r--r--include/linux/virtio_pci_legacy.h40
-rw-r--r--include/linux/virtio_pci_modern.h81
-rw-r--r--include/linux/virtio_ring.h28
-rw-r--r--include/linux/virtio_vsock.h184
-rw-r--r--include/linux/visorbus.h344
-rw-r--r--include/linux/vlynq.h149
-rw-r--r--include/linux/vm_event_item.h75
-rw-r--r--include/linux/vmacache.h28
-rw-r--r--include/linux/vmalloc.h229
-rw-r--r--include/linux/vmcore_info.h88
-rw-r--r--include/linux/vme.h190
-rw-r--r--include/linux/vmpressure.h2
-rw-r--r--include/linux/vmstat.h236
-rw-r--r--include/linux/vmw_vmci_api.h7
-rw-r--r--include/linux/vmw_vmci_defs.h98
-rw-r--r--include/linux/vringh.h41
-rw-r--r--include/linux/vt_buffer.h26
-rw-r--r--include/linux/vt_kern.h44
-rw-r--r--include/linux/vtime.h5
-rw-r--r--include/linux/w1-gpio.h22
-rw-r--r--include/linux/w1.h7
-rw-r--r--include/linux/wait.h125
-rw-r--r--include/linux/wait_api.h1
-rw-r--r--include/linux/wait_bit.h452
-rw-r--r--include/linux/watch_queue.h10
-rw-r--r--include/linux/watchdog.h22
-rw-r--r--include/linux/win_minmax.h4
-rw-r--r--include/linux/wireless.h15
-rw-r--r--include/linux/wkup_m3_ipc.h23
-rw-r--r--include/linux/wl12xx.h44
-rw-r--r--include/linux/wm97xx.h4
-rw-r--r--include/linux/wmi.h85
-rw-r--r--include/linux/wordpart.h57
-rw-r--r--include/linux/workqueue.h476
-rw-r--r--include/linux/workqueue_api.h1
-rw-r--r--include/linux/workqueue_types.h25
-rw-r--r--include/linux/writeback.h180
-rw-r--r--include/linux/ww_mutex.h73
-rw-r--r--include/linux/wwan.h110
-rw-r--r--include/linux/xarray.h118
-rw-r--r--include/linux/xattr.h103
-rw-r--r--include/linux/xxhash.h72
-rw-r--r--include/linux/xz.h125
-rw-r--r--include/linux/z2_battery.h17
-rw-r--r--include/linux/zbud.h23
-rw-r--r--include/linux/zorro.h3
-rw-r--r--include/linux/zpool.h119
-rw-r--r--include/linux/zsmalloc.h36
-rw-r--r--include/linux/zstd.h1376
-rw-r--r--include/linux/zstd_errors.h87
-rw-r--r--include/linux/zstd_lib.h3160
-rw-r--r--include/linux/zswap.h74
-rw-r--r--include/math-emu/op-common.h5
-rw-r--r--include/media/cadence/cdns-csi2rx.h19
-rw-r--r--include/media/cec.h96
-rw-r--r--include/media/davinci/ccdc_types.h30
-rw-r--r--include/media/davinci/dm355_ccdc.h308
-rw-r--r--include/media/davinci/dm644x_ccdc.h171
-rw-r--r--include/media/davinci/isif.h518
-rw-r--r--include/media/davinci/vpbe.h184
-rw-r--r--include/media/davinci/vpbe_display.h122
-rw-r--r--include/media/davinci/vpbe_osd.h382
-rw-r--r--include/media/davinci/vpbe_types.h74
-rw-r--r--include/media/davinci/vpbe_venc.h37
-rw-r--r--include/media/davinci/vpfe_capture.h177
-rw-r--r--include/media/davinci/vpif_types.h2
-rw-r--r--include/media/davinci/vpss.h111
-rw-r--r--include/media/dmxdev.h1
-rw-r--r--include/media/drv-intf/cx25840.h2
-rw-r--r--include/media/drv-intf/msp3400.h2
-rw-r--r--include/media/drv-intf/saa7146_vv.h68
-rw-r--r--include/media/dvb-usb-ids.h630
-rw-r--r--include/media/dvb_frontend.h13
-rw-r--r--include/media/dvb_net.h10
-rw-r--r--include/media/dvb_ringbuffer.h2
-rw-r--r--include/media/dvbdev.h55
-rw-r--r--include/media/frame_vector.h2
-rw-r--r--include/media/hevc-ctrls.h212
-rw-r--r--include/media/i2c/ad9389b.h37
-rw-r--r--include/media/i2c/adv7343.h10
-rw-r--r--include/media/i2c/adv7393.h10
-rw-r--r--include/media/i2c/bt819.h2
-rw-r--r--include/media/i2c/cs5345.h2
-rw-r--r--include/media/i2c/cs53l32a.h2
-rw-r--r--include/media/i2c/ds90ub9xx.h22
-rw-r--r--include/media/i2c/ir-kbd-i2c.h1
-rw-r--r--include/media/i2c/m52790.h2
-rw-r--r--include/media/i2c/m5mols.h29
-rw-r--r--include/media/i2c/mt9m032.h22
-rw-r--r--include/media/i2c/mt9p031.h17
-rw-r--r--include/media/i2c/mt9t001.h10
-rw-r--r--include/media/i2c/mt9t112.h2
-rw-r--r--include/media/i2c/mt9v011.h2
-rw-r--r--include/media/i2c/mt9v022.h13
-rw-r--r--include/media/i2c/mt9v032.h12
-rw-r--r--include/media/i2c/noon010pc30.h25
-rw-r--r--include/media/i2c/ov2659.h14
-rw-r--r--include/media/i2c/ov9650.h24
-rw-r--r--include/media/i2c/s5c73m3.h56
-rw-r--r--include/media/i2c/s5k4ecgx.h33
-rw-r--r--include/media/i2c/s5k6aa.h48
-rw-r--r--include/media/i2c/saa7115.h2
-rw-r--r--include/media/i2c/saa7127.h2
-rw-r--r--include/media/i2c/sr030pc30.h17
-rw-r--r--include/media/i2c/ths7303.h6
-rw-r--r--include/media/i2c/tvaudio.h2
-rw-r--r--include/media/i2c/upd64031a.h2
-rw-r--r--include/media/i2c/upd64083.h2
-rw-r--r--include/media/i2c/wm8775.h4
-rw-r--r--include/media/ipu-bridge.h182
-rw-r--r--include/media/ipu6-pci-table.h28
-rw-r--r--include/media/jpeg.h20
-rw-r--r--include/media/media-dev-allocator.h2
-rw-r--r--include/media/media-device.h77
-rw-r--r--include/media/media-entity.h436
-rw-r--r--include/media/media-request.h2
-rw-r--r--include/media/mipi-csi2.h47
-rw-r--r--include/media/mpeg2-ctrls.h82
-rw-r--r--include/media/rc-core.h67
-rw-r--r--include/media/rc-map.h6
-rw-r--r--include/media/rcar-fcp.h5
-rw-r--r--include/media/tpg/v4l2-tpg.h16
-rw-r--r--include/media/tuner-types.h6
-rw-r--r--include/media/tuner.h2
-rw-r--r--include/media/tveeprom.h2
-rw-r--r--include/media/v4l2-async.h299
-rw-r--r--include/media/v4l2-cci.h141
-rw-r--r--include/media/v4l2-common.h229
-rw-r--r--include/media/v4l2-ctrls.h247
-rw-r--r--include/media/v4l2-dev.h138
-rw-r--r--include/media/v4l2-device.h13
-rw-r--r--include/media/v4l2-dv-timings.h68
-rw-r--r--include/media/v4l2-event.h2
-rw-r--r--include/media/v4l2-fh.h30
-rw-r--r--include/media/v4l2-fwnode.h137
-rw-r--r--include/media/v4l2-h264.h31
-rw-r--r--include/media/v4l2-ioctl.h253
-rw-r--r--include/media/v4l2-isp.h91
-rw-r--r--include/media/v4l2-jpeg.h39
-rw-r--r--include/media/v4l2-mc.h17
-rw-r--r--include/media/v4l2-mediabus.h146
-rw-r--r--include/media/v4l2-mem2mem.h106
-rw-r--r--include/media/v4l2-subdev.h1103
-rw-r--r--include/media/v4l2-vp9.h233
-rw-r--r--include/media/videobuf-core.h233
-rw-r--r--include/media/videobuf-dma-contig.h30
-rw-r--r--include/media/videobuf-dma-sg.h102
-rw-r--r--include/media/videobuf-vmalloc.h43
-rw-r--r--include/media/videobuf2-core.h208
-rw-r--r--include/media/videobuf2-dvb.h2
-rw-r--r--include/media/videobuf2-memops.h3
-rw-r--r--include/media/videobuf2-v4l2.h53
-rw-r--r--include/media/vsp1.h95
-rw-r--r--include/memory/renesas-rpc-if.h47
-rw-r--r--include/misc/cxl-base.h48
-rw-r--r--include/misc/cxl.h265
-rw-r--r--include/misc/cxllib.h129
-rw-r--r--include/net/9p/9p.h25
-rw-r--r--include/net/9p/client.h173
-rw-r--r--include/net/9p/transport.h46
-rw-r--r--include/net/Space.h18
-rw-r--r--include/net/act_api.h114
-rw-r--r--include/net/addrconf.h82
-rw-r--r--include/net/af_rxrpc.h100
-rw-r--r--include/net/af_unix.h83
-rw-r--r--include/net/af_vsock.h50
-rw-r--r--include/net/aligned_data.h22
-rw-r--r--include/net/amt.h408
-rw-r--r--include/net/arp.h17
-rw-r--r--include/net/ax25.h57
-rw-r--r--include/net/ax88796.h9
-rw-r--r--include/net/bareudp.h13
-rw-r--r--include/net/bluetooth/bluetooth.h239
-rw-r--r--include/net/bluetooth/coredump.h116
-rw-r--r--include/net/bluetooth/hci.h913
-rw-r--r--include/net/bluetooth/hci_core.h1063
-rw-r--r--include/net/bluetooth/hci_drv.h76
-rw-r--r--include/net/bluetooth/hci_mon.h4
-rw-r--r--include/net/bluetooth/hci_sock.h2
-rw-r--r--include/net/bluetooth/hci_sync.h193
-rw-r--r--include/net/bluetooth/iso.h32
-rw-r--r--include/net/bluetooth/l2cap.h92
-rw-r--r--include/net/bluetooth/mgmt.h175
-rw-r--r--include/net/bluetooth/rfcomm.h2
-rw-r--r--include/net/bluetooth/sco.h2
-rw-r--r--include/net/bond_3ad.h16
-rw-r--r--include/net/bond_alb.h8
-rw-r--r--include/net/bond_options.h56
-rw-r--r--include/net/bonding.h151
-rw-r--r--include/net/busy_poll.h59
-rw-r--r--include/net/caif/caif_dev.h2
-rw-r--r--include/net/caif/caif_hsi.h200
-rw-r--r--include/net/caif/caif_layer.h6
-rw-r--r--include/net/caif/cfcnfg.h2
-rw-r--r--include/net/caif/cfpkt.h2
-rw-r--r--include/net/caif/cfserl.h1
-rw-r--r--include/net/caif/cfsrvl.h4
-rw-r--r--include/net/calipso.h2
-rw-r--r--include/net/cfg80211-wext.h20
-rw-r--r--include/net/cfg80211.h3119
-rw-r--r--include/net/cfg802154.h211
-rw-r--r--include/net/checksum.h80
-rw-r--r--include/net/cipso_ipv4.h8
-rw-r--r--include/net/cls_cgroup.h2
-rw-r--r--include/net/codel.h11
-rw-r--r--include/net/codel_impl.h20
-rw-r--r--include/net/codel_qdisc.h3
-rw-r--r--include/net/compat.h32
-rw-r--r--include/net/datalink.h11
-rw-r--r--include/net/dcbevent.h2
-rw-r--r--include/net/dcbnl.h24
-rw-r--r--include/net/devlink.h1013
-rw-r--r--include/net/dn.h231
-rw-r--r--include/net/dn_dev.h199
-rw-r--r--include/net/dn_fib.h167
-rw-r--r--include/net/dn_neigh.h30
-rw-r--r--include/net/dn_nsp.h195
-rw-r--r--include/net/dn_route.h115
-rw-r--r--include/net/dropreason-core.h647
-rw-r--r--include/net/dropreason.h43
-rw-r--r--include/net/dsa.h933
-rw-r--r--include/net/dsa_stubs.h48
-rw-r--r--include/net/dscp.h76
-rw-r--r--include/net/dst.h127
-rw-r--r--include/net/dst_cache.h15
-rw-r--r--include/net/dst_metadata.h81
-rw-r--r--include/net/dst_ops.h6
-rw-r--r--include/net/eee.h35
-rw-r--r--include/net/erspan.h7
-rw-r--r--include/net/esp.h1
-rw-r--r--include/net/espintcp.h2
-rw-r--r--include/net/ethoc.h3
-rw-r--r--include/net/failover.h1
-rw-r--r--include/net/fib_notifier.h2
-rw-r--r--include/net/fib_rules.h59
-rw-r--r--include/net/firewire.h5
-rw-r--r--include/net/flow.h59
-rw-r--r--include/net/flow_dissector.h148
-rw-r--r--include/net/flow_offload.h179
-rw-r--r--include/net/fou.h2
-rw-r--r--include/net/fq.h9
-rw-r--r--include/net/fq_impl.h21
-rw-r--r--include/net/garp.h2
-rw-r--r--include/net/gen_stats.h59
-rw-r--r--include/net/genetlink.h266
-rw-r--r--include/net/geneve.h2
-rw-r--r--include/net/gre.h70
-rw-r--r--include/net/gro.h608
-rw-r--r--include/net/gso.h109
-rw-r--r--include/net/gtp.h51
-rw-r--r--include/net/gue.h3
-rw-r--r--include/net/handshake.h49
-rw-r--r--include/net/hotdata.h61
-rw-r--r--include/net/hwbm.h6
-rw-r--r--include/net/icmp.h11
-rw-r--r--include/net/ieee80211_radiotap.h291
-rw-r--r--include/net/ieee802154_netdev.h181
-rw-r--r--include/net/ieee8021q.h57
-rw-r--r--include/net/if_inet6.h26
-rw-r--r--include/net/ila.h14
-rw-r--r--include/net/inet6_connection_sock.h4
-rw-r--r--include/net/inet6_hashtables.h128
-rw-r--r--include/net/inet_common.h28
-rw-r--r--include/net/inet_connection_sock.h116
-rw-r--r--include/net/inet_dscp.h63
-rw-r--r--include/net/inet_ecn.h17
-rw-r--r--include/net/inet_frag.h30
-rw-r--r--include/net/inet_hashtables.h297
-rw-r--r--include/net/inet_sock.h177
-rw-r--r--include/net/inet_timewait_sock.h37
-rw-r--r--include/net/inetpeer.h12
-rw-r--r--include/net/ioam6.h72
-rw-r--r--include/net/ip.h211
-rw-r--r--include/net/ip6_checksum.h20
-rw-r--r--include/net/ip6_fib.h107
-rw-r--r--include/net/ip6_route.h116
-rw-r--r--include/net/ip6_tunnel.h12
-rw-r--r--include/net/ip_fib.h127
-rw-r--r--include/net/ip_tunnels.h328
-rw-r--r--include/net/ip_vs.h228
-rw-r--r--include/net/ipcomp.h15
-rw-r--r--include/net/ipconfig.h2
-rw-r--r--include/net/ipv6.h241
-rw-r--r--include/net/ipv6_frag.h12
-rw-r--r--include/net/ipv6_stubs.h20
-rw-r--r--include/net/ipx.h171
-rw-r--r--include/net/iucv/af_iucv.h10
-rw-r--r--include/net/iucv/iucv.h46
-rw-r--r--include/net/iw_handler.h64
-rw-r--r--include/net/kcm.h4
-rw-r--r--include/net/l3mdev.h31
-rw-r--r--include/net/lapb.h2
-rw-r--r--include/net/lib80211.h122
-rw-r--r--include/net/libeth/cache.h66
-rw-r--r--include/net/libeth/rx.h314
-rw-r--r--include/net/libeth/tx.h159
-rw-r--r--include/net/libeth/types.h127
-rw-r--r--include/net/libeth/xdp.h1870
-rw-r--r--include/net/libeth/xsk.h685
-rw-r--r--include/net/llc.h6
-rw-r--r--include/net/llc_c_ac.h8
-rw-r--r--include/net/llc_c_ev.h1
-rw-r--r--include/net/llc_c_st.h8
-rw-r--r--include/net/llc_conn.h3
-rw-r--r--include/net/llc_if.h3
-rw-r--r--include/net/llc_pdu.h45
-rw-r--r--include/net/llc_s_ac.h4
-rw-r--r--include/net/llc_s_ev.h1
-rw-r--r--include/net/llc_s_st.h10
-rw-r--r--include/net/lwtunnel.h17
-rw-r--r--include/net/mac80211.h1913
-rw-r--r--include/net/mac802154.h38
-rw-r--r--include/net/macsec.h103
-rw-r--r--include/net/mana/gdma.h956
-rw-r--r--include/net/mana/hw_channel.h211
-rw-r--r--include/net/mana/mana.h1029
-rw-r--r--include/net/mana/mana_auxiliary.h10
-rw-r--r--include/net/mana/shm_channel.h21
-rw-r--r--include/net/mctp.h356
-rw-r--r--include/net/mctpdevice.h58
-rw-r--r--include/net/mpls_iptunnel.h3
-rw-r--r--include/net/mptcp.h118
-rw-r--r--include/net/mrp.h5
-rw-r--r--include/net/ncsi.h2
-rw-r--r--include/net/ndisc.h70
-rw-r--r--include/net/neighbour.h162
-rw-r--r--include/net/neighbour_tables.h12
-rw-r--r--include/net/net_debug.h159
-rw-r--r--include/net/net_namespace.h169
-rw-r--r--include/net/net_shaper.h120
-rw-r--r--include/net/net_trackers.h18
-rw-r--r--include/net/netdev_lock.h138
-rw-r--r--include/net/netdev_netlink.h12
-rw-r--r--include/net/netdev_queues.h333
-rw-r--r--include/net/netdev_rx_queue.h61
-rw-r--r--include/net/netevent.h1
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h3
-rw-r--r--include/net/netfilter/ipv4/nf_reject.h8
-rw-r--r--include/net/netfilter/ipv6/nf_defrag_ipv6.h1
-rw-r--r--include/net/netfilter/ipv6/nf_reject.h10
-rw-r--r--include/net/netfilter/nf_bpf_link.h15
-rw-r--r--include/net/netfilter/nf_conntrack.h111
-rw-r--r--include/net/netfilter/nf_conntrack_acct.h3
-rw-r--r--include/net/netfilter/nf_conntrack_act_ct.h54
-rw-r--r--include/net/netfilter/nf_conntrack_bpf.h46
-rw-r--r--include/net/netfilter/nf_conntrack_core.h30
-rw-r--r--include/net/netfilter/nf_conntrack_count.h22
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h110
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h20
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h49
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h7
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h35
-rw-r--r--include/net/netfilter/nf_conntrack_labels.h16
-rw-r--r--include/net/netfilter/nf_conntrack_seqadj.h3
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h28
-rw-r--r--include/net/netfilter/nf_conntrack_timestamp.h13
-rw-r--r--include/net/netfilter/nf_conntrack_tuple.h3
-rw-r--r--include/net/netfilter/nf_flow_table.h137
-rw-r--r--include/net/netfilter/nf_hooks_lwtunnel.h7
-rw-r--r--include/net/netfilter/nf_log.h3
-rw-r--r--include/net/netfilter/nf_nat.h6
-rw-r--r--include/net/netfilter/nf_nat_helper.h1
-rw-r--r--include/net/netfilter/nf_nat_redirect.h3
-rw-r--r--include/net/netfilter/nf_queue.h9
-rw-r--r--include/net/netfilter/nf_reject.h22
-rw-r--r--include/net/netfilter/nf_tables.h862
-rw-r--r--include/net/netfilter/nf_tables_core.h114
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h59
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h57
-rw-r--r--include/net/netfilter/nf_tables_offload.h4
-rw-r--r--include/net/netfilter/nf_tproxy.h12
-rw-r--r--include/net/netfilter/nft_fib.h39
-rw-r--r--include/net/netfilter/nft_meta.h16
-rw-r--r--include/net/netfilter/nft_reject.h6
-rw-r--r--include/net/netfilter/xt_rateest.h2
-rw-r--r--include/net/netkit.h44
-rw-r--r--include/net/netlabel.h40
-rw-r--r--include/net/netlink.h610
-rw-r--r--include/net/netmem.h414
-rw-r--r--include/net/netns/bpf.h9
-rw-r--r--include/net/netns/can.h1
-rw-r--r--include/net/netns/conntrack.h31
-rw-r--r--include/net/netns/core.h13
-rw-r--r--include/net/netns/flow_table.h14
-rw-r--r--include/net/netns/generic.h3
-rw-r--r--include/net/netns/ipv4.h133
-rw-r--r--include/net/netns/ipv6.h24
-rw-r--r--include/net/netns/mctp.h49
-rw-r--r--include/net/netns/mpls.h3
-rw-r--r--include/net/netns/netfilter.h11
-rw-r--r--include/net/netns/nexthop.h1
-rw-r--r--include/net/netns/nftables.h3
-rw-r--r--include/net/netns/sctp.h18
-rw-r--r--include/net/netns/smc.h33
-rw-r--r--include/net/netns/unix.h8
-rw-r--r--include/net/netns/x_tables.h12
-rw-r--r--include/net/netns/xdp.h2
-rw-r--r--include/net/netns/xfrm.h9
-rw-r--r--include/net/netrom.h1
-rw-r--r--include/net/nexthop.h75
-rw-r--r--include/net/nfc/digital.h4
-rw-r--r--include/net/nfc/hci.h6
-rw-r--r--include/net/nfc/nci.h4
-rw-r--r--include/net/nfc/nci_core.h38
-rw-r--r--include/net/nfc/nfc.h30
-rw-r--r--include/net/nl802154.h144
-rw-r--r--include/net/nsh.h2
-rw-r--r--include/net/p8022.h14
-rw-r--r--include/net/page_pool.h254
-rw-r--r--include/net/page_pool/helpers.h525
-rw-r--r--include/net/page_pool/memory_provider.h51
-rw-r--r--include/net/page_pool/types.h310
-rw-r--r--include/net/pfcp.h90
-rw-r--r--include/net/phonet/pep.h3
-rw-r--r--include/net/phonet/phonet.h25
-rw-r--r--include/net/phonet/pn_dev.h11
-rw-r--r--include/net/pie.h2
-rw-r--r--include/net/ping.h16
-rw-r--r--include/net/pkt_cls.h226
-rw-r--r--include/net/pkt_sched.h181
-rw-r--r--include/net/pptp.h3
-rw-r--r--include/net/proto_memory.h86
-rw-r--r--include/net/protocol.h8
-rw-r--r--include/net/psample.h15
-rw-r--r--include/net/psnap.h5
-rw-r--r--include/net/psp.h12
-rw-r--r--include/net/psp/functions.h209
-rw-r--r--include/net/psp/types.h216
-rw-r--r--include/net/raw.h37
-rw-r--r--include/net/rawv6.h7
-rw-r--r--include/net/red.h23
-rw-r--r--include/net/regulatory.h21
-rw-r--r--include/net/request_sock.h87
-rw-r--r--include/net/rose.h30
-rw-r--r--include/net/route.h189
-rw-r--r--include/net/rpl.h6
-rw-r--r--include/net/rps.h208
-rw-r--r--include/net/rsi_91x.h2
-rw-r--r--include/net/rstreason.h221
-rw-r--r--include/net/rtnetlink.h129
-rw-r--r--include/net/sch_generic.h433
-rw-r--r--include/net/scm.h74
-rw-r--r--include/net/sctp/auth.h18
-rw-r--r--include/net/sctp/checksum.h34
-rw-r--r--include/net/sctp/command.h1
-rw-r--r--include/net/sctp/constants.h33
-rw-r--r--include/net/sctp/sctp.h105
-rw-r--r--include/net/sctp/sm.h16
-rw-r--r--include/net/sctp/stream_sched.h18
-rw-r--r--include/net/sctp/structs.h152
-rw-r--r--include/net/sctp/ulpqueue.h3
-rw-r--r--include/net/secure_seq.h10
-rw-r--r--include/net/seg6.h28
-rw-r--r--include/net/seg6_hmac.h23
-rw-r--r--include/net/seg6_local.h1
-rw-r--r--include/net/selftests.h45
-rw-r--r--include/net/smc.h122
-rw-r--r--include/net/snmp.h10
-rw-r--r--include/net/sock.h1409
-rw-r--r--include/net/sock_reuseport.h24
-rw-r--r--include/net/stp.h2
-rw-r--r--include/net/strparser.h31
-rw-r--r--include/net/switchdev.h164
-rw-r--r--include/net/tc_act/tc_connmark.h10
-rw-r--r--include/net/tc_act/tc_csum.h10
-rw-r--r--include/net/tc_act/tc_ct.h22
-rw-r--r--include/net/tc_act/tc_ctinfo.h7
-rw-r--r--include/net/tc_act/tc_gact.h15
-rw-r--r--include/net/tc_act/tc_gate.h14
-rw-r--r--include/net/tc_act/tc_ipt.h17
-rw-r--r--include/net/tc_act/tc_mirred.h2
-rw-r--r--include/net/tc_act/tc_mpls.h10
-rw-r--r--include/net/tc_act/tc_nat.h11
-rw-r--r--include/net/tc_act/tc_pedit.h81
-rw-r--r--include/net/tc_act/tc_police.h42
-rw-r--r--include/net/tc_act/tc_sample.h9
-rw-r--r--include/net/tc_act/tc_skbedit.h43
-rw-r--r--include/net/tc_act/tc_skbmod.h1
-rw-r--r--include/net/tc_act/tc_tunnel_key.h1
-rw-r--r--include/net/tc_act/tc_vlan.h21
-rw-r--r--include/net/tc_wrapper.h232
-rw-r--r--include/net/tcp.h1057
-rw-r--r--include/net/tcp_ao.h356
-rw-r--r--include/net/tcp_ecn.h642
-rw-r--r--include/net/tcp_states.h2
-rw-r--r--include/net/tcx.h206
-rw-r--r--include/net/timewait_sock.h16
-rw-r--r--include/net/tls.h421
-rw-r--r--include/net/tls_prot.h68
-rw-r--r--include/net/transp_v6.h6
-rw-r--r--include/net/tso.h8
-rw-r--r--include/net/tun_proto.h3
-rw-r--r--include/net/udp.h263
-rw-r--r--include/net/udp_tunnel.h148
-rw-r--r--include/net/udplite.h68
-rw-r--r--include/net/vsock_addr.h2
-rw-r--r--include/net/vxlan.h141
-rw-r--r--include/net/x25.h8
-rw-r--r--include/net/xdp.h496
-rw-r--r--include/net/xdp_priv.h2
-rw-r--r--include/net/xdp_sock.h157
-rw-r--r--include/net/xdp_sock_drv.h239
-rw-r--r--include/net/xfrm.h530
-rw-r--r--include/net/xsk_buff_pool.h130
-rw-r--r--include/pcmcia/soc_common.h125
-rw-r--r--include/pcmcia/ss.h8
-rw-r--r--include/ras/ras_event.h166
-rw-r--r--include/rdma/ib.h2
-rw-r--r--include/rdma/ib_addr.h23
-rw-r--r--include/rdma/ib_cache.h16
-rw-r--r--include/rdma/ib_cm.h29
-rw-r--r--include/rdma/ib_hdrs.h8
-rw-r--r--include/rdma/ib_mad.h31
-rw-r--r--include/rdma/ib_marshall.h3
-rw-r--r--include/rdma/ib_pack.h8
-rw-r--r--include/rdma/ib_sa.h62
-rw-r--r--include/rdma/ib_smi.h12
-rw-r--r--include/rdma/ib_sysfs.h37
-rw-r--r--include/rdma/ib_ucaps.h30
-rw-r--r--include/rdma/ib_umem.h71
-rw-r--r--include/rdma/ib_umem_odp.h25
-rw-r--r--include/rdma/ib_verbs.h770
-rw-r--r--include/rdma/iba.h2
-rw-r--r--include/rdma/iw_cm.h21
-rw-r--r--include/rdma/opa_vnic.h5
-rw-r--r--include/rdma/rdma_cm.h35
-rw-r--r--include/rdma/rdma_counter.h9
-rw-r--r--include/rdma/rdma_netlink.h14
-rw-r--r--include/rdma/rdma_vt.h2
-rw-r--r--include/rdma/rdmavt_qp.h73
-rw-r--r--include/rdma/restrack.h15
-rw-r--r--include/rdma/uverbs_ioctl.h33
-rw-r--r--include/rdma/uverbs_std_types.h2
-rw-r--r--include/rdma/uverbs_types.h33
-rw-r--r--include/rv/automata.h75
-rw-r--r--include/rv/da_monitor.h533
-rw-r--r--include/rv/instrumentation.h29
-rw-r--r--include/rv/ltl_monitor.h173
-rw-r--r--include/scsi/fc/fc_ms.h63
-rw-r--r--include/scsi/fcoe_sysfs.h2
-rw-r--r--include/scsi/iscsi_proto.h2
-rw-r--r--include/scsi/iser.h2
-rw-r--r--include/scsi/libfc.h37
-rw-r--r--include/scsi/libfcoe.h38
-rw-r--r--include/scsi/libiscsi.h61
-rw-r--r--include/scsi/libiscsi_tcp.h16
-rw-r--r--include/scsi/libsas.h186
-rw-r--r--include/scsi/sas.h63
-rw-r--r--include/scsi/sas_ata.h71
-rw-r--r--include/scsi/scsi.h169
-rw-r--r--include/scsi/scsi_bsg_iscsi.h4
-rw-r--r--include/scsi/scsi_cmnd.h138
-rw-r--r--include/scsi/scsi_common.h13
-rw-r--r--include/scsi/scsi_dbg.h11
-rw-r--r--include/scsi/scsi_device.h226
-rw-r--r--include/scsi/scsi_devinfo.h14
-rw-r--r--include/scsi/scsi_driver.h14
-rw-r--r--include/scsi/scsi_eh.h4
-rw-r--r--include/scsi/scsi_host.h153
-rw-r--r--include/scsi/scsi_ioctl.h9
-rw-r--r--include/scsi/scsi_proto.h166
-rw-r--r--include/scsi/scsi_request.h33
-rw-r--r--include/scsi/scsi_status.h74
-rw-r--r--include/scsi/scsi_transport.h2
-rw-r--r--include/scsi/scsi_transport_fc.h50
-rw-r--r--include/scsi/scsi_transport_iscsi.h50
-rw-r--r--include/scsi/scsi_transport_sas.h3
-rw-r--r--include/scsi/scsi_transport_srp.h6
-rw-r--r--include/scsi/scsicam.h7
-rw-r--r--include/scsi/sg.h39
-rw-r--r--include/scsi/srp.h26
-rw-r--r--include/scsi/viosrp.h17
-rw-r--r--include/soc/amlogic/meson_ddr_pmu.h66
-rw-r--r--include/soc/arc/arc_aux.h (renamed from include/soc/arc/aux.h)0
-rw-r--r--include/soc/arc/mcip.h2
-rw-r--r--include/soc/arc/timers.h6
-rw-r--r--include/soc/at91/atmel_tcb.h3
-rw-r--r--include/soc/at91/pm.h16
-rw-r--r--include/soc/at91/sama7-ddr.h88
-rw-r--r--include/soc/at91/sama7-sfrbu.h27
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h54
-rw-r--r--include/soc/fsl/caam-blob.h129
-rw-r--r--include/soc/fsl/dcp.h20
-rw-r--r--include/soc/fsl/dpaa2-fd.h3
-rw-r--r--include/soc/fsl/dpaa2-io.h9
-rw-r--r--include/soc/fsl/qe/immap_qe.h3
-rw-r--r--include/soc/fsl/qe/qe.h28
-rw-r--r--include/soc/fsl/qe/qe_tdm.h4
-rw-r--r--include/soc/fsl/qe/qmc.h117
-rw-r--r--include/soc/fsl/qe/ucc_fast.h2
-rw-r--r--include/soc/fsl/qe/ucc_slow.h2
-rw-r--r--include/soc/fsl/qman.h11
-rw-r--r--include/soc/imx/cpu.h1
-rw-r--r--include/soc/imx/revision.h1
-rw-r--r--include/soc/imx/timer.h23
-rw-r--r--include/soc/mediatek/smi.h26
-rw-r--r--include/soc/microchip/mpfs.h54
-rw-r--r--include/soc/mscc/ocelot.h556
-rw-r--r--include/soc/mscc/ocelot_ana.h10
-rw-r--r--include/soc/mscc/ocelot_dev.h23
-rw-r--r--include/soc/mscc/ocelot_ptp.h3
-rw-r--r--include/soc/mscc/ocelot_vcap.h37
-rw-r--r--include/soc/mscc/vsc7514_regs.h19
-rw-r--r--include/soc/nuvoton/clock-npcm8xx.h18
-rw-r--r--include/soc/qcom/cmd-db.h10
-rw-r--r--include/soc/qcom/ice.h34
-rw-r--r--include/soc/qcom/qcom-spmi-pmic.h75
-rw-r--r--include/soc/qcom/spm.h22
-rw-r--r--include/soc/qcom/tcs.h26
-rw-r--r--include/soc/rockchip/pm_domains.h25
-rw-r--r--include/soc/rockchip/rk3399_grf.h9
-rw-r--r--include/soc/rockchip/rk3568_grf.h13
-rw-r--r--include/soc/rockchip/rk3588_grf.h22
-rw-r--r--include/soc/rockchip/rockchip_grf.h19
-rw-r--r--include/soc/rockchip/rockchip_sip.h3
-rw-r--r--include/soc/sifive/sifive_ccache.h16
-rw-r--r--include/soc/sifive/sifive_l2_cache.h16
-rw-r--r--include/soc/spacemit/k1-syscon.h161
-rw-r--r--include/soc/starfive/reset-starfive-jh71x0.h17
-rw-r--r--include/soc/tegra/bpmp-abi.h1796
-rw-r--r--include/soc/tegra/bpmp.h23
-rw-r--r--include/soc/tegra/common.h46
-rw-r--r--include/soc/tegra/fuse.h76
-rw-r--r--include/soc/tegra/irq.h9
-rw-r--r--include/soc/tegra/ivc.h12
-rw-r--r--include/soc/tegra/mc.h126
-rw-r--r--include/soc/tegra/pm.h8
-rw-r--r--include/soc/tegra/pmc.h29
-rw-r--r--include/soc/tegra/tegra-cbb.h47
-rw-r--r--include/sound/ac97/codec.h7
-rw-r--r--include/sound/ac97_codec.h5
-rw-r--r--include/sound/aci.h1
-rw-r--r--include/sound/acp63_chip_offset_byte.h495
-rw-r--r--include/sound/adau1373.h33
-rw-r--r--include/sound/ak4531_codec.h3
-rw-r--r--include/sound/asequencer.h4
-rw-r--r--include/sound/asoundef.h15
-rw-r--r--include/sound/compress_driver.h56
-rw-r--r--include/sound/control.h36
-rw-r--r--include/sound/core.h155
-rw-r--r--include/sound/cs-amp-lib.h90
-rw-r--r--include/sound/cs35l41.h928
-rw-r--r--include/sound/cs35l56.h419
-rw-r--r--include/sound/cs4271.h1
-rw-r--r--include/sound/cs42l42.h815
-rw-r--r--include/sound/cs42l43.h17
-rw-r--r--include/sound/cs42l52.h29
-rw-r--r--include/sound/cs42l56.h45
-rw-r--r--include/sound/cs42l73.h19
-rw-r--r--include/sound/cs48l32.h47
-rw-r--r--include/sound/cs48l32_registers.h530
-rw-r--r--include/sound/da7219-aad.h6
-rw-r--r--include/sound/designware_i2s.h3
-rw-r--r--include/sound/dmaengine_pcm.h16
-rw-r--r--include/sound/emu10k1.h1035
-rw-r--r--include/sound/emu8000.h3
-rw-r--r--include/sound/emux_synth.h4
-rw-r--r--include/sound/es1688.h1
-rw-r--r--include/sound/graph_card.h23
-rw-r--r--include/sound/gus.h23
-rw-r--r--include/sound/hda-mlink.h213
-rw-r--r--include/sound/hda-sdw-bpt.h69
-rw-r--r--include/sound/hda_codec.h122
-rw-r--r--include/sound/hda_register.h60
-rw-r--r--include/sound/hda_verbs.h2
-rw-r--r--include/sound/hdaudio.h127
-rw-r--r--include/sound/hdaudio_ext.h100
-rw-r--r--include/sound/hdmi-codec.h29
-rw-r--r--include/sound/hwdep.h2
-rw-r--r--include/sound/info.h2
-rw-r--r--include/sound/intel-dsp-config.h3
-rw-r--r--include/sound/intel-nhlt.h80
-rw-r--r--include/sound/jack.h11
-rw-r--r--include/sound/l3.h28
-rw-r--r--include/sound/madera-pdata.h2
-rw-r--r--include/sound/max9768.h4
-rw-r--r--include/sound/memalloc.h125
-rw-r--r--include/sound/opl3.h2
-rw-r--r--include/sound/pcm-indirect.h22
-rw-r--r--include/sound/pcm.h345
-rw-r--r--include/sound/pcm_drm_eld.h91
-rw-r--r--include/sound/pcm_iec958.h8
-rw-r--r--include/sound/pcm_params.h2
-rw-r--r--include/sound/pxa2xx-lib.h17
-rw-r--r--include/sound/q6usboffload.h20
-rw-r--r--include/sound/rawmidi.h37
-rw-r--r--include/sound/rt1318.h16
-rw-r--r--include/sound/rt5665.h2
-rw-r--r--include/sound/rt5668.h3
-rw-r--r--include/sound/rt5682.h3
-rw-r--r--include/sound/rt5682s.h54
-rw-r--r--include/sound/s3c24xx_uda134x.h14
-rw-r--r--include/sound/sb.h3
-rw-r--r--include/sound/sdca.h87
-rw-r--r--include/sound/sdca_asoc.h61
-rw-r--r--include/sound/sdca_fdl.h105
-rw-r--r--include/sound/sdca_function.h1475
-rw-r--r--include/sound/sdca_hid.h38
-rw-r--r--include/sound/sdca_interrupts.h87
-rw-r--r--include/sound/sdca_regmap.h33
-rw-r--r--include/sound/sdca_ump.h50
-rw-r--r--include/sound/sdw.h49
-rw-r--r--include/sound/seq_device.h1
-rw-r--r--include/sound/seq_kernel.h14
-rw-r--r--include/sound/simple_card.h6
-rw-r--r--include/sound/simple_card_utils.h142
-rw-r--r--include/sound/snd_wavefront.h8
-rw-r--r--include/sound/soc-acpi-intel-match.h15
-rw-r--r--include/sound/soc-acpi-intel-ssp-common.h81
-rw-r--r--include/sound/soc-acpi.h96
-rw-r--r--include/sound/soc-card.h66
-rw-r--r--include/sound/soc-component.h150
-rw-r--r--include/sound/soc-dai.h275
-rw-r--r--include/sound/soc-dapm.h597
-rw-r--r--include/sound/soc-dpcm.h33
-rw-r--r--include/sound/soc-jack.h2
-rw-r--r--include/sound/soc-topology.h11
-rw-r--r--include/sound/soc-usb.h138
-rw-r--r--include/sound/soc.h624
-rw-r--r--include/sound/soc_sdw_utils.h273
-rw-r--r--include/sound/sof.h102
-rw-r--r--include/sound/sof/channel_map.h6
-rw-r--r--include/sound/sof/control.h8
-rw-r--r--include/sound/sof/dai-amd.h36
-rw-r--r--include/sound/sof/dai-imx.h7
-rw-r--r--include/sound/sof/dai-intel.h8
-rw-r--r--include/sound/sof/dai-mediatek.h23
-rw-r--r--include/sound/sof/dai.h48
-rw-r--r--include/sound/sof/debug.h4
-rw-r--r--include/sound/sof/ext_manifest.h3
-rw-r--r--include/sound/sof/ext_manifest4.h119
-rw-r--r--include/sound/sof/header.h5
-rw-r--r--include/sound/sof/info.h7
-rw-r--r--include/sound/sof/ipc4/header.h591
-rw-r--r--include/sound/sof/pm.h2
-rw-r--r--include/sound/sof/stream.h9
-rw-r--r--include/sound/sof/topology.h71
-rw-r--r--include/sound/sof/trace.h2
-rw-r--r--include/sound/sof/xtensa.h2
-rw-r--r--include/sound/soundfont.h26
-rw-r--r--include/sound/tas2563-tlv.h279
-rw-r--r--include/sound/tas2770-tlv.h23
-rw-r--r--include/sound/tas2781-comlib-i2c.h37
-rw-r--r--include/sound/tas2781-dsp.h229
-rw-r--r--include/sound/tas2781-tlv.h21
-rw-r--r--include/sound/tas2781.h276
-rw-r--r--include/sound/tas2x20-tlv.h259
-rw-r--r--include/sound/tas5825-tlv.h24
-rw-r--r--include/sound/tlv320aic32x4.h9
-rw-r--r--include/sound/tlv320aic3x.h65
-rw-r--r--include/sound/tlv320dac33-plat.h21
-rw-r--r--include/sound/tpa6130a2-plat.h17
-rw-r--r--include/sound/uda134x.h24
-rw-r--r--include/sound/ump.h282
-rw-r--r--include/sound/ump_convert.h47
-rw-r--r--include/sound/ump_msg.h765
-rw-r--r--include/sound/vx_core.h1
-rw-r--r--include/sound/wavefront.h53
-rw-r--r--include/sound/wm0010.h6
-rw-r--r--include/sound/wm1250-ev1.h24
-rw-r--r--include/sound/wm2200.h2
-rw-r--r--include/sound/wm5100.h4
-rw-r--r--include/sound/wm8904.h3
-rw-r--r--include/sound/wm8996.h3
-rw-r--r--include/target/iscsi/iscsi_target_core.h110
-rw-r--r--include/target/iscsi/iscsi_transport.h126
-rw-r--r--include/target/target_core_backend.h20
-rw-r--r--include/target/target_core_base.h109
-rw-r--r--include/target/target_core_fabric.h35
-rw-r--r--include/trace/bpf_probe.h59
-rw-r--r--include/trace/define_custom_trace.h77
-rw-r--r--include/trace/define_trace.h39
-rw-r--r--include/trace/events/9p.h59
-rw-r--r--include/trace/events/afs.h1226
-rw-r--r--include/trace/events/alarmtimer.h2
-rw-r--r--include/trace/events/amdxdna.h101
-rw-r--r--include/trace/events/asoc.h68
-rw-r--r--include/trace/events/avc.h6
-rw-r--r--include/trace/events/block.h238
-rw-r--r--include/trace/events/bpf_test_run.h17
-rw-r--r--include/trace/events/bridge.h72
-rw-r--r--include/trace/events/btrfs.h839
-rw-r--r--include/trace/events/cachefiles.h810
-rw-r--r--include/trace/events/capability.h57
-rw-r--r--include/trace/events/cgroup.h75
-rw-r--r--include/trace/events/clk.h57
-rw-r--r--include/trace/events/cma.h95
-rw-r--r--include/trace/events/compaction.h61
-rw-r--r--include/trace/events/csd.h72
-rw-r--r--include/trace/events/damon.h126
-rw-r--r--include/trace/events/devfreq.h4
-rw-r--r--include/trace/events/devlink.h95
-rw-r--r--include/trace/events/dlm.h674
-rw-r--r--include/trace/events/dma.h475
-rw-r--r--include/trace/events/dma_fence.h42
-rw-r--r--include/trace/events/erofs.h102
-rw-r--r--include/trace/events/error_report.h8
-rw-r--r--include/trace/events/exceptions.h43
-rw-r--r--include/trace/events/ext4.h671
-rw-r--r--include/trace/events/f2fs.h829
-rw-r--r--include/trace/events/fib.h15
-rw-r--r--include/trace/events/fib6.h19
-rw-r--r--include/trace/events/filelock.h107
-rw-r--r--include/trace/events/filemap.h116
-rw-r--r--include/trace/events/firewire.h912
-rw-r--r--include/trace/events/firewire_ohci.h101
-rw-r--r--include/trace/events/fs_dax.h100
-rw-r--r--include/trace/events/fscache.h682
-rw-r--r--include/trace/events/fsi.h117
-rw-r--r--include/trace/events/fsi_master_aspeed.h12
-rw-r--r--include/trace/events/fsi_master_i2cr.h107
-rw-r--r--include/trace/events/habanalabs.h211
-rw-r--r--include/trace/events/handshake.h319
-rw-r--r--include/trace/events/huge_memory.h107
-rw-r--r--include/trace/events/hugetlbfs.h156
-rw-r--r--include/trace/events/hw_pressure.h29
-rw-r--r--include/trace/events/hwmon.h16
-rw-r--r--include/trace/events/i2c_slave.h67
-rw-r--r--include/trace/events/ib_mad.h13
-rw-r--r--include/trace/events/icmp.h67
-rw-r--r--include/trace/events/initcall.h2
-rw-r--r--include/trace/events/intel_ifs.h68
-rw-r--r--include/trace/events/intel_iommu.h57
-rw-r--r--include/trace/events/intel_ish.h2
-rw-r--r--include/trace/events/io_uring.h540
-rw-r--r--include/trace/events/iocost.h18
-rw-r--r--include/trace/events/iommu.h25
-rw-r--r--include/trace/events/ipi.h46
-rw-r--r--include/trace/events/irq.h49
-rw-r--r--include/trace/events/irq_matrix.h8
-rw-r--r--include/trace/events/iscsi.h6
-rw-r--r--include/trace/events/jbd2.h153
-rw-r--r--include/trace/events/kmem.h203
-rw-r--r--include/trace/events/ksm.h284
-rw-r--r--include/trace/events/kvm.h128
-rw-r--r--include/trace/events/kyber.h27
-rw-r--r--include/trace/events/libata.h417
-rw-r--r--include/trace/events/lock.h67
-rw-r--r--include/trace/events/maple_tree.h123
-rw-r--r--include/trace/events/mce.h66
-rw-r--r--include/trace/events/mctp.h78
-rw-r--r--include/trace/events/mdio.h2
-rw-r--r--include/trace/events/memcg.h106
-rw-r--r--include/trace/events/memory-failure.h98
-rw-r--r--include/trace/events/migrate.h59
-rw-r--r--include/trace/events/mmap.h23
-rw-r--r--include/trace/events/mmap_lock.h78
-rw-r--r--include/trace/events/mmc.h4
-rw-r--r--include/trace/events/mmflags.h230
-rw-r--r--include/trace/events/module.h8
-rw-r--r--include/trace/events/mptcp.h27
-rw-r--r--include/trace/events/napi.h35
-rw-r--r--include/trace/events/nbd.h2
-rw-r--r--include/trace/events/neigh.h12
-rw-r--r--include/trace/events/net.h66
-rw-r--r--include/trace/events/net_probe_common.h71
-rw-r--r--include/trace/events/netfs.h715
-rw-r--r--include/trace/events/netlink.h2
-rw-r--r--include/trace/events/nilfs2.h8
-rw-r--r--include/trace/events/notifier.h69
-rw-r--r--include/trace/events/oom.h40
-rw-r--r--include/trace/events/osnoise.h238
-rw-r--r--include/trace/events/page_pool.h32
-rw-r--r--include/trace/events/page_ref.h12
-rw-r--r--include/trace/events/pagemap.h46
-rw-r--r--include/trace/events/percpu.h23
-rw-r--r--include/trace/events/power.h163
-rw-r--r--include/trace/events/preemptirq.h8
-rw-r--r--include/trace/events/pwc.h4
-rw-r--r--include/trace/events/pwm.h150
-rw-r--r--include/trace/events/qdisc.h56
-rw-r--r--include/trace/events/qla.h6
-rw-r--r--include/trace/events/qrtr.h35
-rw-r--r--include/trace/events/random.h247
-rw-r--r--include/trace/events/rcu.h99
-rw-r--r--include/trace/events/readahead.h132
-rw-r--r--include/trace/events/regulator.h6
-rw-r--r--include/trace/events/rpcgss.h68
-rw-r--r--include/trace/events/rpcrdma.h662
-rw-r--r--include/trace/events/rpm.h46
-rw-r--r--include/trace/events/rseq.h9
-rw-r--r--include/trace/events/rust_sample.h31
-rw-r--r--include/trace/events/rwmmio.h108
-rw-r--r--include/trace/events/rxrpc.h2623
-rw-r--r--include/trace/events/sched.h332
-rw-r--r--include/trace/events/sched_ext.h90
-rw-r--r--include/trace/events/scmi.h122
-rw-r--r--include/trace/events/scsi.h118
-rw-r--r--include/trace/events/skb.h44
-rw-r--r--include/trace/events/sock.h145
-rw-r--r--include/trace/events/sof.h121
-rw-r--r--include/trace/events/sof_intel.h148
-rw-r--r--include/trace/events/spi-mem.h106
-rw-r--r--include/trace/events/spi.h65
-rw-r--r--include/trace/events/spmi.h12
-rw-r--r--include/trace/events/sunrpc.h931
-rw-r--r--include/trace/events/swiotlb.h31
-rw-r--r--include/trace/events/syscalls.h4
-rw-r--r--include/trace/events/target.h8
-rw-r--r--include/trace/events/task.h52
-rw-r--r--include/trace/events/tcp.h644
-rw-r--r--include/trace/events/tegra_apb_dma.h6
-rw-r--r--include/trace/events/thermal.h211
-rw-r--r--include/trace/events/thermal_power_allocator.h88
-rw-r--r--include/trace/events/thp.h75
-rw-r--r--include/trace/events/timer.h60
-rw-r--r--include/trace/events/timer_migration.h298
-rw-r--r--include/trace/events/timestamp.h124
-rw-r--r--include/trace/events/tsm_mr.h80
-rw-r--r--include/trace/events/udp.h29
-rw-r--r--include/trace/events/ufs.h376
-rw-r--r--include/trace/events/vmalloc.h123
-rw-r--r--include/trace/events/vmscan.h162
-rw-r--r--include/trace/events/vsock_virtio_transport_common.h17
-rw-r--r--include/trace/events/watchdog.h66
-rw-r--r--include/trace/events/wbt.h8
-rw-r--r--include/trace/events/workqueue.h14
-rw-r--r--include/trace/events/writeback.h156
-rw-r--r--include/trace/events/xdp.h49
-rw-r--r--include/trace/events/xen.h12
-rw-r--r--include/trace/misc/fs.h165
-rw-r--r--include/trace/misc/nfs.h421
-rw-r--r--include/trace/misc/rdma.h (renamed from include/trace/events/rdma.h)0
-rw-r--r--include/trace/misc/sunrpc.h18
-rw-r--r--include/trace/perf.h61
-rw-r--r--include/trace/stages/init.h37
-rw-r--r--include/trace/stages/stage1_struct_define.h60
-rw-r--r--include/trace/stages/stage2_data_offsets.h63
-rw-r--r--include/trace/stages/stage3_trace_output.h152
-rw-r--r--include/trace/stages/stage4_event_fields.h81
-rw-r--r--include/trace/stages/stage5_get_offsets.h127
-rw-r--r--include/trace/stages/stage6_event_callback.h139
-rw-r--r--include/trace/stages/stage7_class_define.h40
-rw-r--r--include/trace/syscall.h8
-rw-r--r--include/trace/trace_custom_events.h221
-rw-r--r--include/trace/trace_events.h393
-rw-r--r--include/uapi/asm-generic/bitsperlong.h17
-rw-r--r--include/uapi/asm-generic/fcntl.h29
-rw-r--r--include/uapi/asm-generic/hugetlb_encode.h26
-rw-r--r--include/uapi/asm-generic/ioctl.h14
-rw-r--r--include/uapi/asm-generic/mman-common.h11
-rw-r--r--include/uapi/asm-generic/mman.h4
-rw-r--r--include/uapi/asm-generic/param.h6
-rw-r--r--include/uapi/asm-generic/poll.h2
-rw-r--r--include/uapi/asm-generic/posix_types.h1
-rw-r--r--include/uapi/asm-generic/shmbuf.h4
-rw-r--r--include/uapi/asm-generic/siginfo.h37
-rw-r--r--include/uapi/asm-generic/signal-defs.h1
-rw-r--r--include/uapi/asm-generic/signal.h2
-rw-r--r--include/uapi/asm-generic/socket.h28
-rw-r--r--include/uapi/asm-generic/termbits-common.h66
-rw-r--r--include/uapi/asm-generic/termbits.h239
-rw-r--r--include/uapi/asm-generic/types.h6
-rw-r--r--include/uapi/asm-generic/unistd.h209
-rw-r--r--include/uapi/cxl/features.h179
-rw-r--r--include/uapi/drm/amdgpu_drm.h580
-rw-r--r--include/uapi/drm/amdxdna_accel.h698
-rw-r--r--include/uapi/drm/asahi_drm.h1194
-rw-r--r--include/uapi/drm/drm.h388
-rw-r--r--include/uapi/drm/drm_fourcc.h553
-rw-r--r--include/uapi/drm/drm_mode.h522
-rw-r--r--include/uapi/drm/ethosu_accel.h261
-rw-r--r--include/uapi/drm/etnaviv_drm.h3
-rw-r--r--include/uapi/drm/habanalabs_accel.h2368
-rw-r--r--include/uapi/drm/i810_drm.h292
-rw-r--r--include/uapi/drm/i915_drm.h2014
-rw-r--r--include/uapi/drm/ivpu_accel.h564
-rw-r--r--include/uapi/drm/mga_drm.h427
-rw-r--r--include/uapi/drm/msm_drm.h242
-rw-r--r--include/uapi/drm/nouveau_drm.h322
-rw-r--r--include/uapi/drm/nova_drm.h101
-rw-r--r--include/uapi/drm/panfrost_drm.h222
-rw-r--r--include/uapi/drm/panthor_drm.h1095
-rw-r--r--include/uapi/drm/pvr_drm.h1295
-rw-r--r--include/uapi/drm/qaic_accel.h399
-rw-r--r--include/uapi/drm/r128_drm.h336
-rw-r--r--include/uapi/drm/rocket_accel.h142
-rw-r--r--include/uapi/drm/savage_drm.h220
-rw-r--r--include/uapi/drm/sis_drm.h77
-rw-r--r--include/uapi/drm/tegra_drm.h425
-rw-r--r--include/uapi/drm/v3d_drm.h526
-rw-r--r--include/uapi/drm/via_drm.h282
-rw-r--r--include/uapi/drm/virtgpu_drm.h57
-rw-r--r--include/uapi/drm/vmwgfx_drm.h55
-rw-r--r--include/uapi/drm/xe_drm.h2280
-rw-r--r--include/uapi/fwctl/cxl.h56
-rw-r--r--include/uapi/fwctl/fwctl.h141
-rw-r--r--include/uapi/fwctl/mlx5.h36
-rw-r--r--include/uapi/fwctl/pds.h62
-rw-r--r--include/uapi/linux/acct.h3
-rw-r--r--include/uapi/linux/acrn.h81
-rw-r--r--include/uapi/linux/affs_hardblocks.h68
-rw-r--r--include/uapi/linux/agpgart.h9
-rw-r--r--include/uapi/linux/amt.h62
-rw-r--r--include/uapi/linux/android/binder.h101
-rw-r--r--include/uapi/linux/android/binder_netlink.h38
-rw-r--r--include/uapi/linux/aspeed-video.h21
-rw-r--r--include/uapi/linux/atmbr2684.h2
-rw-r--r--include/uapi/linux/atmdev.h4
-rw-r--r--include/uapi/linux/audit.h27
-rw-r--r--include/uapi/linux/auto_dev-ioctl.h2
-rw-r--r--include/uapi/linux/auto_fs.h2
-rw-r--r--include/uapi/linux/auxvec.h7
-rw-r--r--include/uapi/linux/batadv_packet.h76
-rw-r--r--include/uapi/linux/batman_adv.h18
-rw-r--r--include/uapi/linux/bcache.h445
-rw-r--r--include/uapi/linux/bits.h14
-rw-r--r--include/uapi/linux/blk-crypto.h44
-rw-r--r--include/uapi/linux/blkdev.h14
-rw-r--r--include/uapi/linux/blktrace_api.h57
-rw-r--r--include/uapi/linux/blkzoned.h58
-rw-r--r--include/uapi/linux/bpf.h2169
-rw-r--r--include/uapi/linux/bpfilter.h21
-rw-r--r--include/uapi/linux/btf.h76
-rw-r--r--include/uapi/linux/btrfs.h282
-rw-r--r--include/uapi/linux/btrfs_tree.h368
-rw-r--r--include/uapi/linux/byteorder/big_endian.h1
-rw-r--r--include/uapi/linux/byteorder/little_endian.h1
-rw-r--r--include/uapi/linux/cachefiles.h68
-rw-r--r--include/uapi/linux/can.h71
-rw-r--r--include/uapi/linux/can/bcm.h2
-rw-r--r--include/uapi/linux/can/error.h20
-rw-r--r--include/uapi/linux/can/isotp.h58
-rw-r--r--include/uapi/linux/can/j1939.h9
-rw-r--r--include/uapi/linux/can/netlink.h84
-rw-r--r--include/uapi/linux/can/raw.h19
-rw-r--r--include/uapi/linux/capability.h14
-rw-r--r--include/uapi/linux/cdrom.h21
-rw-r--r--include/uapi/linux/cec-funcs.h56
-rw-r--r--include/uapi/linux/cec.h33
-rw-r--r--include/uapi/linux/cgroupstats.h2
-rw-r--r--include/uapi/linux/cifs/cifs_mount.h1
-rw-r--r--include/uapi/linux/cm4000_cs.h64
-rw-r--r--include/uapi/linux/cn_proc.h61
-rw-r--r--include/uapi/linux/comedi.h1528
-rw-r--r--include/uapi/linux/connector.h2
-rw-r--r--include/uapi/linux/const.h19
-rw-r--r--include/uapi/linux/coredump.h104
-rw-r--r--include/uapi/linux/counter.h172
-rw-r--r--include/uapi/linux/counter/microchip-tcb-capture.h40
-rw-r--r--include/uapi/linux/cryptouser.h35
-rw-r--r--include/uapi/linux/cxl_mem.h87
-rw-r--r--include/uapi/linux/cyclades.h35
-rw-r--r--include/uapi/linux/cycx_cfm.h2
-rw-r--r--include/uapi/linux/dcbnl.h10
-rw-r--r--include/uapi/linux/devlink.h143
-rw-r--r--include/uapi/linux/dlm.h4
-rw-r--r--include/uapi/linux/dlm_device.h4
-rw-r--r--include/uapi/linux/dlm_netlink.h60
-rw-r--r--include/uapi/linux/dlm_plock.h1
-rw-r--r--include/uapi/linux/dlmconstants.h5
-rw-r--r--include/uapi/linux/dm-ioctl.h23
-rw-r--r--include/uapi/linux/dm-log-userspace.h2
-rw-r--r--include/uapi/linux/dma-buf.h138
-rw-r--r--include/uapi/linux/dma-heap.h2
-rw-r--r--include/uapi/linux/dn.h149
-rw-r--r--include/uapi/linux/dpll.h281
-rw-r--r--include/uapi/linux/dvb/audio.h15
-rw-r--r--include/uapi/linux/dvb/ca.h15
-rw-r--r--include/uapi/linux/dvb/dmx.h15
-rw-r--r--include/uapi/linux/dvb/frontend.h87
-rw-r--r--include/uapi/linux/dvb/net.h15
-rw-r--r--include/uapi/linux/dvb/osd.h15
-rw-r--r--include/uapi/linux/dvb/version.h17
-rw-r--r--include/uapi/linux/dvb/video.h15
-rw-r--r--include/uapi/linux/dw100.h14
-rw-r--r--include/uapi/linux/elf-em.h1
-rw-r--r--include/uapi/linux/elf-fdpic.h15
-rw-r--r--include/uapi/linux/elf.h184
-rw-r--r--include/uapi/linux/energy_model.h62
-rw-r--r--include/uapi/linux/errqueue.h1
-rw-r--r--include/uapi/linux/ethtool.h647
-rw-r--r--include/uapi/linux/ethtool_netlink.h670
-rw-r--r--include/uapi/linux/ethtool_netlink_generated.h961
-rw-r--r--include/uapi/linux/eventfd.h11
-rw-r--r--include/uapi/linux/eventpoll.h31
-rw-r--r--include/uapi/linux/exfat.h25
-rw-r--r--include/uapi/linux/ext4.h170
-rw-r--r--include/uapi/linux/f2fs.h11
-rw-r--r--include/uapi/linux/falloc.h18
-rw-r--r--include/uapi/linux/fanotify.h107
-rw-r--r--include/uapi/linux/fb.h12
-rw-r--r--include/uapi/linux/fcntl.h131
-rw-r--r--include/uapi/linux/fib_rules.h10
-rw-r--r--include/uapi/linux/fiemap.h47
-rw-r--r--include/uapi/linux/firewire-cdev.h191
-rw-r--r--include/uapi/linux/fou.h57
-rw-r--r--include/uapi/linux/fs.h364
-rw-r--r--include/uapi/linux/fscrypt.h16
-rw-r--r--include/uapi/linux/fsi.h24
-rw-r--r--include/uapi/linux/fsmap.h2
-rw-r--r--include/uapi/linux/fuse.h353
-rw-r--r--include/uapi/linux/futex.h59
-rw-r--r--include/uapi/linux/genetlink.h5
-rw-r--r--include/uapi/linux/gpib.h104
-rw-r--r--include/uapi/linux/gpib_ioctl.h167
-rw-r--r--include/uapi/linux/gpio.h64
-rw-r--r--include/uapi/linux/gsmmux.h117
-rw-r--r--include/uapi/linux/gtp.h6
-rw-r--r--include/uapi/linux/handshake.h76
-rw-r--r--include/uapi/linux/hash_info.h3
-rw-r--r--include/uapi/linux/hid.h26
-rw-r--r--include/uapi/linux/hidraw.h3
-rw-r--r--include/uapi/linux/hsi/cs-protocol.h14
-rw-r--r--include/uapi/linux/hsi/hsi_char.h14
-rw-r--r--include/uapi/linux/hw_breakpoint.h10
-rw-r--r--include/uapi/linux/hyperv.h13
-rw-r--r--include/uapi/linux/i2c.h5
-rw-r--r--include/uapi/linux/i8k.h2
-rw-r--r--include/uapi/linux/icmp.h3
-rw-r--r--include/uapi/linux/icmpv6.h1
-rw-r--r--include/uapi/linux/idxd.h110
-rw-r--r--include/uapi/linux/if_addr.h13
-rw-r--r--include/uapi/linux/if_addrlabel.h4
-rw-r--r--include/uapi/linux/if_alg.h9
-rw-r--r--include/uapi/linux/if_arcnet.h12
-rw-r--r--include/uapi/linux/if_arp.h1
-rw-r--r--include/uapi/linux/if_bonding.h6
-rw-r--r--include/uapi/linux/if_bridge.h132
-rw-r--r--include/uapi/linux/if_cablemodem.h23
-rw-r--r--include/uapi/linux/if_ether.h14
-rw-r--r--include/uapi/linux/if_fc.h6
-rw-r--r--include/uapi/linux/if_hippi.h6
-rw-r--r--include/uapi/linux/if_link.h755
-rw-r--r--include/uapi/linux/if_macsec.h2
-rw-r--r--include/uapi/linux/if_packet.h14
-rw-r--r--include/uapi/linux/if_plip.h4
-rw-r--r--include/uapi/linux/if_pppox.h4
-rw-r--r--include/uapi/linux/if_slip.h4
-rw-r--r--include/uapi/linux/if_team.h117
-rw-r--r--include/uapi/linux/if_tun.h15
-rw-r--r--include/uapi/linux/if_tunnel.h40
-rw-r--r--include/uapi/linux/if_x25.h6
-rw-r--r--include/uapi/linux/if_xdp.h81
-rw-r--r--include/uapi/linux/igmp.h6
-rw-r--r--include/uapi/linux/iio/buffer.h22
-rw-r--r--include/uapi/linux/iio/types.h25
-rw-r--r--include/uapi/linux/in.h40
-rw-r--r--include/uapi/linux/in6.h5
-rw-r--r--include/uapi/linux/inet_diag.h4
-rw-r--r--include/uapi/linux/inotify.h6
-rw-r--r--include/uapi/linux/input-event-codes.h58
-rw-r--r--include/uapi/linux/input.h35
-rw-r--r--include/uapi/linux/io_uring.h797
-rw-r--r--include/uapi/linux/io_uring/mock_file.h47
-rw-r--r--include/uapi/linux/io_uring/query.h68
-rw-r--r--include/uapi/linux/ioam6.h133
-rw-r--r--include/uapi/linux/ioam6_genl.h72
-rw-r--r--include/uapi/linux/ioam6_iptunnel.h64
-rw-r--r--include/uapi/linux/iommu.h342
-rw-r--r--include/uapi/linux/iommufd.h1302
-rw-r--r--include/uapi/linux/ioprio.h127
-rw-r--r--include/uapi/linux/ip.h28
-rw-r--r--include/uapi/linux/ip6_tunnel.h4
-rw-r--r--include/uapi/linux/ip_vs.h4
-rw-r--r--include/uapi/linux/ipmi.h16
-rw-r--r--include/uapi/linux/ipmi_ssif_bmc.h18
-rw-r--r--include/uapi/linux/ipsec.h3
-rw-r--r--include/uapi/linux/ipv6.h16
-rw-r--r--include/uapi/linux/ipx.h87
-rw-r--r--include/uapi/linux/iso_fs.h4
-rw-r--r--include/uapi/linux/isst_if.h343
-rw-r--r--include/uapi/linux/ivtv.h2
-rw-r--r--include/uapi/linux/jffs2.h8
-rw-r--r--include/uapi/linux/kcov.h2
-rw-r--r--include/uapi/linux/kd.h10
-rw-r--r--include/uapi/linux/kernel-page-flags.h2
-rw-r--r--include/uapi/linux/kexec.h13
-rw-r--r--include/uapi/linux/kfd_ioctl.h1118
-rw-r--r--include/uapi/linux/kfd_sysfs.h128
-rw-r--r--include/uapi/linux/kvm.h1060
-rw-r--r--include/uapi/linux/kvm_para.h1
-rw-r--r--include/uapi/linux/l2tp.h2
-rw-r--r--include/uapi/linux/landlock.h274
-rw-r--r--include/uapi/linux/libc-compat.h36
-rw-r--r--include/uapi/linux/lightnvm.h224
-rw-r--r--include/uapi/linux/lirc.h22
-rw-r--r--include/uapi/linux/liveupdate.h216
-rw-r--r--include/uapi/linux/loadpin.h22
-rw-r--r--include/uapi/linux/lockd_netlink.h30
-rw-r--r--include/uapi/linux/loop.h11
-rw-r--r--include/uapi/linux/lsm.h93
-rw-r--r--include/uapi/linux/lwtunnel.h11
-rw-r--r--include/uapi/linux/magic.h15
-rw-r--r--include/uapi/linux/map_benchmark.h35
-rw-r--r--include/uapi/linux/map_to_14segment.h241
-rw-r--r--include/uapi/linux/mctp.h108
-rw-r--r--include/uapi/linux/mdio.h161
-rw-r--r--include/uapi/linux/media-bus-format.h31
-rw-r--r--include/uapi/linux/media.h30
-rw-r--r--include/uapi/linux/media/amlogic/c3-isp-config.h520
-rw-r--r--include/uapi/linux/media/arm/mali-c55-config.h794
-rw-r--r--include/uapi/linux/media/raspberrypi/pisp_be_config.h969
-rw-r--r--include/uapi/linux/media/raspberrypi/pisp_common.h202
-rw-r--r--include/uapi/linux/media/raspberrypi/pisp_fe_config.h273
-rw-r--r--include/uapi/linux/media/raspberrypi/pisp_fe_statistics.h64
-rw-r--r--include/uapi/linux/media/v4l2-isp.h102
-rw-r--r--include/uapi/linux/mei.h20
-rw-r--r--include/uapi/linux/mei_uuid.h29
-rw-r--r--include/uapi/linux/membarrier.h4
-rw-r--r--include/uapi/linux/memfd.h4
-rw-r--r--include/uapi/linux/mempolicy.h17
-rw-r--r--include/uapi/linux/meye.h65
-rw-r--r--include/uapi/linux/minix_fs.h4
-rw-r--r--include/uapi/linux/mman.h15
-rw-r--r--include/uapi/linux/mmc/ioctl.h2
-rw-r--r--include/uapi/linux/module.h1
-rw-r--r--include/uapi/linux/mount.h102
-rw-r--r--include/uapi/linux/mptcp.h235
-rw-r--r--include/uapi/linux/mptcp_pm.h153
-rw-r--r--include/uapi/linux/mroute6.h1
-rw-r--r--include/uapi/linux/mshv.h405
-rw-r--r--include/uapi/linux/n_r3964.h99
-rw-r--r--include/uapi/linux/nbd-netlink.h1
-rw-r--r--include/uapi/linux/nbd.h33
-rw-r--r--include/uapi/linux/ndctl.h12
-rw-r--r--include/uapi/linux/neighbour.h56
-rw-r--r--include/uapi/linux/net_dropmon.h16
-rw-r--r--include/uapi/linux/net_shaper.h96
-rw-r--r--include/uapi/linux/net_tstamp.h57
-rw-r--r--include/uapi/linux/netconf.h1
-rw-r--r--include/uapi/linux/netdev.h239
-rw-r--r--include/uapi/linux/netfilter.h3
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h6
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h2
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_sctp.h2
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h143
-rw-r--r--include/uapi/linux/netfilter/nfnetlink.h3
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_conntrack.h3
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_cttimeout.h2
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_hook.h84
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_log.h2
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_queue.h6
-rw-r--r--include/uapi/linux/netfilter/x_tables.h4
-rw-r--r--include/uapi/linux/netfilter/xt_AUDIT.h4
-rw-r--r--include/uapi/linux/netfilter/xt_IDLETIMER.h17
-rw-r--r--include/uapi/linux/netfilter/xt_connmark.h13
-rw-r--r--include/uapi/linux/netfilter/xt_osf.h14
-rw-r--r--include/uapi/linux/netfilter_arp/arp_tables.h6
-rw-r--r--include/uapi/linux/netfilter_bridge/ebt_among.h2
-rw-r--r--include/uapi/linux/netfilter_bridge/ebtables.h22
-rw-r--r--include/uapi/linux/netfilter_decnet.h72
-rw-r--r--include/uapi/linux/netfilter_ipv4/ip_tables.h6
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6_tables.h4
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6t_LOG.h2
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6t_srh.h40
-rw-r--r--include/uapi/linux/netlink.h42
-rw-r--r--include/uapi/linux/netlink_diag.h4
-rw-r--r--include/uapi/linux/nexthop.h55
-rw-r--r--include/uapi/linux/nfc.h9
-rw-r--r--include/uapi/linux/nfs.h1
-rw-r--r--include/uapi/linux/nfs4.h12
-rw-r--r--include/uapi/linux/nfs_fs.h2
-rw-r--r--include/uapi/linux/nfsd/export.h13
-rw-r--r--include/uapi/linux/nfsd/nfsfh.h116
-rw-r--r--include/uapi/linux/nfsd_netlink.h97
-rw-r--r--include/uapi/linux/nilfs2_ondisk.h3
-rw-r--r--include/uapi/linux/nitro_enclaves.h10
-rw-r--r--include/uapi/linux/nl80211-vnd-intel.h105
-rw-r--r--include/uapi/linux/nl80211.h1567
-rw-r--r--include/uapi/linux/npcm-video.h41
-rw-r--r--include/uapi/linux/nsfs.h109
-rw-r--r--include/uapi/linux/nsm.h31
-rw-r--r--include/uapi/linux/ntsync.h59
-rw-r--r--include/uapi/linux/nvme_ioctl.h34
-rw-r--r--include/uapi/linux/omap3isp.h21
-rw-r--r--include/uapi/linux/openvswitch.h87
-rw-r--r--include/uapi/linux/ovpn.h110
-rw-r--r--include/uapi/linux/papr_pdsm.h165
-rw-r--r--include/uapi/linux/parport.h3
-rw-r--r--include/uapi/linux/pci_regs.h430
-rw-r--r--include/uapi/linux/pcitest.h13
-rw-r--r--include/uapi/linux/perf_event.h707
-rw-r--r--include/uapi/linux/pfkeyv2.h2
-rw-r--r--include/uapi/linux/pfrut.h263
-rw-r--r--include/uapi/linux/pidfd.h109
-rw-r--r--include/uapi/linux/pkt_cls.h151
-rw-r--r--include/uapi/linux/pkt_sched.h242
-rw-r--r--include/uapi/linux/pktcdvd.h12
-rw-r--r--include/uapi/linux/pps_gen.h37
-rw-r--r--include/uapi/linux/pr.h31
-rw-r--r--include/uapi/linux/prctl.h141
-rw-r--r--include/uapi/linux/psample.h11
-rw-r--r--include/uapi/linux/psci.h23
-rw-r--r--include/uapi/linux/psp-dbc.h147
-rw-r--r--include/uapi/linux/psp-sev.h176
-rw-r--r--include/uapi/linux/psp-sfs.h87
-rw-r--r--include/uapi/linux/psp.h85
-rw-r--r--include/uapi/linux/ptp_clock.h49
-rw-r--r--include/uapi/linux/ptrace.h39
-rw-r--r--include/uapi/linux/pwm.h53
-rw-r--r--include/uapi/linux/quota.h1
-rw-r--r--include/uapi/linux/raid/md_p.h15
-rw-r--r--include/uapi/linux/raid/md_u.h11
-rw-r--r--include/uapi/linux/random.h19
-rw-r--r--include/uapi/linux/raw.h17
-rw-r--r--include/uapi/linux/reiserfs_fs.h27
-rw-r--r--include/uapi/linux/reiserfs_xattr.h25
-rw-r--r--include/uapi/linux/resource.h15
-rw-r--r--include/uapi/linux/rfkill.h14
-rw-r--r--include/uapi/linux/rkisp1-config.h750
-rw-r--r--include/uapi/linux/romfs_fs.h4
-rw-r--r--include/uapi/linux/rpl.h4
-rw-r--r--include/uapi/linux/rpmsg.h20
-rw-r--r--include/uapi/linux/rseq.h63
-rw-r--r--include/uapi/linux/rtc.h32
-rw-r--r--include/uapi/linux/rtnetlink.h58
-rw-r--r--include/uapi/linux/rxrpc.h77
-rw-r--r--include/uapi/linux/sched.h1
-rw-r--r--include/uapi/linux/sched/types.h10
-rw-r--r--include/uapi/linux/sctp.h22
-rw-r--r--include/uapi/linux/seccomp.h7
-rw-r--r--include/uapi/linux/securebits.h24
-rw-r--r--include/uapi/linux/sed-opal.h72
-rw-r--r--include/uapi/linux/seg6.h2
-rw-r--r--include/uapi/linux/seg6_iptunnel.h4
-rw-r--r--include/uapi/linux/seg6_local.h26
-rw-r--r--include/uapi/linux/serial.h71
-rw-r--r--include/uapi/linux/serial_core.h78
-rw-r--r--include/uapi/linux/serial_reg.h11
-rw-r--r--include/uapi/linux/serio.h1
-rw-r--r--include/uapi/linux/sev-guest.h99
-rw-r--r--include/uapi/linux/signalfd.h4
-rw-r--r--include/uapi/linux/smc.h155
-rw-r--r--include/uapi/linux/smc_diag.h2
-rw-r--r--include/uapi/linux/snmp.h39
-rw-r--r--include/uapi/linux/socket.h9
-rw-r--r--include/uapi/linux/soundcard.h2
-rw-r--r--include/uapi/linux/spi/spi.h5
-rw-r--r--include/uapi/linux/stat.h106
-rw-r--r--include/uapi/linux/stddef.h75
-rw-r--r--include/uapi/linux/stm.h2
-rw-r--r--include/uapi/linux/surface_aggregator/cdev.h73
-rw-r--r--include/uapi/linux/swab.h8
-rw-r--r--include/uapi/linux/sync_file.h61
-rw-r--r--include/uapi/linux/sysctl.h39
-rw-r--r--include/uapi/linux/target_core_user.h6
-rw-r--r--include/uapi/linux/taskstats.h62
-rw-r--r--include/uapi/linux/tc_act/tc_bpf.h5
-rw-r--r--include/uapi/linux/tc_act/tc_ct.h3
-rw-r--r--include/uapi/linux/tc_act/tc_ipt.h20
-rw-r--r--include/uapi/linux/tc_act/tc_mirred.h1
-rw-r--r--include/uapi/linux/tc_act/tc_pedit.h2
-rw-r--r--include/uapi/linux/tc_act/tc_skbedit.h15
-rw-r--r--include/uapi/linux/tc_act/tc_skbmod.h8
-rw-r--r--include/uapi/linux/tc_act/tc_tunnel_key.h6
-rw-r--r--include/uapi/linux/tc_act/tc_vlan.h5
-rw-r--r--include/uapi/linux/tcp.h148
-rw-r--r--include/uapi/linux/tcp_metrics.h22
-rw-r--r--include/uapi/linux/tdx-guest.h42
-rw-r--r--include/uapi/linux/tee.h113
-rw-r--r--include/uapi/linux/thermal.h35
-rw-r--r--include/uapi/linux/thp7312.h19
-rw-r--r--include/uapi/linux/time.h11
-rw-r--r--include/uapi/linux/tiocl.h1
-rw-r--r--include/uapi/linux/tipc_config.h28
-rw-r--r--include/uapi/linux/tls.h66
-rw-r--r--include/uapi/linux/tps6594_pfsm.h37
-rw-r--r--include/uapi/linux/trace_mmap.h48
-rw-r--r--include/uapi/linux/tty.h8
-rw-r--r--include/uapi/linux/types.h14
-rw-r--r--include/uapi/linux/ublk_cmd.h627
-rw-r--r--include/uapi/linux/udp.h5
-rw-r--r--include/uapi/linux/uio.h18
-rw-r--r--include/uapi/linux/um_timetravel.h190
-rw-r--r--include/uapi/linux/usb/audio.h2
-rw-r--r--include/uapi/linux/usb/cdc.h31
-rw-r--r--include/uapi/linux/usb/ch11.h6
-rw-r--r--include/uapi/linux/usb/ch9.h60
-rw-r--r--include/uapi/linux/usb/functionfs.h152
-rw-r--r--include/uapi/linux/usb/g_hid.h40
-rw-r--r--include/uapi/linux/usb/g_uvc.h3
-rw-r--r--include/uapi/linux/usb/gadgetfs.h2
-rw-r--r--include/uapi/linux/usb/raw_gadget.h18
-rw-r--r--include/uapi/linux/usb/video.h93
-rw-r--r--include/uapi/linux/usbdevice_fs.h4
-rw-r--r--include/uapi/linux/usbip.h26
-rw-r--r--include/uapi/linux/user_events.h94
-rw-r--r--include/uapi/linux/userfaultfd.h99
-rw-r--r--include/uapi/linux/uuid.h43
-rw-r--r--include/uapi/linux/uvcvideo.h19
-rw-r--r--include/uapi/linux/v4l2-common.h39
-rw-r--r--include/uapi/linux/v4l2-controls.h1791
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h11
-rw-r--r--include/uapi/linux/v4l2-mediabus.h22
-rw-r--r--include/uapi/linux/v4l2-subdev.h127
-rw-r--r--include/uapi/linux/vbox_vmmdev_types.h5
-rw-r--r--include/uapi/linux/vdpa.h41
-rw-r--r--include/uapi/linux/vduse.h353
-rw-r--r--include/uapi/linux/vesa.h18
-rw-r--r--include/uapi/linux/vfio.h970
-rw-r--r--include/uapi/linux/vfio_zdev.h7
-rw-r--r--include/uapi/linux/vhost.h134
-rw-r--r--include/uapi/linux/vhost_types.h51
-rw-r--r--include/uapi/linux/videodev2.h312
-rw-r--r--include/uapi/linux/virtio_9p.h2
-rw-r--r--include/uapi/linux/virtio_balloon.h16
-rw-r--r--include/uapi/linux/virtio_blk.h124
-rw-r--r--include/uapi/linux/virtio_bt.h9
-rw-r--r--include/uapi/linux/virtio_config.h30
-rw-r--r--include/uapi/linux/virtio_crypto.h83
-rw-r--r--include/uapi/linux/virtio_gpio.h72
-rw-r--r--include/uapi/linux/virtio_gpu.h22
-rw-r--r--include/uapi/linux/virtio_i2c.h47
-rw-r--r--include/uapi/linux/virtio_ids.h27
-rw-r--r--include/uapi/linux/virtio_iommu.h8
-rw-r--r--include/uapi/linux/virtio_mem.h11
-rw-r--r--include/uapi/linux/virtio_net.h245
-rw-r--r--include/uapi/linux/virtio_pci.h227
-rw-r--r--include/uapi/linux/virtio_pcidev.h65
-rw-r--r--include/uapi/linux/virtio_pmem.h7
-rw-r--r--include/uapi/linux/virtio_ring.h16
-rw-r--r--include/uapi/linux/virtio_rtc.h237
-rw-r--r--include/uapi/linux/virtio_scmi.h24
-rw-r--r--include/uapi/linux/virtio_snd.h154
-rw-r--r--include/uapi/linux/virtio_spi.h181
-rw-r--r--include/uapi/linux/virtio_vsock.h10
-rw-r--r--include/uapi/linux/vm_sockets.h34
-rw-r--r--include/uapi/linux/vmclock-abi.h182
-rw-r--r--include/uapi/linux/vmcore.h9
-rw-r--r--include/uapi/linux/vt.h55
-rw-r--r--include/uapi/linux/wireguard.h184
-rw-r--r--include/uapi/linux/wireless.h2
-rw-r--r--include/uapi/linux/wwan.h16
-rw-r--r--include/uapi/linux/xattr.h11
-rw-r--r--include/uapi/linux/xfrm.h60
-rw-r--r--include/uapi/linux/zorro_ids.h3
-rw-r--r--include/uapi/misc/amd-apml.h152
-rw-r--r--include/uapi/misc/cxl.h156
-rw-r--r--include/uapi/misc/fastrpc.h88
-rw-r--r--include/uapi/misc/habanalabs.h1160
-rw-r--r--include/uapi/misc/mrvl_cn10k_dpi.h39
-rw-r--r--include/uapi/misc/pvpanic.h7
-rw-r--r--include/uapi/misc/uacce/hisi_qm.h18
-rw-r--r--include/uapi/mtd/mtd-abi.h68
-rw-r--r--include/uapi/mtd/ubi-user.h43
-rw-r--r--include/uapi/rdma/bnxt_re-abi.h103
-rw-r--r--include/uapi/rdma/efa-abi.h50
-rw-r--r--include/uapi/rdma/erdma-abi.h49
-rw-r--r--include/uapi/rdma/hfi/hfi1_user.h2
-rw-r--r--include/uapi/rdma/hns-abi.h61
-rw-r--r--include/uapi/rdma/i40iw-abi.h107
-rw-r--r--include/uapi/rdma/ib_user_ioctl_cmds.h44
-rw-r--r--include/uapi/rdma/ib_user_ioctl_verbs.h9
-rw-r--r--include/uapi/rdma/ib_user_mad.h2
-rw-r--r--include/uapi/rdma/ib_user_sa.h14
-rw-r--r--include/uapi/rdma/ib_user_verbs.h151
-rw-r--r--include/uapi/rdma/ionic-abi.h115
-rw-r--r--include/uapi/rdma/irdma-abi.h134
-rw-r--r--include/uapi/rdma/mana-abi.h87
-rw-r--r--include/uapi/rdma/mlx5-abi.h28
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h34
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_verbs.h8
-rw-r--r--include/uapi/rdma/rdma_netlink.h57
-rw-r--r--include/uapi/rdma/rdma_user_cm.h44
-rw-r--r--include/uapi/rdma/rdma_user_ioctl_cmds.h2
-rw-r--r--include/uapi/rdma/rdma_user_rxe.h36
-rw-r--r--include/uapi/rdma/siw-abi.h2
-rw-r--r--include/uapi/regulator/regulator.h90
-rw-r--r--include/uapi/scsi/cxlflash_ioctl.h276
-rw-r--r--include/uapi/scsi/fc/fc_els.h164
-rw-r--r--include/uapi/scsi/scsi_bsg_fc.h4
-rw-r--r--include/uapi/scsi/scsi_bsg_mpi3mr.h581
-rw-r--r--include/uapi/scsi/scsi_bsg_ufs.h129
-rw-r--r--include/uapi/scsi/scsi_netlink_fc.h7
-rw-r--r--include/uapi/sound/asequencer.h131
-rw-r--r--include/uapi/sound/asoc.h72
-rw-r--r--include/uapi/sound/asound.h195
-rw-r--r--include/uapi/sound/asound_fm.h15
-rw-r--r--include/uapi/sound/compress_offload.h118
-rw-r--r--include/uapi/sound/compress_params.h108
-rw-r--r--include/uapi/sound/emu10k1.h174
-rw-r--r--include/uapi/sound/fcp.h120
-rw-r--r--include/uapi/sound/firewire.h182
-rw-r--r--include/uapi/sound/hdsp.h14
-rw-r--r--include/uapi/sound/hdspm.h15
-rw-r--r--include/uapi/sound/intel/avs/tokens.h171
-rw-r--r--include/uapi/sound/sb16_csp.h15
-rw-r--r--include/uapi/sound/scarlett2.h54
-rw-r--r--include/uapi/sound/sfnt_info.h15
-rw-r--r--include/uapi/sound/skl-tplg-interface.h79
-rw-r--r--include/uapi/sound/snd_ar_tokens.h251
-rw-r--r--include/uapi/sound/snd_sst_tokens.h16
-rw-r--r--include/uapi/sound/sof/abi.h10
-rw-r--r--include/uapi/sound/sof/fw.h2
-rw-r--r--include/uapi/sound/sof/header.h59
-rw-r--r--include/uapi/sound/sof/tokens.h89
-rw-r--r--include/uapi/sound/tlv.h13
-rw-r--r--include/uapi/sound/usb_stream.h16
-rw-r--r--include/uapi/xen/evtchn.h9
-rw-r--r--include/uapi/xen/gntalloc.h5
-rw-r--r--include/uapi/xen/gntdev.h8
-rw-r--r--include/uapi/xen/privcmd.h39
-rw-r--r--include/ufs/ufs.h661
-rw-r--r--include/ufs/ufs_quirks.h112
-rw-r--r--include/ufs/ufshcd.h1487
-rw-r--r--include/ufs/ufshci.h624
-rw-r--r--include/ufs/unipro.h327
-rw-r--r--include/vdso/align.h15
-rw-r--r--include/vdso/auxclock.h13
-rw-r--r--include/vdso/bits.h1
-rw-r--r--include/vdso/cache.h15
-rw-r--r--include/vdso/datapage.h133
-rw-r--r--include/vdso/getrandom.h74
-rw-r--r--include/vdso/gettime.h24
-rw-r--r--include/vdso/helpers.h67
-rw-r--r--include/vdso/jiffies.h2
-rw-r--r--include/vdso/math64.h38
-rw-r--r--include/vdso/page.h31
-rw-r--r--include/vdso/unaligned.h15
-rw-r--r--include/video/cmdline.h16
-rw-r--r--include/video/da8xx-fb.h94
-rw-r--r--include/video/edid.h3
-rw-r--r--include/video/imx-ipu-image-convert.h32
-rw-r--r--include/video/imx-ipu-v3.h16
-rw-r--r--include/video/kyro.h12
-rw-r--r--include/video/mach64.h3
-rw-r--r--include/video/mmp_disp.h6
-rw-r--r--include/video/nomodeset.h8
-rw-r--r--include/video/of_display_timing.h2
-rw-r--r--include/video/omap-panel-data.h71
-rw-r--r--include/video/omapfb_dss.h11
-rw-r--r--include/video/pixel_format.h102
-rw-r--r--include/video/platform_lcd.h3
-rw-r--r--include/video/radeon.h2
-rw-r--r--include/video/samsung_fimd.h4
-rw-r--r--include/video/sisfb.h6
-rw-r--r--include/video/sticore.h406
-rw-r--r--include/video/uvesafb.h2
-rw-r--r--include/video/vga.h78
-rw-r--r--include/video/w100fb.h147
-rw-r--r--include/xen/acpi.h31
-rw-r--r--include/xen/arm/hypercall.h15
-rw-r--r--include/xen/arm/hypervisor.h12
-rw-r--r--include/xen/arm/page-coherent.h20
-rw-r--r--include/xen/arm/page.h4
-rw-r--r--include/xen/arm/xen-ops.h16
-rw-r--r--include/xen/balloon.h4
-rw-r--r--include/xen/events.h29
-rw-r--r--include/xen/grant_table.h42
-rw-r--r--include/xen/hvm.h2
-rw-r--r--include/xen/interface/callback.h19
-rw-r--r--include/xen/interface/elfnote.h112
-rw-r--r--include/xen/interface/event_channel.h2
-rw-r--r--include/xen/interface/features.h2
-rw-r--r--include/xen/interface/grant_table.h180
-rw-r--r--include/xen/interface/hvm/dm_op.h19
-rw-r--r--include/xen/interface/hvm/hvm_op.h39
-rw-r--r--include/xen/interface/hvm/hvm_vcpu.h19
-rw-r--r--include/xen/interface/hvm/ioreq.h51
-rw-r--r--include/xen/interface/hvm/params.h20
-rw-r--r--include/xen/interface/hvm/start_info.h19
-rw-r--r--include/xen/interface/io/9pfs.h19
-rw-r--r--include/xen/interface/io/blkif.h2
-rw-r--r--include/xen/interface/io/console.h2
-rw-r--r--include/xen/interface/io/displif.h21
-rw-r--r--include/xen/interface/io/fbif.h19
-rw-r--r--include/xen/interface/io/kbdif.h19
-rw-r--r--include/xen/interface/io/netif.h19
-rw-r--r--include/xen/interface/io/pciif.h19
-rw-r--r--include/xen/interface/io/protocols.h2
-rw-r--r--include/xen/interface/io/pvcalls.h2
-rw-r--r--include/xen/interface/io/ring.h270
-rw-r--r--include/xen/interface/io/sndif.h21
-rw-r--r--include/xen/interface/io/usbif.h405
-rw-r--r--include/xen/interface/io/vscsiif.h152
-rw-r--r--include/xen/interface/io/xenbus.h2
-rw-r--r--include/xen/interface/io/xs_wire.h39
-rw-r--r--include/xen/interface/memory.h2
-rw-r--r--include/xen/interface/nmi.h2
-rw-r--r--include/xen/interface/physdev.h37
-rw-r--r--include/xen/interface/platform.h22
-rw-r--r--include/xen/interface/sched.h19
-rw-r--r--include/xen/interface/vcpu.h19
-rw-r--r--include/xen/interface/version.h2
-rw-r--r--include/xen/interface/xen-mca.h3
-rw-r--r--include/xen/interface/xen.h22
-rw-r--r--include/xen/interface/xenpmu.h2
-rw-r--r--include/xen/mem-reservation.h4
-rw-r--r--include/xen/pci.h34
-rw-r--r--include/xen/swiotlb-xen.h2
-rw-r--r--include/xen/xen-ops.h55
-rw-r--r--include/xen/xen.h47
-rw-r--r--include/xen/xenbus.h25
-rw-r--r--include/xen/xenbus_dev.h2
4790 files changed, 449979 insertions, 134280 deletions
diff --git a/include/Kbuild b/include/Kbuild
new file mode 100644
index 000000000000..5e76a599e2dd
--- /dev/null
+++ b/include/Kbuild
@@ -0,0 +1 @@
+obj-$(CONFIG_DRM_HEADER_TEST) += drm/
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index 18197c16149f..cbc9aeabcd99 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -3,7 +3,7 @@
*
* Name: acbuffer.h - Support for buffers returned by ACPI predefined names
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -207,4 +207,14 @@ struct acpi_pld_info {
#define ACPI_PLD_GET_HORIZ_OFFSET(dword) ACPI_GET_BITS (dword, 16, ACPI_16BIT_MASK)
#define ACPI_PLD_SET_HORIZ_OFFSET(dword,value) ACPI_SET_BITS (dword, 16, ACPI_16BIT_MASK, value) /* Offset 128+16=144, Len 16 */
+/* Panel position defined in _PLD section of ACPI Specification 6.3 */
+
+#define ACPI_PLD_PANEL_TOP 0
+#define ACPI_PLD_PANEL_BOTTOM 1
+#define ACPI_PLD_PANEL_LEFT 2
+#define ACPI_PLD_PANEL_RIGHT 3
+#define ACPI_PLD_PANEL_FRONT 4
+#define ACPI_PLD_PANEL_BACK 5
+#define ACPI_PLD_PANEL_UNKNOWN 6
+
#endif /* ACBUFFER_H */
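A quick illustrative use of the new panel-position constants; the helper name pld_panel_str is hypothetical and not part of this patch. After acpi_get_physical_device_location() fills a struct acpi_pld_info, its panel field can be decoded like so:

	/* Hypothetical helper decoding the ACPI_PLD_PANEL_* values above. */
	static const char *pld_panel_str(unsigned int panel)
	{
		switch (panel) {
		case ACPI_PLD_PANEL_TOP:	return "top";
		case ACPI_PLD_PANEL_BOTTOM:	return "bottom";
		case ACPI_PLD_PANEL_LEFT:	return "left";
		case ACPI_PLD_PANEL_RIGHT:	return "right";
		case ACPI_PLD_PANEL_FRONT:	return "front";
		case ACPI_PLD_PANEL_BACK:	return "back";
		default:			return "unknown";
		}
	}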
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index e92f84fa8c68..521d4bfa6ef0 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -3,7 +3,7 @@
*
* Name: acconfig.h - Global configuration constants
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -67,7 +67,6 @@
* General Purpose Events (GPEs)
* Global Lock
* ACPI PM timer
- * FACS table (Waking vectors and Global Lock)
*/
#ifndef ACPI_REDUCED_HARDWARE
#define ACPI_REDUCED_HARDWARE FALSE
@@ -188,6 +187,10 @@
#define ACPI_MAX_GSBUS_DATA_SIZE 255
#define ACPI_MAX_GSBUS_BUFFER_SIZE ACPI_SERIAL_HEADER_SIZE + ACPI_MAX_GSBUS_DATA_SIZE
+#define ACPI_PRM_INPUT_BUFFER_SIZE 26
+
+#define ACPI_FFH_INPUT_BUFFER_SIZE 256
+
/* _sx_d and _sx_w control methods */
#define ACPI_NUM_sx_d_METHODS 4
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index ea3b1c41bc79..a2db36d18419 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -3,7 +3,7 @@
*
* Name: acexcep.h - Exception codes returned by the ACPI subsystem
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -173,8 +173,10 @@ struct acpi_exception_info {
#define AE_AML_TARGET_TYPE EXCEP_AML (0x0023)
#define AE_AML_PROTOCOL EXCEP_AML (0x0024)
#define AE_AML_BUFFER_LENGTH EXCEP_AML (0x0025)
+#define AE_AML_TOO_FEW_ARGUMENTS EXCEP_AML (0x0026)
+#define AE_AML_TOO_MANY_ARGUMENTS EXCEP_AML (0x0027)
-#define AE_CODE_AML_MAX 0x0025
+#define AE_CODE_AML_MAX 0x0027
/*
* Internal exceptions used for control
@@ -353,7 +355,11 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
"A target operand of an incorrect type was encountered"),
EXCEP_TXT("AE_AML_PROTOCOL", "Violation of a fixed ACPI protocol"),
EXCEP_TXT("AE_AML_BUFFER_LENGTH",
- "The length of the buffer is invalid/incorrect")
+ "The length of the buffer is invalid/incorrect"),
+ EXCEP_TXT("AE_AML_TOO_FEW_ARGUMENTS",
+ "There are fewer than expected method arguments"),
+ EXCEP_TXT("AE_AML_TOO_MANY_ARGUMENTS",
+ "There are too many arguments for this method")
};
static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = {
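A sketch of how a caller might react to the two new AML argument-count errors; the method name "MTH0" and the handling policy are illustrative only, while acpi_evaluate_object() is the standard ACPICA entry point:

	status = acpi_evaluate_object(handle, "MTH0", &arg_list, &buffer);
	if (status == AE_AML_TOO_FEW_ARGUMENTS ||
	    status == AE_AML_TOO_MANY_ARGUMENTS) {
		/* The AML method was invoked with a mismatched argument count. */
	}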
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index a2bc381c7ce7..cb6a4dcc4e8e 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -3,7 +3,7 @@
*
* Name: acnames.h - Global names and strings
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -20,7 +20,9 @@
#define METHOD_NAME__CLS "_CLS"
#define METHOD_NAME__CRS "_CRS"
#define METHOD_NAME__DDN "_DDN"
+#define METHOD_NAME__DIS "_DIS"
#define METHOD_NAME__DMA "_DMA"
+#define METHOD_NAME__EVT "_EVT"
#define METHOD_NAME__HID "_HID"
#define METHOD_NAME__INI "_INI"
#define METHOD_NAME__PLD "_PLD"
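The new name constants slot into the usual evaluation helpers. A minimal sketch, assuming handle and status are in scope:

	/* _DIS takes no arguments and returns no value. */
	status = acpi_evaluate_object(handle, METHOD_NAME__DIS, NULL, NULL);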
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 1b4c45815695..3584f33e352c 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -3,7 +3,7 @@
*
* Name: acoutput.h -- debug output
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -193,6 +193,7 @@
*/
#ifndef ACPI_NO_ERROR_MESSAGES
#define AE_INFO _acpi_module_name, __LINE__
+#define ACPI_ONCE(_fn, _plist) { static char _done; if (!_done) { _done = 1; _fn _plist; } }
/*
* Error reporting. Caller's module and line number are inserted by AE_INFO,
@@ -201,8 +202,10 @@
*/
#define ACPI_INFO(plist) acpi_info plist
#define ACPI_WARNING(plist) acpi_warning plist
+#define ACPI_WARNING_ONCE(plist) ACPI_ONCE(acpi_warning, plist)
#define ACPI_EXCEPTION(plist) acpi_exception plist
#define ACPI_ERROR(plist) acpi_error plist
+#define ACPI_ERROR_ONCE(plist) ACPI_ONCE(acpi_error, plist)
#define ACPI_BIOS_WARNING(plist) acpi_bios_warning plist
#define ACPI_BIOS_EXCEPTION(plist) acpi_bios_exception plist
#define ACPI_BIOS_ERROR(plist) acpi_bios_error plist
@@ -214,8 +217,10 @@
#define ACPI_INFO(plist)
#define ACPI_WARNING(plist)
+#define ACPI_WARNING_ONCE(plist)
#define ACPI_EXCEPTION(plist)
#define ACPI_ERROR(plist)
+#define ACPI_ERROR_ONCE(plist)
#define ACPI_BIOS_WARNING(plist)
#define ACPI_BIOS_EXCEPTION(plist)
#define ACPI_BIOS_ERROR(plist)
@@ -415,7 +420,7 @@
/* Conditional execution */
#define ACPI_DEBUG_EXEC(a) a
-#define ACPI_DEBUG_ONLY_MEMBERS(a) a;
+#define ACPI_DEBUG_ONLY_MEMBERS(a) a
#define _VERBOSE_STRUCTURES
/* Various object display routines for debug */
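For reference, ACPI_ERROR_ONCE((AE_INFO, "...")) expands through ACPI_ONCE() into a per-call-site static guard, so each expansion reports at most once. In effect:

	{
		static char _done;

		if (!_done) {
			_done = 1;
			acpi_error(_acpi_module_name, __LINE__, "...");
		}
	}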
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index 6f6282a862bc..92bf80937e5f 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -3,7 +3,7 @@
*
* Name: acpi.h - Master public include file used to interface to ACPICA
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 3a82faac5767..aad1a95e6863 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -9,14 +9,17 @@
#ifndef __ACPI_BUS_H__
#define __ACPI_BUS_H__
+#include <linux/completion.h>
+#include <linux/container_of.h>
#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/mutex.h>
#include <linux/property.h>
+#include <linux/types.h>
-/* TBD: Make dynamic */
-#define ACPI_MAX_HANDLES 10
struct acpi_handle_list {
u32 count;
- acpi_handle handles[ACPI_MAX_HANDLES];
+ acpi_handle *handles;
};
/* acpi_utils.h */
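With handles now allocated dynamically, callers iterate over count entries and must release the list afterwards. A minimal sketch using the reworked helpers declared below; consume_handle() is a hypothetical consumer:

	struct acpi_handle_list deps = { };
	u32 i;

	if (acpi_evaluate_reference(handle, "_DEP", NULL, &deps)) {
		for (i = 0; i < deps.count; i++)
			consume_handle(deps.handles[i]);
		acpi_handle_list_free(&deps);	/* frees deps.handles */
	}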
@@ -27,18 +30,19 @@ acpi_status
acpi_evaluate_integer(acpi_handle handle,
acpi_string pathname,
struct acpi_object_list *arguments, unsigned long long *data);
-acpi_status
-acpi_evaluate_reference(acpi_handle handle,
- acpi_string pathname,
- struct acpi_object_list *arguments,
- struct acpi_handle_list *list);
+bool acpi_evaluate_reference(acpi_handle handle, acpi_string pathname,
+ struct acpi_object_list *arguments,
+ struct acpi_handle_list *list);
+bool acpi_handle_list_equal(struct acpi_handle_list *list1,
+ struct acpi_handle_list *list2);
+void acpi_handle_list_replace(struct acpi_handle_list *dst,
+ struct acpi_handle_list *src);
+void acpi_handle_list_free(struct acpi_handle_list *list);
+bool acpi_device_dep(acpi_handle target, acpi_handle match);
acpi_status
acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
struct acpi_buffer *status_buf);
-acpi_status
-acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld);
-
bool acpi_has_method(acpi_handle handle, char *name);
acpi_status acpi_execute_simple_method(acpi_handle handle, char *method,
u64 arg);
@@ -52,6 +56,9 @@ bool acpi_dock_match(acpi_handle handle);
bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs);
union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
u64 rev, u64 func, union acpi_object *argv4);
+#ifdef CONFIG_ACPI
+bool
+acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld);
static inline union acpi_object *
acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
@@ -68,6 +75,7 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
return obj;
}
+#endif
#define ACPI_INIT_DSM_ARGV4(cnt, eles) \
{ \
@@ -121,11 +129,12 @@ static inline struct acpi_hotplug_profile *to_acpi_hotplug_profile(
}
struct acpi_scan_handler {
- const struct acpi_device_id *ids;
struct list_head list_node;
+ const struct acpi_device_id *ids;
bool (*match)(const char *idstr, const struct acpi_device_id **matchid);
int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id);
void (*detach)(struct acpi_device *dev);
+ void (*post_eject)(struct acpi_device *dev);
void (*bind)(struct device *phys_dev);
void (*unbind)(struct device *phys_dev);
struct acpi_hotplug_profile hotplug;
@@ -136,11 +145,15 @@ struct acpi_scan_handler {
* --------------------
*/
+typedef int (*acpi_hp_notify) (struct acpi_device *, u32);
+typedef void (*acpi_hp_uevent) (struct acpi_device *, u32);
+typedef void (*acpi_hp_fixup) (struct acpi_device *);
+
struct acpi_hotplug_context {
struct acpi_device *self;
- int (*notify)(struct acpi_device *, u32);
- void (*uevent)(struct acpi_device *, u32);
- void (*fixup)(struct acpi_device *);
+ acpi_hp_notify notify;
+ acpi_hp_uevent uevent;
+ acpi_hp_fixup fixup;
};
/*
@@ -149,7 +162,7 @@ struct acpi_hotplug_context {
*/
typedef int (*acpi_op_add) (struct acpi_device * device);
-typedef int (*acpi_op_remove) (struct acpi_device * device);
+typedef void (*acpi_op_remove) (struct acpi_device *device);
typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event);
struct acpi_device_ops {
@@ -167,7 +180,6 @@ struct acpi_driver {
unsigned int flags;
struct acpi_device_ops ops;
struct device_driver drv;
- struct module *owner;
};
/*
@@ -202,7 +214,8 @@ struct acpi_device_flags {
u32 coherent_dma:1;
u32 cca_seen:1;
u32 enumeration_by_parent:1;
- u32 reserved:19;
+ u32 honor_deps:1;
+ u32 reserved:18;
};
/* File System */
@@ -215,10 +228,12 @@ struct acpi_device_dir {
/* Plug and Play */
+#define MAX_ACPI_DEVICE_NAME_LEN 40
+#define MAX_ACPI_CLASS_NAME_LEN 20
typedef char acpi_bus_id[8];
typedef u64 acpi_bus_address;
-typedef char acpi_device_name[40];
-typedef char acpi_device_class[20];
+typedef char acpi_device_name[MAX_ACPI_DEVICE_NAME_LEN];
+typedef char acpi_device_class[MAX_ACPI_CLASS_NAME_LEN];
struct acpi_hardware_id {
struct list_head list;
@@ -229,7 +244,8 @@ struct acpi_pnp_type {
u32 hardware_id:1;
u32 bus_address:1;
u32 platform_id:1;
- u32 reserved:29;
+ u32 backlight:1;
+ u32 reserved:28;
};
struct acpi_device_pnp {
@@ -241,7 +257,6 @@ struct acpi_device_pnp {
struct list_head ids; /* _HID and _CIDs */
acpi_device_name device_name; /* Driver-determined */
acpi_device_class device_class; /* " */
- union acpi_object *str_obj; /* unicode string for _STR method */
};
#define acpi_device_bid(d) ((d)->pnp.bus_id)
@@ -264,6 +279,7 @@ struct acpi_device_power_flags {
};
struct acpi_device_power_state {
+ struct list_head resources; /* Power resources referenced */
struct {
u8 valid:1;
u8 explicit_set:1; /* _PSx present? */
@@ -271,13 +287,22 @@ struct acpi_device_power_state {
} flags;
int power; /* % Power (compared to D0) */
int latency; /* Dx->D0 time (microseconds) */
- struct list_head resources; /* Power resources referenced */
};
struct acpi_device_power {
int state; /* Current state */
struct acpi_device_power_flags flags;
struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */
+ u8 state_for_enumeration; /* Deepest power state for enumeration */
+};
+
+struct acpi_dep_data {
+ struct list_head node;
+ acpi_handle supplier;
+ acpi_handle consumer;
+ bool honor_dep;
+ bool met;
+ bool free_when_met;
};
/* Performance Management */
@@ -327,16 +352,17 @@ struct acpi_device_wakeup {
};
struct acpi_device_physical_node {
- unsigned int node_id;
struct list_head node;
struct device *dev;
+ unsigned int node_id;
bool put_online:1;
};
struct acpi_device_properties {
- const guid_t *guid;
- const union acpi_object *properties;
struct list_head list;
+ const guid_t *guid;
+ union acpi_object *properties;
+ void **bufs;
};
/* ACPI Device Specific Data (_DSD) */
@@ -349,14 +375,104 @@ struct acpi_device_data {
struct acpi_gpio_mapping;
+#define ACPI_DEVICE_SWNODE_ROOT 0
+
+/*
+ * The maximum expected number of CSI-2 data lanes.
+ *
+ * This number is not expected to ever have to be equal to or greater than the
+ * number of bits in an unsigned long variable, but if it needs to be increased
+ * above that limit, code will need to be adjusted accordingly.
+ */
+#define ACPI_DEVICE_CSI2_DATA_LANES 8
+
+#define ACPI_DEVICE_SWNODE_PORT_NAME_LENGTH 8
+
+enum acpi_device_swnode_dev_props {
+ ACPI_DEVICE_SWNODE_DEV_ROTATION,
+ ACPI_DEVICE_SWNODE_DEV_CLOCK_FREQUENCY,
+ ACPI_DEVICE_SWNODE_DEV_LED_MAX_MICROAMP,
+ ACPI_DEVICE_SWNODE_DEV_FLASH_MAX_MICROAMP,
+ ACPI_DEVICE_SWNODE_DEV_FLASH_MAX_TIMEOUT_US,
+ ACPI_DEVICE_SWNODE_DEV_NUM_OF,
+ ACPI_DEVICE_SWNODE_DEV_NUM_ENTRIES
+};
+
+enum acpi_device_swnode_port_props {
+ ACPI_DEVICE_SWNODE_PORT_REG,
+ ACPI_DEVICE_SWNODE_PORT_NUM_OF,
+ ACPI_DEVICE_SWNODE_PORT_NUM_ENTRIES
+};
+
+enum acpi_device_swnode_ep_props {
+ ACPI_DEVICE_SWNODE_EP_REMOTE_EP,
+ ACPI_DEVICE_SWNODE_EP_BUS_TYPE,
+ ACPI_DEVICE_SWNODE_EP_REG,
+ ACPI_DEVICE_SWNODE_EP_CLOCK_LANES,
+ ACPI_DEVICE_SWNODE_EP_DATA_LANES,
+ ACPI_DEVICE_SWNODE_EP_LANE_POLARITIES,
+ /* TX only */
+ ACPI_DEVICE_SWNODE_EP_LINK_FREQUENCIES,
+ ACPI_DEVICE_SWNODE_EP_NUM_OF,
+ ACPI_DEVICE_SWNODE_EP_NUM_ENTRIES
+};
+
+/*
+ * Each device has a root software node plus two times as many nodes as the
+ * number of CSI-2 ports.
+ */
+#define ACPI_DEVICE_SWNODE_PORT(port) (2 * (port) + 1)
+#define ACPI_DEVICE_SWNODE_EP(endpoint) \
+ (ACPI_DEVICE_SWNODE_PORT(endpoint) + 1)
+
+/**
+ * struct acpi_device_software_node_port - MIPI DisCo for Imaging CSI-2 port
+ * @port_name: Port name.
+ * @data_lanes: "data-lanes" property values.
+ * @lane_polarities: "lane-polarities" property values.
+ * @link_frequencies: "link-frequencies" property values.
+ * @port_nr: Port number.
+ * @crs_csi2_local: _CRS CSI2 record present (i.e. this is a transmitter port).
+ * @port_props: Port properties.
+ * @ep_props: Endpoint properties.
+ * @remote_ep: Reference to the remote endpoint.
+ */
+struct acpi_device_software_node_port {
+ char port_name[ACPI_DEVICE_SWNODE_PORT_NAME_LENGTH + 1];
+ u32 data_lanes[ACPI_DEVICE_CSI2_DATA_LANES];
+ u32 lane_polarities[ACPI_DEVICE_CSI2_DATA_LANES + 1 /* clock lane */];
+ u64 link_frequencies[ACPI_DEVICE_CSI2_DATA_LANES];
+ unsigned int port_nr;
+ bool crs_csi2_local;
+
+ struct property_entry port_props[ACPI_DEVICE_SWNODE_PORT_NUM_ENTRIES];
+ struct property_entry ep_props[ACPI_DEVICE_SWNODE_EP_NUM_ENTRIES];
+
+ struct software_node_ref_args remote_ep[1];
+};
+
+/**
+ * struct acpi_device_software_nodes - Software nodes for an ACPI device
+ * @dev_props: Device properties.
+ * @nodes: Software nodes for root as well as ports and endpoints.
+ * @nodeptrs: Array of software node pointers, for (un)registering them.
+ * @ports: Information related to each port and endpoint within a port.
+ * @num_ports: The number of ports.
+ */
+struct acpi_device_software_nodes {
+ struct property_entry dev_props[ACPI_DEVICE_SWNODE_DEV_NUM_ENTRIES];
+ struct software_node *nodes;
+ const struct software_node **nodeptrs;
+ struct acpi_device_software_node_port *ports;
+ unsigned int num_ports;
+};
+
/* Device */
struct acpi_device {
+ u32 pld_crc;
int device_type;
acpi_handle handle; /* no handle for fixed hardware */
struct fwnode_handle fwnode;
- struct acpi_device *parent;
- struct list_head children;
- struct list_head node;
struct list_head wakeup_list;
struct list_head del_list;
struct acpi_device_status status;
@@ -369,7 +485,7 @@ struct acpi_device {
struct acpi_device_data data;
struct acpi_scan_handler *handler;
struct acpi_hotplug_context *hp;
- struct acpi_driver *driver;
+ struct acpi_device_software_nodes *swnodes;
const struct acpi_gpio_mapping *driver_gpios;
void *driver_data;
struct device dev;
@@ -382,12 +498,12 @@ struct acpi_device {
/* Non-device subnode */
struct acpi_data_node {
+ struct list_head sibling;
const char *name;
acpi_handle handle;
struct fwnode_handle fwnode;
struct fwnode_handle *parent;
struct acpi_device_data data;
- struct list_head sibling;
struct kobject kobj;
struct completion kobj_done;
};
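The port/endpoint index macros above enumerate software nodes as root = 0 followed by one (port, endpoint) pair per CSI-2 port; a worked expansion:

	ACPI_DEVICE_SWNODE_ROOT		/* 0 */
	ACPI_DEVICE_SWNODE_PORT(0)	/* 2 * 0 + 1 == 1 */
	ACPI_DEVICE_SWNODE_EP(0)	/* 1 + 1 == 2 */
	ACPI_DEVICE_SWNODE_PORT(1)	/* 2 * 1 + 1 == 3 */
	ACPI_DEVICE_SWNODE_EP(1)	/* 3 + 1 == 4 */

so a device with N ports needs 2 * N + 1 nodes, matching the comment above the macros.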
@@ -448,7 +564,15 @@ static inline void *acpi_driver_data(struct acpi_device *d)
}
#define to_acpi_device(d) container_of(d, struct acpi_device, dev)
-#define to_acpi_driver(d) container_of(d, struct acpi_driver, drv)
+#define to_acpi_driver(d) container_of_const(d, struct acpi_driver, drv)
+
+static inline struct acpi_device *acpi_dev_parent(struct acpi_device *adev)
+{
+ if (adev->dev.parent)
+ return to_acpi_device(adev->dev.parent);
+
+ return NULL;
+}
static inline void acpi_set_device_status(struct acpi_device *adev, u32 sta)
{
@@ -464,11 +588,17 @@ static inline void acpi_set_hp_context(struct acpi_device *adev,
void acpi_initialize_hp_context(struct acpi_device *adev,
struct acpi_hotplug_context *hp,
- int (*notify)(struct acpi_device *, u32),
- void (*uevent)(struct acpi_device *, u32));
+ acpi_hp_notify notify, acpi_hp_uevent uevent);
/* acpi_device.dev.bus == &acpi_bus_type */
-extern struct bus_type acpi_bus_type;
+extern const struct bus_type acpi_bus_type;
+
+int acpi_bus_for_each_dev(int (*fn)(struct device *, void *), void *data);
+int acpi_dev_for_each_child(struct acpi_device *adev,
+ int (*fn)(struct acpi_device *, void *), void *data);
+int acpi_dev_for_each_child_reverse(struct acpi_device *adev,
+ int (*fn)(struct acpi_device *, void *),
+ void *data);
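Per-child work now goes through these iterators instead of a private children list. A hedged sketch counting direct children (count_one() and acpi_dev_child_count() are illustrative names):

#include <linux/acpi.h>

static int count_one(struct acpi_device *child, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;		/* returning non-zero stops the walk */
}

static unsigned int acpi_dev_child_count(struct acpi_device *adev)
{
	unsigned int count = 0;

	acpi_dev_for_each_child(adev, count_one, &count);
	return count;
}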
/*
* Events
@@ -489,6 +619,12 @@ void acpi_bus_private_data_handler(acpi_handle, void *);
int acpi_bus_get_private_data(acpi_handle, void **);
int acpi_bus_attach_private_data(acpi_handle, void *);
void acpi_bus_detach_private_data(acpi_handle);
+int acpi_dev_install_notify_handler(struct acpi_device *adev,
+ u32 handler_type,
+ acpi_notify_handler handler, void *context);
+void acpi_dev_remove_notify_handler(struct acpi_device *adev,
+ u32 handler_type,
+ acpi_notify_handler handler);
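The install/remove pair is typically called from a driver's probe and remove paths. A sketch, assuming a hypothetical my_notify() handler and ACPI_DEVICE_NOTIFY as the handler type:

#include <linux/acpi.h>

static void my_notify(acpi_handle handle, u32 event, void *context)
{
	struct acpi_device *adev = context;

	dev_info(&adev->dev, "ACPI notify event 0x%02x\n", event);
}

static int my_probe(struct acpi_device *adev)
{
	return acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY,
					       my_notify, adev);
}

static void my_remove(struct acpi_device *adev)
{
	acpi_dev_remove_notify_handler(adev, ACPI_DEVICE_NOTIFY, my_notify);
}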
extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
extern int register_acpi_notifier(struct notifier_block *);
extern int unregister_acpi_notifier(struct notifier_block *);
@@ -497,9 +633,6 @@ extern int unregister_acpi_notifier(struct notifier_block *);
* External Functions
*/
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
-struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle);
-void acpi_bus_put_acpi_device(struct acpi_device *adev);
acpi_status acpi_bus_get_status_handle(acpi_handle handle,
unsigned long long *sta);
int acpi_bus_get_status(struct acpi_device *device);
@@ -509,9 +642,13 @@ const char *acpi_power_state_string(int state);
int acpi_device_set_power(struct acpi_device *device, int state);
int acpi_bus_init_power(struct acpi_device *device);
int acpi_device_fix_up_power(struct acpi_device *device);
+void acpi_device_fix_up_power_extended(struct acpi_device *adev);
+void acpi_device_fix_up_power_children(struct acpi_device *adev);
int acpi_bus_update_power(acpi_handle handle, int *state_p);
int acpi_device_update_power(struct acpi_device *device, int *state_p);
bool acpi_bus_power_manageable(acpi_handle handle);
+void acpi_dev_power_up_children_with_adr(struct acpi_device *adev);
+u8 acpi_dev_power_state_for_wake(struct acpi_device *adev);
int acpi_device_power_add_dependent(struct acpi_device *adev,
struct device *dev);
void acpi_device_power_remove_dependent(struct acpi_device *adev,
@@ -528,7 +665,12 @@ void acpi_scan_lock_release(void);
void acpi_lock_hp_context(void);
void acpi_unlock_hp_context(void);
int acpi_scan_add_handler(struct acpi_scan_handler *handler);
-int acpi_bus_register_driver(struct acpi_driver *driver);
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE
+ */
+#define acpi_bus_register_driver(drv) \
+ __acpi_bus_register_driver(drv, THIS_MODULE)
+int __acpi_bus_register_driver(struct acpi_driver *driver, struct module *owner);
void acpi_bus_unregister_driver(struct acpi_driver *driver);
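The macro captures THIS_MODULE at the call site, so module ownership is recorded without pulling module.h into this header. A registration sketch (the driver name and the "ABCD0001" _HID are hypothetical):

#include <linux/acpi.h>
#include <linux/module.h>

static const struct acpi_device_id my_ids[] = {
	{ "ABCD0001", 0 },	/* hypothetical _HID */
	{ }
};

static struct acpi_driver my_driver = {
	.name = "my_acpi_driver",
	.ids = my_ids,
	/* .ops.add / .ops.remove as needed */
};

static int __init my_init(void)
{
	/* Expands to __acpi_bus_register_driver(&my_driver, THIS_MODULE) */
	return acpi_bus_register_driver(&my_driver);
}
module_init(my_init);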
int acpi_bus_scan(acpi_handle handle);
void acpi_bus_trim(struct acpi_device *start);
@@ -537,8 +679,6 @@ int acpi_match_device_ids(struct acpi_device *device,
const struct acpi_device_id *ids);
void acpi_set_modalias(struct acpi_device *adev, const char *default_id,
char *modalias, size_t len);
-int acpi_create_dir(struct acpi_device *);
-void acpi_remove_dir(struct acpi_device *);
static inline bool acpi_device_enumerated(struct acpi_device *adev)
{
@@ -566,30 +706,40 @@ struct acpi_bus_type {
bool (*match)(struct device *dev);
struct acpi_device * (*find_companion)(struct device *);
void (*setup)(struct device *);
- void (*cleanup)(struct device *);
};
int register_acpi_bus_type(struct acpi_bus_type *);
int unregister_acpi_bus_type(struct acpi_bus_type *);
int acpi_bind_one(struct device *dev, struct acpi_device *adev);
int acpi_unbind_one(struct device *dev);
+enum acpi_bridge_type {
+ ACPI_BRIDGE_TYPE_PCIE = 1,
+ ACPI_BRIDGE_TYPE_CXL,
+};
+
struct acpi_pci_root {
struct acpi_device * device;
struct pci_bus *bus;
u16 segment;
+ int bridge_type;
struct resource secondary; /* downstream bus range */
- u32 osc_support_set; /* _OSC state of support bits */
- u32 osc_control_set; /* _OSC state of control bits */
+ u32 osc_support_set; /* _OSC state of support bits */
+ u32 osc_control_set; /* _OSC state of control bits */
+ u32 osc_ext_support_set; /* _OSC state of extended support bits */
+ u32 osc_ext_control_set; /* _OSC state of extended control bits */
phys_addr_t mcfg_addr;
};
/* helper */
-bool acpi_dma_supported(struct acpi_device *adev);
+struct iommu_ops;
+
+bool acpi_dma_supported(const struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
-int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
- u64 *size);
+int acpi_iommu_fwspec_init(struct device *dev, u32 id,
+ struct fwnode_handle *fwnode);
+int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map);
int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
const u32 *input_id);
static inline int acpi_dma_configure(struct device *dev,
@@ -599,6 +749,8 @@ static inline int acpi_dma_configure(struct device *dev,
}
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
u64 address, bool check_children);
+struct acpi_device *acpi_find_child_by_adr(struct acpi_device *adev,
+ acpi_bus_address adr);
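acpi_find_child_by_adr() is a convenience over acpi_find_child_device() for _ADR-based lookup. A sketch (address 0 is an arbitrary example):

#include <linux/acpi.h>

static void report_child_at_zero(struct acpi_device *adev)
{
	struct acpi_device *child = acpi_find_child_by_adr(adev, 0);

	if (child)
		dev_info(&child->dev, "child at _ADR 0\n");
}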
int acpi_is_root_bridge(acpi_handle);
struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
@@ -606,9 +758,45 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state);
int acpi_disable_wakeup_device_power(struct acpi_device *dev);
#ifdef CONFIG_X86
-bool acpi_device_always_present(struct acpi_device *adev);
+bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status);
+bool acpi_quirk_skip_acpi_ac_and_battery(void);
+int acpi_install_cmos_rtc_space_handler(acpi_handle handle);
+void acpi_remove_cmos_rtc_space_handler(acpi_handle handle);
+int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip);
#else
-static inline bool acpi_device_always_present(struct acpi_device *adev)
+static inline bool acpi_device_override_status(struct acpi_device *adev,
+ unsigned long long *status)
+{
+ return false;
+}
+static inline bool acpi_quirk_skip_acpi_ac_and_battery(void)
+{
+ return false;
+}
+static inline int acpi_install_cmos_rtc_space_handler(acpi_handle handle)
+{
+ return 1;
+}
+static inline void acpi_remove_cmos_rtc_space_handler(acpi_handle handle)
+{
+}
+static inline int
+acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip)
+{
+ *skip = false;
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS)
+bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev);
+bool acpi_quirk_skip_gpio_event_handlers(void);
+#else
+static inline bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev)
+{
+ return false;
+}
+static inline bool acpi_quirk_skip_gpio_event_handlers(void)
{
return false;
}
@@ -683,7 +871,88 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set);
}
-bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
+int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer);
+
+static inline bool acpi_dev_hid_match(struct acpi_device *adev, const char *hid2)
+{
+ const char *hid1 = acpi_device_hid(adev);
+
+ return hid1 && hid2 && !strcmp(hid1, hid2);
+}
+
+static inline bool acpi_str_uid_match(struct acpi_device *adev, const char *uid2)
+{
+ const char *uid1 = acpi_device_uid(adev);
+
+ return uid1 && uid2 && !strcmp(uid1, uid2);
+}
+
+static inline bool acpi_int_uid_match(struct acpi_device *adev, u64 uid2)
+{
+ u64 uid1;
+
+ return !acpi_dev_uid_to_integer(adev, &uid1) && uid1 == uid2;
+}
+
+#define TYPE_ENTRY(type, x) \
+ const type: x, \
+ type: x
+
+#define ACPI_STR_TYPES(match) \
+ TYPE_ENTRY(unsigned char *, match), \
+ TYPE_ENTRY(signed char *, match), \
+ TYPE_ENTRY(char *, match), \
+ TYPE_ENTRY(void *, match)
+
+/**
+ * acpi_dev_uid_match - Match device by supplied UID
+ * @adev: ACPI device to match.
+ * @uid2: Unique ID of the device.
+ *
+ * Matches UID in @adev with given @uid2.
+ *
+ * Returns: %true if matches, %false otherwise.
+ */
+#define acpi_dev_uid_match(adev, uid2) \
+ _Generic(uid2, \
+ /* Treat @uid2 as a string for acpi string types */ \
+ ACPI_STR_TYPES(acpi_str_uid_match), \
+ /* Treat as an integer otherwise */ \
+ default: acpi_int_uid_match)(adev, uid2)
+
+/**
+ * acpi_dev_hid_uid_match - Match device by supplied HID and UID
+ * @adev: ACPI device to match.
+ * @hid2: Hardware ID of the device.
+ * @uid2: Unique ID of the device, pass NULL to not check _UID.
+ *
+ * Matches HID and UID in @adev with given @hid2 and @uid2. Absence of @uid2
+ * will be treated as a match. If user wants to validate @uid2, it should be
+ * done before calling this function.
+ *
+ * Returns: %true if matches or @uid2 is NULL, %false otherwise.
+ */
+#define acpi_dev_hid_uid_match(adev, hid2, uid2) \
+ (acpi_dev_hid_match(adev, hid2) && \
+ /* Distinguish integer 0 from NULL @uid2 */ \
+ (_Generic(uid2, ACPI_STR_TYPES(!(uid2)), default: 0) || \
+ acpi_dev_uid_match(adev, uid2)))
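The _Generic dispatch means one macro serves both string and integer UIDs, and a NULL @uid2 short-circuits acpi_dev_hid_uid_match() to a HID-only match. A sketch (the "ABCD0001" HID and the UID values are hypothetical):

#include <linux/acpi.h>

static bool is_my_device(struct acpi_device *adev)
{
	/* NULL UID: matches on _HID alone */
	if (acpi_dev_hid_uid_match(adev, "ABCD0001", NULL))
		return true;

	/* String vs. integer UID pick different matchers at compile time */
	return acpi_dev_uid_match(adev, "1") || acpi_dev_uid_match(adev, 1);
}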
+
+void acpi_dev_clear_dependencies(struct acpi_device *supplier);
+bool acpi_dev_ready_for_enumeration(const struct acpi_device *device);
+struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier,
+ struct acpi_device *start);
+
+/**
+ * for_each_acpi_consumer_dev - iterate over the consumer ACPI devices for a
+ * given supplier
+ * @supplier: Pointer to the supplier's ACPI device
+ * @consumer: Pointer to &struct acpi_device to hold the consumer, initially NULL
+ */
+#define for_each_acpi_consumer_dev(supplier, consumer) \
+ for (consumer = acpi_dev_get_next_consumer_dev(supplier, NULL); \
+ consumer; \
+ consumer = acpi_dev_get_next_consumer_dev(supplier, consumer))
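The iterator hands back each consumer with a reference taken and drops it on the next step, so a complete walk needs no manual cleanup; breaking out early leaves a reference that must be dropped with acpi_dev_put(). A sketch:

#include <linux/acpi.h>

static void log_consumers(struct acpi_device *supplier)
{
	struct acpi_device *consumer;

	for_each_acpi_consumer_dev(supplier, consumer)
		dev_info(&consumer->dev, "consumer of %s\n",
			 dev_name(&supplier->dev));
}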
struct acpi_device *
acpi_dev_get_next_match_dev(struct acpi_device *adev, const char *hid, const char *uid, s64 hrv);
@@ -698,11 +967,6 @@ acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv);
* @hrv: Hardware Revision of the device, pass -1 to not check _HRV
*
* The caller is responsible for invoking acpi_dev_put() on the returned device.
- *
- * FIXME: Due to above requirement there is a window that may invalidate @adev
- * and next iteration will use a dangling pointer, e.g. in the case of a
- * hotplug event. That said, the caller should ensure that this will never
- * happen.
*/
#define for_each_acpi_dev_match(adev, hid, uid, hrv) \
for (adev = acpi_dev_get_first_match_dev(hid, uid, hrv); \
@@ -716,13 +980,46 @@ static inline struct acpi_device *acpi_dev_get(struct acpi_device *adev)
static inline void acpi_dev_put(struct acpi_device *adev)
{
- put_device(&adev->dev);
+ if (adev)
+ put_device(&adev->dev);
}
+
+struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle);
+struct acpi_device *acpi_get_acpi_dev(acpi_handle handle);
+
+static inline void acpi_put_acpi_dev(struct acpi_device *adev)
+{
+ acpi_dev_put(adev);
+}
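acpi_fetch_acpi_dev() returns a borrowed pointer, while the get/put pair manages a counted reference for pointers that must outlive the lookup. A sketch:

#include <linux/acpi.h>

static void use_handle(acpi_handle handle)
{
	/* Borrowed: valid only while the device stays registered */
	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);

	if (!adev)
		return;

	/* Counted reference for longer-lived use */
	adev = acpi_get_acpi_dev(handle);
	if (adev) {
		/* ... use adev ... */
		acpi_put_acpi_dev(adev);
	}
}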
+
+int acpi_wait_for_acpi_ipmi(void);
+
+int acpi_scan_add_dep(acpi_handle handle, struct acpi_handle_list *dep_devices);
+u32 arch_acpi_add_auto_dep(acpi_handle handle);
#else /* CONFIG_ACPI */
static inline int register_acpi_bus_type(void *bus) { return 0; }
static inline int unregister_acpi_bus_type(void *bus) { return 0; }
+static inline int acpi_wait_for_acpi_ipmi(void) { return 0; }
+
+static inline const char *acpi_device_hid(struct acpi_device *device)
+{
+ return "";
+}
+
+static inline bool
+acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld)
+{
+ return false;
+}
+
+#define for_each_acpi_consumer_dev(supplier, consumer) \
+ for (consumer = NULL; false && (supplier);)
+
+#define for_each_acpi_dev_match(adev, hid, uid, hrv) \
+ for (adev = NULL; false && (hid) && (uid) && (hrv); )
+
#endif /* CONFIG_ACPI */
#endif /*__ACPI_BUS_H__*/
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 8372b0e7fd15..b14d165632e7 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -27,6 +27,8 @@
#define ACPI_BAY_HID "LNXIOBAY"
#define ACPI_DOCK_HID "LNXDOCK"
#define ACPI_ECDT_HID "LNXEC"
+/* SMBUS HID definition as supported by Microsoft Windows */
+#define ACPI_SMBUS_MS_HID "SMB0001"
/* Quirk for broken IBM BIOSes */
#define ACPI_SMBUS_IBM_HID "SMBUSIBM"
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
index 40a91ce87e04..99b960bd473c 100644
--- a/include/acpi/acpi_numa.h
+++ b/include/acpi/acpi_numa.h
@@ -3,7 +3,6 @@
#define __ACPI_NUMA_H
#ifdef CONFIG_ACPI_NUMA
-#include <linux/kernel.h>
#include <linux/numa.h>
/* Proximity bitmap length */
@@ -18,11 +17,16 @@ extern int node_to_pxm(int);
extern int acpi_map_pxm_to_node(int);
extern unsigned char acpi_srat_revision;
extern void disable_srat(void);
+extern int fix_pxm_node_maps(int max_nid);
extern void bad_srat(void);
extern int srat_disabled(void);
#else /* CONFIG_ACPI_NUMA */
+static inline int fix_pxm_node_maps(int max_nid)
+{
+ return 0;
+}
static inline void disable_srat(void)
{
}
@@ -43,4 +47,4 @@ static inline void disable_hmat(void)
{
}
#endif /* CONFIG_ACPI_HMAT */
-#endif /* __ACP_NUMA_H */
+#endif /* __ACPI_NUMA_H */
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 690c369b717a..65c5737b6286 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -5,7 +5,7 @@
* interfaces must be implemented by OSL to interface the
* ACPI components to the host operating system.
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index f8d44b06f3e3..e65a2afe9250 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -3,7 +3,7 @@
*
* Name: acpixf.h - External interfaces to the ACPI subsystem
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20210331
+#define ACPI_CA_VERSION 0x20250807
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -214,6 +214,12 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
/*
+ * The ACPI Global Lock is mainly used on systems with SMM, so SMM-less
+ * systems (such as loong_arch) may neither have nor use the Global Lock.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use_global_lock, TRUE);
+
+/*
* Maximum timeout for While() loop iterations before forced method abort.
* This mechanism is intended to prevent infinite loops during interpreter
* execution within a host kernel.
@@ -454,9 +460,11 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
* ACPI table load/unload interfaces
*/
ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
- acpi_install_table(acpi_physical_address address,
- u8 physical))
+ acpi_install_table(struct acpi_table_header *table))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
+ acpi_install_physical_table(acpi_physical_address
+ address))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_load_table(struct acpi_table_header *table,
u32 *table_idx))
@@ -524,7 +532,7 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
struct acpi_buffer *ret_path_ptr))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_get_handle(acpi_handle parent,
- acpi_string pathname,
+ const char *pathname,
acpi_handle *ret_handle))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_attach_data(acpi_handle object,
@@ -587,82 +595,93 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_install_initialization_handler
(acpi_init_handler handler, u32 function))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_install_sci_handler(acpi_sci_handler
- address,
- void *context))
-ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_remove_sci_handler(acpi_sci_handler
- address))
+ acpi_install_sci_handler(acpi_sci_handler
+ address,
+ void *context))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_install_global_event_handler
- (acpi_gbl_event_handler handler,
- void *context))
+ acpi_remove_sci_handler(acpi_sci_handler
+ address))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_install_fixed_event_handler(u32
- acpi_event,
- acpi_event_handler
- handler,
- void
- *context))
+ acpi_install_global_event_handler
+ (acpi_gbl_event_handler handler,
+ void *context))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_remove_fixed_event_handler(u32 acpi_event,
+ acpi_install_fixed_event_handler(u32
+ acpi_event,
acpi_event_handler
- handler))
-ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_install_gpe_handler(acpi_handle
- gpe_device,
- u32 gpe_number,
- u32 type,
- acpi_gpe_handler
- address,
- void *context))
+ handler,
+ void
+ *context))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_install_gpe_raw_handler(acpi_handle
- gpe_device,
- u32 gpe_number,
- u32 type,
- acpi_gpe_handler
- address,
- void *context))
+ acpi_remove_fixed_event_handler(u32 acpi_event,
+ acpi_event_handler
+ handler))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_remove_gpe_handler(acpi_handle gpe_device,
+ acpi_install_gpe_handler(acpi_handle
+ gpe_device,
u32 gpe_number,
+ u32 type,
acpi_gpe_handler
- address))
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_install_notify_handler(acpi_handle device,
- u32 handler_type,
- acpi_notify_handler
- handler,
+ address,
void *context))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_install_gpe_raw_handler(acpi_handle
+ gpe_device,
+ u32 gpe_number,
+ u32 type,
+ acpi_gpe_handler
+ address,
+ void *context))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+ acpi_remove_gpe_handler(acpi_handle gpe_device,
+ u32 gpe_number,
+ acpi_gpe_handler
+ address))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_remove_notify_handler(acpi_handle device,
+ acpi_install_notify_handler(acpi_handle device,
u32 handler_type,
acpi_notify_handler
- handler))
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_install_address_space_handler(acpi_handle
- device,
- acpi_adr_space_type
- space_id,
- acpi_adr_space_handler
- handler,
- acpi_adr_space_setup
- setup,
- void *context))
-ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_remove_address_space_handler(acpi_handle
+ handler,
+ void *context))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_remove_notify_handler(acpi_handle device,
+ u32 handler_type,
+ acpi_notify_handler
+ handler))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_install_address_space_handler(acpi_handle
device,
acpi_adr_space_type
space_id,
acpi_adr_space_handler
- handler))
+ handler,
+ acpi_adr_space_setup
+ setup,
+ void *context))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_install_address_space_handler_no_reg
+ (acpi_handle device, acpi_adr_space_type space_id,
+ acpi_adr_space_handler handler,
+ acpi_adr_space_setup setup,
+ void *context))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_install_exception_handler
- (acpi_exception_handler handler))
+ acpi_execute_reg_methods(acpi_handle device,
+ u32 max_depth,
+ acpi_adr_space_type
+ space_id))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_install_interface_handler
- (acpi_interface_handler handler))
+ acpi_remove_address_space_handler(acpi_handle
+ device,
+ acpi_adr_space_type
+ space_id,
+ acpi_adr_space_handler
+ handler))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_install_exception_handler
+ (acpi_exception_handler handler))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_install_interface_handler
+ (acpi_interface_handler handler))
/*
* Global Lock interfaces
@@ -749,6 +768,8 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_event_status
*event_status))
ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u32 gpe_number))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_hw_disable_all_gpes(void))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_hw_enable_all_wakeup_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
@@ -864,10 +885,10 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_leave_sleep_state_prep(u8 sleep_state))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_leave_sleep_state(u8 sleep_state))
-ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
- acpi_set_firmware_waking_vector
- (acpi_physical_address physical_address,
- acpi_physical_address physical_address64))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_set_firmware_waking_vector
+ (acpi_physical_address physical_address,
+ acpi_physical_address physical_address64))
/*
* ACPI Timer interfaces
*/
@@ -957,8 +978,6 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
void **data,
void (*callback)(void *)))
-void acpi_run_debugger(char *batch_buffer);
-
void acpi_set_debugger_thread_id(acpi_thread_id thread_id);
#endif /* __ACXFACE_H__ */
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index 8e2319bbd0a2..842f932e2c2b 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -3,7 +3,7 @@
*
* Name: acrestyp.h - Defines, types, and structures for resource descriptors
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -142,7 +142,10 @@ struct acpi_resource_irq {
u8 shareable;
u8 wake_capable;
u8 interrupt_count;
- u8 interrupts[1];
+ union {
+ u8 interrupt;
+ ACPI_FLEX_ARRAY(u8, interrupts);
+ };
};
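The union keeps sizeof() stable for the single-interrupt layout while exposing a flexible array for iteration. A sketch walking the list:

#include <linux/acpi.h>

static void dump_irqs(const struct acpi_resource_irq *irq)
{
	u8 i;

	for (i = 0; i < irq->interrupt_count; i++)
		pr_info("IRQ %u\n", irq->interrupts[i]);
}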
struct acpi_resource_dma {
@@ -150,7 +153,10 @@ struct acpi_resource_dma {
u8 bus_master;
u8 transfer;
u8 channel_count;
- u8 channels[1];
+ union {
+ u8 channel;
+ ACPI_FLEX_ARRAY(u8, channels);
+ };
};
struct acpi_resource_start_dependent {
@@ -194,7 +200,7 @@ struct acpi_resource_fixed_dma {
struct acpi_resource_vendor {
u16 byte_length;
- u8 byte_data[1];
+ u8 byte_data[];
};
/* Vendor resource with UUID info (introduced in ACPI 3.0) */
@@ -203,7 +209,7 @@ struct acpi_resource_vendor_typed {
u16 byte_length;
u8 uuid_subtype;
u8 uuid[ACPI_UUID_LENGTH];
- u8 byte_data[1];
+ u8 byte_data[];
};
struct acpi_resource_end_tag {
@@ -332,7 +338,10 @@ struct acpi_resource_extended_irq {
u8 wake_capable;
u8 interrupt_count;
struct acpi_resource_source resource_source;
- u32 interrupts[1];
+ union {
+ u32 interrupt;
+ ACPI_FLEX_ARRAY(u32, interrupts);
+ };
};
struct acpi_resource_generic_register {
@@ -536,6 +545,15 @@ struct acpi_resource_pin_config {
u8 *vendor_data;
};
+struct acpi_resource_clock_input {
+ u8 revision_id;
+ u8 mode;
+ u8 scale;
+ u16 frequency_divisor;
+ u32 frequency_numerator;
+ struct acpi_resource_source resource_source;
+};
+
/* Values for pin_config_type field above */
#define ACPI_PIN_CONFIG_DEFAULT 0
@@ -613,7 +631,8 @@ struct acpi_resource_pin_group_config {
#define ACPI_RESOURCE_TYPE_PIN_GROUP 22 /* ACPI 6.2 */
#define ACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION 23 /* ACPI 6.2 */
#define ACPI_RESOURCE_TYPE_PIN_GROUP_CONFIG 24 /* ACPI 6.2 */
-#define ACPI_RESOURCE_TYPE_MAX 24
+#define ACPI_RESOURCE_TYPE_CLOCK_INPUT 25 /* ACPI 6.5 */
+#define ACPI_RESOURCE_TYPE_MAX 25
/* Master union for resource descriptors */
@@ -647,6 +666,7 @@ union acpi_resource_data {
struct acpi_resource_pin_group pin_group;
struct acpi_resource_pin_group_function pin_group_function;
struct acpi_resource_pin_group_config pin_group_config;
+ struct acpi_resource_clock_input clock_input;
/* Common fields */
@@ -679,7 +699,10 @@ struct acpi_pci_routing_table {
u32 pin;
u64 address; /* here for 64-bit alignment */
u32 source_index;
- char source[4]; /* pad to 64 bits so sizeof() works in all cases */
+ union {
+ char pad[4]; /* pad to 64 bits so sizeof() works in all cases */
+ ACPI_FLEX_ARRAY(char, source);
+ };
};
#endif /* __ACRESTYP_H__ */
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index f9cda909f92c..8a67d4ea6e3f 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -3,7 +3,7 @@
*
* Name: actbl.h - Basic ACPI Table Definitions
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -66,14 +66,14 @@
******************************************************************************/
struct acpi_table_header {
- char signature[ACPI_NAMESEG_SIZE]; /* ASCII table signature */
+ char signature[ACPI_NAMESEG_SIZE] ACPI_NONSTRING; /* ASCII table signature */
u32 length; /* Length of table in bytes, including this header */
u8 revision; /* ACPI Specification minor version number */
u8 checksum; /* To make sum of entire table == 0 */
- char oem_id[ACPI_OEM_ID_SIZE]; /* ASCII OEM identification */
- char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; /* ASCII OEM table identification */
+ char oem_id[ACPI_OEM_ID_SIZE] ACPI_NONSTRING; /* ASCII OEM identification */
+ char oem_table_id[ACPI_OEM_TABLE_ID_SIZE] ACPI_NONSTRING; /* ASCII OEM table identification */
u32 oem_revision; /* OEM revision number */
- char asl_compiler_id[ACPI_NAMESEG_SIZE]; /* ASCII ASL compiler vendor ID */
+ char asl_compiler_id[ACPI_NAMESEG_SIZE] ACPI_NONSTRING; /* ASCII ASL compiler vendor ID */
u32 asl_compiler_revision; /* ASL compiler version */
};
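All bytes of a table, this header included, must sum to zero modulo 256. A minimal verification sketch:

#include <linux/acpi.h>

static bool table_checksum_ok(const struct acpi_table_header *table)
{
	const u8 *p = (const u8 *)table;
	u8 sum = 0;
	u32 i;

	for (i = 0; i < table->length; i++)
		sum += p[i];

	return sum == 0;	/* checksum field makes the total wrap to 0 */
}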
@@ -307,7 +307,8 @@ enum acpi_preferred_pm_profiles {
PM_SOHO_SERVER = 5,
PM_APPLIANCE_PC = 6,
PM_PERFORMANCE_SERVER = 7,
- PM_TABLET = 8
+ PM_TABLET = 8,
+ NR_PM_PROFILES = 9
};
/* Values for sleep_status and sleep_control registers (V5+ FADT) */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index ce59903c2695..7f35eb0e8458 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -3,7 +3,7 @@
*
* Name: actbl1.h - Additional ACPI table definitions
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -24,7 +24,9 @@
* file. Useful because they make it more difficult to inadvertently type in
* the wrong signature.
*/
+#define ACPI_SIG_AEST "AEST" /* Arm Error Source Table */
#define ACPI_SIG_ASF "ASF!" /* Alert Standard Format table */
+#define ACPI_SIG_ASPT "ASPT" /* AMD Secure Processor Table */
#define ACPI_SIG_BERT "BERT" /* Boot Error Record Table */
#define ACPI_SIG_BGRT "BGRT" /* Boot Graphics Resource Table */
#define ACPI_SIG_BOOT "BOOT" /* Simple Boot Flag Table */
@@ -44,10 +46,13 @@
#define ACPI_SIG_HMAT "HMAT" /* Heterogeneous Memory Attributes Table */
#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
#define ACPI_SIG_IBFT "IBFT" /* iSCSI Boot Firmware Table */
+#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
#define ACPI_SIG_S3PT "S3PT" /* S3 Performance (sub)Table */
#define ACPI_SIG_PCCS "PCC" /* PCC Shared Memory Region */
+#define ACPI_SIG_NBFT "NBFT" /* NVMe Boot Firmware Table */
+
/* Reserved table signatures */
#define ACPI_SIG_MATR "MATR" /* Memory Address Translation Table */
@@ -105,6 +110,58 @@ struct acpi_whea_header {
u64 mask; /* Bitmask required for this register instruction */
};
+/* https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/acpitabl/ns-acpitabl-aspt_table */
+#define ASPT_REVISION_ID 0x01
+struct acpi_table_aspt {
+ struct acpi_table_header header;
+ u32 num_entries;
+};
+
+struct acpi_aspt_header {
+ u16 type;
+ u16 length;
+};
+
+enum acpi_aspt_type {
+ ACPI_ASPT_TYPE_GLOBAL_REGS = 0,
+ ACPI_ASPT_TYPE_SEV_MBOX_REGS = 1,
+ ACPI_ASPT_TYPE_ACPI_MBOX_REGS = 2,
+};
+
+/* 0: ASPT Global Registers */
+struct acpi_aspt_global_regs {
+ struct acpi_aspt_header header;
+ u32 reserved;
+ u64 feature_reg_addr;
+ u64 irq_en_reg_addr;
+ u64 irq_st_reg_addr;
+};
+
+/* 1: ASPT SEV Mailbox Registers */
+struct acpi_aspt_sev_mbox_regs {
+ struct acpi_aspt_header header;
+ u8 mbox_irq_id;
+ u8 reserved[3];
+ u64 cmd_resp_reg_addr;
+ u64 cmd_buf_lo_reg_addr;
+ u64 cmd_buf_hi_reg_addr;
+};
+
+/* 2: ASPT ACPI Mailbox Registers */
+struct acpi_aspt_acpi_mbox_regs {
+ struct acpi_aspt_header header;
+ u32 reserved1;
+ u64 cmd_resp_reg_addr;
+ u64 reserved2[2];
+};
+
+/* Larger subtable header (when Length can exceed 255) */
+
+struct acpi_subtbl_hdr_16 {
+ u16 type;
+ u16 length;
+};
+
/*******************************************************************************
*
* ASF - Alert Standard Format table (Signature "ASF!")
@@ -304,10 +361,128 @@ struct acpi_table_boot {
/*******************************************************************************
*
+ * CDAT - Coherent Device Attribute Table
+ * Version 1
+ *
+ * Conforms to the "Coherent Device Attribute Table (CDAT) Specification
+ " (Revision 1.01, October 2020.)
+ *
+ ******************************************************************************/
+
+struct acpi_table_cdat {
+ u32 length; /* Length of table in bytes, including this header */
+ u8 revision; /* ACPI Specification minor version number */
+ u8 checksum; /* To make sum of entire table == 0 */
+ u8 reserved[6];
+ u32 sequence; /* Used to detect runtime CDAT table changes */
+};
+
+/* CDAT common subtable header */
+
+struct acpi_cdat_header {
+ u8 type;
+ u8 reserved;
+ u16 length;
+};
+
+/* Values for Type field above */
+
+enum acpi_cdat_type {
+ ACPI_CDAT_TYPE_DSMAS = 0,
+ ACPI_CDAT_TYPE_DSLBIS = 1,
+ ACPI_CDAT_TYPE_DSMSCIS = 2,
+ ACPI_CDAT_TYPE_DSIS = 3,
+ ACPI_CDAT_TYPE_DSEMTS = 4,
+ ACPI_CDAT_TYPE_SSLBIS = 5,
+ ACPI_CDAT_TYPE_RESERVED = 6 /* 6 through 0xFF are reserved */
+};
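CDAT subtables follow the table header back to back, each self-describing via acpi_cdat_header. A walking sketch, assuming (per the CDAT spec) that each length field covers the full record including its header:

#include <linux/acpi.h>

static void walk_cdat(const struct acpi_table_cdat *cdat)
{
	const u8 *p = (const u8 *)(cdat + 1);
	const u8 *end = (const u8 *)cdat + cdat->length;

	while (p + sizeof(struct acpi_cdat_header) <= end) {
		const struct acpi_cdat_header *h = (const void *)p;

		if (h->length < sizeof(*h) || p + h->length > end)
			break;	/* malformed subtable */
		pr_info("CDAT subtable type %u, %u bytes\n",
			h->type, h->length);
		p += h->length;
	}
}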
+
+/* Subtable 0: Device Scoped Memory Affinity Structure (DSMAS) */
+
+struct acpi_cdat_dsmas {
+ u8 dsmad_handle;
+ u8 flags;
+ u16 reserved;
+ u64 dpa_base_address;
+ u64 dpa_length;
+};
+
+/* Flags for subtable above */
+
+#define ACPI_CDAT_DSMAS_NON_VOLATILE (1 << 2)
+#define ACPI_CDAT_DSMAS_SHAREABLE (1 << 3)
+#define ACPI_CDAT_DSMAS_READ_ONLY (1 << 6)
+
+/* Subtable 1: Device scoped Latency and Bandwidth Information Structure (DSLBIS) */
+
+struct acpi_cdat_dslbis {
+ u8 handle;
+ u8 flags; /* If Handle matches a DSMAS handle, the definition of this field matches
+ * Flags field in HMAT System Locality Latency */
+ u8 data_type;
+ u8 reserved;
+ u64 entry_base_unit;
+ u16 entry[3];
+ u16 reserved2;
+};
+
+/* Subtable 2: Device Scoped Memory Side Cache Information Structure (DSMSCIS) */
+
+struct acpi_cdat_dsmscis {
+ u8 dsmas_handle;
+ u8 reserved[3];
+ u64 side_cache_size;
+ u32 cache_attributes;
+};
+
+/* Subtable 3: Device Scoped Initiator Structure (DSIS) */
+
+struct acpi_cdat_dsis {
+ u8 flags;
+ u8 handle;
+ u16 reserved;
+};
+
+/* Flags for above subtable */
+
+#define ACPI_CDAT_DSIS_MEM_ATTACHED (1 << 0)
+
+/* Subtable 4: Device Scoped EFI Memory Type Structure (DSEMTS) */
+
+struct acpi_cdat_dsemts {
+ u8 dsmas_handle;
+ u8 memory_type;
+ u16 reserved;
+ u64 dpa_offset;
+ u64 range_length;
+};
+
+/* Subtable 5: Switch Scoped Latency and Bandwidth Information Structure (SSLBIS) */
+
+struct acpi_cdat_sslbis {
+ u8 data_type;
+ u8 reserved[3];
+ u64 entry_base_unit;
+};
+
+/* Sub-subtable for above, sslbe_entries field */
+
+struct acpi_cdat_sslbe {
+ u16 portx_id;
+ u16 porty_id;
+ u16 latency_or_bandwidth;
+ u16 reserved;
+};
+
+#define ACPI_CDAT_SSLBIS_US_PORT 0x0100
+#define ACPI_CDAT_SSLBIS_ANY_PORT 0xffff
+
+/*******************************************************************************
+ *
* CEDT - CXL Early Discovery Table
* Version 1
*
- * Conforms to the "CXL Early Discovery Table" (CXL 2.0)
+ * Conforms to the "CXL Early Discovery Table" (CXL 2.0, October 2020)
*
******************************************************************************/
@@ -327,9 +502,22 @@ struct acpi_cedt_header {
enum acpi_cedt_type {
ACPI_CEDT_TYPE_CHBS = 0,
- ACPI_CEDT_TYPE_RESERVED = 1
+ ACPI_CEDT_TYPE_CFMWS = 1,
+ ACPI_CEDT_TYPE_CXIMS = 2,
+ ACPI_CEDT_TYPE_RDPAS = 3,
+ ACPI_CEDT_TYPE_RESERVED = 4,
};
+/* Values for version field above */
+
+#define ACPI_CEDT_CHBS_VERSION_CXL11 (0)
+#define ACPI_CEDT_CHBS_VERSION_CXL20 (1)
+
+/* Values for length field above */
+
+#define ACPI_CEDT_CHBS_LENGTH_CXL11 (0x2000)
+#define ACPI_CEDT_CHBS_LENGTH_CXL20 (0x10000)
+
/*
* CEDT subtables
*/
@@ -345,6 +533,72 @@ struct acpi_cedt_chbs {
u64 length;
};
+/* 1: CXL Fixed Memory Window Structure */
+
+struct acpi_cedt_cfmws {
+ struct acpi_cedt_header header;
+ u32 reserved1;
+ u64 base_hpa;
+ u64 window_size;
+ u8 interleave_ways;
+ u8 interleave_arithmetic;
+ u16 reserved2;
+ u32 granularity;
+ u16 restrictions;
+ u16 qtg_id;
+ u32 interleave_targets[];
+};
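For power-of-2 interleaves the interleave_ways field holds the exponent (0 means 1 way, 1 means 2, and so on); the distinct 3-, 6- and 12-way encodings are ignored in this sketch:

#include <linux/acpi.h>

static void dump_cfmws_targets(const struct acpi_cedt_cfmws *cfmws)
{
	unsigned int ways = 1u << cfmws->interleave_ways;
	unsigned int i;

	for (i = 0; i < ways; i++)
		pr_info("HPA %#llx target %u\n",
			cfmws->base_hpa, cfmws->interleave_targets[i]);
}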
+
+struct acpi_cedt_cfmws_target_element {
+ u32 interleave_target;
+};
+
+/* Values for Interleave Arithmetic field above */
+
+#define ACPI_CEDT_CFMWS_ARITHMETIC_MODULO (0)
+#define ACPI_CEDT_CFMWS_ARITHMETIC_XOR (1)
+
+/* Values for Restrictions field above */
+
+#define ACPI_CEDT_CFMWS_RESTRICT_DEVMEM (1)
+#define ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM (1<<1)
+#define ACPI_CEDT_CFMWS_RESTRICT_VOLATILE (1<<2)
+#define ACPI_CEDT_CFMWS_RESTRICT_PMEM (1<<3)
+#define ACPI_CEDT_CFMWS_RESTRICT_FIXED (1<<4)
+#define ACPI_CEDT_CFMWS_RESTRICT_BI (1<<5)
+
+/* 2: CXL XOR Interleave Math Structure */
+
+struct acpi_cedt_cxims {
+ struct acpi_cedt_header header;
+ u16 reserved1;
+ u8 hbig;
+ u8 nr_xormaps;
+ u64 xormap_list[];
+};
+
+struct acpi_cedt_cxims_target_element {
+ u64 xormap;
+};
+
+/* 3: CXL RCEC Downstream Port Association Structure */
+
+struct acpi_cedt_rdpas {
+ struct acpi_cedt_header header;
+ u16 segment;
+ u16 bdf;
+ u8 protocol;
+ u64 address;
+};
+
+/* Masks for bdf field above */
+#define ACPI_CEDT_RDPAS_BUS_MASK 0xff00
+#define ACPI_CEDT_RDPAS_DEVICE_MASK 0x00f8
+#define ACPI_CEDT_RDPAS_FUNCTION_MASK 0x0007
+
+#define ACPI_CEDT_RDPAS_PROTOCOL_IO (0)
+#define ACPI_CEDT_RDPAS_PROTOCOL_CACHEMEM (1)
+
/*******************************************************************************
*
* CPEP - Corrected Platform Error Polling table (ACPI 4.0)
@@ -443,7 +697,7 @@ struct acpi_csrt_descriptor {
* DBG2 - Debug Port Table 2
* Version 0 (Both main table and subtables)
*
- * Conforms to "Microsoft Debug Port Table 2 (DBG2)", December 10, 2015
+ * Conforms to "Microsoft Debug Port Table 2 (DBG2)", September 21, 2020
*
******************************************************************************/
@@ -493,11 +747,25 @@ struct acpi_dbg2_device {
#define ACPI_DBG2_16550_COMPATIBLE 0x0000
#define ACPI_DBG2_16550_SUBSET 0x0001
+#define ACPI_DBG2_MAX311XE_SPI 0x0002
#define ACPI_DBG2_ARM_PL011 0x0003
+#define ACPI_DBG2_MSM8X60 0x0004
+#define ACPI_DBG2_16550_NVIDIA 0x0005
+#define ACPI_DBG2_TI_OMAP 0x0006
+#define ACPI_DBG2_APM88XXXX 0x0008
+#define ACPI_DBG2_MSM8974 0x0009
+#define ACPI_DBG2_SAM5250 0x000A
+#define ACPI_DBG2_INTEL_USIF 0x000B
+#define ACPI_DBG2_IMX6 0x000C
#define ACPI_DBG2_ARM_SBSA_32BIT 0x000D
#define ACPI_DBG2_ARM_SBSA_GENERIC 0x000E
#define ACPI_DBG2_ARM_DCC 0x000F
#define ACPI_DBG2_BCM2835 0x0010
+#define ACPI_DBG2_SDM845_1_8432MHZ 0x0011
+#define ACPI_DBG2_16550_WITH_GAS 0x0012
+#define ACPI_DBG2_SDM845_7_372MHZ 0x0013
+#define ACPI_DBG2_INTEL_LPSS 0x0014
+#define ACPI_DBG2_RISCV_SBI_CON 0x0015
#define ACPI_DBG2_1394_STANDARD 0x0000
@@ -559,7 +827,8 @@ enum acpi_dmar_type {
ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3,
ACPI_DMAR_TYPE_NAMESPACE = 4,
ACPI_DMAR_TYPE_SATC = 5,
- ACPI_DMAR_TYPE_RESERVED = 6 /* 6 and greater are reserved */
+ ACPI_DMAR_TYPE_SIDP = 6,
+ ACPI_DMAR_TYPE_RESERVED = 7 /* 7 and greater are reserved */
};
/* DMAR Device Scope structure */
@@ -567,7 +836,8 @@ enum acpi_dmar_type {
struct acpi_dmar_device_scope {
u8 entry_type;
u8 length;
- u16 reserved;
+ u8 flags;
+ u8 reserved;
u8 enumeration_id;
u8 bus;
};
@@ -598,7 +868,7 @@ struct acpi_dmar_pci_path {
struct acpi_dmar_hardware_unit {
struct acpi_dmar_header header;
u8 flags;
- u8 reserved;
+ u8 size; /* Size of the register set */
u16 segment;
u64 address; /* Register Base Address */
};
@@ -649,7 +919,10 @@ struct acpi_dmar_andd {
struct acpi_dmar_header header;
u8 reserved[3];
u8 device_number;
- char device_name[1];
+ union {
+ char __pad;
+ ACPI_FLEX_ARRAY(char, device_name);
+ };
};
/* 5: SOC Integrated Address Translation Cache Reporting Structure */
@@ -660,6 +933,15 @@ struct acpi_dmar_satc {
u8 reserved;
u16 segment;
};
+
+/* 6: SoC Integrated Device Property Reporting Structure */
+
+struct acpi_dmar_sidp {
+ struct acpi_dmar_header header;
+ u16 reserved;
+ u16 segment;
+};
+
/*******************************************************************************
*
* DRTM - Dynamic Root of Trust for Measurement table
@@ -692,7 +974,7 @@ struct acpi_table_drtm {
struct acpi_drtm_vtable_list {
u32 validated_table_count;
- u64 validated_tables[1];
+ u64 validated_tables[];
};
/* 2) Resources List (of Resource Descriptors) */
@@ -707,7 +989,7 @@ struct acpi_drtm_resource {
struct acpi_drtm_resource_list {
u32 resource_count;
- struct acpi_drtm_resource resources[1];
+ struct acpi_drtm_resource resources[];
};
/* 3) Platform-specific Identifiers List */
@@ -730,7 +1012,7 @@ struct acpi_table_ecdt {
struct acpi_generic_address data; /* Address of EC data register */
u32 uid; /* Unique ID - must be same as the EC _UID method */
u8 gpe; /* The GPE for the EC */
- u8 id[1]; /* Full namepath of the EC in the ACPI namespace */
+ u8 id[]; /* Full namepath of the EC in the ACPI namespace */
};
/*******************************************************************************
@@ -761,17 +1043,18 @@ struct acpi_einj_entry {
/* Values for Action field above */
enum acpi_einj_actions {
- ACPI_EINJ_BEGIN_OPERATION = 0,
- ACPI_EINJ_GET_TRIGGER_TABLE = 1,
- ACPI_EINJ_SET_ERROR_TYPE = 2,
- ACPI_EINJ_GET_ERROR_TYPE = 3,
- ACPI_EINJ_END_OPERATION = 4,
- ACPI_EINJ_EXECUTE_OPERATION = 5,
- ACPI_EINJ_CHECK_BUSY_STATUS = 6,
- ACPI_EINJ_GET_COMMAND_STATUS = 7,
- ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS = 8,
- ACPI_EINJ_GET_EXECUTE_TIMINGS = 9,
- ACPI_EINJ_ACTION_RESERVED = 10, /* 10 and greater are reserved */
+ ACPI_EINJ_BEGIN_OPERATION = 0x0,
+ ACPI_EINJ_GET_TRIGGER_TABLE = 0x1,
+ ACPI_EINJ_SET_ERROR_TYPE = 0x2,
+ ACPI_EINJ_GET_ERROR_TYPE = 0x3,
+ ACPI_EINJ_END_OPERATION = 0x4,
+ ACPI_EINJ_EXECUTE_OPERATION = 0x5,
+ ACPI_EINJ_CHECK_BUSY_STATUS = 0x6,
+ ACPI_EINJ_GET_COMMAND_STATUS = 0x7,
+ ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS = 0x8,
+ ACPI_EINJ_GET_EXECUTE_TIMINGS = 0x9,
+ ACPI_EINJV2_GET_ERROR_TYPE = 0x11,
+ ACPI_EINJ_ACTION_RESERVED = 0x12, /* 0x12 and greater are reserved */
ACPI_EINJ_TRIGGER_ERROR = 0xFF /* Except for this value */
};
@@ -838,6 +1121,12 @@ enum acpi_einj_command_status {
#define ACPI_EINJ_PLATFORM_CORRECTABLE (1<<9)
#define ACPI_EINJ_PLATFORM_UNCORRECTABLE (1<<10)
#define ACPI_EINJ_PLATFORM_FATAL (1<<11)
+#define ACPI_EINJ_CXL_CACHE_CORRECTABLE (1<<12)
+#define ACPI_EINJ_CXL_CACHE_UNCORRECTABLE (1<<13)
+#define ACPI_EINJ_CXL_CACHE_FATAL (1<<14)
+#define ACPI_EINJ_CXL_MEM_CORRECTABLE (1<<15)
+#define ACPI_EINJ_CXL_MEM_UNCORRECTABLE (1<<16)
+#define ACPI_EINJ_CXL_MEM_FATAL (1<<17)
#define ACPI_EINJ_VENDOR_DEFINED (1<<31)
/*******************************************************************************
@@ -1529,7 +1818,7 @@ struct acpi_hmat_cache {
u32 reserved1;
u64 cache_size;
u32 cache_attributes;
- u16 reserved2;
+ u16 address_mode;
u16 number_of_SMBIOShandles;
};
@@ -1541,6 +1830,9 @@ struct acpi_hmat_cache {
#define ACPI_HMAT_WRITE_POLICY (0x0000F000)
#define ACPI_HMAT_CACHE_LINE_SIZE (0xFFFF0000)
+#define ACPI_HMAT_CACHE_MODE_UNKNOWN (0)
+#define ACPI_HMAT_CACHE_MODE_EXTENDED_LINEAR (1)
+
/* Values for cache associativity flag */
#define ACPI_HMAT_CA_NONE (0)
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 18cafe3ebddc..f726bce3eb84 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
/******************************************************************************
*
- * Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec)
+ * Name: actbl2.h - ACPI Table Definitions
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -24,26 +24,41 @@
* file. Useful because they make it more difficult to inadvertently type in
* the wrong signature.
*/
+#define ACPI_SIG_AGDI "AGDI" /* Arm Generic Diagnostic Dump and Reset Device Interface */
+#define ACPI_SIG_APMT "APMT" /* Arm Performance Monitoring Unit table */
+#define ACPI_SIG_BDAT "BDAT" /* BIOS Data ACPI Table */
+#define ACPI_SIG_CCEL "CCEL" /* CC Event Log Table */
+#define ACPI_SIG_CDAT "CDAT" /* Coherent Device Attribute Table */
+#define ACPI_SIG_ERDT "ERDT" /* Enhanced Resource Director Technology */
#define ACPI_SIG_IORT "IORT" /* IO Remapping Table */
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
#define ACPI_SIG_LPIT "LPIT" /* Low Power Idle Table */
#define ACPI_SIG_MADT "APIC" /* Multiple APIC Description Table */
#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
#define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */
+#define ACPI_SIG_MPAM "MPAM" /* Memory System Resource Partitioning and Monitoring Table */
#define ACPI_SIG_MPST "MPST" /* Memory Power State Table */
-#define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */
+#define ACPI_SIG_MRRM "MRRM" /* Memory Range and Region Mapping table */
#define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */
#define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */
+#define ACPI_SIG_NHLT "NHLT" /* Non HD Audio Link Table */
#define ACPI_SIG_PCCT "PCCT" /* Platform Communications Channel Table */
#define ACPI_SIG_PDTT "PDTT" /* Platform Debug Trigger Table */
#define ACPI_SIG_PHAT "PHAT" /* Platform Health Assessment Table */
#define ACPI_SIG_PMTT "PMTT" /* Platform Memory Topology Table */
#define ACPI_SIG_PPTT "PPTT" /* Processor Properties Topology Table */
+#define ACPI_SIG_PRMT "PRMT" /* Platform Runtime Mechanism Table */
#define ACPI_SIG_RASF "RASF" /* RAS Feature table */
+#define ACPI_SIG_RAS2 "RAS2" /* RAS2 Feature table */
+#define ACPI_SIG_RGRT "RGRT" /* Regulatory Graphics Resource Table */
+#define ACPI_SIG_RHCT "RHCT" /* RISC-V Hart Capabilities Table */
+#define ACPI_SIG_RIMT "RIMT" /* RISC-V IO Mapping Table */
#define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */
#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */
#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
-#define ACPI_SIG_NHLT "NHLT" /* Non-HDAudio Link Table */
+#define ACPI_SIG_SVKL "SVKL" /* Storage Volume Key Location Table */
+#define ACPI_SIG_SWFT "SWFT" /* SoundWire File Table */
+#define ACPI_SIG_TDEL "TDEL" /* TD Event Log Table */
/*
* All tables must be byte-packed to match the ACPI specification, since
@@ -65,10 +80,573 @@
/*******************************************************************************
*
+ * AEST - Arm Error Source Table
+ *
+ * Conforms to: ACPI for the Armv8 RAS Extensions 1.1 (Sep 2020) and
+ * 2.0 (May 2023) Platform Design Document.
+ *
+ ******************************************************************************/
+
+struct acpi_table_aest {
+ struct acpi_table_header header;
+};
+
+/* Common Subtable header - one per Node Structure (Subtable) */
+
+struct acpi_aest_hdr {
+ u8 type;
+ u16 length;
+ u8 reserved;
+ u32 node_specific_offset;
+ u32 node_interface_offset;
+ u32 node_interrupt_offset;
+ u32 node_interrupt_count;
+ u64 timestamp_rate;
+ u64 reserved1;
+ u64 error_injection_rate;
+};
+
+/* Values for Type above */
+
+#define ACPI_AEST_PROCESSOR_ERROR_NODE 0
+#define ACPI_AEST_MEMORY_ERROR_NODE 1
+#define ACPI_AEST_SMMU_ERROR_NODE 2
+#define ACPI_AEST_VENDOR_ERROR_NODE 3
+#define ACPI_AEST_GIC_ERROR_NODE 4
+#define ACPI_AEST_PCIE_ERROR_NODE 5
+#define ACPI_AEST_PROXY_ERROR_NODE 6
+#define ACPI_AEST_NODE_TYPE_RESERVED 7 /* 7 and above are reserved */
+
+/*
+ * AEST subtables (Error nodes)
+ */
+
+/* 0: Processor Error */
+
+typedef struct acpi_aest_processor {
+ u32 processor_id;
+ u8 resource_type;
+ u8 reserved;
+ u8 flags;
+ u8 revision;
+ u64 processor_affinity;
+
+} acpi_aest_processor;
+
+/* Values for resource_type above, related structs below */
+
+#define ACPI_AEST_CACHE_RESOURCE 0
+#define ACPI_AEST_TLB_RESOURCE 1
+#define ACPI_AEST_GENERIC_RESOURCE 2
+#define ACPI_AEST_RESOURCE_RESERVED 3 /* 3 and above are reserved */
+
+/* 0R: Processor Cache Resource Substructure */
+
+typedef struct acpi_aest_processor_cache {
+ u32 cache_reference;
+ u32 reserved;
+
+} acpi_aest_processor_cache;
+
+/* Values for the type of the cache referenced above */
+
+#define ACPI_AEST_CACHE_DATA 0
+#define ACPI_AEST_CACHE_INSTRUCTION 1
+#define ACPI_AEST_CACHE_UNIFIED 2
+#define ACPI_AEST_CACHE_RESERVED 3 /* 3 and above are reserved */
+
+/* 1R: Processor TLB Resource Substructure */
+
+typedef struct acpi_aest_processor_tlb {
+ u32 tlb_level;
+ u32 reserved;
+
+} acpi_aest_processor_tlb;
+
+/* 2R: Processor Generic Resource Substructure */
+
+typedef struct acpi_aest_processor_generic {
+ u32 resource;
+
+} acpi_aest_processor_generic;
+
+/* 1: Memory Error */
+
+typedef struct acpi_aest_memory {
+ u32 srat_proximity_domain;
+
+} acpi_aest_memory;
+
+/* 2: Smmu Error */
+
+typedef struct acpi_aest_smmu {
+ u32 iort_node_reference;
+ u32 subcomponent_reference;
+
+} acpi_aest_smmu;
+
+/* 3: Vendor Defined */
+
+typedef struct acpi_aest_vendor {
+ u32 acpi_hid;
+ u32 acpi_uid;
+ u8 vendor_specific_data[16];
+
+} acpi_aest_vendor;
+
+struct acpi_aest_vendor_v2 {
+ char acpi_hid[8];
+ u32 acpi_uid;
+ u8 vendor_specific_data[16];
+};
+
+/* 4: Gic Error */
+
+typedef struct acpi_aest_gic {
+ u32 interface_type;
+ u32 instance_id;
+
+} acpi_aest_gic;
+
+/* Values for interface_type above */
+
+#define ACPI_AEST_GIC_CPU 0
+#define ACPI_AEST_GIC_DISTRIBUTOR 1
+#define ACPI_AEST_GIC_REDISTRIBUTOR 2
+#define ACPI_AEST_GIC_ITS 3
+#define ACPI_AEST_GIC_RESERVED 4 /* 4 and above are reserved */
+
+/* 5: PCIe Error */
+
+struct acpi_aest_pcie {
+ u32 iort_node_reference;
+};
+
+/* 6: Proxy Error */
+
+struct acpi_aest_proxy {
+ u64 node_address;
+};
+
+/* Node Interface Structure */
+
+typedef struct acpi_aest_node_interface {
+ u8 type;
+ u8 reserved[3];
+ u32 flags;
+ u64 address;
+ u32 error_record_index;
+ u32 error_record_count;
+ u64 error_record_implemented;
+ u64 error_status_reporting;
+ u64 addressing_mode;
+
+} acpi_aest_node_interface;
+
+/* Node Interface Structure V2 */
+
+struct acpi_aest_node_interface_header {
+ u8 type;
+ u8 group_format;
+ u8 reserved[2];
+ u32 flags;
+ u64 address;
+ u32 error_record_index;
+ u32 error_record_count;
+};
+
+#define ACPI_AEST_NODE_GROUP_FORMAT_4K 0
+#define ACPI_AEST_NODE_GROUP_FORMAT_16K 1
+#define ACPI_AEST_NODE_GROUP_FORMAT_64K 2
+
+struct acpi_aest_node_interface_common {
+ u32 error_node_device;
+ u32 processor_affinity;
+ u64 error_group_register_base;
+ u64 fault_inject_register_base;
+ u64 interrupt_config_register_base;
+};
+
+struct acpi_aest_node_interface_4k {
+ u64 error_record_implemented;
+ u64 error_status_reporting;
+ u64 addressing_mode;
+ struct acpi_aest_node_interface_common common;
+};
+
+struct acpi_aest_node_interface_16k {
+ u64 error_record_implemented[4];
+ u64 error_status_reporting[4];
+ u64 addressing_mode[4];
+ struct acpi_aest_node_interface_common common;
+};
+
+struct acpi_aest_node_interface_64k {
+ u64 error_record_implemented[14];
+ u64 error_status_reporting[14];
+ u64 addressing_mode[14];
+ struct acpi_aest_node_interface_common common;
+};
+
+/* Values for Type field above */
+
+#define ACPI_AEST_NODE_SYSTEM_REGISTER 0
+#define ACPI_AEST_NODE_MEMORY_MAPPED 1
+#define ACPI_AEST_NODE_SINGLE_RECORD_MEMORY_MAPPED 2
+#define ACPI_AEST_XFACE_RESERVED 3 /* 3 and above are reserved */
+
+/* Node Interrupt Structure */
+
+typedef struct acpi_aest_node_interrupt {
+ u8 type;
+ u8 reserved[2];
+ u8 flags;
+ u32 gsiv;
+ u8 iort_id;
+ u8 reserved1[3];
+
+} acpi_aest_node_interrupt;
+
+/* Node Interrupt Structure V2 */
+
+struct acpi_aest_node_interrupt_v2 {
+ u8 type;
+ u8 reserved[2];
+ u8 flags;
+ u32 gsiv;
+ u8 reserved1[4];
+};
+
+/* Values for Type field above */
+
+#define ACPI_AEST_NODE_FAULT_HANDLING 0
+#define ACPI_AEST_NODE_ERROR_RECOVERY 1
+#define ACPI_AEST_XRUPT_RESERVED 2 /* 2 and above are reserved */
+
+/*******************************************************************************
+ * AGDI - Arm Generic Diagnostic Dump and Reset Device Interface
+ *
+ * Conforms to "ACPI for Arm Components 1.1, Platform Design Document"
+ * ARM DEN0093 v1.1
+ *
+ ******************************************************************************/
+struct acpi_table_agdi {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 flags;
+ u8 reserved[3];
+ u32 sdei_event;
+ u32 gsiv;
+};
+
+/* Mask for Flags field above */
+
+#define ACPI_AGDI_SIGNALING_MODE (1)
+
+/*******************************************************************************
+ *
+ * APMT - ARM Performance Monitoring Unit Table
+ *
+ * Conforms to:
+ * ARM Performance Monitoring Unit Architecture 1.0 Platform Design Document
+ * ARM DEN0117 v1.0 November 25, 2021
+ *
+ ******************************************************************************/
+
+struct acpi_table_apmt {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+#define ACPI_APMT_NODE_ID_LENGTH 4
+
+/*
+ * APMT subtables
+ */
+struct acpi_apmt_node {
+ u16 length;
+ u8 flags;
+ u8 type;
+ u32 id;
+ u64 inst_primary;
+ u32 inst_secondary;
+ u64 base_address0;
+ u64 base_address1;
+ u32 ovflw_irq;
+ u32 reserved;
+ u32 ovflw_irq_flags;
+ u32 proc_affinity;
+ u32 impl_id;
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_APMT_FLAGS_DUAL_PAGE (1<<0)
+#define ACPI_APMT_FLAGS_AFFINITY (1<<1)
+#define ACPI_APMT_FLAGS_ATOMIC (1<<2)
+
+/* Values for Flags dual page field above */
+
+#define ACPI_APMT_FLAGS_DUAL_PAGE_NSUPP (0<<0)
+#define ACPI_APMT_FLAGS_DUAL_PAGE_SUPP (1<<0)
+
+/* Values for Flags processor affinity field above */
+#define ACPI_APMT_FLAGS_AFFINITY_PROC (0<<1)
+#define ACPI_APMT_FLAGS_AFFINITY_PROC_CONTAINER (1<<1)
+
+/* Values for Flags 64-bit atomic field above */
+#define ACPI_APMT_FLAGS_ATOMIC_NSUPP (0<<2)
+#define ACPI_APMT_FLAGS_ATOMIC_SUPP (1<<2)
+
+/* Values for Type field above */
+
+enum acpi_apmt_node_type {
+ ACPI_APMT_NODE_TYPE_MC = 0x00,
+ ACPI_APMT_NODE_TYPE_SMMU = 0x01,
+ ACPI_APMT_NODE_TYPE_PCIE_ROOT = 0x02,
+ ACPI_APMT_NODE_TYPE_ACPI = 0x03,
+ ACPI_APMT_NODE_TYPE_CACHE = 0x04,
+ ACPI_APMT_NODE_TYPE_COUNT
+};
+
+/* Masks for ovflw_irq_flags field above */
+
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_MODE (1<<0)
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_TYPE (1<<1)
+
+/* Values for ovflw_irq_flags mode field above */
+
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_MODE_LEVEL (0<<0)
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_MODE_EDGE (1<<0)
+
+/* Values for ovflw_irq_flags type field above */
+
+#define ACPI_APMT_OVFLW_IRQ_FLAGS_TYPE_WIRED (0<<1)
+
+/*******************************************************************************
+ *
+ * BDAT - BIOS Data ACPI Table
+ *
+ * Conforms to "BIOS Data ACPI Table", Interface Specification v4.0 Draft 5
+ * Nov 2020
+ *
+ ******************************************************************************/
+
+struct acpi_table_bdat {
+ struct acpi_table_header header;
+ struct acpi_generic_address gas;
+};
+
+/*******************************************************************************
+ *
+ * CCEL - CC-Event Log
+ * From: "Guest-Host-Communication Interface (GHCI) for Intel
+ * Trust Domain Extensions (Intel TDX)". Feb 2022
+ *
+ ******************************************************************************/
+
+struct acpi_table_ccel {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 CCtype;
+ u8 Ccsub_type;
+ u16 reserved;
+ u64 log_area_minimum_length;
+ u64 log_area_start_address;
+};
+
+/*******************************************************************************
+ *
+ * ERDT - Enhanced Resource Director Technology (ERDT) table
+ *
+ * Conforms to "Intel Resource Director Technology Architecture Specification"
+ * Version 1.1, January 2025
+ *
+ ******************************************************************************/
+
+struct acpi_table_erdt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 max_clos; /* Maximum classes of service */
+ u8 reserved[24];
+ u8 erdt_substructures[];
+};
+
+/* Values for subtable type in struct acpi_subtbl_hdr_16 */
+
+enum acpi_erdt_type {
+ ACPI_ERDT_TYPE_RMDD = 0,
+ ACPI_ERDT_TYPE_CACD = 1,
+ ACPI_ERDT_TYPE_DACD = 2,
+ ACPI_ERDT_TYPE_CMRC = 3,
+ ACPI_ERDT_TYPE_MMRC = 4,
+ ACPI_ERDT_TYPE_MARC = 5,
+ ACPI_ERDT_TYPE_CARC = 6,
+ ACPI_ERDT_TYPE_CMRD = 7,
+ ACPI_ERDT_TYPE_IBRD = 8,
+ ACPI_ERDT_TYPE_IBAD = 9,
+ ACPI_ERDT_TYPE_CARD = 10,
+ ACPI_ERDT_TYPE_RESERVED = 11 /* 11 and above are reserved */
+};
+
+/*
+ * ERDT Subtables, correspond to Type in struct acpi_subtbl_hdr_16
+ */
+
+/* 0: RMDD - Resource Management Domain Description */
+
+struct acpi_erdt_rmdd {
+ struct acpi_subtbl_hdr_16 header;
+ u16 flags;
+ u16 IO_l3_slices; /* Number of slices in IO cache */
+ u8 IO_l3_sets; /* Number of sets in IO cache */
+ u8 IO_l3_ways; /* Number of ways in IO cache */
+ u64 reserved;
+ u16 domain_id; /* Unique domain ID */
+ u32 max_rmid; /* Maximum RMID supported */
+ u64 creg_base; /* Control Register Base Address */
+ u16 creg_size; /* Control Register Size (4K pages) */
+ u8 rmdd_structs[];
+};
+
+/* 1: CACD - CPU Agent Collection Description */
+
+struct acpi_erdt_cacd {
+ struct acpi_subtbl_hdr_16 header;
+ u16 reserved;
+ u16 domain_id; /* Unique domain ID */
+ u32 X2APICIDS[];
+};
+
+/* 2: DACD - Device Agent Collection Description */
+
+struct acpi_erdt_dacd {
+ struct acpi_subtbl_hdr_16 header;
+ u16 reserved;
+ u16 domain_id; /* Unique domain ID */
+ u8 dev_paths[];
+};
+
+struct acpi_erdt_dacd_dev_paths {
+ struct acpi_subtable_header header;
+ u16 segment;
+ u8 reserved;
+ u8 start_bus;
+ u8 path[];
+};
+
+/* 3: CMRC - Cache Monitoring Registers for CPU Agents */
+
+struct acpi_erdt_cmrc {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved1;
+ u32 flags;
+ u8 index_fn;
+ u8 reserved2[11];
+ u64 cmt_reg_base;
+ u32 cmt_reg_size;
+ u16 clump_size;
+ u16 clump_stride;
+ u64 up_scale;
+};
+
+/* 4: MMRC - Memory-bandwidth Monitoring Registers for CPU Agents */
+
+struct acpi_erdt_mmrc {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved1;
+ u32 flags;
+ u8 index_fn;
+ u8 reserved2[11];
+ u64 reg_base;
+ u32 reg_size;
+ u8 counter_width;
+ u64 up_scale;
+ u8 reserved3[7];
+ u32 corr_factor_list_len;
+ u32 corr_factor_list[];
+};
+
+/* 5: MARC - Memory-bandwidth Allocation Registers for CPU Agents */
+
+struct acpi_erdt_marc {
+ struct acpi_subtbl_hdr_16 header;
+ u16 reserved1;
+ u16 flags;
+ u8 index_fn;
+ u8 reserved2[7];
+ u64 reg_base_opt;
+ u64 reg_base_min;
+ u64 reg_base_max;
+ u32 mba_reg_size;
+ u32 mba_ctrl_range;
+};
+
+/* 6: CARC - Cache Allocation Registers for CPU Agents */
+
+struct acpi_erdt_carc {
+ struct acpi_subtbl_hdr_16 header;
+};
+
+/* 7: CMRD - Cache Monitoring Registers for Device Agents */
+
+struct acpi_erdt_cmrd {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved1;
+ u32 flags;
+ u8 index_fn;
+ u8 reserved2[11];
+ u64 reg_base;
+ u32 reg_size;
+ u16 cmt_reg_off;
+ u16 cmt_clump_size;
+ u64 up_scale;
+};
+
+/* 8: IBRD - IO Bandwidth Reporting Registers for Device Agents */
+
+struct acpi_erdt_ibrd {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved1;
+ u32 flags;
+ u8 index_fn;
+ u8 reserved2[11];
+ u64 reg_base;
+ u32 reg_size;
+ u16 total_bw_offset;
+ u16 Iomiss_bw_offset;
+ u16 total_bw_clump;
+ u16 Iomiss_bw_clump;
+ u8 reserved3[7];
+ u8 counter_width;
+ u64 up_scale;
+ u32 corr_factor_list_len;
+ u32 corr_factor_list[];
+};
+
+/* 9: IBAD - IO bandwidth Allocation Registers for device agents */
+
+struct acpi_erdt_ibad {
+ struct acpi_subtbl_hdr_16 header;
+};
+
+/* 10: CARD - Cache Allocation Registers for Device Agents */
+
+struct acpi_erdt_card {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved1;
+ u32 flags;
+ u32 contention_mask;
+ u8 index_fn;
+ u8 reserved2[7];
+ u64 reg_base;
+ u32 reg_size;
+ u16 cat_reg_offset;
+ u16 cat_reg_block_size;
+};
+
+/*******************************************************************************
+ *
* IORT - IO Remapping Table
*
* Conforms to "IO Remapping Table System Software on ARM Platforms",
- * Document number: ARM DEN 0049E.b, Feb 2021
+ * Document number: ARM DEN 0049E.f, Apr 2024
*
******************************************************************************/
@@ -89,7 +667,7 @@ struct acpi_iort_node {
u32 identifier;
u32 mapping_count;
u32 mapping_offset;
- char node_data[1];
+ char node_data[];
};
/* Values for subtable Type above */
@@ -139,20 +717,21 @@ struct acpi_iort_memory_access {
#define ACPI_IORT_MF_COHERENCY (1)
#define ACPI_IORT_MF_ATTRIBUTES (1<<1)
+#define ACPI_IORT_MF_CANWBS (1<<2)
/*
* IORT node specific subtables
*/
struct acpi_iort_its_group {
u32 its_count;
- u32 identifiers[1]; /* GIC ITS identifier array */
+ u32 identifiers[]; /* GIC ITS identifier array */
};
struct acpi_iort_named_component {
u32 node_flags;
u64 memory_properties; /* Memory access properties */
u8 memory_address_limit; /* Memory address size limit */
- char device_name[1]; /* Path of namespace object */
+ char device_name[]; /* Path of namespace object */
};
/* Masks for Flags field above */
@@ -165,7 +744,8 @@ struct acpi_iort_root_complex {
u32 ats_attribute;
u32 pci_segment_number;
u8 memory_address_limit; /* Memory address size limit */
- u8 reserved[3]; /* Reserved, must be zero */
+ u16 pasid_capabilities; /* PASID Capabilities */
+ u8 reserved[]; /* Reserved, must be zero */
};
/* Masks for ats_attribute field above */
@@ -174,6 +754,9 @@ struct acpi_iort_root_complex {
#define ACPI_IORT_PRI_SUPPORTED (1<<1) /* The root complex PRI support */
#define ACPI_IORT_PASID_FWD_SUPPORTED (1<<2) /* The root complex PASID forward support */
+/* Masks for pasid_capabilities field above */
+#define ACPI_IORT_PASID_MAX_WIDTH (0x1F) /* Bits 0-4 */
+
struct acpi_iort_smmu {
u64 base_address; /* SMMU base address */
u64 span; /* Length of memory range */
@@ -184,7 +767,7 @@ struct acpi_iort_smmu {
u32 context_interrupt_offset;
u32 pmu_interrupt_count;
u32 pmu_interrupt_offset;
- u64 interrupts[1]; /* Interrupt array */
+ u64 interrupts[]; /* Interrupt array */
};
/* Values for Model field above */
@@ -235,6 +818,7 @@ struct acpi_iort_smmu_v3 {
#define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE (1)
#define ACPI_IORT_SMMU_V3_HTTU_OVERRIDE (3<<1)
#define ACPI_IORT_SMMU_V3_PXM_VALID (1<<3)
+#define ACPI_IORT_SMMU_V3_DEVICEID_VALID (1<<4)
struct acpi_iort_pmcg {
u64 page0_base_address;
@@ -249,6 +833,25 @@ struct acpi_iort_rmr {
u32 rmr_offset;
};
+/* Masks for Flags field above */
+#define ACPI_IORT_RMR_REMAP_PERMITTED (1)
+#define ACPI_IORT_RMR_ACCESS_PRIVILEGE (1<<1)
+
+/*
+ * Macro to access the Access Attributes in flags field above:
+ * Access Attributes is encoded in bits 9:2
+ */
+#define ACPI_IORT_RMR_ACCESS_ATTRIBUTES(flags) (((flags) >> 2) & 0xFF)
+
+/* Values for above Access Attributes */
+
+#define ACPI_IORT_RMR_ATTR_DEVICE_NGNRNE 0x00
+#define ACPI_IORT_RMR_ATTR_DEVICE_NGNRE 0x01
+#define ACPI_IORT_RMR_ATTR_DEVICE_NGRE 0x02
+#define ACPI_IORT_RMR_ATTR_DEVICE_GRE 0x03
+#define ACPI_IORT_RMR_ATTR_NORMAL_NC 0x04
+#define ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB 0x05
+
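/*
 * Illustrative only, not part of the patch: a hedged sketch of decoding
 * the flags of an IORT RMR node with the masks and accessor macro above.
 * The function name and the bare flags parameter are assumptions.
 */
static inline bool example_rmr_is_normal_wb(u32 flags)
{
	/* Bit 0 is remappability; bits 9:2 carry the access attributes. */
	return (flags & ACPI_IORT_RMR_REMAP_PERMITTED) &&
	       ACPI_IORT_RMR_ACCESS_ATTRIBUTES(flags) ==
			ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB;
}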
struct acpi_iort_rmr_desc {
u64 base_address;
u64 length;
@@ -446,6 +1049,12 @@ struct acpi_ivrs_device_hid {
u8 uid_length;
};
+/* Values for uid_type above */
+
+#define ACPI_IVRS_UID_NOT_PRESENT 0
+#define ACPI_IVRS_UID_IS_INTEGER 1
+#define ACPI_IVRS_UID_IS_STRING 2
+
/* 0x20, 0x21, 0x22: I/O Virtualization Memory Definition Block (IVMD) */
struct acpi_ivrs_memory {
@@ -547,7 +1156,19 @@ enum acpi_madt_type {
ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14,
ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15,
ACPI_MADT_TYPE_MULTIPROC_WAKEUP = 16,
- ACPI_MADT_TYPE_RESERVED = 17 /* 17 and greater are reserved */
+ ACPI_MADT_TYPE_CORE_PIC = 17,
+ ACPI_MADT_TYPE_LIO_PIC = 18,
+ ACPI_MADT_TYPE_HT_PIC = 19,
+ ACPI_MADT_TYPE_EIO_PIC = 20,
+ ACPI_MADT_TYPE_MSI_PIC = 21,
+ ACPI_MADT_TYPE_BIO_PIC = 22,
+ ACPI_MADT_TYPE_LPC_PIC = 23,
+ ACPI_MADT_TYPE_RINTC = 24,
+ ACPI_MADT_TYPE_IMSIC = 25,
+ ACPI_MADT_TYPE_APLIC = 26,
+ ACPI_MADT_TYPE_PLIC = 27,
+ ACPI_MADT_TYPE_RESERVED = 28, /* 28 to 0x7F are reserved */
+ ACPI_MADT_TYPE_OEM_RESERVED = 0x80 /* 0x80 to 0xFF are reserved for OEM use */
};
/*
@@ -628,7 +1249,7 @@ struct acpi_madt_local_sapic {
u8 reserved[3]; /* Reserved, must be zero */
u32 lapic_flags;
u32 uid; /* Numeric UID - ACPI 3.0 */
- char uid_string[1]; /* String UID - ACPI 3.0 */
+ char uid_string[]; /* String UID - ACPI 3.0 */
};
/* 8: Platform Interrupt Source */
@@ -668,7 +1289,7 @@ struct acpi_madt_local_x2apic_nmi {
u8 reserved[3]; /* reserved - must be zero */
};
-/* 11: Generic interrupt - GICC (ACPI 5.0 + ACPI 6.0 + ACPI 6.3 changes) */
+/* 11: Generic interrupt - GICC (ACPI 5.0 + ACPI 6.0 + ACPI 6.3 + ACPI 6.5 changes) */
struct acpi_madt_generic_interrupt {
struct acpi_subtable_header header;
@@ -688,6 +1309,7 @@ struct acpi_madt_generic_interrupt {
u8 efficiency_class;
u8 reserved2[1];
u16 spe_interrupt; /* ACPI 6.3 */
+ u16 trbe_interrupt; /* ACPI 6.5 */
};
/* Masks for Flags field above */
@@ -695,6 +1317,8 @@ struct acpi_madt_generic_interrupt {
/* ACPI_MADT_ENABLED (1) Processor is usable if set */
#define ACPI_MADT_PERFORMANCE_IRQ_MODE (1<<1) /* 01: Performance Interrupt Mode */
#define ACPI_MADT_VGIC_IRQ_MODE (1<<2) /* 02: VGIC Maintenance Interrupt mode */
+#define ACPI_MADT_GICC_ONLINE_CAPABLE (1<<3) /* 03: Processor is online capable */
+#define ACPI_MADT_GICC_NON_COHERENT (1<<4) /* 04: GIC redistributor is not coherent */
/* 12: Generic Distributor (ACPI 5.0 + ACPI 6.0 changes) */
@@ -739,28 +1363,258 @@ struct acpi_madt_generic_msi_frame {
struct acpi_madt_generic_redistributor {
struct acpi_subtable_header header;
- u16 reserved; /* reserved - must be zero */
+ u8 flags;
+ u8 reserved; /* reserved - must be zero */
u64 base_address;
u32 length;
};
+#define ACPI_MADT_GICR_NON_COHERENT (1)
+
/* 15: Generic Translator (ACPI 6.0) */
struct acpi_madt_generic_translator {
struct acpi_subtable_header header;
- u16 reserved; /* reserved - must be zero */
+ u8 flags;
+ u8 reserved; /* reserved - must be zero */
u32 translation_id;
u64 base_address;
u32 reserved2;
};
+#define ACPI_MADT_ITS_NON_COHERENT (1)
+
/* 16: Multiprocessor wakeup (ACPI 6.4) */
struct acpi_madt_multiproc_wakeup {
struct acpi_subtable_header header;
- u16 mailbox_version;
+ u16 version;
u32 reserved; /* reserved - must be zero */
- u64 base_address;
+ u64 mailbox_address;
+ u64 reset_vector;
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_multiproc_wakeup_version {
+ ACPI_MADT_MP_WAKEUP_VERSION_NONE = 0,
+ ACPI_MADT_MP_WAKEUP_VERSION_V1 = 1,
+ ACPI_MADT_MP_WAKEUP_VERSION_RESERVED = 2, /* 2 and greater are reserved */
+};
+
+#define ACPI_MADT_MP_WAKEUP_SIZE_V0 16
+#define ACPI_MADT_MP_WAKEUP_SIZE_V1 24
+
+#define ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE 2032
+#define ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE 2048
+
+struct acpi_madt_multiproc_wakeup_mailbox {
+ u16 command;
+ u16 reserved; /* reserved - must be zero */
+ u32 apic_id;
+ u64 wakeup_vector;
+ u8 reserved_os[ACPI_MULTIPROC_WAKEUP_MB_OS_SIZE]; /* reserved for OS use */
+ u8 reserved_firmware[ACPI_MULTIPROC_WAKEUP_MB_FIRMWARE_SIZE]; /* reserved for firmware use */
+};
+
+#define ACPI_MP_WAKE_COMMAND_WAKEUP 1
+#define ACPI_MP_WAKE_COMMAND_TEST 2
+
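/*
 * A minimal sketch, not part of the patch, of the wakeup handshake the
 * mailbox above implements: the OS publishes the entry point and target
 * APIC ID, then writes the command word last. The function name and the
 * barrier choice are assumptions; real code must follow the ACPI 6.4+
 * protocol and poll for firmware clearing the command field afterwards.
 */
static void example_wake_ap(struct acpi_madt_multiproc_wakeup_mailbox *mbox,
			    u32 apic_id, u64 entry_point)
{
	mbox->apic_id = apic_id;
	mbox->wakeup_vector = entry_point;
	smp_wmb();	/* order the payload before the command write */
	WRITE_ONCE(mbox->command, ACPI_MP_WAKE_COMMAND_WAKEUP);
}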
+/* 17: CPU Core Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_core_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u32 processor_id;
+ u32 core_id;
+ u32 flags;
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_core_pic_version {
+ ACPI_MADT_CORE_PIC_VERSION_NONE = 0,
+ ACPI_MADT_CORE_PIC_VERSION_V1 = 1,
+ ACPI_MADT_CORE_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 18: Legacy I/O Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_lio_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u64 address;
+ u16 size;
+ u8 cascade[2];
+ u32 cascade_map[2];
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_lio_pic_version {
+ ACPI_MADT_LIO_PIC_VERSION_NONE = 0,
+ ACPI_MADT_LIO_PIC_VERSION_V1 = 1,
+ ACPI_MADT_LIO_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 19: HT Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_ht_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u64 address;
+ u16 size;
+ u8 cascade[8];
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_ht_pic_version {
+ ACPI_MADT_HT_PIC_VERSION_NONE = 0,
+ ACPI_MADT_HT_PIC_VERSION_V1 = 1,
+ ACPI_MADT_HT_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 20: Extend I/O Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_eio_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 cascade;
+ u8 node;
+ u64 node_map;
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_eio_pic_version {
+ ACPI_MADT_EIO_PIC_VERSION_NONE = 0,
+ ACPI_MADT_EIO_PIC_VERSION_V1 = 1,
+ ACPI_MADT_EIO_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 21: MSI Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_msi_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u64 msg_address;
+ u32 start;
+ u32 count;
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_msi_pic_version {
+ ACPI_MADT_MSI_PIC_VERSION_NONE = 0,
+ ACPI_MADT_MSI_PIC_VERSION_V1 = 1,
+ ACPI_MADT_MSI_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 22: Bridge I/O Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_bio_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u64 address;
+ u16 size;
+ u16 id;
+ u16 gsi_base;
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_bio_pic_version {
+ ACPI_MADT_BIO_PIC_VERSION_NONE = 0,
+ ACPI_MADT_BIO_PIC_VERSION_V1 = 1,
+ ACPI_MADT_BIO_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 23: LPC Interrupt Controller (ACPI 6.5) */
+
+struct acpi_madt_lpc_pic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u64 address;
+ u16 size;
+ u8 cascade;
+};
+
+/* Values for Version field above */
+
+enum acpi_madt_lpc_pic_version {
+ ACPI_MADT_LPC_PIC_VERSION_NONE = 0,
+ ACPI_MADT_LPC_PIC_VERSION_V1 = 1,
+ ACPI_MADT_LPC_PIC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 24: RISC-V INTC */
+struct acpi_madt_rintc {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 reserved;
+ u32 flags;
+ u64 hart_id;
+ u32 uid; /* ACPI processor UID */
+ u32 ext_intc_id; /* External INTC Id */
+ u64 imsic_addr; /* IMSIC base address */
+ u32 imsic_size; /* IMSIC size */
+};
+
+/* Values for RISC-V INTC Version field above */
+
+enum acpi_madt_rintc_version {
+ ACPI_MADT_RINTC_VERSION_NONE = 0,
+ ACPI_MADT_RINTC_VERSION_V1 = 1,
+ ACPI_MADT_RINTC_VERSION_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/* 25: RISC-V IMSIC */
+struct acpi_madt_imsic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 reserved;
+ u32 flags;
+ u16 num_ids;
+ u16 num_guest_ids;
+ u8 guest_index_bits;
+ u8 hart_index_bits;
+ u8 group_index_bits;
+ u8 group_index_shift;
+};
+
+/* 26: RISC-V APLIC */
+struct acpi_madt_aplic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 id;
+ u32 flags;
+ u8 hw_id[8];
+ u16 num_idcs;
+ u16 num_sources;
+ u32 gsi_base;
+ u64 base_addr;
+ u32 size;
+};
+
+/* 27: RISC-V PLIC */
+struct acpi_madt_plic {
+ struct acpi_subtable_header header;
+ u8 version;
+ u8 id;
+ u8 hw_id[8];
+ u16 num_irqs;
+ u16 max_prio;
+ u32 flags;
+ u32 size;
+ u64 base_addr;
+ u32 gsi_base;
+};
+
+/* 80: OEM data */
+
+struct acpi_madt_oem_data {
+ ACPI_FLEX_ARRAY(u8, oem_data);
};
/*
@@ -770,6 +1624,7 @@ struct acpi_madt_multiproc_wakeup {
/* MADT Local APIC flags */
#define ACPI_MADT_ENABLED (1) /* 00: Processor is usable if set */
+#define ACPI_MADT_ONLINE_CAPABLE (2) /* 01: System HW supports enabling processor at runtime */
/* MADT MPS INTI flags (inti_flags) */
@@ -840,6 +1695,121 @@ struct acpi_table_mchi {
/*******************************************************************************
*
+ * MPAM - Memory System Resource Partitioning and Monitoring
+ *
+ * Conforms to "ACPI for Memory System Resource Partitioning and Monitoring 2.0"
+ * Document number: ARM DEN 0065, December 2022.
+ *
+ ******************************************************************************/
+
+/* MPAM RIS locator types. Table 11, Location types */
+enum acpi_mpam_locator_type {
+ ACPI_MPAM_LOCATION_TYPE_PROCESSOR_CACHE = 0,
+ ACPI_MPAM_LOCATION_TYPE_MEMORY = 1,
+ ACPI_MPAM_LOCATION_TYPE_SMMU = 2,
+ ACPI_MPAM_LOCATION_TYPE_MEMORY_CACHE = 3,
+ ACPI_MPAM_LOCATION_TYPE_ACPI_DEVICE = 4,
+ ACPI_MPAM_LOCATION_TYPE_INTERCONNECT = 5,
+ ACPI_MPAM_LOCATION_TYPE_UNKNOWN = 0xFF
+};
+
+/* MPAM Functional dependency descriptor. Table 10 */
+struct acpi_mpam_func_deps {
+ u32 producer;
+ u32 reserved;
+};
+
+/* MPAM Processor cache locator descriptor. Table 13 */
+struct acpi_mpam_resource_cache_locator {
+ u64 cache_reference;
+ u32 reserved;
+};
+
+/* MPAM Memory locator descriptor. Table 14 */
+struct acpi_mpam_resource_memory_locator {
+ u64 proximity_domain;
+ u32 reserved;
+};
+
+/* MPAM SMMU locator descriptor. Table 15 */
+struct acpi_mpam_resource_smmu_locator {
+ u64 smmu_interface;
+ u32 reserved;
+};
+
+/* MPAM Memory-side cache locator descriptor. Table 16 */
+struct acpi_mpam_resource_memcache_locator {
+ u8 reserved[7];
+ u8 level;
+ u32 reference;
+};
+
+/* MPAM ACPI device locator descriptor. Table 17 */
+struct acpi_mpam_resource_acpi_locator {
+ u64 acpi_hw_id;
+ u32 acpi_unique_id;
+};
+
+/* MPAM Interconnect locator descriptor. Table 18 */
+struct acpi_mpam_resource_interconnect_locator {
+ u64 inter_connect_desc_tbl_off;
+ u32 reserved;
+};
+
+/* MPAM Locator structure. Table 12 */
+struct acpi_mpam_resource_generic_locator {
+ u64 descriptor1;
+ u32 descriptor2;
+};
+
+union acpi_mpam_resource_locator {
+ struct acpi_mpam_resource_cache_locator cache_locator;
+ struct acpi_mpam_resource_memory_locator memory_locator;
+ struct acpi_mpam_resource_smmu_locator smmu_locator;
+ struct acpi_mpam_resource_memcache_locator mem_cache_locator;
+ struct acpi_mpam_resource_acpi_locator acpi_locator;
+ struct acpi_mpam_resource_interconnect_locator interconnect_ifc_locator;
+ struct acpi_mpam_resource_generic_locator generic_locator;
+};
+
+/* Memory System Component Resource Node Structure Table 9 */
+struct acpi_mpam_resource_node {
+ u32 identifier;
+ u8 ris_index;
+ u16 reserved1;
+ u8 locator_type;
+ union acpi_mpam_resource_locator locator;
+ u32 num_functional_deps;
+};
+
+/* Memory System Component (MSC) Node Structure. Table 4 */
+struct acpi_mpam_msc_node {
+ u16 length;
+ u8 interface_type;
+ u8 reserved;
+ u32 identifier;
+ u64 base_address;
+ u32 mmio_size;
+ u32 overflow_interrupt;
+ u32 overflow_interrupt_flags;
+ u32 reserved1;
+ u32 overflow_interrupt_affinity;
+ u32 error_interrupt;
+ u32 error_interrupt_flags;
+ u32 reserved2;
+ u32 error_interrupt_affinity;
+ u32 max_nrdy_usec;
+ u64 hardware_id_linked_device;
+ u32 instance_id_linked_device;
+ u32 num_resource_nodes;
+};
+
+struct acpi_table_mpam {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+/*******************************************************************************
+ *
* MPST - Memory Power State Table (ACPI 5.0)
* Version 1
*
@@ -961,6 +1931,47 @@ struct acpi_msct_proximity {
/*******************************************************************************
*
+ * MRRM - Memory Range and Region Mapping table
+ * Conforms to "Intel Resource Director Technology Architecture Specification"
+ * Version 1.1, January 2025
+ *
+ ******************************************************************************/
+
+struct acpi_table_mrrm {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 max_mem_region; /* Max Memory Regions supported */
+ u8 flags; /* Region assignment type */
+ u8 reserved[26];
+ u8 memory_range_entry[];
+};
+
+/* Flags */
+#define ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS (1<<0)
+
+/*******************************************************************************
+ *
+ * Memory Range entry - Memory Range entry in MRRM table
+ *
+ ******************************************************************************/
+
+struct acpi_mrrm_mem_range_entry {
+ struct acpi_subtbl_hdr_16 header;
+ u32 reserved0; /* Reserved */
+ u64 addr_base; /* Base addr of the mem range */
+ u64 addr_len; /* Length of the mem range */
+ u16 region_id_flags; /* Valid local or remote Region-ID */
+ u8 local_region_id; /* Platform-assigned static local Region-ID */
+ u8 remote_region_id; /* Platform-assigned static remote Region-ID */
+ u32 reserved1; /* Reserved */
+ /* Region-ID Programming Registers[] */
+};
+
+/* Values for region_id_flags above */
+#define ACPI_MRRM_VALID_REGION_ID_FLAGS_LOCAL (1<<0)
+#define ACPI_MRRM_VALID_REGION_ID_FLAGS_REMOTE (1<<1)
+
+/*******************************************************************************
+ *
* MSDM - Microsoft Data Management table
*
* Conforms to "Microsoft Software Licensing Tables (SLIC and MSDM)",
@@ -1070,7 +2081,7 @@ struct acpi_nfit_interleave {
u16 reserved; /* Reserved, must be zero */
u32 line_count;
u32 line_size;
- u32 line_offset[1]; /* Variable length */
+ u32 line_offset[]; /* Variable length */
};
/* 3: SMBIOS Management Information Structure */
@@ -1078,7 +2089,7 @@ struct acpi_nfit_interleave {
struct acpi_nfit_smbios {
struct acpi_nfit_header header;
u32 reserved; /* Reserved, must be zero */
- u8 data[1]; /* Variable length */
+ u8 data[]; /* Variable length */
};
/* 4: NVDIMM Control Region Structure */
@@ -1135,7 +2146,7 @@ struct acpi_nfit_flush_address {
u32 device_handle;
u16 hint_count;
u8 reserved[6]; /* Reserved, must be zero */
- u64 hint_address[1]; /* Variable length */
+ u64 hint_address[]; /* Variable length */
};
/* 7: Platform Capabilities Structure */
@@ -1203,6 +2214,195 @@ struct nfit_device_handle {
/*******************************************************************************
*
+ * NHLT - Non HDAudio Link Table
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_nhlt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u8 endpoints_count;
+ /*
+ * struct acpi_nhlt_endpoint endpoints[];
+ * struct acpi_nhlt_config oed_config;
+ */
+};
+
+struct acpi_nhlt_endpoint {
+ u32 length;
+ u8 link_type;
+ u8 instance_id;
+ u16 vendor_id;
+ u16 device_id;
+ u16 revision_id;
+ u32 subsystem_id;
+ u8 device_type;
+ u8 direction;
+ u8 virtual_bus_id;
+ /*
+ * struct acpi_nhlt_config device_config;
+ * struct acpi_nhlt_formats_config formats_config;
+ * struct acpi_nhlt_devices_info devices_info;
+ */
+};
+
+/*
+ * Values for link_type field above
+ *
+ * Only types PDM and SSP are used
+ */
+#define ACPI_NHLT_LINKTYPE_HDA 0
+#define ACPI_NHLT_LINKTYPE_DSP 1
+#define ACPI_NHLT_LINKTYPE_PDM 2
+#define ACPI_NHLT_LINKTYPE_SSP 3
+#define ACPI_NHLT_LINKTYPE_SLIMBUS 4
+#define ACPI_NHLT_LINKTYPE_SDW 5
+#define ACPI_NHLT_LINKTYPE_UAOL 6
+
+/* Values for device_id field above */
+
+#define ACPI_NHLT_DEVICEID_DMIC 0xAE20
+#define ACPI_NHLT_DEVICEID_BT 0xAE30
+#define ACPI_NHLT_DEVICEID_I2S 0xAE34
+
+/* Values for device_type field above */
+
+/*
+ * Device types unique to endpoint of link_type=PDM
+ *
+ * Type PDM used for all SKL+ platforms
+ */
+#define ACPI_NHLT_DEVICETYPE_PDM 0
+#define ACPI_NHLT_DEVICETYPE_PDM_SKL 1
+/* Device types unique to endpoint of link_type=SSP */
+#define ACPI_NHLT_DEVICETYPE_BT 0
+#define ACPI_NHLT_DEVICETYPE_FM 1
+#define ACPI_NHLT_DEVICETYPE_MODEM 2
+#define ACPI_NHLT_DEVICETYPE_CODEC 4
+
+/* Values for Direction field above */
+
+#define ACPI_NHLT_DIR_RENDER 0
+#define ACPI_NHLT_DIR_CAPTURE 1
+
+struct acpi_nhlt_config {
+ u32 capabilities_size;
+ u8 capabilities[];
+};
+
+struct acpi_nhlt_gendevice_config {
+ u8 virtual_slot;
+ u8 config_type;
+};
+
+/* Values for config_type field above */
+
+#define ACPI_NHLT_CONFIGTYPE_GENERIC 0
+#define ACPI_NHLT_CONFIGTYPE_MICARRAY 1
+
+struct acpi_nhlt_micdevice_config {
+ u8 virtual_slot;
+ u8 config_type;
+ u8 array_type;
+};
+
+/* Values for array_type field above */
+
+#define ACPI_NHLT_ARRAYTYPE_LINEAR2_SMALL 0xA
+#define ACPI_NHLT_ARRAYTYPE_LINEAR2_BIG 0xB
+#define ACPI_NHLT_ARRAYTYPE_LINEAR4_GEO1 0xC
+#define ACPI_NHLT_ARRAYTYPE_PLANAR4_LSHAPED 0xD
+#define ACPI_NHLT_ARRAYTYPE_LINEAR4_GEO2 0xE
+#define ACPI_NHLT_ARRAYTYPE_VENDOR 0xF
+
+struct acpi_nhlt_vendor_mic_config {
+ u8 type;
+ u8 panel;
+ u16 speaker_position_distance; /* mm */
+ u16 horizontal_offset; /* mm */
+ u16 vertical_offset; /* mm */
+ u8 frequency_low_band; /* in units of 5 Hz */
+ u8 frequency_high_band; /* in units of 500 Hz */
+ u16 direction_angle; /* -180 to +180 */
+ u16 elevation_angle; /* -180 to +180 */
+ u16 work_vertical_angle_begin; /* -180 to +180, 2 deg step */
+ u16 work_vertical_angle_end; /* -180 to +180, 2 deg step */
+ u16 work_horizontal_angle_begin; /* -180 to +180, 2 deg step */
+ u16 work_horizontal_angle_end; /* -180 to +180, 2 deg step */
+};
+
+/* Values for Type field above */
+
+#define ACPI_NHLT_MICTYPE_OMNIDIRECTIONAL 0
+#define ACPI_NHLT_MICTYPE_SUBCARDIOID 1
+#define ACPI_NHLT_MICTYPE_CARDIOID 2
+#define ACPI_NHLT_MICTYPE_SUPERCARDIOID 3
+#define ACPI_NHLT_MICTYPE_HYPERCARDIOID 4
+#define ACPI_NHLT_MICTYPE_8SHAPED 5
+#define ACPI_NHLT_MICTYPE_RESERVED 6
+#define ACPI_NHLT_MICTYPE_VENDORDEFINED 7
+
+/* Values for Panel field above */
+
+#define ACPI_NHLT_MICLOCATION_TOP 0
+#define ACPI_NHLT_MICLOCATION_BOTTOM 1
+#define ACPI_NHLT_MICLOCATION_LEFT 2
+#define ACPI_NHLT_MICLOCATION_RIGHT 3
+#define ACPI_NHLT_MICLOCATION_FRONT 4
+#define ACPI_NHLT_MICLOCATION_REAR 5
+
+struct acpi_nhlt_vendor_micdevice_config {
+ u8 virtual_slot;
+ u8 config_type;
+ u8 array_type;
+ u8 mics_count;
+ struct acpi_nhlt_vendor_mic_config mics[];
+};
+
+union acpi_nhlt_device_config {
+ u8 virtual_slot;
+ struct acpi_nhlt_gendevice_config gen;
+ struct acpi_nhlt_micdevice_config mic;
+ struct acpi_nhlt_vendor_micdevice_config vendor_mic;
+};
+
+/* Inherited from Microsoft's WAVEFORMATEXTENSIBLE. */
+struct acpi_nhlt_wave_formatext {
+ u16 format_tag;
+ u16 channel_count;
+ u32 samples_per_sec;
+ u32 avg_bytes_per_sec;
+ u16 block_align;
+ u16 bits_per_sample;
+ u16 extra_format_size;
+ u16 valid_bits_per_sample;
+ u32 channel_mask;
+ u8 subformat[16];
+};
+
+struct acpi_nhlt_format_config {
+ struct acpi_nhlt_wave_formatext format;
+ struct acpi_nhlt_config config;
+};
+
+struct acpi_nhlt_formats_config {
+ u8 formats_count;
+ struct acpi_nhlt_format_config formats[];
+};
+
+struct acpi_nhlt_device_info {
+ u8 id[16];
+ u8 instance_id;
+ u8 port_id;
+};
+
+struct acpi_nhlt_devices_info {
+ u8 devices_count;
+ struct acpi_nhlt_device_info devices[];
+};
+
+/*******************************************************************************
+ *
* PCCT - Platform Communications Channel Table (ACPI 5.0)
* Version 2 (ACPI 6.2)
*
@@ -1675,6 +2875,48 @@ struct acpi_pptt_id {
/*******************************************************************************
*
+ * PRMT - Platform Runtime Mechanism Table
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_prmt {
+ struct acpi_table_header header; /* Common ACPI table header */
+};
+
+struct acpi_table_prmt_header {
+ u8 platform_guid[16];
+ u32 module_info_offset;
+ u32 module_info_count;
+};
+
+struct acpi_prmt_module_header {
+ u16 revision;
+ u16 length;
+};
+
+struct acpi_prmt_module_info {
+ u16 revision;
+ u16 length;
+ u8 module_guid[16];
+ u16 major_rev;
+ u16 minor_rev;
+ u16 handler_info_count;
+ u32 handler_info_offset;
+ u64 mmio_list_pointer;
+};
+
+struct acpi_prmt_handler_info {
+ u16 revision;
+ u16 length;
+ u8 handler_guid[16];
+ u64 handler_address;
+ u64 static_data_buffer_address;
+ u64 acpi_param_buffer_address;
+};
+
+/*******************************************************************************
+ *
* RASF - RAS Feature Table (ACPI 5.0)
* Version 1
*
@@ -1771,6 +3013,313 @@ enum acpi_rasf_status {
/*******************************************************************************
*
+ * RAS2 - RAS2 Feature Table (ACPI 6.5)
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_ras2 {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u16 reserved;
+ u16 num_pcc_descs;
+};
+
+/* RAS2 Platform Communication Channel Descriptor */
+
+struct acpi_ras2_pcc_desc {
+ u8 channel_id;
+ u16 reserved;
+ u8 feature_type;
+ u32 instance;
+};
+
+/* RAS2 Platform Communication Channel Shared Memory Region */
+
+struct acpi_ras2_shmem {
+ u32 signature;
+ u16 command;
+ u16 status;
+ u16 version;
+ u8 features[16];
+ u8 set_caps[16];
+ u16 num_param_blks;
+ u32 set_caps_status;
+};
+
+/* RAS2 Parameter Block Structure Header */
+
+struct acpi_ras2_parameter_block {
+ u16 type;
+ u16 version;
+ u16 length;
+};
+
+/* RAS2 Parameter Block Structure for PATROL_SCRUB */
+
+struct acpi_ras2_patrol_scrub_param {
+ struct acpi_ras2_parameter_block header;
+ u16 command;
+ u64 req_addr_range[2];
+ u64 actl_addr_range[2];
+ u32 flags;
+ u32 scrub_params_out;
+ u32 scrub_params_in;
+};
+
+/* Masks for Flags field above */
+
+#define ACPI_RAS2_SCRUBBER_RUNNING 1
+
+/* RAS2 Parameter Block Structure for LA2PA_TRANSLATION */
+
+struct acpi_ras2_la2pa_translation_parameter {
+ struct acpi_ras2_parameter_block header;
+ u16 addr_translation_command;
+ u64 sub_inst_id;
+ u64 logical_address;
+ u64 physical_address;
+ u32 status;
+};
+
+/* Channel Commands */
+
+enum acpi_ras2_commands {
+ ACPI_RAS2_EXECUTE_RAS2_COMMAND = 1
+};
+
+/* Platform RAS2 Features */
+
+enum acpi_ras2_features {
+ ACPI_RAS2_PATROL_SCRUB_SUPPORTED = 0,
+ ACPI_RAS2_LA2PA_TRANSLATION = 1
+};
+
+/* RAS2 Patrol Scrub Commands */
+
+enum acpi_ras2_patrol_scrub_commands {
+ ACPI_RAS2_GET_PATROL_PARAMETERS = 1,
+ ACPI_RAS2_START_PATROL_SCRUBBER = 2,
+ ACPI_RAS2_STOP_PATROL_SCRUBBER = 3
+};
+
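/*
 * Illustrative only: a hedged sketch of preparing the patrol-scrub
 * parameter block before ringing the RAS2 PCC doorbell. The param
 * pointer (into the channel's shared memory), the field choices and
 * the function name are assumptions; the real handshake runs through
 * the PCC channel named by acpi_ras2_pcc_desc above.
 */
static void example_request_scrub(struct acpi_ras2_patrol_scrub_param *param,
				  u64 base, u64 length)
{
	param->req_addr_range[0] = base;	/* requested range base */
	param->req_addr_range[1] = length;	/* requested range length */
	param->command = ACPI_RAS2_START_PATROL_SCRUBBER;
}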
+/* RAS2 LA2PA Translation Commands */
+
+enum acpi_ras2_la2_pa_translation_commands {
+ ACPI_RAS2_GET_LA2PA_TRANSLATION = 1,
+};
+
+/* RAS2 LA2PA Translation Status values */
+
+enum acpi_ras2_la2_pa_translation_status {
+ ACPI_RAS2_LA2PA_TRANSLATION_SUCCESS = 0,
+ ACPI_RAS2_LA2PA_TRANSLATION_FAIL = 1,
+};
+
+/* Channel Command flags */
+
+#define ACPI_RAS2_GENERATE_SCI (1<<15)
+
+/* Status values */
+
+enum acpi_ras2_status {
+ ACPI_RAS2_SUCCESS = 0,
+ ACPI_RAS2_NOT_VALID = 1,
+ ACPI_RAS2_NOT_SUPPORTED = 2,
+ ACPI_RAS2_BUSY = 3,
+ ACPI_RAS2_FAILED = 4,
+ ACPI_RAS2_ABORTED = 5,
+ ACPI_RAS2_INVALID_DATA = 6
+};
+
+/* Status flags */
+
+#define ACPI_RAS2_COMMAND_COMPLETE (1)
+#define ACPI_RAS2_SCI_DOORBELL (1<<1)
+#define ACPI_RAS2_ERROR (1<<2)
+#define ACPI_RAS2_STATUS (0x1F<<3)
+
+/*******************************************************************************
+ *
+ * RGRT - Regulatory Graphics Resource Table
+ * Version 1
+ *
+ * Conforms to "ACPI RGRT" available at:
+ * https://microsoft.github.io/mu/dyn/mu_plus/ms_core_pkg/acpi_RGRT/feature_acpi_rgrt/
+ *
+ ******************************************************************************/
+
+struct acpi_table_rgrt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u16 version;
+ u8 image_type;
+ u8 reserved;
+ u8 image[];
+};
+
+/* image_type values */
+
+enum acpi_rgrt_image_type {
+ ACPI_RGRT_TYPE_RESERVED0 = 0,
+ ACPI_RGRT_IMAGE_TYPE_PNG = 1,
+ ACPI_RGRT_TYPE_RESERVED = 2 /* 2 and greater are reserved */
+};
+
+/*******************************************************************************
+ *
+ * RHCT - RISC-V Hart Capabilities Table
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_rhct {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 flags; /* RHCT flags */
+ u64 time_base_freq;
+ u32 node_count;
+ u32 node_offset;
+};
+
+/* RHCT Flags */
+
+#define ACPI_RHCT_TIMER_CANNOT_WAKEUP_CPU (1)
+/*
+ * RHCT subtables
+ */
+struct acpi_rhct_node_header {
+ u16 type;
+ u16 length;
+ u16 revision;
+};
+
+/* Values for RHCT subtable Type above */
+
+enum acpi_rhct_node_type {
+ ACPI_RHCT_NODE_TYPE_ISA_STRING = 0x0000,
+ ACPI_RHCT_NODE_TYPE_CMO = 0x0001,
+ ACPI_RHCT_NODE_TYPE_MMU = 0x0002,
+ ACPI_RHCT_NODE_TYPE_RESERVED = 0x0003,
+ ACPI_RHCT_NODE_TYPE_HART_INFO = 0xFFFF,
+};
+
+/*
+ * RHCT node specific subtables
+ */
+
+/* ISA string node structure */
+struct acpi_rhct_isa_string {
+ u16 isa_length;
+ char isa[];
+};
+
+struct acpi_rhct_cmo_node {
+ u8 reserved; /* Must be zero */
+ u8 cbom_size; /* CBOM size as a power of 2 */
+ u8 cbop_size; /* CBOP size as a power of 2 */
+ u8 cboz_size; /* CBOZ size as a power of 2 */
+};
+
+struct acpi_rhct_mmu_node {
+ u8 reserved; /* Must be zero */
+ u8 mmu_type; /* Virtual Address Scheme */
+};
+
+enum acpi_rhct_mmu_type {
+ ACPI_RHCT_MMU_TYPE_SV39 = 0,
+ ACPI_RHCT_MMU_TYPE_SV48 = 1,
+ ACPI_RHCT_MMU_TYPE_SV57 = 2
+};
+
+/* Hart Info node structure */
+struct acpi_rhct_hart_info {
+ u16 num_offsets;
+ u32 uid; /* ACPI processor UID */
+};
+
+/*******************************************************************************
+ *
+ * RIMT - RISC-V IO Remapping Table
+ *
+ * https://github.com/riscv-non-isa/riscv-acpi-rimt
+ *
+ ******************************************************************************/
+
+struct acpi_table_rimt {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 num_nodes; /* Number of RIMT Nodes */
+ u32 node_offset; /* Offset to RIMT Node Array */
+ u32 reserved;
+};
+
+struct acpi_rimt_node {
+ u8 type;
+ u8 revision;
+ u16 length;
+ u16 reserved;
+ u16 id;
+ char node_data[];
+};
+
+enum acpi_rimt_node_type {
+ ACPI_RIMT_NODE_TYPE_IOMMU = 0x0,
+ ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX = 0x1,
+ ACPI_RIMT_NODE_TYPE_PLAT_DEVICE = 0x2,
+};
+
+struct acpi_rimt_iommu {
+ u8 hardware_id[8]; /* Hardware ID */
+ u64 base_address; /* Base Address */
+ u32 flags; /* Flags */
+ u32 proximity_domain; /* Proximity Domain */
+ u16 pcie_segment_number; /* PCIe Segment number */
+ u16 pcie_bdf; /* PCIe B/D/F */
+ u16 num_interrupt_wires; /* Number of interrupt wires */
+ u16 interrupt_wire_offset; /* Interrupt wire array offset */
+ u64 interrupt_wire[]; /* Interrupt wire array */
+};
+
+/* IOMMU Node Flags */
+#define ACPI_RIMT_IOMMU_FLAGS_PCIE (1)
+#define ACPI_RIMT_IOMMU_FLAGS_PXM_VALID (1 << 1)
+
+/* Interrupt Wire Structure */
+struct acpi_rimt_iommu_wire_gsi {
+ u32 irq_num; /* Interrupt Number */
+ u32 flags; /* Flags */
+};
+
+/* Interrupt Wire Flags */
+#define ACPI_RIMT_GSI_LEVEL_TRIGGERRED (1)
+#define ACPI_RIMT_GSI_ACTIVE_HIGH (1 << 1)
+
+struct acpi_rimt_id_mapping {
+ u32 source_id_base; /* Source ID Base */
+ u32 num_ids; /* Number of IDs */
+ u32 dest_id_base; /* Destination Device ID Base */
+ u32 dest_offset; /* Destination IOMMU Offset */
+ u32 flags; /* Flags */
+};
+
+struct acpi_rimt_pcie_rc {
+ u32 flags; /* Flags */
+ u16 reserved; /* Reserved */
+ u16 pcie_segment_number; /* PCIe Segment number */
+ u16 id_mapping_offset; /* ID mapping array offset */
+ u16 num_id_mappings; /* Number of ID mappings */
+};
+
+/* PCIe Root Complex Node Flags */
+#define ACPI_RIMT_PCIE_ATS_SUPPORTED (1)
+#define ACPI_RIMT_PCIE_PRI_SUPPORTED (1 << 1)
+
+struct acpi_rimt_platform_device {
+ u16 id_mapping_offset; /* ID Mapping array offset */
+ u16 num_id_mappings; /* Number of ID mappings */
+ char device_name[]; /* Device Object Name */
+};
+
+/*******************************************************************************
+ *
* SBST - Smart Battery Specification Table
* Version 1
*
@@ -1899,6 +3448,73 @@ struct acpi_sdev_pcie_path {
u8 function;
};
+/*******************************************************************************
+ *
+ * SVKL - Storage Volume Key Location Table (ACPI 6.4)
+ * From: "Guest-Host-Communication Interface (GHCI) for Intel
+ * Trust Domain Extensions (Intel TDX)".
+ * Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_svkl {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 count;
+};
+
+struct acpi_svkl_key {
+ u16 type;
+ u16 format;
+ u32 size;
+ u64 address;
+};
+
+enum acpi_svkl_type {
+ ACPI_SVKL_TYPE_MAIN_STORAGE = 0,
+ ACPI_SVKL_TYPE_RESERVED = 1 /* 1 and greater are reserved */
+};
+
+enum acpi_svkl_format {
+ ACPI_SVKL_FORMAT_RAW_BINARY = 0,
+ ACPI_SVKL_FORMAT_RESERVED = 1 /* 1 and greater are reserved */
+};
+
+/*******************************************************************************
+ *
+ * SWFT - SoundWire File Table
+ *
+ * Conforms to "Discovery and Configuration (DisCo) Specification for SoundWire"
+ * Version 2.1, 2 October 2023
+ *
+ ******************************************************************************/
+struct acpi_sw_file {
+ u16 vendor_id;
+ u32 file_id;
+ u16 file_version;
+ u32 file_length;
+ u8 data[];
+};
+
+struct acpi_table_swft {
+ struct acpi_table_header header;
+ struct acpi_sw_file files[];
+};
+
+/*******************************************************************************
+ *
+ * TDEL - TD-Event Log
+ * From: "Guest-Host-Communication Interface (GHCI) for Intel
+ * Trust Domain Extensions (Intel TDX)".
+ * September 2020
+ *
+ ******************************************************************************/
+
+struct acpi_table_tdel {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 reserved;
+ u64 log_area_minimum_length;
+ u64 log_area_start_address;
+};
+
/* Reset to default packing */
#pragma pack()
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 86903ac5bbc5..79d3aa5a4bad 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -3,7 +3,7 @@
*
* Name: actbl3.h - ACPI Table Definitions
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -86,16 +86,16 @@ struct acpi_table_slic {
struct acpi_table_slit {
struct acpi_table_header header; /* Common ACPI table header */
u64 locality_count;
- u8 entry[1]; /* Real size = localities^2 */
+ u8 entry[]; /* Real size = localities^2 */
};
/*******************************************************************************
*
* SPCR - Serial Port Console Redirection table
- * Version 2
+ * Version 4
*
* Conforms to "Serial Port Console Redirection Table",
- * Version 1.03, August 10, 2015
+ * Version 1.10, Jan 5, 2023
*
******************************************************************************/
@@ -112,7 +112,7 @@ struct acpi_table_spcr {
u8 stop_bits;
u8 flow_control;
u8 terminal_type;
- u8 reserved1;
+ u8 language;
u16 pci_device_id;
u16 pci_vendor_id;
u8 pci_bus;
@@ -120,7 +120,11 @@ struct acpi_table_spcr {
u8 pci_function;
u32 pci_flags;
u8 pci_segment;
- u32 reserved2;
+ u32 uart_clk_freq;
+ u32 precise_baudrate;
+ u16 name_space_string_length;
+ u16 name_space_string_offset;
+ char name_space_string[];
};
/* Masks for pci_flags field above */
@@ -191,7 +195,9 @@ enum acpi_srat_type {
ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4, /* ACPI 6.2 */
ACPI_SRAT_TYPE_GENERIC_AFFINITY = 5, /* ACPI 6.3 */
- ACPI_SRAT_TYPE_RESERVED = 6 /* 5 and greater are reserved */
+ ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY = 6, /* ACPI 6.4 */
+ ACPI_SRAT_TYPE_RINTC_AFFINITY = 7, /* ACPI 6.6 */
+ ACPI_SRAT_TYPE_RESERVED = 8 /* 8 and greater are reserved */
};
/*
@@ -263,7 +269,7 @@ struct acpi_srat_gicc_affinity {
#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */
-/* 4: GCC ITS Affinity (ACPI 6.2) */
+/* 4: GIC ITS Affinity (ACPI 6.2) */
struct acpi_srat_gic_its_affinity {
struct acpi_subtable_header header;
@@ -272,14 +278,20 @@ struct acpi_srat_gic_its_affinity {
u32 its_id;
};
-/* 5: Generic Initiator Affinity Structure (ACPI 6.3) */
+/*
+ * Common structure for SRAT subtable types:
+ * 5: ACPI_SRAT_TYPE_GENERIC_AFFINITY
+ * 6: ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY
+ */
+
+#define ACPI_SRAT_DEVICE_HANDLE_SIZE 16
struct acpi_srat_generic_affinity {
struct acpi_subtable_header header;
u8 reserved;
u8 device_handle_type;
u32 proximity_domain;
- u8 device_handle[16];
+ u8 device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
u32 flags;
u32 reserved1;
};
@@ -289,6 +301,21 @@ struct acpi_srat_generic_affinity {
#define ACPI_SRAT_GENERIC_AFFINITY_ENABLED (1) /* 00: Use affinity structure */
#define ACPI_SRAT_ARCHITECTURAL_TRANSACTIONS (1<<1) /* ACPI 6.4 */
+/* 7: RINTC Affinity Structure (ACPI 6.6) */
+
+struct acpi_srat_rintc_affinity {
+ struct acpi_subtable_header header;
+ u16 reserved;
+ u32 proximity_domain;
+ u32 acpi_processor_uid;
+ u32 flags;
+ u32 clock_domain;
+};
+
+/* Flags for struct acpi_srat_rintc_affinity */
+
+#define ACPI_SRAT_RINTC_ENABLED (1) /* 00: Use affinity structure */
+
/*******************************************************************************
*
* STAO - Status Override Table (_STA override) - ACPI 6.0
@@ -438,6 +465,8 @@ struct acpi_tpm2_phy {
#define ACPI_TPM2_RESERVED10 10
#define ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC 11 /* V1.2 Rev 8 */
#define ACPI_TPM2_RESERVED 12
+#define ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON 13
+#define ACPI_TPM2_CRB_WITH_ARM_FFA 15
/* Optional trailer appears after any start_method subtables */
@@ -723,6 +752,10 @@ struct acpi_table_wpbt {
u16 arguments_length;
};
+struct acpi_wpbt_unicode {
+ u16 *unicode_string;
+};
+
/*******************************************************************************
*
* WSMT - Windows SMM Security Mitigations Table
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 92c71dfce0d5..8fe893d776dd 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -3,7 +3,7 @@
*
* Name: actypes.h - Common data types for the entire ACPI subsystem
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -507,9 +507,12 @@ typedef u64 acpi_integer;
/* Pointer/Integer type conversions */
#define ACPI_TO_POINTER(i) ACPI_CAST_PTR (void, (acpi_size) (i))
+#ifndef ACPI_TO_INTEGER
#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) 0)
+#endif
+#ifndef ACPI_OFFSET
#define ACPI_OFFSET(d, f) ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) 0)
-#define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i)
+#endif
#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i)
/* Optimizations for 4-character (32-bit) acpi_name manipulation */
@@ -519,12 +522,12 @@ typedef u64 acpi_integer;
#define ACPI_COPY_NAMESEG(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src)))
#else
#define ACPI_COMPARE_NAMESEG(a,b) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAMESEG_SIZE))
-#define ACPI_COPY_NAMESEG(dest,src) (strncpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAMESEG_SIZE))
+#define ACPI_COPY_NAMESEG(dest,src) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAMESEG_SIZE))
#endif
/* Support for the special RSDP signature (8 characters) */
-#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
+#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, (sizeof(a) < 8) ? ACPI_NAMESEG_SIZE : 8))
#define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
/* Support for OEMx signature (x can be any character) */
@@ -536,8 +539,14 @@ typedef u64 acpi_integer;
* Can be used with access_width of struct acpi_generic_address and access_size of
* struct acpi_resource_generic_register.
*/
-#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2))
-#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1))
+#define ACPI_ACCESS_BIT_SHIFT 2
+#define ACPI_ACCESS_BYTE_SHIFT -1
+#define ACPI_ACCESS_BIT_MAX (31 - ACPI_ACCESS_BIT_SHIFT)
+#define ACPI_ACCESS_BYTE_MAX (31 - ACPI_ACCESS_BYTE_SHIFT)
+#define ACPI_ACCESS_BIT_DEFAULT (8 - ACPI_ACCESS_BIT_SHIFT)
+#define ACPI_ACCESS_BYTE_DEFAULT (8 - ACPI_ACCESS_BYTE_SHIFT)
+#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + ACPI_ACCESS_BIT_SHIFT))
+#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) + ACPI_ACCESS_BYTE_SHIFT))
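/*
 * Worked example for the macros above: a GAS access_width of 2 (word
 * access) yields ACPI_ACCESS_BIT_WIDTH(2) == 1 << (2 + 2) == 16 bits
 * and ACPI_ACCESS_BYTE_WIDTH(2) == 1 << (2 - 1) == 2 bytes. The *_MAX
 * constants cap the size argument so the shift never reaches bit 31's
 * undefined-behavior territory.
 */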
/*******************************************************************************
*
@@ -1098,6 +1107,21 @@ struct acpi_connection_info {
u8 access_length;
};
+/* Special Context data for PCC Opregion (ACPI 6.3) */
+
+struct acpi_pcc_info {
+ u8 subspace_id;
+ u16 length;
+ u8 *internal_buffer;
+};
+
+/* Special Context data for FFH Opregion (ACPI 6.5) */
+
+struct acpi_ffh_info {
+ u64 offset;
+ u64 length;
+};
+
typedef
acpi_status (*acpi_adr_space_setup) (acpi_handle region_handle,
u32 function,
@@ -1215,6 +1239,10 @@ struct acpi_mem_space_context {
struct acpi_mem_mapping *first_mm;
};
+struct acpi_data_table_mapping {
+ void *pointer;
+};
+
/*
* struct acpi_memory_list is used only if the ACPICA local cache is enabled
*/
@@ -1281,6 +1309,9 @@ typedef enum {
#define ACPI_OSI_WIN_10_RS4 0x12
#define ACPI_OSI_WIN_10_RS5 0x13
#define ACPI_OSI_WIN_10_19H1 0x14
+#define ACPI_OSI_WIN_10_20H1 0x15
+#define ACPI_OSI_WIN_11 0x16
+#define ACPI_OSI_WIN_11_22H2 0x17
/* Definitions of getopt */
@@ -1292,4 +1323,12 @@ typedef enum {
#define ACPI_FALLTHROUGH do {} while(0)
#endif
+#ifndef ACPI_FLEX_ARRAY
+#define ACPI_FLEX_ARRAY(TYPE, NAME) TYPE NAME[0]
+#endif
+
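/*
 * Illustrative only: the fallback above declares a zero-length array so
 * that a structure can still end in a variable-length member on
 * compilers without C99 flexible array members (the Linux build
 * overrides the macro elsewhere). The struct below is hypothetical:
 */
struct example_blob {
	u32 length;
	ACPI_FLEX_ARRAY(u8, data);	/* expands to "u8 data[0]" here */
};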
+#ifndef ACPI_NONSTRING
+#define ACPI_NONSTRING /* No terminating NUL character */
+#endif
+
#endif /* __ACTYPES_H__ */
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
index bc24388ce94e..25dd3e998727 100644
--- a/include/acpi/acuuid.h
+++ b/include/acpi/acuuid.h
@@ -3,7 +3,7 @@
*
* Name: acuuid.h - ACPI-related UUID/GUID definitions
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -69,5 +69,6 @@
#define UUID_HIERARCHICAL_DATA_EXTENSION "dbb8e3e6-5886-4ba6-8795-1319f52a966b"
#define UUID_CORESIGHT_GRAPH "3ecbc8b6-1d0e-4fb3-8107-e627f805c6cd"
#define UUID_USB4_CAPABILITIES "23a0d13a-26ab-486c-9c5f-0ffa525a575a"
-
+#define UUID_1ST_FUNCTION_ID "893f00a6-660c-494e-bcfd-3043f4fb67c0"
+#define UUID_2ND_FUNCTION_ID "107ededd-d381-4fd7-8da9-08e9a6c79644"
#endif /* __ACUUID_H__ */
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index 680f80960c3d..dc60f7db5524 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -27,19 +27,18 @@ extern int hest_disable;
extern int erst_disable;
#ifdef CONFIG_ACPI_APEI_GHES
extern bool ghes_disable;
+void __init acpi_ghes_init(void);
#else
#define ghes_disable 1
+static inline void acpi_ghes_init(void) { }
#endif
#ifdef CONFIG_ACPI_APEI
void __init acpi_hest_init(void);
#else
-static inline void acpi_hest_init(void) { return; }
+static inline void acpi_hest_init(void) { }
#endif
-typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
-int apei_hest_parse(apei_hest_func_t func, void *data);
-
int erst_write(const struct cper_record_header *record);
ssize_t erst_get_record_count(void);
int erst_get_record_id_begin(int *pos);
@@ -47,6 +46,8 @@ int erst_get_record_id_next(int *pos, u64 *record_id);
void erst_get_record_id_end(void);
ssize_t erst_read(u64 record_id, struct cper_record_header *record,
size_t buflen);
+ssize_t erst_read_record(u64 record_id, struct cper_record_header *record,
+ size_t buflen, size_t recordlen, const guid_t *creatorid);
int erst_clear(u64 record_id);
int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data);
diff --git a/include/acpi/battery.h b/include/acpi/battery.h
index b8d56b702c7a..c93f16dfb944 100644
--- a/include/acpi/battery.h
+++ b/include/acpi/battery.h
@@ -2,6 +2,7 @@
#ifndef __ACPI_BATTERY_H
#define __ACPI_BATTERY_H
+#include <linux/device.h>
#include <linux/power_supply.h>
#define ACPI_BATTERY_CLASS "battery"
@@ -12,12 +13,13 @@
struct acpi_battery_hook {
const char *name;
- int (*add_battery)(struct power_supply *battery);
- int (*remove_battery)(struct power_supply *battery);
+ int (*add_battery)(struct power_supply *battery, struct acpi_battery_hook *hook);
+ int (*remove_battery)(struct power_supply *battery, struct acpi_battery_hook *hook);
struct list_head list;
};
void battery_hook_register(struct acpi_battery_hook *hook);
void battery_hook_unregister(struct acpi_battery_hook *hook);
+int devm_battery_hook_register(struct device *dev, struct acpi_battery_hook *hook);
#endif
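/*
 * A minimal sketch, not from this header, of the reworked hook API: the
 * hook pointer is now passed back to the callbacks, so one callback can
 * serve several hook instances via container_of(). The my_ext type and
 * the callback body are assumptions for illustration; a driver would
 * typically pair this with devm_battery_hook_register().
 */
struct my_ext {
	struct acpi_battery_hook hook;
	int state;
};

static int my_add_battery(struct power_supply *battery,
			  struct acpi_battery_hook *hook)
{
	struct my_ext *ext = container_of(hook, struct my_ext, hook);

	ext->state++;	/* per-instance state, no globals required */
	return 0;
}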
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 9f4985b4d64d..13fa81504844 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -17,7 +17,7 @@
#include <acpi/pcc.h>
#include <acpi/processor.h>
-/* Support CPPCv2 and CPPCv3 */
+/* CPPCv2 and CPPCv3 support */
#define CPPC_V2_REV 2
#define CPPC_V3_REV 3
#define CPPC_V2_NUM_ENT 21
@@ -32,6 +32,15 @@
#define CMD_READ 0
#define CMD_WRITE 1
+#define CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE (7)
+#define CPPC_AUTO_ACT_WINDOW_EXP_BIT_SIZE (3)
+#define CPPC_AUTO_ACT_WINDOW_MAX_SIG ((1 << CPPC_AUTO_ACT_WINDOW_SIG_BIT_SIZE) - 1)
+#define CPPC_AUTO_ACT_WINDOW_MAX_EXP ((1 << CPPC_AUTO_ACT_WINDOW_EXP_BIT_SIZE) - 1)
+/* CPPC_AUTO_ACT_WINDOW_MAX_SIG is 127, so 128 and 129 will decay to 127 when writing */
+#define CPPC_AUTO_ACT_WINDOW_SIG_CARRY_THRESH 129
+
+#define CPPC_ENERGY_PERF_MAX (0xFF)
+
/* Each register has the following format. */
struct cpc_reg {
u8 descriptor;
@@ -64,6 +73,8 @@ struct cpc_desc {
int cpu_id;
int write_cmd_status;
int write_cmd_id;
+ /* Lock used for RMW operations in cpc_write() */
+ raw_spinlock_t rmw_lock;
struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT];
struct acpi_psd_package domain_info;
struct kobject kobj;
@@ -108,12 +119,15 @@ struct cppc_perf_caps {
u32 lowest_nonlinear_perf;
u32 lowest_freq;
u32 nominal_freq;
+ u32 energy_perf;
+ bool auto_sel;
};
struct cppc_perf_ctrls {
u32 max_perf;
u32 min_perf;
u32 desired_perf;
+ u32 energy_perf;
};
struct cppc_perf_fb_ctrs {
@@ -125,7 +139,6 @@ struct cppc_perf_fb_ctrs {
/* Per CPU container for runtime CPPC management. */
struct cppc_cpudata {
- struct list_head node;
struct cppc_perf_caps perf_caps;
struct cppc_perf_ctrls perf_ctrls;
struct cppc_perf_fb_ctrs perf_fb_ctrs;
@@ -135,39 +148,77 @@ struct cppc_cpudata {
#ifdef CONFIG_ACPI_CPPC_LIB
extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
+extern int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf);
+extern int cppc_get_highest_perf(int cpunum, u64 *highest_perf);
extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
+extern int cppc_set_enable(int cpu, bool enable);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
+extern bool cppc_perf_ctrs_in_pcc(void);
+extern unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf);
+extern unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq);
extern bool acpi_cpc_valid(void);
+extern bool cppc_allow_fast_switch(void);
extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
-extern unsigned int cppc_get_transition_latency(int cpu);
+extern int cppc_get_transition_latency(int cpu);
extern bool cpc_ffh_supported(void);
+extern bool cpc_supported_by_cpu(void);
extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val);
extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val);
+extern int cppc_get_epp_perf(int cpunum, u64 *epp_perf);
+extern int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable);
+extern int cppc_set_epp(int cpu, u64 epp_val);
+extern int cppc_get_auto_act_window(int cpu, u64 *auto_act_window);
+extern int cppc_set_auto_act_window(int cpu, u64 auto_act_window);
+extern int cppc_get_auto_sel(int cpu, bool *enable);
+extern int cppc_set_auto_sel(int cpu, bool enable);
+extern int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf);
+extern int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator);
+extern int amd_detect_prefcore(bool *detected);
#else /* !CONFIG_ACPI_CPPC_LIB */
static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+static inline int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
+{
+ return -EOPNOTSUPP;
+}
+static inline int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
+{
+ return -EOPNOTSUPP;
}
static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+static inline int cppc_set_enable(int cpu, bool enable)
+{
+ return -EOPNOTSUPP;
}
static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+static inline bool cppc_perf_ctrs_in_pcc(void)
+{
+ return false;
}
static inline bool acpi_cpc_valid(void)
{
return false;
}
-static inline unsigned int cppc_get_transition_latency(int cpu)
+static inline bool cppc_allow_fast_switch(void)
+{
+ return false;
+}
+static inline int cppc_get_transition_latency(int cpu)
{
- return CPUFREQ_ETERNAL;
+ return -ENODATA;
}
static inline bool cpc_ffh_supported(void)
{
@@ -175,11 +226,51 @@ static inline bool cpc_ffh_supported(void)
}
static inline int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+static inline int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
+{
+ return -EOPNOTSUPP;
+}
+static inline int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
+{
+ return -EOPNOTSUPP;
+}
+static inline int cppc_set_epp(int cpu, u64 epp_val)
+{
+ return -EOPNOTSUPP;
+}
+static inline int cppc_get_auto_act_window(int cpu, u64 *auto_act_window)
+{
+ return -EOPNOTSUPP;
+}
+static inline int cppc_set_auto_act_window(int cpu, u64 auto_act_window)
+{
+ return -EOPNOTSUPP;
+}
+static inline int cppc_get_auto_sel(int cpu, bool *enable)
+{
+ return -EOPNOTSUPP;
+}
+static inline int cppc_set_auto_sel(int cpu, bool enable)
+{
+ return -EOPNOTSUPP;
+}
+static inline int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
+{
+ return -ENODEV;
+}
+static inline int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
+{
+ return -EOPNOTSUPP;
+}
+static inline int amd_detect_prefcore(bool *detected)
+{
+ return -ENODEV;
}
#endif /* !CONFIG_ACPI_CPPC_LIB */
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index 34fb3431a8f3..ebd21b05fe6e 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -27,15 +27,14 @@ struct ghes {
struct timer_list timer;
unsigned int irq;
};
+ struct device *dev;
+ struct list_head elist;
};
struct ghes_estatus_node {
struct llist_node llnode;
struct acpi_hest_generic *generic;
struct ghes *ghes;
-
- int task_work_cpu;
- struct callback_head task_work;
};
struct ghes_estatus_cache {
@@ -69,35 +68,18 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb);
* @nb: pointer to the notifier_block structure of the vendor record handler.
*/
void ghes_unregister_vendor_record_notifier(struct notifier_block *nb);
-#endif
-
-int ghes_estatus_pool_init(int num_ghes);
-
-/* From drivers/edac/ghes_edac.c */
-#ifdef CONFIG_EDAC_GHES
-void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err);
-
-int ghes_edac_register(struct ghes *ghes, struct device *dev);
-
-void ghes_edac_unregister(struct ghes *ghes);
+struct list_head *ghes_get_devices(void);
+void ghes_estatus_pool_region_free(unsigned long addr, u32 size);
#else
-static inline void ghes_edac_report_mem_error(int sev,
- struct cper_sec_mem_err *mem_err)
-{
-}
-
-static inline int ghes_edac_register(struct ghes *ghes, struct device *dev)
-{
- return -ENODEV;
-}
+static inline struct list_head *ghes_get_devices(void) { return NULL; }
-static inline void ghes_edac_unregister(struct ghes *ghes)
-{
-}
+static inline void ghes_estatus_pool_region_free(unsigned long addr, u32 size) { return; }
#endif
+int ghes_estatus_pool_init(unsigned int num_ghes);
+
static inline int acpi_hest_get_version(struct acpi_hest_generic_data *gdata)
{
return gdata->revision >> 8;
@@ -145,4 +127,7 @@ int ghes_notify_sea(void);
static inline int ghes_notify_sea(void) { return -ENOENT; }
#endif
+struct notifier_block;
+extern void ghes_register_report_chain(struct notifier_block *nb);
+extern void ghes_unregister_report_chain(struct notifier_block *nb);
#endif /* GHES_H */
diff --git a/include/acpi/nhlt.h b/include/acpi/nhlt.h
new file mode 100644
index 000000000000..2108aa6d0207
--- /dev/null
+++ b/include/acpi/nhlt.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2023-2024 Intel Corporation
+ *
+ * Authors: Cezary Rojewski <cezary.rojewski@intel.com>
+ * Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
+ */
+
+#ifndef __ACPI_NHLT_H__
+#define __ACPI_NHLT_H__
+
+#include <linux/acpi.h>
+#include <linux/kconfig.h>
+#include <linux/overflow.h>
+#include <linux/types.h>
+
+#define __acpi_nhlt_endpoint_config(ep) ((void *)((ep) + 1))
+#define __acpi_nhlt_config_caps(cfg) ((void *)((cfg) + 1))
+
+/**
+ * acpi_nhlt_endpoint_fmtscfg - Get the formats configuration space.
+ * @ep: the endpoint to retrieve the space for.
+ *
+ * Return: A pointer to the formats configuration space.
+ */
+static inline struct acpi_nhlt_formats_config *
+acpi_nhlt_endpoint_fmtscfg(const struct acpi_nhlt_endpoint *ep)
+{
+ struct acpi_nhlt_config *cfg = __acpi_nhlt_endpoint_config(ep);
+
+ return (struct acpi_nhlt_formats_config *)((u8 *)(cfg + 1) + cfg->capabilities_size);
+}
+
+#define __acpi_nhlt_first_endpoint(tb) \
+ ((void *)((tb) + 1))
+
+#define __acpi_nhlt_next_endpoint(ep) \
+ ((void *)((u8 *)(ep) + (ep)->length))
+
+#define __acpi_nhlt_get_endpoint(tb, ep, i) \
+ ((i) ? __acpi_nhlt_next_endpoint(ep) : __acpi_nhlt_first_endpoint(tb))
+
+#define __acpi_nhlt_first_fmtcfg(fmts) \
+ ((void *)((fmts) + 1))
+
+#define __acpi_nhlt_next_fmtcfg(fmt) \
+ ((void *)((u8 *)((fmt) + 1) + (fmt)->config.capabilities_size))
+
+#define __acpi_nhlt_get_fmtcfg(fmts, fmt, i) \
+ ((i) ? __acpi_nhlt_next_fmtcfg(fmt) : __acpi_nhlt_first_fmtcfg(fmts))
+
+/*
+ * The for_each_nhlt_*() macros rely on an iterator to deal with the
+ * variable length of each endpoint structure and the possible presence
+ * of an OED-Config used by Windows only.
+ */
+
+/**
+ * for_each_nhlt_endpoint - Iterate over endpoints in a NHLT table.
+ * @tb: the pointer to a NHLT table.
+ * @ep: the pointer to endpoint to use as loop cursor.
+ */
+#define for_each_nhlt_endpoint(tb, ep) \
+ for (unsigned int __i = 0; \
+ __i < (tb)->endpoints_count && \
+ (ep = __acpi_nhlt_get_endpoint(tb, ep, __i)); \
+ __i++)
+
+/**
+ * for_each_nhlt_fmtcfg - Iterate over format configurations.
+ * @fmts: the pointer to formats configuration space.
+ * @fmt: the pointer to format to use as loop cursor.
+ */
+#define for_each_nhlt_fmtcfg(fmts, fmt) \
+ for (unsigned int __i = 0; \
+ __i < (fmts)->formats_count && \
+ (fmt = __acpi_nhlt_get_fmtcfg(fmts, fmt, __i)); \
+ __i++)
+
+/**
+ * for_each_nhlt_endpoint_fmtcfg - Iterate over format configurations in an endpoint.
+ * @ep: the pointer to an endpoint.
+ * @fmt: the pointer to format to use as loop cursor.
+ */
+#define for_each_nhlt_endpoint_fmtcfg(ep, fmt) \
+ for_each_nhlt_fmtcfg(acpi_nhlt_endpoint_fmtscfg(ep), fmt)
+
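/*
 * Illustrative only: walking every format of every endpoint in a mapped
 * NHLT table with the iterators above. The tb pointer is assumed to come
 * from acpi_get_table() or the global-table helpers declared below.
 */
static inline void example_walk_nhlt(struct acpi_table_nhlt *tb)
{
	struct acpi_nhlt_endpoint *ep;
	struct acpi_nhlt_format_config *fmt;

	for_each_nhlt_endpoint(tb, ep)
		for_each_nhlt_endpoint_fmtcfg(ep, fmt)
			; /* fmt->format describes one PCM configuration */
}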
+#if IS_ENABLED(CONFIG_ACPI_NHLT)
+
+/*
+ * System-wide pointer to the first NHLT table.
+ *
+ * A sound driver may call acpi_nhlt_get/put_gbl_table() during its
+ * initialization and removal, respectively, to avoid repeatedly
+ * mapping and unmapping the memory occupied by the table between
+ * streaming operations.
+ */
+
+acpi_status acpi_nhlt_get_gbl_table(void);
+void acpi_nhlt_put_gbl_table(void);
+
+bool acpi_nhlt_endpoint_match(const struct acpi_nhlt_endpoint *ep,
+ int link_type, int dev_type, int dir, int bus_id);
+struct acpi_nhlt_endpoint *
+acpi_nhlt_tb_find_endpoint(const struct acpi_table_nhlt *tb,
+ int link_type, int dev_type, int dir, int bus_id);
+struct acpi_nhlt_endpoint *
+acpi_nhlt_find_endpoint(int link_type, int dev_type, int dir, int bus_id);
+struct acpi_nhlt_format_config *
+acpi_nhlt_endpoint_find_fmtcfg(const struct acpi_nhlt_endpoint *ep,
+ u16 ch, u32 rate, u16 vbps, u16 bps);
+struct acpi_nhlt_format_config *
+acpi_nhlt_tb_find_fmtcfg(const struct acpi_table_nhlt *tb,
+ int link_type, int dev_type, int dir, int bus_id,
+ u16 ch, u32 rate, u16 vpbs, u16 bps);
+struct acpi_nhlt_format_config *
+acpi_nhlt_find_fmtcfg(int link_type, int dev_type, int dir, int bus_id,
+ u16 ch, u32 rate, u16 vpbs, u16 bps);
+int acpi_nhlt_endpoint_mic_count(const struct acpi_nhlt_endpoint *ep);
+
+#else /* !CONFIG_ACPI_NHLT */
+
+static inline acpi_status acpi_nhlt_get_gbl_table(void)
+{
+ return AE_NOT_FOUND;
+}
+
+static inline void acpi_nhlt_put_gbl_table(void)
+{
+}
+
+static inline bool
+acpi_nhlt_endpoint_match(const struct acpi_nhlt_endpoint *ep,
+ int link_type, int dev_type, int dir, int bus_id)
+{
+ return false;
+}
+
+static inline struct acpi_nhlt_endpoint *
+acpi_nhlt_tb_find_endpoint(const struct acpi_table_nhlt *tb,
+ int link_type, int dev_type, int dir, int bus_id)
+{
+ return NULL;
+}
+
+static inline struct acpi_nhlt_format_config *
+acpi_nhlt_endpoint_find_fmtcfg(const struct acpi_nhlt_endpoint *ep,
+ u16 ch, u32 rate, u16 vbps, u16 bps)
+{
+ return NULL;
+}
+
+static inline struct acpi_nhlt_format_config *
+acpi_nhlt_tb_find_fmtcfg(const struct acpi_table_nhlt *tb,
+ int link_type, int dev_type, int dir, int bus_id,
+ u16 ch, u32 rate, u16 vpbs, u16 bps)
+{
+ return NULL;
+}
+
+static inline int acpi_nhlt_endpoint_mic_count(const struct acpi_nhlt_endpoint *ep)
+{
+ return 0;
+}
+
+static inline struct acpi_nhlt_endpoint *
+acpi_nhlt_find_endpoint(int link_type, int dev_type, int dir, int bus_id)
+{
+ return NULL;
+}
+
+static inline struct acpi_nhlt_format_config *
+acpi_nhlt_find_fmtcfg(int link_type, int dev_type, int dir, int bus_id,
+ u16 ch, u32 rate, u16 vbps, u16 bps)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_ACPI_NHLT */
+
+#endif /* __ACPI_NHLT_H__ */
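A minimal driver-side sketch of the lookup API above (the probe function
and its parameter values are illustrative; error handling is trimmed):

	static int example_probe(int link_type, int dev_type, int dir, int bus_id)
	{
		struct acpi_nhlt_format_config *fmt;

		if (ACPI_FAILURE(acpi_nhlt_get_gbl_table()))
			return -ENODEV;

		/* 2 channels, 48 kHz, 24 valid bits in a 32-bit container. */
		fmt = acpi_nhlt_find_fmtcfg(link_type, dev_type, dir, bus_id,
					    2, 48000, 24, 32);
		if (!fmt) {
			acpi_nhlt_put_gbl_table();
			return -ENOENT;
		}

		/* Configure streaming from *fmt; keep the table mapped and
		 * call acpi_nhlt_put_gbl_table() on removal. */
		return 0;
	}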
diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h
index 4dec4ed138cd..9af3b502f839 100644
--- a/include/acpi/pcc.h
+++ b/include/acpi/pcc.h
@@ -9,18 +9,70 @@
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
+struct pcc_mbox_chan {
+ struct mbox_chan *mchan;
+ u64 shmem_base_addr;
+ void __iomem *shmem;
+ u64 shmem_size;
+ u32 latency;
+ u32 max_access_rate;
+ u16 min_turnaround_time;
+
+ /* Set to true to indicate that the mailbox should manage
+ * writing the data to the shared buffer. This differs from
+ * the case where the drivers write to the buffer themselves
+ * and use send_data only to ring the doorbell. If this flag
+ * is set, the void * data parameter of send_data must point
+ * to a kernel-memory buffer formatted in accordance with the
+ * PCC specification.
+ *
+ * The active buffer management includes reading the
+ * notify_on_completion flag and calling mbox_chan_txdone()
+ * when the acknowledgment interrupt is received.
+ */
+ bool manage_writes;
+
+ /* Optional callback that allows the driver to allocate the
+ * memory used for receiving messages. The return value is the
+ * location inside the buffer where the mailbox should write
+ * the data.
+ */
+ void *(*rx_alloc)(struct mbox_client *cl, int size);
+};
+
+struct pcc_header {
+ u32 signature;
+ u32 flags;
+ u32 length;
+ u32 command;
+};
+
+/* Generic Communications Channel Shared Memory Region */
+#define PCC_SIGNATURE 0x50434300
+/* Generic Communications Channel Command Field */
+#define PCC_CMD_GENERATE_DB_INTR BIT(15)
+/* Generic Communications Channel Status Field */
+#define PCC_STATUS_CMD_COMPLETE BIT(0)
+#define PCC_STATUS_SCI_DOORBELL BIT(1)
+#define PCC_STATUS_ERROR BIT(2)
+#define PCC_STATUS_PLATFORM_NOTIFY BIT(3)
+/* Initiator Responder Communications Channel Flags */
+#define PCC_CMD_COMPLETION_NOTIFY BIT(0)
+
#define MAX_PCC_SUBSPACES 256
+
#ifdef CONFIG_PCC
-extern struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
- int subspace_id);
-extern void pcc_mbox_free_channel(struct mbox_chan *chan);
+extern struct pcc_mbox_chan *
+pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id);
+extern void pcc_mbox_free_channel(struct pcc_mbox_chan *chan);
#else
-static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
- int subspace_id)
+static inline struct pcc_mbox_chan *
+pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
{
return ERR_PTR(-ENODEV);
}
-static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { }
+static inline void pcc_mbox_free_channel(struct pcc_mbox_chan *chan) { }
#endif
#endif /* _PCC_H */
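A sketch of a client round trip under the new interface; the subspace
`id`, the message layout, and the use of managed writes are assumptions
for illustration:

	static int pcc_example(struct mbox_client *cl, int id)
	{
		struct pcc_mbox_chan *pchan;
		struct {
			struct pcc_header hdr;
			u32 payload;
		} msg = {
			.hdr.signature = PCC_SIGNATURE | id,
			.hdr.flags = PCC_CMD_COMPLETION_NOTIFY,
			.hdr.length = sizeof(msg),
		};

		pchan = pcc_mbox_request_channel(cl, id);
		if (IS_ERR(pchan))
			return PTR_ERR(pchan);

		/* With manage_writes set, the mailbox copies the message
		 * into pchan->shmem and rings the doorbell itself. */
		pchan->manage_writes = true;
		mbox_send_message(pchan->mchan, &msg);

		pcc_mbox_free_channel(pchan);
		return 0;
	}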
diff --git a/include/acpi/pdc_intel.h b/include/acpi/pdc_intel.h
deleted file mode 100644
index 967c552d1cd3..000000000000
--- a/include/acpi/pdc_intel.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/* _PDC bit definition for Intel processors */
-
-#ifndef __PDC_INTEL_H__
-#define __PDC_INTEL_H__
-
-#define ACPI_PDC_P_FFH (0x0001)
-#define ACPI_PDC_C_C1_HALT (0x0002)
-#define ACPI_PDC_T_FFH (0x0004)
-#define ACPI_PDC_SMP_C1PT (0x0008)
-#define ACPI_PDC_SMP_C2C3 (0x0010)
-#define ACPI_PDC_SMP_P_SWCOORD (0x0020)
-#define ACPI_PDC_SMP_C_SWCOORD (0x0040)
-#define ACPI_PDC_SMP_T_SWCOORD (0x0080)
-#define ACPI_PDC_C_C1_FFH (0x0100)
-#define ACPI_PDC_C_C2C3_FFH (0x0200)
-#define ACPI_PDC_SMP_P_HWCOORD (0x0800)
-
-#define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \
- ACPI_PDC_C_C1_HALT | \
- ACPI_PDC_P_FFH)
-
-#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \
- ACPI_PDC_C_C1_HALT | \
- ACPI_PDC_SMP_P_SWCOORD | \
- ACPI_PDC_SMP_P_HWCOORD | \
- ACPI_PDC_P_FFH)
-
-#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \
- ACPI_PDC_SMP_C1PT | \
- ACPI_PDC_C_C1_HALT | \
- ACPI_PDC_C_C1_FFH | \
- ACPI_PDC_C_C2C3_FFH)
-
-#endif /* __PDC_INTEL_H__ */
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index e8958e0d1646..a11fa83955f8 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -3,7 +3,7 @@
*
* Name: acenv.h - Host and compiler configuration
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -148,15 +148,12 @@
*
*****************************************************************************/
-#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#if defined(__GNUC__)
#include <acpi/platform/acgcc.h>
#elif defined(_MSC_VER)
#include "acmsvc.h"
-#elif defined(__INTEL_COMPILER)
-#include <acpi/platform/acintel.h>
-
#endif
#if defined(_LINUX) || defined(__linux__)
@@ -212,6 +209,8 @@
#elif defined(_AED_EFI) || defined(_GNU_EFI) || defined(_EDK2_EFI)
#include "acefi.h"
+#elif defined(__ZEPHYR__)
+#include "aczephyr.h"
#else
/* Unknown environment */
@@ -253,6 +252,12 @@
#define ACPI_RELEASE_GLOBAL_LOCK(Glptr, pending) pending = 0
#endif
+/* NULL/invalid value to use for destroyed or not-yet-created semaphores. */
+
+#ifndef ACPI_SEMAPHORE_NULL
+#define ACPI_SEMAPHORE_NULL NULL
+#endif
+
/* Flush CPU cache - used when going to sleep. Wbinvd or similar. */
#ifndef ACPI_FLUSH_CPU_CACHE
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 277fe2fa4d9b..8ffc4e1c87cf 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -3,7 +3,7 @@
*
* Name: acenvex.h - Extra host and compiler configuration
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -35,7 +35,7 @@
#endif
-#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
+#if defined(__GNUC__)
#include "acgccex.h"
#elif defined(_MSC_VER)
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index f6656be81760..8e4cf2f6b383 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -3,28 +3,20 @@
*
* Name: acgcc.h - GCC specific defines, etc.
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
#ifndef __ACGCC_H__
#define __ACGCC_H__
-/*
- * Use compiler specific <stdarg.h> is a good practice for even when
- * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined.
- */
#ifndef va_arg
-#ifdef ACPI_USE_BUILTIN_STDARG
-typedef __builtin_va_list va_list;
-#define va_start(v, l) __builtin_va_start(v, l)
-#define va_end(v) __builtin_va_end(v)
-#define va_arg(v, l) __builtin_va_arg(v, l)
-#define va_copy(d, s) __builtin_va_copy(d, s)
+#ifdef __KERNEL__
+#include <linux/stdarg.h>
#else
#include <stdarg.h>
-#endif
-#endif
+#endif /* __KERNEL__ */
+#endif /* ! va_arg */
#define ACPI_INLINE __inline__
@@ -69,4 +61,23 @@ typedef __builtin_va_list va_list;
#define ACPI_FALLTHROUGH __attribute__((__fallthrough__))
#endif
+/*
+ * Flexible array members are not allowed to be part of a union under
+ * C99, but this is not for any technical reason. Work around the
+ * limitation.
+ */
+#define ACPI_FLEX_ARRAY(TYPE, NAME) \
+ struct { \
+ struct { } __Empty_ ## NAME; \
+ TYPE NAME[]; \
+ }
+
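For illustration, the macro lets a flexible array share a union with
other members, e.g. to expose both byte and typed views of a trailing
buffer (the struct below is hypothetical):

	struct example_resource {
		u32 length;
		union {
			ACPI_FLEX_ARRAY(u8, bytes);
			ACPI_FLEX_ARRAY(u32, dwords);
		};
	};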
+/*
+ * Explicitly mark strings that lack a terminating NUL character so
+ * that ACPICA can be built with -Wunterminated-string-initialization.
+ */
+#if __has_attribute(__nonstring__)
+#define ACPI_NONSTRING __attribute__((__nonstring__))
+#endif
+
#endif /* __ACGCC_H__ */
diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h
index 738d52865e0a..4a3c019a4d03 100644
--- a/include/acpi/platform/acgccex.h
+++ b/include/acpi/platform/acgccex.h
@@ -3,7 +3,7 @@
*
* Name: acgccex.h - Extra GCC specific defines, etc.
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h
deleted file mode 100644
index 550fe9a8cd6c..000000000000
--- a/include/acpi/platform/acintel.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
-/******************************************************************************
- *
- * Name: acintel.h - VC specific defines, etc.
- *
- * Copyright (C) 2000 - 2021, Intel Corp.
- *
- *****************************************************************************/
-
-#ifndef __ACINTEL_H__
-#define __ACINTEL_H__
-
-/*
- * Use compiler specific <stdarg.h> is a good practice for even when
- * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined.
- */
-#ifndef va_arg
-#include <stdarg.h>
-#endif
-
-/* Configuration specific to Intel 64-bit C compiler */
-
-#define COMPILER_DEPENDENT_INT64 __int64
-#define COMPILER_DEPENDENT_UINT64 unsigned __int64
-#define ACPI_INLINE __inline
-
-/*
- * Calling conventions:
- *
- * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
- * ACPI_EXTERNAL_XFACE - External ACPI interfaces
- * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
- * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
- */
-#define ACPI_SYSTEM_XFACE
-#define ACPI_EXTERNAL_XFACE
-#define ACPI_INTERNAL_XFACE
-#define ACPI_INTERNAL_VAR_XFACE
-
-/* remark 981 - operands evaluated in no particular order */
-#pragma warning(disable:981)
-
-/* warn C4100: unreferenced formal parameter */
-#pragma warning(disable:4100)
-
-/* warn C4127: conditional expression is constant */
-#pragma warning(disable:4127)
-
-/* warn C4706: assignment within conditional expression */
-#pragma warning(disable:4706)
-
-/* warn C4214: bit field types other than int */
-#pragma warning(disable:4214)
-
-#endif /* __ACINTEL_H__ */
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index b3ffb9bbf664..edbbc9061d1e 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -3,7 +3,7 @@
*
* Name: aclinux.h - OS specific defines, etc. for Linux
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -15,7 +15,7 @@
/* ACPICA external files should not include ACPICA headers directly. */
#if !defined(BUILDING_ACPICA) && !defined(_LINUX_ACPI_H)
-#error "Please don't include <acpi/acpi.h> directly, include <linux/acpi.h> instead."
+#error "Please do not include <acpi/acpi.h> directly, include <linux/acpi.h> instead."
#endif
#endif
@@ -114,6 +114,11 @@
#define acpi_raw_spinlock raw_spinlock_t *
#define acpi_cpu_flags unsigned long
+#define acpi_uintptr_t uintptr_t
+
+#define ACPI_TO_INTEGER(p) ((uintptr_t)(p))
+#define ACPI_OFFSET(d, f) offsetof(d, f)
+
/* Use native linux version of acpi_os_allocate_zeroed */
#define USE_NATIVE_ALLOCATE_ZEROED
@@ -175,7 +180,11 @@
#define ACPI_USE_STANDARD_HEADERS
#ifdef ACPI_USE_STANDARD_HEADERS
+#include <stddef.h>
#include <unistd.h>
+#include <stdint.h>
+
+#define ACPI_OFFSET(d, f) offsetof(d, f)
#endif
/* Define/disable kernel-specific declarators */
@@ -194,7 +203,7 @@
#if defined(__ia64__) || (defined(__x86_64__) && !defined(__ILP32__)) ||\
defined(__aarch64__) || defined(__PPC64__) ||\
- defined(__s390x__) ||\
+ defined(__s390x__) || defined(__loongarch__) ||\
(defined(__riscv) && (defined(__LP64__) || defined(_LP64)))
#define ACPI_MACHINE_WIDTH 64
#define COMPILER_DEPENDENT_INT64 long
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index 5f642b07ad64..73265650f46b 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -3,7 +3,7 @@
*
* Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
*
- * Copyright (C) 2000 - 2021, Intel Corp.
+ * Copyright (C) 2000 - 2025, Intel Corp.
*
*****************************************************************************/
@@ -46,28 +46,24 @@ acpi_status acpi_os_terminate(void);
* Interrupts are off during resume, just like they are for boot.
* However, boot has (system_state != SYSTEM_RUNNING)
* to quiet __might_sleep() in kmalloc() and resume does not.
+ *
+ * These specialized allocators have to be macros for their allocations to be
+ * accounted separately (to have separate alloc_tag).
*/
-static inline void *acpi_os_allocate(acpi_size size)
-{
- return kmalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
-}
+#define acpi_os_allocate(_size) \
+ kmalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
-static inline void *acpi_os_allocate_zeroed(acpi_size size)
-{
- return kzalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
-}
+#define acpi_os_allocate_zeroed(_size) \
+ kzalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
+
+#define acpi_os_acquire_object(_cache) \
+ kmem_cache_zalloc(_cache, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
static inline void acpi_os_free(void *memory)
{
kfree(memory);
}
-static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
-{
- return kmem_cache_zalloc(cache,
- irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
-}
-
static inline acpi_thread_id acpi_os_get_thread_id(void)
{
return (acpi_thread_id) (unsigned long)current;
diff --git a/include/acpi/platform/aczephyr.h b/include/acpi/platform/aczephyr.h
new file mode 100644
index 000000000000..03d9a4a39c80
--- /dev/null
+++ b/include/acpi/platform/aczephyr.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/******************************************************************************
+ *
+ * Module Name: aczephyr.h - OS specific defines, etc.
+ *
+ * Copyright (C) 2000 - 2025, Intel Corp.
+ *
+ *****************************************************************************/
+
+#ifndef __ACZEPHYR_H__
+#define __ACZEPHYR_H__
+
+#define ACPI_MACHINE_WIDTH 64
+
+#define ACPI_NO_ERROR_MESSAGES
+#undef ACPI_DEBUG_OUTPUT
+#define ACPI_USE_SYSTEM_CLIBRARY
+#undef ACPI_DBG_TRACK_ALLOCATIONS
+#define ACPI_SINGLE_THREADED
+#define ACPI_USE_NATIVE_RSDP_POINTER
+
+#include <zephyr/kernel.h>
+#include <zephyr/device.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <zephyr/fs/fs.h>
+#include <zephyr/sys/printk.h>
+#include <zephyr/sys/__assert.h>
+
+/******************************************************************************
+ *
+ * FUNCTION: acpi_enable_dbg_print
+ *
+ * PARAMETERS: enable - Enable/disable debug print
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Enable/disable debug print
+ *
+ *****************************************************************************/
+
+void acpi_enable_dbg_print(bool enable);
+#endif
diff --git a/include/acpi/proc_cap_intel.h b/include/acpi/proc_cap_intel.h
new file mode 100644
index 000000000000..ddcdc41d6c3e
--- /dev/null
+++ b/include/acpi/proc_cap_intel.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Vendor specific processor capabilities bit definition
+ * for Intel processors. Those bits are used to convey OSPM
+ * power management capabilities to the platform.
+ */
+
+#ifndef __PROC_CAP_INTEL_H__
+#define __PROC_CAP_INTEL_H__
+
+#define ACPI_PROC_CAP_P_FFH (0x0001)
+#define ACPI_PROC_CAP_C_C1_HALT (0x0002)
+#define ACPI_PROC_CAP_T_FFH (0x0004)
+#define ACPI_PROC_CAP_SMP_C1PT (0x0008)
+#define ACPI_PROC_CAP_SMP_C2C3 (0x0010)
+#define ACPI_PROC_CAP_SMP_P_SWCOORD (0x0020)
+#define ACPI_PROC_CAP_SMP_C_SWCOORD (0x0040)
+#define ACPI_PROC_CAP_SMP_T_SWCOORD (0x0080)
+#define ACPI_PROC_CAP_C_C1_FFH (0x0100)
+#define ACPI_PROC_CAP_C_C2C3_FFH (0x0200)
+#define ACPI_PROC_CAP_SMP_P_HWCOORD (0x0800)
+#define ACPI_PROC_CAP_COLLAB_PROC_PERF (0x1000)
+
+#define ACPI_PROC_CAP_EST_CAPABILITY_SMP (ACPI_PROC_CAP_SMP_C1PT | \
+ ACPI_PROC_CAP_C_C1_HALT | \
+ ACPI_PROC_CAP_P_FFH)
+
+#define ACPI_PROC_CAP_EST_CAPABILITY_SWSMP (ACPI_PROC_CAP_SMP_C1PT | \
+ ACPI_PROC_CAP_C_C1_HALT | \
+ ACPI_PROC_CAP_SMP_P_SWCOORD | \
+ ACPI_PROC_CAP_SMP_P_HWCOORD | \
+ ACPI_PROC_CAP_P_FFH)
+
+#define ACPI_PROC_CAP_C_CAPABILITY_SMP (ACPI_PROC_CAP_SMP_C2C3 | \
+ ACPI_PROC_CAP_SMP_C1PT | \
+ ACPI_PROC_CAP_C_C1_HALT | \
+ ACPI_PROC_CAP_C_C1_FFH | \
+ ACPI_PROC_CAP_C_C2C3_FFH)
+
+#endif /* __PROC_CAP_INTEL_H__ */
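A hypothetical example of advertising capabilities with the renamed
constants; arch code would OR the bits into the capability DWORD passed
to the platform:

	u32 proc_cap = ACPI_PROC_CAP_EST_CAPABILITY_SWSMP |
		       ACPI_PROC_CAP_C_CAPABILITY_SMP |
		       ACPI_PROC_CAP_COLLAB_PROC_PERF;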
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 683e124ad517..d0eccbd920e5 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -2,11 +2,16 @@
#ifndef __ACPI_PROCESSOR_H
#define __ACPI_PROCESSOR_H
-#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/pm_qos.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
#include <linux/thermal.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include <asm/acpi.h>
#define ACPI_PROCESSOR_CLASS "processor"
@@ -212,7 +217,7 @@ struct acpi_processor_flags {
u8 has_lpi:1;
u8 power_setup_done:1;
u8 bm_rld_set:1;
- u8 need_hotplug_init:1;
+ u8 previously_online:1;
};
struct acpi_processor {
@@ -275,6 +280,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
struct acpi_processor_cx *cx,
struct acpi_power_register *reg);
void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cstate);
+void __noreturn acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx);
#else
static inline void acpi_processor_power_init_bm_check(struct
acpi_processor_flags
@@ -295,6 +301,10 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
{
return;
}
+static inline void __noreturn acpi_processor_ffh_play_dead(struct acpi_processor_cx *cx)
+{
+ BUG();
+}
#endif
static inline int call_on_cpu(int cpu, long (*fn)(void *), void *arg,
@@ -436,9 +446,12 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr)
#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
/* in processor_thermal.c */
-int acpi_processor_get_limit_info(struct acpi_processor *pr);
+int acpi_processor_thermal_init(struct acpi_processor *pr,
+ struct acpi_device *device);
+void acpi_processor_thermal_exit(struct acpi_processor *pr,
+ struct acpi_device *device);
extern const struct thermal_cooling_device_ops processor_cooling_ops;
-#if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
+#ifdef CONFIG_CPU_FREQ
void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy);
void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy);
#else
@@ -450,6 +463,13 @@ static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
{
return;
}
-#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
+#endif /* CONFIG_CPU_FREQ */
+
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
+extern int acpi_processor_ffh_lpi_probe(unsigned int cpu);
+extern int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
+#endif
+
+void acpi_processor_init_invariance_cppc(void);
#endif
diff --git a/include/acpi/video.h b/include/acpi/video.h
index db8548ff03ce..044c463138df 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -48,15 +48,17 @@ enum acpi_backlight_type {
acpi_backlight_video,
acpi_backlight_vendor,
acpi_backlight_native,
+ acpi_backlight_nvidia_wmi_ec,
+ acpi_backlight_apple_gmux,
+ acpi_backlight_dell_uart,
};
#if IS_ENABLED(CONFIG_ACPI_VIDEO)
extern int acpi_video_register(void);
extern void acpi_video_unregister(void);
+extern void acpi_video_register_backlight(void);
extern int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid);
-extern enum acpi_backlight_type acpi_video_get_backlight_type(void);
-extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type);
/*
* Note: The value returned by acpi_video_handles_brightness_key_presses()
* may change over time and should not be cached.
@@ -65,9 +67,32 @@ extern bool acpi_video_handles_brightness_key_presses(void);
extern int acpi_video_get_levels(struct acpi_device *device,
struct acpi_video_device_brightness **dev_br,
int *pmax_level);
+
+extern enum acpi_backlight_type __acpi_video_get_backlight_type(bool native,
+ bool *auto_detect);
+
+static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
+{
+ return __acpi_video_get_backlight_type(false, NULL);
+}
+
+/*
+ * This function MUST only be called by GPU drivers to check if the driver
+ * should register a backlight class device. This function not only checks
+ * whether a GPU native backlight device should be registered, it *also* tells
+ * the ACPI video-detect code that native GPU backlight control is available.
+ * Therefore, calling this from any place other than the GPU driver is wrong!
+ * To check elsewhere whether GPU native backlight control is in use, use:
+ * if (acpi_video_get_backlight_type() == acpi_backlight_native) { ... }
+ */
+static inline bool acpi_video_backlight_use_native(void)
+{
+ return __acpi_video_get_backlight_type(true, NULL) == acpi_backlight_native;
+}
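A GPU-driver-side sketch of the intended call pattern (the helper and
the registration step are illustrative):

	static void example_gpu_backlight_setup(void)
	{
		/* Also records that native control is available, so this
		 * must only run from the GPU driver itself. */
		if (!acpi_video_backlight_use_native())
			return;

		/* ... register the GPU's native backlight device ... */
	}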
#else
static inline int acpi_video_register(void) { return -ENODEV; }
static inline void acpi_video_unregister(void) { return; }
+static inline void acpi_video_register_backlight(void) { return; }
static inline int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid)
{
@@ -77,8 +102,9 @@ static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
{
return acpi_backlight_vendor;
}
-static inline void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type)
+static inline bool acpi_video_backlight_use_native(void)
{
+ return true;
}
static inline bool acpi_video_handles_brightness_key_presses(void)
{
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 302506bbc2a4..295c94a3ccc1 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -2,14 +2,15 @@
#
# asm headers that all architectures except um should have
# (This file is not included when SRCARCH=um since UML borrows several
-# asm headers from the host architecutre.)
+# asm headers from the host architecture.)
mandatory-y += atomic.h
+mandatory-y += archrandom.h
mandatory-y += barrier.h
mandatory-y += bitops.h
mandatory-y += bug.h
-mandatory-y += bugs.h
mandatory-y += cacheflush.h
+mandatory-y += cfi.h
mandatory-y += checksum.h
mandatory-y += compat.h
mandatory-y += current.h
@@ -20,7 +21,6 @@ mandatory-y += dma-mapping.h
mandatory-y += dma.h
mandatory-y += emergency-restart.h
mandatory-y += exec.h
-mandatory-y += fb.h
mandatory-y += ftrace.h
mandatory-y += futex.h
mandatory-y += hardirq.h
@@ -45,6 +45,8 @@ mandatory-y += pci.h
mandatory-y += percpu.h
mandatory-y += pgalloc.h
mandatory-y += preempt.h
+mandatory-y += rqspinlock.h
+mandatory-y += runtime-const.h
mandatory-y += rwonce.h
mandatory-y += sections.h
mandatory-y += serial.h
@@ -57,8 +59,9 @@ mandatory-y += tlbflush.h
mandatory-y += topology.h
mandatory-y += trace_clock.h
mandatory-y += uaccess.h
-mandatory-y += unaligned.h
+mandatory-y += unwind_user.h
mandatory-y += vermagic.h
mandatory-y += vga.h
+mandatory-y += video.h
mandatory-y += word-at-a-time.h
mandatory-y += xor.h
diff --git a/include/asm-generic/access_ok.h b/include/asm-generic/access_ok.h
new file mode 100644
index 000000000000..2866ae61b1cd
--- /dev/null
+++ b/include/asm-generic/access_ok.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_ACCESS_OK_H__
+#define __ASM_GENERIC_ACCESS_OK_H__
+
+/*
+ * Checking whether a pointer is valid for user space access.
+ * These definitions work on most architectures, but overrides can
+ * be used where necessary.
+ */
+
+/*
+ * architectures with compat tasks have a variable TASK_SIZE and should
+ * override this to a constant.
+ */
+#ifndef TASK_SIZE_MAX
+#define TASK_SIZE_MAX TASK_SIZE
+#endif
+
+#ifndef __access_ok
+/*
+ * 'size' is a compile-time constant for most callers, so optimize for
+ * this case to turn the check into a single comparison against a constant
+ * limit and catch all possible overflows.
+ * On architectures with separate user address space (m68k, s390, parisc,
+ * sparc64) or those without an MMU, this should always return true.
+ *
+ * This version was originally contributed by Jonas Bonn for the
+ * OpenRISC architecture, and was found to be the most efficient
+ * for constant 'size' and 'limit' values.
+ */
+static inline int __access_ok(const void __user *ptr, unsigned long size)
+{
+ unsigned long limit = TASK_SIZE_MAX;
+ unsigned long addr = (unsigned long)ptr;
+
+ if (IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) ||
+ !IS_ENABLED(CONFIG_MMU))
+ return true;
+
+ return (size <= limit) && (addr <= (limit - size));
+}
+#endif
+
+#ifndef access_ok
+#define access_ok(addr, size) likely(__access_ok(addr, size))
+#endif
+
+#endif
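A worked example of the overflow property: with a constant size of 16
and limit = TASK_SIZE_MAX, the check folds into a single comparison, and
a wrapping address is rejected:

	/* addr = ULONG_MAX, size = 16:
	 *   size <= limit        -> true
	 *   addr <= limit - 16   -> false, access denied
	 * whereas a naive `addr + size <= limit` would wrap around
	 * to 15 and wrongly pass. */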
diff --git a/include/asm-generic/agp.h b/include/asm-generic/agp.h
new file mode 100644
index 000000000000..10db92ede168
--- /dev/null
+++ b/include/asm-generic/agp.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_AGP_H
+#define _ASM_GENERIC_AGP_H
+
+#include <asm/io.h>
+
+#define map_page_into_agp(page) do {} while (0)
+#define unmap_page_from_agp(page) do {} while (0)
+#define flush_agp_cache() mb()
+
+#endif /* _ASM_GENERIC_AGP_H */
diff --git a/include/asm-generic/archrandom.h b/include/asm-generic/archrandom.h
new file mode 100644
index 000000000000..3cd7f980cfdc
--- /dev/null
+++ b/include/asm-generic/archrandom.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_ARCHRANDOM_H__
+#define __ASM_GENERIC_ARCHRANDOM_H__
+
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
+{
+ return 0;
+}
+
+static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
+{
+ return 0;
+}
+
+#endif
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
deleted file mode 100644
index 888b6cfeed91..000000000000
--- a/include/asm-generic/atomic-instrumented.h
+++ /dev/null
@@ -1,1833 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-instrumented.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-/*
- * This file provides wrappers with KASAN instrumentation for atomic operations.
- * To use this functionality an arch's atomic.h file needs to define all
- * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
- * this file at the end. This file provides atomic_read() that forwards to
- * arch_atomic_read() for actual atomic operation.
- * Note: if an arch atomic operation is implemented by means of other atomic
- * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
- * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
- * double instrumentation.
- */
-#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
-#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
-
-#include <linux/build_bug.h>
-#include <linux/compiler.h>
-#include <linux/instrumented.h>
-
-static __always_inline int
-atomic_read(const atomic_t *v)
-{
- instrument_atomic_read(v, sizeof(*v));
- return arch_atomic_read(v);
-}
-#define atomic_read atomic_read
-
-#if defined(arch_atomic_read_acquire)
-static __always_inline int
-atomic_read_acquire(const atomic_t *v)
-{
- instrument_atomic_read(v, sizeof(*v));
- return arch_atomic_read_acquire(v);
-}
-#define atomic_read_acquire atomic_read_acquire
-#endif
-
-static __always_inline void
-atomic_set(atomic_t *v, int i)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic_set(v, i);
-}
-#define atomic_set atomic_set
-
-#if defined(arch_atomic_set_release)
-static __always_inline void
-atomic_set_release(atomic_t *v, int i)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic_set_release(v, i);
-}
-#define atomic_set_release atomic_set_release
-#endif
-
-static __always_inline void
-atomic_add(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_add(i, v);
-}
-#define atomic_add atomic_add
-
-#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return)
-static __always_inline int
-atomic_add_return(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_add_return(i, v);
-}
-#define atomic_add_return atomic_add_return
-#endif
-
-#if defined(arch_atomic_add_return_acquire)
-static __always_inline int
-atomic_add_return_acquire(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_add_return_acquire(i, v);
-}
-#define atomic_add_return_acquire atomic_add_return_acquire
-#endif
-
-#if defined(arch_atomic_add_return_release)
-static __always_inline int
-atomic_add_return_release(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_add_return_release(i, v);
-}
-#define atomic_add_return_release atomic_add_return_release
-#endif
-
-#if defined(arch_atomic_add_return_relaxed)
-static __always_inline int
-atomic_add_return_relaxed(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_add_return_relaxed(i, v);
-}
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#endif
-
-#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add)
-static __always_inline int
-atomic_fetch_add(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_add(i, v);
-}
-#define atomic_fetch_add atomic_fetch_add
-#endif
-
-#if defined(arch_atomic_fetch_add_acquire)
-static __always_inline int
-atomic_fetch_add_acquire(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_add_acquire(i, v);
-}
-#define atomic_fetch_add_acquire atomic_fetch_add_acquire
-#endif
-
-#if defined(arch_atomic_fetch_add_release)
-static __always_inline int
-atomic_fetch_add_release(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_add_release(i, v);
-}
-#define atomic_fetch_add_release atomic_fetch_add_release
-#endif
-
-#if defined(arch_atomic_fetch_add_relaxed)
-static __always_inline int
-atomic_fetch_add_relaxed(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_add_relaxed(i, v);
-}
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#endif
-
-static __always_inline void
-atomic_sub(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_sub(i, v);
-}
-#define atomic_sub atomic_sub
-
-#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return)
-static __always_inline int
-atomic_sub_return(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_sub_return(i, v);
-}
-#define atomic_sub_return atomic_sub_return
-#endif
-
-#if defined(arch_atomic_sub_return_acquire)
-static __always_inline int
-atomic_sub_return_acquire(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_sub_return_acquire(i, v);
-}
-#define atomic_sub_return_acquire atomic_sub_return_acquire
-#endif
-
-#if defined(arch_atomic_sub_return_release)
-static __always_inline int
-atomic_sub_return_release(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_sub_return_release(i, v);
-}
-#define atomic_sub_return_release atomic_sub_return_release
-#endif
-
-#if defined(arch_atomic_sub_return_relaxed)
-static __always_inline int
-atomic_sub_return_relaxed(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_sub_return_relaxed(i, v);
-}
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#endif
-
-#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub)
-static __always_inline int
-atomic_fetch_sub(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_sub(i, v);
-}
-#define atomic_fetch_sub atomic_fetch_sub
-#endif
-
-#if defined(arch_atomic_fetch_sub_acquire)
-static __always_inline int
-atomic_fetch_sub_acquire(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_sub_acquire(i, v);
-}
-#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#endif
-
-#if defined(arch_atomic_fetch_sub_release)
-static __always_inline int
-atomic_fetch_sub_release(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_sub_release(i, v);
-}
-#define atomic_fetch_sub_release atomic_fetch_sub_release
-#endif
-
-#if defined(arch_atomic_fetch_sub_relaxed)
-static __always_inline int
-atomic_fetch_sub_relaxed(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_sub_relaxed(i, v);
-}
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-#endif
-
-#if defined(arch_atomic_inc)
-static __always_inline void
-atomic_inc(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_inc(v);
-}
-#define atomic_inc atomic_inc
-#endif
-
-#if defined(arch_atomic_inc_return)
-static __always_inline int
-atomic_inc_return(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_inc_return(v);
-}
-#define atomic_inc_return atomic_inc_return
-#endif
-
-#if defined(arch_atomic_inc_return_acquire)
-static __always_inline int
-atomic_inc_return_acquire(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_inc_return_acquire(v);
-}
-#define atomic_inc_return_acquire atomic_inc_return_acquire
-#endif
-
-#if defined(arch_atomic_inc_return_release)
-static __always_inline int
-atomic_inc_return_release(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_inc_return_release(v);
-}
-#define atomic_inc_return_release atomic_inc_return_release
-#endif
-
-#if defined(arch_atomic_inc_return_relaxed)
-static __always_inline int
-atomic_inc_return_relaxed(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_inc_return_relaxed(v);
-}
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#endif
-
-#if defined(arch_atomic_fetch_inc)
-static __always_inline int
-atomic_fetch_inc(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_inc(v);
-}
-#define atomic_fetch_inc atomic_fetch_inc
-#endif
-
-#if defined(arch_atomic_fetch_inc_acquire)
-static __always_inline int
-atomic_fetch_inc_acquire(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_inc_acquire(v);
-}
-#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#endif
-
-#if defined(arch_atomic_fetch_inc_release)
-static __always_inline int
-atomic_fetch_inc_release(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_inc_release(v);
-}
-#define atomic_fetch_inc_release atomic_fetch_inc_release
-#endif
-
-#if defined(arch_atomic_fetch_inc_relaxed)
-static __always_inline int
-atomic_fetch_inc_relaxed(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_inc_relaxed(v);
-}
-#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-#endif
-
-#if defined(arch_atomic_dec)
-static __always_inline void
-atomic_dec(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_dec(v);
-}
-#define atomic_dec atomic_dec
-#endif
-
-#if defined(arch_atomic_dec_return)
-static __always_inline int
-atomic_dec_return(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_dec_return(v);
-}
-#define atomic_dec_return atomic_dec_return
-#endif
-
-#if defined(arch_atomic_dec_return_acquire)
-static __always_inline int
-atomic_dec_return_acquire(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_dec_return_acquire(v);
-}
-#define atomic_dec_return_acquire atomic_dec_return_acquire
-#endif
-
-#if defined(arch_atomic_dec_return_release)
-static __always_inline int
-atomic_dec_return_release(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_dec_return_release(v);
-}
-#define atomic_dec_return_release atomic_dec_return_release
-#endif
-
-#if defined(arch_atomic_dec_return_relaxed)
-static __always_inline int
-atomic_dec_return_relaxed(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_dec_return_relaxed(v);
-}
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
-#endif
-
-#if defined(arch_atomic_fetch_dec)
-static __always_inline int
-atomic_fetch_dec(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_dec(v);
-}
-#define atomic_fetch_dec atomic_fetch_dec
-#endif
-
-#if defined(arch_atomic_fetch_dec_acquire)
-static __always_inline int
-atomic_fetch_dec_acquire(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_dec_acquire(v);
-}
-#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#endif
-
-#if defined(arch_atomic_fetch_dec_release)
-static __always_inline int
-atomic_fetch_dec_release(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_dec_release(v);
-}
-#define atomic_fetch_dec_release atomic_fetch_dec_release
-#endif
-
-#if defined(arch_atomic_fetch_dec_relaxed)
-static __always_inline int
-atomic_fetch_dec_relaxed(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_dec_relaxed(v);
-}
-#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-#endif
-
-static __always_inline void
-atomic_and(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_and(i, v);
-}
-#define atomic_and atomic_and
-
-#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and)
-static __always_inline int
-atomic_fetch_and(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_and(i, v);
-}
-#define atomic_fetch_and atomic_fetch_and
-#endif
-
-#if defined(arch_atomic_fetch_and_acquire)
-static __always_inline int
-atomic_fetch_and_acquire(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_and_acquire(i, v);
-}
-#define atomic_fetch_and_acquire atomic_fetch_and_acquire
-#endif
-
-#if defined(arch_atomic_fetch_and_release)
-static __always_inline int
-atomic_fetch_and_release(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_and_release(i, v);
-}
-#define atomic_fetch_and_release atomic_fetch_and_release
-#endif
-
-#if defined(arch_atomic_fetch_and_relaxed)
-static __always_inline int
-atomic_fetch_and_relaxed(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_and_relaxed(i, v);
-}
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#endif
-
-#if defined(arch_atomic_andnot)
-static __always_inline void
-atomic_andnot(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_andnot(i, v);
-}
-#define atomic_andnot atomic_andnot
-#endif
-
-#if defined(arch_atomic_fetch_andnot)
-static __always_inline int
-atomic_fetch_andnot(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot(i, v);
-}
-#define atomic_fetch_andnot atomic_fetch_andnot
-#endif
-
-#if defined(arch_atomic_fetch_andnot_acquire)
-static __always_inline int
-atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot_acquire(i, v);
-}
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#endif
-
-#if defined(arch_atomic_fetch_andnot_release)
-static __always_inline int
-atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot_release(i, v);
-}
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#endif
-
-#if defined(arch_atomic_fetch_andnot_relaxed)
-static __always_inline int
-atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_andnot_relaxed(i, v);
-}
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#endif
-
-static __always_inline void
-atomic_or(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_or(i, v);
-}
-#define atomic_or atomic_or
-
-#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or)
-static __always_inline int
-atomic_fetch_or(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_or(i, v);
-}
-#define atomic_fetch_or atomic_fetch_or
-#endif
-
-#if defined(arch_atomic_fetch_or_acquire)
-static __always_inline int
-atomic_fetch_or_acquire(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_or_acquire(i, v);
-}
-#define atomic_fetch_or_acquire atomic_fetch_or_acquire
-#endif
-
-#if defined(arch_atomic_fetch_or_release)
-static __always_inline int
-atomic_fetch_or_release(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_or_release(i, v);
-}
-#define atomic_fetch_or_release atomic_fetch_or_release
-#endif
-
-#if defined(arch_atomic_fetch_or_relaxed)
-static __always_inline int
-atomic_fetch_or_relaxed(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_or_relaxed(i, v);
-}
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#endif
-
-static __always_inline void
-atomic_xor(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic_xor(i, v);
-}
-#define atomic_xor atomic_xor
-
-#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor)
-static __always_inline int
-atomic_fetch_xor(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_xor(i, v);
-}
-#define atomic_fetch_xor atomic_fetch_xor
-#endif
-
-#if defined(arch_atomic_fetch_xor_acquire)
-static __always_inline int
-atomic_fetch_xor_acquire(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_xor_acquire(i, v);
-}
-#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#endif
-
-#if defined(arch_atomic_fetch_xor_release)
-static __always_inline int
-atomic_fetch_xor_release(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_xor_release(i, v);
-}
-#define atomic_fetch_xor_release atomic_fetch_xor_release
-#endif
-
-#if defined(arch_atomic_fetch_xor_relaxed)
-static __always_inline int
-atomic_fetch_xor_relaxed(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_xor_relaxed(i, v);
-}
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-#endif
-
-#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg)
-static __always_inline int
-atomic_xchg(atomic_t *v, int i)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_xchg(v, i);
-}
-#define atomic_xchg atomic_xchg
-#endif
-
-#if defined(arch_atomic_xchg_acquire)
-static __always_inline int
-atomic_xchg_acquire(atomic_t *v, int i)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_xchg_acquire(v, i);
-}
-#define atomic_xchg_acquire atomic_xchg_acquire
-#endif
-
-#if defined(arch_atomic_xchg_release)
-static __always_inline int
-atomic_xchg_release(atomic_t *v, int i)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_xchg_release(v, i);
-}
-#define atomic_xchg_release atomic_xchg_release
-#endif
-
-#if defined(arch_atomic_xchg_relaxed)
-static __always_inline int
-atomic_xchg_relaxed(atomic_t *v, int i)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_xchg_relaxed(v, i);
-}
-#define atomic_xchg_relaxed atomic_xchg_relaxed
-#endif
-
-#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg)
-static __always_inline int
-atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_cmpxchg(v, old, new);
-}
-#define atomic_cmpxchg atomic_cmpxchg
-#endif
-
-#if defined(arch_atomic_cmpxchg_acquire)
-static __always_inline int
-atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_cmpxchg_acquire(v, old, new);
-}
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#endif
-
-#if defined(arch_atomic_cmpxchg_release)
-static __always_inline int
-atomic_cmpxchg_release(atomic_t *v, int old, int new)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_cmpxchg_release(v, old, new);
-}
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#endif
-
-#if defined(arch_atomic_cmpxchg_relaxed)
-static __always_inline int
-atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-#endif
-
-#if defined(arch_atomic_try_cmpxchg)
-static __always_inline bool
-atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg(v, old, new);
-}
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-#endif
-
-#if defined(arch_atomic_try_cmpxchg_acquire)
-static __always_inline bool
-atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg_acquire(v, old, new);
-}
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#endif
-
-#if defined(arch_atomic_try_cmpxchg_release)
-static __always_inline bool
-atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg_release(v, old, new);
-}
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#endif
-
-#if defined(arch_atomic_try_cmpxchg_relaxed)
-static __always_inline bool
-atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
-#endif
-
-#if defined(arch_atomic_sub_and_test)
-static __always_inline bool
-atomic_sub_and_test(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_sub_and_test(i, v);
-}
-#define atomic_sub_and_test atomic_sub_and_test
-#endif
-
-#if defined(arch_atomic_dec_and_test)
-static __always_inline bool
-atomic_dec_and_test(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_dec_and_test(v);
-}
-#define atomic_dec_and_test atomic_dec_and_test
-#endif
-
-#if defined(arch_atomic_inc_and_test)
-static __always_inline bool
-atomic_inc_and_test(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_inc_and_test(v);
-}
-#define atomic_inc_and_test atomic_inc_and_test
-#endif
-
-#if defined(arch_atomic_add_negative)
-static __always_inline bool
-atomic_add_negative(int i, atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_add_negative(i, v);
-}
-#define atomic_add_negative atomic_add_negative
-#endif
-
-#if defined(arch_atomic_fetch_add_unless)
-static __always_inline int
-atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_fetch_add_unless(v, a, u);
-}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-#endif
-
-#if defined(arch_atomic_add_unless)
-static __always_inline bool
-atomic_add_unless(atomic_t *v, int a, int u)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_add_unless(v, a, u);
-}
-#define atomic_add_unless atomic_add_unless
-#endif
-
-#if defined(arch_atomic_inc_not_zero)
-static __always_inline bool
-atomic_inc_not_zero(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_inc_not_zero(v);
-}
-#define atomic_inc_not_zero atomic_inc_not_zero
-#endif
-
-#if defined(arch_atomic_inc_unless_negative)
-static __always_inline bool
-atomic_inc_unless_negative(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_inc_unless_negative(v);
-}
-#define atomic_inc_unless_negative atomic_inc_unless_negative
-#endif
-
-#if defined(arch_atomic_dec_unless_positive)
-static __always_inline bool
-atomic_dec_unless_positive(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_dec_unless_positive(v);
-}
-#define atomic_dec_unless_positive atomic_dec_unless_positive
-#endif
-
-#if defined(arch_atomic_dec_if_positive)
-static __always_inline int
-atomic_dec_if_positive(atomic_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic_dec_if_positive(v);
-}
-#define atomic_dec_if_positive atomic_dec_if_positive
-#endif
-
-static __always_inline s64
-atomic64_read(const atomic64_t *v)
-{
- instrument_atomic_read(v, sizeof(*v));
- return arch_atomic64_read(v);
-}
-#define atomic64_read atomic64_read
-
-#if defined(arch_atomic64_read_acquire)
-static __always_inline s64
-atomic64_read_acquire(const atomic64_t *v)
-{
- instrument_atomic_read(v, sizeof(*v));
- return arch_atomic64_read_acquire(v);
-}
-#define atomic64_read_acquire atomic64_read_acquire
-#endif
-
-static __always_inline void
-atomic64_set(atomic64_t *v, s64 i)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic64_set(v, i);
-}
-#define atomic64_set atomic64_set
-
-#if defined(arch_atomic64_set_release)
-static __always_inline void
-atomic64_set_release(atomic64_t *v, s64 i)
-{
- instrument_atomic_write(v, sizeof(*v));
- arch_atomic64_set_release(v, i);
-}
-#define atomic64_set_release atomic64_set_release
-#endif
-
-static __always_inline void
-atomic64_add(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic64_add(i, v);
-}
-#define atomic64_add atomic64_add
-
-#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return)
-static __always_inline s64
-atomic64_add_return(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_add_return(i, v);
-}
-#define atomic64_add_return atomic64_add_return
-#endif
-
-#if defined(arch_atomic64_add_return_acquire)
-static __always_inline s64
-atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_add_return_acquire(i, v);
-}
-#define atomic64_add_return_acquire atomic64_add_return_acquire
-#endif
-
-#if defined(arch_atomic64_add_return_release)
-static __always_inline s64
-atomic64_add_return_release(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_add_return_release(i, v);
-}
-#define atomic64_add_return_release atomic64_add_return_release
-#endif
-
-#if defined(arch_atomic64_add_return_relaxed)
-static __always_inline s64
-atomic64_add_return_relaxed(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_add_return_relaxed(i, v);
-}
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#endif
-
-#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add)
-static __always_inline s64
-atomic64_fetch_add(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_add(i, v);
-}
-#define atomic64_fetch_add atomic64_fetch_add
-#endif
-
-#if defined(arch_atomic64_fetch_add_acquire)
-static __always_inline s64
-atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_acquire(i, v);
-}
-#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_add_release)
-static __always_inline s64
-atomic64_fetch_add_release(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_release(i, v);
-}
-#define atomic64_fetch_add_release atomic64_fetch_add_release
-#endif
-
-#if defined(arch_atomic64_fetch_add_relaxed)
-static __always_inline s64
-atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_relaxed(i, v);
-}
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#endif
-
-static __always_inline void
-atomic64_sub(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic64_sub(i, v);
-}
-#define atomic64_sub atomic64_sub
-
-#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return)
-static __always_inline s64
-atomic64_sub_return(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_sub_return(i, v);
-}
-#define atomic64_sub_return atomic64_sub_return
-#endif
-
-#if defined(arch_atomic64_sub_return_acquire)
-static __always_inline s64
-atomic64_sub_return_acquire(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_sub_return_acquire(i, v);
-}
-#define atomic64_sub_return_acquire atomic64_sub_return_acquire
-#endif
-
-#if defined(arch_atomic64_sub_return_release)
-static __always_inline s64
-atomic64_sub_return_release(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_sub_return_release(i, v);
-}
-#define atomic64_sub_return_release atomic64_sub_return_release
-#endif
-
-#if defined(arch_atomic64_sub_return_relaxed)
-static __always_inline s64
-atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_sub_return_relaxed(i, v);
-}
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#endif
-
-#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub)
-static __always_inline s64
-atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub(i, v);
-}
-#define atomic64_fetch_sub atomic64_fetch_sub
-#endif
-
-#if defined(arch_atomic64_fetch_sub_acquire)
-static __always_inline s64
-atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub_acquire(i, v);
-}
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_sub_release)
-static __always_inline s64
-atomic64_fetch_sub_release(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub_release(i, v);
-}
-#define atomic64_fetch_sub_release atomic64_fetch_sub_release
-#endif
-
-#if defined(arch_atomic64_fetch_sub_relaxed)
-static __always_inline s64
-atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_sub_relaxed(i, v);
-}
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-#endif
-
-#if defined(arch_atomic64_inc)
-static __always_inline void
-atomic64_inc(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic64_inc(v);
-}
-#define atomic64_inc atomic64_inc
-#endif
-
-#if defined(arch_atomic64_inc_return)
-static __always_inline s64
-atomic64_inc_return(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_inc_return(v);
-}
-#define atomic64_inc_return atomic64_inc_return
-#endif
-
-#if defined(arch_atomic64_inc_return_acquire)
-static __always_inline s64
-atomic64_inc_return_acquire(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_inc_return_acquire(v);
-}
-#define atomic64_inc_return_acquire atomic64_inc_return_acquire
-#endif
-
-#if defined(arch_atomic64_inc_return_release)
-static __always_inline s64
-atomic64_inc_return_release(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_inc_return_release(v);
-}
-#define atomic64_inc_return_release atomic64_inc_return_release
-#endif
-
-#if defined(arch_atomic64_inc_return_relaxed)
-static __always_inline s64
-atomic64_inc_return_relaxed(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_inc_return_relaxed(v);
-}
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#endif
-
-#if defined(arch_atomic64_fetch_inc)
-static __always_inline s64
-atomic64_fetch_inc(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc(v);
-}
-#define atomic64_fetch_inc atomic64_fetch_inc
-#endif
-
-#if defined(arch_atomic64_fetch_inc_acquire)
-static __always_inline s64
-atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc_acquire(v);
-}
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_inc_release)
-static __always_inline s64
-atomic64_fetch_inc_release(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc_release(v);
-}
-#define atomic64_fetch_inc_release atomic64_fetch_inc_release
-#endif
-
-#if defined(arch_atomic64_fetch_inc_relaxed)
-static __always_inline s64
-atomic64_fetch_inc_relaxed(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_inc_relaxed(v);
-}
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-#endif
-
-#if defined(arch_atomic64_dec)
-static __always_inline void
-atomic64_dec(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic64_dec(v);
-}
-#define atomic64_dec atomic64_dec
-#endif
-
-#if defined(arch_atomic64_dec_return)
-static __always_inline s64
-atomic64_dec_return(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_dec_return(v);
-}
-#define atomic64_dec_return atomic64_dec_return
-#endif
-
-#if defined(arch_atomic64_dec_return_acquire)
-static __always_inline s64
-atomic64_dec_return_acquire(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_dec_return_acquire(v);
-}
-#define atomic64_dec_return_acquire atomic64_dec_return_acquire
-#endif
-
-#if defined(arch_atomic64_dec_return_release)
-static __always_inline s64
-atomic64_dec_return_release(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_dec_return_release(v);
-}
-#define atomic64_dec_return_release atomic64_dec_return_release
-#endif
-
-#if defined(arch_atomic64_dec_return_relaxed)
-static __always_inline s64
-atomic64_dec_return_relaxed(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_dec_return_relaxed(v);
-}
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#endif
-
-#if defined(arch_atomic64_fetch_dec)
-static __always_inline s64
-atomic64_fetch_dec(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec(v);
-}
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#if defined(arch_atomic64_fetch_dec_acquire)
-static __always_inline s64
-atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec_acquire(v);
-}
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_dec_release)
-static __always_inline s64
-atomic64_fetch_dec_release(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec_release(v);
-}
-#define atomic64_fetch_dec_release atomic64_fetch_dec_release
-#endif
-
-#if defined(arch_atomic64_fetch_dec_relaxed)
-static __always_inline s64
-atomic64_fetch_dec_relaxed(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_dec_relaxed(v);
-}
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-#endif
-
-static __always_inline void
-atomic64_and(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic64_and(i, v);
-}
-#define atomic64_and atomic64_and
-
-#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and)
-static __always_inline s64
-atomic64_fetch_and(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_and(i, v);
-}
-#define atomic64_fetch_and atomic64_fetch_and
-#endif
-
-#if defined(arch_atomic64_fetch_and_acquire)
-static __always_inline s64
-atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_and_acquire(i, v);
-}
-#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_and_release)
-static __always_inline s64
-atomic64_fetch_and_release(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_and_release(i, v);
-}
-#define atomic64_fetch_and_release atomic64_fetch_and_release
-#endif
-
-#if defined(arch_atomic64_fetch_and_relaxed)
-static __always_inline s64
-atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_and_relaxed(i, v);
-}
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#endif
-
-#if defined(arch_atomic64_andnot)
-static __always_inline void
-atomic64_andnot(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic64_andnot(i, v);
-}
-#define atomic64_andnot atomic64_andnot
-#endif
-
-#if defined(arch_atomic64_fetch_andnot)
-static __always_inline s64
-atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot(i, v);
-}
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-#endif
-
-#if defined(arch_atomic64_fetch_andnot_acquire)
-static __always_inline s64
-atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot_acquire(i, v);
-}
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_andnot_release)
-static __always_inline s64
-atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot_release(i, v);
-}
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#endif
-
-#if defined(arch_atomic64_fetch_andnot_relaxed)
-static __always_inline s64
-atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_andnot_relaxed(i, v);
-}
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#endif
-
-static __always_inline void
-atomic64_or(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic64_or(i, v);
-}
-#define atomic64_or atomic64_or
-
-#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or)
-static __always_inline s64
-atomic64_fetch_or(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_or(i, v);
-}
-#define atomic64_fetch_or atomic64_fetch_or
-#endif
-
-#if defined(arch_atomic64_fetch_or_acquire)
-static __always_inline s64
-atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_or_acquire(i, v);
-}
-#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_or_release)
-static __always_inline s64
-atomic64_fetch_or_release(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_or_release(i, v);
-}
-#define atomic64_fetch_or_release atomic64_fetch_or_release
-#endif
-
-#if defined(arch_atomic64_fetch_or_relaxed)
-static __always_inline s64
-atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_or_relaxed(i, v);
-}
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#endif
-
-static __always_inline void
-atomic64_xor(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- arch_atomic64_xor(i, v);
-}
-#define atomic64_xor atomic64_xor
-
-#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor)
-static __always_inline s64
-atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor(i, v);
-}
-#define atomic64_fetch_xor atomic64_fetch_xor
-#endif
-
-#if defined(arch_atomic64_fetch_xor_acquire)
-static __always_inline s64
-atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor_acquire(i, v);
-}
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#endif
-
-#if defined(arch_atomic64_fetch_xor_release)
-static __always_inline s64
-atomic64_fetch_xor_release(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor_release(i, v);
-}
-#define atomic64_fetch_xor_release atomic64_fetch_xor_release
-#endif
-
-#if defined(arch_atomic64_fetch_xor_relaxed)
-static __always_inline s64
-atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_xor_relaxed(i, v);
-}
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-#endif
-
-#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg)
-static __always_inline s64
-atomic64_xchg(atomic64_t *v, s64 i)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_xchg(v, i);
-}
-#define atomic64_xchg atomic64_xchg
-#endif
-
-#if defined(arch_atomic64_xchg_acquire)
-static __always_inline s64
-atomic64_xchg_acquire(atomic64_t *v, s64 i)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_xchg_acquire(v, i);
-}
-#define atomic64_xchg_acquire atomic64_xchg_acquire
-#endif
-
-#if defined(arch_atomic64_xchg_release)
-static __always_inline s64
-atomic64_xchg_release(atomic64_t *v, s64 i)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_xchg_release(v, i);
-}
-#define atomic64_xchg_release atomic64_xchg_release
-#endif
-
-#if defined(arch_atomic64_xchg_relaxed)
-static __always_inline s64
-atomic64_xchg_relaxed(atomic64_t *v, s64 i)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_xchg_relaxed(v, i);
-}
-#define atomic64_xchg_relaxed atomic64_xchg_relaxed
-#endif
-
-#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg)
-static __always_inline s64
-atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg(v, old, new);
-}
-#define atomic64_cmpxchg atomic64_cmpxchg
-#endif
-
-#if defined(arch_atomic64_cmpxchg_acquire)
-static __always_inline s64
-atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg_acquire(v, old, new);
-}
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
-#endif
-
-#if defined(arch_atomic64_cmpxchg_release)
-static __always_inline s64
-atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg_release(v, old, new);
-}
-#define atomic64_cmpxchg_release atomic64_cmpxchg_release
-#endif
-
-#if defined(arch_atomic64_cmpxchg_relaxed)
-static __always_inline s64
-atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
-#endif
-
-#if defined(arch_atomic64_try_cmpxchg)
-static __always_inline bool
-atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg(v, old, new);
-}
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-#endif
-
-#if defined(arch_atomic64_try_cmpxchg_acquire)
-static __always_inline bool
-atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg_acquire(v, old, new);
-}
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#endif
-
-#if defined(arch_atomic64_try_cmpxchg_release)
-static __always_inline bool
-atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg_release(v, old, new);
-}
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#endif
-
-#if defined(arch_atomic64_try_cmpxchg_relaxed)
-static __always_inline bool
-atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- instrument_atomic_read_write(old, sizeof(*old));
- return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
-#endif
-
-#if defined(arch_atomic64_sub_and_test)
-static __always_inline bool
-atomic64_sub_and_test(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_sub_and_test(i, v);
-}
-#define atomic64_sub_and_test atomic64_sub_and_test
-#endif
-
-#if defined(arch_atomic64_dec_and_test)
-static __always_inline bool
-atomic64_dec_and_test(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_dec_and_test(v);
-}
-#define atomic64_dec_and_test atomic64_dec_and_test
-#endif
-
-#if defined(arch_atomic64_inc_and_test)
-static __always_inline bool
-atomic64_inc_and_test(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_inc_and_test(v);
-}
-#define atomic64_inc_and_test atomic64_inc_and_test
-#endif
-
-#if defined(arch_atomic64_add_negative)
-static __always_inline bool
-atomic64_add_negative(s64 i, atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_add_negative(i, v);
-}
-#define atomic64_add_negative atomic64_add_negative
-#endif
-
-#if defined(arch_atomic64_fetch_add_unless)
-static __always_inline s64
-atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_fetch_add_unless(v, a, u);
-}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
-#endif
-
-#if defined(arch_atomic64_add_unless)
-static __always_inline bool
-atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_add_unless(v, a, u);
-}
-#define atomic64_add_unless atomic64_add_unless
-#endif
-
-#if defined(arch_atomic64_inc_not_zero)
-static __always_inline bool
-atomic64_inc_not_zero(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_inc_not_zero(v);
-}
-#define atomic64_inc_not_zero atomic64_inc_not_zero
-#endif
-
-#if defined(arch_atomic64_inc_unless_negative)
-static __always_inline bool
-atomic64_inc_unless_negative(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_inc_unless_negative(v);
-}
-#define atomic64_inc_unless_negative atomic64_inc_unless_negative
-#endif
-
-#if defined(arch_atomic64_dec_unless_positive)
-static __always_inline bool
-atomic64_dec_unless_positive(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_dec_unless_positive(v);
-}
-#define atomic64_dec_unless_positive atomic64_dec_unless_positive
-#endif
-
-#if defined(arch_atomic64_dec_if_positive)
-static __always_inline s64
-atomic64_dec_if_positive(atomic64_t *v)
-{
- instrument_atomic_read_write(v, sizeof(*v));
- return arch_atomic64_dec_if_positive(v);
-}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-#endif
-
-#if !defined(arch_xchg_relaxed) || defined(arch_xchg)
-#define xchg(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_xchg_acquire)
-#define xchg_acquire(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_xchg_release)
-#define xchg_release(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg_release(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_xchg_relaxed)
-#define xchg_relaxed(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if !defined(arch_cmpxchg_relaxed) || defined(arch_cmpxchg)
-#define cmpxchg(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg_acquire)
-#define cmpxchg_acquire(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg_release)
-#define cmpxchg_release(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg_relaxed)
-#define cmpxchg_relaxed(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if !defined(arch_cmpxchg64_relaxed) || defined(arch_cmpxchg64)
-#define cmpxchg64(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg64_acquire)
-#define cmpxchg64_acquire(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg64_release)
-#define cmpxchg64_release(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_cmpxchg64_relaxed)
-#define cmpxchg64_relaxed(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
-})
-#endif
-
-#if !defined(arch_try_cmpxchg_relaxed) || defined(arch_try_cmpxchg)
-#define try_cmpxchg(ptr, oldp, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- typeof(oldp) __ai_oldp = (oldp); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
- arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_try_cmpxchg_acquire)
-#define try_cmpxchg_acquire(ptr, oldp, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- typeof(oldp) __ai_oldp = (oldp); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
- arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_try_cmpxchg_release)
-#define try_cmpxchg_release(ptr, oldp, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- typeof(oldp) __ai_oldp = (oldp); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
- arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
-})
-#endif
-
-#if defined(arch_try_cmpxchg_relaxed)
-#define try_cmpxchg_relaxed(ptr, oldp, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- typeof(oldp) __ai_oldp = (oldp); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
- arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
-})
-#endif
-
-#define cmpxchg_local(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg64_local(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#define sync_cmpxchg(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
- arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
-})
-
-#define cmpxchg_double(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
- arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
-})
-
-
-#define cmpxchg_double_local(ptr, ...) \
-({ \
- typeof(ptr) __ai_ptr = (ptr); \
- instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
- arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
-})
-
-#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
-// 4bec382e44520f4d8267e42620054db26a659ea3
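The wrappers deleted above all follow one shape: instrument the access for KASAN/KCSAN, then forward to the arch_ primitive. A minimal userspace analogue of that shape, assuming made-up names (my_instrument_rw and arch_counter_add_return are illustrative, not kernel APIs):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

static void my_instrument_rw(const volatile void *p, size_t size)
{
        /* stand-in for instrument_atomic_read_write(); a real checker
         * (KASAN/KCSAN) would validate this access here */
        (void)p; (void)size;
}

static long arch_counter_add_return(long i, _Atomic long *v)
{
        return atomic_fetch_add_explicit(v, i, memory_order_seq_cst) + i;
}

static long counter_add_return(long i, _Atomic long *v)
{
        my_instrument_rw(v, sizeof(*v));        /* instrument, then forward */
        return arch_counter_add_return(i, v);
}

int main(void)
{
        _Atomic long c = 0;

        printf("%ld\n", counter_add_return(5, &c));     /* prints 5 */
        return 0;
}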
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
deleted file mode 100644
index 073cf40f431b..000000000000
--- a/include/asm-generic/atomic-long.h
+++ /dev/null
@@ -1,1014 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-long.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _ASM_GENERIC_ATOMIC_LONG_H
-#define _ASM_GENERIC_ATOMIC_LONG_H
-
-#include <linux/compiler.h>
-#include <asm/types.h>
-
-#ifdef CONFIG_64BIT
-typedef atomic64_t atomic_long_t;
-#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
-#define atomic_long_cond_read_acquire atomic64_cond_read_acquire
-#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed
-#else
-typedef atomic_t atomic_long_t;
-#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
-#define atomic_long_cond_read_acquire atomic_cond_read_acquire
-#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed
-#endif
-
-#ifdef CONFIG_64BIT
-
-static __always_inline long
-atomic_long_read(const atomic_long_t *v)
-{
- return atomic64_read(v);
-}
-
-static __always_inline long
-atomic_long_read_acquire(const atomic_long_t *v)
-{
- return atomic64_read_acquire(v);
-}
-
-static __always_inline void
-atomic_long_set(atomic_long_t *v, long i)
-{
- atomic64_set(v, i);
-}
-
-static __always_inline void
-atomic_long_set_release(atomic_long_t *v, long i)
-{
- atomic64_set_release(v, i);
-}
-
-static __always_inline void
-atomic_long_add(long i, atomic_long_t *v)
-{
- atomic64_add(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return(long i, atomic_long_t *v)
-{
- return atomic64_add_return(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_acquire(long i, atomic_long_t *v)
-{
- return atomic64_add_return_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_release(long i, atomic_long_t *v)
-{
- return atomic64_add_return_release(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_add_return_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add(long i, atomic_long_t *v)
-{
- return atomic64_fetch_add(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_add_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_add_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_add_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_sub(long i, atomic_long_t *v)
-{
- atomic64_sub(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return(long i, atomic_long_t *v)
-{
- return atomic64_sub_return(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_acquire(long i, atomic_long_t *v)
-{
- return atomic64_sub_return_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_release(long i, atomic_long_t *v)
-{
- return atomic64_sub_return_release(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_sub_return_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub(long i, atomic_long_t *v)
-{
- return atomic64_fetch_sub(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_sub_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_sub_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_sub_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_inc(atomic_long_t *v)
-{
- atomic64_inc(v);
-}
-
-static __always_inline long
-atomic_long_inc_return(atomic_long_t *v)
-{
- return atomic64_inc_return(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_acquire(atomic_long_t *v)
-{
- return atomic64_inc_return_acquire(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_release(atomic_long_t *v)
-{
- return atomic64_inc_return_release(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_relaxed(atomic_long_t *v)
-{
- return atomic64_inc_return_relaxed(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc(atomic_long_t *v)
-{
- return atomic64_fetch_inc(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_acquire(atomic_long_t *v)
-{
- return atomic64_fetch_inc_acquire(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_release(atomic_long_t *v)
-{
- return atomic64_fetch_inc_release(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_relaxed(atomic_long_t *v)
-{
- return atomic64_fetch_inc_relaxed(v);
-}
-
-static __always_inline void
-atomic_long_dec(atomic_long_t *v)
-{
- atomic64_dec(v);
-}
-
-static __always_inline long
-atomic_long_dec_return(atomic_long_t *v)
-{
- return atomic64_dec_return(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_acquire(atomic_long_t *v)
-{
- return atomic64_dec_return_acquire(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_release(atomic_long_t *v)
-{
- return atomic64_dec_return_release(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_relaxed(atomic_long_t *v)
-{
- return atomic64_dec_return_relaxed(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec(atomic_long_t *v)
-{
- return atomic64_fetch_dec(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_acquire(atomic_long_t *v)
-{
- return atomic64_fetch_dec_acquire(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_release(atomic_long_t *v)
-{
- return atomic64_fetch_dec_release(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_relaxed(atomic_long_t *v)
-{
- return atomic64_fetch_dec_relaxed(v);
-}
-
-static __always_inline void
-atomic_long_and(long i, atomic_long_t *v)
-{
- atomic64_and(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and(long i, atomic_long_t *v)
-{
- return atomic64_fetch_and(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_and_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_and_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_and_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_andnot(long i, atomic_long_t *v)
-{
- atomic64_andnot(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot(long i, atomic_long_t *v)
-{
- return atomic64_fetch_andnot(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_andnot_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_andnot_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_andnot_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_or(long i, atomic_long_t *v)
-{
- atomic64_or(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or(long i, atomic_long_t *v)
-{
- return atomic64_fetch_or(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_or_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_or_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_or_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_xor(long i, atomic_long_t *v)
-{
- atomic64_xor(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor(long i, atomic_long_t *v)
-{
- return atomic64_fetch_xor(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
-{
- return atomic64_fetch_xor_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_release(long i, atomic_long_t *v)
-{
- return atomic64_fetch_xor_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
-{
- return atomic64_fetch_xor_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_xchg(atomic_long_t *v, long i)
-{
- return atomic64_xchg(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_acquire(atomic_long_t *v, long i)
-{
- return atomic64_xchg_acquire(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_release(atomic_long_t *v, long i)
-{
- return atomic64_xchg_release(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_relaxed(atomic_long_t *v, long i)
-{
- return atomic64_xchg_relaxed(v, i);
-}
-
-static __always_inline long
-atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
-{
- return atomic64_cmpxchg(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
-{
- return atomic64_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
-{
- return atomic64_cmpxchg_release(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
-{
- return atomic64_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
-{
- return atomic64_try_cmpxchg(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
-{
- return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
-{
- return atomic64_try_cmpxchg_release(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
-{
- return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
-}
-
-static __always_inline bool
-atomic_long_sub_and_test(long i, atomic_long_t *v)
-{
- return atomic64_sub_and_test(i, v);
-}
-
-static __always_inline bool
-atomic_long_dec_and_test(atomic_long_t *v)
-{
- return atomic64_dec_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_inc_and_test(atomic_long_t *v)
-{
- return atomic64_inc_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_add_negative(long i, atomic_long_t *v)
-{
- return atomic64_add_negative(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
-{
- return atomic64_fetch_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_add_unless(atomic_long_t *v, long a, long u)
-{
- return atomic64_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_inc_not_zero(atomic_long_t *v)
-{
- return atomic64_inc_not_zero(v);
-}
-
-static __always_inline bool
-atomic_long_inc_unless_negative(atomic_long_t *v)
-{
- return atomic64_inc_unless_negative(v);
-}
-
-static __always_inline bool
-atomic_long_dec_unless_positive(atomic_long_t *v)
-{
- return atomic64_dec_unless_positive(v);
-}
-
-static __always_inline long
-atomic_long_dec_if_positive(atomic_long_t *v)
-{
- return atomic64_dec_if_positive(v);
-}
-
-#else /* CONFIG_64BIT */
-
-static __always_inline long
-atomic_long_read(const atomic_long_t *v)
-{
- return atomic_read(v);
-}
-
-static __always_inline long
-atomic_long_read_acquire(const atomic_long_t *v)
-{
- return atomic_read_acquire(v);
-}
-
-static __always_inline void
-atomic_long_set(atomic_long_t *v, long i)
-{
- atomic_set(v, i);
-}
-
-static __always_inline void
-atomic_long_set_release(atomic_long_t *v, long i)
-{
- atomic_set_release(v, i);
-}
-
-static __always_inline void
-atomic_long_add(long i, atomic_long_t *v)
-{
- atomic_add(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return(long i, atomic_long_t *v)
-{
- return atomic_add_return(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_acquire(long i, atomic_long_t *v)
-{
- return atomic_add_return_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_release(long i, atomic_long_t *v)
-{
- return atomic_add_return_release(i, v);
-}
-
-static __always_inline long
-atomic_long_add_return_relaxed(long i, atomic_long_t *v)
-{
- return atomic_add_return_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add(long i, atomic_long_t *v)
-{
- return atomic_fetch_add(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_add_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_add_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_add_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_sub(long i, atomic_long_t *v)
-{
- atomic_sub(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return(long i, atomic_long_t *v)
-{
- return atomic_sub_return(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_acquire(long i, atomic_long_t *v)
-{
- return atomic_sub_return_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_release(long i, atomic_long_t *v)
-{
- return atomic_sub_return_release(i, v);
-}
-
-static __always_inline long
-atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
-{
- return atomic_sub_return_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub(long i, atomic_long_t *v)
-{
- return atomic_fetch_sub(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_sub_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_sub_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_sub_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_inc(atomic_long_t *v)
-{
- atomic_inc(v);
-}
-
-static __always_inline long
-atomic_long_inc_return(atomic_long_t *v)
-{
- return atomic_inc_return(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_acquire(atomic_long_t *v)
-{
- return atomic_inc_return_acquire(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_release(atomic_long_t *v)
-{
- return atomic_inc_return_release(v);
-}
-
-static __always_inline long
-atomic_long_inc_return_relaxed(atomic_long_t *v)
-{
- return atomic_inc_return_relaxed(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc(atomic_long_t *v)
-{
- return atomic_fetch_inc(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_acquire(atomic_long_t *v)
-{
- return atomic_fetch_inc_acquire(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_release(atomic_long_t *v)
-{
- return atomic_fetch_inc_release(v);
-}
-
-static __always_inline long
-atomic_long_fetch_inc_relaxed(atomic_long_t *v)
-{
- return atomic_fetch_inc_relaxed(v);
-}
-
-static __always_inline void
-atomic_long_dec(atomic_long_t *v)
-{
- atomic_dec(v);
-}
-
-static __always_inline long
-atomic_long_dec_return(atomic_long_t *v)
-{
- return atomic_dec_return(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_acquire(atomic_long_t *v)
-{
- return atomic_dec_return_acquire(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_release(atomic_long_t *v)
-{
- return atomic_dec_return_release(v);
-}
-
-static __always_inline long
-atomic_long_dec_return_relaxed(atomic_long_t *v)
-{
- return atomic_dec_return_relaxed(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec(atomic_long_t *v)
-{
- return atomic_fetch_dec(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_acquire(atomic_long_t *v)
-{
- return atomic_fetch_dec_acquire(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_release(atomic_long_t *v)
-{
- return atomic_fetch_dec_release(v);
-}
-
-static __always_inline long
-atomic_long_fetch_dec_relaxed(atomic_long_t *v)
-{
- return atomic_fetch_dec_relaxed(v);
-}
-
-static __always_inline void
-atomic_long_and(long i, atomic_long_t *v)
-{
- atomic_and(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and(long i, atomic_long_t *v)
-{
- return atomic_fetch_and(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_and_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_and_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_and_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_andnot(long i, atomic_long_t *v)
-{
- atomic_andnot(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot(long i, atomic_long_t *v)
-{
- return atomic_fetch_andnot(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_andnot_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_andnot_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_andnot_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_or(long i, atomic_long_t *v)
-{
- atomic_or(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or(long i, atomic_long_t *v)
-{
- return atomic_fetch_or(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_or_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_or_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_or_relaxed(i, v);
-}
-
-static __always_inline void
-atomic_long_xor(long i, atomic_long_t *v)
-{
- atomic_xor(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor(long i, atomic_long_t *v)
-{
- return atomic_fetch_xor(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
-{
- return atomic_fetch_xor_acquire(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_release(long i, atomic_long_t *v)
-{
- return atomic_fetch_xor_release(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
-{
- return atomic_fetch_xor_relaxed(i, v);
-}
-
-static __always_inline long
-atomic_long_xchg(atomic_long_t *v, long i)
-{
- return atomic_xchg(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_acquire(atomic_long_t *v, long i)
-{
- return atomic_xchg_acquire(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_release(atomic_long_t *v, long i)
-{
- return atomic_xchg_release(v, i);
-}
-
-static __always_inline long
-atomic_long_xchg_relaxed(atomic_long_t *v, long i)
-{
- return atomic_xchg_relaxed(v, i);
-}
-
-static __always_inline long
-atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
-{
- return atomic_cmpxchg(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
-{
- return atomic_cmpxchg_acquire(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
-{
- return atomic_cmpxchg_release(v, old, new);
-}
-
-static __always_inline long
-atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
-{
- return atomic_cmpxchg_relaxed(v, old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
-{
- return atomic_try_cmpxchg(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
-{
- return atomic_try_cmpxchg_acquire(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
-{
- return atomic_try_cmpxchg_release(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
-{
- return atomic_try_cmpxchg_relaxed(v, (int *)old, new);
-}
-
-static __always_inline bool
-atomic_long_sub_and_test(long i, atomic_long_t *v)
-{
- return atomic_sub_and_test(i, v);
-}
-
-static __always_inline bool
-atomic_long_dec_and_test(atomic_long_t *v)
-{
- return atomic_dec_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_inc_and_test(atomic_long_t *v)
-{
- return atomic_inc_and_test(v);
-}
-
-static __always_inline bool
-atomic_long_add_negative(long i, atomic_long_t *v)
-{
- return atomic_add_negative(i, v);
-}
-
-static __always_inline long
-atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
-{
- return atomic_fetch_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_add_unless(atomic_long_t *v, long a, long u)
-{
- return atomic_add_unless(v, a, u);
-}
-
-static __always_inline bool
-atomic_long_inc_not_zero(atomic_long_t *v)
-{
- return atomic_inc_not_zero(v);
-}
-
-static __always_inline bool
-atomic_long_inc_unless_negative(atomic_long_t *v)
-{
- return atomic_inc_unless_negative(v);
-}
-
-static __always_inline bool
-atomic_long_dec_unless_positive(atomic_long_t *v)
-{
- return atomic_dec_unless_positive(v);
-}
-
-static __always_inline long
-atomic_long_dec_if_positive(atomic_long_t *v)
-{
- return atomic_dec_if_positive(v);
-}
-
-#endif /* CONFIG_64BIT */
-#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
-// a624200981f552b2c6be4f32fe44da8289f30d87
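The deleted atomic-long.h provided a word-sized atomic_long_t by aliasing atomic64_t on 64-bit kernels and atomic_t on 32-bit ones, then forwarding every operation to the backing type. A sketch of that compile-time dispatch outside the kernel, with UINTPTR_MAX standing in for CONFIG_64BIT (illustrative only, not the kernel's implementation):

#include <stdatomic.h>
#include <stdint.h>

#if UINTPTR_MAX == UINT64_MAX                   /* stand-in for CONFIG_64BIT */
typedef _Atomic int64_t atomic_long_demo_t;
#else
typedef _Atomic int32_t atomic_long_demo_t;
#endif

static inline long demo_long_fetch_add(long i, atomic_long_demo_t *v)
{
        /* every op is a one-line forward to the word-sized backing type */
        return (long)atomic_fetch_add_explicit(v, i, memory_order_seq_cst);
}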
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 11f96f40f4a7..22142c71d35a 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * Generic C implementation of atomic counter operations. Usable on
- * UP systems only. Do not include in machine independent code.
+ * Generic C implementation of atomic counter operations. Do not include in
+ * machine independent code.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -12,56 +12,39 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-/*
- * atomic_$op() - $op integer to atomic variable
- * @i: integer value to $op
- * @v: pointer to the atomic variable
- *
- * Atomically $ops @i to @v. Does not strictly guarantee a memory barrier;
- * use smp_mb__{before,after}_atomic().
- */
-
-/*
- * atomic_$op_return() - $op integer to atomic variable and returns the result
- * @i: integer value to $op
- * @v: pointer to the atomic variable
- *
- * Atomically $ops @i to @v. Does imply a full memory barrier.
- */
-
#ifdef CONFIG_SMP
/* we can build all atomic primitives from cmpxchg */
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void generic_atomic_##op(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
\
return c c_op i; \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int c, old; \
\
c = v->counter; \
- while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \
+ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \
\
return c; \
@@ -72,7 +55,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#include <linux/irqflags.h>
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void generic_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -82,7 +65,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -95,7 +78,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -110,87 +93,41 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#endif /* CONFIG_SMP */
-#ifndef atomic_add_return
ATOMIC_OP_RETURN(add, +)
-#endif
-
-#ifndef atomic_sub_return
ATOMIC_OP_RETURN(sub, -)
-#endif
-#ifndef atomic_fetch_add
ATOMIC_FETCH_OP(add, +)
-#endif
-
-#ifndef atomic_fetch_sub
ATOMIC_FETCH_OP(sub, -)
-#endif
-
-#ifndef atomic_fetch_and
ATOMIC_FETCH_OP(and, &)
-#endif
-
-#ifndef atomic_fetch_or
ATOMIC_FETCH_OP(or, |)
-#endif
-
-#ifndef atomic_fetch_xor
ATOMIC_FETCH_OP(xor, ^)
-#endif
-#ifndef atomic_and
+ATOMIC_OP(add, +)
+ATOMIC_OP(sub, -)
ATOMIC_OP(and, &)
-#endif
-
-#ifndef atomic_or
ATOMIC_OP(or, |)
-#endif
-
-#ifndef atomic_xor
ATOMIC_OP(xor, ^)
-#endif
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
+#define arch_atomic_add_return generic_atomic_add_return
+#define arch_atomic_sub_return generic_atomic_sub_return
-/**
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-#ifndef atomic_read
-#define atomic_read(v) READ_ONCE((v)->counter)
-#endif
-
-/**
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-
-#include <linux/irqflags.h>
+#define arch_atomic_fetch_add generic_atomic_fetch_add
+#define arch_atomic_fetch_sub generic_atomic_fetch_sub
+#define arch_atomic_fetch_and generic_atomic_fetch_and
+#define arch_atomic_fetch_or generic_atomic_fetch_or
+#define arch_atomic_fetch_xor generic_atomic_fetch_xor
-static inline void atomic_add(int i, atomic_t *v)
-{
- atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- atomic_sub_return(i, v);
-}
+#define arch_atomic_add generic_atomic_add
+#define arch_atomic_sub generic_atomic_sub
+#define arch_atomic_and generic_atomic_and
+#define arch_atomic_or generic_atomic_or
+#define arch_atomic_xor generic_atomic_xor
-#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#endif /* __ASM_GENERIC_ATOMIC_H */
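On SMP, the ATOMIC_OP() family above builds every operation from a compare-and-swap retry loop. The same loop, rendered as standalone C11 so it can be compiled and tested directly (a sketch of the technique, not the kernel code):

#include <stdatomic.h>

static int generic_fetch_add_demo(int i, _Atomic int *counter)
{
        int c = atomic_load_explicit(counter, memory_order_relaxed);

        /* retry until no other thread changed the value under us;
         * on failure, c is reloaded with the current value */
        while (!atomic_compare_exchange_weak(counter, &c, c + i))
                ;
        return c;               /* the old value, as in fetch_add */
}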
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 370f01d4450f..100d24b02e52 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -15,19 +15,17 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) }
-extern s64 atomic64_read(const atomic64_t *v);
-extern void atomic64_set(atomic64_t *v, s64 i);
-
-#define atomic64_set_release(v, i) atomic64_set((v), (i))
+extern s64 generic_atomic64_read(const atomic64_t *v);
+extern void generic_atomic64_set(atomic64_t *v, s64 i);
#define ATOMIC64_OP(op) \
-extern void atomic64_##op(s64 a, atomic64_t *v);
+extern void generic_atomic64_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OP_RETURN(op) \
-extern s64 atomic64_##op##_return(s64 a, atomic64_t *v);
+extern s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v);
#define ATOMIC64_FETCH_OP(op) \
-extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v);
+extern s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
@@ -46,11 +44,32 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-extern s64 atomic64_dec_if_positive(atomic64_t *v);
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
-extern s64 atomic64_xchg(atomic64_t *v, s64 new);
-extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+extern s64 generic_atomic64_dec_if_positive(atomic64_t *v);
+extern s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
+extern s64 generic_atomic64_xchg(atomic64_t *v, s64 new);
+extern s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
+
+#define arch_atomic64_read generic_atomic64_read
+#define arch_atomic64_set generic_atomic64_set
+#define arch_atomic64_set_release generic_atomic64_set
+
+#define arch_atomic64_add generic_atomic64_add
+#define arch_atomic64_add_return generic_atomic64_add_return
+#define arch_atomic64_fetch_add generic_atomic64_fetch_add
+#define arch_atomic64_sub generic_atomic64_sub
+#define arch_atomic64_sub_return generic_atomic64_sub_return
+#define arch_atomic64_fetch_sub generic_atomic64_fetch_sub
+
+#define arch_atomic64_and generic_atomic64_and
+#define arch_atomic64_fetch_and generic_atomic64_fetch_and
+#define arch_atomic64_or generic_atomic64_or
+#define arch_atomic64_fetch_or generic_atomic64_fetch_or
+#define arch_atomic64_xor generic_atomic64_xor
+#define arch_atomic64_fetch_xor generic_atomic64_fetch_xor
+
+#define arch_atomic64_dec_if_positive generic_atomic64_dec_if_positive
+#define arch_atomic64_cmpxchg generic_atomic64_cmpxchg
+#define arch_atomic64_xchg generic_atomic64_xchg
+#define arch_atomic64_fetch_add_unless generic_atomic64_fetch_add_unless
#endif /* _ASM_GENERIC_ATOMIC64_H */
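The generic_atomic64_*() routines declared here are implemented in lib/atomic64.c by taking a spinlock around a plain s64, for machines with no native 64-bit atomics. A minimal pthread analogue of that strategy (the demo type and names are not kernel APIs):

#include <pthread.h>
#include <stdint.h>

struct atomic64_demo {
        int64_t counter;
        pthread_mutex_t lock;   /* the kernel uses a hashed spinlock */
};

static int64_t demo_atomic64_add_return(int64_t a, struct atomic64_demo *v)
{
        int64_t ret;

        pthread_mutex_lock(&v->lock);
        v->counter += a;
        ret = v->counter;
        pthread_mutex_unlock(&v->lock);
        return ret;
}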
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
index 331670807cf0..cc840537885f 100644
--- a/include/asm-generic/audit_change_attr.h
+++ b/include/asm-generic/audit_change_attr.h
@@ -11,9 +11,15 @@ __NR_lchown,
__NR_fchown,
#endif
__NR_setxattr,
+#ifdef __NR_setxattrat
+__NR_setxattrat,
+#endif
__NR_lsetxattr,
__NR_fsetxattr,
__NR_removexattr,
+#ifdef __NR_removexattrat
+__NR_removexattrat,
+#endif
__NR_lremovexattr,
__NR_fremovexattr,
#ifdef __NR_fchownat
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 640f09479bdf..d4f581c1e21d 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -14,6 +14,7 @@
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
#include <asm/rwonce.h>
#ifndef nop
@@ -21,6 +22,35 @@
#endif
/*
+ * Architectures that want generic instrumentation can define __ prefixed
+ * variants of all barriers.
+ */
+
+#ifdef __mb
+#define mb() do { kcsan_mb(); __mb(); } while (0)
+#endif
+
+#ifdef __rmb
+#define rmb() do { kcsan_rmb(); __rmb(); } while (0)
+#endif
+
+#ifdef __wmb
+#define wmb() do { kcsan_wmb(); __wmb(); } while (0)
+#endif
+
+#ifdef __dma_mb
+#define dma_mb() do { kcsan_mb(); __dma_mb(); } while (0)
+#endif
+
+#ifdef __dma_rmb
+#define dma_rmb() do { kcsan_rmb(); __dma_rmb(); } while (0)
+#endif
+
+#ifdef __dma_wmb
+#define dma_wmb() do { kcsan_wmb(); __dma_wmb(); } while (0)
+#endif
+
+/*
* Force strict CPU ordering. And yes, this is required on UP too when we're
* talking to devices.
*
@@ -39,6 +69,10 @@
#define wmb() mb()
#endif
+#ifndef dma_mb
+#define dma_mb() mb()
+#endif
+
#ifndef dma_rmb
#define dma_rmb() rmb()
#endif
@@ -62,15 +96,15 @@
#ifdef CONFIG_SMP
#ifndef smp_mb
-#define smp_mb() __smp_mb()
+#define smp_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#endif
#ifndef smp_rmb
-#define smp_rmb() __smp_rmb()
+#define smp_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
#endif
#ifndef smp_wmb
-#define smp_wmb() __smp_wmb()
+#define smp_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
#endif
#else /* !CONFIG_SMP */
@@ -123,19 +157,19 @@ do { \
#ifdef CONFIG_SMP
#ifndef smp_store_mb
-#define smp_store_mb(var, value) __smp_store_mb(var, value)
+#define smp_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#endif
#ifndef smp_mb__before_atomic
-#define smp_mb__before_atomic() __smp_mb__before_atomic()
+#define smp_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#endif
#ifndef smp_mb__after_atomic
-#define smp_mb__after_atomic() __smp_mb__after_atomic()
+#define smp_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#endif
#ifndef smp_store_release
-#define smp_store_release(p, v) __smp_store_release(p, v)
+#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#endif
#ifndef smp_load_acquire
@@ -159,7 +193,6 @@ do { \
#ifndef smp_store_release
#define smp_store_release(p, v) \
do { \
- compiletime_assert_atomic_type(*p); \
barrier(); \
WRITE_ONCE(*p, v); \
} while (0)
@@ -169,7 +202,6 @@ do { \
#define smp_load_acquire(p) \
({ \
__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
- compiletime_assert_atomic_type(*p); \
barrier(); \
(typeof(*p))___p1; \
})
@@ -178,13 +210,13 @@ do { \
#endif /* CONFIG_SMP */
/* Barriers for virtual machine guests when talking to an SMP host */
-#define virt_mb() __smp_mb()
-#define virt_rmb() __smp_rmb()
-#define virt_wmb() __smp_wmb()
-#define virt_store_mb(var, value) __smp_store_mb(var, value)
-#define virt_mb__before_atomic() __smp_mb__before_atomic()
-#define virt_mb__after_atomic() __smp_mb__after_atomic()
-#define virt_store_release(p, v) __smp_store_release(p, v)
+#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
+#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
+#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
+#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
+#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
+#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
+#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#define virt_load_acquire(p) __smp_load_acquire(p)
/**
@@ -251,5 +283,24 @@ do { \
#define pmem_wmb() wmb()
#endif
+/*
+ * ioremap_wc() maps I/O memory as memory with write-combining attributes. For
+ * this kind of memory access, the CPU may wait for prior accesses to be
+ * merged with subsequent ones. In some situations, such a wait hurts
+ * performance. io_stop_wc() can be used to prevent the merging of
+ * write-combining memory accesses before this macro with those after it.
+ */
+#ifndef io_stop_wc
+#define io_stop_wc() do { } while (0)
+#endif
+
+/*
+ * Architectures that guarantee an implicit smp_mb() in switch_mm()
+ * can override smp_mb__after_switch_mm.
+ */
+#ifndef smp_mb__after_switch_mm
+# define smp_mb__after_switch_mm() smp_mb()
+#endif
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */
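The pattern used throughout this hunk pairs a KCSAN annotation with the real barrier, so the race checker can model the ordering the barrier provides. The same wrap-and-forward shape in standalone C, with placeholder names (checker_note_mb, hw_full_barrier, and demo_smp_mb are illustrative, not kernel APIs):

#define hw_full_barrier()       __atomic_thread_fence(__ATOMIC_SEQ_CST)

static inline void checker_note_mb(void)
{
        /* a real checker (KCSAN) records "full barrier here" and uses it
         * when reasoning about racing accesses; this stub only marks the
         * spot */
}

#define demo_smp_mb()   do { checker_note_mb(); hw_full_barrier(); } while (0)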
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index df9b5bc3d282..a47b8a71d6fe 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -20,7 +20,6 @@
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/find.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
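Dropping bitops/find.h here does not change callers: the find_*_bit() helpers are now pulled in through <linux/bitops.h> (via <linux/find.h>). Typical kernel-style usage stays as before; a small sketch:

#include <linux/bitops.h>

static unsigned int count_set(const unsigned long *map, unsigned int nbits)
{
        unsigned int bit, n = 0;

        for_each_set_bit(bit, map, nbits)       /* provided via linux/find.h */
                n++;
        return n;
}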
diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h
index 39e56e1c7203..3a899c626fdc 100644
--- a/include/asm-generic/bitops/__ffs.h
+++ b/include/asm-generic/bitops/__ffs.h
@@ -5,14 +5,14 @@
#include <asm/types.h>
/**
- * __ffs - find first bit in word.
+ * generic___ffs - find first bit in word.
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-static __always_inline unsigned long __ffs(unsigned long word)
+static __always_inline __attribute_const__ unsigned int generic___ffs(unsigned long word)
{
- int num = 0;
+ unsigned int num = 0;
#if BITS_PER_LONG == 64
if ((word & 0xffffffff) == 0) {
@@ -41,4 +41,8 @@ static __always_inline unsigned long __ffs(unsigned long word)
return num;
}
+#ifndef __HAVE_ARCH___FFS
+#define __ffs(word) generic___ffs(word)
+#endif
+
#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */
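
The __HAVE_ARCH___FFS guard implements a simple override pattern: an
architecture supplies its own __ffs() and defines the guard, otherwise the
generic C version above is used. A hypothetical arch-side sketch:

#define __HAVE_ARCH___FFS
static __always_inline unsigned int __ffs(unsigned long word)
{
	/* e.g. maps to a single count-trailing-zeros instruction */
	return __builtin_ctzl(word);
}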
diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
index 03f721a8a2b1..35f33780ca6c 100644
--- a/include/asm-generic/bitops/__fls.h
+++ b/include/asm-generic/bitops/__fls.h
@@ -5,14 +5,14 @@
#include <asm/types.h>
/**
- * __fls - find last (most-significant) set bit in a long word
+ * generic___fls - find last (most-significant) set bit in a long word
* @word: the word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
-static __always_inline unsigned long __fls(unsigned long word)
+static __always_inline __attribute_const__ unsigned int generic___fls(unsigned long word)
{
- int num = BITS_PER_LONG - 1;
+ unsigned int num = BITS_PER_LONG - 1;
#if BITS_PER_LONG == 64
if (!(word & (~0ul << 32))) {
@@ -41,4 +41,8 @@ static __always_inline unsigned long __fls(unsigned long word)
return num;
}
+#ifndef __HAVE_ARCH___FLS
+#define __fls(word) generic___fls(word)
+#endif
+
#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 0e7316a86240..e076e079f6b2 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -11,58 +11,60 @@
* See Documentation/atomic_bitops.txt for details.
*/
-static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_set_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
+ raw_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
}
-static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_clear_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
+ raw_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
}
-static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_change_bit(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
+ raw_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
}
-static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- if (READ_ONCE(*p) & mask)
- return 1;
-
- old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
+ old = raw_atomic_long_fetch_or(mask, (atomic_long_t *)p);
return !!(old & mask);
}
-static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- if (!(READ_ONCE(*p) & mask))
- return 0;
-
- old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+ old = raw_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
return !!(old & mask);
}
-static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline int
+arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
p += BIT_WORD(nr);
- old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+ old = raw_atomic_long_fetch_xor(mask, (atomic_long_t *)p);
return !!(old & mask);
}
+#include <asm-generic/bitops/instrumented-atomic.h>
+
#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
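
These arch_*() helpers are fully ordered read-modify-write operations; a toy
illustration of the returned-old-value semantics (real locking code should
use the *_lock()/*_unlock() variants, which carry the intended acquire/release
ordering):

static unsigned long toy_flags;

static void toy_take(void)
{
	/* arch_test_and_set_bit() returns the bit's previous value */
	while (arch_test_and_set_bit(0, &toy_flags))
		cpu_relax();
}

static void toy_drop(void)
{
	arch_clear_bit(0, &toy_flags);
}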
diff --git a/include/asm-generic/bitops/builtin-__ffs.h b/include/asm-generic/bitops/builtin-__ffs.h
index 87024da44d10..d3c3f567045d 100644
--- a/include/asm-generic/bitops/builtin-__ffs.h
+++ b/include/asm-generic/bitops/builtin-__ffs.h
@@ -8,7 +8,7 @@
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-static __always_inline unsigned long __ffs(unsigned long word)
+static __always_inline __attribute_const__ unsigned int __ffs(unsigned long word)
{
return __builtin_ctzl(word);
}
diff --git a/include/asm-generic/bitops/builtin-__fls.h b/include/asm-generic/bitops/builtin-__fls.h
index 43a5aa9afbdb..7770c4f1bfcd 100644
--- a/include/asm-generic/bitops/builtin-__fls.h
+++ b/include/asm-generic/bitops/builtin-__fls.h
@@ -8,7 +8,7 @@
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
-static __always_inline unsigned long __fls(unsigned long word)
+static __always_inline __attribute_const__ unsigned int __fls(unsigned long word)
{
return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
}
diff --git a/include/asm-generic/bitops/builtin-ffs.h b/include/asm-generic/bitops/builtin-ffs.h
index 1dacfdb4247e..7b129329046b 100644
--- a/include/asm-generic/bitops/builtin-ffs.h
+++ b/include/asm-generic/bitops/builtin-ffs.h
@@ -8,7 +8,7 @@
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * differs in spirit from ffz (man ffs).
*/
#define ffs(x) __builtin_ffs(x)
diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h
index c8455cc28841..be707da8c7cd 100644
--- a/include/asm-generic/bitops/builtin-fls.h
+++ b/include/asm-generic/bitops/builtin-fls.h
@@ -9,7 +9,7 @@
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static __always_inline int fls(unsigned int x)
+static __always_inline __attribute_const__ int fls(unsigned int x)
{
return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
}
diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h
index e81868b2c0f0..5ff2b7fbda6d 100644
--- a/include/asm-generic/bitops/ffs.h
+++ b/include/asm-generic/bitops/ffs.h
@@ -3,14 +3,14 @@
#define _ASM_GENERIC_BITOPS_FFS_H_
/**
- * ffs - find first bit set
+ * generic_ffs - find first bit set
* @x: the word to search
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * differs in spirit from ffz (man ffs).
*/
-static inline int ffs(int x)
+static inline __attribute_const__ int generic_ffs(int x)
{
int r = 1;
@@ -39,4 +39,8 @@ static inline int ffs(int x)
return r;
}
+#ifndef __HAVE_ARCH_FFS
+#define ffs(x) generic_ffs(x)
+#endif
+
#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */
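
Worked values, per the definitions above, showing the 1-based ffs()
convention next to the 0-based __ffs():

/*
 *   generic_ffs(0)      == 0   -- ffs() is defined for 0
 *   generic_ffs(1)      == 1   -- the result is 1-based
 *   generic_ffs(0x10)   == 5   -- lowest set bit is bit 4
 *   generic___ffs(0x10) == 4   -- __ffs() is 0-based and undefined for 0
 */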
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
deleted file mode 100644
index 0d132ee2a291..000000000000
--- a/include/asm-generic/bitops/find.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_BITOPS_FIND_H_
-#define _ASM_GENERIC_BITOPS_FIND_H_
-
-extern unsigned long _find_next_bit(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long nbits,
- unsigned long start, unsigned long invert, unsigned long le);
-extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
-extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
-extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
-
-#ifndef find_next_bit
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number for the next set bit
- * If no bits are set, returns @size.
- */
-static inline
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
-{
- if (small_const_nbits(size)) {
- unsigned long val;
-
- if (unlikely(offset >= size))
- return size;
-
- val = *addr & GENMASK(size - 1, offset);
- return val ? __ffs(val) : size;
- }
-
- return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
-}
-#endif
-
-#ifndef find_next_and_bit
-/**
- * find_next_and_bit - find the next set bit in both memory regions
- * @addr1: The first address to base the search on
- * @addr2: The second address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number for the next set bit
- * If no bits are set, returns @size.
- */
-static inline
-unsigned long find_next_and_bit(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long size,
- unsigned long offset)
-{
- if (small_const_nbits(size)) {
- unsigned long val;
-
- if (unlikely(offset >= size))
- return size;
-
- val = *addr1 & *addr2 & GENMASK(size - 1, offset);
- return val ? __ffs(val) : size;
- }
-
- return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
-}
-#endif
-
-#ifndef find_next_zero_bit
-/**
- * find_next_zero_bit - find the next cleared bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number of the next zero bit
- * If no bits are zero, returns @size.
- */
-static inline
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
-{
- if (small_const_nbits(size)) {
- unsigned long val;
-
- if (unlikely(offset >= size))
- return size;
-
- val = *addr | ~GENMASK(size - 1, offset);
- return val == ~0UL ? size : ffz(val);
- }
-
- return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
-}
-#endif
-
-#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum number of bits to search
- *
- * Returns the bit number of the first set bit.
- * If no bits are set, returns @size.
- */
-static inline
-unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
-{
- if (small_const_nbits(size)) {
- unsigned long val = *addr & GENMASK(size - 1, 0);
-
- return val ? __ffs(val) : size;
- }
-
- return _find_first_bit(addr, size);
-}
-
-/**
- * find_first_zero_bit - find the first cleared bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum number of bits to search
- *
- * Returns the bit number of the first cleared bit.
- * If no bits are zero, returns @size.
- */
-static inline
-unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
-{
- if (small_const_nbits(size)) {
- unsigned long val = *addr | ~GENMASK(size - 1, 0);
-
- return val == ~0UL ? size : ffz(val);
- }
-
- return _find_first_zero_bit(addr, size);
-}
-#else /* CONFIG_GENERIC_FIND_FIRST_BIT */
-
-#ifndef find_first_bit
-#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
-#endif
-#ifndef find_first_zero_bit
-#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
-#endif
-
-#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
-
-#ifndef find_last_bit
-/**
- * find_last_bit - find the last set bit in a memory region
- * @addr: The address to start the search at
- * @size: The number of bits to search
- *
- * Returns the bit number of the last set bit, or size.
- */
-static inline
-unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
-{
- if (small_const_nbits(size)) {
- unsigned long val = *addr & GENMASK(size - 1, 0);
-
- return val ? __fls(val) : size;
- }
-
- return _find_last_bit(addr, size);
-}
-#endif
-
-/**
- * find_next_clump8 - find next 8-bit clump with set bits in a memory region
- * @clump: location to store copy of found clump
- * @addr: address to base the search on
- * @size: bitmap size in number of bits
- * @offset: bit offset at which to start searching
- *
- * Returns the bit offset for the next set clump; the found clump value is
- * copied to the location pointed by @clump. If no bits are set, returns @size.
- */
-extern unsigned long find_next_clump8(unsigned long *clump,
- const unsigned long *addr,
- unsigned long size, unsigned long offset);
-
-#define find_first_clump8(clump, bits, size) \
- find_next_clump8((clump), (bits), (size), 0)
-
-#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
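
For callers nothing changes with this removal: the find_*_bit() API keeps its
semantics and remains reachable through <linux/bitops.h>. A usage sketch
iterating every set bit of a bitmap:

static void walk_set_bits(const unsigned long *map, unsigned long nbits)
{
	unsigned long bit;

	for (bit = find_first_bit(map, nbits);
	     bit < nbits;
	     bit = find_next_bit(map, nbits, bit + 1))
		pr_info("bit %lu is set\n", bit);
}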
diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
index b168bb10e1be..8eed3437edb9 100644
--- a/include/asm-generic/bitops/fls.h
+++ b/include/asm-generic/bitops/fls.h
@@ -3,14 +3,14 @@
#define _ASM_GENERIC_BITOPS_FLS_H_
/**
- * fls - find last (most-significant) bit set
+ * generic_fls - find last (most-significant) bit set
* @x: the word to search
*
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static __always_inline int fls(unsigned int x)
+static __always_inline __attribute_const__ int generic_fls(unsigned int x)
{
int r = 32;
@@ -39,4 +39,8 @@ static __always_inline int fls(unsigned int x)
return r;
}
+#ifndef __HAVE_ARCH_FLS
+#define fls(x) generic_fls(x)
+#endif
+
#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
index 866f2b2304ff..b5f58dd261a3 100644
--- a/include/asm-generic/bitops/fls64.h
+++ b/include/asm-generic/bitops/fls64.h
@@ -16,7 +16,7 @@
* at position 64.
*/
#if BITS_PER_LONG == 32
-static __always_inline int fls64(__u64 x)
+static __always_inline __attribute_const__ int fls64(__u64 x)
{
__u32 h = x >> 32;
if (h)
@@ -24,7 +24,7 @@ static __always_inline int fls64(__u64 x)
return fls(x);
}
#elif BITS_PER_LONG == 64
-static __always_inline int fls64(__u64 x)
+static __always_inline __attribute_const__ int fls64(__u64 x)
{
if (x == 0)
return 0;
diff --git a/include/asm-generic/bitops/generic-non-atomic.h b/include/asm-generic/bitops/generic-non-atomic.h
new file mode 100644
index 000000000000..564a8c675d85
--- /dev/null
+++ b/include/asm-generic/bitops/generic-non-atomic.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
+#define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
+
+#include <linux/bits.h>
+#include <asm/barrier.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+/*
+ * Generic definitions for bit operations, should not be used in regular code
+ * directly.
+ */
+
+/**
+ * generic___set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p |= mask;
+}
+
+static __always_inline void
+generic___clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p &= ~mask;
+}
+
+/**
+ * generic___change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __always_inline void
+generic___change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p ^= mask;
+}
+
+/**
+ * generic___test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __always_inline bool
+generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old | mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * generic___test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static __always_inline bool
+generic___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old & ~mask;
+ return (old & mask) != 0;
+}
+
+/* WARNING: non-atomic and can be reordered! */
+static __always_inline bool
+generic___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old ^ mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * generic_test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+ /*
+ * Unlike the bitops with the '__' prefix above, this one *is* atomic,
+ * so `volatile` must always stay here with no cast-aways. See
+ * `Documentation/atomic_bitops.txt` for the details.
+ */
+ return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+/**
+ * generic_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+generic_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
+}
+
+/*
+ * const_*() definitions provide good compile-time optimizations when
+ * the passed arguments can be resolved at compile time.
+ */
+#define const___set_bit generic___set_bit
+#define const___clear_bit generic___clear_bit
+#define const___change_bit generic___change_bit
+#define const___test_and_set_bit generic___test_and_set_bit
+#define const___test_and_clear_bit generic___test_and_clear_bit
+#define const___test_and_change_bit generic___test_and_change_bit
+#define const_test_bit_acquire generic_test_bit_acquire
+
+/**
+ * const_test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ *
+ * A version of generic_test_bit() which discards the `volatile` qualifier to
+ * allow a compiler to optimize code harder. Non-atomic and to be called only
+ * for testing compile-time constants, e.g. by the corresponding macros, not
+ * directly from "regular" code.
+ */
+static __always_inline bool
+const_test_bit(unsigned long nr, const volatile unsigned long *addr)
+{
+ const unsigned long *p = (const unsigned long *)addr + BIT_WORD(nr);
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long val = *p;
+
+ return !!(val & mask);
+}
+
+#endif /* __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H */
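
The const_*() aliases are meant to be selected when the arguments are
compile-time constants, so the non-volatile path can be folded entirely. An
illustrative dispatch macro (not the exact upstream wiring in
<linux/bitops.h>):

#define toy_test_bit(nr, addr)						\
	(__builtin_constant_p(nr) && __builtin_constant_p(addr) ?	\
	 const_test_bit(nr, addr) :	/* foldable, volatile cast away */ \
	 _test_bit(nr, addr))		/* instrumented runtime path */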
diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
index 81915dcd4b4e..4225a8ca9c1a 100644
--- a/include/asm-generic/bitops/instrumented-atomic.h
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -23,7 +23,7 @@
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void set_bit(long nr, volatile unsigned long *addr)
{
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_set_bit(nr, addr);
@@ -36,7 +36,7 @@ static inline void set_bit(long nr, volatile unsigned long *addr)
*
* This is a relaxed atomic operation (no implied memory barriers).
*/
-static inline void clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void clear_bit(long nr, volatile unsigned long *addr)
{
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit(nr, addr);
@@ -52,7 +52,7 @@ static inline void clear_bit(long nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void change_bit(long nr, volatile unsigned long *addr)
{
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_change_bit(nr, addr);
@@ -65,8 +65,9 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
-static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
+ kcsan_mb();
instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_set_bit(nr, addr);
}
@@ -78,8 +79,9 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
-static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
+ kcsan_mb();
instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_clear_bit(nr, addr);
}
@@ -91,8 +93,9 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
*
* This is an atomic fully-ordered operation (implied full memory barrier).
*/
-static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
+ kcsan_mb();
instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
return arch_test_and_change_bit(nr, addr);
}
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
index 75ef606f7145..542d3727ee4e 100644
--- a/include/asm-generic/bitops/instrumented-lock.h
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -22,6 +22,7 @@
*/
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
+ kcsan_release();
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
arch_clear_bit_unlock(nr, addr);
}
@@ -37,6 +38,7 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
*/
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
+ kcsan_release();
instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit_unlock(nr, addr);
}
@@ -56,26 +58,25 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
return arch_test_and_set_bit_lock(nr, addr);
}
-#if defined(arch_clear_bit_unlock_is_negative_byte)
/**
- * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
- * byte is negative, for unlock.
- * @nr: the bit to clear
- * @addr: the address to start counting from
+ * xor_unlock_is_negative_byte - XOR a single byte in memory and test if
+ * it is negative, for unlock.
+ * @mask: Change the bits which are set in this mask.
+ * @addr: The address of the word containing the byte to change.
*
+ * Changes some of bits 0-6 in the word pointed to by @addr.
* This operation is atomic and provides release barrier semantics.
+ * Used to optimise some folio operations which are commonly paired
+ * with an unlock or end of writeback. Bit 7 is used as PG_waiters to
+ * indicate whether anybody is waiting for the unlock.
*
- * This is a bit of a one-trick-pony for the filemap code, which clears
- * PG_locked and tests PG_waiters,
+ * Return: Whether the top bit of the byte is set.
*/
-static inline bool
-clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+static inline bool xor_unlock_is_negative_byte(unsigned long mask,
+ volatile unsigned long *addr)
{
- instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
- return arch_clear_bit_unlock_is_negative_byte(nr, addr);
+ kcsan_release();
+ instrument_atomic_write(addr, sizeof(long));
+ return arch_xor_unlock_is_negative_byte(mask, addr);
}
-/* Let everybody know we have it. */
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
-#endif
-
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */
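
A usage sketch of the new xor_unlock_is_negative_byte(); the bit layout
follows the comment above, with the cleared bit standing in for PG_locked at
bit 0 and PG_waiters at bit 7 (the real caller lives in the filemap code):

static void toy_unlock(unsigned long *flags)
{
	/* XOR clears the set lock bit; the return value reports bit 7 */
	if (xor_unlock_is_negative_byte(BIT(0), flags))
		pr_info("a waiter needs waking\n");
}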
diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h
index 37363d570b9b..2b238b161a62 100644
--- a/include/asm-generic/bitops/instrumented-non-atomic.h
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -14,7 +14,7 @@
#include <linux/instrumented.h>
/**
- * __set_bit - Set a bit in memory
+ * ___set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -22,14 +22,15 @@
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
-static inline void __set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___set_bit(unsigned long nr, volatile unsigned long *addr)
{
instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___set_bit(nr, addr);
}
/**
- * __clear_bit - Clears a bit in memory
+ * ___clear_bit - Clears a bit in memory
* @nr: the bit to clear
* @addr: the address to start counting from
*
@@ -37,14 +38,15 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
-static inline void __clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___clear_bit(nr, addr);
}
/**
- * __change_bit - Toggle a bit in memory
+ * ___change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
@@ -52,13 +54,14 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr)
* region of memory concurrently, the effect may be that only one operation
* succeeds.
*/
-static inline void __change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void
+___change_bit(unsigned long nr, volatile unsigned long *addr)
{
instrument_write(addr + BIT_WORD(nr), sizeof(long));
arch___change_bit(nr, addr);
}
-static inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr)
+static __always_inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr)
{
if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC)) {
/*
@@ -83,56 +86,72 @@ static inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr)
}
/**
- * __test_and_set_bit - Set a bit and return its old value
+ * ___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
-static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
__instrument_read_write_bitop(nr, addr);
return arch___test_and_set_bit(nr, addr);
}
/**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * ___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
-static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
__instrument_read_write_bitop(nr, addr);
return arch___test_and_clear_bit(nr, addr);
}
/**
- * __test_and_change_bit - Change a bit and return its old value
+ * ___test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is non-atomic. If two instances of this operation race, one
* can appear to succeed but actually fail.
*/
-static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
__instrument_read_write_bitop(nr, addr);
return arch___test_and_change_bit(nr, addr);
}
/**
- * test_bit - Determine whether a bit is set
+ * _test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
-static inline bool test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool
+_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
return arch_test_bit(nr, addr);
}
+/**
+ * _test_bit_acquire - Determine, with acquire semantics, whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static __always_inline bool
+_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
+{
+ instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_bit_acquire(nr, addr);
+}
+
#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index 5a28629cbf4d..d51beff60375 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -2,83 +2,19 @@
#ifndef _ASM_GENERIC_BITOPS_LE_H_
#define _ASM_GENERIC_BITOPS_LE_H_
-#include <asm-generic/bitops/find.h>
#include <asm/types.h>
#include <asm/byteorder.h>
-#include <linux/swab.h>
#if defined(__LITTLE_ENDIAN)
#define BITOP_LE_SWIZZLE 0
-static inline unsigned long find_next_zero_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
-{
- return find_next_zero_bit(addr, size, offset);
-}
-
-static inline unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
-{
- return find_next_bit(addr, size, offset);
-}
-
-static inline unsigned long find_first_zero_bit_le(const void *addr,
- unsigned long size)
-{
- return find_first_zero_bit(addr, size);
-}
-
#elif defined(__BIG_ENDIAN)
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
-#ifndef find_next_zero_bit_le
-static inline
-unsigned long find_next_zero_bit_le(const void *addr, unsigned
- long size, unsigned long offset)
-{
- if (small_const_nbits(size)) {
- unsigned long val = *(const unsigned long *)addr;
-
- if (unlikely(offset >= size))
- return size;
-
- val = swab(val) | ~GENMASK(size - 1, offset);
- return val == ~0UL ? size : ffz(val);
- }
-
- return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
-}
-#endif
-
-#ifndef find_next_bit_le
-static inline
-unsigned long find_next_bit_le(const void *addr, unsigned
- long size, unsigned long offset)
-{
- if (small_const_nbits(size)) {
- unsigned long val = *(const unsigned long *)addr;
-
- if (unlikely(offset >= size))
- return size;
-
- val = swab(val) & GENMASK(size - 1, offset);
- return val ? __ffs(val) : size;
- }
-
- return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
-}
#endif
-#ifndef find_first_zero_bit_le
-#define find_first_zero_bit_le(addr, size) \
- find_next_zero_bit_le((addr), (size), 0)
-#endif
-
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
static inline int test_bit_le(int nr, const void *addr)
{
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 3ae021368f48..14d4ec8c5152 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -7,7 +7,7 @@
#include <asm/barrier.h>
/**
- * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * arch_test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
* @addr: Address to count from
*
@@ -15,8 +15,8 @@
* the returned value is 0.
* It can be used to implement bit locks.
*/
-static inline int test_and_set_bit_lock(unsigned int nr,
- volatile unsigned long *p)
+static __always_inline int
+arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
{
long old;
unsigned long mask = BIT_MASK(nr);
@@ -25,26 +25,27 @@ static inline int test_and_set_bit_lock(unsigned int nr,
if (READ_ONCE(*p) & mask)
return 1;
- old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+ old = raw_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
return !!(old & mask);
}
/**
- * clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch_clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
* This operation is atomic and provides release barrier semantics.
*/
-static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+static __always_inline void
+arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
p += BIT_WORD(nr);
- atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+ raw_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
}
/**
- * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * arch___clear_bit_unlock - Clear a bit in memory, for unlock
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -54,38 +55,28 @@ static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
*
* See for example x86's implementation.
*/
-static inline void __clear_bit_unlock(unsigned int nr,
- volatile unsigned long *p)
+static inline void
+arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
{
unsigned long old;
p += BIT_WORD(nr);
old = READ_ONCE(*p);
old &= ~BIT_MASK(nr);
- atomic_long_set_release((atomic_long_t *)p, old);
+ raw_atomic_long_set_release((atomic_long_t *)p, old);
}
-/**
- * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
- * byte is negative, for unlock.
- * @nr: the bit to clear
- * @addr: the address to start counting from
- *
- * This is a bit of a one-trick-pony for the filemap code, which clears
- * PG_locked and tests PG_waiters,
- */
-#ifndef clear_bit_unlock_is_negative_byte
-static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
- volatile unsigned long *p)
+#ifndef arch_xor_unlock_is_negative_byte
+static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
+ volatile unsigned long *p)
{
long old;
- unsigned long mask = BIT_MASK(nr);
- p += BIT_WORD(nr);
- old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+ old = raw_atomic_long_fetch_xor_release(mask, (atomic_long_t *)p);
return !!(old & BIT(7));
}
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
#endif
+#include <asm-generic/bitops/instrumented-lock.h>
+
#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h
index 7e10c4b50c5d..71f8d54a5195 100644
--- a/include/asm-generic/bitops/non-atomic.h
+++ b/include/asm-generic/bitops/non-atomic.h
@@ -2,108 +2,19 @@
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
-#include <asm/types.h>
+#include <asm-generic/bitops/generic-non-atomic.h>
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+#define arch___set_bit generic___set_bit
+#define arch___clear_bit generic___clear_bit
+#define arch___change_bit generic___change_bit
- *p |= mask;
-}
+#define arch___test_and_set_bit generic___test_and_set_bit
+#define arch___test_and_clear_bit generic___test_and_clear_bit
+#define arch___test_and_change_bit generic___test_and_change_bit
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+#define arch_test_bit generic_test_bit
+#define arch_test_bit_acquire generic_test_bit_acquire
- *p &= ~mask;
-}
-
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
-static inline void __change_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
-
- *p ^= mask;
-}
-
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old = *p;
-
- *p = old | mask;
- return (old & mask) != 0;
-}
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail. You must protect multiple accesses with a lock.
- */
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old = *p;
-
- *p = old & ~mask;
- return (old & mask) != 0;
-}
-
-/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
- volatile unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old = *p;
-
- *p = old ^ mask;
- return (old & mask) != 0;
-}
-
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static inline int test_bit(int nr, const volatile unsigned long *addr)
-{
- return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
-}
+#include <asm-generic/bitops/non-instrumented-non-atomic.h>
#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
diff --git a/include/asm-generic/bitops/non-instrumented-non-atomic.h b/include/asm-generic/bitops/non-instrumented-non-atomic.h
new file mode 100644
index 000000000000..0ddc78dfc358
--- /dev/null
+++ b/include/asm-generic/bitops/non-instrumented-non-atomic.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
+#define __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H
+
+#define ___set_bit arch___set_bit
+#define ___clear_bit arch___clear_bit
+#define ___change_bit arch___change_bit
+
+#define ___test_and_set_bit arch___test_and_set_bit
+#define ___test_and_clear_bit arch___test_and_clear_bit
+#define ___test_and_change_bit arch___test_and_change_bit
+
+#define _test_bit arch_test_bit
+#define _test_bit_acquire arch_test_bit_acquire
+
+#endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index b402494883b6..09e8eccee8ed 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -4,6 +4,7 @@
#include <linux/compiler.h>
#include <linux/instrumentation.h>
+#include <linux/once_lite.h>
#define CUT_HERE "------------[ cut here ]------------\n"
@@ -12,28 +13,45 @@
#define BUGFLAG_ONCE (1 << 1)
#define BUGFLAG_DONE (1 << 2)
#define BUGFLAG_NO_CUT_HERE (1 << 3) /* CUT_HERE already sent */
+#define BUGFLAG_ARGS (1 << 4)
#define BUGFLAG_TAINT(taint) ((taint) << 8)
#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
#endif
+#ifndef WARN_CONDITION_STR
+#ifdef CONFIG_DEBUG_BUGVERBOSE_DETAILED
+# define WARN_CONDITION_STR(cond_str) "[" cond_str "] "
+#else
+# define WARN_CONDITION_STR(cond_str)
+#endif
+#endif /* WARN_CONDITION_STR */
+
#ifndef __ASSEMBLY__
-#include <linux/kernel.h>
+#include <linux/panic.h>
+#include <linux/printk.h>
+
+struct warn_args;
+struct pt_regs;
+
+void __warn(const char *file, int line, void *caller, unsigned taint,
+ struct pt_regs *regs, struct warn_args *args);
#ifdef CONFIG_BUG
-#ifdef CONFIG_GENERIC_BUG
-struct bug_entry {
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
- unsigned long bug_addr;
+#define BUG_REL(type, name) type name
#else
- signed int bug_addr_disp;
+#define BUG_REL(type, name) signed int name##_disp
#endif
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
- const char *file;
-#else
- signed int file_disp;
+
+#ifdef CONFIG_GENERIC_BUG
+struct bug_entry {
+ BUG_REL(unsigned long, bug_addr);
+#ifdef HAVE_ARCH_BUG_FORMAT
+ BUG_REL(const char *, format);
#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+ BUG_REL(const char *, file);
unsigned short line;
#endif
unsigned short flags;
@@ -64,7 +82,7 @@ struct bug_entry {
#endif
/*
- * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
+ * WARN(), WARN_ON(), WARN_ON_ONCE(), and so on can be used to report
* significant kernel issues that need prompt attention if they should ever
* appear at runtime.
*
@@ -79,40 +97,58 @@ struct bug_entry {
*
* Use the versions with printk format strings to provide better diagnostics.
*/
-#ifndef __WARN_FLAGS
extern __printf(4, 5)
void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
const char *fmt, ...);
-#define __WARN() __WARN_printf(TAINT_WARN, NULL)
+extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
+
+#ifdef __WARN_FLAGS
+#define __WARN() __WARN_FLAGS("", BUGFLAG_TAINT(TAINT_WARN))
+
+#ifndef WARN_ON
+#define WARN_ON(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_FLAGS(#condition, \
+ BUGFLAG_TAINT(TAINT_WARN)); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#ifndef WARN_ON_ONCE
+#define WARN_ON_ONCE(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_FLAGS(#condition, \
+ BUGFLAG_ONCE | \
+ BUGFLAG_TAINT(TAINT_WARN)); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+#endif /* __WARN_FLAGS */
+
+#if defined(__WARN_FLAGS) && !defined(__WARN_printf)
#define __WARN_printf(taint, arg...) do { \
instrumentation_begin(); \
- warn_slowpath_fmt(__FILE__, __LINE__, taint, arg); \
+ __warn_printk(arg); \
+ __WARN_FLAGS("", BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
instrumentation_end(); \
} while (0)
-#else
-extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
-#define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
+#endif
+
+#ifndef __WARN_printf
#define __WARN_printf(taint, arg...) do { \
instrumentation_begin(); \
- __warn_printk(arg); \
- __WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
+ warn_slowpath_fmt(__FILE__, __LINE__, taint, arg); \
instrumentation_end(); \
} while (0)
-#define WARN_ON_ONCE(condition) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) \
- __WARN_FLAGS(BUGFLAG_ONCE | \
- BUGFLAG_TAINT(TAINT_WARN)); \
- unlikely(__ret_warn_on); \
-})
#endif
-/* used internally by panic.c */
-struct warn_args;
-struct pt_regs;
+#ifndef __WARN
+#define __WARN() __WARN_printf(TAINT_WARN, NULL)
+#endif
-void __warn(const char *file, int line, void *caller, unsigned taint,
- struct pt_regs *regs, struct warn_args *args);
+/* used internally by panic.c */
#ifndef WARN_ON
#define WARN_ON(condition) ({ \
@@ -140,43 +176,24 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
})
#ifndef WARN_ON_ONCE
-#define WARN_ON_ONCE(condition) ({ \
- static bool __section(".data.once") __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- WARN_ON(1); \
- } \
- unlikely(__ret_warn_once); \
-})
+#define WARN_ON_ONCE(condition) \
+ DO_ONCE_LITE_IF(condition, WARN_ON, 1)
#endif
-#define WARN_ONCE(condition, format...) ({ \
- static bool __section(".data.once") __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- WARN(1, format); \
- } \
- unlikely(__ret_warn_once); \
-})
+#ifndef WARN_ONCE
+#define WARN_ONCE(condition, format...) \
+ DO_ONCE_LITE_IF(condition, WARN, 1, format)
+#endif
-#define WARN_TAINT_ONCE(condition, taint, format...) ({ \
- static bool __section(".data.once") __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- WARN_TAINT(1, taint, format); \
- } \
- unlikely(__ret_warn_once); \
-})
+#define WARN_TAINT_ONCE(condition, taint, format...) \
+ DO_ONCE_LITE_IF(condition, WARN_TAINT, 1, taint, format)
#else /* !CONFIG_BUG */
#ifndef HAVE_ARCH_BUG
-#define BUG() do {} while (1)
+#define BUG() do { \
+ do {} while (1); \
+ unreachable(); \
+} while (0)
#endif
#ifndef HAVE_ARCH_BUG_ON
@@ -241,22 +258,6 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
# define WARN_ON_SMP(x) ({0;})
#endif
-/*
- * WARN_ON_FUNCTION_MISMATCH() warns if a value doesn't match a
- * function address, and can be useful for catching issues with
- * callback functions, for example.
- *
- * With CONFIG_CFI_CLANG, the warning is disabled because the
- * compiler replaces function addresses taken in C code with
- * local jump table addresses, which breaks cross-module function
- * address equality.
- */
-#if defined(CONFIG_CFI_CLANG) && defined(CONFIG_MODULES)
-# define WARN_ON_FUNCTION_MISMATCH(x, fn) ({ 0; })
-#else
-# define WARN_ON_FUNCTION_MISMATCH(x, fn) WARN_ON_ONCE((x) != (fn))
-#endif
-
#endif /* __ASSEMBLY__ */
#endif
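
Caller-visible behaviour is unchanged by routing WARN_ON_ONCE()/WARN_ONCE()
through DO_ONCE_LITE_IF(): the static once-flag now lives in one shared
helper from <linux/once_lite.h>. Typical use (toy_handler() is illustrative):

static int toy_handler(int val)
{
	if (WARN_ON_ONCE(val < 0))	/* warns on the first hit only */
		return -EINVAL;
	return val;
}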
diff --git a/include/asm-generic/bugs.h b/include/asm-generic/bugs.h
deleted file mode 100644
index 69021830f078..000000000000
--- a/include/asm-generic/bugs.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_GENERIC_BUGS_H
-#define __ASM_GENERIC_BUGS_H
-/*
- * This file is included by 'init/main.c' to check for
- * architecture-dependent bugs.
- */
-
-static inline void check_bugs(void) { }
-
-#endif /* __ASM_GENERIC_BUGS_H */
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 4a674db4e1fa..7ee8a179d103 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -2,6 +2,8 @@
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H
+#include <linux/instrumented.h>
+
struct mm_struct;
struct vm_area_struct;
struct page;
@@ -49,10 +51,10 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
static inline void flush_dcache_page(struct page *page)
{
}
+
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif
-
#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
@@ -75,13 +77,6 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
#define flush_icache_user_range flush_icache_range
#endif
-#ifndef flush_icache_page
-static inline void flush_icache_page(struct vm_area_struct *vma,
- struct page *page)
-{
-}
-#endif
-
#ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma,
struct page *page,
@@ -96,6 +91,12 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
}
#endif
+#ifndef flush_cache_vmap_early
+static inline void flush_cache_vmap_early(unsigned long start, unsigned long end)
+{
+}
+#endif
+
#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
@@ -105,14 +106,22 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
+ instrument_copy_to_user((void __user *)dst, src, len); \
memcpy(dst, src, len); \
flush_icache_user_page(vma, page, vaddr, len); \
} while (0)
#endif
+
#ifndef copy_from_user_page
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ instrument_copy_from_user_before(dst, (void __user *)src, \
+ len); \
+ memcpy(dst, src, len); \
+ instrument_copy_from_user_after(dst, (void __user *)src, len, \
+ 0); \
+ } while (0)
#endif
#endif /* _ASM_GENERIC_CACHEFLUSH_H */
diff --git a/include/asm-generic/cfi.h b/include/asm-generic/cfi.h
new file mode 100644
index 000000000000..41fac3537bf9
--- /dev/null
+++ b/include/asm-generic/cfi.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_CFI_H
+#define __ASM_GENERIC_CFI_H
+
+#endif /* __ASM_GENERIC_CFI_H */
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index 43e18db89c14..ad928cce268b 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -2,6 +2,8 @@
#ifndef __ASM_GENERIC_CHECKSUM_H
#define __ASM_GENERIC_CHECKSUM_H
+#include <linux/bitops.h>
+
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
@@ -31,9 +33,7 @@ extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
static inline __sum16 csum_fold(__wsum csum)
{
u32 sum = (__force u32)csum;
- sum = (sum & 0xffff) + (sum >> 16);
- sum = (sum & 0xffff) + (sum >> 16);
- return (__force __sum16)~sum;
+ return (__force __sum16)((~sum - ror32(sum, 16)) >> 16);
}
#endif
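
A worked check of the branch-free fold, for sum = 0x0001ffff, whose 16-bit
halves sum to 0x10000 and so exercise the carry:

/*
 *   ~sum                    = 0xfffe0000
 *   ror32(sum, 16)          = 0xffff0001
 *   ~sum - ror32(sum, 16)   = 0xfffeffff   (mod 2^32)
 *   ... >> 16               = 0xfffe
 *
 * matching the classic two-step fold:
 *   0x0001 + 0xffff = 0x10000; 0x0000 + 0x0001 = 0x0001; ~0x0001 = 0xfffe
 */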
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index f17f14f84d09..f27d66fdc00a 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -12,7 +12,7 @@ extern unsigned long wrong_size_cmpxchg(volatile void *ptr)
* Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned
* long parameter, supporting various types of architectures.
*/
-static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
+static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
unsigned long old, unsigned long new, int size)
{
unsigned long flags, prev;
@@ -26,16 +26,16 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
raw_local_irq_save(flags);
switch (size) {
case 1: prev = *(u8 *)ptr;
- if (prev == old)
- *(u8 *)ptr = (u8)new;
+ if (prev == (old & 0xffu))
+ *(u8 *)ptr = (new & 0xffu);
break;
case 2: prev = *(u16 *)ptr;
- if (prev == old)
- *(u16 *)ptr = (u16)new;
+ if (prev == (old & 0xffffu))
+ *(u16 *)ptr = (new & 0xffffu);
break;
case 4: prev = *(u32 *)ptr;
- if (prev == old)
- *(u32 *)ptr = (u32)new;
+ if (prev == (old & 0xffffffffu))
+ *(u32 *)ptr = (new & 0xffffffffu);
break;
case 8: prev = *(u64 *)ptr;
if (prev == old)
@@ -51,7 +51,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
/*
* Generic version of __cmpxchg64_local. Takes an u64 parameter.
*/
-static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
+static inline u64 __generic_cmpxchg64_local(volatile void *ptr,
u64 old, u64 new)
{
u64 prev;
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 9a24510cd8c1..848de25fc4bf 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -14,16 +14,14 @@
#include <linux/types.h>
#include <linux/irqflags.h>
-#ifndef xchg
-
/*
* This function doesn't exist, so you'll get a linker error if
* something tries to do an invalidly-sized xchg().
*/
-extern void __xchg_called_with_bad_pointer(void);
+extern void __generic_xchg_called_with_bad_pointer(void);
static inline
-unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
{
unsigned long ret, flags;
@@ -34,7 +32,7 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
#else
local_irq_save(flags);
ret = *(volatile u8 *)ptr;
- *(volatile u8 *)ptr = x;
+ *(volatile u8 *)ptr = (x & 0xffu);
local_irq_restore(flags);
return ret;
#endif /* __xchg_u8 */
@@ -45,7 +43,7 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
#else
local_irq_save(flags);
ret = *(volatile u16 *)ptr;
- *(volatile u16 *)ptr = x;
+ *(volatile u16 *)ptr = (x & 0xffffu);
local_irq_restore(flags);
return ret;
#endif /* __xchg_u16 */
@@ -56,7 +54,7 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
#else
local_irq_save(flags);
ret = *(volatile u32 *)ptr;
- *(volatile u32 *)ptr = x;
+ *(volatile u32 *)ptr = (x & 0xffffffffu);
local_irq_restore(flags);
return ret;
#endif /* __xchg_u32 */
@@ -75,35 +73,43 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
#endif /* CONFIG_64BIT */
default:
- __xchg_called_with_bad_pointer();
+ __generic_xchg_called_with_bad_pointer();
return x;
}
}
-#define xchg(ptr, x) ({ \
- ((__typeof__(*(ptr))) \
- __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
+#define generic_xchg(ptr, x) ({ \
+ ((__typeof__(*(ptr))) \
+ __generic_xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
})
-#endif /* xchg */
-
/*
* Atomic compare and exchange.
*/
#include <asm-generic/cmpxchg-local.h>
-#ifndef cmpxchg_local
-#define cmpxchg_local(ptr, o, n) ({ \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr)))); \
+#define generic_cmpxchg_local(ptr, o, n) ({ \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr)))); \
})
+
+#define generic_cmpxchg64_local(ptr, o, n) \
+ __generic_cmpxchg64_local((ptr), (o), (n))
+
+
+#ifndef arch_xchg
+#define arch_xchg generic_xchg
+#endif
+
+#ifndef arch_cmpxchg_local
+#define arch_cmpxchg_local generic_cmpxchg_local
#endif
-#ifndef cmpxchg64_local
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#ifndef arch_cmpxchg64_local
+#define arch_cmpxchg64_local generic_cmpxchg64_local
#endif
-#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg arch_cmpxchg_local
+#define arch_cmpxchg64 arch_cmpxchg64_local
#endif /* __ASM_GENERIC_CMPXCHG_H */
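
The net effect is a standard fallback chain: an architecture that provides
arch_xchg()/arch_cmpxchg*() keeps its own versions, otherwise the
interrupt-disabling generic_* implementations above apply. Callers are
unaffected (toy_swap() is illustrative):

static int toy_swap(int *slot, int newval)
{
	return arch_xchg(slot, newval);	/* returns the previous value */
}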
diff --git a/include/asm-generic/codetag.lds.h b/include/asm-generic/codetag.lds.h
new file mode 100644
index 000000000000..a14f4bdafdda
--- /dev/null
+++ b/include/asm-generic/codetag.lds.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_GENERIC_CODETAG_LDS_H
+#define __ASM_GENERIC_CODETAG_LDS_H
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+#define IF_MEM_ALLOC_PROFILING(...) __VA_ARGS__
+#else
+#define IF_MEM_ALLOC_PROFILING(...)
+#endif
+
+#define SECTION_WITH_BOUNDARIES(_name) \
+ . = ALIGN(8); \
+ __start_##_name = .; \
+ KEEP(*(_name)) \
+ __stop_##_name = .;
+
+#define CODETAG_SECTIONS() \
+ IF_MEM_ALLOC_PROFILING(SECTION_WITH_BOUNDARIES(alloc_tags))
+
+#define MOD_SEPARATE_CODETAG_SECTION(_name) \
+ .codetag.##_name : { \
+ SECTION_WITH_BOUNDARIES(_name) \
+ }
+
+/*
+ * For codetags which might be used after module unload and may therefore stay
+ * in memory longer. Each such codetag type has its own section so that we can
+ * unload them individually once unused.
+ */
+#define MOD_SEPARATE_CODETAG_SECTIONS() \
+ IF_MEM_ALLOC_PROFILING(MOD_SEPARATE_CODETAG_SECTION(alloc_tags))
+
+#endif /* __ASM_GENERIC_CODETAG_LDS_H */
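
The __start_/__stop_ symbols emitted by SECTION_WITH_BOUNDARIES() are
consumed from C as array bounds; a sketch with an illustrative tag layout
(the real struct lives in the codetag core):

struct toy_tag {
	unsigned long counter;		/* illustrative payload */
};

extern struct toy_tag __start_alloc_tags[];
extern struct toy_tag __stop_alloc_tags[];

static void for_each_toy_tag(void (*fn)(struct toy_tag *))
{
	struct toy_tag *tag;

	for (tag = __start_alloc_tags; tag < __stop_alloc_tags; tag++)
		fn(tag);
}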
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
index 30f7b18a36f9..8392caea398f 100644
--- a/include/asm-generic/compat.h
+++ b/include/asm-generic/compat.h
@@ -2,6 +2,30 @@
#ifndef __ASM_GENERIC_COMPAT_H
#define __ASM_GENERIC_COMPAT_H
+#ifndef COMPAT_USER_HZ
+#define COMPAT_USER_HZ 100
+#endif
+
+#ifndef COMPAT_RLIM_INFINITY
+#define COMPAT_RLIM_INFINITY 0xffffffff
+#endif
+
+#ifndef COMPAT_OFF_T_MAX
+#define COMPAT_OFF_T_MAX 0x7fffffff
+#endif
+
+#ifndef compat_arg_u64
+#ifndef CONFIG_CPU_BIG_ENDIAN
+#define compat_arg_u64(name) u32 name##_lo, u32 name##_hi
+#define compat_arg_u64_dual(name) u32, name##_lo, u32, name##_hi
+#else
+#define compat_arg_u64(name) u32 name##_hi, u32 name##_lo
+#define compat_arg_u64_dual(name) u32, name##_hi, u32, name##_lo
+#endif
+#define compat_arg_u64_glue(name) (((u64)name##_lo & 0xffffffffUL) | \
+ ((u64)name##_hi << 32))
+#endif /* compat_arg_u64 */
+
/* These types are common across all compat ABIs */
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
@@ -20,7 +44,23 @@ typedef u16 compat_ushort_t;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u32 compat_uptr_t;
+typedef u32 compat_caddr_t;
typedef u32 compat_aio_context_t;
+typedef u32 compat_old_sigset_t;
+
+#ifndef __compat_uid_t
+typedef u32 __compat_uid_t;
+typedef u32 __compat_gid_t;
+#endif
+
+#ifndef __compat_uid32_t
+typedef u32 __compat_uid32_t;
+typedef u32 __compat_gid32_t;
+#endif
+
+#ifndef compat_mode_t
+typedef u32 compat_mode_t;
+#endif
#ifdef CONFIG_COMPAT_FOR_U64_ALIGNMENT
typedef s64 __attribute__((aligned(4))) compat_s64;
@@ -30,4 +70,99 @@ typedef s64 compat_s64;
typedef u64 compat_u64;
#endif
+#ifndef _COMPAT_NSIG
+typedef u32 compat_sigset_word;
+#define _COMPAT_NSIG _NSIG
+#define _COMPAT_NSIG_BPW 32
+#endif
+
+#ifndef compat_dev_t
+typedef u32 compat_dev_t;
+#endif
+
+#ifndef compat_ipc_pid_t
+typedef s32 compat_ipc_pid_t;
+#endif
+
+#ifndef compat_fsid_t
+typedef __kernel_fsid_t compat_fsid_t;
+#endif
+
+#ifndef compat_statfs
+struct compat_statfs {
+ compat_int_t f_type;
+ compat_int_t f_bsize;
+ compat_int_t f_blocks;
+ compat_int_t f_bfree;
+ compat_int_t f_bavail;
+ compat_int_t f_files;
+ compat_int_t f_ffree;
+ compat_fsid_t f_fsid;
+ compat_int_t f_namelen;
+ compat_int_t f_frsize;
+ compat_int_t f_flags;
+ compat_int_t f_spare[4];
+};
+#endif
+
+#ifndef compat_ipc64_perm
+struct compat_ipc64_perm {
+ compat_key_t key;
+ __compat_uid32_t uid;
+ __compat_gid32_t gid;
+ __compat_uid32_t cuid;
+ __compat_gid32_t cgid;
+ compat_mode_t mode;
+ unsigned char __pad1[4 - sizeof(compat_mode_t)];
+ compat_ushort_t seq;
+ compat_ushort_t __pad2;
+ compat_ulong_t unused1;
+ compat_ulong_t unused2;
+};
+
+struct compat_semid64_ds {
+ struct compat_ipc64_perm sem_perm;
+ compat_ulong_t sem_otime;
+ compat_ulong_t sem_otime_high;
+ compat_ulong_t sem_ctime;
+ compat_ulong_t sem_ctime_high;
+ compat_ulong_t sem_nsems;
+ compat_ulong_t __unused3;
+ compat_ulong_t __unused4;
+};
+
+struct compat_msqid64_ds {
+ struct compat_ipc64_perm msg_perm;
+ compat_ulong_t msg_stime;
+ compat_ulong_t msg_stime_high;
+ compat_ulong_t msg_rtime;
+ compat_ulong_t msg_rtime_high;
+ compat_ulong_t msg_ctime;
+ compat_ulong_t msg_ctime_high;
+ compat_ulong_t msg_cbytes;
+ compat_ulong_t msg_qnum;
+ compat_ulong_t msg_qbytes;
+ compat_pid_t msg_lspid;
+ compat_pid_t msg_lrpid;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+ struct compat_ipc64_perm shm_perm;
+ compat_size_t shm_segsz;
+ compat_ulong_t shm_atime;
+ compat_ulong_t shm_atime_high;
+ compat_ulong_t shm_dtime;
+ compat_ulong_t shm_dtime_high;
+ compat_ulong_t shm_ctime;
+ compat_ulong_t shm_ctime_high;
+ compat_pid_t shm_cpid;
+ compat_pid_t shm_lpid;
+ compat_ulong_t shm_nattch;
+ compat_ulong_t __unused4;
+ compat_ulong_t __unused5;
+};
+#endif
+
#endif
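
compat_arg_u64() expands to the two 32-bit register halves of a 64-bit syscall argument, in the order the compat ABI delivers them, and compat_arg_u64_glue() reassembles the value. A little-endian round-trip sketch (the function name is hypothetical):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;
    typedef uint64_t u64;

    /* Little-endian variants, as in the hunk above */
    #define compat_arg_u64(name)      u32 name##_lo, u32 name##_hi
    #define compat_arg_u64_glue(name) (((u64)name##_lo & 0xffffffffUL) | \
                                       ((u64)name##_hi << 32))

    static u64 demo_compat_sys(compat_arg_u64(len))
    {
        return compat_arg_u64_glue(len);
    }

    int main(void)
    {
        /* 0x123456789abcdef0 arrives as lo=0x9abcdef0, hi=0x12345678 */
        printf("%#" PRIx64 "\n", demo_compat_sys(0x9abcdef0u, 0x12345678u));
        return 0;
    }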
diff --git a/include/asm-generic/current.h b/include/asm-generic/current.h
index 3a2e224b9fa0..9c2aeecbd05a 100644
--- a/include/asm-generic/current.h
+++ b/include/asm-generic/current.h
@@ -2,9 +2,11 @@
#ifndef __ASM_GENERIC_CURRENT_H
#define __ASM_GENERIC_CURRENT_H
+#ifndef __ASSEMBLY__
#include <linux/thread_info.h>
#define get_current() (current_thread_info()->task)
#define current get_current()
+#endif
#endif /* __ASM_GENERIC_CURRENT_H */
diff --git a/include/asm-generic/delay.h b/include/asm-generic/delay.h
index e448ac61430c..03b0ec7afca6 100644
--- a/include/asm-generic/delay.h
+++ b/include/asm-generic/delay.h
@@ -2,6 +2,9 @@
#ifndef __ASM_GENERIC_DELAY_H
#define __ASM_GENERIC_DELAY_H
+#include <linux/math.h>
+#include <vdso/time64.h>
+
/* Undefined functions to get compile-time errors */
extern void __bad_udelay(void);
extern void __bad_ndelay(void);
@@ -12,34 +15,73 @@ extern void __const_udelay(unsigned long xloops);
extern void __delay(unsigned long loops);
/*
- * The weird n/20000 thing suppresses a "comparison is always false due to
- * limited range of data type" warning with non-const 8-bit arguments.
+ * The microsecond/nanosecond delay multipliers are used to convert a
+ * constant microsecond/nanosecond value into a value which the
+ * architecture-specific implementation can transform into loops.
+ */
+#define UDELAY_CONST_MULT ((unsigned long)DIV_ROUND_UP(1ULL << 32, USEC_PER_SEC))
+#define NDELAY_CONST_MULT ((unsigned long)DIV_ROUND_UP(1ULL << 32, NSEC_PER_SEC))
+
+/*
+ * The maximum constant udelay/ndelay value picked out of thin air to prevent
+ * too long constant udelays/ndelays.
*/
+#define DELAY_CONST_MAX 20000
-/* 0x10c7 is 2**32 / 1000000 (rounded up) */
-#define udelay(n) \
- ({ \
- if (__builtin_constant_p(n)) { \
- if ((n) / 20000 >= 1) \
- __bad_udelay(); \
- else \
- __const_udelay((n) * 0x10c7ul); \
- } else { \
- __udelay(n); \
- } \
- })
-
-/* 0x5 is 2**32 / 1000000000 (rounded up) */
-#define ndelay(n) \
- ({ \
- if (__builtin_constant_p(n)) { \
- if ((n) / 20000 >= 1) \
- __bad_ndelay(); \
- else \
- __const_udelay((n) * 5ul); \
- } else { \
- __ndelay(n); \
- } \
- })
+/**
+ * udelay - Inserting a delay based on microseconds with busy waiting
+ * @usec: requested delay in microseconds
+ *
+ * When delaying in atomic context, ndelay(), udelay() and mdelay() are the
+ * only valid ways of delaying; sleeping is not an option.
+ *
+ * In non-atomic context, udelay() is also worthwhile for delays shorter than
+ * the time required to queue e.g. an hrtimer and then enter the scheduler.
+ * There is no simple generic threshold for this which fits all systems; as an
+ * approximation, use udelay() for delays up to 10 microseconds.
+ *
+ * For delays larger than the architecture-specific %MAX_UDELAY_MS value,
+ * please make sure mdelay() is used; otherwise there is a risk of overflow.
+ *
+ * Please note that ndelay(), udelay() and mdelay() may return early for several
+ * reasons (https://lists.openwall.net/linux-kernel/2011/01/09/56):
+ *
+ * #. computed loops_per_jiffy too low (due to the time taken to execute the
+ *    timer interrupt).
+ * #. cache behaviour affecting the time it takes to execute the loop function.
+ * #. CPU clock rate changes.
+ */
+static __always_inline void udelay(unsigned long usec)
+{
+ if (__builtin_constant_p(usec)) {
+ if (usec >= DELAY_CONST_MAX)
+ __bad_udelay();
+ else
+ __const_udelay(usec * UDELAY_CONST_MULT);
+ } else {
+ __udelay(usec);
+ }
+}
+
+/**
+ * ndelay - Inserting a delay based on nanoseconds with busy waiting
+ * @nsec: requested delay in nanoseconds
+ *
+ * See udelay() for basic information about ndelay() and its variants.
+ */
+static __always_inline void ndelay(unsigned long nsec)
+{
+ if (__builtin_constant_p(nsec)) {
+ if (nsec >= DELAY_CONST_MAX)
+ __bad_ndelay();
+ else
+ __const_udelay(nsec * NDELAY_CONST_MULT);
+ } else {
+ __ndelay(nsec);
+ }
+}
+#define ndelay(x) ndelay(x)
#endif /* __ASM_GENERIC_DELAY_H */
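
The DIV_ROUND_UP() expressions reproduce the constants the old macros hard-coded: 0x10c7 is 2^32 / 10^6 rounded up, and 5 is 2^32 / 10^9 rounded up. A quick check:

    /* Check: the new DIV_ROUND_UP expressions yield the old hard-coded
     * constants 0x10c7 and 5. */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long u = (unsigned long)DIV_ROUND_UP(1ULL << 32, 1000000ULL);
        unsigned long n = (unsigned long)DIV_ROUND_UP(1ULL << 32, 1000000000ULL);

        printf("UDELAY_CONST_MULT = %#lx\n", u);    /* 0x10c7 */
        printf("NDELAY_CONST_MULT = %lu\n", n);     /* 5 */
        return 0;
    }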
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
index cd905b44a630..25e7b4b58dcf 100644
--- a/include/asm-generic/div64.h
+++ b/include/asm-generic/div64.h
@@ -57,17 +57,11 @@
/*
* If the divisor happens to be constant, we determine the appropriate
* inverse at compile time to turn the division into a few inline
- * multiplications which ought to be much faster. And yet only if compiling
- * with a sufficiently recent gcc version to perform proper 64-bit constant
- * propagation.
+ * multiplications which ought to be much faster.
*
* (It is unfortunate that gcc doesn't perform all this internally.)
*/
-#ifndef __div64_const32_is_OK
-#define __div64_const32_is_OK (__GNUC__ >= 4)
-#endif
-
#define __div64_const32(n, ___b) \
({ \
/* \
@@ -80,7 +74,8 @@
* do the trick here). \
*/ \
uint64_t ___res, ___x, ___t, ___m, ___n = (n); \
- uint32_t ___p, ___bias; \
+ uint32_t ___p; \
+ bool ___bias = false; \
\
/* determine MSB of b */ \
___p = 1 << ilog2(___b); \
@@ -93,22 +88,14 @@
___x = ~0ULL / ___b * ___b - 1; \
\
/* test our ___m with res = m * x / (p << 64) */ \
- ___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32; \
- ___t = ___res += (___m & 0xffffffff) * (___x >> 32); \
- ___res += (___x & 0xffffffff) * (___m >> 32); \
- ___t = (___res < ___t) ? (1ULL << 32) : 0; \
- ___res = (___res >> 32) + ___t; \
- ___res += (___m >> 32) * (___x >> 32); \
- ___res /= ___p; \
+ ___res = (___m & 0xffffffff) * (___x & 0xffffffff); \
+ ___t = (___m & 0xffffffff) * (___x >> 32) + (___res >> 32); \
+ ___res = (___m >> 32) * (___x >> 32) + (___t >> 32); \
+ ___t = (___m >> 32) * (___x & 0xffffffff) + (___t & 0xffffffff);\
+ ___res = (___res + (___t >> 32)) / ___p; \
\
- /* Now sanitize and optimize what we've got. */ \
- if (~0ULL % (___b / (___b & -___b)) == 0) { \
- /* special case, can be simplified to ... */ \
- ___n /= (___b & -___b); \
- ___m = ~0ULL / (___b / (___b & -___b)); \
- ___p = 1; \
- ___bias = 1; \
- } else if (___res != ___x / ___b) { \
+ /* Now validate what we've got. */ \
+ if (___res != ___x / ___b) { \
/* \
* We can't get away without a bias to compensate \
* for bit truncation errors. To avoid it we'd need an \
@@ -117,45 +104,18 @@
* \
* Instead we do m = p / b and n / b = (n * m + m) / p. \
*/ \
- ___bias = 1; \
+ ___bias = true; \
/* Compute m = (p << 64) / b */ \
___m = (~0ULL / ___b) * ___p; \
___m += ((~0ULL % ___b + 1) * ___p) / ___b; \
- } else { \
- /* \
- * Reduce m / p, and try to clear bit 31 of m when \
- * possible, otherwise that'll need extra overflow \
- * handling later. \
- */ \
- uint32_t ___bits = -(___m & -___m); \
- ___bits |= ___m >> 32; \
- ___bits = (~___bits) << 1; \
- /* \
- * If ___bits == 0 then setting bit 31 is unavoidable. \
- * Simply apply the maximum possible reduction in that \
- * case. Otherwise the MSB of ___bits indicates the \
- * best reduction we should apply. \
- */ \
- if (!___bits) { \
- ___p /= (___m & -___m); \
- ___m /= (___m & -___m); \
- } else { \
- ___p >>= ilog2(___bits); \
- ___m >>= ilog2(___bits); \
- } \
- /* No bias needed. */ \
- ___bias = 0; \
} \
\
+ /* Reduce m / p to help avoid overflow handling later. */ \
+ ___p /= (___m & -___m); \
+ ___m /= (___m & -___m); \
+ \
/* \
- * Now we have a combination of 2 conditions: \
- * \
- * 1) whether or not we need to apply a bias, and \
- * \
- * 2) whether or not there might be an overflow in the cross \
- * product determined by (___m & ((1 << 63) | (1 << 31))). \
- * \
- * Select the best way to do (m_bias + m * n) / (1 << 64). \
+ * Perform (m_bias + m * n) / (1 << 64). \
* From now on there will be actual runtime code generated. \
*/ \
___res = __arch_xprod_64(___m, ___n, ___bias); \
@@ -171,47 +131,42 @@
* Semantic: retval = ((bias ? m : 0) + m * n) >> 64
*
* The product is a 128-bit value, scaled down to 64 bits.
- * Assuming constant propagation to optimize away unused conditional code.
+ * Hoping for compile-time optimization of conditional code.
* Architectures may provide their own optimized assembly implementation.
*/
-static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
+#ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
+static __always_inline
+#else
+static inline
+#endif
+uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
{
uint32_t m_lo = m;
uint32_t m_hi = m >> 32;
uint32_t n_lo = n;
uint32_t n_hi = n >> 32;
- uint64_t res;
- uint32_t res_lo, res_hi, tmp;
-
- if (!bias) {
- res = ((uint64_t)m_lo * n_lo) >> 32;
- } else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
- /* there can't be any overflow here */
- res = (m + (uint64_t)m_lo * n_lo) >> 32;
+ uint64_t x, y;
+
+ /* Determine if overflow handling can be dispensed with. */
+ bool no_ovf = __builtin_constant_p(m) &&
+ ((m >> 32) + (m & 0xffffffff) < 0x100000000);
+
+ if (no_ovf) {
+ x = (uint64_t)m_lo * n_lo + (bias ? m : 0);
+ x >>= 32;
+ x += (uint64_t)m_lo * n_hi;
+ x += (uint64_t)m_hi * n_lo;
+ x >>= 32;
+ x += (uint64_t)m_hi * n_hi;
} else {
- res = m + (uint64_t)m_lo * n_lo;
- res_lo = res >> 32;
- res_hi = (res_lo < m_hi);
- res = res_lo | ((uint64_t)res_hi << 32);
+ x = (uint64_t)m_lo * n_lo + (bias ? m_lo : 0);
+ y = (uint64_t)m_lo * n_hi + (uint32_t)(x >> 32) + (bias ? m_hi : 0);
+ x = (uint64_t)m_hi * n_hi + (uint32_t)(y >> 32);
+ y = (uint64_t)m_hi * n_lo + (uint32_t)y;
+ x += (uint32_t)(y >> 32);
}
- if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
- /* there can't be any overflow here */
- res += (uint64_t)m_lo * n_hi;
- res += (uint64_t)m_hi * n_lo;
- res >>= 32;
- } else {
- res += (uint64_t)m_lo * n_hi;
- tmp = res >> 32;
- res += (uint64_t)m_hi * n_lo;
- res_lo = res >> 32;
- res_hi = (res_lo < tmp);
- res = res_lo | ((uint64_t)res_hi << 32);
- }
-
- res += (uint64_t)m_hi * n_hi;
-
- return res;
+ return x;
}
#endif
@@ -230,8 +185,7 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
is_power_of_2(__base)) { \
__rem = (n) & (__base - 1); \
(n) >>= ilog2(__base); \
- } else if (__div64_const32_is_OK && \
- __builtin_constant_p(__base) && \
+ } else if (__builtin_constant_p(__base) && \
__base != 0) { \
uint32_t __res_lo, __n_lo = (n); \
(n) = __div64_const32(n, __base); \
@@ -241,8 +195,9 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
} else if (likely(((n) >> 32) == 0)) { \
__rem = (uint32_t)(n) % __base; \
(n) = (uint32_t)(n) / __base; \
- } else \
+ } else { \
__rem = __div64_32(&(n), __base); \
+ } \
__rem; \
})
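
The rewritten __arch_xprod_64() sums the four 32-bit partial products so that only the carries propagate upward, producing ((bias ? m : 0) + m * n) >> 64 without needing a 128-bit type. A userspace check of the general branch against a native 128-bit multiply:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The general (overflow-handling) branch of __arch_xprod_64() */
    static uint64_t xprod_64(uint64_t m, uint64_t n, bool bias)
    {
        uint32_t m_lo = m, m_hi = m >> 32, n_lo = n, n_hi = n >> 32;
        uint64_t x, y;

        x = (uint64_t)m_lo * n_lo + (bias ? m_lo : 0);
        y = (uint64_t)m_lo * n_hi + (uint32_t)(x >> 32) + (bias ? m_hi : 0);
        x = (uint64_t)m_hi * n_hi + (uint32_t)(y >> 32);
        y = (uint64_t)m_hi * n_lo + (uint32_t)y;
        return x + (uint32_t)(y >> 32);
    }

    int main(void)
    {
        uint64_t m = 0x8000000080000001ULL, n = 0xfedcba9876543210ULL;

        for (int bias = 0; bias <= 1; bias++) {
            unsigned __int128 s = (unsigned __int128)m * n + (bias ? m : 0);

            printf("bias=%d match=%d\n", bias,
                   xprod_64(m, n, bias) == (uint64_t)(s >> 64));
        }
        return 0;
    }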
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index c13f46109e88..46a0016efd81 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -2,7 +2,7 @@
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+static inline const struct dma_map_ops *get_arch_dma_ops(void)
{
return NULL;
}
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
index 9def22e6e2b3..5db59a1efb65 100644
--- a/include/asm-generic/early_ioremap.h
+++ b/include/asm-generic/early_ioremap.h
@@ -19,12 +19,6 @@ extern void *early_memremap_prot(resource_size_t phys_addr,
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void early_memunmap(void *addr, unsigned long size);
-/*
- * Weak function called by early_ioremap_reset(). It does nothing, but
- * architectures may provide their own version to do any needed cleanups.
- */
-extern void early_ioremap_shutdown(void);
-
#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
/* Arch-specific initialization */
extern void early_ioremap_init(void);
@@ -41,7 +35,7 @@ extern void early_ioremap_reset(void);
/*
* Early copy from unmapped memory to kernel mapped memory.
*/
-extern void copy_from_early_mem(void *dest, phys_addr_t src,
+extern int copy_from_early_mem(void *dest, phys_addr_t src,
unsigned long size);
#else
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
index 7ddd9dc10ce9..b05253f68eaa 100644
--- a/include/asm-generic/error-injection.h
+++ b/include/asm-generic/error-injection.h
@@ -4,7 +4,6 @@
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
enum {
- EI_ETYPE_NONE, /* Dummy value for undefined case */
EI_ETYPE_NULL, /* Return NULL if failure */
EI_ETYPE_ERRNO, /* Return -ERRNO if failure */
EI_ETYPE_ERRNO_NULL, /* Return -ERRNO or NULL if failure */
@@ -20,8 +19,10 @@ struct pt_regs;
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
/*
- * Whitelist ganerating macro. Specify functions which can be
- * error-injectable using this macro.
+ * Whitelist generating macro. Specify functions which can be error-injectable
+ * using this macro. If you are unsure what is required of error-injectable
+ * functions, please read the 'Error Injectable Functions' section of
+ * Documentation/fault-injection/fault-injection.rst.
*/
#define ALLOW_ERROR_INJECTION(fname, _etype) \
static struct error_injection_entry __used \
@@ -29,7 +30,7 @@ static struct error_injection_entry __used \
_eil_addr_##fname = { \
.addr = (unsigned long)fname, \
.etype = EI_ETYPE_##_etype, \
- };
+ }
void override_function_with_return(struct pt_regs *regs);
#else
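
With the trailing semicolon dropped from the macro body, each ALLOW_ERROR_INJECTION() use now supplies its own, like an ordinary declaration, and the dummy EI_ETYPE_NONE value is gone. A userspace mock of what the macro emits (the section name matches the kernel's; the function is made up):

    #include <stdio.h>

    struct error_injection_entry {
        unsigned long addr;
        int etype;
    };

    enum { EI_ETYPE_NULL, EI_ETYPE_ERRNO, EI_ETYPE_ERRNO_NULL, EI_ETYPE_TRUE };

    /* Emit one whitelist entry per error-injectable function */
    #define ALLOW_ERROR_INJECTION(fname, _etype)                        \
        static struct error_injection_entry __attribute__((used,       \
                section("_error_injection_whitelist")))                 \
            _eil_addr_##fname = {                                       \
                .addr = (unsigned long)fname,                           \
                .etype = EI_ETYPE_##_etype,                             \
            }

    static int demo_open(void) { return 0; }   /* stand-in function */
    ALLOW_ERROR_INJECTION(demo_open, ERRNO);

    int main(void)
    {
        printf("entry: addr=%#lx etype=%d\n",
               _eil_addr_demo_open.addr, _eil_addr_demo_open.etype);
        return 0;
    }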
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
deleted file mode 100644
index 07a36a874dca..000000000000
--- a/include/asm-generic/export.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef __ASM_GENERIC_EXPORT_H
-#define __ASM_GENERIC_EXPORT_H
-
-#ifndef KSYM_FUNC
-#define KSYM_FUNC(x) x
-#endif
-#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
-#define KSYM_ALIGN 4
-#elif defined(CONFIG_64BIT)
-#define KSYM_ALIGN 8
-#else
-#define KSYM_ALIGN 4
-#endif
-#ifndef KCRC_ALIGN
-#define KCRC_ALIGN 4
-#endif
-
-.macro __put, val, name
-#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
- .long \val - ., \name - ., 0
-#elif defined(CONFIG_64BIT)
- .quad \val, \name, 0
-#else
- .long \val, \name, 0
-#endif
-.endm
-
-/*
- * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
- * section flag requires it. Use '%progbits' instead of '@progbits' since the
- * former apparently works on all arches according to the binutils source.
- */
-
-.macro ___EXPORT_SYMBOL name,val,sec
-#if defined(CONFIG_MODULES) && !defined(__DISABLE_EXPORTS)
- .section ___ksymtab\sec+\name,"a"
- .balign KSYM_ALIGN
-__ksymtab_\name:
- __put \val, __kstrtab_\name
- .previous
- .section __ksymtab_strings,"aMS",%progbits,1
-__kstrtab_\name:
- .asciz "\name"
- .previous
-#ifdef CONFIG_MODVERSIONS
- .section ___kcrctab\sec+\name,"a"
- .balign KCRC_ALIGN
-#if defined(CONFIG_MODULE_REL_CRCS)
- .long __crc_\name - .
-#else
- .long __crc_\name
-#endif
- .weak __crc_\name
- .previous
-#endif
-#endif
-.endm
-
-#if defined(CONFIG_TRIM_UNUSED_KSYMS)
-
-#include <linux/kconfig.h>
-#include <generated/autoksyms.h>
-
-.macro __ksym_marker sym
- .section ".discard.ksym","a"
-__ksym_marker_\sym:
- .previous
-.endm
-
-#define __EXPORT_SYMBOL(sym, val, sec) \
- __ksym_marker sym; \
- __cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
-#define __cond_export_sym(sym, val, sec, conf) \
- ___cond_export_sym(sym, val, sec, conf)
-#define ___cond_export_sym(sym, val, sec, enabled) \
- __cond_export_sym_##enabled(sym, val, sec)
-#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
-#define __cond_export_sym_0(sym, val, sec) /* nothing */
-
-#else
-#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
-#endif
-
-#define EXPORT_SYMBOL(name) \
- __EXPORT_SYMBOL(name, KSYM_FUNC(name),)
-#define EXPORT_SYMBOL_GPL(name) \
- __EXPORT_SYMBOL(name, KSYM_FUNC(name), _gpl)
-#define EXPORT_DATA_SYMBOL(name) \
- __EXPORT_SYMBOL(name, name,)
-#define EXPORT_DATA_SYMBOL_GPL(name) \
- __EXPORT_SYMBOL(name, name,_gpl)
-
-#endif
diff --git a/include/asm-generic/fb.h b/include/asm-generic/fb.h
deleted file mode 100644
index f9f18101ed36..000000000000
--- a/include/asm-generic/fb.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_GENERIC_FB_H_
-#define __ASM_GENERIC_FB_H_
-#include <linux/fb.h>
-
-#define fb_pgprotect(...) do {} while (0)
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
- return 0;
-}
-
-#endif /* __ASM_GENERIC_FB_H_ */
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index 8cc7b09c1bc7..29cab7947980 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -97,8 +97,5 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#define set_fixmap_io(idx, phys) \
__set_fixmap(idx, phys, FIXMAP_PAGE_IO)
-#define set_fixmap_offset_io(idx, phys) \
- __set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO)
-
#endif /* __ASSEMBLY__ */
#endif /* __ASM_GENERIC_FIXMAP_H */
diff --git a/include/asm-generic/fprobe.h b/include/asm-generic/fprobe.h
new file mode 100644
index 000000000000..8659a4dc6eb6
--- /dev/null
+++ b/include/asm-generic/fprobe.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Generic arch dependent fprobe macros.
+ */
+#ifndef __ASM_GENERIC_FPROBE_H__
+#define __ASM_GENERIC_FPROBE_H__
+
+#include <linux/bits.h>
+
+#ifdef CONFIG_64BIT
+/*
+ * Encode the size and the address of an fprobe into one 64-bit entry.
+ * 32-bit architectures should use two entries to store this information.
+ */
+
+#define ARCH_DEFINE_ENCODE_FPROBE_HEADER
+
+#define FPROBE_HEADER_MSB_SIZE_SHIFT (BITS_PER_LONG - FPROBE_DATA_SIZE_BITS)
+#define FPROBE_HEADER_MSB_MASK \
+ GENMASK(FPROBE_HEADER_MSB_SIZE_SHIFT - 1, 0)
+
+/*
+ * By default, this expects the MSBs of the fprobe address to be 0xf.
+ * If any arch needs another fixed pattern (e.g. s390 is zero-filled),
+ * override this.
+ */
+#define FPROBE_HEADER_MSB_PATTERN \
+ GENMASK(BITS_PER_LONG - 1, FPROBE_HEADER_MSB_SIZE_SHIFT)
+
+#define arch_fprobe_header_encodable(fp) \
+ (((unsigned long)(fp) & ~FPROBE_HEADER_MSB_MASK) == \
+ FPROBE_HEADER_MSB_PATTERN)
+
+#define arch_encode_fprobe_header(fp, size) \
+ (((unsigned long)(fp) & FPROBE_HEADER_MSB_MASK) | \
+ ((unsigned long)(size) << FPROBE_HEADER_MSB_SIZE_SHIFT))
+
+#define arch_decode_fprobe_header_size(val) \
+ ((unsigned long)(val) >> FPROBE_HEADER_MSB_SIZE_SHIFT)
+
+#define arch_decode_fprobe_header_fp(val) \
+ ((struct fprobe *)(((unsigned long)(val) & FPROBE_HEADER_MSB_MASK) | \
+ FPROBE_HEADER_MSB_PATTERN))
+#endif /* CONFIG_64BIT */
+
+#endif /* __ASM_GENERIC_FPROBE_H__ */
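
A round-trip sketch of the encoding on a 64-bit build; FPROBE_DATA_SIZE_BITS belongs to the fprobe core, and the value 4 is assumed here purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_LONG           64
    #define FPROBE_DATA_SIZE_BITS   4   /* assumed for illustration */

    #define SIZE_SHIFT  (BITS_PER_LONG - FPROBE_DATA_SIZE_BITS)
    #define MSB_MASK    ((1UL << SIZE_SHIFT) - 1)   /* GENMASK(59, 0) */
    #define MSB_PATTERN (~MSB_MASK)                 /* GENMASK(63, 60) */

    /* Replace the pointer's fixed 0xf... MSBs with the size */
    static unsigned long encode(unsigned long fp, unsigned long size)
    {
        return (fp & MSB_MASK) | (size << SIZE_SHIFT);
    }

    int main(void)
    {
        unsigned long fp = 0xffff8000deadbee0UL, val = encode(fp, 3);
        unsigned long size = val >> SIZE_SHIFT;             /* decode size */
        unsigned long back = (val & MSB_MASK) | MSB_PATTERN; /* decode fp */

        printf("size=%lu fp_ok=%d\n", size, back == fp);    /* 3, 1 */
        return 0;
    }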
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index f4c3470480c7..2a19215baae5 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -6,15 +6,22 @@
#include <linux/uaccess.h>
#include <asm/errno.h>
+#ifndef futex_atomic_cmpxchg_inatomic
#ifndef CONFIG_SMP
/*
* The following implementation only for uniprocessor machines.
* It relies on preempt_disable() ensuring mutual exclusion.
*
*/
+#define futex_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval) \
+ futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval)
+#define arch_futex_atomic_op_inuser(op, oparg, oval, uaddr) \
+ futex_atomic_op_inuser_local(op, oparg, oval, uaddr)
+#endif /* CONFIG_SMP */
+#endif
/**
- * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * futex_atomic_op_inuser_local() - Atomic arithmetic operation with constant
* argument and comparison of the previous
* futex value with another constant.
*
@@ -28,7 +35,7 @@
* -ENOSYS - Operation not supported
*/
static inline int
-arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+futex_atomic_op_inuser_local(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
int oldval, ret;
u32 tmp;
@@ -75,7 +82,7 @@ out_pagefault_enable:
}
/**
- * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
+ * futex_atomic_cmpxchg_inatomic_local() - Compare and exchange the content of the
* uaddr with newval if the current value is
* oldval.
* @uval: pointer to store content of @uaddr
@@ -87,10 +94,9 @@ out_pagefault_enable:
* 0 - On success
* -EFAULT - User access resulted in a page fault
* -EAGAIN - Atomic operation was unable to complete due to contention
- * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
*/
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+futex_atomic_cmpxchg_inatomic_local(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
u32 val;
@@ -112,19 +118,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return 0;
}
-#else
-static inline int
-arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
-{
- return -ENOSYS;
-}
-
-static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
-{
- return -ENOSYS;
-}
-
-#endif /* CONFIG_SMP */
#endif
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
deleted file mode 100644
index aea9aee1f3e9..000000000000
--- a/include/asm-generic/gpio.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_GPIO_H
-#define _ASM_GENERIC_GPIO_H
-
-#include <linux/types.h>
-#include <linux/errno.h>
-
-#ifdef CONFIG_GPIOLIB
-
-#include <linux/compiler.h>
-#include <linux/gpio/driver.h>
-#include <linux/gpio/consumer.h>
-
-/* Platforms may implement their GPIO interface with library code,
- * at a small performance cost for non-inlined operations and some
- * extra memory (for code and for per-GPIO table entries).
- *
- * While the GPIO programming interface defines valid GPIO numbers
- * to be in the range 0..MAX_INT, this library restricts them to the
- * smaller range 0..ARCH_NR_GPIOS-1.
- *
- * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of
- * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is
- * actually an estimate of a board-specific value.
- */
-
-#ifndef ARCH_NR_GPIOS
-#if defined(CONFIG_ARCH_NR_GPIO) && CONFIG_ARCH_NR_GPIO > 0
-#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
-#else
-#define ARCH_NR_GPIOS 512
-#endif
-#endif
-
-/*
- * "valid" GPIO numbers are nonnegative and may be passed to
- * setup routines like gpio_request(). only some valid numbers
- * can successfully be requested and used.
- *
- * Invalid GPIO numbers are useful for indicating no-such-GPIO in
- * platform data and other tables.
- */
-
-static inline bool gpio_is_valid(int number)
-{
- return number >= 0 && number < ARCH_NR_GPIOS;
-}
-
-struct device;
-struct gpio;
-struct seq_file;
-struct module;
-struct device_node;
-struct gpio_desc;
-
-/* caller holds gpio_lock *OR* gpio is marked as requested */
-static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
-{
- return gpiod_to_chip(gpio_to_desc(gpio));
-}
-
-/* Always use the library code for GPIO management calls,
- * or when sleeping may be involved.
- */
-extern int gpio_request(unsigned gpio, const char *label);
-extern void gpio_free(unsigned gpio);
-
-static inline int gpio_direction_input(unsigned gpio)
-{
- return gpiod_direction_input(gpio_to_desc(gpio));
-}
-static inline int gpio_direction_output(unsigned gpio, int value)
-{
- return gpiod_direction_output_raw(gpio_to_desc(gpio), value);
-}
-
-static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
-{
- return gpiod_set_debounce(gpio_to_desc(gpio), debounce);
-}
-
-static inline int gpio_get_value_cansleep(unsigned gpio)
-{
- return gpiod_get_raw_value_cansleep(gpio_to_desc(gpio));
-}
-static inline void gpio_set_value_cansleep(unsigned gpio, int value)
-{
- return gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value);
-}
-
-
-/* A platform's <asm/gpio.h> code may want to inline the I/O calls when
- * the GPIO is constant and refers to some always-present controller,
- * giving direct access to chip registers and tight bitbanging loops.
- */
-static inline int __gpio_get_value(unsigned gpio)
-{
- return gpiod_get_raw_value(gpio_to_desc(gpio));
-}
-static inline void __gpio_set_value(unsigned gpio, int value)
-{
- return gpiod_set_raw_value(gpio_to_desc(gpio), value);
-}
-
-static inline int __gpio_cansleep(unsigned gpio)
-{
- return gpiod_cansleep(gpio_to_desc(gpio));
-}
-
-static inline int __gpio_to_irq(unsigned gpio)
-{
- return gpiod_to_irq(gpio_to_desc(gpio));
-}
-
-extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
-extern int gpio_request_array(const struct gpio *array, size_t num);
-extern void gpio_free_array(const struct gpio *array, size_t num);
-
-/*
- * A sysfs interface can be exported by individual drivers if they want,
- * but more typically is configured entirely from userspace.
- */
-static inline int gpio_export(unsigned gpio, bool direction_may_change)
-{
- return gpiod_export(gpio_to_desc(gpio), direction_may_change);
-}
-
-static inline int gpio_export_link(struct device *dev, const char *name,
- unsigned gpio)
-{
- return gpiod_export_link(dev, name, gpio_to_desc(gpio));
-}
-
-static inline void gpio_unexport(unsigned gpio)
-{
- gpiod_unexport(gpio_to_desc(gpio));
-}
-
-#else /* !CONFIG_GPIOLIB */
-
-#include <linux/kernel.h>
-
-static inline bool gpio_is_valid(int number)
-{
- /* only non-negative numbers are valid */
- return number >= 0;
-}
-
-/* platforms that don't directly support access to GPIOs through I2C, SPI,
- * or other blocking infrastructure can use these wrappers.
- */
-
-static inline int gpio_cansleep(unsigned gpio)
-{
- return 0;
-}
-
-static inline int gpio_get_value_cansleep(unsigned gpio)
-{
- might_sleep();
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value_cansleep(unsigned gpio, int value)
-{
- might_sleep();
- __gpio_set_value(gpio, value);
-}
-
-#endif /* !CONFIG_GPIOLIB */
-
-#endif /* _ASM_GENERIC_GPIO_H */
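
Callers of the removed integer-based wrappers use the descriptor-based consumer API from <linux/gpio/consumer.h> instead. A rough sketch of the mapping (the "reset" con_id and the probe function are hypothetical, error handling trimmed):

    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int demo_probe(struct device *dev)
    {
        struct gpio_desc *reset;

        /* was: gpio_request() + gpio_direction_output() */
        reset = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(reset))
            return PTR_ERR(reset);

        gpiod_set_value(reset, 1);  /* was: gpio_set_value() */
        gpiod_put(reset);           /* was: gpio_free() */
        return 0;
    }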
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 8e1e6244a89d..e1a2e1b7c8e7 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -2,10 +2,8 @@
#ifndef _ASM_GENERIC_HUGETLB_H
#define _ASM_GENERIC_HUGETLB_H
-static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
-{
- return mk_pte(page, pgprot);
-}
+#include <linux/swap.h>
+#include <linux/swapops.h>
static inline unsigned long huge_pte_write(pte_t pte)
{
@@ -19,8 +17,15 @@ static inline unsigned long huge_pte_dirty(pte_t pte)
static inline pte_t huge_pte_mkwrite(pte_t pte)
{
- return pte_mkwrite(pte);
+ return pte_mkwrite_novma(pte);
+}
+
+#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
}
+#endif
static inline pte_t huge_pte_mkdirty(pte_t pte)
{
@@ -32,26 +37,38 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
return pte_modify(pte, newprot);
}
-#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR
-static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, unsigned long sz)
+#ifndef __HAVE_ARCH_HUGE_PTE_MKUFFD_WP
+static inline pte_t huge_pte_mkuffd_wp(pte_t pte)
{
- pte_clear(mm, addr, ptep);
+ return huge_pte_wrprotect(pte_mkuffd_wp(pte));
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR_UFFD_WP
+static inline pte_t huge_pte_clear_uffd_wp(pte_t pte)
+{
+ return pte_clear_uffd_wp(pte);
}
#endif
-#ifndef __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
-static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
- unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
+#ifndef __HAVE_ARCH_HUGE_PTE_UFFD_WP
+static inline int huge_pte_uffd_wp(pte_t pte)
{
- free_pgd_range(tlb, addr, end, floor, ceiling);
+ return pte_uffd_wp(pte);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz)
+{
+ pte_clear(mm, addr, ptep);
}
#endif
#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+ pte_t *ptep, pte_t pte, unsigned long sz)
{
set_pte_at(mm, addr, ptep, pte);
}
@@ -59,17 +76,17 @@ static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
#ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep, unsigned long sz)
{
return ptep_get_and_clear(mm, addr, ptep);
}
#endif
#ifndef __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
- ptep_clear_flush(vma, addr, ptep);
+ return ptep_clear_flush(vma, addr, ptep);
}
#endif
@@ -80,28 +97,6 @@ static inline int huge_pte_none(pte_t pte)
}
#endif
-#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
-static inline pte_t huge_pte_wrprotect(pte_t pte)
-{
- return pte_wrprotect(pte);
-}
-#endif
-
-#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr, unsigned long len)
-{
- struct hstate *h = hstate_file(file);
-
- if (len & ~huge_page_mask(h))
- return -EINVAL;
- if (addr & ~huge_page_mask(h))
- return -EINVAL;
-
- return 0;
-}
-#endif
-
#ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
@@ -120,7 +115,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
#endif
#ifndef __HAVE_ARCH_HUGE_PTEP_GET
-static inline pte_t huge_ptep_get(pte_t *ptep)
+static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
return ptep_get(ptep);
}
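
All helpers in this header follow the same override convention: an architecture defines the __HAVE_ARCH_* guard and provides its own version before the generic one is parsed, so the fallback above compiles out. A sketch of what an arch header might supply for the new huge_ptep_get() signature (the body is a placeholder; a real arch would post-process the raw pte here, e.g. folding per-subpage bits of a contiguous mapping):

    #define __HAVE_ARCH_HUGE_PTEP_GET
    static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep)
    {
        /* placeholder: arch-specific reassembly would go here */
        return ptep_get(ptep);
    }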
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
deleted file mode 100644
index 515c3fb06ab3..000000000000
--- a/include/asm-generic/hyperv-tlfs.h
+++ /dev/null
@@ -1,808 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/*
- * This file contains definitions from Hyper-V Hypervisor Top-Level Functional
- * Specification (TLFS):
- * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
- */
-
-#ifndef _ASM_GENERIC_HYPERV_TLFS_H
-#define _ASM_GENERIC_HYPERV_TLFS_H
-
-#include <linux/types.h>
-#include <linux/bits.h>
-#include <linux/time64.h>
-
-/*
- * While not explicitly listed in the TLFS, Hyper-V always runs with a page size
- * of 4096. These definitions are used when communicating with Hyper-V using
- * guest physical pages and guest physical page addresses, since the guest page
- * size may not be 4096 on all architectures.
- */
-#define HV_HYP_PAGE_SHIFT 12
-#define HV_HYP_PAGE_SIZE BIT(HV_HYP_PAGE_SHIFT)
-#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1))
-
-/*
- * Hyper-V provides two categories of flags relevant to guest VMs. The
- * "Features" category indicates specific functionality that is available
- * to guests on this particular instance of Hyper-V. The "Features"
- * are presented in four groups, each of which is 32 bits. The group A
- * and B definitions are common across architectures and are listed here.
- * However, not all flags are relevant on all architectures.
- *
- * Groups C and D vary across architectures and are listed in the
- * architecture specific portion of hyperv-tlfs.h. Some of these flags exist
- * on multiple architectures, but the bit positions are different so they
- * cannot appear in the generic portion of hyperv-tlfs.h.
- *
- * The "Enlightenments" category provides recommendations on whether to use
- * specific enlightenments that are available. The Enlighenments are a single
- * group of 32 bits, but they vary across architectures and are listed in
- * the architecture specific portion of hyperv-tlfs.h.
- */
-
-/*
- * Group A Features.
- */
-
-/* VP Runtime register available */
-#define HV_MSR_VP_RUNTIME_AVAILABLE BIT(0)
-/* Partition Reference Counter available*/
-#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1)
-/* Basic SynIC register available */
-#define HV_MSR_SYNIC_AVAILABLE BIT(2)
-/* Synthetic Timer registers available */
-#define HV_MSR_SYNTIMER_AVAILABLE BIT(3)
-/* Virtual APIC assist and VP assist page registers available */
-#define HV_MSR_APIC_ACCESS_AVAILABLE BIT(4)
-/* Hypercall and Guest OS ID registers available*/
-#define HV_MSR_HYPERCALL_AVAILABLE BIT(5)
-/* Access virtual processor index register available*/
-#define HV_MSR_VP_INDEX_AVAILABLE BIT(6)
-/* Virtual system reset register available*/
-#define HV_MSR_RESET_AVAILABLE BIT(7)
-/* Access statistics page registers available */
-#define HV_MSR_STAT_PAGES_AVAILABLE BIT(8)
-/* Partition reference TSC register is available */
-#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9)
-/* Partition Guest IDLE register is available */
-#define HV_MSR_GUEST_IDLE_AVAILABLE BIT(10)
-/* Partition local APIC and TSC frequency registers available */
-#define HV_ACCESS_FREQUENCY_MSRS BIT(11)
-/* AccessReenlightenmentControls privilege */
-#define HV_ACCESS_REENLIGHTENMENT BIT(13)
-/* AccessTscInvariantControls privilege */
-#define HV_ACCESS_TSC_INVARIANT BIT(15)
-
-/*
- * Group B features.
- */
-#define HV_CREATE_PARTITIONS BIT(0)
-#define HV_ACCESS_PARTITION_ID BIT(1)
-#define HV_ACCESS_MEMORY_POOL BIT(2)
-#define HV_ADJUST_MESSAGE_BUFFERS BIT(3)
-#define HV_POST_MESSAGES BIT(4)
-#define HV_SIGNAL_EVENTS BIT(5)
-#define HV_CREATE_PORT BIT(6)
-#define HV_CONNECT_PORT BIT(7)
-#define HV_ACCESS_STATS BIT(8)
-#define HV_DEBUGGING BIT(11)
-#define HV_CPU_MANAGEMENT BIT(12)
-#define HV_ENABLE_EXTENDED_HYPERCALLS BIT(20)
-#define HV_ISOLATION BIT(22)
-
-/*
- * TSC page layout.
- */
-struct ms_hyperv_tsc_page {
- volatile u32 tsc_sequence;
- u32 reserved1;
- volatile u64 tsc_scale;
- volatile s64 tsc_offset;
-} __packed;
-
-/*
- * The guest OS needs to register the guest ID with the hypervisor.
- * The guest ID is a 64 bit entity and the structure of this ID is
- * specified in the Hyper-V specification:
- *
- * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
- *
- * While the current guideline does not specify how Linux guest ID(s)
- * need to be generated, our plan is to publish the guidelines for
- * Linux and other guest operating systems that currently are hosted
- * on Hyper-V. The implementation here conforms to this yet
- * unpublished guidelines.
- *
- *
- * Bit(s)
- * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
- * 62:56 - Os Type; Linux is 0x100
- * 55:48 - Distro specific identification
- * 47:16 - Linux kernel version number
- * 15:0 - Distro specific identification
- *
- *
- */
-
-#define HV_LINUX_VENDOR_ID 0x8100
-
-/*
- * Crash notification flags.
- */
-#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62)
-#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63)
-
-/* Declare the various hypercall operations. */
-#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
-#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
-#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
-#define HVCALL_SEND_IPI 0x000b
-#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
-#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
-#define HVCALL_SEND_IPI_EX 0x0015
-#define HVCALL_GET_PARTITION_ID 0x0046
-#define HVCALL_DEPOSIT_MEMORY 0x0048
-#define HVCALL_CREATE_VP 0x004e
-#define HVCALL_GET_VP_REGISTERS 0x0050
-#define HVCALL_SET_VP_REGISTERS 0x0051
-#define HVCALL_POST_MESSAGE 0x005c
-#define HVCALL_SIGNAL_EVENT 0x005d
-#define HVCALL_POST_DEBUG_DATA 0x0069
-#define HVCALL_RETRIEVE_DEBUG_DATA 0x006a
-#define HVCALL_RESET_DEBUG_SESSION 0x006b
-#define HVCALL_ADD_LOGICAL_PROCESSOR 0x0076
-#define HVCALL_MAP_DEVICE_INTERRUPT 0x007c
-#define HVCALL_UNMAP_DEVICE_INTERRUPT 0x007d
-#define HVCALL_RETARGET_INTERRUPT 0x007e
-#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
-#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
-
-/* Extended hypercalls */
-#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001
-#define HV_EXT_CALL_MEMORY_HEAT_HINT 0x8003
-
-#define HV_FLUSH_ALL_PROCESSORS BIT(0)
-#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
-#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
-#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
-
-/* Extended capability bits */
-#define HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT BIT(8)
-
-enum HV_GENERIC_SET_FORMAT {
- HV_GENERIC_SET_SPARSE_4K,
- HV_GENERIC_SET_ALL,
-};
-
-#define HV_PARTITION_ID_SELF ((u64)-1)
-#define HV_VP_INDEX_SELF ((u32)-2)
-
-#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0)
-#define HV_HYPERCALL_FAST_BIT BIT(16)
-#define HV_HYPERCALL_VARHEAD_OFFSET 17
-#define HV_HYPERCALL_REP_COMP_OFFSET 32
-#define HV_HYPERCALL_REP_COMP_1 BIT_ULL(32)
-#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32)
-#define HV_HYPERCALL_REP_START_OFFSET 48
-#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48)
-
-/* hypercall status code */
-#define HV_STATUS_SUCCESS 0
-#define HV_STATUS_INVALID_HYPERCALL_CODE 2
-#define HV_STATUS_INVALID_HYPERCALL_INPUT 3
-#define HV_STATUS_INVALID_ALIGNMENT 4
-#define HV_STATUS_INVALID_PARAMETER 5
-#define HV_STATUS_OPERATION_DENIED 8
-#define HV_STATUS_INSUFFICIENT_MEMORY 11
-#define HV_STATUS_INVALID_PORT_ID 17
-#define HV_STATUS_INVALID_CONNECTION_ID 18
-#define HV_STATUS_INSUFFICIENT_BUFFERS 19
-
-/*
- * The Hyper-V TimeRefCount register and the TSC
- * page provide a guest VM clock with 100ns tick rate
- */
-#define HV_CLOCK_HZ (NSEC_PER_SEC/100)
-
-/* Define the number of synthetic interrupt sources. */
-#define HV_SYNIC_SINT_COUNT (16)
-/* Define the expected SynIC version. */
-#define HV_SYNIC_VERSION_1 (0x1)
-/* Valid SynIC vectors are 16-255. */
-#define HV_SYNIC_FIRST_VALID_VECTOR (16)
-
-#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0)
-#define HV_SYNIC_SIMP_ENABLE (1ULL << 0)
-#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0)
-#define HV_SYNIC_SINT_MASKED (1ULL << 16)
-#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17)
-#define HV_SYNIC_SINT_VECTOR_MASK (0xFF)
-
-#define HV_SYNIC_STIMER_COUNT (4)
-
-/* Define synthetic interrupt controller message constants. */
-#define HV_MESSAGE_SIZE (256)
-#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
-#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30)
-
-/*
- * Define hypervisor message types. Some of the message types
- * are x86/x64 specific, but there's no good way to separate
- * them out into the arch-specific version of hyperv-tlfs.h
- * because C doesn't provide a way to extend enum types.
- * Keeping them all in the arch neutral hyperv-tlfs.h seems
- * the least messy compromise.
- */
-enum hv_message_type {
- HVMSG_NONE = 0x00000000,
-
- /* Memory access messages. */
- HVMSG_UNMAPPED_GPA = 0x80000000,
- HVMSG_GPA_INTERCEPT = 0x80000001,
-
- /* Timer notification messages. */
- HVMSG_TIMER_EXPIRED = 0x80000010,
-
- /* Error messages. */
- HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
- HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021,
- HVMSG_UNSUPPORTED_FEATURE = 0x80000022,
-
- /* Trace buffer complete messages. */
- HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040,
-
- /* Platform-specific processor intercept messages. */
- HVMSG_X64_IOPORT_INTERCEPT = 0x80010000,
- HVMSG_X64_MSR_INTERCEPT = 0x80010001,
- HVMSG_X64_CPUID_INTERCEPT = 0x80010002,
- HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003,
- HVMSG_X64_APIC_EOI = 0x80010004,
- HVMSG_X64_LEGACY_FP_ERROR = 0x80010005
-};
-
-/* Define synthetic interrupt controller message flags. */
-union hv_message_flags {
- __u8 asu8;
- struct {
- __u8 msg_pending:1;
- __u8 reserved:7;
- } __packed;
-};
-
-/* Define port identifier type. */
-union hv_port_id {
- __u32 asu32;
- struct {
- __u32 id:24;
- __u32 reserved:8;
- } __packed u;
-};
-
-/* Define synthetic interrupt controller message header. */
-struct hv_message_header {
- __u32 message_type;
- __u8 payload_size;
- union hv_message_flags message_flags;
- __u8 reserved[2];
- union {
- __u64 sender;
- union hv_port_id port;
- };
-} __packed;
-
-/* Define synthetic interrupt controller message format. */
-struct hv_message {
- struct hv_message_header header;
- union {
- __u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
- } u;
-} __packed;
-
-/* Define the synthetic interrupt message page layout. */
-struct hv_message_page {
- struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
-} __packed;
-
-/* Define timer message payload structure. */
-struct hv_timer_message_payload {
- __u32 timer_index;
- __u32 reserved;
- __u64 expiration_time; /* When the timer expired */
- __u64 delivery_time; /* When the message was delivered */
-} __packed;
-
-
-/* Define synthetic interrupt controller flag constants. */
-#define HV_EVENT_FLAGS_COUNT (256 * 8)
-#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long))
-
-/*
- * Synthetic timer configuration.
- */
-union hv_stimer_config {
- u64 as_uint64;
- struct {
- u64 enable:1;
- u64 periodic:1;
- u64 lazy:1;
- u64 auto_enable:1;
- u64 apic_vector:8;
- u64 direct_mode:1;
- u64 reserved_z0:3;
- u64 sintx:4;
- u64 reserved_z1:44;
- } __packed;
-};
-
-
-/* Define the synthetic interrupt controller event flags format. */
-union hv_synic_event_flags {
- unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT];
-};
-
-/* Define SynIC control register. */
-union hv_synic_scontrol {
- u64 as_uint64;
- struct {
- u64 enable:1;
- u64 reserved:63;
- } __packed;
-};
-
-/* Define synthetic interrupt source. */
-union hv_synic_sint {
- u64 as_uint64;
- struct {
- u64 vector:8;
- u64 reserved1:8;
- u64 masked:1;
- u64 auto_eoi:1;
- u64 polling:1;
- u64 reserved2:45;
- } __packed;
-};
-
-/* Define the format of the SIMP register */
-union hv_synic_simp {
- u64 as_uint64;
- struct {
- u64 simp_enabled:1;
- u64 preserved:11;
- u64 base_simp_gpa:52;
- } __packed;
-};
-
-/* Define the format of the SIEFP register */
-union hv_synic_siefp {
- u64 as_uint64;
- struct {
- u64 siefp_enabled:1;
- u64 preserved:11;
- u64 base_siefp_gpa:52;
- } __packed;
-};
-
-struct hv_vpset {
- u64 format;
- u64 valid_bank_mask;
- u64 bank_contents[];
-} __packed;
-
-/* HvCallSendSyntheticClusterIpi hypercall */
-struct hv_send_ipi {
- u32 vector;
- u32 reserved;
- u64 cpu_mask;
-} __packed;
-
-/* HvCallSendSyntheticClusterIpiEx hypercall */
-struct hv_send_ipi_ex {
- u32 vector;
- u32 reserved;
- struct hv_vpset vp_set;
-} __packed;
-
-/* HvFlushGuestPhysicalAddressSpace hypercalls */
-struct hv_guest_mapping_flush {
- u64 address_space;
- u64 flags;
-} __packed;
-
-/*
- * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited
- * by the bitwidth of "additional_pages" in union hv_gpa_page_range.
- */
-#define HV_MAX_FLUSH_PAGES (2048)
-#define HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB 0
-#define HV_GPA_PAGE_RANGE_PAGE_SIZE_1GB 1
-
-/* HvFlushGuestPhysicalAddressList, HvExtCallMemoryHeatHint hypercall */
-union hv_gpa_page_range {
- u64 address_space;
- struct {
- u64 additional_pages:11;
- u64 largepage:1;
- u64 basepfn:52;
- } page;
- struct {
- u64 reserved:12;
- u64 page_size:1;
- u64 reserved1:8;
- u64 base_large_pfn:43;
- };
-};
-
-/*
- * All input flush parameters should be in single page. The max flush
- * count is equal with how many entries of union hv_gpa_page_range can
- * be populated into the input parameter page.
- */
-#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) / \
- sizeof(union hv_gpa_page_range))
-
-struct hv_guest_mapping_flush_list {
- u64 address_space;
- u64 flags;
- union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT];
-};
-
-/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
-struct hv_tlb_flush {
- u64 address_space;
- u64 flags;
- u64 processor_mask;
- u64 gva_list[];
-} __packed;
-
-/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
-struct hv_tlb_flush_ex {
- u64 address_space;
- u64 flags;
- struct hv_vpset hv_vp_set;
- u64 gva_list[];
-} __packed;
-
-/* HvGetPartitionId hypercall (output only) */
-struct hv_get_partition_id {
- u64 partition_id;
-} __packed;
-
-/* HvDepositMemory hypercall */
-struct hv_deposit_memory {
- u64 partition_id;
- u64 gpa_page_list[];
-} __packed;
-
-struct hv_proximity_domain_flags {
- u32 proximity_preferred : 1;
- u32 reserved : 30;
- u32 proximity_info_valid : 1;
-} __packed;
-
-/* Not a union in windows but useful for zeroing */
-union hv_proximity_domain_info {
- struct {
- u32 domain_id;
- struct hv_proximity_domain_flags flags;
- };
- u64 as_uint64;
-} __packed;
-
-struct hv_lp_startup_status {
- u64 hv_status;
- u64 substatus1;
- u64 substatus2;
- u64 substatus3;
- u64 substatus4;
- u64 substatus5;
- u64 substatus6;
-} __packed;
-
-/* HvAddLogicalProcessor hypercall */
-struct hv_add_logical_processor_in {
- u32 lp_index;
- u32 apic_id;
- union hv_proximity_domain_info proximity_domain_info;
- u64 flags;
-} __packed;
-
-struct hv_add_logical_processor_out {
- struct hv_lp_startup_status startup_status;
-} __packed;
-
-enum HV_SUBNODE_TYPE
-{
- HvSubnodeAny = 0,
- HvSubnodeSocket = 1,
- HvSubnodeAmdNode = 2,
- HvSubnodeL3 = 3,
- HvSubnodeCount = 4,
- HvSubnodeInvalid = -1
-};
-
-/* HvCreateVp hypercall */
-struct hv_create_vp {
- u64 partition_id;
- u32 vp_index;
- u8 padding[3];
- u8 subnode_type;
- u64 subnode_id;
- union hv_proximity_domain_info proximity_domain_info;
- u64 flags;
-} __packed;
-
-enum hv_interrupt_source {
- HV_INTERRUPT_SOURCE_MSI = 1, /* MSI and MSI-X */
- HV_INTERRUPT_SOURCE_IOAPIC,
-};
-
-union hv_msi_address_register {
- u32 as_uint32;
- struct {
- u32 reserved1:2;
- u32 destination_mode:1;
- u32 redirection_hint:1;
- u32 reserved2:8;
- u32 destination_id:8;
- u32 msi_base:12;
- };
-} __packed;
-
-union hv_msi_data_register {
- u32 as_uint32;
- struct {
- u32 vector:8;
- u32 delivery_mode:3;
- u32 reserved1:3;
- u32 level_assert:1;
- u32 trigger_mode:1;
- u32 reserved2:16;
- };
-} __packed;
-
-/* HvRetargetDeviceInterrupt hypercall */
-union hv_msi_entry {
- u64 as_uint64;
- struct {
- union hv_msi_address_register address;
- union hv_msi_data_register data;
- } __packed;
-};
-
-union hv_ioapic_rte {
- u64 as_uint64;
-
- struct {
- u32 vector:8;
- u32 delivery_mode:3;
- u32 destination_mode:1;
- u32 delivery_status:1;
- u32 interrupt_polarity:1;
- u32 remote_irr:1;
- u32 trigger_mode:1;
- u32 interrupt_mask:1;
- u32 reserved1:15;
-
- u32 reserved2:24;
- u32 destination_id:8;
- };
-
- struct {
- u32 low_uint32;
- u32 high_uint32;
- };
-} __packed;
-
-struct hv_interrupt_entry {
- u32 source;
- u32 reserved1;
- union {
- union hv_msi_entry msi_entry;
- union hv_ioapic_rte ioapic_rte;
- };
-} __packed;
-
-/*
- * flags for hv_device_interrupt_target.flags
- */
-#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
-#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
-
-struct hv_device_interrupt_target {
- u32 vector;
- u32 flags;
- union {
- u64 vp_mask;
- struct hv_vpset vp_set;
- };
-} __packed;
-
-struct hv_retarget_device_interrupt {
- u64 partition_id; /* use "self" */
- u64 device_id;
- struct hv_interrupt_entry int_entry;
- u64 reserved2;
- struct hv_device_interrupt_target int_target;
-} __packed __aligned(8);
-
-
-/* HvGetVpRegisters hypercall input with variable size reg name list*/
-struct hv_get_vp_registers_input {
- struct {
- u64 partitionid;
- u32 vpindex;
- u8 inputvtl;
- u8 padding[3];
- } header;
- struct input {
- u32 name0;
- u32 name1;
- } element[];
-} __packed;
-
-
-/* HvGetVpRegisters returns an array of these output elements */
-struct hv_get_vp_registers_output {
- union {
- struct {
- u32 a;
- u32 b;
- u32 c;
- u32 d;
- } as32 __packed;
- struct {
- u64 low;
- u64 high;
- } as64 __packed;
- };
-};
-
-/* HvSetVpRegisters hypercall with variable size reg name/value list*/
-struct hv_set_vp_registers_input {
- struct {
- u64 partitionid;
- u32 vpindex;
- u8 inputvtl;
- u8 padding[3];
- } header;
- struct {
- u32 name;
- u32 padding1;
- u64 padding2;
- u64 valuelow;
- u64 valuehigh;
- } element[];
-} __packed;
-
-enum hv_device_type {
- HV_DEVICE_TYPE_LOGICAL = 0,
- HV_DEVICE_TYPE_PCI = 1,
- HV_DEVICE_TYPE_IOAPIC = 2,
- HV_DEVICE_TYPE_ACPI = 3,
-};
-
-typedef u16 hv_pci_rid;
-typedef u16 hv_pci_segment;
-typedef u64 hv_logical_device_id;
-union hv_pci_bdf {
- u16 as_uint16;
-
- struct {
- u8 function:3;
- u8 device:5;
- u8 bus;
- };
-} __packed;
-
-union hv_pci_bus_range {
- u16 as_uint16;
-
- struct {
- u8 subordinate_bus;
- u8 secondary_bus;
- };
-} __packed;
-
-union hv_device_id {
- u64 as_uint64;
-
- struct {
- u64 reserved0:62;
- u64 device_type:2;
- };
-
- /* HV_DEVICE_TYPE_LOGICAL */
- struct {
- u64 id:62;
- u64 device_type:2;
- } logical;
-
- /* HV_DEVICE_TYPE_PCI */
- struct {
- union {
- hv_pci_rid rid;
- union hv_pci_bdf bdf;
- };
-
- hv_pci_segment segment;
- union hv_pci_bus_range shadow_bus_range;
-
- u16 phantom_function_bits:2;
- u16 source_shadow:1;
-
- u16 rsvdz0:11;
- u16 device_type:2;
- } pci;
-
- /* HV_DEVICE_TYPE_IOAPIC */
- struct {
- u8 ioapic_id;
- u8 rsvdz0;
- u16 rsvdz1;
- u16 rsvdz2;
-
- u16 rsvdz3:14;
- u16 device_type:2;
- } ioapic;
-
- /* HV_DEVICE_TYPE_ACPI */
- struct {
- u32 input_mapping_base;
- u32 input_mapping_count:30;
- u32 device_type:2;
- } acpi;
-} __packed;
-
-enum hv_interrupt_trigger_mode {
- HV_INTERRUPT_TRIGGER_MODE_EDGE = 0,
- HV_INTERRUPT_TRIGGER_MODE_LEVEL = 1,
-};
-
-struct hv_device_interrupt_descriptor {
- u32 interrupt_type;
- u32 trigger_mode;
- u32 vector_count;
- u32 reserved;
- struct hv_device_interrupt_target target;
-} __packed;
-
-struct hv_input_map_device_interrupt {
- u64 partition_id;
- u64 device_id;
- u64 flags;
- struct hv_interrupt_entry logical_interrupt_entry;
- struct hv_device_interrupt_descriptor interrupt_descriptor;
-} __packed;
-
-struct hv_output_map_device_interrupt {
- struct hv_interrupt_entry interrupt_entry;
-} __packed;
-
-struct hv_input_unmap_device_interrupt {
- u64 partition_id;
- u64 device_id;
- struct hv_interrupt_entry interrupt_entry;
-} __packed;
-
-#define HV_SOURCE_SHADOW_NONE 0x0
-#define HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE 0x1
-
-/*
- * The whole argument should fit in a page to be able to pass to the hypervisor
- * in one hypercall.
- */
-#define HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES \
- ((HV_HYP_PAGE_SIZE - sizeof(struct hv_memory_hint)) / \
- sizeof(union hv_gpa_page_range))
-
-/* HvExtCallMemoryHeatHint hypercall */
-#define HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD 2
-struct hv_memory_hint {
- u64 type:2;
- u64 reserved:62;
- union hv_gpa_page_range ranges[];
-} __packed;
-
-#endif
diff --git a/include/asm-generic/ide_iops.h b/include/asm-generic/ide_iops.h
deleted file mode 100644
index 81dfa3ee5e06..000000000000
--- a/include/asm-generic/ide_iops.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Generic I/O and MEMIO string operations. */
-
-#define __ide_insw insw
-#define __ide_insl insl
-#define __ide_outsw outsw
-#define __ide_outsl outsl
-
-static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u16 *)addr = readw(port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u32 *)addr = readl(port);
- addr += 4;
- }
-}
-
-static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- writew(*(u16 *)addr, port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
-{
- while (count--) {
- writel(*(u32 *)addr, port);
- addr += 4;
- }
-}
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index e93375c710b9..ca5a1ce6f0f8 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -9,7 +9,9 @@
#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
+#include <linux/sizes.h>
#include <linux/types.h>
+#include <linux/instruction_pointer.h>
#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
@@ -61,6 +63,46 @@
#define __io_par(v) __io_ar(v)
#endif
+/*
+ * "__DISABLE_TRACE_MMIO__" flag can be used to disable MMIO tracing for
+ * specific kernel drivers in case of excessive/unwanted logging.
+ *
+ * Usage: Add a #define flag at the beginning of the driver file.
+ * Ex: #define __DISABLE_TRACE_MMIO__
+ * #include <...>
+ * ...
+ */
+#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
+#include <linux/tracepoint-defs.h>
+
+#define rwmmio_tracepoint_enabled(tracepoint) tracepoint_enabled(tracepoint)
+DECLARE_TRACEPOINT(rwmmio_write);
+DECLARE_TRACEPOINT(rwmmio_post_write);
+DECLARE_TRACEPOINT(rwmmio_read);
+DECLARE_TRACEPOINT(rwmmio_post_read);
+
+void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0);
+void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0);
+void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0);
+void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0);
+
+#else
+
+#define rwmmio_tracepoint_enabled(tracepoint) false
+static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+ unsigned long caller_addr, unsigned long caller_addr0) {}
+
+#endif /* CONFIG_TRACE_MMIO_ACCESS */
/*
* __raw_{read,write}{b,w,l,q}() access memory in native endianness.
@@ -149,9 +191,13 @@ static inline u8 readb(const volatile void __iomem *addr)
{
u8 val;
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __raw_readb(addr);
__io_ar(val);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -162,9 +208,13 @@ static inline u16 readw(const volatile void __iomem *addr)
{
u16 val;
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
__io_ar(val);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -175,9 +225,13 @@ static inline u32 readl(const volatile void __iomem *addr)
{
u32 val;
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
__io_ar(val);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -189,9 +243,13 @@ static inline u64 readq(const volatile void __iomem *addr)
{
u64 val;
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
__io_br();
- val = __le64_to_cpu(__raw_readq(addr));
+ val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
__io_ar(val);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -201,9 +259,13 @@ static inline u64 readq(const volatile void __iomem *addr)
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writeb(value, addr);
__io_aw();
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -211,9 +273,13 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writew((u16 __force)cpu_to_le16(value), addr);
__io_aw();
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -221,9 +287,13 @@ static inline void writew(u16 value, volatile void __iomem *addr)
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writel((u32 __force)__cpu_to_le32(value), addr);
__io_aw();
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -232,9 +302,13 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
__io_bw();
- __raw_writeq(__cpu_to_le64(value), addr);
+ __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
__io_aw();
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
#endif /* CONFIG_64BIT */
@@ -248,7 +322,14 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
- return __raw_readb(addr);
+ u8 val;
+
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ val = __raw_readb(addr);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ return val;
}
#endif
@@ -256,7 +337,14 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr)
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
- return __le16_to_cpu(__raw_readw(addr));
+ u16 val;
+
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ return val;
}
#endif
@@ -264,7 +352,14 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
- return __le32_to_cpu(__raw_readl(addr));
+ u32 val;
+
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ return val;
}
#endif
@@ -272,7 +367,14 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
- return __le64_to_cpu(__raw_readq(addr));
+ u64 val;
+
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ return val;
}
#endif
@@ -280,7 +382,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
__raw_writeb(value, addr);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -288,7 +394,11 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
- __raw_writew(cpu_to_le16(value), addr);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ __raw_writew((u16 __force)cpu_to_le16(value), addr);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -296,7 +406,11 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
- __raw_writel(__cpu_to_le32(value), addr);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ __raw_writel((u32 __force)__cpu_to_le32(value), addr);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -304,7 +418,11 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
- __raw_writeq(__cpu_to_le64(value), addr);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ __raw_writeq((u64 __force)__cpu_to_le64(value), addr);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
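The _relaxed accessors above drop the __io_br()/__io_ar() and __io_bw()/__io_aw() barrier pairs but gain the same tracing hooks as the ordered forms. A hedged sketch of when each form is appropriate; the register offsets and bit meanings are hypothetical:

static int hypothetical_wait_done(void __iomem *status)
{
	int timeout = 1000;

	/* polling a status bit: the cheaper relaxed read is enough */
	while (!(readl_relaxed(status) & 0x1)) {
		if (!timeout--)
			return -ETIMEDOUT;
		udelay(1);
	}

	/* about to act on DMA'd data: use the fully ordered readl() */
	return (readl(status) & 0x2) ? -EIO : 0;
}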
@@ -456,6 +574,7 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer,
#if !defined(inb) && !defined(_inb)
#define _inb _inb
+#ifdef CONFIG_HAS_IOPORT
static inline u8 _inb(unsigned long addr)
{
u8 val;
@@ -465,10 +584,15 @@ static inline u8 _inb(unsigned long addr)
__io_par(val);
return val;
}
+#else
+u8 _inb(unsigned long addr)
+ __compiletime_error("inb() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#if !defined(inw) && !defined(_inw)
#define _inw _inw
+#ifdef CONFIG_HAS_IOPORT
static inline u16 _inw(unsigned long addr)
{
u16 val;
@@ -478,10 +602,15 @@ static inline u16 _inw(unsigned long addr)
__io_par(val);
return val;
}
+#else
+u16 _inw(unsigned long addr)
+ __compiletime_error("inw() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#if !defined(inl) && !defined(_inl)
#define _inl _inl
+#ifdef CONFIG_HAS_IOPORT
static inline u32 _inl(unsigned long addr)
{
u32 val;
@@ -491,36 +620,55 @@ static inline u32 _inl(unsigned long addr)
__io_par(val);
return val;
}
+#else
+u32 _inl(unsigned long addr)
+ __compiletime_error("inl() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#if !defined(outb) && !defined(_outb)
#define _outb _outb
+#ifdef CONFIG_HAS_IOPORT
static inline void _outb(u8 value, unsigned long addr)
{
__io_pbw();
__raw_writeb(value, PCI_IOBASE + addr);
__io_paw();
}
+#else
+void _outb(u8 value, unsigned long addr)
+ __compiletime_error("outb() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#if !defined(outw) && !defined(_outw)
#define _outw _outw
+#ifdef CONFIG_HAS_IOPORT
static inline void _outw(u16 value, unsigned long addr)
{
__io_pbw();
__raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
__io_paw();
}
+#else
+void _outw(u16 value, unsigned long addr)
+ __compiletime_error("outw() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#if !defined(outl) && !defined(_outl)
#define _outl _outl
+#ifdef CONFIG_HAS_IOPORT
static inline void _outl(u32 value, unsigned long addr)
{
__io_pbw();
__raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
__io_paw();
}
+#else
+void _outl(u32 value, unsigned long addr)
+ __compiletime_error("outl() requires CONFIG_HAS_IOPORT");
+#endif
#endif
#include <linux/logic_pio.h>
@@ -604,53 +752,83 @@ static inline void outl_p(u32 value, unsigned long addr)
#ifndef insb
#define insb insb
+#ifdef CONFIG_HAS_IOPORT
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
readsb(PCI_IOBASE + addr, buffer, count);
}
+#else
+void insb(unsigned long addr, void *buffer, unsigned int count)
+ __compiletime_error("insb() requires HAS_IOPORT");
+#endif
#endif
#ifndef insw
#define insw insw
+#ifdef CONFIG_HAS_IOPORT
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
readsw(PCI_IOBASE + addr, buffer, count);
}
+#else
+void insw(unsigned long addr, void *buffer, unsigned int count)
+ __compiletime_error("insw() requires HAS_IOPORT");
+#endif
#endif
#ifndef insl
#define insl insl
+#ifdef CONFIG_HAS_IOPORT
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
readsl(PCI_IOBASE + addr, buffer, count);
}
+#else
+void insl(unsigned long addr, void *buffer, unsigned int count)
+ __compiletime_error("insl() requires HAS_IOPORT");
+#endif
#endif
#ifndef outsb
#define outsb outsb
+#ifdef CONFIG_HAS_IOPORT
static inline void outsb(unsigned long addr, const void *buffer,
unsigned int count)
{
writesb(PCI_IOBASE + addr, buffer, count);
}
+#else
+void outsb(unsigned long addr, const void *buffer, unsigned int count)
+ __compiletime_error("outsb() requires HAS_IOPORT");
+#endif
#endif
#ifndef outsw
#define outsw outsw
+#ifdef CONFIG_HAS_IOPORT
static inline void outsw(unsigned long addr, const void *buffer,
unsigned int count)
{
writesw(PCI_IOBASE + addr, buffer, count);
}
+#else
+void outsw(unsigned long addr, const void *buffer, unsigned int count)
+ __compiletime_error("outsw() requires HAS_IOPORT");
+#endif
#endif
#ifndef outsl
#define outsl outsl
+#ifdef CONFIG_HAS_IOPORT
static inline void outsl(unsigned long addr, const void *buffer,
unsigned int count)
{
writesl(PCI_IOBASE + addr, buffer, count);
}
+#else
+void outsl(unsigned long addr, const void *buffer, unsigned int count)
+ __compiletime_error("outsl() requires HAS_IOPORT");
+#endif
#endif
#ifndef insb_p
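With CONFIG_HAS_IOPORT unset, any reachable call to the port I/O helpers above now fails at build time via __compiletime_error() rather than at link time. A hedged sketch of how a driver keeps such calls unreachable; the register layout is hypothetical:

static u8 hypothetical_read_indexed(unsigned long base, u8 index)
{
#ifdef CONFIG_HAS_IOPORT
	outb(index, base);		/* select the register */
	return inb(base + 1);		/* read it back */
#else
	return 0;			/* no port I/O on this platform */
#endif
}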
@@ -908,7 +1086,6 @@ static inline void iowrite64_rep(volatile void __iomem *addr,
#ifdef __KERNEL__
-#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *)(x))
/*
@@ -957,21 +1134,29 @@ static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
#ifndef iounmap
#define iounmap iounmap
-static inline void iounmap(void __iomem *addr)
+static inline void iounmap(volatile void __iomem *addr)
{
}
#endif
#elif defined(CONFIG_GENERIC_IOREMAP)
#include <linux/pgtable.h>
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
+void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size,
+ pgprot_t prot);
+
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+ pgprot_t prot);
void iounmap(volatile void __iomem *addr);
+void generic_iounmap(volatile void __iomem *addr);
+#ifndef ioremap
+#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
/* _PAGE_IOREMAP needs to be supplied by the architecture */
- return ioremap_prot(addr, size, _PAGE_IOREMAP);
+ return ioremap_prot(addr, size, __pgprot(_PAGE_IOREMAP));
}
+#endif
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
#ifndef ioremap_wc
@@ -1023,16 +1208,7 @@ static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
port &= IO_SPACE_LIMIT;
return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
-#define __pci_ioport_unmap __pci_ioport_unmap
-static inline void __pci_ioport_unmap(void __iomem *p)
-{
- uintptr_t start = (uintptr_t) PCI_IOBASE;
- uintptr_t addr = (uintptr_t) p;
-
- if (addr >= start && addr < start + IO_SPACE_LIMIT)
- return;
- iounmap(p);
-}
+#define ARCH_HAS_GENERIC_IOPORT_MAP
#endif
#ifndef ioport_unmap
@@ -1048,21 +1224,10 @@ extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_HAS_IOPORT_MAP */
#ifndef CONFIG_GENERIC_IOMAP
-struct pci_dev;
-extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
-
-#ifndef __pci_ioport_unmap
-static inline void __pci_ioport_unmap(void __iomem *p) {}
-#endif
-
#ifndef pci_iounmap
-#define pci_iounmap pci_iounmap
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
-{
- __pci_ioport_unmap(p);
-}
+#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
+#endif
#endif
-#endif /* CONFIG_GENERIC_IOMAP */
#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
@@ -1079,75 +1244,43 @@ static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
}
#endif
-#ifdef CONFIG_VIRT_TO_BUS
-#ifndef virt_to_bus
-static inline unsigned long virt_to_bus(void *address)
-{
- return (unsigned long)address;
-}
-
-static inline void *bus_to_virt(unsigned long address)
-{
- return (void *)address;
-}
-#endif
-#endif
-
#ifndef memset_io
-#define memset_io memset_io
/**
- * memset_io Set a range of I/O memory to a constant value
+ * memset_io - Set a range of I/O memory to a constant value
* @addr: The beginning of the I/O-memory range to set
* @val: The value to set the memory to
* @count: The number of bytes to set
*
* Set a range of I/O memory to a given value.
*/
-static inline void memset_io(volatile void __iomem *addr, int value,
- size_t size)
-{
- memset(__io_virt(addr), value, size);
-}
+void memset_io(volatile void __iomem *addr, int val, size_t count);
#endif
#ifndef memcpy_fromio
-#define memcpy_fromio memcpy_fromio
/**
- * memcpy_fromio Copy a block of data from I/O memory
+ * memcpy_fromio - Copy a block of data from I/O memory
* @dst: The (RAM) destination for the copy
* @src: The (I/O memory) source for the data
* @count: The number of bytes to copy
*
* Copy a block of data from I/O memory.
*/
-static inline void memcpy_fromio(void *buffer,
- const volatile void __iomem *addr,
- size_t size)
-{
- memcpy(buffer, __io_virt(addr), size);
-}
+void memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count);
#endif
#ifndef memcpy_toio
-#define memcpy_toio memcpy_toio
/**
- * memcpy_toio Copy a block of data into I/O memory
+ * memcpy_toio - Copy a block of data into I/O memory
* @dst: The (I/O memory) destination for the copy
* @src: The (RAM) source for the data
* @count: The number of bytes to copy
*
* Copy a block of data to I/O memory.
*/
-static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
- size_t size)
-{
- memcpy(__io_virt(addr), buffer, size);
-}
+void memcpy_toio(volatile void __iomem *dst, const void *src, size_t count);
#endif
-#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED
extern int devmem_is_allowed(unsigned long pfn);
-#endif
#endif /* __KERNEL__ */
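Under CONFIG_GENERIC_IOREMAP the prot argument is now a pgprot_t, so raw page-protection bits get wrapped with __pgprot(), exactly as the updated generic ioremap() above does. A minimal sketch, assuming the architecture supplies _PAGE_IOREMAP:

static void __iomem *hypothetical_map_regs(phys_addr_t phys, size_t size)
{
	return ioremap_prot(phys, size, __pgprot(_PAGE_IOREMAP));
}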
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 9b3eb6d86200..9f3f25d7fc58 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -31,42 +31,22 @@ extern unsigned int ioread16(const void __iomem *);
extern unsigned int ioread16be(const void __iomem *);
extern unsigned int ioread32(const void __iomem *);
extern unsigned int ioread32be(const void __iomem *);
-#ifdef CONFIG_64BIT
-extern u64 ioread64(const void __iomem *);
-extern u64 ioread64be(const void __iomem *);
-#endif
-#ifdef readq
-#define ioread64_lo_hi ioread64_lo_hi
-#define ioread64_hi_lo ioread64_hi_lo
-#define ioread64be_lo_hi ioread64be_lo_hi
-#define ioread64be_hi_lo ioread64be_hi_lo
-extern u64 ioread64_lo_hi(const void __iomem *addr);
-extern u64 ioread64_hi_lo(const void __iomem *addr);
-extern u64 ioread64be_lo_hi(const void __iomem *addr);
-extern u64 ioread64be_hi_lo(const void __iomem *addr);
-#endif
+extern u64 __ioread64_lo_hi(const void __iomem *addr);
+extern u64 __ioread64_hi_lo(const void __iomem *addr);
+extern u64 __ioread64be_lo_hi(const void __iomem *addr);
+extern u64 __ioread64be_hi_lo(const void __iomem *addr);
extern void iowrite8(u8, void __iomem *);
extern void iowrite16(u16, void __iomem *);
extern void iowrite16be(u16, void __iomem *);
extern void iowrite32(u32, void __iomem *);
extern void iowrite32be(u32, void __iomem *);
-#ifdef CONFIG_64BIT
-extern void iowrite64(u64, void __iomem *);
-extern void iowrite64be(u64, void __iomem *);
-#endif
-#ifdef writeq
-#define iowrite64_lo_hi iowrite64_lo_hi
-#define iowrite64_hi_lo iowrite64_hi_lo
-#define iowrite64be_lo_hi iowrite64be_lo_hi
-#define iowrite64be_hi_lo iowrite64be_hi_lo
-extern void iowrite64_lo_hi(u64 val, void __iomem *addr);
-extern void iowrite64_hi_lo(u64 val, void __iomem *addr);
-extern void iowrite64be_lo_hi(u64 val, void __iomem *addr);
-extern void iowrite64be_hi_lo(u64 val, void __iomem *addr);
-#endif
+extern void __iowrite64_lo_hi(u64 val, void __iomem *addr);
+extern void __iowrite64_hi_lo(u64 val, void __iomem *addr);
+extern void __iowrite64be_lo_hi(u64 val, void __iomem *addr);
+extern void __iowrite64be_hi_lo(u64 val, void __iomem *addr);
/*
* "string" versions of the above. Note that they
@@ -93,15 +73,15 @@ extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);
#endif
-#ifndef ARCH_HAS_IOREMAP_WC
+#ifndef ioremap_wc
#define ioremap_wc ioremap
#endif
-#ifndef ARCH_HAS_IOREMAP_WT
+#ifndef ioremap_wt
#define ioremap_wt ioremap
#endif
-#ifndef ARCH_HAS_IOREMAP_NP
+#ifndef ioremap_np
/* See the comment in asm-generic/io.h about ioremap_np(). */
#define ioremap_np ioremap_np
static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
@@ -110,16 +90,6 @@ static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
}
#endif
-#ifdef CONFIG_PCI
-/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
-struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
-#elif defined(CONFIG_GENERIC_IOMAP)
-struct pci_dev;
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{ }
-#endif
-
#include <asm-generic/pci_iomap.h>
#endif
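The lo_hi/hi_lo helpers are now always declared (with __ prefixes) instead of being gated on readq/writeq. Their semantics, sketched below for the lo_hi case, are two 32-bit accesses with the low half first, for devices that latch a 64-bit register on a particular half. This is an illustrative sketch, not the lib/iomap implementation:

static u64 sketch_ioread64_lo_hi(const void __iomem *addr)
{
	u32 low = ioread32(addr);			/* low half first */
	u32 high = ioread32(addr + sizeof(u32));	/* then high half */

	return low | ((u64)high << 32);
}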
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index fca7f1d84818..7f97018df66f 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -42,6 +42,7 @@ typedef struct
#define local_inc_return(l) atomic_long_inc_return(&(l)->a)
#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
+#define local_try_cmpxchg(l, po, n) atomic_long_try_cmpxchg((&(l)->a), (po), (n))
#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
#define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
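local_try_cmpxchg() follows the usual try_cmpxchg contract: it returns true on success and, on failure, writes the observed value back through the old-value pointer, so loops avoid a separate re-read. A minimal sketch with a hypothetical capped increment:

static void hypothetical_local_inc_capped(local_t *l, long cap)
{
	long old = local_read(l);

	do {
		if (old >= cap)
			return;		/* already at the cap */
	} while (!local_try_cmpxchg(l, &old, old + 1));
}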
diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h
index 765be0b7d883..14963a7a6253 100644
--- a/include/asm-generic/local64.h
+++ b/include/asm-generic/local64.h
@@ -42,7 +42,16 @@ typedef struct {
#define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
#define local64_inc_return(l) local_inc_return(&(l)->a)
-#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
+static inline s64 local64_cmpxchg(local64_t *l, s64 old, s64 new)
+{
+ return local_cmpxchg(&l->a, old, new);
+}
+
+static inline bool local64_try_cmpxchg(local64_t *l, s64 *old, s64 new)
+{
+ return local_try_cmpxchg(&l->a, (long *)old, new);
+}
+
#define local64_xchg(l, n) local_xchg((&(l)->a), (n))
#define local64_add_unless(l, _a, u) local_add_unless((&(l)->a), (_a), (u))
#define local64_inc_not_zero(l) local_inc_not_zero(&(l)->a)
@@ -81,6 +90,7 @@ typedef struct {
#define local64_inc_return(l) atomic64_inc_return(&(l)->a)
#define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
+#define local64_try_cmpxchg(l, po, n) atomic64_try_cmpxchg((&(l)->a), (po), (n))
#define local64_xchg(l, n) atomic64_xchg((&(l)->a), (n))
#define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
#define local64_inc_not_zero(l) atomic64_inc_not_zero(&(l)->a)
diff --git a/include/asm-generic/logic_io.h b/include/asm-generic/logic_io.h
new file mode 100644
index 000000000000..8a59b6e567df
--- /dev/null
+++ b/include/asm-generic/logic_io.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: johannes@sipsolutions.net
+ */
+#ifndef _LOGIC_IO_H
+#define _LOGIC_IO_H
+#include <linux/types.h>
+
+/* include this file into asm/io.h */
+
+#ifdef CONFIG_INDIRECT_IOMEM
+
+#ifdef CONFIG_INDIRECT_IOMEM_FALLBACK
+/*
+ * If you want emulated IO memory to fall back to 'normal' IO memory
+ * if a region wasn't registered as emulated, then you need to have
+ * all of the real_* functions implemented.
+ */
+#if !defined(real_ioremap) || !defined(real_iounmap) || \
+ !defined(real_raw_readb) || !defined(real_raw_writeb) || \
+ !defined(real_raw_readw) || !defined(real_raw_writew) || \
+ !defined(real_raw_readl) || !defined(real_raw_writel) || \
+ (defined(CONFIG_64BIT) && \
+ (!defined(real_raw_readq) || !defined(real_raw_writeq))) || \
+ !defined(real_memset_io) || \
+ !defined(real_memcpy_fromio) || \
+ !defined(real_memcpy_toio)
+#error "Must provide fallbacks for real IO memory access"
+#endif /* defined ... */
+#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */
+
+#define ioremap ioremap
+void __iomem *ioremap(phys_addr_t offset, size_t size);
+
+#define iounmap iounmap
+void iounmap(void volatile __iomem *addr);
+
+#define __raw_readb __raw_readb
+u8 __raw_readb(const volatile void __iomem *addr);
+
+#define __raw_readw __raw_readw
+u16 __raw_readw(const volatile void __iomem *addr);
+
+#define __raw_readl __raw_readl
+u32 __raw_readl(const volatile void __iomem *addr);
+
+#ifdef CONFIG_64BIT
+#define __raw_readq __raw_readq
+u64 __raw_readq(const volatile void __iomem *addr);
+#endif /* CONFIG_64BIT */
+
+#define __raw_writeb __raw_writeb
+void __raw_writeb(u8 value, volatile void __iomem *addr);
+
+#define __raw_writew __raw_writew
+void __raw_writew(u16 value, volatile void __iomem *addr);
+
+#define __raw_writel __raw_writel
+void __raw_writel(u32 value, volatile void __iomem *addr);
+
+#ifdef CONFIG_64BIT
+#define __raw_writeq __raw_writeq
+void __raw_writeq(u64 value, volatile void __iomem *addr);
+#endif /* CONFIG_64BIT */
+
+#define memset_io memset_io
+void memset_io(volatile void __iomem *addr, int value, size_t size);
+
+#define memcpy_fromio memcpy_fromio
+void memcpy_fromio(void *buffer, const volatile void __iomem *addr,
+ size_t size);
+
+#define memcpy_toio memcpy_toio
+void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size);
+
+#endif /* CONFIG_INDIRECT_IOMEM */
+#endif /* _LOGIC_IO_H */
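With CONFIG_INDIRECT_IOMEM_FALLBACK, the #error above insists that every real_* hook exist before this header is included. A hedged sketch of an arch's asm/io.h fragment; all arch_* names are hypothetical stand-ins for the architecture's native accessors:

#define real_ioremap(phys, size)	arch_ioremap(phys, size)
#define real_iounmap(addr)		arch_iounmap(addr)
#define real_raw_readb(addr)		arch_readb(addr)
#define real_raw_writeb(v, addr)	arch_writeb(v, addr)
/* likewise readw/writew, readl/writel, and readq/writeq on 64-bit */
#define real_memset_io(a, v, n)		arch_memset_io(a, v, n)
#define real_memcpy_fromio(d, s, n)	arch_memcpy_fromio(d, s, n)
#define real_memcpy_toio(d, s, n)	arch_memcpy_toio(d, s, n)

#include <asm-generic/logic_io.h>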
diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h
index 10cd4ffc6ba2..39c94012b88a 100644
--- a/include/asm-generic/mcs_spinlock.h
+++ b/include/asm-generic/mcs_spinlock.h
@@ -1,6 +1,12 @@
#ifndef __ASM_MCS_SPINLOCK_H
#define __ASM_MCS_SPINLOCK_H
+struct mcs_spinlock {
+ struct mcs_spinlock *next;
+ int locked; /* 1 if lock acquired */
+ int count; /* nesting count, see qspinlock.c */
+};
+
/*
* Architectures can define their own:
*
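For context on the struct just added: in an MCS lock each waiter spins on its own ->locked word instead of a shared cache line, which is why a node carries both a next pointer and a private flag. A conceptual sketch of acquisition (the real code lives under kernel/locking/):

static void mcs_lock_sketch(struct mcs_spinlock **tail, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	node->next = NULL;
	node->locked = 0;
	prev = xchg(tail, node);	/* atomically become the queue tail */
	if (!prev)
		return;			/* queue was empty: lock acquired */
	WRITE_ONCE(prev->next, node);	/* link behind the previous tail */
	while (!READ_ONCE(node->locked))
		cpu_relax();		/* spin on our own flag only */
}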
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 7637fb46ba4f..efa6610acbc7 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -6,47 +6,39 @@
#ifndef __ASSEMBLY__
+/*
+ * supports 3 memory models.
+ */
#if defined(CONFIG_FLATMEM)
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET (0UL)
#endif
-#elif defined(CONFIG_DISCONTIGMEM)
-
-#ifndef arch_pfn_to_nid
-#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn)
-#endif
-
-#ifndef arch_local_page_offset
-#define arch_local_page_offset(pfn, nid) \
- ((pfn) - NODE_DATA(nid)->node_start_pfn)
-#endif
-
-#endif /* CONFIG_DISCONTIGMEM */
-
-/*
- * supports 3 memory models.
- */
-#if defined(CONFIG_FLATMEM)
-
#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
ARCH_PFN_OFFSET)
-#elif defined(CONFIG_DISCONTIGMEM)
-#define __pfn_to_page(pfn) \
-({ unsigned long __pfn = (pfn); \
- unsigned long __nid = arch_pfn_to_nid(__pfn); \
- NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
-})
+/* avoid <linux/mm.h> include hell */
+extern unsigned long max_mapnr;
-#define __page_to_pfn(pg) \
-({ const struct page *__pg = (pg); \
- struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
- (unsigned long)(__pg - __pgdat->node_mem_map) + \
- __pgdat->node_start_pfn; \
-})
+#ifndef pfn_valid
+static inline int pfn_valid(unsigned long pfn)
+{
+ unsigned long pfn_offset = ARCH_PFN_OFFSET;
+
+ return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr;
+}
+#define pfn_valid pfn_valid
+
+#ifndef for_each_valid_pfn
+#define for_each_valid_pfn(pfn, start_pfn, end_pfn) \
+ for ((pfn) = max_t(unsigned long, (start_pfn), ARCH_PFN_OFFSET); \
+ (pfn) < min_t(unsigned long, (end_pfn), \
+ ARCH_PFN_OFFSET + max_mapnr); \
+ (pfn)++)
+#endif /* for_each_valid_pfn */
+#endif /* pfn_valid */
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -61,7 +53,7 @@
*/
#define __page_to_pfn(pg) \
({ const struct page *__pg = (pg); \
- int __sec = page_to_section(__pg); \
+ int __sec = memdesc_section(__pg->flags); \
(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
})
@@ -70,7 +62,7 @@
struct mem_section *__sec = __pfn_to_section(__pfn); \
__section_mem_map_addr(__sec) + __pfn; \
})
-#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+#endif /* CONFIG_FLATMEM/SPARSEMEM */
/*
* Convert a physical address to a Page Frame Number and back
@@ -81,6 +73,19 @@
#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page
+#ifdef CONFIG_DEBUG_VIRTUAL
+#define page_to_phys(page) \
+({ \
+ unsigned long __pfn = page_to_pfn(page); \
+ \
+ WARN_ON_ONCE(!pfn_valid(__pfn)); \
+ PFN_PHYS(__pfn); \
+})
+#else
+#define page_to_phys(page) PFN_PHYS(page_to_pfn(page))
+#endif /* CONFIG_DEBUG_VIRTUAL */
+#define phys_to_page(phys) pfn_to_page(PHYS_PFN(phys))
+
#endif /* __ASSEMBLY__ */
#endif
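A small sketch of what the new FLATMEM helpers buy: pfn_valid() reduces to a range check against [ARCH_PFN_OFFSET, ARCH_PFN_OFFSET + max_mapnr), and for_each_valid_pfn() clamps an arbitrary range to that window. A hypothetical walker:

static unsigned long hypothetical_count_valid(unsigned long start, unsigned long end)
{
	unsigned long pfn, n = 0;

	for_each_valid_pfn(pfn, start, end)	/* only pfns mem_map covers */
		n++;
	return n;
}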
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index 4dbb177d1150..6eea3b3c1e65 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Define generic no-op hooks for arch_dup_mmap, arch_exit_mmap
- * and arch_unmap to be included in asm-FOO/mmu_context.h for any
- * arch FOO which doesn't need to hook these.
+ * Define generic no-op hooks for arch_dup_mmap and arch_exit_mmap
+ * to be included in asm-FOO/mmu_context.h for any arch FOO which
+ * doesn't need to hook these.
*/
#ifndef _ASM_GENERIC_MM_HOOKS_H
#define _ASM_GENERIC_MM_HOOKS_H
@@ -17,11 +17,6 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
-static inline void arch_unmap(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
-}
-
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
bool write, bool execute, bool foreign)
{
diff --git a/include/asm-generic/mmzone.h b/include/asm-generic/mmzone.h
new file mode 100644
index 000000000000..2ab5193e8394
--- /dev/null
+++ b/include/asm-generic/mmzone.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_MMZONE_H
+#define _ASM_GENERIC_MMZONE_H
+
+#endif
diff --git a/include/asm-generic/module.h b/include/asm-generic/module.h
index 98e1541b72b7..a8622501b975 100644
--- a/include/asm-generic/module.h
+++ b/include/asm-generic/module.h
@@ -19,12 +19,8 @@ struct mod_arch_specific
#define Elf_Dyn Elf64_Dyn
#define Elf_Ehdr Elf64_Ehdr
#define Elf_Addr Elf64_Addr
-#ifdef CONFIG_MODULES_USE_ELF_REL
#define Elf_Rel Elf64_Rel
-#endif
-#ifdef CONFIG_MODULES_USE_ELF_RELA
#define Elf_Rela Elf64_Rela
-#endif
#define ELF_R_TYPE(X) ELF64_R_TYPE(X)
#define ELF_R_SYM(X) ELF64_R_SYM(X)
@@ -36,12 +32,8 @@ struct mod_arch_specific
#define Elf_Dyn Elf32_Dyn
#define Elf_Ehdr Elf32_Ehdr
#define Elf_Addr Elf32_Addr
-#ifdef CONFIG_MODULES_USE_ELF_REL
#define Elf_Rel Elf32_Rel
-#endif
-#ifdef CONFIG_MODULES_USE_ELF_RELA
#define Elf_Rela Elf32_Rela
-#endif
#define ELF_R_TYPE(X) ELF32_R_TYPE(X)
#define ELF_R_SYM(X) ELF32_R_SYM(X)
#endif
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index 9a000ba2bb75..ecedab554c80 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -6,9 +6,8 @@
* independent. See arch/<arch>/include/asm/mshyperv.h for definitions
* that are specific to architecture <arch>.
*
- * Definitions that are specified in the Hyper-V Top Level Functional
- * Spec (TLFS) should not go in this file, but should instead go in
- * hyperv-tlfs.h.
+ * Definitions that are derived from Hyper-V code or headers should not go in
+ * this file, but should instead go in the relevant files in include/hyperv.
*
* Copyright (C) 2019, Microsoft, Inc.
*
@@ -21,25 +20,91 @@
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
+#include <acpi/acpi_numa.h>
#include <linux/cpumask.h>
+#include <linux/nmi.h>
#include <asm/ptrace.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
+
+#define VTPM_BASE_ADDRESS 0xfed40000
+
+enum hv_partition_type {
+ HV_PARTITION_TYPE_GUEST,
+ HV_PARTITION_TYPE_ROOT,
+ HV_PARTITION_TYPE_L1VH,
+};
struct ms_hyperv_info {
u32 features;
u32 priv_high;
+ u32 ext_features;
u32 misc_features;
u32 hints;
u32 nested_features;
u32 max_vp_index;
u32 max_lp_index;
- u32 isolation_config_a;
- u32 isolation_config_b;
+ u8 vtl;
+ union {
+ u32 isolation_config_a;
+ struct {
+ u32 paravisor_present : 1;
+ u32 reserved_a1 : 31;
+ };
+ };
+ union {
+ u32 isolation_config_b;
+ struct {
+ u32 cvm_type : 4;
+ u32 reserved_b1 : 1;
+ u32 shared_gpa_boundary_active : 1;
+ u32 shared_gpa_boundary_bits : 6;
+ u32 reserved_b2 : 20;
+ };
+ };
+ u64 shared_gpa_boundary;
+ bool msi_ext_dest_id;
+ bool confidential_vmbus_available;
};
extern struct ms_hyperv_info ms_hyperv;
+extern bool hv_nested;
+extern u64 hv_current_partition_id;
+extern enum hv_partition_type hv_curr_partition_type;
+
+extern void * __percpu *hyperv_pcpu_input_arg;
+extern void * __percpu *hyperv_pcpu_output_arg;
+
+u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
+u64 hv_do_fast_hypercall8(u16 control, u64 input8);
+u64 hv_do_fast_hypercall16(u16 control, u64 input1, u64 input2);
+
+bool hv_isolation_type_snp(void);
+bool hv_isolation_type_tdx(void);
+
+/*
+ * On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
+ * it doesn't provide a recommendation flag and AEOI must be disabled.
+ */
+static inline bool hv_recommend_using_aeoi(void)
+{
+#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
+ return !(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
+#else
+ return false;
+#endif
+}
+
+static inline struct hv_proximity_domain_info hv_numa_node_to_pxm_info(int node)
+{
+ struct hv_proximity_domain_info pxm_info = {};
+
+ if (node != NUMA_NO_NODE) {
+ pxm_info.domain_id = node_to_pxm(node);
+ pxm_info.flags.proximity_info_valid = 1;
+ pxm_info.flags.proximity_preferred = 1;
+ }
-extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
-extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
+ return pxm_info;
+}
/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
static inline int hv_result(u64 status)
@@ -61,10 +126,12 @@ static inline unsigned int hv_repcomp(u64 status)
/*
 * Rep hypercalls. Callers of these functions are supposed to ensure that
- * rep_count and varhead_size comply with Hyper-V hypercall definition.
+ * rep_count, varhead_size, and rep_start comply with Hyper-V hypercall
+ * definition.
*/
-static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
- void *input, void *output)
+static inline u64 hv_do_rep_hypercall_ex(u16 code, u16 rep_count,
+ u16 varhead_size, u16 rep_start,
+ void *input, void *output)
{
u64 control = code;
u64 status;
@@ -72,6 +139,7 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
+ control |= (u64)rep_start << HV_HYPERCALL_REP_START_OFFSET;
do {
status = hv_do_hypercall(control, input, output);
@@ -89,54 +157,26 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
return status;
}
+/* For the typical case where rep_start is 0 */
+static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
+ void *input, void *output)
+{
+ return hv_do_rep_hypercall_ex(code, rep_count, varhead_size, 0,
+ input, output);
+}
+
/* Generate the guest OS identifier as described in the Hyper-V TLFS */
-static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
- __u64 d_info2)
+static inline u64 hv_generate_guest_id(u64 kernel_version)
{
- __u64 guest_id = 0;
+ u64 guest_id;
- guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
- guest_id |= (d_info1 << 48);
+ guest_id = (((u64)HV_LINUX_VENDOR_ID) << 48);
guest_id |= (kernel_version << 16);
- guest_id |= d_info2;
return guest_id;
}
-/* Free the message slot and signal end-of-message if required */
-static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
-{
- /*
- * On crash we're reading some other CPU's message page and we need
- * to be careful: this other CPU may already had cleared the header
- * and the host may already had delivered some other message there.
- * In case we blindly write msg->header.message_type we're going
- * to lose it. We can still lose a message of the same type but
- * we count on the fact that there can only be one
- * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
- * on crash.
- */
- if (cmpxchg(&msg->header.message_type, old_msg_type,
- HVMSG_NONE) != old_msg_type)
- return;
-
- /*
- * The cmxchg() above does an implicit memory barrier to
- * ensure the write to MessageType (ie set to
- * HVMSG_NONE) happens before we read the
- * MessagePending and EOMing. Otherwise, the EOMing
- * will not deliver any more messages since there is
- * no empty slot
- */
- if (msg->header.message_flags.msg_pending) {
- /*
- * This will cause message queue rescan to
- * possibly deliver another msg from the
- * hypervisor
- */
- hv_set_register(HV_REGISTER_EOM, 0);
- }
-}
+int hv_get_hypervisor_version(union hv_hypervisor_version_info *info);
void hv_setup_vmbus_handler(void (*handler)(void));
void hv_remove_vmbus_handler(void);
@@ -147,9 +187,7 @@ void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);
-
-extern int vmbus_interrupt;
-extern int vmbus_irq;
+void hv_setup_mshv_handler(void (*handler)(void));
#if IS_ENABLED(CONFIG_HYPERV)
/*
@@ -161,12 +199,18 @@ extern int vmbus_irq;
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;
+extern u64 (*hv_read_reference_counter)(void);
+
/* Sentinel value for an uninitialized entry in hv_vp_index array */
#define VP_INVAL U32_MAX
-void *hv_alloc_hyperv_page(void);
-void *hv_alloc_hyperv_zeroed_page(void);
-void hv_free_hyperv_page(unsigned long addr);
+int __init hv_common_init(void);
+void __init hv_get_partition_id(void);
+void __init hv_common_free(void);
+void __init ms_hyperv_late_init(void);
+int hv_common_cpu_init(unsigned int cpu);
+int hv_common_cpu_die(unsigned int cpu);
+void hv_identify_partition_type(void);
/**
* hv_cpu_number_to_vp_number() - Map CPU to VP.
@@ -184,13 +228,15 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number)
return hv_vp_index[cpu_number];
}
-static inline int cpumask_to_vpset(struct hv_vpset *vpset,
- const struct cpumask *cpus)
+static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
+ const struct cpumask *cpus,
+ bool (*func)(int cpu))
{
int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
+ int max_vcpu_bank = hv_max_vp_index / HV_VCPUS_PER_SPARSE_BANK;
- /* valid_bank_mask can represent up to 64 banks */
- if (hv_max_vp_index / 64 >= 64)
+ /* vpset.valid_bank_mask can represent up to HV_MAX_SPARSE_VCPU_BANKS banks */
+ if (max_vcpu_bank >= HV_MAX_SPARSE_VCPU_BANKS)
return 0;
/*
@@ -198,18 +244,20 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
* structs are not cleared between calls, we risk flushing unneeded
* vCPUs otherwise.
*/
- for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
+ for (vcpu_bank = 0; vcpu_bank <= max_vcpu_bank; vcpu_bank++)
vpset->bank_contents[vcpu_bank] = 0;
/*
* Some banks may end up being empty but this is acceptable.
*/
for_each_cpu(cpu, cpus) {
+ if (func && func(cpu))
+ continue;
vcpu = hv_cpu_number_to_vp_number(cpu);
if (vcpu == VP_INVAL)
return -1;
- vcpu_bank = vcpu / 64;
- vcpu_offset = vcpu % 64;
+ vcpu_bank = vcpu / HV_VCPUS_PER_SPARSE_BANK;
+ vcpu_offset = vcpu % HV_VCPUS_PER_SPARSE_BANK;
__set_bit(vcpu_offset, (unsigned long *)
&vpset->bank_contents[vcpu_bank]);
if (vcpu_bank >= nr_bank)
@@ -219,17 +267,107 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
return nr_bank;
}
+/*
+ * Convert a Linux cpumask into a Hyper-V VPset. In the _skip variant,
+ * 'func' is called for each CPU present in cpumask. If 'func' returns
+ * true, that CPU is skipped -- i.e., that CPU from cpumask is *not*
+ * added to the Hyper-V VPset. If 'func' is NULL, no CPUs are
+ * skipped.
+ */
+static inline int cpumask_to_vpset(struct hv_vpset *vpset,
+ const struct cpumask *cpus)
+{
+ return __cpumask_to_vpset(vpset, cpus, NULL);
+}
+
+static inline int cpumask_to_vpset_skip(struct hv_vpset *vpset,
+ const struct cpumask *cpus,
+ bool (*func)(int cpu))
+{
+ return __cpumask_to_vpset(vpset, cpus, func);
+}
+
+#define _hv_status_fmt(fmt) "%s: Hyper-V status: %#x = %s: " fmt
+#define hv_status_printk(level, status, fmt, ...) \
+do { \
+ u64 __status = (status); \
+ pr_##level(_hv_status_fmt(fmt), __func__, hv_result(__status), \
+ hv_result_to_string(__status), ##__VA_ARGS__); \
+} while (0)
+#define hv_status_err(status, fmt, ...) \
+ hv_status_printk(err, status, fmt, ##__VA_ARGS__)
+#define hv_status_debug(status, fmt, ...) \
+ hv_status_printk(debug, status, fmt, ##__VA_ARGS__)
+
+const char *hv_result_to_string(u64 hv_status);
+int hv_result_to_errno(u64 status);
void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
enum hv_isolation_type hv_get_isolation_type(void);
bool hv_is_isolation_supported(void);
+bool hv_isolation_type_snp(void);
+u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
+u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
+void hv_enable_coco_interrupt(unsigned int cpu, unsigned int vector, bool set);
+void hv_para_set_sint_proxy(bool enable);
+u64 hv_para_get_synic_register(unsigned int reg);
+void hv_para_set_synic_register(unsigned int reg, u64 val);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
+void hv_setup_dma_ops(struct device *dev, bool coherent);
#else /* CONFIG_HYPERV */
+static inline void hv_identify_partition_type(void) {}
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
+static inline void ms_hyperv_late_init(void) {}
+static inline bool hv_is_isolation_supported(void) { return false; }
+static inline enum hv_isolation_type hv_get_isolation_type(void)
+{
+ return HV_ISOLATION_TYPE_NONE;
+}
#endif /* CONFIG_HYPERV */
+#if IS_ENABLED(CONFIG_MSHV_ROOT)
+static inline bool hv_root_partition(void)
+{
+ return hv_curr_partition_type == HV_PARTITION_TYPE_ROOT;
+}
+static inline bool hv_l1vh_partition(void)
+{
+ return hv_curr_partition_type == HV_PARTITION_TYPE_L1VH;
+}
+static inline bool hv_parent_partition(void)
+{
+ return hv_root_partition() || hv_l1vh_partition();
+}
+int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
+int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
+int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);
+
+#else /* CONFIG_MSHV_ROOT */
+static inline bool hv_root_partition(void) { return false; }
+static inline bool hv_l1vh_partition(void) { return false; }
+static inline bool hv_parent_partition(void) { return false; }
+static inline int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
+{
+ return -EOPNOTSUPP;
+}
+static inline int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id)
+{
+ return -EOPNOTSUPP;
+}
+static inline int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_MSHV_ROOT */
+
+#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
+u8 __init get_vtl(void);
+#else
+static inline u8 get_vtl(void) { return 0; }
+#endif
+
#endif
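A hedged sketch of the new _skip variant in use; the predicate and caller are hypothetical. A negative return means some CPU in the mask had no valid VP index (VP_INVAL):

static bool hypothetical_already_done(int cpu);	/* skip predicate */

static int hypothetical_build_vpset(struct hv_vpset *vpset,
				    const struct cpumask *cpus)
{
	int nr_bank = cpumask_to_vpset_skip(vpset, cpus,
					    hypothetical_already_done);

	if (nr_bank < 0)
		return -EINVAL;
	/* ... pass vpset and nr_bank on to a rep hypercall ... */
	return nr_bank;
}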
diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h
index bf910d47e900..92cca4b23f13 100644
--- a/include/asm-generic/msi.h
+++ b/include/asm-generic/msi.h
@@ -4,7 +4,7 @@
#include <linux/types.h>
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+#ifdef CONFIG_GENERIC_MSI_IRQ
#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS
# define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2
@@ -33,9 +33,10 @@ typedef struct msi_alloc_info {
/* Device generating MSIs is proxying for another device */
#define MSI_ALLOC_FLAGS_PROXY_DEVICE (1UL << 0)
+#define MSI_ALLOC_FLAGS_FIXED_MSG_DATA (1UL << 1)
#define GENERIC_MSI_DOMAIN_OPS 1
-#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
+#endif /* CONFIG_GENERIC_MSI_IRQ */
#endif
diff --git a/include/asm-generic/numa.h b/include/asm-generic/numa.h
index 1a3ad6d29833..e063d6487f66 100644
--- a/include/asm-generic/numa.h
+++ b/include/asm-generic/numa.h
@@ -32,9 +32,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
void __init arch_numa_init(void);
int __init numa_add_memblk(int nodeid, u64 start, u64 end);
-void __init numa_set_distance(int from, int to, int distance);
-void __init numa_free_distance(void);
void __init early_map_cpu_to_node(unsigned int cpu, int nid);
+int early_cpu_to_node(int cpu);
void numa_store_cpu_info(unsigned int cpu);
void numa_add_cpu(unsigned int cpu);
void numa_remove_cpu(unsigned int cpu);
@@ -46,7 +45,12 @@ static inline void numa_add_cpu(unsigned int cpu) { }
static inline void numa_remove_cpu(unsigned int cpu) { }
static inline void arch_numa_init(void) { }
static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { }
+static inline int early_cpu_to_node(int cpu) { return 0; }
#endif /* CONFIG_NUMA */
+#ifdef CONFIG_NUMA_EMU
+void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable);
+#endif
+
#endif /* __ASM_GENERIC_NUMA_H */
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
deleted file mode 100644
index 6fc47561814c..000000000000
--- a/include/asm-generic/page.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_GENERIC_PAGE_H
-#define __ASM_GENERIC_PAGE_H
-/*
- * Generic page.h implementation, for NOMMU architectures.
- * This provides the dummy definitions for the memory management.
- */
-
-#ifdef CONFIG_MMU
-#error need to provide a real asm/page.h
-#endif
-
-
-/* PAGE_SHIFT determines the page size */
-
-#define PAGE_SHIFT 12
-#ifdef __ASSEMBLY__
-#define PAGE_SIZE (1 << PAGE_SHIFT)
-#else
-#define PAGE_SIZE (1UL << PAGE_SHIFT)
-#endif
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
-#include <asm/setup.h>
-
-#ifndef __ASSEMBLY__
-
-#define clear_page(page) memset((page), 0, PAGE_SIZE)
-#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
-
-#define clear_user_page(page, vaddr, pg) clear_page(page)
-#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-
-/*
- * These are used to make use of C type-checking..
- */
-typedef struct {
- unsigned long pte;
-} pte_t;
-typedef struct {
- unsigned long pmd[16];
-} pmd_t;
-typedef struct {
- unsigned long pgd;
-} pgd_t;
-typedef struct {
- unsigned long pgprot;
-} pgprot_t;
-typedef struct page *pgtable_t;
-
-#define pte_val(x) ((x).pte)
-#define pmd_val(x) ((&x)->pmd[0])
-#define pgd_val(x) ((x).pgd)
-#define pgprot_val(x) ((x).pgprot)
-
-#define __pte(x) ((pte_t) { (x) } )
-#define __pmd(x) ((pmd_t) { (x) } )
-#define __pgd(x) ((pgd_t) { (x) } )
-#define __pgprot(x) ((pgprot_t) { (x) } )
-
-extern unsigned long memory_start;
-extern unsigned long memory_end;
-
-#endif /* !__ASSEMBLY__ */
-
-#define PAGE_OFFSET (0)
-
-#ifndef ARCH_PFN_OFFSET
-#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
-#endif
-
-#ifndef __ASSEMBLY__
-
-#define __va(x) ((void *)((unsigned long) (x)))
-#define __pa(x) ((unsigned long) (x))
-
-#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
-
-#define virt_to_page(addr) pfn_to_page(virt_to_pfn(addr))
-#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
-
-#ifndef page_to_phys
-#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-#endif
-
-#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
-
-#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
- ((void *)(kaddr) < (void *)memory_end))
-
-#endif /* __ASSEMBLY__ */
-
-#include <asm-generic/memory_model.h>
-#include <asm-generic/getorder.h>
-
-#endif /* __ASM_GENERIC_PAGE_H */
diff --git a/include/asm-generic/param.h b/include/asm-generic/param.h
index 8d3009dd28ff..8348c116aa3b 100644
--- a/include/asm-generic/param.h
+++ b/include/asm-generic/param.h
@@ -6,6 +6,6 @@
# undef HZ
# define HZ CONFIG_HZ /* Internal kernel timer frequency */
-# define USER_HZ 100 /* some user interfaces are */
+# define USER_HZ __USER_HZ /* some user interfaces are */
# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
#endif /* __ASM_GENERIC_PARAM_H */
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index 6bb3cd3d695a..6869f1061528 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -1,17 +1,30 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * linux/include/asm-generic/pci.h
- *
- * Copyright (C) 2003 Russell King
- */
-#ifndef _ASM_GENERIC_PCI_H
-#define _ASM_GENERIC_PCI_H
+/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
-static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+#ifndef __ASM_GENERIC_PCI_H
+#define __ASM_GENERIC_PCI_H
+
+#ifndef PCIBIOS_MIN_IO
+#define PCIBIOS_MIN_IO 0
+#endif
+
+#ifndef PCIBIOS_MIN_MEM
+#define PCIBIOS_MIN_MEM 0
+#endif
+
+#ifndef pcibios_assign_all_busses
+/* For bootloaders that do not initialize the PCI bus */
+#define pcibios_assign_all_busses() 1
+#endif
+
+/* Enable generic resource mapping code in drivers/pci/ */
+#define ARCH_GENERIC_PCI_MMAP_RESOURCE
+
+#ifdef CONFIG_PCI_DOMAINS
+static inline int pci_proc_domain(struct pci_bus *bus)
{
- return channel ? 15 : 14;
+ /* always show the domain in /proc */
+ return 1;
}
-#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
+#endif /* CONFIG_PCI_DOMAINS */
-#endif /* _ASM_GENERIC_PCI_H */
+#endif /* __ASM_GENERIC_PCI_H */
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index d4f16dcc2ed7..8fbb0a55545d 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -18,12 +18,15 @@ extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
/* Create a virtual mapping cookie for a port on a given PCI device.
* Do not call this directly, it exists to make it easier for architectures
* to override */
#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP
extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,
unsigned int nr);
+#elif !defined(CONFIG_HAS_IOPORT_MAP)
+#define __pci_ioport_map(dev, port, nr) NULL
#else
#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr))
#endif
@@ -50,6 +53,8 @@ static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
{
return NULL;
}
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{ }
#endif
-#endif /* __ASM_GENERIC_IO_H */
+#endif /* __ASM_GENERIC_PCI_IOMAP_H */
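pci_iounmap() is now declared here unconditionally, with the generic implementation selected via ARCH_WANTS_GENERIC_PCI_IOUNMAP in asm-generic/io.h. The usual pairing, as a minimal sketch with a hypothetical probe function:

static int hypothetical_probe_bar0(struct pci_dev *pdev)
{
	void __iomem *regs = pci_iomap(pdev, 0, 0);	/* BAR 0, full length */

	if (!regs)
		return -ENOMEM;
	/* ... ioread32(regs + offset) ... */
	pci_iounmap(pdev, regs);
	return 0;
}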
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 6432a7fade91..6628670bcb90 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -2,10 +2,25 @@
#ifndef _ASM_GENERIC_PERCPU_H_
#define _ASM_GENERIC_PERCPU_H_
+#ifndef __ASSEMBLER__
+
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/percpu-defs.h>
+/*
+ * __percpu_qual is the qualifier for the percpu named address space.
+ *
+ * Most arches use generic named address space for percpu variables but
+ * some arches define percpu variables in different named address space
+ * (on the x86 arch, percpu variable may be declared as being relative
+ * to the %fs or %gs segments using __seg_fs or __seg_gs named address
+ * space qualifier).
+ */
+#ifndef __percpu_qual
+# define __percpu_qual
+#endif
+
#ifdef CONFIG_SMP
/*
@@ -74,7 +89,7 @@ do { \
#define raw_cpu_generic_add_return(pcp, val) \
({ \
- typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
+ TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \
\
*__p += val; \
*__p; \
@@ -82,39 +97,47 @@ do { \
#define raw_cpu_generic_xchg(pcp, nval) \
({ \
- typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
- typeof(pcp) __ret; \
+ TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \
+ TYPEOF_UNQUAL(pcp) __ret; \
__ret = *__p; \
*__p = nval; \
__ret; \
})
-#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
+#define __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, _cmpxchg) \
({ \
- typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
- typeof(pcp) __ret; \
- __ret = *__p; \
- if (__ret == (oval)) \
+ TYPEOF_UNQUAL(pcp) __val, __old = *(ovalp); \
+ __val = _cmpxchg(pcp, __old, nval); \
+ if (__val != __old) \
+ *(ovalp) = __val; \
+ __val == __old; \
+})
+
+#define raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval) \
+({ \
+ TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \
+ TYPEOF_UNQUAL(pcp) __val = *__p, ___old = *(ovalp); \
+ bool __ret; \
+ if (__val == ___old) { \
*__p = nval; \
+ __ret = true; \
+ } else { \
+ *(ovalp) = __val; \
+ __ret = false; \
+ } \
__ret; \
})
-#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
- typeof(pcp1) *__p1 = raw_cpu_ptr(&(pcp1)); \
- typeof(pcp2) *__p2 = raw_cpu_ptr(&(pcp2)); \
- int __ret = 0; \
- if (*__p1 == (oval1) && *__p2 == (oval2)) { \
- *__p1 = nval1; \
- *__p2 = nval2; \
- __ret = 1; \
- } \
- (__ret); \
+ TYPEOF_UNQUAL(pcp) __old = (oval); \
+ raw_cpu_generic_try_cmpxchg(pcp, &__old, nval); \
+ __old; \
})
#define __this_cpu_generic_read_nopreempt(pcp) \
({ \
- typeof(pcp) ___ret; \
+ TYPEOF_UNQUAL(pcp) ___ret; \
preempt_disable_notrace(); \
___ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \
preempt_enable_notrace(); \
@@ -123,7 +146,7 @@ do { \
#define __this_cpu_generic_read_noirq(pcp) \
({ \
- typeof(pcp) ___ret; \
+ TYPEOF_UNQUAL(pcp) ___ret; \
unsigned long ___flags; \
raw_local_irq_save(___flags); \
___ret = raw_cpu_generic_read(pcp); \
@@ -133,7 +156,7 @@ do { \
#define this_cpu_generic_read(pcp) \
({ \
- typeof(pcp) __ret; \
+ TYPEOF_UNQUAL(pcp) __ret; \
if (__native_word(pcp)) \
__ret = __this_cpu_generic_read_nopreempt(pcp); \
else \
@@ -152,7 +175,7 @@ do { \
#define this_cpu_generic_add_return(pcp, val) \
({ \
- typeof(pcp) __ret; \
+ TYPEOF_UNQUAL(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
__ret = raw_cpu_generic_add_return(pcp, val); \
@@ -162,7 +185,7 @@ do { \
#define this_cpu_generic_xchg(pcp, nval) \
({ \
- typeof(pcp) __ret; \
+ TYPEOF_UNQUAL(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
__ret = raw_cpu_generic_xchg(pcp, nval); \
@@ -170,23 +193,22 @@ do { \
__ret; \
})
-#define this_cpu_generic_cmpxchg(pcp, oval, nval) \
+#define this_cpu_generic_try_cmpxchg(pcp, ovalp, nval) \
({ \
- typeof(pcp) __ret; \
+ bool __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
- __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \
+ __ret = raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval); \
raw_local_irq_restore(__flags); \
__ret; \
})
-#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+#define this_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
- int __ret; \
+ TYPEOF_UNQUAL(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
- __ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \
- oval1, oval2, nval1, nval2); \
+ __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \
raw_local_irq_restore(__flags); \
__ret; \
})
@@ -282,6 +304,62 @@ do { \
#define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval)
#endif
+#ifndef raw_cpu_try_cmpxchg_1
+#ifdef raw_cpu_cmpxchg_1
+#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg_1)
+#else
+#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef raw_cpu_try_cmpxchg_2
+#ifdef raw_cpu_cmpxchg_2
+#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg_2)
+#else
+#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef raw_cpu_try_cmpxchg_4
+#ifdef raw_cpu_cmpxchg_4
+#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg_4)
+#else
+#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef raw_cpu_try_cmpxchg_8
+#ifdef raw_cpu_cmpxchg_8
+#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg_8)
+#else
+#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+
+#ifndef raw_cpu_try_cmpxchg64
+#ifdef raw_cpu_cmpxchg64
+#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg64)
+#else
+#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef raw_cpu_try_cmpxchg128
+#ifdef raw_cpu_cmpxchg128
+#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, raw_cpu_cmpxchg128)
+#else
+#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval) \
+ raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+
#ifndef raw_cpu_cmpxchg_1
#define raw_cpu_cmpxchg_1(pcp, oval, nval) \
raw_cpu_generic_cmpxchg(pcp, oval, nval)
@@ -299,21 +377,13 @@ do { \
raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
-#ifndef raw_cpu_cmpxchg_double_1
-#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef raw_cpu_cmpxchg_double_2
-#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef raw_cpu_cmpxchg_double_4
-#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#ifndef raw_cpu_cmpxchg64
+#define raw_cpu_cmpxchg64(pcp, oval, nval) \
+ raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
-#ifndef raw_cpu_cmpxchg_double_8
-#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#ifndef raw_cpu_cmpxchg128
+#define raw_cpu_cmpxchg128(pcp, oval, nval) \
+ raw_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
#ifndef this_cpu_read_1
@@ -407,6 +477,62 @@ do { \
#define this_cpu_xchg_8(pcp, nval) this_cpu_generic_xchg(pcp, nval)
#endif
+#ifndef this_cpu_try_cmpxchg_1
+#ifdef this_cpu_cmpxchg_1
+#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg_1)
+#else
+#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef this_cpu_try_cmpxchg_2
+#ifdef this_cpu_cmpxchg_2
+#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg_2)
+#else
+#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef this_cpu_try_cmpxchg_4
+#ifdef this_cpu_cmpxchg_4
+#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg_4)
+#else
+#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef this_cpu_try_cmpxchg_8
+#ifdef this_cpu_cmpxchg_8
+#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg_8)
+#else
+#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+
+#ifndef this_cpu_try_cmpxchg64
+#ifdef this_cpu_cmpxchg64
+#define this_cpu_try_cmpxchg64(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg64)
+#else
+#define this_cpu_try_cmpxchg64(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+#ifndef this_cpu_try_cmpxchg128
+#ifdef this_cpu_cmpxchg128
+#define this_cpu_try_cmpxchg128(pcp, ovalp, nval) \
+ __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, this_cpu_cmpxchg128)
+#else
+#define this_cpu_try_cmpxchg128(pcp, ovalp, nval) \
+ this_cpu_generic_try_cmpxchg(pcp, ovalp, nval)
+#endif
+#endif
+
#ifndef this_cpu_cmpxchg_1
#define this_cpu_cmpxchg_1(pcp, oval, nval) \
this_cpu_generic_cmpxchg(pcp, oval, nval)
@@ -424,21 +550,14 @@ do { \
this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
-#ifndef this_cpu_cmpxchg_double_1
-#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef this_cpu_cmpxchg_double_2
-#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-#endif
-#ifndef this_cpu_cmpxchg_double_4
-#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#ifndef this_cpu_cmpxchg64
+#define this_cpu_cmpxchg64(pcp, oval, nval) \
+ this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
-#ifndef this_cpu_cmpxchg_double_8
-#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#ifndef this_cpu_cmpxchg128
+#define this_cpu_cmpxchg128(pcp, oval, nval) \
+ this_cpu_generic_cmpxchg(pcp, oval, nval)
#endif
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_GENERIC_PERCPU_H_ */
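The try_cmpxchg fallbacks above return a bool and, on failure, write the currently observed value back through @ovalp, so callers no longer need the re-read-and-compare boilerplate of a classic cmpxchg loop. A minimal usage sketch, assuming a hypothetical DEFINE_PER_CPU counter and the generic this_cpu_try_cmpxchg() wrapper from linux/percpu-defs.h:

/* Hypothetical per-CPU counter, shown only to illustrate the loop shape. */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_counter_inc(void)
{
	unsigned long old = this_cpu_read(demo_counter);

	/* On failure, 'old' is refreshed with the observed value. */
	while (!this_cpu_try_cmpxchg(demo_counter, &old, old + 1))
		;
}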
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 02932efad3ab..57137d3ac159 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -8,7 +8,7 @@
#define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)
/**
- * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
+ * __pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
* @mm: the mm_struct of the current context
*
* This function is intended for architectures that need
@@ -16,74 +16,89 @@
*
* Return: pointer to the allocated memory or %NULL on error
*/
-static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
+static inline pte_t *__pte_alloc_one_kernel_noprof(struct mm_struct *mm)
{
- return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
+ struct ptdesc *ptdesc = pagetable_alloc_noprof(GFP_PGTABLE_KERNEL, 0);
+
+ if (!ptdesc)
+ return NULL;
+ if (!pagetable_pte_ctor(mm, ptdesc)) {
+ pagetable_free(ptdesc);
+ return NULL;
+ }
+
+ ptdesc_set_kernel(ptdesc);
+
+ return ptdesc_address(ptdesc);
}
+#define __pte_alloc_one_kernel(...) alloc_hooks(__pte_alloc_one_kernel_noprof(__VA_ARGS__))
#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
/**
- * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
+ * pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
* @mm: the mm_struct of the current context
*
* Return: pointer to the allocated memory or %NULL on error
*/
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm)
{
- return __pte_alloc_one_kernel(mm);
+ return __pte_alloc_one_kernel_noprof(mm);
}
+#define pte_alloc_one_kernel(...) alloc_hooks(pte_alloc_one_kernel_noprof(__VA_ARGS__))
#endif
/**
- * pte_free_kernel - free PTE-level kernel page table page
+ * pte_free_kernel - free PTE-level kernel page table memory
* @mm: the mm_struct of the current context
* @pte: pointer to the memory containing the page table
*/
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
- free_page((unsigned long)pte);
+ pagetable_dtor_free(virt_to_ptdesc(pte));
}
/**
- * __pte_alloc_one - allocate a page for PTE-level user page table
+ * __pte_alloc_one - allocate memory for a PTE-level user page table
* @mm: the mm_struct of the current context
* @gfp: GFP flags to use for the allocation
*
- * Allocates a page and runs the pgtable_pte_page_ctor().
+ * Allocates memory for a page table and ptdesc, and runs pagetable_pte_ctor().
*
* This function is intended for architectures that need
* anything beyond simple page allocation or must have custom GFP flags.
*
- * Return: `struct page` initialized as page table or %NULL on error
+ * Return: `struct page` referencing the ptdesc or %NULL on error
*/
-static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
+static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp)
{
- struct page *pte;
+ struct ptdesc *ptdesc;
- pte = alloc_page(gfp);
- if (!pte)
+ ptdesc = pagetable_alloc_noprof(gfp, 0);
+ if (!ptdesc)
return NULL;
- if (!pgtable_pte_page_ctor(pte)) {
- __free_page(pte);
+ if (!pagetable_pte_ctor(mm, ptdesc)) {
+ pagetable_free(ptdesc);
return NULL;
}
- return pte;
+ return ptdesc_page(ptdesc);
}
+#define __pte_alloc_one(...) alloc_hooks(__pte_alloc_one_noprof(__VA_ARGS__))
#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
/**
* pte_alloc_one - allocate a page for PTE-level user page table
* @mm: the mm_struct of the current context
*
- * Allocates a page and runs the pgtable_pte_page_ctor().
+ * Allocates memory for a page table and ptdesc, and runs pagetable_pte_ctor().
*
- * Return: `struct page` initialized as page table or %NULL on error
+ * Return: `struct page` referencing the ptdesc or %NULL on error
*/
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
+static inline pgtable_t pte_alloc_one_noprof(struct mm_struct *mm)
{
- return __pte_alloc_one(mm, GFP_PGTABLE_USER);
+ return __pte_alloc_one_noprof(mm, GFP_PGTABLE_USER);
}
+#define pte_alloc_one(...) alloc_hooks(pte_alloc_one_noprof(__VA_ARGS__))
#endif
/*
@@ -92,14 +107,15 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
*/
/**
- * pte_free - free PTE-level user page table page
+ * pte_free - free PTE-level user page table memory
* @mm: the mm_struct of the current context
- * @pte_page: the `struct page` representing the page table
+ * @pte_page: the `struct page` referencing the ptdesc
*/
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
- pgtable_pte_page_dtor(pte_page);
- __free_page(pte_page);
+ struct ptdesc *ptdesc = page_ptdesc(pte_page);
+
+ pagetable_dtor_free(ptdesc);
}
@@ -107,39 +123,46 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
/**
- * pmd_alloc_one - allocate a page for PMD-level page table
+ * pmd_alloc_one - allocate memory for a PMD-level page table
* @mm: the mm_struct of the current context
*
- * Allocates a page and runs the pgtable_pmd_page_ctor().
+ * Allocates memory for a page table and ptdesc, and runs pagetable_pmd_ctor().
+ *
* Allocations use %GFP_PGTABLE_USER in user context and
* %GFP_PGTABLE_KERNEL in kernel context.
*
* Return: pointer to the allocated memory or %NULL on error
*/
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pmd_t *pmd_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
- struct page *page;
+ struct ptdesc *ptdesc;
gfp_t gfp = GFP_PGTABLE_USER;
if (mm == &init_mm)
gfp = GFP_PGTABLE_KERNEL;
- page = alloc_pages(gfp, 0);
- if (!page)
+ ptdesc = pagetable_alloc_noprof(gfp, 0);
+ if (!ptdesc)
return NULL;
- if (!pgtable_pmd_page_ctor(page)) {
- __free_pages(page, 0);
+ if (!pagetable_pmd_ctor(mm, ptdesc)) {
+ pagetable_free(ptdesc);
return NULL;
}
- return (pmd_t *)page_address(page);
+
+ if (mm == &init_mm)
+ ptdesc_set_kernel(ptdesc);
+
+ return ptdesc_address(ptdesc);
}
+#define pmd_alloc_one(...) alloc_hooks(pmd_alloc_one_noprof(__VA_ARGS__))
#endif
#ifndef __HAVE_ARCH_PMD_FREE
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
+ struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
+
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
- pgtable_pmd_page_dtor(virt_to_page(pmd));
- free_page((unsigned long)pmd);
+ pagetable_dtor_free(ptdesc);
}
#endif
@@ -147,38 +170,143 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#if CONFIG_PGTABLE_LEVELS > 3
+static inline pud_t *__pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
+{
+ gfp_t gfp = GFP_PGTABLE_USER;
+ struct ptdesc *ptdesc;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+
+ ptdesc = pagetable_alloc_noprof(gfp, 0);
+ if (!ptdesc)
+ return NULL;
+
+ pagetable_pud_ctor(ptdesc);
+
+ if (mm == &init_mm)
+ ptdesc_set_kernel(ptdesc);
+
+ return ptdesc_address(ptdesc);
+}
+#define __pud_alloc_one(...) alloc_hooks(__pud_alloc_one_noprof(__VA_ARGS__))
+
#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
- * pud_alloc_one - allocate a page for PUD-level page table
+ * pud_alloc_one - allocate memory for a PUD-level page table
* @mm: the mm_struct of the current context
*
- * Allocates a page using %GFP_PGTABLE_USER for user context and
- * %GFP_PGTABLE_KERNEL for kernel context.
+ * Allocate memory for a page table using %GFP_PGTABLE_USER for user context
+ * and %GFP_PGTABLE_KERNEL for kernel context.
*
* Return: pointer to the allocated memory or %NULL on error
*/
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pud_t *pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
+{
+ return __pud_alloc_one_noprof(mm, addr);
+}
+#define pud_alloc_one(...) alloc_hooks(pud_alloc_one_noprof(__VA_ARGS__))
+#endif
+
+static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ struct ptdesc *ptdesc = virt_to_ptdesc(pud);
+
+ BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
+ pagetable_dtor_free(ptdesc);
+}
+
+#ifndef __HAVE_ARCH_PUD_FREE
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ __pud_free(mm, pud);
+}
+#endif
+
+#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+
+#if CONFIG_PGTABLE_LEVELS > 4
+
+static inline p4d_t *__p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
{
gfp_t gfp = GFP_PGTABLE_USER;
+ struct ptdesc *ptdesc;
if (mm == &init_mm)
gfp = GFP_PGTABLE_KERNEL;
- return (pud_t *)get_zeroed_page(gfp);
+
+ ptdesc = pagetable_alloc_noprof(gfp, 0);
+ if (!ptdesc)
+ return NULL;
+
+ pagetable_p4d_ctor(ptdesc);
+
+ if (mm == &init_mm)
+ ptdesc_set_kernel(ptdesc);
+
+ return ptdesc_address(ptdesc);
}
+#define __p4d_alloc_one(...) alloc_hooks(__p4d_alloc_one_noprof(__VA_ARGS__))
+
+#ifndef __HAVE_ARCH_P4D_ALLOC_ONE
+static inline p4d_t *p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr)
+{
+ return __p4d_alloc_one_noprof(mm, addr);
+}
+#define p4d_alloc_one(...) alloc_hooks(p4d_alloc_one_noprof(__VA_ARGS__))
#endif
-static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
- BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
- free_page((unsigned long)pud);
+ struct ptdesc *ptdesc = virt_to_ptdesc(p4d);
+
+ BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
+ pagetable_dtor_free(ptdesc);
}
-#endif /* CONFIG_PGTABLE_LEVELS > 3 */
+#ifndef __HAVE_ARCH_P4D_FREE
+static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+ if (!mm_p4d_folded(mm))
+ __p4d_free(mm, p4d);
+}
+#endif
+
+#endif /* CONFIG_PGTABLE_LEVELS > 4 */
+
+static inline pgd_t *__pgd_alloc_noprof(struct mm_struct *mm, unsigned int order)
+{
+ gfp_t gfp = GFP_PGTABLE_USER;
+ struct ptdesc *ptdesc;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+
+ ptdesc = pagetable_alloc_noprof(gfp, order);
+ if (!ptdesc)
+ return NULL;
+
+ pagetable_pgd_ctor(ptdesc);
+
+ if (mm == &init_mm)
+ ptdesc_set_kernel(ptdesc);
+
+ return ptdesc_address(ptdesc);
+}
+#define __pgd_alloc(...) alloc_hooks(__pgd_alloc_noprof(__VA_ARGS__))
+
+static inline void __pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ struct ptdesc *ptdesc = virt_to_ptdesc(pgd);
+
+ BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
+ pagetable_dtor_free(ptdesc);
+}
#ifndef __HAVE_ARCH_PGD_FREE
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- free_page((unsigned long)pgd);
+ __pgd_free(mm, pgd);
}
#endif
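All allocation paths in pgalloc.h now funnel through pagetable_alloc_noprof() plus the level-specific constructor, with an alloc_hooks() wrapper layered on for allocation profiling; frees go through pagetable_dtor_free(). A sketch of the override pattern this enables, for a hypothetical architecture that wants extra GFP flags on user PTE tables (the __GFP_HIGHMEM flag is illustrative only), mirroring the _noprof/alloc_hooks() split used above:

#define __HAVE_ARCH_PTE_ALLOC_ONE
static inline pgtable_t pte_alloc_one_noprof(struct mm_struct *mm)
{
	/* Reuses the generic ctor/dtor handling, just with extra flags. */
	return __pte_alloc_one_noprof(mm, GFP_PGTABLE_USER | __GFP_HIGHMEM);
}
#define pte_alloc_one(...)	alloc_hooks(pte_alloc_one_noprof(__VA_ARGS__))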
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index ce2cbb3c380f..03b7dae47dd4 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -9,7 +9,6 @@
typedef struct { pgd_t pgd; } p4d_t;
#define P4D_SHIFT PGDIR_SHIFT
-#define MAX_PTRS_PER_P4D 1
#define PTRS_PER_P4D 1
#define P4D_SIZE (1UL << P4D_SHIFT)
#define P4D_MASK (~(P4D_SIZE-1))
@@ -42,7 +41,7 @@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
#define __p4d(x) ((p4d_t) { __pgd(x) })
#define pgd_page(pgd) (p4d_page((p4d_t){ pgd }))
-#define pgd_page_vaddr(pgd) (p4d_page_vaddr((p4d_t){ pgd }))
+#define pgd_page_vaddr(pgd) ((unsigned long)(p4d_pgtable((p4d_t){ pgd })))
/*
* allocating and freeing a p4d is trivial: the 1-entry p4d is
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index 3e13acd019ae..8ffd64e7a24c 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -30,6 +30,8 @@ typedef struct { pud_t pud; } pmd_t;
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }
static inline int pud_present(pud_t pud) { return 1; }
+static inline int pud_user(pud_t pud) { return 0; }
+static inline int pud_leaf(pud_t pud) { return 0; }
static inline void pud_clear(pud_t *pud) { }
#define pmd_ERROR(pmd) (pud_ERROR((pmd).pud))
@@ -51,7 +53,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
#define __pmd(x) ((pmd_t) { __pud(x) } )
#define pud_page(pud) (pmd_page((pmd_t){ pud }))
-#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud }))
+#define pud_pgtable(pud) ((pmd_t *)(pmd_page_vaddr((pmd_t){ pud })))
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index a9d751fbda9e..eb70c6d7ceff 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -49,7 +49,7 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
#define __pud(x) ((pud_t) { __p4d(x) })
#define p4d_page(p4d) (pud_page((pud_t){ p4d }))
-#define p4d_page_vaddr(p4d) (pud_page_vaddr((pud_t){ p4d }))
+#define p4d_pgtable(p4d) ((pud_t *)(pud_pgtable((pud_t){ p4d })))
/*
* allocating and freeing a pud is trivial: the 1-entry pud is
diff --git a/include/asm-generic/pgtable_uffd.h b/include/asm-generic/pgtable_uffd.h
index 828966d4c281..0d85791efdf7 100644
--- a/include/asm-generic/pgtable_uffd.h
+++ b/include/asm-generic/pgtable_uffd.h
@@ -1,6 +1,23 @@
#ifndef _ASM_GENERIC_PGTABLE_UFFD_H
#define _ASM_GENERIC_PGTABLE_UFFD_H
+/*
+ * Some platforms can customize the uffd-wp bit, making it unavailable
+ * even when the architecture provides the resource.
+ * This API allows an architecture to add its own check for the
+ * device the kernel is running on.
+ * Note: when overriding it, please make sure
+ * CONFIG_HAVE_ARCH_USERFAULTFD_WP remains part of the macro.
+ */
+#ifndef pgtable_supports_uffd_wp
+#define pgtable_supports_uffd_wp() IS_ENABLED(CONFIG_HAVE_ARCH_USERFAULTFD_WP)
+#endif
+
+static inline bool uffd_supports_wp_marker(void)
+{
+ return pgtable_supports_uffd_wp() && IS_ENABLED(CONFIG_PTE_MARKER_UFFD_WP);
+}
+
#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static __always_inline int pte_uffd_wp(pte_t pte)
{
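A sketch of the per-architecture override that the comment above anticipates, for hardware where the uffd-wp bit may be unavailable at runtime; cpu_has_uffd_wp_bit() is a hypothetical helper, not an existing kernel API:

/* In the architecture's asm/pgtable_uffd.h: */
#define pgtable_supports_uffd_wp()				\
	(IS_ENABLED(CONFIG_HAVE_ARCH_USERFAULTFD_WP) &&	\
	 cpu_has_uffd_wp_bit())	/* assumed runtime check */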
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index d683f5e6d791..51f8f3881523 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -29,7 +29,7 @@ static __always_inline void preempt_count_set(int pc)
} while (0)
#define init_idle_preempt_count(p, cpu) do { \
- task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)
static __always_inline void set_preempt_need_resched(void)
@@ -80,9 +80,21 @@ static __always_inline bool should_resched(int preempt_offset)
#ifdef CONFIG_PREEMPTION
extern asmlinkage void preempt_schedule(void);
-#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
+
+#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+void dynamic_preempt_schedule(void);
+void dynamic_preempt_schedule_notrace(void);
+#define __preempt_schedule() dynamic_preempt_schedule()
+#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
+
+#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_KEY */
+
+#define __preempt_schedule() preempt_schedule()
#define __preempt_schedule_notrace() preempt_schedule_notrace()
+
+#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_KEY */
#endif /* CONFIG_PREEMPTION */
#endif /* __ASM_PREEMPT_H */
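With CONFIG_PREEMPT_DYNAMIC plus static-key support, __preempt_schedule() now routes through a boot-patchable entry point. Roughly, the dynamic wrapper (which lives in the scheduler core, not in this header) gates the real call behind a static branch; a sketch:

static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);

void dynamic_preempt_schedule(void)
{
	/* Patched out when booting with preempt=none or preempt=voluntary. */
	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
		return;
	preempt_schedule();
}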
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 7ae0ece07b4e..75b8f4601b28 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -2,6 +2,10 @@
/*
* Queue read/write lock
*
+ * These use generic atomic and locking routines, but depend on a fair spinlock
+ * implementation in order to be fair themselves. The implementation in
+ * asm-generic/spinlock.h meets these requirements.
+ *
* (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
*
* Authors: Waiman Long <waiman.long@hp.com>
@@ -33,8 +37,8 @@ extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);
/**
- * queued_read_trylock - try to acquire read lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_read_trylock - try to acquire read lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
* Return: 1 if lock acquired, 0 if failed
*/
static inline int queued_read_trylock(struct qrwlock *lock)
@@ -52,8 +56,8 @@ static inline int queued_read_trylock(struct qrwlock *lock)
}
/**
- * queued_write_trylock - try to acquire write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_trylock - try to acquire write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
* Return: 1 if lock acquired, 0 if failed
*/
static inline int queued_write_trylock(struct qrwlock *lock)
@@ -68,8 +72,8 @@ static inline int queued_write_trylock(struct qrwlock *lock)
_QW_LOCKED));
}
/**
- * queued_read_lock - acquire read lock of a queue rwlock
- * @lock: Pointer to queue rwlock structure
+ * queued_read_lock - acquire read lock of a queued rwlock
+ * @lock: Pointer to queued rwlock structure
*/
static inline void queued_read_lock(struct qrwlock *lock)
{
@@ -84,8 +88,8 @@ static inline void queued_read_lock(struct qrwlock *lock)
}
/**
- * queued_write_lock - acquire write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_lock - acquire write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
*/
static inline void queued_write_lock(struct qrwlock *lock)
{
@@ -98,8 +102,8 @@ static inline void queued_write_lock(struct qrwlock *lock)
}
/**
- * queued_read_unlock - release read lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_read_unlock - release read lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
*/
static inline void queued_read_unlock(struct qrwlock *lock)
{
@@ -110,8 +114,8 @@ static inline void queued_read_unlock(struct qrwlock *lock)
}
/**
- * queued_write_unlock - release write lock of a queue rwlock
- * @lock : Pointer to queue rwlock structure
+ * queued_write_unlock - release write lock of a queued rwlock
+ * @lock : Pointer to queued rwlock structure
*/
static inline void queued_write_unlock(struct qrwlock *lock)
{
@@ -120,7 +124,7 @@ static inline void queued_write_unlock(struct qrwlock *lock)
/**
* queued_rwlock_is_contended - check if the lock is contended
- * @lock : Pointer to queue rwlock structure
+ * @lock : Pointer to queued rwlock structure
* Return: 1 if lock contended, 0 otherwise
*/
static inline int queued_rwlock_is_contended(struct qrwlock *lock)
@@ -130,7 +134,7 @@ static inline int queued_rwlock_is_contended(struct qrwlock *lock)
/*
* Remapping rwlock architecture specific functions to the corresponding
- * queue rwlock functions.
+ * queued rwlock functions.
*/
#define arch_read_lock(l) queued_read_lock(l)
#define arch_write_lock(l) queued_write_lock(l)
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
index c36f1d5a2572..12392c14c4d0 100644
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -7,7 +7,7 @@
#include <asm/spinlock_types.h>
/*
- * The queue read/write lock data structure
+ * The queued read/write lock data structure
*/
typedef struct qrwlock {
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index d74b13825501..bf47cca2c375 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -2,6 +2,35 @@
/*
* Queued spinlock
*
+ * A 'generic' spinlock implementation that is based on MCS locks. For an
+ * architecture that's looking for a 'generic' spinlock, please first consider
+ * ticket_spinlock.h and only come looking here when you've considered all the
+ * constraints below and can show your hardware does actually perform better
+ * with qspinlock.
+ *
+ * qspinlock relies on atomic_*_release()/atomic_*_acquire() to be RCsc (or no
+ * weaker than RCtso if you're Power), where regular code only expects atomic_t
+ * to be RCpc.
+ *
+ * qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set
+ * of atomic operations to behave well together; please audit them carefully to
+ * ensure they all have forward progress. Many atomic operations may default to
+ * cmpxchg() loops which will not have good forward progress properties on
+ * LL/SC architectures.
+ *
+ * One notable example is atomic_fetch_or_acquire(), which x86 cannot (cheaply)
+ * do. Carefully read the patches that introduced
+ * queued_fetch_set_pending_acquire().
+ *
+ * qspinlock also heavily relies on mixed-size atomic operations; specifically,
+ * it requires architectures to have xchg16, something which many LL/SC
+ * architectures need to implement as a 32-bit and+or in order to satisfy the
+ * forward-progress guarantees mentioned above.
+ *
+ * Further reading on mixed size atomics that might be relevant:
+ *
+ * http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf
+ *
* (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
* (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
*
@@ -41,7 +70,7 @@ static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
*/
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
- return !atomic_read(&lock.val);
+ return !lock.val.counter;
}
/**
@@ -107,6 +136,7 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
}
#endif
+#ifndef __no_arch_spinlock_redefine
/*
* Remapping spinlock architecture specific functions to the corresponding
* queued spinlock functions.
@@ -117,5 +147,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
#define arch_spin_lock(l) queued_spin_lock(l)
#define arch_spin_trylock(l) queued_spin_trylock(l)
#define arch_spin_unlock(l) queued_spin_unlock(l)
+#endif
#endif /* __ASM_GENERIC_QSPINLOCK_H */
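To make the xchg16 requirement above concrete, here is a sketch of how an architecture without native 16-bit atomics might emulate it with a 32-bit cmpxchg loop (little-endian halfword placement assumed; xchg16_emulated() is illustrative, not a drop-in implementation). Note this is exactly the kind of loop whose forward progress the comment asks you to audit:

static inline u16 xchg16_emulated(u16 *ptr, u16 newval)
{
	u32 *word = (u32 *)((unsigned long)ptr & ~0x3UL);
	int shift = ((unsigned long)ptr & 0x2) * BITS_PER_BYTE;
	u32 mask = 0xffffU << shift;
	u32 old = READ_ONCE(*word), new;

	do {
		new = (old & ~mask) | ((u32)newval << shift);
	} while (!try_cmpxchg(word, &old, new));

	return (old & mask) >> shift;
}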
diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h
new file mode 100644
index 000000000000..0f2dcbbfee2f
--- /dev/null
+++ b/include/asm-generic/rqspinlock.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Resilient Queued Spin Lock
+ *
+ * (C) Copyright 2024-2025 Meta Platforms, Inc. and affiliates.
+ *
+ * Authors: Kumar Kartikeya Dwivedi <memxor@gmail.com>
+ */
+#ifndef __ASM_GENERIC_RQSPINLOCK_H
+#define __ASM_GENERIC_RQSPINLOCK_H
+
+#include <linux/types.h>
+#include <vdso/time64.h>
+#include <linux/percpu.h>
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#endif
+
+struct rqspinlock {
+ union {
+ atomic_t val;
+ u32 locked;
+ };
+};
+
+/*
+ * Even though this is the same as struct rqspinlock, we need to emit a
+ * distinct type in BTF for BPF programs.
+ */
+struct bpf_res_spin_lock {
+ u32 val;
+};
+
+struct qspinlock;
+#ifdef CONFIG_QUEUED_SPINLOCKS
+typedef struct qspinlock rqspinlock_t;
+#else
+typedef struct rqspinlock rqspinlock_t;
+#endif
+
+extern int resilient_tas_spin_lock(rqspinlock_t *lock);
+#ifdef CONFIG_QUEUED_SPINLOCKS
+extern int resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val);
+#endif
+
+#ifndef resilient_virt_spin_lock_enabled
+static __always_inline bool resilient_virt_spin_lock_enabled(void)
+{
+ return false;
+}
+#endif
+
+#ifndef resilient_virt_spin_lock
+static __always_inline int resilient_virt_spin_lock(rqspinlock_t *lock)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Default timeout for waiting loops is 0.25 seconds
+ */
+#define RES_DEF_TIMEOUT (NSEC_PER_SEC / 4)
+
+/*
+ * Choose 31 as it makes rqspinlock_held cacheline-aligned.
+ */
+#define RES_NR_HELD 31
+
+struct rqspinlock_held {
+ int cnt;
+ void *locks[RES_NR_HELD];
+};
+
+DECLARE_PER_CPU_ALIGNED(struct rqspinlock_held, rqspinlock_held_locks);
+
+static __always_inline void grab_held_lock_entry(void *lock)
+{
+ int cnt = this_cpu_inc_return(rqspinlock_held_locks.cnt);
+
+ if (unlikely(cnt > RES_NR_HELD)) {
+ /* Still keep the inc so we decrement later. */
+ return;
+ }
+
+ /*
+	 * Per-CPU operations imply a compiler barrier; without it the compiler
+	 * could reorder the inc with the write to the table, allowing an
+	 * interrupt to overwrite and erase our write to the table (on interrupt
+	 * exit the entry is reset to NULL).
+ *
+ * It is fine for cnt inc to be reordered wrt remote readers though,
+ * they won't observe our entry until the cnt update is visible, that's
+ * all.
+ */
+ this_cpu_write(rqspinlock_held_locks.locks[cnt - 1], lock);
+}
+
+/*
+ * We simply don't support out-of-order unlocks, and keep the logic simple here.
+ * The verifier prevents BPF programs from unlocking out-of-order, and the same
+ * holds for in-kernel users.
+ *
+ * It is possible to run into AA-deadlock misdetection on the same CPU, and
+ * missed ABBA deadlocks on remote CPUs, if this function pops entries out of
+ * order (due to a lock A, lock B, unlock A, unlock B pattern). The correct
+ * logic to preserve the right entries in the table would be to walk the array
+ * of held locks, swapping and clearing out-of-order entries, but that is too
+ * complicated and we don't have a compelling use case for out-of-order
+ * unlocking.
+ */
+static __always_inline void release_held_lock_entry(void)
+{
+ struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
+
+ if (unlikely(rqh->cnt > RES_NR_HELD))
+ goto dec;
+ WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL);
+dec:
+ /*
+	 * Reordering of the clearing above with the inc and its write in
+	 * grab_held_lock_entry that came before us (in the same acquisition
+	 * attempt) is OK: we either see a valid entry or NULL once it's
+	 * visible.
+ *
+ * But this helper is invoked when we unwind upon failing to acquire the
+ * lock. Unlike the unlock path which constitutes a release store after
+ * we clear the entry, we need to emit a write barrier here. Otherwise,
+ * we may have a situation as follows:
+ *
+ * <error> for lock B
+ * release_held_lock_entry
+ *
+ * grab_held_lock_entry
+ * try_cmpxchg_acquire for lock A
+ *
+ * Lack of any ordering means reordering may occur such that dec, inc
+ * are done before entry is overwritten. This permits a remote lock
+ * holder of lock B (which this CPU failed to acquire) to now observe it
+ * as being attempted on this CPU, and may lead to misdetection (if this
+ * CPU holds a lock it is attempting to acquire, leading to false ABBA
+ * diagnosis).
+ *
+ * The case of unlock is treated differently due to NMI reentrancy, see
+ * comments in res_spin_unlock.
+ *
+ * In theory we don't have a problem if the dec and WRITE_ONCE above get
+	 * reordered with each other: we either notice an empty NULL entry on
+ * top (if dec succeeds WRITE_ONCE), or a potentially stale entry which
+ * cannot be observed (if dec precedes WRITE_ONCE).
+ *
+	 * Emit the write barrier _before_ the dec; this permits dec-inc
+	 * reordering, but that is harmless as we'd have the new entry set to
+	 * NULL already, i.e. they cannot precede the NULL store above.
+ */
+ smp_wmb();
+ this_cpu_dec(rqspinlock_held_locks.cnt);
+}
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+
+/**
+ * res_spin_lock - acquire a queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ *
+ * Return:
+ * * 0 - Lock was acquired successfully.
+ * * -EDEADLK - Lock acquisition failed because of AA/ABBA deadlock.
+ * * -ETIMEDOUT - Lock acquisition failed because of timeout.
+ */
+static __always_inline int res_spin_lock(rqspinlock_t *lock)
+{
+ int val = 0;
+
+ /*
+ * Grab the deadlock detection entry before doing the cmpxchg, so that
+ * reentrancy due to NMIs between the succeeding cmpxchg and creation of
+ * held lock entry can correctly detect an acquisition attempt in the
+ * interrupted context.
+ *
+ * cmpxchg lock A
+ * <NMI>
+ * res_spin_lock(A) --> missed AA, leads to timeout
+ * </NMI>
+ * grab_held_lock_entry(A)
+ */
+ grab_held_lock_entry(lock);
+
+ if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
+ return 0;
+ return resilient_queued_spin_lock_slowpath(lock, val);
+}
+
+#else
+
+#define res_spin_lock(lock) resilient_tas_spin_lock(lock)
+
+#endif /* CONFIG_QUEUED_SPINLOCKS */
+
+static __always_inline void res_spin_unlock(rqspinlock_t *lock)
+{
+ struct rqspinlock_held *rqh = this_cpu_ptr(&rqspinlock_held_locks);
+
+ /*
+ * Release barrier, ensures correct ordering. Perform release store
+ * instead of queued_spin_unlock, since we use this function for the TAS
+ * fallback as well. When we have CONFIG_QUEUED_SPINLOCKS=n, we clear
+ * the full 4-byte lockword.
+ *
+ * Perform the smp_store_release before clearing the lock entry so that
+ * NMIs landing in the unlock path can correctly detect AA issues. The
+ * opposite order shown below may lead to missed AA checks:
+ *
+ * WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL)
+ * <NMI>
+ * res_spin_lock(A) --> missed AA, leads to timeout
+ * </NMI>
+ * smp_store_release(A->locked, 0)
+ */
+ smp_store_release(&lock->locked, 0);
+ if (likely(rqh->cnt <= RES_NR_HELD))
+ WRITE_ONCE(rqh->locks[rqh->cnt - 1], NULL);
+ this_cpu_dec(rqspinlock_held_locks.cnt);
+}
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; })
+#else
+#define raw_res_spin_lock_init(lock) ({ *(lock) = (rqspinlock_t){0}; })
+#endif
+
+#define raw_res_spin_lock(lock) \
+ ({ \
+ int __ret; \
+ preempt_disable(); \
+ __ret = res_spin_lock(lock); \
+ if (__ret) \
+ preempt_enable(); \
+ __ret; \
+ })
+
+#define raw_res_spin_unlock(lock) ({ res_spin_unlock(lock); preempt_enable(); })
+
+#define raw_res_spin_lock_irqsave(lock, flags) \
+ ({ \
+ int __ret; \
+ local_irq_save(flags); \
+ __ret = raw_res_spin_lock(lock); \
+ if (__ret) \
+ local_irq_restore(flags); \
+ __ret; \
+ })
+
+#define raw_res_spin_unlock_irqrestore(lock, flags) ({ raw_res_spin_unlock(lock); local_irq_restore(flags); })
+
+#endif /* __ASM_GENERIC_RQSPINLOCK_H */
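Since resilient locks can fail with -EDEADLK or -ETIMEDOUT, every acquisition must be checked before the critical section runs. A minimal usage sketch; my_rqlock and the surrounding function are hypothetical:

static rqspinlock_t my_rqlock;

static int update_protected_state(void)
{
	unsigned long flags;
	int ret;

	ret = raw_res_spin_lock_irqsave(&my_rqlock, flags);
	if (ret)	/* deadlock or timeout: back off, touch nothing */
		return ret;

	/* ... critical section ... */

	raw_res_spin_unlock_irqrestore(&my_rqlock, flags);
	return 0;
}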
diff --git a/include/asm-generic/runtime-const.h b/include/asm-generic/runtime-const.h
new file mode 100644
index 000000000000..670499459514
--- /dev/null
+++ b/include/asm-generic/runtime-const.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RUNTIME_CONST_H
+#define _ASM_RUNTIME_CONST_H
+
+/*
+ * This is the fallback for when the architecture doesn't
+ * support the runtime const operations.
+ *
+ * We just use the actual symbols as-is.
+ */
+#define runtime_const_ptr(sym) (sym)
+#define runtime_const_shift_right_32(val, sym) ((u32)(val)>>(sym))
+#define runtime_const_init(type,sym) do { } while (0)
+
+#endif
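For context, a caller pairs one boot-time fixup with patched fast-path reads; under this fallback both collapse to plain symbol accesses. A hedged sketch, with my_hashtable purely hypothetical:

static struct hlist_head *my_hashtable;	/* assumed symbol */

static struct hlist_head *my_hash_bucket(unsigned long hash)
{
	/* Plain load here; a patched-in immediate on architectures
	 * that implement runtime-const support. */
	return runtime_const_ptr(my_hashtable) + hash;
}

/* Once at init, after the table exists: runtime_const_init(ptr, my_hashtable); */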
diff --git a/include/asm-generic/rwonce.h b/include/asm-generic/rwonce.h
index 8d0a6280e982..52b969c7cef9 100644
--- a/include/asm-generic/rwonce.h
+++ b/include/asm-generic/rwonce.h
@@ -79,10 +79,18 @@ unsigned long __read_once_word_nocheck(const void *addr)
(typeof(x))__read_once_word_nocheck(&(x)); \
})
-static __no_kasan_or_inline
+static __no_sanitize_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
+ /* open-coded instrument_read(addr, 1) */
kasan_check_read(addr, 1);
+ kcsan_check_read(addr, 1);
+
+ /*
+ * This load can race with concurrent stores to out-of-bounds memory,
+ * but READ_ONCE() can't be used because it requires higher alignment
+ * than plain loads in arm64 builds with LTO.
+ */
return *(unsigned long *)addr;
}
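The intended use of read_word_at_a_time() is word-at-a-time string walking, where reading a few bytes past the terminating NUL is tolerated by design (callers must ensure the overread cannot fault, e.g. by padding). A sketch built on the helpers from asm/word-at-a-time.h:

static inline size_t wordwise_strlen(const char *s)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	unsigned long data, mask;
	size_t len;

	for (len = 0; ; len += sizeof(unsigned long)) {
		data = read_word_at_a_time(s + len);
		if (has_zero(data, &mask, &constants)) {
			mask = prep_zero_mask(data, mask, &constants);
			mask = create_zero_mask(mask);
			return len + find_zero(mask);
		}
	}
}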
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index d16302d3eb59..0755bc39b0d8 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -39,7 +39,7 @@ extern char __init_begin[], __init_end[];
extern char _sinittext[], _einittext[];
extern char __start_ro_after_init[], __end_ro_after_init[];
extern char _end[];
-extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
+extern char __per_cpu_start[], __per_cpu_end[];
extern char __kprobes_text_start[], __kprobes_text_end[];
extern char __entry_text_start[], __entry_text_end[];
extern char __start_rodata[], __end_rodata[];
@@ -59,40 +59,23 @@ extern char __noinstr_text_start[], __noinstr_text_end[];
extern __visible const void __nosave_begin, __nosave_end;
/* Function descriptor handling (if any). Override in asm/sections.h */
-#ifndef dereference_function_descriptor
+#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
+void *dereference_function_descriptor(void *ptr);
+void *dereference_kernel_function_descriptor(void *ptr);
+#else
#define dereference_function_descriptor(p) ((void *)(p))
#define dereference_kernel_function_descriptor(p) ((void *)(p))
-#endif
-
-/* random extra sections (if any). Override
- * in asm/sections.h */
-#ifndef arch_is_kernel_text
-static inline int arch_is_kernel_text(unsigned long addr)
-{
- return 0;
-}
-#endif
-#ifndef arch_is_kernel_data
-static inline int arch_is_kernel_data(unsigned long addr)
-{
- return 0;
-}
+/* An address is simply the address of the function. */
+typedef struct {
+ unsigned long addr;
+} func_desc_t;
#endif
-/*
- * Check if an address is part of freed initmem. This is needed on architectures
- * with virt == phys kernel mapping, for code that wants to check if an address
- * is part of a static object within [_stext, _end]. After initmem is freed,
- * memory can be allocated from it, and such allocations would then have
- * addresses within the range [_stext, _end].
- */
-#ifndef arch_is_kernel_initmem_freed
-static inline int arch_is_kernel_initmem_freed(unsigned long addr)
+static inline bool have_function_descriptors(void)
{
- return 0;
+ return IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS);
}
-#endif
/**
* memory_contains - checks if an object is contained within a memory region
@@ -114,7 +97,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt,
/**
* memory_intersects - checks if the region occupied by an object intersects
* with another memory region
- * @begin: virtual address of the beginning of the memory regien
+ * @begin: virtual address of the beginning of the memory region
* @end: virtual address of the end of the memory region
* @virt: virtual address of the memory object
* @size: size of the memory object
@@ -127,7 +110,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt,
{
void *vend = virt + size;
- return (virt >= begin && virt < end) || (vend >= begin && vend < end);
+ if (virt < end && vend > begin)
+ return true;
+
+ return false;
}
/**
@@ -159,6 +145,28 @@ static inline bool init_section_intersects(void *virt, size_t size)
}
/**
+ * is_kernel_core_data - checks if the pointer address is located in the
+ * .data or .bss section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .data or .bss, false otherwise.
+ * Note: on some architectures this may return true for core RODATA and
+ * false on others, but it will always return true for core RW data.
+ */
+static inline bool is_kernel_core_data(unsigned long addr)
+{
+ if (addr >= (unsigned long)_sdata && addr < (unsigned long)_edata)
+ return true;
+
+ if (addr >= (unsigned long)__bss_start &&
+ addr < (unsigned long)__bss_stop)
+ return true;
+
+ return false;
+}
+
+/**
* is_kernel_rodata - checks if the pointer address is located in the
* .rodata section
*
@@ -172,4 +180,56 @@ static inline bool is_kernel_rodata(unsigned long addr)
addr < (unsigned long)__end_rodata;
}
+static inline bool is_kernel_ro_after_init(unsigned long addr)
+{
+ return addr >= (unsigned long)__start_ro_after_init &&
+ addr < (unsigned long)__end_ro_after_init;
+}
+/**
+ * is_kernel_inittext - checks if the pointer address is located in the
+ * .init.text section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .init.text, false otherwise.
+ */
+static inline bool is_kernel_inittext(unsigned long addr)
+{
+ return addr >= (unsigned long)_sinittext &&
+ addr < (unsigned long)_einittext;
+}
+
+/**
+ * __is_kernel_text - checks if the pointer address is located in the
+ * .text section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .text, false otherwise.
+ * Note: an internal helper; it only checks the range _stext to _etext.
+ */
+static inline bool __is_kernel_text(unsigned long addr)
+{
+ return addr >= (unsigned long)_stext &&
+ addr < (unsigned long)_etext;
+}
+
+/**
+ * __is_kernel - checks if the pointer address is located in the kernel range
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in the kernel range, false otherwise.
+ * Note: an internal helper; it checks the range _stext to _end, plus the
+ * range __init_begin to __init_end, which can lie outside of the
+ * _stext to _end range.
+ */
+static inline bool __is_kernel(unsigned long addr)
+{
+ return ((addr >= (unsigned long)_stext &&
+ addr < (unsigned long)_end) ||
+ (addr >= (unsigned long)__init_begin &&
+ addr < (unsigned long)__init_end));
+}
+
#endif /* _ASM_GENERIC_SECTIONS_H_ */
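The new section helpers compose into simple address classification. An illustrative sketch (not from the patch) of a debug-style classifier using them:

static const char *classify_kernel_addr(unsigned long addr)
{
	if (is_kernel_inittext(addr))
		return "init text";
	if (__is_kernel_text(addr))
		return "text";
	if (is_kernel_rodata(addr))
		return "rodata";
	if (is_kernel_core_data(addr))
		return "data/bss";
	return __is_kernel(addr) ? "kernel (other)" : "not kernel";
}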
diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h
index c53984fa9761..663dd6d0795d 100644
--- a/include/asm-generic/signal.h
+++ b/include/asm-generic/signal.h
@@ -5,8 +5,6 @@
#include <uapi/asm-generic/signal.h>
#ifndef __ASSEMBLY__
-#ifdef SA_RESTORER
-#endif
#include <asm/sigcontext.h>
#undef __HAVE_ARCH_SIG_BITOPS
diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h
index d0343d58a74a..70c8716ad32a 100644
--- a/include/asm-generic/simd.h
+++ b/include/asm-generic/simd.h
@@ -1,6 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_SIMD_H
+#define _ASM_GENERIC_SIMD_H
-#include <linux/hardirq.h>
+#include <linux/compiler_attributes.h>
+#include <linux/preempt.h>
+#include <linux/sched.h>
+#include <linux/types.h>
/*
* may_use_simd - whether it is allowable at this time to issue SIMD
@@ -13,3 +18,5 @@ static __must_check inline bool may_use_simd(void)
{
return !in_interrupt();
}
+
+#endif /* _ASM_GENERIC_SIMD_H */
diff --git a/include/asm-generic/softirq_stack.h b/include/asm-generic/softirq_stack.h
index eceeecf6a5bd..2a67aed9ac52 100644
--- a/include/asm-generic/softirq_stack.h
+++ b/include/asm-generic/softirq_stack.h
@@ -2,7 +2,7 @@
#ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
#define __ASM_GENERIC_SOFTIRQ_STACK_H
-#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h
index adaf6acab172..970590baf61b 100644
--- a/include/asm-generic/spinlock.h
+++ b/include/asm-generic/spinlock.h
@@ -1,12 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
#ifndef __ASM_GENERIC_SPINLOCK_H
#define __ASM_GENERIC_SPINLOCK_H
-/*
- * You need to implement asm/spinlock.h for SMP support. The generic
- * version does not handle SMP.
- */
-#ifdef CONFIG_SMP
-#error need an architecture specific asm/spinlock.h
-#endif
+
+#include <asm-generic/ticket_spinlock.h>
+#include <asm/qrwlock.h>
#endif /* __ASM_GENERIC_SPINLOCK_H */
diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h
new file mode 100644
index 000000000000..f534aa5de394
--- /dev/null
+++ b/include/asm-generic/spinlock_types.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_GENERIC_SPINLOCK_TYPES_H
+#define __ASM_GENERIC_SPINLOCK_TYPES_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>
+
+#endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index 524218ae3825..c5a3ad53beec 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -5,7 +5,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
*
* This file is a stub providing documentation for what functions
- * asm-ARCH/syscall.h files need to define. Most arch definitions
+ * arch/ARCH/include/asm/syscall.h files need to define. Most arch definitions
* will be simple inlines.
*
* All of these functions expect to be called with no locks,
@@ -38,13 +38,27 @@ struct pt_regs;
int syscall_get_nr(struct task_struct *task, struct pt_regs *regs);
/**
+ * syscall_set_nr - change the system call a task is executing
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ * @nr: system call number
+ *
+ * Changes the system call number @task is about to execute.
+ *
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_AUDIT.
+ */
+void syscall_set_nr(struct task_struct *task, struct pt_regs *regs, int nr);
+
+/**
* syscall_rollback - roll back registers after an aborted system call
* @task: task of interest, must be in system call exit tracing
* @regs: task_pt_regs() of @task
*
* It's only valid to call this when @task is stopped for system
* call exit tracing (due to %SYSCALL_WORK_SYSCALL_TRACE or
- * %SYSCALL_WORK_SYSCALL_AUDIT), after tracehook_report_syscall_entry()
+ * %SYSCALL_WORK_SYSCALL_AUDIT), after ptrace_report_syscall_entry()
* returned nonzero to prevent the system call from taking place.
*
* This rolls back the register state in @regs so it's as if the
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h
index 933ca6581aba..fabcefe8a80a 100644
--- a/include/asm-generic/syscalls.h
+++ b/include/asm-generic/syscalls.h
@@ -19,7 +19,7 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
#ifndef sys_mmap
asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
- unsigned long fd, off_t pgoff);
+ unsigned long fd, unsigned long off);
#endif
#ifndef sys_rt_sigreturn
diff --git a/include/asm-generic/termios-base.h b/include/asm-generic/termios-base.h
deleted file mode 100644
index 59c5a3bd4a6e..000000000000
--- a/include/asm-generic/termios-base.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* termios.h: generic termios/termio user copying/translation
- */
-
-#ifndef _ASM_GENERIC_TERMIOS_BASE_H
-#define _ASM_GENERIC_TERMIOS_BASE_H
-
-#include <linux/uaccess.h>
-
-#ifndef __ARCH_TERMIO_GETPUT
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-static inline int user_termio_to_kernel_termios(struct ktermios *termios,
- struct termio __user *termio)
-{
- unsigned short tmp;
-
- if (get_user(tmp, &termio->c_iflag) < 0)
- goto fault;
- termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;
-
- if (get_user(tmp, &termio->c_oflag) < 0)
- goto fault;
- termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;
-
- if (get_user(tmp, &termio->c_cflag) < 0)
- goto fault;
- termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;
-
- if (get_user(tmp, &termio->c_lflag) < 0)
- goto fault;
- termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;
-
- if (get_user(termios->c_line, &termio->c_line) < 0)
- goto fault;
-
- if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
- goto fault;
-
- return 0;
-
- fault:
- return -EFAULT;
-}
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-static inline int kernel_termios_to_user_termio(struct termio __user *termio,
- struct ktermios *termios)
-{
- if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
- put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
- put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
- put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
- put_user(termios->c_line, &termio->c_line) < 0 ||
- copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
- return -EFAULT;
-
- return 0;
-}
-
-#ifndef user_termios_to_kernel_termios
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
-#endif
-
-#ifndef kernel_termios_to_user_termios
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
-#endif
-
-#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
-
-#endif /* __ARCH_TERMIO_GETPUT */
-
-#endif /* _ASM_GENERIC_TERMIOS_BASE_H */
diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h
deleted file mode 100644
index b1398d0d4a1d..000000000000
--- a/include/asm-generic/termios.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_TERMIOS_H
-#define _ASM_GENERIC_TERMIOS_H
-
-
-#include <linux/uaccess.h>
-#include <uapi/asm-generic/termios.h>
-
-/* intr=^C quit=^\ erase=del kill=^U
- eof=^D vtime=\0 vmin=\1 sxtc=\0
- start=^Q stop=^S susp=^Z eol=\0
- reprint=^R discard=^U werase=^W lnext=^V
- eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-static inline int user_termio_to_kernel_termios(struct ktermios *termios,
- const struct termio __user *termio)
-{
- unsigned short tmp;
-
- if (get_user(tmp, &termio->c_iflag) < 0)
- goto fault;
- termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;
-
- if (get_user(tmp, &termio->c_oflag) < 0)
- goto fault;
- termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;
-
- if (get_user(tmp, &termio->c_cflag) < 0)
- goto fault;
- termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;
-
- if (get_user(tmp, &termio->c_lflag) < 0)
- goto fault;
- termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;
-
- if (get_user(termios->c_line, &termio->c_line) < 0)
- goto fault;
-
- if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
- goto fault;
-
- return 0;
-
- fault:
- return -EFAULT;
-}
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-static inline int kernel_termios_to_user_termio(struct termio __user *termio,
- struct ktermios *termios)
-{
- if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
- put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
- put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
- put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
- put_user(termios->c_line, &termio->c_line) < 0 ||
- copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
- return -EFAULT;
-
- return 0;
-}
-
-#ifdef TCGETS2
-static inline int user_termios_to_kernel_termios(struct ktermios *k,
- struct termios2 __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios2));
-}
-
-static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios2));
-}
-
-static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
- struct termios __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios));
-}
-
-static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios));
-}
-#else /* TCGETS2 */
-static inline int user_termios_to_kernel_termios(struct ktermios *k,
- struct termios __user *u)
-{
- return copy_from_user(k, u, sizeof(struct termios));
-}
-
-static inline int kernel_termios_to_user_termios(struct termios __user *u,
- struct ktermios *k)
-{
- return copy_to_user(u, k, sizeof(struct termios));
-}
-#endif /* TCGETS2 */
-
-#endif /* _ASM_GENERIC_TERMIOS_H */
diff --git a/include/asm-generic/text-patching.h b/include/asm-generic/text-patching.h
new file mode 100644
index 000000000000..2245c641b741
--- /dev/null
+++ b/include/asm-generic/text-patching.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_TEXT_PATCHING_H
+#define _ASM_GENERIC_TEXT_PATCHING_H
+
+#endif /* _ASM_GENERIC_TEXT_PATCHING_H */
diff --git a/include/asm-generic/thread_info_tif.h b/include/asm-generic/thread_info_tif.h
new file mode 100644
index 000000000000..da1610a78f92
--- /dev/null
+++ b/include/asm-generic/thread_info_tif.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_THREAD_INFO_TIF_H_
+#define _ASM_GENERIC_THREAD_INFO_TIF_H_
+
+#include <vdso/bits.h>
+
+/* Bits 16-31 are reserved for architecture specific purposes */
+
+#define TIF_NOTIFY_RESUME 0 // callback before returning to user
+#define _TIF_NOTIFY_RESUME BIT(TIF_NOTIFY_RESUME)
+
+#define TIF_SIGPENDING 1 // signal pending
+#define _TIF_SIGPENDING BIT(TIF_SIGPENDING)
+
+#define TIF_NOTIFY_SIGNAL 2 // signal notifications exist
+#define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL)
+
+#define TIF_MEMDIE 3 // is terminating due to OOM killer
+#define _TIF_MEMDIE BIT(TIF_MEMDIE)
+
+#define TIF_NEED_RESCHED 4 // rescheduling necessary
+#define _TIF_NEED_RESCHED BIT(TIF_NEED_RESCHED)
+
+#ifdef HAVE_TIF_NEED_RESCHED_LAZY
+# define TIF_NEED_RESCHED_LAZY 5 // Lazy rescheduling needed
+# define _TIF_NEED_RESCHED_LAZY BIT(TIF_NEED_RESCHED_LAZY)
+#endif
+
+#ifdef HAVE_TIF_POLLING_NRFLAG
+# define TIF_POLLING_NRFLAG 6 // idle is polling for TIF_NEED_RESCHED
+# define _TIF_POLLING_NRFLAG BIT(TIF_POLLING_NRFLAG)
+#endif
+
+#define TIF_USER_RETURN_NOTIFY 7 // notify kernel of userspace return
+#define _TIF_USER_RETURN_NOTIFY BIT(TIF_USER_RETURN_NOTIFY)
+
+#define TIF_UPROBE 8 // breakpointed or singlestepping
+#define _TIF_UPROBE BIT(TIF_UPROBE)
+
+#define TIF_PATCH_PENDING 9 // pending live patching update
+#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
+
+#ifdef HAVE_TIF_RESTORE_SIGMASK
+# define TIF_RESTORE_SIGMASK 10 // Restore signal mask in do_signal()
+# define _TIF_RESTORE_SIGMASK BIT(TIF_RESTORE_SIGMASK)
+#endif
+
+#define TIF_RSEQ 11 // Run RSEQ fast path
+#define _TIF_RSEQ BIT(TIF_RSEQ)
+
+#endif /* _ASM_GENERIC_THREAD_INFO_TIF_H_ */
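A sketch of how an architecture might adopt these generic bits in its asm/thread_info.h; the opt-in defines and the arch-private flag are assumptions for illustration:

#define HAVE_TIF_NEED_RESCHED_LAZY
#define HAVE_TIF_POLLING_NRFLAG
#define HAVE_TIF_RESTORE_SIGMASK
#include <asm-generic/thread_info_tif.h>

/* Architecture-private flags start at bit 16. */
#define TIF_ARCH_EXAMPLE	16	// hypothetical arch-specific flag
#define _TIF_ARCH_EXAMPLE	BIT(TIF_ARCH_EXAMPLE)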
diff --git a/include/asm-generic/ticket_spinlock.h b/include/asm-generic/ticket_spinlock.h
new file mode 100644
index 000000000000..325779970d8a
--- /dev/null
+++ b/include/asm-generic/ticket_spinlock.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * 'Generic' ticket-lock implementation.
+ *
+ * It relies on atomic_fetch_add() having well defined forward progress
+ * guarantees under contention. If your architecture cannot provide this, stick
+ * to a test-and-set lock.
+ *
+ * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a
+ * sub-word of the value. This is generally true for anything LL/SC although
+ * you'd be hard pressed to find anything useful in architecture specifications
+ * about this. If your architecture cannot do this you might be better off with
+ * a test-and-set.
+ *
+ * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence
+ * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with
+ * a full fence after the spin to upgrade the otherwise-RCpc
+ * atomic_cond_read_acquire().
+ *
+ * The implementation uses smp_cond_load_acquire() to spin, so if the
+ * architecture has WFE-like instructions to sleep instead of polling for
+ * word modifications, be sure to implement that (see ARM64 for example).
+ *
+ */
+
+#ifndef __ASM_GENERIC_TICKET_SPINLOCK_H
+#define __ASM_GENERIC_TICKET_SPINLOCK_H
+
+#include <linux/atomic.h>
+#include <asm-generic/spinlock_types.h>
+
+static __always_inline void ticket_spin_lock(arch_spinlock_t *lock)
+{
+ u32 val = atomic_fetch_add(1<<16, &lock->val);
+ u16 ticket = val >> 16;
+
+ if (ticket == (u16)val)
+ return;
+
+ /*
+ * atomic_cond_read_acquire() is RCpc, but rather than defining a
+ * custom cond_read_rcsc() here we just emit a full fence. We only
+ * need the prior reads before subsequent writes ordering from
+ * smb_mb(), but as atomic_cond_read_acquire() just emits reads and we
+ * have no outstanding writes due to the atomic_fetch_add() the extra
+ * orderings are free.
+ */
+ atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL);
+ smp_mb();
+}
+
+static __always_inline bool ticket_spin_trylock(arch_spinlock_t *lock)
+{
+ u32 old = atomic_read(&lock->val);
+
+ if ((old >> 16) != (old & 0xffff))
+ return false;
+
+ return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */
+}
+
+static __always_inline void ticket_spin_unlock(arch_spinlock_t *lock)
+{
+ u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+ u32 val = atomic_read(&lock->val);
+
+ smp_store_release(ptr, (u16)val + 1);
+}
+
+static __always_inline int ticket_spin_value_unlocked(arch_spinlock_t lock)
+{
+ u32 val = lock.val.counter;
+
+ return ((val >> 16) == (val & 0xffff));
+}
+
+static __always_inline int ticket_spin_is_locked(arch_spinlock_t *lock)
+{
+ arch_spinlock_t val = READ_ONCE(*lock);
+
+ return !ticket_spin_value_unlocked(val);
+}
+
+static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
+{
+ u32 val = atomic_read(&lock->val);
+
+ return (s16)((val >> 16) - (val & 0xffff)) > 1;
+}
+
+#ifndef __no_arch_spinlock_redefine
+/*
+ * Remapping spinlock architecture specific functions to the corresponding
+ * ticket spinlock functions.
+ */
+#define arch_spin_is_locked(l) ticket_spin_is_locked(l)
+#define arch_spin_is_contended(l) ticket_spin_is_contended(l)
+#define arch_spin_value_unlocked(l) ticket_spin_value_unlocked(l)
+#define arch_spin_lock(l) ticket_spin_lock(l)
+#define arch_spin_trylock(l) ticket_spin_trylock(l)
+#define arch_spin_unlock(l) ticket_spin_unlock(l)
+#endif
+
+#endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */
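A worked example of the ticket encoding may help: lock->val packs 'next' in the high halfword and 'owner' in the low halfword.

/*
 * val = 0x00030001  ->  next = 3, owner = 1
 *
 * A new locker atomic_fetch_add()s 1 << 16, takes ticket 3 (next
 * becomes 4), then spins until the owner halfword reaches 3.
 * ticket_spin_unlock() bumps only the owner halfword, so waiters are
 * served in strict FIFO order, which is what makes this lock fair.
 */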
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 2c68a545ffa7..1fff717cae51 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -58,6 +58,11 @@
* Defaults to flushing at tlb_end_vma() to reset the range; helps when
* there's large holes between the VMAs.
*
+ * - tlb_free_vmas()
+ *
+ * tlb_free_vmas() marks the start of unlinking of one or more vmas
+ * and freeing page-tables.
+ *
* - tlb_remove_table()
*
* tlb_remove_table() is the basic primitive to free page-table directories
@@ -67,17 +72,22 @@
*
* See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
*
- * - tlb_remove_page() / __tlb_remove_page()
- * - tlb_remove_page_size() / __tlb_remove_page_size()
+ * - tlb_remove_page() / tlb_remove_page_size()
+ * - __tlb_remove_folio_pages() / __tlb_remove_page_size()
+ * - __tlb_remove_folio_pages_size()
*
- * __tlb_remove_page_size() is the basic primitive that queues a page for
- * freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
- * boolean indicating if the queue is (now) full and a call to
- * tlb_flush_mmu() is required.
+ * __tlb_remove_folio_pages_size() is the basic primitive that queues pages
+ * for freeing. It will return a boolean indicating if the queue is (now)
+ * full and a call to tlb_flush_mmu() is required.
*
* tlb_remove_page() and tlb_remove_page_size() imply the call to
* tlb_flush_mmu() when required and have no return value.
*
+ * __tlb_remove_folio_pages() is similar to __tlb_remove_page_size(),
+ * however, instead of removing a single page, it assumes PAGE_SIZE and
+ * removes the given number of consecutive pages that are all part of
+ * the same (large) folio.
+ *
* - tlb_change_page_size()
*
* call before __tlb_remove_page*() to set the current page-size; implies a
@@ -147,8 +157,9 @@
*
* Useful if your architecture has non-page page directories.
*
- * When used, an architecture is expected to provide __tlb_remove_table()
- * which does the actual freeing of these pages.
+ * When used, an architecture is expected to provide __tlb_remove_table() or
+ * use the generic __tlb_remove_table(), which does the actual freeing of these
+ * pages.
*
* MMU_GATHER_RCU_TABLE_FREE
*
@@ -158,9 +169,24 @@
* Useful if your architecture doesn't use IPIs for remote TLB invalidates
* and therefore doesn't naturally serialize with software page-table walkers.
*
+ * MMU_GATHER_NO_FLUSH_CACHE
+ *
+ * Indicates the architecture has flush_cache_range() but it need *NOT* be called
+ * before unmapping a VMA.
+ *
+ * NOTE: strictly speaking we shouldn't have this knob and instead rely on
+ * flush_cache_range() being a NOP, except Sparc64 seems to be
+ * different here.
+ *
+ * MMU_GATHER_MERGE_VMAS
+ *
+ * Indicates the architecture wants to merge ranges over VMAs; typical when
+ * multiple range invalidates are more expensive than a full invalidate.
+ *
* MMU_GATHER_NO_RANGE
*
- * Use this if your architecture lacks an efficient flush_tlb_range().
+ * Use this if your architecture lacks an efficient flush_tlb_range(). This
+ * option implies MMU_GATHER_MERGE_VMAS above.
*
* MMU_GATHER_NO_GATHER
*
@@ -180,22 +206,37 @@ struct mmu_table_batch {
struct rcu_head rcu;
#endif
unsigned int nr;
- void *tables[0];
+ void *tables[];
};
#define MAX_TABLE_BATCH \
((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+#ifndef __HAVE_ARCH_TLB_REMOVE_TABLE
+static inline void __tlb_remove_table(void *table)
+{
+ struct ptdesc *ptdesc = (struct ptdesc *)table;
+
+ pagetable_dtor_free(ptdesc);
+}
+#endif
+
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-#else /* !CONFIG_MMU_GATHER_HAVE_TABLE_FREE */
+#else /* !CONFIG_MMU_GATHER_TABLE_FREE */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page);
/*
* Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
* page directories and we can use the normal page batching to free them.
*/
-#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
+static inline void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+ struct ptdesc *ptdesc = (struct ptdesc *)table;
+ pagetable_dtor(ptdesc);
+ tlb_remove_page(tlb, ptdesc_page(ptdesc));
+}
#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
@@ -207,12 +248,16 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#define tlb_needs_table_invalidate() (true)
#endif
+void tlb_remove_table_sync_one(void);
+
#else
#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif
+static inline void tlb_remove_table_sync_one(void) { }
+
#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
@@ -227,7 +272,7 @@ struct mmu_gather_batch {
struct mmu_gather_batch *next;
unsigned int nr;
unsigned int max;
- struct page *pages[0];
+ struct encoded_page *encoded_pages[];
};
#define MAX_GATHER_BATCH \
@@ -242,7 +287,31 @@ struct mmu_gather_batch {
#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
- int page_size);
+ bool delay_rmap, int page_size);
+bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
+ unsigned int nr_pages, bool delay_rmap);
+
+#ifdef CONFIG_SMP
+/*
+ * This both sets 'delayed_rmap' and returns true. It would be an inline
+ * function, except we define it before the 'struct mmu_gather'.
+ */
+#define tlb_delay_rmap(tlb) (((tlb)->delayed_rmap = 1), true)
+extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
+#endif
+
+#endif
+
+/*
+ * We have a no-op version of the rmap removal that doesn't
+ * delay anything. That is used on S390, which flushes remote
+ * TLBs synchronously, and on UP, which doesn't have any
+ * remote TLBs to flush and is not preemptible due to this
+ * all happening under the page table lock.
+ */
+#ifndef tlb_delay_rmap
+#define tlb_delay_rmap(tlb) (false)
+static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
#endif
/*
@@ -276,6 +345,11 @@ struct mmu_gather {
unsigned int freed_tables : 1;
/*
+ * Do we have pending delayed rmap removals?
+ */
+ unsigned int delayed_rmap : 1;
+
+ /*
* at which levels have we cleared entries?
*/
unsigned int cleared_ptes : 1;
@@ -288,6 +362,7 @@ struct mmu_gather {
*/
unsigned int vma_exec : 1;
unsigned int vma_huge : 1;
+ unsigned int vma_pfn : 1;
unsigned int batch_count;
@@ -334,8 +409,8 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
#ifdef CONFIG_MMU_GATHER_NO_RANGE
-#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
-#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
+#if defined(tlb_flush)
+#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
#endif
/*
@@ -352,20 +427,9 @@ static inline void tlb_flush(struct mmu_gather *tlb)
flush_tlb_mm(tlb->mm);
}
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
-#define tlb_end_vma tlb_end_vma
-static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
#else /* CONFIG_MMU_GATHER_NO_RANGE */
#ifndef tlb_flush
-
-#if defined(tlb_start_vma) || defined(tlb_end_vma)
-#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
-#endif
-
/*
* When an architecture does not provide its own tlb_flush() implementation
* but does have a reasonably efficient flush_vma_range() implementation
@@ -385,6 +449,9 @@ static inline void tlb_flush(struct mmu_gather *tlb)
flush_tlb_range(&vma, tlb->start, tlb->end);
}
}
+#endif
+
+#endif /* CONFIG_MMU_GATHER_NO_RANGE */
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
@@ -402,16 +469,13 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
*/
tlb->vma_huge = is_vm_hugetlb_page(vma);
tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
-}
-#else
-
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
-#endif
-
-#endif /* CONFIG_MMU_GATHER_NO_RANGE */
+ /*
+ * Track if there's at least one VM_PFNMAP/VM_MIXEDMAP vma
+ * in the tracked range, see tlb_free_vmas().
+ */
+ tlb->vma_pfn |= !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
+}
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
@@ -424,29 +488,24 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
return;
tlb_flush(tlb);
- mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
__tlb_reset_range(tlb);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
- if (__tlb_remove_page_size(tlb, page, page_size))
+ if (__tlb_remove_page_size(tlb, page, false, page_size))
tlb_flush_mmu(tlb);
}
-static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
- return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
+ return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
-/* tlb_remove_page
- * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
- * required.
- */
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
{
- return tlb_remove_page_size(tlb, page, PAGE_SIZE);
+ tlb_remove_table(tlb, pt);
}
static inline void tlb_change_page_size(struct mmu_gather *tlb,
@@ -486,21 +545,20 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
* case where we're doing a full MM flush. When we're doing a munmap,
* the vmas are adjusted to only cover the region to be torn down.
*/
-#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (tlb->fullmm)
return;
tlb_update_vma_flags(tlb, vma);
+#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
flush_cache_range(vma, vma->vm_start, vma->vm_end);
-}
#endif
+}
-#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
- if (tlb->fullmm)
+ if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
return;
/*
@@ -511,7 +569,28 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
*/
tlb_flush_mmu_tlbonly(tlb);
}
-#endif
+
+static inline void tlb_free_vmas(struct mmu_gather *tlb)
+{
+ if (tlb->fullmm)
+ return;
+
+ /*
+ * VM_PFNMAP is more fragile because the core mm will not track the
+ * page mapcount -- there might not be page-frames for these PFNs
+ * after all.
+ *
+ * Specifically, there is a race between munmap() and
+ * unmap_mapping_range(), where munmap() will unlink the VMA, such
+ * that unmap_mapping_range() will no longer observe the VMA and
+ * no-op, without observing the TLBI, returning prematurely.
+ *
+ * So if we're about to unlink such a VMA, and we have pending
+ * TLBI for such a vma, flush things now.
+ */
+ if (tlb->vma_pfn)
+ tlb_flush_mmu_tlbonly(tlb);
+}
/*
* tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
@@ -546,7 +625,9 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
}
#ifndef __tlb_remove_tlb_entry
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
+{
+}
#endif
/**
@@ -562,13 +643,37 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
+/**
+ * tlb_remove_tlb_entries - remember unmapping of multiple consecutive ptes for
+ * later tlb invalidation.
+ *
+ * Similar to tlb_remove_tlb_entry(), but remember unmapping of multiple
+ * consecutive ptes instead of only a single one.
+ */
+static inline void tlb_remove_tlb_entries(struct mmu_gather *tlb,
+ pte_t *ptep, unsigned int nr, unsigned long address)
+{
+ tlb_flush_pte_range(tlb, address, PAGE_SIZE * nr);
+ for (;;) {
+ __tlb_remove_tlb_entry(tlb, ptep, address);
+ if (--nr == 0)
+ break;
+ ptep++;
+ address += PAGE_SIZE;
+ }
+}
+
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
do { \
unsigned long _sz = huge_page_size(h); \
- if (_sz == PMD_SIZE) \
- tlb_flush_pmd_range(tlb, address, _sz); \
- else if (_sz == PUD_SIZE) \
+ if (_sz >= P4D_SIZE) \
+ tlb_flush_p4d_range(tlb, address, _sz); \
+ else if (_sz >= PUD_SIZE) \
tlb_flush_pud_range(tlb, address, _sz); \
+ else if (_sz >= PMD_SIZE) \
+ tlb_flush_pmd_range(tlb, address, _sz); \
+ else \
+ tlb_flush_pte_range(tlb, address, _sz); \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
@@ -654,6 +759,20 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
} while (0)
#endif
+#ifndef pte_needs_flush
+static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
+{
+ return true;
+}
+#endif
+
+#ifndef huge_pmd_needs_flush
+static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
+{
+ return true;
+}
+#endif
+
#endif /* CONFIG_MMU */
#endif /* _ASM_GENERIC__TLB_H */
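
Taken together, the comments above describe a fixed calling sequence for the
mmu_gather API. A condensed sketch, with kernel context assumed, locking and
error handling elided, and the surrounding function purely hypothetical:

	#include <linux/mm.h>
	#include <asm/tlb.h>

	static void unmap_one_sketch(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     pte_t *ptep, struct page *page,
				     unsigned long addr)
	{
		struct mmu_gather tlb;

		tlb_gather_mmu(&tlb, mm);	/* begin the gather */
		tlb_start_vma(&tlb, vma);	/* cache flush + VMA flag tracking */

		tlb_remove_tlb_entry(&tlb, ptep, addr);	/* remember PTE for TLBI */
		tlb_remove_page(&tlb, page);	/* queue the page, flush if full */

		tlb_end_vma(&tlb, vma);		/* per-VMA invalidate, if any */
		tlb_finish_mmu(&tlb);		/* final flush and page freeing */
	}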
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 5aa8705df87e..4dbe715be65b 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -45,7 +45,7 @@
#endif
#ifndef cpumask_of_node
- #ifdef CONFIG_NEED_MULTIPLE_NODES
+ #ifdef CONFIG_NUMA
#define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
#else
#define cpumask_of_node(node) ((void)(node), cpu_online_mask)
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 4973328f3c6e..b276f783494c 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -8,9 +8,10 @@
* address space, e.g. all NOMMU machines.
*/
#include <linux/string.h>
+#include <asm-generic/access_ok.h>
#ifdef CONFIG_UACCESS_MEMCPY
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
static __always_inline int
__get_user_fn(size_t size, const void __user *from, void *to)
@@ -19,7 +20,7 @@ __get_user_fn(size_t size, const void __user *from, void *to)
switch (size) {
case 1:
- *(u8 *)to = get_unaligned((u8 __force *)from);
+ *(u8 *)to = *((u8 __force *)from);
return 0;
case 2:
*(u16 *)to = get_unaligned((u16 __force *)from);
@@ -45,7 +46,7 @@ __put_user_fn(size_t size, void __user *to, void *from)
switch (size) {
case 1:
- put_unaligned(*(u8 *)from, (u8 __force *)to);
+ *(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
put_unaligned(*(u16 *)from, (u16 __force *)to);
@@ -77,8 +78,6 @@ do { \
goto err_label; \
} while (0)
-#define HAVE_GET_KERNEL_NOFAULT 1
-
static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user * from, unsigned long n)
{
@@ -96,44 +95,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
#define INLINE_COPY_TO_USER
#endif /* CONFIG_UACCESS_MEMCPY */
-#ifdef CONFIG_SET_FS
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-
-#ifndef KERNEL_DS
-#define KERNEL_DS MAKE_MM_SEG(~0UL)
-#endif
-
-#ifndef USER_DS
-#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
-#endif
-
-#ifndef get_fs
-#define get_fs() (current_thread_info()->addr_limit)
-
-static inline void set_fs(mm_segment_t fs)
-{
- current_thread_info()->addr_limit = fs;
-}
-#endif
-
-#ifndef uaccess_kernel
-#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
-#endif
-#endif /* CONFIG_SET_FS */
-
-#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size))
-
-/*
- * The architecture should really override this if possible, at least
- * doing a check on the get_fs()
- */
-#ifndef __access_ok
-static inline int __access_ok(unsigned long addr, unsigned long size)
-{
- return 1;
-}
-#endif
-
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
@@ -244,50 +205,6 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
extern int __get_user_bad(void) __attribute__((noreturn));
/*
- * Copy a null terminated string from userspace.
- */
-#ifndef __strncpy_from_user
-static inline long
-__strncpy_from_user(char *dst, const char __user *src, long count)
-{
- char *tmp;
- strncpy(dst, (const char __force *)src, count);
- for (tmp = dst; *tmp && count > 0; tmp++, count--)
- ;
- return (tmp - dst);
-}
-#endif
-
-static inline long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
- if (!access_ok(src, 1))
- return -EFAULT;
- return __strncpy_from_user(dst, src, count);
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-#ifndef __strnlen_user
-#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
-#endif
-
-/*
- * Unlike strnlen, strnlen_user includes the nul terminator in
- * its returned count. Callers should check for a returned value
- * greater than N as an indication the string is too long.
- */
-static inline long strnlen_user(const char __user *src, long n)
-{
- if (!access_ok(src, 1))
- return 0;
- return __strnlen_user(src, n);
-}
-
-/*
* Zero Userspace
*/
#ifndef __clear_user
@@ -311,4 +228,8 @@ clear_user(void __user *to, unsigned long n)
#include <asm/extable.h>
+__must_check long strncpy_from_user(char *dst, const char __user *src,
+ long count);
+__must_check long strnlen_user(const char __user *src, long n);
+
#endif /* __ASM_GENERIC_UACCESS_H */
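
For reference, a hedged usage sketch of the two helpers declared above
(kernel context assumed; read_user_name() and its parameters are
hypothetical). strncpy_from_user() returns the string length on success,
'count' if no NUL terminator was found within count bytes, and -EFAULT on an
access error:

	#include <linux/errno.h>
	#include <linux/uaccess.h>

	static long read_user_name(char *dst, const char __user *src, long count)
	{
		long n = strncpy_from_user(dst, src, count);

		if (n < 0)		/* -EFAULT: bad user address */
			return n;
		if (n == count)		/* no NUL within 'count' bytes */
			return -ENAMETOOLONG;
		return n;		/* length, excluding the trailing NUL */
	}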
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
deleted file mode 100644
index 374c940e9be1..000000000000
--- a/include/asm-generic/unaligned.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_GENERIC_UNALIGNED_H
-#define __ASM_GENERIC_UNALIGNED_H
-
-/*
- * This is the most generic implementation of unaligned accesses
- * and should work almost anywhere.
- */
-#include <asm/byteorder.h>
-
-/* Set by the arch if it can handle unaligned accesses in hardware. */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/access_ok.h>
-#endif
-
-#if defined(__LITTLE_ENDIAN)
-# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# endif
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#elif defined(__BIG_ENDIAN)
-# ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# endif
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#else
-# error need to define endianess
-#endif
-
-#endif /* __ASM_GENERIC_UNALIGNED_H */
diff --git a/include/asm-generic/unwind_user.h b/include/asm-generic/unwind_user.h
new file mode 100644
index 000000000000..b8882b909944
--- /dev/null
+++ b/include/asm-generic/unwind_user.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_UNWIND_USER_H
+#define _ASM_GENERIC_UNWIND_USER_H
+
+#endif /* _ASM_GENERIC_UNWIND_USER_H */
diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
index c835607f78ae..5c6d9799f4e7 100644
--- a/include/asm-generic/vdso/vsyscall.h
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -4,25 +4,31 @@
#ifndef __ASSEMBLY__
-#ifndef __arch_get_k_vdso_data
-static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
+#ifndef __arch_get_vdso_u_time_data
+static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
{
- return NULL;
+ return &vdso_u_time_data;
}
-#endif /* __arch_get_k_vdso_data */
+#endif
-#ifndef __arch_update_vsyscall
-static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata,
- struct timekeeper *tk)
+#ifndef __arch_get_vdso_u_rng_data
+static __always_inline const struct vdso_rng_data *__arch_get_vdso_u_rng_data(void)
{
+ return &vdso_u_rng_data;
}
-#endif /* __arch_update_vsyscall */
+#endif
-#ifndef __arch_sync_vdso_data
-static __always_inline void __arch_sync_vdso_data(struct vdso_data *vdata)
+#ifndef __arch_update_vdso_clock
+static __always_inline void __arch_update_vdso_clock(struct vdso_clock *vc)
{
}
-#endif /* __arch_sync_vdso_data */
+#endif /* __arch_update_vdso_clock */
+
+#ifndef __arch_sync_vdso_time_data
+static __always_inline void __arch_sync_vdso_time_data(struct vdso_time_data *vdata)
+{
+}
+#endif /* __arch_sync_vdso_time_data */
#endif /* !__ASSEMBLY__ */
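
All of the hooks above follow the same override protocol: an architecture
defines the function plus a same-named macro in its own asm/vdso/vsyscall.h
before including this generic header, which then skips the default. A
hypothetical sketch:

	/* In a hypothetical arch/foo/include/asm/vdso/vsyscall.h: */
	#define __arch_update_vdso_clock __arch_update_vdso_clock
	static __always_inline void __arch_update_vdso_clock(struct vdso_clock *vc)
	{
		/* arch-specific post-processing of the vDSO clock data */
	}

	#include <asm-generic/vdso/vsyscall.h>	/* remaining defaults */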
diff --git a/include/asm-generic/vga.h b/include/asm-generic/vga.h
index adf91a783b5c..5dcaf4ae904a 100644
--- a/include/asm-generic/vga.h
+++ b/include/asm-generic/vga.h
@@ -1,25 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Access to VGA videoram
- *
- * (c) 1998 Martin Mares <mj@ucw.cz>
- */
#ifndef __ASM_GENERIC_VGA_H
#define __ASM_GENERIC_VGA_H
-
-/*
- * On most architectures that support VGA, we can just
- * recalculate addresses and then access the videoram
- * directly without any black magic.
- *
- * Everyone else needs to ioremap the address and use
- * proper I/O accesses.
- */
-#ifndef VGA_MAP_MEM
-#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x)
-#endif
-
-#define vga_readb(x) (*(x))
-#define vga_writeb(x, y) (*(y) = (x))
-
-#endif /* _ASM_GENERIC_VGA_H */
+#endif /* __ASM_GENERIC_VGA_H */
diff --git a/include/asm-generic/video.h b/include/asm-generic/video.h
new file mode 100644
index 000000000000..b1da2309d943
--- /dev/null
+++ b/include/asm-generic/video.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_GENERIC_VIDEO_H_
+#define __ASM_GENERIC_VIDEO_H_
+
+/*
+ * Only include this header file from your architecture's <asm/fb.h>.
+ */
+
+#include <linux/io.h>
+#include <linux/mm_types.h>
+#include <linux/pgtable.h>
+#include <linux/types.h>
+
+struct device;
+
+#ifndef pgprot_framebuffer
+#define pgprot_framebuffer pgprot_framebuffer
+static inline pgprot_t pgprot_framebuffer(pgprot_t prot,
+ unsigned long vm_start, unsigned long vm_end,
+ unsigned long offset)
+{
+ return pgprot_writecombine(prot);
+}
+#endif
+
+#ifndef video_is_primary_device
+#define video_is_primary_device video_is_primary_device
+static inline bool video_is_primary_device(struct device *dev)
+{
+ return false;
+}
+#endif
+
+/*
+ * I/O helpers for the framebuffer. Prefer these functions over their
+ * regular counterparts. The regular I/O functions provide in-order
+ * access and swap bytes to/from little-endian ordering. Neither is
+ * required for framebuffers. Instead, the helpers read and write
+ * raw framebuffer data. Independent operations can be reordered for
+ * improved performance.
+ */
+
+#ifndef fb_readb
+static inline u8 fb_readb(const volatile void __iomem *addr)
+{
+ return __raw_readb(addr);
+}
+#define fb_readb fb_readb
+#endif
+
+#ifndef fb_readw
+static inline u16 fb_readw(const volatile void __iomem *addr)
+{
+ return __raw_readw(addr);
+}
+#define fb_readw fb_readw
+#endif
+
+#ifndef fb_readl
+static inline u32 fb_readl(const volatile void __iomem *addr)
+{
+ return __raw_readl(addr);
+}
+#define fb_readl fb_readl
+#endif
+
+#ifndef fb_readq
+#if defined(__raw_readq)
+static inline u64 fb_readq(const volatile void __iomem *addr)
+{
+ return __raw_readq(addr);
+}
+#define fb_readq fb_readq
+#endif
+#endif
+
+#ifndef fb_writeb
+static inline void fb_writeb(u8 b, volatile void __iomem *addr)
+{
+ __raw_writeb(b, addr);
+}
+#define fb_writeb fb_writeb
+#endif
+
+#ifndef fb_writew
+static inline void fb_writew(u16 b, volatile void __iomem *addr)
+{
+ __raw_writew(b, addr);
+}
+#define fb_writew fb_writew
+#endif
+
+#ifndef fb_writel
+static inline void fb_writel(u32 b, volatile void __iomem *addr)
+{
+ __raw_writel(b, addr);
+}
+#define fb_writel fb_writel
+#endif
+
+#ifndef fb_writeq
+#if defined(__raw_writeq)
+static inline void fb_writeq(u64 b, volatile void __iomem *addr)
+{
+ __raw_writeq(b, addr);
+}
+#define fb_writeq fb_writeq
+#endif
+#endif
+
+#ifndef fb_memcpy_fromio
+static inline void fb_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n)
+{
+ memcpy_fromio(to, from, n);
+}
+#define fb_memcpy_fromio fb_memcpy_fromio
+#endif
+
+#ifndef fb_memcpy_toio
+static inline void fb_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
+{
+ memcpy_toio(to, from, n);
+}
+#define fb_memcpy_toio fb_memcpy_toio
+#endif
+
+#ifndef fb_memset
+static inline void fb_memset_io(volatile void __iomem *addr, int c, size_t n)
+{
+ memset_io(addr, c, n);
+}
+#define fb_memset fb_memset_io
+#endif
+
+#endif /* __ASM_GENERIC_VIDEO_H_ */
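
A hedged usage sketch for the fb_*() helpers above: filling one 32-bpp
scanline with a solid color. The raw, freely reorderable stores are exactly
what the comment block recommends, since the pixels are independent
(fill_scanline32() is hypothetical; kernel context with this header included
is assumed):

	static void fill_scanline32(volatile u32 __iomem *line, u32 color,
				    unsigned int pixels)
	{
		unsigned int i;

		/* Raw framebuffer stores: no ordering or byte swapping. */
		for (i = 0; i < pixels; i++)
			fb_writel(color, line + i);
	}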
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 40a9c101565e..8ca130af301f 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -3,7 +3,7 @@
* linker scripts.
*
* A minimal linker script has the following content:
- * [This is a sample, architectures may have special requiriements]
+ * [This is a sample, architectures may have special requirements]
*
* OUTPUT_FORMAT(...)
* OUTPUT_ARCH(...)
@@ -50,6 +50,8 @@
* [__nosave_begin, __nosave_end] for the nosave data
*/
+#include <asm-generic/codetag.lds.h>
+
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif
@@ -81,46 +83,66 @@
#define RO_EXCEPTION_TABLE
#endif
-/* Align . to a 8 byte boundary equals to maximum function alignment. */
-#define ALIGN_FUNCTION() . = ALIGN(8)
+/* Align . to the function alignment. */
+#define ALIGN_FUNCTION() . = ALIGN(CONFIG_FUNCTION_ALIGNMENT)
/*
- * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
- * generates .data.identifier sections, which need to be pulled in with
- * .data. We don't want to pull in .data..other sections, which Linux
- * has defined. Same for text and bss.
+ * Support -ffunction-sections by matching .text and .text.*,
+ * but exclude '.text..*', .text.startup[.*], and .text.exit[.*].
+ *
+ * .text.startup and .text.startup.* are matched later by INIT_TEXT, and
+ * .text.exit and .text.exit.* are matched later by EXIT_TEXT, so they must be
+ * explicitly excluded here.
*
- * With LTO_CLANG, the linker also splits sections by default, so we need
- * these macros to combine the sections during the final link.
+ * Other .text.* sections that are typically grouped separately, such as
+ * .text.unlikely or .text.hot, must be matched explicitly before using
+ * TEXT_MAIN.
*
- * RODATA_MAIN is not used because existing code already defines .rodata.x
- * sections to be brought in with rodata.
+ * NOTE: builds *with* and *without* -ffunction-sections are both supported by
+ * this single macro. Even with -ffunction-sections, there may be some objects
+ * NOT compiled with the flag due to the use of a specific Makefile override
+ * like cflags-y or AUTOFDO_PROFILE_foo.o. So this single catchall rule is
+ * needed to support mixed object builds.
+ *
+ * One implication is that functions named startup(), exit(), split(),
+ * unlikely(), hot(), and unknown() are not allowed in the kernel due to the
+ * ambiguity of their section names with -ffunction-sections. For example,
+ * .text.startup could be __attribute__((constructor)) code in a *non*
+ * ffunction-sections object, which should be placed in .init.text; or it could
+ * be an actual function named startup() in an ffunction-sections object, which
+ * should be placed in .text. The build will detect and complain about any such
+ * ambiguously named functions.
+ */
+#define TEXT_MAIN \
+ .text \
+ .text.[_0-9A-Za-df-rt-z]* \
+ .text.s[_0-9A-Za-su-z]* .text.s .text.s.* \
+ .text.st[_0-9A-Zb-z]* .text.st .text.st.* \
+ .text.sta[_0-9A-Za-qs-z]* .text.sta .text.sta.* \
+ .text.star[_0-9A-Za-su-z]* .text.star .text.star.* \
+ .text.start[_0-9A-Za-tv-z]* .text.start .text.start.* \
+ .text.startu[_0-9A-Za-oq-z]* .text.startu .text.startu.* \
+ .text.startup[_0-9A-Za-z]* \
+ .text.e[_0-9A-Za-wy-z]* .text.e .text.e.* \
+ .text.ex[_0-9A-Za-hj-z]* .text.ex .text.ex.* \
+ .text.exi[_0-9A-Za-su-z]* .text.exi .text.exi.* \
+ .text.exit[_0-9A-Za-z]*
+
+/*
+ * Support -fdata-sections by matching .data, .data.*, and others,
+ * but exclude '.data..*'.
*/
-#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG)
-#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
-#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data.rel.* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
-#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral*
+#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
-#else
-#define TEXT_MAIN .text
-#define DATA_MAIN .data
-#define SDATA_MAIN .sdata
-#define RODATA_MAIN .rodata
-#define BSS_MAIN .bss
-#define SBSS_MAIN .sbss
-#endif
/*
* GCC 4.5 and later have a 32-byte section alignment for structures.
* Except GCC 4.9, which feels the need to align on 64 bytes.
*/
-#if __GNUC__ == 4 && __GNUC_MINOR__ == 9
-#define STRUCT_ALIGNMENT 64
-#else
#define STRUCT_ALIGNMENT 32
-#endif
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
/*
@@ -130,153 +152,173 @@
*/
#define SCHED_DATA \
STRUCT_ALIGN(); \
- __begin_sched_classes = .; \
- *(__idle_sched_class) \
- *(__fair_sched_class) \
- *(__rt_sched_class) \
- *(__dl_sched_class) \
+ __sched_class_highest = .; \
*(__stop_sched_class) \
- __end_sched_classes = .;
+ *(__dl_sched_class) \
+ *(__rt_sched_class) \
+ *(__fair_sched_class) \
+ *(__ext_sched_class) \
+ *(__idle_sched_class) \
+ __sched_class_lowest = .;
/* The actual configuration determines if the init/exit sections
* are handled as text/data or if they can be discarded (which
* often happens at runtime)
*/
-#ifdef CONFIG_HOTPLUG_CPU
-#define CPU_KEEP(sec) *(.cpu##sec)
-#define CPU_DISCARD(sec)
+
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
+#define KEEP_PATCHABLE KEEP(*(__patchable_function_entries))
+#define PATCHABLE_DISCARDS
#else
-#define CPU_KEEP(sec)
-#define CPU_DISCARD(sec) *(.cpu##sec)
+#define KEEP_PATCHABLE
+#define PATCHABLE_DISCARDS *(__patchable_function_entries)
#endif
-#if defined(CONFIG_MEMORY_HOTPLUG)
-#define MEM_KEEP(sec) *(.mem##sec)
-#define MEM_DISCARD(sec)
+#ifndef CONFIG_ARCH_SUPPORTS_CFI
+/*
+ * Simply points to ftrace_stub, but with the proper protocol.
+ * Defined by the linker script in linux/vmlinux.lds.h
+ */
+#define FTRACE_STUB_HACK ftrace_stub_graph = ftrace_stub;
#else
-#define MEM_KEEP(sec)
-#define MEM_DISCARD(sec) *(.mem##sec)
+#define FTRACE_STUB_HACK
#endif
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#ifdef CONFIG_DYNAMIC_FTRACE
/*
* The ftrace call sites are logged to a section whose name depends on the
* compiler option used. A given kernel image will only use one, AKA
* FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
* dependencies for FTRACE_CALLSITE_SECTION's definition.
*
- * Need to also make ftrace_stub_graph point to ftrace_stub
- * so that the same stub location may have different protocols
- * and not mess up with C verifiers.
+ * ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func
+ * as some archs will have a different prototype for that function
+ * but ftrace_ops_list_func() will have a single prototype.
*/
#define MCOUNT_REC() . = ALIGN(8); \
__start_mcount_loc = .; \
KEEP(*(__mcount_loc)) \
- KEEP(*(__patchable_function_entries)) \
+ KEEP_PATCHABLE \
__stop_mcount_loc = .; \
- ftrace_stub_graph = ftrace_stub;
+ FTRACE_STUB_HACK \
+ ftrace_ops_list_func = arch_ftrace_ops_list_func;
#else
# ifdef CONFIG_FUNCTION_TRACER
-# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub;
+# define MCOUNT_REC() FTRACE_STUB_HACK \
+ ftrace_ops_list_func = arch_ftrace_ops_list_func;
# else
# define MCOUNT_REC()
# endif
#endif
+#define BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_) \
+ _BEGIN_##_label_ = .; \
+ KEEP(*(_sec_)) \
+ _END_##_label_ = .;
+
+#define BOUNDED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_) \
+ _label_##_BEGIN_ = .; \
+ KEEP(*(_sec_)) \
+ _label_##_END_ = .;
+
+#define BOUNDED_SECTION_BY(_sec_, _label_) \
+ BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, __start, __stop)
+
+#define BOUNDED_SECTION(_sec) BOUNDED_SECTION_BY(_sec, _sec)
+
+#define HEADERED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_, _HDR_) \
+ _HDR_##_label_ = .; \
+ KEEP(*(.gnu.linkonce.##_sec_)) \
+ BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_)
+
+#define HEADERED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_, _HDR_) \
+ _label_##_HDR_ = .; \
+ KEEP(*(.gnu.linkonce.##_sec_)) \
+ BOUNDED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_)
+
+#define HEADERED_SECTION_BY(_sec_, _label_) \
+ HEADERED_SECTION_PRE_LABEL(_sec_, _label_, __start, __stop)
+
+#define HEADERED_SECTION(_sec) HEADERED_SECTION_BY(_sec, _sec)
+
#ifdef CONFIG_TRACE_BRANCH_PROFILING
-#define LIKELY_PROFILE() __start_annotated_branch_profile = .; \
- KEEP(*(_ftrace_annotated_branch)) \
- __stop_annotated_branch_profile = .;
+#define LIKELY_PROFILE() \
+ BOUNDED_SECTION_BY(_ftrace_annotated_branch, _annotated_branch_profile)
#else
#define LIKELY_PROFILE()
#endif
#ifdef CONFIG_PROFILE_ALL_BRANCHES
-#define BRANCH_PROFILE() __start_branch_profile = .; \
- KEEP(*(_ftrace_branch)) \
- __stop_branch_profile = .;
+#define BRANCH_PROFILE() \
+ BOUNDED_SECTION_BY(_ftrace_branch, _branch_profile)
#else
#define BRANCH_PROFILE()
#endif
#ifdef CONFIG_KPROBES
-#define KPROBE_BLACKLIST() . = ALIGN(8); \
- __start_kprobe_blacklist = .; \
- KEEP(*(_kprobe_blacklist)) \
- __stop_kprobe_blacklist = .;
+#define KPROBE_BLACKLIST() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION(_kprobe_blacklist)
#else
#define KPROBE_BLACKLIST()
#endif
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
-#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \
- __start_error_injection_whitelist = .; \
- KEEP(*(_error_injection_whitelist)) \
- __stop_error_injection_whitelist = .;
+#define ERROR_INJECT_WHITELIST() \
+ STRUCT_ALIGN(); \
+ BOUNDED_SECTION(_error_injection_whitelist)
#else
#define ERROR_INJECT_WHITELIST()
#endif
#ifdef CONFIG_EVENT_TRACING
-#define FTRACE_EVENTS() . = ALIGN(8); \
- __start_ftrace_events = .; \
- KEEP(*(_ftrace_events)) \
- __stop_ftrace_events = .; \
- __start_ftrace_eval_maps = .; \
- KEEP(*(_ftrace_eval_map)) \
- __stop_ftrace_eval_maps = .;
+#define FTRACE_EVENTS() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION(_ftrace_events) \
+ BOUNDED_SECTION_BY(_ftrace_eval_map, _ftrace_eval_maps)
#else
#define FTRACE_EVENTS()
#endif
#ifdef CONFIG_TRACING
-#define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \
- KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
- __stop___trace_bprintk_fmt = .;
-#define TRACEPOINT_STR() __start___tracepoint_str = .; \
- KEEP(*(__tracepoint_str)) /* Trace_printk fmt' pointer */ \
- __stop___tracepoint_str = .;
+#define TRACE_PRINTKS() BOUNDED_SECTION_BY(__trace_printk_fmt, ___trace_bprintk_fmt)
+#define TRACEPOINT_STR() BOUNDED_SECTION_BY(__tracepoint_str, ___tracepoint_str)
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
-#define TRACE_SYSCALLS() . = ALIGN(8); \
- __start_syscalls_metadata = .; \
- KEEP(*(__syscalls_metadata)) \
- __stop_syscalls_metadata = .;
+#define TRACE_SYSCALLS() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION_BY(__syscalls_metadata, _syscalls_metadata)
#else
#define TRACE_SYSCALLS()
#endif
#ifdef CONFIG_BPF_EVENTS
-#define BPF_RAW_TP() STRUCT_ALIGN(); \
- __start__bpf_raw_tp = .; \
- KEEP(*(__bpf_raw_tp_map)) \
- __stop__bpf_raw_tp = .;
+#define BPF_RAW_TP() STRUCT_ALIGN(); \
+ BOUNDED_SECTION_BY(__bpf_raw_tp_map, __bpf_raw_tp)
#else
#define BPF_RAW_TP()
#endif
#ifdef CONFIG_SERIAL_EARLYCON
-#define EARLYCON_TABLE() . = ALIGN(8); \
- __earlycon_table = .; \
- KEEP(*(__earlycon_table)) \
- __earlycon_table_end = .;
+#define EARLYCON_TABLE() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION_POST_LABEL(__earlycon_table, __earlycon_table, , _end)
#else
#define EARLYCON_TABLE()
#endif
#ifdef CONFIG_SECURITY
-#define LSM_TABLE() . = ALIGN(8); \
- __start_lsm_info = .; \
- KEEP(*(.lsm_info.init)) \
- __end_lsm_info = .;
-#define EARLY_LSM_TABLE() . = ALIGN(8); \
- __start_early_lsm_info = .; \
- KEEP(*(.early_lsm_info.init)) \
- __end_early_lsm_info = .;
+#define LSM_TABLE() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION_PRE_LABEL(.lsm_info.init, _lsm_info, __start, __end)
+
+#define EARLY_LSM_TABLE() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION_PRE_LABEL(.early_lsm_info.init, _early_lsm_info, __start, __end)
#else
#define LSM_TABLE()
#define EARLY_LSM_TABLE()
@@ -302,9 +344,8 @@
#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name) \
. = ALIGN(8); \
- __##name##_acpi_probe_table = .; \
- KEEP(*(__##name##_acpi_probe_table)) \
- __##name##_acpi_probe_table_end = .;
+ BOUNDED_SECTION_POST_LABEL(__##name##_acpi_probe_table, \
+ __##name##_acpi_probe_table,, _end)
#else
#define ACPI_PROBE_TABLE(name)
#endif
@@ -312,23 +353,12 @@
#ifdef CONFIG_THERMAL
#define THERMAL_TABLE(name) \
. = ALIGN(8); \
- __##name##_thermal_table = .; \
- KEEP(*(__##name##_thermal_table)) \
- __##name##_thermal_table_end = .;
+ BOUNDED_SECTION_POST_LABEL(__##name##_thermal_table, \
+ __##name##_thermal_table,, _end)
#else
#define THERMAL_TABLE(name)
#endif
-#ifdef CONFIG_DTPM
-#define DTPM_TABLE() \
- . = ALIGN(8); \
- __dtpm_table = .; \
- KEEP(*(__dtpm_table)) \
- __dtpm_table_end = .;
-#else
-#define DTPM_TABLE()
-#endif
-
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
__dtb_start = .; \
@@ -341,26 +371,27 @@
#define DATA_DATA \
*(.xiptext) \
*(DATA_MAIN) \
+ *(.data..decrypted) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
- MEM_KEEP(init.data*) \
- MEM_KEEP(exit.data*) \
- *(.data.unlikely) \
+ *(.data..unlikely) \
__start_once = .; \
- *(.data.once) \
+ *(.data..once) \
__end_once = .; \
+ *(.data..do_once) \
STRUCT_ALIGN(); \
*(__tracepoints) \
/* implement dynamic printk debug */ \
. = ALIGN(8); \
- __start___dyndbg = .; \
- KEEP(*(__dyndbg)) \
- __stop___dyndbg = .; \
+ BOUNDED_SECTION_BY(__dyndbg_classes, ___dyndbg_classes) \
+ BOUNDED_SECTION_BY(__dyndbg, ___dyndbg) \
+ CODETAG_SECTIONS() \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
BPF_RAW_TP() \
- TRACEPOINT_STR()
+ TRACEPOINT_STR() \
+ KUNIT_TABLE()
/*
* Data section helpers
@@ -372,6 +403,11 @@
. = ALIGN(PAGE_SIZE); \
__nosave_end = .;
+#define CACHE_HOT_DATA(align) \
+ . = ALIGN(align); \
+ *(SORT_BY_ALIGNMENT(.data..hot.*)) \
+ . = ALIGN(align);
+
#define PAGE_ALIGNED_DATA(page_align) \
. = ALIGN(page_align); \
*(.data..page_aligned) \
@@ -388,28 +424,25 @@
#define INIT_TASK_DATA(align) \
. = ALIGN(align); \
- __start_init_task = .; \
+ __start_init_stack = .; \
init_thread_union = .; \
init_stack = .; \
- KEEP(*(.data..init_task)) \
KEEP(*(.data..init_thread_info)) \
- . = __start_init_task + THREAD_SIZE; \
- __end_init_task = .;
+ . = __start_init_stack + THREAD_SIZE; \
+ __end_init_stack = .;
#define JUMP_TABLE_DATA \
. = ALIGN(8); \
- __start___jump_table = .; \
- KEEP(*(__jump_table)) \
- __stop___jump_table = .;
+ BOUNDED_SECTION_BY(__jump_table, ___jump_table)
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
#define STATIC_CALL_DATA \
. = ALIGN(8); \
- __start_static_call_sites = .; \
- KEEP(*(.static_call_sites)) \
- __stop_static_call_sites = .; \
- __start_static_call_tramp_key = .; \
- KEEP(*(.static_call_tramp_key)) \
- __stop_static_call_tramp_key = .;
+ BOUNDED_SECTION_BY(.static_call_sites, _static_call_sites) \
+ BOUNDED_SECTION_BY(.static_call_tramp_key, _static_call_tramp_key)
+#else
+#define STATIC_CALL_DATA
+#endif
/*
* Allow architectures to handle ro_after_init data on their
@@ -426,19 +459,31 @@
#endif
/*
+ * .kcfi_traps contains a list of KCFI trap locations.
+ */
+#ifndef KCFI_TRAPS
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+#define KCFI_TRAPS \
+ __kcfi_traps : AT(ADDR(__kcfi_traps) - LOAD_OFFSET) { \
+ BOUNDED_SECTION_BY(.kcfi_traps, ___kcfi_traps) \
+ }
+#else
+#define KCFI_TRAPS
+#endif
+#endif
+
+/*
* Read only Data
*/
#define RO_DATA(align) \
. = ALIGN((align)); \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
__start_rodata = .; \
- *(.rodata) *(.rodata.*) \
+ *(.rodata) *(.rodata.*) *(.data.rel.ro*) \
SCHED_DATA \
RO_AFTER_INIT_DATA /* Read only after init */ \
. = ALIGN(8); \
- __start___tracepoints_ptrs = .; \
- KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
- __stop___tracepoints_ptrs = .; \
+ BOUNDED_SECTION_BY(__tracepoints_ptrs, ___tracepoints_ptrs) \
*(__tracepoints_strings)/* Tracepoints: strings */ \
} \
\
@@ -448,41 +493,21 @@
\
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
- __start_pci_fixups_early = .; \
- KEEP(*(.pci_fixup_early)) \
- __end_pci_fixups_early = .; \
- __start_pci_fixups_header = .; \
- KEEP(*(.pci_fixup_header)) \
- __end_pci_fixups_header = .; \
- __start_pci_fixups_final = .; \
- KEEP(*(.pci_fixup_final)) \
- __end_pci_fixups_final = .; \
- __start_pci_fixups_enable = .; \
- KEEP(*(.pci_fixup_enable)) \
- __end_pci_fixups_enable = .; \
- __start_pci_fixups_resume = .; \
- KEEP(*(.pci_fixup_resume)) \
- __end_pci_fixups_resume = .; \
- __start_pci_fixups_resume_early = .; \
- KEEP(*(.pci_fixup_resume_early)) \
- __end_pci_fixups_resume_early = .; \
- __start_pci_fixups_suspend = .; \
- KEEP(*(.pci_fixup_suspend)) \
- __end_pci_fixups_suspend = .; \
- __start_pci_fixups_suspend_late = .; \
- KEEP(*(.pci_fixup_suspend_late)) \
- __end_pci_fixups_suspend_late = .; \
- } \
- \
- /* Built-in firmware blobs */ \
- .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
- __start_builtin_fw = .; \
- KEEP(*(.builtin_fw)) \
- __end_builtin_fw = .; \
+ BOUNDED_SECTION_PRE_LABEL(.pci_fixup_early, _pci_fixups_early, __start, __end) \
+ BOUNDED_SECTION_PRE_LABEL(.pci_fixup_header, _pci_fixups_header, __start, __end) \
+ BOUNDED_SECTION_PRE_LABEL(.pci_fixup_final, _pci_fixups_final, __start, __end) \
+ BOUNDED_SECTION_PRE_LABEL(.pci_fixup_enable, _pci_fixups_enable, __start, __end) \
+ BOUNDED_SECTION_PRE_LABEL(.pci_fixup_resume, _pci_fixups_resume, __start, __end) \
+ BOUNDED_SECTION_PRE_LABEL(.pci_fixup_suspend, _pci_fixups_suspend, __start, __end) \
+ BOUNDED_SECTION_PRE_LABEL(.pci_fixup_resume_early, _pci_fixups_resume_early, __start, __end) \
+ BOUNDED_SECTION_PRE_LABEL(.pci_fixup_suspend_late, _pci_fixups_suspend_late, __start, __end) \
} \
\
+ FW_LOADER_BUILT_IN_DATA \
TRACEDATA \
\
+ PRINTK_INDEX \
+ \
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
__start___ksymtab = .; \
@@ -519,24 +544,20 @@
/* __*init sections */ \
__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
*(.ref.rodata) \
- MEM_KEEP(init.rodata) \
- MEM_KEEP(exit.rodata) \
} \
\
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
- __start___param = .; \
- KEEP(*(__param)) \
- __stop___param = .; \
+ BOUNDED_SECTION_BY(__param, ___param) \
} \
\
/* Built-in module versions. */ \
__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
- __start___modver = .; \
- KEEP(*(__modver)) \
- __stop___modver = .; \
+ BOUNDED_SECTION_BY(__modver, ___modver) \
} \
\
+ KCFI_TRAPS \
+ \
RO_EXCEPTION_TABLE \
NOTES \
BTF \
@@ -546,50 +567,54 @@
/*
- * .text..L.cfi.jumptable.* contain Control-Flow Integrity (CFI)
- * jump table entries.
- */
-#ifdef CONFIG_CFI_CLANG
-#define TEXT_CFI_JT \
- . = ALIGN(PMD_SIZE); \
- __cfi_jt_start = .; \
- *(.text..L.cfi.jumptable .text..L.cfi.jumptable.*) \
- . = ALIGN(PMD_SIZE); \
- __cfi_jt_end = .;
-#else
-#define TEXT_CFI_JT
-#endif
-
-/*
* Non-instrumentable text section
*/
#define NOINSTR_TEXT \
ALIGN_FUNCTION(); \
__noinstr_text_start = .; \
*(.noinstr.text) \
+ __cpuidle_text_start = .; \
+ *(.cpuidle.text) \
+ __cpuidle_text_end = .; \
__noinstr_text_end = .;
+#define TEXT_SPLIT \
+ __split_text_start = .; \
+ *(.text.split .text.split.[0-9a-zA-Z_]*) \
+ __split_text_end = .;
+
+#define TEXT_UNLIKELY \
+ __unlikely_text_start = .; \
+ *(.text.unlikely .text.unlikely.*) \
+ __unlikely_text_end = .;
+
+#define TEXT_HOT \
+ __hot_text_start = .; \
+ *(.text.hot .text.hot.*) \
+ __hot_text_end = .;
+
/*
* .text section. Map to function alignment to avoid address changes
* during the second ld pass when generating System.map
*
- * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
- * code elimination is enabled, so these sections should be converted
- * to use ".." first.
+ * TEXT_MAIN here will match symbols with a fixed pattern (for example,
+ * .text.hot or .text.unlikely). Match those before TEXT_MAIN to ensure
+ * they get grouped together.
+ *
+ * Also, placing the .text.hot section at the beginning of a page
+ * helps TLB performance.
*/
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
- *(.text.hot .text.hot.*) \
- *(TEXT_MAIN .text.fixup) \
- *(.text.unlikely .text.unlikely.*) \
+ *(.text.asan.* .text.tsan.*) \
*(.text.unknown .text.unknown.*) \
+ TEXT_SPLIT \
+ TEXT_UNLIKELY \
+ . = ALIGN(PAGE_SIZE); \
+ TEXT_HOT \
+ *(TEXT_MAIN .text.fixup) \
NOINSTR_TEXT \
- *(.text..refcount) \
- *(.ref.text) \
- TEXT_CFI_JT \
- MEM_KEEP(init.text*) \
- MEM_KEEP(exit.text*) \
-
+ *(.ref.text)
/* sched.text is aligned to the function alignment to ensure we have the
* same address even at the second ld pass when generating System.map */
@@ -607,12 +632,6 @@
*(.spinlock.text) \
__lock_text_end = .;
-#define CPUIDLE_TEXT \
- ALIGN_FUNCTION(); \
- __cpuidle_text_start = .; \
- *(.cpuidle.text) \
- __cpuidle_text_end = .;
-
#define KPROBES_TEXT \
ALIGN_FUNCTION(); \
__kprobes_text_start = .; \
@@ -657,9 +676,7 @@
#define EXCEPTION_TABLE(align) \
. = ALIGN(align); \
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
- __start___ex_table = .; \
- KEEP(*(__ex_table)) \
- __stop___ex_table = .; \
+ BOUNDED_SECTION_BY(__ex_table, ___ex_table) \
}
/*
@@ -667,12 +684,11 @@
*/
#ifdef CONFIG_DEBUG_INFO_BTF
#define BTF \
+ . = ALIGN(PAGE_SIZE); \
.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \
- __start_BTF = .; \
- KEEP(*(.BTF)) \
- __stop_BTF = .; \
+ BOUNDED_SECTION_BY(.BTF, _BTF) \
} \
- . = ALIGN(4); \
+ . = ALIGN(PAGE_SIZE); \
.BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \
*(.BTF_ids) \
}
@@ -704,8 +720,7 @@
/* init and exit section handling */
#define INIT_DATA \
KEEP(*(SORT(___kentry+*))) \
- *(.init.data init.data.*) \
- MEM_DISCARD(init.data*) \
+ *(.init.data .init.data.*) \
KERNEL_CTORS() \
MCOUNT_REC() \
*(.init.rodata .init.rodata.*) \
@@ -713,7 +728,6 @@
TRACE_SYSCALLS() \
KPROBE_BLACKLIST() \
ERROR_INJECT_WHITELIST() \
- MEM_DISCARD(init.rodata) \
CLK_OF_TABLES() \
RESERVEDMEM_OF_TABLES() \
TIMER_OF_TABLES() \
@@ -724,28 +738,23 @@
ACPI_PROBE_TABLE(irqchip) \
ACPI_PROBE_TABLE(timer) \
THERMAL_TABLE(governor) \
- DTPM_TABLE() \
EARLYCON_TABLE() \
LSM_TABLE() \
EARLY_LSM_TABLE() \
- KUNIT_TABLE()
+ KUNIT_INIT_TABLE()
#define INIT_TEXT \
*(.init.text .init.text.*) \
- *(.text.startup) \
- MEM_DISCARD(init.text*)
+ *(.text.startup .text.startup.*)
#define EXIT_DATA \
*(.exit.data .exit.data.*) \
*(.fini_array .fini_array.*) \
- *(.dtors .dtors.*) \
- MEM_DISCARD(exit.data*) \
- MEM_DISCARD(exit.rodata*)
+ *(.dtors .dtors.*)
#define EXIT_TEXT \
*(.exit.text) \
- *(.text.exit) \
- MEM_DISCARD(exit.text)
+ *(.text.exit .text.exit.*)
#define EXIT_CALL \
*(.exitcall.exit)
@@ -839,6 +848,7 @@
/* Required sections not related to debugging. */
#define ELF_DETAILS \
+ .modinfo : { *(.modinfo) . = ALIGN(8); } \
.comment 0 : { *(.comment) } \
.symtab 0 : { *(.symtab) } \
.strtab 0 : { *(.strtab) } \
@@ -848,9 +858,7 @@
#define BUG_TABLE \
. = ALIGN(8); \
__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
- __start___bug_table = .; \
- KEEP(*(__bug_table)) \
- __stop___bug_table = .; \
+ BOUNDED_SECTION_BY(__bug_table, ___bug_table) \
}
#else
#define BUG_TABLE
@@ -858,22 +866,22 @@
#ifdef CONFIG_UNWINDER_ORC
#define ORC_UNWIND_TABLE \
+ .orc_header : AT(ADDR(.orc_header) - LOAD_OFFSET) { \
+ BOUNDED_SECTION_BY(.orc_header, _orc_header) \
+ } \
. = ALIGN(4); \
.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
- __start_orc_unwind_ip = .; \
- KEEP(*(.orc_unwind_ip)) \
- __stop_orc_unwind_ip = .; \
+ BOUNDED_SECTION_BY(.orc_unwind_ip, _orc_unwind_ip) \
} \
. = ALIGN(2); \
.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
- __start_orc_unwind = .; \
- KEEP(*(.orc_unwind)) \
- __stop_orc_unwind = .; \
+ BOUNDED_SECTION_BY(.orc_unwind, _orc_unwind) \
} \
+ text_size = _etext - _stext; \
. = ALIGN(4); \
.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
orc_lookup = .; \
- . += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
+ . += (((text_size + LOOKUP_BLOCK_SIZE - 1) / \
LOOKUP_BLOCK_SIZE) + 1) * 4; \
orc_lookup_end = .; \
}
@@ -881,31 +889,56 @@
#define ORC_UNWIND_TABLE
#endif
+/* Built-in firmware blobs */
+#ifdef CONFIG_FW_LOADER
+#define FW_LOADER_BUILT_IN_DATA \
+ .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
+ BOUNDED_SECTION_PRE_LABEL(.builtin_fw, _builtin_fw, __start, __end) \
+ }
+#else
+#define FW_LOADER_BUILT_IN_DATA
+#endif
+
#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
. = ALIGN(4); \
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
- __tracedata_start = .; \
- KEEP(*(.tracedata)) \
- __tracedata_end = .; \
+ BOUNDED_SECTION_POST_LABEL(.tracedata, __tracedata, _start, _end) \
}
#else
#define TRACEDATA
#endif
+#ifdef CONFIG_PRINTK_INDEX
+#define PRINTK_INDEX \
+ .printk_index : AT(ADDR(.printk_index) - LOAD_OFFSET) { \
+ BOUNDED_SECTION_BY(.printk_index, _printk_index) \
+ }
+#else
+#define PRINTK_INDEX
+#endif
+
+/*
+ * Discard .note.GNU-stack, which is emitted as PROGBITS by the compiler.
+ * Otherwise, the type of the .notes section would become PROGBITS instead of NOTES.
+ *
+ * Also, discard .note.gnu.property, otherwise it forces the notes section to
+ * be 8-byte aligned which causes alignment mismatches with the kernel's custom
+ * 4-byte aligned notes.
+ */
#define NOTES \
+ /DISCARD/ : { \
+ *(.note.GNU-stack) \
+ *(.note.gnu.property) \
+ } \
.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
- __start_notes = .; \
- KEEP(*(.note.*)) \
- __stop_notes = .; \
+ BOUNDED_SECTION_BY(.note.*, _notes) \
} NOTES_HEADERS \
NOTES_HEADERS_RESTORE
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
- __setup_start = .; \
- KEEP(*(.init.setup)) \
- __setup_end = .;
+ BOUNDED_SECTION_POST_LABEL(.init.setup, __setup, _start, _end)
#define INIT_CALLS_LEVEL(level) \
__initcall##level##_start = .; \
@@ -927,16 +960,30 @@
__initcall_end = .;
#define CON_INITCALL \
- __con_initcall_start = .; \
- KEEP(*(.con_initcall.init)) \
- __con_initcall_end = .;
+ BOUNDED_SECTION_POST_LABEL(.con_initcall.init, __con_initcall, _start, _end)
+
+#define NAMED_SECTION(name) \
+ . = ALIGN(8); \
+ name : AT(ADDR(name) - LOAD_OFFSET) \
+ { BOUNDED_SECTION_PRE_LABEL(name, name, __start_, __stop_) }
+
+#define RUNTIME_CONST(t,x) NAMED_SECTION(runtime_##t##_##x)
+
+#define RUNTIME_CONST_VARIABLES \
+ RUNTIME_CONST(shift, d_hash_shift) \
+ RUNTIME_CONST(ptr, dentry_hashtable) \
+ RUNTIME_CONST(ptr, __dentry_cache)
/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_TABLE() \
. = ALIGN(8); \
- __kunit_suites_start = .; \
- KEEP(*(.kunit_test_suites)) \
- __kunit_suites_end = .;
+ BOUNDED_SECTION_POST_LABEL(.kunit_test_suites, __kunit_suites, _start, _end)
+
+/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
+#define KUNIT_INIT_TABLE() \
+ . = ALIGN(8); \
+ BOUNDED_SECTION_POST_LABEL(.kunit_init_test_suites, \
+ __kunit_init_suites, _start, _end)
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
@@ -989,17 +1036,21 @@
* -fsanitize=thread produce unwanted sections (.eh_frame
* and .init_array.*), but CONFIG_CONSTRUCTORS wants to
* keep any .init_array.* sections.
- * https://bugs.llvm.org/show_bug.cgi?id=46478
+ * https://llvm.org/pr46478
*/
-#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN) || \
- defined(CONFIG_CFI_CLANG)
+#ifdef CONFIG_UNWIND_TABLES
+#define DISCARD_EH_FRAME
+#else
+#define DISCARD_EH_FRAME *(.eh_frame)
+#endif
+#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN)
# ifdef CONFIG_CONSTRUCTORS
# define SANITIZER_DISCARDS \
- *(.eh_frame)
+ DISCARD_EH_FRAME
# else
# define SANITIZER_DISCARDS \
*(.init_array) *(.init_array.*) \
- *(.eh_frame)
+ DISCARD_EH_FRAME
# endif
#else
# define SANITIZER_DISCARDS
@@ -1007,11 +1058,14 @@
#define COMMON_DISCARDS \
SANITIZER_DISCARDS \
+ PATCHABLE_DISCARDS \
*(.discard) \
*(.discard.*) \
- *(.modinfo) \
+ *(.export_symbol) \
+ *(.no_trim_symbol) \
/* ld.bfd warns about .gnu.version* even when not emitted */ \
*(.gnu.version*) \
+ *(__tracepoint_check) \
#define DISCARDS \
/DISCARD/ : { \
@@ -1032,10 +1086,13 @@
*/
#define PERCPU_INPUT(cacheline) \
__per_cpu_start = .; \
- *(.data..percpu..first) \
. = ALIGN(PAGE_SIZE); \
*(.data..percpu..page_aligned) \
. = ALIGN(cacheline); \
+ __per_cpu_hot_start = .; \
+ *(SORT_BY_ALIGNMENT(.data..percpu..hot.*)) \
+ __per_cpu_hot_end = .; \
+ . = ALIGN(cacheline); \
*(.data..percpu..read_mostly) \
. = ALIGN(cacheline); \
*(.data..percpu) \
@@ -1044,52 +1101,17 @@
__per_cpu_end = .;
/**
- * PERCPU_VADDR - define output section for percpu area
+ * PERCPU_SECTION - define output section for percpu area
* @cacheline: cacheline size
- * @vaddr: explicit base address (optional)
- * @phdr: destination PHDR (optional)
*
* Macro which expands to output section for percpu area.
*
* @cacheline is used to align subsections to avoid false cacheline
* sharing between subsections for different purposes.
- *
- * If @vaddr is not blank, it specifies explicit base address and all
- * percpu symbols will be offset from the given address. If blank,
- * @vaddr always equals @laddr + LOAD_OFFSET.
- *
- * @phdr defines the output PHDR to use if not blank. Be warned that
- * output PHDR is sticky. If @phdr is specified, the next output
- * section in the linker script will go there too. @phdr should have
- * a leading colon.
- *
- * Note that this macros defines __per_cpu_load as an absolute symbol.
- * If there is no need to put the percpu section at a predetermined
- * address, use PERCPU_SECTION.
- */
-#define PERCPU_VADDR(cacheline, vaddr, phdr) \
- __per_cpu_load = .; \
- .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
- PERCPU_INPUT(cacheline) \
- } phdr \
- . = __per_cpu_load + SIZEOF(.data..percpu);
-
-/**
- * PERCPU_SECTION - define output section for percpu area, simple version
- * @cacheline: cacheline size
- *
- * Align to PAGE_SIZE and outputs output section for percpu area. This
- * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and
- * __per_cpu_start will be identical.
- *
- * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
- * except that __per_cpu_load is defined as a relative symbol against
- * .data..percpu which is required for relocatable x86_32 configuration.
*/
#define PERCPU_SECTION(cacheline) \
. = ALIGN(PAGE_SIZE); \
.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
- __per_cpu_load = .; \
PERCPU_INPUT(cacheline) \
}
@@ -1118,6 +1140,7 @@
INIT_TASK_DATA(inittask) \
NOSAVE_DATA \
PAGE_ALIGNED_DATA(pagealigned) \
+ CACHE_HOT_DATA(cacheline) \
CACHELINE_ALIGNED_DATA(cacheline) \
READ_MOSTLY_DATA(cacheline) \
DATA_DATA \
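
The BOUNDED_SECTION_*() helpers above only emit start/stop symbols around
KEEP(*(section)); the C side then walks the records between them. A
hypothetical sketch of that consumer pattern (the section name, struct, and
macro are invented for illustration; the section itself would be placed via
BOUNDED_SECTION(_my_table) in the linker script):

	#include <linux/compiler.h>

	struct my_entry {
		const char *name;
		int (*fn)(void);
	};

	/* Registers an entry by placing it into the '_my_table' section. */
	#define MY_ENTRY(_name, _fn)					\
		static const struct my_entry __used			\
		__section("_my_table") _my_entry_##_fn =		\
		{ .name = (_name), .fn = (_fn) }

	/* Emitted by BOUNDED_SECTION(_my_table) around the section. */
	extern const struct my_entry __start_my_table[];
	extern const struct my_entry __stop_my_table[];

	static void run_my_table(void)
	{
		const struct my_entry *e;

		for (e = __start_my_table; e < __stop_my_table; e++)
			e->fn();
	}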
diff --git a/include/asm-generic/vtime.h b/include/asm-generic/vtime.h
deleted file mode 100644
index b1a49677fe25..000000000000
--- a/include/asm-generic/vtime.h
+++ /dev/null
@@ -1 +0,0 @@
-/* no content, but patch(1) dislikes empty files */
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
index 20c93f08c993..ef3f841c6625 100644
--- a/include/asm-generic/word-at-a-time.h
+++ b/include/asm-generic/word-at-a-time.h
@@ -2,7 +2,8 @@
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H
-#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/wordpart.h>
#include <asm/byteorder.h>
#ifdef __BIG_ENDIAN
@@ -38,7 +39,7 @@ static inline long find_zero(unsigned long mask)
return (mask >> 8) ? byte : byte + 1;
}
-static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
{
unsigned long rhs = val | c->low_bits;
*data = rhs;
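
The has_zero()/prep_zero_mask()/create_zero_mask()/find_zero() helpers above
are consumed in a fixed pattern. A hedged sketch of a word-at-a-time strlen()
(kernel context assumed; the string must be word-aligned and readable up to
the next word boundary past the NUL):

	#include <linux/types.h>
	#include <asm/word-at-a-time.h>

	static size_t strlen_word_at_a_time(const char *s)
	{
		const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
		const unsigned long *p = (const unsigned long *)s;
		unsigned long c, data;
		size_t len = 0;

		for (;;) {
			c = *p++;
			if (has_zero(c, &data, &constants)) {
				/* Refine the mask, then index the zero byte. */
				data = prep_zero_mask(c, data, &constants);
				data = create_zero_mask(data);
				return len + find_zero(data);
			}
			len += sizeof(unsigned long);
		}
	}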
diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h
index b62a2a56a4d4..44509d48fca2 100644
--- a/include/asm-generic/xor.h
+++ b/include/asm-generic/xor.h
@@ -8,7 +8,8 @@
#include <linux/prefetch.h>
static void
-xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_8regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -27,8 +28,9 @@ xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_8regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -48,8 +50,10 @@ xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_8regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -70,8 +74,11 @@ xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_8regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -93,7 +100,8 @@ xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_32regs_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -129,8 +137,9 @@ xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_32regs_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -175,8 +184,10 @@ xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_32regs_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -230,8 +241,11 @@ xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_32regs_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8;
@@ -294,7 +308,8 @@ xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_8regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
@@ -320,8 +335,9 @@ xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_8regs_p_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
prefetchw(p1);
@@ -350,8 +366,10 @@ xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_8regs_p_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -384,8 +402,11 @@ xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_8regs_p_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -421,7 +442,8 @@ xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+xor_32regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -466,8 +488,9 @@ xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
}
static void
-xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3)
+xor_32regs_p_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -523,8 +546,10 @@ xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4)
+xor_32regs_p_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
@@ -591,8 +616,11 @@ xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
}
static void
-xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
- unsigned long *p3, unsigned long *p4, unsigned long *p5)
+xor_32regs_p_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
long lines = bytes / (sizeof (long)) / 8 - 1;
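Every helper in this file gets the same treatment: the destination stays writable while all source operands become const, and everything is __restrict-qualified. Telling the compiler the buffers cannot alias is what lets it keep eight (or thirty-two) values live in registers and vectorize the loops. A minimal standalone sketch of the pattern (C99 restrict; not the kernel file):

#include <stddef.h>

/* dst and src are guaranteed not to alias, so the compiler may
 * unroll and vectorize without reload-after-store hazards. */
static void xor_words(size_t n, unsigned long *restrict dst,
                      const unsigned long *restrict src)
{
        size_t i;

        for (i = 0; i < n; i++)
                dst[i] ^= src[i];
}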
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index 73c7139c866f..2eda895f19f5 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -9,9 +9,6 @@
#include <linux/timecounter.h>
#include <linux/types.h>
-#define ARCH_TIMER_TYPE_CP15 BIT(0)
-#define ARCH_TIMER_TYPE_MEM BIT(1)
-
#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK (1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT (1 << 2)
@@ -21,10 +18,17 @@
#define CNTHCTL_EVNTEN (1 << 2)
#define CNTHCTL_EVNTDIR (1 << 3)
#define CNTHCTL_EVNTI (0xF << 4)
+#define CNTHCTL_ECV (1 << 12)
+#define CNTHCTL_EL1TVT (1 << 13)
+#define CNTHCTL_EL1TVCT (1 << 14)
+#define CNTHCTL_EL1NVPCT (1 << 15)
+#define CNTHCTL_EL1NVVCT (1 << 16)
+#define CNTHCTL_CNTVMASK (1 << 18)
+#define CNTHCTL_CNTPMASK (1 << 19)
enum arch_timer_reg {
ARCH_TIMER_REG_CTRL,
- ARCH_TIMER_REG_TVAL,
+ ARCH_TIMER_REG_CVAL,
};
enum arch_timer_ppi_nr {
@@ -44,8 +48,6 @@ enum arch_timer_spi_nr {
#define ARCH_TIMER_PHYS_ACCESS 0
#define ARCH_TIMER_VIRT_ACCESS 1
-#define ARCH_TIMER_MEM_PHYS_ACCESS 2
-#define ARCH_TIMER_MEM_VIRT_ACCESS 3
#define ARCH_TIMER_MEM_MAX_FRAMES 8
@@ -56,6 +58,7 @@ enum arch_timer_spi_nr {
#define ARCH_TIMER_EVT_TRIGGER_MASK (0xF << ARCH_TIMER_EVT_TRIGGER_SHIFT)
#define ARCH_TIMER_USR_VT_ACCESS_EN (1 << 8) /* virtual timer registers */
#define ARCH_TIMER_USR_PT_ACCESS_EN (1 << 9) /* physical timer registers */
+#define ARCH_TIMER_EVT_INTERVAL_SCALE (1 << 17) /* EVNTIS in the ARMv8 ARM */
#define ARCH_TIMER_EVT_STREAM_PERIOD_US 100
#define ARCH_TIMER_EVT_STREAM_FREQ \
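ARCH_TIMER_REG_TVAL gives way to ARCH_TIMER_REG_CVAL: instead of writing a 32-bit downcount relative to "now", the driver programs an absolute 64-bit compare value, sidestepping TVAL's narrow range. A sketch of the resulting set-next-event shape (read_counter() and the write_* helpers are placeholders here, not the driver's real accessors):

static void timer_set_next_event(u64 delta_cycles)
{
        u64 cval = read_counter() + delta_cycles;       /* absolute deadline */

        write_cval(cval);                       /* ARCH_TIMER_REG_CVAL */
        write_ctrl(ARCH_TIMER_CTRL_ENABLE);     /* enable, IT_MASK clear */
}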
diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h
index b6774aa5a4b8..d48dd4176fd3 100644
--- a/include/clocksource/hyperv_timer.h
+++ b/include/clocksource/hyperv_timer.h
@@ -15,11 +15,15 @@
#include <linux/clocksource.h>
#include <linux/math64.h>
-#include <asm/mshyperv.h>
+#include <hyperv/hvhdk.h>
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
#define HV_MIN_DELTA_TICKS 1
+#ifdef CONFIG_HYPERV_TIMER
+
+#include <asm/hyperv_timer.h>
+
/* Routines called by the VMbus driver */
extern int hv_stimer_alloc(bool have_percpu_irqs);
extern int hv_stimer_cleanup(unsigned int cpu);
@@ -28,14 +32,17 @@ extern void hv_stimer_legacy_cleanup(unsigned int cpu);
extern void hv_stimer_global_cleanup(void);
extern void hv_stimer0_isr(void);
-#ifdef CONFIG_HYPERV_TIMER
-extern u64 (*hv_read_reference_counter)(void);
extern void hv_init_clocksource(void);
+extern void hv_remap_tsc_clocksource(void);
+extern unsigned long hv_get_tsc_pfn(void);
extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
-static inline notrace u64
-hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc)
+extern void hv_adj_sched_clock_offset(u64 offset);
+
+static __always_inline bool
+hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
+ u64 *cur_tsc, u64 *time)
{
u64 scale, offset;
u32 sequence;
@@ -59,7 +66,7 @@ hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc)
do {
sequence = READ_ONCE(tsc_pg->tsc_sequence);
if (!sequence)
- return U64_MAX;
+ return false;
/*
* Make sure we read sequence before we read other values from
* TSC page.
@@ -78,28 +85,33 @@ hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc)
} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);
- return mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
+ *time = mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
+ return true;
}
-static inline notrace u64
-hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
+#else /* CONFIG_HYPERV_TIMER */
+static inline unsigned long hv_get_tsc_pfn(void)
{
- u64 cur_tsc;
-
- return hv_read_tsc_page_tsc(tsc_pg, &cur_tsc);
+ return 0;
}
-#else /* CONFIG_HYPERV_TIMER */
static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
return NULL;
}
-static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
- u64 *cur_tsc)
+static __always_inline bool
+hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc, u64 *time)
{
- return U64_MAX;
+ return false;
}
+
+static inline int hv_stimer_cleanup(unsigned int cpu) { return 0; }
+static inline void hv_stimer_legacy_init(unsigned int cpu, int sint) {}
+static inline void hv_stimer_legacy_cleanup(unsigned int cpu) {}
+static inline void hv_stimer_global_cleanup(void) {}
+static inline void hv_stimer0_isr(void) {}
+
#endif /* CONFIG_HYPERV_TIMER */
#endif
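hv_read_tsc_page_tsc() now returns a bool and hands both the raw TSC and the computed time back through pointers, so a failed sequence read (sequence == 0) no longer has to be signalled with the U64_MAX sentinel. An illustrative caller (not from the tree):

static u64 read_hv_clock(void)
{
        u64 tsc, time;

        if (hv_read_tsc_page_tsc(hv_get_tsc_page(), &tsc, &time))
                return time;

        return 0;       /* platform-specific fallback path goes here */
}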
diff --git a/include/clocksource/samsung_pwm.h b/include/clocksource/samsung_pwm.h
index c395238d0922..9b435caa95fe 100644
--- a/include/clocksource/samsung_pwm.h
+++ b/include/clocksource/samsung_pwm.h
@@ -27,6 +27,7 @@ struct samsung_pwm_variant {
};
void samsung_pwm_clocksource_init(void __iomem *base,
- unsigned int *irqs, struct samsung_pwm_variant *variant);
+ unsigned int *irqs,
+ const struct samsung_pwm_variant *variant);
#endif /* __CLOCKSOURCE_SAMSUNG_PWM_H */
diff --git a/include/clocksource/timer-goldfish.h b/include/clocksource/timer-goldfish.h
new file mode 100644
index 000000000000..05a3a4f610d6
--- /dev/null
+++ b/include/clocksource/timer-goldfish.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * goldfish-timer clocksource
+ * Registers definition for the goldfish-timer device
+ */
+
+#ifndef _CLOCKSOURCE_TIMER_GOLDFISH_H
+#define _CLOCKSOURCE_TIMER_GOLDFISH_H
+
+/*
+ * TIMER_TIME_LOW get low bits of current time and update TIMER_TIME_HIGH
+ * TIMER_TIME_HIGH get high bits of time at last TIMER_TIME_LOW read
+ * TIMER_ALARM_LOW set low bits of alarm and activate it
+ * TIMER_ALARM_HIGH set high bits of next alarm
+ * TIMER_IRQ_ENABLED enable alarm interrupt
+ * TIMER_CLEAR_ALARM disarm an existing alarm
+ * TIMER_ALARM_STATUS alarm status (running or not)
+ * TIMER_CLEAR_INTERRUPT clear interrupt
+ */
+#define TIMER_TIME_LOW 0x00
+#define TIMER_TIME_HIGH 0x04
+#define TIMER_ALARM_LOW 0x08
+#define TIMER_ALARM_HIGH 0x0c
+#define TIMER_IRQ_ENABLED 0x10
+#define TIMER_CLEAR_ALARM 0x14
+#define TIMER_ALARM_STATUS 0x18
+#define TIMER_CLEAR_INTERRUPT 0x1c
+
+extern int goldfish_timer_init(int irq, void __iomem *base);
+
+#endif /* _CLOCKSOURCE_TIMER_GOLDFISH_H */
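Per the register comment above, a read of TIMER_TIME_LOW latches the value that the following TIMER_TIME_HIGH read returns, so a tear-free 64-bit timestamp is two ordered MMIO reads. A sketch (assuming an ioremap()ed base):

static u64 goldfish_timer_read(void __iomem *base)
{
        u32 lo = readl(base + TIMER_TIME_LOW);  /* latches HIGH */
        u32 hi = readl(base + TIMER_TIME_HIGH);

        return ((u64)hi << 32) | lo;
}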
diff --git a/include/clocksource/timer-riscv.h b/include/clocksource/timer-riscv.h
new file mode 100644
index 000000000000..d7f455754e60
--- /dev/null
+++ b/include/clocksource/timer-riscv.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#ifndef __TIMER_RISCV_H
+#define __TIMER_RISCV_H
+
+#include <linux/types.h>
+
+extern void riscv_cs_get_mult_shift(u32 *mult, u32 *shift);
+
+#endif
diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h
index 4c61dade8835..dcc1712f75e7 100644
--- a/include/clocksource/timer-ti-dm.h
+++ b/include/clocksource/timer-ti-dm.h
@@ -52,10 +52,6 @@
#define OMAP_TIMER_TRIGGER_OVERFLOW 0x01
#define OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02
-/* posted mode types */
-#define OMAP_TIMER_NONPOSTED 0x00
-#define OMAP_TIMER_POSTED 0x01
-
/* timer capabilities used in hwmod database */
#define OMAP_TIMER_SECURE 0x80000000
#define OMAP_TIMER_ALWON 0x40000000
@@ -63,72 +59,11 @@
#define OMAP_TIMER_NEEDS_RESET 0x10000000
#define OMAP_TIMER_HAS_DSP_IRQ 0x08000000
-/*
- * timer errata flags
- *
- * Errata i103/i767 impacts all OMAP3/4/5 devices including AM33xx. This
- * errata prevents us from using posted mode on these devices, unless the
- * timer counter register is never read. For more details please refer to
- * the OMAP3/4/5 errata documents.
- */
-#define OMAP_TIMER_ERRATA_I103_I767 0x80000000
-
-struct timer_regs {
- u32 tidr;
- u32 tier;
- u32 twer;
- u32 tclr;
- u32 tcrr;
- u32 tldr;
- u32 ttrg;
- u32 twps;
- u32 tmar;
- u32 tcar1;
- u32 tsicr;
- u32 tcar2;
- u32 tpir;
- u32 tnir;
- u32 tcvr;
- u32 tocr;
- u32 towr;
-};
-
struct omap_dm_timer {
- int id;
- int irq;
- struct clk *fclk;
-
- void __iomem *io_base;
- void __iomem *irq_stat; /* TISR/IRQSTATUS interrupt status */
- void __iomem *irq_ena; /* irq enable */
- void __iomem *irq_dis; /* irq disable, only on v2 ip */
- void __iomem *pend; /* write pending */
- void __iomem *func_base; /* function register base */
-
- atomic_t enabled;
- unsigned long rate;
- unsigned reserved:1;
- unsigned posted:1;
- struct timer_regs context;
- int revision;
- u32 capability;
- u32 errata;
- struct platform_device *pdev;
- struct list_head node;
- struct notifier_block nb;
};
-int omap_dm_timer_reserve_systimer(int id);
-struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap);
-
-int omap_dm_timer_get_irq(struct omap_dm_timer *timer);
-
u32 omap_dm_timer_modify_idlect_mask(u32 inputmask);
-int omap_dm_timer_trigger(struct omap_dm_timer *timer);
-
-int omap_dm_timers_active(void);
-
/*
* Do not use the defines below, they are not needed. They should be only
* used by dmtimer.c and sys_timer related code.
@@ -198,196 +133,4 @@ int omap_dm_timers_active(void);
#define _OMAP_TIMER_TICK_INT_MASK_SET_OFFSET 0x54 /* TOCR, 34xx only */
#define _OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET 0x58 /* TOWR, 34xx only */
-/* register offsets with the write pending bit encoded */
-#define WPSHIFT 16
-
-#define OMAP_TIMER_WAKEUP_EN_REG (_OMAP_TIMER_WAKEUP_EN_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_CTRL_REG (_OMAP_TIMER_CTRL_OFFSET \
- | (WP_TCLR << WPSHIFT))
-
-#define OMAP_TIMER_COUNTER_REG (_OMAP_TIMER_COUNTER_OFFSET \
- | (WP_TCRR << WPSHIFT))
-
-#define OMAP_TIMER_LOAD_REG (_OMAP_TIMER_LOAD_OFFSET \
- | (WP_TLDR << WPSHIFT))
-
-#define OMAP_TIMER_TRIGGER_REG (_OMAP_TIMER_TRIGGER_OFFSET \
- | (WP_TTGR << WPSHIFT))
-
-#define OMAP_TIMER_WRITE_PEND_REG (_OMAP_TIMER_WRITE_PEND_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_MATCH_REG (_OMAP_TIMER_MATCH_OFFSET \
- | (WP_TMAR << WPSHIFT))
-
-#define OMAP_TIMER_CAPTURE_REG (_OMAP_TIMER_CAPTURE_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_IF_CTRL_REG (_OMAP_TIMER_IF_CTRL_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_CAPTURE2_REG (_OMAP_TIMER_CAPTURE2_OFFSET \
- | (WP_NONE << WPSHIFT))
-
-#define OMAP_TIMER_TICK_POS_REG (_OMAP_TIMER_TICK_POS_OFFSET \
- | (WP_TPIR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_NEG_REG (_OMAP_TIMER_TICK_NEG_OFFSET \
- | (WP_TNIR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_COUNT_REG (_OMAP_TIMER_TICK_COUNT_OFFSET \
- | (WP_TCVR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_INT_MASK_SET_REG \
- (_OMAP_TIMER_TICK_INT_MASK_SET_OFFSET | (WP_TOCR << WPSHIFT))
-
-#define OMAP_TIMER_TICK_INT_MASK_COUNT_REG \
- (_OMAP_TIMER_TICK_INT_MASK_COUNT_OFFSET | (WP_TOWR << WPSHIFT))
-
-/*
- * The below are inlined to optimize code size for system timers. Other code
- * should not need these at all.
- */
-#if defined(CONFIG_ARCH_OMAP1) || defined(CONFIG_ARCH_OMAP2PLUS)
-static inline u32 __omap_dm_timer_read(struct omap_dm_timer *timer, u32 reg,
- int posted)
-{
- if (posted)
- while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
- cpu_relax();
-
- return readl_relaxed(timer->func_base + (reg & 0xff));
-}
-
-static inline void __omap_dm_timer_write(struct omap_dm_timer *timer,
- u32 reg, u32 val, int posted)
-{
- if (posted)
- while (readl_relaxed(timer->pend) & (reg >> WPSHIFT))
- cpu_relax();
-
- writel_relaxed(val, timer->func_base + (reg & 0xff));
-}
-
-static inline void __omap_dm_timer_init_regs(struct omap_dm_timer *timer)
-{
- u32 tidr;
-
- /* Assume v1 ip if bits [31:16] are zero */
- tidr = readl_relaxed(timer->io_base);
- if (!(tidr >> 16)) {
- timer->revision = 1;
- timer->irq_stat = timer->io_base + OMAP_TIMER_V1_STAT_OFFSET;
- timer->irq_ena = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
- timer->irq_dis = timer->io_base + OMAP_TIMER_V1_INT_EN_OFFSET;
- timer->pend = timer->io_base + _OMAP_TIMER_WRITE_PEND_OFFSET;
- timer->func_base = timer->io_base;
- } else {
- timer->revision = 2;
- timer->irq_stat = timer->io_base + OMAP_TIMER_V2_IRQSTATUS;
- timer->irq_ena = timer->io_base + OMAP_TIMER_V2_IRQENABLE_SET;
- timer->irq_dis = timer->io_base + OMAP_TIMER_V2_IRQENABLE_CLR;
- timer->pend = timer->io_base +
- _OMAP_TIMER_WRITE_PEND_OFFSET +
- OMAP_TIMER_V2_FUNC_OFFSET;
- timer->func_base = timer->io_base + OMAP_TIMER_V2_FUNC_OFFSET;
- }
-}
-
-/*
- * __omap_dm_timer_enable_posted - enables write posted mode
- * @timer: pointer to timer instance handle
- *
- * Enables the write posted mode for the timer. When posted mode is enabled
- * writes to certain timer registers are immediately acknowledged by the
- * internal bus and hence prevents stalling the CPU waiting for the write to
- * complete. Enabling this feature can improve performance for writing to the
- * timer registers.
- */
-static inline void __omap_dm_timer_enable_posted(struct omap_dm_timer *timer)
-{
- if (timer->posted)
- return;
-
- if (timer->errata & OMAP_TIMER_ERRATA_I103_I767) {
- timer->posted = OMAP_TIMER_NONPOSTED;
- __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG, 0, 0);
- return;
- }
-
- __omap_dm_timer_write(timer, OMAP_TIMER_IF_CTRL_REG,
- OMAP_TIMER_CTRL_POSTED, 0);
- timer->context.tsicr = OMAP_TIMER_CTRL_POSTED;
- timer->posted = OMAP_TIMER_POSTED;
-}
-
-/**
- * __omap_dm_timer_override_errata - override errata flags for a timer
- * @timer: pointer to timer handle
- * @errata: errata flags to be ignored
- *
- * For a given timer, override a timer errata by clearing the flags
- * specified by the errata argument. A specific erratum should only be
- * overridden for a timer if the timer is used in such a way the erratum
- * has no impact.
- */
-static inline void __omap_dm_timer_override_errata(struct omap_dm_timer *timer,
- u32 errata)
-{
- timer->errata &= ~errata;
-}
-
-static inline void __omap_dm_timer_stop(struct omap_dm_timer *timer,
- int posted, unsigned long rate)
-{
- u32 l;
-
- l = __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
- if (l & OMAP_TIMER_CTRL_ST) {
- l &= ~0x1;
- __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, l, posted);
-#ifdef CONFIG_ARCH_OMAP2PLUS
- /* Readback to make sure write has completed */
- __omap_dm_timer_read(timer, OMAP_TIMER_CTRL_REG, posted);
- /*
- * Wait for functional clock period x 3.5 to make sure that
- * timer is stopped
- */
- udelay(3500000 / rate + 1);
-#endif
- }
-
- /* Ack possibly pending interrupt */
- writel_relaxed(OMAP_TIMER_INT_OVERFLOW, timer->irq_stat);
-}
-
-static inline void __omap_dm_timer_load_start(struct omap_dm_timer *timer,
- u32 ctrl, unsigned int load,
- int posted)
-{
- __omap_dm_timer_write(timer, OMAP_TIMER_COUNTER_REG, load, posted);
- __omap_dm_timer_write(timer, OMAP_TIMER_CTRL_REG, ctrl, posted);
-}
-
-static inline void __omap_dm_timer_int_enable(struct omap_dm_timer *timer,
- unsigned int value)
-{
- writel_relaxed(value, timer->irq_ena);
- __omap_dm_timer_write(timer, OMAP_TIMER_WAKEUP_EN_REG, value, 0);
-}
-
-static inline unsigned int
-__omap_dm_timer_read_counter(struct omap_dm_timer *timer, int posted)
-{
- return __omap_dm_timer_read(timer, OMAP_TIMER_COUNTER_REG, posted);
-}
-
-static inline void __omap_dm_timer_write_status(struct omap_dm_timer *timer,
- unsigned int value)
-{
- writel_relaxed(value, timer->irq_stat);
-}
-#endif /* CONFIG_ARCH_OMAP1 || CONFIG_ARCH_OMAP2PLUS */
#endif /* __CLOCKSOURCE_DMTIMER_H */
diff --git a/include/clocksource/timer-xilinx.h b/include/clocksource/timer-xilinx.h
new file mode 100644
index 000000000000..d116f18de899
--- /dev/null
+++ b/include/clocksource/timer-xilinx.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2021 Sean Anderson <sean.anderson@seco.com>
+ */
+
+#ifndef XILINX_TIMER_H
+#define XILINX_TIMER_H
+
+#include <linux/compiler.h>
+
+#define TCSR0 0x00
+#define TLR0 0x04
+#define TCR0 0x08
+#define TCSR1 0x10
+#define TLR1 0x14
+#define TCR1 0x18
+
+#define TCSR_MDT BIT(0)
+#define TCSR_UDT BIT(1)
+#define TCSR_GENT BIT(2)
+#define TCSR_CAPT BIT(3)
+#define TCSR_ARHT BIT(4)
+#define TCSR_LOAD BIT(5)
+#define TCSR_ENIT BIT(6)
+#define TCSR_ENT BIT(7)
+#define TCSR_TINT BIT(8)
+#define TCSR_PWMA BIT(9)
+#define TCSR_ENALL BIT(10)
+#define TCSR_CASC BIT(11)
+
+struct clk;
+struct device_node;
+struct regmap;
+
+/**
+ * struct xilinx_timer_priv - Private data for Xilinx AXI timer drivers
+ * @map: Regmap of the device, possibly with an offset
+ * @clk: Parent clock
+ * @max: Maximum value of the counters
+ */
+struct xilinx_timer_priv {
+ struct regmap *map;
+ struct clk *clk;
+ u64 max;
+};
+
+/**
+ * xilinx_timer_tlr_cycles() - Calculate the TLR for a period specified
+ * in clock cycles
+ * @priv: The timer's private data
+ * @tcsr: The value of the TCSR register for this counter
+ * @cycles: The number of cycles in this period
+ *
+ * Callers of this function MUST ensure that @cycles is representable as
+ * a TLR.
+ *
+ * Return: The calculated value for TLR
+ */
+u32 xilinx_timer_tlr_cycles(struct xilinx_timer_priv *priv, u32 tcsr,
+ u64 cycles);
+
+/**
+ * xilinx_timer_get_period() - Get the current period of a counter
+ * @priv: The timer's private data
+ * @tlr: The value of TLR for this counter
+ * @tcsr: The value of TCSR for this counter
+ *
+ * Return: The period, in ns
+ */
+unsigned int xilinx_timer_get_period(struct xilinx_timer_priv *priv,
+ u32 tlr, u32 tcsr);
+
+#endif /* XILINX_TIMER_H */
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index cb3d6b1c655d..9eacb9fa375d 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -8,28 +8,95 @@
*/
#ifndef _CRYPTO_ACOMP_H
#define _CRYPTO_ACOMP_H
+
+#include <linux/atomic.h>
+#include <linux/args.h>
+#include <linux/compiler_types.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+/* Set this bit if source is virtual address instead of SG list. */
+#define CRYPTO_ACOMP_REQ_SRC_VIRT 0x00000002
+
+/* Set this bit if the virtual address source cannot be used for DMA. */
+#define CRYPTO_ACOMP_REQ_SRC_NONDMA 0x00000004
+
+/* Set this bit if destination is virtual address instead of SG list. */
+#define CRYPTO_ACOMP_REQ_DST_VIRT 0x00000008
+
+/* Set this bit if the virtual address destination cannot be used for DMA. */
+#define CRYPTO_ACOMP_REQ_DST_NONDMA 0x00000010
+
+/* Private flags that should not be touched by the user. */
+#define CRYPTO_ACOMP_REQ_PRIVATE \
+ (CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | \
+ CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA)
+
+#define CRYPTO_ACOMP_DST_MAX 131072
+
+#define MAX_SYNC_COMP_REQSIZE 0
+
+#define ACOMP_REQUEST_ON_STACK(name, tfm) \
+ char __##name##_req[sizeof(struct acomp_req) + \
+ MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct acomp_req *name = acomp_request_on_stack_init( \
+ __##name##_req, (tfm))
+
+#define ACOMP_REQUEST_CLONE(name, gfp) \
+ acomp_request_clone(name, sizeof(__##name##_req), gfp)
-#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
+struct acomp_req;
+struct folio;
+
+struct acomp_req_chain {
+ crypto_completion_t compl;
+ void *data;
+ struct scatterlist ssg;
+ struct scatterlist dsg;
+ union {
+ const u8 *src;
+ struct folio *sfolio;
+ };
+ union {
+ u8 *dst;
+ struct folio *dfolio;
+ };
+ u32 flags;
+};
/**
* struct acomp_req - asynchronous (de)compression request
*
* @base: Common attributes for asynchronous crypto requests
- * @src: Source Data
- * @dst: Destination data
+ * @src: Source scatterlist
+ * @dst: Destination scatterlist
+ * @svirt: Source virtual address
+ * @dvirt: Destination virtual address
* @slen: Size of the input buffer
* @dlen: Size of the output buffer and number of bytes produced
- * @flags: Internal flags
+ * @chain: Private API code data, do not use
* @__ctx: Start of private context data
*/
struct acomp_req {
struct crypto_async_request base;
- struct scatterlist *src;
- struct scatterlist *dst;
+ union {
+ struct scatterlist *src;
+ const u8 *svirt;
+ };
+ union {
+ struct scatterlist *dst;
+ u8 *dvirt;
+ };
unsigned int slen;
unsigned int dlen;
- u32 flags;
+
+ struct acomp_req_chain chain;
+
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
@@ -39,49 +106,21 @@ struct acomp_req {
*
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
- * @dst_free: Frees destination buffer if allocated inside the
- * algorithm
* @reqsize: Context size for (de)compression requests
+ * @fb: Synchronous fallback tfm
* @base: Common crypto API algorithm data structure
*/
struct crypto_acomp {
int (*compress)(struct acomp_req *req);
int (*decompress)(struct acomp_req *req);
- void (*dst_free)(struct scatterlist *dst);
unsigned int reqsize;
struct crypto_tfm base;
};
-/**
- * struct acomp_alg - asynchronous compression algorithm
- *
- * @compress: Function performs a compress operation
- * @decompress: Function performs a de-compress operation
- * @dst_free: Frees destination buffer if allocated inside the algorithm
- * @init: Initialize the cryptographic transformation object.
- * This function is used to initialize the cryptographic
- * transformation object. This function is called only once at
- * the instantiation time, right after the transformation context
- * was allocated. In case the cryptographic hardware has some
- * special requirements which need to be handled by software, this
- * function shall check for the precise requirement of the
- * transformation and put any software fallbacks in place.
- * @exit: Deinitialize the cryptographic transformation object. This is a
- * counterpart to @init, used to remove various changes set in
- * @init.
- *
- * @reqsize: Context size for (de)compression requests
- * @base: Common crypto API algorithm data structure
- */
-struct acomp_alg {
- int (*compress)(struct acomp_req *req);
- int (*decompress)(struct acomp_req *req);
- void (*dst_free)(struct scatterlist *dst);
- int (*init)(struct crypto_acomp *tfm);
- void (*exit)(struct crypto_acomp *tfm);
- unsigned int reqsize;
- struct crypto_alg base;
-};
+#define COMP_ALG_COMMON { \
+ struct crypto_alg base; \
+}
+struct comp_alg_common COMP_ALG_COMMON;
/**
* DOC: Asynchronous Compression API
@@ -130,9 +169,10 @@ static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
return &tfm->base;
}
-static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
+static inline struct comp_alg_common *__crypto_comp_alg_common(
+ struct crypto_alg *alg)
{
- return container_of(alg, struct acomp_alg, base);
+ return container_of(alg, struct comp_alg_common, base);
}
static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
@@ -140,9 +180,10 @@ static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
return container_of(tfm, struct crypto_acomp, base);
}
-static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
+static inline struct comp_alg_common *crypto_comp_alg_common(
+ struct crypto_acomp *tfm)
{
- return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
+ return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
}
static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
@@ -153,7 +194,13 @@ static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
static inline void acomp_request_set_tfm(struct acomp_req *req,
struct crypto_acomp *tfm)
{
- req->base.tfm = crypto_acomp_tfm(tfm);
+ crypto_request_set_tfm(&req->base, crypto_acomp_tfm(tfm));
+}
+
+static inline bool acomp_is_async(struct crypto_acomp *tfm)
+{
+ return crypto_comp_alg_common(tfm)->base.cra_flags &
+ CRYPTO_ALG_ASYNC;
}
static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
@@ -182,14 +229,72 @@ static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
return crypto_has_alg(alg_name, type, mask);
}
+static inline const char *crypto_acomp_alg_name(struct crypto_acomp *tfm)
+{
+ return crypto_tfm_alg_name(crypto_acomp_tfm(tfm));
+}
+
+static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm)
+{
+ return crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
+}
+
/**
* acomp_request_alloc() -- allocates asynchronous (de)compression request
*
* @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ * @gfp: gfp to pass to kzalloc (defaults to GFP_KERNEL)
*
* Return: allocated handle in case of success or NULL in case of an error
*/
-struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
+static inline struct acomp_req *acomp_request_alloc_extra_noprof(
+ struct crypto_acomp *tfm, size_t extra, gfp_t gfp)
+{
+ struct acomp_req *req;
+ size_t len;
+
+ len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
+ if (check_add_overflow(len, extra, &len))
+ return NULL;
+
+ req = kzalloc_noprof(len, gfp);
+ if (likely(req))
+ acomp_request_set_tfm(req, tfm);
+ return req;
+}
+#define acomp_request_alloc_noprof(tfm, ...) \
+ CONCATENATE(acomp_request_alloc_noprof_, COUNT_ARGS(__VA_ARGS__))( \
+ tfm, ##__VA_ARGS__)
+#define acomp_request_alloc_noprof_0(tfm) \
+ acomp_request_alloc_noprof_1(tfm, GFP_KERNEL)
+#define acomp_request_alloc_noprof_1(tfm, gfp) \
+ acomp_request_alloc_extra_noprof(tfm, 0, gfp)
+#define acomp_request_alloc(...) alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__))
+
+/**
+ * acomp_request_alloc_extra() -- allocate acomp request with extra memory
+ *
+ * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ * @extra: amount of extra memory
+ * @gfp: gfp to pass to kzalloc
+ *
+ * Return: allocated handle in case of success or NULL in case of an error
+ */
+#define acomp_request_alloc_extra(...) alloc_hooks(acomp_request_alloc_extra_noprof(__VA_ARGS__))
+
+static inline void *acomp_request_extra(struct acomp_req *req)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ size_t len;
+
+ len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN);
+ return (void *)((char *)req + len);
+}
+
+static inline bool acomp_req_on_stack(struct acomp_req *req)
+{
+ return crypto_req_on_stack(&req->base);
+}
/**
* acomp_request_free() -- zeroize and free asynchronous (de)compression
@@ -198,7 +303,12 @@ struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
*
* @req: request to free
*/
-void acomp_request_free(struct acomp_req *req);
+static inline void acomp_request_free(struct acomp_req *req)
+{
+ if (!req || acomp_req_on_stack(req))
+ return;
+ kfree_sensitive(req);
+}
/**
* acomp_request_set_callback() -- Sets an asynchronous callback
@@ -216,9 +326,9 @@ static inline void acomp_request_set_callback(struct acomp_req *req,
crypto_completion_t cmpl,
void *data)
{
- req->base.complete = cmpl;
- req->base.data = data;
- req->base.flags = flgs;
+ flgs &= ~CRYPTO_ACOMP_REQ_PRIVATE;
+ flgs |= req->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
+ crypto_request_set_callback(&req->base, flgs, cmpl, data);
}
/**
@@ -245,8 +355,169 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->slen = slen;
req->dlen = dlen;
- if (!req->dst)
- req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
+ req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT |
+ CRYPTO_ACOMP_REQ_SRC_NONDMA |
+ CRYPTO_ACOMP_REQ_DST_VIRT |
+ CRYPTO_ACOMP_REQ_DST_NONDMA);
+}
+
+/**
+ * acomp_request_set_src_sg() -- Sets source scatterlist
+ *
+ * Sets source scatterlist required by an acomp operation.
+ *
+ * @req: asynchronous compress request
+ * @src: pointer to input buffer scatterlist
+ * @slen: size of the input buffer
+ */
+static inline void acomp_request_set_src_sg(struct acomp_req *req,
+ struct scatterlist *src,
+ unsigned int slen)
+{
+ req->src = src;
+ req->slen = slen;
+
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
+}
+
+/**
+ * acomp_request_set_src_dma() -- Sets DMA source virtual address
+ *
+ * Sets source virtual address required by an acomp operation.
+ * The address must be usable for DMA.
+ *
+ * @req: asynchronous compress request
+ * @src: virtual address pointer to input buffer
+ * @slen: size of the input buffer
+ */
+static inline void acomp_request_set_src_dma(struct acomp_req *req,
+ const u8 *src, unsigned int slen)
+{
+ req->svirt = src;
+ req->slen = slen;
+
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
+ req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
+}
+
+/**
+ * acomp_request_set_src_nondma() -- Sets non-DMA source virtual address
+ *
+ * Sets source virtual address required by an acomp operation.
+ * The address can not be used for DMA.
+ *
+ * @req: asynchronous compress request
+ * @src: virtual address pointer to input buffer
+ * @slen: size of the input buffer
+ */
+static inline void acomp_request_set_src_nondma(struct acomp_req *req,
+ const u8 *src,
+ unsigned int slen)
+{
+ req->svirt = src;
+ req->slen = slen;
+
+ req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA;
+ req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
+}
+
+/**
+ * acomp_request_set_src_folio() -- Sets source folio
+ *
+ * Sets source folio required by an acomp operation.
+ *
+ * @req: asynchronous compress request
+ * @folio: pointer to input folio
+ * @off: input folio offset
+ * @len: size of the input buffer
+ */
+static inline void acomp_request_set_src_folio(struct acomp_req *req,
+ struct folio *folio, size_t off,
+ unsigned int len)
+{
+ sg_init_table(&req->chain.ssg, 1);
+ sg_set_folio(&req->chain.ssg, folio, len, off);
+ acomp_request_set_src_sg(req, &req->chain.ssg, len);
+}
+
+/**
+ * acomp_request_set_dst_sg() -- Sets destination scatterlist
+ *
+ * Sets destination scatterlist required by an acomp operation.
+ *
+ * @req: asynchronous compress request
+ * @dst: pointer to output buffer scatterlist
+ * @dlen: size of the output buffer
+ */
+static inline void acomp_request_set_dst_sg(struct acomp_req *req,
+ struct scatterlist *dst,
+ unsigned int dlen)
+{
+ req->dst = dst;
+ req->dlen = dlen;
+
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
+}
+
+/**
+ * acomp_request_set_dst_dma() -- Sets DMA destination virtual address
+ *
+ * Sets destination virtual address required by an acomp operation.
+ * The address must be usable for DMA.
+ *
+ * @req: asynchronous compress request
+ * @dst: virtual address pointer to output buffer
+ * @dlen: size of the output buffer
+ */
+static inline void acomp_request_set_dst_dma(struct acomp_req *req,
+ u8 *dst, unsigned int dlen)
+{
+ req->dvirt = dst;
+ req->dlen = dlen;
+
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
+ req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
+}
+
+/**
+ * acomp_request_set_dst_nondma() -- Sets non-DMA destination virtual address
+ *
+ * Sets destination virtual address required by an acomp operation.
+ * The address can not be used for DMA.
+ *
+ * @req: asynchronous compress request
+ * @dst: virtual address pointer to output buffer
+ * @dlen: size of the output buffer
+ */
+static inline void acomp_request_set_dst_nondma(struct acomp_req *req,
+ u8 *dst, unsigned int dlen)
+{
+ req->dvirt = dst;
+ req->dlen = dlen;
+
+ req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA;
+ req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
+}
+
+/**
+ * acomp_request_set_dst_folio() -- Sets destination folio
+ *
+ * Sets destination folio required by an acomp operation.
+ *
+ * @req: asynchronous compress request
+ * @folio: pointer to output folio
+ * @off: output folio offset
+ * @len: size of the output buffer
+ */
+static inline void acomp_request_set_dst_folio(struct acomp_req *req,
+ struct folio *folio, size_t off,
+ unsigned int len)
+{
+ sg_init_table(&req->chain.dsg, 1);
+ sg_set_folio(&req->chain.dsg, folio, len, off);
+ acomp_request_set_dst_sg(req, &req->chain.dsg, len);
}
/**
@@ -258,18 +529,7 @@ static inline void acomp_request_set_params(struct acomp_req *req,
*
* Return: zero on success; error code in case of error
*/
-static inline int crypto_acomp_compress(struct acomp_req *req)
-{
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int slen = req->slen;
- int ret;
-
- crypto_stats_get(alg);
- ret = tfm->compress(req);
- crypto_stats_compress(slen, ret, alg);
- return ret;
-}
+int crypto_acomp_compress(struct acomp_req *req);
/**
* crypto_acomp_decompress() -- Invoke asynchronous decompress operation
@@ -280,17 +540,18 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
*
* Return: zero on success; error code in case of error
*/
-static inline int crypto_acomp_decompress(struct acomp_req *req)
+int crypto_acomp_decompress(struct acomp_req *req);
+
+static inline struct acomp_req *acomp_request_on_stack_init(
+ char *buf, struct crypto_acomp *tfm)
{
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int slen = req->slen;
- int ret;
-
- crypto_stats_get(alg);
- ret = tfm->decompress(req);
- crypto_stats_decompress(slen, ret, alg);
- return ret;
+ struct acomp_req *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base, crypto_acomp_tfm(tfm));
+ return req;
}
+struct acomp_req *acomp_request_clone(struct acomp_req *req,
+ size_t total, gfp_t gfp);
+
#endif
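The new REQ_*_VIRT/NONDMA flags and their setters let a caller hand the acomp API plain virtual addresses instead of scatterlists, and ACOMP_REQUEST_ON_STACK pairs with a synchronous-only tfm to avoid heap allocation entirely. A hedged usage sketch ("deflate" is only an example algorithm name; buffers assumed to be linear kernel memory):

static int compress_buf(const u8 *src, unsigned int slen,
                        u8 *dst, unsigned int dlen)
{
        struct crypto_acomp *tfm;
        int err;

        /* mask out CRYPTO_ALG_ASYNC to get a sync implementation */
        tfm = crypto_alloc_acomp("deflate", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                ACOMP_REQUEST_ON_STACK(req, tfm);

                acomp_request_set_callback(req, 0, NULL, NULL);
                acomp_request_set_src_dma(req, src, slen);
                acomp_request_set_dst_dma(req, dst, dlen);
                /* on success, req->dlen holds the bytes produced */
                err = crypto_acomp_compress(req);
        }

        crypto_free_acomp(tfm);
        return err;
}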
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index e728469c4ccc..8e66a1fa9c78 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -8,9 +8,11 @@
#ifndef _CRYPTO_AEAD_H
#define _CRYPTO_AEAD_H
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
-#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/types.h>
/**
* DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
@@ -26,15 +28,12 @@
*
* For example: authenc(hmac(sha256), cbc(aes))
*
- * The example code provided for the symmetric key cipher operation
- * applies here as well. Naturally all *skcipher* symbols must be exchanged
- * the *aead* pendants discussed in the following. In addition, for the AEAD
- * operation, the aead_request_set_ad function must be used to set the
- * pointer to the associated data memory location before performing the
- * encryption or decryption operation. In case of an encryption, the associated
- * data memory is filled during the encryption operation. For decryption, the
- * associated data memory must contain data that is used to verify the integrity
- * of the decrypted data. Another deviation from the asynchronous block cipher
+ * The example code provided for the symmetric key cipher operation applies
+ * here as well. Naturally all *skcipher* symbols must be exchanged for the *aead*
+ * pendants discussed in the following. In addition, for the AEAD operation,
+ * the aead_request_set_ad function must be used to set the pointer to the
+ * associated data memory location before performing the encryption or
+ * decryption operation. Another deviation from the asynchronous block cipher
* operation is that the caller should explicitly check for -EBADMSG of the
* crypto_aead_decrypt. That error indicates an authentication error, i.e.
* a breach in the integrity of the message. In essence, that -EBADMSG error
@@ -48,7 +47,10 @@
*
* The destination scatterlist has the same layout, except that the plaintext
* (resp. ciphertext) will grow (resp. shrink) by the authentication tag size
- * during encryption (resp. decryption).
+ * during encryption (resp. decryption). The authentication tag is generated
+ * during the encryption operation and appended to the ciphertext. During
+ * decryption, the authentication tag is consumed along with the ciphertext and
+ * used to verify the integrity of the plaintext and the associated data.
*
* In-place encryption/decryption is enabled by using the same scatterlist
* pointer for both the source and destination.
@@ -73,6 +75,7 @@
*/
struct crypto_aead;
+struct scatterlist;
/**
* struct aead_request - AEAD request
@@ -156,6 +159,21 @@ struct crypto_aead {
struct crypto_tfm base;
};
+struct crypto_sync_aead {
+ struct crypto_aead base;
+};
+
+#define MAX_SYNC_AEAD_REQSIZE 384
+
+#define SYNC_AEAD_REQUEST_ON_STACK(name, _tfm) \
+ char __##name##_desc[sizeof(struct aead_request) + \
+ MAX_SYNC_AEAD_REQSIZE \
+ ] CRYPTO_MINALIGN_ATTR; \
+ struct aead_request *name = \
+ (((struct aead_request *)__##name##_desc)->base.tfm = \
+ crypto_sync_aead_tfm((_tfm)), \
+ (void *)__##name##_desc)
+
static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_aead, base);
@@ -177,11 +195,18 @@ static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
*/
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
+struct crypto_sync_aead *crypto_alloc_sync_aead(const char *alg_name, u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
{
return &tfm->base;
}
+static inline struct crypto_tfm *crypto_sync_aead_tfm(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_tfm(&tfm->base);
+}
+
/**
* crypto_free_aead() - zeroize and free aead handle
* @tfm: cipher handle to be freed
@@ -193,6 +218,23 @@ static inline void crypto_free_aead(struct crypto_aead *tfm)
crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
}
+static inline void crypto_free_sync_aead(struct crypto_sync_aead *tfm)
+{
+ crypto_free_aead(&tfm->base);
+}
+
+/**
+ * crypto_has_aead() - Search for the availability of an aead.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * aead
+ * @type: specifies the type of the aead
+ * @mask: specifies the mask for the aead
+ *
+ * Return: true when the aead is known to the kernel crypto API; false
+ * otherwise
+ */
+int crypto_has_aead(const char *alg_name, u32 type, u32 mask);
+
static inline const char *crypto_aead_driver_name(struct crypto_aead *tfm)
{
return crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
@@ -223,6 +265,11 @@ static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
return crypto_aead_alg_ivsize(crypto_aead_alg(tfm));
}
+static inline unsigned int crypto_sync_aead_ivsize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_ivsize(&tfm->base);
+}
+
/**
* crypto_aead_authsize() - obtain maximum authentication data size
* @tfm: cipher handle
@@ -240,6 +287,11 @@ static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
return tfm->authsize;
}
+static inline unsigned int crypto_sync_aead_authsize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_authsize(&tfm->base);
+}
+
static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
{
return alg->maxauthsize;
@@ -250,6 +302,11 @@ static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
}
+static inline unsigned int crypto_sync_aead_maxauthsize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_maxauthsize(&tfm->base);
+}
+
/**
* crypto_aead_blocksize() - obtain block size of cipher
* @tfm: cipher handle
@@ -265,6 +322,11 @@ static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
}
+static inline unsigned int crypto_sync_aead_blocksize(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_blocksize(&tfm->base);
+}
+
static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
{
return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
@@ -285,6 +347,21 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
}
+static inline u32 crypto_sync_aead_get_flags(struct crypto_sync_aead *tfm)
+{
+ return crypto_aead_get_flags(&tfm->base);
+}
+
+static inline void crypto_sync_aead_set_flags(struct crypto_sync_aead *tfm, u32 flags)
+{
+ crypto_aead_set_flags(&tfm->base, flags);
+}
+
+static inline void crypto_sync_aead_clear_flags(struct crypto_sync_aead *tfm, u32 flags)
+{
+ crypto_aead_clear_flags(&tfm->base, flags);
+}
+
/**
* crypto_aead_setkey() - set key for cipher
* @tfm: cipher handle
@@ -304,6 +381,12 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
int crypto_aead_setkey(struct crypto_aead *tfm,
const u8 *key, unsigned int keylen);
+static inline int crypto_sync_aead_setkey(struct crypto_sync_aead *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto_aead_setkey(&tfm->base, key, keylen);
+}
+
/**
* crypto_aead_setauthsize() - set authentication data size
* @tfm: cipher handle
@@ -316,11 +399,24 @@ int crypto_aead_setkey(struct crypto_aead *tfm,
*/
int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
+static inline int crypto_sync_aead_setauthsize(struct crypto_sync_aead *tfm,
+ unsigned int authsize)
+{
+ return crypto_aead_setauthsize(&tfm->base, authsize);
+}
+
static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
{
return __crypto_aead_cast(req->base.tfm);
}
+static inline struct crypto_sync_aead *crypto_sync_aead_reqtfm(struct aead_request *req)
+{
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ return container_of(tfm, struct crypto_sync_aead, base);
+}
+
/**
* crypto_aead_encrypt() - encrypt plaintext
* @req: reference to the aead_request handle that holds all information
@@ -402,6 +498,12 @@ static inline void aead_request_set_tfm(struct aead_request *req,
req->base.tfm = crypto_aead_tfm(tfm);
}
+static inline void aead_request_set_sync_tfm(struct aead_request *req,
+ struct crypto_sync_aead *tfm)
+{
+ aead_request_set_tfm(req, &tfm->base);
+}
+
/**
* aead_request_alloc() - allocate request data structure
* @tfm: cipher handle to be registered with the request
@@ -490,7 +592,7 @@ static inline void aead_request_set_callback(struct aead_request *req,
* The memory structure for cipher operation has the following structure:
*
* - AEAD encryption input: assoc data || plaintext
- * - AEAD encryption output: assoc data || cipherntext || auth tag
+ * - AEAD encryption output: assoc data || ciphertext || auth tag
* - AEAD decryption input: assoc data || ciphertext || auth tag
* - AEAD decryption output: assoc data || plaintext
*
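The layout rules above translate into a short call sequence: set the AD length first, then point src/dst at scatterlists laid out as described. A hedged in-place encryption sketch ("gcm(aes)" as the example algorithm; @sg must leave authsize bytes of slack after the plaintext for the tag):

static int aead_encrypt_once(struct scatterlist *sg, unsigned int assoclen,
                             unsigned int ptlen, const u8 *key,
                             unsigned int keylen, u8 *iv)
{
        struct crypto_aead *tfm;
        struct aead_request *req;
        int err;

        tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_aead_setkey(tfm, key, keylen);
        if (err)
                goto out;

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out;
        }

        aead_request_set_callback(req, 0, NULL, NULL);
        aead_request_set_ad(req, assoclen);             /* assoc data first */
        aead_request_set_crypt(req, sg, sg, ptlen, iv); /* in-place */
        err = crypto_aead_encrypt(req);  /* -EINPROGRESS if truly async */
        aead_request_free(req);
out:
        crypto_free_aead(tfm);
        return err;
}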
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 2090729701ab..9339da7c20a8 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -87,4 +87,9 @@ void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
extern const u8 crypto_aes_sbox[];
extern const u8 crypto_aes_inv_sbox[];
+void aescfb_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ int len, const u8 iv[AES_BLOCK_SIZE]);
+void aescfb_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ int len, const u8 iv[AES_BLOCK_SIZE]);
+
#endif
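The aescfb_*() helpers follow the aes_encrypt()/aes_decrypt() library pattern: they operate directly on an expanded key, with no tfm or request objects involved. A sketch (AES-128 assumed; key, iv and buffers provided by the caller):

static int cfb_encrypt_buf(const u8 *key, u8 *dst, const u8 *src,
                           int len, const u8 iv[AES_BLOCK_SIZE])
{
        struct crypto_aes_ctx ctx;
        int err;

        err = aes_expandkey(&ctx, key, AES_KEYSIZE_128);
        if (err)
                return err;

        aescfb_encrypt(&ctx, dst, src, len, iv);
        memzero_explicit(&ctx, sizeof(ctx));    /* scrub the round keys */
        return 0;
}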
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 5764b46bd1ec..cdf7da74bf2f 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -7,27 +7,24 @@
*/
#ifndef _CRYPTO_AKCIPHER_H
#define _CRYPTO_AKCIPHER_H
+
+#include <linux/atomic.h>
#include <linux/crypto.h>
/**
- * struct akcipher_request - public key request
+ * struct akcipher_request - public key cipher request
*
* @base: Common attributes for async crypto requests
* @src: Source data
- * For verify op this is signature + digest, in that case
- * total size of @src is @src_len + @dst_len.
- * @dst: Destination data (Should be NULL for verify op)
+ * @dst: Destination data
* @src_len: Size of the input buffer
- * For verify op it's size of signature part of @src, this part
- * is supposed to be operated by cipher.
- * @dst_len: Size of @dst buffer (for all ops except verify).
+ * @dst_len: Size of @dst buffer
* It needs to be at least as big as the expected result
* depending on the operation.
* After operation it will be updated with the actual size of the
* result.
* In case of error where the dst sgl size was insufficient,
* it will be updated to the size required for the operation.
- * For verify op this is size of digest part in @src.
* @__ctx: Start of private context data
*/
struct akcipher_request {
@@ -43,22 +40,18 @@ struct akcipher_request {
* struct crypto_akcipher - user-instantiated objects which encapsulate
* algorithms and core processing logic
*
+ * @reqsize: Request context size required by algorithm implementation
* @base: Common crypto API algorithm data structure
*/
struct crypto_akcipher {
+ unsigned int reqsize;
+
struct crypto_tfm base;
};
/**
- * struct akcipher_alg - generic public key algorithm
+ * struct akcipher_alg - generic public key cipher algorithm
*
- * @sign: Function performs a sign operation as defined by public key
- * algorithm. In case of error, where the dst_len was insufficient,
- * the req->dst_len will be updated to the size required for the
- * operation
- * @verify: Function performs a complete verify operation as defined by
- * public key algorithm, returning verification status. Requires
- * digest value as input parameter.
* @encrypt: Function performs an encrypt operation as defined by public key
* algorithm. In case of error, where the dst_len was insufficient,
* the req->dst_len will be updated to the size required for the
@@ -86,12 +79,9 @@ struct crypto_akcipher {
* counterpart to @init, used to remove various changes set in
* @init.
*
- * @reqsize: Request context size required by algorithm implementation
* @base: Common crypto API algorithm data structure
*/
struct akcipher_alg {
- int (*sign)(struct akcipher_request *req);
- int (*verify)(struct akcipher_request *req);
int (*encrypt)(struct akcipher_request *req);
int (*decrypt)(struct akcipher_request *req);
int (*set_pub_key)(struct crypto_akcipher *tfm, const void *key,
@@ -102,14 +92,13 @@ struct akcipher_alg {
int (*init)(struct crypto_akcipher *tfm);
void (*exit)(struct crypto_akcipher *tfm);
- unsigned int reqsize;
struct crypto_alg base;
};
/**
- * DOC: Generic Public Key API
+ * DOC: Generic Public Key Cipher API
*
- * The Public Key API is used with the algorithms of type
+ * The Public Key Cipher API is used with the algorithms of type
* CRYPTO_ALG_TYPE_AKCIPHER (listed as type "akcipher" in /proc/crypto)
*/
@@ -155,7 +144,7 @@ static inline struct akcipher_alg *crypto_akcipher_alg(
static inline unsigned int crypto_akcipher_reqsize(struct crypto_akcipher *tfm)
{
- return crypto_akcipher_alg(tfm)->reqsize;
+ return tfm->reqsize;
}
static inline void akcipher_request_set_tfm(struct akcipher_request *req,
@@ -240,10 +229,9 @@ static inline void akcipher_request_set_callback(struct akcipher_request *req,
*
* @req: public key request
* @src: ptr to input scatter list
- * @dst: ptr to output scatter list or NULL for verify op
+ * @dst: ptr to output scatter list
* @src_len: size of the src input scatter list to be processed
- * @dst_len: size of the dst output scatter list or size of signature
- * portion in @src for verify op
+ * @dst_len: size of the dst output scatter list
*/
static inline void akcipher_request_set_crypt(struct akcipher_request *req,
struct scatterlist *src,
@@ -287,15 +275,8 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- unsigned int src_len = req->src_len;
- int ret;
-
- crypto_stats_get(calg);
- ret = alg->encrypt(req);
- crypto_stats_akcipher_encrypt(src_len, ret, calg);
- return ret;
+
+ return crypto_akcipher_alg(tfm)->encrypt(req);
}
/**
@@ -311,66 +292,45 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- unsigned int src_len = req->src_len;
- int ret;
-
- crypto_stats_get(calg);
- ret = alg->decrypt(req);
- crypto_stats_akcipher_decrypt(src_len, ret, calg);
- return ret;
+
+ return crypto_akcipher_alg(tfm)->decrypt(req);
}
/**
- * crypto_akcipher_sign() - Invoke public key sign operation
+ * crypto_akcipher_sync_encrypt() - Invoke public key encrypt operation
*
- * Function invokes the specific public key sign operation for a given
+ * Function invokes the specific public key encrypt operation for a given
* public key algorithm
*
- * @req: asymmetric key request
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ * @src: source buffer
+ * @slen: source length
+ * @dst: destination buffer
+ * @dlen: destination length
*
* Return: zero on success; error code in case of error
*/
-static inline int crypto_akcipher_sign(struct akcipher_request *req)
-{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
-
- crypto_stats_get(calg);
- ret = alg->sign(req);
- crypto_stats_akcipher_sign(ret, calg);
- return ret;
-}
+int crypto_akcipher_sync_encrypt(struct crypto_akcipher *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
/**
- * crypto_akcipher_verify() - Invoke public key signature verification
- *
- * Function invokes the specific public key signature verification operation
- * for a given public key algorithm.
+ * crypto_akcipher_sync_decrypt() - Invoke public key decrypt operation
*
- * @req: asymmetric key request
+ * Function invokes the specific public key decrypt operation for a given
+ * public key algorithm
*
- * Note: req->dst should be NULL, req->src should point to SG of size
- * (req->src_size + req->dst_size), containing signature (of req->src_size
- * length) with appended digest (of req->dst_size length).
+ * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ * @src: source buffer
+ * @slen: source length
+ * @dst: destination buffer
+ * @dlen: destination length
*
- * Return: zero on verification success; error code in case of error.
+ * Return: Output length on success; error code in case of error
*/
-static inline int crypto_akcipher_verify(struct akcipher_request *req)
-{
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
-
- crypto_stats_get(calg);
- ret = alg->verify(req);
- crypto_stats_akcipher_verify(ret, calg);
- return ret;
-}
+int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
/**
* crypto_akcipher_set_pub_key() - Invoke set public key operation
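With sign/verify gone from akcipher, the remaining encrypt/decrypt path gains synchronous wrappers that take plain linear buffers and hide the request and scatterlist plumbing. An illustrative caller ("rsa" as the example algorithm):

static int pubkey_encrypt_buf(const void *pubkey, unsigned int keylen,
                              const void *in, unsigned int inlen,
                              void *out, unsigned int outlen)
{
        struct crypto_akcipher *tfm;
        int err;

        tfm = crypto_alloc_akcipher("rsa", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_akcipher_set_pub_key(tfm, pubkey, keylen);
        if (!err)
                err = crypto_akcipher_sync_encrypt(tfm, in, inlen,
                                                   out, outlen);

        crypto_free_akcipher(tfm);
        return err;
}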
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 86f0748009af..05deea9dac5e 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -7,9 +7,13 @@
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H
+#include <crypto/utils.h>
+#include <linux/align.h>
+#include <linux/cache.h>
#include <linux/crypto.h>
#include <linux/list.h>
-#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
/*
* Maximum values for blocksize and alignmask, used to allocate
@@ -17,31 +21,40 @@
* algs and architectures. Ciphers have a lower maximum size.
*/
#define MAX_ALGAPI_BLOCKSIZE 160
-#define MAX_ALGAPI_ALIGNMASK 63
+#define MAX_ALGAPI_ALIGNMASK 127
#define MAX_CIPHER_BLOCKSIZE 16
#define MAX_CIPHER_ALIGNMASK 15
+#ifdef ARCH_DMA_MINALIGN
+#define CRYPTO_DMA_ALIGN ARCH_DMA_MINALIGN
+#else
+#define CRYPTO_DMA_ALIGN CRYPTO_MINALIGN
+#endif
+
+#define CRYPTO_DMA_PADDING ((CRYPTO_DMA_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
+
+/*
+ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
+ * arbitrary modules to be loaded. Loading from userspace may still need the
+ * unprefixed names, so those aliases are retained as well.
+ * This uses MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
+ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
+ * expands twice on the same line. Instead, use a separate base name for the
+ * alias.
+ */
+#define MODULE_ALIAS_CRYPTO(name) \
+ MODULE_INFO(alias, name); \
+ MODULE_INFO(alias, "crypto-" name)
+
struct crypto_aead;
struct crypto_instance;
struct module;
+struct notifier_block;
struct rtattr;
+struct scatterlist;
struct seq_file;
struct sk_buff;
-
-struct crypto_type {
- unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
- unsigned int (*extsize)(struct crypto_alg *alg);
- int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
- int (*init_tfm)(struct crypto_tfm *tfm);
- void (*show)(struct seq_file *m, struct crypto_alg *alg);
- int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
- void (*free)(struct crypto_instance *inst);
-
- unsigned int type;
- unsigned int maskclear;
- unsigned int maskset;
- unsigned int tfmsize;
-};
+union crypto_no_such_thing;
struct crypto_instance {
struct crypto_alg alg;
@@ -61,8 +74,11 @@ struct crypto_instance {
struct crypto_template {
struct list_head list;
struct hlist_head instances;
+ struct hlist_head dead;
struct module *module;
+ struct work_struct free_work;
+
int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
char name[CRYPTO_MAX_ALG_NAME];
@@ -92,10 +108,34 @@ struct crypto_queue {
};
struct scatter_walk {
+ /* Must be the first member, see struct skcipher_walk. */
+ union {
+ void *const addr;
+
+ /* Private API field, do not touch. */
+ union crypto_no_such_thing *__addr;
+ };
struct scatterlist *sg;
unsigned int offset;
};
+struct crypto_attr_alg {
+ char name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct crypto_attr_type {
+ u32 type;
+ u32 mask;
+};
+
+/*
+ * Algorithm registration interface.
+ */
+int crypto_register_alg(struct crypto_alg *alg);
+void crypto_unregister_alg(struct crypto_alg *alg);
+int crypto_register_algs(struct crypto_alg *algs, int count);
+void crypto_unregister_algs(struct crypto_alg *algs, int count);
+
void crypto_mod_put(struct crypto_alg *alg);
int crypto_register_template(struct crypto_template *tmpl);
@@ -118,9 +158,16 @@ void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
-int crypto_attr_u32(struct rtattr *rta, u32 *num);
-int crypto_inst_setname(struct crypto_instance *inst, const char *name,
- struct crypto_alg *alg);
+int __crypto_inst_setname(struct crypto_instance *inst, const char *name,
+ const char *driver, struct crypto_alg *alg);
+
+#define crypto_inst_setname(inst, name, ...) \
+ CONCATENATE(crypto_inst_setname_, COUNT_ARGS(__VA_ARGS__))( \
+ inst, name, ##__VA_ARGS__)
+#define crypto_inst_setname_1(inst, name, alg) \
+ __crypto_inst_setname(inst, name, name, alg)
+#define crypto_inst_setname_2(inst, name, driver, alg) \
+ __crypto_inst_setname(inst, name, driver, alg)
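
The COUNT_ARGS()/CONCATENATE() dispatch above accepts an optional driver-name argument. A sketch of the two call shapes inside a template's ->create() callback (the template names, "inst" and "alg" are illustrative):

	/* Short form: the driver-name component defaults to the template name,
	 * giving cra_name "ctr(<cra_name>)" and cra_driver_name
	 * "ctr(<cra_driver_name>)".
	 */
	err = crypto_inst_setname(inst, "ctr", alg);

	/* Explicit form: a separate driver-name component. */
	err = crypto_inst_setname(inst, "essiv", "essiv-generic", alg);
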
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
@@ -134,48 +181,34 @@ static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
}
void crypto_inc(u8 *a, unsigned int size);
-void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);
-static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
+static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
- __builtin_constant_p(size) &&
- (size % sizeof(unsigned long)) == 0) {
- unsigned long *d = (unsigned long *)dst;
- unsigned long *s = (unsigned long *)src;
-
- while (size > 0) {
- *d++ ^= *s++;
- size -= sizeof(unsigned long);
- }
- } else {
- __crypto_xor(dst, dst, src, size);
- }
+ return tfm->__crt_ctx;
}
-static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
- unsigned int size)
+static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm,
+ unsigned int align)
{
- if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
- __builtin_constant_p(size) &&
- (size % sizeof(unsigned long)) == 0) {
- unsigned long *d = (unsigned long *)dst;
- unsigned long *s1 = (unsigned long *)src1;
- unsigned long *s2 = (unsigned long *)src2;
-
- while (size > 0) {
- *d++ = *s1++ ^ *s2++;
- size -= sizeof(unsigned long);
- }
- } else {
- __crypto_xor(dst, src1, src2, size);
- }
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(crypto_tfm_ctx(tfm), align);
}
-static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
+static inline unsigned int crypto_dma_align(void)
{
- return PTR_ALIGN(crypto_tfm_ctx(tfm),
- crypto_tfm_alg_alignmask(tfm) + 1);
+ return CRYPTO_DMA_ALIGN;
+}
+
+static inline unsigned int crypto_dma_padding(void)
+{
+ return (crypto_dma_align() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
+}
+
+static inline void *crypto_tfm_ctx_dma(struct crypto_tfm *tfm)
+{
+ return crypto_tfm_ctx_align(tfm, crypto_dma_align());
}
static inline struct crypto_instance *crypto_tfm_alg_instance(
@@ -221,23 +254,6 @@ static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}
-noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
-
-/**
- * crypto_memneq - Compare two areas of memory without leaking
- * timing information.
- *
- * @a: One area of memory
- * @b: Another area of memory
- * @size: The size of the area.
- *
- * Returns 0 when data is equal, 1 otherwise.
- */
-static inline int crypto_memneq(const void *a, const void *b, size_t size)
-{
- return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
-}
-
int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);
@@ -248,4 +264,25 @@ enum {
CRYPTO_MSG_ALG_LOADED,
};
+static inline void crypto_request_complete(struct crypto_async_request *req,
+ int err)
+{
+ req->complete(req->data, err);
+}
+
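Since crypto_request_complete() passes req->data rather than the request itself, completion callbacks receive the caller's cookie directly. A sketch (struct example_op and its helper are hypothetical):

	static void example_done(void *data, int err)
	{
		struct example_op *op = data;	/* hypothetical per-request state */

		if (err == -EINPROGRESS)
			return;	/* backlogged request was started; final call follows */
		example_op_finish(op, err);	/* hypothetical bookkeeping */
	}
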
+static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
+}
+
+static inline bool crypto_tfm_req_virt(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_REQ_VIRT;
+}
+
+static inline u32 crypto_request_flags(struct crypto_async_request *req)
+{
+ return req->flags & ~CRYPTO_TFM_REQ_ON_STACK;
+}
+
#endif /* _CRYPTO_ALGAPI_H */
diff --git a/include/crypto/aria.h b/include/crypto/aria.h
new file mode 100644
index 000000000000..73295146be11
--- /dev/null
+++ b/include/crypto/aria.h
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic API.
+ *
+ * ARIA Cipher Algorithm.
+ *
+ * Documentation of ARIA can be found in RFC 5794.
+ * Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com>
+ *
+ * Information for ARIA
+ * http://210.104.33.10/ARIA/index-e.html (English)
+ * http://seed.kisa.or.kr/ (Korean)
+ *
+ * The public domain version is distributed at the links above.
+ */
+
+#ifndef _CRYPTO_ARIA_H
+#define _CRYPTO_ARIA_H
+
+#include <crypto/algapi.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/byteorder.h>
+
+#define ARIA_MIN_KEY_SIZE 16
+#define ARIA_MAX_KEY_SIZE 32
+#define ARIA_BLOCK_SIZE 16
+#define ARIA_MAX_RD_KEYS 17
+#define ARIA_RD_KEY_WORDS (ARIA_BLOCK_SIZE / sizeof(u32))
+
+struct aria_ctx {
+ u32 enc_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS];
+ u32 dec_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS];
+ int rounds;
+ int key_length;
+};
+
+static const u32 s1[256] = {
+ 0x00636363, 0x007c7c7c, 0x00777777, 0x007b7b7b,
+ 0x00f2f2f2, 0x006b6b6b, 0x006f6f6f, 0x00c5c5c5,
+ 0x00303030, 0x00010101, 0x00676767, 0x002b2b2b,
+ 0x00fefefe, 0x00d7d7d7, 0x00ababab, 0x00767676,
+ 0x00cacaca, 0x00828282, 0x00c9c9c9, 0x007d7d7d,
+ 0x00fafafa, 0x00595959, 0x00474747, 0x00f0f0f0,
+ 0x00adadad, 0x00d4d4d4, 0x00a2a2a2, 0x00afafaf,
+ 0x009c9c9c, 0x00a4a4a4, 0x00727272, 0x00c0c0c0,
+ 0x00b7b7b7, 0x00fdfdfd, 0x00939393, 0x00262626,
+ 0x00363636, 0x003f3f3f, 0x00f7f7f7, 0x00cccccc,
+ 0x00343434, 0x00a5a5a5, 0x00e5e5e5, 0x00f1f1f1,
+ 0x00717171, 0x00d8d8d8, 0x00313131, 0x00151515,
+ 0x00040404, 0x00c7c7c7, 0x00232323, 0x00c3c3c3,
+ 0x00181818, 0x00969696, 0x00050505, 0x009a9a9a,
+ 0x00070707, 0x00121212, 0x00808080, 0x00e2e2e2,
+ 0x00ebebeb, 0x00272727, 0x00b2b2b2, 0x00757575,
+ 0x00090909, 0x00838383, 0x002c2c2c, 0x001a1a1a,
+ 0x001b1b1b, 0x006e6e6e, 0x005a5a5a, 0x00a0a0a0,
+ 0x00525252, 0x003b3b3b, 0x00d6d6d6, 0x00b3b3b3,
+ 0x00292929, 0x00e3e3e3, 0x002f2f2f, 0x00848484,
+ 0x00535353, 0x00d1d1d1, 0x00000000, 0x00ededed,
+ 0x00202020, 0x00fcfcfc, 0x00b1b1b1, 0x005b5b5b,
+ 0x006a6a6a, 0x00cbcbcb, 0x00bebebe, 0x00393939,
+ 0x004a4a4a, 0x004c4c4c, 0x00585858, 0x00cfcfcf,
+ 0x00d0d0d0, 0x00efefef, 0x00aaaaaa, 0x00fbfbfb,
+ 0x00434343, 0x004d4d4d, 0x00333333, 0x00858585,
+ 0x00454545, 0x00f9f9f9, 0x00020202, 0x007f7f7f,
+ 0x00505050, 0x003c3c3c, 0x009f9f9f, 0x00a8a8a8,
+ 0x00515151, 0x00a3a3a3, 0x00404040, 0x008f8f8f,
+ 0x00929292, 0x009d9d9d, 0x00383838, 0x00f5f5f5,
+ 0x00bcbcbc, 0x00b6b6b6, 0x00dadada, 0x00212121,
+ 0x00101010, 0x00ffffff, 0x00f3f3f3, 0x00d2d2d2,
+ 0x00cdcdcd, 0x000c0c0c, 0x00131313, 0x00ececec,
+ 0x005f5f5f, 0x00979797, 0x00444444, 0x00171717,
+ 0x00c4c4c4, 0x00a7a7a7, 0x007e7e7e, 0x003d3d3d,
+ 0x00646464, 0x005d5d5d, 0x00191919, 0x00737373,
+ 0x00606060, 0x00818181, 0x004f4f4f, 0x00dcdcdc,
+ 0x00222222, 0x002a2a2a, 0x00909090, 0x00888888,
+ 0x00464646, 0x00eeeeee, 0x00b8b8b8, 0x00141414,
+ 0x00dedede, 0x005e5e5e, 0x000b0b0b, 0x00dbdbdb,
+ 0x00e0e0e0, 0x00323232, 0x003a3a3a, 0x000a0a0a,
+ 0x00494949, 0x00060606, 0x00242424, 0x005c5c5c,
+ 0x00c2c2c2, 0x00d3d3d3, 0x00acacac, 0x00626262,
+ 0x00919191, 0x00959595, 0x00e4e4e4, 0x00797979,
+ 0x00e7e7e7, 0x00c8c8c8, 0x00373737, 0x006d6d6d,
+ 0x008d8d8d, 0x00d5d5d5, 0x004e4e4e, 0x00a9a9a9,
+ 0x006c6c6c, 0x00565656, 0x00f4f4f4, 0x00eaeaea,
+ 0x00656565, 0x007a7a7a, 0x00aeaeae, 0x00080808,
+ 0x00bababa, 0x00787878, 0x00252525, 0x002e2e2e,
+ 0x001c1c1c, 0x00a6a6a6, 0x00b4b4b4, 0x00c6c6c6,
+ 0x00e8e8e8, 0x00dddddd, 0x00747474, 0x001f1f1f,
+ 0x004b4b4b, 0x00bdbdbd, 0x008b8b8b, 0x008a8a8a,
+ 0x00707070, 0x003e3e3e, 0x00b5b5b5, 0x00666666,
+ 0x00484848, 0x00030303, 0x00f6f6f6, 0x000e0e0e,
+ 0x00616161, 0x00353535, 0x00575757, 0x00b9b9b9,
+ 0x00868686, 0x00c1c1c1, 0x001d1d1d, 0x009e9e9e,
+ 0x00e1e1e1, 0x00f8f8f8, 0x00989898, 0x00111111,
+ 0x00696969, 0x00d9d9d9, 0x008e8e8e, 0x00949494,
+ 0x009b9b9b, 0x001e1e1e, 0x00878787, 0x00e9e9e9,
+ 0x00cecece, 0x00555555, 0x00282828, 0x00dfdfdf,
+ 0x008c8c8c, 0x00a1a1a1, 0x00898989, 0x000d0d0d,
+ 0x00bfbfbf, 0x00e6e6e6, 0x00424242, 0x00686868,
+ 0x00414141, 0x00999999, 0x002d2d2d, 0x000f0f0f,
+ 0x00b0b0b0, 0x00545454, 0x00bbbbbb, 0x00161616
+};
+
+static const u32 s2[256] = {
+ 0xe200e2e2, 0x4e004e4e, 0x54005454, 0xfc00fcfc,
+ 0x94009494, 0xc200c2c2, 0x4a004a4a, 0xcc00cccc,
+ 0x62006262, 0x0d000d0d, 0x6a006a6a, 0x46004646,
+ 0x3c003c3c, 0x4d004d4d, 0x8b008b8b, 0xd100d1d1,
+ 0x5e005e5e, 0xfa00fafa, 0x64006464, 0xcb00cbcb,
+ 0xb400b4b4, 0x97009797, 0xbe00bebe, 0x2b002b2b,
+ 0xbc00bcbc, 0x77007777, 0x2e002e2e, 0x03000303,
+ 0xd300d3d3, 0x19001919, 0x59005959, 0xc100c1c1,
+ 0x1d001d1d, 0x06000606, 0x41004141, 0x6b006b6b,
+ 0x55005555, 0xf000f0f0, 0x99009999, 0x69006969,
+ 0xea00eaea, 0x9c009c9c, 0x18001818, 0xae00aeae,
+ 0x63006363, 0xdf00dfdf, 0xe700e7e7, 0xbb00bbbb,
+ 0x00000000, 0x73007373, 0x66006666, 0xfb00fbfb,
+ 0x96009696, 0x4c004c4c, 0x85008585, 0xe400e4e4,
+ 0x3a003a3a, 0x09000909, 0x45004545, 0xaa00aaaa,
+ 0x0f000f0f, 0xee00eeee, 0x10001010, 0xeb00ebeb,
+ 0x2d002d2d, 0x7f007f7f, 0xf400f4f4, 0x29002929,
+ 0xac00acac, 0xcf00cfcf, 0xad00adad, 0x91009191,
+ 0x8d008d8d, 0x78007878, 0xc800c8c8, 0x95009595,
+ 0xf900f9f9, 0x2f002f2f, 0xce00cece, 0xcd00cdcd,
+ 0x08000808, 0x7a007a7a, 0x88008888, 0x38003838,
+ 0x5c005c5c, 0x83008383, 0x2a002a2a, 0x28002828,
+ 0x47004747, 0xdb00dbdb, 0xb800b8b8, 0xc700c7c7,
+ 0x93009393, 0xa400a4a4, 0x12001212, 0x53005353,
+ 0xff00ffff, 0x87008787, 0x0e000e0e, 0x31003131,
+ 0x36003636, 0x21002121, 0x58005858, 0x48004848,
+ 0x01000101, 0x8e008e8e, 0x37003737, 0x74007474,
+ 0x32003232, 0xca00caca, 0xe900e9e9, 0xb100b1b1,
+ 0xb700b7b7, 0xab00abab, 0x0c000c0c, 0xd700d7d7,
+ 0xc400c4c4, 0x56005656, 0x42004242, 0x26002626,
+ 0x07000707, 0x98009898, 0x60006060, 0xd900d9d9,
+ 0xb600b6b6, 0xb900b9b9, 0x11001111, 0x40004040,
+ 0xec00ecec, 0x20002020, 0x8c008c8c, 0xbd00bdbd,
+ 0xa000a0a0, 0xc900c9c9, 0x84008484, 0x04000404,
+ 0x49004949, 0x23002323, 0xf100f1f1, 0x4f004f4f,
+ 0x50005050, 0x1f001f1f, 0x13001313, 0xdc00dcdc,
+ 0xd800d8d8, 0xc000c0c0, 0x9e009e9e, 0x57005757,
+ 0xe300e3e3, 0xc300c3c3, 0x7b007b7b, 0x65006565,
+ 0x3b003b3b, 0x02000202, 0x8f008f8f, 0x3e003e3e,
+ 0xe800e8e8, 0x25002525, 0x92009292, 0xe500e5e5,
+ 0x15001515, 0xdd00dddd, 0xfd00fdfd, 0x17001717,
+ 0xa900a9a9, 0xbf00bfbf, 0xd400d4d4, 0x9a009a9a,
+ 0x7e007e7e, 0xc500c5c5, 0x39003939, 0x67006767,
+ 0xfe00fefe, 0x76007676, 0x9d009d9d, 0x43004343,
+ 0xa700a7a7, 0xe100e1e1, 0xd000d0d0, 0xf500f5f5,
+ 0x68006868, 0xf200f2f2, 0x1b001b1b, 0x34003434,
+ 0x70007070, 0x05000505, 0xa300a3a3, 0x8a008a8a,
+ 0xd500d5d5, 0x79007979, 0x86008686, 0xa800a8a8,
+ 0x30003030, 0xc600c6c6, 0x51005151, 0x4b004b4b,
+ 0x1e001e1e, 0xa600a6a6, 0x27002727, 0xf600f6f6,
+ 0x35003535, 0xd200d2d2, 0x6e006e6e, 0x24002424,
+ 0x16001616, 0x82008282, 0x5f005f5f, 0xda00dada,
+ 0xe600e6e6, 0x75007575, 0xa200a2a2, 0xef00efef,
+ 0x2c002c2c, 0xb200b2b2, 0x1c001c1c, 0x9f009f9f,
+ 0x5d005d5d, 0x6f006f6f, 0x80008080, 0x0a000a0a,
+ 0x72007272, 0x44004444, 0x9b009b9b, 0x6c006c6c,
+ 0x90009090, 0x0b000b0b, 0x5b005b5b, 0x33003333,
+ 0x7d007d7d, 0x5a005a5a, 0x52005252, 0xf300f3f3,
+ 0x61006161, 0xa100a1a1, 0xf700f7f7, 0xb000b0b0,
+ 0xd600d6d6, 0x3f003f3f, 0x7c007c7c, 0x6d006d6d,
+ 0xed00eded, 0x14001414, 0xe000e0e0, 0xa500a5a5,
+ 0x3d003d3d, 0x22002222, 0xb300b3b3, 0xf800f8f8,
+ 0x89008989, 0xde00dede, 0x71007171, 0x1a001a1a,
+ 0xaf00afaf, 0xba00baba, 0xb500b5b5, 0x81008181
+};
+
+static const u32 x1[256] = {
+ 0x52520052, 0x09090009, 0x6a6a006a, 0xd5d500d5,
+ 0x30300030, 0x36360036, 0xa5a500a5, 0x38380038,
+ 0xbfbf00bf, 0x40400040, 0xa3a300a3, 0x9e9e009e,
+ 0x81810081, 0xf3f300f3, 0xd7d700d7, 0xfbfb00fb,
+ 0x7c7c007c, 0xe3e300e3, 0x39390039, 0x82820082,
+ 0x9b9b009b, 0x2f2f002f, 0xffff00ff, 0x87870087,
+ 0x34340034, 0x8e8e008e, 0x43430043, 0x44440044,
+ 0xc4c400c4, 0xdede00de, 0xe9e900e9, 0xcbcb00cb,
+ 0x54540054, 0x7b7b007b, 0x94940094, 0x32320032,
+ 0xa6a600a6, 0xc2c200c2, 0x23230023, 0x3d3d003d,
+ 0xeeee00ee, 0x4c4c004c, 0x95950095, 0x0b0b000b,
+ 0x42420042, 0xfafa00fa, 0xc3c300c3, 0x4e4e004e,
+ 0x08080008, 0x2e2e002e, 0xa1a100a1, 0x66660066,
+ 0x28280028, 0xd9d900d9, 0x24240024, 0xb2b200b2,
+ 0x76760076, 0x5b5b005b, 0xa2a200a2, 0x49490049,
+ 0x6d6d006d, 0x8b8b008b, 0xd1d100d1, 0x25250025,
+ 0x72720072, 0xf8f800f8, 0xf6f600f6, 0x64640064,
+ 0x86860086, 0x68680068, 0x98980098, 0x16160016,
+ 0xd4d400d4, 0xa4a400a4, 0x5c5c005c, 0xcccc00cc,
+ 0x5d5d005d, 0x65650065, 0xb6b600b6, 0x92920092,
+ 0x6c6c006c, 0x70700070, 0x48480048, 0x50500050,
+ 0xfdfd00fd, 0xeded00ed, 0xb9b900b9, 0xdada00da,
+ 0x5e5e005e, 0x15150015, 0x46460046, 0x57570057,
+ 0xa7a700a7, 0x8d8d008d, 0x9d9d009d, 0x84840084,
+ 0x90900090, 0xd8d800d8, 0xabab00ab, 0x00000000,
+ 0x8c8c008c, 0xbcbc00bc, 0xd3d300d3, 0x0a0a000a,
+ 0xf7f700f7, 0xe4e400e4, 0x58580058, 0x05050005,
+ 0xb8b800b8, 0xb3b300b3, 0x45450045, 0x06060006,
+ 0xd0d000d0, 0x2c2c002c, 0x1e1e001e, 0x8f8f008f,
+ 0xcaca00ca, 0x3f3f003f, 0x0f0f000f, 0x02020002,
+ 0xc1c100c1, 0xafaf00af, 0xbdbd00bd, 0x03030003,
+ 0x01010001, 0x13130013, 0x8a8a008a, 0x6b6b006b,
+ 0x3a3a003a, 0x91910091, 0x11110011, 0x41410041,
+ 0x4f4f004f, 0x67670067, 0xdcdc00dc, 0xeaea00ea,
+ 0x97970097, 0xf2f200f2, 0xcfcf00cf, 0xcece00ce,
+ 0xf0f000f0, 0xb4b400b4, 0xe6e600e6, 0x73730073,
+ 0x96960096, 0xacac00ac, 0x74740074, 0x22220022,
+ 0xe7e700e7, 0xadad00ad, 0x35350035, 0x85850085,
+ 0xe2e200e2, 0xf9f900f9, 0x37370037, 0xe8e800e8,
+ 0x1c1c001c, 0x75750075, 0xdfdf00df, 0x6e6e006e,
+ 0x47470047, 0xf1f100f1, 0x1a1a001a, 0x71710071,
+ 0x1d1d001d, 0x29290029, 0xc5c500c5, 0x89890089,
+ 0x6f6f006f, 0xb7b700b7, 0x62620062, 0x0e0e000e,
+ 0xaaaa00aa, 0x18180018, 0xbebe00be, 0x1b1b001b,
+ 0xfcfc00fc, 0x56560056, 0x3e3e003e, 0x4b4b004b,
+ 0xc6c600c6, 0xd2d200d2, 0x79790079, 0x20200020,
+ 0x9a9a009a, 0xdbdb00db, 0xc0c000c0, 0xfefe00fe,
+ 0x78780078, 0xcdcd00cd, 0x5a5a005a, 0xf4f400f4,
+ 0x1f1f001f, 0xdddd00dd, 0xa8a800a8, 0x33330033,
+ 0x88880088, 0x07070007, 0xc7c700c7, 0x31310031,
+ 0xb1b100b1, 0x12120012, 0x10100010, 0x59590059,
+ 0x27270027, 0x80800080, 0xecec00ec, 0x5f5f005f,
+ 0x60600060, 0x51510051, 0x7f7f007f, 0xa9a900a9,
+ 0x19190019, 0xb5b500b5, 0x4a4a004a, 0x0d0d000d,
+ 0x2d2d002d, 0xe5e500e5, 0x7a7a007a, 0x9f9f009f,
+ 0x93930093, 0xc9c900c9, 0x9c9c009c, 0xefef00ef,
+ 0xa0a000a0, 0xe0e000e0, 0x3b3b003b, 0x4d4d004d,
+ 0xaeae00ae, 0x2a2a002a, 0xf5f500f5, 0xb0b000b0,
+ 0xc8c800c8, 0xebeb00eb, 0xbbbb00bb, 0x3c3c003c,
+ 0x83830083, 0x53530053, 0x99990099, 0x61610061,
+ 0x17170017, 0x2b2b002b, 0x04040004, 0x7e7e007e,
+ 0xbaba00ba, 0x77770077, 0xd6d600d6, 0x26260026,
+ 0xe1e100e1, 0x69690069, 0x14140014, 0x63630063,
+ 0x55550055, 0x21210021, 0x0c0c000c, 0x7d7d007d
+};
+
+static const u32 x2[256] = {
+ 0x30303000, 0x68686800, 0x99999900, 0x1b1b1b00,
+ 0x87878700, 0xb9b9b900, 0x21212100, 0x78787800,
+ 0x50505000, 0x39393900, 0xdbdbdb00, 0xe1e1e100,
+ 0x72727200, 0x09090900, 0x62626200, 0x3c3c3c00,
+ 0x3e3e3e00, 0x7e7e7e00, 0x5e5e5e00, 0x8e8e8e00,
+ 0xf1f1f100, 0xa0a0a000, 0xcccccc00, 0xa3a3a300,
+ 0x2a2a2a00, 0x1d1d1d00, 0xfbfbfb00, 0xb6b6b600,
+ 0xd6d6d600, 0x20202000, 0xc4c4c400, 0x8d8d8d00,
+ 0x81818100, 0x65656500, 0xf5f5f500, 0x89898900,
+ 0xcbcbcb00, 0x9d9d9d00, 0x77777700, 0xc6c6c600,
+ 0x57575700, 0x43434300, 0x56565600, 0x17171700,
+ 0xd4d4d400, 0x40404000, 0x1a1a1a00, 0x4d4d4d00,
+ 0xc0c0c000, 0x63636300, 0x6c6c6c00, 0xe3e3e300,
+ 0xb7b7b700, 0xc8c8c800, 0x64646400, 0x6a6a6a00,
+ 0x53535300, 0xaaaaaa00, 0x38383800, 0x98989800,
+ 0x0c0c0c00, 0xf4f4f400, 0x9b9b9b00, 0xededed00,
+ 0x7f7f7f00, 0x22222200, 0x76767600, 0xafafaf00,
+ 0xdddddd00, 0x3a3a3a00, 0x0b0b0b00, 0x58585800,
+ 0x67676700, 0x88888800, 0x06060600, 0xc3c3c300,
+ 0x35353500, 0x0d0d0d00, 0x01010100, 0x8b8b8b00,
+ 0x8c8c8c00, 0xc2c2c200, 0xe6e6e600, 0x5f5f5f00,
+ 0x02020200, 0x24242400, 0x75757500, 0x93939300,
+ 0x66666600, 0x1e1e1e00, 0xe5e5e500, 0xe2e2e200,
+ 0x54545400, 0xd8d8d800, 0x10101000, 0xcecece00,
+ 0x7a7a7a00, 0xe8e8e800, 0x08080800, 0x2c2c2c00,
+ 0x12121200, 0x97979700, 0x32323200, 0xababab00,
+ 0xb4b4b400, 0x27272700, 0x0a0a0a00, 0x23232300,
+ 0xdfdfdf00, 0xefefef00, 0xcacaca00, 0xd9d9d900,
+ 0xb8b8b800, 0xfafafa00, 0xdcdcdc00, 0x31313100,
+ 0x6b6b6b00, 0xd1d1d100, 0xadadad00, 0x19191900,
+ 0x49494900, 0xbdbdbd00, 0x51515100, 0x96969600,
+ 0xeeeeee00, 0xe4e4e400, 0xa8a8a800, 0x41414100,
+ 0xdadada00, 0xffffff00, 0xcdcdcd00, 0x55555500,
+ 0x86868600, 0x36363600, 0xbebebe00, 0x61616100,
+ 0x52525200, 0xf8f8f800, 0xbbbbbb00, 0x0e0e0e00,
+ 0x82828200, 0x48484800, 0x69696900, 0x9a9a9a00,
+ 0xe0e0e000, 0x47474700, 0x9e9e9e00, 0x5c5c5c00,
+ 0x04040400, 0x4b4b4b00, 0x34343400, 0x15151500,
+ 0x79797900, 0x26262600, 0xa7a7a700, 0xdedede00,
+ 0x29292900, 0xaeaeae00, 0x92929200, 0xd7d7d700,
+ 0x84848400, 0xe9e9e900, 0xd2d2d200, 0xbababa00,
+ 0x5d5d5d00, 0xf3f3f300, 0xc5c5c500, 0xb0b0b000,
+ 0xbfbfbf00, 0xa4a4a400, 0x3b3b3b00, 0x71717100,
+ 0x44444400, 0x46464600, 0x2b2b2b00, 0xfcfcfc00,
+ 0xebebeb00, 0x6f6f6f00, 0xd5d5d500, 0xf6f6f600,
+ 0x14141400, 0xfefefe00, 0x7c7c7c00, 0x70707000,
+ 0x5a5a5a00, 0x7d7d7d00, 0xfdfdfd00, 0x2f2f2f00,
+ 0x18181800, 0x83838300, 0x16161600, 0xa5a5a500,
+ 0x91919100, 0x1f1f1f00, 0x05050500, 0x95959500,
+ 0x74747400, 0xa9a9a900, 0xc1c1c100, 0x5b5b5b00,
+ 0x4a4a4a00, 0x85858500, 0x6d6d6d00, 0x13131300,
+ 0x07070700, 0x4f4f4f00, 0x4e4e4e00, 0x45454500,
+ 0xb2b2b200, 0x0f0f0f00, 0xc9c9c900, 0x1c1c1c00,
+ 0xa6a6a600, 0xbcbcbc00, 0xececec00, 0x73737300,
+ 0x90909000, 0x7b7b7b00, 0xcfcfcf00, 0x59595900,
+ 0x8f8f8f00, 0xa1a1a100, 0xf9f9f900, 0x2d2d2d00,
+ 0xf2f2f200, 0xb1b1b100, 0x00000000, 0x94949400,
+ 0x37373700, 0x9f9f9f00, 0xd0d0d000, 0x2e2e2e00,
+ 0x9c9c9c00, 0x6e6e6e00, 0x28282800, 0x3f3f3f00,
+ 0x80808000, 0xf0f0f000, 0x3d3d3d00, 0xd3d3d300,
+ 0x25252500, 0x8a8a8a00, 0xb5b5b500, 0xe7e7e700,
+ 0x42424200, 0xb3b3b300, 0xc7c7c700, 0xeaeaea00,
+ 0xf7f7f700, 0x4c4c4c00, 0x11111100, 0x33333300,
+ 0x03030300, 0xa2a2a200, 0xacacac00, 0x60606000
+};
+
+static inline u32 rotl32(u32 v, u32 r)
+{
+ return ((v << r) | (v >> (32 - r)));
+}
+
+static inline u32 rotr32(u32 v, u32 r)
+{
+ return ((v >> r) | (v << (32 - r)));
+}
+
+static inline u32 bswap32(u32 v)
+{
+ return ((v << 24) ^
+ (v >> 24) ^
+ ((v & 0x0000ff00) << 8) ^
+ ((v & 0x00ff0000) >> 8));
+}
+
+static inline u8 get_u8(u32 x, u32 y)
+{
+ return (x >> ((3 - y) * 8));
+}
+
+static inline u32 make_u32(u8 v0, u8 v1, u8 v2, u8 v3)
+{
+ return ((u32)v0 << 24) | ((u32)v1 << 16) | ((u32)v2 << 8) | ((u32)v3);
+}
+
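A quick worked example of the big-endian byte indexing used by the helpers above (values are illustrative):

	/* For x = 0xaabbccdd:
	 *	get_u8(x, 0) == 0xaa, get_u8(x, 1) == 0xbb,
	 *	get_u8(x, 2) == 0xcc, get_u8(x, 3) == 0xdd, and
	 *	make_u32(0xaa, 0xbb, 0xcc, 0xdd) == 0xaabbccdd,
	 * so make_u32(get_u8(x, 0), get_u8(x, 1), get_u8(x, 2), get_u8(x, 3))
	 * reassembles x.
	 */
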
+static inline u32 aria_m(u32 t0)
+{
+ return rotr32(t0, 8) ^ rotr32(t0 ^ rotr32(t0, 8), 16);
+}
+
+/* S-Box Layer 1 + M */
+static inline void aria_sbox_layer1_with_pre_diff(u32 *t0, u32 *t1, u32 *t2,
+ u32 *t3)
+{
+ *t0 = s1[get_u8(*t0, 0)] ^
+ s2[get_u8(*t0, 1)] ^
+ x1[get_u8(*t0, 2)] ^
+ x2[get_u8(*t0, 3)];
+ *t1 = s1[get_u8(*t1, 0)] ^
+ s2[get_u8(*t1, 1)] ^
+ x1[get_u8(*t1, 2)] ^
+ x2[get_u8(*t1, 3)];
+ *t2 = s1[get_u8(*t2, 0)] ^
+ s2[get_u8(*t2, 1)] ^
+ x1[get_u8(*t2, 2)] ^
+ x2[get_u8(*t2, 3)];
+ *t3 = s1[get_u8(*t3, 0)] ^
+ s2[get_u8(*t3, 1)] ^
+ x1[get_u8(*t3, 2)] ^
+ x2[get_u8(*t3, 3)];
+}
+
+/* S-Box Layer 2 + M */
+static inline void aria_sbox_layer2_with_pre_diff(u32 *t0, u32 *t1, u32 *t2,
+ u32 *t3)
+{
+ *t0 = x1[get_u8(*t0, 0)] ^
+ x2[get_u8(*t0, 1)] ^
+ s1[get_u8(*t0, 2)] ^
+ s2[get_u8(*t0, 3)];
+ *t1 = x1[get_u8(*t1, 0)] ^
+ x2[get_u8(*t1, 1)] ^
+ s1[get_u8(*t1, 2)] ^
+ s2[get_u8(*t1, 3)];
+ *t2 = x1[get_u8(*t2, 0)] ^
+ x2[get_u8(*t2, 1)] ^
+ s1[get_u8(*t2, 2)] ^
+ s2[get_u8(*t2, 3)];
+ *t3 = x1[get_u8(*t3, 0)] ^
+ x2[get_u8(*t3, 1)] ^
+ s1[get_u8(*t3, 2)] ^
+ s2[get_u8(*t3, 3)];
+}
+
+/* Word-level diffusion */
+static inline void aria_diff_word(u32 *t0, u32 *t1, u32 *t2, u32 *t3)
+{
+ *t1 ^= *t2;
+ *t2 ^= *t3;
+ *t0 ^= *t1;
+
+ *t3 ^= *t1;
+ *t2 ^= *t0;
+ *t1 ^= *t2;
+}
+
+/* Byte-level diffusion */
+static inline void aria_diff_byte(u32 *t1, u32 *t2, u32 *t3)
+{
+ *t1 = ((*t1 << 8) & 0xff00ff00) ^ ((*t1 >> 8) & 0x00ff00ff);
+ *t2 = rotr32(*t2, 16);
+ *t3 = bswap32(*t3);
+}
+
+/* Key XOR Layer */
+static inline void aria_add_round_key(u32 *rk, u32 *t0, u32 *t1, u32 *t2,
+ u32 *t3)
+{
+ *t0 ^= rk[0];
+ *t1 ^= rk[1];
+ *t2 ^= rk[2];
+ *t3 ^= rk[3];
+}
+/* Odd round Substitution & Diffusion */
+static inline void aria_subst_diff_odd(u32 *t0, u32 *t1, u32 *t2, u32 *t3)
+{
+ aria_sbox_layer1_with_pre_diff(t0, t1, t2, t3);
+ aria_diff_word(t0, t1, t2, t3);
+ aria_diff_byte(t1, t2, t3);
+ aria_diff_word(t0, t1, t2, t3);
+}
+
+/* Even round Substitution & Diffusion */
+static inline void aria_subst_diff_even(u32 *t0, u32 *t1, u32 *t2, u32 *t3)
+{
+ aria_sbox_layer2_with_pre_diff(t0, t1, t2, t3);
+ aria_diff_word(t0, t1, t2, t3);
+ aria_diff_byte(t3, t0, t1);
+ aria_diff_word(t0, t1, t2, t3);
+}
+
+/* Q, R Macro expanded ARIA GSRK */
+static inline void aria_gsrk(u32 *rk, u32 *x, u32 *y, u32 n)
+{
+ int q = 4 - (n / 32);
+ int r = n % 32;
+
+ rk[0] = (x[0]) ^
+ ((y[q % 4]) >> r) ^
+ ((y[(q + 3) % 4]) << (32 - r));
+ rk[1] = (x[1]) ^
+ ((y[(q + 1) % 4]) >> r) ^
+ ((y[q % 4]) << (32 - r));
+ rk[2] = (x[2]) ^
+ ((y[(q + 2) % 4]) >> r) ^
+ ((y[(q + 1) % 4]) << (32 - r));
+ rk[3] = (x[3]) ^
+ ((y[(q + 3) % 4]) >> r) ^
+ ((y[(q + 2) % 4]) << (32 - r));
+}
+
+void aria_encrypt(void *ctx, u8 *out, const u8 *in);
+void aria_decrypt(void *ctx, u8 *out, const u8 *in);
+int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len);
+
+#endif
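
As a usage sketch, one way to exercise this cipher is through the single-block cipher API (which recent kernels keep in <crypto/internal/cipher.h>); the key and buffers are assumed inputs:

	static int example_aria_block(const u8 key[16], const u8 in[16], u8 out[16])
	{
		struct crypto_cipher *tfm;
		int err;

		tfm = crypto_alloc_cipher("aria", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_cipher_setkey(tfm, key, 16);	/* 128-bit key */
		if (!err)
			crypto_cipher_encrypt_one(tfm, out, in);

		crypto_free_cipher(tfm);
		return err;
	}
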
diff --git a/include/crypto/asym_tpm_subtype.h b/include/crypto/asym_tpm_subtype.h
deleted file mode 100644
index 48198c36d6b9..000000000000
--- a/include/crypto/asym_tpm_subtype.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef _LINUX_ASYM_TPM_SUBTYPE_H
-#define _LINUX_ASYM_TPM_SUBTYPE_H
-
-#include <linux/keyctl.h>
-
-struct tpm_key {
- void *blob;
- u32 blob_len;
- uint16_t key_len; /* Size in bits of the key */
- const void *pub_key; /* pointer inside blob to the public key bytes */
- uint16_t pub_key_len; /* length of the public key */
-};
-
-struct tpm_key *tpm_key_create(const void *blob, uint32_t blob_len);
-
-extern struct asymmetric_key_subtype asym_tpm_subtype;
-
-#endif /* _LINUX_ASYM_TPM_SUBTYPE_H */
diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h
index 5f92a986083c..15a9caa2354a 100644
--- a/include/crypto/authenc.h
+++ b/include/crypto/authenc.h
@@ -28,5 +28,7 @@ struct crypto_authenc_keys {
int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
unsigned int keylen);
+int crypto_krb5enc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+ unsigned int keylen);
#endif /* _CRYPTO_AUTHENC_H */
diff --git a/include/crypto/b128ops.h b/include/crypto/b128ops.h
index 0b8e6bc55301..f3b37cbb3131 100644
--- a/include/crypto/b128ops.h
+++ b/include/crypto/b128ops.h
@@ -50,10 +50,6 @@
#include <linux/types.h>
typedef struct {
- u64 a, b;
-} u128;
-
-typedef struct {
__be64 a, b;
} be128;
@@ -61,20 +57,16 @@ typedef struct {
__le64 b, a;
} le128;
-static inline void u128_xor(u128 *r, const u128 *p, const u128 *q)
+static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
{
r->a = p->a ^ q->a;
r->b = p->b ^ q->b;
}
-static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
-{
- u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
-}
-
static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
{
- u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
+ r->a = p->a ^ q->a;
+ r->b = p->b ^ q->b;
}
#endif /* _CRYPTO_B128OPS_H */
diff --git a/include/crypto/blake2b.h b/include/crypto/blake2b.h
index 18875f16f8ca..3bc37fd103a7 100644
--- a/include/crypto/blake2b.h
+++ b/include/crypto/blake2b.h
@@ -5,7 +5,6 @@
#include <linux/bug.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/string.h>
enum blake2b_lengths {
@@ -19,7 +18,16 @@ enum blake2b_lengths {
BLAKE2B_512_HASH_SIZE = 64,
};
-struct blake2b_state {
+/**
+ * struct blake2b_ctx - Context for hashing a message with BLAKE2b
+ * @h: compression function state
+ * @t: block counter
+ * @f: finalization indicator
+ * @buf: partial block buffer; 'buflen' bytes are valid
+ * @buflen: number of bytes buffered in @buf
+ * @outlen: length of output hash value in bytes, at most BLAKE2B_HASH_SIZE
+ */
+struct blake2b_ctx {
/* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
u64 h[8];
u64 t[2];
@@ -40,28 +48,109 @@ enum blake2b_iv {
BLAKE2B_IV7 = 0x5BE0CD19137E2179ULL,
};
-static inline void __blake2b_init(struct blake2b_state *state, size_t outlen,
+static inline void __blake2b_init(struct blake2b_ctx *ctx, size_t outlen,
const void *key, size_t keylen)
{
- state->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen);
- state->h[1] = BLAKE2B_IV1;
- state->h[2] = BLAKE2B_IV2;
- state->h[3] = BLAKE2B_IV3;
- state->h[4] = BLAKE2B_IV4;
- state->h[5] = BLAKE2B_IV5;
- state->h[6] = BLAKE2B_IV6;
- state->h[7] = BLAKE2B_IV7;
- state->t[0] = 0;
- state->t[1] = 0;
- state->f[0] = 0;
- state->f[1] = 0;
- state->buflen = 0;
- state->outlen = outlen;
+ ctx->h[0] = BLAKE2B_IV0 ^ (0x01010000 | keylen << 8 | outlen);
+ ctx->h[1] = BLAKE2B_IV1;
+ ctx->h[2] = BLAKE2B_IV2;
+ ctx->h[3] = BLAKE2B_IV3;
+ ctx->h[4] = BLAKE2B_IV4;
+ ctx->h[5] = BLAKE2B_IV5;
+ ctx->h[6] = BLAKE2B_IV6;
+ ctx->h[7] = BLAKE2B_IV7;
+ ctx->t[0] = 0;
+ ctx->t[1] = 0;
+ ctx->f[0] = 0;
+ ctx->f[1] = 0;
+ ctx->buflen = 0;
+ ctx->outlen = outlen;
if (keylen) {
- memcpy(state->buf, key, keylen);
- memset(&state->buf[keylen], 0, BLAKE2B_BLOCK_SIZE - keylen);
- state->buflen = BLAKE2B_BLOCK_SIZE;
+ memcpy(ctx->buf, key, keylen);
+ memset(&ctx->buf[keylen], 0, BLAKE2B_BLOCK_SIZE - keylen);
+ ctx->buflen = BLAKE2B_BLOCK_SIZE;
}
}
+/**
+ * blake2b_init() - Initialize a BLAKE2b context for a new message (unkeyed)
+ * @ctx: the context to initialize
+ * @outlen: length of output hash value in bytes, at most BLAKE2B_HASH_SIZE
+ *
+ * Context: Any context.
+ */
+static inline void blake2b_init(struct blake2b_ctx *ctx, size_t outlen)
+{
+ __blake2b_init(ctx, outlen, NULL, 0);
+}
+
+/**
+ * blake2b_init_key() - Initialize a BLAKE2b context for a new message (keyed)
+ * @ctx: the context to initialize
+ * @outlen: length of output hash value in bytes, at most BLAKE2B_HASH_SIZE
+ * @key: the key
+ * @keylen: the key length in bytes, at most BLAKE2B_KEY_SIZE
+ *
+ * Context: Any context.
+ */
+static inline void blake2b_init_key(struct blake2b_ctx *ctx, size_t outlen,
+ const void *key, size_t keylen)
+{
+ WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2B_HASH_SIZE ||
+ !key || !keylen || keylen > BLAKE2B_KEY_SIZE));
+
+ __blake2b_init(ctx, outlen, key, keylen);
+}
+
+/**
+ * blake2b_update() - Update a BLAKE2b context with message data
+ * @ctx: the context to update; must have been initialized
+ * @in: the message data
+ * @inlen: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+void blake2b_update(struct blake2b_ctx *ctx, const u8 *in, size_t inlen);
+
+/**
+ * blake2b_final() - Finish computing a BLAKE2b hash
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting BLAKE2b hash. Its length will be equal to the
+ * @outlen that was passed to blake2b_init() or blake2b_init_key().
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void blake2b_final(struct blake2b_ctx *ctx, u8 *out);
+
+/**
+ * blake2b() - Compute BLAKE2b hash in one shot
+ * @key: the key, or NULL for an unkeyed hash
+ * @keylen: the key length in bytes (at most BLAKE2B_KEY_SIZE), or 0 for an
+ * unkeyed hash
+ * @in: the message data
+ * @inlen: the data length in bytes
+ * @out: (output) the resulting BLAKE2b hash, with length @outlen
+ * @outlen: length of output hash value in bytes, at most BLAKE2B_HASH_SIZE
+ *
+ * Context: Any context.
+ */
+static inline void blake2b(const u8 *key, size_t keylen,
+ const u8 *in, size_t inlen,
+ u8 *out, size_t outlen)
+{
+ struct blake2b_ctx ctx;
+
+ WARN_ON(IS_ENABLED(DEBUG) && ((!in && inlen > 0) || !out || !outlen ||
+ outlen > BLAKE2B_HASH_SIZE || keylen > BLAKE2B_KEY_SIZE ||
+ (!key && keylen)));
+
+ __blake2b_init(&ctx, outlen, key, keylen);
+ blake2b_update(&ctx, in, inlen);
+ blake2b_final(&ctx, out);
+}
+
#endif /* _CRYPTO_BLAKE2B_H */
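
A minimal sketch of the two equivalent ways to hash a message with this interface (msg and msg_len are assumed inputs):

	u8 hash[BLAKE2B_256_HASH_SIZE];
	struct blake2b_ctx ctx;

	/* One-shot, unkeyed 256-bit hash: */
	blake2b(NULL, 0, msg, msg_len, hash, sizeof(hash));

	/* Equivalent incremental form: */
	blake2b_init(&ctx, sizeof(hash));
	blake2b_update(&ctx, msg, msg_len);
	blake2b_final(&ctx, hash);	/* also zeroizes ctx */
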
diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h
index bc3fb59442ce..648cb7824358 100644
--- a/include/crypto/blake2s.h
+++ b/include/crypto/blake2s.h
@@ -7,8 +7,8 @@
#define _CRYPTO_BLAKE2S_H
#include <linux/bug.h>
+#include <linux/kconfig.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/string.h>
enum blake2s_lengths {
@@ -22,7 +22,16 @@ enum blake2s_lengths {
BLAKE2S_256_HASH_SIZE = 32,
};
-struct blake2s_state {
+/**
+ * struct blake2s_ctx - Context for hashing a message with BLAKE2s
+ * @h: compression function state
+ * @t: block counter
+ * @f: finalization indicator
+ * @buf: partial block buffer; 'buflen' bytes are valid
+ * @buflen: number of bytes buffered in @buf
+ * @outlen: length of output hash value in bytes, at most BLAKE2S_HASH_SIZE
+ */
+struct blake2s_ctx {
/* 'h', 't', and 'f' are used in assembly code, so keep them as-is. */
u32 h[8];
u32 t[2];
@@ -43,65 +52,109 @@ enum blake2s_iv {
BLAKE2S_IV7 = 0x5BE0CD19UL,
};
-static inline void __blake2s_init(struct blake2s_state *state, size_t outlen,
+static inline void __blake2s_init(struct blake2s_ctx *ctx, size_t outlen,
const void *key, size_t keylen)
{
- state->h[0] = BLAKE2S_IV0 ^ (0x01010000 | keylen << 8 | outlen);
- state->h[1] = BLAKE2S_IV1;
- state->h[2] = BLAKE2S_IV2;
- state->h[3] = BLAKE2S_IV3;
- state->h[4] = BLAKE2S_IV4;
- state->h[5] = BLAKE2S_IV5;
- state->h[6] = BLAKE2S_IV6;
- state->h[7] = BLAKE2S_IV7;
- state->t[0] = 0;
- state->t[1] = 0;
- state->f[0] = 0;
- state->f[1] = 0;
- state->buflen = 0;
- state->outlen = outlen;
+ ctx->h[0] = BLAKE2S_IV0 ^ (0x01010000 | keylen << 8 | outlen);
+ ctx->h[1] = BLAKE2S_IV1;
+ ctx->h[2] = BLAKE2S_IV2;
+ ctx->h[3] = BLAKE2S_IV3;
+ ctx->h[4] = BLAKE2S_IV4;
+ ctx->h[5] = BLAKE2S_IV5;
+ ctx->h[6] = BLAKE2S_IV6;
+ ctx->h[7] = BLAKE2S_IV7;
+ ctx->t[0] = 0;
+ ctx->t[1] = 0;
+ ctx->f[0] = 0;
+ ctx->f[1] = 0;
+ ctx->buflen = 0;
+ ctx->outlen = outlen;
if (keylen) {
- memcpy(state->buf, key, keylen);
- memset(&state->buf[keylen], 0, BLAKE2S_BLOCK_SIZE - keylen);
- state->buflen = BLAKE2S_BLOCK_SIZE;
+ memcpy(ctx->buf, key, keylen);
+ memset(&ctx->buf[keylen], 0, BLAKE2S_BLOCK_SIZE - keylen);
+ ctx->buflen = BLAKE2S_BLOCK_SIZE;
}
}
-static inline void blake2s_init(struct blake2s_state *state,
- const size_t outlen)
+/**
+ * blake2s_init() - Initialize a BLAKE2s context for a new message (unkeyed)
+ * @ctx: the context to initialize
+ * @outlen: length of output hash value in bytes, at most BLAKE2S_HASH_SIZE
+ *
+ * Context: Any context.
+ */
+static inline void blake2s_init(struct blake2s_ctx *ctx, size_t outlen)
{
- __blake2s_init(state, outlen, NULL, 0);
+ __blake2s_init(ctx, outlen, NULL, 0);
}
-static inline void blake2s_init_key(struct blake2s_state *state,
- const size_t outlen, const void *key,
- const size_t keylen)
+/**
+ * blake2s_init_key() - Initialize a BLAKE2s context for a new message (keyed)
+ * @ctx: the context to initialize
+ * @outlen: length of output hash value in bytes, at most BLAKE2S_HASH_SIZE
+ * @key: the key
+ * @keylen: the key length in bytes, at most BLAKE2S_KEY_SIZE
+ *
+ * Context: Any context.
+ */
+static inline void blake2s_init_key(struct blake2s_ctx *ctx, size_t outlen,
+ const void *key, size_t keylen)
{
WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE ||
!key || !keylen || keylen > BLAKE2S_KEY_SIZE));
- __blake2s_init(state, outlen, key, keylen);
+ __blake2s_init(ctx, outlen, key, keylen);
}
-void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen);
-void blake2s_final(struct blake2s_state *state, u8 *out);
-
-static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
- const size_t outlen, const size_t inlen,
- const size_t keylen)
+/**
+ * blake2s_update() - Update a BLAKE2s context with message data
+ * @ctx: the context to update; must have been initialized
+ * @in: the message data
+ * @inlen: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+void blake2s_update(struct blake2s_ctx *ctx, const u8 *in, size_t inlen);
+
+/**
+ * blake2s_final() - Finish computing a BLAKE2s hash
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting BLAKE2s hash. Its length will be equal to the
+ * @outlen that was passed to blake2s_init() or blake2s_init_key().
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void blake2s_final(struct blake2s_ctx *ctx, u8 *out);
+
+/**
+ * blake2s() - Compute BLAKE2s hash in one shot
+ * @key: the key, or NULL for an unkeyed hash
+ * @keylen: the key length in bytes (at most BLAKE2S_KEY_SIZE), or 0 for an
+ * unkeyed hash
+ * @in: the message data
+ * @inlen: the data length in bytes
+ * @out: (output) the resulting BLAKE2s hash, with length @outlen
+ * @outlen: length of output hash value in bytes, at most BLAKE2S_HASH_SIZE
+ *
+ * Context: Any context.
+ */
+static inline void blake2s(const u8 *key, size_t keylen,
+ const u8 *in, size_t inlen,
+ u8 *out, size_t outlen)
{
- struct blake2s_state state;
+ struct blake2s_ctx ctx;
WARN_ON(IS_ENABLED(DEBUG) && ((!in && inlen > 0) || !out || !outlen ||
outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE ||
(!key && keylen)));
- __blake2s_init(&state, outlen, key, keylen);
- blake2s_update(&state, in, inlen);
- blake2s_final(&state, out);
+ __blake2s_init(&ctx, outlen, key, keylen);
+ blake2s_update(&ctx, in, inlen);
+ blake2s_final(&ctx, out);
}
-void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
- const size_t keylen);
-
#endif /* _CRYPTO_BLAKE2S_H */
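
With blake2s256_hmac() removed, keyed use goes through BLAKE2s's native keyed mode. A sketch (key and msg are assumed inputs):

	u8 mac[BLAKE2S_HASH_SIZE];

	/* Keyed one-shot hash (BLAKE2s's built-in MAC mode): */
	blake2s(key, BLAKE2S_KEY_SIZE, msg, msg_len, mac, sizeof(mac));
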
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index dabaee698718..1cc301a48469 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -15,7 +15,8 @@
#ifndef _CRYPTO_CHACHA_H
#define _CRYPTO_CHACHA_H
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
+#include <linux/string.h>
#include <linux/types.h>
/* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */
@@ -25,80 +26,77 @@
#define CHACHA_BLOCK_SIZE 64
#define CHACHAPOLY_IV_SIZE 12
-#define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32))
+#define CHACHA_KEY_WORDS 8
+#define CHACHA_STATE_WORDS 16
+#define HCHACHA_OUT_WORDS 8
/* 192-bit nonce, then 64-bit stream position */
#define XCHACHA_IV_SIZE 32
-void chacha_block_generic(u32 *state, u8 *stream, int nrounds);
-static inline void chacha20_block(u32 *state, u8 *stream)
+struct chacha_state {
+ u32 x[CHACHA_STATE_WORDS];
+};
+
+void chacha_block_generic(struct chacha_state *state,
+ u8 out[at_least CHACHA_BLOCK_SIZE], int nrounds);
+static inline void chacha20_block(struct chacha_state *state,
+ u8 out[at_least CHACHA_BLOCK_SIZE])
{
- chacha_block_generic(state, stream, 20);
+ chacha_block_generic(state, out, 20);
}
-void hchacha_block_arch(const u32 *state, u32 *out, int nrounds);
-void hchacha_block_generic(const u32 *state, u32 *out, int nrounds);
+void hchacha_block_generic(const struct chacha_state *state,
+ u32 out[at_least HCHACHA_OUT_WORDS], int nrounds);
-static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
- hchacha_block_arch(state, out, nrounds);
- else
- hchacha_block_generic(state, out, nrounds);
-}
+void hchacha_block(const struct chacha_state *state,
+ u32 out[at_least HCHACHA_OUT_WORDS], int nrounds);
-static inline void chacha_init_consts(u32 *state)
-{
- state[0] = 0x61707865; /* "expa" */
- state[1] = 0x3320646e; /* "nd 3" */
- state[2] = 0x79622d32; /* "2-by" */
- state[3] = 0x6b206574; /* "te k" */
-}
+enum chacha_constants { /* expand 32-byte k */
+ CHACHA_CONSTANT_EXPA = 0x61707865U,
+ CHACHA_CONSTANT_ND_3 = 0x3320646eU,
+ CHACHA_CONSTANT_2_BY = 0x79622d32U,
+ CHACHA_CONSTANT_TE_K = 0x6b206574U
+};
-void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
-static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
+static inline void chacha_init_consts(struct chacha_state *state)
{
- chacha_init_consts(state);
- state[4] = key[0];
- state[5] = key[1];
- state[6] = key[2];
- state[7] = key[3];
- state[8] = key[4];
- state[9] = key[5];
- state[10] = key[6];
- state[11] = key[7];
- state[12] = get_unaligned_le32(iv + 0);
- state[13] = get_unaligned_le32(iv + 4);
- state[14] = get_unaligned_le32(iv + 8);
- state[15] = get_unaligned_le32(iv + 12);
+ state->x[0] = CHACHA_CONSTANT_EXPA;
+ state->x[1] = CHACHA_CONSTANT_ND_3;
+ state->x[2] = CHACHA_CONSTANT_2_BY;
+ state->x[3] = CHACHA_CONSTANT_TE_K;
}
-static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv)
+static inline void chacha_init(struct chacha_state *state,
+ const u32 key[at_least CHACHA_KEY_WORDS],
+ const u8 iv[at_least CHACHA_IV_SIZE])
{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
- chacha_init_arch(state, key, iv);
- else
- chacha_init_generic(state, key, iv);
+ chacha_init_consts(state);
+ state->x[4] = key[0];
+ state->x[5] = key[1];
+ state->x[6] = key[2];
+ state->x[7] = key[3];
+ state->x[8] = key[4];
+ state->x[9] = key[5];
+ state->x[10] = key[6];
+ state->x[11] = key[7];
+ state->x[12] = get_unaligned_le32(iv + 0);
+ state->x[13] = get_unaligned_le32(iv + 4);
+ state->x[14] = get_unaligned_le32(iv + 8);
+ state->x[15] = get_unaligned_le32(iv + 12);
}
-void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes, int nrounds);
-void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes, int nrounds);
+void chacha_crypt(struct chacha_state *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds);
-static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes, int nrounds)
+static inline void chacha20_crypt(struct chacha_state *state,
+ u8 *dst, const u8 *src, unsigned int bytes)
{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
- chacha_crypt_arch(state, dst, src, bytes, nrounds);
- else
- chacha_crypt_generic(state, dst, src, bytes, nrounds);
+ chacha_crypt(state, dst, src, bytes, 20);
}
-static inline void chacha20_crypt(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes)
+static inline void chacha_zeroize_state(struct chacha_state *state)
{
- chacha_crypt(state, dst, src, bytes, 20);
+ memzero_explicit(state, sizeof(*state));
}
#endif /* _CRYPTO_CHACHA_H */
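
A sketch of the typical encrypt path with the new struct-based state (key, iv and the data buffers are assumed inputs):

	struct chacha_state state;

	chacha_init(&state, key, iv);		/* key: u32[CHACHA_KEY_WORDS] */
	chacha20_crypt(&state, dst, src, len);	/* XORs keystream into dst */
	chacha_zeroize_state(&state);		/* wipe key material */
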
diff --git a/include/crypto/chacha20poly1305.h b/include/crypto/chacha20poly1305.h
index d2ac3ff7dc1e..0f71b037702d 100644
--- a/include/crypto/chacha20poly1305.h
+++ b/include/crypto/chacha20poly1305.h
@@ -18,32 +18,33 @@ enum chacha20poly1305_lengths {
void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len,
const u64 nonce,
- const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+ const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]);
bool __must_check
chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len, const u64 nonce,
- const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+ const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]);
void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len,
- const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
- const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+ const u8 nonce[at_least XCHACHA20POLY1305_NONCE_SIZE],
+ const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]);
bool __must_check xchacha20poly1305_decrypt(
- u8 *dst, const u8 *src, const size_t src_len, const u8 *ad,
- const size_t ad_len, const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
- const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+ u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u8 nonce[at_least XCHACHA20POLY1305_NONCE_SIZE],
+ const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]);
bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src, size_t src_len,
const u8 *ad, const size_t ad_len,
const u64 nonce,
- const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+ const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]);
bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len,
const u8 *ad, const size_t ad_len,
const u64 nonce,
- const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+ const u8 key[at_least CHACHA20POLY1305_KEY_SIZE]);
bool chacha20poly1305_selftest(void);
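
A caller-side sketch of the AEAD interface (pt, ad, nonce and key are assumed inputs; the ciphertext carries a trailing 16-byte tag):

	u8 ct[sizeof(pt) + CHACHA20POLY1305_AUTHTAG_SIZE];

	chacha20poly1305_encrypt(ct, pt, sizeof(pt), ad, ad_len, nonce, key);

	if (!chacha20poly1305_decrypt(pt, ct, sizeof(ct), ad, ad_len, nonce, key))
		pr_warn("authentication failed\n");	/* output must be discarded */
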
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 23169f4d87e6..796d986e58e1 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -13,7 +13,8 @@
#ifndef _CRYPTO_CRYPT_H
#define _CRYPTO_CRYPT_H
-#include <linux/kernel.h>
+#include <linux/types.h>
+
#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h
index a1c66d1001af..06984a26c8cf 100644
--- a/include/crypto/ctr.h
+++ b/include/crypto/ctr.h
@@ -8,58 +8,8 @@
#ifndef _CRYPTO_CTR_H
#define _CRYPTO_CTR_H
-#include <crypto/algapi.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
#define CTR_RFC3686_NONCE_SIZE 4
#define CTR_RFC3686_IV_SIZE 8
#define CTR_RFC3686_BLOCK_SIZE 16
-static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req,
- void (*fn)(struct crypto_skcipher *,
- const u8 *, u8 *))
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- int blocksize = crypto_skcipher_chunksize(tfm);
- u8 buf[MAX_CIPHER_BLOCKSIZE];
- struct skcipher_walk walk;
- int err;
-
- /* avoid integer division due to variable blocksize parameter */
- if (WARN_ON_ONCE(!is_power_of_2(blocksize)))
- return -EINVAL;
-
- err = skcipher_walk_virt(&walk, req, false);
-
- while (walk.nbytes > 0) {
- u8 *dst = walk.dst.virt.addr;
- u8 *src = walk.src.virt.addr;
- int nbytes = walk.nbytes;
- int tail = 0;
-
- if (nbytes < walk.total) {
- tail = walk.nbytes & (blocksize - 1);
- nbytes -= tail;
- }
-
- do {
- int bsize = min(nbytes, blocksize);
-
- fn(tfm, walk.iv, buf);
-
- crypto_xor_cpy(dst, src, buf, bsize);
- crypto_inc(walk.iv, blocksize);
-
- dst += bsize;
- src += bsize;
- nbytes -= bsize;
- } while (nbytes > 0);
-
- err = skcipher_walk_done(&walk, tail);
- }
- return err;
-}
-
#endif /* _CRYPTO_CTR_H */
diff --git a/include/crypto/curve25519.h b/include/crypto/curve25519.h
index ece6a9b5fafc..2362b48f8741 100644
--- a/include/crypto/curve25519.h
+++ b/include/crypto/curve25519.h
@@ -6,7 +6,6 @@
#ifndef CURVE25519_H
#define CURVE25519_H
-#include <crypto/algapi.h> // For crypto_memneq.
#include <linux/types.h>
#include <linux/random.h>
@@ -14,57 +13,28 @@ enum curve25519_lengths {
CURVE25519_KEY_SIZE = 32
};
-extern const u8 curve25519_null_point[];
-extern const u8 curve25519_base_point[];
+void curve25519_generic(u8 out[at_least CURVE25519_KEY_SIZE],
+ const u8 scalar[at_least CURVE25519_KEY_SIZE],
+ const u8 point[at_least CURVE25519_KEY_SIZE]);
-void curve25519_generic(u8 out[CURVE25519_KEY_SIZE],
- const u8 scalar[CURVE25519_KEY_SIZE],
- const u8 point[CURVE25519_KEY_SIZE]);
+bool __must_check
+curve25519(u8 mypublic[at_least CURVE25519_KEY_SIZE],
+ const u8 secret[at_least CURVE25519_KEY_SIZE],
+ const u8 basepoint[at_least CURVE25519_KEY_SIZE]);
-void curve25519_arch(u8 out[CURVE25519_KEY_SIZE],
- const u8 scalar[CURVE25519_KEY_SIZE],
- const u8 point[CURVE25519_KEY_SIZE]);
+bool __must_check
+curve25519_generate_public(u8 pub[at_least CURVE25519_KEY_SIZE],
+ const u8 secret[at_least CURVE25519_KEY_SIZE]);
-void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
- const u8 secret[CURVE25519_KEY_SIZE]);
-
-bool curve25519_selftest(void);
-
-static inline
-bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE],
- const u8 secret[CURVE25519_KEY_SIZE],
- const u8 basepoint[CURVE25519_KEY_SIZE])
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
- curve25519_arch(mypublic, secret, basepoint);
- else
- curve25519_generic(mypublic, secret, basepoint);
- return crypto_memneq(mypublic, curve25519_null_point,
- CURVE25519_KEY_SIZE);
-}
-
-static inline bool
-__must_check curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE],
- const u8 secret[CURVE25519_KEY_SIZE])
-{
- if (unlikely(!crypto_memneq(secret, curve25519_null_point,
- CURVE25519_KEY_SIZE)))
- return false;
-
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
- curve25519_base_arch(pub, secret);
- else
- curve25519_generic(pub, secret, curve25519_base_point);
- return crypto_memneq(pub, curve25519_null_point, CURVE25519_KEY_SIZE);
-}
-
-static inline void curve25519_clamp_secret(u8 secret[CURVE25519_KEY_SIZE])
+static inline void
+curve25519_clamp_secret(u8 secret[at_least CURVE25519_KEY_SIZE])
{
secret[0] &= 248;
secret[31] = (secret[31] & 127) | 64;
}
-static inline void curve25519_generate_secret(u8 secret[CURVE25519_KEY_SIZE])
+static inline void
+curve25519_generate_secret(u8 secret[at_least CURVE25519_KEY_SIZE])
{
get_random_bytes_wait(secret, CURVE25519_KEY_SIZE);
curve25519_clamp_secret(secret);
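
Taken together, a hedged sketch of a full key exchange with this interface (peer_pub is the remote party's public key, assumed to be available):

	u8 secret[CURVE25519_KEY_SIZE], pub[CURVE25519_KEY_SIZE];
	u8 shared[CURVE25519_KEY_SIZE];

	curve25519_generate_secret(secret);
	if (!curve25519_generate_public(pub, secret))
		return -EKEYREJECTED;	/* degenerate secret key */
	/* ... exchange pub with the peer ... */
	if (!curve25519(shared, secret, peer_pub))
		return -EKEYREJECTED;	/* all-zero shared secret: reject */
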
diff --git a/include/crypto/df_sp80090a.h b/include/crypto/df_sp80090a.h
new file mode 100644
index 000000000000..6b25305fe611
--- /dev/null
+++ b/include/crypto/df_sp80090a.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2014
+ */
+
+#ifndef _CRYPTO_DF80090A_H
+#define _CRYPTO_DF80090A_H
+
+#include <crypto/internal/cipher.h>
+#include <crypto/aes.h>
+
+static inline int crypto_drbg_ctr_df_datalen(u8 statelen, u8 blocklen)
+{
+ return statelen + /* df_data */
+ blocklen + /* pad */
+ blocklen + /* iv */
+ statelen + blocklen; /* temp */
+}
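+
+/* Worked example: an AES-256 CTR DRBG has statelen = 48 (32-byte key +
+ * 16-byte block) and blocklen = 16, so
+ *	crypto_drbg_ctr_df_datalen(48, 16)
+ *		== 48 (df_data) + 16 (pad) + 16 (iv) + 48 + 16 (temp)
+ *		== 144 bytes of scratch space.
+ */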
+
+int crypto_drbg_ctr_df(struct crypto_aes_ctx *aes,
+ unsigned char *df_data,
+ size_t bytes_to_return,
+ struct list_head *seedlist,
+ u8 blocklen_bytes,
+ u8 statelen);
+
+#endif /* _CRYPTO_DF80090A_H */
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
index d71e9858ab86..7b863e911cb4 100644
--- a/include/crypto/dh.h
+++ b/include/crypto/dh.h
@@ -24,21 +24,17 @@
*
* @key: Private DH key
* @p: Diffie-Hellman parameter P
- * @q: Diffie-Hellman parameter Q
* @g: Diffie-Hellman generator G
* @key_size: Size of the private DH key
* @p_size: Size of DH parameter P
- * @q_size: Size of DH parameter Q
* @g_size: Size of DH generator G
*/
struct dh {
- void *key;
- void *p;
- void *q;
- void *g;
+ const void *key;
+ const void *p;
+ const void *g;
unsigned int key_size;
unsigned int p_size;
- unsigned int q_size;
unsigned int g_size;
};
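
A caller-side sketch of packing a key with the now-const parameter pointers, assuming the crypto_dh_key_len() helper from the same header; priv/p/g and their lengths are assumed inputs:

	struct dh params = {
		.key = priv,	.key_size = priv_len,
		.p = p,		.p_size = p_len,
		.g = g,		.g_size = g_len,
	};
	unsigned int len = crypto_dh_key_len(&params);

	/* 'buf' must provide 'len' bytes; the packed key typically feeds
	 * crypto_kpp_set_secret().
	 */
	err = crypto_dh_encode_key(buf, len, &params);
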
@@ -83,4 +79,20 @@ int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params);
*/
int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params);
+/**
+ * __crypto_dh_decode_key() - decode a private key without parameter checks
+ * @buf: Buffer holding a packed key that should be decoded
+ * @len: Length of the packed private key buffer
+ * @params: Buffer allocated by the caller that is filled with the
+ * unpacked DH private key.
+ *
+ * Internal function providing the same services as the exported
+ * crypto_dh_decode_key(), but without any of those basic parameter
+ * checks conducted by the latter.
+ *
+ * Return: -EINVAL if buffer has insufficient size, 0 on success
+ */
+int __crypto_dh_decode_key(const char *buf, unsigned int len,
+ struct dh *params);
+
#endif
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index c4165126937e..2d42518cbdce 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -47,6 +47,7 @@
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/slab.h>
+#include <crypto/internal/drbg.h>
#include <crypto/internal/rng.h>
#include <crypto/rng.h>
#include <linux/fips.h>
@@ -54,30 +55,6 @@
#include <linux/list.h>
#include <linux/workqueue.h>
-/*
- * Concatenation Helper and string operation helper
- *
- * SP800-90A requires the concatenation of different data. To avoid copying
- * buffers around or allocate additional memory, the following data structure
- * is used to point to the original memory with its size. In addition, it
- * is used to build a linked list. The linked list defines the concatenation
- * of individual buffers. The order of memory block referenced in that
- * linked list determines the order of concatenation.
- */
-struct drbg_string {
- const unsigned char *buf;
- size_t len;
- struct list_head list;
-};
-
-static inline void drbg_string_fill(struct drbg_string *string,
- const unsigned char *buf, size_t len)
-{
- string->buf = buf;
- string->len = len;
- INIT_LIST_HEAD(&string->list);
-}
-
struct drbg_state;
typedef uint32_t drbg_flag_t;
@@ -105,6 +82,12 @@ struct drbg_test_data {
struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
};
+enum drbg_seed_state {
+ DRBG_SEED_STATE_UNSEEDED,
+ DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */
+ DRBG_SEED_STATE_FULL,
+};
+
struct drbg_state {
struct mutex drbg_mutex; /* lock around DRBG */
unsigned char *V; /* internal state 10.1.1.1 1a) */
@@ -127,16 +110,15 @@ struct drbg_state {
struct crypto_wait ctr_wait; /* CTR mode async wait obj */
struct scatterlist sg_in, sg_out; /* CTR mode SGLs */
- bool seeded; /* DRBG fully seeded? */
+	enum drbg_seed_state seeded;	/* DRBG seed state */
+ unsigned long last_seed_time;
bool pr; /* Prediction resistance enabled? */
bool fips_primed; /* Continuous test primed? */
unsigned char *prev; /* FIPS 140-2 continuous test value */
- struct work_struct seed_work; /* asynchronous seeding support */
struct crypto_rng *jent;
const struct drbg_state_ops *d_ops;
const struct drbg_core *core;
struct drbg_string test_data;
- struct random_ready_callback random_ready;
};
static inline __u8 drbg_statelen(struct drbg_state *drbg)
diff --git a/include/crypto/ecc_curve.h b/include/crypto/ecc_curve.h
index 70964781eb68..7d90c5e82266 100644
--- a/include/crypto/ecc_curve.h
+++ b/include/crypto/ecc_curve.h
@@ -23,6 +23,7 @@ struct ecc_point {
* struct ecc_curve - definition of elliptic curve
*
* @name: Short name of the curve.
+ * @nbits: The number of bits of the curve (e.g. 521 for NIST P-521).
* @g: Generator point of the curve.
* @p: Prime number, if Barrett's reduction is used for this curve
* pre-calculated value 'mu' is appended to the @p after ndigits.
@@ -34,6 +35,7 @@ struct ecc_point {
*/
struct ecc_curve {
char *name;
+ u32 nbits;
struct ecc_point g;
u64 *p;
u64 *n;
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h
index a9f98078d29c..9784ecdd2fb4 100644
--- a/include/crypto/ecdh.h
+++ b/include/crypto/ecdh.h
@@ -26,6 +26,7 @@
#define ECC_CURVE_NIST_P192 0x0001
#define ECC_CURVE_NIST_P256 0x0002
#define ECC_CURVE_NIST_P384 0x0003
+#define ECC_CURVE_NIST_P521 0x0004
/**
* struct ecdh - define an ECDH private key
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 3f06e40d063a..2e60344437da 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -7,86 +7,47 @@
#ifndef _CRYPTO_ENGINE_H
#define _CRYPTO_ENGINE_H
-#include <linux/crypto.h>
-#include <linux/list.h>
-#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/akcipher.h>
#include <crypto/hash.h>
+#include <crypto/kpp.h>
#include <crypto/skcipher.h>
+#include <linux/types.h>
-#define ENGINE_NAME_LEN 30
-/*
- * struct crypto_engine - crypto hardware engine
- * @name: the engine name
- * @idling: the engine is entering idle state
- * @busy: request pump is busy
- * @running: the engine is on working
- * @retry_support: indication that the hardware allows re-execution
- * of a failed backlog request
- * crypto-engine, in head position to keep order
- * @list: link with the global crypto engine list
- * @queue_lock: spinlock to syncronise access to request queue
- * @queue: the crypto queue of the engine
- * @rt: whether this queue is set to run as a realtime task
- * @prepare_crypt_hardware: a request will soon arrive from the queue
- * so the subsystem requests the driver to prepare the hardware
- * by issuing this call
- * @unprepare_crypt_hardware: there are currently no more requests on the
- * queue so the subsystem notifies the driver that it may relax the
- * hardware by issuing this call
- * @do_batch_requests: execute a batch of requests. Depends on multiple
- * requests support.
- * @kworker: kthread worker struct for request pump
- * @pump_requests: work struct for scheduling work to the request pump
- * @priv_data: the engine private data
- * @cur_req: the current request which is on processing
- */
-struct crypto_engine {
- char name[ENGINE_NAME_LEN];
- bool idling;
- bool busy;
- bool running;
-
- bool retry_support;
-
- struct list_head list;
- spinlock_t queue_lock;
- struct crypto_queue queue;
- struct device *dev;
-
- bool rt;
-
- int (*prepare_crypt_hardware)(struct crypto_engine *engine);
- int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
- int (*do_batch_requests)(struct crypto_engine *engine);
-
-
- struct kthread_worker *kworker;
- struct kthread_work pump_requests;
-
- void *priv_data;
- struct crypto_async_request *cur_req;
-};
+struct crypto_engine;
+struct device;
/*
* struct crypto_engine_op - crypto hardware engine operations
- * @prepare__request: do some prepare if need before handle the current request
- * @unprepare_request: undo any work done by prepare_request()
* @do_one_request: do encryption for current request
*/
struct crypto_engine_op {
- int (*prepare_request)(struct crypto_engine *engine,
- void *areq);
- int (*unprepare_request)(struct crypto_engine *engine,
- void *areq);
int (*do_one_request)(struct crypto_engine *engine,
void *areq);
};
-struct crypto_engine_ctx {
+struct aead_engine_alg {
+ struct aead_alg base;
+ struct crypto_engine_op op;
+};
+
+struct ahash_engine_alg {
+ struct ahash_alg base;
+ struct crypto_engine_op op;
+};
+
+struct akcipher_engine_alg {
+ struct akcipher_alg base;
+ struct crypto_engine_op op;
+};
+
+struct kpp_engine_alg {
+ struct kpp_alg base;
+ struct crypto_engine_op op;
+};
+
+struct skcipher_engine_alg {
+ struct skcipher_alg base;
struct crypto_engine_op op;
};
@@ -96,6 +57,8 @@ int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
struct akcipher_request *req);
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
struct ahash_request *req);
+int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
+ struct kpp_request *req);
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
struct skcipher_request *req);
void crypto_finalize_aead_request(struct crypto_engine *engine,
@@ -104,6 +67,8 @@ void crypto_finalize_akcipher_request(struct crypto_engine *engine,
struct akcipher_request *req, int err);
void crypto_finalize_hash_request(struct crypto_engine *engine,
struct ahash_request *req, int err);
+void crypto_finalize_kpp_request(struct crypto_engine *engine,
+ struct kpp_request *req, int err);
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
struct skcipher_request *req, int err);
int crypto_engine_start(struct crypto_engine *engine);
@@ -111,8 +76,31 @@ int crypto_engine_stop(struct crypto_engine *engine);
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool retry_support,
- int (*cbk_do_batch)(struct crypto_engine *engine),
bool rt, int qlen);
-int crypto_engine_exit(struct crypto_engine *engine);
+void crypto_engine_exit(struct crypto_engine *engine);
+
+int crypto_engine_register_aead(struct aead_engine_alg *alg);
+void crypto_engine_unregister_aead(struct aead_engine_alg *alg);
+int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count);
+void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count);
+
+int crypto_engine_register_ahash(struct ahash_engine_alg *alg);
+void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg);
+int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count);
+void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
+ int count);
+
+int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg);
+void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg);
+
+int crypto_engine_register_kpp(struct kpp_engine_alg *alg);
+void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg);
+
+int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg);
+void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg);
+int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
+ int count);
+void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
+ int count);
#endif /* _CRYPTO_ENGINE_H */
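Converting a driver to the new containers is mostly mechanical: fold the old prepare/unprepare work into @do_one_request and register the wrapped algorithm. A hedged sketch with illustrative example_* names:

static int example_do_one(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);

	/* program the hardware; once it completes: */
	crypto_finalize_skcipher_request(engine, req, 0);
	return 0;
}

static struct skcipher_engine_alg example_alg = {
	.base.base.cra_name		= "cbc(aes)",
	.base.base.cra_driver_name	= "cbc-aes-example",
	/* remaining skcipher_alg fields elided */
	.op.do_one_request		= example_do_one,
};

static int example_register(void)
{
	return crypto_engine_register_skcipher(&example_alg);
}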
diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h
index 9d7eff04f224..fd9df607a836 100644
--- a/include/crypto/gcm.h
+++ b/include/crypto/gcm.h
@@ -3,6 +3,9 @@
#include <linux/errno.h>
+#include <crypto/aes.h>
+#include <crypto/gf128mul.h>
+
#define GCM_AES_IV_SIZE 12
#define GCM_RFC4106_IV_SIZE 8
#define GCM_RFC4543_IV_SIZE 8
@@ -60,4 +63,23 @@ static inline int crypto_ipsec_check_assoclen(unsigned int assoclen)
return 0;
}
+
+struct aesgcm_ctx {
+ be128 ghash_key;
+ struct crypto_aes_ctx aes_ctx;
+ unsigned int authsize;
+};
+
+int aesgcm_expandkey(struct aesgcm_ctx *ctx, const u8 *key,
+ unsigned int keysize, unsigned int authsize);
+
+void aesgcm_encrypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src,
+ int crypt_len, const u8 *assoc, int assoc_len,
+ const u8 iv[GCM_AES_IV_SIZE], u8 *authtag);
+
+bool __must_check aesgcm_decrypt(const struct aesgcm_ctx *ctx, u8 *dst,
+ const u8 *src, int crypt_len, const u8 *assoc,
+ int assoc_len, const u8 iv[GCM_AES_IV_SIZE],
+ const u8 *authtag);
+
#endif
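The aesgcm_* interface is a small, synchronous AES-GCM library that works without the request machinery, which suits callers that cannot sleep or allocate. A sketch of one-shot sealing, assuming a 256-bit key and a full 16-byte tag:

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <linux/string.h>

static int example_seal(u8 *ct, u8 tag[16], const u8 *pt, int len,
			const u8 key[AES_KEYSIZE_256],
			const u8 iv[GCM_AES_IV_SIZE])
{
	struct aesgcm_ctx ctx;

	if (aesgcm_expandkey(&ctx, key, AES_KEYSIZE_256, 16))
		return -EINVAL;
	aesgcm_encrypt(&ctx, ct, pt, len, NULL, 0, iv, tag);
	memzero_explicit(&ctx, sizeof(ctx));
	return 0;
}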
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index 81330c6446f6..b0853f7cada0 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -158,12 +158,10 @@
64...71 72...79 80...87 88...95 96..103 104.111 112.119 120.127
*/
-/* A slow generic version of gf_mul, implemented for lle and bbe
+/* A slow generic version of gf_mul, implemented for lle
* It multiplies a and b and puts the result in a */
void gf128mul_lle(be128 *a, const be128 *b);
-void gf128mul_bbe(be128 *a, const be128 *b);
-
/*
* The following functions multiply a field element by x in
* the polynomial field representation. They use 64-bit word operations
@@ -224,9 +222,7 @@ struct gf128mul_4k {
};
struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
-struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t);
-void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t);
void gf128mul_x8_ble(le128 *r, const le128 *x);
static inline void gf128mul_free_4k(struct gf128mul_4k *t)
{
diff --git a/include/crypto/ghash.h b/include/crypto/ghash.h
index f832c9f2aca3..043d938e9a2c 100644
--- a/include/crypto/ghash.h
+++ b/include/crypto/ghash.h
@@ -7,18 +7,18 @@
#define __CRYPTO_GHASH_H__
#include <linux/types.h>
-#include <crypto/gf128mul.h>
#define GHASH_BLOCK_SIZE 16
#define GHASH_DIGEST_SIZE 16
+struct gf128mul_4k;
+
struct ghash_ctx {
struct gf128mul_4k *gf128;
};
struct ghash_desc_ctx {
u8 buffer[GHASH_BLOCK_SIZE];
- u32 bytes;
};
#endif
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index b2bc1e46e86a..586700332c73 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -9,8 +9,16 @@
#define _CRYPTO_HASH_H
#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/string.h>
+/* Set this bit for virtual address instead of SG list. */
+#define CRYPTO_AHASH_REQ_VIRT 0x00000001
+
+#define CRYPTO_AHASH_REQ_PRIVATE \
+ CRYPTO_AHASH_REQ_VIRT
+
struct crypto_ahash;
/**
@@ -22,7 +30,7 @@ struct crypto_ahash;
* crypto_unregister_shash().
*/
-/**
+/*
* struct hash_alg_common - define properties of message digest
* @digestsize: Size of the result of the transformation. A buffer of this size
* must be available to the @final and @finup calls, so they can
@@ -39,22 +47,27 @@ struct crypto_ahash;
* The hash_alg_common data structure now adds the hash-specific
* information.
*/
-struct hash_alg_common {
- unsigned int digestsize;
- unsigned int statesize;
-
- struct crypto_alg base;
-};
+#define HASH_ALG_COMMON { \
+ unsigned int digestsize; \
+ unsigned int statesize; \
+ \
+ struct crypto_alg base; \
+}
+struct hash_alg_common HASH_ALG_COMMON;
struct ahash_request {
struct crypto_async_request base;
unsigned int nbytes;
- struct scatterlist *src;
+ union {
+ struct scatterlist *src;
+ const u8 *svirt;
+ };
u8 *result;
- /* This field may only be used by the ahash API code. */
- void *priv;
+ struct scatterlist sg_head[2];
+ crypto_completion_t saved_complete;
+ void *saved_data;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
@@ -76,6 +89,8 @@ struct ahash_request {
* transformation object. Data processing can happen synchronously
* [SHASH] or asynchronously [AHASH] at this point. Driver must not use
* req->result.
+ * For block-only algorithms, @update must return the number
+ * of bytes to store in the API partial block buffer.
* @final: **[mandatory]** Retrieve result from the driver. This function finalizes the
* transformation and retrieves the resulting hash from the driver and
* pushes it back to upper layers. No data processing happens at this
@@ -118,6 +133,10 @@ struct ahash_request {
* data so the transformation can continue from this point onward. No
* data processing happens at this point. Driver must not use
* req->result.
+ * @export_core: Export partial state without partial block. Only defined
+ * for algorithms that are not block-only.
+ * @import_core: Import partial state without partial block. Only defined
+ * for algorithms that are not block-only.
* @init_tfm: Initialize the cryptographic transformation object.
* This function is called only once at the instantiation
* time, right after the transformation context was
@@ -129,6 +148,7 @@ struct ahash_request {
* @exit_tfm: Deinitialize the cryptographic transformation object.
* This is a counterpart to @init_tfm, used to remove
* various changes set in @init_tfm.
+ * @clone_tfm: Copy transform into new object, may allocate memory.
* @halg: see struct hash_alg_common
*/
struct ahash_alg {
@@ -139,10 +159,13 @@ struct ahash_alg {
int (*digest)(struct ahash_request *req);
int (*export)(struct ahash_request *req, void *out);
int (*import)(struct ahash_request *req, const void *in);
+ int (*export_core)(struct ahash_request *req, void *out);
+ int (*import_core)(struct ahash_request *req, const void *in);
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
int (*init_tfm)(struct crypto_ahash *tfm);
void (*exit_tfm)(struct crypto_ahash *tfm);
+ int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src);
struct hash_alg_common halg;
};
@@ -155,18 +178,44 @@ struct shash_desc {
#define HASH_MAX_DIGESTSIZE 64
/*
- * Worst case is hmac(sha3-224-generic). Its context is a nested 'shash_desc'
- * containing a 'struct sha3_state'.
+ * The size of a core hash state and a partial block. The final byte
+ * is the length of the partial block.
*/
-#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360)
+#define HASH_STATE_AND_BLOCK(state, block) ((state) + (block) + 1)
+
-#define HASH_MAX_STATESIZE 512
+/* Worst case is sha3-224. */
+#define HASH_MAX_STATESIZE HASH_STATE_AND_BLOCK(200, 144)
+
+/* This needs to match arch/s390/crypto/sha.h. */
+#define S390_SHA_CTX_SIZE 216
+
+/*
+ * Worst case is hmac(sha3-224-s390). Its context is a nested 'shash_desc'
+ * containing a 'struct s390_sha_ctx'.
+ */
+#define SHA3_224_S390_DESCSIZE HASH_STATE_AND_BLOCK(S390_SHA_CTX_SIZE, 144)
+#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + \
+ SHA3_224_S390_DESCSIZE)
+#define MAX_SYNC_HASH_REQSIZE (sizeof(struct ahash_request) + \
+ HASH_MAX_DESCSIZE)
#define SHASH_DESC_ON_STACK(shash, ctx) \
char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
__aligned(__alignof__(struct shash_desc)); \
struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
+#define HASH_REQUEST_ON_STACK(name, _tfm) \
+ char __##name##_req[sizeof(struct ahash_request) + \
+ MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct ahash_request *name = \
+ ahash_request_on_stack_init(__##name##_req, (_tfm))
+
+#define HASH_REQUEST_CLONE(name, gfp) \
+ hash_request_clone(name, sizeof(__##name##_req), gfp)
+
+#define CRYPTO_HASH_STATESIZE(coresize, blocksize) ((coresize) + (blocksize) + 1)
+
/**
* struct shash_alg - synchronous message digest definition
* @init: see struct ahash_alg
@@ -176,6 +225,8 @@ struct shash_desc {
* @digest: see struct ahash_alg
* @export: see struct ahash_alg
* @import: see struct ahash_alg
+ * @export_core: see struct ahash_alg
+ * @import_core: see struct ahash_alg
* @setkey: see struct ahash_alg
* @init_tfm: Initialize the cryptographic transformation object.
* This function is called only once at the instantiation
@@ -188,12 +239,12 @@ struct shash_desc {
* @exit_tfm: Deinitialize the cryptographic transformation object.
* This is a counterpart to @init_tfm, used to remove
* various changes set in @init_tfm.
- * @digestsize: see struct ahash_alg
- * @statesize: see struct ahash_alg
+ * @clone_tfm: Copy transform into new object, may allocate memory.
* @descsize: Size of the operational state for the message digest. This state
* size is the memory size that needs to be allocated for
* shash_desc.__ctx
- * @base: internally used
+ * @halg: see struct hash_alg_common
+ * @HASH_ALG_COMMON: see struct hash_alg_common
*/
struct shash_alg {
int (*init)(struct shash_desc *desc);
@@ -206,38 +257,31 @@ struct shash_alg {
unsigned int len, u8 *out);
int (*export)(struct shash_desc *desc, void *out);
int (*import)(struct shash_desc *desc, const void *in);
+ int (*export_core)(struct shash_desc *desc, void *out);
+ int (*import_core)(struct shash_desc *desc, const void *in);
int (*setkey)(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
int (*init_tfm)(struct crypto_shash *tfm);
void (*exit_tfm)(struct crypto_shash *tfm);
+ int (*clone_tfm)(struct crypto_shash *dst, struct crypto_shash *src);
unsigned int descsize;
- /* These fields must match hash_alg_common. */
- unsigned int digestsize
- __attribute__ ((aligned(__alignof__(struct hash_alg_common))));
- unsigned int statesize;
-
- struct crypto_alg base;
+ union {
+ struct HASH_ALG_COMMON;
+ struct hash_alg_common halg;
+ };
};
+#undef HASH_ALG_COMMON
struct crypto_ahash {
- int (*init)(struct ahash_request *req);
- int (*update)(struct ahash_request *req);
- int (*final)(struct ahash_request *req);
- int (*finup)(struct ahash_request *req);
- int (*digest)(struct ahash_request *req);
- int (*export)(struct ahash_request *req, void *out);
- int (*import)(struct ahash_request *req, const void *in);
- int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen);
-
+ bool using_shash; /* Underlying algorithm is shash, not ahash */
+ unsigned int statesize;
unsigned int reqsize;
struct crypto_tfm base;
};
struct crypto_shash {
- unsigned int descsize;
struct crypto_tfm base;
};
@@ -251,6 +295,11 @@ struct crypto_shash {
* CRYPTO_ALG_TYPE_SKCIPHER API applies here as well.
*/
+static inline bool ahash_req_on_stack(struct ahash_request *req)
+{
+ return crypto_req_on_stack(&req->base);
+}
+
static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_ahash, base);
@@ -273,6 +322,8 @@ static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
u32 mask);
+struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *tfm);
+
static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
{
return &tfm->base;
@@ -311,12 +362,6 @@ static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm)
return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
}
-static inline unsigned int crypto_ahash_alignmask(
- struct crypto_ahash *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm));
-}
-
/**
* crypto_ahash_blocksize() - obtain block size for cipher
* @tfm: cipher handle
@@ -370,7 +415,7 @@ static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
*/
static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
{
- return crypto_hash_alg_common(tfm)->statesize;
+ return tfm->statesize;
}
static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
@@ -458,11 +503,15 @@ int crypto_ahash_finup(struct ahash_request *req);
*
* Return:
* 0 if the message digest was successfully calculated;
- * -EINPROGRESS if data is feeded into hardware (DMA) or queued for later;
+ * -EINPROGRESS if data is fed into hardware (DMA) or queued for later;
* -EBUSY if queue is full and request should be resubmitted later;
* other < 0 if an error occurred
*/
-int crypto_ahash_final(struct ahash_request *req);
+static inline int crypto_ahash_final(struct ahash_request *req)
+{
+ req->nbytes = 0;
+ return crypto_ahash_finup(req);
+}
/**
* crypto_ahash_digest() - calculate message digest for a buffer
@@ -488,10 +537,7 @@ int crypto_ahash_digest(struct ahash_request *req);
*
* Return: 0 if the export was successful; < 0 if an error occurred
*/
-static inline int crypto_ahash_export(struct ahash_request *req, void *out)
-{
- return crypto_ahash_reqtfm(req)->export(req, out);
-}
+int crypto_ahash_export(struct ahash_request *req, void *out);
/**
* crypto_ahash_import() - import message digest state
@@ -504,15 +550,7 @@ static inline int crypto_ahash_export(struct ahash_request *req, void *out)
*
* Return: 0 if the import was successful; < 0 if an error occurred
*/
-static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
- if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return tfm->import(req, in);
-}
+int crypto_ahash_import(struct ahash_request *req, const void *in);
/**
* crypto_ahash_init() - (re)initialize message digest handle
@@ -525,15 +563,7 @@ static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
*
* Return: see crypto_ahash_final()
*/
-static inline int crypto_ahash_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
- if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return tfm->init(req);
-}
+int crypto_ahash_init(struct ahash_request *req);
/**
* crypto_ahash_update() - add data to message digest for processing
@@ -546,18 +576,7 @@ static inline int crypto_ahash_init(struct ahash_request *req)
*
* Return: see crypto_ahash_final()
*/
-static inline int crypto_ahash_update(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
-
- crypto_stats_get(alg);
- ret = crypto_ahash_reqtfm(req)->update(req);
- crypto_stats_ahash_update(nbytes, ret, alg);
- return ret;
-}
+int crypto_ahash_update(struct ahash_request *req);
/**
* DOC: Asynchronous Hash Request Handle
@@ -581,7 +600,7 @@ static inline int crypto_ahash_update(struct ahash_request *req)
static inline void ahash_request_set_tfm(struct ahash_request *req,
struct crypto_ahash *tfm)
{
- req->base.tfm = crypto_ahash_tfm(tfm);
+ crypto_request_set_tfm(&req->base, crypto_ahash_tfm(tfm));
}
/**
@@ -596,28 +615,26 @@ static inline void ahash_request_set_tfm(struct ahash_request *req,
*
* Return: allocated request handle in case of success, or NULL if out of memory
*/
-static inline struct ahash_request *ahash_request_alloc(
+static inline struct ahash_request *ahash_request_alloc_noprof(
struct crypto_ahash *tfm, gfp_t gfp)
{
struct ahash_request *req;
- req = kmalloc(sizeof(struct ahash_request) +
- crypto_ahash_reqsize(tfm), gfp);
+ req = kmalloc_noprof(sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(tfm), gfp);
if (likely(req))
ahash_request_set_tfm(req, tfm);
return req;
}
+#define ahash_request_alloc(...) alloc_hooks(ahash_request_alloc_noprof(__VA_ARGS__))
/**
* ahash_request_free() - zeroize and free the request data structure
* @req: request data structure cipher handle to be freed
*/
-static inline void ahash_request_free(struct ahash_request *req)
-{
- kfree_sensitive(req);
-}
+void ahash_request_free(struct ahash_request *req);
static inline void ahash_request_zero(struct ahash_request *req)
{
@@ -661,9 +678,9 @@ static inline void ahash_request_set_callback(struct ahash_request *req,
crypto_completion_t compl,
void *data)
{
- req->base.complete = compl;
- req->base.data = data;
- req->base.flags = flags;
+ flags &= ~CRYPTO_AHASH_REQ_PRIVATE;
+ flags |= req->base.flags & CRYPTO_AHASH_REQ_PRIVATE;
+ crypto_request_set_callback(&req->base, flags, compl, data);
}
/**
@@ -686,6 +703,30 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
req->src = src;
req->nbytes = nbytes;
req->result = result;
+ req->base.flags &= ~CRYPTO_AHASH_REQ_VIRT;
+}
+
+/**
+ * ahash_request_set_virt() - set virtual address data buffers
+ * @req: ahash_request handle to be updated
+ * @src: source virtual address
+ * @result: buffer that is filled with the message digest -- the caller must
+ * ensure that the buffer has sufficient space by, for example, calling
+ * crypto_ahash_digestsize()
+ * @nbytes: number of bytes to process from the source virtual address
+ *
+ * By using this call, the caller references the source virtual address.
+ * The source virtual address points to the data the message digest is to
+ * be calculated for.
+ */
+static inline void ahash_request_set_virt(struct ahash_request *req,
+ const u8 *src, u8 *result,
+ unsigned int nbytes)
+{
+ req->svirt = src;
+ req->nbytes = nbytes;
+ req->result = result;
+ req->base.flags |= CRYPTO_AHASH_REQ_VIRT;
}
/**
@@ -718,6 +759,10 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
u32 mask);
+struct crypto_shash *crypto_clone_shash(struct crypto_shash *tfm);
+
+int crypto_has_shash(const char *alg_name, u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
{
return &tfm->base;
@@ -744,12 +789,6 @@ static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm)
return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
}
-static inline unsigned int crypto_shash_alignmask(
- struct crypto_shash *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
-}
-
/**
* crypto_shash_blocksize() - obtain block size for cipher
* @tfm: cipher handle
@@ -825,7 +864,7 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
*/
static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
{
- return tfm->descsize;
+ return crypto_shash_alg(tfm)->descsize;
}
static inline void *shash_desc_ctx(struct shash_desc *desc)
@@ -843,7 +882,7 @@ static inline void *shash_desc_ctx(struct shash_desc *desc)
* cipher handle must point to a keyed message digest cipher in order for this
* function to succeed.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -860,7 +899,7 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
* crypto_shash_update and crypto_shash_final. The parameters have the same
* meaning as discussed for those separate three functions.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
@@ -880,12 +919,15 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
* directly, and it allocates a hash descriptor on the stack internally.
* Note that this stack allocation may be fairly large.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 on success; < 0 if an error occurred.
*/
int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
unsigned int len, u8 *out);
+int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
+ unsigned int len, u8 *out);
+
/**
* crypto_shash_export() - extract operational state for message digest
* @desc: reference to the operational state handle whose state is exported
@@ -895,13 +937,10 @@ int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
* caller-allocated output buffer out which must have sufficient size (e.g. by
* calling crypto_shash_descsize).
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the export creation was successful; < 0 if an error occurred
*/
-static inline int crypto_shash_export(struct shash_desc *desc, void *out)
-{
- return crypto_shash_alg(desc->tfm)->export(desc, out);
-}
+int crypto_shash_export(struct shash_desc *desc, void *out);
/**
* crypto_shash_import() - import operational state
@@ -912,18 +951,10 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out)
* the input buffer. That buffer should have been generated with the
* crypto_ahash_export function.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the import was successful; < 0 if an error occurred
*/
-static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
-{
- struct crypto_shash *tfm = desc->tfm;
-
- if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return crypto_shash_alg(tfm)->import(desc, in);
-}
+int crypto_shash_import(struct shash_desc *desc, const void *in);
/**
* crypto_shash_init() - (re)initialize message digest
@@ -933,19 +964,29 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
* operational state handle. Any potentially existing state created by
* previous operations is discarded.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest initialization was successful; < 0 if an
* error occurred
*/
-static inline int crypto_shash_init(struct shash_desc *desc)
-{
- struct crypto_shash *tfm = desc->tfm;
-
- if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
+int crypto_shash_init(struct shash_desc *desc);
- return crypto_shash_alg(tfm)->init(desc);
-}
+/**
+ * crypto_shash_finup() - calculate message digest of buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
+int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out);
/**
* crypto_shash_update() - add data to message digest for processing
@@ -955,12 +996,15 @@ static inline int crypto_shash_init(struct shash_desc *desc)
*
* Updates the message digest state of the operational state handle.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest update was successful; < 0 if an error
* occurred
*/
-int crypto_shash_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
+static inline int crypto_shash_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ return crypto_shash_finup(desc, data, len, NULL);
+}
/**
* crypto_shash_final() - calculate message digest
@@ -972,29 +1016,14 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data,
* into the output buffer. The caller must ensure that the output buffer is
* large enough by using crypto_shash_digestsize.
*
- * Context: Any context.
+ * Context: Softirq or process context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
-int crypto_shash_final(struct shash_desc *desc, u8 *out);
-
-/**
- * crypto_shash_finup() - calculate message digest of buffer
- * @desc: see crypto_shash_final()
- * @data: see crypto_shash_update()
- * @len: see crypto_shash_update()
- * @out: see crypto_shash_final()
- *
- * This function is a "short-hand" for the function calls of
- * crypto_shash_update and crypto_shash_final. The parameters have the same
- * meaning as discussed for those separate functions.
- *
- * Context: Any context.
- * Return: 0 if the message digest creation was successful; < 0 if an error
- * occurred
- */
-int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out);
+static inline int crypto_shash_final(struct shash_desc *desc, u8 *out)
+{
+ return crypto_shash_finup(desc, NULL, 0, out);
+}
static inline void shash_desc_zero(struct shash_desc *desc)
{
@@ -1002,4 +1031,25 @@ static inline void shash_desc_zero(struct shash_desc *desc)
sizeof(*desc) + crypto_shash_descsize(desc->tfm));
}
+static inline bool ahash_is_async(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_is_async(&tfm->base);
+}
+
+static inline struct ahash_request *ahash_request_on_stack_init(
+ char *buf, struct crypto_ahash *tfm)
+{
+ struct ahash_request *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base, crypto_ahash_tfm(tfm));
+ return req;
+}
+
+static inline struct ahash_request *ahash_request_clone(
+ struct ahash_request *req, size_t total, gfp_t gfp)
+{
+ return container_of(crypto_request_clone(&req->base, total, gfp),
+ struct ahash_request, base);
+}
+
#endif /* _CRYPTO_HASH_H */
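Combined, the on-stack request and the virtual-address flag allow a one-shot digest over a linear buffer with no allocation; the crypto_hash_digest() helper declared above wraps the same idea. A sketch, assuming the underlying implementation can run synchronously:

static int example_digest(struct crypto_ahash *tfm, const u8 *data,
			  unsigned int len, u8 *out)
{
	HASH_REQUEST_ON_STACK(req, tfm);
	int err;

	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_virt(req, data, out, len);
	err = crypto_ahash_digest(req);
	ahash_request_zero(req);
	return err;
}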
diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h
index dd4f06785049..d6927739f8b2 100644
--- a/include/crypto/hash_info.h
+++ b/include/crypto/hash_info.h
@@ -10,6 +10,7 @@
#include <crypto/sha1.h>
#include <crypto/sha2.h>
+#include <crypto/sha3.h>
#include <crypto/md5.h>
#include <crypto/streebog.h>
diff --git a/include/crypto/hkdf.h b/include/crypto/hkdf.h
new file mode 100644
index 000000000000..6a9678f508f5
--- /dev/null
+++ b/include/crypto/hkdf.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * HKDF: HMAC-based Key Derivation Function (HKDF), RFC 5869
+ *
+ * Extracted from fs/crypto/hkdf.c, which has
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef _CRYPTO_HKDF_H
+#define _CRYPTO_HKDF_H
+
+#include <crypto/hash.h>
+
+int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm,
+ unsigned int ikmlen, const u8 *salt, unsigned int saltlen,
+ u8 *prk);
+int hkdf_expand(struct crypto_shash *hmac_tfm,
+ const u8 *info, unsigned int infolen,
+ u8 *okm, unsigned int okmlen);
+#endif
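The split mirrors RFC 5869: hkdf_extract() condenses the input keying material into a pseudorandom key, after which the caller keys the HMAC transform with that PRK and calls hkdf_expand() once per output. A sketch with hmac(sha256); the info label is illustrative:

#include <crypto/hash.h>
#include <crypto/hkdf.h>
#include <crypto/sha2.h>
#include <linux/err.h>
#include <linux/string.h>

static int example_hkdf_sha256(const u8 *ikm, unsigned int ikmlen,
			       const u8 *salt, unsigned int saltlen,
			       u8 *okm, unsigned int okmlen)
{
	static const u8 info[] = "example label";
	struct crypto_shash *hmac;
	u8 prk[SHA256_DIGEST_SIZE];
	int err;

	hmac = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(hmac))
		return PTR_ERR(hmac);

	err = hkdf_extract(hmac, ikm, ikmlen, salt, saltlen, prk);
	if (!err)
		err = crypto_shash_setkey(hmac, prk, sizeof(prk));
	if (!err)
		err = hkdf_expand(hmac, info, sizeof(info) - 1, okm, okmlen);

	memzero_explicit(prk, sizeof(prk));
	crypto_free_shash(hmac);
	return err;
}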
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index a5db86670bdf..107b797c33ec 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -21,8 +21,6 @@
#define ALG_MAX_PAGES 16
-struct crypto_async_request;
-
struct alg_sock {
/* struct sock must be the first member of struct alg_sock */
struct sock sk;
@@ -58,9 +56,9 @@ struct af_alg_type {
};
struct af_alg_sgl {
- struct scatterlist sg[ALG_MAX_PAGES + 1];
- struct page *pages[ALG_MAX_PAGES];
- unsigned int npages;
+ struct sg_table sgt;
+ struct scatterlist sgl[ALG_MAX_PAGES + 1];
+ bool need_unpin;
};
/* TX SGL entry */
@@ -123,6 +121,7 @@ struct af_alg_async_req {
*
* @tsgl_list: Link to TX SGL
* @iv: IV for cipher operation
+ * @state: Existing state for continuing operation
* @aead_assoclen: Length of AAD for AEAD cipher operations
* @completion: Work queue for synchronous operation
* @used: TX bytes sent to kernel. This variable is used to
@@ -136,13 +135,16 @@ struct af_alg_async_req {
* SG?
* @enc: Cryptographic operation to be performed when
* recvmsg is invoked.
+ * @write: True if we are in the middle of a write.
* @init: True if metadata has been sent.
* @len: Length of memory allocated for this data structure.
+ * @inflight: Non-zero when AIO requests are in flight.
*/
struct af_alg_ctx {
struct list_head tsgl_list;
void *iv;
+ void *state;
size_t aead_assoclen;
struct crypto_wait wait;
@@ -150,12 +152,15 @@ struct af_alg_ctx {
size_t used;
atomic_t rcvused;
- bool more;
- bool merge;
- bool enc;
- bool init;
+ bool more:1,
+ merge:1,
+ enc:1,
+ write:1,
+ init:1;
unsigned int len;
+
+ unsigned int inflight;
};
int af_alg_register_type(const struct af_alg_type *type);
@@ -163,9 +168,9 @@ int af_alg_unregister_type(const struct af_alg_type *type);
int af_alg_release(struct socket *sock);
void af_alg_release_parent(struct sock *sk);
-int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
+int af_alg_accept(struct sock *sk, struct socket *newsock,
+ struct proto_accept_arg *arg);
-int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
void af_alg_free_sg(struct af_alg_sgl *sgl);
static inline struct alg_sock *alg_sk(struct sock *sk)
@@ -232,10 +237,8 @@ void af_alg_wmem_wakeup(struct sock *sk);
int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
unsigned int ivsize);
-ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
- int offset, size_t size, int flags);
void af_alg_free_resources(struct af_alg_async_req *areq);
-void af_alg_async_cb(struct crypto_async_request *_req, int err);
+void af_alg_async_cb(void *data, int err);
__poll_t af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index cfc47e18820f..2d97440028ff 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -8,7 +8,98 @@
*/
#ifndef _CRYPTO_ACOMP_INT_H
#define _CRYPTO_ACOMP_INT_H
+
#include <crypto/acompress.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <linux/compiler_types.h>
+#include <linux/cpumask_types.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue_types.h>
+
+#define ACOMP_FBREQ_ON_STACK(name, req) \
+ char __##name##_req[sizeof(struct acomp_req) + \
+ MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct acomp_req *name = acomp_fbreq_on_stack_init( \
+ __##name##_req, (req))
+
+/**
+ * struct acomp_alg - asynchronous compression algorithm
+ *
+ * @compress: Function performs a compress operation
+ * @decompress: Function performs a de-compress operation
+ * @init: Initialize the cryptographic transformation object.
+ * This function is used to initialize the cryptographic
+ * transformation object. This function is called only once at
+ * the instantiation time, right after the transformation context
+ * was allocated. In case the cryptographic hardware has some
+ * special requirements which need to be handled by software, this
+ * function shall check for the precise requirement of the
+ * transformation and put any software fallbacks in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * @base: Common crypto API algorithm data structure
+ * @calg: Common algorithm data structure shared with scomp
+ */
+struct acomp_alg {
+ int (*compress)(struct acomp_req *req);
+ int (*decompress)(struct acomp_req *req);
+ int (*init)(struct crypto_acomp *tfm);
+ void (*exit)(struct crypto_acomp *tfm);
+
+ union {
+ struct COMP_ALG_COMMON;
+ struct comp_alg_common calg;
+ };
+};
+
+struct crypto_acomp_stream {
+ spinlock_t lock;
+ void *ctx;
+};
+
+struct crypto_acomp_streams {
+ /* These must come first because of struct scomp_alg. */
+ void *(*alloc_ctx)(void);
+ void (*free_ctx)(void *);
+
+ struct crypto_acomp_stream __percpu *streams;
+ struct work_struct stream_work;
+ cpumask_t stream_want;
+};
+
+struct acomp_walk {
+ union {
+ /* Virtual address of the source. */
+ struct {
+ struct {
+ const void *const addr;
+ } virt;
+ } src;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk in;
+ };
+
+ union {
+ /* Virtual address of the destination. */
+ struct {
+ struct {
+ void *const addr;
+ } virt;
+ } dst;
+
+ /* Private field for the API, do not use. */
+ struct scatter_walk out;
+ };
+
+ unsigned int slen;
+ unsigned int dlen;
+
+ int flags;
+};
/*
* Transform internal helpers.
@@ -26,27 +117,7 @@ static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
static inline void acomp_request_complete(struct acomp_req *req,
int err)
{
- req->base.complete(&req->base, err);
-}
-
-static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
-{
- return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
-}
-
-static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
-{
- struct acomp_req *req;
-
- req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
- if (likely(req))
- acomp_request_set_tfm(req, tfm);
- return req;
-}
-
-static inline void __acomp_request_free(struct acomp_req *req)
-{
- kfree_sensitive(req);
+ crypto_request_complete(&req->base, err);
}
/**
@@ -74,4 +145,100 @@ void crypto_unregister_acomp(struct acomp_alg *alg);
int crypto_register_acomps(struct acomp_alg *algs, int count);
void crypto_unregister_acomps(struct acomp_alg *algs, int count);
+static inline bool acomp_request_issg(struct acomp_req *req)
+{
+ return !(req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
+ CRYPTO_ACOMP_REQ_DST_VIRT));
+}
+
+static inline bool acomp_request_src_isvirt(struct acomp_req *req)
+{
+ return req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT;
+}
+
+static inline bool acomp_request_dst_isvirt(struct acomp_req *req)
+{
+ return req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT;
+}
+
+static inline bool acomp_request_isvirt(struct acomp_req *req)
+{
+ return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
+ CRYPTO_ACOMP_REQ_DST_VIRT);
+}
+
+static inline bool acomp_request_src_isnondma(struct acomp_req *req)
+{
+ return req->base.flags & CRYPTO_ACOMP_REQ_SRC_NONDMA;
+}
+
+static inline bool acomp_request_dst_isnondma(struct acomp_req *req)
+{
+ return req->base.flags & CRYPTO_ACOMP_REQ_DST_NONDMA;
+}
+
+static inline bool acomp_request_isnondma(struct acomp_req *req)
+{
+ return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_NONDMA |
+ CRYPTO_ACOMP_REQ_DST_NONDMA);
+}
+
+static inline bool crypto_acomp_req_virt(struct crypto_acomp *tfm)
+{
+ return crypto_tfm_req_virt(&tfm->base);
+}
+
+void crypto_acomp_free_streams(struct crypto_acomp_streams *s);
+int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s);
+
+struct crypto_acomp_stream *crypto_acomp_lock_stream_bh(
+ struct crypto_acomp_streams *s) __acquires(stream);
+
+static inline void crypto_acomp_unlock_stream_bh(
+ struct crypto_acomp_stream *stream) __releases(stream)
+{
+ spin_unlock_bh(&stream->lock);
+}
+
+void acomp_walk_done_src(struct acomp_walk *walk, int used);
+void acomp_walk_done_dst(struct acomp_walk *walk, int used);
+int acomp_walk_next_src(struct acomp_walk *walk);
+int acomp_walk_next_dst(struct acomp_walk *walk);
+int acomp_walk_virt(struct acomp_walk *__restrict walk,
+ struct acomp_req *__restrict req, bool atomic);
+
+static inline bool acomp_walk_more_src(const struct acomp_walk *walk, int cur)
+{
+ return walk->slen != cur;
+}
+
+static inline u32 acomp_request_flags(struct acomp_req *req)
+{
+ return crypto_request_flags(&req->base) & ~CRYPTO_ACOMP_REQ_PRIVATE;
+}
+
+static inline struct crypto_acomp *crypto_acomp_fb(struct crypto_acomp *tfm)
+{
+ return __crypto_acomp_tfm(crypto_acomp_tfm(tfm)->fb);
+}
+
+static inline struct acomp_req *acomp_fbreq_on_stack_init(
+ char *buf, struct acomp_req *old)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(old);
+ struct acomp_req *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base,
+ crypto_acomp_tfm(crypto_acomp_fb(tfm)));
+ acomp_request_set_callback(req, acomp_request_flags(old), NULL, NULL);
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_PRIVATE;
+ req->base.flags |= old->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;
+ req->src = old->src;
+ req->dst = old->dst;
+ req->slen = old->slen;
+ req->dlen = old->dlen;
+
+ return req;
+}
+
#endif
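The walk helpers follow the shape of skcipher_walk: initialise once, then consume the source and fill the destination in chunks, regardless of whether the request carries scatterlists or virtual addresses. A hedged sketch of draining the source side, assuming acomp_walk_next_src() returns the chunk size (per-chunk processing elided):

static int example_drain_src(struct acomp_req *req)
{
	struct acomp_walk walk;
	int ret;

	ret = acomp_walk_virt(&walk, req, true);
	if (ret)
		return ret;

	while ((ret = acomp_walk_next_src(&walk)) > 0) {
		/* ret bytes of input are mapped at walk.src.virt.addr */
		acomp_walk_done_src(&walk, ret);
	}
	return ret;
}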
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index 27b7b0224ea6..28a95eb3182d 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -39,6 +39,11 @@ static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
return crypto_tfm_ctx(&tfm->base);
}
+static inline void *crypto_aead_ctx_dma(struct crypto_aead *tfm)
+{
+ return crypto_tfm_ctx_dma(&tfm->base);
+}
+
static inline struct crypto_instance *aead_crypto_instance(
struct aead_instance *inst)
{
@@ -65,9 +70,19 @@ static inline void *aead_request_ctx(struct aead_request *req)
return req->__ctx;
}
+static inline void *aead_request_ctx_dma(struct aead_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(aead_request_ctx(req), align);
+}
+
static inline void aead_request_complete(struct aead_request *req, int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline u32 aead_request_flags(struct aead_request *req)
@@ -108,35 +123,17 @@ static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
aead->reqsize = reqsize;
}
-static inline void aead_init_queue(struct aead_queue *queue,
- unsigned int max_qlen)
+static inline void crypto_aead_set_reqsize_dma(struct crypto_aead *aead,
+ unsigned int reqsize)
{
- crypto_init_queue(&queue->base, max_qlen);
-}
-
-static inline int aead_enqueue_request(struct aead_queue *queue,
- struct aead_request *request)
-{
- return crypto_enqueue_request(&queue->base, &request->base);
-}
-
-static inline struct aead_request *aead_dequeue_request(
- struct aead_queue *queue)
-{
- struct crypto_async_request *req;
-
- req = crypto_dequeue_request(&queue->base);
-
- return req ? container_of(req, struct aead_request, base) : NULL;
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ aead->reqsize = reqsize;
}
-static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
+static inline void aead_init_queue(struct aead_queue *queue,
+ unsigned int max_qlen)
{
- struct crypto_async_request *req;
-
- req = crypto_get_backlog(&queue->base);
-
- return req ? container_of(req, struct aead_request, base) : NULL;
+ crypto_init_queue(&queue->base, max_qlen);
}
static inline unsigned int crypto_aead_alg_chunksize(struct aead_alg *alg)
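The _dma variants over-allocate the request context so it can be re-aligned to crypto_dma_align() instead of the default tfm context alignment. A hypothetical driver pairs them like this (example_* names are illustrative):

struct example_req_ctx {
	u8 hw_desc[64];
};

static int example_init_tfm(struct crypto_aead *aead)
{
	crypto_aead_set_reqsize_dma(aead, sizeof(struct example_req_ctx));
	return 0;
}

static int example_encrypt(struct aead_request *req)
{
	struct example_req_ctx *rctx = aead_request_ctx_dma(req);

	/* rctx->hw_desc is now suitably aligned to hand to the device */
	return 0;
}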
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
index 8d3220c9ab77..14ee62bc52b6 100644
--- a/include/crypto/internal/akcipher.h
+++ b/include/crypto/internal/akcipher.h
@@ -33,21 +33,43 @@ static inline void *akcipher_request_ctx(struct akcipher_request *req)
return req->__ctx;
}
+static inline void *akcipher_request_ctx_dma(struct akcipher_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(akcipher_request_ctx(req), align);
+}
+
static inline void akcipher_set_reqsize(struct crypto_akcipher *akcipher,
unsigned int reqsize)
{
- crypto_akcipher_alg(akcipher)->reqsize = reqsize;
+ akcipher->reqsize = reqsize;
+}
+
+static inline void akcipher_set_reqsize_dma(struct crypto_akcipher *akcipher,
+ unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ akcipher->reqsize = reqsize;
}
static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm)
{
- return tfm->base.__crt_ctx;
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *akcipher_tfm_ctx_dma(struct crypto_akcipher *tfm)
+{
+ return crypto_tfm_ctx_dma(&tfm->base);
}
static inline void akcipher_request_complete(struct akcipher_request *req,
int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline const char *akcipher_alg_name(struct crypto_akcipher *tfm)
@@ -102,7 +124,7 @@ static inline struct akcipher_alg *crypto_spawn_akcipher_alg(
/**
* crypto_register_akcipher() -- Register public key algorithm
*
- * Function registers an implementation of a public key verify algorithm
+ * Function registers an implementation of a public key cipher algorithm
*
* @alg: algorithm definition
*
@@ -113,7 +135,7 @@ int crypto_register_akcipher(struct akcipher_alg *alg);
/**
* crypto_unregister_akcipher() -- Unregister public key algorithm
*
- * Function unregisters an implementation of a public key verify algorithm
+ * Function unregisters an implementation of a public key cipher algorithm
*
* @alg: algorithm definition
*/
diff --git a/include/crypto/internal/blake2b.h b/include/crypto/internal/blake2b.h
deleted file mode 100644
index 982fe5e8471c..000000000000
--- a/include/crypto/internal/blake2b.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/*
- * Helper functions for BLAKE2b implementations.
- * Keep this in sync with the corresponding BLAKE2s header.
- */
-
-#ifndef _CRYPTO_INTERNAL_BLAKE2B_H
-#define _CRYPTO_INTERNAL_BLAKE2B_H
-
-#include <crypto/blake2b.h>
-#include <crypto/internal/hash.h>
-#include <linux/string.h>
-
-void blake2b_compress_generic(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc);
-
-static inline void blake2b_set_lastblock(struct blake2b_state *state)
-{
- state->f[0] = -1;
-}
-
-typedef void (*blake2b_compress_t)(struct blake2b_state *state,
- const u8 *block, size_t nblocks, u32 inc);
-
-static inline void __blake2b_update(struct blake2b_state *state,
- const u8 *in, size_t inlen,
- blake2b_compress_t compress)
-{
- const size_t fill = BLAKE2B_BLOCK_SIZE - state->buflen;
-
- if (unlikely(!inlen))
- return;
- if (inlen > fill) {
- memcpy(state->buf + state->buflen, in, fill);
- (*compress)(state, state->buf, 1, BLAKE2B_BLOCK_SIZE);
- state->buflen = 0;
- in += fill;
- inlen -= fill;
- }
- if (inlen > BLAKE2B_BLOCK_SIZE) {
- const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2B_BLOCK_SIZE);
- /* Hash one less (full) block than strictly possible */
- (*compress)(state, in, nblocks - 1, BLAKE2B_BLOCK_SIZE);
- in += BLAKE2B_BLOCK_SIZE * (nblocks - 1);
- inlen -= BLAKE2B_BLOCK_SIZE * (nblocks - 1);
- }
- memcpy(state->buf + state->buflen, in, inlen);
- state->buflen += inlen;
-}
-
-static inline void __blake2b_final(struct blake2b_state *state, u8 *out,
- blake2b_compress_t compress)
-{
- int i;
-
- blake2b_set_lastblock(state);
- memset(state->buf + state->buflen, 0,
- BLAKE2B_BLOCK_SIZE - state->buflen); /* Padding */
- (*compress)(state, state->buf, 1, state->buflen);
- for (i = 0; i < ARRAY_SIZE(state->h); i++)
- __cpu_to_le64s(&state->h[i]);
- memcpy(out, state->h, state->outlen);
-}
-
-/* Helper functions for shash implementations of BLAKE2b */
-
-struct blake2b_tfm_ctx {
- u8 key[BLAKE2B_KEY_SIZE];
- unsigned int keylen;
-};
-
-static inline int crypto_blake2b_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen)
-{
- struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(tfm);
-
- if (keylen == 0 || keylen > BLAKE2B_KEY_SIZE)
- return -EINVAL;
-
- memcpy(tctx->key, key, keylen);
- tctx->keylen = keylen;
-
- return 0;
-}
-
-static inline int crypto_blake2b_init(struct shash_desc *desc)
-{
- const struct blake2b_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct blake2b_state *state = shash_desc_ctx(desc);
- unsigned int outlen = crypto_shash_digestsize(desc->tfm);
-
- __blake2b_init(state, outlen, tctx->key, tctx->keylen);
- return 0;
-}
-
-static inline int crypto_blake2b_update(struct shash_desc *desc,
- const u8 *in, unsigned int inlen,
- blake2b_compress_t compress)
-{
- struct blake2b_state *state = shash_desc_ctx(desc);
-
- __blake2b_update(state, in, inlen, compress);
- return 0;
-}
-
-static inline int crypto_blake2b_final(struct shash_desc *desc, u8 *out,
- blake2b_compress_t compress)
-{
- struct blake2b_state *state = shash_desc_ctx(desc);
-
- __blake2b_final(state, out, compress);
- return 0;
-}
-
-#endif /* _CRYPTO_INTERNAL_BLAKE2B_H */
diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h
deleted file mode 100644
index 8e50d487500f..000000000000
--- a/include/crypto/internal/blake2s.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/*
- * Helper functions for BLAKE2s implementations.
- * Keep this in sync with the corresponding BLAKE2b header.
- */
-
-#ifndef _CRYPTO_INTERNAL_BLAKE2S_H
-#define _CRYPTO_INTERNAL_BLAKE2S_H
-
-#include <crypto/blake2s.h>
-#include <crypto/internal/hash.h>
-#include <linux/string.h>
-
-void blake2s_compress_generic(struct blake2s_state *state,const u8 *block,
- size_t nblocks, const u32 inc);
-
-void blake2s_compress_arch(struct blake2s_state *state,const u8 *block,
- size_t nblocks, const u32 inc);
-
-bool blake2s_selftest(void);
-
-static inline void blake2s_set_lastblock(struct blake2s_state *state)
-{
- state->f[0] = -1;
-}
-
-typedef void (*blake2s_compress_t)(struct blake2s_state *state,
- const u8 *block, size_t nblocks, u32 inc);
-
-/* Helper functions for BLAKE2s shared by the library and shash APIs */
-
-static inline void __blake2s_update(struct blake2s_state *state,
- const u8 *in, size_t inlen,
- blake2s_compress_t compress)
-{
- const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
-
- if (unlikely(!inlen))
- return;
- if (inlen > fill) {
- memcpy(state->buf + state->buflen, in, fill);
- (*compress)(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
- state->buflen = 0;
- in += fill;
- inlen -= fill;
- }
- if (inlen > BLAKE2S_BLOCK_SIZE) {
- const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
- /* Hash one less (full) block than strictly possible */
- (*compress)(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
- in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
- inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
- }
- memcpy(state->buf + state->buflen, in, inlen);
- state->buflen += inlen;
-}
-
-static inline void __blake2s_final(struct blake2s_state *state, u8 *out,
- blake2s_compress_t compress)
-{
- blake2s_set_lastblock(state);
- memset(state->buf + state->buflen, 0,
- BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
- (*compress)(state, state->buf, 1, state->buflen);
- cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
- memcpy(out, state->h, state->outlen);
-}
-
-/* Helper functions for shash implementations of BLAKE2s */
-
-struct blake2s_tfm_ctx {
- u8 key[BLAKE2S_KEY_SIZE];
- unsigned int keylen;
-};
-
-static inline int crypto_blake2s_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen)
-{
- struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm);
-
- if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE)
- return -EINVAL;
-
- memcpy(tctx->key, key, keylen);
- tctx->keylen = keylen;
-
- return 0;
-}
-
-static inline int crypto_blake2s_init(struct shash_desc *desc)
-{
- const struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
- struct blake2s_state *state = shash_desc_ctx(desc);
- unsigned int outlen = crypto_shash_digestsize(desc->tfm);
-
- __blake2s_init(state, outlen, tctx->key, tctx->keylen);
- return 0;
-}
-
-static inline int crypto_blake2s_update(struct shash_desc *desc,
- const u8 *in, unsigned int inlen,
- blake2s_compress_t compress)
-{
- struct blake2s_state *state = shash_desc_ctx(desc);
-
- __blake2s_update(state, in, inlen, compress);
- return 0;
-}
-
-static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out,
- blake2s_compress_t compress)
-{
- struct blake2s_state *state = shash_desc_ctx(desc);
-
- __blake2s_final(state, out, compress);
- return 0;
-}
-
-#endif /* _CRYPTO_INTERNAL_BLAKE2S_H */
diff --git a/include/crypto/internal/blockhash.h b/include/crypto/internal/blockhash.h
new file mode 100644
index 000000000000..52d9d4c82493
--- /dev/null
+++ b/include/crypto/internal/blockhash.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Handle partial blocks for block hash.
+ *
+ * Copyright (c) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ * Copyright (c) 2025 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+
+#ifndef _CRYPTO_INTERNAL_BLOCKHASH_H
+#define _CRYPTO_INTERNAL_BLOCKHASH_H
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define BLOCK_HASH_UPDATE_BASE(block_fn, state, src, nbytes, bs, dv, \
+ buf, buflen) \
+ ({ \
+ typeof(block_fn) *_block_fn = &(block_fn); \
+ typeof(state + 0) _state = (state); \
+ unsigned int _buflen = (buflen); \
+ size_t _nbytes = (nbytes); \
+ unsigned int _bs = (bs); \
+ const u8 *_src = (src); \
+ u8 *_buf = (buf); \
+ while ((_buflen + _nbytes) >= _bs) { \
+ const u8 *data = _src; \
+ size_t len = _nbytes; \
+ size_t blocks; \
+ int remain; \
+ if (_buflen) { \
+ remain = _bs - _buflen; \
+ memcpy(_buf + _buflen, _src, remain); \
+ data = _buf; \
+ len = _bs; \
+ } \
remain = len % _bs; \
+ blocks = (len - remain) / (dv); \
+ (*_block_fn)(_state, data, blocks); \
+ _src += len - remain - _buflen; \
+ _nbytes -= len - remain - _buflen; \
+ _buflen = 0; \
+ } \
+ memcpy(_buf + _buflen, _src, _nbytes); \
+ _buflen += _nbytes; \
+ })
+
+#define BLOCK_HASH_UPDATE(block, state, src, nbytes, bs, buf, buflen) \
+ BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, 1, buf, buflen)
+#define BLOCK_HASH_UPDATE_BLOCKS(block, state, src, nbytes, bs, buf, buflen) \
+ BLOCK_HASH_UPDATE_BASE(block, state, src, nbytes, bs, bs, buf, buflen)
+
+#endif /* _CRYPTO_INTERNAL_BLOCKHASH_H */
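The @dv divisor is what separates the two wrappers: with dv == 1 the block function receives a byte count, while BLOCK_HASH_UPDATE_BLOCKS passes dv == bs so it receives a whole-block count. The statement expression evaluates to the new partial-block length, which the caller is assumed to store. A sketch for a 64-byte-block hash with illustrative example_* names:

struct example_ctx {
	u32 state[8];
	u8 buf[64];
	unsigned int buflen;
};

static void example_blocks(u32 *state, const u8 *data, size_t nblocks)
{
	/* compress nblocks 64-byte blocks into state (omitted) */
}

static void example_update(struct example_ctx *ctx, const u8 *in, size_t len)
{
	ctx->buflen = BLOCK_HASH_UPDATE_BLOCKS(example_blocks, ctx->state,
					       in, len, sizeof(ctx->buf),
					       ctx->buf, ctx->buflen);
}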
diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h
deleted file mode 100644
index b085dc1ac151..000000000000
--- a/include/crypto/internal/chacha.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef _CRYPTO_INTERNAL_CHACHA_H
-#define _CRYPTO_INTERNAL_CHACHA_H
-
-#include <crypto/chacha.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/crypto.h>
-
-struct chacha_ctx {
- u32 key[8];
- int nrounds;
-};
-
-static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize, int nrounds)
-{
- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
- int i;
-
- if (keysize != CHACHA_KEY_SIZE)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
- ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
-
- ctx->nrounds = nrounds;
- return 0;
-}
-
-static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- return chacha_setkey(tfm, key, keysize, 20);
-}
-
-static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- return chacha_setkey(tfm, key, keysize, 12);
-}
-
-#endif /* _CRYPTO_CHACHA_H */
diff --git a/include/crypto/internal/cipher.h b/include/crypto/internal/cipher.h
index a9174ba90250..5030f6d2df31 100644
--- a/include/crypto/internal/cipher.h
+++ b/include/crypto/internal/cipher.h
@@ -176,6 +176,8 @@ void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src);
+struct crypto_cipher *crypto_clone_cipher(struct crypto_cipher *cipher);
+
struct crypto_cipher_spawn {
struct crypto_spawn base;
};
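crypto_clone_cipher() plugs into the new clone_tfm machinery: a template wrapping a single-block cipher (cmac, for instance) can duplicate its child transform without repeating setkey. A minimal sketch of the pattern a clone callback might follow (names are illustrative; crypto_shash_ctx() is from crypto/internal/hash.h):

struct example_tfm_ctx {
	struct crypto_cipher *child;
};

static int example_clone_tfm(struct crypto_shash *dst,
			     struct crypto_shash *src)
{
	struct example_tfm_ctx *sctx = crypto_shash_ctx(src);
	struct example_tfm_ctx *dctx = crypto_shash_ctx(dst);

	dctx->child = crypto_clone_cipher(sctx->child);
	return PTR_ERR_OR_ZERO(dctx->child);
}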
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
deleted file mode 100644
index fd54074332f5..000000000000
--- a/include/crypto/internal/cryptouser.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/cryptouser.h>
-#include <net/netlink.h>
-
-struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
-
-#ifdef CONFIG_CRYPTO_STATS
-int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
-#else
-static inline int crypto_reportstat(struct sk_buff *in_skb,
- struct nlmsghdr *in_nlh,
- struct nlattr **attrs)
-{
- return -ENOTSUPP;
-}
-#endif
diff --git a/include/crypto/internal/drbg.h b/include/crypto/internal/drbg.h
new file mode 100644
index 000000000000..371e52dcee6c
--- /dev/null
+++ b/include/crypto/internal/drbg.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * NIST SP800-90A DRBG derivation function
+ *
+ * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _INTERNAL_DRBG_H
+#define _INTERNAL_DRBG_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+
+/*
+ * Convert an integer into a byte representation of this integer.
+ * The byte representation is big-endian
+ *
+ * @val value to be converted
+ * @buf buffer holding the converted integer -- caller must ensure that
+ * buffer size is at least 32 bit
+ */
+static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf)
+{
+ struct s {
+ __be32 conv;
+ };
+ struct s *conversion = (struct s *)buf;
+
+ conversion->conv = cpu_to_be32(val);
+}
+
+/*
+ * Concatenation Helper and string operation helper
+ *
+ * SP800-90A requires the concatenation of different data. To avoid copying
+ * buffers around or allocate additional memory, the following data structure
+ * is used to point to the original memory with its size. In addition, it
+ * is used to build a linked list. The linked list defines the concatenation
+ * of individual buffers. The order of memory block referenced in that
+ * linked list determines the order of concatenation.
+ */
+struct drbg_string {
+ const unsigned char *buf;
+ size_t len;
+ struct list_head list;
+};
+
+static inline void drbg_string_fill(struct drbg_string *string,
+ const unsigned char *buf, size_t len)
+{
+ string->buf = buf;
+ string->len = len;
+ INIT_LIST_HEAD(&string->list);
+}
+
+#endif //_INTERNAL_DRBG_H
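Because a drbg_string only points at existing memory, concatenation reduces to list construction: the hash then walks the list in order. A short sketch chaining two buffers:

#include <linux/list.h>

static void example_concat(struct list_head *seedlist,
			   struct drbg_string *a, const u8 *abuf, size_t alen,
			   struct drbg_string *b, const u8 *bbuf, size_t blen)
{
	INIT_LIST_HEAD(seedlist);
	drbg_string_fill(a, abuf, alen);
	drbg_string_fill(b, bbuf, blen);
	list_add_tail(&a->list, seedlist);
	list_add_tail(&b->list, seedlist);
}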
diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h
new file mode 100644
index 000000000000..57cd75242141
--- /dev/null
+++ b/include/crypto/internal/ecc.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2013, Kenneth MacKay
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _CRYPTO_ECC_H
+#define _CRYPTO_ECC_H
+
+#include <crypto/ecc_curve.h>
+#include <linux/unaligned.h>
+
+/* One digit is u64 qword. */
+#define ECC_CURVE_NIST_P192_DIGITS 3
+#define ECC_CURVE_NIST_P256_DIGITS 4
+#define ECC_CURVE_NIST_P384_DIGITS 6
+#define ECC_CURVE_NIST_P521_DIGITS 9
+#define ECC_MAX_DIGITS DIV_ROUND_UP(521, 64) /* NIST P521 */
+
+#define ECC_DIGITS_TO_BYTES_SHIFT 3
+
+#define ECC_MAX_BYTES (ECC_MAX_DIGITS << ECC_DIGITS_TO_BYTES_SHIFT)
+
+#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits }
+
+/*
+ * The integers r and s making up the signature are expected to be
+ * formatted as two consecutive u64 arrays of size ECC_MAX_BYTES.
+ * The bytes within each u64 digit are in native endianness,
+ * but the order of the u64 digits themselves is little endian.
+ * This format allows direct use by internal vli_*() functions.
+ */
+struct ecdsa_raw_sig {
+ u64 r[ECC_MAX_DIGITS];
+ u64 s[ECC_MAX_DIGITS];
+};
+
+/**
+ * ecc_swap_digits() - Copy ndigits from big endian array to native array
+ * @in: Input array
+ * @out: Output array
+ * @ndigits: Number of digits to copy
+ */
+static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigits)
+{
+ const __be64 *src = (__force __be64 *)in;
+ int i;
+
+ for (i = 0; i < ndigits; i++)
+ out[i] = get_unaligned_be64(&src[ndigits - 1 - i]);
+}
+
+/**
+ * ecc_digits_from_bytes() - Create ndigits-sized digits array from byte array
+ * @in: Input byte array
+ * @nbytes:	Size of input byte array
+ * @out:	Output digits array
+ * @ndigits: Number of digits to create from byte array
+ *
+ * The first byte in the input byte array is expected to hold the most
+ * significant bits of the large integer.
+ */
+void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
+ u64 *out, unsigned int ndigits);
+
+/**
+ * ecc_is_key_valid() - Validate a given ECDH private key
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: private key to be used for the given curve
+ * @private_key_len: private key length
+ *
+ * Returns 0 if the key is acceptable, a negative value otherwise
+ */
+int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
+ const u64 *private_key, unsigned int private_key_len);
+
+/**
+ * ecc_gen_privkey() - Generates an ECC private key.
+ * The private key is a random integer in the range 0 < random < n, where n is a
+ * prime that is the order of the cyclic subgroup generated by the distinguished
+ * point G.
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve number of digits
+ * @private_key: buffer for storing the generated private key
+ *
+ * Returns 0 if the private key was generated successfully, a negative value
+ * if an error occurred.
+ */
+int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits,
+ u64 *private_key);
+
+/**
+ * ecc_make_pub_key() - Compute an ECC public key
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: pregenerated private key for the given curve
+ * @public_key: buffer for storing the generated public key
+ *
+ * Returns 0 if the public key was generated successfully, a negative value
+ * if an error occurred.
+ */
+int ecc_make_pub_key(const unsigned int curve_id, unsigned int ndigits,
+ const u64 *private_key, u64 *public_key);
+
+/**
+ * crypto_ecdh_shared_secret() - Compute a shared secret
+ *
+ * @curve_id: id representing the curve to use
+ * @ndigits: curve's number of digits
+ * @private_key: private key of part A
+ * @public_key: public key of counterpart B
+ * @secret: buffer for storing the calculated shared secret
+ *
+ * Note: It is recommended that you hash the result of crypto_ecdh_shared_secret
+ * before using it for symmetric encryption or HMAC.
+ *
+ * Returns 0 if the shared secret was generated successfully, a negative value
+ * if an error occurred.
+ */
+int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
+ const u64 *private_key, const u64 *public_key,
+ u64 *secret);
+
+/**
+ * ecc_is_pubkey_valid_partial() - Partial public key validation
+ *
+ * @curve: elliptic curve domain parameters
+ * @pk: public key as a point
+ *
+ * Validate public key according to SP800-56A section 5.6.2.3.4 ECC Partial
+ * Public-Key Validation Routine.
+ *
+ * Note: There is no check that the public key is in the correct elliptic curve
+ * subgroup.
+ *
+ * Return: 0 if validation is successful, -EINVAL if validation failed.
+ */
+int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
+ struct ecc_point *pk);
+
+/**
+ * ecc_is_pubkey_valid_full() - Full public key validation
+ *
+ * @curve: elliptic curve domain parameters
+ * @pk: public key as a point
+ *
+ * Validate public key according to SP800-56A section 5.6.2.3.3 ECC Full
+ * Public-Key Validation Routine.
+ *
+ * Return: 0 if validation is successful, -EINVAL if validation failed.
+ */
+int ecc_is_pubkey_valid_full(const struct ecc_curve *curve,
+ struct ecc_point *pk);
+
+/**
+ * vli_is_zero() - Determine whether a vli is zero
+ *
+ * @vli: vli to check.
+ * @ndigits: length of the @vli
+ */
+bool vli_is_zero(const u64 *vli, unsigned int ndigits);
+
+/**
+ * vli_cmp() - compare left and right vlis
+ *
+ * @left: vli
+ * @right: vli
+ * @ndigits: length of both vlis
+ *
+ * Returns sign of @left - @right, i.e. -1 if @left < @right,
+ * 0 if @left == @right, 1 if @left > @right.
+ */
+int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits);
+
+/**
+ * vli_sub() - Subtracts right from left
+ *
+ * @result: where to write result
+ * @left: vli
+ * @right: vli
+ * @ndigits: length of all vlis
+ *
+ * Note: can modify in-place.
+ *
+ * Return: carry bit.
+ */
+u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
+ unsigned int ndigits);
+
+/**
+ * vli_from_be64() - Load vli from big-endian u64 array
+ *
+ * @dest: destination vli
+ * @src: source array of u64 BE values
+ * @ndigits: length of both vli and array
+ */
+void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits);
+
+/**
+ * vli_from_le64() - Load vli from little-endian u64 array
+ *
+ * @dest: destination vli
+ * @src: source array of u64 LE values
+ * @ndigits: length of both vli and array
+ */
+void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits);
+
+/**
+ * vli_mod_inv() - Modular inversion
+ *
+ * @result: where to write vli number
+ * @input: vli value to operate on
+ * @mod: modulus
+ * @ndigits: length of all vlis
+ */
+void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
+ unsigned int ndigits);
+
+/**
+ * vli_mod_mult_slow() - Modular multiplication
+ *
+ * @result: where to write result value
+ * @left: vli number to multiply with @right
+ * @right: vli number to multiply with @left
+ * @mod: modulus
+ * @ndigits: length of all vlis
+ *
+ * Note: Assumes that @mod is sufficiently large, e.g. a curve order.
+ */
+void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
+ const u64 *mod, unsigned int ndigits);
+
+/**
+ * vli_num_bits() - Counts the number of bits required for vli.
+ *
+ * @vli: vli to check.
+ * @ndigits: Length of the @vli
+ *
+ * Return: The number of bits required to represent @vli.
+ */
+unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits);
+
+/**
+ * ecc_alloc_point() - Allocate ECC point.
+ *
+ * @ndigits: Length of vlis in u64 qwords.
+ *
+ * Return: Pointer to the allocated point or NULL if allocation failed.
+ */
+struct ecc_point *ecc_alloc_point(unsigned int ndigits);
+
+/**
+ * ecc_free_point() - Free ECC point.
+ *
+ * @p: The point to free.
+ */
+void ecc_free_point(struct ecc_point *p);
+
+/**
+ * ecc_point_is_zero() - Check if point is zero.
+ *
+ * @point: Point to check for zero.
+ *
+ * Return: true if point is the point at infinity, false otherwise.
+ */
+bool ecc_point_is_zero(const struct ecc_point *point);
+
+/**
+ * ecc_point_mult_shamir() - Add two points multiplied by scalars
+ *
+ * @result: resulting point
+ * @x: scalar to multiply with @p
+ * @p: point to multiply with @x
+ * @y: scalar to multiply with @q
+ * @q: point to multiply with @y
+ * @curve: curve
+ *
+ * Returns result = x * p + y * q over the curve.
+ * This is faster than computing the two multiplications separately and
+ * then adding the results.
+ */
+void ecc_point_mult_shamir(const struct ecc_point *result,
+ const u64 *x, const struct ecc_point *p,
+ const u64 *y, const struct ecc_point *q,
+ const struct ecc_curve *curve);
+
+extern struct crypto_template ecdsa_x962_tmpl;
+extern struct crypto_template ecdsa_p1363_tmpl;
+#endif
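
As a hedged sketch of how these pieces compose, the following loads a raw
big-endian public key (x coordinate followed by y) into an ecc_point and
runs full validation. example_check_pubkey() is hypothetical and error
handling is kept minimal.

static int example_check_pubkey(const struct ecc_curve *curve,
				const u8 *raw_xy, unsigned int ndigits)
{
	unsigned int nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
	struct ecc_point *pk;
	int ret;

	pk = ecc_alloc_point(ndigits);
	if (!pk)
		return -ENOMEM;

	/* Convert each big-endian coordinate into native u64 digits. */
	ecc_swap_digits(raw_xy, pk->x, ndigits);
	ecc_swap_digits(raw_xy + nbytes, pk->y, ndigits);

	ret = ecc_is_pubkey_valid_full(curve, pk);
	ecc_free_point(pk);
	return ret;
}
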
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
new file mode 100644
index 000000000000..f19ef376833f
--- /dev/null
+++ b/include/crypto/internal/engine.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Crypto engine API
+ *
+ * Copyright (c) 2016 Baolin Wang <baolin.wang@linaro.org>
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_INTERNAL_ENGINE_H
+#define _CRYPTO_INTERNAL_ENGINE_H
+
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <linux/kthread.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+#define ENGINE_NAME_LEN 30
+
+struct device;
+
+/*
+ * struct crypto_engine - crypto hardware engine
+ * @name: the engine name
+ * @busy: request pump is busy
+ * @running: the engine is running
+ * @retry_support: indication that the hardware allows re-execution
+ * of a failed backlog request
+ * @rt: whether this queue is set to run as a realtime task
+ * @list: link with the global crypto engine list
+ * @queue_lock: spinlock to synchronise access to request queue
+ * @queue: the crypto queue of the engine
+ * @dev: the device this engine is bound to
+ * @kworker: kthread worker struct for request pump
+ * @pump_requests: work struct for scheduling work to the request pump
+ * @priv_data: the engine private data
+ * @cur_req: the request currently being processed
+ */
+struct crypto_engine {
+ char name[ENGINE_NAME_LEN];
+ bool busy;
+ bool running;
+
+ bool retry_support;
+ bool rt;
+
+ struct list_head list;
+ spinlock_t queue_lock;
+ struct crypto_queue queue;
+ struct device *dev;
+
+ struct kthread_worker *kworker;
+ struct kthread_work pump_requests;
+
+ void *priv_data;
+ struct crypto_async_request *cur_req;
+};
+
+#endif
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 7fd7126f593a..012f5fb22d43 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -15,7 +15,6 @@
struct aead_geniv_ctx {
spinlock_t lock;
struct crypto_aead *child;
- struct crypto_sync_skcipher *sknull;
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 0a288dddcf5b..6ec5f2f37ccb 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -11,22 +11,38 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
+/* Set this bit to handle partial blocks in the API. */
+#define CRYPTO_AHASH_ALG_BLOCK_ONLY 0x01000000
+
+/* Set this bit if final requires at least one byte. */
+#define CRYPTO_AHASH_ALG_FINAL_NONZERO 0x02000000
+
+/* Set this bit if finup can deal with multiple blocks. */
+#define CRYPTO_AHASH_ALG_FINUP_MAX 0x04000000
+
+/* This bit is set by the Crypto API if export_core is not supported. */
+#define CRYPTO_AHASH_ALG_NO_EXPORT_CORE 0x08000000
+
+#define HASH_FBREQ_ON_STACK(name, req) \
+ char __##name##_req[sizeof(struct ahash_request) + \
+ MAX_SYNC_HASH_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct ahash_request *name = ahash_fbreq_on_stack_init( \
+ __##name##_req, (req))
+
struct ahash_request;
struct scatterlist;
struct crypto_hash_walk {
- char *data;
+ const char *data;
unsigned int offset;
- unsigned int alignmask;
+ unsigned int flags;
struct page *pg;
unsigned int entrylen;
unsigned int total;
struct scatterlist *sg;
-
- unsigned int flags;
};
struct ahash_instance {
@@ -74,6 +90,7 @@ int crypto_register_ahashes(struct ahash_alg *algs, int count);
void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst);
+void ahash_free_singlespawn_instance(struct ahash_instance *inst);
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
@@ -83,13 +100,25 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
return alg->setkey != shash_no_setkey;
}
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
{
return crypto_shash_alg_has_setkey(alg) &&
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
}
-bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+static inline bool crypto_hash_alg_needs_key(struct hash_alg_common *alg)
+{
+ return crypto_hash_alg_has_setkey(alg) &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
+}
+
+static inline bool crypto_hash_no_export_core(struct crypto_ahash *tfm)
+{
+ return crypto_hash_alg_common(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+}
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
struct crypto_instance *inst,
@@ -133,25 +162,54 @@ int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
-int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
-
static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
{
return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
}
+static inline void *crypto_ahash_ctx_dma(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_ctx_dma(crypto_ahash_tfm(tfm));
+}
+
static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg)
{
return container_of(__crypto_hash_alg_common(alg), struct ahash_alg,
halg);
}
+static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
+{
+ return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
+ halg);
+}
+
+static inline void crypto_ahash_set_statesize(struct crypto_ahash *tfm,
+ unsigned int size)
+{
+ tfm->statesize = size;
+}
+
static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
unsigned int reqsize)
{
tfm->reqsize = reqsize;
}
+static inline bool crypto_ahash_tested(struct crypto_ahash *tfm)
+{
+ struct crypto_tfm *tfm_base = crypto_ahash_tfm(tfm);
+
+ return tfm_base->__crt_alg->cra_flags & CRYPTO_ALG_TESTED;
+}
+
+static inline void crypto_ahash_set_reqsize_dma(struct crypto_ahash *ahash,
+ unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ ahash->reqsize = reqsize;
+}
+
static inline struct crypto_instance *ahash_crypto_instance(
struct ahash_instance *inst)
{
@@ -175,14 +233,24 @@ static inline void *ahash_instance_ctx(struct ahash_instance *inst)
return crypto_instance_ctx(ahash_crypto_instance(inst));
}
+static inline void *ahash_request_ctx_dma(struct ahash_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(ahash_request_ctx(req), align);
+}
+
static inline void ahash_request_complete(struct ahash_request *req, int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline u32 ahash_request_flags(struct ahash_request *req)
{
- return req->base.flags;
+ return crypto_request_flags(&req->base) & ~CRYPTO_AHASH_REQ_PRIVATE;
}
static inline struct crypto_ahash *crypto_spawn_ahash(
@@ -237,15 +305,101 @@ static inline struct crypto_shash *crypto_spawn_shash(
return crypto_spawn_tfm2(&spawn->base);
}
-static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm)
+static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
{
- return crypto_tfm_ctx_aligned(&tfm->base);
+ return container_of(tfm, struct crypto_shash, base);
}
-static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
+static inline bool ahash_request_isvirt(struct ahash_request *req)
{
- return container_of(tfm, struct crypto_shash, base);
+ return req->base.flags & CRYPTO_AHASH_REQ_VIRT;
+}
+
+static inline bool crypto_ahash_req_virt(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_req_virt(&tfm->base);
+}
+
+static inline struct crypto_ahash *crypto_ahash_fb(struct crypto_ahash *tfm)
+{
+ return __crypto_ahash_cast(crypto_ahash_tfm(tfm)->fb);
}
+static inline struct ahash_request *ahash_fbreq_on_stack_init(
+ char *buf, struct ahash_request *old)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(old);
+ struct ahash_request *req = (void *)buf;
+
+ crypto_stack_request_init(&req->base,
+ crypto_ahash_tfm(crypto_ahash_fb(tfm)));
+ ahash_request_set_callback(req, ahash_request_flags(old), NULL, NULL);
+ req->base.flags &= ~CRYPTO_AHASH_REQ_PRIVATE;
+ req->base.flags |= old->base.flags & CRYPTO_AHASH_REQ_PRIVATE;
+ req->src = old->src;
+ req->result = old->result;
+ req->nbytes = old->nbytes;
+
+ return req;
+}
+
+/* Return the state size without partial block for block-only algorithms. */
+static inline unsigned int crypto_shash_coresize(struct crypto_shash *tfm)
+{
+ return crypto_shash_statesize(tfm) - crypto_shash_blocksize(tfm) - 1;
+}
+
+/* This can only be used if the request was never cloned. */
+#define HASH_REQUEST_ZERO(name) \
+ memzero_explicit(__##name##_req, sizeof(__##name##_req))
+
+/**
+ * crypto_ahash_export_core() - extract core state for message digest
+ * @req: reference to the ahash_request handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * Export the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
+int crypto_ahash_export_core(struct ahash_request *req, void *out);
+
+/**
+ * crypto_ahash_import_core() - import core state
+ * @req: reference to ahash_request handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * Import the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+int crypto_ahash_import_core(struct ahash_request *req, const void *in);
+
+/**
+ * crypto_shash_export_core() - extract core state for message digest
+ * @desc: reference to the operational state handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * Export the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
+int crypto_shash_export_core(struct shash_desc *desc, void *out);
+
+/**
+ * crypto_shash_import_core() - import core state
+ * @desc: reference to the operational state handle the state imported into
+ * @in: buffer holding the state
+ *
+ * Import the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+int crypto_shash_import_core(struct shash_desc *desc, const void *in);
+
#endif /* _CRYPTO_INTERNAL_HASH_H */
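
A sketch of how a DMA-capable driver would use the *_dma helpers above;
struct my_req_ctx and both callbacks are hypothetical, and the hardware
interaction is elided.

struct my_req_ctx {
	u8 hw_desc[64];		/* hypothetical hardware descriptor */
};

static int my_ahash_init_tfm(struct crypto_ahash *tfm)
{
	/* Reserve a request context that ahash_request_ctx_dma() can align. */
	crypto_ahash_set_reqsize_dma(tfm, sizeof(struct my_req_ctx));
	return 0;
}

static int my_ahash_digest(struct ahash_request *req)
{
	struct my_req_ctx *rctx = ahash_request_ctx_dma(req);

	/* rctx is suitably aligned for DMA; program the descriptor here. */
	memset(rctx->hw_desc, 0, sizeof(rctx->hw_desc));
	return -EINPROGRESS;
}
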
diff --git a/include/crypto/internal/kdf_selftest.h b/include/crypto/internal/kdf_selftest.h
new file mode 100644
index 000000000000..4d03d2af57b7
--- /dev/null
+++ b/include/crypto/internal/kdf_selftest.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2021, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _CRYPTO_KDF_SELFTEST_H
+#define _CRYPTO_KDF_SELFTEST_H
+
+#include <crypto/hash.h>
+#include <linux/uio.h>
+
+struct kdf_testvec {
+ unsigned char *key;
+ size_t keylen;
+ unsigned char *ikm;
+ size_t ikmlen;
+ struct kvec info;
+ unsigned char *expected;
+ size_t expectedlen;
+};
+
+static inline int
+kdf_test(const struct kdf_testvec *test, const char *name,
+ int (*crypto_kdf_setkey)(struct crypto_shash *kmd,
+ const u8 *key, size_t keylen,
+ const u8 *ikm, size_t ikmlen),
+ int (*crypto_kdf_generate)(struct crypto_shash *kmd,
+ const struct kvec *info,
+ unsigned int info_nvec,
+ u8 *dst, unsigned int dlen))
+{
+ struct crypto_shash *kmd;
+ int ret;
+ u8 *buf = kzalloc(test->expectedlen, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ kmd = crypto_alloc_shash(name, 0, 0);
+ if (IS_ERR(kmd)) {
+ pr_err("alg: kdf: could not allocate hash handle for %s\n",
+ name);
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ ret = crypto_kdf_setkey(kmd, test->key, test->keylen,
+ test->ikm, test->ikmlen);
+ if (ret) {
+ pr_err("alg: kdf: could not set key derivation key\n");
+ goto err;
+ }
+
+ ret = crypto_kdf_generate(kmd, &test->info, 1, buf, test->expectedlen);
+ if (ret) {
+ pr_err("alg: kdf: could not obtain key data\n");
+ goto err;
+ }
+
+ ret = memcmp(test->expected, buf, test->expectedlen);
+ if (ret)
+ ret = -EINVAL;
+
+err:
+ crypto_free_shash(kmd);
+ kfree(buf);
+ return ret;
+}
+
+#endif /* _CRYPTO_KDF_SELFTEST_H */
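
A hedged example of wiring kdf_test() to the SP800-108 counter-KDF entry
points declared in <crypto/kdf_sp800108.h>; the vectors here are zero-filled
placeholders, so a real self-test must substitute published test data.

#include <crypto/kdf_sp800108.h>

static int example_kdf_selftest(void)
{
	static u8 key[32];		/* placeholder seed key */
	static u8 expected[16];		/* placeholder expected output */
	const struct kdf_testvec tv = {
		.key = key,
		.keylen = sizeof(key),
		.info = { .iov_base = (void *)"example", .iov_len = 7 },
		.expected = expected,
		.expectedlen = sizeof(expected),
	};

	return kdf_test(&tv, "hmac(sha256)", crypto_kdf108_setkey,
			crypto_kdf108_ctr_generate);
}
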
diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h
index 659b642efada..0a6db8c4a9a0 100644
--- a/include/crypto/internal/kpp.h
+++ b/include/crypto/internal/kpp.h
@@ -10,6 +10,38 @@
#include <crypto/kpp.h>
#include <crypto/algapi.h>
+/**
+ * struct kpp_instance - KPP template instance
+ * @free: Callback getting invoked upon instance destruction. Must be set.
+ * @s: Internal. Generic crypto core instance state properly laid out
+ *     to alias with @alg as needed.
+ * @alg: The &struct kpp_alg implementation provided by the instance.
+ */
+struct kpp_instance {
+ void (*free)(struct kpp_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct kpp_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct kpp_alg alg;
+ };
+};
+
+/**
+ * struct crypto_kpp_spawn - KPP algorithm spawn
+ * @base: Internal. Generic crypto core spawn state.
+ *
+ * Template instances can get a hold on some inner KPP algorithm by
+ * binding a &struct crypto_kpp_spawn via
+ * crypto_grab_kpp(). Transforms may subsequently get instantiated
+ * from the referenced inner &struct kpp_alg by means of
+ * crypto_spawn_kpp().
+ */
+struct crypto_kpp_spawn {
+ struct crypto_spawn base;
+};
+
/*
* Transform internal helpers.
*/
@@ -18,14 +50,42 @@ static inline void *kpp_request_ctx(struct kpp_request *req)
return req->__ctx;
}
+static inline void *kpp_request_ctx_dma(struct kpp_request *req)
+{
+ unsigned int align = crypto_dma_align();
+
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(kpp_request_ctx(req), align);
+}
+
+static inline void kpp_set_reqsize(struct crypto_kpp *kpp,
+ unsigned int reqsize)
+{
+ kpp->reqsize = reqsize;
+}
+
+static inline void kpp_set_reqsize_dma(struct crypto_kpp *kpp,
+ unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ kpp->reqsize = reqsize;
+}
+
static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm)
{
- return tfm->base.__crt_ctx;
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *kpp_tfm_ctx_dma(struct crypto_kpp *tfm)
+{
+ return crypto_tfm_ctx_dma(&tfm->base);
}
static inline void kpp_request_complete(struct kpp_request *req, int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
static inline const char *kpp_alg_name(struct crypto_kpp *tfm)
@@ -33,6 +93,62 @@ static inline const char *kpp_alg_name(struct crypto_kpp *tfm)
return crypto_kpp_tfm(tfm)->__crt_alg->cra_name;
}
+/*
+ * Template instance internal helpers.
+ */
+/**
+ * kpp_crypto_instance() - Cast a &struct kpp_instance to the corresponding
+ * generic &struct crypto_instance.
+ * @inst: Pointer to the &struct kpp_instance to be cast.
+ * Return: A pointer to the &struct crypto_instance embedded in @inst.
+ */
+static inline struct crypto_instance *kpp_crypto_instance(
+ struct kpp_instance *inst)
+{
+ return &inst->s.base;
+}
+
+/**
+ * kpp_instance() - Cast a generic &struct crypto_instance to the corresponding
+ * &struct kpp_instance.
+ * @inst: Pointer to the &struct crypto_instance to be cast.
+ * Return: A pointer to the &struct kpp_instance @inst is embedded in.
+ */
+static inline struct kpp_instance *kpp_instance(struct crypto_instance *inst)
+{
+ return container_of(inst, struct kpp_instance, s.base);
+}
+
+/**
+ * kpp_alg_instance() - Get the &struct kpp_instance a given KPP transform has
+ * been instantiated from.
+ * @kpp: The KPP transform instantiated from some &struct kpp_instance.
+ * Return: The &struct kpp_instance associated with @kpp.
+ */
+static inline struct kpp_instance *kpp_alg_instance(struct crypto_kpp *kpp)
+{
+ return kpp_instance(crypto_tfm_alg_instance(&kpp->base));
+}
+
+/**
+ * kpp_instance_ctx() - Get a pointer to a &struct kpp_instance's implementation
+ * specific context data.
+ * @inst: The &struct kpp_instance whose context data to access.
+ *
+ * A KPP template implementation may allocate extra memory beyond the
+ * end of a &struct kpp_instance instantiated from &crypto_template.create().
+ * This function provides a means to obtain a pointer to this area.
+ *
+ * Return: A pointer to the implementation specific context data.
+ */
+static inline void *kpp_instance_ctx(struct kpp_instance *inst)
+{
+ return crypto_instance_ctx(kpp_crypto_instance(inst));
+}
+
+/*
+ * KPP algorithm (un)registration functions.
+ */
/**
* crypto_register_kpp() -- Register key-agreement protocol primitives algorithm
*
@@ -56,4 +172,74 @@ int crypto_register_kpp(struct kpp_alg *alg);
*/
void crypto_unregister_kpp(struct kpp_alg *alg);
+/**
+ * kpp_register_instance() - Register a KPP template instance.
+ * @tmpl: The instantiating template.
+ * @inst: The KPP template instance to be registered.
+ * Return: %0 on success, negative error code otherwise.
+ */
+int kpp_register_instance(struct crypto_template *tmpl,
+ struct kpp_instance *inst);
+
+/*
+ * KPP spawn related functions.
+ */
+/**
+ * crypto_grab_kpp() - Look up a KPP algorithm and bind a spawn to it.
+ * @spawn: The KPP spawn to bind.
+ * @inst: The template instance owning @spawn.
+ * @name: The KPP algorithm name to look up.
+ * @type: The type bitset to pass on to the lookup.
+ * @mask: The bitmask to pass on to the lookup.
+ * Return: %0 on success, a negative error code otherwise.
+ */
+int crypto_grab_kpp(struct crypto_kpp_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
+/**
+ * crypto_drop_kpp() - Release a spawn previously bound via crypto_grab_kpp().
+ * @spawn: The spawn to release.
+ */
+static inline void crypto_drop_kpp(struct crypto_kpp_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+/**
+ * crypto_spawn_kpp_alg() - Get the algorithm a KPP spawn has been bound to.
+ * @spawn: The spawn to get the referenced &struct kpp_alg for.
+ *
+ * This function as well as the returned result are safe to use only
+ * after @spawn has been successfully bound via crypto_grab_kpp() and
+ * until the template instance owning @spawn has either been
+ * registered successfully or the spawn has been released again via
+ * crypto_drop_spawn().
+ *
+ * Return: A pointer to the &struct kpp_alg referenced from the spawn.
+ */
+static inline struct kpp_alg *crypto_spawn_kpp_alg(
+ struct crypto_kpp_spawn *spawn)
+{
+ return container_of(spawn->base.alg, struct kpp_alg, base);
+}
+
+/**
+ * crypto_spawn_kpp() - Create a transform from a KPP spawn.
+ * @spawn: The spawn previously bound to some &struct kpp_alg via
+ * crypto_grab_kpp().
+ *
+ * Once a &struct crypto_kpp_spawn has been successfully bound to a
+ * &struct kpp_alg via crypto_grab_kpp(), transforms for the latter
+ * may get instantiated from the former by means of this function.
+ *
+ * Return: A pointer to the freshly created KPP transform on success
+ * or an ``ERR_PTR()`` otherwise.
+ */
+static inline struct crypto_kpp *crypto_spawn_kpp(
+ struct crypto_kpp_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
#endif
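
A compressed sketch of the spawn lifecycle these helpers support, in the
shape of a hypothetical template ->create() callback. The fixed inner
algorithm name and the omitted kpp_alg wiring keep it short; this is not
a complete template.

static int example_kpp_create(struct crypto_template *tmpl,
			      struct rtattr **tb)
{
	struct crypto_kpp_spawn *spawn;
	struct kpp_instance *inst;
	int err;

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	spawn = kpp_instance_ctx(inst);
	err = crypto_grab_kpp(spawn, kpp_crypto_instance(inst),
			      "ecdh", 0, 0);	/* fixed name for brevity */
	if (err)
		goto err_free;

	/* A real template must fill inst->alg here, typically based on
	 * crypto_spawn_kpp_alg(spawn), and must set inst->free.
	 */

	err = kpp_register_instance(tmpl, inst);
	if (err)
		goto err_drop;
	return 0;

err_drop:
	crypto_drop_kpp(spawn);
err_free:
	kfree(inst);
	return err;
}
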
diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
index 196aa769f296..a72fff409ab8 100644
--- a/include/crypto/internal/poly1305.h
+++ b/include/crypto/internal/poly1305.h
@@ -6,9 +6,8 @@
#ifndef _CRYPTO_INTERNAL_POLY1305_H
#define _CRYPTO_INTERNAL_POLY1305_H
-#include <asm/unaligned.h>
-#include <linux/types.h>
#include <crypto/poly1305.h>
+#include <linux/types.h>
/*
* Poly1305 core functions. These only accept whole blocks; the caller must
@@ -31,4 +30,27 @@ void poly1305_core_blocks(struct poly1305_state *state,
void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
void *dst);
+static inline void
+poly1305_block_init_generic(struct poly1305_block_state *desc,
+ const u8 raw_key[POLY1305_BLOCK_SIZE])
+{
+ poly1305_core_init(&desc->h);
+ poly1305_core_setkey(&desc->core_r, raw_key);
+}
+
+static inline void poly1305_blocks_generic(struct poly1305_block_state *state,
+ const u8 *src, unsigned int len,
+ u32 padbit)
+{
+ poly1305_core_blocks(&state->h, &state->core_r, src,
+ len / POLY1305_BLOCK_SIZE, padbit);
+}
+
+static inline void poly1305_emit_generic(const struct poly1305_state *state,
+ u8 digest[POLY1305_DIGEST_SIZE],
+ const u32 nonce[4])
+{
+ poly1305_core_emit(state, nonce, digest);
+}
+
#endif
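
A sketch of driving the generic helpers directly for a whole-block message.
Real users normally reach Poly1305 through the higher-level library
interface; example_poly1305() and the key split (first 16 bytes = r part,
last 16 bytes = the nonce/s part) are stated assumptions.

#include <linux/unaligned.h>

static void example_poly1305(const u8 key[POLY1305_KEY_SIZE],
			     const u8 *msg, unsigned int nblocks,
			     u8 digest[POLY1305_DIGEST_SIZE])
{
	struct poly1305_block_state st;
	u32 nonce[4];
	int i;

	poly1305_block_init_generic(&st, key);	/* r part of the key */
	poly1305_blocks_generic(&st, msg, nblocks * POLY1305_BLOCK_SIZE, 1);

	/* The s part of the key serves as the nonce for the final emit. */
	for (i = 0; i < 4; i++)
		nonce[i] = get_unaligned_le32(key + 16 + i * 4);
	poly1305_emit_generic(&st.h, digest, nonce);
}
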
diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h
index e870133f4b77..071a1951b992 100644
--- a/include/crypto/internal/rsa.h
+++ b/include/crypto/internal/rsa.h
@@ -8,6 +8,7 @@
#ifndef _RSA_HELPER_
#define _RSA_HELPER_
#include <linux/types.h>
+#include <crypto/akcipher.h>
/**
* rsa_key - RSA key structure
@@ -53,5 +54,33 @@ int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key,
int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key,
unsigned int key_len);
+#define RSA_PUB (true)
+#define RSA_PRIV (false)
+
+static inline int rsa_set_key(struct crypto_akcipher *child,
+ unsigned int *key_size, bool is_pubkey,
+ const void *key, unsigned int keylen)
+{
+ int err;
+
+ *key_size = 0;
+
+ if (is_pubkey)
+ err = crypto_akcipher_set_pub_key(child, key, keylen);
+ else
+ err = crypto_akcipher_set_priv_key(child, key, keylen);
+ if (err)
+ return err;
+
+ /* Find out new modulus size from rsa implementation */
+ err = crypto_akcipher_maxsize(child);
+ if (err > PAGE_SIZE)
+ return -ENOTSUPP;
+
+ *key_size = err;
+ return 0;
+}
+
extern struct crypto_template rsa_pkcs1pad_tmpl;
+extern struct crypto_template rsassa_pkcs1_tmpl;
#endif
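
As a hedged illustration, a wrapping template's setkey callbacks can
delegate to rsa_set_key(); struct my_ctx and the callback names are
hypothetical, and akcipher_tfm_ctx() comes from
<crypto/internal/akcipher.h>.

struct my_ctx {
	struct crypto_akcipher *child;	/* inner "rsa" transform */
	unsigned int key_size;		/* modulus size in bytes */
};

static int my_set_pub_key(struct crypto_akcipher *tfm,
			  const void *key, unsigned int keylen)
{
	struct my_ctx *ctx = akcipher_tfm_ctx(tfm);

	return rsa_set_key(ctx->child, &ctx->key_size, RSA_PUB, key, keylen);
}

static int my_set_priv_key(struct crypto_akcipher *tfm,
			   const void *key, unsigned int keylen)
{
	struct my_ctx *ctx = akcipher_tfm_ctx(tfm);

	return rsa_set_key(ctx->child, &ctx->key_size, RSA_PRIV, key, keylen);
}
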
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index f834274c2493..6a2c5f2e90f9 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -8,9 +8,8 @@
*/
#ifndef _CRYPTO_SCOMP_INT_H
#define _CRYPTO_SCOMP_INT_H
-#include <linux/crypto.h>
-#define SCOMP_SCRATCH_SIZE 131072
+#include <crypto/internal/acompress.h>
struct crypto_scomp {
struct crypto_tfm base;
@@ -19,22 +18,25 @@ struct crypto_scomp {
/**
* struct scomp_alg - synchronous compression algorithm
*
- * @alloc_ctx: Function allocates algorithm specific context
- * @free_ctx: Function frees context allocated with alloc_ctx
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
- * @base: Common crypto API algorithm data structure
+ * @streams: Per-cpu memory for algorithm
+ * @calg: Common algorithm data structure shared with acomp
*/
struct scomp_alg {
- void *(*alloc_ctx)(struct crypto_scomp *tfm);
- void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
int (*compress)(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx);
int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx);
- struct crypto_alg base;
+
+ struct crypto_acomp_streams streams;
+
+ union {
+ struct COMP_ALG_COMMON;
+ struct comp_alg_common calg;
+ };
};
static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
@@ -62,17 +64,6 @@ static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
}
-static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
-{
- return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
-}
-
-static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
- void *ctx)
-{
- return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
-}
-
static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
@@ -89,10 +80,6 @@ static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
ctx);
}
-int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
-struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
-void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
-
/**
* crypto_register_scomp() -- Register synchronous compression algorithm
*
diff --git a/include/crypto/internal/sig.h b/include/crypto/internal/sig.h
new file mode 100644
index 000000000000..b16648c1a986
--- /dev/null
+++ b/include/crypto/internal/sig.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Public Key Signature Algorithm
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_INTERNAL_SIG_H
+#define _CRYPTO_INTERNAL_SIG_H
+
+#include <crypto/algapi.h>
+#include <crypto/sig.h>
+
+struct sig_instance {
+ void (*free)(struct sig_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct sig_alg, base)];
+ struct crypto_instance base;
+ };
+ struct sig_alg alg;
+ };
+};
+
+struct crypto_sig_spawn {
+ struct crypto_spawn base;
+};
+
+static inline void *crypto_sig_ctx(struct crypto_sig *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+/**
+ * crypto_register_sig() -- Register public key signature algorithm
+ *
+ * Function registers an implementation of a public key signature algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_sig(struct sig_alg *alg);
+
+/**
+ * crypto_unregister_sig() -- Unregister public key signature algorithm
+ *
+ * Function unregisters an implementation of a public key signature algorithm
+ *
+ * @alg: algorithm definition
+ */
+void crypto_unregister_sig(struct sig_alg *alg);
+
+int sig_register_instance(struct crypto_template *tmpl,
+ struct sig_instance *inst);
+
+static inline struct sig_instance *sig_instance(struct crypto_instance *inst)
+{
+ return container_of(&inst->alg, struct sig_instance, alg.base);
+}
+
+static inline struct sig_instance *sig_alg_instance(struct crypto_sig *tfm)
+{
+ return sig_instance(crypto_tfm_alg_instance(&tfm->base));
+}
+
+static inline struct crypto_instance *sig_crypto_instance(struct sig_instance
+ *inst)
+{
+ return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline void *sig_instance_ctx(struct sig_instance *inst)
+{
+ return crypto_instance_ctx(sig_crypto_instance(inst));
+}
+
+int crypto_grab_sig(struct crypto_sig_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
+static inline struct crypto_sig *crypto_spawn_sig(struct crypto_sig_spawn
+ *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void crypto_drop_sig(struct crypto_sig_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct sig_alg *crypto_spawn_sig_alg(struct crypto_sig_spawn
+ *spawn)
+{
+ return container_of(spawn->base.alg, struct sig_alg, base);
+}
+#endif
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index d2316242a988..9e338e7aafbd 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -6,6 +6,7 @@
#ifndef _CRYPTO_INTERNAL_SIMD_H
#define _CRYPTO_INTERNAL_SIMD_H
+#include <asm/simd.h>
#include <linux/percpu.h>
#include <linux/types.h>
@@ -14,11 +15,10 @@
struct simd_skcipher_alg;
struct skcipher_alg;
-struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
+struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg,
+ const char *algname,
const char *drvname,
const char *basename);
-struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
- const char *basename);
void simd_skcipher_free(struct simd_skcipher_alg *alg);
int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
@@ -32,13 +32,6 @@ void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
struct simd_aead_alg;
struct aead_alg;
-struct simd_aead_alg *simd_aead_create_compat(const char *algname,
- const char *drvname,
- const char *basename);
-struct simd_aead_alg *simd_aead_create(const char *algname,
- const char *basename);
-void simd_aead_free(struct simd_aead_alg *alg);
-
int simd_register_aeads_compat(struct aead_alg *algs, int count,
struct simd_aead_alg **simd_algs);
@@ -52,13 +45,10 @@ void simd_unregister_aeads(struct aead_alg *algs, int count,
* This delegates to may_use_simd(), except that this also returns false if SIMD
* in crypto code has been temporarily disabled on this CPU by the crypto
* self-tests, in order to test the no-SIMD fallback code. This override is
- * currently limited to configurations where the extra self-tests are enabled,
- * because it might be a bit too invasive to be part of the regular self-tests.
- *
- * This is a macro so that <asm/simd.h>, which some architectures don't have,
- * doesn't have to be included directly here.
+ * currently limited to configurations where the "full" self-tests are enabled,
+ * because it might be a bit too invasive to be part of the "fast" self-tests.
*/
-#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+#ifdef CONFIG_CRYPTO_SELFTESTS_FULL
DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);
#define crypto_simd_usable() \
(may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
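
The usual consumer pattern, sketched for x86: guard the SIMD fast path with
crypto_simd_usable() and fall back to a generic routine otherwise.
chacha_crypt_generic() is the real library fallback from <crypto/chacha.h>;
my_chacha_simd() and the FPU-section details are assumptions.

#include <asm/fpu/api.h>	/* kernel_fpu_begin/end, x86-specific */
#include <crypto/chacha.h>

static void example_chacha_crypt(u32 *state, u8 *dst, const u8 *src,
				 unsigned int bytes)
{
	if (!crypto_simd_usable()) {
		chacha_crypt_generic(state, dst, src, bytes, 20);
		return;
	}

	kernel_fpu_begin();
	my_chacha_simd(state, dst, src, bytes);	/* hypothetical SIMD core */
	kernel_fpu_end();
}
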
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index a2339f80a615..0cad8e7364c8 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -11,9 +11,16 @@
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/skcipher.h>
-#include <linux/list.h>
#include <linux/types.h>
+/*
+ * Set this if your algorithm is sync but needs a reqsize larger
+ * than MAX_SYNC_SKCIPHER_REQSIZE.
+ *
+ * Reuse bit that is specific to hash algorithms.
+ */
+#define CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE CRYPTO_ALG_OPTIONAL_KEY
+
struct aead_request;
struct rtattr;
@@ -28,31 +35,53 @@ struct skcipher_instance {
};
};
+struct lskcipher_instance {
+ void (*free)(struct lskcipher_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct lskcipher_alg, co.base)];
+ struct crypto_instance base;
+ } s;
+ struct lskcipher_alg alg;
+ };
+};
+
struct crypto_skcipher_spawn {
struct crypto_spawn base;
};
+struct crypto_lskcipher_spawn {
+ struct crypto_spawn base;
+};
+
struct skcipher_walk {
union {
+ /* Virtual address of the source. */
struct {
- struct page *page;
- unsigned long offset;
- } phys;
+ struct {
+ const void *const addr;
+ } virt;
+ } src;
+ /* Private field for the API, do not use. */
+ struct scatter_walk in;
+ };
+
+ union {
+ /* Virtual address of the destination. */
struct {
- u8 *page;
- void *addr;
- } virt;
- } src, dst;
+ struct {
+ void *const addr;
+ } virt;
+ } dst;
- struct scatter_walk in;
- unsigned int nbytes;
+ /* Private field for the API, do not use. */
+ struct scatter_walk out;
+ };
- struct scatter_walk out;
+ unsigned int nbytes;
unsigned int total;
- struct list_head buffers;
-
u8 *page;
u8 *buffer;
u8 *oiv;
@@ -72,6 +101,12 @@ static inline struct crypto_instance *skcipher_crypto_instance(
return &inst->s.base;
}
+static inline struct crypto_instance *lskcipher_crypto_instance(
+ struct lskcipher_instance *inst)
+{
+ return &inst->s.base;
+}
+
static inline struct skcipher_instance *skcipher_alg_instance(
struct crypto_skcipher *skcipher)
{
@@ -79,35 +114,62 @@ static inline struct skcipher_instance *skcipher_alg_instance(
struct skcipher_instance, alg);
}
+static inline struct lskcipher_instance *lskcipher_alg_instance(
+ struct crypto_lskcipher *lskcipher)
+{
+ return container_of(crypto_lskcipher_alg(lskcipher),
+ struct lskcipher_instance, alg);
+}
+
static inline void *skcipher_instance_ctx(struct skcipher_instance *inst)
{
return crypto_instance_ctx(skcipher_crypto_instance(inst));
}
+static inline void *lskcipher_instance_ctx(struct lskcipher_instance *inst)
+{
+ return crypto_instance_ctx(lskcipher_crypto_instance(inst));
+}
+
static inline void skcipher_request_complete(struct skcipher_request *req, int err)
{
- req->base.complete(&req->base, err);
+ crypto_request_complete(&req->base, err);
}
int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask);
+int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
+
static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
{
crypto_drop_spawn(&spawn->base);
}
-static inline struct skcipher_alg *crypto_skcipher_spawn_alg(
- struct crypto_skcipher_spawn *spawn)
+static inline void crypto_drop_lskcipher(struct crypto_lskcipher_spawn *spawn)
{
- return container_of(spawn->base.alg, struct skcipher_alg, base);
+ crypto_drop_spawn(&spawn->base);
}
-static inline struct skcipher_alg *crypto_spawn_skcipher_alg(
+static inline struct lskcipher_alg *crypto_lskcipher_spawn_alg(
+ struct crypto_lskcipher_spawn *spawn)
+{
+ return container_of(spawn->base.alg, struct lskcipher_alg, co.base);
+}
+
+static inline struct skcipher_alg_common *crypto_spawn_skcipher_alg_common(
struct crypto_skcipher_spawn *spawn)
{
- return crypto_skcipher_spawn_alg(spawn);
+ return container_of(spawn->base.alg, struct skcipher_alg_common, base);
+}
+
+static inline struct lskcipher_alg *crypto_spawn_lskcipher_alg(
+ struct crypto_lskcipher_spawn *spawn)
+{
+ return crypto_lskcipher_spawn_alg(spawn);
}
static inline struct crypto_skcipher *crypto_spawn_skcipher(
@@ -116,12 +178,25 @@ static inline struct crypto_skcipher *crypto_spawn_skcipher(
return crypto_spawn_tfm2(&spawn->base);
}
+static inline struct crypto_lskcipher *crypto_spawn_lskcipher(
+ struct crypto_lskcipher_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
static inline void crypto_skcipher_set_reqsize(
struct crypto_skcipher *skcipher, unsigned int reqsize)
{
skcipher->reqsize = reqsize;
}
+static inline void crypto_skcipher_set_reqsize_dma(
+ struct crypto_skcipher *skcipher, unsigned int reqsize)
+{
+ reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1);
+ skcipher->reqsize = reqsize;
+}
+
int crypto_register_skcipher(struct skcipher_alg *alg);
void crypto_unregister_skcipher(struct skcipher_alg *alg);
int crypto_register_skciphers(struct skcipher_alg *algs, int count);
@@ -129,17 +204,23 @@ void crypto_unregister_skciphers(struct skcipher_alg *algs, int count);
int skcipher_register_instance(struct crypto_template *tmpl,
struct skcipher_instance *inst);
-int skcipher_walk_done(struct skcipher_walk *walk, int err);
-int skcipher_walk_virt(struct skcipher_walk *walk,
- struct skcipher_request *req,
+int crypto_register_lskcipher(struct lskcipher_alg *alg);
+void crypto_unregister_lskcipher(struct lskcipher_alg *alg);
+int crypto_register_lskciphers(struct lskcipher_alg *algs, int count);
+void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count);
+int lskcipher_register_instance(struct crypto_template *tmpl,
+ struct lskcipher_instance *inst);
+
+int skcipher_walk_done(struct skcipher_walk *walk, int res);
+int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
+ struct skcipher_request *__restrict req,
bool atomic);
-int skcipher_walk_async(struct skcipher_walk *walk,
- struct skcipher_request *req);
-int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic);
-int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
- struct aead_request *req, bool atomic);
-void skcipher_walk_complete(struct skcipher_walk *walk, int err);
+int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic);
+int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
+ struct aead_request *__restrict req,
+ bool atomic);
static inline void skcipher_walk_abort(struct skcipher_walk *walk)
{
@@ -151,49 +232,34 @@ static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
return crypto_tfm_ctx(&tfm->base);
}
-static inline void *skcipher_request_ctx(struct skcipher_request *req)
+static inline void *crypto_lskcipher_ctx(struct crypto_lskcipher *tfm)
{
- return req->__ctx;
+ return crypto_tfm_ctx(&tfm->base);
}
-static inline u32 skcipher_request_flags(struct skcipher_request *req)
+static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm)
{
- return req->base.flags;
+ return crypto_tfm_ctx_dma(&tfm->base);
}
-static inline unsigned int crypto_skcipher_alg_min_keysize(
- struct skcipher_alg *alg)
+static inline void *skcipher_request_ctx(struct skcipher_request *req)
{
- return alg->min_keysize;
+ return req->__ctx;
}
-static inline unsigned int crypto_skcipher_alg_max_keysize(
- struct skcipher_alg *alg)
+static inline void *skcipher_request_ctx_dma(struct skcipher_request *req)
{
- return alg->max_keysize;
-}
+ unsigned int align = crypto_dma_align();
-static inline unsigned int crypto_skcipher_alg_walksize(
- struct skcipher_alg *alg)
-{
- return alg->walksize;
+ if (align <= crypto_tfm_ctx_alignment())
+ align = 1;
+
+ return PTR_ALIGN(skcipher_request_ctx(req), align);
}
-/**
- * crypto_skcipher_walksize() - obtain walk size
- * @tfm: cipher handle
- *
- * In some cases, algorithms can only perform optimally when operating on
- * multiple blocks in parallel. This is reflected by the walksize, which
- * must be a multiple of the chunksize (or equal if the concern does not
- * apply)
- *
- * Return: walk size in bytes
- */
-static inline unsigned int crypto_skcipher_walksize(
- struct crypto_skcipher *tfm)
+static inline u32 skcipher_request_flags(struct skcipher_request *req)
{
- return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
+ return req->base.flags;
}
/* Helpers for simple block cipher modes of operation */
@@ -219,5 +285,24 @@ static inline struct crypto_alg *skcipher_ialg_simple(
return crypto_spawn_cipher_alg(spawn);
}
+static inline struct crypto_lskcipher *lskcipher_cipher_simple(
+ struct crypto_lskcipher *tfm)
+{
+ struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+
+ return *ctx;
+}
+
+struct lskcipher_instance *lskcipher_alloc_instance_simple(
+ struct crypto_template *tmpl, struct rtattr **tb);
+
+static inline struct lskcipher_alg *lskcipher_ialg_simple(
+ struct lskcipher_instance *inst)
+{
+ struct crypto_lskcipher_spawn *spawn = lskcipher_instance_ctx(inst);
+
+ return crypto_lskcipher_spawn_alg(spawn);
+}
+
#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
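
The canonical walk loop, sketched for an ECB-style transform against the
reshaped skcipher_walk above; my_cipher_blocks() is hypothetical and the
loop processes full blocks only, returning any partial tail to the walker.

static int example_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int bsize = crypto_skcipher_blocksize(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		unsigned int n = nbytes - (nbytes % bsize);

		/* Hypothetical block-cipher core over n bytes. */
		my_cipher_blocks(tfm, walk.dst.virt.addr,
				 walk.src.virt.addr, n);
		err = skcipher_walk_done(&walk, nbytes - n);
	}
	return err;
}
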
diff --git a/include/crypto/kdf_sp800108.h b/include/crypto/kdf_sp800108.h
new file mode 100644
index 000000000000..b7b20a778fb7
--- /dev/null
+++ b/include/crypto/kdf_sp800108.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2021, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _CRYPTO_KDF108_H
+#define _CRYPTO_KDF108_H
+
+#include <crypto/hash.h>
+#include <linux/uio.h>
+
+/**
+ * Counter KDF generate operation according to SP800-108 section 5.1
+ * as well as SP800-56A section 5.8.1 (Single-step KDF).
+ *
+ * @kmd: Keyed message digest whose key was set with crypto_kdf108_setkey or
+ *	 unkeyed message digest
+ * @info: optional context and application specific information - this may be
+ *	  NULL
+ * @info_nvec: number of optional context/application specific information
+ *	       entries
+ * @dst: destination buffer that the caller already allocated
+ * @dlen: length of the destination buffer - the KDF derives that amount of
+ *	  bytes.
+ *
+ * To comply with SP800-108, the caller must provide Label || 0x00 || Context
+ * in the info parameter.
+ *
+ * @return 0 on success, < 0 on error
+ */
+int crypto_kdf108_ctr_generate(struct crypto_shash *kmd,
+ const struct kvec *info, unsigned int info_nvec,
+ u8 *dst, unsigned int dlen);
+
+/**
+ * Counter KDF setkey operation
+ *
+ * @kmd: Keyed message digest allocated by the caller. The key should not have
+ *	 been set.
+ * @key: Seed key to be used to initialize the keyed message digest context.
+ * @keylen: Length of the key buffer.
+ * @ikm: The SP800-108 KDF does not support IKM - this parameter must be NULL.
+ * @ikmlen: This parameter must be 0.
+ *
+ * According to SP800-108 section 7.2, the seed key must be at least as large as
+ * the message digest size of the used keyed message digest. This limitation
+ * is enforced by the implementation.
+ *
+ * SP800-108 allows the use of either an HMAC or a hash primitive. When
+ * the caller intends to use a hash primitive, the call to
+ * crypto_kdf108_setkey is not required and the key derivation operation can
+ * be performed immediately using crypto_kdf108_ctr_generate after allocating
+ * a handle.
+ *
+ * @return 0 on success, < 0 on error
+ */
+int crypto_kdf108_setkey(struct crypto_shash *kmd,
+ const u8 *key, size_t keylen,
+ const u8 *ikm, size_t ikmlen);
+
+#endif /* _CRYPTO_KDF108_H */
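
A hedged end-to-end sketch: derive 32 bytes over HMAC-SHA256, encoding
Label || 0x00 || Context in a single kvec as the comment above requires.
example_derive() is illustrative; the string literal embeds the mandatory
0x00 separator between label and context.

#include <linux/err.h>

static int example_derive(const u8 *seed, size_t seedlen, u8 out[32])
{
	static const char label_ctx[] = "label\0context";
	struct kvec info = {
		.iov_base = (void *)label_ctx,
		.iov_len = sizeof(label_ctx) - 1,	/* drop trailing NUL */
	};
	struct crypto_shash *kmd;
	int err;

	kmd = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(kmd))
		return PTR_ERR(kmd);

	err = crypto_kdf108_setkey(kmd, seed, seedlen, NULL, 0);
	if (!err)
		err = crypto_kdf108_ctr_generate(kmd, &info, 1, out, 32);

	crypto_free_shash(kmd);
	return err;
}
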
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index cccceadc164b..2d9c4de57b69 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -8,7 +8,11 @@
#ifndef _CRYPTO_KPP_
#define _CRYPTO_KPP_
+
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
+#include <linux/slab.h>
/**
* struct kpp_request
@@ -37,9 +41,13 @@ struct kpp_request {
* struct crypto_kpp - user-instantiated object which encapsulate
* algorithms and core processing logic
*
+ * @reqsize: Request context size required by algorithm
+ * implementation
* @base: Common crypto API algorithm data structure
*/
struct crypto_kpp {
+ unsigned int reqsize;
+
struct crypto_tfm base;
};
@@ -64,8 +72,6 @@ struct crypto_kpp {
* put in place here.
* @exit: Undo everything @init did.
*
- * @reqsize: Request context size required by algorithm
- * implementation
* @base: Common crypto API algorithm data structure
*/
struct kpp_alg {
@@ -79,7 +85,6 @@ struct kpp_alg {
int (*init)(struct crypto_kpp *tfm);
void (*exit)(struct crypto_kpp *tfm);
- unsigned int reqsize;
struct crypto_alg base;
};
@@ -104,6 +109,8 @@ struct kpp_alg {
*/
struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask);
+int crypto_has_kpp(const char *alg_name, u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm)
{
return &tfm->base;
@@ -126,7 +133,7 @@ static inline struct kpp_alg *crypto_kpp_alg(struct crypto_kpp *tfm)
static inline unsigned int crypto_kpp_reqsize(struct crypto_kpp *tfm)
{
- return crypto_kpp_alg(tfm)->reqsize;
+ return tfm->reqsize;
}
static inline void kpp_request_set_tfm(struct kpp_request *req,
@@ -283,14 +290,7 @@ struct kpp_secret {
static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
const void *buffer, unsigned int len)
{
- struct kpp_alg *alg = crypto_kpp_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
-
- crypto_stats_get(calg);
- ret = alg->set_secret(tfm, buffer, len);
- crypto_stats_kpp_set_secret(calg, ret);
- return ret;
+ return crypto_kpp_alg(tfm)->set_secret(tfm, buffer, len);
}
/**
@@ -309,14 +309,8 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct kpp_alg *alg = crypto_kpp_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
- crypto_stats_get(calg);
- ret = alg->generate_public_key(req);
- crypto_stats_kpp_generate_public_key(calg, ret);
- return ret;
+ return crypto_kpp_alg(tfm)->generate_public_key(req);
}
/**
@@ -332,14 +326,8 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct kpp_alg *alg = crypto_kpp_alg(tfm);
- struct crypto_alg *calg = tfm->base.__crt_alg;
- int ret;
- crypto_stats_get(calg);
- ret = alg->compute_shared_secret(req);
- crypto_stats_kpp_compute_shared_secret(calg, ret);
- return ret;
+ return crypto_kpp_alg(tfm)->compute_shared_secret(req);
}
/**
diff --git a/include/crypto/krb5.h b/include/crypto/krb5.h
new file mode 100644
index 000000000000..71dd38f59be1
--- /dev/null
+++ b/include/crypto/krb5.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Kerberos 5 crypto
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _CRYPTO_KRB5_H
+#define _CRYPTO_KRB5_H
+
+#include <linux/crypto.h>
+#include <crypto/aead.h>
+#include <crypto/hash.h>
+
+struct crypto_shash;
+struct scatterlist;
+
+/*
+ * Per Kerberos v5 protocol spec crypto types from the wire. These get mapped
+ * to linux kernel crypto routines.
+ */
+#define KRB5_ENCTYPE_NULL 0x0000
+#define KRB5_ENCTYPE_DES_CBC_CRC 0x0001 /* DES cbc mode with CRC-32 */
+#define KRB5_ENCTYPE_DES_CBC_MD4 0x0002 /* DES cbc mode with RSA-MD4 */
+#define KRB5_ENCTYPE_DES_CBC_MD5 0x0003 /* DES cbc mode with RSA-MD5 */
+#define KRB5_ENCTYPE_DES_CBC_RAW 0x0004 /* DES cbc mode raw */
+/* XXX deprecated? */
+#define KRB5_ENCTYPE_DES3_CBC_SHA 0x0005 /* DES-3 cbc mode with NIST-SHA */
+#define KRB5_ENCTYPE_DES3_CBC_RAW 0x0006 /* DES-3 cbc mode raw */
+#define KRB5_ENCTYPE_DES_HMAC_SHA1 0x0008
+#define KRB5_ENCTYPE_DES3_CBC_SHA1 0x0010
+#define KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011
+#define KRB5_ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012
+#define KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128 0x0013
+#define KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192 0x0014
+#define KRB5_ENCTYPE_ARCFOUR_HMAC 0x0017
+#define KRB5_ENCTYPE_ARCFOUR_HMAC_EXP 0x0018
+#define KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC 0x0019
+#define KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC 0x001a
+#define KRB5_ENCTYPE_UNKNOWN 0x01ff
+
+#define KRB5_CKSUMTYPE_CRC32 0x0001
+#define KRB5_CKSUMTYPE_RSA_MD4 0x0002
+#define KRB5_CKSUMTYPE_RSA_MD4_DES 0x0003
+#define KRB5_CKSUMTYPE_DESCBC 0x0004
+#define KRB5_CKSUMTYPE_RSA_MD5 0x0007
+#define KRB5_CKSUMTYPE_RSA_MD5_DES 0x0008
+#define KRB5_CKSUMTYPE_NIST_SHA 0x0009
+#define KRB5_CKSUMTYPE_HMAC_SHA1_DES3 0x000c
+#define KRB5_CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f
+#define KRB5_CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010
+#define KRB5_CKSUMTYPE_CMAC_CAMELLIA128 0x0011
+#define KRB5_CKSUMTYPE_CMAC_CAMELLIA256 0x0012
+#define KRB5_CKSUMTYPE_HMAC_SHA256_128_AES128 0x0013
+#define KRB5_CKSUMTYPE_HMAC_SHA384_192_AES256 0x0014
+#define KRB5_CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */
+
+/*
+ * Constants used for key derivation
+ */
+/* from rfc3961 */
+#define KEY_USAGE_SEED_CHECKSUM (0x99)
+#define KEY_USAGE_SEED_ENCRYPTION (0xAA)
+#define KEY_USAGE_SEED_INTEGRITY (0x55)
+
+/*
+ * Standard Kerberos error codes.
+ */
+#define KRB5_PROG_KEYTYPE_NOSUPP -1765328233
+
+/*
+ * Mode of operation.
+ */
+enum krb5_crypto_mode {
+ KRB5_CHECKSUM_MODE, /* Checksum only */
+ KRB5_ENCRYPT_MODE, /* Fully encrypted, possibly with integrity checksum */
+};
+
+struct krb5_buffer {
+ unsigned int len;
+ void *data;
+};
+
+/*
+ * Kerberos encoding type definition.
+ */
+struct krb5_enctype {
+ int etype; /* Encryption (key) type */
+ int ctype; /* Checksum type */
+ const char *name; /* "Friendly" name */
+ const char *encrypt_name; /* Crypto encrypt+checksum name */
+ const char *cksum_name; /* Crypto checksum name */
+ const char *hash_name; /* Crypto hash name */
+ const char *derivation_enc; /* Cipher used in key derivation */
+ u16 block_len; /* Length of encryption block */
+ u16 conf_len; /* Length of confounder (normally == block_len) */
+ u16 cksum_len; /* Length of checksum */
+ u16 key_bytes; /* Length of raw key, in bytes */
+ u16 key_len; /* Length of final key, in bytes */
+ u16 hash_len; /* Length of hash in bytes */
+ u16 prf_len; /* Length of PRF() result in bytes */
+ u16 Kc_len; /* Length of Kc in bytes */
+ u16 Ke_len; /* Length of Ke in bytes */
+ u16 Ki_len; /* Length of Ki in bytes */
+ bool keyed_cksum; /* T if a keyed cksum */
+
+ const struct krb5_crypto_profile *profile;
+
+ int (*random_to_key)(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *in,
+ struct krb5_buffer *out); /* complete key generation */
+};
+
+/*
+ * krb5_api.c
+ */
+const struct krb5_enctype *crypto_krb5_find_enctype(u32 enctype);
+size_t crypto_krb5_how_much_buffer(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t data_size, size_t *_offset);
+size_t crypto_krb5_how_much_data(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t *_buffer_size, size_t *_offset);
+void crypto_krb5_where_is_the_data(const struct krb5_enctype *krb5,
+ enum krb5_crypto_mode mode,
+ size_t *_offset, size_t *_len);
+struct crypto_aead *crypto_krb5_prepare_encryption(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ u32 usage, gfp_t gfp);
+struct crypto_shash *crypto_krb5_prepare_checksum(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *TK,
+ u32 usage, gfp_t gfp);
+ssize_t crypto_krb5_encrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len,
+ bool preconfounded);
+int crypto_krb5_decrypt(const struct krb5_enctype *krb5,
+ struct crypto_aead *aead,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+ssize_t crypto_krb5_get_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t sg_len,
+ size_t data_offset, size_t data_len);
+int crypto_krb5_verify_mic(const struct krb5_enctype *krb5,
+ struct crypto_shash *shash,
+ const struct krb5_buffer *metadata,
+ struct scatterlist *sg, unsigned int nr_sg,
+ size_t *_offset, size_t *_len);
+
+/*
+ * krb5_kdf.c
+ */
+int crypto_krb5_calc_PRFplus(const struct krb5_enctype *krb5,
+ const struct krb5_buffer *K,
+ unsigned int L,
+ const struct krb5_buffer *S,
+ struct krb5_buffer *result,
+ gfp_t gfp);
+
+#endif /* _CRYPTO_KRB5_H */
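
For orientation, here is a minimal caller-side sketch of the sizing API above (not part of the patch). It assumes the AES256-CTS-HMAC-SHA384-192 enctype is available; the -ENOPKG error code and the pr_info() reporting are purely illustrative:

#include <crypto/krb5.h>
#include <linux/errno.h>
#include <linux/printk.h>

static int krb5_size_example(size_t data_size)
{
	const struct krb5_enctype *krb5;
	size_t offset, buf_size;

	krb5 = crypto_krb5_find_enctype(KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192);
	if (!krb5)
		return -ENOPKG;	/* enctype not supported/compiled in */

	/*
	 * Total buffer needed to convey data_size bytes in encrypt mode;
	 * the plaintext itself must be placed at 'offset' in that buffer.
	 */
	buf_size = crypto_krb5_how_much_buffer(krb5, KRB5_ENCRYPT_MODE,
					       data_size, &offset);
	pr_info("krb5: need %zu bytes, data at offset %zu\n", buf_size, offset);
	return 0;
}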
diff --git a/include/crypto/md5.h b/include/crypto/md5.h
index cf9e9dec3d21..c47aedfe67ec 100644
--- a/include/crypto/md5.h
+++ b/include/crypto/md5.h
@@ -2,24 +2,209 @@
#ifndef _CRYPTO_MD5_H
#define _CRYPTO_MD5_H
+#include <crypto/hash.h>
#include <linux/types.h>
#define MD5_DIGEST_SIZE 16
#define MD5_HMAC_BLOCK_SIZE 64
+#define MD5_BLOCK_SIZE 64
#define MD5_BLOCK_WORDS 16
#define MD5_HASH_WORDS 4
+#define MD5_STATE_SIZE 24
#define MD5_H0 0x67452301UL
#define MD5_H1 0xefcdab89UL
#define MD5_H2 0x98badcfeUL
#define MD5_H3 0x10325476UL
+#define CRYPTO_MD5_STATESIZE \
+ CRYPTO_HASH_STATESIZE(MD5_STATE_SIZE, MD5_HMAC_BLOCK_SIZE)
+
extern const u8 md5_zero_message_hash[MD5_DIGEST_SIZE];
struct md5_state {
u32 hash[MD5_HASH_WORDS];
- u32 block[MD5_BLOCK_WORDS];
u64 byte_count;
+ u32 block[MD5_BLOCK_WORDS];
+};
+
+/* State for the MD5 compression function */
+struct md5_block_state {
+ u32 h[MD5_HASH_WORDS];
+};
+
+/**
+ * struct md5_ctx - Context for hashing a message with MD5
+ * @state: the compression function state
+ * @bytecount: number of bytes processed so far
+ * @buf: partial block buffer; bytecount % MD5_BLOCK_SIZE bytes are valid
+ */
+struct md5_ctx {
+ struct md5_block_state state;
+ u64 bytecount;
+ u8 buf[MD5_BLOCK_SIZE] __aligned(__alignof__(__le64));
+};
+
+/**
+ * md5_init() - Initialize an MD5 context for a new message
+ * @ctx: the context to initialize
+ *
+ * If you don't need incremental computation, consider md5() instead.
+ *
+ * Context: Any context.
+ */
+void md5_init(struct md5_ctx *ctx);
+
+/**
+ * md5_update() - Update an MD5 context with message data
+ * @ctx: the context to update; must have been initialized
+ * @data: the message data
+ * @len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+void md5_update(struct md5_ctx *ctx, const u8 *data, size_t len);
+
+/**
+ * md5_final() - Finish computing an MD5 message digest
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting MD5 message digest
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void md5_final(struct md5_ctx *ctx, u8 out[at_least MD5_DIGEST_SIZE]);
+
+/**
+ * md5() - Compute MD5 message digest in one shot
+ * @data: the message data
+ * @len: the data length in bytes
+ * @out: (output) the resulting MD5 message digest
+ *
+ * Context: Any context.
+ */
+void md5(const u8 *data, size_t len, u8 out[at_least MD5_DIGEST_SIZE]);
+
+/**
+ * struct hmac_md5_key - Prepared key for HMAC-MD5
+ * @istate: private
+ * @ostate: private
+ */
+struct hmac_md5_key {
+ struct md5_block_state istate;
+ struct md5_block_state ostate;
+};
+
+/**
+ * struct hmac_md5_ctx - Context for computing HMAC-MD5 of a message
+ * @hash_ctx: private
+ * @ostate: private
+ */
+struct hmac_md5_ctx {
+ struct md5_ctx hash_ctx;
+ struct md5_block_state ostate;
};
-#endif
+/**
+ * hmac_md5_preparekey() - Prepare a key for HMAC-MD5
+ * @key: (output) the key structure to initialize
+ * @raw_key: the raw HMAC-MD5 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * Note: the caller is responsible for zeroizing both the struct hmac_md5_key
+ * and the raw key once they are no longer needed.
+ *
+ * Context: Any context.
+ */
+void hmac_md5_preparekey(struct hmac_md5_key *key,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_md5_init() - Initialize an HMAC-MD5 context for a new message
+ * @ctx: (output) the HMAC context to initialize
+ * @key: the prepared HMAC key
+ *
+ * If you don't need incremental computation, consider hmac_md5() instead.
+ *
+ * Context: Any context.
+ */
+void hmac_md5_init(struct hmac_md5_ctx *ctx, const struct hmac_md5_key *key);
+
+/**
+ * hmac_md5_init_usingrawkey() - Initialize an HMAC-MD5 context for a new
+ * message, using a raw key
+ * @ctx: (output) the HMAC context to initialize
+ * @raw_key: the raw HMAC-MD5 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * If you don't need incremental computation, consider hmac_md5_usingrawkey()
+ * instead.
+ *
+ * Context: Any context.
+ */
+void hmac_md5_init_usingrawkey(struct hmac_md5_ctx *ctx,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_md5_update() - Update an HMAC-MD5 context with message data
+ * @ctx: the HMAC context to update; must have been initialized
+ * @data: the message data
+ * @data_len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_md5_update(struct hmac_md5_ctx *ctx,
+ const u8 *data, size_t data_len)
+{
+ md5_update(&ctx->hash_ctx, data, data_len);
+}
+
+/**
+ * hmac_md5_final() - Finish computing an HMAC-MD5 value
+ * @ctx: the HMAC context to finalize; must have been initialized
+ * @out: (output) the resulting HMAC-MD5 value
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void hmac_md5_final(struct hmac_md5_ctx *ctx, u8 out[at_least MD5_DIGEST_SIZE]);
+
+/**
+ * hmac_md5() - Compute HMAC-MD5 in one shot, using a prepared key
+ * @key: the prepared HMAC key
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-MD5 value
+ *
+ * If you're using the key only once, consider using hmac_md5_usingrawkey().
+ *
+ * Context: Any context.
+ */
+void hmac_md5(const struct hmac_md5_key *key,
+ const u8 *data, size_t data_len,
+ u8 out[at_least MD5_DIGEST_SIZE]);
+
+/**
+ * hmac_md5_usingrawkey() - Compute HMAC-MD5 in one shot, using a raw key
+ * @raw_key: the raw HMAC-MD5 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-MD5 value
+ *
+ * If you're using the key multiple times, prefer to use hmac_md5_preparekey()
+ * followed by multiple calls to hmac_md5() instead.
+ *
+ * Context: Any context.
+ */
+void hmac_md5_usingrawkey(const u8 *raw_key, size_t raw_key_len,
+ const u8 *data, size_t data_len,
+ u8 out[at_least MD5_DIGEST_SIZE]);
+
+#endif /* _CRYPTO_MD5_H */
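
As a quick illustration of the one-shot raw-key helper, a sketch using the RFC 2202 test case 2 inputs; the expected digest is noted in the comment:

#include <crypto/md5.h>

static void hmac_md5_example(void)
{
	static const u8 raw_key[] = "Jefe";
	static const u8 msg[] = "what do ya want for nothing?";
	u8 mac[MD5_DIGEST_SIZE];

	/* Key preparation and MAC computation in a single call. */
	hmac_md5_usingrawkey(raw_key, sizeof(raw_key) - 1,
			     msg, sizeof(msg) - 1, mac);
	/* RFC 2202 expects 750c783e6ab0b503eaa86e310a5db738 here. */
}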
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 0ef577cc00e3..1c66abf9de3b 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -9,7 +9,4 @@
#define NULL_DIGEST_SIZE 0
#define NULL_IV_SIZE 0
-struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void);
-void crypto_put_default_null_skcipher(void);
-
#endif
diff --git a/include/crypto/pcrypt.h b/include/crypto/pcrypt.h
index b9bc3436196a..234d7cf3cf5e 100644
--- a/include/crypto/pcrypt.h
+++ b/include/crypto/pcrypt.h
@@ -9,8 +9,8 @@
#ifndef _CRYPTO_PCRYPT_H
#define _CRYPTO_PCRYPT_H
+#include <linux/container_of.h>
#include <linux/crypto.h>
-#include <linux/kernel.h>
#include <linux/padata.h>
struct pcrypt_request {
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index 090692ec3bc7..190beb427c6d 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -7,7 +7,6 @@
#define _CRYPTO_POLY1305_H
#include <linux/types.h>
-#include <linux/crypto.h>
#define POLY1305_BLOCK_SIZE 16
#define POLY1305_KEY_SIZE 32
@@ -38,17 +37,8 @@ struct poly1305_state {
};
};
-struct poly1305_desc_ctx {
- /* partial buffer */
- u8 buf[POLY1305_BLOCK_SIZE];
- /* bytes used in partial buffer */
- unsigned int buflen;
- /* how many keys have been set in r[] */
- unsigned short rset;
- /* whether s[] has been set */
- bool sset;
- /* finalize key */
- u32 s[4];
+/* Combined state for the block function. */
+struct poly1305_block_state {
/* accumulator */
struct poly1305_state h;
/* key */
@@ -58,42 +48,20 @@ struct poly1305_desc_ctx {
};
};
-void poly1305_init_arch(struct poly1305_desc_ctx *desc,
- const u8 key[POLY1305_KEY_SIZE]);
-void poly1305_init_generic(struct poly1305_desc_ctx *desc,
- const u8 key[POLY1305_KEY_SIZE]);
-
-static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
- poly1305_init_arch(desc, key);
- else
- poly1305_init_generic(desc, key);
-}
-
-void poly1305_update_arch(struct poly1305_desc_ctx *desc, const u8 *src,
- unsigned int nbytes);
-void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src,
- unsigned int nbytes);
-
-static inline void poly1305_update(struct poly1305_desc_ctx *desc,
- const u8 *src, unsigned int nbytes)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
- poly1305_update_arch(desc, src, nbytes);
- else
- poly1305_update_generic(desc, src, nbytes);
-}
-
-void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest);
-void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *digest);
+struct poly1305_desc_ctx {
+ /* partial buffer */
+ u8 buf[POLY1305_BLOCK_SIZE];
+ /* bytes used in partial buffer */
+ unsigned int buflen;
+ /* finalize key */
+ u32 s[4];
+ struct poly1305_block_state state;
+};
-static inline void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest)
-{
- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
- poly1305_final_arch(desc, digest);
- else
- poly1305_final_generic(desc, digest);
-}
+void poly1305_init(struct poly1305_desc_ctx *desc,
+ const u8 key[at_least POLY1305_KEY_SIZE]);
+void poly1305_update(struct poly1305_desc_ctx *desc,
+ const u8 *src, unsigned int nbytes);
+void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest);
#endif
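
A brief sketch of the simplified init/update/final flow that results from this hunk. It assumes POLY1305_DIGEST_SIZE (16) from the unshown part of this header, and note that a Poly1305 key must only ever authenticate a single message:

#include <crypto/poly1305.h>

static void poly1305_example(const u8 key[POLY1305_KEY_SIZE],
			     const u8 *msg, unsigned int len)
{
	struct poly1305_desc_ctx desc;
	u8 tag[POLY1305_DIGEST_SIZE];

	poly1305_init(&desc, key);		/* the (r, s) pair; single-use */
	poly1305_update(&desc, msg, len);	/* callable repeatedly */
	poly1305_final(&desc, tag);		/* emits the 16-byte tag */
}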
diff --git a/include/crypto/polyval.h b/include/crypto/polyval.h
new file mode 100644
index 000000000000..b28b8ef11353
--- /dev/null
+++ b/include/crypto/polyval.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * POLYVAL library API
+ *
+ * Copyright 2025 Google LLC
+ */
+
+#ifndef _CRYPTO_POLYVAL_H
+#define _CRYPTO_POLYVAL_H
+
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define POLYVAL_BLOCK_SIZE 16
+#define POLYVAL_DIGEST_SIZE 16
+
+/**
+ * struct polyval_elem - An element of the POLYVAL finite field
+ * @bytes: View of the element as a byte array (unioned with @lo and @hi)
+ * @lo: The low 64 terms of the element's polynomial
+ * @hi: The high 64 terms of the element's polynomial
+ *
+ * This represents an element of the finite field GF(2^128), using the POLYVAL
+ * convention: little-endian byte order and natural bit order.
+ */
+struct polyval_elem {
+ union {
+ u8 bytes[POLYVAL_BLOCK_SIZE];
+ struct {
+ __le64 lo;
+ __le64 hi;
+ };
+ };
+};
+
+/**
+ * struct polyval_key - Prepared key for POLYVAL
+ *
+ * This may contain just the raw key H, or it may contain precomputed key
+ * powers, depending on the platform's POLYVAL implementation. Use
+ * polyval_preparekey() to initialize this.
+ *
+ * By H^i we mean H^(i-1) * H * x^-128, with base case H^1 = H. That is, the
+ * exponentiation repeats the POLYVAL dot operation, with its "extra" x^-128.
+ */
+struct polyval_key {
+#ifdef CONFIG_CRYPTO_LIB_POLYVAL_ARCH
+#ifdef CONFIG_ARM64
+ /** @h_powers: Powers of the hash key H^8 through H^1 */
+ struct polyval_elem h_powers[8];
+#elif defined(CONFIG_X86)
+ /** @h_powers: Powers of the hash key H^8 through H^1 */
+ struct polyval_elem h_powers[8];
+#else
+#error "Unhandled arch"
+#endif
+#else /* CONFIG_CRYPTO_LIB_POLYVAL_ARCH */
+ /** @h: The hash key H */
+ struct polyval_elem h;
+#endif /* !CONFIG_CRYPTO_LIB_POLYVAL_ARCH */
+};
+
+/**
+ * struct polyval_ctx - Context for computing a POLYVAL value
+ * @key: Pointer to the prepared POLYVAL key. The user of the API is
+ * responsible for ensuring that the key lives as long as the context.
+ * @acc: The accumulator
+ * @partial: Number of data bytes processed so far modulo POLYVAL_BLOCK_SIZE
+ */
+struct polyval_ctx {
+ const struct polyval_key *key;
+ struct polyval_elem acc;
+ size_t partial;
+};
+
+/**
+ * polyval_preparekey() - Prepare a POLYVAL key
+ * @key: (output) The key structure to initialize
+ * @raw_key: The raw hash key
+ *
+ * Initialize a POLYVAL key structure from a raw key. This may be a simple
+ * copy, or it may involve precomputing powers of the key, depending on the
+ * platform's POLYVAL implementation.
+ *
+ * Context: Any context.
+ */
+#ifdef CONFIG_CRYPTO_LIB_POLYVAL_ARCH
+void polyval_preparekey(struct polyval_key *key,
+ const u8 raw_key[POLYVAL_BLOCK_SIZE]);
+
+#else
+static inline void polyval_preparekey(struct polyval_key *key,
+ const u8 raw_key[POLYVAL_BLOCK_SIZE])
+{
+ /* Just a simple copy, so inline it. */
+ memcpy(key->h.bytes, raw_key, POLYVAL_BLOCK_SIZE);
+}
+#endif
+
+/**
+ * polyval_init() - Initialize a POLYVAL context for a new message
+ * @ctx: The context to initialize
+ * @key: The key to use. Note that a pointer to the key is saved in the
+ * context, so the key must live at least as long as the context.
+ */
+static inline void polyval_init(struct polyval_ctx *ctx,
+ const struct polyval_key *key)
+{
+ *ctx = (struct polyval_ctx){ .key = key };
+}
+
+/**
+ * polyval_import_blkaligned() - Import a POLYVAL accumulator value
+ * @ctx: The context to initialize
+ * @key: The key to use. Note that a pointer to the key is saved in the
+ * context, so the key must live at least as long as the context.
+ * @acc: The accumulator value to import.
+ *
+ * This imports an accumulator that was saved by polyval_export_blkaligned().
+ * The same key must be used.
+ */
+static inline void
+polyval_import_blkaligned(struct polyval_ctx *ctx,
+ const struct polyval_key *key,
+ const struct polyval_elem *acc)
+{
+ *ctx = (struct polyval_ctx){ .key = key, .acc = *acc };
+}
+
+/**
+ * polyval_export_blkaligned() - Export a POLYVAL accumulator value
+ * @ctx: The context to export the accumulator value from
+ * @acc: (output) The exported accumulator value
+ *
+ * This exports the accumulator from a POLYVAL context. The number of data
+ * bytes processed so far must be a multiple of POLYVAL_BLOCK_SIZE.
+ */
+static inline void polyval_export_blkaligned(const struct polyval_ctx *ctx,
+ struct polyval_elem *acc)
+{
+ *acc = ctx->acc;
+}
+
+/**
+ * polyval_update() - Update a POLYVAL context with message data
+ * @ctx: The context to update; must have been initialized
+ * @data: The message data
+ * @len: The data length in bytes. Doesn't need to be block-aligned.
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+void polyval_update(struct polyval_ctx *ctx, const u8 *data, size_t len);
+
+/**
+ * polyval_final() - Finish computing a POLYVAL value
+ * @ctx: The context to finalize
+ * @out: The output value
+ *
+ * If the total data length isn't a multiple of POLYVAL_BLOCK_SIZE, then the
+ * final block is automatically zero-padded.
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void polyval_final(struct polyval_ctx *ctx, u8 out[POLYVAL_BLOCK_SIZE]);
+
+/**
+ * polyval() - Compute a POLYVAL value
+ * @key: The prepared key
+ * @data: The message data
+ * @len: The data length in bytes. Doesn't need to be block-aligned.
+ * @out: The output value
+ *
+ * Context: Any context.
+ */
+static inline void polyval(const struct polyval_key *key,
+ const u8 *data, size_t len,
+ u8 out[POLYVAL_BLOCK_SIZE])
+{
+ struct polyval_ctx ctx;
+
+ polyval_init(&ctx, key);
+ polyval_update(&ctx, data, len);
+ polyval_final(&ctx, out);
+}
+
+#endif /* _CRYPTO_POLYVAL_H */
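
A usage sketch for the key-preparation plus one-shot path: polyval_preparekey() is a plain copy on generic builds and a power precomputation when an arch implementation is enabled, after which the same key can hash any number of messages:

#include <crypto/polyval.h>

static void polyval_example(const u8 raw_key[POLYVAL_BLOCK_SIZE],
			    const u8 *data, size_t len)
{
	struct polyval_key key;
	u8 out[POLYVAL_DIGEST_SIZE];

	polyval_preparekey(&key, raw_key);

	/* One-shot wrapper around polyval_init/update/final. */
	polyval(&key, data, len, out);
}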
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 47accec68cb0..81098e00c08f 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -10,6 +10,7 @@
#ifndef _LINUX_PUBLIC_KEY_H
#define _LINUX_PUBLIC_KEY_H
+#include <linux/errno.h>
#include <linux/keyctl.h>
#include <linux/oid_registry.h>
@@ -28,6 +29,10 @@ struct public_key {
bool key_is_private;
const char *id_type;
const char *pkey_algo;
+ unsigned long key_eflags; /* key extension flags */
+#define KEY_EFLAG_CA 0 /* set if the basicConstraints CA flag is set */
+#define KEY_EFLAG_DIGITALSIG 1 /* set if the digitalSignature usage is set */
+#define KEY_EFLAG_KEYCERTSIGN 2 /* set if the keyCertSign usage is set */
};
extern void public_key_free(struct public_key *key);
@@ -36,16 +41,14 @@ extern void public_key_free(struct public_key *key);
* Public key cryptography signature data
*/
struct public_key_signature {
- struct asymmetric_key_id *auth_ids[2];
+ struct asymmetric_key_id *auth_ids[3];
u8 *s; /* Signature */
- u32 s_size; /* Number of bytes in signature */
u8 *digest;
- u8 digest_size; /* Number of bytes in digest */
+ u32 s_size; /* Number of bytes in signature */
+ u32 digest_size; /* Number of bytes in digest */
const char *pkey_algo;
const char *hash_algo;
const char *encoding;
- const void *data;
- unsigned int data_size;
};
extern void public_key_signature_free(struct public_key_signature *sig);
@@ -71,16 +74,49 @@ extern int restrict_link_by_key_or_keyring_chain(struct key *trust_keyring,
const union key_payload *payload,
struct key *trusted);
+#if IS_REACHABLE(CONFIG_ASYMMETRIC_KEY_TYPE)
+extern int restrict_link_by_ca(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring);
+int restrict_link_by_digsig(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring);
+#else
+static inline int restrict_link_by_ca(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring)
+{
+ return 0;
+}
+
+static inline int restrict_link_by_digsig(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trust_keyring)
+{
+ return 0;
+}
+#endif
+
extern int query_asymmetric_key(const struct kernel_pkey_params *,
struct kernel_pkey_query *);
-extern int encrypt_blob(struct kernel_pkey_params *, const void *, void *);
-extern int decrypt_blob(struct kernel_pkey_params *, const void *, void *);
-extern int create_signature(struct kernel_pkey_params *, const void *, void *);
extern int verify_signature(const struct key *,
const struct public_key_signature *);
+#if IS_REACHABLE(CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE)
int public_key_verify_signature(const struct public_key *pkey,
const struct public_key_signature *sig);
+#else
+static inline
+int public_key_verify_signature(const struct public_key *pkey,
+ const struct public_key_signature *sig)
+{
+ return -EINVAL;
+}
+#endif
#endif /* _LINUX_PUBLIC_KEY_H */
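
Because key_eflags stores bit numbers rather than masks, a consumer would query it with the bitops helpers. A hypothetical policy check, as a sketch only:

#include <crypto/public_key.h>
#include <linux/bitops.h>

static bool key_may_sign_certs(const struct public_key *pub)
{
	/* Example policy: require both the CA constraint and keyCertSign. */
	return test_bit(KEY_EFLAG_CA, &pub->key_eflags) &&
	       test_bit(KEY_EFLAG_KEYCERTSIGN, &pub->key_eflags);
}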
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index 17bb3673d3c1..d451b54b322a 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -9,6 +9,8 @@
#ifndef _CRYPTO_RNG_H
#define _CRYPTO_RNG_H
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
struct crypto_rng;
@@ -94,18 +96,20 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
return &tfm->base;
}
+static inline struct rng_alg *__crypto_rng_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct rng_alg, base);
+}
+
/**
- * crypto_rng_alg - obtain name of RNG
- * @tfm: cipher handle
- *
- * Return the generic name (cra_name) of the initialized random number generator
+ * crypto_rng_alg() - obtain 'struct rng_alg' pointer from RNG handle
+ * @tfm: RNG handle
*
- * Return: generic name string
+ * Return: Pointer to 'struct rng_alg', derived from @tfm RNG handle
*/
static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
{
- return container_of(crypto_rng_tfm(tfm)->__crt_alg,
- struct rng_alg, base);
+ return __crypto_rng_alg(crypto_rng_tfm(tfm)->__crt_alg);
}
/**
@@ -137,13 +141,7 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
- struct crypto_alg *alg = tfm->base.__crt_alg;
- int ret;
-
- crypto_stats_get(alg);
- ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
- crypto_stats_rng_generate(alg, dlen, ret);
- return ret;
+ return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
}
/**
@@ -171,12 +169,11 @@ static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
*
* The reset function completely re-initializes the random number generator
* referenced by the cipher handle by clearing the current state. The new state
- * is initialized with the caller provided seed or automatically, depending
- * on the random number generator type (the ANSI X9.31 RNG requires
- * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding).
- * The seed is provided as a parameter to this function call. The provided seed
- * should have the length of the seed size defined for the random number
- * generator as defined by crypto_rng_seedsize.
+ * is initialized with the caller provided seed or automatically, depending on
+ * the random number generator type. (The SP800-90A DRBGs perform an automatic
+ * seeding.) The seed is provided as a parameter to this function call. The
+ * provided seed should have the length of the seed size defined for the random
+ * number generator as defined by crypto_rng_seedsize.
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index c837d0775474..624fab589c2c 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -12,8 +12,9 @@
#define _CRYPTO_SCATTERWALK_H
#include <crypto/algapi.h>
+
#include <linux/highmem.h>
-#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/scatterlist.h>
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
@@ -25,88 +26,228 @@ static inline void scatterwalk_crypto_chain(struct scatterlist *head,
sg_mark_end(head);
}
-static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
+static inline void scatterwalk_start(struct scatter_walk *walk,
+ struct scatterlist *sg)
{
- unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
- unsigned int len_this_page = offset_in_page(~walk->offset) + 1;
- return len_this_page > len ? len : len_this_page;
+ walk->sg = sg;
+ walk->offset = sg->offset;
+}
+
+/*
+ * This is equivalent to scatterwalk_start(walk, sg) followed by
+ * scatterwalk_skip(walk, pos).
+ */
+static inline void scatterwalk_start_at_pos(struct scatter_walk *walk,
+ struct scatterlist *sg,
+ unsigned int pos)
+{
+ while (pos > sg->length) {
+ pos -= sg->length;
+ sg = sg_next(sg);
+ }
+ walk->sg = sg;
+ walk->offset = sg->offset + pos;
}
static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
unsigned int nbytes)
{
- unsigned int len_this_page = scatterwalk_pagelen(walk);
- return nbytes > len_this_page ? len_this_page : nbytes;
+ unsigned int len_this_sg;
+ unsigned int limit;
+
+ if (walk->offset >= walk->sg->offset + walk->sg->length)
+ scatterwalk_start(walk, sg_next(walk->sg));
+ len_this_sg = walk->sg->offset + walk->sg->length - walk->offset;
+
+ /*
+ * HIGHMEM case: the page may have to be mapped into memory. To avoid
+ * the complexity of having to map multiple pages at once per sg entry,
+ * clamp the returned length to not cross a page boundary.
+ *
+ * !HIGHMEM case: no mapping is needed; all pages of the sg entry are
+ * already mapped contiguously in the kernel's direct map. For improved
+ * performance, allow the walker to return data segments that cross a
+ * page boundary. Do still cap the length to PAGE_SIZE, since some
+ * users rely on that to avoid disabling preemption for too long when
+ * using SIMD. It's also needed for when skcipher_walk uses a bounce
+ * page due to the data not being aligned to the algorithm's alignmask.
+ */
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ limit = PAGE_SIZE - offset_in_page(walk->offset);
+ else
+ limit = PAGE_SIZE;
+
+ return min3(nbytes, len_this_sg, limit);
}
-static inline void scatterwalk_advance(struct scatter_walk *walk,
- unsigned int nbytes)
+/*
+ * Create a scatterlist that represents the remaining data in a walk. Uses
+ * chaining to reference the original scatterlist, so this uses at most two
+ * entries in @sg_out regardless of the number of entries in the original list.
+ * Assumes that sg_init_table() was already done.
+ */
+static inline void scatterwalk_get_sglist(struct scatter_walk *walk,
+ struct scatterlist sg_out[2])
{
- walk->offset += nbytes;
+ if (walk->offset >= walk->sg->offset + walk->sg->length)
+ scatterwalk_start(walk, sg_next(walk->sg));
+ sg_set_page(sg_out, sg_page(walk->sg),
+ walk->sg->offset + walk->sg->length - walk->offset,
+ walk->offset);
+ scatterwalk_crypto_chain(sg_out, sg_next(walk->sg), 2);
}
-static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
- unsigned int alignmask)
+static inline void scatterwalk_map(struct scatter_walk *walk)
{
- return !(walk->offset & alignmask);
+ struct page *base_page = sg_page(walk->sg);
+ unsigned int offset = walk->offset;
+ void *addr;
+
+ if (IS_ENABLED(CONFIG_HIGHMEM)) {
+ struct page *page;
+
+ page = base_page + (offset >> PAGE_SHIFT);
+ offset = offset_in_page(offset);
+ addr = kmap_local_page(page) + offset;
+ } else {
+ /*
+ * When !HIGHMEM we allow the walker to return segments that
+ * span a page boundary; see scatterwalk_clamp(). To make it
+ * clear that in this case we're working in the linear buffer of
+ * the whole sg entry in the kernel's direct map rather than
+ * within the mapped buffer of a single page, compute the
+ * address as an offset from the page_address() of the first
+ * page of the sg entry. Either way the result is the address
+ * in the direct map, but this makes it clearer what is really
+ * going on.
+ */
+ addr = page_address(base_page) + offset;
+ }
+
+ walk->__addr = addr;
}
-static inline struct page *scatterwalk_page(struct scatter_walk *walk)
+/**
+ * scatterwalk_next() - Get the next data buffer in a scatterlist walk
+ * @walk: the scatter_walk
+ * @total: the total number of bytes remaining, > 0
+ *
+ * A virtual address for the next segment of data from the scatterlist will
+ * be placed into @walk->addr. The caller must call scatterwalk_done_src()
+ * or scatterwalk_done_dst() when it is done using this virtual address.
+ *
+ * Returns: the next number of bytes available, <= @total
+ */
+static inline unsigned int scatterwalk_next(struct scatter_walk *walk,
+ unsigned int total)
{
- return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+ unsigned int nbytes = scatterwalk_clamp(walk, total);
+
+ scatterwalk_map(walk);
+ return nbytes;
}
-static inline void scatterwalk_unmap(void *vaddr)
+static inline void scatterwalk_unmap(struct scatter_walk *walk)
{
- kunmap_atomic(vaddr);
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ kunmap_local(walk->__addr);
}
-static inline void scatterwalk_start(struct scatter_walk *walk,
- struct scatterlist *sg)
+static inline void scatterwalk_advance(struct scatter_walk *walk,
+ unsigned int nbytes)
{
- walk->sg = sg;
- walk->offset = sg->offset;
+ walk->offset += nbytes;
}
-static inline void *scatterwalk_map(struct scatter_walk *walk)
+/**
+ * scatterwalk_done_src() - Finish one step of a walk of source scatterlist
+ * @walk: the scatter_walk
+ * @nbytes: the number of bytes processed this step, less than or equal to the
+ * number of bytes that scatterwalk_next() returned.
+ *
+ * Use this if the mapped address was not written to, i.e. it is source data.
+ */
+static inline void scatterwalk_done_src(struct scatter_walk *walk,
+ unsigned int nbytes)
{
- return kmap_atomic(scatterwalk_page(walk)) +
- offset_in_page(walk->offset);
+ scatterwalk_unmap(walk);
+ scatterwalk_advance(walk, nbytes);
}
-static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
- unsigned int more)
+/*
+ * Flush the dcache of any pages that overlap the region
+ * [offset, offset + nbytes) relative to base_page.
+ *
+ * This should be called only when ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, to ensure
+ * that all relevant code (including the call to sg_page() in the caller, if
+ * applicable) gets fully optimized out when !ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE.
+ */
+static inline void __scatterwalk_flush_dcache_pages(struct page *base_page,
+ unsigned int offset,
+ unsigned int nbytes)
{
- if (out) {
- struct page *page;
+ unsigned int num_pages;
- page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
- /* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as
- * PageSlab cannot be optimised away per se due to
- * use of volatile pointer.
- */
- if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
- flush_dcache_page(page);
- }
+ base_page += offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
- if (more && walk->offset >= walk->sg->offset + walk->sg->length)
- scatterwalk_start(walk, sg_next(walk->sg));
+ /*
+ * This is an overflow-safe version of
+ * num_pages = DIV_ROUND_UP(offset + nbytes, PAGE_SIZE).
+ */
+ num_pages = nbytes / PAGE_SIZE;
+ num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE);
+
+ for (unsigned int i = 0; i < num_pages; i++)
+ flush_dcache_page(base_page + i);
}
-static inline void scatterwalk_done(struct scatter_walk *walk, int out,
- int more)
+/**
+ * scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist
+ * @walk: the scatter_walk
+ * @nbytes: the number of bytes processed this step, less than or equal to the
+ * number of bytes that scatterwalk_next() returned.
+ *
+ * Use this if the mapped address may have been written to, i.e. it is
+ * destination data.
+ */
+static inline void scatterwalk_done_dst(struct scatter_walk *walk,
+ unsigned int nbytes)
{
- if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
- !(walk->offset & (PAGE_SIZE - 1)))
- scatterwalk_pagedone(walk, out, more);
+ scatterwalk_unmap(walk);
+ if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
+ __scatterwalk_flush_dcache_pages(sg_page(walk->sg),
+ walk->offset, nbytes);
+ scatterwalk_advance(walk, nbytes);
}
-void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
- size_t nbytes, int out);
-void *scatterwalk_map(struct scatter_walk *walk);
+void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes);
+
+void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk,
+ unsigned int nbytes);
-void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
- unsigned int start, unsigned int nbytes, int out);
+void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf,
+ unsigned int nbytes);
+
+void memcpy_from_sglist(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes);
+
+void memcpy_to_sglist(struct scatterlist *sg, unsigned int start,
+ const void *buf, unsigned int nbytes);
+
+void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes);
+
+/* In new code, please use memcpy_{from,to}_sglist() directly instead. */
+static inline void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
+ unsigned int start,
+ unsigned int nbytes, int out)
+{
+ if (out)
+ memcpy_to_sglist(sg, start, buf, nbytes);
+ else
+ memcpy_from_sglist(buf, sg, start, nbytes);
+}
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
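
To show how the stepwise pieces above compose, an illustrative read-only walk over an sglist; it assumes the 'addr' view of the mapped address that scatterwalk_map() fills in:

#include <crypto/scatterwalk.h>

static u32 sum_sglist_example(struct scatterlist *sg, unsigned int total)
{
	struct scatter_walk walk;
	u32 sum = 0;
	unsigned int i, n;

	scatterwalk_start(&walk, sg);
	while (total) {
		const u8 *p;

		n = scatterwalk_next(&walk, total);
		p = walk.addr;
		for (i = 0; i < n; i++)
			sum += p[i];
		/* Read-only access: unmap and advance, no dcache flush. */
		scatterwalk_done_src(&walk, n);
		total -= n;
	}
	return sum;
}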
diff --git a/include/crypto/sha1.h b/include/crypto/sha1.h
index 044ecea60ac8..27f08b972931 100644
--- a/include/crypto/sha1.h
+++ b/include/crypto/sha1.h
@@ -10,6 +10,7 @@
#define SHA1_DIGEST_SIZE 20
#define SHA1_BLOCK_SIZE 64
+#define SHA1_STATE_SIZE offsetof(struct sha1_state, buffer)
#define SHA1_H0 0x67452301UL
#define SHA1_H1 0xefcdab89UL
@@ -25,14 +26,6 @@ struct sha1_state {
u8 buffer[SHA1_BLOCK_SIZE];
};
-struct shash_desc;
-
-extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
/*
* An implementation of SHA-1's compression function. Don't use in new code!
* You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't
@@ -40,7 +33,187 @@ extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
*/
#define SHA1_DIGEST_WORDS (SHA1_DIGEST_SIZE / 4)
#define SHA1_WORKSPACE_WORDS 16
-void sha1_init(__u32 *buf);
+void sha1_init_raw(__u32 *buf);
void sha1_transform(__u32 *digest, const char *data, __u32 *W);
+/* State for the SHA-1 compression function */
+struct sha1_block_state {
+ u32 h[SHA1_DIGEST_SIZE / 4];
+};
+
+/**
+ * struct sha1_ctx - Context for hashing a message with SHA-1
+ * @state: the compression function state
+ * @bytecount: number of bytes processed so far
+ * @buf: partial block buffer; bytecount % SHA1_BLOCK_SIZE bytes are valid
+ */
+struct sha1_ctx {
+ struct sha1_block_state state;
+ u64 bytecount;
+ u8 buf[SHA1_BLOCK_SIZE];
+};
+
+/**
+ * sha1_init() - Initialize a SHA-1 context for a new message
+ * @ctx: the context to initialize
+ *
+ * If you don't need incremental computation, consider sha1() instead.
+ *
+ * Context: Any context.
+ */
+void sha1_init(struct sha1_ctx *ctx);
+
+/**
+ * sha1_update() - Update a SHA-1 context with message data
+ * @ctx: the context to update; must have been initialized
+ * @data: the message data
+ * @len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+void sha1_update(struct sha1_ctx *ctx, const u8 *data, size_t len);
+
+/**
+ * sha1_final() - Finish computing a SHA-1 message digest
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting SHA-1 message digest
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void sha1_final(struct sha1_ctx *ctx, u8 out[at_least SHA1_DIGEST_SIZE]);
+
+/**
+ * sha1() - Compute SHA-1 message digest in one shot
+ * @data: the message data
+ * @len: the data length in bytes
+ * @out: (output) the resulting SHA-1 message digest
+ *
+ * Context: Any context.
+ */
+void sha1(const u8 *data, size_t len, u8 out[at_least SHA1_DIGEST_SIZE]);
+
+/**
+ * struct hmac_sha1_key - Prepared key for HMAC-SHA1
+ * @istate: private
+ * @ostate: private
+ */
+struct hmac_sha1_key {
+ struct sha1_block_state istate;
+ struct sha1_block_state ostate;
+};
+
+/**
+ * struct hmac_sha1_ctx - Context for computing HMAC-SHA1 of a message
+ * @sha_ctx: private
+ * @ostate: private
+ */
+struct hmac_sha1_ctx {
+ struct sha1_ctx sha_ctx;
+ struct sha1_block_state ostate;
+};
+
+/**
+ * hmac_sha1_preparekey() - Prepare a key for HMAC-SHA1
+ * @key: (output) the key structure to initialize
+ * @raw_key: the raw HMAC-SHA1 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * Note: the caller is responsible for zeroizing both the struct hmac_sha1_key
+ * and the raw key once they are no longer needed.
+ *
+ * Context: Any context.
+ */
+void hmac_sha1_preparekey(struct hmac_sha1_key *key,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_sha1_init() - Initialize an HMAC-SHA1 context for a new message
+ * @ctx: (output) the HMAC context to initialize
+ * @key: the prepared HMAC key
+ *
+ * If you don't need incremental computation, consider hmac_sha1() instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha1_init(struct hmac_sha1_ctx *ctx, const struct hmac_sha1_key *key);
+
+/**
+ * hmac_sha1_init_usingrawkey() - Initialize an HMAC-SHA1 context for a new
+ * message, using a raw key
+ * @ctx: (output) the HMAC context to initialize
+ * @raw_key: the raw HMAC-SHA1 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * If you don't need incremental computation, consider hmac_sha1_usingrawkey()
+ * instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha1_init_usingrawkey(struct hmac_sha1_ctx *ctx,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_sha1_update() - Update an HMAC-SHA1 context with message data
+ * @ctx: the HMAC context to update; must have been initialized
+ * @data: the message data
+ * @data_len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha1_update(struct hmac_sha1_ctx *ctx,
+ const u8 *data, size_t data_len)
+{
+ sha1_update(&ctx->sha_ctx, data, data_len);
+}
+
+/**
+ * hmac_sha1_final() - Finish computing an HMAC-SHA1 value
+ * @ctx: the HMAC context to finalize; must have been initialized
+ * @out: (output) the resulting HMAC-SHA1 value
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void hmac_sha1_final(struct hmac_sha1_ctx *ctx,
+ u8 out[at_least SHA1_DIGEST_SIZE]);
+
+/**
+ * hmac_sha1() - Compute HMAC-SHA1 in one shot, using a prepared key
+ * @key: the prepared HMAC key
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA1 value
+ *
+ * If you're using the key only once, consider using hmac_sha1_usingrawkey().
+ *
+ * Context: Any context.
+ */
+void hmac_sha1(const struct hmac_sha1_key *key,
+ const u8 *data, size_t data_len,
+ u8 out[at_least SHA1_DIGEST_SIZE]);
+
+/**
+ * hmac_sha1_usingrawkey() - Compute HMAC-SHA1 in one shot, using a raw key
+ * @raw_key: the raw HMAC-SHA1 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA1 value
+ *
+ * If you're using the key multiple times, prefer to use hmac_sha1_preparekey()
+ * followed by multiple calls to hmac_sha1() instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha1_usingrawkey(const u8 *raw_key, size_t raw_key_len,
+ const u8 *data, size_t data_len,
+ u8 out[at_least SHA1_DIGEST_SIZE]);
+
#endif /* _CRYPTO_SHA1_H */
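
A sketch of the intended prepared-key flow, including the zeroization that the hmac_sha1_preparekey() kernel-doc makes the caller responsible for:

#include <crypto/sha1.h>
#include <linux/string.h>

static void hmac_sha1_example(const u8 *key, size_t key_len,
			      const u8 *msg, size_t msg_len)
{
	struct hmac_sha1_key prepared;
	u8 mac[SHA1_DIGEST_SIZE];

	/* Prepare once, then reuse across many messages. */
	hmac_sha1_preparekey(&prepared, key, key_len);
	hmac_sha1(&prepared, msg, msg_len, mac);

	/* The caller owns zeroization of the prepared key. */
	memzero_explicit(&prepared, sizeof(prepared));
}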
diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h
deleted file mode 100644
index 2e0e7c3827d1..000000000000
--- a/include/crypto/sha1_base.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * sha1_base.h - core logic for SHA-1 implementations
- *
- * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#ifndef _CRYPTO_SHA1_BASE_H
-#define _CRYPTO_SHA1_BASE_H
-
-#include <crypto/internal/hash.h>
-#include <crypto/sha1.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-#include <asm/unaligned.h>
-
-typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks);
-
-static inline int sha1_base_init(struct shash_desc *desc)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA1_H0;
- sctx->state[1] = SHA1_H1;
- sctx->state[2] = SHA1_H2;
- sctx->state[3] = SHA1_H3;
- sctx->state[4] = SHA1_H4;
- sctx->count = 0;
-
- return 0;
-}
-
-static inline int sha1_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha1_block_fn *block_fn)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
-
- sctx->count += len;
-
- if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SHA1_BLOCK_SIZE - partial;
-
- memcpy(sctx->buffer + partial, data, p);
- data += p;
- len -= p;
-
- block_fn(sctx, sctx->buffer, 1);
- }
-
- blocks = len / SHA1_BLOCK_SIZE;
- len %= SHA1_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SHA1_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buffer + partial, data, len);
-
- return 0;
-}
-
-static inline int sha1_base_do_finalize(struct shash_desc *desc,
- sha1_block_fn *block_fn)
-{
- const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
- struct sha1_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
-
- sctx->buffer[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial);
- partial = 0;
-
- block_fn(sctx, sctx->buffer, 1);
- }
-
- memset(sctx->buffer + partial, 0x0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, sctx->buffer, 1);
-
- return 0;
-}
-
-static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- __be32 *digest = (__be32 *)out;
- int i;
-
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
- put_unaligned_be32(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
- return 0;
-}
-
-#endif /* _CRYPTO_SHA1_BASE_H */
diff --git a/include/crypto/sha2.h b/include/crypto/sha2.h
index 2838f529f31e..7bb8fe169daf 100644
--- a/include/crypto/sha2.h
+++ b/include/crypto/sha2.h
@@ -13,12 +13,14 @@
#define SHA256_DIGEST_SIZE 32
#define SHA256_BLOCK_SIZE 64
+#define SHA256_STATE_WORDS 8
#define SHA384_DIGEST_SIZE 48
#define SHA384_BLOCK_SIZE 128
#define SHA512_DIGEST_SIZE 64
#define SHA512_BLOCK_SIZE 128
+#define SHA512_STATE_SIZE 80
#define SHA224_H0 0xc1059ed8UL
#define SHA224_H1 0x367cd507UL
@@ -64,9 +66,45 @@ extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE];
extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE];
-struct sha256_state {
- u32 state[SHA256_DIGEST_SIZE / 4];
+struct crypto_sha256_state {
+ u32 state[SHA256_STATE_WORDS];
u64 count;
+};
+
+static inline void sha224_block_init(struct crypto_sha256_state *sctx)
+{
+ sctx->state[0] = SHA224_H0;
+ sctx->state[1] = SHA224_H1;
+ sctx->state[2] = SHA224_H2;
+ sctx->state[3] = SHA224_H3;
+ sctx->state[4] = SHA224_H4;
+ sctx->state[5] = SHA224_H5;
+ sctx->state[6] = SHA224_H6;
+ sctx->state[7] = SHA224_H7;
+ sctx->count = 0;
+}
+
+static inline void sha256_block_init(struct crypto_sha256_state *sctx)
+{
+ sctx->state[0] = SHA256_H0;
+ sctx->state[1] = SHA256_H1;
+ sctx->state[2] = SHA256_H2;
+ sctx->state[3] = SHA256_H3;
+ sctx->state[4] = SHA256_H4;
+ sctx->state[5] = SHA256_H5;
+ sctx->state[6] = SHA256_H6;
+ sctx->state[7] = SHA256_H7;
+ sctx->count = 0;
+}
+
+struct sha256_state {
+ union {
+ struct crypto_sha256_state ctx;
+ struct {
+ u32 state[SHA256_STATE_WORDS];
+ u64 count;
+ };
+ };
u8 buf[SHA256_BLOCK_SIZE];
};
@@ -76,59 +114,800 @@ struct sha512_state {
u8 buf[SHA512_BLOCK_SIZE];
};
-struct shash_desc;
+/* State for the SHA-256 (and SHA-224) compression function */
+struct sha256_block_state {
+ u32 h[SHA256_STATE_WORDS];
+};
+
+/*
+ * Context structure, shared by SHA-224 and SHA-256. The sha224_ctx and
+ * sha256_ctx structs wrap this one so that the API has proper typing and
+ * doesn't allow mixing the SHA-224 and SHA-256 functions arbitrarily.
+ */
+struct __sha256_ctx {
+ struct sha256_block_state state;
+ u64 bytecount;
+ u8 buf[SHA256_BLOCK_SIZE] __aligned(__alignof__(__be64));
+};
+void __sha256_update(struct __sha256_ctx *ctx, const u8 *data, size_t len);
+
+/*
+ * HMAC key and message context structs, shared by HMAC-SHA224 and HMAC-SHA256.
+ * The hmac_sha224_* and hmac_sha256_* structs wrap this one so that the API has
+ * proper typing and doesn't allow mixing the functions arbitrarily.
+ */
+struct __hmac_sha256_key {
+ struct sha256_block_state istate;
+ struct sha256_block_state ostate;
+};
+struct __hmac_sha256_ctx {
+ struct __sha256_ctx sha_ctx;
+ struct sha256_block_state ostate;
+};
+void __hmac_sha256_init(struct __hmac_sha256_ctx *ctx,
+ const struct __hmac_sha256_key *key);
+
+/**
+ * struct sha224_ctx - Context for hashing a message with SHA-224
+ * @ctx: private
+ */
+struct sha224_ctx {
+ struct __sha256_ctx ctx;
+};
+
+/**
+ * sha224_init() - Initialize a SHA-224 context for a new message
+ * @ctx: the context to initialize
+ *
+ * If you don't need incremental computation, consider sha224() instead.
+ *
+ * Context: Any context.
+ */
+void sha224_init(struct sha224_ctx *ctx);
+
+/**
+ * sha224_update() - Update a SHA-224 context with message data
+ * @ctx: the context to update; must have been initialized
+ * @data: the message data
+ * @len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void sha224_update(struct sha224_ctx *ctx,
+ const u8 *data, size_t len)
+{
+ __sha256_update(&ctx->ctx, data, len);
+}
+
+/**
+ * sha224_final() - Finish computing a SHA-224 message digest
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting SHA-224 message digest
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void sha224_final(struct sha224_ctx *ctx, u8 out[at_least SHA224_DIGEST_SIZE]);
+
+/**
+ * sha224() - Compute SHA-224 message digest in one shot
+ * @data: the message data
+ * @len: the data length in bytes
+ * @out: (output) the resulting SHA-224 message digest
+ *
+ * Context: Any context.
+ */
+void sha224(const u8 *data, size_t len, u8 out[at_least SHA224_DIGEST_SIZE]);
+
+/**
+ * struct hmac_sha224_key - Prepared key for HMAC-SHA224
+ * @key: private
+ */
+struct hmac_sha224_key {
+ struct __hmac_sha256_key key;
+};
+
+/**
+ * struct hmac_sha224_ctx - Context for computing HMAC-SHA224 of a message
+ * @ctx: private
+ */
+struct hmac_sha224_ctx {
+ struct __hmac_sha256_ctx ctx;
+};
+
+/**
+ * hmac_sha224_preparekey() - Prepare a key for HMAC-SHA224
+ * @key: (output) the key structure to initialize
+ * @raw_key: the raw HMAC-SHA224 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * Note: the caller is responsible for zeroizing both the struct hmac_sha224_key
+ * and the raw key once they are no longer needed.
+ *
+ * Context: Any context.
+ */
+void hmac_sha224_preparekey(struct hmac_sha224_key *key,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_sha224_init() - Initialize an HMAC-SHA224 context for a new message
+ * @ctx: (output) the HMAC context to initialize
+ * @key: the prepared HMAC key
+ *
+ * If you don't need incremental computation, consider hmac_sha224() instead.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha224_init(struct hmac_sha224_ctx *ctx,
+ const struct hmac_sha224_key *key)
+{
+ __hmac_sha256_init(&ctx->ctx, &key->key);
+}
+
+/**
+ * hmac_sha224_init_usingrawkey() - Initialize an HMAC-SHA224 context for a new
+ * message, using a raw key
+ * @ctx: (output) the HMAC context to initialize
+ * @raw_key: the raw HMAC-SHA224 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * If you don't need incremental computation, consider hmac_sha224_usingrawkey()
+ * instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha224_init_usingrawkey(struct hmac_sha224_ctx *ctx,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_sha224_update() - Update an HMAC-SHA224 context with message data
+ * @ctx: the HMAC context to update; must have been initialized
+ * @data: the message data
+ * @data_len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha224_update(struct hmac_sha224_ctx *ctx,
+ const u8 *data, size_t data_len)
+{
+ __sha256_update(&ctx->ctx.sha_ctx, data, data_len);
+}
+
+/**
+ * hmac_sha224_final() - Finish computing an HMAC-SHA224 value
+ * @ctx: the HMAC context to finalize; must have been initialized
+ * @out: (output) the resulting HMAC-SHA224 value
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void hmac_sha224_final(struct hmac_sha224_ctx *ctx,
+ u8 out[at_least SHA224_DIGEST_SIZE]);
+
+/**
+ * hmac_sha224() - Compute HMAC-SHA224 in one shot, using a prepared key
+ * @key: the prepared HMAC key
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA224 value
+ *
+ * If you're using the key only once, consider using hmac_sha224_usingrawkey().
+ *
+ * Context: Any context.
+ */
+void hmac_sha224(const struct hmac_sha224_key *key,
+ const u8 *data, size_t data_len,
+ u8 out[at_least SHA224_DIGEST_SIZE]);
+
+/**
+ * hmac_sha224_usingrawkey() - Compute HMAC-SHA224 in one shot, using a raw key
+ * @raw_key: the raw HMAC-SHA224 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA224 value
+ *
+ * If you're using the key multiple times, prefer to use
+ * hmac_sha224_preparekey() followed by multiple calls to hmac_sha224() instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha224_usingrawkey(const u8 *raw_key, size_t raw_key_len,
+ const u8 *data, size_t data_len,
+ u8 out[at_least SHA224_DIGEST_SIZE]);
+
+/**
+ * struct sha256_ctx - Context for hashing a message with SHA-256
+ * @ctx: private
+ */
+struct sha256_ctx {
+ struct __sha256_ctx ctx;
+};
+
+/**
+ * sha256_init() - Initialize a SHA-256 context for a new message
+ * @ctx: the context to initialize
+ *
+ * If you don't need incremental computation, consider sha256() instead.
+ *
+ * Context: Any context.
+ */
+void sha256_init(struct sha256_ctx *ctx);
-extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
+/**
+ * sha256_update() - Update a SHA-256 context with message data
+ * @ctx: the context to update; must have been initialized
+ * @data: the message data
+ * @len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void sha256_update(struct sha256_ctx *ctx,
+ const u8 *data, size_t len)
+{
+ __sha256_update(&ctx->ctx, data, len);
+}
-extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
+/**
+ * sha256_final() - Finish computing a SHA-256 message digest
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting SHA-256 message digest
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void sha256_final(struct sha256_ctx *ctx, u8 out[at_least SHA256_DIGEST_SIZE]);
+
+/**
+ * sha256() - Compute SHA-256 message digest in one shot
+ * @data: the message data
+ * @len: the data length in bytes
+ * @out: (output) the resulting SHA-256 message digest
+ *
+ * Context: Any context.
+ */
+void sha256(const u8 *data, size_t len, u8 out[at_least SHA256_DIGEST_SIZE]);
+
+/**
+ * sha256_finup_2x() - Compute two SHA-256 digests from a common initial
+ * context. On some CPUs, this is faster than sequentially
+ * computing each digest.
+ * @ctx: an optional initial context, which may have already processed data. If
+ * NULL, a default initial context is used (equivalent to sha256_init()).
+ * @data1: data for the first message
+ * @data2: data for the second message
+ * @len: the length of each of @data1 and @data2, in bytes
+ * @out1: (output) the first SHA-256 message digest
+ * @out2: (output) the second SHA-256 message digest
+ *
+ * Context: Any context.
+ */
+void sha256_finup_2x(const struct sha256_ctx *ctx, const u8 *data1,
+ const u8 *data2, size_t len,
+ u8 out1[at_least SHA256_DIGEST_SIZE],
+ u8 out2[at_least SHA256_DIGEST_SIZE]);
+
+/**
+ * sha256_finup_2x_is_optimized() - Check if sha256_finup_2x() is using a real
+ * interleaved implementation, as opposed to a
+ * sequential fallback
+ * Return: true if optimized
+ *
+ * Context: Any context.
+ */
+bool sha256_finup_2x_is_optimized(void);
+
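
An illustrative call of the interleaved helper declared above, hashing two equal-length buffers from the default initial state (NULL context):

#include <crypto/sha2.h>

static void finup_2x_example(const u8 *a, const u8 *b, size_t len)
{
	u8 d1[SHA256_DIGEST_SIZE], d2[SHA256_DIGEST_SIZE];

	/* NULL ctx: both digests start from the standard SHA-256 IV. */
	sha256_finup_2x(NULL, a, b, len, d1, d2);

	/*
	 * Where no interleaved implementation exists this falls back to two
	 * sequential hashes; see sha256_finup_2x_is_optimized().
	 */
}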
+/**
+ * struct hmac_sha256_key - Prepared key for HMAC-SHA256
+ * @key: private
+ */
+struct hmac_sha256_key {
+ struct __hmac_sha256_key key;
+};
+
+/**
+ * struct hmac_sha256_ctx - Context for computing HMAC-SHA256 of a message
+ * @ctx: private
+ */
+struct hmac_sha256_ctx {
+ struct __hmac_sha256_ctx ctx;
+};
+
+/**
+ * hmac_sha256_preparekey() - Prepare a key for HMAC-SHA256
+ * @key: (output) the key structure to initialize
+ * @raw_key: the raw HMAC-SHA256 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * Note: the caller is responsible for zeroizing both the struct hmac_sha256_key
+ * and the raw key once they are no longer needed.
+ *
+ * Context: Any context.
+ */
+void hmac_sha256_preparekey(struct hmac_sha256_key *key,
+ const u8 *raw_key, size_t raw_key_len);
-extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
+/**
+ * hmac_sha256_init() - Initialize an HMAC-SHA256 context for a new message
+ * @ctx: (output) the HMAC context to initialize
+ * @key: the prepared HMAC key
+ *
+ * If you don't need incremental computation, consider hmac_sha256() instead.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha256_init(struct hmac_sha256_ctx *ctx,
+ const struct hmac_sha256_key *key)
+{
+ __hmac_sha256_init(&ctx->ctx, &key->key);
+}
+
+/**
+ * hmac_sha256_init_usingrawkey() - Initialize an HMAC-SHA256 context for a new
+ * message, using a raw key
+ * @ctx: (output) the HMAC context to initialize
+ * @raw_key: the raw HMAC-SHA256 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * If you don't need incremental computation, consider hmac_sha256_usingrawkey()
+ * instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha256_init_usingrawkey(struct hmac_sha256_ctx *ctx,
+ const u8 *raw_key, size_t raw_key_len);
-extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
+/**
+ * hmac_sha256_update() - Update an HMAC-SHA256 context with message data
+ * @ctx: the HMAC context to update; must have been initialized
+ * @data: the message data
+ * @data_len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha256_update(struct hmac_sha256_ctx *ctx,
+ const u8 *data, size_t data_len)
+{
+ __sha256_update(&ctx->ctx.sha_ctx, data, data_len);
+}
+
+/**
+ * hmac_sha256_final() - Finish computing an HMAC-SHA256 value
+ * @ctx: the HMAC context to finalize; must have been initialized
+ * @out: (output) the resulting HMAC-SHA256 value
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void hmac_sha256_final(struct hmac_sha256_ctx *ctx,
+ u8 out[at_least SHA256_DIGEST_SIZE]);
+
+/**
+ * hmac_sha256() - Compute HMAC-SHA256 in one shot, using a prepared key
+ * @key: the prepared HMAC key
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA256 value
+ *
+ * If you're using the key only once, consider using hmac_sha256_usingrawkey().
+ *
+ * Context: Any context.
+ */
+void hmac_sha256(const struct hmac_sha256_key *key,
+ const u8 *data, size_t data_len,
+ u8 out[at_least SHA256_DIGEST_SIZE]);
+
+/**
+ * hmac_sha256_usingrawkey() - Compute HMAC-SHA256 in one shot, using a raw key
+ * @raw_key: the raw HMAC-SHA256 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA256 value
+ *
+ * If you're using the key multiple times, prefer to use
+ * hmac_sha256_preparekey() followed by multiple calls to hmac_sha256() instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha256_usingrawkey(const u8 *raw_key, size_t raw_key_len,
+ const u8 *data, size_t data_len,
+ u8 out[SHA256_DIGEST_SIZE]);
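
For illustration only (not part of this patch), here is a minimal usage sketch of the HMAC-SHA256 API declared above; the function name demo_hmac_sha256() is hypothetical, the rest is the API as declared in this hunk. The HMAC-SHA384 and HMAC-SHA512 APIs further down follow the same pattern.

#include <crypto/sha2.h>
#include <linux/string.h>

static void demo_hmac_sha256(const u8 *key, size_t key_len,
			     const u8 *msg, size_t msg_len)
{
	struct hmac_sha256_key k;
	struct hmac_sha256_ctx ctx;
	u8 mac1[SHA256_DIGEST_SIZE], mac2[SHA256_DIGEST_SIZE];

	/* One-shot with a raw key: simplest form when the key is used once. */
	hmac_sha256_usingrawkey(key, key_len, msg, msg_len, mac1);

	/* Prepared key plus incremental updates: preferable for key reuse. */
	hmac_sha256_preparekey(&k, key, key_len);
	hmac_sha256_init(&ctx, &k);
	hmac_sha256_update(&ctx, msg, msg_len / 2);
	hmac_sha256_update(&ctx, msg + msg_len / 2, msg_len - msg_len / 2);
	hmac_sha256_final(&ctx, mac2);		/* also zeroizes ctx */

	/* Per hmac_sha256_preparekey(), the caller zeroizes the key. */
	memzero_explicit(&k, sizeof(k));
}
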
+
+/* State for the SHA-512 (and SHA-384) compression function */
+struct sha512_block_state {
+ u64 h[8];
+};
/*
- * Stand-alone implementation of the SHA256 algorithm. It is designed to
- * have as little dependencies as possible so it can be used in the
- * kexec_file purgatory. In other cases you should generally use the
- * hash APIs from include/crypto/hash.h. Especially when hashing large
- * amounts of data as those APIs may be hw-accelerated.
+ * Context structure, shared by SHA-384 and SHA-512. The sha384_ctx and
+ * sha512_ctx structs wrap this one so that the API has proper typing and
+ * doesn't allow mixing the SHA-384 and SHA-512 functions arbitrarily.
+ */
+struct __sha512_ctx {
+ struct sha512_block_state state;
+ u64 bytecount_lo;
+ u64 bytecount_hi;
+ u8 buf[SHA512_BLOCK_SIZE] __aligned(__alignof__(__be64));
+};
+void __sha512_update(struct __sha512_ctx *ctx, const u8 *data, size_t len);
+
+/*
+ * HMAC key and message context structs, shared by HMAC-SHA384 and HMAC-SHA512.
+ * The hmac_sha384_* and hmac_sha512_* structs wrap this one so that the API has
+ * proper typing and doesn't allow mixing the functions arbitrarily.
+ */
+struct __hmac_sha512_key {
+ struct sha512_block_state istate;
+ struct sha512_block_state ostate;
+};
+struct __hmac_sha512_ctx {
+ struct __sha512_ctx sha_ctx;
+ struct sha512_block_state ostate;
+};
+void __hmac_sha512_init(struct __hmac_sha512_ctx *ctx,
+ const struct __hmac_sha512_key *key);
+
+/**
+ * struct sha384_ctx - Context for hashing a message with SHA-384
+ * @ctx: private
+ */
+struct sha384_ctx {
+ struct __sha512_ctx ctx;
+};
+
+/**
+ * sha384_init() - Initialize a SHA-384 context for a new message
+ * @ctx: the context to initialize
+ *
+ * If you don't need incremental computation, consider sha384() instead.
*
- * For details see lib/crypto/sha256.c
+ * Context: Any context.
*/
+void sha384_init(struct sha384_ctx *ctx);
-static inline void sha256_init(struct sha256_state *sctx)
+/**
+ * sha384_update() - Update a SHA-384 context with message data
+ * @ctx: the context to update; must have been initialized
+ * @data: the message data
+ * @len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void sha384_update(struct sha384_ctx *ctx,
+ const u8 *data, size_t len)
{
- sctx->state[0] = SHA256_H0;
- sctx->state[1] = SHA256_H1;
- sctx->state[2] = SHA256_H2;
- sctx->state[3] = SHA256_H3;
- sctx->state[4] = SHA256_H4;
- sctx->state[5] = SHA256_H5;
- sctx->state[6] = SHA256_H6;
- sctx->state[7] = SHA256_H7;
- sctx->count = 0;
+ __sha512_update(&ctx->ctx, data, len);
}
-void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len);
-void sha256_final(struct sha256_state *sctx, u8 *out);
-void sha256(const u8 *data, unsigned int len, u8 *out);
-static inline void sha224_init(struct sha256_state *sctx)
+/**
+ * sha384_final() - Finish computing a SHA-384 message digest
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting SHA-384 message digest
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void sha384_final(struct sha384_ctx *ctx, u8 out[SHA384_DIGEST_SIZE]);
+
+/**
+ * sha384() - Compute SHA-384 message digest in one shot
+ * @data: the message data
+ * @len: the data length in bytes
+ * @out: (output) the resulting SHA-384 message digest
+ *
+ * Context: Any context.
+ */
+void sha384(const u8 *data, size_t len, u8 out[SHA384_DIGEST_SIZE]);
+
+/**
+ * struct hmac_sha384_key - Prepared key for HMAC-SHA384
+ * @key: private
+ */
+struct hmac_sha384_key {
+ struct __hmac_sha512_key key;
+};
+
+/**
+ * struct hmac_sha384_ctx - Context for computing HMAC-SHA384 of a message
+ * @ctx: private
+ */
+struct hmac_sha384_ctx {
+ struct __hmac_sha512_ctx ctx;
+};
+
+/**
+ * hmac_sha384_preparekey() - Prepare a key for HMAC-SHA384
+ * @key: (output) the key structure to initialize
+ * @raw_key: the raw HMAC-SHA384 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * Note: the caller is responsible for zeroizing both the struct hmac_sha384_key
+ * and the raw key once they are no longer needed.
+ *
+ * Context: Any context.
+ */
+void hmac_sha384_preparekey(struct hmac_sha384_key *key,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_sha384_init() - Initialize an HMAC-SHA384 context for a new message
+ * @ctx: (output) the HMAC context to initialize
+ * @key: the prepared HMAC key
+ *
+ * If you don't need incremental computation, consider hmac_sha384() instead.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha384_init(struct hmac_sha384_ctx *ctx,
+ const struct hmac_sha384_key *key)
{
- sctx->state[0] = SHA224_H0;
- sctx->state[1] = SHA224_H1;
- sctx->state[2] = SHA224_H2;
- sctx->state[3] = SHA224_H3;
- sctx->state[4] = SHA224_H4;
- sctx->state[5] = SHA224_H5;
- sctx->state[6] = SHA224_H6;
- sctx->state[7] = SHA224_H7;
- sctx->count = 0;
+ __hmac_sha512_init(&ctx->ctx, &key->key);
}
-void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len);
-void sha224_final(struct sha256_state *sctx, u8 *out);
+
+/**
+ * hmac_sha384_init_usingrawkey() - Initialize an HMAC-SHA384 context for a new
+ * message, using a raw key
+ * @ctx: (output) the HMAC context to initialize
+ * @raw_key: the raw HMAC-SHA384 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * If you don't need incremental computation, consider hmac_sha384_usingrawkey()
+ * instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha384_init_usingrawkey(struct hmac_sha384_ctx *ctx,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_sha384_update() - Update an HMAC-SHA384 context with message data
+ * @ctx: the HMAC context to update; must have been initialized
+ * @data: the message data
+ * @data_len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha384_update(struct hmac_sha384_ctx *ctx,
+ const u8 *data, size_t data_len)
+{
+ __sha512_update(&ctx->ctx.sha_ctx, data, data_len);
+}
+
+/**
+ * hmac_sha384_final() - Finish computing an HMAC-SHA384 value
+ * @ctx: the HMAC context to finalize; must have been initialized
+ * @out: (output) the resulting HMAC-SHA384 value
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void hmac_sha384_final(struct hmac_sha384_ctx *ctx,
+ u8 out[SHA384_DIGEST_SIZE]);
+
+/**
+ * hmac_sha384() - Compute HMAC-SHA384 in one shot, using a prepared key
+ * @key: the prepared HMAC key
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA384 value
+ *
+ * If you're using the key only once, consider using hmac_sha384_usingrawkey().
+ *
+ * Context: Any context.
+ */
+void hmac_sha384(const struct hmac_sha384_key *key,
+ const u8 *data, size_t data_len,
+ u8 out[SHA384_DIGEST_SIZE]);
+
+/**
+ * hmac_sha384_usingrawkey() - Compute HMAC-SHA384 in one shot, using a raw key
+ * @raw_key: the raw HMAC-SHA384 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA384 value
+ *
+ * If you're using the key multiple times, prefer to use
+ * hmac_sha384_preparekey() followed by multiple calls to hmac_sha384() instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha384_usingrawkey(const u8 *raw_key, size_t raw_key_len,
+ const u8 *data, size_t data_len,
+ u8 out[SHA384_DIGEST_SIZE]);
+
+/**
+ * struct sha512_ctx - Context for hashing a message with SHA-512
+ * @ctx: private
+ */
+struct sha512_ctx {
+ struct __sha512_ctx ctx;
+};
+
+/**
+ * sha512_init() - Initialize a SHA-512 context for a new message
+ * @ctx: the context to initialize
+ *
+ * If you don't need incremental computation, consider sha512() instead.
+ *
+ * Context: Any context.
+ */
+void sha512_init(struct sha512_ctx *ctx);
+
+/**
+ * sha512_update() - Update a SHA-512 context with message data
+ * @ctx: the context to update; must have been initialized
+ * @data: the message data
+ * @len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void sha512_update(struct sha512_ctx *ctx,
+ const u8 *data, size_t len)
+{
+ __sha512_update(&ctx->ctx, data, len);
+}
+
+/**
+ * sha512_final() - Finish computing a SHA-512 message digest
+ * @ctx: the context to finalize; must have been initialized
+ * @out: (output) the resulting SHA-512 message digest
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void sha512_final(struct sha512_ctx *ctx, u8 out[SHA512_DIGEST_SIZE]);
+
+/**
+ * sha512() - Compute SHA-512 message digest in one shot
+ * @data: the message data
+ * @len: the data length in bytes
+ * @out: (output) the resulting SHA-512 message digest
+ *
+ * Context: Any context.
+ */
+void sha512(const u8 *data, size_t len, u8 out[SHA512_DIGEST_SIZE]);
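
For illustration only (not part of this patch), a minimal sketch of incremental SHA-512 hashing with the API above; demo_sha512() is a hypothetical caller, and SHA-384 works identically via the sha384_* functions.

#include <crypto/sha2.h>

static void demo_sha512(const u8 *data, size_t len,
			u8 digest[SHA512_DIGEST_SIZE])
{
	struct sha512_ctx ctx;

	sha512_init(&ctx);
	sha512_update(&ctx, data, len);	/* may be called repeatedly */
	sha512_final(&ctx, digest);	/* zeroizes ctx */

	/* The one-shot equivalent is: sha512(data, len, digest); */
}
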
+
+/**
+ * struct hmac_sha512_key - Prepared key for HMAC-SHA512
+ * @key: private
+ */
+struct hmac_sha512_key {
+ struct __hmac_sha512_key key;
+};
+
+/**
+ * struct hmac_sha512_ctx - Context for computing HMAC-SHA512 of a message
+ * @ctx: private
+ */
+struct hmac_sha512_ctx {
+ struct __hmac_sha512_ctx ctx;
+};
+
+/**
+ * hmac_sha512_preparekey() - Prepare a key for HMAC-SHA512
+ * @key: (output) the key structure to initialize
+ * @raw_key: the raw HMAC-SHA512 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * Note: the caller is responsible for zeroizing both the struct hmac_sha512_key
+ * and the raw key once they are no longer needed.
+ *
+ * Context: Any context.
+ */
+void hmac_sha512_preparekey(struct hmac_sha512_key *key,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_sha512_init() - Initialize an HMAC-SHA512 context for a new message
+ * @ctx: (output) the HMAC context to initialize
+ * @key: the prepared HMAC key
+ *
+ * If you don't need incremental computation, consider hmac_sha512() instead.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha512_init(struct hmac_sha512_ctx *ctx,
+ const struct hmac_sha512_key *key)
+{
+ __hmac_sha512_init(&ctx->ctx, &key->key);
+}
+
+/**
+ * hmac_sha512_init_usingrawkey() - Initialize an HMAC-SHA512 context for a new
+ * message, using a raw key
+ * @ctx: (output) the HMAC context to initialize
+ * @raw_key: the raw HMAC-SHA512 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ *
+ * If you don't need incremental computation, consider hmac_sha512_usingrawkey()
+ * instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha512_init_usingrawkey(struct hmac_sha512_ctx *ctx,
+ const u8 *raw_key, size_t raw_key_len);
+
+/**
+ * hmac_sha512_update() - Update an HMAC-SHA512 context with message data
+ * @ctx: the HMAC context to update; must have been initialized
+ * @data: the message data
+ * @data_len: the data length in bytes
+ *
+ * This can be called any number of times.
+ *
+ * Context: Any context.
+ */
+static inline void hmac_sha512_update(struct hmac_sha512_ctx *ctx,
+ const u8 *data, size_t data_len)
+{
+ __sha512_update(&ctx->ctx.sha_ctx, data, data_len);
+}
+
+/**
+ * hmac_sha512_final() - Finish computing an HMAC-SHA512 value
+ * @ctx: the HMAC context to finalize; must have been initialized
+ * @out: (output) the resulting HMAC-SHA512 value
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void hmac_sha512_final(struct hmac_sha512_ctx *ctx,
+ u8 out[SHA512_DIGEST_SIZE]);
+
+/**
+ * hmac_sha512() - Compute HMAC-SHA512 in one shot, using a prepared key
+ * @key: the prepared HMAC key
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA512 value
+ *
+ * If you're using the key only once, consider using hmac_sha512_usingrawkey().
+ *
+ * Context: Any context.
+ */
+void hmac_sha512(const struct hmac_sha512_key *key,
+ const u8 *data, size_t data_len,
+ u8 out[SHA512_DIGEST_SIZE]);
+
+/**
+ * hmac_sha512_usingrawkey() - Compute HMAC-SHA512 in one shot, using a raw key
+ * @raw_key: the raw HMAC-SHA512 key
+ * @raw_key_len: the key length in bytes. All key lengths are supported.
+ * @data: the message data
+ * @data_len: the data length in bytes
+ * @out: (output) the resulting HMAC-SHA512 value
+ *
+ * If you're using the key multiple times, prefer to use
+ * hmac_sha512_preparekey() followed by multiple calls to hmac_sha512() instead.
+ *
+ * Context: Any context.
+ */
+void hmac_sha512_usingrawkey(const u8 *raw_key, size_t raw_key_len,
+ const u8 *data, size_t data_len,
+ u8 out[SHA512_DIGEST_SIZE]);
#endif /* _CRYPTO_SHA2_H */
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
deleted file mode 100644
index 76173c613058..000000000000
--- a/include/crypto/sha256_base.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * sha256_base.h - core logic for SHA-256 implementations
- *
- * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#ifndef _CRYPTO_SHA256_BASE_H
-#define _CRYPTO_SHA256_BASE_H
-
-#include <crypto/internal/hash.h>
-#include <crypto/sha2.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-#include <asm/unaligned.h>
-
-typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
- int blocks);
-
-static inline int sha224_base_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- sha224_init(sctx);
- return 0;
-}
-
-static inline int sha256_base_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- sha256_init(sctx);
- return 0;
-}
-
-static inline int sha256_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha256_block_fn *block_fn)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
-
- sctx->count += len;
-
- if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SHA256_BLOCK_SIZE - partial;
-
- memcpy(sctx->buf + partial, data, p);
- data += p;
- len -= p;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- blocks = len / SHA256_BLOCK_SIZE;
- len %= SHA256_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SHA256_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buf + partial, data, len);
-
- return 0;
-}
-
-static inline int sha256_base_do_finalize(struct shash_desc *desc,
- sha256_block_fn *block_fn)
-{
- const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
- struct sha256_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
- unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
-
- sctx->buf[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial);
- partial = 0;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- memset(sctx->buf + partial, 0x0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, sctx->buf, 1);
-
- return 0;
-}
-
-static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
-{
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
- struct sha256_state *sctx = shash_desc_ctx(desc);
- __be32 *digest = (__be32 *)out;
- int i;
-
- for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
- put_unaligned_be32(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
- return 0;
-}
-
-#endif /* _CRYPTO_SHA256_BASE_H */
diff --git a/include/crypto/sha3.h b/include/crypto/sha3.h
index 080f60c2e6b1..c9e4182ff74f 100644
--- a/include/crypto/sha3.h
+++ b/include/crypto/sha3.h
@@ -1,34 +1,346 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common values for SHA-3 algorithms
+ *
+ * See also Documentation/crypto/sha3.rst
*/
#ifndef __CRYPTO_SHA3_H__
#define __CRYPTO_SHA3_H__
+#include <linux/types.h>
+#include <linux/string.h>
+
#define SHA3_224_DIGEST_SIZE (224 / 8)
#define SHA3_224_BLOCK_SIZE (200 - 2 * SHA3_224_DIGEST_SIZE)
+#define SHA3_224_EXPORT_SIZE (SHA3_STATE_SIZE + SHA3_224_BLOCK_SIZE + 1)
#define SHA3_256_DIGEST_SIZE (256 / 8)
#define SHA3_256_BLOCK_SIZE (200 - 2 * SHA3_256_DIGEST_SIZE)
+#define SHA3_256_EXPORT_SIZE (SHA3_STATE_SIZE + SHA3_256_BLOCK_SIZE + 1)
#define SHA3_384_DIGEST_SIZE (384 / 8)
#define SHA3_384_BLOCK_SIZE (200 - 2 * SHA3_384_DIGEST_SIZE)
+#define SHA3_384_EXPORT_SIZE (SHA3_STATE_SIZE + SHA3_384_BLOCK_SIZE + 1)
#define SHA3_512_DIGEST_SIZE (512 / 8)
#define SHA3_512_BLOCK_SIZE (200 - 2 * SHA3_512_DIGEST_SIZE)
+#define SHA3_512_EXPORT_SIZE (SHA3_STATE_SIZE + SHA3_512_BLOCK_SIZE + 1)
+
+/*
+ * SHAKE128 and SHAKE256 actually have variable output size, but this is used to
+ * calculate the block size (rate) analogously to the above.
+ */
+#define SHAKE128_DEFAULT_SIZE (128 / 8)
+#define SHAKE128_BLOCK_SIZE (200 - 2 * SHAKE128_DEFAULT_SIZE)
+#define SHAKE256_DEFAULT_SIZE (256 / 8)
+#define SHAKE256_BLOCK_SIZE (200 - 2 * SHAKE256_DEFAULT_SIZE)
+#define SHA3_STATE_SIZE 200
+
+/*
+ * State for the Keccak-f[1600] permutation: 25 64-bit words.
+ *
+ * We usually keep the state words as little-endian, to make absorbing and
+ * squeezing easier. (It means that absorbing and squeezing can just treat the
+ * state as a byte array.) The state words are converted to native-endian only
+ * temporarily by implementations of the permutation that need native-endian
+ * words. Of course, that conversion is a no-op on little-endian machines.
+ */
struct sha3_state {
- u64 st[25];
- unsigned int rsiz;
- unsigned int rsizw;
+ union {
+ __le64 words[SHA3_STATE_SIZE / 8];
+ u8 bytes[SHA3_STATE_SIZE];
+
+ u64 native_words[SHA3_STATE_SIZE / 8]; /* see comment above */
+ };
+};
+
+/* Internal context, shared by the digests (SHA3-*) and the XOFs (SHAKE*) */
+struct __sha3_ctx {
+ struct sha3_state state;
+ u8 digest_size; /* Digests only: the digest size in bytes */
+ u8 block_size; /* Block size in bytes */
+ u8 absorb_offset; /* Index of next state byte to absorb into */
+ u8 squeeze_offset; /* XOFs only: index of next state byte to extract */
+};
+
+void __sha3_update(struct __sha3_ctx *ctx, const u8 *in, size_t in_len);
+
+/**
+ * struct sha3_ctx - Context for SHA3-224, SHA3-256, SHA3-384, or SHA3-512
+ * @ctx: private
+ */
+struct sha3_ctx {
+ struct __sha3_ctx ctx;
+};
+
+/**
+ * sha3_zeroize_ctx() - Zeroize a SHA-3 context
+ * @ctx: The context to zeroize
+ *
+ * This is already called by sha3_final(). Call this explicitly when abandoning
+ * a context without calling sha3_final().
+ */
+static inline void sha3_zeroize_ctx(struct sha3_ctx *ctx)
+{
+ memzero_explicit(ctx, sizeof(*ctx));
+}
- unsigned int partial;
- u8 buf[SHA3_224_BLOCK_SIZE];
+/**
+ * struct shake_ctx - Context for SHAKE128 or SHAKE256
+ * @ctx: private
+ */
+struct shake_ctx {
+ struct __sha3_ctx ctx;
};
-int crypto_sha3_init(struct shash_desc *desc);
-int crypto_sha3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-int crypto_sha3_final(struct shash_desc *desc, u8 *out);
+/**
+ * shake_zeroize_ctx() - Zeroize a SHAKE context
+ * @ctx: The context to zeroize
+ *
+ * Call this after the last squeeze.
+ */
+static inline void shake_zeroize_ctx(struct shake_ctx *ctx)
+{
+ memzero_explicit(ctx, sizeof(*ctx));
+}
+
+/**
+ * sha3_224_init() - Initialize a context for SHA3-224
+ * @ctx: The context to initialize
+ *
+ * This begins a new SHA3-224 message digest computation.
+ *
+ * Context: Any context.
+ */
+static inline void sha3_224_init(struct sha3_ctx *ctx)
+{
+ *ctx = (struct sha3_ctx){
+ .ctx.digest_size = SHA3_224_DIGEST_SIZE,
+ .ctx.block_size = SHA3_224_BLOCK_SIZE,
+ };
+}
+
+/**
+ * sha3_256_init() - Initialize a context for SHA3-256
+ * @ctx: The context to initialize
+ *
+ * This begins a new SHA3-256 message digest computation.
+ *
+ * Context: Any context.
+ */
+static inline void sha3_256_init(struct sha3_ctx *ctx)
+{
+ *ctx = (struct sha3_ctx){
+ .ctx.digest_size = SHA3_256_DIGEST_SIZE,
+ .ctx.block_size = SHA3_256_BLOCK_SIZE,
+ };
+}
+
+/**
+ * sha3_384_init() - Initialize a context for SHA3-384
+ * @ctx: The context to initialize
+ *
+ * This begins a new SHA3-384 message digest computation.
+ *
+ * Context: Any context.
+ */
+static inline void sha3_384_init(struct sha3_ctx *ctx)
+{
+ *ctx = (struct sha3_ctx){
+ .ctx.digest_size = SHA3_384_DIGEST_SIZE,
+ .ctx.block_size = SHA3_384_BLOCK_SIZE,
+ };
+}
+
+/**
+ * sha3_512_init() - Initialize a context for SHA3-512
+ * @ctx: The context to initialize
+ *
+ * This begins a new SHA3-512 message digest computation.
+ *
+ * Context: Any context.
+ */
+static inline void sha3_512_init(struct sha3_ctx *ctx)
+{
+ *ctx = (struct sha3_ctx){
+ .ctx.digest_size = SHA3_512_DIGEST_SIZE,
+ .ctx.block_size = SHA3_512_BLOCK_SIZE,
+ };
+}
+
+/**
+ * sha3_update() - Update a SHA-3 digest context with input data
+ * @ctx: The context to update; must have been initialized
+ * @in: The input data
+ * @in_len: Length of the input data in bytes
+ *
+ * This can be called any number of times to add data to a SHA3-224, SHA3-256,
+ * SHA3-384, or SHA3-512 digest (depending on which init function was called).
+ *
+ * Context: Any context.
+ */
+static inline void sha3_update(struct sha3_ctx *ctx,
+ const u8 *in, size_t in_len)
+{
+ __sha3_update(&ctx->ctx, in, in_len);
+}
+
+/**
+ * sha3_final() - Finish computing a SHA-3 message digest
+ * @ctx: The context to finalize; must have been initialized
+ * @out: (output) The resulting SHA3-224, SHA3-256, SHA3-384, or SHA3-512
+ * message digest, matching the init function that was called. Note that
+ * the size differs for each one; see SHA3_*_DIGEST_SIZE.
+ *
+ * After finishing, this zeroizes @ctx, so the caller does not need to do it.
+ *
+ * Context: Any context.
+ */
+void sha3_final(struct sha3_ctx *ctx, u8 *out);
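
For illustration only (not part of this patch), a minimal sketch of the SHA-3 digest API above, using SHA3-256; demo_sha3_256() is a hypothetical caller, and the other digest sizes differ only in the init function and SHA3_*_DIGEST_SIZE.

#include <crypto/sha3.h>

static void demo_sha3_256(const u8 *data, size_t len,
			  u8 digest[SHA3_256_DIGEST_SIZE])
{
	struct sha3_ctx ctx;

	sha3_256_init(&ctx);
	sha3_update(&ctx, data, len);	/* may be called repeatedly */
	sha3_final(&ctx, digest);	/* zeroizes ctx */
}
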
+
+/**
+ * shake128_init() - Initialize a context for SHAKE128
+ * @ctx: The context to initialize
+ *
+ * This begins a new SHAKE128 extendable-output function (XOF) computation.
+ *
+ * Context: Any context.
+ */
+static inline void shake128_init(struct shake_ctx *ctx)
+{
+ *ctx = (struct shake_ctx){
+ .ctx.block_size = SHAKE128_BLOCK_SIZE,
+ };
+}
+
+/**
+ * shake256_init() - Initialize a context for SHAKE256
+ * @ctx: The context to initialize
+ *
+ * This begins a new SHAKE256 extendable-output function (XOF) computation.
+ *
+ * Context: Any context.
+ */
+static inline void shake256_init(struct shake_ctx *ctx)
+{
+ *ctx = (struct shake_ctx){
+ .ctx.block_size = SHAKE256_BLOCK_SIZE,
+ };
+}
+
+/**
+ * shake_update() - Update a SHAKE context with input data
+ * @ctx: The context to update; must have been initialized
+ * @in: The input data
+ * @in_len: Length of the input data in bytes
+ *
+ * This can be called any number of times to add more input data to SHAKE128 or
+ * SHAKE256. This cannot be called after squeezing has begun.
+ *
+ * Context: Any context.
+ */
+static inline void shake_update(struct shake_ctx *ctx,
+ const u8 *in, size_t in_len)
+{
+ __sha3_update(&ctx->ctx, in, in_len);
+}
+
+/**
+ * shake_squeeze() - Generate output from SHAKE128 or SHAKE256
+ * @ctx: The context to squeeze; must have been initialized
+ * @out: Where to write the resulting output data
+ * @out_len: The amount of data to extract to @out in bytes
+ *
+ * This may be called multiple times. A number of consecutive squeezes laid
+ * end-to-end will yield the same output as one big squeeze generating the same
+ * total amount of output. More input cannot be provided after squeezing has
+ * begun. After the last squeeze, call shake_zeroize_ctx().
+ *
+ * Context: Any context.
+ */
+void shake_squeeze(struct shake_ctx *ctx, u8 *out, size_t out_len);
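
For illustration only (not part of this patch), a minimal sketch of the SHAKE XOF API above; demo_shake128() is a hypothetical caller. Note the two squeezes laid end-to-end, which per the shake_squeeze() documentation yield the same bytes as one squeeze of the total length.

#include <crypto/sha3.h>

static void demo_shake128(const u8 *in, size_t in_len,
			  u8 *out, size_t out_len)
{
	struct shake_ctx ctx;

	shake128_init(&ctx);
	shake_update(&ctx, in, in_len);	/* no updates after squeezing */
	shake_squeeze(&ctx, out, out_len / 2);
	shake_squeeze(&ctx, out + out_len / 2, out_len - out_len / 2);
	shake_zeroize_ctx(&ctx);	/* required after the last squeeze */
}
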
+
+/**
+ * sha3_224() - Compute SHA3-224 digest in one shot
+ * @in: The input data to be digested
+ * @in_len: Length of the input data in bytes
+ * @out: The buffer into which the digest will be stored
+ *
+ * Convenience function that computes a SHA3-224 digest. Use this instead of
+ * the incremental API if you're able to provide all the input at once.
+ *
+ * Context: Any context.
+ */
+void sha3_224(const u8 *in, size_t in_len, u8 out[SHA3_224_DIGEST_SIZE]);
+
+/**
+ * sha3_256() - Compute SHA3-256 digest in one shot
+ * @in: The input data to be digested
+ * @in_len: Length of the input data in bytes
+ * @out: The buffer into which the digest will be stored
+ *
+ * Convenience function that computes a SHA3-256 digest. Use this instead of
+ * the incremental API if you're able to provide all the input at once.
+ *
+ * Context: Any context.
+ */
+void sha3_256(const u8 *in, size_t in_len, u8 out[SHA3_256_DIGEST_SIZE]);
+
+/**
+ * sha3_384() - Compute SHA3-384 digest in one shot
+ * @in: The input data to be digested
+ * @in_len: Length of the input data in bytes
+ * @out: The buffer into which the digest will be stored
+ *
+ * Convenience function that computes a SHA3-384 digest. Use this instead of
+ * the incremental API if you're able to provide all the input at once.
+ *
+ * Context: Any context.
+ */
+void sha3_384(const u8 *in, size_t in_len, u8 out[SHA3_384_DIGEST_SIZE]);
+
+/**
+ * sha3_512() - Compute SHA3-512 digest in one shot
+ * @in: The input data to be digested
+ * @in_len: Length of the input data in bytes
+ * @out: The buffer into which the digest will be stored
+ *
+ * Convenience function that computes a SHA3-512 digest. Use this instead of
+ * the incremental API if you're able to provide all the input at once.
+ *
+ * Context: Any context.
+ */
+void sha3_512(const u8 *in, size_t in_len, u8 out[SHA3_512_DIGEST_SIZE]);
+
+/**
+ * shake128() - Compute SHAKE128 in one shot
+ * @in: The input data to be used
+ * @in_len: Length of the input data in bytes
+ * @out: The buffer into which the output will be stored
+ * @out_len: Length of the output to produce in bytes
+ *
+ * Convenience function that computes SHAKE128 in one shot. Use this instead of
+ * the incremental API if you're able to provide all the input at once as well
+ * as receive all the output at once. All output lengths are supported.
+ *
+ * Context: Any context.
+ */
+void shake128(const u8 *in, size_t in_len, u8 *out, size_t out_len);
+
+/**
+ * shake256() - Compute SHAKE256 in one shot
+ * @in: The input data to be used
+ * @in_len: Length of the input data in bytes
+ * @out: The buffer into which the output will be stored
+ * @out_len: Length of the output to produce in bytes
+ *
+ * Convenience function that computes SHAKE256 in one shot. Use this instead of
+ * the incremental API if you're able to provide all the input at once as well
+ * as receive all the output at once. All output lengths are supported.
+ *
+ * Context: Any context.
+ */
+void shake256(const u8 *in, size_t in_len, u8 *out, size_t out_len);
-#endif
+#endif /* __CRYPTO_SHA3_H__ */
diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h
deleted file mode 100644
index b370b3340b16..000000000000
--- a/include/crypto/sha512_base.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * sha512_base.h - core logic for SHA-512 implementations
- *
- * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
- */
-
-#ifndef _CRYPTO_SHA512_BASE_H
-#define _CRYPTO_SHA512_BASE_H
-
-#include <crypto/internal/hash.h>
-#include <crypto/sha2.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-#include <asm/unaligned.h>
-
-typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src,
- int blocks);
-
-static inline int sha384_base_init(struct shash_desc *desc)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA384_H0;
- sctx->state[1] = SHA384_H1;
- sctx->state[2] = SHA384_H2;
- sctx->state[3] = SHA384_H3;
- sctx->state[4] = SHA384_H4;
- sctx->state[5] = SHA384_H5;
- sctx->state[6] = SHA384_H6;
- sctx->state[7] = SHA384_H7;
- sctx->count[0] = sctx->count[1] = 0;
-
- return 0;
-}
-
-static inline int sha512_base_init(struct shash_desc *desc)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA512_H0;
- sctx->state[1] = SHA512_H1;
- sctx->state[2] = SHA512_H2;
- sctx->state[3] = SHA512_H3;
- sctx->state[4] = SHA512_H4;
- sctx->state[5] = SHA512_H5;
- sctx->state[6] = SHA512_H6;
- sctx->state[7] = SHA512_H7;
- sctx->count[0] = sctx->count[1] = 0;
-
- return 0;
-}
-
-static inline int sha512_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha512_block_fn *block_fn)
-{
- struct sha512_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
-
- sctx->count[0] += len;
- if (sctx->count[0] < len)
- sctx->count[1]++;
-
- if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SHA512_BLOCK_SIZE - partial;
-
- memcpy(sctx->buf + partial, data, p);
- data += p;
- len -= p;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- blocks = len / SHA512_BLOCK_SIZE;
- len %= SHA512_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SHA512_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buf + partial, data, len);
-
- return 0;
-}
-
-static inline int sha512_base_do_finalize(struct shash_desc *desc,
- sha512_block_fn *block_fn)
-{
- const int bit_offset = SHA512_BLOCK_SIZE - sizeof(__be64[2]);
- struct sha512_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
- unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
-
- sctx->buf[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial);
- partial = 0;
-
- block_fn(sctx, sctx->buf, 1);
- }
-
- memset(sctx->buf + partial, 0x0, bit_offset - partial);
- bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
- bits[1] = cpu_to_be64(sctx->count[0] << 3);
- block_fn(sctx, sctx->buf, 1);
-
- return 0;
-}
-
-static inline int sha512_base_finish(struct shash_desc *desc, u8 *out)
-{
- unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
- struct sha512_state *sctx = shash_desc_ctx(desc);
- __be64 *digest = (__be64 *)out;
- int i;
-
- for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64))
- put_unaligned_be64(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
- return 0;
-}
-
-#endif /* _CRYPTO_SHA512_BASE_H */
diff --git a/include/crypto/sig.h b/include/crypto/sig.h
new file mode 100644
index 000000000000..fa6dafafab3f
--- /dev/null
+++ b/include/crypto/sig.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Public Key Signature Algorithm
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_SIG_H
+#define _CRYPTO_SIG_H
+
+#include <linux/crypto.h>
+
+/**
+ * struct crypto_sig - user-instantiated objects which encapsulate
+ * algorithms and core processing logic
+ *
+ * @base: Common crypto API algorithm data structure
+ */
+struct crypto_sig {
+ struct crypto_tfm base;
+};
+
+/**
+ * struct sig_alg - generic public key signature algorithm
+ *
+ * @sign: Function performs a sign operation as defined by public key
+ * algorithm. On success, the signature size is returned.
+ * Optional.
+ * @verify: Function performs a complete verify operation as defined by
+ * public key algorithm, returning verification status. Optional.
+ * @set_pub_key: Function invokes the algorithm specific set public key
+ * function, which knows how to decode and interpret
+ * the BER encoded public key and parameters. Mandatory.
+ * @set_priv_key: Function invokes the algorithm specific set private key
+ * function, which knows how to decode and interpret
+ * the BER encoded private key and parameters. Optional.
+ * @key_size: Function returns key size. Mandatory.
+ * @digest_size: Function returns maximum digest size. Optional.
+ * @max_size: Function returns maximum signature size. Optional.
+ * @init: Initialize the cryptographic transformation object.
+ * This function is used to initialize the cryptographic
+ * transformation object. This function is called only once at
+ * the instantiation time, right after the transformation context
+ * was allocated. In case the cryptographic hardware has some
+ * special requirements which need to be handled by software, this
+ * function shall check for the precise requirement of the
+ * transformation and put any software fallbacks in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * @base: Common crypto API algorithm data structure
+ */
+struct sig_alg {
+ int (*sign)(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen);
+ int (*verify)(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen);
+ int (*set_pub_key)(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen);
+ int (*set_priv_key)(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen);
+ unsigned int (*key_size)(struct crypto_sig *tfm);
+ unsigned int (*digest_size)(struct crypto_sig *tfm);
+ unsigned int (*max_size)(struct crypto_sig *tfm);
+ int (*init)(struct crypto_sig *tfm);
+ void (*exit)(struct crypto_sig *tfm);
+
+ struct crypto_alg base;
+};
+
+/**
+ * DOC: Generic Public Key Signature API
+ *
+ * The Public Key Signature API is used with the algorithms of type
+ * CRYPTO_ALG_TYPE_SIG (listed as type "sig" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_sig() - allocate signature tfm handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * signing algorithm e.g. "ecdsa"
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ *
+ * Allocate a handle for public key signature algorithm. The returned struct
+ * crypto_sig is the handle that is required for any subsequent
+ * API invocation for signature operations.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_sig_tfm(struct crypto_sig *tfm)
+{
+ return &tfm->base;
+}
+
+static inline struct crypto_sig *__crypto_sig_tfm(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_sig, base);
+}
+
+static inline struct sig_alg *__crypto_sig_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct sig_alg, base);
+}
+
+static inline struct sig_alg *crypto_sig_alg(struct crypto_sig *tfm)
+{
+ return __crypto_sig_alg(crypto_sig_tfm(tfm)->__crt_alg);
+}
+
+/**
+ * crypto_free_sig() - free signature tfm handle
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
+ */
+static inline void crypto_free_sig(struct crypto_sig *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_sig_tfm(tfm));
+}
+
+/**
+ * crypto_sig_keysize() - Get key size
+ *
+ * Function returns the key size in bits.
+ * It assumes that the key has already been set in the transformation;
+ * calling it without a successful prior setkey may result in a NULL
+ * pointer dereference.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ */
+static inline unsigned int crypto_sig_keysize(struct crypto_sig *tfm)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->key_size(tfm);
+}
+
+/**
+ * crypto_sig_digestsize() - Get maximum digest size
+ *
+ * Function returns the maximum digest size in bytes.
+ * It assumes that the key has already been set in the transformation;
+ * calling it without a successful prior setkey may result in a NULL
+ * pointer dereference.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ */
+static inline unsigned int crypto_sig_digestsize(struct crypto_sig *tfm)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->digest_size(tfm);
+}
+
+/**
+ * crypto_sig_maxsize() - Get maximum signature size
+ *
+ * Function returns the maximum signature size in bytes.
+ * It assumes that the key has already been set in the transformation;
+ * calling it without a successful prior setkey may result in a NULL
+ * pointer dereference.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ */
+static inline unsigned int crypto_sig_maxsize(struct crypto_sig *tfm)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->max_size(tfm);
+}
+
+/**
+ * crypto_sig_sign() - Invoke signing operation
+ *
+ * Function invokes the specific signing operation for a given algorithm
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ * @src: source buffer
+ * @slen: source length
+ * @dst: destination buffer
+ * @dlen: destination length
+ *
+ * Return: signature size on success; error code in case of error
+ */
+static inline int crypto_sig_sign(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ void *dst, unsigned int dlen)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->sign(tfm, src, slen, dst, dlen);
+}
+
+/**
+ * crypto_sig_verify() - Invoke signature verification
+ *
+ * Function invokes the specific signature verification operation
+ * for a given algorithm.
+ *
+ * @tfm: signature tfm handle allocated with crypto_alloc_sig()
+ * @src: source buffer
+ * @slen: source length
+ * @digest: digest
+ * @dlen: digest length
+ *
+ * Return: zero on verification success; error code in case of error.
+ */
+static inline int crypto_sig_verify(struct crypto_sig *tfm,
+ const void *src, unsigned int slen,
+ const void *digest, unsigned int dlen)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->verify(tfm, src, slen, digest, dlen);
+}
+
+/**
+ * crypto_sig_set_pubkey() - Invoke set public key operation
+ *
+ * Function invokes the algorithm specific set key function, which knows
+ * how to decode and interpret the encoded key and parameters
+ *
+ * @tfm: tfm handle
+ * @key: BER encoded public key, algo OID, paramlen, BER encoded
+ * parameters
+ * @keylen: length of the key (not including other data)
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_sig_set_pubkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->set_pub_key(tfm, key, keylen);
+}
+
+/**
+ * crypto_sig_set_privkey() - Invoke set private key operation
+ *
+ * Function invokes the algorithm specific set key function, which knows
+ * how to decode and interpret the encoded key and parameters
+ *
+ * @tfm: tfm handle
+ * @key: BER encoded private key, algo OID, paramlen, BER encoded
+ * parameters
+ * @keylen: length of the key (not including other data)
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_sig_set_privkey(struct crypto_sig *tfm,
+ const void *key, unsigned int keylen)
+{
+ struct sig_alg *alg = crypto_sig_alg(tfm);
+
+ return alg->set_priv_key(tfm, key, keylen);
+}
+#endif
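
For illustration only (not part of this patch), a hedged sketch of how a caller might use the sig API defined above to verify a signature. The function name demo_sig_verify() is hypothetical, and the algorithm name "ecdsa-nist-p256" is an assumption; substitute whichever sig algorithm is available.

#include <crypto/sig.h>
#include <linux/err.h>

static int demo_sig_verify(const void *pubkey, unsigned int pubkey_len,
			   const void *sig, unsigned int sig_len,
			   const void *digest, unsigned int digest_len)
{
	struct crypto_sig *tfm;
	int err;

	tfm = crypto_alloc_sig("ecdsa-nist-p256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sig_set_pubkey(tfm, pubkey, pubkey_len);
	if (!err)
		err = crypto_sig_verify(tfm, sig, sig_len,
					digest, digest_len);

	crypto_free_sig(tfm);
	return err;	/* 0 on successful verification */
}
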
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index ef0fc9ed4342..9e5853464345 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -8,9 +8,25 @@
#ifndef _CRYPTO_SKCIPHER_H
#define _CRYPTO_SKCIPHER_H
+#include <linux/atomic.h>
+#include <linux/container_of.h>
#include <linux/crypto.h>
-#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+/* Set this bit if the lskcipher operation is a continuation. */
+#define CRYPTO_LSKCIPHER_FLAG_CONT 0x00000001
+/* Set this bit if the lskcipher operation is final. */
+#define CRYPTO_LSKCIPHER_FLAG_FINAL 0x00000002
+/* The bit CRYPTO_TFM_REQ_MAY_SLEEP can also be set if needed. */
+
+/* Set this bit if the skcipher operation is a continuation. */
+#define CRYPTO_SKCIPHER_REQ_CONT 0x00000001
+/* Set this bit if the skcipher operation is not final. */
+#define CRYPTO_SKCIPHER_REQ_NOTFINAL 0x00000002
+
+struct scatterlist;
/**
* struct skcipher_request - Symmetric key cipher request
@@ -44,8 +60,12 @@ struct crypto_sync_skcipher {
struct crypto_skcipher base;
};
-/**
- * struct skcipher_alg - symmetric key cipher definition
+struct crypto_lskcipher {
+ struct crypto_tfm base;
+};
+
+/*
+ * struct skcipher_alg_common - common properties of skcipher_alg
* @min_keysize: Minimum key size supported by the transformation. This is the
* smallest key length supported by this transformation algorithm.
* This must be set to one of the pre-defined values as this is
@@ -56,6 +76,26 @@ struct crypto_sync_skcipher {
* This must be set to one of the pre-defined values as this is
* not hardware specific. Possible values for this field can be
* found via git grep "_MAX_KEY_SIZE" include/crypto/
+ * @ivsize: IV size applicable for transformation. The consumer must provide an
+ * IV of exactly that size to perform the encrypt or decrypt operation.
+ * @chunksize: Equal to the block size except for stream ciphers such as
+ * CTR where it is set to the underlying block size.
+ * @statesize: Size of the internal state for the algorithm.
+ * @base: Definition of a generic crypto algorithm.
+ */
+#define SKCIPHER_ALG_COMMON { \
+ unsigned int min_keysize; \
+ unsigned int max_keysize; \
+ unsigned int ivsize; \
+ unsigned int chunksize; \
+ unsigned int statesize; \
+ \
+ struct crypto_alg base; \
+}
+struct skcipher_alg_common SKCIPHER_ALG_COMMON;
+
+/**
+ * struct skcipher_alg - symmetric key cipher definition
* @setkey: Set key for the transformation. This function is used to either
* program a supplied key into the hardware or store the key in the
* transformation context for programming it later. Note that this
@@ -79,6 +119,17 @@ struct crypto_sync_skcipher {
* be called in parallel with the same transformation object.
* @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
* and the conditions are exactly the same.
+ * @export: Export partial state of the transformation. This function dumps the
+ * entire state of the ongoing transformation into a provided block of
+ * data so it can be @import 'ed back later on. This is useful when you
+ * want to save the partial result of the transformation after
+ * processing a certain amount of data, and reload that partial result
+ * later, possibly more than once. No data processing happens at this
+ * point.
+ * @import: Import partial state of the transformation. This function loads the
+ * entire state of the ongoing transformation from a provided block of
+ * data so the transformation can continue from this point onward. No
+ * data processing happens at this point.
* @init: Initialize the cryptographic transformation object. This function
* is used to initialize the cryptographic transformation object.
* This function is called only once at the instantiation time, right
@@ -90,14 +141,10 @@ struct crypto_sync_skcipher {
* @exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @init, used to remove various changes set in
* @init.
- * @ivsize: IV size applicable for transformation. The consumer must provide an
- * IV of exactly that size to perform the encrypt or decrypt operation.
- * @chunksize: Equal to the block size except for stream ciphers such as
- * CTR where it is set to the underlying block size.
* @walksize: Equal to the chunk size except in cases where the algorithm is
* considerably more efficient if it can operate on multiple chunks
* in parallel. Should be a multiple of chunksize.
- * @base: Definition of a generic crypto algorithm.
+ * @co: see struct skcipher_alg_common
*
* All fields except @ivsize are mandatory and must be filled.
*/
@@ -106,30 +153,78 @@ struct skcipher_alg {
unsigned int keylen);
int (*encrypt)(struct skcipher_request *req);
int (*decrypt)(struct skcipher_request *req);
+ int (*export)(struct skcipher_request *req, void *out);
+ int (*import)(struct skcipher_request *req, const void *in);
int (*init)(struct crypto_skcipher *tfm);
void (*exit)(struct crypto_skcipher *tfm);
- unsigned int min_keysize;
- unsigned int max_keysize;
- unsigned int ivsize;
- unsigned int chunksize;
unsigned int walksize;
- struct crypto_alg base;
+ union {
+ struct SKCIPHER_ALG_COMMON;
+ struct skcipher_alg_common co;
+ };
+};
+
+/**
+ * struct lskcipher_alg - linear symmetric key cipher definition
+ * @setkey: Set key for the transformation. This function is used to either
+ * program a supplied key into the hardware or store the key in the
+ * transformation context for programming it later. Note that this
+ * function does modify the transformation context. This function can
+ * be called multiple times during the existence of the transformation
+ * object, so one must make sure the key is properly reprogrammed into
+ * the hardware. This function is also responsible for checking the key
+ * length for validity. In case a software fallback was put in place in
+ * the @cra_init call, this function might need to use the fallback if
+ * the algorithm doesn't support all of the key sizes.
+ * @encrypt: Encrypt a number of bytes. This function is used to encrypt
+ * the supplied data. This function shall not modify
+ * the transformation context, as this function may be called
+ * in parallel with the same transformation object. Data
+ * may be left over if length is not a multiple of blocks
+ * and there is more to come (final == false). The number of
+ * left-over bytes should be returned in case of success.
+ * The siv field shall be as long as ivsize + statesize with
+ * the IV placed at the front. The state will be used by the
+ * algorithm internally.
+ * @decrypt: Decrypt a number of bytes. This is a reverse counterpart to
+ * @encrypt and the conditions are exactly the same.
+ * @init: Initialize the cryptographic transformation object. This function
+ * is used to initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation time, right
+ * after the transformation context was allocated.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ * @co: see struct skcipher_alg_common
+ */
+struct lskcipher_alg {
+ int (*setkey)(struct crypto_lskcipher *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*encrypt)(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv, u32 flags);
+ int (*decrypt)(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv, u32 flags);
+ int (*init)(struct crypto_lskcipher *tfm);
+ void (*exit)(struct crypto_lskcipher *tfm);
+
+ struct skcipher_alg_common co;
};
#define MAX_SYNC_SKCIPHER_REQSIZE 384
/*
- * This performs a type-check against the "tfm" argument to make sure
+ * This performs a type-check against the "_tfm" argument to make sure
* all users have the correct skcipher tfm for doing on-stack requests.
*/
-#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \
+#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, _tfm) \
char __##name##_desc[sizeof(struct skcipher_request) + \
- MAX_SYNC_SKCIPHER_REQSIZE + \
- (!(sizeof((struct crypto_sync_skcipher *)1 == \
- (typeof(tfm))1))) \
+ MAX_SYNC_SKCIPHER_REQSIZE \
] CRYPTO_MINALIGN_ATTR; \
- struct skcipher_request *name = (void *)__##name##_desc
+ struct skcipher_request *name = \
+ (((struct skcipher_request *)__##name##_desc)->base.tfm = \
+ crypto_sync_skcipher_tfm((_tfm)), \
+ (void *)__##name##_desc)
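
For illustration only (not part of this patch), a hedged sketch of the on-stack request pattern that the reworked macro above supports. demo_cbc_encrypt() is a hypothetical caller and "cbc(aes)" is assumed to be available. With the new macro the request's tfm is already set, so the explicit skcipher_request_set_sync_tfm() call is retained only to match established usage.

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int demo_cbc_encrypt(const u8 *key, unsigned int keylen,
			    u8 *buf, unsigned int len, u8 *iv)
{
	struct crypto_sync_skcipher *tfm;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		sg_init_one(&sg, buf, len);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}

	crypto_free_sync_skcipher(tfm);
	return err;
}
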
/**
* DOC: Symmetric Key Cipher API
@@ -187,12 +282,42 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name,
u32 type, u32 mask);
+
+/**
+ * crypto_alloc_lskcipher() - allocate linear symmetric key cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * lskcipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an lskcipher. The returned struct
+ * crypto_lskcipher is the cipher handle that is required for any subsequent
+ * API invocation for that lskcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name,
+ u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_skcipher_tfm(
struct crypto_skcipher *tfm)
{
return &tfm->base;
}
+static inline struct crypto_tfm *crypto_lskcipher_tfm(
+ struct crypto_lskcipher *tfm)
+{
+ return &tfm->base;
+}
+
+static inline struct crypto_tfm *crypto_sync_skcipher_tfm(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_tfm(&tfm->base);
+}
+
/**
* crypto_free_skcipher() - zeroize and free cipher handle
* @tfm: cipher handle to be freed
@@ -210,6 +335,17 @@ static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm)
}
/**
+ * crypto_free_lskcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
+ */
+static inline void crypto_free_lskcipher(struct crypto_lskcipher *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_lskcipher_tfm(tfm));
+}
+
+/**
* crypto_has_skcipher() - Search for the availability of an skcipher.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* skcipher
@@ -227,6 +363,19 @@ static inline const char *crypto_skcipher_driver_name(
return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
}
+static inline const char *crypto_lskcipher_driver_name(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_tfm_alg_driver_name(crypto_lskcipher_tfm(tfm));
+}
+
+static inline struct skcipher_alg_common *crypto_skcipher_alg_common(
+ struct crypto_skcipher *tfm)
+{
+ return container_of(crypto_skcipher_tfm(tfm)->__crt_alg,
+ struct skcipher_alg_common, base);
+}
+
static inline struct skcipher_alg *crypto_skcipher_alg(
struct crypto_skcipher *tfm)
{
@@ -234,9 +383,11 @@ static inline struct skcipher_alg *crypto_skcipher_alg(
struct skcipher_alg, base);
}
-static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
+static inline struct lskcipher_alg *crypto_lskcipher_alg(
+ struct crypto_lskcipher *tfm)
{
- return alg->ivsize;
+ return container_of(crypto_lskcipher_tfm(tfm)->__crt_alg,
+ struct lskcipher_alg, co.base);
}
/**
@@ -250,7 +401,7 @@ static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
*/
static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
{
- return crypto_skcipher_alg(tfm)->ivsize;
+ return crypto_skcipher_alg_common(tfm)->ivsize;
}
static inline unsigned int crypto_sync_skcipher_ivsize(
@@ -260,6 +411,21 @@ static inline unsigned int crypto_sync_skcipher_ivsize(
}
/**
+ * crypto_lskcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the lskcipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_lskcipher_ivsize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.ivsize;
+}
+
+/**
* crypto_skcipher_blocksize() - obtain block size of cipher
* @tfm: cipher handle
*
@@ -275,10 +441,20 @@ static inline unsigned int crypto_skcipher_blocksize(
return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
}
-static inline unsigned int crypto_skcipher_alg_chunksize(
- struct skcipher_alg *alg)
+/**
+ * crypto_lskcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the lskcipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_lskcipher_blocksize(
+ struct crypto_lskcipher *tfm)
{
- return alg->chunksize;
+ return crypto_tfm_alg_blocksize(crypto_lskcipher_tfm(tfm));
}
/**
@@ -295,7 +471,58 @@ static inline unsigned int crypto_skcipher_alg_chunksize(
static inline unsigned int crypto_skcipher_chunksize(
struct crypto_skcipher *tfm)
{
- return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
+ return crypto_skcipher_alg_common(tfm)->chunksize;
+}
+
+/**
+ * crypto_lskcipher_chunksize() - obtain chunk size
+ * @tfm: cipher handle
+ *
+ * The block size is set to one for ciphers such as CTR. However,
+ * you still need to provide incremental updates in multiples of
+ * the underlying block size as the IV does not have sub-block
+ * granularity. This is known in this API as the chunk size.
+ *
+ * Return: chunk size in bytes
+ */
+static inline unsigned int crypto_lskcipher_chunksize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.chunksize;
+}
+
+/**
+ * crypto_skcipher_statesize() - obtain state size
+ * @tfm: cipher handle
+ *
+ * Some algorithms cannot be chained with the IV alone. They carry
+ * internal state which must be replicated if data is to be processed
+ * incrementally. The size of that state can be obtained with this
+ * function.
+ *
+ * Return: state size in bytes
+ */
+static inline unsigned int crypto_skcipher_statesize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_skcipher_alg_common(tfm)->statesize;
+}
+
+/**
+ * crypto_lskcipher_statesize() - obtain state size
+ * @tfm: cipher handle
+ *
+ * Some algorithms cannot be chained with the IV alone. They carry
+ * internal state which must be replicated if data is to be processed
+ * incrementally. The size of that state can be obtained with this
+ * function.
+ *
+ * Return: state size in bytes
+ */
+static inline unsigned int crypto_lskcipher_statesize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.statesize;
}
static inline unsigned int crypto_sync_skcipher_blocksize(
@@ -310,6 +537,12 @@ static inline unsigned int crypto_skcipher_alignmask(
return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm));
}
+static inline unsigned int crypto_lskcipher_alignmask(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_lskcipher_tfm(tfm));
+}
+
static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm)
{
return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm));
@@ -345,6 +578,23 @@ static inline void crypto_sync_skcipher_clear_flags(
crypto_skcipher_clear_flags(&tfm->base, flags);
}
+static inline u32 crypto_lskcipher_get_flags(struct crypto_lskcipher *tfm)
+{
+ return crypto_tfm_get_flags(crypto_lskcipher_tfm(tfm));
+}
+
+static inline void crypto_lskcipher_set_flags(struct crypto_lskcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_set_flags(crypto_lskcipher_tfm(tfm), flags);
+}
+
+static inline void crypto_lskcipher_clear_flags(struct crypto_lskcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_lskcipher_tfm(tfm), flags);
+}
+
/**
* crypto_skcipher_setkey() - set key for cipher
* @tfm: cipher handle
@@ -370,16 +620,47 @@ static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
return crypto_skcipher_setkey(&tfm->base, key, keylen);
}
+/**
+ * crypto_lskcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the lskcipher referenced by the cipher
+ * handle.
+ *
+ * Note that the key length determines the cipher variant. Many block ciphers
+ * come in several key sizes, such as AES-128 vs. AES-192 vs. AES-256. When a
+ * 16 byte key is provided for an AES cipher handle, AES-128 is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm,
+ const u8 *key, unsigned int keylen);
+
static inline unsigned int crypto_skcipher_min_keysize(
struct crypto_skcipher *tfm)
{
- return crypto_skcipher_alg(tfm)->min_keysize;
+ return crypto_skcipher_alg_common(tfm)->min_keysize;
}
static inline unsigned int crypto_skcipher_max_keysize(
struct crypto_skcipher *tfm)
{
- return crypto_skcipher_alg(tfm)->max_keysize;
+ return crypto_skcipher_alg_common(tfm)->max_keysize;
+}
+
+static inline unsigned int crypto_lskcipher_min_keysize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.min_keysize;
+}
+
+static inline unsigned int crypto_lskcipher_max_keysize(
+ struct crypto_lskcipher *tfm)
+{
+ return crypto_lskcipher_alg(tfm)->co.max_keysize;
}
/**
@@ -432,6 +713,78 @@ int crypto_skcipher_encrypt(struct skcipher_request *req);
int crypto_skcipher_decrypt(struct skcipher_request *req);
/**
+ * crypto_skcipher_export() - export partial state
+ * @req: reference to the skcipher_request handle that holds all information
+ * needed to perform the operation
+ * @out: output buffer of sufficient size that can hold the state
+ *
+ * Export the partial state of the transformation. This function dumps the
+ * entire state of the ongoing transformation into a provided block of
+ * data so it can be imported back later on. This is useful when you
+ * want to save the partial result of the transformation after
+ * processing a certain amount of data and reload that partial result
+ * multiple times later on for re-use. No data processing happens at
+ * this point.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+int crypto_skcipher_export(struct skcipher_request *req, void *out);
+
+/**
+ * crypto_skcipher_import() - import partial state
+ * @req: reference to the skcipher_request handle that holds all information
+ * needed to perform the operation
+ * @in: buffer holding the state
+ *
+ * Import partial state of the transformation. This function loads the
+ * entire state of the ongoing transformation from a provided block of
+ * data so the transformation can continue from this point onward. No
+ * data processing happens at this point.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+int crypto_skcipher_import(struct skcipher_request *req, const void *in);
+
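As a usage sketch for this export/import pair (not part of the patch; it assumes a tfm and request that are already set up, an algorithm with a non-zero statesize, and abbreviates error handling):

	u8 *state = kmalloc(crypto_skcipher_statesize(tfm), GFP_KERNEL);
	int err;

	if (!state)
		return -ENOMEM;

	/* Process the first part of the data, then snapshot the state. */
	err = crypto_skcipher_encrypt(req);
	if (!err)
		err = crypto_skcipher_export(req, state);

	/* ...later, resume exactly where the export left off. */
	if (!err)
		err = crypto_skcipher_import(req, state);
	if (!err)
		err = crypto_skcipher_encrypt(req);

	kfree(state);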
+/**
+ * crypto_lskcipher_encrypt() - encrypt plaintext
+ * @tfm: lskcipher handle
+ * @src: source buffer
+ * @dst: destination buffer
+ * @len: number of bytes to process
+ * @siv: IV + state for the cipher operation. The length of the IV must
+ * comply with the IV size defined by crypto_lskcipher_ivsize. The
+ * IV is then followed with a buffer with the length as specified by
+ * crypto_lskcipher_statesize.
+ *
+ * Encrypt plaintext data using the lskcipher handle.
+ *
+ * Return: >=0 if the cipher operation was successful; a positive value
+ *	   is the number of bytes left unprocessed;
+ *	   < 0 if an error occurred
+ */
+int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv);
+
+/**
+ * crypto_lskcipher_decrypt() - decrypt ciphertext
+ * @tfm: lskcipher handle
+ * @src: source buffer
+ * @dst: destination buffer
+ * @len: number of bytes to process
+ * @siv: IV + state for the cipher operation. The length of the IV must
+ * comply with the IV size defined by crypto_lskcipher_ivsize. The
+ * IV is then followed with a buffer with the length as specified by
+ * crypto_lskcipher_statesize.
+ *
+ * Decrypt ciphertext data using the lskcipher handle.
+ *
+ * Return: >=0 if the cipher operation was successful; a positive value
+ *	   is the number of bytes left unprocessed;
+ *	   < 0 if an error occurred
+ */
+int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
+ u8 *dst, unsigned len, u8 *siv);
+
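A minimal sketch tying the lskcipher entry points together (hedged: crypto_alloc_lskcipher()/crypto_free_lskcipher() and crypto_lskcipher_ivsize() are helpers assumed from elsewhere in this header, and error handling is abbreviated):

	struct crypto_lskcipher *tfm;
	u8 key[16] = { /* AES-128 key */ };
	u8 src[64] = { /* plaintext */ }, dst[64];
	u8 *siv;
	int err;

	tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* One buffer carries the IV immediately followed by the state. */
	siv = kzalloc(crypto_lskcipher_ivsize(tfm) +
		      crypto_lskcipher_statesize(tfm), GFP_KERNEL);

	err = crypto_lskcipher_setkey(tfm, key, sizeof(key));
	if (!err)
		/* partial updates must be multiples of the chunk size */
		err = crypto_lskcipher_encrypt(tfm, src, dst, sizeof(src), siv);

	kfree(siv);
	crypto_free_lskcipher(tfm);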
+/**
* DOC: Symmetric Key Cipher Request Handle
*
* The skcipher_request data structure contains all pointers to data
@@ -490,19 +843,20 @@ static inline struct skcipher_request *skcipher_request_cast(
*
* Return: allocated request handle in case of success, or NULL if out of memory
*/
-static inline struct skcipher_request *skcipher_request_alloc(
+static inline struct skcipher_request *skcipher_request_alloc_noprof(
struct crypto_skcipher *tfm, gfp_t gfp)
{
struct skcipher_request *req;
- req = kmalloc(sizeof(struct skcipher_request) +
- crypto_skcipher_reqsize(tfm), gfp);
+ req = kmalloc_noprof(sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(tfm), gfp);
if (likely(req))
skcipher_request_set_tfm(req, tfm);
return req;
}
+#define skcipher_request_alloc(...) alloc_hooks(skcipher_request_alloc_noprof(__VA_ARGS__))
/**
* skcipher_request_free() - zeroize and free request data structure
diff --git a/include/crypto/sm2.h b/include/crypto/sm2.h
deleted file mode 100644
index af452556dcd4..000000000000
--- a/include/crypto/sm2.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * sm2.h - SM2 asymmetric public-key algorithm
- * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and
- * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02
- *
- * Copyright (c) 2020, Alibaba Group.
- * Written by Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
- */
-
-#ifndef _CRYPTO_SM2_H
-#define _CRYPTO_SM2_H
-
-#include <crypto/sm3.h>
-#include <crypto/akcipher.h>
-
-/* The default user id as specified in GM/T 0009-2012 */
-#define SM2_DEFAULT_USERID "1234567812345678"
-#define SM2_DEFAULT_USERID_LEN 16
-
-extern int sm2_compute_z_digest(struct crypto_akcipher *tfm,
- const unsigned char *id, size_t id_len,
- unsigned char dgst[SM3_DIGEST_SIZE]);
-
-#endif /* _CRYPTO_SM2_H */
diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h
index 42ea21289ba9..c8d02c86c298 100644
--- a/include/crypto/sm3.h
+++ b/include/crypto/sm3.h
@@ -1,5 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Common values for SM3 algorithm
+ *
+ * Copyright (C) 2017 ARM Limited or its affiliates.
+ * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com>
+ * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
#ifndef _CRYPTO_SM3_H
@@ -9,6 +14,7 @@
#define SM3_DIGEST_SIZE 32
#define SM3_BLOCK_SIZE 64
+#define SM3_STATE_SIZE 40
#define SM3_T1 0x79CC4519
#define SM3_T2 0x7A879D8A
@@ -30,13 +36,29 @@ struct sm3_state {
u8 buffer[SM3_BLOCK_SIZE];
};
-struct shash_desc;
+/*
+ * Stand-alone implementation of the SM3 algorithm. It is designed to
+ * have as few dependencies as possible so it can be used in the
+ * kexec_file purgatory. In other cases you should generally use the
+ * hash APIs from include/crypto/hash.h, especially when hashing large
+ * amounts of data, as those APIs may be hw-accelerated.
+ *
+ * For details see lib/crypto/sm3.c
+ */
-extern int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
+static inline void sm3_init(struct sm3_state *sctx)
+{
+ sctx->state[0] = SM3_IVA;
+ sctx->state[1] = SM3_IVB;
+ sctx->state[2] = SM3_IVC;
+ sctx->state[3] = SM3_IVD;
+ sctx->state[4] = SM3_IVE;
+ sctx->state[5] = SM3_IVF;
+ sctx->state[6] = SM3_IVG;
+ sctx->state[7] = SM3_IVH;
+ sctx->count = 0;
+}
-extern int crypto_sm3_final(struct shash_desc *desc, u8 *out);
+void sm3_block_generic(struct sm3_state *sctx, u8 const *data, int blocks);
-extern int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
#endif
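A hedged sketch of the stand-alone flow: with only sm3_init() and sm3_block_generic() exported, the caller owns block buffering, and the final padding (as implemented by sm3_base_do_finup() in sm3_base.h below) is omitted here:

	static void sm3_hash_blocks(struct sm3_state *sctx,
				    const u8 *data, unsigned int len)
	{
		/* len must be a whole number of SM3_BLOCK_SIZE blocks */
		sm3_block_generic(sctx, data, len / SM3_BLOCK_SIZE);
		sctx->count += len;
	}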
diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h
index 2f3a32ab97bb..7c53570bc05e 100644
--- a/include/crypto/sm3_base.h
+++ b/include/crypto/sm3_base.h
@@ -11,87 +11,59 @@
#include <crypto/internal/hash.h>
#include <crypto/sm3.h>
-#include <linux/crypto.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/string.h>
-#include <asm/unaligned.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks);
static inline int sm3_base_init(struct shash_desc *desc)
{
- struct sm3_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SM3_IVA;
- sctx->state[1] = SM3_IVB;
- sctx->state[2] = SM3_IVC;
- sctx->state[3] = SM3_IVD;
- sctx->state[4] = SM3_IVE;
- sctx->state[5] = SM3_IVF;
- sctx->state[6] = SM3_IVG;
- sctx->state[7] = SM3_IVH;
- sctx->count = 0;
-
+ sm3_init(shash_desc_ctx(desc));
return 0;
}
-static inline int sm3_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sm3_block_fn *block_fn)
+static inline int sm3_base_do_update_blocks(struct shash_desc *desc,
+ const u8 *data, unsigned int len,
+ sm3_block_fn *block_fn)
{
+ unsigned int remain = len - round_down(len, SM3_BLOCK_SIZE);
struct sm3_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
-
- sctx->count += len;
-
- if (unlikely((partial + len) >= SM3_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SM3_BLOCK_SIZE - partial;
-
- memcpy(sctx->buffer + partial, data, p);
- data += p;
- len -= p;
- block_fn(sctx, sctx->buffer, 1);
- }
-
- blocks = len / SM3_BLOCK_SIZE;
- len %= SM3_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SM3_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buffer + partial, data, len);
-
- return 0;
+ sctx->count += len - remain;
+ block_fn(sctx, data, len / SM3_BLOCK_SIZE);
+ return remain;
}
-static inline int sm3_base_do_finalize(struct shash_desc *desc,
- sm3_block_fn *block_fn)
+static inline int sm3_base_do_finup(struct shash_desc *desc,
+ const u8 *src, unsigned int len,
+ sm3_block_fn *block_fn)
{
- const int bit_offset = SM3_BLOCK_SIZE - sizeof(__be64);
+ unsigned int bit_offset = SM3_BLOCK_SIZE / 8 - 1;
struct sm3_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
- unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
+ union {
+ __be64 b64[SM3_BLOCK_SIZE / 4];
+ u8 u8[SM3_BLOCK_SIZE * 2];
+ } block = {};
- sctx->buffer[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buffer + partial, 0x0, SM3_BLOCK_SIZE - partial);
- partial = 0;
+ if (len >= SM3_BLOCK_SIZE) {
+ int remain;
- block_fn(sctx, sctx->buffer, 1);
+ remain = sm3_base_do_update_blocks(desc, src, len, block_fn);
+ src += len - remain;
+ len = remain;
}
- memset(sctx->buffer + partial, 0x0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, sctx->buffer, 1);
+ if (len >= bit_offset * 8)
+ bit_offset += SM3_BLOCK_SIZE / 8;
+ memcpy(&block, src, len);
+ block.u8[len] = 0x80;
+ sctx->count += len;
+ block.b64[bit_offset] = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, block.u8, (bit_offset + 1) * 8 / SM3_BLOCK_SIZE);
+ memzero_explicit(&block, sizeof(block));
return 0;
}
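For reference, a shash driver built on these helpers wires them up roughly like this (a sketch in the style of crypto/sm3_generic.c, using sm3_block_generic() from sm3.h as the block function):

	static int sm3_update(struct shash_desc *desc, const u8 *data,
			      unsigned int len)
	{
		return sm3_base_do_update_blocks(desc, data, len,
						 sm3_block_generic);
	}

	static int sm3_finup(struct shash_desc *desc, const u8 *data,
			     unsigned int len, u8 *hash)
	{
		sm3_base_do_finup(desc, data, len, sm3_block_generic);
		return sm3_base_finish(desc, hash);
	}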
@@ -104,8 +76,6 @@ static inline int sm3_base_finish(struct shash_desc *desc, u8 *out)
for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++)
put_unaligned_be32(sctx->state[i], digest++);
-
- memzero_explicit(sctx, sizeof(*sctx));
return 0;
}
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
index 7afd730d16ff..9656a9a40326 100644
--- a/include/crypto/sm4.h
+++ b/include/crypto/sm4.h
@@ -3,6 +3,7 @@
/*
* Common values for the SM4 algorithm
* Copyright (C) 2018 ARM Limited or its affiliates.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
#ifndef _CRYPTO_SM4_H
@@ -15,17 +16,33 @@
#define SM4_BLOCK_SIZE 16
#define SM4_RKEY_WORDS 32
-struct crypto_sm4_ctx {
+struct sm4_ctx {
u32 rkey_enc[SM4_RKEY_WORDS];
u32 rkey_dec[SM4_RKEY_WORDS];
};
-int crypto_sm4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- unsigned int key_len);
-int crypto_sm4_expand_key(struct crypto_sm4_ctx *ctx, const u8 *in_key,
+extern const u32 crypto_sm4_fk[];
+extern const u32 crypto_sm4_ck[];
+extern const u8 crypto_sm4_sbox[];
+
+/**
+ * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016
+ * @ctx: The location where the computed key will be stored.
+ * @in_key: The supplied key.
+ * @key_len: The length of the supplied key.
+ *
+ * Returns 0 on success. The function fails only if an invalid key size (or
+ * pointer) is supplied.
+ */
+int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key,
unsigned int key_len);
-void crypto_sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
-void crypto_sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in);
+/**
+ * sm4_crypt_block - Encrypt or decrypt a single SM4 block
+ * @rk: The rkey_enc for encrypt or rkey_dec for decrypt
+ * @out: Buffer to store output data
+ * @in: Buffer containing the input data
+ */
+void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in);
#endif
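A short usage sketch for the new library-style API (hedged: SM4_KEY_SIZE, 16 bytes, is defined earlier in this header; key and in are a caller-supplied key and input block):

	struct sm4_ctx ctx;
	u8 ct[SM4_BLOCK_SIZE], pt[SM4_BLOCK_SIZE];

	if (sm4_expandkey(&ctx, key, SM4_KEY_SIZE))
		return -EINVAL;

	sm4_crypt_block(ctx.rkey_enc, ct, in);	/* encrypt one block */
	sm4_crypt_block(ctx.rkey_dec, pt, ct);	/* decrypt it back */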
diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h
index cae1b4a01971..570f720a113b 100644
--- a/include/crypto/streebog.h
+++ b/include/crypto/streebog.h
@@ -23,15 +23,10 @@ struct streebog_uint512 {
};
struct streebog_state {
- union {
- u8 buffer[STREEBOG_BLOCK_SIZE];
- struct streebog_uint512 m;
- };
struct streebog_uint512 hash;
struct streebog_uint512 h;
struct streebog_uint512 N;
struct streebog_uint512 Sigma;
- size_t fillsize;
};
#endif /* !_CRYPTO_STREEBOG_H_ */
diff --git a/include/crypto/utils.h b/include/crypto/utils.h
new file mode 100644
index 000000000000..2594f45777b5
--- /dev/null
+++ b/include/crypto/utils.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Cryptographic utilities
+ *
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_UTILS_H
+#define _CRYPTO_UTILS_H
+
+#include <linux/unaligned.h>
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);
+
+static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+ __builtin_constant_p(size) &&
+ (size % sizeof(unsigned long)) == 0) {
+ unsigned long *d = (unsigned long *)dst;
+ unsigned long *s = (unsigned long *)src;
+ unsigned long l;
+
+ while (size > 0) {
+ l = get_unaligned(d) ^ get_unaligned(s++);
+ put_unaligned(l, d++);
+ size -= sizeof(unsigned long);
+ }
+ } else {
+ __crypto_xor(dst, dst, src, size);
+ }
+}
+
+static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
+ unsigned int size)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+ __builtin_constant_p(size) &&
+ (size % sizeof(unsigned long)) == 0) {
+ unsigned long *d = (unsigned long *)dst;
+ unsigned long *s1 = (unsigned long *)src1;
+ unsigned long *s2 = (unsigned long *)src2;
+ unsigned long l;
+
+ while (size > 0) {
+ l = get_unaligned(s1++) ^ get_unaligned(s2++);
+ put_unaligned(l, d++);
+ size -= sizeof(unsigned long);
+ }
+ } else {
+ __crypto_xor(dst, src1, src2, size);
+ }
+}
+
+noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
+
+/**
+ * crypto_memneq - Compare two areas of memory without leaking
+ * timing information.
+ *
+ * @a: One area of memory
+ * @b: Another area of memory
+ * @size: The size of the areas in bytes.
+ *
+ * Returns 0 when data is equal, 1 otherwise.
+ */
+static inline int crypto_memneq(const void *a, const void *b, size_t size)
+{
+ return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
+}
+
+#endif /* _CRYPTO_UTILS_H */
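Two typical call sites for these utilities, sketched (buffer sizes are illustrative):

	u8 block[16], iv[16];
	u8 mac[16], expected[16];

	/* XOR the IV into the block in place, e.g. for CBC chaining. */
	crypto_xor(block, iv, sizeof(block));

	/* Constant-time tag check; memcmp() would leak the mismatch offset. */
	if (crypto_memneq(mac, expected, sizeof(mac)))
		return -EBADMSG;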
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index 0f8dba69feb4..15b16c4853d8 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -8,8 +8,8 @@
#define XTS_BLOCK_SIZE 16
-static inline int xts_check_key(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen)
+static inline int xts_verify_key(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
{
/*
* key consists of keys of equal size concatenated, therefore
@@ -18,24 +18,17 @@ static inline int xts_check_key(struct crypto_tfm *tfm,
if (keylen % 2)
return -EINVAL;
- /* ensure that the AES and tweak key are not identical */
- if (fips_enabled && !crypto_memneq(key, key + (keylen / 2), keylen / 2))
- return -EINVAL;
-
- return 0;
-}
-
-static inline int xts_verify_key(struct crypto_skcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
/*
- * key consists of keys of equal size concatenated, therefore
- * the length must be even.
+ * In FIPS mode only a combined key length of either 256 or
+ * 512 bits is allowed, c.f. FIPS 140-3 IG C.I.
*/
- if (keylen % 2)
+ if (fips_enabled && keylen != 32 && keylen != 64)
return -EINVAL;
- /* ensure that the AES and tweak key are not identical */
+ /*
+ * Ensure that the AES and tweak key are not identical when
+ * in FIPS mode or the FORBID_WEAK_KEYS flag is set.
+ */
if ((fips_enabled || (crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) &&
!crypto_memneq(key, key + (keylen / 2), keylen / 2))
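Drivers typically call xts_verify_key() at the top of their setkey handler before splitting the key in half; a sketch, where xts_set_hw_keys() is a hypothetical driver helper:

	static int xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int keylen)
	{
		int err = xts_verify_key(tfm, key, keylen);

		if (err)
			return err;

		/* first half is the data key, second half the tweak key */
		return xts_set_hw_keys(tfm, key, key + keylen / 2,
				       keylen / 2);
	}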
diff --git a/include/cxl/einj.h b/include/cxl/einj.h
new file mode 100644
index 000000000000..624ff6ff41f9
--- /dev/null
+++ b/include/cxl/einj.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * CXL protocol Error INJection support.
+ *
+ * Copyright (c) 2023 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Ben Cheatham <benjamin.cheatham@amd.com>
+ */
+#ifndef EINJ_CXL_H
+#define EINJ_CXL_H
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct pci_dev;
+struct seq_file;
+
+#if IS_ENABLED(CONFIG_ACPI_APEI_EINJ_CXL)
+int einj_cxl_available_error_type_show(struct seq_file *m, void *v);
+int einj_cxl_inject_error(struct pci_dev *dport_dev, u64 type);
+int einj_cxl_inject_rch_error(u64 rcrb, u64 type);
+bool einj_cxl_is_initialized(void);
+#else /* !IS_ENABLED(CONFIG_ACPI_APEI_EINJ_CXL) */
+static inline int einj_cxl_available_error_type_show(struct seq_file *m,
+ void *v)
+{
+ return -ENXIO;
+}
+
+static inline int einj_cxl_inject_error(struct pci_dev *dport_dev, u64 type)
+{
+ return -ENXIO;
+}
+
+static inline int einj_cxl_inject_rch_error(u64 rcrb, u64 type)
+{
+ return -ENXIO;
+}
+
+static inline bool einj_cxl_is_initialized(void) { return false; }
+#endif /* CONFIG_ACPI_APEI_EINJ_CXL */
+
+#endif /* EINJ_CXL_H */
diff --git a/include/cxl/event.h b/include/cxl/event.h
new file mode 100644
index 000000000000..6fd90f9cc203
--- /dev/null
+++ b/include/cxl/event.h
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Intel Corporation. */
+#ifndef _LINUX_CXL_EVENT_H
+#define _LINUX_CXL_EVENT_H
+
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/workqueue_types.h>
+
+/*
+ * Common Event Record Format
+ * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ */
+struct cxl_event_record_hdr {
+ u8 length;
+ u8 flags[3];
+ __le16 handle;
+ __le16 related_handle;
+ __le64 timestamp;
+ u8 maint_op_class;
+ u8 maint_op_sub_class;
+ __le16 ld_id;
+ u8 head_id;
+ u8 reserved[11];
+} __packed;
+
+struct cxl_event_media_hdr {
+ struct cxl_event_record_hdr hdr;
+ __le64 phys_addr;
+ u8 descriptor;
+ u8 type;
+ u8 transaction_type;
+ /*
+	 * The meaning of the Validity Flags from bit 2 onward
+	 * differs between DRAM and General Media records.
+ */
+ u8 validity_flags[2];
+ u8 channel;
+ u8 rank;
+} __packed;
+
+#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
+struct cxl_event_generic {
+ struct cxl_event_record_hdr hdr;
+ u8 data[CXL_EVENT_RECORD_DATA_LENGTH];
+} __packed;
+
+/*
+ * General Media Event Record
+ * CXL rev 3.1 Section 8.2.9.2.1.1; Table 8-45
+ */
+#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
+struct cxl_event_gen_media {
+ struct cxl_event_media_hdr media_hdr;
+ u8 device[3];
+ u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ u8 cme_threshold_ev_flags;
+ u8 cme_count[3];
+ u8 sub_type;
+ u8 reserved[41];
+} __packed;
+
+/*
+ * DRAM Event Record - DER
+ * CXL rev 3.1 section 8.2.9.2.1.2; Table 8-46
+ */
+#define CXL_EVENT_DER_CORRECTION_MASK_SIZE 0x20
+struct cxl_event_dram {
+ struct cxl_event_media_hdr media_hdr;
+ u8 nibble_mask[3];
+ u8 bank_group;
+ u8 bank;
+ u8 row[3];
+ u8 column[2];
+ u8 correction_mask[CXL_EVENT_DER_CORRECTION_MASK_SIZE];
+ u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ u8 sub_channel;
+ u8 cme_threshold_ev_flags;
+ u8 cvme_count[3];
+ u8 sub_type;
+ u8 reserved;
+} __packed;
+
+/*
+ * Get Health Info Record
+ * CXL rev 3.1 section 8.2.9.9.3.1; Table 8-133
+ */
+struct cxl_get_health_info {
+ u8 health_status;
+ u8 media_status;
+ u8 add_status;
+ u8 life_used;
+ u8 device_temp[2];
+ u8 dirty_shutdown_cnt[4];
+ u8 cor_vol_err_cnt[4];
+ u8 cor_per_err_cnt[4];
+} __packed;
+
+/*
+ * Memory Module Event Record
+ * CXL rev 3.1 section 8.2.9.2.1.3; Table 8-47
+ */
+struct cxl_event_mem_module {
+ struct cxl_event_record_hdr hdr;
+ u8 event_type;
+ struct cxl_get_health_info info;
+ u8 validity_flags[2];
+ u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ u8 event_sub_type;
+ u8 reserved[0x2a];
+} __packed;
+
+/*
+ * Memory Sparing Event Record - MSER
+ * CXL rev 3.2 section 8.2.10.2.1.4; Table 8-60
+ */
+struct cxl_event_mem_sparing {
+ struct cxl_event_record_hdr hdr;
+ /*
+	 * The maintenance operation class and maintenance operation
+	 * subclass fields defined in the Memory Sparing Event Record
+	 * duplicate those in the common event record header. They are
+	 * therefore defined as reserved and will be removed once the
+	 * spec is corrected.
+ */
+ u8 rsv1;
+ u8 rsv2;
+ u8 flags;
+ u8 result;
+ __le16 validity_flags;
+ u8 reserved1[6];
+ __le16 res_avail;
+ u8 channel;
+ u8 rank;
+ u8 nibble_mask[3];
+ u8 bank_group;
+ u8 bank;
+ u8 row[3];
+ __le16 column;
+ u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
+ u8 sub_channel;
+ u8 reserved2[0x25];
+} __packed;
+
+union cxl_event {
+ struct cxl_event_generic generic;
+ struct cxl_event_gen_media gen_media;
+ struct cxl_event_dram dram;
+ struct cxl_event_mem_module mem_module;
+ struct cxl_event_mem_sparing mem_sparing;
+ /* dram & gen_media event header */
+ struct cxl_event_media_hdr media_hdr;
+} __packed;
+
+/*
+ * Common Event Record Format; in event logs
+ * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ */
+struct cxl_event_record_raw {
+ uuid_t id;
+ union cxl_event event;
+} __packed;
+
+enum cxl_event_type {
+ CXL_CPER_EVENT_GENERIC,
+ CXL_CPER_EVENT_GEN_MEDIA,
+ CXL_CPER_EVENT_DRAM,
+ CXL_CPER_EVENT_MEM_MODULE,
+ CXL_CPER_EVENT_MEM_SPARING,
+};
+
+#define CPER_CXL_DEVICE_ID_VALID BIT(0)
+#define CPER_CXL_DEVICE_SN_VALID BIT(1)
+#define CPER_CXL_COMP_EVENT_LOG_VALID BIT(2)
+struct cxl_cper_event_rec {
+ struct {
+ u32 length;
+ u64 validation_bits;
+ struct cper_cxl_event_devid {
+ u16 vendor_id;
+ u16 device_id;
+ u8 func_num;
+ u8 device_num;
+ u8 bus_num;
+ u16 segment_num;
+ u16 slot_num; /* bits 2:0 reserved */
+ u8 reserved;
+ } __packed device_id;
+ struct cper_cxl_event_sn {
+ u32 lower_dw;
+ u32 upper_dw;
+ } __packed dev_serial_num;
+ } __packed hdr;
+
+ union cxl_event event;
+} __packed;
+
+struct cxl_cper_work_data {
+ enum cxl_event_type event_type;
+ struct cxl_cper_event_rec rec;
+};
+
+#define PROT_ERR_VALID_AGENT_TYPE BIT_ULL(0)
+#define PROT_ERR_VALID_AGENT_ADDRESS BIT_ULL(1)
+#define PROT_ERR_VALID_DEVICE_ID BIT_ULL(2)
+#define PROT_ERR_VALID_SERIAL_NUMBER BIT_ULL(3)
+#define PROT_ERR_VALID_CAPABILITY BIT_ULL(4)
+#define PROT_ERR_VALID_DVSEC BIT_ULL(5)
+#define PROT_ERR_VALID_ERROR_LOG BIT_ULL(6)
+
+/*
+ * The layout and values of the enumeration match the CXL Agent Type
+ * field in UEFI 2.10 Section N.2.13.
+ */
+enum {
+ RCD, /* Restricted CXL Device */
+ RCH_DP, /* Restricted CXL Host Downstream Port */
+ DEVICE, /* CXL Device */
+ LD, /* CXL Logical Device */
+ FMLD, /* CXL Fabric Manager managed Logical Device */
+ RP, /* CXL Root Port */
+ DSP, /* CXL Downstream Switch Port */
+ USP, /* CXL Upstream Switch Port */
+};
+
+#pragma pack(1)
+
+/* Compute Express Link Protocol Error Section, UEFI v2.10 sec N.2.13 */
+struct cxl_cper_sec_prot_err {
+ u64 valid_bits;
+ u8 agent_type;
+ u8 reserved[7];
+
+ /*
+ * Except for RCH Downstream Port, all the remaining CXL Agent
+ * types are uniquely identified by the PCIe compatible SBDF number.
+ */
+ union {
+ u64 rcrb_base_addr;
+ struct {
+ u8 function;
+ u8 device;
+ u8 bus;
+ u16 segment;
+ u8 reserved_1[3];
+ };
+ } agent_addr;
+
+ struct {
+ u16 vendor_id;
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_id;
+ u8 class_code[2];
+ u16 slot;
+ u8 reserved_1[4];
+ } device_id;
+
+ struct {
+ u32 lower_dw;
+ u32 upper_dw;
+ } dev_serial_num;
+
+ u8 capability[60];
+ u16 dvsec_len;
+ u16 err_len;
+ u8 reserved_2[4];
+};
+
+#pragma pack()
+
+/* CXL RAS Capability Structure, CXL v3.0 sec 8.2.4.16 */
+struct cxl_ras_capability_regs {
+ u32 uncor_status;
+ u32 uncor_mask;
+ u32 uncor_severity;
+ u32 cor_status;
+ u32 cor_mask;
+ u32 cap_control;
+ u32 header_log[16];
+};
+
+struct cxl_cper_prot_err_work_data {
+ struct cxl_cper_sec_prot_err prot_err;
+ struct cxl_ras_capability_regs ras_cap;
+ int severity;
+};
+
+#ifdef CONFIG_ACPI_APEI_GHES
+int cxl_cper_register_work(struct work_struct *work);
+int cxl_cper_unregister_work(struct work_struct *work);
+int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd);
+int cxl_cper_register_prot_err_work(struct work_struct *work);
+int cxl_cper_unregister_prot_err_work(struct work_struct *work);
+int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd);
+#else
+static inline int cxl_cper_register_work(struct work_struct *work)
+{
+ return 0;
+}
+
+static inline int cxl_cper_unregister_work(struct work_struct *work)
+{
+ return 0;
+}
+static inline int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
+{
+ return 0;
+}
+static inline int cxl_cper_register_prot_err_work(struct work_struct *work)
+{
+ return 0;
+}
+static inline int cxl_cper_unregister_prot_err_work(struct work_struct *work)
+{
+ return 0;
+}
+static inline int cxl_cper_prot_err_kfifo_get(struct cxl_cper_prot_err_work_data *wd)
+{
+ return 0;
+}
+#endif
+
+#endif /* _LINUX_CXL_EVENT_H */
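A hedged consumer sketch for the GHES-backed kfifo interface above (cxl_handle_event() stands in for a driver's record handler):

	static void cxl_cper_work_fn(struct work_struct *work)
	{
		struct cxl_cper_work_data wd;

		/* Drain every record GHES queued before this work ran. */
		while (cxl_cper_kfifo_get(&wd))
			cxl_handle_event(wd.event_type, &wd.rec);
	}

	static DECLARE_WORK(cxl_cper_work, cxl_cper_work_fn);

	/* probe:  */ cxl_cper_register_work(&cxl_cper_work);
	/* remove: */ cxl_cper_unregister_work(&cxl_cper_work);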
diff --git a/include/cxl/features.h b/include/cxl/features.h
new file mode 100644
index 000000000000..b9297693dae7
--- /dev/null
+++ b/include/cxl/features.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024-2025 Intel Corporation. */
+#ifndef __CXL_FEATURES_H__
+#define __CXL_FEATURES_H__
+
+#include <linux/uuid.h>
+#include <linux/fwctl.h>
+#include <uapi/cxl/features.h>
+
+/* Feature UUIDs used by the kernel */
+#define CXL_FEAT_PATROL_SCRUB_UUID \
+ UUID_INIT(0x96dad7d6, 0xfde8, 0x482b, 0xa7, 0x33, 0x75, 0x77, 0x4e, \
+ 0x06, 0xdb, 0x8a)
+
+#define CXL_FEAT_ECS_UUID \
+ UUID_INIT(0xe5b13f22, 0x2328, 0x4a14, 0xb8, 0xba, 0xb9, 0x69, 0x1e, \
+ 0x89, 0x33, 0x86)
+
+#define CXL_FEAT_SPPR_UUID \
+ UUID_INIT(0x892ba475, 0xfad8, 0x474e, 0x9d, 0x3e, 0x69, 0x2c, 0x91, \
+ 0x75, 0x68, 0xbb)
+
+#define CXL_FEAT_HPPR_UUID \
+ UUID_INIT(0x80ea4521, 0x786f, 0x4127, 0xaf, 0xb1, 0xec, 0x74, 0x59, \
+ 0xfb, 0x0e, 0x24)
+
+#define CXL_FEAT_CACHELINE_SPARING_UUID \
+ UUID_INIT(0x96C33386, 0x91dd, 0x44c7, 0x9e, 0xcb, 0xfd, 0xaf, 0x65, \
+ 0x03, 0xba, 0xc4)
+
+#define CXL_FEAT_ROW_SPARING_UUID \
+ UUID_INIT(0x450ebf67, 0xb135, 0x4f97, 0xa4, 0x98, 0xc2, 0xd5, 0x7f, \
+ 0x27, 0x9b, 0xed)
+
+#define CXL_FEAT_BANK_SPARING_UUID \
+ UUID_INIT(0x78b79636, 0x90ac, 0x4b64, 0xa4, 0xef, 0xfa, 0xac, 0x5d, \
+ 0x18, 0xa8, 0x63)
+
+#define CXL_FEAT_RANK_SPARING_UUID \
+ UUID_INIT(0x34dbaff5, 0x0552, 0x4281, 0x8f, 0x76, 0xda, 0x0b, 0x5e, \
+ 0x7a, 0x76, 0xa7)
+
+/* Feature commands capability supported by a device */
+enum cxl_features_capability {
+ CXL_FEATURES_NONE = 0,
+ CXL_FEATURES_RO,
+ CXL_FEATURES_RW,
+};
+
+/**
+ * struct cxl_features_state - The Features state for the device
+ * @cxlds: Pointer to CXL device state
+ * @entries: CXL feature entry context
+ */
+struct cxl_features_state {
+ struct cxl_dev_state *cxlds;
+ struct cxl_feat_entries {
+ int num_features;
+ int num_user_features;
+ struct cxl_feat_entry ent[] __counted_by(num_features);
+ } *entries;
+};
+
+struct cxl_mailbox;
+struct cxl_memdev;
+#ifdef CONFIG_CXL_FEATURES
+struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds);
+int devm_cxl_setup_features(struct cxl_dev_state *cxlds);
+int devm_cxl_setup_fwctl(struct device *host, struct cxl_memdev *cxlmd);
+#else
+static inline struct cxl_features_state *to_cxlfs(struct cxl_dev_state *cxlds)
+{
+ return NULL;
+}
+
+static inline int devm_cxl_setup_features(struct cxl_dev_state *cxlds)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int devm_cxl_setup_fwctl(struct device *host,
+ struct cxl_memdev *cxlmd)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif
diff --git a/include/cxl/mailbox.h b/include/cxl/mailbox.h
new file mode 100644
index 000000000000..c4e99e2e3a9d
--- /dev/null
+++ b/include/cxl/mailbox.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation. */
+#ifndef __CXL_MBOX_H__
+#define __CXL_MBOX_H__
+#include <linux/rcuwait.h>
+#include <cxl/features.h>
+#include <uapi/linux/cxl_mem.h>
+
+/**
+ * struct cxl_mbox_cmd - A command to be submitted to hardware.
+ * @opcode: (input) The command set and command submitted to hardware.
+ * @payload_in: (input) Pointer to the input payload.
+ * @payload_out: (output) Pointer to the output payload. Must be allocated by
+ * the caller.
+ * @size_in: (input) Number of bytes to load from @payload_in.
+ * @size_out: (input) Max number of bytes loaded into @payload_out.
+ *            (output) Number of bytes generated by the device. For fixed-size
+ *            output commands this is always expected to be deterministic. For
+ *            variable-sized output commands, it tells the exact number of
+ *            bytes written.
+ * @min_out: (input) Minimum number of bytes expected in @payload_out;
+ *           used to validate the output payload size.
+ * @poll_count: (input) Number of timeouts to attempt.
+ * @poll_interval_ms: (input) Time between mailbox background command polling
+ * interval timeouts.
+ * @return_code: (output) Error code returned from hardware.
+ *
+ * This is the primary mechanism used to send commands to the hardware.
+ * All the fields except @payload_* correspond exactly to the fields described
+ * in the Command Register section of the CXL 2.0 spec, 8.2.8.4.5. @payload_in
+ * and @payload_out are written to, and read from, the Command Payload
+ * Registers defined in CXL 2.0 8.2.8.4.8.
+ */
+struct cxl_mbox_cmd {
+ u16 opcode;
+ void *payload_in;
+ void *payload_out;
+ size_t size_in;
+ size_t size_out;
+ size_t min_out;
+ int poll_count;
+ int poll_interval_ms;
+ u16 return_code;
+};
+
+/**
+ * struct cxl_mailbox - context for CXL mailbox operations
+ * @host: device that hosts the mailbox
+ * @enabled_cmds: mailbox commands that are enabled by the driver
+ * @exclusive_cmds: mailbox commands that are exclusive to the kernel
+ * @payload_size: Size of space for payload
+ * (CXL 3.1 8.2.8.4.3 Mailbox Capabilities Register)
+ * @mbox_mutex: mutex protects device mailbox and firmware
+ * @mbox_wait: rcuwait for mailbox
+ * @mbox_send: @host specific transport for transmitting mailbox commands
+ * @feat_cap: Features capability
+ */
+struct cxl_mailbox {
+ struct device *host;
+ DECLARE_BITMAP(enabled_cmds, CXL_MEM_COMMAND_ID_MAX);
+ DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
+ size_t payload_size;
+ struct mutex mbox_mutex; /* lock to protect mailbox context */
+ struct rcuwait mbox_wait;
+ int (*mbox_send)(struct cxl_mailbox *cxl_mbox, struct cxl_mbox_cmd *cmd);
+ enum cxl_features_capability feat_cap;
+};
+
+int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host);
+
+#endif
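A hedged sketch of a command submission through this context (the opcode value and the output buffer are placeholders for a real command definition from the device's command set):

	u8 id_out[64];			/* command-specific output payload */
	struct cxl_mbox_cmd cmd = {
		.opcode = 0x4000,	/* e.g. Identify, per the command set */
		.payload_out = id_out,
		.size_out = sizeof(id_out),
		.min_out = sizeof(id_out),
	};
	int rc;

	rc = cxl_mbox->mbox_send(cxl_mbox, &cmd);
	if (rc)
		return rc;
	/* cmd.size_out now holds the byte count the device returned */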
diff --git a/include/drm/Makefile b/include/drm/Makefile
new file mode 100644
index 000000000000..48fae3f167c7
--- /dev/null
+++ b/include/drm/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# Ensure drm headers are self-contained and pass kernel-doc
+hdrtest-files := \
+ $(shell cd $(src) && find * -name '*.h' 2>/dev/null)
+
+always-$(CONFIG_DRM_HEADER_TEST) += \
+ $(patsubst %.h,%.hdrtest, $(hdrtest-files))
+
+# Include the header twice to detect missing include guard.
+quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
+ cmd_hdrtest = \
+ $(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
+ PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
+ touch $@
+
+$(obj)/%.hdrtest: $(src)/%.h FORCE
+ $(call if_changed_dep,hdrtest)
diff --git a/include/drm/amd/isp.h b/include/drm/amd/isp.h
new file mode 100644
index 000000000000..ec868288abf2
--- /dev/null
+++ b/include/drm/amd/isp.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2025 Advanced Micro Devices, Inc. All rights reserved.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#ifndef __ISP_H__
+#define __ISP_H__
+
+#include <linux/types.h>
+
+struct device;
+
+struct isp_platform_data {
+ void *adev;
+ u32 asic_type;
+ resource_size_t base_rmmio_size;
+};
+
+int isp_user_buffer_alloc(struct device *dev, void *dmabuf,
+ void **buf_obj, u64 *buf_addr);
+
+void isp_user_buffer_free(void *buf_obj);
+
+int isp_kernel_buffer_alloc(struct device *dev, u64 size,
+ void **buf_obj, u64 *gpu_addr, void **cpu_addr);
+
+void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr);
+
+#endif
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 336e36506910..9be85b821aa6 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -22,6 +22,9 @@
#ifndef __AMD_ASIC_TYPE_H__
#define __AMD_ASIC_TYPE_H__
+
+#include <linux/types.h>
+
/*
* Supported ASIC types
*/
@@ -53,15 +56,24 @@ enum amd_asic_type {
CHIP_RENOIR, /* 24 */
CHIP_ALDEBARAN, /* 25 */
CHIP_NAVI10, /* 26 */
- CHIP_NAVI14, /* 27 */
- CHIP_NAVI12, /* 28 */
- CHIP_SIENNA_CICHLID, /* 29 */
- CHIP_NAVY_FLOUNDER, /* 30 */
- CHIP_VANGOGH, /* 31 */
- CHIP_DIMGREY_CAVEFISH, /* 32 */
+ CHIP_CYAN_SKILLFISH, /* 27 */
+ CHIP_NAVI14, /* 28 */
+ CHIP_NAVI12, /* 29 */
+ CHIP_SIENNA_CICHLID, /* 30 */
+ CHIP_NAVY_FLOUNDER, /* 31 */
+ CHIP_VANGOGH, /* 32 */
+ CHIP_DIMGREY_CAVEFISH, /* 33 */
+ CHIP_BEIGE_GOBY, /* 34 */
+ CHIP_YELLOW_CARP, /* 35 */
+ CHIP_IP_DISCOVERY, /* 36 */
CHIP_LAST,
};
extern const char *amdgpu_asic_name[];
+struct amdgpu_asic_type_quirk {
+ unsigned short device; /* PCI device ID */
+ u8 revision; /* revision ID */
+ unsigned short type; /* real ASIC type */
+};
#endif /*__AMD_ASIC_TYPE_H__ */
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index b0dcc07334a1..cf17646c1310 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -10,16 +10,18 @@
#include <drm/drm_crtc.h>
struct analogix_dp_device;
+struct drm_dp_aux;
enum analogix_dp_devtype {
EXYNOS_DP,
RK3288_DP,
RK3399_EDP,
+ RK3588_EDP,
};
static inline bool is_rockchip(enum analogix_dp_devtype type)
{
- return type == RK3288_DP || type == RK3399_EDP;
+ return type == RK3288_DP || type == RK3399_EDP || type == RK3588_EDP;
}
struct analogix_dp_plat_data {
@@ -29,8 +31,7 @@ struct analogix_dp_plat_data {
struct drm_connector *connector;
bool skip_connector;
- int (*power_on_start)(struct analogix_dp_plat_data *);
- int (*power_on_end)(struct analogix_dp_plat_data *);
+ int (*power_on)(struct analogix_dp_plat_data *);
int (*power_off)(struct analogix_dp_plat_data *);
int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *,
struct drm_connector *);
@@ -45,9 +46,11 @@ struct analogix_dp_device *
analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data);
int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev);
void analogix_dp_unbind(struct analogix_dp_device *dp);
-void analogix_dp_remove(struct analogix_dp_device *dp);
int analogix_dp_start_crc(struct drm_connector *connector);
int analogix_dp_stop_crc(struct drm_connector *connector);
+struct analogix_dp_plat_data *analogix_dp_aux_to_plat_data(struct drm_dp_aux *aux);
+struct drm_dp_aux *analogix_dp_get_aux(struct analogix_dp_device *dp);
+
#endif /* _ANALOGIX_DP_H_ */
diff --git a/include/drm/bridge/aux-bridge.h b/include/drm/bridge/aux-bridge.h
new file mode 100644
index 000000000000..c2f5a855512f
--- /dev/null
+++ b/include/drm/bridge/aux-bridge.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ *
+ * Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+ */
+#ifndef DRM_AUX_BRIDGE_H
+#define DRM_AUX_BRIDGE_H
+
+#include <drm/drm_connector.h>
+
+struct auxiliary_device;
+
+#if IS_ENABLED(CONFIG_DRM_AUX_BRIDGE)
+int drm_aux_bridge_register(struct device *parent);
+#else
+static inline int drm_aux_bridge_register(struct device *parent)
+{
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_AUX_HPD_BRIDGE)
+struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, struct device_node *np);
+int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev);
+struct device *drm_dp_hpd_bridge_register(struct device *parent,
+ struct device_node *np);
+void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status);
+#else
+static inline struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent,
+ struct device_node *np)
+{
+ return NULL;
+}
+
+static inline int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev)
+{
+ return 0;
+}
+
+static inline struct device *drm_dp_hpd_bridge_register(struct device *parent,
+ struct device_node *np)
+{
+ return NULL;
+}
+
+static inline void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status status)
+{
+}
+#endif
+
+#endif
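A hedged sketch of the HPD-bridge flow for a DisplayPort controller driver (parent, np, and dev are the driver's usual device handles):

	struct auxiliary_device *adev;
	int ret;

	adev = devm_drm_dp_hpd_bridge_alloc(parent, np);
	if (IS_ERR(adev))
		return PTR_ERR(adev);

	ret = devm_drm_dp_hpd_bridge_add(dev, adev);
	if (ret)
		return ret;

	/* later, from the controller's HPD interrupt handler: */
	drm_aux_hpd_bridge_notify(&adev->dev, connector_status_connected);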
diff --git a/include/drm/bridge/dw_dp.h b/include/drm/bridge/dw_dp.h
new file mode 100644
index 000000000000..d05df49fd884
--- /dev/null
+++ b/include/drm/bridge/dw_dp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ */
+
+#ifndef __DW_DP__
+#define __DW_DP__
+
+#include <linux/device.h>
+
+struct drm_encoder;
+struct dw_dp;
+
+struct dw_dp_plat_data {
+ u32 max_link_rate;
+};
+
+struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder,
+ const struct dw_dp_plat_data *plat_data);
+#endif /* __DW_DP__ */
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index ea34ca146b82..336f062e1f9d 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -126,6 +126,8 @@ struct dw_hdmi_phy_ops {
struct dw_hdmi_plat_data {
struct regmap *regm;
+ unsigned int output_port;
+
unsigned long input_bus_encoding;
bool use_drm_infoframe;
bool ycbcr_420_allowed;
@@ -141,6 +143,17 @@ struct dw_hdmi_plat_data {
const struct drm_display_info *info,
const struct drm_display_mode *mode);
+ /*
+	 * priv_audio is used by an additional audio device to retrieve
+	 * its driver data through this dw_hdmi_plat_data.
+ */
+ void *priv_audio;
+
+ /* Platform-specific audio enable/disable (optional) */
+ void (*enable_audio)(struct dw_hdmi *hdmi, int channel,
+ int width, int rate, int non_pcm, int iec958);
+ void (*disable_audio)(struct dw_hdmi *hdmi);
+
/* Vendor PHY support */
const struct dw_hdmi_phy_ops *phy_ops;
const char *phy_name;
@@ -153,6 +166,8 @@ struct dw_hdmi_plat_data {
const struct dw_hdmi_phy_config *phy_config;
int (*configure_phy)(struct dw_hdmi *hdmi, void *data,
unsigned long mpixelclock);
+
+ unsigned int disable_cec : 1;
};
struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
@@ -169,6 +184,9 @@ void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn,
struct device *codec_dev);
+void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm);
+void dw_hdmi_set_sample_iec958(struct dw_hdmi *hdmi, unsigned int iec958);
+void dw_hdmi_set_sample_width(struct dw_hdmi *hdmi, unsigned int width);
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt);
void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi, u8 *channel_status);
@@ -183,9 +201,11 @@ void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address);
void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
unsigned char addr);
+void dw_hdmi_phy_gen1_reset(struct dw_hdmi *hdmi);
+
void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable);
void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable);
-void dw_hdmi_phy_reset(struct dw_hdmi *hdmi);
+void dw_hdmi_phy_gen2_reset(struct dw_hdmi *hdmi);
enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi,
void *data);
@@ -193,4 +213,8 @@ void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data,
bool force, bool disabled, bool rxsense);
void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data);
+bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi);
+
+const struct dw_hdmi_plat_data *dw_hdmi_to_plat_data(struct dw_hdmi *hdmi);
+
#endif /* __IMX_HDMI_H__ */
diff --git a/include/drm/bridge/dw_hdmi_qp.h b/include/drm/bridge/dw_hdmi_qp.h
new file mode 100644
index 000000000000..3f461f6b9bbf
--- /dev/null
+++ b/include/drm/bridge/dw_hdmi_qp.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021-2022 Rockchip Electronics Co., Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ */
+
+#ifndef __DW_HDMI_QP__
+#define __DW_HDMI_QP__
+
+struct device;
+struct drm_encoder;
+struct dw_hdmi_qp;
+struct platform_device;
+
+struct dw_hdmi_qp_phy_ops {
+ int (*init)(struct dw_hdmi_qp *hdmi, void *data);
+ void (*disable)(struct dw_hdmi_qp *hdmi, void *data);
+ enum drm_connector_status (*read_hpd)(struct dw_hdmi_qp *hdmi, void *data);
+ void (*setup_hpd)(struct dw_hdmi_qp *hdmi, void *data);
+};
+
+struct dw_hdmi_qp_plat_data {
+ const struct dw_hdmi_qp_phy_ops *phy_ops;
+ void *phy_data;
+ int main_irq;
+ int cec_irq;
+ unsigned long ref_clk_rate;
+ /* Supported output formats: bitmask of @hdmi_colorspace */
+ unsigned int supported_formats;
+ /* Maximum bits per color channel: 8, 10 or 12 */
+ unsigned int max_bpc;
+};
+
+struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
+ struct drm_encoder *encoder,
+ const struct dw_hdmi_qp_plat_data *plat_data);
+void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi);
+#endif /* __DW_HDMI_QP__ */
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index bda8aa7c2280..65d5e68065e3 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -11,6 +11,10 @@
#include <linux/types.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_modes.h>
struct drm_display_mode;
@@ -51,7 +55,20 @@ struct dw_mipi_dsi_plat_data {
unsigned int max_data_lanes;
enum drm_mode_status (*mode_valid)(void *priv_data,
- const struct drm_display_mode *mode);
+ const struct drm_display_mode *mode,
+ unsigned long mode_flags,
+ u32 lanes, u32 format);
+
+ bool (*mode_fixup)(void *priv_data, const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ u32 *(*get_input_bus_fmts)(void *priv_data,
+ struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts);
const struct dw_mipi_dsi_phy_ops *phy_ops;
const struct dw_mipi_dsi_host_ops *host_ops;
@@ -66,5 +83,6 @@ void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi);
int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder);
void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi);
void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave);
+struct drm_bridge *dw_mipi_dsi_get_bridge(struct dw_mipi_dsi *dsi);
#endif /* __DW_MIPI_DSI__ */
diff --git a/include/drm/bridge/dw_mipi_dsi2.h b/include/drm/bridge/dw_mipi_dsi2.h
new file mode 100644
index 000000000000..c18c49379247
--- /dev/null
+++ b/include/drm/bridge/dw_mipi_dsi2.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Authors: Guochun Huang <hero.huang@rock-chips.com>
+ * Heiko Stuebner <heiko.stuebner@cherry.de>
+ */
+
+#ifndef __DW_MIPI_DSI2__
+#define __DW_MIPI_DSI2__
+
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_modes.h>
+
+struct drm_display_mode;
+struct drm_encoder;
+struct dw_mipi_dsi2;
+struct mipi_dsi_device;
+struct platform_device;
+
+enum dw_mipi_dsi2_phy_type {
+ DW_MIPI_DSI2_DPHY,
+ DW_MIPI_DSI2_CPHY,
+};
+
+struct dw_mipi_dsi2_phy_iface {
+ int ppi_width;
+ enum dw_mipi_dsi2_phy_type phy_type;
+};
+
+struct dw_mipi_dsi2_phy_timing {
+ u32 data_hs2lp;
+ u32 data_lp2hs;
+};
+
+struct dw_mipi_dsi2_phy_ops {
+ int (*init)(void *priv_data);
+ void (*power_on)(void *priv_data);
+ void (*power_off)(void *priv_data);
+ void (*get_interface)(void *priv_data, struct dw_mipi_dsi2_phy_iface *iface);
+ int (*get_lane_mbps)(void *priv_data,
+ const struct drm_display_mode *mode,
+ unsigned long mode_flags, u32 lanes, u32 format,
+ unsigned int *lane_mbps);
+ int (*get_timing)(void *priv_data, unsigned int lane_mbps,
+ struct dw_mipi_dsi2_phy_timing *timing);
+ int (*get_esc_clk_rate)(void *priv_data, unsigned int *esc_clk_rate);
+};
+
+struct dw_mipi_dsi2_host_ops {
+ int (*attach)(void *priv_data,
+ struct mipi_dsi_device *dsi);
+ int (*detach)(void *priv_data,
+ struct mipi_dsi_device *dsi);
+};
+
+struct dw_mipi_dsi2_plat_data {
+ struct regmap *regmap;
+ unsigned int max_data_lanes;
+
+ enum drm_mode_status (*mode_valid)(void *priv_data,
+ const struct drm_display_mode *mode,
+ unsigned long mode_flags,
+ u32 lanes, u32 format);
+
+ bool (*mode_fixup)(void *priv_data, const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+
+ u32 *(*get_input_bus_fmts)(void *priv_data,
+ struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts);
+
+ const struct dw_mipi_dsi2_phy_ops *phy_ops;
+ const struct dw_mipi_dsi2_host_ops *host_ops;
+
+ void *priv_data;
+};
+
+struct dw_mipi_dsi2 *dw_mipi_dsi2_probe(struct platform_device *pdev,
+ const struct dw_mipi_dsi2_plat_data *plat_data);
+void dw_mipi_dsi2_remove(struct dw_mipi_dsi2 *dsi2);
+int dw_mipi_dsi2_bind(struct dw_mipi_dsi2 *dsi2, struct drm_encoder *encoder);
+void dw_mipi_dsi2_unbind(struct dw_mipi_dsi2 *dsi2);
+
+#endif /* __DW_MIPI_DSI2__ */
diff --git a/include/drm/bridge/imx.h b/include/drm/bridge/imx.h
new file mode 100644
index 000000000000..b93f719fe0e7
--- /dev/null
+++ b/include/drm/bridge/imx.h
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ */
+
+#ifndef DRM_IMX_BRIDGE_H
+#define DRM_IMX_BRIDGE_H
+
+struct device;
+struct device_node;
+struct drm_bridge;
+
+struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
+ struct device_node *np,
+ int type);
+
+#endif
diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h
new file mode 100644
index 000000000000..31d7ed589233
--- /dev/null
+++ b/include/drm/bridge/samsung-dsim.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2022 Amarula Solutions(India)
+ * Author: Jagan Teki <jagan@amarulasolutions.com>
+ */
+
+#ifndef __SAMSUNG_DSIM__
+#define __SAMSUNG_DSIM__
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+
+struct platform_device;
+struct samsung_dsim;
+
+#define DSIM_STATE_ENABLED BIT(0)
+#define DSIM_STATE_INITIALIZED BIT(1)
+#define DSIM_STATE_CMD_LPM BIT(2)
+#define DSIM_STATE_VIDOUT_AVAILABLE BIT(3)
+
+enum samsung_dsim_type {
+ DSIM_TYPE_EXYNOS3250,
+ DSIM_TYPE_EXYNOS4210,
+ DSIM_TYPE_EXYNOS5410,
+ DSIM_TYPE_EXYNOS5422,
+ DSIM_TYPE_EXYNOS5433,
+ DSIM_TYPE_EXYNOS7870,
+ DSIM_TYPE_IMX8MM,
+ DSIM_TYPE_IMX8MP,
+ DSIM_TYPE_COUNT,
+};
+
+#define samsung_dsim_hw_is_exynos(hw) \
+ ((hw) >= DSIM_TYPE_EXYNOS3250 && (hw) <= DSIM_TYPE_EXYNOS5433)
+
+struct samsung_dsim_transfer {
+ struct list_head list;
+ struct completion completed;
+ int result;
+ struct mipi_dsi_packet packet;
+ u16 flags;
+ u16 tx_done;
+
+ u8 *rx_payload;
+ u16 rx_len;
+ u16 rx_done;
+};
+
+struct samsung_dsim_driver_data {
+ const unsigned int *reg_ofs;
+ unsigned int plltmr_reg;
+ unsigned int has_legacy_status_reg:1;
+ unsigned int has_freqband:1;
+ unsigned int has_clklane_stop:1;
+ unsigned int has_broken_fifoctrl_emptyhdr:1;
+ unsigned int has_sfrctrl:1;
+ struct clk_bulk_data *clk_data;
+ unsigned int num_clks;
+ unsigned int min_freq;
+ unsigned int max_freq;
+ unsigned int wait_for_hdr_fifo;
+ unsigned int wait_for_reset;
+ unsigned int num_bits_resol;
+ unsigned int video_mode_bit;
+ unsigned int pll_stable_bit;
+ unsigned int esc_clken_bit;
+ unsigned int byte_clken_bit;
+ unsigned int tx_req_hsclk_bit;
+ unsigned int lane_esc_clk_bit;
+ unsigned int lane_esc_data_offset;
+ unsigned int pll_p_offset;
+ unsigned int pll_m_offset;
+ unsigned int pll_s_offset;
+ unsigned int main_vsa_offset;
+ const unsigned int *reg_values;
+ unsigned int pll_fin_min;
+ unsigned int pll_fin_max;
+ u16 m_min;
+ u16 m_max;
+};
+
+struct samsung_dsim_host_ops {
+ int (*register_host)(struct samsung_dsim *dsim);
+ void (*unregister_host)(struct samsung_dsim *dsim);
+ int (*attach)(struct samsung_dsim *dsim, struct mipi_dsi_device *device);
+ void (*detach)(struct samsung_dsim *dsim, struct mipi_dsi_device *device);
+ irqreturn_t (*te_irq_handler)(struct samsung_dsim *dsim);
+};
+
+struct samsung_dsim_plat_data {
+ enum samsung_dsim_type hw_type;
+ const struct samsung_dsim_host_ops *host_ops;
+};
+
+struct samsung_dsim {
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge bridge;
+ struct drm_bridge *out_bridge;
+ struct device *dev;
+ struct drm_display_mode mode;
+
+ void __iomem *reg_base;
+ struct phy *phy;
+ struct clk *pll_clk;
+ struct regulator_bulk_data supplies[2];
+ int irq;
+ struct gpio_desc *te_gpio;
+
+ u32 pll_clk_rate;
+ u32 burst_clk_rate;
+ u32 hs_clock;
+ u32 esc_clk_rate;
+ u32 lanes;
+ u32 mode_flags;
+ u32 format;
+
+ bool swap_dn_dp_clk;
+ bool swap_dn_dp_data;
+ int state;
+ struct drm_property *brightness;
+ struct completion completed;
+
+ spinlock_t transfer_lock; /* protects transfer_list */
+ struct list_head transfer_list;
+
+ const struct samsung_dsim_driver_data *driver_data;
+ const struct samsung_dsim_plat_data *plat_data;
+
+ void *priv;
+};
+
+extern int samsung_dsim_probe(struct platform_device *pdev);
+extern void samsung_dsim_remove(struct platform_device *pdev);
+extern const struct dev_pm_ops samsung_dsim_pm_ops;
+
+#endif /* __SAMSUNG_DSIM__ */
diff --git a/include/drm/clients/drm_client_setup.h b/include/drm/clients/drm_client_setup.h
new file mode 100644
index 000000000000..46aab3fb46be
--- /dev/null
+++ b/include/drm/clients/drm_client_setup.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_CLIENT_SETUP_H
+#define DRM_CLIENT_SETUP_H
+
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_format_info;
+
+#if defined(CONFIG_DRM_CLIENT_SETUP)
+void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format);
+void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc);
+void drm_client_setup_with_color_mode(struct drm_device *dev, unsigned int color_mode);
+#else
+static inline void drm_client_setup(struct drm_device *dev,
+ const struct drm_format_info *format)
+{ }
+static inline void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc)
+{ }
+static inline void drm_client_setup_with_color_mode(struct drm_device *dev,
+ unsigned int color_mode)
+{ }
+#endif
+
+#endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/display/drm_dp.h
index 1e85c2021f2f..e4eebabab975 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/display/drm_dp.h
@@ -20,15 +20,10 @@
* OF THIS SOFTWARE.
*/
-#ifndef _DRM_DP_HELPER_H_
-#define _DRM_DP_HELPER_H_
+#ifndef _DRM_DP_H_
+#define _DRM_DP_H_
-#include <linux/delay.h>
-#include <linux/i2c.h>
#include <linux/types.h>
-#include <drm/drm_connector.h>
-
-struct drm_device;
/*
* Unless otherwise noted, all values are from the DP 1.1a spec. Note that
@@ -120,6 +115,7 @@ struct drm_device;
#define DP_MAX_LANE_COUNT 0x002
# define DP_MAX_LANE_COUNT_MASK 0x1f
+# define DP_POST_LT_ADJ_REQ_SUPPORTED (1 << 5) /* 1.3 */
# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */
# define DP_ENHANCED_FRAME_CAP (1 << 7)
@@ -153,6 +149,7 @@ struct drm_device;
#define DP_RECEIVE_PORT_0_CAP_0 0x008
# define DP_LOCAL_EDID_PRESENT (1 << 1)
# define DP_ASSOCIATED_TO_PRECEDING_PORT (1 << 2)
+# define DP_HBLANK_EXPANSION_CAPABLE (1 << 3)
#define DP_RECEIVE_PORT_0_BUFFER_SIZE 0x009
@@ -236,6 +233,8 @@ struct drm_device;
#define DP_RECEIVER_ALPM_CAP 0x02e /* eDP 1.4 */
# define DP_ALPM_CAP (1 << 0)
+# define DP_ALPM_PM_STATE_2A_SUPPORT (1 << 1) /* eDP 1.5 */
+# define DP_ALPM_AUX_LESS_CAP (1 << 2) /* eDP 1.5 */
#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP 0x02f /* eDP 1.4 */
# define DP_AUX_FRAME_SYNC_CAP (1 << 0)
@@ -244,6 +243,9 @@ struct drm_device;
#define DP_DSC_SUPPORT 0x060 /* DP 1.4 */
# define DP_DSC_DECOMPRESSION_IS_SUPPORTED (1 << 0)
+# define DP_DSC_PASSTHROUGH_IS_SUPPORTED (1 << 1)
+# define DP_DSC_DYNAMIC_PPS_UPDATE_SUPPORT_COMP_TO_COMP (1 << 2)
+# define DP_DSC_DYNAMIC_PPS_UPDATE_SUPPORT_UNCOMP_TO_COMP (1 << 3)
#define DP_DSC_REV 0x061
# define DP_DSC_MAJOR_MASK (0xf << 0)
@@ -256,6 +258,8 @@ struct drm_device;
# define DP_DSC_RC_BUF_BLK_SIZE_4 0x1
# define DP_DSC_RC_BUF_BLK_SIZE_16 0x2
# define DP_DSC_RC_BUF_BLK_SIZE_64 0x3
+# define DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT 3 /* DP 2.1a, in units of 2 MPixels/sec */
+# define DP_DSC_THROUGHPUT_MODE_0_DELTA_MASK (0x1f << DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT)
#define DP_DSC_RC_BUF_SIZE 0x063
@@ -282,12 +286,14 @@ struct drm_device;
#define DP_DSC_BLK_PREDICTION_SUPPORT 0x066
# define DP_DSC_BLK_PREDICTION_IS_SUPPORTED (1 << 0)
+# define DP_DSC_RGB_COLOR_CONV_BYPASS_SUPPORT (1 << 1)
#define DP_DSC_MAX_BITS_PER_PIXEL_LOW 0x067 /* eDP 1.4 */
#define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */
# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0)
-# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
+# define DP_DSC_MAX_BPP_DELTA_VERSION_MASK (0x3 << 5) /* eDP 1.5 & DP 2.0 */
+# define DP_DSC_MAX_BPP_DELTA_AVAILABILITY (1 << 7) /* eDP 1.5 & DP 2.0 */
#define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069
# define DP_DSC_RGB (1 << 0)
@@ -349,16 +355,20 @@ struct drm_device;
# define DP_DSC_24_PER_DP_DSC_SINK (1 << 2)
#define DP_DSC_BITS_PER_PIXEL_INC 0x06F
+# define DP_DSC_RGB_YCbCr444_MAX_BPP_DELTA_MASK 0x1f
+# define DP_DSC_RGB_YCbCr420_MAX_BPP_DELTA_MASK 0xe0
# define DP_DSC_BITS_PER_PIXEL_1_16 0x0
# define DP_DSC_BITS_PER_PIXEL_1_8 0x1
# define DP_DSC_BITS_PER_PIXEL_1_4 0x2
# define DP_DSC_BITS_PER_PIXEL_1_2 0x3
-# define DP_DSC_BITS_PER_PIXEL_1 0x4
+# define DP_DSC_BITS_PER_PIXEL_1_1 0x4
+# define DP_DSC_BITS_PER_PIXEL_MASK 0x7
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */
# define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED 3 /* eDP 1.4a */
+# define DP_PSR2_WITH_Y_COORD_ET_SUPPORTED 4 /* eDP 1.5, adopted eDP 1.4b SCR */
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
@@ -373,6 +383,7 @@ struct drm_device;
# define DP_PSR_SETUP_TIME_SHIFT 1
# define DP_PSR2_SU_Y_COORDINATE_REQUIRED (1 << 4) /* eDP 1.4a */
# define DP_PSR2_SU_GRANULARITY_REQUIRED (1 << 5) /* eDP 1.4b */
+# define DP_PSR2_SU_AUX_FRAME_SYNC_NOT_NEEDED (1 << 6)/* eDP 1.5, adopted eDP 1.4b SCR */
#define DP_PSR2_SU_X_GRANULARITY 0x072 /* eDP 1.4b */
#define DP_PSR2_SU_Y_GRANULARITY 0x074 /* eDP 1.4b */
@@ -451,9 +462,10 @@ struct drm_device;
# define DP_FEC_UNCORR_BLK_ERROR_COUNT_CAP (1 << 1)
# define DP_FEC_CORR_BLK_ERROR_COUNT_CAP (1 << 2)
# define DP_FEC_BIT_ERROR_COUNT_CAP (1 << 3)
+#define DP_FEC_CAPABILITY_1 0x091 /* 2.0 */
/* DP-HDMI2.1 PCON DSC ENCODER SUPPORT */
-#define DP_PCON_DSC_ENCODER_CAP_SIZE 0xC /* 0x9E - 0x92 */
+#define DP_PCON_DSC_ENCODER_CAP_SIZE 0xD /* 0x92 through 0x9E */
#define DP_PCON_DSC_ENCODER 0x092
# define DP_PCON_DSC_ENCODER_SUPPORTED (1 << 0)
# define DP_PCON_DSC_PPS_ENC_OVERRIDE (1 << 1)
@@ -535,6 +547,32 @@ struct drm_device;
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2
+/* DFP Capability Extension */
+#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */
+
+#define DP_PANEL_REPLAY_CAP_SUPPORT 0x0b0 /* DP 2.0 */
+# define DP_PANEL_REPLAY_SUPPORT (1 << 0)
+# define DP_PANEL_REPLAY_SU_SUPPORT (1 << 1)
+# define DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT (1 << 2) /* eDP 1.5 */
+
+#define DP_PANEL_REPLAY_CAP_SIZE 7
+
+#define DP_PANEL_REPLAY_CAP_CAPABILITY 0xb1
+# define DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_SHIFT 1 /* DP 2.1a */
+# define DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK (3 << DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_SHIFT)
+# define DP_DSC_DECODE_CAPABILITY_IN_PR_SUPPORTED 0x00
+# define DP_DSC_DECODE_CAPABILITY_IN_PR_FULL_FRAME_ONLY 0x01
+# define DP_DSC_DECODE_CAPABILITY_IN_PR_NOT_SUPPORTED 0x02
+# define DP_DSC_DECODE_CAPABILITY_IN_PR_RESERVED 0x03
+# define DP_PANEL_REPLAY_ASYNC_VIDEO_TIMING_NOT_SUPPORTED_IN_PR (1 << 3)
+# define DP_PANEL_REPLAY_DSC_CRC_OF_MULTIPLE_SUS_SUPPORTED (1 << 4)
+# define DP_PANEL_REPLAY_SU_GRANULARITY_REQUIRED (1 << 5)
+# define DP_PANEL_REPLAY_SU_Y_GRANULARITY_EXTENDED_CAPABILITY_SUPPORTED (1 << 6)
+# define DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP (1 << 7)
+
+#define DP_PANEL_REPLAY_CAP_X_GRANULARITY 0xb2
+#define DP_PANEL_REPLAY_CAP_Y_GRANULARITY 0xb4
+
/* Link Configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
@@ -548,12 +586,14 @@ struct drm_device;
#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
+# define DP_POST_LT_ADJ_REQ_GRANTED (1 << 5) /* 1.3 */
# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
#define DP_TRAINING_PATTERN_SET 0x102
# define DP_TRAINING_PATTERN_DISABLE 0
# define DP_TRAINING_PATTERN_1 1
# define DP_TRAINING_PATTERN_2 2
+# define DP_TRAINING_PATTERN_2_CDS 3 /* 2.0 E11 */
# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
# define DP_TRAINING_PATTERN_4 7 /* 1.4 */
# define DP_TRAINING_PATTERN_MASK 0x3
@@ -600,6 +640,7 @@ struct drm_device;
#define DP_DOWNSPREAD_CTRL 0x107
# define DP_SPREAD_AMP_0_5 (1 << 4)
+# define DP_FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE (1 << 6)
# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */
#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
@@ -636,6 +677,9 @@ struct drm_device;
# define DP_LINK_QUAL_PATTERN_PRSBS31 0x38
# define DP_LINK_QUAL_PATTERN_CUSTOM 0x40
# define DP_LINK_QUAL_PATTERN_SQUARE 0x48
+# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DISABLED 0x49
+# define DP_LINK_QUAL_PATTERN_SQUARE_DEEMPHASIS_DISABLED 0x4a
+# define DP_LINK_QUAL_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED 0x4b
#define DP_TRAINING_LANE0_1_SET2 0x10f
#define DP_TRAINING_LANE2_3_SET2 0x110
@@ -659,7 +703,8 @@ struct drm_device;
#define DP_RECEIVER_ALPM_CONFIG 0x116 /* eDP 1.4 */
# define DP_ALPM_ENABLE (1 << 0)
-# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1)
+# define DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE (1 << 1) /* eDP 1.5 */
+# define DP_ALPM_MODE_AUX_LESS (1 << 2) /* eDP 1.5 */
#define DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF 0x117 /* eDP 1.4 */
# define DP_AUX_FRAME_SYNC_ENABLE (1 << 0)
@@ -668,6 +713,9 @@ struct drm_device;
#define DP_UPSTREAM_DEVICE_DP_PWR_NEED 0x118 /* 1.2 */
# define DP_PWR_NOT_NEEDED (1 << 0)
+#define DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_GRANT 0x119 /* 1.4a */
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_GRANTED (1 << 0)
+
#define DP_FEC_CONFIGURATION 0x120 /* 1.4 */
# define DP_FEC_READY (1 << 0)
# define DP_FEC_ERR_COUNT_SEL_MASK (7 << 1)
@@ -681,20 +729,26 @@ struct drm_device;
# define DP_FEC_LANE_2_SELECT (2 << 4)
# define DP_FEC_LANE_3_SELECT (3 << 4)
+#define DP_SDP_ERROR_DETECTION_CONFIGURATION 0x121 /* DP 2.0 E11 */
+#define DP_SDP_CRC16_128B132B_EN BIT(0)
+
#define DP_AUX_FRAME_SYNC_VALUE 0x15c /* eDP 1.4 */
# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
#define DP_DSC_ENABLE 0x160 /* DP 1.4 */
# define DP_DECOMPRESSION_EN (1 << 0)
-
-#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
-# define DP_PSR_ENABLE (1 << 0)
-# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
-# define DP_PSR_CRC_VERIFICATION (1 << 2)
-# define DP_PSR_FRAME_CAPTURE (1 << 3)
-# define DP_PSR_SELECTIVE_UPDATE (1 << 4)
-# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5)
-# define DP_PSR_ENABLE_PSR2 (1 << 6) /* eDP 1.4a */
+# define DP_DSC_PASSTHROUGH_EN (1 << 1)
+#define DP_DSC_CONFIGURATION 0x161 /* DP 2.0 */
+
+#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
+# define DP_PSR_ENABLE BIT(0)
+# define DP_PSR_MAIN_LINK_ACTIVE BIT(1)
+# define DP_PSR_CRC_VERIFICATION BIT(2)
+# define DP_PSR_FRAME_CAPTURE BIT(3)
+# define DP_PSR_SU_REGION_SCANLINE_CAPTURE BIT(4) /* eDP 1.4a */
+# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS BIT(5) /* eDP 1.4a */
+# define DP_PSR_ENABLE_PSR2 BIT(6) /* eDP 1.4a */
+# define DP_PSR_ENABLE_SU_REGION_ET BIT(7) /* eDP 1.5 */
#define DP_ADAPTER_CTRL 0x1a0
# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
@@ -702,6 +756,23 @@ struct drm_device;
#define DP_BRANCH_DEVICE_CTRL 0x1a1
# define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0)
+#define PANEL_REPLAY_CONFIG 0x1b0 /* DP 2.0 */
+# define DP_PANEL_REPLAY_ENABLE (1 << 0)
+# define DP_PANEL_REPLAY_VSC_SDP_CRC_EN (1 << 1) /* eDP 1.5 */
+# define DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN (1 << 3)
+# define DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN (1 << 4)
+# define DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN (1 << 5)
+# define DP_PANEL_REPLAY_SU_ENABLE (1 << 6)
+# define DP_PANEL_REPLAY_ENABLE_SU_REGION_ET (1 << 7) /* DP 2.1 */
+
+#define PANEL_REPLAY_CONFIG2 0x1b1 /* eDP 1.5 */
+# define DP_PANEL_REPLAY_SINK_REFRESH_RATE_UNLOCK_GRANTED (1 << 0)
+# define DP_PANEL_REPLAY_CRC_VERIFICATION (1 << 1)
+# define DP_PANEL_REPLAY_SU_Y_GRANULARITY_EXTENDED_EN (1 << 2)
+# define DP_PANEL_REPLAY_SU_Y_GRANULARITY_EXTENDED_VAL_SEL_SHIFT 3
+# define DP_PANEL_REPLAY_SU_Y_GRANULARITY_EXTENDED_VAL_SEL_MASK (0xf << 3)
+# define DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE (1 << 7)
+
#define DP_PAYLOAD_ALLOCATE_SET 0x1c0
#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1
#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2
@@ -731,16 +802,20 @@ struct drm_device;
DP_LANE_CHANNEL_EQ_DONE | \
DP_LANE_SYMBOL_LOCKED)
-#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
-
-#define DP_INTERLANE_ALIGN_DONE (1 << 0)
-#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
-#define DP_LINK_STATUS_UPDATED (1 << 7)
+#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
+#define DP_INTERLANE_ALIGN_DONE (1 << 0)
+#define DP_POST_LT_ADJ_REQ_IN_PROGRESS (1 << 1) /* 1.3 */
+#define DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE (1 << 2) /* 2.0 E11 */
+#define DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE (1 << 3) /* 2.0 E11 */
+#define DP_128B132B_LT_FAILED (1 << 4) /* 2.0 E11 */
+#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
+#define DP_LINK_STATUS_UPDATED (1 << 7)
#define DP_SINK_STATUS 0x205
# define DP_RECEIVE_PORT_0_STATUS (1 << 0)
# define DP_RECEIVE_PORT_1_STATUS (1 << 1)
# define DP_STREAM_REGENERATION_STATUS (1 << 2) /* 2.0 */
+# define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3) /* 2.0 */
#define DP_ADJUST_REQUEST_LANE0_1 0x206
#define DP_ADJUST_REQUEST_LANE2_3 0x207
@@ -863,6 +938,8 @@ struct drm_device;
# define DP_PHY_TEST_PATTERN_80BIT_CUSTOM 0x4
# define DP_PHY_TEST_PATTERN_CP2520 0x5
+#define DP_PHY_SQUARE_PATTERN 0x249
+
#define DP_TEST_HBR2_SCRAMBLER_RESET 0x24A
#define DP_TEST_80BIT_CUSTOM_PATTERN_7_0 0x250
#define DP_TEST_80BIT_CUSTOM_PATTERN_15_8 0x251
@@ -940,6 +1017,7 @@ struct drm_device;
# define DP_EDP_14 0x03
# define DP_EDP_14a 0x04 /* eDP 1.4a */
# define DP_EDP_14b 0x05 /* eDP 1.4b */
+# define DP_EDP_15 0x06 /* eDP 1.5 */
#define DP_EDP_GENERAL_CAP_1 0x701
# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0)
@@ -963,6 +1041,8 @@ struct drm_device;
#define DP_EDP_GENERAL_CAP_2 0x703
# define DP_EDP_OVERDRIVE_ENGINE_ENABLED (1 << 0)
+# define DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE (1 << 4)
+# define DP_EDP_SMOOTH_BRIGHTNESS_CAPABLE (1 << 6) /* eDP 2.0 */
#define DP_EDP_GENERAL_CAP_3 0x704 /* eDP 1.4 */
# define DP_EDP_X_REGION_CAP_MASK (0xf << 0)
@@ -988,6 +1068,7 @@ struct drm_device;
# define DP_EDP_DYNAMIC_BACKLIGHT_ENABLE (1 << 4)
# define DP_EDP_REGIONAL_BACKLIGHT_ENABLE (1 << 5)
# define DP_EDP_UPDATE_REGION_BRIGHTNESS (1 << 6) /* eDP 1.4 */
+# define DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE (1 << 7)
#define DP_EDP_BACKLIGHT_BRIGHTNESS_MSB 0x722
#define DP_EDP_BACKLIGHT_BRIGHTNESS_LSB 0x723
@@ -1012,6 +1093,7 @@ struct drm_device;
#define DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET 0x732
#define DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET 0x733
+#define DP_EDP_PANEL_TARGET_LUMINANCE_VALUE 0x734
#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */
#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */
@@ -1028,11 +1110,8 @@ struct drm_device;
#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */
/* DPRX Event Status Indicator */
-#define DP_SINK_COUNT_ESI 0x2002 /* 1.2 */
-/* 0-5 sink count */
-# define DP_SINK_COUNT_CP_READY (1 << 6)
-
-#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* 1.2 */
+#define DP_SINK_COUNT_ESI 0x2002 /* same as 0x200 */
+#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* same as 0x201 */
#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */
# define DP_RX_GTC_MSTR_REQ_STATUS_CHANGE (1 << 0)
@@ -1045,6 +1124,7 @@ struct drm_device;
# define STREAM_STATUS_CHANGED (1 << 2)
# define HDMI_LINK_STATUS_CHANGED (1 << 3)
# define CONNECTED_OFF_ENTRY_REQUESTED (1 << 4)
+# define DP_TUNNELING_IRQ (1 << 5)
#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
# define DP_PSR_LINK_CRC_ERROR (1 << 0)
@@ -1086,6 +1166,18 @@ struct drm_device;
#define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */
#define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */
+#define DP_PANEL_REPLAY_ERROR_STATUS 0x2020 /* DP 2.1 */
+# define DP_PANEL_REPLAY_LINK_CRC_ERROR (1 << 0)
+# define DP_PANEL_REPLAY_RFB_STORAGE_ERROR (1 << 1)
+# define DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR (1 << 2)
+
+#define DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS 0x2022 /* DP 2.1 */
+# define DP_SINK_DEVICE_PANEL_REPLAY_STATUS_MASK (7 << 0)
+# define DP_SINK_FRAME_LOCKED_SHIFT 3
+# define DP_SINK_FRAME_LOCKED_MASK (3 << 3)
+# define DP_SINK_FRAME_LOCKED_STATUS_VALID_SHIFT 5
+# define DP_SINK_FRAME_LOCKED_STATUS_VALID_MASK (1 << 5)
+
/* Extended Receiver Capability: See DP_DPCD_REV for definitions */
#define DP_DP13_DPCD_REV 0x2200
@@ -1099,13 +1191,49 @@ struct drm_device;
# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */
# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */
+#define DP_EXTENDED_DPRX_SLEEP_WAKE_TIMEOUT_REQUEST 0x2211 /* 1.4a */
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_MASK 0xff
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_1_MS 0x00
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_20_MS 0x01
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_40_MS 0x02
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_60_MS 0x03
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_80_MS 0x04
+# define DP_DPRX_SLEEP_WAKE_TIMEOUT_PERIOD_100_MS 0x05
+
+#define DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1 0x2214 /* 2.0 E11 */
+# define DP_ADAPTIVE_SYNC_SDP_SUPPORTED (1 << 0)
+# define DP_ADAPTIVE_SYNC_SDP_OPERATION_MODE GENMASK(1, 0)
+# define DP_ADAPTIVE_SYNC_SDP_LENGTH GENMASK(5, 0)
+# define DP_AS_SDP_FIRST_HALF_LINE_OR_3840_PIXEL_CYCLE_WINDOW_NOT_SUPPORTED (1 << 1)
+# define DP_VSC_EXT_SDP_FRAMEWORK_VERSION_1_SUPPORTED (1 << 4)
+
#define DP_128B132B_SUPPORTED_LINK_RATES 0x2215 /* 2.0 */
# define DP_UHBR10 (1 << 0)
# define DP_UHBR20 (1 << 1)
# define DP_UHBR13_5 (1 << 2)
-#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */
-# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f
+#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_1MS_UNIT (1 << 7)
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US 0x00
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS 0x01
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_8_MS 0x02
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_12_MS 0x03
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_16_MS 0x04
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_32_MS 0x05
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_64_MS 0x06
+
+#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0x2230
+#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0x2250
+
+/* DSC Extended Capability Branch Total DSC Resources */
+#define DP_DSC_SUPPORT_AND_DSC_DECODER_COUNT 0x2260 /* 2.0 */
+# define DP_DSC_DECODER_COUNT_MASK (0b111 << 5)
+# define DP_DSC_DECODER_COUNT_SHIFT 5
+#define DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0 0x2270 /* 2.0 */
+# define DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK (1 << 0)
+# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK (0b111 << 1)
+# define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT 1
/* Protocol Converter Extension */
/* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */
@@ -1309,6 +1437,66 @@ struct drm_device;
#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494
#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518
+/* DP-tunneling */
+#define DP_TUNNELING_OUI 0xe0000
+#define DP_TUNNELING_OUI_BYTES 3
+
+#define DP_TUNNELING_DEV_ID 0xe0003
+#define DP_TUNNELING_DEV_ID_BYTES 6
+
+#define DP_TUNNELING_HW_REV 0xe0009
+#define DP_TUNNELING_HW_REV_MAJOR_SHIFT 4
+#define DP_TUNNELING_HW_REV_MAJOR_MASK (0xf << DP_TUNNELING_HW_REV_MAJOR_SHIFT)
+#define DP_TUNNELING_HW_REV_MINOR_SHIFT 0
+#define DP_TUNNELING_HW_REV_MINOR_MASK (0xf << DP_TUNNELING_HW_REV_MINOR_SHIFT)
+
+#define DP_TUNNELING_SW_REV_MAJOR 0xe000a
+#define DP_TUNNELING_SW_REV_MINOR 0xe000b
+
+#define DP_TUNNELING_CAPABILITIES 0xe000d
+#define DP_IN_BW_ALLOCATION_MODE_SUPPORT (1 << 7)
+#define DP_PANEL_REPLAY_OPTIMIZATION_SUPPORT (1 << 6)
+#define DP_TUNNELING_SUPPORT (1 << 0)
+
+#define DP_IN_ADAPTER_INFO 0xe000e
+#define DP_IN_ADAPTER_NUMBER_BITS 7
+#define DP_IN_ADAPTER_NUMBER_MASK ((1 << DP_IN_ADAPTER_NUMBER_BITS) - 1)
+
+#define DP_USB4_DRIVER_ID 0xe000f
+#define DP_USB4_DRIVER_ID_BITS 4
+#define DP_USB4_DRIVER_ID_MASK ((1 << DP_USB4_DRIVER_ID_BITS) - 1)
+
+#define DP_USB4_DRIVER_BW_CAPABILITY 0xe0020
+#define DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT (1 << 7)
+
+#define DP_IN_ADAPTER_TUNNEL_INFORMATION 0xe0021
+#define DP_GROUP_ID_BITS 3
+#define DP_GROUP_ID_MASK ((1 << DP_GROUP_ID_BITS) - 1)
+
+#define DP_BW_GRANULARITY 0xe0022
+#define DP_BW_GRANULARITY_MASK 0x3
+
+#define DP_ESTIMATED_BW 0xe0023
+#define DP_ALLOCATED_BW 0xe0024
+
+#define DP_TUNNELING_STATUS 0xe0025
+#define DP_BW_ALLOCATION_CAPABILITY_CHANGED (1 << 3)
+#define DP_ESTIMATED_BW_CHANGED (1 << 2)
+#define DP_BW_REQUEST_SUCCEEDED (1 << 1)
+#define DP_BW_REQUEST_FAILED (1 << 0)
+
+#define DP_TUNNELING_MAX_LINK_RATE 0xe0028
+
+#define DP_TUNNELING_MAX_LANE_COUNT 0xe0029
+#define DP_TUNNELING_MAX_LANE_COUNT_MASK 0x1f
+
+#define DP_DPTX_BW_ALLOCATION_MODE_CONTROL 0xe0030
+#define DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE (1 << 7)
+#define DP_UNMASK_BW_ALLOCATION_IRQ (1 << 6)
+
+#define DP_REQUEST_BW 0xe0031
+#define MAX_DP_REQUEST_BW 255
+
/* LTTPR: Link Training (LT)-tunable PHY Repeaters */
#define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */
#define DP_MAX_LINK_RATE_PHY_REPEATER 0xf0001 /* 1.4a */
@@ -1317,6 +1505,13 @@ struct drm_device;
#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */
#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */
#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */
+# define DP_EXTENDED_WAKE_TIMEOUT_REQUEST_MASK 0x7f
+# define DP_EXTENDED_WAKE_TIMEOUT_GRANT (1 << 7)
+#define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER 0xf0006 /* 2.0 */
+# define DP_PHY_REPEATER_128B132B_SUPPORTED (1 << 0)
+/* See DP_128B132B_SUPPORTED_LINK_RATES for values */
+#define DP_PHY_REPEATER_128B132B_RATES 0xf0007 /* 2.0 */
+#define DP_PHY_REPEATER_EQ_DONE 0xf0008 /* 2.0 E11 */
enum drm_dp_phy {
DP_PHY_DPRX,
@@ -1363,6 +1558,11 @@ enum drm_dp_phy {
# define DP_VOLTAGE_SWING_LEVEL_3_SUPPORTED BIT(0)
# define DP_PRE_EMPHASIS_LEVEL_3_SUPPORTED BIT(1)
+#define DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0022 /* 2.0 */
+#define DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1)
+/* see DP_128B132B_TRAINING_AUX_RD_INTERVAL for values */
+
#define DP_LANE0_1_STATUS_PHY_REPEATER1 0xf0030 /* 1.3 */
#define DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy) \
DP_LTTPR_REG(dp_phy, DP_LANE0_1_STATUS_PHY_REPEATER1)
@@ -1376,10 +1576,31 @@ enum drm_dp_phy {
#define DP_SYMBOL_ERROR_COUNT_LANE1_PHY_REPEATER1 0xf0037 /* 1.3 */
#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */
#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */
+
+#define DP_OUI_PHY_REPEATER1 0xf003d /* 1.3 */
+#define DP_OUI_PHY_REPEATER(dp_phy) \
+ DP_LTTPR_REG(dp_phy, DP_OUI_PHY_REPEATER1)
+
+#define __DP_FEC1_BASE 0xf0290 /* 1.4 */
+#define __DP_FEC2_BASE 0xf0298 /* 1.4 */
+#define DP_FEC_BASE(dp_phy) \
+ (__DP_FEC1_BASE + ((__DP_FEC2_BASE - __DP_FEC1_BASE) * \
+ ((dp_phy) - DP_PHY_LTTPR1)))
+
+#define DP_FEC_REG(dp_phy, fec1_reg) \
+ (DP_FEC_BASE(dp_phy) - DP_FEC_BASE(DP_PHY_LTTPR1) + fec1_reg)
+
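+/*
+ * Worked example (illustrative): each LTTPR's FEC register block sits 8 bytes
+ * above the previous one, so DP_FEC_REG(DP_PHY_LTTPR2, DP_FEC_STATUS_PHY_REPEATER1)
+ * evaluates to 0xf0290 + (0xf0298 - 0xf0290) * 1 = 0xf0298.
+ */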
#define DP_FEC_STATUS_PHY_REPEATER1 0xf0290 /* 1.4 */
+#define DP_FEC_STATUS_PHY_REPEATER(dp_phy) \
+ DP_FEC_REG(dp_phy, DP_FEC_STATUS_PHY_REPEATER1)
+
#define DP_FEC_ERROR_COUNT_PHY_REPEATER1 0xf0291 /* 1.4 */
#define DP_FEC_CAPABILITY_PHY_REPEATER1 0xf0294 /* 1.4a */
+#define DP_LTTPR_MAX_ADD 0xf02ff /* 1.4 */
+
+#define DP_DPCD_MAX_ADD 0xfffff /* 1.4 */
+
/* Repeater modes */
#define DP_PHY_REPEATER_MODE_TRANSPARENT 0x55 /* 1.3 */
#define DP_PHY_REPEATER_MODE_NON_TRANSPARENT 0xaa /* 1.3 */
@@ -1463,45 +1684,32 @@ enum drm_dp_phy {
#define DP_LINK_CONSTANT_N_VALUE 0x8000
#define DP_LINK_STATUS_SIZE 6
-bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
- int lane_count);
-bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
- int lane_count);
-u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
- int lane);
-u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
- int lane);
-u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
- unsigned int lane);
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
-#define DP_DSC_RECEIVER_CAP_SIZE 0xf
+#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */
+#define DP_DSC_BRANCH_CAP_SIZE 3
#define EDP_PSR_RECEIVER_CAP_SIZE 2
-#define EDP_DISPLAY_CTL_CAP_SIZE 3
+#define EDP_DISPLAY_CTL_CAP_SIZE 5
#define DP_LTTPR_COMMON_CAP_SIZE 8
#define DP_LTTPR_PHY_CAP_SIZE 3
-void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
-void drm_dp_lttpr_link_train_clock_recovery_delay(void);
-void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
-void drm_dp_lttpr_link_train_channel_eq_delay(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
-
-u8 drm_dp_link_rate_to_bw_code(int link_rate);
-int drm_dp_bw_code_to_link_rate(u8 link_bw);
-
#define DP_SDP_AUDIO_TIMESTAMP 0x01
#define DP_SDP_AUDIO_STREAM 0x02
#define DP_SDP_EXTENSION 0x04 /* DP 1.1 */
#define DP_SDP_AUDIO_COPYMANAGEMENT 0x05 /* DP 1.2 */
#define DP_SDP_ISRC 0x06 /* DP 1.2 */
#define DP_SDP_VSC 0x07 /* DP 1.2 */
+#define DP_SDP_ADAPTIVE_SYNC 0x22 /* DP 1.4 */
#define DP_SDP_CAMERA_GENERIC(i) (0x08 + (i)) /* 0-7, DP 1.3 */
#define DP_SDP_PPS 0x10 /* DP 1.4 */
#define DP_SDP_VSC_EXT_VESA 0x20 /* DP 1.4 */
#define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */
+
/* 0x80+ CEA-861 infoframe types */
+#define DP_SDP_AUDIO_INFOFRAME_HB2 0x1b
+
/**
* struct dp_sdp_header - DP secondary data packet header
* @HB0: Secondary Data Packet ID
@@ -1580,7 +1788,7 @@ enum dp_pixelformat {
*
* This enum is used to indicate DP VSC SDP Colorimetry formats.
* It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through
- * DB18] and a name of enum member follows DRM_MODE_COLORIMETRY definition.
+ * DB18], and the enum member names follow the enum drm_colorimetry definition.
*
* @DP_COLORIMETRY_DEFAULT: sRGB (IEC 61966-2-1) or
* ITU-R BT.601 colorimetry format
@@ -1653,528 +1861,11 @@ enum dp_content_type {
DP_CONTENT_TYPE_GAME = 0x04,
};
-/**
- * struct drm_dp_vsc_sdp - drm DP VSC SDP
- *
- * This structure represents a DP VSC SDP of drm
- * It is based on DP 1.4 spec [Table 2-116: VSC SDP Header Bytes] and
- * [Table 2-117: VSC SDP Payload for DB16 through DB18]
- *
- * @sdp_type: secondary-data packet type
- * @revision: revision number
- * @length: number of valid data bytes
- * @pixelformat: pixel encoding format
- * @colorimetry: colorimetry format
- * @bpc: bit per color
- * @dynamic_range: dynamic range information
- * @content_type: CTA-861-G defines content types and expected processing by a sink device
- */
-struct drm_dp_vsc_sdp {
- unsigned char sdp_type;
- unsigned char revision;
- unsigned char length;
- enum dp_pixelformat pixelformat;
- enum dp_colorimetry colorimetry;
- int bpc;
- enum dp_dynamic_range dynamic_range;
- enum dp_content_type content_type;
-};
-
-void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
- const struct drm_dp_vsc_sdp *vsc);
-
-int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]);
-
-static inline int
-drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
-}
-
-static inline u8
-drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
-}
-
-static inline bool
-drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DPCD_REV] >= 0x11 &&
- (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
-}
-
-static inline bool
-drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DPCD_REV] >= 0x11 &&
- (dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
-}
-
-static inline bool
-drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DPCD_REV] >= 0x12 &&
- dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
-}
-
-static inline bool
-drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DPCD_REV] >= 0x14 &&
- dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED;
-}
-
-static inline u8
-drm_dp_training_pattern_mask(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return (dpcd[DP_DPCD_REV] >= 0x14) ? DP_TRAINING_PATTERN_MASK_1_4 :
- DP_TRAINING_PATTERN_MASK;
-}
-
-static inline bool
-drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
-}
-
-/* DP/eDP DSC support */
-u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
- bool is_edp);
-u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
-int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE],
- u8 dsc_bpc[3]);
-
-static inline bool
-drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
-{
- return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] &
- DP_DSC_DECOMPRESSION_IS_SUPPORTED;
-}
-
-static inline u16
-drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
-{
- return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
- (dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
- DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK <<
- DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT);
-}
-
-static inline u32
-drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
-{
- /* Max Slicewidth = Number of Pixels * 320 */
- return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] *
- DP_DSC_SLICE_WIDTH_MULTIPLIER;
-}
-
-/* Forward Error Correction Support on DP 1.4 */
-static inline bool
-drm_dp_sink_supports_fec(const u8 fec_capable)
-{
- return fec_capable & DP_FEC_CAPABLE;
-}
-
-static inline bool
-drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_8B10B;
-}
-
-static inline bool
-drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_EDP_CONFIGURATION_CAP] &
- DP_ALTERNATE_SCRAMBLER_RESET_CAP;
-}
-
-/* Ignore MSA timing for Adaptive Sync support on DP 1.4 */
-static inline bool
-drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
-{
- return dpcd[DP_DOWN_STREAM_PORT_COUNT] &
- DP_MSA_TIMING_PAR_IGNORED;
-}
-
-/*
- * DisplayPort AUX channel
- */
-
-/**
- * struct drm_dp_aux_msg - DisplayPort AUX channel transaction
- * @address: address of the (first) register to access
- * @request: contains the type of transaction (see DP_AUX_* macros)
- * @reply: upon completion, contains the reply type of the transaction
- * @buffer: pointer to a transmission or reception buffer
- * @size: size of @buffer
- */
-struct drm_dp_aux_msg {
- unsigned int address;
- u8 request;
- u8 reply;
- void *buffer;
- size_t size;
-};
-
-struct cec_adapter;
-struct edid;
-struct drm_connector;
-
-/**
- * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX
- * @lock: mutex protecting this struct
- * @adap: the CEC adapter for CEC-Tunneling-over-AUX support.
- * @connector: the connector this CEC adapter is associated with
- * @unregister_work: unregister the CEC adapter
- */
-struct drm_dp_aux_cec {
- struct mutex lock;
- struct cec_adapter *adap;
- struct drm_connector *connector;
- struct delayed_work unregister_work;
-};
-
-/**
- * struct drm_dp_aux - DisplayPort AUX channel
- * @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
- * @ddc: I2C adapter that can be used for I2C-over-AUX communication
- * @dev: pointer to struct device that is the parent for this AUX channel
- * @crtc: backpointer to the crtc that is currently using this AUX channel
- * @hw_mutex: internal mutex used for locking transfers
- * @crc_work: worker that captures CRCs for each frame
- * @crc_count: counter of captured frame CRCs
- * @transfer: transfers a message representing a single AUX transaction
- *
- * The @dev field should be set to a pointer to the device that implements the
- * AUX channel.
- *
- * The @name field may be used to specify the name of the I2C adapter. If set to
- * %NULL, dev_name() of @dev will be used.
- *
- * Drivers provide a hardware-specific implementation of how transactions are
- * executed via the @transfer() function. A pointer to a &drm_dp_aux_msg
- * structure describing the transaction is passed into this function. Upon
- * success, the implementation should return the number of payload bytes that
- * were transferred, or a negative error-code on failure. Helpers propagate
- * errors from the @transfer() function, with the exception of the %-EBUSY
- * error, which causes a transaction to be retried. On a short, helpers will
- * return %-EPROTO to make it simpler to check for failure.
- *
- * An AUX channel can also be used to transport I2C messages to a sink. A
- * typical application of that is to access an EDID that's present in the sink
- * device. The @transfer() function can also be used to execute such
- * transactions. The drm_dp_aux_register() function registers an I2C adapter
- * that can be passed to drm_probe_ddc(). Upon removal, drivers should call
- * drm_dp_aux_unregister() to remove the I2C adapter. The I2C adapter uses long
- * transfers by default; if a partial response is received, the adapter will
- * drop down to the size given by the partial response for this transaction
- * only.
- *
- * Note that the aux helper code assumes that the @transfer() function only
- * modifies the reply field of the &drm_dp_aux_msg structure. The retry logic
- * and i2c helpers assume this is the case.
- */
-struct drm_dp_aux {
- const char *name;
- struct i2c_adapter ddc;
- struct device *dev;
- struct drm_crtc *crtc;
- struct mutex hw_mutex;
- struct work_struct crc_work;
- u8 crc_count;
- ssize_t (*transfer)(struct drm_dp_aux *aux,
- struct drm_dp_aux_msg *msg);
- /**
- * @i2c_nack_count: Counts I2C NACKs, used for DP validation.
- */
- unsigned i2c_nack_count;
- /**
- * @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
- */
- unsigned i2c_defer_count;
- /**
- * @cec: struct containing fields used for CEC-Tunneling-over-AUX.
- */
- struct drm_dp_aux_cec cec;
- /**
- * @is_remote: Is this AUX CH actually using sideband messaging.
- */
- bool is_remote;
-};
-
-ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
- void *buffer, size_t size);
-ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
- void *buffer, size_t size);
-
-/**
- * drm_dp_dpcd_readb() - read a single byte from the DPCD
- * @aux: DisplayPort AUX channel
- * @offset: address of the register to read
- * @valuep: location where the value of the register will be stored
- *
- * Returns the number of bytes transferred (1) on success, or a negative
- * error code on failure.
- */
-static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
- unsigned int offset, u8 *valuep)
-{
- return drm_dp_dpcd_read(aux, offset, valuep, 1);
-}
-
-/**
- * drm_dp_dpcd_writeb() - write a single byte to the DPCD
- * @aux: DisplayPort AUX channel
- * @offset: address of the register to write
- * @value: value to write to the register
- *
- * Returns the number of bytes transferred (1) on success, or a negative
- * error code on failure.
- */
-static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
- unsigned int offset, u8 value)
-{
- return drm_dp_dpcd_write(aux, offset, &value, 1);
-}
-
-int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
- u8 dpcd[DP_RECEIVER_CAP_SIZE]);
-
-int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
- u8 status[DP_LINK_STATUS_SIZE]);
-
-int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
- enum drm_dp_phy dp_phy,
- u8 link_status[DP_LINK_STATUS_SIZE]);
-
-bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
- u8 real_edid_checksum);
-
-int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
- const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]);
-bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4], u8 type);
-bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4],
- const struct edid *edid);
-int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4]);
-int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4],
- const struct edid *edid);
-int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4],
- const struct edid *edid);
-int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4],
- const struct edid *edid);
-bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4]);
-bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4]);
-struct drm_display_mode *drm_dp_downstream_mode(struct drm_device *dev,
- const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4]);
-int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
-void drm_dp_downstream_debug(struct seq_file *m,
- const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4],
- const struct edid *edid,
- struct drm_dp_aux *aux);
-enum drm_mode_subconnector
-drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4]);
-void drm_dp_set_subconnector_property(struct drm_connector *connector,
- enum drm_connector_status status,
- const u8 *dpcd,
- const u8 port_cap[4]);
-
-struct drm_dp_desc;
-bool drm_dp_read_sink_count_cap(struct drm_connector *connector,
- const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const struct drm_dp_desc *desc);
-int drm_dp_read_sink_count(struct drm_dp_aux *aux);
-
-int drm_dp_read_lttpr_common_caps(struct drm_dp_aux *aux,
- u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
-int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux,
- enum drm_dp_phy dp_phy,
- u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
-int drm_dp_lttpr_count(const u8 cap[DP_LTTPR_COMMON_CAP_SIZE]);
-int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
-int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
-bool drm_dp_lttpr_voltage_swing_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
-bool drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
-
-void drm_dp_remote_aux_init(struct drm_dp_aux *aux);
-void drm_dp_aux_init(struct drm_dp_aux *aux);
-int drm_dp_aux_register(struct drm_dp_aux *aux);
-void drm_dp_aux_unregister(struct drm_dp_aux *aux);
-
-int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc);
-int drm_dp_stop_crc(struct drm_dp_aux *aux);
-
-struct drm_dp_dpcd_ident {
- u8 oui[3];
- u8 device_id[6];
- u8 hw_rev;
- u8 sw_major_rev;
- u8 sw_minor_rev;
-} __packed;
-
-/**
- * struct drm_dp_desc - DP branch/sink device descriptor
- * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch).
- * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks.
- */
-struct drm_dp_desc {
- struct drm_dp_dpcd_ident ident;
- u32 quirks;
-};
-
-int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
- bool is_branch);
-
-/**
- * enum drm_dp_quirk - Display Port sink/branch device specific quirks
- *
- * Display Port sink and branch devices in the wild have a variety of bugs, try
- * to collect them here. The quirks are shared, but it's up to the drivers to
- * implement workarounds for them.
- */
-enum drm_dp_quirk {
- /**
- * @DP_DPCD_QUIRK_CONSTANT_N:
- *
- * The device requires main link attributes Mvid and Nvid to be limited
- * to 16 bits. So will give a constant value (0x8000) for compatability.
- */
- DP_DPCD_QUIRK_CONSTANT_N,
- /**
- * @DP_DPCD_QUIRK_NO_PSR:
- *
- * The device does not support PSR even if reports that it supports or
- * driver still need to implement proper handling for such device.
- */
- DP_DPCD_QUIRK_NO_PSR,
- /**
- * @DP_DPCD_QUIRK_NO_SINK_COUNT:
- *
- * The device does not set SINK_COUNT to a non-zero value.
- * The driver should ignore SINK_COUNT during detection. Note that
- * drm_dp_read_sink_count_cap() automatically checks for this quirk.
- */
- DP_DPCD_QUIRK_NO_SINK_COUNT,
- /**
- * @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD:
- *
- * The device supports MST DSC despite not supporting Virtual DPCD.
- * The DSC caps can be read from the physical aux instead.
- */
- DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
- /**
- * @DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS:
- *
- * The device supports a link rate of 3.24 Gbps (multiplier 0xc) despite
- * the DP_MAX_LINK_RATE register reporting a lower max multiplier.
- */
- DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS,
-};
-
-/**
- * drm_dp_has_quirk() - does the DP device have a specific quirk
- * @desc: Device descriptor filled by drm_dp_read_desc()
- * @quirk: Quirk to query for
- *
- * Return true if DP device identified by @desc has @quirk.
- */
-static inline bool
-drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
-{
- return desc->quirks & BIT(quirk);
-}
-
-#ifdef CONFIG_DRM_DP_CEC
-void drm_dp_cec_irq(struct drm_dp_aux *aux);
-void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
- struct drm_connector *connector);
-void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
-void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
-void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
-#else
-static inline void drm_dp_cec_irq(struct drm_dp_aux *aux)
-{
-}
-
-static inline void
-drm_dp_cec_register_connector(struct drm_dp_aux *aux,
- struct drm_connector *connector)
-{
-}
-
-static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
-{
-}
-
-static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux,
- const struct edid *edid)
-{
-}
-
-static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
-{
-}
-
-#endif
-
-/**
- * struct drm_dp_phy_test_params - DP Phy Compliance parameters
- * @link_rate: Requested Link rate from DPCD 0x219
- * @num_lanes: Number of lanes requested by sing through DPCD 0x220
- * @phy_pattern: DP Phy test pattern from DPCD 0x248
- * @hbr2_reset: DP HBR2_COMPLIANCE_SCRAMBLER_RESET from DCPD 0x24A and 0x24B
- * @custom80: DP Test_80BIT_CUSTOM_PATTERN from DPCDs 0x250 through 0x259
- * @enhanced_frame_cap: flag for enhanced frame capability.
- */
-struct drm_dp_phy_test_params {
- int link_rate;
- u8 num_lanes;
- u8 phy_pattern;
- u8 hbr2_reset[2];
- u8 custom80[10];
- bool enhanced_frame_cap;
+enum operation_mode {
+ DP_AS_SDP_AVT_DYNAMIC_VTOTAL = 0x00,
+ DP_AS_SDP_AVT_FIXED_VTOTAL = 0x01,
+ DP_AS_SDP_FAVT_TRR_NOT_REACHED = 0x02,
+ DP_AS_SDP_FAVT_TRR_REACHED = 0x03
};
-int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
- struct drm_dp_phy_test_params *data);
-int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
- struct drm_dp_phy_test_params *data, u8 dp_rev);
-int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4]);
-int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd);
-bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux);
-int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
- u8 frl_mode);
-int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
- u8 frl_type);
-int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux);
-int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux);
-
-bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux);
-int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask);
-void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
- struct drm_connector *connector);
-bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
-int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
-int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
-int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
-int drm_dp_pcon_pps_default(struct drm_dp_aux *aux);
-int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128]);
-int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6]);
-bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
- const u8 port_cap[4], u8 color_spc);
-int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc);
-
-#endif /* _DRM_DP_HELPER_H_ */
+#endif /* _DRM_DP_H_ */
diff --git a/include/drm/display/drm_dp_aux_bus.h b/include/drm/display/drm_dp_aux_bus.h
new file mode 100644
index 000000000000..8a0a486383c5
--- /dev/null
+++ b/include/drm/display/drm_dp_aux_bus.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2021 Google Inc.
+ *
+ * The DP AUX bus is used for devices that are connected over a DisplayPort
+ * AUX bus. The devices on the far side of the bus are referred to as
+ * endpoints in this code.
+ */
+
+#ifndef _DP_AUX_BUS_H_
+#define _DP_AUX_BUS_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/**
+ * struct dp_aux_ep_device - Main dev structure for DP AUX endpoints
+ *
+ * This is used to instantiate devices that are connected via a DP AUX
+ * bus. Usually the device is a panel, but conceivably other devices could
+ * be hooked up there.
+ */
+struct dp_aux_ep_device {
+ /** @dev: The normal dev pointer */
+ struct device dev;
+ /** @aux: Pointer to the aux bus */
+ struct drm_dp_aux *aux;
+};
+
+struct dp_aux_ep_driver {
+ int (*probe)(struct dp_aux_ep_device *aux_ep);
+ void (*remove)(struct dp_aux_ep_device *aux_ep);
+ void (*shutdown)(struct dp_aux_ep_device *aux_ep);
+ struct device_driver driver;
+};
+
+static inline struct dp_aux_ep_device *to_dp_aux_ep_dev(struct device *dev)
+{
+ return container_of(dev, struct dp_aux_ep_device, dev);
+}
+
+static inline struct dp_aux_ep_driver *to_dp_aux_ep_drv(struct device_driver *drv)
+{
+ return container_of(drv, struct dp_aux_ep_driver, driver);
+}
+
+int of_dp_aux_populate_bus(struct drm_dp_aux *aux,
+ int (*done_probing)(struct drm_dp_aux *aux));
+void of_dp_aux_depopulate_bus(struct drm_dp_aux *aux);
+int devm_of_dp_aux_populate_bus(struct drm_dp_aux *aux,
+ int (*done_probing)(struct drm_dp_aux *aux));
+
+/* Deprecated versions of the above functions. To be removed once no callers remain. */
+static inline int of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux)
+{
+ int ret;
+
+ ret = of_dp_aux_populate_bus(aux, NULL);
+
+ /* New API returns -ENODEV for no child case; adapt to old assumption */
+ return (ret != -ENODEV) ? ret : 0;
+}
+
+static inline int devm_of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux)
+{
+ int ret;
+
+ ret = devm_of_dp_aux_populate_bus(aux, NULL);
+
+ /* New API returns -ENODEV for no child case; adapt to old assumption */
+ return (ret != -ENODEV) ? ret : 0;
+}
+
+static inline void of_dp_aux_depopulate_ep_devices(struct drm_dp_aux *aux)
+{
+ of_dp_aux_depopulate_bus(aux);
+}
+
+#define dp_aux_dp_driver_register(aux_ep_drv) \
+ __dp_aux_dp_driver_register(aux_ep_drv, THIS_MODULE)
+int __dp_aux_dp_driver_register(struct dp_aux_ep_driver *aux_ep_drv,
+ struct module *owner);
+void dp_aux_dp_driver_unregister(struct dp_aux_ep_driver *aux_ep_drv);
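+
+/*
+ * Example: a minimal endpoint driver sketch. Names like "hypothetical_panel"
+ * are made up for illustration; error handling and match tables are elided.
+ *
+ *	static int hypothetical_panel_probe(struct dp_aux_ep_device *aux_ep)
+ *	{
+ *		// DPCD access via aux_ep->aux is possible from here on
+ *		return 0;
+ *	}
+ *
+ *	static struct dp_aux_ep_driver hypothetical_panel_driver = {
+ *		.probe = hypothetical_panel_probe,
+ *		.driver = {
+ *			.name = "hypothetical_panel",
+ *		},
+ *	};
+ *
+ * The driver would then be registered from module init code with
+ * dp_aux_dp_driver_register(&hypothetical_panel_driver) and torn down with
+ * dp_aux_dp_driver_unregister().
+ */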
+
+#endif /* _DP_AUX_BUS_H_ */
diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/display/drm_dp_dual_mode_helper.h
index 4c42db81fcb4..7ac6969db935 100644
--- a/include/drm/drm_dp_dual_mode_helper.h
+++ b/include/drm/display/drm_dp_dual_mode_helper.h
@@ -62,6 +62,7 @@
#define DP_DUAL_MODE_LSPCON_CURRENT_MODE 0x41
#define DP_DUAL_MODE_LSPCON_MODE_PCON 0x1
+struct drm_device;
struct i2c_adapter;
ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
@@ -103,17 +104,18 @@ enum drm_dp_dual_mode_type {
DRM_DP_DUAL_MODE_LSPCON,
};
-enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter);
-int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
+enum drm_dp_dual_mode_type
+drm_dp_dual_mode_detect(const struct drm_device *dev, struct i2c_adapter *adapter);
+int drm_dp_dual_mode_max_tmds_clock(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter);
-int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
+int drm_dp_dual_mode_get_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool *enabled);
-int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
+int drm_dp_dual_mode_set_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool enable);
const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type);
-int drm_lspcon_get_mode(struct i2c_adapter *adapter,
+int drm_lspcon_get_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
enum drm_lspcon_mode *current_mode);
-int drm_lspcon_set_mode(struct i2c_adapter *adapter,
- enum drm_lspcon_mode reqd_mode);
+int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
+ enum drm_lspcon_mode reqd_mode, int time_out);
#endif
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
new file mode 100644
index 000000000000..df2f24b950e4
--- /dev/null
+++ b/include/drm/display/drm_dp_helper.h
@@ -0,0 +1,1013 @@
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#ifndef _DRM_DP_HELPER_H_
+#define _DRM_DP_HELPER_H_
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+
+#include <drm/display/drm_dp.h>
+#include <drm/drm_connector.h>
+
+struct drm_device;
+struct drm_dp_aux;
+struct drm_panel;
+
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE]);
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane);
+
+int drm_dp_read_clock_recovery_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ enum drm_dp_phy dp_phy, bool uhbr);
+int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ enum drm_dp_phy dp_phy, bool uhbr);
+
+void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_lttpr_link_train_clock_recovery_delay(void);
+void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+
+int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux);
+bool drm_dp_128b132b_lane_channel_eq_done(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_128b132b_lane_symbol_locked(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane_count);
+bool drm_dp_128b132b_eq_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]);
+bool drm_dp_128b132b_cds_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]);
+bool drm_dp_128b132b_link_training_failed(const u8 link_status[DP_LINK_STATUS_SIZE]);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate);
+int drm_dp_bw_code_to_link_rate(u8 link_bw);
+
+const char *drm_dp_phy_name(enum drm_dp_phy dp_phy);
+
+/**
+ * struct drm_dp_vsc_sdp - drm DP VSC SDP
+ *
+ * This structure represents a DP VSC SDP in drm.
+ * It is based on DP 1.4 spec [Table 2-116: VSC SDP Header Bytes] and
+ * [Table 2-117: VSC SDP Payload for DB16 through DB18]
+ *
+ * @sdp_type: secondary-data packet type
+ * @revision: revision number
+ * @length: number of valid data bytes
+ * @pixelformat: pixel encoding format
+ * @colorimetry: colorimetry format
+ * @bpc: bit per color
+ * @dynamic_range: dynamic range information
+ * @content_type: CTA-861-G defines content types and expected processing by a sink device
+ */
+struct drm_dp_vsc_sdp {
+ unsigned char sdp_type;
+ unsigned char revision;
+ unsigned char length;
+ enum dp_pixelformat pixelformat;
+ enum dp_colorimetry colorimetry;
+ int bpc;
+ enum dp_dynamic_range dynamic_range;
+ enum dp_content_type content_type;
+};
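+
+/*
+ * Example (sketch; the revision/length values shown are the ones commonly
+ * used for a DP 1.4 VSC SDP carrying pixel encoding/colorimetry, but all
+ * values here are illustrative): an 8 bpc RGB stream, default colorimetry.
+ *
+ *	struct drm_dp_vsc_sdp vsc = {
+ *		.sdp_type = DP_SDP_VSC,
+ *		.revision = 0x5,
+ *		.length = 0x13,
+ *		.pixelformat = DP_PIXELFORMAT_RGB,
+ *		.colorimetry = DP_COLORIMETRY_DEFAULT,
+ *		.bpc = 8,
+ *		.dynamic_range = DP_DYNAMIC_RANGE_VESA,
+ *		.content_type = DP_CONTENT_TYPE_NOT_DEFINED,
+ *	};
+ */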
+
+/**
+ * struct drm_dp_as_sdp - drm DP Adaptive Sync SDP
+ *
+ * This structure represents a DP AS SDP in drm.
+ * It is based on DP 2.1 spec [Table 2-126: Adaptive-Sync SDP Header Bytes] and
+ * [Table 2-127: Adaptive-Sync SDP Payload for DB0 through DB8]
+ *
+ * @sdp_type: Secondary-data packet type
+ * @revision: Revision Number
+ * @length: Number of valid data bytes
+ * @vtotal: Minimum Vertical Vtotal
+ * @target_rr: Target Refresh
+ * @duration_incr_ms: Successive frame duration increase
+ * @duration_decr_ms: Successive frame duration decrease
+ * @target_rr_divider: Target refresh rate divider
+ * @mode: Adaptive Sync Operation Mode
+ */
+struct drm_dp_as_sdp {
+ unsigned char sdp_type;
+ unsigned char revision;
+ unsigned char length;
+ int vtotal;
+ int target_rr;
+ int duration_incr_ms;
+ int duration_decr_ms;
+ bool target_rr_divider;
+ enum operation_mode mode;
+};
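+
+/*
+ * Example (sketch; the numeric values are illustrative only): an AS SDP
+ * describing dynamic-Vtotal operation for a nominal 60 Hz timing.
+ *
+ *	struct drm_dp_as_sdp as_sdp = {
+ *		.sdp_type = DP_SDP_ADAPTIVE_SYNC,
+ *		.length = 0x9,
+ *		.vtotal = 1125,
+ *		.target_rr = 60,
+ *		.mode = DP_AS_SDP_AVT_DYNAMIC_VTOTAL,
+ *	};
+ */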
+
+void drm_dp_as_sdp_log(struct drm_printer *p,
+ const struct drm_dp_as_sdp *as_sdp);
+void drm_dp_vsc_sdp_log(struct drm_printer *p, const struct drm_dp_vsc_sdp *vsc);
+
+bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]);
+
+static inline int
+drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+}
+
+static inline u8
+drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+}
+
+static inline bool
+drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x11 &&
+ (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
+}
+
+static inline bool
+drm_dp_post_lt_adj_req_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x13 &&
+ (dpcd[DP_MAX_LANE_COUNT] & DP_POST_LT_ADJ_REQ_SUPPORTED);
+}
+
+static inline bool
+drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x11 &&
+ (dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
+}
+
+static inline bool
+drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x12 &&
+ dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED;
+}
+
+static inline bool
+drm_dp_max_downspread(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x11 ||
+ dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5;
+}
+
+static inline bool
+drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x14 &&
+ dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED;
+}
+
+static inline u8
+drm_dp_training_pattern_mask(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return (dpcd[DP_DPCD_REV] >= 0x14) ? DP_TRAINING_PATTERN_MASK_1_4 :
+ DP_TRAINING_PATTERN_MASK;
+}
+
+static inline bool
+drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
+}
+
+/* DP/eDP DSC support */
+u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
+u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ bool is_edp);
+u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE],
+ u8 dsc_bpc[3]);
+int drm_dp_dsc_sink_max_slice_throughput(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ int peak_pixel_rate, bool is_rgb_yuv444);
+int drm_dp_dsc_branch_max_overall_throughput(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE],
+ bool is_rgb_yuv444);
+int drm_dp_dsc_branch_max_line_width(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE]);
+
+static inline bool
+drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] &
+ DP_DSC_DECOMPRESSION_IS_SUPPORTED;
+}
+
+static inline u16
+drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
+ ((dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
+ DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK) << 8);
+}
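+
+/*
+ * Worked example: with DP_DSC_MAX_BITS_PER_PIXEL_LOW reading 0x20 and the
+ * low two bits of DP_DSC_MAX_BITS_PER_PIXEL_HI reading 0x1, this returns
+ * 0x120 = 288, i.e. 288 / 16 = 18 bpp in the registers' 1/16 bpp units.
+ */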
+
+static inline u32
+drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ /* Max Slicewidth = Number of Pixels * 320 */
+ return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] *
+ DP_DSC_SLICE_WIDTH_MULTIPLIER;
+}
+
+/**
+ * drm_dp_dsc_sink_supports_format() - check if sink supports DSC with given output format
+ * @dsc_dpcd: DSC capability DPCDs of the sink
+ * @output_format: output format to be checked
+ *
+ * Returns true if the sink supports DSC with the given output_format, false otherwise.
+ */
+static inline bool
+drm_dp_dsc_sink_supports_format(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], u8 output_format)
+{
+ return dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & output_format;
+}
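+
+/*
+ * Usage sketch: gate enabling DSC for an RGB stream on the sink's decoder
+ * color-format support, e.g.:
+ *
+ *	if (drm_dp_dsc_sink_supports_format(dsc_dpcd, DP_DSC_RGB))
+ *		// RGB decode is supported, DSC may be enabled for RGB output
+ */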
+
+/* Forward Error Correction Support on DP 1.4 */
+static inline bool
+drm_dp_sink_supports_fec(const u8 fec_capable)
+{
+ return fec_capable & DP_FEC_CAPABLE;
+}
+
+static inline bool
+drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_8B10B;
+}
+
+static inline bool
+drm_dp_128b132b_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B;
+}
+
+static inline bool
+drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_EDP_CONFIGURATION_CAP] &
+ DP_ALTERNATE_SCRAMBLER_RESET_CAP;
+}
+
+/* Ignore MSA timing for Adaptive Sync support on DP 1.4 */
+static inline bool
+drm_dp_sink_can_do_video_without_timing_msa(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DOWN_STREAM_PORT_COUNT] &
+ DP_MSA_TIMING_PAR_IGNORED;
+}
+
+/**
+ * drm_edp_backlight_supported() - Check an eDP DPCD for VESA backlight support
+ * @edp_dpcd: The DPCD to check
+ *
+ * Note that currently this function will return %false for panels which support various DPCD
+ * backlight features but which require the brightness be set through PWM, and don't support setting
+ * the brightness level via the DPCD.
+ *
+ * Returns: %true if @edp_dpcd indicates that VESA backlight controls are supported, %false
+ * otherwise
+ */
+static inline bool
+drm_edp_backlight_supported(const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE])
+{
+ return !!(edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP);
+}
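+
+/*
+ * Usage sketch (assumes @edp_dpcd was read starting at DP_EDP_DPCD_REV, as
+ * the edp_dpcd[1] access above implies):
+ *
+ *	u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
+ *
+ *	if (drm_dp_dpcd_read(aux, DP_EDP_DPCD_REV, edp_dpcd,
+ *			     EDP_DISPLAY_CTL_CAP_SIZE) == EDP_DISPLAY_CTL_CAP_SIZE &&
+ *	    drm_edp_backlight_supported(edp_dpcd))
+ *		// the VESA DPCD backlight interface can be used
+ */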
+
+/**
+ * drm_dp_is_uhbr_rate - Determine if a link rate is UHBR
+ * @link_rate: link rate in 10kbits/s units
+ *
+ * Determine if the provided link rate is a UHBR rate.
+ *
+ * Returns: %true if @link_rate is a UHBR rate.
+ */
+static inline bool drm_dp_is_uhbr_rate(int link_rate)
+{
+ return link_rate >= 1000000;
+}
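+
+/*
+ * For example, UHBR10 is 10 Gbit/s, i.e. 1000000 in the 10 kbit/s units used
+ * here, so it qualifies, while HBR3 (8.1 Gbit/s, 810000) does not.
+ */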
+
+/*
+ * DisplayPort AUX channel
+ */
+
+/**
+ * struct drm_dp_aux_msg - DisplayPort AUX channel transaction
+ * @address: address of the (first) register to access
+ * @request: contains the type of transaction (see DP_AUX_* macros)
+ * @reply: upon completion, contains the reply type of the transaction
+ * @buffer: pointer to a transmission or reception buffer
+ * @size: size of @buffer
+ */
+struct drm_dp_aux_msg {
+ unsigned int address;
+ u8 request;
+ u8 reply;
+ void *buffer;
+ size_t size;
+};
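+
+/*
+ * Example (sketch): the message the helpers build for a one-byte native DPCD
+ * read of DP_SINK_COUNT; drivers normally use drm_dp_dpcd_read*() rather than
+ * filling this in by hand.
+ *
+ *	u8 sink_count;
+ *	struct drm_dp_aux_msg msg = {
+ *		.address = DP_SINK_COUNT,
+ *		.request = DP_AUX_NATIVE_READ,
+ *		.buffer = &sink_count,
+ *		.size = sizeof(sink_count),
+ *	};
+ */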
+
+struct cec_adapter;
+struct drm_connector;
+struct drm_edid;
+
+/**
+ * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX
+ * @lock: mutex protecting this struct
+ * @adap: the CEC adapter for CEC-Tunneling-over-AUX support.
+ * @connector: the connector this CEC adapter is associated with
+ * @unregister_work: unregister the CEC adapter
+ */
+struct drm_dp_aux_cec {
+ struct mutex lock;
+ struct cec_adapter *adap;
+ struct drm_connector *connector;
+ struct delayed_work unregister_work;
+};
+
+/**
+ * struct drm_dp_aux - DisplayPort AUX channel
+ *
+ * An AUX channel can also be used to transport I2C messages to a sink. A
+ * typical application of that is to access an EDID that's present in the sink
+ * device. The @transfer() function can also be used to execute such
+ * transactions. The drm_dp_aux_register() function registers an I2C adapter
+ * that can be passed to drm_probe_ddc(). Upon removal, drivers should call
+ * drm_dp_aux_unregister() to remove the I2C adapter. The I2C adapter uses long
+ * transfers by default; if a partial response is received, the adapter will
+ * drop down to the size given by the partial response for this transaction
+ * only.
+ */
+struct drm_dp_aux {
+ /**
+ * @name: user-visible name of this AUX channel and the
+ * I2C-over-AUX adapter.
+ *
+ * It's also used to specify the name of the I2C adapter. If set
+ * to %NULL, dev_name() of @dev will be used.
+ */
+ const char *name;
+
+ /**
+ * @ddc: I2C adapter that can be used for I2C-over-AUX
+ * communication
+ */
+ struct i2c_adapter ddc;
+
+ /**
+ * @dev: pointer to struct device that is the parent for this
+ * AUX channel.
+ */
+ struct device *dev;
+
+ /**
+ * @drm_dev: pointer to the &drm_device that owns this AUX channel.
+ * Beware, this may be %NULL before drm_dp_aux_register() has been
+ * called.
+ *
+ * It should be set to the &drm_device that will be using this AUX
+ * channel as early as possible. For many graphics drivers this should
+ * happen before drm_dp_aux_init(); however, it's perfectly fine to set
+ * this field later so long as it's assigned before calling
+ * drm_dp_aux_register().
+ */
+ struct drm_device *drm_dev;
+
+ /**
+ * @crtc: backpointer to the crtc that is currently using this
+ * AUX channel
+ */
+ struct drm_crtc *crtc;
+
+ /**
+ * @hw_mutex: internal mutex used for locking transfers.
+ *
+ * Note that if the underlying hardware is shared among multiple
+ * channels, the driver needs to do additional locking to
+ * prevent concurrent access.
+ */
+ struct mutex hw_mutex;
+
+ /**
+ * @crc_work: worker that captures CRCs for each frame
+ */
+ struct work_struct crc_work;
+
+ /**
+ * @crc_count: counter of captured frame CRCs
+ */
+ u8 crc_count;
+
+ /**
+ * @transfer: transfers a message representing a single AUX
+ * transaction.
+ *
+ * This is a hardware-specific implementation of how
+ * transactions are executed that the drivers must provide.
+ *
+ * A pointer to a &drm_dp_aux_msg structure describing the
+ * transaction is passed into this function. Upon success, the
+ * implementation should return the number of payload bytes that
+ * were transferred, or a negative error-code on failure.
+ *
+ * Helpers will propagate these errors, with the exception of
+ * the %-EBUSY error, which causes a transaction to be retried.
+ * On a short transfer, helpers will return %-EPROTO to make it simpler
+ * to check for failure.
+ *
+ * The @transfer() function must only modify the reply field of
+ * the &drm_dp_aux_msg structure. The retry logic and i2c
+ * helpers assume this is the case.
+ *
+ * Also note that this callback can be called no matter the
+ * state @dev is in and also no matter what state the panel is
+ * in. It's expected:
+ *
+ * - If the @dev providing the AUX bus is currently unpowered then
+ * it will power itself up for the transfer.
+ *
+ * - If we're on eDP (using a drm_panel) and the panel is not in a
+ * state where it can respond (it's not powered or it's in a
+ * low power state) then this function may return an error, but
+ * not crash. It's up to the caller of this code to make sure that
+ * the panel is powered on if getting an error back is not OK. If a
+ * drm_panel driver is initiating a DP AUX transfer it may power
+ * itself up however it wants. All other code should ensure that
+ * the pre_enable() bridge chain (which eventually calls the
+ * drm_panel prepare function) has powered the panel.
+ */
+ ssize_t (*transfer)(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg);
+
+ /**
+ * @wait_hpd_asserted: wait for HPD to be asserted
+ *
+ * This is mainly useful for eDP panel drivers to wait for an eDP
+ * panel to finish powering on. It is optional for DP AUX controllers
+ * to implement this function. It is required for DP AUX endpoints
+ * (panel drivers) to call this function after powering up but before
+ * doing AUX transfers unless the DP AUX endpoint driver knows that
+ * we're not using the AUX controller's HPD. One example of the panel
+ * driver not needing to call this is if HPD is hooked up to a GPIO
+ * that the panel driver can read directly.
+ *
+ * If a DP AUX controller does not implement this function then it
+ * may still support eDP panels that use the AUX controller's built-in
+ * HPD signal by implementing a long wait for HPD in the transfer()
+ * callback, though this is deprecated.
+ *
+ * This function will efficiently wait for the HPD signal to be
+ * asserted. The `wait_us` parameter states how long the HPD signal is
+ * expected to take to assert, in microseconds. This function could wait
+ * for longer than `wait_us` if the logic in the DP controller has a
+ * long debouncing time. The important thing is that if this function
+ * returns success, the DP controller is ready to send AUX transactions.
+ *
+ * This function returns 0 if HPD was asserted or -ETIMEDOUT if time
+ * expired and HPD wasn't asserted. This function should not print
+ * timeout errors to the log.
+ *
+ * The semantics of this function are designed to match the
+ * readx_poll_timeout() function. That means a `wait_us` of 0 means
+ * to wait forever. Like readx_poll_timeout(), this function may sleep.
+ *
+ * NOTE: this function specifically reports the state of the HPD pin
+ * that's associated with the DP AUX channel. This is different from
+ * the HPD concept in much of the rest of DRM which is more about
+ * physical presence of a display. For eDP, for instance, a display is
+ * assumed always present even if the HPD pin is deasserted.
+ */
+ int (*wait_hpd_asserted)(struct drm_dp_aux *aux, unsigned long wait_us);
+
+ /**
+ * @i2c_nack_count: Counts I2C NACKs, used for DP validation.
+ */
+ unsigned i2c_nack_count;
+ /**
+ * @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
+ */
+ unsigned i2c_defer_count;
+ /**
+ * @cec: struct containing fields used for CEC-Tunneling-over-AUX.
+ */
+ struct drm_dp_aux_cec cec;
+ /**
+ * @is_remote: Is this AUX CH actually using sideband messaging.
+ */
+ bool is_remote;
+
+ /**
+ * @powered_down: If true then the remote endpoint is powered down.
+ */
+ bool powered_down;
+
+ /**
+ * @no_zero_sized: True if the hardware can't use zero-sized transfers (NVIDIA)
+ */
+ bool no_zero_sized;
+
+ /**
+ * @dpcd_probe_disabled: True if probing before a DPCD access is disabled.
+ */
+ bool dpcd_probe_disabled;
+};
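+
+/*
+ * Editor's sketch (not part of this patch): how a hypothetical driver might
+ * wire up a &drm_dp_aux. The foo_*() names and struct foo_dp are assumptions
+ * made for illustration; only the drm_dp_aux fields and the
+ * drm_dp_aux_init()/drm_dp_aux_register() calls are real API.
+ *
+ *	static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
+ *					struct drm_dp_aux_msg *msg)
+ *	{
+ *		struct foo_dp *fdp = container_of(aux, struct foo_dp, aux);
+ *
+ *		// Run the transaction on the hardware. Modify only msg->reply
+ *		// (and the payload buffer); return the number of payload bytes
+ *		// transferred or a negative error code (-EBUSY is retried).
+ *		return foo_hw_aux_xfer(fdp, msg);
+ *	}
+ *
+ *	static int foo_dp_probe(struct foo_dp *fdp)
+ *	{
+ *		fdp->aux.name = "foo DP AUX";
+ *		fdp->aux.dev = fdp->dev;
+ *		fdp->aux.transfer = foo_aux_transfer;
+ *		drm_dp_aux_init(&fdp->aux);
+ *		// ...and once fdp->aux.drm_dev has been assigned:
+ *		return drm_dp_aux_register(&fdp->aux);
+ *	}
+ */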
+
+int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset);
+void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered);
+void drm_dp_dpcd_set_probe(struct drm_dp_aux *aux, bool enable);
+ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
+ void *buffer, size_t size);
+ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
+ void *buffer, size_t size);
+
+/**
+ * drm_dp_dpcd_read_data() - read a series of bytes from the DPCD
+ * @aux: DisplayPort AUX channel (SST or MST)
+ * @offset: address of the (first) register to read
+ * @buffer: buffer to store the register values
+ * @size: number of bytes in @buffer
+ *
+ * Returns zero (0) on success, or a negative error
+ * code on failure. -EIO is returned if the request was NAKed by the sink or
+ * if the retry count was exceeded. If not all bytes were transferred, this
+ * function returns -EPROTO. Errors from the underlying AUX channel transfer
+ * function, with the exception of -EBUSY (which causes the transaction to
+ * be retried), are propagated to the caller.
+ */
+static inline int drm_dp_dpcd_read_data(struct drm_dp_aux *aux,
+ unsigned int offset,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_read(aux, offset, buffer, size);
+ if (ret < 0)
+ return ret;
+ if (ret < size)
+ return -EPROTO;
+
+ return 0;
+}
+
+/**
+ * drm_dp_dpcd_write_data() - write a series of bytes to the DPCD
+ * @aux: DisplayPort AUX channel (SST or MST)
+ * @offset: address of the (first) register to write
+ * @buffer: buffer containing the values to write
+ * @size: number of bytes in @buffer
+ *
+ * Returns zero (0) on success, or a negative error
+ * code on failure. -EIO is returned if the request was NAKed by the sink or
+ * if the retry count was exceeded. If not all bytes were transferred, this
+ * function returns -EPROTO. Errors from the underlying AUX channel transfer
+ * function, with the exception of -EBUSY (which causes the transaction to
+ * be retried), are propagated to the caller.
+ */
+static inline int drm_dp_dpcd_write_data(struct drm_dp_aux *aux,
+ unsigned int offset,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_write(aux, offset, buffer, size);
+ if (ret < 0)
+ return ret;
+ if (ret < size)
+ return -EPROTO;
+
+ return 0;
+}
+
+/**
+ * drm_dp_dpcd_readb() - read a single byte from the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to read
+ * @valuep: location where the value of the register will be stored
+ *
+ * Returns the number of bytes transferred (1) on success, or a negative
+ * error code on failure. In most cases you should use
+ * drm_dp_dpcd_read_byte() instead.
+ */
+static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux,
+ unsigned int offset, u8 *valuep)
+{
+ return drm_dp_dpcd_read(aux, offset, valuep, 1);
+}
+
+/**
+ * drm_dp_dpcd_writeb() - write a single byte to the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to write
+ * @value: value to write to the register
+ *
+ * Returns the number of bytes transferred (1) on success, or a negative
+ * error code on failure. In most cases you should use
+ * drm_dp_dpcd_write_byte() instead.
+ */
+static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
+ unsigned int offset, u8 value)
+{
+ return drm_dp_dpcd_write(aux, offset, &value, 1);
+}
+
+/**
+ * drm_dp_dpcd_read_byte() - read a single byte from the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to read
+ * @valuep: location where the value of the register will be stored
+ *
+ * Returns zero (0) on success, or a negative error code on failure.
+ */
+static inline int drm_dp_dpcd_read_byte(struct drm_dp_aux *aux,
+ unsigned int offset, u8 *valuep)
+{
+ return drm_dp_dpcd_read_data(aux, offset, valuep, 1);
+}
+
+/**
+ * drm_dp_dpcd_write_byte() - write a single byte to the DPCD
+ * @aux: DisplayPort AUX channel
+ * @offset: address of the register to write
+ * @value: value to write to the register
+ *
+ * Returns zero (0) on success, or a negative error code on failure.
+ */
+static inline int drm_dp_dpcd_write_byte(struct drm_dp_aux *aux,
+ unsigned int offset, u8 value)
+{
+ return drm_dp_dpcd_write_data(aux, offset, &value, 1);
+}
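+
+/*
+ * Editor's sketch (not part of this patch): a minimal use of the byte
+ * helpers above, waking the sink and reading back its DPCD revision. The
+ * function name is made up for the example; DP_SET_POWER, DP_SET_POWER_D0
+ * and DP_DPCD_REV are the usual DPCD register definitions.
+ */
+static inline int drm_dp_example_wake_and_get_rev(struct drm_dp_aux *aux,
+						  u8 *rev)
+{
+	int ret;
+
+	/* Put the sink's main link into the D0 (powered up) state */
+	ret = drm_dp_dpcd_write_byte(aux, DP_SET_POWER, DP_SET_POWER_D0);
+	if (ret < 0)
+		return ret;
+
+	/* Both byte helpers return 0 on success, never a byte count */
+	return drm_dp_dpcd_read_byte(aux, DP_DPCD_REV, rev);
+}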
+
+int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
+ u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
+ u8 status[DP_LINK_STATUS_SIZE]);
+
+int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux,
+ enum drm_dp_phy dp_phy,
+ u8 link_status[DP_LINK_STATUS_SIZE]);
+int drm_dp_link_power_up(struct drm_dp_aux *aux, unsigned char revision);
+int drm_dp_link_power_down(struct drm_dp_aux *aux, unsigned char revision);
+
+int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux,
+ int vcpid, u8 start_time_slot, u8 time_slot_count);
+int drm_dp_dpcd_clear_payload(struct drm_dp_aux *aux);
+int drm_dp_dpcd_poll_act_handled(struct drm_dp_aux *aux, int timeout_ms);
+
+bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
+ u8 real_edid_checksum);
+
+int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]);
+bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4], u8 type);
+bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct drm_edid *drm_edid);
+int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct drm_edid *drm_edid);
+int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct drm_edid *drm_edid);
+int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct drm_edid *drm_edid);
+bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+struct drm_display_mode *drm_dp_downstream_mode(struct drm_device *dev,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
+void drm_dp_downstream_debug(struct seq_file *m,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4],
+ const struct drm_edid *drm_edid,
+ struct drm_dp_aux *aux);
+enum drm_mode_subconnector
+drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+void drm_dp_set_subconnector_property(struct drm_connector *connector,
+ enum drm_connector_status status,
+ const u8 *dpcd,
+ const u8 port_cap[4]);
+
+struct drm_dp_desc;
+bool drm_dp_read_sink_count_cap(struct drm_connector *connector,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const struct drm_dp_desc *desc);
+int drm_dp_read_sink_count(struct drm_dp_aux *aux);
+
+int drm_dp_read_lttpr_common_caps(struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
+int drm_dp_read_lttpr_phy_caps(struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ enum drm_dp_phy dp_phy,
+ u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+int drm_dp_lttpr_count(const u8 cap[DP_LTTPR_COMMON_CAP_SIZE]);
+int drm_dp_lttpr_max_link_rate(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
+int drm_dp_lttpr_set_transparent_mode(struct drm_dp_aux *aux, bool enable);
+int drm_dp_lttpr_init(struct drm_dp_aux *aux, int lttpr_count);
+int drm_dp_lttpr_max_lane_count(const u8 caps[DP_LTTPR_COMMON_CAP_SIZE]);
+bool drm_dp_lttpr_voltage_swing_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+bool drm_dp_lttpr_pre_emphasis_level_3_supported(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+void drm_dp_lttpr_wake_timeout_setup(struct drm_dp_aux *aux, bool transparent_mode);
+
+void drm_dp_remote_aux_init(struct drm_dp_aux *aux);
+void drm_dp_aux_init(struct drm_dp_aux *aux);
+int drm_dp_aux_register(struct drm_dp_aux *aux);
+void drm_dp_aux_unregister(struct drm_dp_aux *aux);
+
+int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc);
+int drm_dp_stop_crc(struct drm_dp_aux *aux);
+
+struct drm_dp_dpcd_ident {
+ u8 oui[3];
+ u8 device_id[6];
+ u8 hw_rev;
+ u8 sw_major_rev;
+ u8 sw_minor_rev;
+} __packed;
+
+/**
+ * struct drm_dp_desc - DP branch/sink device descriptor
+ * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch).
+ * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks.
+ */
+struct drm_dp_desc {
+ struct drm_dp_dpcd_ident ident;
+ u32 quirks;
+};
+
+int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
+ bool is_branch);
+
+int drm_dp_dump_lttpr_desc(struct drm_dp_aux *aux, enum drm_dp_phy dp_phy);
+
+/**
+ * enum drm_dp_quirk - DisplayPort sink/branch device specific quirks
+ *
+ * DisplayPort sink and branch devices in the wild have a variety of bugs;
+ * try to collect them here. The quirks are shared, but it's up to the
+ * drivers to implement workarounds for them.
+ */
+enum drm_dp_quirk {
+ /**
+ * @DP_DPCD_QUIRK_CONSTANT_N:
+ *
+ * The device requires main link attributes Mvid and Nvid to be limited
+ * to 16 bits. The driver should therefore use a constant N value
+ * (0x8000) for compatibility.
+ */
+ DP_DPCD_QUIRK_CONSTANT_N,
+ /**
+ * @DP_DPCD_QUIRK_NO_PSR:
+ *
+ * The device does not support PSR even though it reports that it does,
+ * or the driver does not yet implement proper handling for such a device.
+ */
+ DP_DPCD_QUIRK_NO_PSR,
+ /**
+ * @DP_DPCD_QUIRK_NO_SINK_COUNT:
+ *
+ * The device does not set SINK_COUNT to a non-zero value.
+ * The driver should ignore SINK_COUNT during detection. Note that
+ * drm_dp_read_sink_count_cap() automatically checks for this quirk.
+ */
+ DP_DPCD_QUIRK_NO_SINK_COUNT,
+ /**
+ * @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD:
+ *
+ * The device supports MST DSC despite not supporting Virtual DPCD.
+ * The DSC caps can be read from the physical aux instead.
+ */
+ DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
+ /**
+ * @DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS:
+ *
+ * The device supports a link rate of 3.24 Gbps (multiplier 0xc) despite
+ * the DP_MAX_LINK_RATE register reporting a lower max multiplier.
+ */
+ DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS,
+ /**
+ * @DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC:
+ *
+ * The device applies HBLANK expansion for some modes, but this
+ * requires enabling DSC.
+ */
+ DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC,
+ /**
+ * @DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT:
+ *
+ * The device doesn't support DSC decompression at the maximum DSC
+ * pixel throughput and compressed bpp it indicates via its DPCD DSC
+ * capabilities. The compressed bpp must be limited when operating above
+ * a device-specific DSC pixel throughput.
+ */
+ DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT,
+};
+
+/**
+ * drm_dp_has_quirk() - does the DP device have a specific quirk
+ * @desc: Device descriptor filled by drm_dp_read_desc()
+ * @quirk: Quirk to query for
+ *
+ * Return %true if the DP device identified by @desc has @quirk.
+ */
+static inline bool
+drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
+{
+ return desc->quirks & BIT(quirk);
+}
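+
+/*
+ * Editor's sketch (not part of this patch): typical quirk handling. The
+ * descriptor is read once, at detect time, and quirks are then queried
+ * cheaply. The function name is made up for the example.
+ */
+static inline bool drm_dp_example_needs_constant_n(struct drm_dp_aux *aux)
+{
+	struct drm_dp_desc desc;
+
+	if (drm_dp_read_desc(aux, &desc, false /* is_branch */) < 0)
+		return false;
+
+	return drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_CONSTANT_N);
+}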
+
+/**
+ * struct drm_edp_backlight_info - Probed eDP backlight info struct
+ * @pwmgen_bit_count: The pwmgen bit count
+ * @pwm_freq_pre_divider: The PWM frequency pre-divider value being used for this backlight, if any
+ * @max: The maximum backlight level that may be set
+ * @lsb_reg_used: Do we also write values to the DP_EDP_BACKLIGHT_BRIGHTNESS_LSB register?
+ * @aux_enable: Does the panel support the AUX enable cap?
+ * @aux_set: Does the panel support setting the brightness through AUX?
+ * @luminance_set: Does the panel support setting the brightness through AUX using luminance values?
+ *
+ * This structure contains various data about an eDP backlight, which can be populated by using
+ * drm_edp_backlight_init().
+ */
+struct drm_edp_backlight_info {
+ u8 pwmgen_bit_count;
+ u8 pwm_freq_pre_divider;
+ u32 max;
+
+ bool lsb_reg_used : 1;
+ bool aux_enable : 1;
+ bool aux_set : 1;
+ bool luminance_set : 1;
+};
+
+int
+drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
+ u32 max_luminance,
+ u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE],
+ u32 *current_level, u8 *current_mode, bool need_luminance);
+int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
+ u32 level);
+int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
+ u32 level);
+int drm_edp_backlight_disable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl);
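+
+/*
+ * Editor's sketch (not part of this patch): a plausible VESA eDP backlight
+ * bring-up sequence using the helpers above. The function name and the
+ * choice of 0 for max_luminance and the PWM frequency (keeping panel
+ * defaults) are assumptions made for the example.
+ */
+static inline int drm_edp_example_backlight_setup(struct drm_dp_aux *aux,
+						  struct drm_edp_backlight_info *bl,
+						  u32 level)
+{
+	u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
+	u32 current_level;
+	u8 current_mode;
+	int ret;
+
+	/* Fetch the eDP display control capability bytes */
+	ret = drm_dp_dpcd_read_data(aux, DP_EDP_DPCD_REV, edp_dpcd,
+				    sizeof(edp_dpcd));
+	if (ret < 0)
+		return ret;
+
+	if (!drm_edp_backlight_supported(edp_dpcd))
+		return -ENODEV;
+
+	ret = drm_edp_backlight_init(aux, bl, 0, 0, edp_dpcd,
+				     &current_level, &current_mode, false);
+	if (ret < 0)
+		return ret;
+
+	return drm_edp_backlight_enable(aux, bl, level);
+}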
+
+#if IS_ENABLED(CONFIG_DRM_KMS_HELPER) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
+ (IS_MODULE(CONFIG_DRM_KMS_HELPER) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE)))
+
+int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux);
+
+#else
+
+static inline int drm_panel_dp_aux_backlight(struct drm_panel *panel,
+ struct drm_dp_aux *aux)
+{
+ return 0;
+}
+
+#endif
+
+#ifdef CONFIG_DRM_DISPLAY_DP_AUX_CEC
+void drm_dp_cec_irq(struct drm_dp_aux *aux);
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ struct drm_connector *connector);
+void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
+void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address);
+void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
+void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
+#else
+static inline void drm_dp_cec_irq(struct drm_dp_aux *aux)
+{
+}
+
+static inline void
+drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ struct drm_connector *connector)
+{
+}
+
+static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
+{
+}
+
+static inline void drm_dp_cec_attach(struct drm_dp_aux *aux,
+ u16 source_physical_address)
+{
+}
+
+static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux,
+ const struct edid *edid)
+{
+}
+
+static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
+{
+}
+
+#endif
+
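+/*
+ * Editor's sketch (not part of this patch): the expected
+ * CEC-Tunneling-over-AUX call flow for a hypothetical driver ("foo"). The
+ * adapter is registered once per connector; the EDID calls come from the
+ * detect path as the display appears and disappears, and drm_dp_cec_irq()
+ * from the HPD IRQ handler when the sink signals CEC traffic.
+ *
+ *	drm_dp_cec_register_connector(&foo->aux, connector);
+ *	...
+ *	drm_dp_cec_set_edid(&foo->aux, edid);		// display detected
+ *	drm_dp_cec_irq(&foo->aux);			// from HPD IRQ handler
+ *	drm_dp_cec_unset_edid(&foo->aux);		// display disconnected
+ *	...
+ *	drm_dp_cec_unregister_connector(&foo->aux);
+ */
+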
+/**
+ * struct drm_dp_phy_test_params - DP Phy Compliance parameters
+ * @link_rate: Requested Link rate from DPCD 0x219
+ * @num_lanes: Number of lanes requested by the sink through DPCD 0x220
+ * @phy_pattern: DP Phy test pattern from DPCD 0x248
+ * @hbr2_reset: DP HBR2_COMPLIANCE_SCRAMBLER_RESET from DPCD 0x24A and 0x24B
+ * @custom80: DP Test_80BIT_CUSTOM_PATTERN from DPCD 0x250 through 0x259
+ * @enhanced_frame_cap: flag for enhanced frame capability.
+ */
+struct drm_dp_phy_test_params {
+ int link_rate;
+ u8 num_lanes;
+ u8 phy_pattern;
+ u8 hbr2_reset[2];
+ u8 custom80[10];
+ bool enhanced_frame_cap;
+};
+
+int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
+ struct drm_dp_phy_test_params *data);
+int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux,
+ struct drm_dp_phy_test_params *data, u8 dp_rev);
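+
+/*
+ * Editor's sketch (not part of this patch): the intended PHY compliance
+ * sequence. The test the sink requested via DPCD is read into
+ * &drm_dp_phy_test_params, the driver reprograms its link accordingly
+ * (foo_hw_program_link() is a made-up placeholder for that step), and the
+ * pattern is then set:
+ *
+ *	struct drm_dp_phy_test_params params;
+ *
+ *	if (drm_dp_get_phy_test_pattern(aux, &params))
+ *		return;
+ *	foo_hw_program_link(foo, params.link_rate, params.num_lanes);
+ *	drm_dp_set_phy_test_pattern(aux, &params, dp_rev);
+ */
+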
+int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4]);
+int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd);
+bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux);
+int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
+ u8 frl_mode);
+int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
+ u8 frl_type);
+int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux);
+int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux);
+
+bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux);
+int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask);
+void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
+ struct drm_connector *connector);
+bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]);
+int drm_dp_pcon_pps_default(struct drm_dp_aux *aux);
+int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128]);
+int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6]);
+bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ const u8 port_cap[4], u8 color_spc);
+int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc);
+
+#define DRM_DP_BW_OVERHEAD_MST BIT(0)
+#define DRM_DP_BW_OVERHEAD_UHBR BIT(1)
+#define DRM_DP_BW_OVERHEAD_SSC_REF_CLK BIT(2)
+#define DRM_DP_BW_OVERHEAD_FEC BIT(3)
+#define DRM_DP_BW_OVERHEAD_DSC BIT(4)
+
+int drm_dp_bw_overhead(int lane_count, int hactive,
+ int dsc_slice_count,
+ int bpp_x16, unsigned long flags);
+int drm_dp_bw_channel_coding_efficiency(bool is_uhbr);
+int drm_dp_max_dprx_data_rate(int max_link_rate, int max_lanes);
+
+ssize_t drm_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, struct dp_sdp *sdp);
+int drm_dp_link_symbol_cycles(int lane_count, int pixels, int dsc_slice_count,
+ int bpp_x16, int symbol_size, bool is_mst);
+
+#endif /* _DRM_DP_HELPER_H_ */
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
index bd1c39907b92..2cfe1d4bfc96 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -23,8 +23,9 @@
#define _DRM_DP_MST_HELPER_H_
#include <linux/types.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_fixed.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stackdepot.h>
@@ -46,22 +47,15 @@ struct drm_dp_mst_topology_ref_history {
};
#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */
-struct drm_dp_mst_branch;
-
-/**
- * struct drm_dp_vcpi - Virtual Channel Payload Identifier
- * @vcpi: Virtual channel ID.
- * @pbn: Payload Bandwidth Number for this channel
- * @aligned_pbn: PBN aligned with slot size
- * @num_slots: number of slots for this PBN
- */
-struct drm_dp_vcpi {
- int vcpi;
- int pbn;
- int aligned_pbn;
- int num_slots;
+enum drm_dp_mst_payload_allocation {
+ DRM_DP_MST_PAYLOAD_ALLOCATION_NONE,
+ DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL,
+ DRM_DP_MST_PAYLOAD_ALLOCATION_DFP,
+ DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE,
};
+struct drm_dp_mst_branch;
+
/**
* struct drm_dp_mst_port - MST port
* @port_num: port number
@@ -86,8 +80,9 @@ struct drm_dp_vcpi {
* @next: link to next port on this branch device
* @aux: i2c aux transport to talk to device connected to this port, protected
* by &drm_dp_mst_topology_mgr.base.lock.
+ * @passthrough_aux: parent aux to which DSC pass-through requests should be
+ * sent, only set if DSC pass-through is possible.
* @parent: branch device parent of this port
- * @vcpi: Virtual Channel Payload info for this port.
* @connector: DRM connector this port is connected to. Protected by
* &drm_dp_mst_topology_mgr.base.lock.
* @mgr: topology manager this port lives under.
@@ -140,9 +135,9 @@ struct drm_dp_mst_port {
*/
struct drm_dp_mst_branch *mstb;
struct drm_dp_aux aux; /* i2c bus for this port? */
+ struct drm_dp_aux *passthrough_aux;
struct drm_dp_mst_branch *parent;
- struct drm_dp_vcpi vcpi;
struct drm_connector *connector;
struct drm_dp_mst_topology_mgr *mgr;
@@ -150,12 +145,7 @@ struct drm_dp_mst_port {
* @cached_edid: for DP logical ports - make tiling work by ensuring
* that the EDID for all connectors is read immediately.
*/
- struct edid *cached_edid;
- /**
- * @has_audio: Tracks whether the sink connector to this port is
- * audio-capable.
- */
- bool has_audio;
+ const struct drm_edid *cached_edid;
/**
* @fec_capable: bool indicating if FEC can be supported up to that
@@ -232,6 +222,13 @@ struct drm_dp_mst_branch {
*/
struct list_head destroy_next;
+ /**
+ * @rad: Relative Address of the MST branch.
+ * For &drm_dp_mst_topology_mgr.mst_primary, the rad[] entries are all
+ * 0, unset and unused. For MST branches connected after mst_primary,
+ * in each element of rad[] the nibbles are ordered with the most
+ * significant 4 bits first and the least significant 4 bits second.
+ */
u8 rad[8];
u8 lct;
int num_ports;
@@ -254,18 +251,18 @@ struct drm_dp_mst_branch {
bool link_address_sent;
/* global unique identifier to identify branch devices */
- u8 guid[16];
+ guid_t guid;
};
struct drm_dp_nak_reply {
- u8 guid[16];
+ guid_t guid;
u8 reason;
u8 nak_data;
};
struct drm_dp_link_address_ack_reply {
- u8 guid[16];
+ guid_t guid;
u8 nports;
struct drm_dp_link_addr_reply_port {
bool input_port;
@@ -275,7 +272,7 @@ struct drm_dp_link_address_ack_reply {
bool ddps;
bool legacy_device_plug_status;
u8 dpcd_revision;
- u8 peer_guid[16];
+ guid_t peer_guid;
u8 num_sdp_streams;
u8 num_sdp_stream_sinks;
} ports[16];
@@ -358,7 +355,7 @@ struct drm_dp_allocate_payload_ack_reply {
};
struct drm_dp_connection_status_notify {
- u8 guid[16];
+ guid_t guid;
u8 port_number;
bool legacy_device_plug_status;
bool displayport_device_plug_status;
@@ -435,7 +432,7 @@ struct drm_dp_query_payload {
struct drm_dp_resource_status_notify {
u8 port_number;
- u8 guid[16];
+ guid_t guid;
u16 available_pbn;
};
@@ -527,33 +524,107 @@ struct drm_dp_mst_topology_cbs {
void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
};
-#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
-
-#define DP_PAYLOAD_LOCAL 1
-#define DP_PAYLOAD_REMOTE 2
-#define DP_PAYLOAD_DELETE_LOCAL 3
-
-struct drm_dp_payload {
- int payload_state;
- int start_slot;
- int num_slots;
- int vcpi;
-};
-
#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
-struct drm_dp_vcpi_allocation {
+/**
+ * struct drm_dp_mst_atomic_payload - Atomic state struct for an MST payload
+ *
+ * The primary atomic state structure for a given MST payload. Stores information like current
+ * bandwidth allocation, intended action for this payload, etc.
+ */
+struct drm_dp_mst_atomic_payload {
+ /** @port: The MST port assigned to this payload */
struct drm_dp_mst_port *port;
- int vcpi;
+
+ /**
+ * @vc_start_slot: The time slot that this payload starts on. Because payload start slots
+ * can't be determined ahead of time, the contents of this value are UNDEFINED at atomic
+ * check time. This shouldn't usually matter, as the start slot should never be relevant for
+ * atomic state computations.
+ *
+ * Since this value is determined at commit time instead of check time, this value is
+ * protected by the MST helpers ensuring that async commits operating on the given topology
+ * never run in parallel. In the event that a driver does need to read this value (e.g. to
+ * inform hardware of the starting timeslot for a payload), the driver may either:
+ *
+ * * Read this field during the atomic commit after
+ * drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the
+ * previous MST state's payload start slots have been copied over to the new state. Note
+ * that a new start slot won't be assigned/removed from this payload until
+ * drm_dp_add_payload_part1()/drm_dp_remove_payload_part2() have been called.
+ * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to
+ * get committed to hardware by calling drm_crtc_commit_wait() on each of the
+ * &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps.
+ *
+ * If neither of the two above solutions suffices (e.g. the driver needs to read the start
+ * slot in the middle of an atomic commit without waiting for some reason), then drivers
+ * should cache this value themselves after changing payloads. A sketch of the first
+ * approach follows this struct.
+ */
+ s8 vc_start_slot;
+
+ /** @vcpi: The Virtual Channel Payload Identifier */
+ u8 vcpi;
+ /**
+ * @time_slots:
+ * The number of timeslots allocated to this payload from the source DP Tx to
+ * the immediate downstream DP Rx
+ */
+ int time_slots;
+ /** @pbn: The payload bandwidth for this payload */
int pbn;
- bool dsc_enabled;
+
+ /** @delete: Whether or not we intend to delete this payload during this atomic commit */
+ bool delete : 1;
+ /** @dsc_enabled: Whether or not this payload has DSC enabled */
+ bool dsc_enabled : 1;
+
+ /** @payload_allocation_status: The allocation status of this payload */
+ enum drm_dp_mst_payload_allocation payload_allocation_status;
+
+ /** @next: The list node for this payload */
struct list_head next;
};
+/**
+ * struct drm_dp_mst_topology_state - DisplayPort MST topology atomic state
+ *
+ * This struct represents the atomic state of the toplevel DisplayPort MST manager
+ */
struct drm_dp_mst_topology_state {
+ /** @base: Base private state for atomic */
struct drm_private_state base;
- struct list_head vcpis;
+
+ /** @mgr: The topology manager */
struct drm_dp_mst_topology_mgr *mgr;
+
+ /**
+ * @pending_crtc_mask: A bitmask of all CRTCs this topology state touches; drivers may
+ * modify this to add additional dependencies if needed.
+ */
+ u32 pending_crtc_mask;
+ /**
+ * @commit_deps: A list of all CRTC commits affecting this topology, this field isn't
+ * populated until drm_dp_mst_atomic_wait_for_dependencies() is called.
+ */
+ struct drm_crtc_commit **commit_deps;
+ /** @num_commit_deps: The number of CRTC commits in @commit_deps */
+ size_t num_commit_deps;
+
+ /** @payload_mask: A bitmask of allocated VCPIs, used for VCPI assignments */
+ u32 payload_mask;
+ /** @payloads: The list of payloads being created/destroyed in this state */
+ struct list_head payloads;
+
+ /** @total_avail_slots: The total number of slots this topology can handle (63 or 64) */
+ u8 total_avail_slots;
+ /** @start_slot: The first usable time slot in this topology (1 or 0) */
+ u8 start_slot;
+
+ /**
+ * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this
+ * out itself.
+ */
+ fixed20_12 pbn_div;
};
#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
@@ -636,6 +707,27 @@ struct drm_dp_mst_topology_mgr {
bool payload_id_table_cleared : 1;
/**
+ * @reset_rx_state: The down request's reply and up request message
+ * receiver state must be reset, after the topology manager got
+ * removed. Protected by @lock.
+ */
+ bool reset_rx_state : 1;
+
+ /**
+ * @payload_count: The number of currently active payloads in hardware. This value is only
+ * intended to be used internally by MST helpers for payload tracking, and is only safe to
+ * read/write from the atomic commit (not check) context.
+ */
+ u8 payload_count;
+
+ /**
+ * @next_start_slot: The starting timeslot to use for new VC payloads. This value is used
+ * internally by MST helpers for payload tracking, and is only safe to read/write from the
+ * atomic commit (not check) context.
+ */
+ u8 next_start_slot;
+
+ /**
* @mst_primary: Pointer to the primary/first branch device.
*/
struct drm_dp_mst_branch *mst_primary;
@@ -648,10 +740,6 @@ struct drm_dp_mst_topology_mgr {
* @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
*/
u8 sink_count;
- /**
- * @pbn_div: PBN to slots divisor.
- */
- int pbn_div;
/**
* @funcs: Atomic helper callbacks
@@ -669,32 +757,6 @@ struct drm_dp_mst_topology_mgr {
struct list_head tx_msg_downq;
/**
- * @payload_lock: Protect payload information.
- */
- struct mutex payload_lock;
- /**
- * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
- * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of
- * this array is determined by @max_payloads.
- */
- struct drm_dp_vcpi **proposed_vcpis;
- /**
- * @payloads: Array of payloads. The size of this array is determined
- * by @max_payloads.
- */
- struct drm_dp_payload *payloads;
- /**
- * @payload_mask: Elements of @payloads actually in use. Since
- * reallocation of active outputs isn't possible gaps can be created by
- * disabling outputs out of order compared to how they've been enabled.
- */
- unsigned long payload_mask;
- /**
- * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
- */
- unsigned long vcpi_mask;
-
- /**
* @tx_waitq: Wait to queue stall for the tx worker.
*/
wait_queue_head_t tx_waitq;
@@ -769,11 +831,35 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
-bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
-int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
+/**
+ * enum drm_dp_mst_mode - sink's MST mode capability
+ */
+enum drm_dp_mst_mode {
+ /**
+ * @DRM_DP_SST: The sink supports neither MST nor single stream sideband
+ * messaging.
+ */
+ DRM_DP_SST,
+ /**
+ * @DRM_DP_MST: Sink supports MST, more than one stream and single
+ * stream sideband messaging.
+ */
+ DRM_DP_MST,
+ /**
+ * @DRM_DP_SST_SIDEBAND_MSG: Sink supports only one stream and single
+ * stream sideband messaging.
+ */
+ DRM_DP_SST_SIDEBAND_MSG,
+};
-int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
+enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
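+
+/*
+ * Editor's sketch (not part of this patch): acting on the tri-state MST
+ * capability. Only %DRM_DP_MST enables the topology manager; sinks that
+ * merely support sideband messaging over SST are driven as SST here.
+ *
+ *	switch (drm_dp_read_mst_cap(aux, dpcd)) {
+ *	case DRM_DP_MST:
+ *		ret = drm_dp_mst_topology_mgr_set_mst(mgr, true);
+ *		break;
+ *	case DRM_DP_SST_SIDEBAND_MSG:
+ *	case DRM_DP_SST:
+ *		ret = drm_dp_mst_topology_mgr_set_mst(mgr, false);
+ *		break;
+ *	}
+ */
+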
+int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr,
+ const u8 *esi,
+ u8 *ack,
+ bool *handled);
+void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr);
int
drm_dp_mst_detect_port(struct drm_connector *connector,
@@ -781,39 +867,39 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
-struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-
-int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count);
-
-int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
-
-bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn, int slots);
-
-int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-
-
-void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-
-
-void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port);
-
-
-int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
- int pbn);
+const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port);
+struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port);
+fixed20_12 drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count);
-int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);
+int drm_dp_calc_pbn_mode(int clock, int bpp);
+void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);
-int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
+int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_atomic_payload *payload);
+int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_atomic_payload *payload);
+void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_atomic_payload *payload);
+void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ const struct drm_dp_mst_atomic_payload *old_payload,
+ struct drm_dp_mst_atomic_payload *new_payload);
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
void drm_dp_mst_dump_topology(struct seq_file *m,
struct drm_dp_mst_topology_mgr *mgr);
+void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr);
+
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
@@ -829,36 +915,68 @@ int drm_dp_mst_connector_late_register(struct drm_connector *connector,
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
struct drm_dp_mst_port *port);
-struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_mgr *mgr);
+struct drm_dp_mst_topology_state *
+drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
+struct drm_dp_mst_topology_state *
+drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
+struct drm_dp_mst_topology_state *
+drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
+struct drm_dp_mst_atomic_payload *
+drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
+ struct drm_dp_mst_port *port);
+bool drm_dp_mst_port_downstream_of_parent(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ struct drm_dp_mst_port *parent);
int __must_check
-drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
+drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn,
- int pbn_div);
+ struct drm_dp_mst_port *port, int pbn);
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
struct drm_dp_mst_port *port,
- int pbn, int pbn_div,
- bool enable);
+ int pbn, bool enable);
int __must_check
drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr);
int __must_check
-drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
+drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
+void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state);
+int __must_check drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state);
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, bool power_up);
int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
struct drm_dp_query_stream_enc_status_ack_reply *status);
+int __must_check drm_dp_mst_atomic_check_mgr(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_port **failing_port);
int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
+int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
+ struct drm_dp_mst_topology_mgr *mgr);
void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
+static inline
+bool drm_dp_mst_port_is_logical(struct drm_dp_mst_port *port)
+{
+ return port->port_num >= DP_MST_LOGICAL_PORT_0;
+}
+
+struct drm_dp_aux *drm_dp_mst_aux_for_parent(struct drm_dp_mst_port *port);
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);
+static inline struct drm_dp_mst_topology_state *
+to_drm_dp_mst_topology_state(struct drm_private_state *state)
+{
+ return container_of(state, struct drm_dp_mst_topology_state, base);
+}
+
extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
/**
diff --git a/include/drm/display/drm_dp_tunnel.h b/include/drm/display/drm_dp_tunnel.h
new file mode 100644
index 000000000000..87212c847915
--- /dev/null
+++ b/include/drm/display/drm_dp_tunnel.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __DRM_DP_TUNNEL_H__
+#define __DRM_DP_TUNNEL_H__
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct drm_dp_aux;
+
+struct drm_device;
+
+struct drm_atomic_state;
+struct drm_dp_tunnel_mgr;
+struct drm_dp_tunnel_state;
+
+struct ref_tracker;
+
+struct drm_dp_tunnel_ref {
+ struct drm_dp_tunnel *tunnel;
+ struct ref_tracker *tracker;
+};
+
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL
+
+struct drm_dp_tunnel *
+drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker);
+
+void
+drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker);
+
+static inline void drm_dp_tunnel_ref_get(struct drm_dp_tunnel *tunnel,
+ struct drm_dp_tunnel_ref *tunnel_ref)
+{
+ tunnel_ref->tunnel = drm_dp_tunnel_get(tunnel, &tunnel_ref->tracker);
+}
+
+static inline void drm_dp_tunnel_ref_put(struct drm_dp_tunnel_ref *tunnel_ref)
+{
+ drm_dp_tunnel_put(tunnel_ref->tunnel, &tunnel_ref->tracker);
+ tunnel_ref->tunnel = NULL;
+}
+
+struct drm_dp_tunnel *
+drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
+ struct drm_dp_aux *aux);
+int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel);
+
+int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel);
+int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel);
+bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel);
+int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw);
+int drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel);
+int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel);
+
+void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel);
+
+int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr,
+ struct drm_dp_aux *aux);
+
+int drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel);
+int drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel);
+int drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel);
+
+const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel);
+
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
+ struct drm_dp_tunnel *tunnel);
+
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_old_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel);
+
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel);
+
+int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
+ struct drm_dp_tunnel *tunnel,
+ u8 stream_id, int bw);
+int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel,
+ u32 *stream_mask);
+
+int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
+ u32 *failed_stream_mask);
+
+int drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state);
+
+struct drm_dp_tunnel_mgr *
+drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count);
+void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr);
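+
+/*
+ * Editor's sketch (not part of this patch): the expected bandwidth
+ * allocation flow on a tunnel. Atomic-state plumbing and teardown are
+ * elided, and required_bw stands in for a driver-computed value.
+ *
+ *	tunnel = drm_dp_tunnel_detect(mgr, aux);
+ *	if (IS_ERR(tunnel))
+ *		return PTR_ERR(tunnel);
+ *	ret = drm_dp_tunnel_enable_bw_alloc(tunnel);
+ *	if (!ret)
+ *		ret = drm_dp_tunnel_alloc_bw(tunnel, required_bw);
+ */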
+
+#else
+
+static inline struct drm_dp_tunnel *
+drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker)
+{
+ return NULL;
+}
+
+static inline void
+drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker) {}
+
+static inline void drm_dp_tunnel_ref_get(struct drm_dp_tunnel *tunnel,
+ struct drm_dp_tunnel_ref *tunnel_ref) {}
+
+static inline void drm_dp_tunnel_ref_put(struct drm_dp_tunnel_ref *tunnel_ref) {}
+
+static inline struct drm_dp_tunnel *
+drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
+ struct drm_dp_aux *aux)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int
+drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel)
+{
+ return 0;
+}
+
+static inline int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel)
+{
+ return false;
+}
+
+static inline int
+drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel)
+{
+ return -1;
+}
+
+static inline int
+drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel) {}
+
+static inline int
+drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr,
+ struct drm_dp_aux *aux)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel)
+{
+ return 0;
+}
+
+static inline int
+drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel)
+{
+ return 0;
+}
+
+static inline int
+drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel)
+{
+ return -1;
+}
+
+static inline const char *
+drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel)
+{
+ return NULL;
+}
+
+static inline struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
+ struct drm_dp_tunnel *tunnel)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int
+drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
+ struct drm_dp_tunnel *tunnel,
+ u8 stream_id, int bw)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
+ const struct drm_dp_tunnel *tunnel,
+ u32 *stream_mask)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
+ u32 *failed_stream_mask)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state)
+{
+ return 0;
+}
+
+static inline struct drm_dp_tunnel_mgr *
+drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline
+void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr) {}
+
+#endif /* CONFIG_DRM_DISPLAY_DP_TUNNEL */
+
+#endif /* __DRM_DP_TUNNEL_H__ */
diff --git a/include/drm/drm_dsc.h b/include/drm/display/drm_dsc.h
index cf43561e60fa..bbbe7438473d 100644
--- a/include/drm/drm_dsc.h
+++ b/include/drm/display/drm_dsc.h
@@ -8,7 +8,7 @@
#ifndef DRM_DSC_H_
#define DRM_DSC_H_
-#include <drm/drm_dp_helper.h>
+#include <drm/display/drm_dp.h>
/* VESA Display Stream Compression DSC 1.2 constants */
#define DSC_NUM_BUF_RANGES 15
@@ -40,9 +40,6 @@
#define DSC_PPS_RC_RANGE_MINQP_SHIFT 11
#define DSC_PPS_RC_RANGE_MAXQP_SHIFT 6
#define DSC_PPS_NATIVE_420_SHIFT 1
-#define DSC_1_2_MAX_LINEBUF_DEPTH_BITS 16
-#define DSC_1_2_MAX_LINEBUF_DEPTH_VAL 0
-#define DSC_1_1_MAX_LINEBUF_DEPTH_BITS 13
/**
* struct drm_dsc_rc_range_parameters - DSC Rate Control range parameters
@@ -602,10 +599,4 @@ struct drm_dsc_pps_infoframe {
struct drm_dsc_picture_parameter_set pps_payload;
} __packed;
-void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header);
-int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size);
-void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp,
- const struct drm_dsc_config *dsc_cfg);
-int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg);
-
#endif /* _DRM_DSC_H_ */
diff --git a/include/drm/display/drm_dsc_helper.h b/include/drm/display/drm_dsc_helper.h
new file mode 100644
index 000000000000..2c2b9033f60f
--- /dev/null
+++ b/include/drm/display/drm_dsc_helper.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Authors:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#ifndef DRM_DSC_HELPER_H_
+#define DRM_DSC_HELPER_H_
+
+#include <drm/display/drm_dsc.h>
+
+enum drm_dsc_params_type {
+ DRM_DSC_1_2_444,
+ DRM_DSC_1_1_PRE_SCR, /* legacy params from DSC 1.1 */
+ DRM_DSC_1_2_422,
+ DRM_DSC_1_2_420,
+};
+
+struct drm_printer;
+
+void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header);
+int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size);
+void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp,
+ const struct drm_dsc_config *dsc_cfg);
+void drm_dsc_set_const_params(struct drm_dsc_config *vdsc_cfg);
+void drm_dsc_set_rc_buf_thresh(struct drm_dsc_config *vdsc_cfg);
+int drm_dsc_setup_rc_params(struct drm_dsc_config *vdsc_cfg, enum drm_dsc_params_type type);
+int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg);
+u8 drm_dsc_initial_scale_value(const struct drm_dsc_config *dsc);
+u32 drm_dsc_flatness_det_thresh(const struct drm_dsc_config *dsc);
+u32 drm_dsc_get_bpp_int(const struct drm_dsc_config *vdsc_cfg);
+void drm_dsc_dump_config(struct drm_printer *p, int indent, const struct drm_dsc_config *cfg);
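+
+/*
+ * Editor's sketch (not part of this patch): the usual ordering of these
+ * helpers when building a DSC configuration; the driver fills in the
+ * mode-dependent fields of vdsc_cfg beforehand (elided here).
+ *
+ *	drm_dsc_set_const_params(&vdsc_cfg);
+ *	drm_dsc_set_rc_buf_thresh(&vdsc_cfg);
+ *	ret = drm_dsc_setup_rc_params(&vdsc_cfg, DRM_DSC_1_2_444);
+ *	if (!ret)
+ *		ret = drm_dsc_compute_rc_parameters(&vdsc_cfg);
+ *	if (!ret)
+ *		drm_dsc_pps_payload_pack(&pps_sdp.pps_payload, &vdsc_cfg);
+ */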
+
+#endif /* _DRM_DSC_HELPER_H_ */
+
diff --git a/include/drm/drm_hdcp.h b/include/drm/display/drm_hdcp.h
index 0b1111e3228e..96a99b1377c0 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/display/drm_hdcp.h
@@ -6,8 +6,8 @@
* Sean Paul <seanpaul@chromium.org>
*/
-#ifndef _DRM_HDCP_H_INCLUDED_
-#define _DRM_HDCP_H_INCLUDED_
+#ifndef _DRM_HDCP_H_
+#define _DRM_HDCP_H_
#include <linux/types.h>
@@ -291,16 +291,6 @@ struct hdcp_srm_header {
u8 srm_gen_no;
} __packed;
-struct drm_device;
-struct drm_connector;
-
-int drm_hdcp_check_ksvs_revoked(struct drm_device *dev,
- u8 *ksvs, u32 ksv_count);
-int drm_connector_attach_content_protection_property(
- struct drm_connector *connector, bool hdcp_content_type);
-void drm_hdcp_update_content_protection(struct drm_connector *connector,
- u64 val);
-
/* Content Type classification for HDCP2.2 vs others */
#define DRM_MODE_HDCP_CONTENT_TYPE0 0
#define DRM_MODE_HDCP_CONTENT_TYPE1 1
diff --git a/include/drm/display/drm_hdcp_helper.h b/include/drm/display/drm_hdcp_helper.h
new file mode 100644
index 000000000000..8aaf87bf2735
--- /dev/null
+++ b/include/drm/display/drm_hdcp_helper.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * Authors:
+ * Sean Paul <seanpaul@chromium.org>
+ */
+
+#ifndef _DRM_HDCP_HELPER_H_INCLUDED_
+#define _DRM_HDCP_HELPER_H_INCLUDED_
+
+#include <drm/display/drm_hdcp.h>
+
+struct drm_device;
+struct drm_connector;
+
+int drm_hdcp_check_ksvs_revoked(struct drm_device *dev, u8 *ksvs, u32 ksv_count);
+int drm_connector_attach_content_protection_property(struct drm_connector *connector,
+ bool hdcp_content_type);
+void drm_hdcp_update_content_protection(struct drm_connector *connector, u64 val);
+
+#endif
diff --git a/include/drm/display/drm_hdmi_audio_helper.h b/include/drm/display/drm_hdmi_audio_helper.h
new file mode 100644
index 000000000000..44d910bdc72d
--- /dev/null
+++ b/include/drm/display/drm_hdmi_audio_helper.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_DISPLAY_HDMI_AUDIO_HELPER_H_
+#define DRM_DISPLAY_HDMI_AUDIO_HELPER_H_
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_connector_hdmi_audio_funcs;
+
+struct device;
+
+int drm_connector_hdmi_audio_init(struct drm_connector *connector,
+ struct device *hdmi_codec_dev,
+ const struct drm_connector_hdmi_audio_funcs *funcs,
+ unsigned int max_i2s_playback_channels,
+ u64 i2s_formats,
+ bool spdif_playback,
+ int sound_dai_port);
+void drm_connector_hdmi_audio_plugged_notify(struct drm_connector *connector,
+ bool plugged);
+
+#endif
diff --git a/include/drm/display/drm_hdmi_cec_helper.h b/include/drm/display/drm_hdmi_cec_helper.h
new file mode 100644
index 000000000000..fd8f4d2f02c1
--- /dev/null
+++ b/include/drm/display/drm_hdmi_cec_helper.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_DISPLAY_HDMI_CEC_HELPER
+#define DRM_DISPLAY_HDMI_CEC_HELPER
+
+#include <linux/types.h>
+
+struct drm_connector;
+
+struct cec_msg;
+struct device;
+
+struct drm_connector_hdmi_cec_funcs {
+ /**
+ * @init: perform hardware-specific initialization before registering the CEC adapter
+ */
+ int (*init)(struct drm_connector *connector);
+
+ /**
+ * @uninit: perform hardware-specific teardown for the CEC adapter
+ */
+ void (*uninit)(struct drm_connector *connector);
+
+ /**
+ * @enable: enable or disable CEC adapter
+ */
+ int (*enable)(struct drm_connector *connector, bool enable);
+
+ /**
+ * @log_addr: set adapter's logical address, can be called multiple
+ * times if adapter supports several LAs
+ */
+ int (*log_addr)(struct drm_connector *connector, u8 logical_addr);
+
+ /**
+ * @transmit: start transmission of the specified CEC message
+ */
+ int (*transmit)(struct drm_connector *connector, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg);
+};
+
+int drmm_connector_hdmi_cec_register(struct drm_connector *connector,
+ const struct drm_connector_hdmi_cec_funcs *funcs,
+ const char *name,
+ u8 available_las,
+ struct device *dev);
+
+void drm_connector_hdmi_cec_received_msg(struct drm_connector *connector,
+ struct cec_msg *msg);
+
+void drm_connector_hdmi_cec_transmit_done(struct drm_connector *connector,
+ u8 status,
+ u8 arb_lost_cnt, u8 nack_cnt,
+ u8 low_drive_cnt, u8 error_cnt);
+
+void drm_connector_hdmi_cec_transmit_attempt_done(struct drm_connector *connector,
+ u8 status);
+
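+/*
+ * Editor's sketch (not part of this patch): a hypothetical driver ("foo")
+ * registering a CEC adapter through this helper. The foo_cec_*() callbacks
+ * are assumptions; completions are reported back via
+ * drm_connector_hdmi_cec_transmit_done() (or the _attempt_done() variant)
+ * and received messages via drm_connector_hdmi_cec_received_msg().
+ *
+ *	static const struct drm_connector_hdmi_cec_funcs foo_cec_funcs = {
+ *		.enable   = foo_cec_enable,
+ *		.log_addr = foo_cec_log_addr,
+ *		.transmit = foo_cec_transmit,
+ *	};
+ *
+ *	ret = drmm_connector_hdmi_cec_register(connector, &foo_cec_funcs,
+ *					       "foo-cec", 4 /* LAs */, dev);
+ */
+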
+#if IS_ENABLED(CONFIG_DRM_DISPLAY_HDMI_CEC_NOTIFIER_HELPER)
+int drmm_connector_hdmi_cec_notifier_register(struct drm_connector *connector,
+ const char *port_name,
+ struct device *dev);
+#else
+static inline int drmm_connector_hdmi_cec_notifier_register(struct drm_connector *connector,
+ const char *port_name,
+ struct device *dev)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/include/drm/display/drm_hdmi_helper.h b/include/drm/display/drm_hdmi_helper.h
new file mode 100644
index 000000000000..09145c9ee9fc
--- /dev/null
+++ b/include/drm/display/drm_hdmi_helper.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_HDMI_HELPER
+#define DRM_HDMI_HELPER
+
+#include <linux/hdmi.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct drm_display_mode;
+
+void
+drm_hdmi_avi_infoframe_colorimetry(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
+void
+drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
+int
+drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
+void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
+unsigned long long
+drm_hdmi_compute_mode_clock(const struct drm_display_mode *mode,
+ unsigned int bpc, enum hdmi_colorspace fmt);
+
+void
+drm_hdmi_acr_get_n_cts(unsigned long long tmds_char_rate,
+ unsigned int sample_rate,
+ unsigned int *out_n,
+ unsigned int *out_cts);
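+
+/*
+ * Editor's sketch (not part of this patch): deriving audio N/CTS for a mode.
+ * The TMDS character rate is computed first, then fed to the ACR helper;
+ * 8 bpc RGB and a 48 kHz sample rate are assumptions for the example.
+ *
+ *	tmds_rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);
+ *	drm_hdmi_acr_get_n_cts(tmds_rate, 48000, &n, &cts);
+ */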
+
+#endif
diff --git a/include/drm/display/drm_hdmi_state_helper.h b/include/drm/display/drm_hdmi_state_helper.h
new file mode 100644
index 000000000000..2349c0d0f00f
--- /dev/null
+++ b/include/drm/display/drm_hdmi_state_helper.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_HDMI_STATE_HELPER_H_
+#define DRM_HDMI_STATE_HELPER_H_
+
+struct drm_atomic_state;
+struct drm_connector;
+struct drm_connector_state;
+struct drm_display_mode;
+struct hdmi_audio_infoframe;
+
+enum drm_connector_status;
+
+void __drm_atomic_helper_connector_hdmi_reset(struct drm_connector *connector,
+ struct drm_connector_state *new_conn_state);
+
+int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
+ struct drm_atomic_state *state);
+
+int drm_atomic_helper_connector_hdmi_update_audio_infoframe(struct drm_connector *connector,
+ struct hdmi_audio_infoframe *frame);
+int drm_atomic_helper_connector_hdmi_clear_audio_infoframe(struct drm_connector *connector);
+int drm_atomic_helper_connector_hdmi_update_infoframes(struct drm_connector *connector,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector,
+ enum drm_connector_status status);
+void drm_atomic_helper_connector_hdmi_force(struct drm_connector *connector);
+
+enum drm_mode_status
+drm_hdmi_connector_mode_valid(struct drm_connector *connector,
+ const struct drm_display_mode *mode);
+
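+/*
+ * Usage sketch (illustrative only, not part of this header): drivers
+ * using the HDMI connector infrastructure typically plug these helpers
+ * into their connector helper funcs; foo_helper_funcs is hypothetical.
+ *
+ *	static const struct drm_connector_helper_funcs foo_helper_funcs = {
+ *		.atomic_check = drm_atomic_helper_connector_hdmi_check,
+ *		.mode_valid = drm_hdmi_connector_mode_valid,
+ *	};
+ *
+ * __drm_atomic_helper_connector_hdmi_reset() is then called from the
+ * connector's &drm_connector_funcs.reset implementation, and
+ * drm_atomic_helper_connector_hdmi_update_infoframes() from the commit
+ * path once the new mode has been programmed.
+ */
+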
+#endif // DRM_HDMI_STATE_HELPER_H_
diff --git a/include/drm/drm_scdc_helper.h b/include/drm/display/drm_scdc.h
index 6a483533aae4..3d58f37e8ed8 100644
--- a/include/drm/drm_scdc_helper.h
+++ b/include/drm/display/drm_scdc.h
@@ -21,11 +21,8 @@
* DEALINGS IN THE SOFTWARE.
*/
-#ifndef DRM_SCDC_HELPER_H
-#define DRM_SCDC_HELPER_H
-
-#include <linux/i2c.h>
-#include <linux/types.h>
+#ifndef DRM_SCDC_H
+#define DRM_SCDC_H
#define SCDC_SINK_VERSION 0x01
@@ -88,49 +85,4 @@
#define SCDC_MANUFACTURER_SPECIFIC 0xde
#define SCDC_MANUFACTURER_SPECIFIC_SIZE 34
-ssize_t drm_scdc_read(struct i2c_adapter *adapter, u8 offset, void *buffer,
- size_t size);
-ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset,
- const void *buffer, size_t size);
-
-/**
- * drm_scdc_readb - read a single byte from SCDC
- * @adapter: I2C adapter
- * @offset: offset of register to read
- * @value: return location for the register value
- *
- * Reads a single byte from SCDC. This is a convenience wrapper around the
- * drm_scdc_read() function.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-static inline int drm_scdc_readb(struct i2c_adapter *adapter, u8 offset,
- u8 *value)
-{
- return drm_scdc_read(adapter, offset, value, sizeof(*value));
-}
-
-/**
- * drm_scdc_writeb - write a single byte to SCDC
- * @adapter: I2C adapter
- * @offset: offset of register to read
- * @value: return location for the register value
- *
- * Writes a single byte to SCDC. This is a convenience wrapper around the
- * drm_scdc_write() function.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-static inline int drm_scdc_writeb(struct i2c_adapter *adapter, u8 offset,
- u8 value)
-{
- return drm_scdc_write(adapter, offset, &value, sizeof(value));
-}
-
-bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter);
-
-bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable);
-bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set);
#endif
diff --git a/include/drm/display/drm_scdc_helper.h b/include/drm/display/drm_scdc_helper.h
new file mode 100644
index 000000000000..34600476a1b9
--- /dev/null
+++ b/include/drm/display/drm_scdc_helper.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2015 NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DRM_SCDC_HELPER_H
+#define DRM_SCDC_HELPER_H
+
+#include <linux/types.h>
+
+#include <drm/display/drm_scdc.h>
+
+struct drm_connector;
+struct i2c_adapter;
+
+ssize_t drm_scdc_read(struct i2c_adapter *adapter, u8 offset, void *buffer,
+ size_t size);
+ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset,
+ const void *buffer, size_t size);
+
+/**
+ * drm_scdc_readb - read a single byte from SCDC
+ * @adapter: I2C adapter
+ * @offset: offset of register to read
+ * @value: return location for the register value
+ *
+ * Reads a single byte from SCDC. This is a convenience wrapper around the
+ * drm_scdc_read() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_scdc_readb(struct i2c_adapter *adapter, u8 offset,
+ u8 *value)
+{
+ return drm_scdc_read(adapter, offset, value, sizeof(*value));
+}
+
+/**
+ * drm_scdc_writeb - write a single byte to SCDC
+ * @adapter: I2C adapter
+ * @offset: offset of register to write
+ * @value: value to write to the register
+ *
+ * Writes a single byte to SCDC. This is a convenience wrapper around the
+ * drm_scdc_write() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_scdc_writeb(struct i2c_adapter *adapter, u8 offset,
+ u8 value)
+{
+ return drm_scdc_write(adapter, offset, &value, sizeof(value));
+}
+
+bool drm_scdc_get_scrambling_status(struct drm_connector *connector);
+
+bool drm_scdc_set_scrambling(struct drm_connector *connector, bool enable);
+bool drm_scdc_set_high_tmds_clock_ratio(struct drm_connector *connector, bool set);
+
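+/*
+ * Usage sketch (illustrative only, not part of this header): HDMI 2.0
+ * requires scrambling for TMDS character rates above 340 MHz, so a
+ * driver's enable path might do something like:
+ *
+ *	bool scrambling = tmds_char_rate > 340 * 1000 * 1000;
+ *
+ *	drm_scdc_set_high_tmds_clock_ratio(connector, scrambling);
+ *	drm_scdc_set_scrambling(connector, scrambling);
+ *
+ * where "connector" is the HDMI &struct drm_connector being enabled.
+ */
+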
+#endif
diff --git a/include/drm/drm_accel.h b/include/drm/drm_accel.h
new file mode 100644
index 000000000000..20a665ec6f16
--- /dev/null
+++ b/include/drm/drm_accel.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef DRM_ACCEL_H_
+#define DRM_ACCEL_H_
+
+#include <drm/drm_file.h>
+
+#define ACCEL_MAJOR 261
+#define ACCEL_MAX_MINORS 256
+
+/**
+ * DRM_ACCEL_FOPS - Default drm accelerators file operations
+ *
+ * This macro provides a shorthand for setting the accelerator file ops in the
+ * &file_operations structure. If all you need are the default ops, use
+ * DEFINE_DRM_ACCEL_FOPS instead.
+ */
+#define DRM_ACCEL_FOPS \
+ .open = accel_open,\
+ .release = drm_release,\
+ .unlocked_ioctl = drm_ioctl,\
+ .compat_ioctl = drm_compat_ioctl,\
+ .poll = drm_poll,\
+ .read = drm_read,\
+ .llseek = noop_llseek, \
+ .mmap = drm_gem_mmap, \
+ .fop_flags = FOP_UNSIGNED_OFFSET
+
+/**
+ * DEFINE_DRM_ACCEL_FOPS() - macro to generate file operations for accelerator drivers
+ * @name: name for the generated structure
+ *
+ * This macro autogenerates a suitable &struct file_operations for accelerator-based
+ * drivers, which can be assigned to &drm_driver.fops. Note that this structure
+ * cannot be shared between drivers, because it contains a reference to the
+ * current module using THIS_MODULE.
+ *
+ * Note that the declaration is already marked as static - if you need a
+ * non-static version of this you're probably doing it wrong and will break the
+ * THIS_MODULE reference by accident.
+ */
+#define DEFINE_DRM_ACCEL_FOPS(name) \
+ static const struct file_operations name = {\
+ .owner = THIS_MODULE,\
+ DRM_ACCEL_FOPS,\
+ }
+
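+/*
+ * Usage sketch (illustrative only, not part of this header): a compute
+ * accelerator driver generates its file operations with
+ * DEFINE_DRM_ACCEL_FOPS() and points &drm_driver.fops at them. The
+ * foo_* names are hypothetical.
+ *
+ *	DEFINE_DRM_ACCEL_FOPS(foo_accel_fops);
+ *
+ *	static const struct drm_driver foo_accel_driver = {
+ *		.driver_features = DRIVER_COMPUTE_ACCEL,
+ *		.fops = &foo_accel_fops,
+ *	};
+ */
+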
+#if IS_ENABLED(CONFIG_DRM_ACCEL)
+
+extern struct xarray accel_minors_xa;
+
+void accel_core_exit(void);
+int accel_core_init(void);
+void accel_set_device_instance_params(struct device *kdev, int index);
+int accel_open(struct inode *inode, struct file *filp);
+void accel_debugfs_register(struct drm_device *dev);
+
+#else
+
+static inline void accel_core_exit(void)
+{
+}
+
+static inline int __init accel_core_init(void)
+{
+ /* Return 0 to allow drm_core_init to complete successfully */
+ return 0;
+}
+
+static inline void accel_set_device_instance_params(struct device *kdev, int index)
+{
+}
+
+static inline void accel_debugfs_register(struct drm_device *dev)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_DRM_ACCEL) */
+
+#endif /* DRM_ACCEL_H_ */
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
deleted file mode 100644
index f3136750c490..000000000000
--- a/include/drm/drm_agpsupport.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _DRM_AGPSUPPORT_H_
-#define _DRM_AGPSUPPORT_H_
-
-#include <linux/agp_backend.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <uapi/drm/drm.h>
-
-struct drm_device;
-struct drm_file;
-
-struct drm_agp_head {
- struct agp_kern_info agp_info;
- struct list_head memory;
- unsigned long mode;
- struct agp_bridge_data *bridge;
- int enabled;
- int acquired;
- unsigned long base;
- int agp_mtrr;
- int cant_use_aperture;
- unsigned long page_mask;
-};
-
-#if IS_ENABLED(CONFIG_AGP)
-
-struct drm_agp_head *drm_agp_init(struct drm_device *dev);
-void drm_legacy_agp_clear(struct drm_device *dev);
-int drm_agp_acquire(struct drm_device *dev);
-int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_release(struct drm_device *dev);
-int drm_agp_release_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
-int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
-int drm_agp_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
-int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
-int drm_agp_free_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
-int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
-int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-#else /* CONFIG_AGP */
-
-static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
-{
- return NULL;
-}
-
-static inline void drm_legacy_agp_clear(struct drm_device *dev)
-{
-}
-
-static inline int drm_agp_acquire(struct drm_device *dev)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_release(struct drm_device *dev)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_enable(struct drm_device *dev,
- struct drm_agp_mode mode)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_info(struct drm_device *dev,
- struct drm_agp_info *info)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_alloc(struct drm_device *dev,
- struct drm_agp_buffer *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_free(struct drm_device *dev,
- struct drm_agp_buffer *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_unbind(struct drm_device *dev,
- struct drm_agp_binding *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_bind(struct drm_device *dev,
- struct drm_agp_binding *request)
-{
- return -ENODEV;
-}
-
-#endif /* CONFIG_AGP */
-
-#endif /* _DRM_AGPSUPPORT_H_ */
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index ac5a28eff2c8..43783891d359 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -30,6 +30,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_util.h>
+#include <drm/drm_colorop.h>
/**
* struct drm_crtc_commit - track modeset commits on a CRTC
@@ -157,14 +158,51 @@ struct drm_crtc_commit {
bool abort_completion;
};
+struct __drm_colorops_state {
+ struct drm_colorop *ptr;
+ struct drm_colorop_state *state, *old_state, *new_state;
+};
+
struct __drm_planes_state {
struct drm_plane *ptr;
- struct drm_plane_state *state, *old_state, *new_state;
+
+ /**
+ * @state_to_destroy:
+ *
+ * Used to track the &drm_plane_state we will need to free when
+ * tearing down the associated &drm_atomic_state in
+ * &drm_mode_config_funcs.atomic_state_clear or
+ * drm_atomic_state_default_clear().
+ *
+ * Before a commit, and the call to
+ * drm_atomic_helper_swap_state() in particular, it points to
+ * the same state as @new_state. After a commit, it points to
+ * the same state as @old_state.
+ */
+ struct drm_plane_state *state_to_destroy;
+
+ struct drm_plane_state *old_state, *new_state;
};
struct __drm_crtcs_state {
struct drm_crtc *ptr;
- struct drm_crtc_state *state, *old_state, *new_state;
+
+ /**
+ * @state_to_destroy:
+ *
+ * Used to track the &drm_crtc_state we will need to free when
+ * tearing down the associated &drm_atomic_state in
+ * &drm_mode_config_funcs.atomic_state_clear or
+ * drm_atomic_state_default_clear().
+ *
+ * Before a commit, and the call to
+ * drm_atomic_helper_swap_state() in particular, it points to
+ * the same state as @new_state. After a commit, it points to
+ * the same state as @old_state.
+ */
+ struct drm_crtc_state *state_to_destroy;
+
+ struct drm_crtc_state *old_state, *new_state;
/**
* @commit:
@@ -182,7 +220,24 @@ struct __drm_crtcs_state {
struct __drm_connnectors_state {
struct drm_connector *ptr;
- struct drm_connector_state *state, *old_state, *new_state;
+
+ /**
+ * @state_to_destroy:
+ *
+ * Used to track the &drm_connector_state we will need to free
+ * when tearing down the associated &drm_atomic_state in
+ * &drm_mode_config_funcs.atomic_state_clear or
+ * drm_atomic_state_default_clear().
+ *
+ * Before a commit, and the call to
+ * drm_atomic_helper_swap_state() in particular, it points to
+ * the same state as @new_state. After a commit, it points to
+ * the same state as @old_state.
+ */
+ struct drm_connector_state *state_to_destroy;
+
+ struct drm_connector_state *old_state, *new_state;
+
/**
* @out_fence_ptr:
*
@@ -227,6 +282,18 @@ struct drm_private_state_funcs {
*/
void (*atomic_destroy_state)(struct drm_private_obj *obj,
struct drm_private_state *state);
+
+ /**
+ * @atomic_print_state:
+ *
+ * If driver subclasses &struct drm_private_state, it should implement
+ * this optional hook for printing additional driver specific state.
+ *
+ * Do not call this directly, use drm_atomic_private_obj_print_state()
+ * instead.
+ */
+ void (*atomic_print_state)(struct drm_printer *p,
+ const struct drm_private_state *state);
};
/**
@@ -311,41 +378,99 @@ struct drm_private_obj {
/**
* struct drm_private_state - base struct for driver private object state
- * @state: backpointer to global drm_atomic_state
*
- * Currently only contains a backpointer to the overall atomic update, but in
- * the future also might hold synchronization information similar to e.g.
- * &drm_crtc.commit.
+ * Currently only contains a backpointer to the overall atomic update
+ * and the relevant private object, but in the future it might also hold
+ * synchronization information similar to e.g. &drm_crtc.commit.
*/
struct drm_private_state {
+ /**
+ * @state: backpointer to global drm_atomic_state
+ */
struct drm_atomic_state *state;
+
+ /**
+ * @obj: backpointer to the private object
+ */
+ struct drm_private_obj *obj;
};
struct __drm_private_objs_state {
struct drm_private_obj *ptr;
- struct drm_private_state *state, *old_state, *new_state;
+
+ /**
+ * @state_to_destroy:
+ *
+ * Used to track the &drm_private_state we will need to free
+ * when tearing down the associated &drm_atomic_state in
+ * &drm_mode_config_funcs.atomic_state_clear or
+ * drm_atomic_state_default_clear().
+ *
+ * Before a commit, and the call to
+ * drm_atomic_helper_swap_state() in particular, it points to
+ * the same state as @new_state. After a commit, it points to
+ * the same state as @old_state.
+ */
+ struct drm_private_state *state_to_destroy;
+
+ struct drm_private_state *old_state, *new_state;
};
/**
- * struct drm_atomic_state - the global state object for atomic updates
- * @ref: count of all references to this state (will not be freed until zero)
- * @dev: parent DRM device
- * @async_update: hint for asynchronous plane update
- * @planes: pointer to array of structures with per-plane data
- * @crtcs: pointer to array of CRTC pointers
- * @num_connector: size of the @connectors and @connector_states arrays
- * @connectors: pointer to array of structures with per-connector data
- * @num_private_objs: size of the @private_objs array
- * @private_objs: pointer to array of private object pointers
- * @acquire_ctx: acquire context for this atomic modeset state update
+ * struct drm_atomic_state - Atomic commit structure
+ *
+ * This structure is the kernel counterpart of &struct drm_mode_atomic and
+ * represents an atomic commit that transitions from an old to a new display
+ * state. It contains all the objects affected by the atomic commit, along
+ * with both their new state structures and pointers to their old state
+ * structures.
*
* States are added to an atomic update by calling drm_atomic_get_crtc_state(),
* drm_atomic_get_plane_state(), drm_atomic_get_connector_state(), or for
* private state structures, drm_atomic_get_private_obj_state().
+ *
+ * NOTE: struct drm_atomic_state first started as a single collection of
+ * entity state pointers (drm_plane_state, drm_crtc_state, etc.).
+ *
+ * At atomic_check time, you could get the state about to be committed
+ * from drm_atomic_state, and the one currently running from the
+ * entity's state pointer (drm_crtc.state, for example). After the call
+ * to drm_atomic_helper_swap_state(), the entity's state pointer would
+ * contain the state previously checked, and the drm_atomic_state
+ * structure the old state.
+ *
+ * Over time, and in order to avoid confusion, drm_atomic_state has
+ * grown to have both the old state (i.e., the state we replace) and the
+ * new state (i.e., the state we want to apply). Those names are stable
+ * during the commit process, which makes it easier to reason about.
+ *
+ * You can still find some traces of that evolution through some hooks
+ * or callbacks taking a drm_atomic_state parameter with a name like
+ * "old_state". This doesn't necessarily mean that the previous
+ * drm_atomic_state is passed, but rather that this parameter used to be
+ * the state collection being replaced after drm_atomic_helper_swap_state(),
+ * and the variable name was never updated.
+ *
+ * Some atomic operation implementations followed a similar process. We
+ * first started to pass the entity state only. However, it was pretty
+ * cumbersome for drivers, and especially CRTCs, to retrieve the states
+ * of other components. Thus, we switched to passing the whole
+ * drm_atomic_state as a parameter to those operations. Here again, the
+ * transition isn't complete yet, and one might still find atomic
+ * operations taking either a drm_atomic_state pointer or a component state
+ * pointer. The former is the preferred form.
*/
struct drm_atomic_state {
+ /**
+ * @ref:
+ *
+ * Count of all references to this update (will not be freed until zero).
+ */
struct kref ref;
+ /**
+ * @dev: Parent DRM Device.
+ */
struct drm_device *dev;
/**
@@ -353,8 +478,27 @@ struct drm_atomic_state {
*
* Allow full modeset. This is used by the ATOMIC IOCTL handler to
* implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should
- * never consult this flag, instead looking at the output of
- * drm_atomic_crtc_needs_modeset().
+ * generally not consult this flag, but instead look at the output of
+ * drm_atomic_crtc_needs_modeset(). The detailed rules are:
+ *
+ * - Drivers must not consult @allow_modeset in the atomic commit path.
+ * Use drm_atomic_crtc_needs_modeset() instead.
+ *
+ * - Drivers must consult @allow_modeset before adding unrelated struct
+ * drm_crtc_state to this commit by calling
+ * drm_atomic_get_crtc_state(). See also the warning in the
+ * documentation for that function.
+ *
+ * - Drivers must never change this flag, it is under the exclusive
+ * control of userspace.
+ *
+ * - Drivers may consult @allow_modeset in the atomic check path, if
+ * they have the choice between an optimal hardware configuration
+ * which requires a modeset, and a less optimal configuration which
+ * can be committed without a modeset. An example would be suboptimal
+ * scanout FIFO allocation resulting in increased idle power
+ * consumption. This allows userspace to avoid flickering and delays
+ * for the normal composition loop at reasonable cost.
*/
bool allow_modeset : 1;
/**
@@ -369,7 +513,12 @@ struct drm_atomic_state {
* flag are not allowed.
*/
bool legacy_cursor_update : 1;
+
+ /**
+ * @async_update: hint for asynchronous plane update
+ */
bool async_update : 1;
+
/**
* @duplicated:
*
@@ -379,13 +528,86 @@ struct drm_atomic_state {
* states.
*/
bool duplicated : 1;
+
+ /**
+ * @checked:
+ *
+ * Indicates the state has been checked and thus must no longer
+ * be mutated. For internal use only, do not consult from drivers.
+ */
+ bool checked : 1;
+
+ /**
+ * @plane_color_pipeline:
+ *
+ * Indicates whether this atomic state originated with a client that
+ * set the DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE.
+ *
+ * Drivers and helper functions should use this to ignore legacy
+ * properties that are incompatible with the drm_plane COLOR_PIPELINE
+ * behavior, such as:
+ *
+ * - COLOR_RANGE
+ * - COLOR_ENCODING
+ *
+ * or any other driver-specific properties that might affect pixel
+ * values.
+ */
+ bool plane_color_pipeline : 1;
+
+ /**
+ * @colorops:
+ *
+ * Pointer to array of &drm_colorop and &drm_colorop_state that are
+ * part of this update.
+ */
+ struct __drm_colorops_state *colorops;
+
+ /**
+ * @planes:
+ *
+ * Pointer to array of &drm_plane and &drm_plane_state that are part
+ * of this update.
+ */
struct __drm_planes_state *planes;
+
+ /**
+ * @crtcs:
+ *
+ * Pointer to array of &drm_crtc and &drm_crtc_state that are part of
+ * this update.
+ */
struct __drm_crtcs_state *crtcs;
+
+ /**
+ * @num_connector: size of the @connectors array
+ */
int num_connector;
+
+ /**
+ * @connectors:
+ *
+ * Pointer to array of &drm_connector and &drm_connector_state that
+ * are part of this update.
+ */
struct __drm_connnectors_state *connectors;
+
+ /**
+ * @num_private_objs: size of the @private_objs array
+ */
int num_private_objs;
+
+ /**
+ * @private_objs:
+ *
+ * Pointer to array of &drm_private_obj and &drm_private_state that
+ * are part of this update.
+ */
struct __drm_private_objs_state *private_objs;
+ /**
+ * @acquire_ctx: acquire context for this atomic modeset state update
+ */
struct drm_modeset_acquire_ctx *acquire_ctx;
/**
@@ -393,7 +615,7 @@ struct drm_atomic_state {
*
* Used for signaling unbound planes/connectors.
* When a connector or plane is not bound to any CRTC, it's still important
- * to preserve linearity to prevent the atomic states from being freed to early.
+ * to preserve linearity to prevent the atomic states from being freed too early.
*
* This commit (if set) is not bound to any CRTC, but will be completed when
* drm_atomic_helper_commit_hw_done() is called.
@@ -482,6 +704,9 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
struct drm_plane_state * __must_check
drm_atomic_get_plane_state(struct drm_atomic_state *state,
struct drm_plane *plane);
+struct drm_colorop_state *
+drm_atomic_get_colorop_state(struct drm_atomic_state *state,
+ struct drm_colorop *colorop);
struct drm_connector_state * __must_check
drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_connector *connector);
@@ -496,36 +721,28 @@ struct drm_private_state * __must_check
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
struct drm_private_obj *obj);
struct drm_private_state *
-drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state,
+drm_atomic_get_old_private_obj_state(const struct drm_atomic_state *state,
struct drm_private_obj *obj);
struct drm_private_state *
-drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
+drm_atomic_get_new_private_obj_state(const struct drm_atomic_state *state,
struct drm_private_obj *obj);
struct drm_connector *
-drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state,
+drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder);
struct drm_connector *
-drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
+drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder);
+struct drm_connector *
+drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx);
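+
+/*
+ * Usage sketch (illustrative only, not part of this header): an
+ * encoder's atomic_enable implementation often needs the connector it
+ * is driving in the new state:
+ *
+ *	connector = drm_atomic_get_new_connector_for_encoder(state,
+ *							     encoder);
+ */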
-/**
- * drm_atomic_get_existing_crtc_state - get CRTC state, if it exists
- * @state: global atomic state object
- * @crtc: CRTC to grab
- *
- * This function returns the CRTC state for the given CRTC, or NULL
- * if the CRTC is not part of the global atomic state.
- *
- * This function is deprecated, @drm_atomic_get_old_crtc_state or
- * @drm_atomic_get_new_crtc_state should be used instead.
- */
-static inline struct drm_crtc_state *
-drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
- struct drm_crtc *crtc)
-{
- return state->crtcs[drm_crtc_index(crtc)].state;
-}
+struct drm_crtc *
+drm_atomic_get_old_crtc_for_encoder(struct drm_atomic_state *state,
+ struct drm_encoder *encoder);
+struct drm_crtc *
+drm_atomic_get_new_crtc_for_encoder(struct drm_atomic_state *state,
+ struct drm_encoder *encoder);
/**
* drm_atomic_get_old_crtc_state - get old CRTC state, if it exists
@@ -536,7 +753,7 @@ drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
* NULL if the CRTC is not part of the global atomic state.
*/
static inline struct drm_crtc_state *
-drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
+drm_atomic_get_old_crtc_state(const struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
return state->crtcs[drm_crtc_index(crtc)].old_state;
@@ -550,31 +767,13 @@ drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
* NULL if the CRTC is not part of the global atomic state.
*/
static inline struct drm_crtc_state *
-drm_atomic_get_new_crtc_state(struct drm_atomic_state *state,
+drm_atomic_get_new_crtc_state(const struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
return state->crtcs[drm_crtc_index(crtc)].new_state;
}
/**
- * drm_atomic_get_existing_plane_state - get plane state, if it exists
- * @state: global atomic state object
- * @plane: plane to grab
- *
- * This function returns the plane state for the given plane, or NULL
- * if the plane is not part of the global atomic state.
- *
- * This function is deprecated, @drm_atomic_get_old_plane_state or
- * @drm_atomic_get_new_plane_state should be used instead.
- */
-static inline struct drm_plane_state *
-drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
- struct drm_plane *plane)
-{
- return state->planes[drm_plane_index(plane)].state;
-}
-
-/**
* drm_atomic_get_old_plane_state - get plane state, if it exists
* @state: global atomic state object
* @plane: plane to grab
@@ -583,7 +782,7 @@ drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
* NULL if the plane is not part of the global atomic state.
*/
static inline struct drm_plane_state *
-drm_atomic_get_old_plane_state(struct drm_atomic_state *state,
+drm_atomic_get_old_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
return state->planes[drm_plane_index(plane)].old_state;
@@ -598,33 +797,40 @@ drm_atomic_get_old_plane_state(struct drm_atomic_state *state,
* NULL if the plane is not part of the global atomic state.
*/
static inline struct drm_plane_state *
-drm_atomic_get_new_plane_state(struct drm_atomic_state *state,
+drm_atomic_get_new_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
return state->planes[drm_plane_index(plane)].new_state;
}
/**
- * drm_atomic_get_existing_connector_state - get connector state, if it exists
+ * drm_atomic_get_old_colorop_state - get colorop state, if it exists
* @state: global atomic state object
- * @connector: connector to grab
- *
- * This function returns the connector state for the given connector,
- * or NULL if the connector is not part of the global atomic state.
+ * @colorop: colorop to grab
*
- * This function is deprecated, @drm_atomic_get_old_connector_state or
- * @drm_atomic_get_new_connector_state should be used instead.
+ * This function returns the old colorop state for the given colorop, or
+ * NULL if the colorop is not part of the global atomic state.
*/
-static inline struct drm_connector_state *
-drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
- struct drm_connector *connector)
+static inline struct drm_colorop_state *
+drm_atomic_get_old_colorop_state(struct drm_atomic_state *state,
+ struct drm_colorop *colorop)
{
- int index = drm_connector_index(connector);
-
- if (index >= state->num_connector)
- return NULL;
+ return state->colorops[drm_colorop_index(colorop)].old_state;
+}
- return state->connectors[index].state;
+/**
+ * drm_atomic_get_new_colorop_state - get colorop state, if it exists
+ * @state: global atomic state object
+ * @colorop: colorop to grab
+ *
+ * This function returns the new colorop state for the given colorop, or
+ * NULL if the colorop is not part of the global atomic state.
+ */
+static inline struct drm_colorop_state *
+drm_atomic_get_new_colorop_state(struct drm_atomic_state *state,
+ struct drm_colorop *colorop)
+{
+ return state->colorops[drm_colorop_index(colorop)].new_state;
}
/**
@@ -636,7 +842,7 @@ drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
* or NULL if the connector is not part of the global atomic state.
*/
static inline struct drm_connector_state *
-drm_atomic_get_old_connector_state(struct drm_atomic_state *state,
+drm_atomic_get_old_connector_state(const struct drm_atomic_state *state,
struct drm_connector *connector)
{
int index = drm_connector_index(connector);
@@ -656,7 +862,7 @@ drm_atomic_get_old_connector_state(struct drm_atomic_state *state,
* or NULL if the connector is not part of the global atomic state.
*/
static inline struct drm_connector_state *
-drm_atomic_get_new_connector_state(struct drm_atomic_state *state,
+drm_atomic_get_new_connector_state(const struct drm_atomic_state *state,
struct drm_connector *connector)
{
int index = drm_connector_index(connector);
@@ -672,11 +878,11 @@ drm_atomic_get_new_connector_state(struct drm_atomic_state *state,
* @state: global atomic state object
* @plane: plane to grab
*
- * This function returns the plane state for the given plane, either from
- * @state, or if the plane isn't part of the atomic state update, from @plane.
- * This is useful in atomic check callbacks, when drivers need to peek at, but
- * not change, state of other planes, since it avoids threading an error code
- * back up the call chain.
+ * This function returns the plane state for the given plane, either the
+ * new plane state from @state, or if the plane isn't part of the atomic
+ * state update, from @plane. This is useful in atomic check callbacks,
+ * when drivers need to peek at, but not change, state of other planes,
+ * since it avoids threading an error code back up the call chain.
*
* WARNING:
*
@@ -694,12 +900,18 @@ drm_atomic_get_new_connector_state(struct drm_atomic_state *state,
* Read-only pointer to the current plane state.
*/
static inline const struct drm_plane_state *
-__drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
+__drm_atomic_get_current_plane_state(const struct drm_atomic_state *state,
struct drm_plane *plane)
{
- if (state->planes[drm_plane_index(plane)].state)
- return state->planes[drm_plane_index(plane)].state;
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_new_plane_state(state, plane);
+ if (plane_state)
+ return plane_state;
+ /*
+ * If the plane isn't part of the state, fallback to the currently active one.
+ */
return plane->state;
}
@@ -712,6 +924,9 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
int __must_check
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc);
+int __must_check
+drm_atomic_add_affected_colorops(struct drm_atomic_state *state,
+ struct drm_plane *plane);
int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
@@ -852,6 +1067,49 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1))
/**
+ * for_each_oldnew_colorop_in_state - iterate over all colorops in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @colorop: &struct drm_colorop iteration cursor
+ * @old_colorop_state: &struct drm_colorop_state iteration cursor for the old state
+ * @new_colorop_state: &struct drm_colorop_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all colorops in an atomic update, tracking both old and
+ * new state. This is useful in places where the state delta needs to be
+ * considered, for example in atomic check functions.
+ */
+#define for_each_oldnew_colorop_in_state(__state, colorop, old_colorop_state, \
+ new_colorop_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->dev->mode_config.num_colorop; \
+ (__i)++) \
+ for_each_if ((__state)->colorops[__i].ptr && \
+ ((colorop) = (__state)->colorops[__i].ptr, \
+ (void)(colorop) /* Only to avoid unused-but-set-variable warning */, \
+ (old_colorop_state) = (__state)->colorops[__i].old_state,\
+ (new_colorop_state) = (__state)->colorops[__i].new_state, 1))
+
+/**
+ * for_each_new_colorop_in_state - iterate over all colorops in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @colorop: &struct drm_colorop iteration cursor
+ * @new_colorop_state: &struct drm_colorop_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all colorops in an atomic update, tracking only the new
+ * state. This is useful in places where only the new state matters, for
+ * example when programming that state in atomic commit functions.
+ */
+#define for_each_new_colorop_in_state(__state, colorop, new_colorop_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->dev->mode_config.num_colorop; \
+ (__i)++) \
+ for_each_if ((__state)->colorops[__i].ptr && \
+ ((colorop) = (__state)->colorops[__i].ptr, \
+ (void)(colorop) /* Only to avoid unused-but-set-variable warning */, \
+ (new_colorop_state) = (__state)->colorops[__i].new_state, 1))
+
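+/*
+ * Usage sketch (illustrative only, not part of this header): walking
+ * the colorops in an atomic check function; foo_check_colorop() and the
+ * local variables are hypothetical.
+ *
+ *	struct drm_colorop *colorop;
+ *	struct drm_colorop_state *old_colorop_state, *new_colorop_state;
+ *	int i;
+ *
+ *	for_each_oldnew_colorop_in_state(state, colorop, old_colorop_state,
+ *					 new_colorop_state, i) {
+ *		ret = foo_check_colorop(colorop, new_colorop_state);
+ *		if (ret)
+ *			return ret;
+ *	}
+ */
+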
+/**
* for_each_oldnew_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
* @plane: &struct drm_plane iteration cursor
@@ -896,6 +1154,22 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(new_plane_state) = (__state)->planes[__i].new_state, 1))
/**
+ * for_each_new_plane_in_state_reverse - iterate over all planes in an atomic
+ * update in reverse order, tracking only new state
+ * @__state: &struct drm_atomic_state pointer
+ * @plane: &struct drm_plane iteration cursor
+ * @new_plane_state: &struct drm_plane_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ */
+#define for_each_new_plane_in_state_reverse(__state, plane, new_plane_state, __i) \
+ for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \
+ (__i) >= 0; \
+ (__i)--) \
+ for_each_if ((__state)->planes[__i].ptr && \
+ ((plane) = (__state)->planes[__i].ptr, \
+ (new_plane_state) = (__state)->planes[__i].new_state, 1))
+
+/**
* for_each_old_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
* @plane: &struct drm_plane iteration cursor
@@ -987,6 +1261,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
for ((__i) = 0; \
(__i) < (__state)->num_private_objs && \
((obj) = (__state)->private_objs[__i].ptr, \
+ (void)(obj) /* Only to avoid unused-but-set-variable warning */, \
(new_obj_state) = (__state)->private_objs[__i].new_state, 1); \
(__i)++)
@@ -1083,7 +1358,7 @@ struct drm_bridge_state {
struct drm_bus_cfg input_bus_cfg;
/**
- * @output_bus_cfg: input bus configuration
+ * @output_bus_cfg: output bus configuration
*/
struct drm_bus_cfg output_bus_cfg;
};
@@ -1098,10 +1373,10 @@ struct drm_bridge_state *
drm_atomic_get_bridge_state(struct drm_atomic_state *state,
struct drm_bridge *bridge);
struct drm_bridge_state *
-drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+drm_atomic_get_old_bridge_state(const struct drm_atomic_state *state,
struct drm_bridge *bridge);
struct drm_bridge_state *
-drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+drm_atomic_get_new_bridge_state(const struct drm_atomic_state *state,
struct drm_bridge *bridge);
#endif /* DRM_ATOMIC_H_ */
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 4045e2507e11..53382fe93537 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -34,12 +34,23 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_util.h>
+/*
+ * Drivers that don't allow primary plane scaling may pass this macro in place
+ * of the min/max scale parameters of the plane-state checker function.
+ *
+ * Due to src being in 16.16 fixed point and dest being in integer pixels,
+ * 1<<16 represents no scaling.
+ */
+#define DRM_PLANE_NO_SCALING (1<<16)
+
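+/*
+ * Usage sketch (illustrative only, not part of this header): a plane
+ * without scaling support passes DRM_PLANE_NO_SCALING for both bounds
+ * in its atomic_check hook; the two bool arguments (can_position,
+ * can_update_disabled) are example values.
+ *
+ *	return drm_atomic_helper_check_plane_state(new_plane_state,
+ *						   crtc_state,
+ *						   DRM_PLANE_NO_SCALING,
+ *						   DRM_PLANE_NO_SCALING,
+ *						   false, true);
+ */
+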
struct drm_atomic_state;
struct drm_private_obj;
struct drm_private_state;
int drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state);
+int drm_atomic_helper_check_wb_connector_state(struct drm_connector *connector,
+ struct drm_atomic_state *state);
int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
const struct drm_crtc_state *crtc_state,
int min_scale,
@@ -48,6 +59,7 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
bool can_update_disabled);
int drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state);
+int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state);
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_tail(struct drm_atomic_state *state);
@@ -84,6 +96,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state);
+void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
+ struct drm_atomic_state *state);
#define DRM_PLANE_COMMIT_ACTIVE_ONLY BIT(0)
#define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET BIT(1)
@@ -125,6 +139,8 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set,
int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
+int drm_atomic_helper_reset_crtc(struct drm_crtc *crtc,
+ struct drm_modeset_acquire_ctx *ctx);
void drm_atomic_helper_shutdown(struct drm_device *dev);
struct drm_atomic_state *
drm_atomic_helper_duplicate_state(struct drm_device *dev,
@@ -197,6 +213,32 @@ int drm_atomic_helper_page_flip_target(
plane)))
/**
+ * drm_atomic_plane_enabling - check whether a plane is being enabled
+ * @old_plane_state: old atomic plane state
+ * @new_plane_state: new atomic plane state
+ *
+ * Checks the atomic state of a plane to determine whether it's being enabled
+ * or not. This also WARNs if it detects an invalid state (CRTC and FB
+ * need to either both be NULL or both be non-NULL).
+ *
+ * RETURNS:
+ * True if the plane is being enabled, false otherwise.
+ */
+static inline bool drm_atomic_plane_enabling(struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state)
+{
+ /*
+ * When enabling a plane, CRTC and FB should always be set together.
+ * Anything else should be considered a bug in the atomic core, so we
+ * gently warn about it.
+ */
+ WARN_ON((!new_plane_state->crtc && new_plane_state->fb) ||
+ (new_plane_state->crtc && !new_plane_state->fb));
+
+ return !old_plane_state->crtc && new_plane_state->crtc;
+}
+
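+/*
+ * Usage sketch (illustrative only, not part of this header): in a
+ * plane's atomic_update hook, with foo_plane_power_on() being
+ * hypothetical:
+ *
+ *	old_state = drm_atomic_get_old_plane_state(state, plane);
+ *	new_state = drm_atomic_get_new_plane_state(state, plane);
+ *
+ *	if (drm_atomic_plane_enabling(old_state, new_state))
+ *		foo_plane_power_on(plane);
+ */
+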
+/**
* drm_atomic_plane_disabling - check whether a plane is being disabled
* @old_plane_state: old atomic plane state
* @new_plane_state: new atomic plane state
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
index 3f8f1d627f7c..b9740edb2658 100644
--- a/include/drm/drm_atomic_state_helper.h
+++ b/include/drm/drm_atomic_state_helper.h
@@ -26,6 +26,7 @@
#include <linux/types.h>
+struct drm_atomic_state;
struct drm_bridge;
struct drm_bridge_state;
struct drm_crtc;
@@ -71,6 +72,9 @@ void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
struct drm_connector_state *conn_state);
void drm_atomic_helper_connector_reset(struct drm_connector *connector);
void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector);
+int drm_atomic_helper_connector_tv_check(struct drm_connector *connector,
+ struct drm_atomic_state *state);
+void drm_atomic_helper_connector_tv_margins_reset(struct drm_connector *connector);
void
__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
struct drm_connector_state *state);
diff --git a/include/drm/drm_atomic_uapi.h b/include/drm/drm_atomic_uapi.h
index 8cec52ad1277..436315523326 100644
--- a/include/drm/drm_atomic_uapi.h
+++ b/include/drm/drm_atomic_uapi.h
@@ -37,6 +37,7 @@ struct drm_crtc;
struct drm_connector_state;
struct dma_fence;
struct drm_framebuffer;
+struct drm_colorop;
int __must_check
drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
@@ -49,8 +50,8 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
struct drm_crtc *crtc);
void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
struct drm_framebuffer *fb);
-void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
- struct dma_fence *fence);
+void drm_atomic_set_colorop_for_plane(struct drm_plane_state *plane_state,
+ struct drm_colorop *colorop);
int __must_check
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_crtc *crtc);
diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h
index 0d36bfd1a4cd..5a4cd1fa8e2a 100644
--- a/include/drm/drm_audio_component.h
+++ b/include/drm/drm_audio_component.h
@@ -4,6 +4,9 @@
#ifndef _DRM_AUDIO_COMPONENT_H_
#define _DRM_AUDIO_COMPONENT_H_
+#include <linux/completion.h>
+#include <linux/types.h>
+
struct drm_audio_component;
struct device;
diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
index 6bf8b2b78991..50131383ed81 100644
--- a/include/drm/drm_auth.h
+++ b/include/drm/drm_auth.h
@@ -33,24 +33,6 @@
#include <linux/wait.h>
struct drm_file;
-struct drm_hw_lock;
-
-/*
- * Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for
- * include ordering reasons.
- *
- * DO NOT USE.
- */
-struct drm_lock_data {
- struct drm_hw_lock *hw_lock;
- struct drm_file *file_priv;
- wait_queue_head_t lock_queue;
- unsigned long lock_time;
- spinlock_t spinlock;
- uint32_t kernel_waiters;
- uint32_t user_waiters;
- int idle_has_lock;
-};
/**
* struct drm_master - drm master structure
@@ -58,12 +40,6 @@ struct drm_lock_data {
* @refcount: Refcount for this master object.
* @dev: Link back to the DRM device
* @driver_priv: Pointer to driver-private information.
- * @lessor: Lease holder
- * @lessee_id: id for lessees. Owners always have id 0
- * @lessee_list: other lessees of the same master
- * @lessees: drm_masters leasing from this one
- * @leases: Objects leased to this drm_master.
- * @lessee_idr: All lessees under this owner (only used where lessor == NULL)
*
* Note that master structures are only relevant for the legacy/primary device
* nodes, hence there can only be one per device, not one per drm_minor.
@@ -88,25 +64,73 @@ struct drm_master {
struct idr magic_map;
void *driver_priv;
- /* Tree of display resource leases, each of which is a drm_master struct
- * All of these get activated simultaneously, so drm_device master points
- * at the top of the tree (for which lessor is NULL). Protected by
- * &drm_device.mode_config.idr_mutex.
+ /**
+ * @lessor:
+ *
+ * Lease grantor, only set if this &struct drm_master represents a
+ * lessee holding a lease of objects from @lessor. Full owners of the
+ * device have this set to NULL.
+ *
+ * The lessor does not change once it's set in drm_lease_create(), and
+ * each lessee holds a reference to its lessor that it releases upon
+ * being destroyed in drm_lease_destroy().
+ *
+ * See also the :ref:`section on display resource leasing
+ * <drm_leasing>`.
*/
-
struct drm_master *lessor;
+
+ /**
+ * @lessee_id:
+ *
+ * ID for lessees. Owners (i.e. @lessor is NULL) always have ID 0.
+ * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex.
+ */
int lessee_id;
+
+ /**
+ * @lessee_list:
+ *
+ * List entry of lessees of @lessor, where they are linked to @lessees.
+ * Not used for owners. Protected by &drm_device.mode_config's
+ * &drm_mode_config.idr_mutex.
+ */
struct list_head lessee_list;
+
+ /**
+ * @lessees:
+ *
+ * List of drm_masters leasing from this one. Protected by
+ * &drm_device.mode_config's &drm_mode_config.idr_mutex.
+ *
+ * This list is empty if no leases have been granted, or if all lessees
+ * have been destroyed. Since lessors are referenced by all their
+ * lessees, this master cannot be destroyed unless the list is empty.
+ */
struct list_head lessees;
+
+ /**
+ * @leases:
+ *
+ * Objects leased to this drm_master. Protected by
+ * &drm_device.mode_config's &drm_mode_config.idr_mutex.
+ *
+ * Objects are leased all together in drm_lease_create(), and are
+ * removed all together when the lease is revoked.
+ */
struct idr leases;
+
+ /**
+ * @lessee_idr:
+ *
+ * All lessees under this owner (only used where @lessor is NULL).
+ * Protected by &drm_device.mode_config's &drm_mode_config.idr_mutex.
+ */
struct idr lessee_idr;
- /* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- struct drm_lock_data lock;
-#endif
};
struct drm_master *drm_master_get(struct drm_master *master);
+struct drm_master *drm_file_get_master(struct drm_file *file_priv);
void drm_master_put(struct drm_master **master);
bool drm_is_current_master(struct drm_file *fpriv);
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 2195daa289d2..0ff7ab4aa868 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -23,6 +23,7 @@
#ifndef __DRM_BRIDGE_H__
#define __DRM_BRIDGE_H__
+#include <linux/cleanup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -32,12 +33,18 @@
#include <drm/drm_mode_object.h>
#include <drm/drm_modes.h>
+struct cec_msg;
+struct device_node;
+
struct drm_bridge;
struct drm_bridge_timings;
struct drm_connector;
struct drm_display_info;
+struct drm_minor;
struct drm_panel;
struct edid;
+struct hdmi_codec_daifmt;
+struct hdmi_codec_params;
struct i2c_adapter;
/**
@@ -68,10 +75,20 @@ struct drm_bridge_funcs {
*
* Zero on success, error code on failure.
*/
- int (*attach)(struct drm_bridge *bridge,
+ int (*attach)(struct drm_bridge *bridge, struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags);
/**
+ * @destroy:
+ *
+ * This callback is invoked when the bridge is about to be
+ * deallocated.
+ *
+ * The @destroy callback is optional.
+ */
+ void (*destroy)(struct drm_bridge *bridge);
+
+ /**
* @detach:
*
* This callback is invoked whenever our bridge is being detached from a
@@ -104,7 +121,7 @@ struct drm_bridge_funcs {
* Since this function is both called from the check phase of an atomic
* commit, and the mode validation in the probe paths it is not allowed
* to look at anything else but the passed-in mode, and validate it
- * against configuration-invariant hardward constraints. Any further
+ * against configuration-invariant hardware constraints. Any further
* limits which depend upon the configuration can only be checked in
* @mode_fixup.
*
@@ -159,37 +176,80 @@ struct drm_bridge_funcs {
/**
* @disable:
*
- * This callback should disable the bridge. It is called right before
- * the preceding element in the display pipe is disabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's @disable vfunc. If the preceding element is a &drm_encoder
- * it's called right before the &drm_encoder_helper_funcs.disable,
- * &drm_encoder_helper_funcs.prepare or &drm_encoder_helper_funcs.dpms
- * hook.
+ * The @disable callback should disable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
+ *
+ * If the preceding element is a &drm_bridge, then this is called before
+ * that bridge is disabled via one of:
+ *
+ * - &drm_bridge_funcs.disable
+ * - &drm_bridge_funcs.atomic_disable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called before the encoder is disabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_disable
+ * - &drm_encoder_helper_funcs.prepare
+ * - &drm_encoder_helper_funcs.disable
+ * - &drm_encoder_helper_funcs.dpms
+ *
+ * and the CRTC is disabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.prepare
+ * - &drm_crtc_helper_funcs.atomic_disable
+ * - &drm_crtc_helper_funcs.disable
+ * - &drm_crtc_helper_funcs.dpms.
+ *
* The @disable callback is optional.
+ *
+ * NOTE:
+ *
+ * This is deprecated, do not use!
+ * New drivers shall use &drm_bridge_funcs.atomic_disable.
*/
void (*disable)(struct drm_bridge *bridge);
/**
* @post_disable:
*
- * This callback should disable the bridge. It is called right after the
- * preceding element in the display pipe is disabled. If the preceding
- * element is a bridge this means it's called after that bridge's
- * @post_disable function. If the preceding element is a &drm_encoder
- * it's called right after the encoder's
- * &drm_encoder_helper_funcs.disable, &drm_encoder_helper_funcs.prepare
- * or &drm_encoder_helper_funcs.dpms hook.
- *
* The bridge must assume that the display pipe (i.e. clocks and timing
- * singals) feeding it is no longer running when this callback is
- * called.
+ * signals) feeding this bridge is no longer running when the
+ * @post_disable is called.
+ *
+ * This callback should perform all the actions required by the hardware
+ * after it has stopped receiving signals from the preceding element.
+ *
+ * If the preceding element is a &drm_bridge, then this is called after
+ * that bridge is post-disabled (unless marked otherwise by the
+ * @pre_enable_prev_first flag) via one of:
+ *
+ * - &drm_bridge_funcs.post_disable
+ * - &drm_bridge_funcs.atomic_post_disable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called after the encoder is disabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_disable
+ * - &drm_encoder_helper_funcs.prepare
+ * - &drm_encoder_helper_funcs.disable
+ * - &drm_encoder_helper_funcs.dpms
+ *
+ * and the CRTC is disabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.prepare
+ * - &drm_crtc_helper_funcs.atomic_disable
+ * - &drm_crtc_helper_funcs.disable
+ * - &drm_crtc_helper_funcs.dpms
*
* The @post_disable callback is optional.
+ *
+ * NOTE:
+ *
+ * This is deprecated, do not use!
+ * New drivers shall use &drm_bridge_funcs.atomic_post_disable.
*/
void (*post_disable)(struct drm_bridge *bridge);
@@ -215,9 +275,9 @@ struct drm_bridge_funcs {
*
* NOTE:
*
- * If a need arises to store and access modes adjusted for other
- * locations than the connection between the CRTC and the first bridge,
- * the DRM framework will have to be extended with DRM bridge states.
+ * This is deprecated, do not use!
+ * New drivers shall set their mode in the
+ * &drm_bridge_funcs.atomic_enable operation.
*/
void (*mode_set)(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
@@ -225,142 +285,212 @@ struct drm_bridge_funcs {
/**
* @pre_enable:
*
- * This callback should enable the bridge. It is called right before
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's @pre_enable function. If the preceding element is a
- * &drm_encoder it's called right before the encoder's
- * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
- * &drm_encoder_helper_funcs.dpms hook.
- *
* The display pipe (i.e. clocks and timing signals) feeding this bridge
- * will not yet be running when this callback is called. The bridge must
- * not enable the display link feeding the next bridge in the chain (if
- * there is one) when this callback is called.
+ * will not yet be running when the @pre_enable is called.
+ *
+ * This callback should perform all the necessary actions to prepare the
+ * bridge to accept signals from the preceding element.
+ *
+ * If the preceding element is a &drm_bridge, then this is called before
+ * that bridge is pre-enabled (unless marked otherwise by
+ * @pre_enable_prev_first flag) via one of:
+ *
+ * - &drm_bridge_funcs.pre_enable
+ * - &drm_bridge_funcs.atomic_pre_enable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called before the CRTC is enabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.atomic_enable
+ * - &drm_crtc_helper_funcs.commit
+ *
+ * and the encoder is enabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_enable
+ * - &drm_encoder_helper_funcs.enable
+ * - &drm_encoder_helper_funcs.commit
*
* The @pre_enable callback is optional.
+ *
+ * NOTE:
+ *
+ * This is deprecated, do not use!
+ * New drivers shall use &drm_bridge_funcs.atomic_pre_enable.
*/
void (*pre_enable)(struct drm_bridge *bridge);
/**
* @enable:
*
- * This callback should enable the bridge. It is called right after
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called after that
- * bridge's @enable function. If the preceding element is a
- * &drm_encoder it's called right after the encoder's
- * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
- * &drm_encoder_helper_funcs.dpms hook.
+ * The @enable callback should enable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
+ * If the preceding element is a &drm_bridge, then this is called after
+ * that bridge is enabled via one of:
+ *
+ * - &drm_bridge_funcs.enable
+ * - &drm_bridge_funcs.atomic_enable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called after the CRTC is enabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.atomic_enable
+ * - &drm_crtc_helper_funcs.commit
+ *
+ * and the encoder is enabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_enable
+ * - &drm_encoder_helper_funcs.enable
+ * - drm_encoder_helper_funcs.commit
+ *
* The @enable callback is optional.
+ *
+ * NOTE:
+ *
+ * This is deprecated, do not use!
+ * New drivers shall use &drm_bridge_funcs.atomic_enable.
*/
void (*enable)(struct drm_bridge *bridge);
/**
* @atomic_pre_enable:
*
- * This callback should enable the bridge. It is called right before
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's @atomic_pre_enable or @pre_enable function. If the preceding
- * element is a &drm_encoder it's called right before the encoder's
- * &drm_encoder_helper_funcs.atomic_enable hook.
- *
* The display pipe (i.e. clocks and timing signals) feeding this bridge
- * will not yet be running when this callback is called. The bridge must
- * not enable the display link feeding the next bridge in the chain (if
- * there is one) when this callback is called.
+ * will not yet be running when the @atomic_pre_enable is called.
+ *
+ * This callback should perform all the necessary actions to prepare the
+ * bridge to accept signals from the preceding element.
*
- * Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from
- * &drm_bridge_chain_pre_enable. It would be prudent to also provide an
- * implementation of @pre_enable if you are expecting driver calls into
- * &drm_bridge_chain_pre_enable.
+ * If the preceding element is a &drm_bridge, then this is called before
+	 * that bridge is pre-enabled (unless marked otherwise by the
+	 * @pre_enable_prev_first flag) via one of:
+ *
+ * - &drm_bridge_funcs.pre_enable
+ * - &drm_bridge_funcs.atomic_pre_enable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called before the CRTC is enabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.atomic_enable
+ * - &drm_crtc_helper_funcs.commit
+ *
+ * and the encoder is enabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_enable
+ * - &drm_encoder_helper_funcs.enable
+ * - &drm_encoder_helper_funcs.commit
*
* The @atomic_pre_enable callback is optional.
*/
void (*atomic_pre_enable)(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
/**
* @atomic_enable:
*
- * This callback should enable the bridge. It is called right after
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called after that
- * bridge's @atomic_enable or @enable function. If the preceding element
- * is a &drm_encoder it's called right after the encoder's
- * &drm_encoder_helper_funcs.atomic_enable hook.
+ * The @atomic_enable callback should enable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
- * Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from &drm_bridge_chain_enable.
- * It would be prudent to also provide an implementation of @enable if
- * you are expecting driver calls into &drm_bridge_chain_enable.
+ * If the preceding element is a &drm_bridge, then this is called after
+ * that bridge is enabled via one of:
+ *
+ * - &drm_bridge_funcs.enable
+ * - &drm_bridge_funcs.atomic_enable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called after the CRTC is enabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.atomic_enable
+ * - &drm_crtc_helper_funcs.commit
+ *
+ * and the encoder is enabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_enable
+ * - &drm_encoder_helper_funcs.enable
+	 * - &drm_encoder_helper_funcs.commit
*
* The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
/**
* @atomic_disable:
*
- * This callback should disable the bridge. It is called right before
- * the preceding element in the display pipe is disabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's @atomic_disable or @disable vfunc. If the preceding element
- * is a &drm_encoder it's called right before the
- * &drm_encoder_helper_funcs.atomic_disable hook.
+ * The @atomic_disable callback should disable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
- * Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from
- * &drm_bridge_chain_disable. It would be prudent to also provide an
- * implementation of @disable if you are expecting driver calls into
- * &drm_bridge_chain_disable.
+ * If the preceding element is a &drm_bridge, then this is called before
+ * that bridge is disabled via one of:
+ *
+ * - &drm_bridge_funcs.disable
+ * - &drm_bridge_funcs.atomic_disable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called before the encoder is disabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_disable
+ * - &drm_encoder_helper_funcs.prepare
+ * - &drm_encoder_helper_funcs.disable
+ * - &drm_encoder_helper_funcs.dpms
+ *
+ * and the CRTC is disabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.prepare
+ * - &drm_crtc_helper_funcs.atomic_disable
+ * - &drm_crtc_helper_funcs.disable
+	 * - &drm_crtc_helper_funcs.dpms
*
* The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
/**
* @atomic_post_disable:
*
- * This callback should disable the bridge. It is called right after the
- * preceding element in the display pipe is disabled. If the preceding
- * element is a bridge this means it's called after that bridge's
- * @atomic_post_disable or @post_disable function. If the preceding
- * element is a &drm_encoder it's called right after the encoder's
- * &drm_encoder_helper_funcs.atomic_disable hook.
- *
* The bridge must assume that the display pipe (i.e. clocks and timing
- * signals) feeding it is no longer running when this callback is
- * called.
+	 * signals) feeding this bridge is no longer running when
+	 * @atomic_post_disable is called.
+ *
+ * This callback should perform all the actions required by the hardware
+ * after it has stopped receiving signals from the preceding element.
+ *
+ * If the preceding element is a &drm_bridge, then this is called after
+ * that bridge is post-disabled (unless marked otherwise by the
+ * @pre_enable_prev_first flag) via one of:
+ *
+ * - &drm_bridge_funcs.post_disable
+ * - &drm_bridge_funcs.atomic_post_disable
*
- * Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from
- * &drm_bridge_chain_post_disable.
- * It would be prudent to also provide an implementation of
- * @post_disable if you are expecting driver calls into
- * &drm_bridge_chain_post_disable.
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called after the encoder is disabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_disable
+ * - &drm_encoder_helper_funcs.prepare
+ * - &drm_encoder_helper_funcs.disable
+ * - &drm_encoder_helper_funcs.dpms
+ *
+ * and the CRTC is disabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.prepare
+ * - &drm_crtc_helper_funcs.atomic_disable
+ * - &drm_crtc_helper_funcs.disable
+ * - &drm_crtc_helper_funcs.dpms
*
* The @atomic_post_disable callback is optional.
*/
void (*atomic_post_disable)(struct drm_bridge *bridge,
- struct drm_bridge_state *old_bridge_state);
+ struct drm_atomic_state *state);
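With the &drm_atomic_state parameter shown above, a bridge that needs its
per-commit state looks it up from the global atomic state via
drm_atomic_get_new_bridge_state(). A minimal sketch; struct my_bridge and
the my_bridge_*() helpers are hypothetical:

/* Sketch only: my_bridge and its helpers are hypothetical. */
static void my_bridge_atomic_enable(struct drm_bridge *bridge,
				    struct drm_atomic_state *state)
{
	struct my_bridge *mb = container_of(bridge, struct my_bridge, bridge);
	struct drm_bridge_state *bridge_state =
		drm_atomic_get_new_bridge_state(state, bridge);

	/* Program the bus format negotiated for this commit. */
	my_bridge_set_bus_format(mb, bridge_state->output_bus_cfg.format);

	/* Start driving the link towards the next element in the chain. */
	my_bridge_start_link(mb);
}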
/**
* @atomic_duplicate_state:
@@ -427,11 +557,11 @@ struct drm_bridge_funcs {
*
* The returned array must be allocated with kmalloc() and will be
* freed by the caller. If the allocation fails, NULL should be
- * returned. num_output_fmts must be set to the returned array size.
+ * returned. num_input_fmts must be set to the returned array size.
* Formats listed in the returned array should be listed in decreasing
* preference order (the core will try all formats until it finds one
* that works). When the format is not supported NULL should be
- * returned and num_output_fmts should be set to 0.
+ * returned and num_input_fmts should be set to 0.
*
* This method is called on all elements of the bridge chain as part of
* the bus format negotiation process that happens in
@@ -531,7 +661,8 @@ struct drm_bridge_funcs {
*
* drm_connector_status indicating the bridge output status.
*/
- enum drm_connector_status (*detect)(struct drm_bridge *bridge);
+ enum drm_connector_status (*detect)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
/**
* @get_modes:
@@ -542,7 +673,7 @@ struct drm_bridge_funcs {
* The @get_modes callback is mostly intended to support non-probeable
* displays such as many fixed panels. Bridges that support reading
* EDID shall leave @get_modes unimplemented and implement the
- * &drm_bridge_funcs->get_edid callback instead.
+ * &drm_bridge_funcs->edid_read callback instead.
*
* This callback is optional. Bridges that implement it shall set the
* DRM_BRIDGE_OP_MODES flag in their &drm_bridge->ops.
@@ -559,11 +690,11 @@ struct drm_bridge_funcs {
struct drm_connector *connector);
/**
- * @get_edid:
+ * @edid_read:
*
- * Read and parse the EDID data of the connected display.
+ * Read the EDID data of the connected display.
*
- * The @get_edid callback is the preferred way of reporting mode
+ * The @edid_read callback is the preferred way of reporting mode
* information for a display connected to the bridge output. Bridges
* that support reading EDID shall implement this callback and leave
* the @get_modes callback unimplemented.
@@ -576,17 +707,18 @@ struct drm_bridge_funcs {
* DRM_BRIDGE_OP_EDID flag in their &drm_bridge->ops.
*
* The connector parameter shall be used for the sole purpose of EDID
- * retrieval and parsing, and shall not be stored internally by bridge
- * drivers for future usage.
+ * retrieval, and shall not be stored internally by bridge drivers for
+ * future usage.
*
* RETURNS:
*
- * An edid structure newly allocated with kmalloc() (or similar) on
- * success, or NULL otherwise. The caller is responsible for freeing
- * the returned edid structure with kfree().
+ * An edid structure newly allocated with drm_edid_alloc() or returned
+ * from drm_edid_read() family of functions on success, or NULL
+ * otherwise. The caller is responsible for freeing the returned edid
+ * structure with drm_edid_free().
*/
- struct edid *(*get_edid)(struct drm_bridge *bridge,
- struct drm_connector *connector);
+ const struct drm_edid *(*edid_read)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
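A minimal @edid_read implementation for a bridge whose display is reachable
over DDC can delegate to the drm_edid_read() family; a sketch, assuming the
bridge stored its I2C adapter in @ddc:

static const struct drm_edid *my_bridge_edid_read(struct drm_bridge *bridge,
						  struct drm_connector *connector)
{
	/* The caller frees the returned EDID with drm_edid_free(). */
	return drm_edid_read_ddc(connector, bridge->ddc);
}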
/**
* @hpd_notify:
@@ -629,6 +761,235 @@ struct drm_bridge_funcs {
* the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops.
*/
void (*hpd_disable)(struct drm_bridge *bridge);
+
+ /**
+ * @hdmi_tmds_char_rate_valid:
+ *
+ * Check whether a particular TMDS character rate is supported by the
+ * driver.
+ *
+ * This callback is optional and should only be implemented by the
+ * bridges that take part in the HDMI connector implementation. Bridges
+ * that implement it shall set the DRM_BRIDGE_OP_HDMI flag in their
+ * &drm_bridge->ops.
+ *
+ * Returns:
+ *
+ * Either &drm_mode_status.MODE_OK or one of the failure reasons
+ * in &enum drm_mode_status.
+ */
+ enum drm_mode_status
+ (*hdmi_tmds_char_rate_valid)(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate);
+
+ /**
+ * @hdmi_clear_infoframe:
+ *
+ * This callback clears the infoframes in the hardware during commit.
+ * It will be called multiple times, once for every disabled infoframe
+ * type.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI flag in their &drm_bridge->ops.
+ */
+ int (*hdmi_clear_infoframe)(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type);
+ /**
+ * @hdmi_write_infoframe:
+ *
+ * Program the infoframe into the hardware. It will be called multiple
+ * times, once for every updated infoframe type.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI flag in their &drm_bridge->ops.
+ */
+ int (*hdmi_write_infoframe)(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len);
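A sketch of the infoframe pair; my_bridge_load_packet() and
my_bridge_disable_packet() stand in for hypothetical register helpers:

static int my_bridge_hdmi_write_infoframe(struct drm_bridge *bridge,
					  enum hdmi_infoframe_type type,
					  const u8 *buffer, size_t len)
{
	/* @buffer already holds the packed infoframe bytes. */
	return my_bridge_load_packet(bridge, type, buffer, len);
}

static int my_bridge_hdmi_clear_infoframe(struct drm_bridge *bridge,
					  enum hdmi_infoframe_type type)
{
	return my_bridge_disable_packet(bridge, type);
}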
+
+ /**
+ * @hdmi_audio_startup:
+ *
+ * Called when ASoC starts an audio stream setup.
+ *
+	 * This callback is optional; it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_audio_startup)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ /**
+ * @hdmi_audio_prepare:
+	 *
+	 * Configure the HDMI encoder for the audio stream. May be called
+	 * multiple times for each stream setup.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_audio_prepare)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /**
+ * @hdmi_audio_shutdown:
+ *
+ * Shut down the audio stream.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
+ */
+ void (*hdmi_audio_shutdown)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ /**
+ * @hdmi_audio_mute_stream:
+ *
+ * Mute/unmute HDMI audio stream.
+ *
+	 * This callback is optional; it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_audio_mute_stream)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ bool enable, int direction);
+
+ /**
+ * @hdmi_cec_init:
+ *
+ * Initialize CEC part of the bridge.
+ *
+	 * This callback is optional; it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their
+ * &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_cec_init)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ /**
+ * @hdmi_cec_enable:
+ *
+ * Enable or disable the CEC adapter inside the bridge.
+ *
+	 * This callback is optional; it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their
+ * &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_cec_enable)(struct drm_bridge *bridge, bool enable);
+
+ /**
+ * @hdmi_cec_log_addr:
+ *
+ * Set the logical address of the CEC adapter inside the bridge.
+ *
+	 * This callback is optional; it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their
+ * &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_cec_log_addr)(struct drm_bridge *bridge, u8 logical_addr);
+
+ /**
+ * @hdmi_cec_transmit:
+ *
+ * Transmit the message using the CEC adapter inside the bridge.
+ *
+	 * This callback is optional; it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER flag in their
+ * &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*hdmi_cec_transmit)(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg);
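A transmit sketch forwarding the request to a hypothetical hardware helper;
the &struct cec_msg fields used here (msg, len) are the standard CEC
framework ones:

static int my_bridge_hdmi_cec_transmit(struct drm_bridge *bridge, u8 attempts,
				       u32 signal_free_time,
				       struct cec_msg *msg)
{
	/* my_bridge_hw_cec_send() is hypothetical. */
	return my_bridge_hw_cec_send(bridge, attempts, signal_free_time,
				     msg->msg, msg->len);
}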
+
+ /**
+ * @dp_audio_startup:
+ *
+ * Called when ASoC starts a DisplayPort audio stream setup.
+ *
+	 * This callback is optional; it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*dp_audio_startup)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ /**
+ * @dp_audio_prepare:
+	 *
+	 * Configure the DisplayPort audio stream. May be called multiple
+	 * times for each stream setup.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*dp_audio_prepare)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /**
+ * @dp_audio_shutdown:
+ *
+ * Shut down the DisplayPort audio stream.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ */
+ void (*dp_audio_shutdown)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ /**
+ * @dp_audio_mute_stream:
+ *
+ * Mute/unmute DisplayPort audio stream.
+ *
+	 * This callback is optional; it can be implemented by bridges that
+ * set the @DRM_BRIDGE_OP_DP_AUDIO flag in their &drm_bridge->ops.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*dp_audio_mute_stream)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
+ bool enable, int direction);
+
+ /**
+ * @debugfs_init:
+ *
+ * Allows bridges to create bridge-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_bridge *bridge, struct dentry *root);
};
/**
@@ -680,7 +1041,7 @@ enum drm_bridge_ops {
/**
* @DRM_BRIDGE_OP_EDID: The bridge can retrieve the EDID of the display
* connected to its output. Bridges that set this flag shall implement
- * the &drm_bridge_funcs->get_edid callback.
+ * the &drm_bridge_funcs->edid_read callback.
*/
DRM_BRIDGE_OP_EDID = BIT(1),
/**
@@ -698,6 +1059,52 @@ enum drm_bridge_ops {
* this flag shall implement the &drm_bridge_funcs->get_modes callback.
*/
DRM_BRIDGE_OP_MODES = BIT(3),
+ /**
+ * @DRM_BRIDGE_OP_HDMI: The bridge provides HDMI connector operations,
+ * including infoframes support. Bridges that set this flag must
+	 * implement the &drm_bridge_funcs->hdmi_write_infoframe callback.
+ *
+ * Note: currently there can be at most one bridge in a chain that sets
+ * this bit. This is to simplify corresponding glue code in connector
+ * drivers.
+ */
+ DRM_BRIDGE_OP_HDMI = BIT(4),
+ /**
+ * @DRM_BRIDGE_OP_HDMI_AUDIO: The bridge provides HDMI audio operations.
+ * Bridges that set this flag must implement the
+ * &drm_bridge_funcs->hdmi_audio_prepare and
+ * &drm_bridge_funcs->hdmi_audio_shutdown callbacks.
+ *
+ * Note: currently there can be at most one bridge in a chain that sets
+ * this bit. This is to simplify corresponding glue code in connector
+ * drivers. Also it is not possible to have a bridge in the chain that
+ * sets @DRM_BRIDGE_OP_DP_AUDIO if there is a bridge that sets this
+ * flag.
+ */
+ DRM_BRIDGE_OP_HDMI_AUDIO = BIT(5),
+ /**
+ * @DRM_BRIDGE_OP_DP_AUDIO: The bridge provides DisplayPort audio operations.
+ * Bridges that set this flag must implement the
+ * &drm_bridge_funcs->dp_audio_prepare and
+ * &drm_bridge_funcs->dp_audio_shutdown callbacks.
+ *
+ * Note: currently there can be at most one bridge in a chain that sets
+ * this bit. This is to simplify corresponding glue code in connector
+ * drivers. Also it is not possible to have a bridge in the chain that
+ * sets @DRM_BRIDGE_OP_HDMI_AUDIO if there is a bridge that sets this
+ * flag.
+ */
+ DRM_BRIDGE_OP_DP_AUDIO = BIT(6),
+ /**
+	 * @DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER: The bridge requires a CEC notifier
+ * to be present.
+ */
+ DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER = BIT(7),
+ /**
+	 * @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER: The bridge requires a CEC adapter
+ * to be present.
+ */
+ DRM_BRIDGE_OP_HDMI_CEC_ADAPTER = BIT(8),
};
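Taken together, a bridge driving an HDMI output with audio support would
advertise its capabilities roughly like this (a sketch; the corresponding
callbacks must be implemented as documented above):

	bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
		      DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_HDMI |
		      DRM_BRIDGE_OP_HDMI_AUDIO;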
/**
@@ -712,10 +1119,8 @@ struct drm_bridge {
struct drm_encoder *encoder;
/** @chain_node: used to form a bridge chain */
struct list_head chain_node;
-#ifdef CONFIG_OF
/** @of_node: device node pointer to the bridge */
struct device_node *of_node;
-#endif
/** @list: to keep track of all added bridges */
struct list_head list;
/**
@@ -726,6 +1131,18 @@ struct drm_bridge {
const struct drm_bridge_timings *timings;
/** @funcs: control functions */
const struct drm_bridge_funcs *funcs;
+
+ /**
+ * @container: Pointer to the private driver struct embedding this
+	 * &struct drm_bridge.
+ */
+ void *container;
+
+ /**
+ * @refcount: reference count of users referencing this bridge.
+ */
+ struct kref refcount;
+
/** @driver_private: pointer to the bridge driver's internal context */
void *driver_private;
/** @ops: bitmask of operations supported by the bridge */
@@ -742,9 +1159,102 @@ struct drm_bridge {
*/
bool interlace_allowed;
/**
+ * @ycbcr_420_allowed: Indicate that the bridge can handle YCbCr 420
+ * output.
+ */
+ bool ycbcr_420_allowed;
+ /**
+	 * @pre_enable_prev_first: The bridge requires that the previous
+	 * bridge's @pre_enable function be called before its @pre_enable,
+ * and conversely for post_disable. This is most frequently a
+ * requirement for DSI devices which need the host to be initialised
+ * before the peripheral.
+ */
+ bool pre_enable_prev_first;
+ /**
+ * @support_hdcp: Indicate that the bridge supports HDCP.
+ */
+ bool support_hdcp;
+ /**
* @ddc: Associated I2C adapter for DDC access, if any.
*/
struct i2c_adapter *ddc;
+
+ /**
+ * @vendor: Vendor of the product to be used for the SPD InfoFrame
+ * generation. This is required if @DRM_BRIDGE_OP_HDMI is set.
+ */
+ const char *vendor;
+
+ /**
+ * @product: Name of the product to be used for the SPD InfoFrame
+ * generation. This is required if @DRM_BRIDGE_OP_HDMI is set.
+ */
+ const char *product;
+
+ /**
+ * @supported_formats: Bitmask of @hdmi_colorspace listing supported
+ * output formats. This is only relevant if @DRM_BRIDGE_OP_HDMI is set.
+ */
+ unsigned int supported_formats;
+
+ /**
+	 * @max_bpc: Maximum number of bits per color component the HDMI
+	 * bridge supports. Allowed values are 8, 10 and 12. This is only
+	 * relevant if @DRM_BRIDGE_OP_HDMI is set.
+ */
+ unsigned int max_bpc;
+
+ /**
+ * @hdmi_cec_dev: device to be used as a containing device for CEC
+ * functions.
+ */
+ struct device *hdmi_cec_dev;
+
+ /**
+ * @hdmi_audio_dev: device to be used as a parent for the HDMI Codec if
+ * either of @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO is set.
+ */
+ struct device *hdmi_audio_dev;
+
+ /**
+ * @hdmi_audio_max_i2s_playback_channels: maximum number of playback
+	 * I2S channels for bridges setting @DRM_BRIDGE_OP_HDMI_AUDIO or
+	 * @DRM_BRIDGE_OP_DP_AUDIO.
+ */
+ int hdmi_audio_max_i2s_playback_channels;
+
+ /**
+ * @hdmi_audio_i2s_formats: supported I2S formats, optional. The
+ * default is to allow all formats supported by the corresponding I2S
+ * bus driver. This is only used for bridges setting
+ * @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO.
+ */
+ u64 hdmi_audio_i2s_formats;
+
+ /**
+	 * @hdmi_audio_spdif_playback: set if this bridge has an S/PDIF
+	 * playback port for @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO.
+ */
+ unsigned int hdmi_audio_spdif_playback : 1;
+
+ /**
+ * @hdmi_audio_dai_port: sound DAI port for either of
+ * @DRM_BRIDGE_OP_HDMI_AUDIO and @DRM_BRIDGE_OP_DP_AUDIO, -1 if it is
+ * not used.
+ */
+ int hdmi_audio_dai_port;
+
+ /**
+ * @hdmi_cec_adapter_name: the name of the adapter to register
+ */
+ const char *hdmi_cec_adapter_name;
+
+ /**
+ * @hdmi_cec_available_las: number of logical addresses, CEC_MAX_LOG_ADDRS if unset
+ */
+ u8 hdmi_cec_available_las;
+
/** private: */
/**
* @hpd_mutex: Protects the @hpd_cb and @hpd_data fields.
@@ -768,17 +1278,97 @@ drm_priv_to_bridge(struct drm_private_obj *priv)
return container_of(priv, struct drm_bridge, base);
}
+struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge);
+void drm_bridge_put(struct drm_bridge *bridge);
+
+/* Cleanup action for use with __free() */
+DEFINE_FREE(drm_bridge_put, struct drm_bridge *, if (_T) drm_bridge_put(_T))
+
+void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_bridge_funcs *funcs);
+
+/**
+ * devm_drm_bridge_alloc - Allocate and initialize a bridge
+ * @dev: struct device of the bridge device
+ * @type: the type of the struct that contains &struct drm_bridge
+ * @member: the name of the &drm_bridge within @type
+ * @funcs: callbacks for this bridge
+ *
+ * The reference count of the returned bridge is initialized to 1. This
+ * reference will be automatically dropped via devm (by calling
+ * drm_bridge_put()) when @dev is removed.
+ *
+ * Returns:
+ * Pointer to new bridge, or ERR_PTR on failure.
+ */
+#define devm_drm_bridge_alloc(dev, type, member, funcs) \
+ ((type *)__devm_drm_bridge_alloc(dev, sizeof(type), \
+ offsetof(type, member), funcs))
+
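A probe-time usage sketch for devm_drm_bridge_alloc(); struct my_bridge and
my_bridge_funcs are hypothetical:

struct my_bridge {
	struct drm_bridge bridge;
	void __iomem *regs;
};

static int my_bridge_probe(struct platform_device *pdev)
{
	struct my_bridge *mb;

	mb = devm_drm_bridge_alloc(&pdev->dev, struct my_bridge, bridge,
				   &my_bridge_funcs);
	if (IS_ERR(mb))
		return PTR_ERR(mb);

	drm_bridge_add(&mb->bridge);

	return 0;
}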
void drm_bridge_add(struct drm_bridge *bridge);
+int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge);
void drm_bridge_remove(struct drm_bridge *bridge);
-struct drm_bridge *of_drm_find_bridge(struct device_node *np);
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
struct drm_bridge *previous,
enum drm_bridge_attach_flags flags);
+#ifdef CONFIG_OF
+struct drm_bridge *of_drm_find_bridge(struct device_node *np);
+#else
+static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np)
+{
+ return NULL;
+}
+#endif
+
+static inline bool drm_bridge_is_last(struct drm_bridge *bridge)
+{
+ return list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain);
+}
+
+/**
+ * drm_bridge_get_current_state() - Get the current bridge state
+ * @bridge: bridge object
+ *
+ * This function must be called with the modeset lock held.
+ *
+ * RETURNS:
+ *
+ * The current bridge state, or NULL if there is none.
+ */
+static inline struct drm_bridge_state *
+drm_bridge_get_current_state(struct drm_bridge *bridge)
+{
+ if (!bridge)
+ return NULL;
+
+ /*
+ * Only atomic bridges will have bridge->base initialized by
+ * drm_atomic_private_obj_init(), so we need to make sure we're
+ * working with one before we try to use the lock.
+ */
+ if (!bridge->funcs || !bridge->funcs->atomic_reset)
+ return NULL;
+
+ drm_modeset_lock_assert_held(&bridge->base.lock);
+
+ if (!bridge->base.state)
+ return NULL;
+
+ return drm_priv_to_bridge_state(bridge->base.state);
+}
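A caller holding the bridge's modeset lock can use this, for instance, to
read the bus format negotiated for the current configuration; a sketch:

	struct drm_bridge_state *bridge_state;
	u32 format = 0;

	/* Requires the bridge's modeset lock to be held. */
	bridge_state = drm_bridge_get_current_state(bridge);
	if (bridge_state)
		format = bridge_state->output_bus_cfg.format;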
+
/**
* drm_bridge_get_next_bridge() - Get the next bridge in the chain
* @bridge: bridge object
*
+ * The caller is responsible for holding a reference to @bridge via
+ * drm_bridge_get() or equivalent. This function leaves the refcount of
+ * @bridge unmodified.
+ *
+ * The refcount of the returned bridge is incremented. Use drm_bridge_put()
+ * when done with it.
+ *
* RETURNS:
* the next bridge in the chain after @bridge, or NULL if @bridge is the last.
*/
@@ -788,13 +1378,20 @@ drm_bridge_get_next_bridge(struct drm_bridge *bridge)
if (list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain))
return NULL;
- return list_next_entry(bridge, chain_node);
+ return drm_bridge_get(list_next_entry(bridge, chain_node));
}
/**
* drm_bridge_get_prev_bridge() - Get the previous bridge in the chain
* @bridge: bridge object
*
+ * The caller is responsible for holding a reference to @bridge via
+ * drm_bridge_get() or equivalent. This function leaves the refcount of
+ * @bridge unmodified.
+ *
+ * The refcount of the returned bridge is incremented. Use drm_bridge_put()
+ * when done with it.
+ *
* RETURNS:
* the previous bridge in the chain, or NULL if @bridge is the first.
*/
@@ -804,13 +1401,16 @@ drm_bridge_get_prev_bridge(struct drm_bridge *bridge)
if (list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain))
return NULL;
- return list_prev_entry(bridge, chain_node);
+ return drm_bridge_get(list_prev_entry(bridge, chain_node));
}
/**
* drm_bridge_chain_get_first_bridge() - Get the first bridge in the chain
* @encoder: encoder object
*
+ * The refcount of the returned bridge is incremented. Use drm_bridge_put()
+ * when done with it.
+ *
* RETURNS:
* the first bridge in the chain, or NULL if @encoder has no bridge attached
* to it.
@@ -818,35 +1418,92 @@ drm_bridge_get_prev_bridge(struct drm_bridge *bridge)
static inline struct drm_bridge *
drm_bridge_chain_get_first_bridge(struct drm_encoder *encoder)
{
- return list_first_entry_or_null(&encoder->bridge_chain,
- struct drm_bridge, chain_node);
+ return drm_bridge_get(list_first_entry_or_null(&encoder->bridge_chain,
+ struct drm_bridge, chain_node));
}
/**
- * drm_for_each_bridge_in_chain() - Iterate over all bridges present in a chain
+ * drm_bridge_chain_get_last_bridge() - Get the last bridge in the chain
+ * @encoder: encoder object
+ *
+ * The refcount of the returned bridge is incremented. Use drm_bridge_put()
+ * when done with it.
+ *
+ * RETURNS:
+ * the last bridge in the chain, or NULL if @encoder has no bridge attached
+ * to it.
+ */
+static inline struct drm_bridge *
+drm_bridge_chain_get_last_bridge(struct drm_encoder *encoder)
+{
+ return drm_bridge_get(list_last_entry_or_null(&encoder->bridge_chain,
+ struct drm_bridge, chain_node));
+}
+
+/**
+ * drm_bridge_get_next_bridge_and_put - Get the next bridge in the chain
+ * and put the previous
+ * @bridge: bridge object
+ *
+ * Same as drm_bridge_get_next_bridge() but additionally puts the @bridge.
+ *
+ * RETURNS:
+ * the next bridge in the chain after @bridge, or NULL if @bridge is the last.
+ */
+static inline struct drm_bridge *
+drm_bridge_get_next_bridge_and_put(struct drm_bridge *bridge)
+{
+ struct drm_bridge *next = drm_bridge_get_next_bridge(bridge);
+
+ drm_bridge_put(bridge);
+
+ return next;
+}
+
+/**
+ * drm_for_each_bridge_in_chain_scoped - iterate over all bridges attached
+ * to an encoder
* @encoder: the encoder to iterate bridges on
* @bridge: a bridge pointer updated to point to the current bridge at each
* iteration
*
* Iterate over all bridges present in the bridge chain attached to @encoder.
+ *
+ * Automatically gets/puts the bridge reference while iterating, and puts
+ * the reference even if returning or breaking in the middle of the loop.
*/
-#define drm_for_each_bridge_in_chain(encoder, bridge) \
- list_for_each_entry(bridge, &(encoder)->bridge_chain, chain_node)
+#define drm_for_each_bridge_in_chain_scoped(encoder, bridge) \
+ for (struct drm_bridge *bridge __free(drm_bridge_put) = \
+ drm_bridge_chain_get_first_bridge(encoder); \
+ bridge; \
+ bridge = drm_bridge_get_next_bridge_and_put(bridge))
+
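A usage sketch of the scoped iterator; note that the macro declares @bridge
itself and drops the reference on every exit path:

static unsigned int my_count_bridges(struct drm_encoder *encoder)
{
	unsigned int n = 0;

	drm_for_each_bridge_in_chain_scoped(encoder, bridge)
		n++;

	return n;
}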
+/**
+ * drm_for_each_bridge_in_chain_from - iterate over all bridges starting
+ * from the given bridge
+ * @first_bridge: the bridge to start from
+ * @bridge: a bridge pointer updated to point to the current bridge at each
+ * iteration
+ *
+ * Iterate over all bridges in the encoder chain starting from
+ * @first_bridge, included.
+ *
+ * Automatically gets/puts the bridge reference while iterating, and puts
+ * the reference even if returning or breaking in the middle of the loop.
+ */
+#define drm_for_each_bridge_in_chain_from(first_bridge, bridge) \
+ for (struct drm_bridge *bridge __free(drm_bridge_put) = \
+ drm_bridge_get(first_bridge); \
+ bridge; \
+ bridge = drm_bridge_get_next_bridge_and_put(bridge))
-bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
enum drm_mode_status
drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode);
-void drm_bridge_chain_disable(struct drm_bridge *bridge);
-void drm_bridge_chain_post_disable(struct drm_bridge *bridge);
void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode);
-void drm_bridge_chain_pre_enable(struct drm_bridge *bridge);
-void drm_bridge_chain_enable(struct drm_bridge *bridge);
int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
struct drm_crtc_state *crtc_state,
@@ -868,11 +1525,12 @@ drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
u32 output_fmt,
unsigned int *num_input_fmts);
-enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge);
+enum drm_connector_status
+drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector);
int drm_bridge_get_modes(struct drm_bridge *bridge,
struct drm_connector *connector);
-struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
- struct drm_connector *connector);
+const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector);
void drm_bridge_hpd_enable(struct drm_bridge *bridge,
void (*cb)(void *data,
enum drm_connector_status status),
@@ -882,16 +1540,60 @@ void drm_bridge_hpd_notify(struct drm_bridge *bridge,
enum drm_connector_status status);
#ifdef CONFIG_DRM_PANEL_BRIDGE
+bool drm_bridge_is_panel(const struct drm_bridge *bridge);
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel);
struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
u32 connector_type);
void drm_panel_bridge_remove(struct drm_bridge *bridge);
+int drm_panel_bridge_set_orientation(struct drm_connector *connector,
+ struct drm_bridge *bridge);
struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
struct drm_panel *panel);
struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
struct drm_panel *panel,
u32 connector_type);
+struct drm_bridge *drmm_panel_bridge_add(struct drm_device *drm,
+ struct drm_panel *panel);
struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge);
+#else
+static inline bool drm_bridge_is_panel(const struct drm_bridge *bridge)
+{
+ return false;
+}
+
+static inline int drm_panel_bridge_set_orientation(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ return -EINVAL;
+}
#endif
+#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL_BRIDGE)
+struct drm_bridge *devm_drm_of_get_bridge(struct device *dev, struct device_node *node,
+ u32 port, u32 endpoint);
+struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm, struct device_node *node,
+ u32 port, u32 endpoint);
+#else
+static inline struct drm_bridge *devm_drm_of_get_bridge(struct device *dev,
+ struct device_node *node,
+ u32 port,
+ u32 endpoint)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
+ struct device_node *node,
+ u32 port,
+ u32 endpoint)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge);
+
+void drm_bridge_debugfs_params(struct dentry *root);
+void drm_bridge_debugfs_encoder_params(struct dentry *root, struct drm_encoder *encoder);
+
#endif
diff --git a/include/drm/drm_bridge_connector.h b/include/drm/drm_bridge_connector.h
index 33f6c3bbdb4a..69630815fb09 100644
--- a/include/drm/drm_bridge_connector.h
+++ b/include/drm/drm_bridge_connector.h
@@ -10,8 +10,6 @@ struct drm_connector;
struct drm_device;
struct drm_encoder;
-void drm_bridge_connector_enable_hpd(struct drm_connector *connector);
-void drm_bridge_connector_disable_hpd(struct drm_connector *connector);
struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
struct drm_encoder *encoder);
diff --git a/include/drm/drm_bridge_helper.h b/include/drm/drm_bridge_helper.h
new file mode 100644
index 000000000000..6c35b479ec2a
--- /dev/null
+++ b/include/drm/drm_bridge_helper.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __DRM_BRIDGE_HELPER_H_
+#define __DRM_BRIDGE_HELPER_H_
+
+struct drm_bridge;
+struct drm_modeset_acquire_ctx;
+
+int drm_bridge_helper_reset_crtc(struct drm_bridge *bridge,
+ struct drm_modeset_acquire_ctx *ctx);
+
+#endif // __DRM_BRIDGE_HELPER_H_
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
new file mode 100644
index 000000000000..b909fa8f810a
--- /dev/null
+++ b/include/drm/drm_buddy.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __DRM_BUDDY_H__
+#define __DRM_BUDDY_H__
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/rbtree.h>
+
+struct drm_printer;
+
+#define DRM_BUDDY_RANGE_ALLOCATION BIT(0)
+#define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1)
+#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2)
+#define DRM_BUDDY_CLEAR_ALLOCATION BIT(3)
+#define DRM_BUDDY_CLEARED BIT(4)
+#define DRM_BUDDY_TRIM_DISABLE BIT(5)
+
+struct drm_buddy_block {
+#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
+#define DRM_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
+#define DRM_BUDDY_ALLOCATED (1 << 10)
+#define DRM_BUDDY_FREE (2 << 10)
+#define DRM_BUDDY_SPLIT (3 << 10)
+#define DRM_BUDDY_HEADER_CLEAR GENMASK_ULL(9, 9)
+/* Free to be used, if needed in the future */
+#define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(8, 6)
+#define DRM_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0)
+ u64 header;
+
+ struct drm_buddy_block *left;
+ struct drm_buddy_block *right;
+ struct drm_buddy_block *parent;
+
+ void *private; /* owned by creator */
+
+ /*
+ * While the block is allocated by the user through drm_buddy_alloc*,
+ * the user has ownership of the link, for example to maintain within
+ * a list, if so desired. As soon as the block is freed with
+ * drm_buddy_free* ownership is given back to the mm.
+ */
+ union {
+ struct rb_node rb;
+ struct list_head link;
+ };
+
+ struct list_head tmp_link;
+};
+
+/* Order-zero must be at least SZ_4K */
+#define DRM_BUDDY_MAX_ORDER (63 - 12)
+
+/*
+ * Binary Buddy System.
+ *
+ * Locking should be handled by the user, a simple mutex around
+ * drm_buddy_alloc* and drm_buddy_free* should suffice.
+ */
+struct drm_buddy {
+ /* Maintain a free list for each order. */
+ struct rb_root **free_trees;
+
+ /*
+ * Maintain explicit binary tree(s) to track the allocation of the
+ * address space. This gives us a simple way of finding a buddy block
+ * and performing the potentially recursive merge step when freeing a
+ * block. Nodes are either allocated or free, in which case they will
+ * also exist on the respective free list.
+ */
+ struct drm_buddy_block **roots;
+
+ /*
+ * Anything from here is public, and remains static for the lifetime of
+ * the mm. Everything above is considered do-not-touch.
+ */
+ unsigned int n_roots;
+ unsigned int max_order;
+
+ /* Must be at least SZ_4K */
+ u64 chunk_size;
+ u64 size;
+ u64 avail;
+ u64 clear_avail;
+};
+
+static inline u64
+drm_buddy_block_offset(const struct drm_buddy_block *block)
+{
+ return block->header & DRM_BUDDY_HEADER_OFFSET;
+}
+
+static inline unsigned int
+drm_buddy_block_order(struct drm_buddy_block *block)
+{
+ return block->header & DRM_BUDDY_HEADER_ORDER;
+}
+
+static inline unsigned int
+drm_buddy_block_state(struct drm_buddy_block *block)
+{
+ return block->header & DRM_BUDDY_HEADER_STATE;
+}
+
+static inline bool
+drm_buddy_block_is_allocated(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_state(block) == DRM_BUDDY_ALLOCATED;
+}
+
+static inline bool
+drm_buddy_block_is_clear(struct drm_buddy_block *block)
+{
+ return block->header & DRM_BUDDY_HEADER_CLEAR;
+}
+
+static inline bool
+drm_buddy_block_is_free(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_state(block) == DRM_BUDDY_FREE;
+}
+
+static inline bool
+drm_buddy_block_is_split(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_state(block) == DRM_BUDDY_SPLIT;
+}
+
+static inline u64
+drm_buddy_block_size(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ return mm->chunk_size << drm_buddy_block_order(block);
+}
+
+int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size);
+
+void drm_buddy_fini(struct drm_buddy *mm);
+
+struct drm_buddy_block *
+drm_get_buddy(struct drm_buddy_block *block);
+
+int drm_buddy_alloc_blocks(struct drm_buddy *mm,
+ u64 start, u64 end, u64 size,
+ u64 min_page_size,
+ struct list_head *blocks,
+ unsigned long flags);
+
+int drm_buddy_block_trim(struct drm_buddy *mm,
+ u64 *start,
+ u64 new_size,
+ struct list_head *blocks);
+
+void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear);
+
+void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
+
+void drm_buddy_free_list(struct drm_buddy *mm,
+ struct list_head *objects,
+ unsigned int flags);
+
+void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
+void drm_buddy_block_print(struct drm_buddy *mm,
+ struct drm_buddy_block *block,
+ struct drm_printer *p);
+#endif
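A minimal usage sketch for the allocator declared above; the sizes are
arbitrary and error handling is trimmed:

static int my_buddy_example(void)
{
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	int err;

	/* Manage a 4 GiB range in 4 KiB minimum chunks. */
	err = drm_buddy_init(&mm, SZ_4G, SZ_4K);
	if (err)
		return err;

	/* Allocate 1 MiB anywhere in the range. */
	err = drm_buddy_alloc_blocks(&mm, 0, SZ_4G, SZ_1M, SZ_4K,
				     &blocks, 0);
	if (!err)
		drm_buddy_free_list(&mm, &blocks, 0);

	drm_buddy_fini(&mm);

	return err;
}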
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index e9ad4863d915..08e0e3ffad13 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -35,6 +35,8 @@
#include <linux/scatterlist.h>
+struct iosys_map;
+
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
void drm_clflush_sg(struct sg_table *st);
void drm_clflush_virt_range(void *addr, unsigned long length);
@@ -65,9 +67,22 @@ static inline bool drm_arch_can_wc_memory(void)
* optimization entirely for ARM and arm64.
*/
return false;
+#elif defined(CONFIG_LOONGARCH)
+ /*
+ * LoongArch maintains cache coherency in hardware, but its WUC attribute
+	 * (Weak-ordered UnCached, which is similar to WC) is outside the scope
+	 * of the cache coherency mechanism. This means WUC can only be used
+	 * for write-only memory regions.
+ */
+ return false;
#else
return true;
#endif
}
+void drm_memcpy_init_early(void);
+
+void drm_memcpy_from_wc(struct iosys_map *dst,
+ const struct iosys_map *src,
+ unsigned long len);
#endif
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index f07f2fb02e75..c972a8a3385b 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -3,7 +3,7 @@
#ifndef _DRM_CLIENT_H_
#define _DRM_CLIENT_H_
-#include <linux/dma-buf-map.h>
+#include <linux/iosys-map.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/types.h>
@@ -29,6 +29,16 @@ struct drm_client_funcs {
struct module *owner;
/**
+ * @free:
+ *
+ * Called when the client gets unregistered. Implementations should
+ * release all client-specific data and free the memory.
+ *
+ * This callback is optional.
+ */
+ void (*free)(struct drm_client_dev *client);
+
+ /**
* @unregister:
*
* Called when &drm_device is unregistered. The client should respond by
@@ -47,12 +57,14 @@ struct drm_client_funcs {
*
* Note that the core does not guarantee exclusion against concurrent
* drm_open(). Clients need to ensure this themselves, for example by
- * using drm_master_internal_acquire() and
- * drm_master_internal_release().
+ * using drm_master_internal_acquire() and drm_master_internal_release().
+ *
+ * If the caller passes force, the client should ignore any present DRM
+ * master and restore the display anyway.
*
* This callback is optional.
*/
- int (*restore)(struct drm_client_dev *client);
+ int (*restore)(struct drm_client_dev *client, bool force);
/**
* @hotplug:
@@ -63,6 +75,24 @@ struct drm_client_funcs {
* This callback is optional.
*/
int (*hotplug)(struct drm_client_dev *client);
+
+ /**
+ * @suspend:
+ *
+ * Called when suspending the device.
+ *
+ * This callback is optional.
+ */
+ int (*suspend)(struct drm_client_dev *client);
+
+ /**
+ * @resume:
+ *
+ * Called when resuming the device from suspend.
+ *
+ * This callback is optional.
+ */
+ int (*resume)(struct drm_client_dev *client);
};
/**
@@ -106,6 +136,29 @@ struct drm_client_dev {
* @modesets: CRTC configurations
*/
struct drm_mode_set *modesets;
+
+ /**
+ * @suspended:
+ *
+ * The client has been suspended.
+ */
+ bool suspended;
+
+ /**
+ * @hotplug_pending:
+ *
+ * A hotplug event has been received while the client was suspended.
+ * Try again on resume.
+ */
+ bool hotplug_pending;
+
+ /**
+ * @hotplug_failed:
+ *
+	 * Set by the client hotplug helpers if hotplugging has failed
+	 * before. It is then usually not retried.
+ */
+ bool hotplug_failed;
};
int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
@@ -113,10 +166,6 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
void drm_client_release(struct drm_client_dev *client);
void drm_client_register(struct drm_client_dev *client);
-void drm_client_dev_unregister(struct drm_device *dev);
-void drm_client_dev_hotplug(struct drm_device *dev);
-void drm_client_dev_restore(struct drm_device *dev);
-
/**
* struct drm_client_buffer - DRM client buffer
*/
@@ -127,24 +176,18 @@ struct drm_client_buffer {
struct drm_client_dev *client;
/**
- * @handle: Buffer handle
- */
- u32 handle;
-
- /**
- * @pitch: Buffer pitch
- */
- u32 pitch;
-
- /**
* @gem: GEM object backing this buffer
+ *
+ * FIXME: The DRM framebuffer holds a reference on its GEM
+ * buffer objects. Do not use this field in new code and
+ * update existing users.
*/
struct drm_gem_object *gem;
/**
* @map: Virtual address for the buffer
*/
- struct dma_buf_map map;
+ struct iosys_map map;
/**
* @fb: DRM framebuffer
@@ -153,10 +196,14 @@ struct drm_client_buffer {
};
struct drm_client_buffer *
-drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
-void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
-int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect);
-int drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map);
+drm_client_buffer_create_dumb(struct drm_client_dev *client, u32 width, u32 height, u32 format);
+void drm_client_buffer_delete(struct drm_client_buffer *buffer);
+int drm_client_buffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect);
+int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
+ struct iosys_map *map_copy);
+void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer);
+int drm_client_buffer_vmap(struct drm_client_buffer *buffer,
+ struct iosys_map *map);
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer);
int drm_client_modeset_create(struct drm_client_dev *client);
@@ -167,6 +214,7 @@ int drm_client_modeset_check(struct drm_client_dev *client);
int drm_client_modeset_commit_locked(struct drm_client_dev *client);
int drm_client_modeset_commit(struct drm_client_dev *client);
int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
+int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index);
/**
* drm_client_for_each_modeset() - Iterate over client modesets
@@ -191,6 +239,4 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
drm_for_each_connector_iter(connector, iter) \
if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
-void drm_client_debugfs_init(struct drm_minor *minor);
-
#endif
diff --git a/include/drm/drm_client_event.h b/include/drm/drm_client_event.h
new file mode 100644
index 000000000000..79369c755bc9
--- /dev/null
+++ b/include/drm/drm_client_event.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+
+#ifndef _DRM_CLIENT_EVENT_H_
+#define _DRM_CLIENT_EVENT_H_
+
+#include <linux/types.h>
+
+struct drm_device;
+
+#if defined(CONFIG_DRM_CLIENT)
+void drm_client_dev_unregister(struct drm_device *dev);
+void drm_client_dev_hotplug(struct drm_device *dev);
+void drm_client_dev_restore(struct drm_device *dev, bool force);
+void drm_client_dev_suspend(struct drm_device *dev);
+void drm_client_dev_resume(struct drm_device *dev);
+#else
+static inline void drm_client_dev_unregister(struct drm_device *dev)
+{ }
+static inline void drm_client_dev_hotplug(struct drm_device *dev)
+{ }
+static inline void drm_client_dev_restore(struct drm_device *dev, bool force)
+{ }
+static inline void drm_client_dev_suspend(struct drm_device *dev)
+{ }
+static inline void drm_client_dev_resume(struct drm_device *dev)
+{ }
+#endif
+
+#endif
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index 81c298488b0c..5140691f476a 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -24,6 +24,7 @@
#define __DRM_COLOR_MGMT_H__
#include <linux/ctype.h>
+#include <linux/math64.h>
#include <drm/drm_property.h>
struct drm_crtc;
@@ -36,20 +37,33 @@ struct drm_plane;
*
* Extract a degamma/gamma LUT value provided by user (in the form of
* &drm_color_lut entries) and round it to the precision supported by the
- * hardware.
+ * hardware, following OpenGL int<->float conversion rules
+ * (see e.g. OpenGL 4.6 specification, section 2.3.5 "Fixed-Point Data Conversions").
*/
static inline u32 drm_color_lut_extract(u32 user_input, int bit_precision)
{
- u32 val = user_input;
- u32 max = 0xffff >> (16 - bit_precision);
+ if (bit_precision > 16)
+ return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(user_input, (1 << bit_precision) - 1),
+ (1 << 16) - 1);
+ else
+ return DIV_ROUND_CLOSEST(user_input * ((1 << bit_precision) - 1),
+ (1 << 16) - 1);
+}
- /* Round only if we're not using full precision. */
- if (bit_precision < 16) {
- val += 1UL << (16 - bit_precision - 1);
- val >>= 16 - bit_precision;
- }
+/**
+ * drm_color_lut32_extract - clamp and round LUT entries
+ * @user_input: input value
+ * @bit_precision: number of bits the hw LUT supports
+ *
+ * Extract a U0.bit_precision value from a U0.32 LUT entry.
+ */
+static inline u32 drm_color_lut32_extract(u32 user_input, int bit_precision)
+{
+ u64 max = (bit_precision >= 64) ? ~0ULL : (1ULL << bit_precision) - 1;
- return clamp_val(val, 0, max);
+ return DIV_ROUND_CLOSEST_ULL((u64)user_input * max,
+ (1ULL << 32) - 1);
}
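A worked example of the rounding behaviour above: mapping the 16-bit
midpoint to 8 bits of precision rounds up, since the exact quotient lands
just above .5:

	drm_color_lut_extract(0x8000, 8);
	/* = DIV_ROUND_CLOSEST(0x8000 * 255, 65535)
	 * = DIV_ROUND_CLOSEST(8355840, 65535)
	 * = 128  (127.50... rounds up)
	 */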
u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n);
@@ -74,6 +88,18 @@ static inline int drm_color_lut_size(const struct drm_property_blob *blob)
return blob->length / sizeof(struct drm_color_lut);
}
+/**
+ * drm_color_lut32_size - calculate the number of entries in the extended LUT
+ * @blob: blob containing the LUT
+ *
+ * Returns:
+ * The number of entries in the color LUT stored in @blob.
+ */
+static inline int drm_color_lut32_size(const struct drm_property_blob *blob)
+{
+ return blob->length / sizeof(struct drm_color_lut32);
+}
+
enum drm_color_encoding {
DRM_COLOR_YCBCR_BT601,
DRM_COLOR_YCBCR_BT709,
@@ -120,4 +146,33 @@ enum drm_color_lut_tests {
};
int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests);
+
+/*
+ * Gamma-LUT programming
+ */
+
+typedef void (*drm_crtc_set_lut_func)(struct drm_crtc *, unsigned int, u16, u16, u16);
+
+void drm_crtc_load_gamma_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma);
+void drm_crtc_load_gamma_565_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma);
+void drm_crtc_load_gamma_555_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma);
+
+void drm_crtc_fill_gamma_888(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma);
+void drm_crtc_fill_gamma_565(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma);
+void drm_crtc_fill_gamma_555(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma);
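A sketch of hooking one of the gamma helpers up to hardware. The callback
receives 16-bit component values; this sketch assumes an 8-bit palette and
narrows accordingly, and my_hw_write_palette() is hypothetical:

static void my_crtc_set_gamma(struct drm_crtc *crtc, unsigned int index,
			      u16 r, u16 g, u16 b)
{
	my_hw_write_palette(crtc, index, r >> 8, g >> 8, b >> 8);
}

/* In the CRTC's atomic flush, with @lut taken from the GAMMA_LUT blob: */
drm_crtc_load_gamma_888(crtc, lut, my_crtc_set_gamma);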
+
+/*
+ * Color-LUT programming
+ */
+
+void drm_crtc_load_palette_8(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_palette);
+
+void drm_crtc_fill_palette_332(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette);
+void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette);
+
+int drm_color_lut32_check(const struct drm_property_blob *lut, u32 tests);
#endif
diff --git a/include/drm/drm_colorop.h b/include/drm/drm_colorop.h
new file mode 100644
index 000000000000..a3a32f9f918c
--- /dev/null
+++ b/include/drm/drm_colorop.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DRM_COLOROP_H__
+#define __DRM_COLOROP_H__
+
+#include <drm/drm_mode_object.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_property.h>
+
+/* DRM colorop flags */
+#define DRM_COLOROP_FLAG_ALLOW_BYPASS (1<<0) /* Allow bypass on the drm_colorop */
+
+/**
+ * enum drm_colorop_curve_1d_type - type of 1D curve
+ *
+ * Describes a 1D curve to be applied by the DRM_COLOROP_1D_CURVE colorop.
+ */
+enum drm_colorop_curve_1d_type {
+ /**
+ * @DRM_COLOROP_1D_CURVE_SRGB_EOTF:
+ *
+ * enum string "sRGB EOTF"
+ *
+ * sRGB piece-wise electro-optical transfer function. Transfer
+ * characteristics as defined by IEC 61966-2-1 sRGB. Equivalent
+ * to H.273 TransferCharacteristics code point 13 with
+ * MatrixCoefficients set to 0.
+ */
+ DRM_COLOROP_1D_CURVE_SRGB_EOTF,
+
+ /**
+ * @DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF:
+ *
+ * enum string "sRGB Inverse EOTF"
+ *
+ * The inverse of &DRM_COLOROP_1D_CURVE_SRGB_EOTF
+ */
+ DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF,
+
+ /**
+ * @DRM_COLOROP_1D_CURVE_PQ_125_EOTF:
+ *
+ * enum string "PQ 125 EOTF"
+ *
+ * The PQ transfer function, scaled by 125.0f, so that 10,000
+ * nits correspond to 125.0f.
+ *
+ * Transfer characteristics of the PQ function as defined by
+ * SMPTE ST 2084 (2014) for 10-, 12-, 14-, and 16-bit systems
+ * and Rec. ITU-R BT.2100-2 perceptual quantization (PQ) system,
+ * represented by H.273 TransferCharacteristics code point 16.
+ */
+ DRM_COLOROP_1D_CURVE_PQ_125_EOTF,
+
+ /**
+ * @DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF:
+ *
+ * enum string "PQ 125 Inverse EOTF"
+ *
+ * The inverse of DRM_COLOROP_1D_CURVE_PQ_125_EOTF.
+ */
+ DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF,
+
+ /**
+ * @DRM_COLOROP_1D_CURVE_BT2020_INV_OETF:
+ *
+ * enum string "BT.2020 Inverse OETF"
+ *
+ * The inverse of &DRM_COLOROP_1D_CURVE_BT2020_OETF
+ */
+ DRM_COLOROP_1D_CURVE_BT2020_INV_OETF,
+
+ /**
+ * @DRM_COLOROP_1D_CURVE_BT2020_OETF:
+ *
+ * enum string "BT.2020 OETF"
+ *
+ * The BT.2020/BT.709 transfer function. The BT.709 and BT.2020
+ * transfer functions are the same, the only difference is that
+ * BT.2020 is defined with more precision for 10 and 12-bit
+ * encodings.
+	 */
+ DRM_COLOROP_1D_CURVE_BT2020_OETF,
+
+ /**
+ * @DRM_COLOROP_1D_CURVE_GAMMA22:
+ *
+ * enum string "Gamma 2.2"
+ *
+ * A gamma 2.2 power function. This applies a power curve with
+ * gamma value of 2.2 to the input values.
+ */
+ DRM_COLOROP_1D_CURVE_GAMMA22,
+
+ /**
+ * @DRM_COLOROP_1D_CURVE_GAMMA22_INV:
+ *
+ * enum string "Gamma 2.2 Inverse"
+ *
+ * The inverse of &DRM_COLOROP_1D_CURVE_GAMMA22
+ */
+ DRM_COLOROP_1D_CURVE_GAMMA22_INV,
+ /**
+ * @DRM_COLOROP_1D_CURVE_COUNT:
+ *
+ * enum value denoting the size of the enum
+ */
+ DRM_COLOROP_1D_CURVE_COUNT
+};
+
+/**
+ * struct drm_colorop_state - mutable colorop state
+ */
+struct drm_colorop_state {
+ /** @colorop: backpointer to the colorop */
+ struct drm_colorop *colorop;
+
+ /*
+ * Color properties
+ *
+ * The following fields are not always valid, their usage depends
+ * on the colorop type. See their associated comment for more
+ * information.
+ */
+
+ /**
+ * @bypass:
+ *
+ * When the property BYPASS exists on this colorop, this stores
+ * the requested bypass state: true if colorop shall be bypassed,
+ * false if colorop is enabled.
+ */
+ bool bypass;
+
+ /**
+ * @curve_1d_type:
+ *
+ * Type of 1D curve.
+ */
+ enum drm_colorop_curve_1d_type curve_1d_type;
+
+ /**
+ * @multiplier:
+ *
+ * Multiplier to 'gain' the plane. Format is S31.32 sign-magnitude.
+ */
+ uint64_t multiplier;
+
+ /**
+ * @data:
+ *
+ * Data blob for any TYPE that requires such a blob. The
+ * interpretation of the blob is TYPE-specific.
+ *
+ * See the &drm_colorop_type documentation for how blob is laid
+ * out.
+ */
+ struct drm_property_blob *data;
+
+ /** @state: backpointer to global drm_atomic_state */
+ struct drm_atomic_state *state;
+};
+
+/**
+ * struct drm_colorop - DRM color operation control structure
+ *
+ * A colorop represents one color operation. They can be chained via
+ * the 'next' pointer to build a color pipeline.
+ *
+ * Since colorops cannot stand alone and are used to describe color
+ * operations on a plane, they don't have their own locking mechanism but
+ * are locked and programmed along with their associated &drm_plane.
+ *
+ */
+struct drm_colorop {
+ /** @dev: parent DRM device */
+ struct drm_device *dev;
+
+ /**
+ * @head:
+ *
+ * List of all colorops on @dev, linked from &drm_mode_config.colorop_list.
+ * Invariant over the lifetime of @dev and therefore does not need
+ * locking.
+ */
+ struct list_head head;
+
+ /**
+ * @index: Position inside the mode_config.list, can be used as an array
+ * index. It is invariant over the lifetime of the colorop.
+ */
+ unsigned int index;
+
+ /** @base: base mode object */
+ struct drm_mode_object base;
+
+ /**
+ * @plane:
+ *
+ * The plane on which the colorop sits. A drm_colorop is always unique
+ * to a plane.
+ */
+ struct drm_plane *plane;
+
+ /**
+ * @state:
+ *
+ * Current atomic state for this colorop.
+ *
+ * This is protected by @mutex. Note that nonblocking atomic commits
+ * access the current colorop state without taking locks.
+ */
+ struct drm_colorop_state *state;
+
+ /*
+ * Color properties
+ *
+ * The following fields are not always valid, their usage depends
+ * on the colorop type. See their associated comment for more
+ * information.
+ */
+
+ /** @properties: property tracking for this colorop */
+ struct drm_object_properties properties;
+
+ /**
+ * @type:
+ *
+ * Read-only
+ * Type of color operation
+ */
+ enum drm_colorop_type type;
+
+ /**
+ * @next:
+ *
+ * Read-only
+ * Pointer to next drm_colorop in pipeline
+ */
+ struct drm_colorop *next;
+
+ /**
+ * @type_property:
+ *
+ * Read-only "TYPE" property for specifying the type of
+ * this color operation. The type is enum drm_colorop_type.
+ */
+ struct drm_property *type_property;
+
+ /**
+ * @bypass_property:
+ *
+ * Boolean property to control enablement of the color
+ * operation. Only present if the DRM_COLOROP_FLAG_ALLOW_BYPASS
+ * flag is set. When present, setting bypass to "true" shall
+ * always be supported to allow compositors to quickly fall
+ * back to alternate methods of color processing. This is
+ * important since setting color operations can fail due to
+ * unique HW constraints.
+ */
+ struct drm_property *bypass_property;
+
+ /**
+ * @size:
+ *
+ * Number of entries in the custom LUT. Read-only.
+ */
+ uint32_t size;
+
+ /**
+ * @lut1d_interpolation:
+ *
+ * Read-only
+ * Interpolation for DRM_COLOROP_1D_LUT
+ */
+ enum drm_colorop_lut1d_interpolation_type lut1d_interpolation;
+
+ /**
+ * @lut3d_interpolation:
+ *
+ * Read-only
+ * Interpolation for DRM_COLOROP_3D_LUT
+ */
+ enum drm_colorop_lut3d_interpolation_type lut3d_interpolation;
+
+ /**
+ * @lut1d_interpolation_property:
+ *
+ * Read-only property for DRM_COLOROP_1D_LUT interpolation
+ */
+ struct drm_property *lut1d_interpolation_property;
+
+ /**
+ * @curve_1d_type_property:
+ *
+ * Sub-type for DRM_COLOROP_1D_CURVE type.
+ */
+ struct drm_property *curve_1d_type_property;
+
+ /**
+ * @multiplier_property:
+ *
+ * Multiplier property for plane gain
+ */
+ struct drm_property *multiplier_property;
+
+ /**
+ * @size_property:
+ *
+ * Size property for custom LUT from userspace.
+ */
+ struct drm_property *size_property;
+
+ /**
+ * @lut3d_interpolation_property:
+ *
+ * Read-only property for DRM_COLOROP_3D_LUT interpolation
+ */
+ struct drm_property *lut3d_interpolation_property;
+
+ /**
+ * @data_property:
+ *
+ * Blob property for any TYPE that requires a blob of data,
+ * such as 1D LUT, CTM, 3D LUT, etc.
+ *
+ * The way this blob is interpreted depends on the TYPE of
+ * this colorop.
+ */
+ struct drm_property *data_property;
+
+ /**
+ * @next_property:
+ *
+ * Read-only property pointing to the next colorop in the pipeline.
+ */
+ struct drm_property *next_property;
+};
+
+#define obj_to_colorop(x) container_of(x, struct drm_colorop, base)
+
+/**
+ * drm_colorop_find - look up a Colorop object from its ID
+ * @dev: DRM device
+ * @file_priv: drm file to check for lease against.
+ * @id: &drm_mode_object ID
+ *
+ * This can be used to look up a colorop from its userspace ID. Only used by
+ * drivers for legacy IOCTLs and interfaces; nowadays, extensions to the KMS
+ * userspace interface should be done using &drm_property.
+ */
+static inline struct drm_colorop *drm_colorop_find(struct drm_device *dev,
+ struct drm_file *file_priv,
+ uint32_t id)
+{
+ struct drm_mode_object *mo;
+
+ mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_COLOROP);
+ return mo ? obj_to_colorop(mo) : NULL;
+}
+
+void drm_colorop_pipeline_destroy(struct drm_device *dev);
+void drm_colorop_cleanup(struct drm_colorop *colorop);
+
+int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, u64 supported_tfs, uint32_t flags);
+int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, uint32_t lut_size,
+ enum drm_colorop_lut1d_interpolation_type interpolation,
+ uint32_t flags);
+int drm_plane_colorop_ctm_3x4_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, uint32_t flags);
+int drm_plane_colorop_mult_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, uint32_t flags);
+int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane,
+ uint32_t lut_size,
+ enum drm_colorop_lut3d_interpolation_type interpolation,
+ uint32_t flags);
+
+struct drm_colorop_state *
+drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop);
+
+void drm_colorop_atomic_destroy_state(struct drm_colorop *colorop,
+ struct drm_colorop_state *state);
+
+/**
+ * drm_colorop_reset - reset colorop atomic state
+ * @colorop: drm colorop
+ *
+ * Resets the atomic state for @colorop by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void drm_colorop_reset(struct drm_colorop *colorop);
+
+/**
+ * drm_colorop_index - find the index of a registered colorop
+ * @colorop: colorop to find index for
+ *
+ * Given a registered colorop, return the index of that colorop within a DRM
+ * device's list of colorops.
+ */
+static inline unsigned int drm_colorop_index(const struct drm_colorop *colorop)
+{
+ return colorop->index;
+}
+
+#define drm_for_each_colorop(colorop, dev) \
+ list_for_each_entry(colorop, &(dev)->mode_config.colorop_list, head)
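For illustration, a minimal sketch of walking a device's colorops with the
iterator above; the example_dump_colorops() helper is hypothetical, not part
of this API:

#include <drm/drm_colorop.h>
#include <drm/drm_print.h>

/* Hypothetical helper: log every colorop registered on @dev. */
static void example_dump_colorops(struct drm_device *dev)
{
	struct drm_colorop *colorop;

	drm_for_each_colorop(colorop, dev)
		drm_dbg(dev, "colorop %u: type %s\n",
			drm_colorop_index(colorop),
			drm_get_colorop_type_name(colorop->type));
}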
+
+/**
+ * drm_get_colorop_type_name - return a string for colorop type
+ * @type: colorop type to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions, this one returns a
+ * const pointer and is hence thread-safe.
+ */
+const char *drm_get_colorop_type_name(enum drm_colorop_type type);
+
+/**
+ * drm_get_colorop_curve_1d_type_name - return a string for 1D curve type
+ * @type: 1d curve type to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions, this one returns a
+ * const pointer and is hence thread-safe.
+ */
+const char *drm_get_colorop_curve_1d_type_name(enum drm_colorop_curve_1d_type type);
+
+const char *
+drm_get_colorop_lut1d_interpolation_name(enum drm_colorop_lut1d_interpolation_type type);
+
+const char *
+drm_get_colorop_lut3d_interpolation_name(enum drm_colorop_lut3d_interpolation_type type);
+
+void drm_colorop_set_next_property(struct drm_colorop *colorop, struct drm_colorop *next);
+
+#endif /* __DRM_COLOROP_H__ */
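As a rough usage sketch of the initializers declared above (allocation and
error unwinding are deliberately simplified; a real driver would use managed
allocations and clean up on failure), a two-stage plane pipeline could be
built like this:

#include <linux/slab.h>
#include <drm/drm_colorop.h>

static int example_build_pipeline(struct drm_device *dev,
				  struct drm_plane *plane)
{
	struct drm_colorop *curve, *ctm;
	int ret;

	curve = kzalloc(sizeof(*curve), GFP_KERNEL);
	ctm = kzalloc(sizeof(*ctm), GFP_KERNEL);
	if (!curve || !ctm)
		return -ENOMEM;

	/* Stage 1: a fixed 1D transfer function. */
	ret = drm_plane_colorop_curve_1d_init(dev, curve, plane,
					      BIT(DRM_COLOROP_1D_CURVE_BT2020_OETF),
					      0);
	if (ret)
		return ret;

	/* Stage 2: a 3x4 color transformation matrix. */
	ret = drm_plane_colorop_ctm_3x4_init(dev, ctm, plane, 0);
	if (ret)
		return ret;

	/* Link the stages: the curve's output feeds the CTM. */
	drm_colorop_set_next_property(curve, ctm);

	return 0;
}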
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 1922b278ffad..8f34f4b8183d 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -27,8 +27,10 @@
#include <linux/llist.h>
#include <linux/ctype.h>
#include <linux/hdmi.h>
+#include <linux/notifier.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_util.h>
+#include <drm/drm_property.h>
#include <uapi/drm/drm_mode.h>
@@ -36,11 +38,17 @@ struct drm_connector_helper_funcs;
struct drm_modeset_acquire_ctx;
struct drm_device;
struct drm_crtc;
+struct drm_display_mode;
struct drm_encoder;
+struct drm_panel;
struct drm_property;
struct drm_property_blob;
struct drm_printer;
+struct drm_privacy_screen;
+struct drm_edid;
struct edid;
+struct hdmi_codec_daifmt;
+struct hdmi_codec_params;
struct i2c_adapter;
enum drm_connector_force {
@@ -141,6 +149,77 @@ enum subpixel_order {
};
/**
+ * enum drm_connector_tv_mode - Analog TV output mode
+ *
+ * This enum is used to indicate the TV output mode used on an analog TV
+ * connector.
+ *
+ * WARNING: The values of this enum are uABI since they're exposed in the
+ * "TV mode" connector property.
+ */
+enum drm_connector_tv_mode {
+ /**
+ * @DRM_MODE_TV_MODE_NTSC: CCIR System M (aka 525-lines)
+ * together with the NTSC Color Encoding.
+ */
+ DRM_MODE_TV_MODE_NTSC,
+
+ /**
+ * @DRM_MODE_TV_MODE_NTSC_443: Variant of
+ * @DRM_MODE_TV_MODE_NTSC. Uses a color subcarrier frequency
+ * of 4.43 MHz.
+ */
+ DRM_MODE_TV_MODE_NTSC_443,
+
+ /**
+ * @DRM_MODE_TV_MODE_NTSC_J: Variant of @DRM_MODE_TV_MODE_NTSC
+ * used in Japan. Uses a black level equal to the blanking
+ * level.
+ */
+ DRM_MODE_TV_MODE_NTSC_J,
+
+ /**
+ * @DRM_MODE_TV_MODE_PAL: CCIR System B together with the PAL
+ * color system.
+ */
+ DRM_MODE_TV_MODE_PAL,
+
+ /**
+ * @DRM_MODE_TV_MODE_PAL_M: CCIR System M (aka 525-lines)
+ * together with the PAL color encoding
+ */
+ DRM_MODE_TV_MODE_PAL_M,
+
+ /**
+ * @DRM_MODE_TV_MODE_PAL_N: CCIR System N together with the PAL
+ * color encoding. It uses 625 lines, but has a color subcarrier
+ * frequency of 3.58 MHz, the SECAM color space, and narrower
+ * channels compared to most of the other PAL variants.
+ */
+ DRM_MODE_TV_MODE_PAL_N,
+
+ /**
+ * @DRM_MODE_TV_MODE_SECAM: CCIR System B together with the
+ * SECAM color system.
+ */
+ DRM_MODE_TV_MODE_SECAM,
+
+ /**
+ * @DRM_MODE_TV_MODE_MONOCHROME: Use timings appropriate to
+ * the DRM mode, including equalizing pulses for a 525-line
+ * or 625-line mode, with no pedestal or color encoding.
+ */
+ DRM_MODE_TV_MODE_MONOCHROME,
+
+ /**
+ * @DRM_MODE_TV_MODE_MAX: Number of analog TV output modes.
+ *
+ * Internal implementation detail; this is not uABI.
+ */
+ DRM_MODE_TV_MODE_MAX,
+};
+
+/**
* struct drm_scrambling: sink's scrambling support.
*/
struct drm_scrambling {
@@ -242,9 +321,6 @@ struct drm_hdmi_info {
*/
unsigned long y420_cmdb_modes[BITS_TO_LONGS(256)];
- /** @y420_cmdb_map: bitmap of SVD index, to extraxt vcb modes */
- u64 y420_cmdb_map;
-
/** @y420_dc_modes: bitmap of deep color support index */
u8 y420_dc_modes;
@@ -304,6 +380,32 @@ enum drm_panel_orientation {
};
/**
+ * enum drm_hdmi_broadcast_rgb - Broadcast RGB Selection for an HDMI @drm_connector
+ */
+enum drm_hdmi_broadcast_rgb {
+ /**
+ * @DRM_HDMI_BROADCAST_RGB_AUTO: The RGB range is selected
+ * automatically based on the mode.
+ */
+ DRM_HDMI_BROADCAST_RGB_AUTO,
+
+ /**
+ * @DRM_HDMI_BROADCAST_RGB_FULL: Full range RGB is forced.
+ */
+ DRM_HDMI_BROADCAST_RGB_FULL,
+
+ /**
+ * @DRM_HDMI_BROADCAST_RGB_LIMITED: Limited range RGB is forced.
+ */
+ DRM_HDMI_BROADCAST_RGB_LIMITED,
+};
+
+const char *
+drm_hdmi_connector_get_broadcast_rgb_name(enum drm_hdmi_broadcast_rgb broadcast_rgb);
+const char *
+drm_hdmi_connector_get_output_format_name(enum hdmi_colorspace fmt);
+
+/**
* struct drm_monitor_range_info - Panel's Monitor range in EDID for
* &drm_display_info
*
@@ -316,41 +418,143 @@ enum drm_panel_orientation {
* EDID's detailed monitor range
*/
struct drm_monitor_range_info {
- u8 min_vfreq;
- u8 max_vfreq;
+ u16 min_vfreq;
+ u16 max_vfreq;
};
-/*
- * This is a consolidated colorimetry list supported by HDMI and
+/**
+ * struct drm_luminance_range_info - Panel's luminance range for
+ * &drm_display_info. Calculated using data in EDID
+ *
+ * This struct is used to store a luminance range supported by the panel,
+ * as calculated from the EDID's static HDR metadata.
+ *
+ * @min_luminance: This is the min supported luminance value
+ *
+ * @max_luminance: This is the max supported luminance value
+ */
+struct drm_luminance_range_info {
+ u32 min_luminance;
+ u32 max_luminance;
+};
+
+/**
+ * enum drm_privacy_screen_status - privacy screen status
+ *
+ * This enum is used to track and control the state of the integrated privacy
+ * screen present on some display panels, via the "privacy-screen sw-state"
+ * and "privacy-screen hw-state" properties. Note the _LOCKED enum values
+ * are only valid for the "privacy-screen hw-state" property.
+ *
+ * @PRIVACY_SCREEN_DISABLED:
+ * The privacy-screen on the panel is disabled
+ * @PRIVACY_SCREEN_ENABLED:
+ * The privacy-screen on the panel is enabled
+ * @PRIVACY_SCREEN_DISABLED_LOCKED:
+ * The privacy-screen on the panel is disabled and locked (cannot be changed)
+ * @PRIVACY_SCREEN_ENABLED_LOCKED:
+ * The privacy-screen on the panel is enabled and locked (cannot be changed)
+ */
+enum drm_privacy_screen_status {
+ PRIVACY_SCREEN_DISABLED = 0,
+ PRIVACY_SCREEN_ENABLED,
+ PRIVACY_SCREEN_DISABLED_LOCKED,
+ PRIVACY_SCREEN_ENABLED_LOCKED,
+};
+
+/**
+ * enum drm_colorspace - color space
+ *
+ * This enum is a consolidated colorimetry list supported by HDMI and
* DP protocol standard. The respective connectors will register
* a property with the subset of this list (supported by that
* respective protocol). Userspace will set the colorspace through
* a colorspace property which will be created and exposed to
* userspace.
+ *
+ * DP definitions come from the DP v2.0 spec.
+ * HDMI definitions come from the CTA-861-H spec.
+ *
+ * @DRM_MODE_COLORIMETRY_DEFAULT:
+ * Driver specific behavior.
+ * @DRM_MODE_COLORIMETRY_NO_DATA:
+ * Driver specific behavior.
+ * @DRM_MODE_COLORIMETRY_SMPTE_170M_YCC:
+ * (HDMI)
+ * SMPTE ST 170M colorimetry format
+ * @DRM_MODE_COLORIMETRY_BT709_YCC:
+ * (HDMI, DP)
+ * ITU-R BT.709 colorimetry format
+ * @DRM_MODE_COLORIMETRY_XVYCC_601:
+ * (HDMI, DP)
+ * xvYCC601 colorimetry format
+ * @DRM_MODE_COLORIMETRY_XVYCC_709:
+ * (HDMI, DP)
+ * xvYCC709 colorimetry format
+ * @DRM_MODE_COLORIMETRY_SYCC_601:
+ * (HDMI, DP)
+ * sYCC601 colorimetry format
+ * @DRM_MODE_COLORIMETRY_OPYCC_601:
+ * (HDMI, DP)
+ * opYCC601 colorimetry format
+ * @DRM_MODE_COLORIMETRY_OPRGB:
+ * (HDMI, DP)
+ * opRGB colorimetry format
+ * @DRM_MODE_COLORIMETRY_BT2020_CYCC:
+ * (HDMI, DP)
+ * ITU-R BT.2020 Y'c C'bc C'rc (constant luminance) colorimetry format
+ * @DRM_MODE_COLORIMETRY_BT2020_RGB:
+ * (HDMI, DP)
+ * ITU-R BT.2020 R' G' B' colorimetry format
+ * @DRM_MODE_COLORIMETRY_BT2020_YCC:
+ * (HDMI, DP)
+ * ITU-R BT.2020 Y' C'b C'r colorimetry format
+ * @DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
+ * (HDMI)
+ * SMPTE ST 2113 P3D65 colorimetry format
+ * @DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
+ * (HDMI)
+ * SMPTE ST 2113 P3DCI colorimetry format
+ * @DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED:
+ * (DP)
+ * RGB wide gamut fixed point colorimetry format
+ * @DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT:
+ * (DP)
+ * RGB wide gamut floating point
+ * (scRGB (IEC 61966-2-2)) colorimetry format
+ * @DRM_MODE_COLORIMETRY_BT601_YCC:
+ * (DP)
+ * ITU-R BT.601 colorimetry format
+ * The DP spec does not say whether this is the 525 or the 625
+ * line version.
+ * @DRM_MODE_COLORIMETRY_COUNT:
+ * Not a valid value; merely used for counting.
*/
-
-/* For Default case, driver will set the colorspace */
-#define DRM_MODE_COLORIMETRY_DEFAULT 0
-/* CEA 861 Normal Colorimetry options */
-#define DRM_MODE_COLORIMETRY_NO_DATA 0
-#define DRM_MODE_COLORIMETRY_SMPTE_170M_YCC 1
-#define DRM_MODE_COLORIMETRY_BT709_YCC 2
-/* CEA 861 Extended Colorimetry Options */
-#define DRM_MODE_COLORIMETRY_XVYCC_601 3
-#define DRM_MODE_COLORIMETRY_XVYCC_709 4
-#define DRM_MODE_COLORIMETRY_SYCC_601 5
-#define DRM_MODE_COLORIMETRY_OPYCC_601 6
-#define DRM_MODE_COLORIMETRY_OPRGB 7
-#define DRM_MODE_COLORIMETRY_BT2020_CYCC 8
-#define DRM_MODE_COLORIMETRY_BT2020_RGB 9
-#define DRM_MODE_COLORIMETRY_BT2020_YCC 10
-/* Additional Colorimetry extension added as part of CTA 861.G */
-#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 11
-#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER 12
-/* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format */
-#define DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED 13
-#define DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT 14
-#define DRM_MODE_COLORIMETRY_BT601_YCC 15
+enum drm_colorspace {
+ /* For Default case, driver will set the colorspace */
+ DRM_MODE_COLORIMETRY_DEFAULT = 0,
+ /* CEA 861 Normal Colorimetry options */
+ DRM_MODE_COLORIMETRY_NO_DATA = 0,
+ DRM_MODE_COLORIMETRY_SMPTE_170M_YCC = 1,
+ DRM_MODE_COLORIMETRY_BT709_YCC = 2,
+ /* CEA 861 Extended Colorimetry Options */
+ DRM_MODE_COLORIMETRY_XVYCC_601 = 3,
+ DRM_MODE_COLORIMETRY_XVYCC_709 = 4,
+ DRM_MODE_COLORIMETRY_SYCC_601 = 5,
+ DRM_MODE_COLORIMETRY_OPYCC_601 = 6,
+ DRM_MODE_COLORIMETRY_OPRGB = 7,
+ DRM_MODE_COLORIMETRY_BT2020_CYCC = 8,
+ DRM_MODE_COLORIMETRY_BT2020_RGB = 9,
+ DRM_MODE_COLORIMETRY_BT2020_YCC = 10,
+ /* Additional Colorimetry extension added as part of CTA 861.G */
+ DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 = 11,
+ DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER = 12,
+ /* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format */
+ DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED = 13,
+ DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT = 14,
+ DRM_MODE_COLORIMETRY_BT601_YCC = 15,
+ DRM_MODE_COLORIMETRY_COUNT
+};
/**
* enum drm_bus_flags - bus_flags info for &drm_display_info
@@ -496,9 +700,9 @@ struct drm_display_info {
enum subpixel_order subpixel_order;
#define DRM_COLOR_FORMAT_RGB444 (1<<0)
-#define DRM_COLOR_FORMAT_YCRCB444 (1<<1)
-#define DRM_COLOR_FORMAT_YCRCB422 (1<<2)
-#define DRM_COLOR_FORMAT_YCRCB420 (1<<3)
+#define DRM_COLOR_FORMAT_YCBCR444 (1<<1)
+#define DRM_COLOR_FORMAT_YCBCR422 (1<<2)
+#define DRM_COLOR_FORMAT_YCBCR420 (1<<3)
/**
* @panel_orientation: Read only connector property for built-in panels,
@@ -555,6 +759,14 @@ struct drm_display_info {
bool is_hdmi;
/**
+ * @has_audio: True if the sink supports audio.
+ *
+ * This field shall be used instead of calling
+ * drm_detect_monitor_audio() when possible.
+ */
+ bool has_audio;
+
+ /**
* @has_hdmi_infoframe: Does the sink support the HDMI infoframe?
*/
bool has_hdmi_infoframe;
@@ -566,10 +778,16 @@ struct drm_display_info {
bool rgb_quant_range_selectable;
/**
- * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even
- * more stuff redundant with @bus_formats.
+ * @edid_hdmi_rgb444_dc_modes: Mask of supported hdmi deep color modes
+ * in RGB 4:4:4. Even more stuff redundant with @bus_formats.
*/
- u8 edid_hdmi_dc_modes;
+ u8 edid_hdmi_rgb444_dc_modes;
+
+ /**
+ * @edid_hdmi_ycbcr444_dc_modes: Mask of supported hdmi deep color
+ * modes in YCbCr 4:4:4. Even more stuff redundant with @bus_formats.
+ */
+ u8 edid_hdmi_ycbcr444_dc_modes;
/**
* @cea_rev: CEA revision of the HDMI sink.
@@ -582,6 +800,11 @@ struct drm_display_info {
struct drm_hdmi_info hdmi;
/**
+ * @hdr_sink_metadata: HDR Metadata Information read from sink
+ */
+ struct hdr_sink_metadata hdr_sink_metadata;
+
+ /**
* @non_desktop: Non desktop display (HMD).
*/
bool non_desktop;
@@ -590,6 +813,54 @@ struct drm_display_info {
* @monitor_range: Frequency range supported by monitor range descriptor
*/
struct drm_monitor_range_info monitor_range;
+
+ /**
+ * @luminance_range: Luminance range supported by panel
+ */
+ struct drm_luminance_range_info luminance_range;
+
+ /**
+ * @mso_stream_count: eDP Multi-SST Operation (MSO) stream count from
+ * the DisplayID VESA vendor block. 0 for conventional Single-Stream
+ * Transport (SST), or 2 or 4 MSO streams.
+ */
+ u8 mso_stream_count;
+
+ /**
+ * @mso_pixel_overlap: eDP MSO segment pixel overlap, 0-8 pixels.
+ */
+ u8 mso_pixel_overlap;
+
+ /**
+ * @max_dsc_bpp: Maximum DSC target bitrate, if it is set to 0 the
+ * monitor's default value is used instead.
+ */
+ u32 max_dsc_bpp;
+
+ /**
+ * @vics: Array of vics_len VICs. Internal to EDID parsing.
+ */
+ u8 *vics;
+
+ /**
+ * @vics_len: Number of elements in vics. Internal to EDID parsing.
+ */
+ int vics_len;
+
+ /**
+ * @quirks: EDID based quirks. DRM core and drivers can query the
+ * @drm_edid_quirk quirks using drm_edid_has_quirk(), the rest of
+ * the quirks also tracked here are internal to EDID parsing.
+ */
+ u32 quirks;
+
+ /**
+ * @source_physical_address: Source Physical Address from HDMI
+ * Vendor-Specific Data Block, for CEC usage.
+ *
+ * Defaults to CEC_PHYS_ADDR_INVALID (0xffff).
+ */
+ u16 source_physical_address;
};
int drm_display_info_set_bus_formats(struct drm_display_info *info,
@@ -626,8 +897,10 @@ struct drm_connector_tv_margins {
/**
* struct drm_tv_connector_state - TV connector related states
- * @subconnector: selected subconnector
+ * @select_subconnector: selected subconnector
+ * @subconnector: detected subconnector
* @margins: TV margins
+ * @legacy_mode: Legacy TV mode, driver specific value
* @mode: TV mode
* @brightness: brightness in percent
* @contrast: contrast in percent
@@ -637,8 +910,10 @@ struct drm_connector_tv_margins {
* @hue: hue in percent
*/
struct drm_tv_connector_state {
+ enum drm_mode_subconnector select_subconnector;
enum drm_mode_subconnector subconnector;
struct drm_connector_tv_margins margins;
+ unsigned int legacy_mode;
unsigned int mode;
unsigned int brightness;
unsigned int contrast;
@@ -649,6 +924,82 @@ struct drm_tv_connector_state {
};
/**
+ * struct drm_connector_hdmi_infoframe - HDMI Infoframe container
+ */
+struct drm_connector_hdmi_infoframe {
+ /**
+ * @data: HDMI Infoframe structure
+ */
+ union hdmi_infoframe data;
+
+ /**
+ * @set: Is the content of @data valid?
+ */
+ bool set;
+};
+
+/**
+ * struct drm_connector_hdmi_state - HDMI state container
+ */
+struct drm_connector_hdmi_state {
+ /**
+ * @broadcast_rgb: Connector property to pass the
+ * Broadcast RGB selection value.
+ */
+ enum drm_hdmi_broadcast_rgb broadcast_rgb;
+
+ /**
+ * @infoframes: HDMI Infoframes matching that state
+ */
+ struct {
+ /**
+ * @avi: AVI Infoframes structure matching our
+ * state.
+ */
+ struct drm_connector_hdmi_infoframe avi;
+
+ /**
+ * @hdr_drm: DRM (Dynamic Range and Mastering)
+ * Infoframes structure matching our state.
+ */
+ struct drm_connector_hdmi_infoframe hdr_drm;
+
+ /**
+ * @spd: SPD Infoframes structure matching our
+ * state.
+ */
+ struct drm_connector_hdmi_infoframe spd;
+
+ /**
+ * @hdmi: HDMI Vendor Infoframes structure
+ * matching our state.
+ */
+ struct drm_connector_hdmi_infoframe hdmi;
+ } infoframes;
+
+ /**
+ * @is_limited_range: Is the output supposed to use a limited
+ * RGB Quantization Range or not?
+ */
+ bool is_limited_range;
+
+ /**
+ * @output_bpc: Bits per color channel to output.
+ */
+ unsigned int output_bpc;
+
+ /**
+ * @output_format: Pixel format to output in.
+ */
+ enum hdmi_colorspace output_format;
+
+ /**
+ * @tmds_char_rate: TMDS Character Rate, in Hz.
+ */
+ unsigned long long tmds_char_rate;
+};
+
+/**
* struct drm_connector_state - mutable connector state
*/
struct drm_connector_state {
@@ -754,7 +1105,7 @@ struct drm_connector_state {
* colorspace change on Sink. This is most commonly used to switch
* to wider color gamuts like BT2020.
*/
- u32 colorspace;
+ enum drm_colorspace colorspace;
/**
* @writeback_job: Writeback job for writeback connectors
@@ -782,10 +1133,166 @@ struct drm_connector_state {
u8 max_bpc;
/**
+ * @privacy_screen_sw_state: See :ref:`Standard Connector
+ * Properties<standard_connector_properties>`
+ */
+ enum drm_privacy_screen_status privacy_screen_sw_state;
+
+ /**
* @hdr_output_metadata:
* DRM blob property for HDR output metadata
*/
struct drm_property_blob *hdr_output_metadata;
+
+ /**
+ * @hdmi: HDMI-related variable and properties. Filled by
+ * @drm_atomic_helper_connector_hdmi_check().
+ */
+ struct drm_connector_hdmi_state hdmi;
+};
+
+/**
+ * struct drm_connector_hdmi_audio_funcs - drm_connector HDMI audio callbacks
+ */
+struct drm_connector_hdmi_audio_funcs {
+ /**
+ * @startup:
+ *
+ * Called when ASoC starts an audio stream setup. The
+ * @startup() is optional.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*startup)(struct drm_connector *connector);
+
+ /**
+ * @prepare:
+ * Configures the HDMI encoder for the audio stream. Can be called
+ * multiple times for each setup. Mandatory.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*prepare)(struct drm_connector *connector,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /**
+ * @shutdown:
+ *
+ * Shut down the audio stream. Mandatory.
+ */
+ void (*shutdown)(struct drm_connector *connector);
+
+ /**
+ * @mute_stream:
+ *
+ * Mute/unmute HDMI audio stream. The @mute_stream callback is
+ * optional.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*mute_stream)(struct drm_connector *connector,
+ bool enable, int direction);
+};
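To show the shape of these callbacks, here is a hypothetical driver-side
table; all example_* names are invented, and a real implementation would
program actual audio hardware:

static int example_audio_startup(struct drm_connector *connector)
{
	/* Power up audio-related hardware, if needed. */
	return 0;
}

static int example_audio_prepare(struct drm_connector *connector,
				 struct hdmi_codec_daifmt *fmt,
				 struct hdmi_codec_params *hparms)
{
	/* Program sample rate, channel count and the audio infoframe. */
	return 0;
}

static void example_audio_shutdown(struct drm_connector *connector)
{
	/* Stop the audio stream. */
}

static const struct drm_connector_hdmi_audio_funcs example_audio_funcs = {
	.startup = example_audio_startup,
	.prepare = example_audio_prepare,
	.shutdown = example_audio_shutdown,
};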
+
+void drm_connector_cec_phys_addr_invalidate(struct drm_connector *connector);
+void drm_connector_cec_phys_addr_set(struct drm_connector *connector);
+
+/**
+ * struct drm_connector_cec_funcs - drm_connector CEC control functions
+ */
+struct drm_connector_cec_funcs {
+ /**
+ * @phys_addr_invalidate: mark CEC physical address as invalid
+ *
+ * The callback to mark CEC physical address as invalid, abstracting
+ * the operation.
+ */
+ void (*phys_addr_invalidate)(struct drm_connector *connector);
+
+ /**
+ * @phys_addr_set: set CEC physical address
+ *
+ * The callback to set CEC physical address, abstracting the operation.
+ */
+ void (*phys_addr_set)(struct drm_connector *connector, u16 addr);
+};
+
+/**
+ * struct drm_connector_hdmi_funcs - drm_hdmi_connector control functions
+ */
+struct drm_connector_hdmi_funcs {
+ /**
+ * @tmds_char_rate_valid:
+ *
+ * This callback is invoked at atomic_check time to figure out
+ * whether a particular TMDS character rate is supported by the
+ * driver.
+ *
+ * The @tmds_char_rate_valid callback is optional.
+ *
+ * Returns:
+ *
+ * Either &drm_mode_status.MODE_OK or one of the failure reasons
+ * in &enum drm_mode_status.
+ */
+ enum drm_mode_status
+ (*tmds_char_rate_valid)(const struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate);
+
+ /**
+ * @clear_infoframe:
+ *
+ * This callback is invoked through
+ * @drm_atomic_helper_connector_hdmi_update_infoframes during a
+ * commit to clear the infoframes from the hardware. It will be
+ * called multiple times, once for every disabled infoframe
+ * type.
+ *
+ * The @clear_infoframe callback is optional.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*clear_infoframe)(struct drm_connector *connector,
+ enum hdmi_infoframe_type type);
+
+ /**
+ * @write_infoframe:
+ *
+ * This callback is invoked through
+ * @drm_atomic_helper_connector_hdmi_update_infoframes during a
+ * commit to program the infoframes into the hardware. It will
+ * be called multiple times, once for every updated infoframe
+ * type.
+ *
+ * The @write_infoframe callback is mandatory.
+ *
+ * Returns:
+ * 0 on success, a negative error code otherwise
+ */
+ int (*write_infoframe)(struct drm_connector *connector,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len);
+
+ /**
+ * @read_edid:
+ *
+ * This callback is used by the framework as a replacement for reading
+ * the EDID from connector->ddc. It is still recommended to provide
+ * connector->ddc instead of implementing this callback. The returned
+ * EDID should be freed via drm_edid_free().
+ *
+ * The @read_edid callback is optional.
+ *
+ * Returns:
+ * Valid EDID on success, NULL in case of failure.
+ */
+ const struct drm_edid *(*read_edid)(struct drm_connector *connector);
};
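As an illustration of @tmds_char_rate_valid, a driver limited to HDMI 1.4
rates might reject anything above 340 MHz; the limit is an assumption about
hypothetical hardware, not a framework requirement:

static enum drm_mode_status
example_tmds_char_rate_valid(const struct drm_connector *connector,
			     const struct drm_display_mode *mode,
			     unsigned long long tmds_rate)
{
	/* Assumed hardware limit: 340 MHz TMDS character rate. */
	if (tmds_rate > 340ULL * 1000 * 1000)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}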
/**
@@ -848,6 +1355,11 @@ struct drm_connector_funcs {
* locks to avoid races with concurrent modeset changes need to use
* &drm_connector_helper_funcs.detect_ctx instead.
*
+ * Also note that this callback can be called no matter the
+ * state the connector is in. Drivers that need the underlying
+ * device to be powered to perform the detection will first need
+ * to make sure it's been properly enabled.
+ *
* RETURNS:
*
* drm_connector_status indicating the connector's status.
@@ -1079,6 +1591,22 @@ struct drm_connector_funcs {
*/
void (*atomic_print_state)(struct drm_printer *p,
const struct drm_connector_state *state);
+
+ /**
+ * @oob_hotplug_event:
+ *
+ * This will get called when a hotplug-event for a drm-connector
+ * has been received from a source outside the display driver / device.
+ */
+ void (*oob_hotplug_event)(struct drm_connector *connector,
+ enum drm_connector_status status);
+
+ /**
+ * @debugfs_init:
+ *
+ * Allows connectors to create connector-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_connector *connector, struct dentry *root);
};
/**
@@ -1119,6 +1647,13 @@ struct drm_cmdline_mode {
bool bpp_specified;
/**
+ * @pixel_clock:
+ *
+ * Pixel Clock in kHz. Optional.
+ */
+ unsigned int pixel_clock;
+
+ /**
* @xres:
*
* Active resolution on the X axis, in pixels.
@@ -1206,6 +1741,145 @@ struct drm_cmdline_mode {
* @tv_margins: TV margins to apply to the mode.
*/
struct drm_connector_tv_margins tv_margins;
+
+ /**
+ * @tv_mode: TV mode standard. See DRM_MODE_TV_MODE_*.
+ */
+ enum drm_connector_tv_mode tv_mode;
+
+ /**
+ * @tv_mode_specified:
+ *
+ * Did the mode have a preferred TV mode?
+ */
+ bool tv_mode_specified;
+};
+
+/**
+ * struct drm_connector_hdmi_audio - DRM generic HDMI Codec-related structure
+ *
+ * HDMI drivers usually incorporate an HDMI Codec. This structure expresses the
+ * generic HDMI Codec as used by the DRM HDMI Codec framework.
+ */
+struct drm_connector_hdmi_audio {
+ /**
+ * @funcs:
+ *
+ * Implementation of the HDMI codec functionality to be used by the DRM
+ * HDMI Codec framework.
+ */
+ const struct drm_connector_hdmi_audio_funcs *funcs;
+
+ /**
+ * @codec_pdev:
+ *
+ * Platform device created to hold the HDMI Codec. It will be
+ * automatically unregistered during drm_connector_cleanup().
+ */
+ struct platform_device *codec_pdev;
+
+ /**
+ * @lock:
+ *
+ * Mutex to protect @last_state, @plugged_cb and @plugged_cb_dev.
+ */
+ struct mutex lock;
+
+ /**
+ * @plugged_cb:
+ *
+ * Callback to be called when the HDMI sink gets plugged into or
+ * unplugged from this connector. This is assigned by the framework
+ * when requested by the ASoC code.
+ */
+ void (*plugged_cb)(struct device *dev, bool plugged);
+
+ /**
+ * @plugged_cb_dev:
+ *
+ * The data for @plugged_cb(). It is provided by the ASoC code.
+ */
+ struct device *plugged_cb_dev;
+
+ /**
+ * @last_state:
+ *
+ * Last plugged state recorded by the framework. It is used to correctly
+ * report the state to @plugged_cb().
+ */
+ bool last_state;
+
+ /**
+ * @dai_port:
+ *
+ * The port in DT that is used for the Codec DAI.
+ */
+ int dai_port;
+};
+
+/**
+ * struct drm_connector_hdmi - DRM Connector HDMI-related structure
+ */
+struct drm_connector_hdmi {
+#define DRM_CONNECTOR_HDMI_VENDOR_LEN 8
+ /**
+ * @vendor: HDMI Controller Vendor Name
+ */
+ unsigned char vendor[DRM_CONNECTOR_HDMI_VENDOR_LEN] __nonstring;
+
+#define DRM_CONNECTOR_HDMI_PRODUCT_LEN 16
+ /**
+ * @product: HDMI Controller Product Name
+ */
+ unsigned char product[DRM_CONNECTOR_HDMI_PRODUCT_LEN] __nonstring;
+
+ /**
+ * @supported_formats: Bitmask of @hdmi_colorspace
+ * supported by the controller.
+ */
+ unsigned long supported_formats;
+
+ /**
+ * @funcs: HDMI connector Control Functions
+ */
+ const struct drm_connector_hdmi_funcs *funcs;
+
+ /**
+ * @infoframes: Current Infoframes output by the connector
+ */
+ struct {
+ /**
+ * @lock: Mutex protecting against concurrent access to
+ * the infoframes, most notably between KMS and ALSA.
+ */
+ struct mutex lock;
+
+ /**
+ * @audio: Current Audio Infoframes structure. Protected
+ * by @lock.
+ */
+ struct drm_connector_hdmi_infoframe audio;
+ } infoframes;
+};
+
+/**
+ * struct drm_connector_cec - DRM Connector CEC-related structure
+ */
+struct drm_connector_cec {
+ /**
+ * @mutex: protects all fields in this structure.
+ */
+ struct mutex mutex;
+
+ /**
+ * @funcs: CEC Control Functions
+ */
+ const struct drm_connector_cec_funcs *funcs;
+
+ /**
+ * @data: CEC implementation-specific data
+ */
+ void *data;
};
/**
@@ -1223,6 +1897,14 @@ struct drm_connector {
struct device *kdev;
/** @attr: sysfs attributes */
struct device_attribute *attr;
+ /**
+ * @fwnode: associated fwnode supplied by platform firmware
+ *
+ * Drivers can set this to associate a fwnode with a connector, drivers
+ * are expected to get a reference on the fwnode when setting this.
+ * drm_connector_cleanup() will call fwnode_handle_put() on this.
+ */
+ struct fwnode_handle *fwnode;
/**
* @head:
@@ -1234,6 +1916,14 @@ struct drm_connector {
*/
struct list_head head;
+ /**
+ * @global_connector_list_entry:
+ *
+ * Connector entry in the global connector-list, used by
+ * drm_connector_find_by_fwnode().
+ */
+ struct list_head global_connector_list_entry;
+
/** @base: base KMS object */
struct drm_mode_object base;
@@ -1335,8 +2025,12 @@ struct drm_connector {
/**
* @edid_blob_ptr: DRM property containing EDID if present. Protected by
- * &drm_mode_config.mutex. This should be updated only by calling
+ * &drm_mode_config.mutex.
+ *
+ * This must be updated only by calling drm_edid_connector_update() or
* drm_connector_update_edid_property().
+ *
+ * This must not be used by drivers directly.
*/
struct drm_property_blob *edid_blob_ptr;
@@ -1375,11 +2069,40 @@ struct drm_connector {
struct drm_property_blob *path_blob_ptr;
/**
+ * @max_bpc: Maximum bits per color channel the connector supports.
+ */
+ unsigned int max_bpc;
+
+ /**
* @max_bpc_property: Default connector property for the max bpc to be
* driven out of the connector.
*/
struct drm_property *max_bpc_property;
+ /** @privacy_screen: drm_privacy_screen for this connector, or NULL. */
+ struct drm_privacy_screen *privacy_screen;
+
+ /** @privacy_screen_notifier: privacy-screen notifier_block */
+ struct notifier_block privacy_screen_notifier;
+
+ /**
+ * @privacy_screen_sw_state_property: Optional atomic property for the
+ * connector to control the integrated privacy screen.
+ */
+ struct drm_property *privacy_screen_sw_state_property;
+
+ /**
+ * @privacy_screen_hw_state_property: Optional atomic property for the
+ * connector to report the actual integrated privacy screen state.
+ */
+ struct drm_property *privacy_screen_hw_state_property;
+
+ /**
+ * @broadcast_rgb_property: Connector property to set the
+ * Broadcast RGB selection to output with.
+ */
+ struct drm_property *broadcast_rgb_property;
+
#define DRM_CONNECTOR_POLL_HPD (1 << 0)
#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
@@ -1422,8 +2145,20 @@ struct drm_connector {
struct drm_cmdline_mode cmdline_mode;
/** @force: a DRM_FORCE_<foo> state for forced mode sets */
enum drm_connector_force force;
- /** @override_edid: has the EDID been overwritten through debugfs for testing? */
- bool override_edid;
+
+ /**
+ * @edid_override: Override EDID set via debugfs.
+ *
+ * Do not modify or access outside of the drm_edid_override_* family of
+ * functions.
+ */
+ const struct drm_edid *edid_override;
+
+ /**
+ * @edid_override_mutex: Protect access to edid_override.
+ */
+ struct mutex edid_override_mutex;
+
/** @epoch_counter: used to detect any other changes in connector, besides status */
u64 epoch_counter;
@@ -1443,8 +2178,11 @@ struct drm_connector {
struct drm_encoder *encoder;
#define MAX_ELD_BYTES 128
- /** @eld: EDID-like data, if present */
+ /** @eld: EDID-like data, if present, protected by @eld_mutex */
uint8_t eld[MAX_ELD_BYTES];
+ /** @eld_mutex: protection for concurrenct access to @eld */
+ struct mutex eld_mutex;
+
/** @latency_present: AV delay info from ELD, if found */
bool latency_present[2];
/**
@@ -1553,8 +2291,20 @@ struct drm_connector {
*/
struct llist_node free_node;
- /** @hdr_sink_metadata: HDR Metadata Information read from sink */
- struct hdr_sink_metadata hdr_sink_metadata;
+ /**
+ * @hdmi: HDMI-related variable and properties.
+ */
+ struct drm_connector_hdmi hdmi;
+
+ /**
+ * @hdmi_audio: HDMI codec properties and non-DRM state.
+ */
+ struct drm_connector_hdmi_audio hdmi_audio;
+
+ /**
+ * @cec: CEC-related data.
+ */
+ struct drm_connector_cec cec;
};
#define obj_to_connector(x) container_of(x, struct drm_connector, base)
@@ -1563,13 +2313,33 @@ int drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type);
+int drm_connector_dynamic_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc);
int drm_connector_init_with_ddc(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type,
struct i2c_adapter *ddc);
+int drmm_connector_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc);
+int drmm_connector_hdmi_init(struct drm_device *dev,
+ struct drm_connector *connector,
+ const char *vendor, const char *product,
+ const struct drm_connector_funcs *funcs,
+ const struct drm_connector_hdmi_funcs *hdmi_funcs,
+ int connector_type,
+ struct i2c_adapter *ddc,
+ unsigned long supported_formats,
+ unsigned int max_bpc);
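A usage sketch of the managed HDMI initializer; the vendor and product
strings and the example_* funcs tables are placeholders for driver-specific
values:

#include <linux/hdmi.h>
#include <drm/drm_connector.h>

static int example_register_hdmi_connector(struct drm_device *dev,
					   struct drm_connector *connector,
					   struct i2c_adapter *ddc)
{
	return drmm_connector_hdmi_init(dev, connector,
					"Vendor", "Product",
					&example_connector_funcs,
					&example_hdmi_funcs,
					DRM_MODE_CONNECTOR_HDMIA,
					ddc,
					BIT(HDMI_COLORSPACE_RGB),
					8 /* max_bpc */);
}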
void drm_connector_attach_edid_property(struct drm_connector *connector);
int drm_connector_register(struct drm_connector *connector);
+int drm_connector_dynamic_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
int drm_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
@@ -1645,25 +2415,32 @@ drm_connector_is_unregistered(struct drm_connector *connector)
DRM_CONNECTOR_UNREGISTERED;
}
+void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode,
+ enum drm_connector_status status);
const char *drm_get_connector_type_name(unsigned int connector_type);
const char *drm_get_connector_status_name(enum drm_connector_status status);
const char *drm_get_subpixel_order_name(enum subpixel_order order);
const char *drm_get_dpms_name(int val);
const char *drm_get_dvi_i_subconnector_name(int val);
const char *drm_get_dvi_i_select_name(int val);
+const char *drm_get_tv_mode_name(int val);
const char *drm_get_tv_subconnector_name(int val);
const char *drm_get_tv_select_name(int val);
const char *drm_get_dp_subconnector_name(int val);
const char *drm_get_content_protection_name(int val);
const char *drm_get_hdcp_content_type_name(int val);
+int drm_get_tv_mode_from_name(const char *name, size_t len);
+
int drm_mode_create_dvi_i_properties(struct drm_device *dev);
void drm_connector_attach_dp_subconnector_property(struct drm_connector *connector);
int drm_mode_create_tv_margin_properties(struct drm_device *dev);
+int drm_mode_create_tv_properties_legacy(struct drm_device *dev,
+ unsigned int num_modes,
+ const char * const modes[]);
int drm_mode_create_tv_properties(struct drm_device *dev,
- unsigned int num_modes,
- const char * const modes[]);
+ unsigned int supported_tv_modes);
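With the reworked signature, the supported modes are passed as a bitmask of
&enum drm_connector_tv_mode values. A sketch for a driver supporting only
NTSC and PAL:

static int example_create_tv_props(struct drm_device *dev)
{
	return drm_mode_create_tv_properties(dev,
					     BIT(DRM_MODE_TV_MODE_NTSC) |
					     BIT(DRM_MODE_TV_MODE_PAL));
}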
void drm_connector_attach_tv_margin_properties(struct drm_connector *conn);
int drm_mode_create_scaling_mode_property(struct drm_device *dev);
int drm_connector_attach_content_type_property(struct drm_connector *dev);
@@ -1671,13 +2448,17 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
int drm_connector_attach_vrr_capable_property(
struct drm_connector *connector);
+int drm_connector_attach_broadcast_rgb_property(struct drm_connector *connector);
+int drm_connector_attach_colorspace_property(struct drm_connector *connector);
+int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *connector);
+bool drm_connector_atomic_hdr_metadata_equal(struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
-int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector);
-int drm_mode_create_dp_colorspace_property(struct drm_connector *connector);
+int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector,
+ u32 supported_colorspaces);
+int drm_mode_create_dp_colorspace_property(struct drm_connector *connector,
+ u32 supported_colorspaces);
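Similarly, the colorspace helpers now take a bitmask of &enum drm_colorspace
values. An illustrative sketch that advertises only the BT.2020 colorimetry
modes on an HDMI connector:

static int example_attach_colorspace(struct drm_connector *connector)
{
	u32 mask = BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
		   BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
	int ret;

	ret = drm_mode_create_hdmi_colorspace_property(connector, mask);
	if (ret)
		return ret;

	return drm_connector_attach_colorspace_property(connector);
}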
int drm_mode_create_content_type_property(struct drm_device *dev);
-void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
- const struct drm_connector_state *conn_state);
-
int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
int drm_connector_set_path_property(struct drm_connector *connector,
@@ -1696,8 +2477,16 @@ int drm_connector_set_panel_orientation_with_quirk(
struct drm_connector *connector,
enum drm_panel_orientation panel_orientation,
int width, int height);
+int drm_connector_set_orientation_from_panel(
+ struct drm_connector *connector,
+ struct drm_panel *panel);
int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
int min, int max);
+void drm_connector_create_privacy_screen_properties(struct drm_connector *conn);
+void drm_connector_attach_privacy_screen_properties(struct drm_connector *conn);
+void drm_connector_attach_privacy_screen_provider(
+ struct drm_connector *connector, struct drm_privacy_screen *priv);
+void drm_connector_update_privacy_screen(const struct drm_connector_state *connector_state);
/**
* struct drm_tile_group - Tile group metadata
@@ -1731,6 +2520,11 @@ void drm_mode_put_tile_group(struct drm_device *dev,
* drm_connector_list_iter_begin(), drm_connector_list_iter_end() and
* drm_connector_list_iter_next() respectively the convenience macro
* drm_for_each_connector_iter().
+ *
+ * Note that the return value of drm_connector_list_iter_next() is only valid
+ * up to the next drm_connector_list_iter_next() or
+ * drm_connector_list_iter_end() call. If you want to use the connector later,
+ * then you need to grab your own reference first using drm_connector_get().
*/
struct drm_connector_list_iter {
/* private: */
@@ -1746,6 +2540,7 @@ void drm_connector_list_iter_end(struct drm_connector_list_iter *iter);
bool drm_connector_has_possible_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
+const char *drm_get_colorspace_name(enum drm_colorspace colorspace);
/**
* drm_for_each_connector_iter - connector_list iterator macro
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 13eeba2a750a..66278ffeebd6 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -25,37 +25,24 @@
#ifndef __DRM_CRTC_H__
#define __DRM_CRTC_H__
-#include <linux/i2c.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/fb.h>
-#include <linux/hdmi.h>
-#include <linux/media-bus-format.h>
-#include <uapi/drm/drm_mode.h>
-#include <uapi/drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
-#include <drm/drm_rect.h>
#include <drm/drm_mode_object.h>
-#include <drm/drm_framebuffer.h>
#include <drm/drm_modes.h>
-#include <drm/drm_connector.h>
#include <drm/drm_device.h>
-#include <drm/drm_property.h>
-#include <drm/drm_edid.h>
#include <drm/drm_plane.h>
-#include <drm/drm_blend.h>
-#include <drm/drm_color_mgmt.h>
#include <drm/drm_debugfs_crc.h>
#include <drm/drm_mode_config.h>
+struct drm_connector;
struct drm_device;
+struct drm_framebuffer;
struct drm_mode_set;
struct drm_file;
-struct drm_clip_rect;
struct drm_printer;
struct drm_self_refresh_data;
struct device_node;
-struct dma_fence;
struct edid;
static inline int64_t U642I64(uint64_t val)
@@ -90,11 +77,6 @@ struct drm_plane_helper_funcs;
* intended to indicate whether a full modeset is needed, rather than strictly
* describing what has changed in a commit. See also:
* drm_atomic_crtc_needs_modeset()
- *
- * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or
- * drm_helper_crtc_mode_set_base()) do not maintain many of the derived control
- * state like @plane_mask so drivers not converted over to atomic helpers should
- * not rely on these being accurate!
*/
struct drm_crtc_state {
/** @crtc: backpointer to the CRTC */
@@ -204,7 +186,7 @@ struct drm_crtc_state {
* this case the driver will send the VBLANK event on its own when the
* writeback job is complete.
*/
- bool no_vblank : 1;
+ bool no_vblank;
/**
* @plane_mask: Bitmask of drm_plane_mask(plane) of planes attached to
@@ -285,6 +267,10 @@ struct drm_crtc_state {
* Lookup table for converting pixel data after the color conversion
* matrix @ctm. See drm_crtc_enable_color_mgmt(). The blob (if not
* NULL) is an array of &struct drm_color_lut.
+ *
+ * Note that for mostly historical reasons stemming from Xorg heritage,
+ * this is also used to store the color map (also sometimes color lut,
+ * CLUT or color palette) for indexed formats like DRM_FORMAT_C8.
*/
struct drm_property_blob *gamma_lut;
@@ -332,6 +318,17 @@ struct drm_crtc_state {
enum drm_scaling_filter scaling_filter;
/**
+ * @sharpness_strength:
+ *
+ * Used by userspace to set the sharpness intensity.
+ * The value ranges from 0 to 255. The default value of 0
+ * disables the sharpness feature; any value greater than 0
+ * enables sharpening with the specified strength.
+ */
+ u8 sharpness_strength;
+
+ /**
* @event:
*
* Optional pointer to a DRM event to signal upon completion of the
@@ -1075,12 +1072,18 @@ struct drm_crtc {
/**
* @gamma_size: Size of legacy gamma ramp reported to userspace. Set up
* by calling drm_mode_crtc_set_gamma_size().
+ *
+ * Note that atomic drivers need to instead use
+ * &drm_crtc_state.gamma_lut. See drm_crtc_enable_color_mgmt().
*/
uint32_t gamma_size;
/**
* @gamma_store: Gamma ramp values used by the legacy SETGAMMA and
* GETGAMMA IOCTls. Set up by calling drm_mode_crtc_set_gamma_size().
+ *
+ * Note that atomic drivers need to instead use
+ * &drm_crtc_state.gamma_lut. See drm_crtc_enable_color_mgmt().
*/
uint16_t *gamma_store;
@@ -1097,6 +1100,12 @@ struct drm_crtc {
struct drm_property *scaling_filter_property;
/**
+ * @sharpness_strength_property: property to set the
+ * requested sharpness intensity.
+ */
+ struct drm_property *sharpness_strength_property;
+
+ /**
* @state:
*
* Current atomic state for this CRTC.
@@ -1135,14 +1144,12 @@ struct drm_crtc {
*/
spinlock_t commit_lock;
-#ifdef CONFIG_DEBUG_FS
/**
* @debugfs_entry:
*
* Debugfs directory for this CRTC.
*/
struct dentry *debugfs_entry;
-#endif
/**
* @crc:
@@ -1221,6 +1228,15 @@ int drm_crtc_init_with_planes(struct drm_device *dev,
struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs,
const char *name, ...);
+
+__printf(6, 7)
+int drmm_crtc_init_with_planes(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const char *name, ...);
+
void drm_crtc_cleanup(struct drm_crtc *crtc);
__printf(7, 8)
@@ -1324,5 +1340,6 @@ static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
unsigned int supported_filters);
-
+bool drm_crtc_in_clone_mode(struct drm_crtc_state *crtc_state);
+int drm_crtc_create_sharpness_strength_property(struct drm_crtc *crtc);
#endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index a6d520d5b6ca..8c886fc46ef2 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -33,15 +33,17 @@
#ifndef __DRM_CRTC_HELPER_H__
#define __DRM_CRTC_HELPER_H__
-#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/idr.h>
-#include <linux/fb.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_modeset_helper.h>
+struct drm_atomic_state;
+struct drm_connector;
+struct drm_crtc;
+struct drm_device;
+struct drm_display_mode;
+struct drm_encoder;
+struct drm_framebuffer;
+struct drm_mode_set;
+struct drm_modeset_acquire_ctx;
void drm_helper_disable_unused_functions(struct drm_device *dev);
int drm_crtc_helper_set_config(struct drm_mode_set *set,
@@ -50,6 +52,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y,
struct drm_framebuffer *old_fb);
+int drm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
diff --git a/include/drm/drm_damage_helper.h b/include/drm/drm_damage_helper.h
index 40c34a5bf149..a58cbcd11276 100644
--- a/include/drm/drm_damage_helper.h
+++ b/include/drm/drm_damage_helper.h
@@ -64,7 +64,6 @@ struct drm_atomic_helper_damage_iter {
bool full_update;
};
-void drm_plane_enable_fb_damage_clips(struct drm_plane *plane);
void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state,
struct drm_plane_state *plane_state);
int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
@@ -79,24 +78,7 @@ bool
drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
struct drm_rect *rect);
bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state,
- struct drm_plane_state *state,
+ const struct drm_plane_state *state,
struct drm_rect *rect);
-/**
- * drm_helper_get_plane_damage_clips - Returns damage clips in &drm_rect.
- * @state: Plane state.
- *
- * Returns plane damage rectangles in internal &drm_rect. Currently &drm_rect
- * can be obtained by simply typecasting &drm_mode_rect. This is because both
- * are signed 32 and during drm_atomic_check_only() it is verified that damage
- * clips are inside fb.
- *
- * Return: Clips in plane fb_damage_clips blob property.
- */
-static inline struct drm_rect *
-drm_helper_get_plane_damage_clips(const struct drm_plane_state *state)
-{
- return (struct drm_rect *)drm_plane_get_damage_clips(state);
-}
-
#endif
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
index 2188dc83957f..ea8cba94208a 100644
--- a/include/drm/drm_debugfs.h
+++ b/include/drm/drm_debugfs.h
@@ -34,6 +34,22 @@
#include <linux/types.h>
#include <linux/seq_file.h>
+
+#include <drm/drm_gpuvm.h>
+
+/**
+ * DRM_DEBUGFS_GPUVA_INFO - &drm_info_list entry to dump a GPU VA space
+ * @show: the &drm_info_list's show callback
+ * @data: driver private data
+ *
+ * Drivers should use this macro to define a &drm_info_list entry to provide a
+ * debugfs file for dumping the GPU VA space regions and mappings.
+ *
+ * For each DRM GPU VA space drivers should call drm_debugfs_gpuva_info() from
+ * their @show callback.
+ */
+#define DRM_DEBUGFS_GPUVA_INFO(show, data) {"gpuvas", show, DRIVER_GEM_GPUVA, data}
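A hypothetical driver-side @show callback using the macro; example_device,
to_example_device() and the gpuvm member are invented for the sketch:

static int example_gpuva_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct example_device *edev = to_example_device(node->minor->dev);

	return drm_debugfs_gpuva_info(m, &edev->gpuvm);
}

static const struct drm_info_list example_debugfs_list[] = {
	DRM_DEBUGFS_GPUVA_INFO(example_gpuva_show, NULL),
};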
+
/**
* struct drm_info_list - debugfs info list entry
*
@@ -79,12 +95,67 @@ struct drm_info_node {
struct dentry *dent;
};
+/**
+ * struct drm_debugfs_info - debugfs info list entry
+ *
+ * This structure represents a debugfs file to be created by the drm
+ * core.
+ */
+struct drm_debugfs_info {
+ /** @name: File name */
+ const char *name;
+
+ /**
+ * @show:
+ *
+ * Show callback. &seq_file->private will be set to the &struct
+ * drm_debugfs_entry corresponding to the instance of this info
+ * on a given &struct drm_device.
+ */
+ int (*show)(struct seq_file*, void*);
+
+ /** @driver_features: Required driver features for this entry. */
+ u32 driver_features;
+
+ /** @data: Driver-private data, should not be device-specific. */
+ void *data;
+};
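For illustration, a show callback registered through drm_debugfs_add_file()
receives its &struct drm_debugfs_entry via &seq_file.private, as documented
above; the body below is a hypothetical sketch:

static int example_status_show(struct seq_file *m, void *data)
{
	struct drm_debugfs_entry *entry = m->private;
	struct drm_device *dev = entry->dev;

	seq_printf(m, "device: %s\n", dev->unique);

	return 0;
}

/* At device-init time: */
/* drm_debugfs_add_file(dev, "status", example_status_show, NULL); */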
+
+/**
+ * struct drm_debugfs_entry - Per-device debugfs node structure
+ *
+ * This structure represents a debugfs file, as an instantiation of a &struct
+ * drm_debugfs_info on a &struct drm_device.
+ */
+struct drm_debugfs_entry {
+ /** @dev: &struct drm_device for this node. */
+ struct drm_device *dev;
+
+ /** @file: Template for this node. */
+ struct drm_debugfs_info file;
+
+ /** @list: Linked list of all device nodes. */
+ struct list_head list;
+};
+
#if defined(CONFIG_DEBUG_FS)
void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
struct drm_minor *minor);
-int drm_debugfs_remove_files(const struct drm_info_list *files,
- int count, struct drm_minor *minor);
+int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
+ struct dentry *root, struct drm_minor *minor);
+
+void drm_debugfs_add_file(struct drm_device *dev, const char *name,
+ int (*show)(struct seq_file*, void*), void *data);
+
+void drm_debugfs_add_files(struct drm_device *dev,
+ const struct drm_debugfs_info *files, int count);
+
+int drm_debugfs_gpuva_info(struct seq_file *m,
+ struct drm_gpuvm *gpuvm);
+
+void drm_debugfs_clients_add(struct drm_file *file);
+void drm_debugfs_clients_remove(struct drm_file *file);
#else
static inline void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
@@ -92,10 +163,35 @@ static inline void drm_debugfs_create_files(const struct drm_info_list *files,
{}
static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
- int count, struct drm_minor *minor)
+ int count, struct dentry *root,
+ struct drm_minor *minor)
{
return 0;
}
+
+static inline void drm_debugfs_add_file(struct drm_device *dev, const char *name,
+ int (*show)(struct seq_file*, void*),
+ void *data)
+{}
+
+static inline void drm_debugfs_add_files(struct drm_device *dev,
+ const struct drm_debugfs_info *files,
+ int count)
+{}
+
+static inline int drm_debugfs_gpuva_info(struct seq_file *m,
+ struct drm_gpuvm *gpuvm)
+{
+ return 0;
+}
+
+static inline void drm_debugfs_clients_add(struct drm_file *file)
+{
+}
+
+static inline void drm_debugfs_clients_remove(struct drm_file *file)
+{
+}
#endif
#endif /* _DRM_DEBUGFS_H_ */
diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h
index b225eeb30d05..1b4c98c2f838 100644
--- a/include/drm/drm_debugfs_crc.h
+++ b/include/drm/drm_debugfs_crc.h
@@ -22,13 +22,19 @@
#ifndef __DRM_DEBUGFS_CRC_H__
#define __DRM_DEBUGFS_CRC_H__
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+struct drm_crtc;
+
#define DRM_MAX_CRC_NR 10
/**
* struct drm_crtc_crc_entry - entry describing a frame's content
* @has_frame_counter: whether the source was able to provide a frame number
* @frame: number of the frame this CRC is about, if @has_frame_counter is true
- * @crc: array of values that characterize the frame
+ * @crcs: array of values that characterize the frame
*/
struct drm_crtc_crc_entry {
bool has_frame_counter;
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index d647223e8390..5af49c5c3778 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -5,17 +5,14 @@
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/idr.h>
+#include <linux/sched.h>
-#include <drm/drm_hashtab.h>
#include <drm/drm_mode_config.h>
struct drm_driver;
struct drm_minor;
struct drm_master;
-struct drm_device_dma;
struct drm_vblank_crtc;
-struct drm_sg_mem;
-struct drm_local_map;
struct drm_vma_offset_manager;
struct drm_vram_mm;
struct drm_fb_helper;
@@ -25,6 +22,28 @@ struct inode;
struct pci_dev;
struct pci_controller;
+/*
+ * Recovery methods for wedged device in order of less to more side-effects.
+ * To be used with drm_dev_wedged_event() as recovery @method. Callers can
+ * use any one, multiple (or'd) or none depending on their needs.
+ *
+ * Refer to "Device Wedging" chapter in Documentation/gpu/drm-uapi.rst for more
+ * details.
+ */
+#define DRM_WEDGE_RECOVERY_NONE BIT(0) /* optional telemetry collection */
+#define DRM_WEDGE_RECOVERY_REBIND BIT(1) /* unbind + bind driver */
+#define DRM_WEDGE_RECOVERY_BUS_RESET BIT(2) /* unbind + reset bus device + bind */
+#define DRM_WEDGE_RECOVERY_VENDOR BIT(3) /* vendor specific recovery method */
+
+/**
+ * struct drm_wedge_task_info - information about the guilty task of a wedged device
+ */
+struct drm_wedge_task_info {
+ /** @pid: pid of the task */
+ pid_t pid;
+ /** @comm: command name of the task */
+ char comm[TASK_COMM_LEN];
+};
/**
* enum switch_power_state - power state of drm device
@@ -61,6 +80,28 @@ struct drm_device {
struct device *dev;
/**
+ * @dma_dev:
+ *
+ * Device for DMA operations. Only required if the device @dev
+ * cannot perform DMA by itself. Should be NULL otherwise. Call
+ * drm_dev_dma_dev() to get the DMA device instead of using this
+ * field directly. Call drm_dev_set_dma_dev() to set this field.
+ *
+ * DRM devices are sometimes bound to virtual devices that cannot
+ * perform DMA by themselves. Drivers should set this field to the
+ * respective DMA controller.
+ *
+ * Devices on USB and other peripheral busses also cannot perform
+ * DMA by themselves. The @dma_dev field should point to the bus
+ * controller that performs DMA on behalf of such a device. Required
+ * for importing buffers via dma-buf.
+ *
+ * If set, the DRM core automatically releases the reference on the
+ * device.
+ */
+ struct device *dma_dev;
+
+ /**
* @managed:
*
* Managed resources linked to the lifetime of this &drm_device as
@@ -90,12 +131,28 @@ struct drm_device {
*/
void *dev_private;
- /** @primary: Primary node */
+ /**
+ * @primary:
+ *
+ * Primary node. Drivers should not interact with this
+ * directly. debugfs interfaces can be registered with
+ * drm_debugfs_add_file(), and sysfs should be directly added on the
+ * hardware (and not character device node) struct device @dev.
+ */
struct drm_minor *primary;
- /** @render: Render node */
+ /**
+ * @render:
+ *
+ * Render node. Drivers should not interact with this directly ever.
+ * Drivers should not expose any additional interfaces in debugfs or
+ * sysfs on this node.
+ */
struct drm_minor *render;
+ /** @accel: Compute Acceleration node */
+ struct drm_minor *accel;
+
/**
* @registered:
*
@@ -136,16 +193,6 @@ struct drm_device {
char *unique;
/**
- * @struct_mutex:
- *
- * Lock for others (not &drm_minor.master and &drm_file.is_master)
- *
- * WARNING:
- * Only drivers annotated with DRIVER_LEGACY should be using this.
- */
- struct mutex struct_mutex;
-
- /**
* @master_mutex:
*
* Lock for &drm_minor.master and &drm_file.is_master
@@ -192,18 +239,12 @@ struct drm_device {
struct list_head clientlist;
/**
- * @irq_enabled:
+ * @client_sysrq_list:
*
- * Indicates that interrupt handling is enabled, specifically vblank
- * handling. Drivers which don't use drm_irq_install() need to set this
- * to true manually.
- */
- bool irq_enabled;
-
- /**
- * @irq: Used by the drm_irq_install() and drm_irq_unistall() helpers.
+ * Entry into list of devices registered for sysrq. Allows in-kernel
+ * clients on this device to handle sysrq keys.
*/
- int irq;
+ struct list_head client_sysrq_list;
/**
* @vblank_disable_immediate:
@@ -215,8 +256,9 @@ struct drm_device {
* This can be set to true if the hardware has a working vblank counter
* with high-precision timestamping (otherwise there are races) and the
* driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off()
- * appropriately. See also @max_vblank_count and
- * &drm_crtc_funcs.get_vblank_counter.
+ * appropriately. Also, see @max_vblank_count,
+ * &drm_crtc_funcs.get_vblank_counter and
+ * &drm_vblank_crtc_config.disable_immediate.
*/
bool vblank_disable_immediate;
@@ -276,12 +318,6 @@ struct drm_device {
*/
spinlock_t event_lock;
- /** @agp: AGP data */
- struct drm_agp_head *agp;
-
- /** @pdev: PCI device structure */
- struct pci_dev *pdev;
-
/** @num_crtcs: Number of CRTCs on this device */
unsigned int num_crtcs;
@@ -318,64 +354,31 @@ struct drm_device {
*/
struct drm_fb_helper *fb_helper;
- /* Everything below here is for legacy driver, never use! */
- /* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- /* List of devices per driver for stealth attach cleanup */
- struct list_head legacy_dev_list;
-
-#ifdef __alpha__
- /** @hose: PCI hose, only used on ALPHA platforms. */
- struct pci_controller *hose;
-#endif
-
- /* Context handle management - linked list of context handles */
- struct list_head ctxlist;
-
- /* Context handle management - mutex for &ctxlist */
- struct mutex ctxlist_mutex;
-
- /* Context handle management */
- struct idr ctx_idr;
-
- /* Memory management - linked list of regions */
- struct list_head maplist;
-
- /* Memory management - user token hash table for maps */
- struct drm_open_hash map_hash;
-
- /* Context handle management - list of vmas (for debugging) */
- struct list_head vmalist;
-
- /* Optional pointer for DMA support */
- struct drm_device_dma *dma;
-
- /* Context swapping flag */
- __volatile__ long context_flag;
-
- /* Last current context */
- int last_context;
-
- /* Lock for &buf_use and a few other things. */
- spinlock_t buf_lock;
-
- /* Usage counter for buffers in use -- cannot alloc */
- int buf_use;
-
- /* Buffer allocation in progress */
- atomic_t buf_alloc;
-
- struct {
- int context;
- struct drm_hw_lock *lock;
- } sigdata;
+ /**
+ * @debugfs_root:
+ *
+ * Root directory for debugfs files.
+ */
+ struct dentry *debugfs_root;
+};
- struct drm_local_map *agp_buffer_map;
- unsigned int agp_buffer_token;
+void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev);
- /* Scatter gather memory */
- struct drm_sg_mem *sg;
-#endif
-};
+/**
+ * drm_dev_dma_dev - returns the DMA device for a DRM device
+ * @dev: DRM device
+ *
+ * Returns the DMA device of the given DRM device. By default, this
+ * is the DRM device's parent. See drm_dev_set_dma_dev().
+ *
+ * Returns:
+ * A DMA-capable device for the DRM device.
+ */
+static inline struct device *drm_dev_dma_dev(struct drm_device *dev)
+{
+ if (dev->dma_dev)
+ return dev->dma_dev;
+ return dev->dev;
+}
#endif
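
A minimal usage sketch for the new @dma_dev plumbing above, assuming a hypothetical platform driver (all "hypo" names are illustrative) whose virtual device cannot do DMA itself; only drm_dev_alloc(), drm_dev_register(), drm_dev_set_dma_dev() and drm_dev_dma_dev() come from these headers:

static int hypo_probe(struct platform_device *pdev)
{
	struct drm_device *drm;

	drm = drm_dev_alloc(&hypo_driver, &pdev->dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	/* Route DMA through the parent bus controller, which is assumed
	 * here to be the actual DMA-capable device. */
	drm_dev_set_dma_dev(drm, pdev->dev.parent);

	return drm_dev_register(drm, 0);
}

Importers (e.g. the dma-buf paths) then query drm_dev_dma_dev(drm), which falls back to drm->dev when no DMA device was set.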
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
deleted file mode 100644
index ec64d141f578..000000000000
--- a/include/drm/drm_displayid.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright © 2014 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef DRM_DISPLAYID_H
-#define DRM_DISPLAYID_H
-
-#include <linux/types.h>
-
-struct edid;
-
-#define DATA_BLOCK_PRODUCT_ID 0x00
-#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
-#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
-#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
-#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
-#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
-#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
-#define DATA_BLOCK_VESA_TIMING 0x07
-#define DATA_BLOCK_CEA_TIMING 0x08
-#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
-#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
-#define DATA_BLOCK_GP_ASCII_STRING 0x0b
-#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
-#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
-#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
-#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
-#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
-#define DATA_BLOCK_TILED_DISPLAY 0x12
-#define DATA_BLOCK_CTA 0x81
-
-#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
-
-#define PRODUCT_TYPE_EXTENSION 0
-#define PRODUCT_TYPE_TEST 1
-#define PRODUCT_TYPE_PANEL 2
-#define PRODUCT_TYPE_MONITOR 3
-#define PRODUCT_TYPE_TV 4
-#define PRODUCT_TYPE_REPEATER 5
-#define PRODUCT_TYPE_DIRECT_DRIVE 6
-
-struct displayid_header {
- u8 rev;
- u8 bytes;
- u8 prod_id;
- u8 ext_count;
-} __packed;
-
-struct displayid_block {
- u8 tag;
- u8 rev;
- u8 num_bytes;
-} __packed;
-
-struct displayid_tiled_block {
- struct displayid_block base;
- u8 tile_cap;
- u8 topo[3];
- u8 tile_size[4];
- u8 tile_pixel_bezel[5];
- u8 topology_id[8];
-} __packed;
-
-struct displayid_detailed_timings_1 {
- u8 pixel_clock[3];
- u8 flags;
- u8 hactive[2];
- u8 hblank[2];
- u8 hsync[2];
- u8 hsw[2];
- u8 vactive[2];
- u8 vblank[2];
- u8 vsync[2];
- u8 vsw[2];
-} __packed;
-
-struct displayid_detailed_timing_block {
- struct displayid_block base;
- struct displayid_detailed_timings_1 timings[];
-};
-
-/* DisplayID iteration */
-struct displayid_iter {
- const struct edid *edid;
-
- const u8 *section;
- int length;
- int idx;
- int ext_index;
-};
-
-void displayid_iter_edid_begin(const struct edid *edid,
- struct displayid_iter *iter);
-const struct displayid_block *
-__displayid_iter_next(struct displayid_iter *iter);
-#define displayid_iter_for_each(__block, __iter) \
- while (((__block) = __displayid_iter_next(__iter)))
-void displayid_iter_end(struct displayid_iter *iter);
-
-#endif
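
For reference, the iteration pattern the deleted header exposed (all names appear in the removed lines above; edid stands for a previously fetched EDID):

struct displayid_iter iter;
const struct displayid_block *block;

displayid_iter_edid_begin(edid, &iter);
displayid_iter_for_each(block, &iter) {
	if (block->tag == DATA_BLOCK_TILED_DISPLAY) {
		const struct displayid_tiled_block *tile =
			(const struct displayid_tiled_block *)block;
		/* parse tile->topo, tile->tile_size, ... */
	}
}
displayid_iter_end(&iter);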
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index b439ae1921b8..42fc085f986d 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -30,8 +30,13 @@
#include <linux/list.h>
#include <linux/irqreturn.h>
+#include <video/nomodeset.h>
+
#include <drm/drm_device.h>
+struct dmem_cgroup_region;
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
struct drm_file;
struct drm_gem_object;
struct drm_master;
@@ -94,6 +99,29 @@ enum drm_driver_feature {
* synchronization of command submission.
*/
DRIVER_SYNCOBJ_TIMELINE = BIT(6),
+ /**
+ * @DRIVER_COMPUTE_ACCEL:
+ *
+ * Driver supports compute acceleration devices. This flag is mutually exclusive with
+ * @DRIVER_RENDER and @DRIVER_MODESET. Devices that support both graphics and compute
+ * acceleration should be handled by two drivers that are connected using auxiliary bus.
+ */
+ DRIVER_COMPUTE_ACCEL = BIT(7),
+ /**
+ * @DRIVER_GEM_GPUVA:
+ *
+ * Driver supports user defined GPU VA bindings for GEM objects.
+ */
+ DRIVER_GEM_GPUVA = BIT(8),
+ /**
+ * @DRIVER_CURSOR_HOTSPOT:
+ *
+ * Driver supports and requires cursor hotspot information in the
+ * cursor plane (e.g. cursor plane has to actually track the mouse
+ * cursor and the clients are required to set hotspot in order for
+ * the cursor planes to work correctly).
+ */
+ DRIVER_CURSOR_HOTSPOT = BIT(9),
/* IMPORTANT: Below are all the legacy flags, add new ones above. */
@@ -137,19 +165,8 @@ enum drm_driver_feature {
* @DRIVER_HAVE_IRQ:
*
* Legacy irq support. Only for legacy drivers. Do not use.
- *
- * New drivers can either use the drm_irq_install() and
- * drm_irq_uninstall() helper functions, or roll their own irq support
- * code by calling request_irq() directly.
*/
DRIVER_HAVE_IRQ = BIT(30),
- /**
- * @DRIVER_KMS_LEGACY_CONTEXT:
- *
- * Used only by nouveau for backwards compatibility with existing
- * userspace. Do not use.
- */
- DRIVER_KMS_LEGACY_CONTEXT = BIT(31),
};
/**
@@ -215,34 +232,6 @@ struct drm_driver {
void (*postclose) (struct drm_device *, struct drm_file *);
/**
- * @lastclose:
- *
- * Called when the last &struct drm_file has been closed and there's
- * currently no userspace client for the &struct drm_device.
- *
- * Modern drivers should only use this to force-restore the fbdev
- * framebuffer using drm_fb_helper_restore_fbdev_mode_unlocked().
- * Anything else would indicate there's something seriously wrong.
- * Modern drivers can also use this to execute delayed power switching
- * state changes, e.g. in conjunction with the :ref:`vga_switcheroo`
- * infrastructure.
- *
- * This is called after @postclose hook has been called.
- *
- * NOTE:
- *
- * All legacy drivers use this callback to de-initialize the hardware.
- * This is purely because of the shadow-attach model, where the DRM
- * kernel driver does not really own the hardware. Instead ownershipe is
- * handled with the help of userspace through an inheritedly racy dance
- * to set/unset the VT into raw mode.
- *
- * Legacy drivers initialize the hardware in the @firstopen callback,
- * which isn't even called for modern drivers.
- */
- void (*lastclose) (struct drm_device *);
-
- /**
* @unload:
*
* Reverse the effects of the driver load callback. Ideally,
@@ -272,42 +261,6 @@ struct drm_driver {
void (*release) (struct drm_device *);
/**
- * @irq_handler:
- *
- * Interrupt handler called when using drm_irq_install(). Not used by
- * drivers which implement their own interrupt handling.
- */
- irqreturn_t(*irq_handler) (int irq, void *arg);
-
- /**
- * @irq_preinstall:
- *
- * Optional callback used by drm_irq_install() which is called before
- * the interrupt handler is registered. This should be used to clear out
- * any pending interrupts (from e.g. firmware based drives) and reset
- * the interrupt handling registers.
- */
- void (*irq_preinstall) (struct drm_device *dev);
-
- /**
- * @irq_postinstall:
- *
- * Optional callback used by drm_irq_install() which is called after
- * the interrupt handler is registered. This should be used to enable
- * interrupt generation in the hardware.
- */
- int (*irq_postinstall) (struct drm_device *dev);
-
- /**
- * @irq_uninstall:
- *
- * Optional callback used by drm_irq_uninstall() which is called before
- * the interrupt handler is unregistered. This should be used to disable
- * interrupt generation in the hardware.
- */
- void (*irq_uninstall) (struct drm_device *dev);
-
- /**
* @master_set:
*
* Called whenever the minor master is set. Only used by vmwgfx.
@@ -331,8 +284,9 @@ struct drm_driver {
/**
* @gem_create_object: constructor for gem objects
*
- * Hook for allocating the GEM object struct, for use by the CMA and
- * SHMEM GEM helpers.
+ * Hook for allocating the GEM object struct, for use by the CMA
+ * and SHMEM GEM helpers. Returns a GEM object on success, or an
+ * ERR_PTR()-encoded error code otherwise.
*/
struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
size_t size);
@@ -340,22 +294,14 @@ struct drm_driver {
/**
* @prime_handle_to_fd:
*
- * Main PRIME export function. Should be implemented with
- * drm_gem_prime_handle_to_fd() for GEM based drivers.
- *
- * For an in-depth discussion see :ref:`PRIME buffer sharing
- * documentation <prime_buffer_sharing>`.
+ * PRIME export function. Only used by vmwgfx.
*/
int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
uint32_t handle, uint32_t flags, int *prime_fd);
/**
* @prime_fd_to_handle:
*
- * Main PRIME import function. Should be implemented with
- * drm_gem_prime_fd_to_handle() for GEM based drivers.
- *
- * For an in-depth discussion see :ref:`PRIME buffer sharing
- * documentation <prime_buffer_sharing>`.
+ * PRIME import function. Only used by vmwgfx.
*/
int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
int prime_fd, uint32_t *handle);
@@ -379,17 +325,6 @@ struct drm_driver {
struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
- /**
- * @gem_prime_mmap:
- *
- * mmap hook for GEM drivers, used to implement dma-buf mmap in the
- * PRIME helpers.
- *
- * FIXME: There's way too much duplication going on here, and also moved
- * to &drm_gem_object_funcs.
- */
- int (*gem_prime_mmap)(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
/**
* @dumb_create:
@@ -433,25 +368,29 @@ struct drm_driver {
int (*dumb_map_offset)(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset);
+
/**
- * @dumb_destroy:
- *
- * This destroys the userspace handle for the given dumb backing storage buffer.
- * Since buffer objects must be reference counted in the kernel a buffer object
- * won't be immediately freed if a framebuffer modeset object still uses it.
+ * @fbdev_probe:
*
- * Called by the user via ioctl.
+ * Allocates and initializes the fb_info structure for fbdev emulation.
+ * Furthermore it also needs to allocate the DRM framebuffer used to
+ * back the fbdev.
*
- * The default implementation is drm_gem_dumb_destroy(). GEM based drivers
- * must not overwrite this.
+ * This callback is mandatory for fbdev support.
*
* Returns:
*
- * Zero on success, negative errno on failure.
+ * 0 on success or a negative error code otherwise.
*/
- int (*dumb_destroy)(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
+ int (*fbdev_probe)(struct drm_fb_helper *fbdev_helper,
+ struct drm_fb_helper_surface_size *sizes);
+
+ /**
+ * @show_fdinfo:
+ *
+ * Print device specific fdinfo. See Documentation/gpu/drm-usage-stats.rst.
+ */
+ void (*show_fdinfo)(struct drm_printer *p, struct drm_file *f);
/** @major: driver major number */
int major;
@@ -463,8 +402,6 @@ struct drm_driver {
char *name;
/** @desc: driver description */
char *desc;
- /** @date: driver date */
- char *date;
/**
* @driver_features:
@@ -494,27 +431,16 @@ struct drm_driver {
* some examples.
*/
const struct file_operations *fops;
-
-#ifdef CONFIG_DRM_LEGACY
- /* Everything below here is for legacy driver, never use! */
- /* private: */
-
- int (*firstopen) (struct drm_device *);
- void (*preclose) (struct drm_device *, struct drm_file *file_priv);
- int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
- int (*dma_quiescent) (struct drm_device *);
- int (*context_dtor) (struct drm_device *dev, int context);
- u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe);
- int (*enable_vblank)(struct drm_device *dev, unsigned int pipe);
- void (*disable_vblank)(struct drm_device *dev, unsigned int pipe);
- int dev_priv_size;
-#endif
};
void *__devm_drm_dev_alloc(struct device *parent,
const struct drm_driver *driver,
size_t size, size_t offset);
+struct dmem_cgroup_region *
+drmm_cgroup_register_region(struct drm_device *dev,
+ const char *region_name, u64 size);
+
/**
* devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance
* @parent: Parent device object
@@ -547,6 +473,11 @@ void *__devm_drm_dev_alloc(struct device *parent,
struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
struct device *parent);
+
+void *__drm_dev_alloc(struct device *parent,
+ const struct drm_driver *driver,
+ size_t size, size_t offset);
+
int drm_dev_register(struct drm_device *dev, unsigned long flags);
void drm_dev_unregister(struct drm_device *dev);
@@ -556,6 +487,8 @@ void drm_put_dev(struct drm_device *dev);
bool drm_dev_enter(struct drm_device *dev, int *idx);
void drm_dev_exit(int idx);
void drm_dev_unplug(struct drm_device *dev);
+int drm_dev_wedged_event(struct drm_device *dev, unsigned long method,
+ struct drm_wedge_task_info *info);
/**
* drm_dev_is_unplugged - is a DRM device unplugged
@@ -632,7 +565,33 @@ static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
}
-int drm_dev_set_unique(struct drm_device *dev, const char *name);
+/* TODO: Inline drm_firmware_drivers_only() in all its callers. */
+static inline bool drm_firmware_drivers_only(void)
+{
+ return video_firmware_drivers_only();
+}
+
+#if defined(CONFIG_DEBUG_FS)
+void drm_debugfs_dev_init(struct drm_device *dev);
+void drm_debugfs_init_root(void);
+void drm_debugfs_remove_root(void);
+void drm_debugfs_bridge_params(void);
+#else
+static inline void drm_debugfs_dev_init(struct drm_device *dev)
+{
+}
+
+static inline void drm_debugfs_init_root(void)
+{
+}
+
+static inline void drm_debugfs_remove_root(void)
+{
+}
+static inline void drm_debugfs_bridge_params(void)
+{
+}
+#endif
#endif
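
Taken together, the changes above suggest roughly the following skeleton for a modern driver. All hypo_ names are illustrative, the DRIVER_GEM/DRIVER_MODESET/DRIVER_ATOMIC flags are assumed from the same feature enum rather than shown in this hunk, and note the absence of the removed .date field:

static const struct drm_driver hypo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops	= &hypo_fops,
	.name	= "hypo",
	.desc	= "Hypothetical example driver",
	.major	= 1,
	.minor	= 0,
};

static int hypo_probe(struct platform_device *pdev)
{
	struct hypo_device *hdev;

	hdev = devm_drm_dev_alloc(&pdev->dev, &hypo_driver,
				  struct hypo_device, drm);
	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	return drm_dev_register(&hdev->drm, 0);
}

/* From e.g. a failed reset path, once the device is beyond recovery: */
static void hypo_declare_wedged(struct hypo_device *hdev)
{
	/* NULL task info: no guilty task known (an assumption here). */
	drm_dev_wedged_event(&hdev->drm, DRM_WEDGE_RECOVERY_REBIND, NULL);
}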
diff --git a/include/drm/drm_dumb_buffers.h b/include/drm/drm_dumb_buffers.h
new file mode 100644
index 000000000000..1f3a8236fb3d
--- /dev/null
+++ b/include/drm/drm_dumb_buffers.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef __DRM_DUMB_BUFFERS_H__
+#define __DRM_DUMB_BUFFERS_H__
+
+struct drm_device;
+struct drm_mode_create_dumb;
+
+int drm_mode_size_dumb(struct drm_device *dev,
+ struct drm_mode_create_dumb *args,
+ unsigned long hw_pitch_align,
+ unsigned long hw_size_align);
+
+#endif
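
A hedged sketch of where drm_mode_size_dumb() sits, assuming it computes args->pitch and args->size from the user-supplied width/height/bpp while honoring the given alignments; the 64-byte pitch alignment below is an arbitrary example constraint, not a rule from this header:

static int hypo_dumb_create(struct drm_file *file, struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	int ret;

	ret = drm_mode_size_dumb(dev, args, 64, 0);
	if (ret)
		return ret;

	/* ...allocate a GEM buffer of args->size and set args->handle... */
	return 0;
}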
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 759328a5eeb2..04f7a7f1f108 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -24,10 +24,15 @@
#define __DRM_EDID_H__
#include <linux/types.h>
-#include <linux/hdmi.h>
-#include <drm/drm_mode.h>
+enum hdmi_quantization_range;
+struct drm_connector;
struct drm_device;
+struct drm_display_mode;
+struct drm_edid;
+struct drm_printer;
+struct hdmi_avi_infoframe;
+struct hdmi_vendor_infoframe;
struct i2c_adapter;
#define EDID_LENGTH 128
@@ -45,7 +50,7 @@ struct est_timings {
u8 t1;
u8 t2;
u8 mfg_rsvd;
-} __attribute__((packed));
+} __packed;
/* 00=16:10, 01=4:3, 10=5:4, 11=16:9 */
#define EDID_TIMING_ASPECT_SHIFT 6
@@ -58,7 +63,7 @@ struct est_timings {
struct std_timing {
u8 hsize; /* need to multiply by 8 then add 248 */
u8 vfreq_aspect;
-} __attribute__((packed));
+} __packed;
#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
@@ -84,17 +89,32 @@ struct detailed_pixel_timing {
u8 hborder;
u8 vborder;
u8 misc;
-} __attribute__((packed));
+} __packed;
/* If it's not pixel timing, it'll be one of the below */
struct detailed_data_string {
u8 str[13];
-} __attribute__((packed));
+} __packed;
+
+#define DRM_EDID_RANGE_OFFSET_MIN_VFREQ (1 << 0) /* 1.4 */
+#define DRM_EDID_RANGE_OFFSET_MAX_VFREQ (1 << 1) /* 1.4 */
+#define DRM_EDID_RANGE_OFFSET_MIN_HFREQ (1 << 2) /* 1.4 */
+#define DRM_EDID_RANGE_OFFSET_MAX_HFREQ (1 << 3) /* 1.4 */
+
+#define DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG 0x00 /* 1.3 */
+#define DRM_EDID_RANGE_LIMITS_ONLY_FLAG 0x01 /* 1.4 */
+#define DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG 0x02 /* 1.3 */
+#define DRM_EDID_CVT_SUPPORT_FLAG 0x04 /* 1.4 */
-#define DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG 0x00
-#define DRM_EDID_RANGE_LIMITS_ONLY_FLAG 0x01
-#define DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG 0x02
-#define DRM_EDID_CVT_SUPPORT_FLAG 0x04
+#define DRM_EDID_CVT_FLAGS_STANDARD_BLANKING (1 << 3)
+#define DRM_EDID_CVT_FLAGS_REDUCED_BLANKING (1 << 4)
+
+enum drm_edid_quirk {
+ /* Do a dummy read before DPCD accesses, to prevent corruption. */
+ DRM_EDID_QUIRK_DP_DPCD_PROBE,
+
+ DRM_EDID_QUIRK_NUM,
+};
struct detailed_data_monitor_range {
u8 min_vfreq;
@@ -111,7 +131,7 @@ struct detailed_data_monitor_range {
__le16 m;
u8 k;
u8 j; /* need to divide by 2 */
- } __attribute__((packed)) gtf2;
+ } __packed gtf2;
struct {
u8 version;
u8 data1; /* high 6 bits: extra clock resolution */
@@ -120,27 +140,27 @@ struct detailed_data_monitor_range {
u8 flags; /* preferred aspect and blanking support */
u8 supported_scalings;
u8 preferred_refresh;
- } __attribute__((packed)) cvt;
- } formula;
-} __attribute__((packed));
+ } __packed cvt;
+ } __packed formula;
+} __packed;
struct detailed_data_wpindex {
u8 white_yx_lo; /* Lower 2 bits each */
u8 white_x_hi;
u8 white_y_hi;
u8 gamma; /* need to divide by 100 then add 1 */
-} __attribute__((packed));
+} __packed;
struct detailed_data_color_point {
u8 windex1;
u8 wpindex1[3];
u8 windex2;
u8 wpindex2[3];
-} __attribute__((packed));
+} __packed;
struct cvt_timing {
u8 code[3];
-} __attribute__((packed));
+} __packed;
struct detailed_non_pixel {
u8 pad1;
@@ -154,8 +174,8 @@ struct detailed_non_pixel {
struct detailed_data_wpindex color;
struct std_timing timings[6];
struct cvt_timing cvt[4];
- } data;
-} __attribute__((packed));
+ } __packed data;
+} __packed;
#define EDID_DETAIL_EST_TIMINGS 0xf7
#define EDID_DETAIL_CVT_3BYTE 0xf8
@@ -172,8 +192,8 @@ struct detailed_timing {
union {
struct detailed_pixel_timing pixel_data;
struct detailed_non_pixel other_data;
- } data;
-} __attribute__((packed));
+ } __packed data;
+} __packed;
#define DRM_EDID_INPUT_SERRATION_VSYNC (1 << 0)
#define DRM_EDID_INPUT_SYNC_ON_GREEN (1 << 1)
@@ -200,7 +220,8 @@ struct detailed_timing {
#define DRM_EDID_DIGITAL_TYPE_DP (5 << 0) /* 1.4 */
#define DRM_EDID_DIGITAL_DFP_1_X (1 << 0) /* 1.3 */
-#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0)
+#define DRM_EDID_FEATURE_DEFAULT_GTF (1 << 0) /* 1.2 */
+#define DRM_EDID_FEATURE_CONTINUOUS_FREQ (1 << 0) /* 1.4 */
#define DRM_EDID_FEATURE_PREFERRED_TIMING (1 << 1)
#define DRM_EDID_FEATURE_STANDARD_COLOR (1 << 2)
/* If analog */
@@ -259,72 +280,27 @@ struct detailed_timing {
#define DRM_EDID_DSC_MAX_SLICES 0xf
#define DRM_EDID_DSC_TOTAL_CHUNK_KBYTES 0x3f
-/* ELD Header Block */
-#define DRM_ELD_HEADER_BLOCK_SIZE 4
-
-#define DRM_ELD_VER 0
-# define DRM_ELD_VER_SHIFT 3
-# define DRM_ELD_VER_MASK (0x1f << 3)
-# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */
-# define DRM_ELD_VER_CANNED (0x1f << 3)
-
-#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
-
-/* ELD Baseline Block for ELD_Ver == 2 */
-#define DRM_ELD_CEA_EDID_VER_MNL 4
-# define DRM_ELD_CEA_EDID_VER_SHIFT 5
-# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5)
-# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5)
-# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5)
-# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5)
-# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5)
-# define DRM_ELD_MNL_SHIFT 0
-# define DRM_ELD_MNL_MASK (0x1f << 0)
-
-#define DRM_ELD_SAD_COUNT_CONN_TYPE 5
-# define DRM_ELD_SAD_COUNT_SHIFT 4
-# define DRM_ELD_SAD_COUNT_MASK (0xf << 4)
-# define DRM_ELD_CONN_TYPE_SHIFT 2
-# define DRM_ELD_CONN_TYPE_MASK (3 << 2)
-# define DRM_ELD_CONN_TYPE_HDMI (0 << 2)
-# define DRM_ELD_CONN_TYPE_DP (1 << 2)
-# define DRM_ELD_SUPPORTS_AI (1 << 1)
-# define DRM_ELD_SUPPORTS_HDCP (1 << 0)
-
-#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */
-# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */
-
-#define DRM_ELD_SPEAKER 7
-# define DRM_ELD_SPEAKER_MASK 0x7f
-# define DRM_ELD_SPEAKER_RLRC (1 << 6)
-# define DRM_ELD_SPEAKER_FLRC (1 << 5)
-# define DRM_ELD_SPEAKER_RC (1 << 4)
-# define DRM_ELD_SPEAKER_RLR (1 << 3)
-# define DRM_ELD_SPEAKER_FC (1 << 2)
-# define DRM_ELD_SPEAKER_LFE (1 << 1)
-# define DRM_ELD_SPEAKER_FLR (1 << 0)
-
-#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */
-# define DRM_ELD_PORT_ID_LEN 8
-
-#define DRM_ELD_MANUFACTURER_NAME0 16
-#define DRM_ELD_MANUFACTURER_NAME1 17
-
-#define DRM_ELD_PRODUCT_CODE0 18
-#define DRM_ELD_PRODUCT_CODE1 19
-
-#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */
-
-#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad))
+struct drm_edid_product_id {
+ __be16 manufacturer_name;
+ __le16 product_code;
+ __le32 serial_number;
+ u8 week_of_manufacture;
+ u8 year_of_manufacture;
+} __packed;
struct edid {
u8 header[8];
/* Vendor & product info */
- u8 mfg_id[2];
- u8 prod_code[2];
- u32 serial; /* FIXME: byte order */
- u8 mfg_week;
- u8 mfg_year;
+ union {
+ struct drm_edid_product_id product_id;
+ struct {
+ u8 mfg_id[2];
+ u8 prod_code[2];
+ u32 serial; /* FIXME: byte order */
+ u8 mfg_week;
+ u8 mfg_year;
+ } __packed;
+ } __packed;
/* EDID version */
u8 version;
u8 revision;
@@ -336,7 +312,7 @@ struct edid {
u8 features;
/* Color characteristics */
u8 red_green_lo;
- u8 black_white_lo;
+ u8 blue_white_lo;
u8 red_x;
u8 red_y;
u8 green_x;
@@ -355,7 +331,20 @@ struct edid {
u8 extensions;
/* Checksum */
u8 checksum;
-} __attribute__((packed));
+} __packed;
+
+/* EDID matching */
+struct drm_edid_ident {
+ /* ID encoded by drm_edid_encode_panel_id() */
+ u32 panel_id;
+ const char *name;
+};
+
+#define DRM_EDID_IDENT_INIT(_vend_chr_0, _vend_chr_1, _vend_chr_2, _product_id, _name) \
+{ \
+ .panel_id = drm_edid_encode_panel_id(_vend_chr_0, _vend_chr_1, _vend_chr_2, _product_id), \
+ .name = _name, \
+}
#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
@@ -367,30 +356,11 @@ struct cea_sad {
u8 byte2; /* meaning depends on format */
};
-struct drm_encoder;
-struct drm_connector;
-struct drm_connector_state;
-struct drm_display_mode;
-
-int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
-int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
+int drm_edid_to_sad(const struct edid *edid, struct cea_sad **sads);
+int drm_edid_to_speaker_allocation(const struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode);
-#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
-struct edid *drm_load_edid_firmware(struct drm_connector *connector);
-int __drm_set_edid_firmware_path(const char *path);
-int __drm_get_edid_firmware_path(char *buf, size_t bufsize);
-#else
-static inline struct edid *
-drm_load_edid_firmware(struct drm_connector *connector)
-{
- return ERR_PTR(-ENOENT);
-}
-#endif
-
-bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2);
-
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
const struct drm_connector *connector,
@@ -401,141 +371,90 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
const struct drm_display_mode *mode);
void
-drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
- const struct drm_connector_state *conn_state);
-
-void
-drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
- const struct drm_connector_state *conn_state);
-
-void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
const struct drm_connector *connector,
const struct drm_display_mode *mode,
enum hdmi_quantization_range rgb_quant_range);
-int
-drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame,
- const struct drm_connector_state *conn_state);
-
/**
- * drm_eld_mnl - Get ELD monitor name length in bytes.
- * @eld: pointer to an eld memory structure with mnl set
+ * drm_edid_decode_mfg_id - Decode the manufacturer ID
+ * @mfg_id: The manufacturer ID
+ * @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0'
+ * termination
*/
-static inline int drm_eld_mnl(const uint8_t *eld)
+static inline const char *drm_edid_decode_mfg_id(u16 mfg_id, char vend[4])
{
- return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
-}
-
-/**
- * drm_eld_sad - Get ELD SAD structures.
- * @eld: pointer to an eld memory structure with sad_count set
- */
-static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
-{
- unsigned int ver, mnl;
-
- ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
- if (ver != 2 && ver != 31)
- return NULL;
-
- mnl = drm_eld_mnl(eld);
- if (mnl > 16)
- return NULL;
+ vend[0] = '@' + ((mfg_id >> 10) & 0x1f);
+ vend[1] = '@' + ((mfg_id >> 5) & 0x1f);
+ vend[2] = '@' + ((mfg_id >> 0) & 0x1f);
+ vend[3] = '\0';
- return eld + DRM_ELD_CEA_SAD(mnl, 0);
+ return vend;
}
/**
- * drm_eld_sad_count - Get ELD SAD count.
- * @eld: pointer to an eld memory structure with sad_count set
- */
-static inline int drm_eld_sad_count(const uint8_t *eld)
-{
- return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >>
- DRM_ELD_SAD_COUNT_SHIFT;
-}
-
-/**
- * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes
- * @eld: pointer to an eld memory structure with mnl and sad_count set
+ * drm_edid_encode_panel_id - Encode an ID for matching against drm_edid_get_panel_id()
+ * @vend_chr_0: First character of the vendor string.
+ * @vend_chr_1: Second character of the vendor string.
+ * @vend_chr_2: Third character of the vendor string.
+ * @product_id: The 16-bit product ID.
*
- * This is a helper for determining the payload size of the baseline block, in
- * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block.
- */
-static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld)
-{
- return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE +
- drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3;
-}
-
-/**
- * drm_eld_size - Get ELD size in bytes
- * @eld: pointer to a complete eld memory structure
- *
- * The returned value does not include the vendor block. It's vendor specific,
- * and comprises of the remaining bytes in the ELD memory buffer after
- * drm_eld_size() bytes of header and baseline block.
+ * This is a macro so that it can be calculated at compile time and used
+ * as an initializer.
*
- * The returned value is guaranteed to be a multiple of 4.
- */
-static inline int drm_eld_size(const uint8_t *eld)
-{
- return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
-}
-
-/**
- * drm_eld_get_spk_alloc - Get speaker allocation
- * @eld: pointer to an ELD memory structure
+ * For instance:
+ * drm_edid_encode_panel_id('B', 'O', 'E', 0x2d08) => 0x09e52d08
*
- * The returned value is the speakers mask. User has to use %DRM_ELD_SPEAKER
- * field definitions to identify speakers.
+ * Return: a 32-bit ID per panel.
*/
-static inline u8 drm_eld_get_spk_alloc(const uint8_t *eld)
-{
- return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK;
-}
+#define drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, product_id) \
+ ((((u32)(vend_chr_0) - '@') & 0x1f) << 26 | \
+ (((u32)(vend_chr_1) - '@') & 0x1f) << 21 | \
+ (((u32)(vend_chr_2) - '@') & 0x1f) << 16 | \
+ ((product_id) & 0xffff))
/**
- * drm_eld_get_conn_type - Get device type hdmi/dp connected
- * @eld: pointer to an ELD memory structure
+ * drm_edid_decode_panel_id - Decode a panel ID from drm_edid_encode_panel_id()
+ * @panel_id: The panel ID to decode.
+ * @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0'
+ * termination
+ * @product_id: The product ID will be returned here.
*
- * The caller need to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to
- * identify the display type connected.
+ * For instance, after:
+ * drm_edid_decode_panel_id(0x09e52d08, vend, &product_id)
+ * These will be true:
+ * vend[0] = 'B'
+ * vend[1] = 'O'
+ * vend[2] = 'E'
+ * vend[3] = '\0'
+ * product_id = 0x2d08
*/
-static inline u8 drm_eld_get_conn_type(const uint8_t *eld)
+static inline void drm_edid_decode_panel_id(u32 panel_id, char vend[4], u16 *product_id)
{
- return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
+ *product_id = (u16)(panel_id & 0xffff);
+ drm_edid_decode_mfg_id(panel_id >> 16, vend);
}
bool drm_probe_ddc(struct i2c_adapter *adapter);
-struct edid *drm_do_get_edid(struct drm_connector *connector,
- int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
- size_t len),
- void *data);
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
struct i2c_adapter *adapter);
struct edid *drm_edid_duplicate(const struct edid *edid);
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
-int drm_add_override_edid_modes(struct drm_connector *connector);
+int drm_edid_override_connector_update(struct drm_connector *connector);
u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
-bool drm_detect_hdmi_monitor(struct edid *edid);
-bool drm_detect_monitor_audio(struct edid *edid);
+bool drm_detect_hdmi_monitor(const struct edid *edid);
+bool drm_detect_monitor_audio(const struct edid *edid);
enum hdmi_quantization_range
drm_default_rgb_quant_range(const struct drm_display_mode *mode);
int drm_add_modes_noedid(struct drm_connector *connector,
- int hdisplay, int vdisplay);
-void drm_set_preferred_mode(struct drm_connector *connector,
- int hpref, int vpref);
+ unsigned int hdisplay, unsigned int vdisplay);
-int drm_edid_header_is_valid(const u8 *raw_edid);
-bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
- bool *edid_corrupt);
+int drm_edid_header_is_valid(const void *edid);
bool drm_edid_is_valid(struct edid *edid);
-void drm_edid_get_monitor_name(struct edid *edid, char *name,
+void drm_edid_get_monitor_name(const struct edid *edid, char *name,
int buflen);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,
@@ -543,8 +462,33 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
struct drm_display_mode *
drm_display_mode_from_cea_vic(struct drm_device *dev,
u8 video_code);
-const u8 *drm_find_edid_extension(const struct edid *edid,
- int ext_id, int *ext_index);
+/* Interface based on struct drm_edid */
+const struct drm_edid *drm_edid_alloc(const void *edid, size_t size);
+const struct drm_edid *drm_edid_dup(const struct drm_edid *drm_edid);
+void drm_edid_free(const struct drm_edid *drm_edid);
+bool drm_edid_valid(const struct drm_edid *drm_edid);
+const struct edid *drm_edid_raw(const struct drm_edid *drm_edid);
+const struct drm_edid *drm_edid_read(struct drm_connector *connector);
+const struct drm_edid *drm_edid_read_ddc(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+const struct drm_edid *drm_edid_read_custom(struct drm_connector *connector,
+ int (*read_block)(void *context, u8 *buf, unsigned int block, size_t len),
+ void *context);
+const struct drm_edid *drm_edid_read_base_block(struct i2c_adapter *adapter);
+const struct drm_edid *drm_edid_read_switcheroo(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+int drm_edid_connector_update(struct drm_connector *connector,
+ const struct drm_edid *edid);
+int drm_edid_connector_add_modes(struct drm_connector *connector);
+bool drm_edid_is_digital(const struct drm_edid *drm_edid);
+void drm_edid_get_product_id(const struct drm_edid *drm_edid,
+ struct drm_edid_product_id *id);
+void drm_edid_print_product_id(struct drm_printer *p,
+ const struct drm_edid_product_id *id, bool raw);
+u32 drm_edid_get_panel_id(const struct drm_edid *drm_edid);
+bool drm_edid_match(const struct drm_edid *drm_edid,
+ const struct drm_edid_ident *ident);
+bool drm_edid_has_quirk(struct drm_connector *connector, enum drm_edid_quirk quirk);
#endif /* __DRM_EDID_H__ */
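
Under the struct drm_edid interface declared above, a connector's .get_modes hook typically reduces to the following sketch (the NULL-read-clears-state behavior is an assumption based on the interface's intended use):

static int hypo_connector_get_modes(struct drm_connector *connector)
{
	const struct drm_edid *drm_edid;
	int count;

	drm_edid = drm_edid_read(connector);

	/* Update connector state; a NULL read clears stale EDID data. */
	drm_edid_connector_update(connector, drm_edid);

	count = drm_edid_connector_add_modes(connector);

	drm_edid_free(drm_edid);

	return count;
}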
diff --git a/include/drm/drm_eld.h b/include/drm/drm_eld.h
new file mode 100644
index 000000000000..0a88d10b28b0
--- /dev/null
+++ b/include/drm/drm_eld.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __DRM_ELD_H__
+#define __DRM_ELD_H__
+
+#include <linux/types.h>
+
+struct cea_sad;
+
+/* ELD Header Block */
+#define DRM_ELD_HEADER_BLOCK_SIZE 4
+
+#define DRM_ELD_VER 0
+# define DRM_ELD_VER_SHIFT 3
+# define DRM_ELD_VER_MASK (0x1f << 3)
+# define DRM_ELD_VER_CEA861D (2 << 3) /* supports 861D or below */
+# define DRM_ELD_VER_CANNED (0x1f << 3)
+
+#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
+
+/* ELD Baseline Block for ELD_Ver == 2 */
+#define DRM_ELD_CEA_EDID_VER_MNL 4
+# define DRM_ELD_CEA_EDID_VER_SHIFT 5
+# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5)
+# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5)
+# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5)
+# define DRM_ELD_MNL_SHIFT 0
+# define DRM_ELD_MNL_MASK (0x1f << 0)
+
+#define DRM_ELD_SAD_COUNT_CONN_TYPE 5
+# define DRM_ELD_SAD_COUNT_SHIFT 4
+# define DRM_ELD_SAD_COUNT_MASK (0xf << 4)
+# define DRM_ELD_CONN_TYPE_SHIFT 2
+# define DRM_ELD_CONN_TYPE_MASK (3 << 2)
+# define DRM_ELD_CONN_TYPE_HDMI (0 << 2)
+# define DRM_ELD_CONN_TYPE_DP (1 << 2)
+# define DRM_ELD_SUPPORTS_AI (1 << 1)
+# define DRM_ELD_SUPPORTS_HDCP (1 << 0)
+
+#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */
+# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */
+
+#define DRM_ELD_SPEAKER 7
+# define DRM_ELD_SPEAKER_MASK 0x7f
+# define DRM_ELD_SPEAKER_RLRC (1 << 6)
+# define DRM_ELD_SPEAKER_FLRC (1 << 5)
+# define DRM_ELD_SPEAKER_RC (1 << 4)
+# define DRM_ELD_SPEAKER_RLR (1 << 3)
+# define DRM_ELD_SPEAKER_FC (1 << 2)
+# define DRM_ELD_SPEAKER_LFE (1 << 1)
+# define DRM_ELD_SPEAKER_FLR (1 << 0)
+
+#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */
+# define DRM_ELD_PORT_ID_LEN 8
+
+#define DRM_ELD_MANUFACTURER_NAME0 16
+#define DRM_ELD_MANUFACTURER_NAME1 17
+
+#define DRM_ELD_PRODUCT_CODE0 18
+#define DRM_ELD_PRODUCT_CODE1 19
+
+#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */
+
+#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad))
+
+/**
+ * drm_eld_mnl - Get ELD monitor name length in bytes.
+ * @eld: pointer to an eld memory structure with mnl set
+ */
+static inline int drm_eld_mnl(const u8 *eld)
+{
+ return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
+}
+
+int drm_eld_sad_get(const u8 *eld, int sad_index, struct cea_sad *cta_sad);
+int drm_eld_sad_set(u8 *eld, int sad_index, const struct cea_sad *cta_sad);
+
+/**
+ * drm_eld_sad - Get ELD SAD structures.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline const u8 *drm_eld_sad(const u8 *eld)
+{
+ unsigned int ver, mnl;
+
+ ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
+ if (ver != 2 && ver != 31)
+ return NULL;
+
+ mnl = drm_eld_mnl(eld);
+ if (mnl > 16)
+ return NULL;
+
+ return eld + DRM_ELD_CEA_SAD(mnl, 0);
+}
+
+/**
+ * drm_eld_sad_count - Get ELD SAD count.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline int drm_eld_sad_count(const u8 *eld)
+{
+ return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >>
+ DRM_ELD_SAD_COUNT_SHIFT;
+}
+
+/**
+ * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes
+ * @eld: pointer to an eld memory structure with mnl and sad_count set
+ *
+ * This is a helper for determining the payload size of the baseline block, in
+ * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block.
+ */
+static inline int drm_eld_calc_baseline_block_size(const u8 *eld)
+{
+ return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE +
+ drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3;
+}
+
+/**
+ * drm_eld_size - Get ELD size in bytes
+ * @eld: pointer to a complete eld memory structure
+ *
+ * The returned value does not include the vendor block, which is vendor
+ * specific and comprises the remaining bytes in the ELD memory buffer after
+ * drm_eld_size() bytes of header and baseline block.
+ *
+ * The returned value is guaranteed to be a multiple of 4.
+ */
+static inline int drm_eld_size(const u8 *eld)
+{
+ return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
+}
+
+/**
+ * drm_eld_get_spk_alloc - Get speaker allocation
+ * @eld: pointer to an ELD memory structure
+ *
+ * The returned value is the speaker mask. The user has to use %DRM_ELD_SPEAKER
+ * field definitions to identify speakers.
+ */
+static inline u8 drm_eld_get_spk_alloc(const u8 *eld)
+{
+ return eld[DRM_ELD_SPEAKER] & DRM_ELD_SPEAKER_MASK;
+}
+
+/**
+ * drm_eld_get_conn_type - Get device type hdmi/dp connected
+ * @eld: pointer to an ELD memory structure
+ *
+ * The caller needs to use %DRM_ELD_CONN_TYPE_HDMI or %DRM_ELD_CONN_TYPE_DP to
+ * identify the display type connected.
+ */
+static inline u8 drm_eld_get_conn_type(const u8 *eld)
+{
+ return eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_CONN_TYPE_MASK;
+}
+
+#endif /* __DRM_ELD_H__ */
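
A short consumer sketch for the helpers collected in this new header, with eld pointing at a complete ELD buffer; the 3-byte SAD stride follows from the DRM_ELD_CEA_SAD() definition above:

static void hypo_parse_eld(const u8 *eld)
{
	int i, count = drm_eld_sad_count(eld);
	const u8 *sad = drm_eld_sad(eld);	/* NULL on bad version/mnl */
	bool has_lfe = drm_eld_get_spk_alloc(eld) & DRM_ELD_SPEAKER_LFE;

	for (i = 0; sad && i < count; i++) {
		const u8 *entry = &sad[3 * i];	/* each SAD entry is 3 bytes */
		/* entry[0..2]: coding type, channel count, rates, ... */
	}
}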
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 6e91a0280f31..977a9381c8ba 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -60,7 +60,7 @@ struct drm_encoder_funcs {
* @late_register:
*
* This optional hook can be used to register additional userspace
- * interfaces attached to the encoder like debugfs interfaces.
+ * interfaces attached to the encoder.
* It is called late in the driver load sequence from drm_dev_register().
* Everything added from this callback should be unregistered in
* the early_unregister callback.
@@ -81,6 +81,13 @@ struct drm_encoder_funcs {
* before data structures are torndown.
*/
void (*early_unregister)(struct drm_encoder *encoder);
+
+ /**
+ * @debugfs_init:
+ *
+ * Allows encoders to create encoder-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_encoder *encoder, struct dentry *root);
};
/**
@@ -184,6 +191,13 @@ struct drm_encoder {
const struct drm_encoder_funcs *funcs;
const struct drm_encoder_helper_funcs *helper_private;
+
+ /**
+ * @debugfs_entry:
+ *
+ * Debugfs directory for this encoder.
+ */
+ struct dentry *debugfs_entry;
};
#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
@@ -194,6 +208,12 @@ int drm_encoder_init(struct drm_device *dev,
const struct drm_encoder_funcs *funcs,
int encoder_type, const char *name, ...);
+__printf(5, 6)
+int drmm_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ const struct drm_encoder_funcs *funcs,
+ int encoder_type, const char *name, ...);
+
__printf(6, 7)
void *__drmm_encoder_alloc(struct drm_device *dev,
size_t size, size_t offset,
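
The new drmm_encoder_init() is the managed counterpart of drm_encoder_init(); a sketch of its use follows. Passing NULL funcs (allowed because teardown is managed) and DRM_MODE_ENCODER_TMDS are assumptions from common usage, not spelled out in this hunk:

/* In a hypothetical modeset-init path: */
ret = drmm_encoder_init(dev, &hdev->encoder, NULL,
			DRM_MODE_ENCODER_TMDS, "hypo-encoder");
if (ret)
	return ret;
/* No explicit cleanup: the encoder is released with the drm_device. */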
diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
deleted file mode 100644
index a09864f6d684..000000000000
--- a/include/drm/drm_encoder_slave.h
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright (C) 2009 Francisco Jerez.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __DRM_ENCODER_SLAVE_H__
-#define __DRM_ENCODER_SLAVE_H__
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_encoder.h>
-
-/**
- * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver
- * @set_config: Initialize any encoder-specific modesetting parameters.
- * The meaning of the @params parameter is implementation
- * dependent. It will usually be a structure with DVO port
- * data format settings or timings. It's not required for
- * the new parameters to take effect until the next mode
- * is set.
- *
- * Most of its members are analogous to the function pointers in
- * &drm_encoder_helper_funcs and they can optionally be used to
- * initialize the latter. Connector-like methods (e.g. @get_modes and
- * @set_property) will typically be wrapped around and only be called
- * if the encoder is the currently selected one for the connector.
- */
-struct drm_encoder_slave_funcs {
- void (*set_config)(struct drm_encoder *encoder,
- void *params);
-
- void (*destroy)(struct drm_encoder *encoder);
- void (*dpms)(struct drm_encoder *encoder, int mode);
- void (*save)(struct drm_encoder *encoder);
- void (*restore)(struct drm_encoder *encoder);
- bool (*mode_fixup)(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
- int (*mode_valid)(struct drm_encoder *encoder,
- struct drm_display_mode *mode);
- void (*mode_set)(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-
- enum drm_connector_status (*detect)(struct drm_encoder *encoder,
- struct drm_connector *connector);
- int (*get_modes)(struct drm_encoder *encoder,
- struct drm_connector *connector);
- int (*create_resources)(struct drm_encoder *encoder,
- struct drm_connector *connector);
- int (*set_property)(struct drm_encoder *encoder,
- struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val);
-
-};
-
-/**
- * struct drm_encoder_slave - Slave encoder struct
- * @base: DRM encoder object.
- * @slave_funcs: Slave encoder callbacks.
- * @slave_priv: Slave encoder private data.
- * @bus_priv: Bus specific data.
- *
- * A &drm_encoder_slave has two sets of callbacks, @slave_funcs and the
- * ones in @base. The former are never actually called by the common
- * CRTC code, it's just a convenience for splitting the encoder
- * functions in an upper, GPU-specific layer and a (hopefully)
- * GPU-agnostic lower layer: It's the GPU driver responsibility to
- * call the slave methods when appropriate.
- *
- * drm_i2c_encoder_init() provides a way to get an implementation of
- * this.
- */
-struct drm_encoder_slave {
- struct drm_encoder base;
-
- const struct drm_encoder_slave_funcs *slave_funcs;
- void *slave_priv;
- void *bus_priv;
-};
-#define to_encoder_slave(x) container_of((x), struct drm_encoder_slave, base)
-
-int drm_i2c_encoder_init(struct drm_device *dev,
- struct drm_encoder_slave *encoder,
- struct i2c_adapter *adap,
- const struct i2c_board_info *info);
-
-
-/**
- * struct drm_i2c_encoder_driver
- *
- * Describes a device driver for an encoder connected to the GPU
- * through an I2C bus. In addition to the entry points in @i2c_driver
- * an @encoder_init function should be provided. It will be called to
- * give the driver an opportunity to allocate any per-encoder data
- * structures and to initialize the @slave_funcs and (optionally)
- * @slave_priv members of @encoder.
- */
-struct drm_i2c_encoder_driver {
- struct i2c_driver i2c_driver;
-
- int (*encoder_init)(struct i2c_client *client,
- struct drm_device *dev,
- struct drm_encoder_slave *encoder);
-
-};
-#define to_drm_i2c_encoder_driver(x) container_of((x), \
- struct drm_i2c_encoder_driver, \
- i2c_driver)
-
-/**
- * drm_i2c_encoder_get_client - Get the I2C client corresponding to an encoder
- */
-static inline struct i2c_client *drm_i2c_encoder_get_client(struct drm_encoder *encoder)
-{
- return (struct i2c_client *)to_encoder_slave(encoder)->bus_priv;
-}
-
-/**
- * drm_i2c_encoder_register - Register an I2C encoder driver
- * @owner: Module containing the driver.
- * @driver: Driver to be registered.
- */
-static inline int drm_i2c_encoder_register(struct module *owner,
- struct drm_i2c_encoder_driver *driver)
-{
- return i2c_register_driver(owner, &driver->i2c_driver);
-}
-
-/**
- * drm_i2c_encoder_unregister - Unregister an I2C encoder driver
- * @driver: Driver to be unregistered.
- */
-static inline void drm_i2c_encoder_unregister(struct drm_i2c_encoder_driver *driver)
-{
- i2c_del_driver(&driver->i2c_driver);
-}
-
-void drm_i2c_encoder_destroy(struct drm_encoder *encoder);
-
-
-/*
- * Wrapper fxns which can be plugged in to drm_encoder_helper_funcs:
- */
-
-void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode);
-bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-void drm_i2c_encoder_prepare(struct drm_encoder *encoder);
-void drm_i2c_encoder_commit(struct drm_encoder *encoder);
-void drm_i2c_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder,
- struct drm_connector *connector);
-void drm_i2c_encoder_save(struct drm_encoder *encoder);
-void drm_i2c_encoder_restore(struct drm_encoder *encoder);
-
-
-#endif
diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h
new file mode 100644
index 000000000000..aa786b828a0a
--- /dev/null
+++ b/include/drm/drm_exec.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef __DRM_EXEC_H__
+#define __DRM_EXEC_H__
+
+#include <linux/compiler.h>
+#include <linux/ww_mutex.h>
+
+#define DRM_EXEC_INTERRUPTIBLE_WAIT BIT(0)
+#define DRM_EXEC_IGNORE_DUPLICATES BIT(1)
+
+struct drm_gem_object;
+
+/**
+ * struct drm_exec - Execution context
+ */
+struct drm_exec {
+ /**
+ * @flags: Flags to control locking behavior
+ */
+ u32 flags;
+
+ /**
+ * @ticket: WW ticket used for acquiring locks
+ */
+ struct ww_acquire_ctx ticket;
+
+ /**
+ * @num_objects: number of objects locked
+ */
+ unsigned int num_objects;
+
+ /**
+ * @max_objects: maximum objects in array
+ */
+ unsigned int max_objects;
+
+ /**
+ * @objects: array of the locked objects
+ */
+ struct drm_gem_object **objects;
+
+ /**
+ * @contended: contended GEM object we backed off for
+ */
+ struct drm_gem_object *contended;
+
+ /**
+ * @prelocked: already locked GEM object due to contention
+ */
+ struct drm_gem_object *prelocked;
+};
+
+/**
+ * drm_exec_obj() - Return the object for a given drm_exec index
+ * @exec: Pointer to the drm_exec context
+ * @index: The index.
+ *
+ * Return: Pointer to the locked object corresponding to @index if
+ * index is within the number of locked objects. NULL otherwise.
+ */
+static inline struct drm_gem_object *
+drm_exec_obj(struct drm_exec *exec, unsigned long index)
+{
+ return index < exec->num_objects ? exec->objects[index] : NULL;
+}
+
+/**
+ * drm_exec_for_each_locked_object - iterate over all the locked objects
+ * @exec: drm_exec object
+ * @index: unsigned long index for the iteration
+ * @obj: the current GEM object
+ *
+ * Iterate over all the locked GEM objects inside the drm_exec object.
+ */
+#define drm_exec_for_each_locked_object(exec, index, obj) \
+ for ((index) = 0; ((obj) = drm_exec_obj(exec, index)); ++(index))
+
+/**
+ * drm_exec_for_each_locked_object_reverse - iterate over all the locked
+ * objects in reverse locking order
+ * @exec: drm_exec object
+ * @index: unsigned long index for the iteration
+ * @obj: the current GEM object
+ *
+ * Iterate over all the locked GEM objects inside the drm_exec object in
+ * reverse locking order. Note that @index may go below zero and wrap,
+ * but that will be caught by drm_exec_obj(), returning a NULL object.
+ */
+#define drm_exec_for_each_locked_object_reverse(exec, index, obj) \
+ for ((index) = (exec)->num_objects - 1; \
+ ((obj) = drm_exec_obj(exec, index)); --(index))
+
+/**
+ * drm_exec_until_all_locked - loop until all GEM objects are locked
+ * @exec: drm_exec object
+ *
+ * Core functionality of the drm_exec object. Loops until all GEM objects are
+ * locked and no more contention exists. At the beginning of the loop it is
+ * guaranteed that no GEM object is locked.
+ *
+ * Since labels can't be defined local to the loop's body, we use a jump pointer
+ * to make sure that the retry is only used from within the loop's body.
+ */
+#define drm_exec_until_all_locked(exec) \
+__PASTE(__drm_exec_, __LINE__): \
+ for (void *__drm_exec_retry_ptr; ({ \
+ __drm_exec_retry_ptr = &&__PASTE(__drm_exec_, __LINE__);\
+ (void)__drm_exec_retry_ptr; \
+ drm_exec_cleanup(exec); \
+ });)
+
+/**
+ * drm_exec_retry_on_contention - restart the loop to grab all locks
+ * @exec: drm_exec object
+ *
+ * Control flow helper to continue when a contention was detected and we need to
+ * clean up and re-start the loop to prepare all GEM objects.
+ */
+#define drm_exec_retry_on_contention(exec) \
+ do { \
+ if (unlikely(drm_exec_is_contended(exec))) \
+ goto *__drm_exec_retry_ptr; \
+ } while (0)
+
+/**
+ * drm_exec_is_contended - check for contention
+ * @exec: drm_exec object
+ *
+ * Returns true if the drm_exec object has run into some contention while
+ * locking a GEM object and needs to clean up.
+ */
+static inline bool drm_exec_is_contended(struct drm_exec *exec)
+{
+ return !!exec->contended;
+}
+
+void drm_exec_init(struct drm_exec *exec, u32 flags, unsigned nr);
+void drm_exec_fini(struct drm_exec *exec);
+bool drm_exec_cleanup(struct drm_exec *exec);
+int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
+void drm_exec_unlock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
+int drm_exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
+ unsigned int num_fences);
+int drm_exec_prepare_array(struct drm_exec *exec,
+ struct drm_gem_object **objects,
+ unsigned int num_objects,
+ unsigned int num_fences);
+
+#endif
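
The pieces above combine into the canonical locking loop; a sketch where objs/num_objs stand for a driver-supplied GEM object array:

struct drm_exec exec;
unsigned int i;
int ret;

drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, num_objs);
drm_exec_until_all_locked(&exec) {
	for (i = 0; i < num_objs; i++) {
		/* Lock the object and reserve room for one fence. */
		ret = drm_exec_prepare_obj(&exec, objs[i], 1);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto err_fini;
	}
}

/* ...submit work against the locked objects, add fences... */

err_fini:
	drm_exec_fini(&exec);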
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
deleted file mode 100644
index 795aea1d0a25..000000000000
--- a/include/drm/drm_fb_cma_helper.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DRM_FB_CMA_HELPER_H__
-#define __DRM_FB_CMA_HELPER_H__
-
-#include <linux/types.h>
-
-struct drm_framebuffer;
-struct drm_plane_state;
-
-struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
- unsigned int plane);
-
-dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
- struct drm_plane_state *state,
- unsigned int plane);
-
-#endif
-
diff --git a/include/drm/drm_fb_dma_helper.h b/include/drm/drm_fb_dma_helper.h
new file mode 100644
index 000000000000..c950732c6d36
--- /dev/null
+++ b/include/drm/drm_fb_dma_helper.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DRM_FB_DMA_HELPER_H__
+#define __DRM_FB_DMA_HELPER_H__
+
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_framebuffer;
+struct drm_plane;
+struct drm_plane_state;
+struct drm_scanout_buffer;
+
+struct drm_gem_dma_object *drm_fb_dma_get_gem_obj(struct drm_framebuffer *fb,
+ unsigned int plane);
+
+dma_addr_t drm_fb_dma_get_gem_addr(struct drm_framebuffer *fb,
+ struct drm_plane_state *state,
+ unsigned int plane);
+
+void drm_fb_dma_sync_non_coherent(struct drm_device *drm,
+ struct drm_plane_state *old_state,
+ struct drm_plane_state *state);
+
+int drm_fb_dma_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb);
+
+#endif
+
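
A sketch of the typical consumer of drm_fb_dma_get_gem_addr(): a plane's atomic_update hook fetching the scanout address of framebuffer plane 0 (the register programming is left as a comment, and the hypo_ name is illustrative):

static void hypo_plane_atomic_update(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	dma_addr_t dma_addr;

	if (!new_state->fb)
		return;

	dma_addr = drm_fb_dma_get_gem_addr(new_state->fb, new_state, 0);

	/* ...program dma_addr into the hypothetical scanout register... */
}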
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 3b273f9ca39a..dd9a18f8de5a 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -30,18 +30,12 @@
#ifndef DRM_FB_HELPER_H
#define DRM_FB_HELPER_H
+struct drm_clip_rect;
struct drm_fb_helper;
+#include <linux/fb.h>
+
#include <drm/drm_client.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_device.h>
-#include <linux/kgdb.h>
-#include <linux/vgaarb.h>
-
-enum mode_set_atomic {
- LEAVE_ATOMIC_MODE_SET,
- ENTER_ATOMIC_MODE_SET,
-};
/**
* struct drm_fb_helper_surface_size - describes fbdev size and scanout surface size
@@ -76,21 +70,45 @@ struct drm_fb_helper_surface_size {
*/
struct drm_fb_helper_funcs {
/**
- * @fb_probe:
+ * @fb_dirty:
+ *
+ * Driver callback to update the framebuffer memory. If set, fbdev
+ * emulation will invoke this callback at regular intervals after
+ * the framebuffer has been written.
*
- * Driver callback to allocate and initialize the fbdev info structure.
- * Furthermore it also needs to allocate the DRM framebuffer used to
- * back the fbdev.
+ * This callback is optional.
+ *
+ * Returns:
+ * 0 on success, or an error code otherwise.
+ */
+ int (*fb_dirty)(struct drm_fb_helper *helper, struct drm_clip_rect *clip);
+
+ /**
+ * @fb_restore:
*
- * This callback is mandatory.
+ * Driver callback to restore internal fbdev state. If set, fbdev
+ * emulation will invoke this callback after restoring the display
+ * mode.
*
- * RETURNS:
+ * Only for i915. Do not use in new code.
*
- * The driver should return 0 on success and a negative error code on
- * failure.
+ * TODO: Fix i915 to not require this callback.
*/
- int (*fb_probe)(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes);
+ void (*fb_restore)(struct drm_fb_helper *helper);
+
+ /**
+ * @fb_set_suspend:
+ *
+ * Driver callback to suspend or resume. If set, fbdev emulation will
+ * invoke this callback during suspend and resume. Drivers should call
+ * fb_set_suspend() from their implementation. If not set, fbdev
+ * emulation will invoke fb_set_suspend() directly.
+ *
+ * Only for i915. Do not use in new code.
+ *
+ * TODO: Fix i915 to not require this callback.
+ */
+ void (*fb_set_suspend)(struct drm_fb_helper *helper, bool suspend);
};
/**
@@ -98,7 +116,7 @@ struct drm_fb_helper_funcs {
* @fb: Scanout framebuffer object
* @dev: DRM device
* @funcs: driver callbacks for fb helper
- * @fbdev: emulated fbdev device info struct
+ * @info: emulated fbdev device info struct
* @pseudo_palette: fake palette of 16 colors
* @damage_clip: clip rectangle used with deferred_io to accumulate damage to
* the screen buffer
@@ -129,7 +147,7 @@ struct drm_fb_helper {
struct drm_framebuffer *fb;
struct drm_device *dev;
const struct drm_fb_helper_funcs *funcs;
- struct fb_info *fbdev;
+ struct fb_info *info;
u32 pseudo_palette[17];
struct drm_clip_rect damage_clip;
spinlock_t damage_lock;
@@ -186,6 +204,18 @@ struct drm_fb_helper {
* See also: @deferred_setup
*/
int preferred_bpp;
+
+#ifdef CONFIG_FB_DEFERRED_IO
+ /**
+ * @fbdefio:
+ *
+ * Temporary storage for the driver's FB deferred I/O handler. If the
+ * driver uses the DRM fbdev emulation layer, the core sets this to a
+ * generic deferred I/O handler for drivers that prefer to use a
+ * shadow buffer.
+ */
+ struct fb_deferred_io fbdefio;
+#endif
};
static inline struct drm_fb_helper *
@@ -212,7 +242,9 @@ drm_fb_helper_from_client(struct drm_client_dev *client)
#ifdef CONFIG_DRM_FBDEV_EMULATION
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
+ unsigned int preferred_bpp,
const struct drm_fb_helper_funcs *funcs);
+void drm_fb_helper_unprepare(struct drm_fb_helper *fb_helper);
int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *helper);
void drm_fb_helper_fini(struct drm_fb_helper *helper);
int drm_fb_helper_blank(int blank, struct fb_info *info);
@@ -222,35 +254,20 @@ int drm_fb_helper_set_par(struct fb_info *info);
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
-int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
+int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
+ bool force);
-struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
-void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
+void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper);
void drm_fb_helper_fill_info(struct fb_info *info,
struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes);
-void drm_fb_helper_deferred_io(struct fb_info *info,
- struct list_head *pagelist);
+void drm_fb_helper_damage_range(struct fb_info *info, off_t off, size_t len);
+void drm_fb_helper_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u32 height);
-ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
- size_t count, loff_t *ppos);
-ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
- size_t count, loff_t *ppos);
-
-void drm_fb_helper_sys_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect);
-void drm_fb_helper_sys_copyarea(struct fb_info *info,
- const struct fb_copyarea *area);
-void drm_fb_helper_sys_imageblit(struct fb_info *info,
- const struct fb_image *image);
-
-void drm_fb_helper_cfb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect);
-void drm_fb_helper_cfb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area);
-void drm_fb_helper_cfb_imageblit(struct fb_info *info,
- const struct fb_image *image);
+#ifdef CONFIG_FB_DEFERRED_IO
+void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist);
+#endif
void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend);
void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
@@ -262,19 +279,18 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg);
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
-int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
+int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
-
-void drm_fb_helper_lastclose(struct drm_device *dev);
-void drm_fb_helper_output_poll_changed(struct drm_device *dev);
-
-void drm_fbdev_generic_setup(struct drm_device *dev,
- unsigned int preferred_bpp);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
- struct drm_fb_helper *helper,
- const struct drm_fb_helper_funcs *funcs)
+ struct drm_fb_helper *helper,
+ unsigned int preferred_bpp,
+ const struct drm_fb_helper_funcs *funcs)
+{
+}
+
+static inline void drm_fb_helper_unprepare(struct drm_fb_helper *fb_helper)
{
}
@@ -322,13 +338,7 @@ drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
return 0;
}
-static inline struct fb_info *
-drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
-{
- return NULL;
-}
-
-static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
+static inline void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper)
{
}
@@ -351,59 +361,12 @@ static inline int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
return 0;
}
+#ifdef CONFIG_FB_DEFERRED_IO
static inline void drm_fb_helper_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
}
-
-static inline int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper)
-{
- return -ENODEV;
-}
-
-static inline ssize_t drm_fb_helper_sys_read(struct fb_info *info,
- char __user *buf, size_t count,
- loff_t *ppos)
-{
- return -ENODEV;
-}
-
-static inline ssize_t drm_fb_helper_sys_write(struct fb_info *info,
- const char __user *buf,
- size_t count, loff_t *ppos)
-{
- return -ENODEV;
-}
-
-static inline void drm_fb_helper_sys_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
-}
-
-static inline void drm_fb_helper_sys_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
-}
-
-static inline void drm_fb_helper_sys_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
-}
-
-static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
-}
-
-static inline void drm_fb_helper_cfb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
-}
-
-static inline void drm_fb_helper_cfb_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
-}
+#endif
static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper,
bool suspend)
@@ -420,8 +383,7 @@ static inline int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
return 0;
}
-static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper,
- int bpp_sel)
+static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper)
{
return 0;
}
@@ -435,70 +397,6 @@ static inline int drm_fb_helper_debug_leave(struct fb_info *info)
{
return 0;
}
-
-static inline void drm_fb_helper_lastclose(struct drm_device *dev)
-{
-}
-
-static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
-{
-}
-
-static inline void
-drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
-{
-}
-
-#endif
-
-/**
- * drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers
- * @a: memory range, users of which are to be removed
- * @name: requesting driver name
- * @primary: also kick vga16fb if present
- *
- * This function removes framebuffer devices (initialized by firmware/bootloader)
- * which use memory range described by @a. If @a is NULL all such devices are
- * removed.
- */
-static inline int
-drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
-{
-#if IS_REACHABLE(CONFIG_FB)
- return remove_conflicting_framebuffers(a, name, primary);
-#else
- return 0;
-#endif
-}
-
-/**
- * drm_fb_helper_remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
- * @pdev: PCI device
- * @name: requesting driver name
- *
- * This function removes framebuffer devices (eg. initialized by firmware)
- * using memory range configured for any of @pdev's memory bars.
- *
- * The function assumes that PCI device with shadowed ROM drives a primary
- * display and so kicks out vga16fb.
- */
-static inline int
-drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
- const char *name)
-{
- int ret = 0;
-
- /*
- * WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over.
- */
-#if IS_REACHABLE(CONFIG_FB)
- ret = remove_conflicting_pci_framebuffers(pdev, name);
#endif
- if (ret == 0)
- ret = vga_remove_vgacon(pdev);
- return ret;
-}
#endif
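
Editor's note: taken together, the new entry points change the fbdev setup flow:
the preferred bpp moves from drm_fb_helper_initial_config() into
drm_fb_helper_prepare(), with drm_fb_helper_unprepare() covering the error path.
A hedged sketch (my_fbdev_setup() and my_fb_funcs are illustrative):

    static int my_fbdev_setup(struct drm_device *dev, struct drm_fb_helper *helper)
    {
            int ret;

            drm_fb_helper_prepare(dev, helper, 32, &my_fb_funcs);

            ret = drm_fb_helper_init(dev, helper);
            if (ret)
                    goto err_unprepare;

            ret = drm_fb_helper_initial_config(helper); /* no bpp argument anymore */
            if (ret)
                    goto err_fini;

            return 0;

    err_fini:
            drm_fb_helper_fini(helper);
    err_unprepare:
            drm_fb_helper_unprepare(helper);
            return ret;
    }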
diff --git a/include/drm/drm_fbdev_dma.h b/include/drm/drm_fbdev_dma.h
new file mode 100644
index 000000000000..fb3f2a9aa01a
--- /dev/null
+++ b/include/drm/drm_fbdev_dma.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_FBDEV_DMA_H
+#define DRM_FBDEV_DMA_H
+
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes);
+
+#define DRM_FBDEV_DMA_DRIVER_OPS \
+ .fbdev_probe = drm_fbdev_dma_driver_fbdev_probe
+#else
+#define DRM_FBDEV_DMA_DRIVER_OPS \
+ .fbdev_probe = NULL
+#endif
+
+#endif
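
Editor's note: a sketch of how this macro is meant to be used, assuming the
companion DRM_GEM_DMA_DRIVER_OPS macro from the GEM-DMA helpers; the surrounding
fields are illustrative, not prescribed by the patch.

    static const struct drm_driver my_driver = {
            .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
            DRM_GEM_DMA_DRIVER_OPS,         /* dumb-buffer and PRIME plumbing */
            DRM_FBDEV_DMA_DRIVER_OPS,       /* sets .fbdev_probe shown above */
            .name = "mydrv",
            .desc = "Illustrative DRM driver",
    };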
diff --git a/include/drm/drm_fbdev_shmem.h b/include/drm/drm_fbdev_shmem.h
new file mode 100644
index 000000000000..2fc708964d75
--- /dev/null
+++ b/include/drm/drm_fbdev_shmem.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_FBDEV_SHMEM_H
+#define DRM_FBDEV_SHMEM_H
+
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes);
+
+#define DRM_FBDEV_SHMEM_DRIVER_OPS \
+ .fbdev_probe = drm_fbdev_shmem_driver_fbdev_probe
+#else
+#define DRM_FBDEV_SHMEM_DRIVER_OPS \
+ .fbdev_probe = NULL
+#endif
+
+#endif
diff --git a/include/drm/drm_fbdev_ttm.h b/include/drm/drm_fbdev_ttm.h
new file mode 100644
index 000000000000..ad4a10bb4c78
--- /dev/null
+++ b/include/drm/drm_fbdev_ttm.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_FBDEV_TTM_H
+#define DRM_FBDEV_TTM_H
+
+#include <linux/stddef.h>
+
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes);
+
+#define DRM_FBDEV_TTM_DRIVER_OPS \
+ .fbdev_probe = drm_fbdev_ttm_driver_fbdev_probe
+#else
+#define DRM_FBDEV_TTM_DRIVER_OPS \
+ .fbdev_probe = NULL
+#endif
+
+#endif
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index b81b3bfb08c8..1a3018e4a537 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -41,21 +41,28 @@
struct dma_fence;
struct drm_file;
struct drm_device;
+struct drm_printer;
struct device;
struct file;
+extern struct xarray drm_minors_xa;
+
/*
* FIXME: Not sure we want to have drm_minor here in the end, but to avoid
* header include loops we need it here for now.
*/
-/* Note that the order of this enum is ABI (it determines
+/* Note that the values of this enum are ABI (it determines
* /dev/dri/renderD* numbers).
+ *
+ * Setting DRM_MINOR_ACCEL to 32 leaves enough space for further drm minor
+ * types to be added before they collide with the accel range.
*/
enum drm_minor_type {
- DRM_MINOR_PRIMARY,
- DRM_MINOR_CONTROL,
- DRM_MINOR_RENDER,
+ DRM_MINOR_PRIMARY = 0,
+ DRM_MINOR_CONTROL = 1,
+ DRM_MINOR_RENDER = 2,
+ DRM_MINOR_ACCEL = 32,
};
/**
@@ -70,14 +77,12 @@ enum drm_minor_type {
struct drm_minor {
/* private: */
int index; /* Minor device number */
- int type; /* Control or render */
+ int type; /* Control or render or accel */
struct device *kdev; /* Linux device */
struct drm_device *dev;
+ struct dentry *debugfs_symlink;
struct dentry *debugfs_root;
-
- struct list_head debugfs_list;
- struct mutex debugfs_lock; /* Protects debugfs_list. */
};
/**
@@ -202,6 +207,13 @@ struct drm_file {
bool writeback_connectors;
/**
+ * @plane_color_pipeline:
+ *
+ * True if client understands plane color pipelines
+ */
+ bool plane_color_pipeline;
+
+ /**
* @was_master:
*
* This client has or had, master capability. Protected by struct
@@ -224,19 +236,57 @@ struct drm_file {
bool is_master;
/**
+ * @supports_virtualized_cursor_plane:
+ *
+ * This client is capable of handling the cursor plane with the
+ * restrictions imposed on it by the virtualized drivers.
+ *
+ * This implies that the cursor plane has to behave like a cursor,
+ * i.e. track cursor movement. It also requires setting of the
+ * hotspot properties by the client on the cursor plane.
+ */
+ bool supports_virtualized_cursor_plane;
+
+ /**
* @master:
*
- * Master this node is currently associated with. Only relevant if
- * drm_is_primary_client() returns true. Note that this only
- * matches &drm_device.master if the master is the currently active one.
+ * Master this node is currently associated with. Protected by struct
+ * &drm_device.master_mutex, and serialized by @master_lookup_lock.
+ *
+ * Only relevant if drm_is_primary_client() returns true. Note that
+ * this only matches &drm_device.master if the master is the currently
+ * active one.
+ *
+ * To update @master, both &drm_device.master_mutex and
+ * @master_lookup_lock need to be held, therefore holding either of
+ * them is safe and enough for the read side.
+ *
+ * When dereferencing this pointer, either hold struct
+ * &drm_device.master_mutex for the duration of the pointer's use, or
+ * use drm_file_get_master() if struct &drm_device.master_mutex is not
+ * currently held and there is no other need to hold it. This prevents
+ * @master from being freed during use.
*
* See also @authentication and @is_master and the :ref:`section on
* primary nodes and authentication <drm_primary_node>`.
*/
struct drm_master *master;
- /** @pid: Process that opened this file. */
- struct pid *pid;
+ /** @master_lookup_lock: Serializes @master. */
+ spinlock_t master_lookup_lock;
+
+ /**
+ * @pid: Process that is using this file.
+ *
+ * Must only be dereferenced under an rcu_read_lock() or equivalent.
+ *
+ * Updates are guarded by dev->filelist_mutex, and the reference must be
+ * dropped after an RCU grace period to accommodate lockless readers.
+ */
+ struct pid __rcu *pid;
+
+ /** @client_id: A unique id for fdinfo */
+ u64 client_id;
/** @magic: Authentication magic, see @authenticated. */
drm_magic_t magic;
@@ -257,6 +307,9 @@ struct drm_file {
*
* Mapping of mm object handles to object pointers. Used by the GEM
* subsystem. Protected by @table_lock.
+ *
+ * Note that allocated entries might be NULL as a transient state when
+ * creating or deleting a handle.
*/
struct idr object_idr;
@@ -346,10 +399,24 @@ struct drm_file {
*/
struct drm_prime_file_private prime;
- /* private: */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
- unsigned long lock_count; /* DRI1 legacy lock count */
-#endif
+ /**
+ * @client_name:
+ *
+ * Userspace-provided name; useful for accounting and debugging.
+ */
+ const char *client_name;
+
+ /**
+ * @client_name_lock: Protects @client_name.
+ */
+ struct mutex client_name_lock;
+
+ /**
+ * @debugfs_client:
+ *
+ * debugfs directory for each client under a drm node.
+ */
+ struct dentry *debugfs_client;
};
/**
@@ -381,7 +448,31 @@ static inline bool drm_is_render_client(const struct drm_file *file_priv)
return file_priv->minor->type == DRM_MINOR_RENDER;
}
+/**
+ * drm_is_accel_client - is this an open file of the compute acceleration node
+ * @file_priv: DRM file
+ *
+ * Returns true if this is an open file of the compute acceleration node, i.e.
+ * &drm_file.minor of @file_priv is an accel minor.
+ *
+ * See also :doc:`Introduction to compute accelerators subsystem
+ * </accel/introduction>`.
+ */
+static inline bool drm_is_accel_client(const struct drm_file *file_priv)
+{
+ return file_priv->minor->type == DRM_MINOR_ACCEL;
+}
+
+__printf(2, 3)
+void drm_file_err(struct drm_file *file_priv, const char *fmt, ...);
+
+void drm_file_update_pid(struct drm_file *);
+
+struct drm_minor *drm_minor_acquire(struct xarray *minors_xa, unsigned int minor_id);
+void drm_minor_release(struct drm_minor *minor);
+
int drm_open(struct inode *inode, struct file *filp);
+int drm_open_helper(struct file *filp, struct drm_minor *minor);
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
int drm_release(struct inode *inode, struct file *filp);
@@ -403,15 +494,40 @@ void drm_send_event_timestamp_locked(struct drm_device *dev,
struct drm_pending_event *e,
ktime_t timestamp);
-struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);
+/**
+ * struct drm_memory_stats - GEM object stats associated
+ * @shared: Total size of GEM objects shared between processes
+ * @private: Total size of GEM objects
+ * @resident: Total size of GEM objects backing pages
+ * @purgeable: Total size of GEM objects that can be purged (resident and not active)
+ * @active: Total size of GEM objects active on one or more engines
+ *
+ * Used by drm_print_memory_stats()
+ */
+struct drm_memory_stats {
+ u64 shared;
+ u64 private;
+ u64 resident;
+ u64 purgeable;
+ u64 active;
+};
-#ifdef CONFIG_MMU
-struct drm_vma_offset_manager;
-unsigned long drm_get_unmapped_area(struct file *file,
- unsigned long uaddr, unsigned long len,
- unsigned long pgoff, unsigned long flags,
- struct drm_vma_offset_manager *mgr);
-#endif /* CONFIG_MMU */
+enum drm_gem_object_status;
+int drm_memory_stats_is_zero(const struct drm_memory_stats *stats);
+void drm_fdinfo_print_size(struct drm_printer *p,
+ const char *prefix,
+ const char *stat,
+ const char *region,
+ u64 sz);
+void drm_print_memory_stats(struct drm_printer *p,
+ const struct drm_memory_stats *stats,
+ enum drm_gem_object_status supported_status,
+ const char *region);
+
+void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file);
+void drm_show_fdinfo(struct seq_file *m, struct file *f);
+
+struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);
#endif /* _DRM_FILE_H_ */
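
Editor's note: drivers that cannot use drm_show_memory_stats() directly can pair
the new struct with drm_print_memory_stats() in a &drm_driver.show_fdinfo hook.
A sketch under assumptions: my_fill_stats() stands in for a driver-specific walk
over its BOs, and the supported-status mask is illustrative.

    static void my_show_fdinfo(struct drm_printer *p, struct drm_file *file)
    {
            struct drm_memory_stats stats = {};

            my_fill_stats(file, &stats);    /* sum shared/private/resident/... */

            drm_print_memory_stats(p, &stats,
                                   DRM_GEM_OBJECT_RESIDENT |
                                   DRM_GEM_OBJECT_PURGEABLE,
                                   "memory");
    }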
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index 553210c02ee0..33de514a5221 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -26,6 +26,8 @@
#define DRM_FIXED_H
#include <linux/math64.h>
+#include <linux/types.h>
+#include <linux/wordpart.h>
typedef union dfixed {
u32 full;
@@ -76,6 +78,23 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
#define DRM_FIXED_EPSILON 1LL
#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON)
+/**
+ * drm_sm2fixp - convert signed-magnitude fixed point to 2's complement
+ * @a: the 1.31.32 signed-magnitude fixed-point value to convert
+ *
+ * Convert a 1.31.32 signed-magnitude fixed-point value to a 32.32
+ * 2's-complement fixed-point value.
+ *
+ * Returns:
+ * The converted value as s64 2's-complement fixed point.
+ */
+static inline s64 drm_sm2fixp(__u64 a)
+{
+ if ((a & (1LL << 63))) {
+ return -(a & 0x7fffffffffffffffll);
+ } else {
+ return a;
+ }
+}
+
static inline s64 drm_int2fixp(int a)
{
return ((s64)a) << DRM_FIXED_POINT;
@@ -86,9 +105,14 @@ static inline int drm_fixp2int(s64 a)
return ((s64)a) >> DRM_FIXED_POINT;
}
+static inline int drm_fixp2int_round(s64 a)
+{
+ return drm_fixp2int(a + DRM_FIXED_ONE / 2);
+}
+
static inline int drm_fixp2int_ceil(s64 a)
{
- if (a > 0)
+ if (a >= 0)
return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
else
return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
@@ -208,4 +232,27 @@ static inline s64 drm_fixp_exp(s64 x)
return sum;
}
+static inline int fxp_q4_from_int(int val_int)
+{
+ return val_int << 4;
+}
+
+static inline int fxp_q4_to_int(int val_q4)
+{
+ return val_q4 >> 4;
+}
+
+static inline int fxp_q4_to_int_roundup(int val_q4)
+{
+ return (val_q4 + 0xf) >> 4;
+}
+
+static inline int fxp_q4_to_frac(int val_q4)
+{
+ return val_q4 & 0xf;
+}
+
+#define FXP_Q4_FMT "%d.%04d"
+#define FXP_Q4_ARGS(val_q4) fxp_q4_to_int(val_q4), (fxp_q4_to_frac(val_q4) * 625)
+
#endif
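
Editor's note: the Q4 helpers store values in units of 1/16, which is why
FXP_Q4_ARGS() scales the fractional nibble by 625 (1/16 = 0.0625 = 625/10000)
to print four decimal places. A small worked example; the bpp variable is
illustrative:

    int bpp_x16 = fxp_q4_from_int(42) | 8;  /* 42 + 8/16 = 42.5, i.e. 680 */

    /* fxp_q4_to_int(680) == 42, fxp_q4_to_frac(680) * 625 == 5000 */
    printk(KERN_INFO "link bpp: " FXP_Q4_FMT "\n", FXP_Q4_ARGS(bpp_x16));
    /* -> "link bpp: 42.5000" */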
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
index 21c3d512d25c..1eef3283a109 100644
--- a/include/drm/drm_flip_work.h
+++ b/include/drm/drm_flip_work.h
@@ -31,11 +31,10 @@
/**
* DOC: flip utils
*
- * Util to queue up work to run from work-queue context after flip/vblank.
+ * Utility to queue up work to run from work-queue context after flip/vblank.
 * Typically this can be used to defer unref of framebuffers, cursor
- * bo's, etc until after vblank. The APIs are all thread-safe.
- * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called
- * in atomic context.
+ * BOs, etc. until after vblank. The APIs are all thread-safe. Moreover,
+ * drm_flip_work_commit() can be called in atomic context.
*/
struct drm_flip_work;
@@ -52,16 +51,6 @@ struct drm_flip_work;
typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
/**
- * struct drm_flip_task - flip work task
- * @node: list entry element
- * @data: data to pass to &drm_flip_work.func
- */
-struct drm_flip_task {
- struct list_head node;
- void *data;
-};
-
-/**
* struct drm_flip_work - flip work queue
* @name: debug name
* @func: callback fxn called for each committed item
@@ -79,9 +68,6 @@ struct drm_flip_work {
spinlock_t lock;
};
-struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags);
-void drm_flip_work_queue_task(struct drm_flip_work *work,
- struct drm_flip_task *task);
void drm_flip_work_queue(struct drm_flip_work *work, void *val);
void drm_flip_work_commit(struct drm_flip_work *work,
struct workqueue_struct *wq);
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index 5f9e37032468..2b5c1aef80b0 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -6,26 +6,134 @@
#ifndef __LINUX_DRM_FORMAT_HELPER_H
#define __LINUX_DRM_FORMAT_HELPER_H
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_format_info;
struct drm_framebuffer;
struct drm_rect;
-void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip);
-void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr,
- struct drm_framebuffer *fb,
- struct drm_rect *clip);
-void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb,
- struct drm_rect *clip, bool cached);
-void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
- struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swab);
-void drm_fb_xrgb8888_to_rgb565_dstclip(void __iomem *dst, unsigned int dst_pitch,
- void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swab);
-void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch,
- void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip);
-void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip);
+struct iosys_map;
+
+/**
+ * struct drm_format_conv_state - Stores format-conversion state
+ *
+ * DRM helpers for format conversion store temporary state in
+ * struct drm_format_conv_state. The buffer's resources can be reused
+ * among multiple conversion operations.
+ *
+ * All fields are considered private.
+ */
+struct drm_format_conv_state {
+ /* private: */
+ struct {
+ void *mem;
+ size_t size;
+ bool preallocated;
+ } tmp;
+};
+
+#define __DRM_FORMAT_CONV_STATE_INIT(_mem, _size, _preallocated) { \
+ .tmp = { \
+ .mem = (_mem), \
+ .size = (_size), \
+ .preallocated = (_preallocated), \
+ } \
+ }
+
+/**
+ * DRM_FORMAT_CONV_STATE_INIT - Initializer for struct drm_format_conv_state
+ *
+ * Initializes an instance of struct drm_format_conv_state to default values.
+ */
+#define DRM_FORMAT_CONV_STATE_INIT \
+ __DRM_FORMAT_CONV_STATE_INIT(NULL, 0, false)
+
+/**
+ * DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED - Initializer for struct drm_format_conv_state
+ * @_mem: The preallocated memory area
+ * @_size: The number of bytes in _mem
+ *
+ * Initializes an instance of struct drm_format_conv_state to preallocated
+ * storage. The caller is responsible for releasing the provided memory range.
+ */
+#define DRM_FORMAT_CONV_STATE_INIT_PREALLOCATED(_mem, _size) \
+ __DRM_FORMAT_CONV_STATE_INIT(_mem, _size, true)
+
+void drm_format_conv_state_init(struct drm_format_conv_state *state);
+void drm_format_conv_state_copy(struct drm_format_conv_state *state,
+ const struct drm_format_conv_state *old_state);
+void *drm_format_conv_state_reserve(struct drm_format_conv_state *state,
+ size_t new_size, gfp_t flags);
+void drm_format_conv_state_release(struct drm_format_conv_state *state);
+
+unsigned int drm_fb_clip_offset(unsigned int pitch, const struct drm_format_info *format,
+ const struct drm_rect *clip);
+
+void drm_fb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip);
+void drm_fb_swab(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, bool cached,
+ struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_rgb565be(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_bgrx8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_gray8(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_argb8888_to_argb4444(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+
+void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+
+void drm_fb_xrgb8888_to_gray2(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
#endif /* __LINUX_DRM_FORMAT_HELPER_H */
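
Editor's note: a sketch of the new calling convention. The caller supplies a
struct drm_format_conv_state for the helper's temporary buffers and releases it
afterwards; my_flush_rect() is hypothetical, and passing a NULL dst_pitch is
assumed to select the default pitch.

    static void my_flush_rect(struct iosys_map *dst, const struct iosys_map *src,
                              struct drm_framebuffer *fb,
                              const struct drm_rect *clip)
    {
            struct drm_format_conv_state conv = DRM_FORMAT_CONV_STATE_INIT;

            /* XRGB8888 -> RGB565, converting only the damaged rectangle */
            drm_fb_xrgb8888_to_rgb565(dst, NULL, src, fb, clip, &conv);

            drm_format_conv_state_release(&conv);
    }

Shadow-plane drivers do not need their own instance; they can pass the
fmtcnv_state embedded in struct drm_shadow_plane_state (see the
drm_gem_atomic_helper.h hunk later in this patch).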
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 156b122c0ad5..471784426857 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -22,9 +22,15 @@
#ifndef __DRM_FOURCC_H__
#define __DRM_FOURCC_H__
+#include <linux/math.h>
#include <linux/types.h>
#include <uapi/drm/drm_fourcc.h>
+/**
+ * DRM_FORMAT_MAX_PLANES - maximum number of planes a DRM format can have
+ */
+#define DRM_FORMAT_MAX_PLANES 4u
+
/*
* DRM formats are little endian. Define host endian variants for the
* most common formats here, to reduce the #ifdefs needed in drivers.
@@ -48,7 +54,6 @@
#endif
struct drm_device;
-struct drm_mode_fb_cmd2;
/**
* struct drm_format_info - information about a DRM format
@@ -78,7 +83,7 @@ struct drm_format_info {
* triplet @char_per_block, @block_w, @block_h for better
* describing the pixel format.
*/
- u8 cpp[4];
+ u8 cpp[DRM_FORMAT_MAX_PLANES];
/**
* @char_per_block:
@@ -104,7 +109,7 @@ struct drm_format_info {
* information from their drm_mode_config.get_format_info hook
* if they want the core to be validating the pitch.
*/
- u8 char_per_block[4];
+ u8 char_per_block[DRM_FORMAT_MAX_PLANES];
};
/**
@@ -113,7 +118,7 @@ struct drm_format_info {
* Block width in pixels, this is intended to be accessed through
* drm_format_info_block_width()
*/
- u8 block_w[4];
+ u8 block_w[DRM_FORMAT_MAX_PLANES];
/**
* @block_h:
@@ -121,7 +126,7 @@ struct drm_format_info {
* Block height in pixels, this is intended to be accessed through
* drm_format_info_block_height()
*/
- u8 block_h[4];
+ u8 block_h[DRM_FORMAT_MAX_PLANES];
/** @hsub: Horizontal chroma subsampling factor */
u8 hsub;
@@ -133,14 +138,9 @@ struct drm_format_info {
/** @is_yuv: Is it a YUV format? */
bool is_yuv;
-};
-/**
- * struct drm_format_name_buf - name of a DRM format
- * @str: string buffer containing the format name
- */
-struct drm_format_name_buf {
- char str[32];
+ /** @is_color_indexed: Is it a color-indexed format? */
+ bool is_color_indexed;
};
/**
@@ -279,7 +279,7 @@ int drm_format_info_plane_width(const struct drm_format_info *info, int width,
if (plane == 0)
return width;
- return width / info->hsub;
+ return DIV_ROUND_UP(width, info->hsub);
}
/**
@@ -301,23 +301,24 @@ int drm_format_info_plane_height(const struct drm_format_info *info, int height,
if (plane == 0)
return height;
- return height / info->vsub;
+ return DIV_ROUND_UP(height, info->vsub);
}
const struct drm_format_info *__drm_format_info(u32 format);
const struct drm_format_info *drm_format_info(u32 format);
const struct drm_format_info *
drm_get_format_info(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd);
+ u32 pixel_format, u64 modifier);
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
uint32_t drm_driver_legacy_fb_format(struct drm_device *dev,
uint32_t bpp, uint32_t depth);
+uint32_t drm_driver_color_mode_format(struct drm_device *dev, unsigned int color_mode);
unsigned int drm_format_info_block_width(const struct drm_format_info *info,
int plane);
unsigned int drm_format_info_block_height(const struct drm_format_info *info,
int plane);
+unsigned int drm_format_info_bpp(const struct drm_format_info *info, int plane);
uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
int plane, unsigned int buffer_width);
-const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf);
#endif /* __DRM_FOURCC_H__ */
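
Editor's note: the switch to DIV_ROUND_UP() matters for odd-sized YUV buffers,
where the chroma planes now round up instead of truncating. For example, with
NV12 (2x2 subsampled):

    const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);

    /* 1919x1079 luma -> 960x540 chroma; previously truncated to 959x539 */
    int chroma_w = drm_format_info_plane_width(info, 1919, 1);
    int chroma_h = drm_format_info_plane_height(info, 1079, 1);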
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index be658ebbec72..38b24fc8978d 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -23,16 +23,17 @@
#ifndef __DRM_FRAMEBUFFER_H__
#define __DRM_FRAMEBUFFER_H__
+#include <linux/bits.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/sched.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_mode_object.h>
struct drm_clip_rect;
struct drm_device;
struct drm_file;
-struct drm_format_info;
struct drm_framebuffer;
struct drm_gem_object;
@@ -100,6 +101,8 @@ struct drm_framebuffer_funcs {
unsigned num_clips);
};
+#define DRM_FRAMEBUFFER_HAS_HANDLE_REF(_i) BIT(0u + (_i))
+
/**
* struct drm_framebuffer - frame buffer object
*
@@ -147,17 +150,17 @@ struct drm_framebuffer {
* @pitches: Line stride per buffer. For userspace created object this
* is copied from drm_mode_fb_cmd2.
*/
- unsigned int pitches[4];
+ unsigned int pitches[DRM_FORMAT_MAX_PLANES];
/**
* @offsets: Offset from buffer start to the actual pixel data in bytes,
* per buffer. For userspace created object this is copied from
* drm_mode_fb_cmd2.
*
* Note that this is a linear offset and does not take into account
- * tiling or buffer laytou per @modifier. It meant to be used when the
- * actual pixel data for this framebuffer plane starts at an offset,
- * e.g. when multiple planes are allocated within the same backing
- * storage buffer object. For tiled layouts this generally means it
+ * tiling or buffer layout per @modifier. It is meant to be used when
+ * the actual pixel data for this framebuffer plane starts at an offset,
+ * e.g. when multiple planes are allocated within the same backing
+ * storage buffer object. For tiled layouts this generally means its
* @offsets must at least be tile-size aligned, but hardware often has
* stricter requirements.
*
@@ -165,7 +168,7 @@ struct drm_framebuffer {
* data (even for linear buffers). Specifying an x/y pixel offset is
* instead done through the source rectangle in &struct drm_plane_state.
*/
- unsigned int offsets[4];
+ unsigned int offsets[DRM_FORMAT_MAX_PLANES];
/**
* @modifier: Data layout modifier. This is used to describe
* tiling, or also special layouts (like compression) of auxiliary
@@ -189,17 +192,9 @@ struct drm_framebuffer {
*/
int flags;
/**
- * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor
- * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
- * universal plane.
- */
- int hot_x;
- /**
- * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor
- * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
- * universal plane.
+ * @internal_flags: Framebuffer flags like DRM_FRAMEBUFFER_HAS_HANDLE_REF.
*/
- int hot_y;
+ unsigned int internal_flags;
/**
* @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock.
*/
@@ -210,7 +205,7 @@ struct drm_framebuffer {
* This is used by the GEM framebuffer helpers, see e.g.
* drm_gem_fb_create().
*/
- struct drm_gem_object *obj[4];
+ struct drm_gem_object *obj[DRM_FORMAT_MAX_PLANES];
};
#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base)
@@ -292,11 +287,6 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p,
&fb->head != (&(dev)->mode_config.fb_list); \
fb = list_next_entry(fb, head))
-int drm_framebuffer_plane_width(int width,
- const struct drm_framebuffer *fb, int plane);
-int drm_framebuffer_plane_height(int height,
- const struct drm_framebuffer *fb, int plane);
-
/**
* struct drm_afbc_framebuffer - a special afbc frame buffer object
*
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 240049566592..8d48d2af2649 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -35,14 +35,38 @@
*/
#include <linux/kref.h>
+#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
#include <drm/drm_vma_manager.h>
-struct dma_buf_map;
+struct iosys_map;
struct drm_gem_object;
/**
+ * enum drm_gem_object_status - bitmask of object state for fdinfo reporting
+ * @DRM_GEM_OBJECT_RESIDENT: object is resident in memory (i.e. not unpinned)
+ * @DRM_GEM_OBJECT_PURGEABLE: object marked as purgeable by userspace
+ * @DRM_GEM_OBJECT_ACTIVE: object is currently used by an active submission
+ *
+ * Bitmask of status used for fdinfo memory stats, see &drm_gem_object_funcs.status
+ * and drm_show_fdinfo(). Note that an object can report DRM_GEM_OBJECT_PURGEABLE
+ * and be active or not resident, in which case drm_show_fdinfo() will not
+ * account for it as purgeable. So drivers do not need to check if the buffer
+ * is idle and resident to return this bit, i.e. userspace can mark a buffer as
+ * purgeable even while it is still busy on the GPU. It will not get reported in
+ * the purgeable stats until it becomes idle. The &drm_gem_object_funcs.status
+ * callback does not need to consider this.
+ */
+enum drm_gem_object_status {
+ DRM_GEM_OBJECT_RESIDENT = BIT(0),
+ DRM_GEM_OBJECT_PURGEABLE = BIT(1),
+ DRM_GEM_OBJECT_ACTIVE = BIT(2),
+};
+
+/**
* struct drm_gem_object_funcs - GEM object functions
*/
struct drm_gem_object_funcs {
@@ -102,7 +126,8 @@ struct drm_gem_object_funcs {
/**
* @pin:
*
- * Pin backing buffer in memory. Used by the drm_gem_map_attach() helper.
+ * Pin backing buffer in memory, such that dma-buf importers can
+ * access it. Used by the drm_gem_map_attach() helper.
*
* This callback is optional.
*/
@@ -135,21 +160,23 @@ struct drm_gem_object_funcs {
* @vmap:
*
* Returns a virtual address for the buffer. Used by the
- * drm_gem_dmabuf_vmap() helper.
+ * drm_gem_dmabuf_vmap() helper. Called with a held GEM reservation
+ * lock.
*
* This callback is optional.
*/
- int (*vmap)(struct drm_gem_object *obj, struct dma_buf_map *map);
+ int (*vmap)(struct drm_gem_object *obj, struct iosys_map *map);
/**
* @vunmap:
*
* Releases the address previously returned by @vmap. Used by the
- * drm_gem_dmabuf_vunmap() helper.
+ * drm_gem_dmabuf_vunmap() helper. Called with a held GEM reservation
+ * lock.
*
* This callback is optional.
*/
- void (*vunmap)(struct drm_gem_object *obj, struct dma_buf_map *map);
+ void (*vunmap)(struct drm_gem_object *obj, struct iosys_map *map);
/**
* @mmap:
@@ -165,6 +192,39 @@ struct drm_gem_object_funcs {
int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
/**
+ * @evict:
+ *
+ * Evicts gem object out from memory. Used by the drm_gem_object_evict()
+ * helper. Returns 0 on success, -errno otherwise. Called with a held
+ * GEM reservation lock.
+ *
+ * This callback is optional.
+ */
+ int (*evict)(struct drm_gem_object *obj);
+
+ /**
+ * @status:
+ *
+ * The optional status callback can return additional object state
+ * which determines which stats the object is counted against. The
+ * callback is called under table_lock. Racing against object status
+ * change is "harmless", and the callback can expect to not race
+ * against object destruction.
+ *
+ * Called by drm_show_memory_stats().
+ */
+ enum drm_gem_object_status (*status)(struct drm_gem_object *obj);
+
+ /**
+ * @rss:
+ *
+ * Return resident size of the object in physical memory.
+ *
+ * Called by drm_show_memory_stats().
+ */
+ size_t (*rss)(struct drm_gem_object *obj);
+
+ /**
* @vm_ops:
*
* Virtual memory operations used with mmap.
@@ -175,6 +235,41 @@ struct drm_gem_object_funcs {
};
/**
+ * struct drm_gem_lru - A simple LRU helper
+ *
+ * A helper for tracking GEM objects in a given state, to aid in
+ * driver's shrinker implementation. Tracks the count of pages
+ * for lockless &shrinker.count_objects, and provides
+ * &drm_gem_lru_scan for driver's &shrinker.scan_objects
+ * implementation.
+ */
+struct drm_gem_lru {
+ /**
+ * @lock:
+ *
+ * Lock protecting movement of GEM objects between LRUs. All
+ * LRUs that the object can move between should be protected
+ * by the same lock.
+ */
+ struct mutex *lock;
+
+ /**
+ * @count:
+ *
+ * The total number of backing pages of the GEM objects in
+ * this LRU.
+ */
+ long count;
+
+ /**
+ * @list:
+ *
+ * The LRU list.
+ */
+ struct list_head list;
+};
+
+/**
* struct drm_gem_object - GEM buffer object
*
* This structure defines the generic parts for GEM buffer objects, which are
@@ -217,7 +312,7 @@ struct drm_gem_object {
*
* SHMEM file node used as backing storage for swappable buffer objects.
* GEM also supports driver private objects with driver-specific backing
- * storage (contiguous CMA memory, special reserved blocks). In this
+ * storage (contiguous DMA memory, special reserved blocks). In this
* case @filp is NULL.
*/
struct file *filp;
@@ -303,6 +398,37 @@ struct drm_gem_object {
struct dma_resv _resv;
/**
+ * @gpuva: Fields used by GPUVM to manage mappings pointing to this GEM object.
+ *
+ * When DRM_GPUVM_IMMEDIATE_MODE is set, this list is protected by the
+ * &drm_gem_object.gpuva.lock mutex. Otherwise, the list is protected by
+ * the GEM object's &dma_resv lock.
+ *
+ * Note that all entries in this list must agree on whether
+ * DRM_GPUVM_IMMEDIATE_MODE is set.
+ */
+ struct {
+ /**
+ * @gpuva.list: list of GPUVM mappings attached to this GEM object.
+ *
+ * Drivers should lock list accesses with either the GEM object's
+ * &dma_resv lock (&drm_gem_object.resv) or the
+ * &drm_gem_object.gpuva.lock mutex.
+ */
+ struct list_head list;
+
+ /**
+ * @gpuva.lock: lock protecting access to &drm_gem_object.gpuva.list
+ * when DRM_GPUVM_IMMEDIATE_MODE is used.
+ *
+ * Only used when DRM_GPUVM_IMMEDIATE_MODE is set. It should be
+ * safe to take this mutex during the fence signalling path, so
+ * do not allocate memory while holding this lock. Otherwise,
+ * the &dma_resv lock should be used.
+ */
+ struct mutex lock;
+ } gpuva;
+
+ /**
* @funcs:
*
* Optional GEM object functions. If this is set, it will be used instead of the
@@ -312,9 +438,41 @@ struct drm_gem_object {
*
*/
const struct drm_gem_object_funcs *funcs;
+
+ /**
+ * @lru_node:
+ *
+ * List node in a &drm_gem_lru.
+ */
+ struct list_head lru_node;
+
+ /**
+ * @lru:
+ *
+ * The current LRU list that the GEM object is on.
+ */
+ struct drm_gem_lru *lru;
};
/**
+ * DRM_GEM_FOPS - Default drm GEM file operations
+ *
+ * This macro provides a shorthand for setting the GEM file ops in the
+ * &file_operations structure. If all you need are the default ops, use
+ * DEFINE_DRM_GEM_FOPS instead.
+ */
+#define DRM_GEM_FOPS \
+ .open = drm_open,\
+ .release = drm_release,\
+ .unlocked_ioctl = drm_ioctl,\
+ .compat_ioctl = drm_compat_ioctl,\
+ .poll = drm_poll,\
+ .read = drm_read,\
+ .llseek = noop_llseek,\
+ .mmap = drm_gem_mmap, \
+ .fop_flags = FOP_UNSIGNED_OFFSET
+
+/**
* DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers
* @name: name for the generated structure
*
@@ -330,22 +488,19 @@ struct drm_gem_object {
#define DEFINE_DRM_GEM_FOPS(name) \
static const struct file_operations name = {\
.owner = THIS_MODULE,\
- .open = drm_open,\
- .release = drm_release,\
- .unlocked_ioctl = drm_ioctl,\
- .compat_ioctl = drm_compat_ioctl,\
- .poll = drm_poll,\
- .read = drm_read,\
- .llseek = noop_llseek,\
- .mmap = drm_gem_mmap,\
+ DRM_GEM_FOPS,\
}
void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
+int drm_gem_object_init_with_mnt(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size,
+ struct vfsmount *gemfs);
void drm_gem_private_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
+void drm_gem_private_object_fini(struct drm_gem_object *obj);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
@@ -384,8 +539,6 @@ drm_gem_object_put(struct drm_gem_object *obj)
__drm_gem_object_put(obj);
}
-void drm_gem_object_put_locked(struct drm_gem_object *obj);
-
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
@@ -400,6 +553,12 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed);
+void drm_gem_lock(struct drm_gem_object *obj);
+void drm_gem_unlock(struct drm_gem_object *obj);
+
+int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
+void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
+
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
int count, struct drm_gem_object ***objs_out);
struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
@@ -409,12 +568,95 @@ int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
struct ww_acquire_ctx *acquire_ctx);
void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
struct ww_acquire_ctx *acquire_ctx);
-int drm_gem_fence_array_add(struct xarray *fence_array,
- struct dma_fence *fence);
-int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
- struct drm_gem_object *obj,
- bool write);
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
u32 handle, u64 *offset);
+void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
+void drm_gem_lru_remove(struct drm_gem_object *obj);
+void drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj);
+void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
+unsigned long
+drm_gem_lru_scan(struct drm_gem_lru *lru,
+ unsigned int nr_to_scan,
+ unsigned long *remaining,
+ bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket),
+ struct ww_acquire_ctx *ticket);
+
+int drm_gem_evict_locked(struct drm_gem_object *obj);
+
+/**
+ * drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats
+ *
+ * This helper should only be used for fdinfo shared memory stats to determine
+ * if a GEM object is shared.
+ *
+ * @obj: obj in question
+ */
+static inline bool drm_gem_object_is_shared_for_memory_stats(struct drm_gem_object *obj)
+{
+ return (obj->handle_count > 1) || obj->dma_buf;
+}
+
+/**
+ * drm_gem_is_imported() - Tests if GEM object's buffer has been imported
+ * @obj: the GEM object
+ *
+ * Returns:
+ * True if the GEM object's buffer has been imported, false otherwise
+ */
+static inline bool drm_gem_is_imported(const struct drm_gem_object *obj)
+{
+ return !!obj->import_attach;
+}
+
+#ifdef CONFIG_LOCKDEP
+#define drm_gem_gpuva_assert_lock_held(gpuvm, obj) \
+ lockdep_assert(drm_gpuvm_immediate_mode(gpuvm) ? \
+ lockdep_is_held(&(obj)->gpuva.lock) : \
+ dma_resv_held((obj)->resv))
+#else
+#define drm_gem_gpuva_assert_lock_held(gpuvm, obj) do {} while (0)
+#endif
+
+/**
+ * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object
+ * @obj: the &drm_gem_object
+ *
+ * This initializes the &drm_gem_object's &drm_gpuvm_bo list.
+ *
+ * Calling this function is only necessary for drivers intending to support the
+ * &drm_driver_feature DRIVER_GEM_GPUVA.
+ *
+ * See also drm_gem_gpuva_set_lock().
+ */
+static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
+{
+ INIT_LIST_HEAD(&obj->gpuva.list);
+}
+
+/**
+ * drm_gem_for_each_gpuvm_bo() - iterator to walk over a list of &drm_gpuvm_bo
+ * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
+ * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuvm_bo structures associated with the
+ * &drm_gem_object.
+ */
+#define drm_gem_for_each_gpuvm_bo(entry__, obj__) \
+ list_for_each_entry(entry__, &(obj__)->gpuva.list, list.entry.gem)
+
+/**
+ * drm_gem_for_each_gpuvm_bo_safe() - iterator to safely walk over a list of
+ * &drm_gpuvm_bo
+ * @entry__: &drm_gpuvm_bo structure to assign to in each iteration step
+ * @next__: another &drm_gpuvm_bo used as temporary storage for the next step
+ * @obj__: the &drm_gem_object the &drm_gpuvm_bo to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuvm_bo structures associated with the
+ * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence
+ * it is safe against removal of elements.
+ */
+#define drm_gem_for_each_gpuvm_bo_safe(entry__, next__, obj__) \
+ list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, list.entry.gem)
+
#endif /* __DRM_GEM_H__ */
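
Editor's note: the LRU helpers are aimed at driver shrinkers. count_objects can
read &drm_gem_lru.count locklessly, and scan_objects can delegate to
drm_gem_lru_scan(). A compressed sketch; my_lru, my_evict() and the shrinker
wiring are assumptions, not part of the patch.

    static bool my_evict(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
    {
            return drm_gem_evict_locked(obj) == 0;
    }

    static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
    {
            unsigned long remaining = 0;
            unsigned long freed;

            freed = drm_gem_lru_scan(&my_lru, sc->nr_to_scan, &remaining,
                                     my_evict, NULL);
            return freed ?: SHRINK_STOP;
    }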
diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h
index cfc5adee3d13..3e01c619a25e 100644
--- a/include/drm/drm_gem_atomic_helper.h
+++ b/include/drm/drm_gem_atomic_helper.h
@@ -3,8 +3,10 @@
#ifndef __DRM_GEM_ATOMIC_HELPER_H__
#define __DRM_GEM_ATOMIC_HELPER_H__
-#include <linux/dma-buf-map.h>
+#include <linux/iosys-map.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
struct drm_simple_display_pipe;
@@ -14,14 +16,30 @@ struct drm_simple_display_pipe;
*/
int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state);
-int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
/*
* Helpers for planes with shadow buffers
*/
/**
+ * DRM_SHADOW_PLANE_MAX_WIDTH - Maximum width of a plane's shadow buffer in pixels
+ *
+ * For drivers with shadow planes, the maximum width of the framebuffer is
+ * usually independent from hardware limitations. Drivers can initialize struct
+ * drm_mode_config.max_width from DRM_SHADOW_PLANE_MAX_WIDTH.
+ */
+#define DRM_SHADOW_PLANE_MAX_WIDTH (4096u)
+
+/**
+ * DRM_SHADOW_PLANE_MAX_HEIGHT - Maximum height of a plane's shadow buffer in scanlines
+ *
+ * For drivers with shadow planes, the maximum height of the framebuffer is
+ * usually independent from hardware limitations. Drivers can initialize struct
+ * drm_mode_config.max_height from DRM_SHADOW_PLANE_MAX_HEIGHT.
+ */
+#define DRM_SHADOW_PLANE_MAX_HEIGHT (4096u)
+
+/**
* struct drm_shadow_plane_state - plane state for planes with shadow buffers
*
* For planes that use a shadow buffer, struct drm_shadow_plane_state
@@ -32,6 +50,15 @@ struct drm_shadow_plane_state {
/** @base: plane state */
struct drm_plane_state base;
+ /**
+ * @fmtcnv_state: Format-conversion state
+ *
+ * Per-plane state for format conversion.
+ * Flags for copying shadow buffers into backend storage. Also holds
+ * temporary storage for format conversion.
+ */
+ struct drm_format_conv_state fmtcnv_state;
+
/* Transitional state - do not export or duplicate */
/**
@@ -40,7 +67,15 @@ struct drm_shadow_plane_state {
* The memory mappings stored in map should be established in the plane's
* prepare_fb callback and removed in the cleanup_fb callback.
*/
- struct dma_buf_map map[4];
+ struct iosys_map map[DRM_FORMAT_MAX_PLANES];
+
+ /**
+ * @data: Address of each framebuffer BO's data
+ *
+ * The address of the data stored in each mapping. This is different
+ * for framebuffers with non-zero offset fields.
+ */
+ struct iosys_map data[DRM_FORMAT_MAX_PLANES];
};
/**
@@ -53,6 +88,12 @@ to_drm_shadow_plane_state(struct drm_plane_state *state)
return container_of(state, struct drm_shadow_plane_state, base);
}
+void __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
+ struct drm_shadow_plane_state *new_shadow_plane_state);
+void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state);
+void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
+ struct drm_shadow_plane_state *shadow_plane_state);
+
void drm_gem_reset_shadow_plane(struct drm_plane *plane);
struct drm_plane_state *drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane);
void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
@@ -70,8 +111,8 @@ void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
.atomic_duplicate_state = drm_gem_duplicate_shadow_plane_state, \
.atomic_destroy_state = drm_gem_destroy_shadow_plane_state
-int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state);
-void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state);
+int drm_gem_begin_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state);
+void drm_gem_end_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state);
/**
* DRM_GEM_SHADOW_PLANE_HELPER_FUNCS -
@@ -82,13 +123,13 @@ void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *
* functions.
*/
#define DRM_GEM_SHADOW_PLANE_HELPER_FUNCS \
- .prepare_fb = drm_gem_prepare_shadow_fb, \
- .cleanup_fb = drm_gem_cleanup_shadow_fb
+ .begin_fb_access = drm_gem_begin_shadow_fb_access, \
+ .end_fb_access = drm_gem_end_shadow_fb_access
-int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
-void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
+int drm_gem_simple_kms_begin_shadow_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void drm_gem_simple_kms_end_shadow_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe);
struct drm_plane_state *
drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe);
@@ -104,8 +145,8 @@ void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pip
* functions.
*/
#define DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS \
- .prepare_fb = drm_gem_simple_kms_prepare_shadow_fb, \
- .cleanup_fb = drm_gem_simple_kms_cleanup_shadow_fb, \
+ .begin_fb_access = drm_gem_simple_kms_begin_shadow_fb_access, \
+ .end_fb_access = drm_gem_simple_kms_end_shadow_fb_access, \
.reset_plane = drm_gem_simple_kms_reset_shadow_plane, \
.duplicate_plane_state = drm_gem_simple_kms_duplicate_shadow_plane_state, \
.destroy_plane_state = drm_gem_simple_kms_destroy_shadow_plane_state
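
Editor's note: a sketch tying the renamed hooks together.
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS now expands to begin_fb_access/end_fb_access,
and atomic_update can read the vmap'ed shadow buffer through the new data[]
member; my_shadow_update() and my_upload() are illustrative.

    static void my_shadow_update(struct drm_plane *plane,
                                 struct drm_atomic_state *state)
    {
            struct drm_plane_state *new_state =
                    drm_atomic_get_new_plane_state(state, plane);
            struct drm_shadow_plane_state *shadow =
                    to_drm_shadow_plane_state(new_state);

            /* shadow->data[0] points at plane 0's pixels, offsets applied */
            my_upload(plane, &shadow->data[0], new_state->fb);
    }

    static const struct drm_plane_helper_funcs my_plane_helper_funcs = {
            DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
            .atomic_update = my_shadow_update,
    };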
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
deleted file mode 100644
index 0a9711caa3e8..000000000000
--- a/include/drm/drm_gem_cma_helper.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DRM_GEM_CMA_HELPER_H__
-#define __DRM_GEM_CMA_HELPER_H__
-
-#include <drm/drm_file.h>
-#include <drm/drm_ioctl.h>
-#include <drm/drm_gem.h>
-
-struct drm_mode_create_dumb;
-
-/**
- * struct drm_gem_cma_object - GEM object backed by CMA memory allocations
- * @base: base GEM object
- * @paddr: physical address of the backing memory
- * @sgt: scatter/gather table for imported PRIME buffers. The table can have
- * more than one entry but they are guaranteed to have contiguous
- * DMA addresses.
- * @vaddr: kernel virtual address of the backing memory
- */
-struct drm_gem_cma_object {
- struct drm_gem_object base;
- dma_addr_t paddr;
- struct sg_table *sgt;
-
- /* For objects with DMA memory allocated by GEM CMA */
- void *vaddr;
-};
-
-#define to_drm_gem_cma_obj(gem_obj) \
- container_of(gem_obj, struct drm_gem_cma_object, base)
-
-#ifndef CONFIG_MMU
-#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
- .get_unmapped_area = drm_gem_cma_get_unmapped_area,
-#else
-#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS
-#endif
-
-/**
- * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers
- * @name: name for the generated structure
- *
- * This macro autogenerates a suitable &struct file_operations for CMA based
- * drivers, which can be assigned to &drm_driver.fops. Note that this structure
- * cannot be shared between drivers, because it contains a reference to the
- * current module using THIS_MODULE.
- *
- * Note that the declaration is already marked as static - if you need a
- * non-static version of this you're probably doing it wrong and will break the
- * THIS_MODULE reference by accident.
- */
-#define DEFINE_DRM_GEM_CMA_FOPS(name) \
- static const struct file_operations name = {\
- .owner = THIS_MODULE,\
- .open = drm_open,\
- .release = drm_release,\
- .unlocked_ioctl = drm_ioctl,\
- .compat_ioctl = drm_compat_ioctl,\
- .poll = drm_poll,\
- .read = drm_read,\
- .llseek = noop_llseek,\
- .mmap = drm_gem_mmap,\
- DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
- }
-
-/* free GEM object */
-void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
-
-/* create memory region for DRM framebuffer */
-int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
- struct drm_device *drm,
- struct drm_mode_create_dumb *args);
-
-/* create memory region for DRM framebuffer */
-int drm_gem_cma_dumb_create(struct drm_file *file_priv,
- struct drm_device *drm,
- struct drm_mode_create_dumb *args);
-
-/* allocate physical memory */
-struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
- size_t size);
-
-extern const struct vm_operations_struct drm_gem_cma_vm_ops;
-
-#ifndef CONFIG_MMU
-unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
- unsigned long addr,
- unsigned long len,
- unsigned long pgoff,
- unsigned long flags);
-#endif
-
-void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
- const struct drm_gem_object *obj);
-
-struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
-int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-
-/**
- * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE - CMA GEM driver operations
- * @dumb_create_func: callback function for .dumb_create
- *
- * This macro provides a shortcut for setting the default GEM operations in the
- * &drm_driver structure.
- *
- * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS for drivers that
- * override the default implementation of &struct drm_driver.dumb_create. Use
- * DRM_GEM_CMA_DRIVER_OPS if possible. Drivers that require a virtual address
- * on imported buffers should use
- * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead.
- */
-#define DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \
- .dumb_create = (dumb_create_func), \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, \
- .gem_prime_mmap = drm_gem_prime_mmap
-
-/**
- * DRM_GEM_CMA_DRIVER_OPS - CMA GEM driver operations
- *
- * This macro provides a shortcut for setting the default GEM operations in the
- * &drm_driver structure.
- *
- * Drivers that come with their own implementation of
- * &struct drm_driver.dumb_create should use
- * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. Use
- * DRM_GEM_CMA_DRIVER_OPS if possible. Drivers that require a virtual address
- * on imported buffers should use DRM_GEM_CMA_DRIVER_OPS_VMAP instead.
- */
-#define DRM_GEM_CMA_DRIVER_OPS \
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_gem_cma_dumb_create)
-
-/**
- * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE - CMA GEM driver operations
- * ensuring a virtual address
- * on the buffer
- * @dumb_create_func: callback function for .dumb_create
- *
- * This macro provides a shortcut for setting the default GEM operations in the
- * &drm_driver structure for drivers that need the virtual address also on
- * imported buffers.
- *
- * This macro is a variant of DRM_GEM_CMA_DRIVER_OPS_VMAP for drivers that
- * override the default implementation of &struct drm_driver.dumb_create. Use
- * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
- * virtual address on imported buffers should use
- * DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE() instead.
- */
-#define DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \
- .dumb_create = dumb_create_func, \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \
- .gem_prime_mmap = drm_gem_prime_mmap
-
-/**
- * DRM_GEM_CMA_DRIVER_OPS_VMAP - CMA GEM driver operations ensuring a virtual
- * address on the buffer
- *
- * This macro provides a shortcut for setting the default GEM operations in the
- * &drm_driver structure for drivers that need the virtual address also on
- * imported buffers.
- *
- * Drivers that come with their own implementation of
- * &struct drm_driver.dumb_create should use
- * DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. Use
- * DRM_GEM_CMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
- * virtual address on imported buffers should use DRM_GEM_CMA_DRIVER_OPS
- * instead.
- */
-#define DRM_GEM_CMA_DRIVER_OPS_VMAP \
- DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_gem_cma_dumb_create)
-
-struct drm_gem_object *
-drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
-
-#endif /* __DRM_GEM_CMA_HELPER_H__ */
diff --git a/include/drm/drm_gem_dma_helper.h b/include/drm/drm_gem_dma_helper.h
new file mode 100644
index 000000000000..f2678e7ecb98
--- /dev/null
+++ b/include/drm/drm_gem_dma_helper.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DRM_GEM_DMA_HELPER_H__
+#define __DRM_GEM_DMA_HELPER_H__
+
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_gem.h>
+
+struct drm_mode_create_dumb;
+
+/**
+ * struct drm_gem_dma_object - GEM object backed by DMA memory allocations
+ * @base: base GEM object
+ * @dma_addr: DMA address of the backing memory
+ * @sgt: scatter/gather table for imported PRIME buffers. The table can have
+ * more than one entry but they are guaranteed to have contiguous
+ * DMA addresses.
+ * @vaddr: kernel virtual address of the backing memory
+ * @map_noncoherent: if true, the GEM object is backed by non-coherent memory
+ */
+struct drm_gem_dma_object {
+ struct drm_gem_object base;
+ dma_addr_t dma_addr;
+ struct sg_table *sgt;
+
+ /* For objects with DMA memory allocated by GEM DMA */
+ void *vaddr;
+
+ bool map_noncoherent;
+};
+
+#define to_drm_gem_dma_obj(gem_obj) \
+ container_of(gem_obj, struct drm_gem_dma_object, base)
+
+struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,
+ size_t size);
+void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj);
+void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj,
+ struct drm_printer *p, unsigned int indent);
+struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj);
+int drm_gem_dma_vmap(struct drm_gem_dma_object *dma_obj,
+ struct iosys_map *map);
+int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *vma);
+
+extern const struct vm_operations_struct drm_gem_dma_vm_ops;
+
+/*
+ * GEM object functions
+ */
+
+/**
+ * drm_gem_dma_object_free - GEM object function for drm_gem_dma_free()
+ * @obj: GEM object to free
+ *
+ * This function wraps drm_gem_dma_free(). Drivers that employ the DMA helpers
+ * should use it as their &drm_gem_object_funcs.free handler.
+ */
+static inline void drm_gem_dma_object_free(struct drm_gem_object *obj)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ drm_gem_dma_free(dma_obj);
+}
+
+/**
+ * drm_gem_dma_object_print_info() - Print &drm_gem_dma_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_dma_print_info(). Drivers that employ the DMA helpers
+ * should use this function as their &drm_gem_object_funcs.print_info handler.
+ */
+static inline void drm_gem_dma_object_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj)
+{
+ const struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ drm_gem_dma_print_info(dma_obj, p, indent);
+}
+
+/**
+ * drm_gem_dma_object_get_sg_table - GEM object function for drm_gem_dma_get_sg_table()
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_dma_get_sg_table(). Drivers that employ the DMA helpers should
+ * use it as their &drm_gem_object_funcs.get_sg_table handler.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or NULL on failure.
+ */
+static inline struct sg_table *drm_gem_dma_object_get_sg_table(struct drm_gem_object *obj)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ return drm_gem_dma_get_sg_table(dma_obj);
+}
+
+/**
+ * drm_gem_dma_object_vmap - GEM object function for drm_gem_dma_vmap()
+ * @obj: GEM object
+ * @map: Returns the kernel virtual address of the DMA GEM object's backing store.
+ *
+ * This function wraps drm_gem_dma_vmap(). Drivers that employ the DMA helpers should
+ * use it as their &drm_gem_object_funcs.vmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_dma_object_vmap(struct drm_gem_object *obj,
+ struct iosys_map *map)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ return drm_gem_dma_vmap(dma_obj, map);
+}
+
+/**
+ * drm_gem_dma_object_mmap - GEM object function for drm_gem_dma_mmap()
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function wraps drm_gem_dma_mmap(). Drivers that employ the DMA helpers should
+ * use it as their &drm_gem_object_funcs.mmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_dma_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(obj);
+
+ return drm_gem_dma_mmap(dma_obj, vma);
+}
+
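Taken together, these wrappers are meant to populate a driver's &struct drm_gem_object_funcs. A minimal sketch, assuming only the structure name is driver-chosen:

static const struct drm_gem_object_funcs my_dma_gem_funcs = {
	.free = drm_gem_dma_object_free,
	.print_info = drm_gem_dma_object_print_info,
	.get_sg_table = drm_gem_dma_object_get_sg_table,
	.vmap = drm_gem_dma_object_vmap,
	.mmap = drm_gem_dma_object_mmap,
	.vm_ops = &drm_gem_dma_vm_ops,
};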
+/*
+ * Driver ops
+ */
+
+/* create memory region for DRM framebuffer */
+int drm_gem_dma_dumb_create_internal(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+/* create memory region for DRM framebuffer */
+int drm_gem_dma_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args);
+
+struct drm_gem_object *
+drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+/**
+ * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE - DMA GEM driver operations
+ * @dumb_create_func: callback function for .dumb_create
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure.
+ *
+ * This macro is a variant of DRM_GEM_DMA_DRIVER_OPS for drivers that
+ * override the default implementation of &struct drm_driver.dumb_create. Use
+ * DRM_GEM_DMA_DRIVER_OPS if possible. Drivers that require a virtual address
+ * on imported buffers should use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead.
+ */
+#define DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \
+ .dumb_create = (dumb_create_func), \
+ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table
+
+/**
+ * DRM_GEM_DMA_DRIVER_OPS - DMA GEM driver operations
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure.
+ *
+ * Drivers that come with their own implementation of
+ * &struct drm_driver.dumb_create should use
+ * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead. Use
+ * DRM_GEM_DMA_DRIVER_OPS if possible. Drivers that require a virtual address
+ * on imported buffers should use DRM_GEM_DMA_DRIVER_OPS_VMAP instead.
+ */
+#define DRM_GEM_DMA_DRIVER_OPS \
+ DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(drm_gem_dma_dumb_create)
+
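For example, a driver that keeps the default dumb-buffer implementation can drop the macro straight into its &struct drm_driver initializer (the structure name and feature flags are illustrative):

static const struct drm_driver my_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_DMA_DRIVER_OPS,
	/* .fops, .name and the other fields follow as usual */
};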
+/**
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE - DMA GEM driver operations
+ * ensuring a virtual address
+ * on the buffer
+ * @dumb_create_func: callback function for .dumb_create
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure for drivers that need the virtual address also on
+ * imported buffers.
+ *
+ * This macro is a variant of DRM_GEM_DMA_DRIVER_OPS_VMAP for drivers that
+ * override the default implementation of &struct drm_driver.dumb_create. Use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
+ * virtual address on imported buffers should use
+ * DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead.
+ */
+#define DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \
+ .dumb_create = (dumb_create_func), \
+ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap
+
+/**
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP - DMA GEM driver operations ensuring a virtual
+ * address on the buffer
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure for drivers that need the virtual address also on
+ * imported buffers.
+ *
+ * Drivers that come with their own implementation of
+ * &struct drm_driver.dumb_create should use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead. Use
+ * DRM_GEM_DMA_DRIVER_OPS_VMAP if possible. Drivers that do not require a
+ * virtual address on imported buffers should use DRM_GEM_DMA_DRIVER_OPS
+ * instead.
+ */
+#define DRM_GEM_DMA_DRIVER_OPS_VMAP \
+ DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_gem_dma_dumb_create)
+
+struct drm_gem_object *
+drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *drm,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+/*
+ * File ops
+ */
+
+#ifndef CONFIG_MMU
+unsigned long drm_gem_dma_get_unmapped_area(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags);
+#define DRM_GEM_DMA_UNMAPPED_AREA_FOPS \
+ .get_unmapped_area = drm_gem_dma_get_unmapped_area,
+#else
+#define DRM_GEM_DMA_UNMAPPED_AREA_FOPS
+#endif
+
+/**
+ * DEFINE_DRM_GEM_DMA_FOPS() - macro to generate file operations for DMA drivers
+ * @name: name for the generated structure
+ *
+ * This macro autogenerates a suitable &struct file_operations for DMA based
+ * drivers, which can be assigned to &drm_driver.fops. Note that this structure
+ * cannot be shared between drivers, because it contains a reference to the
+ * current module using THIS_MODULE.
+ *
+ * Note that the declaration is already marked as static - if you need a
+ * non-static version of this you're probably doing it wrong and will break the
+ * THIS_MODULE reference by accident.
+ */
+#define DEFINE_DRM_GEM_DMA_FOPS(name) \
+ static const struct file_operations name = {\
+ .owner = THIS_MODULE,\
+ .open = drm_open,\
+ .release = drm_release,\
+ .unlocked_ioctl = drm_ioctl,\
+ .compat_ioctl = drm_compat_ioctl,\
+ .poll = drm_poll,\
+ .read = drm_read,\
+ .llseek = noop_llseek,\
+ .mmap = drm_gem_mmap,\
+ .fop_flags = FOP_UNSIGNED_OFFSET, \
+ DRM_GEM_DMA_UNMAPPED_AREA_FOPS \
+ }
+
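A typical use defines the file operations once at file scope and points &drm_driver.fops at them; my_fops is an arbitrary name:

DEFINE_DRM_GEM_DMA_FOPS(my_fops);

static const struct drm_driver my_driver = {
	DRM_GEM_DMA_DRIVER_OPS,
	.fops = &my_fops,
	/* remaining &drm_driver fields omitted */
};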
+#endif /* __DRM_GEM_DMA_HELPER_H__ */
diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h
index 6bdffc7aa124..24f1fd40d553 100644
--- a/include/drm/drm_gem_framebuffer_helper.h
+++ b/include/drm/drm_gem_framebuffer_helper.h
@@ -1,10 +1,14 @@
#ifndef __DRM_GEM_FB_HELPER_H__
#define __DRM_GEM_FB_HELPER_H__
+#include <linux/dma-buf.h>
+#include <linux/iosys-map.h>
+
struct drm_afbc_framebuffer;
struct drm_device;
struct drm_fb_helper_surface_size;
struct drm_file;
+struct drm_format_info;
struct drm_framebuffer;
struct drm_framebuffer_funcs;
struct drm_gem_object;
@@ -21,23 +25,34 @@ int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
int drm_gem_fb_init_with_funcs(struct drm_device *dev,
struct drm_framebuffer *fb,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs);
struct drm_framebuffer *
drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs);
struct drm_framebuffer *
drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *
drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
+int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
+ struct iosys_map *data);
+void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map);
+int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir);
+void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir);
+
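A hedged sketch of the vmap/vunmap pairing: both arrays are sized DRM_FORMAT_MAX_PLANES (from drm_fourcc.h) and receive one mapping per framebuffer plane, with data[i] adjusted to the plane's first pixel:

struct iosys_map map[DRM_FORMAT_MAX_PLANES];
struct iosys_map data[DRM_FORMAT_MAX_PLANES];
int ret;

ret = drm_gem_fb_vmap(fb, map, data);
if (!ret) {
	/* access the planes through data[i], then release the mappings */
	drm_gem_fb_vunmap(fb, map);
}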
#define drm_is_afbc(modifier) \
(((modifier) & AFBC_VENDOR_AND_TYPE_MASK) == DRM_FORMAT_MOD_ARM_AFBC(0))
int drm_gem_fb_afbc_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb);
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 434328d8a0d9..589f7bfe7506 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -27,11 +27,6 @@ struct drm_gem_shmem_object {
struct drm_gem_object base;
/**
- * @pages_lock: Protects the page table and use count
- */
- struct mutex pages_lock;
-
- /**
* @pages: Page table
*/
struct page **pages;
@@ -42,7 +37,18 @@ struct drm_gem_shmem_object {
* Reference count on the pages table.
* The pages are put when the count reaches zero.
*/
- unsigned int pages_use_count;
+ refcount_t pages_use_count;
+
+ /**
+ * @pages_pin_count:
+ *
+ * Reference count on the pinned pages table.
+ *
+	 * Pages are hard-pinned and reside in memory if the count is
+	 * greater than zero. Otherwise, when the count is zero, the pages
+	 * are allowed to be evicted and purged by the memory shrinker.
+ */
+ refcount_t pages_pin_count;
/**
* @madv: State for madvise
@@ -61,30 +67,11 @@ struct drm_gem_shmem_object {
struct list_head madv_list;
/**
- * @pages_mark_dirty_on_put:
- *
- * Mark pages as dirty when they are put.
- */
- unsigned int pages_mark_dirty_on_put : 1;
-
- /**
- * @pages_mark_accessed_on_put:
- *
- * Mark pages as accessed when they are put.
- */
- unsigned int pages_mark_accessed_on_put : 1;
-
- /**
* @sgt: Scatter/gather table for imported PRIME buffers
*/
struct sg_table *sgt;
/**
- * @vmap_lock: Protects the vmap address and use count
- */
- struct mutex vmap_lock;
-
- /**
* @vaddr: Kernel virtual address of the backing memory
*/
void *vaddr;
@@ -95,71 +82,225 @@ struct drm_gem_shmem_object {
* Reference count on the virtual address.
	 * The address is unmapped when the count reaches zero.
*/
- unsigned int vmap_use_count;
+ refcount_t vmap_use_count;
+
+ /**
+ * @pages_mark_dirty_on_put:
+ *
+ * Mark pages as dirty when they are put.
+ */
+ bool pages_mark_dirty_on_put : 1;
+
+ /**
+ * @pages_mark_accessed_on_put:
+ *
+ * Mark pages as accessed when they are put.
+ */
+ bool pages_mark_accessed_on_put : 1;
/**
* @map_wc: map object write-combined (instead of using shmem defaults).
*/
- bool map_wc;
+ bool map_wc : 1;
};
#define to_drm_gem_shmem_obj(obj) \
container_of(obj, struct drm_gem_shmem_object, base)
+int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size);
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
-void drm_gem_shmem_free_object(struct drm_gem_object *obj);
+struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
+ size_t size,
+ struct vfsmount *gemfs);
+void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
+
+void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem);
+int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
+int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
+void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
+ struct iosys_map *map);
+int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma);
-int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
-void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_pin(struct drm_gem_object *obj);
-void drm_gem_shmem_unpin(struct drm_gem_object *obj);
-int drm_gem_shmem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
-void drm_gem_shmem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map);
+int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv);
+int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv);
static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
{
return (shmem->madv > 0) &&
- !shmem->vmap_use_count && shmem->sgt &&
- !shmem->base.dma_buf && !shmem->base.import_attach;
+ !refcount_read(&shmem->pages_pin_count) && shmem->sgt &&
+ !shmem->base.dma_buf && !drm_gem_is_imported(&shmem->base);
}
-void drm_gem_shmem_purge_locked(struct drm_gem_object *obj);
-bool drm_gem_shmem_purge(struct drm_gem_object *obj);
+void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
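A minimal sketch of the madvise/purge flow, assuming the caller already holds the locking required by the _locked() variants; the policy around when to mark and when to purge is driver-specific:

/* Userspace signals the buffer is not currently needed (madv > 0). */
drm_gem_shmem_madvise_locked(shmem, 1);

/* A shrinker may later reclaim the backing pages. */
if (drm_gem_shmem_is_purgeable(shmem))
	drm_gem_shmem_purge_locked(shmem);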
-struct drm_gem_shmem_object *
-drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
- struct drm_device *dev, size_t size,
- uint32_t *handle);
+struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
+struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args);
+void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
+ struct drm_printer *p, unsigned int indent);
-int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+extern const struct vm_operations_struct drm_gem_shmem_vm_ops;
-void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
- const struct drm_gem_object *obj);
+/*
+ * GEM object functions
+ */
+
+/**
+ * drm_gem_shmem_object_free - GEM object function for drm_gem_shmem_free()
+ * @obj: GEM object to free
+ *
+ * This function wraps drm_gem_shmem_free(). Drivers that employ the shmem helpers
+ * should use it as their &drm_gem_object_funcs.free handler.
+ */
+static inline void drm_gem_shmem_object_free(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_free(shmem);
+}
+
+/**
+ * drm_gem_shmem_object_print_info() - Print &drm_gem_shmem_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_shmem_print_info(). Drivers that employ the shmem helpers should
+ * use this function as their &drm_gem_object_funcs.print_info handler.
+ */
+static inline void drm_gem_shmem_object_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj)
+{
+ const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_print_info(shmem, p, indent);
+}
+
+/**
+ * drm_gem_shmem_object_pin - GEM object function for drm_gem_shmem_pin()
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_shmem_pin(). Drivers that employ the shmem helpers should
+ * use it as their &drm_gem_object_funcs.pin handler.
+ */
+static inline int drm_gem_shmem_object_pin(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_gem_shmem_pin_locked(shmem);
+}
+
+/**
+ * drm_gem_shmem_object_unpin - GEM object function for drm_gem_shmem_unpin()
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_shmem_unpin(). Drivers that employ the shmem helpers should
+ * use it as their &drm_gem_object_funcs.unpin handler.
+ */
+static inline void drm_gem_shmem_object_unpin(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_unpin_locked(shmem);
+}
+
+/**
+ * drm_gem_shmem_object_get_sg_table - GEM object function for drm_gem_shmem_get_sg_table()
+ * @obj: GEM object
+ *
+ * This function wraps drm_gem_shmem_get_sg_table(). Drivers that employ the shmem helpers should
+ * use it as their &drm_gem_object_funcs.get_sg_table handler.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
+ */
+static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_gem_shmem_get_sg_table(shmem);
+}
+
+/**
+ * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap_locked()
+ * @obj: GEM object
+ * @map: Returns the kernel virtual address of the SHMEM GEM object's backing store.
+ *
+ * This function wraps drm_gem_shmem_vmap_locked(). Drivers that employ the shmem
+ * helpers should use it as their &drm_gem_object_funcs.vmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
+ struct iosys_map *map)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_gem_shmem_vmap_locked(shmem, map);
+}
+
+/**
+ * drm_gem_shmem_object_vunmap - GEM object function for drm_gem_shmem_vunmap()
+ * @obj: GEM object
+ * @map: Kernel virtual address where the SHMEM GEM object was mapped
+ *
+ * This function wraps drm_gem_shmem_vunmap_locked(). Drivers that employ the shmem
+ * helpers should use it as their &drm_gem_object_funcs.vunmap handler.
+ */
+static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj,
+ struct iosys_map *map)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_vunmap_locked(shmem, map);
+}
+
+/**
+ * drm_gem_shmem_object_mmap - GEM object function for drm_gem_shmem_mmap()
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function wraps drm_gem_shmem_mmap(). Drivers that employ the shmem helpers should
+ * use it as their &drm_gem_object_funcs.mmap handler.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_gem_shmem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_gem_shmem_mmap(shmem, vma);
+}
+
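As with the DMA helpers, these wrappers collectively fill a driver's &struct drm_gem_object_funcs; a minimal sketch with a driver-chosen structure name:

static const struct drm_gem_object_funcs my_shmem_gem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};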
+/*
+ * Driver ops
+ */
-struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
-
-struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj);
+int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
+ struct dma_buf *buf);
/**
* DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
*
- * This macro provides a shortcut for setting the shmem GEM operations in
- * the &drm_driver structure.
+ * This macro provides a shortcut for setting the shmem GEM operations
+ * in the &drm_driver structure. Drivers that do not require an s/g table
+ * for imported buffers should use this.
*/
#define DRM_GEM_SHMEM_DRIVER_OPS \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
- .gem_prime_mmap = drm_gem_prime_mmap, \
- .dumb_create = drm_gem_shmem_dumb_create
+ .gem_prime_import = drm_gem_shmem_prime_import_no_map, \
+ .dumb_create = drm_gem_shmem_dumb_create
#endif /* __DRM_GEM_SHMEM_HELPER_H__ */
diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h
index 7c6d874910b8..7b53d673ae7e 100644
--- a/include/drm/drm_gem_ttm_helper.h
+++ b/include/drm/drm_gem_ttm_helper.h
@@ -3,14 +3,13 @@
#ifndef DRM_GEM_TTM_HELPER_H
#define DRM_GEM_TTM_HELPER_H
-#include <linux/kernel.h>
+#include <linux/container_of.h>
-#include <drm/drm_gem.h>
#include <drm/drm_device.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/drm_gem.h>
+#include <drm/ttm/ttm_bo.h>
-struct dma_buf_map;
+struct iosys_map;
#define drm_gem_ttm_of_gem(gem_obj) \
container_of(gem_obj, struct ttm_buffer_object, base)
@@ -18,10 +17,13 @@ struct dma_buf_map;
void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *gem);
int drm_gem_ttm_vmap(struct drm_gem_object *gem,
- struct dma_buf_map *map);
+ struct iosys_map *map);
void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
- struct dma_buf_map *map);
+ struct iosys_map *map);
int drm_gem_ttm_mmap(struct drm_gem_object *gem,
struct vm_area_struct *vma);
+int drm_gem_ttm_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+
#endif
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index 288055d397d9..2dd42bed679d 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -5,18 +5,18 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_modes.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
-#include <linux/dma-buf-map.h>
-#include <linux/kernel.h> /* for container_of() */
+#include <linux/container_of.h>
+#include <linux/iosys-map.h>
struct drm_mode_create_dumb;
struct drm_plane;
struct drm_plane_state;
-struct drm_simple_display_pipe;
struct filp;
struct vm_area_struct;
@@ -32,8 +32,8 @@ struct vm_area_struct;
* struct drm_gem_vram_object - GEM object backed by VRAM
* @bo: TTM buffer object
* @map: Mapping information for @bo
- * @placement: TTM placement information. Supported placements are \
- %TTM_PL_VRAM and %TTM_PL_SYSTEM
+ * @placement: TTM placement information. Supported placements are %TTM_PL_VRAM
+ * and %TTM_PL_SYSTEM
* @placements: TTM placement information.
*
* The type struct drm_gem_vram_object represents a GEM object that is
@@ -50,7 +50,7 @@ struct vm_area_struct;
*/
struct drm_gem_vram_object {
struct ttm_buffer_object bo;
- struct dma_buf_map map;
+ struct iosys_map map;
/**
* @vmap_use_count:
@@ -93,12 +93,10 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
size_t size,
unsigned long pg_align);
void drm_gem_vram_put(struct drm_gem_vram_object *gbo);
-u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo);
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo);
-int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag);
-int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo);
-int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map);
-void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map);
+int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map);
+void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo,
+ struct iosys_map *map);
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
struct drm_device *dev,
@@ -113,9 +111,6 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file,
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
/*
* Helpers for struct drm_plane_helper_funcs
@@ -127,30 +122,29 @@ void
drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
-/*
- * Helpers for struct drm_simple_display_pipe_funcs
+/**
+ * DRM_GEM_VRAM_PLANE_HELPER_FUNCS - Initializes struct drm_plane_helper_funcs
+ * for VRAM handling
+ *
+ * Drivers may use GEM BOs as VRAM helpers for the framebuffer memory. This
+ * macro initializes struct drm_plane_helper_funcs to use the respective helper
+ * functions.
*/
-
-int drm_gem_vram_simple_display_pipe_prepare_fb(
- struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *new_state);
-
-void drm_gem_vram_simple_display_pipe_cleanup_fb(
- struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state);
+#define DRM_GEM_VRAM_PLANE_HELPER_FUNCS \
+ .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, \
+ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb
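For illustration, a primary-plane helper structure that delegates framebuffer pinning to the VRAM helpers; the my_*() callbacks are hypothetical:

static const struct drm_plane_helper_funcs my_primary_plane_helper_funcs = {
	.atomic_check = my_primary_plane_atomic_check,		/* hypothetical */
	.atomic_update = my_primary_plane_atomic_update,	/* hypothetical */
	DRM_GEM_VRAM_PLANE_HELPER_FUNCS,
};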
/**
- * define DRM_GEM_VRAM_DRIVER - default callback functions for \
- &struct drm_driver
+ * define DRM_GEM_VRAM_DRIVER - default callback functions for
+ * &struct drm_driver
*
* Drivers that use VRAM MM and GEM VRAM can use this macro to initialize
* &struct drm_driver with default functions.
*/
#define DRM_GEM_VRAM_DRIVER \
- .debugfs_init = drm_vram_mm_debugfs_init, \
- .dumb_create = drm_gem_vram_driver_dumb_create, \
- .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset, \
- .gem_prime_mmap = drm_gem_prime_mmap
+ .debugfs_init = drm_vram_mm_debugfs_init, \
+ .dumb_create = drm_gem_vram_driver_dumb_create, \
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset
/*
* VRAM memory manager
@@ -161,7 +155,6 @@ void drm_gem_vram_simple_display_pipe_cleanup_fb(
* @vram_base: Base address of the managed video memory
* @vram_size: Size of the managed video memory in bytes
* @bdev: The TTM BO device.
- * @funcs: TTM BO functions
*
* The fields &struct drm_vram_mm.vram_base and
 * &struct drm_vram_mm.vram_size are managed by VRAM MM, but are
@@ -176,8 +169,8 @@ struct drm_vram_mm {
};
/**
- * drm_vram_mm_of_bdev() - \
- Returns the container of type &struct ttm_device for field bdev.
+ * drm_vram_mm_of_bdev() - Returns the container of type &struct ttm_device for
+ * field bdev.
* @bdev: the TTM BO device
*
* Returns:
@@ -195,10 +188,6 @@ void drm_vram_mm_debugfs_init(struct drm_minor *minor);
* Helpers for integration with struct drm_device
*/
-struct drm_vram_mm *drm_vram_helper_alloc_mm(
- struct drm_device *dev, uint64_t vram_base, size_t vram_size);
-void drm_vram_helper_release_mm(struct drm_device *dev);
-
int drmm_vram_helper_init(struct drm_device *dev, uint64_t vram_base,
size_t vram_size);
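A hedged sketch of bringing up the managed VRAM MM instance at probe time, assuming the device exposes its video memory through PCI BAR 0:

int ret;

ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
			    pci_resource_len(pdev, 0));
if (ret)
	return ret;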
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
new file mode 100644
index 000000000000..632e100e6efb
--- /dev/null
+++ b/include/drm/drm_gpusvm.h
@@ -0,0 +1,542 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __DRM_GPUSVM_H__
+#define __DRM_GPUSVM_H__
+
+#include <linux/kref.h>
+#include <linux/interval_tree.h>
+#include <linux/mmu_notifier.h>
+
+struct dev_pagemap_ops;
+struct drm_device;
+struct drm_gpusvm;
+struct drm_gpusvm_notifier;
+struct drm_gpusvm_ops;
+struct drm_gpusvm_range;
+struct drm_pagemap;
+struct drm_pagemap_addr;
+
+/**
+ * struct drm_gpusvm_ops - Operations structure for GPU SVM
+ *
+ * This structure defines the operations for GPU Shared Virtual Memory (SVM).
+ * These operations are provided by the GPU driver to manage SVM ranges and
+ * notifiers.
+ */
+struct drm_gpusvm_ops {
+ /**
+ * @notifier_alloc: Allocate a GPU SVM notifier (optional)
+ *
+ * Allocate a GPU SVM notifier.
+ *
+ * Return: Pointer to the allocated GPU SVM notifier on success, NULL on failure.
+ */
+ struct drm_gpusvm_notifier *(*notifier_alloc)(void);
+
+ /**
+ * @notifier_free: Free a GPU SVM notifier (optional)
+ * @notifier: Pointer to the GPU SVM notifier to be freed
+ *
+ * Free a GPU SVM notifier.
+ */
+ void (*notifier_free)(struct drm_gpusvm_notifier *notifier);
+
+ /**
+ * @range_alloc: Allocate a GPU SVM range (optional)
+ * @gpusvm: Pointer to the GPU SVM
+ *
+ * Allocate a GPU SVM range.
+ *
+ * Return: Pointer to the allocated GPU SVM range on success, NULL on failure.
+ */
+ struct drm_gpusvm_range *(*range_alloc)(struct drm_gpusvm *gpusvm);
+
+ /**
+ * @range_free: Free a GPU SVM range (optional)
+ * @range: Pointer to the GPU SVM range to be freed
+ *
+ * Free a GPU SVM range.
+ */
+ void (*range_free)(struct drm_gpusvm_range *range);
+
+ /**
+ * @invalidate: Invalidate GPU SVM notifier (required)
+ * @gpusvm: Pointer to the GPU SVM
+ * @notifier: Pointer to the GPU SVM notifier
+ * @mmu_range: Pointer to the mmu_notifier_range structure
+ *
+	 * Invalidate the GPU page tables. The callback can safely walk the
+	 * notifier range RB tree/list. It is called while holding the
+	 * notifier lock.
+ */
+ void (*invalidate)(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ const struct mmu_notifier_range *mmu_range);
+};
+
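A minimal sketch of the one required callback; my_gpu_zap_range() stands in for whatever driver-specific routine tears down the GPU page tables:

static void my_svm_invalidate(struct drm_gpusvm *gpusvm,
			      struct drm_gpusvm_notifier *notifier,
			      const struct mmu_notifier_range *mmu_range)
{
	/* Zap the GPU page tables covering the invalidated span. */
	my_gpu_zap_range(gpusvm->drm, mmu_range->start, mmu_range->end);
}

static const struct drm_gpusvm_ops my_gpusvm_ops = {
	.invalidate = my_svm_invalidate,	/* required */
};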
+/**
+ * struct drm_gpusvm_notifier - Structure representing a GPU SVM notifier
+ *
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: MMU interval notifier
+ * @itree: Interval tree node for the notifier (inserted in GPU SVM)
+ * @entry: List entry for fast interval tree traversal
+ * @root: Cached root node of the RB tree containing ranges
+ * @range_list: List head of ranges in the same order they appear in the
+ *              interval tree. This is useful for iterating over ranges while
+ *              modifying the RB tree.
+ * @flags: Flags for notifier
+ * @flags.removed: Flag indicating whether the MMU interval notifier has been
+ * removed
+ *
+ * This structure represents a GPU SVM notifier.
+ */
+struct drm_gpusvm_notifier {
+ struct drm_gpusvm *gpusvm;
+ struct mmu_interval_notifier notifier;
+ struct interval_tree_node itree;
+ struct list_head entry;
+ struct rb_root_cached root;
+ struct list_head range_list;
+ struct {
+ u32 removed : 1;
+ } flags;
+};
+
+/**
+ * struct drm_gpusvm_pages_flags - Structure representing the flags for GPU SVM pages
+ *
+ * @migrate_devmem: Flag indicating whether the pages can be migrated to device memory
+ * @unmapped: Flag indicating if the pages have been unmapped
+ * @partial_unmap: Flag indicating if the pages have been partially unmapped
+ * @has_devmem_pages: Flag indicating if the pages have devmem pages
+ * @has_dma_mapping: Flag indicating if the pages have a DMA mapping
+ * @__flags: Flags for pages in u16 form (used for READ_ONCE)
+ */
+struct drm_gpusvm_pages_flags {
+ union {
+ struct {
+ /* All flags below must be set upon creation */
+ u16 migrate_devmem : 1;
+ /* All flags below must be set / cleared under notifier lock */
+ u16 unmapped : 1;
+ u16 partial_unmap : 1;
+ u16 has_devmem_pages : 1;
+ u16 has_dma_mapping : 1;
+ };
+ u16 __flags;
+ };
+};
+
+/**
+ * struct drm_gpusvm_pages - Structure representing a GPU SVM mapped pages
+ *
+ * @dma_addr: Device address array
+ * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping.
+ * Note this is assuming only one drm_pagemap per range is allowed.
+ * @notifier_seq: Notifier sequence number of the range's pages
+ * @flags: Flags for the pages
+ * @flags.migrate_devmem: Flag indicating whether the pages can be migrated to device memory
+ * @flags.unmapped: Flag indicating if the pages have been unmapped
+ * @flags.partial_unmap: Flag indicating if the pages have been partially unmapped
+ * @flags.has_devmem_pages: Flag indicating if the pages have devmem pages
+ * @flags.has_dma_mapping: Flag indicating if the pages have a DMA mapping
+ */
+struct drm_gpusvm_pages {
+ struct drm_pagemap_addr *dma_addr;
+ struct drm_pagemap *dpagemap;
+ unsigned long notifier_seq;
+ struct drm_gpusvm_pages_flags flags;
+};
+
+/**
+ * struct drm_gpusvm_range - Structure representing a GPU SVM range
+ *
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @notifier: Pointer to the GPU SVM notifier
+ * @refcount: Reference count for the range
+ * @itree: Interval tree node for the range (inserted in GPU SVM notifier)
+ * @entry: List entry for fast interval tree traversal
+ * @pages: The pages for this range.
+ *
+ * This structure represents a GPU SVM range used for tracking memory ranges
+ * mapped in a DRM device.
+ */
+struct drm_gpusvm_range {
+ struct drm_gpusvm *gpusvm;
+ struct drm_gpusvm_notifier *notifier;
+ struct kref refcount;
+ struct interval_tree_node itree;
+ struct list_head entry;
+ struct drm_gpusvm_pages pages;
+};
+
+/**
+ * struct drm_gpusvm - GPU SVM structure
+ *
+ * @name: Name of the GPU SVM
+ * @drm: Pointer to the DRM device structure
+ * @mm: Pointer to the mm_struct for the address space
+ * @mm_start: Start address of GPU SVM
+ * @mm_range: Range of the GPU SVM
+ * @notifier_size: Size of individual notifiers
+ * @ops: Pointer to the operations structure for GPU SVM
+ * @chunk_sizes: Pointer to the array of chunk sizes used in range allocation.
+ * Entries should be powers of 2 in descending order.
+ * @num_chunks: Number of chunks
+ * @notifier_lock: Read-write semaphore for protecting notifier operations
+ * @root: Cached root node of the Red-Black tree containing GPU SVM notifiers
+ * @notifier_list: list head of notifiers in the same order they appear in
+ *                 the interval tree. This is useful for iterating over
+ *                 notifiers while modifying the RB tree.
+ *
+ * This structure represents a GPU SVM (Shared Virtual Memory) used for tracking
+ * memory ranges mapped in a DRM (Direct Rendering Manager) device.
+ *
+ * No reference counting is provided, as this is expected to be embedded in the
+ * driver VM structure along with the struct drm_gpuvm, which handles reference
+ * counting.
+ */
+struct drm_gpusvm {
+ const char *name;
+ struct drm_device *drm;
+ struct mm_struct *mm;
+ unsigned long mm_start;
+ unsigned long mm_range;
+ unsigned long notifier_size;
+ const struct drm_gpusvm_ops *ops;
+ const unsigned long *chunk_sizes;
+ int num_chunks;
+ struct rw_semaphore notifier_lock;
+ struct rb_root_cached root;
+ struct list_head notifier_list;
+#ifdef CONFIG_LOCKDEP
+ /**
+ * @lock_dep_map: Annotates drm_gpusvm_range_find_or_insert and
+ * drm_gpusvm_range_remove with a driver provided lock.
+ */
+ struct lockdep_map *lock_dep_map;
+#endif
+};
+
+/**
+ * struct drm_gpusvm_ctx - DRM GPU SVM context
+ *
+ * @device_private_page_owner: The device-private page owner to use for
+ * this operation
+ * @check_pages_threshold: Check whether CPU pages are present if the chunk
+ *                         size is less than or equal to the threshold. If not
+ *                         present, reduce the chunk size.
+ * @timeslice_ms: The minimum time, in milliseconds, that a piece of memory
+ *                remains with either exclusive GPU or CPU access.
+ * @in_notifier: entering from an MMU notifier
+ * @read_only: operating on read-only memory
+ * @devmem_possible: possible to use device memory
+ * @devmem_only: use only device memory
+ * @allow_mixed: Allow mixed mappings in get-pages. Mixing system memory with a
+ *               single dpagemap is supported; mixing multiple dpagemaps is
+ *               unsupported.
+ *
+ * Context that DRM GPUSVM is operating in (i.e. user arguments).
+ */
+struct drm_gpusvm_ctx {
+ void *device_private_page_owner;
+ unsigned long check_pages_threshold;
+ unsigned long timeslice_ms;
+ unsigned int in_notifier :1;
+ unsigned int read_only :1;
+ unsigned int devmem_possible :1;
+ unsigned int devmem_only :1;
+ unsigned int allow_mixed :1;
+};
+
+int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
+ const char *name, struct drm_device *drm,
+ struct mm_struct *mm,
+ unsigned long mm_start, unsigned long mm_range,
+ unsigned long notifier_size,
+ const struct drm_gpusvm_ops *ops,
+ const unsigned long *chunk_sizes, int num_chunks);
+
+void drm_gpusvm_fini(struct drm_gpusvm *gpusvm);
+
+void drm_gpusvm_free(struct drm_gpusvm *gpusvm);
+
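A hedged initialization sketch: chunk sizes are powers of two in descending order, and my_vm, my_gpusvm_ops and the notifier granularity are illustrative:

static const unsigned long my_chunk_sizes[] = { SZ_2M, SZ_64K, SZ_4K };
int err;

err = drm_gpusvm_init(&my_vm->gpusvm, "my-svm", drm, current->mm,
		      0, TASK_SIZE, SZ_512M /* notifier size */,
		      &my_gpusvm_ops, my_chunk_sizes,
		      ARRAY_SIZE(my_chunk_sizes));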
+unsigned long
+drm_gpusvm_find_vma_start(struct drm_gpusvm *gpusvm,
+ unsigned long start,
+ unsigned long end);
+
+struct drm_gpusvm_range *
+drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
+ unsigned long fault_addr,
+ unsigned long gpuva_start,
+ unsigned long gpuva_end,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range);
+
+int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range);
+
+struct drm_gpusvm_range *
+drm_gpusvm_range_get(struct drm_gpusvm_range *range);
+
+void drm_gpusvm_range_put(struct drm_gpusvm_range *range);
+
+bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range);
+
+int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx);
+
+bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
+ unsigned long end);
+
+struct drm_gpusvm_notifier *
+drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start,
+ unsigned long end);
+
+struct drm_gpusvm_range *
+drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
+ unsigned long end);
+
+void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
+ const struct mmu_notifier_range *mmu_range);
+
+int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ struct mm_struct *mm,
+ struct mmu_interval_notifier *notifier,
+ unsigned long pages_start, unsigned long pages_end,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages);
+
+#ifdef CONFIG_LOCKDEP
+/**
+ * drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
+ * @gpusvm: Pointer to the GPU SVM structure.
+ * @lock: the lock used to protect the gpuva list. The locking primitive
+ * must contain a dep_map field.
+ *
+ * Call this to annotate drm_gpusvm_range_find_or_insert and
+ * drm_gpusvm_range_remove.
+ */
+#define drm_gpusvm_driver_set_lock(gpusvm, lock) \
+ do { \
+ if (!WARN((gpusvm)->lock_dep_map, \
+ "GPUSVM range lock should be set only once."))\
+ (gpusvm)->lock_dep_map = &(lock)->dep_map; \
+ } while (0)
+#else
+#define drm_gpusvm_driver_set_lock(gpusvm, lock) do {} while (0)
+#endif
+
+/**
+ * drm_gpusvm_notifier_lock() - Lock GPU SVM notifier
+ * @gpusvm__: Pointer to the GPU SVM structure.
+ *
+ * Abstraction of the client's use of the GPU SVM notifier lock; takes the lock.
+ */
+#define drm_gpusvm_notifier_lock(gpusvm__) \
+ down_read(&(gpusvm__)->notifier_lock)
+
+/**
+ * drm_gpusvm_notifier_unlock() - Unlock GPU SVM notifier
+ * @gpusvm__: Pointer to the GPU SVM structure.
+ *
+ * Abstraction of the client's use of the GPU SVM notifier lock; drops the lock.
+ */
+#define drm_gpusvm_notifier_unlock(gpusvm__) \
+ up_read(&(gpusvm__)->notifier_lock)
+
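A hedged sketch of the intended pattern: validate a range's pages under the notifier lock before committing a GPU binding; my_gpu_bind_range() is hypothetical:

int err;

drm_gpusvm_notifier_lock(gpusvm);
if (drm_gpusvm_range_pages_valid(gpusvm, range))
	err = my_gpu_bind_range(range);
else
	err = -EAGAIN;	/* pages were invalidated; retry the fault */
drm_gpusvm_notifier_unlock(gpusvm);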
+/**
+ * drm_gpusvm_range_start() - GPU SVM range start address
+ * @range: Pointer to the GPU SVM range
+ *
+ * Return: GPU SVM range start address
+ */
+static inline unsigned long
+drm_gpusvm_range_start(struct drm_gpusvm_range *range)
+{
+ return range->itree.start;
+}
+
+/**
+ * drm_gpusvm_range_end() - GPU SVM range end address
+ * @range: Pointer to the GPU SVM range
+ *
+ * Return: GPU SVM range end address
+ */
+static inline unsigned long
+drm_gpusvm_range_end(struct drm_gpusvm_range *range)
+{
+ return range->itree.last + 1;
+}
+
+/**
+ * drm_gpusvm_range_size() - GPU SVM range size
+ * @range: Pointer to the GPU SVM range
+ *
+ * Return: GPU SVM range size
+ */
+static inline unsigned long
+drm_gpusvm_range_size(struct drm_gpusvm_range *range)
+{
+ return drm_gpusvm_range_end(range) - drm_gpusvm_range_start(range);
+}
+
+/**
+ * drm_gpusvm_notifier_start() - GPU SVM notifier start address
+ * @notifier: Pointer to the GPU SVM notifier
+ *
+ * Return: GPU SVM notifier start address
+ */
+static inline unsigned long
+drm_gpusvm_notifier_start(struct drm_gpusvm_notifier *notifier)
+{
+ return notifier->itree.start;
+}
+
+/**
+ * drm_gpusvm_notifier_end() - GPU SVM notifier end address
+ * @notifier: Pointer to the GPU SVM notifier
+ *
+ * Return: GPU SVM notifier end address
+ */
+static inline unsigned long
+drm_gpusvm_notifier_end(struct drm_gpusvm_notifier *notifier)
+{
+ return notifier->itree.last + 1;
+}
+
+/**
+ * drm_gpusvm_notifier_size() - GPU SVM notifier size
+ * @notifier: Pointer to the GPU SVM notifier
+ *
+ * Return: GPU SVM notifier size
+ */
+static inline unsigned long
+drm_gpusvm_notifier_size(struct drm_gpusvm_notifier *notifier)
+{
+ return drm_gpusvm_notifier_end(notifier) -
+ drm_gpusvm_notifier_start(notifier);
+}
+
+/**
+ * __drm_gpusvm_range_next() - Get the next GPU SVM range in the list
+ * @range: a pointer to the current GPU SVM range
+ *
+ * Return: A pointer to the next drm_gpusvm_range if available, or NULL if the
+ * current range is the last one or if the input range is NULL.
+ */
+static inline struct drm_gpusvm_range *
+__drm_gpusvm_range_next(struct drm_gpusvm_range *range)
+{
+ if (range && !list_is_last(&range->entry,
+ &range->notifier->range_list))
+ return list_next_entry(range, entry);
+
+ return NULL;
+}
+
+/**
+ * drm_gpusvm_for_each_range() - Iterate over GPU SVM ranges in a notifier
+ * @range__: Iterator variable for the ranges. If set, it indicates the start of
+ *           the iteration. If NULL, drm_gpusvm_range_find() is called to find
+ *           the first range.
+ * @notifier__: Pointer to the GPU SVM notifier
+ * @start__: Start address of the range
+ * @end__: End address of the range
+ *
+ * This macro is used to iterate over GPU SVM ranges in a notifier. It is safe
+ * to use while holding the driver SVM lock or the notifier lock.
+ */
+#define drm_gpusvm_for_each_range(range__, notifier__, start__, end__) \
+ for ((range__) = (range__) ?: \
+ drm_gpusvm_range_find((notifier__), (start__), (end__)); \
+ (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
+ (range__) = __drm_gpusvm_range_next(range__))
+
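Usage sketch: starting the iterator at NULL lets the macro look up the first range itself; my_inspect_range() is a hypothetical per-range hook:

struct drm_gpusvm_range *range = NULL;

drm_gpusvm_for_each_range(range, notifier, start, end)
	my_inspect_range(range);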
+/**
+ * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
+ * @range__: Iterator variable for the ranges
+ * @next__: Iterator variable for the ranges' temporary storage
+ * @notifier__: Pointer to the GPU SVM notifier
+ * @start__: Start address of the range
+ * @end__: End address of the range
+ *
+ * This macro is used to iterate over GPU SVM ranges in a notifier while
+ * removing ranges from it.
+ */
+#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
+ for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_range_next(range__); \
+ (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
+ (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
+
+/**
+ * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
+ * @notifier: a pointer to the current drm_gpusvm_notifier
+ *
+ * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
+ * the current notifier is the last one or if the input notifier is
+ * NULL.
+ */
+static inline struct drm_gpusvm_notifier *
+__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
+{
+ if (notifier && !list_is_last(&notifier->entry,
+ &notifier->gpusvm->notifier_list))
+ return list_next_entry(notifier, entry);
+
+ return NULL;
+}
+
+/**
+ * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
+ * @notifier__: Iterator variable for the notifiers
+ * @gpusvm__: Pointer to the GPU SVM structure
+ * @start__: Start address of the notifier
+ * @end__: End address of the notifier
+ *
+ * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
+ */
+#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = __drm_gpusvm_notifier_next(notifier__))
+
+/**
+ * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
+ * @notifier__: Iterator variable for the notifiers
+ * @next__: Iterator variable for the notifiers' temporary storage
+ * @gpusvm__: Pointer to the GPU SVM structure
+ * @start__: Start address of the notifier
+ * @end__: End address of the notifier
+ *
+ * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
+ * removing notifiers from it.
+ */
+#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_notifier_next(notifier__); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
+
+#endif /* __DRM_GPUSVM_H__ */
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
new file mode 100644
index 000000000000..fdfc575b2603
--- /dev/null
+++ b/include/drm/drm_gpuvm.h
@@ -0,0 +1,1304 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+
+#ifndef __DRM_GPUVM_H__
+#define __DRM_GPUVM_H__
+
+/*
+ * Copyright (c) 2022 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-resv.h>
+#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_exec.h>
+
+struct drm_gpuvm;
+struct drm_gpuvm_bo;
+struct drm_gpuvm_ops;
+
+/**
+ * enum drm_gpuva_flags - flags for struct drm_gpuva
+ */
+enum drm_gpuva_flags {
+ /**
+ * @DRM_GPUVA_INVALIDATED:
+ *
+ * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
+ */
+ DRM_GPUVA_INVALIDATED = (1 << 0),
+
+ /**
+ * @DRM_GPUVA_SPARSE:
+ *
+ * Flag indicating that the &drm_gpuva is a sparse mapping.
+ */
+ DRM_GPUVA_SPARSE = (1 << 1),
+
+ /**
+ * @DRM_GPUVA_USERBITS: user defined bits
+ */
+ DRM_GPUVA_USERBITS = (1 << 2),
+};
+
+/**
+ * struct drm_gpuva - structure to track a GPU VA mapping
+ *
+ * This structure represents a GPU VA mapping and is associated with a
+ * &drm_gpuvm.
+ *
+ * Typically, this structure is embedded in bigger driver structures.
+ */
+struct drm_gpuva {
+ /**
+ * @vm: the &drm_gpuvm this object is associated with
+ */
+ struct drm_gpuvm *vm;
+
+ /**
+ * @vm_bo: the &drm_gpuvm_bo abstraction for the mapped
+ * &drm_gem_object
+ */
+ struct drm_gpuvm_bo *vm_bo;
+
+ /**
+ * @flags: the &drm_gpuva_flags for this mapping
+ */
+ enum drm_gpuva_flags flags;
+
+ /**
+ * @va: structure containing the address and range of the &drm_gpuva
+ */
+ struct {
+ /**
+ * @va.addr: the start address
+ */
+ u64 addr;
+
+		/**
+		 * @va.range: the range
+		 */
+ u64 range;
+ } va;
+
+ /**
+ * @gem: structure containing the &drm_gem_object and its offset
+ */
+ struct {
+ /**
+ * @gem.offset: the offset within the &drm_gem_object
+ */
+ u64 offset;
+
+ /**
+ * @gem.obj: the mapped &drm_gem_object
+ */
+ struct drm_gem_object *obj;
+
+ /**
+ * @gem.entry: the &list_head to attach this object to a &drm_gpuvm_bo
+ */
+ struct list_head entry;
+ } gem;
+
+ /**
+ * @rb: structure containing data to store &drm_gpuvas in a rb-tree
+ */
+ struct {
+ /**
+ * @rb.node: the rb-tree node
+ */
+ struct rb_node node;
+
+ /**
+ * @rb.entry: The &list_head to additionally connect &drm_gpuvas
+ * in the same order they appear in the interval tree. This is
+ * useful to keep iterating &drm_gpuvas from a start node found
+ * through the rb-tree while doing modifications on the rb-tree
+ * itself.
+ */
+ struct list_head entry;
+
+ /**
+ * @rb.__subtree_last: needed by the interval tree, holding last-in-subtree
+ */
+ u64 __subtree_last;
+ } rb;
+};
+
+int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
+void drm_gpuva_remove(struct drm_gpuva *va);
+
+void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
+void drm_gpuva_unlink(struct drm_gpuva *va);
+void drm_gpuva_unlink_defer(struct drm_gpuva *va);
+
+struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
+ u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
+ u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
+struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);
+
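+/*
+ * A minimal usage sketch (not taken from an in-tree driver): inserting a new
+ * mapping into a GPUVM and linking it to its backing GEM object. The
+ * "foo_"-prefixed names and the surrounding context are hypothetical.
+ *
+ *	struct drm_gpuva *va = &foo_va->va;
+ *	int ret;
+ *
+ *	va->va.addr = addr;
+ *	va->va.range = range;
+ *	va->gem.obj = obj;
+ *	va->gem.offset = offset;
+ *
+ *	ret = drm_gpuva_insert(&foo_vm->gpuvm, va);
+ *	if (ret)
+ *		return ret;
+ *
+ *	drm_gpuva_link(va, vm_bo);
+ */
+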
+/**
+ * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
+ * invalidated
+ * @va: the &drm_gpuva to set the invalidate flag for
+ * @invalidate: indicates whether the &drm_gpuva is invalidated
+ */
+static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
+{
+ if (invalidate)
+ va->flags |= DRM_GPUVA_INVALIDATED;
+ else
+ va->flags &= ~DRM_GPUVA_INVALIDATED;
+}
+
+/**
+ * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
+ * is invalidated
+ * @va: the &drm_gpuva to check
+ *
+ * Returns: %true if the GPU VA is invalidated, %false otherwise
+ */
+static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
+{
+ return va->flags & DRM_GPUVA_INVALIDATED;
+}
+
+/**
+ * enum drm_gpuvm_flags - flags for struct drm_gpuvm
+ */
+enum drm_gpuvm_flags {
+ /**
+ * @DRM_GPUVM_RESV_PROTECTED: GPUVM is protected externally by the
+ * GPUVM's &dma_resv lock
+ */
+ DRM_GPUVM_RESV_PROTECTED = BIT(0),
+
+ /**
+ * @DRM_GPUVM_IMMEDIATE_MODE: use the locking scheme for GEMs designed
+ * for modifying the GPUVM during the fence signalling path
+ *
+ * When set, gpuva.lock is used to protect gpuva.list in all GEM
+	 * objects associated with this GPUVM. Otherwise, the GEM's dma-resv is
+ * used.
+ */
+ DRM_GPUVM_IMMEDIATE_MODE = BIT(1),
+
+ /**
+ * @DRM_GPUVM_USERBITS: user defined bits
+ */
+ DRM_GPUVM_USERBITS = BIT(2),
+};
+
+/**
+ * struct drm_gpuvm - DRM GPU VA Manager
+ *
+ * The DRM GPU VA Manager keeps track of a GPU's virtual address space using
+ * an interval tree (rb-tree) of &drm_gpuva structures. Typically, this
+ * structure is embedded in bigger driver structures.
+ *
+ * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
+ * pages.
+ *
+ * There should be one manager instance per GPU virtual address space.
+ */
+struct drm_gpuvm {
+ /**
+ * @name: the name of the DRM GPU VA space
+ */
+ const char *name;
+
+ /**
+ * @flags: the &drm_gpuvm_flags of this GPUVM
+ */
+ enum drm_gpuvm_flags flags;
+
+ /**
+ * @drm: the &drm_device this VM lives in
+ */
+ struct drm_device *drm;
+
+ /**
+ * @mm_start: start of the VA space
+ */
+ u64 mm_start;
+
+ /**
+ * @mm_range: length of the VA space
+ */
+ u64 mm_range;
+
+ /**
+ * @rb: structures to track &drm_gpuva entries
+ */
+ struct {
+ /**
+ * @rb.tree: the rb-tree to track GPU VA mappings
+ */
+ struct rb_root_cached tree;
+
+ /**
+ * @rb.list: the &list_head to track GPU VA mappings
+ */
+ struct list_head list;
+ } rb;
+
+ /**
+ * @kref: reference count of this object
+ */
+ struct kref kref;
+
+ /**
+ * @kernel_alloc_node:
+ *
+ * &drm_gpuva representing the address space cutout reserved for
+ * the kernel
+ */
+ struct drm_gpuva kernel_alloc_node;
+
+ /**
+ * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers
+ */
+ const struct drm_gpuvm_ops *ops;
+
+ /**
+ * @r_obj: Resv GEM object; representing the GPUVM's common &dma_resv.
+ */
+ struct drm_gem_object *r_obj;
+
+ /**
+ * @extobj: structure holding the extobj list
+ */
+ struct {
+ /**
+ * @extobj.list: &list_head storing &drm_gpuvm_bos serving as
+		 * external objects
+ */
+ struct list_head list;
+
+ /**
+ * @extobj.local_list: pointer to the local list temporarily
+ * storing entries from the external object list
+ */
+ struct list_head *local_list;
+
+ /**
+ * @extobj.lock: spinlock to protect the extobj list
+ */
+ spinlock_t lock;
+ } extobj;
+
+ /**
+ * @evict: structure holding the evict list and evict list lock
+ */
+ struct {
+ /**
+ * @evict.list: &list_head storing &drm_gpuvm_bos currently
+ * being evicted
+ */
+ struct list_head list;
+
+ /**
+ * @evict.local_list: pointer to the local list temporarily
+ * storing entries from the evicted object list
+ */
+ struct list_head *local_list;
+
+ /**
+ * @evict.lock: spinlock to protect the evict list
+ */
+ spinlock_t lock;
+ } evict;
+
+ /**
+ * @bo_defer: structure holding vm_bos that need to be destroyed
+ */
+ struct llist_head bo_defer;
+};
+
+void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
+ enum drm_gpuvm_flags flags,
+ struct drm_device *drm,
+ struct drm_gem_object *r_obj,
+ u64 start_offset, u64 range,
+ u64 reserve_offset, u64 reserve_range,
+ const struct drm_gpuvm_ops *ops);
+
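+/*
+ * A minimal initialization sketch, assuming a hypothetical "foo" driver that
+ * embeds struct drm_gpuvm in its own VM structure; all "foo_"-prefixed names
+ * are illustrative only. The &drm_gpuvm_ops must provide at least the
+ * mandatory vm_free callback. drm_gpuvm_init() takes its own reference on
+ * the resv object, hence the drm_gem_object_put() below.
+ *
+ *	struct foo_vm {
+ *		struct drm_gpuvm gpuvm;
+ *	};
+ *
+ *	static const struct drm_gpuvm_ops foo_gpuvm_ops = {
+ *		.vm_free = foo_vm_free,
+ *	};
+ *
+ *	int foo_vm_init(struct foo_vm *vm, struct drm_device *drm,
+ *			u64 start, u64 range)
+ *	{
+ *		struct drm_gem_object *r_obj;
+ *
+ *		r_obj = drm_gpuvm_resv_object_alloc(drm);
+ *		if (!r_obj)
+ *			return -ENOMEM;
+ *
+ *		drm_gpuvm_init(&vm->gpuvm, "foo", 0, drm, r_obj,
+ *			       start, range, 0, 0, &foo_gpuvm_ops);
+ *		drm_gem_object_put(r_obj);
+ *
+ *		return 0;
+ *	}
+ */
+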
+/**
+ * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
+ * @gpuvm: the &drm_gpuvm to acquire the reference of
+ *
+ * This function acquires an additional reference to @gpuvm. It is illegal to
+ * call this without already holding a reference. No locks required.
+ *
+ * Returns: the &struct drm_gpuvm pointer
+ */
+static inline struct drm_gpuvm *
+drm_gpuvm_get(struct drm_gpuvm *gpuvm)
+{
+ kref_get(&gpuvm->kref);
+
+ return gpuvm;
+}
+
+void drm_gpuvm_put(struct drm_gpuvm *gpuvm);
+
+bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
+bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
+
+struct drm_gem_object *
+drm_gpuvm_resv_object_alloc(struct drm_device *drm);
+
+/**
+ * drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is
+ * set
+ * @gpuvm: the &drm_gpuvm
+ *
+ * Returns: true if &DRM_GPUVM_RESV_PROTECTED is set, false otherwise.
+ */
+static inline bool
+drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
+{
+ return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED;
+}
+
+/**
+ * drm_gpuvm_immediate_mode() - indicates whether &DRM_GPUVM_IMMEDIATE_MODE is
+ * set
+ * @gpuvm: the &drm_gpuvm
+ *
+ * Returns: true if &DRM_GPUVM_IMMEDIATE_MODE is set, false otherwise.
+ */
+static inline bool
+drm_gpuvm_immediate_mode(struct drm_gpuvm *gpuvm)
+{
+ return gpuvm->flags & DRM_GPUVM_IMMEDIATE_MODE;
+}
+
+/**
+ * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
+ * @gpuvm__: the &drm_gpuvm
+ *
+ * Returns: a pointer to the &drm_gpuvm's shared &dma_resv
+ */
+#define drm_gpuvm_resv(gpuvm__) ((gpuvm__)->r_obj->resv)
+
+/**
+ * drm_gpuvm_resv_obj() - returns the &drm_gem_object holding the &drm_gpuvm's
+ * &dma_resv
+ * @gpuvm__: the &drm_gpuvm
+ *
+ * Returns: a pointer to the &drm_gem_object holding the &drm_gpuvm's shared
+ * &dma_resv
+ */
+#define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj)
+
+#define drm_gpuvm_resv_held(gpuvm__) \
+ dma_resv_held(drm_gpuvm_resv(gpuvm__))
+
+#define drm_gpuvm_resv_assert_held(gpuvm__) \
+ dma_resv_assert_held(drm_gpuvm_resv(gpuvm__))
+
+/**
+ * drm_gpuvm_is_extobj() - indicates whether the given &drm_gem_object is an
+ * external object
+ * @gpuvm: the &drm_gpuvm to check
+ * @obj: the &drm_gem_object to check
+ *
+ * Returns: true if the &drm_gem_object's &dma_resv differs from the
+ * &drm_gpuvm's &dma_resv, false otherwise
+ */
+static inline bool
+drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj)
+{
+ return obj && obj->resv != drm_gpuvm_resv(gpuvm);
+}
+
+static inline struct drm_gpuva *
+__drm_gpuva_next(struct drm_gpuva *va)
+{
+ if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
+ return list_next_entry(va, rb.entry);
+
+ return NULL;
+}
+
+/**
+ * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @gpuvm__: &drm_gpuvm to walk over
+ * @start__: starting offset, the first gpuva will overlap this
+ * @end__: ending offset, the last gpuva will start before this (but may
+ * overlap)
+ *
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
+ * between @start__ and @end__. It is implemented similarly to list_for_each(),
+ * but uses the &drm_gpuvm's internal interval tree to accelerate
+ * the search for the starting &drm_gpuva, and hence isn't safe against removal
+ * of elements. It assumes that @end__ is within (or is the upper limit of) the
+ * &drm_gpuvm. This iterator does not skip over the &drm_gpuvm's
+ * @kernel_alloc_node.
+ */
+#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \
+ for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \
+ va__ && (va__->va.addr < (end__)); \
+ va__ = __drm_gpuva_next(va__))
+
+/**
+ * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of
+ * &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @gpuvm__: &drm_gpuvm to walk over
+ * @start__: starting offset, the first gpuva will overlap this
+ * @end__: ending offset, the last gpuva will start before this (but may
+ * overlap)
+ *
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
+ * between @start__ and @end__. It is implemented similarly to
+ * list_for_each_safe(), but uses the &drm_gpuvm's internal interval
+ * tree to accelerate the search for the starting &drm_gpuva, and hence is safe
+ * against removal of elements. It assumes that @end__ is within (or is the
+ * upper limit of) the &drm_gpuvm. This iterator does not skip over the
+ * &drm_gpuvm's @kernel_alloc_node.
+ */
+#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \
+ for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \
+ next__ = __drm_gpuva_next(va__); \
+ va__ && (va__->va.addr < (end__)); \
+ va__ = next__, next__ = __drm_gpuva_next(va__))
+
+/**
+ * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @gpuvm__: &drm_gpuvm to walk over
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the given
+ * &drm_gpuvm.
+ */
+#define drm_gpuvm_for_each_va(va__, gpuvm__) \
+ list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry)
+
+/**
+ * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @gpuvm__: &drm_gpuvm to walk over
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the given
+ * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and
+ * hence safe against the removal of elements.
+ */
+#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \
+ list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)
+
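+/*
+ * A usage sketch for the iterators above, assuming a hypothetical "foo_vm"
+ * that embeds a &drm_gpuvm. Tearing down all mappings requires the _safe
+ * variant, since entries are removed while iterating.
+ *
+ *	struct drm_gpuva *va, *next;
+ *
+ *	drm_gpuvm_for_each_va_safe(va, next, &foo_vm->gpuvm) {
+ *		drm_gpuva_unlink(va);
+ *		drm_gpuva_remove(va);
+ *	}
+ */
+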
+/**
+ * struct drm_gpuvm_exec - &drm_gpuvm abstraction of &drm_exec
+ *
+ * This structure should be created on the stack as &drm_exec should be.
+ *
+ * Optionally, @extra can be set in order to lock additional &drm_gem_objects.
+ */
+struct drm_gpuvm_exec {
+ /**
+ * @exec: the &drm_exec structure
+ */
+ struct drm_exec exec;
+
+ /**
+ * @flags: the flags for the struct drm_exec
+ */
+ u32 flags;
+
+ /**
+ * @vm: the &drm_gpuvm to lock its DMA reservations
+ */
+ struct drm_gpuvm *vm;
+
+ /**
+ * @num_fences: the number of fences to reserve for the &dma_resv of the
+ * locked &drm_gem_objects
+ */
+ unsigned int num_fences;
+
+ /**
+ * @extra: Callback and corresponding private data for the driver to
+ * lock arbitrary additional &drm_gem_objects.
+ */
+ struct {
+ /**
+ * @extra.fn: The driver callback to lock additional
+ * &drm_gem_objects.
+ */
+ int (*fn)(struct drm_gpuvm_exec *vm_exec);
+
+ /**
+ * @extra.priv: driver private data for the @fn callback
+ */
+ void *priv;
+ } extra;
+};
+
+int drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences);
+
+int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ unsigned int num_fences);
+
+int drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ u64 addr, u64 range,
+ unsigned int num_fences);
+
+int drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec);
+
+int drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
+ struct drm_gem_object **objs,
+ unsigned int num_objs);
+
+int drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
+ u64 addr, u64 range);
+
+/**
+ * drm_gpuvm_exec_unlock() - unlock all dma-resv of all associated BOs
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ *
+ * Releases all dma-resv locks of all &drm_gem_objects previously acquired
+ * through drm_gpuvm_exec_lock() or its variants.
+ */
+static inline void
+drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec)
+{
+ drm_exec_fini(&vm_exec->exec);
+}
+
+int drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec);
+void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec,
+ struct dma_fence *fence,
+ enum dma_resv_usage private_usage,
+ enum dma_resv_usage extobj_usage);
+
+/**
+ * drm_gpuvm_exec_resv_add_fence() - add fence to private and all extobj
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ * @fence: fence to add
+ * @private_usage: private dma-resv usage
+ * @extobj_usage: extobj dma-resv usage
+ *
+ * See drm_gpuvm_resv_add_fence().
+ */
+static inline void
+drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec,
+ struct dma_fence *fence,
+ enum dma_resv_usage private_usage,
+ enum dma_resv_usage extobj_usage)
+{
+ drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence,
+ private_usage, extobj_usage);
+}
+
+/**
+ * drm_gpuvm_exec_validate() - validate all BOs marked as evicted
+ * @vm_exec: the &drm_gpuvm_exec wrapper
+ *
+ * See drm_gpuvm_validate().
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+static inline int
+drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec)
+{
+ return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
+}
+
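+/*
+ * A locking sketch around job submission, assuming a hypothetical "foo_vm";
+ * the flag and fence-usage choices are illustrative, not prescriptive.
+ *
+ *	struct drm_gpuvm_exec vm_exec = {
+ *		.vm = &foo_vm->gpuvm,
+ *		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
+ *		.num_fences = 1,
+ *	};
+ *	int ret;
+ *
+ *	ret = drm_gpuvm_exec_lock(&vm_exec);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = drm_gpuvm_exec_validate(&vm_exec);
+ *	if (ret)
+ *		goto out_unlock;
+ *
+ *	fence = foo_submit_job();
+ *	drm_gpuvm_exec_resv_add_fence(&vm_exec, fence,
+ *				      DMA_RESV_USAGE_BOOKKEEP,
+ *				      DMA_RESV_USAGE_BOOKKEEP);
+ *
+ * out_unlock:
+ *	drm_gpuvm_exec_unlock(&vm_exec);
+ *	return ret;
+ */
+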
+/**
+ * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and
+ * &drm_gem_object combination
+ *
+ * This structure is an abstraction representing a &drm_gpuvm and
+ * &drm_gem_object combination. It serves as an indirection to accelerate
+ * iterating all &drm_gpuvas within a &drm_gpuvm backed by the same
+ * &drm_gem_object.
+ *
+ * Furthermore, it is used to cache evicted GEM objects for a certain GPU-VM
+ * to accelerate validation.
+ *
+ * Typically, drivers want to create an instance of a struct drm_gpuvm_bo once
+ * a GEM object is first mapped in a GPU-VM and release the instance once the
+ * last mapping of the GEM object in this GPU-VM is unmapped.
+ */
+struct drm_gpuvm_bo {
+ /**
+ * @vm: The &drm_gpuvm the @obj is mapped in. This is a reference
+ * counted pointer.
+ */
+ struct drm_gpuvm *vm;
+
+ /**
+ * @obj: The &drm_gem_object being mapped in @vm. This is a reference
+ * counted pointer.
+ */
+ struct drm_gem_object *obj;
+
+ /**
+ * @evicted: Indicates whether the &drm_gem_object is evicted; field
+ * protected by the &drm_gem_object's dma-resv lock.
+ */
+ bool evicted;
+
+ /**
+ * @kref: The reference count for this &drm_gpuvm_bo.
+ */
+ struct kref kref;
+
+ /**
+ * @list: Structure containing all &list_heads.
+ */
+ struct {
+ /**
+ * @list.gpuva: The list of linked &drm_gpuvas.
+ *
+ * It is safe to access entries from this list as long as the
+ * GEM's gpuva lock is held. See also struct drm_gem_object.
+ */
+ struct list_head gpuva;
+
+ /**
+ * @list.entry: Structure containing all &list_heads serving as
+ * entry.
+ */
+ struct {
+ /**
+ * @list.entry.gem: List entry to attach to the
+			 * &drm_gem_object's gpuva list.
+ */
+ struct list_head gem;
+
+ /**
+			 * @list.entry.extobj: List entry to attach to the
+			 * &drm_gpuvm's extobj list.
+ */
+ struct list_head extobj;
+
+ /**
+ * @list.entry.evict: List entry to attach to the
+			 * &drm_gpuvm's evict list.
+ */
+ struct list_head evict;
+
+ /**
+ * @list.entry.bo_defer: List entry to attach to
+			 * the &drm_gpuvm's bo_defer list.
+ */
+ struct llist_node bo_defer;
+ } entry;
+ } list;
+};
+
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj);
+
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj);
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);
+
+/**
+ * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
+ * @vm_bo: the &drm_gpuvm_bo to acquire the reference of
+ *
+ * This function acquires an additional reference to @vm_bo. It is illegal to
+ * call this without already holding a reference. No locks required.
+ *
+ * Returns: the &struct drm_gpuvm_bo pointer
+ */
+static inline struct drm_gpuvm_bo *
+drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
+{
+ kref_get(&vm_bo->kref);
+ return vm_bo;
+}
+
+bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);
+
+bool drm_gpuvm_bo_put_deferred(struct drm_gpuvm_bo *vm_bo);
+void drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm);
+
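+/*
+ * A lifecycle sketch, assuming a hypothetical driver mapping a GEM object:
+ * the &drm_gpuvm_bo is obtained (created or referenced) before linking, and
+ * the local reference is dropped afterwards, since drm_gpuva_link() takes
+ * its own reference.
+ *
+ *	struct drm_gpuvm_bo *vm_bo;
+ *
+ *	vm_bo = drm_gpuvm_bo_obtain(&foo_vm->gpuvm, obj);
+ *	if (IS_ERR(vm_bo))
+ *		return PTR_ERR(vm_bo);
+ *
+ *	drm_gpuva_link(va, vm_bo);
+ *	drm_gpuvm_bo_put(vm_bo);
+ */
+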
+struct drm_gpuvm_bo *
+drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj);
+
+void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict);
+
+/**
+ * drm_gpuvm_bo_gem_evict() - add/remove all &drm_gpuvm_bo's in the list
+ * to/from the &drm_gpuvm's evict list
+ * @obj: the &drm_gem_object
+ * @evict: indicates whether @obj is evicted
+ *
+ * See drm_gpuvm_bo_evict().
+ */
+static inline void
+drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
+{
+ struct drm_gpuvm_bo *vm_bo;
+
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+ drm_gem_gpuva_assert_lock_held(vm_bo->vm, obj);
+ drm_gpuvm_bo_evict(vm_bo, evict);
+ }
+}
+
+void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);
+
+/**
+ * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gpuvm_bo.
+ *
+ * The caller must hold the GEM's gpuva lock.
+ */
+#define drm_gpuvm_bo_for_each_va(va__, vm_bo__) \
+	list_for_each_entry(va__, &(vm_bo__)->list.gpuva, gem.entry)
+
+/**
+ * drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of
+ * &drm_gpuva
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gpuvm_bo. It is implemented with list_for_each_entry_safe(), hence
+ * it is safe against removal of elements.
+ *
+ * The caller must hold the GEM's gpuva lock.
+ */
+#define drm_gpuvm_bo_for_each_va_safe(va__, next__, vm_bo__) \
+	list_for_each_entry_safe(va__, next__, &(vm_bo__)->list.gpuva, gem.entry)
+
+/**
+ * enum drm_gpuva_op_type - GPU VA operation type
+ *
+ * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
+ */
+enum drm_gpuva_op_type {
+ /**
+ * @DRM_GPUVA_OP_MAP: the map op type
+ */
+ DRM_GPUVA_OP_MAP,
+
+ /**
+ * @DRM_GPUVA_OP_REMAP: the remap op type
+ */
+ DRM_GPUVA_OP_REMAP,
+
+ /**
+ * @DRM_GPUVA_OP_UNMAP: the unmap op type
+ */
+ DRM_GPUVA_OP_UNMAP,
+
+ /**
+ * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
+ */
+ DRM_GPUVA_OP_PREFETCH,
+
+ /**
+ * @DRM_GPUVA_OP_DRIVER: the driver defined op type
+ */
+ DRM_GPUVA_OP_DRIVER,
+};
+
+/**
+ * struct drm_gpuva_op_map - GPU VA map operation
+ *
+ * This structure represents a single map operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_map {
+ /**
+ * @va: structure containing address and range of a map
+ * operation
+ */
+ struct {
+ /**
+ * @va.addr: the base address of the new mapping
+ */
+ u64 addr;
+
+ /**
+ * @va.range: the range of the new mapping
+ */
+ u64 range;
+ } va;
+
+ /**
+ * @gem: structure containing the &drm_gem_object and its offset
+ */
+ struct {
+ /**
+ * @gem.offset: the offset within the &drm_gem_object
+ */
+ u64 offset;
+
+ /**
+ * @gem.obj: the &drm_gem_object to map
+ */
+ struct drm_gem_object *obj;
+ } gem;
+};
+
+/**
+ * struct drm_gpuva_op_unmap - GPU VA unmap operation
+ *
+ * This structure represents a single unmap operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_unmap {
+ /**
+ * @va: the &drm_gpuva to unmap
+ */
+ struct drm_gpuva *va;
+
+ /**
+ * @keep:
+ *
+ * Indicates whether this &drm_gpuva is physically contiguous with the
+ * original mapping request.
+ *
+ * Optionally, if &keep is set, drivers may keep the actual page table
+	 * mappings for this &drm_gpuva, adding only the missing page table
+	 * entries and updating the &drm_gpuvm accordingly.
+ */
+ bool keep;
+};
+
+/**
+ * struct drm_gpuva_op_remap - GPU VA remap operation
+ *
+ * This represents a single remap operation generated by the DRM GPU VA manager.
+ *
+ * A remap operation is generated when an existing GPU VA mapping is split up
+ * by inserting a new GPU VA mapping or by partially unmapping existing
+ * mapping(s), hence it consists of a maximum of two map and one unmap
+ * operation.
+ *
+ * The @unmap operation takes care of removing the original existing mapping.
+ * @prev is used to remap the preceding part, @next the subsequent part.
+ *
+ * If the new mapping's start address matches the old mapping's start address,
+ * @prev is NULL; similarly, if the new mapping's end address matches the old
+ * mapping's end address, @next is NULL.
+ *
+ * Note, the reason for a dedicated remap operation, rather than arbitrary
+ * unmap and map operations, is to give drivers the chance of extracting driver
+ * specific data for creating the new mappings from the unmap operation's
+ * &drm_gpuva structure which typically is embedded in larger driver specific
+ * structures.
+ */
+struct drm_gpuva_op_remap {
+ /**
+ * @prev: the preceding part of a split mapping
+ */
+ struct drm_gpuva_op_map *prev;
+
+ /**
+ * @next: the subsequent part of a split mapping
+ */
+ struct drm_gpuva_op_map *next;
+
+ /**
+ * @unmap: the unmap operation for the original existing mapping
+ */
+ struct drm_gpuva_op_unmap *unmap;
+};
+
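+/*
+ * Example: partially unmapping [0x1000, 0x2000) out of an existing mapping
+ * [0x0000, 0x3000) generates a single remap operation, where @unmap covers
+ * the original mapping, @prev covers [0x0000, 0x1000) and @next covers
+ * [0x2000, 0x3000).
+ */
+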
+/**
+ * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
+ *
+ * This structure represents a single prefetch operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_prefetch {
+ /**
+ * @va: the &drm_gpuva to prefetch
+ */
+ struct drm_gpuva *va;
+};
+
+/**
+ * struct drm_gpuva_op - GPU VA operation
+ *
+ * This structure represents a single generic operation.
+ *
+ * The particular type of the operation is defined by @op.
+ */
+struct drm_gpuva_op {
+ /**
+ * @entry:
+ *
+ * The &list_head used to distribute instances of this struct within
+ * &drm_gpuva_ops.
+ */
+ struct list_head entry;
+
+ /**
+ * @op: the type of the operation
+ */
+ enum drm_gpuva_op_type op;
+
+ union {
+ /**
+ * @map: the map operation
+ */
+ struct drm_gpuva_op_map map;
+
+ /**
+ * @remap: the remap operation
+ */
+ struct drm_gpuva_op_remap remap;
+
+ /**
+ * @unmap: the unmap operation
+ */
+ struct drm_gpuva_op_unmap unmap;
+
+ /**
+ * @prefetch: the prefetch operation
+ */
+ struct drm_gpuva_op_prefetch prefetch;
+ };
+};
+
+/**
+ * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
+ */
+struct drm_gpuva_ops {
+ /**
+ * @list: the &list_head
+ */
+ struct list_head list;
+};
+
+/**
+ * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations.
+ */
+#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @next: &next &drm_gpuva_op to store the next step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations. It is
+ * implemented with list_for_each_entry_safe(), and hence safe against removal
+ * of elements.
+ */
+#define drm_gpuva_for_each_op_safe(op, next, ops) \
+ list_for_each_entry_safe(op, next, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations beginning
+ * from the given operation in reverse order.
+ */
+#define drm_gpuva_for_each_op_from_reverse(op, ops) \
+ list_for_each_entry_from_reverse(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_reverse() - iterator to walk over &drm_gpuva_ops in reverse
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations in
+ * reverse order.
+ */
+#define drm_gpuva_for_each_op_reverse(op, ops) \
+ list_for_each_entry_reverse(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
+ * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
+ */
+#define drm_gpuva_first_op(ops) \
+ list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)
+
+/**
+ * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
+ * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
+ */
+#define drm_gpuva_last_op(ops) \
+ list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)
+
+/**
+ * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
+ * @op: the current &drm_gpuva_op
+ */
+#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)
+
+/**
+ * drm_gpuva_next_op() - next &drm_gpuva_op in the list
+ * @op: the current &drm_gpuva_op
+ */
+#define drm_gpuva_next_op(op) list_next_entry(op, entry)
+
+/**
+ * struct drm_gpuvm_map_req - arguments passed to drm_gpuvm_sm_map[_ops_create]()
+ */
+struct drm_gpuvm_map_req {
+ /**
+	 * @map: the &drm_gpuva_op_map describing the requested mapping
+ */
+ struct drm_gpuva_op_map map;
+};
+
+struct drm_gpuva_ops *
+drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_map_req *req);
+struct drm_gpuva_ops *
+drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_map_req *req);
+
+struct drm_gpuva_ops *
+drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
+ u64 addr, u64 range);
+
+struct drm_gpuva_ops *
+drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
+ u64 addr, u64 range);
+
+struct drm_gpuva_ops *
+drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo);
+
+void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
+ struct drm_gpuva_ops *ops);
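+
+/*
+ * A processing sketch for the ops interface above, assuming a hypothetical
+ * "foo_vm" and a previously set up &drm_gpuvm_map_req:
+ *
+ *	struct drm_gpuva_ops *ops;
+ *	struct drm_gpuva_op *op;
+ *
+ *	ops = drm_gpuvm_sm_map_ops_create(&foo_vm->gpuvm, &req);
+ *	if (IS_ERR(ops))
+ *		return PTR_ERR(ops);
+ *
+ *	drm_gpuva_for_each_op(op, ops) {
+ *		switch (op->op) {
+ *		case DRM_GPUVA_OP_MAP:
+ *			// program page tables, insert and link the gpuva
+ *			break;
+ *		case DRM_GPUVA_OP_REMAP:
+ *			// split up the existing mapping
+ *			break;
+ *		case DRM_GPUVA_OP_UNMAP:
+ *			// tear down the existing mapping
+ *			break;
+ *		default:
+ *			break;
+ *		}
+ *	}
+ *
+ *	drm_gpuva_ops_free(&foo_vm->gpuvm, ops);
+ */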
+
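+/**
+ * drm_gpuva_init_from_op() - initialize a &drm_gpuva from a &drm_gpuva_op_map
+ * @va: the &drm_gpuva to initialize
+ * @op: the &drm_gpuva_op_map to take the address, range, GEM object and
+ * offset from
+ */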
+static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
+ struct drm_gpuva_op_map *op)
+{
+ va->va.addr = op->va.addr;
+ va->va.range = op->va.range;
+ va->gem.obj = op->gem.obj;
+ va->gem.offset = op->gem.offset;
+}
+
+/**
+ * struct drm_gpuvm_ops - callbacks for split/merge steps
+ *
+ * This structure defines the callbacks used by &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap
+ * operations to drivers.
+ */
+struct drm_gpuvm_ops {
+ /**
+ * @vm_free: called when the last reference of a struct drm_gpuvm is
+ * dropped
+ *
+ * This callback is mandatory.
+ */
+ void (*vm_free)(struct drm_gpuvm *gpuvm);
+
+ /**
+ * @op_alloc: called when the &drm_gpuvm allocates
+ * a struct drm_gpuva_op
+ *
+ * Some drivers may want to embed struct drm_gpuva_op into driver
+ * specific structures. By implementing this callback drivers can
+ * allocate memory accordingly.
+ *
+ * This callback is optional.
+ */
+ struct drm_gpuva_op *(*op_alloc)(void);
+
+ /**
+ * @op_free: called when the &drm_gpuvm frees a
+ * struct drm_gpuva_op
+ *
+ * Some drivers may want to embed struct drm_gpuva_op into driver
+ * specific structures. By implementing this callback drivers can
+ * free the previously allocated memory accordingly.
+ *
+ * This callback is optional.
+ */
+ void (*op_free)(struct drm_gpuva_op *op);
+
+ /**
+ * @vm_bo_alloc: called when the &drm_gpuvm allocates
+ * a struct drm_gpuvm_bo
+ *
+ * Some drivers may want to embed struct drm_gpuvm_bo into driver
+ * specific structures. By implementing this callback drivers can
+ * allocate memory accordingly.
+ *
+ * This callback is optional.
+ */
+ struct drm_gpuvm_bo *(*vm_bo_alloc)(void);
+
+ /**
+ * @vm_bo_free: called when the &drm_gpuvm frees a
+ * struct drm_gpuvm_bo
+ *
+ * Some drivers may want to embed struct drm_gpuvm_bo into driver
+ * specific structures. By implementing this callback drivers can
+ * free the previously allocated memory accordingly.
+ *
+ * This callback is optional.
+ */
+ void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo);
+
+ /**
+ * @vm_bo_validate: called from drm_gpuvm_validate()
+ *
+ * Drivers receive this callback for every evicted &drm_gem_object being
+ * mapped in the corresponding &drm_gpuvm.
+ *
+ * Typically, drivers would call their driver specific variant of
+ * ttm_bo_validate() from within this callback.
+ */
+ int (*vm_bo_validate)(struct drm_gpuvm_bo *vm_bo,
+ struct drm_exec *exec);
+
+ /**
+ * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
+ * mapping once all previous steps were completed
+ *
+ * The &priv pointer matches the one the driver passed to
+ * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
+ *
+	 * Can be NULL if &drm_gpuvm_sm_map is not used.
+ */
+ int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);
+
+ /**
+ * @sm_step_remap: called from &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to split up an existent mapping
+ *
+	 * This callback is called when an existing mapping needs to be split
+	 * up. This is the case when either a newly requested mapping overlaps
+	 * or is enclosed by an existing mapping or a partial unmap of an
+	 * existing mapping is requested.
+ *
+ * The &priv pointer matches the one the driver passed to
+ * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
+ *
+ * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
+ * used.
+ */
+ int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);
+
+ /**
+ * @sm_step_unmap: called from &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to unmap an existing mapping
+ *
+	 * This callback is called when an existing mapping needs to be unmapped.
+ * This is the case when either a newly requested mapping encloses an
+ * existing mapping or an unmap of an existing mapping is requested.
+ *
+ * The &priv pointer matches the one the driver passed to
+ * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
+ *
+ * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
+ * used.
+ */
+ int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
+};
+
+int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
+ const struct drm_gpuvm_map_req *req);
+
+int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
+ u64 addr, u64 range);
+
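+/*
+ * A callback-mode sketch, assuming a hypothetical "foo" driver: the
+ * split/merge steps are applied immediately through the &drm_gpuvm_ops
+ * callbacks instead of being collected in a &drm_gpuva_ops list.
+ *
+ *	static int foo_sm_step_map(struct drm_gpuva_op *op, void *priv)
+ *	{
+ *		// program the page tables for op->map, then insert and
+ *		// link the corresponding &drm_gpuva
+ *		return 0;
+ *	}
+ *
+ *	static const struct drm_gpuvm_ops foo_gpuvm_ops = {
+ *		.vm_free = foo_vm_free,
+ *		.sm_step_map = foo_sm_step_map,
+ *		.sm_step_remap = foo_sm_step_remap,
+ *		.sm_step_unmap = foo_sm_step_unmap,
+ *	};
+ *
+ *	struct drm_gpuvm_map_req req = {
+ *		.map.va.addr = addr,
+ *		.map.va.range = range,
+ *		.map.gem.obj = obj,
+ *		.map.gem.offset = offset,
+ *	};
+ *
+ *	ret = drm_gpuvm_sm_map(&foo_vm->gpuvm, drv_priv, &req);
+ */
+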
+int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec, unsigned int num_fences,
+ struct drm_gpuvm_map_req *req);
+
+int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
+ u64 req_addr, u64 req_range);
+
+void drm_gpuva_map(struct drm_gpuvm *gpuvm,
+ struct drm_gpuva *va,
+ struct drm_gpuva_op_map *op);
+
+void drm_gpuva_remap(struct drm_gpuva *prev,
+ struct drm_gpuva *next,
+ struct drm_gpuva_op_remap *op);
+
+void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
+
+/**
+ * drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of
+ * the unmap stage of a remap op.
+ * @op: Remap op.
+ * @start_addr: Output pointer for the start of the required unmap.
+ * @range: Output pointer for the length of the required unmap.
+ *
+ * The given start address and range will be set such that they represent the
+ * range of the address space that was previously covered by the mapping being
+ * re-mapped, but is now empty.
+ */
+static inline void
+drm_gpuva_op_remap_to_unmap_range(const struct drm_gpuva_op_remap *op,
+ u64 *start_addr, u64 *range)
+{
+ const u64 va_start = op->prev ?
+ op->prev->va.addr + op->prev->va.range :
+ op->unmap->va->va.addr;
+ const u64 va_end = op->next ?
+ op->next->va.addr :
+ op->unmap->va->va.addr + op->unmap->va->va.range;
+
+ if (start_addr)
+ *start_addr = va_start;
+ if (range)
+ *range = va_end - va_start;
+}
+
+#endif /* __DRM_GPUVM_H__ */
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
deleted file mode 100644
index bb95ff011baf..000000000000
--- a/include/drm/drm_hashtab.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismack, ND. USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-/*
- * Simple open hash tab implementation.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#ifndef DRM_HASHTAB_H
-#define DRM_HASHTAB_H
-
-#include <linux/list.h>
-
-#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-
-struct drm_hash_item {
- struct hlist_node head;
- unsigned long key;
-};
-
-struct drm_open_hash {
- struct hlist_head *table;
- u8 order;
-};
-
-int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
-int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
- unsigned long seed, int bits, int shift,
- unsigned long add);
-int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
-
-void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
-int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
-int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-void drm_ht_remove(struct drm_open_hash *ht);
-
-/*
- * RCU-safe interface
- *
- * The user of this API needs to make sure that two or more instances of the
- * hash table manipulation functions are never run simultaneously.
- * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
- * with any of the manipulation functions as long as it's called from within
- * an RCU read-locked section.
- */
-#define drm_ht_insert_item_rcu drm_ht_insert_item
-#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
-#define drm_ht_remove_key_rcu drm_ht_remove_key
-#define drm_ht_remove_item_rcu drm_ht_remove_item
-#define drm_ht_find_item_rcu drm_ht_find_item
-
-#endif
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index 10100a4bbe2a..171760b6c4a1 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -68,6 +68,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
unsigned long arg);
#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_IOCTL_TYPE(n) _IOC_TYPE(n)
#define DRM_MAJOR 226
/**
@@ -109,17 +110,6 @@ enum drm_ioctl_flags {
*/
DRM_ROOT_ONLY = BIT(2),
/**
- * @DRM_UNLOCKED:
- *
- * Whether &drm_ioctl_desc.func should be called with the DRM BKL held
- * or not. Enforced as the default for all modern drivers, hence there
- * should never be a need to set this flag.
- *
- * Do not use anywhere else than for the VBLANK_WAIT IOCTL, which is the
- * only legacy IOCTL which needs this.
- */
- DRM_UNLOCKED = BIT(4),
- /**
* @DRM_RENDER_ALLOW:
*
* This is used for all ioctl needed for rendering only, for drivers
@@ -166,7 +156,6 @@ struct drm_ioctl_desc {
.name = #ioctl \
}
-int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
long drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
long drm_ioctl_kernel(struct file *, drm_ioctl_t, void *, u32);
#ifdef CONFIG_COMPAT
diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h
deleted file mode 100644
index 631b22f9757d..000000000000
--- a/include/drm/drm_irq.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright 2016 Intel Corp.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _DRM_IRQ_H_
-#define _DRM_IRQ_H_
-
-struct drm_device;
-
-int drm_irq_install(struct drm_device *dev, int irq);
-int drm_irq_uninstall(struct drm_device *dev);
-int devm_drm_irq_install(struct drm_device *dev, int irq);
-#endif
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
new file mode 100644
index 000000000000..4948379237e9
--- /dev/null
+++ b/include/drm/drm_kunit_helpers.h
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef DRM_KUNIT_HELPERS_H_
+#define DRM_KUNIT_HELPERS_H_
+
+#include <drm/drm_drv.h>
+
+#include <linux/device.h>
+
+#include <kunit/test.h>
+
+struct drm_connector;
+struct drm_crtc_funcs;
+struct drm_crtc_helper_funcs;
+struct drm_device;
+struct drm_plane_funcs;
+struct drm_plane_helper_funcs;
+struct kunit;
+
+struct device *drm_kunit_helper_alloc_device(struct kunit *test);
+void drm_kunit_helper_free_device(struct kunit *test, struct device *dev);
+
+struct drm_device *
+__drm_kunit_helper_alloc_drm_device_with_driver(struct kunit *test,
+ struct device *dev,
+ size_t size, size_t offset,
+ const struct drm_driver *driver);
+
+/**
+ * drm_kunit_helper_alloc_drm_device_with_driver - Allocates a mock DRM device for KUnit tests
+ * @_test: The test context object
+ * @_dev: The parent device object
+ * @_type: the type of the struct which contains struct &drm_device
+ * @_member: the name of the &drm_device within @_type.
+ * @_drv: Mocked DRM driver to create the device with
+ *
+ * This function creates a struct &drm_device from @_dev and @_drv.
+ *
+ * @_dev should be allocated using drm_kunit_helper_alloc_device().
+ *
+ * The driver is tied to the @_test context and will be cleaned up at the
+ * end of the test. The drm_device is allocated through
+ * devm_drm_dev_alloc() and will thus be freed through a device-managed
+ * resource.
+ *
+ * Returns:
+ * A pointer to the new drm_device, or an ERR_PTR() otherwise.
+ */
+#define drm_kunit_helper_alloc_drm_device_with_driver(_test, _dev, _type, _member, _drv) \
+ ((_type *)__drm_kunit_helper_alloc_drm_device_with_driver(_test, _dev, \
+ sizeof(_type), \
+ offsetof(_type, _member), \
+ _drv))
+
+static inline struct drm_device *
+__drm_kunit_helper_alloc_drm_device(struct kunit *test,
+ struct device *dev,
+ size_t size, size_t offset,
+ u32 features)
+{
+ struct drm_driver *driver;
+
+ driver = devm_kzalloc(dev, sizeof(*driver), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, driver);
+
+ driver->driver_features = features;
+
+ return __drm_kunit_helper_alloc_drm_device_with_driver(test, dev,
+ size, offset,
+ driver);
+}
+
+/**
+ * drm_kunit_helper_alloc_drm_device - Allocates a mock DRM device for KUnit tests
+ * @_test: The test context object
+ * @_dev: The parent device object
+ * @_type: the type of the struct which contains struct &drm_device
+ * @_member: the name of the &drm_device within @_type.
+ * @_feat: Mocked DRM device driver features
+ *
+ * This function creates a struct &drm_driver and will create a struct
+ * &drm_device from @_dev and that driver.
+ *
+ * @_dev should be allocated using drm_kunit_helper_alloc_device().
+ *
+ * The driver is tied to the @_test context and will be cleaned up at the
+ * end of the test. The drm_device is allocated through
+ * devm_drm_dev_alloc() and will thus be freed through a device-managed
+ * resource.
+ *
+ * Returns:
+ * A pointer to the new drm_device, or an ERR_PTR() otherwise.
+ */
+#define drm_kunit_helper_alloc_drm_device(_test, _dev, _type, _member, _feat) \
+ ((_type *)__drm_kunit_helper_alloc_drm_device(_test, _dev, \
+ sizeof(_type), \
+ offsetof(_type, _member), \
+ _feat))
+
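+/*
+ * A test sketch, assuming a hypothetical test structure embedding the
+ * &drm_device; the "foo_"-prefixed names are illustrative only.
+ *
+ *	struct foo_test_priv {
+ *		struct drm_device drm;
+ *	};
+ *
+ *	static void foo_test(struct kunit *test)
+ *	{
+ *		struct foo_test_priv *priv;
+ *		struct device *dev;
+ *
+ *		dev = drm_kunit_helper_alloc_device(test);
+ *		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ *
+ *		priv = drm_kunit_helper_alloc_drm_device(test, dev,
+ *							 struct foo_test_priv,
+ *							 drm, DRIVER_MODESET);
+ *		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+ *	}
+ */
+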
+struct drm_atomic_state *
+drm_kunit_helper_atomic_state_alloc(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_modeset_acquire_ctx *ctx);
+
+struct drm_plane *
+drm_kunit_helper_create_primary_plane(struct kunit *test,
+ struct drm_device *drm,
+ const struct drm_plane_funcs *funcs,
+ const struct drm_plane_helper_funcs *helper_funcs,
+ const uint32_t *formats,
+ unsigned int num_formats,
+ const uint64_t *modifiers);
+
+struct drm_crtc *
+drm_kunit_helper_create_crtc(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_plane *primary,
+ struct drm_plane *cursor,
+ const struct drm_crtc_funcs *funcs,
+ const struct drm_crtc_helper_funcs *helper_funcs);
+
+int drm_kunit_helper_enable_crtc_connector(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_crtc *crtc,
+ struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ struct drm_modeset_acquire_ctx *ctx);
+
+int drm_kunit_add_mode_destroy_action(struct kunit *test,
+ struct drm_display_mode *mode);
+
+struct drm_display_mode *
+drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev,
+ u8 video_code);
+
+#endif // DRM_KUNIT_HELPERS_H_
diff --git a/include/drm/drm_lease.h b/include/drm/drm_lease.h
index 5c9ef6a2aeae..53545b4ca9ef 100644
--- a/include/drm/drm_lease.h
+++ b/include/drm/drm_lease.h
@@ -6,6 +6,8 @@
#ifndef _DRM_LEASE_H_
#define _DRM_LEASE_H_
+#include <linux/types.h>
+
struct drm_file;
struct drm_device;
struct drm_master;
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
deleted file mode 100644
index 8ed04e9be997..000000000000
--- a/include/drm/drm_legacy.h
+++ /dev/null
@@ -1,237 +0,0 @@
-#ifndef __DRM_DRM_LEGACY_H__
-#define __DRM_DRM_LEGACY_H__
-/*
- * Legacy driver interfaces for the Direct Rendering Manager
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009-2010, Code Aurora Forum.
- * All rights reserved.
- * Copyright © 2014 Intel Corporation
- * Daniel Vetter <daniel.vetter@ffwll.ch>
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <drm/drm.h>
-#include <drm/drm_auth.h>
-#include <drm/drm_hashtab.h>
-
-struct drm_device;
-struct drm_driver;
-struct file;
-struct pci_driver;
-
-/*
- * Legacy Support for palateontologic DRM drivers
- *
- * If you add a new driver and it uses any of these functions or structures,
- * you're doing it terribly wrong.
- */
-
-/**
- * DMA buffer.
- */
-struct drm_buf {
- int idx; /**< Index into master buflist */
- int total; /**< Buffer size */
- int order; /**< log-base-2(total) */
- int used; /**< Amount of buffer in use (for DMA) */
- unsigned long offset; /**< Byte offset (used internally) */
- void *address; /**< Address of buffer */
- unsigned long bus_address; /**< Bus address of buffer */
- struct drm_buf *next; /**< Kernel-only: used for free list */
- __volatile__ int waiting; /**< On kernel DMA queue */
- __volatile__ int pending; /**< On hardware DMA queue */
- struct drm_file *file_priv; /**< Private of holding file descr */
- int context; /**< Kernel queue for this buffer */
- int while_locked; /**< Dispatch this buffer while locked */
- enum {
- DRM_LIST_NONE = 0,
- DRM_LIST_FREE = 1,
- DRM_LIST_WAIT = 2,
- DRM_LIST_PEND = 3,
- DRM_LIST_PRIO = 4,
- DRM_LIST_RECLAIM = 5
- } list; /**< Which list we're on */
-
- int dev_priv_size; /**< Size of buffer private storage */
- void *dev_private; /**< Per-buffer private storage */
-};
-
-typedef struct drm_dma_handle {
- dma_addr_t busaddr;
- void *vaddr;
- size_t size;
-} drm_dma_handle_t;
-
-/**
- * Buffer entry. There is one of this for each buffer size order.
- */
-struct drm_buf_entry {
- int buf_size; /**< size */
- int buf_count; /**< number of buffers */
- struct drm_buf *buflist; /**< buffer list */
- int seg_count;
- int page_order;
- struct drm_dma_handle **seglist;
-
- int low_mark; /**< Low water mark */
- int high_mark; /**< High water mark */
-};
-
-/**
- * DMA data.
- */
-struct drm_device_dma {
-
- struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
- int buf_count; /**< total number of buffers */
- struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
- int seg_count;
- int page_count; /**< number of pages */
- unsigned long *pagelist; /**< page list */
- unsigned long byte_count;
- enum {
- _DRM_DMA_USE_AGP = 0x01,
- _DRM_DMA_USE_SG = 0x02,
- _DRM_DMA_USE_FB = 0x04,
- _DRM_DMA_USE_PCI_RO = 0x08
- } flags;
-
-};
-
-/**
- * Scatter-gather memory.
- */
-struct drm_sg_mem {
- unsigned long handle;
- void *virtual;
- int pages;
- struct page **pagelist;
- dma_addr_t *busaddr;
-};
-
-/**
- * Kernel side of a mapping
- */
-struct drm_local_map {
- dma_addr_t offset; /**< Requested physical address (0 for SAREA)*/
- unsigned long size; /**< Requested physical size (bytes) */
- enum drm_map_type type; /**< Type of memory to map */
- enum drm_map_flags flags; /**< Flags */
- void *handle; /**< User-space: "Handle" to pass to mmap() */
- /**< Kernel-space: kernel-virtual address */
- int mtrr; /**< MTRR slot used */
-};
-
-typedef struct drm_local_map drm_local_map_t;
-
-/**
- * Mappings list
- */
-struct drm_map_list {
- struct list_head head; /**< list head */
- struct drm_hash_item hash;
- struct drm_local_map *map; /**< mapping */
- uint64_t user_token;
- struct drm_master *master;
-};
-
-int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags, struct drm_local_map **map_p);
-struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token);
-void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
-int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
-struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
-int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
-
-int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
-int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
-
-/**
- * Test that the hardware lock is held by the caller, returning otherwise.
- *
- * \param dev DRM device.
- * \param filp file pointer of the caller.
- */
-#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
-do { \
- if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
- _file_priv->master->lock.file_priv != _file_priv) { \
- DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
- __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
- _file_priv->master->lock.file_priv, _file_priv); \
- return -EINVAL; \
- } \
-} while (0)
-
-void drm_legacy_idlelock_take(struct drm_lock_data *lock);
-void drm_legacy_idlelock_release(struct drm_lock_data *lock);
-
-/* drm_pci.c */
-
-#ifdef CONFIG_PCI
-
-struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
- size_t align);
-void drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah);
-
-int drm_legacy_pci_init(const struct drm_driver *driver,
- struct pci_driver *pdriver);
-void drm_legacy_pci_exit(const struct drm_driver *driver,
- struct pci_driver *pdriver);
-
-#else
-
-static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
- size_t size, size_t align)
-{
- return NULL;
-}
-
-static inline void drm_pci_free(struct drm_device *dev,
- struct drm_dma_handle *dmah)
-{
-}
-
-static inline int drm_legacy_pci_init(const struct drm_driver *driver,
- struct pci_driver *pdriver)
-{
- return -EINVAL;
-}
-
-static inline void drm_legacy_pci_exit(const struct drm_driver *driver,
- struct pci_driver *pdriver)
-{
-}
-
-#endif
-
-/* drm_memory.c */
-void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
-void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
-void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
-
-#endif /* __DRM_DRM_LEGACY_H__ */
diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h
index b45c6fbf53ac..72bfac002c06 100644
--- a/include/drm/drm_managed.h
+++ b/include/drm/drm_managed.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
struct drm_device;
+struct mutex;
typedef void (*drmres_release_t)(struct drm_device *dev, void *res);
@@ -44,6 +45,10 @@ int __must_check __drmm_add_action_or_reset(struct drm_device *dev,
drmres_release_t action,
void *data, const char *name);
+void drmm_release_action(struct drm_device *dev,
+ drmres_release_t action,
+ void *data);
+
void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) __malloc;
/**
@@ -104,4 +109,45 @@ char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp);
void drmm_kfree(struct drm_device *dev, void *data);
+void __drmm_mutex_release(struct drm_device *dev, void *res);
+
+/**
+ * drmm_mutex_init - &drm_device-managed mutex_init()
+ * @dev: DRM device
+ * @lock: lock to be initialized
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ *
+ * This is a &drm_device-managed version of mutex_init(). The initialized
+ * lock is automatically destroyed on the final drm_dev_put().
+ */
+#define drmm_mutex_init(dev, lock) ({ \
+ mutex_init(lock); \
+ drmm_add_action_or_reset(dev, __drmm_mutex_release, lock); \
+})
+
+void __drmm_workqueue_release(struct drm_device *device, void *wq);
+
+/**
+ * drmm_alloc_ordered_workqueue - &drm_device managed alloc_ordered_workqueue()
+ * @dev: DRM device
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
+ * @args: args for @fmt
+ *
+ * This is a &drm_device-managed version of alloc_ordered_workqueue(). The
+ * allocated workqueue is automatically destroyed on the final drm_dev_put().
+ *
+ * Returns: the workqueue on success, or an ERR_PTR-encoded error on failure.
+ */
+#define drmm_alloc_ordered_workqueue(dev, fmt, flags, args...) \
+ ({ \
+ struct workqueue_struct *wq = alloc_ordered_workqueue(fmt, flags, ##args); \
+ wq ? ({ \
+ int ret = drmm_add_action_or_reset(dev, __drmm_workqueue_release, wq); \
+ ret ? ERR_PTR(ret) : wq; \
+ }) : ERR_PTR(-ENOMEM); \
+ })
+
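+/*
+ * A probe-path sketch, assuming a hypothetical "foo" driver structure with a
+ * lock and an ordered workqueue; both are torn down automatically on the
+ * final drm_dev_put().
+ *
+ *	ret = drmm_mutex_init(drm, &foo->lock);
+ *	if (ret)
+ *		return ret;
+ *
+ *	foo->wq = drmm_alloc_ordered_workqueue(drm, "foo-wq", 0);
+ *	if (IS_ERR(foo->wq))
+ *		return PTR_ERR(foo->wq);
+ */
+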
#endif
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index f543d6e3e822..f45f9612c0bc 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -12,10 +12,12 @@
#include <drm/drm_device.h>
#include <drm/drm_simple_kms_helper.h>
+struct drm_format_conv_state;
struct drm_rect;
-struct spi_device;
struct gpio_desc;
+struct iosys_map;
struct regulator;
+struct spi_device;
/**
* struct mipi_dbi - MIPI DBI interface
@@ -55,6 +57,11 @@ struct mipi_dbi {
struct spi_device *spi;
/**
+ * @write_memory_bpw: Bits per word used on a MIPI_DCS_WRITE_MEMORY_START transfer
+ */
+ unsigned int write_memory_bpw;
+
+ /**
* @dc: Optional D/C gpio.
*/
struct gpio_desc *dc;
@@ -95,6 +102,11 @@ struct mipi_dbi_dev {
struct drm_display_mode mode;
/**
+ * @pixel_format: Native pixel format (DRM_FORMAT\_\*)
+ */
+ u32 pixel_format;
+
+ /**
* @tx_buf: Buffer used for transfer (copy clip rect area)
*/
u16 *tx_buf;
@@ -122,14 +134,27 @@ struct mipi_dbi_dev {
struct backlight_device *backlight;
/**
- * @regulator: power regulator (optional)
+ * @regulator: power regulator (Vdd) (optional)
*/
struct regulator *regulator;
/**
+ * @io_regulator: I/O power regulator (Vddi) (optional)
+ */
+ struct regulator *io_regulator;
+
+ /**
* @dbi: MIPI DBI interface
*/
struct mipi_dbi dbi;
+
+ /**
+ * @driver_private: Driver private data.
+ * Necessary for drivers with private data since devm_drm_dev_alloc()
+	 * can't allocate structures that embed a structure which in turn
+ * embeds drm_device.
+ */
+ void *driver_private;
};
static inline struct mipi_dbi_dev *drm_to_mipi_dbi_dev(struct drm_device *drm)
@@ -147,12 +172,23 @@ int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
const struct drm_simple_display_pipe_funcs *funcs,
const struct drm_display_mode *mode, unsigned int rotation);
+enum drm_mode_status mipi_dbi_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode);
void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state);
void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plan_state);
void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe);
+int mipi_dbi_pipe_begin_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void mipi_dbi_pipe_end_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void mipi_dbi_pipe_reset_plane(struct drm_simple_display_pipe *pipe);
+struct drm_plane_state *mipi_dbi_pipe_duplicate_plane_state(struct drm_simple_display_pipe *pipe);
+void mipi_dbi_pipe_destroy_plane_state(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
void mipi_dbi_hw_reset(struct mipi_dbi *dbi);
bool mipi_dbi_display_is_on(struct mipi_dbi *dbi);
int mipi_dbi_poweron_reset(struct mipi_dbi_dev *dbidev);
@@ -166,8 +202,10 @@ int mipi_dbi_command_read(struct mipi_dbi *dbi, u8 cmd, u8 *val);
int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len);
int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data,
size_t len);
-int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swap);
+int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
+ struct drm_rect *clip, bool swap,
+ struct drm_format_conv_state *fmtcnv_state);
+
/**
* mipi_dbi_command - MIPI DCS command with optional parameter(s)
* @dbi: MIPI DBI structure
@@ -183,13 +221,39 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
#define mipi_dbi_command(dbi, cmd, seq...) \
({ \
const u8 d[] = { seq }; \
- mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \
+ struct device *dev = &(dbi)->spi->dev; \
+ int ret; \
+ ret = mipi_dbi_command_stackbuf(dbi, cmd, d, ARRAY_SIZE(d)); \
+ if (ret) \
+ dev_err_ratelimited(dev, "error %d when sending command %#02x\n", ret, cmd); \
+ ret; \
})
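
Since the macro now returns the result of mipi_dbi_command_stackbuf() while logging failures itself, callers can still propagate the error. A minimal sketch; my_panel_init() is hypothetical, MIPI_DCS_* come from <video/mipi_display.h> and msleep() from <linux/delay.h>:

static int my_panel_init(struct mipi_dbi *dbi)
{
	int ret;

	/* the macro logs a ratelimited error message on failure */
	ret = mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
	if (ret)
		return ret;

	msleep(120); /* sleep-out settle time, panel dependent */

	return mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
}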
#ifdef CONFIG_DEBUG_FS
void mipi_dbi_debugfs_init(struct drm_minor *minor);
#else
-#define mipi_dbi_debugfs_init NULL
+static inline void mipi_dbi_debugfs_init(struct drm_minor *minor) {}
#endif
+/**
+ * DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS - Initializes struct drm_simple_display_pipe_funcs
+ * for MIPI-DBI devices
+ * @enable_: Enable-callback implementation
+ *
+ * This macro initializes struct drm_simple_display_pipe_funcs with default
+ * values for MIPI-DBI-based devices. The only callback that depends on the
+ * hardware is @enable, for which the driver has to provide an implementation.
+ * MIPI-based drivers are encouraged to use this macro for initialization.
+ */
+#define DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(enable_) \
+ .mode_valid = mipi_dbi_pipe_mode_valid, \
+ .enable = (enable_), \
+ .disable = mipi_dbi_pipe_disable, \
+ .update = mipi_dbi_pipe_update, \
+ .begin_fb_access = mipi_dbi_pipe_begin_fb_access, \
+ .end_fb_access = mipi_dbi_pipe_end_fb_access, \
+ .reset_plane = mipi_dbi_pipe_reset_plane, \
+ .duplicate_plane_state = mipi_dbi_pipe_duplicate_plane_state, \
+ .destroy_plane_state = mipi_dbi_pipe_destroy_plane_state
+
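
For illustration, a driver combines the macro with its own enable callback; my_pipe_enable() below is a hypothetical driver function:

static const struct drm_simple_display_pipe_funcs my_pipe_funcs = {
	DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(my_pipe_enable),
};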
#endif /* __LINUX_MIPI_DBI_H */
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 360e6377e84b..3aba7b380c8d 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -10,6 +10,7 @@
#define __DRM_MIPI_DSI_H__
#include <linux/device.h>
+#include <linux/delay.h>
struct mipi_dsi_host;
struct mipi_dsi_device;
@@ -80,6 +81,11 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
* Note that typically DSI packet transmission is atomic, so the .transfer()
* function will seldomly return anything other than the number of bytes
* contained in the transmit buffer on success.
+ *
+ * Also note that those callbacks can be called regardless of the state
+ * the host is in. Drivers that need the underlying device to be powered to
+ * perform these operations will first need to make sure it's been
+ * properly enabled.
*/
struct mipi_dsi_host_ops {
int (*attach)(struct mipi_dsi_host *host,
@@ -119,19 +125,19 @@ struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node);
/* enable hsync-end packets in vsync-pulse and v-porch area */
#define MIPI_DSI_MODE_VIDEO_HSE BIT(4)
/* disable hfront-porch area */
-#define MIPI_DSI_MODE_VIDEO_HFP BIT(5)
+#define MIPI_DSI_MODE_VIDEO_NO_HFP BIT(5)
/* disable hback-porch area */
-#define MIPI_DSI_MODE_VIDEO_HBP BIT(6)
+#define MIPI_DSI_MODE_VIDEO_NO_HBP BIT(6)
/* disable hsync-active area */
-#define MIPI_DSI_MODE_VIDEO_HSA BIT(7)
-/* flush display FIFO on vsync pulse */
-#define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8)
+#define MIPI_DSI_MODE_VIDEO_NO_HSA BIT(7)
/* disable EoT packets in HS mode */
-#define MIPI_DSI_MODE_EOT_PACKET BIT(9)
+#define MIPI_DSI_MODE_NO_EOT_PACKET BIT(9)
/* device supports non-continuous clock behavior (DSI spec 5.6.1) */
#define MIPI_DSI_CLOCK_NON_CONTINUOUS BIT(10)
/* transmit data in low power */
#define MIPI_DSI_MODE_LPM BIT(11)
+/* transmit data ending at the same time for all lanes within one hsync */
+#define MIPI_DSI_HS_PKT_END_ALIGNED BIT(12)
enum mipi_dsi_pixel_format {
MIPI_DSI_FMT_RGB888,
@@ -161,6 +167,7 @@ struct mipi_dsi_device_info {
* struct mipi_dsi_device - DSI peripheral device
* @host: DSI host for this peripheral
* @dev: driver model device node for this peripheral
+ * @attached: the DSI device has been successfully attached
* @name: DSI peripheral chip type
* @channel: virtual channel assigned to the peripheral
* @format: pixel format for video mode
@@ -172,10 +179,12 @@ struct mipi_dsi_device_info {
* @lp_rate: maximum lane frequency for low power mode in hertz, this should
* be set to the real limits of the hardware, zero is only accepted for
* legacy drivers
+ * @dsc: panel/bridge DSC pps payload to be sent
*/
struct mipi_dsi_device {
struct mipi_dsi_host *host;
struct device dev;
+ bool attached;
char name[DSI_DEV_NAME_SIZE];
unsigned int channel;
@@ -184,14 +193,36 @@ struct mipi_dsi_device {
unsigned long mode_flags;
unsigned long hs_rate;
unsigned long lp_rate;
+ struct drm_dsc_config *dsc;
+};
+
+/**
+ * struct mipi_dsi_multi_context - Context to call multiple MIPI DSI funcs in a row
+ */
+struct mipi_dsi_multi_context {
+ /**
+ * @dsi: Pointer to the MIPI DSI device
+ */
+ struct mipi_dsi_device *dsi;
+
+ /**
+ * @accum_err: Storage for the accumulated error over the multiple calls
+ *
+ * Init to 0. If a function encounters an error then the error code
+ * will be stored here. If you call a function and this points to a
+ * non-zero value then the function will be a noop. This allows calling
+ * a function many times in a row and just checking the error at the
+ * end to see if any of them failed.
+ */
+ int accum_err;
};
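
As a sketch of the intended pattern (using the *_multi helpers and sleep macros declared later in this header), a caller initializes the context once and checks accum_err only at the end; my_panel_enable() is hypothetical:

static int my_panel_enable(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context ctx = { .dsi = dsi };

	mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
	mipi_dsi_msleep(&ctx, 120);
	mipi_dsi_dcs_set_display_on_multi(&ctx);

	/* 0, or the first error encountered; later calls were no-ops */
	return ctx.accum_err;
}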
#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
-static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
-{
- return container_of(dev, struct mipi_dsi_device, dev);
-}
+#define to_mipi_dsi_device(__dev) container_of_const(__dev, struct mipi_dsi_device, dev)
+
+extern const struct bus_type mipi_dsi_bus_type;
+#define dev_is_mipi_dsi(dev) ((dev)->bus == &mipi_dsi_bus_type)
/**
* mipi_dsi_pixel_format_to_bpp - obtain the number of bits per pixel for any
@@ -218,25 +249,66 @@ static inline int mipi_dsi_pixel_format_to_bpp(enum mipi_dsi_pixel_format fmt)
return -EINVAL;
}
+enum mipi_dsi_compression_algo {
+ MIPI_DSI_COMPRESSION_DSC = 0,
+ MIPI_DSI_COMPRESSION_VENDOR = 3,
+ /* other two values are reserved, DSI 1.3 */
+};
+
struct mipi_dsi_device *
mipi_dsi_device_register_full(struct mipi_dsi_host *host,
const struct mipi_dsi_device_info *info);
void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi);
+struct mipi_dsi_device *
+devm_mipi_dsi_device_register_full(struct device *dev, struct mipi_dsi_host *host,
+ const struct mipi_dsi_device_info *info);
struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
+int devm_mipi_dsi_attach(struct device *dev, struct mipi_dsi_device *dsi);
int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
u16 value);
-ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable);
-ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
- const struct drm_dsc_picture_parameter_set *pps);
+int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable);
+int mipi_dsi_compression_mode_ext(struct mipi_dsi_device *dsi, bool enable,
+ enum mipi_dsi_compression_algo algo,
+ unsigned int pps_selector);
+int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+ const struct drm_dsc_picture_parameter_set *pps);
+
+void mipi_dsi_compression_mode_ext_multi(struct mipi_dsi_multi_context *ctx,
+ bool enable,
+ enum mipi_dsi_compression_algo algo,
+ unsigned int pps_selector);
+void mipi_dsi_compression_mode_multi(struct mipi_dsi_multi_context *ctx,
+ bool enable);
+void mipi_dsi_picture_parameter_set_multi(struct mipi_dsi_multi_context *ctx,
+ const struct drm_dsc_picture_parameter_set *pps);
ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
size_t size);
+void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx,
+ const void *payload, size_t size);
+void mipi_dsi_dual_generic_write_multi(struct mipi_dsi_multi_context *ctx,
+ struct mipi_dsi_device *dsi1,
+ struct mipi_dsi_device *dsi2,
+ const void *payload, size_t size);
ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
size_t num_params, void *data, size_t size);
+u32 drm_mipi_dsi_get_input_bus_fmt(enum mipi_dsi_pixel_format dsi_format);
+
+#define mipi_dsi_msleep(ctx, delay) \
+ do { \
+ if (!(ctx)->accum_err) \
+ msleep(delay); \
+ } while (0)
+
+#define mipi_dsi_usleep_range(ctx, min, max) \
+ do { \
+ if (!(ctx)->accum_err) \
+ usleep_range(min, max); \
+ } while (0)
/**
* enum mipi_dsi_dcs_tear_mode - Tearing Effect Output Line mode
@@ -258,10 +330,20 @@ enum mipi_dsi_dcs_tear_mode {
ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
const void *data, size_t len);
+int mipi_dsi_dcs_write_buffer_chatty(struct mipi_dsi_device *dsi,
+ const void *data, size_t len);
+void mipi_dsi_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx,
+ const void *data, size_t len);
+void mipi_dsi_dual_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx,
+ struct mipi_dsi_device *dsi1,
+ struct mipi_dsi_device *dsi2,
+ const void *data, size_t len);
ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
const void *data, size_t len);
ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
size_t len);
+void mipi_dsi_dcs_read_multi(struct mipi_dsi_multi_context *ctx, u8 cmd,
+ void *data, size_t len);
int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode);
@@ -274,7 +356,6 @@ int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
u16 end);
-int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
enum mipi_dsi_dcs_tear_mode mode);
int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format);
@@ -283,6 +364,182 @@ int mipi_dsi_dcs_set_display_brightness(struct mipi_dsi_device *dsi,
u16 brightness);
int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
u16 *brightness);
+int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi,
+ u16 brightness);
+int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
+ u16 *brightness);
+
+void mipi_dsi_dcs_nop_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_enter_sleep_mode_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_exit_sleep_mode_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_set_display_off_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_set_display_on_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_set_tear_on_multi(struct mipi_dsi_multi_context *ctx,
+ enum mipi_dsi_dcs_tear_mode mode);
+void mipi_dsi_turn_on_peripheral_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_soft_reset_multi(struct mipi_dsi_multi_context *ctx);
+void mipi_dsi_dcs_set_display_brightness_multi(struct mipi_dsi_multi_context *ctx,
+ u16 brightness);
+void mipi_dsi_dcs_set_pixel_format_multi(struct mipi_dsi_multi_context *ctx,
+ u8 format);
+void mipi_dsi_dcs_set_column_address_multi(struct mipi_dsi_multi_context *ctx,
+ u16 start, u16 end);
+void mipi_dsi_dcs_set_page_address_multi(struct mipi_dsi_multi_context *ctx,
+ u16 start, u16 end);
+void mipi_dsi_dcs_set_tear_scanline_multi(struct mipi_dsi_multi_context *ctx,
+ u16 scanline);
+void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx);
+
+/**
+ * mipi_dsi_generic_write_seq_multi - transmit data using a generic write packet
+ *
+ * This macro will print errors for you and error handling is optimized for
+ * callers that call this multiple times in a row.
+ *
+ * @ctx: Context for multiple DSI transactions
+ * @seq: buffer containing the payload
+ */
+#define mipi_dsi_generic_write_seq_multi(ctx, seq...) \
+ do { \
+ static const u8 d[] = { seq }; \
+ mipi_dsi_generic_write_multi(ctx, d, ARRAY_SIZE(d)); \
+ } while (0)
+
+/**
+ * mipi_dsi_generic_write_var_seq_multi - transmit non-constant data using a
+ * generic write packet
+ *
+ * This macro will print errors for you and error handling is optimized for
+ * callers that call this multiple times in a row.
+ *
+ * @ctx: Context for multiple DSI transactions
+ * @seq: buffer containing the payload
+ */
+#define mipi_dsi_generic_write_var_seq_multi(ctx, seq...) \
+ do { \
+ const u8 d[] = { seq }; \
+ mipi_dsi_generic_write_multi(ctx, d, ARRAY_SIZE(d)); \
+ } while (0)
+
+/**
+ * mipi_dsi_dcs_write_seq_multi - transmit a DCS command with payload
+ *
+ * This macro will print errors for you and error handling is optimized for
+ * callers that call this multiple times in a row.
+ *
+ * @ctx: Context for multiple DSI transactions
+ * @cmd: Command
+ * @seq: buffer containing data to be transmitted
+ */
+#define mipi_dsi_dcs_write_seq_multi(ctx, cmd, seq...) \
+ do { \
+ static const u8 d[] = { cmd, seq }; \
+ mipi_dsi_dcs_write_buffer_multi(ctx, d, ARRAY_SIZE(d)); \
+ } while (0)
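
A hedged example of a vendor init sequence built from this macro; the command and parameter bytes below are made up for illustration:

static void my_panel_init_seq(struct mipi_dsi_multi_context *ctx)
{
	mipi_dsi_dcs_write_seq_multi(ctx, 0xb0, 0x04);	/* unlock */
	mipi_dsi_dcs_write_seq_multi(ctx, 0xd6, 0x01);	/* vendor setting */
	mipi_dsi_usleep_range(ctx, 1000, 2000);
	/* errors accumulate in ctx->accum_err; no per-call checks needed */
}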
+
+/**
+ * mipi_dsi_dcs_write_var_seq_multi - transmit a DCS command with non-constant
+ * payload
+ *
+ * This macro will print errors for you and error handling is optimized for
+ * callers that call this multiple times in a row.
+ *
+ * @ctx: Context for multiple DSI transactions
+ * @cmd: Command
+ * @seq: buffer containing data to be transmitted
+ */
+#define mipi_dsi_dcs_write_var_seq_multi(ctx, cmd, seq...) \
+ do { \
+ const u8 d[] = { cmd, seq }; \
+ mipi_dsi_dcs_write_buffer_multi(ctx, d, ARRAY_SIZE(d)); \
+ } while (0)
+
+/**
+ * mipi_dsi_dual - send the same MIPI DSI command to two interfaces
+ *
+ * This macro will send the specified MIPI DSI command twice, once per each of
+ * the two interfaces supplied. This is useful for reducing duplication of code
+ * in panel drivers which use two parallel serial interfaces.
+ *
+ * Note that the _func parameter cannot accept a macro such as
+ * mipi_dsi_generic_write_multi() or mipi_dsi_dcs_write_buffer_multi(). See
+ * mipi_dsi_dual_generic_write_multi() and
+ * mipi_dsi_dual_dcs_write_buffer_multi() instead.
+ *
+ * WARNING: This macro reuses the _func argument and the optional trailing
+ * arguments twice each, which may cause unintended side effects. For example,
+ * adding the postfix increment ++ operator to one of the arguments to be
+ * passed to _func will cause the variable to be incremented twice instead of
+ * once and the variable will be its original value + 1 when sent to _dsi2.
+ *
+ * @_func: MIPI DSI function to pass context and arguments into
+ * @_ctx: Context for multiple DSI transactions
+ * @_dsi1: First DSI interface to act as recipient of the MIPI DSI command
+ * @_dsi2: Second DSI interface to act as recipient of the MIPI DSI command
+ * @...: Arguments to pass to MIPI DSI function or macro
+ */
+#define mipi_dsi_dual(_func, _ctx, _dsi1, _dsi2, ...) \
+ do { \
+ struct mipi_dsi_multi_context *_ctxcpy = (_ctx); \
+ _ctxcpy->dsi = (_dsi1); \
+ (_func)(_ctxcpy, ##__VA_ARGS__); \
+ _ctxcpy->dsi = (_dsi2); \
+ (_func)(_ctxcpy, ##__VA_ARGS__); \
+ } while (0)
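
For example, a brightness update can be mirrored to both links by passing a plain *_multi function as _func; constant arguments avoid the double-evaluation pitfall described above (ctx, dsi1 and dsi2 are assumed to be in scope):

mipi_dsi_dual(mipi_dsi_dcs_set_display_brightness_multi,
	      &ctx, dsi1, dsi2, 0x00ff);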
+
+/**
+ * mipi_dsi_dual_generic_write_seq_multi - transmit data using a generic write
+ * packet to two dsi interfaces, one after the other
+ *
+ * This macro will send the specified generic packet twice, once per each of
+ * the two interfaces supplied. This is useful for reducing duplication of code
+ * in panel drivers which use two parallel serial interfaces.
+ *
+ * Note that if an error occurs while transmitting the packet to the first DSI
+ * interface, the packet will not be sent to the second DSI interface.
+ *
+ * This macro will print errors for you and error handling is optimized for
+ * callers that call this multiple times in a row.
+ *
+ * @_ctx: Context for multiple DSI transactions
+ * @_dsi1: First DSI interface to act as recipient of packet
+ * @_dsi2: Second DSI interface to act as recipient of packet
+ * @_seq: buffer containing the payload
+ */
+#define mipi_dsi_dual_generic_write_seq_multi(_ctx, _dsi1, _dsi2, _seq...) \
+ do { \
+ static const u8 d[] = { _seq }; \
+ mipi_dsi_dual_generic_write_multi(_ctx, _dsi1, _dsi2, d, \
+ ARRAY_SIZE(d)); \
+ } while (0)
+
+/**
+ * mipi_dsi_dual_dcs_write_seq_multi - transmit a DCS command with payload to
+ * two dsi interfaces, one after the other
+ *
+ * This macro will send the specified DCS command with payload twice, once per
+ * each of the two interfaces supplied. This is useful for reducing duplication
+ * of code in panel drivers which use two parallel serial interfaces.
+ *
+ * Note that if an error occurs while transmitting the payload to the first DSI
+ * interface, the payload will not be sent to the second DSI interface.
+ *
+ * This macro will print errors for you and error handling is optimized for
+ * callers that call this multiple times in a row.
+ *
+ * @_ctx: Context for multiple DSI transactions
+ * @_dsi1: First DSI interface to act as recipient of packet
+ * @_dsi2: Second DSI interface to act as recipient of packet
+ * @_cmd: Command
+ * @_seq: buffer containing the payload
+ */
+#define mipi_dsi_dual_dcs_write_seq_multi(_ctx, _dsi1, _dsi2, _cmd, _seq...) \
+ do { \
+ static const u8 d[] = { _cmd, _seq }; \
+ mipi_dsi_dual_dcs_write_buffer_multi(_ctx, _dsi1, _dsi2, d, \
+ ARRAY_SIZE(d)); \
+ } while (0)
/**
* struct mipi_dsi_driver - DSI driver
@@ -294,7 +551,7 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
struct mipi_dsi_driver {
struct device_driver driver;
int(*probe)(struct mipi_dsi_device *dsi);
- int(*remove)(struct mipi_dsi_device *dsi);
+ void (*remove)(struct mipi_dsi_device *dsi);
void (*shutdown)(struct mipi_dsi_device *dsi);
};
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 9b4292f229c6..16ce0e8f36a6 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -39,14 +39,16 @@
*/
#include <linux/bug.h>
#include <linux/rbtree.h>
-#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/mm_types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif
-#include <drm/drm_print.h>
+#include <linux/types.h>
+
+struct drm_printer;
#ifdef CONFIG_DRM_DEBUG_MM
#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
@@ -461,7 +463,6 @@ static inline int drm_mm_insert_node(struct drm_mm *mm,
}
void drm_mm_remove_node(struct drm_mm_node *node);
-void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index ab424ddd7665..895fb820dba0 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -82,6 +82,7 @@ struct drm_mode_config_funcs {
*/
struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
/**
@@ -95,24 +96,7 @@ struct drm_mode_config_funcs {
* The format information specific to the given fb metadata, or
* NULL if none is found.
*/
- const struct drm_format_info *(*get_format_info)(const struct drm_mode_fb_cmd2 *mode_cmd);
-
- /**
- * @output_poll_changed:
- *
- * Callback used by helpers to inform the driver of output configuration
- * changes.
- *
- * Drivers implementing fbdev emulation with the helpers can call
- * drm_fb_helper_hotplug_changed from this hook to inform the fbdev
- * helper of output changes.
- *
- * FIXME:
- *
- * Except that there's no vtable for device-level helper callbacks
- * there's no reason this is a core function.
- */
- void (*output_poll_changed)(struct drm_device *dev);
+ const struct drm_format_info *(*get_format_info)(u32 pixel_format, u64 modifier);
/**
* @mode_valid:
@@ -346,7 +330,6 @@ struct drm_mode_config_funcs {
* @max_width: maximum fb pixel width on this device
* @max_height: maximum fb pixel height on this device
* @funcs: core driver provided mode setting functions
- * @fb_base: base address of the framebuffer
* @poll_enabled: track polling support for this device
* @poll_running: track polling status for this device
* @delayed_event: track delayed poll uevent deliver for this device
@@ -360,6 +343,19 @@ struct drm_mode_config_funcs {
* Core mode resource tracking structure. All CRTC, encoders, and connectors
* enumerated by the driver are added here, as are global properties. Some
* global restrictions are also here, e.g. dimension restrictions.
+ *
+ * Framebuffer sizes refer to the virtual screen that can be displayed by
+ * the CRTC. This can be different from the physical resolution programmed.
+ * The minimum width and height, stored in @min_width and @min_height,
+ * describe the smallest size of the framebuffer. It correlates to the
+ * minimum programmable resolution.
+ * The maximum width, stored in @max_width, is typically limited by the
+ * maximum pitch between two adjacent scanlines. The maximum height, stored
+ * in @max_height, is usually only limited by the amount of addressable video
+ * memory. For hardware that has no real maximum, drivers should pick a
+ * reasonable default.
+ *
+ * See also @DRM_SHADOW_PLANE_MAX_WIDTH and @DRM_SHADOW_PLANE_MAX_HEIGHT.
*/
struct drm_mode_config {
/**
@@ -495,6 +491,34 @@ struct drm_mode_config {
struct list_head plane_list;
/**
+ * @panic_lock:
+ *
+ * Raw spinlock used to protect critical sections of code that access
+ * the display hardware or modeset software state, which the panic
+ * printing code must be protected against. See drm_panic_trylock(),
+ * drm_panic_lock() and drm_panic_unlock().
+ */
+ struct raw_spinlock panic_lock;
+
+ /**
+ * @num_colorop:
+ *
+ * Number of colorop objects on this device.
+ * This is invariant over the lifetime of a device and hence doesn't
+ * need any locks.
+ */
+ int num_colorop;
+
+ /**
+ * @colorop_list:
+ *
+ * List of colorop objects linked with &drm_colorop.head. This is
+ * invariant over the lifetime of a device and hence doesn't need any
+ * locks.
+ */
+ struct list_head colorop_list;
+
+ /**
* @num_crtc:
*
* Number of CRTCs on this device linked with &drm_crtc.head. This is invariant over the lifetime
@@ -527,10 +551,9 @@ struct drm_mode_config {
*/
struct list_head privobj_list;
- int min_width, min_height;
- int max_width, max_height;
+ unsigned int min_width, min_height;
+ unsigned int max_width, max_height;
const struct drm_mode_config_funcs *funcs;
- resource_size_t fb_base;
/* output poll support */
bool poll_enabled;
@@ -702,11 +725,21 @@ struct drm_mode_config {
* between different TV connector types.
*/
struct drm_property *tv_select_subconnector_property;
+
/**
- * @tv_mode_property: Optional TV property to select
+ * @legacy_tv_mode_property: Optional TV property to select
* the output TV mode.
+ *
+ * Superseded by @tv_mode_property
+ */
+ struct drm_property *legacy_tv_mode_property;
+
+ /**
+ * @tv_mode_property: Optional TV property to select the TV
+ * standard output on the connector.
*/
struct drm_property *tv_mode_property;
+
/**
* @tv_left_margin_property: Optional TV property to set the left
* margin (expressed in pixels).
@@ -871,13 +904,6 @@ struct drm_mode_config {
uint32_t preferred_depth, prefer_shadow;
/**
- * @prefer_shadow_fbdev:
- *
- * Hint to framebuffer emulation to prefer shadow-fb rendering.
- */
- bool prefer_shadow_fbdev;
-
- /**
* @quirk_addfb_prefer_xbgr_30bpp:
*
* Special hack for legacy ADDFB to keep nouveau userspace happy. Should
@@ -906,18 +932,14 @@ struct drm_mode_config {
bool async_page_flip;
/**
- * @allow_fb_modifiers:
- *
- * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
+ * @fb_modifiers_not_supported:
*
- * IMPORTANT:
- *
- * If this is set the driver must fill out the full implicit modifier
- * information in their &drm_mode_config_funcs.fb_create hook for legacy
- * userspace which does not set modifiers. Otherwise the GETFB2 ioctl is
- * broken for modifier aware userspace.
+ * When this flag is set, the DRM device will not expose modifier
+ * support to userspace. This is only used by legacy drivers that infer
+ * the buffer layout through heuristics without using modifiers. New
+	 * drivers shall not set this flag.
*/
- bool allow_fb_modifiers;
+ bool fb_modifiers_not_supported;
/**
* @normalize_zpos:
@@ -933,6 +955,17 @@ struct drm_mode_config {
*/
struct drm_property *modifiers_property;
+ /**
+	 * @async_modifiers_property: Plane property to list supported modifier/format
+	 * combinations for asynchronous flips.
+ */
+ struct drm_property *async_modifiers_property;
+
+ /**
+ * @size_hints_property: Plane SIZE_HINTS property.
+ */
+ struct drm_property *size_hints_property;
+
/* cursor size */
uint32_t cursor_width, cursor_height;
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index c34a3e8030e1..c68edbd126d0 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -35,7 +35,7 @@ struct drm_file;
* @id: userspace visible identifier
* @type: type of the object, one of DRM_MODE_OBJECT\_\*
* @properties: properties attached to this object, including values
- * @refcount: reference count for objects which with dynamic lifetime
+ * @refcount: reference count for objects with dynamic lifetime
* @free_cb: free function callback, only set for objects with dynamic lifetime
*
* Base structure for modeset objects visible to userspace. Objects can be
@@ -60,7 +60,7 @@ struct drm_mode_object {
void (*free_cb)(struct kref *kref);
};
-#define DRM_OBJECT_MAX_PROPERTY 24
+#define DRM_OBJECT_MAX_PROPERTY 64
/**
* struct drm_object_properties - property tracking for &drm_mode_object
*/
@@ -98,6 +98,10 @@ struct drm_object_properties {
* Hence atomic drivers should not use drm_object_property_set_value()
* and drm_object_property_get_value() on mutable objects, i.e. those
* without the DRM_MODE_PROP_IMMUTABLE flag set.
+ *
+ * For atomic drivers the default value of properties is stored in this
+ * array, so drm_object_property_get_default_value can be used to
+ * retrieve it.
*/
uint64_t values[DRM_OBJECT_MAX_PROPERTY];
};
@@ -126,6 +130,9 @@ int drm_object_property_set_value(struct drm_mode_object *obj,
int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t *value);
+int drm_object_property_get_default_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t *val);
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index 29ba4adf0c53..b9bb92e4b029 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -139,6 +139,35 @@ enum drm_mode_status {
.vscan = (vs), .flags = (f)
/**
+ * DRM_MODE_RES_MM - Calculates the display size from resolution and DPI
+ * @res: The resolution in pixel
+ * @dpi: The number of dots per inch
+ */
+#define DRM_MODE_RES_MM(res, dpi) \
+ (((res) * 254ul) / ((dpi) * 10ul))
+
+#define __DRM_MODE_INIT(pix, hd, vd, hd_mm, vd_mm) \
+ .type = DRM_MODE_TYPE_DRIVER, .clock = (pix), \
+ .hdisplay = (hd), .hsync_start = (hd), .hsync_end = (hd), \
+ .htotal = (hd), .vdisplay = (vd), .vsync_start = (vd), \
+ .vsync_end = (vd), .vtotal = (vd), .width_mm = (hd_mm), \
+ .height_mm = (vd_mm)
+
+/**
+ * DRM_MODE_INIT - Initialize display mode
+ * @hz: Vertical refresh rate in Hertz
+ * @hd: Horizontal resolution, width
+ * @vd: Vertical resolution, height
+ * @hd_mm: Display width in millimeters
+ * @vd_mm: Display height in millimeters
+ *
+ * This macro initializes a &drm_display_mode that contains information about
+ * refresh rate, resolution and physical size.
+ */
+#define DRM_MODE_INIT(hz, hd, vd, hd_mm, vd_mm) \
+ __DRM_MODE_INIT((hd) * (vd) * (hz) / 1000 /* kHz */, hd, vd, hd_mm, vd_mm)
+
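
For instance, a fixed 60 Hz 320x240 mode for a panel of roughly 140 DPI could be declared with both helpers (the values are illustrative):

static const struct drm_display_mode my_mode = {
	DRM_MODE_INIT(60, 320, 240,
		      DRM_MODE_RES_MM(320, 140),
		      DRM_MODE_RES_MM(240, 140)),
};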
+/**
* DRM_SIMPLE_MODE - Simple display mode
* @hd: Horizontal resolution, width
* @vd: Vertical resolution, height
@@ -149,11 +178,7 @@ enum drm_mode_status {
* resolution and physical size.
*/
#define DRM_SIMPLE_MODE(hd, vd, hd_mm, vd_mm) \
- .type = DRM_MODE_TYPE_DRIVER, .clock = 1 /* pass validation */, \
- .hdisplay = (hd), .hsync_start = (hd), .hsync_end = (hd), \
- .htotal = (hd), .vdisplay = (vd), .vsync_start = (vd), \
- .vsync_end = (vd), .vtotal = (vd), .width_mm = (hd_mm), \
- .height_mm = (vd_mm)
+ __DRM_MODE_INIT(1 /* pass validation */, hd, vd, hd_mm, vd_mm)
#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
@@ -442,6 +467,25 @@ bool drm_mode_is_420_also(const struct drm_display_info *display,
const struct drm_display_mode *mode);
bool drm_mode_is_420(const struct drm_display_info *display,
const struct drm_display_mode *mode);
+void drm_set_preferred_mode(struct drm_connector *connector,
+ int hpref, int vpref);
+
+struct drm_display_mode *drm_analog_tv_mode(struct drm_device *dev,
+ enum drm_connector_tv_mode mode,
+ unsigned long pixel_clock_hz,
+ unsigned int hdisplay,
+ unsigned int vdisplay,
+ bool interlace);
+
+static inline struct drm_display_mode *drm_mode_analog_ntsc_480i(struct drm_device *dev)
+{
+ return drm_analog_tv_mode(dev, DRM_MODE_TV_MODE_NTSC, 13500000, 720, 480, true);
+}
+
+static inline struct drm_display_mode *drm_mode_analog_pal_576i(struct drm_device *dev)
+{
+ return drm_analog_tv_mode(dev, DRM_MODE_TV_MODE_PAL, 13500000, 720, 576, true);
+}
struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
int hdisplay, int vdisplay, int vrefresh,
@@ -466,6 +510,8 @@ void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags);
int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode, u32 *bus_flags,
int index);
+int of_get_drm_panel_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode, u32 *bus_flags);
#else
static inline int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode,
@@ -473,6 +519,12 @@ static inline int of_get_drm_display_mode(struct device_node *np,
{
return -EINVAL;
}
+
+static inline int of_get_drm_panel_display_mode(struct device_node *np,
+ struct drm_display_mode *dmode, u32 *bus_flags)
+{
+ return -EINVAL;
+}
#endif
void drm_mode_set_name(struct drm_display_mode *mode);
@@ -484,6 +536,8 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p,
int adjust_flags);
void drm_mode_copy(struct drm_display_mode *dst,
const struct drm_display_mode *src);
+void drm_mode_init(struct drm_display_mode *dst,
+ const struct drm_display_mode *src);
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode);
bool drm_mode_match(const struct drm_display_mode *mode1,
diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h
index 995fd981cab0..7e3d4c5a7f66 100644
--- a/include/drm/drm_modeset_helper.h
+++ b/include/drm/drm_modeset_helper.h
@@ -26,6 +26,7 @@
struct drm_crtc;
struct drm_crtc_funcs;
struct drm_device;
+struct drm_format_info;
struct drm_framebuffer;
struct drm_mode_fb_cmd2;
@@ -33,6 +34,7 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *);
void drm_helper_mode_fill_fb_struct(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index f3a4b47b3986..fe32854b7ffe 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -48,15 +48,20 @@
* To make this clear all the helper vtables are pulled together in this location here.
*/
-enum mode_set_atomic;
+struct drm_scanout_buffer;
struct drm_writeback_connector;
struct drm_writeback_job;
+enum mode_set_atomic {
+ LEAVE_ATOMIC_MODE_SET,
+ ENTER_ATOMIC_MODE_SET,
+};
+
/**
* struct drm_crtc_helper_funcs - helper operations for CRTCs
*
- * These hooks are used by the legacy CRTC helpers, the transitional plane
- * helpers and the new atomic modesetting helpers.
+ * These hooks are used by the legacy CRTC helpers and the new atomic
+ * modesetting helpers.
*/
struct drm_crtc_helper_funcs {
/**
@@ -130,7 +135,7 @@ struct drm_crtc_helper_funcs {
* Since this function is both called from the check phase of an atomic
* commit, and the mode validation in the probe paths it is not allowed
* to look at anything else but the passed-in mode, and validate it
- * against configuration-invariant hardward constraints. Any further
+ * against configuration-invariant hardware constraints. Any further
* limits which depend upon the configuration can only be checked in
* @mode_fixup or @atomic_check.
*
@@ -212,9 +217,7 @@ struct drm_crtc_helper_funcs {
*
* This callback is used to update the display mode of a CRTC without
* changing anything of the primary plane configuration. This fits the
- * requirement of atomic and hence is used by the atomic helpers. It is
- * also used by the transitional plane helpers to implement a
- * @mode_set hook in drm_helper_crtc_mode_set().
+ * requirement of atomic and hence is used by the atomic helpers.
*
* Note that the display pipe is completely off when this function is
* called. Atomic drivers which need hardware to be running before they
@@ -329,8 +332,8 @@ struct drm_crtc_helper_funcs {
* all updated. Again the recommendation is to just call check helpers
* until a maximal configuration is reached.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*
* NOTE:
*
@@ -369,8 +372,8 @@ struct drm_crtc_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*/
void (*atomic_begin)(struct drm_crtc *crtc,
struct drm_atomic_state *state);
@@ -393,8 +396,8 @@ struct drm_crtc_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*/
void (*atomic_flush)(struct drm_crtc *crtc,
struct drm_atomic_state *state);
@@ -487,6 +490,18 @@ struct drm_crtc_helper_funcs {
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
+
+ /**
+ * @handle_vblank_timeout: Handles timeouts of the vblank timer.
+ *
+	 * Called by the CRTC's vblank timer on each timeout. The semantics are
+	 * equivalent to drm_crtc_handle_vblank(). Implementations should
+ * invoke drm_crtc_handle_vblank() as part of processing the timeout.
+ *
+ * This callback is optional. If unset, the vblank timer invokes
+ * drm_crtc_handle_vblank() directly.
+ */
+ bool (*handle_vblank_timeout)(struct drm_crtc *crtc);
};
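
A minimal implementation just forwards the timeout to the core handler, optionally with driver bookkeeping; my_crtc_handle_vblank_timeout() is a hypothetical sketch:

static bool my_crtc_handle_vblank_timeout(struct drm_crtc *crtc)
{
	/* driver-specific processing could go here */
	return drm_crtc_handle_vblank(crtc);
}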
/**
@@ -503,8 +518,8 @@ static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
/**
* struct drm_encoder_helper_funcs - helper operations for encoders
*
- * These hooks are used by the legacy CRTC helpers, the transitional plane
- * helpers and the new atomic modesetting helpers.
+ * These hooks are used by the legacy CRTC helpers and the new atomic
+ * modesetting helpers.
*/
struct drm_encoder_helper_funcs {
/**
@@ -548,7 +563,7 @@ struct drm_encoder_helper_funcs {
* Since this function is both called from the check phase of an atomic
* commit, and the mode validation in the probe paths it is not allowed
* to look at anything else but the passed-in mode, and validate it
- * against configuration-invariant hardward constraints. Any further
+ * against configuration-invariant hardware constraints. Any further
* limits which depend upon the configuration can only be checked in
* @mode_fixup or @atomic_check.
*
@@ -896,7 +911,8 @@ struct drm_connector_helper_funcs {
*
* RETURNS:
*
- * The number of modes added by calling drm_mode_probed_add().
+ * The number of modes added by calling drm_mode_probed_add(). Return 0
+ * on failures (no modes) instead of negative error codes.
*/
int (*get_modes)(struct drm_connector *connector);
@@ -963,7 +979,7 @@ struct drm_connector_helper_funcs {
* drm_mode_status.
*/
enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
/**
* @mode_valid_ctx:
@@ -1002,7 +1018,7 @@ struct drm_connector_helper_funcs {
*
*/
int (*mode_valid_ctx)(struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_modeset_acquire_ctx *ctx,
enum drm_mode_status *status);
@@ -1143,6 +1159,38 @@ struct drm_connector_helper_funcs {
*/
void (*cleanup_writeback_job)(struct drm_writeback_connector *connector,
struct drm_writeback_job *job);
+
+ /**
+ * @enable_hpd:
+ *
+ * Enable hot-plug detection for the connector.
+ *
+ * This operation is optional.
+ *
+ * This callback is used by the drm_kms_helper_poll_enable() helpers.
+ *
+ * This operation does not need to perform any hpd state tracking as
+ * the DRM core handles that maintenance and ensures the calls to enable
+ * and disable hpd are balanced.
+ *
+ */
+ void (*enable_hpd)(struct drm_connector *connector);
+
+ /**
+ * @disable_hpd:
+ *
+ * Disable hot-plug detection for the connector.
+ *
+ * This operation is optional.
+ *
+ * This callback is used by the drm_kms_helper_poll_disable() helpers.
+ *
+ * This operation does not need to perform any hpd state tracking as
+ * the DRM core handles that maintenance and ensures the calls to enable
+ * and disable hpd are balanced.
+ *
+ */
+ void (*disable_hpd)(struct drm_connector *connector);
};
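
As a sketch, the two hooks usually just gate the hot-plug interrupt; my_connector() and hpd_irq are hypothetical driver details:

static void my_enable_hpd(struct drm_connector *connector)
{
	enable_irq(my_connector(connector)->hpd_irq);
}

static void my_disable_hpd(struct drm_connector *connector)
{
	disable_irq(my_connector(connector)->hpd_irq);
}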
/**
@@ -1159,8 +1207,7 @@ static inline void drm_connector_helper_add(struct drm_connector *connector,
/**
* struct drm_plane_helper_funcs - helper operations for planes
*
- * These functions are used by the atomic helpers and by the transitional plane
- * helpers.
+ * These functions are used by the atomic helpers.
*/
struct drm_plane_helper_funcs {
/**
@@ -1178,14 +1225,25 @@ struct drm_plane_helper_funcs {
* equivalent functionality should be implemented through private
* members in the plane structure.
*
- * Drivers which always have their buffers pinned should use
- * drm_gem_plane_helper_prepare_fb() for this hook.
+	 * For GEM drivers that have neither a @prepare_fb nor a @cleanup_fb hook
+	 * set, drm_gem_plane_helper_prepare_fb() is called automatically to
+ * implement this. Other drivers which need additional plane processing
+ * can call drm_gem_plane_helper_prepare_fb() from their @prepare_fb
+ * hook.
+ *
+ * The resources acquired in @prepare_fb persist after the end of
+	 * the atomic commit. Resources that can be released at the commit's end
+	 * should be acquired in @begin_fb_access and released in @end_fb_access.
+	 * For example, a GEM buffer's pin operation belongs in @prepare_fb to
+	 * keep the buffer pinned after the commit. But a vmap operation for
+	 * shadow-plane helpers belongs in @begin_fb_access, so that atomic
+ * helpers remove the mapping at the end of the commit.
*
* The helpers will call @cleanup_fb with matching arguments for every
* successful call to this hook.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional. See @begin_fb_access for preparing per-commit resources.
*
* RETURNS:
*
@@ -1202,13 +1260,43 @@ struct drm_plane_helper_funcs {
* This hook is called to clean up any resources allocated for the given
* framebuffer and plane configuration in @prepare_fb.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*/
void (*cleanup_fb)(struct drm_plane *plane,
struct drm_plane_state *old_state);
/**
+ * @begin_fb_access:
+ *
+ * This hook prepares the plane for access during an atomic commit.
+	 * In contrast to @prepare_fb, resources acquired in @begin_fb_access
+ * are released at the end of the atomic commit in @end_fb_access.
+ *
+ * For example, with shadow-plane helpers, the GEM buffer's vmap
+	 * operation belongs in @begin_fb_access, so that the buffer's
+	 * memory will be unmapped at the end of the commit in @end_fb_access.
+	 * But a GEM buffer's pin operation belongs in @prepare_fb
+	 * to keep the buffer pinned after the commit.
+	 *
+	 * The callback is used by the atomic modeset helpers, but it is optional.
+	 * See @end_fb_access for undoing the effects of @begin_fb_access and
+ * @prepare_fb for acquiring resources until the next pageflip.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+ int (*begin_fb_access)(struct drm_plane *plane, struct drm_plane_state *new_plane_state);
+
+ /**
+ * @end_fb_access:
+ *
+	 * This hook cleans up resources allocated by @begin_fb_access. It is called
+ * at the end of a commit for the new plane state.
+ */
+ void (*end_fb_access)(struct drm_plane *plane, struct drm_plane_state *new_plane_state);
+
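
A sketch of the pairing, assuming a driver that maps the framebuffer for CPU access only while the commit runs; my_state(), my_fb_vmap() and my_fb_vunmap() are hypothetical:

static int my_plane_begin_fb_access(struct drm_plane *plane,
				    struct drm_plane_state *new_state)
{
	/* mapping lives only until end_fb_access at the commit's end */
	return my_fb_vmap(new_state->fb, &my_state(new_state)->map);
}

static void my_plane_end_fb_access(struct drm_plane *plane,
				   struct drm_plane_state *new_state)
{
	my_fb_vunmap(new_state->fb, &my_state(new_state)->map);
}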
+ /**
* @atomic_check:
*
* Drivers should check plane specific constraints in this hook.
@@ -1227,8 +1315,8 @@ struct drm_plane_helper_funcs {
* all updated. Again the recommendation is to just call check helpers
* until a maximal configuration is reached.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*
* NOTE:
*
@@ -1258,11 +1346,36 @@ struct drm_plane_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is optional.
*/
void (*atomic_update)(struct drm_plane *plane,
struct drm_atomic_state *state);
+
+ /**
+ * @atomic_enable:
+ *
+ * Drivers should use this function to unconditionally enable a plane.
+ * This hook is called in-between the &drm_crtc_helper_funcs.atomic_begin
+	 * and &drm_crtc_helper_funcs.atomic_flush callbacks. It is called after
+ * @atomic_update, which will be called for all enabled planes. Drivers
+ * that use @atomic_enable should set up a plane in @atomic_update and
+ * afterwards enable the plane in @atomic_enable. If a plane needs to be
+ * enabled before installing the scanout buffer, drivers can still do
+ * so in @atomic_update.
+ *
+ * Note that the power state of the display pipe when this function is
+ * called depends upon the exact helpers and calling sequence the driver
+ * has picked. See drm_atomic_helper_commit_planes() for a discussion of
+ * the tradeoffs and variants of plane commit helpers.
+ *
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional. If implemented, @atomic_enable should be the inverse of
+ * @atomic_disable. Drivers that don't want to use either can still
+ * implement the complete plane update in @atomic_update.
+ */
+ void (*atomic_enable)(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+
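
For illustration, a driver might split the update as follows; the my_plane_* register helpers are made up:

static void my_plane_atomic_update(struct drm_plane *plane,
				   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	my_plane_set_scanout(plane, new_state->fb);
}

static void my_plane_atomic_enable(struct drm_plane *plane,
				   struct drm_atomic_state *state)
{
	my_plane_set_enable_bit(plane, true);
}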
/**
* @atomic_disable:
*
@@ -1282,8 +1395,8 @@ struct drm_plane_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional. It's intended to reverse the effects of @atomic_enable.
*/
void (*atomic_disable)(struct drm_plane *plane,
struct drm_atomic_state *state);
@@ -1299,13 +1412,18 @@ struct drm_plane_helper_funcs {
* given update can be committed asynchronously, that is, if it can
* jump ahead of the state currently queued for update.
*
+ * This function is also used by drm_atomic_set_property() to determine
+	 * if the plane can be flipped asynchronously. The flip flag is used to
+	 * distinguish whether the function is called for just the plane state
+	 * or for a flip.
+ *
* RETURNS:
*
* Return 0 on success and any error returned indicates that the update
* can not be applied in asynchronous manner.
*/
int (*atomic_async_check)(struct drm_plane *plane,
- struct drm_atomic_state *state);
+ struct drm_atomic_state *state, bool flip);
/**
* @atomic_async_update:
@@ -1343,6 +1461,44 @@ struct drm_plane_helper_funcs {
*/
void (*atomic_async_update)(struct drm_plane *plane,
struct drm_atomic_state *state);
+
+ /**
+ * @get_scanout_buffer:
+ *
+ * Get the current scanout buffer, to display a message with drm_panic.
+ * The driver should do the minimum changes to provide a buffer,
+ * that can be used to display the panic screen. Currently only linear
+ * buffers are supported. Non-linear buffer support is on the TODO list.
+ * The device &dev.mode_config.panic_lock is taken before calling this
+ * function, so you can safely access the &plane.state
+ * It is called from a panic callback, and must follow its restrictions.
+ * Please look the documentation at drm_panic_trylock() for an in-depth
+ * discussions of what's safe and what is not allowed.
+ * It's a best effort mode, so it's expected that in some complex cases
+ * the panic screen won't be displayed.
+ * The returned &drm_scanout_buffer.map must be valid if no error code is
+ * returned.
+ *
+ * Return:
+ * %0 on success, negative errno on failure.
+ */
+ int (*get_scanout_buffer)(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb);
+
+ /**
+ * @panic_flush:
+ *
+ * It is used by drm_panic, and is called after the panic screen is
+ * drawn to the scanout buffer. In this function, the driver
+ * can send additional commands to the hardware, to make the scanout
+ * buffer visible.
+ * It is only called if get_scanout_buffer() returned successfully, and
+ * the &dev.mode_config.panic_lock is held during the entire sequence.
+ * It is called from a panic callback, and must follow its restrictions.
+	 * Please look at the documentation of drm_panic_trylock() for an
+	 * in-depth discussion of what's safe and what is not allowed.
+ */
+ void (*panic_flush)(struct drm_plane *plane);
};
/**
@@ -1375,13 +1531,13 @@ struct drm_mode_config_helper_funcs {
* swapped into the various state pointers. The passed in state
* therefore contains copies of the old/previous state. This hook should
* commit the new state into hardware. Note that the helpers have
- * already waited for preceeding atomic commits and fences, but drivers
+ * already waited for preceding atomic commits and fences, but drivers
* can add more waiting calls at the start of their implementation, e.g.
* to wait for driver-internal request for implicit syncing, before
* starting to commit the update to the hardware.
*
* After the atomic update is committed to the hardware this hook needs
- * to call drm_atomic_helper_commit_hw_done(). Then wait for the upate
+ * to call drm_atomic_helper_commit_hw_done(). Then wait for the update
* to be executed by the hardware, for example using
* drm_atomic_helper_wait_for_vblanks() or
* drm_atomic_helper_wait_for_flip_done(), and then clean up the old
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index aafd07388eb7..ec4f543c3d95 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -24,6 +24,8 @@
#ifndef DRM_MODESET_LOCK_H_
#define DRM_MODESET_LOCK_H_
+#include <linux/types.h> /* stackdepot.h is not self-contained */
+#include <linux/stackdepot.h>
#include <linux/ww_mutex.h>
struct drm_modeset_lock;
@@ -32,6 +34,7 @@ struct drm_modeset_lock;
* struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx)
* @ww_ctx: base acquire ctx
* @contended: used internally for -EDEADLK handling
+ * @stack_depot: used internally for contention debugging
* @locked: list of held locks
* @trylock_only: trylock mode used in atomic contexts/panic notifiers
* @interruptible: whether interruptible locking should be used.
@@ -52,6 +55,12 @@ struct drm_modeset_acquire_ctx {
struct drm_modeset_lock *contended;
/*
+ * Stack depot for debugging when a contended lock was not backed off
+ * from.
+ */
+ depot_stack_handle_t stack_depot;
+
+ /*
* list of held locks (drm_modeset_lock)
*/
struct list_head locked;
diff --git a/include/drm/drm_module.h b/include/drm/drm_module.h
new file mode 100644
index 000000000000..4db1ae03d9a5
--- /dev/null
+++ b/include/drm/drm_module.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_MODULE_H
+#define DRM_MODULE_H
+
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_drv.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides helpers for registering DRM drivers during module
+ * initialization and shutdown. The provided helpers act like bus-specific
+ * module helpers, such as module_pci_driver(), but respect additional
+ * parameters that control DRM driver registration.
+ *
+ * Below is an example of initializing a DRM driver for a device on the
+ * PCI bus.
+ *
+ * .. code-block:: c
+ *
+ * struct pci_driver my_pci_drv = {
+ * };
+ *
+ * drm_module_pci_driver(my_pci_drv);
+ *
+ * The generated code will test if DRM drivers are enabled and register
+ * the PCI driver my_pci_drv. For more complex module initialization, you
+ * can still use module_init() and module_exit() in your driver.
+ */
+
+/*
+ * PCI drivers
+ */
+
+static inline int __init drm_pci_register_driver(struct pci_driver *pci_drv)
+{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
+ return pci_register_driver(pci_drv);
+}
+
+/**
+ * drm_module_pci_driver - Register a DRM driver for PCI-based devices
+ * @__pci_drv: the PCI driver structure
+ *
+ * Registers a DRM driver for devices on the PCI bus. The helper
+ * macro behaves like module_pci_driver() but tests the state of
+ * drm_firmware_drivers_only(). For more complex module initialization,
+ * use module_init() and module_exit() directly.
+ *
+ * Each module may only use this macro once. Calling it replaces
+ * module_init() and module_exit().
+ */
+#define drm_module_pci_driver(__pci_drv) \
+ module_driver(__pci_drv, drm_pci_register_driver, pci_unregister_driver)
+
+static inline int __init
+drm_pci_register_driver_if_modeset(struct pci_driver *pci_drv, int modeset)
+{
+ if (drm_firmware_drivers_only() && modeset == -1)
+ return -ENODEV;
+ if (modeset == 0)
+ return -ENODEV;
+
+ return pci_register_driver(pci_drv);
+}
+
+static inline void __exit
+drm_pci_unregister_driver_if_modeset(struct pci_driver *pci_drv, int modeset)
+{
+ pci_unregister_driver(pci_drv);
+}
+
+/**
+ * drm_module_pci_driver_if_modeset - Register a DRM driver for PCI-based devices
+ * @__pci_drv: the PCI driver structure
+ * @__modeset: an additional parameter that disables the driver
+ *
+ * This macro is deprecated and only provided for existing drivers. For
+ * new drivers, use drm_module_pci_driver().
+ *
+ * Registers a DRM driver for devices on the PCI bus. The helper macro
+ * behaves like drm_module_pci_driver() with an additional driver-specific
+ * flag. If __modeset is 0, the driver has been disabled, if __modeset is
+ * -1 the driver state depends on the global DRM state. For all other
+ * values, the PCI driver has been enabled. The default should be -1.
+ */
+#define drm_module_pci_driver_if_modeset(__pci_drv, __modeset) \
+ module_driver(__pci_drv, drm_pci_register_driver_if_modeset, \
+ drm_pci_unregister_driver_if_modeset, __modeset)
+
+/*
+ * Platform drivers
+ */
+
+static inline int __init
+drm_platform_driver_register(struct platform_driver *platform_drv)
+{
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
+ return platform_driver_register(platform_drv);
+}
+
+/**
+ * drm_module_platform_driver - Register a DRM driver for platform devices
+ * @__platform_drv: the platform driver structure
+ *
+ * Registers a DRM driver for devices on the platform bus. The helper
+ * macro behaves like module_platform_driver() but tests the state of
+ * drm_firmware_drivers_only(). For more complex module initialization,
+ * use module_init() and module_exit() directly.
+ *
+ * Each module may only use this macro once. Calling it replaces
+ * module_init() and module_exit().
+ */
+#define drm_module_platform_driver(__platform_drv) \
+ module_driver(__platform_drv, drm_platform_driver_register, \
+ platform_driver_unregister)
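
Usage mirrors the PCI variant shown in the overview; my_probe() and my_remove() are hypothetical:

static struct platform_driver my_platform_drv = {
	.probe = my_probe,
	.remove = my_remove,
	.driver = {
		.name = "my-drm-device",
	},
};

drm_module_platform_driver(my_platform_drv);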
+
+#endif
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index b9b093add92e..7f0256dae3f1 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -2,6 +2,7 @@
#ifndef __DRM_OF_H__
#define __DRM_OF_H__
+#include <linux/err.h>
#include <linux/of_graph.h>
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE)
#include <drm/drm_bridge.h>
@@ -15,6 +16,8 @@ struct drm_encoder;
struct drm_panel;
struct drm_bridge;
struct device_node;
+struct mipi_dsi_device_info;
+struct mipi_dsi_host;
/**
* enum drm_lvds_dual_link_pixels - Pixel order of an LVDS dual-link connection
@@ -49,6 +52,15 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
struct drm_bridge **bridge);
int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
const struct device_node *port2);
+int drm_of_lvds_get_dual_link_pixel_order_sink(struct device_node *port1,
+ struct device_node *port2);
+int drm_of_lvds_get_data_mapping(const struct device_node *port);
+int drm_of_get_data_lanes_count(const struct device_node *endpoint,
+ const unsigned int min, const unsigned int max);
+int drm_of_get_data_lanes_count_ep(const struct device_node *port,
+ int port_reg, int reg,
+ const unsigned int min,
+ const unsigned int max);
#else
static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
struct device_node *port)
@@ -98,8 +110,47 @@ drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
{
return -EINVAL;
}
+
+static inline int
+drm_of_lvds_get_dual_link_pixel_order_sink(struct device_node *port1,
+ struct device_node *port2)
+{
+ return -EINVAL;
+}
+
+static inline int
+drm_of_lvds_get_data_mapping(const struct device_node *port)
+{
+ return -EINVAL;
+}
+
+static inline int
+drm_of_get_data_lanes_count(const struct device_node *endpoint,
+ const unsigned int min, const unsigned int max)
+{
+ return -EINVAL;
+}
+
+static inline int
+drm_of_get_data_lanes_count_ep(const struct device_node *port,
+ int port_reg, int reg,
+ const unsigned int min,
+ const unsigned int max)
+{
+ return -EINVAL;
+}
#endif
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_MIPI_DSI)
+struct mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev);
+#else
+static inline struct mipi_dsi_host *
+drm_of_get_dsi_bus(struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_OF && CONFIG_DRM_MIPI_DSI */
+
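
A hedged sketch of the intended use: the probe routine of a device on another bus locates the DSI host from the devicetree and registers a managed DSI device on it, using the devm helpers from drm_mipi_dsi.h (my_probe() and the device name are made up):

static int my_probe(struct device *dev)
{
	const struct mipi_dsi_device_info info = {
		.type = "my-panel",
		.channel = 0,
		.node = NULL,
	};
	struct mipi_dsi_host *host;
	struct mipi_dsi_device *dsi;

	host = drm_of_get_dsi_bus(dev);
	if (IS_ERR(host))
		return PTR_ERR(host);

	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
	if (IS_ERR(dsi))
		return PTR_ERR(dsi);

	return devm_mipi_dsi_attach(dev, dsi);
}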
/*
* drm_of_panel_bridge_remove - remove panel bridge
* @np: device tree node containing panel bridge output ports
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
new file mode 100644
index 000000000000..f6e7e234c089
--- /dev/null
+++ b/include/drm/drm_pagemap.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef _DRM_PAGEMAP_H_
+#define _DRM_PAGEMAP_H_
+
+#include <linux/dma-direction.h>
+#include <linux/hmm.h>
+#include <linux/types.h>
+
+#define NR_PAGES(order) (1U << (order))
+
+struct drm_pagemap;
+struct drm_pagemap_zdd;
+struct device;
+
+/**
+ * enum drm_interconnect_protocol - Used to identify an interconnect protocol.
+ *
+ * @DRM_INTERCONNECT_SYSTEM: DMA map is system pages
+ * @DRM_INTERCONNECT_DRIVER: DMA map is driver defined
+ */
+enum drm_interconnect_protocol {
+ DRM_INTERCONNECT_SYSTEM,
+ DRM_INTERCONNECT_DRIVER,
+ /* A driver can add private values beyond DRM_INTERCONNECT_DRIVER */
+};
+
+/**
+ * struct drm_pagemap_addr - Address representation.
+ * @addr: The dma address or driver-defined address for driver private interconnects.
+ * @proto: The interconnect protocol.
+ * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
+ * @dir: The DMA direction.
+ *
+ * Note: There is room for improvement here. We should be able to pack into
+ * 64 bits.
+ */
+struct drm_pagemap_addr {
+ dma_addr_t addr;
+ u64 proto : 54;
+ u64 order : 8;
+ u64 dir : 2;
+};
+
+/**
+ * drm_pagemap_addr_encode() - Encode a dma address with metadata
+ * @addr: The dma address or driver-defined address for driver private interconnects.
+ * @proto: The interconnect protocol.
+ * @order: The page order of the dma mapping. (Size is PAGE_SIZE << order).
+ * @dir: The DMA direction.
+ *
+ * Return: A struct drm_pagemap_addr encoding the above information.
+ */
+static inline struct drm_pagemap_addr
+drm_pagemap_addr_encode(dma_addr_t addr,
+ enum drm_interconnect_protocol proto,
+ unsigned int order,
+ enum dma_data_direction dir)
+{
+ return (struct drm_pagemap_addr) {
+ .addr = addr,
+ .proto = proto,
+ .order = order,
+ .dir = dir,
+ };
+}
+
+/**
+ * struct drm_pagemap_ops - Ops for a drm-pagemap.
+ */
+struct drm_pagemap_ops {
+ /**
+	 * @device_map: Map a page for device access, or provide a
+	 * driver-private address suitable for the interconnect.
+	 *
+ * @dpagemap: The struct drm_pagemap for the page.
+ * @dev: The device mapper.
+ * @page: The page to map.
+ * @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
+ * @dir: The transfer direction.
+ */
+ struct drm_pagemap_addr (*device_map)(struct drm_pagemap *dpagemap,
+ struct device *dev,
+ struct page *page,
+ unsigned int order,
+ enum dma_data_direction dir);
+
+ /**
+ * @device_unmap: Unmap a device address previously obtained using @device_map.
+ *
+ * @dpagemap: The struct drm_pagemap for the mapping.
+ * @dev: The device unmapper.
+ * @addr: The device address obtained when mapping.
+ */
+ void (*device_unmap)(struct drm_pagemap *dpagemap,
+ struct device *dev,
+ struct drm_pagemap_addr addr);
+
+ /**
+ * @populate_mm: Populate part of the mm with @dpagemap memory,
+ * migrating existing data.
+ * @dpagemap: The struct drm_pagemap managing the memory.
+ * @start: The virtual start address in @mm
+ * @end: The virtual end address in @mm
+	 * @mm: Pointer to a live mm. The caller must have an mmget()
+	 * reference.
+	 * @timeslice_ms: The time requested for the migrated pages to
+	 * remain present in @mm before they may be migrated back.
+	 *
+	 * The caller holds the mm lock, at least in read mode. Note that
+	 * there is no guarantee that the memory is resident after the
+	 * function returns; this is best effort only. When the mm is no
+	 * longer using the memory, it will be released. The struct
+	 * drm_pagemap might have a mechanism in place to reclaim the
+	 * memory, in which case the data will be migrated, typically to
+	 * system memory. The implementation should hold sufficient
+	 * runtime power references while pages are used in an address
+	 * space, and should ideally guard against hardware device unbind
+	 * in such a way that device pages are migrated back to system
+	 * memory before the device pages are removed. The implementation
+	 * should return -ENODEV after device removal.
+ *
+ * Return: 0 if successful. Negative error code on error.
+ */
+ int (*populate_mm)(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm,
+ unsigned long timeslice_ms);
+};
+
+/**
+ * struct drm_pagemap - Additional information for a struct dev_pagemap
+ * used for device p2p handshaking.
+ * @ops: The struct drm_pagemap_ops.
+ * @dev: The struct device owning the device-private memory.
+ */
+struct drm_pagemap {
+ const struct drm_pagemap_ops *ops;
+ struct device *dev;
+};
+
+struct drm_pagemap_devmem;
+
+/**
+ * struct drm_pagemap_devmem_ops - Operations structure for GPU SVM device memory
+ *
+ * This structure defines the operations for GPU Shared Virtual Memory (SVM)
+ * device memory. These operations are provided by the GPU driver to manage device memory
+ * allocations and perform operations such as migration between device memory and system
+ * RAM.
+ */
+struct drm_pagemap_devmem_ops {
+ /**
+ * @devmem_release: Release device memory allocation (optional)
+ * @devmem_allocation: device memory allocation
+ *
+ * Release device memory allocation and drop a reference to device
+ * memory allocation.
+ */
+ void (*devmem_release)(struct drm_pagemap_devmem *devmem_allocation);
+
+ /**
+ * @populate_devmem_pfn: Populate device memory PFN (required for migration)
+ * @devmem_allocation: device memory allocation
+ * @npages: Number of pages to populate
+ * @pfn: Array of page frame numbers to populate
+ *
+ * Populate device memory page frame numbers (PFN).
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*populate_devmem_pfn)(struct drm_pagemap_devmem *devmem_allocation,
+ unsigned long npages, unsigned long *pfn);
+
+ /**
+ * @copy_to_devmem: Copy to device memory (required for migration)
+ * @pages: Pointer to array of device memory pages (destination)
+ * @pagemap_addr: Pointer to array of DMA information (source)
+ * @npages: Number of pages to copy
+ *
+ * Copy pages to device memory. If the order of a @pagemap_addr entry
+ * is greater than 0, the entry is populated but subsequent entries
+ * within the range of that order are not populated.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*copy_to_devmem)(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
+ unsigned long npages);
+
+ /**
+ * @copy_to_ram: Copy to system RAM (required for migration)
+ * @pages: Pointer to array of device memory pages (source)
+ * @pagemap_addr: Pointer to array of DMA information (destination)
+ * @npages: Number of pages to copy
+ *
+ * Copy pages to system RAM. If the order of a @pagemap_addr entry
+ * is greater than 0, the entry is populated but subsequent entries
+ * within the range of that order are not populated.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*copy_to_ram)(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
+ unsigned long npages);
+};
+
+/**
+ * struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
+ *
+ * @dev: Pointer to the device structure to which the device memory allocation belongs
+ * @mm: Pointer to the mm_struct for the address space
+ * @detached: Completion signaled when the device memory allocation is detached from device pages
+ * @ops: Pointer to the operations structure for GPU SVM device memory
+ * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
+ * @size: Size of device memory allocation
+ * @timeslice_expiration: Timeslice expiration in jiffies
+ */
+struct drm_pagemap_devmem {
+ struct device *dev;
+ struct mm_struct *mm;
+ struct completion detached;
+ const struct drm_pagemap_devmem_ops *ops;
+ struct drm_pagemap *dpagemap;
+ size_t size;
+ u64 timeslice_expiration;
+};
+
+int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end,
+ unsigned long timeslice_ms,
+ void *pgmap_owner);
+
+int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);
+
+const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void);
+
+struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
+
+void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
+ struct device *dev, struct mm_struct *mm,
+ const struct drm_pagemap_devmem_ops *ops,
+ struct drm_pagemap *dpagemap, size_t size);
+
+int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm,
+ unsigned long timeslice_ms);
+
+#endif
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 33605c3f0eba..2407bfa60236 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -27,11 +27,14 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/kref.h>
struct backlight_device;
+struct dentry;
struct device_node;
struct drm_connector;
-struct drm_device;
+struct drm_panel_follower;
struct drm_panel;
struct display_timing;
@@ -64,8 +67,8 @@ enum drm_panel_orientation;
* the panel. This is the job of the .unprepare() function.
*
* Backlight can be handled automatically if configured using
- * drm_panel_of_backlight(). Then the driver does not need to implement the
- * functionality to enable/disable backlight.
+ * drm_panel_of_backlight() or drm_panel_dp_aux_backlight(). Then the driver
+ * does not need to implement the functionality to enable/disable backlight.
*/
struct drm_panel_funcs {
/**
@@ -116,6 +119,15 @@ struct drm_panel_funcs {
struct drm_connector *connector);
/**
+ * @get_orientation:
+ *
+ * Return the panel orientation set by device tree or EDID.
+ *
+ * This function is optional.
+ */
+ enum drm_panel_orientation (*get_orientation)(struct drm_panel *panel);
+
+ /**
* @get_timings:
*
* Copy display timings into the provided array and return
@@ -125,6 +137,66 @@ struct drm_panel_funcs {
*/
int (*get_timings)(struct drm_panel *panel, unsigned int num_timings,
struct display_timing *timings);
+
+ /**
+ * @debugfs_init:
+ *
+	 * Allows panels to create panel-specific debugfs files.
+ */
+ void (*debugfs_init)(struct drm_panel *panel, struct dentry *root);
+};
+
+struct drm_panel_follower_funcs {
+ /**
+ * @panel_prepared:
+ *
+ * Called after the panel has been powered on.
+ */
+ int (*panel_prepared)(struct drm_panel_follower *follower);
+
+ /**
+ * @panel_unpreparing:
+ *
+ * Called before the panel is powered off.
+ */
+ int (*panel_unpreparing)(struct drm_panel_follower *follower);
+
+ /**
+ * @panel_enabled:
+ *
+ * Called after the panel and the backlight have been enabled.
+ */
+ int (*panel_enabled)(struct drm_panel_follower *follower);
+
+ /**
+ * @panel_disabling:
+ *
+ * Called before the panel and the backlight are disabled.
+ */
+ int (*panel_disabling)(struct drm_panel_follower *follower);
+};
+
+struct drm_panel_follower {
+ /**
+ * @funcs:
+ *
+	 * Dependent device callbacks; should be initialized by the caller.
+ */
+ const struct drm_panel_follower_funcs *funcs;
+
+ /**
+	 * @list:
+ *
+ * Used for linking into panel's list; set by drm_panel_add_follower().
+ */
+ struct list_head list;
+
+ /**
+	 * @panel:
+ *
+ * The panel we're dependent on; set by drm_panel_add_follower().
+ */
+ struct drm_panel *panel;
};
/**
@@ -144,8 +216,8 @@ struct drm_panel {
* Backlight device, used to turn on backlight after the call
* to enable(), and to turn off backlight before the call to
* disable().
- * backlight is set by drm_panel_of_backlight() and drivers
- * shall not assign it.
+ * backlight is set by drm_panel_of_backlight() or
+ * drm_panel_dp_aux_backlight() and drivers shall not assign it.
*/
struct backlight_device *backlight;
@@ -171,20 +243,98 @@ struct drm_panel {
* Panel entry in registry.
*/
struct list_head list;
+
+ /**
+ * @followers:
+ *
+ * A list of struct drm_panel_follower dependent on this panel.
+ */
+ struct list_head followers;
+
+ /**
+ * @follower_lock:
+ *
+ * Lock for followers list.
+ */
+ struct mutex follower_lock;
+
+ /**
+ * @prepare_prev_first:
+ *
+ * The previous controller should be prepared first, before the prepare
+ * for the panel is called. This is largely required for DSI panels
+ * where the DSI host controller should be initialised to LP-11 before
+ * the panel is powered up.
+ */
+ bool prepare_prev_first;
+
+ /**
+ * @prepared:
+ *
+ * If true then the panel has been prepared.
+ */
+ bool prepared;
+
+ /**
+ * @enabled:
+ *
+ * If true then the panel has been enabled.
+ */
+ bool enabled;
+
+ /**
+	 * @container: Pointer to the private driver struct embedding this
+	 * &struct drm_panel.
+ */
+ void *container;
+
+ /**
+ * @refcount: reference count of users referencing this panel.
+ */
+ struct kref refcount;
};
+void *__devm_drm_panel_alloc(struct device *dev, size_t size, size_t offset,
+ const struct drm_panel_funcs *funcs,
+ int connector_type);
+
+/**
+ * devm_drm_panel_alloc - Allocate and initialize a refcounted panel.
+ *
+ * @dev: struct device of the panel device
+ * @type: the type of the struct which contains struct &drm_panel
+ * @member: the name of the &drm_panel within @type
+ * @funcs: callbacks for this panel
+ * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to
+ * the panel interface
+ *
+ * The reference count of the returned panel is initialized to 1. This
+ * reference will be automatically dropped via devm (by calling
+ * drm_panel_put()) when @dev is removed.
+ *
+ * Returns:
+ * Pointer to container structure embedding the panel, ERR_PTR on failure.
+ */
+#define devm_drm_panel_alloc(dev, type, member, funcs, connector_type) \
+ ((type *)__devm_drm_panel_alloc(dev, sizeof(type), \
+ offsetof(type, member), funcs, \
+ connector_type))
+
void drm_panel_init(struct drm_panel *panel, struct device *dev,
const struct drm_panel_funcs *funcs,
int connector_type);
+struct drm_panel *drm_panel_get(struct drm_panel *panel);
+void drm_panel_put(struct drm_panel *panel);
+
void drm_panel_add(struct drm_panel *panel);
void drm_panel_remove(struct drm_panel *panel);
-int drm_panel_prepare(struct drm_panel *panel);
-int drm_panel_unprepare(struct drm_panel *panel);
+void drm_panel_prepare(struct drm_panel *panel);
+void drm_panel_unprepare(struct drm_panel *panel);
-int drm_panel_enable(struct drm_panel *panel);
-int drm_panel_disable(struct drm_panel *panel);
+void drm_panel_enable(struct drm_panel *panel);
+void drm_panel_disable(struct drm_panel *panel);
int drm_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector);
@@ -205,6 +355,33 @@ static inline int of_drm_get_panel_orientation(const struct device_node *np,
}
#endif
+#if defined(CONFIG_DRM_PANEL)
+bool drm_is_panel_follower(struct device *dev);
+int drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower);
+void drm_panel_remove_follower(struct drm_panel_follower *follower);
+int devm_drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower);
+#else
+static inline bool drm_is_panel_follower(struct device *dev)
+{
+ return false;
+}
+
+static inline int drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower)
+{
+ return -ENODEV;
+}
+
+static inline void drm_panel_remove_follower(struct drm_panel_follower *follower) { }
+static inline int devm_drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower)
+{
+ return -ENODEV;
+}
+#endif
+
#if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
(IS_MODULE(CONFIG_DRM) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE)))
int drm_panel_of_backlight(struct drm_panel *panel);
diff --git a/include/drm/drm_panic.h b/include/drm/drm_panic.h
new file mode 100644
index 000000000000..ac0e46b73436
--- /dev/null
+++ b/include/drm/drm_panic.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+
+/*
+ * Copyright (c) 2024 Intel
+ * Copyright (c) 2024 Red Hat
+ */
+
+#ifndef __DRM_PANIC_H__
+#define __DRM_PANIC_H__
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/iosys-map.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_fourcc.h>
+
+/**
+ * struct drm_scanout_buffer - DRM scanout buffer
+ *
+ * This structure holds the information necessary for drm_panic to draw the
+ * panic screen, and display it.
+ */
+struct drm_scanout_buffer {
+ /**
+ * @format:
+ *
+ * drm format of the scanout buffer.
+ */
+ const struct drm_format_info *format;
+
+ /**
+ * @map:
+ *
+ * Virtual address of the scanout buffer, either in memory or iomem.
+ * The scanout buffer should be in linear format, and can be directly
+ * sent to the display hardware. Tearing is not an issue for the panic
+ * screen.
+ */
+ struct iosys_map map[DRM_FORMAT_MAX_PLANES];
+
+ /**
+ * @pages: Optional, if the scanout buffer is not mapped, set this field
+ * to the array of pages of the scanout buffer. The panic code will use
+ * kmap_local_page_try_from_panic() to map one page at a time to write
+ * all the pixels. This array shouldn't be allocated from the
+	 * get_scanout_buffer() callback.
+ * The scanout buffer should be in linear format.
+ */
+ struct page **pages;
+
+ /**
+ * @width: Width of the scanout buffer, in pixels.
+ */
+ unsigned int width;
+
+ /**
+ * @height: Height of the scanout buffer, in pixels.
+ */
+ unsigned int height;
+
+ /**
+ * @pitch: Length in bytes between the start of two consecutive lines.
+ */
+ unsigned int pitch[DRM_FORMAT_MAX_PLANES];
+
+ /**
+	 * @set_pixel: Optional function to set a pixel color on the
+	 * framebuffer. It allows the driver to handle special tiling
+	 * formats. It takes precedence over the @map and @pages fields.
+ */
+ void (*set_pixel)(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color);
+
+ /**
+	 * @private: Private pointer that can be used in the @set_pixel
+	 * callback.
+ */
+ void *private;
+
+};
+
+#ifdef CONFIG_DRM_PANIC
+
+/**
+ * drm_panic_trylock - try to enter the panic printing critical section
+ * @dev: struct drm_device
+ * @flags: unsigned long irq flags you need to pass to the unlock() counterpart
+ *
+ * This function must be called by any panic printing code. The panic printing
+ * attempt must be aborted if the trylock fails.
+ *
+ * Panic printing code can make the following assumptions while holding the
+ * panic lock:
+ *
+ * - Anything protected by drm_panic_lock() and drm_panic_unlock() pairs is safe
+ * to access.
+ *
+ * - Furthermore the panic printing code only registers in drm_dev_register()
+ * and gets removed in drm_dev_unregister(). This allows the panic code to
+ * safely access any state which is invariant in between these two function
+ * calls, like the list of planes &drm_mode_config.plane_list or most of the
+ * struct drm_plane structure.
+ *
+ * Specifically thanks to the protection around plane updates in
+ * drm_atomic_helper_swap_state() the following additional guarantees hold:
+ *
+ * - It is safe to dereference the drm_plane.state pointer.
+ *
+ * - Anything in struct drm_plane_state or the driver's subclass thereof which
+ * stays invariant after the atomic check code has finished is safe to access.
+ * Specifically this includes the reference counted pointers to framebuffer
+ * and buffer objects.
+ *
+ * - Anything set up by &drm_plane_helper_funcs.prepare_fb and cleaned up by
+ *   &drm_plane_helper_funcs.cleanup_fb is safe to access, as long as it stays
+ *   invariant between these two calls. This also means that for drivers using
+ *   dynamic buffer management the framebuffer is pinned, and therefore all
+ *   relevant data structures can be accessed without taking any further locks
+ * (which would be impossible in panic context anyway).
+ *
+ * - Importantly, software and hardware state set up by
+ * &drm_plane_helper_funcs.begin_fb_access and
+ * &drm_plane_helper_funcs.end_fb_access is not safe to access.
+ *
+ * Drivers must not make any assumptions about the actual state of the hardware,
+ * unless they explicitly protect these hardware accesses with drm_panic_lock()
+ * and drm_panic_unlock().
+ *
+ * Return:
+ * %0 when failing to acquire the raw spinlock, nonzero on success.
+ */
+#define drm_panic_trylock(dev, flags) \
+ raw_spin_trylock_irqsave(&(dev)->mode_config.panic_lock, flags)
+
+/**
+ * drm_panic_lock - protect panic printing relevant state
+ * @dev: struct drm_device
+ * @flags: unsigned long irq flags you need to pass to the unlock() counterpart
+ *
+ * This function must be called to protect software and hardware state that the
+ * panic printing code must be able to rely on. The protected sections must be
+ * as small as possible. It uses the irqsave/irqrestore variant, and can be
+ * called from an irq handler. Examples include:
+ *
+ * - Access to peek/poke or other similar registers, if that is the way the
+ * driver prints the pixels into the scanout buffer at panic time.
+ *
+ * - Updates to pointers like &drm_plane.state, allowing the panic handler to
+ *   safely dereference these. This is done in drm_atomic_helper_swap_state().
+ *
+ * - Any state that isn't invariant and that the driver must be able to access
+ * during panic printing.
+ */
+
+#define drm_panic_lock(dev, flags) \
+ raw_spin_lock_irqsave(&(dev)->mode_config.panic_lock, flags)
+
+/**
+ * drm_panic_unlock - end of the panic printing critical section
+ * @dev: struct drm_device
+ * @flags: irq flags that were returned when acquiring the lock
+ *
+ * Unlocks the raw spinlock acquired by either drm_panic_lock() or
+ * drm_panic_trylock().
+ */
+#define drm_panic_unlock(dev, flags) \
+ raw_spin_unlock_irqrestore(&(dev)->mode_config.panic_lock, flags)
+
+#else
+
+static inline bool drm_panic_trylock(struct drm_device *dev, unsigned long flags)
+{
+ return true;
+}
+
+static inline void drm_panic_lock(struct drm_device *dev, unsigned long flags) {}
+static inline void drm_panic_unlock(struct drm_device *dev, unsigned long flags) {}
+
+#endif
+
+#if defined(CONFIG_DRM_PANIC_SCREEN_QR_CODE)
+size_t drm_panic_qr_max_data_size(u8 version, size_t url_len);
+
+u8 drm_panic_qr_generate(const char *url, u8 *data, size_t data_len, size_t data_size,
+ u8 *tmp, size_t tmp_size);
+#endif
+
+#endif /* __DRM_PANIC_H__ */
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index b7e899ce44f0..90e8abc08653 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -700,115 +700,3 @@
{0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
-
-#define r128_PCI_IDS \
- {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0, 0, 0}
-
-#define mga_PCI_IDS \
- {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
- {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
- {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
- {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
- {0, 0, 0}
-
-#define sisdrv_PCI_IDS \
- {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1039, 0x6330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
- {0x1039, 0x6351, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
- {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
- {0, 0, 0}
-
-#define tdfx_PCI_IDS \
- {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0, 0, 0}
-
-#define viadrv_PCI_IDS \
- {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
- {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3344, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
- {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
- {0, 0, 0}
-
-#define i810_PCI_IDS \
- {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0, 0, 0}
-
-#define savage_PCI_IDS \
- {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
- {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
- {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
- {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
- {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
- {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
- {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
- {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
- {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
- {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
- {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
- {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
- {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
- {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
- {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
- {0, 0, 0}
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 1294610e84f4..703ef4d1bbbc 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/ctype.h>
+#include <linux/kmsg_dump.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_rect.h>
@@ -32,6 +33,7 @@
#include <drm/drm_util.h>
struct drm_crtc;
+struct drm_plane_size_hint;
struct drm_printer;
struct drm_modeset_acquire_ctx;
@@ -43,7 +45,7 @@ enum drm_scaling_filter {
/**
* struct drm_plane_state - mutable plane state
*
- * Please not that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
+ * Please note that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
* @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the
* raw coordinates provided by userspace. Drivers should use
* drm_atomic_helper_check_plane_state() and only use the derived rectangles in
@@ -56,7 +58,7 @@ struct drm_plane_state {
/**
* @crtc:
*
- * Currently bound CRTC, NULL if disabled. Do not this write directly,
+ * Currently bound CRTC, NULL if disabled. Do not write this directly,
* use drm_atomic_set_crtc_for_plane()
*/
struct drm_crtc *crtc;
@@ -74,13 +76,11 @@ struct drm_plane_state {
*
* Optional fence to wait for before scanning out @fb. The core atomic
* code will set this when userspace is using explicit fencing. Do not
- * write this field directly for a driver's implicit fence, use
- * drm_atomic_set_fence_for_plane() to ensure that an explicit fence is
- * preserved.
+ * write this field directly for a driver's implicit fence.
*
* Drivers should store any implicit fence in this from their
- * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_plane_helper_prepare_fb()
- * and drm_gem_simple_display_pipe_prepare_fb() for suitable helpers.
+ * &drm_plane_helper_funcs.prepare_fb callback. See
+ * drm_gem_plane_helper_prepare_fb() for a suitable helper.
*/
struct dma_fence *fence;
@@ -118,6 +118,10 @@ struct drm_plane_state {
/** @src_h: height of visible portion of plane (in 16.16) */
uint32_t src_h, src_w;
+ /** @hotspot_x: x offset to mouse cursor hotspot */
+ /** @hotspot_y: y offset to mouse cursor hotspot */
+ int32_t hotspot_x, hotspot_y;
+
/**
* @alpha:
* Opacity of the plane with 0 as completely transparent and 0xffff as
@@ -186,10 +190,23 @@ struct drm_plane_state {
* since last plane update) as an array of &drm_mode_rect in framebuffer
 * coordinates of the attached framebuffer. Note that unlike plane src,
* damage clips are not in 16.16 fixed point.
+ *
+ * See drm_plane_get_damage_clips() and
+ * drm_plane_get_damage_clips_count() for accessing these.
*/
struct drm_property_blob *fb_damage_clips;
/**
+ * @ignore_damage_clips:
+ *
+	 * Set by drivers to indicate to the drm_atomic_helper_damage_iter_init()
+ * helper that the @fb_damage_clips blob property should be ignored.
+ *
+ * See :ref:`damage_tracking_properties` for more information.
+ */
+ bool ignore_damage_clips;
+
+ /**
* @src:
*
* source coordinates of the plane (in 16.16).
@@ -227,6 +244,14 @@ struct drm_plane_state {
enum drm_scaling_filter scaling_filter;
/**
+ * @color_pipeline:
+ *
+ * The first colorop of the active color pipeline, or NULL, if no
+ * color pipeline is active.
+ */
+ struct drm_colorop *color_pipeline;
+
+ /**
* @commit: Tracks the pending commit to prevent use-after-free conditions,
* and for async plane updates.
*
@@ -236,6 +261,13 @@ struct drm_plane_state {
/** @state: backpointer to global drm_atomic_state */
struct drm_atomic_state *state;
+
+ /**
+ * @color_mgmt_changed: Color management properties have changed. Used
+ * by the atomic helpers and drivers to steer the atomic commit control
+ * flow.
+ */
+ bool color_mgmt_changed : 1;
};
static inline struct drm_rect
@@ -513,7 +545,7 @@ struct drm_plane_funcs {
* This optional hook is used for the DRM to determine if the given
* format/modifier combination is valid for the plane. This allows the
* DRM to generate the correct format bitmask (which formats apply to
- * which modifier), and to valdiate modifiers at atomic_check time.
+ * which modifier), and to validate modifiers at atomic_check time.
*
* If not present, then any modifier in the plane's modifier
* list is allowed with any of the plane's formats.
@@ -525,6 +557,23 @@ struct drm_plane_funcs {
*/
bool (*format_mod_supported)(struct drm_plane *plane, uint32_t format,
uint64_t modifier);
+ /**
+ * @format_mod_supported_async:
+ *
+	 * This optional hook is used by the DRM core to determine whether, for
+	 * an asynchronous flip, the given format/modifier combination is valid
+	 * for the plane. This allows the DRM to generate the correct format
+	 * bitmask (which formats apply to which modifier), and to validate
+	 * modifiers at atomic_check time.
+ *
+ * Returns:
+ *
+ * True if the given modifier is valid for that format on the plane.
+ * False otherwise.
+ */
+ bool (*format_mod_supported_async)(struct drm_plane *plane,
+ u32 format, u64 modifier);
+
};
/**
@@ -630,7 +679,7 @@ struct drm_plane {
unsigned int format_count;
/**
* @format_default: driver hasn't supplied supported formats for the
- * plane. Used by the drm_plane_init compatibility wrapper only.
+ * plane. Used by the non-atomic driver compatibility wrapper only.
*/
bool format_default;
@@ -743,10 +792,33 @@ struct drm_plane {
struct drm_property *color_range_property;
/**
+ * @color_pipeline_property:
+ *
+ * Optional "COLOR_PIPELINE" enum property for specifying
+ * a color pipeline to use on the plane.
+ */
+ struct drm_property *color_pipeline_property;
+
+ /**
* @scaling_filter_property: property to apply a particular filter while
* scaling.
*/
struct drm_property *scaling_filter_property;
+
+ /**
+ * @hotspot_x_property: property to set mouse hotspot x offset.
+ */
+ struct drm_property *hotspot_x_property;
+
+ /**
+ * @hotspot_y_property: property to set mouse hotspot y offset.
+ */
+ struct drm_property *hotspot_y_property;
+
+ /**
+ * @kmsg_panic: Used to register a panic notifier for this plane
+ */
+ struct kmsg_dumper kmsg_panic;
};
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
@@ -761,12 +833,6 @@ int drm_universal_plane_init(struct drm_device *dev,
const uint64_t *format_modifiers,
enum drm_plane_type type,
const char *name, ...);
-int drm_plane_init(struct drm_device *dev,
- struct drm_plane *plane,
- uint32_t possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- bool is_primary);
void drm_plane_cleanup(struct drm_plane *plane);
__printf(10, 11)
@@ -800,6 +866,9 @@ void *__drmm_universal_plane_alloc(struct drm_device *dev,
*
* The @drm_plane_funcs.destroy hook must be NULL.
*
+ * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier may set
+ * @format_modifiers to NULL. The plane will advertise the linear modifier.
+ *
* Returns:
* Pointer to new plane, or ERR_PTR on failure.
*/
@@ -811,6 +880,50 @@ void *__drmm_universal_plane_alloc(struct drm_device *dev,
format_count, format_modifiers, \
plane_type, name, ##__VA_ARGS__))
+__printf(10, 11)
+void *__drm_universal_plane_alloc(struct drm_device *dev,
+ size_t size, size_t offset,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats,
+ unsigned int format_count,
+ const uint64_t *format_modifiers,
+ enum drm_plane_type plane_type,
+ const char *name, ...);
+
+/**
+ * drm_universal_plane_alloc() - Allocate and initialize a universal plane object
+ * @dev: DRM device
+ * @type: the type of the struct which contains struct &drm_plane
+ * @member: the name of the &drm_plane within @type
+ * @possible_crtcs: bitmask of possible CRTCs
+ * @funcs: callbacks for the new plane
+ * @formats: array of supported formats (DRM_FORMAT\_\*)
+ * @format_count: number of elements in @formats
+ * @format_modifiers: array of struct drm_format modifiers terminated by
+ * DRM_FORMAT_MOD_INVALID
+ * @plane_type: type of plane (overlay, primary, cursor)
+ * @name: printf style format string for the plane name, or NULL for default name
+ *
+ * Allocates and initializes a plane object of type @type. The caller
+ * is responsible for releasing the allocated memory with kfree().
+ *
+ * Drivers are encouraged to use drmm_universal_plane_alloc() instead.
+ *
+ * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier may set
+ * @format_modifiers to NULL. The plane will advertise the linear modifier.
+ *
+ * Returns:
+ * Pointer to new plane, or ERR_PTR on failure.
+ */
+#define drm_universal_plane_alloc(dev, type, member, possible_crtcs, funcs, formats, \
+ format_count, format_modifiers, plane_type, name, ...) \
+ ((type *)__drm_universal_plane_alloc(dev, sizeof(type), \
+ offsetof(type, member), \
+ possible_crtcs, funcs, formats, \
+ format_count, format_modifiers, \
+ plane_type, name, ##__VA_ARGS__))
+
/**
* drm_plane_index - find the index of a registered plane
* @plane: plane to find index for
@@ -892,42 +1005,24 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
#define drm_for_each_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+bool drm_plane_has_format(struct drm_plane *plane,
+ u32 format, u64 modifier);
bool drm_any_plane_has_format(struct drm_device *dev,
u32 format, u64 modifier);
-/**
- * drm_plane_get_damage_clips_count - Returns damage clips count.
- * @state: Plane state.
- *
- * Simple helper to get the number of &drm_mode_rect clips set by user-space
- * during plane update.
- *
- * Return: Number of clips in plane fb_damage_clips blob property.
- */
-static inline unsigned int
-drm_plane_get_damage_clips_count(const struct drm_plane_state *state)
-{
- return (state && state->fb_damage_clips) ?
- state->fb_damage_clips->length/sizeof(struct drm_mode_rect) : 0;
-}
-/**
- * drm_plane_get_damage_clips - Returns damage clips.
- * @state: Plane state.
- *
- * Note that this function returns uapi type &drm_mode_rect. Drivers might
- * instead be interested in internal &drm_rect which can be obtained by calling
- * drm_helper_get_plane_damage_clips().
- *
- * Return: Damage clips in plane fb_damage_clips blob property.
- */
-static inline struct drm_mode_rect *
-drm_plane_get_damage_clips(const struct drm_plane_state *state)
-{
- return (struct drm_mode_rect *)((state && state->fb_damage_clips) ?
- state->fb_damage_clips->data : NULL);
-}
+void drm_plane_enable_fb_damage_clips(struct drm_plane *plane);
+unsigned int
+drm_plane_get_damage_clips_count(const struct drm_plane_state *state);
+struct drm_mode_rect *
+drm_plane_get_damage_clips(const struct drm_plane_state *state);
int drm_plane_create_scaling_filter_property(struct drm_plane *plane,
unsigned int supported_filters);
+int drm_plane_add_size_hints_property(struct drm_plane *plane,
+ const struct drm_plane_size_hint *hints,
+ int num_hints);
+int drm_plane_create_color_pipeline_property(struct drm_plane *plane,
+ const struct drm_prop_enum_list *pipelines,
+ int num_pipelines);
#endif
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 331ebd60b3a3..75f9c4830564 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -24,21 +24,34 @@
#ifndef DRM_PLANE_HELPER_H
#define DRM_PLANE_HELPER_H
-#include <drm/drm_rect.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_modeset_helper.h>
+#include <linux/types.h>
-/*
- * Drivers that don't allow primary plane scaling may pass this macro in place
- * of the min/max scale parameters of the update checker function.
+struct drm_crtc;
+struct drm_framebuffer;
+struct drm_modeset_acquire_ctx;
+struct drm_plane;
+
+int drm_plane_helper_update_primary(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx);
+int drm_plane_helper_disable_primary(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx);
+void drm_plane_helper_destroy(struct drm_plane *plane);
+
+/**
+ * DRM_PLANE_NON_ATOMIC_FUNCS - Default plane functions for non-atomic drivers
*
- * Due to src being in 16.16 fixed point and dest being in integer pixels,
- * 1<<16 represents no scaling.
+ * This macro initializes plane functions for non-atomic drivers to default
+ * values. Non-atomic interfaces are deprecated and should not be used in new
+ * drivers.
*/
-#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
-
-void drm_primary_helper_destroy(struct drm_plane *plane);
-extern const struct drm_plane_funcs drm_primary_helper_funcs;
+#define DRM_PLANE_NON_ATOMIC_FUNCS \
+ .update_plane = drm_plane_helper_update_primary, \
+ .disable_plane = drm_plane_helper_disable_primary, \
+ .destroy = drm_plane_helper_destroy
#endif
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 54f2c58305d2..f50f862f0d8b 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -54,7 +54,7 @@ struct device;
struct dma_buf_export_info;
struct dma_buf;
struct dma_buf_attachment;
-struct dma_buf_map;
+struct iosys_map;
enum dma_data_direction;
@@ -69,6 +69,9 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags);
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd);
@@ -83,8 +86,8 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir);
-int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map);
-void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map);
+int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map);
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map);
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
@@ -97,6 +100,9 @@ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt);
/* helper functions for importing */
+bool drm_gem_is_prime_exported_dma_buf(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
struct dma_buf *dma_buf,
struct device *attach_dev);
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index a3c58c941bdc..ab017b05e175 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -28,14 +28,18 @@
#include <linux/compiler.h>
#include <linux/printk.h>
-#include <linux/seq_file.h>
#include <linux/device.h>
-#include <linux/debugfs.h>
+#include <linux/dynamic_debug.h>
#include <drm/drm.h>
+#include <drm/drm_device.h>
+
+struct debugfs_regset32;
+struct drm_device;
+struct seq_file;
/* Do *not* use outside of drm_print.[ch]! */
-extern unsigned int __drm_debug;
+extern unsigned long __drm_debug;
/**
* DOC: print
@@ -67,6 +71,101 @@ extern unsigned int __drm_debug;
*/
/**
+ * enum drm_debug_category - The DRM debug categories
+ *
+ * Each of the DRM debug logging macros uses a specific category, and the logging
+ * is filtered by the drm.debug module parameter. This enum specifies the values
+ * for the interface.
+ *
+ * Each DRM_DEBUG_<CATEGORY> macro logs to DRM_UT_<CATEGORY> category, except
+ * DRM_DEBUG() logs to DRM_UT_CORE.
+ *
+ * Enabling verbose debug messages is done through the drm.debug parameter, each
+ * category being enabled by a bit:
+ *
+ * - drm.debug=0x1 will enable CORE messages
+ * - drm.debug=0x2 will enable DRIVER messages
+ * - drm.debug=0x3 will enable CORE and DRIVER messages
+ * - ...
+ * - drm.debug=0x1ff will enable all messages
+ *
+ * An interesting feature is that it's possible to enable verbose logging at
+ * run-time by echoing the debug value in its sysfs node::
+ *
+ * # echo 0xf > /sys/module/drm/parameters/debug
+ *
+ */
+enum drm_debug_category {
+ /* These names must match those in DYNAMIC_DEBUG_CLASSBITS */
+ /**
+ * @DRM_UT_CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c,
+ * drm_memory.c, ...
+ */
+ DRM_UT_CORE,
+ /**
+ * @DRM_UT_DRIVER: Used in the vendor specific part of the driver: i915,
+ * radeon, ... macro.
+ */
+ DRM_UT_DRIVER,
+ /**
+ * @DRM_UT_KMS: Used in the modesetting code.
+ */
+ DRM_UT_KMS,
+ /**
+ * @DRM_UT_PRIME: Used in the prime code.
+ */
+ DRM_UT_PRIME,
+ /**
+ * @DRM_UT_ATOMIC: Used in the atomic code.
+ */
+ DRM_UT_ATOMIC,
+ /**
+ * @DRM_UT_VBL: Used for verbose debug message in the vblank code.
+ */
+ DRM_UT_VBL,
+ /**
+ * @DRM_UT_STATE: Used for verbose atomic state debugging.
+ */
+ DRM_UT_STATE,
+ /**
+ * @DRM_UT_LEASE: Used in the lease code.
+ */
+ DRM_UT_LEASE,
+ /**
+ * @DRM_UT_DP: Used in the DP code.
+ */
+ DRM_UT_DP,
+ /**
+ * @DRM_UT_DRMRES: Used in the drm managed resources code.
+ */
+ DRM_UT_DRMRES
+};
+
+static inline bool drm_debug_enabled_raw(enum drm_debug_category category)
+{
+ return unlikely(__drm_debug & BIT(category));
+}
+
+#define drm_debug_enabled_instrumented(category) \
+ ({ \
+ pr_debug("todo: is this frequent enough to optimize ?\n"); \
+ drm_debug_enabled_raw(category); \
+ })
+
+#if defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+/*
+ * the drm.debug API uses dyndbg, so each drm_*dbg macro/callsite gets
+ * a descriptor, and only enabled callsites are reachable. They use
+ * the private macro to avoid re-testing the enable-bit.
+ */
+#define __drm_debug_enabled(category) true
+#define drm_debug_enabled(category) drm_debug_enabled_instrumented(category)
+#else
+#define __drm_debug_enabled(category) drm_debug_enabled_raw(category)
+#define drm_debug_enabled(category) drm_debug_enabled_raw(category)
+#endif
+
+/**
* struct drm_printer - drm output "stream"
*
* Do not use struct members directly. Use drm_printer_seq_file(),
@@ -77,7 +176,13 @@ struct drm_printer {
void (*printfn)(struct drm_printer *p, struct va_format *vaf);
void (*puts)(struct drm_printer *p, const char *str);
void *arg;
+ const void *origin;
const char *prefix;
+ struct {
+ unsigned int series;
+ unsigned int counter;
+ } line;
+ enum drm_debug_category category;
};
void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf);
@@ -85,8 +190,9 @@ void __drm_puts_coredump(struct drm_printer *p, const char *str);
void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf);
void __drm_puts_seq_file(struct drm_printer *p, const char *str);
void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf);
-void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf);
+void __drm_printfn_dbg(struct drm_printer *p, struct va_format *vaf);
void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf);
+void __drm_printfn_line(struct drm_printer *p, struct va_format *vaf);
__printf(2, 3)
void drm_printf(struct drm_printer *p, const char *f, ...);
@@ -94,6 +200,8 @@ void drm_puts(struct drm_printer *p, const char *str);
void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset);
void drm_print_bits(struct drm_printer *p, unsigned long value,
const char * const bits[], unsigned int nbits);
+void drm_print_hex_dump(struct drm_printer *p, const char *prefix,
+ const u8 *buf, size_t len);
__printf(2, 0)
/**
@@ -121,7 +229,8 @@ drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va)
/**
* struct drm_print_iterator - local struct used with drm_printer_coredump
- * @data: Pointer to the devcoredump output buffer
+ * @data: Pointer to the devcoredump output buffer; can be NULL when using
+ * drm_coredump_printer() only to determine the size of the devcoredump
* @start: The offset within the buffer to start writing
* @remain: The number of bytes to write for this iteration
*/
@@ -166,6 +275,57 @@ struct drm_print_iterator {
* coredump_read, ...)
* }
*
+ * The above example has a time complexity of O(N^2), where N is the size of the
+ * devcoredump. This is acceptable for small devcoredumps but scales poorly for
+ * larger ones.
+ *
+ * Another use case for drm_coredump_printer is to capture the devcoredump into
+ * a saved buffer before the dev_coredump() callback. This involves two passes:
+ * one to determine the size of the devcoredump and another to print it to a
+ * buffer. Then, in dev_coredump(), copy from the saved buffer into the
+ * devcoredump read buffer.
+ *
+ * For example::
+ *
+ * char *devcoredump_saved_buffer;
+ *
+ * ssize_t __coredump_print(char *buffer, ssize_t count, ...)
+ * {
+ * struct drm_print_iterator iter;
+ * struct drm_printer p;
+ *
+ * iter.data = buffer;
+ * iter.start = 0;
+ * iter.remain = count;
+ *
+ * p = drm_coredump_printer(&iter);
+ *
+ *		drm_printf(&p, "foo=%d\n", foo);
+ * ...
+ * return count - iter.remain;
+ * }
+ *
+ * void coredump_print(...)
+ * {
+ * ssize_t count;
+ *
+ * count = __coredump_print(NULL, INT_MAX, ...);
+ * devcoredump_saved_buffer = kvmalloc(count, GFP_KERNEL);
+ * __coredump_print(devcoredump_saved_buffer, count, ...);
+ * }
+ *
+ * void coredump_read(char *buffer, loff_t offset, size_t count,
+ * void *data, size_t datalen)
+ * {
+ * ...
+ * memcpy(buffer, devcoredump_saved_buffer + offset, count);
+ * ...
+ * }
+ *
+ * The above example has a time complexity of O(2N), i.e. linear, where N is
+ * the size of the devcoredump. This scales better than the previous example
+ * for larger devcoredumps.
+ *
* RETURNS:
* The &drm_printer object
*/
@@ -185,6 +345,26 @@ drm_coredump_printer(struct drm_print_iterator *iter)
}
/**
+ * drm_coredump_printer_is_full() - DRM coredump printer output is full
+ * @p: DRM coredump printer
+ *
+ * Checks whether the DRM coredump printer output buffer is full; useful to
+ * short-circuit coredump printing once the printer is full.
+ *
+ * RETURNS:
+ * True if DRM coredump printer output buffer is full, False otherwise
+ */
+static inline bool drm_coredump_printer_is_full(struct drm_printer *p)
+{
+ struct drm_print_iterator *iterator = p->arg;
+
+ if (p->printfn != __drm_printfn_coredump)
+ return true;
+
+ return !iterator->remain;
+}
+
+/**
* drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file
* @f: the &struct seq_file to output to
*
@@ -218,128 +398,125 @@ static inline struct drm_printer drm_info_printer(struct device *dev)
}
/**
- * drm_debug_printer - construct a &drm_printer that outputs to pr_debug()
- * @prefix: debug output prefix
+ * drm_dbg_printer - construct a &drm_printer for drm device specific output
+ * @drm: the &struct drm_device pointer, or NULL
+ * @category: the debug category to use
+ * @prefix: debug output prefix, or NULL for no prefix
*
* RETURNS:
* The &drm_printer object
*/
-static inline struct drm_printer drm_debug_printer(const char *prefix)
+static inline struct drm_printer drm_dbg_printer(struct drm_device *drm,
+ enum drm_debug_category category,
+ const char *prefix)
{
struct drm_printer p = {
- .printfn = __drm_printfn_debug,
- .prefix = prefix
+ .printfn = __drm_printfn_dbg,
+ .arg = drm,
+ .origin = (const void *)_THIS_IP_, /* it's fine as we will be inlined */
+ .prefix = prefix,
+ .category = category,
};
return p;
}
/**
- * drm_err_printer - construct a &drm_printer that outputs to pr_err()
- * @prefix: debug output prefix
+ * drm_err_printer - construct a &drm_printer that outputs to drm_err()
+ * @drm: the &struct drm_device pointer
+ * @prefix: debug output prefix, or NULL for no prefix
*
* RETURNS:
* The &drm_printer object
*/
-static inline struct drm_printer drm_err_printer(const char *prefix)
+static inline struct drm_printer drm_err_printer(struct drm_device *drm,
+ const char *prefix)
{
struct drm_printer p = {
.printfn = __drm_printfn_err,
+ .arg = drm,
.prefix = prefix
};
return p;
}
/**
- * enum drm_debug_category - The DRM debug categories
+ * drm_line_printer - construct a &drm_printer that prefixes outputs with line numbers
+ * @p: the &struct drm_printer which actually generates the output
+ * @prefix: optional output prefix, or NULL for no prefix
+ * @series: optional unique series identifier, or 0 to omit identifier in the output
*
- * Each of the DRM debug logging macros use a specific category, and the logging
- * is filtered by the drm.debug module parameter. This enum specifies the values
- * for the interface.
+ * This printer can be used to increase the robustness of the captured output,
+ * making sure no intermediate lines of the output were lost. This is helpful
+ * when capturing crash data.
*
- * Each DRM_DEBUG_<CATEGORY> macro logs to DRM_UT_<CATEGORY> category, except
- * DRM_DEBUG() logs to DRM_UT_CORE.
+ * Example 1::
*
- * Enabling verbose debug messages is done through the drm.debug parameter, each
- * category being enabled by a bit:
+ * void crash_dump(struct drm_device *drm)
+ * {
+ * static unsigned int id;
+ * struct drm_printer p = drm_err_printer(drm, "crash");
+ * struct drm_printer lp = drm_line_printer(&p, "dump", ++id);
*
- * - drm.debug=0x1 will enable CORE messages
- * - drm.debug=0x2 will enable DRIVER messages
- * - drm.debug=0x3 will enable CORE and DRIVER messages
- * - ...
- * - drm.debug=0x1ff will enable all messages
+ * drm_printf(&lp, "foo");
+ * drm_printf(&lp, "bar");
+ * }
*
- * An interesting feature is that it's possible to enable verbose logging at
- * run-time by echoing the debug value in its sysfs node::
+ * The above code will print something like this into dmesg::
*
- * # echo 0xf > /sys/module/drm/parameters/debug
+ * [ ] 0000:00:00.0: [drm] *ERROR* crash dump 1.1: foo
+ * [ ] 0000:00:00.0: [drm] *ERROR* crash dump 1.2: bar
+ *
+ * Example 2::
+ *
+ * void line_dump(struct device *dev)
+ * {
+ * struct drm_printer p = drm_info_printer(dev);
+ * struct drm_printer lp = drm_line_printer(&p, NULL, 0);
*
+ * drm_printf(&lp, "foo");
+ * drm_printf(&lp, "bar");
+ * }
+ *
+ * The above code will print::
+ *
+ * [ ] 0000:00:00.0: [drm] 1: foo
+ * [ ] 0000:00:00.0: [drm] 2: bar
+ *
+ * RETURNS:
+ * The &drm_printer object
*/
-enum drm_debug_category {
- /**
- * @DRM_UT_CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c,
- * drm_memory.c, ...
- */
- DRM_UT_CORE = 0x01,
- /**
- * @DRM_UT_DRIVER: Used in the vendor specific part of the driver: i915,
- * radeon, ... macro.
- */
- DRM_UT_DRIVER = 0x02,
- /**
- * @DRM_UT_KMS: Used in the modesetting code.
- */
- DRM_UT_KMS = 0x04,
- /**
- * @DRM_UT_PRIME: Used in the prime code.
- */
- DRM_UT_PRIME = 0x08,
- /**
- * @DRM_UT_ATOMIC: Used in the atomic code.
- */
- DRM_UT_ATOMIC = 0x10,
- /**
- * @DRM_UT_VBL: Used for verbose debug message in the vblank code.
- */
- DRM_UT_VBL = 0x20,
- /**
- * @DRM_UT_STATE: Used for verbose atomic state debugging.
- */
- DRM_UT_STATE = 0x40,
- /**
- * @DRM_UT_LEASE: Used in the lease code.
- */
- DRM_UT_LEASE = 0x80,
- /**
- * @DRM_UT_DP: Used in the DP code.
- */
- DRM_UT_DP = 0x100,
- /**
- * @DRM_UT_DRMRES: Used in the drm managed resources code.
- */
- DRM_UT_DRMRES = 0x200,
-};
-
-static inline bool drm_debug_enabled(enum drm_debug_category category)
+static inline struct drm_printer drm_line_printer(struct drm_printer *p,
+ const char *prefix,
+ unsigned int series)
{
- return unlikely(__drm_debug & category);
+ struct drm_printer lp = {
+ .printfn = __drm_printfn_line,
+ .arg = p,
+ .prefix = prefix,
+ .line = { .series = series, },
+ };
+ return lp;
}
/*
* struct device based logging
*
- * Prefer drm_device based logging over device or prink based logging.
+ * Prefer drm_device based logging over device or printk based logging.
*/
__printf(3, 4)
void drm_dev_printk(const struct device *dev, const char *level,
const char *format, ...);
-__printf(3, 4)
-void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
- const char *format, ...);
+struct _ddebug;
+__printf(4, 5)
+void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev,
+ enum drm_debug_category category, const char *format, ...);
/**
* DRM_DEV_ERROR() - Error output.
*
+ * NOTE: this is deprecated in favor of drm_err() or dev_err().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*/
@@ -349,6 +526,9 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_ERROR_RATELIMITED() - Rate limited error output.
*
+ * NOTE: this is deprecated in favor of drm_err_ratelimited() or
+ * dev_err_ratelimited().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*
@@ -364,9 +544,11 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \
})
+/* NOTE: this is deprecated in favor of drm_info() or dev_info(). */
#define DRM_DEV_INFO(dev, fmt, ...) \
drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_info_once() or dev_info_once(). */
#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \
({ \
static bool __print_once __read_mostly; \
@@ -376,9 +558,20 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
} \
})
+#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+#define drm_dev_dbg(dev, cat, fmt, ...) \
+ __drm_dev_dbg(NULL, dev, cat, fmt, ##__VA_ARGS__)
+#else
+#define drm_dev_dbg(dev, cat, fmt, ...) \
+ _dynamic_func_call_cls(cat, fmt, __drm_dev_dbg, \
+ dev, cat, fmt, ##__VA_ARGS__)
+#endif
+
/**
* DRM_DEV_DEBUG() - Debug output for generic drm code
*
+ * NOTE: this is deprecated in favor of drm_dbg_core().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*/
@@ -387,6 +580,8 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_DEBUG_DRIVER() - Debug output for vendor specific part of the driver
*
+ * NOTE: this is deprecated in favor of drm_dbg() or dev_dbg().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*/
@@ -395,6 +590,8 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
/**
* DRM_DEV_DEBUG_KMS() - Debug output for modesetting code
*
+ * NOTE: this is deprecated in favor of drm_dbg_kms().
+ *
* @dev: device pointer
* @fmt: printf() like format string.
*/
@@ -407,9 +604,15 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
	 * Prefer drm_device based logging over device or printk based logging.
*/
+/* Helper to enforce struct drm_device type */
+static inline struct device *__drm_to_dev(const struct drm_device *drm)
+{
+ return drm ? drm->dev : NULL;
+}
+
/* Helper for struct drm_device based logging. */
#define __drm_printk(drm, level, type, fmt, ...) \
- dev_##level##type((drm)->dev, "[drm] " fmt, ##__VA_ARGS__)
+ dev_##level##type(__drm_to_dev(drm), "[drm] " fmt, ##__VA_ARGS__)
#define drm_info(drm, fmt, ...) \
@@ -443,26 +646,27 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
#define drm_dbg_core(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
-#define drm_dbg(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_CORE, fmt, ##__VA_ARGS__)
+#define drm_dbg_driver(drm, fmt, ...) \
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
#define drm_dbg_kms(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_KMS, fmt, ##__VA_ARGS__)
#define drm_dbg_prime(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_PRIME, fmt, ##__VA_ARGS__)
#define drm_dbg_atomic(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
#define drm_dbg_vbl(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_VBL, fmt, ##__VA_ARGS__)
#define drm_dbg_state(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_STATE, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_STATE, fmt, ##__VA_ARGS__)
#define drm_dbg_lease(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_LEASE, fmt, ##__VA_ARGS__)
#define drm_dbg_dp(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_DP, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DP, fmt, ##__VA_ARGS__)
#define drm_dbg_drmres(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_DRMRES, fmt, ##__VA_ARGS__)
+ drm_dev_dbg(__drm_to_dev(drm), DRM_UT_DRMRES, fmt, ##__VA_ARGS__)
+#define drm_dbg(drm, fmt, ...) drm_dbg_driver(drm, fmt, ##__VA_ARGS__)
/*
* printk based logging
@@ -470,74 +674,96 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
	 * Prefer drm_device based logging over device or printk based logging.
*/
-__printf(2, 3)
-void __drm_dbg(enum drm_debug_category category, const char *format, ...);
__printf(1, 2)
void __drm_err(const char *format, ...);
+#if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG)
+#define __drm_dbg(cat, fmt, ...) __drm_dev_dbg(NULL, NULL, cat, fmt, ##__VA_ARGS__)
+#else
+#define __drm_dbg(cat, fmt, ...) \
+ _dynamic_func_call_cls(cat, fmt, __drm_dev_dbg, \
+ NULL, cat, fmt, ##__VA_ARGS__)
+#endif
+
/* Macros to make printk easier */
#define _DRM_PRINTK(once, level, fmt, ...) \
printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_info(). */
#define DRM_INFO(fmt, ...) \
_DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_notice(). */
#define DRM_NOTE(fmt, ...) \
_DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_warn(). */
#define DRM_WARN(fmt, ...) \
_DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_info_once(). */
#define DRM_INFO_ONCE(fmt, ...) \
_DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_notice_once(). */
#define DRM_NOTE_ONCE(fmt, ...) \
_DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_warn_once(). */
#define DRM_WARN_ONCE(fmt, ...) \
_DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_err(). */
#define DRM_ERROR(fmt, ...) \
__drm_err(fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of pr_err_ratelimited(). */
#define DRM_ERROR_RATELIMITED(fmt, ...) \
DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_core(NULL, ...). */
#define DRM_DEBUG(fmt, ...) \
__drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg(NULL, ...). */
#define DRM_DEBUG_DRIVER(fmt, ...) \
__drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_kms(NULL, ...). */
#define DRM_DEBUG_KMS(fmt, ...) \
__drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_prime(NULL, ...). */
#define DRM_DEBUG_PRIME(fmt, ...) \
__drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_atomic(NULL, ...). */
#define DRM_DEBUG_ATOMIC(fmt, ...) \
__drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_vbl(NULL, ...). */
#define DRM_DEBUG_VBL(fmt, ...) \
__drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_lease(NULL, ...). */
#define DRM_DEBUG_LEASE(fmt, ...) \
__drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+/* NOTE: this is deprecated in favor of drm_dbg_dp(NULL, ...). */
#define DRM_DEBUG_DP(fmt, ...) \
__drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
#define __DRM_DEFINE_DBG_RATELIMITED(category, drm, fmt, ...) \
({ \
static DEFINE_RATELIMIT_STATE(rs_, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);\
- const struct drm_device *drm_ = (drm); \
\
if (drm_debug_enabled(DRM_UT_ ## category) && __ratelimit(&rs_)) \
- drm_dev_printk(drm_ ? drm_->dev : NULL, KERN_DEBUG, fmt, ## __VA_ARGS__); \
+ drm_dev_printk(__drm_to_dev(drm), KERN_DEBUG, fmt, ## __VA_ARGS__); \
})
+#define drm_dbg_ratelimited(drm, fmt, ...) \
+ __DRM_DEFINE_DBG_RATELIMITED(DRIVER, drm, fmt, ## __VA_ARGS__)
+
#define drm_dbg_kms_ratelimited(drm, fmt, ...) \
__DRM_DEFINE_DBG_RATELIMITED(KMS, drm, fmt, ## __VA_ARGS__)
-#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) drm_dbg_kms_ratelimited(NULL, fmt, ## __VA_ARGS__)
-
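A hedged sketch of the new driver-category wrapper (the interrupt handler is hypothetical); at most one message is printed per ratelimit window:

static void example_handle_irq(struct drm_device *drm)
{
	drm_dbg_ratelimited(drm, "spurious interrupt\n");
}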
/*
* struct drm_device based WARNs
*
@@ -550,14 +776,14 @@ void __drm_err(const char *format, ...);
/* Helper for struct drm_device based WARNs */
#define drm_WARN(drm, condition, format, arg...) \
- WARN(condition, "%s %s: " format, \
- dev_driver_string((drm)->dev), \
- dev_name((drm)->dev), ## arg)
+ WARN(condition, "%s %s: [drm] " format, \
+ dev_driver_string(__drm_to_dev(drm)), \
+ dev_name(__drm_to_dev(drm)), ## arg)
#define drm_WARN_ONCE(drm, condition, format, arg...) \
- WARN_ONCE(condition, "%s %s: " format, \
- dev_driver_string((drm)->dev), \
- dev_name((drm)->dev), ## arg)
+ WARN_ONCE(condition, "%s %s: [drm] " format, \
+ dev_driver_string(__drm_to_dev(drm)), \
+ dev_name(__drm_to_dev(drm)), ## arg)
#define drm_WARN_ON(drm, x) \
drm_WARN((drm), (x), "%s", \
diff --git a/include/drm/drm_privacy_screen_consumer.h b/include/drm/drm_privacy_screen_consumer.h
new file mode 100644
index 000000000000..7f66a90d15b7
--- /dev/null
+++ b/include/drm/drm_privacy_screen_consumer.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef __DRM_PRIVACY_SCREEN_CONSUMER_H__
+#define __DRM_PRIVACY_SCREEN_CONSUMER_H__
+
+#include <linux/device.h>
+#include <drm/drm_connector.h>
+
+struct drm_privacy_screen;
+
+#if IS_ENABLED(CONFIG_DRM_PRIVACY_SCREEN)
+struct drm_privacy_screen *drm_privacy_screen_get(struct device *dev,
+ const char *con_id);
+void drm_privacy_screen_put(struct drm_privacy_screen *priv);
+
+int drm_privacy_screen_set_sw_state(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status sw_state);
+void drm_privacy_screen_get_state(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status *sw_state_ret,
+ enum drm_privacy_screen_status *hw_state_ret);
+
+int drm_privacy_screen_register_notifier(struct drm_privacy_screen *priv,
+ struct notifier_block *nb);
+int drm_privacy_screen_unregister_notifier(struct drm_privacy_screen *priv,
+ struct notifier_block *nb);
+#else
+static inline struct drm_privacy_screen *drm_privacy_screen_get(struct device *dev,
+ const char *con_id)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline void drm_privacy_screen_put(struct drm_privacy_screen *priv)
+{
+}
+static inline int drm_privacy_screen_set_sw_state(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status sw_state)
+{
+ return -ENODEV;
+}
+static inline void drm_privacy_screen_get_state(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status *sw_state_ret,
+ enum drm_privacy_screen_status *hw_state_ret)
+{
+ *sw_state_ret = PRIVACY_SCREEN_DISABLED;
+ *hw_state_ret = PRIVACY_SCREEN_DISABLED;
+}
+static inline int drm_privacy_screen_register_notifier(struct drm_privacy_screen *priv,
+ struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+static inline int drm_privacy_screen_unregister_notifier(struct drm_privacy_screen *priv,
+ struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif
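A hedged consumer-side sketch: acquire the privacy screen associated with a connector's device, request a software state change and drop the reference. PRIVACY_SCREEN_ENABLED comes from enum drm_privacy_screen_status in drm_connector.h; the function name is illustrative.

static int example_enable_privacy(struct device *consumer_dev)
{
	struct drm_privacy_screen *priv;
	int ret;

	priv = drm_privacy_screen_get(consumer_dev, NULL);
	if (IS_ERR(priv))
		return PTR_ERR(priv); /* e.g. -ENODEV when none is present */

	ret = drm_privacy_screen_set_sw_state(priv, PRIVACY_SCREEN_ENABLED);
	drm_privacy_screen_put(priv);
	return ret;
}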
diff --git a/include/drm/drm_privacy_screen_driver.h b/include/drm/drm_privacy_screen_driver.h
new file mode 100644
index 000000000000..4ef246d5706f
--- /dev/null
+++ b/include/drm/drm_privacy_screen_driver.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef __DRM_PRIVACY_SCREEN_DRIVER_H__
+#define __DRM_PRIVACY_SCREEN_DRIVER_H__
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <drm/drm_connector.h>
+
+struct drm_privacy_screen;
+
+/**
+ * struct drm_privacy_screen_ops - drm_privacy_screen operations
+ *
+ * Defines the operations which the privacy-screen class code may call.
+ * These functions should be implemented by the privacy-screen driver.
+ */
+struct drm_privacy_screen_ops {
+ /**
+ * @set_sw_state: Called to request a change of the privacy-screen
+ * state. The privacy-screen class code contains a check to avoid this
+ * getting called when the hw_state reports the state is locked.
+ * It is the driver's responsibility to update sw_state and hw_state.
+ * This is always called with the drm_privacy_screen's lock held.
+ */
+ int (*set_sw_state)(struct drm_privacy_screen *priv,
+ enum drm_privacy_screen_status sw_state);
+ /**
+ * @get_hw_state: Called to request that the driver gets the current
+ * privacy-screen state from the hardware and then updates sw_state and
+ * hw_state accordingly. This will be called by the core just before
+ * the privacy-screen is registered in sysfs.
+ */
+ void (*get_hw_state)(struct drm_privacy_screen *priv);
+};
+
+/**
+ * struct drm_privacy_screen - central privacy-screen structure
+ *
+ * Central privacy-screen structure, this contains the struct device used
+ * to register the screen in sysfs, the screen's state, ops, etc.
+ */
+struct drm_privacy_screen {
+ /** @dev: device used to register the privacy-screen in sysfs. */
+ struct device dev;
+	/** @lock: mutex protecting all fields in this struct. */
+ struct mutex lock;
+	/** @list: list-entry in the list of registered privacy-screen devices. */
+ struct list_head list;
+ /** @notifier_head: privacy-screen notifier head. */
+ struct blocking_notifier_head notifier_head;
+ /**
+ * @ops: &struct drm_privacy_screen_ops for this privacy-screen.
+ * This is NULL if the driver has unregistered the privacy-screen.
+ */
+ const struct drm_privacy_screen_ops *ops;
+ /**
+ * @sw_state: The privacy-screen's software state, see
+ * :ref:`Standard Connector Properties<standard_connector_properties>`
+ * for more info.
+ */
+ enum drm_privacy_screen_status sw_state;
+ /**
+ * @hw_state: The privacy-screen's hardware state, see
+ * :ref:`Standard Connector Properties<standard_connector_properties>`
+ * for more info.
+ */
+ enum drm_privacy_screen_status hw_state;
+ /**
+ * @drvdata: Private data owned by the privacy screen provider
+ */
+ void *drvdata;
+};
+
+static inline
+void *drm_privacy_screen_get_drvdata(struct drm_privacy_screen *priv)
+{
+ return priv->drvdata;
+}
+
+struct drm_privacy_screen *drm_privacy_screen_register(
+ struct device *parent, const struct drm_privacy_screen_ops *ops,
+ void *data);
+void drm_privacy_screen_unregister(struct drm_privacy_screen *priv);
+
+void drm_privacy_screen_call_notifier_chain(struct drm_privacy_screen *priv);
+
+#endif
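A hedged provider-side sketch: the my_*() callbacks stand in for real hardware accessors; per the ops documentation the driver updates sw_state and hw_state itself.

static int my_set_sw_state(struct drm_privacy_screen *priv,
			   enum drm_privacy_screen_status sw_state)
{
	/* program the hardware here, then mirror the result */
	priv->sw_state = sw_state;
	priv->hw_state = sw_state;
	return 0;
}

static void my_get_hw_state(struct drm_privacy_screen *priv)
{
	/* read back from hardware; stubbed to "disabled" here */
	priv->sw_state = priv->hw_state = PRIVACY_SCREEN_DISABLED;
}

static const struct drm_privacy_screen_ops my_privacy_ops = {
	.set_sw_state = my_set_sw_state,
	.get_hw_state = my_get_hw_state,
};

/* at probe time:
 * screen = drm_privacy_screen_register(parent_dev, &my_privacy_ops, drvdata);
 */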
diff --git a/include/drm/drm_privacy_screen_machine.h b/include/drm/drm_privacy_screen_machine.h
new file mode 100644
index 000000000000..02e5371904d3
--- /dev/null
+++ b/include/drm/drm_privacy_screen_machine.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef __DRM_PRIVACY_SCREEN_MACHINE_H__
+#define __DRM_PRIVACY_SCREEN_MACHINE_H__
+
+#include <linux/list.h>
+
+/**
+ * struct drm_privacy_screen_lookup - static privacy-screen lookup list entry
+ *
+ * Used for the static lookup-list for mapping privacy-screen consumer
+ * dev-connector pairs to a privacy-screen provider.
+ */
+struct drm_privacy_screen_lookup {
+ /** @list: Lookup list list-entry. */
+ struct list_head list;
+ /** @dev_id: Consumer device name or NULL to match all devices. */
+ const char *dev_id;
+ /** @con_id: Consumer connector name or NULL to match all connectors. */
+ const char *con_id;
+ /** @provider: dev_name() of the privacy_screen provider. */
+ const char *provider;
+};
+
+void drm_privacy_screen_lookup_add(struct drm_privacy_screen_lookup *lookup);
+void drm_privacy_screen_lookup_remove(struct drm_privacy_screen_lookup *lookup);
+
+#if IS_ENABLED(CONFIG_DRM_PRIVACY_SCREEN) && IS_ENABLED(CONFIG_X86)
+void drm_privacy_screen_lookup_init(void);
+void drm_privacy_screen_lookup_exit(void);
+#else
+static inline void drm_privacy_screen_lookup_init(void)
+{
+}
+static inline void drm_privacy_screen_lookup_exit(void)
+{
+}
+#endif
+
+#endif
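A hedged sketch of the platform glue: the provider string must match dev_name() of the registered privacy screen (the thinkpad_acpi name mirrors the existing x86 lookup table; the con_id is illustrative).

static struct drm_privacy_screen_lookup my_lookup = {
	.dev_id = NULL,		/* match any consumer device */
	.con_id = "eDP-1",
	.provider = "privacy_screen-thinkpad_acpi",
};

static void example_lookup_init(void)
{
	drm_privacy_screen_lookup_add(&my_lookup);
}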
diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h
index 8d3ed2834d34..840ae5f798c2 100644
--- a/include/drm/drm_probe_helper.h
+++ b/include/drm/drm_probe_helper.h
@@ -3,9 +3,10 @@
#ifndef __DRM_PROBE_HELPER_H__
#define __DRM_PROBE_HELPER_H__
-#include <linux/types.h>
+#include <drm/drm_modes.h>
struct drm_connector;
+struct drm_crtc;
struct drm_device;
struct drm_modeset_acquire_ctx;
@@ -15,13 +16,31 @@ int drm_helper_probe_single_connector_modes(struct drm_connector
int drm_helper_probe_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx,
bool force);
+
+void drmm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_init(struct drm_device *dev);
void drm_kms_helper_poll_fini(struct drm_device *dev);
bool drm_helper_hpd_irq_event(struct drm_device *dev);
+bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector);
void drm_kms_helper_hotplug_event(struct drm_device *dev);
+void drm_kms_helper_connector_hotplug_event(struct drm_connector *connector);
void drm_kms_helper_poll_disable(struct drm_device *dev);
void drm_kms_helper_poll_enable(struct drm_device *dev);
+void drm_kms_helper_poll_reschedule(struct drm_device *dev);
bool drm_kms_helper_is_poll_worker(void);
+enum drm_mode_status drm_crtc_helper_mode_valid_fixed(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *fixed_mode);
+
+int drm_connector_helper_get_modes_fixed(struct drm_connector *connector,
+ const struct drm_display_mode *fixed_mode);
+int drm_connector_helper_get_modes(struct drm_connector *connector);
+int drm_connector_helper_tv_get_modes(struct drm_connector *connector);
+
+int drm_connector_helper_detect_from_ddc(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force);
+
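A hedged sketch of wiring the new DDC-based helpers into a connector's helper vtable (assuming the connector's ddc adapter pointer has been set):

static const struct drm_connector_helper_funcs my_connector_helper_funcs = {
	.get_modes = drm_connector_helper_get_modes,
	.detect_ctx = drm_connector_helper_detect_from_ddc,
};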
#endif
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index bbf5c1fdd7b0..082f29156b3e 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -31,7 +31,6 @@
/**
* struct drm_property_enum - symbolic values for enumerations
- * @value: numeric property value for this enum entry
* @head: list of enum values, linked to &drm_property.enum_list
* @name: symbolic name for the enum
*
@@ -39,6 +38,14 @@
* decoding for each value. This is used for example for the rotation property.
*/
struct drm_property_enum {
+ /**
+ * @value: numeric property value for this enum entry
+ *
+ * If the property has the type &DRM_MODE_PROP_BITMASK, @value stores a
+ * bitshift, not a bitmask. In other words, the enum entry is enabled
+ * if the bit number @value is set in the property's value. This enum
+ * entry has the bitmask ``1 << value``.
+ */
uint64_t value;
struct list_head head;
char name[DRM_PROP_NAME_LEN];
@@ -272,6 +279,12 @@ struct drm_property_blob *drm_property_create_blob(struct drm_device *dev,
const void *data);
struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
uint32_t id);
+int drm_property_replace_blob_from_id(struct drm_device *dev,
+ struct drm_property_blob **blob,
+ uint64_t blob_id,
+ ssize_t expected_size,
+ ssize_t expected_elem_size,
+ bool *replaced);
int drm_property_replace_global_blob(struct drm_device *dev,
struct drm_property_blob **replace,
size_t length,
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index 39f2deee709c..46f09cf68458 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -39,12 +39,31 @@
* @x2: horizontal ending coordinate (exclusive)
* @y1: vertical starting coordinate (inclusive)
* @y2: vertical ending coordinate (exclusive)
+ *
+ * Note that this must match the layout of struct drm_mode_rect or the damage
+ * helpers like drm_atomic_helper_damage_iter_init() break.
*/
struct drm_rect {
int x1, y1, x2, y2;
};
/**
+ * DRM_RECT_INIT - initialize a rectangle from x/y/w/h
+ * @x: x coordinate
+ * @y: y coordinate
+ * @w: width
+ * @h: height
+ *
+ * RETURNS:
+ * A new rectangle of the specified size.
+ */
+#define DRM_RECT_INIT(x, y, w, h) ((struct drm_rect){ \
+ .x1 = (x), \
+ .y1 = (y), \
+ .x2 = (x) + (w), \
+ .y2 = (y) + (h) })
+
+/**
* DRM_RECT_FMT - printf string for &struct drm_rect
*/
#define DRM_RECT_FMT "%dx%d%+d%+d"
@@ -110,7 +129,7 @@ static inline void drm_rect_adjust_size(struct drm_rect *r, int dw, int dh)
/**
* drm_rect_translate - translate the rectangle
- * @r: rectangle to be tranlated
+ * @r: rectangle to be translated
* @dx: horizontal translation
* @dy: vertical translation
*
@@ -127,7 +146,7 @@ static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy)
/**
* drm_rect_translate_to - translate the rectangle to an absolute position
- * @r: rectangle to be tranlated
+ * @r: rectangle to be translated
* @x: horizontal position
* @y: vertical position
*
@@ -219,6 +238,21 @@ static inline void drm_rect_fp_to_int(struct drm_rect *dst,
drm_rect_height(src) >> 16);
}
+/**
+ * drm_rect_overlap - Check if two rectangles overlap
+ * @a: first rectangle
+ * @b: second rectangle
+ *
+ * RETURNS:
+ * %true if the rectangles overlap, %false otherwise.
+ */
+static inline bool drm_rect_overlap(const struct drm_rect *a,
+ const struct drm_rect *b)
+{
+ return (a->x2 > b->x1 && b->x2 > a->x1 &&
+ a->y2 > b->y1 && b->y2 > a->y1);
+}
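A hedged sketch showing the exclusive x2/y2 convention: two rectangles that merely share an edge do not overlap.

static void example_rects(void)
{
	struct drm_rect a = DRM_RECT_INIT(0, 0, 100, 100);  /* a.x2 == 100 */
	struct drm_rect b = DRM_RECT_INIT(100, 0, 50, 100); /* b.x1 == 100 */

	WARN_ON(drm_rect_overlap(&a, &b)); /* false: a.x2 > b.x1 fails */
}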
+
bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip);
bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst,
const struct drm_rect *clip);
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index ef9944e9c5fc..b2486d073763 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -116,8 +116,11 @@ struct drm_simple_display_pipe_funcs {
* the documentation for the &drm_plane_helper_funcs.prepare_fb hook for
* more details.
*
- * Drivers which always have their buffers pinned should use
- * drm_gem_simple_display_pipe_prepare_fb() for this hook.
+ * For GEM drivers who neither have a @prepare_fb nor @cleanup_fb hook
+ * set, drm_gem_plane_helper_prepare_fb() is called automatically
+ * to implement this. Other drivers which need additional plane
+ * processing can call drm_gem_plane_helper_prepare_fb() from
+ * their @prepare_fb hook.
*/
int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
@@ -133,6 +136,26 @@ struct drm_simple_display_pipe_funcs {
struct drm_plane_state *plane_state);
/**
+ * @begin_fb_access:
+ *
+ * Optional, called by &drm_plane_helper_funcs.begin_fb_access. Please read
+ * the documentation for the &drm_plane_helper_funcs.begin_fb_access hook for
+ * more details.
+ */
+ int (*begin_fb_access)(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *new_plane_state);
+
+ /**
+ * @end_fb_access:
+ *
+ * Optional, called by &drm_plane_helper_funcs.end_fb_access. Please read
+ * the documentation for the &drm_plane_helper_funcs.end_fb_access hook for
+ * more details.
+ */
+ void (*end_fb_access)(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
+ /**
* @enable_vblank:
*
* Optional, called by &drm_crtc_funcs.enable_vblank. Please read
@@ -151,6 +174,33 @@ struct drm_simple_display_pipe_funcs {
void (*disable_vblank)(struct drm_simple_display_pipe *pipe);
/**
+ * @reset_crtc:
+ *
+ * Optional, called by &drm_crtc_funcs.reset. Please read the
+ * documentation for the &drm_crtc_funcs.reset hook for more details.
+ */
+ void (*reset_crtc)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @duplicate_crtc_state:
+ *
+ * Optional, called by &drm_crtc_funcs.atomic_duplicate_state. Please
+ * read the documentation for the &drm_crtc_funcs.atomic_duplicate_state
+ * hook for more details.
+ */
+ struct drm_crtc_state * (*duplicate_crtc_state)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @destroy_crtc_state:
+ *
+ * Optional, called by &drm_crtc_funcs.atomic_destroy_state. Please
+ * read the documentation for the &drm_crtc_funcs.atomic_destroy_state
+ * hook for more details.
+ */
+ void (*destroy_crtc_state)(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state);
+
+ /**
* @reset_plane:
*
* Optional, called by &drm_plane_funcs.reset. Please read the
diff --git a/include/drm/drm_suballoc.h b/include/drm/drm_suballoc.h
new file mode 100644
index 000000000000..7ba72a81a808
--- /dev/null
+++ b/include/drm/drm_suballoc.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2011 Red Hat Inc.
+ * Copyright © 2022 Intel Corporation
+ */
+#ifndef _DRM_SUBALLOC_H_
+#define _DRM_SUBALLOC_H_
+
+#include <drm/drm_mm.h>
+
+#include <linux/dma-fence.h>
+#include <linux/types.h>
+
+#define DRM_SUBALLOC_MAX_QUEUES 32
+/**
+ * struct drm_suballoc_manager - fenced range allocations
+ * @wq: Wait queue for sleeping allocations on contention.
+ * @hole: Pointer to first hole node.
+ * @olist: List of allocated ranges.
+ * @flist: Array[fence context hash] of queues of fenced allocated ranges.
+ * @size: Size of the managed range.
+ * @align: Default alignment for the managed range.
+ */
+struct drm_suballoc_manager {
+ wait_queue_head_t wq;
+ struct list_head *hole;
+ struct list_head olist;
+ struct list_head flist[DRM_SUBALLOC_MAX_QUEUES];
+ size_t size;
+ size_t align;
+};
+
+/**
+ * struct drm_suballoc - Sub-allocated range
+ * @olist: List link for list of allocated ranges.
+ * @flist: List link for the manager's queues of fenced allocated ranges.
+ * @manager: The drm_suballoc_manager.
+ * @soffset: Start offset.
+ * @eoffset: End offset + 1 so that @eoffset - @soffset = size.
+ * @fence: The fence protecting the allocation.
+ */
+struct drm_suballoc {
+ struct list_head olist;
+ struct list_head flist;
+ struct drm_suballoc_manager *manager;
+ size_t soffset;
+ size_t eoffset;
+ struct dma_fence *fence;
+};
+
+void drm_suballoc_manager_init(struct drm_suballoc_manager *sa_manager,
+ size_t size, size_t align);
+
+void drm_suballoc_manager_fini(struct drm_suballoc_manager *sa_manager);
+
+struct drm_suballoc *
+drm_suballoc_new(struct drm_suballoc_manager *sa_manager, size_t size,
+ gfp_t gfp, bool intr, size_t align);
+
+void drm_suballoc_free(struct drm_suballoc *sa, struct dma_fence *fence);
+
+/**
+ * drm_suballoc_soffset - Range start.
+ * @sa: The struct drm_suballoc.
+ *
+ * Return: The start of the allocated range.
+ */
+static inline size_t drm_suballoc_soffset(struct drm_suballoc *sa)
+{
+ return sa->soffset;
+}
+
+/**
+ * drm_suballoc_eoffset - Range end.
+ * @sa: The struct drm_suballoc.
+ *
+ * Return: The end of the allocated range + 1.
+ */
+static inline size_t drm_suballoc_eoffset(struct drm_suballoc *sa)
+{
+ return sa->eoffset;
+}
+
+/**
+ * drm_suballoc_size - Range size.
+ * @sa: The struct drm_suballoc.
+ *
+ * Return: The size of the allocated range.
+ */
+static inline size_t drm_suballoc_size(struct drm_suballoc *sa)
+{
+ return sa->eoffset - sa->soffset;
+}
+
+#ifdef CONFIG_DEBUG_FS
+void drm_suballoc_dump_debug_info(struct drm_suballoc_manager *sa_manager,
+ struct drm_printer *p,
+ unsigned long long suballoc_base);
+#else
+static inline void
+drm_suballoc_dump_debug_info(struct drm_suballoc_manager *sa_manager,
+ struct drm_printer *p,
+ unsigned long long suballoc_base)
+{ }
+
+#endif
+
+#endif /* _DRM_SUBALLOC_H_ */
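A hedged sketch of the suballocator lifecycle (the size, alignment and job fence are illustrative; align == 0 should fall back to the manager's default):

static int example_suballoc(struct drm_suballoc_manager *mgr,
			    struct dma_fence *job_fence)
{
	struct drm_suballoc *sa;

	/* may sleep on contention since intr == true and GFP_KERNEL */
	sa = drm_suballoc_new(mgr, 4096, GFP_KERNEL, true, 0);
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	/* use the range [drm_suballoc_soffset(sa), drm_suballoc_eoffset(sa)) */

	drm_suballoc_free(sa, job_fence); /* reusable once the fence signals */
	return 0;
}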
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 6cf7243a1dc5..b40052132e52 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -54,7 +54,11 @@ struct drm_syncobj {
*/
struct list_head cb_list;
/**
- * @lock: Protects &cb_list and write-locks &fence.
+ * @ev_fd_list: List of registered eventfd.
+ */
+ struct list_head ev_fd_list;
+ /**
+ * @lock: Protects &cb_list and &ev_fd_list, and write-locks &fence.
*/
spinlock_t lock;
/**
diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h
index d454ef617b2c..96a5d858404b 100644
--- a/include/drm/drm_sysfs.h
+++ b/include/drm/drm_sysfs.h
@@ -11,6 +11,7 @@ int drm_class_device_register(struct device *dev);
void drm_class_device_unregister(struct device *dev);
void drm_sysfs_hotplug_event(struct drm_device *dev);
-void drm_sysfs_connector_status_event(struct drm_connector *connector,
- struct drm_property *property);
+void drm_sysfs_connector_hotplug_event(struct drm_connector *connector);
+void drm_sysfs_connector_property_event(struct drm_connector *connector,
+ struct drm_property *property);
#endif
diff --git a/include/drm/drm_util.h b/include/drm/drm_util.h
index 79952d8c4bba..440199618620 100644
--- a/include/drm/drm_util.h
+++ b/include/drm/drm_util.h
@@ -36,6 +36,7 @@
#include <linux/kgdb.h>
#include <linux/preempt.h>
#include <linux/smp.h>
+#include <linux/util_macros.h>
/*
* Use EXPORT_SYMBOL_FOR_TESTS_ONLY() for functions that shall
@@ -48,21 +49,6 @@
#endif
/**
- * for_each_if - helper for handling conditionals in various for_each macros
- * @condition: The condition to check
- *
- * Typical use::
- *
- * #define for_each_foo_bar(x, y) \'
- * list_for_each_entry(x, y->list, head) \'
- * for_each_if(x->something == SOMETHING)
- *
- * The for_each_if() macro makes the use of for_each_foo_bar() less error
- * prone.
- */
-#define for_each_if(condition) if (!(condition)) {} else
-
-/**
* drm_can_sleep - returns true if currently okay to sleep
*
* This function shall not be used in new code.
diff --git a/include/drm/drm_utils.h b/include/drm/drm_utils.h
index 70775748d243..6a46f755daba 100644
--- a/include/drm/drm_utils.h
+++ b/include/drm/drm_utils.h
@@ -12,8 +12,18 @@
#include <linux/types.h>
+struct drm_edid;
+
int drm_get_panel_orientation_quirk(int width, int height);
+struct drm_panel_backlight_quirk {
+ u16 min_brightness;
+ u32 brightness_mask;
+};
+
+const struct drm_panel_backlight_quirk *
+drm_get_panel_backlight_quirk(const struct drm_edid *edid);
+
signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec);
#endif
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index 733a3e2d1d10..ffa564d79638 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -25,6 +25,7 @@
#define _DRM_VBLANK_H_
#include <linux/seqlock.h>
+#include <linux/hrtimer.h>
#include <linux/idr.h>
#include <linux/poll.h>
#include <linux/kthread.h>
@@ -79,6 +80,53 @@ struct drm_pending_vblank_event {
};
/**
+ * struct drm_vblank_crtc_config - vblank configuration for a CRTC
+ */
+struct drm_vblank_crtc_config {
+ /**
+ * @offdelay_ms: Vblank off delay in ms, used to determine how long
+ * &drm_vblank_crtc.disable_timer waits before disabling.
+ *
+ * Defaults to the value of drm_vblank_offdelay in drm_crtc_vblank_on().
+ */
+ int offdelay_ms;
+
+ /**
+ * @disable_immediate: See &drm_device.vblank_disable_immediate
+ * for the exact semantics of immediate vblank disabling.
+ *
+ * Additionally, this tracks the disable immediate value per crtc, just
+ * in case it needs to differ from the default value for a given device.
+ *
+ * Defaults to the value of &drm_device.vblank_disable_immediate in
+ * drm_crtc_vblank_on().
+ */
+ bool disable_immediate;
+};
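A hedged sketch of a CRTC enable path that opts out of the global defaults (offdelay_ms == 0 follows the drm_vblank_offdelay convention of never disabling; see drm_crtc_vblank_on_config() further down):

static void example_crtc_atomic_enable(struct drm_crtc *crtc)
{
	struct drm_vblank_crtc_config config = {
		.offdelay_ms = 0,	/* keep vblank irqs enabled */
		.disable_immediate = false,
	};

	drm_crtc_vblank_on_config(crtc, &config);
}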
+
+/**
+ * struct drm_vblank_crtc_timer - vblank timer for a CRTC
+ */
+struct drm_vblank_crtc_timer {
+ /**
+ * @timer: The vblank's high-resolution timer
+ */
+ struct hrtimer timer;
+ /**
+ * @interval_lock: Protects @interval
+ */
+ spinlock_t interval_lock;
+ /**
+ * @interval: Duration between two vblanks
+ */
+ ktime_t interval;
+ /**
+ * @crtc: The timer's CRTC
+ */
+ struct drm_crtc *crtc;
+};
+
+/**
* struct drm_vblank_crtc - vblank tracking for a CRTC
*
* This structure tracks the vblank state for one CRTC.
@@ -99,8 +147,8 @@ struct drm_vblank_crtc {
wait_queue_head_t queue;
/**
* @disable_timer: Disable timer for the delayed vblank disabling
- * hysteresis logic. Vblank disabling is controlled through the
- * drm_vblank_offdelay module option and the setting of the
+ * hysteresis logic. Vblank disabling is controlled through
+ * &drm_vblank_crtc_config.offdelay_ms and the setting of the
* &drm_device.max_vblank_count value.
*/
struct timer_list disable_timer;
@@ -199,6 +247,12 @@ struct drm_vblank_crtc {
struct drm_display_mode hwmode;
/**
+ * @config: Stores vblank configuration values for a given CRTC.
+ * Also, see drm_crtc_vblank_on_config().
+ */
+ struct drm_vblank_crtc_config config;
+
+ /**
* @enabled: Tracks the enabling state of the corresponding &drm_crtc to
* avoid double-disabling and hence corrupting saved state. Needed by
* drivers not using atomic KMS, since those might go through their CRTC
@@ -223,13 +277,20 @@ struct drm_vblank_crtc {
* cancelled.
*/
wait_queue_head_t work_wait_queue;
+
+ /**
+ * @vblank_timer: Holds the state of the vblank timer
+ */
+ struct drm_vblank_crtc_timer vblank_timer;
};
+struct drm_vblank_crtc *drm_crtc_vblank_crtc(struct drm_crtc *crtc);
int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
bool drm_dev_has_vblank(const struct drm_device *dev);
u64 drm_crtc_vblank_count(struct drm_crtc *crtc);
u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
ktime_t *vblanktime);
+int drm_crtc_next_vblank_start(struct drm_crtc *crtc, ktime_t *vblanktime);
void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
@@ -245,6 +306,8 @@ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
void drm_crtc_vblank_off(struct drm_crtc *crtc);
void drm_crtc_vblank_reset(struct drm_crtc *crtc);
+void drm_crtc_vblank_on_config(struct drm_crtc *crtc,
+ const struct drm_vblank_crtc_config *config);
void drm_crtc_vblank_on(struct drm_crtc *crtc);
u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
void drm_crtc_vblank_restore(struct drm_crtc *crtc);
@@ -255,6 +318,10 @@ wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc);
void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
u32 max_vblank_count);
+int drm_crtc_vblank_start_timer(struct drm_crtc *crtc);
+void drm_crtc_vblank_cancel_timer(struct drm_crtc *crtc);
+void drm_crtc_vblank_get_vblank_timeout(struct drm_crtc *crtc, ktime_t *vblank_time);
+
/*
* Helpers for struct drm_crtc_funcs
*/
diff --git a/include/drm/drm_vblank_helper.h b/include/drm/drm_vblank_helper.h
new file mode 100644
index 000000000000..fcd8a9b35846
--- /dev/null
+++ b/include/drm/drm_vblank_helper.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _DRM_VBLANK_HELPER_H_
+#define _DRM_VBLANK_HELPER_H_
+
+#include <linux/hrtimer_types.h>
+#include <linux/types.h>
+
+struct drm_atomic_state;
+struct drm_crtc;
+
+/*
+ * VBLANK helpers
+ */
+
+void drm_crtc_vblank_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+void drm_crtc_vblank_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+void drm_crtc_vblank_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *crtc_state);
+
+/**
+ * DRM_CRTC_HELPER_VBLANK_FUNCS - Default implementation for VBLANK helpers
+ *
+ * This macro initializes &struct drm_crtc_helper_funcs to default helpers
+ * for VBLANK handling.
+ */
+#define DRM_CRTC_HELPER_VBLANK_FUNCS \
+ .atomic_flush = drm_crtc_vblank_atomic_flush, \
+ .atomic_enable = drm_crtc_vblank_atomic_enable, \
+ .atomic_disable = drm_crtc_vblank_atomic_disable
+
+/*
+ * VBLANK timer
+ */
+
+int drm_crtc_vblank_helper_enable_vblank_timer(struct drm_crtc *crtc);
+void drm_crtc_vblank_helper_disable_vblank_timer(struct drm_crtc *crtc);
+bool drm_crtc_vblank_helper_get_vblank_timestamp_from_timer(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq);
+
+/**
+ * DRM_CRTC_VBLANK_TIMER_FUNCS - Default implementation for VBLANK timers
+ *
+ * This macro initializes &struct drm_crtc_funcs to default helpers for
+ * VBLANK timers.
+ */
+#define DRM_CRTC_VBLANK_TIMER_FUNCS \
+ .enable_vblank = drm_crtc_vblank_helper_enable_vblank_timer, \
+ .disable_vblank = drm_crtc_vblank_helper_disable_vblank_timer, \
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp_from_timer
+
+#endif
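A hedged sketch of a driver with hrtimer-emulated vblanks pulling in both initializer macros (the reset and state callbacks are the stock ones from drm_atomic_state_helper):

static const struct drm_crtc_helper_funcs my_crtc_helper_funcs = {
	DRM_CRTC_HELPER_VBLANK_FUNCS,
};

static const struct drm_crtc_funcs my_crtc_funcs = {
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	DRM_CRTC_VBLANK_TIMER_FUNCS,
};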
diff --git a/include/drm/drm_vblank_work.h b/include/drm/drm_vblank_work.h
index eb41d0810c4f..e04d436b7297 100644
--- a/include/drm/drm_vblank_work.h
+++ b/include/drm/drm_vblank_work.h
@@ -17,6 +17,7 @@ struct drm_crtc;
* drm_vblank_work_init()
* drm_vblank_work_cancel_sync()
* drm_vblank_work_flush()
+ * drm_vblank_work_flush_all()
*/
struct drm_vblank_work {
/**
@@ -67,5 +68,6 @@ void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
void (*func)(struct kthread_work *work));
bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work);
void drm_vblank_work_flush(struct drm_vblank_work *work);
+void drm_vblank_work_flush_all(struct drm_crtc *crtc);
#endif /* !_DRM_VBLANK_WORK_H_ */
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 76ac5e97a559..6c2a2f21dbf0 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -53,7 +53,7 @@ struct drm_vma_offset_node {
rwlock_t vm_lock;
struct drm_mm_node vm_node;
struct rb_root vm_files;
- bool readonly:1;
+ void *driver_private;
};
struct drm_vma_offset_manager {
@@ -74,6 +74,7 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
struct drm_vma_offset_node *node);
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag);
+int drm_vma_node_allow_once(struct drm_vma_offset_node *node, struct drm_file *tag);
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
struct drm_file *tag);
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h
index 9697d2714d2a..c380a7b8f55a 100644
--- a/include/drm/drm_writeback.h
+++ b/include/drm/drm_writeback.h
@@ -30,6 +30,8 @@ struct drm_writeback_connector {
* @drm_writeback_connector control the behaviour of the @encoder
* by passing the @enc_funcs parameter to drm_writeback_connector_init()
* function.
+ * For users of drm_writeback_connector_init_with_encoder(), this field
+ * is not valid as the encoder is managed within their drivers.
*/
struct drm_encoder encoder;
@@ -150,7 +152,20 @@ int drm_writeback_connector_init(struct drm_device *dev,
struct drm_writeback_connector *wb_connector,
const struct drm_connector_funcs *con_funcs,
const struct drm_encoder_helper_funcs *enc_helper_funcs,
- const u32 *formats, int n_formats);
+ const u32 *formats, int n_formats,
+ u32 possible_crtcs);
+
+int drm_writeback_connector_init_with_encoder(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ struct drm_encoder *enc,
+ const struct drm_connector_funcs *con_funcs, const u32 *formats,
+ int n_formats);
+
+int drmm_writeback_connector_init(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ const struct drm_connector_funcs *con_funcs,
+ struct drm_encoder *enc,
+ const u32 *formats, int n_formats);
int drm_writeback_set_fb(struct drm_connector_state *conn_state,
struct drm_framebuffer *fb);
diff --git a/include/drm/gma_drm.h b/include/drm/gma_drm.h
deleted file mode 100644
index 228f43e8df89..000000000000
--- a/include/drm/gma_drm.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/**************************************************************************
- * Copyright (c) 2007-2011, Intel Corporation.
- * All Rights Reserved.
- * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
- * All Rights Reserved.
- *
- **************************************************************************/
-
-#ifndef _GMA_DRM_H_
-#define _GMA_DRM_H_
-
-#endif
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 10225a0a35d0..fb88301b3c45 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -27,99 +27,235 @@
#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
+#include <linux/xarray.h>
+#include <linux/workqueue.h>
#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+/**
+ * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
+ *
+ * Setting this flag on a scheduler fence prevents pipelining of jobs depending
+ * on this fence. In other words we always insert a full CPU round trip before
+ * dependent jobs are pushed to the hw queue.
+ */
+#define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS
+
+/**
+ * DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set
+ *
+ * Because a deadline hint can be set before the backing hw fence is
+ * created, we need to keep track of whether a deadline has already
+ * been set.
+ */
+#define DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT (DMA_FENCE_FLAG_USER_BITS + 1)
+
+enum dma_resv_usage;
+struct dma_resv;
+struct drm_gem_object;
+
struct drm_gpu_scheduler;
struct drm_sched_rq;
+struct drm_file;
+
/* These are often used as an (initial) index
* to an array, and as such should start at 0.
*/
enum drm_sched_priority {
- DRM_SCHED_PRIORITY_MIN,
- DRM_SCHED_PRIORITY_NORMAL,
- DRM_SCHED_PRIORITY_HIGH,
DRM_SCHED_PRIORITY_KERNEL,
+ DRM_SCHED_PRIORITY_HIGH,
+ DRM_SCHED_PRIORITY_NORMAL,
+ DRM_SCHED_PRIORITY_LOW,
- DRM_SCHED_PRIORITY_COUNT,
- DRM_SCHED_PRIORITY_UNSET = -2
+ DRM_SCHED_PRIORITY_COUNT
};
/**
* struct drm_sched_entity - A wrapper around a job queue (typically
* attached to the DRM file_priv).
*
- * @list: used to append this struct to the list of entities in the
- * runqueue.
- * @rq: runqueue on which this entity is currently scheduled.
- * @sched_list: A list of schedulers (drm_gpu_schedulers).
- * Jobs from this entity can be scheduled on any scheduler
- * on this list.
- * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
- * @priority: priority of the entity
- * @rq_lock: lock to modify the runqueue to which this entity belongs.
- * @job_queue: the list of jobs of this entity.
- * @fence_seq: a linearly increasing seqno incremented with each
- * new &drm_sched_fence which is part of the entity.
- * @fence_context: a unique context for all the fences which belong
- * to this entity.
- * The &drm_sched_fence.scheduled uses the
- * fence_context but &drm_sched_fence.finished uses
- * fence_context + 1.
- * @dependency: the dependency fence of the job which is on the top
- * of the job queue.
- * @cb: callback for the dependency fence above.
- * @guilty: points to ctx's guilty.
- * @fini_status: contains the exit status in case the process was signalled.
- * @last_scheduled: points to the finished fence of the last scheduled job.
- * @last_user: last group leader pushing a job into the entity.
- * @stopped: Marks the enity as removed from rq and destined for termination.
- * @entity_idle: Signals when enityt is not in use
- *
* Entities will emit jobs in order to their corresponding hardware
* ring, and the scheduler will alternate between entities based on
* scheduling policy.
*/
struct drm_sched_entity {
+ /**
+ * @list:
+ *
+ * Used to append this struct to the list of entities in the runqueue
+ * @rq under &drm_sched_rq.entities.
+ *
+ * Protected by &drm_sched_rq.lock of @rq.
+ */
struct list_head list;
+
+ /**
+ * @lock:
+ *
+ * Lock protecting the run-queue (@rq) to which this entity belongs,
+ * @priority and the list of schedulers (@sched_list, @num_sched_list).
+ */
+ spinlock_t lock;
+
+ /**
+ * @rq:
+ *
+ * Runqueue on which this entity is currently scheduled.
+ *
+ * FIXME: Locking is very unclear for this. Writers are protected by
+ * @lock, but readers are generally lockless and seem to just race with
+ * not even a READ_ONCE.
+ */
struct drm_sched_rq *rq;
+
+ /**
+ * @sched_list:
+ *
+ * A list of schedulers (struct drm_gpu_scheduler). Jobs from this entity can
+ * be scheduled on any scheduler on this list.
+ *
+ * This can be modified by calling drm_sched_entity_modify_sched().
+ * Locking is entirely up to the driver, see the above function for more
+ * details.
+ *
+ * This will be set to NULL if &num_sched_list equals 1 and @rq has been
+ * set already.
+ *
+ * FIXME: This means priority changes through
+ * drm_sched_entity_set_priority() will be lost henceforth in this case.
+ */
struct drm_gpu_scheduler **sched_list;
+
+ /**
+ * @num_sched_list:
+ *
+ * Number of drm_gpu_schedulers in the @sched_list.
+ */
unsigned int num_sched_list;
+
+ /**
+ * @priority:
+ *
+ * Priority of the entity. This can be modified by calling
+ * drm_sched_entity_set_priority(). Protected by @lock.
+ */
enum drm_sched_priority priority;
- spinlock_t rq_lock;
+ /**
+ * @job_queue: the list of jobs of this entity.
+ */
struct spsc_queue job_queue;
+ /**
+ * @fence_seq:
+ *
+ * A linearly increasing seqno incremented with each new
+ * &drm_sched_fence which is part of the entity.
+ *
+ * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking,
+ * this doesn't need to be atomic.
+ */
atomic_t fence_seq;
+
+ /**
+ * @fence_context:
+ *
+ * A unique context for all the fences which belong to this entity. The
+ * &drm_sched_fence.scheduled uses the fence_context but
+ * &drm_sched_fence.finished uses fence_context + 1.
+ */
uint64_t fence_context;
+ /**
+ * @dependency:
+ *
+ * The dependency fence of the job which is on the top of the job queue.
+ */
struct dma_fence *dependency;
+
+ /**
+ * @cb:
+ *
+ * Callback for the dependency fence above.
+ */
struct dma_fence_cb cb;
+
+ /**
+ * @guilty:
+ *
+ * Points to entities' guilty.
+ */
atomic_t *guilty;
- struct dma_fence *last_scheduled;
+
+ /**
+ * @last_scheduled:
+ *
+ * Points to the finished fence of the last scheduled job. Only written
+ * by drm_sched_entity_pop_job(). Can be accessed locklessly from
+ * drm_sched_job_arm() if the queue is empty.
+ */
+ struct dma_fence __rcu *last_scheduled;
+
+ /**
+ * @last_user: last group leader pushing a job into the entity.
+ */
struct task_struct *last_user;
+
+ /**
+ * @stopped:
+ *
+	 * Marks the entity as removed from the rq and destined for
+ * termination. This is set by calling drm_sched_entity_flush() and by
+ * drm_sched_fini().
+ */
bool stopped;
+
+ /**
+ * @entity_idle:
+ *
+ * Signals when entity is not in use, used to sequence entity cleanup in
+ * drm_sched_entity_fini().
+ */
struct completion entity_idle;
+
+ /**
+ * @oldest_job_waiting:
+ *
+	 * Marks the earliest job waiting in the software (SW) queue
+ */
+ ktime_t oldest_job_waiting;
+
+ /**
+ * @rb_tree_node:
+ *
+ * The node used to insert this entity into time based priority queue
+ */
+ struct rb_node rb_tree_node;
+
};
/**
* struct drm_sched_rq - queue of entities to be scheduled.
*
- * @lock: to modify the entities list.
* @sched: the scheduler to which this rq belongs to.
- * @entities: list of the entities to be scheduled.
+ * @lock: protects @entities, @rb_tree_root and @current_entity.
* @current_entity: the entity which is to be scheduled.
+ * @entities: list of the entities to be scheduled.
+ * @rb_tree_root: root of time based priority queue of entities for FIFO scheduling
*
* Run queue is a set of entities scheduling command submissions for
* one specific ring. It implements the scheduling policy that selects
* the next entity to emit commands from.
*/
struct drm_sched_rq {
- spinlock_t lock;
struct drm_gpu_scheduler *sched;
- struct list_head entities;
+
+ spinlock_t lock;
+ /* Following members are protected by the @lock: */
struct drm_sched_entity *current_entity;
+ struct list_head entities;
+ struct rb_root_cached rb_tree_root;
};
/**
@@ -144,6 +280,12 @@ struct drm_sched_fence {
*/
struct dma_fence finished;
+ /**
+ * @deadline: deadline set on &drm_sched_fence.finished which
+ * potentially needs to be propagated to &drm_sched_fence.parent
+ */
+ ktime_t deadline;
+
/**
* @parent: the fence returned by &drm_sched_backend_ops.run_job
* when scheduling the job on hardware. We signal the
@@ -163,6 +305,13 @@ struct drm_sched_fence {
* @owner: job owner for debugging
*/
void *owner;
+
+ /**
+ * @drm_client_id:
+ *
+ * The client_id of the drm_file which owns the job.
+ */
+ uint64_t drm_client_id;
};
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
@@ -175,7 +324,8 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* @sched: the scheduler instance on which this job is scheduled.
* @s_fence: contains the fences for the scheduling of job.
* @finish_cb: the callback for the finished fence.
- * @id: a unique id assigned to each job scheduled on the scheduler.
+ * @credits: the number of credits this job contributes to the scheduler
+ * @work: Helper to reschedule the job kill to a different context.
* @karma: increment on every hang caused by this job. If this exceeds the hang
* limit of the scheduler then the job is marked guilty and will not
* be scheduled further.
@@ -188,50 +338,122 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* to schedule the job.
*/
struct drm_sched_job {
- struct spsc_node queue_node;
- struct list_head list;
+ /**
+ * @submit_ts:
+ *
+ * When the job was pushed into the entity queue.
+ */
+ ktime_t submit_ts;
+
+ /**
+ * @sched:
+ *
+ * The scheduler this job is or will be scheduled on. Gets set by
+ * drm_sched_job_arm(). Valid until drm_sched_backend_ops.free_job()
+ * has finished.
+ */
struct drm_gpu_scheduler *sched;
+
struct drm_sched_fence *s_fence;
- struct dma_fence_cb finish_cb;
- uint64_t id;
- atomic_t karma;
- enum drm_sched_priority s_priority;
struct drm_sched_entity *entity;
+
+ enum drm_sched_priority s_priority;
+ u32 credits;
+ /** @last_dependency: tracks @dependencies as they signal */
+ unsigned int last_dependency;
+ atomic_t karma;
+
+ struct spsc_node queue_node;
+ struct list_head list;
+
+ /*
+ * work is used only after finish_cb has been used and will not be
+ * accessed anymore.
+ */
+ union {
+ struct dma_fence_cb finish_cb;
+ struct work_struct work;
+ };
+
struct dma_fence_cb cb;
-};
-static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
- int threshold)
-{
- return s_job && atomic_inc_return(&s_job->karma) > threshold;
-}
+ /**
+ * @dependencies:
+ *
+ * Contains the dependencies as struct dma_fence for this job, see
+ * drm_sched_job_add_dependency() and
+ * drm_sched_job_add_implicit_dependencies().
+ */
+ struct xarray dependencies;
+};
+/**
+ * enum drm_gpu_sched_stat - the scheduler's status
+ *
+ * @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use.
+ * @DRM_GPU_SCHED_STAT_RESET: The GPU hung and successfully reset.
+ * @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore.
+ * @DRM_GPU_SCHED_STAT_NO_HANG: Contrary to scheduler's assumption, the GPU
+ * did not hang and is still running.
+ */
enum drm_gpu_sched_stat {
- DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
- DRM_GPU_SCHED_STAT_NOMINAL,
+ DRM_GPU_SCHED_STAT_NONE,
+ DRM_GPU_SCHED_STAT_RESET,
DRM_GPU_SCHED_STAT_ENODEV,
+ DRM_GPU_SCHED_STAT_NO_HANG,
};
/**
- * struct drm_sched_backend_ops
+ * struct drm_sched_backend_ops - Define the backend operations
+ * called by the scheduler
*
- * Define the backend operations called by the scheduler,
- * these functions should be implemented in driver side.
+ * These functions should be implemented in the driver side.
*/
struct drm_sched_backend_ops {
/**
- * @dependency: Called when the scheduler is considering scheduling
- * this job next, to get another struct dma_fence for this job to
- * block on. Once it returns NULL, run_job() may be called.
+ * @prepare_job:
+ *
+ * Called when the scheduler is considering scheduling this job next, to
+ * get another struct dma_fence for this job to block on. Once it
+ * returns NULL, run_job() may be called.
+ *
+	 * Can be NULL if no additional preparation of the dependencies is
+	 * necessary. Skipped when jobs are killed instead of run.
*/
- struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
- struct drm_sched_entity *s_entity);
+ struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity);
/**
- * @run_job: Called to execute the job once all of the dependencies
- * have been resolved. This may be called multiple times, if
- * timedout_job() has happened and drm_sched_job_recovery()
- * decides to try it again.
+ * @run_job: Called to execute the job once all of the dependencies
+ * have been resolved.
+ *
+ * @sched_job: the job to run
+ *
+ * The deprecated drm_sched_resubmit_jobs() (called by &struct
+ * drm_sched_backend_ops.timedout_job) can invoke this again with the
+ * same parameters. Using this is discouraged because it violates
+ * dma_fence rules, notably dma_fence_init() has to be called on
+ * already initialized fences for a second time. Moreover, this is
+ * dangerous because attempts to allocate memory might deadlock with
+ * memory management code waiting for the reset to complete.
+ *
+ * TODO: Document what drivers should do / use instead.
+ *
+ * This method is called in a workqueue context - either from the
+ * submit_wq the driver passed through drm_sched_init(), or, if the
+ * driver passed NULL, a separate, ordered workqueue the scheduler
+ * allocated.
+ *
+ * Note that the scheduler expects to 'inherit' its own reference to
+ * this fence from the callback. It does not invoke an extra
+ * dma_fence_get() on it. Consequently, this callback must take a
+ * reference for the scheduler, and additional ones for the driver's
+ * respective needs.
+ *
+ * Return:
+ * * On success: dma_fence the driver must signal once the hardware has
+ * completed the job ("hardware fence").
+ * * On failure: NULL or an ERR_PTR.
*/
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
@@ -239,11 +461,52 @@ struct drm_sched_backend_ops {
* @timedout_job: Called when a job has taken too long to execute,
* to trigger GPU recovery.
*
- * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
- * and the underlying driver has started or completed recovery.
+ * @sched_job: The job that has timed out
+ *
+ * Drivers typically issue a reset to recover from GPU hangs.
+ * This procedure looks very different depending on whether a firmware
+ * or a hardware scheduler is being used.
+ *
+ * For a FIRMWARE SCHEDULER, each ring has one scheduler, and each
+ * scheduler has one entity. Hence, the steps taken typically look as
+ * follows:
+ *
+ * 1. Stop the scheduler using drm_sched_stop(). This will pause the
+ * scheduler workqueues and cancel the timeout work, guaranteeing
+ * that nothing is queued while the ring is being removed.
+ * 2. Remove the ring. The firmware will make sure that the
+	 * corresponding parts of the hardware are reset, and that other
+ * rings are not impacted.
+ * 3. Kill the entity and the associated scheduler.
+ *
+ *
+ * For a HARDWARE SCHEDULER, a scheduler instance schedules jobs from
+ * one or more entities to one ring. This implies that all entities
+ * associated with the affected scheduler cannot be torn down, because
+ * this would effectively also affect innocent userspace processes which
+ * did not submit faulty jobs (for example).
+ *
+ * Consequently, the procedure to recover with a hardware scheduler
+ * should look like this:
+ *
+ * 1. Stop all schedulers impacted by the reset using drm_sched_stop().
+ * 2. Kill the entity the faulty job stems from.
+ * 3. Issue a GPU reset on all faulty rings (driver-specific).
+	 * 4. Re-submit jobs on all schedulers impacted by the reset, pushing
+	 * them to the entities which are still alive.
+ * 5. Restart all schedulers that were stopped in step #1 using
+ * drm_sched_start().
+ *
+ * Note that some GPUs have distinct hardware queues but need to reset
+ * the GPU globally, which requires extra synchronization between the
+ * timeout handlers of different schedulers. One way to achieve this
+ * synchronization is to create an ordered workqueue (using
+ * alloc_ordered_workqueue()) at the driver level, and pass this queue
+ * as drm_sched_init()'s @timeout_wq parameter. This will guarantee
+ * that timeout handlers are executed sequentially.
+ *
+ * Return: The scheduler's status, defined by &enum drm_gpu_sched_stat
*
- * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
- * available, i.e. has been unplugged.
*/
enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
@@ -252,49 +515,76 @@ struct drm_sched_backend_ops {
* and it's time to clean it up.
*/
void (*free_job)(struct drm_sched_job *sched_job);
+
+ /**
+ * @cancel_job: Used by the scheduler to guarantee remaining jobs' fences
+ * get signaled in drm_sched_fini().
+ *
+ * Used by the scheduler to cancel all jobs that have not been executed
+ * with &struct drm_sched_backend_ops.run_job by the time
+ * drm_sched_fini() gets invoked.
+ *
+ * Drivers need to signal the passed job's hardware fence with an
+ * appropriate error code (e.g., -ECANCELED) in this callback. They
+ * must not free the job.
+ *
+ * The scheduler will only call this callback once it stopped calling
+ * all other callbacks forever, with the exception of &struct
+ * drm_sched_backend_ops.free_job.
+ */
+ void (*cancel_job)(struct drm_sched_job *sched_job);
};
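A hedged sketch of a minimal backend following the firmware-scheduler recipe above; the my_*() names are stand-ins, and the drm_sched_stop()/drm_sched_start() signatures follow recent kernels:

static struct dma_fence *my_run_job(struct drm_sched_job *job)
{
	/* hand the job to the hardware and return its fence; NULL or an
	 * ERR_PTR signals failure (stubbed here) */
	return NULL;
}

static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *job)
{
	struct drm_gpu_scheduler *sched = job->sched;

	drm_sched_stop(sched, job);	/* 1. pause scheduler workqueues */
	/* 2.-4. driver-specific: tear down entity, reset ring, re-submit */
	drm_sched_start(sched, 0);	/* 5. resume */
	return DRM_GPU_SCHED_STAT_RESET;
}

static void my_free_job(struct drm_sched_job *job)
{
	drm_sched_job_cleanup(job);
}

static const struct drm_sched_backend_ops my_sched_ops = {
	.run_job = my_run_job,
	.timedout_job = my_timedout_job,
	.free_job = my_free_job,
};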
/**
- * struct drm_gpu_scheduler
+ * struct drm_gpu_scheduler - scheduler instance-specific data
*
* @ops: backend operations provided by the driver.
- * @hw_submission_limit: the max size of the hardware queue.
+ * @credit_limit: the credit limit of this scheduler
+ * @credit_count: the current credit count of this scheduler
* @timeout: the time after which a job is removed from the scheduler.
* @name: name of the ring for which this scheduler is being used.
- * @sched_rq: priority wise array of run queues.
- * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
- * is ready to be scheduled.
- * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
+ * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
+ * as there's usually one run-queue per priority, but could be less.
+ * @sched_rq: An allocated array of run-queues of size @num_rqs.
+ * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
* waits on this wait queue until all the scheduled jobs are
* finished.
- * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
+ * @submit_wq: workqueue used to queue @work_run_job and @work_free_job
+ * @timeout_wq: workqueue used to queue @work_tdr
+ * @work_run_job: work which calls run_job op of each scheduler.
+ * @work_free_job: work which calls free_job op of each scheduler.
* @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
* timeout interval is over.
- * @thread: the kthread on which the scheduler which run.
* @pending_list: the list of jobs which are currently in the job queue.
* @job_list_lock: lock to protect the pending_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
- * guilty and it will be considered for scheduling further.
+ * guilty and it will no longer be considered for scheduling.
* @score: score to help the load balancer pick an idle sched
* @_score: score used when the driver doesn't provide one
* @ready: marks if the underlying HW is ready to work
* @free_guilty: A hint to the timeout handler to free the guilty job.
+ * @pause_submit: pause queuing of @work_run_job on @submit_wq
+ * @own_submit_wq: scheduler owns allocation of @submit_wq
+ * @dev: system &struct device
*
* One scheduler is implemented for each hardware ring.
*/
struct drm_gpu_scheduler {
const struct drm_sched_backend_ops *ops;
- uint32_t hw_submission_limit;
+ u32 credit_limit;
+ atomic_t credit_count;
long timeout;
const char *name;
- struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT];
- wait_queue_head_t wake_up_worker;
+ u32 num_rqs;
+ struct drm_sched_rq **sched_rq;
wait_queue_head_t job_scheduled;
- atomic_t hw_rq_count;
atomic64_t job_id_count;
+ struct workqueue_struct *submit_wq;
+ struct workqueue_struct *timeout_wq;
+ struct work_struct work_run_job;
+ struct work_struct work_free_job;
struct delayed_work work_tdr;
- struct task_struct *thread;
struct list_head pending_list;
spinlock_t job_list_lock;
int hang_limit;
@@ -302,39 +592,96 @@ struct drm_gpu_scheduler {
atomic_t _score;
bool ready;
bool free_guilty;
+ bool pause_submit;
+ bool own_submit_wq;
+ struct device *dev;
+};
+
+/**
+ * struct drm_sched_init_args - parameters for initializing a DRM GPU scheduler
+ *
+ * @ops: backend operations provided by the driver
+ * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
+ * allocated and used.
+ * @num_rqs: Number of run-queues. This may be at most DRM_SCHED_PRIORITY_COUNT,
+ * as there's usually one run-queue per priority, but may be less.
+ * @credit_limit: the number of credits this scheduler can hold from all jobs
+ * @hang_limit: number of times to allow a job to hang before dropping it.
+ * This mechanism is DEPRECATED. Set it to 0.
+ * @timeout: timeout value in jiffies for submitted jobs.
+ * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is used.
+ * @score: score atomic shared with other schedulers. May be NULL.
+ * @name: name (typically the driver's name). Used for debugging.
+ * @dev: associated device. Used for debugging.
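+ *
+ * Typically filled on the stack and passed to drm_sched_init(); a
+ * minimal sketch (all values and names are illustrative):
+ *
+ *    struct drm_sched_init_args args = {
+ *            .ops = &my_sched_ops,
+ *            .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ *            .credit_limit = 64,
+ *            .timeout = msecs_to_jiffies(500),
+ *            .name = "my-ring",
+ *            .dev = dev,
+ *    };
+ *
+ *    ret = drm_sched_init(&sched, &args);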
+ */
+struct drm_sched_init_args {
+ const struct drm_sched_backend_ops *ops;
+ struct workqueue_struct *submit_wq;
+ struct workqueue_struct *timeout_wq;
+ u32 num_rqs;
+ u32 credit_limit;
+ unsigned int hang_limit;
+ long timeout;
+ atomic_t *score;
+ const char *name;
+ struct device *dev;
};
+/* Scheduler operations */
+
int drm_sched_init(struct drm_gpu_scheduler *sched,
- const struct drm_sched_backend_ops *ops,
- uint32_t hw_submission, unsigned hang_limit, long timeout,
- atomic_t *score, const char *name);
+ const struct drm_sched_init_args *args);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
-int drm_sched_job_init(struct drm_sched_job *job,
- struct drm_sched_entity *entity,
- void *owner);
-void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
- struct drm_gpu_scheduler **sched_list,
- unsigned int num_sched_list);
-void drm_sched_job_cleanup(struct drm_sched_job *job);
-void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+ unsigned long remaining);
+void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
+bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
+void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
-void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
+void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
-void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max);
-void drm_sched_increase_karma(struct drm_sched_job *bad);
-void drm_sched_reset_karma(struct drm_sched_job *bad);
-void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
-bool drm_sched_dependency_optimized(struct dma_fence* fence,
- struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
-void drm_sched_job_kickout(struct drm_sched_job *s_job);
-void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
- struct drm_sched_entity *entity);
-void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
- struct drm_sched_entity *entity);
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
+
+/* Jobs */
+
+int drm_sched_job_init(struct drm_sched_job *job,
+ struct drm_sched_entity *entity,
+ u32 credits, void *owner,
+ u64 drm_client_id);
+void drm_sched_job_arm(struct drm_sched_job *job);
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
+int drm_sched_job_add_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence);
+int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
+ struct drm_file *file,
+ u32 handle,
+ u32 point);
+int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
+ struct dma_resv *resv,
+ enum dma_resv_usage usage);
+int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
+ struct drm_gem_object *obj,
+ bool write);
+bool drm_sched_job_has_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence);
+void drm_sched_job_cleanup(struct drm_sched_job *job);
+void drm_sched_increase_karma(struct drm_sched_job *bad);
+
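+/*
+ * Increment @s_job's karma and return true once it exceeds @threshold,
+ * i.e. once the job should be considered guilty.
+ */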
+static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
+ int threshold)
+{
+ return s_job && atomic_inc_return(&s_job->karma) > threshold;
+}
+
+/* Entities */
int drm_sched_entity_init(struct drm_sched_entity *entity,
enum drm_sched_priority priority,
@@ -344,24 +691,11 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
-void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
-struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
- struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
enum drm_sched_priority priority);
-bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
-
-struct drm_sched_fence *drm_sched_fence_create(
- struct drm_sched_entity *s_entity, void *owner);
-void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
-void drm_sched_fence_finished(struct drm_sched_fence *fence);
-
-unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
-void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
- unsigned long remaining);
-struct drm_gpu_scheduler *
-drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
- unsigned int num_sched_list);
+int drm_sched_entity_error(struct drm_sched_entity *entity);
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
#endif
diff --git a/include/drm/gud.h b/include/drm/gud.h
index 0b46b54fe56e..c52a8ba4ae4e 100644
--- a/include/drm/gud.h
+++ b/include/drm/gud.h
@@ -246,10 +246,12 @@ struct gud_state_req {
/* Get supported pixel formats as a byte array of GUD_PIXEL_FORMAT_* */
#define GUD_REQ_GET_FORMATS 0x40
#define GUD_FORMATS_MAX_NUM 32
- /* R1 is a 1-bit monochrome transfer format presented to userspace as XRGB8888 */
- #define GUD_PIXEL_FORMAT_R1 0x01
+ #define GUD_PIXEL_FORMAT_R1 0x01 /* 1-bit monochrome */
+ #define GUD_PIXEL_FORMAT_R8 0x08 /* 8-bit greyscale */
#define GUD_PIXEL_FORMAT_XRGB1111 0x20
+ #define GUD_PIXEL_FORMAT_RGB332 0x30
#define GUD_PIXEL_FORMAT_RGB565 0x40
+ #define GUD_PIXEL_FORMAT_RGB888 0x50
#define GUD_PIXEL_FORMAT_XRGB8888 0x80
#define GUD_PIXEL_FORMAT_ARGB8888 0x81
diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h
deleted file mode 100644
index 8390b437a1f8..000000000000
--- a/include/drm/i2c/ch7006.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (C) 2009 Francisco Jerez.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __DRM_I2C_CH7006_H__
-#define __DRM_I2C_CH7006_H__
-
-/**
- * struct ch7006_encoder_params
- *
- * Describes how the ch7006 is wired up with the GPU. It should be
- * used as the @params parameter of its @set_config method.
- *
- * See "http://www.chrontel.com/pdf/7006.pdf" for their precise
- * meaning.
- */
-struct ch7006_encoder_params {
- enum {
- CH7006_FORMAT_RGB16 = 0,
- CH7006_FORMAT_YCrCb24m16,
- CH7006_FORMAT_RGB24m16,
- CH7006_FORMAT_RGB15,
- CH7006_FORMAT_RGB24m12C,
- CH7006_FORMAT_RGB24m12I,
- CH7006_FORMAT_RGB24m8,
- CH7006_FORMAT_RGB16m8,
- CH7006_FORMAT_RGB15m8,
- CH7006_FORMAT_YCrCb24m8,
- } input_format;
-
- enum {
- CH7006_CLOCK_SLAVE = 0,
- CH7006_CLOCK_MASTER,
- } clock_mode;
-
- enum {
- CH7006_CLOCK_EDGE_NEG = 0,
- CH7006_CLOCK_EDGE_POS,
- } clock_edge;
-
- int xcm, pcm;
-
- enum {
- CH7006_SYNC_SLAVE = 0,
- CH7006_SYNC_MASTER,
- } sync_direction;
-
- enum {
- CH7006_SYNC_SEPARATED = 0,
- CH7006_SYNC_EMBEDDED,
- } sync_encoding;
-
- enum {
- CH7006_POUT_1_8V = 0,
- CH7006_POUT_3_3V,
- } pout_level;
-
- enum {
- CH7006_ACTIVE_HSYNC = 0,
- CH7006_ACTIVE_DSTART,
- } active_detect;
-};
-
-#endif
diff --git a/include/drm/i2c/sil164.h b/include/drm/i2c/sil164.h
deleted file mode 100644
index 205e27384c83..000000000000
--- a/include/drm/i2c/sil164.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2010 Francisco Jerez.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __DRM_I2C_SIL164_H__
-#define __DRM_I2C_SIL164_H__
-
-/**
- * struct sil164_encoder_params
- *
- * Describes how the sil164 is connected to the GPU. It should be used
- * as the @params parameter of its @set_config method.
- *
- * See "http://www.siliconimage.com/docs/SiI-DS-0021-E-164.pdf".
- */
-struct sil164_encoder_params {
- enum {
- SIL164_INPUT_EDGE_FALLING = 0,
- SIL164_INPUT_EDGE_RISING
- } input_edge;
-
- enum {
- SIL164_INPUT_WIDTH_12BIT = 0,
- SIL164_INPUT_WIDTH_24BIT
- } input_width;
-
- enum {
- SIL164_INPUT_SINGLE_EDGE = 0,
- SIL164_INPUT_DUAL_EDGE
- } input_dual;
-
- enum {
- SIL164_PLL_FILTER_ON = 0,
- SIL164_PLL_FILTER_OFF,
- } pll_filter;
-
- int input_skew; /** < Allowed range [-4, 3], use 0 for no de-skew. */
- int duallink_skew; /** < Allowed range [-4, 3]. */
-};
-
-#endif
diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h
deleted file mode 100644
index 3cb25ccbe5e6..000000000000
--- a/include/drm/i2c/tda998x.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DRM_I2C_TDA998X_H__
-#define __DRM_I2C_TDA998X_H__
-
-#include <linux/hdmi.h>
-#include <dt-bindings/display/tda998x.h>
-
-enum {
- AFMT_UNUSED = 0,
- AFMT_SPDIF = TDA998x_SPDIF,
- AFMT_I2S = TDA998x_I2S,
-};
-
-struct tda998x_audio_params {
- u8 config;
- u8 format;
- unsigned sample_width;
- unsigned sample_rate;
- struct hdmi_audio_infoframe cea;
- u8 status[5];
-};
-
-struct tda998x_encoder_params {
- u8 swap_b:3;
- u8 mirr_b:1;
- u8 swap_a:3;
- u8 mirr_a:1;
- u8 swap_d:3;
- u8 mirr_d:1;
- u8 swap_c:3;
- u8 mirr_c:1;
- u8 swap_f:3;
- u8 mirr_f:1;
- u8 swap_e:3;
- u8 mirr_e:1;
-
- struct tda998x_audio_params audio_params;
-};
-
-#endif
diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h
deleted file mode 100644
index 702f613243bb..000000000000
--- a/include/drm/i915_mei_hdcp_interface.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0+) */
-/*
- * Copyright © 2017-2019 Intel Corporation
- *
- * Authors:
- * Ramalingam C <ramalingam.c@intel.com>
- */
-
-#ifndef _I915_MEI_HDCP_INTERFACE_H_
-#define _I915_MEI_HDCP_INTERFACE_H_
-
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <drm/drm_hdcp.h>
-
-/**
- * enum hdcp_port_type - HDCP port implementation type defined by ME FW
- * @HDCP_PORT_TYPE_INVALID: Invalid hdcp port type
- * @HDCP_PORT_TYPE_INTEGRATED: In-Host HDCP2.x port
- * @HDCP_PORT_TYPE_LSPCON: HDCP2.2 discrete wired Tx port with LSPCON
- * (HDMI 2.0) solution
- * @HDCP_PORT_TYPE_CPDP: HDCP2.2 discrete wired Tx port using the CPDP (DP 1.3)
- * solution
- */
-enum hdcp_port_type {
- HDCP_PORT_TYPE_INVALID,
- HDCP_PORT_TYPE_INTEGRATED,
- HDCP_PORT_TYPE_LSPCON,
- HDCP_PORT_TYPE_CPDP
-};
-
-/**
- * enum hdcp_wired_protocol - HDCP adaptation used on the port
- * @HDCP_PROTOCOL_INVALID: Invalid HDCP adaptation protocol
- * @HDCP_PROTOCOL_HDMI: HDMI adaptation of HDCP used on the port
- * @HDCP_PROTOCOL_DP: DP adaptation of HDCP used on the port
- */
-enum hdcp_wired_protocol {
- HDCP_PROTOCOL_INVALID,
- HDCP_PROTOCOL_HDMI,
- HDCP_PROTOCOL_DP
-};
-
-enum mei_fw_ddi {
- MEI_DDI_INVALID_PORT = 0x0,
-
- MEI_DDI_B = 1,
- MEI_DDI_C,
- MEI_DDI_D,
- MEI_DDI_E,
- MEI_DDI_F,
- MEI_DDI_A = 7,
- MEI_DDI_RANGE_END = MEI_DDI_A,
-};
-
-/**
- * enum mei_fw_tc - ME Firmware defined index for transcoders
- * @MEI_INVALID_TRANSCODER: Index for Invalid transcoder
- * @MEI_TRANSCODER_EDP: Index for EDP Transcoder
- * @MEI_TRANSCODER_DSI0: Index for DSI0 Transcoder
- * @MEI_TRANSCODER_DSI1: Index for DSI1 Transcoder
- * @MEI_TRANSCODER_A: Index for Transcoder A
- * @MEI_TRANSCODER_B: Index for Transcoder B
- * @MEI_TRANSCODER_C: Index for Transcoder C
- * @MEI_TRANSCODER_D: Index for Transcoder D
- */
-enum mei_fw_tc {
- MEI_INVALID_TRANSCODER = 0x00,
- MEI_TRANSCODER_EDP,
- MEI_TRANSCODER_DSI0,
- MEI_TRANSCODER_DSI1,
- MEI_TRANSCODER_A = 0x10,
- MEI_TRANSCODER_B,
- MEI_TRANSCODER_C,
- MEI_TRANSCODER_D
-};
-
-/**
- * struct hdcp_port_data - intel specific HDCP port data
- * @fw_ddi: ddi index as per ME FW
- * @fw_tc: transcoder index as per ME FW
- * @port_type: HDCP port type as per ME FW classification
- * @protocol: HDCP adaptation as per ME FW
- * @k: No of streams transmitted on a port. Only on DP MST this is != 1
- * @seq_num_m: Count of RepeaterAuth_Stream_Manage msg propagated.
- * Initialized to 0 on AKE_INIT. Incremented after every successful
- * transmission of RepeaterAuth_Stream_Manage message. When it rolls
- * over re-Auth has to be triggered.
- * @streams: struct hdcp2_streamid_type[k]. Defines the type and id for the
- * streams
- */
-struct hdcp_port_data {
- enum mei_fw_ddi fw_ddi;
- enum mei_fw_tc fw_tc;
- u8 port_type;
- u8 protocol;
- u16 k;
- u32 seq_num_m;
- struct hdcp2_streamid_type *streams;
-};
-
-/**
- * struct i915_hdcp_component_ops- ops for HDCP2.2 services.
- * @owner: Module providing the ops
- * @initiate_hdcp2_session: Initiate a Wired HDCP2.2 Tx Session.
- * And Prepare AKE_Init.
- * @verify_receiver_cert_prepare_km: Verify the Receiver Certificate
- * AKE_Send_Cert and prepare
- AKE_Stored_Km/AKE_No_Stored_Km
- * @verify_hprime: Verify AKE_Send_H_prime
- * @store_pairing_info: Store pairing info received
- * @initiate_locality_check: Prepare LC_Init
- * @verify_lprime: Verify lprime
- * @get_session_key: Prepare SKE_Send_Eks
- * @repeater_check_flow_prepare_ack: Validate the Downstream topology
- * and prepare rep_ack
- * @verify_mprime: Verify mprime
- * @enable_hdcp_authentication: Mark a port as authenticated.
- * @close_hdcp_session: Close the Wired HDCP Tx session per port.
- * This also disables the authenticated state of the port.
- */
-struct i915_hdcp_component_ops {
- /**
- * @owner: mei_hdcp module
- */
- struct module *owner;
-
- int (*initiate_hdcp2_session)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ake_init *ake_data);
- int (*verify_receiver_cert_prepare_km)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ake_send_cert
- *rx_cert,
- bool *km_stored,
- struct hdcp2_ake_no_stored_km
- *ek_pub_km,
- size_t *msg_sz);
- int (*verify_hprime)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ake_send_hprime *rx_hprime);
- int (*store_pairing_info)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ake_send_pairing_info
- *pairing_info);
- int (*initiate_locality_check)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_lc_init *lc_init_data);
- int (*verify_lprime)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_lc_send_lprime *rx_lprime);
- int (*get_session_key)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_ske_send_eks *ske_data);
- int (*repeater_check_flow_prepare_ack)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_rep_send_receiverid_list
- *rep_topology,
- struct hdcp2_rep_send_ack
- *rep_send_ack);
- int (*verify_mprime)(struct device *dev,
- struct hdcp_port_data *data,
- struct hdcp2_rep_stream_ready *stream_ready);
- int (*enable_hdcp_authentication)(struct device *dev,
- struct hdcp_port_data *data);
- int (*close_hdcp_session)(struct device *dev,
- struct hdcp_port_data *data);
-};
-
-/**
- * struct i915_hdcp_component_master - Used for communication between i915
- * and mei_hdcp drivers for the HDCP2.2 services
- * @mei_dev: device that provide the HDCP2.2 service from MEI Bus.
- * @hdcp_ops: Ops implemented by mei_hdcp driver, used by i915 driver.
- */
-struct i915_hdcp_comp_master {
- struct device *mei_dev;
- const struct i915_hdcp_component_ops *ops;
-
- /* To protect the above members. */
- struct mutex mutex;
-};
-
-#endif /* _I915_MEI_HDCP_INTERFACE_H_ */
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
deleted file mode 100644
index ebd0dd1c35b3..000000000000
--- a/include/drm/i915_pciids.h
+++ /dev/null
@@ -1,648 +0,0 @@
-/*
- * Copyright 2013 Intel Corporation
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-#ifndef _I915_PCIIDS_H
-#define _I915_PCIIDS_H
-
-/*
- * A pci_device_id struct {
- * __u32 vendor, device;
- * __u32 subvendor, subdevice;
- * __u32 class, class_mask;
- * kernel_ulong_t driver_data;
- * };
- * Don't use C99 here because "class" is reserved and we want to
- * give userspace flexibility.
- */
-#define INTEL_VGA_DEVICE(id, info) { \
- 0x8086, id, \
- ~0, ~0, \
- 0x030000, 0xff0000, \
- (unsigned long) info }
-
-#define INTEL_QUANTA_VGA_DEVICE(info) { \
- 0x8086, 0x16a, \
- 0x152d, 0x8990, \
- 0x030000, 0xff0000, \
- (unsigned long) info }
-
-#define INTEL_I810_IDS(info) \
- INTEL_VGA_DEVICE(0x7121, info), /* I810 */ \
- INTEL_VGA_DEVICE(0x7123, info), /* I810_DC100 */ \
- INTEL_VGA_DEVICE(0x7125, info) /* I810_E */
-
-#define INTEL_I815_IDS(info) \
- INTEL_VGA_DEVICE(0x1132, info) /* I815*/
-
-#define INTEL_I830_IDS(info) \
- INTEL_VGA_DEVICE(0x3577, info)
-
-#define INTEL_I845G_IDS(info) \
- INTEL_VGA_DEVICE(0x2562, info)
-
-#define INTEL_I85X_IDS(info) \
- INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \
- INTEL_VGA_DEVICE(0x358e, info)
-
-#define INTEL_I865G_IDS(info) \
- INTEL_VGA_DEVICE(0x2572, info) /* I865_G */
-
-#define INTEL_I915G_IDS(info) \
- INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \
- INTEL_VGA_DEVICE(0x258a, info) /* E7221_G */
-
-#define INTEL_I915GM_IDS(info) \
- INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */
-
-#define INTEL_I945G_IDS(info) \
- INTEL_VGA_DEVICE(0x2772, info) /* I945_G */
-
-#define INTEL_I945GM_IDS(info) \
- INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \
- INTEL_VGA_DEVICE(0x27ae, info) /* I945_GME */
-
-#define INTEL_I965G_IDS(info) \
- INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */ \
- INTEL_VGA_DEVICE(0x2982, info), /* G35_G */ \
- INTEL_VGA_DEVICE(0x2992, info), /* I965_Q */ \
- INTEL_VGA_DEVICE(0x29a2, info) /* I965_G */
-
-#define INTEL_G33_IDS(info) \
- INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \
- INTEL_VGA_DEVICE(0x29c2, info), /* G33_G */ \
- INTEL_VGA_DEVICE(0x29d2, info) /* Q33_G */
-
-#define INTEL_I965GM_IDS(info) \
- INTEL_VGA_DEVICE(0x2a02, info), /* I965_GM */ \
- INTEL_VGA_DEVICE(0x2a12, info) /* I965_GME */
-
-#define INTEL_GM45_IDS(info) \
- INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */
-
-#define INTEL_G45_IDS(info) \
- INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \
- INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \
- INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \
- INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \
- INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \
- INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */
-
-#define INTEL_PINEVIEW_G_IDS(info) \
- INTEL_VGA_DEVICE(0xa001, info)
-
-#define INTEL_PINEVIEW_M_IDS(info) \
- INTEL_VGA_DEVICE(0xa011, info)
-
-#define INTEL_IRONLAKE_D_IDS(info) \
- INTEL_VGA_DEVICE(0x0042, info)
-
-#define INTEL_IRONLAKE_M_IDS(info) \
- INTEL_VGA_DEVICE(0x0046, info)
-
-#define INTEL_SNB_D_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x0102, info), \
- INTEL_VGA_DEVICE(0x010A, info)
-
-#define INTEL_SNB_D_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x0112, info), \
- INTEL_VGA_DEVICE(0x0122, info)
-
-#define INTEL_SNB_D_IDS(info) \
- INTEL_SNB_D_GT1_IDS(info), \
- INTEL_SNB_D_GT2_IDS(info)
-
-#define INTEL_SNB_M_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x0106, info)
-
-#define INTEL_SNB_M_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x0116, info), \
- INTEL_VGA_DEVICE(0x0126, info)
-
-#define INTEL_SNB_M_IDS(info) \
- INTEL_SNB_M_GT1_IDS(info), \
- INTEL_SNB_M_GT2_IDS(info)
-
-#define INTEL_IVB_M_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x0156, info) /* GT1 mobile */
-
-#define INTEL_IVB_M_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */
-
-#define INTEL_IVB_M_IDS(info) \
- INTEL_IVB_M_GT1_IDS(info), \
- INTEL_IVB_M_GT2_IDS(info)
-
-#define INTEL_IVB_D_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \
- INTEL_VGA_DEVICE(0x015a, info) /* GT1 server */
-
-#define INTEL_IVB_D_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \
- INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */
-
-#define INTEL_IVB_D_IDS(info) \
- INTEL_IVB_D_GT1_IDS(info), \
- INTEL_IVB_D_GT2_IDS(info)
-
-#define INTEL_IVB_Q_IDS(info) \
- INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
-
-#define INTEL_HSW_ULT_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
- INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
- INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
- INTEL_VGA_DEVICE(0x0A0B, info) /* ULT GT1 reserved */
-
-#define INTEL_HSW_ULX_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x0A0E, info) /* ULX GT1 mobile */
-
-#define INTEL_HSW_GT1_IDS(info) \
- INTEL_HSW_ULT_GT1_IDS(info), \
- INTEL_HSW_ULX_GT1_IDS(info), \
- INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
- INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
- INTEL_VGA_DEVICE(0x040A, info), /* GT1 server */ \
- INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
- INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
- INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
- INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
- INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
- INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
- INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
- INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0D0E, info) /* CRW GT1 reserved */
-
-#define INTEL_HSW_ULT_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
- INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
- INTEL_VGA_DEVICE(0x0A1B, info) /* ULT GT2 reserved */ \
-
-#define INTEL_HSW_ULX_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x0A1E, info) /* ULX GT2 mobile */ \
-
-#define INTEL_HSW_GT2_IDS(info) \
- INTEL_HSW_ULT_GT2_IDS(info), \
- INTEL_HSW_ULX_GT2_IDS(info), \
- INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
- INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
- INTEL_VGA_DEVICE(0x041A, info), /* GT2 server */ \
- INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
- INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
- INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
- INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
- INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
- INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0D1E, info) /* CRW GT2 reserved */
-
-#define INTEL_HSW_ULT_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
- INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
- INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
- INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0A2E, info) /* ULT GT3 reserved */
-
-#define INTEL_HSW_GT3_IDS(info) \
- INTEL_HSW_ULT_GT3_IDS(info), \
- INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
- INTEL_VGA_DEVICE(0x0426, info), /* GT3 mobile */ \
- INTEL_VGA_DEVICE(0x042A, info), /* GT3 server */ \
- INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
- INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
- INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
- INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
- INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
- INTEL_VGA_DEVICE(0x0D26, info), /* CRW GT3 mobile */ \
- INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
- INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */
-
-#define INTEL_HSW_IDS(info) \
- INTEL_HSW_GT1_IDS(info), \
- INTEL_HSW_GT2_IDS(info), \
- INTEL_HSW_GT3_IDS(info)
-
-#define INTEL_VLV_IDS(info) \
- INTEL_VGA_DEVICE(0x0f30, info), \
- INTEL_VGA_DEVICE(0x0f31, info), \
- INTEL_VGA_DEVICE(0x0f32, info), \
- INTEL_VGA_DEVICE(0x0f33, info)
-
-#define INTEL_BDW_ULT_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
- INTEL_VGA_DEVICE(0x160B, info) /* GT1 Iris */
-
-#define INTEL_BDW_ULX_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x160E, info) /* GT1 ULX */
-
-#define INTEL_BDW_GT1_IDS(info) \
- INTEL_BDW_ULT_GT1_IDS(info), \
- INTEL_BDW_ULX_GT1_IDS(info), \
- INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
- INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
- INTEL_VGA_DEVICE(0x160D, info) /* GT1 Workstation */
-
-#define INTEL_BDW_ULT_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
- INTEL_VGA_DEVICE(0x161B, info) /* GT2 ULT */
-
-#define INTEL_BDW_ULX_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */
-
-#define INTEL_BDW_GT2_IDS(info) \
- INTEL_BDW_ULT_GT2_IDS(info), \
- INTEL_BDW_ULX_GT2_IDS(info), \
- INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
- INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
- INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */
-
-#define INTEL_BDW_ULT_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
- INTEL_VGA_DEVICE(0x162B, info) /* Iris */ \
-
-#define INTEL_BDW_ULX_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x162E, info) /* ULX */
-
-#define INTEL_BDW_GT3_IDS(info) \
- INTEL_BDW_ULT_GT3_IDS(info), \
- INTEL_BDW_ULX_GT3_IDS(info), \
- INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
- INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
- INTEL_VGA_DEVICE(0x162D, info) /* Workstation */
-
-#define INTEL_BDW_ULT_RSVD_IDS(info) \
- INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
- INTEL_VGA_DEVICE(0x163B, info) /* Iris */
-
-#define INTEL_BDW_ULX_RSVD_IDS(info) \
- INTEL_VGA_DEVICE(0x163E, info) /* ULX */
-
-#define INTEL_BDW_RSVD_IDS(info) \
- INTEL_BDW_ULT_RSVD_IDS(info), \
- INTEL_BDW_ULX_RSVD_IDS(info), \
- INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \
- INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
- INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
-
-#define INTEL_BDW_IDS(info) \
- INTEL_BDW_GT1_IDS(info), \
- INTEL_BDW_GT2_IDS(info), \
- INTEL_BDW_GT3_IDS(info), \
- INTEL_BDW_RSVD_IDS(info)
-
-#define INTEL_CHV_IDS(info) \
- INTEL_VGA_DEVICE(0x22b0, info), \
- INTEL_VGA_DEVICE(0x22b1, info), \
- INTEL_VGA_DEVICE(0x22b2, info), \
- INTEL_VGA_DEVICE(0x22b3, info)
-
-#define INTEL_SKL_ULT_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
- INTEL_VGA_DEVICE(0x1913, info) /* ULT GT1.5 */
-
-#define INTEL_SKL_ULX_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
- INTEL_VGA_DEVICE(0x1915, info) /* ULX GT1.5 */
-
-#define INTEL_SKL_GT1_IDS(info) \
- INTEL_SKL_ULT_GT1_IDS(info), \
- INTEL_SKL_ULX_GT1_IDS(info), \
- INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
- INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
- INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
- INTEL_VGA_DEVICE(0x1917, info) /* DT GT1.5 */
-
-#define INTEL_SKL_ULT_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
- INTEL_VGA_DEVICE(0x1921, info) /* ULT GT2F */
-
-#define INTEL_SKL_ULX_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x191E, info) /* ULX GT2 */
-
-#define INTEL_SKL_GT2_IDS(info) \
- INTEL_SKL_ULT_GT2_IDS(info), \
- INTEL_SKL_ULX_GT2_IDS(info), \
- INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
- INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
- INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
-
-#define INTEL_SKL_ULT_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3e */ \
- INTEL_VGA_DEVICE(0x1927, info) /* ULT GT3e */
-
-#define INTEL_SKL_GT3_IDS(info) \
- INTEL_SKL_ULT_GT3_IDS(info), \
- INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
- INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3e */ \
- INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3e */
-
-#define INTEL_SKL_GT4_IDS(info) \
- INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \
- INTEL_VGA_DEVICE(0x193A, info), /* SRV GT4e */ \
- INTEL_VGA_DEVICE(0x193B, info), /* Halo GT4e */ \
- INTEL_VGA_DEVICE(0x193D, info) /* WKS GT4e */
-
-#define INTEL_SKL_IDS(info) \
- INTEL_SKL_GT1_IDS(info), \
- INTEL_SKL_GT2_IDS(info), \
- INTEL_SKL_GT3_IDS(info), \
- INTEL_SKL_GT4_IDS(info)
-
-#define INTEL_BXT_IDS(info) \
- INTEL_VGA_DEVICE(0x0A84, info), \
- INTEL_VGA_DEVICE(0x1A84, info), \
- INTEL_VGA_DEVICE(0x1A85, info), \
- INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
- INTEL_VGA_DEVICE(0x5A85, info) /* APL HD Graphics 500 */
-
-#define INTEL_GLK_IDS(info) \
- INTEL_VGA_DEVICE(0x3184, info), \
- INTEL_VGA_DEVICE(0x3185, info)
-
-#define INTEL_KBL_ULT_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
- INTEL_VGA_DEVICE(0x5913, info) /* ULT GT1.5 */
-
-#define INTEL_KBL_ULX_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
- INTEL_VGA_DEVICE(0x5915, info) /* ULX GT1.5 */
-
-#define INTEL_KBL_GT1_IDS(info) \
- INTEL_KBL_ULT_GT1_IDS(info), \
- INTEL_KBL_ULX_GT1_IDS(info), \
- INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
- INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
- INTEL_VGA_DEVICE(0x590A, info), /* SRV GT1 */ \
- INTEL_VGA_DEVICE(0x590B, info) /* Halo GT1 */
-
-#define INTEL_KBL_ULT_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
- INTEL_VGA_DEVICE(0x5921, info) /* ULT GT2F */
-
-#define INTEL_KBL_ULX_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x591E, info) /* ULX GT2 */
-
-#define INTEL_KBL_GT2_IDS(info) \
- INTEL_KBL_ULT_GT2_IDS(info), \
- INTEL_KBL_ULX_GT2_IDS(info), \
- INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
- INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
- INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \
- INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
-
-#define INTEL_KBL_ULT_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x5926, info) /* ULT GT3 */
-
-#define INTEL_KBL_GT3_IDS(info) \
- INTEL_KBL_ULT_GT3_IDS(info), \
- INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */
-
-#define INTEL_KBL_GT4_IDS(info) \
- INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
-
-/* AML/KBL Y GT2 */
-#define INTEL_AML_KBL_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
- INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
-
-/* AML/CFL Y GT2 */
-#define INTEL_AML_CFL_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x87CA, info)
-
-/* CML GT1 */
-#define INTEL_CML_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x9BA2, info), \
- INTEL_VGA_DEVICE(0x9BA4, info), \
- INTEL_VGA_DEVICE(0x9BA5, info), \
- INTEL_VGA_DEVICE(0x9BA8, info)
-
-#define INTEL_CML_U_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x9B21, info), \
- INTEL_VGA_DEVICE(0x9BAA, info), \
- INTEL_VGA_DEVICE(0x9BAC, info)
-
-/* CML GT2 */
-#define INTEL_CML_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x9BC2, info), \
- INTEL_VGA_DEVICE(0x9BC4, info), \
- INTEL_VGA_DEVICE(0x9BC5, info), \
- INTEL_VGA_DEVICE(0x9BC6, info), \
- INTEL_VGA_DEVICE(0x9BC8, info), \
- INTEL_VGA_DEVICE(0x9BE6, info), \
- INTEL_VGA_DEVICE(0x9BF6, info)
-
-#define INTEL_CML_U_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x9B41, info), \
- INTEL_VGA_DEVICE(0x9BCA, info), \
- INTEL_VGA_DEVICE(0x9BCC, info)
-
-#define INTEL_KBL_IDS(info) \
- INTEL_KBL_GT1_IDS(info), \
- INTEL_KBL_GT2_IDS(info), \
- INTEL_KBL_GT3_IDS(info), \
- INTEL_KBL_GT4_IDS(info), \
- INTEL_AML_KBL_GT2_IDS(info)
-
-/* CFL S */
-#define INTEL_CFL_S_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \
- INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \
- INTEL_VGA_DEVICE(0x3E99, info) /* SRV GT1 */
-
-#define INTEL_CFL_S_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \
- INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */
-
-/* CFL H */
-#define INTEL_CFL_H_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x3E9C, info)
-
-#define INTEL_CFL_H_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3E94, info), /* Halo GT2 */ \
- INTEL_VGA_DEVICE(0x3E9B, info) /* Halo GT2 */
-
-/* CFL U GT2 */
-#define INTEL_CFL_U_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA9, info)
-
-/* CFL U GT3 */
-#define INTEL_CFL_U_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */
-
-/* WHL/CFL U GT1 */
-#define INTEL_WHL_U_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA1, info), \
- INTEL_VGA_DEVICE(0x3EA4, info)
-
-/* WHL/CFL U GT2 */
-#define INTEL_WHL_U_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA0, info), \
- INTEL_VGA_DEVICE(0x3EA3, info)
-
-/* WHL/CFL U GT3 */
-#define INTEL_WHL_U_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA2, info)
-
-#define INTEL_CFL_IDS(info) \
- INTEL_CFL_S_GT1_IDS(info), \
- INTEL_CFL_S_GT2_IDS(info), \
- INTEL_CFL_H_GT1_IDS(info), \
- INTEL_CFL_H_GT2_IDS(info), \
- INTEL_CFL_U_GT2_IDS(info), \
- INTEL_CFL_U_GT3_IDS(info), \
- INTEL_WHL_U_GT1_IDS(info), \
- INTEL_WHL_U_GT2_IDS(info), \
- INTEL_WHL_U_GT3_IDS(info), \
- INTEL_AML_CFL_GT2_IDS(info), \
- INTEL_CML_GT1_IDS(info), \
- INTEL_CML_GT2_IDS(info), \
- INTEL_CML_U_GT1_IDS(info), \
- INTEL_CML_U_GT2_IDS(info)
-
-/* CNL */
-#define INTEL_CNL_PORT_F_IDS(info) \
- INTEL_VGA_DEVICE(0x5A44, info), \
- INTEL_VGA_DEVICE(0x5A4C, info), \
- INTEL_VGA_DEVICE(0x5A54, info), \
- INTEL_VGA_DEVICE(0x5A5C, info)
-
-#define INTEL_CNL_IDS(info) \
- INTEL_CNL_PORT_F_IDS(info), \
- INTEL_VGA_DEVICE(0x5A40, info), \
- INTEL_VGA_DEVICE(0x5A41, info), \
- INTEL_VGA_DEVICE(0x5A42, info), \
- INTEL_VGA_DEVICE(0x5A49, info), \
- INTEL_VGA_DEVICE(0x5A4A, info), \
- INTEL_VGA_DEVICE(0x5A50, info), \
- INTEL_VGA_DEVICE(0x5A51, info), \
- INTEL_VGA_DEVICE(0x5A52, info), \
- INTEL_VGA_DEVICE(0x5A59, info), \
- INTEL_VGA_DEVICE(0x5A5A, info)
-
-/* ICL */
-#define INTEL_ICL_PORT_F_IDS(info) \
- INTEL_VGA_DEVICE(0x8A50, info), \
- INTEL_VGA_DEVICE(0x8A52, info), \
- INTEL_VGA_DEVICE(0x8A53, info), \
- INTEL_VGA_DEVICE(0x8A54, info), \
- INTEL_VGA_DEVICE(0x8A56, info), \
- INTEL_VGA_DEVICE(0x8A57, info), \
- INTEL_VGA_DEVICE(0x8A58, info), \
- INTEL_VGA_DEVICE(0x8A59, info), \
- INTEL_VGA_DEVICE(0x8A5A, info), \
- INTEL_VGA_DEVICE(0x8A5B, info), \
- INTEL_VGA_DEVICE(0x8A5C, info), \
- INTEL_VGA_DEVICE(0x8A70, info), \
- INTEL_VGA_DEVICE(0x8A71, info)
-
-#define INTEL_ICL_11_IDS(info) \
- INTEL_ICL_PORT_F_IDS(info), \
- INTEL_VGA_DEVICE(0x8A51, info), \
- INTEL_VGA_DEVICE(0x8A5D, info)
-
-/* EHL */
-#define INTEL_EHL_IDS(info) \
- INTEL_VGA_DEVICE(0x4541, info), \
- INTEL_VGA_DEVICE(0x4551, info), \
- INTEL_VGA_DEVICE(0x4555, info), \
- INTEL_VGA_DEVICE(0x4557, info), \
- INTEL_VGA_DEVICE(0x4571, info)
-
-/* JSL */
-#define INTEL_JSL_IDS(info) \
- INTEL_VGA_DEVICE(0x4E51, info), \
- INTEL_VGA_DEVICE(0x4E55, info), \
- INTEL_VGA_DEVICE(0x4E57, info), \
- INTEL_VGA_DEVICE(0x4E61, info), \
- INTEL_VGA_DEVICE(0x4E71, info)
-
-/* TGL */
-#define INTEL_TGL_12_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x9A60, info), \
- INTEL_VGA_DEVICE(0x9A68, info), \
- INTEL_VGA_DEVICE(0x9A70, info)
-
-#define INTEL_TGL_12_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x9A40, info), \
- INTEL_VGA_DEVICE(0x9A49, info), \
- INTEL_VGA_DEVICE(0x9A59, info), \
- INTEL_VGA_DEVICE(0x9A78, info), \
- INTEL_VGA_DEVICE(0x9AC0, info), \
- INTEL_VGA_DEVICE(0x9AC9, info), \
- INTEL_VGA_DEVICE(0x9AD9, info), \
- INTEL_VGA_DEVICE(0x9AF8, info)
-
-#define INTEL_TGL_12_IDS(info) \
- INTEL_TGL_12_GT1_IDS(info), \
- INTEL_TGL_12_GT2_IDS(info)
-
-/* RKL */
-#define INTEL_RKL_IDS(info) \
- INTEL_VGA_DEVICE(0x4C80, info), \
- INTEL_VGA_DEVICE(0x4C8A, info), \
- INTEL_VGA_DEVICE(0x4C8B, info), \
- INTEL_VGA_DEVICE(0x4C8C, info), \
- INTEL_VGA_DEVICE(0x4C90, info), \
- INTEL_VGA_DEVICE(0x4C9A, info)
-
-/* DG1 */
-#define INTEL_DG1_IDS(info) \
- INTEL_VGA_DEVICE(0x4905, info), \
- INTEL_VGA_DEVICE(0x4906, info), \
- INTEL_VGA_DEVICE(0x4907, info), \
- INTEL_VGA_DEVICE(0x4908, info)
-
-/* ADL-S */
-#define INTEL_ADLS_IDS(info) \
- INTEL_VGA_DEVICE(0x4680, info), \
- INTEL_VGA_DEVICE(0x4681, info), \
- INTEL_VGA_DEVICE(0x4682, info), \
- INTEL_VGA_DEVICE(0x4683, info), \
- INTEL_VGA_DEVICE(0x4690, info), \
- INTEL_VGA_DEVICE(0x4691, info), \
- INTEL_VGA_DEVICE(0x4692, info), \
- INTEL_VGA_DEVICE(0x4693, info)
-
-#endif /* _I915_PCIIDS_H */
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
deleted file mode 100644
index abfefaaf897a..000000000000
--- a/include/drm/intel-gtt.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Common header for intel-gtt.ko and i915.ko */
-
-#ifndef _DRM_INTEL_GTT_H
-#define _DRM_INTEL_GTT_H
-
-#include <linux/agp_backend.h>
-#include <linux/intel-iommu.h>
-#include <linux/kernel.h>
-
-void intel_gtt_get(u64 *gtt_total,
- phys_addr_t *mappable_base,
- resource_size_t *mappable_end);
-
-int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
- struct agp_bridge_data *bridge);
-void intel_gmch_remove(void);
-
-bool intel_enable_gtt(void);
-
-void intel_gtt_chipset_flush(void);
-void intel_gtt_insert_page(dma_addr_t addr,
- unsigned int pg,
- unsigned int flags);
-void intel_gtt_insert_sg_entries(struct sg_table *st,
- unsigned int pg_start,
- unsigned int flags);
-void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
-
-/* Special gtt memory types */
-#define AGP_DCACHE_MEMORY 1
-#define AGP_PHYS_MEMORY 2
-
-/* flag for GFDT type */
-#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
-
-#endif
diff --git a/include/drm/intel/display_member.h b/include/drm/intel/display_member.h
new file mode 100644
index 000000000000..0319ea560b60
--- /dev/null
+++ b/include/drm/intel/display_member.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __DRM_INTEL_DISPLAY_H__
+#define __DRM_INTEL_DISPLAY_H__
+
+#include <linux/build_bug.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+#include <drm/drm_device.h>
+
+struct intel_display;
+
+/*
+ * A dummy device struct to define the relative offsets of drm and display
+ * members. With the members identically placed in struct drm_i915_private and
+ * struct xe_device, this allows figuring out the struct intel_display pointer
+ * without the definition of either driver-specific structure.
+ */
+struct __intel_generic_device {
+ struct drm_device drm;
+ struct intel_display *display;
+};
+
+/**
+ * INTEL_DISPLAY_MEMBER_STATIC_ASSERT() - ensure correct placing of drm and display members
+ * @type: The struct to check
+ * @drm_member: Name of the struct drm_device member
+ * @display_member: Name of the struct intel_display * member.
+ *
+ * Use this static assert macro to ensure that the struct drm_device and
+ * struct intel_display * members of struct drm_i915_private and struct
+ * xe_device are at the same relative offsets.
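+ *
+ * Hypothetical usage from a driver (the device struct and member names
+ * are illustrative):
+ *
+ *    INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct my_device, drm, display);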
+ */
+#define INTEL_DISPLAY_MEMBER_STATIC_ASSERT(type, drm_member, display_member) \
+ static_assert( \
+ offsetof(struct __intel_generic_device, display) - offsetof(struct __intel_generic_device, drm) == \
+ offsetof(type, display_member) - offsetof(type, drm_member), \
+ __stringify(type) " " __stringify(drm_member) " and " __stringify(display_member) " members at invalid offsets")
+
+#endif
diff --git a/include/drm/intel/display_parent_interface.h b/include/drm/intel/display_parent_interface.h
new file mode 100644
index 000000000000..26bedc360044
--- /dev/null
+++ b/include/drm/intel/display_parent_interface.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __DISPLAY_PARENT_INTERFACE_H__
+#define __DISPLAY_PARENT_INTERFACE_H__
+
+#include <linux/types.h>
+
+struct drm_device;
+struct ref_tracker;
+
+struct intel_display_rpm_interface {
+ struct ref_tracker *(*get)(const struct drm_device *drm);
+ struct ref_tracker *(*get_raw)(const struct drm_device *drm);
+ struct ref_tracker *(*get_if_in_use)(const struct drm_device *drm);
+ struct ref_tracker *(*get_noresume)(const struct drm_device *drm);
+
+ void (*put)(const struct drm_device *drm, struct ref_tracker *wakeref);
+ void (*put_raw)(const struct drm_device *drm, struct ref_tracker *wakeref);
+ void (*put_unchecked)(const struct drm_device *drm);
+
+ bool (*suspended)(const struct drm_device *drm);
+ void (*assert_held)(const struct drm_device *drm);
+ void (*assert_block)(const struct drm_device *drm);
+ void (*assert_unblock)(const struct drm_device *drm);
+};
+
+/**
+ * struct intel_display_parent_interface - services parent driver provides to display
+ *
+ * The parent, or core, driver provides a pointer to this structure to the
+ * display driver when calling intel_display_device_probe(). The display
+ * driver uses it to access services provided by the parent driver. The
+ * structure may contain sub-struct pointers to group function pointers by
+ * functionality.
+ *
+ * All function and sub-struct pointers must be initialized and callable unless
+ * explicitly marked as "optional" below. The display driver will only NULL
+ * check the optional pointers.
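+ *
+ * A parent driver would typically define a static const instance and pass
+ * it to intel_display_device_probe() (a sketch; names are illustrative):
+ *
+ *    static const struct intel_display_parent_interface my_parent_if = {
+ *            .rpm = &my_rpm_interface,
+ *    };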
+ */
+struct intel_display_parent_interface {
+ /** @rpm: Runtime PM functions */
+ const struct intel_display_rpm_interface *rpm;
+};
+
+#endif
diff --git a/include/drm/i915_component.h b/include/drm/intel/i915_component.h
index 55c3b123581b..8082db222e00 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/intel/i915_component.h
@@ -24,11 +24,14 @@
#ifndef _I915_COMPONENT_H_
#define _I915_COMPONENT_H_
-#include "drm_audio_component.h"
+#include <drm/drm_audio_component.h>
enum i915_component_type {
I915_COMPONENT_AUDIO = 1,
I915_COMPONENT_HDCP,
+ I915_COMPONENT_PXP,
+ I915_COMPONENT_GSC_PROXY,
+ INTEL_COMPONENT_LB,
};
/* MAX_PORT is the number of port
diff --git a/include/drm/i915_drm.h b/include/drm/intel/i915_drm.h
index 6722005884db..adff68538484 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/intel/i915_drm.h
@@ -26,8 +26,7 @@
#ifndef _I915_DRM_H_
#define _I915_DRM_H_
-#include <drm/i915_pciids.h>
-#include <uapi/drm/i915_drm.h>
+#include <linux/types.h>
/* For use by IPS driver */
unsigned long i915_read_mch_val(void);
@@ -43,7 +42,7 @@ extern struct resource intel_graphics_stolen_res;
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
* This is all handled in the intel-gtt.ko module. i915.ko only
- * cares about the vga bit for the vga rbiter.
+ * cares about the vga bit for the vga arbiter.
*/
#define INTEL_GMCH_CTRL 0x52
#define INTEL_GMCH_VGA_DISABLE (1 << 1)
diff --git a/include/drm/intel/i915_gsc_proxy_mei_interface.h b/include/drm/intel/i915_gsc_proxy_mei_interface.h
new file mode 100644
index 000000000000..850dfbf40607
--- /dev/null
+++ b/include/drm/intel/i915_gsc_proxy_mei_interface.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2022-2023 Intel Corporation
+ */
+
+#ifndef _I915_GSC_PROXY_MEI_INTERFACE_H_
+#define _I915_GSC_PROXY_MEI_INTERFACE_H_
+
+#include <linux/types.h>
+
+struct device;
+struct module;
+
+/**
+ * struct i915_gsc_proxy_component_ops - ops for GSC Proxy services.
+ * @owner: Module providing the ops
+ * @send: sends a proxy message from GSC FW to ME FW
+ * @recv: receives a proxy message for GSC FW from ME FW
+ */
+struct i915_gsc_proxy_component_ops {
+ struct module *owner;
+
+ /**
+ * @send: Sends a proxy message to ME FW.
+ * @dev: device struct corresponding to the mei device
+ * @buf: message buffer to send
+ * @size: size of the message
+ * Return: bytes sent on success, negative errno value on failure
+ */
+ int (*send)(struct device *dev, const void *buf, size_t size);
+
+ /**
+ * @recv: Receives a proxy message from ME FW.
+ * @dev: device struct corresponding to the mei device
+ * @buf: message buffer to contain the received message
+ * @size: size of the buffer
+ * Return: bytes received on success, negative errno value on failure
+ */
+ int (*recv)(struct device *dev, void *buf, size_t size);
+};
+
+/**
+ * struct i915_gsc_proxy_component - Used for communication between i915 and
+ * MEI drivers for GSC proxy services
+ * @mei_dev: device that provides the GSC proxy service.
+ * @ops: Ops implemented by GSC proxy driver, used by i915 driver.
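+ *
+ * Illustrative call pattern once the component is bound (a sketch, not
+ * taken from the i915 code; buf and size are caller-provided):
+ *
+ *    ret = comp->ops->send(comp->mei_dev, buf, size);
+ *    if (ret < 0)
+ *            return ret;
+ *    ret = comp->ops->recv(comp->mei_dev, buf, size);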
+ */
+struct i915_gsc_proxy_component {
+ struct device *mei_dev;
+ const struct i915_gsc_proxy_component_ops *ops;
+};
+
+#endif /* _I915_GSC_PROXY_MEI_INTERFACE_H_ */
diff --git a/include/drm/intel/i915_hdcp_interface.h b/include/drm/intel/i915_hdcp_interface.h
new file mode 100644
index 000000000000..d776ed7dcd00
--- /dev/null
+++ b/include/drm/intel/i915_hdcp_interface.h
@@ -0,0 +1,547 @@
+/* SPDX-License-Identifier: (GPL-2.0+) */
+/*
+ * Copyright © 2017-2019 Intel Corporation
+ *
+ * Authors:
+ * Ramalingam C <ramalingam.c@intel.com>
+ */
+
+#ifndef _I915_HDCP_INTERFACE_H_
+#define _I915_HDCP_INTERFACE_H_
+
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <drm/display/drm_hdcp.h>
+
+/**
+ * enum hdcp_port_type - HDCP port implementation type defined by ME/GSC FW
+ * @HDCP_PORT_TYPE_INVALID: Invalid hdcp port type
+ * @HDCP_PORT_TYPE_INTEGRATED: In-Host HDCP2.x port
+ * @HDCP_PORT_TYPE_LSPCON: HDCP2.2 discrete wired Tx port with LSPCON
+ * (HDMI 2.0) solution
+ * @HDCP_PORT_TYPE_CPDP: HDCP2.2 discrete wired Tx port using the CPDP (DP 1.3)
+ * solution
+ */
+enum hdcp_port_type {
+ HDCP_PORT_TYPE_INVALID,
+ HDCP_PORT_TYPE_INTEGRATED,
+ HDCP_PORT_TYPE_LSPCON,
+ HDCP_PORT_TYPE_CPDP
+};
+
+/**
+ * enum hdcp_wired_protocol - HDCP adaptation used on the port
+ * @HDCP_PROTOCOL_INVALID: Invalid HDCP adaptation protocol
+ * @HDCP_PROTOCOL_HDMI: HDMI adaptation of HDCP used on the port
+ * @HDCP_PROTOCOL_DP: DP adaptation of HDCP used on the port
+ */
+enum hdcp_wired_protocol {
+ HDCP_PROTOCOL_INVALID,
+ HDCP_PROTOCOL_HDMI,
+ HDCP_PROTOCOL_DP
+};
+
+enum hdcp_ddi {
+ HDCP_DDI_INVALID_PORT = 0x0,
+
+ HDCP_DDI_B = 1,
+ HDCP_DDI_C,
+ HDCP_DDI_D,
+ HDCP_DDI_E,
+ HDCP_DDI_F,
+ HDCP_DDI_A = 7,
+ HDCP_DDI_RANGE_END = HDCP_DDI_A,
+};
+
+/**
+ * enum hdcp_transcoder - ME/GSC Firmware defined index for transcoders
+ * @HDCP_INVALID_TRANSCODER: Index for Invalid transcoder
+ * @HDCP_TRANSCODER_EDP: Index for EDP Transcoder
+ * @HDCP_TRANSCODER_DSI0: Index for DSI0 Transcoder
+ * @HDCP_TRANSCODER_DSI1: Index for DSI1 Transcoder
+ * @HDCP_TRANSCODER_A: Index for Transcoder A
+ * @HDCP_TRANSCODER_B: Index for Transcoder B
+ * @HDCP_TRANSCODER_C: Index for Transcoder C
+ * @HDCP_TRANSCODER_D: Index for Transcoder D
+ */
+enum hdcp_transcoder {
+ HDCP_INVALID_TRANSCODER = 0x00,
+ HDCP_TRANSCODER_EDP,
+ HDCP_TRANSCODER_DSI0,
+ HDCP_TRANSCODER_DSI1,
+ HDCP_TRANSCODER_A = 0x10,
+ HDCP_TRANSCODER_B,
+ HDCP_TRANSCODER_C,
+ HDCP_TRANSCODER_D
+};
+
+/**
+ * struct hdcp_port_data - intel specific HDCP port data
+ * @hdcp_ddi: ddi index as per ME/GSC FW
+ * @hdcp_transcoder: transcoder index as per ME/GSC FW
+ * @port_type: HDCP port type as per ME/GSC FW classification
+ * @protocol: HDCP adaptation as per ME/GSC FW
+ * @k: No of streams transmitted on a port. Only on DP MST this is != 1
+ * @seq_num_m: Count of RepeaterAuth_Stream_Manage msg propagated.
+ * Initialized to 0 on AKE_INIT. Incremented after every successful
+ * transmission of RepeaterAuth_Stream_Manage message. When it rolls
+ * over, re-Auth has to be triggered.
+ * @streams: struct hdcp2_streamid_type[k]. Defines the type and id for the
+ * streams
+ */
+struct hdcp_port_data {
+ enum hdcp_ddi hdcp_ddi;
+ enum hdcp_transcoder hdcp_transcoder;
+ u8 port_type;
+ u8 protocol;
+ u16 k;
+ u32 seq_num_m;
+ struct hdcp2_streamid_type *streams;
+};
+
+/**
+ * struct i915_hdcp_ops - ops for HDCP2.2 services.
+ * @owner: Module providing the ops
+ * @initiate_hdcp2_session: Initiate a Wired HDCP2.2 Tx Session.
+ * And Prepare AKE_Init.
+ * @verify_receiver_cert_prepare_km: Verify the Receiver Certificate
+ * AKE_Send_Cert and prepare
+ * AKE_Stored_Km/AKE_No_Stored_Km
+ * @verify_hprime: Verify AKE_Send_H_prime
+ * @store_pairing_info: Store pairing info received
+ * @initiate_locality_check: Prepare LC_Init
+ * @verify_lprime: Verify lprime
+ * @get_session_key: Prepare SKE_Send_Eks
+ * @repeater_check_flow_prepare_ack: Validate the Downstream topology
+ * and prepare rep_ack
+ * @verify_mprime: Verify mprime
+ * @enable_hdcp_authentication: Mark a port as authenticated.
+ * @close_hdcp_session: Close the Wired HDCP Tx session per port.
+ * This also disables the authenticated state of the port.
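+ *
+ * The ops roughly follow the HDCP 2.2 authentication message order. An
+ * abbreviated sketch (error handling and the repeater path omitted):
+ *
+ *    ops->initiate_hdcp2_session(dev, data, &ake_data);
+ *    ops->verify_receiver_cert_prepare_km(dev, data, rx_cert, &km_stored,
+ *                                         ek_pub_km, &msg_sz);
+ *    ops->verify_hprime(dev, data, rx_hprime);
+ *    ops->store_pairing_info(dev, data, pairing_info);
+ *    ops->initiate_locality_check(dev, data, lc_init_data);
+ *    ops->verify_lprime(dev, data, rx_lprime);
+ *    ops->get_session_key(dev, data, ske_data);
+ *    ops->enable_hdcp_authentication(dev, data);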
+ */
+struct i915_hdcp_ops {
+ /**
+ * @owner: hdcp module
+ */
+ struct module *owner;
+
+ int (*initiate_hdcp2_session)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_init *ake_data);
+ int (*verify_receiver_cert_prepare_km)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_cert
+ *rx_cert,
+ bool *km_stored,
+ struct hdcp2_ake_no_stored_km
+ *ek_pub_km,
+ size_t *msg_sz);
+ int (*verify_hprime)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_hprime *rx_hprime);
+ int (*store_pairing_info)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ake_send_pairing_info
+ *pairing_info);
+ int (*initiate_locality_check)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_lc_init *lc_init_data);
+ int (*verify_lprime)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_lc_send_lprime *rx_lprime);
+ int (*get_session_key)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_ske_send_eks *ske_data);
+ int (*repeater_check_flow_prepare_ack)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_send_receiverid_list
+ *rep_topology,
+ struct hdcp2_rep_send_ack
+ *rep_send_ack);
+ int (*verify_mprime)(struct device *dev,
+ struct hdcp_port_data *data,
+ struct hdcp2_rep_stream_ready *stream_ready);
+ int (*enable_hdcp_authentication)(struct device *dev,
+ struct hdcp_port_data *data);
+ int (*close_hdcp_session)(struct device *dev,
+ struct hdcp_port_data *data);
+};
+
+/**
+ * struct i915_hdcp_arbiter - Used for communication between i915
+ * and hdcp drivers for the HDCP2.2 services
+ */
+struct i915_hdcp_arbiter {
+ /**
+ * @hdcp_dev: device that provides the HDCP2.2 service from MEI Bus.
+ */
+ struct device *hdcp_dev;
+
+ /**
+ * @ops: Ops implemented by hdcp driver or intel_hdcp_gsc, used by i915
+ * driver.
+ */
+ const struct i915_hdcp_ops *ops;
+
+ /**
+ * @mutex: To protect the above members.
+ */
+ struct mutex mutex;
+};
+
+/* fw_hdcp_status: Enumeration of all HDCP Status Codes */
+enum fw_hdcp_status {
+ FW_HDCP_STATUS_SUCCESS = 0x0000,
+
+ /* WiDi Generic Status Codes */
+ FW_HDCP_STATUS_INTERNAL_ERROR = 0x1000,
+ FW_HDCP_STATUS_UNKNOWN_ERROR = 0x1001,
+ FW_HDCP_STATUS_INCORRECT_API_VERSION = 0x1002,
+ FW_HDCP_STATUS_INVALID_FUNCTION = 0x1003,
+ FW_HDCP_STATUS_INVALID_BUFFER_LENGTH = 0x1004,
+ FW_HDCP_STATUS_INVALID_PARAMS = 0x1005,
+ FW_HDCP_STATUS_AUTHENTICATION_FAILED = 0x1006,
+
+ /* WiDi Status Codes */
+ FW_HDCP_INVALID_SESSION_STATE = 0x6000,
+ FW_HDCP_SRM_FRAGMENT_UNEXPECTED = 0x6001,
+ FW_HDCP_SRM_INVALID_LENGTH = 0x6002,
+ FW_HDCP_SRM_FRAGMENT_OFFSET_INVALID = 0x6003,
+ FW_HDCP_SRM_VERIFICATION_FAILED = 0x6004,
+ FW_HDCP_SRM_VERSION_TOO_OLD = 0x6005,
+ FW_HDCP_RX_CERT_VERIFICATION_FAILED = 0x6006,
+ FW_HDCP_RX_REVOKED = 0x6007,
+ FW_HDCP_H_VERIFICATION_FAILED = 0x6008,
+ FW_HDCP_REPEATER_CHECK_UNEXPECTED = 0x6009,
+ FW_HDCP_TOPOLOGY_MAX_EXCEEDED = 0x600A,
+ FW_HDCP_V_VERIFICATION_FAILED = 0x600B,
+ FW_HDCP_L_VERIFICATION_FAILED = 0x600C,
+ FW_HDCP_STREAM_KEY_ALLOC_FAILED = 0x600D,
+ FW_HDCP_BASE_KEY_RESET_FAILED = 0x600E,
+ FW_HDCP_NONCE_GENERATION_FAILED = 0x600F,
+ FW_HDCP_STATUS_INVALID_E_KEY_STATE = 0x6010,
+ FW_HDCP_STATUS_INVALID_CS_ICV = 0x6011,
+ FW_HDCP_STATUS_INVALID_KB_KEY_STATE = 0x6012,
+ FW_HDCP_STATUS_INVALID_PAVP_MODE_ICV = 0x6013,
+ FW_HDCP_STATUS_INVALID_PAVP_MODE = 0x6014,
+ FW_HDCP_STATUS_LC_MAX_ATTEMPTS = 0x6015,
+
+ /* New status for HDCP 2.1 */
+ FW_HDCP_STATUS_MISMATCH_IN_M = 0x6016,
+
+ /* New status code for HDCP 2.2 Rx */
+ FW_HDCP_STATUS_RX_PROV_NOT_ALLOWED = 0x6017,
+ FW_HDCP_STATUS_RX_PROV_WRONG_SUBJECT = 0x6018,
+ FW_HDCP_RX_NEEDS_PROVISIONING = 0x6019,
+ FW_HDCP_BKSV_ICV_AUTH_FAILED = 0x6020,
+ FW_HDCP_STATUS_INVALID_STREAM_ID = 0x6021,
+ FW_HDCP_STATUS_CHAIN_NOT_INITIALIZED = 0x6022,
+ FW_HDCP_FAIL_NOT_EXPECTED = 0x6023,
+ FW_HDCP_FAIL_HDCP_OFF = 0x6024,
+ FW_HDCP_FAIL_INVALID_PAVP_MEMORY_MODE = 0x6025,
+ FW_HDCP_FAIL_AES_ECB_FAILURE = 0x6026,
+ FW_HDCP_FEATURE_NOT_SUPPORTED = 0x6027,
+ FW_HDCP_DMA_READ_ERROR = 0x6028,
+ FW_HDCP_DMA_WRITE_ERROR = 0x6029,
+ FW_HDCP_FAIL_INVALID_PACKET_SIZE = 0x6030,
+ FW_HDCP_H264_PARSING_ERROR = 0x6031,
+ FW_HDCP_HDCP2_ERRATA_VIDEO_VIOLATION = 0x6032,
+ FW_HDCP_HDCP2_ERRATA_AUDIO_VIOLATION = 0x6033,
+ FW_HDCP_TX_ACTIVE_ERROR = 0x6034,
+ FW_HDCP_MODE_CHANGE_ERROR = 0x6035,
+ FW_HDCP_STREAM_TYPE_ERROR = 0x6036,
+ FW_HDCP_STREAM_MANAGE_NOT_POSSIBLE = 0x6037,
+
+ FW_HDCP_STATUS_PORT_INVALID_COMMAND = 0x6038,
+ FW_HDCP_STATUS_UNSUPPORTED_PROTOCOL = 0x6039,
+ FW_HDCP_STATUS_INVALID_PORT_INDEX = 0x603a,
+ FW_HDCP_STATUS_TX_AUTH_NEEDED = 0x603b,
+ FW_HDCP_STATUS_NOT_INTEGRATED_PORT = 0x603c,
+ FW_HDCP_STATUS_SESSION_MAX_REACHED = 0x603d,
+
+ /* hdcp capable bit is not set in rx_caps (error is unique to DP) */
+ FW_HDCP_STATUS_NOT_HDCP_CAPABLE = 0x6041,
+
+ FW_HDCP_STATUS_INVALID_STREAM_COUNT = 0x6042,
+};
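
As illustration only (no such mapping is defined in this patch), a consumer of these codes would typically fold them into kernel error codes; a hedged sketch:

    /* Illustrative fw_hdcp_status to -errno mapping (assumption). */
    static int example_fw_hdcp_status_to_errno(enum fw_hdcp_status status)
    {
            switch (status) {
            case FW_HDCP_STATUS_SUCCESS:
                    return 0;
            case FW_HDCP_STATUS_NOT_HDCP_CAPABLE:
                    return -EOPNOTSUPP;
            case FW_HDCP_STATUS_INVALID_PARAMS:
                    return -EINVAL;
            default:
                    return -EIO; /* everything else: generic I/O error */
            }
    }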
+
+#define HDCP_API_VERSION 0x00010000
+
+#define HDCP_M_LEN 16
+#define HDCP_KH_LEN 16
+
+/* Payload buffer size (excluding header) for CMDs and corresponding responses */
+/* Wired_Tx_AKE */
+#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN (4 + 1)
+#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_OUT (4 + 8 + 3)
+
+#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN (4 + 522 + 8 + 3)
+#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MIN_OUT (4 + 1 + 3 + 16 + 16)
+#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MAX_OUT (4 + 1 + 3 + 128)
+
+#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN (4 + 32)
+#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_OUT (4)
+
+#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN (4 + 16)
+#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_OUT (4)
+
+#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN (4)
+#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_OUT (4)
+
+/* Wired_Tx_LC */
+#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN (4)
+#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_OUT (4 + 8)
+
+#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN (4 + 32)
+#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_OUT (4)
+
+/* Wired_Tx_SKE */
+#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN (4)
+#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_OUT (4 + 16 + 8)
+
+/* Wired_Tx_Enable_Auth */
+#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN (4 + 1)
+#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_OUT (4)
+
+/* Wired_Tx_Repeater */
+#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN (4 + 2 + 3 + 16 + 155)
+#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_OUT (4 + 1 + 16)
+
+#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN (4 + 3 + \
+ 32 + 2 + 2)
+
+#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_OUT (4)
+
+/* hdcp_command_id: Enumeration of all WIRED HDCP Command IDs */
+enum hdcp_command_id {
+ _WIDI_COMMAND_BASE = 0x00030000,
+ WIDI_INITIATE_HDCP2_SESSION = _WIDI_COMMAND_BASE,
+ HDCP_GET_SRM_STATUS,
+ HDCP_SEND_SRM_FRAGMENT,
+
+ /* The wired HDCP Tx commands */
+ _WIRED_COMMAND_BASE = 0x00031000,
+ WIRED_INITIATE_HDCP2_SESSION = _WIRED_COMMAND_BASE,
+ WIRED_VERIFY_RECEIVER_CERT,
+ WIRED_AKE_SEND_HPRIME,
+ WIRED_AKE_SEND_PAIRING_INFO,
+ WIRED_INIT_LOCALITY_CHECK,
+ WIRED_VALIDATE_LOCALITY,
+ WIRED_GET_SESSION_KEY,
+ WIRED_ENABLE_AUTH,
+ WIRED_VERIFY_REPEATER,
+ WIRED_REPEATER_AUTH_STREAM_REQ,
+ WIRED_CLOSE_SESSION,
+
+ _WIRED_COMMANDS_COUNT,
+};
+
+union encrypted_buff {
+ u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN];
+ u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN];
+ struct {
+ u8 e_kh_km[HDCP_KH_LEN];
+ u8 m[HDCP_M_LEN];
+ } __packed;
+};
+
+/* HDCP HECI message header. All header values are little endian. */
+struct hdcp_cmd_header {
+ u32 api_version;
+ u32 command_id;
+ enum fw_hdcp_status status;
+ /* Length of the HECI message (excluding the header) */
+ u32 buffer_len;
+} __packed;
+
+/* Empty command request or response. No data follows the header. */
+struct hdcp_cmd_no_data {
+ struct hdcp_cmd_header header;
+} __packed;
+
+/* Uniquely identifies the hdcp port being addressed for a given command. */
+struct hdcp_port_id {
+ u8 integrated_port_type;
+ /* physical_port is used until Gen11.5. Must be zero for Gen11.5+ */
+ u8 physical_port;
+ /* attached_transcoder is for Gen11.5+. Set to zero for <Gen11.5 */
+ u8 attached_transcoder;
+ u8 reserved;
+} __packed;
+
+/*
+ * Data structures for integrated wired HDCP2 Tx in
+ * support of the AKE protocol
+ */
+/* HECI struct for integrated wired HDCP Tx session initiation. */
+struct wired_cmd_initiate_hdcp2_session_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 protocol; /* for HDMI vs DP */
+} __packed;
+
+struct wired_cmd_initiate_hdcp2_session_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 r_tx[HDCP_2_2_RTX_LEN];
+ struct hdcp2_tx_caps tx_caps;
+} __packed;
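
A sketch of how such a request is assembled from the shared header plus the per-command payload, with buffer_len taken from the WIRED_CMD_BUF_LEN_* sizes above; the helper name is hypothetical:

    /* Hypothetical helper filling a session-initiation request. */
    static void example_fill_session_init(struct wired_cmd_initiate_hdcp2_session_in *msg)
    {
            msg->header.api_version = HDCP_API_VERSION;
            msg->header.command_id = WIRED_INITIATE_HDCP2_SESSION;
            msg->header.status = FW_HDCP_STATUS_SUCCESS;
            /* buffer_len excludes the header itself. */
            msg->header.buffer_len = WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN;
    }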
+
+/* HECI struct for ending an integrated wired HDCP Tx session. */
+struct wired_cmd_close_session_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+struct wired_cmd_close_session_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/* HECI struct for integrated wired HDCP Tx Rx Cert verification. */
+struct wired_cmd_verify_receiver_cert_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ struct hdcp2_cert_rx cert_rx;
+ u8 r_rx[HDCP_2_2_RRX_LEN];
+ u8 rx_caps[HDCP_2_2_RXCAPS_LEN];
+} __packed;
+
+struct wired_cmd_verify_receiver_cert_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 km_stored;
+ u8 reserved[3];
+ union encrypted_buff ekm_buff;
+} __packed;
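
The union carries either E(kpub, km) or E(kh, km) plus m, selected by km_stored. A hedged consumer sketch; use_stored_km() and use_no_stored_km() are hypothetical:

    /* Illustrative consumer of the verify-receiver-cert reply. */
    static void example_read_ekm(const struct wired_cmd_verify_receiver_cert_out *rep)
    {
            if (rep->km_stored) {
                    /* Pairing info stored: E(kh, km) and m come separately. */
                    use_stored_km(rep->ekm_buff.e_kh_km, rep->ekm_buff.m);
            } else {
                    /* No stored km: the full E(kpub, km) blob is returned. */
                    use_no_stored_km(rep->ekm_buff.e_kpub_km);
            }
    }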
+
+/* HECI struct for verification of Rx's Hprime in a HDCP Tx session */
+struct wired_cmd_ake_send_hprime_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 h_prime[HDCP_2_2_H_PRIME_LEN];
+} __packed;
+
+struct wired_cmd_ake_send_hprime_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/*
+ * HECI struct for sending in AKE pairing data generated by the Rx in an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_ake_send_pairing_info_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN];
+} __packed;
+
+struct wired_cmd_ake_send_pairing_info_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/* Data structures for integrated wired HDCP2 Tx in support of the LC protocol */
+/*
+ * HECI struct for initiating locality check with an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_init_locality_check_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+struct wired_cmd_init_locality_check_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 r_n[HDCP_2_2_RN_LEN];
+} __packed;
+
+/*
+ * HECI struct for validating an Rx's LPrime value in an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_validate_locality_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 l_prime[HDCP_2_2_L_PRIME_LEN];
+} __packed;
+
+struct wired_cmd_validate_locality_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/*
+ * Data structures for integrated wired HDCP2 Tx in support of the
+ * SKE protocol
+ */
+/* HECI struct for creating the session key */
+struct wired_cmd_get_session_key_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+struct wired_cmd_get_session_key_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN];
+ u8 r_iv[HDCP_2_2_RIV_LEN];
+} __packed;
+
+/* HECI struct for the Tx enable authentication command */
+struct wired_cmd_enable_auth_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 stream_type;
+} __packed;
+
+struct wired_cmd_enable_auth_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+/*
+ * Data structures for integrated wired HDCP2 Tx in support of
+ * the repeater protocols
+ */
+/*
+ * HECI struct for verifying the downstream repeater's HDCP topology in an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_verify_repeater_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 rx_info[HDCP_2_2_RXINFO_LEN];
+ u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN];
+ u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN];
+ u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN];
+} __packed;
+
+struct wired_cmd_verify_repeater_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 content_type_supported;
+ u8 v[HDCP_2_2_V_PRIME_HALF_LEN];
+} __packed;
+
+/*
+ * HECI struct in support of stream management in an
+ * integrated wired HDCP Tx session.
+ */
+struct wired_cmd_repeater_auth_stream_req_in {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+ u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
+ u8 m_prime[HDCP_2_2_MPRIME_LEN];
+ __be16 k;
+ struct hdcp2_streamid_type streams[];
+} __packed;
+
+struct wired_cmd_repeater_auth_stream_req_out {
+ struct hdcp_cmd_header header;
+ struct hdcp_port_id port;
+} __packed;
+
+#endif /* _I915_HDCP_INTERFACE_H_ */
diff --git a/include/drm/intel/i915_pxp_tee_interface.h b/include/drm/intel/i915_pxp_tee_interface.h
new file mode 100644
index 000000000000..a532d32f58f3
--- /dev/null
+++ b/include/drm/intel/i915_pxp_tee_interface.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef _I915_PXP_TEE_INTERFACE_H_
+#define _I915_PXP_TEE_INTERFACE_H_
+
+#include <linux/mutex.h>
+#include <linux/device.h>
+struct scatterlist;
+
+/**
+ * struct i915_pxp_component_ops - ops for PXP services.
+ */
+struct i915_pxp_component_ops {
+ /**
+ * @owner: Module providing the ops.
+ */
+ struct module *owner;
+
+ /**
+ * @send: Send a PXP message.
+ */
+ int (*send)(struct device *dev, const void *message, size_t size,
+ unsigned long timeout_ms);
+ /**
+ * @recv: Receive a PXP message.
+ */
+ int (*recv)(struct device *dev, void *buffer, size_t size,
+ unsigned long timeout_ms);
+ /**
+ * @gsc_command: Send a GSC command.
+ */
+ ssize_t (*gsc_command)(struct device *dev, u8 client_id, u32 fence_id,
+ struct scatterlist *sg_in, size_t total_in_len,
+ struct scatterlist *sg_out);
+
+};
+
+/**
+ * struct i915_pxp_component - Used for communication between i915 and TEE
+ * drivers for the PXP services
+ */
+struct i915_pxp_component {
+ /**
+ * @tee_dev: device that provides the PXP service from the TEE bus.
+ */
+ struct device *tee_dev;
+
+ /**
+ * @ops: Ops implemented by TEE driver, used by i915 driver.
+ */
+ const struct i915_pxp_component_ops *ops;
+
+ /**
+ * @mutex: To protect the above members.
+ */
+ struct mutex mutex;
+};
+
+#endif /* _I915_PXP_TEE_INTERFACE_H_ */
diff --git a/include/drm/intel/intel-gtt.h b/include/drm/intel/intel-gtt.h
new file mode 100644
index 000000000000..f53bcff01f22
--- /dev/null
+++ b/include/drm/intel/intel-gtt.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Common header for intel-gtt.ko and i915.ko */
+
+#ifndef _DRM_INTEL_GTT_H
+#define _DRM_INTEL_GTT_H
+
+#include <linux/types.h>
+
+struct agp_bridge_data;
+struct pci_dev;
+struct sg_table;
+
+void intel_gmch_gtt_get(u64 *gtt_total,
+ phys_addr_t *mappable_base,
+ resource_size_t *mappable_end);
+
+int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
+ struct agp_bridge_data *bridge);
+void intel_gmch_remove(void);
+
+bool intel_gmch_enable_gtt(void);
+
+void intel_gmch_gtt_flush(void);
+void intel_gmch_gtt_insert_page(dma_addr_t addr,
+ unsigned int pg,
+ unsigned int flags);
+void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
+ unsigned int pg_start,
+ unsigned int flags);
+void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+dma_addr_t intel_gmch_gtt_read_entry(unsigned int pg,
+ bool *is_present, bool *is_local);
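
A rough bring-up sketch implied by these declarations, assuming intel_gmch_probe() returns nonzero on success and the caller already holds both PCI devices; the function name is hypothetical:

    /* Hypothetical GMCH bring-up flow. */
    static int example_gmch_setup(struct pci_dev *bridge, struct pci_dev *gpu)
    {
            u64 gtt_total;
            phys_addr_t mappable_base;
            resource_size_t mappable_end;

            if (!intel_gmch_probe(bridge, gpu, NULL))
                    return -ENODEV;

            if (!intel_gmch_enable_gtt())
                    return -EIO;

            intel_gmch_gtt_get(&gtt_total, &mappable_base, &mappable_end);
            return 0;
    }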
+
+/* Special gtt memory types */
+#define AGP_DCACHE_MEMORY 1
+#define AGP_PHYS_MEMORY 2
+
+/* flag for GFDT type */
+#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
+
+#endif
diff --git a/include/drm/intel/intel_lb_mei_interface.h b/include/drm/intel/intel_lb_mei_interface.h
new file mode 100644
index 000000000000..d65be2cba2ab
--- /dev/null
+++ b/include/drm/intel/intel_lb_mei_interface.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2025 Intel Corporation
+ */
+
+#ifndef _INTEL_LB_MEI_INTERFACE_H_
+#define _INTEL_LB_MEI_INTERFACE_H_
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct device;
+
+/**
+ * define INTEL_LB_FLAG_IS_PERSISTENT - Mark the payload as persistent
+ *
+ * This flag indicates that the late binding payload should be stored
+ * persistently in flash across warm resets.
+ */
+#define INTEL_LB_FLAG_IS_PERSISTENT BIT(0)
+
+/**
+ * enum intel_lb_type - Late binding payload type
+ * @INTEL_LB_TYPE_FAN_CONTROL: Fan controller configuration
+ */
+enum intel_lb_type {
+ INTEL_LB_TYPE_FAN_CONTROL = 1,
+};
+
+/**
+ * enum intel_lb_status - Status codes returned on late binding transmissions
+ * @INTEL_LB_STATUS_SUCCESS: Operation completed successfully
+ * @INTEL_LB_STATUS_4ID_MISMATCH: Mismatch in the expected 4ID (firmware identity/token)
+ * @INTEL_LB_STATUS_ARB_FAILURE: Arbitration failure (e.g. conflicting access or state)
+ * @INTEL_LB_STATUS_GENERAL_ERROR: General firmware error not covered by other codes
+ * @INTEL_LB_STATUS_INVALID_PARAMS: One or more input parameters are invalid
+ * @INTEL_LB_STATUS_INVALID_SIGNATURE: Payload has an invalid or untrusted signature
+ * @INTEL_LB_STATUS_INVALID_PAYLOAD: Payload contents are not accepted by firmware
+ * @INTEL_LB_STATUS_TIMEOUT: Operation timed out before completion
+ */
+enum intel_lb_status {
+ INTEL_LB_STATUS_SUCCESS = 0,
+ INTEL_LB_STATUS_4ID_MISMATCH = 1,
+ INTEL_LB_STATUS_ARB_FAILURE = 2,
+ INTEL_LB_STATUS_GENERAL_ERROR = 3,
+ INTEL_LB_STATUS_INVALID_PARAMS = 4,
+ INTEL_LB_STATUS_INVALID_SIGNATURE = 5,
+ INTEL_LB_STATUS_INVALID_PAYLOAD = 6,
+ INTEL_LB_STATUS_TIMEOUT = 7,
+};
+
+/**
+ * struct intel_lb_component_ops - Ops for late binding services
+ */
+struct intel_lb_component_ops {
+ /**
+ * @push_payload: Send a payload to the authentication firmware
+ * @dev: Device struct corresponding to the mei device
+ * @type: Payload type (see &enum intel_lb_type)
+ * @flags: Payload flags bitmap (e.g. %INTEL_LB_FLAG_IS_PERSISTENT)
+ * @payload: Pointer to payload buffer
+ * @payload_size: Payload buffer size in bytes
+ *
+ * Return: 0 on success, a negative errno value on transport failure,
+ * or a positive status code returned by the firmware
+ */
+ int (*push_payload)(struct device *dev, u32 type, u32 flags,
+ const void *payload, size_t payload_size);
+};
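
A hedged usage sketch: only the call shape comes from the ops above; the component lookup and payload blob are assumed to exist elsewhere:

    /* Illustrative push of a persistent fan-control payload. */
    static int example_push_fan_table(const struct intel_lb_component_ops *ops,
                                      struct device *mei_dev,
                                      const void *blob, size_t blob_size)
    {
            int ret = ops->push_payload(mei_dev, INTEL_LB_TYPE_FAN_CONTROL,
                                        INTEL_LB_FLAG_IS_PERSISTENT,
                                        blob, blob_size);
            if (ret < 0)
                    return ret;     /* transport failure */
            if (ret > 0)
                    return -EIO;    /* firmware reported an INTEL_LB_STATUS_* error */
            return 0;
    }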
+
+#endif /* _INTEL_LB_MEI_INTERFACE_H_ */
diff --git a/include/drm/intel_lpe_audio.h b/include/drm/intel/intel_lpe_audio.h
index b6121c8fe539..b6121c8fe539 100644
--- a/include/drm/intel_lpe_audio.h
+++ b/include/drm/intel/intel_lpe_audio.h
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
new file mode 100644
index 000000000000..52520e684ab1
--- /dev/null
+++ b/include/drm/intel/pciids.h
@@ -0,0 +1,903 @@
+/*
+ * Copyright 2013 Intel Corporation
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __PCIIDS_H__
+#define __PCIIDS_H__
+
+#ifdef __KERNEL__
+#define INTEL_PCI_DEVICE(_id, _info) { \
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, (_id)), \
+ .driver_data = (kernel_ulong_t)(_info), \
+}
+
+#define INTEL_VGA_DEVICE(_id, _info) { \
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, (_id)), \
+ .class = PCI_BASE_CLASS_DISPLAY << 16, .class_mask = 0xff << 16, \
+ .driver_data = (kernel_ulong_t)(_info), \
+}
+
+#define INTEL_QUANTA_VGA_DEVICE(_info) { \
+ .vendor = PCI_VENDOR_ID_INTEL, .device = 0x16a, \
+ .subvendor = 0x152d, .subdevice = 0x8990, \
+ .class = PCI_BASE_CLASS_DISPLAY << 16, .class_mask = 0xff << 16, \
+ .driver_data = (kernel_ulong_t)(_info), \
+}
+#endif
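
The ID lists below are X-macros: each expands every device ID through MACRO__ with any trailing arguments appended, so a driver can stamp out a pci_device_id table in one line. A minimal sketch, with tgl_info as a placeholder driver-data object:

    /* Hypothetical device table built from one of the ID lists. */
    static const int tgl_info;      /* placeholder driver data */

    static const struct pci_device_id example_ids[] = {
            INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_info),
            { /* sentinel */ }
    };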
+
+#define INTEL_I810_IDS(MACRO__, ...) \
+ MACRO__(0x7121, ## __VA_ARGS__), /* I810 */ \
+ MACRO__(0x7123, ## __VA_ARGS__), /* I810_DC100 */ \
+ MACRO__(0x7125, ## __VA_ARGS__) /* I810_E */
+
+#define INTEL_I815_IDS(MACRO__, ...) \
+ MACRO__(0x1132, ## __VA_ARGS__) /* I815*/
+
+#define INTEL_I830_IDS(MACRO__, ...) \
+ MACRO__(0x3577, ## __VA_ARGS__)
+
+#define INTEL_I845G_IDS(MACRO__, ...) \
+ MACRO__(0x2562, ## __VA_ARGS__)
+
+#define INTEL_I85X_IDS(MACRO__, ...) \
+ MACRO__(0x3582, ## __VA_ARGS__), /* I855_GM */ \
+ MACRO__(0x358e, ## __VA_ARGS__)
+
+#define INTEL_I865G_IDS(MACRO__, ...) \
+ MACRO__(0x2572, ## __VA_ARGS__) /* I865_G */
+
+#define INTEL_I915G_IDS(MACRO__, ...) \
+ MACRO__(0x2582, ## __VA_ARGS__), /* I915_G */ \
+ MACRO__(0x258a, ## __VA_ARGS__) /* E7221_G */
+
+#define INTEL_I915GM_IDS(MACRO__, ...) \
+ MACRO__(0x2592, ## __VA_ARGS__) /* I915_GM */
+
+#define INTEL_I945G_IDS(MACRO__, ...) \
+ MACRO__(0x2772, ## __VA_ARGS__) /* I945_G */
+
+#define INTEL_I945GM_IDS(MACRO__, ...) \
+ MACRO__(0x27a2, ## __VA_ARGS__), /* I945_GM */ \
+ MACRO__(0x27ae, ## __VA_ARGS__) /* I945_GME */
+
+#define INTEL_I965G_IDS(MACRO__, ...) \
+ MACRO__(0x2972, ## __VA_ARGS__), /* I946_GZ */ \
+ MACRO__(0x2982, ## __VA_ARGS__), /* G35_G */ \
+ MACRO__(0x2992, ## __VA_ARGS__), /* I965_Q */ \
+ MACRO__(0x29a2, ## __VA_ARGS__) /* I965_G */
+
+#define INTEL_G33_IDS(MACRO__, ...) \
+ MACRO__(0x29b2, ## __VA_ARGS__), /* Q35_G */ \
+ MACRO__(0x29c2, ## __VA_ARGS__), /* G33_G */ \
+ MACRO__(0x29d2, ## __VA_ARGS__) /* Q33_G */
+
+#define INTEL_I965GM_IDS(MACRO__, ...) \
+ MACRO__(0x2a02, ## __VA_ARGS__), /* I965_GM */ \
+ MACRO__(0x2a12, ## __VA_ARGS__) /* I965_GME */
+
+#define INTEL_GM45_IDS(MACRO__, ...) \
+ MACRO__(0x2a42, ## __VA_ARGS__) /* GM45_G */
+
+#define INTEL_G45_IDS(MACRO__, ...) \
+ MACRO__(0x2e02, ## __VA_ARGS__), /* IGD_E_G */ \
+ MACRO__(0x2e12, ## __VA_ARGS__), /* Q45_G */ \
+ MACRO__(0x2e22, ## __VA_ARGS__), /* G45_G */ \
+ MACRO__(0x2e32, ## __VA_ARGS__), /* G41_G */ \
+ MACRO__(0x2e42, ## __VA_ARGS__), /* B43_G */ \
+ MACRO__(0x2e92, ## __VA_ARGS__) /* B43_G.1 */
+
+#define INTEL_PNV_G_IDS(MACRO__, ...) \
+ MACRO__(0xa001, ## __VA_ARGS__)
+
+#define INTEL_PNV_M_IDS(MACRO__, ...) \
+ MACRO__(0xa011, ## __VA_ARGS__)
+
+#define INTEL_PNV_IDS(MACRO__, ...) \
+ INTEL_PNV_G_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_PNV_M_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_ILK_D_IDS(MACRO__, ...) \
+ MACRO__(0x0042, ## __VA_ARGS__)
+
+#define INTEL_ILK_M_IDS(MACRO__, ...) \
+ MACRO__(0x0046, ## __VA_ARGS__)
+
+#define INTEL_ILK_IDS(MACRO__, ...) \
+ INTEL_ILK_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_ILK_M_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_SNB_D_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0102, ## __VA_ARGS__), \
+ MACRO__(0x010A, ## __VA_ARGS__)
+
+#define INTEL_SNB_D_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0112, ## __VA_ARGS__), \
+ MACRO__(0x0122, ## __VA_ARGS__)
+
+#define INTEL_SNB_D_IDS(MACRO__, ...) \
+ INTEL_SNB_D_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SNB_D_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_SNB_M_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0106, ## __VA_ARGS__)
+
+#define INTEL_SNB_M_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0116, ## __VA_ARGS__), \
+ MACRO__(0x0126, ## __VA_ARGS__)
+
+#define INTEL_SNB_M_IDS(MACRO__, ...) \
+ INTEL_SNB_M_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SNB_M_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_SNB_IDS(MACRO__, ...) \
+ INTEL_SNB_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SNB_M_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_IVB_M_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0156, ## __VA_ARGS__) /* GT1 mobile */
+
+#define INTEL_IVB_M_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0166, ## __VA_ARGS__) /* GT2 mobile */
+
+#define INTEL_IVB_M_IDS(MACRO__, ...) \
+ INTEL_IVB_M_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_IVB_M_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_IVB_D_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0152, ## __VA_ARGS__), /* GT1 desktop */ \
+ MACRO__(0x015a, ## __VA_ARGS__) /* GT1 server */
+
+#define INTEL_IVB_D_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0162, ## __VA_ARGS__), /* GT2 desktop */ \
+ MACRO__(0x016a, ## __VA_ARGS__) /* GT2 server */
+
+#define INTEL_IVB_D_IDS(MACRO__, ...) \
+ INTEL_IVB_D_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_IVB_D_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_IVB_IDS(MACRO__, ...) \
+ INTEL_IVB_M_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_IVB_D_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_IVB_Q_IDS(MACRO__, ...) \
+ INTEL_QUANTA_VGA_DEVICE(__VA_ARGS__) /* Quanta transcode */
+
+#define INTEL_HSW_ULT_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0A02, ## __VA_ARGS__), /* ULT GT1 desktop */ \
+ MACRO__(0x0A06, ## __VA_ARGS__), /* ULT GT1 mobile */ \
+ MACRO__(0x0A0A, ## __VA_ARGS__), /* ULT GT1 server */ \
+ MACRO__(0x0A0B, ## __VA_ARGS__) /* ULT GT1 reserved */
+
+#define INTEL_HSW_ULX_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x0A0E, ## __VA_ARGS__) /* ULX GT1 mobile */
+
+#define INTEL_HSW_GT1_IDS(MACRO__, ...) \
+ INTEL_HSW_ULT_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_HSW_ULX_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x0402, ## __VA_ARGS__), /* GT1 desktop */ \
+ MACRO__(0x0406, ## __VA_ARGS__), /* GT1 mobile */ \
+ MACRO__(0x040A, ## __VA_ARGS__), /* GT1 server */ \
+ MACRO__(0x040B, ## __VA_ARGS__), /* GT1 reserved */ \
+ MACRO__(0x040E, ## __VA_ARGS__), /* GT1 reserved */ \
+ MACRO__(0x0C02, ## __VA_ARGS__), /* SDV GT1 desktop */ \
+ MACRO__(0x0C06, ## __VA_ARGS__), /* SDV GT1 mobile */ \
+ MACRO__(0x0C0A, ## __VA_ARGS__), /* SDV GT1 server */ \
+ MACRO__(0x0C0B, ## __VA_ARGS__), /* SDV GT1 reserved */ \
+ MACRO__(0x0C0E, ## __VA_ARGS__), /* SDV GT1 reserved */ \
+ MACRO__(0x0D02, ## __VA_ARGS__), /* CRW GT1 desktop */ \
+ MACRO__(0x0D06, ## __VA_ARGS__), /* CRW GT1 mobile */ \
+ MACRO__(0x0D0A, ## __VA_ARGS__), /* CRW GT1 server */ \
+ MACRO__(0x0D0B, ## __VA_ARGS__), /* CRW GT1 reserved */ \
+ MACRO__(0x0D0E, ## __VA_ARGS__) /* CRW GT1 reserved */
+
+#define INTEL_HSW_ULT_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0A12, ## __VA_ARGS__), /* ULT GT2 desktop */ \
+ MACRO__(0x0A16, ## __VA_ARGS__), /* ULT GT2 mobile */ \
+ MACRO__(0x0A1A, ## __VA_ARGS__), /* ULT GT2 server */ \
+ MACRO__(0x0A1B, ## __VA_ARGS__) /* ULT GT2 reserved */
+
+#define INTEL_HSW_ULX_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x0A1E, ## __VA_ARGS__) /* ULX GT2 mobile */
+
+#define INTEL_HSW_GT2_IDS(MACRO__, ...) \
+ INTEL_HSW_ULT_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_HSW_ULX_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x0412, ## __VA_ARGS__), /* GT2 desktop */ \
+ MACRO__(0x0416, ## __VA_ARGS__), /* GT2 mobile */ \
+ MACRO__(0x041A, ## __VA_ARGS__), /* GT2 server */ \
+ MACRO__(0x041B, ## __VA_ARGS__), /* GT2 reserved */ \
+ MACRO__(0x041E, ## __VA_ARGS__), /* GT2 reserved */ \
+ MACRO__(0x0C12, ## __VA_ARGS__), /* SDV GT2 desktop */ \
+ MACRO__(0x0C16, ## __VA_ARGS__), /* SDV GT2 mobile */ \
+ MACRO__(0x0C1A, ## __VA_ARGS__), /* SDV GT2 server */ \
+ MACRO__(0x0C1B, ## __VA_ARGS__), /* SDV GT2 reserved */ \
+ MACRO__(0x0C1E, ## __VA_ARGS__), /* SDV GT2 reserved */ \
+ MACRO__(0x0D12, ## __VA_ARGS__), /* CRW GT2 desktop */ \
+ MACRO__(0x0D16, ## __VA_ARGS__), /* CRW GT2 mobile */ \
+ MACRO__(0x0D1A, ## __VA_ARGS__), /* CRW GT2 server */ \
+ MACRO__(0x0D1B, ## __VA_ARGS__), /* CRW GT2 reserved */ \
+ MACRO__(0x0D1E, ## __VA_ARGS__) /* CRW GT2 reserved */
+
+#define INTEL_HSW_ULT_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x0A22, ## __VA_ARGS__), /* ULT GT3 desktop */ \
+ MACRO__(0x0A26, ## __VA_ARGS__), /* ULT GT3 mobile */ \
+ MACRO__(0x0A2A, ## __VA_ARGS__), /* ULT GT3 server */ \
+ MACRO__(0x0A2B, ## __VA_ARGS__), /* ULT GT3 reserved */ \
+ MACRO__(0x0A2E, ## __VA_ARGS__) /* ULT GT3 reserved */
+
+#define INTEL_HSW_GT3_IDS(MACRO__, ...) \
+ INTEL_HSW_ULT_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x0422, ## __VA_ARGS__), /* GT3 desktop */ \
+ MACRO__(0x0426, ## __VA_ARGS__), /* GT3 mobile */ \
+ MACRO__(0x042A, ## __VA_ARGS__), /* GT3 server */ \
+ MACRO__(0x042B, ## __VA_ARGS__), /* GT3 reserved */ \
+ MACRO__(0x042E, ## __VA_ARGS__), /* GT3 reserved */ \
+ MACRO__(0x0C22, ## __VA_ARGS__), /* SDV GT3 desktop */ \
+ MACRO__(0x0C26, ## __VA_ARGS__), /* SDV GT3 mobile */ \
+ MACRO__(0x0C2A, ## __VA_ARGS__), /* SDV GT3 server */ \
+ MACRO__(0x0C2B, ## __VA_ARGS__), /* SDV GT3 reserved */ \
+ MACRO__(0x0C2E, ## __VA_ARGS__), /* SDV GT3 reserved */ \
+ MACRO__(0x0D22, ## __VA_ARGS__), /* CRW GT3 desktop */ \
+ MACRO__(0x0D26, ## __VA_ARGS__), /* CRW GT3 mobile */ \
+ MACRO__(0x0D2A, ## __VA_ARGS__), /* CRW GT3 server */ \
+ MACRO__(0x0D2B, ## __VA_ARGS__), /* CRW GT3 reserved */ \
+ MACRO__(0x0D2E, ## __VA_ARGS__) /* CRW GT3 reserved */
+
+#define INTEL_HSW_IDS(MACRO__, ...) \
+ INTEL_HSW_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_HSW_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_HSW_GT3_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_VLV_IDS(MACRO__, ...) \
+ MACRO__(0x0f30, ## __VA_ARGS__), \
+ MACRO__(0x0f31, ## __VA_ARGS__), \
+ MACRO__(0x0f32, ## __VA_ARGS__), \
+ MACRO__(0x0f33, ## __VA_ARGS__)
+
+#define INTEL_BDW_ULT_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x1606, ## __VA_ARGS__), /* GT1 ULT */ \
+ MACRO__(0x160B, ## __VA_ARGS__) /* GT1 Iris */
+
+#define INTEL_BDW_ULX_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x160E, ## __VA_ARGS__) /* GT1 ULX */
+
+#define INTEL_BDW_GT1_IDS(MACRO__, ...) \
+ INTEL_BDW_ULT_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_ULX_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1602, ## __VA_ARGS__), /* GT1 ULT */ \
+ MACRO__(0x160A, ## __VA_ARGS__), /* GT1 Server */ \
+ MACRO__(0x160D, ## __VA_ARGS__) /* GT1 Workstation */
+
+#define INTEL_BDW_ULT_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x1616, ## __VA_ARGS__), /* GT2 ULT */ \
+ MACRO__(0x161B, ## __VA_ARGS__) /* GT2 ULT */
+
+#define INTEL_BDW_ULX_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x161E, ## __VA_ARGS__) /* GT2 ULX */
+
+#define INTEL_BDW_GT2_IDS(MACRO__, ...) \
+ INTEL_BDW_ULT_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_ULX_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1612, ## __VA_ARGS__), /* GT2 Halo */ \
+ MACRO__(0x161A, ## __VA_ARGS__), /* GT2 Server */ \
+ MACRO__(0x161D, ## __VA_ARGS__) /* GT2 Workstation */
+
+#define INTEL_BDW_ULT_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x1626, ## __VA_ARGS__), /* ULT */ \
+ MACRO__(0x162B, ## __VA_ARGS__) /* Iris */
+
+#define INTEL_BDW_ULX_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x162E, ## __VA_ARGS__) /* ULX */
+
+#define INTEL_BDW_GT3_IDS(MACRO__, ...) \
+ INTEL_BDW_ULT_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_ULX_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1622, ## __VA_ARGS__), /* ULT */ \
+ MACRO__(0x162A, ## __VA_ARGS__), /* Server */ \
+ MACRO__(0x162D, ## __VA_ARGS__) /* Workstation */
+
+#define INTEL_BDW_ULT_RSVD_IDS(MACRO__, ...) \
+ MACRO__(0x1636, ## __VA_ARGS__), /* ULT */ \
+ MACRO__(0x163B, ## __VA_ARGS__) /* Iris */
+
+#define INTEL_BDW_ULX_RSVD_IDS(MACRO__, ...) \
+ MACRO__(0x163E, ## __VA_ARGS__) /* ULX */
+
+#define INTEL_BDW_RSVD_IDS(MACRO__, ...) \
+ INTEL_BDW_ULT_RSVD_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_ULX_RSVD_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1632, ## __VA_ARGS__), /* ULT */ \
+ MACRO__(0x163A, ## __VA_ARGS__), /* Server */ \
+ MACRO__(0x163D, ## __VA_ARGS__) /* Workstation */
+
+#define INTEL_BDW_IDS(MACRO__, ...) \
+ INTEL_BDW_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_BDW_RSVD_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_CHV_IDS(MACRO__, ...) \
+ MACRO__(0x22b0, ## __VA_ARGS__), \
+ MACRO__(0x22b1, ## __VA_ARGS__), \
+ MACRO__(0x22b2, ## __VA_ARGS__), \
+ MACRO__(0x22b3, ## __VA_ARGS__)
+
+#define INTEL_SKL_ULT_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x1906, ## __VA_ARGS__), /* ULT GT1 */ \
+ MACRO__(0x1913, ## __VA_ARGS__) /* ULT GT1.5 */
+
+#define INTEL_SKL_ULX_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x190E, ## __VA_ARGS__), /* ULX GT1 */ \
+ MACRO__(0x1915, ## __VA_ARGS__) /* ULX GT1.5 */
+
+#define INTEL_SKL_GT1_IDS(MACRO__, ...) \
+ INTEL_SKL_ULT_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SKL_ULX_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1902, ## __VA_ARGS__), /* DT GT1 */ \
+ MACRO__(0x190A, ## __VA_ARGS__), /* SRV GT1 */ \
+ MACRO__(0x190B, ## __VA_ARGS__), /* Halo GT1 */ \
+ MACRO__(0x1917, ## __VA_ARGS__) /* DT GT1.5 */
+
+#define INTEL_SKL_ULT_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x1916, ## __VA_ARGS__), /* ULT GT2 */ \
+ MACRO__(0x1921, ## __VA_ARGS__) /* ULT GT2F */
+
+#define INTEL_SKL_ULX_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x191E, ## __VA_ARGS__) /* ULX GT2 */
+
+#define INTEL_SKL_GT2_IDS(MACRO__, ...) \
+ INTEL_SKL_ULT_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SKL_ULX_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x1912, ## __VA_ARGS__), /* DT GT2 */ \
+ MACRO__(0x191A, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x191B, ## __VA_ARGS__), /* Halo GT2 */ \
+ MACRO__(0x191D, ## __VA_ARGS__) /* WKS GT2 */
+
+#define INTEL_SKL_ULT_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x1923, ## __VA_ARGS__), /* ULT GT3 */ \
+ MACRO__(0x1926, ## __VA_ARGS__), /* ULT GT3e */ \
+ MACRO__(0x1927, ## __VA_ARGS__) /* ULT GT3e */
+
+#define INTEL_SKL_GT3_IDS(MACRO__, ...) \
+ INTEL_SKL_ULT_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x192A, ## __VA_ARGS__), /* SRV GT3 */ \
+ MACRO__(0x192B, ## __VA_ARGS__), /* Halo GT3e */ \
+ MACRO__(0x192D, ## __VA_ARGS__) /* SRV GT3e */
+
+#define INTEL_SKL_GT4_IDS(MACRO__, ...) \
+ MACRO__(0x1932, ## __VA_ARGS__), /* DT GT4 */ \
+ MACRO__(0x193A, ## __VA_ARGS__), /* SRV GT4e */ \
+ MACRO__(0x193B, ## __VA_ARGS__), /* Halo GT4e */ \
+ MACRO__(0x193D, ## __VA_ARGS__) /* WKS GT4e */
+
+#define INTEL_SKL_IDS(MACRO__, ...) \
+ INTEL_SKL_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SKL_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SKL_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_SKL_GT4_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_BXT_IDS(MACRO__, ...) \
+ MACRO__(0x0A84, ## __VA_ARGS__), \
+ MACRO__(0x1A84, ## __VA_ARGS__), \
+ MACRO__(0x1A85, ## __VA_ARGS__), \
+ MACRO__(0x5A84, ## __VA_ARGS__), /* APL HD Graphics 505 */ \
+ MACRO__(0x5A85, ## __VA_ARGS__) /* APL HD Graphics 500 */
+
+#define INTEL_GLK_IDS(MACRO__, ...) \
+ MACRO__(0x3184, ## __VA_ARGS__), \
+ MACRO__(0x3185, ## __VA_ARGS__)
+
+#define INTEL_KBL_ULT_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x5906, ## __VA_ARGS__), /* ULT GT1 */ \
+ MACRO__(0x5913, ## __VA_ARGS__) /* ULT GT1.5 */
+
+#define INTEL_KBL_ULX_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x590E, ## __VA_ARGS__), /* ULX GT1 */ \
+ MACRO__(0x5915, ## __VA_ARGS__) /* ULX GT1.5 */
+
+#define INTEL_KBL_GT1_IDS(MACRO__, ...) \
+ INTEL_KBL_ULT_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_KBL_ULX_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x5902, ## __VA_ARGS__), /* DT GT1 */ \
+ MACRO__(0x5908, ## __VA_ARGS__), /* Halo GT1 */ \
+ MACRO__(0x590A, ## __VA_ARGS__), /* SRV GT1 */ \
+ MACRO__(0x590B, ## __VA_ARGS__) /* Halo GT1 */
+
+#define INTEL_KBL_ULT_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x5916, ## __VA_ARGS__), /* ULT GT2 */ \
+ MACRO__(0x5921, ## __VA_ARGS__) /* ULT GT2F */
+
+#define INTEL_KBL_ULX_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x591E, ## __VA_ARGS__) /* ULX GT2 */
+
+#define INTEL_KBL_GT2_IDS(MACRO__, ...) \
+ INTEL_KBL_ULT_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_KBL_ULX_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x5912, ## __VA_ARGS__), /* DT GT2 */ \
+ MACRO__(0x5917, ## __VA_ARGS__), /* Mobile GT2 */ \
+ MACRO__(0x591A, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x591B, ## __VA_ARGS__), /* Halo GT2 */ \
+ MACRO__(0x591D, ## __VA_ARGS__) /* WKS GT2 */
+
+#define INTEL_KBL_ULT_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x5926, ## __VA_ARGS__) /* ULT GT3 */
+
+#define INTEL_KBL_GT3_IDS(MACRO__, ...) \
+ INTEL_KBL_ULT_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x5923, ## __VA_ARGS__), /* ULT GT3 */ \
+ MACRO__(0x5927, ## __VA_ARGS__) /* ULT GT3 */
+
+#define INTEL_KBL_GT4_IDS(MACRO__, ...) \
+ MACRO__(0x593B, ## __VA_ARGS__) /* Halo GT4 */
+
+/* AML/KBL Y GT2 */
+#define INTEL_AML_KBL_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x591C, ## __VA_ARGS__), /* ULX GT2 */ \
+ MACRO__(0x87C0, ## __VA_ARGS__) /* ULX GT2 */
+
+/* AML/CFL Y GT2 */
+#define INTEL_AML_CFL_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x87CA, ## __VA_ARGS__)
+
+/* CML GT1 */
+#define INTEL_CML_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x9BA2, ## __VA_ARGS__), \
+ MACRO__(0x9BA4, ## __VA_ARGS__), \
+ MACRO__(0x9BA5, ## __VA_ARGS__), \
+ MACRO__(0x9BA8, ## __VA_ARGS__)
+
+#define INTEL_CML_U_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x9B21, ## __VA_ARGS__), \
+ MACRO__(0x9BAA, ## __VA_ARGS__), \
+ MACRO__(0x9BAC, ## __VA_ARGS__)
+
+/* CML GT2 */
+#define INTEL_CML_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x9BC2, ## __VA_ARGS__), \
+ MACRO__(0x9BC4, ## __VA_ARGS__), \
+ MACRO__(0x9BC5, ## __VA_ARGS__), \
+ MACRO__(0x9BC6, ## __VA_ARGS__), \
+ MACRO__(0x9BC8, ## __VA_ARGS__), \
+ MACRO__(0x9BE6, ## __VA_ARGS__), \
+ MACRO__(0x9BF6, ## __VA_ARGS__)
+
+#define INTEL_CML_U_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x9B41, ## __VA_ARGS__), \
+ MACRO__(0x9BCA, ## __VA_ARGS__), \
+ MACRO__(0x9BCC, ## __VA_ARGS__)
+
+#define INTEL_CML_IDS(MACRO__, ...) \
+ INTEL_CML_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CML_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CML_U_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CML_U_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_KBL_IDS(MACRO__, ...) \
+ INTEL_KBL_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_KBL_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_KBL_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_KBL_GT4_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_AML_KBL_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+/* CFL S */
+#define INTEL_CFL_S_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x3E90, ## __VA_ARGS__), /* SRV GT1 */ \
+ MACRO__(0x3E93, ## __VA_ARGS__), /* SRV GT1 */ \
+ MACRO__(0x3E99, ## __VA_ARGS__) /* SRV GT1 */
+
+#define INTEL_CFL_S_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x3E91, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x3E92, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x3E96, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x3E98, ## __VA_ARGS__), /* SRV GT2 */ \
+ MACRO__(0x3E9A, ## __VA_ARGS__) /* SRV GT2 */
+
+/* CFL H */
+#define INTEL_CFL_H_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x3E9C, ## __VA_ARGS__)
+
+#define INTEL_CFL_H_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x3E94, ## __VA_ARGS__), /* Halo GT2 */ \
+ MACRO__(0x3E9B, ## __VA_ARGS__) /* Halo GT2 */
+
+/* CFL U GT2 */
+#define INTEL_CFL_U_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x3EA9, ## __VA_ARGS__)
+
+/* CFL U GT3 */
+#define INTEL_CFL_U_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x3EA5, ## __VA_ARGS__), /* ULT GT3 */ \
+ MACRO__(0x3EA6, ## __VA_ARGS__), /* ULT GT3 */ \
+ MACRO__(0x3EA7, ## __VA_ARGS__), /* ULT GT3 */ \
+ MACRO__(0x3EA8, ## __VA_ARGS__) /* ULT GT3 */
+
+#define INTEL_CFL_IDS(MACRO__, ...) \
+ INTEL_CFL_S_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CFL_S_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CFL_H_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CFL_H_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CFL_U_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_CFL_U_GT3_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_AML_CFL_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+/* WHL/CFL U GT1 */
+#define INTEL_WHL_U_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x3EA1, ## __VA_ARGS__), \
+ MACRO__(0x3EA4, ## __VA_ARGS__)
+
+/* WHL/CFL U GT2 */
+#define INTEL_WHL_U_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x3EA0, ## __VA_ARGS__), \
+ MACRO__(0x3EA3, ## __VA_ARGS__)
+
+/* WHL/CFL U GT3 */
+#define INTEL_WHL_U_GT3_IDS(MACRO__, ...) \
+ MACRO__(0x3EA2, ## __VA_ARGS__)
+
+#define INTEL_WHL_IDS(MACRO__, ...) \
+ INTEL_WHL_U_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_WHL_U_GT2_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_WHL_U_GT3_IDS(MACRO__, ## __VA_ARGS__)
+
+/* CNL */
+#define INTEL_CNL_PORT_F_IDS(MACRO__, ...) \
+ MACRO__(0x5A44, ## __VA_ARGS__), \
+ MACRO__(0x5A4C, ## __VA_ARGS__), \
+ MACRO__(0x5A54, ## __VA_ARGS__), \
+ MACRO__(0x5A5C, ## __VA_ARGS__)
+
+#define INTEL_CNL_IDS(MACRO__, ...) \
+ INTEL_CNL_PORT_F_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x5A40, ## __VA_ARGS__), \
+ MACRO__(0x5A41, ## __VA_ARGS__), \
+ MACRO__(0x5A42, ## __VA_ARGS__), \
+ MACRO__(0x5A49, ## __VA_ARGS__), \
+ MACRO__(0x5A4A, ## __VA_ARGS__), \
+ MACRO__(0x5A50, ## __VA_ARGS__), \
+ MACRO__(0x5A51, ## __VA_ARGS__), \
+ MACRO__(0x5A52, ## __VA_ARGS__), \
+ MACRO__(0x5A59, ## __VA_ARGS__), \
+ MACRO__(0x5A5A, ## __VA_ARGS__)
+
+/* ICL */
+#define INTEL_ICL_PORT_F_IDS(MACRO__, ...) \
+ MACRO__(0x8A50, ## __VA_ARGS__), \
+ MACRO__(0x8A52, ## __VA_ARGS__), \
+ MACRO__(0x8A53, ## __VA_ARGS__), \
+ MACRO__(0x8A54, ## __VA_ARGS__), \
+ MACRO__(0x8A56, ## __VA_ARGS__), \
+ MACRO__(0x8A57, ## __VA_ARGS__), \
+ MACRO__(0x8A58, ## __VA_ARGS__), \
+ MACRO__(0x8A59, ## __VA_ARGS__), \
+ MACRO__(0x8A5A, ## __VA_ARGS__), \
+ MACRO__(0x8A5B, ## __VA_ARGS__), \
+ MACRO__(0x8A5C, ## __VA_ARGS__), \
+ MACRO__(0x8A70, ## __VA_ARGS__), \
+ MACRO__(0x8A71, ## __VA_ARGS__)
+
+#define INTEL_ICL_IDS(MACRO__, ...) \
+ INTEL_ICL_PORT_F_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x8A51, ## __VA_ARGS__), \
+ MACRO__(0x8A5D, ## __VA_ARGS__)
+
+/* EHL */
+#define INTEL_EHL_IDS(MACRO__, ...) \
+ MACRO__(0x4541, ## __VA_ARGS__), \
+ MACRO__(0x4551, ## __VA_ARGS__), \
+ MACRO__(0x4555, ## __VA_ARGS__), \
+ MACRO__(0x4557, ## __VA_ARGS__), \
+ MACRO__(0x4570, ## __VA_ARGS__), \
+ MACRO__(0x4571, ## __VA_ARGS__)
+
+/* JSL */
+#define INTEL_JSL_IDS(MACRO__, ...) \
+ MACRO__(0x4E51, ## __VA_ARGS__), \
+ MACRO__(0x4E55, ## __VA_ARGS__), \
+ MACRO__(0x4E57, ## __VA_ARGS__), \
+ MACRO__(0x4E61, ## __VA_ARGS__), \
+ MACRO__(0x4E71, ## __VA_ARGS__)
+
+/* TGL */
+#define INTEL_TGL_GT1_IDS(MACRO__, ...) \
+ MACRO__(0x9A60, ## __VA_ARGS__), \
+ MACRO__(0x9A68, ## __VA_ARGS__), \
+ MACRO__(0x9A70, ## __VA_ARGS__)
+
+#define INTEL_TGL_GT2_IDS(MACRO__, ...) \
+ MACRO__(0x9A40, ## __VA_ARGS__), \
+ MACRO__(0x9A49, ## __VA_ARGS__), \
+ MACRO__(0x9A59, ## __VA_ARGS__), \
+ MACRO__(0x9A78, ## __VA_ARGS__), \
+ MACRO__(0x9AC0, ## __VA_ARGS__), \
+ MACRO__(0x9AC9, ## __VA_ARGS__), \
+ MACRO__(0x9AD9, ## __VA_ARGS__), \
+ MACRO__(0x9AF8, ## __VA_ARGS__)
+
+#define INTEL_TGL_IDS(MACRO__, ...) \
+ INTEL_TGL_GT1_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_TGL_GT2_IDS(MACRO__, ## __VA_ARGS__)
+
+/* RKL */
+#define INTEL_RKL_IDS(MACRO__, ...) \
+ MACRO__(0x4C80, ## __VA_ARGS__), \
+ MACRO__(0x4C8A, ## __VA_ARGS__), \
+ MACRO__(0x4C8B, ## __VA_ARGS__), \
+ MACRO__(0x4C8C, ## __VA_ARGS__), \
+ MACRO__(0x4C90, ## __VA_ARGS__), \
+ MACRO__(0x4C9A, ## __VA_ARGS__)
+
+/* DG1 */
+#define INTEL_DG1_IDS(MACRO__, ...) \
+ MACRO__(0x4905, ## __VA_ARGS__), \
+ MACRO__(0x4906, ## __VA_ARGS__), \
+ MACRO__(0x4907, ## __VA_ARGS__), \
+ MACRO__(0x4908, ## __VA_ARGS__), \
+ MACRO__(0x4909, ## __VA_ARGS__)
+
+/* ADL-S */
+#define INTEL_ADLS_IDS(MACRO__, ...) \
+ MACRO__(0x4680, ## __VA_ARGS__), \
+ MACRO__(0x4682, ## __VA_ARGS__), \
+ MACRO__(0x4688, ## __VA_ARGS__), \
+ MACRO__(0x468A, ## __VA_ARGS__), \
+ MACRO__(0x468B, ## __VA_ARGS__), \
+ MACRO__(0x4690, ## __VA_ARGS__), \
+ MACRO__(0x4692, ## __VA_ARGS__), \
+ MACRO__(0x4693, ## __VA_ARGS__)
+
+/* ADL-P */
+#define INTEL_ADLP_IDS(MACRO__, ...) \
+ MACRO__(0x46A0, ## __VA_ARGS__), \
+ MACRO__(0x46A1, ## __VA_ARGS__), \
+ MACRO__(0x46A2, ## __VA_ARGS__), \
+ MACRO__(0x46A3, ## __VA_ARGS__), \
+ MACRO__(0x46A6, ## __VA_ARGS__), \
+ MACRO__(0x46A8, ## __VA_ARGS__), \
+ MACRO__(0x46AA, ## __VA_ARGS__), \
+ MACRO__(0x462A, ## __VA_ARGS__), \
+ MACRO__(0x4626, ## __VA_ARGS__), \
+ MACRO__(0x4628, ## __VA_ARGS__), \
+ MACRO__(0x46B0, ## __VA_ARGS__), \
+ MACRO__(0x46B1, ## __VA_ARGS__), \
+ MACRO__(0x46B2, ## __VA_ARGS__), \
+ MACRO__(0x46B3, ## __VA_ARGS__), \
+ MACRO__(0x46C0, ## __VA_ARGS__), \
+ MACRO__(0x46C1, ## __VA_ARGS__), \
+ MACRO__(0x46C2, ## __VA_ARGS__), \
+ MACRO__(0x46C3, ## __VA_ARGS__)
+
+/* ADL-N */
+#define INTEL_ADLN_IDS(MACRO__, ...) \
+ MACRO__(0x46D0, ## __VA_ARGS__), \
+ MACRO__(0x46D1, ## __VA_ARGS__), \
+ MACRO__(0x46D2, ## __VA_ARGS__), \
+ MACRO__(0x46D3, ## __VA_ARGS__), \
+ MACRO__(0x46D4, ## __VA_ARGS__)
+
+/* RPL-S */
+#define INTEL_RPLS_IDS(MACRO__, ...) \
+ MACRO__(0xA780, ## __VA_ARGS__), \
+ MACRO__(0xA781, ## __VA_ARGS__), \
+ MACRO__(0xA782, ## __VA_ARGS__), \
+ MACRO__(0xA783, ## __VA_ARGS__), \
+ MACRO__(0xA788, ## __VA_ARGS__), \
+ MACRO__(0xA789, ## __VA_ARGS__), \
+ MACRO__(0xA78A, ## __VA_ARGS__), \
+ MACRO__(0xA78B, ## __VA_ARGS__)
+
+/* RPL-U */
+#define INTEL_RPLU_IDS(MACRO__, ...) \
+ MACRO__(0xA721, ## __VA_ARGS__), \
+ MACRO__(0xA7A1, ## __VA_ARGS__), \
+ MACRO__(0xA7A9, ## __VA_ARGS__), \
+ MACRO__(0xA7AC, ## __VA_ARGS__), \
+ MACRO__(0xA7AD, ## __VA_ARGS__)
+
+/* RPL-P */
+#define INTEL_RPLP_IDS(MACRO__, ...) \
+ MACRO__(0xA720, ## __VA_ARGS__), \
+ MACRO__(0xA7A0, ## __VA_ARGS__), \
+ MACRO__(0xA7A8, ## __VA_ARGS__), \
+ MACRO__(0xA7AA, ## __VA_ARGS__), \
+ MACRO__(0xA7AB, ## __VA_ARGS__)
+
+/* DG2 */
+#define INTEL_DG2_G10_D_IDS(MACRO__, ...) \
+ MACRO__(0x56A0, ## __VA_ARGS__), \
+ MACRO__(0x56A1, ## __VA_ARGS__), \
+ MACRO__(0x56A2, ## __VA_ARGS__)
+
+#define INTEL_DG2_G10_E_IDS(MACRO__, ...) \
+ MACRO__(0x56BE, ## __VA_ARGS__), \
+ MACRO__(0x56BF, ## __VA_ARGS__)
+
+#define INTEL_DG2_G10_M_IDS(MACRO__, ...) \
+ MACRO__(0x5690, ## __VA_ARGS__), \
+ MACRO__(0x5691, ## __VA_ARGS__), \
+ MACRO__(0x5692, ## __VA_ARGS__)
+
+#define INTEL_DG2_G10_IDS(MACRO__, ...) \
+ INTEL_DG2_G10_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G10_E_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G10_M_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_DG2_G11_D_IDS(MACRO__, ...) \
+ MACRO__(0x56A5, ## __VA_ARGS__), \
+ MACRO__(0x56A6, ## __VA_ARGS__), \
+ MACRO__(0x56B0, ## __VA_ARGS__), \
+ MACRO__(0x56B1, ## __VA_ARGS__)
+
+#define INTEL_DG2_G11_E_IDS(MACRO__, ...) \
+ MACRO__(0x56BA, ## __VA_ARGS__), \
+ MACRO__(0x56BB, ## __VA_ARGS__), \
+ MACRO__(0x56BC, ## __VA_ARGS__), \
+ MACRO__(0x56BD, ## __VA_ARGS__)
+
+#define INTEL_DG2_G11_M_IDS(MACRO__, ...) \
+ MACRO__(0x5693, ## __VA_ARGS__), \
+ MACRO__(0x5694, ## __VA_ARGS__), \
+ MACRO__(0x5695, ## __VA_ARGS__)
+
+#define INTEL_DG2_G11_IDS(MACRO__, ...) \
+ INTEL_DG2_G11_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G11_E_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G11_M_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_DG2_G12_D_IDS(MACRO__, ...) \
+ MACRO__(0x56A3, ## __VA_ARGS__), \
+ MACRO__(0x56A4, ## __VA_ARGS__), \
+ MACRO__(0x56B2, ## __VA_ARGS__), \
+ MACRO__(0x56B3, ## __VA_ARGS__)
+
+#define INTEL_DG2_G12_M_IDS(MACRO__, ...) \
+ MACRO__(0x5696, ## __VA_ARGS__), \
+ MACRO__(0x5697, ## __VA_ARGS__)
+
+#define INTEL_DG2_G12_IDS(MACRO__, ...) \
+ INTEL_DG2_G12_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G12_M_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_DG2_D_IDS(MACRO__, ...) \
+ INTEL_DG2_G10_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G11_D_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G12_D_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_DG2_IDS(MACRO__, ...) \
+ INTEL_DG2_G10_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G11_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_DG2_G12_IDS(MACRO__, ## __VA_ARGS__)
+
+#define INTEL_ATS_M150_IDS(MACRO__, ...) \
+ MACRO__(0x56C0, ## __VA_ARGS__), \
+ MACRO__(0x56C2, ## __VA_ARGS__)
+
+#define INTEL_ATS_M75_IDS(MACRO__, ...) \
+ MACRO__(0x56C1, ## __VA_ARGS__)
+
+#define INTEL_ATS_M_IDS(MACRO__, ...) \
+ INTEL_ATS_M150_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_ATS_M75_IDS(MACRO__, ## __VA_ARGS__)
+
+/* ARL */
+#define INTEL_ARL_H_IDS(MACRO__, ...) \
+ MACRO__(0x7D51, ## __VA_ARGS__), \
+ MACRO__(0x7DD1, ## __VA_ARGS__)
+
+#define INTEL_ARL_U_IDS(MACRO__, ...) \
+ MACRO__(0x7D41, ## __VA_ARGS__)
+
+#define INTEL_ARL_S_IDS(MACRO__, ...) \
+ MACRO__(0x7D67, ## __VA_ARGS__), \
+ MACRO__(0xB640, ## __VA_ARGS__)
+
+#define INTEL_ARL_IDS(MACRO__, ...) \
+ INTEL_ARL_H_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_ARL_U_IDS(MACRO__, ## __VA_ARGS__), \
+ INTEL_ARL_S_IDS(MACRO__, ## __VA_ARGS__)
+
+/* MTL */
+#define INTEL_MTL_U_IDS(MACRO__, ...) \
+ MACRO__(0x7D40, ## __VA_ARGS__), \
+ MACRO__(0x7D45, ## __VA_ARGS__)
+
+#define INTEL_MTL_IDS(MACRO__, ...) \
+ INTEL_MTL_U_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0x7D55, ## __VA_ARGS__), \
+ MACRO__(0x7D60, ## __VA_ARGS__), \
+ MACRO__(0x7DD5, ## __VA_ARGS__)
+
+/* PVC */
+#define INTEL_PVC_IDS(MACRO__, ...) \
+ MACRO__(0x0B69, ## __VA_ARGS__), \
+ MACRO__(0x0B6E, ## __VA_ARGS__), \
+ MACRO__(0x0BD4, ## __VA_ARGS__), \
+ MACRO__(0x0BD5, ## __VA_ARGS__), \
+ MACRO__(0x0BD6, ## __VA_ARGS__), \
+ MACRO__(0x0BD7, ## __VA_ARGS__), \
+ MACRO__(0x0BD8, ## __VA_ARGS__), \
+ MACRO__(0x0BD9, ## __VA_ARGS__), \
+ MACRO__(0x0BDA, ## __VA_ARGS__), \
+ MACRO__(0x0BDB, ## __VA_ARGS__), \
+ MACRO__(0x0BE0, ## __VA_ARGS__), \
+ MACRO__(0x0BE1, ## __VA_ARGS__), \
+ MACRO__(0x0BE5, ## __VA_ARGS__)
+
+/* LNL */
+#define INTEL_LNL_IDS(MACRO__, ...) \
+ MACRO__(0x6420, ## __VA_ARGS__), \
+ MACRO__(0x64A0, ## __VA_ARGS__), \
+ MACRO__(0x64B0, ## __VA_ARGS__)
+
+/* BMG */
+#define INTEL_BMG_G21_IDS(MACRO__, ...) \
+ MACRO__(0xE202, ## __VA_ARGS__), \
+ MACRO__(0xE209, ## __VA_ARGS__), \
+ MACRO__(0xE20B, ## __VA_ARGS__), \
+ MACRO__(0xE20C, ## __VA_ARGS__), \
+ MACRO__(0xE20D, ## __VA_ARGS__), \
+ MACRO__(0xE210, ## __VA_ARGS__), \
+ MACRO__(0xE211, ## __VA_ARGS__), \
+ MACRO__(0xE212, ## __VA_ARGS__), \
+ MACRO__(0xE216, ## __VA_ARGS__)
+
+#define INTEL_BMG_IDS(MACRO__, ...) \
+ INTEL_BMG_G21_IDS(MACRO__, ## __VA_ARGS__), \
+ MACRO__(0xE220, ## __VA_ARGS__), \
+ MACRO__(0xE221, ## __VA_ARGS__), \
+ MACRO__(0xE222, ## __VA_ARGS__), \
+ MACRO__(0xE223, ## __VA_ARGS__)
+
+/* PTL */
+#define INTEL_PTL_IDS(MACRO__, ...) \
+ MACRO__(0xB080, ## __VA_ARGS__), \
+ MACRO__(0xB081, ## __VA_ARGS__), \
+ MACRO__(0xB082, ## __VA_ARGS__), \
+ MACRO__(0xB083, ## __VA_ARGS__), \
+ MACRO__(0xB084, ## __VA_ARGS__), \
+ MACRO__(0xB085, ## __VA_ARGS__), \
+ MACRO__(0xB086, ## __VA_ARGS__), \
+ MACRO__(0xB087, ## __VA_ARGS__), \
+ MACRO__(0xB08F, ## __VA_ARGS__), \
+ MACRO__(0xB090, ## __VA_ARGS__), \
+ MACRO__(0xB0A0, ## __VA_ARGS__), \
+ MACRO__(0xB0B0, ## __VA_ARGS__)
+
+/* WCL */
+#define INTEL_WCL_IDS(MACRO__, ...) \
+ MACRO__(0xFD80, ## __VA_ARGS__), \
+ MACRO__(0xFD81, ## __VA_ARGS__)
+
+/* NVL-S */
+#define INTEL_NVLS_IDS(MACRO__, ...) \
+ MACRO__(0xD740, ## __VA_ARGS__), \
+ MACRO__(0xD741, ## __VA_ARGS__), \
+ MACRO__(0xD742, ## __VA_ARGS__), \
+ MACRO__(0xD743, ## __VA_ARGS__), \
+ MACRO__(0xD744, ## __VA_ARGS__), \
+ MACRO__(0xD745, ## __VA_ARGS__)
+
+/* CRI */
+#define INTEL_CRI_IDS(MACRO__, ...) \
+ MACRO__(0x674C, ## __VA_ARGS__)
+
+#endif /* __PCIIDS_H__ */
diff --git a/include/drm/intel/xe_sriov_vfio.h b/include/drm/intel/xe_sriov_vfio.h
new file mode 100644
index 000000000000..e9814e8149fd
--- /dev/null
+++ b/include/drm/intel/xe_sriov_vfio.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VFIO_H_
+#define _XE_SRIOV_VFIO_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+struct xe_device;
+
+/**
+ * xe_sriov_vfio_get_pf() - Get PF &xe_device.
+ * @pdev: the VF &pci_dev device
+ *
+ * Return: pointer to the PF &xe_device on success, NULL otherwise.
+ */
+struct xe_device *xe_sriov_vfio_get_pf(struct pci_dev *pdev);
+
+/**
+ * xe_sriov_vfio_migration_supported() - Check if migration is supported.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ *
+ * Return: true if migration is supported, false otherwise.
+ */
+bool xe_sriov_vfio_migration_supported(struct xe_device *xe);
+
+/**
+ * xe_sriov_vfio_wait_flr_done() - Wait for VF FLR completion.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function will wait until VF FLR is processed by PF on all tiles (or
+ * until timeout occurs).
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_wait_flr_done(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_suspend_device() - Suspend VF.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function will pause VF on all tiles/GTs.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_suspend_device(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_resume_device() - Resume VF.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function will resume VF on all tiles.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_resume_device(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_stop_copy_enter() - Initiate a VF device migration data save.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_stop_copy_enter(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_stop_copy_exit() - Finish a VF device migration data save.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_stop_copy_exit(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_resume_data_enter() - Initiate a VF device migration data restore.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_resume_data_enter(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_resume_data_exit() - Finish a VF device migration data restore.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_resume_data_exit(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_error() - Move VF device to error state.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * A reset is needed to move the VF out of the error state.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_vfio_error(struct xe_device *xe, unsigned int vfid);
+
+/**
+ * xe_sriov_vfio_data_read() - Read migration data from the VF device.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ * @buf: start address of userspace buffer
+ * @len: requested read size from userspace
+ *
+ * Return: number of bytes that have been successfully read,
+ * 0 if no more migration data is available, -errno on failure.
+ */
+ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid,
+				char __user *buf, size_t len);
+
+/**
+ * xe_sriov_vfio_data_write() - Write migration data to the VF device.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ * @buf: start address of userspace buffer
+ * @len: requested write size from userspace
+ *
+ * Return: number of bytes that have been successfully written, -errno on failure.
+ */
+ssize_t xe_sriov_vfio_data_write(struct xe_device *xe, unsigned int vfid,
+				 const char __user *buf, size_t len);
+
+/**
+ * xe_sriov_vfio_stop_copy_size() - Get a size estimate of VF device migration data.
+ * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf()
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * Return: migration data size in bytes or a negative error code on failure.
+ */
+ssize_t xe_sriov_vfio_stop_copy_size(struct xe_device *xe, unsigned int vfid);
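
Pulled together, a hedged sketch of the save-side flow a VFIO variant driver might run with these calls (single read, error unwinding trimmed; the function name is hypothetical):

    /* Illustrative stop-copy save path for one VF. */
    static ssize_t example_save_vf(struct xe_device *xe, unsigned int vfid,
                                   char __user *buf, size_t len)
    {
            ssize_t copied;
            int ret;

            ret = xe_sriov_vfio_suspend_device(xe, vfid);
            if (ret)
                    return ret;

            ret = xe_sriov_vfio_stop_copy_enter(xe, vfid);
            if (ret)
                    return ret;

            copied = xe_sriov_vfio_data_read(xe, vfid, buf, len);

            ret = xe_sriov_vfio_stop_copy_exit(xe, vfid);
            return ret ? ret : copied;
    }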
+
+#endif
diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h
index 125f096c88cb..ee9df8cc67b7 100644
--- a/include/drm/spsc_queue.h
+++ b/include/drm/spsc_queue.h
@@ -70,9 +70,11 @@ static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *n
preempt_disable();
+ atomic_inc(&queue->job_count);
+ smp_mb__after_atomic();
+
tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
WRITE_ONCE(*tail, node);
- atomic_inc(&queue->job_count);
/*
* In case of first element verify new node will be visible to the consumer
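
One plausible reading of why the increment moves ahead of the publish (an interpretation, not taken from the patch itself): with the old order, a consumer could pop the freshly linked node and decrement job_count before the producer's increment landed, letting the counter transiently underflow. Raising the count first, with a full barrier after the atomic, guarantees that any consumer able to see the node also sees its count:

    /*
     * producer                              consumer
     * atomic_inc(&queue->job_count);
     * smp_mb__after_atomic();
     * publish node via xchg + WRITE_ONCE    observes the node,
     *                                       dequeues it, decrements
     *                                       job_count (never below 0)
     */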
diff --git a/include/drm/task_barrier.h b/include/drm/task_barrier.h
index 087e3f649c52..f6e6ed529681 100644
--- a/include/drm/task_barrier.h
+++ b/include/drm/task_barrier.h
@@ -24,8 +24,8 @@
#include <linux/atomic.h>
/*
- * Reusable 2 PHASE task barrier (randevouz point) implementation for N tasks.
- * Based on the Little book of sempahores - https://greenteapress.com/wp/semaphores/
+ * Reusable 2 PHASE task barrier (rendez-vous point) implementation for N tasks.
+ * Based on the Little book of semaphores - https://greenteapress.com/wp/semaphores/
*/
diff --git a/include/drm/ttm/ttm_allocation.h b/include/drm/ttm/ttm_allocation.h
new file mode 100644
index 000000000000..655d1e44aba7
--- /dev/null
+++ b/include/drm/ttm/ttm_allocation.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2025 Valve Corporation */
+
+#ifndef _TTM_ALLOCATION_H_
+#define _TTM_ALLOCATION_H_
+
+#include <linux/bits.h>
+
+#define TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(n) ((n) & 0xff) /* Max order which caller can benefit from */
+#define TTM_ALLOCATION_POOL_USE_DMA_ALLOC BIT(8) /* Use coherent DMA allocations. */
+#define TTM_ALLOCATION_POOL_USE_DMA32 BIT(9) /* Use GFP_DMA32 allocations. */
+#define TTM_ALLOCATION_PROPAGATE_ENOSPC BIT(10) /* Do not convert ENOSPC from resource managers to ENOMEM. */
+
+#endif
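
A small sketch of composing these flags, assuming a pool-creation API consumes them; the values are illustrative:

    /* Order-4 pages are worth it, and we want coherent DMA memory. */
    #define EXAMPLE_POOL_FLAGS \
            (TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(4) | \
             TTM_ALLOCATION_POOL_USE_DMA_ALLOC)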
diff --git a/include/drm/ttm/ttm_backup.h b/include/drm/ttm/ttm_backup.h
new file mode 100644
index 000000000000..c33cba111171
--- /dev/null
+++ b/include/drm/ttm/ttm_backup.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _TTM_BACKUP_H_
+#define _TTM_BACKUP_H_
+
+#include <linux/mm_types.h>
+#include <linux/shmem_fs.h>
+
+/**
+ * ttm_backup_handle_to_page_ptr() - Convert handle to struct page pointer
+ * @handle: The handle to convert.
+ *
+ * Converts an opaque handle received from the
+ * ttm_backup_backup_page() function to an (invalid)
+ * struct page pointer suitable for a struct page array.
+ *
+ * Return: An (invalid) struct page pointer.
+ */
+static inline struct page *
+ttm_backup_handle_to_page_ptr(unsigned long handle)
+{
+ return (struct page *)(handle << 1 | 1);
+}
+
+/**
+ * ttm_backup_page_ptr_is_handle() - Whether a struct page pointer is a handle
+ * @page: The struct page pointer to check.
+ *
+ * Return: true if the struct page pointer is a handle returned from
+ * ttm_backup_handle_to_page_ptr(). False otherwise.
+ */
+static inline bool ttm_backup_page_ptr_is_handle(const struct page *page)
+{
+ return (unsigned long)page & 1;
+}
+
+/**
+ * ttm_backup_page_ptr_to_handle() - Convert a struct page pointer to a handle
+ * @page: The struct page pointer to convert
+ *
+ * Return: The handle that was previously used in
+ * ttm_backup_handle_to_page_ptr() to obtain a struct page pointer, suitable
+ * for use as an argument to the ttm_backup_drop() or
+ * ttm_backup_copy_page() functions.
+ */
+static inline unsigned long
+ttm_backup_page_ptr_to_handle(const struct page *page)
+{
+ WARN_ON(!ttm_backup_page_ptr_is_handle(page));
+ return (unsigned long)page >> 1;
+}
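[Reviewer's note: the three helpers above implement a low-bit tagging scheme. Shifting the handle left and setting bit 0 yields a value that cannot alias a real, at-least-2-byte-aligned struct page pointer, so handles and pages can share one array. A round-trip sketch under that assumption (the function name is hypothetical):

/* Sketch: stash a backup handle in a page array slot and recover it. */
static void backup_roundtrip_example(struct page **pages, pgoff_t i,
				     unsigned long handle)
{
	pages[i] = ttm_backup_handle_to_page_ptr(handle);

	if (ttm_backup_page_ptr_is_handle(pages[i]))
		/* Drops the tag bit and recovers the original handle. */
		handle = ttm_backup_page_ptr_to_handle(pages[i]);
}
]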
+
+void ttm_backup_drop(struct file *backup, pgoff_t handle);
+
+int ttm_backup_copy_page(struct file *backup, struct page *dst,
+ pgoff_t handle, bool intr);
+
+s64
+ttm_backup_backup_page(struct file *backup, struct page *page,
+ bool writeback, pgoff_t idx, gfp_t page_gfp,
+ gfp_t alloc_gfp);
+
+void ttm_backup_fini(struct file *backup);
+
+u64 ttm_backup_bytes_avail(void);
+
+struct file *ttm_backup_shmem_create(loff_t size);
+
+#endif
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
new file mode 100644
index 000000000000..bca3a8849d47
--- /dev/null
+++ b/include/drm/ttm/ttm_bo.h
@@ -0,0 +1,546 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#ifndef _TTM_BO_API_H_
+#define _TTM_BO_API_H_
+
+#include <drm/drm_gem.h>
+
+#include <linux/kref.h>
+#include <linux/list.h>
+
+#include "ttm_device.h"
+
+/* Default number of pre-faulted pages in the TTM fault handler */
+#define TTM_BO_VM_NUM_PREFAULT 16
+
+struct iosys_map;
+
+struct ttm_global;
+struct ttm_device;
+struct ttm_placement;
+struct ttm_place;
+struct ttm_resource;
+struct ttm_resource_manager;
+struct ttm_tt;
+
+/**
+ * enum ttm_bo_type
+ *
+ * @ttm_bo_type_device: These are 'normal' buffers that can
+ * be mmapped by user space. Each of these bos occupies a slot in the
+ * device address space that can be used for normal vm operations.
+ *
+ * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
+ * but they cannot be accessed from user-space. For kernel-only use.
+ *
+ * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
+ * driver.
+ */
+enum ttm_bo_type {
+ ttm_bo_type_device,
+ ttm_bo_type_kernel,
+ ttm_bo_type_sg
+};
+
+/**
+ * struct ttm_buffer_object
+ *
+ * @base: drm_gem_object superclass data.
+ * @bdev: Pointer to the buffer object device structure.
+ * @type: The bo type.
+ * @page_alignment: Page alignment.
+ * @destroy: Destruction function. If NULL, kfree is used.
+ * @kref: Reference count of this buffer object. When this refcount reaches
+ * zero, the object is destroyed or put on the delayed delete list.
+ * @resource: structure describing current placement.
+ * @ttm: TTM structure holding system pages.
+ * @deleted: True if the object is only a zombie and already deleted.
+ * @bulk_move: The bulk move object.
+ * @priority: Priority for LRU, BOs with lower priority are evicted first.
+ * @pin_count: Pin count.
+ *
+ * Base class for TTM buffer object, that deals with data placement and CPU
+ * mappings. GPU mappings are really up to the driver, but for simpler GPUs
+ * the driver can usually use the placement offset @offset directly as the
+ * GPU virtual address. For drivers implementing multiple
+ * GPU memory manager contexts, the driver should manage the address space
+ * in these contexts separately and use these objects to get the correct
+ * placement and caching for these GPU maps. This makes it possible to use
+ * these objects for even quite elaborate memory management schemes.
+ * The destroy member, together with the API visibility of this object,
+ * makes it possible to derive driver-specific types.
+ */
+struct ttm_buffer_object {
+ struct drm_gem_object base;
+
+ /*
+ * Members constant at init.
+ */
+ struct ttm_device *bdev;
+ enum ttm_bo_type type;
+ uint32_t page_alignment;
+ void (*destroy) (struct ttm_buffer_object *);
+
+ /*
+ * Members not needing protection.
+ */
+ struct kref kref;
+
+ /*
+ * Members protected by the bo::resv::reserved lock.
+ */
+ struct ttm_resource *resource;
+ struct ttm_tt *ttm;
+ bool deleted;
+ struct ttm_lru_bulk_move *bulk_move;
+ unsigned priority;
+ unsigned pin_count;
+
+ /**
+ * @delayed_delete: Work item used when we can't delete the BO
+ * immediately
+ */
+ struct work_struct delayed_delete;
+
+ /**
+ * @sg: external source of pages and DMA addresses, protected by the
+ * reservation lock.
+ */
+ struct sg_table *sg;
+};
+
+#define TTM_BO_MAP_IOMEM_MASK 0x80
+
+/**
+ * struct ttm_bo_kmap_obj
+ *
+ * @virtual: The current kernel virtual address.
+ * @page: The page when kmap'ing a single page.
+ * @bo_kmap_type: Type of bo_kmap.
+ * @bo: The TTM BO.
+ *
+ * Object describing a kernel mapping. Since a TTM bo may be located
+ * in various memory types with various caching policies, the
+ * mapping can either be an ioremap, a vmap, a kmap or part of a
+ * premapped region.
+ */
+struct ttm_bo_kmap_obj {
+ void *virtual;
+ struct page *page;
+ enum {
+ ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
+ ttm_bo_map_vmap = 2,
+ ttm_bo_map_kmap = 3,
+ ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
+ } bo_kmap_type;
+ struct ttm_buffer_object *bo;
+};
+
+/**
+ * struct ttm_operation_ctx
+ *
+ * @interruptible: Sleep interruptible if sleeping.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @gfp_retry_mayfail: Set __GFP_RETRY_MAYFAIL when allocating pages.
+ * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
+ * BOs share the same reservation object.
+ * @resv: Reservation object to allow reserved evictions with.
+ * @bytes_moved: Statistics on how many bytes have been moved.
+ *
+ * Context for TTM operations like changing buffer placement or general memory
+ * allocation.
+ */
+struct ttm_operation_ctx {
+ bool interruptible;
+ bool no_wait_gpu;
+ bool gfp_retry_mayfail;
+ bool allow_res_evict;
+ struct dma_resv *resv;
+ uint64_t bytes_moved;
+};
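[Reviewer's note: a typical context for a blocking, signal-interruptible operation might look as follows; this is a sketch, and bo and placement are assumed to exist in the caller:

/* Sketch: context for an interruptible validation that may wait on the GPU. */
struct ttm_operation_ctx ctx = {
	.interruptible = true,   /* let signals abort long waits */
	.no_wait_gpu = false,    /* waiting for the GPU is acceptable here */
};

int ret = ttm_bo_validate(bo, &placement, &ctx);
]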
+
+struct ttm_lru_walk;
+
+/** struct ttm_lru_walk_ops - Operations for a LRU walk. */
+struct ttm_lru_walk_ops {
+ /**
+ * process_bo - Process this bo.
+ * @walk: struct ttm_lru_walk describing the walk.
+ * @bo: A locked and referenced buffer object.
+ *
+	 * Return: A negative error code on error, or a user-defined positive
+	 * value (typically, but not always, the size of the processed bo) on
+	 * success. Positive return values are summed by the walk, which
+	 * exits when its target is met. 0 also indicates success; -EBUSY
+	 * means this bo was skipped.
+ */
+ s64 (*process_bo)(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo);
+};
+
+/**
+ * struct ttm_lru_walk_arg - Common part for the variants of BO LRU walk.
+ */
+struct ttm_lru_walk_arg {
+ /** @ctx: Pointer to the struct ttm_operation_ctx. */
+ struct ttm_operation_ctx *ctx;
+ /** @ticket: The struct ww_acquire_ctx if any. */
+ struct ww_acquire_ctx *ticket;
+ /** @trylock_only: Only use trylock for locking. */
+ bool trylock_only;
+};
+
+/**
+ * struct ttm_lru_walk - Structure describing a LRU walk.
+ */
+struct ttm_lru_walk {
+ /** @ops: Pointer to the ops structure. */
+ const struct ttm_lru_walk_ops *ops;
+ /** @arg: Common bo LRU walk arguments. */
+ struct ttm_lru_walk_arg arg;
+};
+
+s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
+ struct ttm_resource_manager *man, s64 target);
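[Reviewer's note: putting the two structures together, a walk supplies an ops table whose process_bo() reports how much each bo contributed, and ttm_lru_walk_for_evict() sums those values until @target is met. A hedged sketch; ctx, bdev and man are assumed to exist in the caller:

/* Sketch: walk man's LRU until roughly 8 MiB worth of bos were processed. */
static s64 walk_process_bo_example(struct ttm_lru_walk *walk,
				   struct ttm_buffer_object *bo)
{
	/* bo arrives locked and referenced; do the per-bo work here. */
	return bo->base.size;
}

static const struct ttm_lru_walk_ops walk_ops_example = {
	.process_bo = walk_process_bo_example,
};

struct ttm_lru_walk walk = {
	.ops = &walk_ops_example,
	.arg = { .ctx = &ctx, .trylock_only = true },
};

s64 done = ttm_lru_walk_for_evict(&walk, bdev, man, SZ_8M);
]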
+
+/**
+ * struct ttm_bo_shrink_flags - flags to govern the bo shrinking behaviour
+ * @purge: Purge the content rather than backing it up.
+ * @writeback: Attempt to immediately write content to swap space.
+ * @allow_move: Allow moving to system before shrinking. This is typically
+ * not desired for zombie- or ghost objects (with zombie object meaning
+ * objects with a zero gem object refcount).
+ */
+struct ttm_bo_shrink_flags {
+ u32 purge : 1;
+ u32 writeback : 1;
+ u32 allow_move : 1;
+};
+
+long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+ const struct ttm_bo_shrink_flags flags);
+
+bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx);
+
+bool ttm_bo_shrink_avoid_wait(void);
+
+/**
+ * ttm_bo_reserve:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @ticket: ticket used to acquire the ww_mutex.
+ *
+ * Locks a buffer object for validation. (Or prevents other processes from
+ * locking it for validation), while taking a number of measures to prevent
+ * deadlocks.
+ *
+ * Returns:
+ * -EDEADLK: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again.
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true.
+ * -EALREADY: Bo already reserved using @ticket. This error code will only
+ * be returned if @ticket is non-NULL.
+ */
+static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible, bool no_wait,
+ struct ww_acquire_ctx *ticket)
+{
+ int ret = 0;
+
+ if (no_wait) {
+ bool success;
+
+ if (WARN_ON(ticket))
+ return -EBUSY;
+
+ success = dma_resv_trylock(bo->base.resv);
+ return success ? 0 : -EBUSY;
+ }
+
+ if (interruptible)
+ ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
+ else
+ ret = dma_resv_lock(bo->base.resv, ticket);
+ if (ret == -EINTR)
+ return -ERESTARTSYS;
+ return ret;
+}
+
+/**
+ * ttm_bo_reserve_slowpath:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @ticket: Ticket used to acquire the ww_mutex.
+ *
+ * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ */
+static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+ bool interruptible,
+ struct ww_acquire_ctx *ticket)
+{
+ if (interruptible) {
+ int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
+ ticket);
+ if (ret == -EINTR)
+ ret = -ERESTARTSYS;
+ return ret;
+ }
+ dma_resv_lock_slow(bo->base.resv, ticket);
+ return 0;
+}
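[Reviewer's note: the two helpers above implement the classic ww-mutex backoff protocol: on -EDEADLK the caller drops every reservation it holds and reacquires the contended bo through the slowpath before retrying. A condensed sketch, with the surrounding bookkeeping trimmed:

int ret = ttm_bo_reserve(bo, true, false, &ticket);
if (ret == -EDEADLK) {
	/* 1. Release all other bo reservations held under this ticket. */
	ret = ttm_bo_reserve_slowpath(bo, true, &ticket);
	/* 2. On success, retry reserving the remaining bos in order. */
}
]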
+
+void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
+
+static inline void
+ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
+{
+ spin_lock(&bo->bdev->lru_lock);
+ ttm_bo_move_to_lru_tail(bo);
+ spin_unlock(&bo->bdev->lru_lock);
+}
+
+static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ WARN_ON(bo->resource);
+ bo->resource = new_mem;
+}
+
+/**
+ * ttm_bo_move_null - assign memory for a buffer object.
+ * @bo: The bo to assign the memory to
+ * @new_mem: The memory to be assigned.
+ *
+ * Assign the memory from new_mem to the memory of the buffer object bo.
+ */
+static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_assign_mem(bo, new_mem);
+}
+
+/**
+ * ttm_bo_unreserve
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ */
+static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+{
+ ttm_bo_move_to_lru_tail_unlocked(bo);
+ dma_resv_unlock(bo->base.resv);
+}
+
+/**
+ * ttm_kmap_obj_virtual
+ *
+ * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
+ * @is_iomem: Pointer to a boolean that on return indicates whether the
+ * virtual map is io memory.
+ *
+ * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
+ * If *is_iomem is true on return, the virtual address points to an io memory
+ * area, which should strictly be accessed using the iowriteXX() and similar
+ * functions.
+ */
+static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
+ bool *is_iomem)
+{
+ *is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
+ return map->virtual;
+}
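[Reviewer's note: combined with ttm_bo_kmap(), declared just below, the helper lets callers honour the iomem contract. A sketch, assuming bo exists in the caller:

/* Sketch: map the first page of a bo and respect the iomem contract. */
struct ttm_bo_kmap_obj map;
bool is_iomem;
int ret = ttm_bo_kmap(bo, 0, 1, &map);

if (!ret) {
	void *virt = ttm_kmap_obj_virtual(&map, &is_iomem);

	if (is_iomem)
		iowrite32(0, (void __iomem *)virt);	/* io memory path */
	else
		memset(virt, 0, PAGE_SIZE);		/* plain system memory */

	ttm_bo_kunmap(&map);
}
]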
+
+int ttm_bo_wait_ctx(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx);
+int ttm_bo_validate(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_operation_ctx *ctx);
+void ttm_bo_fini(struct ttm_buffer_object *bo);
+void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
+ struct ttm_lru_bulk_move *bulk);
+bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
+ const struct ttm_place *place);
+int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, struct ttm_operation_ctx *ctx,
+ struct sg_table *sg, struct dma_resv *resv,
+ void (*destroy)(struct ttm_buffer_object *));
+int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
+ enum ttm_bo_type type, struct ttm_placement *placement,
+ uint32_t alignment, bool interruptible,
+ struct sg_table *sg, struct dma_resv *resv,
+ void (*destroy)(struct ttm_buffer_object *));
+int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
+ unsigned long num_pages, struct ttm_bo_kmap_obj *map);
+void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
+void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page);
+int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
+void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
+s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ struct ttm_resource_manager *man, gfp_t gfp_flags,
+ s64 target);
+void ttm_bo_pin(struct ttm_buffer_object *bo);
+void ttm_bo_unpin(struct ttm_buffer_object *bo);
+int ttm_bo_evict_first(struct ttm_device *bdev,
+ struct ttm_resource_manager *man,
+ struct ttm_operation_ctx *ctx);
+int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset,
+ void *buf, int len, int write);
+vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
+ struct vm_fault *vmf);
+vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
+ pgprot_t prot,
+ pgoff_t num_prefault);
+vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
+void ttm_bo_vm_open(struct vm_area_struct *vma);
+void ttm_bo_vm_close(struct vm_area_struct *vma);
+int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write);
+vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);
+
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_resource **mem,
+ struct ttm_operation_ctx *ctx);
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
+/*
+ * ttm_bo_util.c
+ */
+int ttm_mem_io_reserve(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+void ttm_mem_io_free(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+void ttm_move_memcpy(bool clear, u32 num_pages,
+ struct ttm_kmap_iter *dst_iter,
+ struct ttm_kmap_iter *src_iter);
+int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem);
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ struct dma_fence *fence, bool evict,
+ bool pipeline,
+ struct ttm_resource *new_mem);
+void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem);
+int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
+pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
+ pgprot_t tmp);
+void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
+int ttm_bo_populate(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx);
+int ttm_bo_setup_export(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx);
+
+/* Driver LRU walk helpers initially targeted for shrinking. */
+
+/**
+ * struct ttm_bo_lru_cursor - Iterator cursor for TTM LRU list looping
+ */
+struct ttm_bo_lru_cursor {
+ /** @res_curs: Embedded struct ttm_resource_cursor. */
+ struct ttm_resource_cursor res_curs;
+ /**
+ * @bo: Buffer object pointer if a buffer object is refcounted,
+ * NULL otherwise.
+ */
+ struct ttm_buffer_object *bo;
+ /**
+ * @needs_unlock: Valid iff @bo != NULL. The bo resv needs
+ * unlock before the next iteration or after loop exit.
+ */
+ bool needs_unlock;
+ /** @arg: Pointer to common BO LRU walk arguments. */
+ struct ttm_lru_walk_arg *arg;
+};
+
+void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs);
+
+struct ttm_bo_lru_cursor *
+ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
+ struct ttm_resource_manager *man,
+ struct ttm_lru_walk_arg *arg);
+
+struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs);
+
+struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs);
+
+/*
+ * Defines needed to use autocleanup (linux/cleanup.h) with struct ttm_bo_lru_cursor.
+ */
+DEFINE_CLASS(ttm_bo_lru_cursor, struct ttm_bo_lru_cursor *,
+ if (_T) {ttm_bo_lru_cursor_fini(_T); },
+ ttm_bo_lru_cursor_init(curs, man, arg),
+ struct ttm_bo_lru_cursor *curs, struct ttm_resource_manager *man,
+ struct ttm_lru_walk_arg *arg);
+static inline void *
+class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T)
+{ return *_T; }
+#define class_ttm_bo_lru_cursor_is_conditional false
+
+/**
+ * ttm_bo_lru_for_each_reserved_guarded() - Iterate over buffer objects owning
+ * resources on LRU lists.
+ * @_cursor: struct ttm_bo_lru_cursor to use for the iteration.
+ * @_man: The resource manager whose LRU lists to iterate over.
+ * @_arg: The struct ttm_lru_walk_arg to govern the LRU walk.
+ * @_bo: The struct ttm_buffer_object pointer pointing to the buffer object
+ * for the current iteration.
+ *
+ * Iterate over all resources of @_man and for each resource, attempt to
+ * reference and lock (using the locking mode detailed in @_arg) the buffer
+ * object it points to. If successful, assign @_bo to the address of the
+ * buffer object and update @_cursor. The iteration is guarded in the
+ * sense that @_cursor will be initialized before the loop starts and cleaned
+ * up when the loop terminates, even if terminated prematurely by, for
+ * example, a return or break statement. Exiting the loop will also unlock
+ * (if needed) and unreference @_bo.
+ *
+ * Return: If locking of a bo returns an error, then iteration is terminated
+ * and @_bo is set to a corresponding error pointer. It's illegal to
+ * dereference @_bo after loop exit.
+ */
+#define ttm_bo_lru_for_each_reserved_guarded(_cursor, _man, _arg, _bo) \
+ scoped_guard(ttm_bo_lru_cursor, _cursor, _man, _arg) \
+ for ((_bo) = ttm_bo_lru_cursor_first(_cursor); \
+ !IS_ERR_OR_NULL(_bo); \
+ (_bo) = ttm_bo_lru_cursor_next(_cursor))
+
+#endif
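[Reviewer's note: a usage sketch of the guarded iterator at the end of the new header; ctx and man are assumed to exist in the caller:

/* Sketch: shrinker-style traversal; cursor cleanup and bo unlock/unref
 * happen automatically via the scoped guard, even on early return.
 */
struct ttm_bo_lru_cursor cursor;
struct ttm_lru_walk_arg arg = { .ctx = &ctx, .trylock_only = true };
struct ttm_buffer_object *bo;

ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &arg, bo) {
	if (ttm_bo_shrink_suitable(bo, &ctx))
		ttm_bo_shrink(&ctx, bo,
			      (struct ttm_bo_shrink_flags){ .writeback = 1 });
}
]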
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
deleted file mode 100644
index 2155e2e38aec..000000000000
--- a/include/drm/ttm/ttm_bo_api.h
+++ /dev/null
@@ -1,640 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#ifndef _TTM_BO_API_H_
-#define _TTM_BO_API_H_
-
-#include <drm/drm_gem.h>
-#include <drm/drm_hashtab.h>
-#include <drm/drm_vma_manager.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/mm.h>
-#include <linux/bitmap.h>
-#include <linux/dma-resv.h>
-
-#include "ttm_resource.h"
-
-struct ttm_global;
-
-struct ttm_device;
-
-struct dma_buf_map;
-
-struct drm_mm_node;
-
-struct ttm_placement;
-
-struct ttm_place;
-
-struct ttm_lru_bulk_move;
-
-/**
- * enum ttm_bo_type
- *
- * @ttm_bo_type_device: These are 'normal' buffers that can
- * be mmapped by user space. Each of these bos occupy a slot in the
- * device address space, that can be used for normal vm operations.
- *
- * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
- * but they cannot be accessed from user-space. For kernel-only use.
- *
- * @ttm_bo_type_sg: Buffer made from dmabuf sg table shared with another
- * driver.
- */
-
-enum ttm_bo_type {
- ttm_bo_type_device,
- ttm_bo_type_kernel,
- ttm_bo_type_sg
-};
-
-struct ttm_tt;
-
-/**
- * struct ttm_buffer_object
- *
- * @base: drm_gem_object superclass data.
- * @bdev: Pointer to the buffer object device structure.
- * @type: The bo type.
- * @destroy: Destruction function. If NULL, kfree is used.
- * @num_pages: Actual number of pages.
- * @kref: Reference count of this buffer object. When this refcount reaches
- * zero, the object is destroyed or put on the delayed delete list.
- * @mem: structure describing current placement.
- * @ttm: TTM structure holding system pages.
- * @evicted: Whether the object was evicted without user-space knowing.
- * @deleted: True if the object is only a zombie and already deleted.
- * @lru: List head for the lru list.
- * @ddestroy: List head for the delayed destroy list.
- * @swap: List head for swap LRU list.
- * @moving: Fence set when BO is moving
- * @offset: The current GPU offset, which can have different meanings
- * depending on the memory type. For SYSTEM type memory, it should be 0.
- * @cur_placement: Hint of current placement.
- *
- * Base class for TTM buffer object, that deals with data placement and CPU
- * mappings. GPU mappings are really up to the driver, but for simpler GPUs
- * the driver can usually use the placement offset @offset directly as the
- * GPU virtual address. For drivers implementing multiple
- * GPU memory manager contexts, the driver should manage the address space
- * in these contexts separately and use these objects to get the correct
- * placement and caching for these GPU maps. This makes it possible to use
- * these objects for even quite elaborate memory management schemes.
- * The destroy member, the API visibility of this object makes it possible
- * to derive driver specific types.
- */
-
-struct ttm_buffer_object {
- struct drm_gem_object base;
-
- /**
- * Members constant at init.
- */
-
- struct ttm_device *bdev;
- enum ttm_bo_type type;
- void (*destroy) (struct ttm_buffer_object *);
-
- /**
- * Members not needing protection.
- */
- struct kref kref;
-
- /**
- * Members protected by the bo::resv::reserved lock.
- */
-
- struct ttm_resource mem;
- struct ttm_tt *ttm;
- bool deleted;
-
- /**
- * Members protected by the bdev::lru_lock.
- */
-
- struct list_head lru;
- struct list_head ddestroy;
-
- /**
- * Members protected by a bo reservation.
- */
-
- struct dma_fence *moving;
- unsigned priority;
- unsigned pin_count;
-
- /**
- * Special members that are protected by the reserve lock
- * and the bo::lock when written to. Can be read with
- * either of these locks held.
- */
-
- struct sg_table *sg;
-};
-
-/**
- * struct ttm_bo_kmap_obj
- *
- * @virtual: The current kernel virtual address.
- * @page: The page when kmap'ing a single page.
- * @bo_kmap_type: Type of bo_kmap.
- *
- * Object describing a kernel mapping. Since a TTM bo may be located
- * in various memory types with various caching policies, the
- * mapping can either be an ioremap, a vmap, a kmap or part of a
- * premapped region.
- */
-
-#define TTM_BO_MAP_IOMEM_MASK 0x80
-struct ttm_bo_kmap_obj {
- void *virtual;
- struct page *page;
- enum {
- ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
- ttm_bo_map_vmap = 2,
- ttm_bo_map_kmap = 3,
- ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
- } bo_kmap_type;
- struct ttm_buffer_object *bo;
-};
-
-/**
- * struct ttm_operation_ctx
- *
- * @interruptible: Sleep interruptible if sleeping.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- * @gfp_retry_mayfail: Set the __GFP_RETRY_MAYFAIL when allocation pages.
- * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
- * BOs share the same reservation object.
- * @force_alloc: Don't check the memory account during suspend or CPU page
- * faults. Should only be used by TTM internally.
- * @resv: Reservation object to allow reserved evictions with.
- *
- * Context for TTM operations like changing buffer placement or general memory
- * allocation.
- */
-struct ttm_operation_ctx {
- bool interruptible;
- bool no_wait_gpu;
- bool gfp_retry_mayfail;
- bool allow_res_evict;
- bool force_alloc;
- struct dma_resv *resv;
- uint64_t bytes_moved;
-};
-
-/**
- * ttm_bo_get - reference a struct ttm_buffer_object
- *
- * @bo: The buffer object.
- */
-static inline void ttm_bo_get(struct ttm_buffer_object *bo)
-{
- kref_get(&bo->kref);
-}
-
-/**
- * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
- * its refcount has already reached zero.
- * @bo: The buffer object.
- *
- * Used to reference a TTM buffer object in lookups where the object is removed
- * from the lookup structure during the destructor and for RCU lookups.
- *
- * Returns: @bo if the referencing was successful, NULL otherwise.
- */
-static inline __must_check struct ttm_buffer_object *
-ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
-{
- if (!kref_get_unless_zero(&bo->kref))
- return NULL;
- return bo;
-}
-
-/**
- * ttm_bo_wait - wait for buffer idle.
- *
- * @bo: The buffer object.
- * @interruptible: Use interruptible wait.
- * @no_wait: Return immediately if buffer is busy.
- *
- * This function must be called with the bo::mutex held, and makes
- * sure any previous rendering to the buffer is completed.
- * Note: It might be necessary to block validations before the
- * wait by reserving the buffer.
- * Returns -EBUSY if no_wait is true and the buffer is busy.
- * Returns -ERESTARTSYS if interrupted by a signal.
- */
-int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait);
-
-static inline int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
-{
- return ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
-}
-
-/**
- * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
- *
- * @placement: Return immediately if buffer is busy.
- * @mem: The struct ttm_resource indicating the region where the bo resides
- * @new_flags: Describes compatible placement found
- *
- * Returns true if the placement is compatible
- */
-bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_resource *mem,
- uint32_t *new_flags);
-
-/**
- * ttm_bo_validate
- *
- * @bo: The buffer object.
- * @placement: Proposed placement for the buffer object.
- * @ctx: validation parameters.
- *
- * Changes placement and caching policy of the buffer object
- * according proposed placement.
- * Returns
- * -EINVAL on invalid proposed placement.
- * -ENOMEM on out-of-memory condition.
- * -EBUSY if no_wait is true and buffer busy.
- * -ERESTARTSYS if interrupted by a signal.
- */
-int ttm_bo_validate(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_operation_ctx *ctx);
-
-/**
- * ttm_bo_put
- *
- * @bo: The buffer object.
- *
- * Unreference a buffer object.
- */
-void ttm_bo_put(struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_move_to_lru_tail
- *
- * @bo: The buffer object.
- * @mem: Resource object.
- * @bulk: optional bulk move structure to remember BO positions
- *
- * Move this BO to the tail of all lru lists used to lookup and reserve an
- * object. This function must be called with struct ttm_global::lru_lock
- * held, and is used to make a BO less likely to be considered for eviction.
- */
-void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
- struct ttm_resource *mem,
- struct ttm_lru_bulk_move *bulk);
-
-/**
- * ttm_bo_bulk_move_lru_tail
- *
- * @bulk: bulk move structure
- *
- * Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
- * BO order never changes. Should be called with ttm_global::lru_lock held.
- */
-void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
-
-/**
- * ttm_bo_lock_delayed_workqueue
- *
- * Prevent the delayed workqueue from running.
- * Returns
- * True if the workqueue was queued at the time
- */
-int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
-
-/**
- * ttm_bo_unlock_delayed_workqueue
- *
- * Allows the delayed workqueue to run.
- */
-void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
-
-/**
- * ttm_bo_eviction_valuable
- *
- * @bo: The buffer object to evict
- * @place: the placement we need to make room for
- *
- * Check if it is valuable to evict the BO to make room for the given placement.
- */
-bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
- const struct ttm_place *place);
-
-/**
- * ttm_bo_init_reserved
- *
- * @bdev: Pointer to a ttm_device struct.
- * @bo: Pointer to a ttm_buffer_object to be initialized.
- * @size: Requested size of buffer object.
- * @type: Requested type of buffer object.
- * @flags: Initial placement flags.
- * @page_alignment: Data alignment in pages.
- * @ctx: TTM operation context for memory allocation.
- * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
- * @destroy: Destroy function. Use NULL for kfree().
- *
- * This function initializes a pre-allocated struct ttm_buffer_object.
- * As this object may be part of a larger structure, this function,
- * together with the @destroy function,
- * enables driver-specific objects derived from a ttm_buffer_object.
- *
- * On successful return, the caller owns an object kref to @bo. The kref and
- * list_kref are usually set to 1, but note that in some situations, other
- * tasks may already be holding references to @bo as well.
- * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
- * and it is the caller's responsibility to call ttm_bo_unreserve.
- *
- * If a failure occurs, the function will call the @destroy function, or
- * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
- * illegal and will likely cause memory corruption.
- *
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid placement flags.
- * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
- */
-
-int ttm_bo_init_reserved(struct ttm_device *bdev,
- struct ttm_buffer_object *bo,
- size_t size, enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- struct ttm_operation_ctx *ctx,
- struct sg_table *sg, struct dma_resv *resv,
- void (*destroy) (struct ttm_buffer_object *));
-
-/**
- * ttm_bo_init
- *
- * @bdev: Pointer to a ttm_device struct.
- * @bo: Pointer to a ttm_buffer_object to be initialized.
- * @size: Requested size of buffer object.
- * @type: Requested type of buffer object.
- * @flags: Initial placement flags.
- * @page_alignment: Data alignment in pages.
- * @interruptible: If needing to sleep to wait for GPU resources,
- * sleep interruptible.
- * pinned in physical memory. If this behaviour is not desired, this member
- * holds a pointer to a persistent shmem object. Typically, this would
- * point to the shmem object backing a GEM object if TTM is used to back a
- * GEM user interface.
- * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
- * @destroy: Destroy function. Use NULL for kfree().
- *
- * This function initializes a pre-allocated struct ttm_buffer_object.
- * As this object may be part of a larger structure, this function,
- * together with the @destroy function,
- * enables driver-specific objects derived from a ttm_buffer_object.
- *
- * On successful return, the caller owns an object kref to @bo. The kref and
- * list_kref are usually set to 1, but note that in some situations, other
- * tasks may already be holding references to @bo as well.
- *
- * If a failure occurs, the function will call the @destroy function, or
- * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
- * illegal and will likely cause memory corruption.
- *
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid placement flags.
- * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
- */
-int ttm_bo_init(struct ttm_device *bdev, struct ttm_buffer_object *bo,
- size_t size, enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment, bool interrubtible,
- struct sg_table *sg, struct dma_resv *resv,
- void (*destroy) (struct ttm_buffer_object *));
-
-/**
- * ttm_kmap_obj_virtual
- *
- * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
- * @is_iomem: Pointer to an integer that on return indicates 1 if the
- * virtual map is io memory, 0 if normal memory.
- *
- * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
- * If *is_iomem is 1 on return, the virtual address points to an io memory area,
- * that should strictly be accessed by the iowriteXX() and similar functions.
- */
-static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
- bool *is_iomem)
-{
- *is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
- return map->virtual;
-}
-
-/**
- * ttm_bo_kmap
- *
- * @bo: The buffer object.
- * @start_page: The first page to map.
- * @num_pages: Number of pages to map.
- * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
- *
- * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
- * data in the buffer object. The ttm_kmap_obj_virtual function can then be
- * used to obtain a virtual address to the data.
- *
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid range.
- */
-int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
- unsigned long num_pages, struct ttm_bo_kmap_obj *map);
-
-/**
- * ttm_bo_kunmap
- *
- * @map: Object describing the map to unmap.
- *
- * Unmaps a kernel map set up by ttm_bo_kmap.
- */
-void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
-
-/**
- * ttm_bo_vmap
- *
- * @bo: The buffer object.
- * @map: pointer to a struct dma_buf_map representing the map.
- *
- * Sets up a kernel virtual mapping, using ioremap or vmap to the
- * data in the buffer object. The parameter @map returns the virtual
- * address as struct dma_buf_map. Unmap the buffer with ttm_bo_vunmap().
- *
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid range.
- */
-int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map);
-
-/**
- * ttm_bo_vunmap
- *
- * @bo: The buffer object.
- * @map: Object describing the map to unmap.
- *
- * Unmaps a kernel map set up by ttm_bo_vmap().
- */
-void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map);
-
-/**
- * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
- *
- * @vma: vma as input from the fbdev mmap method.
- * @bo: The bo backing the address space.
- *
- * Maps a buffer object.
- */
-int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_mmap - mmap out of the ttm device address space.
- *
- * @filp: filp as input from the mmap method.
- * @vma: vma as input from the mmap method.
- * @bdev: Pointer to the ttm_device with the address space manager.
- *
- * This function is intended to be called by the device mmap method.
- * if the device address space is to be backed by the bo manager.
- */
-int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
- struct ttm_device *bdev);
-
-/**
- * ttm_bo_io
- *
- * @bdev: Pointer to the struct ttm_device.
- * @filp: Pointer to the struct file attempting to read / write.
- * @wbuf: User-space pointer to address of buffer to write. NULL on read.
- * @rbuf: User-space pointer to address of buffer to read into.
- * Null on write.
- * @count: Number of bytes to read / write.
- * @f_pos: Pointer to current file position.
- * @write: 1 for read, 0 for write.
- *
- * This function implements read / write into ttm buffer objects, and is
- * intended to
- * be called from the fops::read and fops::write method.
- * Returns:
- * See man (2) write, man(2) read. In particular,
- * the function may return -ERESTARTSYS if
- * interrupted by a signal.
- */
-ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp,
- const char __user *wbuf, char __user *rbuf,
- size_t count, loff_t *f_pos, bool write);
-
-int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
- gfp_t gfp_flags);
-
-/**
- * ttm_bo_uses_embedded_gem_object - check if the given bo uses the
- * embedded drm_gem_object.
- *
- * Most ttm drivers are using gem too, so the embedded
- * ttm_buffer_object.base will be initialized by the driver (before
- * calling ttm_bo_init). It is also possible to use ttm without gem
- * though (vmwgfx does that).
- *
- * This helper will figure whenever a given ttm bo is a gem object too
- * or not.
- *
- * @bo: The bo to check.
- */
-static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
-{
- return bo->base.dev != NULL;
-}
-
-/**
- * ttm_bo_pin - Pin the buffer object.
- * @bo: The buffer object to pin
- *
- * Make sure the buffer is not evicted any more during memory pressure.
- */
-static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
-{
- dma_resv_assert_held(bo->base.resv);
- WARN_ON_ONCE(!kref_read(&bo->kref));
- ++bo->pin_count;
-}
-
-/**
- * ttm_bo_unpin - Unpin the buffer object.
- * @bo: The buffer object to unpin
- *
- * Allows the buffer object to be evicted again during memory pressure.
- */
-static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
-{
- dma_resv_assert_held(bo->base.resv);
- WARN_ON_ONCE(!kref_read(&bo->kref));
- if (bo->pin_count)
- --bo->pin_count;
- else
- WARN_ON_ONCE(true);
-}
-
-int ttm_mem_evict_first(struct ttm_device *bdev,
- struct ttm_resource_manager *man,
- const struct ttm_place *place,
- struct ttm_operation_ctx *ctx,
- struct ww_acquire_ctx *ticket);
-
-/* Default number of pre-faulted pages in the TTM fault handler */
-#define TTM_BO_VM_NUM_PREFAULT 16
-
-vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
- struct vm_fault *vmf);
-
-vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
- pgprot_t prot,
- pgoff_t num_prefault,
- pgoff_t fault_page_size);
-
-vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
-
-void ttm_bo_vm_open(struct vm_area_struct *vma);
-
-void ttm_bo_vm_close(struct vm_area_struct *vma);
-
-int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
- void *buf, int len, int write);
-bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all);
-
-#endif
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
deleted file mode 100644
index dbccac957f8f..000000000000
--- a/include/drm/ttm/ttm_bo_driver.h
+++ /dev/null
@@ -1,335 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-#ifndef _TTM_BO_DRIVER_H_
-#define _TTM_BO_DRIVER_H_
-
-#include <drm/drm_mm.h>
-#include <drm/drm_vma_manager.h>
-#include <linux/workqueue.h>
-#include <linux/fs.h>
-#include <linux/spinlock.h>
-#include <linux/dma-resv.h>
-
-#include <drm/ttm/ttm_device.h>
-
-#include "ttm_bo_api.h"
-#include "ttm_placement.h"
-#include "ttm_tt.h"
-#include "ttm_pool.h"
-
-/**
- * struct ttm_lru_bulk_move_pos
- *
- * @first: first BO in the bulk move range
- * @last: last BO in the bulk move range
- *
- * Positions for a lru bulk move.
- */
-struct ttm_lru_bulk_move_pos {
- struct ttm_buffer_object *first;
- struct ttm_buffer_object *last;
-};
-
-/**
- * struct ttm_lru_bulk_move
- *
- * @tt: first/last lru entry for BOs in the TT domain
- * @vram: first/last lru entry for BOs in the VRAM domain
- * @swap: first/last lru entry for BOs on the swap list
- *
- * Helper structure for bulk moves on the LRU list.
- */
-struct ttm_lru_bulk_move {
- struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
- struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
-};
-
-/*
- * ttm_bo.c
- */
-
-/**
- * ttm_bo_mem_space
- *
- * @bo: Pointer to a struct ttm_buffer_object. the data of which
- * we want to allocate space for.
- * @proposed_placement: Proposed new placement for the buffer object.
- * @mem: A struct ttm_resource.
- * @interruptible: Sleep interruptible when sliping.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- *
- * Allocate memory space for the buffer object pointed to by @bo, using
- * the placement flags in @mem, potentially evicting other idle buffer objects.
- * This function may sleep while waiting for space to become available.
- * Returns:
- * -EBUSY: No space available (only if no_wait == 1).
- * -ENOMEM: Could not allocate memory for the buffer object, either due to
- * fragmentation or concurrent allocators.
- * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
- */
-int ttm_bo_mem_space(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- struct ttm_resource *mem,
- struct ttm_operation_ctx *ctx);
-
-/**
- * ttm_bo_unmap_virtual
- *
- * @bo: tear down the virtual mappings for this BO
- */
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_reserve:
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @ticket: ticket used to acquire the ww_mutex.
- *
- * Locks a buffer object for validation. (Or prevents other processes from
- * locking it for validation), while taking a number of measures to prevent
- * deadlocks.
- *
- * Returns:
- * -EDEADLK: The reservation may cause a deadlock.
- * Release all buffer reservations, wait for @bo to become unreserved and
- * try again.
- * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
- * a signal. Release all buffer reservations and return to user-space.
- * -EBUSY: The function needed to sleep, but @no_wait was true
- * -EALREADY: Bo already reserved using @ticket. This error code will only
- * be returned if @use_ticket is set to true.
- */
-static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait,
- struct ww_acquire_ctx *ticket)
-{
- int ret = 0;
-
- if (no_wait) {
- bool success;
- if (WARN_ON(ticket))
- return -EBUSY;
-
- success = dma_resv_trylock(bo->base.resv);
- return success ? 0 : -EBUSY;
- }
-
- if (interruptible)
- ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
- else
- ret = dma_resv_lock(bo->base.resv, ticket);
- if (ret == -EINTR)
- return -ERESTARTSYS;
- return ret;
-}
-
-/**
- * ttm_bo_reserve_slowpath:
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @sequence: Set (@bo)->sequence to this value after lock
- *
- * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
- * from all our other reservations. Because there are no other reservations
- * held by us, this function cannot deadlock any more.
- */
-static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
- bool interruptible,
- struct ww_acquire_ctx *ticket)
-{
- if (interruptible) {
- int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
- ticket);
- if (ret == -EINTR)
- ret = -ERESTARTSYS;
- return ret;
- }
- dma_resv_lock_slow(bo->base.resv, ticket);
- return 0;
-}
-
-static inline void
-ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
-{
- spin_lock(&bo->bdev->lru_lock);
- ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
- spin_unlock(&bo->bdev->lru_lock);
-}
-
-static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
- struct ttm_resource *new_mem)
-{
- bo->mem = *new_mem;
- new_mem->mm_node = NULL;
-}
-
-/**
- * ttm_bo_move_null = assign memory for a buffer object.
- * @bo: The bo to assign the memory to
- * @new_mem: The memory to be assigned.
- *
- * Assign the memory from new_mem to the memory of the buffer object bo.
- */
-static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
- struct ttm_resource *new_mem)
-{
- struct ttm_resource *old_mem = &bo->mem;
-
- WARN_ON(old_mem->mm_node != NULL);
- ttm_bo_assign_mem(bo, new_mem);
-}
-
-/**
- * ttm_bo_unreserve
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Unreserve a previous reservation of @bo.
- */
-static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
-{
- ttm_bo_move_to_lru_tail_unlocked(bo);
- dma_resv_unlock(bo->base.resv);
-}
-
-/*
- * ttm_bo_util.c
- */
-int ttm_mem_io_reserve(struct ttm_device *bdev,
- struct ttm_resource *mem);
-void ttm_mem_io_free(struct ttm_device *bdev,
- struct ttm_resource *mem);
-
-/**
- * ttm_bo_move_memcpy
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- * @new_mem: struct ttm_resource indicating where to move.
- *
- * Fallback move function for a mappable buffer object in mappable memory.
- * The function will, if successful,
- * free any old aperture space, and set (@new_mem)->mm_node to NULL,
- * and update the (@bo)->mem placement flags. If unsuccessful, the old
- * data remains untouched, and it's up to the caller to free the
- * memory space indicated by @new_mem.
- * Returns:
- * !0: Failure.
- */
-
-int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem);
-
-/**
- * ttm_bo_move_accel_cleanup.
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @fence: A fence object that signals when moving is complete.
- * @evict: This is an evict move. Don't return until the buffer is idle.
- * @pipeline: evictions are to be pipelined.
- * @new_mem: struct ttm_resource indicating where to move.
- *
- * Accelerated move function to be called when an accelerated move
- * has been scheduled. The function will create a new temporary buffer object
- * representing the old placement, and put the sync object on both buffer
- * objects. After that the newly created buffer object is unref'd to be
- * destroyed when the move is complete. This will help pipeline
- * buffer moves.
- */
-int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct dma_fence *fence, bool evict,
- bool pipeline,
- struct ttm_resource *new_mem);
-
-/**
- * ttm_bo_pipeline_gutting.
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Pipelined gutting a BO of its backing store.
- */
-int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
-
-/**
- * ttm_io_prot
- *
- * bo: ttm buffer object
- * res: ttm resource object
- * @tmp: Page protection flag for a normal, cached mapping.
- *
- * Utility function that returns the pgprot_t that should be used for
- * setting up a PTE with the caching model indicated by @c_state.
- */
-pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
- pgprot_t tmp);
-
-/**
- * ttm_bo_tt_bind
- *
- * Bind the object tt to a memory resource.
- */
-int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
-
-/**
- * ttm_bo_tt_destroy.
- */
-void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
-
-/**
- * ttm_range_man_init
- *
- * @bdev: ttm device
- * @type: memory manager type
- * @use_tt: if the memory manager uses tt
- * @p_size: size of area to be managed in pages.
- *
- * Initialise a generic range manager for the selected memory type.
- * The range manager is installed for this device in the type slot.
- */
-int ttm_range_man_init(struct ttm_device *bdev,
- unsigned type, bool use_tt,
- unsigned long p_size);
-
-/**
- * ttm_range_man_fini
- *
- * @bdev: ttm device
- * @type: memory manager type
- *
- * Remove the generic range manager from a slot and tear it down.
- */
-int ttm_range_man_fini(struct ttm_device *bdev,
- unsigned type);
-
-#endif
diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h
index a0b4a49fa432..a18f43e93aba 100644
--- a/include/drm/ttm/ttm_caching.h
+++ b/include/drm/ttm/ttm_caching.h
@@ -25,12 +25,33 @@
#ifndef _TTM_CACHING_H_
#define _TTM_CACHING_H_
+#include <linux/pgtable.h>
+
#define TTM_NUM_CACHING_TYPES 3
+/**
+ * enum ttm_caching - CPU caching and BUS snooping behavior.
+ */
enum ttm_caching {
+ /**
+ * @ttm_uncached: Most defensive option for device mappings,
+ * don't even allow write combining.
+ */
ttm_uncached,
+
+ /**
+ * @ttm_write_combined: Don't cache read accesses, but allow at least
+ * writes to be combined.
+ */
ttm_write_combined,
+
+ /**
+ * @ttm_cached: Fully cached like normal system memory, requires that
+ * devices snoop the CPU cache on accesses.
+ */
ttm_cached
};
+pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp);
+
#endif
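[Reviewer's note: the newly exported ttm_prot_from_caching() maps these caching modes onto page protection bits; a one-line sketch:

/* Sketch: PTE protection for a write-combined device mapping. */
pgprot_t prot = ttm_prot_from_caching(ttm_write_combined, PAGE_KERNEL);
]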
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index 7c8f87bd52d3..5618aef462f2 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -27,11 +27,10 @@
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <drm/ttm/ttm_allocation.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_pool.h>
-#define TTM_NUM_MEM_TYPES 8
-
struct ttm_device;
struct ttm_placement;
struct ttm_buffer_object;
@@ -39,31 +38,23 @@ struct ttm_operation_ctx;
/**
* struct ttm_global - Buffer object driver global data.
- *
- * @dummy_read_page: Pointer to a dummy page used for mapping requests
- * of unpopulated pages.
- * @shrink: A shrink callback object used for buffer object swap.
- * @device_list_mutex: Mutex protecting the device list.
- * This mutex is held while traversing the device list for pm options.
- * @lru_lock: Spinlock protecting the bo subsystem lru lists.
- * @device_list: List of buffer object devices.
- * @swap_lru: Lru list of buffer objects used for swapping.
*/
extern struct ttm_global {
/**
- * Constant after init.
+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
+ * of unpopulated pages. Constant after init.
*/
-
struct page *dummy_read_page;
/**
- * Protected by ttm_global_mutex.
+ * @device_list: List of buffer object devices. Protected by
+ * ttm_global_mutex.
*/
struct list_head device_list;
/**
- * Internal protection.
+ * @bo_count: Number of buffer objects allocated by devices.
*/
atomic_t bo_count;
} ttm_glob;
@@ -73,7 +64,7 @@ struct ttm_device_funcs {
* ttm_tt_create
*
* @bo: The buffer object to create the ttm for.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
@@ -151,7 +142,7 @@ struct ttm_device_funcs {
* the graphics address space
* @ctx: context for this move with parameters
* @new_mem: the new memory region receiving the buffer
- @ @hop: placement for driver directed intermediate hop
+ * @hop: placement for driver directed intermediate hop
*
* Move a buffer between two memory regions.
* Returns errno -EMULTIHOP if driver requests a hop
@@ -162,21 +153,6 @@ struct ttm_device_funcs {
struct ttm_place *hop);
/**
- * struct ttm_bo_driver_member verify_access
- *
- * @bo: Pointer to a buffer object.
- * @filp: Pointer to a struct file trying to access the object.
- *
- * Called from the map / write / read methods to verify that the
- * caller is permitted to access the buffer object.
- * This member may be set to NULL, which will refuse this kind of
- * access for all buffer objects.
- * This function should return 0 if access is granted, -EPERM otherwise.
- */
- int (*verify_access)(struct ttm_buffer_object *bo,
- struct file *filp);
-
- /**
* Hook to notify driver about a resource delete.
*/
void (*delete_mem_notify)(struct ttm_buffer_object *bo);
@@ -224,15 +200,6 @@ struct ttm_device_funcs {
void *buf, int len, int write);
/**
- * struct ttm_bo_driver member del_from_lru_notify
- *
- * @bo: the buffer object deleted from lru
- *
- * notify driver that a BO was deleted from LRU.
- */
- void (*del_from_lru_notify)(struct ttm_buffer_object *bo);
-
- /**
* Notify the driver that we're about to release a BO
*
* @bo: BO that is about to be released
@@ -245,73 +212,94 @@ struct ttm_device_funcs {
/**
* struct ttm_device - Buffer object driver device-specific data.
- *
- * @device_list: Our entry in the global device list.
- * @funcs: Function table for the device.
- * @sysman: Resource manager for the system domain.
- * @man_drv: An array of resource_managers.
- * @vma_manager: Address space manager.
- * @pool: page pool for the device.
- * @dev_mapping: A pointer to the struct address_space representing the
- * device address space.
- * @wq: Work queue structure for the delayed delete workqueue.
*/
struct ttm_device {
- /*
+ /**
+ * @device_list: Our entry in the global device list.
* Constant after bo device init
*/
struct list_head device_list;
- struct ttm_device_funcs *funcs;
- /*
+ /**
+ * @alloc_flags: TTM_ALLOCATION_* flags.
+ */
+ unsigned int alloc_flags;
+
+ /**
+ * @funcs: Function table for the device.
+ * Constant after bo device init
+ */
+ const struct ttm_device_funcs *funcs;
+
+ /**
+ * @sysman: Resource manager for the system domain.
* Access via ttm_manager_type.
*/
struct ttm_resource_manager sysman;
+
+ /**
+ * @man_drv: An array of resource_managers, one per resource type.
+ */
struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
- /*
- * Protected by internal locks.
+ /**
+ * @vma_manager: Address space manager for finding BOs to mmap.
*/
struct drm_vma_offset_manager *vma_manager;
+
+ /**
+ * @pool: page pool for the device.
+ */
struct ttm_pool pool;
- /*
- * Protection for the per manager LRU and ddestroy lists.
+ /**
+ * @lru_lock: Protection for the per manager LRU and ddestroy lists.
*/
spinlock_t lru_lock;
- struct list_head ddestroy;
- /*
- * Protected by load / firstopen / lastclose /unload sync.
+ /**
+ * @unevictable: Buffer objects which are pinned or swapped and as such
+ * not on an LRU list.
+ */
+ struct list_head unevictable;
+
+ /**
+ * @dev_mapping: A pointer to the struct address_space for invalidating
+ * CPU mappings on buffer move. Protected by load/unload sync.
*/
struct address_space *dev_mapping;
- /*
- * Internal protection.
+ /**
+ * @wq: Work queue structure for the delayed delete workqueue.
*/
- struct delayed_work wq;
+ struct workqueue_struct *wq;
};
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags);
+int ttm_device_prepare_hibernation(struct ttm_device *bdev);
static inline struct ttm_resource_manager *
ttm_manager_type(struct ttm_device *bdev, int mem_type)
{
+ BUILD_BUG_ON(__builtin_constant_p(mem_type)
+ && mem_type >= TTM_NUM_MEM_TYPES);
return bdev->man_drv[mem_type];
}
static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
struct ttm_resource_manager *manager)
{
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
bdev->man_drv[type] = manager;
}
-int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
+int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs,
struct device *dev, struct address_space *mapping,
struct drm_vma_offset_manager *vma_manager,
- bool use_dma_alloc, bool use_dma32);
+ unsigned int alloc_flags);
void ttm_device_fini(struct ttm_device *bdev);
+void ttm_device_clear_dma_mappings(struct ttm_device *bdev);
#endif
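
(Illustrative sketch, not part of the patch: adopting the reworked ttm_device_init() above. The funcs table is now const and a single alloc_flags word replaces the old use_dma_alloc/use_dma32 booleans; the "my_*" names and the assumption that 0 selects default allocation behaviour are hypothetical.)

static const struct ttm_device_funcs my_ttm_funcs = {
	.ttm_tt_create = my_ttm_tt_create,
	/* ... remaining callbacks ... */
};

int my_drm_ttm_init(struct my_device *mdev)
{
	/* alloc_flags == 0: no special allocation behaviour requested */
	return ttm_device_init(&mdev->bdev, &my_ttm_funcs, mdev->dev,
			       mdev->mapping, mdev->vma_manager, 0);
}
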
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index a99d7fdf2964..fac1e3e57ebd 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -33,7 +33,9 @@
#include <linux/list.h>
-#include "ttm_bo_api.h"
+struct ww_acquire_ctx;
+struct dma_fence;
+struct ttm_buffer_object;
/**
* struct ttm_validate_buffer
@@ -50,7 +52,7 @@ struct ttm_validate_buffer {
};
/**
- * function ttm_eu_backoff_reservation
+ * ttm_eu_backoff_reservation
*
* @ticket: ww_acquire_ctx from reserve call
* @list: thread private list of ttm_validate_buffer structs.
@@ -62,14 +64,13 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
struct list_head *list);
/**
- * function ttm_eu_reserve_buffers
+ * ttm_eu_reserve_buffers
*
* @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only
* non-blocking reserves should be tried.
* @list: thread private list of ttm_validate_buffer structs.
* @intr: should the wait be interruptible
* @dups: [out] optional list of duplicates.
- * @del_lru: true if BOs should be removed from the LRU.
*
* Tries to reserve bos pointed to by the list entries for validation.
* If the function returns 0, all buffers are marked as "unfenced",
@@ -100,7 +101,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *dups);
/**
- * function ttm_eu_fence_buffer_objects.
+ * ttm_eu_fence_buffer_objects
*
* @ticket: ww_acquire_ctx from reserve call
* @list: thread private list of ttm_validate_buffer structs.
diff --git a/include/drm/ttm/ttm_kmap_iter.h b/include/drm/ttm/ttm_kmap_iter.h
new file mode 100644
index 000000000000..fe72631a6e93
--- /dev/null
+++ b/include/drm/ttm/ttm_kmap_iter.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef __TTM_KMAP_ITER_H__
+#define __TTM_KMAP_ITER_H__
+
+#include <linux/types.h>
+
+struct ttm_kmap_iter;
+struct iosys_map;
+
+/**
+ * struct ttm_kmap_iter_ops - Ops structure for a struct
+ * ttm_kmap_iter.
+ * @maps_tt: Whether the iterator maps TT memory directly, as opposed to
+ * mapping a TT through an aperture. Both these modes have
+ * struct ttm_resource_manager::use_tt set, but the latter typically
+ * returns is_iomem == true from ttm_mem_io_reserve.
+ */
+struct ttm_kmap_iter_ops {
+ /**
+ * @map_local: Map a PAGE_SIZE part of the resource using
+ * kmap_local semantics.
+ * @res_iter: Pointer to the struct ttm_kmap_iter representing
+ * the resource.
+ * @dmap: The struct iosys_map holding the virtual address after
+ * the operation.
+ * @i: The location within the resource to map. PAGE_SIZE granularity.
+ */
+ void (*map_local)(struct ttm_kmap_iter *res_iter,
+ struct iosys_map *dmap, pgoff_t i);
+ /**
+ * @unmap_local: Unmap a PAGE_SIZE part of the resource previously
+ * mapped using kmap_local.
+ * @res_iter: Pointer to the struct ttm_kmap_iter representing
+ * the resource.
+ * @dmap: The struct iosys_map holding the virtual address after
+ * the operation.
+ */
+ void (*unmap_local)(struct ttm_kmap_iter *res_iter,
+ struct iosys_map *dmap);
+ bool maps_tt;
+};
+
+/**
+ * struct ttm_kmap_iter - Iterator for kmap_local type operations on a
+ * resource.
+ * @ops: Pointer to the operations struct.
+ *
+ * This struct is intended to be embedded in a resource-specific specialization
+ * implementing operations for the resource.
+ *
+ * Nothing stops us from extending the operations to vmap, vmap_pfn etc.,
+ * replacing some or all of the ttm_bo_util cpu-map functionality.
+ */
+struct ttm_kmap_iter {
+ const struct ttm_kmap_iter_ops *ops;
+};
+
+#endif /* __TTM_KMAP_ITER_H__ */
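
(Illustrative sketch, not part of the patch: a minimal ttm_kmap_iter specialization for plain system pages, following the map_local/unmap_local contract documented above; the "my_*" names are hypothetical.)

struct my_kmap_iter {
	struct ttm_kmap_iter base;
	struct page **pages;
};

static void my_map_local(struct ttm_kmap_iter *iter,
			 struct iosys_map *dmap, pgoff_t i)
{
	struct my_kmap_iter *it = container_of(iter, struct my_kmap_iter, base);

	/* kmap_local semantics: a short-lived, CPU-local mapping */
	iosys_map_set_vaddr(dmap, kmap_local_page(it->pages[i]));
}

static void my_unmap_local(struct ttm_kmap_iter *iter,
			   struct iosys_map *dmap)
{
	kunmap_local(dmap->vaddr);
}

static const struct ttm_kmap_iter_ops my_kmap_iter_ops = {
	.map_local = my_map_local,
	.unmap_local = my_unmap_local,
	.maps_tt = true,	/* maps TT memory directly, not via an aperture */
};
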
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
index aa6ba4d0cf78..b510a4812609 100644
--- a/include/drm/ttm/ttm_placement.h
+++ b/include/drm/ttm/ttm_placement.h
@@ -35,6 +35,17 @@
/*
* Memory regions for data placement.
+ *
+ * Buffers placed in TTM_PL_SYSTEM are considered under TTM's control and can
+ * be swapped out whenever TTM thinks it is a good idea.
+ * Drivers that want to use TTM_PL_SYSTEM as a valid placement need to be
+ * able to handle the issues that arise from the above manually.
+ *
+ * For BOs which reside in system memory but for which the accelerator
+ * requires direct access (i.e. their usage needs to be synchronized
+ * between the CPU and accelerator via fences), a new, driver-private
+ * placement that can handle such scenarios is a good idea.
*/
#define TTM_PL_SYSTEM 0
@@ -47,14 +58,24 @@
* top of the memory area, instead of the bottom.
*/
-#define TTM_PL_FLAG_CONTIGUOUS (1 << 19)
-#define TTM_PL_FLAG_TOPDOWN (1 << 22)
+#define TTM_PL_FLAG_CONTIGUOUS (1 << 0)
+#define TTM_PL_FLAG_TOPDOWN (1 << 1)
+
+/* For multihop handling */
+#define TTM_PL_FLAG_TEMPORARY (1 << 2)
+
+/* Placement is never used during eviction */
+#define TTM_PL_FLAG_DESIRED (1 << 3)
+
+/* Placement is only used during eviction */
+#define TTM_PL_FLAG_FALLBACK (1 << 4)
/**
* struct ttm_place
*
* @fpfn: first valid page frame number to put the object
* @lpfn: last valid page frame number to put the object
+ * @mem_type: One of TTM_PL_* where the resource should be allocated from.
* @flags: memory domain and caching flags for the object
*
* Structure indicating a possible place to put an object.
@@ -71,16 +92,12 @@ struct ttm_place {
*
* @num_placement: number of preferred placements
* @placement: preferred placements
- * @num_busy_placement: number of preferred placements when need to evict buffer
- * @busy_placement: preferred placements when need to evict buffer
*
* Structure indicating the placement you request for an object.
*/
struct ttm_placement {
unsigned num_placement;
const struct ttm_place *placement;
- unsigned num_busy_placement;
- const struct ttm_place *busy_placement;
};
#endif
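
(Illustrative sketch, not part of the patch: with busy_placement gone, eviction preferences are now expressed per place via the new flags; a VRAM-preferred, TT-fallback placement might look as follows, using the standard TTM_PL_VRAM/TTM_PL_TT memory types.)

static const struct ttm_place my_places[] = {
	{
		.mem_type = TTM_PL_VRAM,
		.flags = TTM_PL_FLAG_DESIRED,	/* never used during eviction */
	},
	{
		.mem_type = TTM_PL_TT,
		.flags = TTM_PL_FLAG_FALLBACK,	/* only used during eviction */
	},
};

static const struct ttm_placement my_placement = {
	.num_placement = ARRAY_SIZE(my_places),
	.placement = my_places,
};
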
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
index 4321728bdd11..233581670e78 100644
--- a/include/drm/ttm/ttm_pool.h
+++ b/include/drm/ttm/ttm_pool.h
@@ -32,12 +32,14 @@
#include <drm/ttm/ttm_caching.h>
struct device;
-struct ttm_tt;
-struct ttm_pool;
+struct seq_file;
+struct ttm_backup_flags;
struct ttm_operation_ctx;
+struct ttm_pool;
+struct ttm_tt;
/**
- * ttm_pool_type - Pool for a certain memory type
+ * struct ttm_pool_type - Pool for a certain memory type
*
* @pool: the pool we belong to, might be NULL for the global ones
* @order: the allocation order our pages have
@@ -58,20 +60,21 @@ struct ttm_pool_type {
};
/**
- * ttm_pool - Pool for all caching and orders
+ * struct ttm_pool - Pool for all caching and orders
*
- * @use_dma_alloc: if coherent DMA allocations should be used
- * @use_dma32: if GFP_DMA32 should be used
+ * @dev: the device we allocate pages for
+ * @nid: which numa node to use
+ * @alloc_flags: TTM_ALLOCATION_POOL_* flags
* @caching: pools for each caching/order
*/
struct ttm_pool {
struct device *dev;
+ int nid;
- bool use_dma_alloc;
- bool use_dma32;
+ unsigned int alloc_flags;
struct {
- struct ttm_pool_type orders[MAX_ORDER];
+ struct ttm_pool_type orders[NR_PAGE_ORDERS];
} caching[TTM_NUM_CACHING_TYPES];
};
@@ -80,11 +83,18 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt);
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
- bool use_dma_alloc, bool use_dma32);
+ int nid, unsigned int alloc_flags);
void ttm_pool_fini(struct ttm_pool *pool);
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);
+void ttm_pool_drop_backed_up(struct ttm_tt *tt);
+
+long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *ttm,
+ const struct ttm_backup_flags *flags);
+int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx);
+
int ttm_pool_mgr_init(unsigned long num_pages);
void ttm_pool_mgr_fini(void);
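
(Illustrative sketch, not part of the patch: the reworked ttm_pool_init() takes a NUMA node id plus the alloc_flags word in place of the two booleans; NUMA_NO_NODE and 0 are assumed here to select the defaults.)

static void my_pool_example(struct device *dev)
{
	struct ttm_pool pool;

	ttm_pool_init(&pool, dev, NUMA_NO_NODE, 0);
	/* ... ttm_pool_alloc()/ttm_pool_free() against the pool ... */
	ttm_pool_fini(&pool);
}
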
diff --git a/include/drm/ttm/ttm_range_manager.h b/include/drm/ttm/ttm_range_manager.h
new file mode 100644
index 000000000000..7963b957e9ef
--- /dev/null
+++ b/include/drm/ttm/ttm_range_manager.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef _TTM_RANGE_MANAGER_H_
+#define _TTM_RANGE_MANAGER_H_
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/drm_mm.h>
+
+/**
+ * struct ttm_range_mgr_node
+ *
+ * @base: base class we extend
+ * @mm_nodes: MM nodes, usually 1
+ *
+ * Extending the ttm_resource object to manage an address space allocation with
+ * one or more drm_mm_nodes.
+ */
+struct ttm_range_mgr_node {
+ struct ttm_resource base;
+ struct drm_mm_node mm_nodes[];
+};
+
+/**
+ * to_ttm_range_mgr_node
+ *
+ * @res: the resource to upcast
+ *
+ * Upcast the ttm_resource object into a ttm_range_mgr_node object.
+ */
+static inline struct ttm_range_mgr_node *
+to_ttm_range_mgr_node(struct ttm_resource *res)
+{
+ return container_of(res, struct ttm_range_mgr_node, base);
+}
+
+int ttm_range_man_init_nocheck(struct ttm_device *bdev,
+ unsigned type, bool use_tt,
+ unsigned long p_size);
+int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
+ unsigned type);
+static __always_inline int ttm_range_man_init(struct ttm_device *bdev,
+ unsigned int type, bool use_tt,
+ unsigned long p_size)
+{
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
+ return ttm_range_man_init_nocheck(bdev, type, use_tt, p_size);
+}
+
+static __always_inline int ttm_range_man_fini(struct ttm_device *bdev,
+ unsigned int type)
+{
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
+ return ttm_range_man_fini_nocheck(bdev, type);
+}
+#endif
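
(Illustrative sketch, not part of the patch: registering a range-managed VRAM domain through the inline wrapper, which adds the compile-time bounds check on the memory type.)

static int my_vram_mgr_init(struct ttm_device *bdev, u64 vram_size)
{
	/* use_tt = false: fixed memory needs no TT backing; size is in pages */
	return ttm_range_man_init(bdev, TTM_PL_VRAM, false,
				  vram_size >> PAGE_SHIFT);
}
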
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 6164ccf4f308..33e80f30b8b8 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -26,18 +26,76 @@
#define _TTM_RESOURCE_H_
#include <linux/types.h>
+#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/iosys-map.h>
#include <linux/dma-fence.h>
-#include <drm/drm_print.h>
+
#include <drm/ttm/ttm_caching.h>
+#include <drm/ttm/ttm_kmap_iter.h>
#define TTM_MAX_BO_PRIORITY 4U
+#define TTM_NUM_MEM_TYPES 9
+struct dentry;
+struct dmem_cgroup_device;
+struct drm_printer;
struct ttm_device;
struct ttm_resource_manager;
struct ttm_resource;
struct ttm_place;
struct ttm_buffer_object;
+struct ttm_placement;
+struct iosys_map;
+struct io_mapping;
+struct sg_table;
+struct scatterlist;
+
+/**
+ * define TTM_NUM_MOVE_FENCES - How many entities can be used for evictions
+ *
+ * Pipelined evictions can be spread on multiple entities. This
+ * is the max number of entities that can be used by the driver
+ * for that purpose.
+ */
+#define TTM_NUM_MOVE_FENCES 8
+
+/**
+ * enum ttm_lru_item_type - enumerate ttm_lru_item subclasses
+ */
+enum ttm_lru_item_type {
+ /** @TTM_LRU_RESOURCE: The resource subclass */
+ TTM_LRU_RESOURCE,
+ /** @TTM_LRU_HITCH: The iterator hitch subclass */
+ TTM_LRU_HITCH
+};
+
+/**
+ * struct ttm_lru_item - The TTM lru list node base class
+ * @link: The list link
+ * @type: The subclass type
+ */
+struct ttm_lru_item {
+ struct list_head link;
+ enum ttm_lru_item_type type;
+};
+
+/**
+ * ttm_lru_item_init() - initialize a struct ttm_lru_item
+ * @item: The item to initialize
+ * @type: The subclass type
+ */
+static inline void ttm_lru_item_init(struct ttm_lru_item *item,
+ enum ttm_lru_item_type type)
+{
+ item->type = type;
+ INIT_LIST_HEAD(&item->link);
+}
+
+static inline bool ttm_lru_item_is_res(const struct ttm_lru_item *item)
+{
+ return item->type == TTM_LRU_RESOURCE;
+}
struct ttm_resource_manager_func {
/**
@@ -45,46 +103,70 @@ struct ttm_resource_manager_func {
*
* @man: Pointer to a memory type manager.
* @bo: Pointer to the buffer object we're allocating space for.
- * @placement: Placement details.
- * @flags: Additional placement flags.
- * @mem: Pointer to a struct ttm_resource to be filled in.
+ * @place: Placement details.
+ * @res: Resulting pointer to the ttm_resource.
*
* This function should allocate space in the memory type managed
- * by @man. Placement details if
- * applicable are given by @placement. If successful,
- * @mem::mm_node should be set to a non-null value, and
- * @mem::start should be set to a value identifying the beginning
+ * by @man. Placement details if applicable are given by @place. If
+ * successful, a filled in ttm_resource object should be returned in
+ * @res. @res::start should be set to a value identifying the beginning
* of the range allocated, and the function should return zero.
- * If the memory region accommodate the buffer object, @mem::mm_node
- * should be set to NULL, and the function should return 0.
+ * If the manager can't fulfill the request, -ENOSPC should be returned.
 * If a system error occurred, preventing the request from being fulfilled,
 * the function should return a negative error code.
*
- * Note that @mem::mm_node will only be dereferenced by
- * struct ttm_resource_manager functions and optionally by the driver,
- * which has knowledge of the underlying type.
- *
- * This function may not be called from within atomic context, so
- * an implementation can and must use either a mutex or a spinlock to
- * protect any data structures managing the space.
+ * This function may not be called from within atomic context and needs
+ * to take care of its own locking to protect any data structures
+ * managing the space.
*/
int (*alloc)(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem);
+ struct ttm_resource **res);
/**
* struct ttm_resource_manager_func member free
*
* @man: Pointer to a memory type manager.
- * @mem: Pointer to a struct ttm_resource to be filled in.
+ * @res: Pointer to a struct ttm_resource to be freed.
*
- * This function frees memory type resources previously allocated
- * and that are identified by @mem::mm_node and @mem::start. May not
- * be called from within atomic context.
+ * This function frees memory type resources previously allocated.
+ * May not be called from within atomic context.
*/
void (*free)(struct ttm_resource_manager *man,
- struct ttm_resource *mem);
+ struct ttm_resource *res);
+
+ /**
+ * struct ttm_resource_manager_func member intersects
+ *
+ * @man: Pointer to a memory type manager.
+ * @res: Pointer to a struct ttm_resource to be checked.
+ * @place: Placement to check against.
+ * @size: Size of the check.
+ *
+ * Test if @res intersects with @place + @size. Used to judge if
+ * evictions are valuable or not.
+ */
+ bool (*intersects)(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
+
+ /**
+ * struct ttm_resource_manager_func member compatible
+ *
+ * @man: Pointer to a memory type manager.
+ * @res: Pointer to a struct ttm_resource to be checked.
+ * @place: Placement to check against.
+ * @size: Size of the check.
+ *
+ * Test if @res is compatible with @place + @size. Used to check whether
+ * the backing store needs to be moved.
+ */
+ bool (*compatible)(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
/**
* struct ttm_resource_manager_func member debug
@@ -104,16 +186,13 @@ struct ttm_resource_manager_func {
* struct ttm_resource_manager
*
* @use_type: The memory type is enabled.
- * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
- * managed by this memory type.
- * @gpu_offset: If used, the GPU offset of the first managed page of
- * fixed memory or the first managed location in an aperture.
+ * @use_tt: If a TT object should be used for the backing store.
* @size: Size of the managed region.
+ * @bdev: ttm device this manager belongs to
* @func: structure pointer implementing the range manager. See above
- * @move_lock: lock for move fence
- * static information. bdev::driver::io_mem_free is never used.
+ * @eviction_lock: lock for eviction fences
+ * @eviction_fences: The fences of the last pipelined move operation.
* @lru: The lru list for this memory type.
- * @move: The fence of the last pipelined move operation.
*
* This structure is used to identify and manage memory types for a device.
*/
@@ -123,20 +202,31 @@ struct ttm_resource_manager {
*/
bool use_type;
bool use_tt;
+ struct ttm_device *bdev;
uint64_t size;
const struct ttm_resource_manager_func *func;
- spinlock_t move_lock;
- /*
- * Protected by the global->lru_lock.
+ /* This is very similar to a dma_resv object, but locking rules make
+ * it difficult to use one in this context.
*/
+ spinlock_t eviction_lock;
+ struct dma_fence *eviction_fences[TTM_NUM_MOVE_FENCES];
+ /*
+ * Protected by the bdev->lru_lock.
+ */
struct list_head lru[TTM_MAX_BO_PRIORITY];
- /*
- * Protected by @move_lock.
+ /**
+ * @usage: How much of the resources are used, protected by the
+ * bdev->lru_lock.
+ */
+ uint64_t usage;
+
+ /**
+ * @cg: &dmem_cgroup_region used for memory accounting, if not NULL.
*/
- struct dma_fence *move;
+ struct dmem_cgroup_region *cg;
};
/**
@@ -145,6 +235,7 @@ struct ttm_resource_manager {
* @addr: mapped virtual address
* @offset: physical addr
* @is_iomem: is this io memory ?
+ * @caching: See enum ttm_caching
*
* Structure indicating the bus placement of an object.
*/
@@ -158,24 +249,143 @@ struct ttm_bus_placement {
/**
* struct ttm_resource
*
- * @mm_node: Memory manager node.
- * @size: Requested size of memory region.
- * @num_pages: Actual size of memory region in pages.
- * @page_alignment: Page alignment.
+ * @start: Start of the allocation.
+ * @size: Actual size of resource in bytes.
+ * @mem_type: Resource type of the allocation.
* @placement: Placement flags.
* @bus: Placement on io bus accessible to the CPU
+ * @bo: weak reference to the BO, protected by ttm_device::lru_lock
+ * @css: cgroup state this resource is charged to
*
* Structure indicating the placement and space resources used by a
* buffer object.
*/
struct ttm_resource {
- void *mm_node;
unsigned long start;
- unsigned long num_pages;
- uint32_t page_alignment;
+ size_t size;
uint32_t mem_type;
uint32_t placement;
struct ttm_bus_placement bus;
+ struct ttm_buffer_object *bo;
+
+ struct dmem_cgroup_pool_state *css;
+
+ /**
+ * @lru: Least recently used list, see &ttm_resource_manager.lru
+ */
+ struct ttm_lru_item lru;
+};
+
+/**
+ * ttm_lru_item_to_res() - Downcast a struct ttm_lru_item to a struct ttm_resource
+ * @item: The struct ttm_lru_item to downcast
+ *
+ * Return: Pointer to the embedding struct ttm_resource
+ */
+static inline struct ttm_resource *
+ttm_lru_item_to_res(struct ttm_lru_item *item)
+{
+ return container_of(item, struct ttm_resource, lru);
+}
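
(Illustrative sketch, not part of the patch: because the LRU now carries both resources and iterator hitches, a raw list walk must check the subclass before downcasting; lock acquisition is elided.)

static void my_inspect_lru(struct ttm_resource_manager *man)
{
	struct ttm_lru_item *item;

	/* assumes the caller holds bdev->lru_lock */
	list_for_each_entry(item, &man->lru[0], link) {
		struct ttm_resource *res;

		if (!ttm_lru_item_is_res(item))
			continue;	/* skip cursor hitches */

		res = ttm_lru_item_to_res(item);
		(void)res;	/* placeholder for real per-resource work */
	}
}
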
+
+/**
+ * struct ttm_lru_bulk_move_pos
+ *
+ * @first: first res in the bulk move range
+ * @last: last res in the bulk move range
+ *
+ * Range of resources for a lru bulk move.
+ */
+struct ttm_lru_bulk_move_pos {
+ struct ttm_resource *first;
+ struct ttm_resource *last;
+};
+
+/**
+ * struct ttm_lru_bulk_move
+ * @pos: first/last lru entry for resources in each domain/priority
+ * @cursor_list: The list of cursors currently traversing any of
+ * the sublists of @pos. Protected by the ttm device's lru_lock.
+ *
+ * Container for the current bulk move state. Should be used with
+ * ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move().
+ * All BOs in a bulk_move structure need to share the same reservation object to
+ * ensure that the bulk as a whole is locked for eviction even if only one BO of
+ * the bulk is evicted.
+ */
+struct ttm_lru_bulk_move {
+ struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
+ struct list_head cursor_list;
+};
+
+/**
+ * struct ttm_resource_cursor
+ * @man: The resource manager currently being iterated over
+ * @hitch: A hitch list node inserted before the next resource
+ * to iterate over.
+ * @bulk_link: A list link for the list of cursors traversing the
+ * bulk sublist of @bulk. Protected by the ttm device's lru_lock.
+ * @bulk: Pointer to struct ttm_lru_bulk_move whose subrange @hitch is
+ * inserted to. NULL if none. Never dereference this pointer since
+ * the struct ttm_lru_bulk_move object pointed to might have been
+ * freed. The pointer is only for comparison.
+ * @mem_type: The memory type of the LRU list being traversed.
+ * This field is valid iff @bulk != NULL.
+ * @priority: the current priority
+ *
+ * Cursor to iterate over the resources in a manager.
+ */
+struct ttm_resource_cursor {
+ struct ttm_resource_manager *man;
+ struct ttm_lru_item hitch;
+ struct list_head bulk_link;
+ struct ttm_lru_bulk_move *bulk;
+ unsigned int mem_type;
+ unsigned int priority;
+};
+
+void ttm_resource_cursor_init(struct ttm_resource_cursor *cursor,
+ struct ttm_resource_manager *man);
+
+void ttm_resource_cursor_fini(struct ttm_resource_cursor *cursor);
+
+/**
+ * struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping +
+ * struct sg_table backed struct ttm_resource.
+ * @base: Embedded struct ttm_kmap_iter providing the usage interface.
+ * @iomap: struct io_mapping representing the underlying linear io_memory.
+ * @st: sg_table into @iomap, representing the memory of the struct ttm_resource.
+ * @start: Offset that needs to be subtracted from @st to make
+ * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
+ * @cache: Scatterlist traversal cache for fast lookups.
+ * @cache.sg: Pointer to the currently cached scatterlist segment.
+ * @cache.i: First index of @sg. PAGE_SIZE granularity.
+ * @cache.end: Last index + 1 of @sg. PAGE_SIZE granularity.
+ * @cache.offs: First offset into @iomap of @sg. PAGE_SIZE granularity.
+ */
+struct ttm_kmap_iter_iomap {
+ struct ttm_kmap_iter base;
+ struct io_mapping *iomap;
+ struct sg_table *st;
+ resource_size_t start;
+ struct {
+ struct scatterlist *sg;
+ pgoff_t i;
+ pgoff_t end;
+ pgoff_t offs;
+ } cache;
+};
+
+/**
+ * struct ttm_kmap_iter_linear_io - Iterator specialization for linear io
+ * @base: The base iterator
+ * @dmap: Points to the starting address of the region
+ * @needs_unmap: Whether we need to unmap on fini
+ */
+struct ttm_kmap_iter_linear_io {
+ struct ttm_kmap_iter base;
+ struct iosys_map dmap;
+ bool needs_unmap;
};
/**
@@ -221,22 +431,94 @@ static inline bool ttm_resource_manager_used(struct ttm_resource_manager *man)
static inline void
ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
{
- dma_fence_put(man->move);
- man->move = NULL;
+ int i;
+
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
+ dma_fence_put(man->eviction_fences[i]);
+ man->eviction_fences[i] = NULL;
+ }
}
+void ttm_lru_bulk_move_init(struct ttm_lru_bulk_move *bulk);
+void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk);
+void ttm_lru_bulk_move_fini(struct ttm_device *bdev,
+ struct ttm_lru_bulk_move *bulk);
+
+void ttm_resource_add_bulk_move(struct ttm_resource *res,
+ struct ttm_buffer_object *bo);
+void ttm_resource_del_bulk_move(struct ttm_resource *res,
+ struct ttm_buffer_object *bo);
+void ttm_resource_move_to_lru_tail(struct ttm_resource *res);
+
+void ttm_resource_init(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource *res);
+void ttm_resource_fini(struct ttm_resource_manager *man,
+ struct ttm_resource *res);
+
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *res);
-void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res);
+ struct ttm_resource **res,
+ struct dmem_cgroup_pool_state **ret_limit_pool);
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
+bool ttm_resource_intersects(struct ttm_device *bdev,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size);
+bool ttm_resource_compatible(struct ttm_resource *res,
+ struct ttm_placement *placement,
+ bool evicting);
+void ttm_resource_set_bo(struct ttm_resource *res,
+ struct ttm_buffer_object *bo);
void ttm_resource_manager_init(struct ttm_resource_manager *man,
- unsigned long p_size);
+ struct ttm_device *bdev,
+ uint64_t size);
int ttm_resource_manager_evict_all(struct ttm_device *bdev,
struct ttm_resource_manager *man);
+uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man);
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
struct drm_printer *p);
+struct ttm_resource *
+ttm_resource_manager_first(struct ttm_resource_cursor *cursor);
+struct ttm_resource *
+ttm_resource_manager_next(struct ttm_resource_cursor *cursor);
+
+struct ttm_resource *
+ttm_lru_first_res_or_null(struct list_head *head);
+
+/**
+ * ttm_resource_manager_for_each_res - iterate over all resources
+ * @cursor: struct ttm_resource_cursor for the current position
+ * @res: the current resource
+ *
+ * Iterate over all the evictable resources in a resource manager.
+ */
+#define ttm_resource_manager_for_each_res(cursor, res) \
+ for (res = ttm_resource_manager_first(cursor); res; \
+ res = ttm_resource_manager_next(cursor))
+
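(Illustrative sketch, not part of the patch: typical use of the cursor-based iteration declared above; locking around the walk is driver-specific and elided.)

static void my_walk_resources(struct ttm_resource_manager *man)
{
	struct ttm_resource_cursor cursor;
	struct ttm_resource *res;

	ttm_resource_cursor_init(&cursor, man);
	ttm_resource_manager_for_each_res(&cursor, res) {
		/* ... evaluate res, e.g. as an eviction candidate ... */
	}
	ttm_resource_cursor_fini(&cursor);
}
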
+struct ttm_kmap_iter *
+ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
+ struct io_mapping *iomap,
+ struct sg_table *st,
+ resource_size_t start);
+
+struct ttm_kmap_iter_linear_io;
+
+struct ttm_kmap_iter *
+ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
+ struct ttm_device *bdev,
+ struct ttm_resource *mem);
+
+void ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
+ struct ttm_device *bdev,
+ struct ttm_resource *mem);
+
+void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
+ struct dentry * parent,
+ const char *name);
#endif
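
(Illustrative sketch, not part of the patch: a skeleton of the reworked alloc contract, in which the manager allocates the struct ttm_resource itself and hands it back through @res; struct my_resource and my_hw_reserve_range() are hypothetical.)

struct my_resource {
	struct ttm_resource base;
	u64 offset;	/* hypothetical driver bookkeeping */
};

static int my_mgr_alloc(struct ttm_resource_manager *man,
			struct ttm_buffer_object *bo,
			const struct ttm_place *place,
			struct ttm_resource **res)
{
	struct my_resource *mres = kzalloc(sizeof(*mres), GFP_KERNEL);

	if (!mres)
		return -ENOMEM;

	ttm_resource_init(bo, place, &mres->base);

	if (my_hw_reserve_range(mres)) {	/* hypothetical */
		ttm_resource_fini(man, &mres->base);
		kfree(mres);
		return -ENOSPC;	/* manager full: the caller may try eviction */
	}

	*res = &mres->base;
	return 0;
}
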
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 134d09ef7766..406437ad674b 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -27,51 +27,156 @@
#ifndef _TTM_TT_H_
#define _TTM_TT_H_
+#include <linux/pagemap.h>
#include <linux/types.h>
#include <drm/ttm/ttm_caching.h>
+#include <drm/ttm/ttm_kmap_iter.h>
-struct ttm_bo_device;
+struct ttm_backup;
+struct ttm_device;
struct ttm_tt;
struct ttm_resource;
struct ttm_buffer_object;
struct ttm_operation_ctx;
-
-#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
-#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
-#define TTM_PAGE_FLAG_SG (1 << 8)
-#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
-
-#define TTM_PAGE_FLAG_PRIV_POPULATED (1 << 31)
+struct ttm_pool_tt_restore;
/**
- * struct ttm_tt
- *
- * @pages: Array of pages backing the data.
- * @page_flags: see TTM_PAGE_FLAG_*
- * @num_pages: Number of pages in the page array.
- * @sg: for SG objects via dma-buf
- * @dma_address: The DMA (bus) addresses of the pages
- * @swap_storage: Pointer to shmem struct file for swap storage.
- * @pages_list: used by some page allocation backend
- * @caching: The current caching state of the pages.
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
+ * struct ttm_tt - This is a structure holding the pages, caching- and aperture
+ * binding status for a buffer object that isn't backed by fixed (VRAM / AGP)
* memory.
*/
struct ttm_tt {
+ /** @pages: Array of pages backing the data. */
struct page **pages;
+ /**
+ * @page_flags: The page flags.
+ *
+ * Supported values:
+ *
+ * TTM_TT_FLAG_SWAPPED: Set by TTM when the pages have been unpopulated
+ * and swapped out by TTM. Calling ttm_tt_populate() will then swap the
+ * pages back in, and unset the flag. Drivers should in general never
+ * need to touch this.
+ *
+ * TTM_TT_FLAG_ZERO_ALLOC: Set if the pages will be zeroed on
+ * allocation.
+ *
+ * TTM_TT_FLAG_EXTERNAL: Set if the underlying pages were allocated
+ * externally, like with dma-buf or userptr. This effectively disables
+ * TTM swapping out such pages. Also important is to prevent TTM from
+ * ever directly mapping these pages.
+ *
+ * Note that enum ttm_bo_type.ttm_bo_type_sg objects will always enable
+ * this flag.
+ *
+ * TTM_TT_FLAG_EXTERNAL_MAPPABLE: Same behaviour as
+ * TTM_TT_FLAG_EXTERNAL, but with the reduced restriction that it is
+ * still valid to use TTM to map the pages directly. This is useful when
+ * implementing a ttm_tt backend which still allocates driver owned
+ * pages underneath (say with shmem).
+ *
+ * Note that since this also implies TTM_TT_FLAG_EXTERNAL, the usage
+ * here should always be:
+ *
+ * page_flags = TTM_TT_FLAG_EXTERNAL |
+ * TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+ *
+ * TTM_TT_FLAG_DECRYPTED: The mapped ttm pages should be marked as
+ * not encrypted. The framework will try to match what the dma layer
+ * is doing, but note that it is a little fragile because ttm page
+ * fault handling abuses the DMA api a bit and dma_map_attrs can't be
+ * used to assure pgprot always matches.
+ *
+ * TTM_TT_FLAG_BACKED_UP: TTM internal only. This is set if the
+ * struct ttm_tt has been (possibly partially) backed up.
+ *
+ * TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is
+ * set by TTM after ttm_tt_populate() has successfully returned, and is
+ * then unset when TTM calls ttm_tt_unpopulate().
+ *
+ */
+#define TTM_TT_FLAG_SWAPPED BIT(0)
+#define TTM_TT_FLAG_ZERO_ALLOC BIT(1)
+#define TTM_TT_FLAG_EXTERNAL BIT(2)
+#define TTM_TT_FLAG_EXTERNAL_MAPPABLE BIT(3)
+#define TTM_TT_FLAG_DECRYPTED BIT(4)
+#define TTM_TT_FLAG_BACKED_UP BIT(5)
+
+#define TTM_TT_FLAG_PRIV_POPULATED BIT(6)
uint32_t page_flags;
+ /** @num_pages: Number of pages in the page array. */
uint32_t num_pages;
+ /** @sg: for SG objects via dma-buf. */
struct sg_table *sg;
+ /** @dma_address: The DMA (bus) addresses of the pages. */
dma_addr_t *dma_address;
+ /** @swap_storage: Pointer to shmem struct file for swap storage. */
struct file *swap_storage;
+ /**
+ * @backup: Pointer to backup struct for backed up tts.
+ * Could be unified with @swap_storage. Meanwhile, the driver's
+ * ttm_tt_create() callback is responsible for assigning
+ * this field.
+ */
+ struct file *backup;
+ /**
+ * @caching: The current caching state of the pages, see enum
+ * ttm_caching.
+ */
enum ttm_caching caching;
+ /** @restore: Partial restoration from backup state. TTM private */
+ struct ttm_pool_tt_restore *restore;
+};
+
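(Illustrative sketch, not part of the patch: a driver ttm_tt_create() backend combining the external flags exactly as the page_flags documentation above prescribes; the "my_*" names are hypothetical.)

static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
				       uint32_t page_flags)
{
	struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;

	/* shmem-backed, driver-owned pages: external but still mappable */
	page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;

	if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
		kfree(tt);
		return NULL;
	}
	return tt;
}
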
+/**
+ * struct ttm_kmap_iter_tt - Specialization of a mapping iterator for a tt.
+ * @base: Embedded struct ttm_kmap_iter providing the usage interface
+ * @tt: Cached struct ttm_tt.
+ * @prot: Cached page protection for mapping.
+ */
+struct ttm_kmap_iter_tt {
+ struct ttm_kmap_iter base;
+ struct ttm_tt *tt;
+ pgprot_t prot;
};
static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
{
- return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
+ return tt->page_flags & TTM_TT_FLAG_PRIV_POPULATED;
+}
+
+/**
+ * ttm_tt_is_swapped() - Whether the ttm_tt is swapped out or backed up
+ * @tt: The struct ttm_tt.
+ *
+ * Return: true if swapped or backed up, false otherwise.
+ */
+static inline bool ttm_tt_is_swapped(const struct ttm_tt *tt)
+{
+ return tt->page_flags & (TTM_TT_FLAG_SWAPPED | TTM_TT_FLAG_BACKED_UP);
+}
+
+/**
+ * ttm_tt_is_backed_up() - Whether the ttm_tt is backed up
+ * @tt: The struct ttm_tt.
+ *
+ * Return: true if backed up, false otherwise.
+ */
+static inline bool ttm_tt_is_backed_up(const struct ttm_tt *tt)
+{
+ return tt->page_flags & TTM_TT_FLAG_BACKED_UP;
+}
+
+/**
+ * ttm_tt_clear_backed_up() - Clear the ttm_tt backed-up status
+ * @tt: The struct ttm_tt.
+ *
+ * Drivers can use this function to clear the backed-up status,
+ * for example before destroying or re-validating a purged tt.
+ */
+static inline void ttm_tt_clear_backed_up(struct ttm_tt *tt)
+{
+ tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
}
/**
@@ -90,8 +195,9 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
*
* @ttm: The struct ttm_tt.
* @bo: The buffer object we create the ttm for.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
* @caching: the desired caching state of the pages
+ * @extra_pages: Extra pages needed for the driver.
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
@@ -99,7 +205,8 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
* NULL: Out of memory.
*/
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags, enum ttm_caching caching);
+ uint32_t page_flags, enum ttm_caching caching,
+ unsigned long extra_pages);
int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
uint32_t page_flags, enum ttm_caching caching);
@@ -113,8 +220,9 @@ int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
void ttm_tt_fini(struct ttm_tt *ttm);
/**
- * ttm_ttm_destroy:
+ * ttm_tt_destroy:
*
+ * @bdev: the ttm_device this object belongs to
* @ttm: The struct ttm_tt.
*
* Unbind, unpopulate and destroy common struct ttm_tt.
@@ -122,13 +230,6 @@ void ttm_tt_fini(struct ttm_tt *ttm);
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
/**
- * ttm_tt_destroy_common:
- *
- * Called from driver to destroy common path.
- */
-void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm);
-
-/**
* ttm_tt_swapin:
*
* @ttm: The struct ttm_tt.
@@ -142,23 +243,63 @@ int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
/**
* ttm_tt_populate - allocate pages for a ttm
*
+ * @bdev: the ttm_device this object belongs to
* @ttm: Pointer to the ttm_tt structure
+ * @ctx: operation context for populating the tt object.
*
* Calls the driver method to allocate pages for a ttm
*/
-int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx);
/**
* ttm_tt_unpopulate - free pages from a ttm
*
+ * @bdev: the ttm_device this object belongs to
* @ttm: Pointer to the ttm_tt structure
*
* Calls the driver method to free all pages from a ttm
*/
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
+/**
+ * ttm_tt_mark_for_clear - Mark pages for clearing on populate.
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Marks pages for clearing so that the next time the page vector is
+ * populated, the pages will be cleared.
+ */
+static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)
+{
+ ttm->page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
+}
+
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
+struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
+ struct ttm_tt *tt);
+unsigned long ttm_tt_pages_limit(void);
+
+/**
+ * struct ttm_backup_flags - Flags to govern backup behaviour.
+ * @purge: Free pages without backing up. Bypass pools.
+ * @writeback: Attempt to copy contents directly to swap space, even
+ * if that means blocking on writes to external memory.
+ */
+struct ttm_backup_flags {
+ u32 purge : 1;
+ u32 writeback : 1;
+};
+
+long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_backup_flags flags);
+
+int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt,
+ const struct ttm_operation_ctx *ctx);
+
+int ttm_tt_setup_backup(struct ttm_tt *tt);
+
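(Illustrative sketch, not part of the patch: driving the new backup interface; per the flag documentation above, purge frees pages without preserving content while writeback pushes it to swap.)

static long my_shrink_tt(struct ttm_device *bdev, struct ttm_tt *tt,
			 bool purge)
{
	const struct ttm_backup_flags flags = {
		.purge = purge,		/* free pages without backing them up */
		.writeback = !purge,	/* otherwise copy contents to swap */
	};

	return ttm_tt_backup(bdev, tt, flags);
}
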
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>
@@ -167,7 +308,7 @@ void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
*
* @bo: Buffer object we allocate the ttm for.
* @bridge: The agp bridge this device is sitting on.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @page_flags: Page flags as identified by TTM_TT_FLAG_XX flags.
*
*
* Create a TTM backend that uses the indicated AGP bridge as an aperture
diff --git a/include/dt-bindings/arm/mhuv3-dt.h b/include/dt-bindings/arm/mhuv3-dt.h
new file mode 100644
index 000000000000..4575406919dd
--- /dev/null
+++ b/include/dt-bindings/arm/mhuv3-dt.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for the defined MHUv3 types.
+ */
+
+#ifndef _DT_BINDINGS_ARM_MHUV3_DT_H
+#define _DT_BINDINGS_ARM_MHUV3_DT_H
+
+#define DBE_EXT 0
+#define FCE_EXT 1
+#define FE_EXT 2
+
+#endif /* _DT_BINDINGS_ARM_MHUV3_DT_H */
diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h
new file mode 100644
index 000000000000..8776844e0eeb
--- /dev/null
+++ b/include/dt-bindings/arm/qcom,ids.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Linaro Ltd
+ * Author: Krzysztof Kozlowski <krzk@kernel.org> based on previous work of Kumar Gala.
+ */
+#ifndef _DT_BINDINGS_ARM_QCOM_IDS_H
+#define _DT_BINDINGS_ARM_QCOM_IDS_H
+
+/*
+ * The MSM chipset and hardware revision used by Qualcomm bootloaders, DTS for
+ * older chipsets (qcom,msm-id) and in socinfo driver:
+ */
+#define QCOM_ID_MSM8260 70
+#define QCOM_ID_MSM8660 71
+#define QCOM_ID_APQ8060 86
+#define QCOM_ID_MSM8960 87
+#define QCOM_ID_APQ8064 109
+#define QCOM_ID_MSM8930 116
+#define QCOM_ID_MSM8630 117
+#define QCOM_ID_MSM8230 118
+#define QCOM_ID_APQ8030 119
+#define QCOM_ID_MSM8627 120
+#define QCOM_ID_MSM8227 121
+#define QCOM_ID_MSM8660A 122
+#define QCOM_ID_MSM8260A 123
+#define QCOM_ID_APQ8060A 124
+#define QCOM_ID_MSM8974 126
+#define QCOM_ID_MSM8225 127
+#define QCOM_ID_MSM8625 129
+#define QCOM_ID_MPQ8064 130
+#define QCOM_ID_MSM8960AB 138
+#define QCOM_ID_APQ8060AB 139
+#define QCOM_ID_MSM8260AB 140
+#define QCOM_ID_MSM8660AB 141
+#define QCOM_ID_MSM8930AA 142
+#define QCOM_ID_MSM8630AA 143
+#define QCOM_ID_MSM8230AA 144
+#define QCOM_ID_MSM8626 145
+#define QCOM_ID_MSM8610 147
+#define QCOM_ID_APQ8064AB 153
+#define QCOM_ID_MSM8930AB 154
+#define QCOM_ID_MSM8630AB 155
+#define QCOM_ID_MSM8230AB 156
+#define QCOM_ID_APQ8030AB 157
+#define QCOM_ID_MSM8226 158
+#define QCOM_ID_MSM8526 159
+#define QCOM_ID_APQ8030AA 160
+#define QCOM_ID_MSM8110 161
+#define QCOM_ID_MSM8210 162
+#define QCOM_ID_MSM8810 163
+#define QCOM_ID_MSM8212 164
+#define QCOM_ID_MSM8612 165
+#define QCOM_ID_MSM8112 166
+#define QCOM_ID_MSM8125 167
+#define QCOM_ID_MSM8225Q 168
+#define QCOM_ID_MSM8625Q 169
+#define QCOM_ID_MSM8125Q 170
+#define QCOM_ID_APQ8064AA 172
+#define QCOM_ID_APQ8084 178
+#define QCOM_ID_MSM8130 179
+#define QCOM_ID_MSM8130AA 180
+#define QCOM_ID_MSM8130AB 181
+#define QCOM_ID_MSM8627AA 182
+#define QCOM_ID_MSM8227AA 183
+#define QCOM_ID_APQ8074 184
+#define QCOM_ID_MSM8274 185
+#define QCOM_ID_MSM8674 186
+#define QCOM_ID_MDM9635 187
+#define QCOM_ID_MSM8974PRO_AC 194
+#define QCOM_ID_MSM8126 198
+#define QCOM_ID_APQ8026 199
+#define QCOM_ID_MSM8926 200
+#define QCOM_ID_IPQ8062 201
+#define QCOM_ID_IPQ8064 202
+#define QCOM_ID_IPQ8066 203
+#define QCOM_ID_IPQ8068 204
+#define QCOM_ID_MSM8326 205
+#define QCOM_ID_MSM8916 206
+#define QCOM_ID_MSM8994 207
+#define QCOM_ID_APQ8074PRO_AA 208
+#define QCOM_ID_APQ8074PRO_AB 209
+#define QCOM_ID_APQ8074PRO_AC 210
+#define QCOM_ID_MSM8274PRO_AA 211
+#define QCOM_ID_MSM8274PRO_AB 212
+#define QCOM_ID_MSM8274PRO_AC 213
+#define QCOM_ID_MSM8674PRO_AA 214
+#define QCOM_ID_MSM8674PRO_AB 215
+#define QCOM_ID_MSM8674PRO_AC 216
+#define QCOM_ID_MSM8974PRO_AA 217
+#define QCOM_ID_MSM8974PRO_AB 218
+#define QCOM_ID_APQ8028 219
+#define QCOM_ID_MSM8128 220
+#define QCOM_ID_MSM8228 221
+#define QCOM_ID_MSM8528 222
+#define QCOM_ID_MSM8628 223
+#define QCOM_ID_MSM8928 224
+#define QCOM_ID_MSM8510 225
+#define QCOM_ID_MSM8512 226
+#define QCOM_ID_MSM8936 233
+#define QCOM_ID_MDM9640 234
+#define QCOM_ID_MSM8939 239
+#define QCOM_ID_APQ8036 240
+#define QCOM_ID_APQ8039 241
+#define QCOM_ID_MSM8236 242
+#define QCOM_ID_MSM8636 243
+#define QCOM_ID_MSM8909 245
+#define QCOM_ID_MSM8996 246
+#define QCOM_ID_APQ8016 247
+#define QCOM_ID_MSM8216 248
+#define QCOM_ID_MSM8116 249
+#define QCOM_ID_MSM8616 250
+#define QCOM_ID_MSM8992 251
+#define QCOM_ID_APQ8092 252
+#define QCOM_ID_APQ8094 253
+#define QCOM_ID_MSM8209 258
+#define QCOM_ID_MSM8208 259
+#define QCOM_ID_MDM9209 260
+#define QCOM_ID_MDM9309 261
+#define QCOM_ID_MDM9609 262
+#define QCOM_ID_MSM8239 263
+#define QCOM_ID_MSM8952 264
+#define QCOM_ID_APQ8009 265
+#define QCOM_ID_MSM8956 266
+#define QCOM_ID_MSM8929 268
+#define QCOM_ID_MSM8629 269
+#define QCOM_ID_MSM8229 270
+#define QCOM_ID_APQ8029 271
+#define QCOM_ID_APQ8056 274
+#define QCOM_ID_MSM8609 275
+#define QCOM_ID_APQ8076 277
+#define QCOM_ID_MSM8976 278
+#define QCOM_ID_MDM9650 279
+#define QCOM_ID_IPQ8065 280
+#define QCOM_ID_IPQ8069 281
+#define QCOM_ID_MDM9655 283
+#define QCOM_ID_MDM9250 284
+#define QCOM_ID_MDM9255 285
+#define QCOM_ID_MDM9350 286
+#define QCOM_ID_APQ8052 289
+#define QCOM_ID_MDM9607 290
+#define QCOM_ID_APQ8096 291
+#define QCOM_ID_MSM8998 292
+#define QCOM_ID_MSM8953 293
+#define QCOM_ID_MSM8937 294
+#define QCOM_ID_APQ8037 295
+#define QCOM_ID_MDM8207 296
+#define QCOM_ID_MDM9207 297
+#define QCOM_ID_MDM9307 298
+#define QCOM_ID_MDM9628 299
+#define QCOM_ID_MSM8909W 300
+#define QCOM_ID_APQ8009W 301
+#define QCOM_ID_MSM8996L 302
+#define QCOM_ID_MSM8917 303
+#define QCOM_ID_APQ8053 304
+#define QCOM_ID_MSM8996SG 305
+#define QCOM_ID_APQ8017 307
+#define QCOM_ID_MSM8217 308
+#define QCOM_ID_MSM8617 309
+#define QCOM_ID_MSM8996AU 310
+#define QCOM_ID_APQ8096AU 311
+#define QCOM_ID_APQ8096SG 312
+#define QCOM_ID_MSM8940 313
+#define QCOM_ID_SDX201 314
+#define QCOM_ID_SDM660 317
+#define QCOM_ID_SDM630 318
+#define QCOM_ID_APQ8098 319
+#define QCOM_ID_MSM8920 320
+#define QCOM_ID_SDM845 321
+#define QCOM_ID_MDM9206 322
+#define QCOM_ID_IPQ8074 323
+#define QCOM_ID_SDA660 324
+#define QCOM_ID_SDM658 325
+#define QCOM_ID_SDA658 326
+#define QCOM_ID_SDA630 327
+#define QCOM_ID_MSM8905 331
+#define QCOM_ID_SDX202 333
+#define QCOM_ID_SDM670 336
+#define QCOM_ID_SDM450 338
+#define QCOM_ID_SM8150 339
+#define QCOM_ID_SDA845 341
+#define QCOM_ID_IPQ8072 342
+#define QCOM_ID_IPQ8076 343
+#define QCOM_ID_IPQ8078 344
+#define QCOM_ID_SDM636 345
+#define QCOM_ID_SDA636 346
+#define QCOM_ID_SDM632 349
+#define QCOM_ID_SDA632 350
+#define QCOM_ID_SDA450 351
+#define QCOM_ID_SDM439 353
+#define QCOM_ID_SDM429 354
+#define QCOM_ID_SM8250 356
+#define QCOM_ID_SA8155 362
+#define QCOM_ID_SDA439 363
+#define QCOM_ID_SDA429 364
+#define QCOM_ID_SM7150 365
+#define QCOM_ID_SM7150P 366
+#define QCOM_ID_IPQ8070 375
+#define QCOM_ID_IPQ8071 376
+#define QCOM_ID_QM215 386
+#define QCOM_ID_IPQ8072A 389
+#define QCOM_ID_IPQ8074A 390
+#define QCOM_ID_IPQ8076A 391
+#define QCOM_ID_IPQ8078A 392
+#define QCOM_ID_SM6125 394
+#define QCOM_ID_IPQ8070A 395
+#define QCOM_ID_IPQ8071A 396
+#define QCOM_ID_IPQ8172 397
+#define QCOM_ID_IPQ8173 398
+#define QCOM_ID_IPQ8174 399
+#define QCOM_ID_IPQ6018 402
+#define QCOM_ID_IPQ6028 403
+#define QCOM_ID_SDM429W 416
+#define QCOM_ID_SM4250 417
+#define QCOM_ID_IPQ6000 421
+#define QCOM_ID_IPQ6010 422
+#define QCOM_ID_SC7180 425
+#define QCOM_ID_SM6350 434
+#define QCOM_ID_QCM2150 436
+#define QCOM_ID_SDA429W 437
+#define QCOM_ID_SM8350 439
+#define QCOM_ID_QCM2290 441
+#define QCOM_ID_SM7125 443
+#define QCOM_ID_SM6115 444
+#define QCOM_ID_IPQ5010 446
+#define QCOM_ID_IPQ5018 447
+#define QCOM_ID_IPQ5028 448
+#define QCOM_ID_SC8280XP 449
+#define QCOM_ID_IPQ6005 453
+#define QCOM_ID_QRB5165 455
+#define QCOM_ID_SM8450 457
+#define QCOM_ID_SM7225 459
+#define QCOM_ID_SA8295P 460
+#define QCOM_ID_SA8540P 461
+#define QCOM_ID_QCM4290 469
+#define QCOM_ID_QCS4290 470
+#define QCOM_ID_SM7325 475
+#define QCOM_ID_SM8450_2 480
+#define QCOM_ID_SM8450_3 482
+#define QCOM_ID_SC7280 487
+#define QCOM_ID_SC7180P 495
+#define QCOM_ID_QCM6490 497
+#define QCOM_ID_QCS6490 498
+#define QCOM_ID_SM7325P 499
+#define QCOM_ID_IPQ5000 503
+#define QCOM_ID_IPQ0509 504
+#define QCOM_ID_IPQ0518 505
+#define QCOM_ID_SM6375 507
+#define QCOM_ID_IPQ9514 510
+#define QCOM_ID_IPQ9550 511
+#define QCOM_ID_IPQ9554 512
+#define QCOM_ID_IPQ9570 513
+#define QCOM_ID_IPQ9574 514
+#define QCOM_ID_SM8550 519
+#define QCOM_ID_IPQ5016 520
+#define QCOM_ID_IPQ9510 521
+#define QCOM_ID_QRB4210 523
+#define QCOM_ID_QRB2210 524
+#define QCOM_ID_SAR2130P 525
+#define QCOM_ID_SM8475 530
+#define QCOM_ID_SM8475P 531
+#define QCOM_ID_SA8255P 532
+#define QCOM_ID_SA8775P 534
+#define QCOM_ID_QRU1000 539
+#define QCOM_ID_SM8475_2 540
+#define QCOM_ID_QDU1000 545
+#define QCOM_ID_X1E80100 555
+#define QCOM_ID_SM8650 557
+#define QCOM_ID_SM4450 568
+#define QCOM_ID_SAR1130P 579
+#define QCOM_ID_QDU1010 587
+#define QCOM_ID_QRU1032 588
+#define QCOM_ID_QRU1052 589
+#define QCOM_ID_QRU1062 590
+#define QCOM_ID_IPQ5332 592
+#define QCOM_ID_IPQ5322 593
+#define QCOM_ID_IPQ5312 594
+#define QCOM_ID_IPQ5302 595
+#define QCOM_ID_QCS8550 603
+#define QCOM_ID_QCM8550 604
+#define QCOM_ID_SM8750 618
+#define QCOM_ID_IPQ5300 624
+#define QCOM_ID_SM7635 636
+#define QCOM_ID_SM6650 640
+#define QCOM_ID_SM6650P 641
+#define QCOM_ID_IPQ5321 650
+#define QCOM_ID_IPQ5424 651
+#define QCOM_ID_QCM6690 657
+#define QCOM_ID_QCS6690 658
+#define QCOM_ID_SM8850 660
+#define QCOM_ID_IPQ5404 671
+#define QCOM_ID_QCS9100 667
+#define QCOM_ID_QCS8300 674
+#define QCOM_ID_QCS8275 675
+#define QCOM_ID_QCS9075 676
+#define QCOM_ID_QCS615 680
+
+/*
+ * The board type and revision information, used by Qualcomm bootloaders and
+ * DTS for older chipsets (qcom,board-id):
+ */
+#define QCOM_BOARD_ID(a, major, minor) \
+ (((major & 0xff) << 16) | ((minor & 0xff) << 8) | QCOM_BOARD_ID_##a)
+
+#define QCOM_BOARD_ID_MTP 8
+#define QCOM_BOARD_ID_DRAGONBOARD 10
+#define QCOM_BOARD_ID_QRD 11
+#define QCOM_BOARD_ID_SBC 24
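
(Illustrative worked expansion of the macro above, not part of the patch:)

/* QCOM_BOARD_ID(MTP, 1, 0) == (1 << 16) | (0 << 8) | 8 == 0x10008 */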
+
+#endif /* _DT_BINDINGS_ARM_QCOM_IDS_H */
diff --git a/include/dt-bindings/ata/ahci.h b/include/dt-bindings/ata/ahci.h
new file mode 100644
index 000000000000..b3f3b7cf9af8
--- /dev/null
+++ b/include/dt-bindings/ata/ahci.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * This header provides constants for most AHCI bindings.
+ */
+
+#ifndef _DT_BINDINGS_ATA_AHCI_H
+#define _DT_BINDINGS_ATA_AHCI_H
+
+/* Host Bus Adapter generic platform capabilities */
+#define HBA_SSS (1 << 27)
+#define HBA_SMPS (1 << 28)
+
+/* Host Bus Adapter port-specific platform capabilities */
+#define HBA_PORT_HPCP (1 << 18)
+#define HBA_PORT_MPSP (1 << 19)
+#define HBA_PORT_CPD (1 << 20)
+#define HBA_PORT_ESP (1 << 21)
+#define HBA_PORT_FBSCP (1 << 22)
+
+#endif
diff --git a/include/dt-bindings/clock/actions,s500-cmu.h b/include/dt-bindings/clock/actions,s500-cmu.h
index a250a52a6192..a237eb26accb 100644
--- a/include/dt-bindings/clock/actions,s500-cmu.h
+++ b/include/dt-bindings/clock/actions,s500-cmu.h
@@ -74,10 +74,12 @@
#define CLK_RMII_REF 54
#define CLK_GPIO 55
-/* system clock (part 2) */
+/* additional clocks */
#define CLK_APB 56
#define CLK_DMAC 57
+#define CLK_NIC 58
+#define CLK_ETHERNET 59
-#define CLK_NR_CLKS (CLK_DMAC + 1)
+#define CLK_NR_CLKS (CLK_ETHERNET + 1)
#endif /* __DT_BINDINGS_CLOCK_S500_CMU_H */
diff --git a/include/dt-bindings/clock/alphascale,asm9260.h b/include/dt-bindings/clock/alphascale,asm9260.h
index d3871c63308b..f53f8b16883d 100644
--- a/include/dt-bindings/clock/alphascale,asm9260.h
+++ b/include/dt-bindings/clock/alphascale,asm9260.h
@@ -55,7 +55,7 @@
#define CLKID_AHB_I2S1 45
#define CLKID_AHB_MAC1 46
-/* devider */
+/* divider */
#define CLKID_SYS_CPU 47
#define CLKID_SYS_AHB 48
#define CLKID_SYS_I2S0M 49
diff --git a/include/dt-bindings/clock/am3.h b/include/dt-bindings/clock/am3.h
index 894951541276..dfbad5c87933 100644
--- a/include/dt-bindings/clock/am3.h
+++ b/include/dt-bindings/clock/am3.h
@@ -8,99 +8,6 @@
#define AM3_CLKCTRL_OFFSET 0x0
#define AM3_CLKCTRL_INDEX(offset) ((offset) - AM3_CLKCTRL_OFFSET)
-/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
-
-/* l4_per clocks */
-#define AM3_L4_PER_CLKCTRL_OFFSET 0x14
-#define AM3_L4_PER_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_PER_CLKCTRL_OFFSET)
-#define AM3_CPGMAC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14)
-#define AM3_LCDC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x18)
-#define AM3_USB_OTG_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x1c)
-#define AM3_TPTC0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x24)
-#define AM3_EMIF_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x28)
-#define AM3_OCMCRAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x2c)
-#define AM3_GPMC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x30)
-#define AM3_MCASP0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x34)
-#define AM3_UART6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x38)
-#define AM3_MMC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x3c)
-#define AM3_ELM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x40)
-#define AM3_I2C3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x44)
-#define AM3_I2C2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x48)
-#define AM3_SPI0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x4c)
-#define AM3_SPI1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x50)
-#define AM3_L4_LS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x60)
-#define AM3_MCASP1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x68)
-#define AM3_UART2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x6c)
-#define AM3_UART3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x70)
-#define AM3_UART4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x74)
-#define AM3_UART5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x78)
-#define AM3_TIMER7_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x7c)
-#define AM3_TIMER2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x80)
-#define AM3_TIMER3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x84)
-#define AM3_TIMER4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x88)
-#define AM3_RNG_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x90)
-#define AM3_AES_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x94)
-#define AM3_SHAM_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xa0)
-#define AM3_GPIO2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xac)
-#define AM3_GPIO3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb0)
-#define AM3_GPIO4_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xb4)
-#define AM3_TPCC_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xbc)
-#define AM3_D_CAN0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc0)
-#define AM3_D_CAN1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xc4)
-#define AM3_EPWMSS1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xcc)
-#define AM3_EPWMSS0_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd4)
-#define AM3_EPWMSS2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xd8)
-#define AM3_L3_INSTR_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xdc)
-#define AM3_L3_MAIN_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe0)
-#define AM3_PRUSS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xe8)
-#define AM3_TIMER5_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xec)
-#define AM3_TIMER6_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf0)
-#define AM3_MMC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf4)
-#define AM3_MMC3_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xf8)
-#define AM3_TPTC1_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0xfc)
-#define AM3_TPTC2_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x100)
-#define AM3_SPINLOCK_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x10c)
-#define AM3_MAILBOX_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x110)
-#define AM3_L4_HS_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x120)
-#define AM3_OCPWP_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x130)
-#define AM3_CLKDIV32K_CLKCTRL AM3_L4_PER_CLKCTRL_INDEX(0x14c)
-
-/* l4_wkup clocks */
-#define AM3_L4_WKUP_CLKCTRL_OFFSET 0x4
-#define AM3_L4_WKUP_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_WKUP_CLKCTRL_OFFSET)
-#define AM3_CONTROL_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x4)
-#define AM3_GPIO1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x8)
-#define AM3_L4_WKUP_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc)
-#define AM3_DEBUGSS_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0x14)
-#define AM3_WKUP_M3_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb0)
-#define AM3_UART1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb4)
-#define AM3_I2C1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xb8)
-#define AM3_ADC_TSC_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xbc)
-#define AM3_SMARTREFLEX0_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc0)
-#define AM3_TIMER1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc4)
-#define AM3_SMARTREFLEX1_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xc8)
-#define AM3_WD_TIMER2_CLKCTRL AM3_L4_WKUP_CLKCTRL_INDEX(0xd4)
-
-/* mpu clocks */
-#define AM3_MPU_CLKCTRL_OFFSET 0x4
-#define AM3_MPU_CLKCTRL_INDEX(offset) ((offset) - AM3_MPU_CLKCTRL_OFFSET)
-#define AM3_MPU_CLKCTRL AM3_MPU_CLKCTRL_INDEX(0x4)
-
-/* l4_rtc clocks */
-#define AM3_RTC_CLKCTRL AM3_CLKCTRL_INDEX(0x0)
-
-/* gfx_l3 clocks */
-#define AM3_GFX_L3_CLKCTRL_OFFSET 0x4
-#define AM3_GFX_L3_CLKCTRL_INDEX(offset) ((offset) - AM3_GFX_L3_CLKCTRL_OFFSET)
-#define AM3_GFX_CLKCTRL AM3_GFX_L3_CLKCTRL_INDEX(0x4)
-
-/* l4_cefuse clocks */
-#define AM3_L4_CEFUSE_CLKCTRL_OFFSET 0x20
-#define AM3_L4_CEFUSE_CLKCTRL_INDEX(offset) ((offset) - AM3_L4_CEFUSE_CLKCTRL_OFFSET)
-#define AM3_CEFUSE_CLKCTRL AM3_L4_CEFUSE_CLKCTRL_INDEX(0x20)
-
-/* XXX: Compatibility part end */
-
/* l4ls clocks */
#define AM3_L4LS_CLKCTRL_OFFSET 0x38
#define AM3_L4LS_CLKCTRL_INDEX(offset) ((offset) - AM3_L4LS_CLKCTRL_OFFSET)
diff --git a/include/dt-bindings/clock/am4.h b/include/dt-bindings/clock/am4.h
index d961e7cb3682..a65b082e9cff 100644
--- a/include/dt-bindings/clock/am4.h
+++ b/include/dt-bindings/clock/am4.h
@@ -8,104 +8,6 @@
#define AM4_CLKCTRL_OFFSET 0x20
#define AM4_CLKCTRL_INDEX(offset) ((offset) - AM4_CLKCTRL_OFFSET)
-/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
-
-/* l4_wkup clocks */
-#define AM4_ADC_TSC_CLKCTRL AM4_CLKCTRL_INDEX(0x120)
-#define AM4_L4_WKUP_CLKCTRL AM4_CLKCTRL_INDEX(0x220)
-#define AM4_WKUP_M3_CLKCTRL AM4_CLKCTRL_INDEX(0x228)
-#define AM4_COUNTER_32K_CLKCTRL AM4_CLKCTRL_INDEX(0x230)
-#define AM4_TIMER1_CLKCTRL AM4_CLKCTRL_INDEX(0x328)
-#define AM4_WD_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x338)
-#define AM4_I2C1_CLKCTRL AM4_CLKCTRL_INDEX(0x340)
-#define AM4_UART1_CLKCTRL AM4_CLKCTRL_INDEX(0x348)
-#define AM4_SMARTREFLEX0_CLKCTRL AM4_CLKCTRL_INDEX(0x350)
-#define AM4_SMARTREFLEX1_CLKCTRL AM4_CLKCTRL_INDEX(0x358)
-#define AM4_CONTROL_CLKCTRL AM4_CLKCTRL_INDEX(0x360)
-#define AM4_GPIO1_CLKCTRL AM4_CLKCTRL_INDEX(0x368)
-
-/* mpu clocks */
-#define AM4_MPU_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
-
-/* gfx_l3 clocks */
-#define AM4_GFX_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
-
-/* l4_rtc clocks */
-#define AM4_RTC_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
-
-/* l4_per clocks */
-#define AM4_L3_MAIN_CLKCTRL AM4_CLKCTRL_INDEX(0x20)
-#define AM4_AES_CLKCTRL AM4_CLKCTRL_INDEX(0x28)
-#define AM4_DES_CLKCTRL AM4_CLKCTRL_INDEX(0x30)
-#define AM4_L3_INSTR_CLKCTRL AM4_CLKCTRL_INDEX(0x40)
-#define AM4_OCMCRAM_CLKCTRL AM4_CLKCTRL_INDEX(0x50)
-#define AM4_SHAM_CLKCTRL AM4_CLKCTRL_INDEX(0x58)
-#define AM4_VPFE0_CLKCTRL AM4_CLKCTRL_INDEX(0x68)
-#define AM4_VPFE1_CLKCTRL AM4_CLKCTRL_INDEX(0x70)
-#define AM4_TPCC_CLKCTRL AM4_CLKCTRL_INDEX(0x78)
-#define AM4_TPTC0_CLKCTRL AM4_CLKCTRL_INDEX(0x80)
-#define AM4_TPTC1_CLKCTRL AM4_CLKCTRL_INDEX(0x88)
-#define AM4_TPTC2_CLKCTRL AM4_CLKCTRL_INDEX(0x90)
-#define AM4_L4_HS_CLKCTRL AM4_CLKCTRL_INDEX(0xa0)
-#define AM4_GPMC_CLKCTRL AM4_CLKCTRL_INDEX(0x220)
-#define AM4_MCASP0_CLKCTRL AM4_CLKCTRL_INDEX(0x238)
-#define AM4_MCASP1_CLKCTRL AM4_CLKCTRL_INDEX(0x240)
-#define AM4_MMC3_CLKCTRL AM4_CLKCTRL_INDEX(0x248)
-#define AM4_QSPI_CLKCTRL AM4_CLKCTRL_INDEX(0x258)
-#define AM4_USB_OTG_SS0_CLKCTRL AM4_CLKCTRL_INDEX(0x260)
-#define AM4_USB_OTG_SS1_CLKCTRL AM4_CLKCTRL_INDEX(0x268)
-#define AM4_PRUSS_CLKCTRL AM4_CLKCTRL_INDEX(0x320)
-#define AM4_L4_LS_CLKCTRL AM4_CLKCTRL_INDEX(0x420)
-#define AM4_D_CAN0_CLKCTRL AM4_CLKCTRL_INDEX(0x428)
-#define AM4_D_CAN1_CLKCTRL AM4_CLKCTRL_INDEX(0x430)
-#define AM4_EPWMSS0_CLKCTRL AM4_CLKCTRL_INDEX(0x438)
-#define AM4_EPWMSS1_CLKCTRL AM4_CLKCTRL_INDEX(0x440)
-#define AM4_EPWMSS2_CLKCTRL AM4_CLKCTRL_INDEX(0x448)
-#define AM4_EPWMSS3_CLKCTRL AM4_CLKCTRL_INDEX(0x450)
-#define AM4_EPWMSS4_CLKCTRL AM4_CLKCTRL_INDEX(0x458)
-#define AM4_EPWMSS5_CLKCTRL AM4_CLKCTRL_INDEX(0x460)
-#define AM4_ELM_CLKCTRL AM4_CLKCTRL_INDEX(0x468)
-#define AM4_GPIO2_CLKCTRL AM4_CLKCTRL_INDEX(0x478)
-#define AM4_GPIO3_CLKCTRL AM4_CLKCTRL_INDEX(0x480)
-#define AM4_GPIO4_CLKCTRL AM4_CLKCTRL_INDEX(0x488)
-#define AM4_GPIO5_CLKCTRL AM4_CLKCTRL_INDEX(0x490)
-#define AM4_GPIO6_CLKCTRL AM4_CLKCTRL_INDEX(0x498)
-#define AM4_HDQ1W_CLKCTRL AM4_CLKCTRL_INDEX(0x4a0)
-#define AM4_I2C2_CLKCTRL AM4_CLKCTRL_INDEX(0x4a8)
-#define AM4_I2C3_CLKCTRL AM4_CLKCTRL_INDEX(0x4b0)
-#define AM4_MAILBOX_CLKCTRL AM4_CLKCTRL_INDEX(0x4b8)
-#define AM4_MMC1_CLKCTRL AM4_CLKCTRL_INDEX(0x4c0)
-#define AM4_MMC2_CLKCTRL AM4_CLKCTRL_INDEX(0x4c8)
-#define AM4_RNG_CLKCTRL AM4_CLKCTRL_INDEX(0x4e0)
-#define AM4_SPI0_CLKCTRL AM4_CLKCTRL_INDEX(0x500)
-#define AM4_SPI1_CLKCTRL AM4_CLKCTRL_INDEX(0x508)
-#define AM4_SPI2_CLKCTRL AM4_CLKCTRL_INDEX(0x510)
-#define AM4_SPI3_CLKCTRL AM4_CLKCTRL_INDEX(0x518)
-#define AM4_SPI4_CLKCTRL AM4_CLKCTRL_INDEX(0x520)
-#define AM4_SPINLOCK_CLKCTRL AM4_CLKCTRL_INDEX(0x528)
-#define AM4_TIMER2_CLKCTRL AM4_CLKCTRL_INDEX(0x530)
-#define AM4_TIMER3_CLKCTRL AM4_CLKCTRL_INDEX(0x538)
-#define AM4_TIMER4_CLKCTRL AM4_CLKCTRL_INDEX(0x540)
-#define AM4_TIMER5_CLKCTRL AM4_CLKCTRL_INDEX(0x548)
-#define AM4_TIMER6_CLKCTRL AM4_CLKCTRL_INDEX(0x550)
-#define AM4_TIMER7_CLKCTRL AM4_CLKCTRL_INDEX(0x558)
-#define AM4_TIMER8_CLKCTRL AM4_CLKCTRL_INDEX(0x560)
-#define AM4_TIMER9_CLKCTRL AM4_CLKCTRL_INDEX(0x568)
-#define AM4_TIMER10_CLKCTRL AM4_CLKCTRL_INDEX(0x570)
-#define AM4_TIMER11_CLKCTRL AM4_CLKCTRL_INDEX(0x578)
-#define AM4_UART2_CLKCTRL AM4_CLKCTRL_INDEX(0x580)
-#define AM4_UART3_CLKCTRL AM4_CLKCTRL_INDEX(0x588)
-#define AM4_UART4_CLKCTRL AM4_CLKCTRL_INDEX(0x590)
-#define AM4_UART5_CLKCTRL AM4_CLKCTRL_INDEX(0x598)
-#define AM4_UART6_CLKCTRL AM4_CLKCTRL_INDEX(0x5a0)
-#define AM4_OCP2SCP0_CLKCTRL AM4_CLKCTRL_INDEX(0x5b8)
-#define AM4_OCP2SCP1_CLKCTRL AM4_CLKCTRL_INDEX(0x5c0)
-#define AM4_EMIF_CLKCTRL AM4_CLKCTRL_INDEX(0x720)
-#define AM4_DSS_CORE_CLKCTRL AM4_CLKCTRL_INDEX(0xa20)
-#define AM4_CPGMAC0_CLKCTRL AM4_CLKCTRL_INDEX(0xb20)
-
-/* XXX: Compatibility part end. */
-
/* l3s_tsc clocks */
#define AM4_L3S_TSC_CLKCTRL_OFFSET 0x120
#define AM4_L3S_TSC_CLKCTRL_INDEX(offset) ((offset) - AM4_L3S_TSC_CLKCTRL_OFFSET)
@@ -158,6 +60,7 @@
#define AM4_L3S_VPFE0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x68)
#define AM4_L3S_VPFE1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x70)
#define AM4_L3S_GPMC_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x220)
+#define AM4_L3S_ADC1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x230)
#define AM4_L3S_MCASP0_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x238)
#define AM4_L3S_MCASP1_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x240)
#define AM4_L3S_MMC3_CLKCTRL AM4_L3S_CLKCTRL_INDEX(0x248)
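The AM3/AM4 *_CLKCTRL_INDEX() macros above turn an absolute CM register offset into an index relative to the first clkctrl register of each clock domain. As a hedged sketch of how the newly added AM4_L3S_ADC1_CLKCTRL might be consumed (node label and unit address are illustrative, not part of this patch) — TI's clkctrl binding uses two clock cells, with the second cell selecting the gate or optional clock within the register:

    adc1: adc@... {
        /* second cell 0 selects the main module clock */
        clocks = <&l3s_clkctrl AM4_L3S_ADC1_CLKCTRL 0>;
        clock-names = "fck";
    };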
diff --git a/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h b/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h
new file mode 100644
index 000000000000..2ce1a06dc735
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
+ * Author: Jian Hu <jian.hu@amlogic.com>
+ *
+ * Copyright (c) 2023, SberDevices. All Rights Reserved.
+ * Author: Dmitry Rokosov <ddrokosov@sberdevices.ru>
+ */
+
+#ifndef __A1_PERIPHERALS_CLKC_H
+#define __A1_PERIPHERALS_CLKC_H
+
+#define CLKID_XTAL_IN 0
+#define CLKID_FIXPLL_IN 1
+#define CLKID_USB_PHY_IN 2
+#define CLKID_USB_CTRL_IN 3
+#define CLKID_HIFIPLL_IN 4
+#define CLKID_SYSPLL_IN 5
+#define CLKID_DDS_IN 6
+#define CLKID_SYS 7
+#define CLKID_CLKTREE 8
+#define CLKID_RESET_CTRL 9
+#define CLKID_ANALOG_CTRL 10
+#define CLKID_PWR_CTRL 11
+#define CLKID_PAD_CTRL 12
+#define CLKID_SYS_CTRL 13
+#define CLKID_TEMP_SENSOR 14
+#define CLKID_AM2AXI_DIV 15
+#define CLKID_SPICC_B 16
+#define CLKID_SPICC_A 17
+#define CLKID_MSR 18
+#define CLKID_AUDIO 19
+#define CLKID_JTAG_CTRL 20
+#define CLKID_SARADC_EN 21
+#define CLKID_PWM_EF 22
+#define CLKID_PWM_CD 23
+#define CLKID_PWM_AB 24
+#define CLKID_CEC 25
+#define CLKID_I2C_S 26
+#define CLKID_IR_CTRL 27
+#define CLKID_I2C_M_D 28
+#define CLKID_I2C_M_C 29
+#define CLKID_I2C_M_B 30
+#define CLKID_I2C_M_A 31
+#define CLKID_ACODEC 32
+#define CLKID_OTP 33
+#define CLKID_SD_EMMC_A 34
+#define CLKID_USB_PHY 35
+#define CLKID_USB_CTRL 36
+#define CLKID_SYS_DSPB 37
+#define CLKID_SYS_DSPA 38
+#define CLKID_DMA 39
+#define CLKID_IRQ_CTRL 40
+#define CLKID_NIC 41
+#define CLKID_GIC 42
+#define CLKID_UART_C 43
+#define CLKID_UART_B 44
+#define CLKID_UART_A 45
+#define CLKID_SYS_PSRAM 46
+#define CLKID_RSA 47
+#define CLKID_CORESIGHT 48
+#define CLKID_AM2AXI_VAD 49
+#define CLKID_AUDIO_VAD 50
+#define CLKID_AXI_DMC 51
+#define CLKID_AXI_PSRAM 52
+#define CLKID_RAMB 53
+#define CLKID_RAMA 54
+#define CLKID_AXI_SPIFC 55
+#define CLKID_AXI_NIC 56
+#define CLKID_AXI_DMA 57
+#define CLKID_CPU_CTRL 58
+#define CLKID_ROM 59
+#define CLKID_PROC_I2C 60
+#define CLKID_DSPA_SEL 61
+#define CLKID_DSPB_SEL 62
+#define CLKID_DSPA_EN 63
+#define CLKID_DSPA_EN_NIC 64
+#define CLKID_DSPB_EN 65
+#define CLKID_DSPB_EN_NIC 66
+#define CLKID_RTC 67
+#define CLKID_CECA_32K 68
+#define CLKID_CECB_32K 69
+#define CLKID_24M 70
+#define CLKID_12M 71
+#define CLKID_FCLK_DIV2_DIVN 72
+#define CLKID_GEN 73
+#define CLKID_SARADC_SEL 74
+#define CLKID_SARADC 75
+#define CLKID_PWM_A 76
+#define CLKID_PWM_B 77
+#define CLKID_PWM_C 78
+#define CLKID_PWM_D 79
+#define CLKID_PWM_E 80
+#define CLKID_PWM_F 81
+#define CLKID_SPICC 82
+#define CLKID_TS 83
+#define CLKID_SPIFC 84
+#define CLKID_USB_BUS 85
+#define CLKID_SD_EMMC 86
+#define CLKID_PSRAM 87
+#define CLKID_DMC 88
+#define CLKID_SYS_A_SEL 89
+#define CLKID_SYS_A_DIV 90
+#define CLKID_SYS_A 91
+#define CLKID_SYS_B_SEL 92
+#define CLKID_SYS_B_DIV 93
+#define CLKID_SYS_B 94
+#define CLKID_DSPA_A_SEL 95
+#define CLKID_DSPA_A_DIV 96
+#define CLKID_DSPA_A 97
+#define CLKID_DSPA_B_SEL 98
+#define CLKID_DSPA_B_DIV 99
+#define CLKID_DSPA_B 100
+#define CLKID_DSPB_A_SEL 101
+#define CLKID_DSPB_A_DIV 102
+#define CLKID_DSPB_A 103
+#define CLKID_DSPB_B_SEL 104
+#define CLKID_DSPB_B_DIV 105
+#define CLKID_DSPB_B 106
+#define CLKID_RTC_32K_IN 107
+#define CLKID_RTC_32K_DIV 108
+#define CLKID_RTC_32K_XTAL 109
+#define CLKID_RTC_32K_SEL 110
+#define CLKID_CECB_32K_IN 111
+#define CLKID_CECB_32K_DIV 112
+#define CLKID_CECB_32K_SEL_PRE 113
+#define CLKID_CECB_32K_SEL 114
+#define CLKID_CECA_32K_IN 115
+#define CLKID_CECA_32K_DIV 116
+#define CLKID_CECA_32K_SEL_PRE 117
+#define CLKID_CECA_32K_SEL 118
+#define CLKID_DIV2_PRE 119
+#define CLKID_24M_DIV2 120
+#define CLKID_GEN_SEL 121
+#define CLKID_GEN_DIV 122
+#define CLKID_SARADC_DIV 123
+#define CLKID_PWM_A_SEL 124
+#define CLKID_PWM_A_DIV 125
+#define CLKID_PWM_B_SEL 126
+#define CLKID_PWM_B_DIV 127
+#define CLKID_PWM_C_SEL 128
+#define CLKID_PWM_C_DIV 129
+#define CLKID_PWM_D_SEL 130
+#define CLKID_PWM_D_DIV 131
+#define CLKID_PWM_E_SEL 132
+#define CLKID_PWM_E_DIV 133
+#define CLKID_PWM_F_SEL 134
+#define CLKID_PWM_F_DIV 135
+#define CLKID_SPICC_SEL 136
+#define CLKID_SPICC_DIV 137
+#define CLKID_SPICC_SEL2 138
+#define CLKID_TS_DIV 139
+#define CLKID_SPIFC_SEL 140
+#define CLKID_SPIFC_DIV 141
+#define CLKID_SPIFC_SEL2 142
+#define CLKID_USB_BUS_SEL 143
+#define CLKID_USB_BUS_DIV 144
+#define CLKID_SD_EMMC_SEL 145
+#define CLKID_SD_EMMC_DIV 146
+#define CLKID_SD_EMMC_SEL2 147
+#define CLKID_PSRAM_SEL 148
+#define CLKID_PSRAM_DIV 149
+#define CLKID_PSRAM_SEL2 150
+#define CLKID_DMC_SEL 151
+#define CLKID_DMC_DIV 152
+#define CLKID_DMC_SEL2 153
+#define CLKID_SYS_PLL_DIV16 154
+
+#endif /* __A1_PERIPHERALS_CLKC_H */
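As a usage sketch for the IDs above (unit address and labels illustrative only, reg omitted), consumers reference the A1 peripherals clock controller with a one-cell specifier:

    clkc_periphs: clock-controller@800 {
        compatible = "amlogic,a1-peripherals-clkc";
        #clock-cells = <1>;
        /* reg and input clocks omitted in this sketch */
    };

    uart_a: serial@... {
        clocks = <&clkc_periphs CLKID_UART_A>;
    };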
diff --git a/include/dt-bindings/clock/amlogic,a1-pll-clkc.h b/include/dt-bindings/clock/amlogic,a1-pll-clkc.h
new file mode 100644
index 000000000000..0dfc5e78a2d5
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,a1-pll-clkc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
+ * Author: Jian Hu <jian.hu@amlogic.com>
+ *
+ * Copyright (c) 2023, SberDevices. All Rights Reserved.
+ * Author: Dmitry Rokosov <ddrokosov@sberdevices.ru>
+ */
+
+#ifndef __A1_PLL_CLKC_H
+#define __A1_PLL_CLKC_H
+
+#define CLKID_FIXED_PLL_DCO 0
+#define CLKID_FIXED_PLL 1
+#define CLKID_FCLK_DIV2_DIV 2
+#define CLKID_FCLK_DIV3_DIV 3
+#define CLKID_FCLK_DIV5_DIV 4
+#define CLKID_FCLK_DIV7_DIV 5
+#define CLKID_FCLK_DIV2 6
+#define CLKID_FCLK_DIV3 7
+#define CLKID_FCLK_DIV5 8
+#define CLKID_FCLK_DIV7 9
+#define CLKID_HIFI_PLL 10
+#define CLKID_SYS_PLL 11
+
+#endif /* __A1_PLL_CLKC_H */
diff --git a/include/dt-bindings/clock/amlogic,c3-peripherals-clkc.h b/include/dt-bindings/clock/amlogic,c3-peripherals-clkc.h
new file mode 100644
index 000000000000..d115c741c255
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,c3-peripherals-clkc.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ * Author: Chuan Liu <chuan.liu@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_AMLOGIC_C3_PERIPHERALS_CLKC_H
+#define _DT_BINDINGS_CLOCK_AMLOGIC_C3_PERIPHERALS_CLKC_H
+
+#define CLKID_RTC_XTAL_CLKIN 0
+#define CLKID_RTC_32K_DIV 1
+#define CLKID_RTC_32K_MUX 2
+#define CLKID_RTC_32K 3
+#define CLKID_RTC_CLK 4
+#define CLKID_SYS_RESET_CTRL 5
+#define CLKID_SYS_PWR_CTRL 6
+#define CLKID_SYS_PAD_CTRL 7
+#define CLKID_SYS_CTRL 8
+#define CLKID_SYS_TS_PLL 9
+#define CLKID_SYS_DEV_ARB 10
+#define CLKID_SYS_MMC_PCLK 11
+#define CLKID_SYS_CPU_CTRL 12
+#define CLKID_SYS_JTAG_CTRL 13
+#define CLKID_SYS_IR_CTRL 14
+#define CLKID_SYS_IRQ_CTRL 15
+#define CLKID_SYS_MSR_CLK 16
+#define CLKID_SYS_ROM 17
+#define CLKID_SYS_UART_F 18
+#define CLKID_SYS_CPU_ARB 19
+#define CLKID_SYS_RSA 20
+#define CLKID_SYS_SAR_ADC 21
+#define CLKID_SYS_STARTUP 22
+#define CLKID_SYS_SECURE 23
+#define CLKID_SYS_SPIFC 24
+#define CLKID_SYS_NNA 25
+#define CLKID_SYS_ETH_MAC 26
+#define CLKID_SYS_GIC 27
+#define CLKID_SYS_RAMA 28
+#define CLKID_SYS_BIG_NIC 29
+#define CLKID_SYS_RAMB 30
+#define CLKID_SYS_AUDIO_PCLK 31
+#define CLKID_SYS_PWM_KL 32
+#define CLKID_SYS_PWM_IJ 33
+#define CLKID_SYS_USB 34
+#define CLKID_SYS_SD_EMMC_A 35
+#define CLKID_SYS_SD_EMMC_C 36
+#define CLKID_SYS_PWM_AB 37
+#define CLKID_SYS_PWM_CD 38
+#define CLKID_SYS_PWM_EF 39
+#define CLKID_SYS_PWM_GH 40
+#define CLKID_SYS_SPICC_1 41
+#define CLKID_SYS_SPICC_0 42
+#define CLKID_SYS_UART_A 43
+#define CLKID_SYS_UART_B 44
+#define CLKID_SYS_UART_C 45
+#define CLKID_SYS_UART_D 46
+#define CLKID_SYS_UART_E 47
+#define CLKID_SYS_I2C_M_A 48
+#define CLKID_SYS_I2C_M_B 49
+#define CLKID_SYS_I2C_M_C 50
+#define CLKID_SYS_I2C_M_D 51
+#define CLKID_SYS_I2S_S_A 52
+#define CLKID_SYS_RTC 53
+#define CLKID_SYS_GE2D 54
+#define CLKID_SYS_ISP 55
+#define CLKID_SYS_GPV_ISP_NIC 56
+#define CLKID_SYS_GPV_CVE_NIC 57
+#define CLKID_SYS_MIPI_DSI_HOST 58
+#define CLKID_SYS_MIPI_DSI_PHY 59
+#define CLKID_SYS_ETH_PHY 60
+#define CLKID_SYS_ACODEC 61
+#define CLKID_SYS_DWAP 62
+#define CLKID_SYS_DOS 63
+#define CLKID_SYS_CVE 64
+#define CLKID_SYS_VOUT 65
+#define CLKID_SYS_VC9000E 66
+#define CLKID_SYS_PWM_MN 67
+#define CLKID_SYS_SD_EMMC_B 68
+#define CLKID_AXI_SYS_NIC 69
+#define CLKID_AXI_ISP_NIC 70
+#define CLKID_AXI_CVE_NIC 71
+#define CLKID_AXI_RAMB 72
+#define CLKID_AXI_RAMA 73
+#define CLKID_AXI_CPU_DMC 74
+#define CLKID_AXI_NIC 75
+#define CLKID_AXI_DMA 76
+#define CLKID_AXI_MUX_NIC 77
+#define CLKID_AXI_CVE 78
+#define CLKID_AXI_DEV1_DMC 79
+#define CLKID_AXI_DEV0_DMC 80
+#define CLKID_AXI_DSP_DMC 81
+#define CLKID_12_24M_IN 82
+#define CLKID_12M_24M 83
+#define CLKID_FCLK_25M_DIV 84
+#define CLKID_FCLK_25M 85
+#define CLKID_GEN_SEL 86
+#define CLKID_GEN_DIV 87
+#define CLKID_GEN 88
+#define CLKID_SARADC_SEL 89
+#define CLKID_SARADC_DIV 90
+#define CLKID_SARADC 91
+#define CLKID_PWM_A_SEL 92
+#define CLKID_PWM_A_DIV 93
+#define CLKID_PWM_A 94
+#define CLKID_PWM_B_SEL 95
+#define CLKID_PWM_B_DIV 96
+#define CLKID_PWM_B 97
+#define CLKID_PWM_C_SEL 98
+#define CLKID_PWM_C_DIV 99
+#define CLKID_PWM_C 100
+#define CLKID_PWM_D_SEL 101
+#define CLKID_PWM_D_DIV 102
+#define CLKID_PWM_D 103
+#define CLKID_PWM_E_SEL 104
+#define CLKID_PWM_E_DIV 105
+#define CLKID_PWM_E 106
+#define CLKID_PWM_F_SEL 107
+#define CLKID_PWM_F_DIV 108
+#define CLKID_PWM_F 109
+#define CLKID_PWM_G_SEL 110
+#define CLKID_PWM_G_DIV 111
+#define CLKID_PWM_G 112
+#define CLKID_PWM_H_SEL 113
+#define CLKID_PWM_H_DIV 114
+#define CLKID_PWM_H 115
+#define CLKID_PWM_I_SEL 116
+#define CLKID_PWM_I_DIV 117
+#define CLKID_PWM_I 118
+#define CLKID_PWM_J_SEL 119
+#define CLKID_PWM_J_DIV 120
+#define CLKID_PWM_J 121
+#define CLKID_PWM_K_SEL 122
+#define CLKID_PWM_K_DIV 123
+#define CLKID_PWM_K 124
+#define CLKID_PWM_L_SEL 125
+#define CLKID_PWM_L_DIV 126
+#define CLKID_PWM_L 127
+#define CLKID_PWM_M_SEL 128
+#define CLKID_PWM_M_DIV 129
+#define CLKID_PWM_M 130
+#define CLKID_PWM_N_SEL 131
+#define CLKID_PWM_N_DIV 132
+#define CLKID_PWM_N 133
+#define CLKID_SPICC_A_SEL 134
+#define CLKID_SPICC_A_DIV 135
+#define CLKID_SPICC_A 136
+#define CLKID_SPICC_B_SEL 137
+#define CLKID_SPICC_B_DIV 138
+#define CLKID_SPICC_B 139
+#define CLKID_SPIFC_SEL 140
+#define CLKID_SPIFC_DIV 141
+#define CLKID_SPIFC 142
+#define CLKID_SD_EMMC_A_SEL 143
+#define CLKID_SD_EMMC_A_DIV 144
+#define CLKID_SD_EMMC_A 145
+#define CLKID_SD_EMMC_B_SEL 146
+#define CLKID_SD_EMMC_B_DIV 147
+#define CLKID_SD_EMMC_B 148
+#define CLKID_SD_EMMC_C_SEL 149
+#define CLKID_SD_EMMC_C_DIV 150
+#define CLKID_SD_EMMC_C 151
+#define CLKID_TS_DIV 152
+#define CLKID_TS 153
+#define CLKID_ETH_125M_DIV 154
+#define CLKID_ETH_125M 155
+#define CLKID_ETH_RMII_DIV 156
+#define CLKID_ETH_RMII 157
+#define CLKID_MIPI_DSI_MEAS_SEL 158
+#define CLKID_MIPI_DSI_MEAS_DIV 159
+#define CLKID_MIPI_DSI_MEAS 160
+#define CLKID_DSI_PHY_SEL 161
+#define CLKID_DSI_PHY_DIV 162
+#define CLKID_DSI_PHY 163
+#define CLKID_VOUT_MCLK_SEL 164
+#define CLKID_VOUT_MCLK_DIV 165
+#define CLKID_VOUT_MCLK 166
+#define CLKID_VOUT_ENC_SEL 167
+#define CLKID_VOUT_ENC_DIV 168
+#define CLKID_VOUT_ENC 169
+#define CLKID_HCODEC_0_SEL 170
+#define CLKID_HCODEC_0_DIV 171
+#define CLKID_HCODEC_0 172
+#define CLKID_HCODEC_1_SEL 173
+#define CLKID_HCODEC_1_DIV 174
+#define CLKID_HCODEC_1 175
+#define CLKID_HCODEC 176
+#define CLKID_VC9000E_ACLK_SEL 177
+#define CLKID_VC9000E_ACLK_DIV 178
+#define CLKID_VC9000E_ACLK 179
+#define CLKID_VC9000E_CORE_SEL 180
+#define CLKID_VC9000E_CORE_DIV 181
+#define CLKID_VC9000E_CORE 182
+#define CLKID_CSI_PHY0_SEL 183
+#define CLKID_CSI_PHY0_DIV 184
+#define CLKID_CSI_PHY0 185
+#define CLKID_DEWARPA_SEL 186
+#define CLKID_DEWARPA_DIV 187
+#define CLKID_DEWARPA 188
+#define CLKID_ISP0_SEL 189
+#define CLKID_ISP0_DIV 190
+#define CLKID_ISP0 191
+#define CLKID_NNA_CORE_SEL 192
+#define CLKID_NNA_CORE_DIV 193
+#define CLKID_NNA_CORE 194
+#define CLKID_GE2D_SEL 195
+#define CLKID_GE2D_DIV 196
+#define CLKID_GE2D 197
+#define CLKID_VAPB_SEL 198
+#define CLKID_VAPB_DIV 199
+#define CLKID_VAPB 200
+
+#endif /* _DT_BINDINGS_CLOCK_AMLOGIC_C3_PERIPHERALS_CLKC_H */
diff --git a/include/dt-bindings/clock/amlogic,c3-pll-clkc.h b/include/dt-bindings/clock/amlogic,c3-pll-clkc.h
new file mode 100644
index 000000000000..fcdc558715e8
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,c3-pll-clkc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ * Author: Chuan Liu <chuan.liu@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_AMLOGIC_C3_PLL_CLKC_H
+#define _DT_BINDINGS_CLOCK_AMLOGIC_C3_PLL_CLKC_H
+
+#define CLKID_FCLK_50M_EN 0
+#define CLKID_FCLK_50M 1
+#define CLKID_FCLK_DIV2_DIV 2
+#define CLKID_FCLK_DIV2 3
+#define CLKID_FCLK_DIV2P5_DIV 4
+#define CLKID_FCLK_DIV2P5 5
+#define CLKID_FCLK_DIV3_DIV 6
+#define CLKID_FCLK_DIV3 7
+#define CLKID_FCLK_DIV4_DIV 8
+#define CLKID_FCLK_DIV4 9
+#define CLKID_FCLK_DIV5_DIV 10
+#define CLKID_FCLK_DIV5 11
+#define CLKID_FCLK_DIV7_DIV 12
+#define CLKID_FCLK_DIV7 13
+#define CLKID_GP0_PLL_DCO 14
+#define CLKID_GP0_PLL 15
+#define CLKID_HIFI_PLL_DCO 16
+#define CLKID_HIFI_PLL 17
+#define CLKID_MCLK_PLL_DCO 18
+#define CLKID_MCLK_PLL_OD 19
+#define CLKID_MCLK_PLL 20
+#define CLKID_MCLK0_SEL 21
+#define CLKID_MCLK0_SEL_EN 22
+#define CLKID_MCLK0_DIV 23
+#define CLKID_MCLK0 24
+#define CLKID_MCLK1_SEL 25
+#define CLKID_MCLK1_SEL_EN 26
+#define CLKID_MCLK1_DIV 27
+#define CLKID_MCLK1 28
+
+#endif /* _DT_BINDINGS_CLOCK_AMLOGIC_C3_PLL_CLKC_H */
diff --git a/include/dt-bindings/clock/amlogic,c3-scmi-clkc.h b/include/dt-bindings/clock/amlogic,c3-scmi-clkc.h
new file mode 100644
index 000000000000..663c9b349275
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,c3-scmi-clkc.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ * Author: Chuan Liu <chuan.liu@amlogic.com>
+ */
+
+#ifndef __AMLOGIC_C3_SCMI_CLKC_H
+#define __AMLOGIC_C3_SCMI_CLKC_H
+
+#define CLKID_DDR_PLL_OSC 0
+#define CLKID_DDR_PHY 1
+#define CLKID_TOP_PLL_OSC 2
+#define CLKID_USB_PLL_OSC 3
+#define CLKID_MIPIISP_VOUT 4
+#define CLKID_MCLK_PLL_OSC 5
+#define CLKID_USB_CTRL 6
+#define CLKID_ETH_PLL_OSC 7
+#define CLKID_OSC 8
+#define CLKID_SYS_CLK 9
+#define CLKID_AXI_CLK 10
+#define CLKID_CPU_CLK 11
+#define CLKID_FIXED_PLL_OSC 12
+#define CLKID_GP1_PLL_OSC 13
+#define CLKID_SYS_PLL_DIV16 14
+#define CLKID_CPU_CLK_DIV16 15
+
+#endif /* __AMLOGIC_C3_SCMI_CLKC_H */
diff --git a/include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h b/include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h
new file mode 100644
index 000000000000..861a331963ac
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,s4-peripherals-clkc.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2022-2023 Amlogic, Inc. All rights reserved.
+ * Author: Yu Tu <yu.tu@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_AMLOGIC_S4_PERIPHERALS_CLKC_H
+#define _DT_BINDINGS_CLOCK_AMLOGIC_S4_PERIPHERALS_CLKC_H
+
+#define CLKID_RTC_32K_CLKIN 0
+#define CLKID_RTC_32K_DIV 1
+#define CLKID_RTC_32K_SEL 2
+#define CLKID_RTC_32K_XATL 3
+#define CLKID_RTC 4
+#define CLKID_SYS_CLK_B_SEL 5
+#define CLKID_SYS_CLK_B_DIV 6
+#define CLKID_SYS_CLK_B 7
+#define CLKID_SYS_CLK_A_SEL 8
+#define CLKID_SYS_CLK_A_DIV 9
+#define CLKID_SYS_CLK_A 10
+#define CLKID_SYS 11
+#define CLKID_CECA_32K_CLKIN 12
+#define CLKID_CECA_32K_DIV 13
+#define CLKID_CECA_32K_SEL_PRE 14
+#define CLKID_CECA_32K_SEL 15
+#define CLKID_CECA_32K_CLKOUT 16
+#define CLKID_CECB_32K_CLKIN 17
+#define CLKID_CECB_32K_DIV 18
+#define CLKID_CECB_32K_SEL_PRE 19
+#define CLKID_CECB_32K_SEL 20
+#define CLKID_CECB_32K_CLKOUT 21
+#define CLKID_SC_CLK_SEL 22
+#define CLKID_SC_CLK_DIV 23
+#define CLKID_SC 24
+#define CLKID_12_24M 25
+#define CLKID_12M_CLK_DIV 26
+#define CLKID_12_24M_CLK_SEL 27
+#define CLKID_VID_PLL_DIV 28
+#define CLKID_VID_PLL_SEL 29
+#define CLKID_VID_PLL 30
+#define CLKID_VCLK_SEL 31
+#define CLKID_VCLK2_SEL 32
+#define CLKID_VCLK_INPUT 33
+#define CLKID_VCLK2_INPUT 34
+#define CLKID_VCLK_DIV 35
+#define CLKID_VCLK2_DIV 36
+#define CLKID_VCLK 37
+#define CLKID_VCLK2 38
+#define CLKID_VCLK_DIV1 39
+#define CLKID_VCLK_DIV2_EN 40
+#define CLKID_VCLK_DIV4_EN 41
+#define CLKID_VCLK_DIV6_EN 42
+#define CLKID_VCLK_DIV12_EN 43
+#define CLKID_VCLK2_DIV1 44
+#define CLKID_VCLK2_DIV2_EN 45
+#define CLKID_VCLK2_DIV4_EN 46
+#define CLKID_VCLK2_DIV6_EN 47
+#define CLKID_VCLK2_DIV12_EN 48
+#define CLKID_VCLK_DIV2 49
+#define CLKID_VCLK_DIV4 50
+#define CLKID_VCLK_DIV6 51
+#define CLKID_VCLK_DIV12 52
+#define CLKID_VCLK2_DIV2 53
+#define CLKID_VCLK2_DIV4 54
+#define CLKID_VCLK2_DIV6 55
+#define CLKID_VCLK2_DIV12 56
+#define CLKID_CTS_ENCI_SEL 57
+#define CLKID_CTS_ENCP_SEL 58
+#define CLKID_CTS_VDAC_SEL 59
+#define CLKID_HDMI_TX_SEL 60
+#define CLKID_CTS_ENCI 61
+#define CLKID_CTS_ENCP 62
+#define CLKID_CTS_VDAC 63
+#define CLKID_HDMI_TX 64
+#define CLKID_HDMI_SEL 65
+#define CLKID_HDMI_DIV 66
+#define CLKID_HDMI 67
+#define CLKID_TS_CLK_DIV 68
+#define CLKID_TS 69
+#define CLKID_MALI_0_SEL 70
+#define CLKID_MALI_0_DIV 71
+#define CLKID_MALI_0 72
+#define CLKID_MALI_1_SEL 73
+#define CLKID_MALI_1_DIV 74
+#define CLKID_MALI_1 75
+#define CLKID_MALI_SEL 76
+#define CLKID_VDEC_P0_SEL 77
+#define CLKID_VDEC_P0_DIV 78
+#define CLKID_VDEC_P0 79
+#define CLKID_VDEC_P1_SEL 80
+#define CLKID_VDEC_P1_DIV 81
+#define CLKID_VDEC_P1 82
+#define CLKID_VDEC_SEL 83
+#define CLKID_HEVCF_P0_SEL 84
+#define CLKID_HEVCF_P0_DIV 85
+#define CLKID_HEVCF_P0 86
+#define CLKID_HEVCF_P1_SEL 87
+#define CLKID_HEVCF_P1_DIV 88
+#define CLKID_HEVCF_P1 89
+#define CLKID_HEVCF_SEL 90
+#define CLKID_VPU_0_SEL 91
+#define CLKID_VPU_0_DIV 92
+#define CLKID_VPU_0 93
+#define CLKID_VPU_1_SEL 94
+#define CLKID_VPU_1_DIV 95
+#define CLKID_VPU_1 96
+#define CLKID_VPU 97
+#define CLKID_VPU_CLKB_TMP_SEL 98
+#define CLKID_VPU_CLKB_TMP_DIV 99
+#define CLKID_VPU_CLKB_TMP 100
+#define CLKID_VPU_CLKB_DIV 101
+#define CLKID_VPU_CLKB 102
+#define CLKID_VPU_CLKC_P0_SEL 103
+#define CLKID_VPU_CLKC_P0_DIV 104
+#define CLKID_VPU_CLKC_P0 105
+#define CLKID_VPU_CLKC_P1_SEL 106
+#define CLKID_VPU_CLKC_P1_DIV 107
+#define CLKID_VPU_CLKC_P1 108
+#define CLKID_VPU_CLKC_SEL 109
+#define CLKID_VAPB_0_SEL 110
+#define CLKID_VAPB_0_DIV 111
+#define CLKID_VAPB_0 112
+#define CLKID_VAPB_1_SEL 113
+#define CLKID_VAPB_1_DIV 114
+#define CLKID_VAPB_1 115
+#define CLKID_VAPB 116
+#define CLKID_GE2D 117
+#define CLKID_VDIN_MEAS_SEL 118
+#define CLKID_VDIN_MEAS_DIV 119
+#define CLKID_VDIN_MEAS 120
+#define CLKID_SD_EMMC_C_CLK_SEL 121
+#define CLKID_SD_EMMC_C_CLK_DIV 122
+#define CLKID_SD_EMMC_C 123
+#define CLKID_SD_EMMC_A_CLK_SEL 124
+#define CLKID_SD_EMMC_A_CLK_DIV 125
+#define CLKID_SD_EMMC_A 126
+#define CLKID_SD_EMMC_B_CLK_SEL 127
+#define CLKID_SD_EMMC_B_CLK_DIV 128
+#define CLKID_SD_EMMC_B 129
+#define CLKID_SPICC0_SEL 130
+#define CLKID_SPICC0_DIV 131
+#define CLKID_SPICC0_EN 132
+#define CLKID_PWM_A_SEL 133
+#define CLKID_PWM_A_DIV 134
+#define CLKID_PWM_A 135
+#define CLKID_PWM_B_SEL 136
+#define CLKID_PWM_B_DIV 137
+#define CLKID_PWM_B 138
+#define CLKID_PWM_C_SEL 139
+#define CLKID_PWM_C_DIV 140
+#define CLKID_PWM_C 141
+#define CLKID_PWM_D_SEL 142
+#define CLKID_PWM_D_DIV 143
+#define CLKID_PWM_D 144
+#define CLKID_PWM_E_SEL 145
+#define CLKID_PWM_E_DIV 146
+#define CLKID_PWM_E 147
+#define CLKID_PWM_F_SEL 148
+#define CLKID_PWM_F_DIV 149
+#define CLKID_PWM_F 150
+#define CLKID_PWM_G_SEL 151
+#define CLKID_PWM_G_DIV 152
+#define CLKID_PWM_G 153
+#define CLKID_PWM_H_SEL 154
+#define CLKID_PWM_H_DIV 155
+#define CLKID_PWM_H 156
+#define CLKID_PWM_I_SEL 157
+#define CLKID_PWM_I_DIV 158
+#define CLKID_PWM_I 159
+#define CLKID_PWM_J_SEL 160
+#define CLKID_PWM_J_DIV 161
+#define CLKID_PWM_J 162
+#define CLKID_SARADC_SEL 163
+#define CLKID_SARADC_DIV 164
+#define CLKID_SARADC 165
+#define CLKID_GEN_SEL 166
+#define CLKID_GEN_DIV 167
+#define CLKID_GEN 168
+#define CLKID_DDR 169
+#define CLKID_DOS 170
+#define CLKID_ETHPHY 171
+#define CLKID_MALI 172
+#define CLKID_AOCPU 173
+#define CLKID_AUCPU 174
+#define CLKID_CEC 175
+#define CLKID_SDEMMC_A 176
+#define CLKID_SDEMMC_B 177
+#define CLKID_NAND 178
+#define CLKID_SMARTCARD 179
+#define CLKID_ACODEC 180
+#define CLKID_SPIFC 181
+#define CLKID_MSR 182
+#define CLKID_IR_CTRL 183
+#define CLKID_AUDIO 184
+#define CLKID_ETH 185
+#define CLKID_UART_A 186
+#define CLKID_UART_B 187
+#define CLKID_UART_C 188
+#define CLKID_UART_D 189
+#define CLKID_UART_E 190
+#define CLKID_AIFIFO 191
+#define CLKID_TS_DDR 192
+#define CLKID_TS_PLL 193
+#define CLKID_G2D 194
+#define CLKID_SPICC0 195
+#define CLKID_SPICC1 196
+#define CLKID_USB 197
+#define CLKID_I2C_M_A 198
+#define CLKID_I2C_M_B 199
+#define CLKID_I2C_M_C 200
+#define CLKID_I2C_M_D 201
+#define CLKID_I2C_M_E 202
+#define CLKID_HDMITX_APB 203
+#define CLKID_I2C_S_A 204
+#define CLKID_USB1_TO_DDR 205
+#define CLKID_HDCP22 206
+#define CLKID_MMC_APB 207
+#define CLKID_RSA 208
+#define CLKID_CPU_DEBUG 209
+#define CLKID_VPU_INTR 210
+#define CLKID_DEMOD 211
+#define CLKID_SAR_ADC 212
+#define CLKID_GIC 213
+#define CLKID_PWM_AB 214
+#define CLKID_PWM_CD 215
+#define CLKID_PWM_EF 216
+#define CLKID_PWM_GH 217
+#define CLKID_PWM_IJ 218
+#define CLKID_HDCP22_ESMCLK_SEL 219
+#define CLKID_HDCP22_ESMCLK_DIV 220
+#define CLKID_HDCP22_ESMCLK 221
+#define CLKID_HDCP22_SKPCLK_SEL 222
+#define CLKID_HDCP22_SKPCLK_DIV 223
+#define CLKID_HDCP22_SKPCLK 224
+
+#endif /* _DT_BINDINGS_CLOCK_AMLOGIC_S4_PERIPHERALS_CLKC_H */
diff --git a/include/dt-bindings/clock/amlogic,s4-pll-clkc.h b/include/dt-bindings/clock/amlogic,s4-pll-clkc.h
new file mode 100644
index 000000000000..af9f110f8b62
--- /dev/null
+++ b/include/dt-bindings/clock/amlogic,s4-pll-clkc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2022-2023 Amlogic, Inc. All rights reserved.
+ * Author: Yu Tu <yu.tu@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_AMLOGIC_S4_PLL_CLKC_H
+#define _DT_BINDINGS_CLOCK_AMLOGIC_S4_PLL_CLKC_H
+
+#define CLKID_FIXED_PLL_DCO 0
+#define CLKID_FIXED_PLL 1
+#define CLKID_FCLK_DIV2_DIV 2
+#define CLKID_FCLK_DIV2 3
+#define CLKID_FCLK_DIV3_DIV 4
+#define CLKID_FCLK_DIV3 5
+#define CLKID_FCLK_DIV4_DIV 6
+#define CLKID_FCLK_DIV4 7
+#define CLKID_FCLK_DIV5_DIV 8
+#define CLKID_FCLK_DIV5 9
+#define CLKID_FCLK_DIV7_DIV 10
+#define CLKID_FCLK_DIV7 11
+#define CLKID_FCLK_DIV2P5_DIV 12
+#define CLKID_FCLK_DIV2P5 13
+#define CLKID_GP0_PLL_DCO 14
+#define CLKID_GP0_PLL 15
+#define CLKID_HIFI_PLL_DCO 16
+#define CLKID_HIFI_PLL 17
+#define CLKID_HDMI_PLL_DCO 18
+#define CLKID_HDMI_PLL_OD 19
+#define CLKID_HDMI_PLL 20
+#define CLKID_MPLL_50M_DIV 21
+#define CLKID_MPLL_50M 22
+#define CLKID_MPLL_PREDIV 23
+#define CLKID_MPLL0_DIV 24
+#define CLKID_MPLL0 25
+#define CLKID_MPLL1_DIV 26
+#define CLKID_MPLL1 27
+#define CLKID_MPLL2_DIV 28
+#define CLKID_MPLL2 29
+#define CLKID_MPLL3_DIV 30
+#define CLKID_MPLL3 31
+
+#endif /* _DT_BINDINGS_CLOCK_AMLOGIC_S4_PLL_CLKC_H */
diff --git a/include/dt-bindings/clock/aspeed,ast2700-scu.h b/include/dt-bindings/clock/aspeed,ast2700-scu.h
new file mode 100644
index 000000000000..bacf712e8e04
--- /dev/null
+++ b/include/dt-bindings/clock/aspeed,ast2700-scu.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Device Tree binding constants for the AST2700 clock controller.
+ *
+ * Copyright (c) 2024 Aspeed Technology Inc.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_AST2700_H
+#define __DT_BINDINGS_CLOCK_AST2700_H
+
+/* SOC0 clk */
+#define SCU0_CLKIN 0
+#define SCU0_CLK_24M 1
+#define SCU0_CLK_192M 2
+#define SCU0_CLK_UART 3
+#define SCU0_CLK_UART_DIV13 3
+#define SCU0_CLK_PSP 4
+#define SCU0_CLK_HPLL 5
+#define SCU0_CLK_HPLL_DIV2 6
+#define SCU0_CLK_HPLL_DIV4 7
+#define SCU0_CLK_HPLL_DIV_AHB 8
+#define SCU0_CLK_DPLL 9
+#define SCU0_CLK_MPLL 10
+#define SCU0_CLK_MPLL_DIV2 11
+#define SCU0_CLK_MPLL_DIV4 12
+#define SCU0_CLK_MPLL_DIV8 13
+#define SCU0_CLK_MPLL_DIV_AHB 14
+#define SCU0_CLK_D0 15
+#define SCU0_CLK_D1 16
+#define SCU0_CLK_CRT0 17
+#define SCU0_CLK_CRT1 18
+#define SCU0_CLK_MPHY 19
+#define SCU0_CLK_AXI0 20
+#define SCU0_CLK_AXI1 21
+#define SCU0_CLK_AHB 22
+#define SCU0_CLK_APB 23
+#define SCU0_CLK_UART4 24
+#define SCU0_CLK_EMMCMUX 25
+#define SCU0_CLK_EMMC 26
+#define SCU0_CLK_U2PHY_CLK12M 27
+#define SCU0_CLK_U2PHY_REFCLK 28
+
+/* SOC0 clk-gate */
+#define SCU0_CLK_GATE_MCLK 29
+#define SCU0_CLK_GATE_ECLK 30
+#define SCU0_CLK_GATE_2DCLK 31
+#define SCU0_CLK_GATE_VCLK 32
+#define SCU0_CLK_GATE_BCLK 33
+#define SCU0_CLK_GATE_VGA0CLK 34
+#define SCU0_CLK_GATE_REFCLK 35
+#define SCU0_CLK_GATE_PORTBUSB2CLK 36
+#define SCU0_CLK_GATE_UHCICLK 37
+#define SCU0_CLK_GATE_VGA1CLK 38
+#define SCU0_CLK_GATE_DDRPHYCLK 39
+#define SCU0_CLK_GATE_E2M0CLK 40
+#define SCU0_CLK_GATE_HACCLK 41
+#define SCU0_CLK_GATE_PORTAUSB2CLK 42
+#define SCU0_CLK_GATE_UART4CLK 43
+#define SCU0_CLK_GATE_SLICLK 44
+#define SCU0_CLK_GATE_DACCLK 45
+#define SCU0_CLK_GATE_DP 46
+#define SCU0_CLK_GATE_E2M1CLK 47
+#define SCU0_CLK_GATE_CRT0CLK 48
+#define SCU0_CLK_GATE_CRT1CLK 49
+#define SCU0_CLK_GATE_ECDSACLK 50
+#define SCU0_CLK_GATE_RSACLK 51
+#define SCU0_CLK_GATE_RVAS0CLK 52
+#define SCU0_CLK_GATE_UFSCLK 53
+#define SCU0_CLK_GATE_EMMCCLK 54
+#define SCU0_CLK_GATE_RVAS1CLK 55
+#define SCU0_CLK_U2PHY_REFCLKSRC 56
+#define SCU0_CLK_AHBMUX 57
+#define SCU0_CLK_MPHYSRC 58
+
+/* SOC1 clk */
+#define SCU1_CLKIN 0
+#define SCU1_CLK_HPLL 1
+#define SCU1_CLK_APLL 2
+#define SCU1_CLK_APLL_DIV2 3
+#define SCU1_CLK_APLL_DIV4 4
+#define SCU1_CLK_DPLL 5
+#define SCU1_CLK_UXCLK 6
+#define SCU1_CLK_HUXCLK 7
+#define SCU1_CLK_UARTX 8
+#define SCU1_CLK_HUARTX 9
+#define SCU1_CLK_AHB 10
+#define SCU1_CLK_APB 11
+#define SCU1_CLK_UART0 12
+#define SCU1_CLK_UART1 13
+#define SCU1_CLK_UART2 14
+#define SCU1_CLK_UART3 15
+#define SCU1_CLK_UART5 16
+#define SCU1_CLK_UART6 17
+#define SCU1_CLK_UART7 18
+#define SCU1_CLK_UART8 19
+#define SCU1_CLK_UART9 20
+#define SCU1_CLK_UART10 21
+#define SCU1_CLK_UART11 22
+#define SCU1_CLK_UART12 23
+#define SCU1_CLK_UART13 24
+#define SCU1_CLK_UART14 25
+#define SCU1_CLK_APLL_DIVN 26
+#define SCU1_CLK_SDMUX 27
+#define SCU1_CLK_SDCLK 28
+#define SCU1_CLK_RMII 29
+#define SCU1_CLK_RGMII 30
+#define SCU1_CLK_MACHCLK 31
+#define SCU1_CLK_MAC0RCLK 32
+#define SCU1_CLK_MAC1RCLK 33
+#define SCU1_CLK_CAN 34
+
+/* SOC1 clk gate */
+#define SCU1_CLK_GATE_LCLK0 35
+#define SCU1_CLK_GATE_LCLK1 36
+#define SCU1_CLK_GATE_ESPI0CLK 37
+#define SCU1_CLK_GATE_ESPI1CLK 38
+#define SCU1_CLK_GATE_SDCLK 39
+#define SCU1_CLK_GATE_IPEREFCLK 40
+#define SCU1_CLK_GATE_REFCLK 41
+#define SCU1_CLK_GATE_LPCHCLK 42
+#define SCU1_CLK_GATE_MAC0CLK 43
+#define SCU1_CLK_GATE_MAC1CLK 44
+#define SCU1_CLK_GATE_MAC2CLK 45
+#define SCU1_CLK_GATE_UART0CLK 46
+#define SCU1_CLK_GATE_UART1CLK 47
+#define SCU1_CLK_GATE_UART2CLK 48
+#define SCU1_CLK_GATE_UART3CLK 49
+#define SCU1_CLK_GATE_I2CCLK 50
+#define SCU1_CLK_GATE_I3C0CLK 51
+#define SCU1_CLK_GATE_I3C1CLK 52
+#define SCU1_CLK_GATE_I3C2CLK 53
+#define SCU1_CLK_GATE_I3C3CLK 54
+#define SCU1_CLK_GATE_I3C4CLK 55
+#define SCU1_CLK_GATE_I3C5CLK 56
+#define SCU1_CLK_GATE_I3C6CLK 57
+#define SCU1_CLK_GATE_I3C7CLK 58
+#define SCU1_CLK_GATE_I3C8CLK 59
+#define SCU1_CLK_GATE_I3C9CLK 60
+#define SCU1_CLK_GATE_I3C10CLK 61
+#define SCU1_CLK_GATE_I3C11CLK 62
+#define SCU1_CLK_GATE_I3C12CLK 63
+#define SCU1_CLK_GATE_I3C13CLK 64
+#define SCU1_CLK_GATE_I3C14CLK 65
+#define SCU1_CLK_GATE_I3C15CLK 66
+#define SCU1_CLK_GATE_UART5CLK 67
+#define SCU1_CLK_GATE_UART6CLK 68
+#define SCU1_CLK_GATE_UART7CLK 69
+#define SCU1_CLK_GATE_UART8CLK 70
+#define SCU1_CLK_GATE_UART9CLK 71
+#define SCU1_CLK_GATE_UART10CLK 72
+#define SCU1_CLK_GATE_UART11CLK 73
+#define SCU1_CLK_GATE_UART12CLK 74
+#define SCU1_CLK_GATE_FSICLK 75
+#define SCU1_CLK_GATE_LTPIPHYCLK 76
+#define SCU1_CLK_GATE_LTPICLK 77
+#define SCU1_CLK_GATE_VGALCLK 78
+#define SCU1_CLK_GATE_UHCICLK 79
+#define SCU1_CLK_GATE_CANCLK 80
+#define SCU1_CLK_GATE_PCICLK 81
+#define SCU1_CLK_GATE_SLICLK 82
+#define SCU1_CLK_GATE_E2MCLK 83
+#define SCU1_CLK_GATE_PORTCUSB2CLK 84
+#define SCU1_CLK_GATE_PORTDUSB2CLK 85
+#define SCU1_CLK_GATE_LTPI1TXCLK 86
+#define SCU1_CLK_I3C 87
+
+#endif
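Note that the SOC0 and SOC1 ID spaces above both restart at 0, so the SCU0_* and SCU1_* constants are only meaningful against their respective SCU instance. A hedged sketch (unit addresses hypothetical):

    soc0_clk: clock-controller@12c02000 {
        #clock-cells = <1>;
    };

    uart4: serial@... {
        clocks = <&soc0_clk SCU0_CLK_GATE_UART4CLK>;
    };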
diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
index 9ff4f6e4558c..06d568382c77 100644
--- a/include/dt-bindings/clock/aspeed-clock.h
+++ b/include/dt-bindings/clock/aspeed-clock.h
@@ -52,5 +52,6 @@
#define ASPEED_RESET_I2C 7
#define ASPEED_RESET_AHB 8
#define ASPEED_RESET_CRT1 9
+#define ASPEED_RESET_HACE 10
#endif
diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h
index 62b9520a00fd..f60fff261130 100644
--- a/include/dt-bindings/clock/ast2600-clock.h
+++ b/include/dt-bindings/clock/ast2600-clock.h
@@ -57,8 +57,6 @@
#define ASPEED_CLK_GATE_I3C3CLK 40
#define ASPEED_CLK_GATE_I3C4CLK 41
#define ASPEED_CLK_GATE_I3C5CLK 42
-#define ASPEED_CLK_GATE_I3C6CLK 43
-#define ASPEED_CLK_GATE_I3C7CLK 44
#define ASPEED_CLK_GATE_FSICLK 45
@@ -87,11 +85,25 @@
#define ASPEED_CLK_MAC2RCLK 68
#define ASPEED_CLK_MAC3RCLK 69
#define ASPEED_CLK_MAC4RCLK 70
+#define ASPEED_CLK_I3C 71
+#define ASPEED_CLK_FSI 72
-/* Only list resets here that are not part of a gate */
+/* Only list resets here that are not part of a clock gate + reset pair */
#define ASPEED_RESET_ADC 55
#define ASPEED_RESET_JTAG_MASTER2 54
+
+#define ASPEED_RESET_MAC4 53
+#define ASPEED_RESET_MAC3 52
+
+#define ASPEED_RESET_I3C5 45
+#define ASPEED_RESET_I3C4 44
+#define ASPEED_RESET_I3C3 43
+#define ASPEED_RESET_I3C2 42
+#define ASPEED_RESET_I3C1 41
+#define ASPEED_RESET_I3C0 40
+#define ASPEED_RESET_I3C 39
#define ASPEED_RESET_I3C_DMA 39
+
#define ASPEED_RESET_PWM 37
#define ASPEED_RESET_PECI 36
#define ASPEED_RESET_MII 35
@@ -110,7 +122,10 @@
#define ASPEED_RESET_PCIE_DEV_OEN 20
#define ASPEED_RESET_PCIE_RC_O 19
#define ASPEED_RESET_PCIE_RC_OEN 18
+#define ASPEED_RESET_MAC2 12
+#define ASPEED_RESET_MAC1 11
#define ASPEED_RESET_PCI_DP 5
+#define ASPEED_RESET_HACE 4
#define ASPEED_RESET_AHB 1
#define ASPEED_RESET_SDRAM 0
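ASPEED_RESET_I3C and ASPEED_RESET_I3C_DMA above intentionally share value 39, so device trees using the older _DMA name keep working. A hedged consumer sketch for the newly exposed HACE reset (node layout illustrative):

    hace: crypto@1e6d0000 {
        resets = <&syscon ASPEED_RESET_HACE>;
    };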
diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h
index 98e1b2ab6403..f2a7b7d39c0d 100644
--- a/include/dt-bindings/clock/at91.h
+++ b/include/dt-bindings/clock/at91.h
@@ -24,6 +24,7 @@
#define PMC_PLLACK 7
#define PMC_PLLBCK 8
#define PMC_AUDIOPLLCK 9
+#define PMC_AUDIOPINCK 10
/* SAMA7G5 */
#define PMC_CPUPLL (PMC_MAIN + 1)
@@ -35,6 +36,15 @@
#define PMC_AUDIOIOPLL (PMC_MAIN + 7)
#define PMC_ETHPLL (PMC_MAIN + 8)
#define PMC_CPU (PMC_MAIN + 9)
+#define PMC_MCK1 (PMC_MAIN + 10)
+
+/* SAM9X7 */
+#define PMC_PLLADIV2 (PMC_MAIN + 11)
+#define PMC_LVDSPLL (PMC_MAIN + 12)
+
+/* SAMA7D65 */
+#define PMC_MCK3 (PMC_MAIN + 13)
+#define PMC_MCK5 (PMC_MAIN + 14)
#ifndef AT91_PMC_MOSCS
#define AT91_PMC_MOSCS 0 /* MOSCS Flag */
@@ -49,4 +59,8 @@
#define AT91_PMC_GCKRDY 24 /* Generated Clocks */
#endif
+/* Slow clock. */
+#define SCKC_MD_SLCK 0
+#define SCKC_TD_SLCK 1
+
#endif
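The new SCKC_* indices give the slow-clock controller selectable outputs. A hedged sketch of a consumer picking the timing-domain slow clock (the &clk32k label is hypothetical):

    rtc@... {
        clocks = <&clk32k SCKC_TD_SLCK>;
    };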
diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h
index f561f5c5ef8f..607f23b83fa7 100644
--- a/include/dt-bindings/clock/axg-audio-clkc.h
+++ b/include/dt-bindings/clock/axg-audio-clkc.h
@@ -37,6 +37,26 @@
#define AUD_CLKID_SPDIFIN_CLK 56
#define AUD_CLKID_PDM_DCLK 57
#define AUD_CLKID_PDM_SYSCLK 58
+#define AUD_CLKID_MST_A_MCLK_SEL 59
+#define AUD_CLKID_MST_B_MCLK_SEL 60
+#define AUD_CLKID_MST_C_MCLK_SEL 61
+#define AUD_CLKID_MST_D_MCLK_SEL 62
+#define AUD_CLKID_MST_E_MCLK_SEL 63
+#define AUD_CLKID_MST_F_MCLK_SEL 64
+#define AUD_CLKID_MST_A_MCLK_DIV 65
+#define AUD_CLKID_MST_B_MCLK_DIV 66
+#define AUD_CLKID_MST_C_MCLK_DIV 67
+#define AUD_CLKID_MST_D_MCLK_DIV 68
+#define AUD_CLKID_MST_E_MCLK_DIV 69
+#define AUD_CLKID_MST_F_MCLK_DIV 70
+#define AUD_CLKID_SPDIFOUT_CLK_SEL 71
+#define AUD_CLKID_SPDIFOUT_CLK_DIV 72
+#define AUD_CLKID_SPDIFIN_CLK_SEL 73
+#define AUD_CLKID_SPDIFIN_CLK_DIV 74
+#define AUD_CLKID_PDM_DCLK_SEL 75
+#define AUD_CLKID_PDM_DCLK_DIV 76
+#define AUD_CLKID_PDM_SYSCLK_SEL 77
+#define AUD_CLKID_PDM_SYSCLK_DIV 78
#define AUD_CLKID_MST_A_SCLK 79
#define AUD_CLKID_MST_B_SCLK 80
#define AUD_CLKID_MST_C_SCLK 81
@@ -49,6 +69,30 @@
#define AUD_CLKID_MST_D_LRCLK 89
#define AUD_CLKID_MST_E_LRCLK 90
#define AUD_CLKID_MST_F_LRCLK 91
+#define AUD_CLKID_MST_A_SCLK_PRE_EN 92
+#define AUD_CLKID_MST_B_SCLK_PRE_EN 93
+#define AUD_CLKID_MST_C_SCLK_PRE_EN 94
+#define AUD_CLKID_MST_D_SCLK_PRE_EN 95
+#define AUD_CLKID_MST_E_SCLK_PRE_EN 96
+#define AUD_CLKID_MST_F_SCLK_PRE_EN 97
+#define AUD_CLKID_MST_A_SCLK_DIV 98
+#define AUD_CLKID_MST_B_SCLK_DIV 99
+#define AUD_CLKID_MST_C_SCLK_DIV 100
+#define AUD_CLKID_MST_D_SCLK_DIV 101
+#define AUD_CLKID_MST_E_SCLK_DIV 102
+#define AUD_CLKID_MST_F_SCLK_DIV 103
+#define AUD_CLKID_MST_A_SCLK_POST_EN 104
+#define AUD_CLKID_MST_B_SCLK_POST_EN 105
+#define AUD_CLKID_MST_C_SCLK_POST_EN 106
+#define AUD_CLKID_MST_D_SCLK_POST_EN 107
+#define AUD_CLKID_MST_E_SCLK_POST_EN 108
+#define AUD_CLKID_MST_F_SCLK_POST_EN 109
+#define AUD_CLKID_MST_A_LRCLK_DIV 110
+#define AUD_CLKID_MST_B_LRCLK_DIV 111
+#define AUD_CLKID_MST_C_LRCLK_DIV 112
+#define AUD_CLKID_MST_D_LRCLK_DIV 113
+#define AUD_CLKID_MST_E_LRCLK_DIV 114
+#define AUD_CLKID_MST_F_LRCLK_DIV 115
#define AUD_CLKID_TDMIN_A_SCLK_SEL 116
#define AUD_CLKID_TDMIN_B_SCLK_SEL 117
#define AUD_CLKID_TDMIN_C_SCLK_SEL 118
@@ -70,8 +114,24 @@
#define AUD_CLKID_TDMOUT_A_LRCLK 134
#define AUD_CLKID_TDMOUT_B_LRCLK 135
#define AUD_CLKID_TDMOUT_C_LRCLK 136
+#define AUD_CLKID_TDMIN_A_SCLK_PRE_EN 137
+#define AUD_CLKID_TDMIN_B_SCLK_PRE_EN 138
+#define AUD_CLKID_TDMIN_C_SCLK_PRE_EN 139
+#define AUD_CLKID_TDMIN_LB_SCLK_PRE_EN 140
+#define AUD_CLKID_TDMOUT_A_SCLK_PRE_EN 141
+#define AUD_CLKID_TDMOUT_B_SCLK_PRE_EN 142
+#define AUD_CLKID_TDMOUT_C_SCLK_PRE_EN 143
+#define AUD_CLKID_TDMIN_A_SCLK_POST_EN 144
+#define AUD_CLKID_TDMIN_B_SCLK_POST_EN 145
+#define AUD_CLKID_TDMIN_C_SCLK_POST_EN 146
+#define AUD_CLKID_TDMIN_LB_SCLK_POST_EN 147
+#define AUD_CLKID_TDMOUT_A_SCLK_POST_EN 148
+#define AUD_CLKID_TDMOUT_B_SCLK_POST_EN 149
+#define AUD_CLKID_TDMOUT_C_SCLK_POST_EN 150
#define AUD_CLKID_SPDIFOUT_B 151
#define AUD_CLKID_SPDIFOUT_B_CLK 152
+#define AUD_CLKID_SPDIFOUT_B_CLK_SEL 153
+#define AUD_CLKID_SPDIFOUT_B_CLK_DIV 154
#define AUD_CLKID_TDM_MCLK_PAD0 155
#define AUD_CLKID_TDM_MCLK_PAD1 156
#define AUD_CLKID_TDM_LRCLK_PAD0 157
@@ -90,5 +150,17 @@
#define AUD_CLKID_FRDDR_D 170
#define AUD_CLKID_TODDR_D 171
#define AUD_CLKID_LOOPBACK_B 172
+#define AUD_CLKID_CLK81_EN 173
+#define AUD_CLKID_SYSCLK_A_DIV 174
+#define AUD_CLKID_SYSCLK_B_DIV 175
+#define AUD_CLKID_SYSCLK_A_EN 176
+#define AUD_CLKID_SYSCLK_B_EN 177
+#define AUD_CLKID_EARCRX 178
+#define AUD_CLKID_EARCRX_CMDC_SEL 179
+#define AUD_CLKID_EARCRX_CMDC_DIV 180
+#define AUD_CLKID_EARCRX_CMDC 181
+#define AUD_CLKID_EARCRX_DMAC_SEL 182
+#define AUD_CLKID_EARCRX_DMAC_DIV 183
+#define AUD_CLKID_EARCRX_DMAC 184
#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */
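Exposing the intermediate _SEL/_DIV IDs above lets a board pin parents and rates from the device tree instead of relying on driver defaults. A hedged sketch using the master clock A mux (parent choice illustrative; CLKID_MPLL0 comes from axg-clkc.h below):

    &clkc_audio {
        assigned-clocks = <&clkc_audio AUD_CLKID_MST_A_MCLK_SEL>;
        assigned-clock-parents = <&clkc CLKID_MPLL0>;
    };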
diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h
index 93752ea107e3..442162822b88 100644
--- a/include/dt-bindings/clock/axg-clkc.h
+++ b/include/dt-bindings/clock/axg-clkc.h
@@ -16,6 +16,8 @@
#define CLKID_FCLK_DIV5 5
#define CLKID_FCLK_DIV7 6
#define CLKID_GP0_PLL 7
+#define CLKID_MPEG_SEL 8
+#define CLKID_MPEG_DIV 9
#define CLKID_CLK81 10
#define CLKID_MPLL0 11
#define CLKID_MPLL1 12
@@ -67,23 +69,66 @@
#define CLKID_AO_I2C 58
#define CLKID_SD_EMMC_B_CLK0 59
#define CLKID_SD_EMMC_C_CLK0 60
+#define CLKID_SD_EMMC_B_CLK0_SEL 61
+#define CLKID_SD_EMMC_B_CLK0_DIV 62
+#define CLKID_SD_EMMC_C_CLK0_SEL 63
+#define CLKID_SD_EMMC_C_CLK0_DIV 64
+#define CLKID_MPLL0_DIV 65
+#define CLKID_MPLL1_DIV 66
+#define CLKID_MPLL2_DIV 67
+#define CLKID_MPLL3_DIV 68
#define CLKID_HIFI_PLL 69
+#define CLKID_MPLL_PREDIV 70
+#define CLKID_FCLK_DIV2_DIV 71
+#define CLKID_FCLK_DIV3_DIV 72
+#define CLKID_FCLK_DIV4_DIV 73
+#define CLKID_FCLK_DIV5_DIV 74
+#define CLKID_FCLK_DIV7_DIV 75
+#define CLKID_PCIE_PLL 76
+#define CLKID_PCIE_MUX 77
+#define CLKID_PCIE_REF 78
#define CLKID_PCIE_CML_EN0 79
#define CLKID_PCIE_CML_EN1 80
+#define CLKID_GEN_CLK_SEL 82
+#define CLKID_GEN_CLK_DIV 83
#define CLKID_GEN_CLK 84
+#define CLKID_SYS_PLL_DCO 85
+#define CLKID_FIXED_PLL_DCO 86
+#define CLKID_GP0_PLL_DCO 87
+#define CLKID_HIFI_PLL_DCO 88
+#define CLKID_PCIE_PLL_DCO 89
+#define CLKID_PCIE_PLL_OD 90
+#define CLKID_VPU_0_DIV 91
#define CLKID_VPU_0_SEL 92
#define CLKID_VPU_0 93
+#define CLKID_VPU_1_DIV 94
#define CLKID_VPU_1_SEL 95
#define CLKID_VPU_1 96
#define CLKID_VPU 97
+#define CLKID_VAPB_0_DIV 98
#define CLKID_VAPB_0_SEL 99
#define CLKID_VAPB_0 100
+#define CLKID_VAPB_1_DIV 101
#define CLKID_VAPB_1_SEL 102
#define CLKID_VAPB_1 103
#define CLKID_VAPB_SEL 104
#define CLKID_VAPB 105
#define CLKID_VCLK 106
#define CLKID_VCLK2 107
+#define CLKID_VCLK_SEL 108
+#define CLKID_VCLK2_SEL 109
+#define CLKID_VCLK_INPUT 110
+#define CLKID_VCLK2_INPUT 111
+#define CLKID_VCLK_DIV 112
+#define CLKID_VCLK2_DIV 113
+#define CLKID_VCLK_DIV2_EN 114
+#define CLKID_VCLK_DIV4_EN 115
+#define CLKID_VCLK_DIV6_EN 116
+#define CLKID_VCLK_DIV12_EN 117
+#define CLKID_VCLK2_DIV2_EN 118
+#define CLKID_VCLK2_DIV4_EN 119
+#define CLKID_VCLK2_DIV6_EN 120
+#define CLKID_VCLK2_DIV12_EN 121
#define CLKID_VCLK_DIV1 122
#define CLKID_VCLK_DIV2 123
#define CLKID_VCLK_DIV4 124
@@ -94,7 +139,10 @@
#define CLKID_VCLK2_DIV4 129
#define CLKID_VCLK2_DIV6 130
#define CLKID_VCLK2_DIV12 131
+#define CLKID_CTS_ENCL_SEL 132
#define CLKID_CTS_ENCL 133
+#define CLKID_VDIN_MEAS_SEL 134
+#define CLKID_VDIN_MEAS_DIV 135
#define CLKID_VDIN_MEAS 136
#endif /* __AXG_CLKC_H */
diff --git a/include/dt-bindings/clock/axis,artpec6-clkctrl.h b/include/dt-bindings/clock/axis,artpec6-clkctrl.h
index b1f4971642e6..14e424a7c08c 100644
--- a/include/dt-bindings/clock/axis,artpec6-clkctrl.h
+++ b/include/dt-bindings/clock/axis,artpec6-clkctrl.h
@@ -2,7 +2,7 @@
/*
* ARTPEC-6 clock controller indexes
*
- * Copyright 2016 Axis Comunications AB.
+ * Copyright 2016 Axis Communications AB.
*/
#ifndef DT_BINDINGS_CLK_ARTPEC6_CLKCTRL_H
diff --git a/include/dt-bindings/clock/axis,artpec8-clk.h b/include/dt-bindings/clock/axis,artpec8-clk.h
new file mode 100644
index 000000000000..1e6e1409dd94
--- /dev/null
+++ b/include/dt-bindings/clock/axis,artpec8-clk.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ * Copyright (c) 2025 Axis Communications AB.
+ * https://www.axis.com
+ *
+ * Device Tree binding constants for the ARTPEC-8 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_ARTPEC8_H
+#define _DT_BINDINGS_CLOCK_ARTPEC8_H
+
+/* CMU_CMU */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_DOUT_SHARED0_DIV2 2
+#define CLK_DOUT_SHARED0_DIV3 3
+#define CLK_DOUT_SHARED0_DIV4 4
+#define CLK_FOUT_SHARED1_PLL 5
+#define CLK_DOUT_SHARED1_DIV2 6
+#define CLK_DOUT_SHARED1_DIV3 7
+#define CLK_DOUT_SHARED1_DIV4 8
+#define CLK_FOUT_AUDIO_PLL 9
+#define CLK_DOUT_CMU_BUS 10
+#define CLK_DOUT_CMU_BUS_DLP 11
+#define CLK_DOUT_CMU_CDC_CORE 12
+#define CLK_DOUT_CMU_OTP 13
+#define CLK_DOUT_CMU_CORE_MAIN 14
+#define CLK_DOUT_CMU_CORE_DLP 15
+#define CLK_DOUT_CMU_CPUCL_SWITCH 16
+#define CLK_DOUT_CMU_DLP_CORE 17
+#define CLK_DOUT_CMU_FSYS_BUS 18
+#define CLK_DOUT_CMU_FSYS_IP 19
+#define CLK_DOUT_CMU_FSYS_SCAN0 20
+#define CLK_DOUT_CMU_FSYS_SCAN1 21
+#define CLK_DOUT_CMU_GPU_3D 22
+#define CLK_DOUT_CMU_GPU_2D 23
+#define CLK_DOUT_CMU_IMEM_ACLK 24
+#define CLK_DOUT_CMU_IMEM_JPEG 25
+#define CLK_DOUT_CMU_MIF_SWITCH 26
+#define CLK_DOUT_CMU_MIF_BUSP 27
+#define CLK_DOUT_CMU_PERI_DISP 28
+#define CLK_DOUT_CMU_PERI_IP 29
+#define CLK_DOUT_CMU_PERI_AUDIO 30
+#define CLK_DOUT_CMU_RSP_CORE 31
+#define CLK_DOUT_CMU_TRFM_CORE 32
+#define CLK_DOUT_CMU_VCA_ACE 33
+#define CLK_DOUT_CMU_VCA_OD 34
+#define CLK_DOUT_CMU_VIO_CORE 35
+#define CLK_DOUT_CMU_VIO_AUDIO 36
+#define CLK_DOUT_CMU_VIP0_CORE 37
+#define CLK_DOUT_CMU_VIP1_CORE 38
+#define CLK_DOUT_CMU_VPP_CORE 39
+
+/* CMU_BUS */
+#define CLK_MOUT_BUS_ACLK_USER 1
+#define CLK_MOUT_BUS_DLP_USER 2
+#define CLK_DOUT_BUS_PCLK 3
+
+/* CMU_CORE */
+#define CLK_MOUT_CORE_ACLK_USER 1
+#define CLK_MOUT_CORE_DLP_USER 2
+#define CLK_DOUT_CORE_PCLK 3
+
+/* CMU_CPUCL */
+#define CLK_FOUT_CPUCL_PLL 1
+#define CLK_MOUT_CPUCL_PLL 2
+#define CLK_MOUT_CPUCL_SWITCH_USER 3
+#define CLK_DOUT_CPUCL_CPU 4
+#define CLK_DOUT_CPUCL_CLUSTER_ACLK 5
+#define CLK_DOUT_CPUCL_CLUSTER_PCLKDBG 6
+#define CLK_DOUT_CPUCL_CLUSTER_CNTCLK 7
+#define CLK_DOUT_CPUCL_CLUSTER_ATCLK 8
+#define CLK_DOUT_CPUCL_PCLK 9
+#define CLK_DOUT_CPUCL_CMUREF 10
+#define CLK_DOUT_CPUCL_DBG 11
+#define CLK_DOUT_CPUCL_PCLKDBG 12
+#define CLK_GOUT_CPUCL_CLUSTER_CPU 13
+#define CLK_GOUT_CPUCL_SHORTSTOP 14
+#define CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_PCLKDBG 15
+#define CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_ATCLK 16
+
+/* CMU_FSYS */
+#define CLK_FOUT_FSYS_PLL 1
+#define CLK_MOUT_FSYS_SCAN0_USER 2
+#define CLK_MOUT_FSYS_SCAN1_USER 3
+#define CLK_MOUT_FSYS_BUS_USER 4
+#define CLK_MOUT_FSYS_MMC_USER 5
+#define CLK_DOUT_FSYS_PCIE_PIPE 6
+#define CLK_DOUT_FSYS_ADC 7
+#define CLK_DOUT_FSYS_PCIE_PHY_REFCLK_SYSPLL 8
+#define CLK_DOUT_FSYS_EQOS_INT125 9
+#define CLK_DOUT_FSYS_OTP_MEM 10
+#define CLK_DOUT_FSYS_SCLK_UART 11
+#define CLK_DOUT_FSYS_EQOS_25 12
+#define CLK_DOUT_FSYS_EQOS_2p5 13
+#define CLK_DOUT_FSYS_BUS300 14
+#define CLK_DOUT_FSYS_BUS_QSPI 15
+#define CLK_DOUT_FSYS_MMC_CARD0 16
+#define CLK_DOUT_FSYS_MMC_CARD1 17
+#define CLK_DOUT_SCAN_CLK_FSYS_125 18
+#define CLK_DOUT_FSYS_QSPI 19
+#define CLK_DOUT_FSYS_SFMC_NAND 20
+#define CLK_DOUT_FSYS_SCAN_CLK_MMC 21
+#define CLK_GOUT_FSYS_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20 22
+#define CLK_GOUT_FSYS_USB20DRD_IPCLKPORT_BUS_CLK_EARLY 23
+#define CLK_GOUT_FSYS_XHB_USB_IPCLKPORT_CLK 24
+#define CLK_GOUT_FSYS_XHB_AHBBR_IPCLKPORT_CLK 25
+#define CLK_GOUT_FSYS_I2C0_IPCLKPORT_I_PCLK 26
+#define CLK_GOUT_FSYS_I2C1_IPCLKPORT_I_PCLK 27
+#define CLK_GOUT_FSYS_PWM_IPCLKPORT_I_PCLK_S0 28
+#define CLK_GOUT_FSYS_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG 29
+#define CLK_GOUT_FSYS_DWC_PCIE_CTL_INXT_0_SLV_ACLK_UG 30
+#define CLK_GOUT_FSYS_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG 31
+#define CLK_GOUT_FSYS_PIPE_PAL_INST_0_I_APB_PCLK 32
+#define CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_ACLK_I 33
+#define CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_CLK_CSR_I 34
+#define CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_I_RGMII_TXCLK_2P5 35
+#define CLK_GOUT_FSYS_SFMC_IPCLKPORT_I_ACLK_NAND 36
+#define CLK_GOUT_FSYS_MMC0_IPCLKPORT_SDCLKIN 37
+#define CLK_GOUT_FSYS_MMC0_IPCLKPORT_I_ACLK 38
+#define CLK_GOUT_FSYS_MMC1_IPCLKPORT_SDCLKIN 39
+#define CLK_GOUT_FSYS_MMC1_IPCLKPORT_I_ACLK 40
+#define CLK_GOUT_FSYS_PCIE_PHY_REFCLK_IN 41
+#define CLK_GOUT_FSYS_UART0_PCLK 42
+#define CLK_GOUT_FSYS_UART0_SCLK_UART 43
+#define CLK_GOUT_FSYS_BUS_QSPI 44
+#define CLK_GOUT_FSYS_QSPI_IPCLKPORT_HCLK 45
+#define CLK_GOUT_FSYS_QSPI_IPCLKPORT_SSI_CLK 46
+
+/* CMU_IMEM */
+#define CLK_MOUT_IMEM_ACLK_USER 1
+#define CLK_MOUT_IMEM_GIC_CA53 2
+#define CLK_MOUT_IMEM_GIC_CA5 3
+#define CLK_MOUT_IMEM_JPEG_USER 4
+#define CLK_GOUT_IMEM_MCT_PCLK 5
+#define CLK_GOUT_IMEM_PCLK_TMU0_APBIF 6
+
+/* CMU_PERI */
+#define CLK_MOUT_PERI_IP_USER 1
+#define CLK_MOUT_PERI_AUDIO_USER 2
+#define CLK_MOUT_PERI_I2S0 3
+#define CLK_MOUT_PERI_I2S1 4
+#define CLK_MOUT_PERI_DISP_USER 5
+#define CLK_DOUT_PERI_SPI 6
+#define CLK_DOUT_PERI_UART1 7
+#define CLK_DOUT_PERI_UART2 8
+#define CLK_DOUT_PERI_PCLK 9
+#define CLK_DOUT_PERI_I2S0 10
+#define CLK_DOUT_PERI_I2S1 11
+#define CLK_DOUT_PERI_DSIM 12
+#define CLK_GOUT_PERI_UART1_PCLK 13
+#define CLK_GOUT_PERI_UART1_SCLK_UART 14
+#define CLK_GOUT_PERI_UART2_PCLK 15
+#define CLK_GOUT_PERI_UART2_SCLK_UART 16
+#define CLK_GOUT_PERI_I2C2_IPCLKPORT_I_PCLK 17
+#define CLK_GOUT_PERI_I2C3_IPCLKPORT_I_PCLK 18
+#define CLK_GOUT_PERI_SPI0_PCLK 19
+#define CLK_GOUT_PERI_SPI0_SCLK_SPI 20
+#define CLK_GOUT_PERI_APB_ASYNC_DSIM_IPCLKPORT_PCLKS 21
+#define CLK_GOUT_PERI_I2SSC0_IPCLKPORT_CLK_HST 22
+#define CLK_GOUT_PERI_I2SSC1_IPCLKPORT_CLK_HST 23
+#define CLK_GOUT_PERI_AUDIO_OUT_IPCLKPORT_CLK 24
+#define CLK_GOUT_PERI_I2SSC0_IPCLKPORT_CLK 25
+#define CLK_GOUT_PERI_I2SSC1_IPCLKPORT_CLK 26
+#define CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_APB_CLK 27
+#define CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_AXI_CLK 28
+
+#endif /* _DT_BINDINGS_CLOCK_ARTPEC8_H */
diff --git a/include/dt-bindings/clock/bcm21664.h b/include/dt-bindings/clock/bcm21664.h
index 5a7f0e4750a8..7c7492742f3d 100644
--- a/include/dt-bindings/clock/bcm21664.h
+++ b/include/dt-bindings/clock/bcm21664.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Broadcom Corporation
* Copyright 2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _CLOCK_BCM21664_H
diff --git a/include/dt-bindings/clock/bcm281xx.h b/include/dt-bindings/clock/bcm281xx.h
index a763460cf1af..d74ca42112e7 100644
--- a/include/dt-bindings/clock/bcm281xx.h
+++ b/include/dt-bindings/clock/bcm281xx.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2013 Broadcom Corporation
* Copyright 2013 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _CLOCK_BCM281XX_H
diff --git a/include/dt-bindings/clock/bcm63268-clock.h b/include/dt-bindings/clock/bcm63268-clock.h
index da23e691d359..dea8adc8510e 100644
--- a/include/dt-bindings/clock/bcm63268-clock.h
+++ b/include/dt-bindings/clock/bcm63268-clock.h
@@ -27,4 +27,17 @@
#define BCM63268_CLK_TBUS 27
#define BCM63268_CLK_ROBOSW250 31
+#define BCM63268_TCLK_EPHY1 0
+#define BCM63268_TCLK_EPHY2 1
+#define BCM63268_TCLK_EPHY3 2
+#define BCM63268_TCLK_GPHY1 3
+#define BCM63268_TCLK_DSL 4
+#define BCM63268_TCLK_WAKEON_EPHY 6
+#define BCM63268_TCLK_WAKEON_DSL 7
+#define BCM63268_TCLK_FAP1 11
+#define BCM63268_TCLK_FAP2 15
+#define BCM63268_TCLK_UTO_50 16
+#define BCM63268_TCLK_UTO_EXTIN 17
+#define BCM63268_TCLK_USB_REF 18
+
#endif /* __DT_BINDINGS_CLOCK_BCM63268_H */
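The new BCM63268_TCLK_* IDs form a second specifier space, separate from the BCM63268_CLK_* gates above, presumably provided by a distinct timer-clock controller node. A hedged sketch (label and node entirely illustrative):

    ephy1: ethernet-phy@1 {
        clocks = <&timer_clk BCM63268_TCLK_EPHY1>;
    };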
diff --git a/include/dt-bindings/clock/boston-clock.h b/include/dt-bindings/clock/boston-clock.h
index a6f009821137..38140fa87b09 100644
--- a/include/dt-bindings/clock/boston-clock.h
+++ b/include/dt-bindings/clock/boston-clock.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2016 Imagination Technologies
- *
- * SPDX-License-Identifier: GPL-2.0
*/
#ifndef __DT_BINDINGS_CLOCK_BOSTON_CLOCK_H__
diff --git a/include/dt-bindings/clock/cirrus,cs2000-cp.h b/include/dt-bindings/clock/cirrus,cs2000-cp.h
new file mode 100644
index 000000000000..fe3ac71750a8
--- /dev/null
+++ b/include/dt-bindings/clock/cirrus,cs2000-cp.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021 Daniel Mack
+ */
+
+#ifndef __DT_BINDINGS_CS2000CP_CLK_H
+#define __DT_BINDINGS_CS2000CP_CLK_H
+
+#define CS2000CP_AUX_OUTPUT_REF_CLK 0
+#define CS2000CP_AUX_OUTPUT_CLK_IN 1
+#define CS2000CP_AUX_OUTPUT_CLK_OUT 2
+#define CS2000CP_AUX_OUTPUT_PLL_LOCK 3
+
+#endif /* __DT_BINDINGS_CS2000CP_CLK_H */
diff --git a/include/dt-bindings/clock/cirrus,ep9301-syscon.h b/include/dt-bindings/clock/cirrus,ep9301-syscon.h
new file mode 100644
index 000000000000..6bb8f532e7d0
--- /dev/null
+++ b/include/dt-bindings/clock/cirrus,ep9301-syscon.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+#ifndef DT_BINDINGS_CIRRUS_EP93XX_CLOCK_H
+#define DT_BINDINGS_CIRRUS_EP93XX_CLOCK_H
+
+#define EP93XX_CLK_PLL1 0
+#define EP93XX_CLK_PLL2 1
+
+#define EP93XX_CLK_FCLK 2
+#define EP93XX_CLK_HCLK 3
+#define EP93XX_CLK_PCLK 4
+
+#define EP93XX_CLK_UART 5
+#define EP93XX_CLK_SPI 6
+#define EP93XX_CLK_PWM 7
+#define EP93XX_CLK_USB 8
+
+#define EP93XX_CLK_M2M0 9
+#define EP93XX_CLK_M2M1 10
+
+#define EP93XX_CLK_M2P0 11
+#define EP93XX_CLK_M2P1 12
+#define EP93XX_CLK_M2P2 13
+#define EP93XX_CLK_M2P3 14
+#define EP93XX_CLK_M2P4 15
+#define EP93XX_CLK_M2P5 16
+#define EP93XX_CLK_M2P6 17
+#define EP93XX_CLK_M2P7 18
+#define EP93XX_CLK_M2P8 19
+#define EP93XX_CLK_M2P9 20
+
+#define EP93XX_CLK_UART1 21
+#define EP93XX_CLK_UART2 22
+#define EP93XX_CLK_UART3 23
+
+#define EP93XX_CLK_ADC 24
+#define EP93XX_CLK_ADC_EN 25
+
+#define EP93XX_CLK_KEYPAD 26
+
+#define EP93XX_CLK_VIDEO 27
+
+#define EP93XX_CLK_I2S_MCLK 28
+#define EP93XX_CLK_I2S_SCLK 29
+#define EP93XX_CLK_I2S_LRCLK 30
+
+#endif /* DT_BINDINGS_CIRRUS_EP93XX_CLOCK_H */
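These EP93xx IDs hang off the system controller, which doubles as the clock provider. A hedged sketch (address and compatible assumed from the binding name, treat as illustrative):

    syscon: syscon@80930000 {
        compatible = "cirrus,ep9301-syscon", "syscon";
        #clock-cells = <1>;
    };

    spi@... {
        clocks = <&syscon EP93XX_CLK_SPI>;
    };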
diff --git a/include/dt-bindings/clock/cix,sky1.h b/include/dt-bindings/clock/cix,sky1.h
new file mode 100644
index 000000000000..9245ebd1e80a
--- /dev/null
+++ b/include/dt-bindings/clock/cix,sky1.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright 2024-2025 Cix Technology Group Co., Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_CIX_SKY1_H
+#define _DT_BINDINGS_CLK_CIX_SKY1_H
+
+#define CLK_TREE_CPU_GICxCLK 0
+#define CLK_TREE_CPU_PPUCLK 1
+#define CLK_TREE_CPU_PERIPHCLK 2
+#define CLK_TREE_DSU_CLK 3
+#define CLK_TREE_DSU_PCLK 4
+#define CLK_TREE_CPU_CLK_BC0 5
+#define CLK_TREE_CPU_CLK_BC1 6
+#define CLK_TREE_CPU_CLK_BC2 7
+#define CLK_TREE_CPU_CLK_BC3 8
+#define CLK_TREE_CPU_CLK_MC0 9
+#define CLK_TREE_CPU_CLK_MC1 10
+#define CLK_TREE_CPU_CLK_MC2 11
+#define CLK_TREE_CPU_CLK_MC3 12
+#define CLK_TREE_CPU_CLK_LC0 13
+#define CLK_TREE_CPU_CLK_LC1 14
+#define CLK_TREE_CPU_CLK_LC2 15
+#define CLK_TREE_CPU_CLK_LC3 16
+#define CLK_TREE_CSI_CTRL0_PCLK 17
+#define CLK_TREE_CSI_CTRL1_PCLK 18
+#define CLK_TREE_CSI_CTRL2_PCLK 19
+#define CLK_TREE_CSI_CTRL3_PCLK 20
+#define CLK_TREE_CSI_DMA0_PCLK 21
+#define CLK_TREE_CSI_DMA1_PCLK 22
+#define CLK_TREE_CSI_DMA2_PCLK 23
+#define CLK_TREE_CSI_DMA3_PCLK 24
+#define CLK_TREE_CSI_PHY0_PSM 25
+#define CLK_TREE_CSI_PHY1_PSM 26
+#define CLK_TREE_CSI_PHY0_APBCLK 27
+#define CLK_TREE_CSI_PHY1_APBCLK 28
+#define CLK_TREE_FCH_APB_CLK 29
+#define CLK_TREE_GPU_CLK_400M 30
+#define CLK_TREE_GPU_CLK_CORE 31
+#define CLK_TREE_GPU_CLK_STACKS 32
+#define CLK_TREE_DP0_PIXEL0 33
+#define CLK_TREE_DP0_PIXEL1 34
+#define CLK_TREE_DP1_PIXEL0 35
+#define CLK_TREE_DP1_PIXEL1 36
+#define CLK_TREE_DP2_PIXEL0 37
+#define CLK_TREE_DP2_PIXEL1 38
+#define CLK_TREE_DP3_PIXEL0 39
+#define CLK_TREE_DP3_PIXEL1 40
+#define CLK_TREE_DP4_PIXEL0 41
+#define CLK_TREE_DP4_PIXEL1 42
+#define CLK_TREE_DPU_CLK 43
+#define CLK_TREE_DPU0_ACLK 44
+#define CLK_TREE_DPU1_ACLK 45
+#define CLK_TREE_DPU2_ACLK 46
+#define CLK_TREE_DPU3_ACLK 47
+#define CLK_TREE_DPU4_ACLK 48
+#define CLK_TREE_DPC0_VIDCLK0 49
+#define CLK_TREE_DPC0_VIDCLK1 50
+#define CLK_TREE_DPC1_VIDCLK0 51
+#define CLK_TREE_DPC1_VIDCLK1 52
+#define CLK_TREE_DPC2_VIDCLK0 53
+#define CLK_TREE_DPC2_VIDCLK1 54
+#define CLK_TREE_DPC3_VIDCLK0 55
+#define CLK_TREE_DPC3_VIDCLK1 56
+#define CLK_TREE_DPC4_VIDCLK0 57
+#define CLK_TREE_DPC4_VIDCLK1 58
+#define CLK_TREE_DPC0_APBCLK 59
+#define CLK_TREE_DPC1_APBCLK 60
+#define CLK_TREE_DPC2_APBCLK 61
+#define CLK_TREE_DPC3_APBCLK 62
+#define CLK_TREE_DPC4_APBCLK 63
+#define CLK_TREE_NPU_MEMCLK 64
+#define CLK_TREE_NPU_SYSCLK 65
+#define CLK_TREE_NPU_DBGCLK 66
+#define CLK_TREE_VPU_APBCLK 67
+#define CLK_TREE_ISP_ACLK 68
+#define CLK_TREE_ISP_SCLK 69
+#define CLK_TREE_AUDIO_CLK4 70
+#define CLK_TREE_AUDIO_CLK5 71
+#define CLK_TREE_CAMERA_MCLK0 72
+#define CLK_TREE_CAMERA_MCLK1 73
+#define CLK_TREE_CAMERA_MCLK2 74
+#define CLK_TREE_CAMERA_MCLK3 75
+#define CLK_TREE_AUDIO_CLK0 76
+#define CLK_TREE_AUDIO_CLK1 77
+#define CLK_TREE_AUDIO_CLK2 78
+#define CLK_TREE_AUDIO_CLK3 79
+#define CLK_TREE_MM_NI700_CLK 80
+#define CLK_TREE_SYS_NI700_CLK 81
+#define CLK_TREE_GMAC0_ACLK 82
+#define CLK_TREE_GMAC1_ACLK 83
+#define CLK_TREE_GMAC0_DIV_ACLK 84
+#define CLK_TREE_GMAC0_DIV_TXCLK 85
+#define CLK_TREE_GMAC0_RGMII0_TXCLK 86
+#define CLK_TREE_GMAC1_DIV_ACLK 87
+#define CLK_TREE_GMAC1_DIV_TXCLK 88
+#define CLK_TREE_GMAC1_RGMII0_TXCLK 89
+#define CLK_TREE_GMAC0_PCLK 90
+#define CLK_TREE_GMAC1_PCLK 91
+#define CLK_TREE_USB2_0_AXI_GATE 92
+#define CLK_TREE_USB2_0_APB_GATE 93
+#define CLK_TREE_USB2_1_AXI_GATE 94
+#define CLK_TREE_USB2_1_APB_GATE 95
+#define CLK_TREE_USB2_2_AXI_GATE 96
+#define CLK_TREE_USB2_2_APB_GATE 97
+#define CLK_TREE_USB2_3_AXI_GATE 98
+#define CLK_TREE_USB2_3_APB_GATE 99
+#define CLK_TREE_USB2_0_PHY_GATE 100
+#define CLK_TREE_USB2_1_PHY_GATE 101
+#define CLK_TREE_USB2_2_PHY_GATE 102
+#define CLK_TREE_USB2_3_PHY_GATE 103
+#define CLK_TREE_USB3C_DRD_AXI_GATE 104
+#define CLK_TREE_USB3C_DRD_APB_GATE 105
+#define CLK_TREE_USB3C_DRD_PHY2_GATE 106
+#define CLK_TREE_USB3C_DRD_PHY3_GATE 107
+#define CLK_TREE_USB3C_0_AXI_GATE 108
+#define CLK_TREE_USB3C_0_APB_GATE 109
+#define CLK_TREE_USB3C_0_PHY2_GATE 110
+#define CLK_TREE_USB3C_0_PHY3_GATE 111
+#define CLK_TREE_USB3C_1_AXI_GATE 112
+#define CLK_TREE_USB3C_1_APB_GATE 113
+#define CLK_TREE_USB3C_1_PHY2_GATE 114
+#define CLK_TREE_USB3C_1_PHY3_GATE 115
+#define CLK_TREE_USB3C_2_AXI_GATE 116
+#define CLK_TREE_USB3C_2_APB_GATE 117
+#define CLK_TREE_USB3C_2_PHY2_GATE 118
+#define CLK_TREE_USB3C_2_PHY3_GATE 119
+#define CLK_TREE_USB3A_0_AXI_GATE 120
+#define CLK_TREE_USB3A_0_APB_GATE 121
+#define CLK_TREE_USB3A_0_PHY2_GATE 122
+#define CLK_TREE_USB3A_1_AXI_GATE 123
+#define CLK_TREE_USB3A_1_APB_GATE 124
+#define CLK_TREE_USB3A_1_PHY2_GATE 125
+#define CLK_TREE_USB3A_PHY3_GATE 126
+#define CLK_TREE_USB2_0_CLK_SOF 127
+#define CLK_TREE_USB2_1_CLK_SOF 128
+#define CLK_TREE_USB2_2_CLK_SOF 129
+#define CLK_TREE_USB2_3_CLK_SOF 130
+#define CLK_TREE_USB3C_DRD_CLK_SOF 131
+#define CLK_TREE_USB3C_H0_CLK_SOF 132
+#define CLK_TREE_USB3C_H1_CLK_SOF 133
+#define CLK_TREE_USB3C_H2_CLK_SOF 134
+#define CLK_TREE_USB3A_H0_CLK_SOF 135
+#define CLK_TREE_USB3A_H1_CLK_SOF 136
+#define CLK_TREE_USB2_0_CLK_LPM 137
+#define CLK_TREE_USB2_1_CLK_LPM 138
+#define CLK_TREE_USB2_2_CLK_LPM 139
+#define CLK_TREE_USB2_3_CLK_LPM 140
+#define CLK_TREE_USB3C_DRD_CLK_LPM 141
+#define CLK_TREE_USB3C_H0_CLK_LPM 142
+#define CLK_TREE_USB3C_H1_CLK_LPM 143
+#define CLK_TREE_USB3C_H2_CLK_LPM 144
+#define CLK_TREE_USB3A_H0_CLK_LPM 145
+#define CLK_TREE_USB3A_H1_CLK_LPM 146
+#define CLK_TREE_USB2_0_PHY_REF 147
+#define CLK_TREE_USB2_1_PHY_REF 148
+#define CLK_TREE_USB2_2_PHY_REF 149
+#define CLK_TREE_USB2_3_PHY_REF 150
+#define CLK_TREE_USB3C_DRD_PHY_REF 151
+#define CLK_TREE_USB3C_H0_PHY_REF 152
+#define CLK_TREE_USB3C_H1_PHY_REF 153
+#define CLK_TREE_USB3C_H2_PHY_REF 154
+#define CLK_TREE_USB3A_H0_PHY_REF 155
+#define CLK_TREE_USB3A_H1_PHY_REF 156
+#define CLK_TREE_USB3C_DRD_PHY_x4_REF 157
+#define CLK_TREE_USB3C_H0_PHY_x4_REF 158
+#define CLK_TREE_USB3C_H1_PHY_x4_REF 159
+#define CLK_TREE_USB3C_H2_PHY_x4_REF 160
+#define CLK_TREE_USB3A_PHY_x2_REF 161
+#define CLK_TREE_PCIE_X8CTRL_APB 162
+#define CLK_TREE_PCIE_X4CTRL_APB 163
+#define CLK_TREE_PCIE_X2CTRL_APB 164
+#define CLK_TREE_PCIE_X1_0CTRL_APB 165
+#define CLK_TREE_PCIE_X1_1CTRL_APB 166
+#define CLK_TREE_PCIE_X8_PHY_APB 167
+#define CLK_TREE_PCIE_X4_PHY_APB 168
+#define CLK_TREE_PCIE_X211_PHY_APB 169
+#define CLK_TREE_PCIE_NI700_CLK 170
+#define CLK_TREE_PCIE_CTRL0_CLK 171
+#define CLK_TREE_PCIE_CTRL1_CLK 172
+#define CLK_TREE_PCIE_CTRL2_CLK 173
+#define CLK_TREE_PCIE_CTRL3_CLK 174
+#define CLK_TREE_PCIE_CTRL4_CLK 175
+#define CLK_TREE_CSI_CTRL0_SYSCLK 176
+#define CLK_TREE_CSI_CTRL1_SYSCLK 177
+#define CLK_TREE_CSI_CTRL2_SYSCLK 178
+#define CLK_TREE_CSI_CTRL3_SYSCLK 179
+#define CLK_TREE_CSI_CTRL0_PIXEL0_CLK 180
+#define CLK_TREE_CSI_CTRL0_PIXEL1_CLK 181
+#define CLK_TREE_CSI_CTRL0_PIXEL2_CLK 182
+#define CLK_TREE_CSI_CTRL0_PIXEL3_CLK 183
+#define CLK_TREE_CSI_CTRL1_PIXEL0_CLK 184
+#define CLK_TREE_CSI_CTRL2_PIXEL0_CLK 185
+#define CLK_TREE_CSI_CTRL2_PIXEL1_CLK 186
+#define CLK_TREE_CSI_CTRL2_PIXEL2_CLK 187
+#define CLK_TREE_CSI_CTRL2_PIXEL3_CLK 188
+#define CLK_TREE_CSI_CTRL3_PIXEL0_CLK 189
+#define CLK_TREE_CI700_GCLK0 190
+#define CLK_TREE_DDRC0_ACLK_CLK 191
+#define CLK_TREE_DDRC1_ACLK_CLK 192
+#define CLK_TREE_DDRC2_ACLK_CLK 193
+#define CLK_TREE_DDRC3_ACLK_CLK 194
+#define CLK_TREE_DDRC0_DFICLK_CLK 195
+#define CLK_TREE_DDRC1_DFICLK_CLK 196
+#define CLK_TREE_DDRC2_DFICLK_CLK 197
+#define CLK_TREE_DDRC3_DFICLK_CLK 198
+#define CLK_TREE_PHY0_SYNC_CLK 199
+#define CLK_TREE_PHY1_SYNC_CLK 200
+#define CLK_TREE_PHY2_SYNC_CLK 201
+#define CLK_TREE_PHY3_SYNC_CLK 202
+#define CLK_TREE_PHY0_BYPASS_CLK 203
+#define CLK_TREE_PHY1_BYPASS_CLK 204
+#define CLK_TREE_PHY2_BYPASS_CLK 205
+#define CLK_TREE_PHY3_BYPASS_CLK 206
+#define CLK_TREE_DDRC_0_APB 207
+#define CLK_TREE_DDRC_1_APB 208
+#define CLK_TREE_DDRC_2_APB 209
+#define CLK_TREE_DDRC_3_APB 210
+#define CLK_TREE_TZC400_0_APB 211
+#define CLK_TREE_TZC400_1_APB 212
+#define CLK_TREE_TZC400_2_APB 213
+#define CLK_TREE_TZC400_3_APB 214
+#define CLK_TREE_S5_SENSOR_HUB_25M 215
+#define CLK_TREE_S5_SENSOR_HUB_400M 216
+#define CLK_TREE_S5_CSS600_100M 217
+#define CLK_TREE_S5_DFD_800M 218
+#define CLK_TREE_S5_CSU_SE_800M 219
+#define CLK_TREE_S5_CSU_PM_800M 220
+#define CLK_TREE_PCIE_REF_B0 221
+#define CLK_TREE_PCIE_REF_B1 222
+#define CLK_TREE_PCIE_REF_B2 223
+#define CLK_TREE_PCIE_REF_B3 224
+#define CLK_TREE_PCIE_REF_B4 225
+#define CLK_TREE_PCIE_REF_PHY_X8 226
+#define CLK_TREE_PCIE_REF_PHY_X4 227
+#define CLK_TREE_PCIE_REF_PHY_X211 228
+#define CLK_TREE_GMAC_REC_CLK 229
+#define CLK_TREE_GPUTOP_PLL 230
+#define CLK_TREE_GPUCORE_PLL 231
+#define CLK_TREE_CPU_PLL_LIT 232
+#define CLK_TREE_CPU_PLL0 233
+#define CLK_TREE_CPU_PLL1 234
+#define CLK_TREE_CPU_PLL2 235
+#define CLK_TREE_CPU_PLL3 236
+#define CLK_TREE_FCH_I3C0_FUNC 237
+#define CLK_TREE_FCH_I3C1_FUNC 238
+#define CLK_TREE_FCH_DMA_ACLK 239
+#define CLK_TREE_FCH_XSPI_FUNC 240
+#define CLK_TREE_FCH_XSPI_MACLK 241
+#define CLK_TREE_FCH_TIMER_FUN 242
+#define CLK_TREE_FCH_APB_IO_S0 243
+#define CLK_TREE_FCH_I3C0_APB 244
+#define CLK_TREE_FCH_I3C1_APB 245
+#define CLK_TREE_FCH_UART0_APB 246
+#define CLK_TREE_FCH_UART1_APB 247
+#define CLK_TREE_FCH_UART2_APB 248
+#define CLK_TREE_FCH_UART3_APB 249
+#define CLK_TREE_FCH_SPI0_APB 250
+#define CLK_TREE_FCH_SPI1_APB 251
+#define CLK_TREE_FCH_XSPI_APB 252
+#define CLK_TREE_FCH_I2C0_APB 253
+#define CLK_TREE_FCH_I2C1_APB 254
+#define CLK_TREE_FCH_I2C2_APB 255
+#define CLK_TREE_FCH_I2C3_APB 256
+#define CLK_TREE_FCH_I2C4_APB 257
+#define CLK_TREE_FCH_I2C5_APB 258
+#define CLK_TREE_FCH_I2C6_APB 259
+#define CLK_TREE_FCH_I2C7_APB 260
+#define CLK_TREE_FCH_TIMER_APB 261
+#define CLK_TREE_FCH_GPIO_APB 262
+#define CLK_TREE_FCH_UART0_FUNC 263
+#define CLK_TREE_FCH_UART1_FUNC 264
+#define CLK_TREE_FCH_UART2_FUNC 265
+#define CLK_TREE_FCH_UART3_FUNC 266
+/* IDs 267-271 are not used by the AP, skip them */
+#define CLK_TREE_GPU_CLK_200M 272
+
+#endif
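For orientation, the CLK_TREE_* IDs above are plain indices into the SoC clock controller, referenced from consumers with a one-cell specifier. A minimal DTS sketch follows, assuming a provider labelled cru with #clock-cells = <1>; the label, unit address, compatible string and clock-names are all assumptions for illustration, not taken from this patch:

/* Sketch only: node names, labels and addresses are assumed. */
uart0: serial@4000000 {
	compatible = "vendor,soc-uart";			/* assumed */
	clocks = <&cru CLK_TREE_FCH_UART0_FUNC>,
		 <&cru CLK_TREE_FCH_UART0_APB>;
	clock-names = "func", "apb";			/* assumed */
};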
diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h
index 7d57063b8a65..8a903c78c5a5 100644
--- a/include/dt-bindings/clock/dra7.h
+++ b/include/dt-bindings/clock/dra7.h
@@ -8,181 +8,6 @@
#define DRA7_CLKCTRL_OFFSET 0x20
#define DRA7_CLKCTRL_INDEX(offset) ((offset) - DRA7_CLKCTRL_OFFSET)
-/* XXX: Compatibility part begin, remove this once compatibility support is no longer needed */
-
-/* mpu clocks */
-#define DRA7_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-
-/* ipu clocks */
-#define _DRA7_IPU_CLKCTRL_OFFSET 0x40
-#define _DRA7_IPU_CLKCTRL_INDEX(offset) ((offset) - _DRA7_IPU_CLKCTRL_OFFSET)
-#define DRA7_MCASP1_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x50)
-#define DRA7_TIMER5_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x58)
-#define DRA7_TIMER6_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x60)
-#define DRA7_TIMER7_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x68)
-#define DRA7_TIMER8_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x70)
-#define DRA7_I2C5_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x78)
-#define DRA7_UART6_CLKCTRL _DRA7_IPU_CLKCTRL_INDEX(0x80)
-
-/* rtc clocks */
-#define DRA7_RTC_CLKCTRL_OFFSET 0x40
-#define DRA7_RTC_CLKCTRL_INDEX(offset) ((offset) - DRA7_RTC_CLKCTRL_OFFSET)
-#define DRA7_RTCSS_CLKCTRL DRA7_RTC_CLKCTRL_INDEX(0x44)
-
-/* vip clocks */
-#define DRA7_VIP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_VIP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_VIP3_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-
-/* vpe clocks */
-#define DRA7_VPE_CLKCTRL_OFFSET 0x60
-#define DRA7_VPE_CLKCTRL_INDEX(offset) ((offset) - DRA7_VPE_CLKCTRL_OFFSET)
-#define DRA7_VPE_CLKCTRL DRA7_VPE_CLKCTRL_INDEX(0x64)
-
-/* coreaon clocks */
-#define DRA7_SMARTREFLEX_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_SMARTREFLEX_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x38)
-
-/* l3main1 clocks */
-#define DRA7_L3_MAIN_1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_GPMC_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_TPCC_CLKCTRL DRA7_CLKCTRL_INDEX(0x70)
-#define DRA7_TPTC0_CLKCTRL DRA7_CLKCTRL_INDEX(0x78)
-#define DRA7_TPTC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
-#define DRA7_VCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
-#define DRA7_VCP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x90)
-
-/* dma clocks */
-#define DRA7_DMA_SYSTEM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-
-/* emif clocks */
-#define DRA7_DMM_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-
-/* atl clocks */
-#define DRA7_ATL_CLKCTRL_OFFSET 0x0
-#define DRA7_ATL_CLKCTRL_INDEX(offset) ((offset) - DRA7_ATL_CLKCTRL_OFFSET)
-#define DRA7_ATL_CLKCTRL DRA7_ATL_CLKCTRL_INDEX(0x0)
-
-/* l4cfg clocks */
-#define DRA7_L4_CFG_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_SPINLOCK_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_MAILBOX1_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-#define DRA7_MAILBOX2_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
-#define DRA7_MAILBOX3_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
-#define DRA7_MAILBOX4_CLKCTRL DRA7_CLKCTRL_INDEX(0x58)
-#define DRA7_MAILBOX5_CLKCTRL DRA7_CLKCTRL_INDEX(0x60)
-#define DRA7_MAILBOX6_CLKCTRL DRA7_CLKCTRL_INDEX(0x68)
-#define DRA7_MAILBOX7_CLKCTRL DRA7_CLKCTRL_INDEX(0x70)
-#define DRA7_MAILBOX8_CLKCTRL DRA7_CLKCTRL_INDEX(0x78)
-#define DRA7_MAILBOX9_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
-#define DRA7_MAILBOX10_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
-#define DRA7_MAILBOX11_CLKCTRL DRA7_CLKCTRL_INDEX(0x90)
-#define DRA7_MAILBOX12_CLKCTRL DRA7_CLKCTRL_INDEX(0x98)
-#define DRA7_MAILBOX13_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0)
-
-/* l3instr clocks */
-#define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-
-/* iva clocks */
-#define DRA7_IVA_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_SL2IF_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-
-/* dss clocks */
-#define DRA7_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-
-/* gpu clocks */
-#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-
-/* l3init clocks */
-#define DRA7_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-#define DRA7_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-#define DRA7_USB_OTG_SS2_CLKCTRL DRA7_CLKCTRL_INDEX(0x40)
-#define DRA7_USB_OTG_SS3_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
-#define DRA7_USB_OTG_SS4_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
-#define DRA7_SATA_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
-#define DRA7_PCIE1_CLKCTRL DRA7_CLKCTRL_INDEX(0xb0)
-#define DRA7_PCIE2_CLKCTRL DRA7_CLKCTRL_INDEX(0xb8)
-#define DRA7_GMAC_CLKCTRL DRA7_CLKCTRL_INDEX(0xd0)
-#define DRA7_OCP2SCP1_CLKCTRL DRA7_CLKCTRL_INDEX(0xe0)
-#define DRA7_OCP2SCP3_CLKCTRL DRA7_CLKCTRL_INDEX(0xe8)
-#define DRA7_USB_OTG_SS1_CLKCTRL DRA7_CLKCTRL_INDEX(0xf0)
-
-/* l4per clocks */
-#define _DRA7_L4PER_CLKCTRL_OFFSET 0x0
-#define _DRA7_L4PER_CLKCTRL_INDEX(offset) ((offset) - _DRA7_L4PER_CLKCTRL_OFFSET)
-#define DRA7_L4_PER2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc)
-#define DRA7_L4_PER3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x14)
-#define DRA7_TIMER10_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x28)
-#define DRA7_TIMER11_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x30)
-#define DRA7_TIMER2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x38)
-#define DRA7_TIMER3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x40)
-#define DRA7_TIMER4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x48)
-#define DRA7_TIMER9_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x50)
-#define DRA7_ELM_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x58)
-#define DRA7_GPIO2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x60)
-#define DRA7_GPIO3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x68)
-#define DRA7_GPIO4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x70)
-#define DRA7_GPIO5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x78)
-#define DRA7_GPIO6_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x80)
-#define DRA7_HDQ1W_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x88)
-#define DRA7_EPWMSS1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x90)
-#define DRA7_EPWMSS2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x98)
-#define DRA7_I2C1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xa0)
-#define DRA7_I2C2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xa8)
-#define DRA7_I2C3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xb0)
-#define DRA7_I2C4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xb8)
-#define DRA7_L4_PER1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc0)
-#define DRA7_EPWMSS0_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc4)
-#define DRA7_TIMER13_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xc8)
-#define DRA7_TIMER14_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xd0)
-#define DRA7_TIMER15_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xd8)
-#define DRA7_MCSPI1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xf0)
-#define DRA7_MCSPI2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0xf8)
-#define DRA7_MCSPI3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x100)
-#define DRA7_MCSPI4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x108)
-#define DRA7_GPIO7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x110)
-#define DRA7_GPIO8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x118)
-#define DRA7_MMC3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x120)
-#define DRA7_MMC4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x128)
-#define DRA7_TIMER16_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x130)
-#define DRA7_QSPI_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x138)
-#define DRA7_UART1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x140)
-#define DRA7_UART2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x148)
-#define DRA7_UART3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x150)
-#define DRA7_UART4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x158)
-#define DRA7_MCASP2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x160)
-#define DRA7_MCASP3_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x168)
-#define DRA7_UART5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x170)
-#define DRA7_MCASP5_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x178)
-#define DRA7_MCASP8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x190)
-#define DRA7_MCASP4_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x198)
-#define DRA7_AES1_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1a0)
-#define DRA7_AES2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1a8)
-#define DRA7_DES_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1b0)
-#define DRA7_RNG_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1c0)
-#define DRA7_SHAM_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1c8)
-#define DRA7_UART7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1d0)
-#define DRA7_UART8_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1e0)
-#define DRA7_UART9_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1e8)
-#define DRA7_DCAN2_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x1f0)
-#define DRA7_MCASP6_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x204)
-#define DRA7_MCASP7_CLKCTRL _DRA7_L4PER_CLKCTRL_INDEX(0x208)
-
-/* wkupaon clocks */
-#define DRA7_L4_WKUP_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_WD_TIMER2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-#define DRA7_GPIO1_CLKCTRL DRA7_CLKCTRL_INDEX(0x38)
-#define DRA7_TIMER1_CLKCTRL DRA7_CLKCTRL_INDEX(0x40)
-#define DRA7_TIMER12_CLKCTRL DRA7_CLKCTRL_INDEX(0x48)
-#define DRA7_COUNTER_32K_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
-#define DRA7_UART10_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
-#define DRA7_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
-#define DRA7_ADC_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0)
-
-/* XXX: Compatibility part end. */
-
/* mpu clocks */
#define DRA7_MPU_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
@@ -267,10 +92,17 @@
#define DRA7_L3INSTR_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_L3INSTR_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+/* iva clocks */
+#define DRA7_IVA_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_SL2IF_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+
/* dss clocks */
#define DRA7_DSS_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_DSS_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+/* gpu clocks */
+#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
/* l3init clocks */
#define DRA7_L3INIT_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
#define DRA7_L3INIT_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
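The DRA7_CLKCTRL_INDEX() macro simply rebases a CM_* register offset against the common 0x20 base, so DRA7_L3INIT_MMC1_CLKCTRL above expands to 0x28 - 0x20 = 0x08. Consumers pass that index as the first cell of a two-cell clkctrl specifier. A hedged sketch, assuming a provider labelled l3init_clkctrl and using 0 as the second (bit-offset) cell to select the main module clock; the label, address and compatible are assumptions:

/* Sketch: the label and the bit-offset cell value are assumptions. */
mmc1: mmc@4809c000 {
	compatible = "ti,dra7-sdhci";			/* assumed */
	clocks = <&l3init_clkctrl DRA7_L3INIT_MMC1_CLKCTRL 0>;
	clock-names = "fck";
};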
diff --git a/include/dt-bindings/clock/efm32-cmu.h b/include/dt-bindings/clock/efm32-cmu.h
deleted file mode 100644
index 4b48d15fe194..000000000000
--- a/include/dt-bindings/clock/efm32-cmu.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DT_BINDINGS_CLOCK_EFM32_CMU_H
-#define __DT_BINDINGS_CLOCK_EFM32_CMU_H
-
-#define clk_HFXO 0
-#define clk_HFRCO 1
-#define clk_LFXO 2
-#define clk_LFRCO 3
-#define clk_ULFRCO 4
-#define clk_AUXHFRCO 5
-#define clk_HFCLKNODIV 6
-#define clk_HFCLK 7
-#define clk_HFPERCLK 8
-#define clk_HFCORECLK 9
-#define clk_LFACLK 10
-#define clk_LFBCLK 11
-#define clk_WDOGCLK 12
-#define clk_HFCORECLKDMA 13
-#define clk_HFCORECLKAES 14
-#define clk_HFCORECLKUSBC 15
-#define clk_HFCORECLKUSB 16
-#define clk_HFCORECLKLE 17
-#define clk_HFCORECLKEBI 18
-#define clk_HFPERCLKUSART0 19
-#define clk_HFPERCLKUSART1 20
-#define clk_HFPERCLKUSART2 21
-#define clk_HFPERCLKUART0 22
-#define clk_HFPERCLKUART1 23
-#define clk_HFPERCLKTIMER0 24
-#define clk_HFPERCLKTIMER1 25
-#define clk_HFPERCLKTIMER2 26
-#define clk_HFPERCLKTIMER3 27
-#define clk_HFPERCLKACMP0 28
-#define clk_HFPERCLKACMP1 29
-#define clk_HFPERCLKI2C0 30
-#define clk_HFPERCLKI2C1 31
-#define clk_HFPERCLKGPIO 32
-#define clk_HFPERCLKVCMP 33
-#define clk_HFPERCLKPRS 34
-#define clk_HFPERCLKADC0 35
-#define clk_HFPERCLKDAC0 36
-
-#endif /* __DT_BINDINGS_CLOCK_EFM32_CMU_H */
diff --git a/include/dt-bindings/clock/en7523-clk.h b/include/dt-bindings/clock/en7523-clk.h
new file mode 100644
index 000000000000..edfa64045f52
--- /dev/null
+++ b/include/dt-bindings/clock/en7523-clk.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_
+#define _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_
+
+#define EN7523_CLK_GSW 0
+#define EN7523_CLK_EMI 1
+#define EN7523_CLK_BUS 2
+#define EN7523_CLK_SLIC 3
+#define EN7523_CLK_SPI 4
+#define EN7523_CLK_NPU 5
+#define EN7523_CLK_CRYPTO 6
+#define EN7523_CLK_PCIE 7
+
+#define EN7581_CLK_EMMC 8
+
+#endif /* _DT_BINDINGS_CLOCK_AIROHA_EN7523_H_ */
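Since the controller exposes a flat one-cell namespace, a consumer only needs the provider phandle plus one of the EN7523_CLK_* indices above. A minimal sketch, with the scuclk label and the PCIe node shape assumed for illustration:

/* Sketch only: label, address and compatible are assumptions. */
pcie0: pcie@1fa91000 {
	compatible = "airoha,en7523-pcie";		/* assumed */
	clocks = <&scuclk EN7523_CLK_PCIE>;
	clock-names = "pcie";
};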
diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h
index fe8214017b46..cc7268151843 100644
--- a/include/dt-bindings/clock/exynos3250.h
+++ b/include/dt-bindings/clock/exynos3250.h
@@ -257,12 +257,6 @@
#define CLK_SCLK_MMC2 249
/*
- * Total number of clocks of main CMU.
- * NOTE: Must be equal to last clock ID increased by one.
- */
-#define CLK_NR_CLKS 250
-
-/*
* CMU DMC
*/
@@ -284,12 +278,6 @@
#define CLK_DIV_DMCD 20
/*
- * Total number of clocks of main CMU.
- * NOTE: Must be equal to last clock ID increased by one.
- */
-#define NR_CLKS_DMC 21
-
-/*
* CMU ISP
*/
@@ -344,10 +332,4 @@
#define CLK_ASYNCAXIM 46
#define CLK_SCLK_MPWM_ISP 47
-/*
- * Total number of clocks of CMU_ISP.
- * NOTE: Must be equal to last clock ID increased by one.
- */
-#define NR_CLKS_ISP 48
-
#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS3250_CLOCK_H */
diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h
index 88ec3968b90a..4ebff79ed9e2 100644
--- a/include/dt-bindings/clock/exynos4.h
+++ b/include/dt-bindings/clock/exynos4.h
@@ -209,6 +209,7 @@
#define CLK_ACLK400_MCUISP 395 /* Exynos4x12 only */
#define CLK_MOUT_HDMI 396
#define CLK_MOUT_MIXER 397
+#define CLK_MOUT_VPLLSRC 398
/* gate clocks - ppmu */
#define CLK_PPMULEFT 400
@@ -236,9 +237,7 @@
#define CLK_DIV_C2C 458 /* Exynos4x12 only */
#define CLK_DIV_GDL 459
#define CLK_DIV_GDR 460
-
-/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 461
+#define CLK_DIV_CORE2 461
/* Exynos4x12 ISP clocks */
#define CLK_ISP_FIMC_ISP 1
@@ -273,6 +272,4 @@
#define CLK_ISP_DIV_MCUISP0 29
#define CLK_ISP_DIV_MCUISP1 30
-#define CLK_NR_ISP_CLKS 31
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_4_H */
diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h
index e259cc01f22f..2337c028bbe1 100644
--- a/include/dt-bindings/clock/exynos5250.h
+++ b/include/dt-bindings/clock/exynos5250.h
@@ -19,6 +19,7 @@
#define CLK_FOUT_EPLL 7
#define CLK_FOUT_VPLL 8
#define CLK_ARM_CLK 9
+#define CLK_DIV_ARM2 10
/* gate for special clocks (sclk) */
#define CLK_SCLK_CAM_BAYER 128
@@ -174,8 +175,6 @@
#define CLK_MOUT_ACLK300_DISP1_SUB 1027
#define CLK_MOUT_APLL 1028
#define CLK_MOUT_MPLL 1029
-
-/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 1030
+#define CLK_MOUT_VPLLSRC 1030
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5250_H */
diff --git a/include/dt-bindings/clock/exynos5260-clk.h b/include/dt-bindings/clock/exynos5260-clk.h
index 98a58cbd81b2..dfde40ea40f0 100644
--- a/include/dt-bindings/clock/exynos5260-clk.h
+++ b/include/dt-bindings/clock/exynos5260-clk.h
@@ -137,8 +137,6 @@
#define PHYCLK_USBHOST20_PHY_CLK48MOHCI 122
#define PHYCLK_USBDRD30_UDRD30_PIPE_PCLK 123
#define PHYCLK_USBDRD30_UDRD30_PHYCLOCK 124
-#define TOP_NR_CLK 125
-
/* List Of Clocks For CMU_EGL */
@@ -153,8 +151,6 @@
#define EGL_DOUT_ACLK_EGL 9
#define EGL_DOUT_EGL2 10
#define EGL_DOUT_EGL1 11
-#define EGL_NR_CLK 12
-
/* List Of Clocks For CMU_KFC */
@@ -168,8 +164,6 @@
#define KFC_DOUT_KFC_ATCLK 8
#define KFC_DOUT_KFC2 9
#define KFC_DOUT_KFC1 10
-#define KFC_NR_CLK 11
-
/* List Of Clocks For CMU_MIF */
@@ -200,8 +194,6 @@
#define MIF_CLK_INTMEM 25
#define MIF_SCLK_LPDDR3PHY_WRAP_U1 26
#define MIF_SCLK_LPDDR3PHY_WRAP_U0 27
-#define MIF_NR_CLK 28
-
/* List Of Clocks For CMU_G3D */
@@ -211,8 +203,6 @@
#define G3D_DOUT_ACLK_G3D 4
#define G3D_CLK_G3D_HPM 5
#define G3D_CLK_G3D 6
-#define G3D_NR_CLK 7
-
/* List Of Clocks For CMU_AUD */
@@ -231,8 +221,6 @@
#define AUD_SCLK_AUD_UART 13
#define AUD_SCLK_PCM 14
#define AUD_SCLK_I2S 15
-#define AUD_NR_CLK 16
-
/* List Of Clocks For CMU_MFC */
@@ -241,8 +229,6 @@
#define MFC_CLK_MFC 3
#define MFC_CLK_SMMU2_MFCM1 4
#define MFC_CLK_SMMU2_MFCM0 5
-#define MFC_NR_CLK 6
-
/* List Of Clocks For CMU_GSCL */
@@ -272,8 +258,6 @@
#define GSCL_CLK_SMMU3_MSCL1 24
#define GSCL_SCLK_CSIS1_WRAP 25
#define GSCL_SCLK_CSIS0_WRAP 26
-#define GSCL_NR_CLK 27
-
/* List Of Clocks For CMU_FSYS */
@@ -295,8 +279,6 @@
#define FSYS_CLK_SMMU_RTIC 16
#define FSYS_PHYCLK_USBDRD30 17
#define FSYS_PHYCLK_USBHOST20 18
-#define FSYS_NR_CLK 19
-
/* List Of Clocks For CMU_PERI */
@@ -366,8 +348,6 @@
#define PERI_SCLK_SPDIF 64
#define PERI_SCLK_I2S 65
#define PERI_SCLK_PCM1 66
-#define PERI_NR_CLK 67
-
/* List Of Clocks For CMU_DISP */
@@ -406,8 +386,6 @@
#define DISP_CLK_DP 33
#define DISP_SCLK_PIXEL 34
#define DISP_MOUT_HDMI_PHY_PIXEL_USER 35
-#define DISP_NR_CLK 36
-
/* List Of Clocks For CMU_G2D */
@@ -423,8 +401,6 @@
#define G2D_CLK_SMMU_SSS 10
#define G2D_CLK_SMMU_MDMA 11
#define G2D_CLK_SMMU3_G2D 12
-#define G2D_NR_CLK 13
-
/* List Of Clocks For CMU_ISP */
@@ -461,6 +437,5 @@
#define ISP_SCLK_SPI0_EXT 31
#define ISP_SCLK_SPI1_EXT 32
#define ISP_SCLK_UART_EXT 33
-#define ISP_NR_CLK 34
#endif
diff --git a/include/dt-bindings/clock/exynos5410.h b/include/dt-bindings/clock/exynos5410.h
index 86c2ad56c5ef..7a1a93f8df6c 100644
--- a/include/dt-bindings/clock/exynos5410.h
+++ b/include/dt-bindings/clock/exynos5410.h
@@ -61,6 +61,4 @@
#define CLK_USBD301 367
#define CLK_SSS 471
-#define CLK_NR_CLKS 512
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5410_H */
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 9fffc6ceaadd..73e82527a9e9 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -271,7 +271,4 @@
#define CLK_DOUT_PCLK_DREX0 798
#define CLK_DOUT_PCLK_DREX1 799
-/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 800
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5420_H */
diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h
index 25ffa53573a5..d12c1a963fa1 100644
--- a/include/dt-bindings/clock/exynos5433.h
+++ b/include/dt-bindings/clock/exynos5433.h
@@ -188,8 +188,6 @@
#define CLK_SCLK_ISP_SPI0_CAM1 252
#define CLK_SCLK_HDMI_SPDIF_DISP 253
-#define TOP_NR_CLK 254
-
/* CMU_CPIF */
#define CLK_FOUT_MPHY_PLL 1
@@ -200,8 +198,6 @@
#define CLK_SCLK_MPHY_PLL 11
#define CLK_SCLK_UFS_MPHY 11
-#define CPIF_NR_CLK 12
-
/* CMU_MIF */
#define CLK_FOUT_MEM0_PLL 1
#define CLK_FOUT_MEM1_PLL 2
@@ -396,8 +392,6 @@
#define CLK_SCLK_BUS_PLL_APOLLO 199
#define CLK_SCLK_BUS_PLL_ATLAS 200
-#define MIF_NR_CLK 201
-
/* CMU_PERIC */
#define CLK_PCLK_SPI2 1
#define CLK_PCLK_SPI1 2
@@ -468,8 +462,6 @@
#define CLK_DIV_SCLK_SCI 70
#define CLK_DIV_SCLK_SC_IN 71
-#define PERIC_NR_CLK 72
-
/* CMU_PERIS */
#define CLK_PCLK_HPM_APBIF 1
#define CLK_PCLK_TMU1_APBIF 2
@@ -513,8 +505,6 @@
#define CLK_SCLK_ANTIRBK_CNT 40
#define CLK_SCLK_OTP_CON 41
-#define PERIS_NR_CLK 42
-
/* CMU_FSYS */
#define CLK_MOUT_ACLK_FSYS_200_USER 1
#define CLK_MOUT_SCLK_MMC2_USER 2
@@ -621,8 +611,6 @@
#define CLK_SCLK_USBDRD30 114
#define CLK_PCIE 115
-#define FSYS_NR_CLK 116
-
/* CMU_G2D */
#define CLK_MUX_ACLK_G2D_266_USER 1
#define CLK_MUX_ACLK_G2D_400_USER 2
@@ -653,8 +641,6 @@
#define CLK_PCLK_G2D 25
#define CLK_PCLK_SMMU_G2D 26
-#define G2D_NR_CLK 27
-
/* CMU_DISP */
#define CLK_FOUT_DISP_PLL 1
@@ -771,8 +757,6 @@
#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY 114
#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY 115
-#define DISP_NR_CLK 116
-
/* CMU_AUD */
#define CLK_MOUT_AUD_PLL_USER 1
#define CLK_MOUT_SCLK_AUD_PCM 2
@@ -824,8 +808,6 @@
#define CLK_SCLK_I2S_BCLK 46
#define CLK_SCLK_AUD_I2S 47
-#define AUD_NR_CLK 48
-
/* CMU_BUS{0|1|2} */
#define CLK_DIV_PCLK_BUS_133 1
@@ -840,8 +822,6 @@
#define CLK_ACLK_BUS2BEND_400 9 /* Only CMU_BUS2 */
#define CLK_ACLK_BUS2RTND_400 10 /* Only CMU_BUS2 */
-#define BUSx_NR_CLK 11
-
/* CMU_G3D */
#define CLK_FOUT_G3D_PLL 1
@@ -865,8 +845,6 @@
#define CLK_PCLK_SYSREG_G3D 18
#define CLK_SCLK_HPM_G3D 19
-#define G3D_NR_CLK 20
-
/* CMU_GSCL */
#define CLK_MOUT_ACLK_GSCL_111_USER 1
#define CLK_MOUT_ACLK_GSCL_333_USER 2
@@ -898,8 +876,6 @@
#define CLK_PCLK_SMMU_GSCL1 27
#define CLK_PCLK_SMMU_GSCL2 28
-#define GSCL_NR_CLK 29
-
/* CMU_APOLLO */
#define CLK_FOUT_APOLLO_PLL 1
@@ -935,8 +911,6 @@
#define CLK_SCLK_HPM_APOLLO 29
#define CLK_SCLK_APOLLO 30
-#define APOLLO_NR_CLK 31
-
/* CMU_ATLAS */
#define CLK_FOUT_ATLAS_PLL 1
@@ -981,8 +955,6 @@
#define CLK_ATCLK 38
#define CLK_SCLK_ATLAS 39
-#define ATLAS_NR_CLK 40
-
/* CMU_MSCL */
#define CLK_MOUT_SCLK_JPEG_USER 1
#define CLK_MOUT_ACLK_MSCL_400_USER 2
@@ -1016,8 +988,6 @@
#define CLK_PCLK_SMMU_JPEG 28
#define CLK_SCLK_JPEG 29
-#define MSCL_NR_CLK 30
-
/* CMU_MFC */
#define CLK_MOUT_ACLK_MFC_400_USER 1
@@ -1040,8 +1010,6 @@
#define CLK_PCLK_SMMU_MFC_1 17
#define CLK_PCLK_SMMU_MFC_0 18
-#define MFC_NR_CLK 19
-
/* CMU_HEVC */
#define CLK_MOUT_ACLK_HEVC_400_USER 1
@@ -1064,8 +1032,6 @@
#define CLK_PCLK_SMMU_HEVC_1 17
#define CLK_PCLK_SMMU_HEVC_0 18
-#define HEVC_NR_CLK 19
-
/* CMU_ISP */
#define CLK_MOUT_ACLK_ISP_DIS_400_USER 1
#define CLK_MOUT_ACLK_ISP_400_USER 2
@@ -1147,8 +1113,6 @@
#define CLK_SCLK_PIXELASYNCS_ISPC 76
#define CLK_SCLK_PIXELASYNCM_ISPC 77
-#define ISP_NR_CLK 78
-
/* CMU_CAM0 */
#define CLK_PHYCLK_RXBYTEECLKHS0_S4_PHY 1
#define CLK_PHYCLK_RXBYTEECLKHS0_S2A_PHY 2
@@ -1285,8 +1249,6 @@
#define CLK_SCLK_PIXELASYNCM_LITE_C_INIT 132
#define CLK_SCLK_PIXELASYNCS_LITE_C_INIT 133
-#define CAM0_NR_CLK 134
-
/* CMU_CAM1 */
#define CLK_PHYCLK_RXBYTEECLKHS0_S2B 1
@@ -1404,12 +1366,8 @@
#define CLK_ATCLK_ISP 111
#define CLK_SCLK_ISP_CA5 112
-#define CAM1_NR_CLK 113
-
/* CMU_IMEM */
#define CLK_ACLK_SLIMSSS 2
#define CLK_PCLK_SLIMSSS 35
-#define IMEM_NR_CLK 36
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS5433_H */
diff --git a/include/dt-bindings/clock/exynos7885.h b/include/dt-bindings/clock/exynos7885.h
new file mode 100644
index 000000000000..cfede84b46b9
--- /dev/null
+++ b/include/dt-bindings/clock/exynos7885.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 Dávid Virág
+ *
+ * Device Tree binding constants for Exynos7885 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS_7885_H
+#define _DT_BINDINGS_CLOCK_EXYNOS_7885_H
+
+/* CMU_TOP */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_DOUT_SHARED0_DIV2 3
+#define CLK_DOUT_SHARED0_DIV3 4
+#define CLK_DOUT_SHARED0_DIV4 5
+#define CLK_DOUT_SHARED0_DIV5 6
+#define CLK_DOUT_SHARED1_DIV2 7
+#define CLK_DOUT_SHARED1_DIV3 8
+#define CLK_DOUT_SHARED1_DIV4 9
+#define CLK_MOUT_CORE_BUS 10
+#define CLK_MOUT_CORE_CCI 11
+#define CLK_MOUT_CORE_G3D 12
+#define CLK_DOUT_CORE_BUS 13
+#define CLK_DOUT_CORE_CCI 14
+#define CLK_DOUT_CORE_G3D 15
+#define CLK_GOUT_CORE_BUS 16
+#define CLK_GOUT_CORE_CCI 17
+#define CLK_GOUT_CORE_G3D 18
+#define CLK_MOUT_PERI_BUS 19
+#define CLK_MOUT_PERI_SPI0 20
+#define CLK_MOUT_PERI_SPI1 21
+#define CLK_MOUT_PERI_UART0 22
+#define CLK_MOUT_PERI_UART1 23
+#define CLK_MOUT_PERI_UART2 24
+#define CLK_MOUT_PERI_USI0 25
+#define CLK_MOUT_PERI_USI1 26
+#define CLK_MOUT_PERI_USI2 27
+#define CLK_DOUT_PERI_BUS 28
+#define CLK_DOUT_PERI_SPI0 29
+#define CLK_DOUT_PERI_SPI1 30
+#define CLK_DOUT_PERI_UART0 31
+#define CLK_DOUT_PERI_UART1 32
+#define CLK_DOUT_PERI_UART2 33
+#define CLK_DOUT_PERI_USI0 34
+#define CLK_DOUT_PERI_USI1 35
+#define CLK_DOUT_PERI_USI2 36
+#define CLK_GOUT_PERI_BUS 37
+#define CLK_GOUT_PERI_SPI0 38
+#define CLK_GOUT_PERI_SPI1 39
+#define CLK_GOUT_PERI_UART0 40
+#define CLK_GOUT_PERI_UART1 41
+#define CLK_GOUT_PERI_UART2 42
+#define CLK_GOUT_PERI_USI0 43
+#define CLK_GOUT_PERI_USI1 44
+#define CLK_GOUT_PERI_USI2 45
+#define CLK_MOUT_FSYS_BUS 46
+#define CLK_MOUT_FSYS_MMC_CARD 47
+#define CLK_MOUT_FSYS_MMC_EMBD 48
+#define CLK_MOUT_FSYS_MMC_SDIO 49
+#define CLK_MOUT_FSYS_USB30DRD 50
+#define CLK_DOUT_FSYS_BUS 51
+#define CLK_DOUT_FSYS_MMC_CARD 52
+#define CLK_DOUT_FSYS_MMC_EMBD 53
+#define CLK_DOUT_FSYS_MMC_SDIO 54
+#define CLK_DOUT_FSYS_USB30DRD 55
+#define CLK_GOUT_FSYS_BUS 56
+#define CLK_GOUT_FSYS_MMC_CARD 57
+#define CLK_GOUT_FSYS_MMC_EMBD 58
+#define CLK_GOUT_FSYS_MMC_SDIO 59
+#define CLK_GOUT_FSYS_USB30DRD 60
+#define CLK_MOUT_SHARED0_PLL 61
+#define CLK_MOUT_SHARED1_PLL 62
+
+/* CMU_CORE */
+#define CLK_MOUT_CORE_BUS_USER 1
+#define CLK_MOUT_CORE_CCI_USER 2
+#define CLK_MOUT_CORE_G3D_USER 3
+#define CLK_MOUT_CORE_GIC 4
+#define CLK_DOUT_CORE_BUSP 5
+#define CLK_GOUT_CCI_ACLK 6
+#define CLK_GOUT_GIC400_CLK 7
+#define CLK_GOUT_TREX_D_CORE_ACLK 8
+#define CLK_GOUT_TREX_D_CORE_GCLK 9
+#define CLK_GOUT_TREX_D_CORE_PCLK 10
+#define CLK_GOUT_TREX_P_CORE_ACLK_P_CORE 11
+#define CLK_GOUT_TREX_P_CORE_CCLK_P_CORE 12
+#define CLK_GOUT_TREX_P_CORE_PCLK 13
+#define CLK_GOUT_TREX_P_CORE_PCLK_P_CORE 14
+
+/* CMU_PERI */
+#define CLK_MOUT_PERI_BUS_USER 1
+#define CLK_MOUT_PERI_SPI0_USER 2
+#define CLK_MOUT_PERI_SPI1_USER 3
+#define CLK_MOUT_PERI_UART0_USER 4
+#define CLK_MOUT_PERI_UART1_USER 5
+#define CLK_MOUT_PERI_UART2_USER 6
+#define CLK_MOUT_PERI_USI0_USER 7
+#define CLK_MOUT_PERI_USI1_USER 8
+#define CLK_MOUT_PERI_USI2_USER 9
+#define CLK_GOUT_GPIO_TOP_PCLK 10
+#define CLK_GOUT_HSI2C0_PCLK 11
+#define CLK_GOUT_HSI2C1_PCLK 12
+#define CLK_GOUT_HSI2C2_PCLK 13
+#define CLK_GOUT_HSI2C3_PCLK 14
+#define CLK_GOUT_I2C0_PCLK 15
+#define CLK_GOUT_I2C1_PCLK 16
+#define CLK_GOUT_I2C2_PCLK 17
+#define CLK_GOUT_I2C3_PCLK 18
+#define CLK_GOUT_I2C4_PCLK 19
+#define CLK_GOUT_I2C5_PCLK 20
+#define CLK_GOUT_I2C6_PCLK 21
+#define CLK_GOUT_I2C7_PCLK 22
+#define CLK_GOUT_PWM_MOTOR_PCLK 23
+#define CLK_GOUT_SPI0_PCLK 24
+#define CLK_GOUT_SPI0_EXT_CLK 25
+#define CLK_GOUT_SPI1_PCLK 26
+#define CLK_GOUT_SPI1_EXT_CLK 27
+#define CLK_GOUT_UART0_EXT_UCLK 28
+#define CLK_GOUT_UART0_PCLK 29
+#define CLK_GOUT_UART1_EXT_UCLK 30
+#define CLK_GOUT_UART1_PCLK 31
+#define CLK_GOUT_UART2_EXT_UCLK 32
+#define CLK_GOUT_UART2_PCLK 33
+#define CLK_GOUT_USI0_PCLK 34
+#define CLK_GOUT_USI0_SCLK 35
+#define CLK_GOUT_USI1_PCLK 36
+#define CLK_GOUT_USI1_SCLK 37
+#define CLK_GOUT_USI2_PCLK 38
+#define CLK_GOUT_USI2_SCLK 39
+#define CLK_GOUT_MCT_PCLK 40
+#define CLK_GOUT_SYSREG_PERI_PCLK 41
+#define CLK_GOUT_WDT0_PCLK 42
+#define CLK_GOUT_WDT1_PCLK 43
+
+/* CMU_FSYS */
+#define CLK_MOUT_FSYS_BUS_USER 1
+#define CLK_MOUT_FSYS_MMC_CARD_USER 2
+#define CLK_MOUT_FSYS_MMC_EMBD_USER 3
+#define CLK_MOUT_FSYS_MMC_SDIO_USER 4
+#define CLK_GOUT_MMC_CARD_ACLK 5
+#define CLK_GOUT_MMC_CARD_SDCLKIN 6
+#define CLK_GOUT_MMC_EMBD_ACLK 7
+#define CLK_GOUT_MMC_EMBD_SDCLKIN 8
+#define CLK_GOUT_MMC_SDIO_ACLK 9
+#define CLK_GOUT_MMC_SDIO_SDCLKIN 10
+#define CLK_MOUT_FSYS_USB30DRD_USER 11
+#define CLK_MOUT_USB_PLL 12
+#define CLK_FOUT_USB_PLL 13
+#define CLK_FSYS_USB20PHY_CLKCORE 14
+#define CLK_FSYS_USB30DRD_ACLK_20PHYCTRL 15
+#define CLK_FSYS_USB30DRD_ACLK_30PHYCTRL_0 16
+#define CLK_FSYS_USB30DRD_ACLK_30PHYCTRL_1 17
+#define CLK_FSYS_USB30DRD_BUS_CLK_EARLY 18
+#define CLK_FSYS_USB30DRD_REF_CLK 19
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS_7885_H */
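The CLK_GOUT_* outputs of CMU_TOP above are the parents that the leaf CMUs re-import through their *_USER muxes (CLK_GOUT_PERI_BUS feeds CLK_MOUT_PERI_BUS_USER, and so on). A sketch of that wiring, in which the node labels, unit address and clock-names are all assumptions rather than part of this patch:

cmu_peri: clock-controller@10010000 {
	compatible = "samsung,exynos7885-cmu-peri";
	#clock-cells = <1>;
	/* CMU_TOP gate outputs become the _USER mux parents in CMU_PERI. */
	clocks = <&oscclk>, <&cmu_top CLK_GOUT_PERI_BUS>,
		 <&cmu_top CLK_GOUT_PERI_UART0>;
	clock-names = "oscclk", "bus", "uart0";		/* assumed names */
};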
diff --git a/include/dt-bindings/clock/exynos850.h b/include/dt-bindings/clock/exynos850.h
new file mode 100644
index 000000000000..80dacda57229
--- /dev/null
+++ b/include/dt-bindings/clock/exynos850.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021 Linaro Ltd.
+ * Author: Sam Protsenko <semen.protsenko@linaro.org>
+ *
+ * Device Tree binding constants for Exynos850 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS_850_H
+#define _DT_BINDINGS_CLOCK_EXYNOS_850_H
+
+/* CMU_TOP */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_FOUT_MMC_PLL 3
+#define CLK_MOUT_SHARED0_PLL 4
+#define CLK_MOUT_SHARED1_PLL 5
+#define CLK_MOUT_MMC_PLL 6
+#define CLK_MOUT_CORE_BUS 7
+#define CLK_MOUT_CORE_CCI 8
+#define CLK_MOUT_CORE_MMC_EMBD 9
+#define CLK_MOUT_CORE_SSS 10
+#define CLK_MOUT_DPU 11
+#define CLK_MOUT_HSI_BUS 12
+#define CLK_MOUT_HSI_MMC_CARD 13
+#define CLK_MOUT_HSI_USB20DRD 14
+#define CLK_MOUT_PERI_BUS 15
+#define CLK_MOUT_PERI_UART 16
+#define CLK_MOUT_PERI_IP 17
+#define CLK_DOUT_SHARED0_DIV3 18
+#define CLK_DOUT_SHARED0_DIV2 19
+#define CLK_DOUT_SHARED1_DIV3 20
+#define CLK_DOUT_SHARED1_DIV2 21
+#define CLK_DOUT_SHARED0_DIV4 22
+#define CLK_DOUT_SHARED1_DIV4 23
+#define CLK_DOUT_CORE_BUS 24
+#define CLK_DOUT_CORE_CCI 25
+#define CLK_DOUT_CORE_MMC_EMBD 26
+#define CLK_DOUT_CORE_SSS 27
+#define CLK_DOUT_DPU 28
+#define CLK_DOUT_HSI_BUS 29
+#define CLK_DOUT_HSI_MMC_CARD 30
+#define CLK_DOUT_HSI_USB20DRD 31
+#define CLK_DOUT_PERI_BUS 32
+#define CLK_DOUT_PERI_UART 33
+#define CLK_DOUT_PERI_IP 34
+#define CLK_GOUT_CORE_BUS 35
+#define CLK_GOUT_CORE_CCI 36
+#define CLK_GOUT_CORE_MMC_EMBD 37
+#define CLK_GOUT_CORE_SSS 38
+#define CLK_GOUT_DPU 39
+#define CLK_GOUT_HSI_BUS 40
+#define CLK_GOUT_HSI_MMC_CARD 41
+#define CLK_GOUT_HSI_USB20DRD 42
+#define CLK_GOUT_PERI_BUS 43
+#define CLK_GOUT_PERI_UART 44
+#define CLK_GOUT_PERI_IP 45
+#define CLK_MOUT_CLKCMU_APM_BUS 46
+#define CLK_DOUT_CLKCMU_APM_BUS 47
+#define CLK_GOUT_CLKCMU_APM_BUS 48
+#define CLK_MOUT_AUD 49
+#define CLK_GOUT_AUD 50
+#define CLK_DOUT_AUD 51
+#define CLK_MOUT_IS_BUS 52
+#define CLK_MOUT_IS_ITP 53
+#define CLK_MOUT_IS_VRA 54
+#define CLK_MOUT_IS_GDC 55
+#define CLK_GOUT_IS_BUS 56
+#define CLK_GOUT_IS_ITP 57
+#define CLK_GOUT_IS_VRA 58
+#define CLK_GOUT_IS_GDC 59
+#define CLK_DOUT_IS_BUS 60
+#define CLK_DOUT_IS_ITP 61
+#define CLK_DOUT_IS_VRA 62
+#define CLK_DOUT_IS_GDC 63
+#define CLK_MOUT_MFCMSCL_MFC 64
+#define CLK_MOUT_MFCMSCL_M2M 65
+#define CLK_MOUT_MFCMSCL_MCSC 66
+#define CLK_MOUT_MFCMSCL_JPEG 67
+#define CLK_GOUT_MFCMSCL_MFC 68
+#define CLK_GOUT_MFCMSCL_M2M 69
+#define CLK_GOUT_MFCMSCL_MCSC 70
+#define CLK_GOUT_MFCMSCL_JPEG 71
+#define CLK_DOUT_MFCMSCL_MFC 72
+#define CLK_DOUT_MFCMSCL_M2M 73
+#define CLK_DOUT_MFCMSCL_MCSC 74
+#define CLK_DOUT_MFCMSCL_JPEG 75
+#define CLK_MOUT_G3D_SWITCH 76
+#define CLK_GOUT_G3D_SWITCH 77
+#define CLK_DOUT_G3D_SWITCH 78
+#define CLK_MOUT_CPUCL0_DBG 79
+#define CLK_MOUT_CPUCL0_SWITCH 80
+#define CLK_GOUT_CPUCL0_DBG 81
+#define CLK_GOUT_CPUCL0_SWITCH 82
+#define CLK_DOUT_CPUCL0_DBG 83
+#define CLK_DOUT_CPUCL0_SWITCH 84
+#define CLK_MOUT_CPUCL1_DBG 85
+#define CLK_MOUT_CPUCL1_SWITCH 86
+#define CLK_GOUT_CPUCL1_DBG 87
+#define CLK_GOUT_CPUCL1_SWITCH 88
+#define CLK_DOUT_CPUCL1_DBG 89
+#define CLK_DOUT_CPUCL1_SWITCH 90
+
+/* CMU_APM */
+#define CLK_RCO_I3C_PMIC 1
+#define OSCCLK_RCO_APM 2
+#define CLK_RCO_APM__ALV 3
+#define CLK_DLL_DCO 4
+#define CLK_MOUT_APM_BUS_USER 5
+#define CLK_MOUT_RCO_APM_I3C_USER 6
+#define CLK_MOUT_RCO_APM_USER 7
+#define CLK_MOUT_DLL_USER 8
+#define CLK_MOUT_CLKCMU_CHUB_BUS 9
+#define CLK_MOUT_APM_BUS 10
+#define CLK_MOUT_APM_I3C 11
+#define CLK_DOUT_CLKCMU_CHUB_BUS 12
+#define CLK_DOUT_APM_BUS 13
+#define CLK_DOUT_APM_I3C 14
+#define CLK_GOUT_CLKCMU_CMGP_BUS 15
+#define CLK_GOUT_CLKCMU_CHUB_BUS 16
+#define CLK_GOUT_RTC_PCLK 17
+#define CLK_GOUT_TOP_RTC_PCLK 18
+#define CLK_GOUT_I3C_PCLK 19
+#define CLK_GOUT_I3C_SCLK 20
+#define CLK_GOUT_SPEEDY_PCLK 21
+#define CLK_GOUT_GPIO_ALIVE_PCLK 22
+#define CLK_GOUT_PMU_ALIVE_PCLK 23
+#define CLK_GOUT_SYSREG_APM_PCLK 24
+
+/* CMU_AUD */
+#define CLK_DOUT_AUD_AUDIF 1
+#define CLK_DOUT_AUD_BUSD 2
+#define CLK_DOUT_AUD_BUSP 3
+#define CLK_DOUT_AUD_CNT 4
+#define CLK_DOUT_AUD_CPU 5
+#define CLK_DOUT_AUD_CPU_ACLK 6
+#define CLK_DOUT_AUD_CPU_PCLKDBG 7
+#define CLK_DOUT_AUD_FM 8
+#define CLK_DOUT_AUD_FM_SPDY 9
+#define CLK_DOUT_AUD_MCLK 10
+#define CLK_DOUT_AUD_UAIF0 11
+#define CLK_DOUT_AUD_UAIF1 12
+#define CLK_DOUT_AUD_UAIF2 13
+#define CLK_DOUT_AUD_UAIF3 14
+#define CLK_DOUT_AUD_UAIF4 15
+#define CLK_DOUT_AUD_UAIF5 16
+#define CLK_DOUT_AUD_UAIF6 17
+#define CLK_FOUT_AUD_PLL 18
+#define CLK_GOUT_AUD_ABOX_ACLK 19
+#define CLK_GOUT_AUD_ASB_CCLK 20
+#define CLK_GOUT_AUD_CA32_CCLK 21
+#define CLK_GOUT_AUD_CNT_BCLK 22
+#define CLK_GOUT_AUD_CODEC_MCLK 23
+#define CLK_GOUT_AUD_DAP_CCLK 24
+#define CLK_GOUT_AUD_GPIO_PCLK 25
+#define CLK_GOUT_AUD_PPMU_ACLK 26
+#define CLK_GOUT_AUD_PPMU_PCLK 27
+#define CLK_GOUT_AUD_SPDY_BCLK 28
+#define CLK_GOUT_AUD_SYSMMU_CLK 29
+#define CLK_GOUT_AUD_SYSREG_PCLK 30
+#define CLK_GOUT_AUD_TZPC_PCLK 31
+#define CLK_GOUT_AUD_UAIF0_BCLK 32
+#define CLK_GOUT_AUD_UAIF1_BCLK 33
+#define CLK_GOUT_AUD_UAIF2_BCLK 34
+#define CLK_GOUT_AUD_UAIF3_BCLK 35
+#define CLK_GOUT_AUD_UAIF4_BCLK 36
+#define CLK_GOUT_AUD_UAIF5_BCLK 37
+#define CLK_GOUT_AUD_UAIF6_BCLK 38
+#define CLK_GOUT_AUD_WDT_PCLK 39
+#define CLK_MOUT_AUD_CPU 40
+#define CLK_MOUT_AUD_CPU_HCH 41
+#define CLK_MOUT_AUD_CPU_USER 42
+#define CLK_MOUT_AUD_FM 43
+#define CLK_MOUT_AUD_PLL 44
+#define CLK_MOUT_AUD_TICK_USB_USER 45
+#define CLK_MOUT_AUD_UAIF0 46
+#define CLK_MOUT_AUD_UAIF1 47
+#define CLK_MOUT_AUD_UAIF2 48
+#define CLK_MOUT_AUD_UAIF3 49
+#define CLK_MOUT_AUD_UAIF4 50
+#define CLK_MOUT_AUD_UAIF5 51
+#define CLK_MOUT_AUD_UAIF6 52
+#define IOCLK_AUDIOCDCLK0 53
+#define IOCLK_AUDIOCDCLK1 54
+#define IOCLK_AUDIOCDCLK2 55
+#define IOCLK_AUDIOCDCLK3 56
+#define IOCLK_AUDIOCDCLK4 57
+#define IOCLK_AUDIOCDCLK5 58
+#define IOCLK_AUDIOCDCLK6 59
+#define TICK_USB 60
+#define CLK_GOUT_AUD_CMU_AUD_PCLK 61
+
+/* CMU_CMGP */
+#define CLK_RCO_CMGP 1
+#define CLK_MOUT_CMGP_ADC 2
+#define CLK_MOUT_CMGP_USI0 3
+#define CLK_MOUT_CMGP_USI1 4
+#define CLK_DOUT_CMGP_ADC 5
+#define CLK_DOUT_CMGP_USI0 6
+#define CLK_DOUT_CMGP_USI1 7
+#define CLK_GOUT_CMGP_ADC_S0_PCLK 8
+#define CLK_GOUT_CMGP_ADC_S1_PCLK 9
+#define CLK_GOUT_CMGP_GPIO_PCLK 10
+#define CLK_GOUT_CMGP_USI0_IPCLK 11
+#define CLK_GOUT_CMGP_USI0_PCLK 12
+#define CLK_GOUT_CMGP_USI1_IPCLK 13
+#define CLK_GOUT_CMGP_USI1_PCLK 14
+#define CLK_GOUT_SYSREG_CMGP_PCLK 15
+
+/* CMU_CPUCL0 */
+#define CLK_FOUT_CPUCL0_PLL 1
+#define CLK_MOUT_PLL_CPUCL0 2
+#define CLK_MOUT_CPUCL0_SWITCH_USER 3
+#define CLK_MOUT_CPUCL0_DBG_USER 4
+#define CLK_MOUT_CPUCL0_PLL 5
+#define CLK_DOUT_CPUCL0_CPU 6
+#define CLK_DOUT_CPUCL0_CMUREF 7
+#define CLK_DOUT_CPUCL0_PCLK 8
+#define CLK_DOUT_CLUSTER0_ACLK 9
+#define CLK_DOUT_CLUSTER0_ATCLK 10
+#define CLK_DOUT_CLUSTER0_PCLKDBG 11
+#define CLK_DOUT_CLUSTER0_PERIPHCLK 12
+#define CLK_GOUT_CLUSTER0_ATCLK 13
+#define CLK_GOUT_CLUSTER0_PCLK 14
+#define CLK_GOUT_CLUSTER0_PERIPHCLK 15
+#define CLK_GOUT_CLUSTER0_SCLK 16
+#define CLK_GOUT_CPUCL0_CMU_CPUCL0_PCLK 17
+#define CLK_GOUT_CLUSTER0_CPU 18
+#define CLK_CLUSTER0_SCLK 19
+
+/* CMU_CPUCL1 */
+#define CLK_FOUT_CPUCL1_PLL 1
+#define CLK_MOUT_PLL_CPUCL1 2
+#define CLK_MOUT_CPUCL1_SWITCH_USER 3
+#define CLK_MOUT_CPUCL1_DBG_USER 4
+#define CLK_MOUT_CPUCL1_PLL 5
+#define CLK_DOUT_CPUCL1_CPU 6
+#define CLK_DOUT_CPUCL1_CMUREF 7
+#define CLK_DOUT_CPUCL1_PCLK 8
+#define CLK_DOUT_CLUSTER1_ACLK 9
+#define CLK_DOUT_CLUSTER1_ATCLK 10
+#define CLK_DOUT_CLUSTER1_PCLKDBG 11
+#define CLK_DOUT_CLUSTER1_PERIPHCLK 12
+#define CLK_GOUT_CLUSTER1_ATCLK 13
+#define CLK_GOUT_CLUSTER1_PCLK 14
+#define CLK_GOUT_CLUSTER1_PERIPHCLK 15
+#define CLK_GOUT_CLUSTER1_SCLK 16
+#define CLK_GOUT_CPUCL1_CMU_CPUCL1_PCLK 17
+#define CLK_GOUT_CLUSTER1_CPU 18
+#define CLK_CLUSTER1_SCLK 19
+
+/* CMU_G3D */
+#define CLK_FOUT_G3D_PLL 1
+#define CLK_MOUT_G3D_PLL 2
+#define CLK_MOUT_G3D_SWITCH_USER 3
+#define CLK_MOUT_G3D_BUSD 4
+#define CLK_DOUT_G3D_BUSP 5
+#define CLK_GOUT_G3D_CMU_G3D_PCLK 6
+#define CLK_GOUT_G3D_GPU_CLK 7
+#define CLK_GOUT_G3D_TZPC_PCLK 8
+#define CLK_GOUT_G3D_GRAY2BIN_CLK 9
+#define CLK_GOUT_G3D_BUSD_CLK 10
+#define CLK_GOUT_G3D_BUSP_CLK 11
+#define CLK_GOUT_G3D_SYSREG_PCLK 12
+
+/* CMU_HSI */
+#define CLK_MOUT_HSI_BUS_USER 1
+#define CLK_MOUT_HSI_MMC_CARD_USER 2
+#define CLK_MOUT_HSI_USB20DRD_USER 3
+#define CLK_MOUT_HSI_RTC 4
+#define CLK_GOUT_USB_RTC_CLK 5
+#define CLK_GOUT_USB_REF_CLK 6
+#define CLK_GOUT_USB_PHY_REF_CLK 7
+#define CLK_GOUT_USB_PHY_ACLK 8
+#define CLK_GOUT_USB_BUS_EARLY_CLK 9
+#define CLK_GOUT_GPIO_HSI_PCLK 10
+#define CLK_GOUT_MMC_CARD_ACLK 11
+#define CLK_GOUT_MMC_CARD_SDCLKIN 12
+#define CLK_GOUT_SYSREG_HSI_PCLK 13
+#define CLK_GOUT_HSI_PPMU_ACLK 14
+#define CLK_GOUT_HSI_PPMU_PCLK 15
+#define CLK_GOUT_HSI_CMU_HSI_PCLK 16
+
+/* CMU_IS */
+#define CLK_MOUT_IS_BUS_USER 1
+#define CLK_MOUT_IS_ITP_USER 2
+#define CLK_MOUT_IS_VRA_USER 3
+#define CLK_MOUT_IS_GDC_USER 4
+#define CLK_DOUT_IS_BUSP 5
+#define CLK_GOUT_IS_CMU_IS_PCLK 6
+#define CLK_GOUT_IS_CSIS0_ACLK 7
+#define CLK_GOUT_IS_CSIS1_ACLK 8
+#define CLK_GOUT_IS_CSIS2_ACLK 9
+#define CLK_GOUT_IS_TZPC_PCLK 10
+#define CLK_GOUT_IS_CSIS_DMA_CLK 11
+#define CLK_GOUT_IS_GDC_CLK 12
+#define CLK_GOUT_IS_IPP_CLK 13
+#define CLK_GOUT_IS_ITP_CLK 14
+#define CLK_GOUT_IS_MCSC_CLK 15
+#define CLK_GOUT_IS_VRA_CLK 16
+#define CLK_GOUT_IS_PPMU_IS0_ACLK 17
+#define CLK_GOUT_IS_PPMU_IS0_PCLK 18
+#define CLK_GOUT_IS_PPMU_IS1_ACLK 19
+#define CLK_GOUT_IS_PPMU_IS1_PCLK 20
+#define CLK_GOUT_IS_SYSMMU_IS0_CLK 21
+#define CLK_GOUT_IS_SYSMMU_IS1_CLK 22
+#define CLK_GOUT_IS_SYSREG_PCLK 23
+
+/* CMU_MFCMSCL */
+#define CLK_MOUT_MFCMSCL_MFC_USER 1
+#define CLK_MOUT_MFCMSCL_M2M_USER 2
+#define CLK_MOUT_MFCMSCL_MCSC_USER 3
+#define CLK_MOUT_MFCMSCL_JPEG_USER 4
+#define CLK_DOUT_MFCMSCL_BUSP 5
+#define CLK_GOUT_MFCMSCL_CMU_MFCMSCL_PCLK 6
+#define CLK_GOUT_MFCMSCL_TZPC_PCLK 7
+#define CLK_GOUT_MFCMSCL_JPEG_ACLK 8
+#define CLK_GOUT_MFCMSCL_M2M_ACLK 9
+#define CLK_GOUT_MFCMSCL_MCSC_CLK 10
+#define CLK_GOUT_MFCMSCL_MFC_ACLK 11
+#define CLK_GOUT_MFCMSCL_PPMU_ACLK 12
+#define CLK_GOUT_MFCMSCL_PPMU_PCLK 13
+#define CLK_GOUT_MFCMSCL_SYSMMU_CLK 14
+#define CLK_GOUT_MFCMSCL_SYSREG_PCLK 15
+
+/* CMU_PERI */
+#define CLK_MOUT_PERI_BUS_USER 1
+#define CLK_MOUT_PERI_UART_USER 2
+#define CLK_MOUT_PERI_HSI2C_USER 3
+#define CLK_MOUT_PERI_SPI_USER 4
+#define CLK_DOUT_PERI_HSI2C0 5
+#define CLK_DOUT_PERI_HSI2C1 6
+#define CLK_DOUT_PERI_HSI2C2 7
+#define CLK_DOUT_PERI_SPI0 8
+#define CLK_GOUT_PERI_HSI2C0 9
+#define CLK_GOUT_PERI_HSI2C1 10
+#define CLK_GOUT_PERI_HSI2C2 11
+#define CLK_GOUT_GPIO_PERI_PCLK 12
+#define CLK_GOUT_HSI2C0_IPCLK 13
+#define CLK_GOUT_HSI2C0_PCLK 14
+#define CLK_GOUT_HSI2C1_IPCLK 15
+#define CLK_GOUT_HSI2C1_PCLK 16
+#define CLK_GOUT_HSI2C2_IPCLK 17
+#define CLK_GOUT_HSI2C2_PCLK 18
+#define CLK_GOUT_I2C0_PCLK 19
+#define CLK_GOUT_I2C1_PCLK 20
+#define CLK_GOUT_I2C2_PCLK 21
+#define CLK_GOUT_I2C3_PCLK 22
+#define CLK_GOUT_I2C4_PCLK 23
+#define CLK_GOUT_I2C5_PCLK 24
+#define CLK_GOUT_I2C6_PCLK 25
+#define CLK_GOUT_MCT_PCLK 26
+#define CLK_GOUT_PWM_MOTOR_PCLK 27
+#define CLK_GOUT_SPI0_IPCLK 28
+#define CLK_GOUT_SPI0_PCLK 29
+#define CLK_GOUT_SYSREG_PERI_PCLK 30
+#define CLK_GOUT_UART_IPCLK 31
+#define CLK_GOUT_UART_PCLK 32
+#define CLK_GOUT_WDT0_PCLK 33
+#define CLK_GOUT_WDT1_PCLK 34
+#define CLK_GOUT_BUSIF_TMU_PCLK 35
+
+/* CMU_CORE */
+#define CLK_MOUT_CORE_BUS_USER 1
+#define CLK_MOUT_CORE_CCI_USER 2
+#define CLK_MOUT_CORE_MMC_EMBD_USER 3
+#define CLK_MOUT_CORE_SSS_USER 4
+#define CLK_MOUT_CORE_GIC 5
+#define CLK_DOUT_CORE_BUSP 6
+#define CLK_GOUT_CCI_ACLK 7
+#define CLK_GOUT_GIC_CLK 8
+#define CLK_GOUT_MMC_EMBD_ACLK 9
+#define CLK_GOUT_MMC_EMBD_SDCLKIN 10
+#define CLK_GOUT_SSS_ACLK 11
+#define CLK_GOUT_SSS_PCLK 12
+#define CLK_GOUT_GPIO_CORE_PCLK 13
+#define CLK_GOUT_SYSREG_CORE_PCLK 14
+#define CLK_GOUT_PDMA_CORE_ACLK 15
+#define CLK_GOUT_SPDMA_CORE_ACLK 16
+
+/* CMU_DPU */
+#define CLK_MOUT_DPU_USER 1
+#define CLK_DOUT_DPU_BUSP 2
+#define CLK_GOUT_DPU_CMU_DPU_PCLK 3
+#define CLK_GOUT_DPU_DECON0_ACLK 4
+#define CLK_GOUT_DPU_DMA_ACLK 5
+#define CLK_GOUT_DPU_DPP_ACLK 6
+#define CLK_GOUT_DPU_PPMU_ACLK 7
+#define CLK_GOUT_DPU_PPMU_PCLK 8
+#define CLK_GOUT_DPU_SMMU_CLK 9
+#define CLK_GOUT_DPU_SYSREG_PCLK 10
+#define DPU_NR_CLK 11
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS_850_H */
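As a usage sketch for the CMU_PERI IDs above, a serial node picking up its bus (PCLK) and baud-source (IPCLK) clocks; the label, unit address, compatible and clock-names are assumptions for illustration:

serial_0: serial@13820000 {
	compatible = "samsung,exynos850-uart";		/* assumed */
	clocks = <&cmu_peri CLK_GOUT_UART_PCLK>,
		 <&cmu_peri CLK_GOUT_UART_IPCLK>;
	clock-names = "uart", "clk_uart_baud0";		/* assumed */
};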
diff --git a/include/dt-bindings/clock/fsd-clk.h b/include/dt-bindings/clock/fsd-clk.h
new file mode 100644
index 000000000000..58fdec8f4c2a
--- /dev/null
+++ b/include/dt-bindings/clock/fsd-clk.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2022 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ * Copyright (c) 2017-2022 Tesla, Inc.
+ * https://www.tesla.com
+ *
+ * The constants defined in this header are used in the dts
+ * files and in the fsd platform driver.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_FSD_H
+#define _DT_BINDINGS_CLOCK_FSD_H
+
+/* CMU */
+#define DOUT_CMU_PLL_SHARED0_DIV4 1
+#define DOUT_CMU_PERIC_SHARED1DIV36 2
+#define DOUT_CMU_PERIC_SHARED0DIV3_TBUCLK 3
+#define DOUT_CMU_PERIC_SHARED0DIV20 4
+#define DOUT_CMU_PERIC_SHARED1DIV4_DMACLK 5
+#define DOUT_CMU_PLL_SHARED0_DIV6 6
+#define DOUT_CMU_FSYS0_SHARED1DIV4 7
+#define DOUT_CMU_FSYS0_SHARED0DIV4 8
+#define DOUT_CMU_FSYS1_SHARED0DIV8 9
+#define DOUT_CMU_FSYS1_SHARED0DIV4 10
+#define CMU_CPUCL_SWITCH_GATE 11
+#define DOUT_CMU_IMEM_TCUCLK 12
+#define DOUT_CMU_IMEM_ACLK 13
+#define DOUT_CMU_IMEM_DMACLK 14
+#define GAT_CMU_FSYS0_SHARED0DIV4 15
+
+/* PERIC */
+#define PERIC_SCLK_UART0 1
+#define PERIC_PCLK_UART0 2
+#define PERIC_SCLK_UART1 3
+#define PERIC_PCLK_UART1 4
+#define PERIC_DMA0_IPCLKPORT_ACLK 5
+#define PERIC_DMA1_IPCLKPORT_ACLK 6
+#define PERIC_PWM0_IPCLKPORT_I_PCLK_S0 7
+#define PERIC_PWM1_IPCLKPORT_I_PCLK_S0 8
+#define PERIC_PCLK_SPI0 9
+#define PERIC_SCLK_SPI0 10
+#define PERIC_PCLK_SPI1 11
+#define PERIC_SCLK_SPI1 12
+#define PERIC_PCLK_SPI2 13
+#define PERIC_SCLK_SPI2 14
+#define PERIC_PCLK_TDM0 15
+#define PERIC_PCLK_HSI2C0 16
+#define PERIC_PCLK_HSI2C1 17
+#define PERIC_PCLK_HSI2C2 18
+#define PERIC_PCLK_HSI2C3 19
+#define PERIC_PCLK_HSI2C4 20
+#define PERIC_PCLK_HSI2C5 21
+#define PERIC_PCLK_HSI2C6 22
+#define PERIC_PCLK_HSI2C7 23
+#define PERIC_MCAN0_IPCLKPORT_CCLK 24
+#define PERIC_MCAN0_IPCLKPORT_PCLK 25
+#define PERIC_MCAN1_IPCLKPORT_CCLK 26
+#define PERIC_MCAN1_IPCLKPORT_PCLK 27
+#define PERIC_MCAN2_IPCLKPORT_CCLK 28
+#define PERIC_MCAN2_IPCLKPORT_PCLK 29
+#define PERIC_MCAN3_IPCLKPORT_CCLK 30
+#define PERIC_MCAN3_IPCLKPORT_PCLK 31
+#define PERIC_PCLK_ADCIF 32
+#define PERIC_EQOS_TOP_IPCLKPORT_CLK_PTP_REF_I 33
+#define PERIC_EQOS_TOP_IPCLKPORT_ACLK_I 34
+#define PERIC_EQOS_TOP_IPCLKPORT_HCLK_I 35
+#define PERIC_EQOS_TOP_IPCLKPORT_RGMII_CLK_I 36
+#define PERIC_EQOS_TOP_IPCLKPORT_CLK_RX_I 37
+#define PERIC_BUS_D_PERIC_IPCLKPORT_EQOSCLK 38
+#define PERIC_BUS_P_PERIC_IPCLKPORT_EQOSCLK 39
+#define PERIC_HCLK_TDM0 40
+#define PERIC_PCLK_TDM1 41
+#define PERIC_HCLK_TDM1 42
+#define PERIC_EQOS_PHYRXCLK_MUX 43
+#define PERIC_EQOS_PHYRXCLK 44
+#define PERIC_DOUT_RGMII_CLK 45
+
+/* FSYS0 */
+#define UFS0_MPHY_REFCLK_IXTAL24 1
+#define UFS0_MPHY_REFCLK_IXTAL26 2
+#define UFS1_MPHY_REFCLK_IXTAL24 3
+#define UFS1_MPHY_REFCLK_IXTAL26 4
+#define UFS0_TOP0_HCLK_BUS 5
+#define UFS0_TOP0_ACLK 6
+#define UFS0_TOP0_CLK_UNIPRO 7
+#define UFS0_TOP0_FMP_CLK 8
+#define UFS1_TOP1_HCLK_BUS 9
+#define UFS1_TOP1_ACLK 10
+#define UFS1_TOP1_CLK_UNIPRO 11
+#define UFS1_TOP1_FMP_CLK 12
+#define PCIE_SUBCTRL_INST0_DBI_ACLK_SOC 13
+#define PCIE_SUBCTRL_INST0_AUX_CLK_SOC 14
+#define PCIE_SUBCTRL_INST0_MSTR_ACLK_SOC 15
+#define PCIE_SUBCTRL_INST0_SLV_ACLK_SOC 16
+#define FSYS0_EQOS_TOP0_IPCLKPORT_CLK_PTP_REF_I 17
+#define FSYS0_EQOS_TOP0_IPCLKPORT_ACLK_I 18
+#define FSYS0_EQOS_TOP0_IPCLKPORT_HCLK_I 19
+#define FSYS0_EQOS_TOP0_IPCLKPORT_RGMII_CLK_I 20
+#define FSYS0_EQOS_TOP0_IPCLKPORT_CLK_RX_I 21
+#define FSYS0_DOUT_FSYS0_PERIBUS_GRP 22
+
+/* FSYS1 */
+#define PCIE_LINK0_IPCLKPORT_DBI_ACLK 1
+#define PCIE_LINK0_IPCLKPORT_AUX_ACLK 2
+#define PCIE_LINK0_IPCLKPORT_MSTR_ACLK 3
+#define PCIE_LINK0_IPCLKPORT_SLV_ACLK 4
+#define PCIE_LINK1_IPCLKPORT_DBI_ACLK 5
+#define PCIE_LINK1_IPCLKPORT_AUX_ACLK 6
+#define PCIE_LINK1_IPCLKPORT_MSTR_ACLK 7
+#define PCIE_LINK1_IPCLKPORT_SLV_ACLK 8
+
+/* IMEM */
+#define IMEM_DMA0_IPCLKPORT_ACLK 1
+#define IMEM_DMA1_IPCLKPORT_ACLK 2
+#define IMEM_WDT0_IPCLKPORT_PCLK 3
+#define IMEM_WDT1_IPCLKPORT_PCLK 4
+#define IMEM_WDT2_IPCLKPORT_PCLK 5
+#define IMEM_MCT_PCLK 6
+#define IMEM_TMU_CPU0_IPCLKPORT_I_CLK_TS 7
+#define IMEM_TMU_CPU2_IPCLKPORT_I_CLK_TS 8
+#define IMEM_TMU_TOP_IPCLKPORT_I_CLK_TS 9
+#define IMEM_TMU_GPU_IPCLKPORT_I_CLK_TS 10
+#define IMEM_TMU_GT_IPCLKPORT_I_CLK_TS 11
+
+/* MFC */
+#define MFC_MFC_IPCLKPORT_ACLK 1
+
+/* CAM_CSI */
+#define CAM_CSI0_0_IPCLKPORT_I_ACLK 1
+#define CAM_CSI0_1_IPCLKPORT_I_ACLK 2
+#define CAM_CSI0_2_IPCLKPORT_I_ACLK 3
+#define CAM_CSI0_3_IPCLKPORT_I_ACLK 4
+#define CAM_CSI1_0_IPCLKPORT_I_ACLK 5
+#define CAM_CSI1_1_IPCLKPORT_I_ACLK 6
+#define CAM_CSI1_2_IPCLKPORT_I_ACLK 7
+#define CAM_CSI1_3_IPCLKPORT_I_ACLK 8
+#define CAM_CSI2_0_IPCLKPORT_I_ACLK 9
+#define CAM_CSI2_1_IPCLKPORT_I_ACLK 10
+#define CAM_CSI2_2_IPCLKPORT_I_ACLK 11
+#define CAM_CSI2_3_IPCLKPORT_I_ACLK 12
+#define CAM_CSI_PLL 13
+#define CAM_CSI0_0_IPCLKPORT_I_PCLK 14
+#define CAM_CSI0_1_IPCLKPORT_I_PCLK 15
+#define CAM_CSI0_2_IPCLKPORT_I_PCLK 16
+#define CAM_CSI0_3_IPCLKPORT_I_PCLK 17
+#define CAM_CSI1_0_IPCLKPORT_I_PCLK 18
+#define CAM_CSI1_1_IPCLKPORT_I_PCLK 19
+#define CAM_CSI1_2_IPCLKPORT_I_PCLK 20
+#define CAM_CSI1_3_IPCLKPORT_I_PCLK 21
+#define CAM_CSI2_0_IPCLKPORT_I_PCLK 22
+#define CAM_CSI2_1_IPCLKPORT_I_PCLK 23
+#define CAM_CSI2_2_IPCLKPORT_I_PCLK 24
+#define CAM_CSI2_3_IPCLKPORT_I_PCLK 25
+
+#endif /* _DT_BINDINGS_CLOCK_FSD_H */
diff --git a/include/dt-bindings/clock/g12a-aoclkc.h b/include/dt-bindings/clock/g12a-aoclkc.h
index e916e49ff288..8fe7712fb12d 100644
--- a/include/dt-bindings/clock/g12a-aoclkc.h
+++ b/include/dt-bindings/clock/g12a-aoclkc.h
@@ -26,10 +26,17 @@
#define CLKID_AO_M4_FCLK 13
#define CLKID_AO_M4_HCLK 14
#define CLKID_AO_CLK81 15
#define CLKID_AO_SAR_ADC_SEL 16
+#define CLKID_AO_SAR_ADC_DIV 17
#define CLKID_AO_SAR_ADC_CLK 18
#define CLKID_AO_CTS_OSCIN 19
+#define CLKID_AO_32K_PRE 20
+#define CLKID_AO_32K_DIV 21
+#define CLKID_AO_32K_SEL 22
#define CLKID_AO_32K 23
+#define CLKID_AO_CEC_PRE 24
+#define CLKID_AO_CEC_DIV 25
+#define CLKID_AO_CEC_SEL 26
#define CLKID_AO_CEC 27
#define CLKID_AO_CTS_RTC_OSCIN 28
diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h
index a93b58c5e18e..fd09819da2ec 100644
--- a/include/dt-bindings/clock/g12a-clkc.h
+++ b/include/dt-bindings/clock/g12a-clkc.h
@@ -16,6 +16,8 @@
#define CLKID_FCLK_DIV5 5
#define CLKID_FCLK_DIV7 6
#define CLKID_GP0_PLL 7
+#define CLKID_MPEG_SEL 8
+#define CLKID_MPEG_DIV 9
#define CLKID_CLK81 10
#define CLKID_MPLL0 11
#define CLKID_MPLL1 12
@@ -69,7 +71,23 @@
#define CLKID_SD_EMMC_A_CLK0 60
#define CLKID_SD_EMMC_B_CLK0 61
#define CLKID_SD_EMMC_C_CLK0 62
+#define CLKID_SD_EMMC_A_CLK0_SEL 63
+#define CLKID_SD_EMMC_A_CLK0_DIV 64
+#define CLKID_SD_EMMC_B_CLK0_SEL 65
+#define CLKID_SD_EMMC_B_CLK0_DIV 66
+#define CLKID_SD_EMMC_C_CLK0_SEL 67
+#define CLKID_SD_EMMC_C_CLK0_DIV 68
+#define CLKID_MPLL0_DIV 69
+#define CLKID_MPLL1_DIV 70
+#define CLKID_MPLL2_DIV 71
+#define CLKID_MPLL3_DIV 72
+#define CLKID_MPLL_PREDIV 73
#define CLKID_HIFI_PLL 74
+#define CLKID_FCLK_DIV2_DIV 75
+#define CLKID_FCLK_DIV3_DIV 76
+#define CLKID_FCLK_DIV4_DIV 77
+#define CLKID_FCLK_DIV5_DIV 78
+#define CLKID_FCLK_DIV7_DIV 79
#define CLKID_VCLK2_VENCI0 80
#define CLKID_VCLK2_VENCI1 81
#define CLKID_VCLK2_VENCP0 82
@@ -90,26 +108,54 @@
#define CLKID_VCLK2_VENCL 97
#define CLKID_VCLK2_OTHER1 98
#define CLKID_FCLK_DIV2P5 99
+#define CLKID_FCLK_DIV2P5_DIV 100
+#define CLKID_FIXED_PLL_DCO 101
+#define CLKID_SYS_PLL_DCO 102
+#define CLKID_GP0_PLL_DCO 103
+#define CLKID_HIFI_PLL_DCO 104
#define CLKID_DMA 105
#define CLKID_EFUSE 106
#define CLKID_ROM_BOOT 107
#define CLKID_RESET_SEC 108
#define CLKID_SEC_AHB_APB3 109
#define CLKID_VPU_0_SEL 110
+#define CLKID_VPU_0_DIV 111
#define CLKID_VPU_0 112
#define CLKID_VPU_1_SEL 113
+#define CLKID_VPU_1_DIV 114
#define CLKID_VPU_1 115
#define CLKID_VPU 116
#define CLKID_VAPB_0_SEL 117
+#define CLKID_VAPB_0_DIV 118
#define CLKID_VAPB_0 119
#define CLKID_VAPB_1_SEL 120
+#define CLKID_VAPB_1_DIV 121
#define CLKID_VAPB_1 122
#define CLKID_VAPB_SEL 123
#define CLKID_VAPB 124
+#define CLKID_HDMI_PLL_DCO 125
+#define CLKID_HDMI_PLL_OD 126
+#define CLKID_HDMI_PLL_OD2 127
#define CLKID_HDMI_PLL 128
#define CLKID_VID_PLL 129
+#define CLKID_VID_PLL_SEL 130
+#define CLKID_VID_PLL_DIV 131
+#define CLKID_VCLK_SEL 132
+#define CLKID_VCLK2_SEL 133
+#define CLKID_VCLK_INPUT 134
+#define CLKID_VCLK2_INPUT 135
+#define CLKID_VCLK_DIV 136
+#define CLKID_VCLK2_DIV 137
#define CLKID_VCLK 138
#define CLKID_VCLK2 139
+#define CLKID_VCLK_DIV2_EN 140
+#define CLKID_VCLK_DIV4_EN 141
+#define CLKID_VCLK_DIV6_EN 142
+#define CLKID_VCLK_DIV12_EN 143
+#define CLKID_VCLK2_DIV2_EN 144
+#define CLKID_VCLK2_DIV4_EN 145
+#define CLKID_VCLK2_DIV6_EN 146
+#define CLKID_VCLK2_DIV12_EN 147
#define CLKID_VCLK_DIV1 148
#define CLKID_VCLK_DIV2 149
#define CLKID_VCLK_DIV4 150
@@ -120,34 +166,126 @@
#define CLKID_VCLK2_DIV4 155
#define CLKID_VCLK2_DIV6 156
#define CLKID_VCLK2_DIV12 157
+#define CLKID_CTS_ENCI_SEL 158
+#define CLKID_CTS_ENCP_SEL 159
+#define CLKID_CTS_VDAC_SEL 160
+#define CLKID_HDMI_TX_SEL 161
#define CLKID_CTS_ENCI 162
#define CLKID_CTS_ENCP 163
#define CLKID_CTS_VDAC 164
#define CLKID_HDMI_TX 165
+#define CLKID_HDMI_SEL 166
+#define CLKID_HDMI_DIV 167
#define CLKID_HDMI 168
#define CLKID_MALI_0_SEL 169
+#define CLKID_MALI_0_DIV 170
#define CLKID_MALI_0 171
#define CLKID_MALI_1_SEL 172
+#define CLKID_MALI_1_DIV 173
#define CLKID_MALI_1 174
#define CLKID_MALI 175
+#define CLKID_MPLL_50M_DIV 176
#define CLKID_MPLL_50M 177
+#define CLKID_SYS_PLL_DIV16_EN 178
+#define CLKID_SYS_PLL_DIV16 179
+#define CLKID_CPU_CLK_DYN0_SEL 180
+#define CLKID_CPU_CLK_DYN0_DIV 181
+#define CLKID_CPU_CLK_DYN0 182
+#define CLKID_CPU_CLK_DYN1_SEL 183
+#define CLKID_CPU_CLK_DYN1_DIV 184
+#define CLKID_CPU_CLK_DYN1 185
+#define CLKID_CPU_CLK_DYN 186
#define CLKID_CPU_CLK 187
+#define CLKID_CPU_CLK_DIV16_EN 188
+#define CLKID_CPU_CLK_DIV16 189
+#define CLKID_CPU_CLK_APB_DIV 190
+#define CLKID_CPU_CLK_APB 191
+#define CLKID_CPU_CLK_ATB_DIV 192
+#define CLKID_CPU_CLK_ATB 193
+#define CLKID_CPU_CLK_AXI_DIV 194
+#define CLKID_CPU_CLK_AXI 195
+#define CLKID_CPU_CLK_TRACE_DIV 196
+#define CLKID_CPU_CLK_TRACE 197
+#define CLKID_PCIE_PLL_DCO 198
+#define CLKID_PCIE_PLL_DCO_DIV2 199
+#define CLKID_PCIE_PLL_OD 200
#define CLKID_PCIE_PLL 201
+#define CLKID_VDEC_1_SEL 202
+#define CLKID_VDEC_1_DIV 203
#define CLKID_VDEC_1 204
+#define CLKID_VDEC_HEVC_SEL 205
+#define CLKID_VDEC_HEVC_DIV 206
#define CLKID_VDEC_HEVC 207
+#define CLKID_VDEC_HEVCF_SEL 208
+#define CLKID_VDEC_HEVCF_DIV 209
#define CLKID_VDEC_HEVCF 210
+#define CLKID_TS_DIV 211
#define CLKID_TS 212
+#define CLKID_SYS1_PLL_DCO 213
+#define CLKID_SYS1_PLL 214
+#define CLKID_SYS1_PLL_DIV16_EN 215
+#define CLKID_SYS1_PLL_DIV16 216
+#define CLKID_CPUB_CLK_DYN0_SEL 217
+#define CLKID_CPUB_CLK_DYN0_DIV 218
+#define CLKID_CPUB_CLK_DYN0 219
+#define CLKID_CPUB_CLK_DYN1_SEL 220
+#define CLKID_CPUB_CLK_DYN1_DIV 221
+#define CLKID_CPUB_CLK_DYN1 222
+#define CLKID_CPUB_CLK_DYN 223
#define CLKID_CPUB_CLK 224
+#define CLKID_CPUB_CLK_DIV16_EN 225
+#define CLKID_CPUB_CLK_DIV16 226
+#define CLKID_CPUB_CLK_DIV2 227
+#define CLKID_CPUB_CLK_DIV3 228
+#define CLKID_CPUB_CLK_DIV4 229
+#define CLKID_CPUB_CLK_DIV5 230
+#define CLKID_CPUB_CLK_DIV6 231
+#define CLKID_CPUB_CLK_DIV7 232
+#define CLKID_CPUB_CLK_DIV8 233
+#define CLKID_CPUB_CLK_APB_SEL 234
+#define CLKID_CPUB_CLK_APB 235
+#define CLKID_CPUB_CLK_ATB_SEL 236
+#define CLKID_CPUB_CLK_ATB 237
+#define CLKID_CPUB_CLK_AXI_SEL 238
+#define CLKID_CPUB_CLK_AXI 239
+#define CLKID_CPUB_CLK_TRACE_SEL 240
+#define CLKID_CPUB_CLK_TRACE 241
+#define CLKID_GP1_PLL_DCO 242
#define CLKID_GP1_PLL 243
+#define CLKID_DSU_CLK_DYN0_SEL 244
+#define CLKID_DSU_CLK_DYN0_DIV 245
+#define CLKID_DSU_CLK_DYN0 246
+#define CLKID_DSU_CLK_DYN1_SEL 247
+#define CLKID_DSU_CLK_DYN1_DIV 248
+#define CLKID_DSU_CLK_DYN1 249
+#define CLKID_DSU_CLK_DYN 250
+#define CLKID_DSU_CLK_FINAL 251
#define CLKID_DSU_CLK 252
#define CLKID_CPU1_CLK 253
#define CLKID_CPU2_CLK 254
#define CLKID_CPU3_CLK 255
+#define CLKID_SPICC0_SCLK_SEL 256
+#define CLKID_SPICC0_SCLK_DIV 257
#define CLKID_SPICC0_SCLK 258
+#define CLKID_SPICC1_SCLK_SEL 259
+#define CLKID_SPICC1_SCLK_DIV 260
#define CLKID_SPICC1_SCLK 261
+#define CLKID_NNA_AXI_CLK_SEL 262
+#define CLKID_NNA_AXI_CLK_DIV 263
#define CLKID_NNA_AXI_CLK 264
+#define CLKID_NNA_CORE_CLK_SEL 265
+#define CLKID_NNA_CORE_CLK_DIV 266
#define CLKID_NNA_CORE_CLK 267
+#define CLKID_MIPI_DSI_PXCLK_DIV 268
#define CLKID_MIPI_DSI_PXCLK_SEL 269
#define CLKID_MIPI_DSI_PXCLK 270
+#define CLKID_CTS_ENCL 271
+#define CLKID_CTS_ENCL_SEL 272
+#define CLKID_MIPI_ISP_DIV 273
+#define CLKID_MIPI_ISP_SEL 274
+#define CLKID_MIPI_ISP 275
+#define CLKID_MIPI_ISP_GATE 276
+#define CLKID_MIPI_ISP_CSI_PHY0 277
+#define CLKID_MIPI_ISP_CSI_PHY1 278
#endif /* __G12A_CLKC_H */
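
The CLKID constants above exist so board device trees can name clocks instead of hard-coding provider indices, and exposing the formerly internal _SEL/_DIV intermediates (as this hunk does) is what lets a DTS steer mux parents. A minimal consumer sketch, in which the &clkc label, the node name/address, and the chosen parent are illustrative assumptions rather than anything taken from this diff:

    #include <dt-bindings/clock/g12a-clkc.h>

    gpu: gpu@ffe40000 {
            clocks = <&clkc CLKID_MALI>;
            clock-names = "core";  /* name is illustrative */
            /* pin the MALI_0 branch mux to a fixed-PLL parent */
            assigned-clocks = <&clkc CLKID_MALI_0_SEL>;
            assigned-clock-parents = <&clkc CLKID_FCLK_DIV3>;
    };
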
diff --git a/include/dt-bindings/clock/google,gs101-acpm.h b/include/dt-bindings/clock/google,gs101-acpm.h
new file mode 100644
index 000000000000..e2ba89e09fa6
--- /dev/null
+++ b/include/dt-bindings/clock/google,gs101-acpm.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Device Tree binding constants for Google gs101 ACPM clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_GOOGLE_GS101_ACPM_H
+#define _DT_BINDINGS_CLOCK_GOOGLE_GS101_ACPM_H
+
+#define GS101_CLK_ACPM_DVFS_MIF 0
+#define GS101_CLK_ACPM_DVFS_INT 1
+#define GS101_CLK_ACPM_DVFS_CPUCL0 2
+#define GS101_CLK_ACPM_DVFS_CPUCL1 3
+#define GS101_CLK_ACPM_DVFS_CPUCL2 4
+#define GS101_CLK_ACPM_DVFS_G3D 5
+#define GS101_CLK_ACPM_DVFS_G3DL2 6
+#define GS101_CLK_ACPM_DVFS_TPU 7
+#define GS101_CLK_ACPM_DVFS_INTCAM 8
+#define GS101_CLK_ACPM_DVFS_TNR 9
+#define GS101_CLK_ACPM_DVFS_CAM 10
+#define GS101_CLK_ACPM_DVFS_MFC 11
+#define GS101_CLK_ACPM_DVFS_DISP 12
+#define GS101_CLK_ACPM_DVFS_BO 13
+
+#endif /* _DT_BINDINGS_CLOCK_GOOGLE_GS101_ACPM_H */
diff --git a/include/dt-bindings/clock/google,gs101.h b/include/dt-bindings/clock/google,gs101.h
new file mode 100644
index 000000000000..442f9e9037dc
--- /dev/null
+++ b/include/dt-bindings/clock/google,gs101.h
@@ -0,0 +1,637 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Linaro Ltd.
+ * Author: Peter Griffin <peter.griffin@linaro.org>
+ *
+ * Device Tree binding constants for Google gs101 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_GOOGLE_GS101_H
+#define _DT_BINDINGS_CLOCK_GOOGLE_GS101_H
+
+/* CMU_TOP PLL */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_FOUT_SHARED2_PLL 3
+#define CLK_FOUT_SHARED3_PLL 4
+#define CLK_FOUT_SPARE_PLL 5
+
+/* CMU_TOP MUX */
+#define CLK_MOUT_PLL_SHARED0 6
+#define CLK_MOUT_PLL_SHARED1 7
+#define CLK_MOUT_PLL_SHARED2 8
+#define CLK_MOUT_PLL_SHARED3 9
+#define CLK_MOUT_PLL_SPARE 10
+#define CLK_MOUT_CMU_BO_BUS 11
+#define CLK_MOUT_CMU_BUS0_BUS 12
+#define CLK_MOUT_CMU_BUS1_BUS 13
+#define CLK_MOUT_CMU_BUS2_BUS 14
+#define CLK_MOUT_CMU_CIS_CLK0 15
+#define CLK_MOUT_CMU_CIS_CLK1 16
+#define CLK_MOUT_CMU_CIS_CLK2 17
+#define CLK_MOUT_CMU_CIS_CLK3 18
+#define CLK_MOUT_CMU_CIS_CLK4 19
+#define CLK_MOUT_CMU_CIS_CLK5 20
+#define CLK_MOUT_CMU_CIS_CLK6 21
+#define CLK_MOUT_CMU_CIS_CLK7 22
+#define CLK_MOUT_CMU_CMU_BOOST 23
+#define CLK_MOUT_CMU_BOOST_OPTION1 24
+#define CLK_MOUT_CMU_CORE_BUS 25
+#define CLK_MOUT_CMU_CPUCL0_DBG 26
+#define CLK_MOUT_CMU_CPUCL0_SWITCH 27
+#define CLK_MOUT_CMU_CPUCL1_SWITCH 28
+#define CLK_MOUT_CMU_CPUCL2_SWITCH 29
+#define CLK_MOUT_CMU_CSIS_BUS 30
+#define CLK_MOUT_CMU_DISP_BUS 31
+#define CLK_MOUT_CMU_DNS_BUS 32
+#define CLK_MOUT_CMU_DPU_BUS 33
+#define CLK_MOUT_CMU_EH_BUS 34
+#define CLK_MOUT_CMU_G2D_G2D 35
+#define CLK_MOUT_CMU_G2D_MSCL 36
+#define CLK_MOUT_CMU_G3AA_G3AA 37
+#define CLK_MOUT_CMU_G3D_BUSD 38
+#define CLK_MOUT_CMU_G3D_GLB 39
+#define CLK_MOUT_CMU_G3D_SWITCH 40
+#define CLK_MOUT_CMU_GDC_GDC0 41
+#define CLK_MOUT_CMU_GDC_GDC1 42
+#define CLK_MOUT_CMU_GDC_SCSC 43
+#define CLK_MOUT_CMU_HPM 44
+#define CLK_MOUT_CMU_HSI0_BUS 45
+#define CLK_MOUT_CMU_HSI0_DPGTC 46
+#define CLK_MOUT_CMU_HSI0_USB31DRD 47
+#define CLK_MOUT_CMU_HSI0_USBDPDBG 48
+#define CLK_MOUT_CMU_HSI1_BUS 49
+#define CLK_MOUT_CMU_HSI1_PCIE 50
+#define CLK_MOUT_CMU_HSI2_BUS 51
+#define CLK_MOUT_CMU_HSI2_MMC_CARD 52
+#define CLK_MOUT_CMU_HSI2_PCIE 53
+#define CLK_MOUT_CMU_HSI2_UFS_EMBD 54
+#define CLK_MOUT_CMU_IPP_BUS 55
+#define CLK_MOUT_CMU_ITP_BUS 56
+#define CLK_MOUT_CMU_MCSC_ITSC 57
+#define CLK_MOUT_CMU_MCSC_MCSC 58
+#define CLK_MOUT_CMU_MFC_MFC 59
+#define CLK_MOUT_CMU_MIF_BUSP 60
+#define CLK_MOUT_CMU_MIF_SWITCH 61
+#define CLK_MOUT_CMU_MISC_BUS 62
+#define CLK_MOUT_CMU_MISC_SSS 63
+#define CLK_MOUT_CMU_PDP_BUS 64
+#define CLK_MOUT_CMU_PDP_VRA 65
+#define CLK_MOUT_CMU_PERIC0_BUS 66
+#define CLK_MOUT_CMU_PERIC0_IP 67
+#define CLK_MOUT_CMU_PERIC1_BUS 68
+#define CLK_MOUT_CMU_PERIC1_IP 69
+#define CLK_MOUT_CMU_TNR_BUS 70
+#define CLK_MOUT_CMU_TOP_BOOST_OPTION1 71
+#define CLK_MOUT_CMU_TOP_CMUREF 72
+#define CLK_MOUT_CMU_TPU_BUS 73
+#define CLK_MOUT_CMU_TPU_TPU 74
+#define CLK_MOUT_CMU_TPU_TPUCTL 75
+#define CLK_MOUT_CMU_TPU_UART 76
+#define CLK_MOUT_CMU_CMUREF 77
+
+/* CMU_TOP Dividers */
+#define CLK_DOUT_CMU_BO_BUS 78
+#define CLK_DOUT_CMU_BUS0_BUS 79
+#define CLK_DOUT_CMU_BUS1_BUS 80
+#define CLK_DOUT_CMU_BUS2_BUS 81
+#define CLK_DOUT_CMU_CIS_CLK0 82
+#define CLK_DOUT_CMU_CIS_CLK1 83
+#define CLK_DOUT_CMU_CIS_CLK2 84
+#define CLK_DOUT_CMU_CIS_CLK3 85
+#define CLK_DOUT_CMU_CIS_CLK4 86
+#define CLK_DOUT_CMU_CIS_CLK5 87
+#define CLK_DOUT_CMU_CIS_CLK6 88
+#define CLK_DOUT_CMU_CIS_CLK7 89
+#define CLK_DOUT_CMU_CORE_BUS 90
+#define CLK_DOUT_CMU_CPUCL0_DBG 91
+#define CLK_DOUT_CMU_CPUCL0_SWITCH 92
+#define CLK_DOUT_CMU_CPUCL1_SWITCH 93
+#define CLK_DOUT_CMU_CPUCL2_SWITCH 94
+#define CLK_DOUT_CMU_CSIS_BUS 95
+#define CLK_DOUT_CMU_DISP_BUS 96
+#define CLK_DOUT_CMU_DNS_BUS 97
+#define CLK_DOUT_CMU_DPU_BUS 98
+#define CLK_DOUT_CMU_EH_BUS 99
+#define CLK_DOUT_CMU_G2D_G2D 100
+#define CLK_DOUT_CMU_G2D_MSCL 101
+#define CLK_DOUT_CMU_G3AA_G3AA 102
+#define CLK_DOUT_CMU_G3D_BUSD 103
+#define CLK_DOUT_CMU_G3D_GLB 104
+#define CLK_DOUT_CMU_G3D_SWITCH 105
+#define CLK_DOUT_CMU_GDC_GDC0 106
+#define CLK_DOUT_CMU_GDC_GDC1 107
+#define CLK_DOUT_CMU_GDC_SCSC 108
+#define CLK_DOUT_CMU_CMU_HPM 109
+#define CLK_DOUT_CMU_HSI0_BUS 110
+#define CLK_DOUT_CMU_HSI0_DPGTC 111
+#define CLK_DOUT_CMU_HSI0_USB31DRD 112
+#define CLK_DOUT_CMU_HSI0_USBDPDBG 113
+#define CLK_DOUT_CMU_HSI1_BUS 114
+#define CLK_DOUT_CMU_HSI1_PCIE 115
+#define CLK_DOUT_CMU_HSI2_BUS 116
+#define CLK_DOUT_CMU_HSI2_MMC_CARD 117
+#define CLK_DOUT_CMU_HSI2_PCIE 118
+#define CLK_DOUT_CMU_HSI2_UFS_EMBD 119
+#define CLK_DOUT_CMU_IPP_BUS 120
+#define CLK_DOUT_CMU_ITP_BUS 121
+#define CLK_DOUT_CMU_MCSC_ITSC 122
+#define CLK_DOUT_CMU_MCSC_MCSC 123
+#define CLK_DOUT_CMU_MFC_MFC 124
+#define CLK_DOUT_CMU_MIF_BUSP 125
+#define CLK_DOUT_CMU_MISC_BUS 126
+#define CLK_DOUT_CMU_MISC_SSS 127
+#define CLK_DOUT_CMU_OTP 128
+#define CLK_DOUT_CMU_PDP_BUS 129
+#define CLK_DOUT_CMU_PDP_VRA 130
+#define CLK_DOUT_CMU_PERIC0_BUS 131
+#define CLK_DOUT_CMU_PERIC0_IP 132
+#define CLK_DOUT_CMU_PERIC1_BUS 133
+#define CLK_DOUT_CMU_PERIC1_IP 134
+#define CLK_DOUT_CMU_TNR_BUS 135
+#define CLK_DOUT_CMU_TPU_BUS 136
+#define CLK_DOUT_CMU_TPU_TPU 137
+#define CLK_DOUT_CMU_TPU_TPUCTL 138
+#define CLK_DOUT_CMU_TPU_UART 139
+#define CLK_DOUT_CMU_CMU_BOOST 140
+#define CLK_DOUT_CMU_CMU_CMUREF 141
+#define CLK_DOUT_CMU_SHARED0_DIV2 142
+#define CLK_DOUT_CMU_SHARED0_DIV3 143
+#define CLK_DOUT_CMU_SHARED0_DIV4 144
+#define CLK_DOUT_CMU_SHARED0_DIV5 145
+#define CLK_DOUT_CMU_SHARED1_DIV2 146
+#define CLK_DOUT_CMU_SHARED1_DIV3 147
+#define CLK_DOUT_CMU_SHARED1_DIV4 148
+#define CLK_DOUT_CMU_SHARED2_DIV2 149
+#define CLK_DOUT_CMU_SHARED3_DIV2 150
+
+/* CMU_TOP Gates */
+#define CLK_GOUT_CMU_BUS0_BOOST 151
+#define CLK_GOUT_CMU_BUS1_BOOST 152
+#define CLK_GOUT_CMU_BUS2_BOOST 153
+#define CLK_GOUT_CMU_CORE_BOOST 154
+#define CLK_GOUT_CMU_CPUCL0_BOOST 155
+#define CLK_GOUT_CMU_CPUCL1_BOOST 156
+#define CLK_GOUT_CMU_CPUCL2_BOOST 157
+#define CLK_GOUT_CMU_MIF_BOOST 158
+#define CLK_GOUT_CMU_MIF_SWITCH 159
+#define CLK_GOUT_CMU_BO_BUS 160
+#define CLK_GOUT_CMU_BUS0_BUS 161
+#define CLK_GOUT_CMU_BUS1_BUS 162
+#define CLK_GOUT_CMU_BUS2_BUS 163
+#define CLK_GOUT_CMU_CIS_CLK0 164
+#define CLK_GOUT_CMU_CIS_CLK1 165
+#define CLK_GOUT_CMU_CIS_CLK2 166
+#define CLK_GOUT_CMU_CIS_CLK3 167
+#define CLK_GOUT_CMU_CIS_CLK4 168
+#define CLK_GOUT_CMU_CIS_CLK5 169
+#define CLK_GOUT_CMU_CIS_CLK6 170
+#define CLK_GOUT_CMU_CIS_CLK7 171
+#define CLK_GOUT_CMU_CMU_BOOST 172
+#define CLK_GOUT_CMU_CORE_BUS 173
+#define CLK_GOUT_CMU_CPUCL0_DBG 174
+#define CLK_GOUT_CMU_CPUCL0_SWITCH 175
+#define CLK_GOUT_CMU_CPUCL1_SWITCH 176
+#define CLK_GOUT_CMU_CPUCL2_SWITCH 177
+#define CLK_GOUT_CMU_CSIS_BUS 178
+#define CLK_GOUT_CMU_DISP_BUS 179
+#define CLK_GOUT_CMU_DNS_BUS 180
+#define CLK_GOUT_CMU_DPU_BUS 181
+#define CLK_GOUT_CMU_EH_BUS 182
+#define CLK_GOUT_CMU_G2D_G2D 183
+#define CLK_GOUT_CMU_G2D_MSCL 184
+#define CLK_GOUT_CMU_G3AA_G3AA 185
+#define CLK_GOUT_CMU_G3D_BUSD 186
+#define CLK_GOUT_CMU_G3D_GLB 187
+#define CLK_GOUT_CMU_G3D_SWITCH 188
+#define CLK_GOUT_CMU_GDC_GDC0 189
+#define CLK_GOUT_CMU_GDC_GDC1 190
+#define CLK_GOUT_CMU_GDC_SCSC 191
+#define CLK_GOUT_CMU_HPM 192
+#define CLK_GOUT_CMU_HSI0_BUS 193
+#define CLK_GOUT_CMU_HSI0_DPGTC 194
+#define CLK_GOUT_CMU_HSI0_USB31DRD 195
+#define CLK_GOUT_CMU_HSI0_USBDPDBG 196
+#define CLK_GOUT_CMU_HSI1_BUS 197
+#define CLK_GOUT_CMU_HSI1_PCIE 198
+#define CLK_GOUT_CMU_HSI2_BUS 199
+#define CLK_GOUT_CMU_HSI2_MMC_CARD 200
+#define CLK_GOUT_CMU_HSI2_PCIE 201
+#define CLK_GOUT_CMU_HSI2_UFS_EMBD 202
+#define CLK_GOUT_CMU_IPP_BUS 203
+#define CLK_GOUT_CMU_ITP_BUS 204
+#define CLK_GOUT_CMU_MCSC_ITSC 205
+#define CLK_GOUT_CMU_MCSC_MCSC 206
+#define CLK_GOUT_CMU_MFC_MFC 207
+#define CLK_GOUT_CMU_MIF_BUSP 208
+#define CLK_GOUT_CMU_MISC_BUS 209
+#define CLK_GOUT_CMU_MISC_SSS 210
+#define CLK_GOUT_CMU_PDP_BUS 211
+#define CLK_GOUT_CMU_PDP_VRA 212
+#define CLK_GOUT_CMU_G3AA 213
+#define CLK_GOUT_CMU_PERIC0_BUS 214
+#define CLK_GOUT_CMU_PERIC0_IP 215
+#define CLK_GOUT_CMU_PERIC1_BUS 216
+#define CLK_GOUT_CMU_PERIC1_IP 217
+#define CLK_GOUT_CMU_TNR_BUS 218
+#define CLK_GOUT_CMU_TOP_CMUREF 219
+#define CLK_GOUT_CMU_TPU_BUS 220
+#define CLK_GOUT_CMU_TPU_TPU 221
+#define CLK_GOUT_CMU_TPU_TPUCTL 222
+#define CLK_GOUT_CMU_TPU_UART 223
+
+/* CMU_APM */
+#define CLK_MOUT_APM_FUNC 1
+#define CLK_MOUT_APM_FUNCSRC 2
+#define CLK_DOUT_APM_BOOST 3
+#define CLK_DOUT_APM_USI0_UART 4
+#define CLK_DOUT_APM_USI0_USI 5
+#define CLK_DOUT_APM_USI1_UART 6
+#define CLK_GOUT_APM_APM_CMU_APM_PCLK 7
+#define CLK_GOUT_BUS0_BOOST_OPTION1 8
+#define CLK_GOUT_CMU_BOOST_OPTION1 9
+#define CLK_GOUT_CORE_BOOST_OPTION1 10
+#define CLK_GOUT_APM_FUNC 11
+#define CLK_GOUT_APM_APBIF_GPIO_ALIVE_PCLK 12
+#define CLK_GOUT_APM_APBIF_GPIO_FAR_ALIVE_PCLK 13
+#define CLK_GOUT_APM_APBIF_PMU_ALIVE_PCLK 14
+#define CLK_GOUT_APM_APBIF_RTC_PCLK 15
+#define CLK_GOUT_APM_APBIF_TRTC_PCLK 16
+#define CLK_GOUT_APM_APM_USI0_UART_IPCLK 17
+#define CLK_GOUT_APM_APM_USI0_UART_PCLK 18
+#define CLK_GOUT_APM_APM_USI0_USI_IPCLK 19
+#define CLK_GOUT_APM_APM_USI0_USI_PCLK 20
+#define CLK_GOUT_APM_APM_USI1_UART_IPCLK 21
+#define CLK_GOUT_APM_APM_USI1_UART_PCLK 22
+#define CLK_GOUT_APM_D_TZPC_APM_PCLK 23
+#define CLK_GOUT_APM_GPC_APM_PCLK 24
+#define CLK_GOUT_APM_GREBEINTEGRATION_HCLK 25
+#define CLK_GOUT_APM_INTMEM_ACLK 26
+#define CLK_GOUT_APM_INTMEM_PCLK 27
+#define CLK_GOUT_APM_LHM_AXI_G_SWD_I_CLK 28
+#define CLK_GOUT_APM_LHM_AXI_P_AOCAPM_I_CLK 29
+#define CLK_GOUT_APM_LHM_AXI_P_APM_I_CLK 30
+#define CLK_GOUT_APM_LHS_AXI_D_APM_I_CLK 31
+#define CLK_GOUT_APM_LHS_AXI_G_DBGCORE_I_CLK 32
+#define CLK_GOUT_APM_LHS_AXI_G_SCAN2DRAM_I_CLK 33
+#define CLK_GOUT_APM_MAILBOX_APM_AOC_PCLK 34
+#define CLK_GOUT_APM_MAILBOX_APM_AP_PCLK 35
+#define CLK_GOUT_APM_MAILBOX_APM_GSA_PCLK 36
+#define CLK_GOUT_APM_MAILBOX_APM_SWD_PCLK 37
+#define CLK_GOUT_APM_MAILBOX_APM_TPU_PCLK 38
+#define CLK_GOUT_APM_MAILBOX_AP_AOC_PCLK 39
+#define CLK_GOUT_APM_MAILBOX_AP_DBGCORE_PCLK 40
+#define CLK_GOUT_APM_PMU_INTR_GEN_PCLK 41
+#define CLK_GOUT_APM_ROM_CRC32_HOST_ACLK 42
+#define CLK_GOUT_APM_ROM_CRC32_HOST_PCLK 43
+#define CLK_GOUT_APM_CLK_APM_BUS_CLK 44
+#define CLK_GOUT_APM_CLK_APM_USI0_UART_CLK 45
+#define CLK_GOUT_APM_CLK_APM_USI0_USI_CLK 46
+#define CLK_GOUT_APM_CLK_APM_USI1_UART_CLK 47
+#define CLK_GOUT_APM_SPEEDY_APM_PCLK 48
+#define CLK_GOUT_APM_SPEEDY_SUB_APM_PCLK 49
+#define CLK_GOUT_APM_SSMT_D_APM_ACLK 50
+#define CLK_GOUT_APM_SSMT_D_APM_PCLK 51
+#define CLK_GOUT_APM_SSMT_G_DBGCORE_ACLK 52
+#define CLK_GOUT_APM_SSMT_G_DBGCORE_PCLK 53
+#define CLK_GOUT_APM_SS_DBGCORE_SS_DBGCORE_HCLK 54
+#define CLK_GOUT_APM_SYSMMU_D_APM_CLK_S2 55
+#define CLK_GOUT_APM_SYSREG_APM_PCLK 56
+#define CLK_GOUT_APM_UASC_APM_ACLK 57
+#define CLK_GOUT_APM_UASC_APM_PCLK 58
+#define CLK_GOUT_APM_UASC_DBGCORE_ACLK 59
+#define CLK_GOUT_APM_UASC_DBGCORE_PCLK 60
+#define CLK_GOUT_APM_UASC_G_SWD_ACLK 61
+#define CLK_GOUT_APM_UASC_G_SWD_PCLK 62
+#define CLK_GOUT_APM_UASC_P_AOCAPM_ACLK 63
+#define CLK_GOUT_APM_UASC_P_AOCAPM_PCLK 64
+#define CLK_GOUT_APM_UASC_P_APM_ACLK 65
+#define CLK_GOUT_APM_UASC_P_APM_PCLK 66
+#define CLK_GOUT_APM_WDT_APM_PCLK 67
+#define CLK_GOUT_APM_XIU_DP_APM_ACLK 68
+#define CLK_APM_PLL_DIV2_APM 69
+#define CLK_APM_PLL_DIV4_APM 70
+#define CLK_APM_PLL_DIV16_APM 71
+
+/* CMU_HSI0 */
+#define CLK_FOUT_USB_PLL 1
+#define CLK_MOUT_PLL_USB 2
+#define CLK_MOUT_HSI0_ALT_USER 3
+#define CLK_MOUT_HSI0_BUS_USER 4
+#define CLK_MOUT_HSI0_DPGTC_USER 5
+#define CLK_MOUT_HSI0_TCXO_USER 6
+#define CLK_MOUT_HSI0_USB20_USER 7
+#define CLK_MOUT_HSI0_USB31DRD_USER 8
+#define CLK_MOUT_HSI0_USBDPDBG_USER 9
+#define CLK_MOUT_HSI0_BUS 10
+#define CLK_MOUT_HSI0_USB20_REF 11
+#define CLK_MOUT_HSI0_USB31DRD 12
+#define CLK_DOUT_HSI0_USB31DRD 13
+#define CLK_GOUT_HSI0_PCLK 14
+#define CLK_GOUT_HSI0_USB31DRD_I_USB31DRD_SUSPEND_CLK_26 15
+#define CLK_GOUT_HSI0_CLK_HSI0_ALT 16
+#define CLK_GOUT_HSI0_DP_LINK_I_DP_GTC_CLK 17
+#define CLK_GOUT_HSI0_DP_LINK_I_PCLK 18
+#define CLK_GOUT_HSI0_D_TZPC_HSI0_PCLK 19
+#define CLK_GOUT_HSI0_ETR_MIU_I_ACLK 20
+#define CLK_GOUT_HSI0_ETR_MIU_I_PCLK 21
+#define CLK_GOUT_HSI0_GPC_HSI0_PCLK 22
+#define CLK_GOUT_HSI0_LHM_AXI_G_ETR_HSI0_I_CLK 23
+#define CLK_GOUT_HSI0_LHM_AXI_P_AOCHSI0_I_CLK 24
+#define CLK_GOUT_HSI0_LHM_AXI_P_HSI0_I_CLK 25
+#define CLK_GOUT_HSI0_LHS_ACEL_D_HSI0_I_CLK 26
+#define CLK_GOUT_HSI0_LHS_AXI_D_HSI0AOC_I_CLK 27
+#define CLK_GOUT_HSI0_PPMU_HSI0_AOC_ACLK 28
+#define CLK_GOUT_HSI0_PPMU_HSI0_AOC_PCLK 29
+#define CLK_GOUT_HSI0_PPMU_HSI0_BUS0_ACLK 30
+#define CLK_GOUT_HSI0_PPMU_HSI0_BUS0_PCLK 31
+#define CLK_GOUT_HSI0_CLK_HSI0_BUS_CLK 32
+#define CLK_GOUT_HSI0_SSMT_USB_ACLK 33
+#define CLK_GOUT_HSI0_SSMT_USB_PCLK 34
+#define CLK_GOUT_HSI0_SYSMMU_USB_CLK_S2 35
+#define CLK_GOUT_HSI0_SYSREG_HSI0_PCLK 36
+#define CLK_GOUT_HSI0_UASC_HSI0_CTRL_ACLK 37
+#define CLK_GOUT_HSI0_UASC_HSI0_CTRL_PCLK 38
+#define CLK_GOUT_HSI0_UASC_HSI0_LINK_ACLK 39
+#define CLK_GOUT_HSI0_UASC_HSI0_LINK_PCLK 40
+#define CLK_GOUT_HSI0_USB31DRD_ACLK_PHYCTRL 41
+#define CLK_GOUT_HSI0_USB31DRD_BUS_CLK_EARLY 42
+#define CLK_GOUT_HSI0_USB31DRD_I_USB20_PHY_REFCLK_26 43
+#define CLK_GOUT_HSI0_USB31DRD_I_USB31DRD_REF_CLK_40 44
+#define CLK_GOUT_HSI0_USB31DRD_I_USBDPPHY_REF_SOC_PLL 45
+#define CLK_GOUT_HSI0_USB31DRD_I_USBDPPHY_SCL_APB_PCLK 46
+#define CLK_GOUT_HSI0_USB31DRD_I_USBPCS_APB_CLK 47
+#define CLK_GOUT_HSI0_USB31DRD_USBDPPHY_I_ACLK 48
+#define CLK_GOUT_HSI0_USB31DRD_USBDPPHY_UDBG_I_APB_PCLK 49
+#define CLK_GOUT_HSI0_XIU_D0_HSI0_ACLK 50
+#define CLK_GOUT_HSI0_XIU_D1_HSI0_ACLK 51
+#define CLK_GOUT_HSI0_XIU_P_HSI0_ACLK 52
+
+/* CMU_HSI2 */
+#define CLK_MOUT_HSI2_BUS_USER 1
+#define CLK_MOUT_HSI2_MMC_CARD_USER 2
+#define CLK_MOUT_HSI2_PCIE_USER 3
+#define CLK_MOUT_HSI2_UFS_EMBD_USER 4
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_PHY_REFCLK_IN 5
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_PHY_REFCLK_IN 6
+#define CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4A_1_ACLK 7
+#define CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4A_1_PCLK 8
+#define CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4B_1_ACLK 9
+#define CLK_GOUT_HSI2_SSMT_PCIE_IA_GEN4B_1_PCLK 10
+#define CLK_GOUT_HSI2_D_TZPC_HSI2_PCLK 11
+#define CLK_GOUT_HSI2_GPC_HSI2_PCLK 12
+#define CLK_GOUT_HSI2_GPIO_HSI2_PCLK 13
+#define CLK_GOUT_HSI2_HSI2_CMU_HSI2_PCLK 14
+#define CLK_GOUT_HSI2_LHM_AXI_P_HSI2_I_CLK 15
+#define CLK_GOUT_HSI2_LHS_ACEL_D_HSI2_I_CLK 16
+#define CLK_GOUT_HSI2_MMC_CARD_I_ACLK 17
+#define CLK_GOUT_HSI2_MMC_CARD_SDCLKIN 18
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_DBI_ACLK_UG 19
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_MSTR_ACLK_UG 20
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_SLV_ACLK_UG 21
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_003_I_DRIVER_APB_CLK 22
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_DBI_ACLK_UG 23
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_MSTR_ACLK_UG 24
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_SLV_ACLK_UG 25
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCIE_004_I_DRIVER_APB_CLK 26
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCS_PMA_PHY_UDBG_I_APB_PCLK 27
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCS_PMA_PIPE_PAL_PCIE_I_APB_PCLK 28
+#define CLK_GOUT_HSI2_PCIE_GEN4_1_PCS_PMA_PCIEPHY210X2_QCH_I_APB_PCLK 29
+#define CLK_GOUT_HSI2_PCIE_IA_GEN4A_1_I_CLK 30
+#define CLK_GOUT_HSI2_PCIE_IA_GEN4B_1_I_CLK 31
+#define CLK_GOUT_HSI2_PPMU_HSI2_ACLK 32
+#define CLK_GOUT_HSI2_PPMU_HSI2_PCLK 33
+#define CLK_GOUT_HSI2_QE_MMC_CARD_HSI2_ACLK 34
+#define CLK_GOUT_HSI2_QE_MMC_CARD_HSI2_PCLK 35
+#define CLK_GOUT_HSI2_QE_PCIE_GEN4A_HSI2_ACLK 36
+#define CLK_GOUT_HSI2_QE_PCIE_GEN4A_HSI2_PCLK 37
+#define CLK_GOUT_HSI2_QE_PCIE_GEN4B_HSI2_ACLK 38
+#define CLK_GOUT_HSI2_QE_PCIE_GEN4B_HSI2_PCLK 39
+#define CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_ACLK 40
+#define CLK_GOUT_HSI2_QE_UFS_EMBD_HSI2_PCLK 41
+#define CLK_GOUT_HSI2_CLK_HSI2_BUS_CLK 42
+#define CLK_GOUT_HSI2_CLK_HSI2_OSCCLK_CLK 43
+#define CLK_GOUT_HSI2_SSMT_HSI2_ACLK 44
+#define CLK_GOUT_HSI2_SSMT_HSI2_PCLK 45
+#define CLK_GOUT_HSI2_SYSMMU_HSI2_CLK_S2 46
+#define CLK_GOUT_HSI2_SYSREG_HSI2_PCLK 47
+#define CLK_GOUT_HSI2_UASC_PCIE_GEN4A_DBI_1_ACLK 48
+#define CLK_GOUT_HSI2_UASC_PCIE_GEN4A_DBI_1_PCLK 49
+#define CLK_GOUT_HSI2_UASC_PCIE_GEN4A_SLV_1_ACLK 50
+#define CLK_GOUT_HSI2_UASC_PCIE_GEN4A_SLV_1_PCLK 51
+#define CLK_GOUT_HSI2_UASC_PCIE_GEN4B_DBI_1_ACLK 52
+#define CLK_GOUT_HSI2_UASC_PCIE_GEN4B_DBI_1_PCLK 53
+#define CLK_GOUT_HSI2_UASC_PCIE_GEN4B_SLV_1_ACLK 54
+#define CLK_GOUT_HSI2_UASC_PCIE_GEN4B_SLV_1_PCLK 55
+#define CLK_GOUT_HSI2_UFS_EMBD_I_ACLK 56
+#define CLK_GOUT_HSI2_UFS_EMBD_I_CLK_UNIPRO 57
+#define CLK_GOUT_HSI2_UFS_EMBD_I_FMP_CLK 58
+#define CLK_GOUT_HSI2_XIU_D_HSI2_ACLK 59
+#define CLK_GOUT_HSI2_XIU_P_HSI2_ACLK 60
+
+/* CMU_MISC */
+#define CLK_MOUT_MISC_BUS_USER 1
+#define CLK_MOUT_MISC_SSS_USER 2
+#define CLK_MOUT_MISC_GIC 3
+#define CLK_DOUT_MISC_BUSP 4
+#define CLK_DOUT_MISC_GIC 5
+#define CLK_GOUT_MISC_MISC_CMU_MISC_PCLK 6
+#define CLK_GOUT_MISC_OTP_CON_BIRA_I_OSCCLK 7
+#define CLK_GOUT_MISC_OTP_CON_BISR_I_OSCCLK 8
+#define CLK_GOUT_MISC_OTP_CON_TOP_I_OSCCLK 9
+#define CLK_GOUT_MISC_CLK_MISC_OSCCLK_CLK 10
+#define CLK_GOUT_MISC_ADM_AHB_SSS_HCLKM 11
+#define CLK_GOUT_MISC_AD_APB_DIT_PCLKM 12
+#define CLK_GOUT_MISC_AD_APB_PUF_PCLKM 13
+#define CLK_GOUT_MISC_DIT_ICLKL2A 14
+#define CLK_GOUT_MISC_D_TZPC_MISC_PCLK 15
+#define CLK_GOUT_MISC_GIC_GICCLK 16
+#define CLK_GOUT_MISC_GPC_MISC_PCLK 17
+#define CLK_GOUT_MISC_LHM_AST_ICC_CPUGIC_I_CLK 18
+#define CLK_GOUT_MISC_LHM_AXI_D_SSS_I_CLK 19
+#define CLK_GOUT_MISC_LHM_AXI_P_GIC_I_CLK 20
+#define CLK_GOUT_MISC_LHM_AXI_P_MISC_I_CLK 21
+#define CLK_GOUT_MISC_LHS_ACEL_D_MISC_I_CLK 22
+#define CLK_GOUT_MISC_LHS_AST_IRI_GICCPU_I_CLK 23
+#define CLK_GOUT_MISC_LHS_AXI_D_SSS_I_CLK 24
+#define CLK_GOUT_MISC_MCT_PCLK 25
+#define CLK_GOUT_MISC_OTP_CON_BIRA_PCLK 26
+#define CLK_GOUT_MISC_OTP_CON_BISR_PCLK 27
+#define CLK_GOUT_MISC_OTP_CON_TOP_PCLK 28
+#define CLK_GOUT_MISC_PDMA_ACLK 29
+#define CLK_GOUT_MISC_PPMU_DMA_ACLK 30
+#define CLK_GOUT_MISC_PPMU_MISC_ACLK 31
+#define CLK_GOUT_MISC_PPMU_MISC_PCLK 32
+#define CLK_GOUT_MISC_PUF_I_CLK 33
+#define CLK_GOUT_MISC_QE_DIT_ACLK 34
+#define CLK_GOUT_MISC_QE_DIT_PCLK 35
+#define CLK_GOUT_MISC_QE_PDMA_ACLK 36
+#define CLK_GOUT_MISC_QE_PDMA_PCLK 37
+#define CLK_GOUT_MISC_QE_PPMU_DMA_ACLK 38
+#define CLK_GOUT_MISC_QE_PPMU_DMA_PCLK 39
+#define CLK_GOUT_MISC_QE_RTIC_ACLK 40
+#define CLK_GOUT_MISC_QE_RTIC_PCLK 41
+#define CLK_GOUT_MISC_QE_SPDMA_ACLK 42
+#define CLK_GOUT_MISC_QE_SPDMA_PCLK 43
+#define CLK_GOUT_MISC_QE_SSS_ACLK 44
+#define CLK_GOUT_MISC_QE_SSS_PCLK 45
+#define CLK_GOUT_MISC_CLK_MISC_BUSD_CLK 46
+#define CLK_GOUT_MISC_CLK_MISC_BUSP_CLK 47
+#define CLK_GOUT_MISC_CLK_MISC_GIC_CLK 48
+#define CLK_GOUT_MISC_CLK_MISC_SSS_CLK 49
+#define CLK_GOUT_MISC_RTIC_I_ACLK 50
+#define CLK_GOUT_MISC_RTIC_I_PCLK 51
+#define CLK_GOUT_MISC_SPDMA_ACLK 52
+#define CLK_GOUT_MISC_SSMT_DIT_ACLK 53
+#define CLK_GOUT_MISC_SSMT_DIT_PCLK 54
+#define CLK_GOUT_MISC_SSMT_PDMA_ACLK 55
+#define CLK_GOUT_MISC_SSMT_PDMA_PCLK 56
+#define CLK_GOUT_MISC_SSMT_PPMU_DMA_ACLK 57
+#define CLK_GOUT_MISC_SSMT_PPMU_DMA_PCLK 58
+#define CLK_GOUT_MISC_SSMT_RTIC_ACLK 59
+#define CLK_GOUT_MISC_SSMT_RTIC_PCLK 60
+#define CLK_GOUT_MISC_SSMT_SPDMA_ACLK 61
+#define CLK_GOUT_MISC_SSMT_SPDMA_PCLK 62
+#define CLK_GOUT_MISC_SSMT_SSS_ACLK 63
+#define CLK_GOUT_MISC_SSMT_SSS_PCLK 64
+#define CLK_GOUT_MISC_SSS_I_ACLK 65
+#define CLK_GOUT_MISC_SSS_I_PCLK 66
+#define CLK_GOUT_MISC_SYSMMU_MISC_CLK_S2 67
+#define CLK_GOUT_MISC_SYSMMU_SSS_CLK_S1 68
+#define CLK_GOUT_MISC_SYSREG_MISC_PCLK 69
+#define CLK_GOUT_MISC_TMU_SUB_PCLK 70
+#define CLK_GOUT_MISC_TMU_TOP_PCLK 71
+#define CLK_GOUT_MISC_WDT_CLUSTER0_PCLK 72
+#define CLK_GOUT_MISC_WDT_CLUSTER1_PCLK 73
+#define CLK_GOUT_MISC_XIU_D_MISC_ACLK 74
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_BUS_USER 1
+#define CLK_MOUT_PERIC0_I3C_USER 2
+#define CLK_MOUT_PERIC0_USI0_UART_USER 3
+#define CLK_MOUT_PERIC0_USI14_USI_USER 4
+#define CLK_MOUT_PERIC0_USI1_USI_USER 5
+#define CLK_MOUT_PERIC0_USI2_USI_USER 6
+#define CLK_MOUT_PERIC0_USI3_USI_USER 7
+#define CLK_MOUT_PERIC0_USI4_USI_USER 8
+#define CLK_MOUT_PERIC0_USI5_USI_USER 9
+#define CLK_MOUT_PERIC0_USI6_USI_USER 10
+#define CLK_MOUT_PERIC0_USI7_USI_USER 11
+#define CLK_MOUT_PERIC0_USI8_USI_USER 12
+#define CLK_DOUT_PERIC0_I3C 13
+#define CLK_DOUT_PERIC0_USI0_UART 14
+#define CLK_DOUT_PERIC0_USI14_USI 15
+#define CLK_DOUT_PERIC0_USI1_USI 16
+#define CLK_DOUT_PERIC0_USI2_USI 17
+#define CLK_DOUT_PERIC0_USI3_USI 18
+#define CLK_DOUT_PERIC0_USI4_USI 19
+#define CLK_DOUT_PERIC0_USI5_USI 20
+#define CLK_DOUT_PERIC0_USI6_USI 21
+#define CLK_DOUT_PERIC0_USI7_USI 22
+#define CLK_DOUT_PERIC0_USI8_USI 23
+#define CLK_GOUT_PERIC0_IP 24
+#define CLK_GOUT_PERIC0_PERIC0_CMU_PERIC0_PCLK 25
+#define CLK_GOUT_PERIC0_CLK_PERIC0_OSCCLK_CLK 26
+#define CLK_GOUT_PERIC0_D_TZPC_PERIC0_PCLK 27
+#define CLK_GOUT_PERIC0_GPC_PERIC0_PCLK 28
+#define CLK_GOUT_PERIC0_GPIO_PERIC0_PCLK 29
+#define CLK_GOUT_PERIC0_LHM_AXI_P_PERIC0_I_CLK 30
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_0 31
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_1 32
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_10 33
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_11 34
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_12 35
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_13 36
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_14 37
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_15 38
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_2 39
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_3 40
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_4 41
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_5 42
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_6 43
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_7 44
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_8 45
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_9 46
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_0 47
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_1 48
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_10 49
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_11 50
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_12 51
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_13 52
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_14 53
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_15 54
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_2 55
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_3 56
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_4 57
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_5 58
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_6 59
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_7 60
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_8 61
+#define CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_9 62
+#define CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_0 63
+#define CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_2 64
+#define CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_0 65
+#define CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_2 66
+#define CLK_GOUT_PERIC0_CLK_PERIC0_BUSP_CLK 67
+#define CLK_GOUT_PERIC0_CLK_PERIC0_I3C_CLK 68
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI0_UART_CLK 69
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI14_USI_CLK 70
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI1_USI_CLK 71
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI2_USI_CLK 72
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI3_USI_CLK 73
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI4_USI_CLK 74
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI5_USI_CLK 75
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI6_USI_CLK 76
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI7_USI_CLK 77
+#define CLK_GOUT_PERIC0_CLK_PERIC0_USI8_USI_CLK 78
+#define CLK_GOUT_PERIC0_SYSREG_PERIC0_PCLK 79
+
+/* CMU_PERIC1 */
+#define CLK_MOUT_PERIC1_BUS_USER 1
+#define CLK_MOUT_PERIC1_I3C_USER 2
+#define CLK_MOUT_PERIC1_USI0_USI_USER 3
+#define CLK_MOUT_PERIC1_USI10_USI_USER 4
+#define CLK_MOUT_PERIC1_USI11_USI_USER 5
+#define CLK_MOUT_PERIC1_USI12_USI_USER 6
+#define CLK_MOUT_PERIC1_USI13_USI_USER 7
+#define CLK_MOUT_PERIC1_USI9_USI_USER 8
+#define CLK_DOUT_PERIC1_I3C 9
+#define CLK_DOUT_PERIC1_USI0_USI 10
+#define CLK_DOUT_PERIC1_USI10_USI 11
+#define CLK_DOUT_PERIC1_USI11_USI 12
+#define CLK_DOUT_PERIC1_USI12_USI 13
+#define CLK_DOUT_PERIC1_USI13_USI 14
+#define CLK_DOUT_PERIC1_USI9_USI 15
+#define CLK_GOUT_PERIC1_IP 16
+#define CLK_GOUT_PERIC1_PCLK 17
+#define CLK_GOUT_PERIC1_CLK_PERIC1_I3C_CLK 18
+#define CLK_GOUT_PERIC1_CLK_PERIC1_OSCCLK_CLK 19
+#define CLK_GOUT_PERIC1_D_TZPC_PERIC1_PCLK 20
+#define CLK_GOUT_PERIC1_GPC_PERIC1_PCLK 21
+#define CLK_GOUT_PERIC1_GPIO_PERIC1_PCLK 22
+#define CLK_GOUT_PERIC1_LHM_AXI_P_PERIC1_I_CLK 23
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_1 24
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_2 25
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_3 26
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_4 27
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_5 28
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_6 29
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_8 30
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_1 31
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_15 32
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_2 33
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_3 34
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_4 35
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_5 36
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_6 37
+#define CLK_GOUT_PERIC1_PERIC1_TOP0_PCLK_8 38
+#define CLK_GOUT_PERIC1_CLK_PERIC1_BUSP_CLK 39
+#define CLK_GOUT_PERIC1_CLK_PERIC1_USI0_USI_CLK 40
+#define CLK_GOUT_PERIC1_CLK_PERIC1_USI10_USI_CLK 41
+#define CLK_GOUT_PERIC1_CLK_PERIC1_USI11_USI_CLK 42
+#define CLK_GOUT_PERIC1_CLK_PERIC1_USI12_USI_CLK 43
+#define CLK_GOUT_PERIC1_CLK_PERIC1_USI13_USI_CLK 44
+#define CLK_GOUT_PERIC1_CLK_PERIC1_USI9_USI_CLK 45
+#define CLK_GOUT_PERIC1_SYSREG_PERIC1_PCLK 46
+
+#endif /* _DT_BINDINGS_CLOCK_GOOGLE_GS101_H */
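
Note that each CMU block in this header restarts its numbering at 1 (CLK_MOUT_APM_FUNC, CLK_FOUT_USB_PLL and CLK_MOUT_HSI2_BUS_USER are all 1), so the clock-controller phandle is what disambiguates an ID. A hedged consumer sketch, where the &cmu_peric0 label, unit address and clock-names are assumptions:

    #include <dt-bindings/clock/google,gs101.h>

    uart: serial@10a00000 {
            clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_PCLK_0>,
                     <&cmu_peric0 CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_0>;
            clock-names = "uart", "clk_uart_baud0";
    };
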
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 4073eb7a9da1..c0ce5e9c4151 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -15,6 +15,8 @@
#define CLKID_FCLK_DIV5 7
#define CLKID_FCLK_DIV7 8
#define CLKID_GP0_PLL 9
+#define CLKID_MPEG_SEL 10
+#define CLKID_MPEG_DIV 11
#define CLKID_CLK81 12
#define CLKID_MPLL0 13
#define CLKID_MPLL1 14
@@ -102,35 +104,92 @@
#define CLKID_SD_EMMC_C 96
#define CLKID_SAR_ADC_CLK 97
#define CLKID_SAR_ADC_SEL 98
+#define CLKID_SAR_ADC_DIV 99
#define CLKID_MALI_0_SEL 100
+#define CLKID_MALI_0_DIV 101
#define CLKID_MALI_0 102
#define CLKID_MALI_1_SEL 103
+#define CLKID_MALI_1_DIV 104
#define CLKID_MALI_1 105
#define CLKID_MALI 106
#define CLKID_CTS_AMCLK 107
+#define CLKID_CTS_AMCLK_SEL 108
+#define CLKID_CTS_AMCLK_DIV 109
#define CLKID_CTS_MCLK_I958 110
+#define CLKID_CTS_MCLK_I958_SEL 111
+#define CLKID_CTS_MCLK_I958_DIV 112
#define CLKID_CTS_I958 113
#define CLKID_32K_CLK 114
+#define CLKID_32K_CLK_SEL 115
+#define CLKID_32K_CLK_DIV 116
+#define CLKID_SD_EMMC_A_CLK0_SEL 117
+#define CLKID_SD_EMMC_A_CLK0_DIV 118
#define CLKID_SD_EMMC_A_CLK0 119
+#define CLKID_SD_EMMC_B_CLK0_SEL 120
+#define CLKID_SD_EMMC_B_CLK0_DIV 121
#define CLKID_SD_EMMC_B_CLK0 122
+#define CLKID_SD_EMMC_C_CLK0_SEL 123
+#define CLKID_SD_EMMC_C_CLK0_DIV 124
#define CLKID_SD_EMMC_C_CLK0 125
#define CLKID_VPU_0_SEL 126
+#define CLKID_VPU_0_DIV 127
#define CLKID_VPU_0 128
#define CLKID_VPU_1_SEL 129
+#define CLKID_VPU_1_DIV 130
#define CLKID_VPU_1 131
#define CLKID_VPU 132
#define CLKID_VAPB_0_SEL 133
+#define CLKID_VAPB_0_DIV 134
#define CLKID_VAPB_0 135
#define CLKID_VAPB_1_SEL 136
+#define CLKID_VAPB_1_DIV 137
#define CLKID_VAPB_1 138
#define CLKID_VAPB_SEL 139
#define CLKID_VAPB 140
+#define CLKID_HDMI_PLL_PRE_MULT 141
+#define CLKID_MPLL0_DIV 142
+#define CLKID_MPLL1_DIV 143
+#define CLKID_MPLL2_DIV 144
+#define CLKID_MPLL_PREDIV 145
+#define CLKID_FCLK_DIV2_DIV 146
+#define CLKID_FCLK_DIV3_DIV 147
+#define CLKID_FCLK_DIV4_DIV 148
+#define CLKID_FCLK_DIV5_DIV 149
+#define CLKID_FCLK_DIV7_DIV 150
+#define CLKID_VDEC_1_SEL 151
+#define CLKID_VDEC_1_DIV 152
#define CLKID_VDEC_1 153
+#define CLKID_VDEC_HEVC_SEL 154
+#define CLKID_VDEC_HEVC_DIV 155
#define CLKID_VDEC_HEVC 156
+#define CLKID_GEN_CLK_SEL 157
+#define CLKID_GEN_CLK_DIV 158
#define CLKID_GEN_CLK 159
+#define CLKID_FIXED_PLL_DCO 160
+#define CLKID_HDMI_PLL_DCO 161
+#define CLKID_HDMI_PLL_OD 162
+#define CLKID_HDMI_PLL_OD2 163
+#define CLKID_SYS_PLL_DCO 164
+#define CLKID_GP0_PLL_DCO 165
#define CLKID_VID_PLL 166
+#define CLKID_VID_PLL_SEL 167
+#define CLKID_VID_PLL_DIV 168
+#define CLKID_VCLK_SEL 169
+#define CLKID_VCLK2_SEL 170
+#define CLKID_VCLK_INPUT 171
+#define CLKID_VCLK2_INPUT 172
+#define CLKID_VCLK_DIV 173
+#define CLKID_VCLK2_DIV 174
#define CLKID_VCLK 175
#define CLKID_VCLK2 176
+#define CLKID_VCLK_DIV2_EN 177
+#define CLKID_VCLK_DIV4_EN 178
+#define CLKID_VCLK_DIV6_EN 179
+#define CLKID_VCLK_DIV12_EN 180
+#define CLKID_VCLK2_DIV2_EN 181
+#define CLKID_VCLK2_DIV4_EN 182
+#define CLKID_VCLK2_DIV6_EN 183
+#define CLKID_VCLK2_DIV12_EN 184
#define CLKID_VCLK_DIV1 185
#define CLKID_VCLK_DIV2 186
#define CLKID_VCLK_DIV4 187
@@ -141,10 +200,16 @@
#define CLKID_VCLK2_DIV4 192
#define CLKID_VCLK2_DIV6 193
#define CLKID_VCLK2_DIV12 194
+#define CLKID_CTS_ENCI_SEL 195
+#define CLKID_CTS_ENCP_SEL 196
+#define CLKID_CTS_VDAC_SEL 197
+#define CLKID_HDMI_TX_SEL 198
#define CLKID_CTS_ENCI 199
#define CLKID_CTS_ENCP 200
#define CLKID_CTS_VDAC 201
#define CLKID_HDMI_TX 202
+#define CLKID_HDMI_SEL 203
+#define CLKID_HDMI_DIV 204
#define CLKID_HDMI 205
#define CLKID_ACODEC 206
diff --git a/include/dt-bindings/clock/hi3559av100-clock.h b/include/dt-bindings/clock/hi3559av100-clock.h
new file mode 100644
index 000000000000..a4f0e997546c
--- /dev/null
+++ b/include/dt-bindings/clock/hi3559av100-clock.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause */
+/*
+ * Copyright (c) 2019-2020, Huawei Tech. Co., Ltd.
+ *
+ * Author: Dongjiu Geng <gengdongjiu@huawei.com>
+ */
+
+#ifndef __DTS_HI3559AV100_CLOCK_H
+#define __DTS_HI3559AV100_CLOCK_H
+
+/* fixed rate */
+#define HI3559AV100_FIXED_1188M 1
+#define HI3559AV100_FIXED_1000M 2
+#define HI3559AV100_FIXED_842M 3
+#define HI3559AV100_FIXED_792M 4
+#define HI3559AV100_FIXED_750M 5
+#define HI3559AV100_FIXED_710M 6
+#define HI3559AV100_FIXED_680M 7
+#define HI3559AV100_FIXED_667M 8
+#define HI3559AV100_FIXED_631M 9
+#define HI3559AV100_FIXED_600M 10
+#define HI3559AV100_FIXED_568M 11
+#define HI3559AV100_FIXED_500M 12
+#define HI3559AV100_FIXED_475M 13
+#define HI3559AV100_FIXED_428M 14
+#define HI3559AV100_FIXED_400M 15
+#define HI3559AV100_FIXED_396M 16
+#define HI3559AV100_FIXED_300M 17
+#define HI3559AV100_FIXED_250M 18
+#define HI3559AV100_FIXED_198M 19
+#define HI3559AV100_FIXED_187p5M 20
+#define HI3559AV100_FIXED_150M 21
+#define HI3559AV100_FIXED_148p5M 22
+#define HI3559AV100_FIXED_125M 23
+#define HI3559AV100_FIXED_107M 24
+#define HI3559AV100_FIXED_100M 25
+#define HI3559AV100_FIXED_99M 26
+#define HI3559AV100_FIXED_74p25M 27
+#define HI3559AV100_FIXED_72M 28
+#define HI3559AV100_FIXED_60M 29
+#define HI3559AV100_FIXED_54M 30
+#define HI3559AV100_FIXED_50M 31
+#define HI3559AV100_FIXED_49p5M 32
+#define HI3559AV100_FIXED_37p125M 33
+#define HI3559AV100_FIXED_36M 34
+#define HI3559AV100_FIXED_32p4M 35
+#define HI3559AV100_FIXED_27M 36
+#define HI3559AV100_FIXED_25M 37
+#define HI3559AV100_FIXED_24M 38
+#define HI3559AV100_FIXED_12M 39
+#define HI3559AV100_FIXED_3M 40
+#define HI3559AV100_FIXED_1p6M 41
+#define HI3559AV100_FIXED_400K 42
+#define HI3559AV100_FIXED_100K 43
+#define HI3559AV100_FIXED_200M 44
+#define HI3559AV100_FIXED_75M 75
+
+#define HI3559AV100_I2C0_CLK 50
+#define HI3559AV100_I2C1_CLK 51
+#define HI3559AV100_I2C2_CLK 52
+#define HI3559AV100_I2C3_CLK 53
+#define HI3559AV100_I2C4_CLK 54
+#define HI3559AV100_I2C5_CLK 55
+#define HI3559AV100_I2C6_CLK 56
+#define HI3559AV100_I2C7_CLK 57
+#define HI3559AV100_I2C8_CLK 58
+#define HI3559AV100_I2C9_CLK 59
+#define HI3559AV100_I2C10_CLK 60
+#define HI3559AV100_I2C11_CLK 61
+
+#define HI3559AV100_SPI0_CLK 62
+#define HI3559AV100_SPI1_CLK 63
+#define HI3559AV100_SPI2_CLK 64
+#define HI3559AV100_SPI3_CLK 65
+#define HI3559AV100_SPI4_CLK 66
+#define HI3559AV100_SPI5_CLK 67
+#define HI3559AV100_SPI6_CLK 68
+
+#define HI3559AV100_EDMAC_CLK 69
+#define HI3559AV100_EDMAC_AXICLK 70
+#define HI3559AV100_EDMAC1_CLK 71
+#define HI3559AV100_EDMAC1_AXICLK 72
+#define HI3559AV100_VDMAC_CLK 73
+
+/* mux clocks */
+#define HI3559AV100_FMC_MUX 80
+#define HI3559AV100_SYSAPB_MUX 81
+#define HI3559AV100_UART_MUX 82
+#define HI3559AV100_SYSBUS_MUX 83
+#define HI3559AV100_A73_MUX 84
+#define HI3559AV100_MMC0_MUX 85
+#define HI3559AV100_MMC1_MUX 86
+#define HI3559AV100_MMC2_MUX 87
+#define HI3559AV100_MMC3_MUX 88
+
+/* gate clocks */
+#define HI3559AV100_FMC_CLK 90
+#define HI3559AV100_UART0_CLK 91
+#define HI3559AV100_UART1_CLK 92
+#define HI3559AV100_UART2_CLK 93
+#define HI3559AV100_UART3_CLK 94
+#define HI3559AV100_UART4_CLK 95
+#define HI3559AV100_MMC0_CLK 96
+#define HI3559AV100_MMC1_CLK 97
+#define HI3559AV100_MMC2_CLK 98
+#define HI3559AV100_MMC3_CLK 99
+
+#define HI3559AV100_ETH_CLK 100
+#define HI3559AV100_ETH_MACIF_CLK 101
+#define HI3559AV100_ETH1_CLK 102
+#define HI3559AV100_ETH1_MACIF_CLK 103
+
+/* complex */
+#define HI3559AV100_MAC0_CLK 110
+#define HI3559AV100_MAC1_CLK 111
+#define HI3559AV100_SATA_CLK 112
+#define HI3559AV100_USB_CLK 113
+#define HI3559AV100_USB1_CLK 114
+
+/* pll clocks */
+#define HI3559AV100_APLL_CLK 250
+#define HI3559AV100_GPLL_CLK 251
+
+#define HI3559AV100_CRG_NR_CLKS 256
+
+#define HI3559AV100_SHUB_SOURCE_SOC_24M 0
+#define HI3559AV100_SHUB_SOURCE_SOC_200M 1
+#define HI3559AV100_SHUB_SOURCE_SOC_300M 2
+#define HI3559AV100_SHUB_SOURCE_PLL 3
+#define HI3559AV100_SHUB_SOURCE_CLK 4
+
+#define HI3559AV100_SHUB_I2C0_CLK 10
+#define HI3559AV100_SHUB_I2C1_CLK 11
+#define HI3559AV100_SHUB_I2C2_CLK 12
+#define HI3559AV100_SHUB_I2C3_CLK 13
+#define HI3559AV100_SHUB_I2C4_CLK 14
+#define HI3559AV100_SHUB_I2C5_CLK 15
+#define HI3559AV100_SHUB_I2C6_CLK 16
+#define HI3559AV100_SHUB_I2C7_CLK 17
+
+#define HI3559AV100_SHUB_SPI_SOURCE_CLK 20
+#define HI3559AV100_SHUB_SPI4_SOURCE_CLK 21
+#define HI3559AV100_SHUB_SPI0_CLK 22
+#define HI3559AV100_SHUB_SPI1_CLK 23
+#define HI3559AV100_SHUB_SPI2_CLK 24
+#define HI3559AV100_SHUB_SPI3_CLK 25
+#define HI3559AV100_SHUB_SPI4_CLK 26
+
+#define HI3559AV100_SHUB_UART_CLK_32K 30
+#define HI3559AV100_SHUB_UART_SOURCE_CLK 31
+#define HI3559AV100_SHUB_UART_DIV_CLK 32
+#define HI3559AV100_SHUB_UART0_CLK 33
+#define HI3559AV100_SHUB_UART1_CLK 34
+#define HI3559AV100_SHUB_UART2_CLK 35
+#define HI3559AV100_SHUB_UART3_CLK 36
+#define HI3559AV100_SHUB_UART4_CLK 37
+#define HI3559AV100_SHUB_UART5_CLK 38
+#define HI3559AV100_SHUB_UART6_CLK 39
+
+#define HI3559AV100_SHUB_EDMAC_CLK 40
+
+#define HI3559AV100_SHUB_NR_CLKS 50
+
+#endif /* __DTS_HI3559AV100_CLOCK_H */
+
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index e20c43cc36f6..e5b2a1ba02bc 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -273,6 +273,8 @@
#define IMX6QDL_CLK_MMDC_P0_IPG 263
#define IMX6QDL_CLK_DCIC1 264
#define IMX6QDL_CLK_DCIC2 265
-#define IMX6QDL_CLK_END 266
+#define IMX6QDL_CLK_ENET_REF_SEL 266
+#define IMX6QDL_CLK_ENET_REF_PAD 267
+#define IMX6QDL_CLK_END 268
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/imx6sll-clock.h b/include/dt-bindings/clock/imx6sll-clock.h
index f446710fe63d..494fd0c37fb5 100644
--- a/include/dt-bindings/clock/imx6sll-clock.h
+++ b/include/dt-bindings/clock/imx6sll-clock.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
* Copyright 2017-2018 NXP.
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
index 79094338e6f1..66239ebc0e23 100644
--- a/include/dt-bindings/clock/imx6ul-clock.h
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -256,7 +256,12 @@
#define IMX6UL_CLK_GPIO4 247
#define IMX6UL_CLK_GPIO5 248
#define IMX6UL_CLK_MMDC_P1_IPG 249
-#define IMX6UL_CLK_END 250
+#define IMX6UL_CLK_ENET1_REF_125M 250
+#define IMX6UL_CLK_ENET1_REF_SEL 251
+#define IMX6UL_CLK_ENET1_REF_PAD 252
+#define IMX6UL_CLK_ENET2_REF_SEL 253
+#define IMX6UL_CLK_ENET2_REF_PAD 254
+#define IMX6UL_CLK_END 255
#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
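
The new _REF_SEL/_REF_PAD IDs expose the i.MX6UL ethernet reference-clock mux, letting a board route an externally supplied 50 MHz RMII refclock in from the pad instead of generating it internally. A sketch of the intended usage, assuming the conventional &fec1 and &clks labels:

    #include <dt-bindings/clock/imx6ul-clock.h>

    &fec1 {
            /* feed ENET1 its reference clock from the CLKIN pad */
            assigned-clocks = <&clks IMX6UL_CLK_ENET1_REF_SEL>;
            assigned-clock-parents = <&clks IMX6UL_CLK_ENET1_REF_PAD>;
    };
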
diff --git a/include/dt-bindings/clock/imx8-clock.h b/include/dt-bindings/clock/imx8-clock.h
index 82b1fc8d1ee0..2242ff54fc5e 100644
--- a/include/dt-bindings/clock/imx8-clock.h
+++ b/include/dt-bindings/clock/imx8-clock.h
@@ -7,134 +7,6 @@
#ifndef __DT_BINDINGS_CLOCK_IMX_H
#define __DT_BINDINGS_CLOCK_IMX_H
-/* SCU Clocks */
-
-#define IMX_CLK_DUMMY 0
-
-/* CPU */
-#define IMX_A35_CLK 1
-
-/* LSIO SS */
-#define IMX_LSIO_MEM_CLK 2
-#define IMX_LSIO_BUS_CLK 3
-#define IMX_LSIO_PWM0_CLK 10
-#define IMX_LSIO_PWM1_CLK 11
-#define IMX_LSIO_PWM2_CLK 12
-#define IMX_LSIO_PWM3_CLK 13
-#define IMX_LSIO_PWM4_CLK 14
-#define IMX_LSIO_PWM5_CLK 15
-#define IMX_LSIO_PWM6_CLK 16
-#define IMX_LSIO_PWM7_CLK 17
-#define IMX_LSIO_GPT0_CLK 18
-#define IMX_LSIO_GPT1_CLK 19
-#define IMX_LSIO_GPT2_CLK 20
-#define IMX_LSIO_GPT3_CLK 21
-#define IMX_LSIO_GPT4_CLK 22
-#define IMX_LSIO_FSPI0_CLK 23
-#define IMX_LSIO_FSPI1_CLK 24
-
-/* Connectivity SS */
-#define IMX_CONN_AXI_CLK_ROOT 30
-#define IMX_CONN_AHB_CLK_ROOT 31
-#define IMX_CONN_IPG_CLK_ROOT 32
-#define IMX_CONN_SDHC0_CLK 40
-#define IMX_CONN_SDHC1_CLK 41
-#define IMX_CONN_SDHC2_CLK 42
-#define IMX_CONN_ENET0_ROOT_CLK 43
-#define IMX_CONN_ENET0_BYPASS_CLK 44
-#define IMX_CONN_ENET0_RGMII_CLK 45
-#define IMX_CONN_ENET1_ROOT_CLK 46
-#define IMX_CONN_ENET1_BYPASS_CLK 47
-#define IMX_CONN_ENET1_RGMII_CLK 48
-#define IMX_CONN_GPMI_BCH_IO_CLK 49
-#define IMX_CONN_GPMI_BCH_CLK 50
-#define IMX_CONN_USB2_ACLK 51
-#define IMX_CONN_USB2_BUS_CLK 52
-#define IMX_CONN_USB2_LPM_CLK 53
-
-/* HSIO SS */
-#define IMX_HSIO_AXI_CLK 60
-#define IMX_HSIO_PER_CLK 61
-
-/* Display controller SS */
-#define IMX_DC_AXI_EXT_CLK 70
-#define IMX_DC_AXI_INT_CLK 71
-#define IMX_DC_CFG_CLK 72
-#define IMX_DC0_PLL0_CLK 80
-#define IMX_DC0_PLL1_CLK 81
-#define IMX_DC0_DISP0_CLK 82
-#define IMX_DC0_DISP1_CLK 83
-#define IMX_DC0_BYPASS0_CLK 84
-#define IMX_DC0_BYPASS1_CLK 85
-
-/* MIPI-LVDS SS */
-#define IMX_MIPI_IPG_CLK 90
-#define IMX_MIPI0_PIXEL_CLK 100
-#define IMX_MIPI0_BYPASS_CLK 101
-#define IMX_MIPI0_LVDS_PIXEL_CLK 102
-#define IMX_MIPI0_LVDS_BYPASS_CLK 103
-#define IMX_MIPI0_LVDS_PHY_CLK 104
-#define IMX_MIPI0_I2C0_CLK 105
-#define IMX_MIPI0_I2C1_CLK 106
-#define IMX_MIPI0_PWM0_CLK 107
-#define IMX_MIPI1_PIXEL_CLK 108
-#define IMX_MIPI1_BYPASS_CLK 109
-#define IMX_MIPI1_LVDS_PIXEL_CLK 110
-#define IMX_MIPI1_LVDS_BYPASS_CLK 111
-#define IMX_MIPI1_LVDS_PHY_CLK 112
-#define IMX_MIPI1_I2C0_CLK 113
-#define IMX_MIPI1_I2C1_CLK 114
-#define IMX_MIPI1_PWM0_CLK 115
-
-/* IMG SS */
-#define IMX_IMG_AXI_CLK 120
-#define IMX_IMG_IPG_CLK 121
-#define IMX_IMG_PXL_CLK 122
-
-/* MIPI-CSI SS */
-#define IMX_CSI0_CORE_CLK 130
-#define IMX_CSI0_ESC_CLK 131
-#define IMX_CSI0_PWM0_CLK 132
-#define IMX_CSI0_I2C0_CLK 133
-
-/* PARALLER CSI SS */
-#define IMX_PARALLEL_CSI_DPLL_CLK 140
-#define IMX_PARALLEL_CSI_PIXEL_CLK 141
-#define IMX_PARALLEL_CSI_MCLK_CLK 142
-
-/* VPU SS */
-#define IMX_VPU_ENC_CLK 150
-#define IMX_VPU_DEC_CLK 151
-
-/* GPU SS */
-#define IMX_GPU0_CORE_CLK 160
-#define IMX_GPU0_SHADER_CLK 161
-
-/* ADMA SS */
-#define IMX_ADMA_IPG_CLK_ROOT 165
-#define IMX_ADMA_UART0_CLK 170
-#define IMX_ADMA_UART1_CLK 171
-#define IMX_ADMA_UART2_CLK 172
-#define IMX_ADMA_UART3_CLK 173
-#define IMX_ADMA_SPI0_CLK 174
-#define IMX_ADMA_SPI1_CLK 175
-#define IMX_ADMA_SPI2_CLK 176
-#define IMX_ADMA_SPI3_CLK 177
-#define IMX_ADMA_CAN0_CLK 178
-#define IMX_ADMA_CAN1_CLK 179
-#define IMX_ADMA_CAN2_CLK 180
-#define IMX_ADMA_I2C0_CLK 181
-#define IMX_ADMA_I2C1_CLK 182
-#define IMX_ADMA_I2C2_CLK 183
-#define IMX_ADMA_I2C3_CLK 184
-#define IMX_ADMA_FTM0_CLK 185
-#define IMX_ADMA_FTM1_CLK 186
-#define IMX_ADMA_ADC0_CLK 187
-#define IMX_ADMA_PWM_CLK 188
-#define IMX_ADMA_LCD_CLK 189
-
-#define IMX_SCU_CLK_END 190
-
/* LPCG clocks */
/* LSIO SS LPCG */
@@ -292,4 +164,32 @@
#define IMX_ADMA_LPCG_CLK_END 45
+#define IMX_ADMA_ACM_AUD_CLK0_SEL 0
+#define IMX_ADMA_ACM_AUD_CLK1_SEL 1
+#define IMX_ADMA_ACM_MCLKOUT0_SEL 2
+#define IMX_ADMA_ACM_MCLKOUT1_SEL 3
+#define IMX_ADMA_ACM_ESAI0_MCLK_SEL 4
+#define IMX_ADMA_ACM_ESAI1_MCLK_SEL 5
+#define IMX_ADMA_ACM_GPT0_MUX_CLK_SEL 6
+#define IMX_ADMA_ACM_GPT1_MUX_CLK_SEL 7
+#define IMX_ADMA_ACM_GPT2_MUX_CLK_SEL 8
+#define IMX_ADMA_ACM_GPT3_MUX_CLK_SEL 9
+#define IMX_ADMA_ACM_GPT4_MUX_CLK_SEL 10
+#define IMX_ADMA_ACM_GPT5_MUX_CLK_SEL 11
+#define IMX_ADMA_ACM_SAI0_MCLK_SEL 12
+#define IMX_ADMA_ACM_SAI1_MCLK_SEL 13
+#define IMX_ADMA_ACM_SAI2_MCLK_SEL 14
+#define IMX_ADMA_ACM_SAI3_MCLK_SEL 15
+#define IMX_ADMA_ACM_SAI4_MCLK_SEL 16
+#define IMX_ADMA_ACM_SAI5_MCLK_SEL 17
+#define IMX_ADMA_ACM_SAI6_MCLK_SEL 18
+#define IMX_ADMA_ACM_SAI7_MCLK_SEL 19
+#define IMX_ADMA_ACM_SPDIF0_TX_CLK_SEL 20
+#define IMX_ADMA_ACM_SPDIF1_TX_CLK_SEL 21
+#define IMX_ADMA_ACM_MQS_TX_CLK_SEL 22
+#define IMX_ADMA_ACM_ASRC0_MUX_CLK_SEL 23
+#define IMX_ADMA_ACM_ASRC1_MUX_CLK_SEL 24
+
+#define IMX_ADMA_ACM_CLK_END 25
+
#endif /* __DT_BINDINGS_CLOCK_IMX_H */
diff --git a/include/dt-bindings/clock/imx8mm-clock.h b/include/dt-bindings/clock/imx8mm-clock.h
index 47c6f7f9582c..1f768b2eeb1a 100644
--- a/include/dt-bindings/clock/imx8mm-clock.h
+++ b/include/dt-bindings/clock/imx8mm-clock.h
@@ -281,7 +281,6 @@
#define IMX8MM_CLK_CLKOUT2_DIV 256
#define IMX8MM_CLK_CLKOUT2 257
-
#define IMX8MM_CLK_END 258
#endif
diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h
index d24b627cb2e7..04809edab33c 100644
--- a/include/dt-bindings/clock/imx8mn-clock.h
+++ b/include/dt-bindings/clock/imx8mn-clock.h
@@ -16,40 +16,48 @@
#define IMX8MN_CLK_EXT4 7
#define IMX8MN_AUDIO_PLL1_REF_SEL 8
#define IMX8MN_AUDIO_PLL2_REF_SEL 9
-#define IMX8MN_VIDEO_PLL1_REF_SEL 10
+#define IMX8MN_VIDEO_PLL_REF_SEL 10
+#define IMX8MN_VIDEO_PLL1_REF_SEL IMX8MN_VIDEO_PLL_REF_SEL
#define IMX8MN_DRAM_PLL_REF_SEL 11
#define IMX8MN_GPU_PLL_REF_SEL 12
-#define IMX8MN_VPU_PLL_REF_SEL 13
+#define IMX8MN_M7_ALT_PLL_REF_SEL 13
+#define IMX8MN_VPU_PLL_REF_SEL IMX8MN_M7_ALT_PLL_REF_SEL
#define IMX8MN_ARM_PLL_REF_SEL 14
#define IMX8MN_SYS_PLL1_REF_SEL 15
#define IMX8MN_SYS_PLL2_REF_SEL 16
#define IMX8MN_SYS_PLL3_REF_SEL 17
#define IMX8MN_AUDIO_PLL1 18
#define IMX8MN_AUDIO_PLL2 19
-#define IMX8MN_VIDEO_PLL1 20
+#define IMX8MN_VIDEO_PLL 20
+#define IMX8MN_VIDEO_PLL1 IMX8MN_VIDEO_PLL
#define IMX8MN_DRAM_PLL 21
#define IMX8MN_GPU_PLL 22
-#define IMX8MN_VPU_PLL 23
+#define IMX8MN_M7_ALT_PLL 23
+#define IMX8MN_VPU_PLL IMX8MN_M7_ALT_PLL
#define IMX8MN_ARM_PLL 24
#define IMX8MN_SYS_PLL1 25
#define IMX8MN_SYS_PLL2 26
#define IMX8MN_SYS_PLL3 27
#define IMX8MN_AUDIO_PLL1_BYPASS 28
#define IMX8MN_AUDIO_PLL2_BYPASS 29
-#define IMX8MN_VIDEO_PLL1_BYPASS 30
+#define IMX8MN_VIDEO_PLL_BYPASS 30
+#define IMX8MN_VIDEO_PLL1_BYPASS IMX8MN_VIDEO_PLL_BYPASS
#define IMX8MN_DRAM_PLL_BYPASS 31
#define IMX8MN_GPU_PLL_BYPASS 32
-#define IMX8MN_VPU_PLL_BYPASS 33
+#define IMX8MN_M7_ALT_PLL_BYPASS 33
+#define IMX8MN_VPU_PLL_BYPASS IMX8MN_M7_ALT_PLL_BYPASS
#define IMX8MN_ARM_PLL_BYPASS 34
#define IMX8MN_SYS_PLL1_BYPASS 35
#define IMX8MN_SYS_PLL2_BYPASS 36
#define IMX8MN_SYS_PLL3_BYPASS 37
#define IMX8MN_AUDIO_PLL1_OUT 38
#define IMX8MN_AUDIO_PLL2_OUT 39
-#define IMX8MN_VIDEO_PLL1_OUT 40
+#define IMX8MN_VIDEO_PLL_OUT 40
+#define IMX8MN_VIDEO_PLL1_OUT IMX8MN_VIDEO_PLL_OUT
#define IMX8MN_DRAM_PLL_OUT 41
#define IMX8MN_GPU_PLL_OUT 42
-#define IMX8MN_VPU_PLL_OUT 43
+#define IMX8MN_M7_ALT_PLL_OUT 43
+#define IMX8MN_VPU_PLL_OUT IMX8MN_M7_ALT_PLL_OUT
#define IMX8MN_ARM_PLL_OUT 44
#define IMX8MN_SYS_PLL1_OUT 45
#define IMX8MN_SYS_PLL2_OUT 46
@@ -241,6 +249,22 @@
#define IMX8MN_CLK_CLKOUT2_DIV 219
#define IMX8MN_CLK_CLKOUT2 220
-#define IMX8MN_CLK_END 221
+#define IMX8MN_CLK_M7_CORE 221
+
+#define IMX8MN_CLK_GPT_3M 222
+#define IMX8MN_CLK_GPT1 223
+#define IMX8MN_CLK_GPT1_ROOT 224
+#define IMX8MN_CLK_GPT2 225
+#define IMX8MN_CLK_GPT2_ROOT 226
+#define IMX8MN_CLK_GPT3 227
+#define IMX8MN_CLK_GPT3_ROOT 228
+#define IMX8MN_CLK_GPT4 229
+#define IMX8MN_CLK_GPT4_ROOT 230
+#define IMX8MN_CLK_GPT5 231
+#define IMX8MN_CLK_GPT5_ROOT 232
+#define IMX8MN_CLK_GPT6 233
+#define IMX8MN_CLK_GPT6_ROOT 234
+
+#define IMX8MN_CLK_END 235
#endif
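
The VIDEO_PLL1 -> VIDEO_PLL and VPU_PLL -> M7_ALT_PLL renames above keep the old spellings as aliases, so an existing board DTS keeps compiling unchanged. A hypothetical consumer line illustrating the back-compat guarantee:

    #include <dt-bindings/clock/imx8mn-clock.h>

    /* old spelling, still valid: expands to IMX8MN_VIDEO_PLL_OUT (40) */
    assigned-clocks = <&clk IMX8MN_VIDEO_PLL1_OUT>;
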
diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h
index 43927a1b9e94..7da4243984b2 100644
--- a/include/dt-bindings/clock/imx8mp-clock.h
+++ b/include/dt-bindings/clock/imx8mp-clock.h
@@ -117,7 +117,6 @@
#define IMX8MP_CLK_AUDIO_AHB 108
#define IMX8MP_CLK_MIPI_DSI_ESC_RX 109
#define IMX8MP_CLK_IPG_ROOT 110
-#define IMX8MP_CLK_IPG_AUDIO_ROOT 111
#define IMX8MP_CLK_DRAM_ALT 112
#define IMX8MP_CLK_DRAM_APB 113
#define IMX8MP_CLK_VPU_G1 114
@@ -131,7 +130,7 @@
#define IMX8MP_CLK_SAI1 123
#define IMX8MP_CLK_SAI2 124
#define IMX8MP_CLK_SAI3 125
-#define IMX8MP_CLK_SAI4 126
+/* #define IMX8MP_CLK_SAI4 126 */
#define IMX8MP_CLK_SAI5 127
#define IMX8MP_CLK_SAI6 128
#define IMX8MP_CLK_ENET_QOS 129
@@ -318,8 +317,25 @@
#define IMX8MP_CLK_AUDIO_AXI 310
#define IMX8MP_CLK_HSIO_AXI 311
#define IMX8MP_CLK_MEDIA_ISP 312
-
-#define IMX8MP_CLK_END 313
+#define IMX8MP_CLK_MEDIA_DISP2_PIX 313
+#define IMX8MP_CLK_CLKOUT1_SEL 314
+#define IMX8MP_CLK_CLKOUT1_DIV 315
+#define IMX8MP_CLK_CLKOUT1 316
+#define IMX8MP_CLK_CLKOUT2_SEL 317
+#define IMX8MP_CLK_CLKOUT2_DIV 318
+#define IMX8MP_CLK_CLKOUT2 319
+#define IMX8MP_CLK_USB_SUSP 320
+#define IMX8MP_CLK_AUDIO_AHB_ROOT IMX8MP_CLK_AUDIO_ROOT
+#define IMX8MP_CLK_AUDIO_AXI_ROOT 321
+#define IMX8MP_CLK_SAI1_ROOT 322
+#define IMX8MP_CLK_SAI2_ROOT 323
+#define IMX8MP_CLK_SAI3_ROOT 324
+#define IMX8MP_CLK_SAI5_ROOT 325
+#define IMX8MP_CLK_SAI6_ROOT 326
+#define IMX8MP_CLK_SAI7_ROOT 327
+#define IMX8MP_CLK_PDM_ROOT 328
+#define IMX8MP_CLK_MEDIA_LDB_ROOT 329
+#define IMX8MP_CLK_END 330
#define IMX8MP_CLK_AUDIOMIX_SAI1_IPG 0
#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1 1
@@ -360,7 +376,6 @@
#define IMX8MP_CLK_AUDIOMIX_MU2_ROOT 36
#define IMX8MP_CLK_AUDIOMIX_MU3_ROOT 37
#define IMX8MP_CLK_AUDIOMIX_EARC_PHY 38
-#define IMX8MP_CLK_AUDIOMIX_PDM_ROOT 39
#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1_SEL 40
#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2_SEL 41
#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1_SEL 42
diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
index 82e907ce7bdd..afa74d7ba100 100644
--- a/include/dt-bindings/clock/imx8mq-clock.h
+++ b/include/dt-bindings/clock/imx8mq-clock.h
@@ -405,25 +405,6 @@
#define IMX8MQ_VIDEO2_PLL1_REF_SEL 266
-#define IMX8MQ_SYS1_PLL_40M_CG 267
-#define IMX8MQ_SYS1_PLL_80M_CG 268
-#define IMX8MQ_SYS1_PLL_100M_CG 269
-#define IMX8MQ_SYS1_PLL_133M_CG 270
-#define IMX8MQ_SYS1_PLL_160M_CG 271
-#define IMX8MQ_SYS1_PLL_200M_CG 272
-#define IMX8MQ_SYS1_PLL_266M_CG 273
-#define IMX8MQ_SYS1_PLL_400M_CG 274
-#define IMX8MQ_SYS1_PLL_800M_CG 275
-#define IMX8MQ_SYS2_PLL_50M_CG 276
-#define IMX8MQ_SYS2_PLL_100M_CG 277
-#define IMX8MQ_SYS2_PLL_125M_CG 278
-#define IMX8MQ_SYS2_PLL_166M_CG 279
-#define IMX8MQ_SYS2_PLL_200M_CG 280
-#define IMX8MQ_SYS2_PLL_250M_CG 281
-#define IMX8MQ_SYS2_PLL_333M_CG 282
-#define IMX8MQ_SYS2_PLL_500M_CG 283
-#define IMX8MQ_SYS2_PLL_1000M_CG 284
-
#define IMX8MQ_CLK_GPU_CORE 285
#define IMX8MQ_CLK_GPU_SHADER 286
#define IMX8MQ_CLK_M4_CORE 287
diff --git a/include/dt-bindings/clock/imx8ulp-clock.h b/include/dt-bindings/clock/imx8ulp-clock.h
new file mode 100644
index 000000000000..c62d84d093a9
--- /dev/null
+++ b/include/dt-bindings/clock/imx8ulp-clock.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/*
+ * Copyright 2021 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX8ULP_H
+#define __DT_BINDINGS_CLOCK_IMX8ULP_H
+
+#define IMX8ULP_CLK_DUMMY 0
+
+/* CGC1 */
+#define IMX8ULP_CLK_SPLL2 5
+#define IMX8ULP_CLK_SPLL3 6
+#define IMX8ULP_CLK_A35_SEL 7
+#define IMX8ULP_CLK_A35_DIV 8
+#define IMX8ULP_CLK_SPLL2_PRE_SEL 9
+#define IMX8ULP_CLK_SPLL3_PRE_SEL 10
+#define IMX8ULP_CLK_SPLL3_PFD0 11
+#define IMX8ULP_CLK_SPLL3_PFD1 12
+#define IMX8ULP_CLK_SPLL3_PFD2 13
+#define IMX8ULP_CLK_SPLL3_PFD3 14
+#define IMX8ULP_CLK_SPLL3_PFD0_DIV1 15
+#define IMX8ULP_CLK_SPLL3_PFD0_DIV2 16
+#define IMX8ULP_CLK_SPLL3_PFD1_DIV1 17
+#define IMX8ULP_CLK_SPLL3_PFD1_DIV2 18
+#define IMX8ULP_CLK_SPLL3_PFD2_DIV1 19
+#define IMX8ULP_CLK_SPLL3_PFD2_DIV2 20
+#define IMX8ULP_CLK_SPLL3_PFD3_DIV1 21
+#define IMX8ULP_CLK_SPLL3_PFD3_DIV2 22
+#define IMX8ULP_CLK_NIC_SEL 23
+#define IMX8ULP_CLK_NIC_AD_DIVPLAT 24
+#define IMX8ULP_CLK_NIC_PER_DIVPLAT 25
+#define IMX8ULP_CLK_XBAR_SEL 26
+#define IMX8ULP_CLK_XBAR_AD_DIVPLAT 27
+#define IMX8ULP_CLK_XBAR_DIVBUS 28
+#define IMX8ULP_CLK_XBAR_AD_SLOW 29
+#define IMX8ULP_CLK_SOSC_DIV1 30
+#define IMX8ULP_CLK_SOSC_DIV2 31
+#define IMX8ULP_CLK_SOSC_DIV3 32
+#define IMX8ULP_CLK_FROSC_DIV1 33
+#define IMX8ULP_CLK_FROSC_DIV2 34
+#define IMX8ULP_CLK_FROSC_DIV3 35
+#define IMX8ULP_CLK_SPLL3_VCODIV 36
+#define IMX8ULP_CLK_SPLL3_PFD0_DIV1_GATE 37
+#define IMX8ULP_CLK_SPLL3_PFD0_DIV2_GATE 38
+#define IMX8ULP_CLK_SPLL3_PFD1_DIV1_GATE 39
+#define IMX8ULP_CLK_SPLL3_PFD1_DIV2_GATE 40
+#define IMX8ULP_CLK_SPLL3_PFD2_DIV1_GATE 41
+#define IMX8ULP_CLK_SPLL3_PFD2_DIV2_GATE 42
+#define IMX8ULP_CLK_SPLL3_PFD3_DIV1_GATE 43
+#define IMX8ULP_CLK_SPLL3_PFD3_DIV2_GATE 44
+#define IMX8ULP_CLK_SOSC_DIV1_GATE 45
+#define IMX8ULP_CLK_SOSC_DIV2_GATE 46
+#define IMX8ULP_CLK_SOSC_DIV3_GATE 47
+#define IMX8ULP_CLK_FROSC_DIV1_GATE 48
+#define IMX8ULP_CLK_FROSC_DIV2_GATE 49
+#define IMX8ULP_CLK_FROSC_DIV3_GATE 50
+#define IMX8ULP_CLK_SAI4_SEL 51
+#define IMX8ULP_CLK_SAI5_SEL 52
+#define IMX8ULP_CLK_AUD_CLK1 53
+#define IMX8ULP_CLK_ARM 54
+#define IMX8ULP_CLK_ENET_TS_SEL 55
+
+#define IMX8ULP_CLK_CGC1_END 56
+
+/* CGC2 */
+#define IMX8ULP_CLK_PLL4_PRE_SEL 0
+#define IMX8ULP_CLK_PLL4 1
+#define IMX8ULP_CLK_PLL4_VCODIV 2
+#define IMX8ULP_CLK_DDR_SEL 3
+#define IMX8ULP_CLK_DDR_DIV 4
+#define IMX8ULP_CLK_LPAV_AXI_SEL 5
+#define IMX8ULP_CLK_LPAV_AXI_DIV 6
+#define IMX8ULP_CLK_LPAV_AHB_DIV 7
+#define IMX8ULP_CLK_LPAV_BUS_DIV 8
+#define IMX8ULP_CLK_PLL4_PFD0 9
+#define IMX8ULP_CLK_PLL4_PFD1 10
+#define IMX8ULP_CLK_PLL4_PFD2 11
+#define IMX8ULP_CLK_PLL4_PFD3 12
+#define IMX8ULP_CLK_PLL4_PFD0_DIV1_GATE 13
+#define IMX8ULP_CLK_PLL4_PFD0_DIV2_GATE 14
+#define IMX8ULP_CLK_PLL4_PFD1_DIV1_GATE 15
+#define IMX8ULP_CLK_PLL4_PFD1_DIV2_GATE 16
+#define IMX8ULP_CLK_PLL4_PFD2_DIV1_GATE 17
+#define IMX8ULP_CLK_PLL4_PFD2_DIV2_GATE 18
+#define IMX8ULP_CLK_PLL4_PFD3_DIV1_GATE 19
+#define IMX8ULP_CLK_PLL4_PFD3_DIV2_GATE 20
+#define IMX8ULP_CLK_PLL4_PFD0_DIV1 21
+#define IMX8ULP_CLK_PLL4_PFD0_DIV2 22
+#define IMX8ULP_CLK_PLL4_PFD1_DIV1 23
+#define IMX8ULP_CLK_PLL4_PFD1_DIV2 24
+#define IMX8ULP_CLK_PLL4_PFD2_DIV1 25
+#define IMX8ULP_CLK_PLL4_PFD2_DIV2 26
+#define IMX8ULP_CLK_PLL4_PFD3_DIV1 27
+#define IMX8ULP_CLK_PLL4_PFD3_DIV2 28
+#define IMX8ULP_CLK_CGC2_SOSC_DIV1_GATE 29
+#define IMX8ULP_CLK_CGC2_SOSC_DIV2_GATE 30
+#define IMX8ULP_CLK_CGC2_SOSC_DIV3_GATE 31
+#define IMX8ULP_CLK_CGC2_SOSC_DIV1 32
+#define IMX8ULP_CLK_CGC2_SOSC_DIV2 33
+#define IMX8ULP_CLK_CGC2_SOSC_DIV3 34
+#define IMX8ULP_CLK_CGC2_FROSC_DIV1_GATE 35
+#define IMX8ULP_CLK_CGC2_FROSC_DIV2_GATE 36
+#define IMX8ULP_CLK_CGC2_FROSC_DIV3_GATE 37
+#define IMX8ULP_CLK_CGC2_FROSC_DIV1 38
+#define IMX8ULP_CLK_CGC2_FROSC_DIV2 39
+#define IMX8ULP_CLK_CGC2_FROSC_DIV3 40
+#define IMX8ULP_CLK_AUD_CLK2 41
+#define IMX8ULP_CLK_SAI6_SEL 42
+#define IMX8ULP_CLK_SAI7_SEL 43
+#define IMX8ULP_CLK_SPDIF_SEL 44
+#define IMX8ULP_CLK_HIFI_SEL 45
+#define IMX8ULP_CLK_HIFI_DIVCORE 46
+#define IMX8ULP_CLK_HIFI_DIVPLAT 47
+#define IMX8ULP_CLK_DSI_PHY_REF 48
+
+#define IMX8ULP_CLK_CGC2_END 49
+
+/* PCC3 */
+#define IMX8ULP_CLK_WDOG3 0
+#define IMX8ULP_CLK_WDOG4 1
+#define IMX8ULP_CLK_LPIT1 2
+#define IMX8ULP_CLK_TPM4 3
+#define IMX8ULP_CLK_TPM5 4
+#define IMX8ULP_CLK_FLEXIO1 5
+#define IMX8ULP_CLK_I3C2 6
+#define IMX8ULP_CLK_LPI2C4 7
+#define IMX8ULP_CLK_LPI2C5 8
+#define IMX8ULP_CLK_LPUART4 9
+#define IMX8ULP_CLK_LPUART5 10
+#define IMX8ULP_CLK_LPSPI4 11
+#define IMX8ULP_CLK_LPSPI5 12
+#define IMX8ULP_CLK_DMA1_MP 13
+#define IMX8ULP_CLK_DMA1_CH0 14
+#define IMX8ULP_CLK_DMA1_CH1 15
+#define IMX8ULP_CLK_DMA1_CH2 16
+#define IMX8ULP_CLK_DMA1_CH3 17
+#define IMX8ULP_CLK_DMA1_CH4 18
+#define IMX8ULP_CLK_DMA1_CH5 19
+#define IMX8ULP_CLK_DMA1_CH6 20
+#define IMX8ULP_CLK_DMA1_CH7 21
+#define IMX8ULP_CLK_DMA1_CH8 22
+#define IMX8ULP_CLK_DMA1_CH9 23
+#define IMX8ULP_CLK_DMA1_CH10 24
+#define IMX8ULP_CLK_DMA1_CH11 25
+#define IMX8ULP_CLK_DMA1_CH12 26
+#define IMX8ULP_CLK_DMA1_CH13 27
+#define IMX8ULP_CLK_DMA1_CH14 28
+#define IMX8ULP_CLK_DMA1_CH15 29
+#define IMX8ULP_CLK_DMA1_CH16 30
+#define IMX8ULP_CLK_DMA1_CH17 31
+#define IMX8ULP_CLK_DMA1_CH18 32
+#define IMX8ULP_CLK_DMA1_CH19 33
+#define IMX8ULP_CLK_DMA1_CH20 34
+#define IMX8ULP_CLK_DMA1_CH21 35
+#define IMX8ULP_CLK_DMA1_CH22 36
+#define IMX8ULP_CLK_DMA1_CH23 37
+#define IMX8ULP_CLK_DMA1_CH24 38
+#define IMX8ULP_CLK_DMA1_CH25 39
+#define IMX8ULP_CLK_DMA1_CH26 40
+#define IMX8ULP_CLK_DMA1_CH27 41
+#define IMX8ULP_CLK_DMA1_CH28 42
+#define IMX8ULP_CLK_DMA1_CH29 43
+#define IMX8ULP_CLK_DMA1_CH30 44
+#define IMX8ULP_CLK_DMA1_CH31 45
+#define IMX8ULP_CLK_MU3_A 46
+#define IMX8ULP_CLK_MU0_B 47
+
+#define IMX8ULP_CLK_PCC3_END 48
+
+/* PCC4 */
+#define IMX8ULP_CLK_FLEXSPI2 0
+#define IMX8ULP_CLK_TPM6 1
+#define IMX8ULP_CLK_TPM7 2
+#define IMX8ULP_CLK_LPI2C6 3
+#define IMX8ULP_CLK_LPI2C7 4
+#define IMX8ULP_CLK_LPUART6 5
+#define IMX8ULP_CLK_LPUART7 6
+#define IMX8ULP_CLK_SAI4 7
+#define IMX8ULP_CLK_SAI5 8
+#define IMX8ULP_CLK_PCTLE 9
+#define IMX8ULP_CLK_PCTLF 10
+#define IMX8ULP_CLK_USDHC0 11
+#define IMX8ULP_CLK_USDHC1 12
+#define IMX8ULP_CLK_USDHC2 13
+#define IMX8ULP_CLK_USB0 14
+#define IMX8ULP_CLK_USB0_PHY 15
+#define IMX8ULP_CLK_USB1 16
+#define IMX8ULP_CLK_USB1_PHY 17
+#define IMX8ULP_CLK_USB_XBAR 18
+#define IMX8ULP_CLK_ENET 19
+#define IMX8ULP_CLK_SFA1 20
+#define IMX8ULP_CLK_RGPIOE 21
+#define IMX8ULP_CLK_RGPIOF 22
+
+#define IMX8ULP_CLK_PCC4_END 23
+
+/* PCC5 */
+#define IMX8ULP_CLK_TPM8 0
+#define IMX8ULP_CLK_SAI6 1
+#define IMX8ULP_CLK_SAI7 2
+#define IMX8ULP_CLK_SPDIF 3
+#define IMX8ULP_CLK_ISI 4
+#define IMX8ULP_CLK_CSI_REGS 5
+#define IMX8ULP_CLK_PCTLD 6
+#define IMX8ULP_CLK_CSI 7
+#define IMX8ULP_CLK_DSI 8
+#define IMX8ULP_CLK_WDOG5 9
+#define IMX8ULP_CLK_EPDC 10
+#define IMX8ULP_CLK_PXP 11
+#define IMX8ULP_CLK_SFA2 12
+#define IMX8ULP_CLK_GPU2D 13
+#define IMX8ULP_CLK_GPU3D 14
+#define IMX8ULP_CLK_DC_NANO 15
+#define IMX8ULP_CLK_CSI_CLK_UI 16
+#define IMX8ULP_CLK_CSI_CLK_ESC 17
+#define IMX8ULP_CLK_RGPIOD 18
+#define IMX8ULP_CLK_DMA2_MP 19
+#define IMX8ULP_CLK_DMA2_CH0 20
+#define IMX8ULP_CLK_DMA2_CH1 21
+#define IMX8ULP_CLK_DMA2_CH2 22
+#define IMX8ULP_CLK_DMA2_CH3 23
+#define IMX8ULP_CLK_DMA2_CH4 24
+#define IMX8ULP_CLK_DMA2_CH5 25
+#define IMX8ULP_CLK_DMA2_CH6 26
+#define IMX8ULP_CLK_DMA2_CH7 27
+#define IMX8ULP_CLK_DMA2_CH8 28
+#define IMX8ULP_CLK_DMA2_CH9 29
+#define IMX8ULP_CLK_DMA2_CH10 30
+#define IMX8ULP_CLK_DMA2_CH11 31
+#define IMX8ULP_CLK_DMA2_CH12 32
+#define IMX8ULP_CLK_DMA2_CH13 33
+#define IMX8ULP_CLK_DMA2_CH14 34
+#define IMX8ULP_CLK_DMA2_CH15 35
+#define IMX8ULP_CLK_DMA2_CH16 36
+#define IMX8ULP_CLK_DMA2_CH17 37
+#define IMX8ULP_CLK_DMA2_CH18 38
+#define IMX8ULP_CLK_DMA2_CH19 39
+#define IMX8ULP_CLK_DMA2_CH20 40
+#define IMX8ULP_CLK_DMA2_CH21 41
+#define IMX8ULP_CLK_DMA2_CH22 42
+#define IMX8ULP_CLK_DMA2_CH23 43
+#define IMX8ULP_CLK_DMA2_CH24 44
+#define IMX8ULP_CLK_DMA2_CH25 45
+#define IMX8ULP_CLK_DMA2_CH26 46
+#define IMX8ULP_CLK_DMA2_CH27 47
+#define IMX8ULP_CLK_DMA2_CH28 48
+#define IMX8ULP_CLK_DMA2_CH29 49
+#define IMX8ULP_CLK_DMA2_CH30 50
+#define IMX8ULP_CLK_DMA2_CH31 51
+#define IMX8ULP_CLK_MU2_B 52
+#define IMX8ULP_CLK_MU3_B 53
+#define IMX8ULP_CLK_AVD_SIM 54
+#define IMX8ULP_CLK_DSI_TX_ESC 55
+
+#define IMX8ULP_CLK_PCC5_END 56
+
+/* LPAV SIM */
+#define IMX8ULP_CLK_SIM_LPAV_HIFI_CORE 0
+#define IMX8ULP_CLK_SIM_LPAV_HIFI_PBCLK 1
+#define IMX8ULP_CLK_SIM_LPAV_HIFI_PLAT 2
+
+#endif
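Each PCC block above is a separate clock provider, which is why the index space restarts at 0 for PCC4 and PCC5 and why the IMX8ULP_CLK_PCCn_END markers exist: an ID is only meaningful together with the node of the PCC it belongs to. A minimal consumer sketch, assuming illustrative node labels and a made-up unit address:

    #include <dt-bindings/clock/imx8ulp-clock.h>

    lpuart5: serial@293b0000 {          /* address illustrative */
            compatible = "fsl,imx8ulp-lpuart", "fsl,imx7ulp-lpuart";
            reg = <0x293b0000 0x1000>;
            clocks = <&pcc3 IMX8ULP_CLK_LPUART5>;  /* &pcc3 label assumed */
            clock-names = "ipg";
    };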
diff --git a/include/dt-bindings/clock/imx93-clock.h b/include/dt-bindings/clock/imx93-clock.h
new file mode 100644
index 000000000000..c393fad3a346
--- /dev/null
+++ b/include/dt-bindings/clock/imx93-clock.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/*
+ * Copyright 2022 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX93_CLK_H
+#define __DT_BINDINGS_CLOCK_IMX93_CLK_H
+
+#define IMX93_CLK_DUMMY 0
+#define IMX93_CLK_24M 1
+#define IMX93_CLK_EXT1 2
+#define IMX93_CLK_SYS_PLL_PFD0 3
+#define IMX93_CLK_SYS_PLL_PFD0_DIV2 4
+#define IMX93_CLK_SYS_PLL_PFD1 5
+#define IMX93_CLK_SYS_PLL_PFD1_DIV2 6
+#define IMX93_CLK_SYS_PLL_PFD2 7
+#define IMX93_CLK_SYS_PLL_PFD2_DIV2 8
+#define IMX93_CLK_AUDIO_PLL 9
+#define IMX93_CLK_VIDEO_PLL 10
+#define IMX93_CLK_A55_PERIPH 11
+#define IMX93_CLK_A55_MTR_BUS 12
+#define IMX93_CLK_A55 13
+#define IMX93_CLK_M33 14
+#define IMX93_CLK_BUS_WAKEUP 15
+#define IMX93_CLK_BUS_AON 16
+#define IMX93_CLK_WAKEUP_AXI 17
+#define IMX93_CLK_SWO_TRACE 18
+#define IMX93_CLK_M33_SYSTICK 19
+#define IMX93_CLK_FLEXIO1 20
+#define IMX93_CLK_FLEXIO2 21
+#define IMX93_CLK_LPTMR1 24
+#define IMX93_CLK_LPTMR2 25
+#define IMX93_CLK_TPM2 27
+#define IMX93_CLK_TPM4 29
+#define IMX93_CLK_TPM5 30
+#define IMX93_CLK_TPM6 31
+#define IMX93_CLK_FLEXSPI1 32
+#define IMX93_CLK_CAN1 33
+#define IMX93_CLK_CAN2 34
+#define IMX93_CLK_LPUART1 35
+#define IMX93_CLK_LPUART2 36
+#define IMX93_CLK_LPUART3 37
+#define IMX93_CLK_LPUART4 38
+#define IMX93_CLK_LPUART5 39
+#define IMX93_CLK_LPUART6 40
+#define IMX93_CLK_LPUART7 41
+#define IMX93_CLK_LPUART8 42
+#define IMX93_CLK_LPI2C1 43
+#define IMX93_CLK_LPI2C2 44
+#define IMX93_CLK_LPI2C3 45
+#define IMX93_CLK_LPI2C4 46
+#define IMX93_CLK_LPI2C5 47
+#define IMX93_CLK_LPI2C6 48
+#define IMX93_CLK_LPI2C7 49
+#define IMX93_CLK_LPI2C8 50
+#define IMX93_CLK_LPSPI1 51
+#define IMX93_CLK_LPSPI2 52
+#define IMX93_CLK_LPSPI3 53
+#define IMX93_CLK_LPSPI4 54
+#define IMX93_CLK_LPSPI5 55
+#define IMX93_CLK_LPSPI6 56
+#define IMX93_CLK_LPSPI7 57
+#define IMX93_CLK_LPSPI8 58
+#define IMX93_CLK_I3C1 59
+#define IMX93_CLK_I3C2 60
+#define IMX93_CLK_USDHC1 61
+#define IMX93_CLK_USDHC2 62
+#define IMX93_CLK_USDHC3 63
+#define IMX93_CLK_SAI1 64
+#define IMX93_CLK_SAI2 65
+#define IMX93_CLK_SAI3 66
+#define IMX93_CLK_CCM_CKO1 67
+#define IMX93_CLK_CCM_CKO2 68
+#define IMX93_CLK_CCM_CKO3 69
+#define IMX93_CLK_CCM_CKO4 70
+#define IMX93_CLK_HSIO 71
+#define IMX93_CLK_HSIO_USB_TEST_60M 72
+#define IMX93_CLK_HSIO_ACSCAN_80M 73
+#define IMX93_CLK_HSIO_ACSCAN_480M 74
+#define IMX93_CLK_ML_APB 75
+#define IMX93_CLK_ML 76
+#define IMX93_CLK_MEDIA_AXI 77
+#define IMX93_CLK_MEDIA_APB 78
+#define IMX93_CLK_MEDIA_LDB 79
+#define IMX93_CLK_MEDIA_DISP_PIX 80
+#define IMX93_CLK_CAM_PIX 81
+#define IMX93_CLK_MIPI_TEST_BYTE 82
+#define IMX93_CLK_MIPI_PHY_CFG 83
+#define IMX93_CLK_ADC 84
+#define IMX93_CLK_PDM 85
+#define IMX93_CLK_TSTMR1 86
+#define IMX93_CLK_TSTMR2 87
+#define IMX93_CLK_MQS1 88
+#define IMX93_CLK_MQS2 89
+#define IMX93_CLK_AUDIO_XCVR 90
+#define IMX93_CLK_SPDIF 91
+#define IMX93_CLK_ENET 92
+#define IMX93_CLK_ENET_TIMER1 93
+#define IMX93_CLK_ENET_TIMER2 94
+#define IMX93_CLK_ENET_REF 95
+#define IMX93_CLK_ENET_REF_PHY 96
+#define IMX93_CLK_I3C1_SLOW 97
+#define IMX93_CLK_I3C2_SLOW 98
+#define IMX93_CLK_USB_PHY_BURUNIN 99
+#define IMX93_CLK_PAL_CAME_SCAN 100
+#define IMX93_CLK_A55_GATE 101
+#define IMX93_CLK_CM33_GATE 102
+#define IMX93_CLK_ADC1_GATE 103
+#define IMX93_CLK_WDOG1_GATE 104
+#define IMX93_CLK_WDOG2_GATE 105
+#define IMX93_CLK_WDOG3_GATE 106
+#define IMX93_CLK_WDOG4_GATE 107
+#define IMX93_CLK_WDOG5_GATE 108
+#define IMX93_CLK_SEMA1_GATE 109
+#define IMX93_CLK_SEMA2_GATE 110
+#define IMX93_CLK_MU_A_GATE 111
+#define IMX93_CLK_MU_B_GATE 112
+#define IMX93_CLK_EDMA1_GATE 113
+#define IMX93_CLK_EDMA2_GATE 114
+#define IMX93_CLK_FLEXSPI1_GATE 115
+#define IMX93_CLK_GPIO1_GATE 116
+#define IMX93_CLK_GPIO2_GATE 117
+#define IMX93_CLK_GPIO3_GATE 118
+#define IMX93_CLK_GPIO4_GATE 119
+#define IMX93_CLK_FLEXIO1_GATE 120
+#define IMX93_CLK_FLEXIO2_GATE 121
+#define IMX93_CLK_LPIT1_GATE 122
+#define IMX93_CLK_LPIT2_GATE 123
+#define IMX93_CLK_LPTMR1_GATE 124
+#define IMX93_CLK_LPTMR2_GATE 125
+#define IMX93_CLK_TPM1_GATE 126
+#define IMX93_CLK_TPM2_GATE 127
+#define IMX93_CLK_TPM3_GATE 128
+#define IMX93_CLK_TPM4_GATE 129
+#define IMX93_CLK_TPM5_GATE 130
+#define IMX93_CLK_TPM6_GATE 131
+#define IMX93_CLK_CAN1_GATE 132
+#define IMX93_CLK_CAN2_GATE 133
+#define IMX93_CLK_LPUART1_GATE 134
+#define IMX93_CLK_LPUART2_GATE 135
+#define IMX93_CLK_LPUART3_GATE 136
+#define IMX93_CLK_LPUART4_GATE 137
+#define IMX93_CLK_LPUART5_GATE 138
+#define IMX93_CLK_LPUART6_GATE 139
+#define IMX93_CLK_LPUART7_GATE 140
+#define IMX93_CLK_LPUART8_GATE 141
+#define IMX93_CLK_LPI2C1_GATE 142
+#define IMX93_CLK_LPI2C2_GATE 143
+#define IMX93_CLK_LPI2C3_GATE 144
+#define IMX93_CLK_LPI2C4_GATE 145
+#define IMX93_CLK_LPI2C5_GATE 146
+#define IMX93_CLK_LPI2C6_GATE 147
+#define IMX93_CLK_LPI2C7_GATE 148
+#define IMX93_CLK_LPI2C8_GATE 149
+#define IMX93_CLK_LPSPI1_GATE 150
+#define IMX93_CLK_LPSPI2_GATE 151
+#define IMX93_CLK_LPSPI3_GATE 152
+#define IMX93_CLK_LPSPI4_GATE 153
+#define IMX93_CLK_LPSPI5_GATE 154
+#define IMX93_CLK_LPSPI6_GATE 155
+#define IMX93_CLK_LPSPI7_GATE 156
+#define IMX93_CLK_LPSPI8_GATE 157
+#define IMX93_CLK_I3C1_GATE 158
+#define IMX93_CLK_I3C2_GATE 159
+#define IMX93_CLK_USDHC1_GATE 160
+#define IMX93_CLK_USDHC2_GATE 161
+#define IMX93_CLK_USDHC3_GATE 162
+#define IMX93_CLK_SAI1_GATE 163
+#define IMX93_CLK_SAI2_GATE 164
+#define IMX93_CLK_SAI3_GATE 165
+#define IMX93_CLK_MIPI_CSI_GATE 166
+#define IMX93_CLK_MIPI_DSI_GATE 167
+#define IMX93_CLK_LVDS_GATE 168
+#define IMX93_CLK_LCDIF_GATE 169
+#define IMX93_CLK_PXP_GATE 170
+#define IMX93_CLK_ISI_GATE 171
+#define IMX93_CLK_NIC_MEDIA_GATE 172
+#define IMX93_CLK_USB_CONTROLLER_GATE 173
+#define IMX93_CLK_USB_TEST_60M_GATE 174
+#define IMX93_CLK_HSIO_TROUT_24M_GATE 175
+#define IMX93_CLK_PDM_GATE 176
+#define IMX93_CLK_MQS1_GATE 177
+#define IMX93_CLK_MQS2_GATE 178
+#define IMX93_CLK_AUD_XCVR_GATE 179
+#define IMX93_CLK_SPDIF_GATE 180
+#define IMX93_CLK_HSIO_32K_GATE 181
+#define IMX93_CLK_ENET1_GATE 182
+#define IMX93_CLK_ENET_QOS_GATE 183
+#define IMX93_CLK_SYS_CNT_GATE 184
+#define IMX93_CLK_TSTMR1_GATE 185
+#define IMX93_CLK_TSTMR2_GATE 186
+#define IMX93_CLK_TMC_GATE 187
+#define IMX93_CLK_PMRO_GATE 188
+#define IMX93_CLK_32K 189
+#define IMX93_CLK_SAI1_IPG 190
+#define IMX93_CLK_SAI2_IPG 191
+#define IMX93_CLK_SAI3_IPG 192
+#define IMX93_CLK_MU1_A_GATE 193
+#define IMX93_CLK_MU1_B_GATE 194
+#define IMX93_CLK_MU2_A_GATE 195
+#define IMX93_CLK_MU2_B_GATE 196
+#define IMX93_CLK_NIC_AXI 197
+#define IMX93_CLK_ARM_PLL 198
+#define IMX93_CLK_A55_SEL 199
+#define IMX93_CLK_A55_CORE 200
+#define IMX93_CLK_PDM_IPG 201
+#define IMX91_CLK_ENET1_QOS_TSN 202
+#define IMX91_CLK_ENET_TIMER 203
+#define IMX91_CLK_ENET2_REGULAR 204
+#define IMX91_CLK_ENET2_REGULAR_GATE 205
+#define IMX91_CLK_ENET1_QOS_TSN_GATE 206
+#define IMX93_CLK_SPDIF_IPG 207
+
+#endif
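All i.MX93 IDs above resolve against a single CCM provider, so beyond plain consumption they can drive the generic assigned-clocks properties to pick a mux parent and rate at boot. A hedged sketch; the &clk label, the consumer node, and the chosen parents/rates are assumptions, not part of this patch:

    #include <dt-bindings/clock/imx93-clock.h>

    &media_blk_ctrl {   /* illustrative consumer */
            assigned-clocks = <&clk IMX93_CLK_MEDIA_AXI>,
                              <&clk IMX93_CLK_MEDIA_APB>;
            assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1>,
                                     <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>;
            assigned-clock-rates = <400000000>, <133000000>;
    };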
diff --git a/include/dt-bindings/clock/imxrt1050-clock.h b/include/dt-bindings/clock/imxrt1050-clock.h
new file mode 100644
index 000000000000..93bef0832d16
--- /dev/null
+++ b/include/dt-bindings/clock/imxrt1050-clock.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (C) 2019
+ * Author(s): Giulio Benetti <giulio.benetti@benettiengineering.com>
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMXRT1050_H
+#define __DT_BINDINGS_CLOCK_IMXRT1050_H
+
+#define IMXRT1050_CLK_DUMMY 0
+#define IMXRT1050_CLK_CKIL 1
+#define IMXRT1050_CLK_CKIH 2
+#define IMXRT1050_CLK_OSC 3
+#define IMXRT1050_CLK_PLL2_PFD0_352M 4
+#define IMXRT1050_CLK_PLL2_PFD1_594M 5
+#define IMXRT1050_CLK_PLL2_PFD2_396M 6
+#define IMXRT1050_CLK_PLL3_PFD0_720M 7
+#define IMXRT1050_CLK_PLL3_PFD1_664_62M 8
+#define IMXRT1050_CLK_PLL3_PFD2_508_24M 9
+#define IMXRT1050_CLK_PLL3_PFD3_454_74M 10
+#define IMXRT1050_CLK_PLL2_198M 11
+#define IMXRT1050_CLK_PLL3_120M 12
+#define IMXRT1050_CLK_PLL3_80M 13
+#define IMXRT1050_CLK_PLL3_60M 14
+#define IMXRT1050_CLK_PLL1_BYPASS 15
+#define IMXRT1050_CLK_PLL2_BYPASS 16
+#define IMXRT1050_CLK_PLL3_BYPASS 17
+#define IMXRT1050_CLK_PLL5_BYPASS 19
+#define IMXRT1050_CLK_PLL1_REF_SEL 20
+#define IMXRT1050_CLK_PLL2_REF_SEL 21
+#define IMXRT1050_CLK_PLL3_REF_SEL 22
+#define IMXRT1050_CLK_PLL5_REF_SEL 23
+#define IMXRT1050_CLK_PRE_PERIPH_SEL 24
+#define IMXRT1050_CLK_PERIPH_SEL 25
+#define IMXRT1050_CLK_SEMC_ALT_SEL 26
+#define IMXRT1050_CLK_SEMC_SEL 27
+#define IMXRT1050_CLK_USDHC1_SEL 28
+#define IMXRT1050_CLK_USDHC2_SEL 29
+#define IMXRT1050_CLK_LPUART_SEL 30
+#define IMXRT1050_CLK_LCDIF_SEL 31
+#define IMXRT1050_CLK_VIDEO_POST_DIV_SEL 32
+#define IMXRT1050_CLK_VIDEO_DIV 33
+#define IMXRT1050_CLK_ARM_PODF 34
+#define IMXRT1050_CLK_LPUART_PODF 35
+#define IMXRT1050_CLK_USDHC1_PODF 36
+#define IMXRT1050_CLK_USDHC2_PODF 37
+#define IMXRT1050_CLK_SEMC_PODF 38
+#define IMXRT1050_CLK_AHB_PODF 39
+#define IMXRT1050_CLK_LCDIF_PRED 40
+#define IMXRT1050_CLK_LCDIF_PODF 41
+#define IMXRT1050_CLK_USDHC1 42
+#define IMXRT1050_CLK_USDHC2 43
+#define IMXRT1050_CLK_LPUART1 44
+#define IMXRT1050_CLK_SEMC 45
+#define IMXRT1050_CLK_LCDIF_APB 46
+#define IMXRT1050_CLK_PLL1_ARM 47
+#define IMXRT1050_CLK_PLL2_SYS 48
+#define IMXRT1050_CLK_PLL3_USB_OTG 49
+#define IMXRT1050_CLK_PLL4_AUDIO 50
+#define IMXRT1050_CLK_PLL5_VIDEO 51
+#define IMXRT1050_CLK_PLL6_ENET 52
+#define IMXRT1050_CLK_PLL7_USB_HOST 53
+#define IMXRT1050_CLK_LCDIF_PIX 54
+#define IMXRT1050_CLK_USBOH3 55
+#define IMXRT1050_CLK_IPG_PDOF 56
+#define IMXRT1050_CLK_PER_CLK_SEL 57
+#define IMXRT1050_CLK_PER_PDOF 58
+#define IMXRT1050_CLK_DMA 59
+#define IMXRT1050_CLK_DMA_MUX 60
+#define IMXRT1050_CLK_END 61
+
+#endif /* __DT_BINDINGS_CLOCK_IMXRT1050_H */
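A consumer typically takes several of these IDs at once, distinguished by clock-names. A sketch loosely modelled on how an i.MXRT1050 uSDHC host is wired up; the label, address, and exact name list should be treated as assumptions:

    #include <dt-bindings/clock/imxrt1050-clock.h>

    usdhc1: mmc@402c0000 {              /* address illustrative */
            compatible = "fsl,imxrt1050-usdhc", "fsl,imx6sl-usdhc";
            reg = <0x402c0000 0x4000>;
            clocks = <&clks IMXRT1050_CLK_IPG_PDOF>,
                     <&clks IMXRT1050_CLK_AHB_PODF>,
                     <&clks IMXRT1050_CLK_USDHC1>;
            clock-names = "ipg", "ahb", "per";
    };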
diff --git a/include/dt-bindings/clock/jz4725b-cgu.h b/include/dt-bindings/clock/ingenic,jz4725b-cgu.h
index 31f1ab0fe42c..31f1ab0fe42c 100644
--- a/include/dt-bindings/clock/jz4725b-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4725b-cgu.h
diff --git a/include/dt-bindings/clock/jz4740-cgu.h b/include/dt-bindings/clock/ingenic,jz4740-cgu.h
index e82d77028581..e82d77028581 100644
--- a/include/dt-bindings/clock/jz4740-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4740-cgu.h
diff --git a/include/dt-bindings/clock/ingenic,jz4755-cgu.h b/include/dt-bindings/clock/ingenic,jz4755-cgu.h
new file mode 100644
index 000000000000..10098494e7df
--- /dev/null
+++ b/include/dt-bindings/clock/ingenic,jz4755-cgu.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides clock numbers for the ingenic,jz4755-cgu DT binding.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_JZ4755_CGU_H__
+#define __DT_BINDINGS_CLOCK_JZ4755_CGU_H__
+
+#define JZ4755_CLK_EXT 0
+#define JZ4755_CLK_OSC32K 1
+#define JZ4755_CLK_PLL 2
+#define JZ4755_CLK_PLL_HALF 3
+#define JZ4755_CLK_EXT_HALF 4
+#define JZ4755_CLK_CCLK 5
+#define JZ4755_CLK_H0CLK 6
+#define JZ4755_CLK_PCLK 7
+#define JZ4755_CLK_MCLK 8
+#define JZ4755_CLK_H1CLK 9
+#define JZ4755_CLK_UDC 10
+#define JZ4755_CLK_LCD 11
+#define JZ4755_CLK_UART0 12
+#define JZ4755_CLK_UART1 13
+#define JZ4755_CLK_UART2 14
+#define JZ4755_CLK_DMA 15
+#define JZ4755_CLK_MMC 16
+#define JZ4755_CLK_MMC0 17
+#define JZ4755_CLK_MMC1 18
+#define JZ4755_CLK_EXT512 19
+#define JZ4755_CLK_RTC 20
+#define JZ4755_CLK_UDC_PHY 21
+#define JZ4755_CLK_I2S 22
+#define JZ4755_CLK_SPI 23
+#define JZ4755_CLK_AIC 24
+#define JZ4755_CLK_ADC 25
+#define JZ4755_CLK_TCU 26
+#define JZ4755_CLK_BCH 27
+#define JZ4755_CLK_I2C 28
+#define JZ4755_CLK_TVE 29
+#define JZ4755_CLK_CIM 30
+#define JZ4755_CLK_AUX_CPU 31
+#define JZ4755_CLK_AHB1 32
+#define JZ4755_CLK_IDCT 33
+#define JZ4755_CLK_DB 34
+#define JZ4755_CLK_ME 35
+#define JZ4755_CLK_MC 36
+#define JZ4755_CLK_TSSI 37
+#define JZ4755_CLK_IPU 38
+
+#endif /* __DT_BINDINGS_CLOCK_JZ4755_CGU_H__ */
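On the provider side these indices are resolved through a one-cell clock specifier. A hedged sketch of the CGU node and one consumer, with register addresses as placeholders:

    #include <dt-bindings/clock/ingenic,jz4755-cgu.h>

    cgu: clock-controller@10000000 {
            compatible = "ingenic,jz4755-cgu";
            reg = <0x10000000 0x100>;
            #clock-cells = <1>;
    };

    uart0: serial@10030000 {            /* address illustrative */
            clocks = <&cgu JZ4755_CLK_EXT>, <&cgu JZ4755_CLK_UART0>;
            clock-names = "baud", "module";
    };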
diff --git a/include/dt-bindings/clock/ingenic,jz4760-cgu.h b/include/dt-bindings/clock/ingenic,jz4760-cgu.h
new file mode 100644
index 000000000000..9fb04ebac6de
--- /dev/null
+++ b/include/dt-bindings/clock/ingenic,jz4760-cgu.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the ingenic,jz4760-cgu DT binding.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_JZ4760_CGU_H__
+#define __DT_BINDINGS_CLOCK_JZ4760_CGU_H__
+
+#define JZ4760_CLK_EXT 0
+#define JZ4760_CLK_OSC32K 1
+#define JZ4760_CLK_PLL0 2
+#define JZ4760_CLK_PLL0_HALF 3
+#define JZ4760_CLK_PLL1 4
+#define JZ4760_CLK_CCLK 5
+#define JZ4760_CLK_HCLK 6
+#define JZ4760_CLK_SCLK 7
+#define JZ4760_CLK_H2CLK 8
+#define JZ4760_CLK_MCLK 9
+#define JZ4760_CLK_PCLK 10
+#define JZ4760_CLK_MMC_MUX 11
+#define JZ4760_CLK_MMC0 12
+#define JZ4760_CLK_MMC1 13
+#define JZ4760_CLK_MMC2 14
+#define JZ4760_CLK_CIM 15
+#define JZ4760_CLK_UHC 16
+#define JZ4760_CLK_GPU 17
+#define JZ4760_CLK_GPS 18
+#define JZ4760_CLK_SSI_MUX 19
+#define JZ4760_CLK_PCM 20
+#define JZ4760_CLK_I2S 21
+#define JZ4760_CLK_OTG 22
+#define JZ4760_CLK_SSI0 23
+#define JZ4760_CLK_SSI1 24
+#define JZ4760_CLK_SSI2 25
+#define JZ4760_CLK_DMA 26
+#define JZ4760_CLK_I2C0 27
+#define JZ4760_CLK_I2C1 28
+#define JZ4760_CLK_UART0 29
+#define JZ4760_CLK_UART1 30
+#define JZ4760_CLK_UART2 31
+#define JZ4760_CLK_UART3 32
+#define JZ4760_CLK_IPU 33
+#define JZ4760_CLK_ADC 34
+#define JZ4760_CLK_AIC 35
+#define JZ4760_CLK_VPU 36
+#define JZ4760_CLK_UHC_PHY 37
+#define JZ4760_CLK_OTG_PHY 38
+#define JZ4760_CLK_EXT512 39
+#define JZ4760_CLK_RTC 40
+#define JZ4760_CLK_LPCLK_DIV 41
+#define JZ4760_CLK_TVE 42
+#define JZ4760_CLK_LPCLK 43
+#define JZ4760_CLK_MDMA 44
+#define JZ4760_CLK_BDMA 45
+
+#endif /* __DT_BINDINGS_CLOCK_JZ4760_CGU_H__ */
diff --git a/include/dt-bindings/clock/jz4770-cgu.h b/include/dt-bindings/clock/ingenic,jz4770-cgu.h
index d68a7695a1f8..0b475e8ae321 100644
--- a/include/dt-bindings/clock/jz4770-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4770-cgu.h
@@ -54,5 +54,6 @@
#define JZ4770_CLK_OTG_PHY 45
#define JZ4770_CLK_EXT512 46
#define JZ4770_CLK_RTC 47
+#define JZ4770_CLK_BDMA 48
#endif /* __DT_BINDINGS_CLOCK_JZ4770_CGU_H__ */
diff --git a/include/dt-bindings/clock/jz4780-cgu.h b/include/dt-bindings/clock/ingenic,jz4780-cgu.h
index 85cf8eb5081b..85cf8eb5081b 100644
--- a/include/dt-bindings/clock/jz4780-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4780-cgu.h
diff --git a/include/dt-bindings/clock/ingenic,sysost.h b/include/dt-bindings/clock/ingenic,sysost.h
index 063791b01ab3..d7aa42c08ded 100644
--- a/include/dt-bindings/clock/ingenic,sysost.h
+++ b/include/dt-bindings/clock/ingenic,sysost.h
@@ -13,4 +13,23 @@
#define OST_CLK_PERCPU_TIMER2 3
#define OST_CLK_PERCPU_TIMER3 4
+#define OST_CLK_EVENT_TIMER 1
+
+#define OST_CLK_EVENT_TIMER0 0
+#define OST_CLK_EVENT_TIMER1 1
+#define OST_CLK_EVENT_TIMER2 2
+#define OST_CLK_EVENT_TIMER3 3
+#define OST_CLK_EVENT_TIMER4 4
+#define OST_CLK_EVENT_TIMER5 5
+#define OST_CLK_EVENT_TIMER6 6
+#define OST_CLK_EVENT_TIMER7 7
+#define OST_CLK_EVENT_TIMER8 8
+#define OST_CLK_EVENT_TIMER9 9
+#define OST_CLK_EVENT_TIMER10 10
+#define OST_CLK_EVENT_TIMER11 11
+#define OST_CLK_EVENT_TIMER12 12
+#define OST_CLK_EVENT_TIMER13 13
+#define OST_CLK_EVENT_TIMER14 14
+#define OST_CLK_EVENT_TIMER15 15
+
#endif /* __DT_BINDINGS_CLOCK_INGENIC_OST_H__ */
diff --git a/include/dt-bindings/clock/x1000-cgu.h b/include/dt-bindings/clock/ingenic,x1000-cgu.h
index f187e0719fd3..78daf44b3514 100644
--- a/include/dt-bindings/clock/x1000-cgu.h
+++ b/include/dt-bindings/clock/ingenic,x1000-cgu.h
@@ -50,5 +50,9 @@
#define X1000_CLK_PDMA 35
#define X1000_CLK_EXCLK_DIV512 36
#define X1000_CLK_RTC 37
+#define X1000_CLK_AIC 38
+#define X1000_CLK_I2SPLLMUX 39
+#define X1000_CLK_I2SPLL 40
+#define X1000_CLK_I2S 41
#endif /* __DT_BINDINGS_CLOCK_X1000_CGU_H__ */
diff --git a/include/dt-bindings/clock/x1830-cgu.h b/include/dt-bindings/clock/ingenic,x1830-cgu.h
index 88455376a950..88455376a950 100644
--- a/include/dt-bindings/clock/x1830-cgu.h
+++ b/include/dt-bindings/clock/ingenic,x1830-cgu.h
diff --git a/include/dt-bindings/clock/intel,agilex5-clkmgr.h b/include/dt-bindings/clock/intel,agilex5-clkmgr.h
new file mode 100644
index 000000000000..2f3a23b31c5c
--- /dev/null
+++ b/include/dt-bindings/clock/intel,agilex5-clkmgr.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Intel Corporation
+ */
+
+#ifndef __DT_BINDINGS_INTEL_AGILEX5_CLKMGR_H
+#define __DT_BINDINGS_INTEL_AGILEX5_CLKMGR_H
+
+/* fixed rate clocks */
+#define AGILEX5_OSC1 0
+#define AGILEX5_CB_INTOSC_HS_DIV2_CLK 1
+#define AGILEX5_CB_INTOSC_LS_CLK 2
+#define AGILEX5_F2S_FREE_CLK 3
+
+/* PLL clocks */
+#define AGILEX5_MAIN_PLL_CLK 4
+#define AGILEX5_MAIN_PLL_C0_CLK 5
+#define AGILEX5_MAIN_PLL_C1_CLK 6
+#define AGILEX5_MAIN_PLL_C2_CLK 7
+#define AGILEX5_MAIN_PLL_C3_CLK 8
+#define AGILEX5_PERIPH_PLL_CLK 9
+#define AGILEX5_PERIPH_PLL_C0_CLK 10
+#define AGILEX5_PERIPH_PLL_C1_CLK 11
+#define AGILEX5_PERIPH_PLL_C2_CLK 12
+#define AGILEX5_PERIPH_PLL_C3_CLK 13
+#define AGILEX5_CORE0_FREE_CLK 14
+#define AGILEX5_CORE1_FREE_CLK 15
+#define AGILEX5_CORE2_FREE_CLK 16
+#define AGILEX5_CORE3_FREE_CLK 17
+#define AGILEX5_DSU_FREE_CLK 18
+#define AGILEX5_BOOT_CLK 19
+
+/* fixed factor clocks */
+#define AGILEX5_L3_MAIN_FREE_CLK 20
+#define AGILEX5_NOC_FREE_CLK 21
+#define AGILEX5_S2F_USR0_CLK 22
+#define AGILEX5_NOC_CLK 23
+#define AGILEX5_EMAC_A_FREE_CLK 24
+#define AGILEX5_EMAC_B_FREE_CLK 25
+#define AGILEX5_EMAC_PTP_FREE_CLK 26
+#define AGILEX5_GPIO_DB_FREE_CLK 27
+#define AGILEX5_S2F_USER0_FREE_CLK 28
+#define AGILEX5_S2F_USER1_FREE_CLK 29
+#define AGILEX5_PSI_REF_FREE_CLK 30
+#define AGILEX5_USB31_FREE_CLK 31
+
+/* Gate clocks */
+#define AGILEX5_CORE0_CLK 32
+#define AGILEX5_CORE1_CLK 33
+#define AGILEX5_CORE2_CLK 34
+#define AGILEX5_CORE3_CLK 35
+#define AGILEX5_MPU_CLK 36
+#define AGILEX5_MPU_PERIPH_CLK 37
+#define AGILEX5_MPU_CCU_CLK 38
+#define AGILEX5_L4_MAIN_CLK 39
+#define AGILEX5_L4_MP_CLK 40
+#define AGILEX5_L4_SYS_FREE_CLK 41
+#define AGILEX5_L4_SP_CLK 42
+#define AGILEX5_CS_AT_CLK 43
+#define AGILEX5_CS_TRACE_CLK 44
+#define AGILEX5_CS_PDBG_CLK 45
+#define AGILEX5_EMAC0_CLK 46
+#define AGILEX5_EMAC1_CLK 47
+#define AGILEX5_EMAC2_CLK 48
+#define AGILEX5_EMAC_PTP_CLK 49
+#define AGILEX5_GPIO_DB_CLK 50
+#define AGILEX5_S2F_USER0_CLK 51
+#define AGILEX5_S2F_USER1_CLK 52
+#define AGILEX5_PSI_REF_CLK 53
+#define AGILEX5_USB31_SUSPEND_CLK 54
+#define AGILEX5_USB31_BUS_CLK_EARLY 55
+#define AGILEX5_USB2OTG_HCLK 56
+#define AGILEX5_SPIM_0_CLK 57
+#define AGILEX5_SPIM_1_CLK 58
+#define AGILEX5_SPIS_0_CLK 59
+#define AGILEX5_SPIS_1_CLK 60
+#define AGILEX5_DMA_CORE_CLK 61
+#define AGILEX5_DMA_HS_CLK 62
+#define AGILEX5_I3C_0_CORE_CLK 63
+#define AGILEX5_I3C_1_CORE_CLK 64
+#define AGILEX5_I2C_0_PCLK 65
+#define AGILEX5_I2C_1_PCLK 66
+#define AGILEX5_I2C_EMAC0_PCLK 67
+#define AGILEX5_I2C_EMAC1_PCLK 68
+#define AGILEX5_I2C_EMAC2_PCLK 69
+#define AGILEX5_UART_0_PCLK 70
+#define AGILEX5_UART_1_PCLK 71
+#define AGILEX5_SPTIMER_0_PCLK 72
+#define AGILEX5_SPTIMER_1_PCLK 73
+#define AGILEX5_DFI_CLK 74
+#define AGILEX5_NAND_NF_CLK 75
+#define AGILEX5_NAND_BCH_CLK 76
+#define AGILEX5_SDMMC_SDPHY_REG_CLK 77
+#define AGILEX5_SDMCLK 78
+#define AGILEX5_SOFTPHY_REG_PCLK 79
+#define AGILEX5_SOFTPHY_PHY_CLK 80
+#define AGILEX5_SOFTPHY_CTRL_CLK 81
+#define AGILEX5_NUM_CLKS 82
+
+#endif /* __DT_BINDINGS_INTEL_AGILEX5_CLKMGR_H */
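AGILEX5_NUM_CLKS is the provider-side table size, not a consumer ID. A consumer references the clock manager with a single cell; a minimal sketch, with the &clkmgr label and the UART address assumed:

    #include <dt-bindings/clock/intel,agilex5-clkmgr.h>

    uart0: serial@10c02000 {            /* address illustrative */
            compatible = "snps,dw-apb-uart";
            reg = <0x10c02000 0x100>;
            reg-shift = <2>;
            reg-io-width = <4>;
            clocks = <&clkmgr AGILEX5_L4_SP_CLK>;
    };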
diff --git a/include/dt-bindings/clk/lochnagar.h b/include/dt-bindings/clock/lochnagar.h
index 8fa20551ff17..8fa20551ff17 100644
--- a/include/dt-bindings/clk/lochnagar.h
+++ b/include/dt-bindings/clock/lochnagar.h
diff --git a/include/dt-bindings/clock/loongson,ls1x-clk.h b/include/dt-bindings/clock/loongson,ls1x-clk.h
new file mode 100644
index 000000000000..d400e9ac6002
--- /dev/null
+++ b/include/dt-bindings/clock/loongson,ls1x-clk.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Loongson-1 clock tree IDs
+ *
+ * Copyright (C) 2023 Keguang Zhang <keguang.zhang@gmail.com>
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_LS1X_CLK_H__
+#define __DT_BINDINGS_CLOCK_LS1X_CLK_H__
+
+#define LS1X_CLKID_PLL 0
+#define LS1X_CLKID_CPU 1
+#define LS1X_CLKID_DC 2
+#define LS1X_CLKID_AHB 3
+#define LS1X_CLKID_APB 4
+
+#define CLK_NR_CLKS (LS1X_CLKID_APB + 1)
+
+#endif /* __DT_BINDINGS_CLOCK_LS1X_CLK_H__ */
diff --git a/include/dt-bindings/clock/loongson,ls2k-clk.h b/include/dt-bindings/clock/loongson,ls2k-clk.h
new file mode 100644
index 000000000000..8cbb86b2cf1e
--- /dev/null
+++ b/include/dt-bindings/clock/loongson,ls2k-clk.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Author: Yinbo Zhu <zhuyinbo@loongson.cn>
+ * Copyright (C) 2022-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_LOONGSON2_H
+#define __DT_BINDINGS_CLOCK_LOONGSON2_H
+
+#define LOONGSON2_REF_100M 0
+#define LOONGSON2_NODE_PLL 1
+#define LOONGSON2_DDR_PLL 2
+#define LOONGSON2_DC_PLL 3
+#define LOONGSON2_PIX0_PLL 4
+#define LOONGSON2_PIX1_PLL 5
+#define LOONGSON2_NODE_CLK 6
+#define LOONGSON2_HDA_CLK 7
+#define LOONGSON2_GPU_CLK 8
+#define LOONGSON2_DDR_CLK 9
+#define LOONGSON2_GMAC_CLK 10
+#define LOONGSON2_DC_CLK 11
+#define LOONGSON2_APB_CLK 12
+#define LOONGSON2_USB_CLK 13
+#define LOONGSON2_SATA_CLK 14
+#define LOONGSON2_PIX0_CLK 15
+#define LOONGSON2_PIX1_CLK 16
+#define LOONGSON2_BOOT_CLK 17
+#define LOONGSON2_OUT0_GATE 18
+#define LOONGSON2_GMAC_GATE 19
+#define LOONGSON2_RIO_GATE 20
+#define LOONGSON2_DC_GATE 21
+#define LOONGSON2_GPU_GATE 22
+#define LOONGSON2_DDR_GATE 23
+#define LOONGSON2_HDA_GATE 24
+#define LOONGSON2_NODE_GATE 25
+#define LOONGSON2_EMMC_GATE 26
+#define LOONGSON2_PIX0_GATE 27
+#define LOONGSON2_PIX1_GATE 28
+#define LOONGSON2_OUT0_CLK 29
+#define LOONGSON2_RIO_CLK 30
+#define LOONGSON2_EMMC_CLK 31
+#define LOONGSON2_DES_CLK 32
+#define LOONGSON2_I2S_CLK 33
+#define LOONGSON2_MISC_CLK 34
+
+#define LS2K0300_CLK_STABLE 0
+#define LS2K0300_NODE_PLL 1
+#define LS2K0300_DDR_PLL 2
+#define LS2K0300_PIX_PLL 3
+#define LS2K0300_CLK_THSENS 4
+#define LS2K0300_CLK_NODE_DIV 5
+#define LS2K0300_CLK_NODE_PLL_GATE 6
+#define LS2K0300_CLK_NODE_SCALE 7
+#define LS2K0300_CLK_NODE_GATE 8
+#define LS2K0300_CLK_GMAC_DIV 9
+#define LS2K0300_CLK_GMAC_GATE 10
+#define LS2K0300_CLK_I2S_DIV 11
+#define LS2K0300_CLK_I2S_SCALE 12
+#define LS2K0300_CLK_I2S_GATE 13
+#define LS2K0300_CLK_DDR_DIV 14
+#define LS2K0300_CLK_DDR_GATE 15
+#define LS2K0300_CLK_NET_DIV 16
+#define LS2K0300_CLK_NET_GATE 17
+#define LS2K0300_CLK_DEV_DIV 18
+#define LS2K0300_CLK_DEV_GATE 19
+#define LS2K0300_CLK_PIX_DIV 20
+#define LS2K0300_CLK_PIX_PLL_GATE 21
+#define LS2K0300_CLK_PIX_SCALE 22
+#define LS2K0300_CLK_PIX_GATE 23
+#define LS2K0300_CLK_GMACBP_DIV 24
+#define LS2K0300_CLK_GMACBP_GATE 25
+#define LS2K0300_CLK_USB_SCALE 26
+#define LS2K0300_CLK_USB_GATE 27
+#define LS2K0300_CLK_APB_SCALE 28
+#define LS2K0300_CLK_APB_GATE 29
+#define LS2K0300_CLK_BOOT_SCALE 30
+#define LS2K0300_CLK_BOOT_GATE 31
+#define LS2K0300_CLK_SDIO_SCALE 32
+#define LS2K0300_CLK_SDIO_GATE 33
+#define LS2K0300_CLK_GMAC_IN 34
+
+#endif
diff --git a/include/dt-bindings/clock/marvell,mmp2-audio.h b/include/dt-bindings/clock/marvell,mmp2-audio.h
index 20664776f497..9653e04dedc3 100644
--- a/include/dt-bindings/clock/marvell,mmp2-audio.h
+++ b/include/dt-bindings/clock/marvell,mmp2-audio.h
@@ -6,5 +6,4 @@
#define MMP2_CLK_AUDIO_SSPA0 1
#define MMP2_CLK_AUDIO_SSPA1 2
-#define MMP2_CLK_AUDIO_NR_CLKS 3
#endif
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index 87f5ad5df72f..88c2d716476f 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -32,7 +32,7 @@
#define MMP2_CLK_I2S0 31
#define MMP2_CLK_I2S1 32
-/* apb periphrals */
+/* apb peripherals */
#define MMP2_CLK_TWSI0 60
#define MMP2_CLK_TWSI1 61
#define MMP2_CLK_TWSI2 62
@@ -60,7 +60,7 @@
#define MMP3_CLK_THERMAL2 84
#define MMP3_CLK_THERMAL3 85
-/* axi periphrals */
+/* axi peripherals */
#define MMP2_CLK_SDH0 101
#define MMP2_CLK_SDH1 102
#define MMP2_CLK_SDH2 103
@@ -91,5 +91,4 @@
#define MMP3_CLK_SDH4 126
#define MMP2_CLK_AUDIO 127
-#define MMP2_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/marvell,pxa168.h b/include/dt-bindings/clock/marvell,pxa168.h
index caf90436b848..d1bb59187e1d 100644
--- a/include/dt-bindings/clock/marvell,pxa168.h
+++ b/include/dt-bindings/clock/marvell,pxa168.h
@@ -20,10 +20,13 @@
#define PXA168_CLK_PLL1_2_1_5 19
#define PXA168_CLK_PLL1_3_16 20
#define PXA168_CLK_PLL1_192 21
+#define PXA168_CLK_PLL1_2_1_10 22
+#define PXA168_CLK_PLL1_2_3_16 23
#define PXA168_CLK_UART_PLL 27
#define PXA168_CLK_USB_PLL 28
+#define PXA168_CLK_CLK32_2 50
-/* apb periphrals */
+/* apb peripherals */
#define PXA168_CLK_TWSI0 60
#define PXA168_CLK_TWSI1 61
#define PXA168_CLK_TWSI2 62
@@ -45,7 +48,7 @@
#define PXA168_CLK_SSP4 78
#define PXA168_CLK_TIMER 79
-/* axi periphrals */
+/* axi peripherals */
#define PXA168_CLK_DFC 100
#define PXA168_CLK_SDH0 101
#define PXA168_CLK_SDH1 102
@@ -56,6 +59,8 @@
#define PXA168_CLK_CCIC0 107
#define PXA168_CLK_CCIC0_PHY 108
#define PXA168_CLK_CCIC0_SPHY 109
+#define PXA168_CLK_SDH3 110
+#define PXA168_CLK_SDH01_AXI 111
+#define PXA168_CLK_SDH23_AXI 112
-#define PXA168_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/marvell,pxa1908.h b/include/dt-bindings/clock/marvell,pxa1908.h
new file mode 100644
index 000000000000..fb15b0d0cd4c
--- /dev/null
+++ b/include/dt-bindings/clock/marvell,pxa1908.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+#ifndef __DTS_MARVELL_PXA1908_CLOCK_H
+#define __DTS_MARVELL_PXA1908_CLOCK_H
+
+/* plls */
+#define PXA1908_CLK_CLK32 1
+#define PXA1908_CLK_VCTCXO 2
+#define PXA1908_CLK_PLL1_624 3
+#define PXA1908_CLK_PLL1_416 4
+#define PXA1908_CLK_PLL1_499 5
+#define PXA1908_CLK_PLL1_832 6
+#define PXA1908_CLK_PLL1_1248 7
+#define PXA1908_CLK_PLL1_D2 8
+#define PXA1908_CLK_PLL1_D4 9
+#define PXA1908_CLK_PLL1_D8 10
+#define PXA1908_CLK_PLL1_D16 11
+#define PXA1908_CLK_PLL1_D6 12
+#define PXA1908_CLK_PLL1_D12 13
+#define PXA1908_CLK_PLL1_D24 14
+#define PXA1908_CLK_PLL1_D48 15
+#define PXA1908_CLK_PLL1_D96 16
+#define PXA1908_CLK_PLL1_D13 17
+#define PXA1908_CLK_PLL1_32 18
+#define PXA1908_CLK_PLL1_208 19
+#define PXA1908_CLK_PLL1_117 20
+#define PXA1908_CLK_PLL1_416_GATE 21
+#define PXA1908_CLK_PLL1_624_GATE 22
+#define PXA1908_CLK_PLL1_832_GATE 23
+#define PXA1908_CLK_PLL1_1248_GATE 24
+#define PXA1908_CLK_PLL1_D2_GATE 25
+#define PXA1908_CLK_PLL1_499_EN 26
+#define PXA1908_CLK_PLL2VCO 27
+#define PXA1908_CLK_PLL2 28
+#define PXA1908_CLK_PLL2P 29
+#define PXA1908_CLK_PLL2VCODIV3 30
+#define PXA1908_CLK_PLL3VCO 31
+#define PXA1908_CLK_PLL3 32
+#define PXA1908_CLK_PLL3P 33
+#define PXA1908_CLK_PLL3VCODIV3 34
+#define PXA1908_CLK_PLL4VCO 35
+#define PXA1908_CLK_PLL4 36
+#define PXA1908_CLK_PLL4P 37
+#define PXA1908_CLK_PLL4VCODIV3 38
+
+/* apb (apbc) peripherals */
+#define PXA1908_CLK_UART0 1
+#define PXA1908_CLK_UART1 2
+#define PXA1908_CLK_GPIO 3
+#define PXA1908_CLK_PWM0 4
+#define PXA1908_CLK_PWM1 5
+#define PXA1908_CLK_PWM2 6
+#define PXA1908_CLK_PWM3 7
+#define PXA1908_CLK_SSP0 8
+#define PXA1908_CLK_SSP1 9
+#define PXA1908_CLK_IPC_RST 10
+#define PXA1908_CLK_RTC 11
+#define PXA1908_CLK_TWSI0 12
+#define PXA1908_CLK_KPC 13
+#define PXA1908_CLK_SWJTAG 14
+#define PXA1908_CLK_SSP2 15
+#define PXA1908_CLK_TWSI1 16
+#define PXA1908_CLK_THERMAL 17
+#define PXA1908_CLK_TWSI3 18
+
+/* apb (apbcp) peripherals */
+#define PXA1908_CLK_UART2 1
+#define PXA1908_CLK_TWSI2 2
+#define PXA1908_CLK_AICER 3
+
+/* axi (apmu) peripherals */
+#define PXA1908_CLK_CCIC1 1
+#define PXA1908_CLK_ISP 2
+#define PXA1908_CLK_DSI1 3
+#define PXA1908_CLK_DISP1 4
+#define PXA1908_CLK_CCIC0 5
+#define PXA1908_CLK_SDH0 6
+#define PXA1908_CLK_SDH1 7
+#define PXA1908_CLK_USB 8
+#define PXA1908_CLK_NF 9
+#define PXA1908_CLK_CORE_DEBUG 10
+#define PXA1908_CLK_VPU 11
+#define PXA1908_CLK_GC 12
+#define PXA1908_CLK_SDH2 13
+#define PXA1908_CLK_GC2D 14
+#define PXA1908_CLK_TRACE 15
+#define PXA1908_CLK_DVC_DFC_DEBUG 16
+
+#endif
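The numbering above deliberately restarts in each block: apbc, apbcp, and apmu are three separate clock controllers, so equal indices never collide as long as the consumer points at the right provider. Hedged sketches with illustrative labels and addresses:

    #include <dt-bindings/clock/marvell,pxa1908.h>

    uart0: serial@d4017000 {
            clocks = <&apbc PXA1908_CLK_UART0>;     /* apbc index space */
    };

    uart2: serial@d4036000 {
            clocks = <&apbcp PXA1908_CLK_UART2>;    /* apbcp index space */
    };

    sdh0: mmc@d4280000 {
            clocks = <&apmu PXA1908_CLK_SDH0>;      /* apmu index space */
    };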
diff --git a/include/dt-bindings/clock/marvell,pxa1928.h b/include/dt-bindings/clock/marvell,pxa1928.h
index 5dca4820297f..0c708d3d3314 100644
--- a/include/dt-bindings/clock/marvell,pxa1928.h
+++ b/include/dt-bindings/clock/marvell,pxa1928.h
@@ -36,7 +36,6 @@
#define PXA1928_CLK_THSENS_CPU 0x26
#define PXA1928_CLK_THSENS_VPU 0x27
#define PXA1928_CLK_THSENS_GC 0x28
-#define PXA1928_APBC_NR_CLKS 0x30
/* axi peripherals */
@@ -53,6 +52,4 @@
#define PXA1928_CLK_GC3D 0x5d
#define PXA1928_CLK_GC2D 0x5f
-#define PXA1928_APMU_NR_CLKS 0x60
-
#endif
diff --git a/include/dt-bindings/clock/marvell,pxa910.h b/include/dt-bindings/clock/marvell,pxa910.h
index 7bf46238946e..6caa231de0c1 100644
--- a/include/dt-bindings/clock/marvell,pxa910.h
+++ b/include/dt-bindings/clock/marvell,pxa910.h
@@ -23,7 +23,7 @@
#define PXA910_CLK_UART_PLL 27
#define PXA910_CLK_USB_PLL 28
-/* apb periphrals */
+/* apb peripherals */
#define PXA910_CLK_TWSI0 60
#define PXA910_CLK_TWSI1 61
#define PXA910_CLK_TWSI2 62
@@ -43,7 +43,7 @@
#define PXA910_CLK_TIMER0 76
#define PXA910_CLK_TIMER1 77
-/* axi periphrals */
+/* axi peripherals */
#define PXA910_CLK_DFC 100
#define PXA910_CLK_SDH0 101
#define PXA910_CLK_SDH1 102
@@ -55,5 +55,4 @@
#define PXA910_CLK_CCIC0_PHY 108
#define PXA910_CLK_CCIC0_SPHY 109
-#define PXA910_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/mediatek,mt6735-apmixedsys.h b/include/dt-bindings/clock/mediatek,mt6735-apmixedsys.h
new file mode 100644
index 000000000000..b4705204409c
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6735-apmixedsys.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MT6735_APMIXEDSYS_H
+#define _DT_BINDINGS_CLK_MT6735_APMIXEDSYS_H
+
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIVPLL 2
+#define CLK_APMIXED_MMPLL 3
+#define CLK_APMIXED_MSDCPLL 4
+#define CLK_APMIXED_VENCPLL 5
+#define CLK_APMIXED_TVDPLL 6
+#define CLK_APMIXED_APLL1 7
+#define CLK_APMIXED_APLL2 8
+
+#endif
diff --git a/include/dt-bindings/clock/mediatek,mt6735-imgsys.h b/include/dt-bindings/clock/mediatek,mt6735-imgsys.h
new file mode 100644
index 000000000000..f250c26c5eb4
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6735-imgsys.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MT6735_IMGSYS_H
+#define _DT_BINDINGS_CLK_MT6735_IMGSYS_H
+
+#define CLK_IMG_SMI_LARB2 0
+#define CLK_IMG_CAM_SMI 1
+#define CLK_IMG_CAM_CAM 2
+#define CLK_IMG_SEN_TG 3
+#define CLK_IMG_SEN_CAM 4
+#define CLK_IMG_CAM_SV 5
+#define CLK_IMG_SUFOD 6
+#define CLK_IMG_FD 7
+
+#endif /* _DT_BINDINGS_CLK_MT6735_IMGSYS_H */
diff --git a/include/dt-bindings/clock/mediatek,mt6735-infracfg.h b/include/dt-bindings/clock/mediatek,mt6735-infracfg.h
new file mode 100644
index 000000000000..d8dd51e15637
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6735-infracfg.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MT6735_INFRACFG_H
+#define _DT_BINDINGS_CLK_MT6735_INFRACFG_H
+
+#define CLK_INFRA_DBG 0
+#define CLK_INFRA_GCE 1
+#define CLK_INFRA_TRBG 2
+#define CLK_INFRA_CPUM 3
+#define CLK_INFRA_DEVAPC 4
+#define CLK_INFRA_AUDIO 5
+#define CLK_INFRA_GCPU 6
+#define CLK_INFRA_L2C_SRAM 7
+#define CLK_INFRA_M4U 8
+#define CLK_INFRA_CLDMA 9
+#define CLK_INFRA_CONNMCU_BUS 10
+#define CLK_INFRA_KP 11
+#define CLK_INFRA_APXGPT 12
+#define CLK_INFRA_SEJ 13
+#define CLK_INFRA_CCIF0_AP 14
+#define CLK_INFRA_CCIF1_AP 15
+#define CLK_INFRA_PMIC_SPI 16
+#define CLK_INFRA_PMIC_WRAP 17
+
+#endif
diff --git a/include/dt-bindings/clock/mediatek,mt6735-mfgcfg.h b/include/dt-bindings/clock/mediatek,mt6735-mfgcfg.h
new file mode 100644
index 000000000000..d2d99a48348a
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6735-mfgcfg.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MT6735_MFGCFG_H
+#define _DT_BINDINGS_CLK_MT6735_MFGCFG_H
+
+#define CLK_MFG_BG3D 0
+
+#endif /* _DT_BINDINGS_CLK_MT6735_MFGCFG_H */
diff --git a/include/dt-bindings/clock/mediatek,mt6735-pericfg.h b/include/dt-bindings/clock/mediatek,mt6735-pericfg.h
new file mode 100644
index 000000000000..16bc21bbd95b
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6735-pericfg.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MT6735_PERICFG_H
+#define _DT_BINDINGS_CLK_MT6735_PERICFG_H
+
+#define CLK_PERI_DISP_PWM 0
+#define CLK_PERI_THERM 1
+#define CLK_PERI_PWM1 2
+#define CLK_PERI_PWM2 3
+#define CLK_PERI_PWM3 4
+#define CLK_PERI_PWM4 5
+#define CLK_PERI_PWM5 6
+#define CLK_PERI_PWM6 7
+#define CLK_PERI_PWM7 8
+#define CLK_PERI_PWM 9
+#define CLK_PERI_USB0 10
+#define CLK_PERI_IRDA 11
+#define CLK_PERI_APDMA 12
+#define CLK_PERI_MSDC30_0 13
+#define CLK_PERI_MSDC30_1 14
+#define CLK_PERI_MSDC30_2 15
+#define CLK_PERI_MSDC30_3 16
+#define CLK_PERI_UART0 17
+#define CLK_PERI_UART1 18
+#define CLK_PERI_UART2 19
+#define CLK_PERI_UART3 20
+#define CLK_PERI_UART4 21
+#define CLK_PERI_BTIF 22
+#define CLK_PERI_I2C0 23
+#define CLK_PERI_I2C1 24
+#define CLK_PERI_I2C2 25
+#define CLK_PERI_I2C3 26
+#define CLK_PERI_AUXADC 27
+#define CLK_PERI_SPI0 28
+#define CLK_PERI_IRTX 29
+
+#endif
diff --git a/include/dt-bindings/clock/mediatek,mt6735-topckgen.h b/include/dt-bindings/clock/mediatek,mt6735-topckgen.h
new file mode 100644
index 000000000000..d4b1e113cc0a
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6735-topckgen.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MT6735_TOPCKGEN_H
+#define _DT_BINDINGS_CLK_MT6735_TOPCKGEN_H
+
+#define CLK_TOP_AD_SYS_26M_CK 0
+#define CLK_TOP_CLKPH_MCK_O 1
+#define CLK_TOP_DMPLL 2
+#define CLK_TOP_DPI_CK 3
+#define CLK_TOP_WHPLL_AUDIO_CK 4
+
+#define CLK_TOP_SYSPLL_D2 5
+#define CLK_TOP_SYSPLL_D3 6
+#define CLK_TOP_SYSPLL_D5 7
+#define CLK_TOP_SYSPLL1_D2 8
+#define CLK_TOP_SYSPLL1_D4 9
+#define CLK_TOP_SYSPLL1_D8 10
+#define CLK_TOP_SYSPLL1_D16 11
+#define CLK_TOP_SYSPLL2_D2 12
+#define CLK_TOP_SYSPLL2_D4 13
+#define CLK_TOP_SYSPLL3_D2 14
+#define CLK_TOP_SYSPLL3_D4 15
+#define CLK_TOP_SYSPLL4_D2 16
+#define CLK_TOP_SYSPLL4_D4 17
+#define CLK_TOP_UNIVPLL_D2 18
+#define CLK_TOP_UNIVPLL_D3 19
+#define CLK_TOP_UNIVPLL_D5 20
+#define CLK_TOP_UNIVPLL_D26 21
+#define CLK_TOP_UNIVPLL1_D2 22
+#define CLK_TOP_UNIVPLL1_D4 23
+#define CLK_TOP_UNIVPLL1_D8 24
+#define CLK_TOP_UNIVPLL2_D2 25
+#define CLK_TOP_UNIVPLL2_D4 26
+#define CLK_TOP_UNIVPLL2_D8 27
+#define CLK_TOP_UNIVPLL3_D2 28
+#define CLK_TOP_UNIVPLL3_D4 29
+#define CLK_TOP_MSDCPLL_D2 30
+#define CLK_TOP_MSDCPLL_D4 31
+#define CLK_TOP_MSDCPLL_D8 32
+#define CLK_TOP_MSDCPLL_D16 33
+#define CLK_TOP_VENCPLL_D3 34
+#define CLK_TOP_TVDPLL_D2 35
+#define CLK_TOP_TVDPLL_D4 36
+#define CLK_TOP_DMPLL_D2 37
+#define CLK_TOP_DMPLL_D4 38
+#define CLK_TOP_DMPLL_D8 39
+#define CLK_TOP_AD_SYS_26M_D2 40
+
+#define CLK_TOP_AXI_SEL 41
+#define CLK_TOP_MEM_SEL 42
+#define CLK_TOP_DDRPHY_SEL 43
+#define CLK_TOP_MM_SEL 44
+#define CLK_TOP_PWM_SEL 45
+#define CLK_TOP_VDEC_SEL 46
+#define CLK_TOP_MFG_SEL 47
+#define CLK_TOP_CAMTG_SEL 48
+#define CLK_TOP_UART_SEL 49
+#define CLK_TOP_SPI_SEL 50
+#define CLK_TOP_USB20_SEL 51
+#define CLK_TOP_MSDC50_0_SEL 52
+#define CLK_TOP_MSDC30_0_SEL 53
+#define CLK_TOP_MSDC30_1_SEL 54
+#define CLK_TOP_MSDC30_2_SEL 55
+#define CLK_TOP_MSDC30_3_SEL 56
+#define CLK_TOP_AUDIO_SEL 57
+#define CLK_TOP_AUDINTBUS_SEL 58
+#define CLK_TOP_PMICSPI_SEL 59
+#define CLK_TOP_SCP_SEL 60
+#define CLK_TOP_ATB_SEL 61
+#define CLK_TOP_DPI0_SEL 62
+#define CLK_TOP_SCAM_SEL 63
+#define CLK_TOP_MFG13M_SEL 64
+#define CLK_TOP_AUD1_SEL 65
+#define CLK_TOP_AUD2_SEL 66
+#define CLK_TOP_IRDA_SEL 67
+#define CLK_TOP_IRTX_SEL 68
+#define CLK_TOP_DISPPWM_SEL 69
+
+#endif
diff --git a/include/dt-bindings/clock/mediatek,mt6735-vdecsys.h b/include/dt-bindings/clock/mediatek,mt6735-vdecsys.h
new file mode 100644
index 000000000000..f94cec10c89f
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6735-vdecsys.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MT6735_VDECSYS_H
+#define _DT_BINDINGS_CLK_MT6735_VDECSYS_H
+
+#define CLK_VDEC_VDEC 0
+#define CLK_VDEC_SMI_LARB1 1
+
+#endif /* _DT_BINDINGS_CLK_MT6735_VDECSYS_H */
diff --git a/include/dt-bindings/clock/mediatek,mt6735-vencsys.h b/include/dt-bindings/clock/mediatek,mt6735-vencsys.h
new file mode 100644
index 000000000000..e5a9cb4f269f
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6735-vencsys.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MT6735_VENCSYS_H
+#define _DT_BINDINGS_CLK_MT6735_VENCSYS_H
+
+#define CLK_VENC_SMI_LARB3 0
+#define CLK_VENC_VENC 1
+#define CLK_VENC_JPGENC 2
+#define CLK_VENC_JPGDEC 3
+
+#endif /* _DT_BINDINGS_CLK_MT6735_VENCSYS_H */
diff --git a/include/dt-bindings/clock/mediatek,mt6795-clk.h b/include/dt-bindings/clock/mediatek,mt6795-clk.h
new file mode 100644
index 000000000000..9902906ac902
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt6795-clk.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT6795_H
+#define _DT_BINDINGS_CLK_MT6795_H
+
+/* TOPCKGEN */
+#define CLK_TOP_ADSYS_26M 0
+#define CLK_TOP_CLKPH_MCK_O 1
+#define CLK_TOP_USB_SYSPLL_125M 2
+#define CLK_TOP_DSI0_DIG 3
+#define CLK_TOP_DSI1_DIG 4
+#define CLK_TOP_ARMCA53PLL_754M 5
+#define CLK_TOP_ARMCA53PLL_502M 6
+#define CLK_TOP_MAIN_H546M 7
+#define CLK_TOP_MAIN_H364M 8
+#define CLK_TOP_MAIN_H218P4M 9
+#define CLK_TOP_MAIN_H156M 10
+#define CLK_TOP_TVDPLL_445P5M 11
+#define CLK_TOP_TVDPLL_594M 12
+#define CLK_TOP_UNIV_624M 13
+#define CLK_TOP_UNIV_416M 14
+#define CLK_TOP_UNIV_249P6M 15
+#define CLK_TOP_UNIV_178P3M 16
+#define CLK_TOP_UNIV_48M 17
+#define CLK_TOP_CLKRTC_EXT 18
+#define CLK_TOP_CLKRTC_INT 19
+#define CLK_TOP_FPC 20
+#define CLK_TOP_HDMITXPLL_D2 21
+#define CLK_TOP_HDMITXPLL_D3 22
+#define CLK_TOP_ARMCA53PLL_D2 23
+#define CLK_TOP_ARMCA53PLL_D3 24
+#define CLK_TOP_APLL1 25
+#define CLK_TOP_APLL2 26
+#define CLK_TOP_DMPLL 27
+#define CLK_TOP_DMPLL_D2 28
+#define CLK_TOP_DMPLL_D4 29
+#define CLK_TOP_DMPLL_D8 30
+#define CLK_TOP_DMPLL_D16 31
+#define CLK_TOP_MMPLL 32
+#define CLK_TOP_MMPLL_D2 33
+#define CLK_TOP_MSDCPLL 34
+#define CLK_TOP_MSDCPLL_D2 35
+#define CLK_TOP_MSDCPLL_D4 36
+#define CLK_TOP_MSDCPLL2 37
+#define CLK_TOP_MSDCPLL2_D2 38
+#define CLK_TOP_MSDCPLL2_D4 39
+#define CLK_TOP_SYSPLL_D2 40
+#define CLK_TOP_SYSPLL1_D2 41
+#define CLK_TOP_SYSPLL1_D4 42
+#define CLK_TOP_SYSPLL1_D8 43
+#define CLK_TOP_SYSPLL1_D16 44
+#define CLK_TOP_SYSPLL_D3 45
+#define CLK_TOP_SYSPLL2_D2 46
+#define CLK_TOP_SYSPLL2_D4 47
+#define CLK_TOP_SYSPLL_D5 48
+#define CLK_TOP_SYSPLL3_D2 49
+#define CLK_TOP_SYSPLL3_D4 50
+#define CLK_TOP_SYSPLL_D7 51
+#define CLK_TOP_SYSPLL4_D2 52
+#define CLK_TOP_SYSPLL4_D4 53
+#define CLK_TOP_TVDPLL 54
+#define CLK_TOP_TVDPLL_D2 55
+#define CLK_TOP_TVDPLL_D4 56
+#define CLK_TOP_TVDPLL_D8 57
+#define CLK_TOP_TVDPLL_D16 58
+#define CLK_TOP_UNIVPLL_D2 59
+#define CLK_TOP_UNIVPLL1_D2 60
+#define CLK_TOP_UNIVPLL1_D4 61
+#define CLK_TOP_UNIVPLL1_D8 62
+#define CLK_TOP_UNIVPLL_D3 63
+#define CLK_TOP_UNIVPLL2_D2 64
+#define CLK_TOP_UNIVPLL2_D4 65
+#define CLK_TOP_UNIVPLL2_D8 66
+#define CLK_TOP_UNIVPLL_D5 67
+#define CLK_TOP_UNIVPLL3_D2 68
+#define CLK_TOP_UNIVPLL3_D4 69
+#define CLK_TOP_UNIVPLL3_D8 70
+#define CLK_TOP_UNIVPLL_D7 71
+#define CLK_TOP_UNIVPLL_D26 72
+#define CLK_TOP_UNIVPLL_D52 73
+#define CLK_TOP_VCODECPLL 74
+#define CLK_TOP_VCODECPLL_370P5 75
+#define CLK_TOP_VENCPLL 76
+#define CLK_TOP_VENCPLL_D2 77
+#define CLK_TOP_VENCPLL_D4 78
+#define CLK_TOP_AXI_SEL 79
+#define CLK_TOP_MEM_SEL 80
+#define CLK_TOP_DDRPHYCFG_SEL 81
+#define CLK_TOP_MM_SEL 82
+#define CLK_TOP_PWM_SEL 83
+#define CLK_TOP_VDEC_SEL 84
+#define CLK_TOP_VENC_SEL 85
+#define CLK_TOP_MFG_SEL 86
+#define CLK_TOP_CAMTG_SEL 87
+#define CLK_TOP_UART_SEL 88
+#define CLK_TOP_SPI_SEL 89
+#define CLK_TOP_USB20_SEL 90
+#define CLK_TOP_USB30_SEL 91
+#define CLK_TOP_MSDC50_0_H_SEL 92
+#define CLK_TOP_MSDC50_0_SEL 93
+#define CLK_TOP_MSDC30_1_SEL 94
+#define CLK_TOP_MSDC30_2_SEL 95
+#define CLK_TOP_MSDC30_3_SEL 96
+#define CLK_TOP_AUDIO_SEL 97
+#define CLK_TOP_AUD_INTBUS_SEL 98
+#define CLK_TOP_PMICSPI_SEL 99
+#define CLK_TOP_SCP_SEL 100
+#define CLK_TOP_MJC_SEL 101
+#define CLK_TOP_DPI0_SEL 102
+#define CLK_TOP_IRDA_SEL 103
+#define CLK_TOP_CCI400_SEL 104
+#define CLK_TOP_AUD_1_SEL 105
+#define CLK_TOP_AUD_2_SEL 106
+#define CLK_TOP_MEM_MFG_IN_SEL 107
+#define CLK_TOP_AXI_MFG_IN_SEL 108
+#define CLK_TOP_SCAM_SEL 109
+#define CLK_TOP_I2S0_M_SEL 110
+#define CLK_TOP_I2S1_M_SEL 111
+#define CLK_TOP_I2S2_M_SEL 112
+#define CLK_TOP_I2S3_M_SEL 113
+#define CLK_TOP_I2S3_B_SEL 114
+#define CLK_TOP_APLL1_DIV0 115
+#define CLK_TOP_APLL1_DIV1 116
+#define CLK_TOP_APLL1_DIV2 117
+#define CLK_TOP_APLL1_DIV3 118
+#define CLK_TOP_APLL1_DIV4 119
+#define CLK_TOP_APLL1_DIV5 120
+#define CLK_TOP_APLL2_DIV0 121
+#define CLK_TOP_APLL2_DIV1 122
+#define CLK_TOP_APLL2_DIV2 123
+#define CLK_TOP_APLL2_DIV3 124
+#define CLK_TOP_APLL2_DIV4 125
+#define CLK_TOP_APLL2_DIV5 126
+#define CLK_TOP_NR_CLK 127
+
+/* APMIXED_SYS */
+#define CLK_APMIXED_ARMCA53PLL 0
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIVPLL 2
+#define CLK_APMIXED_MMPLL 3
+#define CLK_APMIXED_MSDCPLL 4
+#define CLK_APMIXED_VENCPLL 5
+#define CLK_APMIXED_TVDPLL 6
+#define CLK_APMIXED_MPLL 7
+#define CLK_APMIXED_VCODECPLL 8
+#define CLK_APMIXED_APLL1 9
+#define CLK_APMIXED_APLL2 10
+#define CLK_APMIXED_REF2USB_TX 11
+#define CLK_APMIXED_NR_CLK 12
+
+/* INFRA_SYS */
+#define CLK_INFRA_DBGCLK 0
+#define CLK_INFRA_SMI 1
+#define CLK_INFRA_AUDIO 2
+#define CLK_INFRA_GCE 3
+#define CLK_INFRA_L2C_SRAM 4
+#define CLK_INFRA_M4U 5
+#define CLK_INFRA_MD1MCU 6
+#define CLK_INFRA_MD1BUS 7
+#define CLK_INFRA_MD1DBB 8
+#define CLK_INFRA_DEVICE_APC 9
+#define CLK_INFRA_TRNG 10
+#define CLK_INFRA_MD1LTE 11
+#define CLK_INFRA_CPUM 12
+#define CLK_INFRA_KP 13
+#define CLK_INFRA_CA53_C0_SEL 14
+#define CLK_INFRA_CA53_C1_SEL 15
+#define CLK_INFRA_NR_CLK 16
+
+/* PERI_SYS */
+#define CLK_PERI_NFI 0
+#define CLK_PERI_THERM 1
+#define CLK_PERI_PWM1 2
+#define CLK_PERI_PWM2 3
+#define CLK_PERI_PWM3 4
+#define CLK_PERI_PWM4 5
+#define CLK_PERI_PWM5 6
+#define CLK_PERI_PWM6 7
+#define CLK_PERI_PWM7 8
+#define CLK_PERI_PWM 9
+#define CLK_PERI_USB0 10
+#define CLK_PERI_USB1 11
+#define CLK_PERI_AP_DMA 12
+#define CLK_PERI_MSDC30_0 13
+#define CLK_PERI_MSDC30_1 14
+#define CLK_PERI_MSDC30_2 15
+#define CLK_PERI_MSDC30_3 16
+#define CLK_PERI_NLI_ARB 17
+#define CLK_PERI_IRDA 18
+#define CLK_PERI_UART0 19
+#define CLK_PERI_UART1 20
+#define CLK_PERI_UART2 21
+#define CLK_PERI_UART3 22
+#define CLK_PERI_I2C0 23
+#define CLK_PERI_I2C1 24
+#define CLK_PERI_I2C2 25
+#define CLK_PERI_I2C3 26
+#define CLK_PERI_I2C4 27
+#define CLK_PERI_AUXADC 28
+#define CLK_PERI_SPI0 29
+#define CLK_PERI_UART0_SEL 30
+#define CLK_PERI_UART1_SEL 31
+#define CLK_PERI_UART2_SEL 32
+#define CLK_PERI_UART3_SEL 33
+#define CLK_PERI_NR_CLK 34
+
+/* MFG */
+#define CLK_MFG_BAXI 0
+#define CLK_MFG_BMEM 1
+#define CLK_MFG_BG3D 2
+#define CLK_MFG_B26M 3
+#define CLK_MFG_NR_CLK 4
+
+/* MM_SYS */
+#define CLK_MM_SMI_COMMON 0
+#define CLK_MM_SMI_LARB0 1
+#define CLK_MM_CAM_MDP 2
+#define CLK_MM_MDP_RDMA0 3
+#define CLK_MM_MDP_RDMA1 4
+#define CLK_MM_MDP_RSZ0 5
+#define CLK_MM_MDP_RSZ1 6
+#define CLK_MM_MDP_RSZ2 7
+#define CLK_MM_MDP_TDSHP0 8
+#define CLK_MM_MDP_TDSHP1 9
+#define CLK_MM_MDP_CROP 10
+#define CLK_MM_MDP_WDMA 11
+#define CLK_MM_MDP_WROT0 12
+#define CLK_MM_MDP_WROT1 13
+#define CLK_MM_FAKE_ENG 14
+#define CLK_MM_MUTEX_32K 15
+#define CLK_MM_DISP_OVL0 16
+#define CLK_MM_DISP_OVL1 17
+#define CLK_MM_DISP_RDMA0 18
+#define CLK_MM_DISP_RDMA1 19
+#define CLK_MM_DISP_RDMA2 20
+#define CLK_MM_DISP_WDMA0 21
+#define CLK_MM_DISP_WDMA1 22
+#define CLK_MM_DISP_COLOR0 23
+#define CLK_MM_DISP_COLOR1 24
+#define CLK_MM_DISP_AAL 25
+#define CLK_MM_DISP_GAMMA 26
+#define CLK_MM_DISP_UFOE 27
+#define CLK_MM_DISP_SPLIT0 28
+#define CLK_MM_DISP_SPLIT1 29
+#define CLK_MM_DISP_MERGE 30
+#define CLK_MM_DISP_OD 31
+#define CLK_MM_DISP_PWM0MM 32
+#define CLK_MM_DISP_PWM026M 33
+#define CLK_MM_DISP_PWM1MM 34
+#define CLK_MM_DISP_PWM126M 35
+#define CLK_MM_DSI0_ENGINE 36
+#define CLK_MM_DSI0_DIGITAL 37
+#define CLK_MM_DSI1_ENGINE 38
+#define CLK_MM_DSI1_DIGITAL 39
+#define CLK_MM_DPI_PIXEL 40
+#define CLK_MM_DPI_ENGINE 41
+#define CLK_MM_NR_CLK 42
+
+/* VDEC_SYS */
+#define CLK_VDEC_CKEN 0
+#define CLK_VDEC_LARB_CKEN 1
+#define CLK_VDEC_NR_CLK 2
+
+/* VENC_SYS */
+#define CLK_VENC_LARB 0
+#define CLK_VENC_VENC 1
+#define CLK_VENC_JPGENC 2
+#define CLK_VENC_JPGDEC 3
+#define CLK_VENC_NR_CLK 4
+
+#endif /* _DT_BINDINGS_CLK_MT6795_H */
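MediaTek consumers usually combine IDs from several of the blocks above in one node, one provider handle per block. A hedged sketch of an MSDC host pulling its bus clock from PERI_SYS and its source mux from TOPCKGEN; labels, address, and name list are assumptions:

    #include <dt-bindings/clock/mediatek,mt6795-clk.h>

    mmc0: mmc@11230000 {
            clocks = <&pericfg CLK_PERI_MSDC30_0>,
                     <&topckgen CLK_TOP_MSDC50_0_H_SEL>;
            clock-names = "source", "hclk";
    };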
diff --git a/include/dt-bindings/clock/mediatek,mt7981-clk.h b/include/dt-bindings/clock/mediatek,mt7981-clk.h
new file mode 100644
index 000000000000..192f8cefb589
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt7981-clk.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Wenzhen.Yu <wenzhen.yu@mediatek.com>
+ * Author: Jianhui Zhao <zhaojh329@gmail.com>
+ * Author: Daniel Golle <daniel@makrotopia.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7981_H
+#define _DT_BINDINGS_CLK_MT7981_H
+
+/* TOPCKGEN */
+#define CLK_TOP_CB_CKSQ_40M 0
+#define CLK_TOP_CB_M_416M 1
+#define CLK_TOP_CB_M_D2 2
+#define CLK_TOP_CB_M_D3 3
+#define CLK_TOP_M_D3_D2 4
+#define CLK_TOP_CB_M_D4 5
+#define CLK_TOP_CB_M_D8 6
+#define CLK_TOP_M_D8_D2 7
+#define CLK_TOP_CB_MM_720M 8
+#define CLK_TOP_CB_MM_D2 9
+#define CLK_TOP_CB_MM_D3 10
+#define CLK_TOP_CB_MM_D3_D5 11
+#define CLK_TOP_CB_MM_D4 12
+#define CLK_TOP_CB_MM_D6 13
+#define CLK_TOP_MM_D6_D2 14
+#define CLK_TOP_CB_MM_D8 15
+#define CLK_TOP_CB_APLL2_196M 16
+#define CLK_TOP_APLL2_D2 17
+#define CLK_TOP_APLL2_D4 18
+#define CLK_TOP_NET1_2500M 19
+#define CLK_TOP_CB_NET1_D4 20
+#define CLK_TOP_CB_NET1_D5 21
+#define CLK_TOP_NET1_D5_D2 22
+#define CLK_TOP_NET1_D5_D4 23
+#define CLK_TOP_CB_NET1_D8 24
+#define CLK_TOP_NET1_D8_D2 25
+#define CLK_TOP_NET1_D8_D4 26
+#define CLK_TOP_CB_NET2_800M 27
+#define CLK_TOP_CB_NET2_D2 28
+#define CLK_TOP_CB_NET2_D4 29
+#define CLK_TOP_NET2_D4_D2 30
+#define CLK_TOP_NET2_D4_D4 31
+#define CLK_TOP_CB_NET2_D6 32
+#define CLK_TOP_CB_WEDMCU_208M 33
+#define CLK_TOP_CB_SGM_325M 34
+#define CLK_TOP_CKSQ_40M_D2 35
+#define CLK_TOP_CB_RTC_32K 36
+#define CLK_TOP_CB_RTC_32P7K 37
+#define CLK_TOP_USB_TX250M 38
+#define CLK_TOP_FAUD 39
+#define CLK_TOP_NFI1X 40
+#define CLK_TOP_USB_EQ_RX250M 41
+#define CLK_TOP_USB_CDR_CK 42
+#define CLK_TOP_USB_LN0_CK 43
+#define CLK_TOP_SPINFI_BCK 44
+#define CLK_TOP_SPI 45
+#define CLK_TOP_SPIM_MST 46
+#define CLK_TOP_UART_BCK 47
+#define CLK_TOP_PWM_BCK 48
+#define CLK_TOP_I2C_BCK 49
+#define CLK_TOP_PEXTP_TL 50
+#define CLK_TOP_EMMC_208M 51
+#define CLK_TOP_EMMC_400M 52
+#define CLK_TOP_DRAMC_REF 53
+#define CLK_TOP_DRAMC_MD32 54
+#define CLK_TOP_SYSAXI 55
+#define CLK_TOP_SYSAPB 56
+#define CLK_TOP_ARM_DB_MAIN 57
+#define CLK_TOP_AP2CNN_HOST 58
+#define CLK_TOP_NETSYS 59
+#define CLK_TOP_NETSYS_500M 60
+#define CLK_TOP_NETSYS_WED_MCU 61
+#define CLK_TOP_NETSYS_2X 62
+#define CLK_TOP_SGM_325M 63
+#define CLK_TOP_SGM_REG 64
+#define CLK_TOP_F26M 65
+#define CLK_TOP_EIP97B 66
+#define CLK_TOP_USB3_PHY 67
+#define CLK_TOP_AUD 68
+#define CLK_TOP_A1SYS 69
+#define CLK_TOP_AUD_L 70
+#define CLK_TOP_A_TUNER 71
+#define CLK_TOP_U2U3_REF 72
+#define CLK_TOP_U2U3_SYS 73
+#define CLK_TOP_U2U3_XHCI 74
+#define CLK_TOP_USB_FRMCNT 75
+#define CLK_TOP_NFI1X_SEL 76
+#define CLK_TOP_SPINFI_SEL 77
+#define CLK_TOP_SPI_SEL 78
+#define CLK_TOP_SPIM_MST_SEL 79
+#define CLK_TOP_UART_SEL 80
+#define CLK_TOP_PWM_SEL 81
+#define CLK_TOP_I2C_SEL 82
+#define CLK_TOP_PEXTP_TL_SEL 83
+#define CLK_TOP_EMMC_208M_SEL 84
+#define CLK_TOP_EMMC_400M_SEL 85
+#define CLK_TOP_F26M_SEL 86
+#define CLK_TOP_DRAMC_SEL 87
+#define CLK_TOP_DRAMC_MD32_SEL 88
+#define CLK_TOP_SYSAXI_SEL 89
+#define CLK_TOP_SYSAPB_SEL 90
+#define CLK_TOP_ARM_DB_MAIN_SEL 91
+#define CLK_TOP_AP2CNN_HOST_SEL 92
+#define CLK_TOP_NETSYS_SEL 93
+#define CLK_TOP_NETSYS_500M_SEL 94
+#define CLK_TOP_NETSYS_MCU_SEL 95
+#define CLK_TOP_NETSYS_2X_SEL 96
+#define CLK_TOP_SGM_325M_SEL 97
+#define CLK_TOP_SGM_REG_SEL 98
+#define CLK_TOP_EIP97B_SEL 99
+#define CLK_TOP_USB3_PHY_SEL 100
+#define CLK_TOP_AUD_SEL 101
+#define CLK_TOP_A1SYS_SEL 102
+#define CLK_TOP_AUD_L_SEL 103
+#define CLK_TOP_A_TUNER_SEL 104
+#define CLK_TOP_U2U3_SEL 105
+#define CLK_TOP_U2U3_SYS_SEL 106
+#define CLK_TOP_U2U3_XHCI_SEL 107
+#define CLK_TOP_USB_FRMCNT_SEL 108
+#define CLK_TOP_AUD_I2S_M 109
+
+/* INFRACFG */
+#define CLK_INFRA_66M_MCK 0
+#define CLK_INFRA_UART0_SEL 1
+#define CLK_INFRA_UART1_SEL 2
+#define CLK_INFRA_UART2_SEL 3
+#define CLK_INFRA_SPI0_SEL 4
+#define CLK_INFRA_SPI1_SEL 5
+#define CLK_INFRA_SPI2_SEL 6
+#define CLK_INFRA_PWM1_SEL 7
+#define CLK_INFRA_PWM2_SEL 8
+#define CLK_INFRA_PWM3_SEL 9
+#define CLK_INFRA_PWM_BSEL 10
+#define CLK_INFRA_PCIE_SEL 11
+#define CLK_INFRA_GPT_STA 12
+#define CLK_INFRA_PWM_HCK 13
+#define CLK_INFRA_PWM_STA 14
+#define CLK_INFRA_PWM1_CK 15
+#define CLK_INFRA_PWM2_CK 16
+#define CLK_INFRA_PWM3_CK 17
+#define CLK_INFRA_CQ_DMA_CK 18
+#define CLK_INFRA_AUD_BUS_CK 19
+#define CLK_INFRA_AUD_26M_CK 20
+#define CLK_INFRA_AUD_L_CK 21
+#define CLK_INFRA_AUD_AUD_CK 22
+#define CLK_INFRA_AUD_EG2_CK 23
+#define CLK_INFRA_DRAMC_26M_CK 24
+#define CLK_INFRA_DBG_CK 25
+#define CLK_INFRA_AP_DMA_CK 26
+#define CLK_INFRA_SEJ_CK 27
+#define CLK_INFRA_SEJ_13M_CK 28
+#define CLK_INFRA_THERM_CK 29
+#define CLK_INFRA_I2C0_CK 30
+#define CLK_INFRA_UART0_CK 31
+#define CLK_INFRA_UART1_CK 32
+#define CLK_INFRA_UART2_CK 33
+#define CLK_INFRA_SPI2_CK 34
+#define CLK_INFRA_SPI2_HCK_CK 35
+#define CLK_INFRA_NFI1_CK 36
+#define CLK_INFRA_SPINFI1_CK 37
+#define CLK_INFRA_NFI_HCK_CK 38
+#define CLK_INFRA_SPI0_CK 39
+#define CLK_INFRA_SPI1_CK 40
+#define CLK_INFRA_SPI0_HCK_CK 41
+#define CLK_INFRA_SPI1_HCK_CK 42
+#define CLK_INFRA_FRTC_CK 43
+#define CLK_INFRA_MSDC_CK 44
+#define CLK_INFRA_MSDC_HCK_CK 45
+#define CLK_INFRA_MSDC_133M_CK 46
+#define CLK_INFRA_MSDC_66M_CK 47
+#define CLK_INFRA_ADC_26M_CK 48
+#define CLK_INFRA_ADC_FRC_CK 49
+#define CLK_INFRA_FBIST2FPC_CK 50
+#define CLK_INFRA_I2C_MCK_CK 51
+#define CLK_INFRA_I2C_PCK_CK 52
+#define CLK_INFRA_IUSB_133_CK 53
+#define CLK_INFRA_IUSB_66M_CK 54
+#define CLK_INFRA_IUSB_SYS_CK 55
+#define CLK_INFRA_IUSB_CK 56
+#define CLK_INFRA_IPCIE_CK 57
+#define CLK_INFRA_IPCIE_PIPE_CK 58
+#define CLK_INFRA_IPCIER_CK 59
+#define CLK_INFRA_IPCIEB_CK 60
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_NET2PLL 1
+#define CLK_APMIXED_MMPLL 2
+#define CLK_APMIXED_SGMPLL 3
+#define CLK_APMIXED_WEDMCUPLL 4
+#define CLK_APMIXED_NET1PLL 5
+#define CLK_APMIXED_MPLL 6
+#define CLK_APMIXED_APLL2 7
+
+/* SGMIISYS_0 */
+#define CLK_SGM0_TX_EN 0
+#define CLK_SGM0_RX_EN 1
+#define CLK_SGM0_CK0_EN 2
+#define CLK_SGM0_CDR_CK0_EN 3
+
+/* SGMIISYS_1 */
+#define CLK_SGM1_TX_EN 0
+#define CLK_SGM1_RX_EN 1
+#define CLK_SGM1_CK1_EN 2
+#define CLK_SGM1_CDR_CK1_EN 3
+
+/* ETHSYS */
+#define CLK_ETH_FE_EN 0
+#define CLK_ETH_GP2_EN 1
+#define CLK_ETH_GP1_EN 2
+#define CLK_ETH_WOCPU0_EN 3
+
+#endif /* _DT_BINDINGS_CLK_MT7981_H */
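SGMIISYS_0 and SGMIISYS_1 are identical instances, so the same four indices repeat and only the provider phandle tells them apart. A rough consumer sketch; the node, labels, and clock-names are illustrative only:

    #include <dt-bindings/clock/mediatek,mt7981-clk.h>

    eth: ethernet@15100000 {
            clocks = <&ethsys CLK_ETH_FE_EN>,
                     <&ethsys CLK_ETH_GP1_EN>,
                     <&sgmiisys0 CLK_SGM0_TX_EN>,
                     <&sgmiisys1 CLK_SGM1_TX_EN>;
            clock-names = "fe", "gp1", "sgmii_tx", "sgmii2_tx";
    };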
diff --git a/include/dt-bindings/clock/mediatek,mt7988-clk.h b/include/dt-bindings/clock/mediatek,mt7988-clk.h
new file mode 100644
index 000000000000..63376e40f14d
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt7988-clk.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Xiufeng Li <Xiufeng.Li@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7988_H
+#define _DT_BINDINGS_CLK_MT7988_H
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_NETSYSPLL 0
+#define CLK_APMIXED_MPLL 1
+#define CLK_APMIXED_MMPLL 2
+#define CLK_APMIXED_APLL2 3
+#define CLK_APMIXED_NET1PLL 4
+#define CLK_APMIXED_NET2PLL 5
+#define CLK_APMIXED_WEDMCUPLL 6
+#define CLK_APMIXED_SGMPLL 7
+#define CLK_APMIXED_ARM_B 8
+#define CLK_APMIXED_CCIPLL2_B 9
+#define CLK_APMIXED_USXGMIIPLL 10
+#define CLK_APMIXED_MSDCPLL 11
+
+/* TOPCKGEN */
+
+#define CLK_TOP_XTAL 0
+#define CLK_TOP_XTAL_D2 1
+#define CLK_TOP_RTC_32K 2
+#define CLK_TOP_RTC_32P7K 3
+#define CLK_TOP_MPLL_D2 4
+#define CLK_TOP_MPLL_D3_D2 5
+#define CLK_TOP_MPLL_D4 6
+#define CLK_TOP_MPLL_D8 7
+#define CLK_TOP_MPLL_D8_D2 8
+#define CLK_TOP_MMPLL_D2 9
+#define CLK_TOP_MMPLL_D3_D5 10
+#define CLK_TOP_MMPLL_D4 11
+#define CLK_TOP_MMPLL_D6_D2 12
+#define CLK_TOP_MMPLL_D8 13
+#define CLK_TOP_APLL2_D4 14
+#define CLK_TOP_NET1PLL_D4 15
+#define CLK_TOP_NET1PLL_D5 16
+#define CLK_TOP_NET1PLL_D5_D2 17
+#define CLK_TOP_NET1PLL_D5_D4 18
+#define CLK_TOP_NET1PLL_D8 19
+#define CLK_TOP_NET1PLL_D8_D2 20
+#define CLK_TOP_NET1PLL_D8_D4 21
+#define CLK_TOP_NET1PLL_D8_D8 22
+#define CLK_TOP_NET1PLL_D8_D16 23
+#define CLK_TOP_NET2PLL_D2 24
+#define CLK_TOP_NET2PLL_D4 25
+#define CLK_TOP_NET2PLL_D4_D4 26
+#define CLK_TOP_NET2PLL_D4_D8 27
+#define CLK_TOP_NET2PLL_D6 28
+#define CLK_TOP_NET2PLL_D8 29
+#define CLK_TOP_NETSYS_SEL 30
+#define CLK_TOP_NETSYS_500M_SEL 31
+#define CLK_TOP_NETSYS_2X_SEL 32
+#define CLK_TOP_NETSYS_GSW_SEL 33
+#define CLK_TOP_ETH_GMII_SEL 34
+#define CLK_TOP_NETSYS_MCU_SEL 35
+#define CLK_TOP_NETSYS_PAO_2X_SEL 36
+#define CLK_TOP_EIP197_SEL 37
+#define CLK_TOP_AXI_INFRA_SEL 38
+#define CLK_TOP_UART_SEL 39
+#define CLK_TOP_EMMC_250M_SEL 40
+#define CLK_TOP_EMMC_400M_SEL 41
+#define CLK_TOP_SPI_SEL 42
+#define CLK_TOP_SPIM_MST_SEL 43
+#define CLK_TOP_NFI1X_SEL 44
+#define CLK_TOP_SPINFI_SEL 45
+#define CLK_TOP_PWM_SEL 46
+#define CLK_TOP_I2C_SEL 47
+#define CLK_TOP_PCIE_MBIST_250M_SEL 48
+#define CLK_TOP_PEXTP_TL_SEL 49
+#define CLK_TOP_PEXTP_TL_P1_SEL 50
+#define CLK_TOP_PEXTP_TL_P2_SEL 51
+#define CLK_TOP_PEXTP_TL_P3_SEL 52
+#define CLK_TOP_USB_SYS_SEL 53
+#define CLK_TOP_USB_SYS_P1_SEL 54
+#define CLK_TOP_USB_XHCI_SEL 55
+#define CLK_TOP_USB_XHCI_P1_SEL 56
+#define CLK_TOP_USB_FRMCNT_SEL 57
+#define CLK_TOP_USB_FRMCNT_P1_SEL 58
+#define CLK_TOP_AUD_SEL 59
+#define CLK_TOP_A1SYS_SEL 60
+#define CLK_TOP_AUD_L_SEL 61
+#define CLK_TOP_A_TUNER_SEL 62
+#define CLK_TOP_SSPXTP_SEL 63
+#define CLK_TOP_USB_PHY_SEL 64
+#define CLK_TOP_USXGMII_SBUS_0_SEL 65
+#define CLK_TOP_USXGMII_SBUS_1_SEL 66
+#define CLK_TOP_SGM_0_SEL 67
+#define CLK_TOP_SGM_SBUS_0_SEL 68
+#define CLK_TOP_SGM_1_SEL 69
+#define CLK_TOP_SGM_SBUS_1_SEL 70
+#define CLK_TOP_XFI_PHY_0_XTAL_SEL 71
+#define CLK_TOP_XFI_PHY_1_XTAL_SEL 72
+#define CLK_TOP_SYSAXI_SEL 73
+#define CLK_TOP_SYSAPB_SEL 74
+#define CLK_TOP_ETH_REFCK_50M_SEL 75
+#define CLK_TOP_ETH_SYS_200M_SEL 76
+#define CLK_TOP_ETH_SYS_SEL 77
+#define CLK_TOP_ETH_XGMII_SEL 78
+#define CLK_TOP_BUS_TOPS_SEL 79
+#define CLK_TOP_NPU_TOPS_SEL 80
+#define CLK_TOP_DRAMC_SEL 81
+#define CLK_TOP_DRAMC_MD32_SEL 82
+#define CLK_TOP_INFRA_F26M_SEL 83
+#define CLK_TOP_PEXTP_P0_SEL 84
+#define CLK_TOP_PEXTP_P1_SEL 85
+#define CLK_TOP_PEXTP_P2_SEL 86
+#define CLK_TOP_PEXTP_P3_SEL 87
+#define CLK_TOP_DA_XTP_GLB_P0_SEL 88
+#define CLK_TOP_DA_XTP_GLB_P1_SEL 89
+#define CLK_TOP_DA_XTP_GLB_P2_SEL 90
+#define CLK_TOP_DA_XTP_GLB_P3_SEL 91
+#define CLK_TOP_CKM_SEL 92
+#define CLK_TOP_DA_SEL 93
+#define CLK_TOP_PEXTP_SEL 94
+#define CLK_TOP_TOPS_P2_26M_SEL 95
+#define CLK_TOP_MCUSYS_BACKUP_625M_SEL 96
+#define CLK_TOP_NETSYS_SYNC_250M_SEL 97
+#define CLK_TOP_MACSEC_SEL 98
+#define CLK_TOP_NETSYS_TOPS_400M_SEL 99
+#define CLK_TOP_NETSYS_PPEFB_250M_SEL 100
+#define CLK_TOP_NETSYS_WARP_SEL 101
+#define CLK_TOP_ETH_MII_SEL 102
+#define CLK_TOP_NPU_SEL 103
+#define CLK_TOP_AUD_I2S_M 104
+
+/* MCUSYS */
+
+#define CLK_MCU_BUS_DIV_SEL 0
+#define CLK_MCU_ARM_DIV_SEL 1
+
+/* INFRACFG_AO */
+
+#define CLK_INFRA_MUX_UART0_SEL 0
+#define CLK_INFRA_MUX_UART1_SEL 1
+#define CLK_INFRA_MUX_UART2_SEL 2
+#define CLK_INFRA_MUX_SPI0_SEL 3
+#define CLK_INFRA_MUX_SPI1_SEL 4
+#define CLK_INFRA_MUX_SPI2_SEL 5
+#define CLK_INFRA_PWM_SEL 6
+#define CLK_INFRA_PWM_CK1_SEL 7
+#define CLK_INFRA_PWM_CK2_SEL 8
+#define CLK_INFRA_PWM_CK3_SEL 9
+#define CLK_INFRA_PWM_CK4_SEL 10
+#define CLK_INFRA_PWM_CK5_SEL 11
+#define CLK_INFRA_PWM_CK6_SEL 12
+#define CLK_INFRA_PWM_CK7_SEL 13
+#define CLK_INFRA_PWM_CK8_SEL 14
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P0_SEL 15
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P1_SEL 16
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P2_SEL 17
+#define CLK_INFRA_PCIE_GFMUX_TL_O_P3_SEL 18
+
+/* INFRACFG */
+
+#define CLK_INFRA_PCIE_PERI_26M_CK_P0 19
+#define CLK_INFRA_PCIE_PERI_26M_CK_P1 20
+#define CLK_INFRA_PCIE_PERI_26M_CK_P2 21
+#define CLK_INFRA_PCIE_PERI_26M_CK_P3 22
+#define CLK_INFRA_66M_GPT_BCK 23
+#define CLK_INFRA_66M_PWM_HCK 24
+#define CLK_INFRA_66M_PWM_BCK 25
+#define CLK_INFRA_66M_PWM_CK1 26
+#define CLK_INFRA_66M_PWM_CK2 27
+#define CLK_INFRA_66M_PWM_CK3 28
+#define CLK_INFRA_66M_PWM_CK4 29
+#define CLK_INFRA_66M_PWM_CK5 30
+#define CLK_INFRA_66M_PWM_CK6 31
+#define CLK_INFRA_66M_PWM_CK7 32
+#define CLK_INFRA_66M_PWM_CK8 33
+#define CLK_INFRA_133M_CQDMA_BCK 34
+#define CLK_INFRA_66M_AUD_SLV_BCK 35
+#define CLK_INFRA_AUD_26M 36
+#define CLK_INFRA_AUD_L 37
+#define CLK_INFRA_AUD_AUD 38
+#define CLK_INFRA_AUD_EG2 39
+#define CLK_INFRA_DRAMC_F26M 40
+#define CLK_INFRA_133M_DBG_ACKM 41
+#define CLK_INFRA_66M_AP_DMA_BCK 42
+#define CLK_INFRA_66M_SEJ_BCK 43
+#define CLK_INFRA_PRE_CK_SEJ_F13M 44
+#define CLK_INFRA_26M_THERM_SYSTEM 45
+#define CLK_INFRA_I2C_BCK 46
+#define CLK_INFRA_52M_UART0_CK 47
+#define CLK_INFRA_52M_UART1_CK 48
+#define CLK_INFRA_52M_UART2_CK 49
+#define CLK_INFRA_NFI 50
+#define CLK_INFRA_SPINFI 51
+#define CLK_INFRA_66M_NFI_HCK 52
+#define CLK_INFRA_104M_SPI0 53
+#define CLK_INFRA_104M_SPI1 54
+#define CLK_INFRA_104M_SPI2_BCK 55
+#define CLK_INFRA_66M_SPI0_HCK 56
+#define CLK_INFRA_66M_SPI1_HCK 57
+#define CLK_INFRA_66M_SPI2_HCK 58
+#define CLK_INFRA_66M_FLASHIF_AXI 59
+#define CLK_INFRA_RTC 60
+#define CLK_INFRA_26M_ADC_BCK 61
+#define CLK_INFRA_RC_ADC 62
+#define CLK_INFRA_MSDC400 63
+#define CLK_INFRA_MSDC2_HCK 64
+#define CLK_INFRA_133M_MSDC_0_HCK 65
+#define CLK_INFRA_66M_MSDC_0_HCK 66
+#define CLK_INFRA_133M_CPUM_BCK 67
+#define CLK_INFRA_BIST2FPC 68
+#define CLK_INFRA_I2C_X16W_MCK_CK_P1 69
+#define CLK_INFRA_I2C_X16W_PCK_CK_P1 70
+#define CLK_INFRA_133M_USB_HCK 71
+#define CLK_INFRA_133M_USB_HCK_CK_P1 72
+#define CLK_INFRA_66M_USB_HCK 73
+#define CLK_INFRA_66M_USB_HCK_CK_P1 74
+#define CLK_INFRA_USB_SYS 75
+#define CLK_INFRA_USB_SYS_CK_P1 76
+#define CLK_INFRA_USB_REF 77
+#define CLK_INFRA_USB_CK_P1 78
+#define CLK_INFRA_USB_FRMCNT 79
+#define CLK_INFRA_USB_FRMCNT_CK_P1 80
+#define CLK_INFRA_USB_PIPE 81
+#define CLK_INFRA_USB_PIPE_CK_P1 82
+#define CLK_INFRA_USB_UTMI 83
+#define CLK_INFRA_USB_UTMI_CK_P1 84
+#define CLK_INFRA_USB_XHCI 85
+#define CLK_INFRA_USB_XHCI_CK_P1 86
+#define CLK_INFRA_PCIE_GFMUX_TL_P0 87
+#define CLK_INFRA_PCIE_GFMUX_TL_P1 88
+#define CLK_INFRA_PCIE_GFMUX_TL_P2 89
+#define CLK_INFRA_PCIE_GFMUX_TL_P3 90
+#define CLK_INFRA_PCIE_PIPE_P0 91
+#define CLK_INFRA_PCIE_PIPE_P1 92
+#define CLK_INFRA_PCIE_PIPE_P2 93
+#define CLK_INFRA_PCIE_PIPE_P3 94
+#define CLK_INFRA_133M_PCIE_CK_P0 95
+#define CLK_INFRA_133M_PCIE_CK_P1 96
+#define CLK_INFRA_133M_PCIE_CK_P2 97
+#define CLK_INFRA_133M_PCIE_CK_P3 98
+
+/* ETHDMA */
+
+#define CLK_ETHDMA_XGP1_EN 0
+#define CLK_ETHDMA_XGP2_EN 1
+#define CLK_ETHDMA_XGP3_EN 2
+#define CLK_ETHDMA_FE_EN 3
+#define CLK_ETHDMA_GP2_EN 4
+#define CLK_ETHDMA_GP1_EN 5
+#define CLK_ETHDMA_GP3_EN 6
+#define CLK_ETHDMA_ESW_EN 7
+#define CLK_ETHDMA_CRYPT0_EN 8
+#define CLK_ETHDMA_NR_CLK 9
+
+/* SGMIISYS_0 */
+
+#define CLK_SGM0_TX_EN 0
+#define CLK_SGM0_RX_EN 1
+#define CLK_SGMII0_NR_CLK 2
+
+/* SGMIISYS_1 */
+
+#define CLK_SGM1_TX_EN 0
+#define CLK_SGM1_RX_EN 1
+#define CLK_SGMII1_NR_CLK 2
+
+/* ETHWARP */
+
+#define CLK_ETHWARP_WOCPU2_EN 0
+#define CLK_ETHWARP_WOCPU1_EN 1
+#define CLK_ETHWARP_WOCPU0_EN 2
+#define CLK_ETHWARP_NR_CLK 3
+
+/* XFIPLL */
+#define CLK_XFIPLL_PLL 0
+#define CLK_XFIPLL_PLL_EN 1
+
+#endif /* _DT_BINDINGS_CLK_MT7988_H */
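These IDs are meant to be consumed from board device trees through each provider's single clock cell. A minimal sketch of a consumer, assuming hypothetical node labels (topckgen, infracfg, uart0) that are not part of this patch:

	#include <dt-bindings/clock/mediatek,mt7988-clk.h>

	/* illustrative only: node labels and the exact clock list are assumptions */
	&uart0 {
		clocks = <&topckgen CLK_TOP_UART_SEL>,
			 <&infracfg CLK_INFRA_52M_UART0_CK>;
		clock-names = "baud", "bus";
	};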
diff --git a/include/dt-bindings/clock/mediatek,mt8188-clk.h b/include/dt-bindings/clock/mediatek,mt8188-clk.h
new file mode 100644
index 000000000000..0e87f61c90f4
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt8188-clk.h
@@ -0,0 +1,726 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8188_H
+#define _DT_BINDINGS_CLK_MT8188_H
+
+/* TOPCKGEN */
+#define CLK_TOP_AXI 0
+#define CLK_TOP_SPM 1
+#define CLK_TOP_SCP 2
+#define CLK_TOP_BUS_AXIMEM 3
+#define CLK_TOP_VPP 4
+#define CLK_TOP_ETHDR 5
+#define CLK_TOP_IPE 6
+#define CLK_TOP_CAM 7
+#define CLK_TOP_CCU 8
+#define CLK_TOP_CCU_AHB 9
+#define CLK_TOP_IMG 10
+#define CLK_TOP_CAMTM 11
+#define CLK_TOP_DSP 12
+#define CLK_TOP_DSP1 13
+#define CLK_TOP_DSP2 14
+#define CLK_TOP_DSP3 15
+#define CLK_TOP_DSP4 16
+#define CLK_TOP_DSP5 17
+#define CLK_TOP_DSP6 18
+#define CLK_TOP_DSP7 19
+#define CLK_TOP_MFG_CORE_TMP 20
+#define CLK_TOP_CAMTG 21
+#define CLK_TOP_CAMTG2 22
+#define CLK_TOP_CAMTG3 23
+#define CLK_TOP_UART 24
+#define CLK_TOP_SPI 25
+#define CLK_TOP_MSDC50_0_HCLK 26
+#define CLK_TOP_MSDC50_0 27
+#define CLK_TOP_MSDC30_1 28
+#define CLK_TOP_MSDC30_2 29
+#define CLK_TOP_INTDIR 30
+#define CLK_TOP_AUD_INTBUS 31
+#define CLK_TOP_AUDIO_H 32
+#define CLK_TOP_PWRAP_ULPOSC 33
+#define CLK_TOP_ATB 34
+#define CLK_TOP_SSPM 35
+#define CLK_TOP_DP 36
+#define CLK_TOP_EDP 37
+#define CLK_TOP_DPI 38
+#define CLK_TOP_DISP_PWM0 39
+#define CLK_TOP_DISP_PWM1 40
+#define CLK_TOP_USB_TOP 41
+#define CLK_TOP_SSUSB_XHCI 42
+#define CLK_TOP_USB_TOP_2P 43
+#define CLK_TOP_SSUSB_XHCI_2P 44
+#define CLK_TOP_USB_TOP_3P 45
+#define CLK_TOP_SSUSB_XHCI_3P 46
+#define CLK_TOP_I2C 47
+#define CLK_TOP_SENINF 48
+#define CLK_TOP_SENINF1 49
+#define CLK_TOP_GCPU 50
+#define CLK_TOP_VENC 51
+#define CLK_TOP_VDEC 52
+#define CLK_TOP_PWM 53
+#define CLK_TOP_MCUPM 54
+#define CLK_TOP_SPMI_P_MST 55
+#define CLK_TOP_SPMI_M_MST 56
+#define CLK_TOP_DVFSRC 57
+#define CLK_TOP_TL 58
+#define CLK_TOP_AES_MSDCFDE 59
+#define CLK_TOP_DSI_OCC 60
+#define CLK_TOP_WPE_VPP 61
+#define CLK_TOP_HDCP 62
+#define CLK_TOP_HDCP_24M 63
+#define CLK_TOP_HDMI_APB 64
+#define CLK_TOP_SNPS_ETH_250M 65
+#define CLK_TOP_SNPS_ETH_62P4M_PTP 66
+#define CLK_TOP_SNPS_ETH_50M_RMII 67
+#define CLK_TOP_ADSP 68
+#define CLK_TOP_AUDIO_LOCAL_BUS 69
+#define CLK_TOP_ASM_H 70
+#define CLK_TOP_ASM_L 71
+#define CLK_TOP_APLL1 72
+#define CLK_TOP_APLL2 73
+#define CLK_TOP_APLL3 74
+#define CLK_TOP_APLL4 75
+#define CLK_TOP_APLL5 76
+#define CLK_TOP_I2SO1 77
+#define CLK_TOP_I2SO2 78
+#define CLK_TOP_I2SI1 79
+#define CLK_TOP_I2SI2 80
+#define CLK_TOP_DPTX 81
+#define CLK_TOP_AUD_IEC 82
+#define CLK_TOP_A1SYS_HP 83
+#define CLK_TOP_A2SYS 84
+#define CLK_TOP_A3SYS 85
+#define CLK_TOP_A4SYS 86
+#define CLK_TOP_ECC 87
+#define CLK_TOP_SPINOR 88
+#define CLK_TOP_ULPOSC 89
+#define CLK_TOP_SRCK 90
+#define CLK_TOP_MFG_CK_FAST_REF 91
+#define CLK_TOP_MAINPLL_D3 92
+#define CLK_TOP_MAINPLL_D4 93
+#define CLK_TOP_MAINPLL_D4_D2 94
+#define CLK_TOP_MAINPLL_D4_D4 95
+#define CLK_TOP_MAINPLL_D4_D8 96
+#define CLK_TOP_MAINPLL_D5 97
+#define CLK_TOP_MAINPLL_D5_D2 98
+#define CLK_TOP_MAINPLL_D5_D4 99
+#define CLK_TOP_MAINPLL_D5_D8 100
+#define CLK_TOP_MAINPLL_D6 101
+#define CLK_TOP_MAINPLL_D6_D2 102
+#define CLK_TOP_MAINPLL_D6_D4 103
+#define CLK_TOP_MAINPLL_D6_D8 104
+#define CLK_TOP_MAINPLL_D7 105
+#define CLK_TOP_MAINPLL_D7_D2 106
+#define CLK_TOP_MAINPLL_D7_D4 107
+#define CLK_TOP_MAINPLL_D7_D8 108
+#define CLK_TOP_MAINPLL_D9 109
+#define CLK_TOP_UNIVPLL_D2 110
+#define CLK_TOP_UNIVPLL_D3 111
+#define CLK_TOP_UNIVPLL_D4 112
+#define CLK_TOP_UNIVPLL_D4_D2 113
+#define CLK_TOP_UNIVPLL_D4_D4 114
+#define CLK_TOP_UNIVPLL_D4_D8 115
+#define CLK_TOP_UNIVPLL_D5 116
+#define CLK_TOP_UNIVPLL_D5_D2 117
+#define CLK_TOP_UNIVPLL_D5_D4 118
+#define CLK_TOP_UNIVPLL_D5_D8 119
+#define CLK_TOP_UNIVPLL_D6 120
+#define CLK_TOP_UNIVPLL_D6_D2 121
+#define CLK_TOP_UNIVPLL_D6_D4 122
+#define CLK_TOP_UNIVPLL_D6_D8 123
+#define CLK_TOP_UNIVPLL_D7 124
+#define CLK_TOP_UNIVPLL_192M 125
+#define CLK_TOP_UNIVPLL_192M_D4 126
+#define CLK_TOP_UNIVPLL_192M_D8 127
+#define CLK_TOP_UNIVPLL_192M_D10 128
+#define CLK_TOP_UNIVPLL_192M_D16 129
+#define CLK_TOP_UNIVPLL_192M_D32 130
+#define CLK_TOP_APLL1_D3 131
+#define CLK_TOP_APLL1_D4 132
+#define CLK_TOP_APLL2_D3 133
+#define CLK_TOP_APLL2_D4 134
+#define CLK_TOP_APLL3_D4 135
+#define CLK_TOP_APLL4_D4 136
+#define CLK_TOP_APLL5_D4 137
+#define CLK_TOP_MMPLL_D4 138
+#define CLK_TOP_MMPLL_D4_D2 139
+#define CLK_TOP_MMPLL_D5 140
+#define CLK_TOP_MMPLL_D5_D2 141
+#define CLK_TOP_MMPLL_D5_D4 142
+#define CLK_TOP_MMPLL_D6 143
+#define CLK_TOP_MMPLL_D6_D2 144
+#define CLK_TOP_MMPLL_D7 145
+#define CLK_TOP_MMPLL_D9 146
+#define CLK_TOP_TVDPLL1 147
+#define CLK_TOP_TVDPLL1_D2 148
+#define CLK_TOP_TVDPLL1_D4 149
+#define CLK_TOP_TVDPLL1_D8 150
+#define CLK_TOP_TVDPLL1_D16 151
+#define CLK_TOP_TVDPLL2 152
+#define CLK_TOP_TVDPLL2_D2 153
+#define CLK_TOP_TVDPLL2_D4 154
+#define CLK_TOP_TVDPLL2_D8 155
+#define CLK_TOP_TVDPLL2_D16 156
+#define CLK_TOP_MSDCPLL_D2 157
+#define CLK_TOP_MSDCPLL_D16 158
+#define CLK_TOP_ETHPLL 159
+#define CLK_TOP_ETHPLL_D2 160
+#define CLK_TOP_ETHPLL_D4 161
+#define CLK_TOP_ETHPLL_D8 162
+#define CLK_TOP_ETHPLL_D10 163
+#define CLK_TOP_ADSPPLL_D2 164
+#define CLK_TOP_ADSPPLL_D4 165
+#define CLK_TOP_ADSPPLL_D8 166
+#define CLK_TOP_ULPOSC1 167
+#define CLK_TOP_ULPOSC1_D2 168
+#define CLK_TOP_ULPOSC1_D4 169
+#define CLK_TOP_ULPOSC1_D8 170
+#define CLK_TOP_ULPOSC1_D7 171
+#define CLK_TOP_ULPOSC1_D10 172
+#define CLK_TOP_ULPOSC1_D16 173
+#define CLK_TOP_MPHONE_SLAVE_BCK 174
+#define CLK_TOP_PAD_FPC 175
+#define CLK_TOP_466M_FMEM 176
+#define CLK_TOP_PEXTP_PIPE 177
+#define CLK_TOP_DSI_PHY 178
+#define CLK_TOP_APLL12_CK_DIV0 179
+#define CLK_TOP_APLL12_CK_DIV1 180
+#define CLK_TOP_APLL12_CK_DIV2 181
+#define CLK_TOP_APLL12_CK_DIV3 182
+#define CLK_TOP_APLL12_CK_DIV4 183
+#define CLK_TOP_APLL12_CK_DIV9 184
+#define CLK_TOP_CFGREG_CLOCK_EN_VPP0 185
+#define CLK_TOP_CFGREG_CLOCK_EN_VPP1 186
+#define CLK_TOP_CFGREG_CLOCK_EN_VDO0 187
+#define CLK_TOP_CFGREG_CLOCK_EN_VDO1 188
+#define CLK_TOP_CFGREG_CLOCK_ISP_AXI_GALS 189
+#define CLK_TOP_CFGREG_F26M_VPP0 190
+#define CLK_TOP_CFGREG_F26M_VPP1 191
+#define CLK_TOP_CFGREG_F26M_VDO0 192
+#define CLK_TOP_CFGREG_F26M_VDO1 193
+#define CLK_TOP_CFGREG_AUD_F26M_AUD 194
+#define CLK_TOP_CFGREG_UNIPLL_SES 195
+#define CLK_TOP_CFGREG_F_PCIE_PHY_REF 196
+#define CLK_TOP_SSUSB_TOP_REF 197
+#define CLK_TOP_SSUSB_PHY_REF 198
+#define CLK_TOP_SSUSB_TOP_P1_REF 199
+#define CLK_TOP_SSUSB_PHY_P1_REF 200
+#define CLK_TOP_SSUSB_TOP_P2_REF 201
+#define CLK_TOP_SSUSB_PHY_P2_REF 202
+#define CLK_TOP_SSUSB_TOP_P3_REF 203
+#define CLK_TOP_SSUSB_PHY_P3_REF 204
+#define CLK_TOP_NR_CLK 205
+
+/* INFRACFG_AO */
+#define CLK_INFRA_AO_PMIC_TMR 0
+#define CLK_INFRA_AO_PMIC_AP 1
+#define CLK_INFRA_AO_PMIC_MD 2
+#define CLK_INFRA_AO_PMIC_CONN 3
+#define CLK_INFRA_AO_SEJ 4
+#define CLK_INFRA_AO_APXGPT 5
+#define CLK_INFRA_AO_GCE 6
+#define CLK_INFRA_AO_GCE2 7
+#define CLK_INFRA_AO_THERM 8
+#define CLK_INFRA_AO_PWM_HCLK 9
+#define CLK_INFRA_AO_PWM1 10
+#define CLK_INFRA_AO_PWM2 11
+#define CLK_INFRA_AO_PWM3 12
+#define CLK_INFRA_AO_PWM4 13
+#define CLK_INFRA_AO_PWM 14
+#define CLK_INFRA_AO_UART0 15
+#define CLK_INFRA_AO_UART1 16
+#define CLK_INFRA_AO_UART2 17
+#define CLK_INFRA_AO_UART3 18
+#define CLK_INFRA_AO_UART4 19
+#define CLK_INFRA_AO_GCE_26M 20
+#define CLK_INFRA_AO_CQ_DMA_FPC 21
+#define CLK_INFRA_AO_UART5 22
+#define CLK_INFRA_AO_HDMI_26M 23
+#define CLK_INFRA_AO_SPI0 24
+#define CLK_INFRA_AO_MSDC0 25
+#define CLK_INFRA_AO_MSDC1 26
+#define CLK_INFRA_AO_MSDC2 27
+#define CLK_INFRA_AO_MSDC0_SRC 28
+#define CLK_INFRA_AO_DVFSRC 29
+#define CLK_INFRA_AO_TRNG 30
+#define CLK_INFRA_AO_AUXADC 31
+#define CLK_INFRA_AO_CPUM 32
+#define CLK_INFRA_AO_HDMI_32K 33
+#define CLK_INFRA_AO_CEC_66M_HCLK 34
+#define CLK_INFRA_AO_PCIE_TL_26M 35
+#define CLK_INFRA_AO_MSDC1_SRC 36
+#define CLK_INFRA_AO_CEC_66M_BCLK 37
+#define CLK_INFRA_AO_PCIE_TL_96M 38
+#define CLK_INFRA_AO_DEVICE_APC 39
+#define CLK_INFRA_AO_ECC_66M_HCLK 40
+#define CLK_INFRA_AO_DEBUGSYS 41
+#define CLK_INFRA_AO_AUDIO 42
+#define CLK_INFRA_AO_PCIE_TL_32K 43
+#define CLK_INFRA_AO_DBG_TRACE 44
+#define CLK_INFRA_AO_DRAMC_F26M 45
+#define CLK_INFRA_AO_IRTX 46
+#define CLK_INFRA_AO_DISP_PWM 47
+#define CLK_INFRA_AO_CLDMA_BCLK 48
+#define CLK_INFRA_AO_AUDIO_26M_BCLK 49
+#define CLK_INFRA_AO_SPI1 50
+#define CLK_INFRA_AO_SPI2 51
+#define CLK_INFRA_AO_SPI3 52
+#define CLK_INFRA_AO_FSSPM 53
+#define CLK_INFRA_AO_SSPM_BUS_HCLK 54
+#define CLK_INFRA_AO_APDMA_BCLK 55
+#define CLK_INFRA_AO_SPI4 56
+#define CLK_INFRA_AO_SPI5 57
+#define CLK_INFRA_AO_CQ_DMA 58
+#define CLK_INFRA_AO_MSDC0_SELF 59
+#define CLK_INFRA_AO_MSDC1_SELF 60
+#define CLK_INFRA_AO_MSDC2_SELF 61
+#define CLK_INFRA_AO_I2S_DMA 62
+#define CLK_INFRA_AO_AP_MSDC0 63
+#define CLK_INFRA_AO_MD_MSDC0 64
+#define CLK_INFRA_AO_MSDC30_2 65
+#define CLK_INFRA_AO_GCPU 66
+#define CLK_INFRA_AO_PCIE_PERI_26M 67
+#define CLK_INFRA_AO_GCPU_66M_BCLK 68
+#define CLK_INFRA_AO_GCPU_133M_BCLK 69
+#define CLK_INFRA_AO_DISP_PWM1 70
+#define CLK_INFRA_AO_FBIST2FPC 71
+#define CLK_INFRA_AO_DEVICE_APC_SYNC 72
+#define CLK_INFRA_AO_PCIE_P1_PERI_26M 73
+#define CLK_INFRA_AO_133M_MCLK_CK 74
+#define CLK_INFRA_AO_66M_MCLK_CK 75
+#define CLK_INFRA_AO_PCIE_PL_P_250M_P0 76
+#define CLK_INFRA_AO_RG_AES_MSDCFDE_CK_0P 77
+#define CLK_INFRA_AO_NR_CLK 78
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_ETHPLL 0
+#define CLK_APMIXED_MSDCPLL 1
+#define CLK_APMIXED_TVDPLL1 2
+#define CLK_APMIXED_TVDPLL2 3
+#define CLK_APMIXED_MMPLL 4
+#define CLK_APMIXED_MAINPLL 5
+#define CLK_APMIXED_IMGPLL 6
+#define CLK_APMIXED_UNIVPLL 7
+#define CLK_APMIXED_ADSPPLL 8
+#define CLK_APMIXED_APLL1 9
+#define CLK_APMIXED_APLL2 10
+#define CLK_APMIXED_APLL3 11
+#define CLK_APMIXED_APLL4 12
+#define CLK_APMIXED_APLL5 13
+#define CLK_APMIXED_MFGPLL 14
+#define CLK_APMIXED_PLL_SSUSB26M_EN 15
+#define CLK_APMIXED_NR_CLK 16
+
+/* AUDIODSP */
+#define CLK_AUDIODSP_AUDIO26M 0
+#define CLK_AUDIODSP_NR_CLK 1
+
+/* PERICFG_AO */
+#define CLK_PERI_AO_ETHERNET 0
+#define CLK_PERI_AO_ETHERNET_BUS 1
+#define CLK_PERI_AO_FLASHIF_BUS 2
+#define CLK_PERI_AO_FLASHIF_26M 3
+#define CLK_PERI_AO_FLASHIFLASHCK 4
+#define CLK_PERI_AO_SSUSB_2P_BUS 5
+#define CLK_PERI_AO_SSUSB_2P_XHCI 6
+#define CLK_PERI_AO_SSUSB_3P_BUS 7
+#define CLK_PERI_AO_SSUSB_3P_XHCI 8
+#define CLK_PERI_AO_SSUSB_BUS 9
+#define CLK_PERI_AO_SSUSB_XHCI 10
+#define CLK_PERI_AO_ETHERNET_MAC 11
+#define CLK_PERI_AO_PCIE_P0_FMEM 12
+#define CLK_PERI_AO_NR_CLK 13
+
+/* IMP_IIC_WRAP_C */
+#define CLK_IMP_IIC_WRAP_C_AP_CLOCK_I2C0 0
+#define CLK_IMP_IIC_WRAP_C_AP_CLOCK_I2C2 1
+#define CLK_IMP_IIC_WRAP_C_AP_CLOCK_I2C3 2
+#define CLK_IMP_IIC_WRAP_C_NR_CLK 3
+
+/* IMP_IIC_WRAP_W */
+#define CLK_IMP_IIC_WRAP_W_AP_CLOCK_I2C1 0
+#define CLK_IMP_IIC_WRAP_W_AP_CLOCK_I2C4 1
+#define CLK_IMP_IIC_WRAP_W_NR_CLK 2
+
+/* IMP_IIC_WRAP_EN */
+#define CLK_IMP_IIC_WRAP_EN_AP_CLOCK_I2C5 0
+#define CLK_IMP_IIC_WRAP_EN_AP_CLOCK_I2C6 1
+#define CLK_IMP_IIC_WRAP_EN_NR_CLK 2
+
+/* MFGCFG */
+#define CLK_MFGCFG_BG3D 0
+#define CLK_MFGCFG_NR_CLK 1
+
+/* VPPSYS0 */
+#define CLK_VPP0_MDP_FG 0
+#define CLK_VPP0_STITCH 1
+#define CLK_VPP0_PADDING 2
+#define CLK_VPP0_MDP_TCC 3
+#define CLK_VPP0_WARP0_ASYNC_TX 4
+#define CLK_VPP0_WARP1_ASYNC_TX 5
+#define CLK_VPP0_MUTEX 6
+#define CLK_VPP02VPP1_RELAY 7
+#define CLK_VPP0_VPP12VPP0_ASYNC 8
+#define CLK_VPP0_MMSYSRAM_TOP 9
+#define CLK_VPP0_MDP_AAL 10
+#define CLK_VPP0_MDP_RSZ 11
+#define CLK_VPP0_SMI_COMMON_MMSRAM 12
+#define CLK_VPP0_GALS_VDO0_LARB0_MMSRAM 13
+#define CLK_VPP0_GALS_VDO0_LARB1_MMSRAM 14
+#define CLK_VPP0_GALS_VENCSYS_MMSRAM 15
+#define CLK_VPP0_GALS_VENCSYS_CORE1_MMSRAM 16
+#define CLK_VPP0_GALS_INFRA_MMSRAM 17
+#define CLK_VPP0_GALS_CAMSYS_MMSRAM 18
+#define CLK_VPP0_GALS_VPP1_LARB5_MMSRAM 19
+#define CLK_VPP0_GALS_VPP1_LARB6_MMSRAM 20
+#define CLK_VPP0_SMI_REORDER_MMSRAM 21
+#define CLK_VPP0_SMI_IOMMU 22
+#define CLK_VPP0_GALS_IMGSYS_CAMSYS 23
+#define CLK_VPP0_MDP_RDMA 24
+#define CLK_VPP0_MDP_WROT 25
+#define CLK_VPP0_GALS_EMI0_EMI1 26
+#define CLK_VPP0_SMI_SUB_COMMON_REORDER 27
+#define CLK_VPP0_SMI_RSI 28
+#define CLK_VPP0_SMI_COMMON_LARB4 29
+#define CLK_VPP0_GALS_VDEC_VDEC_CORE1 30
+#define CLK_VPP0_GALS_VPP1_WPESYS 31
+#define CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1 32
+#define CLK_VPP0_FAKE_ENG 33
+#define CLK_VPP0_MDP_HDR 34
+#define CLK_VPP0_MDP_TDSHP 35
+#define CLK_VPP0_MDP_COLOR 36
+#define CLK_VPP0_MDP_OVL 37
+#define CLK_VPP0_DSIP_RDMA 38
+#define CLK_VPP0_DISP_WDMA 39
+#define CLK_VPP0_MDP_HMS 40
+#define CLK_VPP0_WARP0_RELAY 41
+#define CLK_VPP0_WARP0_ASYNC 42
+#define CLK_VPP0_WARP1_RELAY 43
+#define CLK_VPP0_WARP1_ASYNC 44
+#define CLK_VPP0_NR_CLK 45
+
+/* WPESYS */
+#define CLK_WPE_TOP_WPE_VPP0 0
+#define CLK_WPE_TOP_SMI_LARB7 1
+#define CLK_WPE_TOP_WPESYS_EVENT_TX 2
+#define CLK_WPE_TOP_SMI_LARB7_PCLK_EN 3
+#define CLK_WPE_TOP_NR_CLK 4
+
+/* WPESYS_VPP0 */
+#define CLK_WPE_VPP0_VECI 0
+#define CLK_WPE_VPP0_VEC2I 1
+#define CLK_WPE_VPP0_VEC3I 2
+#define CLK_WPE_VPP0_WPEO 3
+#define CLK_WPE_VPP0_MSKO 4
+#define CLK_WPE_VPP0_VGEN 5
+#define CLK_WPE_VPP0_EXT 6
+#define CLK_WPE_VPP0_VFC 7
+#define CLK_WPE_VPP0_CACH0_TOP 8
+#define CLK_WPE_VPP0_CACH0_DMA 9
+#define CLK_WPE_VPP0_CACH1_TOP 10
+#define CLK_WPE_VPP0_CACH1_DMA 11
+#define CLK_WPE_VPP0_CACH2_TOP 12
+#define CLK_WPE_VPP0_CACH2_DMA 13
+#define CLK_WPE_VPP0_CACH3_TOP 14
+#define CLK_WPE_VPP0_CACH3_DMA 15
+#define CLK_WPE_VPP0_PSP 16
+#define CLK_WPE_VPP0_PSP2 17
+#define CLK_WPE_VPP0_SYNC 18
+#define CLK_WPE_VPP0_C24 19
+#define CLK_WPE_VPP0_MDP_CROP 20
+#define CLK_WPE_VPP0_ISP_CROP 21
+#define CLK_WPE_VPP0_TOP 22
+#define CLK_WPE_VPP0_NR_CLK 23
+
+/* VPPSYS1 */
+#define CLK_VPP1_SVPP1_MDP_OVL 0
+#define CLK_VPP1_SVPP1_MDP_TCC 1
+#define CLK_VPP1_SVPP1_MDP_WROT 2
+#define CLK_VPP1_SVPP1_VPP_PAD 3
+#define CLK_VPP1_SVPP2_MDP_WROT 4
+#define CLK_VPP1_SVPP2_VPP_PAD 5
+#define CLK_VPP1_SVPP3_MDP_WROT 6
+#define CLK_VPP1_SVPP3_VPP_PAD 7
+#define CLK_VPP1_SVPP1_MDP_RDMA 8
+#define CLK_VPP1_SVPP1_MDP_FG 9
+#define CLK_VPP1_SVPP2_MDP_RDMA 10
+#define CLK_VPP1_SVPP2_MDP_FG 11
+#define CLK_VPP1_SVPP3_MDP_RDMA 12
+#define CLK_VPP1_SVPP3_MDP_FG 13
+#define CLK_VPP1_VPP_SPLIT 14
+#define CLK_VPP1_SVPP2_VDO0_DL_RELAY 15
+#define CLK_VPP1_SVPP1_MDP_RSZ 16
+#define CLK_VPP1_SVPP1_MDP_TDSHP 17
+#define CLK_VPP1_SVPP1_MDP_COLOR 18
+#define CLK_VPP1_SVPP3_VDO1_DL_RELAY 19
+#define CLK_VPP1_SVPP2_MDP_RSZ 20
+#define CLK_VPP1_SVPP2_VPP_MERGE 21
+#define CLK_VPP1_SVPP2_MDP_TDSHP 22
+#define CLK_VPP1_SVPP2_MDP_COLOR 23
+#define CLK_VPP1_SVPP3_MDP_RSZ 24
+#define CLK_VPP1_SVPP3_VPP_MERGE 25
+#define CLK_VPP1_SVPP3_MDP_TDSHP 26
+#define CLK_VPP1_SVPP3_MDP_COLOR 27
+#define CLK_VPP1_GALS5 28
+#define CLK_VPP1_GALS6 29
+#define CLK_VPP1_LARB5 30
+#define CLK_VPP1_LARB6 31
+#define CLK_VPP1_SVPP1_MDP_HDR 32
+#define CLK_VPP1_SVPP1_MDP_AAL 33
+#define CLK_VPP1_SVPP2_MDP_HDR 34
+#define CLK_VPP1_SVPP2_MDP_AAL 35
+#define CLK_VPP1_SVPP3_MDP_HDR 36
+#define CLK_VPP1_SVPP3_MDP_AAL 37
+#define CLK_VPP1_DISP_MUTEX 38
+#define CLK_VPP1_SVPP2_VDO1_DL_RELAY 39
+#define CLK_VPP1_SVPP3_VDO0_DL_RELAY 40
+#define CLK_VPP1_VPP0_DL_ASYNC 41
+#define CLK_VPP1_VPP0_DL1_RELAY 42
+#define CLK_VPP1_LARB5_FAKE_ENG 43
+#define CLK_VPP1_LARB6_FAKE_ENG 44
+#define CLK_VPP1_HDMI_META 45
+#define CLK_VPP1_VPP_SPLIT_HDMI 46
+#define CLK_VPP1_DGI_IN 47
+#define CLK_VPP1_DGI_OUT 48
+#define CLK_VPP1_VPP_SPLIT_DGI 49
+#define CLK_VPP1_DL_CON_OCC 50
+#define CLK_VPP1_VPP_SPLIT_26M 51
+#define CLK_VPP1_NR_CLK 52
+
+/* IMGSYS */
+#define CLK_IMGSYS_MAIN_LARB9 0
+#define CLK_IMGSYS_MAIN_TRAW0 1
+#define CLK_IMGSYS_MAIN_TRAW1 2
+#define CLK_IMGSYS_MAIN_VCORE_GALS 3
+#define CLK_IMGSYS_MAIN_DIP0 4
+#define CLK_IMGSYS_MAIN_WPE0 5
+#define CLK_IMGSYS_MAIN_IPE 6
+#define CLK_IMGSYS_MAIN_WPE1 7
+#define CLK_IMGSYS_MAIN_WPE2 8
+#define CLK_IMGSYS_MAIN_GALS 9
+#define CLK_IMGSYS_MAIN_NR_CLK 10
+
+/* IMGSYS1_DIP_TOP */
+#define CLK_IMGSYS1_DIP_TOP_LARB10 0
+#define CLK_IMGSYS1_DIP_TOP_DIP_TOP 1
+#define CLK_IMGSYS1_DIP_TOP_NR_CLK 2
+
+/* IMGSYS1_DIP_NR */
+#define CLK_IMGSYS1_DIP_NR_LARB15 0
+#define CLK_IMGSYS1_DIP_NR_DIP_NR 1
+#define CLK_IMGSYS1_DIP_NR_NR_CLK 2
+
+/* IMGSYS_WPE1 */
+#define CLK_IMGSYS_WPE1_LARB11 0
+#define CLK_IMGSYS_WPE1 1
+#define CLK_IMGSYS_WPE1_NR_CLK 2
+
+/* IPESYS */
+#define CLK_IPE_DPE 0
+#define CLK_IPE_FDVT 1
+#define CLK_IPE_ME 2
+#define CLK_IPESYS_TOP 3
+#define CLK_IPE_SMI_LARB12 4
+#define CLK_IPE_NR_CLK 5
+
+/* IMGSYS_WPE2 */
+#define CLK_IMGSYS_WPE2_LARB11 0
+#define CLK_IMGSYS_WPE2 1
+#define CLK_IMGSYS_WPE2_NR_CLK 2
+
+/* IMGSYS_WPE3 */
+#define CLK_IMGSYS_WPE3_LARB11 0
+#define CLK_IMGSYS_WPE3 1
+#define CLK_IMGSYS_WPE3_NR_CLK 2
+
+/* CAMSYS */
+#define CLK_CAM_MAIN_LARB13 0
+#define CLK_CAM_MAIN_LARB14 1
+#define CLK_CAM_MAIN_CAM 2
+#define CLK_CAM_MAIN_CAM_SUBA 3
+#define CLK_CAM_MAIN_CAM_SUBB 4
+#define CLK_CAM_MAIN_CAMTG 5
+#define CLK_CAM_MAIN_SENINF 6
+#define CLK_CAM_MAIN_GCAMSVA 7
+#define CLK_CAM_MAIN_GCAMSVB 8
+#define CLK_CAM_MAIN_GCAMSVC 9
+#define CLK_CAM_MAIN_GCAMSVD 10
+#define CLK_CAM_MAIN_GCAMSVE 11
+#define CLK_CAM_MAIN_GCAMSVF 12
+#define CLK_CAM_MAIN_GCAMSVG 13
+#define CLK_CAM_MAIN_GCAMSVH 14
+#define CLK_CAM_MAIN_GCAMSVI 15
+#define CLK_CAM_MAIN_GCAMSVJ 16
+#define CLK_CAM_MAIN_CAMSV_TOP 17
+#define CLK_CAM_MAIN_CAMSV_CQ_A 18
+#define CLK_CAM_MAIN_CAMSV_CQ_B 19
+#define CLK_CAM_MAIN_CAMSV_CQ_C 20
+#define CLK_CAM_MAIN_FAKE_ENG 21
+#define CLK_CAM_MAIN_CAM2MM0_GALS 22
+#define CLK_CAM_MAIN_CAM2MM1_GALS 23
+#define CLK_CAM_MAIN_CAM2SYS_GALS 24
+#define CLK_CAM_MAIN_NR_CLK 25
+
+/* CAMSYS_RAWA */
+#define CLK_CAM_RAWA_LARBX 0
+#define CLK_CAM_RAWA_CAM 1
+#define CLK_CAM_RAWA_CAMTG 2
+#define CLK_CAM_RAWA_NR_CLK 3
+
+/* CAMSYS_YUVA */
+#define CLK_CAM_YUVA_LARBX 0
+#define CLK_CAM_YUVA_CAM 1
+#define CLK_CAM_YUVA_CAMTG 2
+#define CLK_CAM_YUVA_NR_CLK 3
+
+/* CAMSYS_RAWB */
+#define CLK_CAM_RAWB_LARBX 0
+#define CLK_CAM_RAWB_CAM 1
+#define CLK_CAM_RAWB_CAMTG 2
+#define CLK_CAM_RAWB_NR_CLK 3
+
+/* CAMSYS_YUVB */
+#define CLK_CAM_YUVB_LARBX 0
+#define CLK_CAM_YUVB_CAM 1
+#define CLK_CAM_YUVB_CAMTG 2
+#define CLK_CAM_YUVB_NR_CLK 3
+
+/* CCUSYS */
+#define CLK_CCU_LARB27 0
+#define CLK_CCU_AHB 1
+#define CLK_CCU_CCU0 2
+#define CLK_CCU_NR_CLK 3
+
+/* VDECSYS_SOC */
+#define CLK_VDEC1_SOC_LARB1 0
+#define CLK_VDEC1_SOC_LAT 1
+#define CLK_VDEC1_SOC_LAT_ACTIVE 2
+#define CLK_VDEC1_SOC_LAT_ENG 3
+#define CLK_VDEC1_SOC_VDEC 4
+#define CLK_VDEC1_SOC_VDEC_ACTIVE 5
+#define CLK_VDEC1_SOC_VDEC_ENG 6
+#define CLK_VDEC1_NR_CLK 7
+
+/* VDECSYS */
+#define CLK_VDEC2_LARB1 0
+#define CLK_VDEC2_LAT 1
+#define CLK_VDEC2_VDEC 2
+#define CLK_VDEC2_VDEC_ACTIVE 3
+#define CLK_VDEC2_VDEC_ENG 4
+#define CLK_VDEC2_NR_CLK 5
+
+/* VENCSYS */
+#define CLK_VENC1_LARB 0
+#define CLK_VENC1_VENC 1
+#define CLK_VENC1_JPGENC 2
+#define CLK_VENC1_JPGDEC 3
+#define CLK_VENC1_JPGDEC_C1 4
+#define CLK_VENC1_GALS 5
+#define CLK_VENC1_GALS_SRAM 6
+#define CLK_VENC1_NR_CLK 7
+
+/* VDOSYS0 */
+#define CLK_VDO0_DISP_OVL0 0
+#define CLK_VDO0_FAKE_ENG0 1
+#define CLK_VDO0_DISP_CCORR0 2
+#define CLK_VDO0_DISP_MUTEX0 3
+#define CLK_VDO0_DISP_GAMMA0 4
+#define CLK_VDO0_DISP_DITHER0 5
+#define CLK_VDO0_DISP_WDMA0 6
+#define CLK_VDO0_DISP_RDMA0 7
+#define CLK_VDO0_DSI0 8
+#define CLK_VDO0_DSI1 9
+#define CLK_VDO0_DSC_WRAP0 10
+#define CLK_VDO0_VPP_MERGE0 11
+#define CLK_VDO0_DP_INTF0 12
+#define CLK_VDO0_DISP_AAL0 13
+#define CLK_VDO0_INLINEROT0 14
+#define CLK_VDO0_APB_BUS 15
+#define CLK_VDO0_DISP_COLOR0 16
+#define CLK_VDO0_MDP_WROT0 17
+#define CLK_VDO0_DISP_RSZ0 18
+#define CLK_VDO0_DISP_POSTMASK0 19
+#define CLK_VDO0_FAKE_ENG1 20
+#define CLK_VDO0_DL_ASYNC2 21
+#define CLK_VDO0_DL_RELAY3 22
+#define CLK_VDO0_DL_RELAY4 23
+#define CLK_VDO0_SMI_GALS 24
+#define CLK_VDO0_SMI_COMMON 25
+#define CLK_VDO0_SMI_EMI 26
+#define CLK_VDO0_SMI_IOMMU 27
+#define CLK_VDO0_SMI_LARB 28
+#define CLK_VDO0_SMI_RSI 29
+#define CLK_VDO0_DSI0_DSI 30
+#define CLK_VDO0_DSI1_DSI 31
+#define CLK_VDO0_DP_INTF0_DP_INTF 32
+#define CLK_VDO0_NR_CLK 33
+
+/* VDOSYS1 */
+#define CLK_VDO1_SMI_LARB2 0
+#define CLK_VDO1_SMI_LARB3 1
+#define CLK_VDO1_GALS 2
+#define CLK_VDO1_FAKE_ENG0 3
+#define CLK_VDO1_FAKE_ENG1 4
+#define CLK_VDO1_MDP_RDMA0 5
+#define CLK_VDO1_MDP_RDMA1 6
+#define CLK_VDO1_MDP_RDMA2 7
+#define CLK_VDO1_MDP_RDMA3 8
+#define CLK_VDO1_VPP_MERGE0 9
+#define CLK_VDO1_VPP_MERGE1 10
+#define CLK_VDO1_VPP_MERGE2 11
+#define CLK_VDO1_VPP_MERGE3 12
+#define CLK_VDO1_VPP_MERGE4 13
+#define CLK_VDO1_VPP2_TO_VDO1_DL_ASYNC 14
+#define CLK_VDO1_VPP3_TO_VDO1_DL_ASYNC 15
+#define CLK_VDO1_DISP_MUTEX 16
+#define CLK_VDO1_MDP_RDMA4 17
+#define CLK_VDO1_MDP_RDMA5 18
+#define CLK_VDO1_MDP_RDMA6 19
+#define CLK_VDO1_MDP_RDMA7 20
+#define CLK_VDO1_DP_INTF0_MMCK 21
+#define CLK_VDO1_DPI0_MM 22
+#define CLK_VDO1_DPI1_MM 23
+#define CLK_VDO1_MERGE0_DL_ASYNC 24
+#define CLK_VDO1_MERGE1_DL_ASYNC 25
+#define CLK_VDO1_MERGE2_DL_ASYNC 26
+#define CLK_VDO1_MERGE3_DL_ASYNC 27
+#define CLK_VDO1_MERGE4_DL_ASYNC 28
+#define CLK_VDO1_DSC_VDO1_DL_ASYNC 29
+#define CLK_VDO1_MERGE_VDO1_DL_ASYNC 30
+#define CLK_VDO1_PADDING0 31
+#define CLK_VDO1_PADDING1 32
+#define CLK_VDO1_PADDING2 33
+#define CLK_VDO1_PADDING3 34
+#define CLK_VDO1_PADDING4 35
+#define CLK_VDO1_PADDING5 36
+#define CLK_VDO1_PADDING6 37
+#define CLK_VDO1_PADDING7 38
+#define CLK_VDO1_DISP_RSZ0 39
+#define CLK_VDO1_DISP_RSZ1 40
+#define CLK_VDO1_DISP_RSZ2 41
+#define CLK_VDO1_DISP_RSZ3 42
+#define CLK_VDO1_HDR_VDO_FE0 43
+#define CLK_VDO1_HDR_GFX_FE0 44
+#define CLK_VDO1_HDR_VDO_BE 45
+#define CLK_VDO1_HDR_VDO_FE1 46
+#define CLK_VDO1_HDR_GFX_FE1 47
+#define CLK_VDO1_DISP_MIXER 48
+#define CLK_VDO1_HDR_VDO_FE0_DL_ASYNC 49
+#define CLK_VDO1_HDR_VDO_FE1_DL_ASYNC 50
+#define CLK_VDO1_HDR_GFX_FE0_DL_ASYNC 51
+#define CLK_VDO1_HDR_GFX_FE1_DL_ASYNC 52
+#define CLK_VDO1_HDR_VDO_BE_DL_ASYNC 53
+#define CLK_VDO1_DPI0 54
+#define CLK_VDO1_DISP_MONITOR_DPI0 55
+#define CLK_VDO1_DPI1 56
+#define CLK_VDO1_DISP_MONITOR_DPI1 57
+#define CLK_VDO1_DPINTF 58
+#define CLK_VDO1_DISP_MONITOR_DPINTF 59
+#define CLK_VDO1_26M_SLOW 60
+#define CLK_VDO1_DPI1_HDMI 61
+
+#endif /* _DT_BINDINGS_CLK_MT8188_H */
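On the provider side, each comment-delimited block above corresponds to one clock-controller node that exposes its IDs via #clock-cells = <1>. A hedged sketch for the MT8188 TOPCKGEN (the unit address and the "syscon" pairing are assumptions for illustration):

	/* sketch only: the reg address is an assumption, not taken from this patch */
	topckgen: clock-controller@10000000 {
		compatible = "mediatek,mt8188-topckgen", "syscon";
		reg = <0x10000000 0x1000>;
		#clock-cells = <1>;
	};

Consumers then reference a clock as <&topckgen CLK_TOP_UART>, pairing the provider phandle with one of the indices defined above.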
diff --git a/include/dt-bindings/clock/mediatek,mt8196-clock.h b/include/dt-bindings/clock/mediatek,mt8196-clock.h
new file mode 100644
index 000000000000..ae0946ab7621
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt8196-clock.h
@@ -0,0 +1,801 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8196_H
+#define _DT_BINDINGS_CLK_MT8196_H
+
+/* CKSYS */
+#define CLK_TOP_AXI 0
+#define CLK_TOP_MEM_SUB 1
+#define CLK_TOP_IO_NOC 2
+#define CLK_TOP_P_AXI 3
+#define CLK_TOP_UFS_PEXTP0_AXI 4
+#define CLK_TOP_PEXTP1_USB_AXI 5
+#define CLK_TOP_P_FMEM_SUB 6
+#define CLK_TOP_PEXPT0_MEM_SUB 7
+#define CLK_TOP_PEXTP1_USB_MEM_SUB 8
+#define CLK_TOP_P_NOC 9
+#define CLK_TOP_EMI_N 10
+#define CLK_TOP_EMI_S 11
+#define CLK_TOP_AP2CONN_HOST 12
+#define CLK_TOP_ATB 13
+#define CLK_TOP_CIRQ 14
+#define CLK_TOP_PBUS_156M 15
+#define CLK_TOP_EFUSE 16
+#define CLK_TOP_MCL3GIC 17
+#define CLK_TOP_MCINFRA 18
+#define CLK_TOP_DSP 19
+#define CLK_TOP_MFG_REF 20
+#define CLK_TOP_MFG_EB 21
+#define CLK_TOP_UART 22
+#define CLK_TOP_SPI0_BCLK 23
+#define CLK_TOP_SPI1_BCLK 24
+#define CLK_TOP_SPI2_BCLK 25
+#define CLK_TOP_SPI3_BCLK 26
+#define CLK_TOP_SPI4_BCLK 27
+#define CLK_TOP_SPI5_BCLK 28
+#define CLK_TOP_SPI6_BCLK 29
+#define CLK_TOP_SPI7_BCLK 30
+#define CLK_TOP_MSDC30_1 31
+#define CLK_TOP_MSDC30_2 32
+#define CLK_TOP_DISP_PWM 33
+#define CLK_TOP_USB_TOP_1P 34
+#define CLK_TOP_USB_XHCI_1P 35
+#define CLK_TOP_USB_FMCNT_P1 36
+#define CLK_TOP_I2C_P 37
+#define CLK_TOP_I2C_EAST 38
+#define CLK_TOP_I2C_WEST 39
+#define CLK_TOP_I2C_NORTH 40
+#define CLK_TOP_AES_UFSFDE 41
+#define CLK_TOP_UFS 42
+#define CLK_TOP_AUD_1 43
+#define CLK_TOP_AUD_2 44
+#define CLK_TOP_ADSP 45
+#define CLK_TOP_ADSP_UARTHUB_B 46
+#define CLK_TOP_DPMAIF_MAIN 47
+#define CLK_TOP_PWM 48
+#define CLK_TOP_MCUPM 49
+#define CLK_TOP_IPSEAST 50
+#define CLK_TOP_TL 51
+#define CLK_TOP_TL_P1 52
+#define CLK_TOP_TL_P2 53
+#define CLK_TOP_EMI_INTERFACE_546 54
+#define CLK_TOP_SDF 55
+#define CLK_TOP_UARTHUB_BCLK 56
+#define CLK_TOP_DPSW_CMP_26M 57
+#define CLK_TOP_SMAP 58
+#define CLK_TOP_SSR_PKA 59
+#define CLK_TOP_SSR_DMA 60
+#define CLK_TOP_SSR_KDF 61
+#define CLK_TOP_SSR_RNG 62
+#define CLK_TOP_SPU0 63
+#define CLK_TOP_SPU1 64
+#define CLK_TOP_DXCC 65
+#define CLK_TOP_APLL_I2SIN0 66
+#define CLK_TOP_APLL_I2SIN1 67
+#define CLK_TOP_APLL_I2SIN2 68
+#define CLK_TOP_APLL_I2SIN3 69
+#define CLK_TOP_APLL_I2SIN4 70
+#define CLK_TOP_APLL_I2SIN6 71
+#define CLK_TOP_APLL_I2SOUT0 72
+#define CLK_TOP_APLL_I2SOUT1 73
+#define CLK_TOP_APLL_I2SOUT2 74
+#define CLK_TOP_APLL_I2SOUT3 75
+#define CLK_TOP_APLL_I2SOUT4 76
+#define CLK_TOP_APLL_I2SOUT6 77
+#define CLK_TOP_APLL_FMI2S 78
+#define CLK_TOP_APLL_TDMOUT 79
+#define CLK_TOP_APLL12_DIV_TDMOUT_M 80
+#define CLK_TOP_APLL12_DIV_TDMOUT_B 81
+#define CLK_TOP_MAINPLL_D3 82
+#define CLK_TOP_MAINPLL_D4 83
+#define CLK_TOP_MAINPLL_D4_D2 84
+#define CLK_TOP_MAINPLL_D4_D4 85
+#define CLK_TOP_MAINPLL_D4_D8 86
+#define CLK_TOP_MAINPLL_D5 87
+#define CLK_TOP_MAINPLL_D5_D2 88
+#define CLK_TOP_MAINPLL_D5_D4 89
+#define CLK_TOP_MAINPLL_D5_D8 90
+#define CLK_TOP_MAINPLL_D6 91
+#define CLK_TOP_MAINPLL_D6_D2 92
+#define CLK_TOP_MAINPLL_D7 93
+#define CLK_TOP_MAINPLL_D7_D2 94
+#define CLK_TOP_MAINPLL_D7_D4 95
+#define CLK_TOP_MAINPLL_D7_D8 96
+#define CLK_TOP_MAINPLL_D9 97
+#define CLK_TOP_UNIVPLL_D4 98
+#define CLK_TOP_UNIVPLL_D4_D2 99
+#define CLK_TOP_UNIVPLL_D4_D4 100
+#define CLK_TOP_UNIVPLL_D4_D8 101
+#define CLK_TOP_UNIVPLL_D5 102
+#define CLK_TOP_UNIVPLL_D5_D2 103
+#define CLK_TOP_UNIVPLL_D5_D4 104
+#define CLK_TOP_UNIVPLL_D6 105
+#define CLK_TOP_UNIVPLL_D6_D2 106
+#define CLK_TOP_UNIVPLL_D6_D4 107
+#define CLK_TOP_UNIVPLL_D6_D8 108
+#define CLK_TOP_UNIVPLL_D6_D16 109
+#define CLK_TOP_UNIVPLL_192M 110
+#define CLK_TOP_UNIVPLL_192M_D4 111
+#define CLK_TOP_UNIVPLL_192M_D8 112
+#define CLK_TOP_UNIVPLL_192M_D16 113
+#define CLK_TOP_UNIVPLL_192M_D32 114
+#define CLK_TOP_UNIVPLL_192M_D10 115
+#define CLK_TOP_TVDPLL1_D2 116
+#define CLK_TOP_MSDCPLL_D2 117
+#define CLK_TOP_OSC_D2 118
+#define CLK_TOP_OSC_D3 119
+#define CLK_TOP_OSC_D4 120
+#define CLK_TOP_OSC_D5 121
+#define CLK_TOP_OSC_D7 122
+#define CLK_TOP_OSC_D8 123
+#define CLK_TOP_OSC_D10 124
+#define CLK_TOP_OSC_D14 125
+#define CLK_TOP_OSC_D20 126
+#define CLK_TOP_OSC_D32 127
+#define CLK_TOP_OSC_D40 128
+#define CLK_TOP_SFLASH 129
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_MAINPLL 0
+#define CLK_APMIXED_UNIVPLL 1
+#define CLK_APMIXED_MSDCPLL 2
+#define CLK_APMIXED_ADSPPLL 3
+#define CLK_APMIXED_EMIPLL 4
+#define CLK_APMIXED_EMIPLL2 5
+#define CLK_APMIXED_NET1PLL 6
+#define CLK_APMIXED_SGMIIPLL 7
+
+/* CKSYS_GP2 */
+#define CLK_TOP2_SENINF0 0
+#define CLK_TOP2_SENINF1 1
+#define CLK_TOP2_SENINF2 2
+#define CLK_TOP2_SENINF3 3
+#define CLK_TOP2_SENINF4 4
+#define CLK_TOP2_SENINF5 5
+#define CLK_TOP2_IMG1 6
+#define CLK_TOP2_IPE 7
+#define CLK_TOP2_CAM 8
+#define CLK_TOP2_CAMTM 9
+#define CLK_TOP2_DPE 10
+#define CLK_TOP2_VDEC 11
+#define CLK_TOP2_CCUSYS 12
+#define CLK_TOP2_CCUTM 13
+#define CLK_TOP2_VENC 14
+#define CLK_TOP2_DP1 15
+#define CLK_TOP2_DP0 16
+#define CLK_TOP2_DISP 17
+#define CLK_TOP2_MDP 18
+#define CLK_TOP2_MMINFRA 19
+#define CLK_TOP2_MMINFRA_SNOC 20
+#define CLK_TOP2_MMUP 21
+#define CLK_TOP2_MMINFRA_AO 22
+#define CLK_TOP2_MAINPLL2_D2 23
+#define CLK_TOP2_MAINPLL2_D3 24
+#define CLK_TOP2_MAINPLL2_D4 25
+#define CLK_TOP2_MAINPLL2_D4_D2 26
+#define CLK_TOP2_MAINPLL2_D4_D4 27
+#define CLK_TOP2_MAINPLL2_D5 28
+#define CLK_TOP2_MAINPLL2_D5_D2 29
+#define CLK_TOP2_MAINPLL2_D6 30
+#define CLK_TOP2_MAINPLL2_D6_D2 31
+#define CLK_TOP2_MAINPLL2_D7 32
+#define CLK_TOP2_MAINPLL2_D7_D2 33
+#define CLK_TOP2_MAINPLL2_D9 34
+#define CLK_TOP2_UNIVPLL2_D3 35
+#define CLK_TOP2_UNIVPLL2_D4 36
+#define CLK_TOP2_UNIVPLL2_D4_D2 37
+#define CLK_TOP2_UNIVPLL2_D5 38
+#define CLK_TOP2_UNIVPLL2_D5_D2 39
+#define CLK_TOP2_UNIVPLL2_D6 40
+#define CLK_TOP2_UNIVPLL2_D6_D2 41
+#define CLK_TOP2_UNIVPLL2_D6_D4 42
+#define CLK_TOP2_UNIVPLL2_D7 43
+#define CLK_TOP2_IMGPLL_D2 44
+#define CLK_TOP2_IMGPLL_D4 45
+#define CLK_TOP2_IMGPLL_D5 46
+#define CLK_TOP2_IMGPLL_D5_D2 47
+#define CLK_TOP2_MMPLL2_D3 48
+#define CLK_TOP2_MMPLL2_D4 49
+#define CLK_TOP2_MMPLL2_D4_D2 50
+#define CLK_TOP2_MMPLL2_D5 51
+#define CLK_TOP2_MMPLL2_D5_D2 52
+#define CLK_TOP2_MMPLL2_D6 53
+#define CLK_TOP2_MMPLL2_D6_D2 54
+#define CLK_TOP2_MMPLL2_D7 55
+#define CLK_TOP2_MMPLL2_D9 56
+#define CLK_TOP2_TVDPLL1_D4 57
+#define CLK_TOP2_TVDPLL1_D8 58
+#define CLK_TOP2_TVDPLL1_D16 59
+#define CLK_TOP2_TVDPLL2_D2 60
+#define CLK_TOP2_TVDPLL2_D4 61
+#define CLK_TOP2_TVDPLL2_D8 62
+#define CLK_TOP2_TVDPLL2_D16 63
+#define CLK_TOP2_DVO 64
+#define CLK_TOP2_DVO_FAVT 65
+#define CLK_TOP2_TVDPLL3_D2 66
+#define CLK_TOP2_TVDPLL3_D4 67
+#define CLK_TOP2_TVDPLL3_D8 68
+#define CLK_TOP2_TVDPLL3_D16 69
+
+/* APMIXEDSYS_GP2 */
+#define CLK_APMIXED2_MAINPLL2 0
+#define CLK_APMIXED2_UNIVPLL2 1
+#define CLK_APMIXED2_MMPLL2 2
+#define CLK_APMIXED2_IMGPLL 3
+#define CLK_APMIXED2_TVDPLL1 4
+#define CLK_APMIXED2_TVDPLL2 5
+#define CLK_APMIXED2_TVDPLL3 6
+
+/* IMP_IIC_WRAP_E */
+#define CLK_IMPE_I2C5 0
+
+/* IMP_IIC_WRAP_W */
+#define CLK_IMPW_I2C0 0
+#define CLK_IMPW_I2C3 1
+#define CLK_IMPW_I2C6 2
+#define CLK_IMPW_I2C10 3
+
+/* IMP_IIC_WRAP_N */
+#define CLK_IMPN_I2C1 0
+#define CLK_IMPN_I2C2 1
+#define CLK_IMPN_I2C4 2
+#define CLK_IMPN_I2C7 3
+#define CLK_IMPN_I2C8 4
+#define CLK_IMPN_I2C9 5
+
+/* IMP_IIC_WRAP_C */
+#define CLK_IMPC_I2C11 0
+#define CLK_IMPC_I2C12 1
+#define CLK_IMPC_I2C13 2
+#define CLK_IMPC_I2C14 3
+
+/* PERICFG_AO */
+#define CLK_PERI_AO_UART0_BCLK 0
+#define CLK_PERI_AO_UART1_BCLK 1
+#define CLK_PERI_AO_UART2_BCLK 2
+#define CLK_PERI_AO_UART3_BCLK 3
+#define CLK_PERI_AO_UART4_BCLK 4
+#define CLK_PERI_AO_UART5_BCLK 5
+#define CLK_PERI_AO_PWM_X16W_HCLK 6
+#define CLK_PERI_AO_PWM_X16W_BCLK 7
+#define CLK_PERI_AO_PWM_PWM_BCLK0 8
+#define CLK_PERI_AO_PWM_PWM_BCLK1 9
+#define CLK_PERI_AO_PWM_PWM_BCLK2 10
+#define CLK_PERI_AO_PWM_PWM_BCLK3 11
+#define CLK_PERI_AO_SPI0_BCLK 12
+#define CLK_PERI_AO_SPI1_BCLK 13
+#define CLK_PERI_AO_SPI2_BCLK 14
+#define CLK_PERI_AO_SPI3_BCLK 15
+#define CLK_PERI_AO_SPI4_BCLK 16
+#define CLK_PERI_AO_SPI5_BCLK 17
+#define CLK_PERI_AO_SPI6_BCLK 18
+#define CLK_PERI_AO_SPI7_BCLK 19
+#define CLK_PERI_AO_AP_DMA_X32W_BCLK 20
+#define CLK_PERI_AO_MSDC1_MSDC_SRC 21
+#define CLK_PERI_AO_MSDC1_HCLK 22
+#define CLK_PERI_AO_MSDC1_AXI 23
+#define CLK_PERI_AO_MSDC1_HCLK_WRAP 24
+#define CLK_PERI_AO_MSDC2_MSDC_SRC 25
+#define CLK_PERI_AO_MSDC2_HCLK 26
+#define CLK_PERI_AO_MSDC2_AXI 27
+#define CLK_PERI_AO_MSDC2_HCLK_WRAP 28
+#define CLK_PERI_AO_FLASHIF_FLASH 29
+#define CLK_PERI_AO_FLASHIF_27M 30
+#define CLK_PERI_AO_FLASHIF_DRAM 31
+#define CLK_PERI_AO_FLASHIF_AXI 32
+#define CLK_PERI_AO_FLASHIF_BCLK 33
+
+/* UFSCFG_AO */
+#define CLK_UFSAO_UNIPRO_TX_SYM 0
+#define CLK_UFSAO_UNIPRO_RX_SYM0 1
+#define CLK_UFSAO_UNIPRO_RX_SYM1 2
+#define CLK_UFSAO_UNIPRO_SYS 3
+#define CLK_UFSAO_UNIPRO_SAP 4
+#define CLK_UFSAO_PHY_SAP 5
+#define CLK_UFSAO_UFSHCI_UFS 6
+#define CLK_UFSAO_UFSHCI_AES 7
+
+/* PEXTP0CFG_AO */
+#define CLK_PEXT_PEXTP_MAC_P0_TL 0
+#define CLK_PEXT_PEXTP_MAC_P0_REF 1
+#define CLK_PEXT_PEXTP_PHY_P0_MCU_BUS 2
+#define CLK_PEXT_PEXTP_PHY_P0_PEXTP_REF 3
+#define CLK_PEXT_PEXTP_MAC_P0_AXI_250 4
+#define CLK_PEXT_PEXTP_MAC_P0_AHB_APB 5
+#define CLK_PEXT_PEXTP_MAC_P0_PL_P 6
+#define CLK_PEXT_PEXTP_VLP_AO_P0_LP 7
+
+/* PEXTP1CFG_AO */
+#define CLK_PEXT1_PEXTP_MAC_P1_TL 0
+#define CLK_PEXT1_PEXTP_MAC_P1_REF 1
+#define CLK_PEXT1_PEXTP_MAC_P2_TL 2
+#define CLK_PEXT1_PEXTP_MAC_P2_REF 3
+#define CLK_PEXT1_PEXTP_PHY_P1_MCU_BUS 4
+#define CLK_PEXT1_PEXTP_PHY_P1_PEXTP_REF 5
+#define CLK_PEXT1_PEXTP_PHY_P2_MCU_BUS 6
+#define CLK_PEXT1_PEXTP_PHY_P2_PEXTP_REF 7
+#define CLK_PEXT1_PEXTP_MAC_P1_AXI_250 8
+#define CLK_PEXT1_PEXTP_MAC_P1_AHB_APB 9
+#define CLK_PEXT1_PEXTP_MAC_P1_PL_P 10
+#define CLK_PEXT1_PEXTP_MAC_P2_AXI_250 11
+#define CLK_PEXT1_PEXTP_MAC_P2_AHB_APB 12
+#define CLK_PEXT1_PEXTP_MAC_P2_PL_P 13
+#define CLK_PEXT1_PEXTP_VLP_AO_P1_LP 14
+#define CLK_PEXT1_PEXTP_VLP_AO_P2_LP 15
+
+/* VLP_CKSYS */
+#define CLK_VLP_APLL1 0
+#define CLK_VLP_APLL2 1
+#define CLK_VLP_SCP 2
+#define CLK_VLP_SCP_SPI 3
+#define CLK_VLP_SCP_IIC 4
+#define CLK_VLP_SCP_IIC_HS 5
+#define CLK_VLP_PWRAP_ULPOSC 6
+#define CLK_VLP_SPMI_M_TIA_32K 7
+#define CLK_VLP_APXGPT_26M_B 8
+#define CLK_VLP_DPSW 9
+#define CLK_VLP_DPSW_CENTRAL 10
+#define CLK_VLP_SPMI_M_MST 11
+#define CLK_VLP_DVFSRC 12
+#define CLK_VLP_PWM_VLP 13
+#define CLK_VLP_AXI_VLP 14
+#define CLK_VLP_SYSTIMER_26M 15
+#define CLK_VLP_SSPM 16
+#define CLK_VLP_SRCK 17
+#define CLK_VLP_CAMTG0 18
+#define CLK_VLP_CAMTG1 19
+#define CLK_VLP_CAMTG2 20
+#define CLK_VLP_CAMTG3 21
+#define CLK_VLP_CAMTG4 22
+#define CLK_VLP_CAMTG5 23
+#define CLK_VLP_CAMTG6 24
+#define CLK_VLP_CAMTG7 25
+#define CLK_VLP_SSPM_26M 26
+#define CLK_VLP_ULPOSC_SSPM 27
+#define CLK_VLP_VLP_PBUS_26M 28
+#define CLK_VLP_DEBUG_ERR_FLAG 29
+#define CLK_VLP_DPMSRDMA 30
+#define CLK_VLP_VLP_PBUS_156M 31
+#define CLK_VLP_SPM 32
+#define CLK_VLP_MMINFRA 33
+#define CLK_VLP_USB_TOP 34
+#define CLK_VLP_USB_XHCI 35
+#define CLK_VLP_NOC_VLP 36
+#define CLK_VLP_AUDIO_H 37
+#define CLK_VLP_AUD_ENGEN1 38
+#define CLK_VLP_AUD_ENGEN2 39
+#define CLK_VLP_AUD_INTBUS 40
+#define CLK_VLP_SPVLP_26M 41
+#define CLK_VLP_SPU0_VLP 42
+#define CLK_VLP_SPU1_VLP 43
+#define CLK_VLP_CLK26M 44
+#define CLK_VLP_APLL1_D4 45
+#define CLK_VLP_APLL1_D8 46
+#define CLK_VLP_APLL2_D4 47
+#define CLK_VLP_APLL2_D8 48
+
+/* DISPSYS_CONFIG */
+#define CLK_MM_CONFIG 0
+#define CLK_MM_DISP_MUTEX0 1
+#define CLK_MM_DISP_AAL0 2
+#define CLK_MM_DISP_AAL1 3
+#define CLK_MM_DISP_C3D0 4
+#define CLK_MM_DISP_C3D1 5
+#define CLK_MM_DISP_C3D2 6
+#define CLK_MM_DISP_C3D3 7
+#define CLK_MM_DISP_CCORR0 8
+#define CLK_MM_DISP_CCORR1 9
+#define CLK_MM_DISP_CCORR2 10
+#define CLK_MM_DISP_CCORR3 11
+#define CLK_MM_DISP_CHIST0 12
+#define CLK_MM_DISP_CHIST1 13
+#define CLK_MM_DISP_COLOR0 14
+#define CLK_MM_DISP_COLOR1 15
+#define CLK_MM_DISP_DITHER0 16
+#define CLK_MM_DISP_DITHER1 17
+#define CLK_MM_DISP_DLI_ASYNC0 18
+#define CLK_MM_DISP_DLI_ASYNC1 19
+#define CLK_MM_DISP_DLI_ASYNC2 20
+#define CLK_MM_DISP_DLI_ASYNC3 21
+#define CLK_MM_DISP_DLI_ASYNC4 22
+#define CLK_MM_DISP_DLI_ASYNC5 23
+#define CLK_MM_DISP_DLI_ASYNC6 24
+#define CLK_MM_DISP_DLI_ASYNC7 25
+#define CLK_MM_DISP_DLI_ASYNC8 26
+#define CLK_MM_DISP_DLI_ASYNC9 27
+#define CLK_MM_DISP_DLI_ASYNC10 28
+#define CLK_MM_DISP_DLI_ASYNC11 29
+#define CLK_MM_DISP_DLI_ASYNC12 30
+#define CLK_MM_DISP_DLI_ASYNC13 31
+#define CLK_MM_DISP_DLI_ASYNC14 32
+#define CLK_MM_DISP_DLI_ASYNC15 33
+#define CLK_MM_DISP_DLO_ASYNC0 34
+#define CLK_MM_DISP_DLO_ASYNC1 35
+#define CLK_MM_DISP_DLO_ASYNC2 36
+#define CLK_MM_DISP_DLO_ASYNC3 37
+#define CLK_MM_DISP_DLO_ASYNC4 38
+#define CLK_MM_DISP_DLO_ASYNC5 39
+#define CLK_MM_DISP_DLO_ASYNC6 40
+#define CLK_MM_DISP_DLO_ASYNC7 41
+#define CLK_MM_DISP_DLO_ASYNC8 42
+#define CLK_MM_DISP_GAMMA0 43
+#define CLK_MM_DISP_GAMMA1 44
+#define CLK_MM_MDP_AAL0 45
+#define CLK_MM_MDP_AAL1 46
+#define CLK_MM_MDP_RDMA0 47
+#define CLK_MM_DISP_POSTMASK0 48
+#define CLK_MM_DISP_POSTMASK1 49
+#define CLK_MM_MDP_RSZ0 50
+#define CLK_MM_MDP_RSZ1 51
+#define CLK_MM_DISP_SPR0 52
+#define CLK_MM_DISP_TDSHP0 53
+#define CLK_MM_DISP_TDSHP1 54
+#define CLK_MM_DISP_WDMA0 55
+#define CLK_MM_DISP_Y2R0 56
+#define CLK_MM_SMI_SUB_COMM0 57
+#define CLK_MM_DISP_FAKE_ENG0 58
+
+/* DISPSYS1_CONFIG */
+#define CLK_MM1_DISPSYS1_CONFIG 0
+#define CLK_MM1_DISPSYS1_S_CONFIG 1
+#define CLK_MM1_DISP_MUTEX0 2
+#define CLK_MM1_DISP_DLI_ASYNC20 3
+#define CLK_MM1_DISP_DLI_ASYNC21 4
+#define CLK_MM1_DISP_DLI_ASYNC22 5
+#define CLK_MM1_DISP_DLI_ASYNC23 6
+#define CLK_MM1_DISP_DLI_ASYNC24 7
+#define CLK_MM1_DISP_DLI_ASYNC25 8
+#define CLK_MM1_DISP_DLI_ASYNC26 9
+#define CLK_MM1_DISP_DLI_ASYNC27 10
+#define CLK_MM1_DISP_DLI_ASYNC28 11
+#define CLK_MM1_DISP_RELAY0 12
+#define CLK_MM1_DISP_RELAY1 13
+#define CLK_MM1_DISP_RELAY2 14
+#define CLK_MM1_DISP_RELAY3 15
+#define CLK_MM1_DISP_DP_INTF0 16
+#define CLK_MM1_DISP_DP_INTF1 17
+#define CLK_MM1_DISP_DSC_WRAP0 18
+#define CLK_MM1_DISP_DSC_WRAP1 19
+#define CLK_MM1_DISP_DSC_WRAP2 20
+#define CLK_MM1_DISP_DSC_WRAP3 21
+#define CLK_MM1_DISP_DSI0 22
+#define CLK_MM1_DISP_DSI1 23
+#define CLK_MM1_DISP_DSI2 24
+#define CLK_MM1_DISP_DVO0 25
+#define CLK_MM1_DISP_GDMA0 26
+#define CLK_MM1_DISP_MERGE0 27
+#define CLK_MM1_DISP_MERGE1 28
+#define CLK_MM1_DISP_MERGE2 29
+#define CLK_MM1_DISP_ODDMR0 30
+#define CLK_MM1_DISP_POSTALIGN0 31
+#define CLK_MM1_DISP_DITHER2 32
+#define CLK_MM1_DISP_R2Y0 33
+#define CLK_MM1_DISP_SPLITTER0 34
+#define CLK_MM1_DISP_SPLITTER1 35
+#define CLK_MM1_DISP_SPLITTER2 36
+#define CLK_MM1_DISP_SPLITTER3 37
+#define CLK_MM1_DISP_VDCM0 38
+#define CLK_MM1_DISP_WDMA1 39
+#define CLK_MM1_DISP_WDMA2 40
+#define CLK_MM1_DISP_WDMA3 41
+#define CLK_MM1_DISP_WDMA4 42
+#define CLK_MM1_MDP_RDMA1 43
+#define CLK_MM1_SMI_LARB0 44
+#define CLK_MM1_MOD1 45
+#define CLK_MM1_MOD2 46
+#define CLK_MM1_MOD3 47
+#define CLK_MM1_MOD4 48
+#define CLK_MM1_MOD5 49
+#define CLK_MM1_MOD6 50
+#define CLK_MM1_CG0 51
+#define CLK_MM1_CG1 52
+#define CLK_MM1_CG2 53
+#define CLK_MM1_CG3 54
+#define CLK_MM1_CG4 55
+#define CLK_MM1_CG5 56
+#define CLK_MM1_CG6 57
+#define CLK_MM1_CG7 58
+#define CLK_MM1_F26M 59
+
+/* OVLSYS_CONFIG */
+#define CLK_OVLSYS_CONFIG 0
+#define CLK_OVL_FAKE_ENG0 1
+#define CLK_OVL_FAKE_ENG1 2
+#define CLK_OVL_MUTEX0 3
+#define CLK_OVL_EXDMA0 4
+#define CLK_OVL_EXDMA1 5
+#define CLK_OVL_EXDMA2 6
+#define CLK_OVL_EXDMA3 7
+#define CLK_OVL_EXDMA4 8
+#define CLK_OVL_EXDMA5 9
+#define CLK_OVL_EXDMA6 10
+#define CLK_OVL_EXDMA7 11
+#define CLK_OVL_EXDMA8 12
+#define CLK_OVL_EXDMA9 13
+#define CLK_OVL_BLENDER0 14
+#define CLK_OVL_BLENDER1 15
+#define CLK_OVL_BLENDER2 16
+#define CLK_OVL_BLENDER3 17
+#define CLK_OVL_BLENDER4 18
+#define CLK_OVL_BLENDER5 19
+#define CLK_OVL_BLENDER6 20
+#define CLK_OVL_BLENDER7 21
+#define CLK_OVL_BLENDER8 22
+#define CLK_OVL_BLENDER9 23
+#define CLK_OVL_OUTPROC0 24
+#define CLK_OVL_OUTPROC1 25
+#define CLK_OVL_OUTPROC2 26
+#define CLK_OVL_OUTPROC3 27
+#define CLK_OVL_OUTPROC4 28
+#define CLK_OVL_OUTPROC5 29
+#define CLK_OVL_MDP_RSZ0 30
+#define CLK_OVL_MDP_RSZ1 31
+#define CLK_OVL_DISP_WDMA0 32
+#define CLK_OVL_DISP_WDMA1 33
+#define CLK_OVL_UFBC_WDMA0 34
+#define CLK_OVL_MDP_RDMA0 35
+#define CLK_OVL_MDP_RDMA1 36
+#define CLK_OVL_BWM0 37
+#define CLK_OVL_DLI0 38
+#define CLK_OVL_DLI1 39
+#define CLK_OVL_DLI2 40
+#define CLK_OVL_DLI3 41
+#define CLK_OVL_DLI4 42
+#define CLK_OVL_DLI5 43
+#define CLK_OVL_DLI6 44
+#define CLK_OVL_DLI7 45
+#define CLK_OVL_DLI8 46
+#define CLK_OVL_DLO0 47
+#define CLK_OVL_DLO1 48
+#define CLK_OVL_DLO2 49
+#define CLK_OVL_DLO3 50
+#define CLK_OVL_DLO4 51
+#define CLK_OVL_DLO5 52
+#define CLK_OVL_DLO6 53
+#define CLK_OVL_DLO7 54
+#define CLK_OVL_DLO8 55
+#define CLK_OVL_DLO9 56
+#define CLK_OVL_DLO10 57
+#define CLK_OVL_DLO11 58
+#define CLK_OVL_DLO12 59
+#define CLK_OVLSYS_RELAY0 60
+#define CLK_OVL_INLINEROT0 61
+#define CLK_OVL_SMI 62
+#define CLK_OVL_SMI_SMI 63
+
+/* OVLSYS1_CONFIG */
+#define CLK_OVL1_OVLSYS_CONFIG 0
+#define CLK_OVL1_OVL_FAKE_ENG0 1
+#define CLK_OVL1_OVL_FAKE_ENG1 2
+#define CLK_OVL1_OVL_MUTEX0 3
+#define CLK_OVL1_OVL_EXDMA0 4
+#define CLK_OVL1_OVL_EXDMA1 5
+#define CLK_OVL1_OVL_EXDMA2 6
+#define CLK_OVL1_OVL_EXDMA3 7
+#define CLK_OVL1_OVL_EXDMA4 8
+#define CLK_OVL1_OVL_EXDMA5 9
+#define CLK_OVL1_OVL_EXDMA6 10
+#define CLK_OVL1_OVL_EXDMA7 11
+#define CLK_OVL1_OVL_EXDMA8 12
+#define CLK_OVL1_OVL_EXDMA9 13
+#define CLK_OVL1_OVL_BLENDER0 14
+#define CLK_OVL1_OVL_BLENDER1 15
+#define CLK_OVL1_OVL_BLENDER2 16
+#define CLK_OVL1_OVL_BLENDER3 17
+#define CLK_OVL1_OVL_BLENDER4 18
+#define CLK_OVL1_OVL_BLENDER5 19
+#define CLK_OVL1_OVL_BLENDER6 20
+#define CLK_OVL1_OVL_BLENDER7 21
+#define CLK_OVL1_OVL_BLENDER8 22
+#define CLK_OVL1_OVL_BLENDER9 23
+#define CLK_OVL1_OVL_OUTPROC0 24
+#define CLK_OVL1_OVL_OUTPROC1 25
+#define CLK_OVL1_OVL_OUTPROC2 26
+#define CLK_OVL1_OVL_OUTPROC3 27
+#define CLK_OVL1_OVL_OUTPROC4 28
+#define CLK_OVL1_OVL_OUTPROC5 29
+#define CLK_OVL1_OVL_MDP_RSZ0 30
+#define CLK_OVL1_OVL_MDP_RSZ1 31
+#define CLK_OVL1_OVL_DISP_WDMA0 32
+#define CLK_OVL1_OVL_DISP_WDMA1 33
+#define CLK_OVL1_OVL_UFBC_WDMA0 34
+#define CLK_OVL1_OVL_MDP_RDMA0 35
+#define CLK_OVL1_OVL_MDP_RDMA1 36
+#define CLK_OVL1_OVL_BWM0 37
+#define CLK_OVL1_DLI0 38
+#define CLK_OVL1_DLI1 39
+#define CLK_OVL1_DLI2 40
+#define CLK_OVL1_DLI3 41
+#define CLK_OVL1_DLI4 42
+#define CLK_OVL1_DLI5 43
+#define CLK_OVL1_DLI6 44
+#define CLK_OVL1_DLI7 45
+#define CLK_OVL1_DLI8 46
+#define CLK_OVL1_DLO0 47
+#define CLK_OVL1_DLO1 48
+#define CLK_OVL1_DLO2 49
+#define CLK_OVL1_DLO3 50
+#define CLK_OVL1_DLO4 51
+#define CLK_OVL1_DLO5 52
+#define CLK_OVL1_DLO6 53
+#define CLK_OVL1_DLO7 54
+#define CLK_OVL1_DLO8 55
+#define CLK_OVL1_DLO9 56
+#define CLK_OVL1_DLO10 57
+#define CLK_OVL1_DLO11 58
+#define CLK_OVL1_DLO12 59
+#define CLK_OVL1_OVLSYS_RELAY0 60
+#define CLK_OVL1_OVL_INLINEROT0 61
+#define CLK_OVL1_SMI 62
+
+/* VDEC_SOC_GCON_BASE */
+#define CLK_VDE1_LARB1_CKEN 0
+#define CLK_VDE1_LAT_CKEN 1
+#define CLK_VDE1_LAT_ACTIVE 2
+#define CLK_VDE1_LAT_CKEN_ENG 3
+#define CLK_VDE1_VDEC_CKEN 4
+#define CLK_VDE1_VDEC_ACTIVE 5
+#define CLK_VDE1_VDEC_CKEN_ENG 6
+#define CLK_VDE1_VDEC_SOC_APTV_EN 7
+#define CLK_VDE1_VDEC_SOC_APTV_TOP_EN 8
+#define CLK_VDE1_VDEC_SOC_IPS_EN 9
+
+/* VDEC_GCON_BASE */
+#define CLK_VDE2_LARB1_CKEN 0
+#define CLK_VDE2_LAT_CKEN 1
+#define CLK_VDE2_LAT_ACTIVE 2
+#define CLK_VDE2_LAT_CKEN_ENG 3
+#define CLK_VDE2_VDEC_CKEN 4
+#define CLK_VDE2_VDEC_ACTIVE 5
+#define CLK_VDE2_VDEC_CKEN_ENG 6
+
+/* VENC_GCON */
+#define CLK_VEN1_CKE0_LARB 0
+#define CLK_VEN1_CKE1_VENC 1
+#define CLK_VEN1_CKE2_JPGENC 2
+#define CLK_VEN1_CKE3_JPGDEC 3
+#define CLK_VEN1_CKE4_JPGDEC_C1 4
+#define CLK_VEN1_CKE5_GALS 5
+#define CLK_VEN1_CKE29_VENC_ADAB_CTRL 6
+#define CLK_VEN1_CKE29_VENC_XPC_CTRL 7
+#define CLK_VEN1_CKE6_GALS_SRAM 8
+#define CLK_VEN1_RES_FLAT 9
+
+/* VENC_GCON_CORE1 */
+#define CLK_VEN2_CKE0_LARB 0
+#define CLK_VEN2_CKE1_VENC 1
+#define CLK_VEN2_CKE2_JPGENC 2
+#define CLK_VEN2_CKE3_JPGDEC 3
+#define CLK_VEN2_CKE5_GALS 4
+#define CLK_VEN2_CKE29_VENC_XPC_CTRL 5
+#define CLK_VEN2_CKE6_GALS_SRAM 6
+#define CLK_VEN2_RES_FLAT 7
+
+/* VENC_GCON_CORE2 */
+#define CLK_VEN_C2_CKE0_LARB 0
+#define CLK_VEN_C2_CKE1_VENC 1
+#define CLK_VEN_C2_CKE5_GALS 2
+#define CLK_VEN_C2_CKE29_VENC_XPC_CTRL 3
+#define CLK_VEN_C2_CKE6_GALS_SRAM 4
+#define CLK_VEN_C2_RES_FLAT 5
+
+/* MDPSYS_CONFIG */
+#define CLK_MDP_MDP_MUTEX0 0
+#define CLK_MDP_SMI0 1
+#define CLK_MDP_SMI0_SMI 2
+#define CLK_MDP_APB_BUS 3
+#define CLK_MDP_MDP_RDMA0 4
+#define CLK_MDP_MDP_RDMA1 5
+#define CLK_MDP_MDP_RDMA2 6
+#define CLK_MDP_MDP_BIRSZ0 7
+#define CLK_MDP_MDP_HDR0 8
+#define CLK_MDP_MDP_AAL0 9
+#define CLK_MDP_MDP_RSZ0 10
+#define CLK_MDP_MDP_RSZ2 11
+#define CLK_MDP_MDP_TDSHP0 12
+#define CLK_MDP_MDP_COLOR0 13
+#define CLK_MDP_MDP_WROT0 14
+#define CLK_MDP_MDP_WROT1 15
+#define CLK_MDP_MDP_WROT2 16
+#define CLK_MDP_MDP_FAKE_ENG0 17
+#define CLK_MDP_APB_DB 18
+#define CLK_MDP_MDP_DLI_ASYNC0 19
+#define CLK_MDP_MDP_DLI_ASYNC1 20
+#define CLK_MDP_MDP_DLO_ASYNC0 21
+#define CLK_MDP_MDP_DLO_ASYNC1 22
+#define CLK_MDP_MDP_DLI_ASYNC2 23
+#define CLK_MDP_MDP_DLO_ASYNC2 24
+#define CLK_MDP_MDP_DLO_ASYNC3 25
+#define CLK_MDP_IMG_DL_ASYNC0 26
+#define CLK_MDP_MDP_RROT0 27
+#define CLK_MDP_MDP_MERGE0 28
+#define CLK_MDP_MDP_C3D0 29
+#define CLK_MDP_MDP_FG0 30
+#define CLK_MDP_MDP_CLA2 31
+#define CLK_MDP_MDP_DLO_ASYNC4 32
+#define CLK_MDP_VPP_RSZ0 33
+#define CLK_MDP_VPP_RSZ1 34
+#define CLK_MDP_MDP_DLO_ASYNC5 35
+#define CLK_MDP_IMG0 36
+#define CLK_MDP_F26M 37
+#define CLK_MDP_IMG_DL_RELAY0 38
+#define CLK_MDP_IMG_DL_RELAY1 39
+
+/* MDPSYS1_CONFIG */
+#define CLK_MDP1_MDP_MUTEX0 0
+#define CLK_MDP1_SMI0 1
+#define CLK_MDP1_SMI0_SMI 2
+#define CLK_MDP1_APB_BUS 3
+#define CLK_MDP1_MDP_RDMA0 4
+#define CLK_MDP1_MDP_RDMA1 5
+#define CLK_MDP1_MDP_RDMA2 6
+#define CLK_MDP1_MDP_BIRSZ0 7
+#define CLK_MDP1_MDP_HDR0 8
+#define CLK_MDP1_MDP_AAL0 9
+#define CLK_MDP1_MDP_RSZ0 10
+#define CLK_MDP1_MDP_RSZ2 11
+#define CLK_MDP1_MDP_TDSHP0 12
+#define CLK_MDP1_MDP_COLOR0 13
+#define CLK_MDP1_MDP_WROT0 14
+#define CLK_MDP1_MDP_WROT1 15
+#define CLK_MDP1_MDP_WROT2 16
+#define CLK_MDP1_MDP_FAKE_ENG0 17
+#define CLK_MDP1_APB_DB 18
+#define CLK_MDP1_MDP_DLI_ASYNC0 19
+#define CLK_MDP1_MDP_DLI_ASYNC1 20
+#define CLK_MDP1_MDP_DLO_ASYNC0 21
+#define CLK_MDP1_MDP_DLO_ASYNC1 22
+#define CLK_MDP1_MDP_DLI_ASYNC2 23
+#define CLK_MDP1_MDP_DLO_ASYNC2 24
+#define CLK_MDP1_MDP_DLO_ASYNC3 25
+#define CLK_MDP1_IMG_DL_ASYNC0 26
+#define CLK_MDP1_MDP_RROT0 27
+#define CLK_MDP1_MDP_MERGE0 28
+#define CLK_MDP1_MDP_C3D0 29
+#define CLK_MDP1_MDP_FG0 30
+#define CLK_MDP1_MDP_CLA2 31
+#define CLK_MDP1_MDP_DLO_ASYNC4 32
+#define CLK_MDP1_VPP_RSZ0 33
+#define CLK_MDP1_VPP_RSZ1 34
+#define CLK_MDP1_MDP_DLO_ASYNC5 35
+#define CLK_MDP1_IMG0 36
+#define CLK_MDP1_F26M 37
+#define CLK_MDP1_IMG_DL_RELAY0 38
+#define CLK_MDP1_IMG_DL_RELAY1 39
+
+/* DISP_VDISP_AO_CONFIG */
+#define CLK_MM_V_DISP_VDISP_AO_CONFIG 0
+#define CLK_MM_V_DISP_DPC 1
+#define CLK_MM_V_SMI_SUB_SOMM0 2
+
+/* MFGPLL_PLL_CTRL */
+#define CLK_MFG_AO_MFGPLL 0
+
+/* MFGPLL_SC0_PLL_CTRL */
+#define CLK_MFGSC0_AO_MFGPLL_SC0 0
+
+/* MFGPLL_SC1_PLL_CTRL */
+#define CLK_MFGSC1_AO_MFGPLL_SC1 0
+
+/* CCIPLL_PLL_CTRL */
+#define CLK_CCIPLL 0
+
+/* ARMPLL_LL_PLL_CTRL */
+#define CLK_CPLL_ARMPLL_LL 0
+
+/* ARMPLL_BL_PLL_CTRL */
+#define CLK_CPBL_ARMPLL_BL 0
+
+/* ARMPLL_B_PLL_CTRL */
+#define CLK_CPB_ARMPLL_B 0
+
+/* PTPPLL_PLL_CTRL */
+#define CLK_PTPPLL 0
+
+#endif /* _DT_BINDINGS_CLK_MT8196_H */
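The MT8196 I2C wrappers follow the same pattern: the per-wrapper indices restart at 0, so a consumer must pair the correct provider phandle with the ID. A purely illustrative sketch, assuming an i2c5 node behind IMP_IIC_WRAP_E:

	/* illustrative: node labels are assumptions, not from this patch */
	&i2c5 {
		clocks = <&imp_iic_wrap_e CLK_IMPE_I2C5>;
		clock-names = "main";
	};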
diff --git a/include/dt-bindings/clock/mediatek,mt8365-clk.h b/include/dt-bindings/clock/mediatek,mt8365-clk.h
new file mode 100644
index 000000000000..f9aff1775810
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mt8365-clk.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8365_H
+#define _DT_BINDINGS_CLK_MT8365_H
+
+/* TOPCKGEN */
+#define CLK_TOP_CLK_NULL 0
+#define CLK_TOP_I2S0_BCK 1
+#define CLK_TOP_DSI0_LNTC_DSICK 2
+#define CLK_TOP_VPLL_DPIX 3
+#define CLK_TOP_LVDSTX_CLKDIG_CTS 4
+#define CLK_TOP_MFGPLL 5
+#define CLK_TOP_SYSPLL_D2 6
+#define CLK_TOP_SYSPLL1_D2 7
+#define CLK_TOP_SYSPLL1_D4 8
+#define CLK_TOP_SYSPLL1_D8 9
+#define CLK_TOP_SYSPLL1_D16 10
+#define CLK_TOP_SYSPLL_D3 11
+#define CLK_TOP_SYSPLL2_D2 12
+#define CLK_TOP_SYSPLL2_D4 13
+#define CLK_TOP_SYSPLL2_D8 14
+#define CLK_TOP_SYSPLL_D5 15
+#define CLK_TOP_SYSPLL3_D2 16
+#define CLK_TOP_SYSPLL3_D4 17
+#define CLK_TOP_SYSPLL_D7 18
+#define CLK_TOP_SYSPLL4_D2 19
+#define CLK_TOP_SYSPLL4_D4 20
+#define CLK_TOP_UNIVPLL 21
+#define CLK_TOP_UNIVPLL_D2 22
+#define CLK_TOP_UNIVPLL1_D2 23
+#define CLK_TOP_UNIVPLL1_D4 24
+#define CLK_TOP_UNIVPLL_D3 25
+#define CLK_TOP_UNIVPLL2_D2 26
+#define CLK_TOP_UNIVPLL2_D4 27
+#define CLK_TOP_UNIVPLL2_D8 28
+#define CLK_TOP_UNIVPLL2_D32 29
+#define CLK_TOP_UNIVPLL_D5 30
+#define CLK_TOP_UNIVPLL3_D2 31
+#define CLK_TOP_UNIVPLL3_D4 32
+#define CLK_TOP_MMPLL 33
+#define CLK_TOP_MMPLL_D2 34
+#define CLK_TOP_LVDSPLL_D2 35
+#define CLK_TOP_LVDSPLL_D4 36
+#define CLK_TOP_LVDSPLL_D8 37
+#define CLK_TOP_LVDSPLL_D16 38
+#define CLK_TOP_USB20_192M 39
+#define CLK_TOP_USB20_192M_D4 40
+#define CLK_TOP_USB20_192M_D8 41
+#define CLK_TOP_USB20_192M_D16 42
+#define CLK_TOP_USB20_192M_D32 43
+#define CLK_TOP_APLL1 44
+#define CLK_TOP_APLL1_D2 45
+#define CLK_TOP_APLL1_D4 46
+#define CLK_TOP_APLL1_D8 47
+#define CLK_TOP_APLL2 48
+#define CLK_TOP_APLL2_D2 49
+#define CLK_TOP_APLL2_D4 50
+#define CLK_TOP_APLL2_D8 51
+#define CLK_TOP_SYS_26M_D2 52
+#define CLK_TOP_MSDCPLL 53
+#define CLK_TOP_MSDCPLL_D2 54
+#define CLK_TOP_DSPPLL 55
+#define CLK_TOP_DSPPLL_D2 56
+#define CLK_TOP_DSPPLL_D4 57
+#define CLK_TOP_DSPPLL_D8 58
+#define CLK_TOP_APUPLL 59
+#define CLK_TOP_CLK26M_D52 60
+#define CLK_TOP_AXI_SEL 61
+#define CLK_TOP_MEM_SEL 62
+#define CLK_TOP_MM_SEL 63
+#define CLK_TOP_SCP_SEL 64
+#define CLK_TOP_MFG_SEL 65
+#define CLK_TOP_ATB_SEL 66
+#define CLK_TOP_CAMTG_SEL 67
+#define CLK_TOP_CAMTG1_SEL 68
+#define CLK_TOP_UART_SEL 69
+#define CLK_TOP_SPI_SEL 70
+#define CLK_TOP_MSDC50_0_HC_SEL 71
+#define CLK_TOP_MSDC2_2_HC_SEL 72
+#define CLK_TOP_MSDC50_0_SEL 73
+#define CLK_TOP_MSDC50_2_SEL 74
+#define CLK_TOP_MSDC30_1_SEL 75
+#define CLK_TOP_AUDIO_SEL 76
+#define CLK_TOP_AUD_INTBUS_SEL 77
+#define CLK_TOP_AUD_1_SEL 78
+#define CLK_TOP_AUD_2_SEL 79
+#define CLK_TOP_AUD_ENGEN1_SEL 80
+#define CLK_TOP_AUD_ENGEN2_SEL 81
+#define CLK_TOP_AUD_SPDIF_SEL 82
+#define CLK_TOP_DISP_PWM_SEL 83
+#define CLK_TOP_DXCC_SEL 84
+#define CLK_TOP_SSUSB_SYS_SEL 85
+#define CLK_TOP_SSUSB_XHCI_SEL 86
+#define CLK_TOP_SPM_SEL 87
+#define CLK_TOP_I2C_SEL 88
+#define CLK_TOP_PWM_SEL 89
+#define CLK_TOP_SENIF_SEL 90
+#define CLK_TOP_AES_FDE_SEL 91
+#define CLK_TOP_CAMTM_SEL 92
+#define CLK_TOP_DPI0_SEL 93
+#define CLK_TOP_DPI1_SEL 94
+#define CLK_TOP_DSP_SEL 95
+#define CLK_TOP_NFI2X_SEL 96
+#define CLK_TOP_NFIECC_SEL 97
+#define CLK_TOP_ECC_SEL 98
+#define CLK_TOP_ETH_SEL 99
+#define CLK_TOP_GCPU_SEL 100
+#define CLK_TOP_GCPU_CPM_SEL 101
+#define CLK_TOP_APU_SEL 102
+#define CLK_TOP_APU_IF_SEL 103
+#define CLK_TOP_MBIST_DIAG_SEL 104
+#define CLK_TOP_APLL_I2S0_SEL 105
+#define CLK_TOP_APLL_I2S1_SEL 106
+#define CLK_TOP_APLL_I2S2_SEL 107
+#define CLK_TOP_APLL_I2S3_SEL 108
+#define CLK_TOP_APLL_TDMOUT_SEL 109
+#define CLK_TOP_APLL_TDMIN_SEL 110
+#define CLK_TOP_APLL_SPDIF_SEL 111
+#define CLK_TOP_APLL12_CK_DIV0 112
+#define CLK_TOP_APLL12_CK_DIV1 113
+#define CLK_TOP_APLL12_CK_DIV2 114
+#define CLK_TOP_APLL12_CK_DIV3 115
+#define CLK_TOP_APLL12_CK_DIV4 116
+#define CLK_TOP_APLL12_CK_DIV4B 117
+#define CLK_TOP_APLL12_CK_DIV5 118
+#define CLK_TOP_APLL12_CK_DIV5B 119
+#define CLK_TOP_APLL12_CK_DIV6 120
+#define CLK_TOP_AUD_I2S0_M 121
+#define CLK_TOP_AUD_I2S1_M 122
+#define CLK_TOP_AUD_I2S2_M 123
+#define CLK_TOP_AUD_I2S3_M 124
+#define CLK_TOP_AUD_TDMOUT_M 125
+#define CLK_TOP_AUD_TDMOUT_B 126
+#define CLK_TOP_AUD_TDMIN_M 127
+#define CLK_TOP_AUD_TDMIN_B 128
+#define CLK_TOP_AUD_SPDIF_M 129
+#define CLK_TOP_USB20_48M_EN 130
+#define CLK_TOP_UNIVPLL_48M_EN 131
+#define CLK_TOP_LVDSTX_CLKDIG_EN 132
+#define CLK_TOP_VPLL_DPIX_EN 133
+#define CLK_TOP_SSUSB_TOP_CK_EN 134
+#define CLK_TOP_SSUSB_PHY_CK_EN 135
+#define CLK_TOP_CONN_32K 136
+#define CLK_TOP_CONN_26M 137
+#define CLK_TOP_DSP_32K 138
+#define CLK_TOP_DSP_26M 139
+#define CLK_TOP_NR_CLK 140
+
+/* INFRACFG */
+#define CLK_IFR_PMIC_TMR 0
+#define CLK_IFR_PMIC_AP 1
+#define CLK_IFR_PMIC_MD 2
+#define CLK_IFR_PMIC_CONN 3
+#define CLK_IFR_ICUSB 4
+#define CLK_IFR_GCE 5
+#define CLK_IFR_THERM 6
+#define CLK_IFR_PWM_HCLK 7
+#define CLK_IFR_PWM1 8
+#define CLK_IFR_PWM2 9
+#define CLK_IFR_PWM3 10
+#define CLK_IFR_PWM4 11
+#define CLK_IFR_PWM5 12
+#define CLK_IFR_PWM 13
+#define CLK_IFR_UART0 14
+#define CLK_IFR_UART1 15
+#define CLK_IFR_UART2 16
+#define CLK_IFR_DSP_UART 17
+#define CLK_IFR_GCE_26M 18
+#define CLK_IFR_CQ_DMA_FPC 19
+#define CLK_IFR_BTIF 20
+#define CLK_IFR_SPI0 21
+#define CLK_IFR_MSDC0_HCLK 22
+#define CLK_IFR_MSDC2_HCLK 23
+#define CLK_IFR_MSDC1_HCLK 24
+#define CLK_IFR_DVFSRC 25
+#define CLK_IFR_GCPU 26
+#define CLK_IFR_TRNG 27
+#define CLK_IFR_AUXADC 28
+#define CLK_IFR_CPUM 29
+#define CLK_IFR_AUXADC_MD 30
+#define CLK_IFR_AP_DMA 31
+#define CLK_IFR_DEBUGSYS 32
+#define CLK_IFR_AUDIO 33
+#define CLK_IFR_PWM_FBCLK6 34
+#define CLK_IFR_DISP_PWM 35
+#define CLK_IFR_AUD_26M_BK 36
+#define CLK_IFR_CQ_DMA 37
+#define CLK_IFR_MSDC0_SF 38
+#define CLK_IFR_MSDC1_SF 39
+#define CLK_IFR_MSDC2_SF 40
+#define CLK_IFR_AP_MSDC0 41
+#define CLK_IFR_MD_MSDC0 42
+#define CLK_IFR_MSDC0_SRC 43
+#define CLK_IFR_MSDC1_SRC 44
+#define CLK_IFR_MSDC2_SRC 45
+#define CLK_IFR_PWRAP_TMR 46
+#define CLK_IFR_PWRAP_SPI 47
+#define CLK_IFR_PWRAP_SYS 48
+#define CLK_IFR_MCU_PM_BK 49
+#define CLK_IFR_IRRX_26M 50
+#define CLK_IFR_IRRX_32K 51
+#define CLK_IFR_I2C0_AXI 52
+#define CLK_IFR_I2C1_AXI 53
+#define CLK_IFR_I2C2_AXI 54
+#define CLK_IFR_I2C3_AXI 55
+#define CLK_IFR_NIC_AXI 56
+#define CLK_IFR_NIC_SLV_AXI 57
+#define CLK_IFR_APU_AXI 58
+#define CLK_IFR_NFIECC 59
+#define CLK_IFR_NFIECC_BK 60
+#define CLK_IFR_NFI1X_BK 61
+#define CLK_IFR_NFI_BK 62
+#define CLK_IFR_MSDC2_AP_BK 63
+#define CLK_IFR_MSDC2_MD_BK 64
+#define CLK_IFR_MSDC2_BK 65
+#define CLK_IFR_SUSB_133_BK 66
+#define CLK_IFR_SUSB_66_BK 67
+#define CLK_IFR_SSUSB_SYS 68
+#define CLK_IFR_SSUSB_REF 69
+#define CLK_IFR_SSUSB_XHCI 70
+#define CLK_IFR_NR_CLK 71
+
+/* PERICFG */
+#define CLK_PERIAXI 0
+#define CLK_PERI_NR_CLK 1
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIVPLL 2
+#define CLK_APMIXED_MFGPLL 3
+#define CLK_APMIXED_MSDCPLL 4
+#define CLK_APMIXED_MMPLL 5
+#define CLK_APMIXED_APLL1 6
+#define CLK_APMIXED_APLL2 7
+#define CLK_APMIXED_LVDSPLL 8
+#define CLK_APMIXED_DSPPLL 9
+#define CLK_APMIXED_APUPLL 10
+#define CLK_APMIXED_UNIV_EN 11
+#define CLK_APMIXED_USB20_EN 12
+#define CLK_APMIXED_NR_CLK 13
+
+/* GCE */
+#define CLK_GCE_FAXI 0
+#define CLK_GCE_NR_CLK 1
+
+/* AUDIOTOP */
+#define CLK_AUD_AFE 0
+#define CLK_AUD_I2S 1
+#define CLK_AUD_22M 2
+#define CLK_AUD_24M 3
+#define CLK_AUD_INTDIR 4
+#define CLK_AUD_APLL2_TUNER 5
+#define CLK_AUD_APLL_TUNER 6
+#define CLK_AUD_SPDF 7
+#define CLK_AUD_HDMI 8
+#define CLK_AUD_HDMI_IN 9
+#define CLK_AUD_ADC 10
+#define CLK_AUD_DAC 11
+#define CLK_AUD_DAC_PREDIS 12
+#define CLK_AUD_TML 13
+#define CLK_AUD_I2S1_BK 14
+#define CLK_AUD_I2S2_BK 15
+#define CLK_AUD_I2S3_BK 16
+#define CLK_AUD_I2S4_BK 17
+#define CLK_AUD_NR_CLK 18
+
+/* MIPI_CSI0A */
+#define CLK_MIPI0A_CSR_CSI_EN_0A 0
+#define CLK_MIPI_RX_ANA_CSI0A_NR_CLK 1
+
+/* MIPI_CSI0B */
+#define CLK_MIPI0B_CSR_CSI_EN_0B 0
+#define CLK_MIPI_RX_ANA_CSI0B_NR_CLK 1
+
+/* MIPI_CSI1A */
+#define CLK_MIPI1A_CSR_CSI_EN_1A 0
+#define CLK_MIPI_RX_ANA_CSI1A_NR_CLK 1
+
+/* MIPI_CSI1B */
+#define CLK_MIPI1B_CSR_CSI_EN_1B 0
+#define CLK_MIPI_RX_ANA_CSI1B_NR_CLK 1
+
+/* MIPI_CSI2A */
+#define CLK_MIPI2A_CSR_CSI_EN_2A 0
+#define CLK_MIPI_RX_ANA_CSI2A_NR_CLK 1
+
+/* MIPI_CSI2B */
+#define CLK_MIPI2B_CSR_CSI_EN_2B 0
+#define CLK_MIPI_RX_ANA_CSI2B_NR_CLK 1
+
+/* MCUCFG */
+#define CLK_MCU_BUS_SEL 0
+#define CLK_MCU_NR_CLK 1
+
+/* MFGCFG */
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_MBIST_DIAG 1
+#define CLK_MFG_NR_CLK 2
+
+/* MMSYS */
+#define CLK_MM_MM_MDP_RDMA0 0
+#define CLK_MM_MM_MDP_CCORR0 1
+#define CLK_MM_MM_MDP_RSZ0 2
+#define CLK_MM_MM_MDP_RSZ1 3
+#define CLK_MM_MM_MDP_TDSHP0 4
+#define CLK_MM_MM_MDP_WROT0 5
+#define CLK_MM_MM_MDP_WDMA0 6
+#define CLK_MM_MM_DISP_OVL0 7
+#define CLK_MM_MM_DISP_OVL0_2L 8
+#define CLK_MM_MM_DISP_RSZ0 9
+#define CLK_MM_MM_DISP_RDMA0 10
+#define CLK_MM_MM_DISP_WDMA0 11
+#define CLK_MM_MM_DISP_COLOR0 12
+#define CLK_MM_MM_DISP_CCORR0 13
+#define CLK_MM_MM_DISP_AAL0 14
+#define CLK_MM_MM_DISP_GAMMA0 15
+#define CLK_MM_MM_DISP_DITHER0 16
+#define CLK_MM_MM_DSI0 17
+#define CLK_MM_MM_DISP_RDMA1 18
+#define CLK_MM_MM_MDP_RDMA1 19
+#define CLK_MM_DPI0_DPI0 20
+#define CLK_MM_MM_FAKE 21
+#define CLK_MM_MM_SMI_COMMON 22
+#define CLK_MM_MM_SMI_LARB0 23
+#define CLK_MM_MM_SMI_COMM0 24
+#define CLK_MM_MM_SMI_COMM1 25
+#define CLK_MM_MM_CAM_MDP 26
+#define CLK_MM_MM_SMI_IMG 27
+#define CLK_MM_MM_SMI_CAM 28
+#define CLK_MM_IMG_IMG_DL_RELAY 29
+#define CLK_MM_IMG_IMG_DL_ASYNC_TOP 30
+#define CLK_MM_DSI0_DIG_DSI 31
+#define CLK_MM_26M_HRTWT 32
+#define CLK_MM_MM_DPI0 33
+#define CLK_MM_LVDSTX_PXL 34
+#define CLK_MM_LVDSTX_CTS 35
+#define CLK_MM_NR_CLK 36
+
+/* IMGSYS */
+#define CLK_CAM_LARB2 0
+#define CLK_CAM 1
+#define CLK_CAMTG 2
+#define CLK_CAM_SENIF 3
+#define CLK_CAMSV0 4
+#define CLK_CAMSV1 5
+#define CLK_CAM_FDVT 6
+#define CLK_CAM_WPE 7
+#define CLK_CAM_NR_CLK 8
+
+/* VDECSYS */
+#define CLK_VDEC_VDEC 0
+#define CLK_VDEC_LARB1 1
+#define CLK_VDEC_NR_CLK 2
+
+/* VENCSYS */
+#define CLK_VENC 0
+#define CLK_VENC_JPGENC 1
+#define CLK_VENC_NR_CLK 2
+
+/* APUSYS */
+#define CLK_APU_IPU_CK 0
+#define CLK_APU_AXI 1
+#define CLK_APU_JTAG 2
+#define CLK_APU_IF_CK 3
+#define CLK_APU_EDMA 4
+#define CLK_APU_AHB 5
+#define CLK_APU_NR_CLK 6
+
+#endif /* _DT_BINDINGS_CLK_MT8365_H */
diff --git a/include/dt-bindings/clock/mediatek,mtmips-sysc.h b/include/dt-bindings/clock/mediatek,mtmips-sysc.h
new file mode 100644
index 000000000000..a03335b0e077
--- /dev/null
+++ b/include/dt-bindings/clock/mediatek,mtmips-sysc.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Author: Sergio Paracuellos <sergio.paracuellos@gmail.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MTMIPS_H
+#define _DT_BINDINGS_CLK_MTMIPS_H
+
+/* Ralink RT-2880 clocks */
+
+#define RT2880_CLK_XTAL 0
+#define RT2880_CLK_CPU 1
+#define RT2880_CLK_BUS 2
+#define RT2880_CLK_TIMER 3
+#define RT2880_CLK_WATCHDOG 4
+#define RT2880_CLK_UART 5
+#define RT2880_CLK_I2C 6
+#define RT2880_CLK_UARTLITE 7
+#define RT2880_CLK_ETHERNET 8
+#define RT2880_CLK_WMAC 9
+
+/* Ralink RT-305X clocks */
+
+#define RT305X_CLK_XTAL 0
+#define RT305X_CLK_CPU 1
+#define RT305X_CLK_BUS 2
+#define RT305X_CLK_TIMER 3
+#define RT305X_CLK_WATCHDOG 4
+#define RT305X_CLK_UART 5
+#define RT305X_CLK_I2C 6
+#define RT305X_CLK_I2S 7
+#define RT305X_CLK_SPI1 8
+#define RT305X_CLK_SPI2 9
+#define RT305X_CLK_UARTLITE 10
+#define RT305X_CLK_ETHERNET 11
+#define RT305X_CLK_WMAC 12
+
+/* Ralink RT-3352 clocks */
+
+#define RT3352_CLK_XTAL 0
+#define RT3352_CLK_CPU 1
+#define RT3352_CLK_PERIPH 2
+#define RT3352_CLK_BUS 3
+#define RT3352_CLK_TIMER 4
+#define RT3352_CLK_WATCHDOG 5
+#define RT3352_CLK_UART 6
+#define RT3352_CLK_I2C 7
+#define RT3352_CLK_I2S 8
+#define RT3352_CLK_SPI1 9
+#define RT3352_CLK_SPI2 10
+#define RT3352_CLK_UARTLITE 11
+#define RT3352_CLK_ETHERNET 12
+#define RT3352_CLK_WMAC 13
+
+/* Ralink RT-3883 clocks */
+
+#define RT3883_CLK_XTAL 0
+#define RT3883_CLK_CPU 1
+#define RT3883_CLK_BUS 2
+#define RT3883_CLK_PERIPH 3
+#define RT3883_CLK_TIMER 4
+#define RT3883_CLK_WATCHDOG 5
+#define RT3883_CLK_UART 6
+#define RT3883_CLK_I2C 7
+#define RT3883_CLK_I2S 8
+#define RT3883_CLK_SPI1 9
+#define RT3883_CLK_SPI2 10
+#define RT3883_CLK_UARTLITE 11
+#define RT3883_CLK_ETHERNET 12
+#define RT3883_CLK_WMAC 13
+
+/* Ralink RT-5350 clocks */
+
+#define RT5350_CLK_XTAL 0
+#define RT5350_CLK_CPU 1
+#define RT5350_CLK_BUS 2
+#define RT5350_CLK_PERIPH 3
+#define RT5350_CLK_TIMER 4
+#define RT5350_CLK_WATCHDOG 5
+#define RT5350_CLK_UART 6
+#define RT5350_CLK_I2C 7
+#define RT5350_CLK_I2S 8
+#define RT5350_CLK_SPI1 9
+#define RT5350_CLK_SPI2 10
+#define RT5350_CLK_UARTLITE 11
+#define RT5350_CLK_ETHERNET 12
+#define RT5350_CLK_WMAC 13
+
+/* Ralink MT-7620 clocks */
+
+#define MT7620_CLK_XTAL 0
+#define MT7620_CLK_PLL 1
+#define MT7620_CLK_CPU 2
+#define MT7620_CLK_PERIPH 3
+#define MT7620_CLK_BUS 4
+#define MT7620_CLK_BBPPLL 5
+#define MT7620_CLK_SDHC 6
+#define MT7620_CLK_TIMER 7
+#define MT7620_CLK_WATCHDOG 8
+#define MT7620_CLK_UART 9
+#define MT7620_CLK_I2C 10
+#define MT7620_CLK_I2S 11
+#define MT7620_CLK_SPI1 12
+#define MT7620_CLK_SPI2 13
+#define MT7620_CLK_UARTLITE 14
+#define MT7620_CLK_MMC 15
+#define MT7620_CLK_WMAC 16
+
+/* Ralink MT-76X8 clocks */
+
+#define MT76X8_CLK_XTAL 0
+#define MT76X8_CLK_CPU 1
+#define MT76X8_CLK_BBPPLL 2
+#define MT76X8_CLK_PCMI2S 3
+#define MT76X8_CLK_PERIPH 4
+#define MT76X8_CLK_BUS 5
+#define MT76X8_CLK_SDHC 6
+#define MT76X8_CLK_TIMER 7
+#define MT76X8_CLK_WATCHDOG 8
+#define MT76X8_CLK_I2C 9
+#define MT76X8_CLK_I2S 10
+#define MT76X8_CLK_SPI1 11
+#define MT76X8_CLK_SPI2 12
+#define MT76X8_CLK_UART0 13
+#define MT76X8_CLK_UART1 14
+#define MT76X8_CLK_UART2 15
+#define MT76X8_CLK_MMC 16
+#define MT76X8_CLK_WMAC 17
+
+#endif /* _DT_BINDINGS_CLK_MTMIPS_H */
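Headers like this one exist so that board device trees and the clock driver agree on clock numbering: a .dts file includes the header and passes an ID as the argument cell of a clock specifier. A minimal consumer sketch, assuming a hypothetical `sysc` label for the system-controller clock provider and a hypothetical `uartlite` consumer node (neither name comes from this patch):

	#include <dt-bindings/clock/mediatek,mtmips-sysc.h>

	&uartlite {
		/* RT5350_CLK_UARTLITE is ID 11 in the list above */
		clocks = <&sysc RT5350_CLK_UARTLITE>;
	};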
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index f33781338eda..385bf243c56c 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -100,19 +100,126 @@
#define CLKID_MPLL0 93
#define CLKID_MPLL1 94
#define CLKID_MPLL2 95
+#define CLKID_MPLL0_DIV 96
+#define CLKID_MPLL1_DIV 97
+#define CLKID_MPLL2_DIV 98
+#define CLKID_CPU_IN_SEL 99
+#define CLKID_CPU_IN_DIV2 100
+#define CLKID_CPU_IN_DIV3 101
+#define CLKID_CPU_SCALE_DIV 102
+#define CLKID_CPU_SCALE_OUT_SEL 103
+#define CLKID_MPLL_PREDIV 104
+#define CLKID_FCLK_DIV2_DIV 105
+#define CLKID_FCLK_DIV3_DIV 106
+#define CLKID_FCLK_DIV4_DIV 107
+#define CLKID_FCLK_DIV5_DIV 108
+#define CLKID_FCLK_DIV7_DIV 109
+#define CLKID_NAND_SEL 110
+#define CLKID_NAND_DIV 111
#define CLKID_NAND_CLK 112
+#define CLKID_PLL_FIXED_DCO 113
+#define CLKID_HDMI_PLL_DCO 114
+#define CLKID_PLL_SYS_DCO 115
+#define CLKID_CPU_CLK_DIV2 116
+#define CLKID_CPU_CLK_DIV3 117
+#define CLKID_CPU_CLK_DIV4 118
+#define CLKID_CPU_CLK_DIV5 119
+#define CLKID_CPU_CLK_DIV6 120
+#define CLKID_CPU_CLK_DIV7 121
+#define CLKID_CPU_CLK_DIV8 122
+#define CLKID_APB_SEL 123
#define CLKID_APB 124
+#define CLKID_PERIPH_SEL 125
#define CLKID_PERIPH 126
+#define CLKID_AXI_SEL 127
#define CLKID_AXI 128
+#define CLKID_L2_DRAM_SEL 129
#define CLKID_L2_DRAM 130
+#define CLKID_HDMI_PLL_LVDS_OUT 131
+#define CLKID_HDMI_PLL_HDMI_OUT 132
+#define CLKID_VID_PLL_IN_SEL 133
+#define CLKID_VID_PLL_IN_EN 134
+#define CLKID_VID_PLL_PRE_DIV 135
+#define CLKID_VID_PLL_POST_DIV 136
+#define CLKID_VID_PLL_FINAL_DIV 137
+#define CLKID_VCLK_IN_SEL 138
+#define CLKID_VCLK_IN_EN 139
+#define CLKID_VCLK_DIV1 140
+#define CLKID_VCLK_DIV2_DIV 141
+#define CLKID_VCLK_DIV2 142
+#define CLKID_VCLK_DIV4_DIV 143
+#define CLKID_VCLK_DIV4 144
+#define CLKID_VCLK_DIV6_DIV 145
+#define CLKID_VCLK_DIV6 146
+#define CLKID_VCLK_DIV12_DIV 147
+#define CLKID_VCLK_DIV12 148
+#define CLKID_VCLK2_IN_SEL 149
+#define CLKID_VCLK2_IN_EN 150
+#define CLKID_VCLK2_DIV1 151
+#define CLKID_VCLK2_DIV2_DIV 152
+#define CLKID_VCLK2_DIV2 153
+#define CLKID_VCLK2_DIV4_DIV 154
+#define CLKID_VCLK2_DIV4 155
+#define CLKID_VCLK2_DIV6_DIV 156
+#define CLKID_VCLK2_DIV6 157
+#define CLKID_VCLK2_DIV12_DIV 158
+#define CLKID_VCLK2_DIV12 159
+#define CLKID_CTS_ENCT_SEL 160
+#define CLKID_CTS_ENCT 161
+#define CLKID_CTS_ENCP_SEL 162
+#define CLKID_CTS_ENCP 163
+#define CLKID_CTS_ENCI_SEL 164
+#define CLKID_CTS_ENCI 165
+#define CLKID_HDMI_TX_PIXEL_SEL 166
+#define CLKID_HDMI_TX_PIXEL 167
+#define CLKID_CTS_ENCL_SEL 168
+#define CLKID_CTS_ENCL 169
+#define CLKID_CTS_VDAC0_SEL 170
+#define CLKID_CTS_VDAC0 171
+#define CLKID_HDMI_SYS_SEL 172
+#define CLKID_HDMI_SYS_DIV 173
#define CLKID_HDMI_SYS 174
+#define CLKID_MALI_0_SEL 175
+#define CLKID_MALI_0_DIV 176
+#define CLKID_MALI_0 177
+#define CLKID_MALI_1_SEL 178
+#define CLKID_MALI_1_DIV 179
+#define CLKID_MALI_1 180
+#define CLKID_GP_PLL_DCO 181
+#define CLKID_GP_PLL 182
+#define CLKID_VPU_0_SEL 183
+#define CLKID_VPU_0_DIV 184
+#define CLKID_VPU_0 185
+#define CLKID_VPU_1_SEL 186
+#define CLKID_VPU_1_DIV 187
+#define CLKID_VPU_1 189
#define CLKID_VPU 190
+#define CLKID_VDEC_1_SEL 191
+#define CLKID_VDEC_1_1_DIV 192
+#define CLKID_VDEC_1_1 193
+#define CLKID_VDEC_1_2_DIV 194
+#define CLKID_VDEC_1_2 195
#define CLKID_VDEC_1 196
+#define CLKID_VDEC_HCODEC_SEL 197
+#define CLKID_VDEC_HCODEC_DIV 198
#define CLKID_VDEC_HCODEC 199
+#define CLKID_VDEC_2_SEL 200
+#define CLKID_VDEC_2_DIV 201
#define CLKID_VDEC_2 202
+#define CLKID_VDEC_HEVC_SEL 203
+#define CLKID_VDEC_HEVC_DIV 204
+#define CLKID_VDEC_HEVC_EN 205
#define CLKID_VDEC_HEVC 206
+#define CLKID_CTS_AMCLK_SEL 207
+#define CLKID_CTS_AMCLK_DIV 208
#define CLKID_CTS_AMCLK 209
+#define CLKID_CTS_MCLK_I958_SEL 210
+#define CLKID_CTS_MCLK_I958_DIV 211
#define CLKID_CTS_MCLK_I958 212
#define CLKID_CTS_I958 213
+#define CLKID_VCLK_EN 214
+#define CLKID_VCLK2_EN 215
+#define CLKID_VID_PLL_LVDS_EN 216
+#define CLKID_HDMI_PLL_DCO_IN 217

#endif /* __MESON8B_CLKC_H */
diff --git a/include/dt-bindings/clock/microchip,lan966x.h b/include/dt-bindings/clock/microchip,lan966x.h
new file mode 100644
index 000000000000..6f9d43d76d5a
--- /dev/null
+++ b/include/dt-bindings/clock/microchip,lan966x.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 Microchip Inc.
+ *
+ * Author: Kavyasree Kotagiri <kavyasree.kotagiri@microchip.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_LAN966X_H
+#define _DT_BINDINGS_CLK_LAN966X_H
+
+#define GCK_ID_QSPI0 0
+#define GCK_ID_QSPI1 1
+#define GCK_ID_QSPI2 2
+#define GCK_ID_SDMMC0 3
+#define GCK_ID_PI 4
+#define GCK_ID_MCAN0 5
+#define GCK_ID_MCAN1 6
+#define GCK_ID_FLEXCOM0 7
+#define GCK_ID_FLEXCOM1 8
+#define GCK_ID_FLEXCOM2 9
+#define GCK_ID_FLEXCOM3 10
+#define GCK_ID_FLEXCOM4 11
+#define GCK_ID_TIMER 12
+#define GCK_ID_USB_REFCLK 13
+
+/* Gate clocks */
+#define GCK_GATE_UHPHS 14
+#define GCK_GATE_UDPHS 15
+#define GCK_GATE_MCRAMC 16
+#define GCK_GATE_HMATRIX 17
+
+#define N_CLOCKS 18
+
+#endif
diff --git a/include/dt-bindings/clock/microchip,mpfs-clock.h b/include/dt-bindings/clock/microchip,mpfs-clock.h
new file mode 100644
index 000000000000..b52f19a2b480
--- /dev/null
+++ b/include/dt-bindings/clock/microchip,mpfs-clock.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Daire McNamara <daire.mcnamara@microchip.com>
+ * Copyright (C) 2020-2022 Microchip Technology Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_
+#define _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_
+
+#define CLK_CPU 0
+#define CLK_AXI 1
+#define CLK_AHB 2
+
+#define CLK_ENVM 3
+#define CLK_MAC0 4
+#define CLK_MAC1 5
+#define CLK_MMC 6
+#define CLK_TIMER 7
+#define CLK_MMUART0 8
+#define CLK_MMUART1 9
+#define CLK_MMUART2 10
+#define CLK_MMUART3 11
+#define CLK_MMUART4 12
+#define CLK_SPI0 13
+#define CLK_SPI1 14
+#define CLK_I2C0 15
+#define CLK_I2C1 16
+#define CLK_CAN0 17
+#define CLK_CAN1 18
+#define CLK_USB 19
+#define CLK_RESERVED 20
+#define CLK_RTC 21
+#define CLK_QSPI 22
+#define CLK_GPIO0 23
+#define CLK_GPIO1 24
+#define CLK_GPIO2 25
+#define CLK_DDRC 26
+#define CLK_FIC0 27
+#define CLK_FIC1 28
+#define CLK_FIC2 29
+#define CLK_FIC3 30
+#define CLK_ATHENA 31
+#define CLK_CFM 32
+
+#define CLK_RTCREF 33
+#define CLK_MSSPLL 34 /* same clock as CLK_MSSPLL0 */
+#define CLK_MSSPLL0 34
+#define CLK_MSSPLL1 35
+#define CLK_MSSPLL2 36
+#define CLK_MSSPLL3 37
+/* 38 is reserved for MSS PLL internals */
+
+/* Clock Conditioning Circuitry Clock IDs */
+
+#define CLK_CCC_PLL0 0
+#define CLK_CCC_PLL1 1
+#define CLK_CCC_DLL0 2
+#define CLK_CCC_DLL1 3
+
+#define CLK_CCC_PLL0_OUT0 4
+#define CLK_CCC_PLL0_OUT1 5
+#define CLK_CCC_PLL0_OUT2 6
+#define CLK_CCC_PLL0_OUT3 7
+
+#define CLK_CCC_PLL1_OUT0 8
+#define CLK_CCC_PLL1_OUT1 9
+#define CLK_CCC_PLL1_OUT2 10
+#define CLK_CCC_PLL1_OUT3 11
+
+#define CLK_CCC_DLL0_OUT0 12
+#define CLK_CCC_DLL0_OUT1 13
+
+#define CLK_CCC_DLL1_OUT0 14
+#define CLK_CCC_DLL1_OUT1 15
+
+#endif /* _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_ */
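The Clock Conditioning Circuitry IDs above restart at 0, so they overlap numerically with the main `CLK_*` IDs; the two ranges are distinct namespaces, told apart by the provider phandle in each specifier rather than by the number itself. A sketch under assumed labels (`clkcfg`, `ccc_ne`, and the consumer nodes are illustrative, not defined by this header):

	#include <dt-bindings/clock/microchip,mpfs-clock.h>

	&mmuart0 {
		/* CLK_MMUART0 (8) resolves against the main clock controller */
		clocks = <&clkcfg CLK_MMUART0>;
	};

	&fpga_peripheral {
		/* CLK_CCC_PLL0_OUT0 (4) resolves against a CCC instance */
		clocks = <&ccc_ne CLK_CCC_PLL0_OUT0>;
	};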
diff --git a/include/dt-bindings/clock/mobileye,eyeq5-clk.h b/include/dt-bindings/clock/mobileye,eyeq5-clk.h
new file mode 100644
index 000000000000..f353c2988035
--- /dev/null
+++ b/include/dt-bindings/clock/mobileye,eyeq5-clk.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2024 Mobileye Vision Technologies Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_MOBILEYE_EYEQ5_CLK_H
+#define _DT_BINDINGS_CLOCK_MOBILEYE_EYEQ5_CLK_H
+
+#define EQ5C_PLL_CPU 0
+#define EQ5C_PLL_VMP 1
+#define EQ5C_PLL_PMA 2
+#define EQ5C_PLL_VDI 3
+#define EQ5C_PLL_DDR0 4
+#define EQ5C_PLL_PCI 5
+#define EQ5C_PLL_PER 6
+#define EQ5C_PLL_PMAC 7
+#define EQ5C_PLL_MPC 8
+#define EQ5C_PLL_DDR1 9
+
+#define EQ5C_DIV_OSPI 10
+
+/* EQ5C_PLL_CPU children */
+#define EQ5C_CPU_CORE0 11
+#define EQ5C_CPU_CORE1 12
+#define EQ5C_CPU_CORE2 13
+#define EQ5C_CPU_CORE3 14
+
+/* EQ5C_PLL_PER children */
+#define EQ5C_PER_OCC 15
+#define EQ5C_PER_UART 16
+#define EQ5C_PER_SPI 17
+#define EQ5C_PER_I2C 18
+#define EQ5C_PER_GPIO 19
+#define EQ5C_PER_EMMC 20
+#define EQ5C_PER_OCC_PCI 21
+
+#define EQ6LC_PLL_DDR 0
+#define EQ6LC_PLL_CPU 1
+#define EQ6LC_PLL_PER 2
+#define EQ6LC_PLL_VDI 3
+
+#define EQ6HC_CENTRAL_PLL_CPU 0
+#define EQ6HC_CENTRAL_CPU_OCC 1
+
+#define EQ6HC_WEST_PLL_PER 0
+#define EQ6HC_WEST_PER_OCC 1
+#define EQ6HC_WEST_PER_UART 2
+
+#define EQ6HC_SOUTH_PLL_VDI 0
+#define EQ6HC_SOUTH_PLL_PCIE 1
+#define EQ6HC_SOUTH_PLL_PER 2
+#define EQ6HC_SOUTH_PLL_ISP 3
+
+#define EQ6HC_SOUTH_DIV_EMMC 4
+#define EQ6HC_SOUTH_DIV_OSPI_REF 5
+#define EQ6HC_SOUTH_DIV_OSPI_SYS 6
+#define EQ6HC_SOUTH_DIV_TSU 7
+
+#define EQ6HC_ACC_PLL_XNN 0
+#define EQ6HC_ACC_PLL_VMP 1
+#define EQ6HC_ACC_PLL_PMA 2
+#define EQ6HC_ACC_PLL_MPC 3
+#define EQ6HC_ACC_PLL_NOC 4
+
+#endif
diff --git a/include/dt-bindings/clock/mt7622-clk.h b/include/dt-bindings/clock/mt7622-clk.h
index c12e7eab0788..a173eb132892 100644
--- a/include/dt-bindings/clock/mt7622-clk.h
+++ b/include/dt-bindings/clock/mt7622-clk.h
@@ -228,7 +228,7 @@
#define CLK_AUDIO_MEM_ASRC4 44
#define CLK_AUDIO_MEM_ASRC5 45
#define CLK_AUDIO_AFE_CONN 46
-#define CLK_AUDIO_NR_CLK 47
+#define CLK_AUDIO_AFE_MRGIF 47

/* SSUSBSYS */
diff --git a/include/dt-bindings/clock/mt7986-clk.h b/include/dt-bindings/clock/mt7986-clk.h
new file mode 100644
index 000000000000..5a9b169324b0
--- /dev/null
+++ b/include/dt-bindings/clock/mt7986-clk.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7986_H
+#define _DT_BINDINGS_CLK_MT7986_H
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_NET2PLL 1
+#define CLK_APMIXED_MMPLL 2
+#define CLK_APMIXED_SGMPLL 3
+#define CLK_APMIXED_WEDMCUPLL 4
+#define CLK_APMIXED_NET1PLL 5
+#define CLK_APMIXED_MPLL 6
+#define CLK_APMIXED_APLL2 7
+
+/* TOPCKGEN */
+
+#define CLK_TOP_XTAL 0
+#define CLK_TOP_XTAL_D2 1
+#define CLK_TOP_RTC_32K 2
+#define CLK_TOP_RTC_32P7K 3
+#define CLK_TOP_MPLL_D2 4
+#define CLK_TOP_MPLL_D4 5
+#define CLK_TOP_MPLL_D8 6
+#define CLK_TOP_MPLL_D8_D2 7
+#define CLK_TOP_MPLL_D3_D2 8
+#define CLK_TOP_MMPLL_D2 9
+#define CLK_TOP_MMPLL_D4 10
+#define CLK_TOP_MMPLL_D8 11
+#define CLK_TOP_MMPLL_D8_D2 12
+#define CLK_TOP_MMPLL_D3_D8 13
+#define CLK_TOP_MMPLL_U2PHY 14
+#define CLK_TOP_APLL2_D4 15
+#define CLK_TOP_NET1PLL_D4 16
+#define CLK_TOP_NET1PLL_D5 17
+#define CLK_TOP_NET1PLL_D5_D2 18
+#define CLK_TOP_NET1PLL_D5_D4 19
+#define CLK_TOP_NET1PLL_D8_D2 20
+#define CLK_TOP_NET1PLL_D8_D4 21
+#define CLK_TOP_NET2PLL_D4 22
+#define CLK_TOP_NET2PLL_D4_D2 23
+#define CLK_TOP_NET2PLL_D3_D2 24
+#define CLK_TOP_WEDMCUPLL_D5_D2 25
+#define CLK_TOP_NFI1X_SEL 26
+#define CLK_TOP_SPINFI_SEL 27
+#define CLK_TOP_SPI_SEL 28
+#define CLK_TOP_SPIM_MST_SEL 29
+#define CLK_TOP_UART_SEL 30
+#define CLK_TOP_PWM_SEL 31
+#define CLK_TOP_I2C_SEL 32
+#define CLK_TOP_PEXTP_TL_SEL 33
+#define CLK_TOP_EMMC_250M_SEL 34
+#define CLK_TOP_EMMC_416M_SEL 35
+#define CLK_TOP_F_26M_ADC_SEL 36
+#define CLK_TOP_DRAMC_SEL 37
+#define CLK_TOP_DRAMC_MD32_SEL 38
+#define CLK_TOP_SYSAXI_SEL 39
+#define CLK_TOP_SYSAPB_SEL 40
+#define CLK_TOP_ARM_DB_MAIN_SEL 41
+#define CLK_TOP_ARM_DB_JTSEL 42
+#define CLK_TOP_NETSYS_SEL 43
+#define CLK_TOP_NETSYS_500M_SEL 44
+#define CLK_TOP_NETSYS_MCU_SEL 45
+#define CLK_TOP_NETSYS_2X_SEL 46
+#define CLK_TOP_SGM_325M_SEL 47
+#define CLK_TOP_SGM_REG_SEL 48
+#define CLK_TOP_A1SYS_SEL 49
+#define CLK_TOP_CONN_MCUSYS_SEL 50
+#define CLK_TOP_EIP_B_SEL 51
+#define CLK_TOP_PCIE_PHY_SEL 52
+#define CLK_TOP_USB3_PHY_SEL 53
+#define CLK_TOP_F26M_SEL 54
+#define CLK_TOP_AUD_L_SEL 55
+#define CLK_TOP_A_TUNER_SEL 56
+#define CLK_TOP_U2U3_SEL 57
+#define CLK_TOP_U2U3_SYS_SEL 58
+#define CLK_TOP_U2U3_XHCI_SEL 59
+#define CLK_TOP_DA_U2_REFSEL 60
+#define CLK_TOP_DA_U2_CK_1P_SEL 61
+#define CLK_TOP_AP2CNN_HOST_SEL 62
+#define CLK_TOP_JTAG 63
+
+/* INFRACFG */
+
+#define CLK_INFRA_SYSAXI_D2 0
+#define CLK_INFRA_UART0_SEL 1
+#define CLK_INFRA_UART1_SEL 2
+#define CLK_INFRA_UART2_SEL 3
+#define CLK_INFRA_SPI0_SEL 4
+#define CLK_INFRA_SPI1_SEL 5
+#define CLK_INFRA_PWM1_SEL 6
+#define CLK_INFRA_PWM2_SEL 7
+#define CLK_INFRA_PWM_BSEL 8
+#define CLK_INFRA_PCIE_SEL 9
+#define CLK_INFRA_GPT_STA 10
+#define CLK_INFRA_PWM_HCK 11
+#define CLK_INFRA_PWM_STA 12
+#define CLK_INFRA_PWM1_CK 13
+#define CLK_INFRA_PWM2_CK 14
+#define CLK_INFRA_CQ_DMA_CK 15
+#define CLK_INFRA_EIP97_CK 16
+#define CLK_INFRA_AUD_BUS_CK 17
+#define CLK_INFRA_AUD_26M_CK 18
+#define CLK_INFRA_AUD_L_CK 19
+#define CLK_INFRA_AUD_AUD_CK 20
+#define CLK_INFRA_AUD_EG2_CK 21
+#define CLK_INFRA_DRAMC_26M_CK 22
+#define CLK_INFRA_DBG_CK 23
+#define CLK_INFRA_AP_DMA_CK 24
+#define CLK_INFRA_SEJ_CK 25
+#define CLK_INFRA_SEJ_13M_CK 26
+#define CLK_INFRA_THERM_CK 27
+#define CLK_INFRA_I2C0_CK 28
+#define CLK_INFRA_UART0_CK 29
+#define CLK_INFRA_UART1_CK 30
+#define CLK_INFRA_UART2_CK 31
+#define CLK_INFRA_NFI1_CK 32
+#define CLK_INFRA_SPINFI1_CK 33
+#define CLK_INFRA_NFI_HCK_CK 34
+#define CLK_INFRA_SPI0_CK 35
+#define CLK_INFRA_SPI1_CK 36
+#define CLK_INFRA_SPI0_HCK_CK 37
+#define CLK_INFRA_SPI1_HCK_CK 38
+#define CLK_INFRA_FRTC_CK 39
+#define CLK_INFRA_MSDC_CK 40
+#define CLK_INFRA_MSDC_HCK_CK 41
+#define CLK_INFRA_MSDC_133M_CK 42
+#define CLK_INFRA_MSDC_66M_CK 43
+#define CLK_INFRA_ADC_26M_CK 44
+#define CLK_INFRA_ADC_FRC_CK 45
+#define CLK_INFRA_FBIST2FPC_CK 46
+#define CLK_INFRA_IUSB_133_CK 47
+#define CLK_INFRA_IUSB_66M_CK 48
+#define CLK_INFRA_IUSB_SYS_CK 49
+#define CLK_INFRA_IUSB_CK 50
+#define CLK_INFRA_IPCIE_CK 51
+#define CLK_INFRA_IPCIE_PIPE_CK 52
+#define CLK_INFRA_IPCIER_CK 53
+#define CLK_INFRA_IPCIEB_CK 54
+#define CLK_INFRA_TRNG_CK 55
+
+/* SGMIISYS_0 */
+
+#define CLK_SGMII0_TX250M_EN 0
+#define CLK_SGMII0_RX250M_EN 1
+#define CLK_SGMII0_CDR_REF 2
+#define CLK_SGMII0_CDR_FB 3
+
+/* SGMIISYS_1 */
+
+#define CLK_SGMII1_TX250M_EN 0
+#define CLK_SGMII1_RX250M_EN 1
+#define CLK_SGMII1_CDR_REF 2
+#define CLK_SGMII1_CDR_FB 3
+
+/* ETHSYS */
+
+#define CLK_ETH_FE_EN 0
+#define CLK_ETH_GP2_EN 1
+#define CLK_ETH_GP1_EN 2
+#define CLK_ETH_WOCPU1_EN 3
+#define CLK_ETH_WOCPU0_EN 4
+
+#endif /* _DT_BINDINGS_CLK_MT7986_H */
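The TOPCKGEN `*_SEL` IDs name mux clocks, so beyond plain `clocks` references they are also the handles a device tree uses to pin a mux to a given parent at boot via `assigned-clocks`. A sketch, assuming the conventional `topckgen` provider label (the label itself is not fixed by this header):

	#include <dt-bindings/clock/mt7986-clk.h>

	&topckgen {
		assigned-clocks = <&topckgen CLK_TOP_UART_SEL>;
		/* park the UART mux on the crystal, CLK_TOP_XTAL (0) */
		assigned-clock-parents = <&topckgen CLK_TOP_XTAL>;
	};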
diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h
index 3acebe937bfc..3d00c98b9654 100644
--- a/include/dt-bindings/clock/mt8173-clk.h
+++ b/include/dt-bindings/clock/mt8173-clk.h
@@ -186,7 +186,6 @@
#define CLK_INFRA_PMICWRAP 11
#define CLK_INFRA_CLK_13M 12
#define CLK_INFRA_CA53SEL 13
-#define CLK_INFRA_CA57SEL 14 /* Deprecated. Don't use it. */
#define CLK_INFRA_CA72SEL 14
#define CLK_INFRA_NR_CLK 15
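Because the deprecated alias shared value 14 with CLK_INFRA_CA72SEL, this removal changes no binary encoding; a device tree that used the old macro only needs the name swapped when it is rebuilt against the new header. Illustrative fragment (the `infracfg` provider label is the conventional one, assumed here):

	/* before: deprecated spelling, cell value 14 */
	clocks = <&infracfg CLK_INFRA_CA57SEL>;
	/* after: same cell value, current spelling */
	clocks = <&infracfg CLK_INFRA_CA72SEL>;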
diff --git a/include/dt-bindings/clock/mt8186-clk.h b/include/dt-bindings/clock/mt8186-clk.h
new file mode 100644
index 000000000000..a70bf67af47d
--- /dev/null
+++ b/include/dt-bindings/clock/mt8186-clk.h
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8186_H
+#define _DT_BINDINGS_CLK_MT8186_H
+
+/* MCUSYS */
+
+#define CLK_MCU_ARMPLL_LL_SEL 0
+#define CLK_MCU_ARMPLL_BL_SEL 1
+#define CLK_MCU_ARMPLL_BUS_SEL 2
+#define CLK_MCU_NR_CLK 3
+
+/* TOPCKGEN */
+
+#define CLK_TOP_AXI 0
+#define CLK_TOP_SCP 1
+#define CLK_TOP_MFG 2
+#define CLK_TOP_CAMTG 3
+#define CLK_TOP_CAMTG1 4
+#define CLK_TOP_CAMTG2 5
+#define CLK_TOP_CAMTG3 6
+#define CLK_TOP_CAMTG4 7
+#define CLK_TOP_CAMTG5 8
+#define CLK_TOP_CAMTG6 9
+#define CLK_TOP_UART 10
+#define CLK_TOP_SPI 11
+#define CLK_TOP_MSDC50_0_HCLK 12
+#define CLK_TOP_MSDC50_0 13
+#define CLK_TOP_MSDC30_1 14
+#define CLK_TOP_AUDIO 15
+#define CLK_TOP_AUD_INTBUS 16
+#define CLK_TOP_AUD_1 17
+#define CLK_TOP_AUD_2 18
+#define CLK_TOP_AUD_ENGEN1 19
+#define CLK_TOP_AUD_ENGEN2 20
+#define CLK_TOP_DISP_PWM 21
+#define CLK_TOP_SSPM 22
+#define CLK_TOP_DXCC 23
+#define CLK_TOP_USB_TOP 24
+#define CLK_TOP_SRCK 25
+#define CLK_TOP_SPM 26
+#define CLK_TOP_I2C 27
+#define CLK_TOP_PWM 28
+#define CLK_TOP_SENINF 29
+#define CLK_TOP_SENINF1 30
+#define CLK_TOP_SENINF2 31
+#define CLK_TOP_SENINF3 32
+#define CLK_TOP_AES_MSDCFDE 33
+#define CLK_TOP_PWRAP_ULPOSC 34
+#define CLK_TOP_CAMTM 35
+#define CLK_TOP_VENC 36
+#define CLK_TOP_CAM 37
+#define CLK_TOP_IMG1 38
+#define CLK_TOP_IPE 39
+#define CLK_TOP_DPMAIF 40
+#define CLK_TOP_VDEC 41
+#define CLK_TOP_DISP 42
+#define CLK_TOP_MDP 43
+#define CLK_TOP_AUDIO_H 44
+#define CLK_TOP_UFS 45
+#define CLK_TOP_AES_FDE 46
+#define CLK_TOP_AUDIODSP 47
+#define CLK_TOP_DVFSRC 48
+#define CLK_TOP_DSI_OCC 49
+#define CLK_TOP_SPMI_MST 50
+#define CLK_TOP_SPINOR 51
+#define CLK_TOP_NNA 52
+#define CLK_TOP_NNA1 53
+#define CLK_TOP_NNA2 54
+#define CLK_TOP_SSUSB_XHCI 55
+#define CLK_TOP_SSUSB_TOP_1P 56
+#define CLK_TOP_SSUSB_XHCI_1P 57
+#define CLK_TOP_WPE 58
+#define CLK_TOP_DPI 59
+#define CLK_TOP_U3_OCC_250M 60
+#define CLK_TOP_U3_OCC_500M 61
+#define CLK_TOP_ADSP_BUS 62
+#define CLK_TOP_APLL_I2S0_MCK_SEL 63
+#define CLK_TOP_APLL_I2S1_MCK_SEL 64
+#define CLK_TOP_APLL_I2S2_MCK_SEL 65
+#define CLK_TOP_APLL_I2S4_MCK_SEL 66
+#define CLK_TOP_APLL_TDMOUT_MCK_SEL 67
+#define CLK_TOP_MAINPLL_D2 68
+#define CLK_TOP_MAINPLL_D2_D2 69
+#define CLK_TOP_MAINPLL_D2_D4 70
+#define CLK_TOP_MAINPLL_D2_D16 71
+#define CLK_TOP_MAINPLL_D3 72
+#define CLK_TOP_MAINPLL_D3_D2 73
+#define CLK_TOP_MAINPLL_D3_D4 74
+#define CLK_TOP_MAINPLL_D5 75
+#define CLK_TOP_MAINPLL_D5_D2 76
+#define CLK_TOP_MAINPLL_D5_D4 77
+#define CLK_TOP_MAINPLL_D7 78
+#define CLK_TOP_MAINPLL_D7_D2 79
+#define CLK_TOP_MAINPLL_D7_D4 80
+#define CLK_TOP_UNIVPLL 81
+#define CLK_TOP_UNIVPLL_D2 82
+#define CLK_TOP_UNIVPLL_D2_D2 83
+#define CLK_TOP_UNIVPLL_D2_D4 84
+#define CLK_TOP_UNIVPLL_D3 85
+#define CLK_TOP_UNIVPLL_D3_D2 86
+#define CLK_TOP_UNIVPLL_D3_D4 87
+#define CLK_TOP_UNIVPLL_D3_D8 88
+#define CLK_TOP_UNIVPLL_D3_D32 89
+#define CLK_TOP_UNIVPLL_D5 90
+#define CLK_TOP_UNIVPLL_D5_D2 91
+#define CLK_TOP_UNIVPLL_D5_D4 92
+#define CLK_TOP_UNIVPLL_D7 93
+#define CLK_TOP_UNIVPLL_192M 94
+#define CLK_TOP_UNIVPLL_192M_D4 95
+#define CLK_TOP_UNIVPLL_192M_D8 96
+#define CLK_TOP_UNIVPLL_192M_D16 97
+#define CLK_TOP_UNIVPLL_192M_D32 98
+#define CLK_TOP_APLL1_D2 99
+#define CLK_TOP_APLL1_D4 100
+#define CLK_TOP_APLL1_D8 101
+#define CLK_TOP_APLL2_D2 102
+#define CLK_TOP_APLL2_D4 103
+#define CLK_TOP_APLL2_D8 104
+#define CLK_TOP_MMPLL_D2 105
+#define CLK_TOP_TVDPLL_D2 106
+#define CLK_TOP_TVDPLL_D4 107
+#define CLK_TOP_TVDPLL_D8 108
+#define CLK_TOP_TVDPLL_D16 109
+#define CLK_TOP_TVDPLL_D32 110
+#define CLK_TOP_MSDCPLL_D2 111
+#define CLK_TOP_ULPOSC1 112
+#define CLK_TOP_ULPOSC1_D2 113
+#define CLK_TOP_ULPOSC1_D4 114
+#define CLK_TOP_ULPOSC1_D8 115
+#define CLK_TOP_ULPOSC1_D10 116
+#define CLK_TOP_ULPOSC1_D16 117
+#define CLK_TOP_ULPOSC1_D32 118
+#define CLK_TOP_ADSPPLL_D2 119
+#define CLK_TOP_ADSPPLL_D4 120
+#define CLK_TOP_ADSPPLL_D8 121
+#define CLK_TOP_NNAPLL_D2 122
+#define CLK_TOP_NNAPLL_D4 123
+#define CLK_TOP_NNAPLL_D8 124
+#define CLK_TOP_NNA2PLL_D2 125
+#define CLK_TOP_NNA2PLL_D4 126
+#define CLK_TOP_NNA2PLL_D8 127
+#define CLK_TOP_F_BIST2FPC 128
+#define CLK_TOP_466M_FMEM 129
+#define CLK_TOP_MPLL 130
+#define CLK_TOP_APLL12_CK_DIV0 131
+#define CLK_TOP_APLL12_CK_DIV1 132
+#define CLK_TOP_APLL12_CK_DIV2 133
+#define CLK_TOP_APLL12_CK_DIV4 134
+#define CLK_TOP_APLL12_CK_DIV_TDMOUT_M 135
+#define CLK_TOP_NR_CLK 136
+
+/* INFRACFG_AO */
+
+#define CLK_INFRA_AO_PMIC_TMR 0
+#define CLK_INFRA_AO_PMIC_AP 1
+#define CLK_INFRA_AO_PMIC_MD 2
+#define CLK_INFRA_AO_PMIC_CONN 3
+#define CLK_INFRA_AO_SCP_CORE 4
+#define CLK_INFRA_AO_SEJ 5
+#define CLK_INFRA_AO_APXGPT 6
+#define CLK_INFRA_AO_ICUSB 7
+#define CLK_INFRA_AO_GCE 8
+#define CLK_INFRA_AO_THERM 9
+#define CLK_INFRA_AO_I2C_AP 10
+#define CLK_INFRA_AO_I2C_CCU 11
+#define CLK_INFRA_AO_I2C_SSPM 12
+#define CLK_INFRA_AO_I2C_RSV 13
+#define CLK_INFRA_AO_PWM_HCLK 14
+#define CLK_INFRA_AO_PWM1 15
+#define CLK_INFRA_AO_PWM2 16
+#define CLK_INFRA_AO_PWM3 17
+#define CLK_INFRA_AO_PWM4 18
+#define CLK_INFRA_AO_PWM5 19
+#define CLK_INFRA_AO_PWM 20
+#define CLK_INFRA_AO_UART0 21
+#define CLK_INFRA_AO_UART1 22
+#define CLK_INFRA_AO_UART2 23
+#define CLK_INFRA_AO_GCE_26M 24
+#define CLK_INFRA_AO_CQ_DMA_FPC 25
+#define CLK_INFRA_AO_BTIF 26
+#define CLK_INFRA_AO_SPI0 27
+#define CLK_INFRA_AO_MSDC0 28
+#define CLK_INFRA_AO_MSDCFDE 29
+#define CLK_INFRA_AO_MSDC1 30
+#define CLK_INFRA_AO_DVFSRC 31
+#define CLK_INFRA_AO_GCPU 32
+#define CLK_INFRA_AO_TRNG 33
+#define CLK_INFRA_AO_AUXADC 34
+#define CLK_INFRA_AO_CPUM 35
+#define CLK_INFRA_AO_CCIF1_AP 36
+#define CLK_INFRA_AO_CCIF1_MD 37
+#define CLK_INFRA_AO_AUXADC_MD 38
+#define CLK_INFRA_AO_AP_DMA 39
+#define CLK_INFRA_AO_XIU 40
+#define CLK_INFRA_AO_DEVICE_APC 41
+#define CLK_INFRA_AO_CCIF_AP 42
+#define CLK_INFRA_AO_DEBUGTOP 43
+#define CLK_INFRA_AO_AUDIO 44
+#define CLK_INFRA_AO_CCIF_MD 45
+#define CLK_INFRA_AO_DXCC_SEC_CORE 46
+#define CLK_INFRA_AO_DXCC_AO 47
+#define CLK_INFRA_AO_IMP_IIC 48
+#define CLK_INFRA_AO_DRAMC_F26M 49
+#define CLK_INFRA_AO_RG_PWM_FBCLK6 50
+#define CLK_INFRA_AO_SSUSB_TOP_HCLK 51
+#define CLK_INFRA_AO_DISP_PWM 52
+#define CLK_INFRA_AO_CLDMA_BCLK 53
+#define CLK_INFRA_AO_AUDIO_26M_BCLK 54
+#define CLK_INFRA_AO_SSUSB_TOP_P1_HCLK 55
+#define CLK_INFRA_AO_SPI1 56
+#define CLK_INFRA_AO_I2C4 57
+#define CLK_INFRA_AO_MODEM_TEMP_SHARE 58
+#define CLK_INFRA_AO_SPI2 59
+#define CLK_INFRA_AO_SPI3 60
+#define CLK_INFRA_AO_SSUSB_TOP_REF 61
+#define CLK_INFRA_AO_SSUSB_TOP_XHCI 62
+#define CLK_INFRA_AO_SSUSB_TOP_P1_REF 63
+#define CLK_INFRA_AO_SSUSB_TOP_P1_XHCI 64
+#define CLK_INFRA_AO_SSPM 65
+#define CLK_INFRA_AO_SSUSB_TOP_P1_SYS 66
+#define CLK_INFRA_AO_I2C5 67
+#define CLK_INFRA_AO_I2C5_ARBITER 68
+#define CLK_INFRA_AO_I2C5_IMM 69
+#define CLK_INFRA_AO_I2C1_ARBITER 70
+#define CLK_INFRA_AO_I2C1_IMM 71
+#define CLK_INFRA_AO_I2C2_ARBITER 72
+#define CLK_INFRA_AO_I2C2_IMM 73
+#define CLK_INFRA_AO_SPI4 74
+#define CLK_INFRA_AO_SPI5 75
+#define CLK_INFRA_AO_CQ_DMA 76
+#define CLK_INFRA_AO_BIST2FPC 77
+#define CLK_INFRA_AO_MSDC0_SELF 78
+#define CLK_INFRA_AO_SPINOR 79
+#define CLK_INFRA_AO_SSPM_26M_SELF 80
+#define CLK_INFRA_AO_SSPM_32K_SELF 81
+#define CLK_INFRA_AO_I2C6 82
+#define CLK_INFRA_AO_AP_MSDC0 83
+#define CLK_INFRA_AO_MD_MSDC0 84
+#define CLK_INFRA_AO_MSDC0_SRC 85
+#define CLK_INFRA_AO_MSDC1_SRC 86
+#define CLK_INFRA_AO_SEJ_F13M 87
+#define CLK_INFRA_AO_AES_TOP0_BCLK 88
+#define CLK_INFRA_AO_MCU_PM_BCLK 89
+#define CLK_INFRA_AO_CCIF2_AP 90
+#define CLK_INFRA_AO_CCIF2_MD 91
+#define CLK_INFRA_AO_CCIF3_AP 92
+#define CLK_INFRA_AO_CCIF3_MD 93
+#define CLK_INFRA_AO_FADSP_26M 94
+#define CLK_INFRA_AO_FADSP_32K 95
+#define CLK_INFRA_AO_CCIF4_AP 96
+#define CLK_INFRA_AO_CCIF4_MD 97
+#define CLK_INFRA_AO_FADSP 98
+#define CLK_INFRA_AO_FLASHIF_133M 99
+#define CLK_INFRA_AO_FLASHIF_66M 100
+#define CLK_INFRA_AO_NR_CLK 101
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_ARMPLL_LL 0
+#define CLK_APMIXED_ARMPLL_BL 1
+#define CLK_APMIXED_CCIPLL 2
+#define CLK_APMIXED_MAINPLL 3
+#define CLK_APMIXED_UNIV2PLL 4
+#define CLK_APMIXED_MSDCPLL 5
+#define CLK_APMIXED_MMPLL 6
+#define CLK_APMIXED_NNAPLL 7
+#define CLK_APMIXED_NNA2PLL 8
+#define CLK_APMIXED_ADSPPLL 9
+#define CLK_APMIXED_MFGPLL 10
+#define CLK_APMIXED_TVDPLL 11
+#define CLK_APMIXED_APLL1 12
+#define CLK_APMIXED_APLL2 13
+#define CLK_APMIXED_NR_CLK 14
+
+/* IMP_IIC_WRAP */
+
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C0 0
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C1 1
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C2 2
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C3 3
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C4 4
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C5 5
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C6 6
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C7 7
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C8 8
+#define CLK_IMP_IIC_WRAP_AP_CLOCK_I2C9 9
+#define CLK_IMP_IIC_WRAP_NR_CLK 10
+
+/* MFGCFG */
+
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_NR_CLK 1
+
+/* MMSYS */
+
+#define CLK_MM_DISP_MUTEX0 0
+#define CLK_MM_APB_MM_BUS 1
+#define CLK_MM_DISP_OVL0 2
+#define CLK_MM_DISP_RDMA0 3
+#define CLK_MM_DISP_OVL0_2L 4
+#define CLK_MM_DISP_WDMA0 5
+#define CLK_MM_DISP_RSZ0 6
+#define CLK_MM_DISP_AAL0 7
+#define CLK_MM_DISP_CCORR0 8
+#define CLK_MM_DISP_COLOR0 9
+#define CLK_MM_SMI_INFRA 10
+#define CLK_MM_DISP_DSC_WRAP0 11
+#define CLK_MM_DISP_GAMMA0 12
+#define CLK_MM_DISP_POSTMASK0 13
+#define CLK_MM_DISP_DITHER0 14
+#define CLK_MM_SMI_COMMON 15
+#define CLK_MM_DSI0 16
+#define CLK_MM_DISP_FAKE_ENG0 17
+#define CLK_MM_DISP_FAKE_ENG1 18
+#define CLK_MM_SMI_GALS 19
+#define CLK_MM_SMI_IOMMU 20
+#define CLK_MM_DISP_RDMA1 21
+#define CLK_MM_DISP_DPI 22
+#define CLK_MM_DSI0_DSI_CK_DOMAIN 23
+#define CLK_MM_DISP_26M 24
+#define CLK_MM_NR_CLK 25
+
+/* WPESYS */
+
+#define CLK_WPE_CK_EN 0
+#define CLK_WPE_SMI_LARB8_CK_EN 1
+#define CLK_WPE_SYS_EVENT_TX_CK_EN 2
+#define CLK_WPE_SMI_LARB8_PCLK_EN 3
+#define CLK_WPE_NR_CLK 4
+
+/* IMGSYS1 */
+
+#define CLK_IMG1_LARB9_IMG1 0
+#define CLK_IMG1_LARB10_IMG1 1
+#define CLK_IMG1_DIP 2
+#define CLK_IMG1_GALS_IMG1 3
+#define CLK_IMG1_NR_CLK 4
+
+/* IMGSYS2 */
+
+#define CLK_IMG2_LARB9_IMG2 0
+#define CLK_IMG2_LARB10_IMG2 1
+#define CLK_IMG2_MFB 2
+#define CLK_IMG2_WPE 3
+#define CLK_IMG2_MSS 4
+#define CLK_IMG2_GALS_IMG2 5
+#define CLK_IMG2_NR_CLK 6
+
+/* VDECSYS */
+
+#define CLK_VDEC_LARB1_CKEN 0
+#define CLK_VDEC_LAT_CKEN 1
+#define CLK_VDEC_LAT_ACTIVE 2
+#define CLK_VDEC_LAT_CKEN_ENG 3
+#define CLK_VDEC_MINI_MDP_CKEN_CFG_RG 4
+#define CLK_VDEC_CKEN 5
+#define CLK_VDEC_ACTIVE 6
+#define CLK_VDEC_CKEN_ENG 7
+#define CLK_VDEC_NR_CLK 8
+
+/* VENCSYS */
+
+#define CLK_VENC_CKE0_LARB 0
+#define CLK_VENC_CKE1_VENC 1
+#define CLK_VENC_CKE2_JPGENC 2
+#define CLK_VENC_CKE5_GALS 3
+#define CLK_VENC_NR_CLK 4
+
+/* CAMSYS */
+
+#define CLK_CAM_LARB13 0
+#define CLK_CAM_DFP_VAD 1
+#define CLK_CAM_LARB14 2
+#define CLK_CAM 3
+#define CLK_CAMTG 4
+#define CLK_CAM_SENINF 5
+#define CLK_CAMSV1 6
+#define CLK_CAMSV2 7
+#define CLK_CAMSV3 8
+#define CLK_CAM_CCU0 9
+#define CLK_CAM_CCU1 10
+#define CLK_CAM_MRAW0 11
+#define CLK_CAM_FAKE_ENG 12
+#define CLK_CAM_CCU_GALS 13
+#define CLK_CAM2MM_GALS 14
+#define CLK_CAM_NR_CLK 15
+
+/* CAMSYS_RAWA */
+
+#define CLK_CAM_RAWA_LARBX_RAWA 0
+#define CLK_CAM_RAWA 1
+#define CLK_CAM_RAWA_CAMTG_RAWA 2
+#define CLK_CAM_RAWA_NR_CLK 3
+
+/* CAMSYS_RAWB */
+
+#define CLK_CAM_RAWB_LARBX_RAWB 0
+#define CLK_CAM_RAWB 1
+#define CLK_CAM_RAWB_CAMTG_RAWB 2
+#define CLK_CAM_RAWB_NR_CLK 3
+
+/* MDPSYS */
+
+#define CLK_MDP_RDMA0 0
+#define CLK_MDP_TDSHP0 1
+#define CLK_MDP_IMG_DL_ASYNC0 2
+#define CLK_MDP_IMG_DL_ASYNC1 3
+#define CLK_MDP_DISP_RDMA 4
+#define CLK_MDP_HMS 5
+#define CLK_MDP_SMI0 6
+#define CLK_MDP_APB_BUS 7
+#define CLK_MDP_WROT0 8
+#define CLK_MDP_RSZ0 9
+#define CLK_MDP_HDR0 10
+#define CLK_MDP_MUTEX0 11
+#define CLK_MDP_WROT1 12
+#define CLK_MDP_RSZ1 13
+#define CLK_MDP_FAKE_ENG0 14
+#define CLK_MDP_AAL0 15
+#define CLK_MDP_DISP_WDMA 16
+#define CLK_MDP_COLOR 17
+#define CLK_MDP_IMG_DL_ASYNC2 18
+#define CLK_MDP_IMG_DL_RELAY0_ASYNC0 19
+#define CLK_MDP_IMG_DL_RELAY1_ASYNC1 20
+#define CLK_MDP_IMG_DL_RELAY2_ASYNC2 21
+#define CLK_MDP_NR_CLK 22
+
+/* IPESYS */
+
+#define CLK_IPE_LARB19 0
+#define CLK_IPE_LARB20 1
+#define CLK_IPE_SMI_SUBCOM 2
+#define CLK_IPE_FD 3
+#define CLK_IPE_FE 4
+#define CLK_IPE_RSC 5
+#define CLK_IPE_DPE 6
+#define CLK_IPE_GALS_IPE 7
+#define CLK_IPE_NR_CLK 8
+
+#endif /* _DT_BINDINGS_CLK_MT8186_H */
diff --git a/include/dt-bindings/clock/mt8192-clk.h b/include/dt-bindings/clock/mt8192-clk.h
new file mode 100644
index 000000000000..5ab68f15a256
--- /dev/null
+++ b/include/dt-bindings/clock/mt8192-clk.h
@@ -0,0 +1,585 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8192_H
+#define _DT_BINDINGS_CLK_MT8192_H
+
+/* TOPCKGEN */
+
+#define CLK_TOP_AXI_SEL 0
+#define CLK_TOP_SPM_SEL 1
+#define CLK_TOP_SCP_SEL 2
+#define CLK_TOP_BUS_AXIMEM_SEL 3
+#define CLK_TOP_DISP_SEL 4
+#define CLK_TOP_MDP_SEL 5
+#define CLK_TOP_IMG1_SEL 6
+#define CLK_TOP_IMG2_SEL 7
+#define CLK_TOP_IPE_SEL 8
+#define CLK_TOP_DPE_SEL 9
+#define CLK_TOP_CAM_SEL 10
+#define CLK_TOP_CCU_SEL 11
+#define CLK_TOP_DSP7_SEL 12
+#define CLK_TOP_MFG_REF_SEL 13
+#define CLK_TOP_MFG_PLL_SEL 14
+#define CLK_TOP_CAMTG_SEL 15
+#define CLK_TOP_CAMTG2_SEL 16
+#define CLK_TOP_CAMTG3_SEL 17
+#define CLK_TOP_CAMTG4_SEL 18
+#define CLK_TOP_CAMTG5_SEL 19
+#define CLK_TOP_CAMTG6_SEL 20
+#define CLK_TOP_UART_SEL 21
+#define CLK_TOP_SPI_SEL 22
+#define CLK_TOP_MSDC50_0_H_SEL 23
+#define CLK_TOP_MSDC50_0_SEL 24
+#define CLK_TOP_MSDC30_1_SEL 25
+#define CLK_TOP_MSDC30_2_SEL 26
+#define CLK_TOP_AUDIO_SEL 27
+#define CLK_TOP_AUD_INTBUS_SEL 28
+#define CLK_TOP_PWRAP_ULPOSC_SEL 29
+#define CLK_TOP_ATB_SEL 30
+#define CLK_TOP_DPI_SEL 31
+#define CLK_TOP_SCAM_SEL 32
+#define CLK_TOP_DISP_PWM_SEL 33
+#define CLK_TOP_USB_TOP_SEL 34
+#define CLK_TOP_SSUSB_XHCI_SEL 35
+#define CLK_TOP_I2C_SEL 36
+#define CLK_TOP_SENINF_SEL 37
+#define CLK_TOP_SENINF1_SEL 38
+#define CLK_TOP_SENINF2_SEL 39
+#define CLK_TOP_SENINF3_SEL 40
+#define CLK_TOP_TL_SEL 41
+#define CLK_TOP_DXCC_SEL 42
+#define CLK_TOP_AUD_ENGEN1_SEL 43
+#define CLK_TOP_AUD_ENGEN2_SEL 44
+#define CLK_TOP_AES_UFSFDE_SEL 45
+#define CLK_TOP_UFS_SEL 46
+#define CLK_TOP_AUD_1_SEL 47
+#define CLK_TOP_AUD_2_SEL 48
+#define CLK_TOP_ADSP_SEL 49
+#define CLK_TOP_DPMAIF_MAIN_SEL 50
+#define CLK_TOP_VENC_SEL 51
+#define CLK_TOP_VDEC_SEL 52
+#define CLK_TOP_CAMTM_SEL 53
+#define CLK_TOP_PWM_SEL 54
+#define CLK_TOP_AUDIO_H_SEL 55
+#define CLK_TOP_SPMI_MST_SEL 56
+#define CLK_TOP_AES_MSDCFDE_SEL 57
+#define CLK_TOP_SFLASH_SEL 58
+#define CLK_TOP_APLL_I2S0_M_SEL 59
+#define CLK_TOP_APLL_I2S1_M_SEL 60
+#define CLK_TOP_APLL_I2S2_M_SEL 61
+#define CLK_TOP_APLL_I2S3_M_SEL 62
+#define CLK_TOP_APLL_I2S4_M_SEL 63
+#define CLK_TOP_APLL_I2S5_M_SEL 64
+#define CLK_TOP_APLL_I2S6_M_SEL 65
+#define CLK_TOP_APLL_I2S7_M_SEL 66
+#define CLK_TOP_APLL_I2S8_M_SEL 67
+#define CLK_TOP_APLL_I2S9_M_SEL 68
+#define CLK_TOP_MAINPLL_D3 69
+#define CLK_TOP_MAINPLL_D4 70
+#define CLK_TOP_MAINPLL_D4_D2 71
+#define CLK_TOP_MAINPLL_D4_D4 72
+#define CLK_TOP_MAINPLL_D4_D8 73
+#define CLK_TOP_MAINPLL_D4_D16 74
+#define CLK_TOP_MAINPLL_D5 75
+#define CLK_TOP_MAINPLL_D5_D2 76
+#define CLK_TOP_MAINPLL_D5_D4 77
+#define CLK_TOP_MAINPLL_D5_D8 78
+#define CLK_TOP_MAINPLL_D6 79
+#define CLK_TOP_MAINPLL_D6_D2 80
+#define CLK_TOP_MAINPLL_D6_D4 81
+#define CLK_TOP_MAINPLL_D7 82
+#define CLK_TOP_MAINPLL_D7_D2 83
+#define CLK_TOP_MAINPLL_D7_D4 84
+#define CLK_TOP_MAINPLL_D7_D8 85
+#define CLK_TOP_UNIVPLL_D3 86
+#define CLK_TOP_UNIVPLL_D4 87
+#define CLK_TOP_UNIVPLL_D4_D2 88
+#define CLK_TOP_UNIVPLL_D4_D4 89
+#define CLK_TOP_UNIVPLL_D4_D8 90
+#define CLK_TOP_UNIVPLL_D5 91
+#define CLK_TOP_UNIVPLL_D5_D2 92
+#define CLK_TOP_UNIVPLL_D5_D4 93
+#define CLK_TOP_UNIVPLL_D5_D8 94
+#define CLK_TOP_UNIVPLL_D6 95
+#define CLK_TOP_UNIVPLL_D6_D2 96
+#define CLK_TOP_UNIVPLL_D6_D4 97
+#define CLK_TOP_UNIVPLL_D6_D8 98
+#define CLK_TOP_UNIVPLL_D6_D16 99
+#define CLK_TOP_UNIVPLL_D7 100
+#define CLK_TOP_APLL1 101
+#define CLK_TOP_APLL1_D2 102
+#define CLK_TOP_APLL1_D4 103
+#define CLK_TOP_APLL1_D8 104
+#define CLK_TOP_APLL2 105
+#define CLK_TOP_APLL2_D2 106
+#define CLK_TOP_APLL2_D4 107
+#define CLK_TOP_APLL2_D8 108
+#define CLK_TOP_MMPLL_D4 109
+#define CLK_TOP_MMPLL_D4_D2 110
+#define CLK_TOP_MMPLL_D5 111
+#define CLK_TOP_MMPLL_D5_D2 112
+#define CLK_TOP_MMPLL_D6 113
+#define CLK_TOP_MMPLL_D6_D2 114
+#define CLK_TOP_MMPLL_D7 115
+#define CLK_TOP_MMPLL_D9 116
+#define CLK_TOP_APUPLL 117
+#define CLK_TOP_NPUPLL 118
+#define CLK_TOP_TVDPLL 119
+#define CLK_TOP_TVDPLL_D2 120
+#define CLK_TOP_TVDPLL_D4 121
+#define CLK_TOP_TVDPLL_D8 122
+#define CLK_TOP_TVDPLL_D16 123
+#define CLK_TOP_MSDCPLL 124
+#define CLK_TOP_MSDCPLL_D2 125
+#define CLK_TOP_MSDCPLL_D4 126
+#define CLK_TOP_ULPOSC 127
+#define CLK_TOP_OSC_D2 128
+#define CLK_TOP_OSC_D4 129
+#define CLK_TOP_OSC_D8 130
+#define CLK_TOP_OSC_D10 131
+#define CLK_TOP_OSC_D16 132
+#define CLK_TOP_OSC_D20 133
+#define CLK_TOP_CSW_F26M_D2 134
+#define CLK_TOP_ADSPPLL 135
+#define CLK_TOP_UNIVPLL_192M 136
+#define CLK_TOP_UNIVPLL_192M_D2 137
+#define CLK_TOP_UNIVPLL_192M_D4 138
+#define CLK_TOP_UNIVPLL_192M_D8 139
+#define CLK_TOP_UNIVPLL_192M_D16 140
+#define CLK_TOP_UNIVPLL_192M_D32 141
+#define CLK_TOP_APLL12_DIV0 142
+#define CLK_TOP_APLL12_DIV1 143
+#define CLK_TOP_APLL12_DIV2 144
+#define CLK_TOP_APLL12_DIV3 145
+#define CLK_TOP_APLL12_DIV4 146
+#define CLK_TOP_APLL12_DIVB 147
+#define CLK_TOP_APLL12_DIV5 148
+#define CLK_TOP_APLL12_DIV6 149
+#define CLK_TOP_APLL12_DIV7 150
+#define CLK_TOP_APLL12_DIV8 151
+#define CLK_TOP_APLL12_DIV9 152
+#define CLK_TOP_SSUSB_TOP_REF 153
+#define CLK_TOP_SSUSB_PHY_REF 154
+#define CLK_TOP_NR_CLK 155
+
+/* INFRACFG */
+
+#define CLK_INFRA_PMIC_TMR 0
+#define CLK_INFRA_PMIC_AP 1
+#define CLK_INFRA_PMIC_MD 2
+#define CLK_INFRA_PMIC_CONN 3
+#define CLK_INFRA_SCPSYS 4
+#define CLK_INFRA_SEJ 5
+#define CLK_INFRA_APXGPT 6
+#define CLK_INFRA_GCE 7
+#define CLK_INFRA_GCE2 8
+#define CLK_INFRA_THERM 9
+#define CLK_INFRA_I2C0 10
+#define CLK_INFRA_AP_DMA_PSEUDO 11
+#define CLK_INFRA_I2C2 12
+#define CLK_INFRA_I2C3 13
+#define CLK_INFRA_PWM_H 14
+#define CLK_INFRA_PWM1 15
+#define CLK_INFRA_PWM2 16
+#define CLK_INFRA_PWM3 17
+#define CLK_INFRA_PWM4 18
+#define CLK_INFRA_PWM 19
+#define CLK_INFRA_UART0 20
+#define CLK_INFRA_UART1 21
+#define CLK_INFRA_UART2 22
+#define CLK_INFRA_UART3 23
+#define CLK_INFRA_GCE_26M 24
+#define CLK_INFRA_CQ_DMA_FPC 25
+#define CLK_INFRA_BTIF 26
+#define CLK_INFRA_SPI0 27
+#define CLK_INFRA_MSDC0 28
+#define CLK_INFRA_MSDC1 29
+#define CLK_INFRA_MSDC2 30
+#define CLK_INFRA_MSDC0_SRC 31
+#define CLK_INFRA_GCPU 32
+#define CLK_INFRA_TRNG 33
+#define CLK_INFRA_AUXADC 34
+#define CLK_INFRA_CPUM 35
+#define CLK_INFRA_CCIF1_AP 36
+#define CLK_INFRA_CCIF1_MD 37
+#define CLK_INFRA_AUXADC_MD 38
+#define CLK_INFRA_PCIE_TL_26M 39
+#define CLK_INFRA_MSDC1_SRC 40
+#define CLK_INFRA_MSDC2_SRC 41
+#define CLK_INFRA_PCIE_TL_96M 42
+#define CLK_INFRA_PCIE_PL_P_250M 43
+#define CLK_INFRA_DEVICE_APC 44
+#define CLK_INFRA_CCIF_AP 45
+#define CLK_INFRA_DEBUGSYS 46
+#define CLK_INFRA_AUDIO 47
+#define CLK_INFRA_CCIF_MD 48
+#define CLK_INFRA_DXCC_SEC_CORE 49
+#define CLK_INFRA_DXCC_AO 50
+#define CLK_INFRA_DBG_TRACE 51
+#define CLK_INFRA_DEVMPU_B 52
+#define CLK_INFRA_DRAMC_F26M 53
+#define CLK_INFRA_IRTX 54
+#define CLK_INFRA_SSUSB 55
+#define CLK_INFRA_DISP_PWM 56
+#define CLK_INFRA_CLDMA_B 57
+#define CLK_INFRA_AUDIO_26M_B 58
+#define CLK_INFRA_MODEM_TEMP_SHARE 59
+#define CLK_INFRA_SPI1 60
+#define CLK_INFRA_I2C4 61
+#define CLK_INFRA_SPI2 62
+#define CLK_INFRA_SPI3 63
+#define CLK_INFRA_UNIPRO_SYS 64
+#define CLK_INFRA_UNIPRO_TICK 65
+#define CLK_INFRA_UFS_MP_SAP_B 66
+#define CLK_INFRA_MD32_B 67
+#define CLK_INFRA_UNIPRO_MBIST 68
+#define CLK_INFRA_I2C5 69
+#define CLK_INFRA_I2C5_ARBITER 70
+#define CLK_INFRA_I2C5_IMM 71
+#define CLK_INFRA_I2C1_ARBITER 72
+#define CLK_INFRA_I2C1_IMM 73
+#define CLK_INFRA_I2C2_ARBITER 74
+#define CLK_INFRA_I2C2_IMM 75
+#define CLK_INFRA_SPI4 76
+#define CLK_INFRA_SPI5 77
+#define CLK_INFRA_CQ_DMA 78
+#define CLK_INFRA_UFS 79
+#define CLK_INFRA_AES_UFSFDE 80
+#define CLK_INFRA_UFS_TICK 81
+#define CLK_INFRA_SSUSB_XHCI 82
+#define CLK_INFRA_MSDC0_SELF 83
+#define CLK_INFRA_MSDC1_SELF 84
+#define CLK_INFRA_MSDC2_SELF 85
+#define CLK_INFRA_UFS_AXI 86
+#define CLK_INFRA_I2C6 87
+#define CLK_INFRA_AP_MSDC0 88
+#define CLK_INFRA_MD_MSDC0 89
+#define CLK_INFRA_CCIF5_AP 90
+#define CLK_INFRA_CCIF5_MD 91
+#define CLK_INFRA_PCIE_TOP_H_133M 92
+#define CLK_INFRA_FLASHIF_TOP_H_133M 93
+#define CLK_INFRA_PCIE_PERI_26M 94
+#define CLK_INFRA_CCIF2_AP 95
+#define CLK_INFRA_CCIF2_MD 96
+#define CLK_INFRA_CCIF3_AP 97
+#define CLK_INFRA_CCIF3_MD 98
+#define CLK_INFRA_SEJ_F13M 99
+#define CLK_INFRA_AES 100
+#define CLK_INFRA_I2C7 101
+#define CLK_INFRA_I2C8 102
+#define CLK_INFRA_FBIST2FPC 103
+#define CLK_INFRA_DEVICE_APC_SYNC 104
+#define CLK_INFRA_DPMAIF_MAIN 105
+#define CLK_INFRA_PCIE_TL_32K 106
+#define CLK_INFRA_CCIF4_AP 107
+#define CLK_INFRA_CCIF4_MD 108
+#define CLK_INFRA_SPI6 109
+#define CLK_INFRA_SPI7 110
+#define CLK_INFRA_133M 111
+#define CLK_INFRA_66M 112
+#define CLK_INFRA_66M_PERI_BUS 113
+#define CLK_INFRA_FREE_DCM_133M 114
+#define CLK_INFRA_FREE_DCM_66M 115
+#define CLK_INFRA_PERI_BUS_DCM_133M 116
+#define CLK_INFRA_PERI_BUS_DCM_66M 117
+#define CLK_INFRA_FLASHIF_PERI_26M 118
+#define CLK_INFRA_FLASHIF_SFLASH 119
+#define CLK_INFRA_AP_DMA 120
+#define CLK_INFRA_NR_CLK 121
+
+/* PERICFG */
+
+#define CLK_PERI_PERIAXI 0
+#define CLK_PERI_NR_CLK 1
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_MAINPLL 0
+#define CLK_APMIXED_UNIVPLL 1
+#define CLK_APMIXED_USBPLL 2
+#define CLK_APMIXED_MSDCPLL 3
+#define CLK_APMIXED_MMPLL 4
+#define CLK_APMIXED_ADSPPLL 5
+#define CLK_APMIXED_MFGPLL 6
+#define CLK_APMIXED_TVDPLL 7
+#define CLK_APMIXED_APLL1 8
+#define CLK_APMIXED_APLL2 9
+#define CLK_APMIXED_MIPID26M 10
+#define CLK_APMIXED_NR_CLK 11
+
+/* SCP_ADSP */
+
+#define CLK_SCP_ADSP_AUDIODSP 0
+#define CLK_SCP_ADSP_NR_CLK 1
+
+/* IMP_IIC_WRAP_C */
+
+#define CLK_IMP_IIC_WRAP_C_I2C10 0
+#define CLK_IMP_IIC_WRAP_C_I2C11 1
+#define CLK_IMP_IIC_WRAP_C_I2C12 2
+#define CLK_IMP_IIC_WRAP_C_I2C13 3
+#define CLK_IMP_IIC_WRAP_C_NR_CLK 4
+
+/* AUDSYS */
+
+#define CLK_AUD_AFE 0
+#define CLK_AUD_22M 1
+#define CLK_AUD_24M 2
+#define CLK_AUD_APLL2_TUNER 3
+#define CLK_AUD_APLL_TUNER 4
+#define CLK_AUD_TDM 5
+#define CLK_AUD_ADC 6
+#define CLK_AUD_DAC 7
+#define CLK_AUD_DAC_PREDIS 8
+#define CLK_AUD_TML 9
+#define CLK_AUD_NLE 10
+#define CLK_AUD_I2S1_B 11
+#define CLK_AUD_I2S2_B 12
+#define CLK_AUD_I2S3_B 13
+#define CLK_AUD_I2S4_B 14
+#define CLK_AUD_CONNSYS_I2S_ASRC 15
+#define CLK_AUD_GENERAL1_ASRC 16
+#define CLK_AUD_GENERAL2_ASRC 17
+#define CLK_AUD_DAC_HIRES 18
+#define CLK_AUD_ADC_HIRES 19
+#define CLK_AUD_ADC_HIRES_TML 20
+#define CLK_AUD_ADDA6_ADC 21
+#define CLK_AUD_ADDA6_ADC_HIRES 22
+#define CLK_AUD_3RD_DAC 23
+#define CLK_AUD_3RD_DAC_PREDIS 24
+#define CLK_AUD_3RD_DAC_TML 25
+#define CLK_AUD_3RD_DAC_HIRES 26
+#define CLK_AUD_I2S5_B 27
+#define CLK_AUD_I2S6_B 28
+#define CLK_AUD_I2S7_B 29
+#define CLK_AUD_I2S8_B 30
+#define CLK_AUD_I2S9_B 31
+#define CLK_AUD_NR_CLK 32
+
+/* IMP_IIC_WRAP_E */
+
+#define CLK_IMP_IIC_WRAP_E_I2C3 0
+#define CLK_IMP_IIC_WRAP_E_NR_CLK 1
+
+/* IMP_IIC_WRAP_S */
+
+#define CLK_IMP_IIC_WRAP_S_I2C7 0
+#define CLK_IMP_IIC_WRAP_S_I2C8 1
+#define CLK_IMP_IIC_WRAP_S_I2C9 2
+#define CLK_IMP_IIC_WRAP_S_NR_CLK 3
+
+/* IMP_IIC_WRAP_WS */
+
+#define CLK_IMP_IIC_WRAP_WS_I2C1 0
+#define CLK_IMP_IIC_WRAP_WS_I2C2 1
+#define CLK_IMP_IIC_WRAP_WS_I2C4 2
+#define CLK_IMP_IIC_WRAP_WS_NR_CLK 3
+
+/* IMP_IIC_WRAP_W */
+
+#define CLK_IMP_IIC_WRAP_W_I2C5 0
+#define CLK_IMP_IIC_WRAP_W_NR_CLK 1
+
+/* IMP_IIC_WRAP_N */
+
+#define CLK_IMP_IIC_WRAP_N_I2C0 0
+#define CLK_IMP_IIC_WRAP_N_I2C6 1
+#define CLK_IMP_IIC_WRAP_N_NR_CLK 2
+
+/* MSDC_TOP */
+
+#define CLK_MSDC_TOP_AES_0P 0
+#define CLK_MSDC_TOP_SRC_0P 1
+#define CLK_MSDC_TOP_SRC_1P 2
+#define CLK_MSDC_TOP_SRC_2P 3
+#define CLK_MSDC_TOP_P_MSDC0 4
+#define CLK_MSDC_TOP_P_MSDC1 5
+#define CLK_MSDC_TOP_P_MSDC2 6
+#define CLK_MSDC_TOP_P_CFG 7
+#define CLK_MSDC_TOP_AXI 8
+#define CLK_MSDC_TOP_H_MST_0P 9
+#define CLK_MSDC_TOP_H_MST_1P 10
+#define CLK_MSDC_TOP_H_MST_2P 11
+#define CLK_MSDC_TOP_MEM_OFF_DLY_26M 12
+#define CLK_MSDC_TOP_32K 13
+#define CLK_MSDC_TOP_AHB2AXI_BRG_AXI 14
+#define CLK_MSDC_TOP_NR_CLK 15
+
+/* MSDC */
+
+#define CLK_MSDC_AXI_WRAP 0
+#define CLK_MSDC_NR_CLK 1
+
+/* MFGCFG */
+
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_NR_CLK 1
+
+/* MMSYS */
+
+#define CLK_MM_DISP_MUTEX0 0
+#define CLK_MM_DISP_CONFIG 1
+#define CLK_MM_DISP_OVL0 2
+#define CLK_MM_DISP_RDMA0 3
+#define CLK_MM_DISP_OVL0_2L 4
+#define CLK_MM_DISP_WDMA0 5
+#define CLK_MM_DISP_UFBC_WDMA0 6
+#define CLK_MM_DISP_RSZ0 7
+#define CLK_MM_DISP_AAL0 8
+#define CLK_MM_DISP_CCORR0 9
+#define CLK_MM_DISP_DITHER0 10
+#define CLK_MM_SMI_INFRA 11
+#define CLK_MM_DISP_GAMMA0 12
+#define CLK_MM_DISP_POSTMASK0 13
+#define CLK_MM_DISP_DSC_WRAP0 14
+#define CLK_MM_DSI0 15
+#define CLK_MM_DISP_COLOR0 16
+#define CLK_MM_SMI_COMMON 17
+#define CLK_MM_DISP_FAKE_ENG0 18
+#define CLK_MM_DISP_FAKE_ENG1 19
+#define CLK_MM_MDP_TDSHP4 20
+#define CLK_MM_MDP_RSZ4 21
+#define CLK_MM_MDP_AAL4 22
+#define CLK_MM_MDP_HDR4 23
+#define CLK_MM_MDP_RDMA4 24
+#define CLK_MM_MDP_COLOR4 25
+#define CLK_MM_DISP_Y2R0 26
+#define CLK_MM_SMI_GALS 27
+#define CLK_MM_DISP_OVL2_2L 28
+#define CLK_MM_DISP_RDMA4 29
+#define CLK_MM_DISP_DPI0 30
+#define CLK_MM_SMI_IOMMU 31
+#define CLK_MM_DSI_DSI0 32
+#define CLK_MM_DPI_DPI0 33
+#define CLK_MM_26MHZ 34
+#define CLK_MM_32KHZ 35
+#define CLK_MM_NR_CLK 36
+
+/* IMGSYS */
+
+#define CLK_IMG_LARB9 0
+#define CLK_IMG_LARB10 1
+#define CLK_IMG_DIP 2
+#define CLK_IMG_GALS 3
+#define CLK_IMG_NR_CLK 4
+
+/* IMGSYS2 */
+
+#define CLK_IMG2_LARB11 0
+#define CLK_IMG2_LARB12 1
+#define CLK_IMG2_MFB 2
+#define CLK_IMG2_WPE 3
+#define CLK_IMG2_MSS 4
+#define CLK_IMG2_GALS 5
+#define CLK_IMG2_NR_CLK 6
+
+/* VDECSYS_SOC */
+
+#define CLK_VDEC_SOC_LARB1 0
+#define CLK_VDEC_SOC_LAT 1
+#define CLK_VDEC_SOC_LAT_ACTIVE 2
+#define CLK_VDEC_SOC_VDEC 3
+#define CLK_VDEC_SOC_VDEC_ACTIVE 4
+#define CLK_VDEC_SOC_NR_CLK 5
+
+/* VDECSYS */
+
+#define CLK_VDEC_LARB1 0
+#define CLK_VDEC_LAT 1
+#define CLK_VDEC_LAT_ACTIVE 2
+#define CLK_VDEC_VDEC 3
+#define CLK_VDEC_ACTIVE 4
+#define CLK_VDEC_NR_CLK 5
+
+/* VENCSYS */
+
+#define CLK_VENC_SET0_LARB 0
+#define CLK_VENC_SET1_VENC 1
+#define CLK_VENC_SET2_JPGENC 2
+#define CLK_VENC_SET5_GALS 3
+#define CLK_VENC_NR_CLK 4
+
+/* CAMSYS */
+
+#define CLK_CAM_LARB13 0
+#define CLK_CAM_DFP_VAD 1
+#define CLK_CAM_LARB14 2
+#define CLK_CAM_CAM 3
+#define CLK_CAM_CAMTG 4
+#define CLK_CAM_SENINF 5
+#define CLK_CAM_CAMSV0 6
+#define CLK_CAM_CAMSV1 7
+#define CLK_CAM_CAMSV2 8
+#define CLK_CAM_CAMSV3 9
+#define CLK_CAM_CCU0 10
+#define CLK_CAM_CCU1 11
+#define CLK_CAM_MRAW0 12
+#define CLK_CAM_FAKE_ENG 13
+#define CLK_CAM_CCU_GALS 14
+#define CLK_CAM_CAM2MM_GALS 15
+#define CLK_CAM_NR_CLK 16
+
+/* CAMSYS_RAWA */
+
+#define CLK_CAM_RAWA_LARBX 0
+#define CLK_CAM_RAWA_CAM 1
+#define CLK_CAM_RAWA_CAMTG 2
+#define CLK_CAM_RAWA_NR_CLK 3
+
+/* CAMSYS_RAWB */
+
+#define CLK_CAM_RAWB_LARBX 0
+#define CLK_CAM_RAWB_CAM 1
+#define CLK_CAM_RAWB_CAMTG 2
+#define CLK_CAM_RAWB_NR_CLK 3
+
+/* CAMSYS_RAWC */
+
+#define CLK_CAM_RAWC_LARBX 0
+#define CLK_CAM_RAWC_CAM 1
+#define CLK_CAM_RAWC_CAMTG 2
+#define CLK_CAM_RAWC_NR_CLK 3
+
+/* IPESYS */
+
+#define CLK_IPE_LARB19 0
+#define CLK_IPE_LARB20 1
+#define CLK_IPE_SMI_SUBCOM 2
+#define CLK_IPE_FD 3
+#define CLK_IPE_FE 4
+#define CLK_IPE_RSC 5
+#define CLK_IPE_DPE 6
+#define CLK_IPE_GALS 7
+#define CLK_IPE_NR_CLK 8
+
+/* MDPSYS */
+
+#define CLK_MDP_RDMA0 0
+#define CLK_MDP_TDSHP0 1
+#define CLK_MDP_IMG_DL_ASYNC0 2
+#define CLK_MDP_IMG_DL_ASYNC1 3
+#define CLK_MDP_RDMA1 4
+#define CLK_MDP_TDSHP1 5
+#define CLK_MDP_SMI0 6
+#define CLK_MDP_APB_BUS 7
+#define CLK_MDP_WROT0 8
+#define CLK_MDP_RSZ0 9
+#define CLK_MDP_HDR0 10
+#define CLK_MDP_MUTEX0 11
+#define CLK_MDP_WROT1 12
+#define CLK_MDP_RSZ1 13
+#define CLK_MDP_HDR1 14
+#define CLK_MDP_FAKE_ENG0 15
+#define CLK_MDP_AAL0 16
+#define CLK_MDP_AAL1 17
+#define CLK_MDP_COLOR0 18
+#define CLK_MDP_COLOR1 19
+#define CLK_MDP_IMG_DL_RELAY0_ASYNC0 20
+#define CLK_MDP_IMG_DL_RELAY1_ASYNC1 21
+#define CLK_MDP_NR_CLK 22
+
+#endif /* _DT_BINDINGS_CLK_MT8192_H */
diff --git a/include/dt-bindings/clock/mt8195-clk.h b/include/dt-bindings/clock/mt8195-clk.h
new file mode 100644
index 000000000000..d70d017ad69c
--- /dev/null
+++ b/include/dt-bindings/clock/mt8195-clk.h
@@ -0,0 +1,866 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8195_H
+#define _DT_BINDINGS_CLK_MT8195_H
+
+/* TOPCKGEN */
+
+#define CLK_TOP_AXI 0
+#define CLK_TOP_SPM 1
+#define CLK_TOP_SCP 2
+#define CLK_TOP_BUS_AXIMEM 3
+#define CLK_TOP_VPP 4
+#define CLK_TOP_ETHDR 5
+#define CLK_TOP_IPE 6
+#define CLK_TOP_CAM 7
+#define CLK_TOP_CCU 8
+#define CLK_TOP_IMG 9
+#define CLK_TOP_CAMTM 10
+#define CLK_TOP_DSP 11
+#define CLK_TOP_DSP1 12
+#define CLK_TOP_DSP2 13
+#define CLK_TOP_DSP3 14
+#define CLK_TOP_DSP4 15
+#define CLK_TOP_DSP5 16
+#define CLK_TOP_DSP6 17
+#define CLK_TOP_DSP7 18
+#define CLK_TOP_IPU_IF 19
+#define CLK_TOP_MFG_CORE_TMP 20
+#define CLK_TOP_CAMTG 21
+#define CLK_TOP_CAMTG2 22
+#define CLK_TOP_CAMTG3 23
+#define CLK_TOP_CAMTG4 24
+#define CLK_TOP_CAMTG5 25
+#define CLK_TOP_UART 26
+#define CLK_TOP_SPI 27
+#define CLK_TOP_SPIS 28
+#define CLK_TOP_MSDC50_0_HCLK 29
+#define CLK_TOP_MSDC50_0 30
+#define CLK_TOP_MSDC30_1 31
+#define CLK_TOP_MSDC30_2 32
+#define CLK_TOP_INTDIR 33
+#define CLK_TOP_AUD_INTBUS 34
+#define CLK_TOP_AUDIO_H 35
+#define CLK_TOP_PWRAP_ULPOSC 36
+#define CLK_TOP_ATB 37
+#define CLK_TOP_PWRMCU 38
+#define CLK_TOP_DP 39
+#define CLK_TOP_EDP 40
+#define CLK_TOP_DPI 41
+#define CLK_TOP_DISP_PWM0 42
+#define CLK_TOP_DISP_PWM1 43
+#define CLK_TOP_USB_TOP 44
+#define CLK_TOP_SSUSB_XHCI 45
+#define CLK_TOP_USB_TOP_1P 46
+#define CLK_TOP_SSUSB_XHCI_1P 47
+#define CLK_TOP_USB_TOP_2P 48
+#define CLK_TOP_SSUSB_XHCI_2P 49
+#define CLK_TOP_USB_TOP_3P 50
+#define CLK_TOP_SSUSB_XHCI_3P 51
+#define CLK_TOP_I2C 52
+#define CLK_TOP_SENINF 53
+#define CLK_TOP_SENINF1 54
+#define CLK_TOP_SENINF2 55
+#define CLK_TOP_SENINF3 56
+#define CLK_TOP_GCPU 57
+#define CLK_TOP_DXCC 58
+#define CLK_TOP_DPMAIF_MAIN 59
+#define CLK_TOP_AES_UFSFDE 60
+#define CLK_TOP_UFS 61
+#define CLK_TOP_UFS_TICK1US 62
+#define CLK_TOP_UFS_MP_SAP_CFG 63
+#define CLK_TOP_VENC 64
+#define CLK_TOP_VDEC 65
+#define CLK_TOP_PWM 66
+#define CLK_TOP_MCUPM 67
+#define CLK_TOP_SPMI_P_MST 68
+#define CLK_TOP_SPMI_M_MST 69
+#define CLK_TOP_DVFSRC 70
+#define CLK_TOP_TL 71
+#define CLK_TOP_TL_P1 72
+#define CLK_TOP_AES_MSDCFDE 73
+#define CLK_TOP_DSI_OCC 74
+#define CLK_TOP_WPE_VPP 75
+#define CLK_TOP_HDCP 76
+#define CLK_TOP_HDCP_24M 77
+#define CLK_TOP_HD20_DACR_REF_CLK 78
+#define CLK_TOP_HD20_HDCP_CCLK 79
+#define CLK_TOP_HDMI_XTAL 80
+#define CLK_TOP_HDMI_APB 81
+#define CLK_TOP_SNPS_ETH_250M 82
+#define CLK_TOP_SNPS_ETH_62P4M_PTP 83
+#define CLK_TOP_SNPS_ETH_50M_RMII 84
+#define CLK_TOP_DGI_OUT 85
+#define CLK_TOP_NNA0 86
+#define CLK_TOP_NNA1 87
+#define CLK_TOP_ADSP 88
+#define CLK_TOP_ASM_H 89
+#define CLK_TOP_ASM_M 90
+#define CLK_TOP_ASM_L 91
+#define CLK_TOP_APLL1 92
+#define CLK_TOP_APLL2 93
+#define CLK_TOP_APLL3 94
+#define CLK_TOP_APLL4 95
+#define CLK_TOP_APLL5 96
+#define CLK_TOP_I2SO1_MCK 97
+#define CLK_TOP_I2SO2_MCK 98
+#define CLK_TOP_I2SI1_MCK 99
+#define CLK_TOP_I2SI2_MCK 100
+#define CLK_TOP_DPTX_MCK 101
+#define CLK_TOP_AUD_IEC_CLK 102
+#define CLK_TOP_A1SYS_HP 103
+#define CLK_TOP_A2SYS_HF 104
+#define CLK_TOP_A3SYS_HF 105
+#define CLK_TOP_A4SYS_HF 106
+#define CLK_TOP_SPINFI_BCLK 107
+#define CLK_TOP_NFI1X 108
+#define CLK_TOP_ECC 109
+#define CLK_TOP_AUDIO_LOCAL_BUS 110
+#define CLK_TOP_SPINOR 111
+#define CLK_TOP_DVIO_DGI_REF 112
+#define CLK_TOP_ULPOSC 113
+#define CLK_TOP_ULPOSC_CORE 114
+#define CLK_TOP_SRCK 115
+#define CLK_TOP_MFG_CK_FAST_REF 116
+#define CLK_TOP_CLK26M_D2 117
+#define CLK_TOP_CLK26M_D52 118
+#define CLK_TOP_IN_DGI 119
+#define CLK_TOP_IN_DGI_D2 120
+#define CLK_TOP_IN_DGI_D4 121
+#define CLK_TOP_IN_DGI_D6 122
+#define CLK_TOP_IN_DGI_D8 123
+#define CLK_TOP_MAINPLL_D3 124
+#define CLK_TOP_MAINPLL_D4 125
+#define CLK_TOP_MAINPLL_D4_D2 126
+#define CLK_TOP_MAINPLL_D4_D4 127
+#define CLK_TOP_MAINPLL_D4_D8 128
+#define CLK_TOP_MAINPLL_D5 129
+#define CLK_TOP_MAINPLL_D5_D2 130
+#define CLK_TOP_MAINPLL_D5_D4 131
+#define CLK_TOP_MAINPLL_D5_D8 132
+#define CLK_TOP_MAINPLL_D6 133
+#define CLK_TOP_MAINPLL_D6_D2 134
+#define CLK_TOP_MAINPLL_D6_D4 135
+#define CLK_TOP_MAINPLL_D6_D8 136
+#define CLK_TOP_MAINPLL_D7 137
+#define CLK_TOP_MAINPLL_D7_D2 138
+#define CLK_TOP_MAINPLL_D7_D4 139
+#define CLK_TOP_MAINPLL_D7_D8 140
+#define CLK_TOP_MAINPLL_D9 141
+#define CLK_TOP_UNIVPLL_D2 142
+#define CLK_TOP_UNIVPLL_D3 143
+#define CLK_TOP_UNIVPLL_D4 144
+#define CLK_TOP_UNIVPLL_D4_D2 145
+#define CLK_TOP_UNIVPLL_D4_D4 146
+#define CLK_TOP_UNIVPLL_D4_D8 147
+#define CLK_TOP_UNIVPLL_D5 148
+#define CLK_TOP_UNIVPLL_D5_D2 149
+#define CLK_TOP_UNIVPLL_D5_D4 150
+#define CLK_TOP_UNIVPLL_D5_D8 151
+#define CLK_TOP_UNIVPLL_D6 152
+#define CLK_TOP_UNIVPLL_D6_D2 153
+#define CLK_TOP_UNIVPLL_D6_D4 154
+#define CLK_TOP_UNIVPLL_D6_D8 155
+#define CLK_TOP_UNIVPLL_D6_D16 156
+#define CLK_TOP_UNIVPLL_D7 157
+#define CLK_TOP_UNIVPLL_192M 158
+#define CLK_TOP_UNIVPLL_192M_D4 159
+#define CLK_TOP_UNIVPLL_192M_D8 160
+#define CLK_TOP_UNIVPLL_192M_D16 161
+#define CLK_TOP_UNIVPLL_192M_D32 162
+#define CLK_TOP_APLL1_D3 163
+#define CLK_TOP_APLL1_D4 164
+#define CLK_TOP_APLL2_D3 165
+#define CLK_TOP_APLL2_D4 166
+#define CLK_TOP_APLL3_D4 167
+#define CLK_TOP_APLL4_D4 168
+#define CLK_TOP_APLL5_D4 169
+#define CLK_TOP_HDMIRX_APLL_D3 170
+#define CLK_TOP_HDMIRX_APLL_D4 171
+#define CLK_TOP_HDMIRX_APLL_D6 172
+#define CLK_TOP_MMPLL_D4 173
+#define CLK_TOP_MMPLL_D4_D2 174
+#define CLK_TOP_MMPLL_D4_D4 175
+#define CLK_TOP_MMPLL_D5 176
+#define CLK_TOP_MMPLL_D5_D2 177
+#define CLK_TOP_MMPLL_D5_D4 178
+#define CLK_TOP_MMPLL_D6 179
+#define CLK_TOP_MMPLL_D6_D2 180
+#define CLK_TOP_MMPLL_D7 181
+#define CLK_TOP_MMPLL_D9 182
+#define CLK_TOP_TVDPLL1_D2 183
+#define CLK_TOP_TVDPLL1_D4 184
+#define CLK_TOP_TVDPLL1_D8 185
+#define CLK_TOP_TVDPLL1_D16 186
+#define CLK_TOP_TVDPLL2_D2 187
+#define CLK_TOP_TVDPLL2_D4 188
+#define CLK_TOP_TVDPLL2_D8 189
+#define CLK_TOP_TVDPLL2_D16 190
+#define CLK_TOP_MSDCPLL_D2 191
+#define CLK_TOP_MSDCPLL_D4 192
+#define CLK_TOP_MSDCPLL_D16 193
+#define CLK_TOP_ETHPLL_D2 194
+#define CLK_TOP_ETHPLL_D8 195
+#define CLK_TOP_ETHPLL_D10 196
+#define CLK_TOP_DGIPLL_D2 197
+#define CLK_TOP_ULPOSC1 198
+#define CLK_TOP_ULPOSC1_D2 199
+#define CLK_TOP_ULPOSC1_D4 200
+#define CLK_TOP_ULPOSC1_D7 201
+#define CLK_TOP_ULPOSC1_D8 202
+#define CLK_TOP_ULPOSC1_D10 203
+#define CLK_TOP_ULPOSC1_D16 204
+#define CLK_TOP_ULPOSC2 205
+#define CLK_TOP_ADSPPLL_D2 206
+#define CLK_TOP_ADSPPLL_D4 207
+#define CLK_TOP_ADSPPLL_D8 208
+#define CLK_TOP_MEM_466M 209
+#define CLK_TOP_MPHONE_SLAVE_B 210
+#define CLK_TOP_PEXTP_PIPE 211
+#define CLK_TOP_UFS_RX_SYMBOL 212
+#define CLK_TOP_UFS_TX_SYMBOL 213
+#define CLK_TOP_SSUSB_U3PHY_P1_P_P0 214
+#define CLK_TOP_UFS_RX_SYMBOL1 215
+#define CLK_TOP_FPC 216
+#define CLK_TOP_HDMIRX_P 217
+#define CLK_TOP_APLL12_DIV0 218
+#define CLK_TOP_APLL12_DIV1 219
+#define CLK_TOP_APLL12_DIV2 220
+#define CLK_TOP_APLL12_DIV3 221
+#define CLK_TOP_APLL12_DIV4 222
+#define CLK_TOP_APLL12_DIV9 223
+#define CLK_TOP_CFG_VPP0 224
+#define CLK_TOP_CFG_VPP1 225
+#define CLK_TOP_CFG_VDO0 226
+#define CLK_TOP_CFG_VDO1 227
+#define CLK_TOP_CFG_UNIPLL_SES 228
+#define CLK_TOP_CFG_26M_VPP0 229
+#define CLK_TOP_CFG_26M_VPP1 230
+#define CLK_TOP_CFG_26M_AUD 231
+#define CLK_TOP_CFG_AXI_EAST 232
+#define CLK_TOP_CFG_AXI_EAST_NORTH 233
+#define CLK_TOP_CFG_AXI_NORTH 234
+#define CLK_TOP_CFG_AXI_SOUTH 235
+#define CLK_TOP_CFG_EXT_TEST 236
+#define CLK_TOP_SSUSB_REF 237
+#define CLK_TOP_SSUSB_PHY_REF 238
+#define CLK_TOP_SSUSB_P1_REF 239
+#define CLK_TOP_SSUSB_PHY_P1_REF 240
+#define CLK_TOP_SSUSB_P2_REF 241
+#define CLK_TOP_SSUSB_PHY_P2_REF 242
+#define CLK_TOP_SSUSB_P3_REF 243
+#define CLK_TOP_SSUSB_PHY_P3_REF 244
+#define CLK_TOP_NR_CLK 245
+
+/* INFRACFG_AO */
+
+#define CLK_INFRA_AO_PMIC_TMR 0
+#define CLK_INFRA_AO_PMIC_AP 1
+#define CLK_INFRA_AO_PMIC_MD 2
+#define CLK_INFRA_AO_PMIC_CONN 3
+#define CLK_INFRA_AO_SEJ 4
+#define CLK_INFRA_AO_APXGPT 5
+#define CLK_INFRA_AO_GCE 6
+#define CLK_INFRA_AO_GCE2 7
+#define CLK_INFRA_AO_THERM 8
+#define CLK_INFRA_AO_PWM_H 9
+#define CLK_INFRA_AO_PWM1 10
+#define CLK_INFRA_AO_PWM2 11
+#define CLK_INFRA_AO_PWM3 12
+#define CLK_INFRA_AO_PWM4 13
+#define CLK_INFRA_AO_PWM 14
+#define CLK_INFRA_AO_UART0 15
+#define CLK_INFRA_AO_UART1 16
+#define CLK_INFRA_AO_UART2 17
+#define CLK_INFRA_AO_UART3 18
+#define CLK_INFRA_AO_UART4 19
+#define CLK_INFRA_AO_GCE_26M 20
+#define CLK_INFRA_AO_CQ_DMA_FPC 21
+#define CLK_INFRA_AO_UART5 22
+#define CLK_INFRA_AO_HDMI_26M 23
+#define CLK_INFRA_AO_SPI0 24
+#define CLK_INFRA_AO_MSDC0 25
+#define CLK_INFRA_AO_MSDC1 26
+#define CLK_INFRA_AO_CG1_MSDC2 27
+#define CLK_INFRA_AO_MSDC0_SRC 28
+#define CLK_INFRA_AO_TRNG 29
+#define CLK_INFRA_AO_AUXADC 30
+#define CLK_INFRA_AO_CPUM 31
+#define CLK_INFRA_AO_HDMI_32K 32
+#define CLK_INFRA_AO_CEC_66M_H 33
+#define CLK_INFRA_AO_IRRX 34
+#define CLK_INFRA_AO_PCIE_TL_26M 35
+#define CLK_INFRA_AO_MSDC1_SRC 36
+#define CLK_INFRA_AO_CEC_66M_B 37
+#define CLK_INFRA_AO_PCIE_TL_96M 38
+#define CLK_INFRA_AO_DEVICE_APC 39
+#define CLK_INFRA_AO_ECC_66M_H 40
+#define CLK_INFRA_AO_DEBUGSYS 41
+#define CLK_INFRA_AO_AUDIO 42
+#define CLK_INFRA_AO_PCIE_TL_32K 43
+#define CLK_INFRA_AO_DBG_TRACE 44
+#define CLK_INFRA_AO_DRAMC_F26M 45
+#define CLK_INFRA_AO_IRTX 46
+#define CLK_INFRA_AO_SSUSB 47
+#define CLK_INFRA_AO_DISP_PWM 48
+#define CLK_INFRA_AO_CLDMA_B 49
+#define CLK_INFRA_AO_AUDIO_26M_B 50
+#define CLK_INFRA_AO_SPI1 51
+#define CLK_INFRA_AO_SPI2 52
+#define CLK_INFRA_AO_SPI3 53
+#define CLK_INFRA_AO_UNIPRO_SYS 54
+#define CLK_INFRA_AO_UNIPRO_TICK 55
+#define CLK_INFRA_AO_UFS_MP_SAP_B 56
+#define CLK_INFRA_AO_PWRMCU 57
+#define CLK_INFRA_AO_PWRMCU_BUS_H 58
+#define CLK_INFRA_AO_APDMA_B 59
+#define CLK_INFRA_AO_SPI4 60
+#define CLK_INFRA_AO_SPI5 61
+#define CLK_INFRA_AO_CQ_DMA 62
+#define CLK_INFRA_AO_AES_UFSFDE 63
+#define CLK_INFRA_AO_AES 64
+#define CLK_INFRA_AO_UFS_TICK 65
+#define CLK_INFRA_AO_SSUSB_XHCI 66
+#define CLK_INFRA_AO_MSDC0_SELF 67
+#define CLK_INFRA_AO_MSDC1_SELF 68
+#define CLK_INFRA_AO_MSDC2_SELF 69
+#define CLK_INFRA_AO_I2S_DMA 70
+#define CLK_INFRA_AO_AP_MSDC0 71
+#define CLK_INFRA_AO_MD_MSDC0 72
+#define CLK_INFRA_AO_CG3_MSDC2 73
+#define CLK_INFRA_AO_GCPU 74
+#define CLK_INFRA_AO_PCIE_PERI_26M 75
+#define CLK_INFRA_AO_GCPU_66M_B 76
+#define CLK_INFRA_AO_GCPU_133M_B 77
+#define CLK_INFRA_AO_DISP_PWM1 78
+#define CLK_INFRA_AO_FBIST2FPC 79
+#define CLK_INFRA_AO_DEVICE_APC_SYNC 80
+#define CLK_INFRA_AO_PCIE_P1_PERI_26M 81
+#define CLK_INFRA_AO_SPIS0 82
+#define CLK_INFRA_AO_SPIS1 83
+#define CLK_INFRA_AO_133M_M_PERI 84
+#define CLK_INFRA_AO_66M_M_PERI 85
+#define CLK_INFRA_AO_PCIE_PL_P_250M_P0 86
+#define CLK_INFRA_AO_PCIE_PL_P_250M_P1 87
+#define CLK_INFRA_AO_PCIE_P1_TL_96M 88
+#define CLK_INFRA_AO_AES_MSDCFDE_0P 89
+#define CLK_INFRA_AO_UFS_TX_SYMBOL 90
+#define CLK_INFRA_AO_UFS_RX_SYMBOL 91
+#define CLK_INFRA_AO_UFS_RX_SYMBOL1 92
+#define CLK_INFRA_AO_PERI_UFS_MEM_SUB 93
+#define CLK_INFRA_AO_NR_CLK 94
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_NNAPLL 0
+#define CLK_APMIXED_RESPLL 1
+#define CLK_APMIXED_ETHPLL 2
+#define CLK_APMIXED_MSDCPLL 3
+#define CLK_APMIXED_TVDPLL1 4
+#define CLK_APMIXED_TVDPLL2 5
+#define CLK_APMIXED_MMPLL 6
+#define CLK_APMIXED_MAINPLL 7
+#define CLK_APMIXED_VDECPLL 8
+#define CLK_APMIXED_IMGPLL 9
+#define CLK_APMIXED_UNIVPLL 10
+#define CLK_APMIXED_HDMIPLL1 11
+#define CLK_APMIXED_HDMIPLL2 12
+#define CLK_APMIXED_HDMIRX_APLL 13
+#define CLK_APMIXED_USB1PLL 14
+#define CLK_APMIXED_ADSPPLL 15
+#define CLK_APMIXED_APLL1 16
+#define CLK_APMIXED_APLL2 17
+#define CLK_APMIXED_APLL3 18
+#define CLK_APMIXED_APLL4 19
+#define CLK_APMIXED_APLL5 20
+#define CLK_APMIXED_MFGPLL 21
+#define CLK_APMIXED_DGIPLL 22
+#define CLK_APMIXED_PLL_SSUSB26M 23
+#define CLK_APMIXED_NR_CLK 24
+
+/* SCP_ADSP */
+
+#define CLK_SCP_ADSP_AUDIODSP 0
+#define CLK_SCP_ADSP_NR_CLK 1
+
+/* PERICFG_AO */
+
+#define CLK_PERI_AO_ETHERNET 0
+#define CLK_PERI_AO_ETHERNET_BUS 1
+#define CLK_PERI_AO_FLASHIF_BUS 2
+#define CLK_PERI_AO_FLASHIF_FLASH 3
+#define CLK_PERI_AO_SSUSB_1P_BUS 4
+#define CLK_PERI_AO_SSUSB_1P_XHCI 5
+#define CLK_PERI_AO_SSUSB_2P_BUS 6
+#define CLK_PERI_AO_SSUSB_2P_XHCI 7
+#define CLK_PERI_AO_SSUSB_3P_BUS 8
+#define CLK_PERI_AO_SSUSB_3P_XHCI 9
+#define CLK_PERI_AO_SPINFI 10
+#define CLK_PERI_AO_ETHERNET_MAC 11
+#define CLK_PERI_AO_NFI_H 12
+#define CLK_PERI_AO_FNFI1X 13
+#define CLK_PERI_AO_PCIE_P0_MEM 14
+#define CLK_PERI_AO_PCIE_P1_MEM 15
+#define CLK_PERI_AO_NR_CLK 16
+
+/* IMP_IIC_WRAP_S */
+
+#define CLK_IMP_IIC_WRAP_S_I2C5 0
+#define CLK_IMP_IIC_WRAP_S_I2C6 1
+#define CLK_IMP_IIC_WRAP_S_I2C7 2
+#define CLK_IMP_IIC_WRAP_S_NR_CLK 3
+
+/* IMP_IIC_WRAP_W */
+
+#define CLK_IMP_IIC_WRAP_W_I2C0 0
+#define CLK_IMP_IIC_WRAP_W_I2C1 1
+#define CLK_IMP_IIC_WRAP_W_I2C2 2
+#define CLK_IMP_IIC_WRAP_W_I2C3 3
+#define CLK_IMP_IIC_WRAP_W_I2C4 4
+#define CLK_IMP_IIC_WRAP_W_NR_CLK 5
+
+/* MFGCFG */
+
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_NR_CLK 1
+
+/* VPPSYS0 */
+
+#define CLK_VPP0_MDP_FG 0
+#define CLK_VPP0_STITCH 1
+#define CLK_VPP0_PADDING 2
+#define CLK_VPP0_MDP_TCC 3
+#define CLK_VPP0_WARP0_ASYNC_TX 4
+#define CLK_VPP0_WARP1_ASYNC_TX 5
+#define CLK_VPP0_MUTEX 6
+#define CLK_VPP0_VPP02VPP1_RELAY 7
+#define CLK_VPP0_VPP12VPP0_ASYNC 8
+#define CLK_VPP0_MMSYSRAM_TOP 9
+#define CLK_VPP0_MDP_AAL 10
+#define CLK_VPP0_MDP_RSZ 11
+#define CLK_VPP0_SMI_COMMON 12
+#define CLK_VPP0_GALS_VDO0_LARB0 13
+#define CLK_VPP0_GALS_VDO0_LARB1 14
+#define CLK_VPP0_GALS_VENCSYS 15
+#define CLK_VPP0_GALS_VENCSYS_CORE1 16
+#define CLK_VPP0_GALS_INFRA 17
+#define CLK_VPP0_GALS_CAMSYS 18
+#define CLK_VPP0_GALS_VPP1_LARB5 19
+#define CLK_VPP0_GALS_VPP1_LARB6 20
+#define CLK_VPP0_SMI_REORDER 21
+#define CLK_VPP0_SMI_IOMMU 22
+#define CLK_VPP0_GALS_IMGSYS_CAMSYS 23
+#define CLK_VPP0_MDP_RDMA 24
+#define CLK_VPP0_MDP_WROT 25
+#define CLK_VPP0_GALS_EMI0_EMI1 26
+#define CLK_VPP0_SMI_SUB_COMMON_REORDER 27
+#define CLK_VPP0_SMI_RSI 28
+#define CLK_VPP0_SMI_COMMON_LARB4 29
+#define CLK_VPP0_GALS_VDEC_VDEC_CORE1 30
+#define CLK_VPP0_GALS_VPP1_WPE 31
+#define CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1 32
+#define CLK_VPP0_FAKE_ENG 33
+#define CLK_VPP0_MDP_HDR 34
+#define CLK_VPP0_MDP_TDSHP 35
+#define CLK_VPP0_MDP_COLOR 36
+#define CLK_VPP0_MDP_OVL 37
+#define CLK_VPP0_WARP0_RELAY 38
+#define CLK_VPP0_WARP0_MDP_DL_ASYNC 39
+#define CLK_VPP0_WARP1_RELAY 40
+#define CLK_VPP0_WARP1_MDP_DL_ASYNC 41
+#define CLK_VPP0_NR_CLK 42
+
+/* WPESYS */
+
+#define CLK_WPE_VPP0 0
+#define CLK_WPE_VPP1 1
+#define CLK_WPE_SMI_LARB7 2
+#define CLK_WPE_SMI_LARB8 3
+#define CLK_WPE_EVENT_TX 4
+#define CLK_WPE_SMI_LARB7_P 5
+#define CLK_WPE_SMI_LARB8_P 6
+#define CLK_WPE_NR_CLK 7
+
+/* WPESYS_VPP0 */
+
+#define CLK_WPE_VPP0_VECI 0
+#define CLK_WPE_VPP0_VEC2I 1
+#define CLK_WPE_VPP0_VEC3I 2
+#define CLK_WPE_VPP0_WPEO 3
+#define CLK_WPE_VPP0_MSKO 4
+#define CLK_WPE_VPP0_VGEN 5
+#define CLK_WPE_VPP0_EXT 6
+#define CLK_WPE_VPP0_VFC 7
+#define CLK_WPE_VPP0_CACH0_TOP 8
+#define CLK_WPE_VPP0_CACH0_DMA 9
+#define CLK_WPE_VPP0_CACH1_TOP 10
+#define CLK_WPE_VPP0_CACH1_DMA 11
+#define CLK_WPE_VPP0_CACH2_TOP 12
+#define CLK_WPE_VPP0_CACH2_DMA 13
+#define CLK_WPE_VPP0_CACH3_TOP 14
+#define CLK_WPE_VPP0_CACH3_DMA 15
+#define CLK_WPE_VPP0_PSP 16
+#define CLK_WPE_VPP0_PSP2 17
+#define CLK_WPE_VPP0_SYNC 18
+#define CLK_WPE_VPP0_C24 19
+#define CLK_WPE_VPP0_MDP_CROP 20
+#define CLK_WPE_VPP0_ISP_CROP 21
+#define CLK_WPE_VPP0_TOP 22
+#define CLK_WPE_VPP0_NR_CLK 23
+
+/* WPESYS_VPP1 */
+
+#define CLK_WPE_VPP1_VECI 0
+#define CLK_WPE_VPP1_VEC2I 1
+#define CLK_WPE_VPP1_VEC3I 2
+#define CLK_WPE_VPP1_WPEO 3
+#define CLK_WPE_VPP1_MSKO 4
+#define CLK_WPE_VPP1_VGEN 5
+#define CLK_WPE_VPP1_EXT 6
+#define CLK_WPE_VPP1_VFC 7
+#define CLK_WPE_VPP1_CACH0_TOP 8
+#define CLK_WPE_VPP1_CACH0_DMA 9
+#define CLK_WPE_VPP1_CACH1_TOP 10
+#define CLK_WPE_VPP1_CACH1_DMA 11
+#define CLK_WPE_VPP1_CACH2_TOP 12
+#define CLK_WPE_VPP1_CACH2_DMA 13
+#define CLK_WPE_VPP1_CACH3_TOP 14
+#define CLK_WPE_VPP1_CACH3_DMA 15
+#define CLK_WPE_VPP1_PSP 16
+#define CLK_WPE_VPP1_PSP2 17
+#define CLK_WPE_VPP1_SYNC 18
+#define CLK_WPE_VPP1_C24 19
+#define CLK_WPE_VPP1_MDP_CROP 20
+#define CLK_WPE_VPP1_ISP_CROP 21
+#define CLK_WPE_VPP1_TOP 22
+#define CLK_WPE_VPP1_NR_CLK 23
+
+/* VPPSYS1 */
+
+#define CLK_VPP1_SVPP1_MDP_OVL 0
+#define CLK_VPP1_SVPP1_MDP_TCC 1
+#define CLK_VPP1_SVPP1_MDP_WROT 2
+#define CLK_VPP1_SVPP1_VPP_PAD 3
+#define CLK_VPP1_SVPP2_MDP_WROT 4
+#define CLK_VPP1_SVPP2_VPP_PAD 5
+#define CLK_VPP1_SVPP3_MDP_WROT 6
+#define CLK_VPP1_SVPP3_VPP_PAD 7
+#define CLK_VPP1_SVPP1_MDP_RDMA 8
+#define CLK_VPP1_SVPP1_MDP_FG 9
+#define CLK_VPP1_SVPP2_MDP_RDMA 10
+#define CLK_VPP1_SVPP2_MDP_FG 11
+#define CLK_VPP1_SVPP3_MDP_RDMA 12
+#define CLK_VPP1_SVPP3_MDP_FG 13
+#define CLK_VPP1_VPP_SPLIT 14
+#define CLK_VPP1_SVPP2_VDO0_DL_RELAY 15
+#define CLK_VPP1_SVPP1_MDP_TDSHP 16
+#define CLK_VPP1_SVPP1_MDP_COLOR 17
+#define CLK_VPP1_SVPP3_VDO1_DL_RELAY 18
+#define CLK_VPP1_SVPP2_VPP_MERGE 19
+#define CLK_VPP1_SVPP2_MDP_COLOR 20
+#define CLK_VPP1_VPPSYS1_GALS 21
+#define CLK_VPP1_SVPP3_VPP_MERGE 22
+#define CLK_VPP1_SVPP3_MDP_COLOR 23
+#define CLK_VPP1_VPPSYS1_LARB 24
+#define CLK_VPP1_SVPP1_MDP_RSZ 25
+#define CLK_VPP1_SVPP1_MDP_HDR 26
+#define CLK_VPP1_SVPP1_MDP_AAL 27
+#define CLK_VPP1_SVPP2_MDP_HDR 28
+#define CLK_VPP1_SVPP2_MDP_AAL 29
+#define CLK_VPP1_DL_ASYNC 30
+#define CLK_VPP1_LARB5_FAKE_ENG 31
+#define CLK_VPP1_SVPP3_MDP_HDR 32
+#define CLK_VPP1_SVPP3_MDP_AAL 33
+#define CLK_VPP1_SVPP2_VDO1_DL_RELAY 34
+#define CLK_VPP1_LARB6_FAKE_ENG 35
+#define CLK_VPP1_SVPP2_MDP_RSZ 36
+#define CLK_VPP1_SVPP3_MDP_RSZ 37
+#define CLK_VPP1_SVPP3_VDO0_DL_RELAY 38
+#define CLK_VPP1_DISP_MUTEX 39
+#define CLK_VPP1_SVPP2_MDP_TDSHP 40
+#define CLK_VPP1_SVPP3_MDP_TDSHP 41
+#define CLK_VPP1_VPP0_DL1_RELAY 42
+#define CLK_VPP1_HDMI_META 43
+#define CLK_VPP1_VPP_SPLIT_HDMI 44
+#define CLK_VPP1_DGI_IN 45
+#define CLK_VPP1_DGI_OUT 46
+#define CLK_VPP1_VPP_SPLIT_DGI 47
+#define CLK_VPP1_VPP0_DL_ASYNC 48
+#define CLK_VPP1_VPP0_DL_RELAY 49
+#define CLK_VPP1_VPP_SPLIT_26M 50
+#define CLK_VPP1_NR_CLK 51
+
+/* IMGSYS */
+
+#define CLK_IMG_LARB9 0
+#define CLK_IMG_TRAW0 1
+#define CLK_IMG_TRAW1 2
+#define CLK_IMG_TRAW2 3
+#define CLK_IMG_TRAW3 4
+#define CLK_IMG_DIP0 5
+#define CLK_IMG_WPE0 6
+#define CLK_IMG_IPE 7
+#define CLK_IMG_DIP1 8
+#define CLK_IMG_WPE1 9
+#define CLK_IMG_GALS 10
+#define CLK_IMG_NR_CLK 11
+
+/* IMGSYS1_DIP_TOP */
+
+#define CLK_IMG1_DIP_TOP_LARB10 0
+#define CLK_IMG1_DIP_TOP_DIP_TOP 1
+#define CLK_IMG1_DIP_TOP_NR_CLK 2
+
+/* IMGSYS1_DIP_NR */
+
+#define CLK_IMG1_DIP_NR_RESERVE 0
+#define CLK_IMG1_DIP_NR_DIP_NR 1
+#define CLK_IMG1_DIP_NR_NR_CLK 2
+
+/* IMGSYS1_WPE */
+
+#define CLK_IMG1_WPE_LARB11 0
+#define CLK_IMG1_WPE_WPE 1
+#define CLK_IMG1_WPE_NR_CLK 2
+
+/* IPESYS */
+
+#define CLK_IPE_DPE 0
+#define CLK_IPE_FDVT 1
+#define CLK_IPE_ME 2
+#define CLK_IPE_TOP 3
+#define CLK_IPE_SMI_LARB12 4
+#define CLK_IPE_NR_CLK 5
+
+/* CAMSYS */
+
+#define CLK_CAM_LARB13 0
+#define CLK_CAM_LARB14 1
+#define CLK_CAM_MAIN_CAM 2
+#define CLK_CAM_MAIN_CAMTG 3
+#define CLK_CAM_SENINF 4
+#define CLK_CAM_GCAMSVA 5
+#define CLK_CAM_GCAMSVB 6
+#define CLK_CAM_GCAMSVC 7
+#define CLK_CAM_SCAMSA 8
+#define CLK_CAM_SCAMSB 9
+#define CLK_CAM_CAMSV_TOP 10
+#define CLK_CAM_CAMSV_CQ 11
+#define CLK_CAM_ADL 12
+#define CLK_CAM_ASG 13
+#define CLK_CAM_PDA 14
+#define CLK_CAM_FAKE_ENG 15
+#define CLK_CAM_MAIN_MRAW0 16
+#define CLK_CAM_MAIN_MRAW1 17
+#define CLK_CAM_MAIN_MRAW2 18
+#define CLK_CAM_MAIN_MRAW3 19
+#define CLK_CAM_CAM2MM0_GALS 20
+#define CLK_CAM_CAM2MM1_GALS 21
+#define CLK_CAM_CAM2SYS_GALS 22
+#define CLK_CAM_NR_CLK 23
+
+/* CAMSYS_RAWA */
+
+#define CLK_CAM_RAWA_LARBX 0
+#define CLK_CAM_RAWA_CAM 1
+#define CLK_CAM_RAWA_CAMTG 2
+#define CLK_CAM_RAWA_NR_CLK 3
+
+/* CAMSYS_YUVA */
+
+#define CLK_CAM_YUVA_LARBX 0
+#define CLK_CAM_YUVA_CAM 1
+#define CLK_CAM_YUVA_CAMTG 2
+#define CLK_CAM_YUVA_NR_CLK 3
+
+/* CAMSYS_RAWB */
+
+#define CLK_CAM_RAWB_LARBX 0
+#define CLK_CAM_RAWB_CAM 1
+#define CLK_CAM_RAWB_CAMTG 2
+#define CLK_CAM_RAWB_NR_CLK 3
+
+/* CAMSYS_YUVB */
+
+#define CLK_CAM_YUVB_LARBX 0
+#define CLK_CAM_YUVB_CAM 1
+#define CLK_CAM_YUVB_CAMTG 2
+#define CLK_CAM_YUVB_NR_CLK 3
+
+/* CAMSYS_MRAW */
+
+#define CLK_CAM_MRAW_LARBX 0
+#define CLK_CAM_MRAW_CAMTG 1
+#define CLK_CAM_MRAW_MRAW0 2
+#define CLK_CAM_MRAW_MRAW1 3
+#define CLK_CAM_MRAW_MRAW2 4
+#define CLK_CAM_MRAW_MRAW3 5
+#define CLK_CAM_MRAW_NR_CLK 6
+
+/* CCUSYS */
+
+#define CLK_CCU_LARB18 0
+#define CLK_CCU_AHB 1
+#define CLK_CCU_CCU0 2
+#define CLK_CCU_CCU1 3
+#define CLK_CCU_NR_CLK 4
+
+/* VDECSYS_SOC */
+
+#define CLK_VDEC_SOC_LARB1 0
+#define CLK_VDEC_SOC_LAT 1
+#define CLK_VDEC_SOC_VDEC 2
+#define CLK_VDEC_SOC_NR_CLK 3
+
+/* VDECSYS */
+
+#define CLK_VDEC_LARB1 0
+#define CLK_VDEC_LAT 1
+#define CLK_VDEC_VDEC 2
+#define CLK_VDEC_NR_CLK 3
+
+/* VDECSYS_CORE1 */
+
+#define CLK_VDEC_CORE1_LARB1 0
+#define CLK_VDEC_CORE1_LAT 1
+#define CLK_VDEC_CORE1_VDEC 2
+#define CLK_VDEC_CORE1_NR_CLK 3
+
+/* APUSYS_PLL */
+
+#define CLK_APUSYS_PLL_APUPLL 0
+#define CLK_APUSYS_PLL_NPUPLL 1
+#define CLK_APUSYS_PLL_APUPLL1 2
+#define CLK_APUSYS_PLL_APUPLL2 3
+#define CLK_APUSYS_PLL_NR_CLK 4
+
+/* VENCSYS */
+
+#define CLK_VENC_LARB 0
+#define CLK_VENC_VENC 1
+#define CLK_VENC_JPGENC 2
+#define CLK_VENC_JPGDEC 3
+#define CLK_VENC_JPGDEC_C1 4
+#define CLK_VENC_GALS 5
+#define CLK_VENC_NR_CLK 6
+
+/* VENCSYS_CORE1 */
+
+#define CLK_VENC_CORE1_LARB 0
+#define CLK_VENC_CORE1_VENC 1
+#define CLK_VENC_CORE1_JPGENC 2
+#define CLK_VENC_CORE1_JPGDEC 3
+#define CLK_VENC_CORE1_JPGDEC_C1 4
+#define CLK_VENC_CORE1_GALS 5
+#define CLK_VENC_CORE1_NR_CLK 6
+
+/* VDOSYS0 */
+
+#define CLK_VDO0_DISP_OVL0 0
+#define CLK_VDO0_DISP_COLOR0 1
+#define CLK_VDO0_DISP_COLOR1 2
+#define CLK_VDO0_DISP_CCORR0 3
+#define CLK_VDO0_DISP_CCORR1 4
+#define CLK_VDO0_DISP_AAL0 5
+#define CLK_VDO0_DISP_AAL1 6
+#define CLK_VDO0_DISP_GAMMA0 7
+#define CLK_VDO0_DISP_GAMMA1 8
+#define CLK_VDO0_DISP_DITHER0 9
+#define CLK_VDO0_DISP_DITHER1 10
+#define CLK_VDO0_DISP_OVL1 11
+#define CLK_VDO0_DISP_WDMA0 12
+#define CLK_VDO0_DISP_WDMA1 13
+#define CLK_VDO0_DISP_RDMA0 14
+#define CLK_VDO0_DISP_RDMA1 15
+#define CLK_VDO0_DSI0 16
+#define CLK_VDO0_DSI1 17
+#define CLK_VDO0_DSC_WRAP0 18
+#define CLK_VDO0_VPP_MERGE0 19
+#define CLK_VDO0_DP_INTF0 20
+#define CLK_VDO0_DISP_MUTEX0 21
+#define CLK_VDO0_DISP_IL_ROT0 22
+#define CLK_VDO0_APB_BUS 23
+#define CLK_VDO0_FAKE_ENG0 24
+#define CLK_VDO0_FAKE_ENG1 25
+#define CLK_VDO0_DL_ASYNC0 26
+#define CLK_VDO0_DL_ASYNC1 27
+#define CLK_VDO0_DL_ASYNC2 28
+#define CLK_VDO0_DL_ASYNC3 29
+#define CLK_VDO0_DL_ASYNC4 30
+#define CLK_VDO0_DISP_MONITOR0 31
+#define CLK_VDO0_DISP_MONITOR1 32
+#define CLK_VDO0_DISP_MONITOR2 33
+#define CLK_VDO0_DISP_MONITOR3 34
+#define CLK_VDO0_DISP_MONITOR4 35
+#define CLK_VDO0_SMI_GALS 36
+#define CLK_VDO0_SMI_COMMON 37
+#define CLK_VDO0_SMI_EMI 38
+#define CLK_VDO0_SMI_IOMMU 39
+#define CLK_VDO0_SMI_LARB 40
+#define CLK_VDO0_SMI_RSI 41
+#define CLK_VDO0_DSI0_DSI 42
+#define CLK_VDO0_DSI1_DSI 43
+#define CLK_VDO0_DP_INTF0_DP_INTF 44
+#define CLK_VDO0_NR_CLK 45
+
+/* VDOSYS1 */
+
+#define CLK_VDO1_SMI_LARB2 0
+#define CLK_VDO1_SMI_LARB3 1
+#define CLK_VDO1_GALS 2
+#define CLK_VDO1_FAKE_ENG0 3
+#define CLK_VDO1_FAKE_ENG 4
+#define CLK_VDO1_MDP_RDMA0 5
+#define CLK_VDO1_MDP_RDMA1 6
+#define CLK_VDO1_MDP_RDMA2 7
+#define CLK_VDO1_MDP_RDMA3 8
+#define CLK_VDO1_VPP_MERGE0 9
+#define CLK_VDO1_VPP_MERGE1 10
+#define CLK_VDO1_VPP_MERGE2 11
+#define CLK_VDO1_VPP_MERGE3 12
+#define CLK_VDO1_VPP_MERGE4 13
+#define CLK_VDO1_VPP2_TO_VDO1_DL_ASYNC 14
+#define CLK_VDO1_VPP3_TO_VDO1_DL_ASYNC 15
+#define CLK_VDO1_DISP_MUTEX 16
+#define CLK_VDO1_MDP_RDMA4 17
+#define CLK_VDO1_MDP_RDMA5 18
+#define CLK_VDO1_MDP_RDMA6 19
+#define CLK_VDO1_MDP_RDMA7 20
+#define CLK_VDO1_DP_INTF0_MM 21
+#define CLK_VDO1_DPI0_MM 22
+#define CLK_VDO1_DPI1_MM 23
+#define CLK_VDO1_DISP_MONITOR 24
+#define CLK_VDO1_MERGE0_DL_ASYNC 25
+#define CLK_VDO1_MERGE1_DL_ASYNC 26
+#define CLK_VDO1_MERGE2_DL_ASYNC 27
+#define CLK_VDO1_MERGE3_DL_ASYNC 28
+#define CLK_VDO1_MERGE4_DL_ASYNC 29
+#define CLK_VDO1_VDO0_DSC_TO_VDO1_DL_ASYNC 30
+#define CLK_VDO1_VDO0_MERGE_TO_VDO1_DL_ASYNC 31
+#define CLK_VDO1_HDR_VDO_FE0 32
+#define CLK_VDO1_HDR_GFX_FE0 33
+#define CLK_VDO1_HDR_VDO_BE 34
+#define CLK_VDO1_HDR_VDO_FE1 35
+#define CLK_VDO1_HDR_GFX_FE1 36
+#define CLK_VDO1_DISP_MIXER 37
+#define CLK_VDO1_HDR_VDO_FE0_DL_ASYNC 38
+#define CLK_VDO1_HDR_VDO_FE1_DL_ASYNC 39
+#define CLK_VDO1_HDR_GFX_FE0_DL_ASYNC 40
+#define CLK_VDO1_HDR_GFX_FE1_DL_ASYNC 41
+#define CLK_VDO1_HDR_VDO_BE_DL_ASYNC 42
+#define CLK_VDO1_DPI0 43
+#define CLK_VDO1_DISP_MONITOR_DPI0 44
+#define CLK_VDO1_DPI1 45
+#define CLK_VDO1_DISP_MONITOR_DPI1 46
+#define CLK_VDO1_DPINTF 47
+#define CLK_VDO1_DISP_MONITOR_DPINTF 48
+#define CLK_VDO1_26M_SLOW 49
+#define CLK_VDO1_DPI1_HDMI 50
+#define CLK_VDO1_NR_CLK 51
+
+#endif /* _DT_BINDINGS_CLK_MT8195_H */
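Usage note: each block above (TOPCKGEN, INFRACFG_AO, APMIXEDSYS, and so on) is exposed as a separate clock provider, indices restart at 0 per block, and the trailing *_NR_CLK define is a count, not a selectable clock. A minimal consumer sketch, assuming the include path below and an INFRACFG_AO node labelled infracfg_ao with #clock-cells = <1>; the node name and compatible are illustrative, not taken from this patch:

	#include <dt-bindings/clock/mt8195-clk.h>	/* assumed include path */

	serial0: serial@11001100 {			/* hypothetical consumer */
		compatible = "vendor,example-uart";	/* hypothetical */
		/* the single specifier cell selects the clock by index */
		clocks = <&infracfg_ao CLK_INFRA_AO_UART0>;
		clock-names = "bus";
	};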
diff --git a/include/dt-bindings/clock/nuvoton,ma35d1-clk.h b/include/dt-bindings/clock/nuvoton,ma35d1-clk.h
new file mode 100644
index 000000000000..ba2d70f776a6
--- /dev/null
+++ b/include/dt-bindings/clock/nuvoton,ma35d1-clk.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Nuvoton Technologies.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_NUVOTON_MA35D1_CLK_H
+#define __DT_BINDINGS_CLOCK_NUVOTON_MA35D1_CLK_H
+
+/* external and internal oscillator clocks */
+#define HXT 0
+#define HXT_GATE 1
+#define LXT 2
+#define LXT_GATE 3
+#define HIRC 4
+#define HIRC_GATE 5
+#define LIRC 6
+#define LIRC_GATE 7
+/* PLLs */
+#define CAPLL 8
+#define SYSPLL 9
+#define DDRPLL 10
+#define APLL 11
+#define EPLL 12
+#define VPLL 13
+/* EPLL divider */
+#define EPLL_DIV2 14
+#define EPLL_DIV4 15
+#define EPLL_DIV8 16
+/* CPU clock, system clock, AXI, HCLK and PCLK */
+#define CA35CLK_MUX 17
+#define AXICLK_DIV2 18
+#define AXICLK_DIV4 19
+#define AXICLK_MUX 20
+#define SYSCLK0_MUX 21
+#define SYSCLK1_MUX 22
+#define SYSCLK1_DIV2 23
+#define HCLK0 24
+#define HCLK1 25
+#define HCLK2 26
+#define PCLK0 27
+#define PCLK1 28
+#define PCLK2 29
+#define HCLK3 30
+#define PCLK3 31
+#define PCLK4 32
+/* AXI and AHB peripheral clocks */
+#define USBPHY0 33
+#define USBPHY1 34
+#define DDR0_GATE 35
+#define DDR6_GATE 36
+#define CAN0_MUX 37
+#define CAN0_DIV 38
+#define CAN0_GATE 39
+#define CAN1_MUX 40
+#define CAN1_DIV 41
+#define CAN1_GATE 42
+#define CAN2_MUX 43
+#define CAN2_DIV 44
+#define CAN2_GATE 45
+#define CAN3_MUX 46
+#define CAN3_DIV 47
+#define CAN3_GATE 48
+#define SDH0_MUX 49
+#define SDH0_GATE 50
+#define SDH1_MUX 51
+#define SDH1_GATE 52
+#define NAND_GATE 53
+#define USBD_GATE 54
+#define USBH_GATE 55
+#define HUSBH0_GATE 56
+#define HUSBH1_GATE 57
+#define GFX_MUX 58
+#define GFX_GATE 59
+#define VC8K_GATE 60
+#define DCU_MUX 61
+#define DCU_GATE 62
+#define DCUP_DIV 63
+#define EMAC0_GATE 64
+#define EMAC1_GATE 65
+#define CCAP0_MUX 66
+#define CCAP0_DIV 67
+#define CCAP0_GATE 68
+#define CCAP1_MUX 69
+#define CCAP1_DIV 70
+#define CCAP1_GATE 71
+#define PDMA0_GATE 72
+#define PDMA1_GATE 73
+#define PDMA2_GATE 74
+#define PDMA3_GATE 75
+#define WH0_GATE 76
+#define WH1_GATE 77
+#define HWS_GATE 78
+#define EBI_GATE 79
+#define SRAM0_GATE 80
+#define SRAM1_GATE 81
+#define ROM_GATE 82
+#define TRA_GATE 83
+#define DBG_MUX 84
+#define DBG_GATE 85
+#define CKO_MUX 86
+#define CKO_DIV 87
+#define CKO_GATE 88
+#define GTMR_GATE 89
+#define GPA_GATE 90
+#define GPB_GATE 91
+#define GPC_GATE 92
+#define GPD_GATE 93
+#define GPE_GATE 94
+#define GPF_GATE 95
+#define GPG_GATE 96
+#define GPH_GATE 97
+#define GPI_GATE 98
+#define GPJ_GATE 99
+#define GPK_GATE 100
+#define GPL_GATE 101
+#define GPM_GATE 102
+#define GPN_GATE 103
+/* APB peripheral clocks */
+#define TMR0_MUX 104
+#define TMR0_GATE 105
+#define TMR1_MUX 106
+#define TMR1_GATE 107
+#define TMR2_MUX 108
+#define TMR2_GATE 109
+#define TMR3_MUX 110
+#define TMR3_GATE 111
+#define TMR4_MUX 112
+#define TMR4_GATE 113
+#define TMR5_MUX 114
+#define TMR5_GATE 115
+#define TMR6_MUX 116
+#define TMR6_GATE 117
+#define TMR7_MUX 118
+#define TMR7_GATE 119
+#define TMR8_MUX 120
+#define TMR8_GATE 121
+#define TMR9_MUX 122
+#define TMR9_GATE 123
+#define TMR10_MUX 124
+#define TMR10_GATE 125
+#define TMR11_MUX 126
+#define TMR11_GATE 127
+#define UART0_MUX 128
+#define UART0_DIV 129
+#define UART0_GATE 130
+#define UART1_MUX 131
+#define UART1_DIV 132
+#define UART1_GATE 133
+#define UART2_MUX 134
+#define UART2_DIV 135
+#define UART2_GATE 136
+#define UART3_MUX 137
+#define UART3_DIV 138
+#define UART3_GATE 139
+#define UART4_MUX 140
+#define UART4_DIV 141
+#define UART4_GATE 142
+#define UART5_MUX 143
+#define UART5_DIV 144
+#define UART5_GATE 145
+#define UART6_MUX 146
+#define UART6_DIV 147
+#define UART6_GATE 148
+#define UART7_MUX 149
+#define UART7_DIV 150
+#define UART7_GATE 151
+#define UART8_MUX 152
+#define UART8_DIV 153
+#define UART8_GATE 154
+#define UART9_MUX 155
+#define UART9_DIV 156
+#define UART9_GATE 157
+#define UART10_MUX 158
+#define UART10_DIV 159
+#define UART10_GATE 160
+#define UART11_MUX 161
+#define UART11_DIV 162
+#define UART11_GATE 163
+#define UART12_MUX 164
+#define UART12_DIV 165
+#define UART12_GATE 166
+#define UART13_MUX 167
+#define UART13_DIV 168
+#define UART13_GATE 169
+#define UART14_MUX 170
+#define UART14_DIV 171
+#define UART14_GATE 172
+#define UART15_MUX 173
+#define UART15_DIV 174
+#define UART15_GATE 175
+#define UART16_MUX 176
+#define UART16_DIV 177
+#define UART16_GATE 178
+#define RTC_GATE 179
+#define DDR_GATE 180
+#define KPI_MUX 181
+#define KPI_DIV 182
+#define KPI_GATE 183
+#define I2C0_GATE 184
+#define I2C1_GATE 185
+#define I2C2_GATE 186
+#define I2C3_GATE 187
+#define I2C4_GATE 188
+#define I2C5_GATE 189
+#define QSPI0_MUX 190
+#define QSPI0_GATE 191
+#define QSPI1_MUX 192
+#define QSPI1_GATE 193
+#define SMC0_MUX 194
+#define SMC0_DIV 195
+#define SMC0_GATE 196
+#define SMC1_MUX 197
+#define SMC1_DIV 198
+#define SMC1_GATE 199
+#define WDT0_MUX 200
+#define WDT0_GATE 201
+#define WDT1_MUX 202
+#define WDT1_GATE 203
+#define WDT2_MUX 204
+#define WDT2_GATE 205
+#define WWDT0_MUX 206
+#define WWDT1_MUX 207
+#define WWDT2_MUX 208
+#define EPWM0_GATE 209
+#define EPWM1_GATE 210
+#define EPWM2_GATE 211
+#define I2S0_MUX 212
+#define I2S0_GATE 213
+#define I2S1_MUX 214
+#define I2S1_GATE 215
+#define SSMCC_GATE 216
+#define SSPCC_GATE 217
+#define SPI0_MUX 218
+#define SPI0_GATE 219
+#define SPI1_MUX 220
+#define SPI1_GATE 221
+#define SPI2_MUX 222
+#define SPI2_GATE 223
+#define SPI3_MUX 224
+#define SPI3_GATE 225
+#define ECAP0_GATE 226
+#define ECAP1_GATE 227
+#define ECAP2_GATE 228
+#define QEI0_GATE 229
+#define QEI1_GATE 230
+#define QEI2_GATE 231
+#define ADC_DIV 232
+#define ADC_GATE 233
+#define EADC_DIV 234
+#define EADC_GATE 235
+#define CLK_MAX_IDX 236
+
+#endif /* __DT_BINDINGS_CLOCK_NUVOTON_MA35D1_CLK_H */
diff --git a/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h b/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
index f21522605b94..3e0a9b68933d 100644
--- a/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
+++ b/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Nuvoton NPCM7xx Clock Generator binding
- * clock binding number for all clocks supportted by nuvoton,npcm7xx-clk
+ * clock binding number for all clocks supported by nuvoton,npcm7xx-clk
*
* Copyright (C) 2018 Nuvoton Technologies tali.perry@nuvoton.com
*
diff --git a/include/dt-bindings/clock/nuvoton,npcm845-clk.h b/include/dt-bindings/clock/nuvoton,npcm845-clk.h
new file mode 100644
index 000000000000..e5cce08b00e1
--- /dev/null
+++ b/include/dt-bindings/clock/nuvoton,npcm845-clk.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021 Nuvoton Technologies.
+ * Author: Tomer Maimon <tomer.maimon@nuvoton.com>
+ *
+ * Device Tree binding constants for NPCM8XX clock controller.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_NPCM8XX_H
+#define __DT_BINDINGS_CLOCK_NPCM8XX_H
+
+#define NPCM8XX_CLK_CPU 0
+#define NPCM8XX_CLK_GFX_PIXEL 1
+#define NPCM8XX_CLK_MC 2
+#define NPCM8XX_CLK_ADC 3
+#define NPCM8XX_CLK_AHB 4
+#define NPCM8XX_CLK_TIMER 5
+#define NPCM8XX_CLK_UART 6
+#define NPCM8XX_CLK_UART2 7
+#define NPCM8XX_CLK_MMC 8
+#define NPCM8XX_CLK_SPI3 9
+#define NPCM8XX_CLK_PCI 10
+#define NPCM8XX_CLK_AXI 11
+#define NPCM8XX_CLK_APB4 12
+#define NPCM8XX_CLK_APB3 13
+#define NPCM8XX_CLK_APB2 14
+#define NPCM8XX_CLK_APB1 15
+#define NPCM8XX_CLK_APB5 16
+#define NPCM8XX_CLK_CLKOUT 17
+#define NPCM8XX_CLK_GFX 18
+#define NPCM8XX_CLK_SU 19
+#define NPCM8XX_CLK_SU48 20
+#define NPCM8XX_CLK_SDHC 21
+#define NPCM8XX_CLK_SPI0 22
+#define NPCM8XX_CLK_SPI1 23
+#define NPCM8XX_CLK_SPIX 24
+#define NPCM8XX_CLK_RG 25
+#define NPCM8XX_CLK_RCP 26
+#define NPCM8XX_CLK_PRE_ADC 27
+#define NPCM8XX_CLK_ATB 28
+#define NPCM8XX_CLK_PRE_CLK 29
+#define NPCM8XX_CLK_TH 30
+#define NPCM8XX_CLK_REFCLK 31
+#define NPCM8XX_CLK_SYSBYPCK 32
+#define NPCM8XX_CLK_MCBYPCK 33
+
+#define NPCM8XX_NUM_CLOCKS (NPCM8XX_CLK_MCBYPCK + 1)
+
+#endif
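Deriving NPCM8XX_NUM_CLOCKS from the highest ID keeps the count correct as IDs are appended; consumers never reference it and instead select clocks by index, as in this sketch (the clk label and serial node are assumptions, not from this patch):

	#include <dt-bindings/clock/nuvoton,npcm845-clk.h>

	serial@f0000000 {			/* hypothetical consumer node */
		/* assumes a clock controller labelled clk with #clock-cells = <1> */
		clocks = <&clk NPCM8XX_CLK_UART>;
	};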
diff --git a/include/dt-bindings/clock/nvidia,tegra264.h b/include/dt-bindings/clock/nvidia,tegra264.h
new file mode 100644
index 000000000000..0fc2ad5e6cef
--- /dev/null
+++ b/include/dt-bindings/clock/nvidia,tegra264.h
@@ -0,0 +1,466 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2022-2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef DT_BINDINGS_CLOCK_NVIDIA_TEGRA264_H
+#define DT_BINDINGS_CLOCK_NVIDIA_TEGRA264_H
+
+#define TEGRA264_CLK_OSC 1
+#define TEGRA264_CLK_CLK_S 2
+#define TEGRA264_CLK_JTAG_REG 3
+#define TEGRA264_CLK_SPLL 4
+#define TEGRA264_CLK_SPLL_OUT0 5
+#define TEGRA264_CLK_SPLL_OUT1 6
+#define TEGRA264_CLK_SPLL_OUT2 7
+#define TEGRA264_CLK_SPLL_OUT3 8
+#define TEGRA264_CLK_SPLL_OUT4 9
+#define TEGRA264_CLK_SPLL_OUT5 10
+#define TEGRA264_CLK_SPLL_OUT6 11
+#define TEGRA264_CLK_SPLL_OUT7 12
+#define TEGRA264_CLK_AON_I2C 13
+#define TEGRA264_CLK_HOST1X 14
+#define TEGRA264_CLK_ISP 15
+#define TEGRA264_CLK_ISP1 16
+#define TEGRA264_CLK_ISP_ROOT 17
+#define TEGRA264_CLK_NAFLL_PVA0_CORE 18
+#define TEGRA264_CLK_NAFLL_PVA0_VPS 19
+#define TEGRA264_CLK_NVCSI 20
+#define TEGRA264_CLK_NVCSILP 21
+#define TEGRA264_CLK_PLLP_OUT0 22
+#define TEGRA264_CLK_PVA0_CPU_AXI 23
+#define TEGRA264_CLK_PVA0_VPS 24
+#define TEGRA264_CLK_PWM10 25
+#define TEGRA264_CLK_PWM2 26
+#define TEGRA264_CLK_PWM3 27
+#define TEGRA264_CLK_PWM4 28
+#define TEGRA264_CLK_PWM5 29
+#define TEGRA264_CLK_PWM9 30
+#define TEGRA264_CLK_QSPI0 31
+#define TEGRA264_CLK_QSPI0_2X_PM 32
+#define TEGRA264_CLK_RCE1_CPU 33
+#define TEGRA264_CLK_RCE1_NIC 34
+#define TEGRA264_CLK_RCE_CPU 35
+#define TEGRA264_CLK_RCE_NIC 36
+#define TEGRA264_CLK_SE 37
+#define TEGRA264_CLK_SEU1 38
+#define TEGRA264_CLK_SEU2 39
+#define TEGRA264_CLK_SEU3 40
+#define TEGRA264_CLK_SE_ROOT 41
+#define TEGRA264_CLK_SPI1 42
+#define TEGRA264_CLK_SPI2 43
+#define TEGRA264_CLK_SPI3 44
+#define TEGRA264_CLK_SPI4 45
+#define TEGRA264_CLK_SPI5 46
+#define TEGRA264_CLK_TOP_I2C 47
+#define TEGRA264_CLK_TSEC 48
+#define TEGRA264_CLK_TSEC_PKA 49
+#define TEGRA264_CLK_UART0 50
+#define TEGRA264_CLK_UART10 51
+#define TEGRA264_CLK_UART11 52
+#define TEGRA264_CLK_UART4 53
+#define TEGRA264_CLK_UART5 54
+#define TEGRA264_CLK_UART8 55
+#define TEGRA264_CLK_UART9 56
+#define TEGRA264_CLK_VI 57
+#define TEGRA264_CLK_VI1 58
+#define TEGRA264_CLK_VIC 59
+#define TEGRA264_CLK_VI_ROOT 60
+#define TEGRA264_CLK_DISPPLL 61
+#define TEGRA264_CLK_SPPLL0 62
+#define TEGRA264_CLK_SPPLL0_CLKOUT1A 63
+#define TEGRA264_CLK_SPPLL0_CLKOUT2A 64
+#define TEGRA264_CLK_SPPLL1 65
+#define TEGRA264_CLK_VPLL0 66
+#define TEGRA264_CLK_VPLL1 67
+#define TEGRA264_CLK_VPLL2 68
+#define TEGRA264_CLK_VPLL3 69
+#define TEGRA264_CLK_VPLL4 70
+#define TEGRA264_CLK_VPLL5 71
+#define TEGRA264_CLK_VPLL6 72
+#define TEGRA264_CLK_VPLL7 73
+#define TEGRA264_CLK_RG0_DIV 74
+#define TEGRA264_CLK_RG1_DIV 75
+#define TEGRA264_CLK_RG2_DIV 76
+#define TEGRA264_CLK_RG3_DIV 77
+#define TEGRA264_CLK_RG4_DIV 78
+#define TEGRA264_CLK_RG5_DIV 79
+#define TEGRA264_CLK_RG6_DIV 80
+#define TEGRA264_CLK_RG7_DIV 81
+#define TEGRA264_CLK_RG0 82
+#define TEGRA264_CLK_RG1 83
+#define TEGRA264_CLK_RG2 84
+#define TEGRA264_CLK_RG3 85
+#define TEGRA264_CLK_RG4 86
+#define TEGRA264_CLK_RG5 87
+#define TEGRA264_CLK_RG6 88
+#define TEGRA264_CLK_RG7 89
+#define TEGRA264_CLK_DISP 90
+#define TEGRA264_CLK_DSC 91
+#define TEGRA264_CLK_DSC_ROOT 92
+#define TEGRA264_CLK_HUB 93
+#define TEGRA264_CLK_VPLLX_SOR0_MUXED 94
+#define TEGRA264_CLK_VPLLX_SOR1_MUXED 95
+#define TEGRA264_CLK_VPLLX_SOR2_MUXED 96
+#define TEGRA264_CLK_VPLLX_SOR3_MUXED 97
+#define TEGRA264_CLK_LINKA_SYM 98
+#define TEGRA264_CLK_LINKB_SYM 99
+#define TEGRA264_CLK_LINKC_SYM 100
+#define TEGRA264_CLK_LINKD_SYM 101
+#define TEGRA264_CLK_PRE_SOR0 102
+#define TEGRA264_CLK_PRE_SOR1 103
+#define TEGRA264_CLK_PRE_SOR2 104
+#define TEGRA264_CLK_PRE_SOR3 105
+#define TEGRA264_CLK_SOR0_PLL_REF 106
+#define TEGRA264_CLK_SOR1_PLL_REF 107
+#define TEGRA264_CLK_SOR2_PLL_REF 108
+#define TEGRA264_CLK_SOR3_PLL_REF 109
+#define TEGRA264_CLK_SOR0_PAD 110
+#define TEGRA264_CLK_SOR1_PAD 111
+#define TEGRA264_CLK_SOR2_PAD 112
+#define TEGRA264_CLK_SOR3_PAD 113
+#define TEGRA264_CLK_SOR0_REF 114
+#define TEGRA264_CLK_SOR1_REF 115
+#define TEGRA264_CLK_SOR2_REF 116
+#define TEGRA264_CLK_SOR3_REF 117
+#define TEGRA264_CLK_SOR0_DIV 118
+#define TEGRA264_CLK_SOR1_DIV 119
+#define TEGRA264_CLK_SOR2_DIV 120
+#define TEGRA264_CLK_SOR3_DIV 121
+#define TEGRA264_CLK_SOR0 122
+#define TEGRA264_CLK_SOR1 123
+#define TEGRA264_CLK_SOR2 124
+#define TEGRA264_CLK_SOR3 125
+#define TEGRA264_CLK_SF0_SOR 126
+#define TEGRA264_CLK_SF1_SOR 127
+#define TEGRA264_CLK_SF2_SOR 128
+#define TEGRA264_CLK_SF3_SOR 129
+#define TEGRA264_CLK_SF4_SOR 130
+#define TEGRA264_CLK_SF5_SOR 131
+#define TEGRA264_CLK_SF6_SOR 132
+#define TEGRA264_CLK_SF7_SOR 133
+#define TEGRA264_CLK_SF0 134
+#define TEGRA264_CLK_SF1 135
+#define TEGRA264_CLK_SF2 136
+#define TEGRA264_CLK_SF3 137
+#define TEGRA264_CLK_SF4 138
+#define TEGRA264_CLK_SF5 139
+#define TEGRA264_CLK_SF6 140
+#define TEGRA264_CLK_SF7 141
+#define TEGRA264_CLK_MAUD 142
+#define TEGRA264_CLK_AZA_2XBIT 143
+#define TEGRA264_CLK_DCE_CPU 144
+#define TEGRA264_CLK_DCE_NIC 145
+#define TEGRA264_CLK_PLLC4 146
+#define TEGRA264_CLK_PLLC4_OUT0 147
+#define TEGRA264_CLK_PLLC4_OUT1 148
+#define TEGRA264_CLK_PLLC4_MUXED 149
+#define TEGRA264_CLK_SDMMC1 150
+#define TEGRA264_CLK_SDMMC_LEGACY_TM 151
+#define TEGRA264_CLK_PLLC0 152
+#define TEGRA264_CLK_NAFLL_BPMP 153
+#define TEGRA264_CLK_PLLP_OUT_PDIV 154
+#define TEGRA264_CLK_DISP_ROOT 155
+#define TEGRA264_CLK_ADSP 156
+#define TEGRA264_CLK_PLLA 157
+#define TEGRA264_CLK_PLLA1 158
+#define TEGRA264_CLK_PLLA1_OUT1 159
+#define TEGRA264_CLK_PLLAON 160
+#define TEGRA264_CLK_PLLAON_APE 161
+#define TEGRA264_CLK_PLLA_OUT0 162
+#define TEGRA264_CLK_AHUB 163
+#define TEGRA264_CLK_APE 164
+#define TEGRA264_CLK_I2S1_SCLK_IN 165
+#define TEGRA264_CLK_I2S2_SCLK_IN 166
+#define TEGRA264_CLK_I2S3_SCLK_IN 167
+#define TEGRA264_CLK_I2S4_SCLK_IN 168
+#define TEGRA264_CLK_I2S5_SCLK_IN 169
+#define TEGRA264_CLK_I2S6_SCLK_IN 170
+#define TEGRA264_CLK_I2S7_SCLK_IN 171
+#define TEGRA264_CLK_I2S8_SCLK_IN 172
+#define TEGRA264_CLK_I2S9_SCLK_IN 173
+#define TEGRA264_CLK_I2S1_AUDIO_SYNC 174
+#define TEGRA264_CLK_I2S2_AUDIO_SYNC 175
+#define TEGRA264_CLK_I2S3_AUDIO_SYNC 176
+#define TEGRA264_CLK_I2S4_AUDIO_SYNC 177
+#define TEGRA264_CLK_I2S5_AUDIO_SYNC 178
+#define TEGRA264_CLK_I2S6_AUDIO_SYNC 179
+#define TEGRA264_CLK_I2S7_AUDIO_SYNC 180
+#define TEGRA264_CLK_I2S8_AUDIO_SYNC 181
+#define TEGRA264_CLK_DMIC1_AUDIO_SYNC 182
+#define TEGRA264_CLK_DSPK1_AUDIO_SYNC 183
+#define TEGRA264_CLK_I2S1 184
+#define TEGRA264_CLK_I2S2 185
+#define TEGRA264_CLK_I2S3 186
+#define TEGRA264_CLK_I2S4 187
+#define TEGRA264_CLK_I2S5 188
+#define TEGRA264_CLK_I2S6 189
+#define TEGRA264_CLK_I2S7 190
+#define TEGRA264_CLK_I2S8 191
+#define TEGRA264_CLK_I2S9 192
+#define TEGRA264_CLK_DMIC1 193
+#define TEGRA264_CLK_DMIC5 194
+#define TEGRA264_CLK_DSPK1 195
+#define TEGRA264_CLK_AON_CPU 196
+#define TEGRA264_CLK_AON_NIC 197
+#define TEGRA264_CLK_BPMP 198
+#define TEGRA264_CLK_AXI_CBB 199
+#define TEGRA264_CLK_FUSE 200
+#define TEGRA264_CLK_TSENSE 201
+#define TEGRA264_CLK_CSITE 202
+#define TEGRA264_CLK_HCSITE 203
+#define TEGRA264_CLK_DBGAPB 204
+#define TEGRA264_CLK_LA 205
+#define TEGRA264_CLK_PLLREFGP 206
+#define TEGRA264_CLK_PLLE0 207
+#define TEGRA264_CLK_UPHY0_PLL0_XDIG 208
+#define TEGRA264_CLK_EQOS_APP 209
+#define TEGRA264_CLK_EQOS_MAC 210
+#define TEGRA264_CLK_EQOS_MACSEC 211
+#define TEGRA264_CLK_EQOS_TX_PCS 212
+#define TEGRA264_CLK_MGBES_PTP_REF 213
+#define TEGRA264_CLK_MGBE0_UPHY1_PLL_XDIG 214
+#define TEGRA264_CLK_MGBE0_TX_PCS 215
+#define TEGRA264_CLK_MGBE0_MAC 216
+#define TEGRA264_CLK_MGBE0_MACSEC 217
+#define TEGRA264_CLK_MGBE0_APP 218
+#define TEGRA264_CLK_MGBE1_UPHY1_PLL_XDIG 219
+#define TEGRA264_CLK_MGBE1_TX_PCS 220
+#define TEGRA264_CLK_MGBE1_MAC 221
+#define TEGRA264_CLK_MGBE1_MACSEC 222
+#define TEGRA264_CLK_MGBE1_APP 223
+#define TEGRA264_CLK_MGBE2_UPHY1_PLL_XDIG 224
+#define TEGRA264_CLK_MGBE2_TX_PCS 225
+#define TEGRA264_CLK_MGBE2_MAC 226
+#define TEGRA264_CLK_MGBE2_MACSEC 227
+#define TEGRA264_CLK_MGBE2_APP 228
+#define TEGRA264_CLK_MGBE3_UPHY1_PLL_XDIG 229
+#define TEGRA264_CLK_MGBE3_TX_PCS 230
+#define TEGRA264_CLK_MGBE3_MAC 231
+#define TEGRA264_CLK_MGBE3_MACSEC 232
+#define TEGRA264_CLK_MGBE3_APP 233
+#define TEGRA264_CLK_PLLREFUFS 234
+#define TEGRA264_CLK_PLLREFUFS_CLKOUT624 235
+#define TEGRA264_CLK_PLLREFUFS_REFCLKOUT 236
+#define TEGRA264_CLK_PLLREFUFS_UFSDEV_REFCLKOUT 237
+#define TEGRA264_CLK_UFSHC_CG_SYS 238
+#define TEGRA264_CLK_MPHY_L0_RX_LS_BIT_DIV 239
+#define TEGRA264_CLK_MPHY_L0_RX_LS_BIT 240
+#define TEGRA264_CLK_MPHY_L0_RX_LS_SYMB_DIV 241
+#define TEGRA264_CLK_MPHY_L0_RX_HS_SYMB_DIV 242
+#define TEGRA264_CLK_MPHY_L0_RX_SYMB 243
+#define TEGRA264_CLK_MPHY_L0_UPHY_TX_FIFO 244
+#define TEGRA264_CLK_MPHY_L0_TX_LS_3XBIT_DIV 245
+#define TEGRA264_CLK_MPHY_L0_TX_LS_SYMB_DIV 246
+#define TEGRA264_CLK_UPHY0_PLL4_XDIG 247
+#define TEGRA264_CLK_MPHY_L0_TX_HS_SYMB_DIV 248
+#define TEGRA264_CLK_MPHY_L0_TX_SYMB 249
+#define TEGRA264_CLK_MPHY_L0_TX_LS_3XBIT 250
+#define TEGRA264_CLK_MPHY_L0_RX_ANA 251
+#define TEGRA264_CLK_MPHY_L1_RX_ANA 252
+#define TEGRA264_CLK_MPHY_TX_1MHZ_REF 253
+#define TEGRA264_CLK_MPHY_CORE_PLL_FIXED 254
+#define TEGRA264_CLK_MPHY_IOBIST 255
+#define TEGRA264_CLK_UFSHC_CG_SYS_DIV 256
+#define TEGRA264_CLK_XUSB1_CORE 257
+#define TEGRA264_CLK_XUSB1_FALCON 258
+#define TEGRA264_CLK_XUSB1_FS 259
+#define TEGRA264_CLK_XUSB1_SS 260
+#define TEGRA264_CLK_UPHY0_USB_P0_RX_CORE 261
+#define TEGRA264_CLK_UPHY0_USB_P1_RX_CORE 262
+#define TEGRA264_CLK_UPHY0_USB_P2_RX_CORE 263
+#define TEGRA264_CLK_UPHY0_USB_P3_RX_CORE 264
+#define TEGRA264_CLK_XUSB1_CLK480M_NVWRAP_CORE 265
+#define TEGRA264_CLK_XUSB1_CORE_HOST 266
+#define TEGRA264_CLK_XUSB1_CORE_DEV 267
+#define TEGRA264_CLK_XUSB1_CORE_SUPERSPEED 268
+#define TEGRA264_CLK_XUSB1_FALCON_HOST 269
+#define TEGRA264_CLK_XUSB1_FALCON_SUPERSPEED 270
+#define TEGRA264_CLK_XUSB1_FS_HOST 271
+#define TEGRA264_CLK_XUSB1_FS_DEV 272
+#define TEGRA264_CLK_XUSB1_HS_HSICP 273
+#define TEGRA264_CLK_XUSB1_SS_DEV 274
+#define TEGRA264_CLK_XUSB1_SS_SUPERSPEED 275
+#define TEGRA264_CLK_AON_TOUCH 276
+#define TEGRA264_CLK_AUD_MCLK 277
+#define TEGRA264_CLK_EXTPERIPH1 278
+#define TEGRA264_CLK_EXTPERIPH2 279
+#define TEGRA264_CLK_EXTPERIPH3 280
+#define TEGRA264_CLK_EXTPERIPH4 281
+#define TEGRA264_CLK_JTAG_REG_UNGATED 282
+#define TEGRA264_CLK_IST_BUS 283
+#define TEGRA264_CLK_IST_BUS_RIST_MCC 284
+#define TEGRA264_CLK_MATHS_SEC_RIST 285
+#define TEGRA264_CLK_NAFLL_IST 286
+#define TEGRA264_CLK_RIST_ROOT 287
+#define TEGRA264_CLK_IST_CONTROLLER_RIST 288
+#define TEGRA264_CLK_MSS_ENCRYPT 289
+#define TEGRA264_CLK_EMC 290
+#define TEGRA264_CLK_SPPLL0_CLKOUT100 291
+#define TEGRA264_CLK_SPPLL0_CLKOUT270 292
+#define TEGRA264_CLK_SPPLL1_CLKOUT100 293
+#define TEGRA264_CLK_SPPLL1_CLKOUT270 294
+#define TEGRA264_CLK_DP_LINKA_REF 295
+#define TEGRA264_CLK_DP_LINKB_REF 296
+#define TEGRA264_CLK_DP_LINKC_REF 297
+#define TEGRA264_CLK_DP_LINKD_REF 298
+#define TEGRA264_CLK_PLLNVCSI 299
+#define TEGRA264_CLK_PLLBPMPCAM 300
+#define TEGRA264_CLK_UTMI_PLL1 301
+#define TEGRA264_CLK_UTMI_PLL1_CLKOUT48 302
+#define TEGRA264_CLK_UTMI_PLL1_CLKOUT60 303
+#define TEGRA264_CLK_UTMI_PLL1_CLKOUT480 304
+#define TEGRA264_CLK_NAFLL_ISP 305
+#define TEGRA264_CLK_NAFLL_RCE 306
+#define TEGRA264_CLK_NAFLL_RCE1 307
+#define TEGRA264_CLK_NAFLL_SE 308
+#define TEGRA264_CLK_NAFLL_VI 309
+#define TEGRA264_CLK_NAFLL_VIC 310
+#define TEGRA264_CLK_NAFLL_DCE 311
+#define TEGRA264_CLK_NAFLL_TSEC 312
+#define TEGRA264_CLK_NAFLL_CPAIR0 313
+#define TEGRA264_CLK_NAFLL_CPAIR1 314
+#define TEGRA264_CLK_NAFLL_CPAIR2 315
+#define TEGRA264_CLK_NAFLL_CPAIR3 316
+#define TEGRA264_CLK_NAFLL_CPAIR4 317
+#define TEGRA264_CLK_NAFLL_CPAIR5 318
+#define TEGRA264_CLK_NAFLL_CPAIR6 319
+#define TEGRA264_CLK_NAFLL_GPU_SYS 320
+#define TEGRA264_CLK_NAFLL_GPU_NVD 321
+#define TEGRA264_CLK_NAFLL_GPU_UPROC 322
+#define TEGRA264_CLK_NAFLL_GPU_GPC0 323
+#define TEGRA264_CLK_NAFLL_GPU_GPC1 324
+#define TEGRA264_CLK_NAFLL_GPU_GPC2 325
+#define TEGRA264_CLK_SOR_LINKA_INPUT 326
+#define TEGRA264_CLK_SOR_LINKB_INPUT 327
+#define TEGRA264_CLK_SOR_LINKC_INPUT 328
+#define TEGRA264_CLK_SOR_LINKD_INPUT 329
+#define TEGRA264_CLK_SOR_LINKA_AFIFO 330
+#define TEGRA264_CLK_SOR_LINKB_AFIFO 331
+#define TEGRA264_CLK_SOR_LINKC_AFIFO 332
+#define TEGRA264_CLK_SOR_LINKD_AFIFO 333
+#define TEGRA264_CLK_I2S1_PAD_M 334
+#define TEGRA264_CLK_I2S2_PAD_M 335
+#define TEGRA264_CLK_I2S3_PAD_M 336
+#define TEGRA264_CLK_I2S4_PAD_M 337
+#define TEGRA264_CLK_I2S5_PAD_M 338
+#define TEGRA264_CLK_I2S6_PAD_M 339
+#define TEGRA264_CLK_I2S7_PAD_M 340
+#define TEGRA264_CLK_I2S8_PAD_M 341
+#define TEGRA264_CLK_I2S9_PAD_M 342
+#define TEGRA264_CLK_BPMP_NIC 343
+#define TEGRA264_CLK_CLK1M 344
+#define TEGRA264_CLK_RDET 345
+#define TEGRA264_CLK_ADC_SOC_REF 346
+#define TEGRA264_CLK_UPHY0_PLL0_TXREF 347
+#define TEGRA264_CLK_EQOS_TX 348
+#define TEGRA264_CLK_EQOS_TX_M 349
+#define TEGRA264_CLK_EQOS_RX_PCS_IN 350
+#define TEGRA264_CLK_EQOS_RX_PCS_M 351
+#define TEGRA264_CLK_EQOS_RX_IN 352
+#define TEGRA264_CLK_EQOS_RX 353
+#define TEGRA264_CLK_EQOS_RX_M 354
+#define TEGRA264_CLK_MGBE0_UPHY1_PLL_TXREF 355
+#define TEGRA264_CLK_MGBE0_TX 356
+#define TEGRA264_CLK_MGBE0_TX_M 357
+#define TEGRA264_CLK_MGBE0_RX_PCS_IN 358
+#define TEGRA264_CLK_MGBE0_RX_PCS_M 359
+#define TEGRA264_CLK_MGBE0_RX_IN 360
+#define TEGRA264_CLK_MGBE0_RX_M 361
+#define TEGRA264_CLK_MGBE1_UPHY1_PLL_TXREF 362
+#define TEGRA264_CLK_MGBE1_TX 363
+#define TEGRA264_CLK_MGBE1_TX_M 364
+#define TEGRA264_CLK_MGBE1_RX_PCS_IN 365
+#define TEGRA264_CLK_MGBE1_RX_PCS_M 366
+#define TEGRA264_CLK_MGBE1_RX_IN 367
+#define TEGRA264_CLK_MGBE1_RX_M 368
+#define TEGRA264_CLK_MGBE2_UPHY1_PLL_TXREF 369
+#define TEGRA264_CLK_MGBE2_TX 370
+#define TEGRA264_CLK_MGBE2_TX_M 371
+#define TEGRA264_CLK_MGBE2_RX_PCS_IN 372
+#define TEGRA264_CLK_MGBE2_RX_PCS_M 373
+#define TEGRA264_CLK_MGBE2_RX_IN 374
+#define TEGRA264_CLK_MGBE2_RX_M 375
+#define TEGRA264_CLK_MGBE3_UPHY1_PLL_TXREF 376
+#define TEGRA264_CLK_MGBE3_TX 377
+#define TEGRA264_CLK_MGBE3_TX_M 378
+#define TEGRA264_CLK_MGBE3_RX_PCS_IN 379
+#define TEGRA264_CLK_MGBE3_RX_PCS_M 380
+#define TEGRA264_CLK_MGBE3_RX_IN 381
+#define TEGRA264_CLK_MGBE3_RX_M 382
+#define TEGRA264_CLK_UPHY0_USB_P0_TX_CORE 383
+#define TEGRA264_CLK_UPHY0_USB_P1_TX_CORE 384
+#define TEGRA264_CLK_UPHY0_USB_P2_TX_CORE 385
+#define TEGRA264_CLK_UPHY0_USB_P3_TX_CORE 386
+#define TEGRA264_CLK_UPHY0_USB_P0_TX 387
+#define TEGRA264_CLK_UPHY0_USB_P1_TX 388
+#define TEGRA264_CLK_UPHY0_USB_P2_TX 389
+#define TEGRA264_CLK_UPHY0_USB_P3_TX 390
+#define TEGRA264_CLK_UPHY0_USB_P0_RX_IN 391
+#define TEGRA264_CLK_UPHY0_USB_P1_RX_IN 392
+#define TEGRA264_CLK_UPHY0_USB_P2_RX_IN 393
+#define TEGRA264_CLK_UPHY0_USB_P3_RX_IN 394
+#define TEGRA264_CLK_UPHY0_USB_P0_RX_M 395
+#define TEGRA264_CLK_UPHY0_USB_P1_RX_M 396
+#define TEGRA264_CLK_UPHY0_USB_P2_RX_M 397
+#define TEGRA264_CLK_UPHY0_USB_P3_RX_M 398
+#define TEGRA264_CLK_UPHY0_LANE0_TX_M 399
+#define TEGRA264_CLK_PCIE_C1_XCLK_NOBG_M 400
+#define TEGRA264_CLK_PCIE_C2_XCLK_NOBG_M 401
+#define TEGRA264_CLK_PCIE_C3_XCLK_NOBG_M 402
+#define TEGRA264_CLK_PCIE_C4_XCLK_NOBG_M 403
+#define TEGRA264_CLK_PCIE_C5_XCLK_NOBG_M 404
+#define TEGRA264_CLK_PCIE_C1_L0_RX_M 405
+#define TEGRA264_CLK_PCIE_C1_L1_RX_M 406
+#define TEGRA264_CLK_PCIE_C1_L2_RX_M 407
+#define TEGRA264_CLK_PCIE_C1_L3_RX_M 408
+#define TEGRA264_CLK_PCIE_C2_L0_RX_M 409
+#define TEGRA264_CLK_PCIE_C2_L1_RX_M 410
+#define TEGRA264_CLK_PCIE_C2_L2_RX_M 411
+#define TEGRA264_CLK_PCIE_C2_L3_RX_M 412
+#define TEGRA264_CLK_PCIE_C3_L0_RX_M 413
+#define TEGRA264_CLK_PCIE_C3_L1_RX_M 414
+#define TEGRA264_CLK_PCIE_C4_L0_RX_M 415
+#define TEGRA264_CLK_PCIE_C4_L1_RX_M 416
+#define TEGRA264_CLK_PCIE_C4_L2_RX_M 417
+#define TEGRA264_CLK_PCIE_C4_L3_RX_M 418
+#define TEGRA264_CLK_PCIE_C4_L4_RX_M 419
+#define TEGRA264_CLK_PCIE_C4_L5_RX_M 420
+#define TEGRA264_CLK_PCIE_C4_L6_RX_M 421
+#define TEGRA264_CLK_PCIE_C4_L7_RX_M 422
+#define TEGRA264_CLK_PCIE_C5_L0_RX_M 423
+#define TEGRA264_CLK_PCIE_C5_L1_RX_M 424
+#define TEGRA264_CLK_PCIE_C5_L2_RX_M 425
+#define TEGRA264_CLK_PCIE_C5_L3_RX_M 426
+#define TEGRA264_CLK_MPHY_L0_RX_PWM_BIT_M 427
+#define TEGRA264_CLK_MPHY_L1_RX_PWM_BIT_M 428
+#define TEGRA264_CLK_DBB_UPHY0 429
+#define TEGRA264_CLK_UPHY0_UXL_CORE 430
+#define TEGRA264_CLK_ISC_CPU_ROOT 431
+#define TEGRA264_CLK_ISC_NIC 432
+#define TEGRA264_CLK_CTC_TXCLK0_M 433
+#define TEGRA264_CLK_CTC_TXCLK1_M 434
+#define TEGRA264_CLK_CTC_RXCLK0_M 435
+#define TEGRA264_CLK_CTC_RXCLK1_M 436
+#define TEGRA264_CLK_PLLREFGP_OUT 437
+#define TEGRA264_CLK_PLLREFGP_OUT1 438
+#define TEGRA264_CLK_GPU_SYS 439
+#define TEGRA264_CLK_GPU_NVD 440
+#define TEGRA264_CLK_GPU_UPROC 441
+#define TEGRA264_CLK_GPU_GPC0 442
+#define TEGRA264_CLK_GPU_GPC1 443
+#define TEGRA264_CLK_GPU_GPC2 444
+#define TEGRA264_CLK_PLLX 445
+#define TEGRA264_CLK_APE_SOUNDWIRE_MSRC0 446
+#define TEGRA264_CLK_APE_SOUNDWIRE_DATA_EN_SHAPER 447
+#define TEGRA264_CLK_AO_SOUNDWIRE_MSRC0 448
+#define TEGRA264_CLK_AO_SOUNDWIRE_DATA_EN_SHAPER 449
+#define TEGRA264_CLK_MGBE0_TX_SER 459
+#define TEGRA264_CLK_MGBE1_TX_SER 460
+#define TEGRA264_CLK_MGBE2_TX_SER 461
+#define TEGRA264_CLK_MGBE3_TX_SER 462
+#define TEGRA264_CLK_MGBE0_RX_SER 463
+#define TEGRA264_CLK_MGBE1_RX_SER 464
+#define TEGRA264_CLK_MGBE2_RX_SER 465
+#define TEGRA264_CLK_MGBE3_RX_SER 466
+#define TEGRA264_CLK_DPAUX 467
+
+#endif /* DT_BINDINGS_CLOCK_NVIDIA_TEGRA264_H */
diff --git a/include/dt-bindings/clock/nxp,imx94-clock.h b/include/dt-bindings/clock/nxp,imx94-clock.h
new file mode 100644
index 000000000000..c4ba13352b99
--- /dev/null
+++ b/include/dt-bindings/clock/nxp,imx94-clock.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright 2025 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX94_H
+#define __DT_BINDINGS_CLOCK_IMX94_H
+
+#define IMX94_CLK_DISPMIX_CLK_SEL 0
+
+#define IMX94_CLK_DISPMIX_LVDS_CLK_GATE 0
+
+#endif /* __DT_BINDINGS_CLOCK_IMX94_H */
diff --git a/include/dt-bindings/clock/nxp,imx95-clock.h b/include/dt-bindings/clock/nxp,imx95-clock.h
new file mode 100644
index 000000000000..b7a713a9ac8c
--- /dev/null
+++ b/include/dt-bindings/clock/nxp,imx95-clock.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX95_H
+#define __DT_BINDINGS_CLOCK_IMX95_H
+
+#define IMX95_CLK_VPUBLK_WAVE 0
+#define IMX95_CLK_VPUBLK_JPEG_ENC 1
+#define IMX95_CLK_VPUBLK_JPEG_DEC 2
+
+#define IMX95_CLK_CAMBLK_CSI2_FOR0 0
+#define IMX95_CLK_CAMBLK_CSI2_FOR1 1
+#define IMX95_CLK_CAMBLK_ISP_AXI 2
+#define IMX95_CLK_CAMBLK_ISP_PIXEL 3
+#define IMX95_CLK_CAMBLK_ISP 4
+
+#define IMX95_CLK_DISPMIX_LVDS_PHY_DIV 0
+#define IMX95_CLK_DISPMIX_LVDS_CH0_GATE 1
+#define IMX95_CLK_DISPMIX_LVDS_CH1_GATE 2
+#define IMX95_CLK_DISPMIX_PIX_DI0_GATE 3
+#define IMX95_CLK_DISPMIX_PIX_DI1_GATE 4
+
+#define IMX95_CLK_DISPMIX_ENG0_SEL 0
+#define IMX95_CLK_DISPMIX_ENG1_SEL 1
+
+#define IMX95_CLK_NETCMIX_ENETC0_RMII 0
+#define IMX95_CLK_NETCMIX_ENETC1_RMII 1
+
+#endif /* __DT_BINDINGS_CLOCK_IMX95_H */
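As with the i.MX94 header just above, the i.MX95 indices restart at 0 in every block (VPUBLK, CAMBLK, DISPMIX, NETCMIX): each block control is its own provider node, so equal numbers stay unambiguous because the phandle differs. A sketch, assuming provider labels vpublk and dispmix (illustrative, not from this patch):

	#include <dt-bindings/clock/nxp,imx95-clock.h>

	video-codec {				/* hypothetical consumer nodes */
		clocks = <&vpublk IMX95_CLK_VPUBLK_WAVE>;
	};
	lvds-channel {
		clocks = <&dispmix IMX95_CLK_DISPMIX_LVDS_CH0_GATE>;
	};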
diff --git a/include/dt-bindings/clock/px30-cru.h b/include/dt-bindings/clock/px30-cru.h
index 5b1416fcde6f..a2abf1995c34 100644
--- a/include/dt-bindings/clock/px30-cru.h
+++ b/include/dt-bindings/clock/px30-cru.h
@@ -175,8 +175,6 @@
#define PCLK_CIF 352
#define PCLK_OTP_PHY 353

-#define CLK_NR_CLKS (PCLK_OTP_PHY + 1)
-
/* pmu-clocks indices */

#define PLL_GPLL 1
@@ -195,8 +193,6 @@
#define PCLK_GPIO0_PMU 20
#define PCLK_UART0_PMU 21

-#define CLKPMU_NR_CLKS (PCLK_UART0_PMU + 1)
-
/* soft-reset indices */
#define SRST_CORE0_PO 0
#define SRST_CORE1_PO 1
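Dropping CLK_NR_CLKS and CLKPMU_NR_CLKS does not affect consumers: the totals were driver-side bookkeeping rather than part of the binding, and devicetrees only ever reference individual indices. A sketch, with the cru label and node purely illustrative:

	cif@ff490000 {				/* hypothetical consumer node */
		clocks = <&cru PCLK_CIF>;	/* unchanged by the removal above */
	};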
diff --git a/include/dt-bindings/clock/qcom,apss-ipq.h b/include/dt-bindings/clock/qcom,apss-ipq.h
index 77b6e05492e2..0bb41e5efdef 100644
--- a/include/dt-bindings/clock/qcom,apss-ipq.h
+++ b/include/dt-bindings/clock/qcom,apss-ipq.h
@@ -8,5 +8,11 @@

#define APCS_ALIAS0_CLK_SRC 0
#define APCS_ALIAS0_CORE_CLK 1
+#define APSS_PLL_EARLY 2
+#define APSS_SILVER_CLK_SRC 3
+#define APSS_SILVER_CORE_CLK 4
+#define L3_PLL 5
+#define L3_CLK_SRC 6
+#define L3_CORE_CLK 7

#endif
diff --git a/include/dt-bindings/clock/qcom,camcc-sc7280.h b/include/dt-bindings/clock/qcom,camcc-sc7280.h
new file mode 100644
index 000000000000..56640f407309
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,camcc-sc7280.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SC7280_H
+
+/* CAM_CC clocks */
+#define CAM_CC_PLL0 0
+#define CAM_CC_PLL0_OUT_EVEN 1
+#define CAM_CC_PLL0_OUT_ODD 2
+#define CAM_CC_PLL1 3
+#define CAM_CC_PLL1_OUT_EVEN 4
+#define CAM_CC_PLL2 5
+#define CAM_CC_PLL2_OUT_AUX 6
+#define CAM_CC_PLL2_OUT_AUX2 7
+#define CAM_CC_PLL3 8
+#define CAM_CC_PLL3_OUT_EVEN 9
+#define CAM_CC_PLL4 10
+#define CAM_CC_PLL4_OUT_EVEN 11
+#define CAM_CC_PLL5 12
+#define CAM_CC_PLL5_OUT_EVEN 13
+#define CAM_CC_PLL6 14
+#define CAM_CC_PLL6_OUT_EVEN 15
+#define CAM_CC_PLL6_OUT_ODD 16
+#define CAM_CC_BPS_AHB_CLK 17
+#define CAM_CC_BPS_AREG_CLK 18
+#define CAM_CC_BPS_AXI_CLK 19
+#define CAM_CC_BPS_CLK 20
+#define CAM_CC_BPS_CLK_SRC 21
+#define CAM_CC_CAMNOC_AXI_CLK 22
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 23
+#define CAM_CC_CAMNOC_DCD_XO_CLK 24
+#define CAM_CC_CCI_0_CLK 25
+#define CAM_CC_CCI_0_CLK_SRC 26
+#define CAM_CC_CCI_1_CLK 27
+#define CAM_CC_CCI_1_CLK_SRC 28
+#define CAM_CC_CORE_AHB_CLK 29
+#define CAM_CC_CPAS_AHB_CLK 30
+#define CAM_CC_CPHY_RX_CLK_SRC 31
+#define CAM_CC_CSI0PHYTIMER_CLK 32
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 33
+#define CAM_CC_CSI1PHYTIMER_CLK 34
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 35
+#define CAM_CC_CSI2PHYTIMER_CLK 36
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 37
+#define CAM_CC_CSI3PHYTIMER_CLK 38
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 39
+#define CAM_CC_CSI4PHYTIMER_CLK 40
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 41
+#define CAM_CC_CSIPHY0_CLK 42
+#define CAM_CC_CSIPHY1_CLK 43
+#define CAM_CC_CSIPHY2_CLK 44
+#define CAM_CC_CSIPHY3_CLK 45
+#define CAM_CC_CSIPHY4_CLK 46
+#define CAM_CC_FAST_AHB_CLK_SRC 47
+#define CAM_CC_GDSC_CLK 48
+#define CAM_CC_ICP_AHB_CLK 49
+#define CAM_CC_ICP_CLK 50
+#define CAM_CC_ICP_CLK_SRC 51
+#define CAM_CC_IFE_0_AXI_CLK 52
+#define CAM_CC_IFE_0_CLK 53
+#define CAM_CC_IFE_0_CLK_SRC 54
+#define CAM_CC_IFE_0_CPHY_RX_CLK 55
+#define CAM_CC_IFE_0_CSID_CLK 56
+#define CAM_CC_IFE_0_CSID_CLK_SRC 57
+#define CAM_CC_IFE_0_DSP_CLK 58
+#define CAM_CC_IFE_1_AXI_CLK 59
+#define CAM_CC_IFE_1_CLK 60
+#define CAM_CC_IFE_1_CLK_SRC 61
+#define CAM_CC_IFE_1_CPHY_RX_CLK 62
+#define CAM_CC_IFE_1_CSID_CLK 63
+#define CAM_CC_IFE_1_CSID_CLK_SRC 64
+#define CAM_CC_IFE_1_DSP_CLK 65
+#define CAM_CC_IFE_2_AXI_CLK 66
+#define CAM_CC_IFE_2_CLK 67
+#define CAM_CC_IFE_2_CLK_SRC 68
+#define CAM_CC_IFE_2_CPHY_RX_CLK 69
+#define CAM_CC_IFE_2_CSID_CLK 70
+#define CAM_CC_IFE_2_CSID_CLK_SRC 71
+#define CAM_CC_IFE_2_DSP_CLK 72
+#define CAM_CC_IFE_LITE_0_CLK 73
+#define CAM_CC_IFE_LITE_0_CLK_SRC 74
+#define CAM_CC_IFE_LITE_0_CPHY_RX_CLK 75
+#define CAM_CC_IFE_LITE_0_CSID_CLK 76
+#define CAM_CC_IFE_LITE_0_CSID_CLK_SRC 77
+#define CAM_CC_IFE_LITE_1_CLK 78
+#define CAM_CC_IFE_LITE_1_CLK_SRC 79
+#define CAM_CC_IFE_LITE_1_CPHY_RX_CLK 80
+#define CAM_CC_IFE_LITE_1_CSID_CLK 81
+#define CAM_CC_IFE_LITE_1_CSID_CLK_SRC 82
+#define CAM_CC_IPE_0_AHB_CLK 83
+#define CAM_CC_IPE_0_AREG_CLK 84
+#define CAM_CC_IPE_0_AXI_CLK 85
+#define CAM_CC_IPE_0_CLK 86
+#define CAM_CC_IPE_0_CLK_SRC 87
+#define CAM_CC_JPEG_CLK 88
+#define CAM_CC_JPEG_CLK_SRC 89
+#define CAM_CC_LRME_CLK 90
+#define CAM_CC_LRME_CLK_SRC 91
+#define CAM_CC_MCLK0_CLK 92
+#define CAM_CC_MCLK0_CLK_SRC 93
+#define CAM_CC_MCLK1_CLK 94
+#define CAM_CC_MCLK1_CLK_SRC 95
+#define CAM_CC_MCLK2_CLK 96
+#define CAM_CC_MCLK2_CLK_SRC 97
+#define CAM_CC_MCLK3_CLK 98
+#define CAM_CC_MCLK3_CLK_SRC 99
+#define CAM_CC_MCLK4_CLK 100
+#define CAM_CC_MCLK4_CLK_SRC 101
+#define CAM_CC_MCLK5_CLK 102
+#define CAM_CC_MCLK5_CLK_SRC 103
+#define CAM_CC_SLEEP_CLK 104
+#define CAM_CC_SLEEP_CLK_SRC 105
+#define CAM_CC_SLOW_AHB_CLK_SRC 106
+#define CAM_CC_XO_CLK_SRC 107
+
+/* CAM_CC power domains */
+#define CAM_CC_BPS_GDSC 0
+#define CAM_CC_IFE_0_GDSC 1
+#define CAM_CC_IFE_1_GDSC 2
+#define CAM_CC_IFE_2_GDSC 3
+#define CAM_CC_IPE_0_GDSC 4
+#define CAM_CC_TITAN_TOP_GDSC 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,camcc-sm8250.h b/include/dt-bindings/clock/qcom,camcc-sm8250.h
new file mode 100644
index 000000000000..383ead17608d
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,camcc-sm8250.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8250_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8250_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_AREG_CLK 1
+#define CAM_CC_BPS_AXI_CLK 2
+#define CAM_CC_BPS_CLK 3
+#define CAM_CC_BPS_CLK_SRC 4
+#define CAM_CC_CAMNOC_AXI_CLK 5
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 6
+#define CAM_CC_CAMNOC_DCD_XO_CLK 7
+#define CAM_CC_CCI_0_CLK 8
+#define CAM_CC_CCI_0_CLK_SRC 9
+#define CAM_CC_CCI_1_CLK 10
+#define CAM_CC_CCI_1_CLK_SRC 11
+#define CAM_CC_CORE_AHB_CLK 12
+#define CAM_CC_CPAS_AHB_CLK 13
+#define CAM_CC_CPHY_RX_CLK_SRC 14
+#define CAM_CC_CSI0PHYTIMER_CLK 15
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 16
+#define CAM_CC_CSI1PHYTIMER_CLK 17
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 18
+#define CAM_CC_CSI2PHYTIMER_CLK 19
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 20
+#define CAM_CC_CSI3PHYTIMER_CLK 21
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 22
+#define CAM_CC_CSI4PHYTIMER_CLK 23
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 24
+#define CAM_CC_CSI5PHYTIMER_CLK 25
+#define CAM_CC_CSI5PHYTIMER_CLK_SRC 26
+#define CAM_CC_CSIPHY0_CLK 27
+#define CAM_CC_CSIPHY1_CLK 28
+#define CAM_CC_CSIPHY2_CLK 29
+#define CAM_CC_CSIPHY3_CLK 30
+#define CAM_CC_CSIPHY4_CLK 31
+#define CAM_CC_CSIPHY5_CLK 32
+#define CAM_CC_FAST_AHB_CLK_SRC 33
+#define CAM_CC_FD_CORE_CLK 34
+#define CAM_CC_FD_CORE_CLK_SRC 35
+#define CAM_CC_FD_CORE_UAR_CLK 36
+#define CAM_CC_GDSC_CLK 37
+#define CAM_CC_ICP_AHB_CLK 38
+#define CAM_CC_ICP_CLK 39
+#define CAM_CC_ICP_CLK_SRC 40
+#define CAM_CC_IFE_0_AHB_CLK 41
+#define CAM_CC_IFE_0_AREG_CLK 42
+#define CAM_CC_IFE_0_AXI_CLK 43
+#define CAM_CC_IFE_0_CLK 44
+#define CAM_CC_IFE_0_CLK_SRC 45
+#define CAM_CC_IFE_0_CPHY_RX_CLK 46
+#define CAM_CC_IFE_0_CSID_CLK 47
+#define CAM_CC_IFE_0_CSID_CLK_SRC 48
+#define CAM_CC_IFE_0_DSP_CLK 49
+#define CAM_CC_IFE_1_AHB_CLK 50
+#define CAM_CC_IFE_1_AREG_CLK 51
+#define CAM_CC_IFE_1_AXI_CLK 52
+#define CAM_CC_IFE_1_CLK 53
+#define CAM_CC_IFE_1_CLK_SRC 54
+#define CAM_CC_IFE_1_CPHY_RX_CLK 55
+#define CAM_CC_IFE_1_CSID_CLK 56
+#define CAM_CC_IFE_1_CSID_CLK_SRC 57
+#define CAM_CC_IFE_1_DSP_CLK 58
+#define CAM_CC_IFE_LITE_AHB_CLK 59
+#define CAM_CC_IFE_LITE_AXI_CLK 60
+#define CAM_CC_IFE_LITE_CLK 61
+#define CAM_CC_IFE_LITE_CLK_SRC 62
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 63
+#define CAM_CC_IFE_LITE_CSID_CLK 64
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 65
+#define CAM_CC_IPE_0_AHB_CLK 66
+#define CAM_CC_IPE_0_AREG_CLK 67
+#define CAM_CC_IPE_0_AXI_CLK 68
+#define CAM_CC_IPE_0_CLK 69
+#define CAM_CC_IPE_0_CLK_SRC 70
+#define CAM_CC_JPEG_CLK 71
+#define CAM_CC_JPEG_CLK_SRC 72
+#define CAM_CC_MCLK0_CLK 73
+#define CAM_CC_MCLK0_CLK_SRC 74
+#define CAM_CC_MCLK1_CLK 75
+#define CAM_CC_MCLK1_CLK_SRC 76
+#define CAM_CC_MCLK2_CLK 77
+#define CAM_CC_MCLK2_CLK_SRC 78
+#define CAM_CC_MCLK3_CLK 79
+#define CAM_CC_MCLK3_CLK_SRC 80
+#define CAM_CC_MCLK4_CLK 81
+#define CAM_CC_MCLK4_CLK_SRC 82
+#define CAM_CC_MCLK5_CLK 83
+#define CAM_CC_MCLK5_CLK_SRC 84
+#define CAM_CC_MCLK6_CLK 85
+#define CAM_CC_MCLK6_CLK_SRC 86
+#define CAM_CC_PLL0 87
+#define CAM_CC_PLL0_OUT_EVEN 88
+#define CAM_CC_PLL0_OUT_ODD 89
+#define CAM_CC_PLL1 90
+#define CAM_CC_PLL1_OUT_EVEN 91
+#define CAM_CC_PLL2 92
+#define CAM_CC_PLL2_OUT_MAIN 93
+#define CAM_CC_PLL3 94
+#define CAM_CC_PLL3_OUT_EVEN 95
+#define CAM_CC_PLL4 96
+#define CAM_CC_PLL4_OUT_EVEN 97
+#define CAM_CC_SBI_AHB_CLK 98
+#define CAM_CC_SBI_AXI_CLK 99
+#define CAM_CC_SBI_CLK 100
+#define CAM_CC_SBI_CPHY_RX_CLK 101
+#define CAM_CC_SBI_CSID_CLK 102
+#define CAM_CC_SBI_CSID_CLK_SRC 103
+#define CAM_CC_SBI_DIV_CLK_SRC 104
+#define CAM_CC_SBI_IFE_0_CLK 105
+#define CAM_CC_SBI_IFE_1_CLK 106
+#define CAM_CC_SLEEP_CLK 107
+#define CAM_CC_SLEEP_CLK_SRC 108
+#define CAM_CC_SLOW_AHB_CLK_SRC 109
+#define CAM_CC_XO_CLK_SRC 110
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_ICP_BCR 1
+#define CAM_CC_IFE_0_BCR 2
+#define CAM_CC_IFE_1_BCR 3
+#define CAM_CC_IPE_0_BCR 4
+#define CAM_CC_SBI_BCR 5
+
+/* CAM_CC GDSCRs */
+#define BPS_GDSC 0
+#define IPE_0_GDSC 1
+#define SBI_GDSC 2
+#define IFE_0_GDSC 3
+#define IFE_1_GDSC 4
+#define TITAN_TOP_GDSC 5
+
+#endif
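
These indices are consumed from board and SoC DTS files: a clock specifier
pairs the controller phandle with one of the constants above, and the reset
and GDSC tables are referenced the same way through resets and power-domains.
A minimal sketch, assuming a hypothetical consumer node and a &camcc label
(none of the names below come from this patch):

    /* Hypothetical consumer of the CAM_CC indices defined above. */
    camera-frontend@acb0000 {
            clocks = <&camcc CAM_CC_IFE_0_CLK>,
                     <&camcc CAM_CC_IFE_0_AXI_CLK>;  /* clock indices */
            resets = <&camcc CAM_CC_IFE_0_BCR>;      /* reset index */
            power-domains = <&camcc IFE_0_GDSC>;     /* GDSC index */
    };
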
diff --git a/include/dt-bindings/clock/qcom,dispcc-qcm2290.h b/include/dt-bindings/clock/qcom,dispcc-qcm2290.h
new file mode 100644
index 000000000000..cb687949be41
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-qcm2290.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_QCM2290_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_QCM2290_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_ESC0_CLK 7
+#define DISP_CC_MDSS_ESC0_CLK_SRC 8
+#define DISP_CC_MDSS_MDP_CLK 9
+#define DISP_CC_MDSS_MDP_CLK_SRC 10
+#define DISP_CC_MDSS_MDP_LUT_CLK 11
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 12
+#define DISP_CC_MDSS_PCLK0_CLK 13
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 14
+#define DISP_CC_MDSS_VSYNC_CLK 15
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 16
+#define DISP_CC_SLEEP_CLK 17
+#define DISP_CC_SLEEP_CLK_SRC 18
+#define DISP_CC_XO_CLK 19
+#define DISP_CC_XO_CLK_SRC 20
+
+/* GDSCs */
+#define MDSS_GDSC 0
+
+/* Resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sc7280.h b/include/dt-bindings/clock/qcom,dispcc-sc7280.h
new file mode 100644
index 000000000000..9f113f346be8
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sc7280.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SC7280_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_DP_AUX_CLK 7
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 8
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 9
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 10
+#define DISP_CC_MDSS_DP_LINK_CLK 11
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 12
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 13
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 14
+#define DISP_CC_MDSS_DP_PIXEL_CLK 15
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 16
+#define DISP_CC_MDSS_EDP_AUX_CLK 17
+#define DISP_CC_MDSS_EDP_AUX_CLK_SRC 18
+#define DISP_CC_MDSS_EDP_LINK_CLK 19
+#define DISP_CC_MDSS_EDP_LINK_CLK_SRC 20
+#define DISP_CC_MDSS_EDP_LINK_DIV_CLK_SRC 21
+#define DISP_CC_MDSS_EDP_LINK_INTF_CLK 22
+#define DISP_CC_MDSS_EDP_PIXEL_CLK 23
+#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC 24
+#define DISP_CC_MDSS_ESC0_CLK 25
+#define DISP_CC_MDSS_ESC0_CLK_SRC 26
+#define DISP_CC_MDSS_MDP_CLK 27
+#define DISP_CC_MDSS_MDP_CLK_SRC 28
+#define DISP_CC_MDSS_MDP_LUT_CLK 29
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 30
+#define DISP_CC_MDSS_PCLK0_CLK 31
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 32
+#define DISP_CC_MDSS_ROT_CLK 33
+#define DISP_CC_MDSS_ROT_CLK_SRC 34
+#define DISP_CC_MDSS_RSCC_AHB_CLK 35
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 36
+#define DISP_CC_MDSS_VSYNC_CLK 37
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 38
+#define DISP_CC_SLEEP_CLK 39
+#define DISP_CC_XO_CLK 40
+
+/* DISP_CC power domains */
+#define DISP_CC_MDSS_CORE_GDSC 0
+
+/* DISPCC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_RSCC_BCR 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sc8280xp.h b/include/dt-bindings/clock/qcom,dispcc-sc8280xp.h
new file mode 100644
index 000000000000..2831c61fa979
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sc8280xp.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SC8280XP_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SC8280XP_H
+
+/* DISPCC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_PLL1 1
+#define DISP_CC_PLL1_OUT_EVEN 2
+#define DISP_CC_PLL2 3
+#define DISP_CC_MDSS_AHB1_CLK 4
+#define DISP_CC_MDSS_AHB_CLK 5
+#define DISP_CC_MDSS_AHB_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_CLK 7
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 8
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 10
+#define DISP_CC_MDSS_BYTE1_CLK 11
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 12
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 13
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 14
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 15
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 16
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 17
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 18
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 19
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 20
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 21
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 22
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 23
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 24
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 25
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 26
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 27
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 28
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 29
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 30
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 31
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 32
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 33
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 34
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 35
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 36
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 37
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 38
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 39
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 40
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 41
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 42
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 43
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 44
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 45
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 46
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 47
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 48
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 49
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 50
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 51
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 52
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 53
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 54
+#define DISP_CC_MDSS_ESC0_CLK 55
+#define DISP_CC_MDSS_ESC0_CLK_SRC 56
+#define DISP_CC_MDSS_ESC1_CLK 57
+#define DISP_CC_MDSS_ESC1_CLK_SRC 58
+#define DISP_CC_MDSS_MDP1_CLK 59
+#define DISP_CC_MDSS_MDP_CLK 60
+#define DISP_CC_MDSS_MDP_CLK_SRC 61
+#define DISP_CC_MDSS_MDP_LUT1_CLK 62
+#define DISP_CC_MDSS_MDP_LUT_CLK 63
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 64
+#define DISP_CC_MDSS_PCLK0_CLK 65
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 66
+#define DISP_CC_MDSS_PCLK1_CLK 67
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 68
+#define DISP_CC_MDSS_ROT1_CLK 69
+#define DISP_CC_MDSS_ROT_CLK 70
+#define DISP_CC_MDSS_ROT_CLK_SRC 71
+#define DISP_CC_MDSS_RSCC_AHB_CLK 72
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 73
+#define DISP_CC_MDSS_VSYNC1_CLK 74
+#define DISP_CC_MDSS_VSYNC_CLK 75
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 76
+#define DISP_CC_SLEEP_CLK 77
+#define DISP_CC_SLEEP_CLK_SRC 78
+#define DISP_CC_XO_CLK 79
+#define DISP_CC_XO_CLK_SRC 80
+
+/* DISPCC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_RSCC_BCR 1
+
+/* DISPCC GDSCs */
+#define MDSS_GDSC 0
+#define MDSS_INT2_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm6125.h b/include/dt-bindings/clock/qcom,dispcc-sm6125.h
new file mode 100644
index 000000000000..4ff974f4fcc3
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sm6125.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6125_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6125_H
+
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 5
+#define DISP_CC_MDSS_DP_AUX_CLK 6
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 7
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 8
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 9
+#define DISP_CC_MDSS_DP_LINK_CLK 10
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 11
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 12
+#define DISP_CC_MDSS_DP_PIXEL_CLK 13
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 14
+#define DISP_CC_MDSS_ESC0_CLK 15
+#define DISP_CC_MDSS_ESC0_CLK_SRC 16
+#define DISP_CC_MDSS_MDP_CLK 17
+#define DISP_CC_MDSS_MDP_CLK_SRC 18
+#define DISP_CC_MDSS_MDP_LUT_CLK 19
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 20
+#define DISP_CC_MDSS_PCLK0_CLK 21
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 22
+#define DISP_CC_MDSS_ROT_CLK 23
+#define DISP_CC_MDSS_ROT_CLK_SRC 24
+#define DISP_CC_MDSS_VSYNC_CLK 25
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 26
+#define DISP_CC_XO_CLK 27
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm6350.h b/include/dt-bindings/clock/qcom,dispcc-sm6350.h
new file mode 100644
index 000000000000..61426a80e620
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sm6350.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6350_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6350_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_DP_AUX_CLK 7
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 8
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 9
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 10
+#define DISP_CC_MDSS_DP_LINK_CLK 11
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 12
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 13
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 14
+#define DISP_CC_MDSS_DP_PIXEL_CLK 15
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 16
+#define DISP_CC_MDSS_ESC0_CLK 17
+#define DISP_CC_MDSS_ESC0_CLK_SRC 18
+#define DISP_CC_MDSS_MDP_CLK 19
+#define DISP_CC_MDSS_MDP_CLK_SRC 20
+#define DISP_CC_MDSS_MDP_LUT_CLK 21
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 22
+#define DISP_CC_MDSS_PCLK0_CLK 23
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 24
+#define DISP_CC_MDSS_ROT_CLK 25
+#define DISP_CC_MDSS_ROT_CLK_SRC 26
+#define DISP_CC_MDSS_RSCC_AHB_CLK 27
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 28
+#define DISP_CC_MDSS_VSYNC_CLK 29
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 30
+#define DISP_CC_SLEEP_CLK 31
+#define DISP_CC_XO_CLK 32
+
+/* Resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_RSCC_BCR 1
+
+/* GDSCs */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm8250.h b/include/dt-bindings/clock/qcom,dispcc-sm8250.h
index fdaca6ad5c85..767fdb27e514 100644
--- a/include/dt-bindings/clock/qcom,dispcc-sm8250.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sm8250.h
@@ -55,6 +55,16 @@
#define DISP_CC_MDSS_VSYNC_CLK_SRC 45
#define DISP_CC_PLL0 46
#define DISP_CC_PLL1 47
+#define DISP_CC_MDSS_EDP_AUX_CLK 48
+#define DISP_CC_MDSS_EDP_AUX_CLK_SRC 49
+#define DISP_CC_MDSS_EDP_GTC_CLK 50
+#define DISP_CC_MDSS_EDP_GTC_CLK_SRC 51
+#define DISP_CC_MDSS_EDP_LINK_CLK 52
+#define DISP_CC_MDSS_EDP_LINK_CLK_SRC 53
+#define DISP_CC_MDSS_EDP_LINK_INTF_CLK 54
+#define DISP_CC_MDSS_EDP_PIXEL_CLK 55
+#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC 56
+#define DISP_CC_MDSS_EDP_LINK_DIV_CLK_SRC 57

/* DISP_CC Reset */
#define DISP_CC_MDSS_CORE_BCR 0
diff --git a/include/dt-bindings/clock/qcom,dispcc-sm8350.h b/include/dt-bindings/clock/qcom,dispcc-sm8350.h
new file mode 120000
index 000000000000..0312b4544acb
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sm8350.h
@@ -0,0 +1 @@
+qcom,dispcc-sm8250.h
\ No newline at end of file
diff --git a/include/dt-bindings/clock/qcom,dsi-phy-28nm.h b/include/dt-bindings/clock/qcom,dsi-phy-28nm.h
new file mode 100644
index 000000000000..ab94d58377a1
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dsi-phy-28nm.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DSI_PHY_28NM_H
+#define _DT_BINDINGS_CLK_QCOM_DSI_PHY_28NM_H
+
+#define DSI_BYTE_PLL_CLK 0
+#define DSI_PIXEL_PLL_CLK 1
+
+#endif
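
The two constants above exist because the 28 nm DSI PHY is itself a clock
provider: its byte and pixel PLL outputs are routed back to the display
clock controller as parent clocks, selected by index. A minimal sketch of
that wiring, assuming hypothetical &dispcc and &dsi0_phy labels:

    #include <dt-bindings/clock/qcom,dsi-phy-28nm.h>

    &dispcc {
            /* Parent the display controller on the PHY PLL outputs. */
            clocks = <&dsi0_phy DSI_BYTE_PLL_CLK>,
                     <&dsi0_phy DSI_PIXEL_PLL_CLK>;
    };
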
diff --git a/include/dt-bindings/clock/qcom,gcc-apq8084.h b/include/dt-bindings/clock/qcom,gcc-apq8084.h
index 7f657cf8cc8a..a985248d6332 100644
--- a/include/dt-bindings/clock/qcom,gcc-apq8084.h
+++ b/include/dt-bindings/clock/qcom,gcc-apq8084.h
@@ -339,6 +339,7 @@
#define GCC_PCIE_1_MSTR_AXI_CLK 330
#define GCC_PCIE_1_PIPE_CLK 331
#define GCC_PCIE_1_SLV_AXI_CLK 332
+#define GCC_MMSS_GPLL0_CLK_SRC 333

/* gdscs */
#define USB_HS_HSIC_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq4019.h b/include/dt-bindings/clock/qcom,gcc-ipq4019.h
index 7e8a7be6dcda..fa0587857547 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq4019.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq4019.h
@@ -165,5 +165,11 @@
#define GCC_QDSS_BCR 69
#define GCC_MPM_BCR 70
#define GCC_SPDM_BCR 71
+#define ESS_MAC1_ARES 72
+#define ESS_MAC2_ARES 73
+#define ESS_MAC3_ARES 74
+#define ESS_MAC4_ARES 75
+#define ESS_MAC5_ARES 76
+#define ESS_PSGMII_ARES 77

#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq5018.h b/include/dt-bindings/clock/qcom,gcc-ipq5018.h
new file mode 100644
index 000000000000..f3de2fdfeea1
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-ipq5018.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_GCC_5018_H
+#define _DT_BINDINGS_CLOCK_IPQ_GCC_5018_H
+
+#define GPLL0_MAIN 0
+#define GPLL0 1
+#define GPLL2_MAIN 2
+#define GPLL2 3
+#define GPLL4_MAIN 4
+#define GPLL4 5
+#define UBI32_PLL_MAIN 6
+#define UBI32_PLL 7
+#define ADSS_PWM_CLK_SRC 8
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 9
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 10
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 11
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 12
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 13
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 14
+#define BLSP1_UART1_APPS_CLK_SRC 15
+#define BLSP1_UART2_APPS_CLK_SRC 16
+#define CRYPTO_CLK_SRC 17
+#define GCC_ADSS_PWM_CLK 18
+#define GCC_BLSP1_AHB_CLK 19
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 20
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 21
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 22
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 23
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 24
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 25
+#define GCC_BLSP1_UART1_APPS_CLK 26
+#define GCC_BLSP1_UART2_APPS_CLK 27
+#define GCC_BTSS_LPO_CLK 28
+#define GCC_CMN_BLK_AHB_CLK 29
+#define GCC_CMN_BLK_SYS_CLK 30
+#define GCC_CRYPTO_AHB_CLK 31
+#define GCC_CRYPTO_AXI_CLK 32
+#define GCC_CRYPTO_CLK 33
+#define GCC_CRYPTO_PPE_CLK 34
+#define GCC_DCC_CLK 35
+#define GCC_GEPHY_RX_CLK 36
+#define GCC_GEPHY_TX_CLK 37
+#define GCC_GMAC0_CFG_CLK 38
+#define GCC_GMAC0_PTP_CLK 39
+#define GCC_GMAC0_RX_CLK 40
+#define GCC_GMAC0_SYS_CLK 41
+#define GCC_GMAC0_TX_CLK 42
+#define GCC_GMAC1_CFG_CLK 43
+#define GCC_GMAC1_PTP_CLK 44
+#define GCC_GMAC1_RX_CLK 45
+#define GCC_GMAC1_SYS_CLK 46
+#define GCC_GMAC1_TX_CLK 47
+#define GCC_GP1_CLK 48
+#define GCC_GP2_CLK 49
+#define GCC_GP3_CLK 50
+#define GCC_LPASS_CORE_AXIM_CLK 51
+#define GCC_LPASS_SWAY_CLK 52
+#define GCC_MDIO0_AHB_CLK 53
+#define GCC_MDIO1_AHB_CLK 54
+#define GCC_PCIE0_AHB_CLK 55
+#define GCC_PCIE0_AUX_CLK 56
+#define GCC_PCIE0_AXI_M_CLK 57
+#define GCC_PCIE0_AXI_S_BRIDGE_CLK 58
+#define GCC_PCIE0_AXI_S_CLK 59
+#define GCC_PCIE0_PIPE_CLK 60
+#define GCC_PCIE1_AHB_CLK 61
+#define GCC_PCIE1_AUX_CLK 62
+#define GCC_PCIE1_AXI_M_CLK 63
+#define GCC_PCIE1_AXI_S_BRIDGE_CLK 64
+#define GCC_PCIE1_AXI_S_CLK 65
+#define GCC_PCIE1_PIPE_CLK 66
+#define GCC_PRNG_AHB_CLK 67
+#define GCC_Q6_AXIM_CLK 68
+#define GCC_Q6_AXIM2_CLK 69
+#define GCC_Q6_AXIS_CLK 70
+#define GCC_Q6_AHB_CLK 71
+#define GCC_Q6_AHB_S_CLK 72
+#define GCC_Q6_TSCTR_1TO2_CLK 73
+#define GCC_Q6SS_ATBM_CLK 74
+#define GCC_Q6SS_PCLKDBG_CLK 75
+#define GCC_Q6SS_TRIG_CLK 76
+#define GCC_QDSS_AT_CLK 77
+#define GCC_QDSS_CFG_AHB_CLK 78
+#define GCC_QDSS_DAP_AHB_CLK 79
+#define GCC_QDSS_DAP_CLK 80
+#define GCC_QDSS_ETR_USB_CLK 81
+#define GCC_QDSS_EUD_AT_CLK 82
+#define GCC_QDSS_STM_CLK 83
+#define GCC_QDSS_TRACECLKIN_CLK 84
+#define GCC_QDSS_TSCTR_DIV8_CLK 85
+#define GCC_QPIC_AHB_CLK 86
+#define GCC_QPIC_CLK 87
+#define GCC_QPIC_IO_MACRO_CLK 88
+#define GCC_SDCC1_AHB_CLK 89
+#define GCC_SDCC1_APPS_CLK 90
+#define GCC_SLEEP_CLK_SRC 91
+#define GCC_SNOC_GMAC0_AHB_CLK 92
+#define GCC_SNOC_GMAC0_AXI_CLK 93
+#define GCC_SNOC_GMAC1_AHB_CLK 94
+#define GCC_SNOC_GMAC1_AXI_CLK 95
+#define GCC_SNOC_LPASS_AXIM_CLK 96
+#define GCC_SNOC_LPASS_SWAY_CLK 97
+#define GCC_SNOC_UBI0_AXI_CLK 98
+#define GCC_SYS_NOC_PCIE0_AXI_CLK 99
+#define GCC_SYS_NOC_PCIE1_AXI_CLK 100
+#define GCC_SYS_NOC_QDSS_STM_AXI_CLK 101
+#define GCC_SYS_NOC_USB0_AXI_CLK 102
+#define GCC_SYS_NOC_WCSS_AHB_CLK 103
+#define GCC_UBI0_AXI_CLK 104
+#define GCC_UBI0_CFG_CLK 105
+#define GCC_UBI0_CORE_CLK 106
+#define GCC_UBI0_DBG_CLK 107
+#define GCC_UBI0_NC_AXI_CLK 108
+#define GCC_UBI0_UTCM_CLK 109
+#define GCC_UNIPHY_AHB_CLK 110
+#define GCC_UNIPHY_RX_CLK 111
+#define GCC_UNIPHY_SYS_CLK 112
+#define GCC_UNIPHY_TX_CLK 113
+#define GCC_USB0_AUX_CLK 114
+#define GCC_USB0_EUD_AT_CLK 115
+#define GCC_USB0_LFPS_CLK 116
+#define GCC_USB0_MASTER_CLK 117
+#define GCC_USB0_MOCK_UTMI_CLK 118
+#define GCC_USB0_PHY_CFG_AHB_CLK 119
+#define GCC_USB0_SLEEP_CLK 120
+#define GCC_WCSS_ACMT_CLK 121
+#define GCC_WCSS_AHB_S_CLK 122
+#define GCC_WCSS_AXI_M_CLK 123
+#define GCC_WCSS_AXI_S_CLK 124
+#define GCC_WCSS_DBG_IFC_APB_BDG_CLK 125
+#define GCC_WCSS_DBG_IFC_APB_CLK 126
+#define GCC_WCSS_DBG_IFC_ATB_BDG_CLK 127
+#define GCC_WCSS_DBG_IFC_ATB_CLK 128
+#define GCC_WCSS_DBG_IFC_DAPBUS_BDG_CLK 129
+#define GCC_WCSS_DBG_IFC_DAPBUS_CLK 130
+#define GCC_WCSS_DBG_IFC_NTS_BDG_CLK 131
+#define GCC_WCSS_DBG_IFC_NTS_CLK 132
+#define GCC_WCSS_ECAHB_CLK 133
+#define GCC_XO_CLK 134
+#define GCC_XO_CLK_SRC 135
+#define GMAC0_RX_CLK_SRC 136
+#define GMAC0_TX_CLK_SRC 137
+#define GMAC1_RX_CLK_SRC 138
+#define GMAC1_TX_CLK_SRC 139
+#define GMAC_CLK_SRC 140
+#define GP1_CLK_SRC 141
+#define GP2_CLK_SRC 142
+#define GP3_CLK_SRC 143
+#define LPASS_AXIM_CLK_SRC 144
+#define LPASS_SWAY_CLK_SRC 145
+#define PCIE0_AUX_CLK_SRC 146
+#define PCIE0_AXI_CLK_SRC 147
+#define PCIE1_AUX_CLK_SRC 148
+#define PCIE1_AXI_CLK_SRC 149
+#define PCNOC_BFDCD_CLK_SRC 150
+#define Q6_AXI_CLK_SRC 151
+#define QDSS_AT_CLK_SRC 152
+#define QDSS_STM_CLK_SRC 153
+#define QDSS_TSCTR_CLK_SRC 154
+#define QDSS_TRACECLKIN_CLK_SRC 155
+#define QPIC_IO_MACRO_CLK_SRC 156
+#define SDCC1_APPS_CLK_SRC 157
+#define SYSTEM_NOC_BFDCD_CLK_SRC 158
+#define UBI0_AXI_CLK_SRC 159
+#define UBI0_CORE_CLK_SRC 160
+#define USB0_AUX_CLK_SRC 161
+#define USB0_LFPS_CLK_SRC 162
+#define USB0_MASTER_CLK_SRC 163
+#define USB0_MOCK_UTMI_CLK_SRC 164
+#define WCSS_AHB_CLK_SRC 165
+#define PCIE0_PIPE_CLK_SRC 166
+#define PCIE1_PIPE_CLK_SRC 167
+#define USB0_PIPE_CLK_SRC 168
+#define GCC_USB0_PIPE_CLK 169
+#define GMAC0_RX_DIV_CLK_SRC 170
+#define GMAC0_TX_DIV_CLK_SRC 171
+#define GMAC1_RX_DIV_CLK_SRC 172
+#define GMAC1_TX_DIV_CLK_SRC 173
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
index 7deec14a6dee..02262d2ac899 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
@@ -240,7 +240,7 @@
#define PLL14 232
#define PLL14_VOTE 233
#define PLL18 234
-#define CE5_SRC 235
+#define CE5_A_CLK 235
#define CE5_H_CLK 236
#define CE5_CORE_CLK 237
#define CE3_SLEEP_CLK 238
@@ -283,5 +283,8 @@
#define EBI2_AON_CLK 281
#define NSSTCM_CLK_SRC 282
#define NSSTCM_CLK 283
+#define CE5_A_CLK_SRC 285
+#define CE5_H_CLK_SRC 286
+#define CE5_CORE_CLK_SRC 287

#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
index 8e2bec1c91bf..f9ea55811104 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
@@ -233,6 +233,7 @@
#define GCC_PCIE0_AXI_S_BRIDGE_CLK 224
#define GCC_PCIE0_RCHNG_CLK_SRC 225
#define GCC_PCIE0_RCHNG_CLK 226
+#define GCC_CRYPTO_PPE_CLK 227

#define GCC_BLSP1_BCR 0
#define GCC_BLSP1_QUP1_BCR 1
@@ -366,5 +367,22 @@
#define GCC_PCIE1_AHB_ARES 129
#define GCC_PCIE1_AXI_MASTER_STICKY_ARES 130
#define GCC_PCIE0_AXI_SLAVE_STICKY_ARES 131
+#define GCC_PPE_FULL_RESET 132
+#define GCC_UNIPHY0_SOFT_RESET 133
+#define GCC_UNIPHY0_XPCS_RESET 134
+#define GCC_UNIPHY1_SOFT_RESET 135
+#define GCC_UNIPHY1_XPCS_RESET 136
+#define GCC_UNIPHY2_SOFT_RESET 137
+#define GCC_UNIPHY2_XPCS_RESET 138
+#define GCC_EDMA_HW_RESET 139
+#define GCC_NSSPORT1_RESET 140
+#define GCC_NSSPORT2_RESET 141
+#define GCC_NSSPORT3_RESET 142
+#define GCC_NSSPORT4_RESET 143
+#define GCC_NSSPORT5_RESET 144
+#define GCC_NSSPORT6_RESET 145
+
+#define USB0_GDSC 0
+#define USB1_GDSC 1

#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-mdm9607.h b/include/dt-bindings/clock/qcom,gcc-mdm9607.h
new file mode 100644
index 000000000000..357a680a40da
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-mdm9607.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_9607_H
+#define _DT_BINDINGS_CLK_MSM_GCC_9607_H
+
+#define GPLL0 0
+#define GPLL0_EARLY 1
+#define GPLL1 2
+#define GPLL1_VOTE 3
+#define GPLL2 4
+#define GPLL2_EARLY 5
+#define PCNOC_BFDCD_CLK_SRC 6
+#define SYSTEM_NOC_BFDCD_CLK_SRC 7
+#define GCC_SMMU_CFG_CLK 8
+#define APSS_AHB_CLK_SRC 9
+#define GCC_QDSS_DAP_CLK 10
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 11
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 12
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 13
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 14
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 15
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 16
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 17
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 18
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 19
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 20
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 21
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 22
+#define BLSP1_UART1_APPS_CLK_SRC 23
+#define BLSP1_UART2_APPS_CLK_SRC 24
+#define CRYPTO_CLK_SRC 25
+#define GP1_CLK_SRC 26
+#define GP2_CLK_SRC 27
+#define GP3_CLK_SRC 28
+#define PDM2_CLK_SRC 29
+#define SDCC1_APPS_CLK_SRC 30
+#define SDCC2_APPS_CLK_SRC 31
+#define APSS_TCU_CLK_SRC 32
+#define USB_HS_SYSTEM_CLK_SRC 33
+#define GCC_BLSP1_AHB_CLK 34
+#define GCC_BLSP1_SLEEP_CLK 35
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 36
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 37
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 38
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 39
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 40
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 41
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 42
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 43
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 44
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 45
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 46
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 47
+#define GCC_BLSP1_UART1_APPS_CLK 48
+#define GCC_BLSP1_UART2_APPS_CLK 49
+#define GCC_BOOT_ROM_AHB_CLK 50
+#define GCC_CRYPTO_AHB_CLK 51
+#define GCC_CRYPTO_AXI_CLK 52
+#define GCC_CRYPTO_CLK 53
+#define GCC_GP1_CLK 54
+#define GCC_GP2_CLK 55
+#define GCC_GP3_CLK 56
+#define GCC_MSS_CFG_AHB_CLK 57
+#define GCC_PDM2_CLK 58
+#define GCC_PDM_AHB_CLK 59
+#define GCC_PRNG_AHB_CLK 60
+#define GCC_SDCC1_AHB_CLK 61
+#define GCC_SDCC1_APPS_CLK 62
+#define GCC_SDCC2_AHB_CLK 63
+#define GCC_SDCC2_APPS_CLK 64
+#define GCC_USB2A_PHY_SLEEP_CLK 65
+#define GCC_USB_HS_AHB_CLK 66
+#define GCC_USB_HS_SYSTEM_CLK 67
+#define GCC_APSS_TCU_CLK 68
+#define GCC_MSS_Q6_BIMC_AXI_CLK 69
+#define BIMC_PLL 70
+#define BIMC_PLL_VOTE 71
+#define BIMC_DDR_CLK_SRC 72
+#define BLSP1_UART3_APPS_CLK_SRC 73
+#define BLSP1_UART4_APPS_CLK_SRC 74
+#define BLSP1_UART5_APPS_CLK_SRC 75
+#define BLSP1_UART6_APPS_CLK_SRC 76
+#define GCC_BLSP1_UART3_APPS_CLK 77
+#define GCC_BLSP1_UART4_APPS_CLK 78
+#define GCC_BLSP1_UART5_APPS_CLK 79
+#define GCC_BLSP1_UART6_APPS_CLK 80
+#define GCC_APSS_AHB_CLK 81
+#define GCC_APSS_AXI_CLK 82
+#define GCC_USB_HS_PHY_CFG_AHB_CLK 83
+#define GCC_USB_HSIC_CLK_SRC 84
+#define GCC_USB_HSIC_IO_CAL_CLK_SRC 85
+#define GCC_USB_HSIC_SYSTEM_CLK_SRC 86
+
+/* Resets */
+#define USB2_HS_PHY_ONLY_BCR 0
+#define QUSB2_PHY_BCR 1
+#define GCC_MSS_RESTART 2
+#define USB_HS_HSIC_BCR 3
+#define USB_HS_BCR 4
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8909.h b/include/dt-bindings/clock/qcom,gcc-msm8909.h
new file mode 100644
index 000000000000..4394ba003425
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8909.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 Kernkonzept GmbH.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_8909_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_8909_H
+
+/* PLLs */
+#define GPLL0_EARLY 0
+#define GPLL0 1
+#define GPLL1 2
+#define GPLL1_VOTE 3
+#define GPLL2_EARLY 4
+#define GPLL2 5
+#define BIMC_PLL_EARLY 6
+#define BIMC_PLL 7
+
+/* RCGs */
+#define APSS_AHB_CLK_SRC 8
+#define BIMC_DDR_CLK_SRC 9
+#define BIMC_GPU_CLK_SRC 10
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 11
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 12
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 13
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 14
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 15
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 16
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 17
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 18
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 19
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 20
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 21
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 22
+#define BLSP1_UART1_APPS_CLK_SRC 23
+#define BLSP1_UART2_APPS_CLK_SRC 24
+#define BYTE0_CLK_SRC 25
+#define CAMSS_GP0_CLK_SRC 26
+#define CAMSS_GP1_CLK_SRC 27
+#define CAMSS_TOP_AHB_CLK_SRC 28
+#define CODEC_DIGCODEC_CLK_SRC 29
+#define CRYPTO_CLK_SRC 30
+#define CSI0_CLK_SRC 31
+#define CSI0PHYTIMER_CLK_SRC 32
+#define CSI1_CLK_SRC 33
+#define ESC0_CLK_SRC 34
+#define GFX3D_CLK_SRC 35
+#define GP1_CLK_SRC 36
+#define GP2_CLK_SRC 37
+#define GP3_CLK_SRC 38
+#define MCLK0_CLK_SRC 39
+#define MCLK1_CLK_SRC 40
+#define MDP_CLK_SRC 41
+#define PCLK0_CLK_SRC 42
+#define PCNOC_BFDCD_CLK_SRC 43
+#define PDM2_CLK_SRC 44
+#define SDCC1_APPS_CLK_SRC 45
+#define SDCC2_APPS_CLK_SRC 46
+#define SYSTEM_NOC_BFDCD_CLK_SRC 47
+#define ULTAUDIO_AHBFABRIC_CLK_SRC 48
+#define ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC 49
+#define ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC 50
+#define ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC 51
+#define ULTAUDIO_XO_CLK_SRC 52
+#define USB_HS_SYSTEM_CLK_SRC 53
+#define VCODEC0_CLK_SRC 54
+#define VFE0_CLK_SRC 55
+#define VSYNC_CLK_SRC 56
+
+/* Voteable Clocks */
+#define GCC_APSS_TCU_CLK 57
+#define GCC_BLSP1_AHB_CLK 58
+#define GCC_BLSP1_SLEEP_CLK 59
+#define GCC_BOOT_ROM_AHB_CLK 60
+#define GCC_CRYPTO_CLK 61
+#define GCC_CRYPTO_AHB_CLK 62
+#define GCC_CRYPTO_AXI_CLK 63
+#define GCC_GFX_TBU_CLK 64
+#define GCC_GFX_TCU_CLK 65
+#define GCC_GTCU_AHB_CLK 66
+#define GCC_MDP_TBU_CLK 67
+#define GCC_PRNG_AHB_CLK 68
+#define GCC_SMMU_CFG_CLK 69
+#define GCC_VENUS_TBU_CLK 70
+#define GCC_VFE_TBU_CLK 71
+
+/* Branches */
+#define GCC_BIMC_GFX_CLK 72
+#define GCC_BIMC_GPU_CLK 73
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 74
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 75
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 76
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 77
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 78
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 79
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 80
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 81
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 82
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 83
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 84
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 85
+#define GCC_BLSP1_UART1_APPS_CLK 86
+#define GCC_BLSP1_UART2_APPS_CLK 87
+#define GCC_CAMSS_AHB_CLK 88
+#define GCC_CAMSS_CSI0_CLK 89
+#define GCC_CAMSS_CSI0_AHB_CLK 90
+#define GCC_CAMSS_CSI0PHY_CLK 91
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 92
+#define GCC_CAMSS_CSI0PIX_CLK 93
+#define GCC_CAMSS_CSI0RDI_CLK 94
+#define GCC_CAMSS_CSI1_CLK 95
+#define GCC_CAMSS_CSI1_AHB_CLK 96
+#define GCC_CAMSS_CSI1PHY_CLK 97
+#define GCC_CAMSS_CSI1PIX_CLK 98
+#define GCC_CAMSS_CSI1RDI_CLK 99
+#define GCC_CAMSS_CSI_VFE0_CLK 100
+#define GCC_CAMSS_GP0_CLK 101
+#define GCC_CAMSS_GP1_CLK 102
+#define GCC_CAMSS_ISPIF_AHB_CLK 103
+#define GCC_CAMSS_MCLK0_CLK 104
+#define GCC_CAMSS_MCLK1_CLK 105
+#define GCC_CAMSS_TOP_AHB_CLK 106
+#define GCC_CAMSS_VFE0_CLK 107
+#define GCC_CAMSS_VFE_AHB_CLK 108
+#define GCC_CAMSS_VFE_AXI_CLK 109
+#define GCC_CODEC_DIGCODEC_CLK 110
+#define GCC_GP1_CLK 111
+#define GCC_GP2_CLK 112
+#define GCC_GP3_CLK 113
+#define GCC_MDSS_AHB_CLK 114
+#define GCC_MDSS_AXI_CLK 115
+#define GCC_MDSS_BYTE0_CLK 116
+#define GCC_MDSS_ESC0_CLK 117
+#define GCC_MDSS_MDP_CLK 118
+#define GCC_MDSS_PCLK0_CLK 119
+#define GCC_MDSS_VSYNC_CLK 120
+#define GCC_MSS_CFG_AHB_CLK 121
+#define GCC_MSS_Q6_BIMC_AXI_CLK 122
+#define GCC_OXILI_AHB_CLK 123
+#define GCC_OXILI_GFX3D_CLK 124
+#define GCC_PDM2_CLK 125
+#define GCC_PDM_AHB_CLK 126
+#define GCC_SDCC1_AHB_CLK 127
+#define GCC_SDCC1_APPS_CLK 128
+#define GCC_SDCC2_AHB_CLK 129
+#define GCC_SDCC2_APPS_CLK 130
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK 131
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK 132
+#define GCC_ULTAUDIO_AVSYNC_XO_CLK 133
+#define GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK 134
+#define GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK 135
+#define GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK 136
+#define GCC_ULTAUDIO_PCNOC_MPORT_CLK 137
+#define GCC_ULTAUDIO_PCNOC_SWAY_CLK 138
+#define GCC_ULTAUDIO_STC_XO_CLK 139
+#define GCC_USB2A_PHY_SLEEP_CLK 140
+#define GCC_USB_HS_AHB_CLK 141
+#define GCC_USB_HS_PHY_CFG_AHB_CLK 142
+#define GCC_USB_HS_SYSTEM_CLK 143
+#define GCC_VENUS0_AHB_CLK 144
+#define GCC_VENUS0_AXI_CLK 145
+#define GCC_VENUS0_CORE0_VCODEC0_CLK 146
+#define GCC_VENUS0_VCODEC0_CLK 147
+
+/* Resets */
+#define GCC_AUDIO_CORE_BCR 0
+#define GCC_BLSP1_BCR 1
+#define GCC_BLSP1_QUP1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_QUP3_BCR 4
+#define GCC_BLSP1_QUP4_BCR 5
+#define GCC_BLSP1_QUP5_BCR 6
+#define GCC_BLSP1_QUP6_BCR 7
+#define GCC_BLSP1_UART1_BCR 8
+#define GCC_BLSP1_UART2_BCR 9
+#define GCC_CAMSS_CSI0_BCR 10
+#define GCC_CAMSS_CSI0PHY_BCR 11
+#define GCC_CAMSS_CSI0PIX_BCR 12
+#define GCC_CAMSS_CSI0RDI_BCR 13
+#define GCC_CAMSS_CSI1_BCR 14
+#define GCC_CAMSS_CSI1PHY_BCR 15
+#define GCC_CAMSS_CSI1PIX_BCR 16
+#define GCC_CAMSS_CSI1RDI_BCR 17
+#define GCC_CAMSS_CSI_VFE0_BCR 18
+#define GCC_CAMSS_GP0_BCR 19
+#define GCC_CAMSS_GP1_BCR 20
+#define GCC_CAMSS_ISPIF_BCR 21
+#define GCC_CAMSS_MCLK0_BCR 22
+#define GCC_CAMSS_MCLK1_BCR 23
+#define GCC_CAMSS_PHY0_BCR 24
+#define GCC_CAMSS_TOP_BCR 25
+#define GCC_CAMSS_TOP_AHB_BCR 26
+#define GCC_CAMSS_VFE_BCR 27
+#define GCC_CRYPTO_BCR 28
+#define GCC_MDSS_BCR 29
+#define GCC_OXILI_BCR 30
+#define GCC_PDM_BCR 31
+#define GCC_PRNG_BCR 32
+#define GCC_QUSB2_PHY_BCR 33
+#define GCC_SDCC1_BCR 34
+#define GCC_SDCC2_BCR 35
+#define GCC_ULT_AUDIO_BCR 36
+#define GCC_USB2A_PHY_BCR 37
+#define GCC_USB2_HS_PHY_ONLY_BCR 38
+#define GCC_USB_HS_BCR 39
+#define GCC_VENUS0_BCR 40
+
+/* Subsystem Restart */
+#define GCC_MSS_RESTART 41
+
+/* Power Domains */
+#define MDSS_GDSC 0
+#define OXILI_GDSC 1
+#define VENUS_GDSC 2
+#define VENUS_CORE0_GDSC 3
+#define VFE_GDSC 4
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8917.h b/include/dt-bindings/clock/qcom,gcc-msm8917.h
new file mode 100644
index 000000000000..4e3897b3669d
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8917.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8917_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8917_H
+
+/* Clocks */
+#define APSS_AHB_CLK_SRC 0
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 1
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 2
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 3
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 4
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 5
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 6
+#define BLSP1_UART1_APPS_CLK_SRC 7
+#define BLSP1_UART2_APPS_CLK_SRC 8
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 9
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 10
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 11
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 12
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 13
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 14
+#define BLSP2_UART1_APPS_CLK_SRC 15
+#define BLSP2_UART2_APPS_CLK_SRC 16
+#define BYTE0_CLK_SRC 17
+#define CAMSS_GP0_CLK_SRC 18
+#define CAMSS_GP1_CLK_SRC 19
+#define CAMSS_TOP_AHB_CLK_SRC 20
+#define CCI_CLK_SRC 21
+#define CPP_CLK_SRC 22
+#define CRYPTO_CLK_SRC 23
+#define CSI0PHYTIMER_CLK_SRC 24
+#define CSI0_CLK_SRC 25
+#define CSI1PHYTIMER_CLK_SRC 26
+#define CSI1_CLK_SRC 27
+#define CSI2_CLK_SRC 28
+#define ESC0_CLK_SRC 29
+#define GCC_APSS_TCU_CLK 30
+#define GCC_BIMC_GFX_CLK 31
+#define GCC_BIMC_GPU_CLK 32
+#define GCC_BLSP1_AHB_CLK 33
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 34
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 35
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 36
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 37
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 38
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 39
+#define GCC_BLSP1_UART1_APPS_CLK 40
+#define GCC_BLSP1_UART2_APPS_CLK 41
+#define GCC_BLSP2_AHB_CLK 42
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 43
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 44
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 45
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 46
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 47
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 48
+#define GCC_BLSP2_UART1_APPS_CLK 49
+#define GCC_BLSP2_UART2_APPS_CLK 50
+#define GCC_BOOT_ROM_AHB_CLK 51
+#define GCC_CAMSS_AHB_CLK 52
+#define GCC_CAMSS_CCI_AHB_CLK 53
+#define GCC_CAMSS_CCI_CLK 54
+#define GCC_CAMSS_CPP_AHB_CLK 55
+#define GCC_CAMSS_CPP_CLK 56
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 57
+#define GCC_CAMSS_CSI0PHY_CLK 58
+#define GCC_CAMSS_CSI0PIX_CLK 59
+#define GCC_CAMSS_CSI0RDI_CLK 60
+#define GCC_CAMSS_CSI0_AHB_CLK 61
+#define GCC_CAMSS_CSI0_CLK 62
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 63
+#define GCC_CAMSS_CSI1PHY_CLK 64
+#define GCC_CAMSS_CSI1PIX_CLK 65
+#define GCC_CAMSS_CSI1RDI_CLK 66
+#define GCC_CAMSS_CSI1_AHB_CLK 67
+#define GCC_CAMSS_CSI1_CLK 68
+#define GCC_CAMSS_CSI2PHY_CLK 69
+#define GCC_CAMSS_CSI2PIX_CLK 70
+#define GCC_CAMSS_CSI2RDI_CLK 71
+#define GCC_CAMSS_CSI2_AHB_CLK 72
+#define GCC_CAMSS_CSI2_CLK 73
+#define GCC_CAMSS_CSI_VFE0_CLK 74
+#define GCC_CAMSS_CSI_VFE1_CLK 75
+#define GCC_CAMSS_GP0_CLK 76
+#define GCC_CAMSS_GP1_CLK 77
+#define GCC_CAMSS_ISPIF_AHB_CLK 78
+#define GCC_CAMSS_JPEG0_CLK 79
+#define GCC_CAMSS_JPEG_AHB_CLK 80
+#define GCC_CAMSS_JPEG_AXI_CLK 81
+#define GCC_CAMSS_MCLK0_CLK 82
+#define GCC_CAMSS_MCLK1_CLK 83
+#define GCC_CAMSS_MCLK2_CLK 84
+#define GCC_CAMSS_MICRO_AHB_CLK 85
+#define GCC_CAMSS_TOP_AHB_CLK 86
+#define GCC_CAMSS_VFE0_AHB_CLK 87
+#define GCC_CAMSS_VFE0_AXI_CLK 88
+#define GCC_CAMSS_VFE0_CLK 89
+#define GCC_CAMSS_VFE1_AHB_CLK 90
+#define GCC_CAMSS_VFE1_AXI_CLK 91
+#define GCC_CAMSS_VFE1_CLK 92
+#define GCC_CPP_TBU_CLK 93
+#define GCC_CRYPTO_AHB_CLK 94
+#define GCC_CRYPTO_AXI_CLK 95
+#define GCC_CRYPTO_CLK 96
+#define GCC_DCC_CLK 97
+#define GCC_GFX_TBU_CLK 98
+#define GCC_GFX_TCU_CLK 99
+#define GCC_GP1_CLK 100
+#define GCC_GP2_CLK 101
+#define GCC_GP3_CLK 102
+#define GCC_GTCU_AHB_CLK 103
+#define GCC_JPEG_TBU_CLK 104
+#define GCC_MDP_TBU_CLK 105
+#define GCC_MDSS_AHB_CLK 106
+#define GCC_MDSS_AXI_CLK 107
+#define GCC_MDSS_BYTE0_CLK 108
+#define GCC_MDSS_ESC0_CLK 109
+#define GCC_MDSS_MDP_CLK 110
+#define GCC_MDSS_PCLK0_CLK 111
+#define GCC_MDSS_VSYNC_CLK 112
+#define GCC_MSS_CFG_AHB_CLK 113
+#define GCC_MSS_Q6_BIMC_AXI_CLK 114
+#define GCC_OXILI_AHB_CLK 115
+#define GCC_OXILI_GFX3D_CLK 116
+#define GCC_PDM2_CLK 117
+#define GCC_PDM_AHB_CLK 118
+#define GCC_PRNG_AHB_CLK 119
+#define GCC_QDSS_DAP_CLK 120
+#define GCC_SDCC1_AHB_CLK 121
+#define GCC_SDCC1_APPS_CLK 122
+#define GCC_SDCC1_ICE_CORE_CLK 123
+#define GCC_SDCC2_AHB_CLK 124
+#define GCC_SDCC2_APPS_CLK 125
+#define GCC_SMMU_CFG_CLK 126
+#define GCC_USB2A_PHY_SLEEP_CLK 127
+#define GCC_USB_HS_AHB_CLK 128
+#define GCC_USB_HS_PHY_CFG_AHB_CLK 129
+#define GCC_USB_HS_SYSTEM_CLK 130
+#define GCC_VENUS0_AHB_CLK 131
+#define GCC_VENUS0_AXI_CLK 132
+#define GCC_VENUS0_CORE0_VCODEC0_CLK 133
+#define GCC_VENUS0_VCODEC0_CLK 134
+#define GCC_VENUS_TBU_CLK 135
+#define GCC_VFE1_TBU_CLK 136
+#define GCC_VFE_TBU_CLK 137
+#define GFX3D_CLK_SRC 138
+#define GP1_CLK_SRC 139
+#define GP2_CLK_SRC 140
+#define GP3_CLK_SRC 141
+#define GPLL0 142
+#define GPLL0_EARLY 143
+#define GPLL3 144
+#define GPLL3_EARLY 145
+#define GPLL4 146
+#define GPLL4_EARLY 147
+#define GPLL6 148
+#define GPLL6_EARLY 149
+#define JPEG0_CLK_SRC 150
+#define MCLK0_CLK_SRC 151
+#define MCLK1_CLK_SRC 152
+#define MCLK2_CLK_SRC 153
+#define MDP_CLK_SRC 154
+#define PCLK0_CLK_SRC 155
+#define PDM2_CLK_SRC 156
+#define SDCC1_APPS_CLK_SRC 157
+#define SDCC1_ICE_CORE_CLK_SRC 158
+#define SDCC2_APPS_CLK_SRC 159
+#define USB_HS_SYSTEM_CLK_SRC 160
+#define VCODEC0_CLK_SRC 161
+#define VFE0_CLK_SRC 162
+#define VFE1_CLK_SRC 163
+#define VSYNC_CLK_SRC 164
+#define GPLL0_SLEEP_CLK_SRC 165
+/* Additional MSM8937-specific clocks */
+#define MSM8937_BLSP1_QUP1_I2C_APPS_CLK_SRC 166
+#define MSM8937_BLSP1_QUP1_SPI_APPS_CLK_SRC 167
+#define MSM8937_BLSP2_QUP4_I2C_APPS_CLK_SRC 168
+#define MSM8937_BLSP2_QUP4_SPI_APPS_CLK_SRC 169
+#define MSM8937_BYTE1_CLK_SRC 170
+#define MSM8937_ESC1_CLK_SRC 171
+#define MSM8937_PCLK1_CLK_SRC 172
+#define MSM8937_GCC_BLSP1_QUP1_I2C_APPS_CLK 173
+#define MSM8937_GCC_BLSP1_QUP1_SPI_APPS_CLK 174
+#define MSM8937_GCC_BLSP2_QUP4_I2C_APPS_CLK 175
+#define MSM8937_GCC_BLSP2_QUP4_SPI_APPS_CLK 176
+#define MSM8937_GCC_MDSS_BYTE1_CLK 177
+#define MSM8937_GCC_MDSS_ESC1_CLK 178
+#define MSM8937_GCC_MDSS_PCLK1_CLK 179
+#define MSM8937_GCC_OXILI_AON_CLK 180
+#define MSM8937_GCC_OXILI_TIMER_CLK 181
+
+/* GCC block resets */
+#define GCC_CAMSS_MICRO_BCR 0
+#define GCC_MSS_BCR 1
+#define GCC_QUSB2_PHY_BCR 2
+#define GCC_USB_HS_BCR 3
+#define GCC_USB2_HS_PHY_ONLY_BCR 4
+
+/* GDSCs */
+#define CPP_GDSC 0
+#define JPEG_GDSC 1
+#define MDSS_GDSC 2
+#define OXILI_GX_GDSC 3
+#define VENUS_CORE0_GDSC 4
+#define VENUS_GDSC 5
+#define VFE0_GDSC 6
+#define VFE1_GDSC 7
+/* Additional MSM8937-specific GDSCs */
+#define MSM8937_OXILI_CX_GDSC 8
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8939.h b/include/dt-bindings/clock/qcom,gcc-msm8939.h
index 0634467c4ce5..9a9bc55b49af 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8939.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8939.h
@@ -192,6 +192,13 @@
#define GCC_VENUS0_CORE0_VCODEC0_CLK 183
#define GCC_VENUS0_CORE1_VCODEC0_CLK 184
#define GCC_OXILI_TIMER_CLK 185
+#define SYSTEM_MM_NOC_BFDCD_CLK_SRC 186
+#define CSI2_CLK_SRC 187
+#define GCC_CAMSS_CSI2_AHB_CLK 188
+#define GCC_CAMSS_CSI2_CLK 189
+#define GCC_CAMSS_CSI2PHY_CLK 190
+#define GCC_CAMSS_CSI2PIX_CLK 191
+#define GCC_CAMSS_CSI2RDI_CLK 192

/* Indexes for GDSCs */
#define BIMC_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8953.h b/include/dt-bindings/clock/qcom,gcc-msm8953.h
new file mode 100644
index 000000000000..13b4a62877e5
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8953.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8953_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8953_H
+
+/* Clocks */
+#define APC0_DROOP_DETECTOR_CLK_SRC 0
+#define APC1_DROOP_DETECTOR_CLK_SRC 1
+#define APSS_AHB_CLK_SRC 2
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 3
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 4
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 5
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 6
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 7
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 8
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 9
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 10
+#define BLSP1_UART1_APPS_CLK_SRC 11
+#define BLSP1_UART2_APPS_CLK_SRC 12
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 13
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 14
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 15
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 16
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 17
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 18
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 19
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 20
+#define BLSP2_UART1_APPS_CLK_SRC 21
+#define BLSP2_UART2_APPS_CLK_SRC 22
+#define BYTE0_CLK_SRC 23
+#define BYTE1_CLK_SRC 24
+#define CAMSS_GP0_CLK_SRC 25
+#define CAMSS_GP1_CLK_SRC 26
+#define CAMSS_TOP_AHB_CLK_SRC 27
+#define CCI_CLK_SRC 28
+#define CPP_CLK_SRC 29
+#define CRYPTO_CLK_SRC 30
+#define CSI0PHYTIMER_CLK_SRC 31
+#define CSI0P_CLK_SRC 32
+#define CSI0_CLK_SRC 33
+#define CSI1PHYTIMER_CLK_SRC 34
+#define CSI1P_CLK_SRC 35
+#define CSI1_CLK_SRC 36
+#define CSI2PHYTIMER_CLK_SRC 37
+#define CSI2P_CLK_SRC 38
+#define CSI2_CLK_SRC 39
+#define ESC0_CLK_SRC 40
+#define ESC1_CLK_SRC 41
+#define GCC_APC0_DROOP_DETECTOR_GPLL0_CLK 42
+#define GCC_APC1_DROOP_DETECTOR_GPLL0_CLK 43
+#define GCC_APSS_AHB_CLK 44
+#define GCC_APSS_AXI_CLK 45
+#define GCC_APSS_TCU_ASYNC_CLK 46
+#define GCC_BIMC_GFX_CLK 47
+#define GCC_BIMC_GPU_CLK 48
+#define GCC_BLSP1_AHB_CLK 49
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 50
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 51
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 52
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 53
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 54
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 55
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 56
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 57
+#define GCC_BLSP1_UART1_APPS_CLK 58
+#define GCC_BLSP1_UART2_APPS_CLK 59
+#define GCC_BLSP2_AHB_CLK 60
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 61
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 62
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 63
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 64
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 65
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 66
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 67
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 68
+#define GCC_BLSP2_UART1_APPS_CLK 69
+#define GCC_BLSP2_UART2_APPS_CLK 70
+#define GCC_BOOT_ROM_AHB_CLK 71
+#define GCC_CAMSS_AHB_CLK 72
+#define GCC_CAMSS_CCI_AHB_CLK 73
+#define GCC_CAMSS_CCI_CLK 74
+#define GCC_CAMSS_CPP_AHB_CLK 75
+#define GCC_CAMSS_CPP_AXI_CLK 76
+#define GCC_CAMSS_CPP_CLK 77
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 78
+#define GCC_CAMSS_CSI0PHY_CLK 79
+#define GCC_CAMSS_CSI0PIX_CLK 80
+#define GCC_CAMSS_CSI0RDI_CLK 81
+#define GCC_CAMSS_CSI0_AHB_CLK 82
+#define GCC_CAMSS_CSI0_CLK 83
+#define GCC_CAMSS_CSI0_CSIPHY_3P_CLK 84
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 85
+#define GCC_CAMSS_CSI1PHY_CLK 86
+#define GCC_CAMSS_CSI1PIX_CLK 87
+#define GCC_CAMSS_CSI1RDI_CLK 88
+#define GCC_CAMSS_CSI1_AHB_CLK 89
+#define GCC_CAMSS_CSI1_CLK 90
+#define GCC_CAMSS_CSI1_CSIPHY_3P_CLK 91
+#define GCC_CAMSS_CSI2PHYTIMER_CLK 92
+#define GCC_CAMSS_CSI2PHY_CLK 93
+#define GCC_CAMSS_CSI2PIX_CLK 94
+#define GCC_CAMSS_CSI2RDI_CLK 95
+#define GCC_CAMSS_CSI2_AHB_CLK 96
+#define GCC_CAMSS_CSI2_CLK 97
+#define GCC_CAMSS_CSI2_CSIPHY_3P_CLK 98
+#define GCC_CAMSS_CSI_VFE0_CLK 99
+#define GCC_CAMSS_CSI_VFE1_CLK 100
+#define GCC_CAMSS_GP0_CLK 101
+#define GCC_CAMSS_GP1_CLK 102
+#define GCC_CAMSS_ISPIF_AHB_CLK 103
+#define GCC_CAMSS_JPEG0_CLK 104
+#define GCC_CAMSS_JPEG_AHB_CLK 105
+#define GCC_CAMSS_JPEG_AXI_CLK 106
+#define GCC_CAMSS_MCLK0_CLK 107
+#define GCC_CAMSS_MCLK1_CLK 108
+#define GCC_CAMSS_MCLK2_CLK 109
+#define GCC_CAMSS_MCLK3_CLK 110
+#define GCC_CAMSS_MICRO_AHB_CLK 111
+#define GCC_CAMSS_TOP_AHB_CLK 112
+#define GCC_CAMSS_VFE0_AHB_CLK 113
+#define GCC_CAMSS_VFE0_AXI_CLK 114
+#define GCC_CAMSS_VFE0_CLK 115
+#define GCC_CAMSS_VFE1_AHB_CLK 116
+#define GCC_CAMSS_VFE1_AXI_CLK 117
+#define GCC_CAMSS_VFE1_CLK 118
+#define GCC_CPP_TBU_CLK 119
+#define GCC_CRYPTO_AHB_CLK 120
+#define GCC_CRYPTO_AXI_CLK 121
+#define GCC_CRYPTO_CLK 122
+#define GCC_DCC_CLK 123
+#define GCC_GP1_CLK 124
+#define GCC_GP2_CLK 125
+#define GCC_GP3_CLK 126
+#define GCC_JPEG_TBU_CLK 127
+#define GCC_MDP_TBU_CLK 128
+#define GCC_MDSS_AHB_CLK 129
+#define GCC_MDSS_AXI_CLK 130
+#define GCC_MDSS_BYTE0_CLK 131
+#define GCC_MDSS_BYTE1_CLK 132
+#define GCC_MDSS_ESC0_CLK 133
+#define GCC_MDSS_ESC1_CLK 134
+#define GCC_MDSS_MDP_CLK 135
+#define GCC_MDSS_PCLK0_CLK 136
+#define GCC_MDSS_PCLK1_CLK 137
+#define GCC_MDSS_VSYNC_CLK 138
+#define GCC_MSS_CFG_AHB_CLK 139
+#define GCC_MSS_Q6_BIMC_AXI_CLK 140
+#define GCC_OXILI_AHB_CLK 141
+#define GCC_OXILI_AON_CLK 142
+#define GCC_OXILI_GFX3D_CLK 143
+#define GCC_OXILI_TIMER_CLK 144
+#define GCC_PCNOC_USB3_AXI_CLK 145
+#define GCC_PDM2_CLK 146
+#define GCC_PDM_AHB_CLK 147
+#define GCC_PRNG_AHB_CLK 148
+#define GCC_QDSS_DAP_CLK 149
+#define GCC_QUSB_REF_CLK 150
+#define GCC_RBCPR_GFX_CLK 151
+#define GCC_SDCC1_AHB_CLK 152
+#define GCC_SDCC1_APPS_CLK 153
+#define GCC_SDCC1_ICE_CORE_CLK 154
+#define GCC_SDCC2_AHB_CLK 155
+#define GCC_SDCC2_APPS_CLK 156
+#define GCC_SMMU_CFG_CLK 157
+#define GCC_USB30_MASTER_CLK 158
+#define GCC_USB30_MOCK_UTMI_CLK 159
+#define GCC_USB30_SLEEP_CLK 160
+#define GCC_USB3_AUX_CLK 161
+#define GCC_USB3_PIPE_CLK 162
+#define GCC_USB_PHY_CFG_AHB_CLK 163
+#define GCC_USB_SS_REF_CLK 164
+#define GCC_VENUS0_AHB_CLK 165
+#define GCC_VENUS0_AXI_CLK 166
+#define GCC_VENUS0_CORE0_VCODEC0_CLK 167
+#define GCC_VENUS0_VCODEC0_CLK 168
+#define GCC_VENUS_TBU_CLK 169
+#define GCC_VFE1_TBU_CLK 170
+#define GCC_VFE_TBU_CLK 171
+#define GFX3D_CLK_SRC 172
+#define GP1_CLK_SRC 173
+#define GP2_CLK_SRC 174
+#define GP3_CLK_SRC 175
+#define GPLL0 176
+#define GPLL0_EARLY 177
+#define GPLL2 178
+#define GPLL2_EARLY 179
+#define GPLL3 180
+#define GPLL3_EARLY 181
+#define GPLL4 182
+#define GPLL4_EARLY 183
+#define GPLL6 184
+#define GPLL6_EARLY 185
+#define JPEG0_CLK_SRC 186
+#define MCLK0_CLK_SRC 187
+#define MCLK1_CLK_SRC 188
+#define MCLK2_CLK_SRC 189
+#define MCLK3_CLK_SRC 190
+#define MDP_CLK_SRC 191
+#define PCLK0_CLK_SRC 192
+#define PCLK1_CLK_SRC 193
+#define PDM2_CLK_SRC 194
+#define RBCPR_GFX_CLK_SRC 195
+#define SDCC1_APPS_CLK_SRC 196
+#define SDCC1_ICE_CORE_CLK_SRC 197
+#define SDCC2_APPS_CLK_SRC 198
+#define USB30_MASTER_CLK_SRC 199
+#define USB30_MOCK_UTMI_CLK_SRC 200
+#define USB3_AUX_CLK_SRC 201
+#define VCODEC0_CLK_SRC 202
+#define VFE0_CLK_SRC 203
+#define VFE1_CLK_SRC 204
+#define VSYNC_CLK_SRC 205
+
+/* GCC block resets */
+#define GCC_CAMSS_MICRO_BCR 0
+#define GCC_MSS_BCR 1
+#define GCC_QUSB2_PHY_BCR 2
+#define GCC_USB3PHY_PHY_BCR 3
+#define GCC_USB3_PHY_BCR 4
+#define GCC_USB_30_BCR 5
+#define GCC_MDSS_BCR 6
+#define GCC_CRYPTO_BCR 7
+#define GCC_SDCC1_BCR 8
+#define GCC_SDCC2_BCR 9
+
+/* GDSCs */
+#define CPP_GDSC 0
+#define JPEG_GDSC 1
+#define MDSS_GDSC 2
+#define OXILI_CX_GDSC 3
+#define OXILI_GX_GDSC 4
+#define USB30_GDSC 5
+#define VENUS_CORE0_GDSC 6
+#define VENUS_GDSC 7
+#define VFE0_GDSC 8
+#define VFE1_GDSC 9
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8976.h b/include/dt-bindings/clock/qcom,gcc-msm8976.h
new file mode 100644
index 000000000000..5351f48b2068
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8976.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2016-2021, AngeloGioacchino Del Regno
+ * <angelogioacchino.delregno@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8976_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8976_H
+
+#define GPLL0 0
+#define GPLL2 1
+#define GPLL3 2
+#define GPLL4 3
+#define GPLL6 4
+#define GPLL0_CLK_SRC 5
+#define GPLL2_CLK_SRC 6
+#define GPLL3_CLK_SRC 7
+#define GPLL4_CLK_SRC 8
+#define GPLL6_CLK_SRC 9
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 10
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 11
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 12
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 13
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 14
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 15
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 16
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 17
+#define GCC_BLSP1_UART1_APPS_CLK 18
+#define GCC_BLSP1_UART2_APPS_CLK 19
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 20
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 21
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 22
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 23
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 24
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 25
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 26
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 27
+#define GCC_BLSP2_UART1_APPS_CLK 28
+#define GCC_BLSP2_UART2_APPS_CLK 29
+#define GCC_CAMSS_CCI_AHB_CLK 30
+#define GCC_CAMSS_CCI_CLK 31
+#define GCC_CAMSS_CPP_AHB_CLK 32
+#define GCC_CAMSS_CPP_AXI_CLK 33
+#define GCC_CAMSS_CPP_CLK 34
+#define GCC_CAMSS_CSI0_AHB_CLK 35
+#define GCC_CAMSS_CSI0_CLK 36
+#define GCC_CAMSS_CSI0PHY_CLK 37
+#define GCC_CAMSS_CSI0PIX_CLK 38
+#define GCC_CAMSS_CSI0RDI_CLK 39
+#define GCC_CAMSS_CSI1_AHB_CLK 40
+#define GCC_CAMSS_CSI1_CLK 41
+#define GCC_CAMSS_CSI1PHY_CLK 42
+#define GCC_CAMSS_CSI1PIX_CLK 43
+#define GCC_CAMSS_CSI1RDI_CLK 44
+#define GCC_CAMSS_CSI2_AHB_CLK 45
+#define GCC_CAMSS_CSI2_CLK 46
+#define GCC_CAMSS_CSI2PHY_CLK 47
+#define GCC_CAMSS_CSI2PIX_CLK 48
+#define GCC_CAMSS_CSI2RDI_CLK 49
+#define GCC_CAMSS_CSI_VFE0_CLK 50
+#define GCC_CAMSS_CSI_VFE1_CLK 51
+#define GCC_CAMSS_GP0_CLK 52
+#define GCC_CAMSS_GP1_CLK 53
+#define GCC_CAMSS_ISPIF_AHB_CLK 54
+#define GCC_CAMSS_JPEG0_CLK 55
+#define GCC_CAMSS_JPEG_AHB_CLK 56
+#define GCC_CAMSS_JPEG_AXI_CLK 57
+#define GCC_CAMSS_MCLK0_CLK 58
+#define GCC_CAMSS_MCLK1_CLK 59
+#define GCC_CAMSS_MCLK2_CLK 60
+#define GCC_CAMSS_MICRO_AHB_CLK 61
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 62
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 63
+#define GCC_CAMSS_AHB_CLK 64
+#define GCC_CAMSS_TOP_AHB_CLK 65
+#define GCC_CAMSS_VFE0_CLK 66
+#define GCC_CAMSS_VFE_AHB_CLK 67
+#define GCC_CAMSS_VFE_AXI_CLK 68
+#define GCC_CAMSS_VFE1_AHB_CLK 69
+#define GCC_CAMSS_VFE1_AXI_CLK 70
+#define GCC_CAMSS_VFE1_CLK 71
+#define GCC_DCC_CLK 72
+#define GCC_GP1_CLK 73
+#define GCC_GP2_CLK 74
+#define GCC_GP3_CLK 75
+#define GCC_MDSS_AHB_CLK 76
+#define GCC_MDSS_AXI_CLK 77
+#define GCC_MDSS_ESC0_CLK 78
+#define GCC_MDSS_ESC1_CLK 79
+#define GCC_MDSS_MDP_CLK 80
+#define GCC_MDSS_VSYNC_CLK 81
+#define GCC_MSS_CFG_AHB_CLK 82
+#define GCC_MSS_Q6_BIMC_AXI_CLK 83
+#define GCC_PDM2_CLK 84
+#define GCC_PRNG_AHB_CLK 85
+#define GCC_PDM_AHB_CLK 86
+#define GCC_RBCPR_GFX_AHB_CLK 87
+#define GCC_RBCPR_GFX_CLK 88
+#define GCC_SDCC1_AHB_CLK 89
+#define GCC_SDCC1_APPS_CLK 90
+#define GCC_SDCC1_ICE_CORE_CLK 91
+#define GCC_SDCC2_AHB_CLK 92
+#define GCC_SDCC2_APPS_CLK 93
+#define GCC_SDCC3_AHB_CLK 94
+#define GCC_SDCC3_APPS_CLK 95
+#define GCC_USB2A_PHY_SLEEP_CLK 96
+#define GCC_USB_HS_PHY_CFG_AHB_CLK 97
+#define GCC_USB_FS_AHB_CLK 98
+#define GCC_USB_FS_IC_CLK 99
+#define GCC_USB_FS_SYSTEM_CLK 100
+#define GCC_USB_HS_AHB_CLK 101
+#define GCC_USB_HS_SYSTEM_CLK 102
+#define GCC_VENUS0_AHB_CLK 103
+#define GCC_VENUS0_AXI_CLK 104
+#define GCC_VENUS0_CORE0_VCODEC0_CLK 105
+#define GCC_VENUS0_CORE1_VCODEC0_CLK 106
+#define GCC_VENUS0_VCODEC0_CLK 107
+#define GCC_APSS_AHB_CLK 108
+#define GCC_APSS_AXI_CLK 109
+#define GCC_BLSP1_AHB_CLK 110
+#define GCC_BLSP2_AHB_CLK 111
+#define GCC_BOOT_ROM_AHB_CLK 112
+#define GCC_CRYPTO_AHB_CLK 113
+#define GCC_CRYPTO_AXI_CLK 114
+#define GCC_CRYPTO_CLK 115
+#define GCC_CPP_TBU_CLK 116
+#define GCC_APSS_TCU_CLK 117
+#define GCC_JPEG_TBU_CLK 118
+#define GCC_MDP_RT_TBU_CLK 119
+#define GCC_MDP_TBU_CLK 120
+#define GCC_SMMU_CFG_CLK 121
+#define GCC_VENUS_1_TBU_CLK 122
+#define GCC_VENUS_TBU_CLK 123
+#define GCC_VFE1_TBU_CLK 124
+#define GCC_VFE_TBU_CLK 125
+#define GCC_APS_0_CLK 126
+#define GCC_APS_1_CLK 127
+#define APS_0_CLK_SRC 128
+#define APS_1_CLK_SRC 129
+#define APSS_AHB_CLK_SRC 130
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 131
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 132
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 133
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 134
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 135
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 136
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 137
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 138
+#define BLSP1_UART1_APPS_CLK_SRC 139
+#define BLSP1_UART2_APPS_CLK_SRC 140
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 141
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 142
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 143
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 144
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 145
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 146
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 147
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 148
+#define BLSP2_UART1_APPS_CLK_SRC 149
+#define BLSP2_UART2_APPS_CLK_SRC 150
+#define CCI_CLK_SRC 151
+#define CPP_CLK_SRC 152
+#define CSI0_CLK_SRC 153
+#define CSI1_CLK_SRC 154
+#define CSI2_CLK_SRC 155
+#define CAMSS_GP0_CLK_SRC 156
+#define CAMSS_GP1_CLK_SRC 157
+#define JPEG0_CLK_SRC 158
+#define MCLK0_CLK_SRC 159
+#define MCLK1_CLK_SRC 160
+#define MCLK2_CLK_SRC 161
+#define CSI0PHYTIMER_CLK_SRC 162
+#define CSI1PHYTIMER_CLK_SRC 163
+#define CAMSS_TOP_AHB_CLK_SRC 164
+#define VFE0_CLK_SRC 165
+#define VFE1_CLK_SRC 166
+#define CRYPTO_CLK_SRC 167
+#define GP1_CLK_SRC 168
+#define GP2_CLK_SRC 169
+#define GP3_CLK_SRC 170
+#define ESC0_CLK_SRC 171
+#define ESC1_CLK_SRC 172
+#define MDP_CLK_SRC 173
+#define VSYNC_CLK_SRC 174
+#define PDM2_CLK_SRC 175
+#define RBCPR_GFX_CLK_SRC 176
+#define SDCC1_APPS_CLK_SRC 177
+#define SDCC1_ICE_CORE_CLK_SRC 178
+#define SDCC2_APPS_CLK_SRC 179
+#define SDCC3_APPS_CLK_SRC 180
+#define USB_FS_IC_CLK_SRC 181
+#define USB_FS_SYSTEM_CLK_SRC 182
+#define USB_HS_SYSTEM_CLK_SRC 183
+#define VCODEC0_CLK_SRC 184
+#define GCC_MDSS_BYTE0_CLK_SRC 185
+#define GCC_MDSS_BYTE1_CLK_SRC 186
+#define GCC_MDSS_BYTE0_CLK 187
+#define GCC_MDSS_BYTE1_CLK 188
+#define GCC_MDSS_PCLK0_CLK_SRC 189
+#define GCC_MDSS_PCLK1_CLK_SRC 190
+#define GCC_MDSS_PCLK0_CLK 191
+#define GCC_MDSS_PCLK1_CLK 192
+#define GCC_GFX3D_CLK_SRC 193
+#define GCC_GFX3D_OXILI_CLK 194
+#define GCC_GFX3D_BIMC_CLK 195
+#define GCC_GFX3D_OXILI_AHB_CLK 196
+#define GCC_GFX3D_OXILI_AON_CLK 197
+#define GCC_GFX3D_OXILI_GMEM_CLK 198
+#define GCC_GFX3D_OXILI_TIMER_CLK 199
+#define GCC_GFX3D_TBU0_CLK 200
+#define GCC_GFX3D_TBU1_CLK 201
+#define GCC_GFX3D_TCU_CLK 202
+#define GCC_GFX3D_GTCU_AHB_CLK 203
+
+/* GCC block resets */
+#define RST_CAMSS_MICRO_BCR 0
+#define RST_USB_HS_BCR 1
+#define RST_QUSB2_PHY_BCR 2
+#define RST_USB2_HS_PHY_ONLY_BCR 3
+#define RST_USB_HS_PHY_CFG_AHB_BCR 4
+#define RST_USB_FS_BCR 5
+#define RST_CAMSS_CSI1PIX_BCR 6
+#define RST_CAMSS_CSI_VFE1_BCR 7
+#define RST_CAMSS_VFE1_BCR 8
+#define RST_CAMSS_CPP_BCR 9
+#define RST_MSS_BCR 10
+
+/* GDSCs */
+#define VENUS_GDSC 0
+#define VENUS_CORE0_GDSC 1
+#define VENUS_CORE1_GDSC 2
+#define MDSS_GDSC 3
+#define JPEG_GDSC 4
+#define VFE0_GDSC 5
+#define VFE1_GDSC 6
+#define CPP_GDSC 7
+#define OXILI_GX_GDSC 8
+#define OXILI_CX_GDSC 9
+
+#endif /* _DT_BINDINGS_CLK_MSM_GCC_8976_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8994.h b/include/dt-bindings/clock/qcom,gcc-msm8994.h
index 507b8d6effd2..f6836f430bb5 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8994.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8994.h
@@ -148,6 +148,18 @@
#define GCC_USB30_SLEEP_CLK 138
#define GCC_USB_HS_AHB_CLK 139
#define GCC_USB_PHY_CFG_AHB2PHY_CLK 140
+#define CONFIG_NOC_CLK_SRC 141
+#define PERIPH_NOC_CLK_SRC 142
+#define SYSTEM_NOC_CLK_SRC 143
+#define GPLL0_OUT_MMSSCC 144
+#define GPLL0_OUT_MSSCC 145
+#define PCIE_0_PHY_LDO 146
+#define PCIE_1_PHY_LDO 147
+#define UFS_PHY_LDO 148
+#define USB_SS_PHY_LDO 149
+#define GCC_BOOT_ROM_AHB_CLK 150
+#define GCC_PRNG_AHB_CLK 151
+#define GCC_USB3_PHY_PIPE_CLK 152

/* GDSCs */
#define PCIE_GDSC 0
@@ -162,5 +174,6 @@
#define PCIE_PHY_0_RESET 2
#define PCIE_PHY_1_RESET 3
#define QUSB2_PHY_RESET 4
+#define MSS_RESET 5

#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
index 72c99e486d86..5b0dde080900 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -186,10 +186,22 @@
#define UFS_UNIPRO_CORE_CLK_SRC 177
#define GCC_MMSS_GPLL0_CLK 178
#define HMSS_GPLL0_CLK_SRC 179
+#define GCC_IM_SLEEP 180
+#define AGGRE2_SNOC_NORTH_AXI 181
+#define SSC_XO 182
+#define SSC_CNOC_AHBS_CLK 183
+#define GCC_MMSS_GPLL0_DIV_CLK 184
+#define GCC_GPU_GPLL0_DIV_CLK 185
+#define GCC_GPU_GPLL0_CLK 186
+#define HLOS1_VOTE_LPASS_CORE_SMMU_CLK 187
+#define HLOS1_VOTE_LPASS_ADSP_SMMU_CLK 188
+#define GCC_MSS_Q6_BIMC_AXI_CLK 189

#define PCIE_0_GDSC 0
#define UFS_GDSC 1
#define USB_30_GDSC 2
+#define LPASS_ADSP_GDSC 3
+#define LPASS_CORE_GDSC 4

#define GCC_BLSP1_QUP1_BCR 0
#define GCC_BLSP1_QUP2_BCR 1
diff --git a/include/dt-bindings/clock/qcom,gcc-qcm2290.h b/include/dt-bindings/clock/qcom,gcc-qcm2290.h
new file mode 100644
index 000000000000..8d907035f9e4
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-qcm2290.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QCM2290_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_QCM2290_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_AUX2 1
+#define GPLL1 2
+#define GPLL10 3
+#define GPLL11 4
+#define GPLL3 5
+#define GPLL3_OUT_MAIN 6
+#define GPLL4 7
+#define GPLL5 8
+#define GPLL6 9
+#define GPLL6_OUT_MAIN 10
+#define GPLL7 11
+#define GPLL8 12
+#define GPLL8_OUT_MAIN 13
+#define GPLL9 14
+#define GPLL9_OUT_MAIN 15
+#define GCC_AHB2PHY_CSI_CLK 16
+#define GCC_AHB2PHY_USB_CLK 17
+#define GCC_APC_VS_CLK 18
+#define GCC_BIMC_GPU_AXI_CLK 19
+#define GCC_BOOT_ROM_AHB_CLK 20
+#define GCC_CAM_THROTTLE_NRT_CLK 21
+#define GCC_CAM_THROTTLE_RT_CLK 22
+#define GCC_CAMERA_AHB_CLK 23
+#define GCC_CAMERA_XO_CLK 24
+#define GCC_CAMSS_AXI_CLK 25
+#define GCC_CAMSS_AXI_CLK_SRC 26
+#define GCC_CAMSS_CAMNOC_ATB_CLK 27
+#define GCC_CAMSS_CAMNOC_NTS_XO_CLK 28
+#define GCC_CAMSS_CCI_0_CLK 29
+#define GCC_CAMSS_CCI_CLK_SRC 30
+#define GCC_CAMSS_CPHY_0_CLK 31
+#define GCC_CAMSS_CPHY_1_CLK 32
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 33
+#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 34
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 35
+#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 36
+#define GCC_CAMSS_MCLK0_CLK 37
+#define GCC_CAMSS_MCLK0_CLK_SRC 38
+#define GCC_CAMSS_MCLK1_CLK 39
+#define GCC_CAMSS_MCLK1_CLK_SRC 40
+#define GCC_CAMSS_MCLK2_CLK 41
+#define GCC_CAMSS_MCLK2_CLK_SRC 42
+#define GCC_CAMSS_MCLK3_CLK 43
+#define GCC_CAMSS_MCLK3_CLK_SRC 44
+#define GCC_CAMSS_NRT_AXI_CLK 45
+#define GCC_CAMSS_OPE_AHB_CLK 46
+#define GCC_CAMSS_OPE_AHB_CLK_SRC 47
+#define GCC_CAMSS_OPE_CLK 48
+#define GCC_CAMSS_OPE_CLK_SRC 49
+#define GCC_CAMSS_RT_AXI_CLK 50
+#define GCC_CAMSS_TFE_0_CLK 51
+#define GCC_CAMSS_TFE_0_CLK_SRC 52
+#define GCC_CAMSS_TFE_0_CPHY_RX_CLK 53
+#define GCC_CAMSS_TFE_0_CSID_CLK 54
+#define GCC_CAMSS_TFE_0_CSID_CLK_SRC 55
+#define GCC_CAMSS_TFE_1_CLK 56
+#define GCC_CAMSS_TFE_1_CLK_SRC 57
+#define GCC_CAMSS_TFE_1_CPHY_RX_CLK 58
+#define GCC_CAMSS_TFE_1_CSID_CLK 59
+#define GCC_CAMSS_TFE_1_CSID_CLK_SRC 60
+#define GCC_CAMSS_TFE_CPHY_RX_CLK_SRC 61
+#define GCC_CAMSS_TOP_AHB_CLK 62
+#define GCC_CAMSS_TOP_AHB_CLK_SRC 63
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 64
+#define GCC_CPUSS_AHB_CLK 65
+#define GCC_CPUSS_AHB_CLK_SRC 66
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 67
+#define GCC_CPUSS_GNOC_CLK 68
+#define GCC_CPUSS_THROTTLE_CORE_CLK 69
+#define GCC_CPUSS_THROTTLE_XO_CLK 70
+#define GCC_DISP_AHB_CLK 71
+#define GCC_DISP_GPLL0_CLK_SRC 72
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 73
+#define GCC_DISP_HF_AXI_CLK 74
+#define GCC_DISP_THROTTLE_CORE_CLK 75
+#define GCC_DISP_XO_CLK 76
+#define GCC_GP1_CLK 77
+#define GCC_GP1_CLK_SRC 78
+#define GCC_GP2_CLK 79
+#define GCC_GP2_CLK_SRC 80
+#define GCC_GP3_CLK 81
+#define GCC_GP3_CLK_SRC 82
+#define GCC_GPU_CFG_AHB_CLK 83
+#define GCC_GPU_GPLL0_CLK_SRC 84
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 85
+#define GCC_GPU_IREF_CLK 86
+#define GCC_GPU_MEMNOC_GFX_CLK 87
+#define GCC_GPU_SNOC_DVM_GFX_CLK 88
+#define GCC_GPU_THROTTLE_CORE_CLK 89
+#define GCC_GPU_THROTTLE_XO_CLK 90
+#define GCC_PDM2_CLK 91
+#define GCC_PDM2_CLK_SRC 92
+#define GCC_PDM_AHB_CLK 93
+#define GCC_PDM_XO4_CLK 94
+#define GCC_PWM0_XO512_CLK 95
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 96
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 97
+#define GCC_QMIP_CPUSS_CFG_AHB_CLK 98
+#define GCC_QMIP_DISP_AHB_CLK 99
+#define GCC_QMIP_GPU_CFG_AHB_CLK 100
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 101
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 102
+#define GCC_QUPV3_WRAP0_CORE_CLK 103
+#define GCC_QUPV3_WRAP0_S0_CLK 104
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 105
+#define GCC_QUPV3_WRAP0_S1_CLK 106
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 107
+#define GCC_QUPV3_WRAP0_S2_CLK 108
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 109
+#define GCC_QUPV3_WRAP0_S3_CLK 110
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 111
+#define GCC_QUPV3_WRAP0_S4_CLK 112
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 113
+#define GCC_QUPV3_WRAP0_S5_CLK 114
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 115
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 116
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 117
+#define GCC_SDCC1_AHB_CLK 118
+#define GCC_SDCC1_APPS_CLK 119
+#define GCC_SDCC1_APPS_CLK_SRC 120
+#define GCC_SDCC1_ICE_CORE_CLK 121
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 122
+#define GCC_SDCC2_AHB_CLK 123
+#define GCC_SDCC2_APPS_CLK 124
+#define GCC_SDCC2_APPS_CLK_SRC 125
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 126
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 127
+#define GCC_USB30_PRIM_MASTER_CLK 128
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 129
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 130
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 131
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV 132
+#define GCC_USB30_PRIM_SLEEP_CLK 133
+#define GCC_USB3_PRIM_CLKREF_CLK 134
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 135
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 136
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 137
+#define GCC_VCODEC0_AXI_CLK 138
+#define GCC_VENUS_AHB_CLK 139
+#define GCC_VENUS_CTL_AXI_CLK 140
+#define GCC_VIDEO_AHB_CLK 141
+#define GCC_VIDEO_AXI0_CLK 142
+#define GCC_VIDEO_THROTTLE_CORE_CLK 143
+#define GCC_VIDEO_VCODEC0_SYS_CLK 144
+#define GCC_VIDEO_VENUS_CLK_SRC 145
+#define GCC_VIDEO_VENUS_CTL_CLK 146
+#define GCC_VIDEO_XO_CLK 147
+
+/* GCC resets */
+#define GCC_CAMSS_OPE_BCR 0
+#define GCC_CAMSS_TFE_BCR 1
+#define GCC_CAMSS_TOP_BCR 2
+#define GCC_GPU_BCR 3
+#define GCC_MMSS_BCR 4
+#define GCC_PDM_BCR 5
+#define GCC_QUPV3_WRAPPER_0_BCR 6
+#define GCC_SDCC1_BCR 7
+#define GCC_SDCC2_BCR 8
+#define GCC_USB30_PRIM_BCR 9
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 10
+#define GCC_VCODEC0_BCR 11
+#define GCC_VENUS_BCR 12
+#define GCC_VIDEO_INTERFACE_BCR 13
+#define GCC_QUSB2PHY_PRIM_BCR 14
+#define GCC_USB3_PHY_PRIM_SP0_BCR 15
+#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 16
+
+/* Indexes for GDSCs */
+#define GCC_CAMSS_TOP_GDSC 0
+#define GCC_USB30_PRIM_GDSC 1
+#define GCC_VCODEC0_GDSC 2
+#define GCC_VENUS_GDSC 3
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 4
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 5
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 6
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 7
+
+#endif
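
These indices are consumed through the provider's #clock-cells, #reset-cells and #power-domain-cells. A minimal, hypothetical QCM2290 consumer node — unit address, labels and property values below are illustrative only, not taken from this patch:

	#include <dt-bindings/clock/qcom,gcc-qcm2290.h>

	sdhc_1: mmc@4744000 {	/* illustrative node and address */
		clocks = <&gcc GCC_SDCC1_AHB_CLK>,
			 <&gcc GCC_SDCC1_APPS_CLK>;
		clock-names = "iface", "core";
		resets = <&gcc GCC_SDCC1_BCR>;
	};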
diff --git a/include/dt-bindings/clock/qcom,gcc-qcs404.h b/include/dt-bindings/clock/qcom,gcc-qcs404.h
index bc3051543347..126a51898571 100644
--- a/include/dt-bindings/clock/qcom,gcc-qcs404.h
+++ b/include/dt-bindings/clock/qcom,gcc-qcs404.h
@@ -177,4 +177,8 @@
#define GCC_PCIE_0_PIPE_ARES 21
#define GCC_WDSP_RESTART 22

+/* Indexes for GDSCs */
+#define MDSS_GDSC 0
+#define OXILI_GDSC 1
+
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sc7280.h b/include/dt-bindings/clock/qcom,gcc-sc7280.h
index 4394f15111c6..3d5724b79bff 100644
--- a/include/dt-bindings/clock/qcom,gcc-sc7280.h
+++ b/include/dt-bindings/clock/qcom,gcc-sc7280.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
diff --git a/include/dt-bindings/clock/qcom,gcc-sc8180x.h b/include/dt-bindings/clock/qcom,gcc-sc8180x.h
index e893415ae13d..b9d8438a15ff 100644
--- a/include/dt-bindings/clock/qcom,gcc-sc8180x.h
+++ b/include/dt-bindings/clock/qcom,gcc-sc8180x.h
@@ -246,6 +246,19 @@
#define GCC_PCIE_3_CLKREF_CLK 236
#define GCC_USB3_PRIM_CLKREF_CLK 237
#define GCC_USB3_SEC_CLKREF_CLK 238
+#define GCC_UFS_MEM_CLKREF_EN 239
+#define GCC_UFS_CARD_CLKREF_EN 240
+#define GPLL9 241
+#define GCC_CAMERA_AHB_CLK 242
+#define GCC_CAMERA_XO_CLK 243
+#define GCC_CPUSS_DVM_BUS_CLK 244
+#define GCC_CPUSS_GNOC_CLK 245
+#define GCC_DISP_AHB_CLK 246
+#define GCC_DISP_XO_CLK 247
+#define GCC_GPU_CFG_AHB_CLK 248
+#define GCC_NPU_CFG_AHB_CLK 249
+#define GCC_VIDEO_AHB_CLK 250
+#define GCC_VIDEO_XO_CLK 251

#define GCC_EMAC_BCR 0
#define GCC_GPU_BCR 1
@@ -292,6 +305,10 @@
#define GCC_VIDEO_AXI0_CLK_BCR 42
#define GCC_VIDEO_AXI1_CLK_BCR 43
#define GCC_USB3_DP_PHY_SEC_BCR 44
+#define GCC_USB3_UNIPHY_MP0_BCR 45
+#define GCC_USB3_UNIPHY_MP1_BCR 46
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 47
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 48

/* GCC GDSCRs */
#define EMAC_GDSC 0
diff --git a/include/dt-bindings/clock/qcom,gcc-sc8280xp.h b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
new file mode 100644
index 000000000000..845491591784
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
@@ -0,0 +1,508 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_DIREWOLF_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_DIREWOLF_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL2 2
+#define GCC_GPLL4 3
+#define GCC_GPLL7 4
+#define GCC_GPLL8 5
+#define GCC_GPLL9 6
+#define GCC_AGGRE_NOC_PCIE0_TUNNEL_AXI_CLK 7
+#define GCC_AGGRE_NOC_PCIE1_TUNNEL_AXI_CLK 8
+#define GCC_AGGRE_NOC_PCIE_4_AXI_CLK 9
+#define GCC_AGGRE_NOC_PCIE_SOUTH_SF_AXI_CLK 10
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 11
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 12
+#define GCC_AGGRE_USB3_MP_AXI_CLK 13
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 14
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 15
+#define GCC_AGGRE_USB4_1_AXI_CLK 16
+#define GCC_AGGRE_USB4_AXI_CLK 17
+#define GCC_AGGRE_USB_NOC_AXI_CLK 18
+#define GCC_AGGRE_USB_NOC_NORTH_AXI_CLK 19
+#define GCC_AGGRE_USB_NOC_SOUTH_AXI_CLK 20
+#define GCC_AHB2PHY0_CLK 21
+#define GCC_AHB2PHY2_CLK 22
+#define GCC_BOOT_ROM_AHB_CLK 23
+#define GCC_CAMERA_AHB_CLK 24
+#define GCC_CAMERA_HF_AXI_CLK 25
+#define GCC_CAMERA_SF_AXI_CLK 26
+#define GCC_CAMERA_THROTTLE_NRT_AXI_CLK 27
+#define GCC_CAMERA_THROTTLE_RT_AXI_CLK 28
+#define GCC_CAMERA_THROTTLE_XO_CLK 29
+#define GCC_CAMERA_XO_CLK 30
+#define GCC_CFG_NOC_USB3_MP_AXI_CLK 31
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 32
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 33
+#define GCC_CNOC_PCIE0_TUNNEL_CLK 34
+#define GCC_CNOC_PCIE1_TUNNEL_CLK 35
+#define GCC_CNOC_PCIE4_QX_CLK 36
+#define GCC_DDRSS_GPU_AXI_CLK 37
+#define GCC_DDRSS_PCIE_SF_TBU_CLK 38
+#define GCC_DISP1_AHB_CLK 39
+#define GCC_DISP1_HF_AXI_CLK 40
+#define GCC_DISP1_SF_AXI_CLK 41
+#define GCC_DISP1_THROTTLE_NRT_AXI_CLK 42
+#define GCC_DISP1_THROTTLE_RT_AXI_CLK 43
+#define GCC_DISP1_XO_CLK 44
+#define GCC_DISP_AHB_CLK 45
+#define GCC_DISP_HF_AXI_CLK 46
+#define GCC_DISP_SF_AXI_CLK 47
+#define GCC_DISP_THROTTLE_NRT_AXI_CLK 48
+#define GCC_DISP_THROTTLE_RT_AXI_CLK 49
+#define GCC_DISP_XO_CLK 50
+#define GCC_EMAC0_AXI_CLK 51
+#define GCC_EMAC0_PTP_CLK 52
+#define GCC_EMAC0_PTP_CLK_SRC 53
+#define GCC_EMAC0_RGMII_CLK 54
+#define GCC_EMAC0_RGMII_CLK_SRC 55
+#define GCC_EMAC0_SLV_AHB_CLK 56
+#define GCC_EMAC1_AXI_CLK 57
+#define GCC_EMAC1_PTP_CLK 58
+#define GCC_EMAC1_PTP_CLK_SRC 59
+#define GCC_EMAC1_RGMII_CLK 60
+#define GCC_EMAC1_RGMII_CLK_SRC 61
+#define GCC_EMAC1_SLV_AHB_CLK 62
+#define GCC_GP1_CLK 63
+#define GCC_GP1_CLK_SRC 64
+#define GCC_GP2_CLK 65
+#define GCC_GP2_CLK_SRC 66
+#define GCC_GP3_CLK 67
+#define GCC_GP3_CLK_SRC 68
+#define GCC_GP4_CLK 69
+#define GCC_GP4_CLK_SRC 70
+#define GCC_GP5_CLK 71
+#define GCC_GP5_CLK_SRC 72
+#define GCC_GPU_CFG_AHB_CLK 73
+#define GCC_GPU_GPLL0_CLK_SRC 74
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 75
+#define GCC_GPU_IREF_EN 76
+#define GCC_GPU_MEMNOC_GFX_CLK 77
+#define GCC_GPU_SNOC_DVM_GFX_CLK 78
+#define GCC_GPU_TCU_THROTTLE_AHB_CLK 79
+#define GCC_GPU_TCU_THROTTLE_CLK 80
+#define GCC_PCIE0_PHY_RCHNG_CLK 81
+#define GCC_PCIE1_PHY_RCHNG_CLK 82
+#define GCC_PCIE2A_PHY_RCHNG_CLK 83
+#define GCC_PCIE2B_PHY_RCHNG_CLK 84
+#define GCC_PCIE3A_PHY_RCHNG_CLK 85
+#define GCC_PCIE3B_PHY_RCHNG_CLK 86
+#define GCC_PCIE4_PHY_RCHNG_CLK 87
+#define GCC_PCIE_0_AUX_CLK 88
+#define GCC_PCIE_0_AUX_CLK_SRC 89
+#define GCC_PCIE_0_CFG_AHB_CLK 90
+#define GCC_PCIE_0_MSTR_AXI_CLK 91
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 92
+#define GCC_PCIE_0_PIPE_CLK 93
+#define GCC_PCIE_0_SLV_AXI_CLK 94
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 95
+#define GCC_PCIE_1_AUX_CLK 96
+#define GCC_PCIE_1_AUX_CLK_SRC 97
+#define GCC_PCIE_1_CFG_AHB_CLK 98
+#define GCC_PCIE_1_MSTR_AXI_CLK 99
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 100
+#define GCC_PCIE_1_PIPE_CLK 101
+#define GCC_PCIE_1_SLV_AXI_CLK 102
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 103
+#define GCC_PCIE_2A2B_CLKREF_CLK 104
+#define GCC_PCIE_2A_AUX_CLK 105
+#define GCC_PCIE_2A_AUX_CLK_SRC 106
+#define GCC_PCIE_2A_CFG_AHB_CLK 107
+#define GCC_PCIE_2A_MSTR_AXI_CLK 108
+#define GCC_PCIE_2A_PHY_RCHNG_CLK_SRC 109
+#define GCC_PCIE_2A_PIPE_CLK 110
+#define GCC_PCIE_2A_PIPE_CLK_SRC 111
+#define GCC_PCIE_2A_PIPE_DIV_CLK_SRC 112
+#define GCC_PCIE_2A_PIPEDIV2_CLK 113
+#define GCC_PCIE_2A_SLV_AXI_CLK 114
+#define GCC_PCIE_2A_SLV_Q2A_AXI_CLK 115
+#define GCC_PCIE_2B_AUX_CLK 116
+#define GCC_PCIE_2B_AUX_CLK_SRC 117
+#define GCC_PCIE_2B_CFG_AHB_CLK 118
+#define GCC_PCIE_2B_MSTR_AXI_CLK 119
+#define GCC_PCIE_2B_PHY_RCHNG_CLK_SRC 120
+#define GCC_PCIE_2B_PIPE_CLK 121
+#define GCC_PCIE_2B_PIPE_CLK_SRC 122
+#define GCC_PCIE_2B_PIPE_DIV_CLK_SRC 123
+#define GCC_PCIE_2B_PIPEDIV2_CLK 124
+#define GCC_PCIE_2B_SLV_AXI_CLK 125
+#define GCC_PCIE_2B_SLV_Q2A_AXI_CLK 126
+#define GCC_PCIE_3A3B_CLKREF_CLK 127
+#define GCC_PCIE_3A_AUX_CLK 128
+#define GCC_PCIE_3A_AUX_CLK_SRC 129
+#define GCC_PCIE_3A_CFG_AHB_CLK 130
+#define GCC_PCIE_3A_MSTR_AXI_CLK 131
+#define GCC_PCIE_3A_PHY_RCHNG_CLK_SRC 132
+#define GCC_PCIE_3A_PIPE_CLK 133
+#define GCC_PCIE_3A_PIPE_CLK_SRC 134
+#define GCC_PCIE_3A_PIPE_DIV_CLK_SRC 135
+#define GCC_PCIE_3A_PIPEDIV2_CLK 136
+#define GCC_PCIE_3A_SLV_AXI_CLK 137
+#define GCC_PCIE_3A_SLV_Q2A_AXI_CLK 138
+#define GCC_PCIE_3B_AUX_CLK 139
+#define GCC_PCIE_3B_AUX_CLK_SRC 140
+#define GCC_PCIE_3B_CFG_AHB_CLK 141
+#define GCC_PCIE_3B_MSTR_AXI_CLK 142
+#define GCC_PCIE_3B_PHY_RCHNG_CLK_SRC 143
+#define GCC_PCIE_3B_PIPE_CLK 144
+#define GCC_PCIE_3B_PIPE_CLK_SRC 145
+#define GCC_PCIE_3B_PIPE_DIV_CLK_SRC 146
+#define GCC_PCIE_3B_PIPEDIV2_CLK 147
+#define GCC_PCIE_3B_SLV_AXI_CLK 148
+#define GCC_PCIE_3B_SLV_Q2A_AXI_CLK 149
+#define GCC_PCIE_4_AUX_CLK 150
+#define GCC_PCIE_4_AUX_CLK_SRC 151
+#define GCC_PCIE_4_CFG_AHB_CLK 152
+#define GCC_PCIE_4_CLKREF_CLK 153
+#define GCC_PCIE_4_MSTR_AXI_CLK 154
+#define GCC_PCIE_4_PHY_RCHNG_CLK_SRC 155
+#define GCC_PCIE_4_PIPE_CLK 156
+#define GCC_PCIE_4_PIPE_CLK_SRC 157
+#define GCC_PCIE_4_PIPE_DIV_CLK_SRC 158
+#define GCC_PCIE_4_PIPEDIV2_CLK 159
+#define GCC_PCIE_4_SLV_AXI_CLK 160
+#define GCC_PCIE_4_SLV_Q2A_AXI_CLK 161
+#define GCC_PCIE_RSCC_AHB_CLK 162
+#define GCC_PCIE_RSCC_XO_CLK 163
+#define GCC_PCIE_RSCC_XO_CLK_SRC 164
+#define GCC_PCIE_THROTTLE_CFG_CLK 165
+#define GCC_PDM2_CLK 166
+#define GCC_PDM2_CLK_SRC 167
+#define GCC_PDM_AHB_CLK 168
+#define GCC_PDM_XO4_CLK 169
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 170
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 171
+#define GCC_QMIP_DISP1_AHB_CLK 172
+#define GCC_QMIP_DISP1_ROT_AHB_CLK 173
+#define GCC_QMIP_DISP_AHB_CLK 174
+#define GCC_QMIP_DISP_ROT_AHB_CLK 175
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 176
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 177
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 178
+#define GCC_QUPV3_WRAP0_CORE_CLK 179
+#define GCC_QUPV3_WRAP0_QSPI0_CLK 180
+#define GCC_QUPV3_WRAP0_S0_CLK 181
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 182
+#define GCC_QUPV3_WRAP0_S1_CLK 183
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 184
+#define GCC_QUPV3_WRAP0_S2_CLK 185
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 186
+#define GCC_QUPV3_WRAP0_S3_CLK 187
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 188
+#define GCC_QUPV3_WRAP0_S4_CLK 189
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 190
+#define GCC_QUPV3_WRAP0_S4_DIV_CLK_SRC 191
+#define GCC_QUPV3_WRAP0_S5_CLK 192
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 193
+#define GCC_QUPV3_WRAP0_S6_CLK 194
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 195
+#define GCC_QUPV3_WRAP0_S7_CLK 196
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 197
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 198
+#define GCC_QUPV3_WRAP1_CORE_CLK 199
+#define GCC_QUPV3_WRAP1_QSPI0_CLK 200
+#define GCC_QUPV3_WRAP1_S0_CLK 201
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 202
+#define GCC_QUPV3_WRAP1_S1_CLK 203
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 204
+#define GCC_QUPV3_WRAP1_S2_CLK 205
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 206
+#define GCC_QUPV3_WRAP1_S3_CLK 207
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 208
+#define GCC_QUPV3_WRAP1_S4_CLK 209
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 210
+#define GCC_QUPV3_WRAP1_S4_DIV_CLK_SRC 211
+#define GCC_QUPV3_WRAP1_S5_CLK 212
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 213
+#define GCC_QUPV3_WRAP1_S6_CLK 214
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 215
+#define GCC_QUPV3_WRAP1_S7_CLK 216
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 217
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 218
+#define GCC_QUPV3_WRAP2_CORE_CLK 219
+#define GCC_QUPV3_WRAP2_QSPI0_CLK 220
+#define GCC_QUPV3_WRAP2_S0_CLK 221
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 222
+#define GCC_QUPV3_WRAP2_S1_CLK 223
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 224
+#define GCC_QUPV3_WRAP2_S2_CLK 225
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 226
+#define GCC_QUPV3_WRAP2_S3_CLK 227
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 228
+#define GCC_QUPV3_WRAP2_S4_CLK 229
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 230
+#define GCC_QUPV3_WRAP2_S4_DIV_CLK_SRC 231
+#define GCC_QUPV3_WRAP2_S5_CLK 232
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 233
+#define GCC_QUPV3_WRAP2_S6_CLK 234
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 235
+#define GCC_QUPV3_WRAP2_S7_CLK 236
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 237
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 238
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 239
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 240
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 241
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 242
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 243
+#define GCC_SDCC2_AHB_CLK 244
+#define GCC_SDCC2_APPS_CLK 245
+#define GCC_SDCC2_APPS_CLK_SRC 246
+#define GCC_SDCC4_AHB_CLK 247
+#define GCC_SDCC4_APPS_CLK 248
+#define GCC_SDCC4_APPS_CLK_SRC 249
+#define GCC_SYS_NOC_USB_AXI_CLK 250
+#define GCC_UFS_1_CARD_CLKREF_CLK 251
+#define GCC_UFS_CARD_AHB_CLK 252
+#define GCC_UFS_CARD_AXI_CLK 253
+#define GCC_UFS_CARD_AXI_CLK_SRC 254
+#define GCC_UFS_CARD_CLKREF_CLK 255
+#define GCC_UFS_CARD_ICE_CORE_CLK 256
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 257
+#define GCC_UFS_CARD_PHY_AUX_CLK 258
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 259
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 260
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK_SRC 261
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 262
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK_SRC 263
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 264
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK_SRC 265
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 266
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 267
+#define GCC_UFS_PHY_AHB_CLK 268
+#define GCC_UFS_PHY_AXI_CLK 269
+#define GCC_UFS_PHY_AXI_CLK_SRC 270
+#define GCC_UFS_PHY_ICE_CORE_CLK 271
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 272
+#define GCC_UFS_PHY_PHY_AUX_CLK 273
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 274
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 275
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 276
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 277
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 278
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 279
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 280
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 281
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 282
+#define GCC_UFS_REF_CLKREF_CLK 283
+#define GCC_USB2_HS0_CLKREF_CLK 284
+#define GCC_USB2_HS1_CLKREF_CLK 285
+#define GCC_USB2_HS2_CLKREF_CLK 286
+#define GCC_USB2_HS3_CLKREF_CLK 287
+#define GCC_USB30_MP_MASTER_CLK 288
+#define GCC_USB30_MP_MASTER_CLK_SRC 289
+#define GCC_USB30_MP_MOCK_UTMI_CLK 290
+#define GCC_USB30_MP_MOCK_UTMI_CLK_SRC 291
+#define GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC 292
+#define GCC_USB30_MP_SLEEP_CLK 293
+#define GCC_USB30_PRIM_MASTER_CLK 294
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 295
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 296
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 297
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 298
+#define GCC_USB30_PRIM_SLEEP_CLK 299
+#define GCC_USB30_SEC_MASTER_CLK 300
+#define GCC_USB30_SEC_MASTER_CLK_SRC 301
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 302
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 303
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 304
+#define GCC_USB30_SEC_SLEEP_CLK 305
+#define GCC_USB34_PRIM_PHY_PIPE_CLK_SRC 306
+#define GCC_USB34_SEC_PHY_PIPE_CLK_SRC 307
+#define GCC_USB3_MP0_CLKREF_CLK 308
+#define GCC_USB3_MP1_CLKREF_CLK 309
+#define GCC_USB3_MP_PHY_AUX_CLK 310
+#define GCC_USB3_MP_PHY_AUX_CLK_SRC 311
+#define GCC_USB3_MP_PHY_COM_AUX_CLK 312
+#define GCC_USB3_MP_PHY_PIPE_0_CLK 313
+#define GCC_USB3_MP_PHY_PIPE_0_CLK_SRC 314
+#define GCC_USB3_MP_PHY_PIPE_1_CLK 315
+#define GCC_USB3_MP_PHY_PIPE_1_CLK_SRC 316
+#define GCC_USB3_PRIM_PHY_AUX_CLK 317
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 318
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 319
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 320
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 321
+#define GCC_USB3_SEC_PHY_AUX_CLK 322
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 323
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 324
+#define GCC_USB3_SEC_PHY_PIPE_CLK 325
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 326
+#define GCC_USB4_1_CFG_AHB_CLK 327
+#define GCC_USB4_1_DP_CLK 328
+#define GCC_USB4_1_MASTER_CLK 329
+#define GCC_USB4_1_MASTER_CLK_SRC 330
+#define GCC_USB4_1_PHY_DP_CLK_SRC 331
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK 332
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC 333
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK 334
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC 335
+#define GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC 336
+#define GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC 337
+#define GCC_USB4_1_PHY_RX0_CLK 338
+#define GCC_USB4_1_PHY_RX0_CLK_SRC 339
+#define GCC_USB4_1_PHY_RX1_CLK 340
+#define GCC_USB4_1_PHY_RX1_CLK_SRC 341
+#define GCC_USB4_1_PHY_SYS_CLK_SRC 342
+#define GCC_USB4_1_PHY_USB_PIPE_CLK 343
+#define GCC_USB4_1_SB_IF_CLK 344
+#define GCC_USB4_1_SB_IF_CLK_SRC 345
+#define GCC_USB4_1_SYS_CLK 346
+#define GCC_USB4_1_TMU_CLK 347
+#define GCC_USB4_1_TMU_CLK_SRC 348
+#define GCC_USB4_CFG_AHB_CLK 349
+#define GCC_USB4_CLKREF_CLK 350
+#define GCC_USB4_DP_CLK 351
+#define GCC_USB4_EUD_CLKREF_CLK 352
+#define GCC_USB4_MASTER_CLK 353
+#define GCC_USB4_MASTER_CLK_SRC 354
+#define GCC_USB4_PHY_DP_CLK_SRC 355
+#define GCC_USB4_PHY_P2RR2P_PIPE_CLK 356
+#define GCC_USB4_PHY_P2RR2P_PIPE_CLK_SRC 357
+#define GCC_USB4_PHY_PCIE_PIPE_CLK 358
+#define GCC_USB4_PHY_PCIE_PIPE_CLK_SRC 359
+#define GCC_USB4_PHY_PCIE_PIPE_MUX_CLK_SRC 360
+#define GCC_USB4_PHY_PCIE_PIPEGMUX_CLK_SRC 361
+#define GCC_USB4_PHY_RX0_CLK 362
+#define GCC_USB4_PHY_RX0_CLK_SRC 363
+#define GCC_USB4_PHY_RX1_CLK 364
+#define GCC_USB4_PHY_RX1_CLK_SRC 365
+#define GCC_USB4_PHY_SYS_CLK_SRC 366
+#define GCC_USB4_PHY_USB_PIPE_CLK 367
+#define GCC_USB4_SB_IF_CLK 368
+#define GCC_USB4_SB_IF_CLK_SRC 369
+#define GCC_USB4_SYS_CLK 370
+#define GCC_USB4_TMU_CLK 371
+#define GCC_USB4_TMU_CLK_SRC 372
+#define GCC_VIDEO_AHB_CLK 373
+#define GCC_VIDEO_AXI0_CLK 374
+#define GCC_VIDEO_AXI1_CLK 375
+#define GCC_VIDEO_CVP_THROTTLE_CLK 376
+#define GCC_VIDEO_VCODEC_THROTTLE_CLK 377
+#define GCC_VIDEO_XO_CLK 378
+#define GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK 379
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 380
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK 381
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 382
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 383
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 384
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 385
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 386
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 387
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 388
+
+/* GCC resets */
+#define GCC_EMAC0_BCR 0
+#define GCC_EMAC1_BCR 1
+#define GCC_PCIE_0_LINK_DOWN_BCR 2
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 3
+#define GCC_PCIE_0_PHY_BCR 4
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_TUNNEL_BCR 6
+#define GCC_PCIE_1_LINK_DOWN_BCR 7
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_1_PHY_BCR 9
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_TUNNEL_BCR 11
+#define GCC_PCIE_2A_BCR 12
+#define GCC_PCIE_2A_LINK_DOWN_BCR 13
+#define GCC_PCIE_2A_NOCSR_COM_PHY_BCR 14
+#define GCC_PCIE_2A_PHY_BCR 15
+#define GCC_PCIE_2A_PHY_NOCSR_COM_PHY_BCR 16
+#define GCC_PCIE_2B_BCR 17
+#define GCC_PCIE_2B_LINK_DOWN_BCR 18
+#define GCC_PCIE_2B_NOCSR_COM_PHY_BCR 19
+#define GCC_PCIE_2B_PHY_BCR 20
+#define GCC_PCIE_2B_PHY_NOCSR_COM_PHY_BCR 21
+#define GCC_PCIE_3A_BCR 22
+#define GCC_PCIE_3A_LINK_DOWN_BCR 23
+#define GCC_PCIE_3A_NOCSR_COM_PHY_BCR 24
+#define GCC_PCIE_3A_PHY_BCR 25
+#define GCC_PCIE_3A_PHY_NOCSR_COM_PHY_BCR 26
+#define GCC_PCIE_3B_BCR 27
+#define GCC_PCIE_3B_LINK_DOWN_BCR 28
+#define GCC_PCIE_3B_NOCSR_COM_PHY_BCR 29
+#define GCC_PCIE_3B_PHY_BCR 30
+#define GCC_PCIE_3B_PHY_NOCSR_COM_PHY_BCR 31
+#define GCC_PCIE_4_BCR 32
+#define GCC_PCIE_4_LINK_DOWN_BCR 33
+#define GCC_PCIE_4_NOCSR_COM_PHY_BCR 34
+#define GCC_PCIE_4_PHY_BCR 35
+#define GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR 36
+#define GCC_PCIE_PHY_CFG_AHB_BCR 37
+#define GCC_PCIE_PHY_COM_BCR 38
+#define GCC_PCIE_RSCC_BCR 39
+#define GCC_QUSB2PHY_HS0_MP_BCR 40
+#define GCC_QUSB2PHY_HS1_MP_BCR 41
+#define GCC_QUSB2PHY_HS2_MP_BCR 42
+#define GCC_QUSB2PHY_HS3_MP_BCR 43
+#define GCC_QUSB2PHY_PRIM_BCR 44
+#define GCC_QUSB2PHY_SEC_BCR 45
+#define GCC_SDCC2_BCR 46
+#define GCC_SDCC4_BCR 47
+#define GCC_UFS_CARD_BCR 48
+#define GCC_UFS_PHY_BCR 49
+#define GCC_USB2_PHY_PRIM_BCR 50
+#define GCC_USB2_PHY_SEC_BCR 51
+#define GCC_USB30_MP_BCR 52
+#define GCC_USB30_PRIM_BCR 53
+#define GCC_USB30_SEC_BCR 54
+#define GCC_USB3_DP_PHY_PRIM_BCR 55
+#define GCC_USB3_DP_PHY_SEC_BCR 56
+#define GCC_USB3_PHY_PRIM_BCR 57
+#define GCC_USB3_PHY_SEC_BCR 58
+#define GCC_USB3_UNIPHY_MP0_BCR 59
+#define GCC_USB3_UNIPHY_MP1_BCR 60
+#define GCC_USB3PHY_PHY_PRIM_BCR 61
+#define GCC_USB3PHY_PHY_SEC_BCR 62
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 63
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 64
+#define GCC_USB4_1_BCR 65
+#define GCC_USB4_1_DP_PHY_PRIM_BCR 66
+#define GCC_USB4_1_DPPHY_AUX_BCR 67
+#define GCC_USB4_1_PHY_PRIM_BCR 68
+#define GCC_USB4_BCR 69
+#define GCC_USB4_DP_PHY_PRIM_BCR 70
+#define GCC_USB4_DPPHY_AUX_BCR 71
+#define GCC_USB4_PHY_PRIM_BCR 72
+#define GCC_USB4PHY_1_PHY_PRIM_BCR 73
+#define GCC_USB4PHY_PHY_PRIM_BCR 74
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 75
+#define GCC_VIDEO_BCR 76
+#define GCC_VIDEO_AXI0_CLK_ARES 77
+#define GCC_VIDEO_AXI1_CLK_ARES 78
+
+/* GCC GDSCs */
+#define PCIE_0_TUNNEL_GDSC 0
+#define PCIE_1_TUNNEL_GDSC 1
+#define PCIE_2A_GDSC 2
+#define PCIE_2B_GDSC 3
+#define PCIE_3A_GDSC 4
+#define PCIE_3B_GDSC 5
+#define PCIE_4_GDSC 6
+#define UFS_CARD_GDSC 7
+#define UFS_PHY_GDSC 8
+#define USB30_MP_GDSC 9
+#define USB30_PRIM_GDSC 10
+#define USB30_SEC_GDSC 11
+#define EMAC_0_GDSC 12
+#define EMAC_1_GDSC 13
+#define USB4_1_GDSC 14
+#define USB4_GDSC 15
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 16
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 17
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC 18
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC 19
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 20
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 21
+#define HLOS1_VOTE_TURING_MMU_TBU2_GDSC 22
+#define HLOS1_VOTE_TURING_MMU_TBU3_GDSC 23
+
+#endif
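
The GDSC indices at the end of this header are referenced via power-domains, alongside the clock and reset cells. A hypothetical SC8280XP USB node combining all three (every property value here is an illustrative sketch):

	usb_0: usb@a6f8800 {	/* illustrative node and address */
		clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
			 <&gcc GCC_USB30_PRIM_MASTER_CLK>;
		resets = <&gcc GCC_USB30_PRIM_BCR>;
		power-domains = <&gcc USB30_PRIM_GDSC>;
	};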
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm660.h b/include/dt-bindings/clock/qcom,gcc-sdm660.h
index df8a6f3d367e..f19018b742f5 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm660.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm660.h
@@ -138,10 +138,16 @@
#define GCC_UFS_UNIPRO_CORE_HW_CTL_CLK 128
#define GCC_RX0_USB2_CLKREF_CLK 129
#define GCC_RX1_USB2_CLKREF_CLK 130
+#define GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK 131
+#define GCC_HLOS1_VOTE_TURING_ADSP_SMMU_CLK 132
+#define GCC_HLOS2_VOTE_TURING_ADSP_SMMU_CLK 133

#define PCIE_0_GDSC 0
#define UFS_GDSC 1
#define USB_30_GDSC 2
+#define HLOS1_VOTE_TURING_ADSP_GDSC 3
+#define HLOS2_VOTE_TURING_ADSP_GDSC 4
+#define HLOS1_VOTE_LPASS_ADSP_GDSC 5

#define GCC_QUSB2PHY_PRIM_BCR 0
#define GCC_QUSB2PHY_SEC_BCR 1
@@ -153,5 +159,7 @@
#define GCC_USB_30_BCR 7
#define GCC_USB_PHY_CFG_AHB2PHY_BCR 8
#define GCC_MSS_RESTART 9
+#define GCC_SDCC1_BCR 10
+#define GCC_SDCC2_BCR 11

#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index 968fa65b9c42..d78b899263a2 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -199,6 +199,7 @@
#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 189
#define GCC_LPASS_Q6_AXI_CLK 190
#define GCC_LPASS_SWAY_CLK 191
+#define GPLL6 192

/* GCC Resets */
#define GCC_MMSS_BCR 0
diff --git a/include/dt-bindings/clock/qcom,gcc-sdx65.h b/include/dt-bindings/clock/qcom,gcc-sdx65.h
new file mode 100644
index 000000000000..75ecc9237d8f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sdx65.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SDX65_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SDX65_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_EVEN 1
+#define GCC_AHB_PCIE_LINK_CLK 2
+#define GCC_BLSP1_AHB_CLK 3
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 4
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC 5
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 6
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 7
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 8
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC 9
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 10
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 11
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 12
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC 13
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 14
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 15
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 16
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC 17
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 18
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC 19
+#define GCC_BLSP1_SLEEP_CLK 20
+#define GCC_BLSP1_UART1_APPS_CLK 21
+#define GCC_BLSP1_UART1_APPS_CLK_SRC 22
+#define GCC_BLSP1_UART2_APPS_CLK 23
+#define GCC_BLSP1_UART2_APPS_CLK_SRC 24
+#define GCC_BLSP1_UART3_APPS_CLK 25
+#define GCC_BLSP1_UART3_APPS_CLK_SRC 26
+#define GCC_BLSP1_UART4_APPS_CLK 27
+#define GCC_BLSP1_UART4_APPS_CLK_SRC 28
+#define GCC_BOOT_ROM_AHB_CLK 29
+#define GCC_CPUSS_AHB_CLK 30
+#define GCC_CPUSS_AHB_CLK_SRC 31
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 32
+#define GCC_CPUSS_GNOC_CLK 33
+#define GCC_GP1_CLK 34
+#define GCC_GP1_CLK_SRC 35
+#define GCC_GP2_CLK 36
+#define GCC_GP2_CLK_SRC 37
+#define GCC_GP3_CLK 38
+#define GCC_GP3_CLK_SRC 39
+#define GCC_PCIE_0_CLKREF_EN 40
+#define GCC_PCIE_AUX_CLK 41
+#define GCC_PCIE_AUX_CLK_SRC 42
+#define GCC_PCIE_AUX_PHY_CLK_SRC 43
+#define GCC_PCIE_CFG_AHB_CLK 44
+#define GCC_PCIE_MSTR_AXI_CLK 45
+#define GCC_PCIE_PIPE_CLK 46
+#define GCC_PCIE_PIPE_CLK_SRC 47
+#define GCC_PCIE_RCHNG_PHY_CLK 48
+#define GCC_PCIE_RCHNG_PHY_CLK_SRC 49
+#define GCC_PCIE_SLEEP_CLK 50
+#define GCC_PCIE_SLV_AXI_CLK 51
+#define GCC_PCIE_SLV_Q2A_AXI_CLK 52
+#define GCC_PDM2_CLK 53
+#define GCC_PDM2_CLK_SRC 54
+#define GCC_PDM_AHB_CLK 55
+#define GCC_PDM_XO4_CLK 56
+#define GCC_RX1_USB2_CLKREF_EN 57
+#define GCC_SDCC1_AHB_CLK 58
+#define GCC_SDCC1_APPS_CLK 59
+#define GCC_SDCC1_APPS_CLK_SRC 60
+#define GCC_SPMI_FETCHER_AHB_CLK 61
+#define GCC_SPMI_FETCHER_CLK 62
+#define GCC_SPMI_FETCHER_CLK_SRC 63
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 64
+#define GCC_USB30_MASTER_CLK 65
+#define GCC_USB30_MASTER_CLK_SRC 66
+#define GCC_USB30_MOCK_UTMI_CLK 67
+#define GCC_USB30_MOCK_UTMI_CLK_SRC 68
+#define GCC_USB30_MOCK_UTMI_POSTDIV_CLK_SRC 69
+#define GCC_USB30_MSTR_AXI_CLK 70
+#define GCC_USB30_SLEEP_CLK 71
+#define GCC_USB30_SLV_AHB_CLK 72
+#define GCC_USB3_PHY_AUX_CLK 73
+#define GCC_USB3_PHY_AUX_CLK_SRC 74
+#define GCC_USB3_PHY_PIPE_CLK 75
+#define GCC_USB3_PHY_PIPE_CLK_SRC 76
+#define GCC_USB3_PRIM_CLKREF_EN 77
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 78
+#define GCC_XO_DIV4_CLK 79
+#define GCC_XO_PCIE_LINK_CLK 80
+
+/* GCC resets */
+#define GCC_BLSP1_QUP1_BCR 0
+#define GCC_BLSP1_QUP2_BCR 1
+#define GCC_BLSP1_QUP3_BCR 2
+#define GCC_BLSP1_QUP4_BCR 3
+#define GCC_BLSP1_UART1_BCR 4
+#define GCC_BLSP1_UART2_BCR 5
+#define GCC_BLSP1_UART3_BCR 6
+#define GCC_BLSP1_UART4_BCR 7
+#define GCC_PCIE_BCR 8
+#define GCC_PCIE_LINK_DOWN_BCR 9
+#define GCC_PCIE_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_PHY_BCR 11
+#define GCC_PCIE_PHY_CFG_AHB_BCR 12
+#define GCC_PCIE_PHY_COM_BCR 13
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR 14
+#define GCC_PDM_BCR 15
+#define GCC_QUSB2PHY_BCR 16
+#define GCC_SDCC1_BCR 17
+#define GCC_SPMI_FETCHER_BCR 18
+#define GCC_TCSR_PCIE_BCR 19
+#define GCC_USB30_BCR 20
+#define GCC_USB3_PHY_BCR 21
+#define GCC_USB3PHY_PHY_BCR 22
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 23
+
+/* GCC power domains */
+#define USB30_GDSC 0
+#define PCIE_GDSC 1
+
+#endif
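
A hypothetical SDX65 PCIe node sketches how the clock, reset and GDSC indices above fit together (node name, address and the exact clock set are illustrative):

	pcie: pcie@1c00000 {	/* illustrative node and address */
		clocks = <&gcc GCC_PCIE_AUX_CLK>,
			 <&gcc GCC_PCIE_CFG_AHB_CLK>,
			 <&gcc GCC_PCIE_MSTR_AXI_CLK>;
		resets = <&gcc GCC_PCIE_BCR>;
		power-domains = <&gcc PCIE_GDSC>;
	};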
diff --git a/include/dt-bindings/clock/qcom,gcc-sm6115.h b/include/dt-bindings/clock/qcom,gcc-sm6115.h
new file mode 100644
index 000000000000..b91a7b460433
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm6115.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6115_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM6115_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_AUX2 1
+#define GPLL0_OUT_MAIN 2
+#define GPLL10 3
+#define GPLL10_OUT_MAIN 4
+#define GPLL11 5
+#define GPLL11_OUT_MAIN 6
+#define GPLL3 7
+#define GPLL4 8
+#define GPLL4_OUT_MAIN 9
+#define GPLL6 10
+#define GPLL6_OUT_MAIN 11
+#define GPLL7 12
+#define GPLL7_OUT_MAIN 13
+#define GPLL8 14
+#define GPLL8_OUT_MAIN 15
+#define GPLL9 16
+#define GPLL9_OUT_MAIN 17
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 18
+#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 19
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 20
+#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 21
+#define GCC_CAMSS_CSI2PHYTIMER_CLK 22
+#define GCC_CAMSS_CSI2PHYTIMER_CLK_SRC 23
+#define GCC_CAMSS_MCLK0_CLK 24
+#define GCC_CAMSS_MCLK0_CLK_SRC 25
+#define GCC_CAMSS_MCLK1_CLK 26
+#define GCC_CAMSS_MCLK1_CLK_SRC 27
+#define GCC_CAMSS_MCLK2_CLK 28
+#define GCC_CAMSS_MCLK2_CLK_SRC 29
+#define GCC_CAMSS_MCLK3_CLK 30
+#define GCC_CAMSS_MCLK3_CLK_SRC 31
+#define GCC_CAMSS_NRT_AXI_CLK 32
+#define GCC_CAMSS_OPE_AHB_CLK 33
+#define GCC_CAMSS_OPE_AHB_CLK_SRC 34
+#define GCC_CAMSS_OPE_CLK 35
+#define GCC_CAMSS_OPE_CLK_SRC 36
+#define GCC_CAMSS_RT_AXI_CLK 37
+#define GCC_CAMSS_TFE_0_CLK 38
+#define GCC_CAMSS_TFE_0_CLK_SRC 39
+#define GCC_CAMSS_TFE_0_CPHY_RX_CLK 40
+#define GCC_CAMSS_TFE_0_CSID_CLK 41
+#define GCC_CAMSS_TFE_0_CSID_CLK_SRC 42
+#define GCC_CAMSS_TFE_1_CLK 43
+#define GCC_CAMSS_TFE_1_CLK_SRC 44
+#define GCC_CAMSS_TFE_1_CPHY_RX_CLK 45
+#define GCC_CAMSS_TFE_1_CSID_CLK 46
+#define GCC_CAMSS_TFE_1_CSID_CLK_SRC 47
+#define GCC_CAMSS_TFE_2_CLK 48
+#define GCC_CAMSS_TFE_2_CLK_SRC 49
+#define GCC_CAMSS_TFE_2_CPHY_RX_CLK 50
+#define GCC_CAMSS_TFE_2_CSID_CLK 51
+#define GCC_CAMSS_TFE_2_CSID_CLK_SRC 52
+#define GCC_CAMSS_TFE_CPHY_RX_CLK_SRC 53
+#define GCC_CAMSS_TOP_AHB_CLK 54
+#define GCC_CAMSS_TOP_AHB_CLK_SRC 55
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 56
+#define GCC_CPUSS_AHB_CLK 57
+#define GCC_CPUSS_GNOC_CLK 60
+#define GCC_DISP_AHB_CLK 61
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 62
+#define GCC_DISP_HF_AXI_CLK 63
+#define GCC_DISP_THROTTLE_CORE_CLK 64
+#define GCC_DISP_XO_CLK 65
+#define GCC_GP1_CLK 66
+#define GCC_GP1_CLK_SRC 67
+#define GCC_GP2_CLK 68
+#define GCC_GP2_CLK_SRC 69
+#define GCC_GP3_CLK 70
+#define GCC_GP3_CLK_SRC 71
+#define GCC_GPU_CFG_AHB_CLK 72
+#define GCC_GPU_GPLL0_CLK_SRC 73
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 74
+#define GCC_GPU_IREF_CLK 75
+#define GCC_GPU_MEMNOC_GFX_CLK 76
+#define GCC_GPU_SNOC_DVM_GFX_CLK 77
+#define GCC_GPU_THROTTLE_CORE_CLK 78
+#define GCC_GPU_THROTTLE_XO_CLK 79
+#define GCC_PDM2_CLK 80
+#define GCC_PDM2_CLK_SRC 81
+#define GCC_PDM_AHB_CLK 82
+#define GCC_PDM_XO4_CLK 83
+#define GCC_PRNG_AHB_CLK 84
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 85
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 86
+#define GCC_QMIP_DISP_AHB_CLK 87
+#define GCC_QMIP_GPU_CFG_AHB_CLK 88
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 89
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 90
+#define GCC_QUPV3_WRAP0_CORE_CLK 91
+#define GCC_QUPV3_WRAP0_S0_CLK 92
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 93
+#define GCC_QUPV3_WRAP0_S1_CLK 94
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 95
+#define GCC_QUPV3_WRAP0_S2_CLK 96
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 97
+#define GCC_QUPV3_WRAP0_S3_CLK 98
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 99
+#define GCC_QUPV3_WRAP0_S4_CLK 100
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 101
+#define GCC_QUPV3_WRAP0_S5_CLK 102
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 103
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 104
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 105
+#define GCC_SDCC1_AHB_CLK 106
+#define GCC_SDCC1_APPS_CLK 107
+#define GCC_SDCC1_APPS_CLK_SRC 108
+#define GCC_SDCC1_ICE_CORE_CLK 109
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 110
+#define GCC_SDCC2_AHB_CLK 111
+#define GCC_SDCC2_APPS_CLK 112
+#define GCC_SDCC2_APPS_CLK_SRC 113
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 114
+#define GCC_SYS_NOC_UFS_PHY_AXI_CLK 115
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 116
+#define GCC_UFS_PHY_AHB_CLK 117
+#define GCC_UFS_PHY_AXI_CLK 118
+#define GCC_UFS_PHY_AXI_CLK_SRC 119
+#define GCC_UFS_PHY_ICE_CORE_CLK 120
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 121
+#define GCC_UFS_PHY_PHY_AUX_CLK 122
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 123
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 124
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 125
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 126
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 127
+#define GCC_USB30_PRIM_MASTER_CLK 128
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 129
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 130
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 131
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 132
+#define GCC_USB30_PRIM_SLEEP_CLK 133
+#define GCC_USB3_PRIM_CLKREF_CLK 134
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 135
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 136
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 137
+#define GCC_VCODEC0_AXI_CLK 138
+#define GCC_VENUS_AHB_CLK 139
+#define GCC_VENUS_CTL_AXI_CLK 140
+#define GCC_VIDEO_AHB_CLK 141
+#define GCC_VIDEO_AXI0_CLK 142
+#define GCC_VIDEO_THROTTLE_CORE_CLK 143
+#define GCC_VIDEO_VCODEC0_SYS_CLK 144
+#define GCC_VIDEO_VENUS_CLK_SRC 145
+#define GCC_VIDEO_VENUS_CTL_CLK 146
+#define GCC_VIDEO_XO_CLK 147
+#define GCC_AHB2PHY_CSI_CLK 148
+#define GCC_AHB2PHY_USB_CLK 149
+#define GCC_BIMC_GPU_AXI_CLK 150
+#define GCC_BOOT_ROM_AHB_CLK 151
+#define GCC_CAM_THROTTLE_NRT_CLK 152
+#define GCC_CAM_THROTTLE_RT_CLK 153
+#define GCC_CAMERA_AHB_CLK 154
+#define GCC_CAMERA_XO_CLK 155
+#define GCC_CAMSS_AXI_CLK 156
+#define GCC_CAMSS_AXI_CLK_SRC 157
+#define GCC_CAMSS_CAMNOC_ATB_CLK 158
+#define GCC_CAMSS_CAMNOC_NTS_XO_CLK 159
+#define GCC_CAMSS_CCI_0_CLK 160
+#define GCC_CAMSS_CCI_CLK_SRC 161
+#define GCC_CAMSS_CPHY_0_CLK 162
+#define GCC_CAMSS_CPHY_1_CLK 163
+#define GCC_CAMSS_CPHY_2_CLK 164
+#define GCC_UFS_CLKREF_CLK 165
+#define GCC_DISP_GPLL0_CLK_SRC 166
+
+/* GCC resets */
+#define GCC_QUSB2PHY_PRIM_BCR 0
+#define GCC_QUSB2PHY_SEC_BCR 1
+#define GCC_SDCC1_BCR 2
+#define GCC_UFS_PHY_BCR 3
+#define GCC_USB30_PRIM_BCR 4
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 5
+#define GCC_VCODEC0_BCR 6
+#define GCC_VENUS_BCR 7
+#define GCC_VIDEO_INTERFACE_BCR 8
+#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 9
+#define GCC_USB3_PHY_PRIM_SP0_BCR 10
+#define GCC_SDCC2_BCR 11
+
+/* Indexes for GDSCs */
+#define GCC_CAMSS_TOP_GDSC 0
+#define GCC_UFS_PHY_GDSC 1
+#define GCC_USB30_PRIM_GDSC 2
+#define GCC_VCODEC0_GDSC 3
+#define GCC_VENUS_GDSC 4
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 5
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 6
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 7
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 8
+
+#endif
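
For the SM6115 indices, a hypothetical UFS host node might look as follows — the clock-names follow the common ufshcd convention, while the node, address and clock selection are an illustrative sketch:

	ufshc: ufs@4804000 {	/* illustrative node and address */
		clocks = <&gcc GCC_UFS_PHY_AXI_CLK>,
			 <&gcc GCC_UFS_PHY_AHB_CLK>,
			 <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>;
		clock-names = "core_clk", "iface_clk", "core_clk_unipro";
		resets = <&gcc GCC_UFS_PHY_BCR>;
		power-domains = <&gcc GCC_UFS_PHY_GDSC>;
	};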
diff --git a/include/dt-bindings/clock/qcom,gcc-sm6125.h b/include/dt-bindings/clock/qcom,gcc-sm6125.h
new file mode 100644
index 000000000000..08ea18086824
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm6125.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6125_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM6125_H
+
+#define GPLL0_OUT_AUX2 0
+#define GPLL0_OUT_MAIN 1
+#define GPLL6_OUT_MAIN 2
+#define GPLL7_OUT_MAIN 3
+#define GPLL8_OUT_MAIN 4
+#define GPLL9_OUT_MAIN 5
+#define GPLL0_OUT_EARLY 6
+#define GPLL3_OUT_EARLY 7
+#define GPLL4_OUT_MAIN 8
+#define GPLL5_OUT_MAIN 9
+#define GPLL6_OUT_EARLY 10
+#define GPLL7_OUT_EARLY 11
+#define GPLL8_OUT_EARLY 12
+#define GPLL9_OUT_EARLY 13
+#define GCC_AHB2PHY_CSI_CLK 14
+#define GCC_AHB2PHY_USB_CLK 15
+#define GCC_APC_VS_CLK 16
+#define GCC_BOOT_ROM_AHB_CLK 17
+#define GCC_CAMERA_AHB_CLK 18
+#define GCC_CAMERA_XO_CLK 19
+#define GCC_CAMSS_AHB_CLK_SRC 20
+#define GCC_CAMSS_CCI_AHB_CLK 21
+#define GCC_CAMSS_CCI_CLK 22
+#define GCC_CAMSS_CCI_CLK_SRC 23
+#define GCC_CAMSS_CPHY_CSID0_CLK 24
+#define GCC_CAMSS_CPHY_CSID1_CLK 25
+#define GCC_CAMSS_CPHY_CSID2_CLK 26
+#define GCC_CAMSS_CPHY_CSID3_CLK 27
+#define GCC_CAMSS_CPP_AHB_CLK 28
+#define GCC_CAMSS_CPP_AXI_CLK 29
+#define GCC_CAMSS_CPP_CLK 30
+#define GCC_CAMSS_CPP_CLK_SRC 31
+#define GCC_CAMSS_CPP_VBIF_AHB_CLK 32
+#define GCC_CAMSS_CSI0_AHB_CLK 33
+#define GCC_CAMSS_CSI0_CLK 34
+#define GCC_CAMSS_CSI0_CLK_SRC 35
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 36
+#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 37
+#define GCC_CAMSS_CSI0PIX_CLK 38
+#define GCC_CAMSS_CSI0RDI_CLK 39
+#define GCC_CAMSS_CSI1_AHB_CLK 40
+#define GCC_CAMSS_CSI1_CLK 41
+#define GCC_CAMSS_CSI1_CLK_SRC 42
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 43
+#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 44
+#define GCC_CAMSS_CSI1PIX_CLK 45
+#define GCC_CAMSS_CSI1RDI_CLK 46
+#define GCC_CAMSS_CSI2_AHB_CLK 47
+#define GCC_CAMSS_CSI2_CLK 48
+#define GCC_CAMSS_CSI2_CLK_SRC 49
+#define GCC_CAMSS_CSI2PHYTIMER_CLK 50
+#define GCC_CAMSS_CSI2PHYTIMER_CLK_SRC 51
+#define GCC_CAMSS_CSI2PIX_CLK 52
+#define GCC_CAMSS_CSI2RDI_CLK 53
+#define GCC_CAMSS_CSI3_AHB_CLK 54
+#define GCC_CAMSS_CSI3_CLK 55
+#define GCC_CAMSS_CSI3_CLK_SRC 56
+#define GCC_CAMSS_CSI3PIX_CLK 57
+#define GCC_CAMSS_CSI3RDI_CLK 58
+#define GCC_CAMSS_CSI_VFE0_CLK 59
+#define GCC_CAMSS_CSI_VFE1_CLK 60
+#define GCC_CAMSS_CSIPHY0_CLK 61
+#define GCC_CAMSS_CSIPHY1_CLK 62
+#define GCC_CAMSS_CSIPHY2_CLK 63
+#define GCC_CAMSS_CSIPHY_CLK_SRC 64
+#define GCC_CAMSS_GP0_CLK 65
+#define GCC_CAMSS_GP0_CLK_SRC 66
+#define GCC_CAMSS_GP1_CLK 67
+#define GCC_CAMSS_GP1_CLK_SRC 68
+#define GCC_CAMSS_ISPIF_AHB_CLK 69
+#define GCC_CAMSS_JPEG_AHB_CLK 70
+#define GCC_CAMSS_JPEG_AXI_CLK 71
+#define GCC_CAMSS_JPEG_CLK 72
+#define GCC_CAMSS_JPEG_CLK_SRC 73
+#define GCC_CAMSS_MCLK0_CLK 74
+#define GCC_CAMSS_MCLK0_CLK_SRC 75
+#define GCC_CAMSS_MCLK1_CLK 76
+#define GCC_CAMSS_MCLK1_CLK_SRC 77
+#define GCC_CAMSS_MCLK2_CLK 78
+#define GCC_CAMSS_MCLK2_CLK_SRC 79
+#define GCC_CAMSS_MCLK3_CLK 80
+#define GCC_CAMSS_MCLK3_CLK_SRC 81
+#define GCC_CAMSS_MICRO_AHB_CLK 82
+#define GCC_CAMSS_THROTTLE_NRT_AXI_CLK 83
+#define GCC_CAMSS_THROTTLE_RT_AXI_CLK 84
+#define GCC_CAMSS_TOP_AHB_CLK 85
+#define GCC_CAMSS_VFE0_AHB_CLK 86
+#define GCC_CAMSS_VFE0_CLK 87
+#define GCC_CAMSS_VFE0_CLK_SRC 88
+#define GCC_CAMSS_VFE0_STREAM_CLK 89
+#define GCC_CAMSS_VFE1_AHB_CLK 90
+#define GCC_CAMSS_VFE1_CLK 91
+#define GCC_CAMSS_VFE1_CLK_SRC 92
+#define GCC_CAMSS_VFE1_STREAM_CLK 93
+#define GCC_CAMSS_VFE_TSCTR_CLK 94
+#define GCC_CAMSS_VFE_VBIF_AHB_CLK 95
+#define GCC_CAMSS_VFE_VBIF_AXI_CLK 96
+#define GCC_CE1_AHB_CLK 97
+#define GCC_CE1_AXI_CLK 98
+#define GCC_CE1_CLK 99
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 100
+#define GCC_CPUSS_GNOC_CLK 101
+#define GCC_DISP_AHB_CLK 102
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 103
+#define GCC_DISP_HF_AXI_CLK 104
+#define GCC_DISP_THROTTLE_CORE_CLK 105
+#define GCC_DISP_XO_CLK 106
+#define GCC_GP1_CLK 107
+#define GCC_GP1_CLK_SRC 108
+#define GCC_GP2_CLK 109
+#define GCC_GP2_CLK_SRC 110
+#define GCC_GP3_CLK 111
+#define GCC_GP3_CLK_SRC 112
+#define GCC_GPU_CFG_AHB_CLK 113
+#define GCC_GPU_GPLL0_CLK_SRC 114
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 115
+#define GCC_GPU_MEMNOC_GFX_CLK 116
+#define GCC_GPU_SNOC_DVM_GFX_CLK 117
+#define GCC_GPU_THROTTLE_CORE_CLK 118
+#define GCC_GPU_THROTTLE_XO_CLK 119
+#define GCC_MSS_VS_CLK 120
+#define GCC_PDM2_CLK 121
+#define GCC_PDM2_CLK_SRC 122
+#define GCC_PDM_AHB_CLK 123
+#define GCC_PDM_XO4_CLK 124
+#define GCC_PRNG_AHB_CLK 125
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 126
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 127
+#define GCC_QMIP_DISP_AHB_CLK 128
+#define GCC_QMIP_GPU_CFG_AHB_CLK 129
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 130
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 131
+#define GCC_QUPV3_WRAP0_CORE_CLK 132
+#define GCC_QUPV3_WRAP0_S0_CLK 133
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 134
+#define GCC_QUPV3_WRAP0_S1_CLK 135
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 136
+#define GCC_QUPV3_WRAP0_S2_CLK 137
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 138
+#define GCC_QUPV3_WRAP0_S3_CLK 139
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 140
+#define GCC_QUPV3_WRAP0_S4_CLK 141
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 142
+#define GCC_QUPV3_WRAP0_S5_CLK 143
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 144
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 145
+#define GCC_QUPV3_WRAP1_CORE_CLK 146
+#define GCC_QUPV3_WRAP1_S0_CLK 147
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 148
+#define GCC_QUPV3_WRAP1_S1_CLK 149
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 150
+#define GCC_QUPV3_WRAP1_S2_CLK 151
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 152
+#define GCC_QUPV3_WRAP1_S3_CLK 153
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 154
+#define GCC_QUPV3_WRAP1_S4_CLK 155
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 156
+#define GCC_QUPV3_WRAP1_S5_CLK 157
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 158
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 159
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 160
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 161
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 162
+#define GCC_SDCC1_AHB_CLK 163
+#define GCC_SDCC1_APPS_CLK 164
+#define GCC_SDCC1_APPS_CLK_SRC 165
+#define GCC_SDCC1_ICE_CORE_CLK 166
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 167
+#define GCC_SDCC2_AHB_CLK 168
+#define GCC_SDCC2_APPS_CLK 169
+#define GCC_SDCC2_APPS_CLK_SRC 170
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 171
+#define GCC_SYS_NOC_UFS_PHY_AXI_CLK 172
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 173
+#define GCC_UFS_PHY_AHB_CLK 174
+#define GCC_UFS_PHY_AXI_CLK 175
+#define GCC_UFS_PHY_AXI_CLK_SRC 176
+#define GCC_UFS_PHY_ICE_CORE_CLK 177
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 178
+#define GCC_UFS_PHY_PHY_AUX_CLK 179
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 180
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 181
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 182
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 183
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 184
+#define GCC_USB30_PRIM_MASTER_CLK 185
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 186
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 187
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 188
+#define GCC_USB30_PRIM_SLEEP_CLK 189
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 190
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 191
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 192
+#define GCC_VDDA_VS_CLK 193
+#define GCC_VDDCX_VS_CLK 194
+#define GCC_VDDMX_VS_CLK 195
+#define GCC_VIDEO_AHB_CLK 196
+#define GCC_VIDEO_AXI0_CLK 197
+#define GCC_VIDEO_THROTTLE_CORE_CLK 198
+#define GCC_VIDEO_XO_CLK 199
+#define GCC_VS_CTRL_AHB_CLK 200
+#define GCC_VS_CTRL_CLK 201
+#define GCC_VS_CTRL_CLK_SRC 202
+#define GCC_VSENSOR_CLK_SRC 203
+#define GCC_WCSS_VS_CLK 204
+#define GCC_USB3_PRIM_CLKREF_CLK 205
+#define GCC_SYS_NOC_COMPUTE_SF_AXI_CLK 206
+#define GCC_BIMC_GPU_AXI_CLK 207
+#define GCC_UFS_MEM_CLKREF_CLK 208
+
+/* GDSCs */
+#define USB30_PRIM_GDSC 0
+#define UFS_PHY_GDSC 1
+#define CAMSS_VFE0_GDSC 2
+#define CAMSS_VFE1_GDSC 3
+#define CAMSS_TOP_GDSC 4
+#define CAM_CPP_GDSC 5
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 6
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 7
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 8
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 9
+
+#define GCC_QUSB2PHY_PRIM_BCR 0
+#define GCC_QUSB2PHY_SEC_BCR 1
+#define GCC_UFS_PHY_BCR 2
+#define GCC_USB30_PRIM_BCR 3
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 4
+#define GCC_USB3_PHY_PRIM_SP0_BCR 5
+#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 6
+#define GCC_CAMSS_MICRO_BCR 7
+
+#endif
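
The per-serial-engine QUP clocks in this SM6125 header map one index per engine; a hypothetical consumer (all values illustrative):

	i2c1: i2c@4a84000 {	/* illustrative node and address */
		clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
		clock-names = "se";
	};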
diff --git a/include/dt-bindings/clock/qcom,gcc-sm6350.h b/include/dt-bindings/clock/qcom,gcc-sm6350.h
new file mode 100644
index 000000000000..ba584ca33c39
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm6350.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6350_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM6350_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_EVEN 1
+#define GPLL0_OUT_ODD 2
+#define GPLL6 3
+#define GPLL6_OUT_EVEN 4
+#define GPLL7 5
+#define GCC_AGGRE_CNOC_PERIPH_CENTER_AHB_CLK 6
+#define GCC_AGGRE_NOC_CENTER_AHB_CLK 7
+#define GCC_AGGRE_NOC_PCIE_SF_AXI_CLK 8
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 9
+#define GCC_AGGRE_NOC_WLAN_AXI_CLK 10
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 11
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 12
+#define GCC_BOOT_ROM_AHB_CLK 13
+#define GCC_CAMERA_AHB_CLK 14
+#define GCC_CAMERA_AXI_CLK 15
+#define GCC_CAMERA_THROTTLE_NRT_AXI_CLK 16
+#define GCC_CAMERA_THROTTLE_RT_AXI_CLK 17
+#define GCC_CAMERA_XO_CLK 18
+#define GCC_CE1_AHB_CLK 19
+#define GCC_CE1_AXI_CLK 20
+#define GCC_CE1_CLK 21
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 22
+#define GCC_CPUSS_AHB_CLK 23
+#define GCC_CPUSS_AHB_CLK_SRC 24
+#define GCC_CPUSS_AHB_DIV_CLK_SRC 25
+#define GCC_CPUSS_GNOC_CLK 26
+#define GCC_CPUSS_RBCPR_CLK 27
+#define GCC_DDRSS_GPU_AXI_CLK 28
+#define GCC_DISP_AHB_CLK 29
+#define GCC_DISP_AXI_CLK 30
+#define GCC_DISP_CC_SLEEP_CLK 31
+#define GCC_DISP_CC_XO_CLK 32
+#define GCC_DISP_GPLL0_CLK 33
+#define GCC_DISP_THROTTLE_AXI_CLK 34
+#define GCC_DISP_XO_CLK 35
+#define GCC_GP1_CLK 36
+#define GCC_GP1_CLK_SRC 37
+#define GCC_GP2_CLK 38
+#define GCC_GP2_CLK_SRC 39
+#define GCC_GP3_CLK 40
+#define GCC_GP3_CLK_SRC 41
+#define GCC_GPU_CFG_AHB_CLK 42
+#define GCC_GPU_GPLL0_CLK 43
+#define GCC_GPU_GPLL0_DIV_CLK 44
+#define GCC_GPU_MEMNOC_GFX_CLK 45
+#define GCC_GPU_SNOC_DVM_GFX_CLK 46
+#define GCC_NPU_AXI_CLK 47
+#define GCC_NPU_BWMON_AXI_CLK 48
+#define GCC_NPU_BWMON_DMA_CFG_AHB_CLK 49
+#define GCC_NPU_BWMON_DSP_CFG_AHB_CLK 50
+#define GCC_NPU_CFG_AHB_CLK 51
+#define GCC_NPU_DMA_CLK 52
+#define GCC_NPU_GPLL0_CLK 53
+#define GCC_NPU_GPLL0_DIV_CLK 54
+#define GCC_PCIE_0_AUX_CLK 55
+#define GCC_PCIE_0_AUX_CLK_SRC 56
+#define GCC_PCIE_0_CFG_AHB_CLK 57
+#define GCC_PCIE_0_MSTR_AXI_CLK 58
+#define GCC_PCIE_0_PIPE_CLK 59
+#define GCC_PCIE_0_SLV_AXI_CLK 60
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 61
+#define GCC_PCIE_PHY_RCHNG_CLK 62
+#define GCC_PCIE_PHY_RCHNG_CLK_SRC 63
+#define GCC_PDM2_CLK 64
+#define GCC_PDM2_CLK_SRC 65
+#define GCC_PDM_AHB_CLK 66
+#define GCC_PDM_XO4_CLK 67
+#define GCC_PRNG_AHB_CLK 68
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 69
+#define GCC_QUPV3_WRAP0_CORE_CLK 70
+#define GCC_QUPV3_WRAP0_S0_CLK 71
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 72
+#define GCC_QUPV3_WRAP0_S1_CLK 73
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 74
+#define GCC_QUPV3_WRAP0_S2_CLK 75
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 76
+#define GCC_QUPV3_WRAP0_S3_CLK 77
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 78
+#define GCC_QUPV3_WRAP0_S4_CLK 79
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 80
+#define GCC_QUPV3_WRAP0_S5_CLK 81
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 82
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 83
+#define GCC_QUPV3_WRAP1_CORE_CLK 84
+#define GCC_QUPV3_WRAP1_S0_CLK 85
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 86
+#define GCC_QUPV3_WRAP1_S1_CLK 87
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 88
+#define GCC_QUPV3_WRAP1_S2_CLK 89
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 90
+#define GCC_QUPV3_WRAP1_S3_CLK 91
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 92
+#define GCC_QUPV3_WRAP1_S4_CLK 93
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 94
+#define GCC_QUPV3_WRAP1_S5_CLK 95
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 96
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 97
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 98
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 99
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 100
+#define GCC_SDCC1_AHB_CLK 101
+#define GCC_SDCC1_APPS_CLK 102
+#define GCC_SDCC1_APPS_CLK_SRC 103
+#define GCC_SDCC1_ICE_CORE_CLK 104
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 105
+#define GCC_SDCC2_AHB_CLK 106
+#define GCC_SDCC2_APPS_CLK 107
+#define GCC_SDCC2_APPS_CLK_SRC 108
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 109
+#define GCC_UFS_MEM_CLKREF_CLK 110
+#define GCC_UFS_PHY_AHB_CLK 111
+#define GCC_UFS_PHY_AXI_CLK 112
+#define GCC_UFS_PHY_AXI_CLK_SRC 113
+#define GCC_UFS_PHY_ICE_CORE_CLK 114
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 115
+#define GCC_UFS_PHY_PHY_AUX_CLK 116
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 117
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 118
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 119
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 120
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 121
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 122
+#define GCC_USB30_PRIM_MASTER_CLK 123
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 124
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 125
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 126
+#define GCC_USB30_PRIM_MOCK_UTMI_DIV_CLK_SRC 127
+#define GCC_USB3_PRIM_CLKREF_CLK 128
+#define GCC_USB30_PRIM_SLEEP_CLK 129
+#define GCC_USB3_PRIM_PHY_AUX_CLK 130
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 131
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 132
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 133
+#define GCC_VIDEO_AHB_CLK 134
+#define GCC_VIDEO_AXI_CLK 135
+#define GCC_VIDEO_THROTTLE_AXI_CLK 136
+#define GCC_VIDEO_XO_CLK 137
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 138
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 139
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 140
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 141
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 142
+#define GCC_RX5_PCIE_CLKREF_CLK 143
+#define GCC_GPU_GPLL0_MAIN_DIV_CLK_SRC 144
+#define GCC_NPU_PLL0_MAIN_DIV_CLK_SRC 145
+
+/* GCC resets */
+#define GCC_QUSB2PHY_PRIM_BCR 0
+#define GCC_QUSB2PHY_SEC_BCR 1
+#define GCC_SDCC1_BCR 2
+#define GCC_SDCC2_BCR 3
+#define GCC_UFS_PHY_BCR 4
+#define GCC_USB30_PRIM_BCR 5
+#define GCC_PCIE_0_BCR 6
+#define GCC_PCIE_0_PHY_BCR 7
+#define GCC_QUPV3_WRAPPER_0_BCR 8
+#define GCC_QUPV3_WRAPPER_1_BCR 9
+#define GCC_USB3_PHY_PRIM_BCR 10
+#define GCC_USB3_DP_PHY_PRIM_BCR 11
+
+/* GCC GDSCs */
+#define USB30_PRIM_GDSC 0
+#define UFS_PHY_GDSC 1
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 2
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 3
+
+#endif
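
And a hypothetical SDCC2 node wiring up the SM6350 indices (node name and address illustrative):

	sdhc_2: mmc@8804000 {	/* illustrative node and address */
		clocks = <&gcc GCC_SDCC2_AHB_CLK>,
			 <&gcc GCC_SDCC2_APPS_CLK>;
		clock-names = "iface", "core";
		resets = <&gcc GCC_SDCC2_BCR>;
	};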
diff --git a/include/dt-bindings/clock/qcom,gcc-sm8150.h b/include/dt-bindings/clock/qcom,gcc-sm8150.h
index 3e1a91876610..921a33f24d33 100644
--- a/include/dt-bindings/clock/qcom,gcc-sm8150.h
+++ b/include/dt-bindings/clock/qcom,gcc-sm8150.h
@@ -239,9 +239,17 @@
#define GCC_USB30_PRIM_BCR 26
#define GCC_USB30_SEC_BCR 27
#define GCC_USB_PHY_CFG_AHB2PHY_BCR 28
+#define GCC_VIDEO_AXIC_CLK_BCR 29
+#define GCC_VIDEO_AXI0_CLK_BCR 30
+#define GCC_VIDEO_AXI1_CLK_BCR 31

/* GCC GDSCRs */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_CARD_GDSC 2
+#define UFS_PHY_GDSC 3
#define USB30_PRIM_GDSC 4
#define USB30_SEC_GDSC 5
+#define EMAC_GDSC 6

#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sm8350.h b/include/dt-bindings/clock/qcom,gcc-sm8350.h
index f6be3da5f781..529c1b8b0417 100644
--- a/include/dt-bindings/clock/qcom,gcc-sm8350.h
+++ b/include/dt-bindings/clock/qcom,gcc-sm8350.h
@@ -8,7 +8,6 @@
#define _DT_BINDINGS_CLK_QCOM_GCC_SM8350_H

/* GCC HW clocks */
-#define CORE_BI_PLL_TEST_SE 0
#define PCIE_0_PIPE_CLK 1
#define PCIE_1_PIPE_CLK 2
#define UFS_CARD_RX_SYMBOL_0_CLK 3
diff --git a/include/dt-bindings/clock/qcom,gcc-sm8450.h b/include/dt-bindings/clock/qcom,gcc-sm8450.h
new file mode 100644
index 000000000000..7320e63c3a2f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sm8450.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8450_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8450_H
+
+/* GCC HW clocks */
+#define PCIE_0_PIPE_CLK 1
+#define PCIE_1_PHY_AUX_CLK 2
+#define PCIE_1_PIPE_CLK 3
+#define UFS_PHY_RX_SYMBOL_0_CLK 4
+#define UFS_PHY_RX_SYMBOL_1_CLK 5
+#define UFS_PHY_TX_SYMBOL_0_CLK 6
+#define USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK 7
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_0_AXI_CLK 8
+#define GCC_AGGRE_NOC_PCIE_1_AXI_CLK 9
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 10
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 11
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 12
+#define GCC_ANOC_PCIE_PWRCTL_CLK 13
+#define GCC_BOOT_ROM_AHB_CLK 14
+#define GCC_CAMERA_AHB_CLK 15
+#define GCC_CAMERA_HF_AXI_CLK 16
+#define GCC_CAMERA_SF_AXI_CLK 17
+#define GCC_CAMERA_XO_CLK 18
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 19
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 20
+#define GCC_CPUSS_AHB_CLK 21
+#define GCC_CPUSS_AHB_CLK_SRC 22
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 23
+#define GCC_CPUSS_CONFIG_NOC_SF_CLK 24
+#define GCC_DDRSS_GPU_AXI_CLK 25
+#define GCC_DDRSS_PCIE_SF_TBU_CLK 26
+#define GCC_DISP_AHB_CLK 27
+#define GCC_DISP_HF_AXI_CLK 28
+#define GCC_DISP_SF_AXI_CLK 29
+#define GCC_DISP_XO_CLK 30
+#define GCC_EUSB3_0_CLKREF_EN 31
+#define GCC_GP1_CLK 32
+#define GCC_GP1_CLK_SRC 33
+#define GCC_GP2_CLK 34
+#define GCC_GP2_CLK_SRC 35
+#define GCC_GP3_CLK 36
+#define GCC_GP3_CLK_SRC 37
+#define GCC_GPLL0 38
+#define GCC_GPLL0_OUT_EVEN 39
+#define GCC_GPLL4 40
+#define GCC_GPLL9 41
+#define GCC_GPU_CFG_AHB_CLK 42
+#define GCC_GPU_GPLL0_CLK_SRC 43
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 44
+#define GCC_GPU_MEMNOC_GFX_CLK 45
+#define GCC_GPU_SNOC_DVM_GFX_CLK 46
+#define GCC_PCIE_0_AUX_CLK 47
+#define GCC_PCIE_0_AUX_CLK_SRC 48
+#define GCC_PCIE_0_CFG_AHB_CLK 49
+#define GCC_PCIE_0_CLKREF_EN 50
+#define GCC_PCIE_0_MSTR_AXI_CLK 51
+#define GCC_PCIE_0_PHY_RCHNG_CLK 52
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 53
+#define GCC_PCIE_0_PIPE_CLK 54
+#define GCC_PCIE_0_PIPE_CLK_SRC 55
+#define GCC_PCIE_0_SLV_AXI_CLK 56
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 57
+#define GCC_PCIE_1_AUX_CLK 58
+#define GCC_PCIE_1_AUX_CLK_SRC 59
+#define GCC_PCIE_1_CFG_AHB_CLK 60
+#define GCC_PCIE_1_CLKREF_EN 61
+#define GCC_PCIE_1_MSTR_AXI_CLK 62
+#define GCC_PCIE_1_PHY_AUX_CLK 63
+#define GCC_PCIE_1_PHY_AUX_CLK_SRC 64
+#define GCC_PCIE_1_PHY_RCHNG_CLK 65
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 66
+#define GCC_PCIE_1_PIPE_CLK 67
+#define GCC_PCIE_1_PIPE_CLK_SRC 68
+#define GCC_PCIE_1_SLV_AXI_CLK 69
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 70
+#define GCC_PDM2_CLK 71
+#define GCC_PDM2_CLK_SRC 72
+#define GCC_PDM_AHB_CLK 73
+#define GCC_PDM_XO4_CLK 74
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 75
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 76
+#define GCC_QMIP_DISP_AHB_CLK 77
+#define GCC_QMIP_GPU_AHB_CLK 78
+#define GCC_QMIP_PCIE_AHB_CLK 79
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 80
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 81
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 82
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 83
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 84
+#define GCC_QUPV3_WRAP0_CORE_CLK 85
+#define GCC_QUPV3_WRAP0_S0_CLK 86
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 87
+#define GCC_QUPV3_WRAP0_S1_CLK 88
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 89
+#define GCC_QUPV3_WRAP0_S2_CLK 90
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 91
+#define GCC_QUPV3_WRAP0_S3_CLK 92
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 93
+#define GCC_QUPV3_WRAP0_S4_CLK 94
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 95
+#define GCC_QUPV3_WRAP0_S5_CLK 96
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 97
+#define GCC_QUPV3_WRAP0_S6_CLK 98
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 99
+#define GCC_QUPV3_WRAP0_S7_CLK 100
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 101
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 102
+#define GCC_QUPV3_WRAP1_CORE_CLK 103
+#define GCC_QUPV3_WRAP1_S0_CLK 104
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 105
+#define GCC_QUPV3_WRAP1_S1_CLK 106
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 107
+#define GCC_QUPV3_WRAP1_S2_CLK 108
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 109
+#define GCC_QUPV3_WRAP1_S3_CLK 110
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 111
+#define GCC_QUPV3_WRAP1_S4_CLK 112
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 113
+#define GCC_QUPV3_WRAP1_S5_CLK 114
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 115
+#define GCC_QUPV3_WRAP1_S6_CLK 116
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 117
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 118
+#define GCC_QUPV3_WRAP2_CORE_CLK 119
+#define GCC_QUPV3_WRAP2_S0_CLK 120
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 121
+#define GCC_QUPV3_WRAP2_S1_CLK 122
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 123
+#define GCC_QUPV3_WRAP2_S2_CLK 124
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 125
+#define GCC_QUPV3_WRAP2_S3_CLK 126
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 127
+#define GCC_QUPV3_WRAP2_S4_CLK 128
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 129
+#define GCC_QUPV3_WRAP2_S5_CLK 130
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 131
+#define GCC_QUPV3_WRAP2_S6_CLK 132
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 133
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 134
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 135
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 136
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 137
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 138
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 139
+#define GCC_SDCC2_AHB_CLK 140
+#define GCC_SDCC2_APPS_CLK 141
+#define GCC_SDCC2_APPS_CLK_SRC 142
+#define GCC_SDCC2_AT_CLK 143
+#define GCC_SDCC4_AHB_CLK 144
+#define GCC_SDCC4_APPS_CLK 145
+#define GCC_SDCC4_APPS_CLK_SRC 146
+#define GCC_SDCC4_AT_CLK 147
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 148
+#define GCC_UFS_0_CLKREF_EN 149
+#define GCC_UFS_PHY_AHB_CLK 150
+#define GCC_UFS_PHY_AXI_CLK 151
+#define GCC_UFS_PHY_AXI_CLK_SRC 152
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 153
+#define GCC_UFS_PHY_ICE_CORE_CLK 154
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 155
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 156
+#define GCC_UFS_PHY_PHY_AUX_CLK 157
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 158
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 159
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 160
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 161
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 162
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 163
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 164
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 165
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 166
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 167
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 168
+#define GCC_USB30_PRIM_MASTER_CLK 169
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 170
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 171
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 172
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 173
+#define GCC_USB30_PRIM_SLEEP_CLK 174
+#define GCC_USB3_0_CLKREF_EN 175
+#define GCC_USB3_PRIM_PHY_AUX_CLK 176
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 177
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 178
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 179
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 180
+#define GCC_VIDEO_AHB_CLK 181
+#define GCC_VIDEO_AXI0_CLK 182
+#define GCC_VIDEO_AXI1_CLK 183
+#define GCC_VIDEO_XO_CLK 184
+/* Additional SM8475-specific clocks */
+#define SM8475_GCC_GPLL2 185
+#define SM8475_GCC_GPLL3 186
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_MMSS_BCR 3
+#define GCC_PCIE_0_BCR 4
+#define GCC_PCIE_0_LINK_DOWN_BCR 5
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 6
+#define GCC_PCIE_0_PHY_BCR 7
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_1_BCR 9
+#define GCC_PCIE_1_LINK_DOWN_BCR 10
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 11
+#define GCC_PCIE_1_PHY_BCR 12
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 13
+#define GCC_PCIE_PHY_BCR 14
+#define GCC_PCIE_PHY_CFG_AHB_BCR 15
+#define GCC_PCIE_PHY_COM_BCR 16
+#define GCC_PDM_BCR 17
+#define GCC_QUPV3_WRAPPER_0_BCR 18
+#define GCC_QUPV3_WRAPPER_1_BCR 19
+#define GCC_QUPV3_WRAPPER_2_BCR 20
+#define GCC_QUSB2PHY_PRIM_BCR 21
+#define GCC_QUSB2PHY_SEC_BCR 22
+#define GCC_SDCC2_BCR 23
+#define GCC_SDCC4_BCR 24
+#define GCC_UFS_PHY_BCR 25
+#define GCC_USB30_PRIM_BCR 26
+#define GCC_USB3_DP_PHY_PRIM_BCR 27
+#define GCC_USB3_DP_PHY_SEC_BCR 28
+#define GCC_USB3_PHY_PRIM_BCR 29
+#define GCC_USB3_PHY_SEC_BCR 30
+#define GCC_USB3PHY_PHY_PRIM_BCR 31
+#define GCC_USB3PHY_PHY_SEC_BCR 32
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 33
+#define GCC_VIDEO_AXI0_CLK_ARES 34
+#define GCC_VIDEO_AXI1_CLK_ARES 35
+#define GCC_VIDEO_BCR 36
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_PHY_GDSC 2
+#define USB30_PRIM_GDSC 3
+
+#endif
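Usage note: the macros above are not hardware register values; each one is an
index passed as the single cell of the gcc provider node (#clock-cells = <1>,
#reset-cells = <1>, #power-domain-cells = <1>). A minimal, hypothetical DTS
consumer sketch follows; the &gcc label, node name, unit address, and reg
value are illustrative assumptions, not taken from this patch:

    #include <dt-bindings/clock/qcom,gcc-sm8450.h>

    mmc@8804000 {
        compatible = "qcom,sm8450-sdhci", "qcom,sdhci-msm-v5";
        reg = <0x0 0x08804000 0x0 0x1000>;
        /* each cell after &gcc is one of the clock indices defined above */
        clocks = <&gcc GCC_SDCC2_AHB_CLK>,
                 <&gcc GCC_SDCC2_APPS_CLK>;
        clock-names = "iface", "core";
    };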
diff --git a/include/dt-bindings/clock/qcom,glymur-dispcc.h b/include/dt-bindings/clock/qcom,glymur-dispcc.h
new file mode 100644
index 000000000000..a845d76defe2
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,glymur-dispcc.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025, Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_GLYMUR_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_GLYMUR_H
+
+/* DISP_CC clocks */
+#define DISP_CC_ESYNC0_CLK 0
+#define DISP_CC_ESYNC0_CLK_SRC 1
+#define DISP_CC_ESYNC1_CLK 2
+#define DISP_CC_ESYNC1_CLK_SRC 3
+#define DISP_CC_MDSS_ACCU_SHIFT_CLK 4
+#define DISP_CC_MDSS_AHB1_CLK 5
+#define DISP_CC_MDSS_AHB_CLK 6
+#define DISP_CC_MDSS_AHB_CLK_SRC 7
+#define DISP_CC_MDSS_BYTE0_CLK 8
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 10
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 11
+#define DISP_CC_MDSS_BYTE1_CLK 12
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 13
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 14
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 15
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 16
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 17
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 18
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 19
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 20
+#define DISP_CC_MDSS_DPTX0_LINK_DPIN_CLK 21
+#define DISP_CC_MDSS_DPTX0_LINK_DPIN_DIV_CLK_SRC 22
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 23
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 24
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 25
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 26
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 27
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 28
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 29
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 30
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 31
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 32
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 33
+#define DISP_CC_MDSS_DPTX1_LINK_DPIN_CLK 34
+#define DISP_CC_MDSS_DPTX1_LINK_DPIN_DIV_CLK_SRC 35
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 36
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 37
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 38
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 39
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 40
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 41
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 42
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 43
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 44
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 45
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 46
+#define DISP_CC_MDSS_DPTX2_LINK_DPIN_CLK 47
+#define DISP_CC_MDSS_DPTX2_LINK_DPIN_DIV_CLK_SRC 48
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 49
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 50
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 51
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 52
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 53
+#define DISP_CC_MDSS_DPTX2_USB_ROUTER_LINK_INTF_CLK 54
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 55
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 56
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 57
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 58
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 59
+#define DISP_CC_MDSS_DPTX3_LINK_DPIN_CLK 60
+#define DISP_CC_MDSS_DPTX3_LINK_DPIN_DIV_CLK_SRC 61
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 62
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 63
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 64
+#define DISP_CC_MDSS_ESC0_CLK 65
+#define DISP_CC_MDSS_ESC0_CLK_SRC 66
+#define DISP_CC_MDSS_ESC1_CLK 67
+#define DISP_CC_MDSS_ESC1_CLK_SRC 68
+#define DISP_CC_MDSS_MDP1_CLK 69
+#define DISP_CC_MDSS_MDP_CLK 70
+#define DISP_CC_MDSS_MDP_CLK_SRC 71
+#define DISP_CC_MDSS_MDP_LUT1_CLK 72
+#define DISP_CC_MDSS_MDP_LUT_CLK 73
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 74
+#define DISP_CC_MDSS_PCLK0_CLK 75
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 76
+#define DISP_CC_MDSS_PCLK1_CLK 77
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 78
+#define DISP_CC_MDSS_PCLK2_CLK 79
+#define DISP_CC_MDSS_PCLK2_CLK_SRC 80
+#define DISP_CC_MDSS_RSCC_AHB_CLK 81
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 82
+#define DISP_CC_MDSS_VSYNC1_CLK 83
+#define DISP_CC_MDSS_VSYNC_CLK 84
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 85
+#define DISP_CC_OSC_CLK 86
+#define DISP_CC_OSC_CLK_SRC 87
+#define DISP_CC_PLL0 88
+#define DISP_CC_PLL1 89
+#define DISP_CC_SLEEP_CLK 90
+#define DISP_CC_SLEEP_CLK_SRC 91
+#define DISP_CC_XO_CLK 92
+#define DISP_CC_XO_CLK_SRC 93
+
+/* DISP_CC power domains */
+#define DISP_CC_MDSS_CORE_GDSC 0
+#define DISP_CC_MDSS_CORE_INT2_GDSC 1
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,glymur-gcc.h b/include/dt-bindings/clock/qcom,glymur-gcc.h
new file mode 100644
index 000000000000..10c12b8c51c3
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,glymur-gcc.h
@@ -0,0 +1,578 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_GLYMUR_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_GLYMUR_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL1 2
+#define GCC_GPLL14 3
+#define GCC_GPLL14_OUT_EVEN 4
+#define GCC_GPLL4 5
+#define GCC_GPLL5 6
+#define GCC_GPLL7 7
+#define GCC_GPLL8 8
+#define GCC_GPLL9 9
+#define GCC_AGGRE_NOC_PCIE_3A_WEST_SF_AXI_CLK 10
+#define GCC_AGGRE_NOC_PCIE_3B_WEST_SF_AXI_CLK 11
+#define GCC_AGGRE_NOC_PCIE_4_WEST_SF_AXI_CLK 12
+#define GCC_AGGRE_NOC_PCIE_5_EAST_SF_AXI_CLK 13
+#define GCC_AGGRE_NOC_PCIE_6_WEST_SF_AXI_CLK 14
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 15
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 16
+#define GCC_AGGRE_USB2_PRIM_AXI_CLK 17
+#define GCC_AGGRE_USB3_MP_AXI_CLK 18
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 19
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 20
+#define GCC_AGGRE_USB3_TERT_AXI_CLK 21
+#define GCC_AGGRE_USB4_0_AXI_CLK 22
+#define GCC_AGGRE_USB4_1_AXI_CLK 23
+#define GCC_AGGRE_USB4_2_AXI_CLK 24
+#define GCC_AV1E_AHB_CLK 25
+#define GCC_AV1E_AXI_CLK 26
+#define GCC_AV1E_XO_CLK 27
+#define GCC_BOOT_ROM_AHB_CLK 28
+#define GCC_CAMERA_AHB_CLK 29
+#define GCC_CAMERA_HF_AXI_CLK 30
+#define GCC_CAMERA_SF_AXI_CLK 31
+#define GCC_CAMERA_XO_CLK 32
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 33
+#define GCC_CFG_NOC_PCIE_ANOC_SOUTH_AHB_CLK 34
+#define GCC_CFG_NOC_USB2_PRIM_AXI_CLK 35
+#define GCC_CFG_NOC_USB3_MP_AXI_CLK 36
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 37
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 38
+#define GCC_CFG_NOC_USB3_TERT_AXI_CLK 39
+#define GCC_CFG_NOC_USB_ANOC_AHB_CLK 40
+#define GCC_CFG_NOC_USB_ANOC_SOUTH_AHB_CLK 41
+#define GCC_DISP_AHB_CLK 42
+#define GCC_DISP_HF_AXI_CLK 43
+#define GCC_EVA_AHB_CLK 44
+#define GCC_EVA_AXI0_CLK 45
+#define GCC_EVA_AXI0C_CLK 46
+#define GCC_EVA_XO_CLK 47
+#define GCC_GP1_CLK 48
+#define GCC_GP1_CLK_SRC 49
+#define GCC_GP2_CLK 50
+#define GCC_GP2_CLK_SRC 51
+#define GCC_GP3_CLK 52
+#define GCC_GP3_CLK_SRC 53
+#define GCC_GPU_CFG_AHB_CLK 54
+#define GCC_GPU_GEMNOC_GFX_CLK 55
+#define GCC_GPU_GPLL0_CLK_SRC 56
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 57
+#define GCC_PCIE_0_AUX_CLK 58
+#define GCC_PCIE_0_AUX_CLK_SRC 59
+#define GCC_PCIE_0_CFG_AHB_CLK 60
+#define GCC_PCIE_0_MSTR_AXI_CLK 61
+#define GCC_PCIE_0_PHY_RCHNG_CLK 62
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 63
+#define GCC_PCIE_0_PIPE_CLK 64
+#define GCC_PCIE_0_SLV_AXI_CLK 65
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 66
+#define GCC_PCIE_1_AUX_CLK 67
+#define GCC_PCIE_1_AUX_CLK_SRC 68
+#define GCC_PCIE_1_CFG_AHB_CLK 69
+#define GCC_PCIE_1_MSTR_AXI_CLK 70
+#define GCC_PCIE_1_PHY_RCHNG_CLK 71
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 72
+#define GCC_PCIE_1_PIPE_CLK 73
+#define GCC_PCIE_1_SLV_AXI_CLK 74
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 75
+#define GCC_PCIE_2_AUX_CLK 76
+#define GCC_PCIE_2_AUX_CLK_SRC 77
+#define GCC_PCIE_2_CFG_AHB_CLK 78
+#define GCC_PCIE_2_MSTR_AXI_CLK 79
+#define GCC_PCIE_2_PHY_RCHNG_CLK 80
+#define GCC_PCIE_2_PHY_RCHNG_CLK_SRC 81
+#define GCC_PCIE_2_PIPE_CLK 82
+#define GCC_PCIE_2_SLV_AXI_CLK 83
+#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 84
+#define GCC_PCIE_3A_AUX_CLK 85
+#define GCC_PCIE_3A_AUX_CLK_SRC 86
+#define GCC_PCIE_3A_CFG_AHB_CLK 87
+#define GCC_PCIE_3A_MSTR_AXI_CLK 88
+#define GCC_PCIE_3A_PHY_RCHNG_CLK 89
+#define GCC_PCIE_3A_PHY_RCHNG_CLK_SRC 90
+#define GCC_PCIE_3A_PIPE_CLK 91
+#define GCC_PCIE_3A_PIPE_CLK_SRC 92
+#define GCC_PCIE_3A_SLV_AXI_CLK 93
+#define GCC_PCIE_3A_SLV_Q2A_AXI_CLK 94
+#define GCC_PCIE_3B_AUX_CLK 95
+#define GCC_PCIE_3B_AUX_CLK_SRC 96
+#define GCC_PCIE_3B_CFG_AHB_CLK 97
+#define GCC_PCIE_3B_MSTR_AXI_CLK 98
+#define GCC_PCIE_3B_PHY_RCHNG_CLK 99
+#define GCC_PCIE_3B_PHY_RCHNG_CLK_SRC 100
+#define GCC_PCIE_3B_PIPE_CLK 101
+#define GCC_PCIE_3B_PIPE_CLK_SRC 102
+#define GCC_PCIE_3B_PIPE_DIV2_CLK 103
+#define GCC_PCIE_3B_PIPE_DIV_CLK_SRC 104
+#define GCC_PCIE_3B_SLV_AXI_CLK 105
+#define GCC_PCIE_3B_SLV_Q2A_AXI_CLK 106
+#define GCC_PCIE_4_AUX_CLK 107
+#define GCC_PCIE_4_AUX_CLK_SRC 108
+#define GCC_PCIE_4_CFG_AHB_CLK 109
+#define GCC_PCIE_4_MSTR_AXI_CLK 110
+#define GCC_PCIE_4_PHY_RCHNG_CLK 111
+#define GCC_PCIE_4_PHY_RCHNG_CLK_SRC 112
+#define GCC_PCIE_4_PIPE_CLK 113
+#define GCC_PCIE_4_PIPE_CLK_SRC 114
+#define GCC_PCIE_4_PIPE_DIV2_CLK 115
+#define GCC_PCIE_4_PIPE_DIV_CLK_SRC 116
+#define GCC_PCIE_4_SLV_AXI_CLK 117
+#define GCC_PCIE_4_SLV_Q2A_AXI_CLK 118
+#define GCC_PCIE_5_AUX_CLK 119
+#define GCC_PCIE_5_AUX_CLK_SRC 120
+#define GCC_PCIE_5_CFG_AHB_CLK 121
+#define GCC_PCIE_5_MSTR_AXI_CLK 122
+#define GCC_PCIE_5_PHY_RCHNG_CLK 123
+#define GCC_PCIE_5_PHY_RCHNG_CLK_SRC 124
+#define GCC_PCIE_5_PIPE_CLK 125
+#define GCC_PCIE_5_PIPE_CLK_SRC 126
+#define GCC_PCIE_5_PIPE_DIV2_CLK 127
+#define GCC_PCIE_5_PIPE_DIV_CLK_SRC 128
+#define GCC_PCIE_5_SLV_AXI_CLK 129
+#define GCC_PCIE_5_SLV_Q2A_AXI_CLK 130
+#define GCC_PCIE_6_AUX_CLK 131
+#define GCC_PCIE_6_AUX_CLK_SRC 132
+#define GCC_PCIE_6_CFG_AHB_CLK 133
+#define GCC_PCIE_6_MSTR_AXI_CLK 134
+#define GCC_PCIE_6_PHY_RCHNG_CLK 135
+#define GCC_PCIE_6_PHY_RCHNG_CLK_SRC 136
+#define GCC_PCIE_6_PIPE_CLK 137
+#define GCC_PCIE_6_PIPE_CLK_SRC 138
+#define GCC_PCIE_6_PIPE_DIV2_CLK 139
+#define GCC_PCIE_6_PIPE_DIV_CLK_SRC 140
+#define GCC_PCIE_6_SLV_AXI_CLK 141
+#define GCC_PCIE_6_SLV_Q2A_AXI_CLK 142
+#define GCC_PCIE_NOC_PWRCTL_CLK 143
+#define GCC_PCIE_NOC_QOSGEN_EXTREF_CLK 144
+#define GCC_PCIE_NOC_SF_CENTER_CLK 145
+#define GCC_PCIE_NOC_SLAVE_SF_EAST_CLK 146
+#define GCC_PCIE_NOC_SLAVE_SF_WEST_CLK 147
+#define GCC_PCIE_NOC_TSCTR_CLK 148
+#define GCC_PCIE_PHY_3A_AUX_CLK 149
+#define GCC_PCIE_PHY_3A_AUX_CLK_SRC 150
+#define GCC_PCIE_PHY_3B_AUX_CLK 151
+#define GCC_PCIE_PHY_3B_AUX_CLK_SRC 152
+#define GCC_PCIE_PHY_4_AUX_CLK 153
+#define GCC_PCIE_PHY_4_AUX_CLK_SRC 154
+#define GCC_PCIE_PHY_5_AUX_CLK 155
+#define GCC_PCIE_PHY_5_AUX_CLK_SRC 156
+#define GCC_PCIE_PHY_6_AUX_CLK 157
+#define GCC_PCIE_PHY_6_AUX_CLK_SRC 158
+#define GCC_PCIE_RSCC_CFG_AHB_CLK 159
+#define GCC_PCIE_RSCC_XO_CLK 160
+#define GCC_PDM2_CLK 161
+#define GCC_PDM2_CLK_SRC 162
+#define GCC_PDM_AHB_CLK 163
+#define GCC_PDM_XO4_CLK 164
+#define GCC_QMIP_AV1E_AHB_CLK 165
+#define GCC_QMIP_CAMERA_CMD_AHB_CLK 166
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 167
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 168
+#define GCC_QMIP_GPU_AHB_CLK 169
+#define GCC_QMIP_PCIE_3A_AHB_CLK 170
+#define GCC_QMIP_PCIE_3B_AHB_CLK 171
+#define GCC_QMIP_PCIE_4_AHB_CLK 172
+#define GCC_QMIP_PCIE_5_AHB_CLK 173
+#define GCC_QMIP_PCIE_6_AHB_CLK 174
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 175
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 176
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 177
+#define GCC_QMIP_VIDEO_VCODEC1_AHB_CLK 178
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 179
+#define GCC_QUPV3_OOB_CORE_2X_CLK 180
+#define GCC_QUPV3_OOB_CORE_CLK 181
+#define GCC_QUPV3_OOB_M_AHB_CLK 182
+#define GCC_QUPV3_OOB_QSPI_S0_CLK 183
+#define GCC_QUPV3_OOB_QSPI_S0_CLK_SRC 184
+#define GCC_QUPV3_OOB_QSPI_S1_CLK 185
+#define GCC_QUPV3_OOB_QSPI_S1_CLK_SRC 186
+#define GCC_QUPV3_OOB_S0_CLK 187
+#define GCC_QUPV3_OOB_S0_CLK_SRC 188
+#define GCC_QUPV3_OOB_S1_CLK 189
+#define GCC_QUPV3_OOB_S1_CLK_SRC 190
+#define GCC_QUPV3_OOB_S_AHB_CLK 191
+#define GCC_QUPV3_OOB_TCXO_CLK 192
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 193
+#define GCC_QUPV3_WRAP0_CORE_CLK 194
+#define GCC_QUPV3_WRAP0_QSPI_S2_CLK 195
+#define GCC_QUPV3_WRAP0_QSPI_S2_CLK_SRC 196
+#define GCC_QUPV3_WRAP0_QSPI_S3_CLK 197
+#define GCC_QUPV3_WRAP0_QSPI_S3_CLK_SRC 198
+#define GCC_QUPV3_WRAP0_QSPI_S6_CLK 199
+#define GCC_QUPV3_WRAP0_QSPI_S6_CLK_SRC 200
+#define GCC_QUPV3_WRAP0_S0_CLK 201
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 202
+#define GCC_QUPV3_WRAP0_S1_CLK 203
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 204
+#define GCC_QUPV3_WRAP0_S2_CLK 205
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 206
+#define GCC_QUPV3_WRAP0_S3_CLK 207
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 208
+#define GCC_QUPV3_WRAP0_S4_CLK 209
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 210
+#define GCC_QUPV3_WRAP0_S5_CLK 211
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 212
+#define GCC_QUPV3_WRAP0_S6_CLK 213
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 214
+#define GCC_QUPV3_WRAP0_S7_CLK 215
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 216
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 217
+#define GCC_QUPV3_WRAP1_CORE_CLK 218
+#define GCC_QUPV3_WRAP1_QSPI_S2_CLK 219
+#define GCC_QUPV3_WRAP1_QSPI_S2_CLK_SRC 220
+#define GCC_QUPV3_WRAP1_QSPI_S3_CLK 221
+#define GCC_QUPV3_WRAP1_QSPI_S3_CLK_SRC 222
+#define GCC_QUPV3_WRAP1_QSPI_S6_CLK 223
+#define GCC_QUPV3_WRAP1_QSPI_S6_CLK_SRC 224
+#define GCC_QUPV3_WRAP1_S0_CLK 225
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 226
+#define GCC_QUPV3_WRAP1_S1_CLK 227
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 228
+#define GCC_QUPV3_WRAP1_S2_CLK 229
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 230
+#define GCC_QUPV3_WRAP1_S3_CLK 231
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 232
+#define GCC_QUPV3_WRAP1_S4_CLK 233
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 234
+#define GCC_QUPV3_WRAP1_S5_CLK 235
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 236
+#define GCC_QUPV3_WRAP1_S6_CLK 237
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 238
+#define GCC_QUPV3_WRAP1_S7_CLK 239
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 240
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 241
+#define GCC_QUPV3_WRAP2_CORE_CLK 242
+#define GCC_QUPV3_WRAP2_QSPI_S2_CLK 243
+#define GCC_QUPV3_WRAP2_QSPI_S2_CLK_SRC 244
+#define GCC_QUPV3_WRAP2_QSPI_S3_CLK 245
+#define GCC_QUPV3_WRAP2_QSPI_S3_CLK_SRC 246
+#define GCC_QUPV3_WRAP2_QSPI_S6_CLK 247
+#define GCC_QUPV3_WRAP2_QSPI_S6_CLK_SRC 248
+#define GCC_QUPV3_WRAP2_S0_CLK 249
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 250
+#define GCC_QUPV3_WRAP2_S1_CLK 251
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 252
+#define GCC_QUPV3_WRAP2_S2_CLK 253
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 254
+#define GCC_QUPV3_WRAP2_S3_CLK 255
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 256
+#define GCC_QUPV3_WRAP2_S4_CLK 257
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 258
+#define GCC_QUPV3_WRAP2_S5_CLK 259
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 260
+#define GCC_QUPV3_WRAP2_S6_CLK 261
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 262
+#define GCC_QUPV3_WRAP2_S7_CLK 263
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 264
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 265
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 266
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 267
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 268
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 269
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 270
+#define GCC_SDCC2_AHB_CLK 271
+#define GCC_SDCC2_APPS_CLK 272
+#define GCC_SDCC2_APPS_CLK_SRC 273
+#define GCC_SDCC4_AHB_CLK 274
+#define GCC_SDCC4_APPS_CLK 275
+#define GCC_SDCC4_APPS_CLK_SRC 276
+#define GCC_UFS_PHY_AHB_CLK 277
+#define GCC_UFS_PHY_AXI_CLK 278
+#define GCC_UFS_PHY_AXI_CLK_SRC 279
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 280
+#define GCC_UFS_PHY_ICE_CORE_CLK 281
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 282
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 283
+#define GCC_UFS_PHY_PHY_AUX_CLK 284
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 285
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 286
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 287
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 288
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 289
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 290
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 291
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 292
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 293
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 294
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 295
+#define GCC_USB20_MASTER_CLK 296
+#define GCC_USB20_MASTER_CLK_SRC 297
+#define GCC_USB20_MOCK_UTMI_CLK 298
+#define GCC_USB20_MOCK_UTMI_CLK_SRC 299
+#define GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC 300
+#define GCC_USB20_SLEEP_CLK 301
+#define GCC_USB30_MP_MASTER_CLK 302
+#define GCC_USB30_MP_MASTER_CLK_SRC 303
+#define GCC_USB30_MP_MOCK_UTMI_CLK 304
+#define GCC_USB30_MP_MOCK_UTMI_CLK_SRC 305
+#define GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC 306
+#define GCC_USB30_MP_SLEEP_CLK 307
+#define GCC_USB30_PRIM_MASTER_CLK 308
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 309
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 310
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 311
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 312
+#define GCC_USB30_PRIM_SLEEP_CLK 313
+#define GCC_USB30_SEC_MASTER_CLK 314
+#define GCC_USB30_SEC_MASTER_CLK_SRC 315
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 316
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 317
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 318
+#define GCC_USB30_SEC_SLEEP_CLK 319
+#define GCC_USB30_TERT_MASTER_CLK 320
+#define GCC_USB30_TERT_MASTER_CLK_SRC 321
+#define GCC_USB30_TERT_MOCK_UTMI_CLK 322
+#define GCC_USB30_TERT_MOCK_UTMI_CLK_SRC 323
+#define GCC_USB30_TERT_MOCK_UTMI_POSTDIV_CLK_SRC 324
+#define GCC_USB30_TERT_SLEEP_CLK 325
+#define GCC_USB34_PRIM_PHY_PIPE_CLK_SRC 326
+#define GCC_USB34_SEC_PHY_PIPE_CLK_SRC 327
+#define GCC_USB34_TERT_PHY_PIPE_CLK_SRC 328
+#define GCC_USB3_MP_PHY_AUX_CLK 329
+#define GCC_USB3_MP_PHY_AUX_CLK_SRC 330
+#define GCC_USB3_MP_PHY_COM_AUX_CLK 331
+#define GCC_USB3_MP_PHY_PIPE_0_CLK 332
+#define GCC_USB3_MP_PHY_PIPE_0_CLK_SRC 333
+#define GCC_USB3_MP_PHY_PIPE_1_CLK 334
+#define GCC_USB3_MP_PHY_PIPE_1_CLK_SRC 335
+#define GCC_USB3_PRIM_PHY_AUX_CLK 336
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 337
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 338
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 339
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 340
+#define GCC_USB3_SEC_PHY_AUX_CLK 341
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 342
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 343
+#define GCC_USB3_SEC_PHY_PIPE_CLK 344
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 345
+#define GCC_USB3_TERT_PHY_AUX_CLK 346
+#define GCC_USB3_TERT_PHY_AUX_CLK_SRC 347
+#define GCC_USB3_TERT_PHY_COM_AUX_CLK 348
+#define GCC_USB3_TERT_PHY_PIPE_CLK 349
+#define GCC_USB3_TERT_PHY_PIPE_CLK_SRC 350
+#define GCC_USB4_0_CFG_AHB_CLK 351
+#define GCC_USB4_0_DP0_CLK 352
+#define GCC_USB4_0_DP1_CLK 353
+#define GCC_USB4_0_MASTER_CLK 354
+#define GCC_USB4_0_MASTER_CLK_SRC 355
+#define GCC_USB4_0_PHY_DP0_CLK_SRC 356
+#define GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC 357
+#define GCC_USB4_0_PHY_DP1_CLK_SRC 358
+#define GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC 359
+#define GCC_USB4_0_PHY_P2RR2P_PIPE_CLK 360
+#define GCC_USB4_0_PHY_P2RR2P_PIPE_CLK_SRC 361
+#define GCC_USB4_0_PHY_PCIE_PIPE_CLK 362
+#define GCC_USB4_0_PHY_PCIE_PIPE_CLK_SRC 363
+#define GCC_USB4_0_PHY_PCIE_PIPE_MUX_CLK_SRC 364
+#define GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC 365
+#define GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC 366
+#define GCC_USB4_0_PHY_RX0_CLK 367
+#define GCC_USB4_0_PHY_RX0_CLK_SRC 368
+#define GCC_USB4_0_PHY_RX1_CLK 369
+#define GCC_USB4_0_PHY_RX1_CLK_SRC 370
+#define GCC_USB4_0_PHY_SYS_CLK_SRC 371
+#define GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC 372
+#define GCC_USB4_0_PHY_USB_PIPE_CLK 373
+#define GCC_USB4_0_SB_IF_CLK 374
+#define GCC_USB4_0_SB_IF_CLK_SRC 375
+#define GCC_USB4_0_SYS_CLK 376
+#define GCC_USB4_0_TMU_CLK 377
+#define GCC_USB4_0_TMU_CLK_SRC 378
+#define GCC_USB4_0_UC_HRR_CLK 379
+#define GCC_USB4_1_CFG_AHB_CLK 380
+#define GCC_USB4_1_DP0_CLK 381
+#define GCC_USB4_1_DP1_CLK 382
+#define GCC_USB4_1_MASTER_CLK 383
+#define GCC_USB4_1_MASTER_CLK_SRC 384
+#define GCC_USB4_1_PHY_DP0_CLK_SRC 385
+#define GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC 386
+#define GCC_USB4_1_PHY_DP1_CLK_SRC 387
+#define GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC 388
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK 389
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC 390
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK 391
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC 392
+#define GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC 393
+#define GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC 394
+#define GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC 395
+#define GCC_USB4_1_PHY_PLL_PIPE_CLK_SRC 396
+#define GCC_USB4_1_PHY_RX0_CLK 397
+#define GCC_USB4_1_PHY_RX0_CLK_SRC 398
+#define GCC_USB4_1_PHY_RX1_CLK 399
+#define GCC_USB4_1_PHY_RX1_CLK_SRC 400
+#define GCC_USB4_1_PHY_SYS_CLK_SRC 401
+#define GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC 402
+#define GCC_USB4_1_PHY_USB_PIPE_CLK 403
+#define GCC_USB4_1_SB_IF_CLK 404
+#define GCC_USB4_1_SB_IF_CLK_SRC 405
+#define GCC_USB4_1_SYS_CLK 406
+#define GCC_USB4_1_TMU_CLK 407
+#define GCC_USB4_1_TMU_CLK_SRC 408
+#define GCC_USB4_1_UC_HRR_CLK 409
+#define GCC_USB4_2_CFG_AHB_CLK 410
+#define GCC_USB4_2_DP0_CLK 411
+#define GCC_USB4_2_DP1_CLK 412
+#define GCC_USB4_2_MASTER_CLK 413
+#define GCC_USB4_2_MASTER_CLK_SRC 414
+#define GCC_USB4_2_PHY_DP0_CLK_SRC 415
+#define GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC 416
+#define GCC_USB4_2_PHY_DP1_CLK_SRC 417
+#define GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC 418
+#define GCC_USB4_2_PHY_P2RR2P_PIPE_CLK 419
+#define GCC_USB4_2_PHY_P2RR2P_PIPE_CLK_SRC 420
+#define GCC_USB4_2_PHY_PCIE_PIPE_CLK 421
+#define GCC_USB4_2_PHY_PCIE_PIPE_CLK_SRC 422
+#define GCC_USB4_2_PHY_PCIE_PIPE_MUX_CLK_SRC 423
+#define GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC 424
+#define GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC 425
+#define GCC_USB4_2_PHY_RX0_CLK 426
+#define GCC_USB4_2_PHY_RX0_CLK_SRC 427
+#define GCC_USB4_2_PHY_RX1_CLK 428
+#define GCC_USB4_2_PHY_RX1_CLK_SRC 429
+#define GCC_USB4_2_PHY_SYS_CLK_SRC 430
+#define GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC 431
+#define GCC_USB4_2_PHY_USB_PIPE_CLK 432
+#define GCC_USB4_2_SB_IF_CLK 433
+#define GCC_USB4_2_SB_IF_CLK_SRC 434
+#define GCC_USB4_2_SYS_CLK 435
+#define GCC_USB4_2_TMU_CLK 436
+#define GCC_USB4_2_TMU_CLK_SRC 437
+#define GCC_USB4_2_UC_HRR_CLK 438
+#define GCC_VIDEO_AHB_CLK 439
+#define GCC_VIDEO_AXI0_CLK 440
+#define GCC_VIDEO_AXI0C_CLK 441
+#define GCC_VIDEO_AXI1_CLK 442
+#define GCC_VIDEO_XO_CLK 443
+
+/* GCC power domains */
+#define GCC_PCIE_0_TUNNEL_GDSC 0
+#define GCC_PCIE_1_TUNNEL_GDSC 1
+#define GCC_PCIE_2_TUNNEL_GDSC 2
+#define GCC_PCIE_3A_GDSC 3
+#define GCC_PCIE_3A_PHY_GDSC 4
+#define GCC_PCIE_3B_GDSC 5
+#define GCC_PCIE_3B_PHY_GDSC 6
+#define GCC_PCIE_4_GDSC 7
+#define GCC_PCIE_4_PHY_GDSC 8
+#define GCC_PCIE_5_GDSC 9
+#define GCC_PCIE_5_PHY_GDSC 10
+#define GCC_PCIE_6_GDSC 11
+#define GCC_PCIE_6_PHY_GDSC 12
+#define GCC_UFS_PHY_GDSC 13
+#define GCC_USB20_PRIM_GDSC 14
+#define GCC_USB30_MP_GDSC 15
+#define GCC_USB30_PRIM_GDSC 16
+#define GCC_USB30_SEC_GDSC 17
+#define GCC_USB30_TERT_GDSC 18
+#define GCC_USB3_MP_SS0_PHY_GDSC 19
+#define GCC_USB3_MP_SS1_PHY_GDSC 20
+#define GCC_USB4_0_GDSC 21
+#define GCC_USB4_1_GDSC 22
+#define GCC_USB4_2_GDSC 23
+#define GCC_USB_0_PHY_GDSC 24
+#define GCC_USB_1_PHY_GDSC 25
+#define GCC_USB_2_PHY_GDSC 26
+
+/* GCC resets */
+#define GCC_AV1E_BCR 0
+#define GCC_CAMERA_BCR 1
+#define GCC_DISPLAY_BCR 2
+#define GCC_EVA_BCR 3
+#define GCC_GPU_BCR 4
+#define GCC_PCIE_0_LINK_DOWN_BCR 5
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 6
+#define GCC_PCIE_0_PHY_BCR 7
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_0_TUNNEL_BCR 9
+#define GCC_PCIE_1_LINK_DOWN_BCR 10
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 11
+#define GCC_PCIE_1_PHY_BCR 12
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 13
+#define GCC_PCIE_1_TUNNEL_BCR 14
+#define GCC_PCIE_2_LINK_DOWN_BCR 15
+#define GCC_PCIE_2_NOCSR_COM_PHY_BCR 16
+#define GCC_PCIE_2_PHY_BCR 17
+#define GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR 18
+#define GCC_PCIE_2_TUNNEL_BCR 19
+#define GCC_PCIE_3A_BCR 20
+#define GCC_PCIE_3A_LINK_DOWN_BCR 21
+#define GCC_PCIE_3A_NOCSR_COM_PHY_BCR 22
+#define GCC_PCIE_3A_PHY_BCR 23
+#define GCC_PCIE_3A_PHY_NOCSR_COM_PHY_BCR 24
+#define GCC_PCIE_3B_BCR 25
+#define GCC_PCIE_3B_LINK_DOWN_BCR 26
+#define GCC_PCIE_3B_NOCSR_COM_PHY_BCR 27
+#define GCC_PCIE_3B_PHY_BCR 28
+#define GCC_PCIE_3B_PHY_NOCSR_COM_PHY_BCR 29
+#define GCC_PCIE_4_BCR 30
+#define GCC_PCIE_4_LINK_DOWN_BCR 31
+#define GCC_PCIE_4_NOCSR_COM_PHY_BCR 32
+#define GCC_PCIE_4_PHY_BCR 33
+#define GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR 34
+#define GCC_PCIE_5_BCR 35
+#define GCC_PCIE_5_LINK_DOWN_BCR 36
+#define GCC_PCIE_5_NOCSR_COM_PHY_BCR 37
+#define GCC_PCIE_5_PHY_BCR 38
+#define GCC_PCIE_5_PHY_NOCSR_COM_PHY_BCR 39
+#define GCC_PCIE_6_BCR 40
+#define GCC_PCIE_6_LINK_DOWN_BCR 41
+#define GCC_PCIE_6_NOCSR_COM_PHY_BCR 42
+#define GCC_PCIE_6_PHY_BCR 43
+#define GCC_PCIE_6_PHY_NOCSR_COM_PHY_BCR 44
+#define GCC_PCIE_NOC_BCR 45
+#define GCC_PCIE_PHY_BCR 46
+#define GCC_PCIE_PHY_CFG_AHB_BCR 47
+#define GCC_PCIE_PHY_COM_BCR 48
+#define GCC_PCIE_RSCC_BCR 49
+#define GCC_PDM_BCR 50
+#define GCC_QUPV3_WRAPPER_0_BCR 51
+#define GCC_QUPV3_WRAPPER_1_BCR 52
+#define GCC_QUPV3_WRAPPER_2_BCR 53
+#define GCC_QUPV3_WRAPPER_OOB_BCR 54
+#define GCC_QUSB2PHY_HS0_MP_BCR 55
+#define GCC_QUSB2PHY_HS1_MP_BCR 56
+#define GCC_QUSB2PHY_PRIM_BCR 57
+#define GCC_QUSB2PHY_SEC_BCR 58
+#define GCC_QUSB2PHY_TERT_BCR 59
+#define GCC_QUSB2PHY_USB20_HS_BCR 60
+#define GCC_SDCC2_BCR 61
+#define GCC_SDCC4_BCR 62
+#define GCC_TCSR_PCIE_BCR 63
+#define GCC_UFS_PHY_BCR 64
+#define GCC_USB20_PRIM_BCR 65
+#define GCC_USB30_MP_BCR 66
+#define GCC_USB30_PRIM_BCR 67
+#define GCC_USB30_SEC_BCR 68
+#define GCC_USB30_TERT_BCR 69
+#define GCC_USB3_MP_SS0_PHY_BCR 70
+#define GCC_USB3_MP_SS1_PHY_BCR 71
+#define GCC_USB3_PHY_PRIM_BCR 72
+#define GCC_USB3_PHY_SEC_BCR 73
+#define GCC_USB3_PHY_TERT_BCR 74
+#define GCC_USB3_UNIPHY_MP0_BCR 75
+#define GCC_USB3_UNIPHY_MP1_BCR 76
+#define GCC_USB3PHY_PHY_PRIM_BCR 77
+#define GCC_USB3PHY_PHY_SEC_BCR 78
+#define GCC_USB3PHY_PHY_TERT_BCR 79
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 80
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 81
+#define GCC_USB4_0_BCR 82
+#define GCC_USB4_0_DP0_PHY_PRIM_BCR 83
+#define GCC_USB4_1_BCR 84
+#define GCC_USB4_2_BCR 85
+#define GCC_USB_0_PHY_BCR 86
+#define GCC_USB_1_PHY_BCR 87
+#define GCC_USB_2_PHY_BCR 88
+#define GCC_VIDEO_AXI0_CLK_ARES 89
+#define GCC_VIDEO_AXI1_CLK_ARES 90
+#define GCC_VIDEO_BCR 91
+
+#endif
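Usage note: the *_BCR identifiers live in a numbering space of their own,
consumed through #reset-cells = <1>; they do not collide with the clock or
GDSC indices above. A hedged sketch of a consumer node (the compatible
string, label, and addresses are placeholders, not from this patch):

    #include <dt-bindings/clock/qcom,glymur-gcc.h>

    usb@a600000 {
        compatible = "snps,dwc3";            /* placeholder compatible */
        reg = <0x0 0x0a600000 0x0 0xe000>;   /* assumed address */
        clocks = <&gcc GCC_USB30_PRIM_MASTER_CLK>;
        clock-names = "core";
        /* reset index 67 in the table above */
        resets = <&gcc GCC_USB30_PRIM_BCR>;
    };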
diff --git a/include/dt-bindings/clock/qcom,glymur-tcsr.h b/include/dt-bindings/clock/qcom,glymur-tcsr.h
new file mode 100644
index 000000000000..72614226b113
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,glymur-tcsr.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_TCSR_CC_GLYMUR_H
+#define _DT_BINDINGS_CLK_QCOM_TCSR_CC_GLYMUR_H
+
+/* TCSR_CC clocks */
+#define TCSR_EDP_CLKREF_EN 0
+#define TCSR_PCIE_1_CLKREF_EN 1
+#define TCSR_PCIE_2_CLKREF_EN 2
+#define TCSR_PCIE_3_CLKREF_EN 3
+#define TCSR_PCIE_4_CLKREF_EN 4
+#define TCSR_USB2_1_CLKREF_EN 5
+#define TCSR_USB2_2_CLKREF_EN 6
+#define TCSR_USB2_3_CLKREF_EN 7
+#define TCSR_USB2_4_CLKREF_EN 8
+#define TCSR_USB3_0_CLKREF_EN 9
+#define TCSR_USB3_1_CLKREF_EN 10
+#define TCSR_USB4_1_CLKREF_EN 11
+#define TCSR_USB4_2_CLKREF_EN 12
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sc7280.h b/include/dt-bindings/clock/qcom,gpucc-sc7280.h
new file mode 100644
index 000000000000..669b23b606ba
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sc7280.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SC7280_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL1 1
+#define GPU_CC_AHB_CLK 2
+#define GPU_CC_CB_CLK 3
+#define GPU_CC_CRC_AHB_CLK 4
+#define GPU_CC_CX_GMU_CLK 5
+#define GPU_CC_CX_SNOC_DVM_CLK 6
+#define GPU_CC_CXO_AON_CLK 7
+#define GPU_CC_CXO_CLK 8
+#define GPU_CC_GMU_CLK_SRC 9
+#define GPU_CC_GX_GMU_CLK 10
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 11
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 12
+#define GPU_CC_HUB_AON_CLK 13
+#define GPU_CC_HUB_CLK_SRC 14
+#define GPU_CC_HUB_CX_INT_CLK 15
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 16
+#define GPU_CC_MND1X_0_GFX3D_CLK 17
+#define GPU_CC_MND1X_1_GFX3D_CLK 18
+#define GPU_CC_SLEEP_CLK 19
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+#define GPU_CC_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sc8280xp.h b/include/dt-bindings/clock/qcom,gpucc-sc8280xp.h
new file mode 100644
index 000000000000..bb7da46333b0
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sc8280xp.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SC8280XP_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SC8280XP_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL1 1
+#define GPU_CC_AHB_CLK 2
+#define GPU_CC_CB_CLK 3
+#define GPU_CC_CRC_AHB_CLK 4
+#define GPU_CC_CX_GMU_CLK 5
+#define GPU_CC_CX_SNOC_DVM_CLK 6
+#define GPU_CC_CXO_AON_CLK 7
+#define GPU_CC_CXO_CLK 8
+#define GPU_CC_FREQ_MEASURE_CLK 9
+#define GPU_CC_GMU_CLK_SRC 10
+#define GPU_CC_GX_GMU_CLK 11
+#define GPU_CC_GX_VSENSE_CLK 12
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 13
+#define GPU_CC_HUB_AON_CLK 14
+#define GPU_CC_HUB_CLK_SRC 15
+#define GPU_CC_HUB_CX_INT_CLK 16
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 17
+#define GPU_CC_SLEEP_CLK 18
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 19
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+#define GPU_CC_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sm6350.h b/include/dt-bindings/clock/qcom,gpucc-sm6350.h
new file mode 100644
index 000000000000..68e814fc8acd
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sm6350.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6350_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6350_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL1 1
+#define GPU_CC_ACD_AHB_CLK 2
+#define GPU_CC_ACD_CXO_CLK 3
+#define GPU_CC_AHB_CLK 4
+#define GPU_CC_CRC_AHB_CLK 5
+#define GPU_CC_CX_GFX3D_CLK 6
+#define GPU_CC_CX_GFX3D_SLV_CLK 7
+#define GPU_CC_CX_GMU_CLK 8
+#define GPU_CC_CX_SNOC_DVM_CLK 9
+#define GPU_CC_CXO_AON_CLK 10
+#define GPU_CC_CXO_CLK 11
+#define GPU_CC_GMU_CLK_SRC 12
+#define GPU_CC_GX_CXO_CLK 13
+#define GPU_CC_GX_GFX3D_CLK 14
+#define GPU_CC_GX_GFX3D_CLK_SRC 15
+#define GPU_CC_GX_GMU_CLK 16
+#define GPU_CC_GX_VSENSE_CLK 17
+
+/* CLK_HW */
+#define GPU_CC_CRC_DIV 0
+
+/* GDSCs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sm8350.h b/include/dt-bindings/clock/qcom,gpucc-sm8350.h
new file mode 100644
index 000000000000..2ca857f5bfd2
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sm8350.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8350_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8350_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CB_CLK 1
+#define GPU_CC_CRC_AHB_CLK 2
+#define GPU_CC_CX_APB_CLK 3
+#define GPU_CC_CX_GMU_CLK 4
+#define GPU_CC_CX_QDSS_AT_CLK 5
+#define GPU_CC_CX_QDSS_TRIG_CLK 6
+#define GPU_CC_CX_QDSS_TSCTR_CLK 7
+#define GPU_CC_CX_SNOC_DVM_CLK 8
+#define GPU_CC_CXO_AON_CLK 9
+#define GPU_CC_CXO_CLK 10
+#define GPU_CC_FREQ_MEASURE_CLK 11
+#define GPU_CC_GMU_CLK_SRC 12
+#define GPU_CC_GX_GMU_CLK 13
+#define GPU_CC_GX_QDSS_TSCTR_CLK 14
+#define GPU_CC_GX_VSENSE_CLK 15
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 16
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 17
+#define GPU_CC_HUB_AON_CLK 18
+#define GPU_CC_HUB_CLK_SRC 19
+#define GPU_CC_HUB_CX_INT_CLK 20
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 21
+#define GPU_CC_MND1X_0_GFX3D_CLK 22
+#define GPU_CC_MND1X_1_GFX3D_CLK 23
+#define GPU_CC_PLL0 24
+#define GPU_CC_PLL1 25
+#define GPU_CC_SLEEP_CLK 26
+
+/* GPU_CC resets */
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CB_BCR 1
+#define GPUCC_GPU_CC_CX_BCR 2
+#define GPUCC_GPU_CC_FAST_HUB_BCR 3
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 4
+#define GPUCC_GPU_CC_GMU_BCR 5
+#define GPUCC_GPU_CC_GX_BCR 6
+#define GPUCC_GPU_CC_XO_BCR 7
+
+/* GPU_CC GDSCRs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
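Usage note: GDSC identifiers are consumed through the same provider node via
#power-domain-cells = <1>. A hedged GMU-style sketch; the &gpucc label and
the node shape are assumptions (the real a6xx GMU binding defines the exact
required properties):

    #include <dt-bindings/clock/qcom,gpucc-sm8350.h>

    gmu@3d6a000 {
        compatible = "qcom,adreno-gmu";      /* placeholder compatible */
        clocks = <&gpucc GPU_CC_CX_GMU_CLK>,
                 <&gpucc GPU_CC_HUB_CX_INT_CLK>;
        clock-names = "gmu", "hub";
        power-domains = <&gpucc GPU_CX_GDSC>,
                        <&gpucc GPU_GX_GDSC>;
        power-domain-names = "cx", "gx";
    };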
diff --git a/include/dt-bindings/clock/qcom,ipq-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq-cmn-pll.h
new file mode 100644
index 000000000000..936e92b3b62c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq-cmn-pll.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_IPQ_CMN_PLL_H
+#define _DT_BINDINGS_CLK_QCOM_IPQ_CMN_PLL_H
+
+/* CMN PLL core clock. */
+#define CMN_PLL_CLK 0
+
+/* The output clocks from CMN PLL of IPQ9574. */
+#define XO_24MHZ_CLK 1
+#define SLEEP_32KHZ_CLK 2
+#define PCS_31P25MHZ_CLK 3
+#define NSS_1200MHZ_CLK 4
+#define PPE_353MHZ_CLK 5
+#define ETH0_50MHZ_CLK 6
+#define ETH1_50MHZ_CLK 7
+#define ETH2_50MHZ_CLK 8
+#define ETH_25MHZ_CLK 9
+#endif
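Usage note: this header serves both ends of the binding: the CMN PLL node is
a #clock-cells = <1> provider, and peripherals select one of its fixed
outputs by the indices above. A hedged sketch; the compatible string, reg
value, and reference-clock phandle are assumptions:

    #include <dt-bindings/clock/qcom,ipq-cmn-pll.h>

    cmn_pll: clock-controller@9b000 {
        compatible = "qcom,ipq9574-cmn-pll"; /* assumed compatible */
        reg = <0x0009b000 0x800>;            /* assumed address */
        clocks = <&xo_board_clk>;            /* assumed reference input */
        #clock-cells = <1>;
    };

    /* a consumer then picks one output by index */
    ethernet-phy@0 {
        reg = <0>;
        clocks = <&cmn_pll ETH0_50MHZ_CLK>;
    };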
diff --git a/include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h
new file mode 100644
index 000000000000..586d1c9b33b3
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_IPQ5018_CMN_PLL_H
+#define _DT_BINDINGS_CLK_QCOM_IPQ5018_CMN_PLL_H
+
+/* CMN PLL core clock. */
+#define IPQ5018_CMN_PLL_CLK 0
+
+/* The output clocks from CMN PLL of IPQ5018. */
+#define IPQ5018_XO_24MHZ_CLK 1
+#define IPQ5018_SLEEP_32KHZ_CLK 2
+#define IPQ5018_ETH_50MHZ_CLK 3
+#endif
diff --git a/include/dt-bindings/clock/qcom,ipq5332-gcc.h b/include/dt-bindings/clock/qcom,ipq5332-gcc.h
new file mode 100644
index 000000000000..da9b507c30bf
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq5332-gcc.h
@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_IPQ5332_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_IPQ5332_H
+
+#define GPLL0_MAIN 0
+#define GPLL0 1
+#define GPLL2_MAIN 2
+#define GPLL2 3
+#define GPLL4_MAIN 4
+#define GPLL4 5
+#define GCC_ADSS_PWM_CLK 6
+#define GCC_ADSS_PWM_CLK_SRC 7
+#define GCC_AHB_CLK 8
+#define GCC_APSS_AXI_CLK_SRC 9
+#define GCC_BLSP1_AHB_CLK 10
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 11
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 12
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC 13
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 14
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 15
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC 16
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 17
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 18
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC 19
+#define GCC_BLSP1_SLEEP_CLK 20
+#define GCC_BLSP1_UART1_APPS_CLK 21
+#define GCC_BLSP1_UART1_APPS_CLK_SRC 22
+#define GCC_BLSP1_UART2_APPS_CLK 23
+#define GCC_BLSP1_UART2_APPS_CLK_SRC 24
+#define GCC_BLSP1_UART3_APPS_CLK 25
+#define GCC_BLSP1_UART3_APPS_CLK_SRC 26
+#define GCC_CE_AHB_CLK 27
+#define GCC_CE_AXI_CLK 28
+#define GCC_CE_PCNOC_AHB_CLK 29
+#define GCC_CMN_12GPLL_AHB_CLK 30
+#define GCC_CMN_12GPLL_APU_CLK 31
+#define GCC_CMN_12GPLL_SYS_CLK 32
+#define GCC_GP1_CLK 33
+#define GCC_GP1_CLK_SRC 34
+#define GCC_GP2_CLK 35
+#define GCC_GP2_CLK_SRC 36
+#define GCC_LPASS_CORE_AXIM_CLK 37
+#define GCC_LPASS_SWAY_CLK 38
+#define GCC_LPASS_SWAY_CLK_SRC 39
+#define GCC_MDIO_AHB_CLK 40
+#define GCC_MDIO_SLAVE_AHB_CLK 41
+#define GCC_MEM_NOC_Q6_AXI_CLK 42
+#define GCC_MEM_NOC_TS_CLK 43
+#define GCC_NSS_TS_CLK 44
+#define GCC_NSS_TS_CLK_SRC 45
+#define GCC_NSSCC_CLK 46
+#define GCC_NSSCFG_CLK 47
+#define GCC_NSSNOC_ATB_CLK 48
+#define GCC_NSSNOC_NSSCC_CLK 49
+#define GCC_NSSNOC_QOSGEN_REF_CLK 50
+#define GCC_NSSNOC_SNOC_1_CLK 51
+#define GCC_NSSNOC_SNOC_CLK 52
+#define GCC_NSSNOC_TIMEOUT_REF_CLK 53
+#define GCC_NSSNOC_XO_DCD_CLK 54
+#define GCC_PCIE3X1_0_AHB_CLK 55
+#define GCC_PCIE3X1_0_AUX_CLK 56
+#define GCC_PCIE3X1_0_AXI_CLK_SRC 57
+#define GCC_PCIE3X1_0_AXI_M_CLK 58
+#define GCC_PCIE3X1_0_AXI_S_BRIDGE_CLK 59
+#define GCC_PCIE3X1_0_AXI_S_CLK 60
+#define GCC_PCIE3X1_0_PIPE_CLK 61
+#define GCC_PCIE3X1_0_RCHG_CLK 62
+#define GCC_PCIE3X1_0_RCHG_CLK_SRC 63
+#define GCC_PCIE3X1_1_AHB_CLK 64
+#define GCC_PCIE3X1_1_AUX_CLK 65
+#define GCC_PCIE3X1_1_AXI_CLK_SRC 66
+#define GCC_PCIE3X1_1_AXI_M_CLK 67
+#define GCC_PCIE3X1_1_AXI_S_BRIDGE_CLK 68
+#define GCC_PCIE3X1_1_AXI_S_CLK 69
+#define GCC_PCIE3X1_1_PIPE_CLK 70
+#define GCC_PCIE3X1_1_RCHG_CLK 71
+#define GCC_PCIE3X1_1_RCHG_CLK_SRC 72
+#define GCC_PCIE3X1_PHY_AHB_CLK 73
+#define GCC_PCIE3X2_AHB_CLK 74
+#define GCC_PCIE3X2_AUX_CLK 75
+#define GCC_PCIE3X2_AXI_M_CLK 76
+#define GCC_PCIE3X2_AXI_M_CLK_SRC 77
+#define GCC_PCIE3X2_AXI_S_BRIDGE_CLK 78
+#define GCC_PCIE3X2_AXI_S_CLK 79
+#define GCC_PCIE3X2_AXI_S_CLK_SRC 80
+#define GCC_PCIE3X2_PHY_AHB_CLK 81
+#define GCC_PCIE3X2_PIPE_CLK 82
+#define GCC_PCIE3X2_RCHG_CLK 83
+#define GCC_PCIE3X2_RCHG_CLK_SRC 84
+#define GCC_PCIE_AUX_CLK_SRC 85
+#define GCC_PCNOC_AT_CLK 86
+#define GCC_PCNOC_BFDCD_CLK_SRC 87
+#define GCC_PCNOC_LPASS_CLK 88
+#define GCC_PRNG_AHB_CLK 89
+#define GCC_Q6_AXIM_CLK_SRC 93
+#define GCC_QDSS_AT_CLK 99
+#define GCC_QDSS_AT_CLK_SRC 100
+#define GCC_QDSS_CFG_AHB_CLK 101
+#define GCC_QDSS_DAP_AHB_CLK 102
+#define GCC_QDSS_DAP_CLK 103
+#define GCC_QDSS_DAP_DIV_CLK_SRC 104
+#define GCC_QDSS_ETR_USB_CLK 105
+#define GCC_QDSS_EUD_AT_CLK 106
+#define GCC_QDSS_TSCTR_CLK_SRC 107
+#define GCC_QPIC_AHB_CLK 108
+#define GCC_QPIC_CLK 109
+#define GCC_QPIC_IO_MACRO_CLK 110
+#define GCC_QPIC_IO_MACRO_CLK_SRC 111
+#define GCC_QPIC_SLEEP_CLK 112
+#define GCC_SDCC1_AHB_CLK 113
+#define GCC_SDCC1_APPS_CLK 114
+#define GCC_SDCC1_APPS_CLK_SRC 115
+#define GCC_SLEEP_CLK_SRC 116
+#define GCC_SNOC_LPASS_CFG_CLK 117
+#define GCC_SNOC_NSSNOC_1_CLK 118
+#define GCC_SNOC_NSSNOC_CLK 119
+#define GCC_SNOC_PCIE3_1LANE_1_M_CLK 120
+#define GCC_SNOC_PCIE3_1LANE_1_S_CLK 121
+#define GCC_SNOC_PCIE3_1LANE_M_CLK 122
+#define GCC_SNOC_PCIE3_1LANE_S_CLK 123
+#define GCC_SNOC_PCIE3_2LANE_M_CLK 124
+#define GCC_SNOC_PCIE3_2LANE_S_CLK 125
+#define GCC_SNOC_USB_CLK 126
+#define GCC_SYS_NOC_AT_CLK 127
+#define GCC_SYSTEM_NOC_BFDCD_CLK_SRC 129
+#define GCC_UNIPHY0_AHB_CLK 130
+#define GCC_UNIPHY0_SYS_CLK 131
+#define GCC_UNIPHY1_AHB_CLK 132
+#define GCC_UNIPHY1_SYS_CLK 133
+#define GCC_UNIPHY_SYS_CLK_SRC 134
+#define GCC_USB0_AUX_CLK 135
+#define GCC_USB0_AUX_CLK_SRC 136
+#define GCC_USB0_EUD_AT_CLK 137
+#define GCC_USB0_LFPS_CLK 138
+#define GCC_USB0_LFPS_CLK_SRC 139
+#define GCC_USB0_MASTER_CLK 140
+#define GCC_USB0_MASTER_CLK_SRC 141
+#define GCC_USB0_MOCK_UTMI_CLK 142
+#define GCC_USB0_MOCK_UTMI_CLK_SRC 143
+#define GCC_USB0_MOCK_UTMI_DIV_CLK_SRC 144
+#define GCC_USB0_PHY_CFG_AHB_CLK 145
+#define GCC_USB0_PIPE_CLK 146
+#define GCC_USB0_SLEEP_CLK 147
+#define GCC_WCSS_AHB_CLK_SRC 148
+#define GCC_XO_CLK 160
+#define GCC_XO_CLK_SRC 161
+#define GCC_XO_DIV4_CLK 162
+#define GCC_IM_SLEEP_CLK 163
+#define GCC_NSSNOC_PCNOC_1_CLK 164
+#define GCC_MEM_NOC_AHB_CLK 165
+#define GCC_MEM_NOC_APSS_AXI_CLK 166
+#define GCC_SNOC_QOSGEN_EXTREF_DIV_CLK_SRC 167
+#define GCC_MEM_NOC_QOSGEN_EXTREF_CLK 168
+#define GCC_PCIE3X2_PIPE_CLK_SRC 169
+#define GCC_PCIE3X1_0_PIPE_CLK_SRC 170
+#define GCC_PCIE3X1_1_PIPE_CLK_SRC 171
+#define GCC_USB0_PIPE_CLK_SRC 172
+
+#define GCC_ADSS_BCR 0
+#define GCC_ADSS_PWM_CLK_ARES 1
+#define GCC_AHB_CLK_ARES 2
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 3
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_GPLL0_CLK_ARES 4
+#define GCC_APSS_AHB_CLK_ARES 5
+#define GCC_APSS_AXI_CLK_ARES 6
+#define GCC_BLSP1_AHB_CLK_ARES 7
+#define GCC_BLSP1_BCR 8
+#define GCC_BLSP1_QUP1_BCR 9
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK_ARES 10
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK_ARES 11
+#define GCC_BLSP1_QUP2_BCR 12
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK_ARES 13
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK_ARES 14
+#define GCC_BLSP1_QUP3_BCR 15
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK_ARES 16
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK_ARES 17
+#define GCC_BLSP1_SLEEP_CLK_ARES 18
+#define GCC_BLSP1_UART1_APPS_CLK_ARES 19
+#define GCC_BLSP1_UART1_BCR 20
+#define GCC_BLSP1_UART2_APPS_CLK_ARES 21
+#define GCC_BLSP1_UART2_BCR 22
+#define GCC_BLSP1_UART3_APPS_CLK_ARES 23
+#define GCC_BLSP1_UART3_BCR 24
+#define GCC_CE_BCR 25
+#define GCC_CMN_BLK_BCR 26
+#define GCC_CMN_LDO0_BCR 27
+#define GCC_CMN_LDO1_BCR 28
+#define GCC_DCC_BCR 29
+#define GCC_GP1_CLK_ARES 30
+#define GCC_GP2_CLK_ARES 31
+#define GCC_LPASS_BCR 32
+#define GCC_LPASS_CORE_AXIM_CLK_ARES 33
+#define GCC_LPASS_SWAY_CLK_ARES 34
+#define GCC_MDIOM_BCR 35
+#define GCC_MDIOS_BCR 36
+#define GCC_NSS_BCR 37
+#define GCC_NSS_TS_CLK_ARES 38
+#define GCC_NSSCC_CLK_ARES 39
+#define GCC_NSSCFG_CLK_ARES 40
+#define GCC_NSSNOC_ATB_CLK_ARES 41
+#define GCC_NSSNOC_NSSCC_CLK_ARES 42
+#define GCC_NSSNOC_QOSGEN_REF_CLK_ARES 43
+#define GCC_NSSNOC_SNOC_1_CLK_ARES 44
+#define GCC_NSSNOC_SNOC_CLK_ARES 45
+#define GCC_NSSNOC_TIMEOUT_REF_CLK_ARES 46
+#define GCC_NSSNOC_XO_DCD_CLK_ARES 47
+#define GCC_PCIE3X1_0_AHB_CLK_ARES 48
+#define GCC_PCIE3X1_0_AUX_CLK_ARES 49
+#define GCC_PCIE3X1_0_AXI_M_CLK_ARES 50
+#define GCC_PCIE3X1_0_AXI_S_BRIDGE_CLK_ARES 51
+#define GCC_PCIE3X1_0_AXI_S_CLK_ARES 52
+#define GCC_PCIE3X1_0_BCR 53
+#define GCC_PCIE3X1_0_LINK_DOWN_BCR 54
+#define GCC_PCIE3X1_0_PHY_BCR 55
+#define GCC_PCIE3X1_0_PHY_PHY_BCR 56
+#define GCC_PCIE3X1_1_AHB_CLK_ARES 57
+#define GCC_PCIE3X1_1_AUX_CLK_ARES 58
+#define GCC_PCIE3X1_1_AXI_M_CLK_ARES 59
+#define GCC_PCIE3X1_1_AXI_S_BRIDGE_CLK_ARES 60
+#define GCC_PCIE3X1_1_AXI_S_CLK_ARES 61
+#define GCC_PCIE3X1_1_BCR 62
+#define GCC_PCIE3X1_1_LINK_DOWN_BCR 63
+#define GCC_PCIE3X1_1_PHY_BCR 64
+#define GCC_PCIE3X1_1_PHY_PHY_BCR 65
+#define GCC_PCIE3X1_PHY_AHB_CLK_ARES 66
+#define GCC_PCIE3X2_AHB_CLK_ARES 67
+#define GCC_PCIE3X2_AUX_CLK_ARES 68
+#define GCC_PCIE3X2_AXI_M_CLK_ARES 69
+#define GCC_PCIE3X2_AXI_S_BRIDGE_CLK_ARES 70
+#define GCC_PCIE3X2_AXI_S_CLK_ARES 71
+#define GCC_PCIE3X2_BCR 72
+#define GCC_PCIE3X2_LINK_DOWN_BCR 73
+#define GCC_PCIE3X2_PHY_AHB_CLK_ARES 74
+#define GCC_PCIE3X2_PHY_BCR 75
+#define GCC_PCIE3X2PHY_PHY_BCR 76
+#define GCC_PCNOC_BCR 77
+#define GCC_PCNOC_LPASS_CLK_ARES 78
+#define GCC_PRNG_AHB_CLK_ARES 79
+#define GCC_PRNG_BCR 80
+#define GCC_Q6_AHB_CLK_ARES 81
+#define GCC_Q6_AHB_S_CLK_ARES 82
+#define GCC_Q6_AXIM_CLK_ARES 83
+#define GCC_Q6_AXIS_CLK_ARES 84
+#define GCC_Q6_TSCTR_1TO2_CLK_ARES 85
+#define GCC_Q6SS_ATBM_CLK_ARES 86
+#define GCC_Q6SS_PCLKDBG_CLK_ARES 87
+#define GCC_Q6SS_TRIG_CLK_ARES 88
+#define GCC_QDSS_APB2JTAG_CLK_ARES 89
+#define GCC_QDSS_AT_CLK_ARES 90
+#define GCC_QDSS_BCR 91
+#define GCC_QDSS_CFG_AHB_CLK_ARES 92
+#define GCC_QDSS_DAP_AHB_CLK_ARES 93
+#define GCC_QDSS_DAP_CLK_ARES 94
+#define GCC_QDSS_ETR_USB_CLK_ARES 95
+#define GCC_QDSS_EUD_AT_CLK_ARES 96
+#define GCC_QDSS_STM_CLK_ARES 97
+#define GCC_QDSS_TRACECLKIN_CLK_ARES 98
+#define GCC_QDSS_TS_CLK_ARES 99
+#define GCC_QDSS_TSCTR_DIV16_CLK_ARES 100
+#define GCC_QDSS_TSCTR_DIV2_CLK_ARES 101
+#define GCC_QDSS_TSCTR_DIV3_CLK_ARES 102
+#define GCC_QDSS_TSCTR_DIV4_CLK_ARES 103
+#define GCC_QDSS_TSCTR_DIV8_CLK_ARES 104
+#define GCC_QPIC_AHB_CLK_ARES 105
+#define GCC_QPIC_CLK_ARES 106
+#define GCC_QPIC_BCR 107
+#define GCC_QPIC_IO_MACRO_CLK_ARES 108
+#define GCC_QPIC_SLEEP_CLK_ARES 109
+#define GCC_QUSB2_0_PHY_BCR 110
+#define GCC_SDCC1_AHB_CLK_ARES 111
+#define GCC_SDCC1_APPS_CLK_ARES 112
+#define GCC_SDCC_BCR 113
+#define GCC_SNOC_BCR 114
+#define GCC_SNOC_LPASS_CFG_CLK_ARES 115
+#define GCC_SNOC_NSSNOC_1_CLK_ARES 116
+#define GCC_SNOC_NSSNOC_CLK_ARES 117
+#define GCC_SYS_NOC_QDSS_STM_AXI_CLK_ARES 118
+#define GCC_SYS_NOC_WCSS_AHB_CLK_ARES 119
+#define GCC_UNIPHY0_AHB_CLK_ARES 120
+#define GCC_UNIPHY0_BCR 121
+#define GCC_UNIPHY0_SYS_CLK_ARES 122
+#define GCC_UNIPHY1_AHB_CLK_ARES 123
+#define GCC_UNIPHY1_BCR 124
+#define GCC_UNIPHY1_SYS_CLK_ARES 125
+#define GCC_USB0_AUX_CLK_ARES 126
+#define GCC_USB0_EUD_AT_CLK_ARES 127
+#define GCC_USB0_LFPS_CLK_ARES 128
+#define GCC_USB0_MASTER_CLK_ARES 129
+#define GCC_USB0_MOCK_UTMI_CLK_ARES 130
+#define GCC_USB0_PHY_BCR 131
+#define GCC_USB0_PHY_CFG_AHB_CLK_ARES 132
+#define GCC_USB0_SLEEP_CLK_ARES 133
+#define GCC_USB3PHY_0_PHY_BCR 134
+#define GCC_USB_BCR 135
+#define GCC_WCSS_AXIM_CLK_ARES 136
+#define GCC_WCSS_AXIS_CLK_ARES 137
+#define GCC_WCSS_BCR 138
+#define GCC_WCSS_DBG_IFC_APB_BDG_CLK_ARES 139
+#define GCC_WCSS_DBG_IFC_APB_CLK_ARES 140
+#define GCC_WCSS_DBG_IFC_ATB_BDG_CLK_ARES 141
+#define GCC_WCSS_DBG_IFC_ATB_CLK_ARES 142
+#define GCC_WCSS_DBG_IFC_NTS_BDG_CLK_ARES 143
+#define GCC_WCSS_DBG_IFC_NTS_CLK_ARES 144
+#define GCC_WCSS_ECAHB_CLK_ARES 145
+#define GCC_WCSS_MST_ASYNC_BDG_CLK_ARES 146
+#define GCC_WCSS_Q6_BCR 147
+#define GCC_WCSS_SLV_ASYNC_BDG_CLK_ARES 148
+#define GCC_XO_CLK_ARES 149
+#define GCC_XO_DIV4_CLK_ARES 150
+#define GCC_Q6SS_DBG_ARES 151
+#define GCC_WCSS_DBG_BDG_ARES 152
+#define GCC_WCSS_DBG_ARES 153
+#define GCC_WCSS_AXI_S_ARES 154
+#define GCC_WCSS_AXI_M_ARES 155
+#define GCC_WCSSAON_ARES 156
+#define GCC_PCIE3X2_PIPE_ARES 157
+#define GCC_PCIE3X2_CORE_STICKY_ARES 158
+#define GCC_PCIE3X2_AXI_S_STICKY_ARES 159
+#define GCC_PCIE3X2_AXI_M_STICKY_ARES 160
+#define GCC_PCIE3X1_0_PIPE_ARES 161
+#define GCC_PCIE3X1_0_CORE_STICKY_ARES 162
+#define GCC_PCIE3X1_0_AXI_S_STICKY_ARES 163
+#define GCC_PCIE3X1_0_AXI_M_STICKY_ARES 164
+#define GCC_PCIE3X1_1_PIPE_ARES 165
+#define GCC_PCIE3X1_1_CORE_STICKY_ARES 166
+#define GCC_PCIE3X1_1_AXI_S_STICKY_ARES 167
+#define GCC_PCIE3X1_1_AXI_M_STICKY_ARES 168
+#define GCC_IM_SLEEP_CLK_ARES 169
+#define GCC_NSSNOC_PCNOC_1_CLK_ARES 170
+#define GCC_UNIPHY0_XPCS_ARES 171
+#define GCC_UNIPHY1_XPCS_ARES 172
+#endif
diff --git a/include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h
new file mode 100644
index 000000000000..f643c2668c04
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_IPQ5424_CMN_PLL_H
+#define _DT_BINDINGS_CLK_QCOM_IPQ5424_CMN_PLL_H
+
+/* CMN PLL core clock. */
+#define IPQ5424_CMN_PLL_CLK 0
+
+/* The output clocks from CMN PLL of IPQ5424. */
+#define IPQ5424_XO_24MHZ_CLK 1
+#define IPQ5424_SLEEP_32KHZ_CLK 2
+#define IPQ5424_PCS_31P25MHZ_CLK 3
+#define IPQ5424_NSS_300MHZ_CLK 4
+#define IPQ5424_PPE_375MHZ_CLK 5
+#define IPQ5424_ETH0_50MHZ_CLK 6
+#define IPQ5424_ETH1_50MHZ_CLK 7
+#define IPQ5424_ETH2_50MHZ_CLK 8
+#define IPQ5424_ETH_25MHZ_CLK 9
+#endif
diff --git a/include/dt-bindings/clock/qcom,ipq5424-gcc.h b/include/dt-bindings/clock/qcom,ipq5424-gcc.h
new file mode 100644
index 000000000000..3ae33a0fa002
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq5424-gcc.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_GCC_IPQ5424_H
+#define _DT_BINDINGS_CLOCK_IPQ_GCC_IPQ5424_H
+
+#define GPLL0 0
+#define GPLL4 1
+#define GPLL2 2
+#define GPLL2_OUT_MAIN 3
+#define GCC_SLEEP_CLK_SRC 4
+#define GCC_USB0_EUD_AT_CLK 6
+#define GCC_PCIE0_AXI_M_CLK_SRC 7
+#define GCC_PCIE0_AXI_M_CLK 8
+#define GCC_PCIE1_AXI_M_CLK_SRC 9
+#define GCC_PCIE1_AXI_M_CLK 10
+#define GCC_PCIE2_AXI_M_CLK_SRC 11
+#define GCC_PCIE2_AXI_M_CLK 12
+#define GCC_PCIE3_AXI_M_CLK_SRC 13
+#define GCC_PCIE3_AXI_M_CLK 14
+#define GCC_PCIE0_AXI_S_CLK_SRC 15
+#define GCC_PCIE0_AXI_S_BRIDGE_CLK 16
+#define GCC_PCIE0_AXI_S_CLK 17
+#define GCC_PCIE1_AXI_S_CLK_SRC 18
+#define GCC_PCIE1_AXI_S_BRIDGE_CLK 19
+#define GCC_PCIE1_AXI_S_CLK 20
+#define GCC_PCIE2_AXI_S_CLK_SRC 21
+#define GCC_PCIE2_AXI_S_BRIDGE_CLK 22
+#define GCC_PCIE2_AXI_S_CLK 23
+#define GCC_PCIE3_AXI_S_CLK_SRC 24
+#define GCC_PCIE3_AXI_S_BRIDGE_CLK 25
+#define GCC_PCIE3_AXI_S_CLK 26
+#define GCC_PCIE0_PIPE_CLK_SRC 27
+#define GCC_PCIE0_PIPE_CLK 28
+#define GCC_PCIE1_PIPE_CLK_SRC 29
+#define GCC_PCIE1_PIPE_CLK 30
+#define GCC_PCIE2_PIPE_CLK_SRC 31
+#define GCC_PCIE2_PIPE_CLK 32
+#define GCC_PCIE3_PIPE_CLK_SRC 33
+#define GCC_PCIE3_PIPE_CLK 34
+#define GCC_PCIE_AUX_CLK_SRC 35
+#define GCC_PCIE0_AUX_CLK 36
+#define GCC_PCIE1_AUX_CLK 37
+#define GCC_PCIE2_AUX_CLK 38
+#define GCC_PCIE3_AUX_CLK 39
+#define GCC_PCIE0_AHB_CLK 40
+#define GCC_PCIE1_AHB_CLK 41
+#define GCC_PCIE2_AHB_CLK 42
+#define GCC_PCIE3_AHB_CLK 43
+#define GCC_USB0_AUX_CLK_SRC 44
+#define GCC_USB0_AUX_CLK 45
+#define GCC_USB0_MASTER_CLK 46
+#define GCC_USB0_MOCK_UTMI_CLK_SRC 47
+#define GCC_USB0_MOCK_UTMI_DIV_CLK_SRC 48
+#define GCC_USB0_MOCK_UTMI_CLK 49
+#define GCC_USB0_PIPE_CLK_SRC 50
+#define GCC_USB0_PIPE_CLK 51
+#define GCC_USB0_PHY_CFG_AHB_CLK 52
+#define GCC_USB0_SLEEP_CLK 53
+#define GCC_SDCC1_APPS_CLK_SRC 54
+#define GCC_SDCC1_APPS_CLK 55
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 56
+#define GCC_SDCC1_ICE_CORE_CLK 57
+#define GCC_SDCC1_AHB_CLK 58
+#define GCC_PCNOC_BFDCD_CLK_SRC 59
+#define GCC_NSSCFG_CLK 60
+#define GCC_NSSNOC_NSSCC_CLK 61
+#define GCC_NSSCC_CLK 62
+#define GCC_NSSNOC_PCNOC_1_CLK 63
+#define GCC_QPIC_AHB_CLK 64
+#define GCC_QPIC_CLK 65
+#define GCC_MDIO_AHB_CLK 66
+#define GCC_PRNG_AHB_CLK 67
+#define GCC_UNIPHY0_AHB_CLK 68
+#define GCC_UNIPHY1_AHB_CLK 69
+#define GCC_UNIPHY2_AHB_CLK 70
+#define GCC_CMN_12GPLL_AHB_CLK 71
+#define GCC_SYSTEM_NOC_BFDCD_CLK_SRC 72
+#define GCC_NSSNOC_SNOC_CLK 73
+#define GCC_NSSNOC_SNOC_1_CLK 74
+#define GCC_WCSS_AHB_CLK_SRC 75
+#define GCC_QDSS_AT_CLK_SRC 76
+#define GCC_NSSNOC_ATB_CLK 77
+#define GCC_QDSS_AT_CLK 78
+#define GCC_QDSS_TSCTR_CLK_SRC 79
+#define GCC_NSS_TS_CLK 80
+#define GCC_QPIC_IO_MACRO_CLK_SRC 81
+#define GCC_QPIC_IO_MACRO_CLK 82
+#define GCC_LPASS_AXIM_CLK_SRC 83
+#define GCC_LPASS_CORE_AXIM_CLK 84
+#define GCC_LPASS_SWAY_CLK_SRC 85
+#define GCC_LPASS_SWAY_CLK 86
+#define GCC_CNOC_LPASS_CFG_CLK 87
+#define GCC_SNOC_LPASS_CLK 88
+#define GCC_ADSS_PWM_CLK_SRC 89
+#define GCC_ADSS_PWM_CLK 90
+#define GCC_XO_CLK_SRC 91
+#define GCC_NSSNOC_XO_DCD_CLK 92
+#define GCC_NSSNOC_QOSGEN_REF_CLK 93
+#define GCC_NSSNOC_TIMEOUT_REF_CLK 94
+#define GCC_UNIPHY0_SYS_CLK 95
+#define GCC_UNIPHY1_SYS_CLK 96
+#define GCC_UNIPHY2_SYS_CLK 97
+#define GCC_CMN_12GPLL_SYS_CLK 98
+#define GCC_UNIPHY_SYS_CLK_SRC 99
+#define GCC_NSS_TS_CLK_SRC 100
+#define GCC_ANOC_PCIE0_1LANE_M_CLK 101
+#define GCC_ANOC_PCIE1_1LANE_M_CLK 102
+#define GCC_ANOC_PCIE2_2LANE_M_CLK 103
+#define GCC_ANOC_PCIE3_2LANE_M_CLK 104
+#define GCC_CNOC_PCIE0_1LANE_S_CLK 105
+#define GCC_CNOC_PCIE1_1LANE_S_CLK 106
+#define GCC_CNOC_PCIE2_2LANE_S_CLK 107
+#define GCC_CNOC_PCIE3_2LANE_S_CLK 108
+#define GCC_CNOC_USB_CLK 109
+#define GCC_CNOC_WCSS_AHB_CLK 110
+#define GCC_QUPV3_AHB_MST_CLK 111
+#define GCC_QUPV3_AHB_SLV_CLK 112
+#define GCC_QUPV3_I2C0_CLK 113
+#define GCC_QUPV3_I2C1_CLK 114
+#define GCC_QUPV3_SPI0_CLK 115
+#define GCC_QUPV3_SPI1_CLK 116
+#define GCC_QUPV3_UART0_CLK 117
+#define GCC_QUPV3_UART1_CLK 118
+#define GCC_QPIC_CLK_SRC 119
+#define GCC_QUPV3_I2C0_CLK_SRC 120
+#define GCC_QUPV3_I2C1_CLK_SRC 121
+#define GCC_QUPV3_I2C0_DIV_CLK_SRC 122
+#define GCC_QUPV3_I2C1_DIV_CLK_SRC 123
+#define GCC_QUPV3_SPI0_CLK_SRC 124
+#define GCC_QUPV3_SPI1_CLK_SRC 125
+#define GCC_QUPV3_UART0_CLK_SRC 126
+#define GCC_QUPV3_UART1_CLK_SRC 127
+#define GCC_USB1_MASTER_CLK 128
+#define GCC_USB1_MOCK_UTMI_CLK_SRC 129
+#define GCC_USB1_MOCK_UTMI_DIV_CLK_SRC 130
+#define GCC_USB1_MOCK_UTMI_CLK 131
+#define GCC_USB1_SLEEP_CLK 132
+#define GCC_USB1_PHY_CFG_AHB_CLK 133
+#define GCC_USB0_MASTER_CLK_SRC 134
+#define GCC_QDSS_DAP_CLK 135
+#define GCC_PCIE0_RCHNG_CLK_SRC 136
+#define GCC_PCIE0_RCHNG_CLK 137
+#define GCC_PCIE1_RCHNG_CLK_SRC 138
+#define GCC_PCIE1_RCHNG_CLK 139
+#define GCC_PCIE2_RCHNG_CLK_SRC 140
+#define GCC_PCIE2_RCHNG_CLK 141
+#define GCC_PCIE3_RCHNG_CLK_SRC 142
+#define GCC_PCIE3_RCHNG_CLK 143
+#define GCC_IM_SLEEP_CLK 144
+#define GCC_XO_CLK 145
+#define GPLL0_OUT_AUX 146
+
+#endif
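
As with the CMN PLL header, consumers reference these GCC indices through the
macros rather than bare numbers; pairing them with clock-names keeps driver
lookups readable. A hypothetical UART consumer sketch (the node name, unit
address and &gcc label are assumptions, not part of this binding):

	#include <dt-bindings/clock/qcom,ipq5424-gcc.h>

	serial@78af000 {
		clocks = <&gcc GCC_QUPV3_UART0_CLK>,
			 <&gcc GCC_QUPV3_AHB_MST_CLK>;
		clock-names = "core", "iface";
	};
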
diff --git a/include/dt-bindings/clock/qcom,ipq5424-nsscc.h b/include/dt-bindings/clock/qcom,ipq5424-nsscc.h
new file mode 100644
index 000000000000..eeae0dc38042
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq5424-nsscc.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_QCOM_IPQ5424_NSSCC_H
+#define _DT_BINDINGS_CLOCK_QCOM_IPQ5424_NSSCC_H
+
+/* NSS_CC clocks */
+#define NSS_CC_CE_APB_CLK 0
+#define NSS_CC_CE_AXI_CLK 1
+#define NSS_CC_CE_CLK_SRC 2
+#define NSS_CC_CFG_CLK_SRC 3
+#define NSS_CC_DEBUG_CLK 4
+#define NSS_CC_EIP_BFDCD_CLK_SRC 5
+#define NSS_CC_EIP_CLK 6
+#define NSS_CC_NSS_CSR_CLK 7
+#define NSS_CC_NSSNOC_CE_APB_CLK 8
+#define NSS_CC_NSSNOC_CE_AXI_CLK 9
+#define NSS_CC_NSSNOC_EIP_CLK 10
+#define NSS_CC_NSSNOC_NSS_CSR_CLK 11
+#define NSS_CC_NSSNOC_PPE_CFG_CLK 12
+#define NSS_CC_NSSNOC_PPE_CLK 13
+#define NSS_CC_PORT1_MAC_CLK 14
+#define NSS_CC_PORT1_RX_CLK 15
+#define NSS_CC_PORT1_RX_CLK_SRC 16
+#define NSS_CC_PORT1_RX_DIV_CLK_SRC 17
+#define NSS_CC_PORT1_TX_CLK 18
+#define NSS_CC_PORT1_TX_CLK_SRC 19
+#define NSS_CC_PORT1_TX_DIV_CLK_SRC 20
+#define NSS_CC_PORT2_MAC_CLK 21
+#define NSS_CC_PORT2_RX_CLK 22
+#define NSS_CC_PORT2_RX_CLK_SRC 23
+#define NSS_CC_PORT2_RX_DIV_CLK_SRC 24
+#define NSS_CC_PORT2_TX_CLK 25
+#define NSS_CC_PORT2_TX_CLK_SRC 26
+#define NSS_CC_PORT2_TX_DIV_CLK_SRC 27
+#define NSS_CC_PORT3_MAC_CLK 28
+#define NSS_CC_PORT3_RX_CLK 29
+#define NSS_CC_PORT3_RX_CLK_SRC 30
+#define NSS_CC_PORT3_RX_DIV_CLK_SRC 31
+#define NSS_CC_PORT3_TX_CLK 32
+#define NSS_CC_PORT3_TX_CLK_SRC 33
+#define NSS_CC_PORT3_TX_DIV_CLK_SRC 34
+#define NSS_CC_PPE_CLK_SRC 35
+#define NSS_CC_PPE_EDMA_CFG_CLK 36
+#define NSS_CC_PPE_EDMA_CLK 37
+#define NSS_CC_PPE_SWITCH_BTQ_CLK 38
+#define NSS_CC_PPE_SWITCH_CFG_CLK 39
+#define NSS_CC_PPE_SWITCH_CLK 40
+#define NSS_CC_PPE_SWITCH_IPE_CLK 41
+#define NSS_CC_UNIPHY_PORT1_RX_CLK 42
+#define NSS_CC_UNIPHY_PORT1_TX_CLK 43
+#define NSS_CC_UNIPHY_PORT2_RX_CLK 44
+#define NSS_CC_UNIPHY_PORT2_TX_CLK 45
+#define NSS_CC_UNIPHY_PORT3_RX_CLK 46
+#define NSS_CC_UNIPHY_PORT3_TX_CLK 47
+#define NSS_CC_XGMAC0_PTP_REF_CLK 48
+#define NSS_CC_XGMAC0_PTP_REF_DIV_CLK_SRC 49
+#define NSS_CC_XGMAC1_PTP_REF_CLK 50
+#define NSS_CC_XGMAC1_PTP_REF_DIV_CLK_SRC 51
+#define NSS_CC_XGMAC2_PTP_REF_CLK 52
+#define NSS_CC_XGMAC2_PTP_REF_DIV_CLK_SRC 53
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,ipq9574-gcc.h b/include/dt-bindings/clock/qcom,ipq9574-gcc.h
new file mode 100644
index 000000000000..0e7c319897f3
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq9574-gcc.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018-2023 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_GCC_9574_H
+#define _DT_BINDINGS_CLOCK_IPQ_GCC_9574_H
+
+#define GPLL0_MAIN 0
+#define GPLL0 1
+#define GPLL2_MAIN 2
+#define GPLL2 3
+#define GPLL4_MAIN 4
+#define GPLL4 5
+#define GCC_SLEEP_CLK_SRC 6
+#define APSS_AHB_CLK_SRC 7
+#define APSS_AXI_CLK_SRC 8
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 9
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 10
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 11
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 12
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 13
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 14
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 15
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 16
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 17
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 18
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 19
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 20
+#define BLSP1_UART1_APPS_CLK_SRC 21
+#define BLSP1_UART2_APPS_CLK_SRC 22
+#define BLSP1_UART3_APPS_CLK_SRC 23
+#define BLSP1_UART4_APPS_CLK_SRC 24
+#define BLSP1_UART5_APPS_CLK_SRC 25
+#define BLSP1_UART6_APPS_CLK_SRC 26
+#define GCC_APSS_AHB_CLK 27
+#define GCC_APSS_AXI_CLK 28
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 29
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 30
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 31
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 32
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 33
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 34
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 35
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 36
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 37
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 38
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 39
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 40
+#define GCC_BLSP1_UART1_APPS_CLK 41
+#define GCC_BLSP1_UART2_APPS_CLK 42
+#define GCC_BLSP1_UART3_APPS_CLK 43
+#define GCC_BLSP1_UART4_APPS_CLK 44
+#define GCC_BLSP1_UART5_APPS_CLK 45
+#define GCC_BLSP1_UART6_APPS_CLK 46
+#define PCIE0_AXI_M_CLK_SRC 47
+#define GCC_PCIE0_AXI_M_CLK 48
+#define PCIE1_AXI_M_CLK_SRC 49
+#define GCC_PCIE1_AXI_M_CLK 50
+#define PCIE2_AXI_M_CLK_SRC 51
+#define GCC_PCIE2_AXI_M_CLK 52
+#define PCIE3_AXI_M_CLK_SRC 53
+#define GCC_PCIE3_AXI_M_CLK 54
+#define PCIE0_AXI_S_CLK_SRC 55
+#define GCC_PCIE0_AXI_S_BRIDGE_CLK 56
+#define GCC_PCIE0_AXI_S_CLK 57
+#define PCIE1_AXI_S_CLK_SRC 58
+#define GCC_PCIE1_AXI_S_BRIDGE_CLK 59
+#define GCC_PCIE1_AXI_S_CLK 60
+#define PCIE2_AXI_S_CLK_SRC 61
+#define GCC_PCIE2_AXI_S_BRIDGE_CLK 62
+#define GCC_PCIE2_AXI_S_CLK 63
+#define PCIE3_AXI_S_CLK_SRC 64
+#define GCC_PCIE3_AXI_S_BRIDGE_CLK 65
+#define GCC_PCIE3_AXI_S_CLK 66
+#define PCIE0_PIPE_CLK_SRC 67
+#define PCIE1_PIPE_CLK_SRC 68
+#define PCIE2_PIPE_CLK_SRC 69
+#define PCIE3_PIPE_CLK_SRC 70
+#define PCIE_AUX_CLK_SRC 71
+#define GCC_PCIE0_AUX_CLK 72
+#define GCC_PCIE1_AUX_CLK 73
+#define GCC_PCIE2_AUX_CLK 74
+#define GCC_PCIE3_AUX_CLK 75
+#define PCIE0_RCHNG_CLK_SRC 76
+#define GCC_PCIE0_RCHNG_CLK 77
+#define PCIE1_RCHNG_CLK_SRC 78
+#define GCC_PCIE1_RCHNG_CLK 79
+#define PCIE2_RCHNG_CLK_SRC 80
+#define GCC_PCIE2_RCHNG_CLK 81
+#define PCIE3_RCHNG_CLK_SRC 82
+#define GCC_PCIE3_RCHNG_CLK 83
+#define GCC_PCIE0_AHB_CLK 84
+#define GCC_PCIE1_AHB_CLK 85
+#define GCC_PCIE2_AHB_CLK 86
+#define GCC_PCIE3_AHB_CLK 87
+#define USB0_AUX_CLK_SRC 88
+#define GCC_USB0_AUX_CLK 89
+#define USB0_MASTER_CLK_SRC 90
+#define GCC_USB0_MASTER_CLK 91
+#define GCC_SNOC_USB_CLK 92
+#define GCC_ANOC_USB_AXI_CLK 93
+#define USB0_MOCK_UTMI_CLK_SRC 94
+#define USB0_MOCK_UTMI_DIV_CLK_SRC 95
+#define GCC_USB0_MOCK_UTMI_CLK 96
+#define USB0_PIPE_CLK_SRC 97
+#define GCC_USB0_PHY_CFG_AHB_CLK 98
+#define SDCC1_APPS_CLK_SRC 99
+#define GCC_SDCC1_APPS_CLK 100
+#define SDCC1_ICE_CORE_CLK_SRC 101
+#define GCC_SDCC1_ICE_CORE_CLK 102
+#define GCC_SDCC1_AHB_CLK 103
+#define PCNOC_BFDCD_CLK_SRC 104
+#define GCC_NSSCFG_CLK 105
+#define GCC_NSSNOC_NSSCC_CLK 106
+#define GCC_NSSCC_CLK 107
+#define GCC_NSSNOC_PCNOC_1_CLK 108
+#define GCC_QDSS_DAP_AHB_CLK 109
+#define GCC_QDSS_CFG_AHB_CLK 110
+#define GCC_QPIC_AHB_CLK 111
+#define GCC_QPIC_CLK 112
+#define GCC_BLSP1_AHB_CLK 113
+#define GCC_MDIO_AHB_CLK 114
+#define GCC_PRNG_AHB_CLK 115
+#define GCC_UNIPHY0_AHB_CLK 116
+#define GCC_UNIPHY1_AHB_CLK 117
+#define GCC_UNIPHY2_AHB_CLK 118
+#define GCC_CMN_12GPLL_AHB_CLK 119
+#define GCC_CMN_12GPLL_APU_CLK 120
+#define SYSTEM_NOC_BFDCD_CLK_SRC 121
+#define GCC_NSSNOC_SNOC_CLK 122
+#define GCC_NSSNOC_SNOC_1_CLK 123
+#define GCC_QDSS_ETR_USB_CLK 124
+#define WCSS_AHB_CLK_SRC 125
+#define WCSS_AXI_M_CLK_SRC 131
+#define QDSS_AT_CLK_SRC 133
+#define GCC_NSSNOC_ATB_CLK 136
+#define GCC_QDSS_AT_CLK 137
+#define GCC_SYS_NOC_AT_CLK 138
+#define GCC_PCNOC_AT_CLK 139
+#define GCC_USB0_EUD_AT_CLK 140
+#define GCC_QDSS_EUD_AT_CLK 141
+#define QDSS_STM_CLK_SRC 142
+#define GCC_QDSS_STM_CLK 143
+#define GCC_SYS_NOC_QDSS_STM_AXI_CLK 144
+#define QDSS_TRACECLKIN_CLK_SRC 145
+#define GCC_QDSS_TRACECLKIN_CLK 146
+#define QDSS_TSCTR_CLK_SRC 147
+#define GCC_QDSS_TSCTR_DIV2_CLK 150
+#define GCC_QDSS_TS_CLK 151
+#define GCC_QDSS_TSCTR_DIV4_CLK 152
+#define GCC_NSS_TS_CLK 153
+#define GCC_QDSS_TSCTR_DIV8_CLK 154
+#define GCC_QDSS_TSCTR_DIV16_CLK 155
+#define GCC_QDSS_DAP_CLK 160
+#define GCC_QDSS_APB2JTAG_CLK 161
+#define GCC_QDSS_TSCTR_DIV3_CLK 162
+#define QPIC_IO_MACRO_CLK_SRC 163
+#define GCC_QPIC_IO_MACRO_CLK 164
+#define Q6_AXI_CLK_SRC 165
+#define Q6_AXIM2_CLK_SRC 169
+#define NSSNOC_MEMNOC_BFDCD_CLK_SRC 170
+#define GCC_NSSNOC_MEMNOC_CLK 171
+#define GCC_NSSNOC_MEM_NOC_1_CLK 172
+#define GCC_NSS_TBU_CLK 173
+#define GCC_MEM_NOC_NSSNOC_CLK 174
+#define LPASS_AXIM_CLK_SRC 175
+#define LPASS_SWAY_CLK_SRC 176
+#define ADSS_PWM_CLK_SRC 177
+#define GCC_ADSS_PWM_CLK 178
+#define GP1_CLK_SRC 179
+#define GP2_CLK_SRC 180
+#define GP3_CLK_SRC 181
+#define DDRSS_SMS_SLOW_CLK_SRC 182
+#define GCC_XO_CLK_SRC 183
+#define GCC_XO_CLK 184
+#define GCC_NSSNOC_QOSGEN_REF_CLK 185
+#define GCC_NSSNOC_TIMEOUT_REF_CLK 186
+#define GCC_XO_DIV4_CLK 187
+#define GCC_UNIPHY0_SYS_CLK 188
+#define GCC_UNIPHY1_SYS_CLK 189
+#define GCC_UNIPHY2_SYS_CLK 190
+#define GCC_CMN_12GPLL_SYS_CLK 191
+#define GCC_NSSNOC_XO_DCD_CLK 192
+#define UNIPHY_SYS_CLK_SRC 194
+#define NSS_TS_CLK_SRC 195
+#define GCC_ANOC_PCIE0_1LANE_M_CLK 196
+#define GCC_ANOC_PCIE1_1LANE_M_CLK 197
+#define GCC_ANOC_PCIE2_2LANE_M_CLK 198
+#define GCC_ANOC_PCIE3_2LANE_M_CLK 199
+#define GCC_SNOC_PCIE0_1LANE_S_CLK 200
+#define GCC_SNOC_PCIE1_1LANE_S_CLK 201
+#define GCC_SNOC_PCIE2_2LANE_S_CLK 202
+#define GCC_SNOC_PCIE3_2LANE_S_CLK 203
+#define GCC_CRYPTO_CLK_SRC 204
+#define GCC_CRYPTO_CLK 205
+#define GCC_CRYPTO_AXI_CLK 206
+#define GCC_CRYPTO_AHB_CLK 207
+#define GCC_USB0_PIPE_CLK 208
+#define GCC_USB0_SLEEP_CLK 209
+#define GCC_PCIE0_PIPE_CLK 210
+#define GCC_PCIE1_PIPE_CLK 211
+#define GCC_PCIE2_PIPE_CLK 212
+#define GCC_PCIE3_PIPE_CLK 213
+#define GPLL0_OUT_AUX 214
+#endif
diff --git a/include/dt-bindings/clock/qcom,ipq9574-nsscc.h b/include/dt-bindings/clock/qcom,ipq9574-nsscc.h
new file mode 100644
index 000000000000..21a16dc0e64c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq9574-nsscc.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, 2025 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_NSSCC_9574_H
+#define _DT_BINDINGS_CLOCK_IPQ_NSSCC_9574_H
+
+#define NSS_CC_CE_APB_CLK 0
+#define NSS_CC_CE_AXI_CLK 1
+#define NSS_CC_CE_CLK_SRC 2
+#define NSS_CC_CFG_CLK_SRC 3
+#define NSS_CC_CLC_AXI_CLK 4
+#define NSS_CC_CLC_CLK_SRC 5
+#define NSS_CC_CRYPTO_CLK 6
+#define NSS_CC_CRYPTO_CLK_SRC 7
+#define NSS_CC_CRYPTO_PPE_CLK 8
+#define NSS_CC_HAQ_AHB_CLK 9
+#define NSS_CC_HAQ_AXI_CLK 10
+#define NSS_CC_HAQ_CLK_SRC 11
+#define NSS_CC_IMEM_AHB_CLK 12
+#define NSS_CC_IMEM_CLK_SRC 13
+#define NSS_CC_IMEM_QSB_CLK 14
+#define NSS_CC_INT_CFG_CLK_SRC 15
+#define NSS_CC_NSS_CSR_CLK 16
+#define NSS_CC_NSSNOC_CE_APB_CLK 17
+#define NSS_CC_NSSNOC_CE_AXI_CLK 18
+#define NSS_CC_NSSNOC_CLC_AXI_CLK 19
+#define NSS_CC_NSSNOC_CRYPTO_CLK 20
+#define NSS_CC_NSSNOC_HAQ_AHB_CLK 21
+#define NSS_CC_NSSNOC_HAQ_AXI_CLK 22
+#define NSS_CC_NSSNOC_IMEM_AHB_CLK 23
+#define NSS_CC_NSSNOC_IMEM_QSB_CLK 24
+#define NSS_CC_NSSNOC_NSS_CSR_CLK 25
+#define NSS_CC_NSSNOC_PPE_CFG_CLK 26
+#define NSS_CC_NSSNOC_PPE_CLK 27
+#define NSS_CC_NSSNOC_UBI32_AHB0_CLK 28
+#define NSS_CC_NSSNOC_UBI32_AXI0_CLK 29
+#define NSS_CC_NSSNOC_UBI32_INT0_AHB_CLK 30
+#define NSS_CC_NSSNOC_UBI32_NC_AXI0_1_CLK 31
+#define NSS_CC_NSSNOC_UBI32_NC_AXI0_CLK 32
+#define NSS_CC_PORT1_MAC_CLK 33
+#define NSS_CC_PORT1_RX_CLK 34
+#define NSS_CC_PORT1_RX_CLK_SRC 35
+#define NSS_CC_PORT1_RX_DIV_CLK_SRC 36
+#define NSS_CC_PORT1_TX_CLK 37
+#define NSS_CC_PORT1_TX_CLK_SRC 38
+#define NSS_CC_PORT1_TX_DIV_CLK_SRC 39
+#define NSS_CC_PORT2_MAC_CLK 40
+#define NSS_CC_PORT2_RX_CLK 41
+#define NSS_CC_PORT2_RX_CLK_SRC 42
+#define NSS_CC_PORT2_RX_DIV_CLK_SRC 43
+#define NSS_CC_PORT2_TX_CLK 44
+#define NSS_CC_PORT2_TX_CLK_SRC 45
+#define NSS_CC_PORT2_TX_DIV_CLK_SRC 46
+#define NSS_CC_PORT3_MAC_CLK 47
+#define NSS_CC_PORT3_RX_CLK 48
+#define NSS_CC_PORT3_RX_CLK_SRC 49
+#define NSS_CC_PORT3_RX_DIV_CLK_SRC 50
+#define NSS_CC_PORT3_TX_CLK 51
+#define NSS_CC_PORT3_TX_CLK_SRC 52
+#define NSS_CC_PORT3_TX_DIV_CLK_SRC 53
+#define NSS_CC_PORT4_MAC_CLK 54
+#define NSS_CC_PORT4_RX_CLK 55
+#define NSS_CC_PORT4_RX_CLK_SRC 56
+#define NSS_CC_PORT4_RX_DIV_CLK_SRC 57
+#define NSS_CC_PORT4_TX_CLK 58
+#define NSS_CC_PORT4_TX_CLK_SRC 59
+#define NSS_CC_PORT4_TX_DIV_CLK_SRC 60
+#define NSS_CC_PORT5_MAC_CLK 61
+#define NSS_CC_PORT5_RX_CLK 62
+#define NSS_CC_PORT5_RX_CLK_SRC 63
+#define NSS_CC_PORT5_RX_DIV_CLK_SRC 64
+#define NSS_CC_PORT5_TX_CLK 65
+#define NSS_CC_PORT5_TX_CLK_SRC 66
+#define NSS_CC_PORT5_TX_DIV_CLK_SRC 67
+#define NSS_CC_PORT6_MAC_CLK 68
+#define NSS_CC_PORT6_RX_CLK 69
+#define NSS_CC_PORT6_RX_CLK_SRC 70
+#define NSS_CC_PORT6_RX_DIV_CLK_SRC 71
+#define NSS_CC_PORT6_TX_CLK 72
+#define NSS_CC_PORT6_TX_CLK_SRC 73
+#define NSS_CC_PORT6_TX_DIV_CLK_SRC 74
+#define NSS_CC_PPE_CLK_SRC 75
+#define NSS_CC_PPE_EDMA_CFG_CLK 76
+#define NSS_CC_PPE_EDMA_CLK 77
+#define NSS_CC_PPE_SWITCH_BTQ_CLK 78
+#define NSS_CC_PPE_SWITCH_CFG_CLK 79
+#define NSS_CC_PPE_SWITCH_CLK 80
+#define NSS_CC_PPE_SWITCH_IPE_CLK 81
+#define NSS_CC_UBI0_CLK_SRC 82
+#define NSS_CC_UBI0_DIV_CLK_SRC 83
+#define NSS_CC_UBI1_CLK_SRC 84
+#define NSS_CC_UBI1_DIV_CLK_SRC 85
+#define NSS_CC_UBI2_CLK_SRC 86
+#define NSS_CC_UBI2_DIV_CLK_SRC 87
+#define NSS_CC_UBI32_AHB0_CLK 88
+#define NSS_CC_UBI32_AHB1_CLK 89
+#define NSS_CC_UBI32_AHB2_CLK 90
+#define NSS_CC_UBI32_AHB3_CLK 91
+#define NSS_CC_UBI32_AXI0_CLK 92
+#define NSS_CC_UBI32_AXI1_CLK 93
+#define NSS_CC_UBI32_AXI2_CLK 94
+#define NSS_CC_UBI32_AXI3_CLK 95
+#define NSS_CC_UBI32_CORE0_CLK 96
+#define NSS_CC_UBI32_CORE1_CLK 97
+#define NSS_CC_UBI32_CORE2_CLK 98
+#define NSS_CC_UBI32_CORE3_CLK 99
+#define NSS_CC_UBI32_INTR0_AHB_CLK 100
+#define NSS_CC_UBI32_INTR1_AHB_CLK 101
+#define NSS_CC_UBI32_INTR2_AHB_CLK 102
+#define NSS_CC_UBI32_INTR3_AHB_CLK 103
+#define NSS_CC_UBI32_NC_AXI0_CLK 104
+#define NSS_CC_UBI32_NC_AXI1_CLK 105
+#define NSS_CC_UBI32_NC_AXI2_CLK 106
+#define NSS_CC_UBI32_NC_AXI3_CLK 107
+#define NSS_CC_UBI32_UTCM0_CLK 108
+#define NSS_CC_UBI32_UTCM1_CLK 109
+#define NSS_CC_UBI32_UTCM2_CLK 110
+#define NSS_CC_UBI32_UTCM3_CLK 111
+#define NSS_CC_UBI3_CLK_SRC 112
+#define NSS_CC_UBI3_DIV_CLK_SRC 113
+#define NSS_CC_UBI_AXI_CLK_SRC 114
+#define NSS_CC_UBI_NC_AXI_BFDCD_CLK_SRC 115
+#define NSS_CC_UNIPHY_PORT1_RX_CLK 116
+#define NSS_CC_UNIPHY_PORT1_TX_CLK 117
+#define NSS_CC_UNIPHY_PORT2_RX_CLK 118
+#define NSS_CC_UNIPHY_PORT2_TX_CLK 119
+#define NSS_CC_UNIPHY_PORT3_RX_CLK 120
+#define NSS_CC_UNIPHY_PORT3_TX_CLK 121
+#define NSS_CC_UNIPHY_PORT4_RX_CLK 122
+#define NSS_CC_UNIPHY_PORT4_TX_CLK 123
+#define NSS_CC_UNIPHY_PORT5_RX_CLK 124
+#define NSS_CC_UNIPHY_PORT5_TX_CLK 125
+#define NSS_CC_UNIPHY_PORT6_RX_CLK 126
+#define NSS_CC_UNIPHY_PORT6_TX_CLK 127
+#define NSS_CC_XGMAC0_PTP_REF_CLK 128
+#define NSS_CC_XGMAC0_PTP_REF_DIV_CLK_SRC 129
+#define NSS_CC_XGMAC1_PTP_REF_CLK 130
+#define NSS_CC_XGMAC1_PTP_REF_DIV_CLK_SRC 131
+#define NSS_CC_XGMAC2_PTP_REF_CLK 132
+#define NSS_CC_XGMAC2_PTP_REF_DIV_CLK_SRC 133
+#define NSS_CC_XGMAC3_PTP_REF_CLK 134
+#define NSS_CC_XGMAC3_PTP_REF_DIV_CLK_SRC 135
+#define NSS_CC_XGMAC4_PTP_REF_CLK 136
+#define NSS_CC_XGMAC4_PTP_REF_DIV_CLK_SRC 137
+#define NSS_CC_XGMAC5_PTP_REF_CLK 138
+#define NSS_CC_XGMAC5_PTP_REF_DIV_CLK_SRC 139
+#define UBI32_PLL 140
+#define UBI32_PLL_MAIN 141
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,kaanapali-gcc.h b/include/dt-bindings/clock/qcom,kaanapali-gcc.h
new file mode 100644
index 000000000000..890e48709f09
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,kaanapali-gcc.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_KAANAPALI_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_KAANAPALI_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_AXI_CLK 0
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 1
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 2
+#define GCC_BOOT_ROM_AHB_CLK 3
+#define GCC_CAM_BIST_MCLK_AHB_CLK 4
+#define GCC_CAMERA_AHB_CLK 5
+#define GCC_CAMERA_HF_AXI_CLK 6
+#define GCC_CAMERA_SF_AXI_CLK 7
+#define GCC_CAMERA_XO_CLK 8
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 9
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 10
+#define GCC_CNOC_PCIE_SF_AXI_CLK 11
+#define GCC_DDRSS_PCIE_SF_QTB_CLK 12
+#define GCC_QMIP_CAMERA_CMD_AHB_CLK 13
+#define GCC_DISP_HF_AXI_CLK 14
+#define GCC_DISP_SF_AXI_CLK 15
+#define GCC_EVA_AHB_CLK 16
+#define GCC_EVA_AXI0_CLK 17
+#define GCC_EVA_AXI0C_CLK 18
+#define GCC_EVA_XO_CLK 19
+#define GCC_GP1_CLK 20
+#define GCC_GP1_CLK_SRC 21
+#define GCC_GP2_CLK 22
+#define GCC_GP2_CLK_SRC 23
+#define GCC_GP3_CLK 24
+#define GCC_GP3_CLK_SRC 25
+#define GCC_GPLL0 26
+#define GCC_GPLL0_OUT_EVEN 27
+#define GCC_GPLL1 28
+#define GCC_GPLL4 29
+#define GCC_GPLL7 30
+#define GCC_GPLL9 31
+#define GCC_GPU_CFG_AHB_CLK 32
+#define GCC_GPU_GEMNOC_GFX_CLK 33
+#define GCC_GPU_GPLL0_CLK_SRC 34
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 35
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 36
+#define GCC_QMIP_GPU_AHB_CLK 37
+#define GCC_PCIE_0_AUX_CLK 38
+#define GCC_PCIE_0_AUX_CLK_SRC 39
+#define GCC_PCIE_0_CFG_AHB_CLK 40
+#define GCC_PCIE_0_MSTR_AXI_CLK 41
+#define GCC_PCIE_0_PHY_AUX_CLK 42
+#define GCC_PCIE_0_PHY_AUX_CLK_SRC 43
+#define GCC_PCIE_0_PHY_RCHNG_CLK 44
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 45
+#define GCC_PCIE_0_PIPE_CLK 46
+#define GCC_PCIE_0_PIPE_CLK_SRC 47
+#define GCC_PCIE_0_SLV_AXI_CLK 48
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 49
+#define GCC_PCIE_RSCC_CFG_AHB_CLK 50
+#define GCC_PCIE_RSCC_XO_CLK 51
+#define GCC_PDM2_CLK 52
+#define GCC_PDM2_CLK_SRC 53
+#define GCC_PDM_AHB_CLK 54
+#define GCC_PDM_XO4_CLK 55
+#define GCC_QUPV3_I2C_CORE_CLK 56
+#define GCC_QUPV3_I2C_S0_CLK 57
+#define GCC_QUPV3_I2C_S0_CLK_SRC 58
+#define GCC_QUPV3_I2C_S1_CLK 59
+#define GCC_QUPV3_I2C_S1_CLK_SRC 60
+#define GCC_QUPV3_I2C_S2_CLK 61
+#define GCC_QUPV3_I2C_S2_CLK_SRC 62
+#define GCC_QUPV3_I2C_S3_CLK 63
+#define GCC_QUPV3_I2C_S3_CLK_SRC 64
+#define GCC_QUPV3_I2C_S4_CLK 65
+#define GCC_QUPV3_I2C_S4_CLK_SRC 66
+#define GCC_QUPV3_I2C_S_AHB_CLK 67
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 68
+#define GCC_QUPV3_WRAP1_CORE_CLK 69
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK 70
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC 71
+#define GCC_QUPV3_WRAP1_S0_CLK 72
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 73
+#define GCC_QUPV3_WRAP1_S1_CLK 74
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 75
+#define GCC_QUPV3_WRAP1_S2_CLK 76
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 77
+#define GCC_QUPV3_WRAP1_S3_CLK 78
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 79
+#define GCC_QUPV3_WRAP1_S4_CLK 80
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 81
+#define GCC_QUPV3_WRAP1_S5_CLK 82
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S6_CLK 84
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S7_CLK 86
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 87
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 88
+#define GCC_QUPV3_WRAP2_CORE_CLK 89
+#define GCC_QUPV3_WRAP2_S0_CLK 90
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 91
+#define GCC_QUPV3_WRAP2_S1_CLK 92
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 93
+#define GCC_QUPV3_WRAP2_S2_CLK 94
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 95
+#define GCC_QUPV3_WRAP2_S3_CLK 96
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 97
+#define GCC_QUPV3_WRAP2_S4_CLK 98
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 99
+#define GCC_QUPV3_WRAP3_CORE_2X_CLK 100
+#define GCC_QUPV3_WRAP3_CORE_CLK 101
+#define GCC_QUPV3_WRAP3_IBI_CTRL_0_CLK_SRC 102
+#define GCC_QUPV3_WRAP3_IBI_CTRL_1_CLK 103
+#define GCC_QUPV3_WRAP3_IBI_CTRL_2_CLK 104
+#define GCC_QUPV3_WRAP3_S0_CLK 105
+#define GCC_QUPV3_WRAP3_S0_CLK_SRC 106
+#define GCC_QUPV3_WRAP3_S1_CLK 107
+#define GCC_QUPV3_WRAP3_S1_CLK_SRC 108
+#define GCC_QUPV3_WRAP3_S2_CLK 109
+#define GCC_QUPV3_WRAP3_S2_CLK_SRC 110
+#define GCC_QUPV3_WRAP3_S3_CLK 111
+#define GCC_QUPV3_WRAP3_S3_CLK_SRC 112
+#define GCC_QUPV3_WRAP3_S4_CLK 113
+#define GCC_QUPV3_WRAP3_S4_CLK_SRC 114
+#define GCC_QUPV3_WRAP3_S5_CLK 115
+#define GCC_QUPV3_WRAP3_S5_CLK_SRC 116
+#define GCC_QUPV3_WRAP4_CORE_2X_CLK 117
+#define GCC_QUPV3_WRAP4_CORE_CLK 118
+#define GCC_QUPV3_WRAP4_S0_CLK 119
+#define GCC_QUPV3_WRAP4_S0_CLK_SRC 120
+#define GCC_QUPV3_WRAP4_S1_CLK 121
+#define GCC_QUPV3_WRAP4_S1_CLK_SRC 122
+#define GCC_QUPV3_WRAP4_S2_CLK 123
+#define GCC_QUPV3_WRAP4_S2_CLK_SRC 124
+#define GCC_QUPV3_WRAP4_S3_CLK 125
+#define GCC_QUPV3_WRAP4_S3_CLK_SRC 126
+#define GCC_QUPV3_WRAP4_S4_CLK 127
+#define GCC_QUPV3_WRAP4_S4_CLK_SRC 128
+#define GCC_QUPV3_WRAP_1_M_AXI_CLK 129
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 130
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 131
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 132
+#define GCC_QUPV3_WRAP_3_IBI_1_AHB_CLK 133
+#define GCC_QUPV3_WRAP_3_IBI_2_AHB_CLK 134
+#define GCC_QUPV3_WRAP_3_M_AHB_CLK 135
+#define GCC_QUPV3_WRAP_3_S_AHB_CLK 136
+#define GCC_QUPV3_WRAP_4_M_AHB_CLK 137
+#define GCC_QUPV3_WRAP_4_S_AHB_CLK 138
+#define GCC_SDCC2_AHB_CLK 139
+#define GCC_SDCC2_APPS_CLK 140
+#define GCC_SDCC2_APPS_CLK_SRC 141
+#define GCC_SDCC4_AHB_CLK 142
+#define GCC_SDCC4_APPS_CLK 143
+#define GCC_SDCC4_APPS_CLK_SRC 144
+#define GCC_UFS_PHY_AHB_CLK 145
+#define GCC_UFS_PHY_AXI_CLK 146
+#define GCC_UFS_PHY_AXI_CLK_SRC 147
+#define GCC_UFS_PHY_ICE_CORE_CLK 148
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 149
+#define GCC_UFS_PHY_PHY_AUX_CLK 150
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 151
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 152
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 153
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 154
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 155
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 156
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 157
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 158
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 159
+#define GCC_USB30_PRIM_MASTER_CLK 160
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 161
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 162
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 163
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 164
+#define GCC_USB30_PRIM_SLEEP_CLK 165
+#define GCC_USB3_PRIM_PHY_AUX_CLK 166
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 167
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 168
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 169
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 170
+#define GCC_VIDEO_AHB_CLK 171
+#define GCC_VIDEO_AXI0_CLK 172
+#define GCC_VIDEO_AXI1_CLK 173
+#define GCC_VIDEO_XO_CLK 174
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 175
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 176
+#define GCC_QMIP_DISP_DCP_SF_AHB_CLK 177
+#define GCC_QMIP_PCIE_AHB_CLK 178
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 179
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 180
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 181
+#define GCC_DISP_AHB_CLK 182
+
+/* GCC power domains */
+#define GCC_PCIE_0_GDSC 0
+#define GCC_PCIE_0_PHY_GDSC 1
+#define GCC_UFS_MEM_PHY_GDSC 2
+#define GCC_UFS_PHY_GDSC 3
+#define GCC_USB30_PRIM_GDSC 4
+#define GCC_USB3_PHY_GDSC 5
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_EVA_AXI0_CLK_ARES 2
+#define GCC_EVA_AXI0C_CLK_ARES 3
+#define GCC_EVA_BCR 4
+#define GCC_GPU_BCR 5
+#define GCC_PCIE_0_BCR 6
+#define GCC_PCIE_0_LINK_DOWN_BCR 7
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_0_PHY_BCR 9
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_PHY_BCR 11
+#define GCC_PCIE_PHY_CFG_AHB_BCR 12
+#define GCC_PCIE_PHY_COM_BCR 13
+#define GCC_PCIE_RSCC_BCR 14
+#define GCC_PDM_BCR 15
+#define GCC_QUPV3_WRAPPER_1_BCR 16
+#define GCC_QUPV3_WRAPPER_2_BCR 17
+#define GCC_QUPV3_WRAPPER_3_BCR 18
+#define GCC_QUPV3_WRAPPER_4_BCR 19
+#define GCC_QUPV3_WRAPPER_I2C_BCR 20
+#define GCC_QUSB2PHY_PRIM_BCR 21
+#define GCC_QUSB2PHY_SEC_BCR 22
+#define GCC_SDCC2_BCR 23
+#define GCC_SDCC4_BCR 24
+#define GCC_UFS_PHY_BCR 25
+#define GCC_USB30_PRIM_BCR 26
+#define GCC_USB3_DP_PHY_PRIM_BCR 27
+#define GCC_USB3_DP_PHY_SEC_BCR 28
+#define GCC_USB3_PHY_PRIM_BCR 29
+#define GCC_USB3_PHY_SEC_BCR 30
+#define GCC_USB3PHY_PHY_PRIM_BCR 31
+#define GCC_USB3PHY_PHY_SEC_BCR 32
+#define GCC_VIDEO_AXI0_CLK_ARES 33
+#define GCC_VIDEO_AXI1_CLK_ARES 34
+#define GCC_VIDEO_BCR 35
+#define GCC_VIDEO_XO_CLK_ARES 36
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lcc-ipq806x.h b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
index 25b92bbf0ab4..e0fb4acf4ba8 100644
--- a/include/dt-bindings/clock/qcom,lcc-ipq806x.h
+++ b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
@@ -19,4 +19,6 @@
#define SPDIF_CLK 10
#define AHBIX_CLK 11

+#define LCC_PCM_RESET 0
+
#endif
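
LCC_PCM_RESET opens a separate reset index space in this header, so the LCC
node can act as a reset provider as well as a clock provider. A hedged usage
sketch (the &lcc label and consumer node shape are assumptions, not part of
this change):

	#include <dt-bindings/clock/qcom,lcc-ipq806x.h>

	pcm {
		resets = <&lcc LCC_PCM_RESET>;
		reset-names = "pcm";
	};
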
diff --git a/include/dt-bindings/clock/qcom,lcc-mdm9615.h b/include/dt-bindings/clock/qcom,lcc-mdm9615.h
deleted file mode 100644
index 299338ee1d88..000000000000
--- a/include/dt-bindings/clock/qcom,lcc-mdm9615.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
- * Copyright (c) BayLibre, SAS.
- * Author : Neil Armstrong <narmstrong@baylibre.com>
- */
-
-#ifndef _DT_BINDINGS_CLK_LCC_MDM9615_H
-#define _DT_BINDINGS_CLK_LCC_MDM9615_H
-
-#define PLL4 0
-#define MI2S_OSR_SRC 1
-#define MI2S_OSR_CLK 2
-#define MI2S_DIV_CLK 3
-#define MI2S_BIT_DIV_CLK 4
-#define MI2S_BIT_CLK 5
-#define PCM_SRC 6
-#define PCM_CLK_OUT 7
-#define PCM_CLK 8
-#define SLIMBUS_SRC 9
-#define AUDIO_SLIMBUS_CLK 10
-#define SPS_SLIMBUS_CLK 11
-#define CODEC_I2S_MIC_OSR_SRC 12
-#define CODEC_I2S_MIC_OSR_CLK 13
-#define CODEC_I2S_MIC_DIV_CLK 14
-#define CODEC_I2S_MIC_BIT_DIV_CLK 15
-#define CODEC_I2S_MIC_BIT_CLK 16
-#define SPARE_I2S_MIC_OSR_SRC 17
-#define SPARE_I2S_MIC_OSR_CLK 18
-#define SPARE_I2S_MIC_DIV_CLK 19
-#define SPARE_I2S_MIC_BIT_DIV_CLK 20
-#define SPARE_I2S_MIC_BIT_CLK 21
-#define CODEC_I2S_SPKR_OSR_SRC 22
-#define CODEC_I2S_SPKR_OSR_CLK 23
-#define CODEC_I2S_SPKR_DIV_CLK 24
-#define CODEC_I2S_SPKR_BIT_DIV_CLK 25
-#define CODEC_I2S_SPKR_BIT_CLK 26
-#define SPARE_I2S_SPKR_OSR_SRC 27
-#define SPARE_I2S_SPKR_OSR_CLK 28
-#define SPARE_I2S_SPKR_DIV_CLK 29
-#define SPARE_I2S_SPKR_BIT_DIV_CLK 30
-#define SPARE_I2S_SPKR_BIT_CLK 31
-
-#endif
diff --git a/include/dt-bindings/clock/qcom,lpass-sc7280.h b/include/dt-bindings/clock/qcom,lpass-sc7280.h
new file mode 100644
index 000000000000..e71ccac3a375
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lpass-sc7280.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASS_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_LPASS_SC7280_H
+
+#define LPASS_Q6SS_AHBM_CLK 0
+#define LPASS_Q6SS_AHBS_CLK 1
+#define LPASS_TOP_CC_LPI_Q6_AXIM_HS_CLK 2
+#define LPASS_QDSP6SS_XO_CLK 3
+#define LPASS_QDSP6SS_SLEEP_CLK 4
+#define LPASS_QDSP6SS_CORE_CLK 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h b/include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h
new file mode 100644
index 000000000000..22dcd47d4513
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lpassaudiocc-sc7280.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASS_AUDIO_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_LPASS_AUDIO_CC_SC7280_H
+
+/* LPASS_AUDIO_CC clocks */
+#define LPASS_AUDIO_CC_PLL 0
+#define LPASS_AUDIO_CC_PLL_OUT_AUX2 1
+#define LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC 2
+#define LPASS_AUDIO_CC_PLL_OUT_MAIN_DIV_CLK_SRC 3
+#define LPASS_AUDIO_CC_CDIV_RX_MCLK_DIV_CLK_SRC 4
+#define LPASS_AUDIO_CC_CODEC_MEM0_CLK 5
+#define LPASS_AUDIO_CC_CODEC_MEM1_CLK 6
+#define LPASS_AUDIO_CC_CODEC_MEM2_CLK 7
+#define LPASS_AUDIO_CC_CODEC_MEM_CLK 8
+#define LPASS_AUDIO_CC_EXT_MCLK0_CLK 9
+#define LPASS_AUDIO_CC_EXT_MCLK0_CLK_SRC 10
+#define LPASS_AUDIO_CC_EXT_MCLK1_CLK 11
+#define LPASS_AUDIO_CC_EXT_MCLK1_CLK_SRC 12
+#define LPASS_AUDIO_CC_RX_MCLK_2X_CLK 13
+#define LPASS_AUDIO_CC_RX_MCLK_CLK 14
+#define LPASS_AUDIO_CC_RX_MCLK_CLK_SRC 15
+
+/* LPASS AUDIO CC CSR */
+#define LPASS_AUDIO_SWR_RX_CGCR 0
+#define LPASS_AUDIO_SWR_TX_CGCR 1
+#define LPASS_AUDIO_SWR_WSA_CGCR 2
+
+/* LPASS_AON_CC clocks */
+#define LPASS_AON_CC_PLL 0
+#define LPASS_AON_CC_PLL_OUT_EVEN 1
+#define LPASS_AON_CC_PLL_OUT_MAIN_CDIV_DIV_CLK_SRC 2
+#define LPASS_AON_CC_PLL_OUT_ODD 3
+#define LPASS_AON_CC_AUDIO_HM_H_CLK 4
+#define LPASS_AON_CC_CDIV_TX_MCLK_DIV_CLK_SRC 5
+#define LPASS_AON_CC_MAIN_RCG_CLK_SRC 6
+#define LPASS_AON_CC_TX_MCLK_2X_CLK 7
+#define LPASS_AON_CC_TX_MCLK_CLK 8
+#define LPASS_AON_CC_TX_MCLK_RCG_CLK_SRC 9
+#define LPASS_AON_CC_VA_MEM0_CLK 10
+
+/* LPASS_AON_CC power domains */
+#define LPASS_AON_CC_LPASS_AUDIO_HM_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h b/include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h
new file mode 100644
index 000000000000..0324c69ce968
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lpasscorecc-sc7280.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASS_CORE_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_LPASS_CORE_CC_SC7280_H
+
+/* LPASS_CORE_CC clocks */
+#define LPASS_CORE_CC_DIG_PLL 0
+#define LPASS_CORE_CC_DIG_PLL_OUT_MAIN_DIV_CLK_SRC 1
+#define LPASS_CORE_CC_DIG_PLL_OUT_ODD 2
+#define LPASS_CORE_CC_CORE_CLK 3
+#define LPASS_CORE_CC_CORE_CLK_SRC 4
+#define LPASS_CORE_CC_EXT_IF0_CLK_SRC 5
+#define LPASS_CORE_CC_EXT_IF0_IBIT_CLK 6
+#define LPASS_CORE_CC_EXT_IF1_CLK_SRC 7
+#define LPASS_CORE_CC_EXT_IF1_IBIT_CLK 8
+#define LPASS_CORE_CC_LPM_CORE_CLK 9
+#define LPASS_CORE_CC_LPM_MEM0_CORE_CLK 10
+#define LPASS_CORE_CC_SYSNOC_MPORT_CORE_CLK 11
+#define LPASS_CORE_CC_EXT_MCLK0_CLK 12
+#define LPASS_CORE_CC_EXT_MCLK0_CLK_SRC 13
+
+/* LPASS_CORE_CC power domains */
+#define LPASS_CORE_CC_LPASS_CORE_HM_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-camcc.h b/include/dt-bindings/clock/qcom,milos-camcc.h
new file mode 100644
index 000000000000..21925dca9a20
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-camcc.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_MILOS_H
+
+/* CAM_CC clocks */
+#define CAM_CC_PLL0 0
+#define CAM_CC_PLL0_OUT_EVEN 1
+#define CAM_CC_PLL0_OUT_ODD 2
+#define CAM_CC_PLL1 3
+#define CAM_CC_PLL1_OUT_EVEN 4
+#define CAM_CC_PLL2 5
+#define CAM_CC_PLL2_OUT_EVEN 6
+#define CAM_CC_PLL3 7
+#define CAM_CC_PLL3_OUT_EVEN 8
+#define CAM_CC_PLL4 9
+#define CAM_CC_PLL4_OUT_EVEN 10
+#define CAM_CC_PLL5 11
+#define CAM_CC_PLL5_OUT_EVEN 12
+#define CAM_CC_PLL6 13
+#define CAM_CC_PLL6_OUT_EVEN 14
+#define CAM_CC_BPS_AHB_CLK 15
+#define CAM_CC_BPS_AREG_CLK 16
+#define CAM_CC_BPS_CLK 17
+#define CAM_CC_BPS_CLK_SRC 18
+#define CAM_CC_CAMNOC_ATB_CLK 19
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 20
+#define CAM_CC_CAMNOC_AXI_HF_CLK 21
+#define CAM_CC_CAMNOC_AXI_SF_CLK 22
+#define CAM_CC_CAMNOC_NRT_AXI_CLK 23
+#define CAM_CC_CAMNOC_RT_AXI_CLK 24
+#define CAM_CC_CCI_0_CLK 25
+#define CAM_CC_CCI_0_CLK_SRC 26
+#define CAM_CC_CCI_1_CLK 27
+#define CAM_CC_CCI_1_CLK_SRC 28
+#define CAM_CC_CORE_AHB_CLK 29
+#define CAM_CC_CPAS_AHB_CLK 30
+#define CAM_CC_CPHY_RX_CLK_SRC 31
+#define CAM_CC_CRE_AHB_CLK 32
+#define CAM_CC_CRE_CLK 33
+#define CAM_CC_CRE_CLK_SRC 34
+#define CAM_CC_CSI0PHYTIMER_CLK 35
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 36
+#define CAM_CC_CSI1PHYTIMER_CLK 37
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 38
+#define CAM_CC_CSI2PHYTIMER_CLK 39
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 40
+#define CAM_CC_CSI3PHYTIMER_CLK 41
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 42
+#define CAM_CC_CSIPHY0_CLK 43
+#define CAM_CC_CSIPHY1_CLK 44
+#define CAM_CC_CSIPHY2_CLK 45
+#define CAM_CC_CSIPHY3_CLK 46
+#define CAM_CC_FAST_AHB_CLK_SRC 47
+#define CAM_CC_GDSC_CLK 48
+#define CAM_CC_ICP_ATB_CLK 49
+#define CAM_CC_ICP_CLK 50
+#define CAM_CC_ICP_CLK_SRC 51
+#define CAM_CC_ICP_CTI_CLK 52
+#define CAM_CC_ICP_TS_CLK 53
+#define CAM_CC_MCLK0_CLK 54
+#define CAM_CC_MCLK0_CLK_SRC 55
+#define CAM_CC_MCLK1_CLK 56
+#define CAM_CC_MCLK1_CLK_SRC 57
+#define CAM_CC_MCLK2_CLK 58
+#define CAM_CC_MCLK2_CLK_SRC 59
+#define CAM_CC_MCLK3_CLK 60
+#define CAM_CC_MCLK3_CLK_SRC 61
+#define CAM_CC_MCLK4_CLK 62
+#define CAM_CC_MCLK4_CLK_SRC 63
+#define CAM_CC_OPE_0_AHB_CLK 64
+#define CAM_CC_OPE_0_AREG_CLK 65
+#define CAM_CC_OPE_0_CLK 66
+#define CAM_CC_OPE_0_CLK_SRC 67
+#define CAM_CC_SLEEP_CLK 68
+#define CAM_CC_SLEEP_CLK_SRC 69
+#define CAM_CC_SLOW_AHB_CLK_SRC 70
+#define CAM_CC_SOC_AHB_CLK 71
+#define CAM_CC_SYS_TMR_CLK 72
+#define CAM_CC_TFE_0_AHB_CLK 73
+#define CAM_CC_TFE_0_CLK 74
+#define CAM_CC_TFE_0_CLK_SRC 75
+#define CAM_CC_TFE_0_CPHY_RX_CLK 76
+#define CAM_CC_TFE_0_CSID_CLK 77
+#define CAM_CC_TFE_0_CSID_CLK_SRC 78
+#define CAM_CC_TFE_1_AHB_CLK 79
+#define CAM_CC_TFE_1_CLK 80
+#define CAM_CC_TFE_1_CLK_SRC 81
+#define CAM_CC_TFE_1_CPHY_RX_CLK 82
+#define CAM_CC_TFE_1_CSID_CLK 83
+#define CAM_CC_TFE_1_CSID_CLK_SRC 84
+#define CAM_CC_TFE_2_AHB_CLK 85
+#define CAM_CC_TFE_2_CLK 86
+#define CAM_CC_TFE_2_CLK_SRC 87
+#define CAM_CC_TFE_2_CPHY_RX_CLK 88
+#define CAM_CC_TFE_2_CSID_CLK 89
+#define CAM_CC_TFE_2_CSID_CLK_SRC 90
+#define CAM_CC_TOP_SHIFT_CLK 91
+#define CAM_CC_XO_CLK_SRC 92
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CAMSS_TOP_BCR 2
+#define CAM_CC_CCI_0_BCR 3
+#define CAM_CC_CCI_1_BCR 4
+#define CAM_CC_CPAS_BCR 5
+#define CAM_CC_CRE_BCR 6
+#define CAM_CC_CSI0PHY_BCR 7
+#define CAM_CC_CSI1PHY_BCR 8
+#define CAM_CC_CSI2PHY_BCR 9
+#define CAM_CC_CSI3PHY_BCR 10
+#define CAM_CC_ICP_BCR 11
+#define CAM_CC_MCLK0_BCR 12
+#define CAM_CC_MCLK1_BCR 13
+#define CAM_CC_MCLK2_BCR 14
+#define CAM_CC_MCLK3_BCR 15
+#define CAM_CC_MCLK4_BCR 16
+#define CAM_CC_OPE_0_BCR 17
+#define CAM_CC_TFE_0_BCR 18
+#define CAM_CC_TFE_1_BCR 19
+#define CAM_CC_TFE_2_BCR 20
+
+/* CAM_CC power domains */
+#define CAM_CC_CAMSS_TOP_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-dispcc.h b/include/dt-bindings/clock/qcom,milos-dispcc.h
new file mode 100644
index 000000000000..c70f23f32f0a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-dispcc.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_MILOS_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_ACCU_CLK 1
+#define DISP_CC_MDSS_AHB1_CLK 2
+#define DISP_CC_MDSS_AHB_CLK 3
+#define DISP_CC_MDSS_AHB_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_CLK 5
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 7
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 8
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 9
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 10
+#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 11
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 12
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 13
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 14
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 15
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 16
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 17
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 18
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 19
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 20
+#define DISP_CC_MDSS_ESC0_CLK 21
+#define DISP_CC_MDSS_ESC0_CLK_SRC 22
+#define DISP_CC_MDSS_MDP1_CLK 23
+#define DISP_CC_MDSS_MDP_CLK 24
+#define DISP_CC_MDSS_MDP_CLK_SRC 25
+#define DISP_CC_MDSS_MDP_LUT1_CLK 26
+#define DISP_CC_MDSS_MDP_LUT_CLK 27
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 28
+#define DISP_CC_MDSS_PCLK0_CLK 29
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 30
+#define DISP_CC_MDSS_RSCC_AHB_CLK 31
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 32
+#define DISP_CC_MDSS_VSYNC1_CLK 33
+#define DISP_CC_MDSS_VSYNC_CLK 34
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 35
+#define DISP_CC_SLEEP_CLK 36
+#define DISP_CC_SLEEP_CLK_SRC 37
+#define DISP_CC_XO_CLK 38
+#define DISP_CC_XO_CLK_SRC 39
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC power domains */
+#define DISP_CC_MDSS_CORE_GDSC 0
+#define DISP_CC_MDSS_CORE_INT2_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-gcc.h b/include/dt-bindings/clock/qcom,milos-gcc.h
new file mode 100644
index 000000000000..a530ca39e1ef
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-gcc.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_MILOS_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL2 2
+#define GCC_GPLL4 3
+#define GCC_GPLL6 4
+#define GCC_GPLL7 5
+#define GCC_GPLL9 6
+#define GCC_AGGRE_NOC_PCIE_AXI_CLK 7
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 8
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 9
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 10
+#define GCC_BOOT_ROM_AHB_CLK 11
+#define GCC_CAMERA_AHB_CLK 12
+#define GCC_CAMERA_HF_AXI_CLK 13
+#define GCC_CAMERA_HF_XO_CLK 14
+#define GCC_CAMERA_SF_AXI_CLK 15
+#define GCC_CAMERA_SF_XO_CLK 16
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 17
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 18
+#define GCC_CNOC_PCIE_SF_AXI_CLK 19
+#define GCC_DDRSS_GPU_AXI_CLK 20
+#define GCC_DDRSS_PCIE_SF_QTB_CLK 21
+#define GCC_DISP_AHB_CLK 22
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 23
+#define GCC_DISP_HF_AXI_CLK 24
+#define GCC_DISP_XO_CLK 25
+#define GCC_GP1_CLK 26
+#define GCC_GP1_CLK_SRC 27
+#define GCC_GP2_CLK 28
+#define GCC_GP2_CLK_SRC 29
+#define GCC_GP3_CLK 30
+#define GCC_GP3_CLK_SRC 31
+#define GCC_GPU_CFG_AHB_CLK 32
+#define GCC_GPU_GPLL0_CLK_SRC 33
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 34
+#define GCC_GPU_MEMNOC_GFX_CLK 35
+#define GCC_GPU_SNOC_DVM_GFX_CLK 36
+#define GCC_PCIE_0_AUX_CLK 37
+#define GCC_PCIE_0_AUX_CLK_SRC 38
+#define GCC_PCIE_0_CFG_AHB_CLK 39
+#define GCC_PCIE_0_MSTR_AXI_CLK 40
+#define GCC_PCIE_0_PHY_RCHNG_CLK 41
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 42
+#define GCC_PCIE_0_PIPE_CLK 43
+#define GCC_PCIE_0_PIPE_CLK_SRC 44
+#define GCC_PCIE_0_PIPE_DIV2_CLK 45
+#define GCC_PCIE_0_PIPE_DIV2_CLK_SRC 46
+#define GCC_PCIE_0_SLV_AXI_CLK 47
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 48
+#define GCC_PCIE_1_AUX_CLK 49
+#define GCC_PCIE_1_AUX_CLK_SRC 50
+#define GCC_PCIE_1_CFG_AHB_CLK 51
+#define GCC_PCIE_1_MSTR_AXI_CLK 52
+#define GCC_PCIE_1_PHY_RCHNG_CLK 53
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 54
+#define GCC_PCIE_1_PIPE_CLK 55
+#define GCC_PCIE_1_PIPE_CLK_SRC 56
+#define GCC_PCIE_1_PIPE_DIV2_CLK 57
+#define GCC_PCIE_1_PIPE_DIV2_CLK_SRC 58
+#define GCC_PCIE_1_SLV_AXI_CLK 59
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 60
+#define GCC_PCIE_RSCC_CFG_AHB_CLK 61
+#define GCC_PCIE_RSCC_XO_CLK 62
+#define GCC_PDM2_CLK 63
+#define GCC_PDM2_CLK_SRC 64
+#define GCC_PDM_AHB_CLK 65
+#define GCC_PDM_XO4_CLK 66
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 67
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 68
+#define GCC_QMIP_DISP_AHB_CLK 69
+#define GCC_QMIP_GPU_AHB_CLK 70
+#define GCC_QMIP_PCIE_AHB_CLK 71
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 72
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 73
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 74
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 75
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 76
+#define GCC_QUPV3_WRAP0_CORE_CLK 77
+#define GCC_QUPV3_WRAP0_QSPI_REF_CLK 78
+#define GCC_QUPV3_WRAP0_QSPI_REF_CLK_SRC 79
+#define GCC_QUPV3_WRAP0_S0_CLK 80
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 81
+#define GCC_QUPV3_WRAP0_S1_CLK 82
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 83
+#define GCC_QUPV3_WRAP0_S2_CLK 84
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 85
+#define GCC_QUPV3_WRAP0_S3_CLK 86
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 87
+#define GCC_QUPV3_WRAP0_S4_CLK 88
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 89
+#define GCC_QUPV3_WRAP0_S5_CLK 90
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 91
+#define GCC_QUPV3_WRAP0_S6_CLK 92
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 94
+#define GCC_QUPV3_WRAP1_CORE_CLK 95
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK 96
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S0_CLK 98
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 99
+#define GCC_QUPV3_WRAP1_S1_CLK 100
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 101
+#define GCC_QUPV3_WRAP1_S2_CLK 102
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 103
+#define GCC_QUPV3_WRAP1_S3_CLK 104
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 105
+#define GCC_QUPV3_WRAP1_S4_CLK 106
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 107
+#define GCC_QUPV3_WRAP1_S5_CLK 108
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 109
+#define GCC_QUPV3_WRAP1_S6_CLK 110
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 111
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 112
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 113
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 114
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 115
+#define GCC_SDCC1_AHB_CLK 116
+#define GCC_SDCC1_APPS_CLK 117
+#define GCC_SDCC1_APPS_CLK_SRC 118
+#define GCC_SDCC1_ICE_CORE_CLK 119
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 120
+#define GCC_SDCC2_AHB_CLK 121
+#define GCC_SDCC2_APPS_CLK 122
+#define GCC_SDCC2_APPS_CLK_SRC 123
+#define GCC_UFS_PHY_AHB_CLK 124
+#define GCC_UFS_PHY_AXI_CLK 125
+#define GCC_UFS_PHY_AXI_CLK_SRC 126
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 127
+#define GCC_UFS_PHY_ICE_CORE_CLK 128
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 129
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 130
+#define GCC_UFS_PHY_PHY_AUX_CLK 131
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 132
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 133
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 134
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 135
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 136
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 137
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 138
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 139
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 140
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 141
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 142
+#define GCC_USB30_PRIM_ATB_CLK 143
+#define GCC_USB30_PRIM_MASTER_CLK 144
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 145
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 146
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 147
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 148
+#define GCC_USB30_PRIM_SLEEP_CLK 149
+#define GCC_USB3_PRIM_PHY_AUX_CLK 150
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 151
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 152
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 153
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 154
+#define GCC_VIDEO_AHB_CLK 155
+#define GCC_VIDEO_AXI0_CLK 156
+#define GCC_VIDEO_XO_CLK 157
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_1_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_RSCC_BCR 13
+#define GCC_PDM_BCR 14
+#define GCC_QUPV3_WRAPPER_0_BCR 15
+#define GCC_QUPV3_WRAPPER_1_BCR 16
+#define GCC_QUSB2PHY_PRIM_BCR 17
+#define GCC_QUSB2PHY_SEC_BCR 18
+#define GCC_SDCC1_BCR 19
+#define GCC_SDCC2_BCR 20
+#define GCC_UFS_PHY_BCR 21
+#define GCC_USB30_PRIM_BCR 22
+#define GCC_USB3_DP_PHY_PRIM_BCR 23
+#define GCC_USB3_PHY_PRIM_BCR 24
+#define GCC_USB3PHY_PHY_PRIM_BCR 25
+#define GCC_VIDEO_AXI0_CLK_ARES 26
+#define GCC_VIDEO_BCR 27
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_0_PHY_GDSC 1
+#define PCIE_1_GDSC 2
+#define PCIE_1_PHY_GDSC 3
+#define UFS_PHY_GDSC 4
+#define UFS_MEM_PHY_GDSC 5
+#define USB30_PRIM_GDSC 6
+#define USB3_PHY_GDSC 7
+
+#endif
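
The Milos GCC binding spans three independent index spaces - clocks, resets
and GDSC power domains - all resolved against the same provider phandle. A
hypothetical UFS host controller sketch tying them together (node name, unit
address and the &gcc label are assumptions):

	#include <dt-bindings/clock/qcom,milos-gcc.h>

	ufshc@1d84000 {
		clocks = <&gcc GCC_UFS_PHY_AHB_CLK>,
			 <&gcc GCC_UFS_PHY_AXI_CLK>;
		clock-names = "ahb", "axi";
		resets = <&gcc GCC_UFS_PHY_BCR>;
		reset-names = "rst";
		power-domains = <&gcc UFS_PHY_GDSC>;
	};
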
diff --git a/include/dt-bindings/clock/qcom,milos-gpucc.h b/include/dt-bindings/clock/qcom,milos-gpucc.h
new file mode 100644
index 000000000000..6ff1925d409f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-gpucc.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_MILOS_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL0_OUT_EVEN 1
+#define GPU_CC_AHB_CLK 2
+#define GPU_CC_CB_CLK 3
+#define GPU_CC_CX_ACCU_SHIFT_CLK 4
+#define GPU_CC_CX_FF_CLK 5
+#define GPU_CC_CX_GMU_CLK 6
+#define GPU_CC_CXO_AON_CLK 7
+#define GPU_CC_CXO_CLK 8
+#define GPU_CC_DEMET_CLK 9
+#define GPU_CC_DEMET_DIV_CLK_SRC 10
+#define GPU_CC_DPM_CLK 11
+#define GPU_CC_FF_CLK_SRC 12
+#define GPU_CC_FREQ_MEASURE_CLK 13
+#define GPU_CC_GMU_CLK_SRC 14
+#define GPU_CC_GX_ACCU_SHIFT_CLK 15
+#define GPU_CC_GX_ACD_AHB_FF_CLK 16
+#define GPU_CC_GX_AHB_FF_CLK 17
+#define GPU_CC_GX_GMU_CLK 18
+#define GPU_CC_GX_RCG_AHB_FF_CLK 19
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 20
+#define GPU_CC_HUB_AON_CLK 21
+#define GPU_CC_HUB_CLK_SRC 22
+#define GPU_CC_HUB_CX_INT_CLK 23
+#define GPU_CC_HUB_DIV_CLK_SRC 24
+#define GPU_CC_MEMNOC_GFX_CLK 25
+#define GPU_CC_RSCC_HUB_AON_CLK 26
+#define GPU_CC_RSCC_XO_AON_CLK 27
+#define GPU_CC_SLEEP_CLK 28
+#define GPU_CC_XO_CLK_SRC 29
+#define GPU_CC_XO_DIV_CLK_SRC 30
+
+/* GPU_CC resets */
+#define GPU_CC_CB_BCR 0
+#define GPU_CC_CX_BCR 1
+#define GPU_CC_FAST_HUB_BCR 2
+#define GPU_CC_FF_BCR 3
+#define GPU_CC_GMU_BCR 4
+#define GPU_CC_GX_BCR 5
+#define GPU_CC_RBCPR_BCR 6
+#define GPU_CC_XO_BCR 7
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-videocc.h b/include/dt-bindings/clock/qcom,milos-videocc.h
new file mode 100644
index 000000000000..3544db81ffae
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-videocc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_MILOS_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_PLL0 0
+#define VIDEO_CC_AHB_CLK 1
+#define VIDEO_CC_AHB_CLK_SRC 2
+#define VIDEO_CC_MVS0_CLK 3
+#define VIDEO_CC_MVS0_CLK_SRC 4
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 5
+#define VIDEO_CC_MVS0_SHIFT_CLK 6
+#define VIDEO_CC_MVS0C_CLK 7
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 8
+#define VIDEO_CC_MVS0C_SHIFT_CLK 9
+#define VIDEO_CC_SLEEP_CLK 10
+#define VIDEO_CC_SLEEP_CLK_SRC 11
+#define VIDEO_CC_XO_CLK 12
+#define VIDEO_CC_XO_CLK_SRC 13
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_INTERFACE_BCR 0
+#define VIDEO_CC_MVS0_BCR 1
+#define VIDEO_CC_MVS0C_CLK_ARES 2
+#define VIDEO_CC_MVS0C_BCR 3
+
+/* VIDEO_CC power domains */
+#define VIDEO_CC_MVS0_GDSC 0
+#define VIDEO_CC_MVS0C_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8960.h b/include/dt-bindings/clock/qcom,mmcc-msm8960.h
index 81714fc859c5..717431d735c1 100644
--- a/include/dt-bindings/clock/qcom,mmcc-msm8960.h
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8960.h
@@ -133,5 +133,7 @@
#define VCAP_CLK 124
#define VCAP_NPL_CLK 125
#define PLL15 126
+#define DSI2_PIXEL_LVDS_SRC 127
+#define LVDS_CLK 128

#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8974.h b/include/dt-bindings/clock/qcom,mmcc-msm8974.h
index a62cb0629a7a..743ee60632eb 100644
--- a/include/dt-bindings/clock/qcom,mmcc-msm8974.h
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8974.h
@@ -121,7 +121,6 @@
#define MMSS_MMSSNOC_BTO_AHB_CLK 112
#define MMSS_MMSSNOC_AXI_CLK 113
#define MMSS_S0_AXI_CLK 114
-#define OCMEMCX_AHB_CLK 115
#define OCMEMCX_OCMEMNOC_CLK 116
#define OXILI_OCMEMGX_CLK 117
#define OCMEMNOC_CLK 118
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8994.h b/include/dt-bindings/clock/qcom,mmcc-msm8994.h
new file mode 100644
index 000000000000..4b289092f5a2
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8994.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, Konrad Dybcio
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_MMCC_8994_H
+#define _DT_BINDINGS_CLK_MSM_MMCC_8994_H
+
+/* Clocks */
+#define MMPLL0_EARLY 0
+#define MMPLL0_PLL 1
+#define MMPLL1_EARLY 2
+#define MMPLL1_PLL 3
+#define MMPLL3_EARLY 4
+#define MMPLL3_PLL 5
+#define MMPLL4_EARLY 6
+#define MMPLL4_PLL 7
+#define MMPLL5_EARLY 8
+#define MMPLL5_PLL 9
+#define AXI_CLK_SRC 10
+#define RBBMTIMER_CLK_SRC 11
+#define PCLK0_CLK_SRC 12
+#define PCLK1_CLK_SRC 13
+#define MDP_CLK_SRC 14
+#define VSYNC_CLK_SRC 15
+#define BYTE0_CLK_SRC 16
+#define BYTE1_CLK_SRC 17
+#define ESC0_CLK_SRC 18
+#define ESC1_CLK_SRC 19
+#define MDSS_AHB_CLK 20
+#define MDSS_PCLK0_CLK 21
+#define MDSS_PCLK1_CLK 22
+#define MDSS_VSYNC_CLK 23
+#define MDSS_BYTE0_CLK 24
+#define MDSS_BYTE1_CLK 25
+#define MDSS_ESC0_CLK 26
+#define MDSS_ESC1_CLK 27
+#define CSI0_CLK_SRC 28
+#define CSI1_CLK_SRC 29
+#define CSI2_CLK_SRC 30
+#define CSI3_CLK_SRC 31
+#define VFE0_CLK_SRC 32
+#define VFE1_CLK_SRC 33
+#define CPP_CLK_SRC 34
+#define JPEG0_CLK_SRC 35
+#define JPEG1_CLK_SRC 36
+#define JPEG2_CLK_SRC 37
+#define CSI2PHYTIMER_CLK_SRC 38
+#define FD_CORE_CLK_SRC 39
+#define OCMEMNOC_CLK_SRC 40
+#define CCI_CLK_SRC 41
+#define MMSS_GP0_CLK_SRC 42
+#define MMSS_GP1_CLK_SRC 43
+#define JPEG_DMA_CLK_SRC 44
+#define MCLK0_CLK_SRC 45
+#define MCLK1_CLK_SRC 46
+#define MCLK2_CLK_SRC 47
+#define MCLK3_CLK_SRC 48
+#define CSI0PHYTIMER_CLK_SRC 49
+#define CSI1PHYTIMER_CLK_SRC 50
+#define EXTPCLK_CLK_SRC 51
+#define HDMI_CLK_SRC 52
+#define CAMSS_AHB_CLK 53
+#define CAMSS_CCI_CCI_AHB_CLK 54
+#define CAMSS_CCI_CCI_CLK 55
+#define CAMSS_VFE_CPP_AHB_CLK 56
+#define CAMSS_VFE_CPP_AXI_CLK 57
+#define CAMSS_VFE_CPP_CLK 58
+#define CAMSS_CSI0_AHB_CLK 59
+#define CAMSS_CSI0_CLK 60
+#define CAMSS_CSI0PHY_CLK 61
+#define CAMSS_CSI0PIX_CLK 62
+#define CAMSS_CSI0RDI_CLK 63
+#define CAMSS_CSI1_AHB_CLK 64
+#define CAMSS_CSI1_CLK 65
+#define CAMSS_CSI1PHY_CLK 66
+#define CAMSS_CSI1PIX_CLK 67
+#define CAMSS_CSI1RDI_CLK 68
+#define CAMSS_CSI2_AHB_CLK 69
+#define CAMSS_CSI2_CLK 70
+#define CAMSS_CSI2PHY_CLK 71
+#define CAMSS_CSI2PIX_CLK 72
+#define CAMSS_CSI2RDI_CLK 73
+#define CAMSS_CSI3_AHB_CLK 74
+#define CAMSS_CSI3_CLK 75
+#define CAMSS_CSI3PHY_CLK 76
+#define CAMSS_CSI3PIX_CLK 77
+#define CAMSS_CSI3RDI_CLK 78
+#define CAMSS_CSI_VFE0_CLK 79
+#define CAMSS_CSI_VFE1_CLK 80
+#define CAMSS_GP0_CLK 81
+#define CAMSS_GP1_CLK 82
+#define CAMSS_ISPIF_AHB_CLK 83
+#define CAMSS_JPEG_DMA_CLK 84
+#define CAMSS_JPEG_JPEG0_CLK 85
+#define CAMSS_JPEG_JPEG1_CLK 86
+#define CAMSS_JPEG_JPEG2_CLK 87
+#define CAMSS_JPEG_JPEG_AHB_CLK 88
+#define CAMSS_JPEG_JPEG_AXI_CLK 89
+#define CAMSS_MCLK0_CLK 90
+#define CAMSS_MCLK1_CLK 91
+#define CAMSS_MCLK2_CLK 92
+#define CAMSS_MCLK3_CLK 93
+#define CAMSS_MICRO_AHB_CLK 94
+#define CAMSS_PHY0_CSI0PHYTIMER_CLK 95
+#define CAMSS_PHY1_CSI1PHYTIMER_CLK 96
+#define CAMSS_PHY2_CSI2PHYTIMER_CLK 97
+#define CAMSS_TOP_AHB_CLK 98
+#define CAMSS_VFE_VFE0_CLK 99
+#define CAMSS_VFE_VFE1_CLK 100
+#define CAMSS_VFE_VFE_AHB_CLK 101
+#define CAMSS_VFE_VFE_AXI_CLK 102
+#define FD_AXI_CLK 103
+#define FD_CORE_CLK 104
+#define FD_CORE_UAR_CLK 105
+#define MDSS_AXI_CLK 106
+#define MDSS_EXTPCLK_CLK 107
+#define MDSS_HDMI_AHB_CLK 108
+#define MDSS_HDMI_CLK 109
+#define MDSS_MDP_CLK 110
+#define MMSS_MISC_AHB_CLK 111
+#define MMSS_MMSSNOC_AXI_CLK 112
+#define MMSS_S0_AXI_CLK 113
+#define OCMEMCX_OCMEMNOC_CLK 114
+#define OXILI_GFX3D_CLK 115
+#define OXILI_RBBMTIMER_CLK 116
+#define OXILICX_AHB_CLK 117
+#define VENUS0_AHB_CLK 118
+#define VENUS0_AXI_CLK 119
+#define VENUS0_OCMEMNOC_CLK 120
+#define VENUS0_VCODEC0_CLK 121
+#define VENUS0_CORE0_VCODEC_CLK 122
+#define VENUS0_CORE1_VCODEC_CLK 123
+#define VENUS0_CORE2_VCODEC_CLK 124
+#define AHB_CLK_SRC 125
+#define FD_AHB_CLK 126
+
+/* GDSCs */
+#define VENUS_GDSC 0
+#define VENUS_CORE0_GDSC 1
+#define VENUS_CORE1_GDSC 2
+#define VENUS_CORE2_GDSC 3
+#define CAMSS_TOP_GDSC 4
+#define MDSS_GDSC 5
+#define JPEG_GDSC 6
+#define VFE_GDSC 7
+#define CPP_GDSC 8
+#define OXILI_GX_GDSC 9
+#define OXILI_CX_GDSC 10
+#define FD_GDSC 11
+
+/* Resets */
+#define CAMSS_MICRO_BCR 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-sdm660.h b/include/dt-bindings/clock/qcom,mmcc-sdm660.h
index f9dbc21cb5c7..ee2a89dae72d 100644
--- a/include/dt-bindings/clock/qcom,mmcc-sdm660.h
+++ b/include/dt-bindings/clock/qcom,mmcc-sdm660.h
@@ -157,6 +157,7 @@

#define BIMC_SMMU_GDSC 7

#define CAMSS_MICRO_BCR 0
+#define MDSS_BCR 1

#endif
diff --git a/include/dt-bindings/clock/qcom,qca8k-nsscc.h b/include/dt-bindings/clock/qcom,qca8k-nsscc.h
new file mode 100644
index 000000000000..0ac3e4c69a1a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qca8k-nsscc.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_QCA8K_NSS_CC_H
+#define _DT_BINDINGS_CLK_QCOM_QCA8K_NSS_CC_H
+
+#define NSS_CC_SWITCH_CORE_CLK_SRC 0
+#define NSS_CC_SWITCH_CORE_CLK 1
+#define NSS_CC_APB_BRIDGE_CLK 2
+#define NSS_CC_MAC0_TX_CLK_SRC 3
+#define NSS_CC_MAC0_TX_DIV_CLK_SRC 4
+#define NSS_CC_MAC0_TX_CLK 5
+#define NSS_CC_MAC0_TX_SRDS1_CLK 6
+#define NSS_CC_MAC0_RX_CLK_SRC 7
+#define NSS_CC_MAC0_RX_DIV_CLK_SRC 8
+#define NSS_CC_MAC0_RX_CLK 9
+#define NSS_CC_MAC0_RX_SRDS1_CLK 10
+#define NSS_CC_MAC1_TX_CLK_SRC 11
+#define NSS_CC_MAC1_TX_DIV_CLK_SRC 12
+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_DIV_CLK_SRC 13
+#define NSS_CC_MAC1_SRDS1_CH0_RX_CLK 14
+#define NSS_CC_MAC1_TX_CLK 15
+#define NSS_CC_MAC1_GEPHY0_TX_CLK 16
+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_CLK 17
+#define NSS_CC_MAC1_RX_CLK_SRC 18
+#define NSS_CC_MAC1_RX_DIV_CLK_SRC 19
+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_DIV_CLK_SRC 20
+#define NSS_CC_MAC1_SRDS1_CH0_TX_CLK 21
+#define NSS_CC_MAC1_RX_CLK 22
+#define NSS_CC_MAC1_GEPHY0_RX_CLK 23
+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_CLK 24
+#define NSS_CC_MAC2_TX_CLK_SRC 25
+#define NSS_CC_MAC2_TX_DIV_CLK_SRC 26
+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_DIV_CLK_SRC 27
+#define NSS_CC_MAC2_SRDS1_CH1_RX_CLK 28
+#define NSS_CC_MAC2_TX_CLK 29
+#define NSS_CC_MAC2_GEPHY1_TX_CLK 30
+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_CLK 31
+#define NSS_CC_MAC2_RX_CLK_SRC 32
+#define NSS_CC_MAC2_RX_DIV_CLK_SRC 33
+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_DIV_CLK_SRC 34
+#define NSS_CC_MAC2_SRDS1_CH1_TX_CLK 35
+#define NSS_CC_MAC2_RX_CLK 36
+#define NSS_CC_MAC2_GEPHY1_RX_CLK 37
+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_CLK 38
+#define NSS_CC_MAC3_TX_CLK_SRC 39
+#define NSS_CC_MAC3_TX_DIV_CLK_SRC 40
+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_DIV_CLK_SRC 41
+#define NSS_CC_MAC3_SRDS1_CH2_RX_CLK 42
+#define NSS_CC_MAC3_TX_CLK 43
+#define NSS_CC_MAC3_GEPHY2_TX_CLK 44
+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_CLK 45
+#define NSS_CC_MAC3_RX_CLK_SRC 46
+#define NSS_CC_MAC3_RX_DIV_CLK_SRC 47
+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_DIV_CLK_SRC 48
+#define NSS_CC_MAC3_SRDS1_CH2_TX_CLK 49
+#define NSS_CC_MAC3_RX_CLK 50
+#define NSS_CC_MAC3_GEPHY2_RX_CLK 51
+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_CLK 52
+#define NSS_CC_MAC4_TX_CLK_SRC 53
+#define NSS_CC_MAC4_TX_DIV_CLK_SRC 54
+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_DIV_CLK_SRC 55
+#define NSS_CC_MAC4_SRDS1_CH3_RX_CLK 56
+#define NSS_CC_MAC4_TX_CLK 57
+#define NSS_CC_MAC4_GEPHY3_TX_CLK 58
+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_CLK 59
+#define NSS_CC_MAC4_RX_CLK_SRC 60
+#define NSS_CC_MAC4_RX_DIV_CLK_SRC 61
+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_DIV_CLK_SRC 62
+#define NSS_CC_MAC4_SRDS1_CH3_TX_CLK 63
+#define NSS_CC_MAC4_RX_CLK 64
+#define NSS_CC_MAC4_GEPHY3_RX_CLK 65
+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_CLK 66
+#define NSS_CC_MAC5_TX_CLK_SRC 67
+#define NSS_CC_MAC5_TX_DIV_CLK_SRC 68
+#define NSS_CC_MAC5_TX_SRDS0_CLK 69
+#define NSS_CC_MAC5_TX_CLK 70
+#define NSS_CC_MAC5_RX_CLK_SRC 71
+#define NSS_CC_MAC5_RX_DIV_CLK_SRC 72
+#define NSS_CC_MAC5_RX_SRDS0_CLK 73
+#define NSS_CC_MAC5_RX_CLK 74
+#define NSS_CC_MAC5_TX_SRDS0_CLK_SRC 75
+#define NSS_CC_MAC5_RX_SRDS0_CLK_SRC 76
+#define NSS_CC_AHB_CLK_SRC 77
+#define NSS_CC_AHB_CLK 78
+#define NSS_CC_SEC_CTRL_AHB_CLK 79
+#define NSS_CC_TLMM_CLK 80
+#define NSS_CC_TLMM_AHB_CLK 81
+#define NSS_CC_CNOC_AHB_CLK 82
+#define NSS_CC_MDIO_AHB_CLK 83
+#define NSS_CC_MDIO_MASTER_AHB_CLK 84
+#define NSS_CC_SYS_CLK_SRC 85
+#define NSS_CC_SRDS0_SYS_CLK 86
+#define NSS_CC_SRDS1_SYS_CLK 87
+#define NSS_CC_GEPHY0_SYS_CLK 88
+#define NSS_CC_GEPHY1_SYS_CLK 89
+#define NSS_CC_GEPHY2_SYS_CLK 90
+#define NSS_CC_GEPHY3_SYS_CLK 91
+#endif
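
The header above only assigns stable indices; a device tree resolves them through the clock controller's phandle plus one cell. A minimal consumer sketch follows — the node label, compatible string, and register address are illustrative assumptions, not taken from this patch:

	/* Hypothetical fragment; label, compatible and reg are assumptions. */
	#include <dt-bindings/clock/qcom,qca8k-nsscc.h>

	nsscc: clock-controller@800000 {
		compatible = "qcom,qca8084-nsscc";	/* assumed */
		reg = <0x800000>;
		#clock-cells = <1>;
	};

	ethernet-switch {	/* hypothetical consumer node */
		clocks = <&nsscc NSS_CC_SWITCH_CORE_CLK>,
			 <&nsscc NSS_CC_APB_BRIDGE_CLK>;
		clock-names = "core", "apb";
	};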
diff --git a/include/dt-bindings/clock/qcom,qcm2290-gpucc.h b/include/dt-bindings/clock/qcom,qcm2290-gpucc.h
new file mode 100644
index 000000000000..7c76dd05278f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcm2290-gpucc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_QCM2290_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_QCM2290_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_GFX3D_CLK 2
+#define GPU_CC_CX_GMU_CLK 3
+#define GPU_CC_CX_SNOC_DVM_CLK 4
+#define GPU_CC_CXO_AON_CLK 5
+#define GPU_CC_CXO_CLK 6
+#define GPU_CC_GMU_CLK_SRC 7
+#define GPU_CC_GX_GFX3D_CLK 8
+#define GPU_CC_GX_GFX3D_CLK_SRC 9
+#define GPU_CC_PLL0 10
+#define GPU_CC_SLEEP_CLK 11
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 12
+
+/* Resets */
+#define GPU_GX_BCR 0
+
+/* GDSCs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs615-camcc.h b/include/dt-bindings/clock/qcom,qcs615-camcc.h
new file mode 100644
index 000000000000..aec57dddc067
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-camcc.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_QCS615_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_AREG_CLK 1
+#define CAM_CC_BPS_AXI_CLK 2
+#define CAM_CC_BPS_CLK 3
+#define CAM_CC_BPS_CLK_SRC 4
+#define CAM_CC_CAMNOC_ATB_CLK 5
+#define CAM_CC_CAMNOC_AXI_CLK 6
+#define CAM_CC_CCI_CLK 7
+#define CAM_CC_CCI_CLK_SRC 8
+#define CAM_CC_CORE_AHB_CLK 9
+#define CAM_CC_CPAS_AHB_CLK 10
+#define CAM_CC_CPHY_RX_CLK_SRC 11
+#define CAM_CC_CSI0PHYTIMER_CLK 12
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 13
+#define CAM_CC_CSI1PHYTIMER_CLK 14
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 15
+#define CAM_CC_CSI2PHYTIMER_CLK 16
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 17
+#define CAM_CC_CSIPHY0_CLK 18
+#define CAM_CC_CSIPHY1_CLK 19
+#define CAM_CC_CSIPHY2_CLK 20
+#define CAM_CC_FAST_AHB_CLK_SRC 21
+#define CAM_CC_ICP_ATB_CLK 22
+#define CAM_CC_ICP_CLK 23
+#define CAM_CC_ICP_CLK_SRC 24
+#define CAM_CC_ICP_CTI_CLK 25
+#define CAM_CC_ICP_TS_CLK 26
+#define CAM_CC_IFE_0_AXI_CLK 27
+#define CAM_CC_IFE_0_CLK 28
+#define CAM_CC_IFE_0_CLK_SRC 29
+#define CAM_CC_IFE_0_CPHY_RX_CLK 30
+#define CAM_CC_IFE_0_CSID_CLK 31
+#define CAM_CC_IFE_0_CSID_CLK_SRC 32
+#define CAM_CC_IFE_0_DSP_CLK 33
+#define CAM_CC_IFE_1_AXI_CLK 34
+#define CAM_CC_IFE_1_CLK 35
+#define CAM_CC_IFE_1_CLK_SRC 36
+#define CAM_CC_IFE_1_CPHY_RX_CLK 37
+#define CAM_CC_IFE_1_CSID_CLK 38
+#define CAM_CC_IFE_1_CSID_CLK_SRC 39
+#define CAM_CC_IFE_1_DSP_CLK 40
+#define CAM_CC_IFE_LITE_CLK 41
+#define CAM_CC_IFE_LITE_CLK_SRC 42
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 43
+#define CAM_CC_IFE_LITE_CSID_CLK 44
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 45
+#define CAM_CC_IPE_0_AHB_CLK 46
+#define CAM_CC_IPE_0_AREG_CLK 47
+#define CAM_CC_IPE_0_AXI_CLK 48
+#define CAM_CC_IPE_0_CLK 49
+#define CAM_CC_IPE_0_CLK_SRC 50
+#define CAM_CC_JPEG_CLK 51
+#define CAM_CC_JPEG_CLK_SRC 52
+#define CAM_CC_LRME_CLK 53
+#define CAM_CC_LRME_CLK_SRC 54
+#define CAM_CC_MCLK0_CLK 55
+#define CAM_CC_MCLK0_CLK_SRC 56
+#define CAM_CC_MCLK1_CLK 57
+#define CAM_CC_MCLK1_CLK_SRC 58
+#define CAM_CC_MCLK2_CLK 59
+#define CAM_CC_MCLK2_CLK_SRC 60
+#define CAM_CC_MCLK3_CLK 61
+#define CAM_CC_MCLK3_CLK_SRC 62
+#define CAM_CC_PLL0 63
+#define CAM_CC_PLL1 64
+#define CAM_CC_PLL2 65
+#define CAM_CC_PLL2_OUT_AUX2 66
+#define CAM_CC_PLL3 67
+#define CAM_CC_SLOW_AHB_CLK_SRC 68
+#define CAM_CC_SOC_AHB_CLK 69
+#define CAM_CC_SYS_TMR_CLK 70
+
+/* CAM_CC power domains */
+#define BPS_GDSC 0
+#define IFE_0_GDSC 1
+#define IFE_1_GDSC 2
+#define IPE_0_GDSC 3
+#define TITAN_TOP_GDSC 4
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CCI_BCR 2
+#define CAM_CC_CPAS_BCR 3
+#define CAM_CC_CSI0PHY_BCR 4
+#define CAM_CC_CSI1PHY_BCR 5
+#define CAM_CC_CSI2PHY_BCR 6
+#define CAM_CC_ICP_BCR 7
+#define CAM_CC_IFE_0_BCR 8
+#define CAM_CC_IFE_1_BCR 9
+#define CAM_CC_IFE_LITE_BCR 10
+#define CAM_CC_IPE_0_BCR 11
+#define CAM_CC_JPEG_BCR 12
+#define CAM_CC_LRME_BCR 13
+#define CAM_CC_MCLK0_BCR 14
+#define CAM_CC_MCLK1_BCR 15
+#define CAM_CC_MCLK2_BCR 16
+#define CAM_CC_MCLK3_BCR 17
+#define CAM_CC_TITAN_TOP_BCR 18
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs615-dispcc.h b/include/dt-bindings/clock/qcom,qcs615-dispcc.h
new file mode 100644
index 000000000000..9a29945c5762
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-dispcc.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_QCS615_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_AHB_CLK 0
+#define DISP_CC_MDSS_AHB_CLK_SRC 1
+#define DISP_CC_MDSS_BYTE0_CLK 2
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 5
+#define DISP_CC_MDSS_DP_AUX_CLK 6
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 7
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 8
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 9
+#define DISP_CC_MDSS_DP_LINK_CLK 10
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 11
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 12
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 13
+#define DISP_CC_MDSS_DP_PIXEL1_CLK 14
+#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC 15
+#define DISP_CC_MDSS_DP_PIXEL_CLK 16
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 17
+#define DISP_CC_MDSS_ESC0_CLK 18
+#define DISP_CC_MDSS_ESC0_CLK_SRC 19
+#define DISP_CC_MDSS_MDP_CLK 20
+#define DISP_CC_MDSS_MDP_CLK_SRC 21
+#define DISP_CC_MDSS_MDP_LUT_CLK 22
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 23
+#define DISP_CC_MDSS_PCLK0_CLK 24
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 25
+#define DISP_CC_MDSS_ROT_CLK 26
+#define DISP_CC_MDSS_ROT_CLK_SRC 27
+#define DISP_CC_MDSS_RSCC_AHB_CLK 28
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 29
+#define DISP_CC_MDSS_VSYNC_CLK 30
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 31
+#define DISP_CC_PLL0 32
+#define DISP_CC_XO_CLK 33
+
+/* DISP_CC power domains */
+#define MDSS_CORE_GDSC 0
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_RSCC_BCR 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs615-gcc.h b/include/dt-bindings/clock/qcom,qcs615-gcc.h
new file mode 100644
index 000000000000..9704091636b8
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-gcc.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_QCS615_H
+
+/* GCC clocks */
+#define GPLL0_OUT_AUX2_DIV 0
+#define GPLL3_OUT_AUX2_DIV 1
+#define GPLL0 2
+#define GPLL3 3
+#define GPLL4 4
+#define GPLL6 5
+#define GPLL6_OUT_MAIN 6
+#define GPLL7 7
+#define GPLL8 8
+#define GPLL8_OUT_MAIN 9
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 10
+#define GCC_AGGRE_USB2_SEC_AXI_CLK 11
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 12
+#define GCC_AHB2PHY_EAST_CLK 13
+#define GCC_AHB2PHY_WEST_CLK 14
+#define GCC_BOOT_ROM_AHB_CLK 15
+#define GCC_CAMERA_AHB_CLK 16
+#define GCC_CAMERA_HF_AXI_CLK 17
+#define GCC_CAMERA_XO_CLK 18
+#define GCC_CE1_AHB_CLK 19
+#define GCC_CE1_AXI_CLK 20
+#define GCC_CE1_CLK 21
+#define GCC_CFG_NOC_USB2_SEC_AXI_CLK 22
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 23
+#define GCC_CPUSS_AHB_CLK 24
+#define GCC_CPUSS_AHB_CLK_SRC 25
+#define GCC_CPUSS_GNOC_CLK 26
+#define GCC_DDRSS_GPU_AXI_CLK 27
+#define GCC_DISP_AHB_CLK 28
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 29
+#define GCC_DISP_HF_AXI_CLK 30
+#define GCC_DISP_XO_CLK 31
+#define GCC_EMAC_AXI_CLK 32
+#define GCC_EMAC_PTP_CLK 33
+#define GCC_EMAC_PTP_CLK_SRC 34
+#define GCC_EMAC_RGMII_CLK 35
+#define GCC_EMAC_RGMII_CLK_SRC 36
+#define GCC_EMAC_SLV_AHB_CLK 37
+#define GCC_GP1_CLK 38
+#define GCC_GP1_CLK_SRC 39
+#define GCC_GP2_CLK 40
+#define GCC_GP2_CLK_SRC 41
+#define GCC_GP3_CLK 42
+#define GCC_GP3_CLK_SRC 43
+#define GCC_GPU_CFG_AHB_CLK 44
+#define GCC_GPU_GPLL0_CLK_SRC 45
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 46
+#define GCC_GPU_IREF_CLK 47
+#define GCC_GPU_MEMNOC_GFX_CLK 48
+#define GCC_GPU_SNOC_DVM_GFX_CLK 49
+#define GCC_PCIE0_PHY_REFGEN_CLK 50
+#define GCC_PCIE_0_AUX_CLK 51
+#define GCC_PCIE_0_AUX_CLK_SRC 52
+#define GCC_PCIE_0_CFG_AHB_CLK 53
+#define GCC_PCIE_0_CLKREF_CLK 54
+#define GCC_PCIE_0_MSTR_AXI_CLK 55
+#define GCC_PCIE_0_PIPE_CLK 56
+#define GCC_PCIE_0_SLV_AXI_CLK 57
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 58
+#define GCC_PCIE_PHY_AUX_CLK 59
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 60
+#define GCC_PDM2_CLK 61
+#define GCC_PDM2_CLK_SRC 62
+#define GCC_PDM_AHB_CLK 63
+#define GCC_PDM_XO4_CLK 64
+#define GCC_PRNG_AHB_CLK 65
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 66
+#define GCC_QMIP_DISP_AHB_CLK 67
+#define GCC_QMIP_PCIE_AHB_CLK 68
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 69
+#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 70
+#define GCC_QSPI_CORE_CLK 71
+#define GCC_QSPI_CORE_CLK_SRC 72
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 73
+#define GCC_QUPV3_WRAP0_CORE_CLK 74
+#define GCC_QUPV3_WRAP0_S0_CLK 75
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 76
+#define GCC_QUPV3_WRAP0_S1_CLK 77
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 78
+#define GCC_QUPV3_WRAP0_S2_CLK 79
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 80
+#define GCC_QUPV3_WRAP0_S3_CLK 81
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 82
+#define GCC_QUPV3_WRAP0_S4_CLK 83
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 84
+#define GCC_QUPV3_WRAP0_S5_CLK 85
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 86
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 87
+#define GCC_QUPV3_WRAP1_CORE_CLK 88
+#define GCC_QUPV3_WRAP1_S0_CLK 89
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 90
+#define GCC_QUPV3_WRAP1_S1_CLK 91
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 92
+#define GCC_QUPV3_WRAP1_S2_CLK 93
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 94
+#define GCC_QUPV3_WRAP1_S3_CLK 95
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 96
+#define GCC_QUPV3_WRAP1_S4_CLK 97
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 98
+#define GCC_QUPV3_WRAP1_S5_CLK 99
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 100
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 101
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 102
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 103
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 104
+#define GCC_RX1_USB2_CLKREF_CLK 105
+#define GCC_RX3_USB2_CLKREF_CLK 106
+#define GCC_SDCC1_AHB_CLK 107
+#define GCC_SDCC1_APPS_CLK 108
+#define GCC_SDCC1_APPS_CLK_SRC 109
+#define GCC_SDCC1_ICE_CORE_CLK 110
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 111
+#define GCC_SDCC2_AHB_CLK 112
+#define GCC_SDCC2_APPS_CLK 113
+#define GCC_SDCC2_APPS_CLK_SRC 114
+#define GCC_SDR_CORE_CLK 115
+#define GCC_SDR_CSR_HCLK 116
+#define GCC_SDR_PRI_MI2S_CLK 117
+#define GCC_SDR_SEC_MI2S_CLK 118
+#define GCC_SDR_WR0_MEM_CLK 119
+#define GCC_SDR_WR1_MEM_CLK 120
+#define GCC_SDR_WR2_MEM_CLK 121
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 122
+#define GCC_UFS_CARD_CLKREF_CLK 123
+#define GCC_UFS_MEM_CLKREF_CLK 124
+#define GCC_UFS_PHY_AHB_CLK 125
+#define GCC_UFS_PHY_AXI_CLK 126
+#define GCC_UFS_PHY_AXI_CLK_SRC 127
+#define GCC_UFS_PHY_ICE_CORE_CLK 128
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 129
+#define GCC_UFS_PHY_PHY_AUX_CLK 130
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 131
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 132
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 133
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 134
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 135
+#define GCC_USB20_SEC_MASTER_CLK 136
+#define GCC_USB20_SEC_MASTER_CLK_SRC 137
+#define GCC_USB20_SEC_MOCK_UTMI_CLK 138
+#define GCC_USB20_SEC_MOCK_UTMI_CLK_SRC 139
+#define GCC_USB20_SEC_SLEEP_CLK 140
+#define GCC_USB2_PRIM_CLKREF_CLK 141
+#define GCC_USB2_SEC_CLKREF_CLK 142
+#define GCC_USB2_SEC_PHY_AUX_CLK 143
+#define GCC_USB2_SEC_PHY_AUX_CLK_SRC 144
+#define GCC_USB2_SEC_PHY_COM_AUX_CLK 145
+#define GCC_USB2_SEC_PHY_PIPE_CLK 146
+#define GCC_USB30_PRIM_MASTER_CLK 147
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 148
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 149
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 150
+#define GCC_USB30_PRIM_SLEEP_CLK 151
+#define GCC_USB3_PRIM_CLKREF_CLK 152
+#define GCC_USB3_PRIM_PHY_AUX_CLK 153
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 154
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 155
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 156
+#define GCC_USB3_SEC_CLKREF_CLK 157
+#define GCC_VIDEO_AHB_CLK 158
+#define GCC_VIDEO_AXI0_CLK 159
+#define GCC_VIDEO_XO_CLK 160
+#define GCC_VSENSOR_CLK_SRC 161
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 162
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 163
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 164
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 165
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 166
+
+/* GCC Resets */
+#define GCC_EMAC_BCR 0
+#define GCC_QUSB2PHY_PRIM_BCR 1
+#define GCC_QUSB2PHY_SEC_BCR 2
+#define GCC_USB30_PRIM_BCR 3
+#define GCC_USB2_PHY_SEC_BCR 4
+#define GCC_USB3_DP_PHY_SEC_BCR 5
+#define GCC_USB3PHY_PHY_SEC_BCR 6
+#define GCC_PCIE_0_BCR 7
+#define GCC_PCIE_0_PHY_BCR 8
+#define GCC_PCIE_PHY_BCR 9
+#define GCC_PCIE_PHY_COM_BCR 10
+#define GCC_UFS_PHY_BCR 11
+#define GCC_USB20_SEC_BCR 12
+#define GCC_USB3_PHY_PRIM_SP0_BCR 13
+#define GCC_USB3PHY_PHY_PRIM_SP0_BCR 14
+#define GCC_SDCC1_BCR 15
+#define GCC_SDCC2_BCR 16
+
+/* GCC power domains */
+#define EMAC_GDSC 0
+#define PCIE_0_GDSC 1
+#define UFS_PHY_GDSC 2
+#define USB20_SEC_GDSC 3
+#define USB30_PRIM_GDSC 4
+#define HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC 5
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC 6
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC 7
+#define HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC 8
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 9
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC 10
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 11
+
+#endif
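
Note that the QCS615 GCC header defines three independent index spaces — clocks, resets (BCRs), and power domains (GDSCs) — all addressed through the same controller node via #clock-cells, #reset-cells and #power-domain-cells. A hedged consumer sketch; the SDHC node and register addresses are assumptions, only the GCC_* indices come from the header above:

	#include <dt-bindings/clock/qcom,qcs615-gcc.h>

	gcc: clock-controller@100000 {
		compatible = "qcom,qcs615-gcc";
		reg = <0x100000 0x1f0000>;	/* assumed address/size */
		#clock-cells = <1>;
		#reset-cells = <1>;
		#power-domain-cells = <1>;
	};

	mmc@8804000 {	/* hypothetical SDHC node */
		clocks = <&gcc GCC_SDCC2_AHB_CLK>,
			 <&gcc GCC_SDCC2_APPS_CLK>;
		clock-names = "iface", "core";
		resets = <&gcc GCC_SDCC2_BCR>;	/* reset index 16 above */
	};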
diff --git a/include/dt-bindings/clock/qcom,qcs615-gpucc.h b/include/dt-bindings/clock/qcom,qcs615-gpucc.h
new file mode 100644
index 000000000000..6d8394b90d59
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-gpucc.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_QCS615_H
+
+/* GPU_CC clocks */
+#define CRC_DIV_PLL0 0
+#define CRC_DIV_PLL1 1
+#define GPU_CC_PLL0 2
+#define GPU_CC_PLL1 3
+#define GPU_CC_CRC_AHB_CLK 4
+#define GPU_CC_CX_GFX3D_CLK 5
+#define GPU_CC_CX_GFX3D_SLV_CLK 6
+#define GPU_CC_CX_GMU_CLK 7
+#define GPU_CC_CX_SNOC_DVM_CLK 8
+#define GPU_CC_CXO_AON_CLK 9
+#define GPU_CC_CXO_CLK 10
+#define GPU_CC_GMU_CLK_SRC 11
+#define GPU_CC_GX_GFX3D_CLK 12
+#define GPU_CC_GX_GFX3D_CLK_SRC 13
+#define GPU_CC_GX_GMU_CLK 14
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 15
+#define GPU_CC_SLEEP_CLK 16
+
+/* GPU_CC power domains */
+#define CX_GDSC 0
+#define GX_GDSC 1
+
+/* GPU_CC resets */
+#define GPU_CC_CX_BCR 0
+#define GPU_CC_GFX3D_AON_BCR 1
+#define GPU_CC_GMU_BCR 2
+#define GPU_CC_GX_BCR 3
+#define GPU_CC_XO_BCR 4
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs615-videocc.h b/include/dt-bindings/clock/qcom,qcs615-videocc.h
new file mode 100644
index 000000000000..0ca3efb21103
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-videocc.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_QCS615_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_SLEEP_CLK 0
+#define VIDEO_CC_SLEEP_CLK_SRC 1
+#define VIDEO_CC_VCODEC0_AXI_CLK 2
+#define VIDEO_CC_VCODEC0_CORE_CLK 3
+#define VIDEO_CC_VENUS_AHB_CLK 4
+#define VIDEO_CC_VENUS_CLK_SRC 5
+#define VIDEO_CC_VENUS_CTL_AXI_CLK 6
+#define VIDEO_CC_VENUS_CTL_CORE_CLK 7
+#define VIDEO_CC_XO_CLK 8
+#define VIDEO_PLL0 9
+
+/* VIDEO_CC power domains */
+#define VCODEC0_GDSC 0
+#define VENUS_GDSC 1
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_INTERFACE_BCR 0
+#define VIDEO_CC_VCODEC0_BCR 1
+#define VIDEO_CC_VENUS_BCR 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs8300-camcc.h b/include/dt-bindings/clock/qcom,qcs8300-camcc.h
new file mode 100644
index 000000000000..fc535c847859
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs8300-camcc.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_QCS8300_CAM_CC_H
+#define _DT_BINDINGS_CLK_QCOM_QCS8300_CAM_CC_H
+
+#include "qcom,sa8775p-camcc.h"
+
+/* QCS8300 introduces the following new clocks compared to SA8775P */
+
+/* CAM_CC clocks */
+#define CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK 86
+
+#endif
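
This header demonstrates the include-and-extend pattern: it pulls in qcom,sa8775p-camcc.h, whose indices end at CAM_CC_QDSS_DEBUG_XO_CLK (85) further down in this patch, and appends the QCS8300-only clock at index 86, so both SoCs share one numbering scheme. A hedged sketch of a consumer mixing an inherited index with the new one; the camera node and phandle label are assumptions:

	#include <dt-bindings/clock/qcom,qcs8300-camcc.h>

	camera {	/* hypothetical consumer node */
		/* CAM_CC_ICP_CLK (40) is inherited from the SA8775P header;
		 * CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK (86) is the QCS8300 addition. */
		clocks = <&camcc CAM_CC_ICP_CLK>,
			 <&camcc CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK>;
	};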
diff --git a/include/dt-bindings/clock/qcom,qcs8300-gcc.h b/include/dt-bindings/clock/qcom,qcs8300-gcc.h
new file mode 100644
index 000000000000..a0083b1d2126
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs8300-gcc.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QCS8300_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_QCS8300_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL1 2
+#define GCC_GPLL4 3
+#define GCC_GPLL7 4
+#define GCC_GPLL9 5
+#define GCC_AGGRE_NOC_QUPV3_AXI_CLK 6
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 7
+#define GCC_AGGRE_USB2_PRIM_AXI_CLK 8
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 9
+#define GCC_AHB2PHY0_CLK 10
+#define GCC_AHB2PHY2_CLK 11
+#define GCC_AHB2PHY3_CLK 12
+#define GCC_BOOT_ROM_AHB_CLK 13
+#define GCC_CAMERA_AHB_CLK 14
+#define GCC_CAMERA_HF_AXI_CLK 15
+#define GCC_CAMERA_SF_AXI_CLK 16
+#define GCC_CAMERA_THROTTLE_XO_CLK 17
+#define GCC_CAMERA_XO_CLK 18
+#define GCC_CFG_NOC_USB2_PRIM_AXI_CLK 19
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 20
+#define GCC_DDRSS_GPU_AXI_CLK 21
+#define GCC_DISP_AHB_CLK 22
+#define GCC_DISP_HF_AXI_CLK 23
+#define GCC_DISP_XO_CLK 24
+#define GCC_EDP_REF_CLKREF_EN 25
+#define GCC_EMAC0_AXI_CLK 26
+#define GCC_EMAC0_PHY_AUX_CLK 27
+#define GCC_EMAC0_PHY_AUX_CLK_SRC 28
+#define GCC_EMAC0_PTP_CLK 29
+#define GCC_EMAC0_PTP_CLK_SRC 30
+#define GCC_EMAC0_RGMII_CLK 31
+#define GCC_EMAC0_RGMII_CLK_SRC 32
+#define GCC_EMAC0_SLV_AHB_CLK 33
+#define GCC_GP1_CLK 34
+#define GCC_GP1_CLK_SRC 35
+#define GCC_GP2_CLK 36
+#define GCC_GP2_CLK_SRC 37
+#define GCC_GP3_CLK 38
+#define GCC_GP3_CLK_SRC 39
+#define GCC_GP4_CLK 40
+#define GCC_GP4_CLK_SRC 41
+#define GCC_GP5_CLK 42
+#define GCC_GP5_CLK_SRC 43
+#define GCC_GPU_CFG_AHB_CLK 44
+#define GCC_GPU_GPLL0_CLK_SRC 45
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 46
+#define GCC_GPU_MEMNOC_GFX_CENTER_PIPELINE_CLK 47
+#define GCC_GPU_MEMNOC_GFX_CLK 48
+#define GCC_GPU_SNOC_DVM_GFX_CLK 49
+#define GCC_GPU_TCU_THROTTLE_AHB_CLK 50
+#define GCC_GPU_TCU_THROTTLE_CLK 51
+#define GCC_PCIE_0_AUX_CLK 52
+#define GCC_PCIE_0_AUX_CLK_SRC 53
+#define GCC_PCIE_0_CFG_AHB_CLK 54
+#define GCC_PCIE_0_MSTR_AXI_CLK 55
+#define GCC_PCIE_0_PHY_AUX_CLK 56
+#define GCC_PCIE_0_PHY_AUX_CLK_SRC 57
+#define GCC_PCIE_0_PHY_RCHNG_CLK 58
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 59
+#define GCC_PCIE_0_PIPE_CLK 60
+#define GCC_PCIE_0_PIPE_CLK_SRC 61
+#define GCC_PCIE_0_PIPE_DIV_CLK_SRC 62
+#define GCC_PCIE_0_PIPEDIV2_CLK 63
+#define GCC_PCIE_0_SLV_AXI_CLK 64
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 65
+#define GCC_PCIE_1_AUX_CLK 66
+#define GCC_PCIE_1_AUX_CLK_SRC 67
+#define GCC_PCIE_1_CFG_AHB_CLK 68
+#define GCC_PCIE_1_MSTR_AXI_CLK 69
+#define GCC_PCIE_1_PHY_AUX_CLK 70
+#define GCC_PCIE_1_PHY_AUX_CLK_SRC 71
+#define GCC_PCIE_1_PHY_RCHNG_CLK 72
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 73
+#define GCC_PCIE_1_PIPE_CLK 74
+#define GCC_PCIE_1_PIPE_CLK_SRC 75
+#define GCC_PCIE_1_PIPE_DIV_CLK_SRC 76
+#define GCC_PCIE_1_PIPEDIV2_CLK 77
+#define GCC_PCIE_1_SLV_AXI_CLK 78
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 79
+#define GCC_PCIE_CLKREF_EN 80
+#define GCC_PCIE_THROTTLE_CFG_CLK 81
+#define GCC_PDM2_CLK 82
+#define GCC_PDM2_CLK_SRC 83
+#define GCC_PDM_AHB_CLK 84
+#define GCC_PDM_XO4_CLK 85
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 86
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 87
+#define GCC_QMIP_DISP_AHB_CLK 88
+#define GCC_QMIP_DISP_ROT_AHB_CLK 89
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 90
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 91
+#define GCC_QMIP_VIDEO_VCPU_AHB_CLK 92
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 93
+#define GCC_QUPV3_WRAP0_CORE_CLK 94
+#define GCC_QUPV3_WRAP0_S0_CLK 95
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 96
+#define GCC_QUPV3_WRAP0_S1_CLK 97
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 98
+#define GCC_QUPV3_WRAP0_S2_CLK 99
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 100
+#define GCC_QUPV3_WRAP0_S3_CLK 101
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 102
+#define GCC_QUPV3_WRAP0_S4_CLK 103
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 104
+#define GCC_QUPV3_WRAP0_S5_CLK 105
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 106
+#define GCC_QUPV3_WRAP0_S6_CLK 107
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 108
+#define GCC_QUPV3_WRAP0_S7_CLK 109
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 110
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 111
+#define GCC_QUPV3_WRAP1_CORE_CLK 112
+#define GCC_QUPV3_WRAP1_S0_CLK 113
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 114
+#define GCC_QUPV3_WRAP1_S1_CLK 115
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 116
+#define GCC_QUPV3_WRAP1_S2_CLK 117
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 118
+#define GCC_QUPV3_WRAP1_S3_CLK 119
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 120
+#define GCC_QUPV3_WRAP1_S4_CLK 121
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 122
+#define GCC_QUPV3_WRAP1_S5_CLK 123
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 124
+#define GCC_QUPV3_WRAP1_S6_CLK 125
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 126
+#define GCC_QUPV3_WRAP1_S7_CLK 127
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 128
+#define GCC_QUPV3_WRAP3_CORE_2X_CLK 129
+#define GCC_QUPV3_WRAP3_CORE_CLK 130
+#define GCC_QUPV3_WRAP3_QSPI_CLK 131
+#define GCC_QUPV3_WRAP3_S0_CLK 132
+#define GCC_QUPV3_WRAP3_S0_CLK_SRC 133
+#define GCC_QUPV3_WRAP3_S0_DIV_CLK_SRC 134
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 135
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 136
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 137
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 138
+#define GCC_QUPV3_WRAP_3_M_AHB_CLK 139
+#define GCC_QUPV3_WRAP_3_S_AHB_CLK 140
+#define GCC_SDCC1_AHB_CLK 141
+#define GCC_SDCC1_APPS_CLK 142
+#define GCC_SDCC1_APPS_CLK_SRC 143
+#define GCC_SDCC1_ICE_CORE_CLK 144
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 145
+#define GCC_SGMI_CLKREF_EN 146
+#define GCC_UFS_PHY_AHB_CLK 147
+#define GCC_UFS_PHY_AXI_CLK 148
+#define GCC_UFS_PHY_AXI_CLK_SRC 149
+#define GCC_UFS_PHY_ICE_CORE_CLK 150
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 151
+#define GCC_UFS_PHY_PHY_AUX_CLK 152
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 153
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 154
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 155
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 156
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 157
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 158
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 159
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 160
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 161
+#define GCC_USB20_MASTER_CLK 162
+#define GCC_USB20_MASTER_CLK_SRC 163
+#define GCC_USB20_MOCK_UTMI_CLK 164
+#define GCC_USB20_MOCK_UTMI_CLK_SRC 165
+#define GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC 166
+#define GCC_USB20_SLEEP_CLK 167
+#define GCC_USB30_PRIM_MASTER_CLK 168
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 169
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 170
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 171
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 172
+#define GCC_USB30_PRIM_SLEEP_CLK 173
+#define GCC_USB3_PRIM_PHY_AUX_CLK 174
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 175
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 176
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 177
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 178
+#define GCC_USB_CLKREF_EN 179
+#define GCC_VIDEO_AHB_CLK 180
+#define GCC_VIDEO_AXI0_CLK 181
+#define GCC_VIDEO_AXI1_CLK 182
+#define GCC_VIDEO_XO_CLK 183
+
+/* GCC power domains */
+#define GCC_EMAC0_GDSC 0
+#define GCC_PCIE_0_GDSC 1
+#define GCC_PCIE_1_GDSC 2
+#define GCC_UFS_PHY_GDSC 3
+#define GCC_USB20_PRIM_GDSC 4
+#define GCC_USB30_PRIM_GDSC 5
+
+/* GCC resets */
+#define GCC_EMAC0_BCR 0
+#define GCC_PCIE_0_BCR 1
+#define GCC_PCIE_0_LINK_DOWN_BCR 2
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 3
+#define GCC_PCIE_0_PHY_BCR 4
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_1_BCR 6
+#define GCC_PCIE_1_LINK_DOWN_BCR 7
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_1_PHY_BCR 9
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 10
+#define GCC_SDCC1_BCR 11
+#define GCC_UFS_PHY_BCR 12
+#define GCC_USB20_PRIM_BCR 13
+#define GCC_USB2_PHY_PRIM_BCR 14
+#define GCC_USB2_PHY_SEC_BCR 15
+#define GCC_USB30_PRIM_BCR 16
+#define GCC_USB3_DP_PHY_PRIM_BCR 17
+#define GCC_USB3_PHY_PRIM_BCR 18
+#define GCC_USB3_PHY_TERT_BCR 19
+#define GCC_USB3_UNIPHY_MP0_BCR 20
+#define GCC_USB3_UNIPHY_MP1_BCR 21
+#define GCC_USB3PHY_PHY_PRIM_BCR 22
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 23
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 24
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 25
+#define GCC_VIDEO_BCR 26
+#define GCC_VIDEO_AXI0_CLK_ARES 27
+#define GCC_VIDEO_AXI1_CLK_ARES 28
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs8300-gpucc.h b/include/dt-bindings/clock/qcom,qcs8300-gpucc.h
new file mode 100644
index 000000000000..afa187467b4c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs8300-gpucc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPUCC_QCS8300_H
+#define _DT_BINDINGS_CLK_QCOM_GPUCC_QCS8300_H
+
+#include "qcom,sa8775p-gpucc.h"
+
+/* QCS8300 introduces the following new clocks compared to SA8775P */
+
+/* GPU_CC clocks */
+#define GPU_CC_CX_ACCU_SHIFT_CLK 23
+#define GPU_CC_GX_ACCU_SHIFT_CLK 24
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qdu1000-ecpricc.h b/include/dt-bindings/clock/qcom,qdu1000-ecpricc.h
new file mode 100644
index 000000000000..731e404a2ce6
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qdu1000-ecpricc.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_ECPRI_CC_QDU1000_H
+#define _DT_BINDINGS_CLK_QCOM_ECPRI_CC_QDU1000_H
+
+/* ECPRI_CC clocks */
+#define ECPRI_CC_PLL0 0
+#define ECPRI_CC_PLL1 1
+#define ECPRI_CC_ECPRI_CG_CLK 2
+#define ECPRI_CC_ECPRI_CLK_SRC 3
+#define ECPRI_CC_ECPRI_DMA_CLK 4
+#define ECPRI_CC_ECPRI_DMA_CLK_SRC 5
+#define ECPRI_CC_ECPRI_DMA_NOC_CLK 6
+#define ECPRI_CC_ECPRI_FAST_CLK 7
+#define ECPRI_CC_ECPRI_FAST_CLK_SRC 8
+#define ECPRI_CC_ECPRI_FAST_DIV2_CLK 9
+#define ECPRI_CC_ECPRI_FAST_DIV2_CLK_SRC 10
+#define ECPRI_CC_ECPRI_FAST_DIV2_NOC_CLK 11
+#define ECPRI_CC_ECPRI_FR_CLK 12
+#define ECPRI_CC_ECPRI_ORAN_CLK_SRC 13
+#define ECPRI_CC_ECPRI_ORAN_DIV2_CLK 14
+#define ECPRI_CC_ETH_100G_C2C0_HM_FF_CLK_SRC 15
+#define ECPRI_CC_ETH_100G_C2C0_UDP_FIFO_CLK 16
+#define ECPRI_CC_ETH_100G_C2C1_UDP_FIFO_CLK 17
+#define ECPRI_CC_ETH_100G_C2C_0_HM_FF_0_CLK 18
+#define ECPRI_CC_ETH_100G_C2C_0_HM_FF_1_CLK 19
+#define ECPRI_CC_ETH_100G_C2C_HM_FF_0_DIV_CLK_SRC 20
+#define ECPRI_CC_ETH_100G_C2C_HM_FF_1_DIV_CLK_SRC 21
+#define ECPRI_CC_ETH_100G_C2C_HM_MACSEC_CLK 22
+#define ECPRI_CC_ETH_100G_C2C_HM_MACSEC_CLK_SRC 23
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_0_CLK 24
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_0_DIV_CLK_SRC 25
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_1_CLK 26
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_1_DIV_CLK_SRC 27
+#define ECPRI_CC_ETH_100G_DBG_C2C_HM_FF_CLK_SRC 28
+#define ECPRI_CC_ETH_100G_DBG_C2C_UDP_FIFO_CLK 29
+#define ECPRI_CC_ETH_100G_FH0_HM_FF_CLK_SRC 30
+#define ECPRI_CC_ETH_100G_FH0_MACSEC_CLK_SRC 31
+#define ECPRI_CC_ETH_100G_FH1_HM_FF_CLK_SRC 32
+#define ECPRI_CC_ETH_100G_FH1_MACSEC_CLK_SRC 33
+#define ECPRI_CC_ETH_100G_FH2_HM_FF_CLK_SRC 34
+#define ECPRI_CC_ETH_100G_FH2_MACSEC_CLK_SRC 35
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_0_CLK 36
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_0_DIV_CLK_SRC 37
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_1_CLK 38
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_1_DIV_CLK_SRC 39
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_2_CLK 40
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_2_DIV_CLK_SRC 41
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_3_CLK 42
+#define ECPRI_CC_ETH_100G_FH_0_HM_FF_3_DIV_CLK_SRC 43
+#define ECPRI_CC_ETH_100G_FH_0_UDP_FIFO_CLK 44
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_0_CLK 45
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_0_DIV_CLK_SRC 46
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_1_CLK 47
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_1_DIV_CLK_SRC 48
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_2_CLK 49
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_2_DIV_CLK_SRC 50
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_3_CLK 51
+#define ECPRI_CC_ETH_100G_FH_1_HM_FF_3_DIV_CLK_SRC 52
+#define ECPRI_CC_ETH_100G_FH_1_UDP_FIFO_CLK 53
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_0_CLK 54
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_0_DIV_CLK_SRC 55
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_1_CLK 56
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_1_DIV_CLK_SRC 57
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_2_CLK 58
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_2_DIV_CLK_SRC 59
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_3_CLK 60
+#define ECPRI_CC_ETH_100G_FH_2_HM_FF_3_DIV_CLK_SRC 61
+#define ECPRI_CC_ETH_100G_FH_2_UDP_FIFO_CLK 62
+#define ECPRI_CC_ETH_100G_FH_MACSEC_0_CLK 63
+#define ECPRI_CC_ETH_100G_FH_MACSEC_1_CLK 64
+#define ECPRI_CC_ETH_100G_FH_MACSEC_2_CLK 65
+#define ECPRI_CC_ETH_100G_MAC_C2C_HM_REF_CLK 66
+#define ECPRI_CC_ETH_100G_MAC_C2C_HM_REF_CLK_SRC 67
+#define ECPRI_CC_ETH_100G_MAC_DBG_C2C_HM_REF_CLK 68
+#define ECPRI_CC_ETH_100G_MAC_DBG_C2C_HM_REF_CLK_SRC 69
+#define ECPRI_CC_ETH_100G_MAC_FH0_HM_REF_CLK 70
+#define ECPRI_CC_ETH_100G_MAC_FH0_HM_REF_CLK_SRC 71
+#define ECPRI_CC_ETH_100G_MAC_FH1_HM_REF_CLK 72
+#define ECPRI_CC_ETH_100G_MAC_FH1_HM_REF_CLK_SRC 73
+#define ECPRI_CC_ETH_100G_MAC_FH2_HM_REF_CLK 74
+#define ECPRI_CC_ETH_100G_MAC_FH2_HM_REF_CLK_SRC 75
+#define ECPRI_CC_ETH_DBG_NFAPI_AXI_CLK 76
+#define ECPRI_CC_ETH_DBG_NOC_AXI_CLK 77
+#define ECPRI_CC_ETH_PHY_0_OCK_SRAM_CLK 78
+#define ECPRI_CC_ETH_PHY_1_OCK_SRAM_CLK 79
+#define ECPRI_CC_ETH_PHY_2_OCK_SRAM_CLK 80
+#define ECPRI_CC_ETH_PHY_3_OCK_SRAM_CLK 81
+#define ECPRI_CC_ETH_PHY_4_OCK_SRAM_CLK 82
+#define ECPRI_CC_MSS_EMAC_CLK 83
+#define ECPRI_CC_MSS_EMAC_CLK_SRC 84
+#define ECPRI_CC_MSS_ORAN_CLK 85
+#define ECPRI_CC_PHY0_LANE0_RX_CLK 86
+#define ECPRI_CC_PHY0_LANE0_TX_CLK 87
+#define ECPRI_CC_PHY0_LANE1_RX_CLK 88
+#define ECPRI_CC_PHY0_LANE1_TX_CLK 89
+#define ECPRI_CC_PHY0_LANE2_RX_CLK 90
+#define ECPRI_CC_PHY0_LANE2_TX_CLK 91
+#define ECPRI_CC_PHY0_LANE3_RX_CLK 92
+#define ECPRI_CC_PHY0_LANE3_TX_CLK 93
+#define ECPRI_CC_PHY1_LANE0_RX_CLK 94
+#define ECPRI_CC_PHY1_LANE0_TX_CLK 95
+#define ECPRI_CC_PHY1_LANE1_RX_CLK 96
+#define ECPRI_CC_PHY1_LANE1_TX_CLK 97
+#define ECPRI_CC_PHY1_LANE2_RX_CLK 98
+#define ECPRI_CC_PHY1_LANE2_TX_CLK 99
+#define ECPRI_CC_PHY1_LANE3_RX_CLK 100
+#define ECPRI_CC_PHY1_LANE3_TX_CLK 101
+#define ECPRI_CC_PHY2_LANE0_RX_CLK 102
+#define ECPRI_CC_PHY2_LANE0_TX_CLK 103
+#define ECPRI_CC_PHY2_LANE1_RX_CLK 104
+#define ECPRI_CC_PHY2_LANE1_TX_CLK 105
+#define ECPRI_CC_PHY2_LANE2_RX_CLK 106
+#define ECPRI_CC_PHY2_LANE2_TX_CLK 107
+#define ECPRI_CC_PHY2_LANE3_RX_CLK 108
+#define ECPRI_CC_PHY2_LANE3_TX_CLK 109
+#define ECPRI_CC_PHY3_LANE0_RX_CLK 110
+#define ECPRI_CC_PHY3_LANE0_TX_CLK 111
+#define ECPRI_CC_PHY3_LANE1_RX_CLK 112
+#define ECPRI_CC_PHY3_LANE1_TX_CLK 113
+#define ECPRI_CC_PHY3_LANE2_RX_CLK 114
+#define ECPRI_CC_PHY3_LANE2_TX_CLK 115
+#define ECPRI_CC_PHY3_LANE3_RX_CLK 116
+#define ECPRI_CC_PHY3_LANE3_TX_CLK 117
+#define ECPRI_CC_PHY4_LANE0_RX_CLK 118
+#define ECPRI_CC_PHY4_LANE0_TX_CLK 119
+#define ECPRI_CC_PHY4_LANE1_RX_CLK 120
+#define ECPRI_CC_PHY4_LANE1_TX_CLK 121
+#define ECPRI_CC_PHY4_LANE2_RX_CLK 122
+#define ECPRI_CC_PHY4_LANE2_TX_CLK 123
+#define ECPRI_CC_PHY4_LANE3_RX_CLK 124
+#define ECPRI_CC_PHY4_LANE3_TX_CLK 125
+
+/* ECPRI_CC resets */
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ECPRI_SS_BCR 0
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_C2C_BCR 1
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH0_BCR 2
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH1_BCR 3
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_FH2_BCR 4
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_ETH_WRAPPER_TOP_BCR 5
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_MODEM_BCR 6
+#define ECPRI_CC_CLK_CTL_TOP_ECPRI_CC_NOC_BCR 7
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qdu1000-gcc.h b/include/dt-bindings/clock/qcom,qdu1000-gcc.h
new file mode 100644
index 000000000000..2fd36cbfddbb
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qdu1000-gcc.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QDU1000_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_QDU1000_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL1 2
+#define GCC_GPLL2 3
+#define GCC_GPLL2_OUT_EVEN 4
+#define GCC_GPLL3 5
+#define GCC_GPLL4 6
+#define GCC_GPLL5 7
+#define GCC_GPLL5_OUT_EVEN 8
+#define GCC_GPLL6 9
+#define GCC_GPLL7 10
+#define GCC_GPLL8 11
+#define GCC_AGGRE_NOC_ECPRI_DMA_CLK 12
+#define GCC_AGGRE_NOC_ECPRI_DMA_CLK_SRC 13
+#define GCC_AGGRE_NOC_ECPRI_GSI_CLK_SRC 14
+#define GCC_BOOT_ROM_AHB_CLK 15
+#define GCC_CFG_NOC_ECPRI_CC_AHB_CLK 16
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 17
+#define GCC_DDRSS_ECPRI_DMA_CLK 18
+#define GCC_ECPRI_AHB_CLK 19
+#define GCC_ECPRI_CC_GPLL0_CLK_SRC 20
+#define GCC_ECPRI_CC_GPLL1_EVEN_CLK_SRC 21
+#define GCC_ECPRI_CC_GPLL2_EVEN_CLK_SRC 22
+#define GCC_ECPRI_CC_GPLL3_CLK_SRC 23
+#define GCC_ECPRI_CC_GPLL4_CLK_SRC 24
+#define GCC_ECPRI_CC_GPLL5_EVEN_CLK_SRC 25
+#define GCC_ECPRI_XO_CLK 26
+#define GCC_ETH_DBG_SNOC_AXI_CLK 27
+#define GCC_GEMNOC_PCIE_QX_CLK 28
+#define GCC_GP1_CLK 29
+#define GCC_GP1_CLK_SRC 30
+#define GCC_GP2_CLK 31
+#define GCC_GP2_CLK_SRC 32
+#define GCC_GP3_CLK 33
+#define GCC_GP3_CLK_SRC 34
+#define GCC_PCIE_0_AUX_CLK 35
+#define GCC_PCIE_0_AUX_CLK_SRC 36
+#define GCC_PCIE_0_CFG_AHB_CLK 37
+#define GCC_PCIE_0_CLKREF_EN 38
+#define GCC_PCIE_0_MSTR_AXI_CLK 39
+#define GCC_PCIE_0_PHY_AUX_CLK 40
+#define GCC_PCIE_0_PHY_RCHNG_CLK 41
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 42
+#define GCC_PCIE_0_PIPE_CLK 43
+#define GCC_PCIE_0_SLV_AXI_CLK 44
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 45
+#define GCC_PDM2_CLK 46
+#define GCC_PDM2_CLK_SRC 47
+#define GCC_PDM_AHB_CLK 48
+#define GCC_PDM_XO4_CLK 49
+#define GCC_QMIP_ANOC_PCIE_CLK 50
+#define GCC_QMIP_ECPRI_DMA0_CLK 51
+#define GCC_QMIP_ECPRI_DMA1_CLK 52
+#define GCC_QMIP_ECPRI_GSI_CLK 53
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 54
+#define GCC_QUPV3_WRAP0_CORE_CLK 55
+#define GCC_QUPV3_WRAP0_S0_CLK 56
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 57
+#define GCC_QUPV3_WRAP0_S1_CLK 58
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 59
+#define GCC_QUPV3_WRAP0_S2_CLK 60
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 61
+#define GCC_QUPV3_WRAP0_S3_CLK 62
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 63
+#define GCC_QUPV3_WRAP0_S4_CLK 64
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 65
+#define GCC_QUPV3_WRAP0_S5_CLK 66
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 67
+#define GCC_QUPV3_WRAP0_S6_CLK 68
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 69
+#define GCC_QUPV3_WRAP0_S7_CLK 70
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 71
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 72
+#define GCC_QUPV3_WRAP1_CORE_CLK 73
+#define GCC_QUPV3_WRAP1_S0_CLK 74
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 75
+#define GCC_QUPV3_WRAP1_S1_CLK 76
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 77
+#define GCC_QUPV3_WRAP1_S2_CLK 78
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 79
+#define GCC_QUPV3_WRAP1_S3_CLK 80
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 81
+#define GCC_QUPV3_WRAP1_S4_CLK 82
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S5_CLK 84
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S6_CLK 86
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S7_CLK 88
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 89
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 90
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 91
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 92
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 93
+#define GCC_SDCC5_AHB_CLK 94
+#define GCC_SDCC5_APPS_CLK 95
+#define GCC_SDCC5_APPS_CLK_SRC 96
+#define GCC_SDCC5_ICE_CORE_CLK 97
+#define GCC_SDCC5_ICE_CORE_CLK_SRC 98
+#define GCC_SNOC_CNOC_GEMNOC_PCIE_QX_CLK 99
+#define GCC_SNOC_CNOC_GEMNOC_PCIE_SOUTH_QX_CLK 100
+#define GCC_SNOC_CNOC_PCIE_QX_CLK 101
+#define GCC_SNOC_PCIE_SF_CENTER_QX_CLK 102
+#define GCC_SNOC_PCIE_SF_SOUTH_QX_CLK 103
+#define GCC_TSC_CFG_AHB_CLK 104
+#define GCC_TSC_CLK_SRC 105
+#define GCC_TSC_CNTR_CLK 106
+#define GCC_TSC_ETU_CLK 107
+#define GCC_USB2_CLKREF_EN 108
+#define GCC_USB30_PRIM_MASTER_CLK 109
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 110
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 111
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 112
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 113
+#define GCC_USB30_PRIM_SLEEP_CLK 114
+#define GCC_USB3_PRIM_PHY_AUX_CLK 115
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 116
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 117
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 118
+#define GCC_SM_BUS_AHB_CLK 119
+#define GCC_SM_BUS_XO_CLK 120
+#define GCC_SM_BUS_XO_CLK_SRC 121
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 122
+#define GCC_ETH_100G_C2C_HM_APB_CLK 123
+#define GCC_ETH_100G_FH_HM_APB_0_CLK 124
+#define GCC_ETH_100G_FH_HM_APB_1_CLK 125
+#define GCC_ETH_100G_FH_HM_APB_2_CLK 126
+#define GCC_ETH_DBG_C2C_HM_APB_CLK 127
+#define GCC_AGGRE_NOC_ECPRI_GSI_CLK 128
+#define GCC_PCIE_0_PIPE_CLK_SRC 129
+#define GCC_PCIE_0_PHY_AUX_CLK_SRC 130
+#define GCC_GPLL1_OUT_EVEN 131
+#define GCC_DDRSS_ECPRI_GSI_CLK 132
+
+/* GCC resets */
+#define GCC_ECPRI_CC_BCR 0
+#define GCC_ECPRI_SS_BCR 1
+#define GCC_ETH_WRAPPER_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_PHY_CFG_AHB_BCR 8
+#define GCC_PCIE_PHY_COM_BCR 9
+#define GCC_PDM_BCR 10
+#define GCC_QUPV3_WRAPPER_0_BCR 11
+#define GCC_QUPV3_WRAPPER_1_BCR 12
+#define GCC_QUSB2PHY_PRIM_BCR 13
+#define GCC_QUSB2PHY_SEC_BCR 14
+#define GCC_SDCC5_BCR 15
+#define GCC_TCSR_PCIE_BCR 16
+#define GCC_TSC_BCR 17
+#define GCC_USB30_PRIM_BCR 18
+#define GCC_USB3_DP_PHY_PRIM_BCR 19
+#define GCC_USB3_DP_PHY_SEC_BCR 20
+#define GCC_USB3_PHY_PRIM_BCR 21
+#define GCC_USB3_PHY_SEC_BCR 22
+#define GCC_USB3PHY_PHY_PRIM_BCR 23
+#define GCC_USB3PHY_PHY_SEC_BCR 24
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 25
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_0_PHY_GDSC 1
+#define USB30_PRIM_GDSC 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index 8aaba7cd9589..1477a75e7f6d 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -149,5 +149,30 @@
#define RPM_SMD_CE2_A_CLK 103
#define RPM_SMD_CE3_CLK 104
#define RPM_SMD_CE3_A_CLK 105
+#define RPM_SMD_QUP_CLK 106
+#define RPM_SMD_QUP_A_CLK 107
+#define RPM_SMD_MMRT_CLK 108
+#define RPM_SMD_MMRT_A_CLK 109
+#define RPM_SMD_MMNRT_CLK 110
+#define RPM_SMD_MMNRT_A_CLK 111
+#define RPM_SMD_SNOC_PERIPH_CLK 112
+#define RPM_SMD_SNOC_PERIPH_A_CLK 113
+#define RPM_SMD_SNOC_LPASS_CLK 114
+#define RPM_SMD_SNOC_LPASS_A_CLK 115
+#define RPM_SMD_HWKM_CLK 116
+#define RPM_SMD_HWKM_A_CLK 117
+#define RPM_SMD_PKA_CLK 118
+#define RPM_SMD_PKA_A_CLK 119
+#define RPM_SMD_CPUSS_GNOC_CLK 120
+#define RPM_SMD_CPUSS_GNOC_A_CLK 121
+#define RPM_SMD_MSS_CFG_AHB_CLK 122
+#define RPM_SMD_MSS_CFG_AHB_A_CLK 123
+#define RPM_SMD_BIMC_FREQ_LOG 124
+#define RPM_SMD_LN_BB_CLK_PIN 125
+#define RPM_SMD_LN_BB_A_CLK_PIN 126
+#define RPM_SMD_BB_CLK3 127
+#define RPM_SMD_BB_CLK3_A 128
+#define RPM_SMD_BB_CLK3_PIN 129
+#define RPM_SMD_BB_CLK3_A_PIN 130
#endif
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
index 583a99161aaa..0a7d1be0d124 100644
--- a/include/dt-bindings/clock/qcom,rpmh.h
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -31,5 +31,7 @@
#define RPMH_RF_CLK5_A 22
#define RPMH_PKA_CLK 23
#define RPMH_HWKM_CLK 24
+#define RPMH_QLINK_CLK 25
+#define RPMH_QLINK_CLK_A 26
#endif
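
The two new RPMh indices follow the pairing convention already used in this header: the plain define is the regular vote and the _A variant is the active-only vote. A hedged consumer sketch; the node, address, and clock-names value are assumptions:

	#include <dt-bindings/clock/qcom,rpmh.h>

	remoteproc@4080000 {	/* hypothetical consumer */
		clocks = <&rpmhcc RPMH_QLINK_CLK>;
		clock-names = "qlink";
	};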
diff --git a/include/dt-bindings/clock/qcom,sa8775p-camcc.h b/include/dt-bindings/clock/qcom,sa8775p-camcc.h
new file mode 100644
index 000000000000..38531acd699f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sa8775p-camcc.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SA8775P_CAM_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SA8775P_CAM_CC_H
+
+/* CAM_CC clocks */
+#define CAM_CC_CAMNOC_AXI_CLK 0
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 1
+#define CAM_CC_CAMNOC_DCD_XO_CLK 2
+#define CAM_CC_CAMNOC_XO_CLK 3
+#define CAM_CC_CCI_0_CLK 4
+#define CAM_CC_CCI_0_CLK_SRC 5
+#define CAM_CC_CCI_1_CLK 6
+#define CAM_CC_CCI_1_CLK_SRC 7
+#define CAM_CC_CCI_2_CLK 8
+#define CAM_CC_CCI_2_CLK_SRC 9
+#define CAM_CC_CCI_3_CLK 10
+#define CAM_CC_CCI_3_CLK_SRC 11
+#define CAM_CC_CORE_AHB_CLK 12
+#define CAM_CC_CPAS_AHB_CLK 13
+#define CAM_CC_CPAS_FAST_AHB_CLK 14
+#define CAM_CC_CPAS_IFE_0_CLK 15
+#define CAM_CC_CPAS_IFE_1_CLK 16
+#define CAM_CC_CPAS_IFE_LITE_CLK 17
+#define CAM_CC_CPAS_IPE_CLK 18
+#define CAM_CC_CPAS_SFE_LITE_0_CLK 19
+#define CAM_CC_CPAS_SFE_LITE_1_CLK 20
+#define CAM_CC_CPHY_RX_CLK_SRC 21
+#define CAM_CC_CSI0PHYTIMER_CLK 22
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 23
+#define CAM_CC_CSI1PHYTIMER_CLK 24
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 25
+#define CAM_CC_CSI2PHYTIMER_CLK 26
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 27
+#define CAM_CC_CSI3PHYTIMER_CLK 28
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 29
+#define CAM_CC_CSID_CLK 30
+#define CAM_CC_CSID_CLK_SRC 31
+#define CAM_CC_CSID_CSIPHY_RX_CLK 32
+#define CAM_CC_CSIPHY0_CLK 33
+#define CAM_CC_CSIPHY1_CLK 34
+#define CAM_CC_CSIPHY2_CLK 35
+#define CAM_CC_CSIPHY3_CLK 36
+#define CAM_CC_FAST_AHB_CLK_SRC 37
+#define CAM_CC_GDSC_CLK 38
+#define CAM_CC_ICP_AHB_CLK 39
+#define CAM_CC_ICP_CLK 40
+#define CAM_CC_ICP_CLK_SRC 41
+#define CAM_CC_IFE_0_CLK 42
+#define CAM_CC_IFE_0_CLK_SRC 43
+#define CAM_CC_IFE_0_FAST_AHB_CLK 44
+#define CAM_CC_IFE_1_CLK 45
+#define CAM_CC_IFE_1_CLK_SRC 46
+#define CAM_CC_IFE_1_FAST_AHB_CLK 47
+#define CAM_CC_IFE_LITE_AHB_CLK 48
+#define CAM_CC_IFE_LITE_CLK 49
+#define CAM_CC_IFE_LITE_CLK_SRC 50
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 51
+#define CAM_CC_IFE_LITE_CSID_CLK 52
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 53
+#define CAM_CC_IPE_AHB_CLK 54
+#define CAM_CC_IPE_CLK 55
+#define CAM_CC_IPE_CLK_SRC 56
+#define CAM_CC_IPE_FAST_AHB_CLK 57
+#define CAM_CC_MCLK0_CLK 58
+#define CAM_CC_MCLK0_CLK_SRC 59
+#define CAM_CC_MCLK1_CLK 60
+#define CAM_CC_MCLK1_CLK_SRC 61
+#define CAM_CC_MCLK2_CLK 62
+#define CAM_CC_MCLK2_CLK_SRC 63
+#define CAM_CC_MCLK3_CLK 64
+#define CAM_CC_MCLK3_CLK_SRC 65
+#define CAM_CC_PLL0 66
+#define CAM_CC_PLL0_OUT_EVEN 67
+#define CAM_CC_PLL0_OUT_ODD 68
+#define CAM_CC_PLL2 69
+#define CAM_CC_PLL3 70
+#define CAM_CC_PLL3_OUT_EVEN 71
+#define CAM_CC_PLL4 72
+#define CAM_CC_PLL4_OUT_EVEN 73
+#define CAM_CC_PLL5 74
+#define CAM_CC_PLL5_OUT_EVEN 75
+#define CAM_CC_SFE_LITE_0_CLK 76
+#define CAM_CC_SFE_LITE_0_FAST_AHB_CLK 77
+#define CAM_CC_SFE_LITE_1_CLK 78
+#define CAM_CC_SFE_LITE_1_FAST_AHB_CLK 79
+#define CAM_CC_SLEEP_CLK 80
+#define CAM_CC_SLEEP_CLK_SRC 81
+#define CAM_CC_SLOW_AHB_CLK_SRC 82
+#define CAM_CC_SM_OBS_CLK 83
+#define CAM_CC_XO_CLK_SRC 84
+#define CAM_CC_QDSS_DEBUG_XO_CLK 85
+
+/* CAM_CC power domains */
+#define CAM_CC_TITAN_TOP_GDSC 0
+
+/* CAM_CC resets */
+#define CAM_CC_ICP_BCR 0
+#define CAM_CC_IFE_0_BCR 1
+#define CAM_CC_IFE_1_BCR 2
+#define CAM_CC_IPE_0_BCR 3
+#define CAM_CC_SFE_LITE_0_BCR 4
+#define CAM_CC_SFE_LITE_1_BCR 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sa8775p-dispcc.h b/include/dt-bindings/clock/qcom,sa8775p-dispcc.h
new file mode 100644
index 000000000000..e2049e510658
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sa8775p-dispcc.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SA8775P_DISP_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SA8775P_DISP_CC_H
+
+/* DISP_CC_0/1 clocks */
+#define MDSS_DISP_CC_MDSS_AHB1_CLK 0
+#define MDSS_DISP_CC_MDSS_AHB_CLK 1
+#define MDSS_DISP_CC_MDSS_AHB_CLK_SRC 2
+#define MDSS_DISP_CC_MDSS_BYTE0_CLK 3
+#define MDSS_DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define MDSS_DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define MDSS_DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define MDSS_DISP_CC_MDSS_BYTE1_CLK 7
+#define MDSS_DISP_CC_MDSS_BYTE1_CLK_SRC 8
+#define MDSS_DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 9
+#define MDSS_DISP_CC_MDSS_BYTE1_INTF_CLK 10
+#define MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK 11
+#define MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 12
+#define MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK 13
+#define MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK_SRC 14
+#define MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK 15
+#define MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 16
+#define MDSS_DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 17
+#define MDSS_DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 18
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK 19
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 20
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK 21
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 22
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK 23
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK_SRC 24
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK 25
+#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK_SRC 26
+#define MDSS_DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 27
+#define MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK 28
+#define MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 29
+#define MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK 30
+#define MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK_SRC 31
+#define MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK 32
+#define MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 33
+#define MDSS_DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 34
+#define MDSS_DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 35
+#define MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK 36
+#define MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 37
+#define MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK 38
+#define MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 39
+#define MDSS_DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 40
+#define MDSS_DISP_CC_MDSS_ESC0_CLK 41
+#define MDSS_DISP_CC_MDSS_ESC0_CLK_SRC 42
+#define MDSS_DISP_CC_MDSS_ESC1_CLK 43
+#define MDSS_DISP_CC_MDSS_ESC1_CLK_SRC 44
+#define MDSS_DISP_CC_MDSS_MDP1_CLK 45
+#define MDSS_DISP_CC_MDSS_MDP_CLK 46
+#define MDSS_DISP_CC_MDSS_MDP_CLK_SRC 47
+#define MDSS_DISP_CC_MDSS_MDP_LUT1_CLK 48
+#define MDSS_DISP_CC_MDSS_MDP_LUT_CLK 49
+#define MDSS_DISP_CC_MDSS_NON_GDSC_AHB_CLK 50
+#define MDSS_DISP_CC_MDSS_PCLK0_CLK 51
+#define MDSS_DISP_CC_MDSS_PCLK0_CLK_SRC 52
+#define MDSS_DISP_CC_MDSS_PCLK1_CLK 53
+#define MDSS_DISP_CC_MDSS_PCLK1_CLK_SRC 54
+#define MDSS_DISP_CC_MDSS_PLL_LOCK_MONITOR_CLK 55
+#define MDSS_DISP_CC_MDSS_RSCC_AHB_CLK 56
+#define MDSS_DISP_CC_MDSS_RSCC_VSYNC_CLK 57
+#define MDSS_DISP_CC_MDSS_VSYNC1_CLK 58
+#define MDSS_DISP_CC_MDSS_VSYNC_CLK 59
+#define MDSS_DISP_CC_MDSS_VSYNC_CLK_SRC 60
+#define MDSS_DISP_CC_PLL0 61
+#define MDSS_DISP_CC_PLL1 62
+#define MDSS_DISP_CC_SLEEP_CLK 63
+#define MDSS_DISP_CC_SLEEP_CLK_SRC 64
+#define MDSS_DISP_CC_SM_OBS_CLK 65
+#define MDSS_DISP_CC_XO_CLK 66
+#define MDSS_DISP_CC_XO_CLK_SRC 67
+
+/* DISP_CC_0/1 power domains */
+#define MDSS_DISP_CC_MDSS_CORE_GDSC 0
+#define MDSS_DISP_CC_MDSS_CORE_INT2_GDSC 1
+
+/* DISP_CC_0/1 resets */
+#define MDSS_DISP_CC_MDSS_CORE_BCR 0
+#define MDSS_DISP_CC_MDSS_RSCC_BCR 1
+
+#endif
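
The "DISP_CC_0/1" comments indicate that a single header serves both display clock controller instances on SA8775P: each instance gets its own DT node, consumers select one by phandle, and the MDSS_DISP_CC_* indices are identical for both. A hedged sketch; labels, compatibles, and addresses are assumptions:

	#include <dt-bindings/clock/qcom,sa8775p-dispcc.h>

	dispcc0: clock-controller@af00000 {	/* assumed address */
		compatible = "qcom,sa8775p-dispcc0";	/* assumed */
		#clock-cells = <1>;
	};

	dispcc1: clock-controller@22100000 {	/* assumed address */
		compatible = "qcom,sa8775p-dispcc1";	/* assumed */
		#clock-cells = <1>;
	};

	display0 { clocks = <&dispcc0 MDSS_DISP_CC_MDSS_MDP_CLK>; };
	display1 { clocks = <&dispcc1 MDSS_DISP_CC_MDSS_MDP_CLK>; };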
diff --git a/include/dt-bindings/clock/qcom,sa8775p-gcc.h b/include/dt-bindings/clock/qcom,sa8775p-gcc.h
new file mode 100644
index 000000000000..01f54234963d
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sa8775p-gcc.h
@@ -0,0 +1,320 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SA8775P_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SA8775P_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL1 2
+#define GCC_GPLL4 3
+#define GCC_GPLL5 4
+#define GCC_GPLL7 5
+#define GCC_GPLL9 6
+#define GCC_AGGRE_NOC_QUPV3_AXI_CLK 7
+#define GCC_AGGRE_UFS_CARD_AXI_CLK 8
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 9
+#define GCC_AGGRE_USB2_PRIM_AXI_CLK 10
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 11
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 12
+#define GCC_AHB2PHY0_CLK 13
+#define GCC_AHB2PHY2_CLK 14
+#define GCC_AHB2PHY3_CLK 15
+#define GCC_BOOT_ROM_AHB_CLK 16
+#define GCC_CAMERA_AHB_CLK 17
+#define GCC_CAMERA_HF_AXI_CLK 18
+#define GCC_CAMERA_SF_AXI_CLK 19
+#define GCC_CAMERA_THROTTLE_XO_CLK 20
+#define GCC_CAMERA_XO_CLK 21
+#define GCC_CFG_NOC_USB2_PRIM_AXI_CLK 22
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 23
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 24
+#define GCC_DDRSS_GPU_AXI_CLK 25
+#define GCC_DISP1_AHB_CLK 26
+#define GCC_DISP1_HF_AXI_CLK 27
+#define GCC_DISP1_XO_CLK 28
+#define GCC_DISP_AHB_CLK 29
+#define GCC_DISP_HF_AXI_CLK 30
+#define GCC_DISP_XO_CLK 31
+#define GCC_EDP_REF_CLKREF_EN 32
+#define GCC_EMAC0_AXI_CLK 33
+#define GCC_EMAC0_PHY_AUX_CLK 34
+#define GCC_EMAC0_PHY_AUX_CLK_SRC 35
+#define GCC_EMAC0_PTP_CLK 36
+#define GCC_EMAC0_PTP_CLK_SRC 37
+#define GCC_EMAC0_RGMII_CLK 38
+#define GCC_EMAC0_RGMII_CLK_SRC 39
+#define GCC_EMAC0_SLV_AHB_CLK 40
+#define GCC_EMAC1_AXI_CLK 41
+#define GCC_EMAC1_PHY_AUX_CLK 42
+#define GCC_EMAC1_PHY_AUX_CLK_SRC 43
+#define GCC_EMAC1_PTP_CLK 44
+#define GCC_EMAC1_PTP_CLK_SRC 45
+#define GCC_EMAC1_RGMII_CLK 46
+#define GCC_EMAC1_RGMII_CLK_SRC 47
+#define GCC_EMAC1_SLV_AHB_CLK 48
+#define GCC_GP1_CLK 49
+#define GCC_GP1_CLK_SRC 50
+#define GCC_GP2_CLK 51
+#define GCC_GP2_CLK_SRC 52
+#define GCC_GP3_CLK 53
+#define GCC_GP3_CLK_SRC 54
+#define GCC_GP4_CLK 55
+#define GCC_GP4_CLK_SRC 56
+#define GCC_GP5_CLK 57
+#define GCC_GP5_CLK_SRC 58
+#define GCC_GPU_CFG_AHB_CLK 59
+#define GCC_GPU_GPLL0_CLK_SRC 60
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 61
+#define GCC_GPU_MEMNOC_GFX_CLK 62
+#define GCC_GPU_SNOC_DVM_GFX_CLK 63
+#define GCC_GPU_TCU_THROTTLE_AHB_CLK 64
+#define GCC_GPU_TCU_THROTTLE_CLK 65
+#define GCC_PCIE_0_AUX_CLK 66
+#define GCC_PCIE_0_AUX_CLK_SRC 67
+#define GCC_PCIE_0_CFG_AHB_CLK 68
+#define GCC_PCIE_0_MSTR_AXI_CLK 69
+#define GCC_PCIE_0_PHY_AUX_CLK 70
+#define GCC_PCIE_0_PHY_AUX_CLK_SRC 71
+#define GCC_PCIE_0_PHY_RCHNG_CLK 72
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 73
+#define GCC_PCIE_0_PIPE_CLK 74
+#define GCC_PCIE_0_PIPE_CLK_SRC 75
+#define GCC_PCIE_0_PIPE_DIV_CLK_SRC 76
+#define GCC_PCIE_0_PIPEDIV2_CLK 77
+#define GCC_PCIE_0_SLV_AXI_CLK 78
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 79
+#define GCC_PCIE_1_AUX_CLK 80
+#define GCC_PCIE_1_AUX_CLK_SRC 81
+#define GCC_PCIE_1_CFG_AHB_CLK 82
+#define GCC_PCIE_1_MSTR_AXI_CLK 83
+#define GCC_PCIE_1_PHY_AUX_CLK 84
+#define GCC_PCIE_1_PHY_AUX_CLK_SRC 85
+#define GCC_PCIE_1_PHY_RCHNG_CLK 86
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 87
+#define GCC_PCIE_1_PIPE_CLK 88
+#define GCC_PCIE_1_PIPE_CLK_SRC 89
+#define GCC_PCIE_1_PIPE_DIV_CLK_SRC 90
+#define GCC_PCIE_1_PIPEDIV2_CLK 91
+#define GCC_PCIE_1_SLV_AXI_CLK 92
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 93
+#define GCC_PCIE_CLKREF_EN 94
+#define GCC_PCIE_THROTTLE_CFG_CLK 95
+#define GCC_PDM2_CLK 96
+#define GCC_PDM2_CLK_SRC 97
+#define GCC_PDM_AHB_CLK 98
+#define GCC_PDM_XO4_CLK 99
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 100
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 101
+#define GCC_QMIP_DISP1_AHB_CLK 102
+#define GCC_QMIP_DISP1_ROT_AHB_CLK 103
+#define GCC_QMIP_DISP_AHB_CLK 104
+#define GCC_QMIP_DISP_ROT_AHB_CLK 105
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 106
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 107
+#define GCC_QMIP_VIDEO_VCPU_AHB_CLK 108
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 109
+#define GCC_QUPV3_WRAP0_CORE_CLK 110
+#define GCC_QUPV3_WRAP0_S0_CLK 111
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 112
+#define GCC_QUPV3_WRAP0_S1_CLK 113
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 114
+#define GCC_QUPV3_WRAP0_S2_CLK 115
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 116
+#define GCC_QUPV3_WRAP0_S3_CLK 117
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 118
+#define GCC_QUPV3_WRAP0_S4_CLK 119
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 120
+#define GCC_QUPV3_WRAP0_S5_CLK 121
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 122
+#define GCC_QUPV3_WRAP0_S6_CLK 123
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 124
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 125
+#define GCC_QUPV3_WRAP1_CORE_CLK 126
+#define GCC_QUPV3_WRAP1_S0_CLK 127
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 128
+#define GCC_QUPV3_WRAP1_S1_CLK 129
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 130
+#define GCC_QUPV3_WRAP1_S2_CLK 131
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 132
+#define GCC_QUPV3_WRAP1_S3_CLK 133
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 134
+#define GCC_QUPV3_WRAP1_S4_CLK 135
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 136
+#define GCC_QUPV3_WRAP1_S5_CLK 137
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 138
+#define GCC_QUPV3_WRAP1_S6_CLK 139
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 140
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 141
+#define GCC_QUPV3_WRAP2_CORE_CLK 142
+#define GCC_QUPV3_WRAP2_S0_CLK 143
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 144
+#define GCC_QUPV3_WRAP2_S1_CLK 145
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 146
+#define GCC_QUPV3_WRAP2_S2_CLK 147
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 148
+#define GCC_QUPV3_WRAP2_S3_CLK 149
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 150
+#define GCC_QUPV3_WRAP2_S4_CLK 151
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 152
+#define GCC_QUPV3_WRAP2_S5_CLK 153
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 154
+#define GCC_QUPV3_WRAP2_S6_CLK 155
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 156
+#define GCC_QUPV3_WRAP3_CORE_2X_CLK 157
+#define GCC_QUPV3_WRAP3_CORE_CLK 158
+#define GCC_QUPV3_WRAP3_QSPI_CLK 159
+#define GCC_QUPV3_WRAP3_S0_CLK 160
+#define GCC_QUPV3_WRAP3_S0_CLK_SRC 161
+#define GCC_QUPV3_WRAP3_S0_DIV_CLK_SRC 162
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 163
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 164
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 165
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 166
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 167
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 168
+#define GCC_QUPV3_WRAP_3_M_AHB_CLK 169
+#define GCC_QUPV3_WRAP_3_S_AHB_CLK 170
+#define GCC_SDCC1_AHB_CLK 171
+#define GCC_SDCC1_APPS_CLK 172
+#define GCC_SDCC1_APPS_CLK_SRC 173
+#define GCC_SDCC1_ICE_CORE_CLK 174
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 175
+#define GCC_SGMI_CLKREF_EN 176
+#define GCC_TSCSS_AHB_CLK 177
+#define GCC_TSCSS_CNTR_CLK_SRC 178
+#define GCC_TSCSS_ETU_CLK 179
+#define GCC_TSCSS_GLOBAL_CNTR_CLK 180
+#define GCC_UFS_CARD_AHB_CLK 181
+#define GCC_UFS_CARD_AXI_CLK 182
+#define GCC_UFS_CARD_AXI_CLK_SRC 183
+#define GCC_UFS_CARD_ICE_CORE_CLK 184
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 185
+#define GCC_UFS_CARD_PHY_AUX_CLK 186
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 187
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 188
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK_SRC 189
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 190
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK_SRC 191
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 192
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK_SRC 193
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 194
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 195
+#define GCC_UFS_PHY_AHB_CLK 196
+#define GCC_UFS_PHY_AXI_CLK 197
+#define GCC_UFS_PHY_AXI_CLK_SRC 198
+#define GCC_UFS_PHY_ICE_CORE_CLK 199
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 200
+#define GCC_UFS_PHY_PHY_AUX_CLK 201
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 202
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 203
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 204
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 205
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 206
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 207
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 208
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 209
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 210
+#define GCC_USB20_MASTER_CLK 211
+#define GCC_USB20_MASTER_CLK_SRC 212
+#define GCC_USB20_MOCK_UTMI_CLK 213
+#define GCC_USB20_MOCK_UTMI_CLK_SRC 214
+#define GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC 215
+#define GCC_USB20_SLEEP_CLK 216
+#define GCC_USB30_PRIM_MASTER_CLK 217
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 218
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 219
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 220
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 221
+#define GCC_USB30_PRIM_SLEEP_CLK 222
+#define GCC_USB30_SEC_MASTER_CLK 223
+#define GCC_USB30_SEC_MASTER_CLK_SRC 224
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 225
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 226
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 227
+#define GCC_USB30_SEC_SLEEP_CLK 228
+#define GCC_USB3_PRIM_PHY_AUX_CLK 229
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 230
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 231
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 232
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 233
+#define GCC_USB3_SEC_PHY_AUX_CLK 234
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 235
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 236
+#define GCC_USB3_SEC_PHY_PIPE_CLK 237
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 238
+#define GCC_USB_CLKREF_EN 239
+#define GCC_VIDEO_AHB_CLK 240
+#define GCC_VIDEO_AXI0_CLK 241
+#define GCC_VIDEO_AXI1_CLK 242
+#define GCC_VIDEO_XO_CLK 243
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 244
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 245
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 246
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 247
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 248
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY1_BCR 1
+#define GCC_DISPLAY_BCR 2
+#define GCC_EMAC0_BCR 3
+#define GCC_EMAC1_BCR 4
+#define GCC_GPU_BCR 5
+#define GCC_MMSS_BCR 6
+#define GCC_PCIE_0_BCR 7
+#define GCC_PCIE_0_LINK_DOWN_BCR 8
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 9
+#define GCC_PCIE_0_PHY_BCR 10
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 11
+#define GCC_PCIE_1_BCR 12
+#define GCC_PCIE_1_LINK_DOWN_BCR 13
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 14
+#define GCC_PCIE_1_PHY_BCR 15
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 16
+#define GCC_PDM_BCR 17
+#define GCC_QUPV3_WRAPPER_0_BCR 18
+#define GCC_QUPV3_WRAPPER_1_BCR 19
+#define GCC_QUPV3_WRAPPER_2_BCR 20
+#define GCC_QUPV3_WRAPPER_3_BCR 21
+#define GCC_SDCC1_BCR 22
+#define GCC_TSCSS_BCR 23
+#define GCC_UFS_CARD_BCR 24
+#define GCC_UFS_PHY_BCR 25
+#define GCC_USB20_PRIM_BCR 26
+#define GCC_USB2_PHY_PRIM_BCR 27
+#define GCC_USB2_PHY_SEC_BCR 28
+#define GCC_USB30_PRIM_BCR 29
+#define GCC_USB30_SEC_BCR 30
+#define GCC_USB3_DP_PHY_PRIM_BCR 31
+#define GCC_USB3_DP_PHY_SEC_BCR 32
+#define GCC_USB3_PHY_PRIM_BCR 33
+#define GCC_USB3_PHY_SEC_BCR 34
+#define GCC_USB3_PHY_TERT_BCR 35
+#define GCC_USB3_UNIPHY_MP0_BCR 36
+#define GCC_USB3_UNIPHY_MP1_BCR 37
+#define GCC_USB3PHY_PHY_PRIM_BCR 38
+#define GCC_USB3PHY_PHY_SEC_BCR 39
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 40
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 41
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 42
+#define GCC_VIDEO_BCR 43
+#define GCC_VIDEO_AXI0_CLK_ARES 44
+#define GCC_VIDEO_AXI1_CLK_ARES 45
+
+/* GCC GDSCs */
+#define PCIE_0_GDSC 0
+#define PCIE_1_GDSC 1
+#define UFS_CARD_GDSC 2
+#define UFS_PHY_GDSC 3
+#define USB20_PRIM_GDSC 4
+#define USB30_PRIM_GDSC 5
+#define USB30_SEC_GDSC 6
+#define EMAC0_GDSC 7
+#define EMAC1_GDSC 8
+
+#endif /* _DT_BINDINGS_CLK_QCOM_GCC_SA8775P_H */
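
These indices only take effect when referenced as clock, reset, or power-domain cells from a devicetree source that includes the header above. A minimal, hypothetical DTS sketch of that consumption pattern (the &gcc label, the &sdhc_1 node, and the clock-names strings are illustrative assumptions, not taken from this patch):

	/* Hypothetical board snippet, assuming a GCC node labelled "gcc". */
	#include <dt-bindings/clock/qcom,sa8775p-gcc.h>

	&sdhc_1 {
		clocks = <&gcc GCC_SDCC1_AHB_CLK>,
			 <&gcc GCC_SDCC1_APPS_CLK>,
			 <&gcc GCC_SDCC1_ICE_CORE_CLK>;
		clock-names = "iface", "core", "ice";
		resets = <&gcc GCC_SDCC1_BCR>;
	};

The provider translates each id back into a clock through its #clock-cells = <1> binding, so the numeric values are devicetree ABI: existing entries keep their positions and new ones are appended at the tail, which is why the hardware-control ids 244-248 above sit out of alphabetical order.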
diff --git a/include/dt-bindings/clock/qcom,sa8775p-gpucc.h b/include/dt-bindings/clock/qcom,sa8775p-gpucc.h
new file mode 100644
index 000000000000..a5fd784b1ea2
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sa8775p-gpucc.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPUCC_SA8775P_H
+#define _DT_BINDINGS_CLK_QCOM_GPUCC_SA8775P_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL1 1
+#define GPU_CC_AHB_CLK 2
+#define GPU_CC_CB_CLK 3
+#define GPU_CC_CRC_AHB_CLK 4
+#define GPU_CC_CX_FF_CLK 5
+#define GPU_CC_CX_GMU_CLK 6
+#define GPU_CC_CX_SNOC_DVM_CLK 7
+#define GPU_CC_CXO_AON_CLK 8
+#define GPU_CC_CXO_CLK 9
+#define GPU_CC_DEMET_CLK 10
+#define GPU_CC_DEMET_DIV_CLK_SRC 11
+#define GPU_CC_FF_CLK_SRC 12
+#define GPU_CC_GMU_CLK_SRC 13
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 14
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 15
+#define GPU_CC_HUB_AON_CLK 16
+#define GPU_CC_HUB_CLK_SRC 17
+#define GPU_CC_HUB_CX_INT_CLK 18
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 19
+#define GPU_CC_MEMNOC_GFX_CLK 20
+#define GPU_CC_SLEEP_CLK 21
+#define GPU_CC_XO_CLK_SRC 22
+
+/* GPU_CC resets */
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CB_BCR 1
+#define GPUCC_GPU_CC_CX_BCR 2
+#define GPUCC_GPU_CC_FAST_HUB_BCR 3
+#define GPUCC_GPU_CC_FF_BCR 4
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 5
+#define GPUCC_GPU_CC_GMU_BCR 6
+#define GPUCC_GPU_CC_GX_BCR 7
+#define GPUCC_GPU_CC_XO_BCR 8
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+#define GPU_CC_GX_GDSC 1
+
+#endif /* _DT_BINDINGS_CLK_QCOM_GPUCC_SA8775P_H */
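
The GDSC defines are consumed through the generic power-domain binding rather than the clock binding. A hypothetical sketch, assuming a GPU clock-controller node labelled "gpucc" that exposes #power-domain-cells = <1> and #reset-cells = <1> (the labels and the &gpu node are illustrative, not part of this patch):

	#include <dt-bindings/clock/qcom,sa8775p-gpucc.h>

	&gpu {
		power-domains = <&gpucc GPU_CC_CX_GDSC>;
		resets = <&gpucc GPUCC_GPU_CC_GX_BCR>;
	};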
diff --git a/include/dt-bindings/clock/qcom,sa8775p-videocc.h b/include/dt-bindings/clock/qcom,sa8775p-videocc.h
new file mode 100644
index 000000000000..e6325f68c317
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sa8775p-videocc.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SA8775P_VIDEO_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SA8775P_VIDEO_CC_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_AHB_CLK 0
+#define VIDEO_CC_AHB_CLK_SRC 1
+#define VIDEO_CC_MVS0_CLK 2
+#define VIDEO_CC_MVS0_CLK_SRC 3
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 4
+#define VIDEO_CC_MVS0C_CLK 5
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 6
+#define VIDEO_CC_MVS1_CLK 7
+#define VIDEO_CC_MVS1_CLK_SRC 8
+#define VIDEO_CC_MVS1_DIV_CLK_SRC 9
+#define VIDEO_CC_MVS1C_CLK 10
+#define VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC 11
+#define VIDEO_CC_PLL_LOCK_MONITOR_CLK 12
+#define VIDEO_CC_SLEEP_CLK 13
+#define VIDEO_CC_SLEEP_CLK_SRC 14
+#define VIDEO_CC_SM_DIV_CLK_SRC 15
+#define VIDEO_CC_SM_OBS_CLK 16
+#define VIDEO_CC_XO_CLK 17
+#define VIDEO_CC_XO_CLK_SRC 18
+#define VIDEO_PLL0 19
+#define VIDEO_PLL1 20
+
+/* VIDEO_CC power domains */
+#define VIDEO_CC_MVS0C_GDSC 0
+#define VIDEO_CC_MVS0_GDSC 1
+#define VIDEO_CC_MVS1C_GDSC 2
+#define VIDEO_CC_MVS1_GDSC 3
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_INTERFACE_BCR 0
+#define VIDEO_CC_MVS0_BCR 1
+#define VIDEO_CC_MVS0C_CLK_ARES 2
+#define VIDEO_CC_MVS0C_BCR 3
+#define VIDEO_CC_MVS1_BCR 4
+#define VIDEO_CC_MVS1C_CLK_ARES 5
+#define VIDEO_CC_MVS1C_BCR 6
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sar2130p-gcc.h b/include/dt-bindings/clock/qcom,sar2130p-gcc.h
new file mode 100644
index 000000000000..69d2dd2538a6
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sar2130p-gcc.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SAR2130P_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SAR2130P_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL1 2
+#define GCC_GPLL9 3
+#define GCC_GPLL9_OUT_EVEN 4
+#define GCC_AGGRE_NOC_PCIE_1_AXI_CLK 5
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 6
+#define GCC_BOOT_ROM_AHB_CLK 7
+#define GCC_CAMERA_AHB_CLK 8
+#define GCC_CAMERA_HF_AXI_CLK 9
+#define GCC_CAMERA_SF_AXI_CLK 10
+#define GCC_CAMERA_XO_CLK 11
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 12
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 13
+#define GCC_DDRSS_GPU_AXI_CLK 14
+#define GCC_DDRSS_PCIE_SF_CLK 15
+#define GCC_DISP_AHB_CLK 16
+#define GCC_DISP_HF_AXI_CLK 17
+#define GCC_GP1_CLK 18
+#define GCC_GP1_CLK_SRC 19
+#define GCC_GP2_CLK 20
+#define GCC_GP2_CLK_SRC 21
+#define GCC_GP3_CLK 22
+#define GCC_GP3_CLK_SRC 23
+#define GCC_GPU_CFG_AHB_CLK 24
+#define GCC_GPU_GPLL0_CLK_SRC 25
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 26
+#define GCC_GPU_MEMNOC_GFX_CLK 27
+#define GCC_GPU_SNOC_DVM_GFX_CLK 28
+#define GCC_IRIS_SS_HF_AXI1_CLK 29
+#define GCC_IRIS_SS_SPD_AXI1_CLK 30
+#define GCC_PCIE_0_AUX_CLK 31
+#define GCC_PCIE_0_AUX_CLK_SRC 32
+#define GCC_PCIE_0_CFG_AHB_CLK 33
+#define GCC_PCIE_0_MSTR_AXI_CLK 34
+#define GCC_PCIE_0_PHY_RCHNG_CLK 35
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 36
+#define GCC_PCIE_0_PIPE_CLK 37
+#define GCC_PCIE_0_PIPE_CLK_SRC 38
+#define GCC_PCIE_0_SLV_AXI_CLK 39
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 40
+#define GCC_PCIE_1_AUX_CLK 41
+#define GCC_PCIE_1_AUX_CLK_SRC 42
+#define GCC_PCIE_1_CFG_AHB_CLK 43
+#define GCC_PCIE_1_MSTR_AXI_CLK 44
+#define GCC_PCIE_1_PHY_RCHNG_CLK 45
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 46
+#define GCC_PCIE_1_PIPE_CLK 47
+#define GCC_PCIE_1_PIPE_CLK_SRC 48
+#define GCC_PCIE_1_SLV_AXI_CLK 49
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 50
+#define GCC_PDM2_CLK 51
+#define GCC_PDM2_CLK_SRC 52
+#define GCC_PDM_AHB_CLK 53
+#define GCC_PDM_XO4_CLK 54
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 55
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 56
+#define GCC_QMIP_GPU_AHB_CLK 57
+#define GCC_QMIP_PCIE_AHB_CLK 58
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 59
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 60
+#define GCC_QMIP_VIDEO_LSR_AHB_CLK 61
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 62
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 63
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 64
+#define GCC_QUPV3_WRAP0_CORE_CLK 65
+#define GCC_QUPV3_WRAP0_S0_CLK 66
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 67
+#define GCC_QUPV3_WRAP0_S1_CLK 68
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 69
+#define GCC_QUPV3_WRAP0_S2_CLK 70
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S3_CLK 72
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S4_CLK 74
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S5_CLK 76
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 77
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 78
+#define GCC_QUPV3_WRAP1_CORE_CLK 79
+#define GCC_QUPV3_WRAP1_S0_CLK 80
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 81
+#define GCC_QUPV3_WRAP1_S1_CLK 82
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S2_CLK 84
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S3_CLK 86
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S4_CLK 88
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S5_CLK 90
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 91
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 92
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 93
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 94
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 95
+#define GCC_SDCC1_AHB_CLK 96
+#define GCC_SDCC1_APPS_CLK 97
+#define GCC_SDCC1_APPS_CLK_SRC 98
+#define GCC_SDCC1_ICE_CORE_CLK 99
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 100
+#define GCC_USB30_PRIM_MASTER_CLK 101
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 102
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 103
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 104
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 105
+#define GCC_USB30_PRIM_SLEEP_CLK 106
+#define GCC_USB3_PRIM_PHY_AUX_CLK 107
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 108
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 109
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 110
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 111
+#define GCC_VIDEO_AHB_CLK 112
+#define GCC_VIDEO_AXI0_CLK 113
+#define GCC_VIDEO_AXI1_CLK 114
+#define GCC_VIDEO_XO_CLK 115
+#define GCC_GPLL4 116
+#define GCC_GPLL5 117
+#define GCC_GPLL7 118
+#define GCC_DDRSS_SPAD_CLK 119
+#define GCC_DDRSS_SPAD_CLK_SRC 120
+#define GCC_VIDEO_AXI0_SREG 121
+#define GCC_VIDEO_AXI1_SREG 122
+#define GCC_IRIS_SS_HF_AXI1_SREG 123
+#define GCC_IRIS_SS_SPD_AXI1_SREG 124
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_1_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_PHY_BCR 13
+#define GCC_PCIE_PHY_CFG_AHB_BCR 14
+#define GCC_PCIE_PHY_COM_BCR 15
+#define GCC_PDM_BCR 16
+#define GCC_QUPV3_WRAPPER_0_BCR 17
+#define GCC_QUPV3_WRAPPER_1_BCR 18
+#define GCC_QUSB2PHY_PRIM_BCR 19
+#define GCC_QUSB2PHY_SEC_BCR 20
+#define GCC_SDCC1_BCR 21
+#define GCC_USB30_PRIM_BCR 22
+#define GCC_USB3_DP_PHY_PRIM_BCR 23
+#define GCC_USB3_DP_PHY_SEC_BCR 24
+#define GCC_USB3_PHY_PRIM_BCR 25
+#define GCC_USB3_PHY_SEC_BCR 26
+#define GCC_USB3PHY_PHY_PRIM_BCR 27
+#define GCC_USB3PHY_PHY_SEC_BCR 28
+#define GCC_VIDEO_AXI0_CLK_ARES 29
+#define GCC_VIDEO_AXI1_CLK_ARES 30
+#define GCC_VIDEO_BCR 31
+#define GCC_IRIS_SS_HF_AXI_CLK_ARES 32
+#define GCC_IRIS_SS_SPD_AXI_CLK_ARES 33
+#define GCC_DDRSS_SPAD_CLK_ARES 34
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_0_PHY_GDSC 1
+#define PCIE_1_GDSC 2
+#define PCIE_1_PHY_GDSC 3
+#define USB30_PRIM_GDSC 4
+#define USB3_PHY_GDSC 5
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_HF0_GDSC 6
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_SF0_GDSC 7
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 8
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 9
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sar2130p-gpucc.h b/include/dt-bindings/clock/qcom,sar2130p-gpucc.h
new file mode 100644
index 000000000000..a2204369110a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sar2130p-gpucc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SAR2130P_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SAR2130P_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_FF_CLK 2
+#define GPU_CC_CX_GMU_CLK 3
+#define GPU_CC_CXO_AON_CLK 4
+#define GPU_CC_CXO_CLK 5
+#define GPU_CC_FF_CLK_SRC 6
+#define GPU_CC_GMU_CLK_SRC 7
+#define GPU_CC_GX_GMU_CLK 8
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 9
+#define GPU_CC_HUB_AON_CLK 10
+#define GPU_CC_HUB_CLK_SRC 11
+#define GPU_CC_HUB_CX_INT_CLK 12
+#define GPU_CC_MEMNOC_GFX_CLK 13
+#define GPU_CC_PLL0 14
+#define GPU_CC_PLL1 15
+#define GPU_CC_SLEEP_CLK 16
+
+/* GDSCs */
+#define GPU_GX_GDSC 0
+#define GPU_CX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sc8180x-camcc.h b/include/dt-bindings/clock/qcom,sc8180x-camcc.h
new file mode 100644
index 000000000000..3e57b80f65e8
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sc8180x-camcc.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SC8180X_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SC8180X_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_AREG_CLK 1
+#define CAM_CC_BPS_AXI_CLK 2
+#define CAM_CC_BPS_CLK 3
+#define CAM_CC_BPS_CLK_SRC 4
+#define CAM_CC_CAMNOC_AXI_CLK 5
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 6
+#define CAM_CC_CAMNOC_DCD_XO_CLK 7
+#define CAM_CC_CCI_0_CLK 8
+#define CAM_CC_CCI_0_CLK_SRC 9
+#define CAM_CC_CCI_1_CLK 10
+#define CAM_CC_CCI_1_CLK_SRC 11
+#define CAM_CC_CCI_2_CLK 12
+#define CAM_CC_CCI_2_CLK_SRC 13
+#define CAM_CC_CCI_3_CLK 14
+#define CAM_CC_CCI_3_CLK_SRC 15
+#define CAM_CC_CORE_AHB_CLK 16
+#define CAM_CC_CPAS_AHB_CLK 17
+#define CAM_CC_CPHY_RX_CLK_SRC 18
+#define CAM_CC_CSI0PHYTIMER_CLK 19
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 20
+#define CAM_CC_CSI1PHYTIMER_CLK 21
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 22
+#define CAM_CC_CSI2PHYTIMER_CLK 23
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 24
+#define CAM_CC_CSI3PHYTIMER_CLK 25
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 26
+#define CAM_CC_CSIPHY0_CLK 27
+#define CAM_CC_CSIPHY1_CLK 28
+#define CAM_CC_CSIPHY2_CLK 29
+#define CAM_CC_CSIPHY3_CLK 30
+#define CAM_CC_FAST_AHB_CLK_SRC 31
+#define CAM_CC_FD_CORE_CLK 32
+#define CAM_CC_FD_CORE_CLK_SRC 33
+#define CAM_CC_FD_CORE_UAR_CLK 34
+#define CAM_CC_ICP_AHB_CLK 35
+#define CAM_CC_ICP_CLK 36
+#define CAM_CC_ICP_CLK_SRC 37
+#define CAM_CC_IFE_0_AXI_CLK 38
+#define CAM_CC_IFE_0_CLK 39
+#define CAM_CC_IFE_0_CLK_SRC 40
+#define CAM_CC_IFE_0_CPHY_RX_CLK 41
+#define CAM_CC_IFE_0_CSID_CLK 42
+#define CAM_CC_IFE_0_CSID_CLK_SRC 43
+#define CAM_CC_IFE_0_DSP_CLK 44
+#define CAM_CC_IFE_1_AXI_CLK 45
+#define CAM_CC_IFE_1_CLK 46
+#define CAM_CC_IFE_1_CLK_SRC 47
+#define CAM_CC_IFE_1_CPHY_RX_CLK 48
+#define CAM_CC_IFE_1_CSID_CLK 49
+#define CAM_CC_IFE_1_CSID_CLK_SRC 50
+#define CAM_CC_IFE_1_DSP_CLK 51
+#define CAM_CC_IFE_2_AXI_CLK 52
+#define CAM_CC_IFE_2_CLK 53
+#define CAM_CC_IFE_2_CLK_SRC 54
+#define CAM_CC_IFE_2_CPHY_RX_CLK 55
+#define CAM_CC_IFE_2_CSID_CLK 56
+#define CAM_CC_IFE_2_CSID_CLK_SRC 57
+#define CAM_CC_IFE_2_DSP_CLK 58
+#define CAM_CC_IFE_3_AXI_CLK 59
+#define CAM_CC_IFE_3_CLK 60
+#define CAM_CC_IFE_3_CLK_SRC 61
+#define CAM_CC_IFE_3_CPHY_RX_CLK 62
+#define CAM_CC_IFE_3_CSID_CLK 63
+#define CAM_CC_IFE_3_CSID_CLK_SRC 64
+#define CAM_CC_IFE_3_DSP_CLK 65
+#define CAM_CC_IFE_LITE_0_CLK 66
+#define CAM_CC_IFE_LITE_0_CLK_SRC 67
+#define CAM_CC_IFE_LITE_0_CPHY_RX_CLK 68
+#define CAM_CC_IFE_LITE_0_CSID_CLK 69
+#define CAM_CC_IFE_LITE_0_CSID_CLK_SRC 70
+#define CAM_CC_IFE_LITE_1_CLK 71
+#define CAM_CC_IFE_LITE_1_CLK_SRC 72
+#define CAM_CC_IFE_LITE_1_CPHY_RX_CLK 73
+#define CAM_CC_IFE_LITE_1_CSID_CLK 74
+#define CAM_CC_IFE_LITE_1_CSID_CLK_SRC 75
+#define CAM_CC_IFE_LITE_2_CLK 76
+#define CAM_CC_IFE_LITE_2_CLK_SRC 77
+#define CAM_CC_IFE_LITE_2_CPHY_RX_CLK 78
+#define CAM_CC_IFE_LITE_2_CSID_CLK 79
+#define CAM_CC_IFE_LITE_2_CSID_CLK_SRC 80
+#define CAM_CC_IFE_LITE_3_CLK 81
+#define CAM_CC_IFE_LITE_3_CLK_SRC 82
+#define CAM_CC_IFE_LITE_3_CPHY_RX_CLK 83
+#define CAM_CC_IFE_LITE_3_CSID_CLK 84
+#define CAM_CC_IFE_LITE_3_CSID_CLK_SRC 85
+#define CAM_CC_IPE_0_AHB_CLK 86
+#define CAM_CC_IPE_0_AREG_CLK 87
+#define CAM_CC_IPE_0_AXI_CLK 88
+#define CAM_CC_IPE_0_CLK 89
+#define CAM_CC_IPE_0_CLK_SRC 90
+#define CAM_CC_IPE_1_AHB_CLK 91
+#define CAM_CC_IPE_1_AREG_CLK 92
+#define CAM_CC_IPE_1_AXI_CLK 93
+#define CAM_CC_IPE_1_CLK 94
+#define CAM_CC_JPEG_CLK 95
+#define CAM_CC_JPEG_CLK_SRC 96
+#define CAM_CC_LRME_CLK 97
+#define CAM_CC_LRME_CLK_SRC 98
+#define CAM_CC_MCLK0_CLK 99
+#define CAM_CC_MCLK0_CLK_SRC 100
+#define CAM_CC_MCLK1_CLK 101
+#define CAM_CC_MCLK1_CLK_SRC 102
+#define CAM_CC_MCLK2_CLK 103
+#define CAM_CC_MCLK2_CLK_SRC 104
+#define CAM_CC_MCLK3_CLK 105
+#define CAM_CC_MCLK3_CLK_SRC 106
+#define CAM_CC_MCLK4_CLK 107
+#define CAM_CC_MCLK4_CLK_SRC 108
+#define CAM_CC_MCLK5_CLK 109
+#define CAM_CC_MCLK5_CLK_SRC 110
+#define CAM_CC_MCLK6_CLK 111
+#define CAM_CC_MCLK6_CLK_SRC 112
+#define CAM_CC_MCLK7_CLK 113
+#define CAM_CC_MCLK7_CLK_SRC 114
+#define CAM_CC_PLL0 115
+#define CAM_CC_PLL0_OUT_EVEN 116
+#define CAM_CC_PLL0_OUT_ODD 117
+#define CAM_CC_PLL1 118
+#define CAM_CC_PLL2 119
+#define CAM_CC_PLL2_OUT_MAIN 120
+#define CAM_CC_PLL3 121
+#define CAM_CC_PLL4 122
+#define CAM_CC_PLL5 123
+#define CAM_CC_PLL6 124
+#define CAM_CC_SLOW_AHB_CLK_SRC 125
+#define CAM_CC_XO_CLK_SRC 126
+
+/* CAM_CC power domains */
+#define BPS_GDSC 0
+#define IFE_0_GDSC 1
+#define IFE_1_GDSC 2
+#define IFE_2_GDSC 3
+#define IFE_3_GDSC 4
+#define IPE_0_GDSC 5
+#define IPE_1_GDSC 6
+#define TITAN_TOP_GDSC 7
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CCI_BCR 2
+#define CAM_CC_CPAS_BCR 3
+#define CAM_CC_CSI0PHY_BCR 4
+#define CAM_CC_CSI1PHY_BCR 5
+#define CAM_CC_CSI2PHY_BCR 6
+#define CAM_CC_CSI3PHY_BCR 7
+#define CAM_CC_FD_BCR 8
+#define CAM_CC_ICP_BCR 9
+#define CAM_CC_IFE_0_BCR 10
+#define CAM_CC_IFE_1_BCR 11
+#define CAM_CC_IFE_2_BCR 12
+#define CAM_CC_IFE_3_BCR 13
+#define CAM_CC_IFE_LITE_0_BCR 14
+#define CAM_CC_IFE_LITE_1_BCR 15
+#define CAM_CC_IFE_LITE_2_BCR 16
+#define CAM_CC_IFE_LITE_3_BCR 17
+#define CAM_CC_IPE_0_BCR 18
+#define CAM_CC_IPE_1_BCR 19
+#define CAM_CC_JPEG_BCR 20
+#define CAM_CC_LRME_BCR 21
+#define CAM_CC_MCLK0_BCR 22
+#define CAM_CC_MCLK1_BCR 23
+#define CAM_CC_MCLK2_BCR 24
+#define CAM_CC_MCLK3_BCR 25
+#define CAM_CC_MCLK4_BCR 26
+#define CAM_CC_MCLK5_BCR 27
+#define CAM_CC_MCLK6_BCR 28
+#define CAM_CC_MCLK7_BCR 29
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sc8280xp-camcc.h b/include/dt-bindings/clock/qcom,sc8280xp-camcc.h
new file mode 100644
index 000000000000..ea5ec73c8c6a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sc8280xp-camcc.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_CLK_QCOM_CAMCC_SC8280XP_H__
+#define __DT_BINDINGS_CLK_QCOM_CAMCC_SC8280XP_H__
+
+/* CAMCC clocks */
+#define CAMCC_PLL0 0
+#define CAMCC_PLL0_OUT_EVEN 1
+#define CAMCC_PLL0_OUT_ODD 2
+#define CAMCC_PLL1 3
+#define CAMCC_PLL1_OUT_EVEN 4
+#define CAMCC_PLL2 5
+#define CAMCC_PLL3 6
+#define CAMCC_PLL3_OUT_EVEN 7
+#define CAMCC_PLL4 8
+#define CAMCC_PLL4_OUT_EVEN 9
+#define CAMCC_PLL5 10
+#define CAMCC_PLL5_OUT_EVEN 11
+#define CAMCC_PLL6 12
+#define CAMCC_PLL6_OUT_EVEN 13
+#define CAMCC_PLL7 14
+#define CAMCC_PLL7_OUT_EVEN 15
+#define CAMCC_PLL7_OUT_ODD 16
+#define CAMCC_BPS_AHB_CLK 17
+#define CAMCC_BPS_AREG_CLK 18
+#define CAMCC_BPS_AXI_CLK 19
+#define CAMCC_BPS_CLK 20
+#define CAMCC_BPS_CLK_SRC 21
+#define CAMCC_CAMNOC_AXI_CLK 22
+#define CAMCC_CAMNOC_AXI_CLK_SRC 23
+#define CAMCC_CAMNOC_DCD_XO_CLK 24
+#define CAMCC_CCI_0_CLK 25
+#define CAMCC_CCI_0_CLK_SRC 26
+#define CAMCC_CCI_1_CLK 27
+#define CAMCC_CCI_1_CLK_SRC 28
+#define CAMCC_CCI_2_CLK 29
+#define CAMCC_CCI_2_CLK_SRC 30
+#define CAMCC_CCI_3_CLK 31
+#define CAMCC_CCI_3_CLK_SRC 32
+#define CAMCC_CORE_AHB_CLK 33
+#define CAMCC_CPAS_AHB_CLK 34
+#define CAMCC_CPHY_RX_CLK_SRC 35
+#define CAMCC_CSI0PHYTIMER_CLK 36
+#define CAMCC_CSI0PHYTIMER_CLK_SRC 37
+#define CAMCC_CSI1PHYTIMER_CLK 38
+#define CAMCC_CSI1PHYTIMER_CLK_SRC 39
+#define CAMCC_CSI2PHYTIMER_CLK 40
+#define CAMCC_CSI2PHYTIMER_CLK_SRC 41
+#define CAMCC_CSI3PHYTIMER_CLK 42
+#define CAMCC_CSI3PHYTIMER_CLK_SRC 43
+#define CAMCC_CSIPHY0_CLK 44
+#define CAMCC_CSIPHY1_CLK 45
+#define CAMCC_CSIPHY2_CLK 46
+#define CAMCC_CSIPHY3_CLK 47
+#define CAMCC_FAST_AHB_CLK_SRC 48
+#define CAMCC_GDSC_CLK 49
+#define CAMCC_ICP_AHB_CLK 50
+#define CAMCC_ICP_CLK 51
+#define CAMCC_ICP_CLK_SRC 52
+#define CAMCC_IFE_0_AXI_CLK 53
+#define CAMCC_IFE_0_CLK 54
+#define CAMCC_IFE_0_CLK_SRC 55
+#define CAMCC_IFE_0_CPHY_RX_CLK 56
+#define CAMCC_IFE_0_CSID_CLK 57
+#define CAMCC_IFE_0_CSID_CLK_SRC 58
+#define CAMCC_IFE_0_DSP_CLK 59
+#define CAMCC_IFE_1_AXI_CLK 60
+#define CAMCC_IFE_1_CLK 61
+#define CAMCC_IFE_1_CLK_SRC 62
+#define CAMCC_IFE_1_CPHY_RX_CLK 63
+#define CAMCC_IFE_1_CSID_CLK 64
+#define CAMCC_IFE_1_CSID_CLK_SRC 65
+#define CAMCC_IFE_1_DSP_CLK 66
+#define CAMCC_IFE_2_AXI_CLK 67
+#define CAMCC_IFE_2_CLK 68
+#define CAMCC_IFE_2_CLK_SRC 69
+#define CAMCC_IFE_2_CPHY_RX_CLK 70
+#define CAMCC_IFE_2_CSID_CLK 71
+#define CAMCC_IFE_2_CSID_CLK_SRC 72
+#define CAMCC_IFE_2_DSP_CLK 73
+#define CAMCC_IFE_3_AXI_CLK 74
+#define CAMCC_IFE_3_CLK 75
+#define CAMCC_IFE_3_CLK_SRC 76
+#define CAMCC_IFE_3_CPHY_RX_CLK 77
+#define CAMCC_IFE_3_CSID_CLK 78
+#define CAMCC_IFE_3_CSID_CLK_SRC 79
+#define CAMCC_IFE_3_DSP_CLK 80
+#define CAMCC_IFE_LITE_0_CLK 81
+#define CAMCC_IFE_LITE_0_CLK_SRC 82
+#define CAMCC_IFE_LITE_0_CPHY_RX_CLK 83
+#define CAMCC_IFE_LITE_0_CSID_CLK 84
+#define CAMCC_IFE_LITE_0_CSID_CLK_SRC 85
+#define CAMCC_IFE_LITE_1_CLK 86
+#define CAMCC_IFE_LITE_1_CLK_SRC 87
+#define CAMCC_IFE_LITE_1_CPHY_RX_CLK 88
+#define CAMCC_IFE_LITE_1_CSID_CLK 89
+#define CAMCC_IFE_LITE_1_CSID_CLK_SRC 90
+#define CAMCC_IFE_LITE_2_CLK 91
+#define CAMCC_IFE_LITE_2_CLK_SRC 92
+#define CAMCC_IFE_LITE_2_CPHY_RX_CLK 93
+#define CAMCC_IFE_LITE_2_CSID_CLK 94
+#define CAMCC_IFE_LITE_2_CSID_CLK_SRC 95
+#define CAMCC_IFE_LITE_3_CLK 96
+#define CAMCC_IFE_LITE_3_CLK_SRC 97
+#define CAMCC_IFE_LITE_3_CPHY_RX_CLK 98
+#define CAMCC_IFE_LITE_3_CSID_CLK 99
+#define CAMCC_IFE_LITE_3_CSID_CLK_SRC 100
+#define CAMCC_IPE_0_AHB_CLK 101
+#define CAMCC_IPE_0_AREG_CLK 102
+#define CAMCC_IPE_0_AXI_CLK 103
+#define CAMCC_IPE_0_CLK 104
+#define CAMCC_IPE_0_CLK_SRC 105
+#define CAMCC_IPE_1_AHB_CLK 106
+#define CAMCC_IPE_1_AREG_CLK 107
+#define CAMCC_IPE_1_AXI_CLK 108
+#define CAMCC_IPE_1_CLK 109
+#define CAMCC_JPEG_CLK 110
+#define CAMCC_JPEG_CLK_SRC 111
+#define CAMCC_LRME_CLK 112
+#define CAMCC_LRME_CLK_SRC 113
+#define CAMCC_MCLK0_CLK 114
+#define CAMCC_MCLK0_CLK_SRC 115
+#define CAMCC_MCLK1_CLK 116
+#define CAMCC_MCLK1_CLK_SRC 117
+#define CAMCC_MCLK2_CLK 118
+#define CAMCC_MCLK2_CLK_SRC 119
+#define CAMCC_MCLK3_CLK 120
+#define CAMCC_MCLK3_CLK_SRC 121
+#define CAMCC_MCLK4_CLK 122
+#define CAMCC_MCLK4_CLK_SRC 123
+#define CAMCC_MCLK5_CLK 124
+#define CAMCC_MCLK5_CLK_SRC 125
+#define CAMCC_MCLK6_CLK 126
+#define CAMCC_MCLK6_CLK_SRC 127
+#define CAMCC_MCLK7_CLK 128
+#define CAMCC_MCLK7_CLK_SRC 129
+#define CAMCC_SLEEP_CLK 130
+#define CAMCC_SLEEP_CLK_SRC 131
+#define CAMCC_SLOW_AHB_CLK_SRC 132
+#define CAMCC_XO_CLK_SRC 133
+
+/* CAMCC resets */
+#define CAMCC_BPS_BCR 0
+#define CAMCC_CAMNOC_BCR 1
+#define CAMCC_CCI_BCR 2
+#define CAMCC_CPAS_BCR 3
+#define CAMCC_CSI0PHY_BCR 4
+#define CAMCC_CSI1PHY_BCR 5
+#define CAMCC_CSI2PHY_BCR 6
+#define CAMCC_CSI3PHY_BCR 7
+#define CAMCC_ICP_BCR 8
+#define CAMCC_IFE_0_BCR 9
+#define CAMCC_IFE_1_BCR 10
+#define CAMCC_IFE_2_BCR 11
+#define CAMCC_IFE_3_BCR 12
+#define CAMCC_IFE_LITE_0_BCR 13
+#define CAMCC_IFE_LITE_1_BCR 14
+#define CAMCC_IFE_LITE_2_BCR 15
+#define CAMCC_IFE_LITE_3_BCR 16
+#define CAMCC_IPE_0_BCR 17
+#define CAMCC_IPE_1_BCR 18
+#define CAMCC_JPEG_BCR 19
+#define CAMCC_LRME_BCR 20
+
+/* CAMCC GDSCRs */
+#define BPS_GDSC 0
+#define IFE_0_GDSC 1
+#define IFE_1_GDSC 2
+#define IFE_2_GDSC 3
+#define IFE_3_GDSC 4
+#define IPE_0_GDSC 5
+#define IPE_1_GDSC 6
+#define TITAN_TOP_GDSC 7
+
+#endif /* __DT_BINDINGS_CLK_QCOM_CAMCC_SC8280XP_H__ */
diff --git a/include/dt-bindings/clock/qcom,sc8280xp-lpasscc.h b/include/dt-bindings/clock/qcom,sc8280xp-lpasscc.h
new file mode 100644
index 000000000000..d190d57fc81a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sc8280xp-lpasscc.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASSCC_SC8280XP_H
+#define _DT_BINDINGS_CLK_QCOM_LPASSCC_SC8280XP_H
+
+/* LPASS AUDIO CC CSR */
+#define LPASS_AUDIO_SWR_RX_CGCR 0
+#define LPASS_AUDIO_SWR_WSA_CGCR 1
+#define LPASS_AUDIO_SWR_WSA2_CGCR 2
+
+/* LPASS TCSR */
+#define LPASS_AUDIO_SWR_TX_CGCR 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sdx75-gcc.h b/include/dt-bindings/clock/qcom,sdx75-gcc.h
new file mode 100644
index 000000000000..a470e8c4fd41
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sdx75-gcc.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SDX75_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SDX75_H
+
+/* GCC clocks */
+#define GPLL0 0
+#define GPLL0_OUT_EVEN 1
+#define GPLL4 2
+#define GPLL5 3
+#define GPLL6 4
+#define GPLL8 5
+#define GCC_AHB_PCIE_LINK_CLK 6
+#define GCC_BOOT_ROM_AHB_CLK 7
+#define GCC_EEE_EMAC0_CLK 8
+#define GCC_EEE_EMAC0_CLK_SRC 9
+#define GCC_EEE_EMAC1_CLK 10
+#define GCC_EEE_EMAC1_CLK_SRC 11
+#define GCC_EMAC0_AXI_CLK 12
+#define GCC_EMAC0_CC_SGMIIPHY_RX_CLK 13
+#define GCC_EMAC0_CC_SGMIIPHY_RX_CLK_SRC 14
+#define GCC_EMAC0_CC_SGMIIPHY_TX_CLK 15
+#define GCC_EMAC0_CC_SGMIIPHY_TX_CLK_SRC 16
+#define GCC_EMAC0_PHY_AUX_CLK 17
+#define GCC_EMAC0_PHY_AUX_CLK_SRC 18
+#define GCC_EMAC0_PTP_CLK 19
+#define GCC_EMAC0_PTP_CLK_SRC 20
+#define GCC_EMAC0_RGMII_CLK 21
+#define GCC_EMAC0_RGMII_CLK_SRC 22
+#define GCC_EMAC0_RPCS_RX_CLK 23
+#define GCC_EMAC0_RPCS_TX_CLK 24
+#define GCC_EMAC0_SGMIIPHY_MAC_RCLK_SRC 25
+#define GCC_EMAC0_SGMIIPHY_MAC_TCLK_SRC 26
+#define GCC_EMAC0_SLV_AHB_CLK 27
+#define GCC_EMAC0_XGXS_RX_CLK 28
+#define GCC_EMAC0_XGXS_TX_CLK 29
+#define GCC_EMAC1_AXI_CLK 30
+#define GCC_EMAC1_CC_SGMIIPHY_RX_CLK 31
+#define GCC_EMAC1_CC_SGMIIPHY_RX_CLK_SRC 32
+#define GCC_EMAC1_CC_SGMIIPHY_TX_CLK 33
+#define GCC_EMAC1_CC_SGMIIPHY_TX_CLK_SRC 34
+#define GCC_EMAC1_PHY_AUX_CLK 35
+#define GCC_EMAC1_PHY_AUX_CLK_SRC 36
+#define GCC_EMAC1_PTP_CLK 37
+#define GCC_EMAC1_PTP_CLK_SRC 38
+#define GCC_EMAC1_RGMII_CLK 39
+#define GCC_EMAC1_RGMII_CLK_SRC 40
+#define GCC_EMAC1_RPCS_RX_CLK 41
+#define GCC_EMAC1_RPCS_TX_CLK 42
+#define GCC_EMAC1_SGMIIPHY_MAC_RCLK_SRC 43
+#define GCC_EMAC1_SGMIIPHY_MAC_TCLK_SRC 44
+#define GCC_EMAC1_SLV_AHB_CLK 45
+#define GCC_EMAC1_XGXS_RX_CLK 46
+#define GCC_EMAC1_XGXS_TX_CLK 47
+#define GCC_EMAC_0_CLKREF_EN 48
+#define GCC_EMAC_1_CLKREF_EN 49
+#define GCC_GP1_CLK 50
+#define GCC_GP1_CLK_SRC 51
+#define GCC_GP2_CLK 52
+#define GCC_GP2_CLK_SRC 53
+#define GCC_GP3_CLK 54
+#define GCC_GP3_CLK_SRC 55
+#define GCC_PCIE_0_CLKREF_EN 56
+#define GCC_PCIE_1_AUX_CLK 57
+#define GCC_PCIE_1_AUX_PHY_CLK_SRC 58
+#define GCC_PCIE_1_CFG_AHB_CLK 59
+#define GCC_PCIE_1_CLKREF_EN 60
+#define GCC_PCIE_1_MSTR_AXI_CLK 61
+#define GCC_PCIE_1_PHY_RCHNG_CLK 62
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 63
+#define GCC_PCIE_1_PIPE_CLK 64
+#define GCC_PCIE_1_PIPE_CLK_SRC 65
+#define GCC_PCIE_1_PIPE_DIV2_CLK 66
+#define GCC_PCIE_1_PIPE_DIV2_CLK_SRC 67
+#define GCC_PCIE_1_SLV_AXI_CLK 68
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 69
+#define GCC_PCIE_2_AUX_CLK 70
+#define GCC_PCIE_2_AUX_PHY_CLK_SRC 71
+#define GCC_PCIE_2_CFG_AHB_CLK 72
+#define GCC_PCIE_2_CLKREF_EN 73
+#define GCC_PCIE_2_MSTR_AXI_CLK 74
+#define GCC_PCIE_2_PHY_RCHNG_CLK 75
+#define GCC_PCIE_2_PHY_RCHNG_CLK_SRC 76
+#define GCC_PCIE_2_PIPE_CLK 77
+#define GCC_PCIE_2_PIPE_CLK_SRC 78
+#define GCC_PCIE_2_PIPE_DIV2_CLK 79
+#define GCC_PCIE_2_PIPE_DIV2_CLK_SRC 80
+#define GCC_PCIE_2_SLV_AXI_CLK 81
+#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 82
+#define GCC_PCIE_AUX_CLK 83
+#define GCC_PCIE_AUX_CLK_SRC 84
+#define GCC_PCIE_AUX_PHY_CLK_SRC 85
+#define GCC_PCIE_CFG_AHB_CLK 86
+#define GCC_PCIE_MSTR_AXI_CLK 87
+#define GCC_PCIE_PIPE_CLK 88
+#define GCC_PCIE_PIPE_CLK_SRC 89
+#define GCC_PCIE_RCHNG_PHY_CLK 90
+#define GCC_PCIE_RCHNG_PHY_CLK_SRC 91
+#define GCC_PCIE_SLEEP_CLK 92
+#define GCC_PCIE_SLV_AXI_CLK 93
+#define GCC_PCIE_SLV_Q2A_AXI_CLK 94
+#define GCC_PDM2_CLK 95
+#define GCC_PDM2_CLK_SRC 96
+#define GCC_PDM_AHB_CLK 97
+#define GCC_PDM_XO4_CLK 98
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 99
+#define GCC_QUPV3_WRAP0_CORE_CLK 100
+#define GCC_QUPV3_WRAP0_S0_CLK 101
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 102
+#define GCC_QUPV3_WRAP0_S1_CLK 103
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 104
+#define GCC_QUPV3_WRAP0_S2_CLK 105
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 106
+#define GCC_QUPV3_WRAP0_S3_CLK 107
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 108
+#define GCC_QUPV3_WRAP0_S4_CLK 109
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 110
+#define GCC_QUPV3_WRAP0_S5_CLK 111
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 112
+#define GCC_QUPV3_WRAP0_S6_CLK 113
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 114
+#define GCC_QUPV3_WRAP0_S7_CLK 115
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 116
+#define GCC_QUPV3_WRAP0_S8_CLK 117
+#define GCC_QUPV3_WRAP0_S8_CLK_SRC 118
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 119
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 120
+#define GCC_SDCC1_AHB_CLK 121
+#define GCC_SDCC1_APPS_CLK 122
+#define GCC_SDCC1_APPS_CLK_SRC 123
+#define GCC_SDCC2_AHB_CLK 124
+#define GCC_SDCC2_APPS_CLK 125
+#define GCC_SDCC2_APPS_CLK_SRC 126
+#define GCC_USB2_CLKREF_EN 127
+#define GCC_USB30_MASTER_CLK 128
+#define GCC_USB30_MASTER_CLK_SRC 129
+#define GCC_USB30_MOCK_UTMI_CLK 130
+#define GCC_USB30_MOCK_UTMI_CLK_SRC 131
+#define GCC_USB30_MOCK_UTMI_POSTDIV_CLK_SRC 132
+#define GCC_USB30_MSTR_AXI_CLK 133
+#define GCC_USB30_SLEEP_CLK 134
+#define GCC_USB30_SLV_AHB_CLK 135
+#define GCC_USB3_PHY_AUX_CLK 136
+#define GCC_USB3_PHY_AUX_CLK_SRC 137
+#define GCC_USB3_PHY_PIPE_CLK 138
+#define GCC_USB3_PHY_PIPE_CLK_SRC 139
+#define GCC_USB3_PRIM_CLKREF_EN 140
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 141
+#define GCC_XO_PCIE_LINK_CLK 142
+
+/* GCC power domains */
+#define GCC_EMAC0_GDSC 0
+#define GCC_EMAC1_GDSC 1
+#define GCC_PCIE_1_GDSC 2
+#define GCC_PCIE_1_PHY_GDSC 3
+#define GCC_PCIE_2_GDSC 4
+#define GCC_PCIE_2_PHY_GDSC 5
+#define GCC_PCIE_GDSC 6
+#define GCC_PCIE_PHY_GDSC 7
+#define GCC_USB30_GDSC 8
+#define GCC_USB3_PHY_GDSC 9
+
+/* GCC resets */
+#define GCC_EMAC0_BCR 0
+#define GCC_EMAC1_BCR 1
+#define GCC_EMMC_BCR 2
+#define GCC_PCIE_1_BCR 3
+#define GCC_PCIE_1_LINK_DOWN_BCR 4
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_1_PHY_BCR 6
+#define GCC_PCIE_2_BCR 7
+#define GCC_PCIE_2_LINK_DOWN_BCR 8
+#define GCC_PCIE_2_NOCSR_COM_PHY_BCR 9
+#define GCC_PCIE_2_PHY_BCR 10
+#define GCC_PCIE_BCR 11
+#define GCC_PCIE_LINK_DOWN_BCR 12
+#define GCC_PCIE_NOCSR_COM_PHY_BCR 13
+#define GCC_PCIE_PHY_BCR 14
+#define GCC_PCIE_PHY_CFG_AHB_BCR 15
+#define GCC_PCIE_PHY_COM_BCR 16
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR 17
+#define GCC_QUSB2PHY_BCR 18
+#define GCC_TCSR_PCIE_BCR 19
+#define GCC_USB30_BCR 20
+#define GCC_USB3_PHY_BCR 21
+#define GCC_USB3PHY_PHY_BCR 22
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 23
+#define GCC_EMAC0_RGMII_CLK_ARES 24
+
+#endif
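
On the provider side, the SoC dtsi includes the same header so the clock-controller node and its consumers agree on one id space. A hypothetical provider declaration for illustration (the register address here is an assumption; the compatible string follows the filename convention above):

	gcc: clock-controller@80000 {
		compatible = "qcom,sdx75-gcc";
		reg = <0x00080000 0x1f7400>;	/* assumed base/size */
		#clock-cells = <1>;
		#reset-cells = <1>;
		#power-domain-cells = <1>;
	};

With this in place, the GCC power domains and resets defined above become addressable as <&gcc GCC_USB30_GDSC> and <&gcc GCC_USB30_BCR> from any consumer node.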
diff --git a/include/dt-bindings/clock/qcom,sm4450-camcc.h b/include/dt-bindings/clock/qcom,sm4450-camcc.h
new file mode 100644
index 000000000000..bf077951bf1c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm4450-camcc.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM4450_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM4450_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_AREG_CLK 1
+#define CAM_CC_BPS_CLK 2
+#define CAM_CC_BPS_CLK_SRC 3
+#define CAM_CC_CAMNOC_ATB_CLK 4
+#define CAM_CC_CAMNOC_AXI_CLK 5
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 6
+#define CAM_CC_CAMNOC_AXI_HF_CLK 7
+#define CAM_CC_CAMNOC_AXI_SF_CLK 8
+#define CAM_CC_CCI_0_CLK 9
+#define CAM_CC_CCI_0_CLK_SRC 10
+#define CAM_CC_CCI_1_CLK 11
+#define CAM_CC_CCI_1_CLK_SRC 12
+#define CAM_CC_CORE_AHB_CLK 13
+#define CAM_CC_CPAS_AHB_CLK 14
+#define CAM_CC_CPHY_RX_CLK_SRC 15
+#define CAM_CC_CRE_AHB_CLK 16
+#define CAM_CC_CRE_CLK 17
+#define CAM_CC_CRE_CLK_SRC 18
+#define CAM_CC_CSI0PHYTIMER_CLK 19
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 20
+#define CAM_CC_CSI1PHYTIMER_CLK 21
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 22
+#define CAM_CC_CSI2PHYTIMER_CLK 23
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 24
+#define CAM_CC_CSIPHY0_CLK 25
+#define CAM_CC_CSIPHY1_CLK 26
+#define CAM_CC_CSIPHY2_CLK 27
+#define CAM_CC_FAST_AHB_CLK_SRC 28
+#define CAM_CC_ICP_ATB_CLK 29
+#define CAM_CC_ICP_CLK 30
+#define CAM_CC_ICP_CLK_SRC 31
+#define CAM_CC_ICP_CTI_CLK 32
+#define CAM_CC_ICP_TS_CLK 33
+#define CAM_CC_MCLK0_CLK 34
+#define CAM_CC_MCLK0_CLK_SRC 35
+#define CAM_CC_MCLK1_CLK 36
+#define CAM_CC_MCLK1_CLK_SRC 37
+#define CAM_CC_MCLK2_CLK 38
+#define CAM_CC_MCLK2_CLK_SRC 39
+#define CAM_CC_MCLK3_CLK 40
+#define CAM_CC_MCLK3_CLK_SRC 41
+#define CAM_CC_OPE_0_AHB_CLK 42
+#define CAM_CC_OPE_0_AREG_CLK 43
+#define CAM_CC_OPE_0_CLK 44
+#define CAM_CC_OPE_0_CLK_SRC 45
+#define CAM_CC_PLL0 46
+#define CAM_CC_PLL0_OUT_EVEN 47
+#define CAM_CC_PLL0_OUT_ODD 48
+#define CAM_CC_PLL1 49
+#define CAM_CC_PLL1_OUT_EVEN 50
+#define CAM_CC_PLL2 51
+#define CAM_CC_PLL2_OUT_EVEN 52
+#define CAM_CC_PLL3 53
+#define CAM_CC_PLL3_OUT_EVEN 54
+#define CAM_CC_PLL4 55
+#define CAM_CC_PLL4_OUT_EVEN 56
+#define CAM_CC_SLOW_AHB_CLK_SRC 57
+#define CAM_CC_SOC_AHB_CLK 58
+#define CAM_CC_SYS_TMR_CLK 59
+#define CAM_CC_TFE_0_AHB_CLK 60
+#define CAM_CC_TFE_0_CLK 61
+#define CAM_CC_TFE_0_CLK_SRC 62
+#define CAM_CC_TFE_0_CPHY_RX_CLK 63
+#define CAM_CC_TFE_0_CSID_CLK 64
+#define CAM_CC_TFE_0_CSID_CLK_SRC 65
+#define CAM_CC_TFE_1_AHB_CLK 66
+#define CAM_CC_TFE_1_CLK 67
+#define CAM_CC_TFE_1_CLK_SRC 68
+#define CAM_CC_TFE_1_CPHY_RX_CLK 69
+#define CAM_CC_TFE_1_CSID_CLK 70
+#define CAM_CC_TFE_1_CSID_CLK_SRC 71
+
+/* CAM_CC power domains */
+#define CAM_CC_CAMSS_TOP_GDSC 0
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CAMSS_TOP_BCR 2
+#define CAM_CC_CCI_0_BCR 3
+#define CAM_CC_CCI_1_BCR 4
+#define CAM_CC_CPAS_BCR 5
+#define CAM_CC_CRE_BCR 6
+#define CAM_CC_CSI0PHY_BCR 7
+#define CAM_CC_CSI1PHY_BCR 8
+#define CAM_CC_CSI2PHY_BCR 9
+#define CAM_CC_ICP_BCR 10
+#define CAM_CC_MCLK0_BCR 11
+#define CAM_CC_MCLK1_BCR 12
+#define CAM_CC_MCLK2_BCR 13
+#define CAM_CC_MCLK3_BCR 14
+#define CAM_CC_OPE_0_BCR 15
+#define CAM_CC_TFE_0_BCR 16
+#define CAM_CC_TFE_1_BCR 17
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm4450-dispcc.h b/include/dt-bindings/clock/qcom,sm4450-dispcc.h
new file mode 100644
index 000000000000..ca6f2ef90157
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm4450-dispcc.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM4450_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM4450_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_AHB1_CLK 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_ESC0_CLK 7
+#define DISP_CC_MDSS_ESC0_CLK_SRC 8
+#define DISP_CC_MDSS_MDP1_CLK 9
+#define DISP_CC_MDSS_MDP_CLK 10
+#define DISP_CC_MDSS_MDP_CLK_SRC 11
+#define DISP_CC_MDSS_MDP_LUT1_CLK 12
+#define DISP_CC_MDSS_MDP_LUT_CLK 13
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 14
+#define DISP_CC_MDSS_PCLK0_CLK 15
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 16
+#define DISP_CC_MDSS_ROT1_CLK 17
+#define DISP_CC_MDSS_ROT_CLK 18
+#define DISP_CC_MDSS_ROT_CLK_SRC 19
+#define DISP_CC_MDSS_RSCC_AHB_CLK 20
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 21
+#define DISP_CC_MDSS_VSYNC1_CLK 22
+#define DISP_CC_MDSS_VSYNC_CLK 23
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 24
+#define DISP_CC_PLL0 25
+#define DISP_CC_PLL1 26
+#define DISP_CC_SLEEP_CLK 27
+#define DISP_CC_SLEEP_CLK_SRC 28
+#define DISP_CC_XO_CLK 29
+#define DISP_CC_XO_CLK_SRC 30
+
+/* DISP_CC power domains */
+#define DISP_CC_MDSS_CORE_GDSC 0
+#define DISP_CC_MDSS_CORE_INT2_GDSC 1
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm4450-gcc.h b/include/dt-bindings/clock/qcom,sm4450-gcc.h
new file mode 100644
index 000000000000..c18e47a86f40
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm4450-gcc.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM4450_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM4450_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_0_AXI_CLK 0
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_BOOT_ROM_AHB_CLK 4
+#define GCC_CAMERA_AHB_CLK 5
+#define GCC_CAMERA_HF_AXI_CLK 6
+#define GCC_CAMERA_SF_AXI_CLK 7
+#define GCC_CAMERA_SLEEP_CLK 8
+#define GCC_CAMERA_XO_CLK 9
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 10
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 11
+#define GCC_DDRSS_GPU_AXI_CLK 12
+#define GCC_DDRSS_PCIE_SF_TBU_CLK 13
+#define GCC_DISP_AHB_CLK 14
+#define GCC_DISP_HF_AXI_CLK 15
+#define GCC_DISP_XO_CLK 16
+#define GCC_EUSB3_0_CLKREF_EN 17
+#define GCC_GP1_CLK 18
+#define GCC_GP1_CLK_SRC 19
+#define GCC_GP2_CLK 20
+#define GCC_GP2_CLK_SRC 21
+#define GCC_GP3_CLK 22
+#define GCC_GP3_CLK_SRC 23
+#define GCC_GPLL0 24
+#define GCC_GPLL0_OUT_EVEN 25
+#define GCC_GPLL0_OUT_ODD 26
+#define GCC_GPLL1 27
+#define GCC_GPLL3 28
+#define GCC_GPLL4 29
+#define GCC_GPLL9 30
+#define GCC_GPLL10 31
+#define GCC_GPU_CFG_AHB_CLK 32
+#define GCC_GPU_GPLL0_CLK_SRC 33
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 34
+#define GCC_GPU_MEMNOC_GFX_CLK 35
+#define GCC_GPU_SNOC_DVM_GFX_CLK 36
+#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_CLK 37
+#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_CLK 38
+#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_CLK 39
+#define GCC_HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_CLK 40
+#define GCC_HLOS1_VOTE_MMNOC_MMU_TBU_HF0_CLK 41
+#define GCC_HLOS1_VOTE_MMNOC_MMU_TBU_HF1_CLK 42
+#define GCC_HLOS1_VOTE_MMNOC_MMU_TBU_SF0_CLK 43
+#define GCC_HLOS1_VOTE_MMU_TCU_CLK 44
+#define GCC_PCIE_0_AUX_CLK 45
+#define GCC_PCIE_0_AUX_CLK_SRC 46
+#define GCC_PCIE_0_CFG_AHB_CLK 47
+#define GCC_PCIE_0_CLKREF_EN 48
+#define GCC_PCIE_0_MSTR_AXI_CLK 49
+#define GCC_PCIE_0_PHY_RCHNG_CLK 50
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 51
+#define GCC_PCIE_0_PIPE_CLK 52
+#define GCC_PCIE_0_PIPE_CLK_SRC 53
+#define GCC_PCIE_0_PIPE_DIV2_CLK 54
+#define GCC_PCIE_0_PIPE_DIV2_CLK_SRC 55
+#define GCC_PCIE_0_SLV_AXI_CLK 56
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 57
+#define GCC_PDM2_CLK 58
+#define GCC_PDM2_CLK_SRC 59
+#define GCC_PDM_AHB_CLK 60
+#define GCC_PDM_XO4_CLK 61
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 62
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 63
+#define GCC_QMIP_DISP_AHB_CLK 64
+#define GCC_QMIP_GPU_AHB_CLK 65
+#define GCC_QMIP_PCIE_AHB_CLK 66
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 67
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 68
+#define GCC_QUPV3_WRAP0_CORE_CLK 69
+#define GCC_QUPV3_WRAP0_S0_CLK 70
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 71
+#define GCC_QUPV3_WRAP0_S1_CLK 72
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 73
+#define GCC_QUPV3_WRAP0_S2_CLK 74
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 75
+#define GCC_QUPV3_WRAP0_S3_CLK 76
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 77
+#define GCC_QUPV3_WRAP0_S4_CLK 78
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 79
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 80
+#define GCC_QUPV3_WRAP1_CORE_CLK 81
+#define GCC_QUPV3_WRAP1_S0_CLK 82
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 83
+#define GCC_QUPV3_WRAP1_S1_CLK 84
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 85
+#define GCC_QUPV3_WRAP1_S2_CLK 86
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S3_CLK 88
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S4_CLK 90
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 91
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 92
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 93
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 94
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 95
+#define GCC_SDCC1_AHB_CLK 96
+#define GCC_SDCC1_APPS_CLK 97
+#define GCC_SDCC1_APPS_CLK_SRC 98
+#define GCC_SDCC1_ICE_CORE_CLK 99
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 100
+#define GCC_SDCC2_AHB_CLK 101
+#define GCC_SDCC2_APPS_CLK 102
+#define GCC_SDCC2_APPS_CLK_SRC 103
+#define GCC_UFS_0_CLKREF_EN 104
+#define GCC_UFS_PAD_CLKREF_EN 105
+#define GCC_UFS_PHY_AHB_CLK 106
+#define GCC_UFS_PHY_AXI_CLK 107
+#define GCC_UFS_PHY_AXI_CLK_SRC 108
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 109
+#define GCC_UFS_PHY_ICE_CORE_CLK 110
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 111
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 112
+#define GCC_UFS_PHY_PHY_AUX_CLK 113
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 114
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 115
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 116
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 117
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 118
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 119
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 120
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 121
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 122
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 123
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 124
+#define GCC_USB30_PRIM_MASTER_CLK 125
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 126
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 127
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 128
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 129
+#define GCC_USB30_PRIM_SLEEP_CLK 130
+#define GCC_USB3_0_CLKREF_EN 131
+#define GCC_USB3_PRIM_PHY_AUX_CLK 132
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 133
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 134
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 135
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 136
+#define GCC_VCODEC0_AXI_CLK 137
+#define GCC_VENUS_CTL_AXI_CLK 138
+#define GCC_VIDEO_AHB_CLK 139
+#define GCC_VIDEO_THROTTLE_CORE_CLK 140
+#define GCC_VIDEO_VCODEC0_SYS_CLK 141
+#define GCC_VIDEO_VENUS_CLK_SRC 142
+#define GCC_VIDEO_VENUS_CTL_CLK 143
+#define GCC_VIDEO_XO_CLK 144
+
+/* GCC power domains */
+#define GCC_PCIE_0_GDSC 0
+#define GCC_UFS_PHY_GDSC 1
+#define GCC_USB30_PRIM_GDSC 2
+#define GCC_VCODEC0_GDSC 3
+#define GCC_VENUS_GDSC 4
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_PHY_BCR 8
+#define GCC_PCIE_PHY_CFG_AHB_BCR 9
+#define GCC_PCIE_PHY_COM_BCR 10
+#define GCC_PDM_BCR 11
+#define GCC_QUPV3_WRAPPER_0_BCR 12
+#define GCC_QUPV3_WRAPPER_1_BCR 13
+#define GCC_QUSB2PHY_PRIM_BCR 14
+#define GCC_QUSB2PHY_SEC_BCR 15
+#define GCC_SDCC1_BCR 16
+#define GCC_SDCC2_BCR 17
+#define GCC_UFS_PHY_BCR 18
+#define GCC_USB30_PRIM_BCR 19
+#define GCC_USB3_DP_PHY_PRIM_BCR 20
+#define GCC_USB3_DP_PHY_SEC_BCR 21
+#define GCC_USB3_PHY_PRIM_BCR 22
+#define GCC_USB3_PHY_SEC_BCR 23
+#define GCC_USB3PHY_PHY_PRIM_BCR 24
+#define GCC_USB3PHY_PHY_SEC_BCR 25
+#define GCC_VCODEC0_BCR 26
+#define GCC_VENUS_BCR 27
+#define GCC_VIDEO_BCR 28
+#define GCC_VIDEO_VENUS_BCR 29
+#define GCC_VENUS_CTL_AXI_CLK_ARES 30
+#define GCC_VIDEO_VENUS_CTL_CLK_ARES 31
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm4450-gpucc.h b/include/dt-bindings/clock/qcom,sm4450-gpucc.h
new file mode 100644
index 000000000000..304f83e5f645
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm4450-gpucc.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM4450_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM4450_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CB_CLK 1
+#define GPU_CC_CRC_AHB_CLK 2
+#define GPU_CC_CX_FF_CLK 3
+#define GPU_CC_CX_GFX3D_CLK 4
+#define GPU_CC_CX_GFX3D_SLV_CLK 5
+#define GPU_CC_CX_GMU_CLK 6
+#define GPU_CC_CX_SNOC_DVM_CLK 7
+#define GPU_CC_CXO_AON_CLK 8
+#define GPU_CC_CXO_CLK 9
+#define GPU_CC_DEMET_CLK 10
+#define GPU_CC_DEMET_DIV_CLK_SRC 11
+#define GPU_CC_FF_CLK_SRC 12
+#define GPU_CC_FREQ_MEASURE_CLK 13
+#define GPU_CC_GMU_CLK_SRC 14
+#define GPU_CC_GX_CXO_CLK 15
+#define GPU_CC_GX_FF_CLK 16
+#define GPU_CC_GX_GFX3D_CLK 17
+#define GPU_CC_GX_GFX3D_CLK_SRC 18
+#define GPU_CC_GX_GFX3D_RDVM_CLK 19
+#define GPU_CC_GX_GMU_CLK 20
+#define GPU_CC_GX_VSENSE_CLK 21
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 22
+#define GPU_CC_HUB_AON_CLK 23
+#define GPU_CC_HUB_CLK_SRC 24
+#define GPU_CC_HUB_CX_INT_CLK 25
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 26
+#define GPU_CC_MEMNOC_GFX_CLK 27
+#define GPU_CC_MND1X_0_GFX3D_CLK 28
+#define GPU_CC_PLL0 29
+#define GPU_CC_PLL1 30
+#define GPU_CC_SLEEP_CLK 31
+#define GPU_CC_XO_CLK_SRC 32
+#define GPU_CC_XO_DIV_CLK_SRC 33
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+#define GPU_CC_GX_GDSC 1
+
+/* GPU_CC resets */
+#define GPU_CC_ACD_BCR 0
+#define GPU_CC_CB_BCR 1
+#define GPU_CC_CX_BCR 2
+#define GPU_CC_FAST_HUB_BCR 3
+#define GPU_CC_FF_BCR 4
+#define GPU_CC_GFX3D_AON_BCR 5
+#define GPU_CC_GMU_BCR 6
+#define GPU_CC_GX_BCR 7
+#define GPU_CC_XO_BCR 8
+#define GPU_CC_GX_ACD_IROOT_BCR 9
+#define GPU_CC_RBCPR_BCR 10
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm6115-dispcc.h b/include/dt-bindings/clock/qcom,sm6115-dispcc.h
new file mode 100644
index 000000000000..d1a6c45b5029
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6115-dispcc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6115_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6115_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_PLL0_OUT_MAIN 1
+#define DISP_CC_MDSS_AHB_CLK 2
+#define DISP_CC_MDSS_AHB_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_CLK 4
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 7
+#define DISP_CC_MDSS_ESC0_CLK 8
+#define DISP_CC_MDSS_ESC0_CLK_SRC 9
+#define DISP_CC_MDSS_MDP_CLK 10
+#define DISP_CC_MDSS_MDP_CLK_SRC 11
+#define DISP_CC_MDSS_MDP_LUT_CLK 12
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 13
+#define DISP_CC_MDSS_PCLK0_CLK 14
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 15
+#define DISP_CC_MDSS_ROT_CLK 16
+#define DISP_CC_MDSS_ROT_CLK_SRC 17
+#define DISP_CC_MDSS_VSYNC_CLK 18
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 19
+#define DISP_CC_SLEEP_CLK 20
+#define DISP_CC_SLEEP_CLK_SRC 21
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm6115-gpucc.h b/include/dt-bindings/clock/qcom,sm6115-gpucc.h
new file mode 100644
index 000000000000..945f21a7d745
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6115-gpucc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6115_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6115_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL0_OUT_AUX2 1
+#define GPU_CC_PLL1 2
+#define GPU_CC_PLL1_OUT_AUX 3
+#define GPU_CC_AHB_CLK 4
+#define GPU_CC_CRC_AHB_CLK 5
+#define GPU_CC_CX_GFX3D_CLK 6
+#define GPU_CC_CX_GMU_CLK 7
+#define GPU_CC_CX_SNOC_DVM_CLK 8
+#define GPU_CC_CXO_AON_CLK 9
+#define GPU_CC_CXO_CLK 10
+#define GPU_CC_GMU_CLK_SRC 11
+#define GPU_CC_GX_CXO_CLK 12
+#define GPU_CC_GX_GFX3D_CLK 13
+#define GPU_CC_GX_GFX3D_CLK_SRC 14
+#define GPU_CC_SLEEP_CLK 15
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 16
+
+/* Resets */
+#define GPU_GX_BCR 0
+
+/* GDSCs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm6115-lpasscc.h b/include/dt-bindings/clock/qcom,sm6115-lpasscc.h
new file mode 100644
index 000000000000..799274517c9a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6115-lpasscc.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_LPASSCC_SM6115_H
+#define _DT_BINDINGS_CLK_QCOM_LPASSCC_SM6115_H
+
+/* LPASS CC */
+#define LPASS_SWR_TX_CONFIG_CGCR 0
+
+/* LPASS_AUDIO CC */
+#define LPASS_AUDIO_SWR_RX_CGCR 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm6125-gpucc.h b/include/dt-bindings/clock/qcom,sm6125-gpucc.h
new file mode 100644
index 000000000000..ce5bd920f2c4
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6125-gpucc.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6125_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM6125_H
+
+/* Clocks */
+#define GPU_CC_PLL0_OUT_AUX2 0
+#define GPU_CC_PLL1_OUT_AUX2 1
+#define GPU_CC_CRC_AHB_CLK 2
+#define GPU_CC_CX_APB_CLK 3
+#define GPU_CC_CX_GFX3D_CLK 4
+#define GPU_CC_CX_GMU_CLK 5
+#define GPU_CC_CX_SNOC_DVM_CLK 6
+#define GPU_CC_CXO_AON_CLK 7
+#define GPU_CC_CXO_CLK 8
+#define GPU_CC_GMU_CLK_SRC 9
+#define GPU_CC_SLEEP_CLK 10
+#define GPU_CC_GX_GFX3D_CLK 11
+#define GPU_CC_GX_GFX3D_CLK_SRC 12
+#define GPU_CC_AHB_CLK 13
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 14
+
+/* GDSCs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm6350-camcc.h b/include/dt-bindings/clock/qcom,sm6350-camcc.h
new file mode 100644
index 000000000000..c6bcdc8fd485
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6350-camcc.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAMCC_SM6350_H
+#define _DT_BINDINGS_CLK_QCOM_CAMCC_SM6350_H
+
+/* CAMCC clocks */
+#define CAMCC_PLL2_OUT_EARLY 0
+#define CAMCC_PLL0 1
+#define CAMCC_PLL0_OUT_EVEN 2
+#define CAMCC_PLL1 3
+#define CAMCC_PLL1_OUT_EVEN 4
+#define CAMCC_PLL2 5
+#define CAMCC_PLL2_OUT_MAIN 6
+#define CAMCC_PLL3 7
+#define CAMCC_BPS_AHB_CLK 8
+#define CAMCC_BPS_AREG_CLK 9
+#define CAMCC_BPS_AXI_CLK 10
+#define CAMCC_BPS_CLK 11
+#define CAMCC_BPS_CLK_SRC 12
+#define CAMCC_CAMNOC_ATB_CLK 13
+#define CAMCC_CAMNOC_AXI_CLK 14
+#define CAMCC_CCI_0_CLK 15
+#define CAMCC_CCI_0_CLK_SRC 16
+#define CAMCC_CCI_1_CLK 17
+#define CAMCC_CCI_1_CLK_SRC 18
+#define CAMCC_CORE_AHB_CLK 19
+#define CAMCC_CPAS_AHB_CLK 20
+#define CAMCC_CPHY_RX_CLK_SRC 21
+#define CAMCC_CSI0PHYTIMER_CLK 22
+#define CAMCC_CSI0PHYTIMER_CLK_SRC 23
+#define CAMCC_CSI1PHYTIMER_CLK 24
+#define CAMCC_CSI1PHYTIMER_CLK_SRC 25
+#define CAMCC_CSI2PHYTIMER_CLK 26
+#define CAMCC_CSI2PHYTIMER_CLK_SRC 27
+#define CAMCC_CSI3PHYTIMER_CLK 28
+#define CAMCC_CSI3PHYTIMER_CLK_SRC 29
+#define CAMCC_CSIPHY0_CLK 30
+#define CAMCC_CSIPHY1_CLK 31
+#define CAMCC_CSIPHY2_CLK 32
+#define CAMCC_CSIPHY3_CLK 33
+#define CAMCC_FAST_AHB_CLK_SRC 34
+#define CAMCC_ICP_APB_CLK 35
+#define CAMCC_ICP_ATB_CLK 36
+#define CAMCC_ICP_CLK 37
+#define CAMCC_ICP_CLK_SRC 38
+#define CAMCC_ICP_CTI_CLK 39
+#define CAMCC_ICP_TS_CLK 40
+#define CAMCC_IFE_0_AXI_CLK 41
+#define CAMCC_IFE_0_CLK 42
+#define CAMCC_IFE_0_CLK_SRC 43
+#define CAMCC_IFE_0_CPHY_RX_CLK 44
+#define CAMCC_IFE_0_CSID_CLK 45
+#define CAMCC_IFE_0_CSID_CLK_SRC 46
+#define CAMCC_IFE_0_DSP_CLK 47
+#define CAMCC_IFE_1_AXI_CLK 48
+#define CAMCC_IFE_1_CLK 49
+#define CAMCC_IFE_1_CLK_SRC 50
+#define CAMCC_IFE_1_CPHY_RX_CLK 51
+#define CAMCC_IFE_1_CSID_CLK 52
+#define CAMCC_IFE_1_CSID_CLK_SRC 53
+#define CAMCC_IFE_1_DSP_CLK 54
+#define CAMCC_IFE_2_AXI_CLK 55
+#define CAMCC_IFE_2_CLK 56
+#define CAMCC_IFE_2_CLK_SRC 57
+#define CAMCC_IFE_2_CPHY_RX_CLK 58
+#define CAMCC_IFE_2_CSID_CLK 59
+#define CAMCC_IFE_2_CSID_CLK_SRC 60
+#define CAMCC_IFE_2_DSP_CLK 61
+#define CAMCC_IFE_LITE_CLK 62
+#define CAMCC_IFE_LITE_CLK_SRC 63
+#define CAMCC_IFE_LITE_CPHY_RX_CLK 64
+#define CAMCC_IFE_LITE_CSID_CLK 65
+#define CAMCC_IFE_LITE_CSID_CLK_SRC 66
+#define CAMCC_IPE_0_AHB_CLK 67
+#define CAMCC_IPE_0_AREG_CLK 68
+#define CAMCC_IPE_0_AXI_CLK 69
+#define CAMCC_IPE_0_CLK 70
+#define CAMCC_IPE_0_CLK_SRC 71
+#define CAMCC_JPEG_CLK 72
+#define CAMCC_JPEG_CLK_SRC 73
+#define CAMCC_LRME_CLK 74
+#define CAMCC_LRME_CLK_SRC 75
+#define CAMCC_MCLK0_CLK 76
+#define CAMCC_MCLK0_CLK_SRC 77
+#define CAMCC_MCLK1_CLK 78
+#define CAMCC_MCLK1_CLK_SRC 79
+#define CAMCC_MCLK2_CLK 80
+#define CAMCC_MCLK2_CLK_SRC 81
+#define CAMCC_MCLK3_CLK 82
+#define CAMCC_MCLK3_CLK_SRC 83
+#define CAMCC_MCLK4_CLK 84
+#define CAMCC_MCLK4_CLK_SRC 85
+#define CAMCC_SLOW_AHB_CLK_SRC 86
+#define CAMCC_SOC_AHB_CLK 87
+#define CAMCC_SYS_TMR_CLK 88
+
+/* GDSCs */
+#define BPS_GDSC 0
+#define IPE_0_GDSC 1
+#define IFE_0_GDSC 2
+#define IFE_1_GDSC 3
+#define IFE_2_GDSC 4
+#define TITAN_TOP_GDSC 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm6350-videocc.h b/include/dt-bindings/clock/qcom,sm6350-videocc.h
new file mode 100644
index 000000000000..2af7f91fa023
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6350-videocc.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM6350_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM6350_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_PLL0 0
+#define VIDEO_PLL0_OUT_EVEN 1
+#define VIDEO_CC_IRIS_AHB_CLK 2
+#define VIDEO_CC_IRIS_CLK_SRC 3
+#define VIDEO_CC_MVS0_AXI_CLK 4
+#define VIDEO_CC_MVS0_CORE_CLK 5
+#define VIDEO_CC_MVSC_CORE_CLK 6
+#define VIDEO_CC_MVSC_CTL_AXI_CLK 7
+#define VIDEO_CC_SLEEP_CLK 8
+#define VIDEO_CC_SLEEP_CLK_SRC 9
+#define VIDEO_CC_VENUS_AHB_CLK 10
+
+/* GDSCs */
+#define MVSC_GDSC 0
+#define MVS0_GDSC 1
+
+#endif
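The indices above are plain phandle-cell arguments: a consumer selects a clock or power domain by passing the constant as the cell that follows the controller's phandle. A minimal, hypothetical DTS fragment is sketched below; the controller address, labels, and the consumer node are illustrative assumptions, not taken from this patch.

#include <dt-bindings/clock/qcom,sm6350-videocc.h>

videocc: clock-controller@aaf0000 {          /* address is illustrative */
        compatible = "qcom,sm6350-videocc";
        reg = <0x0aaf0000 0x10000>;
        #clock-cells = <1>;
        #power-domain-cells = <1>;
};

venus: video-codec@aa00000 {                 /* hypothetical consumer */
        clocks = <&videocc VIDEO_CC_MVS0_CORE_CLK>,
                 <&videocc VIDEO_CC_IRIS_AHB_CLK>;
        clock-names = "vcodec_core", "iface";
        power-domains = <&videocc MVS0_GDSC>;
};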
diff --git a/include/dt-bindings/clock/qcom,sm6375-dispcc.h b/include/dt-bindings/clock/qcom,sm6375-dispcc.h
new file mode 100644
index 000000000000..1cb0bed004bd
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6375-dispcc.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6375_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM6375_H
+
+/* Clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_ESC0_CLK 7
+#define DISP_CC_MDSS_ESC0_CLK_SRC 8
+#define DISP_CC_MDSS_MDP_CLK 9
+#define DISP_CC_MDSS_MDP_CLK_SRC 10
+#define DISP_CC_MDSS_MDP_LUT_CLK 11
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 12
+#define DISP_CC_MDSS_PCLK0_CLK 13
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 14
+#define DISP_CC_MDSS_ROT_CLK 15
+#define DISP_CC_MDSS_ROT_CLK_SRC 16
+#define DISP_CC_MDSS_RSCC_AHB_CLK 17
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 18
+#define DISP_CC_MDSS_VSYNC_CLK 19
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 20
+#define DISP_CC_SLEEP_CLK 21
+#define DISP_CC_XO_CLK 22
+
+/* Resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_RSCC_BCR 1
+
+/* GDSCs */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm6375-gcc.h b/include/dt-bindings/clock/qcom,sm6375-gcc.h
new file mode 100644
index 000000000000..1e9801e1cedf
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6375-gcc.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Konrad Dybcio <konrad.dybcio@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM6375_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM6375_H
+
+/* Clocks */
+#define GPLL0 0
+#define GPLL0_OUT_EVEN 1
+#define GPLL0_OUT_ODD 2
+#define GPLL1 3
+#define GPLL10 4
+#define GPLL11 5
+#define GPLL3 6
+#define GPLL3_OUT_EVEN 7
+#define GPLL4 8
+#define GPLL5 9
+#define GPLL6 10
+#define GPLL6_OUT_EVEN 11
+#define GPLL7 12
+#define GPLL8 13
+#define GPLL8_OUT_EVEN 14
+#define GPLL9 15
+#define GPLL9_OUT_MAIN 16
+#define GCC_AHB2PHY_CSI_CLK 17
+#define GCC_AHB2PHY_USB_CLK 18
+#define GCC_BIMC_GPU_AXI_CLK 19
+#define GCC_BOOT_ROM_AHB_CLK 20
+#define GCC_CAM_THROTTLE_NRT_CLK 21
+#define GCC_CAM_THROTTLE_RT_CLK 22
+#define GCC_CAMERA_AHB_CLK 23
+#define GCC_CAMERA_XO_CLK 24
+#define GCC_CAMSS_AXI_CLK 25
+#define GCC_CAMSS_AXI_CLK_SRC 26
+#define GCC_CAMSS_CAMNOC_ATB_CLK 27
+#define GCC_CAMSS_CAMNOC_NTS_XO_CLK 28
+#define GCC_CAMSS_CCI_0_CLK 29
+#define GCC_CAMSS_CCI_0_CLK_SRC 30
+#define GCC_CAMSS_CCI_1_CLK 31
+#define GCC_CAMSS_CCI_1_CLK_SRC 32
+#define GCC_CAMSS_CPHY_0_CLK 33
+#define GCC_CAMSS_CPHY_1_CLK 34
+#define GCC_CAMSS_CPHY_2_CLK 35
+#define GCC_CAMSS_CPHY_3_CLK 36
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 37
+#define GCC_CAMSS_CSI0PHYTIMER_CLK_SRC 38
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 39
+#define GCC_CAMSS_CSI1PHYTIMER_CLK_SRC 40
+#define GCC_CAMSS_CSI2PHYTIMER_CLK 41
+#define GCC_CAMSS_CSI2PHYTIMER_CLK_SRC 42
+#define GCC_CAMSS_CSI3PHYTIMER_CLK 43
+#define GCC_CAMSS_CSI3PHYTIMER_CLK_SRC 44
+#define GCC_CAMSS_MCLK0_CLK 45
+#define GCC_CAMSS_MCLK0_CLK_SRC 46
+#define GCC_CAMSS_MCLK1_CLK 47
+#define GCC_CAMSS_MCLK1_CLK_SRC 48
+#define GCC_CAMSS_MCLK2_CLK 49
+#define GCC_CAMSS_MCLK2_CLK_SRC 50
+#define GCC_CAMSS_MCLK3_CLK 51
+#define GCC_CAMSS_MCLK3_CLK_SRC 52
+#define GCC_CAMSS_MCLK4_CLK 53
+#define GCC_CAMSS_MCLK4_CLK_SRC 54
+#define GCC_CAMSS_NRT_AXI_CLK 55
+#define GCC_CAMSS_OPE_AHB_CLK 56
+#define GCC_CAMSS_OPE_AHB_CLK_SRC 57
+#define GCC_CAMSS_OPE_CLK 58
+#define GCC_CAMSS_OPE_CLK_SRC 59
+#define GCC_CAMSS_RT_AXI_CLK 60
+#define GCC_CAMSS_TFE_0_CLK 61
+#define GCC_CAMSS_TFE_0_CLK_SRC 62
+#define GCC_CAMSS_TFE_0_CPHY_RX_CLK 63
+#define GCC_CAMSS_TFE_0_CSID_CLK 64
+#define GCC_CAMSS_TFE_0_CSID_CLK_SRC 65
+#define GCC_CAMSS_TFE_1_CLK 66
+#define GCC_CAMSS_TFE_1_CLK_SRC 67
+#define GCC_CAMSS_TFE_1_CPHY_RX_CLK 68
+#define GCC_CAMSS_TFE_1_CSID_CLK 69
+#define GCC_CAMSS_TFE_1_CSID_CLK_SRC 70
+#define GCC_CAMSS_TFE_2_CLK 71
+#define GCC_CAMSS_TFE_2_CLK_SRC 72
+#define GCC_CAMSS_TFE_2_CPHY_RX_CLK 73
+#define GCC_CAMSS_TFE_2_CSID_CLK 74
+#define GCC_CAMSS_TFE_2_CSID_CLK_SRC 75
+#define GCC_CAMSS_TFE_CPHY_RX_CLK_SRC 76
+#define GCC_CAMSS_TOP_AHB_CLK 77
+#define GCC_CAMSS_TOP_AHB_CLK_SRC 78
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 79
+#define GCC_CPUSS_AHB_CLK_SRC 80
+#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 81
+#define GCC_CPUSS_GNOC_CLK 82
+#define GCC_DISP_AHB_CLK 83
+#define GCC_DISP_GPLL0_CLK_SRC 84
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 85
+#define GCC_DISP_HF_AXI_CLK 86
+#define GCC_DISP_SLEEP_CLK 87
+#define GCC_DISP_THROTTLE_CORE_CLK 88
+#define GCC_DISP_XO_CLK 89
+#define GCC_GP1_CLK 90
+#define GCC_GP1_CLK_SRC 91
+#define GCC_GP2_CLK 92
+#define GCC_GP2_CLK_SRC 93
+#define GCC_GP3_CLK 94
+#define GCC_GP3_CLK_SRC 95
+#define GCC_GPU_CFG_AHB_CLK 96
+#define GCC_GPU_GPLL0_CLK_SRC 97
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 98
+#define GCC_GPU_MEMNOC_GFX_CLK 99
+#define GCC_GPU_SNOC_DVM_GFX_CLK 100
+#define GCC_GPU_THROTTLE_CORE_CLK 101
+#define GCC_PDM2_CLK 102
+#define GCC_PDM2_CLK_SRC 103
+#define GCC_PDM_AHB_CLK 104
+#define GCC_PDM_XO4_CLK 105
+#define GCC_PRNG_AHB_CLK 106
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 107
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 108
+#define GCC_QMIP_DISP_AHB_CLK 109
+#define GCC_QMIP_GPU_CFG_AHB_CLK 110
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 111
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 112
+#define GCC_QUPV3_WRAP0_CORE_CLK 113
+#define GCC_QUPV3_WRAP0_S0_CLK 114
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 115
+#define GCC_QUPV3_WRAP0_S1_CLK 116
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 117
+#define GCC_QUPV3_WRAP0_S2_CLK 118
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 119
+#define GCC_QUPV3_WRAP0_S3_CLK 120
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 121
+#define GCC_QUPV3_WRAP0_S4_CLK 122
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 123
+#define GCC_QUPV3_WRAP0_S5_CLK 124
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 125
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 126
+#define GCC_QUPV3_WRAP1_CORE_CLK 127
+#define GCC_QUPV3_WRAP1_S0_CLK 128
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 129
+#define GCC_QUPV3_WRAP1_S1_CLK 130
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 131
+#define GCC_QUPV3_WRAP1_S2_CLK 132
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 133
+#define GCC_QUPV3_WRAP1_S3_CLK 134
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 135
+#define GCC_QUPV3_WRAP1_S4_CLK 136
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 137
+#define GCC_QUPV3_WRAP1_S5_CLK 138
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 139
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 140
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 141
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 142
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 143
+#define GCC_RX5_PCIE_CLKREF_EN_CLK 144
+#define GCC_SDCC1_AHB_CLK 145
+#define GCC_SDCC1_APPS_CLK 146
+#define GCC_SDCC1_APPS_CLK_SRC 147
+#define GCC_SDCC1_ICE_CORE_CLK 148
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 149
+#define GCC_SDCC2_AHB_CLK 150
+#define GCC_SDCC2_APPS_CLK 151
+#define GCC_SDCC2_APPS_CLK_SRC 152
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 153
+#define GCC_SYS_NOC_UFS_PHY_AXI_CLK 154
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 155
+#define GCC_UFS_MEM_CLKREF_CLK 156
+#define GCC_UFS_PHY_AHB_CLK 157
+#define GCC_UFS_PHY_AXI_CLK 158
+#define GCC_UFS_PHY_AXI_CLK_SRC 159
+#define GCC_UFS_PHY_ICE_CORE_CLK 160
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 161
+#define GCC_UFS_PHY_PHY_AUX_CLK 162
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 163
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 164
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 165
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 166
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 167
+#define GCC_USB30_PRIM_MASTER_CLK 168
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 169
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 170
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 171
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 172
+#define GCC_USB30_PRIM_SLEEP_CLK 173
+#define GCC_USB3_PRIM_CLKREF_CLK 174
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 175
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 176
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 177
+#define GCC_VCODEC0_AXI_CLK 178
+#define GCC_VENUS_AHB_CLK 179
+#define GCC_VENUS_CTL_AXI_CLK 180
+#define GCC_VIDEO_AHB_CLK 181
+#define GCC_VIDEO_AXI0_CLK 182
+#define GCC_VIDEO_THROTTLE_CORE_CLK 183
+#define GCC_VIDEO_VCODEC0_SYS_CLK 184
+#define GCC_VIDEO_VENUS_CLK_SRC 185
+#define GCC_VIDEO_VENUS_CTL_CLK 186
+#define GCC_VIDEO_XO_CLK 187
+
+/* Resets */
+#define GCC_CAMSS_OPE_BCR 0
+#define GCC_CAMSS_TFE_BCR 1
+#define GCC_CAMSS_TOP_BCR 2
+#define GCC_GPU_BCR 3
+#define GCC_MMSS_BCR 4
+#define GCC_PDM_BCR 5
+#define GCC_PRNG_BCR 6
+#define GCC_QUPV3_WRAPPER_0_BCR 7
+#define GCC_QUPV3_WRAPPER_1_BCR 8
+#define GCC_QUSB2PHY_PRIM_BCR 9
+#define GCC_QUSB2PHY_SEC_BCR 10
+#define GCC_SDCC1_BCR 11
+#define GCC_SDCC2_BCR 12
+#define GCC_UFS_PHY_BCR 13
+#define GCC_USB30_PRIM_BCR 14
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 15
+#define GCC_VCODEC0_BCR 16
+#define GCC_VENUS_BCR 17
+#define GCC_VIDEO_INTERFACE_BCR 18
+#define GCC_USB3_DP_PHY_PRIM_BCR 19
+#define GCC_USB3_PHY_PRIM_SP0_BCR 20
+
+/* GDSCs */
+#define USB30_PRIM_GDSC 0
+#define UFS_PHY_GDSC 1
+#define CAMSS_TOP_GDSC 2
+#define VENUS_GDSC 3
+#define VCODEC0_GDSC 4
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC 5
+#define HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC 6
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 7
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 8
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm6375-gpucc.h b/include/dt-bindings/clock/qcom,sm6375-gpucc.h
new file mode 100644
index 000000000000..0887ac03825e
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm6375-gpucc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_BLAIR_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_BLAIR_H
+
+/* GPU CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL1 1
+#define GPU_CC_AHB_CLK 2
+#define GPU_CC_CX_GFX3D_CLK 3
+#define GPU_CC_CX_GFX3D_SLV_CLK 4
+#define GPU_CC_CX_GMU_CLK 5
+#define GPU_CC_CX_SNOC_DVM_CLK 6
+#define GPU_CC_CXO_AON_CLK 7
+#define GPU_CC_CXO_CLK 8
+#define GPU_CC_GMU_CLK_SRC 9
+#define GPU_CC_GX_CXO_CLK 10
+#define GPU_CC_GX_GFX3D_CLK 11
+#define GPU_CC_GX_GFX3D_CLK_SRC 12
+#define GPU_CC_GX_GMU_CLK 13
+#define GPU_CC_SLEEP_CLK 14
+
+/* GDSCs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+/* Resets */
+#define GPU_GX_BCR 0
+#define GPU_ACD_BCR 1
+#define GPU_GX_ACD_MISC_BCR 2
+
+#endif
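The reset and GDSC tables follow the same single-cell pattern: the index is the cell after the controller phandle, with #reset-cells and #power-domain-cells both equal to 1 on the provider. A hedged sketch of a consumer of the GPU CC entries above (node name and property set are assumptions):

#include <dt-bindings/clock/qcom,sm6375-gpucc.h>

gpu: gpu@3d00000 {                           /* hypothetical consumer */
        clocks = <&gpucc GPU_CC_GX_GFX3D_CLK>,
                 <&gpucc GPU_CC_CX_GMU_CLK>;
        clock-names = "core", "gmu";
        resets = <&gpucc GPU_GX_BCR>;
        power-domains = <&gpucc GPU_GX_GDSC>;
};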
diff --git a/include/dt-bindings/clock/qcom,sm7150-camcc.h b/include/dt-bindings/clock/qcom,sm7150-camcc.h
new file mode 100644
index 000000000000..ce73ef0fe95d
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm7150-camcc.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAMCC_SM7150_H
+#define _DT_BINDINGS_CLK_QCOM_CAMCC_SM7150_H
+
+/* Hardware clocks */
+#define CAMCC_PLL0_OUT_EVEN 0
+#define CAMCC_PLL0_OUT_ODD 1
+#define CAMCC_PLL1_OUT_EVEN 2
+#define CAMCC_PLL2_OUT_EARLY 3
+#define CAMCC_PLL3_OUT_EVEN 4
+#define CAMCC_PLL4_OUT_EVEN 5
+
+/* CAMCC clock registers */
+#define CAMCC_PLL0 6
+#define CAMCC_PLL1 7
+#define CAMCC_PLL2 8
+#define CAMCC_PLL2_OUT_AUX 9
+#define CAMCC_PLL2_OUT_MAIN 10
+#define CAMCC_PLL3 11
+#define CAMCC_PLL4 12
+#define CAMCC_BPS_AHB_CLK 13
+#define CAMCC_BPS_AREG_CLK 14
+#define CAMCC_BPS_AXI_CLK 15
+#define CAMCC_BPS_CLK 16
+#define CAMCC_BPS_CLK_SRC 17
+#define CAMCC_CAMNOC_AXI_CLK 18
+#define CAMCC_CAMNOC_AXI_CLK_SRC 19
+#define CAMCC_CAMNOC_DCD_XO_CLK 20
+#define CAMCC_CCI_0_CLK 21
+#define CAMCC_CCI_0_CLK_SRC 22
+#define CAMCC_CCI_1_CLK 23
+#define CAMCC_CCI_1_CLK_SRC 24
+#define CAMCC_CORE_AHB_CLK 25
+#define CAMCC_CPAS_AHB_CLK 26
+#define CAMCC_CPHY_RX_CLK_SRC 27
+#define CAMCC_CSI0PHYTIMER_CLK 28
+#define CAMCC_CSI0PHYTIMER_CLK_SRC 29
+#define CAMCC_CSI1PHYTIMER_CLK 30
+#define CAMCC_CSI1PHYTIMER_CLK_SRC 31
+#define CAMCC_CSI2PHYTIMER_CLK 32
+#define CAMCC_CSI2PHYTIMER_CLK_SRC 33
+#define CAMCC_CSI3PHYTIMER_CLK 34
+#define CAMCC_CSI3PHYTIMER_CLK_SRC 35
+#define CAMCC_CSIPHY0_CLK 36
+#define CAMCC_CSIPHY1_CLK 37
+#define CAMCC_CSIPHY2_CLK 38
+#define CAMCC_CSIPHY3_CLK 39
+#define CAMCC_FAST_AHB_CLK_SRC 40
+#define CAMCC_FD_CORE_CLK 41
+#define CAMCC_FD_CORE_CLK_SRC 42
+#define CAMCC_FD_CORE_UAR_CLK 43
+#define CAMCC_ICP_AHB_CLK 44
+#define CAMCC_ICP_CLK 45
+#define CAMCC_ICP_CLK_SRC 46
+#define CAMCC_IFE_0_AXI_CLK 47
+#define CAMCC_IFE_0_CLK 48
+#define CAMCC_IFE_0_CLK_SRC 49
+#define CAMCC_IFE_0_CPHY_RX_CLK 50
+#define CAMCC_IFE_0_CSID_CLK 51
+#define CAMCC_IFE_0_CSID_CLK_SRC 52
+#define CAMCC_IFE_0_DSP_CLK 53
+#define CAMCC_IFE_1_AXI_CLK 54
+#define CAMCC_IFE_1_CLK 55
+#define CAMCC_IFE_1_CLK_SRC 56
+#define CAMCC_IFE_1_CPHY_RX_CLK 57
+#define CAMCC_IFE_1_CSID_CLK 58
+#define CAMCC_IFE_1_CSID_CLK_SRC 59
+#define CAMCC_IFE_1_DSP_CLK 60
+#define CAMCC_IFE_LITE_CLK 61
+#define CAMCC_IFE_LITE_CLK_SRC 62
+#define CAMCC_IFE_LITE_CPHY_RX_CLK 63
+#define CAMCC_IFE_LITE_CSID_CLK 64
+#define CAMCC_IFE_LITE_CSID_CLK_SRC 65
+#define CAMCC_IPE_0_AHB_CLK 66
+#define CAMCC_IPE_0_AREG_CLK 67
+#define CAMCC_IPE_0_AXI_CLK 68
+#define CAMCC_IPE_0_CLK 69
+#define CAMCC_IPE_0_CLK_SRC 70
+#define CAMCC_IPE_1_AHB_CLK 71
+#define CAMCC_IPE_1_AREG_CLK 72
+#define CAMCC_IPE_1_AXI_CLK 73
+#define CAMCC_IPE_1_CLK 74
+#define CAMCC_JPEG_CLK 75
+#define CAMCC_JPEG_CLK_SRC 76
+#define CAMCC_LRME_CLK 77
+#define CAMCC_LRME_CLK_SRC 78
+#define CAMCC_MCLK0_CLK 79
+#define CAMCC_MCLK0_CLK_SRC 80
+#define CAMCC_MCLK1_CLK 81
+#define CAMCC_MCLK1_CLK_SRC 82
+#define CAMCC_MCLK2_CLK 83
+#define CAMCC_MCLK2_CLK_SRC 84
+#define CAMCC_MCLK3_CLK 85
+#define CAMCC_MCLK3_CLK_SRC 86
+#define CAMCC_SLEEP_CLK 87
+#define CAMCC_SLEEP_CLK_SRC 88
+#define CAMCC_SLOW_AHB_CLK_SRC 89
+#define CAMCC_XO_CLK_SRC 90
+
+/* CAMCC GDSCRs */
+#define BPS_GDSC 0
+#define IFE_0_GDSC 1
+#define IFE_1_GDSC 2
+#define IPE_0_GDSC 3
+#define IPE_1_GDSC 4
+#define TITAN_TOP_GDSC 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm7150-dispcc.h b/include/dt-bindings/clock/qcom,sm7150-dispcc.h
new file mode 100644
index 000000000000..1e4e6432d506
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm7150-dispcc.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com>
+ * Copyright (c) 2024, David Wronek <david@mainlining.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISPCC_SM7150_H
+#define _DT_BINDINGS_CLK_QCOM_DISPCC_SM7150_H
+
+/* DISPCC clock registers */
+#define DISPCC_PLL0 0
+#define DISPCC_MDSS_AHB_CLK 1
+#define DISPCC_MDSS_AHB_CLK_SRC 2
+#define DISPCC_MDSS_BYTE0_CLK 3
+#define DISPCC_MDSS_BYTE0_CLK_SRC 4
+#define DISPCC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISPCC_MDSS_BYTE0_INTF_CLK 6
+#define DISPCC_MDSS_BYTE1_CLK 7
+#define DISPCC_MDSS_BYTE1_CLK_SRC 8
+#define DISPCC_MDSS_BYTE1_DIV_CLK_SRC 9
+#define DISPCC_MDSS_BYTE1_INTF_CLK 10
+#define DISPCC_MDSS_DP_AUX_CLK 11
+#define DISPCC_MDSS_DP_AUX_CLK_SRC 12
+#define DISPCC_MDSS_DP_CRYPTO_CLK 13
+#define DISPCC_MDSS_DP_CRYPTO_CLK_SRC 14
+#define DISPCC_MDSS_DP_LINK_CLK 15
+#define DISPCC_MDSS_DP_LINK_CLK_SRC 16
+#define DISPCC_MDSS_DP_LINK_INTF_CLK 17
+#define DISPCC_MDSS_DP_PIXEL1_CLK 18
+#define DISPCC_MDSS_DP_PIXEL1_CLK_SRC 19
+#define DISPCC_MDSS_DP_PIXEL_CLK 20
+#define DISPCC_MDSS_DP_PIXEL_CLK_SRC 21
+#define DISPCC_MDSS_ESC0_CLK 22
+#define DISPCC_MDSS_ESC0_CLK_SRC 23
+#define DISPCC_MDSS_ESC1_CLK 24
+#define DISPCC_MDSS_ESC1_CLK_SRC 25
+#define DISPCC_MDSS_MDP_CLK 26
+#define DISPCC_MDSS_MDP_CLK_SRC 27
+#define DISPCC_MDSS_MDP_LUT_CLK 28
+#define DISPCC_MDSS_NON_GDSC_AHB_CLK 29
+#define DISPCC_MDSS_PCLK0_CLK 30
+#define DISPCC_MDSS_PCLK0_CLK_SRC 31
+#define DISPCC_MDSS_PCLK1_CLK 32
+#define DISPCC_MDSS_PCLK1_CLK_SRC 33
+#define DISPCC_MDSS_ROT_CLK 34
+#define DISPCC_MDSS_ROT_CLK_SRC 35
+#define DISPCC_MDSS_RSCC_AHB_CLK 36
+#define DISPCC_MDSS_RSCC_VSYNC_CLK 37
+#define DISPCC_MDSS_VSYNC_CLK 38
+#define DISPCC_MDSS_VSYNC_CLK_SRC 39
+#define DISPCC_XO_CLK_SRC 40
+#define DISPCC_SLEEP_CLK 41
+#define DISPCC_SLEEP_CLK_SRC 42
+
+/* DISPCC resets */
+#define DISPCC_MDSS_CORE_BCR 0
+
+/* DISPCC GDSCR */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm7150-gcc.h b/include/dt-bindings/clock/qcom,sm7150-gcc.h
new file mode 100644
index 000000000000..7719ffc86139
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm7150-gcc.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Danila Tikhonov <danila@jiaxyga.com>
+ * Copyright (c) 2023, David Wronek <davidwronek@gmail.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM7150_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM7150_H
+
+/* GCC clock registers */
+#define GCC_GPLL0_MAIN_DIV_CDIV 0
+#define GPLL0 1
+#define GPLL0_OUT_EVEN 2
+#define GPLL6 3
+#define GPLL7 4
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK 5
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 6
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 7
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 8
+#define GCC_APC_VS_CLK 9
+#define GCC_BOOT_ROM_AHB_CLK 10
+#define GCC_CAMERA_HF_AXI_CLK 11
+#define GCC_CAMERA_SF_AXI_CLK 12
+#define GCC_CE1_AHB_CLK 13
+#define GCC_CE1_AXI_CLK 14
+#define GCC_CE1_CLK 15
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 16
+#define GCC_CPUSS_AHB_CLK 17
+#define GCC_CPUSS_AHB_CLK_SRC 18
+#define GCC_CPUSS_RBCPR_CLK 19
+#define GCC_CPUSS_RBCPR_CLK_SRC 20
+#define GCC_DDRSS_GPU_AXI_CLK 21
+#define GCC_DISP_GPLL0_CLK_SRC 22
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 23
+#define GCC_DISP_HF_AXI_CLK 24
+#define GCC_DISP_SF_AXI_CLK 25
+#define GCC_GP1_CLK 26
+#define GCC_GP1_CLK_SRC 27
+#define GCC_GP2_CLK 28
+#define GCC_GP2_CLK_SRC 29
+#define GCC_GP3_CLK 30
+#define GCC_GP3_CLK_SRC 31
+#define GCC_GPU_GPLL0_CLK_SRC 32
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 33
+#define GCC_GPU_MEMNOC_GFX_CLK 34
+#define GCC_GPU_SNOC_DVM_GFX_CLK 35
+#define GCC_GPU_VS_CLK 36
+#define GCC_NPU_AXI_CLK 37
+#define GCC_NPU_CFG_AHB_CLK 38
+#define GCC_NPU_GPLL0_CLK_SRC 39
+#define GCC_NPU_GPLL0_DIV_CLK_SRC 40
+#define GCC_PCIE_0_AUX_CLK 41
+#define GCC_PCIE_0_AUX_CLK_SRC 42
+#define GCC_PCIE_0_CFG_AHB_CLK 43
+#define GCC_PCIE_0_CLKREF_CLK 44
+#define GCC_PCIE_0_MSTR_AXI_CLK 45
+#define GCC_PCIE_0_PIPE_CLK 46
+#define GCC_PCIE_0_SLV_AXI_CLK 47
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 48
+#define GCC_PCIE_PHY_AUX_CLK 49
+#define GCC_PCIE_PHY_REFGEN_CLK 50
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 51
+#define GCC_PDM2_CLK 52
+#define GCC_PDM2_CLK_SRC 53
+#define GCC_PDM_AHB_CLK 54
+#define GCC_PDM_XO4_CLK 55
+#define GCC_PRNG_AHB_CLK 56
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 57
+#define GCC_QUPV3_WRAP0_CORE_CLK 58
+#define GCC_QUPV3_WRAP0_S0_CLK 59
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 60
+#define GCC_QUPV3_WRAP0_S1_CLK 61
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 62
+#define GCC_QUPV3_WRAP0_S2_CLK 63
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 64
+#define GCC_QUPV3_WRAP0_S3_CLK 65
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 66
+#define GCC_QUPV3_WRAP0_S4_CLK 67
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 68
+#define GCC_QUPV3_WRAP0_S5_CLK 69
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 70
+#define GCC_QUPV3_WRAP0_S6_CLK 71
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 72
+#define GCC_QUPV3_WRAP0_S7_CLK 73
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 74
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 75
+#define GCC_QUPV3_WRAP1_CORE_CLK 76
+#define GCC_QUPV3_WRAP1_S0_CLK 77
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 78
+#define GCC_QUPV3_WRAP1_S1_CLK 79
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 80
+#define GCC_QUPV3_WRAP1_S2_CLK 81
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 82
+#define GCC_QUPV3_WRAP1_S3_CLK 83
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 84
+#define GCC_QUPV3_WRAP1_S4_CLK 85
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 86
+#define GCC_QUPV3_WRAP1_S5_CLK 87
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 88
+#define GCC_QUPV3_WRAP1_S6_CLK 89
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 90
+#define GCC_QUPV3_WRAP1_S7_CLK 91
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 92
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 93
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 94
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 95
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 96
+#define GCC_SDCC1_AHB_CLK 97
+#define GCC_SDCC1_APPS_CLK 98
+#define GCC_SDCC1_APPS_CLK_SRC 99
+#define GCC_SDCC1_ICE_CORE_CLK 100
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 101
+#define GCC_SDCC2_AHB_CLK 102
+#define GCC_SDCC2_APPS_CLK 103
+#define GCC_SDCC2_APPS_CLK_SRC 104
+#define GCC_SDCC4_AHB_CLK 105
+#define GCC_SDCC4_APPS_CLK 106
+#define GCC_SDCC4_APPS_CLK_SRC 107
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 108
+#define GCC_TSIF_AHB_CLK 109
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 110
+#define GCC_TSIF_REF_CLK 111
+#define GCC_TSIF_REF_CLK_SRC 112
+#define GCC_UFS_MEM_CLKREF_CLK 113
+#define GCC_UFS_PHY_AHB_CLK 114
+#define GCC_UFS_PHY_AXI_CLK 115
+#define GCC_UFS_PHY_AXI_CLK_SRC 116
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 117
+#define GCC_UFS_PHY_ICE_CORE_CLK 118
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 119
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 120
+#define GCC_UFS_PHY_PHY_AUX_CLK 121
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 122
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 123
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 124
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 125
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 126
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 127
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 128
+#define GCC_USB30_PRIM_MASTER_CLK 129
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 130
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 131
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 132
+#define GCC_USB30_PRIM_SLEEP_CLK 133
+#define GCC_USB3_PRIM_CLKREF_CLK 134
+#define GCC_USB3_PRIM_PHY_AUX_CLK 135
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 136
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 137
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 138
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 139
+#define GCC_VDDA_VS_CLK 140
+#define GCC_VDDCX_VS_CLK 141
+#define GCC_VDDMX_VS_CLK 142
+#define GCC_VIDEO_AXI_CLK 143
+#define GCC_VS_CTRL_AHB_CLK 144
+#define GCC_VS_CTRL_CLK 145
+#define GCC_VS_CTRL_CLK_SRC 146
+#define GCC_VSENSOR_CLK_SRC 147
+
+/* GCC Resets */
+#define GCC_PCIE_0_BCR 0
+#define GCC_PCIE_PHY_BCR 1
+#define GCC_PCIE_PHY_COM_BCR 2
+#define GCC_UFS_PHY_BCR 3
+#define GCC_USB30_PRIM_BCR 4
+#define GCC_USB3_DP_PHY_PRIM_BCR 5
+#define GCC_USB3_DP_PHY_SEC_BCR 6
+#define GCC_USB3_PHY_PRIM_BCR 7
+#define GCC_USB3_PHY_SEC_BCR 8
+#define GCC_QUSB2PHY_PRIM_BCR 9
+#define GCC_VIDEO_AXI_CLK_BCR 10
+
+/* GCC GDSCRs */
+#define PCIE_0_GDSC 0
+#define UFS_PHY_GDSC 1
+#define USB30_PRIM_GDSC 2
+#define HLOS1_VOTE_AGGRE_NOC_MMU_AUDIO_TBU_GDSC 3
+#define HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC 4
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU1_GDSC 5
+#define HLOS1_VOTE_AGGRE_NOC_MMU_TBU2_GDSC 6
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 7
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 8
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC 9
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm7150-videocc.h b/include/dt-bindings/clock/qcom,sm7150-videocc.h
new file mode 100644
index 000000000000..d86e0fbb159a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm7150-videocc.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEOCC_SM7150_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEOCC_SM7150_H
+
+#define VIDEOCC_PLL0 0
+#define VIDEOCC_IRIS_AHB_CLK 1
+#define VIDEOCC_IRIS_CLK_SRC 2
+#define VIDEOCC_MVS0_AXI_CLK 3
+#define VIDEOCC_MVS0_CORE_CLK 4
+#define VIDEOCC_MVS1_AXI_CLK 5
+#define VIDEOCC_MVS1_CORE_CLK 6
+#define VIDEOCC_MVSC_CORE_CLK 7
+#define VIDEOCC_MVSC_CTL_AXI_CLK 8
+#define VIDEOCC_VENUS_AHB_CLK 9
+#define VIDEOCC_XO_CLK 10
+#define VIDEOCC_XO_CLK_SRC 11
+
+/* VIDEOCC GDSCRs */
+#define VENUS_GDSC 0
+#define VCODEC0_GDSC 1
+#define VCODEC1_GDSC 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8150-camcc.h b/include/dt-bindings/clock/qcom,sm8150-camcc.h
new file mode 100644
index 000000000000..5444035efa93
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8150-camcc.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8150_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8150_H
+
+/* CAM_CC clocks */
+#define CAM_CC_PLL0 0
+#define CAM_CC_PLL0_OUT_EVEN 1
+#define CAM_CC_PLL0_OUT_ODD 2
+#define CAM_CC_PLL1 3
+#define CAM_CC_PLL1_OUT_EVEN 4
+#define CAM_CC_PLL2 5
+#define CAM_CC_PLL2_OUT_MAIN 6
+#define CAM_CC_PLL3 7
+#define CAM_CC_PLL3_OUT_EVEN 8
+#define CAM_CC_PLL4 9
+#define CAM_CC_PLL4_OUT_EVEN 10
+#define CAM_CC_BPS_AHB_CLK 11
+#define CAM_CC_BPS_AREG_CLK 12
+#define CAM_CC_BPS_AXI_CLK 13
+#define CAM_CC_BPS_CLK 14
+#define CAM_CC_BPS_CLK_SRC 15
+#define CAM_CC_CAMNOC_AXI_CLK 16
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 17
+#define CAM_CC_CAMNOC_DCD_XO_CLK 18
+#define CAM_CC_CCI_0_CLK 19
+#define CAM_CC_CCI_0_CLK_SRC 20
+#define CAM_CC_CCI_1_CLK 21
+#define CAM_CC_CCI_1_CLK_SRC 22
+#define CAM_CC_CORE_AHB_CLK 23
+#define CAM_CC_CPAS_AHB_CLK 24
+#define CAM_CC_CPHY_RX_CLK_SRC 25
+#define CAM_CC_CSI0PHYTIMER_CLK 26
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 27
+#define CAM_CC_CSI1PHYTIMER_CLK 28
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 29
+#define CAM_CC_CSI2PHYTIMER_CLK 30
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 31
+#define CAM_CC_CSI3PHYTIMER_CLK 32
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 33
+#define CAM_CC_CSIPHY0_CLK 34
+#define CAM_CC_CSIPHY1_CLK 35
+#define CAM_CC_CSIPHY2_CLK 36
+#define CAM_CC_CSIPHY3_CLK 37
+#define CAM_CC_FAST_AHB_CLK_SRC 38
+#define CAM_CC_FD_CORE_CLK 39
+#define CAM_CC_FD_CORE_CLK_SRC 40
+#define CAM_CC_FD_CORE_UAR_CLK 41
+#define CAM_CC_GDSC_CLK 42
+#define CAM_CC_ICP_AHB_CLK 43
+#define CAM_CC_ICP_CLK 44
+#define CAM_CC_ICP_CLK_SRC 45
+#define CAM_CC_IFE_0_AXI_CLK 46
+#define CAM_CC_IFE_0_CLK 47
+#define CAM_CC_IFE_0_CLK_SRC 48
+#define CAM_CC_IFE_0_CPHY_RX_CLK 49
+#define CAM_CC_IFE_0_CSID_CLK 50
+#define CAM_CC_IFE_0_CSID_CLK_SRC 51
+#define CAM_CC_IFE_0_DSP_CLK 52
+#define CAM_CC_IFE_1_AXI_CLK 53
+#define CAM_CC_IFE_1_CLK 54
+#define CAM_CC_IFE_1_CLK_SRC 55
+#define CAM_CC_IFE_1_CPHY_RX_CLK 56
+#define CAM_CC_IFE_1_CSID_CLK 57
+#define CAM_CC_IFE_1_CSID_CLK_SRC 58
+#define CAM_CC_IFE_1_DSP_CLK 59
+#define CAM_CC_IFE_LITE_0_CLK 60
+#define CAM_CC_IFE_LITE_0_CLK_SRC 61
+#define CAM_CC_IFE_LITE_0_CPHY_RX_CLK 62
+#define CAM_CC_IFE_LITE_0_CSID_CLK 63
+#define CAM_CC_IFE_LITE_0_CSID_CLK_SRC 64
+#define CAM_CC_IFE_LITE_1_CLK 65
+#define CAM_CC_IFE_LITE_1_CLK_SRC 66
+#define CAM_CC_IFE_LITE_1_CPHY_RX_CLK 67
+#define CAM_CC_IFE_LITE_1_CSID_CLK 68
+#define CAM_CC_IFE_LITE_1_CSID_CLK_SRC 69
+#define CAM_CC_IPE_0_AHB_CLK 70
+#define CAM_CC_IPE_0_AREG_CLK 71
+#define CAM_CC_IPE_0_AXI_CLK 72
+#define CAM_CC_IPE_0_CLK 73
+#define CAM_CC_IPE_0_CLK_SRC 74
+#define CAM_CC_IPE_1_AHB_CLK 75
+#define CAM_CC_IPE_1_AREG_CLK 76
+#define CAM_CC_IPE_1_AXI_CLK 77
+#define CAM_CC_IPE_1_CLK 78
+#define CAM_CC_JPEG_CLK 79
+#define CAM_CC_JPEG_CLK_SRC 80
+#define CAM_CC_LRME_CLK 81
+#define CAM_CC_LRME_CLK_SRC 82
+#define CAM_CC_MCLK0_CLK 83
+#define CAM_CC_MCLK0_CLK_SRC 84
+#define CAM_CC_MCLK1_CLK 85
+#define CAM_CC_MCLK1_CLK_SRC 86
+#define CAM_CC_MCLK2_CLK 87
+#define CAM_CC_MCLK2_CLK_SRC 88
+#define CAM_CC_MCLK3_CLK 89
+#define CAM_CC_MCLK3_CLK_SRC 90
+#define CAM_CC_SLOW_AHB_CLK_SRC 91
+
+/* CAM_CC power domains */
+#define TITAN_TOP_GDSC 0
+#define BPS_GDSC 1
+#define IFE_0_GDSC 2
+#define IFE_1_GDSC 3
+#define IPE_0_GDSC 4
+#define IPE_1_GDSC 5
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CCI_BCR 2
+#define CAM_CC_CPAS_BCR 3
+#define CAM_CC_CSI0PHY_BCR 4
+#define CAM_CC_CSI1PHY_BCR 5
+#define CAM_CC_CSI2PHY_BCR 6
+#define CAM_CC_CSI3PHY_BCR 7
+#define CAM_CC_FD_BCR 8
+#define CAM_CC_ICP_BCR 9
+#define CAM_CC_IFE_0_BCR 10
+#define CAM_CC_IFE_1_BCR 11
+#define CAM_CC_IFE_LITE_0_BCR 12
+#define CAM_CC_IFE_LITE_1_BCR 13
+#define CAM_CC_IPE_0_BCR 14
+#define CAM_CC_IPE_1_BCR 15
+#define CAM_CC_JPEG_BCR 16
+#define CAM_CC_LRME_BCR 17
+#define CAM_CC_MCLK0_BCR 18
+#define CAM_CC_MCLK1_BCR 19
+#define CAM_CC_MCLK2_BCR 20
+#define CAM_CC_MCLK3_BCR 21
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8350-videocc.h b/include/dt-bindings/clock/qcom,sm8350-videocc.h
new file mode 100644
index 000000000000..b6945a448676
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8350-videocc.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8350_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8350_H
+
+/* Clocks */
+#define VIDEO_CC_AHB_CLK_SRC 0
+#define VIDEO_CC_MVS0_CLK 1
+#define VIDEO_CC_MVS0_CLK_SRC 2
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 3
+#define VIDEO_CC_MVS0C_CLK 4
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 5
+#define VIDEO_CC_MVS1_CLK 6
+#define VIDEO_CC_MVS1_CLK_SRC 7
+#define VIDEO_CC_MVS1_DIV2_CLK 8
+#define VIDEO_CC_MVS1_DIV_CLK_SRC 9
+#define VIDEO_CC_MVS1C_CLK 10
+#define VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC 11
+#define VIDEO_CC_SLEEP_CLK 12
+#define VIDEO_CC_SLEEP_CLK_SRC 13
+#define VIDEO_CC_XO_CLK_SRC 14
+#define VIDEO_PLL0 15
+#define VIDEO_PLL1 16
+
+/* GDSCs */
+#define MVS0C_GDSC 0
+#define MVS1C_GDSC 1
+#define MVS0_GDSC 2
+#define MVS1_GDSC 3
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8450-camcc.h b/include/dt-bindings/clock/qcom,sm8450-camcc.h
new file mode 100644
index 000000000000..7ff67acf301a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8450-camcc.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8450_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8450_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_CLK 1
+#define CAM_CC_BPS_CLK_SRC 2
+#define CAM_CC_BPS_FAST_AHB_CLK 3
+#define CAM_CC_CAMNOC_AXI_CLK 4
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 5
+#define CAM_CC_CAMNOC_DCD_XO_CLK 6
+#define CAM_CC_CCI_0_CLK 7
+#define CAM_CC_CCI_0_CLK_SRC 8
+#define CAM_CC_CCI_1_CLK 9
+#define CAM_CC_CCI_1_CLK_SRC 10
+#define CAM_CC_CORE_AHB_CLK 11
+#define CAM_CC_CPAS_AHB_CLK 12
+#define CAM_CC_CPAS_BPS_CLK 13
+#define CAM_CC_CPAS_FAST_AHB_CLK 14
+#define CAM_CC_CPAS_IFE_0_CLK 15
+#define CAM_CC_CPAS_IFE_1_CLK 16
+#define CAM_CC_CPAS_IFE_2_CLK 17
+#define CAM_CC_CPAS_IFE_LITE_CLK 18
+#define CAM_CC_CPAS_IPE_NPS_CLK 19
+#define CAM_CC_CPAS_SBI_CLK 20
+#define CAM_CC_CPAS_SFE_0_CLK 21
+#define CAM_CC_CPAS_SFE_1_CLK 22
+#define CAM_CC_CPHY_RX_CLK_SRC 23
+#define CAM_CC_CSI0PHYTIMER_CLK 24
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 25
+#define CAM_CC_CSI1PHYTIMER_CLK 26
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 27
+#define CAM_CC_CSI2PHYTIMER_CLK 28
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 29
+#define CAM_CC_CSI3PHYTIMER_CLK 30
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 31
+#define CAM_CC_CSI4PHYTIMER_CLK 32
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 33
+#define CAM_CC_CSI5PHYTIMER_CLK 34
+#define CAM_CC_CSI5PHYTIMER_CLK_SRC 35
+#define CAM_CC_CSID_CLK 36
+#define CAM_CC_CSID_CLK_SRC 37
+#define CAM_CC_CSID_CSIPHY_RX_CLK 38
+#define CAM_CC_CSIPHY0_CLK 39
+#define CAM_CC_CSIPHY1_CLK 40
+#define CAM_CC_CSIPHY2_CLK 41
+#define CAM_CC_CSIPHY3_CLK 42
+#define CAM_CC_CSIPHY4_CLK 43
+#define CAM_CC_CSIPHY5_CLK 44
+#define CAM_CC_FAST_AHB_CLK_SRC 45
+#define CAM_CC_GDSC_CLK 46
+#define CAM_CC_ICP_AHB_CLK 47
+#define CAM_CC_ICP_CLK 48
+#define CAM_CC_ICP_CLK_SRC 49
+#define CAM_CC_IFE_0_CLK 50
+#define CAM_CC_IFE_0_CLK_SRC 51
+#define CAM_CC_IFE_0_DSP_CLK 52
+#define CAM_CC_IFE_0_FAST_AHB_CLK 53
+#define CAM_CC_IFE_1_CLK 54
+#define CAM_CC_IFE_1_CLK_SRC 55
+#define CAM_CC_IFE_1_DSP_CLK 56
+#define CAM_CC_IFE_1_FAST_AHB_CLK 57
+#define CAM_CC_IFE_2_CLK 58
+#define CAM_CC_IFE_2_CLK_SRC 59
+#define CAM_CC_IFE_2_DSP_CLK 60
+#define CAM_CC_IFE_2_FAST_AHB_CLK 61
+#define CAM_CC_IFE_LITE_AHB_CLK 62
+#define CAM_CC_IFE_LITE_CLK 63
+#define CAM_CC_IFE_LITE_CLK_SRC 64
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 65
+#define CAM_CC_IFE_LITE_CSID_CLK 66
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 67
+#define CAM_CC_IPE_NPS_AHB_CLK 68
+#define CAM_CC_IPE_NPS_CLK 69
+#define CAM_CC_IPE_NPS_CLK_SRC 70
+#define CAM_CC_IPE_NPS_FAST_AHB_CLK 71
+#define CAM_CC_IPE_PPS_CLK 72
+#define CAM_CC_IPE_PPS_FAST_AHB_CLK 73
+#define CAM_CC_JPEG_CLK 74
+#define CAM_CC_JPEG_CLK_SRC 75
+#define CAM_CC_MCLK0_CLK 76
+#define CAM_CC_MCLK0_CLK_SRC 77
+#define CAM_CC_MCLK1_CLK 78
+#define CAM_CC_MCLK1_CLK_SRC 79
+#define CAM_CC_MCLK2_CLK 80
+#define CAM_CC_MCLK2_CLK_SRC 81
+#define CAM_CC_MCLK3_CLK 82
+#define CAM_CC_MCLK3_CLK_SRC 83
+#define CAM_CC_MCLK4_CLK 84
+#define CAM_CC_MCLK4_CLK_SRC 85
+#define CAM_CC_MCLK5_CLK 86
+#define CAM_CC_MCLK5_CLK_SRC 87
+#define CAM_CC_MCLK6_CLK 88
+#define CAM_CC_MCLK6_CLK_SRC 89
+#define CAM_CC_MCLK7_CLK 90
+#define CAM_CC_MCLK7_CLK_SRC 91
+#define CAM_CC_PLL0 92
+#define CAM_CC_PLL0_OUT_EVEN 93
+#define CAM_CC_PLL0_OUT_ODD 94
+#define CAM_CC_PLL1 95
+#define CAM_CC_PLL1_OUT_EVEN 96
+#define CAM_CC_PLL2 97
+#define CAM_CC_PLL3 98
+#define CAM_CC_PLL3_OUT_EVEN 99
+#define CAM_CC_PLL4 100
+#define CAM_CC_PLL4_OUT_EVEN 101
+#define CAM_CC_PLL5 102
+#define CAM_CC_PLL5_OUT_EVEN 103
+#define CAM_CC_PLL6 104
+#define CAM_CC_PLL6_OUT_EVEN 105
+#define CAM_CC_PLL7 106
+#define CAM_CC_PLL7_OUT_EVEN 107
+#define CAM_CC_PLL8 108
+#define CAM_CC_PLL8_OUT_EVEN 109
+#define CAM_CC_QDSS_DEBUG_CLK 110
+#define CAM_CC_QDSS_DEBUG_CLK_SRC 111
+#define CAM_CC_QDSS_DEBUG_XO_CLK 112
+#define CAM_CC_SBI_AHB_CLK 113
+#define CAM_CC_SBI_CLK 114
+#define CAM_CC_SFE_0_CLK 115
+#define CAM_CC_SFE_0_CLK_SRC 116
+#define CAM_CC_SFE_0_FAST_AHB_CLK 117
+#define CAM_CC_SFE_1_CLK 118
+#define CAM_CC_SFE_1_CLK_SRC 119
+#define CAM_CC_SFE_1_FAST_AHB_CLK 120
+#define CAM_CC_SLEEP_CLK 121
+#define CAM_CC_SLEEP_CLK_SRC 122
+#define CAM_CC_SLOW_AHB_CLK_SRC 123
+#define CAM_CC_XO_CLK_SRC 124
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_ICP_BCR 1
+#define CAM_CC_IFE_0_BCR 2
+#define CAM_CC_IFE_1_BCR 3
+#define CAM_CC_IFE_2_BCR 4
+#define CAM_CC_IPE_0_BCR 5
+#define CAM_CC_QDSS_DEBUG_BCR 6
+#define CAM_CC_SBI_BCR 7
+#define CAM_CC_SFE_0_BCR 8
+#define CAM_CC_SFE_1_BCR 9
+
+/* CAM_CC GDSCRs */
+#define BPS_GDSC 0
+#define IPE_0_GDSC 1
+#define SBI_GDSC 2
+#define IFE_0_GDSC 3
+#define IFE_1_GDSC 4
+#define IFE_2_GDSC 5
+#define SFE_0_GDSC 6
+#define SFE_1_GDSC 7
+#define TITAN_TOP_GDSC 8
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8450-dispcc.h b/include/dt-bindings/clock/qcom,sm8450-dispcc.h
new file mode 100644
index 000000000000..fd16ca894971
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8450-dispcc.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SM8450_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SM8450_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_AHB1_CLK 0
+#define DISP_CC_MDSS_AHB_CLK 1
+#define DISP_CC_MDSS_AHB_CLK_SRC 2
+#define DISP_CC_MDSS_BYTE0_CLK 3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 6
+#define DISP_CC_MDSS_BYTE1_CLK 7
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 8
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 10
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 11
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 12
+#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 13
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 14
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 15
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 16
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 17
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 18
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 19
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 20
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 21
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 22
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 23
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 24
+#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 25
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 26
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 27
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 28
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 29
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 30
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 31
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 32
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 33
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 34
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 35
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 36
+#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 37
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 38
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 39
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 40
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 41
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 42
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 43
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 44
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 45
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 46
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 47
+#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 48
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 49
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 50
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 51
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 52
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 53
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 54
+#define DISP_CC_MDSS_ESC0_CLK 55
+#define DISP_CC_MDSS_ESC0_CLK_SRC 56
+#define DISP_CC_MDSS_ESC1_CLK 57
+#define DISP_CC_MDSS_ESC1_CLK_SRC 58
+#define DISP_CC_MDSS_MDP1_CLK 59
+#define DISP_CC_MDSS_MDP_CLK 60
+#define DISP_CC_MDSS_MDP_CLK_SRC 61
+#define DISP_CC_MDSS_MDP_LUT1_CLK 62
+#define DISP_CC_MDSS_MDP_LUT_CLK 63
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 64
+#define DISP_CC_MDSS_PCLK0_CLK 65
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 66
+#define DISP_CC_MDSS_PCLK1_CLK 67
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 68
+#define DISP_CC_MDSS_ROT1_CLK 69
+#define DISP_CC_MDSS_ROT_CLK 70
+#define DISP_CC_MDSS_ROT_CLK_SRC 71
+#define DISP_CC_MDSS_RSCC_AHB_CLK 72
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 73
+#define DISP_CC_MDSS_VSYNC1_CLK 74
+#define DISP_CC_MDSS_VSYNC_CLK 75
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 76
+#define DISP_CC_PLL0 77
+#define DISP_CC_PLL1 78
+#define DISP_CC_SLEEP_CLK 79
+#define DISP_CC_SLEEP_CLK_SRC 80
+#define DISP_CC_XO_CLK 81
+#define DISP_CC_XO_CLK_SRC 82
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+#define MDSS_INT2_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8450-gpucc.h b/include/dt-bindings/clock/qcom,sm8450-gpucc.h
new file mode 100644
index 000000000000..712b171503d6
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8450-gpucc.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8450_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8450_H
+
+/* Clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_APB_CLK 2
+#define GPU_CC_CX_FF_CLK 3
+#define GPU_CC_CX_GMU_CLK 4
+#define GPU_CC_CX_SNOC_DVM_CLK 5
+#define GPU_CC_CXO_AON_CLK 6
+#define GPU_CC_CXO_CLK 7
+#define GPU_CC_DEMET_CLK 8
+#define GPU_CC_DEMET_DIV_CLK_SRC 9
+#define GPU_CC_FF_CLK_SRC 10
+#define GPU_CC_FREQ_MEASURE_CLK 11
+#define GPU_CC_GMU_CLK_SRC 12
+#define GPU_CC_GX_FF_CLK 13
+#define GPU_CC_GX_GFX3D_CLK 14
+#define GPU_CC_GX_GFX3D_RDVM_CLK 15
+#define GPU_CC_GX_GMU_CLK 16
+#define GPU_CC_GX_VSENSE_CLK 17
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 18
+#define GPU_CC_HUB_AHB_DIV_CLK_SRC 19
+#define GPU_CC_HUB_AON_CLK 20
+#define GPU_CC_HUB_CLK_SRC 21
+#define GPU_CC_HUB_CX_INT_CLK 22
+#define GPU_CC_HUB_CX_INT_DIV_CLK_SRC 23
+#define GPU_CC_MEMNOC_GFX_CLK 24
+#define GPU_CC_MND1X_0_GFX3D_CLK 25
+#define GPU_CC_MND1X_1_GFX3D_CLK 26
+#define GPU_CC_PLL0 27
+#define GPU_CC_PLL1 28
+#define GPU_CC_SLEEP_CLK 29
+#define GPU_CC_XO_CLK_SRC 30
+#define GPU_CC_XO_DIV_CLK_SRC 31
+
+/* GDSCs */
+#define GPU_GX_GDSC 0
+#define GPU_CX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8450-videocc.h b/include/dt-bindings/clock/qcom,sm8450-videocc.h
new file mode 100644
index 000000000000..9d795adfe4eb
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8450-videocc.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8450_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8450_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_MVS0_CLK 0
+#define VIDEO_CC_MVS0_CLK_SRC 1
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 2
+#define VIDEO_CC_MVS0C_CLK 3
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 4
+#define VIDEO_CC_MVS1_CLK 5
+#define VIDEO_CC_MVS1_CLK_SRC 6
+#define VIDEO_CC_MVS1_DIV_CLK_SRC 7
+#define VIDEO_CC_MVS1C_CLK 8
+#define VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC 9
+#define VIDEO_CC_PLL0 10
+#define VIDEO_CC_PLL1 11
+
+/* VIDEO_CC power domains */
+#define VIDEO_CC_MVS0C_GDSC 0
+#define VIDEO_CC_MVS0_GDSC 1
+#define VIDEO_CC_MVS1C_GDSC 2
+#define VIDEO_CC_MVS1_GDSC 3
+
+/* VIDEO_CC resets */
+#define CVP_VIDEO_CC_INTERFACE_BCR 0
+#define CVP_VIDEO_CC_MVS0_BCR 1
+#define CVP_VIDEO_CC_MVS0C_BCR 2
+#define CVP_VIDEO_CC_MVS1_BCR 3
+#define CVP_VIDEO_CC_MVS1C_BCR 4
+#define VIDEO_CC_MVS0C_CLK_ARES 5
+#define VIDEO_CC_MVS1C_CLK_ARES 6
+
+#endif
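Note that the *_CLK_ARES entries above sit in the same index space as the BCRs, so both kinds of reset are addressed through the same single-cell reset specifier of one controller. A hypothetical fragment (the reset-names strings are illustrative, not defined by this patch):

#include <dt-bindings/clock/qcom,sm8450-videocc.h>

video-codec@aa00000 {                        /* hypothetical consumer */
        resets = <&videocc CVP_VIDEO_CC_MVS0C_BCR>,
                 <&videocc VIDEO_CC_MVS0C_CLK_ARES>;
        reset-names = "core", "core_ares";
};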
diff --git a/include/dt-bindings/clock/qcom,sm8550-camcc.h b/include/dt-bindings/clock/qcom,sm8550-camcc.h
new file mode 100644
index 000000000000..a2a256691c2b
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8550-camcc.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8550_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8550_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_CLK 1
+#define CAM_CC_BPS_CLK_SRC 2
+#define CAM_CC_BPS_FAST_AHB_CLK 3
+#define CAM_CC_CAMNOC_AXI_CLK 4
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 5
+#define CAM_CC_CAMNOC_DCD_XO_CLK 6
+#define CAM_CC_CAMNOC_XO_CLK 7
+#define CAM_CC_CCI_0_CLK 8
+#define CAM_CC_CCI_0_CLK_SRC 9
+#define CAM_CC_CCI_1_CLK 10
+#define CAM_CC_CCI_1_CLK_SRC 11
+#define CAM_CC_CCI_2_CLK 12
+#define CAM_CC_CCI_2_CLK_SRC 13
+#define CAM_CC_CORE_AHB_CLK 14
+#define CAM_CC_CPAS_AHB_CLK 15
+#define CAM_CC_CPAS_BPS_CLK 16
+#define CAM_CC_CPAS_CRE_CLK 17
+#define CAM_CC_CPAS_FAST_AHB_CLK 18
+#define CAM_CC_CPAS_IFE_0_CLK 19
+#define CAM_CC_CPAS_IFE_1_CLK 20
+#define CAM_CC_CPAS_IFE_2_CLK 21
+#define CAM_CC_CPAS_IFE_LITE_CLK 22
+#define CAM_CC_CPAS_IPE_NPS_CLK 23
+#define CAM_CC_CPAS_SBI_CLK 24
+#define CAM_CC_CPAS_SFE_0_CLK 25
+#define CAM_CC_CPAS_SFE_1_CLK 26
+#define CAM_CC_CPHY_RX_CLK_SRC 27
+#define CAM_CC_CRE_AHB_CLK 28
+#define CAM_CC_CRE_CLK 29
+#define CAM_CC_CRE_CLK_SRC 30
+#define CAM_CC_CSI0PHYTIMER_CLK 31
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 32
+#define CAM_CC_CSI1PHYTIMER_CLK 33
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 34
+#define CAM_CC_CSI2PHYTIMER_CLK 35
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 36
+#define CAM_CC_CSI3PHYTIMER_CLK 37
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 38
+#define CAM_CC_CSI4PHYTIMER_CLK 39
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 40
+#define CAM_CC_CSI5PHYTIMER_CLK 41
+#define CAM_CC_CSI5PHYTIMER_CLK_SRC 42
+#define CAM_CC_CSI6PHYTIMER_CLK 43
+#define CAM_CC_CSI6PHYTIMER_CLK_SRC 44
+#define CAM_CC_CSI7PHYTIMER_CLK 45
+#define CAM_CC_CSI7PHYTIMER_CLK_SRC 46
+#define CAM_CC_CSID_CLK 47
+#define CAM_CC_CSID_CLK_SRC 48
+#define CAM_CC_CSID_CSIPHY_RX_CLK 49
+#define CAM_CC_CSIPHY0_CLK 50
+#define CAM_CC_CSIPHY1_CLK 51
+#define CAM_CC_CSIPHY2_CLK 52
+#define CAM_CC_CSIPHY3_CLK 53
+#define CAM_CC_CSIPHY4_CLK 54
+#define CAM_CC_CSIPHY5_CLK 55
+#define CAM_CC_CSIPHY6_CLK 56
+#define CAM_CC_CSIPHY7_CLK 57
+#define CAM_CC_DRV_AHB_CLK 58
+#define CAM_CC_DRV_XO_CLK 59
+#define CAM_CC_FAST_AHB_CLK_SRC 60
+#define CAM_CC_GDSC_CLK 61
+#define CAM_CC_ICP_AHB_CLK 62
+#define CAM_CC_ICP_CLK 63
+#define CAM_CC_ICP_CLK_SRC 64
+#define CAM_CC_IFE_0_CLK 65
+#define CAM_CC_IFE_0_CLK_SRC 66
+#define CAM_CC_IFE_0_DSP_CLK 67
+#define CAM_CC_IFE_0_DSP_CLK_SRC 68
+#define CAM_CC_IFE_0_FAST_AHB_CLK 69
+#define CAM_CC_IFE_1_CLK 70
+#define CAM_CC_IFE_1_CLK_SRC 71
+#define CAM_CC_IFE_1_DSP_CLK 72
+#define CAM_CC_IFE_1_DSP_CLK_SRC 73
+#define CAM_CC_IFE_1_FAST_AHB_CLK 74
+#define CAM_CC_IFE_2_CLK 75
+#define CAM_CC_IFE_2_CLK_SRC 76
+#define CAM_CC_IFE_2_DSP_CLK 77
+#define CAM_CC_IFE_2_DSP_CLK_SRC 78
+#define CAM_CC_IFE_2_FAST_AHB_CLK 79
+#define CAM_CC_IFE_LITE_AHB_CLK 80
+#define CAM_CC_IFE_LITE_CLK 81
+#define CAM_CC_IFE_LITE_CLK_SRC 82
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 83
+#define CAM_CC_IFE_LITE_CSID_CLK 84
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 85
+#define CAM_CC_IPE_NPS_AHB_CLK 86
+#define CAM_CC_IPE_NPS_CLK 87
+#define CAM_CC_IPE_NPS_CLK_SRC 88
+#define CAM_CC_IPE_NPS_FAST_AHB_CLK 89
+#define CAM_CC_IPE_PPS_CLK 90
+#define CAM_CC_IPE_PPS_FAST_AHB_CLK 91
+#define CAM_CC_JPEG_1_CLK 92
+#define CAM_CC_JPEG_CLK 93
+#define CAM_CC_JPEG_CLK_SRC 94
+#define CAM_CC_MCLK0_CLK 95
+#define CAM_CC_MCLK0_CLK_SRC 96
+#define CAM_CC_MCLK1_CLK 97
+#define CAM_CC_MCLK1_CLK_SRC 98
+#define CAM_CC_MCLK2_CLK 99
+#define CAM_CC_MCLK2_CLK_SRC 100
+#define CAM_CC_MCLK3_CLK 101
+#define CAM_CC_MCLK3_CLK_SRC 102
+#define CAM_CC_MCLK4_CLK 103
+#define CAM_CC_MCLK4_CLK_SRC 104
+#define CAM_CC_MCLK5_CLK 105
+#define CAM_CC_MCLK5_CLK_SRC 106
+#define CAM_CC_MCLK6_CLK 107
+#define CAM_CC_MCLK6_CLK_SRC 108
+#define CAM_CC_MCLK7_CLK 109
+#define CAM_CC_MCLK7_CLK_SRC 110
+#define CAM_CC_PLL0 111
+#define CAM_CC_PLL0_OUT_EVEN 112
+#define CAM_CC_PLL0_OUT_ODD 113
+#define CAM_CC_PLL1 114
+#define CAM_CC_PLL1_OUT_EVEN 115
+#define CAM_CC_PLL2 116
+#define CAM_CC_PLL3 117
+#define CAM_CC_PLL3_OUT_EVEN 118
+#define CAM_CC_PLL4 119
+#define CAM_CC_PLL4_OUT_EVEN 120
+#define CAM_CC_PLL5 121
+#define CAM_CC_PLL5_OUT_EVEN 122
+#define CAM_CC_PLL6 123
+#define CAM_CC_PLL6_OUT_EVEN 124
+#define CAM_CC_PLL7 125
+#define CAM_CC_PLL7_OUT_EVEN 126
+#define CAM_CC_PLL8 127
+#define CAM_CC_PLL8_OUT_EVEN 128
+#define CAM_CC_PLL9 129
+#define CAM_CC_PLL9_OUT_EVEN 130
+#define CAM_CC_PLL10 131
+#define CAM_CC_PLL10_OUT_EVEN 132
+#define CAM_CC_PLL11 133
+#define CAM_CC_PLL11_OUT_EVEN 134
+#define CAM_CC_PLL12 135
+#define CAM_CC_PLL12_OUT_EVEN 136
+#define CAM_CC_QDSS_DEBUG_CLK 137
+#define CAM_CC_QDSS_DEBUG_CLK_SRC 138
+#define CAM_CC_QDSS_DEBUG_XO_CLK 139
+#define CAM_CC_SBI_CLK 140
+#define CAM_CC_SBI_FAST_AHB_CLK 141
+#define CAM_CC_SFE_0_CLK 142
+#define CAM_CC_SFE_0_CLK_SRC 143
+#define CAM_CC_SFE_0_FAST_AHB_CLK 144
+#define CAM_CC_SFE_1_CLK 145
+#define CAM_CC_SFE_1_CLK_SRC 146
+#define CAM_CC_SFE_1_FAST_AHB_CLK 147
+#define CAM_CC_SLEEP_CLK 148
+#define CAM_CC_SLEEP_CLK_SRC 149
+#define CAM_CC_SLOW_AHB_CLK_SRC 150
+#define CAM_CC_XO_CLK_SRC 151
+
+/* CAM_CC power domains */
+#define CAM_CC_BPS_GDSC 0
+#define CAM_CC_IFE_0_GDSC 1
+#define CAM_CC_IFE_1_GDSC 2
+#define CAM_CC_IFE_2_GDSC 3
+#define CAM_CC_IPE_0_GDSC 4
+#define CAM_CC_SBI_GDSC 5
+#define CAM_CC_SFE_0_GDSC 6
+#define CAM_CC_SFE_1_GDSC 7
+#define CAM_CC_TITAN_TOP_GDSC 8
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_DRV_BCR 1
+#define CAM_CC_ICP_BCR 2
+#define CAM_CC_IFE_0_BCR 3
+#define CAM_CC_IFE_1_BCR 4
+#define CAM_CC_IFE_2_BCR 5
+#define CAM_CC_IPE_0_BCR 6
+#define CAM_CC_QDSS_DEBUG_BCR 7
+#define CAM_CC_SBI_BCR 8
+#define CAM_CC_SFE_0_BCR 9
+#define CAM_CC_SFE_1_BCR 10
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8550-dispcc.h b/include/dt-bindings/clock/qcom,sm8550-dispcc.h
new file mode 100644
index 000000000000..ed3094c694e0
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8550-dispcc.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SM8550_DISP_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SM8550_DISP_CC_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_ACCU_CLK 0
+#define DISP_CC_MDSS_AHB1_CLK 1
+#define DISP_CC_MDSS_AHB_CLK 2
+#define DISP_CC_MDSS_AHB_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_CLK 4
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 7
+#define DISP_CC_MDSS_BYTE1_CLK 8
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 10
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 11
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 12
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 13
+#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 14
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 15
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 16
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 17
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 18
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 19
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 20
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 21
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 22
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 23
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 24
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 25
+#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 26
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 27
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 28
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 29
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 30
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 31
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 32
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 33
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 34
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 35
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 36
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 37
+#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 38
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 39
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 40
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 41
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 42
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 43
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 44
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 45
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 46
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 47
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 48
+#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 49
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 50
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 51
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 52
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 53
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 54
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 55
+#define DISP_CC_MDSS_ESC0_CLK 56
+#define DISP_CC_MDSS_ESC0_CLK_SRC 57
+#define DISP_CC_MDSS_ESC1_CLK 58
+#define DISP_CC_MDSS_ESC1_CLK_SRC 59
+#define DISP_CC_MDSS_MDP1_CLK 60
+#define DISP_CC_MDSS_MDP_CLK 61
+#define DISP_CC_MDSS_MDP_CLK_SRC 62
+#define DISP_CC_MDSS_MDP_LUT1_CLK 63
+#define DISP_CC_MDSS_MDP_LUT_CLK 64
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 65
+#define DISP_CC_MDSS_PCLK0_CLK 66
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 67
+#define DISP_CC_MDSS_PCLK1_CLK 68
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 69
+#define DISP_CC_MDSS_RSCC_AHB_CLK 70
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 71
+#define DISP_CC_MDSS_VSYNC1_CLK 72
+#define DISP_CC_MDSS_VSYNC_CLK 73
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 74
+#define DISP_CC_PLL0 75
+#define DISP_CC_PLL1 76
+#define DISP_CC_SLEEP_CLK 77
+#define DISP_CC_SLEEP_CLK_SRC 78
+#define DISP_CC_XO_CLK 79
+#define DISP_CC_XO_CLK_SRC 80
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+#define MDSS_INT2_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8550-gcc.h b/include/dt-bindings/clock/qcom,sm8550-gcc.h
new file mode 100644
index 000000000000..3bf6f2b75c99
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8550-gcc.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8550_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8550_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_AXI_CLK 0
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_AHB2PHY_0_CLK 4
+#define GCC_BOOT_ROM_AHB_CLK 5
+#define GCC_CAMERA_AHB_CLK 6
+#define GCC_CAMERA_HF_AXI_CLK 7
+#define GCC_CAMERA_SF_AXI_CLK 8
+#define GCC_CAMERA_XO_CLK 9
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 10
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 11
+#define GCC_CNOC_PCIE_SF_AXI_CLK 12
+#define GCC_DDRSS_GPU_AXI_CLK 13
+#define GCC_DDRSS_PCIE_SF_QTB_CLK 14
+#define GCC_DISP_AHB_CLK 15
+#define GCC_DISP_HF_AXI_CLK 16
+#define GCC_DISP_XO_CLK 17
+#define GCC_GP1_CLK 18
+#define GCC_GP1_CLK_SRC 19
+#define GCC_GP2_CLK 20
+#define GCC_GP2_CLK_SRC 21
+#define GCC_GP3_CLK 22
+#define GCC_GP3_CLK_SRC 23
+#define GCC_GPLL0 24
+#define GCC_GPLL0_OUT_EVEN 25
+#define GCC_GPLL4 26
+#define GCC_GPLL7 27
+#define GCC_GPLL9 28
+#define GCC_GPU_CFG_AHB_CLK 29
+#define GCC_GPU_GPLL0_CLK_SRC 30
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 31
+#define GCC_GPU_MEMNOC_GFX_CLK 32
+#define GCC_GPU_SNOC_DVM_GFX_CLK 33
+#define GCC_PCIE_0_AUX_CLK 34
+#define GCC_PCIE_0_AUX_CLK_SRC 35
+#define GCC_PCIE_0_CFG_AHB_CLK 36
+#define GCC_PCIE_0_MSTR_AXI_CLK 37
+#define GCC_PCIE_0_PHY_RCHNG_CLK 38
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 39
+#define GCC_PCIE_0_PIPE_CLK 40
+#define GCC_PCIE_0_PIPE_CLK_SRC 41
+#define GCC_PCIE_0_SLV_AXI_CLK 42
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 43
+#define GCC_PCIE_1_AUX_CLK 44
+#define GCC_PCIE_1_AUX_CLK_SRC 45
+#define GCC_PCIE_1_CFG_AHB_CLK 46
+#define GCC_PCIE_1_MSTR_AXI_CLK 47
+#define GCC_PCIE_1_PHY_AUX_CLK 48
+#define GCC_PCIE_1_PHY_AUX_CLK_SRC 49
+#define GCC_PCIE_1_PHY_RCHNG_CLK 50
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 51
+#define GCC_PCIE_1_PIPE_CLK 52
+#define GCC_PCIE_1_PIPE_CLK_SRC 53
+#define GCC_PCIE_1_SLV_AXI_CLK 54
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 55
+#define GCC_PDM2_CLK 56
+#define GCC_PDM2_CLK_SRC 57
+#define GCC_PDM_AHB_CLK 58
+#define GCC_PDM_XO4_CLK 59
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 60
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 61
+#define GCC_QMIP_DISP_AHB_CLK 62
+#define GCC_QMIP_GPU_AHB_CLK 63
+#define GCC_QMIP_PCIE_AHB_CLK 64
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 65
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 66
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 67
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 68
+#define GCC_QUPV3_I2C_CORE_CLK 69
+#define GCC_QUPV3_I2C_S0_CLK 70
+#define GCC_QUPV3_I2C_S0_CLK_SRC 71
+#define GCC_QUPV3_I2C_S1_CLK 72
+#define GCC_QUPV3_I2C_S1_CLK_SRC 73
+#define GCC_QUPV3_I2C_S2_CLK 74
+#define GCC_QUPV3_I2C_S2_CLK_SRC 75
+#define GCC_QUPV3_I2C_S3_CLK 76
+#define GCC_QUPV3_I2C_S3_CLK_SRC 77
+#define GCC_QUPV3_I2C_S4_CLK 78
+#define GCC_QUPV3_I2C_S4_CLK_SRC 79
+#define GCC_QUPV3_I2C_S5_CLK 80
+#define GCC_QUPV3_I2C_S5_CLK_SRC 81
+#define GCC_QUPV3_I2C_S6_CLK 82
+#define GCC_QUPV3_I2C_S6_CLK_SRC 83
+#define GCC_QUPV3_I2C_S7_CLK 84
+#define GCC_QUPV3_I2C_S7_CLK_SRC 85
+#define GCC_QUPV3_I2C_S8_CLK 86
+#define GCC_QUPV3_I2C_S8_CLK_SRC 87
+#define GCC_QUPV3_I2C_S9_CLK 88
+#define GCC_QUPV3_I2C_S9_CLK_SRC 89
+#define GCC_QUPV3_I2C_S_AHB_CLK 90
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 91
+#define GCC_QUPV3_WRAP1_CORE_CLK 92
+#define GCC_QUPV3_WRAP1_S0_CLK 93
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 94
+#define GCC_QUPV3_WRAP1_S1_CLK 95
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 96
+#define GCC_QUPV3_WRAP1_S2_CLK 97
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 98
+#define GCC_QUPV3_WRAP1_S3_CLK 99
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 100
+#define GCC_QUPV3_WRAP1_S4_CLK 101
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 102
+#define GCC_QUPV3_WRAP1_S5_CLK 103
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 104
+#define GCC_QUPV3_WRAP1_S6_CLK 105
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 106
+#define GCC_QUPV3_WRAP1_S7_CLK 107
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 108
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 109
+#define GCC_QUPV3_WRAP2_CORE_CLK 110
+#define GCC_QUPV3_WRAP2_S0_CLK 111
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 112
+#define GCC_QUPV3_WRAP2_S1_CLK 113
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 114
+#define GCC_QUPV3_WRAP2_S2_CLK 115
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 116
+#define GCC_QUPV3_WRAP2_S3_CLK 117
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 118
+#define GCC_QUPV3_WRAP2_S4_CLK 119
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 120
+#define GCC_QUPV3_WRAP2_S5_CLK 121
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 122
+#define GCC_QUPV3_WRAP2_S6_CLK 123
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 124
+#define GCC_QUPV3_WRAP2_S7_CLK 125
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 126
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 127
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 128
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 129
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 130
+#define GCC_SDCC2_AHB_CLK 131
+#define GCC_SDCC2_APPS_CLK 132
+#define GCC_SDCC2_APPS_CLK_SRC 133
+#define GCC_SDCC4_AHB_CLK 134
+#define GCC_SDCC4_APPS_CLK 135
+#define GCC_SDCC4_APPS_CLK_SRC 136
+#define GCC_UFS_PHY_AHB_CLK 137
+#define GCC_UFS_PHY_AXI_CLK 138
+#define GCC_UFS_PHY_AXI_CLK_SRC 139
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 140
+#define GCC_UFS_PHY_ICE_CORE_CLK 141
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 142
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 143
+#define GCC_UFS_PHY_PHY_AUX_CLK 144
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 145
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 146
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 147
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 148
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 149
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 150
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 151
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 152
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 153
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 154
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 155
+#define GCC_USB30_PRIM_MASTER_CLK 156
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 157
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 158
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 159
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 160
+#define GCC_USB30_PRIM_SLEEP_CLK 161
+#define GCC_USB3_PRIM_PHY_AUX_CLK 162
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 163
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 164
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 165
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 166
+#define GCC_VIDEO_AHB_CLK 167
+#define GCC_VIDEO_AXI0_CLK 168
+#define GCC_VIDEO_AXI1_CLK 169
+#define GCC_VIDEO_XO_CLK 170
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_1_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_PHY_BCR 13
+#define GCC_PCIE_PHY_CFG_AHB_BCR 14
+#define GCC_PCIE_PHY_COM_BCR 15
+#define GCC_PDM_BCR 16
+#define GCC_QUPV3_WRAPPER_1_BCR 17
+#define GCC_QUPV3_WRAPPER_2_BCR 18
+#define GCC_QUPV3_WRAPPER_I2C_BCR 19
+#define GCC_QUSB2PHY_PRIM_BCR 20
+#define GCC_QUSB2PHY_SEC_BCR 21
+#define GCC_SDCC2_BCR 22
+#define GCC_SDCC4_BCR 23
+#define GCC_UFS_PHY_BCR 24
+#define GCC_USB30_PRIM_BCR 25
+#define GCC_USB3_DP_PHY_PRIM_BCR 26
+#define GCC_USB3_DP_PHY_SEC_BCR 27
+#define GCC_USB3_PHY_PRIM_BCR 28
+#define GCC_USB3_PHY_SEC_BCR 29
+#define GCC_USB3PHY_PHY_PRIM_BCR 30
+#define GCC_USB3PHY_PHY_SEC_BCR 31
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 32
+#define GCC_VIDEO_AXI0_CLK_ARES 33
+#define GCC_VIDEO_AXI1_CLK_ARES 34
+#define GCC_VIDEO_BCR 35
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_0_PHY_GDSC 1
+#define PCIE_1_GDSC 2
+#define PCIE_1_PHY_GDSC 3
+#define UFS_PHY_GDSC 4
+#define UFS_MEM_PHY_GDSC 5
+#define USB30_PRIM_GDSC 6
+#define USB3_PHY_GDSC 7
+
+#endif
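
These binding IDs are consumed from device tree source rather than from driver code: a dts/dtsi includes the header and passes the macro as the specifier cell of the clock, reset, or power-domain provider. A minimal sketch, assuming hypothetical node labels and -names strings (none are taken from a real sm8550 dtsi):

#include <dt-bindings/clock/qcom,sm8550-gcc.h>

&sdhc_2 {	/* hypothetical consumer label */
	clocks = <&gcc GCC_SDCC2_AHB_CLK>, <&gcc GCC_SDCC2_APPS_CLK>;
	clock-names = "iface", "core";
	resets = <&gcc GCC_SDCC2_BCR>;
};

&pcie0 {	/* hypothetical consumer label */
	power-domains = <&gcc PCIE_0_GDSC>;
};

The provider driver resolves each index at registration time, which is why these numbers are ABI: existing values must never be renumbered, only appended to.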
diff --git a/include/dt-bindings/clock/qcom,sm8550-gpucc.h b/include/dt-bindings/clock/qcom,sm8550-gpucc.h
new file mode 100644
index 000000000000..a6760547a3ab
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8550-gpucc.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8550_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8550_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_FF_CLK 2
+#define GPU_CC_CX_GMU_CLK 3
+#define GPU_CC_CXO_AON_CLK 4
+#define GPU_CC_CXO_CLK 5
+#define GPU_CC_DEMET_CLK 6
+#define GPU_CC_DEMET_DIV_CLK_SRC 7
+#define GPU_CC_FF_CLK_SRC 8
+#define GPU_CC_FREQ_MEASURE_CLK 9
+#define GPU_CC_GMU_CLK_SRC 10
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 11
+#define GPU_CC_HUB_AON_CLK 12
+#define GPU_CC_HUB_CLK_SRC 13
+#define GPU_CC_HUB_CX_INT_CLK 14
+#define GPU_CC_MEMNOC_GFX_CLK 15
+#define GPU_CC_MND1X_0_GFX3D_CLK 16
+#define GPU_CC_MND1X_1_GFX3D_CLK 17
+#define GPU_CC_PLL0 18
+#define GPU_CC_PLL1 19
+#define GPU_CC_SLEEP_CLK 20
+#define GPU_CC_XO_CLK_SRC 21
+#define GPU_CC_XO_DIV_CLK_SRC 22
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+#define GPU_CC_GX_GDSC 1
+
+/* GPU_CC resets */
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CX_BCR 1
+#define GPUCC_GPU_CC_FAST_HUB_BCR 2
+#define GPUCC_GPU_CC_FF_BCR 3
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 4
+#define GPUCC_GPU_CC_GMU_BCR 5
+#define GPUCC_GPU_CC_GX_BCR 6
+#define GPUCC_GPU_CC_XO_BCR 7
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8550-tcsr.h b/include/dt-bindings/clock/qcom,sm8550-tcsr.h
new file mode 100644
index 000000000000..091cb76f953a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8550-tcsr.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8550_H
+#define _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8550_H
+
+/* TCSR CC clocks */
+#define TCSR_PCIE_0_CLKREF_EN 0
+#define TCSR_PCIE_1_CLKREF_EN 1
+#define TCSR_UFS_CLKREF_EN 2
+#define TCSR_UFS_PAD_CLKREF_EN 3
+#define TCSR_USB2_CLKREF_EN 4
+#define TCSR_USB3_CLKREF_EN 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-camcc.h b/include/dt-bindings/clock/qcom,sm8650-camcc.h
new file mode 100644
index 000000000000..df73bf35f4bf
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-camcc.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_SM8650_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_CLK 1
+#define CAM_CC_BPS_CLK_SRC 2
+#define CAM_CC_BPS_FAST_AHB_CLK 3
+#define CAM_CC_BPS_SHIFT_CLK 4
+#define CAM_CC_CAMNOC_AXI_NRT_CLK 5
+#define CAM_CC_CAMNOC_AXI_RT_CLK 6
+#define CAM_CC_CAMNOC_AXI_RT_CLK_SRC 7
+#define CAM_CC_CAMNOC_DCD_XO_CLK 8
+#define CAM_CC_CAMNOC_XO_CLK 9
+#define CAM_CC_CCI_0_CLK 10
+#define CAM_CC_CCI_0_CLK_SRC 11
+#define CAM_CC_CCI_1_CLK 12
+#define CAM_CC_CCI_1_CLK_SRC 13
+#define CAM_CC_CCI_2_CLK 14
+#define CAM_CC_CCI_2_CLK_SRC 15
+#define CAM_CC_CORE_AHB_CLK 16
+#define CAM_CC_CPAS_AHB_CLK 17
+#define CAM_CC_CPAS_BPS_CLK 18
+#define CAM_CC_CPAS_CRE_CLK 19
+#define CAM_CC_CPAS_FAST_AHB_CLK 20
+#define CAM_CC_CPAS_IFE_0_CLK 21
+#define CAM_CC_CPAS_IFE_1_CLK 22
+#define CAM_CC_CPAS_IFE_2_CLK 23
+#define CAM_CC_CPAS_IFE_LITE_CLK 24
+#define CAM_CC_CPAS_IPE_NPS_CLK 25
+#define CAM_CC_CPAS_SBI_CLK 26
+#define CAM_CC_CPAS_SFE_0_CLK 27
+#define CAM_CC_CPAS_SFE_1_CLK 28
+#define CAM_CC_CPAS_SFE_2_CLK 29
+#define CAM_CC_CPHY_RX_CLK_SRC 30
+#define CAM_CC_CRE_AHB_CLK 31
+#define CAM_CC_CRE_CLK 32
+#define CAM_CC_CRE_CLK_SRC 33
+#define CAM_CC_CSI0PHYTIMER_CLK 34
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 35
+#define CAM_CC_CSI1PHYTIMER_CLK 36
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 37
+#define CAM_CC_CSI2PHYTIMER_CLK 38
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 39
+#define CAM_CC_CSI3PHYTIMER_CLK 40
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 41
+#define CAM_CC_CSI4PHYTIMER_CLK 42
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 43
+#define CAM_CC_CSI5PHYTIMER_CLK 44
+#define CAM_CC_CSI5PHYTIMER_CLK_SRC 45
+#define CAM_CC_CSI6PHYTIMER_CLK 46
+#define CAM_CC_CSI6PHYTIMER_CLK_SRC 47
+#define CAM_CC_CSI7PHYTIMER_CLK 48
+#define CAM_CC_CSI7PHYTIMER_CLK_SRC 49
+#define CAM_CC_CSID_CLK 50
+#define CAM_CC_CSID_CLK_SRC 51
+#define CAM_CC_CSID_CSIPHY_RX_CLK 52
+#define CAM_CC_CSIPHY0_CLK 53
+#define CAM_CC_CSIPHY1_CLK 54
+#define CAM_CC_CSIPHY2_CLK 55
+#define CAM_CC_CSIPHY3_CLK 56
+#define CAM_CC_CSIPHY4_CLK 57
+#define CAM_CC_CSIPHY5_CLK 58
+#define CAM_CC_CSIPHY6_CLK 59
+#define CAM_CC_CSIPHY7_CLK 60
+#define CAM_CC_DRV_AHB_CLK 61
+#define CAM_CC_DRV_XO_CLK 62
+#define CAM_CC_FAST_AHB_CLK_SRC 63
+#define CAM_CC_GDSC_CLK 64
+#define CAM_CC_ICP_AHB_CLK 65
+#define CAM_CC_ICP_CLK 66
+#define CAM_CC_ICP_CLK_SRC 67
+#define CAM_CC_IFE_0_CLK 68
+#define CAM_CC_IFE_0_CLK_SRC 69
+#define CAM_CC_IFE_0_FAST_AHB_CLK 70
+#define CAM_CC_IFE_0_SHIFT_CLK 71
+#define CAM_CC_IFE_1_CLK 72
+#define CAM_CC_IFE_1_CLK_SRC 73
+#define CAM_CC_IFE_1_FAST_AHB_CLK 74
+#define CAM_CC_IFE_1_SHIFT_CLK 75
+#define CAM_CC_IFE_2_CLK 76
+#define CAM_CC_IFE_2_CLK_SRC 77
+#define CAM_CC_IFE_2_FAST_AHB_CLK 78
+#define CAM_CC_IFE_2_SHIFT_CLK 79
+#define CAM_CC_IFE_LITE_AHB_CLK 80
+#define CAM_CC_IFE_LITE_CLK 81
+#define CAM_CC_IFE_LITE_CLK_SRC 82
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 83
+#define CAM_CC_IFE_LITE_CSID_CLK 84
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 85
+#define CAM_CC_IPE_NPS_AHB_CLK 86
+#define CAM_CC_IPE_NPS_CLK 87
+#define CAM_CC_IPE_NPS_CLK_SRC 88
+#define CAM_CC_IPE_NPS_FAST_AHB_CLK 89
+#define CAM_CC_IPE_PPS_CLK 90
+#define CAM_CC_IPE_PPS_FAST_AHB_CLK 91
+#define CAM_CC_IPE_SHIFT_CLK 92
+#define CAM_CC_JPEG_1_CLK 93
+#define CAM_CC_JPEG_CLK 94
+#define CAM_CC_JPEG_CLK_SRC 95
+#define CAM_CC_MCLK0_CLK 96
+#define CAM_CC_MCLK0_CLK_SRC 97
+#define CAM_CC_MCLK1_CLK 98
+#define CAM_CC_MCLK1_CLK_SRC 99
+#define CAM_CC_MCLK2_CLK 100
+#define CAM_CC_MCLK2_CLK_SRC 101
+#define CAM_CC_MCLK3_CLK 102
+#define CAM_CC_MCLK3_CLK_SRC 103
+#define CAM_CC_MCLK4_CLK 104
+#define CAM_CC_MCLK4_CLK_SRC 105
+#define CAM_CC_MCLK5_CLK 106
+#define CAM_CC_MCLK5_CLK_SRC 107
+#define CAM_CC_MCLK6_CLK 108
+#define CAM_CC_MCLK6_CLK_SRC 109
+#define CAM_CC_MCLK7_CLK 110
+#define CAM_CC_MCLK7_CLK_SRC 111
+#define CAM_CC_PLL0 112
+#define CAM_CC_PLL0_OUT_EVEN 113
+#define CAM_CC_PLL0_OUT_ODD 114
+#define CAM_CC_PLL1 115
+#define CAM_CC_PLL1_OUT_EVEN 116
+#define CAM_CC_PLL2 117
+#define CAM_CC_PLL3 118
+#define CAM_CC_PLL3_OUT_EVEN 119
+#define CAM_CC_PLL4 120
+#define CAM_CC_PLL4_OUT_EVEN 121
+#define CAM_CC_PLL5 122
+#define CAM_CC_PLL5_OUT_EVEN 123
+#define CAM_CC_PLL6 124
+#define CAM_CC_PLL6_OUT_EVEN 125
+#define CAM_CC_PLL7 126
+#define CAM_CC_PLL7_OUT_EVEN 127
+#define CAM_CC_PLL8 128
+#define CAM_CC_PLL8_OUT_EVEN 129
+#define CAM_CC_PLL9 130
+#define CAM_CC_PLL9_OUT_EVEN 131
+#define CAM_CC_PLL9_OUT_ODD 132
+#define CAM_CC_PLL10 133
+#define CAM_CC_PLL10_OUT_EVEN 134
+#define CAM_CC_QDSS_DEBUG_CLK 135
+#define CAM_CC_QDSS_DEBUG_CLK_SRC 136
+#define CAM_CC_QDSS_DEBUG_XO_CLK 137
+#define CAM_CC_SBI_CLK 138
+#define CAM_CC_SBI_FAST_AHB_CLK 139
+#define CAM_CC_SBI_SHIFT_CLK 140
+#define CAM_CC_SFE_0_CLK 141
+#define CAM_CC_SFE_0_CLK_SRC 142
+#define CAM_CC_SFE_0_FAST_AHB_CLK 143
+#define CAM_CC_SFE_0_SHIFT_CLK 144
+#define CAM_CC_SFE_1_CLK 145
+#define CAM_CC_SFE_1_CLK_SRC 146
+#define CAM_CC_SFE_1_FAST_AHB_CLK 147
+#define CAM_CC_SFE_1_SHIFT_CLK 148
+#define CAM_CC_SFE_2_CLK 149
+#define CAM_CC_SFE_2_CLK_SRC 150
+#define CAM_CC_SFE_2_FAST_AHB_CLK 151
+#define CAM_CC_SFE_2_SHIFT_CLK 152
+#define CAM_CC_SLEEP_CLK 153
+#define CAM_CC_SLEEP_CLK_SRC 154
+#define CAM_CC_SLOW_AHB_CLK_SRC 155
+#define CAM_CC_TITAN_TOP_SHIFT_CLK 156
+#define CAM_CC_XO_CLK_SRC 157
+
+/* CAM_CC power domains */
+#define CAM_CC_TITAN_TOP_GDSC 0
+#define CAM_CC_BPS_GDSC 1
+#define CAM_CC_IFE_0_GDSC 2
+#define CAM_CC_IFE_1_GDSC 3
+#define CAM_CC_IFE_2_GDSC 4
+#define CAM_CC_IPE_0_GDSC 5
+#define CAM_CC_SBI_GDSC 6
+#define CAM_CC_SFE_0_GDSC 7
+#define CAM_CC_SFE_1_GDSC 8
+#define CAM_CC_SFE_2_GDSC 9
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_DRV_BCR 1
+#define CAM_CC_ICP_BCR 2
+#define CAM_CC_IFE_0_BCR 3
+#define CAM_CC_IFE_1_BCR 4
+#define CAM_CC_IFE_2_BCR 5
+#define CAM_CC_IPE_0_BCR 6
+#define CAM_CC_QDSS_DEBUG_BCR 7
+#define CAM_CC_SBI_BCR 8
+#define CAM_CC_SFE_0_BCR 9
+#define CAM_CC_SFE_1_BCR 10
+#define CAM_CC_SFE_2_BCR 11
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-dispcc.h b/include/dt-bindings/clock/qcom,sm8650-dispcc.h
new file mode 120000
index 000000000000..c0a291188f28
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-dispcc.h
@@ -0,0 +1 @@
+qcom,sm8550-dispcc.h
\ No newline at end of file
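
File mode 120000 marks the new path as a symbolic link: the SM8650 display clock controller reuses the SM8550 IDs wholesale, so no new numbering space is introduced. For a DTS consumer only the include line differs — a sketch:

/* Resolves to qcom,sm8550-dispcc.h through the symlink above. */
#include <dt-bindings/clock/qcom,sm8650-dispcc.h>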
diff --git a/include/dt-bindings/clock/qcom,sm8650-gcc.h b/include/dt-bindings/clock/qcom,sm8650-gcc.h
new file mode 100644
index 000000000000..0c543ba46079
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-gcc.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8650_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_AXI_CLK 0
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_BOOT_ROM_AHB_CLK 4
+#define GCC_CAMERA_AHB_CLK 5
+#define GCC_CAMERA_HF_AXI_CLK 6
+#define GCC_CAMERA_SF_AXI_CLK 7
+#define GCC_CAMERA_XO_CLK 8
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 9
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 10
+#define GCC_CNOC_PCIE_SF_AXI_CLK 11
+#define GCC_DDRSS_GPU_AXI_CLK 12
+#define GCC_DDRSS_PCIE_SF_QTB_CLK 13
+#define GCC_DISP_AHB_CLK 14
+#define GCC_DISP_HF_AXI_CLK 15
+#define GCC_DISP_XO_CLK 16
+#define GCC_GP1_CLK 17
+#define GCC_GP1_CLK_SRC 18
+#define GCC_GP2_CLK 19
+#define GCC_GP2_CLK_SRC 20
+#define GCC_GP3_CLK 21
+#define GCC_GP3_CLK_SRC 22
+#define GCC_GPLL0 23
+#define GCC_GPLL0_OUT_EVEN 24
+#define GCC_GPLL1 25
+#define GCC_GPLL3 26
+#define GCC_GPLL4 27
+#define GCC_GPLL6 28
+#define GCC_GPLL7 29
+#define GCC_GPLL9 30
+#define GCC_GPU_CFG_AHB_CLK 31
+#define GCC_GPU_GPLL0_CLK_SRC 32
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 33
+#define GCC_GPU_MEMNOC_GFX_CLK 34
+#define GCC_GPU_SNOC_DVM_GFX_CLK 35
+#define GCC_PCIE_0_AUX_CLK 36
+#define GCC_PCIE_0_AUX_CLK_SRC 37
+#define GCC_PCIE_0_CFG_AHB_CLK 38
+#define GCC_PCIE_0_MSTR_AXI_CLK 39
+#define GCC_PCIE_0_PHY_RCHNG_CLK 40
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 41
+#define GCC_PCIE_0_PIPE_CLK 42
+#define GCC_PCIE_0_PIPE_CLK_SRC 43
+#define GCC_PCIE_0_SLV_AXI_CLK 44
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 45
+#define GCC_PCIE_1_AUX_CLK 46
+#define GCC_PCIE_1_AUX_CLK_SRC 47
+#define GCC_PCIE_1_CFG_AHB_CLK 48
+#define GCC_PCIE_1_MSTR_AXI_CLK 49
+#define GCC_PCIE_1_PHY_AUX_CLK 50
+#define GCC_PCIE_1_PHY_AUX_CLK_SRC 51
+#define GCC_PCIE_1_PHY_RCHNG_CLK 52
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 53
+#define GCC_PCIE_1_PIPE_CLK 54
+#define GCC_PCIE_1_PIPE_CLK_SRC 55
+#define GCC_PCIE_1_SLV_AXI_CLK 56
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 57
+#define GCC_PDM2_CLK 58
+#define GCC_PDM2_CLK_SRC 59
+#define GCC_PDM_AHB_CLK 60
+#define GCC_PDM_XO4_CLK 61
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 62
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 63
+#define GCC_QMIP_DISP_AHB_CLK 64
+#define GCC_QMIP_GPU_AHB_CLK 65
+#define GCC_QMIP_PCIE_AHB_CLK 66
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 67
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 68
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 69
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 70
+#define GCC_QUPV3_I2C_CORE_CLK 71
+#define GCC_QUPV3_I2C_S0_CLK 72
+#define GCC_QUPV3_I2C_S0_CLK_SRC 73
+#define GCC_QUPV3_I2C_S1_CLK 74
+#define GCC_QUPV3_I2C_S1_CLK_SRC 75
+#define GCC_QUPV3_I2C_S2_CLK 76
+#define GCC_QUPV3_I2C_S2_CLK_SRC 77
+#define GCC_QUPV3_I2C_S3_CLK 78
+#define GCC_QUPV3_I2C_S3_CLK_SRC 79
+#define GCC_QUPV3_I2C_S4_CLK 80
+#define GCC_QUPV3_I2C_S4_CLK_SRC 81
+#define GCC_QUPV3_I2C_S5_CLK 82
+#define GCC_QUPV3_I2C_S5_CLK_SRC 83
+#define GCC_QUPV3_I2C_S6_CLK 84
+#define GCC_QUPV3_I2C_S6_CLK_SRC 85
+#define GCC_QUPV3_I2C_S7_CLK 86
+#define GCC_QUPV3_I2C_S7_CLK_SRC 87
+#define GCC_QUPV3_I2C_S8_CLK 88
+#define GCC_QUPV3_I2C_S8_CLK_SRC 89
+#define GCC_QUPV3_I2C_S9_CLK 90
+#define GCC_QUPV3_I2C_S9_CLK_SRC 91
+#define GCC_QUPV3_I2C_S_AHB_CLK 92
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 93
+#define GCC_QUPV3_WRAP1_CORE_CLK 94
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK 95
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC 96
+#define GCC_QUPV3_WRAP1_S0_CLK 97
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 98
+#define GCC_QUPV3_WRAP1_S1_CLK 99
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 100
+#define GCC_QUPV3_WRAP1_S2_CLK 101
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 102
+#define GCC_QUPV3_WRAP1_S3_CLK 103
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 104
+#define GCC_QUPV3_WRAP1_S4_CLK 105
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 106
+#define GCC_QUPV3_WRAP1_S5_CLK 107
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 108
+#define GCC_QUPV3_WRAP1_S6_CLK 109
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 110
+#define GCC_QUPV3_WRAP1_S7_CLK 111
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 112
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 113
+#define GCC_QUPV3_WRAP2_CORE_CLK 114
+#define GCC_QUPV3_WRAP2_IBI_CTRL_0_CLK_SRC 115
+#define GCC_QUPV3_WRAP2_IBI_CTRL_2_CLK 116
+#define GCC_QUPV3_WRAP2_IBI_CTRL_3_CLK 117
+#define GCC_QUPV3_WRAP2_S0_CLK 118
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 119
+#define GCC_QUPV3_WRAP2_S1_CLK 120
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 121
+#define GCC_QUPV3_WRAP2_S2_CLK 122
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 123
+#define GCC_QUPV3_WRAP2_S3_CLK 124
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 125
+#define GCC_QUPV3_WRAP2_S4_CLK 126
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 127
+#define GCC_QUPV3_WRAP2_S5_CLK 128
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 129
+#define GCC_QUPV3_WRAP2_S6_CLK 130
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 131
+#define GCC_QUPV3_WRAP2_S7_CLK 132
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 133
+#define GCC_QUPV3_WRAP3_CORE_2X_CLK 134
+#define GCC_QUPV3_WRAP3_CORE_CLK 135
+#define GCC_QUPV3_WRAP3_QSPI_REF_CLK 136
+#define GCC_QUPV3_WRAP3_QSPI_REF_CLK_SRC 137
+#define GCC_QUPV3_WRAP3_S0_CLK 138
+#define GCC_QUPV3_WRAP3_S0_CLK_SRC 139
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 140
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 141
+#define GCC_QUPV3_WRAP_2_IBI_2_AHB_CLK 142
+#define GCC_QUPV3_WRAP_2_IBI_3_AHB_CLK 143
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 144
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 145
+#define GCC_QUPV3_WRAP_3_M_AHB_CLK 146
+#define GCC_QUPV3_WRAP_3_S_AHB_CLK 147
+#define GCC_SDCC2_AHB_CLK 148
+#define GCC_SDCC2_APPS_CLK 149
+#define GCC_SDCC2_APPS_CLK_SRC 150
+#define GCC_SDCC4_AHB_CLK 151
+#define GCC_SDCC4_APPS_CLK 152
+#define GCC_SDCC4_APPS_CLK_SRC 153
+#define GCC_UFS_PHY_AHB_CLK 154
+#define GCC_UFS_PHY_AXI_CLK 155
+#define GCC_UFS_PHY_AXI_CLK_SRC 156
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 157
+#define GCC_UFS_PHY_ICE_CORE_CLK 158
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 159
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 160
+#define GCC_UFS_PHY_PHY_AUX_CLK 161
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 162
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 163
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 164
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 165
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 166
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 167
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 168
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 169
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 170
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 171
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 172
+#define GCC_USB30_PRIM_MASTER_CLK 173
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 174
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 175
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 176
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 177
+#define GCC_USB30_PRIM_SLEEP_CLK 178
+#define GCC_USB3_PRIM_PHY_AUX_CLK 179
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 180
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 181
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 182
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 183
+#define GCC_VIDEO_AHB_CLK 184
+#define GCC_VIDEO_AXI0_CLK 185
+#define GCC_VIDEO_AXI1_CLK 186
+#define GCC_VIDEO_XO_CLK 187
+#define GCC_GPLL0_AO 188
+#define GCC_GPLL0_OUT_EVEN_AO 189
+#define GCC_GPLL1_AO 190
+#define GCC_GPLL3_AO 191
+#define GCC_GPLL4_AO 192
+#define GCC_GPLL6_AO 193
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_1_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_PHY_BCR 13
+#define GCC_PCIE_PHY_CFG_AHB_BCR 14
+#define GCC_PCIE_PHY_COM_BCR 15
+#define GCC_PDM_BCR 16
+#define GCC_QUPV3_WRAPPER_1_BCR 17
+#define GCC_QUPV3_WRAPPER_2_BCR 18
+#define GCC_QUPV3_WRAPPER_3_BCR 19
+#define GCC_QUPV3_WRAPPER_I2C_BCR 20
+#define GCC_QUSB2PHY_PRIM_BCR 21
+#define GCC_QUSB2PHY_SEC_BCR 22
+#define GCC_SDCC2_BCR 23
+#define GCC_SDCC4_BCR 24
+#define GCC_UFS_PHY_BCR 25
+#define GCC_USB30_PRIM_BCR 26
+#define GCC_USB3_DP_PHY_PRIM_BCR 27
+#define GCC_USB3_DP_PHY_SEC_BCR 28
+#define GCC_USB3_PHY_PRIM_BCR 29
+#define GCC_USB3_PHY_SEC_BCR 30
+#define GCC_USB3PHY_PHY_PRIM_BCR 31
+#define GCC_USB3PHY_PHY_SEC_BCR 32
+#define GCC_VIDEO_AXI0_CLK_ARES 33
+#define GCC_VIDEO_AXI1_CLK_ARES 34
+#define GCC_VIDEO_BCR 35
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_0_PHY_GDSC 1
+#define PCIE_1_GDSC 2
+#define PCIE_1_PHY_GDSC 3
+#define UFS_PHY_GDSC 4
+#define UFS_MEM_PHY_GDSC 5
+#define USB30_PRIM_GDSC 6
+#define USB3_PHY_GDSC 7
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-gpucc.h b/include/dt-bindings/clock/qcom,sm8650-gpucc.h
new file mode 100644
index 000000000000..d0dc457cfe75
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-gpucc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SM8650_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CRC_AHB_CLK 1
+#define GPU_CC_CX_ACCU_SHIFT_CLK 2
+#define GPU_CC_CX_FF_CLK 3
+#define GPU_CC_CX_GMU_CLK 4
+#define GPU_CC_CXO_AON_CLK 5
+#define GPU_CC_CXO_CLK 6
+#define GPU_CC_DEMET_CLK 7
+#define GPU_CC_DPM_CLK 8
+#define GPU_CC_FF_CLK_SRC 9
+#define GPU_CC_FREQ_MEASURE_CLK 10
+#define GPU_CC_GMU_CLK_SRC 11
+#define GPU_CC_GX_ACCU_SHIFT_CLK 12
+#define GPU_CC_GX_FF_CLK 13
+#define GPU_CC_GX_GFX3D_CLK 14
+#define GPU_CC_GX_GFX3D_RDVM_CLK 15
+#define GPU_CC_GX_GMU_CLK 16
+#define GPU_CC_GX_VSENSE_CLK 17
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 18
+#define GPU_CC_HUB_AON_CLK 19
+#define GPU_CC_HUB_CLK_SRC 20
+#define GPU_CC_HUB_CX_INT_CLK 21
+#define GPU_CC_HUB_DIV_CLK_SRC 22
+#define GPU_CC_MEMNOC_GFX_CLK 23
+#define GPU_CC_PLL0 24
+#define GPU_CC_PLL1 25
+#define GPU_CC_SLEEP_CLK 26
+
+/* GDSCs */
+#define GPU_GX_GDSC 0
+#define GPU_CX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-tcsr.h b/include/dt-bindings/clock/qcom,sm8650-tcsr.h
new file mode 100644
index 000000000000..b2c72d492f1f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-tcsr.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8650_H
+
+/* TCSR CC clocks */
+#define TCSR_PCIE_0_CLKREF_EN 0
+#define TCSR_PCIE_1_CLKREF_EN 1
+#define TCSR_UFS_CLKREF_EN 2
+#define TCSR_UFS_PAD_CLKREF_EN 3
+#define TCSR_USB2_CLKREF_EN 4
+#define TCSR_USB3_CLKREF_EN 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8650-videocc.h b/include/dt-bindings/clock/qcom,sm8650-videocc.h
new file mode 100644
index 000000000000..4e3c2d87280f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8650-videocc.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8650_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8650_H
+
+#include "qcom,sm8450-videocc.h"
+
+/* SM8650 introduces the following new clocks and resets compared to SM8450 */
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_MVS0_SHIFT_CLK 12
+#define VIDEO_CC_MVS0C_SHIFT_CLK 13
+#define VIDEO_CC_MVS1_SHIFT_CLK 14
+#define VIDEO_CC_MVS1C_SHIFT_CLK 15
+#define VIDEO_CC_XO_CLK_SRC 16
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_XO_CLK_ARES 7
+
+#endif
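
The include-and-extend pattern above keeps a single numbering space across SoC generations: the SM8450 header supplies IDs 0-11 and this file continues at 12. A sketch with a hypothetical consumer label (VIDEO_CC_MVS0_CLK is assumed to be among the inherited SM8450 IDs):

#include <dt-bindings/clock/qcom,sm8650-videocc.h>

&video_codec {	/* hypothetical consumer label */
	clocks = <&videocc VIDEO_CC_MVS0_CLK>,		/* inherited from SM8450 */
		 <&videocc VIDEO_CC_MVS0_SHIFT_CLK>;	/* new on SM8650 */
};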
diff --git a/include/dt-bindings/clock/qcom,sm8750-dispcc.h b/include/dt-bindings/clock/qcom,sm8750-dispcc.h
new file mode 100644
index 000000000000..dafb5069c96a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8750-dispcc.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024, Linaro Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_SM8750_DISP_CC_H
+#define _DT_BINDINGS_CLK_QCOM_SM8750_DISP_CC_H
+
+/* DISP_CC clocks */
+#define DISP_CC_ESYNC0_CLK 0
+#define DISP_CC_ESYNC0_CLK_SRC 1
+#define DISP_CC_ESYNC1_CLK 2
+#define DISP_CC_ESYNC1_CLK_SRC 3
+#define DISP_CC_MDSS_ACCU_SHIFT_CLK 4
+#define DISP_CC_MDSS_AHB1_CLK 5
+#define DISP_CC_MDSS_AHB_CLK 6
+#define DISP_CC_MDSS_AHB_CLK_SRC 7
+#define DISP_CC_MDSS_BYTE0_CLK 8
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 10
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 11
+#define DISP_CC_MDSS_BYTE1_CLK 12
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 13
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 14
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 15
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 16
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 17
+#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 18
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 19
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 20
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 21
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 22
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 23
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 24
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 25
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 26
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 27
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 28
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 29
+#define DISP_CC_MDSS_DPTX1_CRYPTO_CLK 30
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 31
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 32
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 33
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 34
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 35
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 36
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 37
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 38
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 39
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 40
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 41
+#define DISP_CC_MDSS_DPTX2_CRYPTO_CLK 42
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 43
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 44
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 45
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 46
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 47
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 48
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 49
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 50
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 51
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 52
+#define DISP_CC_MDSS_DPTX3_CRYPTO_CLK 53
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 54
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 55
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 56
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 57
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 58
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 59
+#define DISP_CC_MDSS_ESC0_CLK 60
+#define DISP_CC_MDSS_ESC0_CLK_SRC 61
+#define DISP_CC_MDSS_ESC1_CLK 62
+#define DISP_CC_MDSS_ESC1_CLK_SRC 63
+#define DISP_CC_MDSS_MDP1_CLK 64
+#define DISP_CC_MDSS_MDP_CLK 65
+#define DISP_CC_MDSS_MDP_CLK_SRC 66
+#define DISP_CC_MDSS_MDP_LUT1_CLK 67
+#define DISP_CC_MDSS_MDP_LUT_CLK 68
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 69
+#define DISP_CC_MDSS_PCLK0_CLK 70
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 71
+#define DISP_CC_MDSS_PCLK1_CLK 72
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 73
+#define DISP_CC_MDSS_PCLK2_CLK 74
+#define DISP_CC_MDSS_PCLK2_CLK_SRC 75
+#define DISP_CC_MDSS_RSCC_AHB_CLK 76
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 77
+#define DISP_CC_MDSS_VSYNC1_CLK 78
+#define DISP_CC_MDSS_VSYNC_CLK 79
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 80
+#define DISP_CC_OSC_CLK 81
+#define DISP_CC_OSC_CLK_SRC 82
+#define DISP_CC_PLL0 83
+#define DISP_CC_PLL1 84
+#define DISP_CC_PLL2 85
+#define DISP_CC_SLEEP_CLK 86
+#define DISP_CC_SLEEP_CLK_SRC 87
+#define DISP_CC_XO_CLK 88
+#define DISP_CC_XO_CLK_SRC 89
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+#define MDSS_INT2_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8750-gcc.h b/include/dt-bindings/clock/qcom,sm8750-gcc.h
new file mode 100644
index 000000000000..e234595d7f42
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8750-gcc.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SM8750_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SM8750_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_AXI_CLK 0
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 3
+#define GCC_BOOT_ROM_AHB_CLK 4
+#define GCC_CAM_BIST_MCLK_AHB_CLK 5
+#define GCC_CAMERA_AHB_CLK 6
+#define GCC_CAMERA_HF_AXI_CLK 7
+#define GCC_CAMERA_SF_AXI_CLK 8
+#define GCC_CAMERA_XO_CLK 9
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 10
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 11
+#define GCC_CNOC_PCIE_SF_AXI_CLK 12
+#define GCC_DDRSS_GPU_AXI_CLK 13
+#define GCC_DDRSS_PCIE_SF_QTB_CLK 14
+#define GCC_DISP_AHB_CLK 15
+#define GCC_DISP_HF_AXI_CLK 16
+#define GCC_EVA_AHB_CLK 17
+#define GCC_EVA_AXI0_CLK 18
+#define GCC_EVA_AXI0C_CLK 19
+#define GCC_EVA_XO_CLK 20
+#define GCC_GP1_CLK 21
+#define GCC_GP1_CLK_SRC 22
+#define GCC_GP2_CLK 23
+#define GCC_GP2_CLK_SRC 24
+#define GCC_GP3_CLK 25
+#define GCC_GP3_CLK_SRC 26
+#define GCC_GPLL0 27
+#define GCC_GPLL0_OUT_EVEN 28
+#define GCC_GPLL1 29
+#define GCC_GPLL4 30
+#define GCC_GPLL7 31
+#define GCC_GPLL9 32
+#define GCC_GPU_CFG_AHB_CLK 33
+#define GCC_GPU_GEMNOC_GFX_CLK 34
+#define GCC_GPU_GPLL0_CLK_SRC 35
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 36
+#define GCC_PCIE_0_AUX_CLK 37
+#define GCC_PCIE_0_AUX_CLK_SRC 38
+#define GCC_PCIE_0_CFG_AHB_CLK 39
+#define GCC_PCIE_0_MSTR_AXI_CLK 40
+#define GCC_PCIE_0_PHY_RCHNG_CLK 41
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 42
+#define GCC_PCIE_0_PIPE_CLK 43
+#define GCC_PCIE_0_PIPE_CLK_SRC 44
+#define GCC_PCIE_0_SLV_AXI_CLK 45
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 46
+#define GCC_PCIE_RSCC_CFG_AHB_CLK 47
+#define GCC_PCIE_RSCC_XO_CLK 48
+#define GCC_PDM2_CLK 49
+#define GCC_PDM2_CLK_SRC 50
+#define GCC_PDM_AHB_CLK 51
+#define GCC_PDM_XO4_CLK 52
+#define GCC_QMIP_CAMERA_CMD_AHB_CLK 53
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 54
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 55
+#define GCC_QMIP_GPU_AHB_CLK 56
+#define GCC_QMIP_PCIE_AHB_CLK 57
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 58
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 59
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 60
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 61
+#define GCC_QUPV3_I2C_CORE_CLK 62
+#define GCC_QUPV3_I2C_S0_CLK 63
+#define GCC_QUPV3_I2C_S0_CLK_SRC 64
+#define GCC_QUPV3_I2C_S1_CLK 65
+#define GCC_QUPV3_I2C_S1_CLK_SRC 66
+#define GCC_QUPV3_I2C_S2_CLK 67
+#define GCC_QUPV3_I2C_S2_CLK_SRC 68
+#define GCC_QUPV3_I2C_S3_CLK 69
+#define GCC_QUPV3_I2C_S3_CLK_SRC 70
+#define GCC_QUPV3_I2C_S4_CLK 71
+#define GCC_QUPV3_I2C_S4_CLK_SRC 72
+#define GCC_QUPV3_I2C_S5_CLK 73
+#define GCC_QUPV3_I2C_S5_CLK_SRC 74
+#define GCC_QUPV3_I2C_S6_CLK 75
+#define GCC_QUPV3_I2C_S6_CLK_SRC 76
+#define GCC_QUPV3_I2C_S7_CLK 77
+#define GCC_QUPV3_I2C_S7_CLK_SRC 78
+#define GCC_QUPV3_I2C_S8_CLK 79
+#define GCC_QUPV3_I2C_S8_CLK_SRC 80
+#define GCC_QUPV3_I2C_S9_CLK 81
+#define GCC_QUPV3_I2C_S9_CLK_SRC 82
+#define GCC_QUPV3_I2C_S_AHB_CLK 83
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 84
+#define GCC_QUPV3_WRAP1_CORE_CLK 85
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK 86
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC 87
+#define GCC_QUPV3_WRAP1_S0_CLK 88
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 89
+#define GCC_QUPV3_WRAP1_S1_CLK 90
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 91
+#define GCC_QUPV3_WRAP1_S2_CLK 92
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_S3_CLK 94
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 95
+#define GCC_QUPV3_WRAP1_S4_CLK 96
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S5_CLK 98
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 99
+#define GCC_QUPV3_WRAP1_S6_CLK 100
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 101
+#define GCC_QUPV3_WRAP1_S7_CLK 102
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 103
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 104
+#define GCC_QUPV3_WRAP2_CORE_CLK 105
+#define GCC_QUPV3_WRAP2_IBI_CTRL_0_CLK_SRC 106
+#define GCC_QUPV3_WRAP2_IBI_CTRL_2_CLK 107
+#define GCC_QUPV3_WRAP2_IBI_CTRL_3_CLK 108
+#define GCC_QUPV3_WRAP2_S0_CLK 109
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 110
+#define GCC_QUPV3_WRAP2_S1_CLK 111
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 112
+#define GCC_QUPV3_WRAP2_S2_CLK 113
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 114
+#define GCC_QUPV3_WRAP2_S3_CLK 115
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 116
+#define GCC_QUPV3_WRAP2_S4_CLK 117
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 118
+#define GCC_QUPV3_WRAP2_S5_CLK 119
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 120
+#define GCC_QUPV3_WRAP2_S6_CLK 121
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 122
+#define GCC_QUPV3_WRAP2_S7_CLK 123
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 124
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 125
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 126
+#define GCC_QUPV3_WRAP_2_IBI_2_AHB_CLK 127
+#define GCC_QUPV3_WRAP_2_IBI_3_AHB_CLK 128
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 129
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 130
+#define GCC_SDCC2_AHB_CLK 131
+#define GCC_SDCC2_APPS_CLK 132
+#define GCC_SDCC2_APPS_CLK_SRC 133
+#define GCC_SDCC4_AHB_CLK 134
+#define GCC_SDCC4_APPS_CLK 135
+#define GCC_SDCC4_APPS_CLK_SRC 136
+#define GCC_UFS_PHY_AHB_CLK 137
+#define GCC_UFS_PHY_AXI_CLK 138
+#define GCC_UFS_PHY_AXI_CLK_SRC 139
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 140
+#define GCC_UFS_PHY_ICE_CORE_CLK 141
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 142
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 143
+#define GCC_UFS_PHY_PHY_AUX_CLK 144
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 145
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 146
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 147
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 148
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 149
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 150
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 151
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 152
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 153
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 154
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 155
+#define GCC_USB30_PRIM_MASTER_CLK 156
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 157
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 158
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 159
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 160
+#define GCC_USB30_PRIM_SLEEP_CLK 161
+#define GCC_USB3_PRIM_PHY_AUX_CLK 162
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 163
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 164
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 165
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 166
+#define GCC_VIDEO_AHB_CLK 167
+#define GCC_VIDEO_AXI0_CLK 168
+#define GCC_VIDEO_AXI1_CLK 169
+#define GCC_VIDEO_XO_CLK 170
+
+/* GCC power domains */
+#define GCC_PCIE_0_GDSC 0
+#define GCC_PCIE_0_PHY_GDSC 1
+#define GCC_UFS_MEM_PHY_GDSC 2
+#define GCC_UFS_PHY_GDSC 3
+#define GCC_USB30_PRIM_GDSC 4
+#define GCC_USB3_PHY_GDSC 5
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_EVA_BCR 2
+#define GCC_GPU_BCR 3
+#define GCC_PCIE_0_BCR 4
+#define GCC_PCIE_0_LINK_DOWN_BCR 5
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 6
+#define GCC_PCIE_0_PHY_BCR 7
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 8
+#define GCC_PCIE_PHY_BCR 9
+#define GCC_PCIE_PHY_CFG_AHB_BCR 10
+#define GCC_PCIE_PHY_COM_BCR 11
+#define GCC_PCIE_RSCC_BCR 12
+#define GCC_PDM_BCR 13
+#define GCC_QUPV3_WRAPPER_1_BCR 14
+#define GCC_QUPV3_WRAPPER_2_BCR 15
+#define GCC_QUPV3_WRAPPER_I2C_BCR 16
+#define GCC_QUSB2PHY_PRIM_BCR 17
+#define GCC_QUSB2PHY_SEC_BCR 18
+#define GCC_SDCC2_BCR 19
+#define GCC_SDCC4_BCR 20
+#define GCC_UFS_PHY_BCR 21
+#define GCC_USB30_PRIM_BCR 22
+#define GCC_USB3_DP_PHY_PRIM_BCR 23
+#define GCC_USB3_DP_PHY_SEC_BCR 24
+#define GCC_USB3_PHY_PRIM_BCR 25
+#define GCC_USB3_PHY_SEC_BCR 26
+#define GCC_USB3PHY_PHY_PRIM_BCR 27
+#define GCC_USB3PHY_PHY_SEC_BCR 28
+#define GCC_VIDEO_AXI0_CLK_ARES 29
+#define GCC_VIDEO_AXI1_CLK_ARES 30
+#define GCC_VIDEO_BCR 31
+#define GCC_EVA_AXI0_CLK_ARES 32
+#define GCC_EVA_AXI0C_CLK_ARES 33
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8750-tcsr.h b/include/dt-bindings/clock/qcom,sm8750-tcsr.h
new file mode 100644
index 000000000000..1c502ac7c7f4
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8750-tcsr.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8750_H
+#define _DT_BINDINGS_CLK_QCOM_TCSR_CC_SM8750_H
+
+/* TCSR_CC clocks */
+#define TCSR_PCIE_0_CLKREF_EN 0
+#define TCSR_UFS_CLKREF_EN 1
+#define TCSR_USB2_CLKREF_EN 2
+#define TCSR_USB3_CLKREF_EN 3
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,sm8750-videocc.h b/include/dt-bindings/clock/qcom,sm8750-videocc.h
new file mode 100644
index 000000000000..f3bfa2ba5160
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,sm8750-videocc.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8750_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SM8750_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_AHB_CLK 0
+#define VIDEO_CC_AHB_CLK_SRC 1
+#define VIDEO_CC_MVS0_CLK 2
+#define VIDEO_CC_MVS0_CLK_SRC 3
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 4
+#define VIDEO_CC_MVS0_FREERUN_CLK 5
+#define VIDEO_CC_MVS0_SHIFT_CLK 6
+#define VIDEO_CC_MVS0C_CLK 7
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 8
+#define VIDEO_CC_MVS0C_FREERUN_CLK 9
+#define VIDEO_CC_MVS0C_SHIFT_CLK 10
+#define VIDEO_CC_PLL0 11
+#define VIDEO_CC_SLEEP_CLK 12
+#define VIDEO_CC_SLEEP_CLK_SRC 13
+#define VIDEO_CC_XO_CLK 14
+#define VIDEO_CC_XO_CLK_SRC 15
+
+/* VIDEO_CC power domains */
+#define VIDEO_CC_MVS0_GDSC 0
+#define VIDEO_CC_MVS0C_GDSC 1
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_INTERFACE_BCR 0
+#define VIDEO_CC_MVS0_BCR 1
+#define VIDEO_CC_MVS0C_CLK_ARES 2
+#define VIDEO_CC_MVS0C_BCR 3
+#define VIDEO_CC_MVS0_FREERUN_CLK_ARES 4
+#define VIDEO_CC_MVS0C_FREERUN_CLK_ARES 5
+#define VIDEO_CC_XO_CLK_ARES 6
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,videocc-sc7280.h b/include/dt-bindings/clock/qcom,videocc-sc7280.h
new file mode 100644
index 000000000000..9e00c3a5f75e
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,videocc-sc7280.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SC7280_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SC7280_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_PLL0 0
+#define VIDEO_CC_IRIS_AHB_CLK 1
+#define VIDEO_CC_IRIS_CLK_SRC 2
+#define VIDEO_CC_MVS0_AXI_CLK 3
+#define VIDEO_CC_MVS0_CORE_CLK 4
+#define VIDEO_CC_MVSC_CORE_CLK 5
+#define VIDEO_CC_MVSC_CTL_AXI_CLK 6
+#define VIDEO_CC_SLEEP_CLK 7
+#define VIDEO_CC_SLEEP_CLK_SRC 8
+#define VIDEO_CC_VENUS_AHB_CLK 9
+#define VIDEO_CC_XO_CLK 10
+#define VIDEO_CC_XO_CLK_SRC 11
+
+/* VIDEO_CC power domains */
+#define MVS0_GDSC 0
+#define MVSC_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,videocc-sm8150.h b/include/dt-bindings/clock/qcom,videocc-sm8150.h
index e24ee840cfdb..c557b78dc572 100644
--- a/include/dt-bindings/clock/qcom,videocc-sm8150.h
+++ b/include/dt-bindings/clock/qcom,videocc-sm8150.h
@@ -16,6 +16,10 @@
/* VIDEO_CC Resets */
#define VIDEO_CC_MVSC_CORE_CLK_BCR 0
+#define VIDEO_CC_INTERFACE_BCR 1
+#define VIDEO_CC_MVS0_BCR 2
+#define VIDEO_CC_MVS1_BCR 3
+#define VIDEO_CC_MVSC_BCR 4
/* VIDEO_CC GDSCRs */
#define VENUS_GDSC 0
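
The four resets added here become reachable through the controller's reset specifier. A sketch, assuming a hypothetical consumer label and reset-names:

#include <dt-bindings/clock/qcom,videocc-sm8150.h>

&venus {	/* hypothetical consumer label */
	resets = <&videocc VIDEO_CC_MVSC_BCR>,
		 <&videocc VIDEO_CC_INTERFACE_BCR>;
	reset-names = "core", "iface";
};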
diff --git a/include/dt-bindings/clock/qcom,x1e80100-camcc.h b/include/dt-bindings/clock/qcom,x1e80100-camcc.h
new file mode 100644
index 000000000000..d72fdfb06a7c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,x1e80100-camcc.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_X1E80100_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_X1E80100_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_CLK 1
+#define CAM_CC_BPS_CLK_SRC 2
+#define CAM_CC_BPS_FAST_AHB_CLK 3
+#define CAM_CC_CAMNOC_AXI_NRT_CLK 4
+#define CAM_CC_CAMNOC_AXI_RT_CLK 5
+#define CAM_CC_CAMNOC_AXI_RT_CLK_SRC 6
+#define CAM_CC_CAMNOC_DCD_XO_CLK 7
+#define CAM_CC_CAMNOC_XO_CLK 8
+#define CAM_CC_CCI_0_CLK 9
+#define CAM_CC_CCI_0_CLK_SRC 10
+#define CAM_CC_CCI_1_CLK 11
+#define CAM_CC_CCI_1_CLK_SRC 12
+#define CAM_CC_CORE_AHB_CLK 13
+#define CAM_CC_CPAS_AHB_CLK 14
+#define CAM_CC_CPAS_BPS_CLK 15
+#define CAM_CC_CPAS_FAST_AHB_CLK 16
+#define CAM_CC_CPAS_IFE_0_CLK 17
+#define CAM_CC_CPAS_IFE_1_CLK 18
+#define CAM_CC_CPAS_IFE_LITE_CLK 19
+#define CAM_CC_CPAS_IPE_NPS_CLK 20
+#define CAM_CC_CPAS_SFE_0_CLK 21
+#define CAM_CC_CPHY_RX_CLK_SRC 22
+#define CAM_CC_CSI0PHYTIMER_CLK 23
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 24
+#define CAM_CC_CSI1PHYTIMER_CLK 25
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 26
+#define CAM_CC_CSI2PHYTIMER_CLK 27
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 28
+#define CAM_CC_CSI3PHYTIMER_CLK 29
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 30
+#define CAM_CC_CSI4PHYTIMER_CLK 31
+#define CAM_CC_CSI4PHYTIMER_CLK_SRC 32
+#define CAM_CC_CSI5PHYTIMER_CLK 33
+#define CAM_CC_CSI5PHYTIMER_CLK_SRC 34
+#define CAM_CC_CSID_CLK 35
+#define CAM_CC_CSID_CLK_SRC 36
+#define CAM_CC_CSID_CSIPHY_RX_CLK 37
+#define CAM_CC_CSIPHY0_CLK 38
+#define CAM_CC_CSIPHY1_CLK 39
+#define CAM_CC_CSIPHY2_CLK 40
+#define CAM_CC_CSIPHY3_CLK 41
+#define CAM_CC_CSIPHY4_CLK 42
+#define CAM_CC_CSIPHY5_CLK 43
+#define CAM_CC_FAST_AHB_CLK_SRC 44
+#define CAM_CC_GDSC_CLK 45
+#define CAM_CC_ICP_AHB_CLK 46
+#define CAM_CC_ICP_CLK 47
+#define CAM_CC_ICP_CLK_SRC 48
+#define CAM_CC_IFE_0_CLK 49
+#define CAM_CC_IFE_0_CLK_SRC 50
+#define CAM_CC_IFE_0_DSP_CLK 51
+#define CAM_CC_IFE_0_FAST_AHB_CLK 52
+#define CAM_CC_IFE_1_CLK 53
+#define CAM_CC_IFE_1_CLK_SRC 54
+#define CAM_CC_IFE_1_DSP_CLK 55
+#define CAM_CC_IFE_1_FAST_AHB_CLK 56
+#define CAM_CC_IFE_LITE_AHB_CLK 57
+#define CAM_CC_IFE_LITE_CLK 58
+#define CAM_CC_IFE_LITE_CLK_SRC 59
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 60
+#define CAM_CC_IFE_LITE_CSID_CLK 61
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 62
+#define CAM_CC_IPE_NPS_AHB_CLK 63
+#define CAM_CC_IPE_NPS_CLK 64
+#define CAM_CC_IPE_NPS_CLK_SRC 65
+#define CAM_CC_IPE_NPS_FAST_AHB_CLK 66
+#define CAM_CC_IPE_PPS_CLK 67
+#define CAM_CC_IPE_PPS_FAST_AHB_CLK 68
+#define CAM_CC_JPEG_CLK 69
+#define CAM_CC_JPEG_CLK_SRC 70
+#define CAM_CC_MCLK0_CLK 71
+#define CAM_CC_MCLK0_CLK_SRC 72
+#define CAM_CC_MCLK1_CLK 73
+#define CAM_CC_MCLK1_CLK_SRC 74
+#define CAM_CC_MCLK2_CLK 75
+#define CAM_CC_MCLK2_CLK_SRC 76
+#define CAM_CC_MCLK3_CLK 77
+#define CAM_CC_MCLK3_CLK_SRC 78
+#define CAM_CC_MCLK4_CLK 79
+#define CAM_CC_MCLK4_CLK_SRC 80
+#define CAM_CC_MCLK5_CLK 81
+#define CAM_CC_MCLK5_CLK_SRC 82
+#define CAM_CC_MCLK6_CLK 83
+#define CAM_CC_MCLK6_CLK_SRC 84
+#define CAM_CC_MCLK7_CLK 85
+#define CAM_CC_MCLK7_CLK_SRC 86
+#define CAM_CC_PLL0 87
+#define CAM_CC_PLL0_OUT_EVEN 88
+#define CAM_CC_PLL0_OUT_ODD 89
+#define CAM_CC_PLL1 90
+#define CAM_CC_PLL1_OUT_EVEN 91
+#define CAM_CC_PLL2 92
+#define CAM_CC_PLL3 93
+#define CAM_CC_PLL3_OUT_EVEN 94
+#define CAM_CC_PLL4 95
+#define CAM_CC_PLL4_OUT_EVEN 96
+#define CAM_CC_PLL6 97
+#define CAM_CC_PLL6_OUT_EVEN 98
+#define CAM_CC_PLL8 99
+#define CAM_CC_PLL8_OUT_EVEN 100
+#define CAM_CC_SFE_0_CLK 101
+#define CAM_CC_SFE_0_CLK_SRC 102
+#define CAM_CC_SFE_0_FAST_AHB_CLK 103
+#define CAM_CC_SLEEP_CLK 104
+#define CAM_CC_SLEEP_CLK_SRC 105
+#define CAM_CC_SLOW_AHB_CLK_SRC 106
+#define CAM_CC_XO_CLK_SRC 107
+
+/* CAM_CC power domains */
+#define CAM_CC_BPS_GDSC 0
+#define CAM_CC_IFE_0_GDSC 1
+#define CAM_CC_IFE_1_GDSC 2
+#define CAM_CC_IPE_0_GDSC 3
+#define CAM_CC_SFE_0_GDSC 4
+#define CAM_CC_TITAN_TOP_GDSC 5
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_ICP_BCR 1
+#define CAM_CC_IFE_0_BCR 2
+#define CAM_CC_IFE_1_BCR 3
+#define CAM_CC_IPE_0_BCR 4
+#define CAM_CC_SFE_0_BCR 5
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,x1e80100-dispcc.h b/include/dt-bindings/clock/qcom,x1e80100-dispcc.h
new file mode 100644
index 000000000000..49b3a9e5ce4a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,x1e80100-dispcc.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_X1E80100_DISP_CC_H
+#define _DT_BINDINGS_CLK_QCOM_X1E80100_DISP_CC_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_ACCU_CLK 0
+#define DISP_CC_MDSS_AHB1_CLK 1
+#define DISP_CC_MDSS_AHB_CLK 2
+#define DISP_CC_MDSS_AHB_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_CLK 4
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 7
+#define DISP_CC_MDSS_BYTE1_CLK 8
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 9
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 10
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 11
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 12
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 13
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 14
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 15
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 16
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 17
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 18
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 19
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 20
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 21
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 22
+#define DISP_CC_MDSS_DPTX1_AUX_CLK 23
+#define DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 24
+#define DISP_CC_MDSS_DPTX1_LINK_CLK 25
+#define DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 26
+#define DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 27
+#define DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 28
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK 29
+#define DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 30
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK 31
+#define DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 32
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 33
+#define DISP_CC_MDSS_DPTX2_AUX_CLK 34
+#define DISP_CC_MDSS_DPTX2_AUX_CLK_SRC 35
+#define DISP_CC_MDSS_DPTX2_LINK_CLK 36
+#define DISP_CC_MDSS_DPTX2_LINK_CLK_SRC 37
+#define DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC 38
+#define DISP_CC_MDSS_DPTX2_LINK_INTF_CLK 39
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK 40
+#define DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC 41
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK 42
+#define DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC 43
+#define DISP_CC_MDSS_DPTX2_USB_ROUTER_LINK_INTF_CLK 44
+#define DISP_CC_MDSS_DPTX3_AUX_CLK 45
+#define DISP_CC_MDSS_DPTX3_AUX_CLK_SRC 46
+#define DISP_CC_MDSS_DPTX3_LINK_CLK 47
+#define DISP_CC_MDSS_DPTX3_LINK_CLK_SRC 48
+#define DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC 49
+#define DISP_CC_MDSS_DPTX3_LINK_INTF_CLK 50
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK 51
+#define DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC 52
+#define DISP_CC_MDSS_ESC0_CLK 53
+#define DISP_CC_MDSS_ESC0_CLK_SRC 54
+#define DISP_CC_MDSS_ESC1_CLK 55
+#define DISP_CC_MDSS_ESC1_CLK_SRC 56
+#define DISP_CC_MDSS_MDP1_CLK 57
+#define DISP_CC_MDSS_MDP_CLK 58
+#define DISP_CC_MDSS_MDP_CLK_SRC 59
+#define DISP_CC_MDSS_MDP_LUT1_CLK 60
+#define DISP_CC_MDSS_MDP_LUT_CLK 61
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 62
+#define DISP_CC_MDSS_PCLK0_CLK 63
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 64
+#define DISP_CC_MDSS_PCLK1_CLK 65
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 66
+#define DISP_CC_MDSS_RSCC_AHB_CLK 67
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 68
+#define DISP_CC_MDSS_VSYNC1_CLK 69
+#define DISP_CC_MDSS_VSYNC_CLK 70
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 71
+#define DISP_CC_PLL0 72
+#define DISP_CC_PLL1 73
+#define DISP_CC_SLEEP_CLK 74
+#define DISP_CC_SLEEP_CLK_SRC 75
+#define DISP_CC_XO_CLK 76
+#define DISP_CC_XO_CLK_SRC 77
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK_ARES 3
+#define DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK_ARES 4
+#define DISP_CC_MDSS_DPTX2_USB_ROUTER_LINK_INTF_CLK_ARES 5
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+#define MDSS_INT2_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,x1e80100-gcc.h b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
new file mode 100644
index 000000000000..62aa12425592
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
@@ -0,0 +1,548 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_X1E80100_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_X1E80100_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_USB_NORTH_AXI_CLK 0
+#define GCC_AGGRE_NOC_USB_SOUTH_AXI_CLK 1
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 2
+#define GCC_AGGRE_USB2_PRIM_AXI_CLK 3
+#define GCC_AGGRE_USB3_MP_AXI_CLK 4
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 5
+#define GCC_AGGRE_USB3_SEC_AXI_CLK 6
+#define GCC_AGGRE_USB3_TERT_AXI_CLK 7
+#define GCC_AGGRE_USB4_0_AXI_CLK 8
+#define GCC_AGGRE_USB4_1_AXI_CLK 9
+#define GCC_AGGRE_USB4_2_AXI_CLK 10
+#define GCC_AGGRE_USB_NOC_AXI_CLK 11
+#define GCC_AV1E_AHB_CLK 12
+#define GCC_AV1E_AXI_CLK 13
+#define GCC_AV1E_XO_CLK 14
+#define GCC_BOOT_ROM_AHB_CLK 15
+#define GCC_CAMERA_AHB_CLK 16
+#define GCC_CAMERA_HF_AXI_CLK 17
+#define GCC_CAMERA_SF_AXI_CLK 18
+#define GCC_CAMERA_XO_CLK 19
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 20
+#define GCC_CFG_NOC_PCIE_ANOC_NORTH_AHB_CLK 21
+#define GCC_CFG_NOC_PCIE_ANOC_SOUTH_AHB_CLK 22
+#define GCC_CFG_NOC_USB2_PRIM_AXI_CLK 23
+#define GCC_CFG_NOC_USB3_MP_AXI_CLK 24
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 25
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK 26
+#define GCC_CFG_NOC_USB3_TERT_AXI_CLK 27
+#define GCC_CFG_NOC_USB_ANOC_AHB_CLK 28
+#define GCC_CFG_NOC_USB_ANOC_NORTH_AHB_CLK 29
+#define GCC_CFG_NOC_USB_ANOC_SOUTH_AHB_CLK 30
+#define GCC_CNOC_PCIE1_TUNNEL_CLK 31
+#define GCC_CNOC_PCIE2_TUNNEL_CLK 32
+#define GCC_CNOC_PCIE_NORTH_SF_AXI_CLK 33
+#define GCC_CNOC_PCIE_SOUTH_SF_AXI_CLK 34
+#define GCC_CNOC_PCIE_TUNNEL_CLK 35
+#define GCC_DDRSS_GPU_AXI_CLK 36
+#define GCC_DISP_AHB_CLK 37
+#define GCC_DISP_HF_AXI_CLK 38
+#define GCC_DISP_XO_CLK 39
+#define GCC_GP1_CLK 40
+#define GCC_GP1_CLK_SRC 41
+#define GCC_GP2_CLK 42
+#define GCC_GP2_CLK_SRC 43
+#define GCC_GP3_CLK 44
+#define GCC_GP3_CLK_SRC 45
+#define GCC_GPLL0 46
+#define GCC_GPLL0_OUT_EVEN 47
+#define GCC_GPLL4 48
+#define GCC_GPLL7 49
+#define GCC_GPLL8 50
+#define GCC_GPLL9 51
+#define GCC_GPU_CFG_AHB_CLK 52
+#define GCC_GPU_GPLL0_CPH_CLK_SRC 53
+#define GCC_GPU_GPLL0_DIV_CPH_CLK_SRC 54
+#define GCC_GPU_MEMNOC_GFX_CLK 55
+#define GCC_GPU_SNOC_DVM_GFX_CLK 56
+#define GCC_PCIE0_PHY_RCHNG_CLK 57
+#define GCC_PCIE1_PHY_RCHNG_CLK 58
+#define GCC_PCIE2_PHY_RCHNG_CLK 59
+#define GCC_PCIE_0_AUX_CLK 60
+#define GCC_PCIE_0_AUX_CLK_SRC 61
+#define GCC_PCIE_0_CFG_AHB_CLK 62
+#define GCC_PCIE_0_MSTR_AXI_CLK 63
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 64
+#define GCC_PCIE_0_PIPE_CLK 65
+#define GCC_PCIE_0_SLV_AXI_CLK 66
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 67
+#define GCC_PCIE_1_AUX_CLK 68
+#define GCC_PCIE_1_AUX_CLK_SRC 69
+#define GCC_PCIE_1_CFG_AHB_CLK 70
+#define GCC_PCIE_1_MSTR_AXI_CLK 71
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 72
+#define GCC_PCIE_1_PIPE_CLK 73
+#define GCC_PCIE_1_SLV_AXI_CLK 74
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 75
+#define GCC_PCIE_2_AUX_CLK 76
+#define GCC_PCIE_2_AUX_CLK_SRC 77
+#define GCC_PCIE_2_CFG_AHB_CLK 78
+#define GCC_PCIE_2_MSTR_AXI_CLK 79
+#define GCC_PCIE_2_PHY_RCHNG_CLK_SRC 80
+#define GCC_PCIE_2_PIPE_CLK 81
+#define GCC_PCIE_2_SLV_AXI_CLK 82
+#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 83
+#define GCC_PCIE_3_AUX_CLK 84
+#define GCC_PCIE_3_AUX_CLK_SRC 85
+#define GCC_PCIE_3_CFG_AHB_CLK 86
+#define GCC_PCIE_3_MSTR_AXI_CLK 87
+#define GCC_PCIE_3_PHY_AUX_CLK 88
+#define GCC_PCIE_3_PHY_RCHNG_CLK 89
+#define GCC_PCIE_3_PHY_RCHNG_CLK_SRC 90
+#define GCC_PCIE_3_PIPE_CLK 91
+#define GCC_PCIE_3_PIPE_DIV_CLK_SRC 92
+#define GCC_PCIE_3_PIPEDIV2_CLK 93
+#define GCC_PCIE_3_SLV_AXI_CLK 94
+#define GCC_PCIE_3_SLV_Q2A_AXI_CLK 95
+#define GCC_PCIE_4_AUX_CLK 96
+#define GCC_PCIE_4_AUX_CLK_SRC 97
+#define GCC_PCIE_4_CFG_AHB_CLK 98
+#define GCC_PCIE_4_MSTR_AXI_CLK 99
+#define GCC_PCIE_4_PHY_RCHNG_CLK 100
+#define GCC_PCIE_4_PHY_RCHNG_CLK_SRC 101
+#define GCC_PCIE_4_PIPE_CLK 102
+#define GCC_PCIE_4_PIPE_DIV_CLK_SRC 103
+#define GCC_PCIE_4_PIPEDIV2_CLK 104
+#define GCC_PCIE_4_SLV_AXI_CLK 105
+#define GCC_PCIE_4_SLV_Q2A_AXI_CLK 106
+#define GCC_PCIE_5_AUX_CLK 107
+#define GCC_PCIE_5_AUX_CLK_SRC 108
+#define GCC_PCIE_5_CFG_AHB_CLK 109
+#define GCC_PCIE_5_MSTR_AXI_CLK 110
+#define GCC_PCIE_5_PHY_RCHNG_CLK 111
+#define GCC_PCIE_5_PHY_RCHNG_CLK_SRC 112
+#define GCC_PCIE_5_PIPE_CLK 113
+#define GCC_PCIE_5_PIPE_DIV_CLK_SRC 114
+#define GCC_PCIE_5_PIPEDIV2_CLK 115
+#define GCC_PCIE_5_SLV_AXI_CLK 116
+#define GCC_PCIE_5_SLV_Q2A_AXI_CLK 117
+#define GCC_PCIE_6A_AUX_CLK 118
+#define GCC_PCIE_6A_AUX_CLK_SRC 119
+#define GCC_PCIE_6A_CFG_AHB_CLK 120
+#define GCC_PCIE_6A_MSTR_AXI_CLK 121
+#define GCC_PCIE_6A_PHY_AUX_CLK 122
+#define GCC_PCIE_6A_PHY_RCHNG_CLK 123
+#define GCC_PCIE_6A_PHY_RCHNG_CLK_SRC 124
+#define GCC_PCIE_6A_PIPE_CLK 125
+#define GCC_PCIE_6A_PIPE_DIV_CLK_SRC 126
+#define GCC_PCIE_6A_PIPEDIV2_CLK 127
+#define GCC_PCIE_6A_SLV_AXI_CLK 128
+#define GCC_PCIE_6A_SLV_Q2A_AXI_CLK 129
+#define GCC_PCIE_6B_AUX_CLK 130
+#define GCC_PCIE_6B_AUX_CLK_SRC 131
+#define GCC_PCIE_6B_CFG_AHB_CLK 132
+#define GCC_PCIE_6B_MSTR_AXI_CLK 133
+#define GCC_PCIE_6B_PHY_AUX_CLK 134
+#define GCC_PCIE_6B_PHY_RCHNG_CLK 135
+#define GCC_PCIE_6B_PHY_RCHNG_CLK_SRC 136
+#define GCC_PCIE_6B_PIPE_CLK 137
+#define GCC_PCIE_6B_PIPE_DIV_CLK_SRC 138
+#define GCC_PCIE_6B_PIPEDIV2_CLK 139
+#define GCC_PCIE_6B_SLV_AXI_CLK 140
+#define GCC_PCIE_6B_SLV_Q2A_AXI_CLK 141
+#define GCC_PCIE_RSCC_AHB_CLK 142
+#define GCC_PCIE_RSCC_XO_CLK 143
+#define GCC_PCIE_RSCC_XO_CLK_SRC 144
+#define GCC_PDM2_CLK 145
+#define GCC_PDM2_CLK_SRC 146
+#define GCC_PDM_AHB_CLK 147
+#define GCC_PDM_XO4_CLK 148
+#define GCC_QMIP_AV1E_AHB_CLK 149
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 150
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 151
+#define GCC_QMIP_DISP_AHB_CLK 152
+#define GCC_QMIP_GPU_AHB_CLK 153
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 154
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 155
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 156
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 157
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 158
+#define GCC_QUPV3_WRAP0_CORE_CLK 159
+#define GCC_QUPV3_WRAP0_QSPI_S2_CLK 160
+#define GCC_QUPV3_WRAP0_QSPI_S3_CLK 161
+#define GCC_QUPV3_WRAP0_S0_CLK 162
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 163
+#define GCC_QUPV3_WRAP0_S1_CLK 164
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 165
+#define GCC_QUPV3_WRAP0_S2_CLK 166
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 167
+#define GCC_QUPV3_WRAP0_S2_DIV_CLK_SRC 168
+#define GCC_QUPV3_WRAP0_S3_CLK 169
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 170
+#define GCC_QUPV3_WRAP0_S3_DIV_CLK_SRC 171
+#define GCC_QUPV3_WRAP0_S4_CLK 172
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 173
+#define GCC_QUPV3_WRAP0_S5_CLK 174
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 175
+#define GCC_QUPV3_WRAP0_S6_CLK 176
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 177
+#define GCC_QUPV3_WRAP0_S7_CLK 178
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 179
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 180
+#define GCC_QUPV3_WRAP1_CORE_CLK 181
+#define GCC_QUPV3_WRAP1_QSPI_S2_CLK 182
+#define GCC_QUPV3_WRAP1_QSPI_S3_CLK 183
+#define GCC_QUPV3_WRAP1_S0_CLK 184
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 185
+#define GCC_QUPV3_WRAP1_S1_CLK 186
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 187
+#define GCC_QUPV3_WRAP1_S2_CLK 188
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 189
+#define GCC_QUPV3_WRAP1_S2_DIV_CLK_SRC 190
+#define GCC_QUPV3_WRAP1_S3_CLK 191
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 192
+#define GCC_QUPV3_WRAP1_S3_DIV_CLK_SRC 193
+#define GCC_QUPV3_WRAP1_S4_CLK 194
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 195
+#define GCC_QUPV3_WRAP1_S5_CLK 196
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 197
+#define GCC_QUPV3_WRAP1_S6_CLK 198
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 199
+#define GCC_QUPV3_WRAP1_S7_CLK 200
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC 201
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 202
+#define GCC_QUPV3_WRAP2_CORE_CLK 203
+#define GCC_QUPV3_WRAP2_QSPI_S2_CLK 204
+#define GCC_QUPV3_WRAP2_QSPI_S3_CLK 205
+#define GCC_QUPV3_WRAP2_S0_CLK 206
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 207
+#define GCC_QUPV3_WRAP2_S1_CLK 208
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 209
+#define GCC_QUPV3_WRAP2_S2_CLK 210
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 211
+#define GCC_QUPV3_WRAP2_S2_DIV_CLK_SRC 212
+#define GCC_QUPV3_WRAP2_S3_CLK 213
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 214
+#define GCC_QUPV3_WRAP2_S3_DIV_CLK_SRC 215
+#define GCC_QUPV3_WRAP2_S4_CLK 216
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 217
+#define GCC_QUPV3_WRAP2_S5_CLK 218
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 219
+#define GCC_QUPV3_WRAP2_S6_CLK 220
+#define GCC_QUPV3_WRAP2_S6_CLK_SRC 221
+#define GCC_QUPV3_WRAP2_S7_CLK 222
+#define GCC_QUPV3_WRAP2_S7_CLK_SRC 223
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 224
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 225
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 226
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 227
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 228
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 229
+#define GCC_SDCC2_AHB_CLK 230
+#define GCC_SDCC2_APPS_CLK 231
+#define GCC_SDCC2_APPS_CLK_SRC 232
+#define GCC_SDCC4_AHB_CLK 233
+#define GCC_SDCC4_APPS_CLK 234
+#define GCC_SDCC4_APPS_CLK_SRC 235
+#define GCC_SYS_NOC_USB_AXI_CLK 236
+#define GCC_UFS_PHY_AHB_CLK 237
+#define GCC_UFS_PHY_AXI_CLK 238
+#define GCC_UFS_PHY_AXI_CLK_SRC 239
+#define GCC_UFS_PHY_ICE_CORE_CLK 240
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 241
+#define GCC_UFS_PHY_PHY_AUX_CLK 242
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 243
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 244
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 245
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 246
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 247
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 248
+#define GCC_USB20_MASTER_CLK 249
+#define GCC_USB20_MASTER_CLK_SRC 250
+#define GCC_USB20_MOCK_UTMI_CLK 251
+#define GCC_USB20_MOCK_UTMI_CLK_SRC 252
+#define GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC 253
+#define GCC_USB20_SLEEP_CLK 254
+#define GCC_USB30_MP_MASTER_CLK 255
+#define GCC_USB30_MP_MASTER_CLK_SRC 256
+#define GCC_USB30_MP_MOCK_UTMI_CLK 257
+#define GCC_USB30_MP_MOCK_UTMI_CLK_SRC 258
+#define GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC 259
+#define GCC_USB30_MP_SLEEP_CLK 260
+#define GCC_USB30_PRIM_MASTER_CLK 261
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 262
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 263
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 264
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 265
+#define GCC_USB30_PRIM_SLEEP_CLK 266
+#define GCC_USB30_SEC_MASTER_CLK 267
+#define GCC_USB30_SEC_MASTER_CLK_SRC 268
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 269
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 270
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 271
+#define GCC_USB30_SEC_SLEEP_CLK 272
+#define GCC_USB30_TERT_MASTER_CLK 273
+#define GCC_USB30_TERT_MASTER_CLK_SRC 274
+#define GCC_USB30_TERT_MOCK_UTMI_CLK 275
+#define GCC_USB30_TERT_MOCK_UTMI_CLK_SRC 276
+#define GCC_USB30_TERT_MOCK_UTMI_POSTDIV_CLK_SRC 277
+#define GCC_USB30_TERT_SLEEP_CLK 278
+#define GCC_USB3_MP_PHY_AUX_CLK 279
+#define GCC_USB3_MP_PHY_AUX_CLK_SRC 280
+#define GCC_USB3_MP_PHY_COM_AUX_CLK 281
+#define GCC_USB3_MP_PHY_PIPE_0_CLK 282
+#define GCC_USB3_MP_PHY_PIPE_1_CLK 283
+#define GCC_USB3_PRIM_PHY_AUX_CLK 284
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 285
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 286
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 287
+#define GCC_USB3_SEC_PHY_AUX_CLK 288
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 289
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 290
+#define GCC_USB3_SEC_PHY_PIPE_CLK 291
+#define GCC_USB3_TERT_PHY_AUX_CLK 292
+#define GCC_USB3_TERT_PHY_AUX_CLK_SRC 293
+#define GCC_USB3_TERT_PHY_COM_AUX_CLK 294
+#define GCC_USB3_TERT_PHY_PIPE_CLK 295
+#define GCC_USB4_0_CFG_AHB_CLK 296
+#define GCC_USB4_0_DP0_CLK 297
+#define GCC_USB4_0_DP1_CLK 298
+#define GCC_USB4_0_MASTER_CLK 299
+#define GCC_USB4_0_MASTER_CLK_SRC 300
+#define GCC_USB4_0_PHY_P2RR2P_PIPE_CLK 301
+#define GCC_USB4_0_PHY_PCIE_PIPE_CLK 302
+#define GCC_USB4_0_PHY_PCIE_PIPE_CLK_SRC 303
+#define GCC_USB4_0_PHY_RX0_CLK 304
+#define GCC_USB4_0_PHY_RX1_CLK 305
+#define GCC_USB4_0_PHY_USB_PIPE_CLK 306
+#define GCC_USB4_0_SB_IF_CLK 307
+#define GCC_USB4_0_SB_IF_CLK_SRC 308
+#define GCC_USB4_0_SYS_CLK 309
+#define GCC_USB4_0_TMU_CLK 310
+#define GCC_USB4_0_TMU_CLK_SRC 311
+#define GCC_USB4_1_CFG_AHB_CLK 312
+#define GCC_USB4_1_DP0_CLK 313
+#define GCC_USB4_1_DP1_CLK 314
+#define GCC_USB4_1_MASTER_CLK 315
+#define GCC_USB4_1_MASTER_CLK_SRC 316
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK 317
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK 318
+#define GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC 319
+#define GCC_USB4_1_PHY_RX0_CLK 320
+#define GCC_USB4_1_PHY_RX1_CLK 321
+#define GCC_USB4_1_PHY_USB_PIPE_CLK 322
+#define GCC_USB4_1_SB_IF_CLK 323
+#define GCC_USB4_1_SB_IF_CLK_SRC 324
+#define GCC_USB4_1_SYS_CLK 325
+#define GCC_USB4_1_TMU_CLK 326
+#define GCC_USB4_1_TMU_CLK_SRC 327
+#define GCC_USB4_2_CFG_AHB_CLK 328
+#define GCC_USB4_2_DP0_CLK 329
+#define GCC_USB4_2_DP1_CLK 330
+#define GCC_USB4_2_MASTER_CLK 331
+#define GCC_USB4_2_MASTER_CLK_SRC 332
+#define GCC_USB4_2_PHY_P2RR2P_PIPE_CLK 333
+#define GCC_USB4_2_PHY_PCIE_PIPE_CLK 334
+#define GCC_USB4_2_PHY_PCIE_PIPE_CLK_SRC 335
+#define GCC_USB4_2_PHY_RX0_CLK 336
+#define GCC_USB4_2_PHY_RX1_CLK 337
+#define GCC_USB4_2_PHY_USB_PIPE_CLK 338
+#define GCC_USB4_2_SB_IF_CLK 339
+#define GCC_USB4_2_SB_IF_CLK_SRC 340
+#define GCC_USB4_2_SYS_CLK 341
+#define GCC_USB4_2_TMU_CLK 342
+#define GCC_USB4_2_TMU_CLK_SRC 343
+#define GCC_VIDEO_AHB_CLK 344
+#define GCC_VIDEO_AXI0_CLK 345
+#define GCC_VIDEO_AXI1_CLK 346
+#define GCC_VIDEO_XO_CLK 347
+#define GCC_PCIE_3_PIPE_CLK_SRC 348
+#define GCC_PCIE_4_PIPE_CLK_SRC 349
+#define GCC_PCIE_5_PIPE_CLK_SRC 350
+#define GCC_PCIE_6A_PIPE_CLK_SRC 351
+#define GCC_PCIE_6B_PIPE_CLK_SRC 352
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 353
+#define GCC_USB3_SEC_PHY_PIPE_CLK_SRC 354
+#define GCC_USB3_TERT_PHY_PIPE_CLK_SRC 355
+#define GCC_USB34_PRIM_PHY_PIPE_CLK_SRC 356
+#define GCC_USB34_SEC_PHY_PIPE_CLK_SRC 357
+#define GCC_USB34_TERT_PHY_PIPE_CLK_SRC 358
+#define GCC_USB4_0_PHY_DP0_CLK_SRC 359
+#define GCC_USB4_0_PHY_DP1_CLK_SRC 360
+#define GCC_USB4_0_PHY_P2RR2P_PIPE_CLK_SRC 361
+#define GCC_USB4_0_PHY_PCIE_PIPE_MUX_CLK_SRC 362
+#define GCC_USB4_0_PHY_RX0_CLK_SRC 363
+#define GCC_USB4_0_PHY_RX1_CLK_SRC 364
+#define GCC_USB4_0_PHY_SYS_CLK_SRC 365
+#define GCC_USB4_1_PHY_DP0_CLK_SRC 366
+#define GCC_USB4_1_PHY_DP1_CLK_SRC 367
+#define GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC 368
+#define GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC 369
+#define GCC_USB4_1_PHY_RX0_CLK_SRC 370
+#define GCC_USB4_1_PHY_RX1_CLK_SRC 371
+#define GCC_USB4_1_PHY_SYS_CLK_SRC 372
+#define GCC_USB4_2_PHY_DP0_CLK_SRC 373
+#define GCC_USB4_2_PHY_DP1_CLK_SRC 374
+#define GCC_USB4_2_PHY_P2RR2P_PIPE_CLK_SRC 375
+#define GCC_USB4_2_PHY_PCIE_PIPE_MUX_CLK_SRC 376
+#define GCC_USB4_2_PHY_RX0_CLK_SRC 377
+#define GCC_USB4_2_PHY_RX1_CLK_SRC 378
+#define GCC_USB4_2_PHY_SYS_CLK_SRC 379
+
+/* GCC power domains */
+#define GCC_PCIE_0_TUNNEL_GDSC 0
+#define GCC_PCIE_1_TUNNEL_GDSC 1
+#define GCC_PCIE_2_TUNNEL_GDSC 2
+#define GCC_PCIE_3_GDSC 3
+#define GCC_PCIE_3_PHY_GDSC 4
+#define GCC_PCIE_4_GDSC 5
+#define GCC_PCIE_4_PHY_GDSC 6
+#define GCC_PCIE_5_GDSC 7
+#define GCC_PCIE_5_PHY_GDSC 8
+#define GCC_PCIE_6_PHY_GDSC 9
+#define GCC_PCIE_6A_GDSC 10
+#define GCC_PCIE_6B_GDSC 11
+#define GCC_UFS_MEM_PHY_GDSC 12
+#define GCC_UFS_PHY_GDSC 13
+#define GCC_USB20_PRIM_GDSC 14
+#define GCC_USB30_MP_GDSC 15
+#define GCC_USB30_PRIM_GDSC 16
+#define GCC_USB30_SEC_GDSC 17
+#define GCC_USB30_TERT_GDSC 18
+#define GCC_USB3_MP_SS0_PHY_GDSC 19
+#define GCC_USB3_MP_SS1_PHY_GDSC 20
+#define GCC_USB4_0_GDSC 21
+#define GCC_USB4_1_GDSC 22
+#define GCC_USB4_2_GDSC 23
+#define GCC_USB_0_PHY_GDSC 24
+#define GCC_USB_1_PHY_GDSC 25
+#define GCC_USB_2_PHY_GDSC 26
+
+/* GCC resets */
+#define GCC_AV1E_BCR 0
+#define GCC_CAMERA_BCR 1
+#define GCC_DISPLAY_BCR 2
+#define GCC_GPU_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_0_TUNNEL_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_1_TUNNEL_BCR 13
+#define GCC_PCIE_2_LINK_DOWN_BCR 14
+#define GCC_PCIE_2_NOCSR_COM_PHY_BCR 15
+#define GCC_PCIE_2_PHY_BCR 16
+#define GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR 17
+#define GCC_PCIE_2_TUNNEL_BCR 18
+#define GCC_PCIE_3_BCR 19
+#define GCC_PCIE_3_LINK_DOWN_BCR 20
+#define GCC_PCIE_3_NOCSR_COM_PHY_BCR 21
+#define GCC_PCIE_3_PHY_BCR 22
+#define GCC_PCIE_3_PHY_NOCSR_COM_PHY_BCR 23
+#define GCC_PCIE_4_BCR 24
+#define GCC_PCIE_4_LINK_DOWN_BCR 25
+#define GCC_PCIE_4_NOCSR_COM_PHY_BCR 26
+#define GCC_PCIE_4_PHY_BCR 27
+#define GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR 28
+#define GCC_PCIE_5_BCR 29
+#define GCC_PCIE_5_LINK_DOWN_BCR 30
+#define GCC_PCIE_5_NOCSR_COM_PHY_BCR 31
+#define GCC_PCIE_5_PHY_BCR 32
+#define GCC_PCIE_5_PHY_NOCSR_COM_PHY_BCR 33
+#define GCC_PCIE_6A_BCR 34
+#define GCC_PCIE_6A_LINK_DOWN_BCR 35
+#define GCC_PCIE_6A_NOCSR_COM_PHY_BCR 36
+#define GCC_PCIE_6A_PHY_BCR 37
+#define GCC_PCIE_6A_PHY_NOCSR_COM_PHY_BCR 38
+#define GCC_PCIE_6B_BCR 39
+#define GCC_PCIE_6B_LINK_DOWN_BCR 40
+#define GCC_PCIE_6B_NOCSR_COM_PHY_BCR 41
+#define GCC_PCIE_6B_PHY_BCR 42
+#define GCC_PCIE_6B_PHY_NOCSR_COM_PHY_BCR 43
+#define GCC_PCIE_PHY_BCR 44
+#define GCC_PCIE_PHY_CFG_AHB_BCR 45
+#define GCC_PCIE_PHY_COM_BCR 46
+#define GCC_PCIE_RSCC_BCR 47
+#define GCC_PDM_BCR 48
+#define GCC_QUPV3_WRAPPER_0_BCR 49
+#define GCC_QUPV3_WRAPPER_1_BCR 50
+#define GCC_QUPV3_WRAPPER_2_BCR 51
+#define GCC_QUSB2PHY_HS0_MP_BCR 52
+#define GCC_QUSB2PHY_HS1_MP_BCR 53
+#define GCC_QUSB2PHY_PRIM_BCR 54
+#define GCC_QUSB2PHY_SEC_BCR 55
+#define GCC_QUSB2PHY_TERT_BCR 56
+#define GCC_QUSB2PHY_USB20_HS_BCR 57
+#define GCC_SDCC2_BCR 58
+#define GCC_SDCC4_BCR 59
+#define GCC_UFS_PHY_BCR 60
+#define GCC_USB20_PRIM_BCR 61
+#define GCC_USB30_MP_BCR 62
+#define GCC_USB30_PRIM_BCR 63
+#define GCC_USB30_SEC_BCR 64
+#define GCC_USB30_TERT_BCR 65
+#define GCC_USB3_MP_SS0_PHY_BCR 66
+#define GCC_USB3_MP_SS1_PHY_BCR 67
+#define GCC_USB3_PHY_PRIM_BCR 68
+#define GCC_USB3_PHY_SEC_BCR 69
+#define GCC_USB3_PHY_TERT_BCR 70
+#define GCC_USB3_UNIPHY_MP0_BCR 71
+#define GCC_USB3_UNIPHY_MP1_BCR 72
+#define GCC_USB3PHY_PHY_PRIM_BCR 73
+#define GCC_USB3PHY_PHY_SEC_BCR 74
+#define GCC_USB3PHY_PHY_TERT_BCR 75
+#define GCC_USB3UNIPHY_PHY_MP0_BCR 76
+#define GCC_USB3UNIPHY_PHY_MP1_BCR 77
+#define GCC_USB4_0_BCR 78
+#define GCC_USB4_0_DP0_PHY_PRIM_BCR 79
+#define GCC_USB4_1_DP0_PHY_SEC_BCR 80
+#define GCC_USB4_2_DP0_PHY_TERT_BCR 81
+#define GCC_USB4_1_BCR 82
+#define GCC_USB4_2_BCR 83
+#define GCC_USB_0_PHY_BCR 84
+#define GCC_USB_1_PHY_BCR 85
+#define GCC_USB_2_PHY_BCR 86
+#define GCC_VIDEO_BCR 87
+#define GCC_VIDEO_AXI0_CLK_ARES 88
+#define GCC_VIDEO_AXI1_CLK_ARES 89
+#define GCC_USB4_0_MISC_USB4_SYS_BCR 90
+#define GCC_USB4_0_MISC_RX_CLK_0_BCR 91
+#define GCC_USB4_0_MISC_RX_CLK_1_BCR 92
+#define GCC_USB4_0_MISC_USB_PIPE_BCR 93
+#define GCC_USB4_0_MISC_PCIE_PIPE_BCR 94
+#define GCC_USB4_0_MISC_TMU_BCR 95
+#define GCC_USB4_0_MISC_SB_IF_BCR 96
+#define GCC_USB4_0_MISC_HIA_MSTR_BCR 97
+#define GCC_USB4_0_MISC_AHB_BCR 98
+#define GCC_USB4_0_MISC_DP0_MAX_PCLK_BCR 99
+#define GCC_USB4_0_MISC_DP1_MAX_PCLK_BCR 100
+#define GCC_USB4_1_MISC_USB4_SYS_BCR 101
+#define GCC_USB4_1_MISC_RX_CLK_0_BCR 102
+#define GCC_USB4_1_MISC_RX_CLK_1_BCR 103
+#define GCC_USB4_1_MISC_USB_PIPE_BCR 104
+#define GCC_USB4_1_MISC_PCIE_PIPE_BCR 105
+#define GCC_USB4_1_MISC_TMU_BCR 106
+#define GCC_USB4_1_MISC_SB_IF_BCR 107
+#define GCC_USB4_1_MISC_HIA_MSTR_BCR 108
+#define GCC_USB4_1_MISC_AHB_BCR 109
+#define GCC_USB4_1_MISC_DP0_MAX_PCLK_BCR 110
+#define GCC_USB4_1_MISC_DP1_MAX_PCLK_BCR 111
+#define GCC_USB4_2_MISC_USB4_SYS_BCR 112
+#define GCC_USB4_2_MISC_RX_CLK_0_BCR 113
+#define GCC_USB4_2_MISC_RX_CLK_1_BCR 114
+#define GCC_USB4_2_MISC_USB_PIPE_BCR 115
+#define GCC_USB4_2_MISC_PCIE_PIPE_BCR 116
+#define GCC_USB4_2_MISC_TMU_BCR 117
+#define GCC_USB4_2_MISC_SB_IF_BCR 118
+#define GCC_USB4_2_MISC_HIA_MSTR_BCR 119
+#define GCC_USB4_2_MISC_AHB_BCR 120
+#define GCC_USB4_2_MISC_DP0_MAX_PCLK_BCR 121
+#define GCC_USB4_2_MISC_DP1_MAX_PCLK_BCR 122
+#define GCC_USB4PHY_PHY_PRIM_BCR 123
+#define GCC_USB4PHY_PHY_SEC_BCR 124
+#define GCC_USB4PHY_PHY_TERT_BCR 125
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,x1e80100-gpucc.h b/include/dt-bindings/clock/qcom,x1e80100-gpucc.h
new file mode 100644
index 000000000000..27b8f50541fd
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,x1e80100-gpucc.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_X1E80100_GPU_CC_H
+#define _DT_BINDINGS_CLK_QCOM_X1E80100_GPU_CC_H
+
+/* GPU_CC clocks */
+#define GPU_CC_AHB_CLK 0
+#define GPU_CC_CB_CLK 1
+#define GPU_CC_CRC_AHB_CLK 2
+#define GPU_CC_CX_FF_CLK 3
+#define GPU_CC_CX_GMU_CLK 4
+#define GPU_CC_CXO_AON_CLK 5
+#define GPU_CC_CXO_CLK 6
+#define GPU_CC_DEMET_CLK 7
+#define GPU_CC_DEMET_DIV_CLK_SRC 8
+#define GPU_CC_FF_CLK_SRC 9
+#define GPU_CC_FREQ_MEASURE_CLK 10
+#define GPU_CC_GMU_CLK_SRC 11
+#define GPU_CC_GX_GMU_CLK 12
+#define GPU_CC_GX_VSENSE_CLK 13
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 14
+#define GPU_CC_HUB_AON_CLK 15
+#define GPU_CC_HUB_CLK_SRC 16
+#define GPU_CC_HUB_CX_INT_CLK 17
+#define GPU_CC_MEMNOC_GFX_CLK 18
+#define GPU_CC_MND1X_0_GFX3D_CLK 19
+#define GPU_CC_MND1X_1_GFX3D_CLK 20
+#define GPU_CC_PLL0 21
+#define GPU_CC_PLL1 22
+#define GPU_CC_SLEEP_CLK 23
+#define GPU_CC_XO_CLK_SRC 24
+#define GPU_CC_XO_DIV_CLK_SRC 25
+#define GPU_CC_CX_ACCU_SHIFT_CLK 26
+#define GPU_CC_GX_ACCU_SHIFT_CLK 27
+
+/* GDSCs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+/* GPU_CC resets */
+#define GPU_CC_ACD_BCR 0
+#define GPU_CC_CB_BCR 1
+#define GPU_CC_CX_BCR 2
+#define GPU_CC_FAST_HUB_BCR 3
+#define GPU_CC_FF_BCR 4
+#define GPU_CC_GFX3D_AON_BCR 5
+#define GPU_CC_GMU_BCR 6
+#define GPU_CC_GX_BCR 7
+#define GPU_CC_XO_BCR 8
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,x1e80100-tcsr.h b/include/dt-bindings/clock/qcom,x1e80100-tcsr.h
new file mode 100644
index 000000000000..bae2c4654ee2
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,x1e80100-tcsr.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_X1E80100_TCSR_CC_H
+#define _DT_BINDINGS_CLK_QCOM_X1E80100_TCSR_CC_H
+
+/* TCSR CC clocks */
+#define TCSR_PCIE_2L_4_CLKREF_EN 0
+#define TCSR_PCIE_2L_5_CLKREF_EN 1
+#define TCSR_PCIE_8L_CLKREF_EN 2
+#define TCSR_USB3_MP0_CLKREF_EN 3
+#define TCSR_USB3_MP1_CLKREF_EN 4
+#define TCSR_USB2_1_CLKREF_EN 5
+#define TCSR_UFS_PHY_CLKREF_EN 6
+#define TCSR_USB4_1_CLKREF_EN 7
+#define TCSR_USB4_2_CLKREF_EN 8
+#define TCSR_USB2_2_CLKREF_EN 9
+#define TCSR_PCIE_4L_CLKREF_EN 10
+#define TCSR_EDP_CLKREF_EN 11
+
+#endif
diff --git a/include/dt-bindings/clock/r8a73a4-clock.h b/include/dt-bindings/clock/r8a73a4-clock.h
index 1ec4827b8091..655440a3e7c6 100644
--- a/include/dt-bindings/clock/r8a73a4-clock.h
+++ b/include/dt-bindings/clock/r8a73a4-clock.h
@@ -24,6 +24,10 @@
#define R8A73A4_CLK_ZS 14
#define R8A73A4_CLK_HP 15

+/* MSTP1 */
+#define R8A73A4_CLK_TMU0 25
+#define R8A73A4_CLK_TMU3 21
+
/* MSTP2 */
#define R8A73A4_CLK_DMAC 18
#define R8A73A4_CLK_SCIFB3 17
diff --git a/include/dt-bindings/clock/r8a7779-clock.h b/include/dt-bindings/clock/r8a7779-clock.h
index f0549234b7d8..e39acdc6499c 100644
--- a/include/dt-bindings/clock/r8a7779-clock.h
+++ b/include/dt-bindings/clock/r8a7779-clock.h
@@ -19,6 +19,7 @@
#define R8A7779_CLK_OUT 7

/* MSTP 0 */
+#define R8A7779_CLK_PWM 5
#define R8A7779_CLK_HSPI 7
#define R8A7779_CLK_TMU2 14
#define R8A7779_CLK_TMU1 15
@@ -56,5 +57,4 @@
#define R8A7779_CLK_MMC1 30
#define R8A7779_CLK_MMC0 31

-
#endif /* __DT_BINDINGS_CLOCK_R8A7779_H__ */
diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h
deleted file mode 100644
index c92ff1e60223..000000000000
--- a/include/dt-bindings/clock/r8a7790-clock.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2013 Ideas On Board SPRL
- */
-
-#ifndef __DT_BINDINGS_CLOCK_R8A7790_H__
-#define __DT_BINDINGS_CLOCK_R8A7790_H__
-
-/* CPG */
-#define R8A7790_CLK_MAIN 0
-#define R8A7790_CLK_PLL0 1
-#define R8A7790_CLK_PLL1 2
-#define R8A7790_CLK_PLL3 3
-#define R8A7790_CLK_LB 4
-#define R8A7790_CLK_QSPI 5
-#define R8A7790_CLK_SDH 6
-#define R8A7790_CLK_SD0 7
-#define R8A7790_CLK_SD1 8
-#define R8A7790_CLK_Z 9
-#define R8A7790_CLK_RCAN 10
-#define R8A7790_CLK_ADSP 11
-
-/* MSTP0 */
-#define R8A7790_CLK_MSIOF0 0
-
-/* MSTP1 */
-#define R8A7790_CLK_VCP1 0
-#define R8A7790_CLK_VCP0 1
-#define R8A7790_CLK_VPC1 2
-#define R8A7790_CLK_VPC0 3
-#define R8A7790_CLK_JPU 6
-#define R8A7790_CLK_SSP1 9
-#define R8A7790_CLK_TMU1 11
-#define R8A7790_CLK_3DG 12
-#define R8A7790_CLK_2DDMAC 15
-#define R8A7790_CLK_FDP1_2 17
-#define R8A7790_CLK_FDP1_1 18
-#define R8A7790_CLK_FDP1_0 19
-#define R8A7790_CLK_TMU3 21
-#define R8A7790_CLK_TMU2 22
-#define R8A7790_CLK_CMT0 24
-#define R8A7790_CLK_TMU0 25
-#define R8A7790_CLK_VSP1_DU1 27
-#define R8A7790_CLK_VSP1_DU0 28
-#define R8A7790_CLK_VSP1_R 30
-#define R8A7790_CLK_VSP1_S 31
-
-/* MSTP2 */
-#define R8A7790_CLK_SCIFA2 2
-#define R8A7790_CLK_SCIFA1 3
-#define R8A7790_CLK_SCIFA0 4
-#define R8A7790_CLK_MSIOF2 5
-#define R8A7790_CLK_SCIFB0 6
-#define R8A7790_CLK_SCIFB1 7
-#define R8A7790_CLK_MSIOF1 8
-#define R8A7790_CLK_MSIOF3 15
-#define R8A7790_CLK_SCIFB2 16
-#define R8A7790_CLK_SYS_DMAC1 18
-#define R8A7790_CLK_SYS_DMAC0 19
-
-/* MSTP3 */
-#define R8A7790_CLK_IIC2 0
-#define R8A7790_CLK_TPU0 4
-#define R8A7790_CLK_MMCIF1 5
-#define R8A7790_CLK_SCIF2 10
-#define R8A7790_CLK_SDHI3 11
-#define R8A7790_CLK_SDHI2 12
-#define R8A7790_CLK_SDHI1 13
-#define R8A7790_CLK_SDHI0 14
-#define R8A7790_CLK_MMCIF0 15
-#define R8A7790_CLK_IIC0 18
-#define R8A7790_CLK_PCIEC 19
-#define R8A7790_CLK_IIC1 23
-#define R8A7790_CLK_SSUSB 28
-#define R8A7790_CLK_CMT1 29
-#define R8A7790_CLK_USBDMAC0 30
-#define R8A7790_CLK_USBDMAC1 31
-
-/* MSTP4 */
-#define R8A7790_CLK_IRQC 7
-#define R8A7790_CLK_INTC_SYS 8
-
-/* MSTP5 */
-#define R8A7790_CLK_AUDIO_DMAC1 1
-#define R8A7790_CLK_AUDIO_DMAC0 2
-#define R8A7790_CLK_ADSP_MOD 6
-#define R8A7790_CLK_THERMAL 22
-#define R8A7790_CLK_PWM 23
-
-/* MSTP7 */
-#define R8A7790_CLK_EHCI 3
-#define R8A7790_CLK_HSUSB 4
-#define R8A7790_CLK_HSCIF1 16
-#define R8A7790_CLK_HSCIF0 17
-#define R8A7790_CLK_SCIF1 20
-#define R8A7790_CLK_SCIF0 21
-#define R8A7790_CLK_DU2 22
-#define R8A7790_CLK_DU1 23
-#define R8A7790_CLK_DU0 24
-#define R8A7790_CLK_LVDS1 25
-#define R8A7790_CLK_LVDS0 26
-
-/* MSTP8 */
-#define R8A7790_CLK_MLB 2
-#define R8A7790_CLK_VIN3 8
-#define R8A7790_CLK_VIN2 9
-#define R8A7790_CLK_VIN1 10
-#define R8A7790_CLK_VIN0 11
-#define R8A7790_CLK_ETHERAVB 12
-#define R8A7790_CLK_ETHER 13
-#define R8A7790_CLK_SATA1 14
-#define R8A7790_CLK_SATA0 15
-
-/* MSTP9 */
-#define R8A7790_CLK_GPIO5 7
-#define R8A7790_CLK_GPIO4 8
-#define R8A7790_CLK_GPIO3 9
-#define R8A7790_CLK_GPIO2 10
-#define R8A7790_CLK_GPIO1 11
-#define R8A7790_CLK_GPIO0 12
-#define R8A7790_CLK_RCAN1 15
-#define R8A7790_CLK_RCAN0 16
-#define R8A7790_CLK_QSPI_MOD 17
-#define R8A7790_CLK_IICDVFS 26
-#define R8A7790_CLK_I2C3 28
-#define R8A7790_CLK_I2C2 29
-#define R8A7790_CLK_I2C1 30
-#define R8A7790_CLK_I2C0 31
-
-/* MSTP10 */
-#define R8A7790_CLK_SSI_ALL 5
-#define R8A7790_CLK_SSI9 6
-#define R8A7790_CLK_SSI8 7
-#define R8A7790_CLK_SSI7 8
-#define R8A7790_CLK_SSI6 9
-#define R8A7790_CLK_SSI5 10
-#define R8A7790_CLK_SSI4 11
-#define R8A7790_CLK_SSI3 12
-#define R8A7790_CLK_SSI2 13
-#define R8A7790_CLK_SSI1 14
-#define R8A7790_CLK_SSI0 15
-#define R8A7790_CLK_SCU_ALL 17
-#define R8A7790_CLK_SCU_DVC1 18
-#define R8A7790_CLK_SCU_DVC0 19
-#define R8A7790_CLK_SCU_CTU1_MIX1 20
-#define R8A7790_CLK_SCU_CTU0_MIX0 21
-#define R8A7790_CLK_SCU_SRC9 22
-#define R8A7790_CLK_SCU_SRC8 23
-#define R8A7790_CLK_SCU_SRC7 24
-#define R8A7790_CLK_SCU_SRC6 25
-#define R8A7790_CLK_SCU_SRC5 26
-#define R8A7790_CLK_SCU_SRC4 27
-#define R8A7790_CLK_SCU_SRC3 28
-#define R8A7790_CLK_SCU_SRC2 29
-#define R8A7790_CLK_SCU_SRC1 30
-#define R8A7790_CLK_SCU_SRC0 31
-
-#endif /* __DT_BINDINGS_CLOCK_R8A7790_H__ */
diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h
deleted file mode 100644
index bb4f18b1b3d5..000000000000
--- a/include/dt-bindings/clock/r8a7791-clock.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2013 Ideas On Board SPRL
- */
-
-#ifndef __DT_BINDINGS_CLOCK_R8A7791_H__
-#define __DT_BINDINGS_CLOCK_R8A7791_H__
-
-/* CPG */
-#define R8A7791_CLK_MAIN 0
-#define R8A7791_CLK_PLL0 1
-#define R8A7791_CLK_PLL1 2
-#define R8A7791_CLK_PLL3 3
-#define R8A7791_CLK_LB 4
-#define R8A7791_CLK_QSPI 5
-#define R8A7791_CLK_SDH 6
-#define R8A7791_CLK_SD0 7
-#define R8A7791_CLK_Z 8
-#define R8A7791_CLK_RCAN 9
-#define R8A7791_CLK_ADSP 10
-
-/* MSTP0 */
-#define R8A7791_CLK_MSIOF0 0
-
-/* MSTP1 */
-#define R8A7791_CLK_VCP0 1
-#define R8A7791_CLK_VPC0 3
-#define R8A7791_CLK_JPU 6
-#define R8A7791_CLK_SSP1 9
-#define R8A7791_CLK_TMU1 11
-#define R8A7791_CLK_3DG 12
-#define R8A7791_CLK_2DDMAC 15
-#define R8A7791_CLK_FDP1_1 18
-#define R8A7791_CLK_FDP1_0 19
-#define R8A7791_CLK_TMU3 21
-#define R8A7791_CLK_TMU2 22
-#define R8A7791_CLK_CMT0 24
-#define R8A7791_CLK_TMU0 25
-#define R8A7791_CLK_VSP1_DU1 27
-#define R8A7791_CLK_VSP1_DU0 28
-#define R8A7791_CLK_VSP1_S 31
-
-/* MSTP2 */
-#define R8A7791_CLK_SCIFA2 2
-#define R8A7791_CLK_SCIFA1 3
-#define R8A7791_CLK_SCIFA0 4
-#define R8A7791_CLK_MSIOF2 5
-#define R8A7791_CLK_SCIFB0 6
-#define R8A7791_CLK_SCIFB1 7
-#define R8A7791_CLK_MSIOF1 8
-#define R8A7791_CLK_SCIFB2 16
-#define R8A7791_CLK_SYS_DMAC1 18
-#define R8A7791_CLK_SYS_DMAC0 19
-
-/* MSTP3 */
-#define R8A7791_CLK_TPU0 4
-#define R8A7791_CLK_SDHI2 11
-#define R8A7791_CLK_SDHI1 12
-#define R8A7791_CLK_SDHI0 14
-#define R8A7791_CLK_MMCIF0 15
-#define R8A7791_CLK_IIC0 18
-#define R8A7791_CLK_PCIEC 19
-#define R8A7791_CLK_IIC1 23
-#define R8A7791_CLK_SSUSB 28
-#define R8A7791_CLK_CMT1 29
-#define R8A7791_CLK_USBDMAC0 30
-#define R8A7791_CLK_USBDMAC1 31
-
-/* MSTP4 */
-#define R8A7791_CLK_IRQC 7
-#define R8A7791_CLK_INTC_SYS 8
-
-/* MSTP5 */
-#define R8A7791_CLK_AUDIO_DMAC1 1
-#define R8A7791_CLK_AUDIO_DMAC0 2
-#define R8A7791_CLK_ADSP_MOD 6
-#define R8A7791_CLK_THERMAL 22
-#define R8A7791_CLK_PWM 23
-
-/* MSTP7 */
-#define R8A7791_CLK_EHCI 3
-#define R8A7791_CLK_HSUSB 4
-#define R8A7791_CLK_HSCIF2 13
-#define R8A7791_CLK_SCIF5 14
-#define R8A7791_CLK_SCIF4 15
-#define R8A7791_CLK_HSCIF1 16
-#define R8A7791_CLK_HSCIF0 17
-#define R8A7791_CLK_SCIF3 18
-#define R8A7791_CLK_SCIF2 19
-#define R8A7791_CLK_SCIF1 20
-#define R8A7791_CLK_SCIF0 21
-#define R8A7791_CLK_DU1 23
-#define R8A7791_CLK_DU0 24
-#define R8A7791_CLK_LVDS0 26
-
-/* MSTP8 */
-#define R8A7791_CLK_IPMMU_SGX 0
-#define R8A7791_CLK_MLB 2
-#define R8A7791_CLK_VIN2 9
-#define R8A7791_CLK_VIN1 10
-#define R8A7791_CLK_VIN0 11
-#define R8A7791_CLK_ETHERAVB 12
-#define R8A7791_CLK_ETHER 13
-#define R8A7791_CLK_SATA1 14
-#define R8A7791_CLK_SATA0 15
-
-/* MSTP9 */
-#define R8A7791_CLK_GYROADC 1
-#define R8A7791_CLK_GPIO7 4
-#define R8A7791_CLK_GPIO6 5
-#define R8A7791_CLK_GPIO5 7
-#define R8A7791_CLK_GPIO4 8
-#define R8A7791_CLK_GPIO3 9
-#define R8A7791_CLK_GPIO2 10
-#define R8A7791_CLK_GPIO1 11
-#define R8A7791_CLK_GPIO0 12
-#define R8A7791_CLK_RCAN1 15
-#define R8A7791_CLK_RCAN0 16
-#define R8A7791_CLK_QSPI_MOD 17
-#define R8A7791_CLK_I2C5 25
-#define R8A7791_CLK_IICDVFS 26
-#define R8A7791_CLK_I2C4 27
-#define R8A7791_CLK_I2C3 28
-#define R8A7791_CLK_I2C2 29
-#define R8A7791_CLK_I2C1 30
-#define R8A7791_CLK_I2C0 31
-
-/* MSTP10 */
-#define R8A7791_CLK_SSI_ALL 5
-#define R8A7791_CLK_SSI9 6
-#define R8A7791_CLK_SSI8 7
-#define R8A7791_CLK_SSI7 8
-#define R8A7791_CLK_SSI6 9
-#define R8A7791_CLK_SSI5 10
-#define R8A7791_CLK_SSI4 11
-#define R8A7791_CLK_SSI3 12
-#define R8A7791_CLK_SSI2 13
-#define R8A7791_CLK_SSI1 14
-#define R8A7791_CLK_SSI0 15
-#define R8A7791_CLK_SCU_ALL 17
-#define R8A7791_CLK_SCU_DVC1 18
-#define R8A7791_CLK_SCU_DVC0 19
-#define R8A7791_CLK_SCU_CTU1_MIX1 20
-#define R8A7791_CLK_SCU_CTU0_MIX0 21
-#define R8A7791_CLK_SCU_SRC9 22
-#define R8A7791_CLK_SCU_SRC8 23
-#define R8A7791_CLK_SCU_SRC7 24
-#define R8A7791_CLK_SCU_SRC6 25
-#define R8A7791_CLK_SCU_SRC5 26
-#define R8A7791_CLK_SCU_SRC4 27
-#define R8A7791_CLK_SCU_SRC3 28
-#define R8A7791_CLK_SCU_SRC2 29
-#define R8A7791_CLK_SCU_SRC1 30
-#define R8A7791_CLK_SCU_SRC0 31
-
-/* MSTP11 */
-#define R8A7791_CLK_SCIFA3 6
-#define R8A7791_CLK_SCIFA4 7
-#define R8A7791_CLK_SCIFA5 8
-
-#endif /* __DT_BINDINGS_CLOCK_R8A7791_H__ */
diff --git a/include/dt-bindings/clock/r8a7792-clock.h b/include/dt-bindings/clock/r8a7792-clock.h
deleted file mode 100644
index 2948d9ce3a14..000000000000
--- a/include/dt-bindings/clock/r8a7792-clock.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2016 Cogent Embedded, Inc.
- */
-
-#ifndef __DT_BINDINGS_CLOCK_R8A7792_H__
-#define __DT_BINDINGS_CLOCK_R8A7792_H__
-
-/* CPG */
-#define R8A7792_CLK_MAIN 0
-#define R8A7792_CLK_PLL0 1
-#define R8A7792_CLK_PLL1 2
-#define R8A7792_CLK_PLL3 3
-#define R8A7792_CLK_LB 4
-#define R8A7792_CLK_QSPI 5
-
-/* MSTP0 */
-#define R8A7792_CLK_MSIOF0 0
-
-/* MSTP1 */
-#define R8A7792_CLK_JPU 6
-#define R8A7792_CLK_TMU1 11
-#define R8A7792_CLK_TMU3 21
-#define R8A7792_CLK_TMU2 22
-#define R8A7792_CLK_CMT0 24
-#define R8A7792_CLK_TMU0 25
-#define R8A7792_CLK_VSP1DU1 27
-#define R8A7792_CLK_VSP1DU0 28
-#define R8A7792_CLK_VSP1_SY 31
-
-/* MSTP2 */
-#define R8A7792_CLK_MSIOF1 8
-#define R8A7792_CLK_SYS_DMAC1 18
-#define R8A7792_CLK_SYS_DMAC0 19
-
-/* MSTP3 */
-#define R8A7792_CLK_TPU0 4
-#define R8A7792_CLK_SDHI0 14
-#define R8A7792_CLK_CMT1 29
-
-/* MSTP4 */
-#define R8A7792_CLK_IRQC 7
-#define R8A7792_CLK_INTC_SYS 8
-
-/* MSTP5 */
-#define R8A7792_CLK_AUDIO_DMAC0 2
-#define R8A7792_CLK_THERMAL 22
-#define R8A7792_CLK_PWM 23
-
-/* MSTP7 */
-#define R8A7792_CLK_HSCIF1 16
-#define R8A7792_CLK_HSCIF0 17
-#define R8A7792_CLK_SCIF3 18
-#define R8A7792_CLK_SCIF2 19
-#define R8A7792_CLK_SCIF1 20
-#define R8A7792_CLK_SCIF0 21
-#define R8A7792_CLK_DU1 23
-#define R8A7792_CLK_DU0 24
-
-/* MSTP8 */
-#define R8A7792_CLK_VIN5 4
-#define R8A7792_CLK_VIN4 5
-#define R8A7792_CLK_VIN3 8
-#define R8A7792_CLK_VIN2 9
-#define R8A7792_CLK_VIN1 10
-#define R8A7792_CLK_VIN0 11
-#define R8A7792_CLK_ETHERAVB 12
-
-/* MSTP9 */
-#define R8A7792_CLK_GPIO7 4
-#define R8A7792_CLK_GPIO6 5
-#define R8A7792_CLK_GPIO5 7
-#define R8A7792_CLK_GPIO4 8
-#define R8A7792_CLK_GPIO3 9
-#define R8A7792_CLK_GPIO2 10
-#define R8A7792_CLK_GPIO1 11
-#define R8A7792_CLK_GPIO0 12
-#define R8A7792_CLK_GPIO11 13
-#define R8A7792_CLK_GPIO10 14
-#define R8A7792_CLK_CAN1 15
-#define R8A7792_CLK_CAN0 16
-#define R8A7792_CLK_QSPI_MOD 17
-#define R8A7792_CLK_GPIO9 19
-#define R8A7792_CLK_GPIO8 21
-#define R8A7792_CLK_I2C5 25
-#define R8A7792_CLK_IICDVFS 26
-#define R8A7792_CLK_I2C4 27
-#define R8A7792_CLK_I2C3 28
-#define R8A7792_CLK_I2C2 29
-#define R8A7792_CLK_I2C1 30
-#define R8A7792_CLK_I2C0 31
-
-/* MSTP10 */
-#define R8A7792_CLK_SSI_ALL 5
-#define R8A7792_CLK_SSI4 11
-#define R8A7792_CLK_SSI3 12
-
-#endif /* __DT_BINDINGS_CLOCK_R8A7792_H__ */
diff --git a/include/dt-bindings/clock/r8a7793-clock.h b/include/dt-bindings/clock/r8a7793-clock.h
deleted file mode 100644
index 49c66d8ed178..000000000000
--- a/include/dt-bindings/clock/r8a7793-clock.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * r8a7793 clock definition
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- */
-
-#ifndef __DT_BINDINGS_CLOCK_R8A7793_H__
-#define __DT_BINDINGS_CLOCK_R8A7793_H__
-
-/* CPG */
-#define R8A7793_CLK_MAIN 0
-#define R8A7793_CLK_PLL0 1
-#define R8A7793_CLK_PLL1 2
-#define R8A7793_CLK_PLL3 3
-#define R8A7793_CLK_LB 4
-#define R8A7793_CLK_QSPI 5
-#define R8A7793_CLK_SDH 6
-#define R8A7793_CLK_SD0 7
-#define R8A7793_CLK_Z 8
-#define R8A7793_CLK_RCAN 9
-#define R8A7793_CLK_ADSP 10
-
-/* MSTP0 */
-#define R8A7793_CLK_MSIOF0 0
-
-/* MSTP1 */
-#define R8A7793_CLK_VCP0 1
-#define R8A7793_CLK_VPC0 3
-#define R8A7793_CLK_SSP1 9
-#define R8A7793_CLK_TMU1 11
-#define R8A7793_CLK_3DG 12
-#define R8A7793_CLK_2DDMAC 15
-#define R8A7793_CLK_FDP1_1 18
-#define R8A7793_CLK_FDP1_0 19
-#define R8A7793_CLK_TMU3 21
-#define R8A7793_CLK_TMU2 22
-#define R8A7793_CLK_CMT0 24
-#define R8A7793_CLK_TMU0 25
-#define R8A7793_CLK_VSP1_DU1 27
-#define R8A7793_CLK_VSP1_DU0 28
-#define R8A7793_CLK_VSP1_S 31
-
-/* MSTP2 */
-#define R8A7793_CLK_SCIFA2 2
-#define R8A7793_CLK_SCIFA1 3
-#define R8A7793_CLK_SCIFA0 4
-#define R8A7793_CLK_MSIOF2 5
-#define R8A7793_CLK_SCIFB0 6
-#define R8A7793_CLK_SCIFB1 7
-#define R8A7793_CLK_MSIOF1 8
-#define R8A7793_CLK_SCIFB2 16
-#define R8A7793_CLK_SYS_DMAC1 18
-#define R8A7793_CLK_SYS_DMAC0 19
-
-/* MSTP3 */
-#define R8A7793_CLK_TPU0 4
-#define R8A7793_CLK_SDHI2 11
-#define R8A7793_CLK_SDHI1 12
-#define R8A7793_CLK_SDHI0 14
-#define R8A7793_CLK_MMCIF0 15
-#define R8A7793_CLK_IIC0 18
-#define R8A7793_CLK_PCIEC 19
-#define R8A7793_CLK_IIC1 23
-#define R8A7793_CLK_SSUSB 28
-#define R8A7793_CLK_CMT1 29
-#define R8A7793_CLK_USBDMAC0 30
-#define R8A7793_CLK_USBDMAC1 31
-
-/* MSTP4 */
-#define R8A7793_CLK_IRQC 7
-#define R8A7793_CLK_INTC_SYS 8
-
-/* MSTP5 */
-#define R8A7793_CLK_AUDIO_DMAC1 1
-#define R8A7793_CLK_AUDIO_DMAC0 2
-#define R8A7793_CLK_ADSP_MOD 6
-#define R8A7793_CLK_THERMAL 22
-#define R8A7793_CLK_PWM 23
-
-/* MSTP7 */
-#define R8A7793_CLK_EHCI 3
-#define R8A7793_CLK_HSUSB 4
-#define R8A7793_CLK_HSCIF2 13
-#define R8A7793_CLK_SCIF5 14
-#define R8A7793_CLK_SCIF4 15
-#define R8A7793_CLK_HSCIF1 16
-#define R8A7793_CLK_HSCIF0 17
-#define R8A7793_CLK_SCIF3 18
-#define R8A7793_CLK_SCIF2 19
-#define R8A7793_CLK_SCIF1 20
-#define R8A7793_CLK_SCIF0 21
-#define R8A7793_CLK_DU1 23
-#define R8A7793_CLK_DU0 24
-#define R8A7793_CLK_LVDS0 26
-
-/* MSTP8 */
-#define R8A7793_CLK_IPMMU_SGX 0
-#define R8A7793_CLK_VIN2 9
-#define R8A7793_CLK_VIN1 10
-#define R8A7793_CLK_VIN0 11
-#define R8A7793_CLK_ETHER 13
-#define R8A7793_CLK_SATA1 14
-#define R8A7793_CLK_SATA0 15
-
-/* MSTP9 */
-#define R8A7793_CLK_GPIO7 4
-#define R8A7793_CLK_GPIO6 5
-#define R8A7793_CLK_GPIO5 7
-#define R8A7793_CLK_GPIO4 8
-#define R8A7793_CLK_GPIO3 9
-#define R8A7793_CLK_GPIO2 10
-#define R8A7793_CLK_GPIO1 11
-#define R8A7793_CLK_GPIO0 12
-#define R8A7793_CLK_RCAN1 15
-#define R8A7793_CLK_RCAN0 16
-#define R8A7793_CLK_QSPI_MOD 17
-#define R8A7793_CLK_I2C5 25
-#define R8A7793_CLK_IICDVFS 26
-#define R8A7793_CLK_I2C4 27
-#define R8A7793_CLK_I2C3 28
-#define R8A7793_CLK_I2C2 29
-#define R8A7793_CLK_I2C1 30
-#define R8A7793_CLK_I2C0 31
-
-/* MSTP10 */
-#define R8A7793_CLK_SSI_ALL 5
-#define R8A7793_CLK_SSI9 6
-#define R8A7793_CLK_SSI8 7
-#define R8A7793_CLK_SSI7 8
-#define R8A7793_CLK_SSI6 9
-#define R8A7793_CLK_SSI5 10
-#define R8A7793_CLK_SSI4 11
-#define R8A7793_CLK_SSI3 12
-#define R8A7793_CLK_SSI2 13
-#define R8A7793_CLK_SSI1 14
-#define R8A7793_CLK_SSI0 15
-#define R8A7793_CLK_SCU_ALL 17
-#define R8A7793_CLK_SCU_DVC1 18
-#define R8A7793_CLK_SCU_DVC0 19
-#define R8A7793_CLK_SCU_CTU1_MIX1 20
-#define R8A7793_CLK_SCU_CTU0_MIX0 21
-#define R8A7793_CLK_SCU_SRC9 22
-#define R8A7793_CLK_SCU_SRC8 23
-#define R8A7793_CLK_SCU_SRC7 24
-#define R8A7793_CLK_SCU_SRC6 25
-#define R8A7793_CLK_SCU_SRC5 26
-#define R8A7793_CLK_SCU_SRC4 27
-#define R8A7793_CLK_SCU_SRC3 28
-#define R8A7793_CLK_SCU_SRC2 29
-#define R8A7793_CLK_SCU_SRC1 30
-#define R8A7793_CLK_SCU_SRC0 31
-
-/* MSTP11 */
-#define R8A7793_CLK_SCIFA3 6
-#define R8A7793_CLK_SCIFA4 7
-#define R8A7793_CLK_SCIFA5 8
-
-#endif /* __DT_BINDINGS_CLOCK_R8A7793_H__ */
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
deleted file mode 100644
index 649f005782d0..000000000000
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- * Copyright 2013 Ideas On Board SPRL
- */
-
-#ifndef __DT_BINDINGS_CLOCK_R8A7794_H__
-#define __DT_BINDINGS_CLOCK_R8A7794_H__
-
-/* CPG */
-#define R8A7794_CLK_MAIN 0
-#define R8A7794_CLK_PLL0 1
-#define R8A7794_CLK_PLL1 2
-#define R8A7794_CLK_PLL3 3
-#define R8A7794_CLK_LB 4
-#define R8A7794_CLK_QSPI 5
-#define R8A7794_CLK_SDH 6
-#define R8A7794_CLK_SD0 7
-#define R8A7794_CLK_RCAN 8
-
-/* MSTP0 */
-#define R8A7794_CLK_MSIOF0 0
-
-/* MSTP1 */
-#define R8A7794_CLK_VCP0 1
-#define R8A7794_CLK_VPC0 3
-#define R8A7794_CLK_TMU1 11
-#define R8A7794_CLK_3DG 12
-#define R8A7794_CLK_2DDMAC 15
-#define R8A7794_CLK_FDP1_0 19
-#define R8A7794_CLK_TMU3 21
-#define R8A7794_CLK_TMU2 22
-#define R8A7794_CLK_CMT0 24
-#define R8A7794_CLK_TMU0 25
-#define R8A7794_CLK_VSP1_DU0 28
-#define R8A7794_CLK_VSP1_S 31
-
-/* MSTP2 */
-#define R8A7794_CLK_SCIFA2 2
-#define R8A7794_CLK_SCIFA1 3
-#define R8A7794_CLK_SCIFA0 4
-#define R8A7794_CLK_MSIOF2 5
-#define R8A7794_CLK_SCIFB0 6
-#define R8A7794_CLK_SCIFB1 7
-#define R8A7794_CLK_MSIOF1 8
-#define R8A7794_CLK_SCIFB2 16
-#define R8A7794_CLK_SYS_DMAC1 18
-#define R8A7794_CLK_SYS_DMAC0 19
-
-/* MSTP3 */
-#define R8A7794_CLK_SDHI2 11
-#define R8A7794_CLK_SDHI1 12
-#define R8A7794_CLK_SDHI0 14
-#define R8A7794_CLK_MMCIF0 15
-#define R8A7794_CLK_IIC0 18
-#define R8A7794_CLK_IIC1 23
-#define R8A7794_CLK_CMT1 29
-#define R8A7794_CLK_USBDMAC0 30
-#define R8A7794_CLK_USBDMAC1 31
-
-/* MSTP4 */
-#define R8A7794_CLK_IRQC 7
-#define R8A7794_CLK_INTC_SYS 8
-
-/* MSTP5 */
-#define R8A7794_CLK_AUDIO_DMAC0 2
-#define R8A7794_CLK_PWM 23
-
-/* MSTP7 */
-#define R8A7794_CLK_EHCI 3
-#define R8A7794_CLK_HSUSB 4
-#define R8A7794_CLK_HSCIF2 13
-#define R8A7794_CLK_SCIF5 14
-#define R8A7794_CLK_SCIF4 15
-#define R8A7794_CLK_HSCIF1 16
-#define R8A7794_CLK_HSCIF0 17
-#define R8A7794_CLK_SCIF3 18
-#define R8A7794_CLK_SCIF2 19
-#define R8A7794_CLK_SCIF1 20
-#define R8A7794_CLK_SCIF0 21
-#define R8A7794_CLK_DU1 23
-#define R8A7794_CLK_DU0 24
-
-/* MSTP8 */
-#define R8A7794_CLK_VIN1 10
-#define R8A7794_CLK_VIN0 11
-#define R8A7794_CLK_ETHERAVB 12
-#define R8A7794_CLK_ETHER 13
-
-/* MSTP9 */
-#define R8A7794_CLK_GPIO6 5
-#define R8A7794_CLK_GPIO5 7
-#define R8A7794_CLK_GPIO4 8
-#define R8A7794_CLK_GPIO3 9
-#define R8A7794_CLK_GPIO2 10
-#define R8A7794_CLK_GPIO1 11
-#define R8A7794_CLK_GPIO0 12
-#define R8A7794_CLK_RCAN1 15
-#define R8A7794_CLK_RCAN0 16
-#define R8A7794_CLK_QSPI_MOD 17
-#define R8A7794_CLK_I2C5 25
-#define R8A7794_CLK_I2C4 27
-#define R8A7794_CLK_I2C3 28
-#define R8A7794_CLK_I2C2 29
-#define R8A7794_CLK_I2C1 30
-#define R8A7794_CLK_I2C0 31
-
-/* MSTP10 */
-#define R8A7794_CLK_SSI_ALL 5
-#define R8A7794_CLK_SSI9 6
-#define R8A7794_CLK_SSI8 7
-#define R8A7794_CLK_SSI7 8
-#define R8A7794_CLK_SSI6 9
-#define R8A7794_CLK_SSI5 10
-#define R8A7794_CLK_SSI4 11
-#define R8A7794_CLK_SSI3 12
-#define R8A7794_CLK_SSI2 13
-#define R8A7794_CLK_SSI1 14
-#define R8A7794_CLK_SSI0 15
-#define R8A7794_CLK_SCU_ALL 17
-#define R8A7794_CLK_SCU_DVC1 18
-#define R8A7794_CLK_SCU_DVC0 19
-#define R8A7794_CLK_SCU_CTU1_MIX1 20
-#define R8A7794_CLK_SCU_CTU0_MIX0 21
-#define R8A7794_CLK_SCU_SRC6 25
-#define R8A7794_CLK_SCU_SRC5 26
-#define R8A7794_CLK_SCU_SRC4 27
-#define R8A7794_CLK_SCU_SRC3 28
-#define R8A7794_CLK_SCU_SRC2 29
-#define R8A7794_CLK_SCU_SRC1 30
-
-/* MSTP11 */
-#define R8A7794_CLK_SCIFA3 6
-#define R8A7794_CLK_SCIFA4 7
-#define R8A7794_CLK_SCIFA5 8
-
-#endif /* __DT_BINDINGS_CLOCK_R8A7794_H__ */
diff --git a/include/dt-bindings/clock/r8a779a0-cpg-mssr.h b/include/dt-bindings/clock/r8a779a0-cpg-mssr.h
index f1d737ca7ca1..124a6b8856df 100644
--- a/include/dt-bindings/clock/r8a779a0-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a779a0-cpg-mssr.h
@@ -51,5 +51,6 @@
#define R8A779A0_CLK_CBFUSA 40
#define R8A779A0_CLK_R 41
#define R8A779A0_CLK_OSC 42
+#define R8A779A0_CLK_ZG 43

#endif /* __DT_BINDINGS_CLOCK_R8A779A0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a779f0-cpg-mssr.h b/include/dt-bindings/clock/r8a779f0-cpg-mssr.h
new file mode 100644
index 000000000000..c34be5624954
--- /dev/null
+++ b/include/dt-bindings/clock/r8a779f0-cpg-mssr.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A779F0_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A779F0_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a779f0 CPG Core Clocks */
+
+#define R8A779F0_CLK_ZX 0
+#define R8A779F0_CLK_ZS 1
+#define R8A779F0_CLK_ZT 2
+#define R8A779F0_CLK_ZTR 3
+#define R8A779F0_CLK_S0D2 4
+#define R8A779F0_CLK_S0D3 5
+#define R8A779F0_CLK_S0D4 6
+#define R8A779F0_CLK_S0D2_MM 7
+#define R8A779F0_CLK_S0D3_MM 8
+#define R8A779F0_CLK_S0D4_MM 9
+#define R8A779F0_CLK_S0D2_RT 10
+#define R8A779F0_CLK_S0D3_RT 11
+#define R8A779F0_CLK_S0D4_RT 12
+#define R8A779F0_CLK_S0D6_RT 13
+#define R8A779F0_CLK_S0D3_PER 14
+#define R8A779F0_CLK_S0D6_PER 15
+#define R8A779F0_CLK_S0D12_PER 16
+#define R8A779F0_CLK_S0D24_PER 17
+#define R8A779F0_CLK_S0D2_HSC 18
+#define R8A779F0_CLK_S0D3_HSC 19
+#define R8A779F0_CLK_S0D4_HSC 20
+#define R8A779F0_CLK_S0D6_HSC 21
+#define R8A779F0_CLK_S0D12_HSC 22
+#define R8A779F0_CLK_S0D2_CC 23
+#define R8A779F0_CLK_CL 24
+#define R8A779F0_CLK_CL16M 25
+#define R8A779F0_CLK_CL16M_MM 26
+#define R8A779F0_CLK_CL16M_RT 27
+#define R8A779F0_CLK_CL16M_PER 28
+#define R8A779F0_CLK_CL16M_HSC 29
+#define R8A779F0_CLK_Z0 30
+#define R8A779F0_CLK_Z1 31
+#define R8A779F0_CLK_ZB3 32
+#define R8A779F0_CLK_ZB3D2 33
+#define R8A779F0_CLK_ZB3D4 34
+#define R8A779F0_CLK_SD0H 35
+#define R8A779F0_CLK_SD0 36
+#define R8A779F0_CLK_RPC 37
+#define R8A779F0_CLK_RPCD2 38
+#define R8A779F0_CLK_MSO 39
+#define R8A779F0_CLK_SASYNCRT 40
+#define R8A779F0_CLK_SASYNCPERD1 41
+#define R8A779F0_CLK_SASYNCPERD2 42
+#define R8A779F0_CLK_SASYNCPERD4 43
+#define R8A779F0_CLK_DBGSOC_HSC 44
+#define R8A779F0_CLK_RSW2 45
+#define R8A779F0_CLK_OSC 46
+#define R8A779F0_CLK_ZR 47
+#define R8A779F0_CLK_CPEX 48
+#define R8A779F0_CLK_CBFUSA 49
+#define R8A779F0_CLK_R 50
+
+#endif /* __DT_BINDINGS_CLOCK_R8A779F0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a779g0-cpg-mssr.h b/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
new file mode 100644
index 000000000000..7850cdc62e28
--- /dev/null
+++ b/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A779G0_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A779G0_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a779g0 CPG Core Clocks */
+
+#define R8A779G0_CLK_ZX 0
+#define R8A779G0_CLK_ZS 1
+#define R8A779G0_CLK_ZT 2
+#define R8A779G0_CLK_ZTR 3
+#define R8A779G0_CLK_S0D2 4
+#define R8A779G0_CLK_S0D3 5
+#define R8A779G0_CLK_S0D4 6
+#define R8A779G0_CLK_S0D1_VIO 7
+#define R8A779G0_CLK_S0D2_VIO 8
+#define R8A779G0_CLK_S0D4_VIO 9
+#define R8A779G0_CLK_S0D8_VIO 10
+#define R8A779G0_CLK_S0D1_VC 11
+#define R8A779G0_CLK_S0D2_VC 12
+#define R8A779G0_CLK_S0D4_VC 13
+#define R8A779G0_CLK_S0D2_MM 14
+#define R8A779G0_CLK_S0D4_MM 15
+#define R8A779G0_CLK_S0D2_U3DG 16
+#define R8A779G0_CLK_S0D4_U3DG 17
+#define R8A779G0_CLK_S0D2_RT 18
+#define R8A779G0_CLK_S0D3_RT 19
+#define R8A779G0_CLK_S0D4_RT 20
+#define R8A779G0_CLK_S0D6_RT 21
+#define R8A779G0_CLK_S0D24_RT 22
+#define R8A779G0_CLK_S0D2_PER 23
+#define R8A779G0_CLK_S0D3_PER 24
+#define R8A779G0_CLK_S0D4_PER 25
+#define R8A779G0_CLK_S0D6_PER 26
+#define R8A779G0_CLK_S0D12_PER 27
+#define R8A779G0_CLK_S0D24_PER 28
+#define R8A779G0_CLK_S0D1_HSC 29
+#define R8A779G0_CLK_S0D2_HSC 30
+#define R8A779G0_CLK_S0D4_HSC 31
+#define R8A779G0_CLK_S0D2_CC 32
+#define R8A779G0_CLK_SVD1_IR 33
+#define R8A779G0_CLK_SVD2_IR 34
+#define R8A779G0_CLK_SVD1_VIP 35
+#define R8A779G0_CLK_SVD2_VIP 36
+#define R8A779G0_CLK_CL 37
+#define R8A779G0_CLK_CL16M 38
+#define R8A779G0_CLK_CL16M_MM 39
+#define R8A779G0_CLK_CL16M_RT 40
+#define R8A779G0_CLK_CL16M_PER 41
+#define R8A779G0_CLK_CL16M_HSC 42
+#define R8A779G0_CLK_Z0 43
+#define R8A779G0_CLK_ZB3 44
+#define R8A779G0_CLK_ZB3D2 45
+#define R8A779G0_CLK_ZB3D4 46
+#define R8A779G0_CLK_ZG 47
+#define R8A779G0_CLK_SD0H 48
+#define R8A779G0_CLK_SD0 49
+#define R8A779G0_CLK_RPC 50
+#define R8A779G0_CLK_RPCD2 51
+#define R8A779G0_CLK_MSO 52
+#define R8A779G0_CLK_CANFD 53
+#define R8A779G0_CLK_CSI 54
+#define R8A779G0_CLK_FRAY 55
+#define R8A779G0_CLK_IPC 56
+#define R8A779G0_CLK_SASYNCRT 57
+#define R8A779G0_CLK_SASYNCPERD1 58
+#define R8A779G0_CLK_SASYNCPERD2 59
+#define R8A779G0_CLK_SASYNCPERD4 60
+#define R8A779G0_CLK_VIOBUS 61
+#define R8A779G0_CLK_VIOBUSD2 62
+#define R8A779G0_CLK_VCBUS 63
+#define R8A779G0_CLK_VCBUSD2 64
+#define R8A779G0_CLK_DSIEXT 65
+#define R8A779G0_CLK_DSIREF 66
+#define R8A779G0_CLK_ADGH 67
+#define R8A779G0_CLK_OSC 68
+#define R8A779G0_CLK_ZR0 69
+#define R8A779G0_CLK_ZR1 70
+#define R8A779G0_CLK_ZR2 71
+#define R8A779G0_CLK_IMPA 72
+#define R8A779G0_CLK_IMPAD4 73
+#define R8A779G0_CLK_CPEX 74
+#define R8A779G0_CLK_CBFUSA 75
+#define R8A779G0_CLK_R 76
+#define R8A779G0_CLK_CP 77
+
+#endif /* __DT_BINDINGS_CLOCK_R8A779G0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r9a06g032-sysctrl.h b/include/dt-bindings/clock/r9a06g032-sysctrl.h
index 90c0f3dc1ba1..d9d7b8b4f426 100644
--- a/include/dt-bindings/clock/r9a06g032-sysctrl.h
+++ b/include/dt-bindings/clock/r9a06g032-sysctrl.h
@@ -74,6 +74,7 @@
#define R9A06G032_CLK_DDRPHY_PCLK 81 /* AKA CLK_REF_SYNC_D4 */
#define R9A06G032_CLK_FW 81 /* AKA CLK_REF_SYNC_D4 */
#define R9A06G032_CLK_CRYPTO 81 /* AKA CLK_REF_SYNC_D4 */
+#define R9A06G032_CLK_WATCHDOG 82 /* AKA CLK_REF_SYNC_D8 */
#define R9A06G032_CLK_A7MP 84 /* AKA DIV_CA7 */
#define R9A06G032_HCLK_CAN0 85
#define R9A06G032_HCLK_CAN1 86
diff --git a/include/dt-bindings/clock/r9a07g043-cpg.h b/include/dt-bindings/clock/r9a07g043-cpg.h
new file mode 100644
index 000000000000..e1f65f1928cf
--- /dev/null
+++ b/include/dt-bindings/clock/r9a07g043-cpg.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__
+#define __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A07G043 CPG Core Clocks */
+#define R9A07G043_CLK_I 0
+#define R9A07G043_CLK_I2 1
+#define R9A07G043_CLK_S0 2
+#define R9A07G043_CLK_SPI0 3
+#define R9A07G043_CLK_SPI1 4
+#define R9A07G043_CLK_SD0 5
+#define R9A07G043_CLK_SD1 6
+#define R9A07G043_CLK_M0 7
+#define R9A07G043_CLK_M2 8 /* RZ/G2UL Only */
+#define R9A07G043_CLK_M3 9 /* RZ/G2UL Only */
+#define R9A07G043_CLK_HP 10
+#define R9A07G043_CLK_TSU 11
+#define R9A07G043_CLK_ZT 12
+#define R9A07G043_CLK_P0 13
+#define R9A07G043_CLK_P1 14
+#define R9A07G043_CLK_P2 15
+#define R9A07G043_CLK_AT 16 /* RZ/G2UL Only */
+#define R9A07G043_OSCCLK 17
+#define R9A07G043_CLK_P0_DIV2 18
+
+/* R9A07G043 Module Clocks */
+#define R9A07G043_CA55_SCLK 0 /* RZ/G2UL Only */
+#define R9A07G043_CA55_PCLK 1 /* RZ/G2UL Only */
+#define R9A07G043_CA55_ATCLK 2 /* RZ/G2UL Only */
+#define R9A07G043_CA55_GICCLK 3 /* RZ/G2UL Only */
+#define R9A07G043_CA55_PERICLK 4 /* RZ/G2UL Only */
+#define R9A07G043_CA55_ACLK 5 /* RZ/G2UL Only */
+#define R9A07G043_CA55_TSCLK 6 /* RZ/G2UL Only */
+#define R9A07G043_GIC600_GICCLK 7 /* RZ/G2UL Only */
+#define R9A07G043_IA55_CLK 8 /* RZ/G2UL Only */
+#define R9A07G043_IA55_PCLK 9 /* RZ/G2UL Only */
+#define R9A07G043_MHU_PCLK 10 /* RZ/G2UL Only */
+#define R9A07G043_SYC_CNT_CLK 11
+#define R9A07G043_DMAC_ACLK 12
+#define R9A07G043_DMAC_PCLK 13
+#define R9A07G043_OSTM0_PCLK 14
+#define R9A07G043_OSTM1_PCLK 15
+#define R9A07G043_OSTM2_PCLK 16
+#define R9A07G043_MTU_X_MCK_MTU3 17
+#define R9A07G043_POE3_CLKM_POE 18
+#define R9A07G043_WDT0_PCLK 19
+#define R9A07G043_WDT0_CLK 20
+#define R9A07G043_WDT2_PCLK 21 /* RZ/G2UL Only */
+#define R9A07G043_WDT2_CLK 22 /* RZ/G2UL Only */
+#define R9A07G043_SPI_CLK2 23
+#define R9A07G043_SPI_CLK 24
+#define R9A07G043_SDHI0_IMCLK 25
+#define R9A07G043_SDHI0_IMCLK2 26
+#define R9A07G043_SDHI0_CLK_HS 27
+#define R9A07G043_SDHI0_ACLK 28
+#define R9A07G043_SDHI1_IMCLK 29
+#define R9A07G043_SDHI1_IMCLK2 30
+#define R9A07G043_SDHI1_CLK_HS 31
+#define R9A07G043_SDHI1_ACLK 32
+#define R9A07G043_ISU_ACLK 33 /* RZ/G2UL Only */
+#define R9A07G043_ISU_PCLK 34 /* RZ/G2UL Only */
+#define R9A07G043_CRU_SYSCLK 35 /* RZ/G2UL Only */
+#define R9A07G043_CRU_VCLK 36 /* RZ/G2UL Only */
+#define R9A07G043_CRU_PCLK 37 /* RZ/G2UL Only */
+#define R9A07G043_CRU_ACLK 38 /* RZ/G2UL Only */
+#define R9A07G043_LCDC_CLK_A 39 /* RZ/G2UL Only */
+#define R9A07G043_LCDC_CLK_P 40 /* RZ/G2UL Only */
+#define R9A07G043_LCDC_CLK_D 41 /* RZ/G2UL Only */
+#define R9A07G043_SSI0_PCLK2 42
+#define R9A07G043_SSI0_PCLK_SFR 43
+#define R9A07G043_SSI1_PCLK2 44
+#define R9A07G043_SSI1_PCLK_SFR 45
+#define R9A07G043_SSI2_PCLK2 46
+#define R9A07G043_SSI2_PCLK_SFR 47
+#define R9A07G043_SSI3_PCLK2 48
+#define R9A07G043_SSI3_PCLK_SFR 49
+#define R9A07G043_SRC_CLKP 50 /* RZ/G2UL Only */
+#define R9A07G043_USB_U2H0_HCLK 51
+#define R9A07G043_USB_U2H1_HCLK 52
+#define R9A07G043_USB_U2P_EXR_CPUCLK 53
+#define R9A07G043_USB_PCLK 54
+#define R9A07G043_ETH0_CLK_AXI 55
+#define R9A07G043_ETH0_CLK_CHI 56
+#define R9A07G043_ETH1_CLK_AXI 57
+#define R9A07G043_ETH1_CLK_CHI 58
+#define R9A07G043_I2C0_PCLK 59
+#define R9A07G043_I2C1_PCLK 60
+#define R9A07G043_I2C2_PCLK 61
+#define R9A07G043_I2C3_PCLK 62
+#define R9A07G043_SCIF0_CLK_PCK 63
+#define R9A07G043_SCIF1_CLK_PCK 64
+#define R9A07G043_SCIF2_CLK_PCK 65
+#define R9A07G043_SCIF3_CLK_PCK 66
+#define R9A07G043_SCIF4_CLK_PCK 67
+#define R9A07G043_SCI0_CLKP 68
+#define R9A07G043_SCI1_CLKP 69
+#define R9A07G043_IRDA_CLKP 70
+#define R9A07G043_RSPI0_CLKB 71
+#define R9A07G043_RSPI1_CLKB 72
+#define R9A07G043_RSPI2_CLKB 73
+#define R9A07G043_CANFD_PCLK 74
+#define R9A07G043_GPIO_HCLK 75
+#define R9A07G043_ADC_ADCLK 76
+#define R9A07G043_ADC_PCLK 77
+#define R9A07G043_TSU_PCLK 78
+#define R9A07G043_NCEPLDM_DM_CLK 79 /* RZ/Five Only */
+#define R9A07G043_NCEPLDM_ACLK 80 /* RZ/Five Only */
+#define R9A07G043_NCEPLDM_TCK 81 /* RZ/Five Only */
+#define R9A07G043_NCEPLMT_ACLK 82 /* RZ/Five Only */
+#define R9A07G043_NCEPLIC_ACLK 83 /* RZ/Five Only */
+#define R9A07G043_AX45MP_CORE0_CLK 84 /* RZ/Five Only */
+#define R9A07G043_AX45MP_ACLK 85 /* RZ/Five Only */
+#define R9A07G043_IAX45_CLK 86 /* RZ/Five Only */
+#define R9A07G043_IAX45_PCLK 87 /* RZ/Five Only */
+
+/* R9A07G043 Resets */
+#define R9A07G043_CA55_RST_1_0 0 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_1_1 1 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_3_0 2 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_3_1 3 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_4 4 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_5 5 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_6 6 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_7 7 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_8 8 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_9 9 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_10 10 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_11 11 /* RZ/G2UL Only */
+#define R9A07G043_CA55_RST_12 12 /* RZ/G2UL Only */
+#define R9A07G043_GIC600_GICRESET_N 13 /* RZ/G2UL Only */
+#define R9A07G043_GIC600_DBG_GICRESET_N 14 /* RZ/G2UL Only */
+#define R9A07G043_IA55_RESETN 15 /* RZ/G2UL Only */
+#define R9A07G043_MHU_RESETN 16 /* RZ/G2UL Only */
+#define R9A07G043_DMAC_ARESETN 17
+#define R9A07G043_DMAC_RST_ASYNC 18
+#define R9A07G043_SYC_RESETN 19
+#define R9A07G043_OSTM0_PRESETZ 20
+#define R9A07G043_OSTM1_PRESETZ 21
+#define R9A07G043_OSTM2_PRESETZ 22
+#define R9A07G043_MTU_X_PRESET_MTU3 23
+#define R9A07G043_POE3_RST_M_REG 24
+#define R9A07G043_WDT0_PRESETN 25
+#define R9A07G043_WDT2_PRESETN 26 /* RZ/G2UL Only */
+#define R9A07G043_SPI_RST 27
+#define R9A07G043_SDHI0_IXRST 28
+#define R9A07G043_SDHI1_IXRST 29
+#define R9A07G043_ISU_ARESETN 30 /* RZ/G2UL Only */
+#define R9A07G043_ISU_PRESETN 31 /* RZ/G2UL Only */
+#define R9A07G043_CRU_CMN_RSTB 32 /* RZ/G2UL Only */
+#define R9A07G043_CRU_PRESETN 33 /* RZ/G2UL Only */
+#define R9A07G043_CRU_ARESETN 34 /* RZ/G2UL Only */
+#define R9A07G043_LCDC_RESET_N 35 /* RZ/G2UL Only */
+#define R9A07G043_SSI0_RST_M2_REG 36
+#define R9A07G043_SSI1_RST_M2_REG 37
+#define R9A07G043_SSI2_RST_M2_REG 38
+#define R9A07G043_SSI3_RST_M2_REG 39
+#define R9A07G043_SRC_RST 40 /* RZ/G2UL Only */
+#define R9A07G043_USB_U2H0_HRESETN 41
+#define R9A07G043_USB_U2H1_HRESETN 42
+#define R9A07G043_USB_U2P_EXL_SYSRST 43
+#define R9A07G043_USB_PRESETN 44
+#define R9A07G043_ETH0_RST_HW_N 45
+#define R9A07G043_ETH1_RST_HW_N 46
+#define R9A07G043_I2C0_MRST 47
+#define R9A07G043_I2C1_MRST 48
+#define R9A07G043_I2C2_MRST 49
+#define R9A07G043_I2C3_MRST 50
+#define R9A07G043_SCIF0_RST_SYSTEM_N 51
+#define R9A07G043_SCIF1_RST_SYSTEM_N 52
+#define R9A07G043_SCIF2_RST_SYSTEM_N 53
+#define R9A07G043_SCIF3_RST_SYSTEM_N 54
+#define R9A07G043_SCIF4_RST_SYSTEM_N 55
+#define R9A07G043_SCI0_RST 56
+#define R9A07G043_SCI1_RST 57
+#define R9A07G043_IRDA_RST 58
+#define R9A07G043_RSPI0_RST 59
+#define R9A07G043_RSPI1_RST 60
+#define R9A07G043_RSPI2_RST 61
+#define R9A07G043_CANFD_RSTP_N 62
+#define R9A07G043_CANFD_RSTC_N 63
+#define R9A07G043_GPIO_RSTN 64
+#define R9A07G043_GPIO_PORT_RESETN 65
+#define R9A07G043_GPIO_SPARE_RESETN 66
+#define R9A07G043_ADC_PRESETN 67
+#define R9A07G043_ADC_ADRST_N 68
+#define R9A07G043_TSU_PRESETN 69
+#define R9A07G043_NCEPLDM_DTM_PWR_RST_N 70 /* RZ/Five Only */
+#define R9A07G043_NCEPLDM_ARESETN 71 /* RZ/Five Only */
+#define R9A07G043_NCEPLMT_POR_RSTN 72 /* RZ/Five Only */
+#define R9A07G043_NCEPLMT_ARESETN 73 /* RZ/Five Only */
+#define R9A07G043_NCEPLIC_ARESETN 74 /* RZ/Five Only */
+#define R9A07G043_AX45MP_ARESETNM 75 /* RZ/Five Only */
+#define R9A07G043_AX45MP_ARESETNS 76 /* RZ/Five Only */
+#define R9A07G043_AX45MP_L2_RESETN 77 /* RZ/Five Only */
+#define R9A07G043_AX45MP_CORE0_RESETN 78 /* RZ/Five Only */
+#define R9A07G043_IAX45_RESETN 79 /* RZ/Five Only */
+
+#endif /* __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a07g044-cpg.h b/include/dt-bindings/clock/r9a07g044-cpg.h
new file mode 100644
index 000000000000..0bb17ff1a01a
--- /dev/null
+++ b/include/dt-bindings/clock/r9a07g044-cpg.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__
+#define __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A07G044 CPG Core Clocks */
+#define R9A07G044_CLK_I 0
+#define R9A07G044_CLK_I2 1
+#define R9A07G044_CLK_G 2
+#define R9A07G044_CLK_S0 3
+#define R9A07G044_CLK_S1 4
+#define R9A07G044_CLK_SPI0 5
+#define R9A07G044_CLK_SPI1 6
+#define R9A07G044_CLK_SD0 7
+#define R9A07G044_CLK_SD1 8
+#define R9A07G044_CLK_M0 9
+#define R9A07G044_CLK_M1 10
+#define R9A07G044_CLK_M2 11
+#define R9A07G044_CLK_M3 12
+#define R9A07G044_CLK_M4 13
+#define R9A07G044_CLK_HP 14
+#define R9A07G044_CLK_TSU 15
+#define R9A07G044_CLK_ZT 16
+#define R9A07G044_CLK_P0 17
+#define R9A07G044_CLK_P1 18
+#define R9A07G044_CLK_P2 19
+#define R9A07G044_CLK_AT 20
+#define R9A07G044_OSCCLK 21
+#define R9A07G044_CLK_P0_DIV2 22
+
+/* R9A07G044 Module Clocks */
+#define R9A07G044_CA55_SCLK 0
+#define R9A07G044_CA55_PCLK 1
+#define R9A07G044_CA55_ATCLK 2
+#define R9A07G044_CA55_GICCLK 3
+#define R9A07G044_CA55_PERICLK 4
+#define R9A07G044_CA55_ACLK 5
+#define R9A07G044_CA55_TSCLK 6
+#define R9A07G044_GIC600_GICCLK 7
+#define R9A07G044_IA55_CLK 8
+#define R9A07G044_IA55_PCLK 9
+#define R9A07G044_MHU_PCLK 10
+#define R9A07G044_SYC_CNT_CLK 11
+#define R9A07G044_DMAC_ACLK 12
+#define R9A07G044_DMAC_PCLK 13
+#define R9A07G044_OSTM0_PCLK 14
+#define R9A07G044_OSTM1_PCLK 15
+#define R9A07G044_OSTM2_PCLK 16
+#define R9A07G044_MTU_X_MCK_MTU3 17
+#define R9A07G044_POE3_CLKM_POE 18
+#define R9A07G044_GPT_PCLK 19
+#define R9A07G044_POEG_A_CLKP 20
+#define R9A07G044_POEG_B_CLKP 21
+#define R9A07G044_POEG_C_CLKP 22
+#define R9A07G044_POEG_D_CLKP 23
+#define R9A07G044_WDT0_PCLK 24
+#define R9A07G044_WDT0_CLK 25
+#define R9A07G044_WDT1_PCLK 26
+#define R9A07G044_WDT1_CLK 27
+#define R9A07G044_WDT2_PCLK 28
+#define R9A07G044_WDT2_CLK 29
+#define R9A07G044_SPI_CLK2 30
+#define R9A07G044_SPI_CLK 31
+#define R9A07G044_SDHI0_IMCLK 32
+#define R9A07G044_SDHI0_IMCLK2 33
+#define R9A07G044_SDHI0_CLK_HS 34
+#define R9A07G044_SDHI0_ACLK 35
+#define R9A07G044_SDHI1_IMCLK 36
+#define R9A07G044_SDHI1_IMCLK2 37
+#define R9A07G044_SDHI1_CLK_HS 38
+#define R9A07G044_SDHI1_ACLK 39
+#define R9A07G044_GPU_CLK 40
+#define R9A07G044_GPU_AXI_CLK 41
+#define R9A07G044_GPU_ACE_CLK 42
+#define R9A07G044_ISU_ACLK 43
+#define R9A07G044_ISU_PCLK 44
+#define R9A07G044_H264_CLK_A 45
+#define R9A07G044_H264_CLK_P 46
+#define R9A07G044_CRU_SYSCLK 47
+#define R9A07G044_CRU_VCLK 48
+#define R9A07G044_CRU_PCLK 49
+#define R9A07G044_CRU_ACLK 50
+#define R9A07G044_MIPI_DSI_PLLCLK 51
+#define R9A07G044_MIPI_DSI_SYSCLK 52
+#define R9A07G044_MIPI_DSI_ACLK 53
+#define R9A07G044_MIPI_DSI_PCLK 54
+#define R9A07G044_MIPI_DSI_VCLK 55
+#define R9A07G044_MIPI_DSI_LPCLK 56
+#define R9A07G044_LCDC_CLK_A 57
+#define R9A07G044_LCDC_CLK_P 58
+#define R9A07G044_LCDC_CLK_D 59
+#define R9A07G044_SSI0_PCLK2 60
+#define R9A07G044_SSI0_PCLK_SFR 61
+#define R9A07G044_SSI1_PCLK2 62
+#define R9A07G044_SSI1_PCLK_SFR 63
+#define R9A07G044_SSI2_PCLK2 64
+#define R9A07G044_SSI2_PCLK_SFR 65
+#define R9A07G044_SSI3_PCLK2 66
+#define R9A07G044_SSI3_PCLK_SFR 67
+#define R9A07G044_SRC_CLKP 68
+#define R9A07G044_USB_U2H0_HCLK 69
+#define R9A07G044_USB_U2H1_HCLK 70
+#define R9A07G044_USB_U2P_EXR_CPUCLK 71
+#define R9A07G044_USB_PCLK 72
+#define R9A07G044_ETH0_CLK_AXI 73
+#define R9A07G044_ETH0_CLK_CHI 74
+#define R9A07G044_ETH1_CLK_AXI 75
+#define R9A07G044_ETH1_CLK_CHI 76
+#define R9A07G044_I2C0_PCLK 77
+#define R9A07G044_I2C1_PCLK 78
+#define R9A07G044_I2C2_PCLK 79
+#define R9A07G044_I2C3_PCLK 80
+#define R9A07G044_SCIF0_CLK_PCK 81
+#define R9A07G044_SCIF1_CLK_PCK 82
+#define R9A07G044_SCIF2_CLK_PCK 83
+#define R9A07G044_SCIF3_CLK_PCK 84
+#define R9A07G044_SCIF4_CLK_PCK 85
+#define R9A07G044_SCI0_CLKP 86
+#define R9A07G044_SCI1_CLKP 87
+#define R9A07G044_IRDA_CLKP 88
+#define R9A07G044_RSPI0_CLKB 89
+#define R9A07G044_RSPI1_CLKB 90
+#define R9A07G044_RSPI2_CLKB 91
+#define R9A07G044_CANFD_PCLK 92
+#define R9A07G044_GPIO_HCLK 93
+#define R9A07G044_ADC_ADCLK 94
+#define R9A07G044_ADC_PCLK 95
+#define R9A07G044_TSU_PCLK 96
+
+/* R9A07G044 Resets */
+#define R9A07G044_CA55_RST_1_0 0
+#define R9A07G044_CA55_RST_1_1 1
+#define R9A07G044_CA55_RST_3_0 2
+#define R9A07G044_CA55_RST_3_1 3
+#define R9A07G044_CA55_RST_4 4
+#define R9A07G044_CA55_RST_5 5
+#define R9A07G044_CA55_RST_6 6
+#define R9A07G044_CA55_RST_7 7
+#define R9A07G044_CA55_RST_8 8
+#define R9A07G044_CA55_RST_9 9
+#define R9A07G044_CA55_RST_10 10
+#define R9A07G044_CA55_RST_11 11
+#define R9A07G044_CA55_RST_12 12
+#define R9A07G044_GIC600_GICRESET_N 13
+#define R9A07G044_GIC600_DBG_GICRESET_N 14
+#define R9A07G044_IA55_RESETN 15
+#define R9A07G044_MHU_RESETN 16
+#define R9A07G044_DMAC_ARESETN 17
+#define R9A07G044_DMAC_RST_ASYNC 18
+#define R9A07G044_SYC_RESETN 19
+#define R9A07G044_OSTM0_PRESETZ 20
+#define R9A07G044_OSTM1_PRESETZ 21
+#define R9A07G044_OSTM2_PRESETZ 22
+#define R9A07G044_MTU_X_PRESET_MTU3 23
+#define R9A07G044_POE3_RST_M_REG 24
+#define R9A07G044_GPT_RST_C 25
+#define R9A07G044_POEG_A_RST 26
+#define R9A07G044_POEG_B_RST 27
+#define R9A07G044_POEG_C_RST 28
+#define R9A07G044_POEG_D_RST 29
+#define R9A07G044_WDT0_PRESETN 30
+#define R9A07G044_WDT1_PRESETN 31
+#define R9A07G044_WDT2_PRESETN 32
+#define R9A07G044_SPI_RST 33
+#define R9A07G044_SDHI0_IXRST 34
+#define R9A07G044_SDHI1_IXRST 35
+#define R9A07G044_GPU_RESETN 36
+#define R9A07G044_GPU_AXI_RESETN 37
+#define R9A07G044_GPU_ACE_RESETN 38
+#define R9A07G044_ISU_ARESETN 39
+#define R9A07G044_ISU_PRESETN 40
+#define R9A07G044_H264_X_RESET_VCP 41
+#define R9A07G044_H264_CP_PRESET_P 42
+#define R9A07G044_CRU_CMN_RSTB 43
+#define R9A07G044_CRU_PRESETN 44
+#define R9A07G044_CRU_ARESETN 45
+#define R9A07G044_MIPI_DSI_CMN_RSTB 46
+#define R9A07G044_MIPI_DSI_ARESET_N 47
+#define R9A07G044_MIPI_DSI_PRESET_N 48
+#define R9A07G044_LCDC_RESET_N 49
+#define R9A07G044_SSI0_RST_M2_REG 50
+#define R9A07G044_SSI1_RST_M2_REG 51
+#define R9A07G044_SSI2_RST_M2_REG 52
+#define R9A07G044_SSI3_RST_M2_REG 53
+#define R9A07G044_SRC_RST 54
+#define R9A07G044_USB_U2H0_HRESETN 55
+#define R9A07G044_USB_U2H1_HRESETN 56
+#define R9A07G044_USB_U2P_EXL_SYSRST 57
+#define R9A07G044_USB_PRESETN 58
+#define R9A07G044_ETH0_RST_HW_N 59
+#define R9A07G044_ETH1_RST_HW_N 60
+#define R9A07G044_I2C0_MRST 61
+#define R9A07G044_I2C1_MRST 62
+#define R9A07G044_I2C2_MRST 63
+#define R9A07G044_I2C3_MRST 64
+#define R9A07G044_SCIF0_RST_SYSTEM_N 65
+#define R9A07G044_SCIF1_RST_SYSTEM_N 66
+#define R9A07G044_SCIF2_RST_SYSTEM_N 67
+#define R9A07G044_SCIF3_RST_SYSTEM_N 68
+#define R9A07G044_SCIF4_RST_SYSTEM_N 69
+#define R9A07G044_SCI0_RST 70
+#define R9A07G044_SCI1_RST 71
+#define R9A07G044_IRDA_RST 72
+#define R9A07G044_RSPI0_RST 73
+#define R9A07G044_RSPI1_RST 74
+#define R9A07G044_RSPI2_RST 75
+#define R9A07G044_CANFD_RSTP_N 76
+#define R9A07G044_CANFD_RSTC_N 77
+#define R9A07G044_GPIO_RSTN 78
+#define R9A07G044_GPIO_PORT_RESETN 79
+#define R9A07G044_GPIO_SPARE_RESETN 80
+#define R9A07G044_ADC_PRESETN 81
+#define R9A07G044_ADC_ADRST_N 82
+#define R9A07G044_TSU_PRESETN 83
+
+#endif /* __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a07g054-cpg.h b/include/dt-bindings/clock/r9a07g054-cpg.h
new file mode 100644
index 000000000000..43f4dbda872c
--- /dev/null
+++ b/include/dt-bindings/clock/r9a07g054-cpg.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__
+#define __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A07G054 CPG Core Clocks */
+#define R9A07G054_CLK_I 0
+#define R9A07G054_CLK_I2 1
+#define R9A07G054_CLK_G 2
+#define R9A07G054_CLK_S0 3
+#define R9A07G054_CLK_S1 4
+#define R9A07G054_CLK_SPI0 5
+#define R9A07G054_CLK_SPI1 6
+#define R9A07G054_CLK_SD0 7
+#define R9A07G054_CLK_SD1 8
+#define R9A07G054_CLK_M0 9
+#define R9A07G054_CLK_M1 10
+#define R9A07G054_CLK_M2 11
+#define R9A07G054_CLK_M3 12
+#define R9A07G054_CLK_M4 13
+#define R9A07G054_CLK_HP 14
+#define R9A07G054_CLK_TSU 15
+#define R9A07G054_CLK_ZT 16
+#define R9A07G054_CLK_P0 17
+#define R9A07G054_CLK_P1 18
+#define R9A07G054_CLK_P2 19
+#define R9A07G054_CLK_AT 20
+#define R9A07G054_OSCCLK 21
+#define R9A07G054_CLK_P0_DIV2 22
+#define R9A07G054_CLK_DRP_M 23
+#define R9A07G054_CLK_DRP_D 24
+#define R9A07G054_CLK_DRP_A 25
+
+/* R9A07G054 Module Clocks */
+#define R9A07G054_CA55_SCLK 0
+#define R9A07G054_CA55_PCLK 1
+#define R9A07G054_CA55_ATCLK 2
+#define R9A07G054_CA55_GICCLK 3
+#define R9A07G054_CA55_PERICLK 4
+#define R9A07G054_CA55_ACLK 5
+#define R9A07G054_CA55_TSCLK 6
+#define R9A07G054_GIC600_GICCLK 7
+#define R9A07G054_IA55_CLK 8
+#define R9A07G054_IA55_PCLK 9
+#define R9A07G054_MHU_PCLK 10
+#define R9A07G054_SYC_CNT_CLK 11
+#define R9A07G054_DMAC_ACLK 12
+#define R9A07G054_DMAC_PCLK 13
+#define R9A07G054_OSTM0_PCLK 14
+#define R9A07G054_OSTM1_PCLK 15
+#define R9A07G054_OSTM2_PCLK 16
+#define R9A07G054_MTU_X_MCK_MTU3 17
+#define R9A07G054_POE3_CLKM_POE 18
+#define R9A07G054_GPT_PCLK 19
+#define R9A07G054_POEG_A_CLKP 20
+#define R9A07G054_POEG_B_CLKP 21
+#define R9A07G054_POEG_C_CLKP 22
+#define R9A07G054_POEG_D_CLKP 23
+#define R9A07G054_WDT0_PCLK 24
+#define R9A07G054_WDT0_CLK 25
+#define R9A07G054_WDT1_PCLK 26
+#define R9A07G054_WDT1_CLK 27
+#define R9A07G054_WDT2_PCLK 28
+#define R9A07G054_WDT2_CLK 29
+#define R9A07G054_SPI_CLK2 30
+#define R9A07G054_SPI_CLK 31
+#define R9A07G054_SDHI0_IMCLK 32
+#define R9A07G054_SDHI0_IMCLK2 33
+#define R9A07G054_SDHI0_CLK_HS 34
+#define R9A07G054_SDHI0_ACLK 35
+#define R9A07G054_SDHI1_IMCLK 36
+#define R9A07G054_SDHI1_IMCLK2 37
+#define R9A07G054_SDHI1_CLK_HS 38
+#define R9A07G054_SDHI1_ACLK 39
+#define R9A07G054_GPU_CLK 40
+#define R9A07G054_GPU_AXI_CLK 41
+#define R9A07G054_GPU_ACE_CLK 42
+#define R9A07G054_ISU_ACLK 43
+#define R9A07G054_ISU_PCLK 44
+#define R9A07G054_H264_CLK_A 45
+#define R9A07G054_H264_CLK_P 46
+#define R9A07G054_CRU_SYSCLK 47
+#define R9A07G054_CRU_VCLK 48
+#define R9A07G054_CRU_PCLK 49
+#define R9A07G054_CRU_ACLK 50
+#define R9A07G054_MIPI_DSI_PLLCLK 51
+#define R9A07G054_MIPI_DSI_SYSCLK 52
+#define R9A07G054_MIPI_DSI_ACLK 53
+#define R9A07G054_MIPI_DSI_PCLK 54
+#define R9A07G054_MIPI_DSI_VCLK 55
+#define R9A07G054_MIPI_DSI_LPCLK 56
+#define R9A07G054_LCDC_CLK_A 57
+#define R9A07G054_LCDC_CLK_P 58
+#define R9A07G054_LCDC_CLK_D 59
+#define R9A07G054_SSI0_PCLK2 60
+#define R9A07G054_SSI0_PCLK_SFR 61
+#define R9A07G054_SSI1_PCLK2 62
+#define R9A07G054_SSI1_PCLK_SFR 63
+#define R9A07G054_SSI2_PCLK2 64
+#define R9A07G054_SSI2_PCLK_SFR 65
+#define R9A07G054_SSI3_PCLK2 66
+#define R9A07G054_SSI3_PCLK_SFR 67
+#define R9A07G054_SRC_CLKP 68
+#define R9A07G054_USB_U2H0_HCLK 69
+#define R9A07G054_USB_U2H1_HCLK 70
+#define R9A07G054_USB_U2P_EXR_CPUCLK 71
+#define R9A07G054_USB_PCLK 72
+#define R9A07G054_ETH0_CLK_AXI 73
+#define R9A07G054_ETH0_CLK_CHI 74
+#define R9A07G054_ETH1_CLK_AXI 75
+#define R9A07G054_ETH1_CLK_CHI 76
+#define R9A07G054_I2C0_PCLK 77
+#define R9A07G054_I2C1_PCLK 78
+#define R9A07G054_I2C2_PCLK 79
+#define R9A07G054_I2C3_PCLK 80
+#define R9A07G054_SCIF0_CLK_PCK 81
+#define R9A07G054_SCIF1_CLK_PCK 82
+#define R9A07G054_SCIF2_CLK_PCK 83
+#define R9A07G054_SCIF3_CLK_PCK 84
+#define R9A07G054_SCIF4_CLK_PCK 85
+#define R9A07G054_SCI0_CLKP 86
+#define R9A07G054_SCI1_CLKP 87
+#define R9A07G054_IRDA_CLKP 88
+#define R9A07G054_RSPI0_CLKB 89
+#define R9A07G054_RSPI1_CLKB 90
+#define R9A07G054_RSPI2_CLKB 91
+#define R9A07G054_CANFD_PCLK 92
+#define R9A07G054_GPIO_HCLK 93
+#define R9A07G054_ADC_ADCLK 94
+#define R9A07G054_ADC_PCLK 95
+#define R9A07G054_TSU_PCLK 96
+#define R9A07G054_STPAI_INITCLK 97
+#define R9A07G054_STPAI_ACLK 98
+#define R9A07G054_STPAI_MCLK 99
+#define R9A07G054_STPAI_DCLKIN 100
+#define R9A07G054_STPAI_ACLK_DRP 101
+
+/* R9A07G054 Resets */
+#define R9A07G054_CA55_RST_1_0 0
+#define R9A07G054_CA55_RST_1_1 1
+#define R9A07G054_CA55_RST_3_0 2
+#define R9A07G054_CA55_RST_3_1 3
+#define R9A07G054_CA55_RST_4 4
+#define R9A07G054_CA55_RST_5 5
+#define R9A07G054_CA55_RST_6 6
+#define R9A07G054_CA55_RST_7 7
+#define R9A07G054_CA55_RST_8 8
+#define R9A07G054_CA55_RST_9 9
+#define R9A07G054_CA55_RST_10 10
+#define R9A07G054_CA55_RST_11 11
+#define R9A07G054_CA55_RST_12 12
+#define R9A07G054_GIC600_GICRESET_N 13
+#define R9A07G054_GIC600_DBG_GICRESET_N 14
+#define R9A07G054_IA55_RESETN 15
+#define R9A07G054_MHU_RESETN 16
+#define R9A07G054_DMAC_ARESETN 17
+#define R9A07G054_DMAC_RST_ASYNC 18
+#define R9A07G054_SYC_RESETN 19
+#define R9A07G054_OSTM0_PRESETZ 20
+#define R9A07G054_OSTM1_PRESETZ 21
+#define R9A07G054_OSTM2_PRESETZ 22
+#define R9A07G054_MTU_X_PRESET_MTU3 23
+#define R9A07G054_POE3_RST_M_REG 24
+#define R9A07G054_GPT_RST_C 25
+#define R9A07G054_POEG_A_RST 26
+#define R9A07G054_POEG_B_RST 27
+#define R9A07G054_POEG_C_RST 28
+#define R9A07G054_POEG_D_RST 29
+#define R9A07G054_WDT0_PRESETN 30
+#define R9A07G054_WDT1_PRESETN 31
+#define R9A07G054_WDT2_PRESETN 32
+#define R9A07G054_SPI_RST 33
+#define R9A07G054_SDHI0_IXRST 34
+#define R9A07G054_SDHI1_IXRST 35
+#define R9A07G054_GPU_RESETN 36
+#define R9A07G054_GPU_AXI_RESETN 37
+#define R9A07G054_GPU_ACE_RESETN 38
+#define R9A07G054_ISU_ARESETN 39
+#define R9A07G054_ISU_PRESETN 40
+#define R9A07G054_H264_X_RESET_VCP 41
+#define R9A07G054_H264_CP_PRESET_P 42
+#define R9A07G054_CRU_CMN_RSTB 43
+#define R9A07G054_CRU_PRESETN 44
+#define R9A07G054_CRU_ARESETN 45
+#define R9A07G054_MIPI_DSI_CMN_RSTB 46
+#define R9A07G054_MIPI_DSI_ARESET_N 47
+#define R9A07G054_MIPI_DSI_PRESET_N 48
+#define R9A07G054_LCDC_RESET_N 49
+#define R9A07G054_SSI0_RST_M2_REG 50
+#define R9A07G054_SSI1_RST_M2_REG 51
+#define R9A07G054_SSI2_RST_M2_REG 52
+#define R9A07G054_SSI3_RST_M2_REG 53
+#define R9A07G054_SRC_RST 54
+#define R9A07G054_USB_U2H0_HRESETN 55
+#define R9A07G054_USB_U2H1_HRESETN 56
+#define R9A07G054_USB_U2P_EXL_SYSRST 57
+#define R9A07G054_USB_PRESETN 58
+#define R9A07G054_ETH0_RST_HW_N 59
+#define R9A07G054_ETH1_RST_HW_N 60
+#define R9A07G054_I2C0_MRST 61
+#define R9A07G054_I2C1_MRST 62
+#define R9A07G054_I2C2_MRST 63
+#define R9A07G054_I2C3_MRST 64
+#define R9A07G054_SCIF0_RST_SYSTEM_N 65
+#define R9A07G054_SCIF1_RST_SYSTEM_N 66
+#define R9A07G054_SCIF2_RST_SYSTEM_N 67
+#define R9A07G054_SCIF3_RST_SYSTEM_N 68
+#define R9A07G054_SCIF4_RST_SYSTEM_N 69
+#define R9A07G054_SCI0_RST 70
+#define R9A07G054_SCI1_RST 71
+#define R9A07G054_IRDA_RST 72
+#define R9A07G054_RSPI0_RST 73
+#define R9A07G054_RSPI1_RST 74
+#define R9A07G054_RSPI2_RST 75
+#define R9A07G054_CANFD_RSTP_N 76
+#define R9A07G054_CANFD_RSTC_N 77
+#define R9A07G054_GPIO_RSTN 78
+#define R9A07G054_GPIO_PORT_RESETN 79
+#define R9A07G054_GPIO_SPARE_RESETN 80
+#define R9A07G054_ADC_PRESETN 81
+#define R9A07G054_ADC_ADRST_N 82
+#define R9A07G054_TSU_PRESETN 83
+#define R9A07G054_STPAI_ARESETN 84
+
+#endif /* __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a08g045-cpg.h b/include/dt-bindings/clock/r9a08g045-cpg.h
new file mode 100644
index 000000000000..410725b778a8
--- /dev/null
+++ b/include/dt-bindings/clock/r9a08g045-cpg.h
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__
+#define __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A08G045 CPG Core Clocks */
+#define R9A08G045_CLK_I 0
+#define R9A08G045_CLK_I2 1
+#define R9A08G045_CLK_I3 2
+#define R9A08G045_CLK_S0 3
+#define R9A08G045_CLK_SPI0 4
+#define R9A08G045_CLK_SPI1 5
+#define R9A08G045_CLK_SD0 6
+#define R9A08G045_CLK_SD1 7
+#define R9A08G045_CLK_SD2 8
+#define R9A08G045_CLK_M0 9
+#define R9A08G045_CLK_HP 10
+#define R9A08G045_CLK_TSU 11
+#define R9A08G045_CLK_ZT 12
+#define R9A08G045_CLK_P0 13
+#define R9A08G045_CLK_P1 14
+#define R9A08G045_CLK_P2 15
+#define R9A08G045_CLK_P3 16
+#define R9A08G045_CLK_P4 17
+#define R9A08G045_CLK_P5 18
+#define R9A08G045_CLK_AT 19
+#define R9A08G045_CLK_OC0 20
+#define R9A08G045_CLK_OC1 21
+#define R9A08G045_OSCCLK 22
+#define R9A08G045_OSCCLK2 23
+#define R9A08G045_SWD 24
+
+/* R9A08G045 Module Clocks */
+#define R9A08G045_OCTA_ACLK 0
+#define R9A08G045_OCTA_MCLK 1
+#define R9A08G045_CA55_SCLK 2
+#define R9A08G045_CA55_PCLK 3
+#define R9A08G045_CA55_ATCLK 4
+#define R9A08G045_CA55_GICCLK 5
+#define R9A08G045_CA55_PERICLK 6
+#define R9A08G045_CA55_ACLK 7
+#define R9A08G045_CA55_TSCLK 8
+#define R9A08G045_SRAM_ACPU_ACLK0 9
+#define R9A08G045_SRAM_ACPU_ACLK1 10
+#define R9A08G045_SRAM_ACPU_ACLK2 11
+#define R9A08G045_GIC600_GICCLK 12
+#define R9A08G045_IA55_CLK 13
+#define R9A08G045_IA55_PCLK 14
+#define R9A08G045_MHU_PCLK 15
+#define R9A08G045_SYC_CNT_CLK 16
+#define R9A08G045_DMAC_ACLK 17
+#define R9A08G045_DMAC_PCLK 18
+#define R9A08G045_OSTM0_PCLK 19
+#define R9A08G045_OSTM1_PCLK 20
+#define R9A08G045_OSTM2_PCLK 21
+#define R9A08G045_OSTM3_PCLK 22
+#define R9A08G045_OSTM4_PCLK 23
+#define R9A08G045_OSTM5_PCLK 24
+#define R9A08G045_OSTM6_PCLK 25
+#define R9A08G045_OSTM7_PCLK 26
+#define R9A08G045_MTU_X_MCK_MTU3 27
+#define R9A08G045_POE3_CLKM_POE 28
+#define R9A08G045_GPT_PCLK 29
+#define R9A08G045_POEG_A_CLKP 30
+#define R9A08G045_POEG_B_CLKP 31
+#define R9A08G045_POEG_C_CLKP 32
+#define R9A08G045_POEG_D_CLKP 33
+#define R9A08G045_WDT0_PCLK 34
+#define R9A08G045_WDT0_CLK 35
+#define R9A08G045_WDT1_PCLK 36
+#define R9A08G045_WDT1_CLK 37
+#define R9A08G045_WDT2_PCLK 38
+#define R9A08G045_WDT2_CLK 39
+#define R9A08G045_SPI_HCLK 40
+#define R9A08G045_SPI_ACLK 41
+#define R9A08G045_SPI_CLK 42
+#define R9A08G045_SPI_CLKX2 43
+#define R9A08G045_SDHI0_IMCLK 44
+#define R9A08G045_SDHI0_IMCLK2 45
+#define R9A08G045_SDHI0_CLK_HS 46
+#define R9A08G045_SDHI0_ACLK 47
+#define R9A08G045_SDHI1_IMCLK 48
+#define R9A08G045_SDHI1_IMCLK2 49
+#define R9A08G045_SDHI1_CLK_HS 50
+#define R9A08G045_SDHI1_ACLK 51
+#define R9A08G045_SDHI2_IMCLK 52
+#define R9A08G045_SDHI2_IMCLK2 53
+#define R9A08G045_SDHI2_CLK_HS 54
+#define R9A08G045_SDHI2_ACLK 55
+#define R9A08G045_SSI0_PCLK2 56
+#define R9A08G045_SSI0_PCLK_SFR 57
+#define R9A08G045_SSI1_PCLK2 58
+#define R9A08G045_SSI1_PCLK_SFR 59
+#define R9A08G045_SSI2_PCLK2 60
+#define R9A08G045_SSI2_PCLK_SFR 61
+#define R9A08G045_SSI3_PCLK2 62
+#define R9A08G045_SSI3_PCLK_SFR 63
+#define R9A08G045_SRC_CLKP 64
+#define R9A08G045_USB_U2H0_HCLK 65
+#define R9A08G045_USB_U2H1_HCLK 66
+#define R9A08G045_USB_U2P_EXR_CPUCLK 67
+#define R9A08G045_USB_PCLK 68
+#define R9A08G045_ETH0_CLK_AXI 69
+#define R9A08G045_ETH0_CLK_CHI 70
+#define R9A08G045_ETH0_REFCLK 71
+#define R9A08G045_ETH1_CLK_AXI 72
+#define R9A08G045_ETH1_CLK_CHI 73
+#define R9A08G045_ETH1_REFCLK 74
+#define R9A08G045_I2C0_PCLK 75
+#define R9A08G045_I2C1_PCLK 76
+#define R9A08G045_I2C2_PCLK 77
+#define R9A08G045_I2C3_PCLK 78
+#define R9A08G045_SCIF0_CLK_PCK 79
+#define R9A08G045_SCIF1_CLK_PCK 80
+#define R9A08G045_SCIF2_CLK_PCK 81
+#define R9A08G045_SCIF3_CLK_PCK 82
+#define R9A08G045_SCIF4_CLK_PCK 83
+#define R9A08G045_SCIF5_CLK_PCK 84
+#define R9A08G045_SCI0_CLKP 85
+#define R9A08G045_SCI1_CLKP 86
+#define R9A08G045_IRDA_CLKP 87
+#define R9A08G045_RSPI0_CLKB 88
+#define R9A08G045_RSPI1_CLKB 89
+#define R9A08G045_RSPI2_CLKB 90
+#define R9A08G045_RSPI3_CLKB 91
+#define R9A08G045_RSPI4_CLKB 92
+#define R9A08G045_CANFD_PCLK 93
+#define R9A08G045_CANFD_CLK_RAM 94
+#define R9A08G045_GPIO_HCLK 95
+#define R9A08G045_ADC_ADCLK 96
+#define R9A08G045_ADC_PCLK 97
+#define R9A08G045_TSU_PCLK 98
+#define R9A08G045_PDM_PCLK 99
+#define R9A08G045_PDM_CCLK 100
+#define R9A08G045_PCI_ACLK 101
+#define R9A08G045_PCI_CLKL1PM 102
+#define R9A08G045_SPDIF_PCLK 103
+#define R9A08G045_I3C_PCLK 104
+#define R9A08G045_I3C_TCLK 105
+#define R9A08G045_VBAT_BCLK 106
+
+/* R9A08G045 Resets */
+#define R9A08G045_CA55_RST_1_0 0
+#define R9A08G045_CA55_RST_3_0 1
+#define R9A08G045_CA55_RST_4 2
+#define R9A08G045_CA55_RST_5 3
+#define R9A08G045_CA55_RST_6 4
+#define R9A08G045_CA55_RST_7 5
+#define R9A08G045_CA55_RST_8 6
+#define R9A08G045_CA55_RST_9 7
+#define R9A08G045_CA55_RST_10 8
+#define R9A08G045_CA55_RST_11 9
+#define R9A08G045_CA55_RST_12 10
+#define R9A08G045_SRAM_ACPU_ARESETN0 11
+#define R9A08G045_SRAM_ACPU_ARESETN1 12
+#define R9A08G045_SRAM_ACPU_ARESETN2 13
+#define R9A08G045_GIC600_GICRESET_N 14
+#define R9A08G045_GIC600_DBG_GICRESET_N 15
+#define R9A08G045_IA55_RESETN 16
+#define R9A08G045_MHU_RESETN 17
+#define R9A08G045_DMAC_ARESETN 18
+#define R9A08G045_DMAC_RST_ASYNC 19
+#define R9A08G045_SYC_RESETN 20
+#define R9A08G045_OSTM0_PRESETZ 21
+#define R9A08G045_OSTM1_PRESETZ 22
+#define R9A08G045_OSTM2_PRESETZ 23
+#define R9A08G045_OSTM3_PRESETZ 24
+#define R9A08G045_OSTM4_PRESETZ 25
+#define R9A08G045_OSTM5_PRESETZ 26
+#define R9A08G045_OSTM6_PRESETZ 27
+#define R9A08G045_OSTM7_PRESETZ 28
+#define R9A08G045_MTU_X_PRESET_MTU3 29
+#define R9A08G045_POE3_RST_M_REG 30
+#define R9A08G045_GPT_RST_C 31
+#define R9A08G045_POEG_A_RST 32
+#define R9A08G045_POEG_B_RST 33
+#define R9A08G045_POEG_C_RST 34
+#define R9A08G045_POEG_D_RST 35
+#define R9A08G045_WDT0_PRESETN 36
+#define R9A08G045_WDT1_PRESETN 37
+#define R9A08G045_WDT2_PRESETN 38
+#define R9A08G045_SPI_HRESETN 39
+#define R9A08G045_SPI_ARESETN 40
+#define R9A08G045_SDHI0_IXRST 41
+#define R9A08G045_SDHI1_IXRST 42
+#define R9A08G045_SDHI2_IXRST 43
+#define R9A08G045_SSI0_RST_M2_REG 44
+#define R9A08G045_SSI1_RST_M2_REG 45
+#define R9A08G045_SSI2_RST_M2_REG 46
+#define R9A08G045_SSI3_RST_M2_REG 47
+#define R9A08G045_SRC_RST 48
+#define R9A08G045_USB_U2H0_HRESETN 49
+#define R9A08G045_USB_U2H1_HRESETN 50
+#define R9A08G045_USB_U2P_EXL_SYSRST 51
+#define R9A08G045_USB_PRESETN 52
+#define R9A08G045_ETH0_RST_HW_N 53
+#define R9A08G045_ETH1_RST_HW_N 54
+#define R9A08G045_I2C0_MRST 55
+#define R9A08G045_I2C1_MRST 56
+#define R9A08G045_I2C2_MRST 57
+#define R9A08G045_I2C3_MRST 58
+#define R9A08G045_SCIF0_RST_SYSTEM_N 59
+#define R9A08G045_SCIF1_RST_SYSTEM_N 60
+#define R9A08G045_SCIF2_RST_SYSTEM_N 61
+#define R9A08G045_SCIF3_RST_SYSTEM_N 62
+#define R9A08G045_SCIF4_RST_SYSTEM_N 63
+#define R9A08G045_SCIF5_RST_SYSTEM_N 64
+#define R9A08G045_SCI0_RST 65
+#define R9A08G045_SCI1_RST 66
+#define R9A08G045_IRDA_RST 67
+#define R9A08G045_RSPI0_RST 68
+#define R9A08G045_RSPI1_RST 69
+#define R9A08G045_RSPI2_RST 70
+#define R9A08G045_RSPI3_RST 71
+#define R9A08G045_RSPI4_RST 72
+#define R9A08G045_CANFD_RSTP_N 73
+#define R9A08G045_CANFD_RSTC_N 74
+#define R9A08G045_GPIO_RSTN 75
+#define R9A08G045_GPIO_PORT_RESETN 76
+#define R9A08G045_GPIO_SPARE_RESETN 77
+#define R9A08G045_ADC_PRESETN 78
+#define R9A08G045_ADC_ADRST_N 79
+#define R9A08G045_TSU_PRESETN 80
+#define R9A08G045_OCTA_ARESETN 81
+#define R9A08G045_PDM0_PRESETNT 82
+#define R9A08G045_PCI_ARESETN 83
+#define R9A08G045_PCI_RST_B 84
+#define R9A08G045_PCI_RST_GP_B 85
+#define R9A08G045_PCI_RST_PS_B 86
+#define R9A08G045_PCI_RST_RSM_B 87
+#define R9A08G045_PCI_RST_CFG_B 88
+#define R9A08G045_PCI_RST_LOAD_B 89
+#define R9A08G045_SPDIF_RST 90
+#define R9A08G045_I3C_TRESETN 91
+#define R9A08G045_I3C_PRESETN 92
+#define R9A08G045_VBAT_BRESETN 93
+
+#endif /* __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a09g011-cpg.h b/include/dt-bindings/clock/r9a09g011-cpg.h
new file mode 100644
index 000000000000..41dd585d7115
--- /dev/null
+++ b/include/dt-bindings/clock/r9a09g011-cpg.h
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R9A09G011_CPG_H__
+#define __DT_BINDINGS_CLOCK_R9A09G011_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* Module Clocks */
+#define R9A09G011_SYS_CLK 0
+#define R9A09G011_PFC_PCLK 1
+#define R9A09G011_PMC_CORE_CLOCK 2
+#define R9A09G011_GIC_CLK 3
+#define R9A09G011_RAMA_ACLK 4
+#define R9A09G011_ROMA_ACLK 5
+#define R9A09G011_SEC_ACLK 6
+#define R9A09G011_SEC_PCLK 7
+#define R9A09G011_SEC_TCLK 8
+#define R9A09G011_DMAA_ACLK 9
+#define R9A09G011_TSU0_PCLK 10
+#define R9A09G011_TSU1_PCLK 11
+
+#define R9A09G011_CST_TRACECLK 12
+#define R9A09G011_CST_SB_CLK 13
+#define R9A09G011_CST_AHB_CLK 14
+#define R9A09G011_CST_ATB_SB_CLK 15
+#define R9A09G011_CST_TS_SB_CLK 16
+
+#define R9A09G011_SDI0_ACLK 17
+#define R9A09G011_SDI0_IMCLK 18
+#define R9A09G011_SDI0_IMCLK2 19
+#define R9A09G011_SDI0_CLK_HS 20
+#define R9A09G011_SDI1_ACLK 21
+#define R9A09G011_SDI1_IMCLK 22
+#define R9A09G011_SDI1_IMCLK2 23
+#define R9A09G011_SDI1_CLK_HS 24
+#define R9A09G011_EMM_ACLK 25
+#define R9A09G011_EMM_IMCLK 26
+#define R9A09G011_EMM_IMCLK2 27
+#define R9A09G011_EMM_CLK_HS 28
+#define R9A09G011_NFI_ACLK 29
+#define R9A09G011_NFI_NF_CLK 30
+
+#define R9A09G011_PCI_ACLK 31
+#define R9A09G011_PCI_CLK_PMU 32
+#define R9A09G011_PCI_APB_CLK 33
+#define R9A09G011_USB_ACLK_H 34
+#define R9A09G011_USB_ACLK_P 35
+#define R9A09G011_USB_PCLK 36
+#define R9A09G011_ETH0_CLK_AXI 37
+#define R9A09G011_ETH0_CLK_CHI 38
+#define R9A09G011_ETH0_GPTP_EXT 39
+
+#define R9A09G011_SDT_CLK 40
+#define R9A09G011_SDT_CLKAPB 41
+#define R9A09G011_SDT_CLK48 42
+#define R9A09G011_GRP_CLK 43
+#define R9A09G011_CIF_P0_CLK 44
+#define R9A09G011_CIF_P1_CLK 45
+#define R9A09G011_CIF_APB_CLK 46
+#define R9A09G011_DCI_CLKAXI 47
+#define R9A09G011_DCI_CLKAPB 48
+#define R9A09G011_DCI_CLKDCI2 49
+
+#define R9A09G011_HMI_PCLK 50
+#define R9A09G011_LCI_PCLK 51
+#define R9A09G011_LCI_ACLK 52
+#define R9A09G011_LCI_VCLK 53
+#define R9A09G011_LCI_LPCLK 54
+
+#define R9A09G011_AUI_CLK 55
+#define R9A09G011_AUI_CLKAXI 56
+#define R9A09G011_AUI_CLKAPB 57
+#define R9A09G011_AUMCLK 58
+#define R9A09G011_GMCLK0 59
+#define R9A09G011_GMCLK1 60
+#define R9A09G011_MTR_CLK0 61
+#define R9A09G011_MTR_CLK1 62
+#define R9A09G011_MTR_CLKAPB 63
+#define R9A09G011_GFT_CLK 64
+#define R9A09G011_GFT_CLKAPB 65
+#define R9A09G011_GFT_MCLK 66
+
+#define R9A09G011_ATGA_CLK 67
+#define R9A09G011_ATGA_CLKAPB 68
+#define R9A09G011_ATGB_CLK 69
+#define R9A09G011_ATGB_CLKAPB 70
+#define R9A09G011_SYC_CNT_CLK 71
+
+#define R9A09G011_CPERI_GRPA_PCLK 72
+#define R9A09G011_TIM0_CLK 73
+#define R9A09G011_TIM1_CLK 74
+#define R9A09G011_TIM2_CLK 75
+#define R9A09G011_TIM3_CLK 76
+#define R9A09G011_TIM4_CLK 77
+#define R9A09G011_TIM5_CLK 78
+#define R9A09G011_TIM6_CLK 79
+#define R9A09G011_TIM7_CLK 80
+#define R9A09G011_IIC_PCLK0 81
+
+#define R9A09G011_CPERI_GRPB_PCLK 82
+#define R9A09G011_TIM8_CLK 83
+#define R9A09G011_TIM9_CLK 84
+#define R9A09G011_TIM10_CLK 85
+#define R9A09G011_TIM11_CLK 86
+#define R9A09G011_TIM12_CLK 87
+#define R9A09G011_TIM13_CLK 88
+#define R9A09G011_TIM14_CLK 89
+#define R9A09G011_TIM15_CLK 90
+#define R9A09G011_IIC_PCLK1 91
+
+#define R9A09G011_CPERI_GRPC_PCLK 92
+#define R9A09G011_TIM16_CLK 93
+#define R9A09G011_TIM17_CLK 94
+#define R9A09G011_TIM18_CLK 95
+#define R9A09G011_TIM19_CLK 96
+#define R9A09G011_TIM20_CLK 97
+#define R9A09G011_TIM21_CLK 98
+#define R9A09G011_TIM22_CLK 99
+#define R9A09G011_TIM23_CLK 100
+#define R9A09G011_WDT0_PCLK 101
+#define R9A09G011_WDT0_CLK 102
+#define R9A09G011_WDT1_PCLK 103
+#define R9A09G011_WDT1_CLK 104
+
+#define R9A09G011_CPERI_GRPD_PCLK 105
+#define R9A09G011_TIM24_CLK 106
+#define R9A09G011_TIM25_CLK 107
+#define R9A09G011_TIM26_CLK 108
+#define R9A09G011_TIM27_CLK 109
+#define R9A09G011_TIM28_CLK 110
+#define R9A09G011_TIM29_CLK 111
+#define R9A09G011_TIM30_CLK 112
+#define R9A09G011_TIM31_CLK 113
+
+#define R9A09G011_CPERI_GRPE_PCLK 114
+#define R9A09G011_PWM0_CLK 115
+#define R9A09G011_PWM1_CLK 116
+#define R9A09G011_PWM2_CLK 117
+#define R9A09G011_PWM3_CLK 118
+#define R9A09G011_PWM4_CLK 119
+#define R9A09G011_PWM5_CLK 120
+#define R9A09G011_PWM6_CLK 121
+#define R9A09G011_PWM7_CLK 122
+
+#define R9A09G011_CPERI_GRPF_PCLK 123
+#define R9A09G011_PWM8_CLK 124
+#define R9A09G011_PWM9_CLK 125
+#define R9A09G011_PWM10_CLK 126
+#define R9A09G011_PWM11_CLK 127
+#define R9A09G011_PWM12_CLK 128
+#define R9A09G011_PWM13_CLK 129
+#define R9A09G011_PWM14_CLK 130
+#define R9A09G011_PWM15_CLK 131
+
+#define R9A09G011_CPERI_GRPG_PCLK 132
+#define R9A09G011_CPERI_GRPH_PCLK 133
+#define R9A09G011_URT_PCLK 134
+#define R9A09G011_URT0_CLK 135
+#define R9A09G011_URT1_CLK 136
+#define R9A09G011_CSI0_CLK 137
+#define R9A09G011_CSI1_CLK 138
+#define R9A09G011_CSI2_CLK 139
+#define R9A09G011_CSI3_CLK 140
+#define R9A09G011_CSI4_CLK 141
+#define R9A09G011_CSI5_CLK 142
+
+#define R9A09G011_ICB_ACLK1 143
+#define R9A09G011_ICB_GIC_CLK 144
+#define R9A09G011_ICB_MPCLK1 145
+#define R9A09G011_ICB_SPCLK1 146
+#define R9A09G011_ICB_CLK48 147
+#define R9A09G011_ICB_CLK48_2 148
+#define R9A09G011_ICB_CLK48_3 149
+#define R9A09G011_ICB_CLK48_4L 150
+#define R9A09G011_ICB_CLK48_4R 151
+#define R9A09G011_ICB_CLK48_5 152
+#define R9A09G011_ICB_CST_ATB_SB_CLK 153
+#define R9A09G011_ICB_CST_CS_CLK 154
+#define R9A09G011_ICB_CLK100_1 155
+#define R9A09G011_ICB_ETH0_CLK_AXI 156
+#define R9A09G011_ICB_DCI_CLKAXI 157
+#define R9A09G011_ICB_SYC_CNT_CLK 158
+
+#define R9A09G011_ICB_DRPA_ACLK 159
+#define R9A09G011_ICB_RFX_ACLK 160
+#define R9A09G011_ICB_RFX_PCLK5 161
+#define R9A09G011_ICB_MMC_ACLK 162
+
+#define R9A09G011_ICB_MPCLK3 163
+#define R9A09G011_ICB_CIMA_CLK 164
+#define R9A09G011_ICB_CIMB_CLK 165
+#define R9A09G011_ICB_BIMA_CLK 166
+#define R9A09G011_ICB_FCD_CLKAXI 167
+#define R9A09G011_ICB_VD_ACLK4 168
+#define R9A09G011_ICB_MPCLK4 169
+#define R9A09G011_ICB_VCD_PCLK4 170
+
+#define R9A09G011_CA53_CLK 171
+#define R9A09G011_CA53_ACLK 172
+#define R9A09G011_CA53_APCLK_DBG 173
+#define R9A09G011_CST_APB_CA53_CLK 174
+#define R9A09G011_CA53_ATCLK 175
+#define R9A09G011_CST_CS_CLK 176
+#define R9A09G011_CA53_TSCLK 177
+#define R9A09G011_CST_TS_CLK 178
+#define R9A09G011_CA53_APCLK_REG 179
+
+#define R9A09G011_DRPA_ACLK 180
+#define R9A09G011_DRPA_DCLK 181
+#define R9A09G011_DRPA_INITCLK 182
+
+#define R9A09G011_RAMB0_ACLK 183
+#define R9A09G011_RAMB1_ACLK 184
+#define R9A09G011_RAMB2_ACLK 185
+#define R9A09G011_RAMB3_ACLK 186
+
+#define R9A09G011_CIMA_CLKAPB 187
+#define R9A09G011_CIMA_CLK 188
+#define R9A09G011_CIMB_CLK 189
+#define R9A09G011_FAFA_CLK 190
+#define R9A09G011_STG_CLKAXI 191
+#define R9A09G011_STG_CLK0 192
+
+#define R9A09G011_BIMA_CLKAPB 193
+#define R9A09G011_BIMA_CLK 194
+#define R9A09G011_FAFB_CLK 195
+#define R9A09G011_FCD_CLK 196
+#define R9A09G011_FCD_CLKAXI 197
+
+#define R9A09G011_RIM_CLK 198
+#define R9A09G011_VCD_ACLK 199
+#define R9A09G011_VCD_PCLK 200
+#define R9A09G011_JPG0_CLK 201
+#define R9A09G011_JPG0_ACLK 202
+
+#define R9A09G011_MMC_CORE_DDRC_CLK 203
+#define R9A09G011_MMC_ACLK 204
+#define R9A09G011_MMC_PCLK 205
+#define R9A09G011_DDI_APBCLK 206
+
+/* Resets */
+#define R9A09G011_SYS_RST_N 0
+#define R9A09G011_PFC_PRESETN 1
+#define R9A09G011_RAMA_ARESETN 2
+#define R9A09G011_ROM_ARESETN 3
+#define R9A09G011_DMAA_ARESETN 4
+#define R9A09G011_SEC_ARESETN 5
+#define R9A09G011_SEC_PRESETN 6
+#define R9A09G011_SEC_RSTB 7
+#define R9A09G011_TSU0_RESETN 8
+#define R9A09G011_TSU1_RESETN 9
+#define R9A09G011_PMC_RESET_N 10
+
+#define R9A09G011_CST_NTRST 11
+#define R9A09G011_CST_NPOTRST 12
+#define R9A09G011_CST_NTRST2 13
+#define R9A09G011_CST_CS_RESETN 14
+#define R9A09G011_CST_TS_RESETN 15
+#define R9A09G011_CST_TRESETN 16
+#define R9A09G011_CST_SB_RESETN 17
+#define R9A09G011_CST_AHB_RESETN 18
+#define R9A09G011_CST_TS_SB_RESETN 19
+#define R9A09G011_CST_APB_CA53_RESETN 20
+#define R9A09G011_CST_ATB_SB_RESETN 21
+
+#define R9A09G011_SDI0_IXRST 22
+#define R9A09G011_SDI1_IXRST 23
+#define R9A09G011_EMM_IXRST 24
+#define R9A09G011_NFI_MARESETN 25
+#define R9A09G011_NFI_REG_RST_N 26
+#define R9A09G011_USB_PRESET_N 27
+#define R9A09G011_USB_DRD_RESET 28
+#define R9A09G011_USB_ARESETN_P 29
+#define R9A09G011_USB_ARESETN_H 30
+#define R9A09G011_ETH0_RST_HW_N 31
+#define R9A09G011_PCI_ARESETN 32
+
+#define R9A09G011_SDT_RSTSYSAX 33
+#define R9A09G011_GRP_RESETN 34
+#define R9A09G011_CIF_RST_N 35
+#define R9A09G011_DCU_RSTSYSAX 36
+#define R9A09G011_HMI_RST_N 37
+#define R9A09G011_HMI_PRESETN 38
+#define R9A09G011_LCI_PRESETN 39
+#define R9A09G011_LCI_ARESETN 40
+
+#define R9A09G011_AUI_RSTSYSAX 41
+#define R9A09G011_MTR_RSTSYSAX 42
+#define R9A09G011_GFT_RSTSYSAX 43
+#define R9A09G011_ATGA_RSTSYSAX 44
+#define R9A09G011_ATGB_RSTSYSAX 45
+#define R9A09G011_SYC_RST_N 46
+
+#define R9A09G011_TIM_GPA_PRESETN 47
+#define R9A09G011_TIM_GPB_PRESETN 48
+#define R9A09G011_TIM_GPC_PRESETN 49
+#define R9A09G011_TIM_GPD_PRESETN 50
+#define R9A09G011_PWM_GPE_PRESETN 51
+#define R9A09G011_PWM_GPF_PRESETN 52
+#define R9A09G011_CSI_GPG_PRESETN 53
+#define R9A09G011_CSI_GPH_PRESETN 54
+#define R9A09G011_IIC_GPA_PRESETN 55
+#define R9A09G011_IIC_GPB_PRESETN 56
+#define R9A09G011_URT_PRESETN 57
+#define R9A09G011_WDT0_PRESETN 58
+#define R9A09G011_WDT1_PRESETN 59
+
+#define R9A09G011_ICB_PD_AWO_RST_N 60
+#define R9A09G011_ICB_PD_MMC_RST_N 61
+#define R9A09G011_ICB_PD_VD0_RST_N 62
+#define R9A09G011_ICB_PD_VD1_RST_N 63
+#define R9A09G011_ICB_PD_RFX_RST_N 64
+
+#define R9A09G011_CA53_NCPUPORESET0 65
+#define R9A09G011_CA53_NCPUPORESET1 66
+#define R9A09G011_CA53_NCORERESET0 67
+#define R9A09G011_CA53_NCORERESET1 68
+#define R9A09G011_CA53_NPRESETDBG 69
+#define R9A09G011_CA53_L2RESET 70
+#define R9A09G011_CA53_NMISCRESET_HM 71
+#define R9A09G011_CA53_NMISCRESET_SM 72
+#define R9A09G011_CA53_NARESET 73
+
+#define R9A09G011_DRPA_ARESETN 74
+
+#define R9A09G011_RAMB0_ARESETN 75
+#define R9A09G011_RAMB1_ARESETN 76
+#define R9A09G011_RAMB2_ARESETN 77
+#define R9A09G011_RAMB3_ARESETN 78
+
+#define R9A09G011_CIMA_RSTSYSAX 79
+#define R9A09G011_CIMB_RSTSYSAX 80
+#define R9A09G011_FAFA_RSTSYSAX 81
+#define R9A09G011_STG_RSTSYSAX 82
+
+#define R9A09G011_BIMA_RSTSYSAX 83
+#define R9A09G011_FAFB_RSTSYSAX 84
+#define R9A09G011_FCD_RSTSYSAX 85
+#define R9A09G011_RIM_RSTSYSAX 86
+#define R9A09G011_VCD_RESETN 87
+#define R9A09G011_JPG_XRESET 88
+
+#define R9A09G011_MMC_CORE_DDRC_RSTN 89
+#define R9A09G011_MMC_ARESETN_N 90
+#define R9A09G011_MMC_PRESETN 91
+#define R9A09G011_DDI_PWROK 92
+#define R9A09G011_DDI_RESET 93
+#define R9A09G011_DDI_RESETN_APB 94
+
+#endif /* __DT_BINDINGS_CLOCK_R9A09G011_CPG_H__ */
diff --git a/include/dt-bindings/clock/raspberrypi,rp1-clocks.h b/include/dt-bindings/clock/raspberrypi,rp1-clocks.h
new file mode 100644
index 000000000000..7915fb8197bf
--- /dev/null
+++ b/include/dt-bindings/clock/raspberrypi,rp1-clocks.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021 Raspberry Pi Ltd.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_RASPBERRYPI_RP1
+#define __DT_BINDINGS_CLOCK_RASPBERRYPI_RP1
+
+#define RP1_PLL_SYS_CORE 0
+#define RP1_PLL_AUDIO_CORE 1
+#define RP1_PLL_VIDEO_CORE 2
+
+#define RP1_PLL_SYS 3
+#define RP1_PLL_AUDIO 4
+#define RP1_PLL_VIDEO 5
+
+#define RP1_PLL_SYS_PRI_PH 6
+#define RP1_PLL_SYS_SEC_PH 7
+#define RP1_PLL_AUDIO_PRI_PH 8
+
+#define RP1_PLL_SYS_SEC 9
+#define RP1_PLL_AUDIO_SEC 10
+#define RP1_PLL_VIDEO_SEC 11
+
+#define RP1_CLK_SYS 12
+#define RP1_CLK_SLOW_SYS 13
+#define RP1_CLK_DMA 14
+#define RP1_CLK_UART 15
+#define RP1_CLK_ETH 16
+#define RP1_CLK_PWM0 17
+#define RP1_CLK_PWM1 18
+#define RP1_CLK_AUDIO_IN 19
+#define RP1_CLK_AUDIO_OUT 20
+#define RP1_CLK_I2S 21
+#define RP1_CLK_MIPI0_CFG 22
+#define RP1_CLK_MIPI1_CFG 23
+#define RP1_CLK_PCIE_AUX 24
+#define RP1_CLK_USBH0_MICROFRAME 25
+#define RP1_CLK_USBH1_MICROFRAME 26
+#define RP1_CLK_USBH0_SUSPEND 27
+#define RP1_CLK_USBH1_SUSPEND 28
+#define RP1_CLK_ETH_TSU 29
+#define RP1_CLK_ADC 30
+#define RP1_CLK_SDIO_TIMER 31
+#define RP1_CLK_SDIO_ALT_SRC 32
+#define RP1_CLK_GP0 33
+#define RP1_CLK_GP1 34
+#define RP1_CLK_GP2 35
+#define RP1_CLK_GP3 36
+#define RP1_CLK_GP4 37
+#define RP1_CLK_GP5 38
+#define RP1_CLK_VEC 39
+#define RP1_CLK_DPI 40
+#define RP1_CLK_MIPI0_DPI 41
+#define RP1_CLK_MIPI1_DPI 42
+
+/* Extra PLL output channels - RP1B0 only */
+#define RP1_PLL_VIDEO_PRI_PH 43
+#define RP1_PLL_AUDIO_TERN 44
+
+/* MIPI clocks managed by the DSI driver */
+#define RP1_CLK_MIPI0_DSI_BYTECLOCK 45
+#define RP1_CLK_MIPI1_DSI_BYTECLOCK 46
+
+#endif
diff --git a/include/dt-bindings/clock/renesas,r8a779h0-cpg-mssr.h b/include/dt-bindings/clock/renesas,r8a779h0-cpg-mssr.h
new file mode 100644
index 000000000000..7ab6cfbaf901
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r8a779h0-cpg-mssr.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R8A779H0_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R8A779H0_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a779h0 CPG Core Clocks */
+
+#define R8A779H0_CLK_ZX 0
+#define R8A779H0_CLK_ZD 1
+#define R8A779H0_CLK_ZS 2
+#define R8A779H0_CLK_ZT 3
+#define R8A779H0_CLK_ZTR 4
+#define R8A779H0_CLK_S0D2 5
+#define R8A779H0_CLK_S0D3 6
+#define R8A779H0_CLK_S0D4 7
+#define R8A779H0_CLK_S0D1_VIO 8
+#define R8A779H0_CLK_S0D2_VIO 9
+#define R8A779H0_CLK_S0D4_VIO 10
+#define R8A779H0_CLK_S0D8_VIO 11
+#define R8A779H0_CLK_VIOBUSD1 12
+#define R8A779H0_CLK_VIOBUSD2 13
+#define R8A779H0_CLK_S0D1_VC 14
+#define R8A779H0_CLK_S0D2_VC 15
+#define R8A779H0_CLK_S0D4_VC 16
+#define R8A779H0_CLK_VCBUSD1 17
+#define R8A779H0_CLK_VCBUSD2 18
+#define R8A779H0_CLK_S0D2_MM 19
+#define R8A779H0_CLK_S0D4_MM 20
+#define R8A779H0_CLK_S0D2_U3DG 21
+#define R8A779H0_CLK_S0D4_U3DG 22
+#define R8A779H0_CLK_S0D2_RT 23
+#define R8A779H0_CLK_S0D3_RT 24
+#define R8A779H0_CLK_S0D4_RT 25
+#define R8A779H0_CLK_S0D6_RT 26
+#define R8A779H0_CLK_S0D2_PER 27
+#define R8A779H0_CLK_S0D3_PER 28
+#define R8A779H0_CLK_S0D4_PER 29
+#define R8A779H0_CLK_S0D6_PER 30
+#define R8A779H0_CLK_S0D12_PER 31
+#define R8A779H0_CLK_S0D24_PER 32
+#define R8A779H0_CLK_S0D1_HSC 33
+#define R8A779H0_CLK_S0D2_HSC 34
+#define R8A779H0_CLK_S0D4_HSC 35
+#define R8A779H0_CLK_S0D8_HSC 36
+#define R8A779H0_CLK_SVD1_IR 37
+#define R8A779H0_CLK_SVD2_IR 38
+#define R8A779H0_CLK_IMPAD1 39
+#define R8A779H0_CLK_IMPAD4 40
+#define R8A779H0_CLK_IMPB 41
+#define R8A779H0_CLK_SVD1_VIP 42
+#define R8A779H0_CLK_SVD2_VIP 43
+#define R8A779H0_CLK_CL 44
+#define R8A779H0_CLK_CL16M 45
+#define R8A779H0_CLK_CL16M_MM 46
+#define R8A779H0_CLK_CL16M_RT 47
+#define R8A779H0_CLK_CL16M_PER 48
+#define R8A779H0_CLK_CL16M_HSC 49
+#define R8A779H0_CLK_ZC0 50
+#define R8A779H0_CLK_ZC1 51
+#define R8A779H0_CLK_ZC2 52
+#define R8A779H0_CLK_ZC3 53
+#define R8A779H0_CLK_ZB3 54
+#define R8A779H0_CLK_ZB3D2 55
+#define R8A779H0_CLK_ZB3D4 56
+#define R8A779H0_CLK_ZG 57
+#define R8A779H0_CLK_SD0H 58
+#define R8A779H0_CLK_SD0 59
+#define R8A779H0_CLK_RPC 60
+#define R8A779H0_CLK_RPCD2 61
+#define R8A779H0_CLK_MSO 62
+#define R8A779H0_CLK_CANFD 63
+#define R8A779H0_CLK_CSI 64
+#define R8A779H0_CLK_FRAY 65
+#define R8A779H0_CLK_IPC 66
+#define R8A779H0_CLK_SASYNCRT 67
+#define R8A779H0_CLK_SASYNCPERD1 68
+#define R8A779H0_CLK_SASYNCPERD2 69
+#define R8A779H0_CLK_SASYNCPERD4 70
+#define R8A779H0_CLK_DSIEXT 71
+#define R8A779H0_CLK_DSIREF 72
+#define R8A779H0_CLK_ADGH 73
+#define R8A779H0_CLK_OSC 74
+#define R8A779H0_CLK_ZR0 75
+#define R8A779H0_CLK_ZR1 76
+#define R8A779H0_CLK_ZR2 77
+#define R8A779H0_CLK_RGMII 78
+#define R8A779H0_CLK_CPEX 79
+#define R8A779H0_CLK_CP 80
+#define R8A779H0_CLK_CBFUSA 81
+#define R8A779H0_CLK_R 82
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R8A779H0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a08g045-vbattb.h b/include/dt-bindings/clock/renesas,r9a08g045-vbattb.h
new file mode 100644
index 000000000000..4cc8fc34b23c
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a08g045-vbattb.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A08G045_VBATTB_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A08G045_VBATTB_H__
+
+#define VBATTB_XC 0
+#define VBATTB_XBYP 1
+#define VBATTB_MUX 2
+#define VBATTB_VBATTCLK 3
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A08G045_VBATTB_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a09g047-cpg.h b/include/dt-bindings/clock/renesas,r9a09g047-cpg.h
new file mode 100644
index 000000000000..dab24740de3c
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a09g047-cpg.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G047_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G047_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* Core Clock list */
+#define R9A09G047_SYS_0_PCLK 0
+#define R9A09G047_CA55_0_CORECLK0 1
+#define R9A09G047_CA55_0_CORECLK1 2
+#define R9A09G047_CA55_0_CORECLK2 3
+#define R9A09G047_CA55_0_CORECLK3 4
+#define R9A09G047_CA55_0_PERIPHCLK 5
+#define R9A09G047_CM33_CLK0 6
+#define R9A09G047_CST_0_SWCLKTCK 7
+#define R9A09G047_IOTOP_0_SHCLK 8
+#define R9A09G047_SPI_CLK_SPI 9
+#define R9A09G047_GBETH_0_CLK_PTP_REF_I 10
+#define R9A09G047_GBETH_1_CLK_PTP_REF_I 11
+#define R9A09G047_USB3_0_REF_ALT_CLK_P 12
+#define R9A09G047_USB3_0_CLKCORE 13
+#define R9A09G047_USB2_0_CLK_CORE0 14
+#define R9A09G047_USB2_0_CLK_CORE1 15
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G047_CPG_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a09g056-cpg.h b/include/dt-bindings/clock/renesas,r9a09g056-cpg.h
new file mode 100644
index 000000000000..234dcf4f0f91
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a09g056-cpg.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G056_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G056_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* Core Clock list */
+#define R9A09G056_SYS_0_PCLK 0
+#define R9A09G056_CA55_0_CORE_CLK0 1
+#define R9A09G056_CA55_0_CORE_CLK1 2
+#define R9A09G056_CA55_0_CORE_CLK2 3
+#define R9A09G056_CA55_0_CORE_CLK3 4
+#define R9A09G056_CA55_0_PERIPHCLK 5
+#define R9A09G056_CM33_CLK0 6
+#define R9A09G056_CST_0_SWCLKTCK 7
+#define R9A09G056_IOTOP_0_SHCLK 8
+#define R9A09G056_USB2_0_CLK_CORE0 9
+#define R9A09G056_GBETH_0_CLK_PTP_REF_I 10
+#define R9A09G056_GBETH_1_CLK_PTP_REF_I 11
+#define R9A09G056_SPI_CLK_SPI 12
+#define R9A09G056_USB3_0_REF_ALT_CLK_P 13
+#define R9A09G056_USB3_0_CLKCORE 14
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G056_CPG_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a09g057-cpg.h b/include/dt-bindings/clock/renesas,r9a09g057-cpg.h
new file mode 100644
index 000000000000..f91d7f72922a
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a09g057-cpg.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G057_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G057_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* Core Clock list */
+#define R9A09G057_SYS_0_PCLK 0
+#define R9A09G057_CA55_0_CORE_CLK0 1
+#define R9A09G057_CA55_0_CORE_CLK1 2
+#define R9A09G057_CA55_0_CORE_CLK2 3
+#define R9A09G057_CA55_0_CORE_CLK3 4
+#define R9A09G057_CA55_0_PERIPHCLK 5
+#define R9A09G057_CM33_CLK0 6
+#define R9A09G057_CST_0_SWCLKTCK 7
+#define R9A09G057_IOTOP_0_SHCLK 8
+#define R9A09G057_USB2_0_CLK_CORE0 9
+#define R9A09G057_USB2_0_CLK_CORE1 10
+#define R9A09G057_GBETH_0_CLK_PTP_REF_I 11
+#define R9A09G057_GBETH_1_CLK_PTP_REF_I 12
+#define R9A09G057_SPI_CLK_SPI 13
+#define R9A09G057_USB3_0_REF_ALT_CLK_P 14
+#define R9A09G057_USB3_0_CLKCORE 15
+#define R9A09G057_USB3_1_REF_ALT_CLK_P 16
+#define R9A09G057_USB3_1_CLKCORE 17
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G057_CPG_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h b/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
new file mode 100644
index 000000000000..2a805e06487b
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A09G077 CPG Core Clocks */
+#define R9A09G077_CLK_CA55C0 0
+#define R9A09G077_CLK_CA55C1 1
+#define R9A09G077_CLK_CA55C2 2
+#define R9A09G077_CLK_CA55C3 3
+#define R9A09G077_CLK_CA55S 4
+#define R9A09G077_CLK_CR52_CPU0 5
+#define R9A09G077_CLK_CR52_CPU1 6
+#define R9A09G077_CLK_CKIO 7
+#define R9A09G077_CLK_PCLKAH 8
+#define R9A09G077_CLK_PCLKAM 9
+#define R9A09G077_CLK_PCLKAL 10
+#define R9A09G077_CLK_PCLKGPTL 11
+#define R9A09G077_CLK_PCLKH 12
+#define R9A09G077_CLK_PCLKM 13
+#define R9A09G077_CLK_PCLKL 14
+#define R9A09G077_SDHI_CLKHS 15
+#define R9A09G077_USB_CLK 16
+#define R9A09G077_ETCLKA 17
+#define R9A09G077_ETCLKB 18
+#define R9A09G077_ETCLKC 19
+#define R9A09G077_ETCLKD 20
+#define R9A09G077_ETCLKE 21
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h b/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
new file mode 100644
index 000000000000..09da0ad33be6
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A09G087 CPG Core Clocks */
+#define R9A09G087_CLK_CA55C0 0
+#define R9A09G087_CLK_CA55C1 1
+#define R9A09G087_CLK_CA55C2 2
+#define R9A09G087_CLK_CA55C3 3
+#define R9A09G087_CLK_CA55S 4
+#define R9A09G087_CLK_CR52_CPU0 5
+#define R9A09G087_CLK_CR52_CPU1 6
+#define R9A09G087_CLK_CKIO 7
+#define R9A09G087_CLK_PCLKAH 8
+#define R9A09G087_CLK_PCLKAM 9
+#define R9A09G087_CLK_PCLKAL 10
+#define R9A09G087_CLK_PCLKGPTL 11
+#define R9A09G087_CLK_PCLKH 12
+#define R9A09G087_CLK_PCLKM 13
+#define R9A09G087_CLK_PCLKL 14
+#define R9A09G087_SDHI_CLKHS 15
+#define R9A09G087_USB_CLK 16
+#define R9A09G087_ETCLKA 17
+#define R9A09G087_ETCLKB 18
+#define R9A09G087_ETCLKC 19
+#define R9A09G087_ETCLKD 20
+#define R9A09G087_ETCLKE 21
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__ */
diff --git a/include/dt-bindings/clock/rk3036-cru.h b/include/dt-bindings/clock/rk3036-cru.h
index 35a5a01f9697..5cbc0e2b08ff 100644
--- a/include/dt-bindings/clock/rk3036-cru.h
+++ b/include/dt-bindings/clock/rk3036-cru.h
@@ -47,6 +47,7 @@
#define SCLK_MACREF 152
#define SCLK_MACPLL 153
#define SCLK_SFC 160
+#define SCLK_USB480M 161

/* aclk gates */
#define ACLK_DMAC2 194
@@ -81,6 +82,7 @@
#define HCLK_OTG0 449
#define HCLK_OTG1 450
#define HCLK_NANDC 453
+#define HCLK_SFC 454
#define HCLK_SDMMC 456
#define HCLK_SDIO 457
#define HCLK_EMMC 459
@@ -93,8 +95,6 @@
#define HCLK_CPU 477
#define HCLK_PERI 478

-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE0 0
#define SRST_CORE1 1
diff --git a/include/dt-bindings/clock/rk3128-cru.h b/include/dt-bindings/clock/rk3128-cru.h
index 6a47825dac5d..b609fcf96508 100644
--- a/include/dt-bindings/clock/rk3128-cru.h
+++ b/include/dt-bindings/clock/rk3128-cru.h
@@ -116,6 +116,7 @@
#define PCLK_GMAC 367
#define PCLK_PMU_PRE 368
#define PCLK_SIM_CARD 369
+#define PCLK_MIPIPHY 370

/* hclk gates */
#define HCLK_SPDIF 440
@@ -143,8 +144,7 @@
#define HCLK_TSP 475
#define HCLK_CRYPTO 476
#define HCLK_PERI 478
-
-#define CLK_NR_CLKS (HCLK_PERI + 1)
+#define HCLK_SFC 479

/* soft-reset indices */
#define SRST_CORE0_PO 0
diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h
index afad90680fce..dd988cc9d582 100644
--- a/include/dt-bindings/clock/rk3188-cru-common.h
+++ b/include/dt-bindings/clock/rk3188-cru-common.h
@@ -103,6 +103,8 @@
#define PCLK_PERI 351
#define PCLK_DDRUPCTL 352
#define PCLK_PUBL 353
+#define PCLK_CIF0 354
+#define PCLK_CIF1 355

/* hclk gates */
#define HCLK_SDMMC 448
@@ -132,8 +134,6 @@
#define HCLK_VDPU 472
#define HCLK_HDMI 473

-#define CLK_NR_CLKS (HCLK_HDMI + 1)
-
/* soft-reset indices */
#define SRST_MCORE 2
#define SRST_CORE0 3
diff --git a/include/dt-bindings/clock/rk3228-cru.h b/include/dt-bindings/clock/rk3228-cru.h
index de550ea56eeb..138b6ce514dd 100644
--- a/include/dt-bindings/clock/rk3228-cru.h
+++ b/include/dt-bindings/clock/rk3228-cru.h
@@ -146,8 +146,6 @@
#define HCLK_S_CRYPTO 477
#define HCLK_PERI 478

-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE0_PO 0
#define SRST_CORE1_PO 1
diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h
index 33819acbfc56..c6034b01b050 100644
--- a/include/dt-bindings/clock/rk3288-cru.h
+++ b/include/dt-bindings/clock/rk3288-cru.h
@@ -195,8 +195,6 @@
#define HCLK_CPU 477
#define HCLK_PERI 478

-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE0 0
#define SRST_CORE1 1
diff --git a/include/dt-bindings/clock/rk3308-cru.h b/include/dt-bindings/clock/rk3308-cru.h
index d97840f9ee2e..ce4cd72b9d3d 100644
--- a/include/dt-bindings/clock/rk3308-cru.h
+++ b/include/dt-bindings/clock/rk3308-cru.h
@@ -212,8 +212,6 @@
#define PCLK_CAN 233
#define PCLK_OWIRE 234

-#define CLK_NR_CLKS (PCLK_OWIRE + 1)
-
/* soft-reset indices */

/* cru_softrst_con0 */
diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h
index 555b4ff660ae..8885a2e98c65 100644
--- a/include/dt-bindings/clock/rk3328-cru.h
+++ b/include/dt-bindings/clock/rk3328-cru.h
@@ -201,8 +201,6 @@
#define HCLK_RGA 340
#define HCLK_HDCP 341
-#define CLK_NR_CLKS (HCLK_HDCP + 1)
-
/* soft-reset indices */
#define SRST_CORE0_PO 0
#define SRST_CORE1_PO 1
diff --git a/include/dt-bindings/clock/rk3368-cru.h b/include/dt-bindings/clock/rk3368-cru.h
index 83c72a163fd3..b951e2906948 100644
--- a/include/dt-bindings/clock/rk3368-cru.h
+++ b/include/dt-bindings/clock/rk3368-cru.h
@@ -72,6 +72,7 @@
#define SCLK_SFC 126
#define SCLK_MAC 127
#define SCLK_MACREF_OUT 128
+#define SCLK_MIPIDSI_24M 129
#define SCLK_TIMER10 133
#define SCLK_TIMER11 134
#define SCLK_TIMER12 135
@@ -182,8 +183,6 @@
#define HCLK_BUS 477
#define HCLK_PERI 478

-#define CLK_NR_CLKS (HCLK_PERI + 1)
-
/* soft-reset indices */
#define SRST_CORE_B0 0
#define SRST_CORE_B1 1
diff --git a/include/dt-bindings/clock/rk3399-cru.h b/include/dt-bindings/clock/rk3399-cru.h
index 44e0a319f077..4c90c7703a83 100644
--- a/include/dt-bindings/clock/rk3399-cru.h
+++ b/include/dt-bindings/clock/rk3399-cru.h
@@ -335,8 +335,6 @@
#define HCLK_SDIO_NOC 495
#define HCLK_SDIOAUDIO_NOC 496

-#define CLK_NR_CLKS (HCLK_SDIOAUDIO_NOC + 1)
-
/* pmu-clocks indices */

#define PLL_PPLL 1
@@ -378,8 +376,6 @@
#define PCLK_INTR_ARB_PMU 49
#define HCLK_NOC_PMU 50

-#define CLKPMU_NR_CLKS (HCLK_NOC_PMU + 1)
-
/* soft-reset indices */

/* cru_softrst_con0 */
@@ -547,8 +543,8 @@
#define SRST_H_PERILP0 171
#define SRST_H_PERILP0_NOC 172
#define SRST_ROM 173
-#define SRST_CRYPTO_S 174
-#define SRST_CRYPTO_M 175
+#define SRST_CRYPTO0_S 174
+#define SRST_CRYPTO0_M 175

/* cru_softrst_con11 */
#define SRST_P_DCF 176
@@ -556,7 +552,7 @@
#define SRST_CM0S 178
#define SRST_CM0S_DBG 179
#define SRST_CM0S_PO 180
-#define SRST_CRYPTO 181
+#define SRST_CRYPTO0 181
#define SRST_P_PERILP1_SGRF 182
#define SRST_P_PERILP1_GRF 183
#define SRST_CRYPTO1_S 184
diff --git a/include/dt-bindings/clock/rk3568-cru.h b/include/dt-bindings/clock/rk3568-cru.h
index d29890865150..1e0aef8a645d 100644
--- a/include/dt-bindings/clock/rk3568-cru.h
+++ b/include/dt-bindings/clock/rk3568-cru.h
@@ -78,6 +78,7 @@
#define CPLL_333M 9
#define ARMCLK 10
#define USB480M 11
+#define USB480M_PHY 12
#define ACLK_CORE_NIU2BUS 18
#define CLK_CORE_PVTM 19
#define CLK_CORE_PVTM_CORE 20
@@ -482,7 +483,11 @@
#define PCLK_CORE_PVTM 450

-#define CLK_NR_CLKS (PCLK_CORE_PVTM + 1)
+/* scmi-clocks indices */
+
+#define SCMI_CLK_CPU 0
+#define SCMI_CLK_GPU 1
+#define SCMI_CLK_NPU 2

/* pmu soft-reset indices */

/* pmucru_softrst_con0 */
diff --git a/include/dt-bindings/clock/rockchip,rk3506-cru.h b/include/dt-bindings/clock/rockchip,rk3506-cru.h
new file mode 100644
index 000000000000..71d7dda23cc9
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rk3506-cru.h
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023-2025 Rockchip Electronics Co., Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3506_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3506_H
+
+/* cru plls */
+#define PLL_GPLL 0
+#define PLL_V0PLL 1
+#define PLL_V1PLL 2
+
+/* cru-clocks indices */
+#define ARMCLK 3
+#define CLK_DDR 4
+#define XIN24M_GATE 5
+#define CLK_GPLL_GATE 6
+#define CLK_V0PLL_GATE 7
+#define CLK_V1PLL_GATE 8
+#define CLK_GPLL_DIV 9
+#define CLK_GPLL_DIV_100M 10
+#define CLK_V0PLL_DIV 11
+#define CLK_V1PLL_DIV 12
+#define CLK_INT_VOICE_MATRIX0 13
+#define CLK_INT_VOICE_MATRIX1 14
+#define CLK_INT_VOICE_MATRIX2 15
+#define CLK_FRAC_UART_MATRIX0_MUX 16
+#define CLK_FRAC_UART_MATRIX1_MUX 17
+#define CLK_FRAC_VOICE_MATRIX0_MUX 18
+#define CLK_FRAC_VOICE_MATRIX1_MUX 19
+#define CLK_FRAC_COMMON_MATRIX0_MUX 20
+#define CLK_FRAC_COMMON_MATRIX1_MUX 21
+#define CLK_FRAC_COMMON_MATRIX2_MUX 22
+#define CLK_FRAC_UART_MATRIX0 23
+#define CLK_FRAC_UART_MATRIX1 24
+#define CLK_FRAC_VOICE_MATRIX0 25
+#define CLK_FRAC_VOICE_MATRIX1 26
+#define CLK_FRAC_COMMON_MATRIX0 27
+#define CLK_FRAC_COMMON_MATRIX1 28
+#define CLK_FRAC_COMMON_MATRIX2 29
+#define CLK_REF_USBPHY_TOP 30
+#define CLK_REF_DPHY_TOP 31
+#define ACLK_CORE_ROOT 32
+#define PCLK_CORE_ROOT 33
+#define PCLK_DBG 34
+#define PCLK_CORE_GRF 35
+#define PCLK_CORE_CRU 36
+#define CLK_CORE_EMA_DETECT 37
+#define CLK_REF_PVTPLL_CORE 38
+#define PCLK_GPIO1 39
+#define DBCLK_GPIO1 40
+#define ACLK_CORE_PERI_ROOT 41
+#define HCLK_CORE_PERI_ROOT 42
+#define PCLK_CORE_PERI_ROOT 43
+#define CLK_DSMC 44
+#define ACLK_DSMC 45
+#define PCLK_DSMC 46
+#define CLK_FLEXBUS_TX 47
+#define CLK_FLEXBUS_RX 48
+#define ACLK_FLEXBUS 49
+#define HCLK_FLEXBUS 50
+#define ACLK_DSMC_SLV 51
+#define HCLK_DSMC_SLV 52
+#define ACLK_BUS_ROOT 53
+#define HCLK_BUS_ROOT 54
+#define PCLK_BUS_ROOT 55
+#define ACLK_SYSRAM 56
+#define HCLK_SYSRAM 57
+#define ACLK_DMAC0 58
+#define ACLK_DMAC1 59
+#define HCLK_M0 60
+#define PCLK_BUS_GRF 61
+#define PCLK_TIMER 62
+#define CLK_TIMER0_CH0 63
+#define CLK_TIMER0_CH1 64
+#define CLK_TIMER0_CH2 65
+#define CLK_TIMER0_CH3 66
+#define CLK_TIMER0_CH4 67
+#define CLK_TIMER0_CH5 68
+#define PCLK_WDT0 69
+#define TCLK_WDT0 70
+#define PCLK_WDT1 71
+#define TCLK_WDT1 72
+#define PCLK_MAILBOX 73
+#define PCLK_INTMUX 74
+#define PCLK_SPINLOCK 75
+#define PCLK_DDRC 76
+#define HCLK_DDRPHY 77
+#define PCLK_DDRMON 78
+#define CLK_DDRMON_OSC 79
+#define PCLK_STDBY 80
+#define HCLK_USBOTG0 81
+#define HCLK_USBOTG0_PMU 82
+#define CLK_USBOTG0_ADP 83
+#define HCLK_USBOTG1 84
+#define HCLK_USBOTG1_PMU 85
+#define CLK_USBOTG1_ADP 86
+#define PCLK_USBPHY 87
+#define ACLK_DMA2DDR 88
+#define PCLK_DMA2DDR 89
+#define STCLK_M0 90
+#define CLK_DDRPHY 91
+#define CLK_DDRC_SRC 92
+#define ACLK_DDRC_0 93
+#define ACLK_DDRC_1 94
+#define CLK_DDRC 95
+#define CLK_DDRMON 96
+#define HCLK_LSPERI_ROOT 97
+#define PCLK_LSPERI_ROOT 98
+#define PCLK_UART0 99
+#define PCLK_UART1 100
+#define PCLK_UART2 101
+#define PCLK_UART3 102
+#define PCLK_UART4 103
+#define SCLK_UART0 104
+#define SCLK_UART1 105
+#define SCLK_UART2 106
+#define SCLK_UART3 107
+#define SCLK_UART4 108
+#define PCLK_I2C0 109
+#define CLK_I2C0 110
+#define PCLK_I2C1 111
+#define CLK_I2C1 112
+#define PCLK_I2C2 113
+#define CLK_I2C2 114
+#define PCLK_PWM1 115
+#define CLK_PWM1 116
+#define CLK_OSC_PWM1 117
+#define CLK_RC_PWM1 118
+#define CLK_FREQ_PWM1 119
+#define CLK_COUNTER_PWM1 120
+#define PCLK_SPI0 121
+#define CLK_SPI0 122
+#define PCLK_SPI1 123
+#define CLK_SPI1 124
+#define PCLK_GPIO2 125
+#define DBCLK_GPIO2 126
+#define PCLK_GPIO3 127
+#define DBCLK_GPIO3 128
+#define PCLK_GPIO4 129
+#define DBCLK_GPIO4 130
+#define HCLK_CAN0 131
+#define CLK_CAN0 132
+#define HCLK_CAN1 133
+#define CLK_CAN1 134
+#define HCLK_PDM 135
+#define MCLK_PDM 136
+#define CLKOUT_PDM 137
+#define MCLK_SPDIFTX 138
+#define HCLK_SPDIFTX 139
+#define HCLK_SPDIFRX 140
+#define MCLK_SPDIFRX 141
+#define MCLK_SAI0 142
+#define HCLK_SAI0 143
+#define MCLK_OUT_SAI0 144
+#define MCLK_SAI1 145
+#define HCLK_SAI1 146
+#define MCLK_OUT_SAI1 147
+#define HCLK_ASRC0 148
+#define CLK_ASRC0 149
+#define HCLK_ASRC1 150
+#define CLK_ASRC1 151
+#define PCLK_CRU 152
+#define PCLK_PMU_ROOT 153
+#define MCLK_ASRC0 154
+#define MCLK_ASRC1 155
+#define MCLK_ASRC2 156
+#define MCLK_ASRC3 157
+#define LRCK_ASRC0_SRC 158
+#define LRCK_ASRC0_DST 159
+#define LRCK_ASRC1_SRC 160
+#define LRCK_ASRC1_DST 161
+#define ACLK_HSPERI_ROOT 162
+#define HCLK_HSPERI_ROOT 163
+#define PCLK_HSPERI_ROOT 164
+#define CCLK_SRC_SDMMC 165
+#define HCLK_SDMMC 166
+#define HCLK_FSPI 167
+#define SCLK_FSPI 168
+#define PCLK_SPI2 169
+#define ACLK_MAC0 170
+#define ACLK_MAC1 171
+#define PCLK_MAC0 172
+#define PCLK_MAC1 173
+#define CLK_MAC_ROOT 174
+#define CLK_MAC0 175
+#define CLK_MAC1 176
+#define MCLK_SAI2 177
+#define HCLK_SAI2 178
+#define MCLK_OUT_SAI2 179
+#define MCLK_SAI3_SRC 180
+#define HCLK_SAI3 181
+#define MCLK_SAI3 182
+#define MCLK_OUT_SAI3 183
+#define MCLK_SAI4_SRC 184
+#define HCLK_SAI4 185
+#define MCLK_SAI4 186
+#define HCLK_DSM 187
+#define MCLK_DSM 188
+#define PCLK_AUDIO_ADC 189
+#define MCLK_AUDIO_ADC 190
+#define MCLK_AUDIO_ADC_DIV4 191
+#define PCLK_SARADC 192
+#define CLK_SARADC 193
+#define PCLK_OTPC_NS 194
+#define CLK_SBPI_OTPC_NS 195
+#define CLK_USER_OTPC_NS 196
+#define PCLK_UART5 197
+#define SCLK_UART5 198
+#define PCLK_GPIO234_IOC 199
+#define CLK_MAC_PTP_ROOT 200
+#define CLK_MAC0_PTP 201
+#define CLK_MAC1_PTP 202
+#define CLK_SPI2 203
+#define ACLK_VIO_ROOT 204
+#define HCLK_VIO_ROOT 205
+#define PCLK_VIO_ROOT 206
+#define HCLK_RGA 207
+#define ACLK_RGA 208
+#define CLK_CORE_RGA 209
+#define ACLK_VOP 210
+#define HCLK_VOP 211
+#define DCLK_VOP 212
+#define PCLK_DPHY 213
+#define PCLK_DSI_HOST 214
+#define PCLK_TSADC 215
+#define CLK_TSADC 216
+#define CLK_TSADC_TSEN 217
+#define PCLK_GPIO1_IOC 218
+#define PCLK_OTPC_S 219
+#define CLK_SBPI_OTPC_S 220
+#define CLK_USER_OTPC_S 221
+#define PCLK_OTP_MASK 222
+#define PCLK_KEYREADER 223
+#define HCLK_BOOTROM 224
+#define PCLK_DDR_SERVICE 225
+#define HCLK_CRYPTO_S 226
+#define HCLK_KEYLAD 227
+#define CLK_CORE_CRYPTO 228
+#define CLK_PKA_CRYPTO 229
+#define CLK_CORE_CRYPTO_S 230
+#define CLK_PKA_CRYPTO_S 231
+#define ACLK_CRYPTO_S 232
+#define HCLK_RNG_S 233
+#define CLK_CORE_CRYPTO_NS 234
+#define CLK_PKA_CRYPTO_NS 235
+#define ACLK_CRYPTO_NS 236
+#define HCLK_CRYPTO_NS 237
+#define HCLK_RNG 238
+#define CLK_PMU 239
+#define PCLK_PMU 240
+#define CLK_PMU_32K 241
+#define PCLK_PMU_CRU 242
+#define PCLK_PMU_GRF 243
+#define PCLK_GPIO0_IOC 244
+#define PCLK_GPIO0 245
+#define DBCLK_GPIO0 246
+#define PCLK_GPIO1_SHADOW 247
+#define DBCLK_GPIO1_SHADOW 248
+#define PCLK_PMU_HP_TIMER 249
+#define CLK_PMU_HP_TIMER 250
+#define CLK_PMU_HP_TIMER_32K 251
+#define PCLK_PWM0 252
+#define CLK_PWM0 253
+#define CLK_OSC_PWM0 254
+#define CLK_RC_PWM0 255
+#define CLK_MAC_OUT 256
+#define CLK_REF_OUT0 257
+#define CLK_REF_OUT1 258
+#define CLK_32K_FRAC 259
+#define CLK_32K_RC 260
+#define CLK_32K 261
+#define CLK_32K_PMU 262
+#define PCLK_TOUCH_KEY 263
+#define CLK_TOUCH_KEY 264
+#define CLK_REF_PHY_PLL 265
+#define CLK_REF_PHY_PMU_MUX 266
+#define CLK_WIFI_OUT 267
+#define CLK_V0PLL_REF 268
+#define CLK_V1PLL_REF 269
+#define CLK_32K_FRAC_MUX 270
+
+#endif
diff --git a/include/dt-bindings/clock/rockchip,rk3528-cru.h b/include/dt-bindings/clock/rockchip,rk3528-cru.h
new file mode 100644
index 000000000000..0245a53fc334
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rk3528-cru.h
@@ -0,0 +1,459 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2024 Yao Zi <ziyao@disroot.org>
+ * Author: Joseph Chen <chenjh@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3528_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3528_H
+
+/* cru-clocks indices */
+#define PLL_APLL 0
+#define PLL_CPLL 1
+#define PLL_GPLL 2
+#define PLL_PPLL 3
+#define PLL_DPLL 4
+#define ARMCLK 5
+#define XIN_OSC0_HALF 6
+#define CLK_MATRIX_50M_SRC 7
+#define CLK_MATRIX_100M_SRC 8
+#define CLK_MATRIX_150M_SRC 9
+#define CLK_MATRIX_200M_SRC 10
+#define CLK_MATRIX_250M_SRC 11
+#define CLK_MATRIX_300M_SRC 12
+#define CLK_MATRIX_339M_SRC 13
+#define CLK_MATRIX_400M_SRC 14
+#define CLK_MATRIX_500M_SRC 15
+#define CLK_MATRIX_600M_SRC 16
+#define CLK_UART0_SRC 17
+#define CLK_UART0_FRAC 18
+#define SCLK_UART0 19
+#define CLK_UART1_SRC 20
+#define CLK_UART1_FRAC 21
+#define SCLK_UART1 22
+#define CLK_UART2_SRC 23
+#define CLK_UART2_FRAC 24
+#define SCLK_UART2 25
+#define CLK_UART3_SRC 26
+#define CLK_UART3_FRAC 27
+#define SCLK_UART3 28
+#define CLK_UART4_SRC 29
+#define CLK_UART4_FRAC 30
+#define SCLK_UART4 31
+#define CLK_UART5_SRC 32
+#define CLK_UART5_FRAC 33
+#define SCLK_UART5 34
+#define CLK_UART6_SRC 35
+#define CLK_UART6_FRAC 36
+#define SCLK_UART6 37
+#define CLK_UART7_SRC 38
+#define CLK_UART7_FRAC 39
+#define SCLK_UART7 40
+#define CLK_I2S0_2CH_SRC 41
+#define CLK_I2S0_2CH_FRAC 42
+#define MCLK_I2S0_2CH_SAI_SRC 43
+#define CLK_I2S3_8CH_SRC 44
+#define CLK_I2S3_8CH_FRAC 45
+#define MCLK_I2S3_8CH_SAI_SRC 46
+#define CLK_I2S1_8CH_SRC 47
+#define CLK_I2S1_8CH_FRAC 48
+#define MCLK_I2S1_8CH_SAI_SRC 49
+#define CLK_I2S2_2CH_SRC 50
+#define CLK_I2S2_2CH_FRAC 51
+#define MCLK_I2S2_2CH_SAI_SRC 52
+#define CLK_SPDIF_SRC 53
+#define CLK_SPDIF_FRAC 54
+#define MCLK_SPDIF_SRC 55
+#define DCLK_VOP_SRC0 56
+#define DCLK_VOP_SRC1 57
+#define CLK_HSM 58
+#define CLK_CORE_SRC_ACS 59
+#define CLK_CORE_SRC_PVTMUX 60
+#define CLK_CORE_SRC 61
+#define CLK_CORE 62
+#define ACLK_M_CORE_BIU 63
+#define CLK_CORE_PVTPLL_SRC 64
+#define PCLK_DBG 65
+#define SWCLKTCK 66
+#define CLK_SCANHS_CORE 67
+#define CLK_SCANHS_ACLKM_CORE 68
+#define CLK_SCANHS_PCLK_DBG 69
+#define CLK_SCANHS_PCLK_CPU_BIU 70
+#define PCLK_CPU_ROOT 71
+#define PCLK_CORE_GRF 72
+#define PCLK_DAPLITE_BIU 73
+#define PCLK_CPU_BIU 74
+#define CLK_REF_PVTPLL_CORE 75
+#define ACLK_BUS_VOPGL_ROOT 76
+#define ACLK_BUS_VOPGL_BIU 77
+#define ACLK_BUS_H_ROOT 78
+#define ACLK_BUS_H_BIU 79
+#define ACLK_BUS_ROOT 80
+#define HCLK_BUS_ROOT 81
+#define PCLK_BUS_ROOT 82
+#define ACLK_BUS_M_ROOT 83
+#define ACLK_SYSMEM_BIU 84
+#define CLK_TIMER_ROOT 85
+#define ACLK_BUS_BIU 86
+#define HCLK_BUS_BIU 87
+#define PCLK_BUS_BIU 88
+#define PCLK_DFT2APB 89
+#define PCLK_BUS_GRF 90
+#define ACLK_BUS_M_BIU 91
+#define ACLK_GIC 92
+#define ACLK_SPINLOCK 93
+#define ACLK_DMAC 94
+#define PCLK_TIMER 95
+#define CLK_TIMER0 96
+#define CLK_TIMER1 97
+#define CLK_TIMER2 98
+#define CLK_TIMER3 99
+#define CLK_TIMER4 100
+#define CLK_TIMER5 101
+#define PCLK_JDBCK_DAP 102
+#define CLK_JDBCK_DAP 103
+#define PCLK_WDT_NS 104
+#define TCLK_WDT_NS 105
+#define HCLK_TRNG_NS 106
+#define PCLK_UART0 107
+#define PCLK_DMA2DDR 108
+#define ACLK_DMA2DDR 109
+#define PCLK_PWM0 110
+#define CLK_PWM0 111
+#define CLK_CAPTURE_PWM0 112
+#define PCLK_PWM1 113
+#define CLK_PWM1 114
+#define CLK_CAPTURE_PWM1 115
+#define PCLK_SCR 116
+#define ACLK_DCF 117
+#define PCLK_INTMUX 118
+#define CLK_PPLL_I 119
+#define CLK_PPLL_MUX 120
+#define CLK_PPLL_100M_MATRIX 121
+#define CLK_PPLL_50M_MATRIX 122
+#define CLK_REF_PCIE_INNER_PHY 123
+#define CLK_REF_PCIE_100M_PHY 124
+#define ACLK_VPU_L_ROOT 125
+#define CLK_GMAC1_VPU_25M 126
+#define CLK_PPLL_125M_MATRIX 127
+#define ACLK_VPU_ROOT 128
+#define HCLK_VPU_ROOT 129
+#define PCLK_VPU_ROOT 130
+#define ACLK_VPU_BIU 131
+#define HCLK_VPU_BIU 132
+#define PCLK_VPU_BIU 133
+#define ACLK_VPU 134
+#define HCLK_VPU 135
+#define PCLK_CRU_PCIE 136
+#define PCLK_VPU_GRF 137
+#define HCLK_SFC 138
+#define SCLK_SFC 139
+#define CCLK_SRC_EMMC 140
+#define HCLK_EMMC 141
+#define ACLK_EMMC 142
+#define BCLK_EMMC 143
+#define TCLK_EMMC 144
+#define PCLK_GPIO1 145
+#define DBCLK_GPIO1 146
+#define ACLK_VPU_L_BIU 147
+#define PCLK_VPU_IOC 148
+#define HCLK_SAI_I2S0 149
+#define MCLK_SAI_I2S0 150
+#define HCLK_SAI_I2S2 151
+#define MCLK_SAI_I2S2 152
+#define PCLK_ACODEC 153
+#define MCLK_ACODEC_TX 154
+#define PCLK_GPIO3 155
+#define DBCLK_GPIO3 156
+#define PCLK_SPI1 157
+#define CLK_SPI1 158
+#define SCLK_IN_SPI1 159
+#define PCLK_UART2 160
+#define PCLK_UART5 161
+#define PCLK_UART6 162
+#define PCLK_UART7 163
+#define PCLK_I2C3 164
+#define CLK_I2C3 165
+#define PCLK_I2C5 166
+#define CLK_I2C5 167
+#define PCLK_I2C6 168
+#define CLK_I2C6 169
+#define ACLK_MAC_VPU 170
+#define PCLK_MAC_VPU 171
+#define CLK_GMAC1_RMII_VPU 172
+#define CLK_GMAC1_SRC_VPU 173
+#define PCLK_PCIE 174
+#define CLK_PCIE_AUX 175
+#define ACLK_PCIE 176
+#define HCLK_PCIE_SLV 177
+#define HCLK_PCIE_DBI 178
+#define PCLK_PCIE_PHY 179
+#define PCLK_PIPE_GRF 180
+#define CLK_PIPE_USB3OTG_COMBO 181
+#define CLK_UTMI_USB3OTG 182
+#define CLK_PCIE_PIPE_PHY 183
+#define CCLK_SRC_SDIO0 184
+#define HCLK_SDIO0 185
+#define CCLK_SRC_SDIO1 186
+#define HCLK_SDIO1 187
+#define CLK_TS_0 188
+#define CLK_TS_1 189
+#define PCLK_CAN2 190
+#define CLK_CAN2 191
+#define PCLK_CAN3 192
+#define CLK_CAN3 193
+#define PCLK_SARADC 194
+#define CLK_SARADC 195
+#define PCLK_TSADC 196
+#define CLK_TSADC 197
+#define CLK_TSADC_TSEN 198
+#define ACLK_USB3OTG 199
+#define CLK_REF_USB3OTG 200
+#define CLK_SUSPEND_USB3OTG 201
+#define ACLK_GPU_ROOT 202
+#define PCLK_GPU_ROOT 203
+#define ACLK_GPU_BIU 204
+#define PCLK_GPU_BIU 205
+#define ACLK_GPU 206
+#define CLK_GPU_PVTPLL_SRC 207
+#define ACLK_GPU_MALI 208
+#define HCLK_RKVENC_ROOT 209
+#define ACLK_RKVENC_ROOT 210
+#define PCLK_RKVENC_ROOT 211
+#define HCLK_RKVENC_BIU 212
+#define ACLK_RKVENC_BIU 213
+#define PCLK_RKVENC_BIU 214
+#define HCLK_RKVENC 215
+#define ACLK_RKVENC 216
+#define CLK_CORE_RKVENC 217
+#define HCLK_SAI_I2S1 218
+#define MCLK_SAI_I2S1 219
+#define PCLK_I2C1 220
+#define CLK_I2C1 221
+#define PCLK_I2C0 222
+#define CLK_I2C0 223
+#define CLK_UART_JTAG 224
+#define PCLK_SPI0 225
+#define CLK_SPI0 226
+#define SCLK_IN_SPI0 227
+#define PCLK_GPIO4 228
+#define DBCLK_GPIO4 229
+#define PCLK_RKVENC_IOC 230
+#define HCLK_SPDIF 231
+#define MCLK_SPDIF 232
+#define HCLK_PDM 233
+#define MCLK_PDM 234
+#define PCLK_UART1 235
+#define PCLK_UART3 236
+#define PCLK_RKVENC_GRF 237
+#define PCLK_CAN0 238
+#define CLK_CAN0 239
+#define PCLK_CAN1 240
+#define CLK_CAN1 241
+#define ACLK_VO_ROOT 242
+#define HCLK_VO_ROOT 243
+#define PCLK_VO_ROOT 244
+#define ACLK_VO_BIU 245
+#define HCLK_VO_BIU 246
+#define PCLK_VO_BIU 247
+#define HCLK_RGA2E 248
+#define ACLK_RGA2E 249
+#define CLK_CORE_RGA2E 250
+#define HCLK_VDPP 251
+#define ACLK_VDPP 252
+#define CLK_CORE_VDPP 253
+#define PCLK_VO_GRF 254
+#define PCLK_CRU 255
+#define ACLK_VOP_ROOT 256
+#define ACLK_VOP_BIU 257
+#define HCLK_VOP 258
+#define DCLK_VOP0 259
+#define DCLK_VOP1 260
+#define ACLK_VOP 261
+#define PCLK_HDMI 262
+#define CLK_SFR_HDMI 263
+#define CLK_CEC_HDMI 264
+#define CLK_SPDIF_HDMI 265
+#define CLK_HDMIPHY_TMDSSRC 266
+#define CLK_HDMIPHY_PREP 267
+#define PCLK_HDMIPHY 268
+#define HCLK_HDCP_KEY 269
+#define ACLK_HDCP 270
+#define HCLK_HDCP 271
+#define PCLK_HDCP 272
+#define HCLK_CVBS 273
+#define DCLK_CVBS 274
+#define DCLK_4X_CVBS 275
+#define ACLK_JPEG_DECODER 276
+#define HCLK_JPEG_DECODER 277
+#define ACLK_VO_L_ROOT 278
+#define ACLK_VO_L_BIU 279
+#define ACLK_MAC_VO 280
+#define PCLK_MAC_VO 281
+#define CLK_GMAC0_SRC 282
+#define CLK_GMAC0_RMII_50M 283
+#define CLK_GMAC0_TX 284
+#define CLK_GMAC0_RX 285
+#define ACLK_JPEG_ROOT 286
+#define ACLK_JPEG_BIU 287
+#define HCLK_SAI_I2S3 288
+#define MCLK_SAI_I2S3 289
+#define CLK_MACPHY 290
+#define PCLK_VCDCPHY 291
+#define PCLK_GPIO2 292
+#define DBCLK_GPIO2 293
+#define PCLK_VO_IOC 294
+#define CCLK_SRC_SDMMC0 295
+#define HCLK_SDMMC0 296
+#define PCLK_OTPC_NS 297
+#define CLK_SBPI_OTPC_NS 298
+#define CLK_USER_OTPC_NS 299
+#define CLK_HDMIHDP0 300
+#define HCLK_USBHOST 301
+#define HCLK_USBHOST_ARB 302
+#define CLK_USBHOST_OHCI 303
+#define CLK_USBHOST_UTMI 304
+#define PCLK_UART4 305
+#define PCLK_I2C4 306
+#define CLK_I2C4 307
+#define PCLK_I2C7 308
+#define CLK_I2C7 309
+#define PCLK_USBPHY 310
+#define CLK_REF_USBPHY 311
+#define HCLK_RKVDEC_ROOT 312
+#define ACLK_RKVDEC_ROOT_NDFT 313
+#define PCLK_DDRPHY_CRU 314
+#define HCLK_RKVDEC_BIU 315
+#define ACLK_RKVDEC_BIU 316
+#define ACLK_RKVDEC 317
+#define HCLK_RKVDEC 318
+#define CLK_HEVC_CA_RKVDEC 319
+#define ACLK_RKVDEC_PVTMUX_ROOT 320
+#define CLK_RKVDEC_PVTPLL_SRC 321
+#define PCLK_DDR_ROOT 322
+#define PCLK_DDR_BIU 323
+#define PCLK_DDRC 324
+#define PCLK_DDRMON 325
+#define CLK_TIMER_DDRMON 326
+#define PCLK_MSCH_BIU 327
+#define PCLK_DDR_GRF 328
+#define PCLK_DDR_HWLP 329
+#define PCLK_DDRPHY 330
+#define CLK_MSCH_BIU 331
+#define ACLK_DDR_UPCTL 332
+#define CLK_DDR_UPCTL 333
+#define CLK_DDRMON 334
+#define ACLK_DDR_SCRAMBLE 335
+#define ACLK_SPLIT 336
+#define CLK_DDRC_SRC 337
+#define CLK_DDR_PHY 338
+#define PCLK_OTPC_S 339
+#define CLK_SBPI_OTPC_S 340
+#define CLK_USER_OTPC_S 341
+#define PCLK_KEYREADER 342
+#define PCLK_BUS_SGRF 343
+#define PCLK_STIMER 344
+#define CLK_STIMER0 345
+#define CLK_STIMER1 346
+#define PCLK_WDT_S 347
+#define TCLK_WDT_S 348
+#define HCLK_TRNG_S 349
+#define HCLK_BOOTROM 350
+#define PCLK_DCF 351
+#define ACLK_SYSMEM 352
+#define HCLK_TSP 353
+#define ACLK_TSP 354
+#define CLK_CORE_TSP 355
+#define CLK_OTPC_ARB 356
+#define PCLK_OTP_MASK 357
+#define CLK_PMC_OTP 358
+#define PCLK_PMU_ROOT 359
+#define HCLK_PMU_ROOT 360
+#define PCLK_I2C2 361
+#define CLK_I2C2 362
+#define HCLK_PMU_BIU 363
+#define PCLK_PMU_BIU 364
+#define FCLK_MCU 365
+#define RTC_CLK_MCU 366
+#define PCLK_OSCCHK 367
+#define CLK_PMU_MCU_JTAG 368
+#define PCLK_PMU 369
+#define PCLK_GPIO0 370
+#define DBCLK_GPIO0 371
+#define XIN_OSC0_DIV 372
+#define CLK_DEEPSLOW 373
+#define CLK_DDR_FAIL_SAFE 374
+#define PCLK_PMU_HP_TIMER 375
+#define CLK_PMU_HP_TIMER 376
+#define CLK_PMU_32K_HP_TIMER 377
+#define PCLK_PMU_IOC 378
+#define PCLK_PMU_CRU 379
+#define PCLK_PMU_GRF 380
+#define PCLK_PMU_WDT 381
+#define TCLK_PMU_WDT 382
+#define PCLK_PMU_MAILBOX 383
+#define PCLK_SCRKEYGEN 384
+#define CLK_SCRKEYGEN 385
+#define CLK_PVTM_OSCCHK 386
+#define CLK_REFOUT 387
+#define CLK_PVTM_PMU 388
+#define PCLK_PVTM_PMU 389
+#define PCLK_PMU_SGRF 390
+#define HCLK_PMU_SRAM 391
+#define CLK_UART0 392
+#define CLK_UART1 393
+#define CLK_UART2 394
+#define CLK_UART3 395
+#define CLK_UART4 396
+#define CLK_UART5 397
+#define CLK_UART6 398
+#define CLK_UART7 399
+#define MCLK_I2S0_2CH_SAI_SRC_PRE 400
+#define MCLK_I2S1_8CH_SAI_SRC_PRE 401
+#define MCLK_I2S2_2CH_SAI_SRC_PRE 402
+#define MCLK_I2S3_8CH_SAI_SRC_PRE 403
+#define MCLK_SDPDIF_SRC_PRE 404
+#define SCLK_SDMMC_DRV 405
+#define SCLK_SDMMC_SAMPLE 406
+#define SCLK_SDIO0_DRV 407
+#define SCLK_SDIO0_SAMPLE 408
+#define SCLK_SDIO1_DRV 409
+#define SCLK_SDIO1_SAMPLE 410
+
+/* scmi-clocks indices */
+#define SCMI_PCLK_KEYREADER 0
+#define SCMI_HCLK_KLAD 1
+#define SCMI_PCLK_KLAD 2
+#define SCMI_HCLK_TRNG_S 3
+#define SCMI_HCLK_CRYPTO_S 4
+#define SCMI_PCLK_WDT_S 5
+#define SCMI_TCLK_WDT_S 6
+#define SCMI_PCLK_STIMER 7
+#define SCMI_CLK_STIMER0 8
+#define SCMI_CLK_STIMER1 9
+#define SCMI_PCLK_OTP_MASK 10
+#define SCMI_PCLK_OTPC_S 11
+#define SCMI_CLK_SBPI_OTPC_S 12
+#define SCMI_CLK_USER_OTPC_S 13
+#define SCMI_CLK_PMC_OTP 14
+#define SCMI_CLK_OTPC_ARB 15
+#define SCMI_CLK_CORE_TSP 16
+#define SCMI_ACLK_TSP 17
+#define SCMI_HCLK_TSP 18
+#define SCMI_PCLK_DCF 19
+#define SCMI_CLK_DDR 20
+#define SCMI_CLK_CPU 21
+#define SCMI_CLK_GPU 22
+#define SCMI_CORE_CRYPTO 23
+#define SCMI_ACLK_CRYPTO 24
+#define SCMI_PKA_CRYPTO 25
+#define SCMI_HCLK_CRYPTO 26
+#define SCMI_CORE_CRYPTO_S 27
+#define SCMI_ACLK_CRYPTO_S 28
+#define SCMI_PKA_CRYPTO_S 29
+#define SCMI_CORE_KLAD 30
+#define SCMI_ACLK_KLAD 31
+#define SCMI_HCLK_TRNG 32
+
+#endif // _DT_BINDINGS_CLK_ROCKCHIP_RK3528_H
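
These indices are consumed from device trees through single-cell clock specifiers. A minimal consumer sketch, assuming the usual &cru phandle and a &uart0 label from the SoC dtsi (both hypothetical here), with the standard dw-apb-uart clock-names:

#include <dt-bindings/clock/rockchip,rk3528-cru.h>

/* Hypothetical consumer: UART0 takes its baud clock and APB clock from the CRU. */
&uart0 {
	clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
	clock-names = "baudclk", "apb_pclk";
};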
diff --git a/include/dt-bindings/clock/rockchip,rk3562-cru.h b/include/dt-bindings/clock/rockchip,rk3562-cru.h
new file mode 100644
index 000000000000..a5b0b153209c
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rk3562-cru.h
@@ -0,0 +1,379 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022-2025 Rockchip Electronics Co., Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3562_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3562_H
+
+/* cru-clocks indices */
+
+/* cru plls */
+#define PLL_DMPLL0 0
+#define PLL_APLL 1
+#define PLL_GPLL 2
+#define PLL_VPLL 3
+#define PLL_HPLL 4
+#define PLL_CPLL 5
+#define PLL_DPLL 6
+#define PLL_DMPLL1 7
+
+/* cru clocks */
+#define ARMCLK 8
+#define CLK_GPU 9
+#define ACLK_RKNN 10
+#define CLK_DDR 11
+#define CLK_MATRIX_50M_SRC 12
+#define CLK_MATRIX_100M_SRC 13
+#define CLK_MATRIX_125M_SRC 14
+#define CLK_MATRIX_200M_SRC 15
+#define CLK_MATRIX_300M_SRC 16
+#define ACLK_TOP 17
+#define ACLK_TOP_VIO 18
+#define CLK_CAM0_OUT2IO 19
+#define CLK_CAM1_OUT2IO 20
+#define CLK_CAM2_OUT2IO 21
+#define CLK_CAM3_OUT2IO 22
+#define ACLK_BUS 23
+#define HCLK_BUS 24
+#define PCLK_BUS 25
+#define PCLK_I2C1 26
+#define PCLK_I2C2 27
+#define PCLK_I2C3 28
+#define PCLK_I2C4 29
+#define PCLK_I2C5 30
+#define CLK_I2C 31
+#define CLK_I2C1 32
+#define CLK_I2C2 33
+#define CLK_I2C3 34
+#define CLK_I2C4 35
+#define CLK_I2C5 36
+#define DCLK_BUS_GPIO 37
+#define DCLK_BUS_GPIO3 38
+#define DCLK_BUS_GPIO4 39
+#define PCLK_TIMER 40
+#define CLK_TIMER0 41
+#define CLK_TIMER1 42
+#define CLK_TIMER2 43
+#define CLK_TIMER3 44
+#define CLK_TIMER4 45
+#define CLK_TIMER5 46
+#define PCLK_STIMER 47
+#define CLK_STIMER0 48
+#define CLK_STIMER1 49
+#define PCLK_WDTNS 50
+#define CLK_WDTNS 51
+#define PCLK_GRF 52
+#define PCLK_SGRF 53
+#define PCLK_MAILBOX 54
+#define PCLK_INTC 55
+#define ACLK_BUS_GIC400 56
+#define ACLK_BUS_SPINLOCK 57
+#define ACLK_DCF 58
+#define PCLK_DCF 59
+#define FCLK_BUS_CM0_CORE 60
+#define CLK_BUS_CM0_RTC 61
+#define HCLK_ICACHE 62
+#define HCLK_DCACHE 63
+#define PCLK_TSADC 64
+#define CLK_TSADC 65
+#define CLK_TSADC_TSEN 66
+#define PCLK_DFT2APB 67
+#define CLK_SARADC_VCCIO156 68
+#define PCLK_GMAC 69
+#define ACLK_GMAC 70
+#define CLK_GMAC_125M_CRU_I 71
+#define CLK_GMAC_50M_CRU_I 72
+#define CLK_GMAC_50M_O 73
+#define CLK_GMAC_ETH_OUT2IO 74
+#define PCLK_APB2ASB_VCCIO156 75
+#define PCLK_TO_VCCIO156 76
+#define PCLK_DSIPHY 77
+#define PCLK_DSITX 78
+#define PCLK_CPU_EMA_DET 79
+#define PCLK_HASH 80
+#define PCLK_TOPCRU 81
+#define PCLK_ASB2APB_VCCIO156 82
+#define PCLK_IOC_VCCIO156 83
+#define PCLK_GPIO3_VCCIO156 84
+#define PCLK_GPIO4_VCCIO156 85
+#define PCLK_SARADC_VCCIO156 86
+#define PCLK_MAC100 87
+#define ACLK_MAC100 89
+#define CLK_MAC100_50M_MATRIX 90
+#define HCLK_CORE 91
+#define PCLK_DDR 92
+#define CLK_MSCH_BRG_BIU 93
+#define PCLK_DDR_HWLP 94
+#define PCLK_DDR_UPCTL 95
+#define PCLK_DDR_PHY 96
+#define PCLK_DDR_DFICTL 97
+#define PCLK_DDR_DMA2DDR 98
+#define PCLK_DDR_MON 99
+#define TMCLK_DDR_MON 100
+#define PCLK_DDR_GRF 101
+#define PCLK_DDR_CRU 102
+#define PCLK_SUBDDR_CRU 103
+#define CLK_GPU_PRE 104
+#define ACLK_GPU_PRE 105
+#define CLK_GPU_BRG 107
+#define CLK_NPU_PRE 108
+#define HCLK_NPU_PRE 109
+#define HCLK_RKNN 111
+#define ACLK_PERI 112
+#define HCLK_PERI 113
+#define PCLK_PERI 114
+#define PCLK_PERICRU 115
+#define HCLK_SAI0 116
+#define CLK_SAI0_SRC 117
+#define CLK_SAI0_FRAC 118
+#define CLK_SAI0 119
+#define MCLK_SAI0 120
+#define MCLK_SAI0_OUT2IO 121
+#define HCLK_SAI1 122
+#define CLK_SAI1_SRC 123
+#define CLK_SAI1_FRAC 124
+#define CLK_SAI1 125
+#define MCLK_SAI1 126
+#define MCLK_SAI1_OUT2IO 127
+#define HCLK_SAI2 128
+#define CLK_SAI2_SRC 129
+#define CLK_SAI2_FRAC 130
+#define CLK_SAI2 131
+#define MCLK_SAI2 132
+#define MCLK_SAI2_OUT2IO 133
+#define HCLK_DSM 134
+#define CLK_DSM 135
+#define HCLK_PDM 136
+#define MCLK_PDM 137
+#define HCLK_SPDIF 138
+#define CLK_SPDIF_SRC 139
+#define CLK_SPDIF_FRAC 140
+#define CLK_SPDIF 141
+#define MCLK_SPDIF 142
+#define HCLK_SDMMC0 143
+#define CCLK_SDMMC0 144
+#define HCLK_SDMMC1 145
+#define CCLK_SDMMC1 146
+#define SCLK_SDMMC0_DRV 147
+#define SCLK_SDMMC0_SAMPLE 148
+#define SCLK_SDMMC1_DRV 149
+#define SCLK_SDMMC1_SAMPLE 150
+#define HCLK_EMMC 151
+#define ACLK_EMMC 152
+#define CCLK_EMMC 153
+#define BCLK_EMMC 154
+#define TMCLK_EMMC 155
+#define SCLK_SFC 156
+#define HCLK_SFC 157
+#define HCLK_USB2HOST 158
+#define HCLK_USB2HOST_ARB 159
+#define PCLK_SPI1 160
+#define CLK_SPI1 161
+#define SCLK_IN_SPI1 162
+#define PCLK_SPI2 163
+#define CLK_SPI2 164
+#define SCLK_IN_SPI2 165
+#define PCLK_UART1 166
+#define PCLK_UART2 167
+#define PCLK_UART3 168
+#define PCLK_UART4 169
+#define PCLK_UART5 170
+#define PCLK_UART6 171
+#define PCLK_UART7 172
+#define PCLK_UART8 173
+#define PCLK_UART9 174
+#define CLK_UART1_SRC 175
+#define CLK_UART1_FRAC 176
+#define CLK_UART1 177
+#define SCLK_UART1 178
+#define CLK_UART2_SRC 179
+#define CLK_UART2_FRAC 180
+#define CLK_UART2 181
+#define SCLK_UART2 182
+#define CLK_UART3_SRC 183
+#define CLK_UART3_FRAC 184
+#define CLK_UART3 185
+#define SCLK_UART3 186
+#define CLK_UART4_SRC 187
+#define CLK_UART4_FRAC 188
+#define CLK_UART4 189
+#define SCLK_UART4 190
+#define CLK_UART5_SRC 191
+#define CLK_UART5_FRAC 192
+#define CLK_UART5 193
+#define SCLK_UART5 194
+#define CLK_UART6_SRC 195
+#define CLK_UART6_FRAC 196
+#define CLK_UART6 197
+#define SCLK_UART6 198
+#define CLK_UART7_SRC 199
+#define CLK_UART7_FRAC 200
+#define CLK_UART7 201
+#define SCLK_UART7 202
+#define CLK_UART8_SRC 203
+#define CLK_UART8_FRAC 204
+#define CLK_UART8 205
+#define SCLK_UART8 206
+#define CLK_UART9_SRC 207
+#define CLK_UART9_FRAC 208
+#define CLK_UART9 209
+#define SCLK_UART9 210
+#define PCLK_PWM1_PERI 211
+#define CLK_PWM1_PERI 212
+#define CLK_CAPTURE_PWM1_PERI 213
+#define PCLK_PWM2_PERI 214
+#define CLK_PWM2_PERI 215
+#define CLK_CAPTURE_PWM2_PERI 216
+#define PCLK_PWM3_PERI 217
+#define CLK_PWM3_PERI 218
+#define CLK_CAPTURE_PWM3_PERI 219
+#define PCLK_CAN0 220
+#define CLK_CAN0 221
+#define PCLK_CAN1 222
+#define CLK_CAN1 223
+#define ACLK_CRYPTO 224
+#define HCLK_CRYPTO 225
+#define PCLK_CRYPTO 226
+#define CLK_CORE_CRYPTO 227
+#define CLK_PKA_CRYPTO 228
+#define HCLK_KLAD 229
+#define PCLK_KEY_READER 230
+#define HCLK_RK_RNG_NS 231
+#define HCLK_RK_RNG_S 232
+#define HCLK_TRNG_NS 233
+#define HCLK_TRNG_S 234
+#define HCLK_CRYPTO_S 235
+#define PCLK_PERI_WDT 236
+#define TCLK_PERI_WDT 237
+#define ACLK_SYSMEM 238
+#define HCLK_BOOTROM 239
+#define PCLK_PERI_GRF 240
+#define ACLK_DMAC 241
+#define ACLK_RKDMAC 242
+#define PCLK_OTPC_NS 243
+#define CLK_SBPI_OTPC_NS 244
+#define CLK_USER_OTPC_NS 245
+#define PCLK_OTPC_S 246
+#define CLK_SBPI_OTPC_S 247
+#define CLK_USER_OTPC_S 248
+#define CLK_OTPC_ARB 249
+#define PCLK_OTPPHY 250
+#define PCLK_USB2PHY 251
+#define PCLK_PIPEPHY 252
+#define PCLK_SARADC 253
+#define CLK_SARADC 254
+#define PCLK_IOC_VCCIO234 255
+#define PCLK_PERI_GPIO1 256
+#define PCLK_PERI_GPIO2 257
+#define DCLK_PERI_GPIO 258
+#define DCLK_PERI_GPIO1 259
+#define DCLK_PERI_GPIO2 260
+#define ACLK_PHP 261
+#define PCLK_PHP 262
+#define ACLK_PCIE20_MST 263
+#define ACLK_PCIE20_SLV 264
+#define ACLK_PCIE20_DBI 265
+#define PCLK_PCIE20 266
+#define CLK_PCIE20_AUX 267
+#define ACLK_USB3OTG 268
+#define CLK_USB3OTG_SUSPEND 269
+#define CLK_USB3OTG_REF 270
+#define CLK_PIPEPHY_REF_FUNC 271
+#define CLK_200M_PMU 272
+#define CLK_RTC_32K 273
+#define CLK_RTC32K_FRAC 274
+#define BUSCLK_PDPMU0 275
+#define PCLK_PMU0_CRU 276
+#define PCLK_PMU0_PMU 277
+#define CLK_PMU0_PMU 278
+#define PCLK_PMU0_HP_TIMER 279
+#define CLK_PMU0_HP_TIMER 280
+#define CLK_PMU0_32K_HP_TIMER 281
+#define PCLK_PMU0_PVTM 282
+#define CLK_PMU0_PVTM 283
+#define PCLK_IOC_PMUIO 284
+#define PCLK_PMU0_GPIO0 285
+#define DBCLK_PMU0_GPIO0 286
+#define PCLK_PMU0_GRF 287
+#define PCLK_PMU0_SGRF 288
+#define CLK_DDR_FAIL_SAFE 289
+#define PCLK_PMU0_SCRKEYGEN 290
+#define PCLK_PMU1_CRU 291
+#define HCLK_PMU1_MEM 292
+#define PCLK_PMU0_I2C0 293
+#define CLK_PMU0_I2C0 294
+#define PCLK_PMU1_UART0 295
+#define CLK_PMU1_UART0_SRC 296
+#define CLK_PMU1_UART0_FRAC 297
+#define CLK_PMU1_UART0 298
+#define SCLK_PMU1_UART0 299
+#define PCLK_PMU1_SPI0 300
+#define CLK_PMU1_SPI0 301
+#define SCLK_IN_PMU1_SPI0 302
+#define PCLK_PMU1_PWM0 303
+#define CLK_PMU1_PWM0 304
+#define CLK_CAPTURE_PMU1_PWM0 305
+#define CLK_PMU1_WIFI 306
+#define FCLK_PMU1_CM0_CORE 307
+#define CLK_PMU1_CM0_RTC 308
+#define PCLK_PMU1_WDTNS 309
+#define CLK_PMU1_WDTNS 310
+#define PCLK_PMU1_MAILBOX 311
+#define CLK_PIPEPHY_DIV 312
+#define CLK_PIPEPHY_XIN24M 313
+#define CLK_PIPEPHY_REF 314
+#define CLK_24M_SSCSRC 315
+#define CLK_USB2PHY_XIN24M 316
+#define CLK_USB2PHY_REF 317
+#define CLK_MIPIDSIPHY_XIN24M 318
+#define CLK_MIPIDSIPHY_REF 319
+#define ACLK_RGA_PRE 320
+#define HCLK_RGA_PRE 321
+#define ACLK_RGA 322
+#define HCLK_RGA 323
+#define CLK_RGA_CORE 324
+#define ACLK_JDEC 325
+#define HCLK_JDEC 326
+#define ACLK_VDPU_PRE 327
+#define CLK_RKVDEC_HEVC_CA 328
+#define HCLK_VDPU_PRE 329
+#define ACLK_RKVDEC 330
+#define HCLK_RKVDEC 331
+#define CLK_RKVENC_CORE 332
+#define ACLK_VEPU_PRE 333
+#define HCLK_VEPU_PRE 334
+#define ACLK_RKVENC 335
+#define HCLK_RKVENC 336
+#define ACLK_VI 337
+#define HCLK_VI 338
+#define PCLK_VI 339
+#define ACLK_ISP 340
+#define HCLK_ISP 341
+#define CLK_ISP 342
+#define ACLK_VICAP 343
+#define HCLK_VICAP 344
+#define DCLK_VICAP 345
+#define CSIRX0_CLK_DATA 346
+#define CSIRX1_CLK_DATA 347
+#define CSIRX2_CLK_DATA 348
+#define CSIRX3_CLK_DATA 349
+#define PCLK_CSIHOST0 350
+#define PCLK_CSIHOST1 351
+#define PCLK_CSIHOST2 352
+#define PCLK_CSIHOST3 353
+#define PCLK_CSIPHY0 354
+#define PCLK_CSIPHY1 355
+#define ACLK_VO_PRE 356
+#define HCLK_VO_PRE 357
+#define ACLK_VOP 358
+#define HCLK_VOP 359
+#define DCLK_VOP 360
+#define DCLK_VOP1 361
+#define ACLK_CRYPTO_S 362
+#define PCLK_CRYPTO_S 363
+#define CLK_CORE_CRYPTO_S 364
+#define CLK_PKA_CRYPTO_S 365
+
+#endif
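
On the provider side, the CRU exports these IDs with #clock-cells = <1>, so a clock specifier is the CRU phandle plus one index from this header. A minimal provider sketch, abbreviated (it normally sits under the soc bus node); the register address is an illustrative placeholder:

#include <dt-bindings/clock/rockchip,rk3562-cru.h>

/* Register address is an illustrative placeholder, not taken from the rk3562 TRM. */
cru: clock-controller@ff100000 {
	compatible = "rockchip,rk3562-cru";
	reg = <0x0 0xff100000 0x0 0x40000>;
	#clock-cells = <1>;
	#reset-cells = <1>;
};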
diff --git a/include/dt-bindings/clock/rockchip,rk3576-cru.h b/include/dt-bindings/clock/rockchip,rk3576-cru.h
new file mode 100644
index 000000000000..ded5ce42e62a
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rk3576-cru.h
@@ -0,0 +1,607 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2023 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ *
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ * Author: Detlev Casanova <detlev.casanova@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3576_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3576_H
+
+/* cru-clocks indices */
+
+/* cru plls */
+#define PLL_BPLL 0
+#define PLL_LPLL 1
+#define PLL_VPLL 2
+#define PLL_AUPLL 3
+#define PLL_CPLL 4
+#define PLL_GPLL 5
+#define PLL_PPLL 6
+#define ARMCLK_L 7
+#define ARMCLK_B 8
+
+/* cru clocks */
+#define CLK_CPLL_DIV20 9
+#define CLK_CPLL_DIV10 10
+#define CLK_GPLL_DIV8 11
+#define CLK_GPLL_DIV6 12
+#define CLK_CPLL_DIV4 13
+#define CLK_GPLL_DIV4 14
+#define CLK_SPLL_DIV2 15
+#define CLK_GPLL_DIV3 16
+#define CLK_CPLL_DIV2 17
+#define CLK_GPLL_DIV2 18
+#define CLK_SPLL_DIV1 19
+#define PCLK_TOP_ROOT 20
+#define ACLK_TOP 21
+#define HCLK_TOP 22
+#define CLK_AUDIO_FRAC_0 23
+#define CLK_AUDIO_FRAC_1 24
+#define CLK_AUDIO_FRAC_2 25
+#define CLK_AUDIO_FRAC_3 26
+#define CLK_UART_FRAC_0 27
+#define CLK_UART_FRAC_1 28
+#define CLK_UART_FRAC_2 29
+#define CLK_UART1_SRC_TOP 30
+#define CLK_AUDIO_INT_0 31
+#define CLK_AUDIO_INT_1 32
+#define CLK_AUDIO_INT_2 33
+#define CLK_PDM0_SRC_TOP 34
+#define CLK_PDM1_OUT 35
+#define CLK_GMAC0_125M_SRC 36
+#define CLK_GMAC1_125M_SRC 37
+#define LCLK_ASRC_SRC_0 38
+#define LCLK_ASRC_SRC_1 39
+#define REF_CLK0_OUT_PLL 40
+#define REF_CLK1_OUT_PLL 41
+#define REF_CLK2_OUT_PLL 42
+#define REFCLKO25M_GMAC0_OUT 43
+#define REFCLKO25M_GMAC1_OUT 44
+#define CLK_CIFOUT_OUT 45
+#define CLK_GMAC0_RMII_CRU 46
+#define CLK_GMAC1_RMII_CRU 47
+#define CLK_OTPC_AUTO_RD_G 48
+#define CLK_OTP_PHY_G 49
+#define CLK_MIPI_CAMERAOUT_M0 50
+#define CLK_MIPI_CAMERAOUT_M1 51
+#define CLK_MIPI_CAMERAOUT_M2 52
+#define MCLK_PDM0_SRC_TOP 53
+#define HCLK_AUDIO_ROOT 54
+#define HCLK_ASRC_2CH_0 55
+#define HCLK_ASRC_2CH_1 56
+#define HCLK_ASRC_4CH_0 57
+#define HCLK_ASRC_4CH_1 58
+#define CLK_ASRC_2CH_0 59
+#define CLK_ASRC_2CH_1 60
+#define CLK_ASRC_4CH_0 61
+#define CLK_ASRC_4CH_1 62
+#define MCLK_SAI0_8CH_SRC 63
+#define MCLK_SAI0_8CH 64
+#define HCLK_SAI0_8CH 65
+#define HCLK_SPDIF_RX0 66
+#define MCLK_SPDIF_RX0 67
+#define HCLK_SPDIF_RX1 68
+#define MCLK_SPDIF_RX1 69
+#define MCLK_SAI1_8CH_SRC 70
+#define MCLK_SAI1_8CH 71
+#define HCLK_SAI1_8CH 72
+#define MCLK_SAI2_2CH_SRC 73
+#define MCLK_SAI2_2CH 74
+#define HCLK_SAI2_2CH 75
+#define MCLK_SAI3_2CH_SRC 76
+#define MCLK_SAI3_2CH 77
+#define HCLK_SAI3_2CH 78
+#define MCLK_SAI4_2CH_SRC 79
+#define MCLK_SAI4_2CH 80
+#define HCLK_SAI4_2CH 81
+#define HCLK_ACDCDIG_DSM 82
+#define MCLK_ACDCDIG_DSM 83
+#define CLK_PDM1 84
+#define HCLK_PDM1 85
+#define MCLK_PDM1 86
+#define HCLK_SPDIF_TX0 87
+#define MCLK_SPDIF_TX0 88
+#define HCLK_SPDIF_TX1 89
+#define MCLK_SPDIF_TX1 90
+#define CLK_SAI1_MCLKOUT 91
+#define CLK_SAI2_MCLKOUT 92
+#define CLK_SAI3_MCLKOUT 93
+#define CLK_SAI4_MCLKOUT 94
+#define CLK_SAI0_MCLKOUT 95
+#define HCLK_BUS_ROOT 96
+#define PCLK_BUS_ROOT 97
+#define ACLK_BUS_ROOT 98
+#define HCLK_CAN0 99
+#define CLK_CAN0 100
+#define HCLK_CAN1 101
+#define CLK_CAN1 102
+#define CLK_KEY_SHIFT 103
+#define PCLK_I2C1 104
+#define PCLK_I2C2 105
+#define PCLK_I2C3 106
+#define PCLK_I2C4 107
+#define PCLK_I2C5 108
+#define PCLK_I2C6 109
+#define PCLK_I2C7 110
+#define PCLK_I2C8 111
+#define PCLK_I2C9 112
+#define PCLK_WDT_BUSMCU 113
+#define TCLK_WDT_BUSMCU 114
+#define ACLK_GIC 115
+#define CLK_I2C1 116
+#define CLK_I2C2 117
+#define CLK_I2C3 118
+#define CLK_I2C4 119
+#define CLK_I2C5 120
+#define CLK_I2C6 121
+#define CLK_I2C7 122
+#define CLK_I2C8 123
+#define CLK_I2C9 124
+#define PCLK_SARADC 125
+#define CLK_SARADC 126
+#define PCLK_TSADC 127
+#define CLK_TSADC 128
+#define PCLK_UART0 129
+#define PCLK_UART2 130
+#define PCLK_UART3 131
+#define PCLK_UART4 132
+#define PCLK_UART5 133
+#define PCLK_UART6 134
+#define PCLK_UART7 135
+#define PCLK_UART8 136
+#define PCLK_UART9 137
+#define PCLK_UART10 138
+#define PCLK_UART11 139
+#define SCLK_UART0 140
+#define SCLK_UART2 141
+#define SCLK_UART3 142
+#define SCLK_UART4 143
+#define SCLK_UART5 144
+#define SCLK_UART6 145
+#define SCLK_UART7 146
+#define SCLK_UART8 147
+#define SCLK_UART9 148
+#define SCLK_UART10 149
+#define SCLK_UART11 150
+#define PCLK_SPI0 151
+#define PCLK_SPI1 152
+#define PCLK_SPI2 153
+#define PCLK_SPI3 154
+#define PCLK_SPI4 155
+#define CLK_SPI0 156
+#define CLK_SPI1 157
+#define CLK_SPI2 158
+#define CLK_SPI3 159
+#define CLK_SPI4 160
+#define PCLK_WDT0 161
+#define TCLK_WDT0 162
+#define PCLK_PWM1 163
+#define CLK_PWM1 164
+#define CLK_OSC_PWM1 165
+#define CLK_RC_PWM1 166
+#define PCLK_BUSTIMER0 167
+#define PCLK_BUSTIMER1 168
+#define CLK_TIMER0_ROOT 169
+#define CLK_TIMER0 170
+#define CLK_TIMER1 171
+#define CLK_TIMER2 172
+#define CLK_TIMER3 173
+#define CLK_TIMER4 174
+#define CLK_TIMER5 175
+#define PCLK_MAILBOX0 176
+#define PCLK_GPIO1 177
+#define DBCLK_GPIO1 178
+#define PCLK_GPIO2 179
+#define DBCLK_GPIO2 180
+#define PCLK_GPIO3 181
+#define DBCLK_GPIO3 182
+#define PCLK_GPIO4 183
+#define DBCLK_GPIO4 184
+#define ACLK_DECOM 185
+#define PCLK_DECOM 186
+#define DCLK_DECOM 187
+#define CLK_TIMER1_ROOT 188
+#define CLK_TIMER6 189
+#define CLK_TIMER7 190
+#define CLK_TIMER8 191
+#define CLK_TIMER9 192
+#define CLK_TIMER10 193
+#define CLK_TIMER11 194
+#define ACLK_DMAC0 195
+#define ACLK_DMAC1 196
+#define ACLK_DMAC2 197
+#define ACLK_SPINLOCK 198
+#define HCLK_I3C0 199
+#define HCLK_I3C1 200
+#define HCLK_BUS_CM0_ROOT 201
+#define FCLK_BUS_CM0_CORE 202
+#define CLK_BUS_CM0_RTC 203
+#define PCLK_PMU2 204
+#define PCLK_PWM2 205
+#define CLK_PWM2 206
+#define CLK_RC_PWM2 207
+#define CLK_OSC_PWM2 208
+#define CLK_FREQ_PWM1 209
+#define CLK_COUNTER_PWM1 210
+#define SAI_SCLKIN_FREQ 211
+#define SAI_SCLKIN_COUNTER 212
+#define CLK_I3C0 213
+#define CLK_I3C1 214
+#define PCLK_CSIDPHY1 215
+#define PCLK_DDR_ROOT 216
+#define PCLK_DDR_MON_CH0 217
+#define TMCLK_DDR_MON_CH0 218
+#define ACLK_DDR_ROOT 219
+#define HCLK_DDR_ROOT 220
+#define FCLK_DDR_CM0_CORE 221
+#define CLK_DDR_TIMER_ROOT 222
+#define CLK_DDR_TIMER0 223
+#define CLK_DDR_TIMER1 224
+#define TCLK_WDT_DDR 225
+#define PCLK_WDT 226
+#define PCLK_TIMER 227
+#define CLK_DDR_CM0_RTC 228
+#define ACLK_RKNN0 229
+#define ACLK_RKNN1 230
+#define HCLK_RKNN_ROOT 231
+#define CLK_RKNN_DSU0 232
+#define PCLK_NPUTOP_ROOT 233
+#define PCLK_NPU_TIMER 234
+#define CLK_NPUTIMER_ROOT 235
+#define CLK_NPUTIMER0 236
+#define CLK_NPUTIMER1 237
+#define PCLK_NPU_WDT 238
+#define TCLK_NPU_WDT 239
+#define ACLK_RKNN_CBUF 240
+#define HCLK_NPU_CM0_ROOT 241
+#define FCLK_NPU_CM0_CORE 242
+#define CLK_NPU_CM0_RTC 243
+#define HCLK_RKNN_CBUF 244
+#define HCLK_NVM_ROOT 245
+#define ACLK_NVM_ROOT 246
+#define SCLK_FSPI_X2 247
+#define HCLK_FSPI 248
+#define CCLK_SRC_EMMC 249
+#define HCLK_EMMC 250
+#define ACLK_EMMC 251
+#define BCLK_EMMC 252
+#define TCLK_EMMC 253
+#define PCLK_PHP_ROOT 254
+#define ACLK_PHP_ROOT 255
+#define PCLK_PCIE0 256
+#define CLK_PCIE0_AUX 257
+#define ACLK_PCIE0_MST 258
+#define ACLK_PCIE0_SLV 259
+#define ACLK_PCIE0_DBI 260
+#define ACLK_USB3OTG1 261
+#define CLK_REF_USB3OTG1 262
+#define CLK_SUSPEND_USB3OTG1 263
+#define ACLK_MMU0 264
+#define ACLK_SLV_MMU0 265
+#define ACLK_MMU1 266
+#define ACLK_SLV_MMU1 267
+#define PCLK_PCIE1 268
+#define CLK_PCIE1_AUX 269
+#define ACLK_PCIE1_MST 270
+#define ACLK_PCIE1_SLV 271
+#define ACLK_PCIE1_DBI 272
+#define CLK_RXOOB0 273
+#define CLK_RXOOB1 274
+#define CLK_PMALIVE0 275
+#define CLK_PMALIVE1 276
+#define ACLK_SATA0 277
+#define ACLK_SATA1 278
+#define CLK_USB3OTG1_PIPE_PCLK 279
+#define CLK_USB3OTG1_UTMI 280
+#define CLK_USB3OTG0_PIPE_PCLK 281
+#define CLK_USB3OTG0_UTMI 282
+#define HCLK_SDGMAC_ROOT 283
+#define ACLK_SDGMAC_ROOT 284
+#define PCLK_SDGMAC_ROOT 285
+#define ACLK_GMAC0 286
+#define ACLK_GMAC1 287
+#define PCLK_GMAC0 288
+#define PCLK_GMAC1 289
+#define CCLK_SRC_SDIO 290
+#define HCLK_SDIO 291
+#define CLK_GMAC1_PTP_REF 292
+#define CLK_GMAC0_PTP_REF 293
+#define CLK_GMAC1_PTP_REF_SRC 294
+#define CLK_GMAC0_PTP_REF_SRC 295
+#define CCLK_SRC_SDMMC0 296
+#define HCLK_SDMMC0 297
+#define SCLK_FSPI1_X2 298
+#define HCLK_FSPI1 299
+#define ACLK_DSMC_ROOT 300
+#define ACLK_DSMC 301
+#define PCLK_DSMC 302
+#define CLK_DSMC_SYS 303
+#define HCLK_HSGPIO 304
+#define CLK_HSGPIO_TX 305
+#define CLK_HSGPIO_RX 306
+#define ACLK_HSGPIO 307
+#define PCLK_PHPPHY_ROOT 308
+#define PCLK_PCIE2_COMBOPHY0 309
+#define PCLK_PCIE2_COMBOPHY1 310
+#define CLK_PCIE_100M_SRC 311
+#define CLK_PCIE_100M_NDUTY_SRC 312
+#define CLK_REF_PCIE0_PHY 313
+#define CLK_REF_PCIE1_PHY 314
+#define CLK_REF_MPHY_26M 315
+#define HCLK_RKVDEC_ROOT 316
+#define ACLK_RKVDEC_ROOT 317
+#define HCLK_RKVDEC 318
+#define CLK_RKVDEC_HEVC_CA 319
+#define CLK_RKVDEC_CORE 320
+#define ACLK_UFS_ROOT 321
+#define ACLK_USB_ROOT 322
+#define PCLK_USB_ROOT 323
+#define ACLK_USB3OTG0 324
+#define CLK_REF_USB3OTG0 325
+#define CLK_SUSPEND_USB3OTG0 326
+#define ACLK_MMU2 327
+#define ACLK_SLV_MMU2 328
+#define ACLK_UFS_SYS 329
+#define ACLK_VPU_ROOT 330
+#define ACLK_VPU_MID_ROOT 331
+#define HCLK_VPU_ROOT 332
+#define ACLK_JPEG_ROOT 333
+#define ACLK_VPU_LOW_ROOT 334
+#define HCLK_RGA2E_0 335
+#define ACLK_RGA2E_0 336
+#define CLK_CORE_RGA2E_0 337
+#define ACLK_JPEG 338
+#define HCLK_JPEG 339
+#define HCLK_VDPP 340
+#define ACLK_VDPP 341
+#define CLK_CORE_VDPP 342
+#define HCLK_RGA2E_1 343
+#define ACLK_RGA2E_1 344
+#define CLK_CORE_RGA2E_1 345
+#define DCLK_EBC_FRAC_SRC 346
+#define HCLK_EBC 347
+#define ACLK_EBC 348
+#define DCLK_EBC 349
+#define HCLK_VEPU0_ROOT 350
+#define ACLK_VEPU0_ROOT 351
+#define HCLK_VEPU0 352
+#define ACLK_VEPU0 353
+#define CLK_VEPU0_CORE 354
+#define ACLK_VI_ROOT 355
+#define HCLK_VI_ROOT 356
+#define PCLK_VI_ROOT 357
+#define DCLK_VICAP 358
+#define ACLK_VICAP 359
+#define HCLK_VICAP 360
+#define CLK_ISP_CORE 361
+#define CLK_ISP_CORE_MARVIN 362
+#define CLK_ISP_CORE_VICAP 363
+#define ACLK_ISP 364
+#define HCLK_ISP 365
+#define ACLK_VPSS 366
+#define HCLK_VPSS 367
+#define CLK_CORE_VPSS 368
+#define PCLK_CSI_HOST_0 369
+#define PCLK_CSI_HOST_1 370
+#define PCLK_CSI_HOST_2 371
+#define PCLK_CSI_HOST_3 372
+#define PCLK_CSI_HOST_4 373
+#define ICLK_CSIHOST01 374
+#define ICLK_CSIHOST0 375
+#define CLK_ISP_PVTPLL_SRC 376
+#define ACLK_VI_ROOT_INTER 377
+#define CLK_VICAP_I0CLK 378
+#define CLK_VICAP_I1CLK 379
+#define CLK_VICAP_I2CLK 380
+#define CLK_VICAP_I3CLK 381
+#define CLK_VICAP_I4CLK 382
+#define ACLK_VOP_ROOT 383
+#define HCLK_VOP_ROOT 384
+#define PCLK_VOP_ROOT 385
+#define HCLK_VOP 386
+#define ACLK_VOP 387
+#define DCLK_VP0_SRC 388
+#define DCLK_VP1_SRC 389
+#define DCLK_VP2_SRC 390
+#define DCLK_VP0 391
+#define DCLK_VP1 392
+#define DCLK_VP2 393
+#define PCLK_VOPGRF 394
+#define ACLK_VO0_ROOT 395
+#define HCLK_VO0_ROOT 396
+#define PCLK_VO0_ROOT 397
+#define PCLK_VO0_GRF 398
+#define ACLK_HDCP0 399
+#define HCLK_HDCP0 400
+#define PCLK_HDCP0 401
+#define CLK_TRNG0_SKP 402
+#define PCLK_DSIHOST0 403
+#define CLK_DSIHOST0 404
+#define PCLK_HDMITX0 405
+#define CLK_HDMITX0_EARC 406
+#define CLK_HDMITX0_REF 407
+#define PCLK_EDP0 408
+#define CLK_EDP0_24M 409
+#define CLK_EDP0_200M 410
+#define MCLK_SAI5_8CH_SRC 411
+#define MCLK_SAI5_8CH 412
+#define HCLK_SAI5_8CH 413
+#define MCLK_SAI6_8CH_SRC 414
+#define MCLK_SAI6_8CH 415
+#define HCLK_SAI6_8CH 416
+#define HCLK_SPDIF_TX2 417
+#define MCLK_SPDIF_TX2 418
+#define HCLK_SPDIF_RX2 419
+#define MCLK_SPDIF_RX2 420
+#define HCLK_SAI8_8CH 421
+#define MCLK_SAI8_8CH_SRC 422
+#define MCLK_SAI8_8CH 423
+#define ACLK_VO1_ROOT 424
+#define HCLK_VO1_ROOT 425
+#define PCLK_VO1_ROOT 426
+#define MCLK_SAI7_8CH_SRC 427
+#define MCLK_SAI7_8CH 428
+#define HCLK_SAI7_8CH 429
+#define HCLK_SPDIF_TX3 430
+#define HCLK_SPDIF_TX4 431
+#define HCLK_SPDIF_TX5 432
+#define MCLK_SPDIF_TX3 433
+#define CLK_AUX16MHZ_0 434
+#define ACLK_DP0 435
+#define PCLK_DP0 436
+#define PCLK_VO1_GRF 437
+#define ACLK_HDCP1 438
+#define HCLK_HDCP1 439
+#define PCLK_HDCP1 440
+#define CLK_TRNG1_SKP 441
+#define HCLK_SAI9_8CH 442
+#define MCLK_SAI9_8CH_SRC 443
+#define MCLK_SAI9_8CH 444
+#define MCLK_SPDIF_TX4 445
+#define MCLK_SPDIF_TX5 446
+#define CLK_GPU_SRC_PRE 447
+#define CLK_GPU 448
+#define PCLK_GPU_ROOT 449
+#define ACLK_CENTER_ROOT 450
+#define ACLK_CENTER_LOW_ROOT 451
+#define HCLK_CENTER_ROOT 452
+#define PCLK_CENTER_ROOT 453
+#define ACLK_DMA2DDR 454
+#define ACLK_DDR_SHAREMEM 455
+#define PCLK_DMA2DDR 456
+#define PCLK_SHAREMEM 457
+#define HCLK_VEPU1_ROOT 458
+#define ACLK_VEPU1_ROOT 459
+#define HCLK_VEPU1 460
+#define ACLK_VEPU1 461
+#define CLK_VEPU1_CORE 462
+#define CLK_JDBCK_DAP 463
+#define PCLK_MIPI_DCPHY 464
+#define CLK_32K_USB2DEBUG 465
+#define PCLK_CSIDPHY 466
+#define PCLK_USBDPPHY 467
+#define CLK_PMUPHY_REF_SRC 468
+#define CLK_USBDP_COMBO_PHY_IMMORTAL 469
+#define CLK_HDMITXHDP 470
+#define PCLK_MPHY 471
+#define CLK_REF_OSC_MPHY 472
+#define CLK_REF_UFS_CLKOUT 473
+#define HCLK_PMU1_ROOT 474
+#define HCLK_PMU_CM0_ROOT 475
+#define CLK_200M_PMU_SRC 476
+#define CLK_100M_PMU_SRC 477
+#define CLK_50M_PMU_SRC 478
+#define FCLK_PMU_CM0_CORE 479
+#define CLK_PMU_CM0_RTC 480
+#define PCLK_PMU1 481
+#define CLK_PMU1 482
+#define PCLK_PMU1WDT 483
+#define TCLK_PMU1WDT 484
+#define PCLK_PMUTIMER 485
+#define CLK_PMUTIMER_ROOT 486
+#define CLK_PMUTIMER0 487
+#define CLK_PMUTIMER1 488
+#define PCLK_PMU1PWM 489
+#define CLK_PMU1PWM 490
+#define CLK_PMU1PWM_OSC 491
+#define PCLK_PMUPHY_ROOT 492
+#define PCLK_I2C0 493
+#define CLK_I2C0 494
+#define SCLK_UART1 495
+#define PCLK_UART1 496
+#define CLK_PMU1PWM_RC 497
+#define CLK_PDM0 498
+#define HCLK_PDM0 499
+#define MCLK_PDM0 500
+#define HCLK_VAD 501
+#define CLK_OSCCHK_PVTM 502
+#define CLK_PDM0_OUT 503
+#define CLK_HPTIMER_SRC 504
+#define PCLK_PMU0_ROOT 505
+#define PCLK_PMU0 506
+#define PCLK_GPIO0 507
+#define DBCLK_GPIO0 508
+#define CLK_OSC0_PMU1 509
+#define PCLK_PMU1_ROOT 510
+#define XIN_OSC0_DIV 511
+#define ACLK_USB 512
+#define ACLK_UFS 513
+#define ACLK_SDGMAC 514
+#define HCLK_SDGMAC 515
+#define PCLK_SDGMAC 516
+#define HCLK_VO1 517
+#define HCLK_VO0 518
+#define PCLK_CCI_ROOT 519
+#define ACLK_CCI_ROOT 520
+#define HCLK_VO0VOP_CHANNEL 521
+#define ACLK_VO0VOP_CHANNEL 522
+#define ACLK_TOP_MID 523
+#define ACLK_SECURE_HIGH 524
+#define CLK_USBPHY_REF_SRC 525
+#define CLK_PHY_REF_SRC 526
+#define CLK_CPLL_REF_SRC 527
+#define CLK_AUPLL_REF_SRC 528
+#define PCLK_SECURE_NS 529
+#define HCLK_SECURE_NS 530
+#define ACLK_SECURE_NS 531
+#define PCLK_OTPC_NS 532
+#define HCLK_CRYPTO_NS 533
+#define HCLK_TRNG_NS 534
+#define CLK_OTPC_NS 535
+#define SCLK_DSU 536
+#define SCLK_DDR 537
+#define ACLK_CRYPTO_NS 538
+#define CLK_PKA_CRYPTO_NS 539
+#define ACLK_RKVDEC_ROOT_BAK 540
+#define CLK_AUDIO_FRAC_0_SRC 541
+#define CLK_AUDIO_FRAC_1_SRC 542
+#define CLK_AUDIO_FRAC_2_SRC 543
+#define CLK_AUDIO_FRAC_3_SRC 544
+#define PCLK_HDPTX_APB 545
+
+/* secure clk */
+#define CLK_STIMER0_ROOT 546
+#define CLK_STIMER1_ROOT 547
+#define PCLK_SECURE_S 548
+#define HCLK_SECURE_S 549
+#define ACLK_SECURE_S 550
+#define CLK_PKA_CRYPTO_S 551
+#define HCLK_VO1_S 552
+#define PCLK_VO1_S 553
+#define HCLK_VO0_S 554
+#define PCLK_VO0_S 555
+#define PCLK_KLAD 556
+#define HCLK_CRYPTO_S 557
+#define HCLK_KLAD 558
+#define ACLK_CRYPTO_S 559
+#define HCLK_TRNG_S 560
+#define PCLK_OTPC_S 561
+#define CLK_OTPC_S 562
+#define PCLK_WDT_S 563
+#define TCLK_WDT_S 564
+#define PCLK_HDCP0_TRNG 565
+#define PCLK_HDCP1_TRNG 566
+#define HCLK_HDCP_KEY0 567
+#define HCLK_HDCP_KEY1 568
+#define PCLK_EDP_S 569
+#define ACLK_KLAD 570
+
+/* SCMI clocks, use these when changing clocks through SCMI */
+#define SCMI_ARMCLK_L 10
+#define SCMI_ARMCLK_B 11
+#define SCMI_CLK_GPU 456
+
+/* IOC-controlled output clocks */
+#define CLK_SAI0_MCLKOUT_TO_IO 571
+#define CLK_SAI1_MCLKOUT_TO_IO 572
+#define CLK_SAI2_MCLKOUT_TO_IO 573
+#define CLK_SAI3_MCLKOUT_TO_IO 574
+#define CLK_SAI4_MCLKOUT_TO_IO 575
+#define CLK_FSPI0_TO_IO 576
+#define CLK_FSPI1_TO_IO 577
+
+#endif
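
Note that the scmi-clocks indices form a separate number space from the cru-clocks indices: they are passed to the firmware's SCMI clock protocol node, not to &cru. A minimal sketch, assuming an scmi_clk protocol node and a cpu_l0 label as on other Rockchip SoCs (both hypothetical here):

#include <dt-bindings/clock/rockchip,rk3576-cru.h>

/* Hypothetical: the little-core clock is managed by SCMI firmware. */
&cpu_l0 {
	clocks = <&scmi_clk SCMI_ARMCLK_L>;
};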
diff --git a/include/dt-bindings/clock/rockchip,rk3588-cru.h b/include/dt-bindings/clock/rockchip,rk3588-cru.h
new file mode 100644
index 000000000000..0c7d3ca2d5bc
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rk3588-cru.h
@@ -0,0 +1,765 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2022 Collabora Ltd.
+ *
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ * Author: Sebastian Reichel <sebastian.reichel@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3588_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3588_H
+
+/* cru-clocks indices */
+
+#define PLL_B0PLL 0
+#define PLL_B1PLL 1
+#define PLL_LPLL 2
+#define PLL_V0PLL 3
+#define PLL_AUPLL 4
+#define PLL_CPLL 5
+#define PLL_GPLL 6
+#define PLL_NPLL 7
+#define PLL_PPLL 8
+#define ARMCLK_L 9
+#define ARMCLK_B01 10
+#define ARMCLK_B23 11
+#define PCLK_BIGCORE0_ROOT 12
+#define PCLK_BIGCORE0_PVTM 13
+#define PCLK_BIGCORE1_ROOT 14
+#define PCLK_BIGCORE1_PVTM 15
+#define PCLK_DSU_S_ROOT 16
+#define PCLK_DSU_ROOT 17
+#define PCLK_DSU_NS_ROOT 18
+#define PCLK_LITCORE_PVTM 19
+#define PCLK_DBG 20
+#define PCLK_DSU 21
+#define PCLK_S_DAPLITE 22
+#define PCLK_M_DAPLITE 23
+#define MBIST_MCLK_PDM1 24
+#define MBIST_CLK_ACDCDIG 25
+#define HCLK_I2S2_2CH 26
+#define HCLK_I2S3_2CH 27
+#define CLK_I2S2_2CH_SRC 28
+#define CLK_I2S2_2CH_FRAC 29
+#define CLK_I2S2_2CH 30
+#define MCLK_I2S2_2CH 31
+#define I2S2_2CH_MCLKOUT 32
+#define CLK_DAC_ACDCDIG 33
+#define CLK_I2S3_2CH_SRC 34
+#define CLK_I2S3_2CH_FRAC 35
+#define CLK_I2S3_2CH 36
+#define MCLK_I2S3_2CH 37
+#define I2S3_2CH_MCLKOUT 38
+#define PCLK_ACDCDIG 39
+#define HCLK_I2S0_8CH 40
+#define CLK_I2S0_8CH_TX_SRC 41
+#define CLK_I2S0_8CH_TX_FRAC 42
+#define MCLK_I2S0_8CH_TX 43
+#define CLK_I2S0_8CH_TX 44
+#define CLK_I2S0_8CH_RX_SRC 45
+#define CLK_I2S0_8CH_RX_FRAC 46
+#define MCLK_I2S0_8CH_RX 47
+#define CLK_I2S0_8CH_RX 48
+#define I2S0_8CH_MCLKOUT 49
+#define HCLK_PDM1 50
+#define MCLK_PDM1 51
+#define HCLK_AUDIO_ROOT 52
+#define PCLK_AUDIO_ROOT 53
+#define HCLK_SPDIF0 54
+#define CLK_SPDIF0_SRC 55
+#define CLK_SPDIF0_FRAC 56
+#define MCLK_SPDIF0 57
+#define CLK_SPDIF0 58
+#define CLK_SPDIF1 59
+#define HCLK_SPDIF1 60
+#define CLK_SPDIF1_SRC 61
+#define CLK_SPDIF1_FRAC 62
+#define MCLK_SPDIF1 63
+#define ACLK_AV1_ROOT 64
+#define ACLK_AV1 65
+#define PCLK_AV1_ROOT 66
+#define PCLK_AV1 67
+#define PCLK_MAILBOX0 68
+#define PCLK_MAILBOX1 69
+#define PCLK_MAILBOX2 70
+#define PCLK_PMU2 71
+#define PCLK_PMUCM0_INTMUX 72
+#define PCLK_DDRCM0_INTMUX 73
+#define PCLK_TOP 74
+#define PCLK_PWM1 75
+#define CLK_PWM1 76
+#define CLK_PWM1_CAPTURE 77
+#define PCLK_PWM2 78
+#define CLK_PWM2 79
+#define CLK_PWM2_CAPTURE 80
+#define PCLK_PWM3 81
+#define CLK_PWM3 82
+#define CLK_PWM3_CAPTURE 83
+#define PCLK_BUSTIMER0 84
+#define PCLK_BUSTIMER1 85
+#define CLK_BUS_TIMER_ROOT 86
+#define CLK_BUSTIMER0 87
+#define CLK_BUSTIMER1 88
+#define CLK_BUSTIMER2 89
+#define CLK_BUSTIMER3 90
+#define CLK_BUSTIMER4 91
+#define CLK_BUSTIMER5 92
+#define CLK_BUSTIMER6 93
+#define CLK_BUSTIMER7 94
+#define CLK_BUSTIMER8 95
+#define CLK_BUSTIMER9 96
+#define CLK_BUSTIMER10 97
+#define CLK_BUSTIMER11 98
+#define PCLK_WDT0 99
+#define TCLK_WDT0 100
+#define PCLK_CAN0 101
+#define CLK_CAN0 102
+#define PCLK_CAN1 103
+#define CLK_CAN1 104
+#define PCLK_CAN2 105
+#define CLK_CAN2 106
+#define ACLK_DECOM 107
+#define PCLK_DECOM 108
+#define DCLK_DECOM 109
+#define ACLK_DMAC0 110
+#define ACLK_DMAC1 111
+#define ACLK_DMAC2 112
+#define ACLK_BUS_ROOT 113
+#define ACLK_GIC 114
+#define PCLK_GPIO1 115
+#define DBCLK_GPIO1 116
+#define PCLK_GPIO2 117
+#define DBCLK_GPIO2 118
+#define PCLK_GPIO3 119
+#define DBCLK_GPIO3 120
+#define PCLK_GPIO4 121
+#define DBCLK_GPIO4 122
+#define PCLK_I2C1 123
+#define PCLK_I2C2 124
+#define PCLK_I2C3 125
+#define PCLK_I2C4 126
+#define PCLK_I2C5 127
+#define PCLK_I2C6 128
+#define PCLK_I2C7 129
+#define PCLK_I2C8 130
+#define CLK_I2C1 131
+#define CLK_I2C2 132
+#define CLK_I2C3 133
+#define CLK_I2C4 134
+#define CLK_I2C5 135
+#define CLK_I2C6 136
+#define CLK_I2C7 137
+#define CLK_I2C8 138
+#define PCLK_OTPC_NS 139
+#define CLK_OTPC_NS 140
+#define CLK_OTPC_ARB 141
+#define CLK_OTPC_AUTO_RD_G 142
+#define CLK_OTP_PHY_G 143
+#define PCLK_SARADC 144
+#define CLK_SARADC 145
+#define PCLK_SPI0 146
+#define PCLK_SPI1 147
+#define PCLK_SPI2 148
+#define PCLK_SPI3 149
+#define PCLK_SPI4 150
+#define CLK_SPI0 151
+#define CLK_SPI1 152
+#define CLK_SPI2 153
+#define CLK_SPI3 154
+#define CLK_SPI4 155
+#define ACLK_SPINLOCK 156
+#define PCLK_TSADC 157
+#define CLK_TSADC 158
+#define PCLK_UART1 159
+#define PCLK_UART2 160
+#define PCLK_UART3 161
+#define PCLK_UART4 162
+#define PCLK_UART5 163
+#define PCLK_UART6 164
+#define PCLK_UART7 165
+#define PCLK_UART8 166
+#define PCLK_UART9 167
+#define CLK_UART1_SRC 168
+#define CLK_UART1_FRAC 169
+#define CLK_UART1 170
+#define SCLK_UART1 171
+#define CLK_UART2_SRC 172
+#define CLK_UART2_FRAC 173
+#define CLK_UART2 174
+#define SCLK_UART2 175
+#define CLK_UART3_SRC 176
+#define CLK_UART3_FRAC 177
+#define CLK_UART3 178
+#define SCLK_UART3 179
+#define CLK_UART4_SRC 180
+#define CLK_UART4_FRAC 181
+#define CLK_UART4 182
+#define SCLK_UART4 183
+#define CLK_UART5_SRC 184
+#define CLK_UART5_FRAC 185
+#define CLK_UART5 186
+#define SCLK_UART5 187
+#define CLK_UART6_SRC 188
+#define CLK_UART6_FRAC 189
+#define CLK_UART6 190
+#define SCLK_UART6 191
+#define CLK_UART7_SRC 192
+#define CLK_UART7_FRAC 193
+#define CLK_UART7 194
+#define SCLK_UART7 195
+#define CLK_UART8_SRC 196
+#define CLK_UART8_FRAC 197
+#define CLK_UART8 198
+#define SCLK_UART8 199
+#define CLK_UART9_SRC 200
+#define CLK_UART9_FRAC 201
+#define CLK_UART9 202
+#define SCLK_UART9 203
+#define ACLK_CENTER_ROOT 204
+#define ACLK_CENTER_LOW_ROOT 205
+#define HCLK_CENTER_ROOT 206
+#define PCLK_CENTER_ROOT 207
+#define ACLK_DMA2DDR 208
+#define ACLK_DDR_SHAREMEM 209
+#define ACLK_CENTER_S200_ROOT 210
+#define ACLK_CENTER_S400_ROOT 211
+#define FCLK_DDR_CM0_CORE 212
+#define CLK_DDR_TIMER_ROOT 213
+#define CLK_DDR_TIMER0 214
+#define CLK_DDR_TIMER1 215
+#define TCLK_WDT_DDR 216
+#define CLK_DDR_CM0_RTC 217
+#define PCLK_WDT 218
+#define PCLK_TIMER 219
+#define PCLK_DMA2DDR 220
+#define PCLK_SHAREMEM 221
+#define CLK_50M_SRC 222
+#define CLK_100M_SRC 223
+#define CLK_150M_SRC 224
+#define CLK_200M_SRC 225
+#define CLK_250M_SRC 226
+#define CLK_300M_SRC 227
+#define CLK_350M_SRC 228
+#define CLK_400M_SRC 229
+#define CLK_450M_SRC 230
+#define CLK_500M_SRC 231
+#define CLK_600M_SRC 232
+#define CLK_650M_SRC 233
+#define CLK_700M_SRC 234
+#define CLK_800M_SRC 235
+#define CLK_1000M_SRC 236
+#define CLK_1200M_SRC 237
+#define ACLK_TOP_M300_ROOT 238
+#define ACLK_TOP_M500_ROOT 239
+#define ACLK_TOP_M400_ROOT 240
+#define ACLK_TOP_S200_ROOT 241
+#define ACLK_TOP_S400_ROOT 242
+#define CLK_MIPI_CAMARAOUT_M0 243
+#define CLK_MIPI_CAMARAOUT_M1 244
+#define CLK_MIPI_CAMARAOUT_M2 245
+#define CLK_MIPI_CAMARAOUT_M3 246
+#define CLK_MIPI_CAMARAOUT_M4 247
+#define MCLK_GMAC0_OUT 248
+#define REFCLKO25M_ETH0_OUT 249
+#define REFCLKO25M_ETH1_OUT 250
+#define CLK_CIFOUT_OUT 251
+#define PCLK_MIPI_DCPHY0 252
+#define PCLK_MIPI_DCPHY1 253
+#define PCLK_CSIPHY0 254
+#define PCLK_CSIPHY1 255
+#define ACLK_TOP_ROOT 256
+#define PCLK_TOP_ROOT 257
+#define ACLK_LOW_TOP_ROOT 258
+#define PCLK_CRU 259
+#define PCLK_GPU_ROOT 260
+#define CLK_GPU_SRC 261
+#define CLK_GPU 262
+#define CLK_GPU_COREGROUP 263
+#define CLK_GPU_STACKS 264
+#define PCLK_GPU_PVTM 265
+#define CLK_GPU_PVTM 266
+#define CLK_CORE_GPU_PVTM 267
+#define PCLK_GPU_GRF 268
+#define ACLK_ISP1_ROOT 269
+#define HCLK_ISP1_ROOT 270
+#define CLK_ISP1_CORE 271
+#define CLK_ISP1_CORE_MARVIN 272
+#define CLK_ISP1_CORE_VICAP 273
+#define ACLK_ISP1 274
+#define HCLK_ISP1 275
+#define ACLK_NPU1 276
+#define HCLK_NPU1 277
+#define ACLK_NPU2 278
+#define HCLK_NPU2 279
+#define HCLK_NPU_CM0_ROOT 280
+#define FCLK_NPU_CM0_CORE 281
+#define CLK_NPU_CM0_RTC 282
+#define PCLK_NPU_PVTM 283
+#define PCLK_NPU_GRF 284
+#define CLK_NPU_PVTM 285
+#define CLK_CORE_NPU_PVTM 286
+#define ACLK_NPU0 287
+#define HCLK_NPU0 288
+#define HCLK_NPU_ROOT 289
+#define CLK_NPU_DSU0 290
+#define PCLK_NPU_ROOT 291
+#define PCLK_NPU_TIMER 292
+#define CLK_NPUTIMER_ROOT 293
+#define CLK_NPUTIMER0 294
+#define CLK_NPUTIMER1 295
+#define PCLK_NPU_WDT 296
+#define TCLK_NPU_WDT 297
+#define HCLK_EMMC 298
+#define ACLK_EMMC 299
+#define CCLK_EMMC 300
+#define BCLK_EMMC 301
+#define TMCLK_EMMC 302
+#define SCLK_SFC 303
+#define HCLK_SFC 304
+#define HCLK_SFC_XIP 305
+#define HCLK_NVM_ROOT 306
+#define ACLK_NVM_ROOT 307
+#define CLK_GMAC0_PTP_REF 308
+#define CLK_GMAC1_PTP_REF 309
+#define CLK_GMAC_125M 310
+#define CLK_GMAC_50M 311
+#define ACLK_PHP_GIC_ITS 312
+#define ACLK_MMU_PCIE 313
+#define ACLK_MMU_PHP 314
+#define ACLK_PCIE_4L_DBI 315
+#define ACLK_PCIE_2L_DBI 316
+#define ACLK_PCIE_1L0_DBI 317
+#define ACLK_PCIE_1L1_DBI 318
+#define ACLK_PCIE_1L2_DBI 319
+#define ACLK_PCIE_4L_MSTR 320
+#define ACLK_PCIE_2L_MSTR 321
+#define ACLK_PCIE_1L0_MSTR 322
+#define ACLK_PCIE_1L1_MSTR 323
+#define ACLK_PCIE_1L2_MSTR 324
+#define ACLK_PCIE_4L_SLV 325
+#define ACLK_PCIE_2L_SLV 326
+#define ACLK_PCIE_1L0_SLV 327
+#define ACLK_PCIE_1L1_SLV 328
+#define ACLK_PCIE_1L2_SLV 329
+#define PCLK_PCIE_4L 330
+#define PCLK_PCIE_2L 331
+#define PCLK_PCIE_1L0 332
+#define PCLK_PCIE_1L1 333
+#define PCLK_PCIE_1L2 334
+#define CLK_PCIE_AUX0 335
+#define CLK_PCIE_AUX1 336
+#define CLK_PCIE_AUX2 337
+#define CLK_PCIE_AUX3 338
+#define CLK_PCIE_AUX4 339
+#define CLK_PIPEPHY0_REF 340
+#define CLK_PIPEPHY1_REF 341
+#define CLK_PIPEPHY2_REF 342
+#define PCLK_PHP_ROOT 343
+#define PCLK_GMAC0 344
+#define PCLK_GMAC1 345
+#define ACLK_PCIE_ROOT 346
+#define ACLK_PHP_ROOT 347
+#define ACLK_PCIE_BRIDGE 348
+#define ACLK_GMAC0 349
+#define ACLK_GMAC1 350
+#define CLK_PMALIVE0 351
+#define CLK_PMALIVE1 352
+#define CLK_PMALIVE2 353
+#define ACLK_SATA0 354
+#define ACLK_SATA1 355
+#define ACLK_SATA2 356
+#define CLK_RXOOB0 357
+#define CLK_RXOOB1 358
+#define CLK_RXOOB2 359
+#define ACLK_USB3OTG2 360
+#define SUSPEND_CLK_USB3OTG2 361
+#define REF_CLK_USB3OTG2 362
+#define CLK_UTMI_OTG2 363
+#define CLK_PIPEPHY0_PIPE_G 364
+#define CLK_PIPEPHY1_PIPE_G 365
+#define CLK_PIPEPHY2_PIPE_G 366
+#define CLK_PIPEPHY0_PIPE_ASIC_G 367
+#define CLK_PIPEPHY1_PIPE_ASIC_G 368
+#define CLK_PIPEPHY2_PIPE_ASIC_G 369
+#define CLK_PIPEPHY2_PIPE_U3_G 370
+#define CLK_PCIE1L2_PIPE 371
+#define CLK_PCIE4L_PIPE 372
+#define CLK_PCIE2L_PIPE 373
+#define PCLK_PCIE_COMBO_PIPE_PHY0 374
+#define PCLK_PCIE_COMBO_PIPE_PHY1 375
+#define PCLK_PCIE_COMBO_PIPE_PHY2 376
+#define PCLK_PCIE_COMBO_PIPE_PHY 377
+#define HCLK_RGA3_1 378
+#define ACLK_RGA3_1 379
+#define CLK_RGA3_1_CORE 380
+#define ACLK_RGA3_ROOT 381
+#define HCLK_RGA3_ROOT 382
+#define ACLK_RKVDEC_CCU 383
+#define HCLK_RKVDEC0 384
+#define ACLK_RKVDEC0 385
+#define CLK_RKVDEC0_CA 386
+#define CLK_RKVDEC0_HEVC_CA 387
+#define CLK_RKVDEC0_CORE 388
+#define HCLK_RKVDEC1 389
+#define ACLK_RKVDEC1 390
+#define CLK_RKVDEC1_CA 391
+#define CLK_RKVDEC1_HEVC_CA 392
+#define CLK_RKVDEC1_CORE 393
+#define HCLK_SDIO 394
+#define CCLK_SRC_SDIO 395
+#define ACLK_USB_ROOT 396
+#define HCLK_USB_ROOT 397
+#define HCLK_HOST0 398
+#define HCLK_HOST_ARB0 399
+#define HCLK_HOST1 400
+#define HCLK_HOST_ARB1 401
+#define ACLK_USB3OTG0 402
+#define SUSPEND_CLK_USB3OTG0 403
+#define REF_CLK_USB3OTG0 404
+#define ACLK_USB3OTG1 405
+#define SUSPEND_CLK_USB3OTG1 406
+#define REF_CLK_USB3OTG1 407
+#define UTMI_OHCI_CLK48_HOST0 408
+#define UTMI_OHCI_CLK48_HOST1 409
+#define HCLK_IEP2P0 410
+#define ACLK_IEP2P0 411
+#define CLK_IEP2P0_CORE 412
+#define ACLK_JPEG_ENCODER0 413
+#define HCLK_JPEG_ENCODER0 414
+#define ACLK_JPEG_ENCODER1 415
+#define HCLK_JPEG_ENCODER1 416
+#define ACLK_JPEG_ENCODER2 417
+#define HCLK_JPEG_ENCODER2 418
+#define ACLK_JPEG_ENCODER3 419
+#define HCLK_JPEG_ENCODER3 420
+#define ACLK_JPEG_DECODER 421
+#define HCLK_JPEG_DECODER 422
+#define HCLK_RGA2 423
+#define ACLK_RGA2 424
+#define CLK_RGA2_CORE 425
+#define HCLK_RGA3_0 426
+#define ACLK_RGA3_0 427
+#define CLK_RGA3_0_CORE 428
+#define ACLK_VDPU_ROOT 429
+#define ACLK_VDPU_LOW_ROOT 430
+#define HCLK_VDPU_ROOT 431
+#define ACLK_JPEG_DECODER_ROOT 432
+#define ACLK_VPU 433
+#define HCLK_VPU 434
+#define HCLK_RKVENC0_ROOT 435
+#define ACLK_RKVENC0_ROOT 436
+#define HCLK_RKVENC0 437
+#define ACLK_RKVENC0 438
+#define CLK_RKVENC0_CORE 439
+#define HCLK_RKVENC1_ROOT 440
+#define ACLK_RKVENC1_ROOT 441
+#define HCLK_RKVENC1 442
+#define ACLK_RKVENC1 443
+#define CLK_RKVENC1_CORE 444
+#define ICLK_CSIHOST01 445
+#define ICLK_CSIHOST0 446
+#define ICLK_CSIHOST1 447
+#define PCLK_CSI_HOST_0 448
+#define PCLK_CSI_HOST_1 449
+#define PCLK_CSI_HOST_2 450
+#define PCLK_CSI_HOST_3 451
+#define PCLK_CSI_HOST_4 452
+#define PCLK_CSI_HOST_5 453
+#define ACLK_FISHEYE0 454
+#define HCLK_FISHEYE0 455
+#define CLK_FISHEYE0_CORE 456
+#define ACLK_FISHEYE1 457
+#define HCLK_FISHEYE1 458
+#define CLK_FISHEYE1_CORE 459
+#define CLK_ISP0_CORE 460
+#define CLK_ISP0_CORE_MARVIN 461
+#define CLK_ISP0_CORE_VICAP 462
+#define ACLK_ISP0 463
+#define HCLK_ISP0 464
+#define ACLK_VI_ROOT 465
+#define HCLK_VI_ROOT 466
+#define PCLK_VI_ROOT 467
+#define DCLK_VICAP 468
+#define ACLK_VICAP 469
+#define HCLK_VICAP 470
+#define PCLK_DP0 471
+#define PCLK_DP1 472
+#define PCLK_S_DP0 473
+#define PCLK_S_DP1 474
+#define CLK_DP0 475
+#define CLK_DP1 476
+#define HCLK_HDCP_KEY0 477
+#define ACLK_HDCP0 478
+#define HCLK_HDCP0 479
+#define PCLK_HDCP0 480
+#define HCLK_I2S4_8CH 481
+#define ACLK_TRNG0 482
+#define PCLK_TRNG0 483
+#define ACLK_VO0_ROOT 484
+#define HCLK_VO0_ROOT 485
+#define HCLK_VO0_S_ROOT 486
+#define PCLK_VO0_ROOT 487
+#define PCLK_VO0_S_ROOT 488
+#define PCLK_VO0GRF 489
+#define CLK_I2S4_8CH_TX_SRC 490
+#define CLK_I2S4_8CH_TX_FRAC 491
+#define MCLK_I2S4_8CH_TX 492
+#define CLK_I2S4_8CH_TX 493
+#define HCLK_I2S8_8CH 494
+#define CLK_I2S8_8CH_TX_SRC 495
+#define CLK_I2S8_8CH_TX_FRAC 496
+#define MCLK_I2S8_8CH_TX 497
+#define CLK_I2S8_8CH_TX 498
+#define HCLK_SPDIF2_DP0 499
+#define CLK_SPDIF2_DP0_SRC 500
+#define CLK_SPDIF2_DP0_FRAC 501
+#define MCLK_SPDIF2_DP0 502
+#define CLK_SPDIF2_DP0 503
+#define MCLK_SPDIF2 504
+#define HCLK_SPDIF5_DP1 505
+#define CLK_SPDIF5_DP1_SRC 506
+#define CLK_SPDIF5_DP1_FRAC 507
+#define MCLK_SPDIF5_DP1 508
+#define CLK_SPDIF5_DP1 509
+#define MCLK_SPDIF5 510
+#define PCLK_EDP0 511
+#define CLK_EDP0_24M 512
+#define CLK_EDP0_200M 513
+#define PCLK_EDP1 514
+#define CLK_EDP1_24M 515
+#define CLK_EDP1_200M 516
+#define HCLK_HDCP_KEY1 517
+#define ACLK_HDCP1 518
+#define HCLK_HDCP1 519
+#define PCLK_HDCP1 520
+#define ACLK_HDMIRX 521
+#define PCLK_HDMIRX 522
+#define CLK_HDMIRX_REF 523
+#define CLK_HDMIRX_AUD_SRC 524
+#define CLK_HDMIRX_AUD_FRAC 525
+#define CLK_HDMIRX_AUD 526
+#define CLK_HDMIRX_AUD_P_MUX 527
+#define PCLK_HDMITX0 528
+#define CLK_HDMITX0_EARC 529
+#define CLK_HDMITX0_REF 530
+#define PCLK_HDMITX1 531
+#define CLK_HDMITX1_EARC 532
+#define CLK_HDMITX1_REF 533
+#define CLK_HDMITRX_REFSRC 534
+#define ACLK_TRNG1 535
+#define PCLK_TRNG1 536
+#define ACLK_HDCP1_ROOT 537
+#define ACLK_HDMIRX_ROOT 538
+#define HCLK_VO1_ROOT 539
+#define HCLK_VO1_S_ROOT 540
+#define PCLK_VO1_ROOT 541
+#define PCLK_VO1_S_ROOT 542
+#define PCLK_S_EDP0 543
+#define PCLK_S_EDP1 544
+#define PCLK_S_HDMIRX 545
+#define HCLK_I2S10_8CH 546
+#define CLK_I2S10_8CH_RX_SRC 547
+#define CLK_I2S10_8CH_RX_FRAC 548
+#define CLK_I2S10_8CH_RX 549
+#define MCLK_I2S10_8CH_RX 550
+#define HCLK_I2S7_8CH 551
+#define CLK_I2S7_8CH_RX_SRC 552
+#define CLK_I2S7_8CH_RX_FRAC 553
+#define CLK_I2S7_8CH_RX 554
+#define MCLK_I2S7_8CH_RX 555
+#define HCLK_I2S9_8CH 556
+#define CLK_I2S9_8CH_RX_SRC 557
+#define CLK_I2S9_8CH_RX_FRAC 558
+#define CLK_I2S9_8CH_RX 559
+#define MCLK_I2S9_8CH_RX 560
+#define CLK_I2S5_8CH_TX_SRC 561
+#define CLK_I2S5_8CH_TX_FRAC 562
+#define CLK_I2S5_8CH_TX 563
+#define MCLK_I2S5_8CH_TX 564
+#define HCLK_I2S5_8CH 565
+#define CLK_I2S6_8CH_TX_SRC 566
+#define CLK_I2S6_8CH_TX_FRAC 567
+#define CLK_I2S6_8CH_TX 568
+#define MCLK_I2S6_8CH_TX 569
+#define CLK_I2S6_8CH_RX_SRC 570
+#define CLK_I2S6_8CH_RX_FRAC 571
+#define CLK_I2S6_8CH_RX 572
+#define MCLK_I2S6_8CH_RX 573
+#define I2S6_8CH_MCLKOUT 574
+#define HCLK_I2S6_8CH 575
+#define HCLK_SPDIF3 576
+#define CLK_SPDIF3_SRC 577
+#define CLK_SPDIF3_FRAC 578
+#define CLK_SPDIF3 579
+#define MCLK_SPDIF3 580
+#define HCLK_SPDIF4 581
+#define CLK_SPDIF4_SRC 582
+#define CLK_SPDIF4_FRAC 583
+#define CLK_SPDIF4 584
+#define MCLK_SPDIF4 585
+#define HCLK_SPDIFRX0 586
+#define MCLK_SPDIFRX0 587
+#define HCLK_SPDIFRX1 588
+#define MCLK_SPDIFRX1 589
+#define HCLK_SPDIFRX2 590
+#define MCLK_SPDIFRX2 591
+#define ACLK_VO1USB_TOP_ROOT 592
+#define HCLK_VO1USB_TOP_ROOT 593
+#define CLK_HDMIHDP0 594
+#define CLK_HDMIHDP1 595
+#define PCLK_HDPTX0 596
+#define PCLK_HDPTX1 597
+#define PCLK_USBDPPHY0 598
+#define PCLK_USBDPPHY1 599
+#define ACLK_VOP_ROOT 600
+#define ACLK_VOP_LOW_ROOT 601
+#define HCLK_VOP_ROOT 602
+#define PCLK_VOP_ROOT 603
+#define HCLK_VOP 604
+#define ACLK_VOP 605
+#define DCLK_VOP0_SRC 606
+#define DCLK_VOP1_SRC 607
+#define DCLK_VOP2_SRC 608
+#define DCLK_VOP0 609
+#define DCLK_VOP1 610
+#define DCLK_VOP2 611
+#define DCLK_VOP3 612
+#define PCLK_DSIHOST0 613
+#define PCLK_DSIHOST1 614
+#define CLK_DSIHOST0 615
+#define CLK_DSIHOST1 616
+#define CLK_VOP_PMU 617
+#define ACLK_VOP_DOBY 618
+#define ACLK_VOP_SUB_SRC 619
+#define CLK_USBDP_PHY0_IMMORTAL 620
+#define CLK_USBDP_PHY1_IMMORTAL 621
+#define CLK_PMU0 622
+#define PCLK_PMU0 623
+#define PCLK_PMU0IOC 624
+#define PCLK_GPIO0 625
+#define DBCLK_GPIO0 626
+#define PCLK_I2C0 627
+#define CLK_I2C0 628
+#define HCLK_I2S1_8CH 629
+#define CLK_I2S1_8CH_TX_SRC 630
+#define CLK_I2S1_8CH_TX_FRAC 631
+#define CLK_I2S1_8CH_TX 632
+#define MCLK_I2S1_8CH_TX 633
+#define CLK_I2S1_8CH_RX_SRC 634
+#define CLK_I2S1_8CH_RX_FRAC 635
+#define CLK_I2S1_8CH_RX 636
+#define MCLK_I2S1_8CH_RX 637
+#define I2S1_8CH_MCLKOUT 638
+#define CLK_PMU1_50M_SRC 639
+#define CLK_PMU1_100M_SRC 640
+#define CLK_PMU1_200M_SRC 641
+#define CLK_PMU1_300M_SRC 642
+#define CLK_PMU1_400M_SRC 643
+#define HCLK_PMU1_ROOT 644
+#define PCLK_PMU1_ROOT 645
+#define PCLK_PMU0_ROOT 646
+#define HCLK_PMU_CM0_ROOT 647
+#define PCLK_PMU1 648
+#define CLK_DDR_FAIL_SAFE 649
+#define CLK_PMU1 650
+#define HCLK_PDM0 651
+#define MCLK_PDM0 652
+#define HCLK_VAD 653
+#define FCLK_PMU_CM0_CORE 654
+#define CLK_PMU_CM0_RTC 655
+#define PCLK_PMU1_IOC 656
+#define PCLK_PMU1PWM 657
+#define CLK_PMU1PWM 658
+#define CLK_PMU1PWM_CAPTURE 659
+#define PCLK_PMU1TIMER 660
+#define CLK_PMU1TIMER_ROOT 661
+#define CLK_PMU1TIMER0 662
+#define CLK_PMU1TIMER1 663
+#define CLK_UART0_SRC 664
+#define CLK_UART0_FRAC 665
+#define CLK_UART0 666
+#define SCLK_UART0 667
+#define PCLK_UART0 668
+#define PCLK_PMU1WDT 669
+#define TCLK_PMU1WDT 670
+#define CLK_CR_PARA 671
+#define CLK_USB2PHY_HDPTXRXPHY_REF 672
+#define CLK_USBDPPHY_MIPIDCPPHY_REF 673
+#define CLK_REF_PIPE_PHY0_OSC_SRC 674
+#define CLK_REF_PIPE_PHY1_OSC_SRC 675
+#define CLK_REF_PIPE_PHY2_OSC_SRC 676
+#define CLK_REF_PIPE_PHY0_PLL_SRC 677
+#define CLK_REF_PIPE_PHY1_PLL_SRC 678
+#define CLK_REF_PIPE_PHY2_PLL_SRC 679
+#define CLK_REF_PIPE_PHY0 680
+#define CLK_REF_PIPE_PHY1 681
+#define CLK_REF_PIPE_PHY2 682
+#define SCLK_SDIO_DRV 683
+#define SCLK_SDIO_SAMPLE 684
+#define SCLK_SDMMC_DRV 685
+#define SCLK_SDMMC_SAMPLE 686
+#define CLK_PCIE1L0_PIPE 687
+#define CLK_PCIE1L1_PIPE 688
+#define CLK_BIGCORE0_PVTM 689
+#define CLK_CORE_BIGCORE0_PVTM 690
+#define CLK_BIGCORE1_PVTM 691
+#define CLK_CORE_BIGCORE1_PVTM 692
+#define CLK_LITCORE_PVTM 693
+#define CLK_CORE_LITCORE_PVTM 694
+#define CLK_AUX16M_0 695
+#define CLK_AUX16M_1 696
+#define CLK_PHY0_REF_ALT_P 697
+#define CLK_PHY0_REF_ALT_M 698
+#define CLK_PHY1_REF_ALT_P 699
+#define CLK_PHY1_REF_ALT_M 700
+#define ACLK_ISP1_PRE 701
+#define HCLK_ISP1_PRE 702
+#define HCLK_NVM 703
+#define ACLK_USB 704
+#define HCLK_USB 705
+#define ACLK_JPEG_DECODER_PRE 706
+#define ACLK_VDPU_LOW_PRE 707
+#define ACLK_RKVENC1_PRE 708
+#define HCLK_RKVENC1_PRE 709
+#define HCLK_RKVDEC0_PRE 710
+#define ACLK_RKVDEC0_PRE 711
+#define HCLK_RKVDEC1_PRE 712
+#define ACLK_RKVDEC1_PRE 713
+#define ACLK_HDCP0_PRE 714
+#define HCLK_VO0 715
+#define ACLK_HDCP1_PRE 716
+#define HCLK_VO1 717
+#define ACLK_AV1_PRE 718
+#define PCLK_AV1_PRE 719
+#define HCLK_SDIO_PRE 720
+#define PCLK_VO1GRF 721
+
+/* scmi-clocks indices */
+
+#define SCMI_CLK_CPUL 0
+#define SCMI_CLK_DSU 1
+#define SCMI_CLK_CPUB01 2
+#define SCMI_CLK_CPUB23 3
+#define SCMI_CLK_DDR 4
+#define SCMI_CLK_GPU 5
+#define SCMI_CLK_NPU 6
+#define SCMI_CLK_SBUS 7
+#define SCMI_PCLK_SBUS 8
+#define SCMI_CCLK_SD 9
+#define SCMI_DCLK_SD 10
+#define SCMI_ACLK_SECURE_NS 11
+#define SCMI_HCLK_SECURE_NS 12
+#define SCMI_TCLK_WDT 13
+#define SCMI_KEYLADDER_CORE 14
+#define SCMI_KEYLADDER_RNG 15
+#define SCMI_ACLK_SECURE_S 16
+#define SCMI_HCLK_SECURE_S 17
+#define SCMI_PCLK_SECURE_S 18
+#define SCMI_CRYPTO_RNG 19
+#define SCMI_CRYPTO_CORE 20
+#define SCMI_CRYPTO_PKA 21
+#define SCMI_SPLL 22
+#define SCMI_HCLK_SD 23
+
+#endif
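
The SCMI indices above are exposed by firmware through the SCMI clock protocol rather than by the CRU itself, so consumers reference the SCMI protocol node. A minimal sketch, assuming the conventional scmi_clk label for that node (the label and the consumer node are illustrative; only the index macro comes from this header):

    /* DTS sketch: node labels are assumptions, not part of this header */
    &cpu_l0 {
            clocks = <&scmi_clk SCMI_CLK_CPUL>;
    };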
diff --git a/include/dt-bindings/clock/rockchip,rv1126-cru.h b/include/dt-bindings/clock/rockchip,rv1126-cru.h
new file mode 100644
index 000000000000..e89a3a5a4a34
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rv1126-cru.h
@@ -0,0 +1,632 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 Rockchip Electronics Co. Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RV1126_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RV1126_H
+
+/* pmucru-clocks indices */
+
+/* pll clocks */
+#define PLL_GPLL 1
+
+/* sclk (special clocks) */
+#define CLK_OSC0_DIV32K 2
+#define CLK_RTC32K 3
+#define CLK_WIFI_DIV 4
+#define CLK_WIFI_OSC0 5
+#define CLK_WIFI 6
+#define CLK_PMU 7
+#define SCLK_UART1_DIV 8
+#define SCLK_UART1_FRACDIV 9
+#define SCLK_UART1_MUX 10
+#define SCLK_UART1 11
+#define CLK_I2C0 12
+#define CLK_I2C2 13
+#define CLK_CAPTURE_PWM0 14
+#define CLK_PWM0 15
+#define CLK_CAPTURE_PWM1 16
+#define CLK_PWM1 17
+#define CLK_SPI0 18
+#define DBCLK_GPIO0 19
+#define CLK_PMUPVTM 20
+#define CLK_CORE_PMUPVTM 21
+#define CLK_REF12M 22
+#define CLK_USBPHY_OTG_REF 23
+#define CLK_USBPHY_HOST_REF 24
+#define CLK_REF24M 25
+#define CLK_MIPIDSIPHY_REF 26
+
+/* pclk */
+#define PCLK_PDPMU 30
+#define PCLK_PMU 31
+#define PCLK_UART1 32
+#define PCLK_I2C0 33
+#define PCLK_I2C2 34
+#define PCLK_PWM0 35
+#define PCLK_PWM1 36
+#define PCLK_SPI0 37
+#define PCLK_GPIO0 38
+#define PCLK_PMUSGRF 39
+#define PCLK_PMUGRF 40
+#define PCLK_PMUCRU 41
+#define PCLK_CHIPVEROTP 42
+#define PCLK_PDPMU_NIU 43
+#define PCLK_PMUPVTM 44
+#define PCLK_SCRKEYGEN 45
+
+#define CLKPMU_NR_CLKS (PCLK_SCRKEYGEN + 1)
+
+/* cru-clocks indices */
+
+/* pll clocks */
+#define PLL_APLL 1
+#define PLL_DPLL 2
+#define PLL_CPLL 3
+#define PLL_HPLL 4
+
+/* sclk (special clocks) */
+#define ARMCLK 5
+#define USB480M 6
+#define CLK_CORE_CPUPVTM 7
+#define CLK_CPUPVTM 8
+#define CLK_SCR1 9
+#define CLK_SCR1_CORE 10
+#define CLK_SCR1_RTC 11
+#define CLK_SCR1_JTAG 12
+#define SCLK_UART0_DIV 13
+#define SCLK_UART0_FRAC 14
+#define SCLK_UART0_MUX 15
+#define SCLK_UART0 16
+#define SCLK_UART2_DIV 17
+#define SCLK_UART2_FRAC 18
+#define SCLK_UART2_MUX 19
+#define SCLK_UART2 20
+#define SCLK_UART3_DIV 21
+#define SCLK_UART3_FRAC 22
+#define SCLK_UART3_MUX 23
+#define SCLK_UART3 24
+#define SCLK_UART4_DIV 25
+#define SCLK_UART4_FRAC 26
+#define SCLK_UART4_MUX 27
+#define SCLK_UART4 28
+#define SCLK_UART5_DIV 29
+#define SCLK_UART5_FRAC 30
+#define SCLK_UART5_MUX 31
+#define SCLK_UART5 32
+#define CLK_I2C1 33
+#define CLK_I2C3 34
+#define CLK_I2C4 35
+#define CLK_I2C5 36
+#define CLK_SPI1 37
+#define CLK_CAPTURE_PWM2 38
+#define CLK_PWM2 39
+#define DBCLK_GPIO1 40
+#define DBCLK_GPIO2 41
+#define DBCLK_GPIO3 42
+#define DBCLK_GPIO4 43
+#define CLK_SARADC 44
+#define CLK_TIMER0 45
+#define CLK_TIMER1 46
+#define CLK_TIMER2 47
+#define CLK_TIMER3 48
+#define CLK_TIMER4 49
+#define CLK_TIMER5 50
+#define CLK_CAN 51
+#define CLK_NPU_TSADC 52
+#define CLK_NPU_TSADCPHY 53
+#define CLK_CPU_TSADC 54
+#define CLK_CPU_TSADCPHY 55
+#define CLK_CRYPTO_CORE 56
+#define CLK_CRYPTO_PKA 57
+#define MCLK_I2S0_TX_DIV 58
+#define MCLK_I2S0_TX_FRACDIV 59
+#define MCLK_I2S0_TX_MUX 60
+#define MCLK_I2S0_TX 61
+#define MCLK_I2S0_RX_DIV 62
+#define MCLK_I2S0_RX_FRACDIV 63
+#define MCLK_I2S0_RX_MUX 64
+#define MCLK_I2S0_RX 65
+#define MCLK_I2S0_TX_OUT2IO 66
+#define MCLK_I2S0_RX_OUT2IO 67
+#define MCLK_I2S1_DIV 68
+#define MCLK_I2S1_FRACDIV 69
+#define MCLK_I2S1_MUX 70
+#define MCLK_I2S1 71
+#define MCLK_I2S1_OUT2IO 72
+#define MCLK_I2S2_DIV 73
+#define MCLK_I2S2_FRACDIV 74
+#define MCLK_I2S2_MUX 75
+#define MCLK_I2S2 76
+#define MCLK_I2S2_OUT2IO 77
+#define MCLK_PDM 78
+#define SCLK_ADUPWM_DIV 79
+#define SCLK_AUDPWM_FRACDIV 80
+#define SCLK_AUDPWM_MUX 81
+#define SCLK_AUDPWM 82
+#define CLK_ACDCDIG_ADC 83
+#define CLK_ACDCDIG_DAC 84
+#define CLK_ACDCDIG_I2C 85
+#define CLK_VENC_CORE 86
+#define CLK_VDEC_CORE 87
+#define CLK_VDEC_CA 88
+#define CLK_VDEC_HEVC_CA 89
+#define CLK_RGA_CORE 90
+#define CLK_IEP_CORE 91
+#define CLK_ISP_DIV 92
+#define CLK_ISP_NP5 93
+#define CLK_ISP_NUX 94
+#define CLK_ISP 95
+#define CLK_CIF_OUT_DIV 96
+#define CLK_CIF_OUT_FRACDIV 97
+#define CLK_CIF_OUT_MUX 98
+#define CLK_CIF_OUT 99
+#define CLK_MIPICSI_OUT_DIV 100
+#define CLK_MIPICSI_OUT_FRACDIV 101
+#define CLK_MIPICSI_OUT_MUX 102
+#define CLK_MIPICSI_OUT 103
+#define CLK_ISPP_DIV 104
+#define CLK_ISPP_NP5 105
+#define CLK_ISPP_NUX 106
+#define CLK_ISPP 107
+#define CLK_SDMMC 108
+#define SCLK_SDMMC_DRV 109
+#define SCLK_SDMMC_SAMPLE 110
+#define CLK_SDIO 111
+#define SCLK_SDIO_DRV 112
+#define SCLK_SDIO_SAMPLE 113
+#define CLK_EMMC 114
+#define SCLK_EMMC_DRV 115
+#define SCLK_EMMC_SAMPLE 116
+#define CLK_NANDC 117
+#define SCLK_SFC 118
+#define CLK_USBHOST_UTMI_OHCI 119
+#define CLK_USBOTG_REF 120
+#define CLK_GMAC_DIV 121
+#define CLK_GMAC_RGMII_M0 122
+#define CLK_GMAC_SRC_M0 123
+#define CLK_GMAC_RGMII_M1 124
+#define CLK_GMAC_SRC_M1 125
+#define CLK_GMAC_SRC 126
+#define CLK_GMAC_REF 127
+#define CLK_GMAC_TX_SRC 128
+#define CLK_GMAC_TX_DIV5 129
+#define CLK_GMAC_TX_DIV50 130
+#define RGMII_MODE_CLK 131
+#define CLK_GMAC_RX_SRC 132
+#define CLK_GMAC_RX_DIV2 133
+#define CLK_GMAC_RX_DIV20 134
+#define RMII_MODE_CLK 135
+#define CLK_GMAC_TX_RX 136
+#define CLK_GMAC_PTPREF 137
+#define CLK_GMAC_ETHERNET_OUT 138
+#define CLK_DDRPHY 139
+#define CLK_DDR_MON 140
+#define TMCLK_DDR_MON 141
+#define CLK_NPU_DIV 142
+#define CLK_NPU_NP5 143
+#define CLK_CORE_NPU 144
+#define CLK_CORE_NPUPVTM 145
+#define CLK_NPUPVTM 146
+#define SCLK_DDRCLK 147
+#define CLK_OTP 148
+
+/* dclk */
+#define DCLK_DECOM 150
+#define DCLK_VOP_DIV 151
+#define DCLK_VOP_FRACDIV 152
+#define DCLK_VOP_MUX 153
+#define DCLK_VOP 154
+#define DCLK_CIF 155
+#define DCLK_CIFLITE 156
+
+/* aclk */
+#define ACLK_PDBUS 160
+#define ACLK_DMAC 161
+#define ACLK_DCF 162
+#define ACLK_SPINLOCK 163
+#define ACLK_DECOM 164
+#define ACLK_PDCRYPTO 165
+#define ACLK_CRYPTO 166
+#define ACLK_PDVEPU 167
+#define ACLK_VENC 168
+#define ACLK_PDVDEC 169
+#define ACLK_PDJPEG 170
+#define ACLK_VDEC 171
+#define ACLK_JPEG 172
+#define ACLK_PDVO 173
+#define ACLK_RGA 174
+#define ACLK_VOP 175
+#define ACLK_IEP 176
+#define ACLK_PDVI_DIV 177
+#define ACLK_PDVI_NP5 178
+#define ACLK_PDVI 179
+#define ACLK_ISP 180
+#define ACLK_CIF 181
+#define ACLK_CIFLITE 182
+#define ACLK_PDISPP_DIV 183
+#define ACLK_PDISPP_NP5 184
+#define ACLK_PDISPP 185
+#define ACLK_ISPP 186
+#define ACLK_PDPHP 187
+#define ACLK_PDUSB 188
+#define ACLK_USBOTG 189
+#define ACLK_PDGMAC 190
+#define ACLK_GMAC 191
+#define ACLK_PDNPU_DIV 192
+#define ACLK_PDNPU_NP5 193
+#define ACLK_PDNPU 194
+#define ACLK_NPU 195
+
+/* hclk */
+#define HCLK_PDCORE_NIU 200
+#define HCLK_PDUSB 201
+#define HCLK_PDCRYPTO 202
+#define HCLK_CRYPTO 203
+#define HCLK_PDAUDIO 204
+#define HCLK_I2S0 205
+#define HCLK_I2S1 206
+#define HCLK_I2S2 207
+#define HCLK_PDM 208
+#define HCLK_AUDPWM 209
+#define HCLK_PDVEPU 210
+#define HCLK_VENC 211
+#define HCLK_PDVDEC 212
+#define HCLK_PDJPEG 213
+#define HCLK_VDEC 214
+#define HCLK_JPEG 215
+#define HCLK_PDVO 216
+#define HCLK_RGA 217
+#define HCLK_VOP 218
+#define HCLK_IEP 219
+#define HCLK_PDVI 220
+#define HCLK_ISP 221
+#define HCLK_CIF 222
+#define HCLK_CIFLITE 223
+#define HCLK_PDISPP 224
+#define HCLK_ISPP 225
+#define HCLK_PDPHP 226
+#define HCLK_PDSDMMC 227
+#define HCLK_SDMMC 228
+#define HCLK_PDSDIO 229
+#define HCLK_SDIO 230
+#define HCLK_PDNVM 231
+#define HCLK_EMMC 232
+#define HCLK_NANDC 233
+#define HCLK_SFC 234
+#define HCLK_SFCXIP 235
+#define HCLK_PDBUS 236
+#define HCLK_USBHOST 237
+#define HCLK_USBHOST_ARB 238
+#define HCLK_PDNPU 239
+#define HCLK_NPU 240
+
+/* pclk */
+#define PCLK_CPUPVTM 245
+#define PCLK_PDBUS 246
+#define PCLK_DCF 247
+#define PCLK_WDT 248
+#define PCLK_MAILBOX 249
+#define PCLK_UART0 250
+#define PCLK_UART2 251
+#define PCLK_UART3 252
+#define PCLK_UART4 253
+#define PCLK_UART5 254
+#define PCLK_I2C1 255
+#define PCLK_I2C3 256
+#define PCLK_I2C4 257
+#define PCLK_I2C5 258
+#define PCLK_SPI1 259
+#define PCLK_PWM2 261
+#define PCLK_GPIO1 262
+#define PCLK_GPIO2 263
+#define PCLK_GPIO3 264
+#define PCLK_GPIO4 265
+#define PCLK_SARADC 266
+#define PCLK_TIMER 267
+#define PCLK_DECOM 268
+#define PCLK_CAN 269
+#define PCLK_NPU_TSADC 270
+#define PCLK_CPU_TSADC 271
+#define PCLK_ACDCDIG 272
+#define PCLK_PDVO 273
+#define PCLK_DSIHOST 274
+#define PCLK_PDVI 275
+#define PCLK_CSIHOST 276
+#define PCLK_PDGMAC 277
+#define PCLK_GMAC 278
+#define PCLK_PDDDR 279
+#define PCLK_DDR_MON 280
+#define PCLK_PDNPU 281
+#define PCLK_NPUPVTM 282
+#define PCLK_PDTOP 283
+#define PCLK_TOPCRU 284
+#define PCLK_TOPGRF 285
+#define PCLK_CPUEMADET 286
+#define PCLK_DDRPHY 287
+#define PCLK_DSIPHY 289
+#define PCLK_CSIPHY0 290
+#define PCLK_CSIPHY1 291
+#define PCLK_USBPHY_HOST 292
+#define PCLK_USBPHY_OTG 293
+#define PCLK_OTP 294
+
+#define CLK_NR_CLKS (PCLK_OTP + 1)
+
+/* pmu soft-reset indices */
+
+/* pmu_cru_softrst_con0 */
+#define SRST_PDPMU_NIU_P 0
+#define SRST_PMU_SGRF_P 1
+#define SRST_PMU_SGRF_REMAP_P 2
+#define SRST_I2C0_P 3
+#define SRST_I2C0 4
+#define SRST_I2C2_P 7
+#define SRST_I2C2 8
+#define SRST_UART1_P 9
+#define SRST_UART1 10
+#define SRST_PWM0_P 11
+#define SRST_PWM0 12
+#define SRST_PWM1_P 13
+#define SRST_PWM1 14
+#define SRST_DDR_FAIL_SAFE 15
+
+/* pmu_cru_softrst_con1 */
+#define SRST_GPIO0_P 17
+#define SRST_GPIO0_DB 18
+#define SRST_SPI0_P 19
+#define SRST_SPI0 20
+#define SRST_PMUGRF_P 21
+#define SRST_CHIPVEROTP_P 22
+#define SRST_PMUPVTM 24
+#define SRST_PMUPVTM_P 25
+#define SRST_PMUCRU_P 30
+
+/* soft-reset indices */
+
+/* cru_softrst_con0 */
+#define SRST_CORE0_PO 0
+#define SRST_CORE1_PO 1
+#define SRST_CORE2_PO 2
+#define SRST_CORE3_PO 3
+#define SRST_CORE0 4
+#define SRST_CORE1 5
+#define SRST_CORE2 6
+#define SRST_CORE3 7
+#define SRST_CORE0_DBG 8
+#define SRST_CORE1_DBG 9
+#define SRST_CORE2_DBG 10
+#define SRST_CORE3_DBG 11
+#define SRST_NL2 12
+#define SRST_CORE_NIU_A 13
+#define SRST_DBG_DAPLITE_P 14
+#define SRST_DAPLITE_P 15
+
+/* cru_softrst_con1 */
+#define SRST_PDBUS_NIU1_A 16
+#define SRST_PDBUS_NIU1_H 17
+#define SRST_PDBUS_NIU1_P 18
+#define SRST_PDBUS_NIU2_A 19
+#define SRST_PDBUS_NIU2_H 20
+#define SRST_PDBUS_NIU3_A 21
+#define SRST_PDBUS_NIU3_H 22
+#define SRST_PDBUS_HOLD_NIU1_A 23
+#define SRST_DBG_NIU_P 24
+#define SRST_PDCORE_NIIU_H 25
+#define SRST_MUC_NIU 26
+#define SRST_DCF_A 29
+#define SRST_DCF_P 30
+#define SRST_SYSTEM_SRAM_A 31
+
+/* cru_softrst_con2 */
+#define SRST_I2C1_P 32
+#define SRST_I2C1 33
+#define SRST_I2C3_P 34
+#define SRST_I2C3 35
+#define SRST_I2C4_P 36
+#define SRST_I2C4 37
+#define SRST_I2C5_P 38
+#define SRST_I2C5 39
+#define SRST_SPI1_P 40
+#define SRST_SPI1 41
+#define SRST_MCU_CORE 42
+#define SRST_PWM2_P 44
+#define SRST_PWM2 45
+#define SRST_SPINLOCK_A 46
+
+/* cru_softrst_con3 */
+#define SRST_UART0_P 48
+#define SRST_UART0 49
+#define SRST_UART2_P 50
+#define SRST_UART2 51
+#define SRST_UART3_P 52
+#define SRST_UART3 53
+#define SRST_UART4_P 54
+#define SRST_UART4 55
+#define SRST_UART5_P 56
+#define SRST_UART5 57
+#define SRST_WDT_P 58
+#define SRST_SARADC_P 59
+#define SRST_GRF_P 61
+#define SRST_TIMER_P 62
+#define SRST_MAILBOX_P 63
+
+/* cru_softrst_con4 */
+#define SRST_TIMER0 64
+#define SRST_TIMER1 65
+#define SRST_TIMER2 66
+#define SRST_TIMER3 67
+#define SRST_TIMER4 68
+#define SRST_TIMER5 69
+#define SRST_INTMUX_P 70
+#define SRST_GPIO1_P 72
+#define SRST_GPIO1_DB 73
+#define SRST_GPIO2_P 74
+#define SRST_GPIO2_DB 75
+#define SRST_GPIO3_P 76
+#define SRST_GPIO3_DB 77
+#define SRST_GPIO4_P 78
+#define SRST_GPIO4_DB 79
+
+/* cru_softrst_con5 */
+#define SRST_CAN_P 80
+#define SRST_CAN 81
+#define SRST_DECOM_A 85
+#define SRST_DECOM_P 86
+#define SRST_DECOM_D 87
+#define SRST_PDCRYPTO_NIU_A 88
+#define SRST_PDCRYPTO_NIU_H 89
+#define SRST_CRYPTO_A 90
+#define SRST_CRYPTO_H 91
+#define SRST_CRYPTO_CORE 92
+#define SRST_CRYPTO_PKA 93
+#define SRST_SGRF_P 95
+
+/* cru_softrst_con6 */
+#define SRST_PDAUDIO_NIU_H 96
+#define SRST_PDAUDIO_NIU_P 97
+#define SRST_I2S0_H 98
+#define SRST_I2S0_TX_M 99
+#define SRST_I2S0_RX_M 100
+#define SRST_I2S1_H 101
+#define SRST_I2S1_M 102
+#define SRST_I2S2_H 103
+#define SRST_I2S2_M 104
+#define SRST_PDM_H 105
+#define SRST_PDM_M 106
+#define SRST_AUDPWM_H 107
+#define SRST_AUDPWM 108
+#define SRST_ACDCDIG_P 109
+#define SRST_ACDCDIG 110
+
+/* cru_softrst_con7 */
+#define SRST_PDVEPU_NIU_A 112
+#define SRST_PDVEPU_NIU_H 113
+#define SRST_VENC_A 114
+#define SRST_VENC_H 115
+#define SRST_VENC_CORE 116
+#define SRST_PDVDEC_NIU_A 117
+#define SRST_PDVDEC_NIU_H 118
+#define SRST_VDEC_A 119
+#define SRST_VDEC_H 120
+#define SRST_VDEC_CORE 121
+#define SRST_VDEC_CA 122
+#define SRST_VDEC_HEVC_CA 123
+#define SRST_PDJPEG_NIU_A 124
+#define SRST_PDJPEG_NIU_H 125
+#define SRST_JPEG_A 126
+#define SRST_JPEG_H 127
+
+/* cru_softrst_con8 */
+#define SRST_PDVO_NIU_A 128
+#define SRST_PDVO_NIU_H 129
+#define SRST_PDVO_NIU_P 130
+#define SRST_RGA_A 131
+#define SRST_RGA_H 132
+#define SRST_RGA_CORE 133
+#define SRST_VOP_A 134
+#define SRST_VOP_H 135
+#define SRST_VOP_D 136
+#define SRST_TXBYTEHS_DSIHOST 137
+#define SRST_DSIHOST_P 138
+#define SRST_IEP_A 139
+#define SRST_IEP_H 140
+#define SRST_IEP_CORE 141
+#define SRST_ISP_RX_P 142
+
+/* cru_softrst_con9 */
+#define SRST_PDVI_NIU_A 144
+#define SRST_PDVI_NIU_H 145
+#define SRST_PDVI_NIU_P 146
+#define SRST_ISP 147
+#define SRST_CIF_A 148
+#define SRST_CIF_H 149
+#define SRST_CIF_D 150
+#define SRST_CIF_P 151
+#define SRST_CIF_I 152
+#define SRST_CIF_RX_P 153
+#define SRST_PDISPP_NIU_A 154
+#define SRST_PDISPP_NIU_H 155
+#define SRST_ISPP_A 156
+#define SRST_ISPP_H 157
+#define SRST_ISPP 158
+#define SRST_CSIHOST_P 159
+
+/* cru_softrst_con10 */
+#define SRST_PDPHPMID_NIU_A 160
+#define SRST_PDPHPMID_NIU_H 161
+#define SRST_PDNVM_NIU_H 163
+#define SRST_SDMMC_H 164
+#define SRST_SDIO_H 165
+#define SRST_EMMC_H 166
+#define SRST_SFC_H 167
+#define SRST_SFCXIP_H 168
+#define SRST_SFC 169
+#define SRST_NANDC_H 170
+#define SRST_NANDC 171
+#define SRST_PDSDMMC_H 173
+#define SRST_PDSDIO_H 174
+
+/* cru_softrst_con11 */
+#define SRST_PDUSB_NIU_A 176
+#define SRST_PDUSB_NIU_H 177
+#define SRST_USBHOST_H 178
+#define SRST_USBHOST_ARB_H 179
+#define SRST_USBHOST_UTMI 180
+#define SRST_USBOTG_A 181
+#define SRST_USBPHY_OTG_P 182
+#define SRST_USBPHY_HOST_P 183
+#define SRST_USBPHYPOR_OTG 184
+#define SRST_USBPHYPOR_HOST 185
+#define SRST_PDGMAC_NIU_A 188
+#define SRST_PDGMAC_NIU_P 189
+#define SRST_GMAC_A 190
+
+/* cru_softrst_con12 */
+#define SRST_DDR_DFICTL_P 193
+#define SRST_DDR_MON_P 194
+#define SRST_DDR_STANDBY_P 195
+#define SRST_DDR_GRF_P 196
+#define SRST_DDR_MSCH_P 197
+#define SRST_DDR_SPLIT_A 198
+#define SRST_DDR_MSCH 199
+#define SRST_DDR_DFICTL 202
+#define SRST_DDR_STANDBY 203
+#define SRST_NPUMCU_NIU 205
+#define SRST_DDRPHY_P 206
+#define SRST_DDRPHY 207
+
+/* cru_softrst_con13 */
+#define SRST_PDNPU_NIU_A 208
+#define SRST_PDNPU_NIU_H 209
+#define SRST_PDNPU_NIU_P 210
+#define SRST_NPU_A 211
+#define SRST_NPU_H 212
+#define SRST_NPU 213
+#define SRST_NPUPVTM_P 214
+#define SRST_NPUPVTM 215
+#define SRST_NPU_TSADC_P 216
+#define SRST_NPU_TSADC 217
+#define SRST_NPU_TSADCPHY 218
+#define SRST_CIFLITE_A 220
+#define SRST_CIFLITE_H 221
+#define SRST_CIFLITE_D 222
+#define SRST_CIFLITE_RX_P 223
+
+/* cru_softrst_con14 */
+#define SRST_TOPNIU_P 224
+#define SRST_TOPCRU_P 225
+#define SRST_TOPGRF_P 226
+#define SRST_CPUEMADET_P 227
+#define SRST_CSIPHY0_P 228
+#define SRST_CSIPHY1_P 229
+#define SRST_DSIPHY_P 230
+#define SRST_CPU_TSADC_P 232
+#define SRST_CPU_TSADC 233
+#define SRST_CPU_TSADCPHY 234
+#define SRST_CPUPVTM_P 235
+#define SRST_CPUPVTM 236
+
+#endif
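
Every index in this header, clock or soft-reset alike, is meaningful only when paired with the CRU phandle in a consumer's specifier. A hedged sketch of a UART consumer, assuming the usual cru node label and the generic DW APB UART clock names (nothing below is mandated by this header itself):

    #include <dt-bindings/clock/rockchip,rv1126-cru.h>

    &uart0 {
            /* baud clock and APB bus clock, both provided by the CRU */
            clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
            clock-names = "baudclk", "apb_pclk";
            /* the same CRU node also acts as the reset provider */
            resets = <&cru SRST_UART0>;
    };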
diff --git a/include/dt-bindings/clock/rockchip,rv1126b-cru.h b/include/dt-bindings/clock/rockchip,rv1126b-cru.h
new file mode 100644
index 000000000000..721d50a1419f
--- /dev/null
+++ b/include/dt-bindings/clock/rockchip,rv1126b-cru.h
@@ -0,0 +1,392 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RV1126B_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RV1126B_H
+
+/* pll clocks */
+#define PLL_GPLL 0
+#define PLL_CPLL 1
+#define PLL_AUPLL 2
+#define ARMCLK 3
+#define SCLK_DDR 4
+
+/* clk (clocks) */
+#define CLK_CPLL_DIV20 5
+#define CLK_CPLL_DIV10 6
+#define CLK_CPLL_DIV8 7
+#define CLK_GPLL_DIV8 8
+#define CLK_GPLL_DIV6 9
+#define CLK_GPLL_DIV4 10
+#define CLK_CPLL_DIV3 11
+#define CLK_GPLL_DIV3 12
+#define CLK_CPLL_DIV2 13
+#define CLK_GPLL_DIV2 14
+#define CLK_CM_FRAC0 15
+#define CLK_CM_FRAC1 16
+#define CLK_CM_FRAC2 17
+#define CLK_UART_FRAC0 18
+#define CLK_UART_FRAC1 19
+#define CLK_AUDIO_FRAC0 20
+#define CLK_AUDIO_FRAC1 21
+#define CLK_AUDIO_INT0 22
+#define CLK_AUDIO_INT1 23
+#define SCLK_UART0_SRC 24
+#define SCLK_UART1 25
+#define SCLK_UART2 26
+#define SCLK_UART3 27
+#define SCLK_UART4 28
+#define SCLK_UART5 29
+#define SCLK_UART6 30
+#define SCLK_UART7 31
+#define MCLK_SAI0 32
+#define MCLK_SAI1 33
+#define MCLK_SAI2 34
+#define MCLK_PDM 35
+#define CLKOUT_PDM 36
+#define MCLK_ASRC0 37
+#define MCLK_ASRC1 38
+#define MCLK_ASRC2 39
+#define MCLK_ASRC3 40
+#define CLK_ASRC0 41
+#define CLK_ASRC1 42
+#define CLK_CORE_PLL 43
+#define CLK_NPU_PLL 44
+#define CLK_VEPU_PLL 45
+#define CLK_ISP_PLL 46
+#define CLK_AISP_PLL 47
+#define CLK_SARADC0_SRC 48
+#define CLK_SARADC1_SRC 49
+#define CLK_SARADC2_SRC 50
+#define HCLK_NPU_ROOT 51
+#define PCLK_NPU_ROOT 52
+#define ACLK_VEPU_ROOT 53
+#define HCLK_VEPU_ROOT 54
+#define PCLK_VEPU_ROOT 55
+#define CLK_CORE_RGA_SRC 56
+#define ACLK_GMAC_ROOT 57
+#define ACLK_VI_ROOT 58
+#define HCLK_VI_ROOT 59
+#define PCLK_VI_ROOT 60
+#define DCLK_VICAP_ROOT 61
+#define CLK_SYS_DSMC_ROOT 62
+#define ACLK_VDO_ROOT 63
+#define ACLK_RKVDEC_ROOT 64
+#define HCLK_VDO_ROOT 65
+#define PCLK_VDO_ROOT 66
+#define DCLK_OOC_SRC 67
+#define DCLK_VOP 68
+#define DCLK_DECOM_SRC 69
+#define PCLK_DDR_ROOT 70
+#define ACLK_SYSMEM_SRC 71
+#define ACLK_TOP_ROOT 72
+#define ACLK_BUS_ROOT 73
+#define HCLK_BUS_ROOT 74
+#define PCLK_BUS_ROOT 75
+#define CCLK_SDMMC0 76
+#define CCLK_SDMMC1 77
+#define CCLK_EMMC 78
+#define SCLK_2X_FSPI0 79
+#define CLK_GMAC_PTP_REF_SRC 80
+#define CLK_GMAC_125M 81
+#define CLK_TIMER_ROOT 82
+#define TCLK_WDT_NS_SRC 83
+#define TCLK_WDT_S_SRC 84
+#define TCLK_WDT_HPMCU 85
+#define CLK_CAN0 86
+#define CLK_CAN1 87
+#define PCLK_PERI_ROOT 88
+#define ACLK_PERI_ROOT 89
+#define CLK_I2C_BUS_SRC 90
+#define CLK_SPI0 91
+#define CLK_SPI1 92
+#define BUSCLK_PMU_SRC 93
+#define CLK_PWM0 94
+#define CLK_PWM2 95
+#define CLK_PWM3 96
+#define CLK_PKA_RKCE_SRC 97
+#define ACLK_RKCE_SRC 98
+#define ACLK_VCP_ROOT 99
+#define HCLK_VCP_ROOT 100
+#define PCLK_VCP_ROOT 101
+#define CLK_CORE_FEC_SRC 102
+#define CLK_CORE_AVSP_SRC 103
+#define CLK_50M_GMAC_IOBUF_VI 104
+#define PCLK_TOP_ROOT 105
+#define CLK_MIPI0_OUT2IO 106
+#define CLK_MIPI1_OUT2IO 107
+#define CLK_MIPI2_OUT2IO 108
+#define CLK_MIPI3_OUT2IO 109
+#define CLK_CIF_OUT2IO 110
+#define CLK_MAC_OUT2IO 111
+#define MCLK_SAI0_OUT2IO 112
+#define MCLK_SAI1_OUT2IO 113
+#define MCLK_SAI2_OUT2IO 114
+#define CLK_CM_FRAC0_SRC 115
+#define CLK_CM_FRAC1_SRC 116
+#define CLK_CM_FRAC2_SRC 117
+#define CLK_UART_FRAC0_SRC 118
+#define CLK_UART_FRAC1_SRC 119
+#define CLK_AUDIO_FRAC0_SRC 120
+#define CLK_AUDIO_FRAC1_SRC 121
+#define ACLK_NPU_ROOT 122
+#define HCLK_RKNN 123
+#define ACLK_RKNN 124
+#define PCLK_GPIO3 125
+#define DBCLK_GPIO3 126
+#define PCLK_IOC_VCCIO3 127
+#define PCLK_SARADC0 128
+#define CLK_SARADC0 129
+#define HCLK_SDMMC1 130
+#define HCLK_VEPU 131
+#define ACLK_VEPU 132
+#define CLK_CORE_VEPU 133
+#define HCLK_FEC 134
+#define ACLK_FEC 135
+#define CLK_CORE_FEC 136
+#define HCLK_AVSP 137
+#define ACLK_AVSP 138
+#define BUSCLK_PMU1_ROOT 139
+#define HCLK_AISP 140
+#define ACLK_AISP 141
+#define CLK_CORE_AISP 142
+#define CLK_CORE_ISP_ROOT 143
+#define PCLK_DSMC 144
+#define ACLK_DSMC 145
+#define HCLK_CAN0 146
+#define HCLK_CAN1 147
+#define PCLK_GPIO2 148
+#define DBCLK_GPIO2 149
+#define PCLK_GPIO4 150
+#define DBCLK_GPIO4 151
+#define PCLK_GPIO5 152
+#define DBCLK_GPIO5 153
+#define PCLK_GPIO6 154
+#define DBCLK_GPIO6 155
+#define PCLK_GPIO7 156
+#define DBCLK_GPIO7 157
+#define PCLK_IOC_VCCIO2 158
+#define PCLK_IOC_VCCIO4 159
+#define PCLK_IOC_VCCIO5 160
+#define PCLK_IOC_VCCIO6 161
+#define PCLK_IOC_VCCIO7 162
+#define HCLK_ISP 163
+#define ACLK_ISP 164
+#define CLK_CORE_ISP 165
+#define HCLK_VICAP 166
+#define ACLK_VICAP 167
+#define DCLK_VICAP 168
+#define ISP0CLK_VICAP 169
+#define HCLK_VPSS 170
+#define ACLK_VPSS 171
+#define CLK_CORE_VPSS 172
+#define PCLK_CSI2HOST0 173
+#define DCLK_CSI2HOST0 174
+#define PCLK_CSI2HOST1 175
+#define DCLK_CSI2HOST1 176
+#define PCLK_CSI2HOST2 177
+#define DCLK_CSI2HOST2 178
+#define PCLK_CSI2HOST3 179
+#define DCLK_CSI2HOST3 180
+#define HCLK_SDMMC0 181
+#define ACLK_GMAC 182
+#define PCLK_GMAC 183
+#define CLK_GMAC_PTP_REF 184
+#define PCLK_CSIPHY0 185
+#define PCLK_CSIPHY1 186
+#define PCLK_MACPHY 187
+#define PCLK_SARADC1 188
+#define CLK_SARADC1 189
+#define PCLK_SARADC2 190
+#define CLK_SARADC2 191
+#define ACLK_RKVDEC 192
+#define HCLK_RKVDEC 193
+#define CLK_HEVC_CA_RKVDEC 194
+#define ACLK_VOP 195
+#define HCLK_VOP 196
+#define HCLK_RKJPEG 197
+#define ACLK_RKJPEG 198
+#define ACLK_RKMMU_DECOM 199
+#define HCLK_RKMMU_DECOM 200
+#define DCLK_DECOM 201
+#define ACLK_DECOM 202
+#define PCLK_DECOM 203
+#define PCLK_MIPI_DSI 204
+#define PCLK_DSIPHY 205
+#define ACLK_OOC 206
+#define ACLK_SYSMEM 207
+#define PCLK_DDRC 208
+#define PCLK_DDRMON 209
+#define CLK_TIMER_DDRMON 210
+#define PCLK_DFICTRL 211
+#define PCLK_DDRPHY 212
+#define PCLK_DMA2DDR 213
+#define CLK_RCOSC_SRC 214
+#define BUSCLK_PMU_MUX 215
+#define BUSCLK_PMU_ROOT 216
+#define PCLK_PMU 217
+#define CLK_XIN_RC_DIV 218
+#define CLK_32K 219
+#define PCLK_PMU_GPIO0 220
+#define DBCLK_PMU_GPIO0 221
+#define PCLK_PMU_HP_TIMER 222
+#define CLK_PMU_HP_TIMER 223
+#define CLK_PMU_32K_HP_TIMER 224
+#define PCLK_PWM1 225
+#define CLK_PWM1 226
+#define CLK_OSC_PWM1 227
+#define CLK_RC_PWM1 228
+#define CLK_FREQ_PWM1 229
+#define CLK_COUNTER_PWM1 230
+#define PCLK_I2C2 231
+#define CLK_I2C2 232
+#define PCLK_UART0 233
+#define SCLK_UART0 234
+#define PCLK_RCOSC_CTRL 235
+#define CLK_OSC_RCOSC_CTRL 236
+#define CLK_REF_RCOSC_CTRL 237
+#define PCLK_IOC_PMUIO0 238
+#define CLK_REFOUT 239
+#define CLK_PREROLL 240
+#define CLK_PREROLL_32K 241
+#define HCLK_PMU_SRAM 242
+#define PCLK_WDT_LPMCU 243
+#define TCLK_WDT_LPMCU 244
+#define CLK_LPMCU 245
+#define CLK_LPMCU_RTC 246
+#define PCLK_LPMCU_MAILBOX 247
+#define HCLK_OOC 248
+#define PCLK_SPI2AHB 249
+#define HCLK_SPI2AHB 250
+#define HCLK_FSPI1 251
+#define HCLK_XIP_FSPI1 252
+#define SCLK_1X_FSPI1 253
+#define PCLK_IOC_PMUIO1 254
+#define PCLK_AUDIO_ADC_PMU 255
+#define MCLK_AUDIO_ADC_PMU 256
+#define MCLK_AUDIO_ADC_DIV4_PMU 257
+#define MCLK_LPSAI 258
+#define ACLK_GIC400 259
+#define PCLK_WDT_NS 260
+#define TCLK_WDT_NS 261
+#define PCLK_WDT_HPMCU 262
+#define HCLK_CACHE 263
+#define PCLK_HPMCU_MAILBOX 264
+#define PCLK_HPMCU_INTMUX 265
+#define CLK_HPMCU 266
+#define CLK_HPMCU_RTC 267
+#define PCLK_RKDMA 268
+#define ACLK_RKDMA 269
+#define PCLK_DCF 270
+#define ACLK_DCF 271
+#define HCLK_RGA 272
+#define ACLK_RGA 273
+#define CLK_CORE_RGA 274
+#define PCLK_TIMER 275
+#define CLK_TIMER0 276
+#define CLK_TIMER1 277
+#define CLK_TIMER2 278
+#define CLK_TIMER3 279
+#define CLK_TIMER4 280
+#define CLK_TIMER5 281
+#define PCLK_I2C0 282
+#define CLK_I2C0 283
+#define PCLK_I2C1 284
+#define CLK_I2C1 285
+#define PCLK_I2C3 286
+#define CLK_I2C3 287
+#define PCLK_I2C4 288
+#define CLK_I2C4 289
+#define PCLK_I2C5 290
+#define CLK_I2C5 291
+#define PCLK_SPI0 292
+#define PCLK_SPI1 293
+#define PCLK_PWM0 294
+#define CLK_OSC_PWM0 295
+#define CLK_RC_PWM0 296
+#define PCLK_PWM2 297
+#define CLK_OSC_PWM2 298
+#define CLK_RC_PWM2 299
+#define PCLK_PWM3 300
+#define CLK_OSC_PWM3 301
+#define CLK_RC_PWM3 302
+#define PCLK_UART1 303
+#define PCLK_UART2 304
+#define PCLK_UART3 305
+#define PCLK_UART4 306
+#define PCLK_UART5 307
+#define PCLK_UART6 308
+#define PCLK_UART7 309
+#define PCLK_TSADC 310
+#define CLK_TSADC 311
+#define HCLK_SAI0 312
+#define HCLK_SAI1 313
+#define HCLK_SAI2 314
+#define HCLK_RKDSM 315
+#define MCLK_RKDSM 316
+#define HCLK_PDM 317
+#define HCLK_ASRC0 318
+#define HCLK_ASRC1 319
+#define PCLK_AUDIO_ADC_BUS 320
+#define MCLK_AUDIO_ADC_BUS 321
+#define MCLK_AUDIO_ADC_DIV4_BUS 322
+#define PCLK_RKCE 323
+#define HCLK_NS_RKCE 324
+#define PCLK_OTPC_NS 325
+#define CLK_SBPI_OTPC_NS 326
+#define CLK_USER_OTPC_NS 327
+#define CLK_OTPC_ARB 328
+#define PCLK_OTP_MASK 329
+#define CLK_TSADC_PHYCTRL 330
+#define LRCK_SRC_ASRC0 331
+#define LRCK_DST_ASRC0 332
+#define LRCK_SRC_ASRC1 333
+#define LRCK_DST_ASRC1 334
+#define PCLK_KEY_READER 335
+#define ACLK_NSRKCE 336
+#define CLK_PKA_NSRKCE 337
+#define PCLK_RTC_ROOT 338
+#define PCLK_GPIO1 339
+#define DBCLK_GPIO1 340
+#define PCLK_IOC_VCCIO1 341
+#define ACLK_USB3OTG 342
+#define CLK_REF_USB3OTG 343
+#define CLK_SUSPEND_USB3OTG 344
+#define HCLK_USB2HOST 345
+#define HCLK_ARB_USB2HOST 346
+#define PCLK_RTC_TEST 347
+#define HCLK_EMMC 348
+#define HCLK_FSPI0 349
+#define HCLK_XIP_FSPI0 350
+#define PCLK_PIPEPHY 351
+#define PCLK_USB2PHY 352
+#define CLK_REF_PIPEPHY_CPLL_SRC 353
+#define CLK_REF_PIPEPHY 354
+#define HCLK_VPSL 355
+#define ACLK_VPSL 356
+#define CLK_CORE_VPSL 357
+#define CLK_MACPHY 358
+#define HCLK_RKRNG_NS 359
+#define HCLK_RKRNG_S_NS 360
+#define CLK_AISP_PLL_SRC 361
+
+/* secure clks */
+#define CLK_USER_OTPC_S 362
+#define CLK_SBPI_OTPC_S 363
+#define PCLK_OTPC_S 364
+#define PCLK_KEY_READER_S 365
+#define HCLK_KL_RKCE_S 366
+#define HCLK_RKCE_S 367
+#define PCLK_WDT_S 368
+#define TCLK_WDT_S 369
+#define CLK_STIMER0 370
+#define CLK_STIMER1 371
+#define PLK_STIMER 372
+#define HCLK_RKRNG_S 373
+#define CLK_PKA_RKCE_S 374
+#define ACLK_RKCE_S 375
+
+#endif
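
Besides plain consumer references, indices like these are also what assigned-clocks uses to pin default rates at boot. A hypothetical sketch; the rates and the cru label are assumptions for illustration, not values taken from this binding:

    /* DTS sketch: rates below are invented, only the index macros are real */
    &cru {
            assigned-clocks = <&cru ACLK_BUS_ROOT>, <&cru HCLK_BUS_ROOT>;
            assigned-clock-rates = <300000000>, <150000000>;
    };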
diff --git a/include/dt-bindings/clock/s3c2410.h b/include/dt-bindings/clock/s3c2410.h
deleted file mode 100644
index 0fb65c3f2f59..000000000000
--- a/include/dt-bindings/clock/s3c2410.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * Device Tree binding constants for clock controllers of Samsung S3C2410 and later.
- */
-
-#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C2410_CLOCK_H
-#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C2410_CLOCK_H
-
-/*
- * Let each exported clock get a unique index, which is used on DT-enabled
- * platforms to lookup the clock from a clock specifier. These indices are
- * therefore considered an ABI and so must not be changed. This implies
- * that new clocks should be added either in free spaces between clock groups
- * or at the end.
- */
-
-/* Core clocks. */
-
-/* id 1 is reserved */
-#define MPLL 2
-#define UPLL 3
-#define FCLK 4
-#define HCLK 5
-#define PCLK 6
-#define UCLK 7
-#define ARMCLK 8
-
-/* pclk-gates */
-#define PCLK_UART0 16
-#define PCLK_UART1 17
-#define PCLK_UART2 18
-#define PCLK_I2C 19
-#define PCLK_SDI 20
-#define PCLK_SPI 21
-#define PCLK_ADC 22
-#define PCLK_AC97 23
-#define PCLK_I2S 24
-#define PCLK_PWM 25
-#define PCLK_RTC 26
-#define PCLK_GPIO 27
-
-
-/* hclk-gates */
-#define HCLK_LCD 32
-#define HCLK_USBH 33
-#define HCLK_USBD 34
-#define HCLK_NAND 35
-#define HCLK_CAM 36
-
-
-#define CAMIF 40
-
-
-/* Total number of clocks. */
-#define NR_CLKS (CAMIF + 1)
-
-#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S3C2410_CLOCK_H */
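
The comment deleted above states the contract these headers exist for: each exported clock gets a stable index, and a DT consumer selects a clock by pairing the provider phandle with that index. Purely to illustrate that lookup (the file is being removed, and the node label here is invented):

    /* <&clocks PCLK_UART0> resolves to index 16 via this header */
    serial@50000000 {
            clocks = <&clocks PCLK_UART0>, <&clocks PCLK>;
    };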
diff --git a/include/dt-bindings/clock/s3c2412.h b/include/dt-bindings/clock/s3c2412.h
deleted file mode 100644
index b4656156cc0f..000000000000
--- a/include/dt-bindings/clock/s3c2412.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * Device Tree binding constants for clock controllers of Samsung S3C2412.
- */
-
-#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C2412_CLOCK_H
-#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C2412_CLOCK_H
-
-/*
- * Let each exported clock get a unique index, which is used on DT-enabled
- * platforms to lookup the clock from a clock specifier. These indices are
- * therefore considered an ABI and so must not be changed. This implies
- * that new clocks should be added either in free spaces between clock groups
- * or at the end.
- */
-
-/* Core clocks. */
-
-/* id 1 is reserved */
-#define MPLL 2
-#define UPLL 3
-#define MDIVCLK 4
-#define MSYSCLK 5
-#define USYSCLK 6
-#define HCLK 7
-#define PCLK 8
-#define ARMDIV 9
-#define ARMCLK 10
-
-
-/* Special clocks */
-#define SCLK_CAM 16
-#define SCLK_UART 17
-#define SCLK_I2S 18
-#define SCLK_USBD 19
-#define SCLK_USBH 20
-
-/* pclk-gates */
-#define PCLK_WDT 32
-#define PCLK_SPI 33
-#define PCLK_I2S 34
-#define PCLK_I2C 35
-#define PCLK_ADC 36
-#define PCLK_RTC 37
-#define PCLK_GPIO 38
-#define PCLK_UART2 39
-#define PCLK_UART1 40
-#define PCLK_UART0 41
-#define PCLK_SDI 42
-#define PCLK_PWM 43
-#define PCLK_USBD 44
-
-/* hclk-gates */
-#define HCLK_HALF 48
-#define HCLK_X2 49
-#define HCLK_SDRAM 50
-#define HCLK_USBH 51
-#define HCLK_LCD 52
-#define HCLK_NAND 53
-#define HCLK_DMA3 54
-#define HCLK_DMA2 55
-#define HCLK_DMA1 56
-#define HCLK_DMA0 57
-
-/* Total number of clocks. */
-#define NR_CLKS (HCLK_DMA0 + 1)
-
-#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S3C2412_CLOCK_H */
diff --git a/include/dt-bindings/clock/s3c2443.h b/include/dt-bindings/clock/s3c2443.h
deleted file mode 100644
index a9d2f105d536..000000000000
--- a/include/dt-bindings/clock/s3c2443.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * Device Tree binding constants for clock controllers of Samsung S3C2443 and later.
- */
-
-#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S3C2443_CLOCK_H
-#define _DT_BINDINGS_CLOCK_SAMSUNG_S3C2443_CLOCK_H
-
-/*
- * Let each exported clock get a unique index, which is used on DT-enabled
- * platforms to lookup the clock from a clock specifier. These indices are
- * therefore considered an ABI and so must not be changed. This implies
- * that new clocks should be added either in free spaces between clock groups
- * or at the end.
- */
-
-/* Core clocks. */
-#define MSYSCLK 1
-#define ESYSCLK 2
-#define ARMDIV 3
-#define ARMCLK 4
-#define HCLK 5
-#define PCLK 6
-#define MPLL 7
-#define EPLL 8
-
-/* Special clocks */
-#define SCLK_HSSPI0 16
-#define SCLK_FIMD 17
-#define SCLK_I2S0 18
-#define SCLK_I2S1 19
-#define SCLK_HSMMC1 20
-#define SCLK_HSMMC_EXT 21
-#define SCLK_CAM 22
-#define SCLK_UART 23
-#define SCLK_USBH 24
-
-/* Muxes */
-#define MUX_HSSPI0 32
-#define MUX_HSSPI1 33
-#define MUX_HSMMC0 34
-#define MUX_HSMMC1 35
-
-/* hclk-gates */
-#define HCLK_DMA0 48
-#define HCLK_DMA1 49
-#define HCLK_DMA2 50
-#define HCLK_DMA3 51
-#define HCLK_DMA4 52
-#define HCLK_DMA5 53
-#define HCLK_DMA6 54
-#define HCLK_DMA7 55
-#define HCLK_CAM 56
-#define HCLK_LCD 57
-#define HCLK_USBH 58
-#define HCLK_USBD 59
-#define HCLK_IROM 60
-#define HCLK_HSMMC0 61
-#define HCLK_HSMMC1 62
-#define HCLK_CFC 63
-#define HCLK_SSMC 64
-#define HCLK_DRAM 65
-#define HCLK_2D 66
-
-/* pclk-gates */
-#define PCLK_UART0 72
-#define PCLK_UART1 73
-#define PCLK_UART2 74
-#define PCLK_UART3 75
-#define PCLK_I2C0 76
-#define PCLK_SDI 77
-#define PCLK_SPI0 78
-#define PCLK_ADC 79
-#define PCLK_AC97 80
-#define PCLK_I2S0 81
-#define PCLK_PWM 82
-#define PCLK_WDT 83
-#define PCLK_RTC 84
-#define PCLK_GPIO 85
-#define PCLK_SPI1 86
-#define PCLK_CHIPID 87
-#define PCLK_I2C1 88
-#define PCLK_I2S1 89
-#define PCLK_PCM 90
-
-/* Total number of clocks. */
-#define NR_CLKS (PCLK_PCM + 1)
-
-#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S3C2443_CLOCK_H */
diff --git a/include/dt-bindings/clock/samsung,exynos2200-cmu.h b/include/dt-bindings/clock/samsung,exynos2200-cmu.h
new file mode 100644
index 000000000000..310552be0c8c
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,exynos2200-cmu.h
@@ -0,0 +1,431 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ * Author: Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ *
+ * Device Tree binding constants for Exynos2200 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS2200_H
+#define _DT_BINDINGS_CLOCK_EXYNOS2200_H
+
+/* CMU_TOP */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_FOUT_SHARED2_PLL 3
+#define CLK_FOUT_SHARED3_PLL 4
+#define CLK_FOUT_SHARED4_PLL 5
+#define CLK_FOUT_MMC_PLL 6
+#define CLK_FOUT_SHARED_MIF_PLL 7
+
+#define CLK_MOUT_CMU_CP_MPLL_CLK_D2_USER 8
+#define CLK_MOUT_CMU_CP_MPLL_CLK_USER 9
+#define CLK_MOUT_CMU_AUD_AUDIF0 10
+#define CLK_MOUT_CMU_AUD_AUDIF1 11
+#define CLK_MOUT_CMU_AUD_CPU 12
+#define CLK_MOUT_CMU_CPUCL0_DBG_NOC 13
+#define CLK_MOUT_CMU_CPUCL0_SWITCH 14
+#define CLK_MOUT_CMU_CPUCL1_SWITCH 15
+#define CLK_MOUT_CMU_CPUCL2_SWITCH 16
+#define CLK_MOUT_CMU_DNC_NOC 17
+#define CLK_MOUT_CMU_DPUB_NOC 18
+#define CLK_MOUT_CMU_DPUF_NOC 19
+#define CLK_MOUT_CMU_DSP_NOC 20
+#define CLK_MOUT_CMU_DSU_SWITCH 21
+#define CLK_MOUT_CMU_G3D_SWITCH 22
+#define CLK_MOUT_CMU_GNPU_NOC 23
+#define CLK_MOUT_CMU_UFS_MMC_CARD 24
+#define CLK_MOUT_CMU_M2M_NOC 25
+#define CLK_MOUT_CMU_NOCL0_NOC 26
+#define CLK_MOUT_CMU_NOCL1A_NOC 27
+#define CLK_MOUT_CMU_NOCL1B_NOC0 28
+#define CLK_MOUT_CMU_NOCL1C_NOC 29
+#define CLK_MOUT_CMU_SDMA_NOC 30
+#define CLK_MOUT_CMU_CP_HISPEEDY_CLK 31
+#define CLK_MOUT_CMU_CP_SHARED0_CLK 32
+#define CLK_MOUT_CMU_CP_SHARED2_CLK 33
+#define CLK_MOUT_CMU_MUX_ALIVE_NOC 34
+#define CLK_MOUT_CMU_MUX_AUD_AUDIF0 35
+#define CLK_MOUT_CMU_MUX_AUD_AUDIF1 36
+#define CLK_MOUT_CMU_MUX_AUD_CPU 37
+#define CLK_MOUT_CMU_MUX_AUD_NOC 38
+#define CLK_MOUT_CMU_MUX_BRP_NOC 39
+#define CLK_MOUT_CMU_MUX_CIS_CLK0 40
+#define CLK_MOUT_CMU_MUX_CIS_CLK1 41
+#define CLK_MOUT_CMU_MUX_CIS_CLK2 42
+#define CLK_MOUT_CMU_MUX_CIS_CLK3 43
+#define CLK_MOUT_CMU_MUX_CIS_CLK4 44
+#define CLK_MOUT_CMU_MUX_CIS_CLK5 45
+#define CLK_MOUT_CMU_MUX_CIS_CLK6 46
+#define CLK_MOUT_CMU_MUX_CIS_CLK7 47
+#define CLK_MOUT_CMU_MUX_CMU_BOOST 48
+#define CLK_MOUT_CMU_MUX_CMU_BOOST_CAM 49
+#define CLK_MOUT_CMU_MUX_CMU_BOOST_CPU 50
+#define CLK_MOUT_CMU_MUX_CMU_BOOST_MIF 51
+#define CLK_MOUT_CMU_MUX_CPUCL0_DBG_NOC 52
+#define CLK_MOUT_CMU_MUX_CPUCL0_NOCP 53
+#define CLK_MOUT_CMU_MUX_CPUCL0_SWITCH 54
+#define CLK_MOUT_CMU_MUX_CPUCL1_SWITCH 55
+#define CLK_MOUT_CMU_MUX_CPUCL2_SWITCH 56
+#define CLK_MOUT_CMU_MUX_CSIS_DCPHY 57
+#define CLK_MOUT_CMU_MUX_CSIS_NOC 58
+#define CLK_MOUT_CMU_MUX_CSIS_OIS_MCU 59
+#define CLK_MOUT_CMU_MUX_CSTAT_NOC 60
+#define CLK_MOUT_CMU_MUX_DNC_NOC 61
+#define CLK_MOUT_CMU_MUX_DPUB 62
+#define CLK_MOUT_CMU_MUX_DPUB_ALT 63
+#define CLK_MOUT_CMU_MUX_DPUB_DSIM 64
+#define CLK_MOUT_CMU_MUX_DPUF 65
+#define CLK_MOUT_CMU_MUX_DPUF_ALT 66
+#define CLK_MOUT_CMU_MUX_DSP_NOC 67
+#define CLK_MOUT_CMU_MUX_DSU_SWITCH 68
+#define CLK_MOUT_CMU_MUX_G3D_NOCP 69
+#define CLK_MOUT_CMU_MUX_G3D_SWITCH 70
+#define CLK_MOUT_CMU_MUX_GNPU_NOC 71
+#define CLK_MOUT_CMU_MUX_HSI0_DPGTC 72
+#define CLK_MOUT_CMU_MUX_HSI0_DPOSC 73
+#define CLK_MOUT_CMU_MUX_HSI0_NOC 74
+#define CLK_MOUT_CMU_MUX_HSI0_USB32DRD 75
+#define CLK_MOUT_CMU_MUX_UFS_MMC_CARD 76
+#define CLK_MOUT_CMU_MUX_HSI1_NOC 77
+#define CLK_MOUT_CMU_MUX_HSI1_PCIE 78
+#define CLK_MOUT_CMU_MUX_UFS_UFS_EMBD 79
+#define CLK_MOUT_CMU_MUX_LME_LME 80
+#define CLK_MOUT_CMU_MUX_LME_NOC 81
+#define CLK_MOUT_CMU_MUX_M2M_NOC 82
+#define CLK_MOUT_CMU_MUX_MCSC_MCSC 83
+#define CLK_MOUT_CMU_MUX_MCSC_NOC 84
+#define CLK_MOUT_CMU_MUX_MFC0_MFC0 85
+#define CLK_MOUT_CMU_MUX_MFC0_WFD 86
+#define CLK_MOUT_CMU_MUX_MFC1_MFC1 87
+#define CLK_MOUT_CMU_MUX_MIF_NOCP 88
+#define CLK_MOUT_CMU_MUX_MIF_SWITCH 89
+#define CLK_MOUT_CMU_MUX_NOCL0_NOC 90
+#define CLK_MOUT_CMU_MUX_NOCL1A_NOC 91
+#define CLK_MOUT_CMU_MUX_NOCL1B_NOC0 92
+#define CLK_MOUT_CMU_MUX_NOCL1B_NOC1 93
+#define CLK_MOUT_CMU_MUX_NOCL1C_NOC 94
+#define CLK_MOUT_CMU_MUX_PERIC0_IP0 95
+#define CLK_MOUT_CMU_MUX_PERIC0_IP1 96
+#define CLK_MOUT_CMU_MUX_PERIC0_NOC 97
+#define CLK_MOUT_CMU_MUX_PERIC1_IP0 98
+#define CLK_MOUT_CMU_MUX_PERIC1_IP1 99
+#define CLK_MOUT_CMU_MUX_PERIC1_NOC 100
+#define CLK_MOUT_CMU_MUX_PERIC2_IP0 101
+#define CLK_MOUT_CMU_MUX_PERIC2_IP1 102
+#define CLK_MOUT_CMU_MUX_PERIC2_NOC 103
+#define CLK_MOUT_CMU_MUX_PERIS_GIC 104
+#define CLK_MOUT_CMU_MUX_PERIS_NOC 105
+#define CLK_MOUT_CMU_MUX_SDMA_NOC 106
+#define CLK_MOUT_CMU_MUX_SSP_NOC 107
+#define CLK_MOUT_CMU_MUX_VTS_DMIC 108
+#define CLK_MOUT_CMU_MUX_YUVP_NOC 109
+#define CLK_MOUT_CMU_MUX_CMU_CMUREF 110
+#define CLK_MOUT_CMU_MUX_CP_HISPEEDY_CLK 111
+#define CLK_MOUT_CMU_MUX_CP_SHARED0_CLK 112
+#define CLK_MOUT_CMU_MUX_CP_SHARED1_CLK 113
+#define CLK_MOUT_CMU_MUX_CP_SHARED2_CLK 114
+#define CLK_MOUT_CMU_M2M_FRC 115
+#define CLK_MOUT_CMU_MCSC_MCSC 116
+#define CLK_MOUT_CMU_MCSC_NOC 117
+#define CLK_MOUT_CMU_MUX_M2M_FRC 118
+#define CLK_MOUT_CMU_MUX_UFS_NOC 119
+
+#define CLK_DOUT_CMU_ALIVE_NOC 120
+#define CLK_DOUT_CMU_AUD_NOC 121
+#define CLK_DOUT_CMU_BRP_NOC 122
+#define CLK_DOUT_CMU_CMU_BOOST 123
+#define CLK_DOUT_CMU_CMU_BOOST_CAM 124
+#define CLK_DOUT_CMU_CMU_BOOST_CPU 125
+#define CLK_DOUT_CMU_CMU_BOOST_MIF 126
+#define CLK_DOUT_CMU_CPUCL0_NOCP 127
+#define CLK_DOUT_CMU_CSIS_DCPHY 128
+#define CLK_DOUT_CMU_CSIS_NOC 129
+#define CLK_DOUT_CMU_CSIS_OIS_MCU 130
+#define CLK_DOUT_CMU_CSTAT_NOC 131
+#define CLK_DOUT_CMU_DPUB_DSIM 132
+#define CLK_DOUT_CMU_LME_LME 133
+#define CLK_DOUT_CMU_G3D_NOCP 134
+#define CLK_DOUT_CMU_HSI0_DPGTC 135
+#define CLK_DOUT_CMU_HSI0_DPOSC 136
+#define CLK_DOUT_CMU_HSI0_NOC 137
+#define CLK_DOUT_CMU_HSI0_USB32DRD 138
+#define CLK_DOUT_CMU_HSI1_NOC 139
+#define CLK_DOUT_CMU_HSI1_PCIE 140
+#define CLK_DOUT_CMU_UFS_UFS_EMBD 141
+#define CLK_DOUT_CMU_LME_NOC 142
+#define CLK_DOUT_CMU_MFC0_MFC0 143
+#define CLK_DOUT_CMU_MFC0_WFD 144
+#define CLK_DOUT_CMU_MFC1_MFC1 145
+#define CLK_DOUT_CMU_MIF_NOCP 146
+#define CLK_DOUT_CMU_NOCL1B_NOC1 147
+#define CLK_DOUT_CMU_PERIC0_IP0 148
+#define CLK_DOUT_CMU_PERIC0_IP1 149
+#define CLK_DOUT_CMU_PERIC0_NOC 150
+#define CLK_DOUT_CMU_PERIC1_IP0 151
+#define CLK_DOUT_CMU_PERIC1_IP1 152
+#define CLK_DOUT_CMU_PERIC1_NOC 153
+#define CLK_DOUT_CMU_PERIC2_IP0 154
+#define CLK_DOUT_CMU_PERIC2_IP1 155
+#define CLK_DOUT_CMU_PERIC2_NOC 156
+#define CLK_DOUT_CMU_PERIS_GIC 157
+#define CLK_DOUT_CMU_PERIS_NOC 158
+#define CLK_DOUT_CMU_SSP_NOC 159
+#define CLK_DOUT_CMU_VTS_DMIC 160
+#define CLK_DOUT_CMU_YUVP_NOC 161
+#define CLK_DOUT_CMU_CP_SHARED1_CLK 162
+#define CLK_DOUT_CMU_DIV_AUD_AUDIF0 163
+#define CLK_DOUT_CMU_DIV_AUD_AUDIF0_SM 164
+#define CLK_DOUT_CMU_DIV_AUD_AUDIF1 165
+#define CLK_DOUT_CMU_DIV_AUD_AUDIF1_SM 166
+#define CLK_DOUT_CMU_DIV_AUD_CPU 167
+#define CLK_DOUT_CMU_DIV_AUD_CPU_SM 168
+#define CLK_DOUT_CMU_DIV_CIS_CLK0 169
+#define CLK_DOUT_CMU_DIV_CIS_CLK1 170
+#define CLK_DOUT_CMU_DIV_CIS_CLK2 171
+#define CLK_DOUT_CMU_DIV_CIS_CLK3 172
+#define CLK_DOUT_CMU_DIV_CIS_CLK4 173
+#define CLK_DOUT_CMU_DIV_CIS_CLK5 174
+#define CLK_DOUT_CMU_DIV_CIS_CLK6 175
+#define CLK_DOUT_CMU_DIV_CIS_CLK7 176
+#define CLK_DOUT_CMU_DIV_CPUCL0_DBG_NOC 177
+#define CLK_DOUT_CMU_DIV_CPUCL0_DBG_NOC_SM 178
+#define CLK_DOUT_CMU_DIV_CPUCL0_SWITCH 179
+#define CLK_DOUT_CMU_DIV_CPUCL0_SWITCH_SM 180
+#define CLK_DOUT_CMU_DIV_CPUCL1_SWITCH 181
+#define CLK_DOUT_CMU_DIV_CPUCL1_SWITCH_SM 182
+#define CLK_DOUT_CMU_DIV_CPUCL2_SWITCH 183
+#define CLK_DOUT_CMU_DIV_CPUCL2_SWITCH_SM 184
+#define CLK_DOUT_CMU_DIV_DNC_NOC 185
+#define CLK_DOUT_CMU_DIV_DNC_NOC_SM 186
+#define CLK_DOUT_CMU_DIV_DPUB 187
+#define CLK_DOUT_CMU_DIV_DPUB_ALT 188
+#define CLK_DOUT_CMU_DIV_DPUF 189
+#define CLK_DOUT_CMU_DIV_DPUF_ALT 190
+#define CLK_DOUT_CMU_DIV_DSP_NOC 191
+#define CLK_DOUT_CMU_DIV_DSP_NOC_SM 192
+#define CLK_DOUT_CMU_DIV_DSU_SWITCH 193
+#define CLK_DOUT_CMU_DIV_DSU_SWITCH_SM 194
+#define CLK_DOUT_CMU_DIV_G3D_SWITCH 195
+#define CLK_DOUT_CMU_DIV_G3D_SWITCH_SM 196
+#define CLK_DOUT_CMU_DIV_GNPU_NOC 197
+#define CLK_DOUT_CMU_DIV_GNPU_NOC_SM 198
+#define CLK_DOUT_CMU_DIV_UFS_MMC_CARD 199
+#define CLK_DOUT_CMU_DIV_UFS_MMC_CARD_SM 200
+#define CLK_DOUT_CMU_DIV_M2M_NOC 201
+#define CLK_DOUT_CMU_DIV_M2M_NOC_SM 202
+#define CLK_DOUT_CMU_DIV_NOCL0_NOC 203
+#define CLK_DOUT_CMU_DIV_NOCL0_NOC_SM 204
+#define CLK_DOUT_CMU_DIV_NOCL1A_NOC 205
+#define CLK_DOUT_CMU_DIV_NOCL1A_NOC_SM 206
+#define CLK_DOUT_CMU_DIV_NOCL1B_NOC0 207
+#define CLK_DOUT_CMU_DIV_NOCL1B_NOC0_SM 208
+#define CLK_DOUT_CMU_DIV_NOCL1C_NOC 209
+#define CLK_DOUT_CMU_DIV_NOCL1C_NOC_SM 210
+#define CLK_DOUT_CMU_DIV_SDMA_NOC 211
+#define CLK_DOUT_CMU_DIV_SDMA_NOC_SM 212
+#define CLK_DOUT_CMU_DIV_CP_HISPEEDY_CLK 213
+#define CLK_DOUT_CMU_DIV_CP_HISPEEDY_CLK_SM 214
+#define CLK_DOUT_CMU_DIV_CP_SHARED0_CLK 215
+#define CLK_DOUT_CMU_DIV_CP_SHARED0_CLK_SM 216
+#define CLK_DOUT_CMU_DIV_CP_SHARED2_CLK 217
+#define CLK_DOUT_CMU_DIV_CP_SHARED2_CLK_SM 218
+#define CLK_DOUT_CMU_UFS_NOC 219
+#define CLK_DOUT_CMU_DIV_M2M_FRC 220
+#define CLK_DOUT_CMU_DIV_M2M_FRC_SM 221
+#define CLK_DOUT_CMU_DIV_MCSC_MCSC 222
+#define CLK_DOUT_CMU_DIV_MCSC_MCSC_SM 223
+#define CLK_DOUT_CMU_DIV_MCSC_NOC 224
+#define CLK_DOUT_CMU_DIV_MCSC_NOC_SM 225
+#define CLK_DOUT_SHARED0_DIV1 226
+#define CLK_DOUT_SHARED0_DIV2 227
+#define CLK_DOUT_SHARED0_DIV4 228
+#define CLK_DOUT_SHARED1_DIV1 229
+#define CLK_DOUT_SHARED1_DIV2 230
+#define CLK_DOUT_SHARED1_DIV4 231
+#define CLK_DOUT_SHARED2_DIV1 232
+#define CLK_DOUT_SHARED2_DIV2 233
+#define CLK_DOUT_SHARED2_DIV4 234
+#define CLK_DOUT_SHARED3_DIV1 235
+#define CLK_DOUT_SHARED3_DIV2 236
+#define CLK_DOUT_SHARED3_DIV4 237
+#define CLK_DOUT_SHARED4_DIV1 238
+#define CLK_DOUT_SHARED4_DIV2 239
+#define CLK_DOUT_SHARED4_DIV4 240
+#define CLK_DOUT_SHARED_MIF_DIV1 241
+#define CLK_DOUT_SHARED_MIF_DIV2 242
+#define CLK_DOUT_SHARED_MIF_DIV4 243
+#define CLK_DOUT_TCXO_DIV3 244
+#define CLK_DOUT_TCXO_DIV4 245
+
+/* CMU_ALIVE */
+#define CLK_MOUT_ALIVE_NOC_USER 1
+#define CLK_MOUT_ALIVE_RCO_SPMI_USER 2
+#define CLK_MOUT_RCO_ALIVE_USER 3
+#define CLK_MOUT_ALIVE_CHUB_PERI 4
+#define CLK_MOUT_ALIVE_CMGP_NOC 5
+#define CLK_MOUT_ALIVE_CMGP_PERI 6
+#define CLK_MOUT_ALIVE_DBGCORE_NOC 7
+#define CLK_MOUT_ALIVE_DNC_NOC 8
+#define CLK_MOUT_ALIVE_CHUBVTS_NOC 9
+#define CLK_MOUT_ALIVE_GNPU_NOC 10
+#define CLK_MOUT_ALIVE_GNSS_NOC 11
+#define CLK_MOUT_ALIVE_SDMA_NOC 12
+#define CLK_MOUT_ALIVE_UFD_NOC 13
+#define CLK_MOUT_ALIVE_DBGCORE_UART 14
+#define CLK_MOUT_ALIVE_NOC 15
+#define CLK_MOUT_ALIVE_PMU_SUB 16
+#define CLK_MOUT_ALIVE_SPMI 17
+#define CLK_MOUT_ALIVE_TIMER 18
+#define CLK_MOUT_ALIVE_CSIS_NOC 19
+#define CLK_MOUT_ALIVE_DSP_NOC 20
+
+#define CLK_DOUT_ALIVE_CHUB_PERI 21
+#define CLK_DOUT_ALIVE_CMGP_NOC 22
+#define CLK_DOUT_ALIVE_CMGP_PERI 23
+#define CLK_DOUT_ALIVE_DBGCORE_NOC 24
+#define CLK_DOUT_ALIVE_DNC_NOC 25
+#define CLK_DOUT_ALIVE_CHUBVTS_NOC 26
+#define CLK_DOUT_ALIVE_GNPU_NOC 27
+#define CLK_DOUT_ALIVE_SDMA_NOC 28
+#define CLK_DOUT_ALIVE_UFD_NOC 29
+#define CLK_DOUT_ALIVE_DBGCORE_UART 30
+#define CLK_DOUT_ALIVE_NOC 31
+#define CLK_DOUT_ALIVE_PMU_SUB 32
+#define CLK_DOUT_ALIVE_SPMI 33
+#define CLK_DOUT_ALIVE_CSIS_NOC 34
+#define CLK_DOUT_ALIVE_DSP_NOC 35
+
+/* CMU_PERIS */
+#define CLK_MOUT_PERIS_GIC_USER 1
+#define CLK_MOUT_PERIS_NOC_USER 2
+#define CLK_MOUT_PERIS_GIC 3
+
+#define CLK_DOUT_PERIS_OTP 4
+#define CLK_DOUT_PERIS_DDD_CTRL 5
+
+/* CMU_CMGP */
+#define CLK_MOUT_CMGP_CLKALIVE_NOC_USER 1
+#define CLK_MOUT_CMGP_CLKALIVE_PERI_USER 2
+#define CLK_MOUT_CMGP_I2C 3
+#define CLK_MOUT_CMGP_SPI_I2C0 4
+#define CLK_MOUT_CMGP_SPI_I2C1 5
+#define CLK_MOUT_CMGP_SPI_MS_CTRL 6
+#define CLK_MOUT_CMGP_USI0 7
+#define CLK_MOUT_CMGP_USI1 8
+#define CLK_MOUT_CMGP_USI2 9
+#define CLK_MOUT_CMGP_USI3 10
+#define CLK_MOUT_CMGP_USI4 11
+#define CLK_MOUT_CMGP_USI5 12
+#define CLK_MOUT_CMGP_USI6 13
+
+#define CLK_DOUT_CMGP_I2C 14
+#define CLK_DOUT_CMGP_SPI_I2C0 15
+#define CLK_DOUT_CMGP_SPI_I2C1 16
+#define CLK_DOUT_CMGP_SPI_MS_CTRL 17
+#define CLK_DOUT_CMGP_USI0 18
+#define CLK_DOUT_CMGP_USI1 19
+#define CLK_DOUT_CMGP_USI2 20
+#define CLK_DOUT_CMGP_USI3 21
+#define CLK_DOUT_CMGP_USI4 22
+#define CLK_DOUT_CMGP_USI5 23
+#define CLK_DOUT_CMGP_USI6 24
+
+/* CMU_HSI0 */
+#define CLK_MOUT_CLKCMU_HSI0_DPGTC_USER 1
+#define CLK_MOUT_CLKCMU_HSI0_DPOSC_USER 2
+#define CLK_MOUT_CLKCMU_HSI0_NOC_USER 3
+#define CLK_MOUT_CLKCMU_HSI0_USB32DRD_USER 4
+#define CLK_MOUT_HSI0_NOC 5
+#define CLK_MOUT_HSI0_RTCCLK 6
+#define CLK_MOUT_HSI0_USB32DRD 7
+
+#define CLK_DOUT_DIV_CLK_HSI0_EUSB 8
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_IP0_USER 1
+#define CLK_MOUT_PERIC0_IP1_USER 2
+#define CLK_MOUT_PERIC0_NOC_USER 3
+#define CLK_MOUT_PERIC0_I2C 4
+#define CLK_MOUT_PERIC0_USI04 5
+
+#define CLK_DOUT_PERIC0_I2C 6
+#define CLK_DOUT_PERIC0_USI04 7
+
+/* CMU_PERIC1 */
+#define CLK_MOUT_PERIC1_IP0_USER 1
+#define CLK_MOUT_PERIC1_IP1_USER 2
+#define CLK_MOUT_PERIC1_NOC_USER 3
+#define CLK_MOUT_PERIC1_I2C 4
+#define CLK_MOUT_PERIC1_SPI_MS_CTRL 5
+#define CLK_MOUT_PERIC1_UART_BT 6
+#define CLK_MOUT_PERIC1_USI07 7
+#define CLK_MOUT_PERIC1_USI07_SPI_I2C 8
+#define CLK_MOUT_PERIC1_USI08 9
+#define CLK_MOUT_PERIC1_USI08_SPI_I2C 10
+#define CLK_MOUT_PERIC1_USI09 11
+#define CLK_MOUT_PERIC1_USI10 12
+
+#define CLK_DOUT_PERIC1_I2C 13
+#define CLK_DOUT_PERIC1_SPI_MS_CTRL 14
+#define CLK_DOUT_PERIC1_UART_BT 15
+#define CLK_DOUT_PERIC1_USI07 16
+#define CLK_DOUT_PERIC1_USI07_SPI_I2C 17
+#define CLK_DOUT_PERIC1_USI08 18
+#define CLK_DOUT_PERIC1_USI08_SPI_I2C 19
+#define CLK_DOUT_PERIC1_USI09 20
+#define CLK_DOUT_PERIC1_USI10 21
+
+/* CMU_PERIC2 */
+#define CLK_MOUT_PERIC2_IP0_USER 1
+#define CLK_MOUT_PERIC2_IP1_USER 2
+#define CLK_MOUT_PERIC2_NOC_USER 3
+#define CLK_MOUT_PERIC2_I2C 4
+#define CLK_MOUT_PERIC2_SPI_MS_CTRL 5
+#define CLK_MOUT_PERIC2_UART_DBG 6
+#define CLK_MOUT_PERIC2_USI00 7
+#define CLK_MOUT_PERIC2_USI00_SPI_I2C 8
+#define CLK_MOUT_PERIC2_USI01 9
+#define CLK_MOUT_PERIC2_USI01_SPI_I2C 10
+#define CLK_MOUT_PERIC2_USI02 11
+#define CLK_MOUT_PERIC2_USI03 12
+#define CLK_MOUT_PERIC2_USI05 13
+#define CLK_MOUT_PERIC2_USI06 14
+#define CLK_MOUT_PERIC2_USI11 15
+
+#define CLK_DOUT_PERIC2_I2C 16
+#define CLK_DOUT_PERIC2_SPI_MS_CTRL 17
+#define CLK_DOUT_PERIC2_UART_DBG 18
+#define CLK_DOUT_PERIC2_USI00 19
+#define CLK_DOUT_PERIC2_USI00_SPI_I2C 20
+#define CLK_DOUT_PERIC2_USI01 21
+#define CLK_DOUT_PERIC2_USI01_SPI_I2C 22
+#define CLK_DOUT_PERIC2_USI02 23
+#define CLK_DOUT_PERIC2_USI03 24
+#define CLK_DOUT_PERIC2_USI05 25
+#define CLK_DOUT_PERIC2_USI06 26
+#define CLK_DOUT_PERIC2_USI11 27
+
+/* CMU_UFS */
+#define CLK_MOUT_UFS_MMC_CARD_USER 1
+#define CLK_MOUT_UFS_NOC_USER 2
+#define CLK_MOUT_UFS_UFS_EMBD_USER 3
+
+/* CMU_VTS */
+#define CLK_MOUT_CLKALIVE_VTS_NOC_USER 1
+#define CLK_MOUT_CLKALIVE_VTS_RCO_USER 2
+#define CLK_MOUT_CLKCMU_VTS_DMIC_USER 3
+#define CLK_MOUT_CLKVTS_AUD_DMIC1 4
+#define CLK_MOUT_CLKVTS_NOC 5
+#define CLK_MOUT_CLKVTS_DMIC_PAD 6
+
+#define CLK_DOUT_CLKVTS_AUD_DMIC0 7
+#define CLK_DOUT_CLKVTS_AUD_DMIC1 8
+#define CLK_DOUT_CLKVTS_CPU 9
+#define CLK_DOUT_CLKVTS_DMIC_IF 10
+#define CLK_DOUT_CLKVTS_DMIC_IF_DIV2 11
+#define CLK_DOUT_CLKVTS_NOC 12
+#define CLK_DOUT_CLKVTS_SERIAL_LIF 13
+#define CLK_DOUT_CLKVTS_SERIAL_LIF_CORE 14
+
+#endif
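
Note the numbering model: unlike the flat Rockchip index spaces above, each CMU block here restarts from 1, so the same number names different clocks under different providers and the phandle in the specifier disambiguates them. A sketch with assumed CMU node labels:

    /* index 1 is CLK_FOUT_SHARED0_PLL under cmu_top but
     * CLK_MOUT_PERIC0_IP0_USER under cmu_peric0 */
    clocks = <&cmu_top CLK_FOUT_SHARED0_PLL>,
             <&cmu_peric0 CLK_MOUT_PERIC0_IP0_USER>;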
diff --git a/include/dt-bindings/clock/samsung,exynos7870-cmu.h b/include/dt-bindings/clock/samsung,exynos7870-cmu.h
new file mode 100644
index 000000000000..57d04bbe342d
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,exynos7870-cmu.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2015 Samsung Electronics Co., Ltd.
+ * Author: Kaustabh Chakraborty <kauschluss@disroot.org>
+ *
+ * Device Tree binding constants for Exynos7870 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS7870_H
+#define _DT_BINDINGS_CLOCK_EXYNOS7870_H
+
+/* CMU_MIF */
+#define CLK_DOUT_MIF_APB 1
+#define CLK_DOUT_MIF_BUSD 2
+#define CLK_DOUT_MIF_CMU_DISPAUD_BUS 3
+#define CLK_DOUT_MIF_CMU_DISPAUD_DECON_ECLK 4
+#define CLK_DOUT_MIF_CMU_DISPAUD_DECON_VCLK 5
+#define CLK_DOUT_MIF_CMU_FSYS_BUS 6
+#define CLK_DOUT_MIF_CMU_FSYS_MMC0 7
+#define CLK_DOUT_MIF_CMU_FSYS_MMC1 8
+#define CLK_DOUT_MIF_CMU_FSYS_MMC2 9
+#define CLK_DOUT_MIF_CMU_FSYS_USB20DRD_REFCLK 10
+#define CLK_DOUT_MIF_CMU_G3D_SWITCH 11
+#define CLK_DOUT_MIF_CMU_ISP_CAM 12
+#define CLK_DOUT_MIF_CMU_ISP_ISP 13
+#define CLK_DOUT_MIF_CMU_ISP_SENSOR0 14
+#define CLK_DOUT_MIF_CMU_ISP_SENSOR1 15
+#define CLK_DOUT_MIF_CMU_ISP_SENSOR2 16
+#define CLK_DOUT_MIF_CMU_ISP_VRA 17
+#define CLK_DOUT_MIF_CMU_MFCMSCL_MFC 18
+#define CLK_DOUT_MIF_CMU_MFCMSCL_MSCL 19
+#define CLK_DOUT_MIF_CMU_PERI_BUS 20
+#define CLK_DOUT_MIF_CMU_PERI_SPI0 21
+#define CLK_DOUT_MIF_CMU_PERI_SPI1 22
+#define CLK_DOUT_MIF_CMU_PERI_SPI2 23
+#define CLK_DOUT_MIF_CMU_PERI_SPI3 24
+#define CLK_DOUT_MIF_CMU_PERI_SPI4 25
+#define CLK_DOUT_MIF_CMU_PERI_UART0 26
+#define CLK_DOUT_MIF_CMU_PERI_UART1 27
+#define CLK_DOUT_MIF_CMU_PERI_UART2 28
+#define CLK_DOUT_MIF_HSI2C 29
+#define CLK_FOUT_MIF_BUS_PLL 30
+#define CLK_FOUT_MIF_MEDIA_PLL 31
+#define CLK_FOUT_MIF_MEM_PLL 32
+#define CLK_GOUT_MIF_CMU_DISPAUD_BUS 33
+#define CLK_GOUT_MIF_CMU_DISPAUD_DECON_ECLK 34
+#define CLK_GOUT_MIF_CMU_DISPAUD_DECON_VCLK 35
+#define CLK_GOUT_MIF_CMU_FSYS_BUS 36
+#define CLK_GOUT_MIF_CMU_FSYS_MMC0 37
+#define CLK_GOUT_MIF_CMU_FSYS_MMC1 38
+#define CLK_GOUT_MIF_CMU_FSYS_MMC2 39
+#define CLK_GOUT_MIF_CMU_FSYS_USB20DRD_REFCLK 40
+#define CLK_GOUT_MIF_CMU_G3D_SWITCH 41
+#define CLK_GOUT_MIF_CMU_ISP_CAM 42
+#define CLK_GOUT_MIF_CMU_ISP_ISP 43
+#define CLK_GOUT_MIF_CMU_ISP_SENSOR0 44
+#define CLK_GOUT_MIF_CMU_ISP_SENSOR1 45
+#define CLK_GOUT_MIF_CMU_ISP_SENSOR2 46
+#define CLK_GOUT_MIF_CMU_ISP_VRA 47
+#define CLK_GOUT_MIF_CMU_MFCMSCL_MFC 48
+#define CLK_GOUT_MIF_CMU_MFCMSCL_MSCL 49
+#define CLK_GOUT_MIF_CMU_PERI_BUS 50
+#define CLK_GOUT_MIF_CMU_PERI_SPI0 51
+#define CLK_GOUT_MIF_CMU_PERI_SPI1 52
+#define CLK_GOUT_MIF_CMU_PERI_SPI2 53
+#define CLK_GOUT_MIF_CMU_PERI_SPI3 54
+#define CLK_GOUT_MIF_CMU_PERI_SPI4 55
+#define CLK_GOUT_MIF_CMU_PERI_UART0 56
+#define CLK_GOUT_MIF_CMU_PERI_UART1 57
+#define CLK_GOUT_MIF_CMU_PERI_UART2 58
+#define CLK_GOUT_MIF_CP_PCLK_HSI2C 59
+#define CLK_GOUT_MIF_CP_PCLK_HSI2C_BAT_0 60
+#define CLK_GOUT_MIF_CP_PCLK_HSI2C_BAT_1 61
+#define CLK_GOUT_MIF_HSI2C_AP_PCLKM 62
+#define CLK_GOUT_MIF_HSI2C_AP_PCLKS 63
+#define CLK_GOUT_MIF_HSI2C_CP_PCLKM 64
+#define CLK_GOUT_MIF_HSI2C_CP_PCLKS 65
+#define CLK_GOUT_MIF_HSI2C_IPCLK 66
+#define CLK_GOUT_MIF_HSI2C_ITCLK 67
+#define CLK_GOUT_MIF_MUX_BUSD 68
+#define CLK_GOUT_MIF_MUX_BUS_PLL 69
+#define CLK_GOUT_MIF_MUX_BUS_PLL_CON 70
+#define CLK_GOUT_MIF_MUX_CMU_DISPAUD_BUS 71
+#define CLK_GOUT_MIF_MUX_CMU_DISPAUD_DECON_ECLK 72
+#define CLK_GOUT_MIF_MUX_CMU_DISPAUD_DECON_VCLK 73
+#define CLK_GOUT_MIF_MUX_CMU_FSYS_BUS 74
+#define CLK_GOUT_MIF_MUX_CMU_FSYS_MMC0 75
+#define CLK_GOUT_MIF_MUX_CMU_FSYS_MMC1 76
+#define CLK_GOUT_MIF_MUX_CMU_FSYS_MMC2 77
+#define CLK_GOUT_MIF_MUX_CMU_FSYS_USB20DRD_REFCLK 78
+#define CLK_GOUT_MIF_MUX_CMU_ISP_CAM 79
+#define CLK_GOUT_MIF_MUX_CMU_ISP_ISP 80
+#define CLK_GOUT_MIF_MUX_CMU_ISP_SENSOR0 81
+#define CLK_GOUT_MIF_MUX_CMU_ISP_SENSOR1 82
+#define CLK_GOUT_MIF_MUX_CMU_ISP_SENSOR2 83
+#define CLK_GOUT_MIF_MUX_CMU_ISP_VRA 84
+#define CLK_GOUT_MIF_MUX_CMU_MFCMSCL_MFC 85
+#define CLK_GOUT_MIF_MUX_CMU_MFCMSCL_MSCL 86
+#define CLK_GOUT_MIF_MUX_CMU_PERI_BUS 87
+#define CLK_GOUT_MIF_MUX_CMU_PERI_SPI0 88
+#define CLK_GOUT_MIF_MUX_CMU_PERI_SPI1 89
+#define CLK_GOUT_MIF_MUX_CMU_PERI_SPI2 90
+#define CLK_GOUT_MIF_MUX_CMU_PERI_SPI3 91
+#define CLK_GOUT_MIF_MUX_CMU_PERI_SPI4 92
+#define CLK_GOUT_MIF_MUX_CMU_PERI_UART0 93
+#define CLK_GOUT_MIF_MUX_CMU_PERI_UART1 94
+#define CLK_GOUT_MIF_MUX_CMU_PERI_UART2 95
+#define CLK_GOUT_MIF_MUX_MEDIA_PLL 96
+#define CLK_GOUT_MIF_MUX_MEDIA_PLL_CON 97
+#define CLK_GOUT_MIF_MUX_MEM_PLL 98
+#define CLK_GOUT_MIF_MUX_MEM_PLL_CON 99
+#define CLK_GOUT_MIF_WRAP_ADC_IF_OSC_SYS 100
+#define CLK_GOUT_MIF_WRAP_ADC_IF_PCLK_S0 101
+#define CLK_GOUT_MIF_WRAP_ADC_IF_PCLK_S1 102
+#define CLK_MOUT_MIF_BUSD 103
+#define CLK_MOUT_MIF_CMU_DISPAUD_BUS 104
+#define CLK_MOUT_MIF_CMU_DISPAUD_DECON_ECLK 105
+#define CLK_MOUT_MIF_CMU_DISPAUD_DECON_VCLK 106
+#define CLK_MOUT_MIF_CMU_FSYS_BUS 107
+#define CLK_MOUT_MIF_CMU_FSYS_MMC0 108
+#define CLK_MOUT_MIF_CMU_FSYS_MMC1 109
+#define CLK_MOUT_MIF_CMU_FSYS_MMC2 110
+#define CLK_MOUT_MIF_CMU_FSYS_USB20DRD_REFCLK 111
+#define CLK_MOUT_MIF_CMU_ISP_CAM 112
+#define CLK_MOUT_MIF_CMU_ISP_ISP 113
+#define CLK_MOUT_MIF_CMU_ISP_SENSOR0 114
+#define CLK_MOUT_MIF_CMU_ISP_SENSOR1 115
+#define CLK_MOUT_MIF_CMU_ISP_SENSOR2 116
+#define CLK_MOUT_MIF_CMU_ISP_VRA 117
+#define CLK_MOUT_MIF_CMU_MFCMSCL_MFC 118
+#define CLK_MOUT_MIF_CMU_MFCMSCL_MSCL 119
+#define CLK_MOUT_MIF_CMU_PERI_BUS 120
+#define CLK_MOUT_MIF_CMU_PERI_SPI0 121
+#define CLK_MOUT_MIF_CMU_PERI_SPI1 122
+#define CLK_MOUT_MIF_CMU_PERI_SPI2 123
+#define CLK_MOUT_MIF_CMU_PERI_SPI3 124
+#define CLK_MOUT_MIF_CMU_PERI_SPI4 125
+#define CLK_MOUT_MIF_CMU_PERI_UART0 126
+#define CLK_MOUT_MIF_CMU_PERI_UART1 127
+#define CLK_MOUT_MIF_CMU_PERI_UART2 128
+#define MIF_NR_CLK 129
+
+/* CMU_DISPAUD */
+#define CLK_DOUT_DISPAUD_APB 1
+#define CLK_DOUT_DISPAUD_DECON_ECLK 2
+#define CLK_DOUT_DISPAUD_DECON_VCLK 3
+#define CLK_DOUT_DISPAUD_MI2S 4
+#define CLK_DOUT_DISPAUD_MIXER 5
+#define CLK_FOUT_DISPAUD_AUD_PLL 6
+#define CLK_FOUT_DISPAUD_PLL 7
+#define CLK_GOUT_DISPAUD_APB_AUD 8
+#define CLK_GOUT_DISPAUD_APB_AUD_AMP 9
+#define CLK_GOUT_DISPAUD_APB_DISP 10
+#define CLK_GOUT_DISPAUD_BUS 11
+#define CLK_GOUT_DISPAUD_BUS_DISP 12
+#define CLK_GOUT_DISPAUD_BUS_PPMU 13
+#define CLK_GOUT_DISPAUD_CON_AUD_I2S_BCLK_BT_IN 14
+#define CLK_GOUT_DISPAUD_CON_AUD_I2S_BCLK_FM_IN 15
+#define CLK_GOUT_DISPAUD_CON_CP2AUD_BCK 16
+#define CLK_GOUT_DISPAUD_CON_EXT2AUD_BCK_GPIO_I2S 17
+#define CLK_GOUT_DISPAUD_DECON_ECLK 18
+#define CLK_GOUT_DISPAUD_DECON_VCLK 19
+#define CLK_GOUT_DISPAUD_MI2S_AMP_I2SCODCLKI 20
+#define CLK_GOUT_DISPAUD_MI2S_AUD_I2SCODCLKI 21
+#define CLK_GOUT_DISPAUD_MIXER_AUD_SYSCLK 22
+#define CLK_GOUT_DISPAUD_MUX_AUD_PLL 23
+#define CLK_GOUT_DISPAUD_MUX_AUD_PLL_CON 24
+#define CLK_GOUT_DISPAUD_MUX_BUS_USER 25
+#define CLK_GOUT_DISPAUD_MUX_DECON_ECLK 26
+#define CLK_GOUT_DISPAUD_MUX_DECON_ECLK_USER 27
+#define CLK_GOUT_DISPAUD_MUX_DECON_VCLK 28
+#define CLK_GOUT_DISPAUD_MUX_DECON_VCLK_USER 29
+#define CLK_GOUT_DISPAUD_MUX_MI2S 30
+#define CLK_GOUT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER 31
+#define CLK_GOUT_DISPAUD_MUX_MIPIPHY_RXCLKESC0_USER_CON 32
+#define CLK_GOUT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER 33
+#define CLK_GOUT_DISPAUD_MUX_MIPIPHY_TXBYTECLKHS_USER_CON 34
+#define CLK_GOUT_DISPAUD_MUX_PLL 35
+#define CLK_GOUT_DISPAUD_MUX_PLL_CON 36
+#define CLK_MOUT_DISPAUD_BUS_USER 37
+#define CLK_MOUT_DISPAUD_DECON_ECLK 38
+#define CLK_MOUT_DISPAUD_DECON_ECLK_USER 39
+#define CLK_MOUT_DISPAUD_DECON_VCLK 40
+#define CLK_MOUT_DISPAUD_DECON_VCLK_USER 41
+#define CLK_MOUT_DISPAUD_MI2S 42
+#define DISPAUD_NR_CLK 43
+
+/* CMU_FSYS */
+#define CLK_FOUT_FSYS_USB_PLL 1
+#define CLK_GOUT_FSYS_BUSP3_HCLK 2
+#define CLK_GOUT_FSYS_MMC0_ACLK 3
+#define CLK_GOUT_FSYS_MMC1_ACLK 4
+#define CLK_GOUT_FSYS_MMC2_ACLK 5
+#define CLK_GOUT_FSYS_MUX_USB20DRD_PHYCLOCK_USER 6
+#define CLK_GOUT_FSYS_MUX_USB20DRD_PHYCLOCK_USER_CON 7
+#define CLK_GOUT_FSYS_MUX_USB_PLL 8
+#define CLK_GOUT_FSYS_MUX_USB_PLL_CON 9
+#define CLK_GOUT_FSYS_PDMA0_ACLK_PDMA0 10
+#define CLK_GOUT_FSYS_PPMU_ACLK 11
+#define CLK_GOUT_FSYS_PPMU_PCLK 12
+#define CLK_GOUT_FSYS_SROMC_HCLK 13
+#define CLK_GOUT_FSYS_UPSIZER_BUS1_ACLK 14
+#define CLK_GOUT_FSYS_USB20DRD_ACLK_HSDRD 15
+#define CLK_GOUT_FSYS_USB20DRD_HCLK_USB20_CTRL 16
+#define CLK_GOUT_FSYS_USB20DRD_HSDRD_REF_CLK 17
+#define FSYS_NR_CLK 18
+
+/* CMU_G3D */
+#define CLK_DOUT_G3D_APB 1
+#define CLK_DOUT_G3D_BUS 2
+#define CLK_FOUT_G3D_PLL 3
+#define CLK_GOUT_G3D_ASYNCS_D0_CLK 4
+#define CLK_GOUT_G3D_ASYNC_PCLKM 5
+#define CLK_GOUT_G3D_CLK 6
+#define CLK_GOUT_G3D_MUX 7
+#define CLK_GOUT_G3D_MUX_PLL 8
+#define CLK_GOUT_G3D_MUX_PLL_CON 9
+#define CLK_GOUT_G3D_MUX_SWITCH_USER 10
+#define CLK_GOUT_G3D_PPMU_ACLK 11
+#define CLK_GOUT_G3D_PPMU_PCLK 12
+#define CLK_GOUT_G3D_QE_ACLK 13
+#define CLK_GOUT_G3D_QE_PCLK 14
+#define CLK_GOUT_G3D_SYSREG_PCLK 15
+#define CLK_MOUT_G3D 16
+#define CLK_MOUT_G3D_SWITCH_USER 17
+#define G3D_NR_CLK 18
+
+/* CMU_ISP */
+#define CLK_DOUT_ISP_APB 1
+#define CLK_DOUT_ISP_CAM_HALF 2
+#define CLK_FOUT_ISP_PLL 3
+#define CLK_GOUT_ISP_CAM 4
+#define CLK_GOUT_ISP_CAM_HALF 5
+#define CLK_GOUT_ISP_ISPD 6
+#define CLK_GOUT_ISP_ISPD_PPMU 7
+#define CLK_GOUT_ISP_MUX_CAM 8
+#define CLK_GOUT_ISP_MUX_CAM_USER 9
+#define CLK_GOUT_ISP_MUX_ISP 10
+#define CLK_GOUT_ISP_MUX_ISPD 11
+#define CLK_GOUT_ISP_MUX_PLL 12
+#define CLK_GOUT_ISP_MUX_PLL_CON 13
+#define CLK_GOUT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER 14
+#define CLK_GOUT_ISP_MUX_RXBYTECLKHS0_SENSOR0_USER_CON 15
+#define CLK_GOUT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER 16
+#define CLK_GOUT_ISP_MUX_RXBYTECLKHS0_SENSOR1_USER_CON 17
+#define CLK_GOUT_ISP_MUX_USER 18
+#define CLK_GOUT_ISP_MUX_VRA 19
+#define CLK_GOUT_ISP_MUX_VRA_USER 20
+#define CLK_GOUT_ISP_VRA 21
+#define CLK_MOUT_ISP_CAM 22
+#define CLK_MOUT_ISP_CAM_USER 23
+#define CLK_MOUT_ISP_ISP 24
+#define CLK_MOUT_ISP_ISPD 25
+#define CLK_MOUT_ISP_USER 26
+#define CLK_MOUT_ISP_VRA 27
+#define CLK_MOUT_ISP_VRA_USER 28
+#define ISP_NR_CLK 29
+
+/* CMU_MFCMSCL */
+#define CLK_DOUT_MFCMSCL_APB 1
+#define CLK_GOUT_MFCMSCL_MFC 2
+#define CLK_GOUT_MFCMSCL_MSCL 3
+#define CLK_GOUT_MFCMSCL_MSCL_BI 4
+#define CLK_GOUT_MFCMSCL_MSCL_D 5
+#define CLK_GOUT_MFCMSCL_MSCL_JPEG 6
+#define CLK_GOUT_MFCMSCL_MSCL_POLY 7
+#define CLK_GOUT_MFCMSCL_MSCL_PPMU 8
+#define CLK_GOUT_MFCMSCL_MUX_MFC_USER 9
+#define CLK_GOUT_MFCMSCL_MUX_MSCL_USER 10
+#define CLK_MOUT_MFCMSCL_MFC_USER 11
+#define CLK_MOUT_MFCMSCL_MSCL_USER 12
+#define MFCMSCL_NR_CLK 13
+
+/* CMU_PERI */
+#define CLK_GOUT_PERI_BUSP1_PERIC0_HCLK 1
+#define CLK_GOUT_PERI_GPIO2_PCLK 2
+#define CLK_GOUT_PERI_GPIO5_PCLK 3
+#define CLK_GOUT_PERI_GPIO6_PCLK 4
+#define CLK_GOUT_PERI_GPIO7_PCLK 5
+#define CLK_GOUT_PERI_HSI2C1_IPCLK 6
+#define CLK_GOUT_PERI_HSI2C2_IPCLK 7
+#define CLK_GOUT_PERI_HSI2C3_IPCLK 8
+#define CLK_GOUT_PERI_HSI2C4_IPCLK 9
+#define CLK_GOUT_PERI_HSI2C5_IPCLK 10
+#define CLK_GOUT_PERI_HSI2C6_IPCLK 11
+#define CLK_GOUT_PERI_I2C0_PCLK 12
+#define CLK_GOUT_PERI_I2C1_PCLK 13
+#define CLK_GOUT_PERI_I2C2_PCLK 14
+#define CLK_GOUT_PERI_I2C3_PCLK 15
+#define CLK_GOUT_PERI_I2C4_PCLK 16
+#define CLK_GOUT_PERI_I2C5_PCLK 17
+#define CLK_GOUT_PERI_I2C6_PCLK 18
+#define CLK_GOUT_PERI_I2C7_PCLK 19
+#define CLK_GOUT_PERI_I2C8_PCLK 20
+#define CLK_GOUT_PERI_MCT_PCLK 21
+#define CLK_GOUT_PERI_PWM_MOTOR_OSCCLK 22
+#define CLK_GOUT_PERI_PWM_MOTOR_PCLK_S0 23
+#define CLK_GOUT_PERI_SFRIF_TMU_CPUCL0_PCLK 24
+#define CLK_GOUT_PERI_SFRIF_TMU_CPUCL1_PCLK 25
+#define CLK_GOUT_PERI_SFRIF_TMU_PCLK 26
+#define CLK_GOUT_PERI_SPI0_PCLK 27
+#define CLK_GOUT_PERI_SPI0_SPI_EXT_CLK 28
+#define CLK_GOUT_PERI_SPI1_PCLK 29
+#define CLK_GOUT_PERI_SPI1_SPI_EXT_CLK 30
+#define CLK_GOUT_PERI_SPI2_PCLK 31
+#define CLK_GOUT_PERI_SPI2_SPI_EXT_CLK 32
+#define CLK_GOUT_PERI_SPI3_PCLK 33
+#define CLK_GOUT_PERI_SPI3_SPI_EXT_CLK 34
+#define CLK_GOUT_PERI_SPI4_PCLK 35
+#define CLK_GOUT_PERI_SPI4_SPI_EXT_CLK 36
+#define CLK_GOUT_PERI_TMU_CLK 37
+#define CLK_GOUT_PERI_TMU_CPUCL0_CLK 38
+#define CLK_GOUT_PERI_TMU_CPUCL1_CLK 39
+#define CLK_GOUT_PERI_UART0_EXT_UCLK 40
+#define CLK_GOUT_PERI_UART0_PCLK 41
+#define CLK_GOUT_PERI_UART1_EXT_UCLK 42
+#define CLK_GOUT_PERI_UART1_PCLK 43
+#define CLK_GOUT_PERI_UART2_EXT_UCLK 44
+#define CLK_GOUT_PERI_UART2_PCLK 45
+#define CLK_GOUT_PERI_WDT_CPUCL0_PCLK 46
+#define CLK_GOUT_PERI_WDT_CPUCL1_PCLK 47
+#define PERI_NR_CLK 48
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS7870_H */
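
As a usage sketch: a device tree would pull these per-CMU indices in through the header and pass them to the matching clock controller via its phandle. Everything below except the CLK_GOUT_PERI_* constants (node name, unit address, compatible string, controller label, clock-names) is an illustrative assumption, not part of this patch:

    #include <dt-bindings/clock/samsung,exynos7870.h>

    /* hypothetical UART consumer; only the CLK_GOUT_PERI_* indices come from the header above */
    serial_0: serial@13800000 {
            compatible = "samsung,exynos7870-uart";
            clocks = <&cmu_peri CLK_GOUT_PERI_UART0_PCLK>,
                     <&cmu_peri CLK_GOUT_PERI_UART0_EXT_UCLK>;
            clock-names = "uart", "clk_uart_baud0";
    };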
diff --git a/include/dt-bindings/clock/samsung,exynos8895.h b/include/dt-bindings/clock/samsung,exynos8895.h
new file mode 100644
index 000000000000..27998c53f929
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,exynos8895.h
@@ -0,0 +1,453 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2024 Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ * Author: Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com>
+ *
+ * Device Tree binding constants for Exynos8895 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS8895_H
+#define _DT_BINDINGS_CLOCK_EXYNOS8895_H
+
+/* CMU_TOP */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_FOUT_SHARED2_PLL 3
+#define CLK_FOUT_SHARED3_PLL 4
+#define CLK_FOUT_SHARED4_PLL 5
+#define CLK_MOUT_PLL_SHARED0 6
+#define CLK_MOUT_PLL_SHARED1 7
+#define CLK_MOUT_PLL_SHARED2 8
+#define CLK_MOUT_PLL_SHARED3 9
+#define CLK_MOUT_PLL_SHARED4 10
+#define CLK_MOUT_CP2AP_MIF_CLK_USER 11
+#define CLK_MOUT_CMU_ABOX_CPUABOX 12
+#define CLK_MOUT_CMU_APM_BUS 13
+#define CLK_MOUT_CMU_BUS1_BUS 14
+#define CLK_MOUT_CMU_BUSC_BUS 15
+#define CLK_MOUT_CMU_BUSC_BUSPHSI2C 16
+#define CLK_MOUT_CMU_CAM_BUS 17
+#define CLK_MOUT_CMU_CAM_TPU0 18
+#define CLK_MOUT_CMU_CAM_TPU1 19
+#define CLK_MOUT_CMU_CAM_VRA 20
+#define CLK_MOUT_CMU_CIS_CLK0 21
+#define CLK_MOUT_CMU_CIS_CLK1 22
+#define CLK_MOUT_CMU_CIS_CLK2 23
+#define CLK_MOUT_CMU_CIS_CLK3 24
+#define CLK_MOUT_CMU_CORE_BUS 25
+#define CLK_MOUT_CMU_CPUCL0_SWITCH 26
+#define CLK_MOUT_CMU_CPUCL1_SWITCH 27
+#define CLK_MOUT_CMU_DBG_BUS 28
+#define CLK_MOUT_CMU_DCAM_BUS 29
+#define CLK_MOUT_CMU_DCAM_IMGD 30
+#define CLK_MOUT_CMU_DPU_BUS 31
+#define CLK_MOUT_CMU_DROOPDETECTOR 32
+#define CLK_MOUT_CMU_DSP_BUS 33
+#define CLK_MOUT_CMU_FSYS0_BUS 34
+#define CLK_MOUT_CMU_FSYS0_DPGTC 35
+#define CLK_MOUT_CMU_FSYS0_MMC_EMBD 36
+#define CLK_MOUT_CMU_FSYS0_UFS_EMBD 37
+#define CLK_MOUT_CMU_FSYS0_USBDRD30 38
+#define CLK_MOUT_CMU_FSYS1_BUS 39
+#define CLK_MOUT_CMU_FSYS1_MMC_CARD 40
+#define CLK_MOUT_CMU_FSYS1_PCIE 41
+#define CLK_MOUT_CMU_FSYS1_UFS_CARD 42
+#define CLK_MOUT_CMU_G2D_G2D 43
+#define CLK_MOUT_CMU_G2D_JPEG 44
+#define CLK_MOUT_CMU_HPM 45
+#define CLK_MOUT_CMU_IMEM_BUS 46
+#define CLK_MOUT_CMU_ISPHQ_BUS 47
+#define CLK_MOUT_CMU_ISPLP_BUS 48
+#define CLK_MOUT_CMU_IVA_BUS 49
+#define CLK_MOUT_CMU_MFC_BUS 50
+#define CLK_MOUT_CMU_MIF_SWITCH 51
+#define CLK_MOUT_CMU_PERIC0_BUS 52
+#define CLK_MOUT_CMU_PERIC0_UART_DBG 53
+#define CLK_MOUT_CMU_PERIC0_USI00 54
+#define CLK_MOUT_CMU_PERIC0_USI01 55
+#define CLK_MOUT_CMU_PERIC0_USI02 56
+#define CLK_MOUT_CMU_PERIC0_USI03 57
+#define CLK_MOUT_CMU_PERIC1_BUS 58
+#define CLK_MOUT_CMU_PERIC1_SPEEDY2 59
+#define CLK_MOUT_CMU_PERIC1_SPI_CAM0 60
+#define CLK_MOUT_CMU_PERIC1_SPI_CAM1 61
+#define CLK_MOUT_CMU_PERIC1_UART_BT 62
+#define CLK_MOUT_CMU_PERIC1_USI04 63
+#define CLK_MOUT_CMU_PERIC1_USI05 64
+#define CLK_MOUT_CMU_PERIC1_USI06 65
+#define CLK_MOUT_CMU_PERIC1_USI07 66
+#define CLK_MOUT_CMU_PERIC1_USI08 67
+#define CLK_MOUT_CMU_PERIC1_USI09 68
+#define CLK_MOUT_CMU_PERIC1_USI10 69
+#define CLK_MOUT_CMU_PERIC1_USI11 70
+#define CLK_MOUT_CMU_PERIC1_USI12 71
+#define CLK_MOUT_CMU_PERIC1_USI13 72
+#define CLK_MOUT_CMU_PERIS_BUS 73
+#define CLK_MOUT_CMU_SRDZ_BUS 74
+#define CLK_MOUT_CMU_SRDZ_IMGD 75
+#define CLK_MOUT_CMU_VPU_BUS 76
+#define CLK_DOUT_CMU_ABOX_CPUABOX 77
+#define CLK_DOUT_CMU_APM_BUS 78
+#define CLK_DOUT_CMU_BUS1_BUS 79
+#define CLK_DOUT_CMU_BUSC_BUS 80
+#define CLK_DOUT_CMU_BUSC_BUSPHSI2C 81
+#define CLK_DOUT_CMU_CAM_BUS 82
+#define CLK_DOUT_CMU_CAM_TPU0 83
+#define CLK_DOUT_CMU_CAM_TPU1 84
+#define CLK_DOUT_CMU_CAM_VRA 85
+#define CLK_DOUT_CMU_CIS_CLK0 86
+#define CLK_DOUT_CMU_CIS_CLK1 87
+#define CLK_DOUT_CMU_CIS_CLK2 88
+#define CLK_DOUT_CMU_CIS_CLK3 89
+#define CLK_DOUT_CMU_CORE_BUS 90
+#define CLK_DOUT_CMU_CPUCL0_SWITCH 91
+#define CLK_DOUT_CMU_CPUCL1_SWITCH 92
+#define CLK_DOUT_CMU_DBG_BUS 93
+#define CLK_DOUT_CMU_DCAM_BUS 94
+#define CLK_DOUT_CMU_DCAM_IMGD 95
+#define CLK_DOUT_CMU_DPU_BUS 96
+#define CLK_DOUT_CMU_DSP_BUS 97
+#define CLK_DOUT_CMU_FSYS0_BUS 98
+#define CLK_DOUT_CMU_FSYS0_DPGTC 99
+#define CLK_DOUT_CMU_FSYS0_MMC_EMBD 100
+#define CLK_DOUT_CMU_FSYS0_UFS_EMBD 101
+#define CLK_DOUT_CMU_FSYS0_USBDRD30 102
+#define CLK_DOUT_CMU_FSYS1_BUS 103
+#define CLK_DOUT_CMU_FSYS1_MMC_CARD 104
+#define CLK_DOUT_CMU_FSYS1_UFS_CARD 105
+#define CLK_DOUT_CMU_G2D_G2D 106
+#define CLK_DOUT_CMU_G2D_JPEG 107
+#define CLK_DOUT_CMU_G3D_SWITCH 108
+#define CLK_DOUT_CMU_HPM 109
+#define CLK_DOUT_CMU_IMEM_BUS 110
+#define CLK_DOUT_CMU_ISPHQ_BUS 111
+#define CLK_DOUT_CMU_ISPLP_BUS 112
+#define CLK_DOUT_CMU_IVA_BUS 113
+#define CLK_DOUT_CMU_MFC_BUS 114
+#define CLK_DOUT_CMU_MODEM_SHARED0 115
+#define CLK_DOUT_CMU_MODEM_SHARED1 116
+#define CLK_DOUT_CMU_PERIC0_BUS 117
+#define CLK_DOUT_CMU_PERIC0_UART_DBG 118
+#define CLK_DOUT_CMU_PERIC0_USI00 119
+#define CLK_DOUT_CMU_PERIC0_USI01 120
+#define CLK_DOUT_CMU_PERIC0_USI02 121
+#define CLK_DOUT_CMU_PERIC0_USI03 122
+#define CLK_DOUT_CMU_PERIC1_BUS 123
+#define CLK_DOUT_CMU_PERIC1_SPEEDY2 124
+#define CLK_DOUT_CMU_PERIC1_SPI_CAM0 125
+#define CLK_DOUT_CMU_PERIC1_SPI_CAM1 126
+#define CLK_DOUT_CMU_PERIC1_UART_BT 127
+#define CLK_DOUT_CMU_PERIC1_USI04 128
+#define CLK_DOUT_CMU_PERIC1_USI05 129
+#define CLK_DOUT_CMU_PERIC1_USI06 130
+#define CLK_DOUT_CMU_PERIC1_USI07 131
+#define CLK_DOUT_CMU_PERIC1_USI08 132
+#define CLK_DOUT_CMU_PERIC1_USI09 133
+#define CLK_DOUT_CMU_PERIC1_USI10 134
+#define CLK_DOUT_CMU_PERIC1_USI11 135
+#define CLK_DOUT_CMU_PERIC1_USI12 136
+#define CLK_DOUT_CMU_PERIC1_USI13 137
+#define CLK_DOUT_CMU_PERIS_BUS 138
+#define CLK_DOUT_CMU_SRDZ_BUS 139
+#define CLK_DOUT_CMU_SRDZ_IMGD 140
+#define CLK_DOUT_CMU_VPU_BUS 141
+#define CLK_DOUT_CMU_SHARED0_DIV2 142
+#define CLK_DOUT_CMU_SHARED0_DIV4 143
+#define CLK_DOUT_CMU_SHARED1_DIV2 144
+#define CLK_DOUT_CMU_SHARED1_DIV4 145
+#define CLK_DOUT_CMU_SHARED2_DIV2 146
+#define CLK_DOUT_CMU_SHARED3_DIV2 147
+#define CLK_DOUT_CMU_SHARED4_DIV2 148
+#define CLK_DOUT_CMU_FSYS1_PCIE 149
+#define CLK_DOUT_CMU_CP2AP_MIF_CLK_DIV2 150
+#define CLK_DOUT_CMU_CMU_OTP 151
+#define CLK_GOUT_CMU_DROOPDETECTOR 152
+#define CLK_GOUT_CMU_MIF_SWITCH 153
+#define CLK_GOUT_CMU_ABOX_CPUABOX 154
+#define CLK_GOUT_CMU_APM_BUS 155
+#define CLK_GOUT_CMU_BUS1_BUS 156
+#define CLK_GOUT_CMU_BUSC_BUS 157
+#define CLK_GOUT_CMU_BUSC_BUSPHSI2C 158
+#define CLK_GOUT_CMU_CAM_BUS 159
+#define CLK_GOUT_CMU_CAM_TPU0 160
+#define CLK_GOUT_CMU_CAM_TPU1 161
+#define CLK_GOUT_CMU_CAM_VRA 162
+#define CLK_GOUT_CMU_CIS_CLK0 163
+#define CLK_GOUT_CMU_CIS_CLK1 164
+#define CLK_GOUT_CMU_CIS_CLK2 165
+#define CLK_GOUT_CMU_CIS_CLK3 166
+#define CLK_GOUT_CMU_CORE_BUS 167
+#define CLK_GOUT_CMU_CPUCL0_SWITCH 168
+#define CLK_GOUT_CMU_CPUCL1_SWITCH 169
+#define CLK_GOUT_CMU_DBG_BUS 170
+#define CLK_GOUT_CMU_DCAM_BUS 171
+#define CLK_GOUT_CMU_DCAM_IMGD 172
+#define CLK_GOUT_CMU_DPU_BUS 173
+#define CLK_GOUT_CMU_DSP_BUS 174
+#define CLK_GOUT_CMU_FSYS0_BUS 175
+#define CLK_GOUT_CMU_FSYS0_DPGTC 176
+#define CLK_GOUT_CMU_FSYS0_MMC_EMBD 177
+#define CLK_GOUT_CMU_FSYS0_UFS_EMBD 178
+#define CLK_GOUT_CMU_FSYS0_USBDRD30 179
+#define CLK_GOUT_CMU_FSYS1_BUS 180
+#define CLK_GOUT_CMU_FSYS1_MMC_CARD 181
+#define CLK_GOUT_CMU_FSYS1_PCIE 182
+#define CLK_GOUT_CMU_FSYS1_UFS_CARD 183
+#define CLK_GOUT_CMU_G2D_G2D 184
+#define CLK_GOUT_CMU_G2D_JPEG 185
+#define CLK_GOUT_CMU_G3D_SWITCH 186
+#define CLK_GOUT_CMU_HPM 187
+#define CLK_GOUT_CMU_IMEM_BUS 188
+#define CLK_GOUT_CMU_ISPHQ_BUS 189
+#define CLK_GOUT_CMU_ISPLP_BUS 190
+#define CLK_GOUT_CMU_IVA_BUS 191
+#define CLK_GOUT_CMU_MFC_BUS 192
+#define CLK_GOUT_CMU_MODEM_SHARED0 193
+#define CLK_GOUT_CMU_MODEM_SHARED1 194
+#define CLK_GOUT_CMU_PERIC0_BUS 195
+#define CLK_GOUT_CMU_PERIC0_UART_DBG 196
+#define CLK_GOUT_CMU_PERIC0_USI00 197
+#define CLK_GOUT_CMU_PERIC0_USI01 198
+#define CLK_GOUT_CMU_PERIC0_USI02 199
+#define CLK_GOUT_CMU_PERIC0_USI03 200
+#define CLK_GOUT_CMU_PERIC1_BUS 201
+#define CLK_GOUT_CMU_PERIC1_SPEEDY2 202
+#define CLK_GOUT_CMU_PERIC1_SPI_CAM0 203
+#define CLK_GOUT_CMU_PERIC1_SPI_CAM1 204
+#define CLK_GOUT_CMU_PERIC1_UART_BT 205
+#define CLK_GOUT_CMU_PERIC1_USI04 206
+#define CLK_GOUT_CMU_PERIC1_USI05 207
+#define CLK_GOUT_CMU_PERIC1_USI06 208
+#define CLK_GOUT_CMU_PERIC1_USI07 209
+#define CLK_GOUT_CMU_PERIC1_USI08 210
+#define CLK_GOUT_CMU_PERIC1_USI09 211
+#define CLK_GOUT_CMU_PERIC1_USI10 212
+#define CLK_GOUT_CMU_PERIC1_USI11 213
+#define CLK_GOUT_CMU_PERIC1_USI12 214
+#define CLK_GOUT_CMU_PERIC1_USI13 215
+#define CLK_GOUT_CMU_PERIS_BUS 216
+#define CLK_GOUT_CMU_SRDZ_BUS 217
+#define CLK_GOUT_CMU_SRDZ_IMGD 218
+#define CLK_GOUT_CMU_VPU_BUS 219
+
+/* CMU_PERIS */
+#define CLK_MOUT_PERIS_BUS_USER 1
+#define CLK_MOUT_PERIS_GIC 2
+#define CLK_GOUT_PERIS_CMU_PERIS_PCLK 3
+#define CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKM 4
+#define CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKS 5
+#define CLK_GOUT_PERIS_AXI2APB_PERISP0_ACLK 6
+#define CLK_GOUT_PERIS_AXI2APB_PERISP1_ACLK 7
+#define CLK_GOUT_PERIS_BUSIF_TMU_PCLK 8
+#define CLK_GOUT_PERIS_GIC_CLK 9
+#define CLK_GOUT_PERIS_LHM_AXI_P_PERIS_I_CLK 10
+#define CLK_GOUT_PERIS_MCT_PCLK 11
+#define CLK_GOUT_PERIS_OTP_CON_BIRA_PCLK 12
+#define CLK_GOUT_PERIS_OTP_CON_TOP_PCLK 13
+#define CLK_GOUT_PERIS_PMU_PERIS_PCLK 14
+#define CLK_GOUT_PERIS_RSTNSYNC_CLK_PERIS_BUSP_CLK 15
+#define CLK_GOUT_PERIS_RSTNSYNC_CLK_PERIS_GIC_CLK 16
+#define CLK_GOUT_PERIS_SYSREG_PERIS_PCLK 17
+#define CLK_GOUT_PERIS_TZPC00_PCLK 18
+#define CLK_GOUT_PERIS_TZPC01_PCLK 19
+#define CLK_GOUT_PERIS_TZPC02_PCLK 20
+#define CLK_GOUT_PERIS_TZPC03_PCLK 21
+#define CLK_GOUT_PERIS_TZPC04_PCLK 22
+#define CLK_GOUT_PERIS_TZPC05_PCLK 23
+#define CLK_GOUT_PERIS_TZPC06_PCLK 24
+#define CLK_GOUT_PERIS_TZPC07_PCLK 25
+#define CLK_GOUT_PERIS_TZPC08_PCLK 26
+#define CLK_GOUT_PERIS_TZPC09_PCLK 27
+#define CLK_GOUT_PERIS_TZPC10_PCLK 28
+#define CLK_GOUT_PERIS_TZPC11_PCLK 29
+#define CLK_GOUT_PERIS_TZPC12_PCLK 30
+#define CLK_GOUT_PERIS_TZPC13_PCLK 31
+#define CLK_GOUT_PERIS_TZPC14_PCLK 32
+#define CLK_GOUT_PERIS_TZPC15_PCLK 33
+#define CLK_GOUT_PERIS_WDT_CLUSTER0_PCLK 34
+#define CLK_GOUT_PERIS_WDT_CLUSTER1_PCLK 35
+#define CLK_GOUT_PERIS_XIU_P_PERIS_ACLK 36
+
+/* CMU_FSYS0 */
+#define CLK_MOUT_FSYS0_BUS_USER 1
+#define CLK_MOUT_FSYS0_DPGTC_USER 2
+#define CLK_MOUT_FSYS0_MMC_EMBD_USER 3
+#define CLK_MOUT_FSYS0_UFS_EMBD_USER 4
+#define CLK_MOUT_FSYS0_USBDRD30_USER 5
+#define CLK_GOUT_FSYS0_FSYS0_CMU_FSYS0_PCLK 6
+#define CLK_GOUT_FSYS0_AHBBR_FSYS0_HCLK 7
+#define CLK_GOUT_FSYS0_AXI2AHB_FSYS0_ACLK 8
+#define CLK_GOUT_FSYS0_AXI2AHB_USB_FSYS0_ACLK 9
+#define CLK_GOUT_FSYS0_AXI2APB_FSYS0_ACLK 10
+#define CLK_GOUT_FSYS0_BTM_FSYS0_I_ACLK 11
+#define CLK_GOUT_FSYS0_BTM_FSYS0_I_PCLK 12
+#define CLK_GOUT_FSYS0_DP_LINK_I_GTC_EXT_CLK 13
+#define CLK_GOUT_FSYS0_DP_LINK_I_PCLK 14
+#define CLK_GOUT_FSYS0_ETR_MIU_I_ACLK 15
+#define CLK_GOUT_FSYS0_ETR_MIU_I_PCLK 16
+#define CLK_GOUT_FSYS0_GPIO_FSYS0_PCLK 17
+#define CLK_GOUT_FSYS0_LHM_AXI_D_USBTV_I_CLK 18
+#define CLK_GOUT_FSYS0_LHM_AXI_G_ETR_I_CLK 19
+#define CLK_GOUT_FSYS0_LHM_AXI_P_FSYS0_I_CLK 20
+#define CLK_GOUT_FSYS0_LHS_ACEL_D_FSYS0_I_CLK 21
+#define CLK_GOUT_FSYS0_MMC_EMBD_I_ACLK 22
+#define CLK_GOUT_FSYS0_MMC_EMBD_SDCLKIN 23
+#define CLK_GOUT_FSYS0_PMU_FSYS0_PCLK 24
+#define CLK_GOUT_FSYS0_BCM_FSYS0_ACLK 25
+#define CLK_GOUT_FSYS0_BCM_FSYS0_PCLK 26
+#define CLK_GOUT_FSYS0_RSTNSYNC_CLK_FSYS0_BUS_CLK 27
+#define CLK_GOUT_FSYS0_SYSREG_FSYS0_PCLK 28
+#define CLK_GOUT_FSYS0_UFS_EMBD_I_ACLK 29
+#define CLK_GOUT_FSYS0_UFS_EMBD_I_CLK_UNIPRO 30
+#define CLK_GOUT_FSYS0_UFS_EMBD_I_FMP_CLK 31
+#define CLK_GOUT_FSYS0_USBTV_I_USB30DRD_ACLK 32
+#define CLK_GOUT_FSYS0_USBTV_I_USB30DRD_REF_CLK 33
+#define CLK_GOUT_FSYS0_USBTV_I_USB30DRD_SUSPEND_CLK 34
+#define CLK_GOUT_FSYS0_USBTV_I_USBTVH_AHB_CLK 35
+#define CLK_GOUT_FSYS0_USBTV_I_USBTVH_CORE_CLK 36
+#define CLK_GOUT_FSYS0_USBTV_I_USBTVH_XIU_CLK 37
+#define CLK_GOUT_FSYS0_US_D_FSYS0_USB_ACLK 38
+#define CLK_GOUT_FSYS0_XIU_D_FSYS0_ACLK 39
+#define CLK_GOUT_FSYS0_XIU_D_FSYS0_USB_ACLK 40
+#define CLK_GOUT_FSYS0_XIU_P_FSYS0_ACLK 41
+
+/* CMU_FSYS1 */
+#define CLK_MOUT_FSYS1_BUS_USER 1
+#define CLK_MOUT_FSYS1_MMC_CARD_USER 2
+#define CLK_MOUT_FSYS1_PCIE_USER 3
+#define CLK_MOUT_FSYS1_UFS_CARD_USER 4
+#define CLK_GOUT_FSYS1_PCIE_PHY_REF_CLK_IN 5
+#define CLK_GOUT_FSYS1_ADM_AHB_SSS_HCLKM 6
+#define CLK_GOUT_FSYS1_AHBBR_FSYS1_HCLK 7
+#define CLK_GOUT_FSYS1_AXI2AHB_FSYS1_ACLK 8
+#define CLK_GOUT_FSYS1_AXI2APB_FSYS1P0_ACLK 9
+#define CLK_GOUT_FSYS1_AXI2APB_FSYS1P1_ACLK 10
+#define CLK_GOUT_FSYS1_BTM_FSYS1_I_ACLK 11
+#define CLK_GOUT_FSYS1_BTM_FSYS1_I_PCLK 12
+#define CLK_GOUT_FSYS1_FSYS1_CMU_FSYS1_PCLK 13
+#define CLK_GOUT_FSYS1_GPIO_FSYS1_PCLK 14
+#define CLK_GOUT_FSYS1_LHM_AXI_P_FSYS1_I_CLK 15
+#define CLK_GOUT_FSYS1_LHS_ACEL_D_FSYS1_I_CLK 16
+#define CLK_GOUT_FSYS1_MMC_CARD_I_ACLK 17
+#define CLK_GOUT_FSYS1_MMC_CARD_SDCLKIN 18
+#define CLK_GOUT_FSYS1_PCIE_DBI_ACLK_0 19
+#define CLK_GOUT_FSYS1_PCIE_DBI_ACLK_1 20
+#define CLK_GOUT_FSYS1_PCIE_IEEE1500_WRAPPER_FOR_PCIE_PHY_LC_X2_INST_0_I_SCL_APB_PCLK 21
+#define CLK_GOUT_FSYS1_PCIE_MSTR_ACLK_0 22
+#define CLK_GOUT_FSYS1_PCIE_MSTR_ACLK_1 23
+#define CLK_GOUT_FSYS1_PCIE_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK 24
+#define CLK_GOUT_FSYS1_PCIE_PCIE_SUB_CTRL_INST_1_I_DRIVER_APB_CLK 25
+#define CLK_GOUT_FSYS1_PCIE_PIPE2_DIGITAL_X2_WRAP_INST_0_I_APB_PCLK_SCL 26
+#define CLK_GOUT_FSYS1_PCIE_SLV_ACLK_0 27
+#define CLK_GOUT_FSYS1_PCIE_SLV_ACLK_1 28
+#define CLK_GOUT_FSYS1_PMU_FSYS1_PCLK 29
+#define CLK_GOUT_FSYS1_BCM_FSYS1_ACLK 30
+#define CLK_GOUT_FSYS1_BCM_FSYS1_PCLK 31
+#define CLK_GOUT_FSYS1_RSTNSYNC_CLK_FSYS1_BUS_CLK 32
+#define CLK_GOUT_FSYS1_RTIC_I_ACLK 33
+#define CLK_GOUT_FSYS1_RTIC_I_PCLK 34
+#define CLK_GOUT_FSYS1_SSS_I_ACLK 35
+#define CLK_GOUT_FSYS1_SSS_I_PCLK 36
+#define CLK_GOUT_FSYS1_SYSREG_FSYS1_PCLK 37
+#define CLK_GOUT_FSYS1_TOE_WIFI0_I_CLK 38
+#define CLK_GOUT_FSYS1_TOE_WIFI1_I_CLK 39
+#define CLK_GOUT_FSYS1_UFS_CARD_I_ACLK 40
+#define CLK_GOUT_FSYS1_UFS_CARD_I_CLK_UNIPRO 41
+#define CLK_GOUT_FSYS1_UFS_CARD_I_FMP_CLK 42
+#define CLK_GOUT_FSYS1_XIU_D_FSYS1_ACLK 43
+#define CLK_GOUT_FSYS1_XIU_P_FSYS1_ACLK 44
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_BUS_USER 1
+#define CLK_MOUT_PERIC0_UART_DBG_USER 2
+#define CLK_MOUT_PERIC0_USI00_USER 3
+#define CLK_MOUT_PERIC0_USI01_USER 4
+#define CLK_MOUT_PERIC0_USI02_USER 5
+#define CLK_MOUT_PERIC0_USI03_USER 6
+#define CLK_GOUT_PERIC0_PERIC0_CMU_PERIC0_PCLK 7
+#define CLK_GOUT_PERIC0_AXI2APB_PERIC0_ACLK 8
+#define CLK_GOUT_PERIC0_GPIO_PERIC0_PCLK 9
+#define CLK_GOUT_PERIC0_LHM_AXI_P_PERIC0_I_CLK 10
+#define CLK_GOUT_PERIC0_PMU_PERIC0_PCLK 11
+#define CLK_GOUT_PERIC0_PWM_I_PCLK_S0 12
+#define CLK_GOUT_PERIC0_RSTNSYNC_CLK_PERIC0_BUSP_CLK 13
+#define CLK_GOUT_PERIC0_SPEEDY2_TSP_CLK 14
+#define CLK_GOUT_PERIC0_SYSREG_PERIC0_PCLK 15
+#define CLK_GOUT_PERIC0_UART_DBG_EXT_UCLK 16
+#define CLK_GOUT_PERIC0_UART_DBG_PCLK 17
+#define CLK_GOUT_PERIC0_USI00_I_PCLK 18
+#define CLK_GOUT_PERIC0_USI00_I_SCLK_USI 19
+#define CLK_GOUT_PERIC0_USI01_I_PCLK 20
+#define CLK_GOUT_PERIC0_USI01_I_SCLK_USI 21
+#define CLK_GOUT_PERIC0_USI02_I_PCLK 22
+#define CLK_GOUT_PERIC0_USI02_I_SCLK_USI 23
+#define CLK_GOUT_PERIC0_USI03_I_PCLK 24
+#define CLK_GOUT_PERIC0_USI03_I_SCLK_USI 25
+
+/* CMU_PERIC1 */
+#define CLK_MOUT_PERIC1_BUS_USER 1
+#define CLK_MOUT_PERIC1_SPEEDY2_USER 2
+#define CLK_MOUT_PERIC1_SPI_CAM0_USER 3
+#define CLK_MOUT_PERIC1_SPI_CAM1_USER 4
+#define CLK_MOUT_PERIC1_UART_BT_USER 5
+#define CLK_MOUT_PERIC1_USI04_USER 6
+#define CLK_MOUT_PERIC1_USI05_USER 7
+#define CLK_MOUT_PERIC1_USI06_USER 8
+#define CLK_MOUT_PERIC1_USI07_USER 9
+#define CLK_MOUT_PERIC1_USI08_USER 10
+#define CLK_MOUT_PERIC1_USI09_USER 11
+#define CLK_MOUT_PERIC1_USI10_USER 12
+#define CLK_MOUT_PERIC1_USI11_USER 13
+#define CLK_MOUT_PERIC1_USI12_USER 14
+#define CLK_MOUT_PERIC1_USI13_USER 15
+#define CLK_GOUT_PERIC1_PERIC1_CMU_PERIC1_PCLK 16
+#define CLK_GOUT_PERIC1_RSTNSYNC_CLK_PERIC1_SPEEDY2_CLK 17
+#define CLK_GOUT_PERIC1_AXI2APB_PERIC1P0_ACLK 18
+#define CLK_GOUT_PERIC1_AXI2APB_PERIC1P1_ACLK 19
+#define CLK_GOUT_PERIC1_AXI2APB_PERIC1P2_ACLK 20
+#define CLK_GOUT_PERIC1_GPIO_PERIC1_PCLK 21
+#define CLK_GOUT_PERIC1_HSI2C_CAM0_IPCLK 22
+#define CLK_GOUT_PERIC1_HSI2C_CAM1_IPCLK 23
+#define CLK_GOUT_PERIC1_HSI2C_CAM2_IPCLK 24
+#define CLK_GOUT_PERIC1_HSI2C_CAM3_IPCLK 25
+#define CLK_GOUT_PERIC1_LHM_AXI_P_PERIC1_I_CLK 26
+#define CLK_GOUT_PERIC1_PMU_PERIC1_PCLK 27
+#define CLK_GOUT_PERIC1_RSTNSYNC_CLK_PERIC1_BUSP_CLK 28
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI1_CLK 29
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI1_SCLK 30
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI2_CLK 31
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI2_SCLK 32
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI_CLK 33
+#define CLK_GOUT_PERIC1_SPEEDY2_DDI_SCLK 34
+#define CLK_GOUT_PERIC1_SPEEDY2_TSP1_CLK 35
+#define CLK_GOUT_PERIC1_SPEEDY2_TSP2_CLK 36
+#define CLK_GOUT_PERIC1_SPI_CAM0_PCLK 37
+#define CLK_GOUT_PERIC1_SPI_CAM0_SPI_EXT_CLK 38
+#define CLK_GOUT_PERIC1_SPI_CAM1_PCLK 39
+#define CLK_GOUT_PERIC1_SPI_CAM1_SPI_EXT_CLK 40
+#define CLK_GOUT_PERIC1_SYSREG_PERIC1_PCLK 41
+#define CLK_GOUT_PERIC1_UART_BT_EXT_UCLK 42
+#define CLK_GOUT_PERIC1_UART_BT_PCLK 43
+#define CLK_GOUT_PERIC1_USI04_I_PCLK 44
+#define CLK_GOUT_PERIC1_USI04_I_SCLK_USI 45
+#define CLK_GOUT_PERIC1_USI05_I_PCLK 46
+#define CLK_GOUT_PERIC1_USI05_I_SCLK_USI 47
+#define CLK_GOUT_PERIC1_USI06_I_PCLK 48
+#define CLK_GOUT_PERIC1_USI06_I_SCLK_USI 49
+#define CLK_GOUT_PERIC1_USI07_I_PCLK 50
+#define CLK_GOUT_PERIC1_USI07_I_SCLK_USI 51
+#define CLK_GOUT_PERIC1_USI08_I_PCLK 52
+#define CLK_GOUT_PERIC1_USI08_I_SCLK_USI 53
+#define CLK_GOUT_PERIC1_USI09_I_PCLK 54
+#define CLK_GOUT_PERIC1_USI09_I_SCLK_USI 55
+#define CLK_GOUT_PERIC1_USI10_I_PCLK 56
+#define CLK_GOUT_PERIC1_USI10_I_SCLK_USI 57
+#define CLK_GOUT_PERIC1_USI11_I_PCLK 58
+#define CLK_GOUT_PERIC1_USI11_I_SCLK_USI 59
+#define CLK_GOUT_PERIC1_USI12_I_PCLK 60
+#define CLK_GOUT_PERIC1_USI12_I_SCLK_USI 61
+#define CLK_GOUT_PERIC1_USI13_I_PCLK 62
+#define CLK_GOUT_PERIC1_USI13_I_SCLK_USI 63
+#define CLK_GOUT_PERIC1_XIU_P_PERIC1_ACLK 64
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS8895_H */
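
A similar hedged sketch for this header, using the CMU_FSYS1 card-interface gates; the node shape, address, compatible, and clock-names follow the usual dw-mshc convention and are assumptions, only the CLK_GOUT_FSYS1_MMC_CARD_* indices are defined above:

    #include <dt-bindings/clock/samsung,exynos8895.h>

    /* hypothetical SD-card host; only the CLK_GOUT_FSYS1_MMC_CARD_* indices come from this header */
    mmc_card: mmc@11400000 {
            compatible = "samsung,exynos8895-dw-mshc";
            clocks = <&cmu_fsys1 CLK_GOUT_FSYS1_MMC_CARD_I_ACLK>,
                     <&cmu_fsys1 CLK_GOUT_FSYS1_MMC_CARD_SDCLKIN>;
            clock-names = "biu", "ciu";
    };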
diff --git a/include/dt-bindings/clock/samsung,exynos990.h b/include/dt-bindings/clock/samsung,exynos990.h
new file mode 100644
index 000000000000..47540307cb52
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,exynos990.h
@@ -0,0 +1,438 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2024 Igor Belwon <igor.belwon@mentallysanemainliners.org>
+ *
+ * Device Tree binding constants for Exynos990 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS_990_H
+#define _DT_BINDINGS_CLOCK_EXYNOS_990_H
+
+/* CMU_TOP */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_FOUT_SHARED2_PLL 3
+#define CLK_FOUT_SHARED3_PLL 4
+#define CLK_FOUT_SHARED4_PLL 5
+#define CLK_FOUT_G3D_PLL 6
+#define CLK_FOUT_MMC_PLL 7
+#define CLK_MOUT_PLL_SHARED0 8
+#define CLK_MOUT_PLL_SHARED1 9
+#define CLK_MOUT_PLL_SHARED2 10
+#define CLK_MOUT_PLL_SHARED3 11
+#define CLK_MOUT_PLL_SHARED4 12
+#define CLK_MOUT_PLL_MMC 13
+#define CLK_MOUT_PLL_G3D 14
+#define CLK_MOUT_CMU_APM_BUS 15
+#define CLK_MOUT_CMU_AUD_CPU 16
+#define CLK_MOUT_CMU_BUS0_BUS 17
+#define CLK_MOUT_CMU_BUS1_BUS 18
+#define CLK_MOUT_CMU_BUS1_SSS 19
+#define CLK_MOUT_CMU_CIS_CLK0 20
+#define CLK_MOUT_CMU_CIS_CLK1 21
+#define CLK_MOUT_CMU_CIS_CLK2 22
+#define CLK_MOUT_CMU_CIS_CLK3 23
+#define CLK_MOUT_CMU_CIS_CLK4 24
+#define CLK_MOUT_CMU_CIS_CLK5 25
+#define CLK_MOUT_CMU_CMU_BOOST 26
+#define CLK_MOUT_CMU_CORE_BUS 27
+#define CLK_MOUT_CMU_CPUCL0_DBG_BUS 28
+#define CLK_MOUT_CMU_CPUCL0_SWITCH 29
+#define CLK_MOUT_CMU_CPUCL1_SWITCH 30
+#define CLK_MOUT_CMU_CPUCL2_BUSP 31
+#define CLK_MOUT_CMU_CPUCL2_SWITCH 32
+#define CLK_MOUT_CMU_CSIS_BUS 33
+#define CLK_MOUT_CMU_CSIS_OIS_MCU 34
+#define CLK_MOUT_CMU_DNC_BUS 35
+#define CLK_MOUT_CMU_DNC_BUSM 36
+#define CLK_MOUT_CMU_DNS_BUS 37
+#define CLK_MOUT_CMU_DPU 38
+#define CLK_MOUT_CMU_DPU_ALT 39
+#define CLK_MOUT_CMU_DSP_BUS 40
+#define CLK_MOUT_CMU_G2D_G2D 41
+#define CLK_MOUT_CMU_G2D_MSCL 42
+#define CLK_MOUT_CMU_HPM 43
+#define CLK_MOUT_CMU_HSI0_BUS 44
+#define CLK_MOUT_CMU_HSI0_DPGTC 45
+#define CLK_MOUT_CMU_HSI0_USB31DRD 46
+#define CLK_MOUT_CMU_HSI0_USBDP_DEBUG 47
+#define CLK_MOUT_CMU_HSI1_BUS 48
+#define CLK_MOUT_CMU_HSI1_MMC_CARD 49
+#define CLK_MOUT_CMU_HSI1_PCIE 50
+#define CLK_MOUT_CMU_HSI1_UFS_CARD 51
+#define CLK_MOUT_CMU_HSI1_UFS_EMBD 52
+#define CLK_MOUT_CMU_HSI2_BUS 53
+#define CLK_MOUT_CMU_HSI2_PCIE 54
+#define CLK_MOUT_CMU_IPP_BUS 55
+#define CLK_MOUT_CMU_ITP_BUS 56
+#define CLK_MOUT_CMU_MCSC_BUS 57
+#define CLK_MOUT_CMU_MCSC_GDC 58
+#define CLK_MOUT_CMU_CMU_BOOST_CPU 59
+#define CLK_MOUT_CMU_MFC0_MFC0 60
+#define CLK_MOUT_CMU_MFC0_WFD 61
+#define CLK_MOUT_CMU_MIF_BUSP 62
+#define CLK_MOUT_CMU_MIF_SWITCH 63
+#define CLK_MOUT_CMU_NPU_BUS 64
+#define CLK_MOUT_CMU_PERIC0_BUS 65
+#define CLK_MOUT_CMU_PERIC0_IP 66
+#define CLK_MOUT_CMU_PERIC1_BUS 67
+#define CLK_MOUT_CMU_PERIC1_IP 68
+#define CLK_MOUT_CMU_PERIS_BUS 69
+#define CLK_MOUT_CMU_SSP_BUS 70
+#define CLK_MOUT_CMU_TNR_BUS 71
+#define CLK_MOUT_CMU_VRA_BUS 72
+#define CLK_DOUT_CMU_APM_BUS 73
+#define CLK_DOUT_CMU_AUD_CPU 74
+#define CLK_DOUT_CMU_BUS0_BUS 75
+#define CLK_DOUT_CMU_BUS1_BUS 76
+#define CLK_DOUT_CMU_BUS1_SSS 77
+#define CLK_DOUT_CMU_CIS_CLK0 78
+#define CLK_DOUT_CMU_CIS_CLK1 79
+#define CLK_DOUT_CMU_CIS_CLK2 80
+#define CLK_DOUT_CMU_CIS_CLK3 81
+#define CLK_DOUT_CMU_CIS_CLK4 82
+#define CLK_DOUT_CMU_CIS_CLK5 83
+#define CLK_DOUT_CMU_CMU_BOOST 84
+#define CLK_DOUT_CMU_CORE_BUS 85
+#define CLK_DOUT_CMU_CPUCL0_DBG_BUS 86
+#define CLK_DOUT_CMU_CPUCL0_SWITCH 87
+#define CLK_DOUT_CMU_CPUCL1_SWITCH 88
+#define CLK_DOUT_CMU_CPUCL2_BUSP 89
+#define CLK_DOUT_CMU_CPUCL2_SWITCH 90
+#define CLK_DOUT_CMU_CSIS_BUS 91
+#define CLK_DOUT_CMU_CSIS_OIS_MCU 92
+#define CLK_DOUT_CMU_DNC_BUS 93
+#define CLK_DOUT_CMU_DNC_BUSM 94
+#define CLK_DOUT_CMU_DNS_BUS 95
+#define CLK_DOUT_CMU_DSP_BUS 96
+#define CLK_DOUT_CMU_G2D_G2D 97
+#define CLK_DOUT_CMU_G2D_MSCL 98
+#define CLK_DOUT_CMU_G3D_SWITCH 99
+#define CLK_DOUT_CMU_HPM 100
+#define CLK_DOUT_CMU_HSI0_BUS 101
+#define CLK_DOUT_CMU_HSI0_DPGTC 102
+#define CLK_DOUT_CMU_HSI0_USB31DRD 103
+#define CLK_DOUT_CMU_HSI0_USBDP_DEBUG 104
+#define CLK_DOUT_CMU_HSI1_BUS 105
+#define CLK_DOUT_CMU_HSI1_MMC_CARD 106
+#define CLK_DOUT_CMU_HSI1_PCIE 107
+#define CLK_DOUT_CMU_HSI1_UFS_CARD 108
+#define CLK_DOUT_CMU_HSI1_UFS_EMBD 109
+#define CLK_DOUT_CMU_HSI2_BUS 110
+#define CLK_DOUT_CMU_HSI2_PCIE 111
+#define CLK_DOUT_CMU_IPP_BUS 112
+#define CLK_DOUT_CMU_ITP_BUS 113
+#define CLK_DOUT_CMU_MCSC_BUS 114
+#define CLK_DOUT_CMU_MCSC_GDC 115
+#define CLK_DOUT_CMU_CMU_BOOST_CPU 116
+#define CLK_DOUT_CMU_MFC0_MFC0 117
+#define CLK_DOUT_CMU_MFC0_WFD 118
+#define CLK_DOUT_CMU_MIF_BUSP 119
+#define CLK_DOUT_CMU_NPU_BUS 120
+#define CLK_DOUT_CMU_OTP 121
+#define CLK_DOUT_CMU_PERIC0_BUS 122
+#define CLK_DOUT_CMU_PERIC0_IP 123
+#define CLK_DOUT_CMU_PERIC1_BUS 124
+#define CLK_DOUT_CMU_PERIC1_IP 125
+#define CLK_DOUT_CMU_PERIS_BUS 126
+#define CLK_DOUT_CMU_SSP_BUS 127
+#define CLK_DOUT_CMU_TNR_BUS 128
+#define CLK_DOUT_CMU_VRA_BUS 129
+#define CLK_DOUT_CMU_DPU 130
+#define CLK_DOUT_CMU_DPU_ALT 131
+#define CLK_DOUT_CMU_SHARED0_DIV2 132
+#define CLK_DOUT_CMU_SHARED0_DIV3 133
+#define CLK_DOUT_CMU_SHARED0_DIV4 134
+#define CLK_DOUT_CMU_SHARED1_DIV2 135
+#define CLK_DOUT_CMU_SHARED1_DIV3 136
+#define CLK_DOUT_CMU_SHARED1_DIV4 137
+#define CLK_DOUT_CMU_SHARED2_DIV2 138
+#define CLK_DOUT_CMU_SHARED4_DIV2 139
+#define CLK_DOUT_CMU_SHARED4_DIV3 140
+#define CLK_DOUT_CMU_SHARED4_DIV4 141
+#define CLK_GOUT_CMU_G3D_BUS 142
+#define CLK_GOUT_CMU_MIF_SWITCH 143
+#define CLK_GOUT_CMU_APM_BUS 144
+#define CLK_GOUT_CMU_AUD_CPU 145
+#define CLK_GOUT_CMU_BUS0_BUS 146
+#define CLK_GOUT_CMU_BUS1_BUS 147
+#define CLK_GOUT_CMU_BUS1_SSS 148
+#define CLK_GOUT_CMU_CIS_CLK0 149
+#define CLK_GOUT_CMU_CIS_CLK1 150
+#define CLK_GOUT_CMU_CIS_CLK2 151
+#define CLK_GOUT_CMU_CIS_CLK3 152
+#define CLK_GOUT_CMU_CIS_CLK4 153
+#define CLK_GOUT_CMU_CIS_CLK5 154
+#define CLK_GOUT_CMU_CORE_BUS 155
+#define CLK_GOUT_CMU_CPUCL0_DBG_BUS 156
+#define CLK_GOUT_CMU_CPUCL0_SWITCH 157
+#define CLK_GOUT_CMU_CPUCL1_SWITCH 158
+#define CLK_GOUT_CMU_CPUCL2_BUSP 159
+#define CLK_GOUT_CMU_CPUCL2_SWITCH 160
+#define CLK_GOUT_CMU_CSIS_BUS 161
+#define CLK_GOUT_CMU_CSIS_OIS_MCU 162
+#define CLK_GOUT_CMU_DNC_BUS 163
+#define CLK_GOUT_CMU_DNC_BUSM 164
+#define CLK_GOUT_CMU_DNS_BUS 165
+#define CLK_GOUT_CMU_DPU 166
+#define CLK_GOUT_CMU_DPU_BUS 167
+#define CLK_GOUT_CMU_DSP_BUS 168
+#define CLK_GOUT_CMU_G2D_G2D 169
+#define CLK_GOUT_CMU_G2D_MSCL 170
+#define CLK_GOUT_CMU_G3D_SWITCH 171
+#define CLK_GOUT_CMU_HPM 172
+#define CLK_GOUT_CMU_HSI0_BUS 173
+#define CLK_GOUT_CMU_HSI0_DPGTC 174
+#define CLK_GOUT_CMU_HSI0_USB31DRD 175
+#define CLK_GOUT_CMU_HSI0_USBDP_DEBUG 176
+#define CLK_GOUT_CMU_HSI1_BUS 177
+#define CLK_GOUT_CMU_HSI1_MMC_CARD 178
+#define CLK_GOUT_CMU_HSI1_PCIE 179
+#define CLK_GOUT_CMU_HSI1_UFS_CARD 180
+#define CLK_GOUT_CMU_HSI1_UFS_EMBD 181
+#define CLK_GOUT_CMU_HSI2_BUS 182
+#define CLK_GOUT_CMU_HSI2_PCIE 183
+#define CLK_GOUT_CMU_IPP_BUS 184
+#define CLK_GOUT_CMU_ITP_BUS 185
+#define CLK_GOUT_CMU_MCSC_BUS 186
+#define CLK_GOUT_CMU_MCSC_GDC 187
+#define CLK_GOUT_CMU_MFC0_MFC0 188
+#define CLK_GOUT_CMU_MFC0_WFD 189
+#define CLK_GOUT_CMU_MIF_BUSP 190
+#define CLK_GOUT_CMU_NPU_BUS 191
+#define CLK_GOUT_CMU_PERIC0_BUS 192
+#define CLK_GOUT_CMU_PERIC0_IP 193
+#define CLK_GOUT_CMU_PERIC1_BUS 194
+#define CLK_GOUT_CMU_PERIC1_IP 195
+#define CLK_GOUT_CMU_PERIS_BUS 196
+#define CLK_GOUT_CMU_SSP_BUS 197
+#define CLK_GOUT_CMU_TNR_BUS 198
+#define CLK_GOUT_CMU_VRA_BUS 199
+#define CLK_MOUT_CMU_CMUREF 200
+#define CLK_MOUT_CMU_DPU_BUS 201
+#define CLK_MOUT_CMU_CLK_CMUREF 202
+#define CLK_DOUT_CMU_CLK_CMUREF 203
+
+/* CMU_HSI0 */
+#define CLK_MOUT_HSI0_BUS_USER 1
+#define CLK_MOUT_HSI0_USB31DRD_USER 2
+#define CLK_MOUT_HSI0_USBDP_DEBUG_USER 3
+#define CLK_MOUT_HSI0_DPGTC_USER 4
+#define CLK_GOUT_HSI0_DP_LINK_DP_GTC_CLK 5
+#define CLK_GOUT_HSI0_DP_LINK_PCLK 6
+#define CLK_GOUT_HSI0_D_TZPC_HSI0_PCLK 7
+#define CLK_GOUT_HSI0_LHM_AXI_P_HSI0_CLK 8
+#define CLK_GOUT_HSI0_PPMU_HSI0_BUS1_ACLK 9
+#define CLK_GOUT_HSI0_PPMU_HSI0_BUS1_PCLK 10
+#define CLK_GOUT_HSI0_CLK_HSI0_BUS_CLK 11
+#define CLK_GOUT_HSI0_SYSMMU_USB_CLK_S2 12
+#define CLK_GOUT_HSI0_SYSREG_HSI0_PCLK 13
+#define CLK_GOUT_HSI0_USB31DRD_ACLK_PHYCTRL 14
+#define CLK_GOUT_HSI0_USB31DRD_BUS_CLK_EARLY 15
+#define CLK_GOUT_HSI0_USB31DRD_USB31DRD_REF_CLK_40 16
+#define CLK_GOUT_HSI0_USB31DRD_USBDPPHY_REF_SOC_PLL 17
+#define CLK_GOUT_HSI0_USB31DRD_USBDPPHY_SCL_APB 18
+#define CLK_GOUT_HSI0_USB31DRD_USBPCS_APB_CLK 19
+#define CLK_GOUT_HSI0_VGEN_LITE_HSI0_CLK 20
+#define CLK_GOUT_HSI0_CMU_HSI0_PCLK 21
+#define CLK_GOUT_HSI0_XIU_D_HSI0_ACLK 22
+#define CLK_GOUT_HSI0_LHS_ACEL_D_HSI0_CLK 23
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_BUS_USER 1
+#define CLK_MOUT_PERIC0_UART_DBG 2
+#define CLK_MOUT_PERIC0_USI00_USI_USER 3
+#define CLK_MOUT_PERIC0_USI01_USI_USER 4
+#define CLK_MOUT_PERIC0_USI02_USI_USER 5
+#define CLK_MOUT_PERIC0_USI03_USI_USER 6
+#define CLK_MOUT_PERIC0_USI04_USI_USER 7
+#define CLK_MOUT_PERIC0_USI05_USI_USER 8
+#define CLK_MOUT_PERIC0_USI13_USI_USER 9
+#define CLK_MOUT_PERIC0_USI14_USI_USER 10
+#define CLK_MOUT_PERIC0_USI15_USI_USER 11
+#define CLK_MOUT_PERIC0_USI_I2C_USER 12
+#define CLK_DOUT_PERIC0_UART_DBG 13
+#define CLK_DOUT_PERIC0_USI00_USI 14
+#define CLK_DOUT_PERIC0_USI01_USI 15
+#define CLK_DOUT_PERIC0_USI02_USI 16
+#define CLK_DOUT_PERIC0_USI03_USI 17
+#define CLK_DOUT_PERIC0_USI04_USI 18
+#define CLK_DOUT_PERIC0_USI05_USI 19
+#define CLK_DOUT_PERIC0_USI13_USI 20
+#define CLK_DOUT_PERIC0_USI14_USI 21
+#define CLK_DOUT_PERIC0_USI15_USI 22
+#define CLK_DOUT_PERIC0_USI_I2C 23
+#define CLK_GOUT_PERIC0_CMU_PCLK 24
+#define CLK_GOUT_PERIC0_OSCCLK_CLK 25
+#define CLK_GOUT_PERIC0_D_TZPC_PCLK 26
+#define CLK_GOUT_PERIC0_GPIO_PCLK 27
+#define CLK_GOUT_PERIC0_LHM_AXI_P_CLK 28
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_10 29
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_11 30
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_12 31
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_13 32
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_14 33
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_15 34
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_4 35
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_5 36
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_6 37
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_7 38
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_8 39
+#define CLK_GOUT_PERIC0_TOP0_IPCLK_9 40
+#define CLK_GOUT_PERIC0_TOP0_PCLK_10 41
+#define CLK_GOUT_PERIC0_TOP0_PCLK_11 42
+#define CLK_GOUT_PERIC0_TOP0_PCLK_12 43
+#define CLK_GOUT_PERIC0_TOP0_PCLK_13 44
+#define CLK_GOUT_PERIC0_TOP0_PCLK_14 45
+#define CLK_GOUT_PERIC0_TOP0_PCLK_15 46
+#define CLK_GOUT_PERIC0_TOP0_PCLK_4 47
+#define CLK_GOUT_PERIC0_TOP0_PCLK_5 48
+#define CLK_GOUT_PERIC0_TOP0_PCLK_6 49
+#define CLK_GOUT_PERIC0_TOP0_PCLK_7 50
+#define CLK_GOUT_PERIC0_TOP0_PCLK_8 51
+#define CLK_GOUT_PERIC0_TOP0_PCLK_9 52
+#define CLK_GOUT_PERIC0_TOP1_IPCLK_0 53
+#define CLK_GOUT_PERIC0_TOP1_IPCLK_3 54
+#define CLK_GOUT_PERIC0_TOP1_IPCLK_4 55
+#define CLK_GOUT_PERIC0_TOP1_IPCLK_5 56
+#define CLK_GOUT_PERIC0_TOP1_IPCLK_6 57
+#define CLK_GOUT_PERIC0_TOP1_IPCLK_7 58
+#define CLK_GOUT_PERIC0_TOP1_IPCLK_8 59
+#define CLK_GOUT_PERIC0_TOP1_PCLK_0 60
+#define CLK_GOUT_PERIC0_TOP1_PCLK_15 61
+#define CLK_GOUT_PERIC0_TOP1_PCLK_3 62
+#define CLK_GOUT_PERIC0_TOP1_PCLK_4 63
+#define CLK_GOUT_PERIC0_TOP1_PCLK_5 64
+#define CLK_GOUT_PERIC0_TOP1_PCLK_6 65
+#define CLK_GOUT_PERIC0_TOP1_PCLK_7 66
+#define CLK_GOUT_PERIC0_TOP1_PCLK_8 67
+#define CLK_GOUT_PERIC0_BUSP_CLK 68
+#define CLK_GOUT_PERIC0_UART_DBG_CLK 69
+#define CLK_GOUT_PERIC0_USI00_USI_CLK 70
+#define CLK_GOUT_PERIC0_USI01_USI_CLK 71
+#define CLK_GOUT_PERIC0_USI02_USI_CLK 72
+#define CLK_GOUT_PERIC0_USI03_USI_CLK 73
+#define CLK_GOUT_PERIC0_USI04_USI_CLK 74
+#define CLK_GOUT_PERIC0_USI05_USI_CLK 75
+#define CLK_GOUT_PERIC0_USI13_USI_CLK 76
+#define CLK_GOUT_PERIC0_USI14_USI_CLK 77
+#define CLK_GOUT_PERIC0_USI15_USI_CLK 78
+#define CLK_GOUT_PERIC0_USI_I2C_CLK 79
+#define CLK_GOUT_PERIC0_SYSREG_PCLK 80
+
+/* CMU_PERIC1 */
+#define CLK_MOUT_PERIC1_BUS_USER 1
+#define CLK_MOUT_PERIC1_UART_BT_USER 2
+#define CLK_MOUT_PERIC1_USI06_USI_USER 3
+#define CLK_MOUT_PERIC1_USI07_USI_USER 4
+#define CLK_MOUT_PERIC1_USI08_USI_USER 5
+#define CLK_MOUT_PERIC1_USI09_USI_USER 6
+#define CLK_MOUT_PERIC1_USI10_USI_USER 7
+#define CLK_MOUT_PERIC1_USI11_USI_USER 8
+#define CLK_MOUT_PERIC1_USI12_USI_USER 9
+#define CLK_MOUT_PERIC1_USI18_USI_USER 10
+#define CLK_MOUT_PERIC1_USI16_USI_USER 11
+#define CLK_MOUT_PERIC1_USI17_USI_USER 12
+#define CLK_MOUT_PERIC1_USI_I2C_USER 13
+#define CLK_DOUT_PERIC1_UART_BT 14
+#define CLK_DOUT_PERIC1_USI06_USI 15
+#define CLK_DOUT_PERIC1_USI07_USI 16
+#define CLK_DOUT_PERIC1_USI08_USI 17
+#define CLK_DOUT_PERIC1_USI18_USI 18
+#define CLK_DOUT_PERIC1_USI12_USI 19
+#define CLK_DOUT_PERIC1_USI09_USI 20
+#define CLK_DOUT_PERIC1_USI10_USI 21
+#define CLK_DOUT_PERIC1_USI11_USI 22
+#define CLK_DOUT_PERIC1_USI16_USI 23
+#define CLK_DOUT_PERIC1_USI17_USI 24
+#define CLK_DOUT_PERIC1_USI_I2C 25
+#define CLK_GOUT_PERIC1_CMU_PCLK 26
+#define CLK_GOUT_PERIC1_UART_BT_CLK 27
+#define CLK_GOUT_PERIC1_USI12_USI_CLK 28
+#define CLK_GOUT_PERIC1_USI18_USI_CLK 29
+#define CLK_GOUT_PERIC1_D_TZPC_PCLK 30
+#define CLK_GOUT_PERIC1_GPIO_PCLK 31
+#define CLK_GOUT_PERIC1_LHM_AXI_P_CSIS_CLK 32
+#define CLK_GOUT_PERIC1_LHM_AXI_P_CLK 33
+#define CLK_GOUT_PERIC1_TOP0_IPCLK_10 34
+#define CLK_GOUT_PERIC1_TOP0_IPCLK_11 35
+#define CLK_GOUT_PERIC1_TOP0_IPCLK_12 36
+#define CLK_GOUT_PERIC1_TOP0_IPCLK_13 37
+#define CLK_GOUT_PERIC1_TOP0_IPCLK_14 38
+#define CLK_GOUT_PERIC1_TOP0_IPCLK_15 39
+#define CLK_GOUT_PERIC1_TOP0_IPCLK_4 40
+#define CLK_GOUT_PERIC1_TOP0_PCLK_10 41
+#define CLK_GOUT_PERIC1_TOP0_PCLK_11 42
+#define CLK_GOUT_PERIC1_TOP0_PCLK_12 43
+#define CLK_GOUT_PERIC1_TOP0_PCLK_13 44
+#define CLK_GOUT_PERIC1_TOP0_PCLK_14 45
+#define CLK_GOUT_PERIC1_TOP0_PCLK_15 46
+#define CLK_GOUT_PERIC1_TOP0_PCLK_4 47
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_0 48
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_1 49
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_10 50
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_12 51
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_13 52
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_14 53
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_15 54
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_2 55
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_3 56
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_4 57
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_5 58
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_6 59
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_7 60
+#define CLK_GOUT_PERIC1_TOP1_IPCLK_9 61
+#define CLK_GOUT_PERIC1_TOP1_PCLK_0 62
+#define CLK_GOUT_PERIC1_TOP1_PCLK_1 63
+#define CLK_GOUT_PERIC1_TOP1_PCLK_10 64
+#define CLK_GOUT_PERIC1_TOP1_PCLK_12 65
+#define CLK_GOUT_PERIC1_TOP1_PCLK_13 66
+#define CLK_GOUT_PERIC1_TOP1_PCLK_14 67
+#define CLK_GOUT_PERIC1_TOP1_PCLK_15 68
+#define CLK_GOUT_PERIC1_TOP1_PCLK_2 69
+#define CLK_GOUT_PERIC1_TOP1_PCLK_3 70
+#define CLK_GOUT_PERIC1_TOP1_PCLK_4 71
+#define CLK_GOUT_PERIC1_TOP1_PCLK_5 72
+#define CLK_GOUT_PERIC1_TOP1_PCLK_6 73
+#define CLK_GOUT_PERIC1_TOP1_PCLK_7 74
+#define CLK_GOUT_PERIC1_TOP1_PCLK_9 75
+#define CLK_GOUT_PERIC1_BUSP_CLK 76
+#define CLK_GOUT_PERIC1_OSCCLK_CLK 77
+#define CLK_GOUT_PERIC1_USI06_USI_CLK 78
+#define CLK_GOUT_PERIC1_USI07_USI_CLK 79
+#define CLK_GOUT_PERIC1_USI08_USI_CLK 80
+#define CLK_GOUT_PERIC1_USI09_USI_CLK 81
+#define CLK_GOUT_PERIC1_USI10_USI_CLK 82
+#define CLK_GOUT_PERIC1_USI11_USI_CLK 83
+#define CLK_GOUT_PERIC1_USI16_USI_CLK 84
+#define CLK_GOUT_PERIC1_USI17_USI_CLK 85
+#define CLK_GOUT_PERIC1_USI_I2C_CLK 86
+#define CLK_GOUT_PERIC1_SYSREG_PCLK 87
+#define CLK_GOUT_PERIC1_USI16_I3C_PCLK 88
+#define CLK_GOUT_PERIC1_USI16_I3C_SCLK 89
+#define CLK_GOUT_PERIC1_USI17_I3C_PCLK 90
+#define CLK_GOUT_PERIC1_USI17_I3C_SCLK 91
+#define CLK_GOUT_PERIC1_XIU_P_ACLK 92
+
+/* CMU_PERIS */
+#define CLK_MOUT_PERIS_BUS_USER 1
+#define CLK_MOUT_PERIS_CLK_PERIS_GIC 2
+#define CLK_GOUT_PERIS_SYSREG_PERIS_PCLK 3
+#define CLK_GOUT_PERIS_WDT_CLUSTER2_PCLK 4
+#define CLK_GOUT_PERIS_WDT_CLUSTER0_PCLK 5
+#define CLK_CLK_PERIS_PERIS_CMU_PERIS_PCLK 6
+#define CLK_GOUT_PERIS_CLK_PERIS_BUSP_CLK 7
+#define CLK_GOUT_PERIS_CLK_PERIS_OSCCLK_CLK 8
+#define CLK_GOUT_PERIS_CLK_PERIS_GIC_CLK 9
+#define CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKM 10
+#define CLK_GOUT_PERIS_OTP_CON_BIRA_PCLK 11
+#define CLK_GOUT_PERIS_GIC_CLK 12
+#define CLK_GOUT_PERIS_LHM_AXI_P_PERIS_CLK 13
+#define CLK_GOUT_PERIS_MCT_PCLK 14
+#define CLK_GOUT_PERIS_OTP_CON_TOP_PCLK 15
+#define CLK_GOUT_PERIS_D_TZPC_PERIS_PCLK 16
+#define CLK_GOUT_PERIS_TMU_TOP_PCLK 17
+#define CLK_GOUT_PERIS_OTP_CON_BIRA_OSCCLK 18
+#define CLK_GOUT_PERIS_OTP_CON_TOP_OSCCLK 19
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS_990_H */
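
For context, a consumer of the CMU_PERIS indices above might look like the sketch below; the timer address, compatible, oscillator label, and clock-names follow the common Exynos MCT convention and are assumptions, only CLK_GOUT_PERIS_MCT_PCLK is taken from this header:

    #include <dt-bindings/clock/samsung,exynos990.h>

    /* hypothetical multi-core timer node; only CLK_GOUT_PERIS_MCT_PCLK comes from this header */
    timer@10040000 {
            compatible = "samsung,exynos4210-mct";
            clocks = <&oscclk>, <&cmu_peris CLK_GOUT_PERIS_MCT_PCLK>;
            clock-names = "fin_pll", "mct";
    };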
diff --git a/include/dt-bindings/clock/samsung,exynosautov9.h b/include/dt-bindings/clock/samsung,exynosautov9.h
new file mode 100644
index 000000000000..ce8fb8f7d718
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,exynosautov9.h
@@ -0,0 +1,360 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd.
+ * Author: Chanho Park <chanho61.park@samsung.com>
+ *
+ * Device Tree binding constants for Exynos Auto V9 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOSAUTOV9_H
+#define _DT_BINDINGS_CLOCK_EXYNOSAUTOV9_H
+
+/* CMU_TOP */
+#define FOUT_SHARED0_PLL 1
+#define FOUT_SHARED1_PLL 2
+#define FOUT_SHARED2_PLL 3
+#define FOUT_SHARED3_PLL 4
+#define FOUT_SHARED4_PLL 5
+
+/* MUX in CMU_TOP */
+#define MOUT_SHARED0_PLL 6
+#define MOUT_SHARED1_PLL 7
+#define MOUT_SHARED2_PLL 8
+#define MOUT_SHARED3_PLL 9
+#define MOUT_SHARED4_PLL 10
+#define MOUT_CLKCMU_CMU_BOOST 11
+#define MOUT_CLKCMU_CMU_CMUREF 12
+#define MOUT_CLKCMU_ACC_BUS 13
+#define MOUT_CLKCMU_APM_BUS 14
+#define MOUT_CLKCMU_AUD_CPU 15
+#define MOUT_CLKCMU_AUD_BUS 16
+#define MOUT_CLKCMU_BUSC_BUS 17
+#define MOUT_CLKCMU_BUSMC_BUS 19
+#define MOUT_CLKCMU_CORE_BUS 20
+#define MOUT_CLKCMU_CPUCL0_SWITCH 21
+#define MOUT_CLKCMU_CPUCL0_CLUSTER 22
+#define MOUT_CLKCMU_CPUCL1_SWITCH 24
+#define MOUT_CLKCMU_CPUCL1_CLUSTER 25
+#define MOUT_CLKCMU_DPTX_BUS 26
+#define MOUT_CLKCMU_DPTX_DPGTC 27
+#define MOUT_CLKCMU_DPUM_BUS 28
+#define MOUT_CLKCMU_DPUS0_BUS 29
+#define MOUT_CLKCMU_DPUS1_BUS 30
+#define MOUT_CLKCMU_FSYS0_BUS 31
+#define MOUT_CLKCMU_FSYS0_PCIE 32
+#define MOUT_CLKCMU_FSYS1_BUS 33
+#define MOUT_CLKCMU_FSYS1_USBDRD 34
+#define MOUT_CLKCMU_FSYS1_MMC_CARD 35
+#define MOUT_CLKCMU_FSYS2_BUS 36
+#define MOUT_CLKCMU_FSYS2_UFS_EMBD 37
+#define MOUT_CLKCMU_FSYS2_ETHERNET 38
+#define MOUT_CLKCMU_G2D_G2D 39
+#define MOUT_CLKCMU_G2D_MSCL 40
+#define MOUT_CLKCMU_G3D00_SWITCH 41
+#define MOUT_CLKCMU_G3D01_SWITCH 42
+#define MOUT_CLKCMU_G3D1_SWITCH 43
+#define MOUT_CLKCMU_ISPB_BUS 44
+#define MOUT_CLKCMU_MFC_MFC 45
+#define MOUT_CLKCMU_MFC_WFD 46
+#define MOUT_CLKCMU_MIF_SWITCH 47
+#define MOUT_CLKCMU_MIF_BUSP 48
+#define MOUT_CLKCMU_NPU_BUS 49
+#define MOUT_CLKCMU_PERIC0_BUS 50
+#define MOUT_CLKCMU_PERIC0_IP 51
+#define MOUT_CLKCMU_PERIC1_BUS 52
+#define MOUT_CLKCMU_PERIC1_IP 53
+#define MOUT_CLKCMU_PERIS_BUS 54
+
+/* DIV in CMU_TOP */
+#define DOUT_SHARED0_DIV3 101
+#define DOUT_SHARED0_DIV2 102
+#define DOUT_SHARED1_DIV3 103
+#define DOUT_SHARED1_DIV2 104
+#define DOUT_SHARED1_DIV4 105
+#define DOUT_SHARED2_DIV3 106
+#define DOUT_SHARED2_DIV2 107
+#define DOUT_SHARED2_DIV4 108
+#define DOUT_SHARED4_DIV2 109
+#define DOUT_SHARED4_DIV4 110
+#define DOUT_CLKCMU_CMU_BOOST 111
+#define DOUT_CLKCMU_ACC_BUS 112
+#define DOUT_CLKCMU_APM_BUS 113
+#define DOUT_CLKCMU_AUD_CPU 114
+#define DOUT_CLKCMU_AUD_BUS 115
+#define DOUT_CLKCMU_BUSC_BUS 116
+#define DOUT_CLKCMU_BUSMC_BUS 118
+#define DOUT_CLKCMU_CORE_BUS 119
+#define DOUT_CLKCMU_CPUCL0_SWITCH 120
+#define DOUT_CLKCMU_CPUCL0_CLUSTER 121
+#define DOUT_CLKCMU_CPUCL1_SWITCH 123
+#define DOUT_CLKCMU_CPUCL1_CLUSTER 124
+#define DOUT_CLKCMU_DPTX_BUS 125
+#define DOUT_CLKCMU_DPTX_DPGTC 126
+#define DOUT_CLKCMU_DPUM_BUS 127
+#define DOUT_CLKCMU_DPUS0_BUS 128
+#define DOUT_CLKCMU_DPUS1_BUS 129
+#define DOUT_CLKCMU_FSYS0_BUS 130
+#define DOUT_CLKCMU_FSYS0_PCIE 131
+#define DOUT_CLKCMU_FSYS1_BUS 132
+#define DOUT_CLKCMU_FSYS1_USBDRD 133
+#define DOUT_CLKCMU_FSYS2_BUS 134
+#define DOUT_CLKCMU_FSYS2_UFS_EMBD 135
+#define DOUT_CLKCMU_FSYS2_ETHERNET 136
+#define DOUT_CLKCMU_G2D_G2D 137
+#define DOUT_CLKCMU_G2D_MSCL 138
+#define DOUT_CLKCMU_G3D00_SWITCH 139
+#define DOUT_CLKCMU_G3D01_SWITCH 140
+#define DOUT_CLKCMU_G3D1_SWITCH 141
+#define DOUT_CLKCMU_ISPB_BUS 142
+#define DOUT_CLKCMU_MFC_MFC 143
+#define DOUT_CLKCMU_MFC_WFD 144
+#define DOUT_CLKCMU_MIF_SWITCH 145
+#define DOUT_CLKCMU_MIF_BUSP 146
+#define DOUT_CLKCMU_NPU_BUS 147
+#define DOUT_CLKCMU_PERIC0_BUS 148
+#define DOUT_CLKCMU_PERIC0_IP 149
+#define DOUT_CLKCMU_PERIC1_BUS 150
+#define DOUT_CLKCMU_PERIC1_IP 151
+#define DOUT_CLKCMU_PERIS_BUS 152
+
+/* GAT in CMU_TOP */
+#define GOUT_CLKCMU_CMU_BOOST 201
+#define GOUT_CLKCMU_CPUCL0_BOOST 202
+#define GOUT_CLKCMU_CPUCL1_BOOST 203
+#define GOUT_CLKCMU_CORE_BOOST 204
+#define GOUT_CLKCMU_BUSC_BOOST 205
+#define GOUT_CLKCMU_BUSMC_BOOST 206
+#define GOUT_CLKCMU_MIF_BOOST 207
+#define GOUT_CLKCMU_ACC_BUS 208
+#define GOUT_CLKCMU_APM_BUS 209
+#define GOUT_CLKCMU_AUD_CPU 210
+#define GOUT_CLKCMU_AUD_BUS 211
+#define GOUT_CLKCMU_BUSC_BUS 212
+#define GOUT_CLKCMU_BUSMC_BUS 214
+#define GOUT_CLKCMU_CORE_BUS 215
+#define GOUT_CLKCMU_CPUCL0_SWITCH 216
+#define GOUT_CLKCMU_CPUCL0_CLUSTER 217
+#define GOUT_CLKCMU_CPUCL1_SWITCH 219
+#define GOUT_CLKCMU_CPUCL1_CLUSTER 220
+#define GOUT_CLKCMU_DPTX_BUS 221
+#define GOUT_CLKCMU_DPTX_DPGTC 222
+#define GOUT_CLKCMU_DPUM_BUS 223
+#define GOUT_CLKCMU_DPUS0_BUS 224
+#define GOUT_CLKCMU_DPUS1_BUS 225
+#define GOUT_CLKCMU_FSYS0_BUS 226
+#define GOUT_CLKCMU_FSYS0_PCIE 227
+#define GOUT_CLKCMU_FSYS1_BUS 228
+#define GOUT_CLKCMU_FSYS1_USBDRD 229
+#define GOUT_CLKCMU_FSYS1_MMC_CARD 230
+#define GOUT_CLKCMU_FSYS2_BUS 231
+#define GOUT_CLKCMU_FSYS2_UFS_EMBD 232
+#define GOUT_CLKCMU_FSYS2_ETHERNET 233
+#define GOUT_CLKCMU_G2D_G2D 234
+#define GOUT_CLKCMU_G2D_MSCL 235
+#define GOUT_CLKCMU_G3D00_SWITCH 236
+#define GOUT_CLKCMU_G3D01_SWITCH 237
+#define GOUT_CLKCMU_G3D1_SWITCH 238
+#define GOUT_CLKCMU_ISPB_BUS 239
+#define GOUT_CLKCMU_MFC_MFC 240
+#define GOUT_CLKCMU_MFC_WFD 241
+#define GOUT_CLKCMU_MIF_SWITCH 242
+#define GOUT_CLKCMU_MIF_BUSP 243
+#define GOUT_CLKCMU_NPU_BUS 244
+#define GOUT_CLKCMU_PERIC0_BUS 245
+#define GOUT_CLKCMU_PERIC0_IP 246
+#define GOUT_CLKCMU_PERIC1_BUS 247
+#define GOUT_CLKCMU_PERIC1_IP 248
+#define GOUT_CLKCMU_PERIS_BUS 249
+
+/* CMU_BUSMC */
+#define CLK_MOUT_BUSMC_BUS_USER 1
+#define CLK_DOUT_BUSMC_BUSP 2
+#define CLK_GOUT_BUSMC_PDMA0_PCLK 3
+#define CLK_GOUT_BUSMC_SPDMA_PCLK 4
+
+/* CMU_CORE */
+#define CLK_MOUT_CORE_BUS_USER 1
+#define CLK_DOUT_CORE_BUSP 2
+#define CLK_GOUT_CORE_CCI_CLK 3
+#define CLK_GOUT_CORE_CCI_PCLK 4
+#define CLK_GOUT_CORE_CMU_CORE_PCLK 5
+
+/* CMU_DPUM */
+#define CLK_MOUT_DPUM_BUS_USER 1
+#define CLK_DOUT_DPUM_BUSP 2
+#define CLK_GOUT_DPUM_ACLK_DECON 3
+#define CLK_GOUT_DPUM_ACLK_DMA 4
+#define CLK_GOUT_DPUM_ACLK_DPP 5
+#define CLK_GOUT_DPUM_SYSMMU_D0_CLK 6
+#define CLK_GOUT_DPUM_SYSMMU_D1_CLK 7
+#define CLK_GOUT_DPUM_SYSMMU_D2_CLK 8
+#define CLK_GOUT_DPUM_SYSMMU_D3_CLK 9
+
+/* CMU_FSYS0 */
+#define CLK_MOUT_FSYS0_BUS_USER 1
+#define CLK_MOUT_FSYS0_PCIE_USER 2
+#define CLK_GOUT_FSYS0_BUS_PCLK 3
+
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_REFCLK 4
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_REFCLK 5
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_DBI_ACLK 6
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_MSTR_ACLK 7
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X1_SLV_ACLK 8
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_DBI_ACLK 9
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_MSTR_ACLK 10
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_SLV_ACLK 11
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L0_X2_PIPE_CLK 12
+#define CLK_GOUT_FSYS0_PCIE_GEN3A_2L0_CLK 13
+#define CLK_GOUT_FSYS0_PCIE_GEN3B_2L0_CLK 14
+
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_REFCLK 15
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_REFCLK 16
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_DBI_ACLK 17
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_MSTR_ACLK 18
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X1_SLV_ACLK 19
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_DBI_ACLK 20
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_MSTR_ACLK 21
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_SLV_ACLK 22
+#define CLK_GOUT_FSYS0_PCIE_GEN3_2L1_X2_PIPE_CLK 23
+#define CLK_GOUT_FSYS0_PCIE_GEN3A_2L1_CLK 24
+#define CLK_GOUT_FSYS0_PCIE_GEN3B_2L1_CLK 25
+
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_REFCLK 26
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_REFCLK 27
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_DBI_ACLK 28
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_MSTR_ACLK 29
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X2_SLV_ACLK 30
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_DBI_ACLK 31
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_MSTR_ACLK 32
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_SLV_ACLK 33
+#define CLK_GOUT_FSYS0_PCIE_GEN3_4L_X4_PIPE_CLK 34
+#define CLK_GOUT_FSYS0_PCIE_GEN3A_4L_CLK 35
+#define CLK_GOUT_FSYS0_PCIE_GEN3B_4L_CLK 36
+
+/* CMU_FSYS1 */
+#define FOUT_MMC_PLL 1
+
+#define CLK_MOUT_FSYS1_BUS_USER 2
+#define CLK_MOUT_FSYS1_MMC_PLL 3
+#define CLK_MOUT_FSYS1_MMC_CARD_USER 4
+#define CLK_MOUT_FSYS1_USBDRD_USER 5
+#define CLK_MOUT_FSYS1_MMC_CARD 6
+
+#define CLK_DOUT_FSYS1_MMC_CARD 7
+
+#define CLK_GOUT_FSYS1_PCLK 8
+#define CLK_GOUT_FSYS1_MMC_CARD_SDCLKIN 9
+#define CLK_GOUT_FSYS1_MMC_CARD_ACLK 10
+#define CLK_GOUT_FSYS1_USB20DRD_0_REFCLK 11
+#define CLK_GOUT_FSYS1_USB20DRD_1_REFCLK 12
+#define CLK_GOUT_FSYS1_USB30DRD_0_REFCLK 13
+#define CLK_GOUT_FSYS1_USB30DRD_1_REFCLK 14
+#define CLK_GOUT_FSYS1_USB20_0_ACLK 15
+#define CLK_GOUT_FSYS1_USB20_1_ACLK 16
+#define CLK_GOUT_FSYS1_USB30_0_ACLK 17
+#define CLK_GOUT_FSYS1_USB30_1_ACLK 18
+
+/* CMU_FSYS2 */
+#define CLK_MOUT_FSYS2_BUS_USER 1
+#define CLK_MOUT_FSYS2_UFS_EMBD_USER 2
+#define CLK_MOUT_FSYS2_ETHERNET_USER 3
+#define CLK_GOUT_FSYS2_UFS_EMBD0_ACLK 4
+#define CLK_GOUT_FSYS2_UFS_EMBD0_UNIPRO 5
+#define CLK_GOUT_FSYS2_UFS_EMBD1_ACLK 6
+#define CLK_GOUT_FSYS2_UFS_EMBD1_UNIPRO 7
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_BUS_USER 1
+#define CLK_MOUT_PERIC0_IP_USER 2
+#define CLK_MOUT_PERIC0_USI00_USI 3
+#define CLK_MOUT_PERIC0_USI01_USI 4
+#define CLK_MOUT_PERIC0_USI02_USI 5
+#define CLK_MOUT_PERIC0_USI03_USI 6
+#define CLK_MOUT_PERIC0_USI04_USI 7
+#define CLK_MOUT_PERIC0_USI05_USI 8
+#define CLK_MOUT_PERIC0_USI_I2C 9
+
+#define CLK_DOUT_PERIC0_USI00_USI 10
+#define CLK_DOUT_PERIC0_USI01_USI 11
+#define CLK_DOUT_PERIC0_USI02_USI 12
+#define CLK_DOUT_PERIC0_USI03_USI 13
+#define CLK_DOUT_PERIC0_USI04_USI 14
+#define CLK_DOUT_PERIC0_USI05_USI 15
+#define CLK_DOUT_PERIC0_USI_I2C 16
+
+#define CLK_GOUT_PERIC0_IPCLK_0 20
+#define CLK_GOUT_PERIC0_IPCLK_1 21
+#define CLK_GOUT_PERIC0_IPCLK_2 22
+#define CLK_GOUT_PERIC0_IPCLK_3 23
+#define CLK_GOUT_PERIC0_IPCLK_4 24
+#define CLK_GOUT_PERIC0_IPCLK_5 25
+#define CLK_GOUT_PERIC0_IPCLK_6 26
+#define CLK_GOUT_PERIC0_IPCLK_7 27
+#define CLK_GOUT_PERIC0_IPCLK_8 28
+#define CLK_GOUT_PERIC0_IPCLK_9 29
+#define CLK_GOUT_PERIC0_IPCLK_10 30
+#define CLK_GOUT_PERIC0_IPCLK_11 31
+#define CLK_GOUT_PERIC0_PCLK_0 32
+#define CLK_GOUT_PERIC0_PCLK_1 33
+#define CLK_GOUT_PERIC0_PCLK_2 34
+#define CLK_GOUT_PERIC0_PCLK_3 35
+#define CLK_GOUT_PERIC0_PCLK_4 36
+#define CLK_GOUT_PERIC0_PCLK_5 37
+#define CLK_GOUT_PERIC0_PCLK_6 38
+#define CLK_GOUT_PERIC0_PCLK_7 39
+#define CLK_GOUT_PERIC0_PCLK_8 40
+#define CLK_GOUT_PERIC0_PCLK_9 41
+#define CLK_GOUT_PERIC0_PCLK_10 42
+#define CLK_GOUT_PERIC0_PCLK_11 43
+
+/* CMU_PERIC1 */
+#define CLK_MOUT_PERIC1_BUS_USER 1
+#define CLK_MOUT_PERIC1_IP_USER 2
+#define CLK_MOUT_PERIC1_USI06_USI 3
+#define CLK_MOUT_PERIC1_USI07_USI 4
+#define CLK_MOUT_PERIC1_USI08_USI 5
+#define CLK_MOUT_PERIC1_USI09_USI 6
+#define CLK_MOUT_PERIC1_USI10_USI 7
+#define CLK_MOUT_PERIC1_USI11_USI 8
+#define CLK_MOUT_PERIC1_USI_I2C 9
+
+#define CLK_DOUT_PERIC1_USI06_USI 10
+#define CLK_DOUT_PERIC1_USI07_USI 11
+#define CLK_DOUT_PERIC1_USI08_USI 12
+#define CLK_DOUT_PERIC1_USI09_USI 13
+#define CLK_DOUT_PERIC1_USI10_USI 14
+#define CLK_DOUT_PERIC1_USI11_USI 15
+#define CLK_DOUT_PERIC1_USI_I2C 16
+
+#define CLK_GOUT_PERIC1_IPCLK_0 20
+#define CLK_GOUT_PERIC1_IPCLK_1 21
+#define CLK_GOUT_PERIC1_IPCLK_2 22
+#define CLK_GOUT_PERIC1_IPCLK_3 23
+#define CLK_GOUT_PERIC1_IPCLK_4 24
+#define CLK_GOUT_PERIC1_IPCLK_5 25
+#define CLK_GOUT_PERIC1_IPCLK_6 26
+#define CLK_GOUT_PERIC1_IPCLK_7 27
+#define CLK_GOUT_PERIC1_IPCLK_8 28
+#define CLK_GOUT_PERIC1_IPCLK_9 29
+#define CLK_GOUT_PERIC1_IPCLK_10 30
+#define CLK_GOUT_PERIC1_IPCLK_11 31
+#define CLK_GOUT_PERIC1_PCLK_0 32
+#define CLK_GOUT_PERIC1_PCLK_1 33
+#define CLK_GOUT_PERIC1_PCLK_2 34
+#define CLK_GOUT_PERIC1_PCLK_3 35
+#define CLK_GOUT_PERIC1_PCLK_4 36
+#define CLK_GOUT_PERIC1_PCLK_5 37
+#define CLK_GOUT_PERIC1_PCLK_6 38
+#define CLK_GOUT_PERIC1_PCLK_7 39
+#define CLK_GOUT_PERIC1_PCLK_8 40
+#define CLK_GOUT_PERIC1_PCLK_9 41
+#define CLK_GOUT_PERIC1_PCLK_10 42
+#define CLK_GOUT_PERIC1_PCLK_11 43
+
+/* CMU_PERIS */
+#define CLK_MOUT_PERIS_BUS_USER 1
+#define CLK_GOUT_SYSREG_PERIS_PCLK 2
+#define CLK_GOUT_WDT_CLUSTER0 3
+#define CLK_GOUT_WDT_CLUSTER1 4
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOSAUTOV9_H */
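
A hedged sketch for this header, wiring the CMU_FSYS2 embedded-UFS gates into a host node; the unit address, compatible string, and clock-names mirror the usual Exynos UFS binding and are assumptions, only the CLK_GOUT_FSYS2_UFS_EMBD0_* indices are defined above:

    #include <dt-bindings/clock/samsung,exynosautov9.h>

    /* hypothetical embedded-UFS host; only the CLK_GOUT_FSYS2_UFS_EMBD0_* indices come from this header */
    ufs_0: ufs@17e00000 {
            compatible = "samsung,exynosautov9-ufs";
            clocks = <&cmu_fsys2 CLK_GOUT_FSYS2_UFS_EMBD0_ACLK>,
                     <&cmu_fsys2 CLK_GOUT_FSYS2_UFS_EMBD0_UNIPRO>;
            clock-names = "core_clk", "sclk_unipro_main";
    };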
diff --git a/include/dt-bindings/clock/samsung,exynosautov920.h b/include/dt-bindings/clock/samsung,exynosautov920.h
new file mode 100644
index 000000000000..970d05167fc6
--- /dev/null
+++ b/include/dt-bindings/clock/samsung,exynosautov920.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Sunyeal Hong <sunyeal.hong@samsung.com>
+ *
+ * Device Tree binding constants for ExynosAuto v920 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H
+#define _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H
+
+/* CMU_TOP */
+#define FOUT_SHARED0_PLL 1
+#define FOUT_SHARED1_PLL 2
+#define FOUT_SHARED2_PLL 3
+#define FOUT_SHARED3_PLL 4
+#define FOUT_SHARED4_PLL 5
+#define FOUT_SHARED5_PLL 6
+#define FOUT_MMC_PLL 7
+
+/* MUX in CMU_TOP */
+#define MOUT_SHARED0_PLL 8
+#define MOUT_SHARED1_PLL 9
+#define MOUT_SHARED2_PLL 10
+#define MOUT_SHARED3_PLL 11
+#define MOUT_SHARED4_PLL 12
+#define MOUT_SHARED5_PLL 13
+#define MOUT_MMC_PLL 14
+#define MOUT_CLKCMU_CMU_BOOST 15
+#define MOUT_CLKCMU_CMU_CMUREF 16
+#define MOUT_CLKCMU_ACC_NOC 17
+#define MOUT_CLKCMU_ACC_ORB 18
+#define MOUT_CLKCMU_APM_NOC 19
+#define MOUT_CLKCMU_AUD_CPU 20
+#define MOUT_CLKCMU_AUD_NOC 21
+#define MOUT_CLKCMU_CPUCL0_SWITCH 22
+#define MOUT_CLKCMU_CPUCL0_CLUSTER 23
+#define MOUT_CLKCMU_CPUCL0_DBG 24
+#define MOUT_CLKCMU_CPUCL1_SWITCH 25
+#define MOUT_CLKCMU_CPUCL1_CLUSTER 26
+#define MOUT_CLKCMU_CPUCL2_SWITCH 27
+#define MOUT_CLKCMU_CPUCL2_CLUSTER 28
+#define MOUT_CLKCMU_DNC_NOC 29
+#define MOUT_CLKCMU_DPTX_NOC 30
+#define MOUT_CLKCMU_DPTX_DPGTC 31
+#define MOUT_CLKCMU_DPTX_DPOSC 32
+#define MOUT_CLKCMU_DPUB_NOC 33
+#define MOUT_CLKCMU_DPUB_DSIM 34
+#define MOUT_CLKCMU_DPUF0_NOC 35
+#define MOUT_CLKCMU_DPUF1_NOC 36
+#define MOUT_CLKCMU_DPUF2_NOC 37
+#define MOUT_CLKCMU_DSP_NOC 38
+#define MOUT_CLKCMU_G3D_SWITCH 39
+#define MOUT_CLKCMU_G3D_NOCP 40
+#define MOUT_CLKCMU_GNPU_NOC 41
+#define MOUT_CLKCMU_HSI0_NOC 42
+#define MOUT_CLKCMU_HSI1_NOC 43
+#define MOUT_CLKCMU_HSI1_USBDRD 44
+#define MOUT_CLKCMU_HSI1_MMC_CARD 45
+#define MOUT_CLKCMU_HSI2_NOC 46
+#define MOUT_CLKCMU_HSI2_NOC_UFS 47
+#define MOUT_CLKCMU_HSI2_UFS_EMBD 48
+#define MOUT_CLKCMU_HSI2_ETHERNET 49
+#define MOUT_CLKCMU_ISP_NOC 50
+#define MOUT_CLKCMU_M2M_NOC 51
+#define MOUT_CLKCMU_M2M_JPEG 52
+#define MOUT_CLKCMU_MFC_MFC 53
+#define MOUT_CLKCMU_MFC_WFD 54
+#define MOUT_CLKCMU_MFD_NOC 55
+#define MOUT_CLKCMU_MIF_SWITCH 56
+#define MOUT_CLKCMU_MIF_NOCP 57
+#define MOUT_CLKCMU_MISC_NOC 58
+#define MOUT_CLKCMU_NOCL0_NOC 59
+#define MOUT_CLKCMU_NOCL1_NOC 60
+#define MOUT_CLKCMU_NOCL2_NOC 61
+#define MOUT_CLKCMU_PERIC0_NOC 62
+#define MOUT_CLKCMU_PERIC0_IP 63
+#define MOUT_CLKCMU_PERIC1_NOC 64
+#define MOUT_CLKCMU_PERIC1_IP 65
+#define MOUT_CLKCMU_SDMA_NOC 66
+#define MOUT_CLKCMU_SNW_NOC 67
+#define MOUT_CLKCMU_SSP_NOC 68
+#define MOUT_CLKCMU_TAA_NOC 69
+
+/* DIV in CMU_TOP */
+#define DOUT_SHARED0_DIV1 70
+#define DOUT_SHARED0_DIV2 71
+#define DOUT_SHARED0_DIV3 72
+#define DOUT_SHARED0_DIV4 73
+#define DOUT_SHARED1_DIV1 74
+#define DOUT_SHARED1_DIV2 75
+#define DOUT_SHARED1_DIV3 76
+#define DOUT_SHARED1_DIV4 77
+#define DOUT_SHARED2_DIV1 78
+#define DOUT_SHARED2_DIV2 79
+#define DOUT_SHARED2_DIV3 80
+#define DOUT_SHARED2_DIV4 81
+#define DOUT_SHARED3_DIV1 82
+#define DOUT_SHARED3_DIV2 83
+#define DOUT_SHARED3_DIV3 84
+#define DOUT_SHARED3_DIV4 85
+#define DOUT_SHARED4_DIV1 86
+#define DOUT_SHARED4_DIV2 87
+#define DOUT_SHARED4_DIV3 88
+#define DOUT_SHARED4_DIV4 89
+#define DOUT_SHARED5_DIV1 90
+#define DOUT_SHARED5_DIV2 91
+#define DOUT_SHARED5_DIV3 92
+#define DOUT_SHARED5_DIV4 93
+#define DOUT_CLKCMU_CMU_BOOST 94
+#define DOUT_CLKCMU_ACC_NOC 95
+#define DOUT_CLKCMU_ACC_ORB 96
+#define DOUT_CLKCMU_APM_NOC 97
+#define DOUT_CLKCMU_AUD_CPU 98
+#define DOUT_CLKCMU_AUD_NOC 99
+#define DOUT_CLKCMU_CPUCL0_SWITCH 100
+#define DOUT_CLKCMU_CPUCL0_CLUSTER 101
+#define DOUT_CLKCMU_CPUCL0_DBG 102
+#define DOUT_CLKCMU_CPUCL1_SWITCH 103
+#define DOUT_CLKCMU_CPUCL1_CLUSTER 104
+#define DOUT_CLKCMU_CPUCL2_SWITCH 105
+#define DOUT_CLKCMU_CPUCL2_CLUSTER 106
+#define DOUT_CLKCMU_DNC_NOC 107
+#define DOUT_CLKCMU_DPTX_NOC 108
+#define DOUT_CLKCMU_DPTX_DPGTC 109
+#define DOUT_CLKCMU_DPTX_DPOSC 110
+#define DOUT_CLKCMU_DPUB_NOC 111
+#define DOUT_CLKCMU_DPUB_DSIM 112
+#define DOUT_CLKCMU_DPUF0_NOC 113
+#define DOUT_CLKCMU_DPUF1_NOC 114
+#define DOUT_CLKCMU_DPUF2_NOC 115
+#define DOUT_CLKCMU_DSP_NOC 116
+#define DOUT_CLKCMU_G3D_SWITCH 117
+#define DOUT_CLKCMU_G3D_NOCP 118
+#define DOUT_CLKCMU_GNPU_NOC 119
+#define DOUT_CLKCMU_HSI0_NOC 120
+#define DOUT_CLKCMU_HSI1_NOC 121
+#define DOUT_CLKCMU_HSI1_USBDRD 122
+#define DOUT_CLKCMU_HSI1_MMC_CARD 123
+#define DOUT_CLKCMU_HSI2_NOC 124
+#define DOUT_CLKCMU_HSI2_NOC_UFS 125
+#define DOUT_CLKCMU_HSI2_UFS_EMBD 126
+#define DOUT_CLKCMU_HSI2_ETHERNET 127
+#define DOUT_CLKCMU_ISP_NOC 128
+#define DOUT_CLKCMU_M2M_NOC 129
+#define DOUT_CLKCMU_M2M_JPEG 130
+#define DOUT_CLKCMU_MFC_MFC 131
+#define DOUT_CLKCMU_MFC_WFD 132
+#define DOUT_CLKCMU_MFD_NOC 133
+#define DOUT_CLKCMU_MIF_NOCP 134
+#define DOUT_CLKCMU_MISC_NOC 135
+#define DOUT_CLKCMU_NOCL0_NOC 136
+#define DOUT_CLKCMU_NOCL1_NOC 137
+#define DOUT_CLKCMU_NOCL2_NOC 138
+#define DOUT_CLKCMU_PERIC0_NOC 139
+#define DOUT_CLKCMU_PERIC0_IP 140
+#define DOUT_CLKCMU_PERIC1_NOC 141
+#define DOUT_CLKCMU_PERIC1_IP 142
+#define DOUT_CLKCMU_SDMA_NOC 143
+#define DOUT_CLKCMU_SNW_NOC 144
+#define DOUT_CLKCMU_SSP_NOC 145
+#define DOUT_CLKCMU_TAA_NOC 146
+#define DOUT_TCXO_DIV2 147
+
+/* CMU_CPUCL0 */
+#define CLK_FOUT_CPUCL0_PLL 1
+
+#define CLK_MOUT_PLL_CPUCL0 2
+#define CLK_MOUT_CPUCL0_CLUSTER_USER 3
+#define CLK_MOUT_CPUCL0_DBG_USER 4
+#define CLK_MOUT_CPUCL0_SWITCH_USER 5
+#define CLK_MOUT_CPUCL0_CLUSTER 6
+#define CLK_MOUT_CPUCL0_CORE 7
+
+#define CLK_DOUT_CLUSTER0_ACLK 8
+#define CLK_DOUT_CLUSTER0_ATCLK 9
+#define CLK_DOUT_CLUSTER0_MPCLK 10
+#define CLK_DOUT_CLUSTER0_PCLK 11
+#define CLK_DOUT_CLUSTER0_PERIPHCLK 12
+#define CLK_DOUT_CPUCL0_DBG_NOC 13
+#define CLK_DOUT_CPUCL0_DBG_PCLKDBG 14
+#define CLK_DOUT_CPUCL0_NOCP 15
+
+/* CMU_CPUCL1 */
+#define CLK_FOUT_CPUCL1_PLL 1
+
+#define CLK_MOUT_PLL_CPUCL1 2
+#define CLK_MOUT_CPUCL1_CLUSTER_USER 3
+#define CLK_MOUT_CPUCL1_SWITCH_USER 4
+#define CLK_MOUT_CPUCL1_CLUSTER 5
+#define CLK_MOUT_CPUCL1_CORE 6
+
+#define CLK_DOUT_CLUSTER1_ACLK 7
+#define CLK_DOUT_CLUSTER1_ATCLK 8
+#define CLK_DOUT_CLUSTER1_MPCLK 9
+#define CLK_DOUT_CLUSTER1_PCLK 10
+#define CLK_DOUT_CLUSTER1_PERIPHCLK 11
+#define CLK_DOUT_CPUCL1_NOCP 12
+
+/* CMU_CPUCL2 */
+#define CLK_FOUT_CPUCL2_PLL 1
+
+#define CLK_MOUT_PLL_CPUCL2 2
+#define CLK_MOUT_CPUCL2_CLUSTER_USER 3
+#define CLK_MOUT_CPUCL2_SWITCH_USER 4
+#define CLK_MOUT_CPUCL2_CLUSTER 5
+#define CLK_MOUT_CPUCL2_CORE 6
+
+#define CLK_DOUT_CLUSTER2_ACLK 7
+#define CLK_DOUT_CLUSTER2_ATCLK 8
+#define CLK_DOUT_CLUSTER2_MPCLK 9
+#define CLK_DOUT_CLUSTER2_PCLK 10
+#define CLK_DOUT_CLUSTER2_PERIPHCLK 11
+#define CLK_DOUT_CPUCL2_NOCP 12
+
+/* CMU_PERIC0 */
+#define CLK_MOUT_PERIC0_IP_USER 1
+#define CLK_MOUT_PERIC0_NOC_USER 2
+#define CLK_MOUT_PERIC0_USI00_USI 3
+#define CLK_MOUT_PERIC0_USI01_USI 4
+#define CLK_MOUT_PERIC0_USI02_USI 5
+#define CLK_MOUT_PERIC0_USI03_USI 6
+#define CLK_MOUT_PERIC0_USI04_USI 7
+#define CLK_MOUT_PERIC0_USI05_USI 8
+#define CLK_MOUT_PERIC0_USI06_USI 9
+#define CLK_MOUT_PERIC0_USI07_USI 10
+#define CLK_MOUT_PERIC0_USI08_USI 11
+#define CLK_MOUT_PERIC0_USI_I2C 12
+#define CLK_MOUT_PERIC0_I3C 13
+
+#define CLK_DOUT_PERIC0_USI00_USI 14
+#define CLK_DOUT_PERIC0_USI01_USI 15
+#define CLK_DOUT_PERIC0_USI02_USI 16
+#define CLK_DOUT_PERIC0_USI03_USI 17
+#define CLK_DOUT_PERIC0_USI04_USI 18
+#define CLK_DOUT_PERIC0_USI05_USI 19
+#define CLK_DOUT_PERIC0_USI06_USI 20
+#define CLK_DOUT_PERIC0_USI07_USI 21
+#define CLK_DOUT_PERIC0_USI08_USI 22
+#define CLK_DOUT_PERIC0_USI_I2C 23
+#define CLK_DOUT_PERIC0_I3C 24
+
+/* CMU_PERIC1 */
+#define CLK_MOUT_PERIC1_IP_USER 1
+#define CLK_MOUT_PERIC1_NOC_USER 2
+#define CLK_MOUT_PERIC1_USI09_USI 3
+#define CLK_MOUT_PERIC1_USI10_USI 4
+#define CLK_MOUT_PERIC1_USI11_USI 5
+#define CLK_MOUT_PERIC1_USI12_USI 6
+#define CLK_MOUT_PERIC1_USI13_USI 7
+#define CLK_MOUT_PERIC1_USI14_USI 8
+#define CLK_MOUT_PERIC1_USI15_USI 9
+#define CLK_MOUT_PERIC1_USI16_USI 10
+#define CLK_MOUT_PERIC1_USI17_USI 11
+#define CLK_MOUT_PERIC1_USI_I2C 12
+#define CLK_MOUT_PERIC1_I3C 13
+
+#define CLK_DOUT_PERIC1_USI09_USI 14
+#define CLK_DOUT_PERIC1_USI10_USI 15
+#define CLK_DOUT_PERIC1_USI11_USI 16
+#define CLK_DOUT_PERIC1_USI12_USI 17
+#define CLK_DOUT_PERIC1_USI13_USI 18
+#define CLK_DOUT_PERIC1_USI14_USI 19
+#define CLK_DOUT_PERIC1_USI15_USI 20
+#define CLK_DOUT_PERIC1_USI16_USI 21
+#define CLK_DOUT_PERIC1_USI17_USI 22
+#define CLK_DOUT_PERIC1_USI_I2C 23
+#define CLK_DOUT_PERIC1_I3C 24
+
+/* CMU_MISC */
+#define CLK_MOUT_MISC_NOC_USER 1
+#define CLK_MOUT_MISC_GIC 2
+
+#define CLK_DOUT_MISC_OTP 3
+#define CLK_DOUT_MISC_NOCP 4
+#define CLK_DOUT_MISC_OSC_DIV2 5
+
+/* CMU_HSI0 */
+#define CLK_MOUT_HSI0_NOC_USER 1
+
+#define CLK_DOUT_HSI0_PCIE_APB 2
+
+/* CMU_HSI1 */
+#define CLK_MOUT_HSI1_MMC_CARD_USER 1
+#define CLK_MOUT_HSI1_NOC_USER 2
+#define CLK_MOUT_HSI1_USBDRD_USER 3
+#define CLK_MOUT_HSI1_USBDRD 4
+
+/* CMU_HSI2 */
+#define FOUT_PLL_ETH 1
+#define CLK_MOUT_HSI2_NOC_UFS_USER 2
+#define CLK_MOUT_HSI2_UFS_EMBD_USER 3
+#define CLK_MOUT_HSI2_ETHERNET 4
+#define CLK_MOUT_HSI2_ETHERNET_USER 5
+#define CLK_DOUT_HSI2_ETHERNET 6
+#define CLK_DOUT_HSI2_ETHERNET_PTP 7
+
+/* CMU_M2M */
+#define CLK_MOUT_M2M_JPEG_USER 1
+#define CLK_MOUT_M2M_NOC_USER 2
+#define CLK_DOUT_M2M_NOCP 3
+
+/* CMU_MFC */
+#define CLK_MOUT_MFC_MFC_USER 1
+#define CLK_MOUT_MFC_WFD_USER 2
+#define CLK_DOUT_MFC_NOCP 3
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H */
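Note: the ExynosAutov920 ids above are consumed from DTS through the usual clock-cells scheme. A minimal, illustrative consumer follows; the include path, node name, unit address, cmu_peric0 label and clock-names are assumptions for the sketch, not part of this patch.

    #include <dt-bindings/clock/samsung,exynosautov920.h>

    serial_0: serial@10880000 {
            /* placeholder node: only the index macros come from this header */
            clocks = <&cmu_peric0 CLK_MOUT_PERIC0_USI00_USI>,
                     <&cmu_peric0 CLK_DOUT_PERIC0_USI00_USI>;
            clock-names = "mux", "div";
    };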
diff --git a/include/dt-bindings/clock/sifive-fu540-prci.h b/include/dt-bindings/clock/sifive-fu540-prci.h
index 3b21d0522c91..5af372e8385f 100644
--- a/include/dt-bindings/clock/sifive-fu540-prci.h
+++ b/include/dt-bindings/clock/sifive-fu540-prci.h
@@ -10,9 +10,9 @@

/* Clock indexes for use by Device Tree data and the PRCI driver */

-#define PRCI_CLK_COREPLL 0
-#define PRCI_CLK_DDRPLL 1
-#define PRCI_CLK_GEMGXLPLL 2
-#define PRCI_CLK_TLCLK 3
+#define FU540_PRCI_CLK_COREPLL 0
+#define FU540_PRCI_CLK_DDRPLL 1
+#define FU540_PRCI_CLK_GEMGXLPLL 2
+#define FU540_PRCI_CLK_TLCLK 3

#endif
diff --git a/include/dt-bindings/clock/sifive-fu740-prci.h b/include/dt-bindings/clock/sifive-fu740-prci.h
index 7899b7fee7db..672bdadbf6c0 100644
--- a/include/dt-bindings/clock/sifive-fu740-prci.h
+++ b/include/dt-bindings/clock/sifive-fu740-prci.h
@@ -11,14 +11,14 @@

/* Clock indexes for use by Device Tree data and the PRCI driver */

-#define PRCI_CLK_COREPLL 0
-#define PRCI_CLK_DDRPLL 1
-#define PRCI_CLK_GEMGXLPLL 2
-#define PRCI_CLK_DVFSCOREPLL 3
-#define PRCI_CLK_HFPCLKPLL 4
-#define PRCI_CLK_CLTXPLL 5
-#define PRCI_CLK_TLCLK 6
-#define PRCI_CLK_PCLK 7
-#define PRCI_CLK_PCIE_AUX 8
+#define FU740_PRCI_CLK_COREPLL 0
+#define FU740_PRCI_CLK_DDRPLL 1
+#define FU740_PRCI_CLK_GEMGXLPLL 2
+#define FU740_PRCI_CLK_DVFSCOREPLL 3
+#define FU740_PRCI_CLK_HFPCLKPLL 4
+#define FU740_PRCI_CLK_CLTXPLL 5
+#define FU740_PRCI_CLK_TLCLK 6
+#define FU740_PRCI_CLK_PCLK 7
+#define FU740_PRCI_CLK_PCIE_AUX 8

#endif /* __DT_BINDINGS_CLOCK_SIFIVE_FU740_PRCI_H */
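Note: both PRCI renames are mechanical, but they are visible to device trees, which must switch from the bare PRCI_CLK_* names to the SoC-prefixed ones. A hedged before/after sketch for a FU740 consumer (the prci label is illustrative):

    /* before */
    clocks = <&prci PRCI_CLK_PCLK>;
    /* after */
    clocks = <&prci FU740_PRCI_CLK_PCLK>;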
diff --git a/include/dt-bindings/clock/sophgo,cv1800.h b/include/dt-bindings/clock/sophgo,cv1800.h
new file mode 100644
index 000000000000..cfbeca25a650
--- /dev/null
+++ b/include/dt-bindings/clock/sophgo,cv1800.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Ltd.
+ */
+
+#ifndef __DT_BINDINGS_SOPHGO_CV1800_CLK_H__
+#define __DT_BINDINGS_SOPHGO_CV1800_CLK_H__
+
+#define CLK_MPLL 0
+#define CLK_TPLL 1
+#define CLK_FPLL 2
+#define CLK_MIPIMPLL 3
+#define CLK_A0PLL 4
+#define CLK_DISPPLL 5
+#define CLK_CAM0PLL 6
+#define CLK_CAM1PLL 7
+
+#define CLK_MIPIMPLL_D3 8
+#define CLK_CAM0PLL_D2 9
+#define CLK_CAM0PLL_D3 10
+
+#define CLK_TPU 11
+#define CLK_TPU_FAB 12
+#define CLK_AHB_ROM 13
+#define CLK_DDR_AXI_REG 14
+#define CLK_RTC_25M 15
+#define CLK_SRC_RTC_SYS_0 16
+#define CLK_TEMPSEN 17
+#define CLK_SARADC 18
+#define CLK_EFUSE 19
+#define CLK_APB_EFUSE 20
+#define CLK_DEBUG 21
+#define CLK_AP_DEBUG 22
+#define CLK_XTAL_MISC 23
+#define CLK_AXI4_EMMC 24
+#define CLK_EMMC 25
+#define CLK_EMMC_100K 26
+#define CLK_AXI4_SD0 27
+#define CLK_SD0 28
+#define CLK_SD0_100K 29
+#define CLK_AXI4_SD1 30
+#define CLK_SD1 31
+#define CLK_SD1_100K 32
+#define CLK_SPI_NAND 33
+#define CLK_ETH0_500M 34
+#define CLK_AXI4_ETH0 35
+#define CLK_ETH1_500M 36
+#define CLK_AXI4_ETH1 37
+#define CLK_APB_GPIO 38
+#define CLK_APB_GPIO_INTR 39
+#define CLK_GPIO_DB 40
+#define CLK_AHB_SF 41
+#define CLK_AHB_SF1 42
+#define CLK_A24M 43
+#define CLK_AUDSRC 44
+#define CLK_APB_AUDSRC 45
+#define CLK_SDMA_AXI 46
+#define CLK_SDMA_AUD0 47
+#define CLK_SDMA_AUD1 48
+#define CLK_SDMA_AUD2 49
+#define CLK_SDMA_AUD3 50
+#define CLK_I2C 51
+#define CLK_APB_I2C 52
+#define CLK_APB_I2C0 53
+#define CLK_APB_I2C1 54
+#define CLK_APB_I2C2 55
+#define CLK_APB_I2C3 56
+#define CLK_APB_I2C4 57
+#define CLK_APB_WDT 58
+#define CLK_PWM_SRC 59
+#define CLK_PWM 60
+#define CLK_SPI 61
+#define CLK_APB_SPI0 62
+#define CLK_APB_SPI1 63
+#define CLK_APB_SPI2 64
+#define CLK_APB_SPI3 65
+#define CLK_1M 66
+#define CLK_CAM0_200 67
+#define CLK_PM 68
+#define CLK_TIMER0 69
+#define CLK_TIMER1 70
+#define CLK_TIMER2 71
+#define CLK_TIMER3 72
+#define CLK_TIMER4 73
+#define CLK_TIMER5 74
+#define CLK_TIMER6 75
+#define CLK_TIMER7 76
+#define CLK_UART0 77
+#define CLK_APB_UART0 78
+#define CLK_UART1 79
+#define CLK_APB_UART1 80
+#define CLK_UART2 81
+#define CLK_APB_UART2 82
+#define CLK_UART3 83
+#define CLK_APB_UART3 84
+#define CLK_UART4 85
+#define CLK_APB_UART4 86
+#define CLK_APB_I2S0 87
+#define CLK_APB_I2S1 88
+#define CLK_APB_I2S2 89
+#define CLK_APB_I2S3 90
+#define CLK_AXI4_USB 91
+#define CLK_APB_USB 92
+#define CLK_USB_125M 93
+#define CLK_USB_33K 94
+#define CLK_USB_12M 95
+#define CLK_AXI4 96
+#define CLK_AXI6 97
+#define CLK_DSI_ESC 98
+#define CLK_AXI_VIP 99
+#define CLK_SRC_VIP_SYS_0 100
+#define CLK_SRC_VIP_SYS_1 101
+#define CLK_SRC_VIP_SYS_2 102
+#define CLK_SRC_VIP_SYS_3 103
+#define CLK_SRC_VIP_SYS_4 104
+#define CLK_CSI_BE_VIP 105
+#define CLK_CSI_MAC0_VIP 106
+#define CLK_CSI_MAC1_VIP 107
+#define CLK_CSI_MAC2_VIP 108
+#define CLK_CSI0_RX_VIP 109
+#define CLK_CSI1_RX_VIP 110
+#define CLK_ISP_TOP_VIP 111
+#define CLK_IMG_D_VIP 112
+#define CLK_IMG_V_VIP 113
+#define CLK_SC_TOP_VIP 114
+#define CLK_SC_D_VIP 115
+#define CLK_SC_V1_VIP 116
+#define CLK_SC_V2_VIP 117
+#define CLK_SC_V3_VIP 118
+#define CLK_DWA_VIP 119
+#define CLK_BT_VIP 120
+#define CLK_DISP_VIP 121
+#define CLK_DSI_MAC_VIP 122
+#define CLK_LVDS0_VIP 123
+#define CLK_LVDS1_VIP 124
+#define CLK_PAD_VI_VIP 125
+#define CLK_PAD_VI1_VIP 126
+#define CLK_PAD_VI2_VIP 127
+#define CLK_CFG_REG_VIP 128
+#define CLK_VIP_IP0 129
+#define CLK_VIP_IP1 130
+#define CLK_VIP_IP2 131
+#define CLK_VIP_IP3 132
+#define CLK_IVE_VIP 133
+#define CLK_RAW_VIP 134
+#define CLK_OSDC_VIP 135
+#define CLK_CAM0_VIP 136
+#define CLK_AXI_VIDEO_CODEC 137
+#define CLK_VC_SRC0 138
+#define CLK_VC_SRC1 139
+#define CLK_VC_SRC2 140
+#define CLK_H264C 141
+#define CLK_APB_H264C 142
+#define CLK_H265C 143
+#define CLK_APB_H265C 144
+#define CLK_JPEG 145
+#define CLK_APB_JPEG 146
+#define CLK_CAM0 147
+#define CLK_CAM1 148
+#define CLK_WGN 149
+#define CLK_WGN0 150
+#define CLK_WGN1 151
+#define CLK_WGN2 152
+#define CLK_KEYSCAN 153
+#define CLK_CFG_REG_VC 154
+#define CLK_C906_0 155
+#define CLK_C906_1 156
+#define CLK_A53 157
+#define CLK_CPU_AXI0 158
+#define CLK_CPU_GIC 159
+#define CLK_XTAL_AP 160
+
+/* Only for CV181x */
+#define CLK_DISP_SRC_VIP 161
+
+#endif /* __DT_BINDINGS_SOPHGO_CV1800_CLK_H__ */
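Note: these CV1800 ids all address outputs of a single clock controller. An illustrative UART consumer, assuming a provider labelled clk; the node name, unit address and clock-names are placeholders:

    #include <dt-bindings/clock/sophgo,cv1800.h>

    uart0: serial@4140000 {
            /* functional clock plus its APB bus clock */
            clocks = <&clk CLK_UART0>, <&clk CLK_APB_UART0>;
            clock-names = "baudclk", "apb_pclk";
    };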
diff --git a/include/dt-bindings/clock/sophgo,sg2042-clkgen.h b/include/dt-bindings/clock/sophgo,sg2042-clkgen.h
new file mode 100644
index 000000000000..84f7857317a2
--- /dev/null
+++ b/include/dt-bindings/clock/sophgo,sg2042-clkgen.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Technology Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_SOPHGO_SG2042_CLKGEN_H__
+#define __DT_BINDINGS_SOPHGO_SG2042_CLKGEN_H__
+
+#define DIV_CLK_MPLL_RP_CPU_NORMAL_0 0
+#define DIV_CLK_MPLL_AXI_DDR_0 1
+#define DIV_CLK_FPLL_DDR01_1 2
+#define DIV_CLK_FPLL_DDR23_1 3
+#define DIV_CLK_FPLL_RP_CPU_NORMAL_1 4
+#define DIV_CLK_FPLL_50M_A53 5
+#define DIV_CLK_FPLL_TOP_RP_CMN_DIV2 6
+#define DIV_CLK_FPLL_UART_500M 7
+#define DIV_CLK_FPLL_AHB_LPC 8
+#define DIV_CLK_FPLL_EFUSE 9
+#define DIV_CLK_FPLL_TX_ETH0 10
+#define DIV_CLK_FPLL_PTP_REF_I_ETH0 11
+#define DIV_CLK_FPLL_REF_ETH0 12
+#define DIV_CLK_FPLL_EMMC 13
+#define DIV_CLK_FPLL_SD 14
+#define DIV_CLK_FPLL_TOP_AXI0 15
+#define DIV_CLK_FPLL_TOP_AXI_HSPERI 16
+#define DIV_CLK_FPLL_AXI_DDR_1 17
+#define DIV_CLK_FPLL_DIV_TIMER1 18
+#define DIV_CLK_FPLL_DIV_TIMER2 19
+#define DIV_CLK_FPLL_DIV_TIMER3 20
+#define DIV_CLK_FPLL_DIV_TIMER4 21
+#define DIV_CLK_FPLL_DIV_TIMER5 22
+#define DIV_CLK_FPLL_DIV_TIMER6 23
+#define DIV_CLK_FPLL_DIV_TIMER7 24
+#define DIV_CLK_FPLL_DIV_TIMER8 25
+#define DIV_CLK_FPLL_100K_EMMC 26
+#define DIV_CLK_FPLL_100K_SD 27
+#define DIV_CLK_FPLL_GPIO_DB 28
+#define DIV_CLK_DPLL0_DDR01_0 29
+#define DIV_CLK_DPLL1_DDR23_0 30
+
+#define GATE_CLK_RP_CPU_NORMAL_DIV0 31
+#define GATE_CLK_AXI_DDR_DIV0 32
+
+#define GATE_CLK_RP_CPU_NORMAL_DIV1 33
+#define GATE_CLK_A53_50M 34
+#define GATE_CLK_TOP_RP_CMN_DIV2 35
+#define GATE_CLK_HSDMA 36
+#define GATE_CLK_EMMC_100M 37
+#define GATE_CLK_SD_100M 38
+#define GATE_CLK_TX_ETH0 39
+#define GATE_CLK_PTP_REF_I_ETH0 40
+#define GATE_CLK_REF_ETH0 41
+#define GATE_CLK_UART_500M 42
+#define GATE_CLK_EFUSE 43
+
+#define GATE_CLK_AHB_LPC 44
+#define GATE_CLK_AHB_ROM 45
+#define GATE_CLK_AHB_SF 46
+
+#define GATE_CLK_APB_UART 47
+#define GATE_CLK_APB_TIMER 48
+#define GATE_CLK_APB_EFUSE 49
+#define GATE_CLK_APB_GPIO 50
+#define GATE_CLK_APB_GPIO_INTR 51
+#define GATE_CLK_APB_SPI 52
+#define GATE_CLK_APB_I2C 53
+#define GATE_CLK_APB_WDT 54
+#define GATE_CLK_APB_PWM 55
+#define GATE_CLK_APB_RTC 56
+
+#define GATE_CLK_AXI_PCIE0 57
+#define GATE_CLK_AXI_PCIE1 58
+#define GATE_CLK_SYSDMA_AXI 59
+#define GATE_CLK_AXI_DBG_I2C 60
+#define GATE_CLK_AXI_SRAM 61
+#define GATE_CLK_AXI_ETH0 62
+#define GATE_CLK_AXI_EMMC 63
+#define GATE_CLK_AXI_SD 64
+#define GATE_CLK_TOP_AXI0 65
+#define GATE_CLK_TOP_AXI_HSPERI 66
+
+#define GATE_CLK_TIMER1 67
+#define GATE_CLK_TIMER2 68
+#define GATE_CLK_TIMER3 69
+#define GATE_CLK_TIMER4 70
+#define GATE_CLK_TIMER5 71
+#define GATE_CLK_TIMER6 72
+#define GATE_CLK_TIMER7 73
+#define GATE_CLK_TIMER8 74
+#define GATE_CLK_100K_EMMC 75
+#define GATE_CLK_100K_SD 76
+#define GATE_CLK_GPIO_DB 77
+
+#define GATE_CLK_AXI_DDR_DIV1 78
+#define GATE_CLK_DDR01_DIV1 79
+#define GATE_CLK_DDR23_DIV1 80
+
+#define GATE_CLK_DDR01_DIV0 81
+#define GATE_CLK_DDR23_DIV0 82
+
+#define GATE_CLK_DDR01 83
+#define GATE_CLK_DDR23 84
+#define GATE_CLK_RP_CPU_NORMAL 85
+#define GATE_CLK_AXI_DDR 86
+
+#define MUX_CLK_DDR01 87
+#define MUX_CLK_DDR23 88
+#define MUX_CLK_RP_CPU_NORMAL 89
+#define MUX_CLK_AXI_DDR 90
+
+#endif /* __DT_BINDINGS_SOPHGO_SG2042_CLKGEN_H__ */
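Note: the SG2042 clkgen header exposes muxes (MUX_CLK_*) as first-class clocks, so a device tree can pin a mux parent with assigned-clocks. A sketch under the assumption of a provider labelled clkgen; the parent choice is illustrative, not mandated by this patch:

    #include <dt-bindings/clock/sophgo,sg2042-clkgen.h>

    &clkgen {
            /* keep the RP CPU clock on its div0 path (illustrative) */
            assigned-clocks = <&clkgen MUX_CLK_RP_CPU_NORMAL>;
            assigned-clock-parents = <&clkgen GATE_CLK_RP_CPU_NORMAL_DIV0>;
    };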
diff --git a/include/dt-bindings/clock/sophgo,sg2042-pll.h b/include/dt-bindings/clock/sophgo,sg2042-pll.h
new file mode 100644
index 000000000000..2d519b3bf51c
--- /dev/null
+++ b/include/dt-bindings/clock/sophgo,sg2042-pll.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Technology Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_SOPHGO_SG2042_PLL_H__
+#define __DT_BINDINGS_SOPHGO_SG2042_PLL_H__
+
+#define MPLL_CLK 0
+#define FPLL_CLK 1
+#define DPLL0_CLK 2
+#define DPLL1_CLK 3
+
+#endif /* __DT_BINDINGS_SOPHGO_SG2042_PLL_H__ */
diff --git a/include/dt-bindings/clock/sophgo,sg2042-rpgate.h b/include/dt-bindings/clock/sophgo,sg2042-rpgate.h
new file mode 100644
index 000000000000..8b4522d5f559
--- /dev/null
+++ b/include/dt-bindings/clock/sophgo,sg2042-rpgate.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Technology Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_SOPHGO_SG2042_RPGATE_H__
+#define __DT_BINDINGS_SOPHGO_SG2042_RPGATE_H__
+
+#define GATE_CLK_RXU0 0
+#define GATE_CLK_RXU1 1
+#define GATE_CLK_RXU2 2
+#define GATE_CLK_RXU3 3
+#define GATE_CLK_RXU4 4
+#define GATE_CLK_RXU5 5
+#define GATE_CLK_RXU6 6
+#define GATE_CLK_RXU7 7
+#define GATE_CLK_RXU8 8
+#define GATE_CLK_RXU9 9
+#define GATE_CLK_RXU10 10
+#define GATE_CLK_RXU11 11
+#define GATE_CLK_RXU12 12
+#define GATE_CLK_RXU13 13
+#define GATE_CLK_RXU14 14
+#define GATE_CLK_RXU15 15
+#define GATE_CLK_RXU16 16
+#define GATE_CLK_RXU17 17
+#define GATE_CLK_RXU18 18
+#define GATE_CLK_RXU19 19
+#define GATE_CLK_RXU20 20
+#define GATE_CLK_RXU21 21
+#define GATE_CLK_RXU22 22
+#define GATE_CLK_RXU23 23
+#define GATE_CLK_RXU24 24
+#define GATE_CLK_RXU25 25
+#define GATE_CLK_RXU26 26
+#define GATE_CLK_RXU27 27
+#define GATE_CLK_RXU28 28
+#define GATE_CLK_RXU29 29
+#define GATE_CLK_RXU30 30
+#define GATE_CLK_RXU31 31
+#define GATE_CLK_MP0 32
+#define GATE_CLK_MP1 33
+#define GATE_CLK_MP2 34
+#define GATE_CLK_MP3 35
+#define GATE_CLK_MP4 36
+#define GATE_CLK_MP5 37
+#define GATE_CLK_MP6 38
+#define GATE_CLK_MP7 39
+#define GATE_CLK_MP8 40
+#define GATE_CLK_MP9 41
+#define GATE_CLK_MP10 42
+#define GATE_CLK_MP11 43
+#define GATE_CLK_MP12 44
+#define GATE_CLK_MP13 45
+#define GATE_CLK_MP14 46
+#define GATE_CLK_MP15 47
+
+#endif /* __DT_BINDINGS_SOPHGO_SG2042_RPGATE_H__ */
diff --git a/include/dt-bindings/clock/sophgo,sg2044-clk.h b/include/dt-bindings/clock/sophgo,sg2044-clk.h
new file mode 100644
index 000000000000..d9adca42548e
--- /dev/null
+++ b/include/dt-bindings/clock/sophgo,sg2044-clk.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#ifndef __DT_BINDINGS_SOPHGO_SG2044_CLK_H__
+#define __DT_BINDINGS_SOPHGO_SG2044_CLK_H__
+
+#define CLK_DIV_AP_SYS_FIXED 0
+#define CLK_DIV_AP_SYS_MAIN 1
+#define CLK_DIV_RP_SYS_FIXED 2
+#define CLK_DIV_RP_SYS_MAIN 3
+#define CLK_DIV_TPU_SYS_FIXED 4
+#define CLK_DIV_TPU_SYS_MAIN 5
+#define CLK_DIV_NOC_SYS_FIXED 6
+#define CLK_DIV_NOC_SYS_MAIN 7
+#define CLK_DIV_VC_SRC0_FIXED 8
+#define CLK_DIV_VC_SRC0_MAIN 9
+#define CLK_DIV_VC_SRC1_FIXED 10
+#define CLK_DIV_VC_SRC1_MAIN 11
+#define CLK_DIV_CXP_MAC_FIXED 12
+#define CLK_DIV_CXP_MAC_MAIN 13
+#define CLK_DIV_DDR0_FIXED 14
+#define CLK_DIV_DDR0_MAIN 15
+#define CLK_DIV_DDR1_FIXED 16
+#define CLK_DIV_DDR1_MAIN 17
+#define CLK_DIV_DDR2_FIXED 18
+#define CLK_DIV_DDR2_MAIN 19
+#define CLK_DIV_DDR3_FIXED 20
+#define CLK_DIV_DDR3_MAIN 21
+#define CLK_DIV_DDR4_FIXED 22
+#define CLK_DIV_DDR4_MAIN 23
+#define CLK_DIV_DDR5_FIXED 24
+#define CLK_DIV_DDR5_MAIN 25
+#define CLK_DIV_DDR6_FIXED 26
+#define CLK_DIV_DDR6_MAIN 27
+#define CLK_DIV_DDR7_FIXED 28
+#define CLK_DIV_DDR7_MAIN 29
+#define CLK_DIV_TOP_50M 30
+#define CLK_DIV_TOP_AXI0 31
+#define CLK_DIV_TOP_AXI_HSPERI 32
+#define CLK_DIV_TIMER0 33
+#define CLK_DIV_TIMER1 34
+#define CLK_DIV_TIMER2 35
+#define CLK_DIV_TIMER3 36
+#define CLK_DIV_TIMER4 37
+#define CLK_DIV_TIMER5 38
+#define CLK_DIV_TIMER6 39
+#define CLK_DIV_TIMER7 40
+#define CLK_DIV_CXP_TEST_PHY 41
+#define CLK_DIV_CXP_TEST_ETH_PHY 42
+#define CLK_DIV_C2C0_TEST_PHY 43
+#define CLK_DIV_C2C1_TEST_PHY 44
+#define CLK_DIV_PCIE_1G 45
+#define CLK_DIV_UART_500M 46
+#define CLK_DIV_GPIO_DB 47
+#define CLK_DIV_SD 48
+#define CLK_DIV_SD_100K 49
+#define CLK_DIV_EMMC 50
+#define CLK_DIV_EMMC_100K 51
+#define CLK_DIV_EFUSE 52
+#define CLK_DIV_TX_ETH0 53
+#define CLK_DIV_PTP_REF_I_ETH0 54
+#define CLK_DIV_REF_ETH0 55
+#define CLK_DIV_PKA 56
+#define CLK_MUX_DDR0 57
+#define CLK_MUX_DDR1 58
+#define CLK_MUX_DDR2 59
+#define CLK_MUX_DDR3 60
+#define CLK_MUX_DDR4 61
+#define CLK_MUX_DDR5 62
+#define CLK_MUX_DDR6 63
+#define CLK_MUX_DDR7 64
+#define CLK_MUX_NOC_SYS 65
+#define CLK_MUX_TPU_SYS 66
+#define CLK_MUX_RP_SYS 67
+#define CLK_MUX_AP_SYS 68
+#define CLK_MUX_VC_SRC0 69
+#define CLK_MUX_VC_SRC1 70
+#define CLK_MUX_CXP_MAC 71
+#define CLK_GATE_AP_SYS 72
+#define CLK_GATE_RP_SYS 73
+#define CLK_GATE_TPU_SYS 74
+#define CLK_GATE_NOC_SYS 75
+#define CLK_GATE_VC_SRC0 76
+#define CLK_GATE_VC_SRC1 77
+#define CLK_GATE_DDR0 78
+#define CLK_GATE_DDR1 79
+#define CLK_GATE_DDR2 80
+#define CLK_GATE_DDR3 81
+#define CLK_GATE_DDR4 82
+#define CLK_GATE_DDR5 83
+#define CLK_GATE_DDR6 84
+#define CLK_GATE_DDR7 85
+#define CLK_GATE_TOP_50M 86
+#define CLK_GATE_SC_RX 87
+#define CLK_GATE_SC_RX_X0Y1 88
+#define CLK_GATE_TOP_AXI0 89
+#define CLK_GATE_INTC0 90
+#define CLK_GATE_INTC1 91
+#define CLK_GATE_INTC2 92
+#define CLK_GATE_INTC3 93
+#define CLK_GATE_MAILBOX0 94
+#define CLK_GATE_MAILBOX1 95
+#define CLK_GATE_MAILBOX2 96
+#define CLK_GATE_MAILBOX3 97
+#define CLK_GATE_TOP_AXI_HSPERI 98
+#define CLK_GATE_APB_TIMER 99
+#define CLK_GATE_TIMER0 100
+#define CLK_GATE_TIMER1 101
+#define CLK_GATE_TIMER2 102
+#define CLK_GATE_TIMER3 103
+#define CLK_GATE_TIMER4 104
+#define CLK_GATE_TIMER5 105
+#define CLK_GATE_TIMER6 106
+#define CLK_GATE_TIMER7 107
+#define CLK_GATE_CXP_CFG 108
+#define CLK_GATE_CXP_MAC 109
+#define CLK_GATE_CXP_TEST_PHY 110
+#define CLK_GATE_CXP_TEST_ETH_PHY 111
+#define CLK_GATE_PCIE_1G 112
+#define CLK_GATE_C2C0_TEST_PHY 113
+#define CLK_GATE_C2C1_TEST_PHY 114
+#define CLK_GATE_UART_500M 115
+#define CLK_GATE_APB_UART 116
+#define CLK_GATE_APB_SPI 117
+#define CLK_GATE_AHB_SPIFMC 118
+#define CLK_GATE_APB_I2C 119
+#define CLK_GATE_AXI_DBG_I2C 120
+#define CLK_GATE_GPIO_DB 121
+#define CLK_GATE_APB_GPIO_INTR 122
+#define CLK_GATE_APB_GPIO 123
+#define CLK_GATE_SD 124
+#define CLK_GATE_AXI_SD 125
+#define CLK_GATE_SD_100K 126
+#define CLK_GATE_EMMC 127
+#define CLK_GATE_AXI_EMMC 128
+#define CLK_GATE_EMMC_100K 129
+#define CLK_GATE_EFUSE 130
+#define CLK_GATE_APB_EFUSE 131
+#define CLK_GATE_SYSDMA_AXI 132
+#define CLK_GATE_TX_ETH0 133
+#define CLK_GATE_AXI_ETH0 134
+#define CLK_GATE_PTP_REF_I_ETH0 135
+#define CLK_GATE_REF_ETH0 136
+#define CLK_GATE_APB_RTC 137
+#define CLK_GATE_APB_PWM 138
+#define CLK_GATE_APB_WDT 139
+#define CLK_GATE_AXI_SRAM 140
+#define CLK_GATE_AHB_ROM 141
+#define CLK_GATE_PKA 142
+
+#endif /* __DT_BINDINGS_SOPHGO_SG2044_CLK_H__ */
diff --git a/include/dt-bindings/clock/sophgo,sg2044-pll.h b/include/dt-bindings/clock/sophgo,sg2044-pll.h
new file mode 100644
index 000000000000..817d45e700cc
--- /dev/null
+++ b/include/dt-bindings/clock/sophgo,sg2044-pll.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@gmail.com>
+ */
+
+#ifndef __DT_BINDINGS_SOPHGO_SG2044_PLL_H__
+#define __DT_BINDINGS_SOPHGO_SG2044_PLL_H__
+
+#define CLK_FPLL0 0
+#define CLK_FPLL1 1
+#define CLK_FPLL2 2
+#define CLK_DPLL0 3
+#define CLK_DPLL1 4
+#define CLK_DPLL2 5
+#define CLK_DPLL3 6
+#define CLK_DPLL4 7
+#define CLK_DPLL5 8
+#define CLK_DPLL6 9
+#define CLK_DPLL7 10
+#define CLK_MPLL0 11
+#define CLK_MPLL1 12
+#define CLK_MPLL2 13
+#define CLK_MPLL3 14
+#define CLK_MPLL4 15
+#define CLK_MPLL5 16
+
+#endif /* __DT_BINDINGS_SOPHGO_SG2044_PLL_H__ */
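Note: these PLL ids are presumably consumed as parent inputs by the downstream clock controller defined above. A hedged sketch; the pll label, node, unit address and clock-names are placeholders:

    #include <dt-bindings/clock/sophgo,sg2044-pll.h>

    clk: clock-controller@50002000 {
            /* placeholder node: PLL outputs referenced by index */
            clocks = <&pll CLK_FPLL0>, <&pll CLK_MPLL0>;
            clock-names = "fpll0", "mpll0";
    };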
diff --git a/include/dt-bindings/clock/spacemit,k1-syscon.h b/include/dt-bindings/clock/spacemit,k1-syscon.h
new file mode 100644
index 000000000000..0f8b59d6753c
--- /dev/null
+++ b/include/dt-bindings/clock/spacemit,k1-syscon.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2024-2025 Haylen Chu <heylenay@outlook.com>
+ */
+
+#ifndef _DT_BINDINGS_SPACEMIT_CCU_H_
+#define _DT_BINDINGS_SPACEMIT_CCU_H_
+
+/* APBS (PLL) clocks */
+#define CLK_PLL1 0
+#define CLK_PLL2 1
+#define CLK_PLL3 2
+#define CLK_PLL1_D2 3
+#define CLK_PLL1_D3 4
+#define CLK_PLL1_D4 5
+#define CLK_PLL1_D5 6
+#define CLK_PLL1_D6 7
+#define CLK_PLL1_D7 8
+#define CLK_PLL1_D8 9
+#define CLK_PLL1_D11 10
+#define CLK_PLL1_D13 11
+#define CLK_PLL1_D23 12
+#define CLK_PLL1_D64 13
+#define CLK_PLL1_D10_AUD 14
+#define CLK_PLL1_D100_AUD 15
+#define CLK_PLL2_D1 16
+#define CLK_PLL2_D2 17
+#define CLK_PLL2_D3 18
+#define CLK_PLL2_D4 19
+#define CLK_PLL2_D5 20
+#define CLK_PLL2_D6 21
+#define CLK_PLL2_D7 22
+#define CLK_PLL2_D8 23
+#define CLK_PLL3_D1 24
+#define CLK_PLL3_D2 25
+#define CLK_PLL3_D3 26
+#define CLK_PLL3_D4 27
+#define CLK_PLL3_D5 28
+#define CLK_PLL3_D6 29
+#define CLK_PLL3_D7 30
+#define CLK_PLL3_D8 31
+#define CLK_PLL3_80 32
+#define CLK_PLL3_40 33
+#define CLK_PLL3_20 34
+
+/* MPMU clocks */
+#define CLK_PLL1_307P2 0
+#define CLK_PLL1_76P8 1
+#define CLK_PLL1_61P44 2
+#define CLK_PLL1_153P6 3
+#define CLK_PLL1_102P4 4
+#define CLK_PLL1_51P2 5
+#define CLK_PLL1_51P2_AP 6
+#define CLK_PLL1_57P6 7
+#define CLK_PLL1_25P6 8
+#define CLK_PLL1_12P8 9
+#define CLK_PLL1_12P8_WDT 10
+#define CLK_PLL1_6P4 11
+#define CLK_PLL1_3P2 12
+#define CLK_PLL1_1P6 13
+#define CLK_PLL1_0P8 14
+#define CLK_PLL1_409P6 15
+#define CLK_PLL1_204P8 16
+#define CLK_PLL1_491 17
+#define CLK_PLL1_245P76 18
+#define CLK_PLL1_614 19
+#define CLK_PLL1_47P26 20
+#define CLK_PLL1_31P5 21
+#define CLK_PLL1_819 22
+#define CLK_PLL1_1228 23
+#define CLK_SLOW_UART 24
+#define CLK_SLOW_UART1 25
+#define CLK_SLOW_UART2 26
+#define CLK_WDT 27
+#define CLK_RIPC 28
+#define CLK_I2S_SYSCLK 29
+#define CLK_I2S_BCLK 30
+#define CLK_APB 31
+#define CLK_WDT_BUS 32
+#define CLK_I2S_153P6 33
+#define CLK_I2S_153P6_BASE 34
+#define CLK_I2S_SYSCLK_SRC 35
+#define CLK_I2S_BCLK_FACTOR 36
+
+/* MPMU resets */
+#define RESET_WDT 0
+
+/* APBC clocks */
+#define CLK_UART0 0
+#define CLK_UART2 1
+#define CLK_UART3 2
+#define CLK_UART4 3
+#define CLK_UART5 4
+#define CLK_UART6 5
+#define CLK_UART7 6
+#define CLK_UART8 7
+#define CLK_UART9 8
+#define CLK_GPIO 9
+#define CLK_PWM0 10
+#define CLK_PWM1 11
+#define CLK_PWM2 12
+#define CLK_PWM3 13
+#define CLK_PWM4 14
+#define CLK_PWM5 15
+#define CLK_PWM6 16
+#define CLK_PWM7 17
+#define CLK_PWM8 18
+#define CLK_PWM9 19
+#define CLK_PWM10 20
+#define CLK_PWM11 21
+#define CLK_PWM12 22
+#define CLK_PWM13 23
+#define CLK_PWM14 24
+#define CLK_PWM15 25
+#define CLK_PWM16 26
+#define CLK_PWM17 27
+#define CLK_PWM18 28
+#define CLK_PWM19 29
+#define CLK_SSP3 30
+#define CLK_RTC 31
+#define CLK_TWSI0 32
+#define CLK_TWSI1 33
+#define CLK_TWSI2 34
+#define CLK_TWSI4 35
+#define CLK_TWSI5 36
+#define CLK_TWSI6 37
+#define CLK_TWSI7 38
+#define CLK_TWSI8 39
+#define CLK_TIMERS1 40
+#define CLK_TIMERS2 41
+#define CLK_AIB 42
+#define CLK_ONEWIRE 43
+#define CLK_SSPA0 44
+#define CLK_SSPA1 45
+#define CLK_DRO 46
+#define CLK_IR 47
+#define CLK_TSEN 48
+#define CLK_IPC_AP2AUD 49
+#define CLK_CAN0 50
+#define CLK_CAN0_BUS 51
+#define CLK_UART0_BUS 52
+#define CLK_UART2_BUS 53
+#define CLK_UART3_BUS 54
+#define CLK_UART4_BUS 55
+#define CLK_UART5_BUS 56
+#define CLK_UART6_BUS 57
+#define CLK_UART7_BUS 58
+#define CLK_UART8_BUS 59
+#define CLK_UART9_BUS 60
+#define CLK_GPIO_BUS 61
+#define CLK_PWM0_BUS 62
+#define CLK_PWM1_BUS 63
+#define CLK_PWM2_BUS 64
+#define CLK_PWM3_BUS 65
+#define CLK_PWM4_BUS 66
+#define CLK_PWM5_BUS 67
+#define CLK_PWM6_BUS 68
+#define CLK_PWM7_BUS 69
+#define CLK_PWM8_BUS 70
+#define CLK_PWM9_BUS 71
+#define CLK_PWM10_BUS 72
+#define CLK_PWM11_BUS 73
+#define CLK_PWM12_BUS 74
+#define CLK_PWM13_BUS 75
+#define CLK_PWM14_BUS 76
+#define CLK_PWM15_BUS 77
+#define CLK_PWM16_BUS 78
+#define CLK_PWM17_BUS 79
+#define CLK_PWM18_BUS 80
+#define CLK_PWM19_BUS 81
+#define CLK_SSP3_BUS 82
+#define CLK_RTC_BUS 83
+#define CLK_TWSI0_BUS 84
+#define CLK_TWSI1_BUS 85
+#define CLK_TWSI2_BUS 86
+#define CLK_TWSI4_BUS 87
+#define CLK_TWSI5_BUS 88
+#define CLK_TWSI6_BUS 89
+#define CLK_TWSI7_BUS 90
+#define CLK_TWSI8_BUS 91
+#define CLK_TIMERS1_BUS 92
+#define CLK_TIMERS2_BUS 93
+#define CLK_AIB_BUS 94
+#define CLK_ONEWIRE_BUS 95
+#define CLK_SSPA0_BUS 96
+#define CLK_SSPA1_BUS 97
+#define CLK_TSEN_BUS 98
+#define CLK_IPC_AP2AUD_BUS 99
+#define CLK_SSPA0_I2S_BCLK 100
+#define CLK_SSPA1_I2S_BCLK 101
+
+/* APBC resets */
+#define RESET_UART0 0
+#define RESET_UART2 1
+#define RESET_UART3 2
+#define RESET_UART4 3
+#define RESET_UART5 4
+#define RESET_UART6 5
+#define RESET_UART7 6
+#define RESET_UART8 7
+#define RESET_UART9 8
+#define RESET_GPIO 9
+#define RESET_PWM0 10
+#define RESET_PWM1 11
+#define RESET_PWM2 12
+#define RESET_PWM3 13
+#define RESET_PWM4 14
+#define RESET_PWM5 15
+#define RESET_PWM6 16
+#define RESET_PWM7 17
+#define RESET_PWM8 18
+#define RESET_PWM9 19
+#define RESET_PWM10 20
+#define RESET_PWM11 21
+#define RESET_PWM12 22
+#define RESET_PWM13 23
+#define RESET_PWM14 24
+#define RESET_PWM15 25
+#define RESET_PWM16 26
+#define RESET_PWM17 27
+#define RESET_PWM18 28
+#define RESET_PWM19 29
+#define RESET_SSP3 30
+#define RESET_RTC 31
+#define RESET_TWSI0 32
+#define RESET_TWSI1 33
+#define RESET_TWSI2 34
+#define RESET_TWSI4 35
+#define RESET_TWSI5 36
+#define RESET_TWSI6 37
+#define RESET_TWSI7 38
+#define RESET_TWSI8 39
+#define RESET_TIMERS1 40
+#define RESET_TIMERS2 41
+#define RESET_AIB 42
+#define RESET_ONEWIRE 43
+#define RESET_SSPA0 44
+#define RESET_SSPA1 45
+#define RESET_DRO 46
+#define RESET_IR 47
+#define RESET_TSEN 48
+#define RESET_IPC_AP2AUD 49
+#define RESET_CAN0 50
+
+/* APMU clocks */
+#define CLK_CCI550 0
+#define CLK_CPU_C0_HI 1
+#define CLK_CPU_C0_CORE 2
+#define CLK_CPU_C0_ACE 3
+#define CLK_CPU_C0_TCM 4
+#define CLK_CPU_C1_HI 5
+#define CLK_CPU_C1_CORE 6
+#define CLK_CPU_C1_ACE 7
+#define CLK_CCIC_4X 8
+#define CLK_CCIC1PHY 9
+#define CLK_SDH_AXI 10
+#define CLK_SDH0 11
+#define CLK_SDH1 12
+#define CLK_SDH2 13
+#define CLK_USB_P1 14
+#define CLK_USB_AXI 15
+#define CLK_USB30 16
+#define CLK_QSPI 17
+#define CLK_QSPI_BUS 18
+#define CLK_DMA 19
+#define CLK_AES 20
+#define CLK_VPU 21
+#define CLK_GPU 22
+#define CLK_EMMC 23
+#define CLK_EMMC_X 24
+#define CLK_AUDIO 25
+#define CLK_HDMI 26
+#define CLK_PMUA_ACLK 27
+#define CLK_PCIE0_MASTER 28
+#define CLK_PCIE0_SLAVE 29
+#define CLK_PCIE0_DBI 30
+#define CLK_PCIE1_MASTER 31
+#define CLK_PCIE1_SLAVE 32
+#define CLK_PCIE1_DBI 33
+#define CLK_PCIE2_MASTER 34
+#define CLK_PCIE2_SLAVE 35
+#define CLK_PCIE2_DBI 36
+#define CLK_EMAC0_BUS 37
+#define CLK_EMAC0_PTP 38
+#define CLK_EMAC1_BUS 39
+#define CLK_EMAC1_PTP 40
+#define CLK_JPG 41
+#define CLK_CCIC2PHY 42
+#define CLK_CCIC3PHY 43
+#define CLK_CSI 44
+#define CLK_CAMM0 45
+#define CLK_CAMM1 46
+#define CLK_CAMM2 47
+#define CLK_ISP_CPP 48
+#define CLK_ISP_BUS 49
+#define CLK_ISP 50
+#define CLK_DPU_MCLK 51
+#define CLK_DPU_ESC 52
+#define CLK_DPU_BIT 53
+#define CLK_DPU_PXCLK 54
+#define CLK_DPU_HCLK 55
+#define CLK_DPU_SPI 56
+#define CLK_DPU_SPI_HBUS 57
+#define CLK_DPU_SPIBUS 58
+#define CLK_DPU_SPI_ACLK 59
+#define CLK_V2D 60
+#define CLK_EMMC_BUS 61
+
+/* APMU resets */
+#define RESET_CCIC_4X 0
+#define RESET_CCIC1_PHY 1
+#define RESET_SDH_AXI 2
+#define RESET_SDH0 3
+#define RESET_SDH1 4
+#define RESET_SDH2 5
+#define RESET_USBP1_AXI 6
+#define RESET_USB_AXI 7
+#define RESET_USB30_AHB 8
+#define RESET_USB30_VCC 9
+#define RESET_USB30_PHY 10
+#define RESET_QSPI 11
+#define RESET_QSPI_BUS 12
+#define RESET_DMA 13
+#define RESET_AES 14
+#define RESET_VPU 15
+#define RESET_GPU 16
+#define RESET_EMMC 17
+#define RESET_EMMC_X 18
+#define RESET_AUDIO_SYS 19
+#define RESET_AUDIO_MCU 20
+#define RESET_AUDIO_APMU 21
+#define RESET_HDMI 22
+#define RESET_PCIE0_MASTER 23
+#define RESET_PCIE0_SLAVE 24
+#define RESET_PCIE0_DBI 25
+#define RESET_PCIE0_GLOBAL 26
+#define RESET_PCIE1_MASTER 27
+#define RESET_PCIE1_SLAVE 28
+#define RESET_PCIE1_DBI 29
+#define RESET_PCIE1_GLOBAL 30
+#define RESET_PCIE2_MASTER 31
+#define RESET_PCIE2_SLAVE 32
+#define RESET_PCIE2_DBI 33
+#define RESET_PCIE2_GLOBAL 34
+#define RESET_EMAC0 35
+#define RESET_EMAC1 36
+#define RESET_JPG 37
+#define RESET_CCIC2PHY 38
+#define RESET_CCIC3PHY 39
+#define RESET_CSI 40
+#define RESET_ISP_CPP 41
+#define RESET_ISP_BUS 42
+#define RESET_ISP 43
+#define RESET_ISP_CI 44
+#define RESET_DPU_MCLK 45
+#define RESET_DPU_ESC 46
+#define RESET_DPU_HCLK 47
+#define RESET_DPU_SPIBUS 48
+#define RESET_DPU_SPI_HBUS 49
+#define RESET_V2D 50
+#define RESET_MIPI 51
+#define RESET_MC 52
+
+/* RCPU resets */
+#define RESET_RCPU_SSP0 0
+#define RESET_RCPU_I2C0 1
+#define RESET_RCPU_UART1 2
+#define RESET_RCPU_IR 3
+#define RESET_RCPU_CAN 4
+#define RESET_RCPU_UART0 5
+#define RESET_RCPU_HDMI_AUDIO 6
+
+/* RCPU2 resets */
+#define RESET_RCPU2_PWM0 0
+#define RESET_RCPU2_PWM1 1
+#define RESET_RCPU2_PWM2 2
+#define RESET_RCPU2_PWM3 3
+#define RESET_RCPU2_PWM4 4
+#define RESET_RCPU2_PWM5 5
+#define RESET_RCPU2_PWM6 6
+#define RESET_RCPU2_PWM7 7
+#define RESET_RCPU2_PWM8 8
+#define RESET_RCPU2_PWM9 9
+
+/* APBC2 resets */
+#define RESET_APBC2_UART1 0
+#define RESET_APBC2_SSP2 1
+#define RESET_APBC2_TWSI3 2
+#define RESET_APBC2_RTC 3
+#define RESET_APBC2_TIMERS0 4
+#define RESET_APBC2_KPC 5
+#define RESET_APBC2_GPIO 6
+
+#endif /* _DT_BINDINGS_SPACEMIT_CCU_H_ */
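Note: the K1 syscon blocks are combined clock and reset providers, which is why this header defines RESET_* ids alongside CLK_* ids for each block (MPMU, APBC, APMU, ...). An illustrative consumer taking both from the APBC block; node, unit address and clock-names are placeholders:

    #include <dt-bindings/clock/spacemit,k1-syscon.h>

    uart0: serial@d4017000 {
            /* functional and bus clocks, plus the matching reset line */
            clocks = <&syscon_apbc CLK_UART0>, <&syscon_apbc CLK_UART0_BUS>;
            clock-names = "core", "bus";
            resets = <&syscon_apbc RESET_UART0>;
    };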
diff --git a/include/dt-bindings/clock/sprd,ums512-clk.h b/include/dt-bindings/clock/sprd,ums512-clk.h
new file mode 100644
index 000000000000..4f1d90849944
--- /dev/null
+++ b/include/dt-bindings/clock/sprd,ums512-clk.h
@@ -0,0 +1,397 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Unisoc UMS512 SoC clock identifiers
+ *
+ * Copyright (C) 2022, Unisoc Inc.
+ */
+
+#ifndef _DT_BINDINGS_CLK_UMS512_H_
+#define _DT_BINDINGS_CLK_UMS512_H_
+
+#define CLK_26M_AUD 0
+#define CLK_13M 1
+#define CLK_6M5 2
+#define CLK_4M3 3
+#define CLK_2M 4
+#define CLK_1M 5
+#define CLK_250K 6
+#define CLK_RCO_25M 7
+#define CLK_RCO_4M 8
+#define CLK_RCO_2M 9
+#define CLK_ISPPLL_GATE 10
+#define CLK_DPLL0_GATE 11
+#define CLK_DPLL1_GATE 12
+#define CLK_LPLL_GATE 13
+#define CLK_TWPLL_GATE 14
+#define CLK_GPLL_GATE 15
+#define CLK_RPLL_GATE 16
+#define CLK_CPPLL_GATE 17
+#define CLK_MPLL0_GATE 18
+#define CLK_MPLL1_GATE 19
+#define CLK_MPLL2_GATE 20
+#define CLK_PMU_GATE_NUM (CLK_MPLL2_GATE + 1)
+
+#define CLK_DPLL0 0
+#define CLK_DPLL0_58M31 1
+#define CLK_ANLG_PHY_G0_NUM (CLK_DPLL0_58M31 + 1)
+
+#define CLK_MPLL1 0
+#define CLK_MPLL1_63M38 1
+#define CLK_ANLG_PHY_G2_NUM (CLK_MPLL1_63M38 + 1)
+
+#define CLK_RPLL 0
+#define CLK_AUDIO_GATE 1
+#define CLK_MPLL0 2
+#define CLK_MPLL0_56M88 3
+#define CLK_MPLL2 4
+#define CLK_MPLL2_47M13 5
+#define CLK_ANLG_PHY_G3_NUM (CLK_MPLL2_47M13 + 1)
+
+#define CLK_TWPLL 0
+#define CLK_TWPLL_768M 1
+#define CLK_TWPLL_384M 2
+#define CLK_TWPLL_192M 3
+#define CLK_TWPLL_96M 4
+#define CLK_TWPLL_48M 5
+#define CLK_TWPLL_24M 6
+#define CLK_TWPLL_12M 7
+#define CLK_TWPLL_512M 8
+#define CLK_TWPLL_256M 9
+#define CLK_TWPLL_128M 10
+#define CLK_TWPLL_64M 11
+#define CLK_TWPLL_307M2 12
+#define CLK_TWPLL_219M4 13
+#define CLK_TWPLL_170M6 14
+#define CLK_TWPLL_153M6 15
+#define CLK_TWPLL_76M8 16
+#define CLK_TWPLL_51M2 17
+#define CLK_TWPLL_38M4 18
+#define CLK_TWPLL_19M2 19
+#define CLK_TWPLL_12M29 20
+#define CLK_LPLL 21
+#define CLK_LPLL_614M4 22
+#define CLK_LPLL_409M6 23
+#define CLK_LPLL_245M76 24
+#define CLK_LPLL_30M72 25
+#define CLK_ISPPLL 26
+#define CLK_ISPPLL_468M 27
+#define CLK_ISPPLL_78M 28
+#define CLK_GPLL 29
+#define CLK_GPLL_40M 30
+#define CLK_CPPLL 31
+#define CLK_CPPLL_39M32 32
+#define CLK_ANLG_PHY_GC_NUM (CLK_CPPLL_39M32 + 1)
+
+#define CLK_AP_APB 0
+#define CLK_IPI 1
+#define CLK_AP_UART0 2
+#define CLK_AP_UART1 3
+#define CLK_AP_UART2 4
+#define CLK_AP_I2C0 5
+#define CLK_AP_I2C1 6
+#define CLK_AP_I2C2 7
+#define CLK_AP_I2C3 8
+#define CLK_AP_I2C4 9
+#define CLK_AP_SPI0 10
+#define CLK_AP_SPI1 11
+#define CLK_AP_SPI2 12
+#define CLK_AP_SPI3 13
+#define CLK_AP_IIS0 14
+#define CLK_AP_IIS1 15
+#define CLK_AP_IIS2 16
+#define CLK_AP_SIM 17
+#define CLK_AP_CE 18
+#define CLK_SDIO0_2X 19
+#define CLK_SDIO1_2X 20
+#define CLK_EMMC_2X 21
+#define CLK_VSP 22
+#define CLK_DISPC0 23
+#define CLK_DISPC0_DPI 24
+#define CLK_DSI_APB 25
+#define CLK_DSI_RXESC 26
+#define CLK_DSI_LANEBYTE 27
+#define CLK_VDSP 28
+#define CLK_VDSP_M 29
+#define CLK_AP_CLK_NUM (CLK_VDSP_M + 1)
+
+#define CLK_DSI_EB 0
+#define CLK_DISPC_EB 1
+#define CLK_VSP_EB 2
+#define CLK_VDMA_EB 3
+#define CLK_DMA_PUB_EB 4
+#define CLK_DMA_SEC_EB 5
+#define CLK_IPI_EB 6
+#define CLK_AHB_CKG_EB 7
+#define CLK_BM_CLK_EB 8
+#define CLK_AP_AHB_GATE_NUM (CLK_BM_CLK_EB + 1)
+
+#define CLK_AON_APB 0
+#define CLK_ADI 1
+#define CLK_AUX0 2
+#define CLK_AUX1 3
+#define CLK_AUX2 4
+#define CLK_PROBE 5
+#define CLK_PWM0 6
+#define CLK_PWM1 7
+#define CLK_PWM2 8
+#define CLK_PWM3 9
+#define CLK_EFUSE 10
+#define CLK_UART0 11
+#define CLK_UART1 12
+#define CLK_THM0 13
+#define CLK_THM1 14
+#define CLK_THM2 15
+#define CLK_THM3 16
+#define CLK_AON_I2C 17
+#define CLK_AON_IIS 18
+#define CLK_SCC 19
+#define CLK_APCPU_DAP 20
+#define CLK_APCPU_DAP_MTCK 21
+#define CLK_APCPU_TS 22
+#define CLK_DEBUG_TS 23
+#define CLK_DSI_TEST_S 24
+#define CLK_DJTAG_TCK 25
+#define CLK_DJTAG_TCK_HW 26
+#define CLK_AON_TMR 27
+#define CLK_AON_PMU 28
+#define CLK_DEBOUNCE 29
+#define CLK_APCPU_PMU 30
+#define CLK_TOP_DVFS 31
+#define CLK_OTG_UTMI 32
+#define CLK_OTG_REF 33
+#define CLK_CSSYS 34
+#define CLK_CSSYS_PUB 35
+#define CLK_CSSYS_APB 36
+#define CLK_AP_AXI 37
+#define CLK_AP_MM 38
+#define CLK_SDIO2_2X 39
+#define CLK_ANALOG_IO_APB 40
+#define CLK_DMC_REF_CLK 41
+#define CLK_EMC 42
+#define CLK_USB 43
+#define CLK_26M_PMU 44
+#define CLK_AON_APB_NUM (CLK_26M_PMU + 1)
+
+#define CLK_MM_AHB 0
+#define CLK_MM_MTX 1
+#define CLK_SENSOR0 2
+#define CLK_SENSOR1 3
+#define CLK_SENSOR2 4
+#define CLK_CPP 5
+#define CLK_JPG 6
+#define CLK_FD 7
+#define CLK_DCAM_IF 8
+#define CLK_DCAM_AXI 9
+#define CLK_ISP 10
+#define CLK_MIPI_CSI0 11
+#define CLK_MIPI_CSI1 12
+#define CLK_MIPI_CSI2 13
+#define CLK_MM_CLK_NUM (CLK_MIPI_CSI2 + 1)
+
+#define CLK_RC100M_CAL_EB 0
+#define CLK_DJTAG_TCK_EB 1
+#define CLK_DJTAG_EB 2
+#define CLK_AUX0_EB 3
+#define CLK_AUX1_EB 4
+#define CLK_AUX2_EB 5
+#define CLK_PROBE_EB 6
+#define CLK_MM_EB 7
+#define CLK_GPU_EB 8
+#define CLK_MSPI_EB 9
+#define CLK_APCPU_DAP_EB 10
+#define CLK_AON_CSSYS_EB 11
+#define CLK_CSSYS_APB_EB 12
+#define CLK_CSSYS_PUB_EB 13
+#define CLK_SDPHY_CFG_EB 14
+#define CLK_SDPHY_REF_EB 15
+#define CLK_EFUSE_EB 16
+#define CLK_GPIO_EB 17
+#define CLK_MBOX_EB 18
+#define CLK_KPD_EB 19
+#define CLK_AON_SYST_EB 20
+#define CLK_AP_SYST_EB 21
+#define CLK_AON_TMR_EB 22
+#define CLK_OTG_UTMI_EB 23
+#define CLK_OTG_PHY_EB 24
+#define CLK_SPLK_EB 25
+#define CLK_PIN_EB 26
+#define CLK_ANA_EB 27
+#define CLK_APCPU_TS0_EB 28
+#define CLK_APB_BUSMON_EB 29
+#define CLK_AON_IIS_EB 30
+#define CLK_SCC_EB 31
+#define CLK_THM0_EB 32
+#define CLK_THM1_EB 33
+#define CLK_THM2_EB 34
+#define CLK_ASIM_TOP_EB 35
+#define CLK_I2C_EB 36
+#define CLK_PMU_EB 37
+#define CLK_ADI_EB 38
+#define CLK_EIC_EB 39
+#define CLK_AP_INTC0_EB 40
+#define CLK_AP_INTC1_EB 41
+#define CLK_AP_INTC2_EB 42
+#define CLK_AP_INTC3_EB 43
+#define CLK_AP_INTC4_EB 44
+#define CLK_AP_INTC5_EB 45
+#define CLK_AUDCP_INTC_EB 46
+#define CLK_AP_TMR0_EB 47
+#define CLK_AP_TMR1_EB 48
+#define CLK_AP_TMR2_EB 49
+#define CLK_PWM0_EB 50
+#define CLK_PWM1_EB 51
+#define CLK_PWM2_EB 52
+#define CLK_PWM3_EB 53
+#define CLK_AP_WDG_EB 54
+#define CLK_APCPU_WDG_EB 55
+#define CLK_SERDES_EB 56
+#define CLK_ARCH_RTC_EB 57
+#define CLK_KPD_RTC_EB 58
+#define CLK_AON_SYST_RTC_EB 59
+#define CLK_AP_SYST_RTC_EB 60
+#define CLK_AON_TMR_RTC_EB 61
+#define CLK_EIC_RTC_EB 62
+#define CLK_EIC_RTCDV5_EB 63
+#define CLK_AP_WDG_RTC_EB 64
+#define CLK_AC_WDG_RTC_EB 65
+#define CLK_AP_TMR0_RTC_EB 66
+#define CLK_AP_TMR1_RTC_EB 67
+#define CLK_AP_TMR2_RTC_EB 68
+#define CLK_DCXO_LC_RTC_EB 69
+#define CLK_BB_CAL_RTC_EB 70
+#define CLK_AP_EMMC_RTC_EB 71
+#define CLK_AP_SDIO0_RTC_EB 72
+#define CLK_AP_SDIO1_RTC_EB 73
+#define CLK_AP_SDIO2_RTC_EB 74
+#define CLK_DSI_CSI_TEST_EB 75
+#define CLK_DJTAG_TCK_EN 76
+#define CLK_DPHY_REF_EB 77
+#define CLK_DMC_REF_EB 78
+#define CLK_OTG_REF_EB 79
+#define CLK_TSEN_EB 80
+#define CLK_TMR_EB 81
+#define CLK_RC100M_REF_EB 82
+#define CLK_RC100M_FDK_EB 83
+#define CLK_DEBOUNCE_EB 84
+#define CLK_DET_32K_EB 85
+#define CLK_TOP_CSSYS_EB 86
+#define CLK_AP_AXI_EN 87
+#define CLK_SDIO0_2X_EN 88
+#define CLK_SDIO0_1X_EN 89
+#define CLK_SDIO1_2X_EN 90
+#define CLK_SDIO1_1X_EN 91
+#define CLK_SDIO2_2X_EN 92
+#define CLK_SDIO2_1X_EN 93
+#define CLK_EMMC_2X_EN 94
+#define CLK_EMMC_1X_EN 95
+#define CLK_PLL_TEST_EN 96
+#define CLK_CPHY_CFG_EN 97
+#define CLK_DEBUG_TS_EN 98
+#define CLK_ACCESS_AUD_EN 99
+#define CLK_AON_APB_GATE_NUM (CLK_ACCESS_AUD_EN + 1)
+
+#define CLK_MM_CPP_EB 0
+#define CLK_MM_JPG_EB 1
+#define CLK_MM_DCAM_EB 2
+#define CLK_MM_ISP_EB 3
+#define CLK_MM_CSI2_EB 4
+#define CLK_MM_CSI1_EB 5
+#define CLK_MM_CSI0_EB 6
+#define CLK_MM_CKG_EB 7
+#define CLK_ISP_AHB_EB 8
+#define CLK_MM_DVFS_EB 9
+#define CLK_MM_FD_EB 10
+#define CLK_MM_SENSOR2_EB 11
+#define CLK_MM_SENSOR1_EB 12
+#define CLK_MM_SENSOR0_EB 13
+#define CLK_MM_MIPI_CSI2_EB 14
+#define CLK_MM_MIPI_CSI1_EB 15
+#define CLK_MM_MIPI_CSI0_EB 16
+#define CLK_DCAM_AXI_EB 17
+#define CLK_ISP_AXI_EB 18
+#define CLK_MM_CPHY_EB 19
+#define CLK_MM_GATE_CLK_NUM (CLK_MM_CPHY_EB + 1)
+
+#define CLK_SIM0_EB 0
+#define CLK_IIS0_EB 1
+#define CLK_IIS1_EB 2
+#define CLK_IIS2_EB 3
+#define CLK_APB_REG_EB 4
+#define CLK_SPI0_EB 5
+#define CLK_SPI1_EB 6
+#define CLK_SPI2_EB 7
+#define CLK_SPI3_EB 8
+#define CLK_I2C0_EB 9
+#define CLK_I2C1_EB 10
+#define CLK_I2C2_EB 11
+#define CLK_I2C3_EB 12
+#define CLK_I2C4_EB 13
+#define CLK_UART0_EB 14
+#define CLK_UART1_EB 15
+#define CLK_UART2_EB 16
+#define CLK_SIM0_32K_EB 17
+#define CLK_SPI0_LFIN_EB 18
+#define CLK_SPI1_LFIN_EB 19
+#define CLK_SPI2_LFIN_EB 20
+#define CLK_SPI3_LFIN_EB 21
+#define CLK_SDIO0_EB 22
+#define CLK_SDIO1_EB 23
+#define CLK_SDIO2_EB 24
+#define CLK_EMMC_EB 25
+#define CLK_SDIO0_32K_EB 26
+#define CLK_SDIO1_32K_EB 27
+#define CLK_SDIO2_32K_EB 28
+#define CLK_EMMC_32K_EB 29
+#define CLK_AP_APB_GATE_NUM (CLK_EMMC_32K_EB + 1)
+
+#define CLK_GPU_CORE_EB 0
+#define CLK_GPU_CORE 1
+#define CLK_GPU_MEM_EB 2
+#define CLK_GPU_MEM 3
+#define CLK_GPU_SYS_EB 4
+#define CLK_GPU_SYS 5
+#define CLK_GPU_CLK_NUM (CLK_GPU_SYS + 1)
+
+#define CLK_AUDCP_IIS0_EB 0
+#define CLK_AUDCP_IIS1_EB 1
+#define CLK_AUDCP_IIS2_EB 2
+#define CLK_AUDCP_UART_EB 3
+#define CLK_AUDCP_DMA_CP_EB 4
+#define CLK_AUDCP_DMA_AP_EB 5
+#define CLK_AUDCP_SRC48K_EB 6
+#define CLK_AUDCP_MCDT_EB 7
+#define CLK_AUDCP_VBCIFD_EB 8
+#define CLK_AUDCP_VBC_EB 9
+#define CLK_AUDCP_SPLK_EB 10
+#define CLK_AUDCP_ICU_EB 11
+#define CLK_AUDCP_DMA_AP_ASHB_EB 12
+#define CLK_AUDCP_DMA_CP_ASHB_EB 13
+#define CLK_AUDCP_AUD_EB 14
+#define CLK_AUDCP_VBC_24M_EB 15
+#define CLK_AUDCP_TMR_26M_EB 16
+#define CLK_AUDCP_DVFS_ASHB_EB 17
+#define CLK_AUDCP_AHB_GATE_NUM (CLK_AUDCP_DVFS_ASHB_EB + 1)
+
+#define CLK_AUDCP_WDG_EB 0
+#define CLK_AUDCP_RTC_WDG_EB 1
+#define CLK_AUDCP_TMR0_EB 2
+#define CLK_AUDCP_TMR1_EB 3
+#define CLK_AUDCP_APB_GATE_NUM (CLK_AUDCP_TMR1_EB + 1)
+
+#define CLK_ACORE0 0
+#define CLK_ACORE1 1
+#define CLK_ACORE2 2
+#define CLK_ACORE3 3
+#define CLK_ACORE4 4
+#define CLK_ACORE5 5
+#define CLK_PCORE0 6
+#define CLK_PCORE1 7
+#define CLK_SCU 8
+#define CLK_ACE 9
+#define CLK_PERIPH 10
+#define CLK_GIC 11
+#define CLK_ATB 12
+#define CLK_DEBUG_APB 13
+#define CLK_APCPU_SEC_NUM (CLK_DEBUG_APB + 1)
+
+#endif /* _DT_BINDINGS_CLK_UMS512_H_ */
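Note: unlike most headers in this series, the UMS512 one also derives per-block *_NUM counts (e.g. CLK_AP_CLK_NUM) from the last index, letting the provider driver size its clock table without a separate constant. DTS consumers still use the plain ids; a placeholder sketch:

    #include <dt-bindings/clock/sprd,ums512-clk.h>

    sdio0: mmc@71100000 {
            /* placeholder node: 2x source clock from the AP clock block */
            clocks = <&ap_clk CLK_SDIO0_2X>;
            clock-names = "sdio";
    };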
diff --git a/include/dt-bindings/clock/st,stm32mp21-rcc.h b/include/dt-bindings/clock/st,stm32mp21-rcc.h
new file mode 100644
index 000000000000..054b785f2796
--- /dev/null
+++ b/include/dt-bindings/clock/st,stm32mp21-rcc.h
@@ -0,0 +1,426 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com>
+ */
+
+#ifndef _DT_BINDINGS_STM32MP21_CLKS_H_
+#define _DT_BINDINGS_STM32MP21_CLKS_H_
+
+/* INTERNAL/EXTERNAL OSCILLATORS */
+#define HSI_CK 0
+#define HSE_CK 1
+#define MSI_CK 2
+#define LSI_CK 3
+#define LSE_CK 4
+#define I2S_CK 5
+#define RTC_CK 6
+#define SPDIF_CK_SYMB 7
+
+/* PLL CLOCKS */
+#define PLL1_CK 8
+#define PLL2_CK 9
+#define PLL4_CK 10
+#define PLL5_CK 11
+#define PLL6_CK 12
+#define PLL7_CK 13
+#define PLL8_CK 14
+
+#define CK_CPU1 15
+
+/* APB DIV CLOCKS */
+#define CK_ICN_APB1 16
+#define CK_ICN_APB2 17
+#define CK_ICN_APB3 18
+#define CK_ICN_APB4 19
+#define CK_ICN_APB5 20
+#define CK_ICN_APBDBG 21
+
+/* GLOBAL TIMER */
+#define TIMG1_CK 22
+#define TIMG2_CK 23
+
+/* FLEXGEN CLOCKS */
+#define CK_ICN_HS_MCU 24
+#define CK_ICN_SDMMC 25
+#define CK_ICN_DDR 26
+#define CK_ICN_DISPLAY 27
+#define CK_ICN_HSL 28
+#define CK_ICN_NIC 29
+#define CK_ICN_VID 30
+#define CK_FLEXGEN_07 31
+#define CK_FLEXGEN_08 32
+#define CK_FLEXGEN_09 33
+#define CK_FLEXGEN_10 34
+#define CK_FLEXGEN_11 35
+#define CK_FLEXGEN_12 36
+#define CK_FLEXGEN_13 37
+#define CK_FLEXGEN_14 38
+#define CK_FLEXGEN_15 39
+#define CK_FLEXGEN_16 40
+#define CK_FLEXGEN_17 41
+#define CK_FLEXGEN_18 42
+#define CK_FLEXGEN_19 43
+#define CK_FLEXGEN_20 44
+#define CK_FLEXGEN_21 45
+#define CK_FLEXGEN_22 46
+#define CK_FLEXGEN_23 47
+#define CK_FLEXGEN_24 48
+#define CK_FLEXGEN_25 49
+#define CK_FLEXGEN_26 50
+#define CK_FLEXGEN_27 51
+#define CK_FLEXGEN_28 52
+#define CK_FLEXGEN_29 53
+#define CK_FLEXGEN_30 54
+#define CK_FLEXGEN_31 55
+#define CK_FLEXGEN_32 56
+#define CK_FLEXGEN_33 57
+#define CK_FLEXGEN_34 58
+#define CK_FLEXGEN_35 59
+#define CK_FLEXGEN_36 60
+#define CK_FLEXGEN_37 61
+#define CK_FLEXGEN_38 62
+#define CK_FLEXGEN_39 63
+#define CK_FLEXGEN_40 64
+#define CK_FLEXGEN_41 65
+#define CK_FLEXGEN_42 66
+#define CK_FLEXGEN_43 67
+#define CK_FLEXGEN_44 68
+#define CK_FLEXGEN_45 69
+#define CK_FLEXGEN_46 70
+#define CK_FLEXGEN_47 71
+#define CK_FLEXGEN_48 72
+#define CK_FLEXGEN_49 73
+#define CK_FLEXGEN_50 74
+#define CK_FLEXGEN_51 75
+#define CK_FLEXGEN_52 76
+#define CK_FLEXGEN_53 77
+#define CK_FLEXGEN_54 78
+#define CK_FLEXGEN_55 79
+#define CK_FLEXGEN_56 80
+#define CK_FLEXGEN_57 81
+#define CK_FLEXGEN_58 82
+#define CK_FLEXGEN_59 83
+#define CK_FLEXGEN_60 84
+#define CK_FLEXGEN_61 85
+#define CK_FLEXGEN_62 86
+#define CK_FLEXGEN_63 87
+
+/* LOW SPEED MCU CLOCK */
+#define CK_ICN_LS_MCU 88
+
+#define CK_BUS_STM 89
+#define CK_BUS_FMC 90
+#define CK_BUS_ETH1 91
+#define CK_BUS_ETH2 92
+#define CK_BUS_DDRPHYC 93
+#define CK_BUS_SYSCPU1 94
+#define CK_BUS_HPDMA1 95
+#define CK_BUS_HPDMA2 96
+#define CK_BUS_HPDMA3 97
+#define CK_BUS_ADC1 98
+#define CK_BUS_ADC2 99
+#define CK_BUS_IPCC1 100
+#define CK_BUS_DCMIPSSI 101
+#define CK_BUS_CRC 102
+#define CK_BUS_MDF1 103
+#define CK_BUS_BKPSRAM 104
+#define CK_BUS_HASH1 105
+#define CK_BUS_HASH2 106
+#define CK_BUS_RNG1 107
+#define CK_BUS_RNG2 108
+#define CK_BUS_CRYP1 109
+#define CK_BUS_CRYP2 110
+#define CK_BUS_SAES 111
+#define CK_BUS_PKA 112
+#define CK_BUS_GPIOA 113
+#define CK_BUS_GPIOB 114
+#define CK_BUS_GPIOC 115
+#define CK_BUS_GPIOD 116
+#define CK_BUS_GPIOE 117
+#define CK_BUS_GPIOF 118
+#define CK_BUS_GPIOG 119
+#define CK_BUS_GPIOH 120
+#define CK_BUS_GPIOI 121
+#define CK_BUS_GPIOZ 122
+#define CK_BUS_RTC 124
+#define CK_BUS_LPUART1 125
+#define CK_BUS_LPTIM3 126
+#define CK_BUS_LPTIM4 127
+#define CK_BUS_LPTIM5 128
+#define CK_BUS_TIM2 129
+#define CK_BUS_TIM3 130
+#define CK_BUS_TIM4 131
+#define CK_BUS_TIM5 132
+#define CK_BUS_TIM6 133
+#define CK_BUS_TIM7 134
+#define CK_BUS_TIM10 135
+#define CK_BUS_TIM11 136
+#define CK_BUS_TIM12 137
+#define CK_BUS_TIM13 138
+#define CK_BUS_TIM14 139
+#define CK_BUS_LPTIM1 140
+#define CK_BUS_LPTIM2 141
+#define CK_BUS_SPI2 142
+#define CK_BUS_SPI3 143
+#define CK_BUS_SPDIFRX 144
+#define CK_BUS_USART2 145
+#define CK_BUS_USART3 146
+#define CK_BUS_UART4 147
+#define CK_BUS_UART5 148
+#define CK_BUS_I2C1 149
+#define CK_BUS_I2C2 150
+#define CK_BUS_I2C3 151
+#define CK_BUS_I3C1 152
+#define CK_BUS_I3C2 153
+#define CK_BUS_I3C3 154
+#define CK_BUS_TIM1 155
+#define CK_BUS_TIM8 156
+#define CK_BUS_TIM15 157
+#define CK_BUS_TIM16 158
+#define CK_BUS_TIM17 159
+#define CK_BUS_SAI1 160
+#define CK_BUS_SAI2 161
+#define CK_BUS_SAI3 162
+#define CK_BUS_SAI4 163
+#define CK_BUS_USART1 164
+#define CK_BUS_USART6 165
+#define CK_BUS_UART7 166
+#define CK_BUS_FDCAN 167
+#define CK_BUS_SPI1 168
+#define CK_BUS_SPI4 169
+#define CK_BUS_SPI5 170
+#define CK_BUS_SPI6 171
+#define CK_BUS_BSEC 172
+#define CK_BUS_IWDG1 173
+#define CK_BUS_IWDG2 174
+#define CK_BUS_IWDG3 175
+#define CK_BUS_IWDG4 176
+#define CK_BUS_WWDG1 177
+#define CK_BUS_VREF 178
+#define CK_BUS_DTS 179
+#define CK_BUS_SERC 180
+#define CK_BUS_HDP 181
+#define CK_BUS_DDRPERFM 182
+#define CK_BUS_OTG 183
+#define CK_BUS_LTDC 184
+#define CK_BUS_CSI 185
+#define CK_BUS_DCMIPP 186
+#define CK_BUS_DDRC 187
+#define CK_BUS_DDRCFG 188
+#define CK_BUS_STGEN 189
+#define CK_SYSDBG 190
+#define CK_KER_TIM2 191
+#define CK_KER_TIM3 192
+#define CK_KER_TIM4 193
+#define CK_KER_TIM5 194
+#define CK_KER_TIM6 195
+#define CK_KER_TIM7 196
+#define CK_KER_TIM10 197
+#define CK_KER_TIM11 198
+#define CK_KER_TIM12 199
+#define CK_KER_TIM13 200
+#define CK_KER_TIM14 201
+#define CK_KER_TIM1 202
+#define CK_KER_TIM8 203
+#define CK_KER_TIM15 204
+#define CK_KER_TIM16 205
+#define CK_KER_TIM17 206
+#define CK_BUS_SYSRAM 207
+#define CK_BUS_RETRAM 208
+#define CK_BUS_OSPI1 209
+#define CK_BUS_OTFD1 210
+#define CK_BUS_SRAM1 211
+#define CK_BUS_SDMMC1 212
+#define CK_BUS_SDMMC2 213
+#define CK_BUS_SDMMC3 214
+#define CK_BUS_DDR 215
+#define CK_BUS_RISAF4 216
+#define CK_BUS_USBHOHCI 217
+#define CK_BUS_USBHEHCI 218
+#define CK_KER_LPTIM1 219
+#define CK_KER_LPTIM2 220
+#define CK_KER_USART2 221
+#define CK_KER_UART4 222
+#define CK_KER_USART3 223
+#define CK_KER_UART5 224
+#define CK_KER_SPI2 225
+#define CK_KER_SPI3 226
+#define CK_KER_SPDIFRX 227
+#define CK_KER_I2C1 228
+#define CK_KER_I2C2 229
+#define CK_KER_I3C1 230
+#define CK_KER_I3C2 231
+#define CK_KER_I2C3 232
+#define CK_KER_I3C3 233
+#define CK_KER_SPI1 234
+#define CK_KER_SPI4 235
+#define CK_KER_SPI5 236
+#define CK_KER_SPI6 237
+#define CK_KER_USART1 238
+#define CK_KER_USART6 239
+#define CK_KER_UART7 240
+#define CK_KER_MDF1 241
+#define CK_KER_SAI1 242
+#define CK_KER_SAI2 243
+#define CK_KER_SAI3 244
+#define CK_KER_SAI4 245
+#define CK_KER_FDCAN 246
+#define CK_KER_CSI 247
+#define CK_KER_CSITXESC 248
+#define CK_KER_CSIPHY 249
+#define CK_KER_STGEN 250
+#define CK_KER_USB2PHY2EN 251
+#define CK_KER_LPUART1 252
+#define CK_KER_LPTIM3 253
+#define CK_KER_LPTIM4 254
+#define CK_KER_LPTIM5 255
+#define CK_KER_TSDBG 256
+#define CK_KER_TPIU 257
+#define CK_BUS_ETR 258
+#define CK_BUS_SYSATB 259
+#define CK_KER_ADC1 260
+#define CK_KER_ADC2 261
+#define CK_KER_OSPI1 262
+#define CK_KER_FMC 263
+#define CK_KER_SDMMC1 264
+#define CK_KER_SDMMC2 265
+#define CK_KER_SDMMC3 266
+#define CK_KER_ETH1 267
+#define CK_KER_ETH2 268
+#define CK_KER_ETH1PTP 269
+#define CK_KER_ETH2PTP 270
+#define CK_KER_USB2PHY1 271
+#define CK_KER_USB2PHY2 272
+#define CK_MCO1 273
+#define CK_MCO2 274
+#define CK_KER_DTS 275
+#define CK_ETH1_RX 276
+#define CK_ETH1_TX 277
+#define CK_ETH1_MAC 278
+#define CK_ETH2_RX 279
+#define CK_ETH2_TX 280
+#define CK_ETH2_MAC 281
+#define CK_ETH1_STP 282
+#define CK_ETH2_STP 283
+#define CK_KER_LTDC 284
+#define HSE_DIV2_CK 285
+#define CK_DBGMCU 286
+#define CK_DAP 287
+#define CK_KER_ETR 288
+#define CK_KER_STM 289
+
+#define CK_SCMI_ICN_HS_MCU 0
+#define CK_SCMI_ICN_SDMMC 1
+#define CK_SCMI_ICN_DDR 2
+#define CK_SCMI_ICN_DISPLAY 3
+#define CK_SCMI_ICN_HSL 4
+#define CK_SCMI_ICN_NIC 5
+#define CK_SCMI_FLEXGEN_07 7
+#define CK_SCMI_FLEXGEN_08 8
+#define CK_SCMI_FLEXGEN_09 9
+#define CK_SCMI_FLEXGEN_10 10
+#define CK_SCMI_FLEXGEN_11 11
+#define CK_SCMI_FLEXGEN_12 12
+#define CK_SCMI_FLEXGEN_13 13
+#define CK_SCMI_FLEXGEN_14 14
+#define CK_SCMI_FLEXGEN_15 15
+#define CK_SCMI_FLEXGEN_16 16
+#define CK_SCMI_FLEXGEN_17 17
+#define CK_SCMI_FLEXGEN_18 18
+#define CK_SCMI_FLEXGEN_19 19
+#define CK_SCMI_FLEXGEN_20 20
+#define CK_SCMI_FLEXGEN_21 21
+#define CK_SCMI_FLEXGEN_22 22
+#define CK_SCMI_FLEXGEN_23 23
+#define CK_SCMI_FLEXGEN_24 24
+#define CK_SCMI_FLEXGEN_25 25
+#define CK_SCMI_FLEXGEN_26 26
+#define CK_SCMI_FLEXGEN_27 27
+#define CK_SCMI_FLEXGEN_28 28
+#define CK_SCMI_FLEXGEN_29 29
+#define CK_SCMI_FLEXGEN_30 30
+#define CK_SCMI_FLEXGEN_31 31
+#define CK_SCMI_FLEXGEN_32 32
+#define CK_SCMI_FLEXGEN_33 33
+#define CK_SCMI_FLEXGEN_34 34
+#define CK_SCMI_FLEXGEN_35 35
+#define CK_SCMI_FLEXGEN_36 36
+#define CK_SCMI_FLEXGEN_37 37
+#define CK_SCMI_FLEXGEN_38 38
+#define CK_SCMI_FLEXGEN_39 39
+#define CK_SCMI_FLEXGEN_40 40
+#define CK_SCMI_FLEXGEN_41 41
+#define CK_SCMI_FLEXGEN_42 42
+#define CK_SCMI_FLEXGEN_43 43
+#define CK_SCMI_FLEXGEN_44 44
+#define CK_SCMI_FLEXGEN_45 45
+#define CK_SCMI_FLEXGEN_46 46
+#define CK_SCMI_FLEXGEN_47 47
+#define CK_SCMI_FLEXGEN_48 48
+#define CK_SCMI_FLEXGEN_49 49
+#define CK_SCMI_FLEXGEN_50 50
+#define CK_SCMI_FLEXGEN_51 51
+#define CK_SCMI_FLEXGEN_52 52
+#define CK_SCMI_FLEXGEN_53 53
+#define CK_SCMI_FLEXGEN_54 54
+#define CK_SCMI_FLEXGEN_55 55
+#define CK_SCMI_FLEXGEN_56 56
+#define CK_SCMI_FLEXGEN_57 57
+#define CK_SCMI_FLEXGEN_58 58
+#define CK_SCMI_FLEXGEN_59 59
+#define CK_SCMI_FLEXGEN_60 60
+#define CK_SCMI_FLEXGEN_61 61
+#define CK_SCMI_FLEXGEN_62 62
+#define CK_SCMI_FLEXGEN_63 63
+#define CK_SCMI_ICN_LS_MCU 64
+#define CK_SCMI_HSE 65
+#define CK_SCMI_LSE 66
+#define CK_SCMI_HSI 67
+#define CK_SCMI_LSI 68
+#define CK_SCMI_MSI 69
+#define CK_SCMI_HSE_DIV2 70
+#define CK_SCMI_CPU1 71
+#define CK_SCMI_SYSCPU1 72
+#define CK_SCMI_PLL2 73
+#define CK_SCMI_RTC 74
+#define CK_SCMI_RTCCK 75
+#define CK_SCMI_ICN_APB1 76
+#define CK_SCMI_ICN_APB2 77
+#define CK_SCMI_ICN_APB3 78
+#define CK_SCMI_ICN_APB4 79
+#define CK_SCMI_ICN_APB5 80
+#define CK_SCMI_ICN_APBDBG 81
+#define CK_SCMI_TIMG1 82
+#define CK_SCMI_TIMG2 83
+#define CK_SCMI_BKPSRAM 84
+#define CK_SCMI_BSEC 85
+#define CK_SCMI_BUS_ETR 86
+#define CK_SCMI_FMC 87
+#define CK_SCMI_GPIOA 88
+#define CK_SCMI_GPIOB 89
+#define CK_SCMI_GPIOC 90
+#define CK_SCMI_GPIOD 91
+#define CK_SCMI_GPIOE 92
+#define CK_SCMI_GPIOF 93
+#define CK_SCMI_GPIOG 94
+#define CK_SCMI_GPIOH 95
+#define CK_SCMI_GPIOI 96
+#define CK_SCMI_GPIOZ 97
+#define CK_SCMI_HPDMA1 98
+#define CK_SCMI_HPDMA2 99
+#define CK_SCMI_HPDMA3 100
+#define CK_SCMI_IPCC1 101
+#define CK_SCMI_RETRAM 102
+#define CK_SCMI_SRAM1 103
+#define CK_SCMI_SYSRAM 104
+#define CK_SCMI_OSPI1 105
+#define CK_SCMI_TPIU 106
+#define CK_SCMI_SYSDBG 107
+#define CK_SCMI_SYSATB 108
+#define CK_SCMI_TSDBG 109
+#define CK_SCMI_BUS_STM 110
+#define CK_SCMI_KER_STM 111
+#define CK_SCMI_KER_ETR 112
+
+#endif /* _DT_BINDINGS_STM32MP21_CLKS_H_ */
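Note: the MP21 header deliberately carries two id spaces: raw RCC ids for clocks the kernel controls directly, and CK_SCMI_* ids for clocks owned by secure firmware and reached through an SCMI agent. A device tree mixes both depending on ownership; everything below (labels, nodes, addresses) is illustrative:

    #include <dt-bindings/clock/st,stm32mp21-rcc.h>

    usart2: serial@400e0000 {
            /* kernel clock, provided directly by the RCC node */
            clocks = <&rcc CK_KER_USART2>;
    };

    gpioa: gpio@442d0000 {
            /* bus clock behind secure firmware, via the SCMI clock protocol */
            clocks = <&scmi_clk CK_SCMI_GPIOA>;
    };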
diff --git a/include/dt-bindings/clock/st,stm32mp25-rcc.h b/include/dt-bindings/clock/st,stm32mp25-rcc.h
new file mode 100644
index 000000000000..b6cf05ad4be6
--- /dev/null
+++ b/include/dt-bindings/clock/st,stm32mp25-rcc.h
@@ -0,0 +1,492 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2023 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com>
+ */
+
+#ifndef _DT_BINDINGS_STM32MP25_CLKS_H_
+#define _DT_BINDINGS_STM32MP25_CLKS_H_
+
+/* INTERNAL/EXTERNAL OSCILLATORS */
+#define HSI_CK 0
+#define HSE_CK 1
+#define MSI_CK 2
+#define LSI_CK 3
+#define LSE_CK 4
+#define I2S_CK 5
+#define RTC_CK 6
+#define SPDIF_CK_SYMB 7
+
+/* PLL CLOCKS */
+#define PLL1_CK 8
+#define PLL2_CK 9
+#define PLL3_CK 10
+#define PLL4_CK 11
+#define PLL5_CK 12
+#define PLL6_CK 13
+#define PLL7_CK 14
+#define PLL8_CK 15
+
+#define CK_CPU1 16
+
+/* APB DIV CLOCKS */
+#define CK_ICN_APB1 17
+#define CK_ICN_APB2 18
+#define CK_ICN_APB3 19
+#define CK_ICN_APB4 20
+#define CK_ICN_APBDBG 21
+
+/* GLOBAL TIMER */
+#define TIMG1_CK 22
+#define TIMG2_CK 23
+
+/* FLEXGEN CLOCKS */
+#define CK_ICN_HS_MCU 24
+#define CK_ICN_SDMMC 25
+#define CK_ICN_DDR 26
+#define CK_ICN_DISPLAY 27
+#define CK_ICN_HSL 28
+#define CK_ICN_NIC 29
+#define CK_ICN_VID 30
+#define CK_FLEXGEN_07 31
+#define CK_FLEXGEN_08 32
+#define CK_FLEXGEN_09 33
+#define CK_FLEXGEN_10 34
+#define CK_FLEXGEN_11 35
+#define CK_FLEXGEN_12 36
+#define CK_FLEXGEN_13 37
+#define CK_FLEXGEN_14 38
+#define CK_FLEXGEN_15 39
+#define CK_FLEXGEN_16 40
+#define CK_FLEXGEN_17 41
+#define CK_FLEXGEN_18 42
+#define CK_FLEXGEN_19 43
+#define CK_FLEXGEN_20 44
+#define CK_FLEXGEN_21 45
+#define CK_FLEXGEN_22 46
+#define CK_FLEXGEN_23 47
+#define CK_FLEXGEN_24 48
+#define CK_FLEXGEN_25 49
+#define CK_FLEXGEN_26 50
+#define CK_FLEXGEN_27 51
+#define CK_FLEXGEN_28 52
+#define CK_FLEXGEN_29 53
+#define CK_FLEXGEN_30 54
+#define CK_FLEXGEN_31 55
+#define CK_FLEXGEN_32 56
+#define CK_FLEXGEN_33 57
+#define CK_FLEXGEN_34 58
+#define CK_FLEXGEN_35 59
+#define CK_FLEXGEN_36 60
+#define CK_FLEXGEN_37 61
+#define CK_FLEXGEN_38 62
+#define CK_FLEXGEN_39 63
+#define CK_FLEXGEN_40 64
+#define CK_FLEXGEN_41 65
+#define CK_FLEXGEN_42 66
+#define CK_FLEXGEN_43 67
+#define CK_FLEXGEN_44 68
+#define CK_FLEXGEN_45 69
+#define CK_FLEXGEN_46 70
+#define CK_FLEXGEN_47 71
+#define CK_FLEXGEN_48 72
+#define CK_FLEXGEN_49 73
+#define CK_FLEXGEN_50 74
+#define CK_FLEXGEN_51 75
+#define CK_FLEXGEN_52 76
+#define CK_FLEXGEN_53 77
+#define CK_FLEXGEN_54 78
+#define CK_FLEXGEN_55 79
+#define CK_FLEXGEN_56 80
+#define CK_FLEXGEN_57 81
+#define CK_FLEXGEN_58 82
+#define CK_FLEXGEN_59 83
+#define CK_FLEXGEN_60 84
+#define CK_FLEXGEN_61 85
+#define CK_FLEXGEN_62 86
+#define CK_FLEXGEN_63 87
+
+/* LOW SPEED MCU CLOCK */
+#define CK_ICN_LS_MCU 88
+
+#define CK_BUS_STM500 89
+#define CK_BUS_FMC 90
+#define CK_BUS_GPU 91
+#define CK_BUS_ETH1 92
+#define CK_BUS_ETH2 93
+#define CK_BUS_PCIE 94
+#define CK_BUS_DDRPHYC 95
+#define CK_BUS_SYSCPU1 96
+#define CK_BUS_ETHSW 97
+#define CK_BUS_HPDMA1 98
+#define CK_BUS_HPDMA2 99
+#define CK_BUS_HPDMA3 100
+#define CK_BUS_ADC12 101
+#define CK_BUS_ADC3 102
+#define CK_BUS_IPCC1 103
+#define CK_BUS_CCI 104
+#define CK_BUS_CRC 105
+#define CK_BUS_MDF1 106
+#define CK_BUS_OSPIIOM 107
+#define CK_BUS_BKPSRAM 108
+#define CK_BUS_HASH 109
+#define CK_BUS_RNG 110
+#define CK_BUS_CRYP1 111
+#define CK_BUS_CRYP2 112
+#define CK_BUS_SAES 113
+#define CK_BUS_PKA 114
+#define CK_BUS_GPIOA 115
+#define CK_BUS_GPIOB 116
+#define CK_BUS_GPIOC 117
+#define CK_BUS_GPIOD 118
+#define CK_BUS_GPIOE 119
+#define CK_BUS_GPIOF 120
+#define CK_BUS_GPIOG 121
+#define CK_BUS_GPIOH 122
+#define CK_BUS_GPIOI 123
+#define CK_BUS_GPIOJ 124
+#define CK_BUS_GPIOK 125
+#define CK_BUS_LPSRAM1 126
+#define CK_BUS_LPSRAM2 127
+#define CK_BUS_LPSRAM3 128
+#define CK_BUS_GPIOZ 129
+#define CK_BUS_LPDMA 130
+#define CK_BUS_HSEM 131
+#define CK_BUS_IPCC2 132
+#define CK_BUS_RTC 133
+#define CK_BUS_SPI8 134
+#define CK_BUS_LPUART1 135
+#define CK_BUS_I2C8 136
+#define CK_BUS_LPTIM3 137
+#define CK_BUS_LPTIM4 138
+#define CK_BUS_LPTIM5 139
+#define CK_BUS_IWDG5 140
+#define CK_BUS_WWDG2 141
+#define CK_BUS_I3C4 142
+#define CK_BUS_TIM2 143
+#define CK_BUS_TIM3 144
+#define CK_BUS_TIM4 145
+#define CK_BUS_TIM5 146
+#define CK_BUS_TIM6 147
+#define CK_BUS_TIM7 148
+#define CK_BUS_TIM10 149
+#define CK_BUS_TIM11 150
+#define CK_BUS_TIM12 151
+#define CK_BUS_TIM13 152
+#define CK_BUS_TIM14 153
+#define CK_BUS_LPTIM1 154
+#define CK_BUS_LPTIM2 155
+#define CK_BUS_SPI2 156
+#define CK_BUS_SPI3 157
+#define CK_BUS_SPDIFRX 158
+#define CK_BUS_USART2 159
+#define CK_BUS_USART3 160
+#define CK_BUS_UART4 161
+#define CK_BUS_UART5 162
+#define CK_BUS_I2C1 163
+#define CK_BUS_I2C2 164
+#define CK_BUS_I2C3 165
+#define CK_BUS_I2C4 166
+#define CK_BUS_I2C5 167
+#define CK_BUS_I2C6 168
+#define CK_BUS_I2C7 169
+#define CK_BUS_I3C1 170
+#define CK_BUS_I3C2 171
+#define CK_BUS_I3C3 172
+#define CK_BUS_TIM1 173
+#define CK_BUS_TIM8 174
+#define CK_BUS_TIM15 175
+#define CK_BUS_TIM16 176
+#define CK_BUS_TIM17 177
+#define CK_BUS_TIM20 178
+#define CK_BUS_SAI1 179
+#define CK_BUS_SAI2 180
+#define CK_BUS_SAI3 181
+#define CK_BUS_SAI4 182
+#define CK_BUS_USART1 183
+#define CK_BUS_USART6 184
+#define CK_BUS_UART7 185
+#define CK_BUS_UART8 186
+#define CK_BUS_UART9 187
+#define CK_BUS_FDCAN 188
+#define CK_BUS_SPI1 189
+#define CK_BUS_SPI4 190
+#define CK_BUS_SPI5 191
+#define CK_BUS_SPI6 192
+#define CK_BUS_SPI7 193
+#define CK_BUS_BSEC 194
+#define CK_BUS_IWDG1 195
+#define CK_BUS_IWDG2 196
+#define CK_BUS_IWDG3 197
+#define CK_BUS_IWDG4 198
+#define CK_BUS_WWDG1 199
+#define CK_BUS_VREF 200
+#define CK_BUS_DTS 201
+#define CK_BUS_SERC 202
+#define CK_BUS_HDP 203
+#define CK_BUS_IS2M 204
+#define CK_BUS_DSI 205
+#define CK_BUS_LTDC 206
+#define CK_BUS_CSI 207
+#define CK_BUS_DCMIPP 208
+#define CK_BUS_DDRC 209
+#define CK_BUS_DDRCFG 210
+#define CK_BUS_GICV2M 211
+#define CK_BUS_USBTC 212
+#define CK_BUS_USB3PCIEPHY 214
+#define CK_BUS_STGEN 215
+#define CK_BUS_VDEC 216
+#define CK_BUS_VENC 217
+#define CK_SYSDBG 218
+#define CK_KER_TIM2 219
+#define CK_KER_TIM3 220
+#define CK_KER_TIM4 221
+#define CK_KER_TIM5 222
+#define CK_KER_TIM6 223
+#define CK_KER_TIM7 224
+#define CK_KER_TIM10 225
+#define CK_KER_TIM11 226
+#define CK_KER_TIM12 227
+#define CK_KER_TIM13 228
+#define CK_KER_TIM14 229
+#define CK_KER_TIM1 230
+#define CK_KER_TIM8 231
+#define CK_KER_TIM15 232
+#define CK_KER_TIM16 233
+#define CK_KER_TIM17 234
+#define CK_KER_TIM20 235
+#define CK_BUS_SYSRAM 236
+#define CK_BUS_VDERAM 237
+#define CK_BUS_RETRAM 238
+#define CK_BUS_OSPI1 239
+#define CK_BUS_OSPI2 240
+#define CK_BUS_OTFD1 241
+#define CK_BUS_OTFD2 242
+#define CK_BUS_SRAM1 243
+#define CK_BUS_SRAM2 244
+#define CK_BUS_SDMMC1 245
+#define CK_BUS_SDMMC2 246
+#define CK_BUS_SDMMC3 247
+#define CK_BUS_DDR 248
+#define CK_BUS_RISAF4 249
+#define CK_BUS_USB2OHCI 250
+#define CK_BUS_USB2EHCI 251
+#define CK_BUS_USB3DR 252
+#define CK_KER_LPTIM1 253
+#define CK_KER_LPTIM2 254
+#define CK_KER_USART2 255
+#define CK_KER_UART4 256
+#define CK_KER_USART3 257
+#define CK_KER_UART5 258
+#define CK_KER_SPI2 259
+#define CK_KER_SPI3 260
+#define CK_KER_SPDIFRX 261
+#define CK_KER_I2C1 262
+#define CK_KER_I2C2 263
+#define CK_KER_I3C1 264
+#define CK_KER_I3C2 265
+#define CK_KER_I2C3 266
+#define CK_KER_I2C5 267
+#define CK_KER_I3C3 268
+#define CK_KER_I2C4 269
+#define CK_KER_I2C6 270
+#define CK_KER_I2C7 271
+#define CK_KER_SPI1 272
+#define CK_KER_SPI4 273
+#define CK_KER_SPI5 274
+#define CK_KER_SPI6 275
+#define CK_KER_SPI7 276
+#define CK_KER_USART1 277
+#define CK_KER_USART6 278
+#define CK_KER_UART7 279
+#define CK_KER_UART8 280
+#define CK_KER_UART9 281
+#define CK_KER_MDF1 282
+#define CK_KER_SAI1 283
+#define CK_KER_SAI2 284
+#define CK_KER_SAI3 285
+#define CK_KER_SAI4 286
+#define CK_KER_FDCAN 287
+#define CK_KER_DSIBLANE 288
+#define CK_KER_DSIPHY 289
+#define CK_KER_CSI 290
+#define CK_KER_CSITXESC 291
+#define CK_KER_CSIPHY 292
+#define CK_KER_LVDSPHY 293
+#define CK_KER_STGEN 294
+#define CK_KER_USB3PCIEPHY 295
+#define CK_KER_USB2PHY2EN 296
+#define CK_KER_I3C4 297
+#define CK_KER_SPI8 298
+#define CK_KER_I2C8 299
+#define CK_KER_LPUART1 300
+#define CK_KER_LPTIM3 301
+#define CK_KER_LPTIM4 302
+#define CK_KER_LPTIM5 303
+#define CK_KER_TSDBG 304
+#define CK_KER_TPIU 305
+#define CK_BUS_ETR 306
+#define CK_BUS_SYSATB 307
+#define CK_KER_ADC12 308
+#define CK_KER_ADC3 309
+#define CK_KER_OSPI1 310
+#define CK_KER_OSPI2 311
+#define CK_KER_FMC 312
+#define CK_KER_SDMMC1 313
+#define CK_KER_SDMMC2 314
+#define CK_KER_SDMMC3 315
+#define CK_KER_ETH1 316
+#define CK_KER_ETH2 317
+#define CK_KER_ETH1PTP 318
+#define CK_KER_ETH2PTP 319
+#define CK_KER_USB2PHY1 320
+#define CK_KER_USB2PHY2 321
+#define CK_KER_ETHSW 322
+#define CK_KER_ETHSWREF 323
+#define CK_MCO1 324
+#define CK_MCO2 325
+#define CK_KER_DTS 326
+#define CK_ETH1_RX 327
+#define CK_ETH1_TX 328
+#define CK_ETH1_MAC 329
+#define CK_ETH2_RX 330
+#define CK_ETH2_TX 331
+#define CK_ETH2_MAC 332
+#define CK_ETH1_STP 333
+#define CK_ETH2_STP 334
+#define CK_KER_USBTC 335
+#define CK_BUS_ADF1 336
+#define CK_KER_ADF1 337
+#define CK_BUS_LVDS 338
+#define CK_KER_LTDC 339
+#define CK_KER_GPU 340
+#define CK_BUS_ETHSWACMCFG 341
+#define CK_BUS_ETHSWACMMSG 342
+#define HSE_DIV2_CK 343
+
+#define STM32MP25_LAST_CLK 344
+
+#define CK_SCMI_ICN_HS_MCU 0
+#define CK_SCMI_ICN_SDMMC 1
+#define CK_SCMI_ICN_DDR 2
+#define CK_SCMI_ICN_DISPLAY 3
+#define CK_SCMI_ICN_HSL 4
+#define CK_SCMI_ICN_NIC 5
+#define CK_SCMI_ICN_VID 6
+#define CK_SCMI_FLEXGEN_07 7
+#define CK_SCMI_FLEXGEN_08 8
+#define CK_SCMI_FLEXGEN_09 9
+#define CK_SCMI_FLEXGEN_10 10
+#define CK_SCMI_FLEXGEN_11 11
+#define CK_SCMI_FLEXGEN_12 12
+#define CK_SCMI_FLEXGEN_13 13
+#define CK_SCMI_FLEXGEN_14 14
+#define CK_SCMI_FLEXGEN_15 15
+#define CK_SCMI_FLEXGEN_16 16
+#define CK_SCMI_FLEXGEN_17 17
+#define CK_SCMI_FLEXGEN_18 18
+#define CK_SCMI_FLEXGEN_19 19
+#define CK_SCMI_FLEXGEN_20 20
+#define CK_SCMI_FLEXGEN_21 21
+#define CK_SCMI_FLEXGEN_22 22
+#define CK_SCMI_FLEXGEN_23 23
+#define CK_SCMI_FLEXGEN_24 24
+#define CK_SCMI_FLEXGEN_25 25
+#define CK_SCMI_FLEXGEN_26 26
+#define CK_SCMI_FLEXGEN_27 27
+#define CK_SCMI_FLEXGEN_28 28
+#define CK_SCMI_FLEXGEN_29 29
+#define CK_SCMI_FLEXGEN_30 30
+#define CK_SCMI_FLEXGEN_31 31
+#define CK_SCMI_FLEXGEN_32 32
+#define CK_SCMI_FLEXGEN_33 33
+#define CK_SCMI_FLEXGEN_34 34
+#define CK_SCMI_FLEXGEN_35 35
+#define CK_SCMI_FLEXGEN_36 36
+#define CK_SCMI_FLEXGEN_37 37
+#define CK_SCMI_FLEXGEN_38 38
+#define CK_SCMI_FLEXGEN_39 39
+#define CK_SCMI_FLEXGEN_40 40
+#define CK_SCMI_FLEXGEN_41 41
+#define CK_SCMI_FLEXGEN_42 42
+#define CK_SCMI_FLEXGEN_43 43
+#define CK_SCMI_FLEXGEN_44 44
+#define CK_SCMI_FLEXGEN_45 45
+#define CK_SCMI_FLEXGEN_46 46
+#define CK_SCMI_FLEXGEN_47 47
+#define CK_SCMI_FLEXGEN_48 48
+#define CK_SCMI_FLEXGEN_49 49
+#define CK_SCMI_FLEXGEN_50 50
+#define CK_SCMI_FLEXGEN_51 51
+#define CK_SCMI_FLEXGEN_52 52
+#define CK_SCMI_FLEXGEN_53 53
+#define CK_SCMI_FLEXGEN_54 54
+#define CK_SCMI_FLEXGEN_55 55
+#define CK_SCMI_FLEXGEN_56 56
+#define CK_SCMI_FLEXGEN_57 57
+#define CK_SCMI_FLEXGEN_58 58
+#define CK_SCMI_FLEXGEN_59 59
+#define CK_SCMI_FLEXGEN_60 60
+#define CK_SCMI_FLEXGEN_61 61
+#define CK_SCMI_FLEXGEN_62 62
+#define CK_SCMI_FLEXGEN_63 63
+#define CK_SCMI_ICN_LS_MCU 64
+#define CK_SCMI_HSE 65
+#define CK_SCMI_LSE 66
+#define CK_SCMI_HSI 67
+#define CK_SCMI_LSI 68
+#define CK_SCMI_MSI 69
+#define CK_SCMI_HSE_DIV2 70
+#define CK_SCMI_CPU1 71
+#define CK_SCMI_SYSCPU1 72
+#define CK_SCMI_PLL2 73
+#define CK_SCMI_PLL3 74
+#define CK_SCMI_RTC 75
+#define CK_SCMI_RTCCK 76
+#define CK_SCMI_ICN_APB1 77
+#define CK_SCMI_ICN_APB2 78
+#define CK_SCMI_ICN_APB3 79
+#define CK_SCMI_ICN_APB4 80
+#define CK_SCMI_ICN_APBDBG 81
+#define CK_SCMI_TIMG1 82
+#define CK_SCMI_TIMG2 83
+#define CK_SCMI_BKPSRAM 84
+#define CK_SCMI_BSEC 85
+#define CK_SCMI_ETR 87
+#define CK_SCMI_FMC 88
+#define CK_SCMI_GPIOA 89
+#define CK_SCMI_GPIOB 90
+#define CK_SCMI_GPIOC 91
+#define CK_SCMI_GPIOD 92
+#define CK_SCMI_GPIOE 93
+#define CK_SCMI_GPIOF 94
+#define CK_SCMI_GPIOG 95
+#define CK_SCMI_GPIOH 96
+#define CK_SCMI_GPIOI 97
+#define CK_SCMI_GPIOJ 98
+#define CK_SCMI_GPIOK 99
+#define CK_SCMI_GPIOZ 100
+#define CK_SCMI_HPDMA1 101
+#define CK_SCMI_HPDMA2 102
+#define CK_SCMI_HPDMA3 103
+#define CK_SCMI_HSEM 104
+#define CK_SCMI_IPCC1 105
+#define CK_SCMI_IPCC2 106
+#define CK_SCMI_LPDMA 107
+#define CK_SCMI_RETRAM 108
+#define CK_SCMI_SRAM1 109
+#define CK_SCMI_SRAM2 110
+#define CK_SCMI_LPSRAM1 111
+#define CK_SCMI_LPSRAM2 112
+#define CK_SCMI_LPSRAM3 113
+#define CK_SCMI_VDERAM 114
+#define CK_SCMI_SYSRAM 115
+#define CK_SCMI_OSPI1 116
+#define CK_SCMI_OSPI2 117
+#define CK_SCMI_TPIU 118
+#define CK_SCMI_SYSDBG 119
+#define CK_SCMI_SYSATB 120
+#define CK_SCMI_TSDBG 121
+#define CK_SCMI_STM500 122
+
+#endif /* _DT_BINDINGS_STM32MP25_CLKS_H_ */
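In a device tree, each of these constants is the single clock-specifier cell of the corresponding provider: the CK_* indices belong to the RCC node, while the CK_SCMI_* indices belong to the SCMI clock protocol node exposed by the secure firmware. A minimal consumer sketch; the &rcc label and the unit address are illustrative, not taken from an actual STM32MP25 device tree:

	sdmmc1: mmc@48220000 {
		/* bus (interface) clock and kernel (functional) clock */
		clocks = <&rcc CK_BUS_SDMMC1>, <&rcc CK_KER_SDMMC1>;
	};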
diff --git a/include/dt-bindings/clock/starfive,jh7110-crg.h b/include/dt-bindings/clock/starfive,jh7110-crg.h
new file mode 100644
index 000000000000..467ccab3bfaa
--- /dev/null
+++ b/include/dt-bindings/clock/starfive,jh7110-crg.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2022 Emil Renner Berthing <kernel@esmil.dk>
+ * Copyright 2022 StarFive Technology Co., Ltd.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_STARFIVE_JH7110_CRG_H__
+#define __DT_BINDINGS_CLOCK_STARFIVE_JH7110_CRG_H__
+
+/* PLL clocks */
+#define JH7110_PLLCLK_PLL0_OUT 0
+#define JH7110_PLLCLK_PLL1_OUT 1
+#define JH7110_PLLCLK_PLL2_OUT 2
+#define JH7110_PLLCLK_END 3
+
+/* SYSCRG clocks */
+#define JH7110_SYSCLK_CPU_ROOT 0
+#define JH7110_SYSCLK_CPU_CORE 1
+#define JH7110_SYSCLK_CPU_BUS 2
+#define JH7110_SYSCLK_GPU_ROOT 3
+#define JH7110_SYSCLK_PERH_ROOT 4
+#define JH7110_SYSCLK_BUS_ROOT 5
+#define JH7110_SYSCLK_NOCSTG_BUS 6
+#define JH7110_SYSCLK_AXI_CFG0 7
+#define JH7110_SYSCLK_STG_AXIAHB 8
+#define JH7110_SYSCLK_AHB0 9
+#define JH7110_SYSCLK_AHB1 10
+#define JH7110_SYSCLK_APB_BUS 11
+#define JH7110_SYSCLK_APB0 12
+#define JH7110_SYSCLK_PLL0_DIV2 13
+#define JH7110_SYSCLK_PLL1_DIV2 14
+#define JH7110_SYSCLK_PLL2_DIV2 15
+#define JH7110_SYSCLK_AUDIO_ROOT 16
+#define JH7110_SYSCLK_MCLK_INNER 17
+#define JH7110_SYSCLK_MCLK 18
+#define JH7110_SYSCLK_MCLK_OUT 19
+#define JH7110_SYSCLK_ISP_2X 20
+#define JH7110_SYSCLK_ISP_AXI 21
+#define JH7110_SYSCLK_GCLK0 22
+#define JH7110_SYSCLK_GCLK1 23
+#define JH7110_SYSCLK_GCLK2 24
+#define JH7110_SYSCLK_CORE 25
+#define JH7110_SYSCLK_CORE1 26
+#define JH7110_SYSCLK_CORE2 27
+#define JH7110_SYSCLK_CORE3 28
+#define JH7110_SYSCLK_CORE4 29
+#define JH7110_SYSCLK_DEBUG 30
+#define JH7110_SYSCLK_RTC_TOGGLE 31
+#define JH7110_SYSCLK_TRACE0 32
+#define JH7110_SYSCLK_TRACE1 33
+#define JH7110_SYSCLK_TRACE2 34
+#define JH7110_SYSCLK_TRACE3 35
+#define JH7110_SYSCLK_TRACE4 36
+#define JH7110_SYSCLK_TRACE_COM 37
+#define JH7110_SYSCLK_NOC_BUS_CPU_AXI 38
+#define JH7110_SYSCLK_NOC_BUS_AXICFG0_AXI 39
+#define JH7110_SYSCLK_OSC_DIV2 40
+#define JH7110_SYSCLK_PLL1_DIV4 41
+#define JH7110_SYSCLK_PLL1_DIV8 42
+#define JH7110_SYSCLK_DDR_BUS 43
+#define JH7110_SYSCLK_DDR_AXI 44
+#define JH7110_SYSCLK_GPU_CORE 45
+#define JH7110_SYSCLK_GPU_CORE_CLK 46
+#define JH7110_SYSCLK_GPU_SYS_CLK 47
+#define JH7110_SYSCLK_GPU_APB 48
+#define JH7110_SYSCLK_GPU_RTC_TOGGLE 49
+#define JH7110_SYSCLK_NOC_BUS_GPU_AXI 50
+#define JH7110_SYSCLK_ISP_TOP_CORE 51
+#define JH7110_SYSCLK_ISP_TOP_AXI 52
+#define JH7110_SYSCLK_NOC_BUS_ISP_AXI 53
+#define JH7110_SYSCLK_HIFI4_CORE 54
+#define JH7110_SYSCLK_HIFI4_AXI 55
+#define JH7110_SYSCLK_AXI_CFG1_MAIN 56
+#define JH7110_SYSCLK_AXI_CFG1_AHB 57
+#define JH7110_SYSCLK_VOUT_SRC 58
+#define JH7110_SYSCLK_VOUT_AXI 59
+#define JH7110_SYSCLK_NOC_BUS_DISP_AXI 60
+#define JH7110_SYSCLK_VOUT_TOP_AHB 61
+#define JH7110_SYSCLK_VOUT_TOP_AXI 62
+#define JH7110_SYSCLK_VOUT_TOP_HDMITX0_MCLK 63
+#define JH7110_SYSCLK_VOUT_TOP_MIPIPHY_REF 64
+#define JH7110_SYSCLK_JPEGC_AXI 65
+#define JH7110_SYSCLK_CODAJ12_AXI 66
+#define JH7110_SYSCLK_CODAJ12_CORE 67
+#define JH7110_SYSCLK_CODAJ12_APB 68
+#define JH7110_SYSCLK_VDEC_AXI 69
+#define JH7110_SYSCLK_WAVE511_AXI 70
+#define JH7110_SYSCLK_WAVE511_BPU 71
+#define JH7110_SYSCLK_WAVE511_VCE 72
+#define JH7110_SYSCLK_WAVE511_APB 73
+#define JH7110_SYSCLK_VDEC_JPG 74
+#define JH7110_SYSCLK_VDEC_MAIN 75
+#define JH7110_SYSCLK_NOC_BUS_VDEC_AXI 76
+#define JH7110_SYSCLK_VENC_AXI 77
+#define JH7110_SYSCLK_WAVE420L_AXI 78
+#define JH7110_SYSCLK_WAVE420L_BPU 79
+#define JH7110_SYSCLK_WAVE420L_VCE 80
+#define JH7110_SYSCLK_WAVE420L_APB 81
+#define JH7110_SYSCLK_NOC_BUS_VENC_AXI 82
+#define JH7110_SYSCLK_AXI_CFG0_MAIN_DIV 83
+#define JH7110_SYSCLK_AXI_CFG0_MAIN 84
+#define JH7110_SYSCLK_AXI_CFG0_HIFI4 85
+#define JH7110_SYSCLK_AXIMEM2_AXI 86
+#define JH7110_SYSCLK_QSPI_AHB 87
+#define JH7110_SYSCLK_QSPI_APB 88
+#define JH7110_SYSCLK_QSPI_REF_SRC 89
+#define JH7110_SYSCLK_QSPI_REF 90
+#define JH7110_SYSCLK_SDIO0_AHB 91
+#define JH7110_SYSCLK_SDIO1_AHB 92
+#define JH7110_SYSCLK_SDIO0_SDCARD 93
+#define JH7110_SYSCLK_SDIO1_SDCARD 94
+#define JH7110_SYSCLK_USB_125M 95
+#define JH7110_SYSCLK_NOC_BUS_STG_AXI 96
+#define JH7110_SYSCLK_GMAC1_AHB 97
+#define JH7110_SYSCLK_GMAC1_AXI 98
+#define JH7110_SYSCLK_GMAC_SRC 99
+#define JH7110_SYSCLK_GMAC1_GTXCLK 100
+#define JH7110_SYSCLK_GMAC1_RMII_RTX 101
+#define JH7110_SYSCLK_GMAC1_PTP 102
+#define JH7110_SYSCLK_GMAC1_RX 103
+#define JH7110_SYSCLK_GMAC1_RX_INV 104
+#define JH7110_SYSCLK_GMAC1_TX 105
+#define JH7110_SYSCLK_GMAC1_TX_INV 106
+#define JH7110_SYSCLK_GMAC1_GTXC 107
+#define JH7110_SYSCLK_GMAC0_GTXCLK 108
+#define JH7110_SYSCLK_GMAC0_PTP 109
+#define JH7110_SYSCLK_GMAC_PHY 110
+#define JH7110_SYSCLK_GMAC0_GTXC 111
+#define JH7110_SYSCLK_IOMUX_APB 112
+#define JH7110_SYSCLK_MAILBOX_APB 113
+#define JH7110_SYSCLK_INT_CTRL_APB 114
+#define JH7110_SYSCLK_CAN0_APB 115
+#define JH7110_SYSCLK_CAN0_TIMER 116
+#define JH7110_SYSCLK_CAN0_CAN 117
+#define JH7110_SYSCLK_CAN1_APB 118
+#define JH7110_SYSCLK_CAN1_TIMER 119
+#define JH7110_SYSCLK_CAN1_CAN 120
+#define JH7110_SYSCLK_PWM_APB 121
+#define JH7110_SYSCLK_WDT_APB 122
+#define JH7110_SYSCLK_WDT_CORE 123
+#define JH7110_SYSCLK_TIMER_APB 124
+#define JH7110_SYSCLK_TIMER0 125
+#define JH7110_SYSCLK_TIMER1 126
+#define JH7110_SYSCLK_TIMER2 127
+#define JH7110_SYSCLK_TIMER3 128
+#define JH7110_SYSCLK_TEMP_APB 129
+#define JH7110_SYSCLK_TEMP_CORE 130
+#define JH7110_SYSCLK_SPI0_APB 131
+#define JH7110_SYSCLK_SPI1_APB 132
+#define JH7110_SYSCLK_SPI2_APB 133
+#define JH7110_SYSCLK_SPI3_APB 134
+#define JH7110_SYSCLK_SPI4_APB 135
+#define JH7110_SYSCLK_SPI5_APB 136
+#define JH7110_SYSCLK_SPI6_APB 137
+#define JH7110_SYSCLK_I2C0_APB 138
+#define JH7110_SYSCLK_I2C1_APB 139
+#define JH7110_SYSCLK_I2C2_APB 140
+#define JH7110_SYSCLK_I2C3_APB 141
+#define JH7110_SYSCLK_I2C4_APB 142
+#define JH7110_SYSCLK_I2C5_APB 143
+#define JH7110_SYSCLK_I2C6_APB 144
+#define JH7110_SYSCLK_UART0_APB 145
+#define JH7110_SYSCLK_UART0_CORE 146
+#define JH7110_SYSCLK_UART1_APB 147
+#define JH7110_SYSCLK_UART1_CORE 148
+#define JH7110_SYSCLK_UART2_APB 149
+#define JH7110_SYSCLK_UART2_CORE 150
+#define JH7110_SYSCLK_UART3_APB 151
+#define JH7110_SYSCLK_UART3_CORE 152
+#define JH7110_SYSCLK_UART4_APB 153
+#define JH7110_SYSCLK_UART4_CORE 154
+#define JH7110_SYSCLK_UART5_APB 155
+#define JH7110_SYSCLK_UART5_CORE 156
+#define JH7110_SYSCLK_PWMDAC_APB 157
+#define JH7110_SYSCLK_PWMDAC_CORE 158
+#define JH7110_SYSCLK_SPDIF_APB 159
+#define JH7110_SYSCLK_SPDIF_CORE 160
+#define JH7110_SYSCLK_I2STX0_APB 161
+#define JH7110_SYSCLK_I2STX0_BCLK_MST 162
+#define JH7110_SYSCLK_I2STX0_BCLK_MST_INV 163
+#define JH7110_SYSCLK_I2STX0_LRCK_MST 164
+#define JH7110_SYSCLK_I2STX0_BCLK 165
+#define JH7110_SYSCLK_I2STX0_BCLK_INV 166
+#define JH7110_SYSCLK_I2STX0_LRCK 167
+#define JH7110_SYSCLK_I2STX1_APB 168
+#define JH7110_SYSCLK_I2STX1_BCLK_MST 169
+#define JH7110_SYSCLK_I2STX1_BCLK_MST_INV 170
+#define JH7110_SYSCLK_I2STX1_LRCK_MST 171
+#define JH7110_SYSCLK_I2STX1_BCLK 172
+#define JH7110_SYSCLK_I2STX1_BCLK_INV 173
+#define JH7110_SYSCLK_I2STX1_LRCK 174
+#define JH7110_SYSCLK_I2SRX_APB 175
+#define JH7110_SYSCLK_I2SRX_BCLK_MST 176
+#define JH7110_SYSCLK_I2SRX_BCLK_MST_INV 177
+#define JH7110_SYSCLK_I2SRX_LRCK_MST 178
+#define JH7110_SYSCLK_I2SRX_BCLK 179
+#define JH7110_SYSCLK_I2SRX_BCLK_INV 180
+#define JH7110_SYSCLK_I2SRX_LRCK 181
+#define JH7110_SYSCLK_PDM_DMIC 182
+#define JH7110_SYSCLK_PDM_APB 183
+#define JH7110_SYSCLK_TDM_AHB 184
+#define JH7110_SYSCLK_TDM_APB 185
+#define JH7110_SYSCLK_TDM_INTERNAL 186
+#define JH7110_SYSCLK_TDM_TDM 187
+#define JH7110_SYSCLK_TDM_TDM_INV 188
+#define JH7110_SYSCLK_JTAG_CERTIFICATION_TRNG 189
+
+#define JH7110_SYSCLK_END 190
+
+/* AONCRG clocks */
+#define JH7110_AONCLK_OSC_DIV4 0
+#define JH7110_AONCLK_APB_FUNC 1
+#define JH7110_AONCLK_GMAC0_AHB 2
+#define JH7110_AONCLK_GMAC0_AXI 3
+#define JH7110_AONCLK_GMAC0_RMII_RTX 4
+#define JH7110_AONCLK_GMAC0_TX 5
+#define JH7110_AONCLK_GMAC0_TX_INV 6
+#define JH7110_AONCLK_GMAC0_RX 7
+#define JH7110_AONCLK_GMAC0_RX_INV 8
+#define JH7110_AONCLK_OTPC_APB 9
+#define JH7110_AONCLK_RTC_APB 10
+#define JH7110_AONCLK_RTC_INTERNAL 11
+#define JH7110_AONCLK_RTC_32K 12
+#define JH7110_AONCLK_RTC_CAL 13
+
+#define JH7110_AONCLK_END 14
+
+/* STGCRG clocks */
+#define JH7110_STGCLK_HIFI4_CLK_CORE 0
+#define JH7110_STGCLK_USB0_APB 1
+#define JH7110_STGCLK_USB0_UTMI_APB 2
+#define JH7110_STGCLK_USB0_AXI 3
+#define JH7110_STGCLK_USB0_LPM 4
+#define JH7110_STGCLK_USB0_STB 5
+#define JH7110_STGCLK_USB0_APP_125 6
+#define JH7110_STGCLK_USB0_REFCLK 7
+#define JH7110_STGCLK_PCIE0_AXI_MST0 8
+#define JH7110_STGCLK_PCIE0_APB 9
+#define JH7110_STGCLK_PCIE0_TL 10
+#define JH7110_STGCLK_PCIE1_AXI_MST0 11
+#define JH7110_STGCLK_PCIE1_APB 12
+#define JH7110_STGCLK_PCIE1_TL 13
+#define JH7110_STGCLK_PCIE_SLV_MAIN 14
+#define JH7110_STGCLK_SEC_AHB 15
+#define JH7110_STGCLK_SEC_MISC_AHB 16
+#define JH7110_STGCLK_GRP0_MAIN 17
+#define JH7110_STGCLK_GRP0_BUS 18
+#define JH7110_STGCLK_GRP0_STG 19
+#define JH7110_STGCLK_GRP1_MAIN 20
+#define JH7110_STGCLK_GRP1_BUS 21
+#define JH7110_STGCLK_GRP1_STG 22
+#define JH7110_STGCLK_GRP1_HIFI 23
+#define JH7110_STGCLK_E2_RTC 24
+#define JH7110_STGCLK_E2_CORE 25
+#define JH7110_STGCLK_E2_DBG 26
+#define JH7110_STGCLK_DMA1P_AXI 27
+#define JH7110_STGCLK_DMA1P_AHB 28
+
+#define JH7110_STGCLK_END 29
+
+/* ISPCRG clocks */
+#define JH7110_ISPCLK_DOM4_APB_FUNC 0
+#define JH7110_ISPCLK_MIPI_RX0_PXL 1
+#define JH7110_ISPCLK_DVP_INV 2
+#define JH7110_ISPCLK_M31DPHY_CFG_IN 3
+#define JH7110_ISPCLK_M31DPHY_REF_IN 4
+#define JH7110_ISPCLK_M31DPHY_TX_ESC_LAN0 5
+#define JH7110_ISPCLK_VIN_APB 6
+#define JH7110_ISPCLK_VIN_SYS 7
+#define JH7110_ISPCLK_VIN_PIXEL_IF0 8
+#define JH7110_ISPCLK_VIN_PIXEL_IF1 9
+#define JH7110_ISPCLK_VIN_PIXEL_IF2 10
+#define JH7110_ISPCLK_VIN_PIXEL_IF3 11
+#define JH7110_ISPCLK_VIN_P_AXI_WR 12
+#define JH7110_ISPCLK_ISPV2_TOP_WRAPPER_C 13
+
+#define JH7110_ISPCLK_END 14
+
+/* VOUTCRG clocks */
+#define JH7110_VOUTCLK_APB 0
+#define JH7110_VOUTCLK_DC8200_PIX 1
+#define JH7110_VOUTCLK_DSI_SYS 2
+#define JH7110_VOUTCLK_TX_ESC 3
+#define JH7110_VOUTCLK_DC8200_AXI 4
+#define JH7110_VOUTCLK_DC8200_CORE 5
+#define JH7110_VOUTCLK_DC8200_AHB 6
+#define JH7110_VOUTCLK_DC8200_PIX0 7
+#define JH7110_VOUTCLK_DC8200_PIX1 8
+#define JH7110_VOUTCLK_DOM_VOUT_TOP_LCD 9
+#define JH7110_VOUTCLK_DSITX_APB 10
+#define JH7110_VOUTCLK_DSITX_SYS 11
+#define JH7110_VOUTCLK_DSITX_DPI 12
+#define JH7110_VOUTCLK_DSITX_TXESC 13
+#define JH7110_VOUTCLK_MIPITX_DPHY_TXESC 14
+#define JH7110_VOUTCLK_HDMI_TX_MCLK 15
+#define JH7110_VOUTCLK_HDMI_TX_BCLK 16
+#define JH7110_VOUTCLK_HDMI_TX_SYS 17
+
+#define JH7110_VOUTCLK_END 18
+
+#endif /* __DT_BINDINGS_CLOCK_STARFIVE_JH7110_CRG_H__ */
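The JH7110 splits its clock tree across independent controllers (PLL, SYSCRG, AONCRG, STGCRG, ISPCRG, VOUTCRG), each with its own zero-based index space terminated by a *_END sentinel, so the same numeric value recurs across providers and a consumer must pair each index with the right controller phandle. A hypothetical fragment, with the &syscrg label, unit address, and clock-names chosen for illustration:

	uart0: serial@10000000 {
		clocks = <&syscrg JH7110_SYSCLK_UART0_CORE>,
			 <&syscrg JH7110_SYSCLK_UART0_APB>;
		clock-names = "baudclk", "apb_pclk";
	};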
diff --git a/include/dt-bindings/clock/starfive-jh7100-audio.h b/include/dt-bindings/clock/starfive-jh7100-audio.h
new file mode 100644
index 000000000000..fbb4eae6572b
--- /dev/null
+++ b/include/dt-bindings/clock/starfive-jh7100-audio.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk>
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_STARFIVE_JH7100_AUDIO_H__
+#define __DT_BINDINGS_CLOCK_STARFIVE_JH7100_AUDIO_H__
+
+#define JH7100_AUDCLK_ADC_MCLK 0
+#define JH7100_AUDCLK_I2S1_MCLK 1
+#define JH7100_AUDCLK_I2SADC_APB 2
+#define JH7100_AUDCLK_I2SADC_BCLK 3
+#define JH7100_AUDCLK_I2SADC_BCLK_N 4
+#define JH7100_AUDCLK_I2SADC_LRCLK 5
+#define JH7100_AUDCLK_PDM_APB 6
+#define JH7100_AUDCLK_PDM_MCLK 7
+#define JH7100_AUDCLK_I2SVAD_APB 8
+#define JH7100_AUDCLK_SPDIF 9
+#define JH7100_AUDCLK_SPDIF_APB 10
+#define JH7100_AUDCLK_PWMDAC_APB 11
+#define JH7100_AUDCLK_DAC_MCLK 12
+#define JH7100_AUDCLK_I2SDAC_APB 13
+#define JH7100_AUDCLK_I2SDAC_BCLK 14
+#define JH7100_AUDCLK_I2SDAC_BCLK_N 15
+#define JH7100_AUDCLK_I2SDAC_LRCLK 16
+#define JH7100_AUDCLK_I2S1_APB 17
+#define JH7100_AUDCLK_I2S1_BCLK 18
+#define JH7100_AUDCLK_I2S1_BCLK_N 19
+#define JH7100_AUDCLK_I2S1_LRCLK 20
+#define JH7100_AUDCLK_I2SDAC16K_APB 21
+#define JH7100_AUDCLK_APB0_BUS 22
+#define JH7100_AUDCLK_DMA1P_AHB 23
+#define JH7100_AUDCLK_USB_APB 24
+#define JH7100_AUDCLK_USB_LPM 25
+#define JH7100_AUDCLK_USB_STB 26
+#define JH7100_AUDCLK_APB_EN 27
+#define JH7100_AUDCLK_VAD_MEM 28
+
+#define JH7100_AUDCLK_END 29
+
+#endif /* __DT_BINDINGS_CLOCK_STARFIVE_JH7100_AUDIO_H__ */
diff --git a/include/dt-bindings/clock/starfive-jh7100.h b/include/dt-bindings/clock/starfive-jh7100.h
new file mode 100644
index 000000000000..aa0863b9728d
--- /dev/null
+++ b/include/dt-bindings/clock/starfive-jh7100.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2021 Ahmad Fatoum, Pengutronix
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_STARFIVE_JH7100_H__
+#define __DT_BINDINGS_CLOCK_STARFIVE_JH7100_H__
+
+#define JH7100_CLK_CPUNDBUS_ROOT 0
+#define JH7100_CLK_DLA_ROOT 1
+#define JH7100_CLK_DSP_ROOT 2
+#define JH7100_CLK_GMACUSB_ROOT 3
+#define JH7100_CLK_PERH0_ROOT 4
+#define JH7100_CLK_PERH1_ROOT 5
+#define JH7100_CLK_VIN_ROOT 6
+#define JH7100_CLK_VOUT_ROOT 7
+#define JH7100_CLK_AUDIO_ROOT 8
+#define JH7100_CLK_CDECHIFI4_ROOT 9
+#define JH7100_CLK_CDEC_ROOT 10
+#define JH7100_CLK_VOUTBUS_ROOT 11
+#define JH7100_CLK_CPUNBUS_ROOT_DIV 12
+#define JH7100_CLK_DSP_ROOT_DIV 13
+#define JH7100_CLK_PERH0_SRC 14
+#define JH7100_CLK_PERH1_SRC 15
+#define JH7100_CLK_PLL0_TESTOUT 16
+#define JH7100_CLK_PLL1_TESTOUT 17
+#define JH7100_CLK_PLL2_TESTOUT 18
+#define JH7100_CLK_PLL2_REF 19
+#define JH7100_CLK_CPU_CORE 20
+#define JH7100_CLK_CPU_AXI 21
+#define JH7100_CLK_AHB_BUS 22
+#define JH7100_CLK_APB1_BUS 23
+#define JH7100_CLK_APB2_BUS 24
+#define JH7100_CLK_DOM3AHB_BUS 25
+#define JH7100_CLK_DOM7AHB_BUS 26
+#define JH7100_CLK_U74_CORE0 27
+#define JH7100_CLK_U74_CORE1 28
+#define JH7100_CLK_U74_AXI 29
+#define JH7100_CLK_U74RTC_TOGGLE 30
+#define JH7100_CLK_SGDMA2P_AXI 31
+#define JH7100_CLK_DMA2PNOC_AXI 32
+#define JH7100_CLK_SGDMA2P_AHB 33
+#define JH7100_CLK_DLA_BUS 34
+#define JH7100_CLK_DLA_AXI 35
+#define JH7100_CLK_DLANOC_AXI 36
+#define JH7100_CLK_DLA_APB 37
+#define JH7100_CLK_VP6_CORE 38
+#define JH7100_CLK_VP6BUS_SRC 39
+#define JH7100_CLK_VP6_AXI 40
+#define JH7100_CLK_VCDECBUS_SRC 41
+#define JH7100_CLK_VDEC_BUS 42
+#define JH7100_CLK_VDEC_AXI 43
+#define JH7100_CLK_VDECBRG_MAIN 44
+#define JH7100_CLK_VDEC_BCLK 45
+#define JH7100_CLK_VDEC_CCLK 46
+#define JH7100_CLK_VDEC_APB 47
+#define JH7100_CLK_JPEG_AXI 48
+#define JH7100_CLK_JPEG_CCLK 49
+#define JH7100_CLK_JPEG_APB 50
+#define JH7100_CLK_GC300_2X 51
+#define JH7100_CLK_GC300_AHB 52
+#define JH7100_CLK_JPCGC300_AXIBUS 53
+#define JH7100_CLK_GC300_AXI 54
+#define JH7100_CLK_JPCGC300_MAIN 55
+#define JH7100_CLK_VENC_BUS 56
+#define JH7100_CLK_VENC_AXI 57
+#define JH7100_CLK_VENCBRG_MAIN 58
+#define JH7100_CLK_VENC_BCLK 59
+#define JH7100_CLK_VENC_CCLK 60
+#define JH7100_CLK_VENC_APB 61
+#define JH7100_CLK_DDRPLL_DIV2 62
+#define JH7100_CLK_DDRPLL_DIV4 63
+#define JH7100_CLK_DDRPLL_DIV8 64
+#define JH7100_CLK_DDROSC_DIV2 65
+#define JH7100_CLK_DDRC0 66
+#define JH7100_CLK_DDRC1 67
+#define JH7100_CLK_DDRPHY_APB 68
+#define JH7100_CLK_NOC_ROB 69
+#define JH7100_CLK_NOC_COG 70
+#define JH7100_CLK_NNE_AHB 71
+#define JH7100_CLK_NNEBUS_SRC1 72
+#define JH7100_CLK_NNE_BUS 73
+#define JH7100_CLK_NNE_AXI 74
+#define JH7100_CLK_NNENOC_AXI 75
+#define JH7100_CLK_DLASLV_AXI 76
+#define JH7100_CLK_DSPX2C_AXI 77
+#define JH7100_CLK_HIFI4_SRC 78
+#define JH7100_CLK_HIFI4_COREFREE 79
+#define JH7100_CLK_HIFI4_CORE 80
+#define JH7100_CLK_HIFI4_BUS 81
+#define JH7100_CLK_HIFI4_AXI 82
+#define JH7100_CLK_HIFI4NOC_AXI 83
+#define JH7100_CLK_SGDMA1P_BUS 84
+#define JH7100_CLK_SGDMA1P_AXI 85
+#define JH7100_CLK_DMA1P_AXI 86
+#define JH7100_CLK_X2C_AXI 87
+#define JH7100_CLK_USB_BUS 88
+#define JH7100_CLK_USB_AXI 89
+#define JH7100_CLK_USBNOC_AXI 90
+#define JH7100_CLK_USBPHY_ROOTDIV 91
+#define JH7100_CLK_USBPHY_125M 92
+#define JH7100_CLK_USBPHY_PLLDIV25M 93
+#define JH7100_CLK_USBPHY_25M 94
+#define JH7100_CLK_AUDIO_DIV 95
+#define JH7100_CLK_AUDIO_SRC 96
+#define JH7100_CLK_AUDIO_12288 97
+#define JH7100_CLK_VIN_SRC 98
+#define JH7100_CLK_ISP0_BUS 99
+#define JH7100_CLK_ISP0_AXI 100
+#define JH7100_CLK_ISP0NOC_AXI 101
+#define JH7100_CLK_ISPSLV_AXI 102
+#define JH7100_CLK_ISP1_BUS 103
+#define JH7100_CLK_ISP1_AXI 104
+#define JH7100_CLK_ISP1NOC_AXI 105
+#define JH7100_CLK_VIN_BUS 106
+#define JH7100_CLK_VIN_AXI 107
+#define JH7100_CLK_VINNOC_AXI 108
+#define JH7100_CLK_VOUT_SRC 109
+#define JH7100_CLK_DISPBUS_SRC 110
+#define JH7100_CLK_DISP_BUS 111
+#define JH7100_CLK_DISP_AXI 112
+#define JH7100_CLK_DISPNOC_AXI 113
+#define JH7100_CLK_SDIO0_AHB 114
+#define JH7100_CLK_SDIO0_CCLKINT 115
+#define JH7100_CLK_SDIO0_CCLKINT_INV 116
+#define JH7100_CLK_SDIO1_AHB 117
+#define JH7100_CLK_SDIO1_CCLKINT 118
+#define JH7100_CLK_SDIO1_CCLKINT_INV 119
+#define JH7100_CLK_GMAC_AHB 120
+#define JH7100_CLK_GMAC_ROOT_DIV 121
+#define JH7100_CLK_GMAC_PTP_REF 122
+#define JH7100_CLK_GMAC_GTX 123
+#define JH7100_CLK_GMAC_RMII_TX 124
+#define JH7100_CLK_GMAC_RMII_RX 125
+#define JH7100_CLK_GMAC_TX 126
+#define JH7100_CLK_GMAC_TX_INV 127
+#define JH7100_CLK_GMAC_RX_PRE 128
+#define JH7100_CLK_GMAC_RX_INV 129
+#define JH7100_CLK_GMAC_RMII 130
+#define JH7100_CLK_GMAC_TOPHYREF 131
+#define JH7100_CLK_SPI2AHB_AHB 132
+#define JH7100_CLK_SPI2AHB_CORE 133
+#define JH7100_CLK_EZMASTER_AHB 134
+#define JH7100_CLK_E24_AHB 135
+#define JH7100_CLK_E24RTC_TOGGLE 136
+#define JH7100_CLK_QSPI_AHB 137
+#define JH7100_CLK_QSPI_APB 138
+#define JH7100_CLK_QSPI_REF 139
+#define JH7100_CLK_SEC_AHB 140
+#define JH7100_CLK_AES 141
+#define JH7100_CLK_SHA 142
+#define JH7100_CLK_PKA 143
+#define JH7100_CLK_TRNG_APB 144
+#define JH7100_CLK_OTP_APB 145
+#define JH7100_CLK_UART0_APB 146
+#define JH7100_CLK_UART0_CORE 147
+#define JH7100_CLK_UART1_APB 148
+#define JH7100_CLK_UART1_CORE 149
+#define JH7100_CLK_SPI0_APB 150
+#define JH7100_CLK_SPI0_CORE 151
+#define JH7100_CLK_SPI1_APB 152
+#define JH7100_CLK_SPI1_CORE 153
+#define JH7100_CLK_I2C0_APB 154
+#define JH7100_CLK_I2C0_CORE 155
+#define JH7100_CLK_I2C1_APB 156
+#define JH7100_CLK_I2C1_CORE 157
+#define JH7100_CLK_GPIO_APB 158
+#define JH7100_CLK_UART2_APB 159
+#define JH7100_CLK_UART2_CORE 160
+#define JH7100_CLK_UART3_APB 161
+#define JH7100_CLK_UART3_CORE 162
+#define JH7100_CLK_SPI2_APB 163
+#define JH7100_CLK_SPI2_CORE 164
+#define JH7100_CLK_SPI3_APB 165
+#define JH7100_CLK_SPI3_CORE 166
+#define JH7100_CLK_I2C2_APB 167
+#define JH7100_CLK_I2C2_CORE 168
+#define JH7100_CLK_I2C3_APB 169
+#define JH7100_CLK_I2C3_CORE 170
+#define JH7100_CLK_WDTIMER_APB 171
+#define JH7100_CLK_WDT_CORE 172
+#define JH7100_CLK_TIMER0_CORE 173
+#define JH7100_CLK_TIMER1_CORE 174
+#define JH7100_CLK_TIMER2_CORE 175
+#define JH7100_CLK_TIMER3_CORE 176
+#define JH7100_CLK_TIMER4_CORE 177
+#define JH7100_CLK_TIMER5_CORE 178
+#define JH7100_CLK_TIMER6_CORE 179
+#define JH7100_CLK_VP6INTC_APB 180
+#define JH7100_CLK_PWM_APB 181
+#define JH7100_CLK_MSI_APB 182
+#define JH7100_CLK_TEMP_APB 183
+#define JH7100_CLK_TEMP_SENSE 184
+#define JH7100_CLK_SYSERR_APB 185
+
+#define JH7100_CLK_PLL0_OUT 186
+#define JH7100_CLK_PLL1_OUT 187
+#define JH7100_CLK_PLL2_OUT 188
+
+#define JH7100_CLK_END 189
+
+#endif /* __DT_BINDINGS_CLOCK_STARFIVE_JH7100_H__ */
diff --git a/include/dt-bindings/clock/ste-db8500-clkout.h b/include/dt-bindings/clock/ste-db8500-clkout.h
new file mode 100644
index 000000000000..ca07cb2bd1bc
--- /dev/null
+++ b/include/dt-bindings/clock/ste-db8500-clkout.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __STE_CLK_DB8500_CLKOUT_H__
+#define __STE_CLK_DB8500_CLKOUT_H__
+
+#define DB8500_CLKOUT_1 0
+#define DB8500_CLKOUT_2 1
+
+#define DB8500_CLKOUT_SRC_CLK38M 0
+#define DB8500_CLKOUT_SRC_ACLK 1
+#define DB8500_CLKOUT_SRC_SYSCLK 2
+#define DB8500_CLKOUT_SRC_LCDCLK 3
+#define DB8500_CLKOUT_SRC_SDMMCCLK 4
+#define DB8500_CLKOUT_SRC_TVCLK 5
+#define DB8500_CLKOUT_SRC_TIMCLK 6
+#define DB8500_CLKOUT_SRC_CLK009 7
+
+#endif
diff --git a/include/dt-bindings/clock/stih416-clks.h b/include/dt-bindings/clock/stih416-clks.h
deleted file mode 100644
index 74302278024e..000000000000
--- a/include/dt-bindings/clock/stih416-clks.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header provides constants clk index STMicroelectronics
- * STiH416 SoC.
- */
-#ifndef _CLK_STIH416
-#define _CLK_STIH416
-
-/* CLOCKGEN A0 */
-#define CLK_ICN_REG 0
-#define CLK_ETH1_PHY 4
-
-/* CLOCKGEN A1 */
-#define CLK_ICN_IF_2 0
-#define CLK_GMAC0_PHY 3
-
-#endif
diff --git a/include/dt-bindings/clock/stm32fx-clock.h b/include/dt-bindings/clock/stm32fx-clock.h
index 1cc89c548578..b6ff9c68cb3f 100644
--- a/include/dt-bindings/clock/stm32fx-clock.h
+++ b/include/dt-bindings/clock/stm32fx-clock.h
@@ -7,10 +7,10 @@
*/
/*
- * List of clocks wich are not derived from system clock (SYSCLOCK)
+ * List of clocks which are not derived from system clock (SYSCLOCK)
*
* The index of these clocks is the secondary index of DT bindings
- * (see Documentatoin/devicetree/bindings/clock/st,stm32-rcc.txt)
+ * (see Documentation/devicetree/bindings/clock/st,stm32-rcc.yaml)
*
* e.g:
<assigned-clocks = <&rcc 1 CLK_LSE>;
diff --git a/include/dt-bindings/clock/stm32h7-clks.h b/include/dt-bindings/clock/stm32h7-clks.h
index 6637272b3242..330b39c2c303 100644
--- a/include/dt-bindings/clock/stm32h7-clks.h
+++ b/include/dt-bindings/clock/stm32h7-clks.h
@@ -126,8 +126,8 @@
#define ADC3_CK 128
#define DSI_CK 129
#define LTDC_CK 130
-#define USART8_CK 131
-#define USART7_CK 132
+#define UART8_CK 131
+#define UART7_CK 132
#define HDMICEC_CK 133
#define I2C3_CK 134
#define I2C2_CK 135
diff --git a/include/dt-bindings/clock/stm32mp1-clks.h b/include/dt-bindings/clock/stm32mp1-clks.h
index 4cdaf135829c..0a5324bcdbda 100644
--- a/include/dt-bindings/clock/stm32mp1-clks.h
+++ b/include/dt-bindings/clock/stm32mp1-clks.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
@@ -248,4 +248,27 @@
#define STM32MP1_LAST_CLK 232
+/* SCMI clock identifiers */
+#define CK_SCMI_HSE 0
+#define CK_SCMI_HSI 1
+#define CK_SCMI_CSI 2
+#define CK_SCMI_LSE 3
+#define CK_SCMI_LSI 4
+#define CK_SCMI_PLL2_Q 5
+#define CK_SCMI_PLL2_R 6
+#define CK_SCMI_MPU 7
+#define CK_SCMI_AXI 8
+#define CK_SCMI_BSEC 9
+#define CK_SCMI_CRYP1 10
+#define CK_SCMI_GPIOZ 11
+#define CK_SCMI_HASH1 12
+#define CK_SCMI_I2C4 13
+#define CK_SCMI_I2C6 14
+#define CK_SCMI_IWDG1 15
+#define CK_SCMI_RNG1 16
+#define CK_SCMI_RTC 17
+#define CK_SCMI_RTCAPB 18
+#define CK_SCMI_SPI6 19
+#define CK_SCMI_USART1 20
+
#endif /* _DT_BINDINGS_STM32MP1_CLKS_H_ */
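Unlike the CK_* indices earlier in this header, the CK_SCMI_* identifiers enumerate clocks managed by the SCMI firmware (clock protocol 0x14), so consumers reference them through the SCMI protocol node rather than through &rcc. A sketch, assuming the clock protocol node is labelled scmi_clk:

	usart1: serial@5c000000 {
		clocks = <&scmi_clk CK_SCMI_USART1>;
	};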
diff --git a/include/dt-bindings/clock/stm32mp13-clks.h b/include/dt-bindings/clock/stm32mp13-clks.h
new file mode 100644
index 000000000000..0bd7b54c65ff
--- /dev/null
+++ b/include/dt-bindings/clock/stm32mp13-clks.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2020 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#ifndef _DT_BINDINGS_STM32MP13_CLKS_H_
+#define _DT_BINDINGS_STM32MP13_CLKS_H_
+
+/* OSCILLATOR clocks */
+#define CK_HSE 0
+#define CK_CSI 1
+#define CK_LSI 2
+#define CK_LSE 3
+#define CK_HSI 4
+#define CK_HSE_DIV2 5
+
+/* PLL */
+#define PLL1 6
+#define PLL2 7
+#define PLL3 8
+#define PLL4 9
+
+/* ODF */
+#define PLL1_P 10
+#define PLL1_Q 11
+#define PLL1_R 12
+#define PLL2_P 13
+#define PLL2_Q 14
+#define PLL2_R 15
+#define PLL3_P 16
+#define PLL3_Q 17
+#define PLL3_R 18
+#define PLL4_P 19
+#define PLL4_Q 20
+#define PLL4_R 21
+
+#define PCLK1 22
+#define PCLK2 23
+#define PCLK3 24
+#define PCLK4 25
+#define PCLK5 26
+#define PCLK6 27
+
+/* SYSTEM CLOCK */
+#define CK_PER 28
+#define CK_MPU 29
+#define CK_AXI 30
+#define CK_MLAHB 31
+
+/* BASE TIMER */
+#define CK_TIMG1 32
+#define CK_TIMG2 33
+#define CK_TIMG3 34
+
+/* AUX */
+#define RTC 35
+
+/* TRACE & DEBUG clocks */
+#define CK_DBG 36
+#define CK_TRACE 37
+
+/* MCO clocks */
+#define CK_MCO1 38
+#define CK_MCO2 39
+
+/* IP clocks */
+#define SYSCFG 40
+#define VREF 41
+#define DTS 42
+#define PMBCTRL 43
+#define HDP 44
+#define IWDG2 45
+#define STGENRO 46
+#define USART1 47
+#define RTCAPB 48
+#define TZC 49
+#define TZPC 50
+#define IWDG1 51
+#define BSEC 52
+#define DMA1 53
+#define DMA2 54
+#define DMAMUX1 55
+#define DMAMUX2 56
+#define GPIOA 57
+#define GPIOB 58
+#define GPIOC 59
+#define GPIOD 60
+#define GPIOE 61
+#define GPIOF 62
+#define GPIOG 63
+#define GPIOH 64
+#define GPIOI 65
+#define CRYP1 66
+#define HASH1 67
+#define BKPSRAM 68
+#define MDMA 69
+#define CRC1 70
+#define USBH 71
+#define DMA3 72
+#define TSC 73
+#define PKA 74
+#define AXIMC 75
+#define MCE 76
+#define ETH1TX 77
+#define ETH2TX 78
+#define ETH1RX 79
+#define ETH2RX 80
+#define ETH1MAC 81
+#define ETH2MAC 82
+#define ETH1STP 83
+#define ETH2STP 84
+
+/* IP clocks with parents */
+#define SDMMC1_K 85
+#define SDMMC2_K 86
+#define ADC1_K 87
+#define ADC2_K 88
+#define FMC_K 89
+#define QSPI_K 90
+#define RNG1_K 91
+#define USBPHY_K 92
+#define STGEN_K 93
+#define SPDIF_K 94
+#define SPI1_K 95
+#define SPI2_K 96
+#define SPI3_K 97
+#define SPI4_K 98
+#define SPI5_K 99
+#define I2C1_K 100
+#define I2C2_K 101
+#define I2C3_K 102
+#define I2C4_K 103
+#define I2C5_K 104
+#define TIM2_K 105
+#define TIM3_K 106
+#define TIM4_K 107
+#define TIM5_K 108
+#define TIM6_K 109
+#define TIM7_K 110
+#define TIM12_K 111
+#define TIM13_K 112
+#define TIM14_K 113
+#define TIM1_K 114
+#define TIM8_K 115
+#define TIM15_K 116
+#define TIM16_K 117
+#define TIM17_K 118
+#define LPTIM1_K 119
+#define LPTIM2_K 120
+#define LPTIM3_K 121
+#define LPTIM4_K 122
+#define LPTIM5_K 123
+#define USART1_K 124
+#define USART2_K 125
+#define USART3_K 126
+#define UART4_K 127
+#define UART5_K 128
+#define USART6_K 129
+#define UART7_K 130
+#define UART8_K 131
+#define DFSDM_K 132
+#define FDCAN_K 133
+#define SAI1_K 134
+#define SAI2_K 135
+#define ADFSDM_K 136
+#define USBO_K 137
+#define LTDC_PX 138
+#define ETH1CK_K 139
+#define ETH1PTP_K 140
+#define ETH2CK_K 141
+#define ETH2PTP_K 142
+#define DCMIPP_K 143
+#define SAES_K 144
+#define DTS_K 145
+
+/* DDR */
+#define DDRC1 146
+#define DDRC1LP 147
+#define DDRC2 148
+#define DDRC2LP 149
+#define DDRPHYC 150
+#define DDRPHYCLP 151
+#define DDRCAPB 152
+#define DDRCAPBLP 153
+#define AXIDCG 154
+#define DDRPHYCAPB 155
+#define DDRPHYCAPBLP 156
+#define DDRPERFM 157
+
+#define ADC1 158
+#define ADC2 159
+#define SAI1 160
+#define SAI2 161
+
+#define STM32MP1_LAST_CLK 162
+
+/* SCMI clock identifiers */
+#define CK_SCMI_HSE 0
+#define CK_SCMI_HSI 1
+#define CK_SCMI_CSI 2
+#define CK_SCMI_LSE 3
+#define CK_SCMI_LSI 4
+#define CK_SCMI_HSE_DIV2 5
+#define CK_SCMI_PLL2_Q 6
+#define CK_SCMI_PLL2_R 7
+#define CK_SCMI_PLL3_P 8
+#define CK_SCMI_PLL3_Q 9
+#define CK_SCMI_PLL3_R 10
+#define CK_SCMI_PLL4_P 11
+#define CK_SCMI_PLL4_Q 12
+#define CK_SCMI_PLL4_R 13
+#define CK_SCMI_MPU 14
+#define CK_SCMI_AXI 15
+#define CK_SCMI_MLAHB 16
+#define CK_SCMI_CKPER 17
+#define CK_SCMI_PCLK1 18
+#define CK_SCMI_PCLK2 19
+#define CK_SCMI_PCLK3 20
+#define CK_SCMI_PCLK4 21
+#define CK_SCMI_PCLK5 22
+#define CK_SCMI_PCLK6 23
+#define CK_SCMI_CKTIMG1 24
+#define CK_SCMI_CKTIMG2 25
+#define CK_SCMI_CKTIMG3 26
+#define CK_SCMI_RTC 27
+#define CK_SCMI_RTCAPB 28
+
+#endif /* _DT_BINDINGS_STM32MP13_CLKS_H_ */
diff --git a/include/dt-bindings/clock/stratix10-clock.h b/include/dt-bindings/clock/stratix10-clock.h
index 08b98e20b7cc..636498f9e08e 100644
--- a/include/dt-bindings/clock/stratix10-clock.h
+++ b/include/dt-bindings/clock/stratix10-clock.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2017, Intel Corporation
*/
diff --git a/include/dt-bindings/clock/sun20i-d1-ccu.h b/include/dt-bindings/clock/sun20i-d1-ccu.h
new file mode 100644
index 000000000000..fdbfb404f92a
--- /dev/null
+++ b/include/dt-bindings/clock/sun20i-d1-ccu.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (C) 2020 huangzhenwei@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN20I_D1_CCU_H_
+#define _DT_BINDINGS_CLK_SUN20I_D1_CCU_H_
+
+#define CLK_PLL_CPUX 0
+#define CLK_PLL_DDR0 1
+#define CLK_PLL_PERIPH0_4X 2
+#define CLK_PLL_PERIPH0_2X 3
+#define CLK_PLL_PERIPH0_800M 4
+#define CLK_PLL_PERIPH0 5
+#define CLK_PLL_PERIPH0_DIV3 6
+#define CLK_PLL_VIDEO0_4X 7
+#define CLK_PLL_VIDEO0_2X 8
+#define CLK_PLL_VIDEO0 9
+#define CLK_PLL_VIDEO1_4X 10
+#define CLK_PLL_VIDEO1_2X 11
+#define CLK_PLL_VIDEO1 12
+#define CLK_PLL_VE 13
+#define CLK_PLL_AUDIO0_4X 14
+#define CLK_PLL_AUDIO0_2X 15
+#define CLK_PLL_AUDIO0 16
+#define CLK_PLL_AUDIO1 17
+#define CLK_PLL_AUDIO1_DIV2 18
+#define CLK_PLL_AUDIO1_DIV5 19
+#define CLK_CPUX 20
+#define CLK_CPUX_AXI 21
+#define CLK_CPUX_APB 22
+#define CLK_PSI_AHB 23
+#define CLK_APB0 24
+#define CLK_APB1 25
+#define CLK_MBUS 26
+#define CLK_DE 27
+#define CLK_BUS_DE 28
+#define CLK_DI 29
+#define CLK_BUS_DI 30
+#define CLK_G2D 31
+#define CLK_BUS_G2D 32
+#define CLK_CE 33
+#define CLK_BUS_CE 34
+#define CLK_VE 35
+#define CLK_BUS_VE 36
+#define CLK_BUS_DMA 37
+#define CLK_BUS_MSGBOX0 38
+#define CLK_BUS_MSGBOX1 39
+#define CLK_BUS_MSGBOX2 40
+#define CLK_BUS_SPINLOCK 41
+#define CLK_BUS_HSTIMER 42
+#define CLK_AVS 43
+#define CLK_BUS_DBG 44
+#define CLK_BUS_PWM 45
+#define CLK_BUS_IOMMU 46
+#define CLK_DRAM 47
+#define CLK_MBUS_DMA 48
+#define CLK_MBUS_VE 49
+#define CLK_MBUS_CE 50
+#define CLK_MBUS_TVIN 51
+#define CLK_MBUS_CSI 52
+#define CLK_MBUS_G2D 53
+#define CLK_MBUS_RISCV 54
+#define CLK_BUS_DRAM 55
+#define CLK_MMC0 56
+#define CLK_MMC1 57
+#define CLK_MMC2 58
+#define CLK_BUS_MMC0 59
+#define CLK_BUS_MMC1 60
+#define CLK_BUS_MMC2 61
+#define CLK_BUS_UART0 62
+#define CLK_BUS_UART1 63
+#define CLK_BUS_UART2 64
+#define CLK_BUS_UART3 65
+#define CLK_BUS_UART4 66
+#define CLK_BUS_UART5 67
+#define CLK_BUS_I2C0 68
+#define CLK_BUS_I2C1 69
+#define CLK_BUS_I2C2 70
+#define CLK_BUS_I2C3 71
+#define CLK_SPI0 72
+#define CLK_SPI1 73
+#define CLK_BUS_SPI0 74
+#define CLK_BUS_SPI1 75
+#define CLK_EMAC_25M 76
+#define CLK_BUS_EMAC 77
+#define CLK_IR_TX 78
+#define CLK_BUS_IR_TX 79
+#define CLK_BUS_GPADC 80
+#define CLK_BUS_THS 81
+#define CLK_I2S0 82
+#define CLK_I2S1 83
+#define CLK_I2S2 84
+#define CLK_I2S2_ASRC 85
+#define CLK_BUS_I2S0 86
+#define CLK_BUS_I2S1 87
+#define CLK_BUS_I2S2 88
+#define CLK_SPDIF_TX 89
+#define CLK_SPDIF_RX 90
+#define CLK_BUS_SPDIF 91
+#define CLK_DMIC 92
+#define CLK_BUS_DMIC 93
+#define CLK_AUDIO_DAC 94
+#define CLK_AUDIO_ADC 95
+#define CLK_BUS_AUDIO 96
+#define CLK_USB_OHCI0 97
+#define CLK_USB_OHCI1 98
+#define CLK_BUS_OHCI0 99
+#define CLK_BUS_OHCI1 100
+#define CLK_BUS_EHCI0 101
+#define CLK_BUS_EHCI1 102
+#define CLK_BUS_OTG 103
+#define CLK_BUS_LRADC 104
+#define CLK_BUS_DPSS_TOP 105
+#define CLK_HDMI_24M 106
+#define CLK_HDMI_CEC_32K 107
+#define CLK_HDMI_CEC 108
+#define CLK_BUS_HDMI 109
+#define CLK_MIPI_DSI 110
+#define CLK_BUS_MIPI_DSI 111
+#define CLK_TCON_LCD0 112
+#define CLK_BUS_TCON_LCD0 113
+#define CLK_TCON_TV 114
+#define CLK_BUS_TCON_TV 115
+#define CLK_TVE 116
+#define CLK_BUS_TVE_TOP 117
+#define CLK_BUS_TVE 118
+#define CLK_TVD 119
+#define CLK_BUS_TVD_TOP 120
+#define CLK_BUS_TVD 121
+#define CLK_LEDC 122
+#define CLK_BUS_LEDC 123
+#define CLK_CSI_TOP 124
+#define CLK_CSI_MCLK 125
+#define CLK_BUS_CSI 126
+#define CLK_TPADC 127
+#define CLK_BUS_TPADC 128
+#define CLK_BUS_TZMA 129
+#define CLK_DSP 130
+#define CLK_BUS_DSP_CFG 131
+#define CLK_RISCV 132
+#define CLK_RISCV_AXI 133
+#define CLK_BUS_RISCV_CFG 134
+#define CLK_FANOUT_24M 135
+#define CLK_FANOUT_12M 136
+#define CLK_FANOUT_16M 137
+#define CLK_FANOUT_25M 138
+#define CLK_FANOUT_32K 139
+#define CLK_FANOUT_27M 140
+#define CLK_FANOUT_PCLK 141
+#define CLK_FANOUT0 142
+#define CLK_FANOUT1 143
+#define CLK_FANOUT2 144
+#define CLK_BUS_CAN0 145
+#define CLK_BUS_CAN1 146
+
+#endif /* _DT_BINDINGS_CLK_SUN20I_D1_CCU_H_ */
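As in other sunxi CCU bindings, a peripheral's module clock (e.g. CLK_MMC0) is distinct from its bus gate (CLK_BUS_MMC0), and consumers typically list both. A hypothetical D1 fragment; the &ccu label and unit address are illustrative:

	mmc0: mmc@4020000 {
		clocks = <&ccu CLK_BUS_MMC0>, <&ccu CLK_MMC0>;
		clock-names = "ahb", "mmc";
	};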
diff --git a/include/dt-bindings/clock/sun20i-d1-r-ccu.h b/include/dt-bindings/clock/sun20i-d1-r-ccu.h
new file mode 100644
index 000000000000..f95c170711e5
--- /dev/null
+++ b/include/dt-bindings/clock/sun20i-d1-r-ccu.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN20I_D1_R_CCU_H_
+#define _DT_BINDINGS_CLK_SUN20I_D1_R_CCU_H_
+
+#define CLK_R_AHB 0
+
+#define CLK_BUS_R_TIMER 2
+#define CLK_BUS_R_TWD 3
+#define CLK_BUS_R_PPU 4
+#define CLK_R_IR_RX 5
+#define CLK_BUS_R_IR_RX 6
+#define CLK_BUS_R_RTC 7
+#define CLK_BUS_R_CPUCFG 8
+
+#endif /* _DT_BINDINGS_CLK_SUN20I_D1_R_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun50i-a100-ccu.h b/include/dt-bindings/clock/sun50i-a100-ccu.h
index 28dc36e1a232..06a2031d466b 100644
--- a/include/dt-bindings/clock/sun50i-a100-ccu.h
+++ b/include/dt-bindings/clock/sun50i-a100-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
*/
diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h
index 318eb15c414c..4f220ea7a23c 100644
--- a/include/dt-bindings/clock/sun50i-a64-ccu.h
+++ b/include/dt-bindings/clock/sun50i-a64-ccu.h
@@ -44,7 +44,9 @@
#define _DT_BINDINGS_CLK_SUN50I_A64_H_
#define CLK_PLL_VIDEO0 7
+#define CLK_PLL_VIDEO0_2X 8
#define CLK_PLL_PERIPH0 11
+#define CLK_PLL_MIPI 17
#define CLK_CPUX 21
#define CLK_BUS_MIPI_DSI 28
@@ -113,7 +115,7 @@
#define CLK_USB_OHCI0 91
#define CLK_USB_OHCI1 93
-
+#define CLK_DRAM 94
#define CLK_DRAM_VE 95
#define CLK_DRAM_CSI 96
#define CLK_DRAM_DEINTERLACE 97
diff --git a/include/dt-bindings/clock/sun50i-h6-ccu.h b/include/dt-bindings/clock/sun50i-h6-ccu.h
index a1545cd60e75..ef9123d81937 100644
--- a/include/dt-bindings/clock/sun50i-h6-ccu.h
+++ b/include/dt-bindings/clock/sun50i-h6-ccu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ or MIT)
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
*/
diff --git a/include/dt-bindings/clock/sun50i-h6-r-ccu.h b/include/dt-bindings/clock/sun50i-h6-r-ccu.h
index 890368d252c4..a96087abc86f 100644
--- a/include/dt-bindings/clock/sun50i-h6-r-ccu.h
+++ b/include/dt-bindings/clock/sun50i-h6-r-ccu.h
@@ -22,5 +22,6 @@
#define CLK_W1 12
#define CLK_R_APB2_RSB 13
+#define CLK_R_APB1_RTC 14
#endif /* _DT_BINDINGS_CLK_SUN50I_H6_R_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun50i-h616-ccu.h b/include/dt-bindings/clock/sun50i-h616-ccu.h
index 4fc08b0df2f3..6889405f9fec 100644
--- a/include/dt-bindings/clock/sun50i-h616-ccu.h
+++ b/include/dt-bindings/clock/sun50i-h616-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2020 Arm Ltd.
*/
@@ -111,5 +111,11 @@
#define CLK_BUS_TVE0 125
#define CLK_HDCP 126
#define CLK_BUS_HDCP 127
+#define CLK_PLL_SYSTEM_32K 128
+#define CLK_BUS_GPADC 129
+#define CLK_TCON_LCD0 130
+#define CLK_BUS_TCON_LCD0 131
+#define CLK_TCON_LCD1 132
+#define CLK_BUS_TCON_LCD1 133
#endif /* _DT_BINDINGS_CLK_SUN50I_H616_H_ */
diff --git a/include/dt-bindings/clock/sun55i-a523-ccu.h b/include/dt-bindings/clock/sun55i-a523-ccu.h
new file mode 100644
index 000000000000..54808fcfd556
--- /dev/null
+++ b/include/dt-bindings/clock/sun55i-a523-ccu.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN55I_A523_CCU_H_
+#define _DT_BINDINGS_CLK_SUN55I_A523_CCU_H_
+
+#define CLK_PLL_DDR0 0
+#define CLK_PLL_PERIPH0_4X 1
+#define CLK_PLL_PERIPH0_2X 2
+#define CLK_PLL_PERIPH0_800M 3
+#define CLK_PLL_PERIPH0_480M 4
+#define CLK_PLL_PERIPH0_600M 5
+#define CLK_PLL_PERIPH0_400M 6
+#define CLK_PLL_PERIPH0_300M 7
+#define CLK_PLL_PERIPH0_200M 8
+#define CLK_PLL_PERIPH0_160M 9
+#define CLK_PLL_PERIPH0_150M 10
+#define CLK_PLL_PERIPH1_4X 11
+#define CLK_PLL_PERIPH1_2X 12
+#define CLK_PLL_PERIPH1_800M 13
+#define CLK_PLL_PERIPH1_480M 14
+#define CLK_PLL_PERIPH1_600M 15
+#define CLK_PLL_PERIPH1_400M 16
+#define CLK_PLL_PERIPH1_300M 17
+#define CLK_PLL_PERIPH1_200M 18
+#define CLK_PLL_PERIPH1_160M 19
+#define CLK_PLL_PERIPH1_150M 20
+#define CLK_PLL_GPU 21
+#define CLK_PLL_VIDEO0_8X 22
+#define CLK_PLL_VIDEO0_4X 23
+#define CLK_PLL_VIDEO0_3X 24
+#define CLK_PLL_VIDEO1_8X 25
+#define CLK_PLL_VIDEO1_4X 26
+#define CLK_PLL_VIDEO1_3X 27
+#define CLK_PLL_VIDEO2_8X 28
+#define CLK_PLL_VIDEO2_4X 29
+#define CLK_PLL_VIDEO2_3X 30
+#define CLK_PLL_VIDEO3_8X 31
+#define CLK_PLL_VIDEO3_4X 32
+#define CLK_PLL_VIDEO3_3X 33
+#define CLK_PLL_VE 34
+#define CLK_PLL_AUDIO0_4X 35
+#define CLK_PLL_AUDIO0_2X 36
+#define CLK_PLL_AUDIO0 37
+#define CLK_PLL_NPU_4X 38
+#define CLK_PLL_NPU_2X 39
+#define CLK_PLL_NPU 40
+#define CLK_AHB 41
+#define CLK_APB0 42
+#define CLK_APB1 43
+#define CLK_MBUS 44
+#define CLK_DE 45
+#define CLK_BUS_DE 46
+#define CLK_DI 47
+#define CLK_BUS_DI 48
+#define CLK_G2D 49
+#define CLK_BUS_G2D 50
+#define CLK_GPU 51
+#define CLK_BUS_GPU 52
+#define CLK_CE 53
+#define CLK_BUS_CE 54
+#define CLK_BUS_CE_SYS 55
+#define CLK_VE 56
+#define CLK_BUS_VE 57
+#define CLK_BUS_DMA 58
+#define CLK_BUS_MSGBOX 59
+#define CLK_BUS_SPINLOCK 60
+#define CLK_HSTIMER0 61
+#define CLK_HSTIMER1 62
+#define CLK_HSTIMER2 63
+#define CLK_HSTIMER3 64
+#define CLK_HSTIMER4 65
+#define CLK_HSTIMER5 66
+#define CLK_BUS_HSTIMER 67
+#define CLK_BUS_DBG 68
+#define CLK_BUS_PWM0 69
+#define CLK_BUS_PWM1 70
+#define CLK_IOMMU 71
+#define CLK_BUS_IOMMU 72
+#define CLK_DRAM 73
+#define CLK_MBUS_DMA 74
+#define CLK_MBUS_VE 75
+#define CLK_MBUS_CE 76
+#define CLK_MBUS_CSI 77
+#define CLK_MBUS_ISP 78
+#define CLK_MBUS_EMAC1 79
+#define CLK_BUS_DRAM 80
+#define CLK_NAND0 81
+#define CLK_NAND1 82
+#define CLK_BUS_NAND 83
+#define CLK_MMC0 84
+#define CLK_MMC1 85
+#define CLK_MMC2 86
+#define CLK_BUS_SYSDAP 87
+#define CLK_BUS_MMC0 88
+#define CLK_BUS_MMC1 89
+#define CLK_BUS_MMC2 90
+#define CLK_BUS_UART0 91
+#define CLK_BUS_UART1 92
+#define CLK_BUS_UART2 93
+#define CLK_BUS_UART3 94
+#define CLK_BUS_UART4 95
+#define CLK_BUS_UART5 96
+#define CLK_BUS_UART6 97
+#define CLK_BUS_UART7 98
+#define CLK_BUS_I2C0 99
+#define CLK_BUS_I2C1 100
+#define CLK_BUS_I2C2 101
+#define CLK_BUS_I2C3 102
+#define CLK_BUS_I2C4 103
+#define CLK_BUS_I2C5 104
+#define CLK_BUS_CAN 105
+#define CLK_SPI0 106
+#define CLK_SPI1 107
+#define CLK_SPI2 108
+#define CLK_SPIFC 109
+#define CLK_BUS_SPI0 110
+#define CLK_BUS_SPI1 111
+#define CLK_BUS_SPI2 112
+#define CLK_BUS_SPIFC 113
+#define CLK_EMAC0_25M 114
+#define CLK_EMAC1_25M 115
+#define CLK_BUS_EMAC0 116
+#define CLK_BUS_EMAC1 117
+#define CLK_IR_RX 118
+#define CLK_BUS_IR_RX 119
+#define CLK_IR_TX 120
+#define CLK_BUS_IR_TX 121
+#define CLK_GPADC0 122
+#define CLK_GPADC1 123
+#define CLK_BUS_GPADC0 124
+#define CLK_BUS_GPADC1 125
+#define CLK_BUS_THS 126
+#define CLK_USB_OHCI0 127
+#define CLK_USB_OHCI1 128
+#define CLK_BUS_OHCI0 129
+#define CLK_BUS_OHCI1 130
+#define CLK_BUS_EHCI0 131
+#define CLK_BUS_EHCI1 132
+#define CLK_BUS_OTG 133
+#define CLK_BUS_LRADC 134
+#define CLK_PCIE_AUX 135
+#define CLK_BUS_DISPLAY0_TOP 136
+#define CLK_BUS_DISPLAY1_TOP 137
+#define CLK_HDMI_24M 138
+#define CLK_HDMI_CEC_32K 139
+#define CLK_HDMI_CEC 140
+#define CLK_BUS_HDMI 141
+#define CLK_MIPI_DSI0 142
+#define CLK_MIPI_DSI1 143
+#define CLK_BUS_MIPI_DSI0 144
+#define CLK_BUS_MIPI_DSI1 145
+#define CLK_TCON_LCD0 146
+#define CLK_TCON_LCD1 147
+#define CLK_TCON_LCD2 148
+#define CLK_COMBOPHY_DSI0 149
+#define CLK_COMBOPHY_DSI1 150
+#define CLK_BUS_TCON_LCD0 151
+#define CLK_BUS_TCON_LCD1 152
+#define CLK_BUS_TCON_LCD2 153
+#define CLK_TCON_TV0 154
+#define CLK_TCON_TV1 155
+#define CLK_BUS_TCON_TV0 156
+#define CLK_BUS_TCON_TV1 157
+#define CLK_EDP 158
+#define CLK_BUS_EDP 159
+#define CLK_LEDC 160
+#define CLK_BUS_LEDC 161
+#define CLK_CSI_TOP 162
+#define CLK_CSI_MCLK0 163
+#define CLK_CSI_MCLK1 164
+#define CLK_CSI_MCLK2 165
+#define CLK_CSI_MCLK3 166
+#define CLK_BUS_CSI 167
+#define CLK_ISP 168
+#define CLK_DSP 169
+#define CLK_FANOUT_24M 170
+#define CLK_FANOUT_12M 171
+#define CLK_FANOUT_16M 172
+#define CLK_FANOUT_25M 173
+#define CLK_FANOUT_27M 174
+#define CLK_FANOUT_PCLK 175
+#define CLK_FANOUT0 176
+#define CLK_FANOUT1 177
+#define CLK_FANOUT2 178
+#define CLK_NPU 179
+
+#endif /* _DT_BINDINGS_CLK_SUN55I_A523_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun55i-a523-mcu-ccu.h b/include/dt-bindings/clock/sun55i-a523-mcu-ccu.h
new file mode 100644
index 000000000000..6efc6bc7e11a
--- /dev/null
+++ b/include/dt-bindings/clock/sun55i-a523-mcu-ccu.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2025 Chen-Yu Tsai <wens@csie.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN55I_A523_MCU_CCU_H_
+#define _DT_BINDINGS_CLK_SUN55I_A523_MCU_CCU_H_
+
+#define CLK_MCU_PLL_AUDIO1 0
+#define CLK_MCU_PLL_AUDIO1_DIV2 1
+#define CLK_MCU_PLL_AUDIO1_DIV5 2
+#define CLK_MCU_AUDIO_OUT 3
+#define CLK_MCU_DSP 4
+#define CLK_MCU_I2S0 5
+#define CLK_MCU_I2S1 6
+#define CLK_MCU_I2S2 7
+#define CLK_MCU_I2S3 8
+#define CLK_MCU_I2S3_ASRC 9
+#define CLK_BUS_MCU_I2S0 10
+#define CLK_BUS_MCU_I2S1 11
+#define CLK_BUS_MCU_I2S2 12
+#define CLK_BUS_MCU_I2S3 13
+#define CLK_MCU_SPDIF_TX 14
+#define CLK_MCU_SPDIF_RX 15
+#define CLK_BUS_MCU_SPDIF 16
+#define CLK_MCU_DMIC 17
+#define CLK_BUS_MCU_DMIC 18
+#define CLK_MCU_AUDIO_CODEC_DAC 19
+#define CLK_MCU_AUDIO_CODEC_ADC 20
+#define CLK_BUS_MCU_AUDIO_CODEC 21
+#define CLK_BUS_MCU_DSP_MSGBOX 22
+#define CLK_BUS_MCU_DSP_CFG 23
+#define CLK_BUS_MCU_NPU_HCLK 24
+#define CLK_BUS_MCU_NPU_ACLK 25
+#define CLK_MCU_TIMER0 26
+#define CLK_MCU_TIMER1 27
+#define CLK_MCU_TIMER2 28
+#define CLK_MCU_TIMER3 29
+#define CLK_MCU_TIMER4 30
+#define CLK_MCU_TIMER5 31
+#define CLK_BUS_MCU_TIMER 32
+#define CLK_BUS_MCU_DMA 33
+#define CLK_MCU_TZMA0 34
+#define CLK_MCU_TZMA1 35
+#define CLK_BUS_MCU_PUBSRAM 36
+#define CLK_MCU_MBUS_DMA 37
+#define CLK_MCU_MBUS 38
+#define CLK_MCU_RISCV 39
+#define CLK_BUS_MCU_RISCV_CFG 40
+#define CLK_BUS_MCU_RISCV_MSGBOX 41
+#define CLK_MCU_PWM0 42
+#define CLK_BUS_MCU_PWM0 43
+
+#endif /* _DT_BINDINGS_CLK_SUN55I_A523_MCU_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun55i-a523-r-ccu.h b/include/dt-bindings/clock/sun55i-a523-r-ccu.h
new file mode 100644
index 000000000000..365647499b9a
--- /dev/null
+++ b/include/dt-bindings/clock/sun55i-a523-r-ccu.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN55I_A523_R_CCU_H_
+#define _DT_BINDINGS_CLK_SUN55I_A523_R_CCU_H_
+
+#define CLK_R_AHB 0
+#define CLK_R_APB0 1
+#define CLK_R_APB1 2
+#define CLK_R_TIMER0 3
+#define CLK_R_TIMER1 4
+#define CLK_R_TIMER2 5
+#define CLK_BUS_R_TIMER 6
+#define CLK_BUS_R_TWD 7
+#define CLK_R_PWMCTRL 8
+#define CLK_BUS_R_PWMCTRL 9
+#define CLK_R_SPI 10
+#define CLK_BUS_R_SPI 11
+#define CLK_BUS_R_SPINLOCK 12
+#define CLK_BUS_R_MSGBOX 13
+#define CLK_BUS_R_UART0 14
+#define CLK_BUS_R_UART1 15
+#define CLK_BUS_R_I2C0 16
+#define CLK_BUS_R_I2C1 17
+#define CLK_BUS_R_I2C2 18
+#define CLK_BUS_R_PPU0 19
+#define CLK_BUS_R_PPU1 20
+#define CLK_BUS_R_CPU_BIST 21
+#define CLK_R_IR_RX 22
+#define CLK_BUS_R_IR_RX 23
+#define CLK_BUS_R_DMA 24
+#define CLK_BUS_R_RTC 25
+#define CLK_BUS_R_CPUCFG 26
+
+#endif /* _DT_BINDINGS_CLK_SUN55I_A523_R_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun6i-rtc.h b/include/dt-bindings/clock/sun6i-rtc.h
new file mode 100644
index 000000000000..3bd3aa3d57ce
--- /dev/null
+++ b/include/dt-bindings/clock/sun6i-rtc.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+
+#ifndef _DT_BINDINGS_CLK_SUN6I_RTC_H_
+#define _DT_BINDINGS_CLK_SUN6I_RTC_H_
+
+#define CLK_OSC32K 0
+#define CLK_OSC32K_FANOUT 1
+#define CLK_IOSC 2
+
+#endif /* _DT_BINDINGS_CLK_SUN6I_RTC_H_ */
diff --git a/include/dt-bindings/clock/sun8i-h3-ccu.h b/include/dt-bindings/clock/sun8i-h3-ccu.h
index 30d2d15373a2..5d4ada2c22e6 100644
--- a/include/dt-bindings/clock/sun8i-h3-ccu.h
+++ b/include/dt-bindings/clock/sun8i-h3-ccu.h
@@ -126,7 +126,7 @@
#define CLK_USB_OHCI1 93
#define CLK_USB_OHCI2 94
#define CLK_USB_OHCI3 95
-
+#define CLK_DRAM 96
#define CLK_DRAM_VE 97
#define CLK_DRAM_CSI 98
#define CLK_DRAM_DEINTERLACE 99
diff --git a/include/dt-bindings/clock/sun8i-v3s-ccu.h b/include/dt-bindings/clock/sun8i-v3s-ccu.h
index 014ac6123d17..c4055629c9f9 100644
--- a/include/dt-bindings/clock/sun8i-v3s-ccu.h
+++ b/include/dt-bindings/clock/sun8i-v3s-ccu.h
@@ -96,7 +96,7 @@
#define CLK_TCON0 64
#define CLK_CSI_MISC 65
#define CLK_CSI0_MCLK 66
-#define CLK_CSI1_SCLK 67
+#define CLK_CSI_SCLK 67
#define CLK_CSI1_MCLK 68
#define CLK_VE 69
#define CLK_AC_DIG 70
diff --git a/include/dt-bindings/clock/suniv-ccu-f1c100s.h b/include/dt-bindings/clock/suniv-ccu-f1c100s.h
index f5ac155c9c70..d7570765f424 100644
--- a/include/dt-bindings/clock/suniv-ccu-f1c100s.h
+++ b/include/dt-bindings/clock/suniv-ccu-f1c100s.h
@@ -67,4 +67,6 @@
#define CLK_CODEC 65
#define CLK_AVS 66
+#define CLK_IR 67
+
#endif
diff --git a/include/dt-bindings/clock/sunplus,sp7021-clkc.h b/include/dt-bindings/clock/sunplus,sp7021-clkc.h
new file mode 100644
index 000000000000..cd84321eb2b5
--- /dev/null
+++ b/include/dt-bindings/clock/sunplus,sp7021-clkc.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+#ifndef _DT_BINDINGS_CLOCK_SUNPLUS_SP7021_H
+#define _DT_BINDINGS_CLOCK_SUNPLUS_SP7021_H
+
+/* gates */
+#define CLK_RTC 0
+#define CLK_OTPRX 1
+#define CLK_NOC 2
+#define CLK_BR 3
+#define CLK_SPIFL 4
+#define CLK_PERI0 5
+#define CLK_PERI1 6
+#define CLK_STC0 7
+#define CLK_STC_AV0 8
+#define CLK_STC_AV1 9
+#define CLK_STC_AV2 10
+#define CLK_UA0 11
+#define CLK_UA1 12
+#define CLK_UA2 13
+#define CLK_UA3 14
+#define CLK_UA4 15
+#define CLK_HWUA 16
+#define CLK_DDC0 17
+#define CLK_UADMA 18
+#define CLK_CBDMA0 19
+#define CLK_CBDMA1 20
+#define CLK_SPI_COMBO_0 21
+#define CLK_SPI_COMBO_1 22
+#define CLK_SPI_COMBO_2 23
+#define CLK_SPI_COMBO_3 24
+#define CLK_AUD 25
+#define CLK_USBC0 26
+#define CLK_USBC1 27
+#define CLK_UPHY0 28
+#define CLK_UPHY1 29
+#define CLK_I2CM0 30
+#define CLK_I2CM1 31
+#define CLK_I2CM2 32
+#define CLK_I2CM3 33
+#define CLK_PMC 34
+#define CLK_CARD_CTL0 35
+#define CLK_CARD_CTL1 36
+#define CLK_CARD_CTL4 37
+#define CLK_BCH 38
+#define CLK_DDFCH 39
+#define CLK_CSIIW0 40
+#define CLK_CSIIW1 41
+#define CLK_MIPICSI0 42
+#define CLK_MIPICSI1 43
+#define CLK_HDMI_TX 44
+#define CLK_VPOST 45
+#define CLK_TGEN 46
+#define CLK_DMIX 47
+#define CLK_TCON 48
+#define CLK_GPIO 49
+#define CLK_MAILBOX 50
+#define CLK_SPIND 51
+#define CLK_I2C2CBUS 52
+#define CLK_SEC 53
+#define CLK_DVE 54
+#define CLK_GPOST0 55
+#define CLK_OSD0 56
+#define CLK_DISP_PWM 57
+#define CLK_UADBG 58
+#define CLK_FIO_CTL 59
+#define CLK_FPGA 60
+#define CLK_L2SW 61
+#define CLK_ICM 62
+#define CLK_AXI_GLOBAL 63
+
+/* plls */
+#define PLL_A 64
+#define PLL_E 65
+#define PLL_E_2P5 66
+#define PLL_E_25 67
+#define PLL_E_112P5 68
+#define PLL_F 69
+#define PLL_TV 70
+#define PLL_TV_A 71
+#define PLL_SYS 72
+
+#define CLK_MAX 73
+
+#endif
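Gates (0-63) and PLLs (64-72) share a single index space here, bounded by CLK_MAX, so one clock controller node serves both kinds of clock. A sketch with an illustrative &clkc label and unit address:

	uart0: serial@9c000900 {
		clocks = <&clkc CLK_UA0>;
	};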
diff --git a/include/dt-bindings/clock/tegra234-clock.h b/include/dt-bindings/clock/tegra234-clock.h
index 2c82072950ee..c360455d02ee 100644
--- a/include/dt-bindings/clock/tegra234-clock.h
+++ b/include/dt-bindings/clock/tegra234-clock.h
@@ -1,14 +1,903 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. */
+/* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. */
#ifndef DT_BINDINGS_CLOCK_TEGRA234_CLOCK_H
#define DT_BINDINGS_CLOCK_TEGRA234_CLOCK_H
+/**
+ * @file
+ * @defgroup bpmp_clock_ids Clock IDs
+ * @{
+ */
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_ACTMON */
+#define TEGRA234_CLK_ACTMON 1U
+/** @brief output of gate CLK_ENB_ADSP */
+#define TEGRA234_CLK_ADSP 2U
+/** @brief output of gate CLK_ENB_ADSPNEON */
+#define TEGRA234_CLK_ADSPNEON 3U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AHUB */
+#define TEGRA234_CLK_AHUB 4U
+/** @brief output of gate CLK_ENB_APB2APE */
+#define TEGRA234_CLK_APB2APE 5U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_APE */
+#define TEGRA234_CLK_APE 6U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AUD_MCLK */
+#define TEGRA234_CLK_AUD_MCLK 7U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AXI_CBB */
+#define TEGRA234_CLK_AXI_CBB 8U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_CAN1 */
+#define TEGRA234_CLK_CAN1 9U
+/** @brief output of gate CLK_ENB_CAN1_HOST */
+#define TEGRA234_CLK_CAN1_HOST 10U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_CAN2 */
+#define TEGRA234_CLK_CAN2 11U
+/** @brief output of gate CLK_ENB_CAN2_HOST */
+#define TEGRA234_CLK_CAN2_HOST 12U
+/** @brief output of divider CLK_RST_CONTROLLER_CLK_M_DIVIDE */
+#define TEGRA234_CLK_CLK_M 14U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC1 */
+#define TEGRA234_CLK_DMIC1 15U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC2 */
+#define TEGRA234_CLK_DMIC2 16U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC3 */
+#define TEGRA234_CLK_DMIC3 17U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC4 */
+#define TEGRA234_CLK_DMIC4 18U
+/** @brief output of gate CLK_ENB_DPAUX */
+#define TEGRA234_CLK_DPAUX 19U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVJPG1 */
+#define TEGRA234_CLK_NVJPG1 20U
+/**
+ * @brief output of mux controlled by CLK_RST_CONTROLLER_ACLK_BURST_POLICY
+ * divided by the divider controlled by ACLK_CLK_DIVISOR in
+ * CLK_RST_CONTROLLER_SUPER_ACLK_DIVIDER
+ */
+#define TEGRA234_CLK_ACLK 21U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_MSS_ENCRYPT switch divider output */
+#define TEGRA234_CLK_MSS_ENCRYPT 22U
+/** @brief clock recovered from EAVB input */
+#define TEGRA234_CLK_EQOS_RX_INPUT 23U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_AON_APB switch divider output */
+#define TEGRA234_CLK_AON_APB 25U
+/** @brief CLK_RST_CONTROLLER_AON_NIC_RATE divider output */
+#define TEGRA234_CLK_AON_NIC 26U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_AON_CPU_NIC switch divider output */
+#define TEGRA234_CLK_AON_CPU_NIC 27U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLA1_BASE for use by audio clocks */
+#define TEGRA234_CLK_PLLA1 28U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK1 */
+#define TEGRA234_CLK_DSPK1 29U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK2 */
+#define TEGRA234_CLK_DSPK2 30U
+/**
+ * @brief controls the EMC clock frequency.
+ * @details Doing a clk_set_rate on this clock will select the
+ * appropriate clock source, program the source rate and execute a
+ * specific sequence to switch to the new clock source for both memory
+ * controllers. This can be used to control the balance between memory
+ * throughput and memory controller power.
+ */
+#define TEGRA234_CLK_EMC 31U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_AXI_CLK_0 divider gated output */
+#define TEGRA234_CLK_EQOS_AXI 32U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_PTP_REF_CLK_0 divider gated output */
+#define TEGRA234_CLK_EQOS_PTP_REF 33U
+/** @brief output of gate CLK_ENB_EQOS_RX */
+#define TEGRA234_CLK_EQOS_RX 34U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_TX_CLK divider gated output */
+#define TEGRA234_CLK_EQOS_TX 35U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH1 */
+#define TEGRA234_CLK_EXTPERIPH1 36U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH2 */
+#define TEGRA234_CLK_EXTPERIPH2 37U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH3 */
+#define TEGRA234_CLK_EXTPERIPH3 38U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH4 */
+#define TEGRA234_CLK_EXTPERIPH4 39U
/** @brief output of gate CLK_ENB_FUSE */
-#define TEGRA234_CLK_FUSE 40
+#define TEGRA234_CLK_FUSE 40U
+/** @brief output of GPU GPC0 clkGen (in 1x mode same rate as GPC0 MUX2 out) */
+#define TEGRA234_CLK_GPC0CLK 41U
+/** @brief TODO */
+#define TEGRA234_CLK_GPU_PWR 42U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HDA2CODEC_2X */
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HOST1X */
+#define TEGRA234_CLK_HOST1X 46U
+/** @brief xusb_hs_hsicp_clk */
+#define TEGRA234_CLK_XUSB_HS_HSICP 47U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C1 */
+#define TEGRA234_CLK_I2C1 48U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C2 */
+#define TEGRA234_CLK_I2C2 49U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C3 */
+#define TEGRA234_CLK_I2C3 50U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C4 */
+#define TEGRA234_CLK_I2C4 51U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C6 */
+#define TEGRA234_CLK_I2C6 52U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C7 */
+#define TEGRA234_CLK_I2C7 53U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C8 */
+#define TEGRA234_CLK_I2C8 54U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C9 */
+#define TEGRA234_CLK_I2C9 55U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S1 */
+#define TEGRA234_CLK_I2S1 56U
+/** @brief clock recovered from I2S1 input */
+#define TEGRA234_CLK_I2S1_SYNC_INPUT 57U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S2 */
+#define TEGRA234_CLK_I2S2 58U
+/** @brief clock recovered from I2S2 input */
+#define TEGRA234_CLK_I2S2_SYNC_INPUT 59U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S3 */
+#define TEGRA234_CLK_I2S3 60U
+/** @brief clock recovered from I2S3 input */
+#define TEGRA234_CLK_I2S3_SYNC_INPUT 61U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S4 */
+#define TEGRA234_CLK_I2S4 62U
+/** @brief clock recovered from I2S4 input */
+#define TEGRA234_CLK_I2S4_SYNC_INPUT 63U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S5 */
+#define TEGRA234_CLK_I2S5 64U
+/** @brief clock recovered from I2S5 input */
+#define TEGRA234_CLK_I2S5_SYNC_INPUT 65U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S6 */
+#define TEGRA234_CLK_I2S6 66U
+/** @brief clock recovered from I2S6 input */
+#define TEGRA234_CLK_I2S6_SYNC_INPUT 67U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_ISP */
+#define TEGRA234_CLK_ISP 69U
+/** @brief Monitored branch of EQOS_RX clock */
+#define TEGRA234_CLK_EQOS_RX_M 70U
+/** @brief CLK_RST_CONTROLLER_MAUDCLK_OUT_SWITCH_DIVIDER switch divider output (maudclk) */
+#define TEGRA234_CLK_MAUD 71U
+/** @brief output of gate CLK_ENB_MIPI_CAL */
+#define TEGRA234_CLK_MIPI_CAL 72U
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_CORE_PLL_FIXED */
+#define TEGRA234_CLK_MPHY_CORE_PLL_FIXED 73U
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_ANA */
+#define TEGRA234_CLK_MPHY_L0_RX_ANA 74U
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_LS_BIT */
+#define TEGRA234_CLK_MPHY_L0_RX_LS_BIT 75U
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_SYMB */
+#define TEGRA234_CLK_MPHY_L0_RX_SYMB 76U
+/** @brief output of gate CLK_ENB_MPHY_L0_TX_LS_3XBIT */
+#define TEGRA234_CLK_MPHY_L0_TX_LS_3XBIT 77U
+/** @brief output of gate CLK_ENB_MPHY_L0_TX_SYMB */
+#define TEGRA234_CLK_MPHY_L0_TX_SYMB 78U
+/** @brief output of gate CLK_ENB_MPHY_L1_RX_ANA */
+#define TEGRA234_CLK_MPHY_L1_RX_ANA 79U
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_TX_1MHZ_REF */
+#define TEGRA234_CLK_MPHY_TX_1MHZ_REF 80U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVCSI */
+#define TEGRA234_CLK_NVCSI 81U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVCSILP */
+#define TEGRA234_CLK_NVCSILP 82U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDEC */
+#define TEGRA234_CLK_NVDEC 83U
+/** @brief CLK_RST_CONTROLLER_HUBCLK_OUT_SWITCH_DIVIDER switch divider output (hubclk) */
+#define TEGRA234_CLK_HUB 84U
+/** @brief CLK_RST_CONTROLLER_DISPCLK_SWITCH_DIVIDER switch divider output (dispclk) */
+#define TEGRA234_CLK_DISP 85U
+/** @brief RG_CLK_CTRL__0_DIV divider output (nvdisplay_p0_clk) */
+#define TEGRA234_CLK_NVDISPLAY_P0 86U
+/** @brief RG_CLK_CTRL__1_DIV divider output (nvdisplay_p1_clk) */
+#define TEGRA234_CLK_NVDISPLAY_P1 87U
+/** @brief DSC_CLK (DISPCLK ÷ 3) */
+#define TEGRA234_CLK_DSC 88U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVENC */
+#define TEGRA234_CLK_NVENC 89U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVJPG */
+#define TEGRA234_CLK_NVJPG 90U
+/** @brief input from Tegra's XTAL_IN */
+#define TEGRA234_CLK_OSC 91U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_AON_TOUCH switch divider output */
+#define TEGRA234_CLK_AON_TOUCH 92U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLA_BASE for use by audio clocks */
+#define TEGRA234_CLK_PLLA 93U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLAON_BASE for use by IP blocks in the AON domain */
+#define TEGRA234_CLK_PLLAON 94U
+/** @brief Fixed 100MHz PLL for PCIe, SATA and superspeed USB */
+#define TEGRA234_CLK_PLLE 100U
+/** @brief PLLP vco output */
+#define TEGRA234_CLK_PLLP 101U
+/** @brief PLLP clk output */
+#define TEGRA234_CLK_PLLP_OUT0 102U
+/** @brief Fixed frequency 960MHz PLL for USB and EAVB */
+#define TEGRA234_CLK_UTMIP_PLL 103U
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLA_OUT */
+#define TEGRA234_CLK_PLLA_OUT0 104U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM1 */
+#define TEGRA234_CLK_PWM1 105U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM2 */
+#define TEGRA234_CLK_PWM2 106U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM3 */
+#define TEGRA234_CLK_PWM3 107U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM4 */
+#define TEGRA234_CLK_PWM4 108U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM5 */
+#define TEGRA234_CLK_PWM5 109U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM6 */
+#define TEGRA234_CLK_PWM6 110U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM7 */
+#define TEGRA234_CLK_PWM7 111U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM8 */
+#define TEGRA234_CLK_PWM8 112U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_RCE_CPU_NIC output */
+#define TEGRA234_CLK_RCE_CPU_NIC 113U
+/** @brief CLK_RST_CONTROLLER_RCE_NIC_RATE divider output */
+#define TEGRA234_CLK_RCE_NIC 114U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_AON_I2C_SLOW switch divider output */
+#define TEGRA234_CLK_AON_I2C_SLOW 117U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SCE_CPU_NIC */
+#define TEGRA234_CLK_SCE_CPU_NIC 118U
+/** @brief output of divider CLK_RST_CONTROLLER_SCE_NIC_RATE */
+#define TEGRA234_CLK_SCE_NIC 119U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC1 */
+#define TEGRA234_CLK_SDMMC1 120U
+/** @brief Logical clk for setting the UPHY PLL3 rate */
+#define TEGRA234_CLK_UPHY_PLL3 121U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC4 */
-#define TEGRA234_CLK_SDMMC4 123
+#define TEGRA234_CLK_SDMMC4 123U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SE switch divider gated output */
+#define TEGRA234_CLK_SE 124U
+/** @brief VPLL select for sor0_ref clk driven by disp_2clk_sor0_head_sel signal */
+#define TEGRA234_CLK_SOR0_PLL_REF 125U
+/** @brief Output of mux controlled by disp_2clk_sor0_pll_ref_clk_safe signal (sor0_ref_clk) */
+#define TEGRA234_CLK_SOR0_REF 126U
+/** @brief VPLL select for sor1_ref clk driven by disp_2clk_sor0_head_sel signal */
+#define TEGRA234_CLK_SOR1_PLL_REF 127U
+/** @brief SOR_PLL_REF_CLK_CTRL__0_DIV divider output */
+#define TEGRA234_CLK_PRE_SOR0_REF 128U
+/** @brief Output of mux controlled by disp_2clk_sor1_pll_ref_clk_safe signal (sor1_ref_clk) */
+#define TEGRA234_CLK_SOR1_REF 129U
+/** @brief SOR_PLL_REF_CLK_CTRL__1_DIV divider output */
+#define TEGRA234_CLK_PRE_SOR1_REF 130U
+/** @brief output of gate CLK_ENB_SOR_SAFE */
+#define TEGRA234_CLK_SOR_SAFE 131U
+/** @brief SOR_CLK_CTRL__0_DIV divider output */
+#define TEGRA234_CLK_SOR0_DIV 132U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC5 */
+#define TEGRA234_CLK_DMIC5 134U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI1 */
+#define TEGRA234_CLK_SPI1 135U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI2 */
+#define TEGRA234_CLK_SPI2 136U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI3 */
+#define TEGRA234_CLK_SPI3 137U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C_SLOW */
+#define TEGRA234_CLK_I2C_SLOW 138U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC1 */
+#define TEGRA234_CLK_SYNC_DMIC1 139U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC2 */
+#define TEGRA234_CLK_SYNC_DMIC2 140U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC3 */
+#define TEGRA234_CLK_SYNC_DMIC3 141U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC4 */
+#define TEGRA234_CLK_SYNC_DMIC4 142U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK1 */
+#define TEGRA234_CLK_SYNC_DSPK1 143U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK2 */
+#define TEGRA234_CLK_SYNC_DSPK2 144U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S1 */
+#define TEGRA234_CLK_SYNC_I2S1 145U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S2 */
+#define TEGRA234_CLK_SYNC_I2S2 146U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S3 */
+#define TEGRA234_CLK_SYNC_I2S3 147U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S4 */
+#define TEGRA234_CLK_SYNC_I2S4 148U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S5 */
+#define TEGRA234_CLK_SYNC_I2S5 149U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S6 */
+#define TEGRA234_CLK_SYNC_I2S6 150U
+/** @brief controls MPHY_FORCE_LS_MODE upon enable & disable */
+#define TEGRA234_CLK_MPHY_FORCE_LS_MODE 151U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TACH0 */
+#define TEGRA234_CLK_TACH0 152U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSEC */
+#define TEGRA234_CLK_TSEC 153U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PKA */
+#define TEGRA234_CLK_TSEC_PKA 154U
/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTA */
-#define TEGRA234_CLK_UARTA 155
+#define TEGRA234_CLK_UARTA 155U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTB */
+#define TEGRA234_CLK_UARTB 156U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTC */
+#define TEGRA234_CLK_UARTC 157U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTD */
+#define TEGRA234_CLK_UARTD 158U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTE */
+#define TEGRA234_CLK_UARTE 159U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTF */
+#define TEGRA234_CLK_UARTF 160U
+/** @brief output of gate CLK_ENB_PEX1_CORE_6 */
+#define TEGRA234_CLK_PEX1_C6_CORE 161U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UART_FST_MIPI_CAL */
+#define TEGRA234_CLK_UART_FST_MIPI_CAL 162U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UFSDEV_REF */
+#define TEGRA234_CLK_UFSDEV_REF 163U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UFSHC_CG_SYS */
+#define TEGRA234_CLK_UFSHC 164U
+/** @brief output of gate CLK_ENB_USB2_TRK */
+#define TEGRA234_CLK_USB2_TRK 165U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VI */
+#define TEGRA234_CLK_VI 166U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VIC */
+#define TEGRA234_CLK_VIC 167U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_CSITE switch divider output */
+#define TEGRA234_CLK_CSITE 168U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_IST switch divider output */
+#define TEGRA234_CLK_IST 169U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_IST_JTAG_REG_CLK_SEL */
+#define TEGRA234_CLK_JTAG_INTFC_PRE_CG 170U
+/** @brief output of gate CLK_ENB_PEX2_CORE_7 */
+#define TEGRA234_CLK_PEX2_C7_CORE 171U
+/** @brief output of gate CLK_ENB_PEX2_CORE_8 */
+#define TEGRA234_CLK_PEX2_C8_CORE 172U
+/** @brief output of gate CLK_ENB_PEX2_CORE_9 */
+#define TEGRA234_CLK_PEX2_C9_CORE 173U
+/** @brief dla0_falcon_clk */
+#define TEGRA234_CLK_DLA0_FALCON 174U
+/** @brief dla0_core_clk */
+#define TEGRA234_CLK_DLA0_CORE 175U
+/** @brief dla1_falcon_clk */
+#define TEGRA234_CLK_DLA1_FALCON 176U
+/** @brief dla1_core_clk */
+#define TEGRA234_CLK_DLA1_CORE 177U
+/** @brief Output of mux controlled by disp_2clk_sor0_clk_safe signal (sor0_clk) */
+#define TEGRA234_CLK_SOR0 178U
+/** @brief Output of mux controlled by disp_2clk_sor1_clk_safe signal (sor1_clk) */
+#define TEGRA234_CLK_SOR1 179U
+/** @brief DP macro feedback clock (same as LINKA_SYM CLKOUT) */
+#define TEGRA234_CLK_SOR_PAD_INPUT 180U
+/** @brief Output of mux controlled by disp_2clk_h0_dsi_sel signal in sf0_clk path */
+#define TEGRA234_CLK_PRE_SF0 181U
+/** @brief Output of mux controlled by disp_2clk_sf0_clk_safe signal (sf0_clk) */
+#define TEGRA234_CLK_SF0 182U
+/** @brief Output of mux controlled by disp_2clk_sf1_clk_safe signal (sf1_clk) */
+#define TEGRA234_CLK_SF1 183U
+/** @brief CLKOUT_AB output from DSI BRICK A (dsi_clkout_ab) */
+#define TEGRA234_CLK_DSI_PAD_INPUT 184U
+/** @brief output of gate CLK_ENB_PEX2_CORE_10 */
+#define TEGRA234_CLK_PEX2_C10_CORE 187U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_UARTI switch divider output (uarti_r_clk) */
+#define TEGRA234_CLK_UARTI 188U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_UARTJ switch divider output (uartj_r_clk) */
+#define TEGRA234_CLK_UARTJ 189U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_UARTH switch divider output */
+#define TEGRA234_CLK_UARTH 190U
+/** @brief ungated version of fuse clk */
+#define TEGRA234_CLK_FUSE_SERIAL 191U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_QSPI0 switch divider output (qspi0_2x_pm_clk) */
+#define TEGRA234_CLK_QSPI0_2X_PM 192U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_QSPI1 switch divider output (qspi1_2x_pm_clk) */
+#define TEGRA234_CLK_QSPI1_2X_PM 193U
+/** @brief output of the divider QSPI_CLK_DIV2_SEL in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI0 (qspi0_pm_clk) */
+#define TEGRA234_CLK_QSPI0_PM 194U
+/** @brief output of the divider QSPI_CLK_DIV2_SEL in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI1 (qspi1_pm_clk) */
+#define TEGRA234_CLK_QSPI1_PM 195U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_VI_CONST switch divider output */
+#define TEGRA234_CLK_VI_CONST 196U
+/** @brief NAFLL clock source for BPMP */
+#define TEGRA234_CLK_NAFLL_BPMP 197U
+/** @brief NAFLL clock source for SCE */
+#define TEGRA234_CLK_NAFLL_SCE 198U
+/** @brief NAFLL clock source for NVDEC */
+#define TEGRA234_CLK_NAFLL_NVDEC 199U
+/** @brief NAFLL clock source for NVJPG */
+#define TEGRA234_CLK_NAFLL_NVJPG 200U
+/** @brief NAFLL clock source for TSEC */
+#define TEGRA234_CLK_NAFLL_TSEC 201U
+/** @brief NAFLL clock source for VI */
+#define TEGRA234_CLK_NAFLL_VI 203U
+/** @brief NAFLL clock source for SE */
+#define TEGRA234_CLK_NAFLL_SE 204U
+/** @brief NAFLL clock source for NVENC */
+#define TEGRA234_CLK_NAFLL_NVENC 205U
+/** @brief NAFLL clock source for ISP */
+#define TEGRA234_CLK_NAFLL_ISP 206U
+/** @brief NAFLL clock source for VIC */
+#define TEGRA234_CLK_NAFLL_VIC 207U
+/** @brief NAFLL clock source for AXICBB */
+#define TEGRA234_CLK_NAFLL_AXICBB 209U
+/** @brief NAFLL clock source for NVJPG1 */
+#define TEGRA234_CLK_NAFLL_NVJPG1 210U
+/** @brief NAFLL clock source for PVA core */
+#define TEGRA234_CLK_NAFLL_PVA0_CORE 211U
+/** @brief NAFLL clock source for PVA VPS */
+#define TEGRA234_CLK_NAFLL_PVA0_VPS 212U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_DBGAPB_0 switch divider output (dbgapb_clk) */
+#define TEGRA234_CLK_DBGAPB 213U
+/** @brief NAFLL clock source for RCE */
+#define TEGRA234_CLK_NAFLL_RCE 214U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_LA switch divider output (la_r_clk) */
+#define TEGRA234_CLK_LA 215U
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLP_OUTD */
+#define TEGRA234_CLK_PLLP_OUT_JTAG 216U
+/** @brief AXI_CBB branch sharing gate control with SDMMC4 */
+#define TEGRA234_CLK_SDMMC4_AXICIF 217U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC_LEGACY_TM switch divider output */
+#define TEGRA234_CLK_SDMMC_LEGACY_TM 219U
+/** @brief output of gate CLK_ENB_PEX0_CORE_0 */
+#define TEGRA234_CLK_PEX0_C0_CORE 220U
+/** @brief output of gate CLK_ENB_PEX0_CORE_1 */
+#define TEGRA234_CLK_PEX0_C1_CORE 221U
+/** @brief output of gate CLK_ENB_PEX0_CORE_2 */
+#define TEGRA234_CLK_PEX0_C2_CORE 222U
+/** @brief output of gate CLK_ENB_PEX0_CORE_3 */
+#define TEGRA234_CLK_PEX0_C3_CORE 223U
+/** @brief output of gate CLK_ENB_PEX0_CORE_4 */
+#define TEGRA234_CLK_PEX0_C4_CORE 224U
+/** @brief output of gate CLK_ENB_PEX1_CORE_5 */
+#define TEGRA234_CLK_PEX1_C5_CORE 225U
+/** @brief Monitored branch of PEX0_C0_CORE clock */
+#define TEGRA234_CLK_PEX0_C0_CORE_M 229U
+/** @brief Monitored branch of PEX0_C1_CORE clock */
+#define TEGRA234_CLK_PEX0_C1_CORE_M 230U
+/** @brief Monitored branch of PEX0_C2_CORE clock */
+#define TEGRA234_CLK_PEX0_C2_CORE_M 231U
+/** @brief Monitored branch of PEX0_C3_CORE clock */
+#define TEGRA234_CLK_PEX0_C3_CORE_M 232U
+/** @brief Monitored branch of PEX0_C4_CORE clock */
+#define TEGRA234_CLK_PEX0_C4_CORE_M 233U
+/** @brief Monitored branch of PEX1_C5_CORE clock */
+#define TEGRA234_CLK_PEX1_C5_CORE_M 234U
+/** @brief Monitored branch of PEX1_C6_CORE clock */
+#define TEGRA234_CLK_PEX1_C6_CORE_M 235U
+/** @brief output of GPU GPC1 clkGen (in 1x mode same rate as GPC1 MUX2 out) */
+#define TEGRA234_CLK_GPC1CLK 236U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC4_BASE */
+#define TEGRA234_CLK_PLLC4 237U
+/** @brief PLLC4 VCO followed by DIV3 path */
+#define TEGRA234_CLK_PLLC4_OUT1 239U
+/** @brief PLLC4 VCO followed by DIV5 path */
+#define TEGRA234_CLK_PLLC4_OUT2 240U
+/** @brief output of the mux controlled by PLLC4_CLK_SEL */
+#define TEGRA234_CLK_PLLC4_MUXED 241U
+/** @brief PLLC4 VCO followed by DIV2 path */
+#define TEGRA234_CLK_PLLC4_VCO_DIV2 242U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLNVHS_BASE */
+#define TEGRA234_CLK_PLLNVHS 243U
+/** @brief Monitored branch of PEX2_C7_CORE clock */
+#define TEGRA234_CLK_PEX2_C7_CORE_M 244U
+/** @brief Monitored branch of PEX2_C8_CORE clock */
+#define TEGRA234_CLK_PEX2_C8_CORE_M 245U
+/** @brief Monitored branch of PEX2_C9_CORE clock */
+#define TEGRA234_CLK_PEX2_C9_CORE_M 246U
+/** @brief Monitored branch of PEX2_C10_CORE clock */
+#define TEGRA234_CLK_PEX2_C10_CORE_M 247U
+/** @brief RX clock recovered from MGBE0 lane input */
+#define TEGRA234_CLK_MGBE0_RX_INPUT 248U
+/** @brief RX clock recovered from MGBE1 lane input */
+#define TEGRA234_CLK_MGBE1_RX_INPUT 249U
+/** @brief RX clock recovered from MGBE2 lane input */
+#define TEGRA234_CLK_MGBE2_RX_INPUT 250U
+/** @brief RX clock recovered from MGBE3 lane input */
+#define TEGRA234_CLK_MGBE3_RX_INPUT 251U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PEX_SATA_USB_RX_BYP switch divider output */
+#define TEGRA234_CLK_PEX_SATA_USB_RX_BYP 254U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL0_MGMT switch divider output */
+#define TEGRA234_CLK_PEX_USB_PAD_PLL0_MGMT 255U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL1_MGMT switch divider output */
+#define TEGRA234_CLK_PEX_USB_PAD_PLL1_MGMT 256U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL2_MGMT switch divider output */
+#define TEGRA234_CLK_PEX_USB_PAD_PLL2_MGMT 257U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL3_MGMT switch divider output */
+#define TEGRA234_CLK_PEX_USB_PAD_PLL3_MGMT 258U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_NVHS_RX_BYP switch divider output */
+#define TEGRA234_CLK_NVHS_RX_BYP_REF 263U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_NVHS_PLL0_MGMT switch divider output */
+#define TEGRA234_CLK_NVHS_PLL0_MGMT 264U
+/** @brief xusb_core_dev_clk */
+#define TEGRA234_CLK_XUSB_CORE_DEV 265U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_XUSB_CORE_HOST switch divider output */
+#define TEGRA234_CLK_XUSB_CORE_MUX 266U
+/** @brief xusb_core_host_clk */
+#define TEGRA234_CLK_XUSB_CORE_HOST 267U
+/** @brief xusb_core_superspeed_clk */
+#define TEGRA234_CLK_XUSB_CORE_SS 268U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_XUSB_FALCON switch divider output */
+#define TEGRA234_CLK_XUSB_FALCON 269U
+/** @brief xusb_falcon_host_clk */
+#define TEGRA234_CLK_XUSB_FALCON_HOST 270U
+/** @brief xusb_falcon_superspeed_clk */
+#define TEGRA234_CLK_XUSB_FALCON_SS 271U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_XUSB_FS switch divider output */
+#define TEGRA234_CLK_XUSB_FS 272U
+/** @brief xusb_fs_host_clk */
+#define TEGRA234_CLK_XUSB_FS_HOST 273U
+/** @brief xusb_fs_dev_clk */
+#define TEGRA234_CLK_XUSB_FS_DEV 274U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_XUSB_SS switch divider output */
+#define TEGRA234_CLK_XUSB_SS 275U
+/** @brief xusb_ss_dev_clk */
+#define TEGRA234_CLK_XUSB_SS_DEV 276U
+/** @brief xusb_ss_superspeed_clk */
+#define TEGRA234_CLK_XUSB_SS_SUPERSPEED 277U
+/** @brief NAFLL clock source for CPU cluster 0 */
+#define TEGRA234_CLK_NAFLL_CLUSTER0 280U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER0_CORE 280U
+/** @brief NAFLL clock source for CPU cluster 1 */
+#define TEGRA234_CLK_NAFLL_CLUSTER1 281U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER1_CORE 281U
+/** @brief NAFLL clock source for CPU cluster 2 */
+#define TEGRA234_CLK_NAFLL_CLUSTER2 282U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER2_CORE 282U
+/** @brief CLK_RST_CONTROLLER_CAN1_CORE_RATE divider output */
+#define TEGRA234_CLK_CAN1_CORE 284U
+/** @brief CLK_RST_CONTROLLER_CAN2_CORE_RATE divider output */
+#define TEGRA234_CLK_CAN2_CORE 285U
+/** @brief CLK_RST_CONTROLLER_PLLA1_OUT1 switch divider output */
+#define TEGRA234_CLK_PLLA1_OUT1 286U
+/** @brief NVHS PLL hardware power sequencer (overrides 'manual' programming of PLL) */
+#define TEGRA234_CLK_PLLNVHS_HPS 287U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLREFE_BASE */
+#define TEGRA234_CLK_PLLREFE_VCOOUT 288U
+/** @brief 32K input clock provided by PMIC */
+#define TEGRA234_CLK_CLK_32K 289U
+/** @brief Fixed 48MHz clock divided down from utmipll */
+#define TEGRA234_CLK_UTMIPLL_CLKOUT48 291U
+/** @brief Fixed 480MHz clock divided down from utmipll */
+#define TEGRA234_CLK_UTMIPLL_CLKOUT480 292U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLNVCSI_BASE */
+#define TEGRA234_CLK_PLLNVCSI 294U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PVA0_CPU_AXI switch divider output */
+#define TEGRA234_CLK_PVA0_CPU_AXI 295U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_PVA0_VPS switch divider output */
+#define TEGRA234_CLK_PVA0_VPS 297U
+/** @brief DLA0_CORE_NAFLL */
+#define TEGRA234_CLK_NAFLL_DLA0_CORE 299U
+/** @brief DLA0_FALCON_NAFLL */
+#define TEGRA234_CLK_NAFLL_DLA0_FALCON 300U
+/** @brief DLA1_CORE_NAFLL */
+#define TEGRA234_CLK_NAFLL_DLA1_CORE 301U
+/** @brief DLA1_FALCON_NAFLL */
+#define TEGRA234_CLK_NAFLL_DLA1_FALCON 302U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_UART_FST_MIPI_CAL */
+#define TEGRA234_CLK_AON_UART_FST_MIPI_CAL 303U
+/** @brief GPU system clock */
+#define TEGRA234_CLK_GPUSYS 304U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C5 */
+#define TEGRA234_CLK_I2C5 305U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SE switch divider free running clk */
+#define TEGRA234_CLK_FR_SE 306U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_BPMP_CPU_NIC switch divider output */
+#define TEGRA234_CLK_BPMP_CPU_NIC 307U
+/** @brief output of gate CLK_ENB_BPMP_CPU */
+#define TEGRA234_CLK_BPMP_CPU 308U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_TSC switch divider output */
+#define TEGRA234_CLK_TSC 309U
+/** @brief output of mem pll A sync mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EMC */
+#define TEGRA234_CLK_EMCSA_MPLL 310U
+/** @brief output of mem pll B sync mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EMCSB */
+#define TEGRA234_CLK_EMCSB_MPLL 311U
+/** @brief output of mem pll C sync mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EMCSC */
+#define TEGRA234_CLK_EMCSC_MPLL 312U
+/** @brief output of mem pll D sync mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EMCSD */
+#define TEGRA234_CLK_EMCSD_MPLL 313U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC_BASE */
+#define TEGRA234_CLK_PLLC 314U
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC2_BASE */
+#define TEGRA234_CLK_PLLC2 315U
+/** @brief CLK_RST_CONTROLLER_TSC_HS_SUPER_CLK_DIVIDER skip divider output */
+#define TEGRA234_CLK_TSC_REF 317U
+/** @brief Dummy clock to ensure minimum SoC voltage for fuse burning */
+#define TEGRA234_CLK_FUSE_BURN 318U
+/** @brief GBE PLL */
+#define TEGRA234_CLK_PLLGBE 319U
+/** @brief GBE PLL hardware power sequencer */
+#define TEGRA234_CLK_PLLGBE_HPS 320U
+/** @brief output of EMC CDB side A fixed (DIV4) divider */
+#define TEGRA234_CLK_EMCSA_EMC 321U
+/** @brief output of EMC CDB side B fixed (DIV4) divider */
+#define TEGRA234_CLK_EMCSB_EMC 322U
+/** @brief output of EMC CDB side C fixed (DIV4) divider */
+#define TEGRA234_CLK_EMCSC_EMC 323U
+/** @brief output of EMC CDB side D fixed (DIV4) divider */
+#define TEGRA234_CLK_EMCSD_EMC 324U
+/** @brief PLLE hardware power sequencer (overrides 'manual' programming of PLL) */
+#define TEGRA234_CLK_PLLE_HPS 326U
+/** @brief CLK_ENB_PLLREFE_OUT gate output */
+#define TEGRA234_CLK_PLLREFE_VCOOUT_GATED 327U
+/** @brief TEGRA234_CLK_SOR_SAFE clk source (PLLP_OUT0 divided by 17) */
+#define TEGRA234_CLK_PLLP_DIV17 328U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SOC_THERM switch divider output */
+#define TEGRA234_CLK_SOC_THERM 329U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_TSENSE switch divider output */
+#define TEGRA234_CLK_TSENSE 330U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SEU1 switch divider free running clk */
+#define TEGRA234_CLK_FR_SEU1 331U
+/** @brief NAFLL clock source for OFA */
+#define TEGRA234_CLK_NAFLL_OFA 333U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_OFA switch divider output */
+#define TEGRA234_CLK_OFA 334U
+/** @brief NAFLL clock source for SEU1 */
+#define TEGRA234_CLK_NAFLL_SEU1 335U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SEU1 switch divider gated output */
+#define TEGRA234_CLK_SEU1 336U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI4 */
+#define TEGRA234_CLK_SPI4 337U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI5 */
+#define TEGRA234_CLK_SPI5 338U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DCE_CPU_NIC */
+#define TEGRA234_CLK_DCE_CPU_NIC 339U
+/** @brief output of divider CLK_RST_CONTROLLER_DCE_NIC_RATE */
+#define TEGRA234_CLK_DCE_NIC 340U
+/** @brief NAFLL clock source for DCE */
+#define TEGRA234_CLK_NAFLL_DCE 341U
+/** @brief Monitored branch of MPHY_L0_RX_ANA clock */
+#define TEGRA234_CLK_MPHY_L0_RX_ANA_M 342U
+/** @brief Monitored branch of MPHY_L1_RX_ANA clock */
+#define TEGRA234_CLK_MPHY_L1_RX_ANA_M 343U
+/** @brief ungated version of TX symbol clock after fixed 1/2 divider */
+#define TEGRA234_CLK_MPHY_L0_TX_PRE_SYMB 344U
+/** @brief output of divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_L0_TX_LS_SYMB */
+#define TEGRA234_CLK_MPHY_L0_TX_LS_SYMB_DIV 345U
+/** @brief output of gate CLK_ENB_MPHY_L0_TX_2X_SYMB */
+#define TEGRA234_CLK_MPHY_L0_TX_2X_SYMB 346U
+/** @brief output of SW_MPHY_L0_TX_HS_SYMB divider in CLK_RST_CONTROLLER_MPHY_L0_TX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_TX_HS_SYMB_DIV 347U
+/** @brief output of SW_MPHY_L0_TX_LS_3XBIT divider in CLK_RST_CONTROLLER_MPHY_L0_TX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_TX_LS_3XBIT_DIV 348U
+/** @brief LS/HS divider mux SW_MPHY_L0_TX_LS_HS_SEL in CLK_RST_CONTROLLER_MPHY_L0_TX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_TX_MUX_SYMB_DIV 349U
+/** @brief Monitored branch of MPHY_L0_TX_SYMB clock */
+#define TEGRA234_CLK_MPHY_L0_TX_SYMB_M 350U
+/** @brief output of divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_L0_RX_LS_SYMB */
+#define TEGRA234_CLK_MPHY_L0_RX_LS_SYMB_DIV 351U
+/** @brief output of SW_MPHY_L0_RX_HS_SYMB divider in CLK_RST_CONTROLLER_MPHY_L0_RX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_RX_HS_SYMB_DIV 352U
+/** @brief output of SW_MPHY_L0_RX_LS_BIT divider in CLK_RST_CONTROLLER_MPHY_L0_RX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_RX_LS_BIT_DIV 353U
+/** @brief LS/HS divider mux SW_MPHY_L0_RX_LS_HS_SEL in CLK_RST_CONTROLLER_MPHY_L0_RX_CLK_CTRL_0 */
+#define TEGRA234_CLK_MPHY_L0_RX_MUX_SYMB_DIV 354U
+/** @brief Monitored branch of MPHY_L0_RX_SYMB clock */
+#define TEGRA234_CLK_MPHY_L0_RX_SYMB_M 355U
+/** @brief Monitored branch of MGBE0 RX input clock */
+#define TEGRA234_CLK_MGBE0_RX_INPUT_M 357U
+/** @brief Monitored branch of MGBE1 RX input clock */
+#define TEGRA234_CLK_MGBE1_RX_INPUT_M 358U
+/** @brief Monitored branch of MGBE2 RX input clock */
+#define TEGRA234_CLK_MGBE2_RX_INPUT_M 359U
+/** @brief Monitored branch of MGBE3 RX input clock */
+#define TEGRA234_CLK_MGBE3_RX_INPUT_M 360U
+/** @brief Monitored branch of MGBE0 RX PCS mux output */
+#define TEGRA234_CLK_MGBE0_RX_PCS_M 361U
+/** @brief Monitored branch of MGBE1 RX PCS mux output */
+#define TEGRA234_CLK_MGBE1_RX_PCS_M 362U
+/** @brief Monitored branch of MGBE2 RX PCS mux output */
+#define TEGRA234_CLK_MGBE2_RX_PCS_M 363U
+/** @brief Monitored branch of MGBE3 RX PCS mux output */
+#define TEGRA234_CLK_MGBE3_RX_PCS_M 364U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TACH1 */
+#define TEGRA234_CLK_TACH1 365U
+/** @brief GBE_UPHY_MGBES_APP_CLK switch divider gated output */
+#define TEGRA234_CLK_MGBES_APP 366U
+/** @brief Logical clk for setting GBE UPHY PLL2 TX_REF rate */
+#define TEGRA234_CLK_UPHY_GBE_PLL2_TX_REF 367U
+/** @brief Logical clk for setting GBE UPHY PLL2 XDIG rate */
+#define TEGRA234_CLK_UPHY_GBE_PLL2_XDIG 368U
+/** @brief RX PCS clock recovered from MGBE0 lane input */
+#define TEGRA234_CLK_MGBE0_RX_PCS_INPUT 369U
+/** @brief RX PCS clock recovered from MGBE1 lane input */
+#define TEGRA234_CLK_MGBE1_RX_PCS_INPUT 370U
+/** @brief RX PCS clock recovered from MGBE2 lane input */
+#define TEGRA234_CLK_MGBE2_RX_PCS_INPUT 371U
+/** @brief RX PCS clock recovered from MGBE3 lane input */
+#define TEGRA234_CLK_MGBE3_RX_PCS_INPUT 372U
+/** @brief output of mux controlled by GBE_UPHY_MGBE0_RX_PCS_CLK_SRC_SEL */
+#define TEGRA234_CLK_MGBE0_RX_PCS 373U
+/** @brief GBE_UPHY_MGBE0_TX_CLK divider gated output */
+#define TEGRA234_CLK_MGBE0_TX 374U
+/** @brief GBE_UPHY_MGBE0_TX_PCS_CLK divider gated output */
+#define TEGRA234_CLK_MGBE0_TX_PCS 375U
+/** @brief GBE_UPHY_MGBE0_MAC_CLK divider output */
+#define TEGRA234_CLK_MGBE0_MAC_DIVIDER 376U
+/** @brief GBE_UPHY_MGBE0_MAC_CLK gate output */
+#define TEGRA234_CLK_MGBE0_MAC 377U
+/** @brief GBE_UPHY_MGBE0_MACSEC_CLK gate output */
+#define TEGRA234_CLK_MGBE0_MACSEC 378U
+/** @brief GBE_UPHY_MGBE0_EEE_PCS_CLK gate output */
+#define TEGRA234_CLK_MGBE0_EEE_PCS 379U
+/** @brief GBE_UPHY_MGBE0_APP_CLK gate output */
+#define TEGRA234_CLK_MGBE0_APP 380U
+/** @brief GBE_UPHY_MGBE0_PTP_REF_CLK divider gated output */
+#define TEGRA234_CLK_MGBE0_PTP_REF 381U
+/** @brief output of mux controlled by GBE_UPHY_MGBE1_RX_PCS_CLK_SRC_SEL */
+#define TEGRA234_CLK_MGBE1_RX_PCS 382U
+/** @brief GBE_UPHY_MGBE1_TX_CLK divider gated output */
+#define TEGRA234_CLK_MGBE1_TX 383U
+/** @brief GBE_UPHY_MGBE1_TX_PCS_CLK divider gated output */
+#define TEGRA234_CLK_MGBE1_TX_PCS 384U
+/** @brief GBE_UPHY_MGBE1_MAC_CLK divider output */
+#define TEGRA234_CLK_MGBE1_MAC_DIVIDER 385U
+/** @brief GBE_UPHY_MGBE1_MAC_CLK gate output */
+#define TEGRA234_CLK_MGBE1_MAC 386U
+/** @brief GBE_UPHY_MGBE1_MACSEC_CLK gate output */
+#define TEGRA234_CLK_MGBE1_MACSEC 387U
+/** @brief GBE_UPHY_MGBE1_EEE_PCS_CLK gate output */
+#define TEGRA234_CLK_MGBE1_EEE_PCS 388U
+/** @brief GBE_UPHY_MGBE1_APP_CLK gate output */
+#define TEGRA234_CLK_MGBE1_APP 389U
+/** @brief GBE_UPHY_MGBE1_PTP_REF_CLK divider gated output */
+#define TEGRA234_CLK_MGBE1_PTP_REF 390U
+/** @brief output of mux controlled by GBE_UPHY_MGBE2_RX_PCS_CLK_SRC_SEL */
+#define TEGRA234_CLK_MGBE2_RX_PCS 391U
+/** @brief GBE_UPHY_MGBE2_TX_CLK divider gated output */
+#define TEGRA234_CLK_MGBE2_TX 392U
+/** @brief GBE_UPHY_MGBE2_TX_PCS_CLK divider gated output */
+#define TEGRA234_CLK_MGBE2_TX_PCS 393U
+/** @brief GBE_UPHY_MGBE2_MAC_CLK divider output */
+#define TEGRA234_CLK_MGBE2_MAC_DIVIDER 394U
+/** @brief GBE_UPHY_MGBE2_MAC_CLK gate output */
+#define TEGRA234_CLK_MGBE2_MAC 395U
+/** @brief GBE_UPHY_MGBE2_MACSEC_CLK gate output */
+#define TEGRA234_CLK_MGBE2_MACSEC 396U
+/** @brief GBE_UPHY_MGBE2_EEE_PCS_CLK gate output */
+#define TEGRA234_CLK_MGBE2_EEE_PCS 397U
+/** @brief GBE_UPHY_MGBE2_APP_CLK gate output */
+#define TEGRA234_CLK_MGBE2_APP 398U
+/** @brief GBE_UPHY_MGBE2_PTP_REF_CLK divider gated output */
+#define TEGRA234_CLK_MGBE2_PTP_REF 399U
+/** @brief output of mux controlled by GBE_UPHY_MGBE3_RX_PCS_CLK_SRC_SEL */
+#define TEGRA234_CLK_MGBE3_RX_PCS 400U
+/** @brief GBE_UPHY_MGBE3_TX_CLK divider gated output */
+#define TEGRA234_CLK_MGBE3_TX 401U
+/** @brief GBE_UPHY_MGBE3_TX_PCS_CLK divider gated output */
+#define TEGRA234_CLK_MGBE3_TX_PCS 402U
+/** @brief GBE_UPHY_MGBE3_MAC_CLK divider output */
+#define TEGRA234_CLK_MGBE3_MAC_DIVIDER 403U
+/** @brief GBE_UPHY_MGBE3_MAC_CLK gate output */
+#define TEGRA234_CLK_MGBE3_MAC 404U
+/** @brief GBE_UPHY_MGBE3_MACSEC_CLK gate output */
+#define TEGRA234_CLK_MGBE3_MACSEC 405U
+/** @brief GBE_UPHY_MGBE3_EEE_PCS_CLK gate output */
+#define TEGRA234_CLK_MGBE3_EEE_PCS 406U
+/** @brief GBE_UPHY_MGBE3_APP_CLK gate output */
+#define TEGRA234_CLK_MGBE3_APP 407U
+/** @brief GBE_UPHY_MGBE3_PTP_REF_CLK divider gated output */
+#define TEGRA234_CLK_MGBE3_PTP_REF 408U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_GBE_RX_BYP switch divider output */
+#define TEGRA234_CLK_GBE_RX_BYP_REF 409U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_GBE_PLL0_MGMT switch divider output */
+#define TEGRA234_CLK_GBE_PLL0_MGMT 410U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_GBE_PLL1_MGMT switch divider output */
+#define TEGRA234_CLK_GBE_PLL1_MGMT 411U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_GBE_PLL2_MGMT switch divider output */
+#define TEGRA234_CLK_GBE_PLL2_MGMT 412U
+/** @brief output of gate CLK_ENB_EQOS_MACSEC_RX */
+#define TEGRA234_CLK_EQOS_MACSEC_RX 413U
+/** @brief output of gate CLK_ENB_EQOS_MACSEC_TX */
+#define TEGRA234_CLK_EQOS_MACSEC_TX 414U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_TX_CLK divider ungated output */
+#define TEGRA234_CLK_EQOS_TX_DIVIDER 415U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_NVHS_PLL1_MGMT switch divider output */
+#define TEGRA234_CLK_NVHS_PLL1_MGMT 416U
+/** @brief CLK_RST_CONTROLLER_CLK_SOURCE_EMCHUB mux output */
+#define TEGRA234_CLK_EMCHUB 417U
+/** @brief clock recovered from I2S7 input */
+#define TEGRA234_CLK_I2S7_SYNC_INPUT 418U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S7 */
+#define TEGRA234_CLK_SYNC_I2S7 419U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S7 */
+#define TEGRA234_CLK_I2S7 420U
+/** @brief Monitored output of I2S7 pad macro mux */
+#define TEGRA234_CLK_I2S7_PAD_M 421U
+/** @brief clock recovered from I2S8 input */
+#define TEGRA234_CLK_I2S8_SYNC_INPUT 422U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S8 */
+#define TEGRA234_CLK_SYNC_I2S8 423U
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S8 */
+#define TEGRA234_CLK_I2S8 424U
+/** @brief Monitored output of I2S8 pad macro mux */
+#define TEGRA234_CLK_I2S8_PAD_M 425U
+/** @brief NAFLL clock source for GPU GPC0 */
+#define TEGRA234_CLK_NAFLL_GPC0 426U
+/** @brief NAFLL clock source for GPU GPC1 */
+#define TEGRA234_CLK_NAFLL_GPC1 427U
+/** @brief NAFLL clock source for GPU SYSCLK */
+#define TEGRA234_CLK_NAFLL_GPUSYS 428U
+/** @brief NAFLL clock source for CPU cluster 0 DSUCLK */
+#define TEGRA234_CLK_NAFLL_DSU0 429U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER0_DSU 429U
+/** @brief NAFLL clock source for CPU cluster 1 DSUCLK */
+#define TEGRA234_CLK_NAFLL_DSU1 430U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER1_DSU 430U
+/** @brief NAFLL clock source for CPU cluster 2 DSUCLK */
+#define TEGRA234_CLK_NAFLL_DSU2 431U /* TODO: remove */
+#define TEGRA234_CLK_NAFLL_CLUSTER2_DSU 431U
+/** @brief output of gate CLK_ENB_SCE_CPU */
+#define TEGRA234_CLK_SCE_CPU 432U
+/** @brief output of gate CLK_ENB_RCE_CPU */
+#define TEGRA234_CLK_RCE_CPU 433U
+/** @brief output of gate CLK_ENB_DCE_CPU */
+#define TEGRA234_CLK_DCE_CPU 434U
+/** @brief DSIPLL VCO output */
+#define TEGRA234_CLK_DSIPLL_VCO 435U
+/** @brief DSIPLL SYNC_CLKOUTP/N differential output */
+#define TEGRA234_CLK_DSIPLL_CLKOUTPN 436U
+/** @brief DSIPLL SYNC_CLKOUTA output */
+#define TEGRA234_CLK_DSIPLL_CLKOUTA 437U
+/** @brief SPPLL0 VCO output */
+#define TEGRA234_CLK_SPPLL0_VCO 438U
+/** @brief SPPLL0 SYNC_CLKOUTP/N differential output */
+#define TEGRA234_CLK_SPPLL0_CLKOUTPN 439U
+/** @brief SPPLL0 SYNC_CLKOUTA output */
+#define TEGRA234_CLK_SPPLL0_CLKOUTA 440U
+/** @brief SPPLL0 SYNC_CLKOUTB output */
+#define TEGRA234_CLK_SPPLL0_CLKOUTB 441U
+/** @brief SPPLL0 CLKOUT_DIVBY10 output */
+#define TEGRA234_CLK_SPPLL0_DIV10 442U
+/** @brief SPPLL0 CLKOUT_DIVBY25 output */
+#define TEGRA234_CLK_SPPLL0_DIV25 443U
+/** @brief SPPLL0 CLKOUT_DIVBY27P/N differential output */
+#define TEGRA234_CLK_SPPLL0_DIV27PN 444U
+/** @brief SPPLL1 VCO output */
+#define TEGRA234_CLK_SPPLL1_VCO 445U
+/** @brief SPPLL1 SYNC_CLKOUTP/N differential output */
+#define TEGRA234_CLK_SPPLL1_CLKOUTPN 446U
+/** @brief SPPLL1 CLKOUT_DIVBY27P/N differential output */
+#define TEGRA234_CLK_SPPLL1_DIV27PN 447U
+/** @brief VPLL0 reference clock */
+#define TEGRA234_CLK_VPLL0_REF 448U
+/** @brief VPLL0 */
+#define TEGRA234_CLK_VPLL0 449U
+/** @brief VPLL1 */
+#define TEGRA234_CLK_VPLL1 450U
+/** @brief NVDISPLAY_P0_CLK reference select */
+#define TEGRA234_CLK_NVDISPLAY_P0_REF 451U
+/** @brief RG0_PCLK */
+#define TEGRA234_CLK_RG0 452U
+/** @brief RG1_PCLK */
+#define TEGRA234_CLK_RG1 453U
+/** @brief DISPPLL output */
+#define TEGRA234_CLK_DISPPLL 454U
+/** @brief DISPHUBPLL output */
+#define TEGRA234_CLK_DISPHUBPLL 455U
+/** @brief CLK_RST_CONTROLLER_DSI_LP_SWITCH_DIVIDER switch divider output (dsi_lp_clk) */
+#define TEGRA234_CLK_DSI_LP 456U
+/** @brief CLK_RST_CONTROLLER_AZA2XBITCLK_OUT_SWITCH_DIVIDER switch divider output (aza_2xbitclk) */
+#define TEGRA234_CLK_AZA_2XBIT 457U
+/** @brief aza_2xbitclk / 2 (aza_bitclk) */
+#define TEGRA234_CLK_AZA_BIT 458U
+/** @brief SWITCH_DSI_CORE_PIXEL_MISC_DSI_CORE_CLK_SRC switch output (dsi_core_clk) */
+#define TEGRA234_CLK_DSI_CORE 459U
+/** @brief Output of mux controlled by pkt_wr_fifo_signal from dsi (dsi_pixel_clk) */
+#define TEGRA234_CLK_DSI_PIXEL 460U
+/** @brief Output of mux controlled by disp_2clk_sor0_dp_sel (pre_sor0_clk) */
+#define TEGRA234_CLK_PRE_SOR0 461U
+/** @brief Output of mux controlled by disp_2clk_sor1_dp_sel (pre_sor1_clk) */
+#define TEGRA234_CLK_PRE_SOR1 462U
+/** @brief CLK_RST_CONTROLLER_LINK_REFCLK_CFG__0 output */
+#define TEGRA234_CLK_DP_LINK_REF 463U
+/** @brief Link clock input from DP macro brick PLL */
+#define TEGRA234_CLK_SOR_LINKA_INPUT 464U
+/** @brief SOR AFIFO clock output */
+#define TEGRA234_CLK_SOR_LINKA_AFIFO 465U
+/** @brief Monitored branch of linka_afifo_clk */
+#define TEGRA234_CLK_SOR_LINKA_AFIFO_M 466U
+/** @brief Monitored branch of rg0_pclk */
+#define TEGRA234_CLK_RG0_M 467U
+/** @brief Monitored branch of rg1_pclk */
+#define TEGRA234_CLK_RG1_M 468U
+/** @brief Monitored branch of sor0_clk */
+#define TEGRA234_CLK_SOR0_M 469U
+/** @brief Monitored branch of sor1_clk */
+#define TEGRA234_CLK_SOR1_M 470U
+/** @brief EMC PLLHUB output */
+#define TEGRA234_CLK_PLLHUB 471U
+/** @brief output of fixed (DIV2) MC HUB divider */
+#define TEGRA234_CLK_MCHUB 472U
+/** @brief output of divider controlled by EMC side A MC_EMC_SAFE_SAME_FREQ */
+#define TEGRA234_CLK_EMCSA_MC 473U
+/** @brief output of divider controlled by EMC side B MC_EMC_SAFE_SAME_FREQ */
+#define TEGRA234_CLK_EMCSB_MC 474U
+/** @brief output of divider controlled by EMC side C MC_EMC_SAFE_SAME_FREQ */
+#define TEGRA234_CLK_EMCSC_MC 475U
+/** @brief output of divider controlled by EMC side D MC_EMC_SAFE_SAME_FREQ */
+#define TEGRA234_CLK_EMCSD_MC 476U
+
+/** @} */
#endif
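The Tegra234 IDs above are BPMP clock identifiers: the provider is the BPMP firmware node and the ID is the single cell of its clock specifier. Per the TEGRA234_CLK_EMC comment, a clk_set_rate() on that ID is handled as one operation (source selection plus a switch sequence across both memory controllers). A consumer sketch, assuming the usual &bpmp phandle; the compatible strings and unit address are illustrative, not taken from this diff:

#include <dt-bindings/clock/tegra234-clock.h>

uarta: serial@3100000 {
        compatible = "nvidia,tegra234-uart", "nvidia,tegra20-uart";
        clocks = <&bpmp TEGRA234_CLK_UARTA>;
        clock-names = "serial";
};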
diff --git a/include/dt-bindings/clock/tegra30-car.h b/include/dt-bindings/clock/tegra30-car.h
index f193663e6f28..763b81f80908 100644
--- a/include/dt-bindings/clock/tegra30-car.h
+++ b/include/dt-bindings/clock/tegra30-car.h
@@ -271,6 +271,7 @@
#define TEGRA30_CLK_AUDIO3_MUX 306
#define TEGRA30_CLK_AUDIO4_MUX 307
#define TEGRA30_CLK_SPDIF_MUX 308
-#define TEGRA30_CLK_CLK_MAX 309
+#define TEGRA30_CLK_CSIA_PAD 309
+#define TEGRA30_CLK_CSIB_PAD 310

#endif /* _DT_BINDINGS_CLOCK_TEGRA30_CAR_H */
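TEGRA30_CLK_CLK_MAX was a table-size sentinel rather than a selectable clock, so no well-formed DT should have referenced ID 309 before; the hunk retires it and reuses 309/310 for the CSI pad brick clocks. A hedged sketch of the new IDs in use (node shape, unit address and clock-names are assumptions, not from this diff):

vi@54080000 {
        clocks = <&tegra_car TEGRA30_CLK_CSIA_PAD>,
                 <&tegra_car TEGRA30_CLK_CSIB_PAD>;
        clock-names = "csia-pad", "csib-pad";
};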
diff --git a/include/dt-bindings/clock/thead,th1520-clk-ap.h b/include/dt-bindings/clock/thead,th1520-clk-ap.h
new file mode 100644
index 000000000000..09a9aa7b3ab1
--- /dev/null
+++ b/include/dt-bindings/clock/thead,th1520-clk-ap.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Vivo Communication Technology Co. Ltd.
+ * Authors: Yangtao Li <frank.li@vivo.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_TH1520_H_
+#define _DT_BINDINGS_CLK_TH1520_H_
+
+#define CLK_CPU_PLL0 0
+#define CLK_CPU_PLL1 1
+#define CLK_GMAC_PLL 2
+#define CLK_VIDEO_PLL 3
+#define CLK_DPU0_PLL 4
+#define CLK_DPU1_PLL 5
+#define CLK_TEE_PLL 6
+#define CLK_C910_I0 7
+#define CLK_C910 8
+#define CLK_BROM 9
+#define CLK_BMU 10
+#define CLK_AHB2_CPUSYS_HCLK 11
+#define CLK_APB3_CPUSYS_PCLK 12
+#define CLK_AXI4_CPUSYS2_ACLK 13
+#define CLK_AON2CPU_A2X 14
+#define CLK_X2X_CPUSYS 15
+#define CLK_AXI_ACLK 16
+#define CLK_CPU2AON_X2H 17
+#define CLK_PERI_AHB_HCLK 18
+#define CLK_CPU2PERI_X2H 19
+#define CLK_PERI_APB_PCLK 20
+#define CLK_PERI2APB_PCLK 21
+#define CLK_PERISYS_APB1_HCLK 22
+#define CLK_PERISYS_APB2_HCLK 23
+#define CLK_PERISYS_APB3_HCLK 24
+#define CLK_PERISYS_APB4_HCLK 25
+#define CLK_OSC12M 26
+#define CLK_OUT1 27
+#define CLK_OUT2 28
+#define CLK_OUT3 29
+#define CLK_OUT4 30
+#define CLK_APB_PCLK 31
+#define CLK_NPU 32
+#define CLK_NPU_AXI 33
+#define CLK_VI 34
+#define CLK_VI_AHB 35
+#define CLK_VO_AXI 36
+#define CLK_VP_APB 37
+#define CLK_VP_AXI 38
+#define CLK_CPU2VP 39
+#define CLK_VENC 40
+#define CLK_DPU0 41
+#define CLK_DPU1 42
+#define CLK_EMMC_SDIO 43
+#define CLK_GMAC1 44
+#define CLK_PADCTRL1 45
+#define CLK_DSMART 46
+#define CLK_PADCTRL0 47
+#define CLK_GMAC_AXI 48
+#define CLK_GPIO3 49
+#define CLK_GMAC0 50
+#define CLK_PWM 51
+#define CLK_QSPI0 52
+#define CLK_QSPI1 53
+#define CLK_SPI 54
+#define CLK_UART0_PCLK 55
+#define CLK_UART1_PCLK 56
+#define CLK_UART2_PCLK 57
+#define CLK_UART3_PCLK 58
+#define CLK_UART4_PCLK 59
+#define CLK_UART5_PCLK 60
+#define CLK_GPIO0 61
+#define CLK_GPIO1 62
+#define CLK_GPIO2 63
+#define CLK_I2C0 64
+#define CLK_I2C1 65
+#define CLK_I2C2 66
+#define CLK_I2C3 67
+#define CLK_I2C4 68
+#define CLK_I2C5 69
+#define CLK_SPINLOCK 70
+#define CLK_DMA 71
+#define CLK_MBOX0 72
+#define CLK_MBOX1 73
+#define CLK_MBOX2 74
+#define CLK_MBOX3 75
+#define CLK_WDT0 76
+#define CLK_WDT1 77
+#define CLK_TIMER0 78
+#define CLK_TIMER1 79
+#define CLK_SRAM0 80
+#define CLK_SRAM1 81
+#define CLK_SRAM2 82
+#define CLK_SRAM3 83
+#define CLK_PLL_GMAC_100M 84
+#define CLK_UART_SCLK 85
+
+/* VO clocks */
+#define CLK_AXI4_VO_ACLK 0
+#define CLK_GPU_MEM 1
+#define CLK_GPU_CORE 2
+#define CLK_GPU_CFG_ACLK 3
+#define CLK_DPU_PIXELCLK0 4
+#define CLK_DPU_PIXELCLK1 5
+#define CLK_DPU_HCLK 6
+#define CLK_DPU_ACLK 7
+#define CLK_DPU_CCLK 8
+#define CLK_HDMI_SFR 9
+#define CLK_HDMI_PCLK 10
+#define CLK_HDMI_CEC 11
+#define CLK_MIPI_DSI0_PCLK 12
+#define CLK_MIPI_DSI1_PCLK 13
+#define CLK_MIPI_DSI0_CFG 14
+#define CLK_MIPI_DSI1_CFG 15
+#define CLK_MIPI_DSI0_REFCLK 16
+#define CLK_MIPI_DSI1_REFCLK 17
+#define CLK_HDMI_I2S 18
+#define CLK_X2H_DPU1_ACLK 19
+#define CLK_X2H_DPU_ACLK 20
+#define CLK_AXI4_VO_PCLK 21
+#define CLK_IOPMP_VOSYS_DPU_PCLK 22
+#define CLK_IOPMP_VOSYS_DPU1_PCLK 23
+#define CLK_IOPMP_VOSYS_GPU_PCLK 24
+#define CLK_IOPMP_DPU1_ACLK 25
+#define CLK_IOPMP_DPU_ACLK 26
+#define CLK_IOPMP_GPU_ACLK 27
+#define CLK_MIPIDSI0_PIXCLK 28
+#define CLK_MIPIDSI1_PIXCLK 29
+#define CLK_HDMI_PIXCLK 30
+
+#endif
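Note that this header holds two independent index spaces: the AP-subsystem clocks run 0-85, and the list after the /* VO clocks */ marker restarts at 0, which implies a second, separate video-output clock provider; the two sets must be used with different phandles. An AP-side consumer sketch (&clk and the unit address are illustrative; the clock-names are the standard ones for snps,dw-apb-uart):

#include <dt-bindings/clock/thead,th1520-clk-ap.h>

uart0: serial@ffe7014000 {
        compatible = "snps,dw-apb-uart";
        clocks = <&clk CLK_UART_SCLK>, <&clk CLK_UART0_PCLK>;
        clock-names = "baudclk", "apb_pclk";
};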
diff --git a/include/dt-bindings/clock/ti-dra7-atl.h b/include/dt-bindings/clock/ti-dra7-atl.h
index 42dd4164f6f4..b0e71e3cce95 100644
--- a/include/dt-bindings/clock/ti-dra7-atl.h
+++ b/include/dt-bindings/clock/ti-dra7-atl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* This header provides constants for DRA7 ATL (Audio Tracking Logic)
*
@@ -6,15 +7,6 @@
* Copyright (C) 2013 Texas Instruments, Inc.
*
* Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/

#ifndef _DT_BINDINGS_CLK_DRA7_ATL_H
diff --git a/include/dt-bindings/clock/toshiba,tmpv770x.h b/include/dt-bindings/clock/toshiba,tmpv770x.h
new file mode 100644
index 000000000000..a36c89266686
--- /dev/null
+++ b/include/dt-bindings/clock/toshiba,tmpv770x.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLOCK_TOSHIBA_TMPV770X_H_
+#define _DT_BINDINGS_CLOCK_TOSHIBA_TMPV770X_H_
+
+/* PLL */
+#define TMPV770X_PLL_PIPLL0 0
+#define TMPV770X_PLL_PIPLL1 1
+#define TMPV770X_PLL_PIDNNPLL 2
+#define TMPV770X_PLL_PIETHERPLL 3
+#define TMPV770X_PLL_PIDDRCPLL 4
+#define TMPV770X_PLL_PIVOIFPLL 5
+#define TMPV770X_PLL_PIIMGERPLL 6
+
+/* Clocks */
+#define TMPV770X_CLK_PIPLL1_DIV1 0
+#define TMPV770X_CLK_PIPLL1_DIV2 1
+#define TMPV770X_CLK_PIPLL1_DIV4 2
+#define TMPV770X_CLK_PIDNNPLL_DIV1 3
+#define TMPV770X_CLK_DDRC_PHY_PLL0 4
+#define TMPV770X_CLK_DDRC_PHY_PLL1 5
+#define TMPV770X_CLK_D_PHYPLL 6
+#define TMPV770X_CLK_PHY_PCIEPLL 7
+#define TMPV770X_CLK_CA53CL0 8
+#define TMPV770X_CLK_CA53CL1 9
+#define TMPV770X_CLK_PISDMAC 10
+#define TMPV770X_CLK_PIPDMAC0 11
+#define TMPV770X_CLK_PIPDMAC1 12
+#define TMPV770X_CLK_PIWRAM 13
+#define TMPV770X_CLK_DDRC0 14
+#define TMPV770X_CLK_DDRC0_SCLK 15
+#define TMPV770X_CLK_DDRC0_NCLK 16
+#define TMPV770X_CLK_DDRC0_MCLK 17
+#define TMPV770X_CLK_DDRC0_APBCLK 18
+#define TMPV770X_CLK_DDRC1 19
+#define TMPV770X_CLK_DDRC1_SCLK 20
+#define TMPV770X_CLK_DDRC1_NCLK 21
+#define TMPV770X_CLK_DDRC1_MCLK 22
+#define TMPV770X_CLK_DDRC1_APBCLK 23
+#define TMPV770X_CLK_HOX 24
+#define TMPV770X_CLK_PCIE_MSTR 25
+#define TMPV770X_CLK_PCIE_AUX 26
+#define TMPV770X_CLK_PIINTC 27
+#define TMPV770X_CLK_PIETHER_BUS 28
+#define TMPV770X_CLK_PISPI0 29
+#define TMPV770X_CLK_PISPI1 30
+#define TMPV770X_CLK_PISPI2 31
+#define TMPV770X_CLK_PISPI3 32
+#define TMPV770X_CLK_PISPI4 33
+#define TMPV770X_CLK_PISPI5 34
+#define TMPV770X_CLK_PISPI6 35
+#define TMPV770X_CLK_PIUART0 36
+#define TMPV770X_CLK_PIUART1 37
+#define TMPV770X_CLK_PIUART2 38
+#define TMPV770X_CLK_PIUART3 39
+#define TMPV770X_CLK_PII2C0 40
+#define TMPV770X_CLK_PII2C1 41
+#define TMPV770X_CLK_PII2C2 42
+#define TMPV770X_CLK_PII2C3 43
+#define TMPV770X_CLK_PII2C4 44
+#define TMPV770X_CLK_PII2C5 45
+#define TMPV770X_CLK_PII2C6 46
+#define TMPV770X_CLK_PII2C7 47
+#define TMPV770X_CLK_PII2C8 48
+#define TMPV770X_CLK_PIGPIO 49
+#define TMPV770X_CLK_PIPGM 50
+#define TMPV770X_CLK_PIPCMIF 51
+#define TMPV770X_CLK_PIPCMIF_AUDIO_O 52
+#define TMPV770X_CLK_PIPCMIF_AUDIO_I 53
+#define TMPV770X_CLK_PICMPT0 54
+#define TMPV770X_CLK_PICMPT1 55
+#define TMPV770X_CLK_PITSC 56
+#define TMPV770X_CLK_PIUWDT 57
+#define TMPV770X_CLK_PISWDT 58
+#define TMPV770X_CLK_WDTCLK 59
+#define TMPV770X_CLK_PISUBUS_150M 60
+#define TMPV770X_CLK_PISUBUS_300M 61
+#define TMPV770X_CLK_PIPMU 62
+#define TMPV770X_CLK_PIGPMU 63
+#define TMPV770X_CLK_PITMU 64
+#define TMPV770X_CLK_WRCK 65
+#define TMPV770X_CLK_PIEMM 66
+#define TMPV770X_CLK_PIMISC 67
+#define TMPV770X_CLK_PIGCOMM 68
+#define TMPV770X_CLK_PIDCOMM 69
+#define TMPV770X_CLK_PICKMON 70
+#define TMPV770X_CLK_PIMBUS 71
+#define TMPV770X_CLK_SBUSCLK 72
+#define TMPV770X_CLK_DDR0_APBCLKCLK 73
+#define TMPV770X_CLK_DDR1_APBCLKCLK 74
+#define TMPV770X_CLK_DSP0_PBCLK 75
+#define TMPV770X_CLK_DSP1_PBCLK 76
+#define TMPV770X_CLK_DSP2_PBCLK 77
+#define TMPV770X_CLK_DSP3_PBCLK 78
+#define TMPV770X_CLK_DSVIIF0_APBCLK 79
+#define TMPV770X_CLK_VIIF0_APBCLK 80
+#define TMPV770X_CLK_VIIF0_CFGCLK 81
+#define TMPV770X_CLK_VIIF1_APBCLK 82
+#define TMPV770X_CLK_VIIF1_CFGCLK 83
+#define TMPV770X_CLK_VIIF2_APBCLK 84
+#define TMPV770X_CLK_VIIF2_CFGCLK 85
+#define TMPV770X_CLK_VIIF3_APBCLK 86
+#define TMPV770X_CLK_VIIF3_CFGCLK 87
+#define TMPV770X_CLK_VIIF4_APBCLK 88
+#define TMPV770X_CLK_VIIF4_CFGCLK 89
+#define TMPV770X_CLK_VIIF5_APBCLK 90
+#define TMPV770X_CLK_VIIF5_CFGCLK 91
+#define TMPV770X_CLK_VOIF_SBUSCLK 92
+#define TMPV770X_CLK_VOIF_PROCCLK 93
+#define TMPV770X_CLK_VOIF_DPHYCFGCLK 94
+#define TMPV770X_CLK_DNN0 95
+#define TMPV770X_CLK_STMAT 96
+#define TMPV770X_CLK_HWA0 97
+#define TMPV770X_CLK_AFFINE0 98
+#define TMPV770X_CLK_HAMAT 99
+#define TMPV770X_CLK_SMLDB 100
+#define TMPV770X_CLK_HWA0_ASYNC 101
+#define TMPV770X_CLK_HWA2 102
+#define TMPV770X_CLK_FLMAT 103
+#define TMPV770X_CLK_PYRAMID 104
+#define TMPV770X_CLK_HWA2_ASYNC 105
+#define TMPV770X_CLK_DSP0 106
+#define TMPV770X_CLK_VIIFBS0 107
+#define TMPV770X_CLK_VIIFBS0_L2ISP 108
+#define TMPV770X_CLK_VIIFBS0_L1ISP 109
+#define TMPV770X_CLK_VIIFBS0_PROC 110
+#define TMPV770X_CLK_VIIFBS1 111
+#define TMPV770X_CLK_VIIFBS2 112
+#define TMPV770X_CLK_VIIFOP_MBUS 113
+#define TMPV770X_CLK_VIIFOP0_PROC 114
+#define TMPV770X_CLK_PIETHER_2P5M 115
+#define TMPV770X_CLK_PIETHER_25M 116
+#define TMPV770X_CLK_PIETHER_50M 117
+#define TMPV770X_CLK_PIETHER_125M 118
+#define TMPV770X_CLK_VOIF0_DPHYCFG 119
+#define TMPV770X_CLK_VOIF0_PROC 120
+#define TMPV770X_CLK_VOIF0_SBUS 121
+#define TMPV770X_CLK_VOIF0_DSIREF 122
+#define TMPV770X_CLK_VOIF0_PIXEL 123
+#define TMPV770X_CLK_PIREFCLK 124
+#define TMPV770X_CLK_SBUS 125
+#define TMPV770X_CLK_BUSLCK 126
+#define TMPV770X_CLK_VIIFBS1_L2ISP 127
+#define TMPV770X_CLK_VIIFBS1_L1ISP 128
+#define TMPV770X_CLK_VIIFBS1_PROC 129
+
+/* Reset */
+#define TMPV770X_RESET_PIETHER_2P5M 0
+#define TMPV770X_RESET_PIETHER_25M 1
+#define TMPV770X_RESET_PIETHER_50M 2
+#define TMPV770X_RESET_PIETHER_125M 3
+#define TMPV770X_RESET_HOX 4
+#define TMPV770X_RESET_PCIE_MSTR 5
+#define TMPV770X_RESET_PCIE_AUX 6
+#define TMPV770X_RESET_PIINTC 7
+#define TMPV770X_RESET_PIETHER_BUS 8
+#define TMPV770X_RESET_PISPI0 9
+#define TMPV770X_RESET_PISPI1 10
+#define TMPV770X_RESET_PISPI2 11
+#define TMPV770X_RESET_PISPI3 12
+#define TMPV770X_RESET_PISPI4 13
+#define TMPV770X_RESET_PISPI5 14
+#define TMPV770X_RESET_PISPI6 15
+#define TMPV770X_RESET_PIUART0 16
+#define TMPV770X_RESET_PIUART1 17
+#define TMPV770X_RESET_PIUART2 18
+#define TMPV770X_RESET_PIUART3 19
+#define TMPV770X_RESET_PII2C0 20
+#define TMPV770X_RESET_PII2C1 21
+#define TMPV770X_RESET_PII2C2 22
+#define TMPV770X_RESET_PII2C3 23
+#define TMPV770X_RESET_PII2C4 24
+#define TMPV770X_RESET_PII2C5 25
+#define TMPV770X_RESET_PII2C6 26
+#define TMPV770X_RESET_PII2C7 27
+#define TMPV770X_RESET_PII2C8 28
+#define TMPV770X_RESET_PIPCMIF 29
+#define TMPV770X_RESET_PICKMON 30
+#define TMPV770X_RESET_SBUSCLK 31
+#define TMPV770X_RESET_VIIFBS0 32
+#define TMPV770X_RESET_VIIFBS0_APB 33
+#define TMPV770X_RESET_VIIFBS0_L2ISP 34
+#define TMPV770X_RESET_VIIFBS0_L1ISP 35
+#define TMPV770X_RESET_VIIFBS1 36
+#define TMPV770X_RESET_VIIFBS1_APB 37
+#define TMPV770X_RESET_VIIFBS1_L2ISP 38
+#define TMPV770X_RESET_VIIFBS1_L1ISP 39
+
+#endif /* _DT_BINDINGS_CLOCK_TOSHIBA_TMPV770X_H_ */
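The TMPV770x header carries PLL, clock and reset indices side by side, so a peripheral will typically take its clock and its matching reset from the same provider node. A minimal sketch; the &pismu label, unit address and clock/reset pairing are assumptions based on the PI* naming, not statements from this diff:

#include <dt-bindings/clock/toshiba,tmpv770x.h>

piuart0: serial@28200000 {
        clocks = <&pismu TMPV770X_CLK_PIUART0>;
        resets = <&pismu TMPV770X_RESET_PIUART0>;
};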
diff --git a/include/dt-bindings/clk/versaclock.h b/include/dt-bindings/clock/versaclock.h
index c6a6a0946564..c6a6a0946564 100644
--- a/include/dt-bindings/clk/versaclock.h
+++ b/include/dt-bindings/clock/versaclock.h
diff --git a/include/dt-bindings/clock/xlnx-zynqmp-clk.h b/include/dt-bindings/clock/xlnx-zynqmp-clk.h
index cdc4c0b9a374..f0f7ddd3dcbd 100644
--- a/include/dt-bindings/clock/xlnx-zynqmp-clk.h
+++ b/include/dt-bindings/clock/xlnx-zynqmp-clk.h
@@ -9,6 +9,13 @@
#ifndef _DT_BINDINGS_CLK_ZYNQMP_H
#define _DT_BINDINGS_CLK_ZYNQMP_H
+/*
+ * These bindings are deprecated, because they do not match the actual
+ * concept of bindings but rather contain pure firmware values.
+ * Instead include the header in the DTS source directory.
+ */
+#warning "These bindings are deprecated. Instead use the header in the DTS source directory."
+
#define IOPLL 0
#define RPLL 1
#define APLL 2
diff --git a/include/dt-bindings/clock/zx296718-clock.h b/include/dt-bindings/clock/zx296718-clock.h
deleted file mode 100644
index bf2ff6d2ee23..000000000000
--- a/include/dt-bindings/clock/zx296718-clock.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2015 - 2016 ZTE Corporation.
- */
-#ifndef __DT_BINDINGS_CLOCK_ZX296718_H
-#define __DT_BINDINGS_CLOCK_ZX296718_H
-
-/* PLL */
-#define ZX296718_PLL_CPU 1
-#define ZX296718_PLL_MAC 2
-#define ZX296718_PLL_MM0 3
-#define ZX296718_PLL_MM1 4
-#define ZX296718_PLL_VGA 5
-#define ZX296718_PLL_DDR 6
-#define ZX296718_PLL_AUDIO 7
-#define ZX296718_PLL_HSIC 8
-#define CPU_DBG_GATE 9
-#define A72_GATE 10
-#define CPU_PERI_GATE 11
-#define A53_GATE 12
-#define DDR1_GATE 13
-#define DDR0_GATE 14
-#define SD1_WCLK 15
-#define SD1_AHB 16
-#define SD0_WCLK 17
-#define SD0_AHB 18
-#define EMMC_WCLK 19
-#define EMMC_NAND_AXI 20
-#define NAND_WCLK 21
-#define EMMC_NAND_AHB 22
-#define LSP1_148M5 23
-#define LSP1_99M 24
-#define LSP1_24M 25
-#define LSP0_74M25 26
-#define LSP0_32K 27
-#define LSP0_148M5 28
-#define LSP0_99M 29
-#define LSP0_24M 30
-#define DEMUX_AXI 31
-#define DEMUX_APB 32
-#define DEMUX_148M5 33
-#define DEMUX_108M 34
-#define AUDIO_APB 35
-#define AUDIO_99M 36
-#define AUDIO_24M 37
-#define AUDIO_16M384 38
-#define AUDIO_32K 39
-#define WDT_WCLK 40
-#define TIMER_WCLK 41
-#define VDE_ACLK 42
-#define VCE_ACLK 43
-#define HDE_ACLK 44
-#define GPU_ACLK 45
-#define SAPPU_ACLK 46
-#define SAPPU_WCLK 47
-#define VOU_ACLK 48
-#define VOU_MAIN_WCLK 49
-#define VOU_AUX_WCLK 50
-#define VOU_PPU_WCLK 51
-#define MIPI_CFG_CLK 52
-#define VGA_I2C_WCLK 53
-#define MIPI_REF_CLK 54
-#define HDMI_OSC_CEC 55
-#define HDMI_OSC_CLK 56
-#define HDMI_XCLK 57
-#define VIU_M0_ACLK 58
-#define VIU_M1_ACLK 59
-#define VIU_WCLK 60
-#define VIU_JPEG_WCLK 61
-#define VIU_CFG_CLK 62
-#define TS_SYS_WCLK 63
-#define TS_SYS_108M 64
-#define USB20_HCLK 65
-#define USB20_PHY_CLK 66
-#define USB21_HCLK 67
-#define USB21_PHY_CLK 68
-#define GMAC_RMIICLK 69
-#define GMAC_PCLK 70
-#define GMAC_ACLK 71
-#define GMAC_RFCLK 72
-#define TEMPSENSOR_GATE 73
-
-#define TOP_NR_CLKS 74
-
-
-#define LSP0_TIMER3_PCLK 1
-#define LSP0_TIMER3_WCLK 2
-#define LSP0_TIMER4_PCLK 3
-#define LSP0_TIMER4_WCLK 4
-#define LSP0_TIMER5_PCLK 5
-#define LSP0_TIMER5_WCLK 6
-#define LSP0_UART3_PCLK 7
-#define LSP0_UART3_WCLK 8
-#define LSP0_UART1_PCLK 9
-#define LSP0_UART1_WCLK 10
-#define LSP0_UART2_PCLK 11
-#define LSP0_UART2_WCLK 12
-#define LSP0_SPIFC0_PCLK 13
-#define LSP0_SPIFC0_WCLK 14
-#define LSP0_I2C4_PCLK 15
-#define LSP0_I2C4_WCLK 16
-#define LSP0_I2C5_PCLK 17
-#define LSP0_I2C5_WCLK 18
-#define LSP0_SSP0_PCLK 19
-#define LSP0_SSP0_WCLK 20
-#define LSP0_SSP1_PCLK 21
-#define LSP0_SSP1_WCLK 22
-#define LSP0_USIM_PCLK 23
-#define LSP0_USIM_WCLK 24
-#define LSP0_GPIO_PCLK 25
-#define LSP0_GPIO_WCLK 26
-#define LSP0_I2C3_PCLK 27
-#define LSP0_I2C3_WCLK 28
-
-#define LSP0_NR_CLKS 29
-
-
-#define LSP1_UART4_PCLK 1
-#define LSP1_UART4_WCLK 2
-#define LSP1_UART5_PCLK 3
-#define LSP1_UART5_WCLK 4
-#define LSP1_PWM_PCLK 5
-#define LSP1_PWM_WCLK 6
-#define LSP1_I2C2_PCLK 7
-#define LSP1_I2C2_WCLK 8
-#define LSP1_SSP2_PCLK 9
-#define LSP1_SSP2_WCLK 10
-#define LSP1_SSP3_PCLK 11
-#define LSP1_SSP3_WCLK 12
-#define LSP1_SSP4_PCLK 13
-#define LSP1_SSP4_WCLK 14
-#define LSP1_USIM1_PCLK 15
-#define LSP1_USIM1_WCLK 16
-
-#define LSP1_NR_CLKS 17
-
-
-#define AUDIO_I2S0_WCLK 1
-#define AUDIO_I2S0_PCLK 2
-#define AUDIO_I2S1_WCLK 3
-#define AUDIO_I2S1_PCLK 4
-#define AUDIO_I2S2_WCLK 5
-#define AUDIO_I2S2_PCLK 6
-#define AUDIO_I2S3_WCLK 7
-#define AUDIO_I2S3_PCLK 8
-#define AUDIO_I2C0_WCLK 9
-#define AUDIO_I2C0_PCLK 10
-#define AUDIO_SPDIF0_WCLK 11
-#define AUDIO_SPDIF0_PCLK 12
-#define AUDIO_SPDIF1_WCLK 13
-#define AUDIO_SPDIF1_PCLK 14
-#define AUDIO_TIMER_WCLK 15
-#define AUDIO_TIMER_PCLK 16
-#define AUDIO_TDM_WCLK 17
-#define AUDIO_TDM_PCLK 18
-#define AUDIO_TS_PCLK 19
-#define I2S0_WCLK_MUX 20
-#define I2S1_WCLK_MUX 21
-#define I2S2_WCLK_MUX 22
-#define I2S3_WCLK_MUX 23
-
-#define AUDIO_NR_CLKS 24
-
-#endif
diff --git a/include/dt-bindings/display/sdtv-standards.h b/include/dt-bindings/display/sdtv-standards.h
index fbc1a3db2ea7..8249a2b47b79 100644
--- a/include/dt-bindings/display/sdtv-standards.h
+++ b/include/dt-bindings/display/sdtv-standards.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only or X11 */
+/* SPDX-License-Identifier: GPL-2.0-only OR X11 */
/*
* Copyright 2019 Pengutronix, Marco Felsch <kernel@pengutronix.de>
*/
diff --git a/include/dt-bindings/dma/fsl-edma.h b/include/dt-bindings/dma/fsl-edma.h
new file mode 100644
index 000000000000..fd11478cfe9c
--- /dev/null
+++ b/include/dt-bindings/dma/fsl-edma.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#ifndef _FSL_EDMA_DT_BINDING_H_
+#define _FSL_EDMA_DT_BINDING_H_
+
+/* Receive Channel */
+#define FSL_EDMA_RX 0x1
+
+/* i.MX8 audio remote DMA */
+#define FSL_EDMA_REMOTE 0x2
+
+/* FIFO is a contiguous memory region */
+#define FSL_EDMA_MULTI_FIFO 0x4
+
+/* Channel must be an even-numbered channel */
+#define FSL_EDMA_EVEN_CH 0x8
+
+/* Channel must be an odd-numbered channel */
+#define FSL_EDMA_ODD_CH 0x10
+
+#endif
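These flag bits are meant to be ORed together inside a consumer's DMA specifier. A hedged sketch, assuming an eDMA node whose #dma-cells carry a channel, a priority, and these flags (all numeric values below are placeholders; the exact cell layout depends on the eDMA binding in use):

    #include <dt-bindings/dma/fsl-edma.h>

    sai1: sai@59040000 {
            dmas = <&edma0 14 0 (FSL_EDMA_MULTI_FIFO | FSL_EDMA_RX)>,
                   <&edma0 15 0 FSL_EDMA_MULTI_FIFO>;
            dma-names = "rx", "tx";
    };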
diff --git a/include/dt-bindings/firmware/imx/rsrc.h b/include/dt-bindings/firmware/imx/rsrc.h
index 43885056557c..1a8c025d77b8 100644
--- a/include/dt-bindings/firmware/imx/rsrc.h
+++ b/include/dt-bindings/firmware/imx/rsrc.h
@@ -13,34 +13,38 @@
* never be changed or removed (only added to at the end of the list).
*/
-#define IMX_SC_R_A53 0
-#define IMX_SC_R_A53_0 1
-#define IMX_SC_R_A53_1 2
-#define IMX_SC_R_A53_2 3
-#define IMX_SC_R_A53_3 4
-#define IMX_SC_R_A72 5
-#define IMX_SC_R_A72_0 6
-#define IMX_SC_R_A72_1 7
-#define IMX_SC_R_A72_2 8
-#define IMX_SC_R_A72_3 9
+#define IMX_SC_R_AP_0 0
+#define IMX_SC_R_AP_0_0 1
+#define IMX_SC_R_AP_0_1 2
+#define IMX_SC_R_AP_0_2 3
+#define IMX_SC_R_AP_0_3 4
+#define IMX_SC_R_AP_1 5
+#define IMX_SC_R_AP_1_0 6
+#define IMX_SC_R_AP_1_1 7
+#define IMX_SC_R_AP_1_2 8
+#define IMX_SC_R_AP_1_3 9
#define IMX_SC_R_CCI 10
#define IMX_SC_R_DB 11
#define IMX_SC_R_DRC_0 12
#define IMX_SC_R_DRC_1 13
#define IMX_SC_R_GIC_SMMU 14
-#define IMX_SC_R_IRQSTR_M4_0 15
-#define IMX_SC_R_IRQSTR_M4_1 16
-#define IMX_SC_R_SMMU 17
-#define IMX_SC_R_GIC 18
+#define IMX_SC_R_IRQSTR_MCU_0 15
+#define IMX_SC_R_IRQSTR_MCU_1 16
+#define IMX_SC_R_SMMU_0 17
+#define IMX_SC_R_GIC_0 18
#define IMX_SC_R_DC_0_BLIT0 19
#define IMX_SC_R_DC_0_BLIT1 20
#define IMX_SC_R_DC_0_BLIT2 21
#define IMX_SC_R_DC_0_BLIT_OUT 22
-#define IMX_SC_R_PERF 23
+#define IMX_SC_R_PERF_0 23
+#define IMX_SC_R_USB_1_PHY 24
#define IMX_SC_R_DC_0_WARP 25
+#define IMX_SC_R_V2X_MU_0 26
+#define IMX_SC_R_V2X_MU_1 27
#define IMX_SC_R_DC_0_VIDEO0 28
#define IMX_SC_R_DC_0_VIDEO1 29
#define IMX_SC_R_DC_0_FRAC0 30
+#define IMX_SC_R_V2X_MU_2 31
#define IMX_SC_R_DC_0 32
#define IMX_SC_R_GPU_2_PID0 33
#define IMX_SC_R_DC_0_PLL_0 34
@@ -49,11 +53,17 @@
#define IMX_SC_R_DC_1_BLIT1 37
#define IMX_SC_R_DC_1_BLIT2 38
#define IMX_SC_R_DC_1_BLIT_OUT 39
+#define IMX_SC_R_V2X_MU_3 40
+#define IMX_SC_R_V2X_MU_4 41
#define IMX_SC_R_DC_1_WARP 42
+#define IMX_SC_R_STM 43
+#define IMX_SC_R_SECVIO 44
#define IMX_SC_R_DC_1_VIDEO0 45
#define IMX_SC_R_DC_1_VIDEO1 46
#define IMX_SC_R_DC_1_FRAC0 47
+#define IMX_SC_R_V2X 48
#define IMX_SC_R_DC_1 49
+#define IMX_SC_R_UNUSED14 50
#define IMX_SC_R_DC_1_PLL_0 51
#define IMX_SC_R_DC_1_PLL_1 52
#define IMX_SC_R_SPI_0 53
@@ -144,10 +154,10 @@
#define IMX_SC_R_DMA_1_CH29 137
#define IMX_SC_R_DMA_1_CH30 138
#define IMX_SC_R_DMA_1_CH31 139
-#define IMX_SC_R_UNUSED1 140
-#define IMX_SC_R_UNUSED2 141
-#define IMX_SC_R_UNUSED3 142
-#define IMX_SC_R_UNUSED4 143
+#define IMX_SC_R_V2X_PID0 140
+#define IMX_SC_R_V2X_PID1 141
+#define IMX_SC_R_V2X_PID2 142
+#define IMX_SC_R_V2X_PID3 143
#define IMX_SC_R_GPU_0_PID0 144
#define IMX_SC_R_GPU_0_PID1 145
#define IMX_SC_R_GPU_0_PID2 146
@@ -176,7 +186,7 @@
#define IMX_SC_R_PCIE_B 169
#define IMX_SC_R_SATA_0 170
#define IMX_SC_R_SERDES_1 171
-#define IMX_SC_R_HSIO_GPIO 172
+#define IMX_SC_R_HSIO_GPIO_0 172
#define IMX_SC_R_MATCH_15 173
#define IMX_SC_R_MATCH_16 174
#define IMX_SC_R_MATCH_17 175
@@ -243,15 +253,15 @@
#define IMX_SC_R_ROM_0 236
#define IMX_SC_R_FSPI_0 237
#define IMX_SC_R_FSPI_1 238
-#define IMX_SC_R_IEE 239
-#define IMX_SC_R_IEE_R0 240
-#define IMX_SC_R_IEE_R1 241
-#define IMX_SC_R_IEE_R2 242
-#define IMX_SC_R_IEE_R3 243
-#define IMX_SC_R_IEE_R4 244
-#define IMX_SC_R_IEE_R5 245
-#define IMX_SC_R_IEE_R6 246
-#define IMX_SC_R_IEE_R7 247
+#define IMX_SC_R_IEE_0 239
+#define IMX_SC_R_IEE_0_R0 240
+#define IMX_SC_R_IEE_0_R1 241
+#define IMX_SC_R_IEE_0_R2 242
+#define IMX_SC_R_IEE_0_R3 243
+#define IMX_SC_R_IEE_0_R4 244
+#define IMX_SC_R_IEE_0_R5 245
+#define IMX_SC_R_IEE_0_R6 246
+#define IMX_SC_R_IEE_0_R7 247
#define IMX_SC_R_SDHC_0 248
#define IMX_SC_R_SDHC_1 249
#define IMX_SC_R_SDHC_2 250
@@ -282,46 +292,50 @@
#define IMX_SC_R_LVDS_2_PWM_0 275
#define IMX_SC_R_LVDS_2_I2C_0 276
#define IMX_SC_R_LVDS_2_I2C_1 277
-#define IMX_SC_R_M4_0_PID0 278
-#define IMX_SC_R_M4_0_PID1 279
-#define IMX_SC_R_M4_0_PID2 280
-#define IMX_SC_R_M4_0_PID3 281
-#define IMX_SC_R_M4_0_PID4 282
-#define IMX_SC_R_M4_0_RGPIO 283
-#define IMX_SC_R_M4_0_SEMA42 284
-#define IMX_SC_R_M4_0_TPM 285
-#define IMX_SC_R_M4_0_PIT 286
-#define IMX_SC_R_M4_0_UART 287
-#define IMX_SC_R_M4_0_I2C 288
-#define IMX_SC_R_M4_0_INTMUX 289
-#define IMX_SC_R_M4_0_MU_0B 292
-#define IMX_SC_R_M4_0_MU_0A0 293
-#define IMX_SC_R_M4_0_MU_0A1 294
-#define IMX_SC_R_M4_0_MU_0A2 295
-#define IMX_SC_R_M4_0_MU_0A3 296
-#define IMX_SC_R_M4_0_MU_1A 297
-#define IMX_SC_R_M4_1_PID0 298
-#define IMX_SC_R_M4_1_PID1 299
-#define IMX_SC_R_M4_1_PID2 300
-#define IMX_SC_R_M4_1_PID3 301
-#define IMX_SC_R_M4_1_PID4 302
-#define IMX_SC_R_M4_1_RGPIO 303
-#define IMX_SC_R_M4_1_SEMA42 304
-#define IMX_SC_R_M4_1_TPM 305
-#define IMX_SC_R_M4_1_PIT 306
-#define IMX_SC_R_M4_1_UART 307
-#define IMX_SC_R_M4_1_I2C 308
-#define IMX_SC_R_M4_1_INTMUX 309
-#define IMX_SC_R_M4_1_MU_0B 312
-#define IMX_SC_R_M4_1_MU_0A0 313
-#define IMX_SC_R_M4_1_MU_0A1 314
-#define IMX_SC_R_M4_1_MU_0A2 315
-#define IMX_SC_R_M4_1_MU_0A3 316
-#define IMX_SC_R_M4_1_MU_1A 317
+#define IMX_SC_R_MCU_0_PID0 278
+#define IMX_SC_R_MCU_0_PID1 279
+#define IMX_SC_R_MCU_0_PID2 280
+#define IMX_SC_R_MCU_0_PID3 281
+#define IMX_SC_R_MCU_0_PID4 282
+#define IMX_SC_R_MCU_0_RGPIO 283
+#define IMX_SC_R_MCU_0_SEMA42 284
+#define IMX_SC_R_MCU_0_TPM 285
+#define IMX_SC_R_MCU_0_PIT 286
+#define IMX_SC_R_MCU_0_UART 287
+#define IMX_SC_R_MCU_0_I2C 288
+#define IMX_SC_R_MCU_0_INTMUX 289
+#define IMX_SC_R_ENET_0_A0 290
+#define IMX_SC_R_ENET_0_A1 291
+#define IMX_SC_R_MCU_0_MU_0B 292
+#define IMX_SC_R_MCU_0_MU_0A0 293
+#define IMX_SC_R_MCU_0_MU_0A1 294
+#define IMX_SC_R_MCU_0_MU_0A2 295
+#define IMX_SC_R_MCU_0_MU_0A3 296
+#define IMX_SC_R_MCU_0_MU_1A 297
+#define IMX_SC_R_MCU_1_PID0 298
+#define IMX_SC_R_MCU_1_PID1 299
+#define IMX_SC_R_MCU_1_PID2 300
+#define IMX_SC_R_MCU_1_PID3 301
+#define IMX_SC_R_MCU_1_PID4 302
+#define IMX_SC_R_MCU_1_RGPIO 303
+#define IMX_SC_R_MCU_1_SEMA42 304
+#define IMX_SC_R_MCU_1_TPM 305
+#define IMX_SC_R_MCU_1_PIT 306
+#define IMX_SC_R_MCU_1_UART 307
+#define IMX_SC_R_MCU_1_I2C 308
+#define IMX_SC_R_MCU_1_INTMUX 309
+#define IMX_SC_R_UNUSED17 310
+#define IMX_SC_R_UNUSED18 311
+#define IMX_SC_R_MCU_1_MU_0B 312
+#define IMX_SC_R_MCU_1_MU_0A0 313
+#define IMX_SC_R_MCU_1_MU_0A1 314
+#define IMX_SC_R_MCU_1_MU_0A2 315
+#define IMX_SC_R_MCU_1_MU_0A3 316
+#define IMX_SC_R_MCU_1_MU_1A 317
#define IMX_SC_R_SAI_0 318
#define IMX_SC_R_SAI_1 319
#define IMX_SC_R_SAI_2 320
-#define IMX_SC_R_IRQSTR_SCU2 321
+#define IMX_SC_R_IRQSTR_AP_0 321
#define IMX_SC_R_IRQSTR_DSP 322
#define IMX_SC_R_ELCDIF_PLL 323
#define IMX_SC_R_OCRAM 324
@@ -366,33 +380,33 @@
#define IMX_SC_R_VPU_PID5 363
#define IMX_SC_R_VPU_PID6 364
#define IMX_SC_R_VPU_PID7 365
-#define IMX_SC_R_VPU_UART 366
-#define IMX_SC_R_VPUCORE 367
-#define IMX_SC_R_VPUCORE_0 368
-#define IMX_SC_R_VPUCORE_1 369
-#define IMX_SC_R_VPUCORE_2 370
-#define IMX_SC_R_VPUCORE_3 371
+#define IMX_SC_R_ENET_0_A2 366
+#define IMX_SC_R_ENET_1_A0 367
+#define IMX_SC_R_ENET_1_A1 368
+#define IMX_SC_R_ENET_1_A2 369
+#define IMX_SC_R_ENET_1_A3 370
+#define IMX_SC_R_ENET_1_A4 371
#define IMX_SC_R_DMA_4_CH0 372
#define IMX_SC_R_DMA_4_CH1 373
#define IMX_SC_R_DMA_4_CH2 374
#define IMX_SC_R_DMA_4_CH3 375
#define IMX_SC_R_DMA_4_CH4 376
-#define IMX_SC_R_ISI_CH0 377
-#define IMX_SC_R_ISI_CH1 378
-#define IMX_SC_R_ISI_CH2 379
-#define IMX_SC_R_ISI_CH3 380
-#define IMX_SC_R_ISI_CH4 381
-#define IMX_SC_R_ISI_CH5 382
-#define IMX_SC_R_ISI_CH6 383
-#define IMX_SC_R_ISI_CH7 384
-#define IMX_SC_R_MJPEG_DEC_S0 385
-#define IMX_SC_R_MJPEG_DEC_S1 386
-#define IMX_SC_R_MJPEG_DEC_S2 387
-#define IMX_SC_R_MJPEG_DEC_S3 388
-#define IMX_SC_R_MJPEG_ENC_S0 389
-#define IMX_SC_R_MJPEG_ENC_S1 390
-#define IMX_SC_R_MJPEG_ENC_S2 391
-#define IMX_SC_R_MJPEG_ENC_S3 392
+#define IMX_SC_R_ISI_0_CH0 377
+#define IMX_SC_R_ISI_0_CH1 378
+#define IMX_SC_R_ISI_0_CH2 379
+#define IMX_SC_R_ISI_0_CH3 380
+#define IMX_SC_R_ISI_0_CH4 381
+#define IMX_SC_R_ISI_0_CH5 382
+#define IMX_SC_R_ISI_0_CH6 383
+#define IMX_SC_R_ISI_0_CH7 384
+#define IMX_SC_R_MJPEG_0_DEC_S0 385
+#define IMX_SC_R_MJPEG_0_DEC_S1 386
+#define IMX_SC_R_MJPEG_0_DEC_S2 387
+#define IMX_SC_R_MJPEG_0_DEC_S3 388
+#define IMX_SC_R_MJPEG_0_ENC_S0 389
+#define IMX_SC_R_MJPEG_0_ENC_S1 390
+#define IMX_SC_R_MJPEG_0_ENC_S2 391
+#define IMX_SC_R_MJPEG_0_ENC_S3 392
#define IMX_SC_R_MIPI_0 393
#define IMX_SC_R_MIPI_0_PWM_0 394
#define IMX_SC_R_MIPI_0_I2C_0 395
@@ -507,11 +521,11 @@
#define IMX_SC_R_SECO_MU_3 504
#define IMX_SC_R_SECO_MU_4 505
#define IMX_SC_R_HDMI_RX_PWM_0 506
-#define IMX_SC_R_A35 507
-#define IMX_SC_R_A35_0 508
-#define IMX_SC_R_A35_1 509
-#define IMX_SC_R_A35_2 510
-#define IMX_SC_R_A35_3 511
+#define IMX_SC_R_AP_2 507
+#define IMX_SC_R_AP_2_0 508
+#define IMX_SC_R_AP_2_1 509
+#define IMX_SC_R_AP_2_2 510
+#define IMX_SC_R_AP_2_3 511
#define IMX_SC_R_DSP 512
#define IMX_SC_R_DSP_RAM 513
#define IMX_SC_R_CAAM_JR1_OUT 514
@@ -532,8 +546,8 @@
#define IMX_SC_R_BOARD_R5 529
#define IMX_SC_R_BOARD_R6 530
#define IMX_SC_R_BOARD_R7 531
-#define IMX_SC_R_MJPEG_DEC_MP 532
-#define IMX_SC_R_MJPEG_ENC_MP 533
+#define IMX_SC_R_MJPEG_0_DEC_MP 532
+#define IMX_SC_R_MJPEG_0_ENC_MP 533
#define IMX_SC_R_VPU_TS_0 534
#define IMX_SC_R_VPU_MU_0 535
#define IMX_SC_R_VPU_MU_1 536
@@ -566,6 +580,105 @@
#define IMX_SC_PM_CLK_BYPASS 4 /* Bypass clock */
/*
+ * Compatibility defines for sc_rsrc_t
+ */
+#define IMX_SC_R_A35 IMX_SC_R_AP_2
+#define IMX_SC_R_A35_0 IMX_SC_R_AP_2_0
+#define IMX_SC_R_A35_1 IMX_SC_R_AP_2_1
+#define IMX_SC_R_A35_2 IMX_SC_R_AP_2_2
+#define IMX_SC_R_A35_3 IMX_SC_R_AP_2_3
+#define IMX_SC_R_A53 IMX_SC_R_AP_0
+#define IMX_SC_R_A53_0 IMX_SC_R_AP_0_0
+#define IMX_SC_R_A53_1 IMX_SC_R_AP_0_1
+#define IMX_SC_R_A53_2 IMX_SC_R_AP_0_2
+#define IMX_SC_R_A53_3 IMX_SC_R_AP_0_3
+#define IMX_SC_R_A72 IMX_SC_R_AP_1
+#define IMX_SC_R_A72_0 IMX_SC_R_AP_1_0
+#define IMX_SC_R_A72_1 IMX_SC_R_AP_1_1
+#define IMX_SC_R_A72_2 IMX_SC_R_AP_1_2
+#define IMX_SC_R_A72_3 IMX_SC_R_AP_1_3
+#define IMX_SC_R_GIC IMX_SC_R_GIC_0
+#define IMX_SC_R_HSIO_GPIO IMX_SC_R_HSIO_GPIO_0
+#define IMX_SC_R_IEE IMX_SC_R_IEE_0
+#define IMX_SC_R_IEE_R0 IMX_SC_R_IEE_0_R0
+#define IMX_SC_R_IEE_R1 IMX_SC_R_IEE_0_R1
+#define IMX_SC_R_IEE_R2 IMX_SC_R_IEE_0_R2
+#define IMX_SC_R_IEE_R3 IMX_SC_R_IEE_0_R3
+#define IMX_SC_R_IEE_R4 IMX_SC_R_IEE_0_R4
+#define IMX_SC_R_IEE_R5 IMX_SC_R_IEE_0_R5
+#define IMX_SC_R_IEE_R6 IMX_SC_R_IEE_0_R6
+#define IMX_SC_R_IEE_R7 IMX_SC_R_IEE_0_R7
+#define IMX_SC_R_IRQSTR_M4_0 IMX_SC_R_IRQSTR_MCU_0
+#define IMX_SC_R_IRQSTR_M4_1 IMX_SC_R_IRQSTR_MCU_1
+#define IMX_SC_R_IRQSTR_SCU2 IMX_SC_R_IRQSTR_AP_0
+#define IMX_SC_R_ISI_CH0 IMX_SC_R_ISI_0_CH0
+#define IMX_SC_R_ISI_CH1 IMX_SC_R_ISI_0_CH1
+#define IMX_SC_R_ISI_CH2 IMX_SC_R_ISI_0_CH2
+#define IMX_SC_R_ISI_CH3 IMX_SC_R_ISI_0_CH3
+#define IMX_SC_R_ISI_CH4 IMX_SC_R_ISI_0_CH4
+#define IMX_SC_R_ISI_CH5 IMX_SC_R_ISI_0_CH5
+#define IMX_SC_R_ISI_CH6 IMX_SC_R_ISI_0_CH6
+#define IMX_SC_R_ISI_CH7 IMX_SC_R_ISI_0_CH7
+#define IMX_SC_R_M4_0_I2C IMX_SC_R_MCU_0_I2C
+#define IMX_SC_R_M4_0_INTMUX IMX_SC_R_MCU_0_INTMUX
+#define IMX_SC_R_M4_0_MU_0A0 IMX_SC_R_MCU_0_MU_0A0
+#define IMX_SC_R_M4_0_MU_0A1 IMX_SC_R_MCU_0_MU_0A1
+#define IMX_SC_R_M4_0_MU_0A2 IMX_SC_R_MCU_0_MU_0A2
+#define IMX_SC_R_M4_0_MU_0A3 IMX_SC_R_MCU_0_MU_0A3
+#define IMX_SC_R_M4_0_MU_0B IMX_SC_R_MCU_0_MU_0B
+#define IMX_SC_R_M4_0_MU_1A IMX_SC_R_MCU_0_MU_1A
+#define IMX_SC_R_M4_0_PID0 IMX_SC_R_MCU_0_PID0
+#define IMX_SC_R_M4_0_PID1 IMX_SC_R_MCU_0_PID1
+#define IMX_SC_R_M4_0_PID2 IMX_SC_R_MCU_0_PID2
+#define IMX_SC_R_M4_0_PID3 IMX_SC_R_MCU_0_PID3
+#define IMX_SC_R_M4_0_PID4 IMX_SC_R_MCU_0_PID4
+#define IMX_SC_R_M4_0_PIT IMX_SC_R_MCU_0_PIT
+#define IMX_SC_R_M4_0_RGPIO IMX_SC_R_MCU_0_RGPIO
+#define IMX_SC_R_M4_0_SEMA42 IMX_SC_R_MCU_0_SEMA42
+#define IMX_SC_R_M4_0_TPM IMX_SC_R_MCU_0_TPM
+#define IMX_SC_R_M4_0_UART IMX_SC_R_MCU_0_UART
+#define IMX_SC_R_M4_1_I2C IMX_SC_R_MCU_1_I2C
+#define IMX_SC_R_M4_1_INTMUX IMX_SC_R_MCU_1_INTMUX
+#define IMX_SC_R_M4_1_MU_0A0 IMX_SC_R_MCU_1_MU_0A0
+#define IMX_SC_R_M4_1_MU_0A1 IMX_SC_R_MCU_1_MU_0A1
+#define IMX_SC_R_M4_1_MU_0A2 IMX_SC_R_MCU_1_MU_0A2
+#define IMX_SC_R_M4_1_MU_0A3 IMX_SC_R_MCU_1_MU_0A3
+#define IMX_SC_R_M4_1_MU_0B IMX_SC_R_MCU_1_MU_0B
+#define IMX_SC_R_M4_1_MU_1A IMX_SC_R_MCU_1_MU_1A
+#define IMX_SC_R_M4_1_PID0 IMX_SC_R_MCU_1_PID0
+#define IMX_SC_R_M4_1_PID1 IMX_SC_R_MCU_1_PID1
+#define IMX_SC_R_M4_1_PID2 IMX_SC_R_MCU_1_PID2
+#define IMX_SC_R_M4_1_PID3 IMX_SC_R_MCU_1_PID3
+#define IMX_SC_R_M4_1_PID4 IMX_SC_R_MCU_1_PID4
+#define IMX_SC_R_M4_1_PIT IMX_SC_R_MCU_1_PIT
+#define IMX_SC_R_M4_1_RGPIO IMX_SC_R_MCU_1_RGPIO
+#define IMX_SC_R_M4_1_SEMA42 IMX_SC_R_MCU_1_SEMA42
+#define IMX_SC_R_M4_1_TPM IMX_SC_R_MCU_1_TPM
+#define IMX_SC_R_M4_1_UART IMX_SC_R_MCU_1_UART
+#define IMX_SC_R_MJPEG_DEC_MP IMX_SC_R_MJPEG_0_DEC_MP
+#define IMX_SC_R_MJPEG_DEC_S0 IMX_SC_R_MJPEG_0_DEC_S0
+#define IMX_SC_R_MJPEG_DEC_S1 IMX_SC_R_MJPEG_0_DEC_S1
+#define IMX_SC_R_MJPEG_DEC_S2 IMX_SC_R_MJPEG_0_DEC_S2
+#define IMX_SC_R_MJPEG_DEC_S3 IMX_SC_R_MJPEG_0_DEC_S3
+#define IMX_SC_R_MJPEG_ENC_MP IMX_SC_R_MJPEG_0_ENC_MP
+#define IMX_SC_R_MJPEG_ENC_S0 IMX_SC_R_MJPEG_0_ENC_S0
+#define IMX_SC_R_MJPEG_ENC_S1 IMX_SC_R_MJPEG_0_ENC_S1
+#define IMX_SC_R_MJPEG_ENC_S2 IMX_SC_R_MJPEG_0_ENC_S2
+#define IMX_SC_R_MJPEG_ENC_S3 IMX_SC_R_MJPEG_0_ENC_S3
+#define IMX_SC_R_PERF IMX_SC_R_PERF_0
+#define IMX_SC_R_SMMU IMX_SC_R_SMMU_0
+#define IMX_SC_R_VPU_UART IMX_SC_R_ENET_0_A2
+#define IMX_SC_R_VPUCORE IMX_SC_R_ENET_1_A0
+#define IMX_SC_R_VPUCORE_0 IMX_SC_R_ENET_1_A1
+#define IMX_SC_R_VPUCORE_1 IMX_SC_R_ENET_1_A2
+#define IMX_SC_R_VPUCORE_2 IMX_SC_R_ENET_1_A3
+#define IMX_SC_R_VPUCORE_3 IMX_SC_R_ENET_1_A4
+#define IMX_SC_R_UNUSED1 IMX_SC_R_V2X_PID0
+#define IMX_SC_R_UNUSED2 IMX_SC_R_V2X_PID1
+#define IMX_SC_R_UNUSED3 IMX_SC_R_V2X_PID2
+#define IMX_SC_R_UNUSED4 IMX_SC_R_V2X_PID3
+
+/*
* Defines for SC CONTROL
*/
#define IMX_SC_C_TEMP 0
@@ -630,6 +743,10 @@
#define IMX_SC_C_INTF_SEL 59
#define IMX_SC_C_RXC_DLY 60
#define IMX_SC_C_TIMER_SEL 61
-#define IMX_SC_C_LAST 62
+#define IMX_SC_C_MISC0 62
+#define IMX_SC_C_MISC1 63
+#define IMX_SC_C_MISC2 64
+#define IMX_SC_C_MISC3 65
+#define IMX_SC_C_LAST 66
#endif /* __DT_BINDINGS_RSCRC_IMX_H */
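The compatibility defines above mean existing device trees that use the old resource names keep working: both spellings expand to the same firmware resource number. For example (the pd label is an assumption):

    #include <dt-bindings/firmware/imx/rsrc.h>

    cpu0: cpu@0 {
            /* IMX_SC_R_A53_0 expands to IMX_SC_R_AP_0_0, i.e. 1 */
            power-domains = <&pd IMX_SC_R_A53_0>;
    };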
diff --git a/include/dt-bindings/firmware/qcom,scm.h b/include/dt-bindings/firmware/qcom,scm.h
new file mode 100644
index 000000000000..6de8b08e1e79
--- /dev/null
+++ b/include/dt-bindings/firmware/qcom,scm.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2010-2015, 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_FIRMWARE_QCOM_SCM_H
+#define _DT_BINDINGS_FIRMWARE_QCOM_SCM_H
+
+#define QCOM_SCM_VMID_TZ 0x1
+#define QCOM_SCM_VMID_HLOS 0x3
+#define QCOM_SCM_VMID_SSC_Q6 0x5
+#define QCOM_SCM_VMID_ADSP_Q6 0x6
+#define QCOM_SCM_VMID_CP_TOUCH 0x8
+#define QCOM_SCM_VMID_CP_BITSTREAM 0x9
+#define QCOM_SCM_VMID_CP_PIXEL 0xA
+#define QCOM_SCM_VMID_CP_NON_PIXEL 0xB
+#define QCOM_SCM_VMID_CP_CAMERA 0xD
+#define QCOM_SCM_VMID_HLOS_FREE 0xE
+#define QCOM_SCM_VMID_MSS_MSA 0xF
+#define QCOM_SCM_VMID_MSS_NONMSA 0x10
+#define QCOM_SCM_VMID_CP_SEC_DISPLAY 0x11
+#define QCOM_SCM_VMID_CP_APP 0x12
+#define QCOM_SCM_VMID_LPASS 0x16
+#define QCOM_SCM_VMID_WLAN 0x18
+#define QCOM_SCM_VMID_WLAN_CE 0x19
+#define QCOM_SCM_VMID_CP_SPSS_SP 0x1A
+#define QCOM_SCM_VMID_CP_CAMERA_PREVIEW 0x1D
+#define QCOM_SCM_VMID_CDSP 0x1E
+#define QCOM_SCM_VMID_CP_SPSS_SP_SHARED 0x22
+#define QCOM_SCM_VMID_CP_SPSS_HLOS_SHARED 0x24
+#define QCOM_SCM_VMID_ADSP_HEAP 0x25
+#define QCOM_SCM_VMID_CP_CDSP 0x2A
+#define QCOM_SCM_VMID_NAV 0x2B
+#define QCOM_SCM_VMID_TVM 0x2D
+#define QCOM_SCM_VMID_OEMVM 0x31
+
+#endif
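These VMIDs are typically referenced when a memory region is assigned to another VM, for instance through the qcom,vmid property of a qcom,rmtfs-mem reserved-memory node. A sketch with placeholder address and size:

    #include <dt-bindings/firmware/qcom,scm.h>

    rmtfs_mem: memory@f6301000 {
            compatible = "qcom,rmtfs-mem";
            reg = <0x0 0xf6301000 0x0 0x200000>;
            no-map;
            qcom,client-id = <1>;
            qcom,vmid = <QCOM_SCM_VMID_MSS_MSA>;
    };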
diff --git a/include/dt-bindings/gce/mediatek,mt6795-gce.h b/include/dt-bindings/gce/mediatek,mt6795-gce.h
new file mode 100644
index 000000000000..97d5ba2d2b44
--- /dev/null
+++ b/include/dt-bindings/gce/mediatek,mt6795-gce.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+#ifndef _DT_BINDINGS_GCE_MT6795_H
+#define _DT_BINDINGS_GCE_MT6795_H
+
+/* GCE HW thread priority */
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_NORMAL 1
+#define CMDQ_THR_PRIO_NORMAL_2 2
+#define CMDQ_THR_PRIO_MEDIUM 3
+#define CMDQ_THR_PRIO_MEDIUM_2 4
+#define CMDQ_THR_PRIO_HIGH 5
+#define CMDQ_THR_PRIO_HIGHER 6
+#define CMDQ_THR_PRIO_HIGHEST 7
+
+/* GCE SUBSYS */
+#define SUBSYS_1300XXXX 0
+#define SUBSYS_1400XXXX 1
+#define SUBSYS_1401XXXX 2
+#define SUBSYS_1402XXXX 3
+#define SUBSYS_1500XXXX 4
+#define SUBSYS_1600XXXX 5
+#define SUBSYS_1700XXXX 6
+#define SUBSYS_1800XXXX 7
+#define SUBSYS_1000XXXX 8
+#define SUBSYS_1001XXXX 9
+#define SUBSYS_1002XXXX 10
+#define SUBSYS_1003XXXX 11
+#define SUBSYS_1004XXXX 12
+#define SUBSYS_1005XXXX 13
+#define SUBSYS_1020XXXX 14
+#define SUBSYS_1021XXXX 15
+#define SUBSYS_1120XXXX 16
+#define SUBSYS_1121XXXX 17
+#define SUBSYS_1122XXXX 18
+#define SUBSYS_1123XXXX 19
+#define SUBSYS_1124XXXX 20
+#define SUBSYS_1125XXXX 21
+#define SUBSYS_1126XXXX 22
+
+/* GCE HW EVENT */
+#define CMDQ_EVENT_MDP_RDMA0_SOF 0
+#define CMDQ_EVENT_MDP_RDMA1_SOF 1
+#define CMDQ_EVENT_MDP_DSI0_TE_SOF 2
+#define CMDQ_EVENT_MDP_DSI1_TE_SOF 3
+#define CMDQ_EVENT_MDP_MVW_SOF 4
+#define CMDQ_EVENT_MDP_TDSHP0_SOF 5
+#define CMDQ_EVENT_MDP_TDSHP1_SOF 6
+#define CMDQ_EVENT_MDP_WDMA_SOF 7
+#define CMDQ_EVENT_MDP_WROT0_SOF 8
+#define CMDQ_EVENT_MDP_WROT1_SOF 9
+#define CMDQ_EVENT_MDP_CROP_SOF 10
+#define CMDQ_EVENT_DISP_OVL0_SOF 11
+#define CMDQ_EVENT_DISP_OVL1_SOF 12
+#define CMDQ_EVENT_DISP_RDMA0_SOF 13
+#define CMDQ_EVENT_DISP_RDMA1_SOF 14
+#define CMDQ_EVENT_DISP_RDMA2_SOF 15
+#define CMDQ_EVENT_DISP_WDMA0_SOF 16
+#define CMDQ_EVENT_DISP_WDMA1_SOF 17
+#define CMDQ_EVENT_DISP_COLOR0_SOF 18
+#define CMDQ_EVENT_DISP_COLOR1_SOF 19
+#define CMDQ_EVENT_DISP_AAL_SOF 20
+#define CMDQ_EVENT_DISP_GAMMA_SOF 21
+#define CMDQ_EVENT_DISP_UFOE_SOF 22
+#define CMDQ_EVENT_DISP_PWM0_SOF 23
+#define CMDQ_EVENT_DISP_PWM1_SOF 24
+#define CMDQ_EVENT_DISP_OD_SOF 25
+#define CMDQ_EVENT_MDP_RDMA0_EOF 26
+#define CMDQ_EVENT_MDP_RDMA1_EOF 27
+#define CMDQ_EVENT_MDP_RSZ0_EOF 28
+#define CMDQ_EVENT_MDP_RSZ1_EOF 29
+#define CMDQ_EVENT_MDP_RSZ2_EOF 30
+#define CMDQ_EVENT_MDP_TDSHP0_EOF 31
+#define CMDQ_EVENT_MDP_TDSHP1_EOF 32
+#define CMDQ_EVENT_MDP_WDMA_EOF 33
+#define CMDQ_EVENT_MDP_WROT0_WRITE_EOF 34
+#define CMDQ_EVENT_MDP_WROT0_READ_EOF 35
+#define CMDQ_EVENT_MDP_WROT1_WRITE_EOF 36
+#define CMDQ_EVENT_MDP_WROT1_READ_EOF 37
+#define CMDQ_EVENT_MDP_CROP_EOF 38
+#define CMDQ_EVENT_DISP_OVL0_EOF 39
+#define CMDQ_EVENT_DISP_OVL1_EOF 40
+#define CMDQ_EVENT_DISP_RDMA0_EOF 41
+#define CMDQ_EVENT_DISP_RDMA1_EOF 42
+#define CMDQ_EVENT_DISP_RDMA2_EOF 43
+#define CMDQ_EVENT_DISP_WDMA0_EOF 44
+#define CMDQ_EVENT_DISP_WDMA1_EOF 45
+#define CMDQ_EVENT_DISP_COLOR0_EOF 46
+#define CMDQ_EVENT_DISP_COLOR1_EOF 47
+#define CMDQ_EVENT_DISP_AAL_EOF 48
+#define CMDQ_EVENT_DISP_GAMMA_EOF 49
+#define CMDQ_EVENT_DISP_UFOE_EOF 50
+#define CMDQ_EVENT_DISP_DPI0_EOF 51
+#define CMDQ_EVENT_MUTEX0_STREAM_EOF 52
+#define CMDQ_EVENT_MUTEX1_STREAM_EOF 53
+#define CMDQ_EVENT_MUTEX2_STREAM_EOF 54
+#define CMDQ_EVENT_MUTEX3_STREAM_EOF 55
+#define CMDQ_EVENT_MUTEX4_STREAM_EOF 56
+#define CMDQ_EVENT_MUTEX5_STREAM_EOF 57
+#define CMDQ_EVENT_MUTEX6_STREAM_EOF 58
+#define CMDQ_EVENT_MUTEX7_STREAM_EOF 59
+#define CMDQ_EVENT_MUTEX8_STREAM_EOF 60
+#define CMDQ_EVENT_MUTEX9_STREAM_EOF 61
+#define CMDQ_EVENT_DISP_RDMA0_UNDERRUN 62
+#define CMDQ_EVENT_DISP_RDMA1_UNDERRUN 63
+#define CMDQ_EVENT_DISP_RDMA2_UNDERRUN 64
+#define CMDQ_EVENT_ISP_PASS2_2_EOF 129
+#define CMDQ_EVENT_ISP_PASS2_1_EOF 130
+#define CMDQ_EVENT_ISP_PASS2_0_EOF 131
+#define CMDQ_EVENT_ISP_PASS1_1_EOF 132
+#define CMDQ_EVENT_ISP_PASS1_0_EOF 133
+#define CMDQ_EVENT_CAMSV_2_PASS1_EOF 134
+#define CMDQ_EVENT_CAMSV_1_PASS1_EOF 135
+#define CMDQ_EVENT_SENINF_CAM1_2_3_FIFO_FULL 136
+#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 137
+#define CMDQ_EVENT_JPGENC_PASS2_EOF 257
+#define CMDQ_EVENT_JPGENC_PASS1_EOF 258
+#define CMDQ_EVENT_JPGDEC_EOF 259
+
+#endif
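Two common consumer patterns for these constants: the SUBSYS_* indices appear in mediatek,gce-client-reg phandle specifiers, and the CMDQ_EVENT_* ids in mediatek,gce-events. A hedged sketch (node addresses, register offsets, and sizes are placeholders):

    #include <dt-bindings/gce/mediatek,mt6795-gce.h>

    mutex: mutex@14020000 {
            mediatek,gce-events = <CMDQ_EVENT_MUTEX0_STREAM_EOF>,
                                  <CMDQ_EVENT_MUTEX1_STREAM_EOF>;
    };

    ovl0: ovl@1400c000 {
            mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xc000 0x1000>;
    };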
diff --git a/include/dt-bindings/gce/mt8186-gce.h b/include/dt-bindings/gce/mt8186-gce.h
new file mode 100644
index 000000000000..f12e3cb586ce
--- /dev/null
+++ b/include/dt-bindings/gce/mt8186-gce.h
@@ -0,0 +1,421 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Author: Yongqiang Niu <yongqiang.niu@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_GCE_MT8186_H
+#define _DT_BINDINGS_GCE_MT8186_H
+
+/* assigning timeout 0 also means using the default */
+#define CMDQ_NO_TIMEOUT 0xffffffff
+#define CMDQ_TIMEOUT_DEFAULT 1000
+
+/* GCE thread priority */
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_1 1
+#define CMDQ_THR_PRIO_2 2
+#define CMDQ_THR_PRIO_3 3
+#define CMDQ_THR_PRIO_4 4
+#define CMDQ_THR_PRIO_5 5
+#define CMDQ_THR_PRIO_6 6
+#define CMDQ_THR_PRIO_HIGHEST 7
+
+/* number of 32-bit CPR registers */
+#define GCE_CPR_COUNT 1312
+
+/* GCE subsys table */
+#define SUBSYS_1300XXXX 0
+#define SUBSYS_1400XXXX 1
+#define SUBSYS_1401XXXX 2
+#define SUBSYS_1402XXXX 3
+#define SUBSYS_1502XXXX 4
+#define SUBSYS_1582XXXX 5
+#define SUBSYS_1B00XXXX 6
+#define SUBSYS_1C00XXXX 7
+#define SUBSYS_1C10XXXX 8
+#define SUBSYS_1000XXXX 9
+#define SUBSYS_1001XXXX 10
+#define SUBSYS_1020XXXX 11
+#define SUBSYS_1021XXXX 12
+#define SUBSYS_1022XXXX 13
+#define SUBSYS_1023XXXX 14
+#define SUBSYS_1060XXXX 15
+#define SUBSYS_1602XXXX 16
+#define SUBSYS_1608XXXX 17
+#define SUBSYS_1700XXXX 18
+#define SUBSYS_1701XXXX 19
+#define SUBSYS_1702XXXX 20
+#define SUBSYS_1703XXXX 21
+#define SUBSYS_1706XXXX 22
+#define SUBSYS_1A00XXXX 23
+#define SUBSYS_1A01XXXX 24
+#define SUBSYS_1A02XXXX 25
+#define SUBSYS_1A03XXXX 26
+#define SUBSYS_1A04XXXX 27
+#define SUBSYS_1A05XXXX 28
+#define SUBSYS_1A06XXXX 29
+#define SUBSYS_NO_SUPPORT 99
+
+/* GCE General Purpose Register (GPR) support.
+ * Notes on per-scenario usage are kept beside each register below.
+ */
+/* GCE: write mask */
+#define GCE_GPR_R00 0x00
+#define GCE_GPR_R01 0x01
+/* MDP: P1: JPEG dest */
+#define GCE_GPR_R02 0x02
+#define GCE_GPR_R03 0x03
+/* MDP: PQ color */
+#define GCE_GPR_R04 0x04
+/* MDP: 2D sharpness */
+#define GCE_GPR_R05 0x05
+/* DISP: poll esd */
+#define GCE_GPR_R06 0x06
+#define GCE_GPR_R07 0x07
+/* MDP: P4: 2D sharpness dst */
+#define GCE_GPR_R08 0x08
+#define GCE_GPR_R09 0x09
+/* VCU: poll with timeout for GPR timer */
+#define GCE_GPR_R10 0x0A
+#define GCE_GPR_R11 0x0B
+/* CMDQ: debug */
+#define GCE_GPR_R12 0x0C
+#define GCE_GPR_R13 0x0D
+/* CMDQ: P7: debug */
+#define GCE_GPR_R14 0x0E
+#define GCE_GPR_R15 0x0F
+
+/* GCE hardware events */
+/* VDEC */
+#define CMDQ_EVENT_LINE_COUNT_THRESHOLD_INTERRUPT 0
+#define CMDQ_EVENT_VDEC_INT 1
+#define CMDQ_EVENT_VDEC_PAUSE 2
+#define CMDQ_EVENT_VDEC_DEC_ERROR 3
+#define CMDQ_EVENT_MDEC_TIMEOUT 4
+#define CMDQ_EVENT_DRAM_ACCESS_DONE 5
+#define CMDQ_EVENT_INI_FETCH_RDY 6
+#define CMDQ_EVENT_PROCESS_FLAG 7
+#define CMDQ_EVENT_SEARCH_START_CODE_DONE 8
+#define CMDQ_EVENT_REF_REORDER_DONE 9
+#define CMDQ_EVENT_WP_TBLE_DONE 10
+#define CMDQ_EVENT_COUNT_SRAM_CLR_DONE 11
+#define CMDQ_EVENT_GCE_CNT_OP_THRESHOLD 15
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_0 16
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_1 17
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_2 18
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_3 19
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_4 20
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_5 21
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_6 22
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_7 23
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_8 24
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_9 25
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_10 26
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_11 27
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_12 28
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_13 29
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_14 30
+#define CMDQ_EVENT_VDEC_MINI_MDP_EVENT_15 31
+#define CMDQ_EVENT_WPE_GCE_FRAME_DONE 32
+
+/* CAM */
+#define CMDQ_EVENT_ISP_FRAME_DONE_A 65
+#define CMDQ_EVENT_ISP_FRAME_DONE_B 66
+#define CMDQ_EVENT_CAMSV1_PASS1_DONE 70
+#define CMDQ_EVENT_CAMSV2_PASS1_DONE 71
+#define CMDQ_EVENT_CAMSV3_PASS1_DONE 72
+#define CMDQ_EVENT_MRAW_0_PASS1_DONE 73
+#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 75
+#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL 76
+#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 77
+#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 78
+#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 79
+#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 80
+#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 81
+#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 82
+#define CMDQ_EVENT_SENINF_CAM8_FIFO_FULL 83
+#define CMDQ_EVENT_SENINF_CAM9_FIFO_FULL 84
+#define CMDQ_EVENT_SENINF_CAM10_FIFO_FULL 85
+#define CMDQ_EVENT_SENINF_CAM11_FIFO_FULL 86
+#define CMDQ_EVENT_SENINF_CAM12_FIFO_FULL 87
+#define CMDQ_EVENT_TG_OVRUN_A_INT 88
+#define CMDQ_EVENT_DMA_R1_ERROR_A_INT 89
+#define CMDQ_EVENT_TG_OVRUN_B_INT 90
+#define CMDQ_EVENT_DMA_R1_ERROR_B_INT 91
+#define CMDQ_EVENT_TG_OVRUN_M0_INT 94
+#define CMDQ_EVENT_R1_ERROR_M0_INT 95
+#define CMDQ_EVENT_TG_GRABERR_M0_INT 96
+#define CMDQ_EVENT_TG_GRABERR_A_INT 98
+#define CMDQ_EVENT_CQ_VR_SNAP_A_INT 99
+#define CMDQ_EVENT_TG_GRABERR_B_INT 100
+#define CMDQ_EVENT_CQ_VR_SNAP_B_INT 101
+/* VENC */
+#define CMDQ_EVENT_VENC_CMDQ_FRAME_DONE 129
+#define CMDQ_EVENT_VENC_CMDQ_PAUSE_DONE 130
+#define CMDQ_EVENT_JPGENC_CMDQ_DONE 131
+#define CMDQ_EVENT_VENC_CMDQ_MB_DONE 132
+#define CMDQ_EVENT_VENC_CMDQ_128BYTE_CNT_DONE 133
+#define CMDQ_EVENT_VENC_CMDQ_PPS_DONE 136
+#define CMDQ_EVENT_VENC_CMDQ_SPS_DONE 137
+#define CMDQ_EVENT_VENC_CMDQ_VPS_DONE 138
+/* IPE */
+#define CMDQ_EVENT_FDVT_DONE 161
+#define CMDQ_EVENT_FE_DONE 162
+#define CMDQ_EVENT_RSC_DONE 163
+#define CMDQ_EVENT_DVS_DONE_ASYNC_SHOT 164
+#define CMDQ_EVENT_DVP_DONE_ASYNC_SHOT 165
+/* IMG2 */
+#define CMDQ_EVENT_GCE_IMG2_EVENT0 193
+#define CMDQ_EVENT_GCE_IMG2_EVENT1 194
+#define CMDQ_EVENT_GCE_IMG2_EVENT2 195
+#define CMDQ_EVENT_GCE_IMG2_EVENT3 196
+#define CMDQ_EVENT_GCE_IMG2_EVENT4 197
+#define CMDQ_EVENT_GCE_IMG2_EVENT5 198
+#define CMDQ_EVENT_GCE_IMG2_EVENT6 199
+#define CMDQ_EVENT_GCE_IMG2_EVENT7 200
+#define CMDQ_EVENT_GCE_IMG2_EVENT8 201
+#define CMDQ_EVENT_GCE_IMG2_EVENT9 202
+#define CMDQ_EVENT_GCE_IMG2_EVENT10 203
+#define CMDQ_EVENT_GCE_IMG2_EVENT11 204
+#define CMDQ_EVENT_GCE_IMG2_EVENT12 205
+#define CMDQ_EVENT_GCE_IMG2_EVENT13 206
+#define CMDQ_EVENT_GCE_IMG2_EVENT14 207
+#define CMDQ_EVENT_GCE_IMG2_EVENT15 208
+#define CMDQ_EVENT_GCE_IMG2_EVENT16 209
+#define CMDQ_EVENT_GCE_IMG2_EVENT17 210
+#define CMDQ_EVENT_GCE_IMG2_EVENT18 211
+#define CMDQ_EVENT_GCE_IMG2_EVENT19 212
+#define CMDQ_EVENT_GCE_IMG2_EVENT20 213
+#define CMDQ_EVENT_GCE_IMG2_EVENT21 214
+#define CMDQ_EVENT_GCE_IMG2_EVENT22 215
+#define CMDQ_EVENT_GCE_IMG2_EVENT23 216
+/* IMG1 */
+#define CMDQ_EVENT_GCE_IMG1_EVENT0 225
+#define CMDQ_EVENT_GCE_IMG1_EVENT1 226
+#define CMDQ_EVENT_GCE_IMG1_EVENT2 227
+#define CMDQ_EVENT_GCE_IMG1_EVENT3 228
+#define CMDQ_EVENT_GCE_IMG1_EVENT4 229
+#define CMDQ_EVENT_GCE_IMG1_EVENT5 230
+#define CMDQ_EVENT_GCE_IMG1_EVENT6 231
+#define CMDQ_EVENT_GCE_IMG1_EVENT7 232
+#define CMDQ_EVENT_GCE_IMG1_EVENT8 233
+#define CMDQ_EVENT_GCE_IMG1_EVENT9 234
+#define CMDQ_EVENT_GCE_IMG1_EVENT10 235
+#define CMDQ_EVENT_GCE_IMG1_EVENT11 236
+#define CMDQ_EVENT_GCE_IMG1_EVENT12 237
+#define CMDQ_EVENT_GCE_IMG1_EVENT13 238
+#define CMDQ_EVENT_GCE_IMG1_EVENT14 239
+#define CMDQ_EVENT_GCE_IMG1_EVENT15 240
+#define CMDQ_EVENT_GCE_IMG1_EVENT16 241
+#define CMDQ_EVENT_GCE_IMG1_EVENT17 242
+#define CMDQ_EVENT_GCE_IMG1_EVENT18 243
+#define CMDQ_EVENT_GCE_IMG1_EVENT19 244
+#define CMDQ_EVENT_GCE_IMG1_EVENT20 245
+#define CMDQ_EVENT_GCE_IMG1_EVENT21 246
+#define CMDQ_EVENT_GCE_IMG1_EVENT22 247
+#define CMDQ_EVENT_GCE_IMG1_EVENT23 248
+/* MDP */
+#define CMDQ_EVENT_MDP_RDMA0_SOF 256
+#define CMDQ_EVENT_MDP_RDMA1_SOF 257
+#define CMDQ_EVENT_MDP_AAL0_SOF 258
+#define CMDQ_EVENT_MDP_AAL1_SOF 259
+#define CMDQ_EVENT_MDP_HDR0_SOF 260
+#define CMDQ_EVENT_MDP_RSZ0_SOF 261
+#define CMDQ_EVENT_MDP_RSZ1_SOF 262
+#define CMDQ_EVENT_MDP_WROT0_SOF 263
+#define CMDQ_EVENT_MDP_WROT1_SOF 264
+#define CMDQ_EVENT_MDP_TDSHP0_SOF 265
+#define CMDQ_EVENT_MDP_TDSHP1_SOF 266
+#define CMDQ_EVENT_IMG_DL_RELAY0_SOF 267
+#define CMDQ_EVENT_IMG_DL_RELAY1_SOF 268
+#define CMDQ_EVENT_MDP_COLOR0_SOF 269
+#define CMDQ_EVENT_MDP_WROT3_FRAME_DONE 288
+#define CMDQ_EVENT_MDP_WROT2_FRAME_DONE 289
+#define CMDQ_EVENT_MDP_WROT1_FRAME_DONE 290
+#define CMDQ_EVENT_MDP_WROT0_FRAME_DONE 291
+#define CMDQ_EVENT_MDP_TDSHP3_FRAME_DONE 292
+#define CMDQ_EVENT_MDP_TDSHP2_FRAME_DONE 293
+#define CMDQ_EVENT_MDP_TDSHP1_FRAME_DONE 294
+#define CMDQ_EVENT_MDP_TDSHP0_FRAME_DONE 295
+#define CMDQ_EVENT_MDP_RSZ3_FRAME_DONE 296
+#define CMDQ_EVENT_MDP_RSZ2_FRAME_DONE 297
+#define CMDQ_EVENT_MDP_RSZ1_FRAME_DONE 298
+#define CMDQ_EVENT_MDP_RSZ0_FRAME_DONE 299
+#define CMDQ_EVENT_MDP_RDMA3_FRAME_DONE 300
+#define CMDQ_EVENT_MDP_RDMA2_FRAME_DONE 301
+#define CMDQ_EVENT_MDP_RDMA1_FRAME_DONE 302
+#define CMDQ_EVENT_MDP_RDMA0_FRAME_DONE 303
+#define CMDQ_EVENT_MDP_HDR1_FRAME_DONE 304
+#define CMDQ_EVENT_MDP_HDR0_FRAME_DONE 305
+#define CMDQ_EVENT_MDP_COLOR0_FRAME_DONE 306
+#define CMDQ_EVENT_MDP_AAL3_FRAME_DONE 307
+#define CMDQ_EVENT_MDP_AAL2_FRAME_DONE 308
+#define CMDQ_EVENT_MDP_AAL1_FRAME_DONE 309
+#define CMDQ_EVENT_MDP_AAL0_FRAME_DONE 310
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_0 320
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_1 321
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_2 322
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_3 323
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_4 324
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_5 325
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_6 326
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_7 327
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_8 328
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_9 329
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_10 330
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_11 331
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_12 332
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_13 333
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_14 334
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_15 335
+#define CMDQ_EVENT_MDP_WROT3_SW_RST_DONE_ENG_EVENT 336
+#define CMDQ_EVENT_MDP_WROT2_SW_RST_DONE_ENG_EVENT 337
+#define CMDQ_EVENT_MDP_WROT1_SW_RST_DONE_ENG_EVENT 338
+#define CMDQ_EVENT_MDP_WROT0_SW_RST_DONE_ENG_EVENT 339
+#define CMDQ_EVENT_MDP_RDMA3_SW_RST_DONE_ENG_EVENT 340
+#define CMDQ_EVENT_MDP_RDMA2_SW_RST_DONE_ENG_EVENT 341
+#define CMDQ_EVENT_MDP_RDMA1_SW_RST_DONE_ENG_EVENT 342
+#define CMDQ_EVENT_MDP_RDMA0_SW_RST_DONE_ENG_EVENT 343
+/* DISP */
+#define CMDQ_EVENT_DISP_OVL0_SOF 384
+#define CMDQ_EVENT_DISP_OVL0_2L_SOF 385
+#define CMDQ_EVENT_DISP_RDMA0_SOF 386
+#define CMDQ_EVENT_DISP_RSZ0_SOF 387
+#define CMDQ_EVENT_DISP_COLOR0_SOF 388
+#define CMDQ_EVENT_DISP_CCORR0_SOF 389
+#define CMDQ_EVENT_DISP_CCORR1_SOF 390
+#define CMDQ_EVENT_DISP_AAL0_SOF 391
+#define CMDQ_EVENT_DISP_GAMMA0_SOF 392
+#define CMDQ_EVENT_DISP_POSTMASK0_SOF 393
+#define CMDQ_EVENT_DISP_DITHER0_SOF 394
+#define CMDQ_EVENT_DISP_CM0_SOF 395
+#define CMDQ_EVENT_DISP_SPR0_SOF 396
+#define CMDQ_EVENT_DISP_DSC_WRAP0_SOF 397
+#define CMDQ_EVENT_DSI0_SOF 398
+#define CMDQ_EVENT_DISP_WDMA0_SOF 399
+#define CMDQ_EVENT_DISP_PWM0_SOF 400
+#define CMDQ_EVENT_DSI0_FRAME_DONE 410
+#define CMDQ_EVENT_DISP_WDMA0_FRAME_DONE 411
+#define CMDQ_EVENT_DISP_SPR0_FRAME_DONE 412
+#define CMDQ_EVENT_DISP_RSZ0_FRAME_DONE 413
+#define CMDQ_EVENT_DISP_RDMA0_FRAME_DONE 414
+#define CMDQ_EVENT_DISP_POSTMASK0_FRAME_DONE 415
+#define CMDQ_EVENT_DISP_OVL0_FRAME_DONE 416
+#define CMDQ_EVENT_DISP_OVL0_2L_FRAME_DONE 417
+#define CMDQ_EVENT_DISP_GAMMA0_FRAME_DONE 418
+#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE0_FRAME_DONE 420
+#define CMDQ_EVENT_DISP_DITHER0_FRAME_DONE 421
+#define CMDQ_EVENT_DISP_COLOR0_FRAME_DONE 422
+#define CMDQ_EVENT_DISP_CM0_FRAME_DONE 423
+#define CMDQ_EVENT_DISP_CCORR1_FRAME_DONE 424
+#define CMDQ_EVENT_DISP_CCORR0_FRAME_DONE 425
+#define CMDQ_EVENT_DISP_AAL0_FRAME_DONE 426
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0 434
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1 435
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_2 436
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_3 437
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_4 438
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_5 439
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_6 440
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_7 441
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_8 442
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_9 443
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_10 444
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_11 445
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_12 446
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_13 447
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_14 448
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_15 449
+#define CMDQ_EVENT_DSI0_TE_ENG_EVENT 450
+#define CMDQ_EVENT_DSI0_IRQ_ENG_EVENT 451
+#define CMDQ_EVENT_DSI0_DONE_ENG_EVENT 452
+#define CMDQ_EVENT_DISP_WDMA0_SW_RST_DONE_ENG_EVENT 453
+#define CMDQ_EVENT_DISP_SMIASSERT_ENG_EVENT 454
+#define CMDQ_EVENT_DISP_POSTMASK0_RST_DONE_ENG_EVENT 455
+#define CMDQ_EVENT_DISP_OVL0_RST_DONE_ENG_EVENT 456
+#define CMDQ_EVENT_DISP_OVL0_2L_RST_DONE_ENG_EVENT 457
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_0 458
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_1 459
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_2 460
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_3 461
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_4 462
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_5 463
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_6 464
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_7 465
+#define CMDQ_EVENT_OUT_EVENT_0 898
+
+/* CMDQ SW tokens
+ * The following definitions are GCE SW tokens which clients may use
+ * through the event operation API.
+ * Note that tokens 512 to 639 may be set secure.
+ */
+
+/* end of HW events, beginning of SW tokens */
+#define CMDQ_MAX_HW_EVENT 512
+
+/* Config thread notifies the trigger thread */
+#define CMDQ_SYNC_TOKEN_CONFIG_DIRTY 640
+/* Trigger thread notifies the config thread */
+#define CMDQ_SYNC_TOKEN_STREAM_EOF 641
+/* Block Trigger thread until the ESD check finishes. */
+#define CMDQ_SYNC_TOKEN_ESD_EOF 642
+#define CMDQ_SYNC_TOKEN_STREAM_BLOCK 643
+/* Check that CABC setup has finished */
+#define CMDQ_SYNC_TOKEN_CABC_EOF 644
+
+/* Notify normal CMDQ that some secure tasks are done.
+ * MUST NOT CHANGE: this token is synchronized with the secure world.
+ */
+#define CMDQ_SYNC_SECURE_THR_EOF 647
+
+/* SW tokens used by CMDQ itself */
+#define CMDQ_SYNC_TOKEN_USER_0 649
+#define CMDQ_SYNC_TOKEN_USER_1 650
+#define CMDQ_SYNC_TOKEN_POLL_MONITOR 651
+#define CMDQ_SYNC_TOKEN_TPR_LOCK 652
+
+/* ISP sw token */
+#define CMDQ_SYNC_TOKEN_MSS 665
+#define CMDQ_SYNC_TOKEN_MSF 666
+
+/* DISP sw token */
+#define CMDQ_SYNC_TOKEN_SODI 671
+
+/* GPR access tokens (for register backup).
+ * There are 15 32-bit GPRs; 3 GPRs form a set
+ * (64-bit for the address, 32-bit for the value).
+ * MUST NOT CHANGE: these tokens are synchronized with MDP.
+ */
+#define CMDQ_SYNC_TOKEN_GPR_SET_0 700
+#define CMDQ_SYNC_TOKEN_GPR_SET_1 701
+#define CMDQ_SYNC_TOKEN_GPR_SET_2 702
+#define CMDQ_SYNC_TOKEN_GPR_SET_3 703
+#define CMDQ_SYNC_TOKEN_GPR_SET_4 704
+
+/* Resource lock events to control resources between GCE threads */
+#define CMDQ_SYNC_RESOURCE_WROT0 710
+#define CMDQ_SYNC_RESOURCE_WROT1 711
+
+/* events for the GPR timer, used for sleep and poll with timeout */
+#define CMDQ_TOKEN_GPR_TIMER_R0 994
+#define CMDQ_TOKEN_GPR_TIMER_R1 995
+#define CMDQ_TOKEN_GPR_TIMER_R2 996
+#define CMDQ_TOKEN_GPR_TIMER_R3 997
+#define CMDQ_TOKEN_GPR_TIMER_R4 998
+#define CMDQ_TOKEN_GPR_TIMER_R5 999
+#define CMDQ_TOKEN_GPR_TIMER_R6 1000
+#define CMDQ_TOKEN_GPR_TIMER_R7 1001
+#define CMDQ_TOKEN_GPR_TIMER_R8 1002
+#define CMDQ_TOKEN_GPR_TIMER_R9 1003
+#define CMDQ_TOKEN_GPR_TIMER_R10 1004
+#define CMDQ_TOKEN_GPR_TIMER_R11 1005
+#define CMDQ_TOKEN_GPR_TIMER_R12 1006
+#define CMDQ_TOKEN_GPR_TIMER_R13 1007
+#define CMDQ_TOKEN_GPR_TIMER_R14 1008
+#define CMDQ_TOKEN_GPR_TIMER_R15 1009
+
+#define CMDQ_EVENT_MAX 0x3FF
+/* CMDQ sw tokens END */
+
+#endif
diff --git a/include/dt-bindings/gce/mt8192-gce.h b/include/dt-bindings/gce/mt8192-gce.h
new file mode 100644
index 000000000000..9e5a0eb040a0
--- /dev/null
+++ b/include/dt-bindings/gce/mt8192-gce.h
@@ -0,0 +1,335 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Author: Yongqiang Niu <yongqiang.niu@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_GCE_MT8192_H
+#define _DT_BINDINGS_GCE_MT8192_H
+
+/* assigning timeout 0 also means using the default */
+#define CMDQ_NO_TIMEOUT 0xffffffff
+#define CMDQ_TIMEOUT_DEFAULT 1000
+
+/* GCE thread priority */
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_1 1
+#define CMDQ_THR_PRIO_2 2
+#define CMDQ_THR_PRIO_3 3
+#define CMDQ_THR_PRIO_4 4
+#define CMDQ_THR_PRIO_5 5
+#define CMDQ_THR_PRIO_6 6
+#define CMDQ_THR_PRIO_HIGHEST 7
+
+/* number of 32-bit CPR registers */
+#define GCE_CPR_COUNT 1312
+
+/* GCE subsys table */
+#define SUBSYS_1300XXXX 0
+#define SUBSYS_1400XXXX 1
+#define SUBSYS_1401XXXX 2
+#define SUBSYS_1402XXXX 3
+#define SUBSYS_1502XXXX 4
+#define SUBSYS_1880XXXX 5
+#define SUBSYS_1881XXXX 6
+#define SUBSYS_1882XXXX 7
+#define SUBSYS_1883XXXX 8
+#define SUBSYS_1884XXXX 9
+#define SUBSYS_1000XXXX 10
+#define SUBSYS_1001XXXX 11
+#define SUBSYS_1002XXXX 12
+#define SUBSYS_1003XXXX 13
+#define SUBSYS_1004XXXX 14
+#define SUBSYS_1005XXXX 15
+#define SUBSYS_1020XXXX 16
+#define SUBSYS_1028XXXX 17
+#define SUBSYS_1700XXXX 18
+#define SUBSYS_1701XXXX 19
+#define SUBSYS_1702XXXX 20
+#define SUBSYS_1703XXXX 21
+#define SUBSYS_1800XXXX 22
+#define SUBSYS_1801XXXX 23
+#define SUBSYS_1802XXXX 24
+#define SUBSYS_1804XXXX 25
+#define SUBSYS_1805XXXX 26
+#define SUBSYS_1808XXXX 27
+#define SUBSYS_180aXXXX 28
+#define SUBSYS_180bXXXX 29
+
+#define CMDQ_EVENT_VDEC_LAT_SOF_0 0
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_0 1
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_1 2
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_2 3
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_3 4
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_4 5
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_5 6
+#define CMDQ_EVENT_VDEC_LAT_FRAME_DONE_6 7
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_0 8
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_1 9
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_2 10
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_3 11
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_4 12
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_5 13
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_6 14
+#define CMDQ_EVENT_VDEC_LAT_ENG_EVENT_7 15
+
+#define CMDQ_EVENT_ISP_FRAME_DONE_A 65
+#define CMDQ_EVENT_ISP_FRAME_DONE_B 66
+#define CMDQ_EVENT_ISP_FRAME_DONE_C 67
+#define CMDQ_EVENT_CAMSV0_PASS1_DONE 68
+#define CMDQ_EVENT_CAMSV02_PASS1_DONE 69
+#define CMDQ_EVENT_CAMSV1_PASS1_DONE 70
+#define CMDQ_EVENT_CAMSV2_PASS1_DONE 71
+#define CMDQ_EVENT_CAMSV3_PASS1_DONE 72
+#define CMDQ_EVENT_MRAW_0_PASS1_DONE 73
+#define CMDQ_EVENT_MRAW_1_PASS1_DONE 74
+#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 75
+#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL 76
+#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 77
+#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 78
+#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 79
+#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 80
+#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 81
+#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 82
+#define CMDQ_EVENT_SENINF_CAM8_FIFO_FULL 83
+#define CMDQ_EVENT_SENINF_CAM9_FIFO_FULL 84
+#define CMDQ_EVENT_SENINF_CAM10_FIFO_FULL 85
+#define CMDQ_EVENT_SENINF_CAM11_FIFO_FULL 86
+#define CMDQ_EVENT_SENINF_CAM12_FIFO_FULL 87
+#define CMDQ_EVENT_TG_OVRUN_A_INT 88
+#define CMDQ_EVENT_DMA_R1_ERROR_A_INT 89
+#define CMDQ_EVENT_TG_OVRUN_B_INT 90
+#define CMDQ_EVENT_DMA_R1_ERROR_B_INT 91
+#define CMDQ_EVENT_TG_OVRUN_C_INT 92
+#define CMDQ_EVENT_DMA_R1_ERROR_C_INT 93
+#define CMDQ_EVENT_TG_OVRUN_M0_INT 94
+#define CMDQ_EVENT_DMA_R1_ERROR_M0_INT 95
+#define CMDQ_EVENT_TG_GRABERR_M0_INT 96
+#define CMDQ_EVENT_TG_GRABERR_M1_INT 97
+#define CMDQ_EVENT_TG_GRABERR_A_INT 98
+#define CMDQ_EVENT_CQ_VR_SNAP_A_INT 99
+#define CMDQ_EVENT_TG_GRABERR_B_INT 100
+#define CMDQ_EVENT_CQ_VR_SNAP_B_INT 101
+#define CMDQ_EVENT_TG_GRABERR_C_INT 102
+#define CMDQ_EVENT_CQ_VR_SNAP_C_INT 103
+
+#define CMDQ_EVENT_VENC_CMDQ_FRAME_DONE 129
+#define CMDQ_EVENT_VENC_CMDQ_PAUSE_DONE 130
+#define CMDQ_EVENT_JPGENC_CMDQ_DONE 131
+#define CMDQ_EVENT_VENC_CMDQ_MB_DONE 132
+#define CMDQ_EVENT_VENC_CMDQ_128BYTE_CNT_DONE 133
+#define CMDQ_EVENT_VENC_C0_CMDQ_WP_2ND_STAGE_DONE 134
+#define CMDQ_EVENT_VENC_C0_CMDQ_WP_3RD_STAGE_DONE 135
+#define CMDQ_EVENT_VENC_CMDQ_PPS_DONE 136
+#define CMDQ_EVENT_VENC_CMDQ_SPS_DONE 137
+#define CMDQ_EVENT_VENC_CMDQ_VPS_DONE 138
+
+#define CMDQ_EVENT_VDEC_CORE0_SOF_0 160
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_0 161
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_1 162
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_2 163
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_3 164
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_4 165
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_5 166
+#define CMDQ_EVENT_VDEC_CORE0_FRAME_DONE_6 167
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_0 168
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_1 169
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_2 170
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_3 171
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_4 172
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_5 173
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_6 174
+#define CMDQ_EVENT_VDEC_CORE0_ENG_EVENT_7 175
+#define CMDQ_EVENT_FDVT_DONE 177
+#define CMDQ_EVENT_FE_DONE 178
+#define CMDQ_EVENT_RSC_DONE 179
+#define CMDQ_EVENT_DVS_DONE_ASYNC_SHOT 180
+#define CMDQ_EVENT_DVP_DONE_ASYNC_SHOT 181
+
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_0 193
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_1 194
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_2 195
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_3 196
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_4 197
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_5 198
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_6 199
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_7 200
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_8 201
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_9 202
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_10 203
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_11 204
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_12 205
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_13 206
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_14 207
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_15 208
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_16 209
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_17 210
+#define CMDQ_EVENT_IMG2_DIP_FRAME_DONE_P2_18 211
+#define CMDQ_EVENT_IMG2_DIP_DMA_ERR_EVENT 212
+#define CMDQ_EVENT_IMG2_AMD_FRAME_DONE 213
+#define CMDQ_EVENT_IMG2_MFB_DONE_LINK_MISC 214
+#define CMDQ_EVENT_IMG2_WPE_A_DONE_LINK_MISC 215
+#define CMDQ_EVENT_IMG2_MSS_DONE_LINK_MISC 216
+
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_0 225
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_1 226
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_2 227
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_3 228
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_4 229
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_5 230
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_6 231
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_7 232
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_8 233
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_9 234
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_10 235
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_11 236
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_12 237
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_13 238
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_14 239
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_15 240
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_16 241
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_17 242
+#define CMDQ_EVENT_IMG1_DIP_FRAME_DONE_P2_18 243
+#define CMDQ_EVENT_IMG1_DIP_DMA_ERR_EVENT 244
+#define CMDQ_EVENT_IMG1_AMD_FRAME_DONE 245
+#define CMDQ_EVENT_IMG1_MFB_DONE_LINK_MISC 246
+#define CMDQ_EVENT_IMG1_WPE_A_DONE_LINK_MISC 247
+#define CMDQ_EVENT_IMG1_MSS_DONE_LINK_MISC 248
+
+#define CMDQ_EVENT_MDP_RDMA0_SOF 256
+#define CMDQ_EVENT_MDP_RDMA1_SOF 257
+#define CMDQ_EVENT_MDP_AAL0_SOF 258
+#define CMDQ_EVENT_MDP_AAL1_SOF 259
+#define CMDQ_EVENT_MDP_HDR0_SOF 260
+#define CMDQ_EVENT_MDP_HDR1_SOF 261
+#define CMDQ_EVENT_MDP_RSZ0_SOF 262
+#define CMDQ_EVENT_MDP_RSZ1_SOF 263
+#define CMDQ_EVENT_MDP_WROT0_SOF 264
+#define CMDQ_EVENT_MDP_WROT1_SOF 265
+#define CMDQ_EVENT_MDP_TDSHP0_SOF 266
+#define CMDQ_EVENT_MDP_TDSHP1_SOF 267
+#define CMDQ_EVENT_IMG_DL_RELAY0_SOF 268
+#define CMDQ_EVENT_IMG_DL_RELAY1_SOF 269
+#define CMDQ_EVENT_MDP_COLOR0_SOF 270
+#define CMDQ_EVENT_MDP_COLOR1_SOF 271
+#define CMDQ_EVENT_MDP_WROT1_FRAME_DONE 290
+#define CMDQ_EVENT_MDP_WROT0_FRAME_DONE 291
+#define CMDQ_EVENT_MDP_TDSHP1_FRAME_DONE 294
+#define CMDQ_EVENT_MDP_TDSHP0_FRAME_DONE 295
+#define CMDQ_EVENT_MDP_RSZ1_FRAME_DONE 302
+#define CMDQ_EVENT_MDP_RSZ0_FRAME_DONE 303
+#define CMDQ_EVENT_MDP_RDMA1_FRAME_DONE 306
+#define CMDQ_EVENT_MDP_RDMA0_FRAME_DONE 307
+#define CMDQ_EVENT_MDP_HDR1_FRAME_DONE 308
+#define CMDQ_EVENT_MDP_HDR0_FRAME_DONE 309
+#define CMDQ_EVENT_MDP_COLOR1_FRAME_DONE 312
+#define CMDQ_EVENT_MDP_COLOR0_FRAME_DONE 313
+#define CMDQ_EVENT_MDP_AAL1_FRAME_DONE 316
+#define CMDQ_EVENT_MDP_AAL0_FRAME_DONE 317
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_0 320
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_1 321
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_2 322
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_3 323
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_4 324
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_5 325
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_6 326
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_7 327
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_8 328
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_9 329
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_10 330
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_11 331
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_12 332
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_13 333
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_14 334
+#define CMDQ_EVENT_MDP_STREAM_DONE_ENG_EVENT_15 335
+#define CMDQ_EVENT_MDP_WROT1_SW_RST_DONE_ENG_EVENT 338
+#define CMDQ_EVENT_MDP_WROT0_SW_RST_DONE_ENG_EVENT 339
+#define CMDQ_EVENT_MDP_RDMA1_SW_RST_DONE_ENG_EVENT 342
+#define CMDQ_EVENT_MDP_RDMA0_SW_RST_DONE_ENG_EVENT 343
+
+#define CMDQ_EVENT_DISP_OVL0_SOF 384
+#define CMDQ_EVENT_DISP_OVL0_2L_SOF 385
+#define CMDQ_EVENT_DISP_RDMA0_SOF 386
+#define CMDQ_EVENT_DISP_RSZ0_SOF 387
+#define CMDQ_EVENT_DISP_COLOR0_SOF 388
+#define CMDQ_EVENT_DISP_CCORR0_SOF 389
+#define CMDQ_EVENT_DISP_AAL0_SOF 390
+#define CMDQ_EVENT_DISP_GAMMA0_SOF 391
+#define CMDQ_EVENT_DISP_POSTMASK0_SOF 392
+#define CMDQ_EVENT_DISP_DITHER0_SOF 393
+#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE0_SOF 394
+#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE1_SOF 395
+#define CMDQ_EVENT_DSI0_SOF 396
+#define CMDQ_EVENT_DISP_WDMA0_SOF 397
+#define CMDQ_EVENT_DISP_UFBC_WDMA0_SOF 398
+#define CMDQ_EVENT_DISP_PWM0_SOF 399
+#define CMDQ_EVENT_DISP_OVL2_2L_SOF 400
+#define CMDQ_EVENT_DISP_RDMA4_SOF 401
+#define CMDQ_EVENT_DISP_DPI0_SOF 402
+#define CMDQ_EVENT_MDP_RDMA4_SOF 403
+#define CMDQ_EVENT_MDP_HDR4_SOF 404
+#define CMDQ_EVENT_MDP_RSZ4_SOF 405
+#define CMDQ_EVENT_MDP_AAL4_SOF 406
+#define CMDQ_EVENT_MDP_TDSHP4_SOF 407
+#define CMDQ_EVENT_MDP_COLOR4_SOF 408
+#define CMDQ_EVENT_DISP_Y2R0_SOF 409
+#define CMDQ_EVENT_MDP_TDSHP4_FRAME_DONE 410
+#define CMDQ_EVENT_MDP_RSZ4_FRAME_DONE 411
+#define CMDQ_EVENT_MDP_RDMA4_FRAME_DONE 412
+#define CMDQ_EVENT_MDP_HDR4_FRAME_DONE 413
+#define CMDQ_EVENT_MDP_COLOR4_FRAME_DONE 414
+#define CMDQ_EVENT_MDP_AAL4_FRAME_DONE 415
+#define CMDQ_EVENT_DSI0_FRAME_DONE 416
+#define CMDQ_EVENT_DISP_WDMA0_FRAME_DONE 417
+#define CMDQ_EVENT_DISP_UFBC_WDMA0_FRAME_DONE 418
+#define CMDQ_EVENT_DISP_RSZ0_FRAME_DONE 419
+#define CMDQ_EVENT_DISP_RDMA4_FRAME_DONE 420
+#define CMDQ_EVENT_DISP_RDMA0_FRAME_DONE 421
+#define CMDQ_EVENT_DISP_POSTMASK0_FRAME_DONE 422
+#define CMDQ_EVENT_DISP_OVL2_2L_FRAME_DONE 423
+#define CMDQ_EVENT_DISP_OVL0_FRAME_DONE 424
+#define CMDQ_EVENT_DISP_OVL0_2L_FRAME_DONE 425
+#define CMDQ_EVENT_DISP_GAMMA0_FRAME_DONE 426
+#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE1_FRAME_DONE 427
+#define CMDQ_EVENT_DISP_DSC_WRAP0_CORE0_FRAME_DONE 428
+#define CMDQ_EVENT_DISP_DPI0_FRAME_DONE 429
+#define CMDQ_EVENT_DISP_DITHER0_FRAME_DONE 430
+#define CMDQ_EVENT_DISP_COLOR0_FRAME_DONE 431
+#define CMDQ_EVENT_DISP_CCORR0_FRAME_DONE 432
+#define CMDQ_EVENT_DISP_AAL0_FRAME_DONE 433
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0 434
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1 435
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_2 436
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_3 437
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_4 438
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_5 439
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_6 440
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_7 441
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_8 442
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_9 443
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_10 444
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_11 445
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_12 446
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_13 447
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_14 448
+#define CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_15 449
+#define CMDQ_EVENT_DSI0_TE_ENG_EVENT 450
+#define CMDQ_EVENT_DSI0_IRQ_ENG_EVENT 451
+#define CMDQ_EVENT_DSI0_DONE_ENG_EVENT 452
+#define CMDQ_EVENT_DISP_WDMA0_SW_RST_DONE_ENG_EVENT 453
+#define CMDQ_EVENT_DISP_SMIASSERT_ENG_EVENT 454
+#define CMDQ_EVENT_DISP_POSTMASK0_RST_DONE_ENG_EVENT 455
+#define CMDQ_EVENT_DISP_OVL2_2L_RST_DONE_ENG_EVENT 456
+#define CMDQ_EVENT_DISP_OVL0_RST_DONE_ENG_EVENT 457
+#define CMDQ_EVENT_DISP_OVL0_2L_RST_DONE_ENG_EVENT 458
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_0 459
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_1 460
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_2 461
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_3 462
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_4 463
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_5 464
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_6 465
+#define CMDQ_EVENT_BUF_UNDERRUN_ENG_EVENT_7 466
+#define CMDQ_MAX_HW_EVENT 512
+
+#endif
diff --git a/include/dt-bindings/gce/mt8195-gce.h b/include/dt-bindings/gce/mt8195-gce.h
new file mode 100644
index 000000000000..dcfb302b8a5b
--- /dev/null
+++ b/include/dt-bindings/gce/mt8195-gce.h
@@ -0,0 +1,812 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Jason-JH Lin <jason0jh.lin@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_GCE_MT8195_H
+#define _DT_BINDINGS_GCE_MT8195_H
+
+/* assigning timeout 0 also means using the default */
+#define CMDQ_NO_TIMEOUT 0xffffffff
+#define CMDQ_TIMEOUT_DEFAULT 1000
+
+/* GCE thread priority */
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_1 1
+#define CMDQ_THR_PRIO_2 2
+#define CMDQ_THR_PRIO_3 3
+#define CMDQ_THR_PRIO_4 4
+#define CMDQ_THR_PRIO_5 5
+#define CMDQ_THR_PRIO_6 6
+#define CMDQ_THR_PRIO_HIGHEST 7
+
+/* CPR count, in 32-bit registers */
+#define GCE_CPR_COUNT 1312
+
+/* GCE subsys table */
+#define SUBSYS_1400XXXX 0
+#define SUBSYS_1401XXXX 1
+#define SUBSYS_1402XXXX 2
+#define SUBSYS_1c00XXXX 3
+#define SUBSYS_1c01XXXX 4
+#define SUBSYS_1c02XXXX 5
+#define SUBSYS_1c10XXXX 6
+#define SUBSYS_1c11XXXX 7
+#define SUBSYS_1c12XXXX 8
+#define SUBSYS_14f0XXXX 9
+#define SUBSYS_14f1XXXX 10
+#define SUBSYS_14f2XXXX 11
+#define SUBSYS_1800XXXX 12
+#define SUBSYS_1801XXXX 13
+#define SUBSYS_1802XXXX 14
+#define SUBSYS_1803XXXX 15
+#define SUBSYS_1032XXXX 16
+#define SUBSYS_1033XXXX 17
+#define SUBSYS_1600XXXX 18
+#define SUBSYS_1601XXXX 19
+#define SUBSYS_14e0XXXX 20
+#define SUBSYS_1c20XXXX 21
+#define SUBSYS_1c30XXXX 22
+#define SUBSYS_1c40XXXX 23
+#define SUBSYS_1c50XXXX 24
+#define SUBSYS_1c60XXXX 25
+
+/* GCE General Purpose Register (GPR) support */
+#define GCE_GPR_R00 0x0
+#define GCE_GPR_R01 0x1
+#define GCE_GPR_R02 0x2
+#define GCE_GPR_R03 0x3
+#define GCE_GPR_R04 0x4
+#define GCE_GPR_R05 0x5
+#define GCE_GPR_R06 0x6
+#define GCE_GPR_R07 0x7
+#define GCE_GPR_R08 0x8
+#define GCE_GPR_R09 0x9
+#define GCE_GPR_R10 0xa
+#define GCE_GPR_R11 0xb
+#define GCE_GPR_R12 0xc
+#define GCE_GPR_R13 0xd
+#define GCE_GPR_R14 0xe
+#define GCE_GPR_R15 0xf
+
+/* GCE hw event id */
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_0 1
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_1 2
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_2 3
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_3 4
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_4 5
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_5 6
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_6 7
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_7 8
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_8 9
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_9 10
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_10 11
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_11 12
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_12 13
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_13 14
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW0_14 15
+#define CMDQ_EVENT_TRAW0_DMA_ERROR_INT 16
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_0 17
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_1 18
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_2 19
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_3 20
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_4 21
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_5 22
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_6 23
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_7 24
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_8 25
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_9 26
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_10 27
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_11 28
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_12 29
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_13 30
+#define CMDQ_EVENT_CQ_THR_DONE_TRAW1_14 31
+#define CMDQ_EVENT_TRAW1_DMA_ERROR_INT 32
+
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_0 65
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_1 66
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_2 67
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_3 68
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_4 69
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_5 70
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_6 71
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_7 72
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_8 73
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_9 74
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_10 75
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_11 76
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_12 77
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_13 78
+#define CMDQ_EVENT_DIP0_FRAME_DONE_P2_14 79
+#define CMDQ_EVENT_DIP0_DMA_ERR 80
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_0 81
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_1 82
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_2 83
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_3 84
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_4 85
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_5 86
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_6 87
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_7 88
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_8 89
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_9 90
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_10 91
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_11 92
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_12 93
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_13 94
+#define CMDQ_EVENT_PQA0_FRAME_DONE_P2_14 95
+#define CMDQ_EVENT_PQA0_DMA_ERR 96
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_0 97
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_1 98
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_2 99
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_3 100
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_4 101
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_5 102
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_6 103
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_7 104
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_8 105
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_9 106
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_10 107
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_11 108
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_12 109
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_13 110
+#define CMDQ_EVENT_PQB0_FRAME_DONE_P2_14 111
+#define CMDQ_EVENT_PQB0_DMA_ERR 112
+#define CMDQ_EVENT_DIP0_DUMMY_0 113
+#define CMDQ_EVENT_DIP0_DUMMY_1 114
+#define CMDQ_EVENT_DIP0_DUMMY_2 115
+#define CMDQ_EVENT_DIP0_DUMMY_3 116
+#define CMDQ_EVENT_WPE0_EIS_GCE_FRAME_DONE 117
+#define CMDQ_EVENT_WPE0_EIS_DONE_SYNC_OUT 118
+#define CMDQ_EVENT_WPE0_TNR_GCE_FRAME_DONE 119
+#define CMDQ_EVENT_WPE0_TNR_DONE_SYNC_OUT 120
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_0 121
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_1 122
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_2 123
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_3 124
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_4 125
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_5 126
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_6 127
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_7 128
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_8 129
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_9 130
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_10 131
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_11 132
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_12 133
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_13 134
+#define CMDQ_EVENT_WPE0_EIS_FRAME_DONE_P2_14 135
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_0 136
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_1 137
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_2 138
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_3 139
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_4 140
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_5 141
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_6 142
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_7 143
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_8 144
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_9 145
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_10 146
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_11 147
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_12 148
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_13 149
+#define CMDQ_EVENT_WPE0_TNR_FRAME_DONE_P2_14 150
+#define CMDQ_EVENT_WPE0_DUMMY_0 151
+#define CMDQ_EVENT_IMGSYS_IPE_DUMMY 152
+#define CMDQ_EVENT_IMGSYS_IPE_FDVT_DONE 153
+#define CMDQ_EVENT_IMGSYS_IPE_ME_DONE 154
+#define CMDQ_EVENT_IMGSYS_IPE_DVS_DONE 155
+#define CMDQ_EVENT_IMGSYS_IPE_DVP_DONE 156
+
+#define CMDQ_EVENT_TPR_0 194
+#define CMDQ_EVENT_TPR_1 195
+#define CMDQ_EVENT_TPR_2 196
+#define CMDQ_EVENT_TPR_3 197
+#define CMDQ_EVENT_TPR_4 198
+#define CMDQ_EVENT_TPR_5 199
+#define CMDQ_EVENT_TPR_6 200
+#define CMDQ_EVENT_TPR_7 201
+#define CMDQ_EVENT_TPR_8 202
+#define CMDQ_EVENT_TPR_9 203
+#define CMDQ_EVENT_TPR_10 204
+#define CMDQ_EVENT_TPR_11 205
+#define CMDQ_EVENT_TPR_12 206
+#define CMDQ_EVENT_TPR_13 207
+#define CMDQ_EVENT_TPR_14 208
+#define CMDQ_EVENT_TPR_15 209
+#define CMDQ_EVENT_TPR_16 210
+#define CMDQ_EVENT_TPR_17 211
+#define CMDQ_EVENT_TPR_18 212
+#define CMDQ_EVENT_TPR_19 213
+#define CMDQ_EVENT_TPR_20 214
+#define CMDQ_EVENT_TPR_21 215
+#define CMDQ_EVENT_TPR_22 216
+#define CMDQ_EVENT_TPR_23 217
+#define CMDQ_EVENT_TPR_24 218
+#define CMDQ_EVENT_TPR_25 219
+#define CMDQ_EVENT_TPR_26 220
+#define CMDQ_EVENT_TPR_27 221
+#define CMDQ_EVENT_TPR_28 222
+#define CMDQ_EVENT_TPR_29 223
+#define CMDQ_EVENT_TPR_30 224
+#define CMDQ_EVENT_TPR_31 225
+#define CMDQ_EVENT_TPR_TIMEOUT_0 226
+#define CMDQ_EVENT_TPR_TIMEOUT_1 227
+#define CMDQ_EVENT_TPR_TIMEOUT_2 228
+#define CMDQ_EVENT_TPR_TIMEOUT_3 229
+#define CMDQ_EVENT_TPR_TIMEOUT_4 230
+#define CMDQ_EVENT_TPR_TIMEOUT_5 231
+#define CMDQ_EVENT_TPR_TIMEOUT_6 232
+#define CMDQ_EVENT_TPR_TIMEOUT_7 233
+#define CMDQ_EVENT_TPR_TIMEOUT_8 234
+#define CMDQ_EVENT_TPR_TIMEOUT_9 235
+#define CMDQ_EVENT_TPR_TIMEOUT_10 236
+#define CMDQ_EVENT_TPR_TIMEOUT_11 237
+#define CMDQ_EVENT_TPR_TIMEOUT_12 238
+#define CMDQ_EVENT_TPR_TIMEOUT_13 239
+#define CMDQ_EVENT_TPR_TIMEOUT_14 240
+#define CMDQ_EVENT_TPR_TIMEOUT_15 241
+
+#define CMDQ_EVENT_VPP0_MDP_RDMA_SOF 256
+#define CMDQ_EVENT_VPP0_MDP_FG_SOF 257
+#define CMDQ_EVENT_VPP0_STITCH_SOF 258
+#define CMDQ_EVENT_VPP0_MDP_HDR_SOF 259
+#define CMDQ_EVENT_VPP0_MDP_AAL_SOF 260
+#define CMDQ_EVENT_VPP0_MDP_RSZ_IN_RSZ_SOF 261
+#define CMDQ_EVENT_VPP0_MDP_TDSHP_SOF 262
+#define CMDQ_EVENT_VPP0_DISP_COLOR_SOF 263
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_SOF 264
+#define CMDQ_EVENT_VPP0_VPP_PADDING_IN_PADDING_SOF 265
+#define CMDQ_EVENT_VPP0_MDP_TCC_IN_SOF 266
+#define CMDQ_EVENT_VPP0_MDP_WROT_SOF 267
+
+#define CMDQ_EVENT_VPP0_WARP0_MMSYS_TOP_RELAY_SOF_PRE 269
+#define CMDQ_EVENT_VPP0_WARP1_MMSYS_TOP_RELAY_SOF_PRE 270
+#define CMDQ_EVENT_VPP0_VPP1_MMSYS_TOP_RELAY_SOF 271
+#define CMDQ_EVENT_VPP0_VPP1_IN_MMSYS_TOP_RELAY_SOF_PRE 272
+
+#define CMDQ_EVENT_VPP0_MDP_RDMA_FRAME_DONE 288
+#define CMDQ_EVENT_VPP0_MDP_FG_TILE_DONE 289
+#define CMDQ_EVENT_VPP0_STITCH_FRAME_DONE 290
+#define CMDQ_EVENT_VPP0_MDP_HDR_FRAME_DONE 291
+#define CMDQ_EVENT_VPP0_MDP_AAL_FRAME_DONE 292
+#define CMDQ_EVENT_VPP0_MDP_RSZ_FRAME_DONE 293
+#define CMDQ_EVENT_VPP0_MDP_TDSHP_FRAME_DONE 294
+#define CMDQ_EVENT_VPP0_DISP_COLOR_FRAME_DONE 295
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_FRAME_DONE 296
+#define CMDQ_EVENT_VPP0_VPP_PADDING_IN_PADDING_FRAME_DONE 297
+#define CMDQ_EVENT_VPP0_MDP_TCC_TCC_FRAME_DONE 298
+#define CMDQ_EVENT_VPP0_MDP_WROT_VIDO_WDONE 299
+
+#define CMDQ_EVENT_VPP0_STREAM_DONE_0 320
+#define CMDQ_EVENT_VPP0_STREAM_DONE_1 321
+#define CMDQ_EVENT_VPP0_STREAM_DONE_2 322
+#define CMDQ_EVENT_VPP0_STREAM_DONE_3 323
+#define CMDQ_EVENT_VPP0_STREAM_DONE_4 324
+#define CMDQ_EVENT_VPP0_STREAM_DONE_5 325
+#define CMDQ_EVENT_VPP0_STREAM_DONE_6 326
+#define CMDQ_EVENT_VPP0_STREAM_DONE_7 327
+#define CMDQ_EVENT_VPP0_STREAM_DONE_8 328
+#define CMDQ_EVENT_VPP0_STREAM_DONE_9 329
+#define CMDQ_EVENT_VPP0_STREAM_DONE_10 330
+#define CMDQ_EVENT_VPP0_STREAM_DONE_11 331
+#define CMDQ_EVENT_VPP0_STREAM_DONE_12 332
+#define CMDQ_EVENT_VPP0_STREAM_DONE_13 333
+#define CMDQ_EVENT_VPP0_STREAM_DONE_14 334
+#define CMDQ_EVENT_VPP0_STREAM_DONE_15 335
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_0 336
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_1 337
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_2 338
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_3 339
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_4 340
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_5 341
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_6 342
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_7 343
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_8 344
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_9 345
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_10 346
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_11 347
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_12 348
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_13 349
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_14 350
+#define CMDQ_EVENT_VPP0_BUF_UNDERRUN_15 351
+#define CMDQ_EVENT_VPP0_MDP_RDMA_SW_RST_DONE 352
+#define CMDQ_EVENT_VPP0_MDP_RDMA_PM_VALID 353
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_FRAME_RESET_DONE_PULSE 354
+#define CMDQ_EVENT_VPP0_MDP_WROT_SW_RST_DONE 355
+
+#define CMDQ_EVENT_VPP1_HDMI_META_SOF 384
+#define CMDQ_EVENT_VPP1_DGI_SOF 385
+#define CMDQ_EVENT_VPP1_VPP_SPLIT_SOF 386
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_TCC_SOF 387
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RDMA_SOF 388
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RDMA_SOF 389
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RDMA_SOF 390
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_FG_SOF 391
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_FG_SOF 392
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_FG_SOF 393
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_HDR_SOF 394
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_HDR_SOF 395
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_HDR_SOF 396
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_AAL_SOF 397
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_AAL_SOF 398
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_AAL_SOF 399
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RSZ_SOF 400
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RSZ_SOF 401
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RSZ_SOF 402
+#define CMDQ_EVENT_VPP1_SVPP1_TDSHP_SOF 403
+#define CMDQ_EVENT_VPP1_SVPP2_TDSHP_SOF 404
+#define CMDQ_EVENT_VPP1_SVPP3_TDSHP_SOF 405
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_MERGE_SOF 406
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_MERGE_SOF 407
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_COLOR_SOF 408
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_COLOR_SOF 409
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_COLOR_SOF 410
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_SOF 411
+#define CMDQ_EVENT_VPP1_SVPP1_VPP_PAD_SOF 412
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_PAD_SOF 413
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_PAD_SOF 414
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_SOF 415
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_SOF 416
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_SOF 417
+#define CMDQ_EVENT_VPP1_VPP0_DL_IRLY_SOF 418
+#define CMDQ_EVENT_VPP1_VPP0_DL_ORLY_SOF 419
+#define CMDQ_EVENT_VPP1_VDO0_DL_ORLY_0_SOF 420
+#define CMDQ_EVENT_VPP1_VDO0_DL_ORLY_1_SOF 421
+#define CMDQ_EVENT_VPP1_VDO1_DL_ORLY_0_SOF 422
+#define CMDQ_EVENT_VPP1_VDO1_DL_ORLY_1_SOF 423
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RDMA_FRAME_DONE 424
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RDMA_FRAME_DONE 425
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RDMA_FRAME_DONE 426
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_FRAME_DONE 427
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_FRAME_DONE 428
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_FRAME_DONE 429
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_FRAME_DONE 430
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RSZ_FRAME_DONE 431
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RSZ_FRAME_DONE 432
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RSZ_FRAME_DONE 433
+#define CMDQ_EVENT_VPP1_FRAME_DONE_10 434
+#define CMDQ_EVENT_VPP1_FRAME_DONE_11 435
+#define CMDQ_EVENT_VPP1_FRAME_DONE_12 436
+#define CMDQ_EVENT_VPP1_FRAME_DONE_13 437
+#define CMDQ_EVENT_VPP1_FRAME_DONE_14 438
+#define CMDQ_EVENT_VPP1_STREAM_DONE_0 439
+#define CMDQ_EVENT_VPP1_STREAM_DONE_1 440
+#define CMDQ_EVENT_VPP1_STREAM_DONE_2 441
+#define CMDQ_EVENT_VPP1_STREAM_DONE_3 442
+#define CMDQ_EVENT_VPP1_STREAM_DONE_4 443
+#define CMDQ_EVENT_VPP1_STREAM_DONE_5 444
+#define CMDQ_EVENT_VPP1_STREAM_DONE_6 445
+#define CMDQ_EVENT_VPP1_STREAM_DONE_7 446
+#define CMDQ_EVENT_VPP1_STREAM_DONE_8 447
+#define CMDQ_EVENT_VPP1_STREAM_DONE_9 448
+#define CMDQ_EVENT_VPP1_STREAM_DONE_10 449
+#define CMDQ_EVENT_VPP1_STREAM_DONE_11 450
+#define CMDQ_EVENT_VPP1_STREAM_DONE_12 451
+#define CMDQ_EVENT_VPP1_STREAM_DONE_13 452
+#define CMDQ_EVENT_VPP1_STREAM_DONE_14 453
+#define CMDQ_EVENT_VPP1_STREAM_DONE_15 454
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_0 455
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_1 456
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_2 457
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_3 458
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_4 459
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_5 460
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_6 461
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_7 462
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_8 463
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_9 464
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_10 465
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_11 466
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_12 467
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_13 468
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_14 469
+#define CMDQ_EVENT_VPP1_MDP_BUF_UNDERRUN_15 470
+#define CMDQ_EVENT_VPP1_DGI_0 471
+#define CMDQ_EVENT_VPP1_DGI_1 472
+#define CMDQ_EVENT_VPP1_DGI_2 473
+#define CMDQ_EVENT_VPP1_DGI_3 474
+#define CMDQ_EVENT_VPP1_DGI_4 475
+#define CMDQ_EVENT_VPP1_DGI_5 476
+#define CMDQ_EVENT_VPP1_DGI_6 477
+#define CMDQ_EVENT_VPP1_DGI_7 478
+#define CMDQ_EVENT_VPP1_DGI_8 479
+#define CMDQ_EVENT_VPP1_DGI_9 480
+#define CMDQ_EVENT_VPP1_DGI_10 481
+#define CMDQ_EVENT_VPP1_DGI_11 482
+#define CMDQ_EVENT_VPP1_DGI_12 483
+#define CMDQ_EVENT_VPP1_DGI_13 484
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_MERGE 485
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_MERGE 486
+#define CMDQ_EVENT_VPP1_MDP_OVL_FRAME_RESET_DONE_PULSE 487
+#define CMDQ_EVENT_VPP1_VPP_SPLIT_DGI 488
+#define CMDQ_EVENT_VPP1_VPP_SPLIT_HDMI 489
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_SW_RST_DONE 490
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_SW_RST_DONE 491
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_SW_RST_DONE 492
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_FG_TILE_DONE 493
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_FG_TILE_DONE 494
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_FG_TILE_DONE 495
+
+#define CMDQ_EVENT_VDO0_DISP_OVL0_SOF 512
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_SOF 513
+#define CMDQ_EVENT_VDO0_DISP_RDMA0_SOF 514
+#define CMDQ_EVENT_VDO0_DISP_COLOR0_SOF 515
+#define CMDQ_EVENT_VDO0_DISP_CCORR0_SOF 516
+#define CMDQ_EVENT_VDO0_DISP_AAL0_SOF 517
+#define CMDQ_EVENT_VDO0_DISP_GAMMA0_SOF 518
+#define CMDQ_EVENT_VDO0_DISP_DITHER0_SOF 519
+#define CMDQ_EVENT_VDO0_DSI0_SOF 520
+#define CMDQ_EVENT_VDO0_DSC_WRAP0C0_SOF 521
+#define CMDQ_EVENT_VDO0_DISP_OVL1_SOF 522
+#define CMDQ_EVENT_VDO0_DISP_WDMA1_SOF 523
+#define CMDQ_EVENT_VDO0_DISP_RDMA1_SOF 524
+#define CMDQ_EVENT_VDO0_DISP_COLOR1_SOF 525
+#define CMDQ_EVENT_VDO0_DISP_CCORR1_SOF 526
+#define CMDQ_EVENT_VDO0_DISP_AAL1_SOF 527
+#define CMDQ_EVENT_VDO0_DISP_GAMMA1_SOF 528
+#define CMDQ_EVENT_VDO0_DISP_DITHER1_SOF 529
+#define CMDQ_EVENT_VDO0_DSI1_SOF 530
+#define CMDQ_EVENT_VDO0_DSC_WRAP0C1_SOF 531
+#define CMDQ_EVENT_VDO0_VPP_MERGE0_SOF 532
+#define CMDQ_EVENT_VDO0_DP_INTF0_SOF 533
+#define CMDQ_EVENT_VDO0_VPP1_DL_RELAY0_SOF 534
+#define CMDQ_EVENT_VDO0_VPP1_DL_RELAY1_SOF 535
+#define CMDQ_EVENT_VDO0_VDO1_DL_RELAY2_SOF 536
+#define CMDQ_EVENT_VDO0_VDO0_DL_RELAY3_SOF 537
+#define CMDQ_EVENT_VDO0_VDO0_DL_RELAY4_SOF 538
+#define CMDQ_EVENT_VDO0_DISP_PWM0_SOF 539
+#define CMDQ_EVENT_VDO0_DISP_PWM1_SOF 540
+
+#define CMDQ_EVENT_VDO0_DISP_OVL0_FRAME_DONE 544
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_FRAME_DONE 545
+#define CMDQ_EVENT_VDO0_DISP_RDMA0_FRAME_DONE 546
+#define CMDQ_EVENT_VDO0_DISP_COLOR0_FRAME_DONE 547
+#define CMDQ_EVENT_VDO0_DISP_CCORR0_FRAME_DONE 548
+#define CMDQ_EVENT_VDO0_DISP_AAL0_FRAME_DONE 549
+#define CMDQ_EVENT_VDO0_DISP_GAMMA0_FRAME_DONE 550
+#define CMDQ_EVENT_VDO0_DISP_DITHER0_FRAME_DONE 551
+#define CMDQ_EVENT_VDO0_DSI0_FRAME_DONE 552
+#define CMDQ_EVENT_VDO0_DSC_WRAP0C0_FRAME_DONE 553
+#define CMDQ_EVENT_VDO0_DISP_OVL1_FRAME_DONE 554
+#define CMDQ_EVENT_VDO0_DISP_WDMA1_FRAME_DONE 555
+#define CMDQ_EVENT_VDO0_DISP_RDMA1_FRAME_DONE 556
+#define CMDQ_EVENT_VDO0_DISP_COLOR1_FRAME_DONE 557
+#define CMDQ_EVENT_VDO0_DISP_CCORR1_FRAME_DONE 558
+#define CMDQ_EVENT_VDO0_DISP_AAL1_FRAME_DONE 559
+#define CMDQ_EVENT_VDO0_DISP_GAMMA1_FRAME_DONE 560
+#define CMDQ_EVENT_VDO0_DISP_DITHER1_FRAME_DONE 561
+#define CMDQ_EVENT_VDO0_DSI1_FRAME_DONE 562
+#define CMDQ_EVENT_VDO0_DSC_WRAP0C1_FRAME_DONE 563
+
+#define CMDQ_EVENT_VDO0_DP_INTF0_FRAME_DONE 565
+
+#define CMDQ_EVENT_VDO0_DISP_SMIASSERT_ENG 576
+#define CMDQ_EVENT_VDO0_DSI0_IRQ_ENG_EVENT_MM 577
+#define CMDQ_EVENT_VDO0_DSI0_TE_ENG_EVENT_MM 578
+#define CMDQ_EVENT_VDO0_DSI0_DONE_ENG_EVENT_MM 579
+#define CMDQ_EVENT_VDO0_DSI0_SOF_ENG_EVENT_MM 580
+#define CMDQ_EVENT_VDO0_DSI0_VACTL_ENG_EVENT_MM 581
+#define CMDQ_EVENT_VDO0_DSI1_IRQ_ENG_EVENT_MM 582
+#define CMDQ_EVENT_VDO0_DSI1_TE_ENG_EVENT_MM 583
+#define CMDQ_EVENT_VDO0_DSI1_DONE_ENG_EVENT_MM 584
+#define CMDQ_EVENT_VDO0_DSI1_SOF_ENG_EVENT_MM 585
+#define CMDQ_EVENT_VDO0_DSI1_VACTL_ENG_EVENT_MM 586
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_SW_RST_DONE_ENG 587
+#define CMDQ_EVENT_VDO0_DISP_WDMA1_SW_RST_DONE_ENG 588
+#define CMDQ_EVENT_VDO0_DISP_OVL0_RST_DONE_ENG 589
+#define CMDQ_EVENT_VDO0_DISP_OVL1_RST_DONE_ENG 590
+#define CMDQ_EVENT_VDO0_DP_INTF0_VSYNC_START_ENG_EVENT_MM 591
+#define CMDQ_EVENT_VDO0_DP_INTF0_VSYNC_END_ENG_EVENT_MM 592
+#define CMDQ_EVENT_VDO0_DP_INTF0_VDE_START_ENG_EVENT_MM 593
+#define CMDQ_EVENT_VDO0_DP_INTF0_VDE_END_ENG_EVENT_MM 594
+#define CMDQ_EVENT_VDO0_DP_INTF0_TARGET_LINE_ENG_EVENT_MM 595
+#define CMDQ_EVENT_VDO0_VPP_MERGE0_ENG 596
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_0 597
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_1 598
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_2 599
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_3 600
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_4 601
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_5 602
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_6 603
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_7 604
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_8 605
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_9 606
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_10 607
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_11 608
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_12 609
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_13 610
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_14 611
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_15 612
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_0 613
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_1 614
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_2 615
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_3 616
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_4 617
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_5 618
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_6 619
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_7 620
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_8 621
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_9 622
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_10 623
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_11 624
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_12 625
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_13 626
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_14 627
+#define CMDQ_EVENT_VDO0_DISP_BUF_UNDERRUN_15 628
+
+#define CMDQ_EVENT_VDO1_MDP_RDMA0_SOF 640
+#define CMDQ_EVENT_VDO1_MDP_RDMA1_SOF 641
+#define CMDQ_EVENT_VDO1_MDP_RDMA2_SOF 642
+#define CMDQ_EVENT_VDO1_MDP_RDMA3_SOF 643
+#define CMDQ_EVENT_VDO1_MDP_RDMA4_SOF 644
+#define CMDQ_EVENT_VDO1_MDP_RDMA5_SOF 645
+#define CMDQ_EVENT_VDO1_MDP_RDMA6_SOF 646
+#define CMDQ_EVENT_VDO1_MDP_RDMA7_SOF 647
+#define CMDQ_EVENT_VDO1_VPP_MERGE0_SOF 648
+#define CMDQ_EVENT_VDO1_VPP_MERGE1_SOF 649
+#define CMDQ_EVENT_VDO1_VPP_MERGE2_SOF 650
+#define CMDQ_EVENT_VDO1_VPP_MERGE3_SOF 651
+#define CMDQ_EVENT_VDO1_VPP_MERGE4_SOF 652
+#define CMDQ_EVENT_VDO1_VPP2_DL_RELAY_SOF 653
+#define CMDQ_EVENT_VDO1_VPP3_DL_RELAY_SOF 654
+#define CMDQ_EVENT_VDO1_VDO0_DSC_DL_ASYNC_SOF 655
+#define CMDQ_EVENT_VDO1_VDO0_MERGE_DL_ASYNC_SOF 656
+#define CMDQ_EVENT_VDO1_OUT_DL_RELAY_SOF 657
+#define CMDQ_EVENT_VDO1_DISP_MIXER_SOF 658
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE0_SOF 659
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_SOF 660
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_SOF 661
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_SOF 662
+#define CMDQ_EVENT_VDO1_HDR_VDO_BE0_SOF 663
+#define CMDQ_EVENT_VDO1_HDR_MLOAD_SOF 664
+
+#define CMDQ_EVENT_VDO1_MDP_RDMA0_FRAME_DONE 672
+#define CMDQ_EVENT_VDO1_MDP_RDMA1_FRAME_DONE 673
+#define CMDQ_EVENT_VDO1_MDP_RDMA2_FRAME_DONE 674
+#define CMDQ_EVENT_VDO1_MDP_RDMA3_FRAME_DONE 675
+#define CMDQ_EVENT_VDO1_MDP_RDMA4_FRAME_DONE 676
+#define CMDQ_EVENT_VDO1_MDP_RDMA5_FRAME_DONE 677
+#define CMDQ_EVENT_VDO1_MDP_RDMA6_FRAME_DONE 678
+#define CMDQ_EVENT_VDO1_MDP_RDMA7_FRAME_DONE 679
+#define CMDQ_EVENT_VDO1_VPP_MERGE0_FRAME_DONE 680
+#define CMDQ_EVENT_VDO1_VPP_MERGE1_FRAME_DONE 681
+#define CMDQ_EVENT_VDO1_VPP_MERGE2_FRAME_DONE 682
+#define CMDQ_EVENT_VDO1_VPP_MERGE3_FRAME_DONE 683
+#define CMDQ_EVENT_VDO1_VPP_MERGE4_FRAME_DONE 684
+#define CMDQ_EVENT_VDO1_DPI0_FRAME_DONE 685
+#define CMDQ_EVENT_VDO1_DPI1_FRAME_DONE 686
+#define CMDQ_EVENT_VDO1_DP_INTF0_FRAME_DONE 687
+#define CMDQ_EVENT_VDO1_DISP_MIXER_FRAME_DONE_MM 688
+
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_0 704
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_1 705
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_2 706
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_3 707
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_4 708
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_5 709
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_6 710
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_7 711
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_8 712
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_9 713
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_10 714
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_11 715
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_12 716
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_13 717
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_14 718
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_15 719
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_0 720
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_1 721
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_2 722
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_3 723
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_4 724
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_5 725
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_6 726
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_7 727
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_8 728
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_9 729
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_10 730
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_11 731
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_12 732
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_13 733
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_14 734
+#define CMDQ_EVENT_VDO1_BUF_UNDERRUN_ENG_15 735
+#define CMDQ_EVENT_VDO1_MDP_RDMA0_SW_RST_DONE 736
+#define CMDQ_EVENT_VDO1_MDP_RDMA1_SW_RST_DONE 737
+#define CMDQ_EVENT_VDO1_MDP_RDMA2_SW_RST_DONE 738
+#define CMDQ_EVENT_VDO1_MDP_RDMA3_SW_RST_DONE 739
+#define CMDQ_EVENT_VDO1_MDP_RDMA4_SW_RST_DONE 740
+#define CMDQ_EVENT_VDO1_MDP_RDMA5_SW_RST_DONE 741
+#define CMDQ_EVENT_VDO1_MDP_RDMA6_SW_RST_DONE 742
+#define CMDQ_EVENT_VDO1_MDP_RDMA7_SW_RST_DONE 743
+
+#define CMDQ_EVENT_VDO1_DP0_VDE_END_ENG_EVENT_MM 745
+#define CMDQ_EVENT_VDO1_DP0_VDE_START_ENG_EVENT_MM 746
+#define CMDQ_EVENT_VDO1_DP0_VSYNC_END_ENG_EVENT_MM 747
+#define CMDQ_EVENT_VDO1_DP0_VSYNC_START_ENG_EVENT_MM 748
+#define CMDQ_EVENT_VDO1_DP0_TARGET_LINE_ENG_EVENT_MM 749
+#define CMDQ_EVENT_VDO1_VPP_MERGE0 750
+#define CMDQ_EVENT_VDO1_VPP_MERGE1 751
+#define CMDQ_EVENT_VDO1_VPP_MERGE2 752
+#define CMDQ_EVENT_VDO1_VPP_MERGE3 753
+#define CMDQ_EVENT_VDO1_VPP_MERGE4 754
+#define CMDQ_EVENT_VDO1_HDMITX 755
+#define CMDQ_EVENT_VDO1_HDR_VDO_BE0_ADL_TRIG_EVENT_MM 756
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_THDR_ADL_TRIG_EVENT_MM 757
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_DM_ADL_TRIG_EVENT_MM 758
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_THDR_ADL_TRIG_EVENT_MM 759
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_DM_ADL_TRIG_EVENT_MM 760
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_ADL_TRIG_EVENT_MM 761
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_AD0_TRIG_EVENT_MM 762
+
+#define CMDQ_EVENT_CAM_A_PASS1_DONE 769
+#define CMDQ_EVENT_CAM_B_PASS1_DONE 770
+#define CMDQ_EVENT_GCAMSV_A_PASS1_DONE 771
+#define CMDQ_EVENT_GCAMSV_B_PASS1_DONE 772
+#define CMDQ_EVENT_MRAW_0_PASS1_DONE 773
+#define CMDQ_EVENT_MRAW_1_PASS1_DONE 774
+#define CMDQ_EVENT_MRAW_2_PASS1_DONE 775
+#define CMDQ_EVENT_MRAW_3_PASS1_DONE 776
+#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL_X 777
+#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL_X 778
+#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 779
+#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 780
+#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 781
+#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 782
+#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 783
+#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 784
+#define CMDQ_EVENT_SENINF_CAM8_FIFO_FULL 785
+#define CMDQ_EVENT_SENINF_CAM9_FIFO_FULL 786
+#define CMDQ_EVENT_SENINF_CAM10_FIFO_FULL_X 787
+#define CMDQ_EVENT_SENINF_CAM11_FIFO_FULL_X 788
+#define CMDQ_EVENT_SENINF_CAM12_FIFO_FULL_X 789
+#define CMDQ_EVENT_SENINF_CAM13_FIFO_FULL_X 790
+#define CMDQ_EVENT_TG_OVRUN_MRAW0_INT_X0 791
+#define CMDQ_EVENT_TG_OVRUN_MRAW1_INT_X0 792
+#define CMDQ_EVENT_TG_OVRUN_MRAW2_INT 793
+#define CMDQ_EVENT_TG_OVRUN_MRAW3_INT 794
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW0_INT 795
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW1_INT 796
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW2_INT 797
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW3_INT 798
+#define CMDQ_EVENT_U_CAMSYS_PDA_IRQO_EVENT_DONE_D1 799
+#define CMDQ_EVENT_SUBB_TG_INT4 800
+#define CMDQ_EVENT_SUBB_TG_INT3 801
+#define CMDQ_EVENT_SUBB_TG_INT2 802
+#define CMDQ_EVENT_SUBB_TG_INT1 803
+#define CMDQ_EVENT_SUBA_TG_INT4 804
+#define CMDQ_EVENT_SUBA_TG_INT3 805
+#define CMDQ_EVENT_SUBA_TG_INT2 806
+#define CMDQ_EVENT_SUBA_TG_INT1 807
+#define CMDQ_EVENT_SUBB_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 808
+#define CMDQ_EVENT_SUBB_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 809
+#define CMDQ_EVENT_SUBB_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 810
+#define CMDQ_EVENT_SUBB_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 811
+#define CMDQ_EVENT_SUBA_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 812
+#define CMDQ_EVENT_SUBA_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 813
+#define CMDQ_EVENT_SUBA_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 814
+#define CMDQ_EVENT_SUBA_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 815
+#define CMDQ_EVENT_GCE1_SOF_0 816
+#define CMDQ_EVENT_GCE1_SOF_1 817
+#define CMDQ_EVENT_GCE1_SOF_2 818
+#define CMDQ_EVENT_GCE1_SOF_3 819
+#define CMDQ_EVENT_GCE1_SOF_4 820
+#define CMDQ_EVENT_GCE1_SOF_5 821
+#define CMDQ_EVENT_GCE1_SOF_6 822
+#define CMDQ_EVENT_GCE1_SOF_7 823
+#define CMDQ_EVENT_GCE1_SOF_8 824
+#define CMDQ_EVENT_GCE1_SOF_9 825
+#define CMDQ_EVENT_GCE1_SOF_10 826
+#define CMDQ_EVENT_GCE1_SOF_11 827
+#define CMDQ_EVENT_GCE1_SOF_12 828
+#define CMDQ_EVENT_GCE1_SOF_13 829
+#define CMDQ_EVENT_GCE1_SOF_14 830
+#define CMDQ_EVENT_GCE1_SOF_15 831
+
+#define CMDQ_EVENT_VDEC_LAT_LINE_COUNT_THRESHOLD_INTERRUPT 832
+#define CMDQ_EVENT_VDEC_LAT_VDEC_INT 833
+#define CMDQ_EVENT_VDEC_LAT_VDEC_PAUSE 834
+#define CMDQ_EVENT_VDEC_LAT_VDEC_DEC_ERROR 835
+#define CMDQ_EVENT_VDEC_LAT_MC_BUSY_OVERFLOW_MDEC_TIMEOUT 836
+#define CMDQ_EVENT_VDEC_LAT_VDEC_FRAME_DONE 837
+#define CMDQ_EVENT_VDEC_LAT_INI_FETCH_RDY 838
+#define CMDQ_EVENT_VDEC_LAT_PROCESS_FLAG 839
+#define CMDQ_EVENT_VDEC_LAT_SEARCH_START_CODE_DONE 840
+#define CMDQ_EVENT_VDEC_LAT_REF_REORDER_DONE 841
+#define CMDQ_EVENT_VDEC_LAT_WP_TBLE_DONE 842
+#define CMDQ_EVENT_VDEC_LAT_COUNT_SRAM_CLR_DONE_AND_CTX_SRAM_CLR_DONE 843
+#define CMDQ_EVENT_VDEC_LAT_GCE_CNT_OP_THRESHOLD 847
+
+#define CMDQ_EVENT_VDEC_LAT1_LINE_COUNT_THRESHOLD_INTERRUPT 848
+#define CMDQ_EVENT_VDEC_LAT1_VDEC_INT 849
+#define CMDQ_EVENT_VDEC_LAT1_VDEC_PAUSE 850
+#define CMDQ_EVENT_VDEC_LAT1_VDEC_DEC_ERROR 851
+#define CMDQ_EVENT_VDEC_LAT1_MC_BUSY_OVERFLOW_MDEC_TIMEOUT 852
+#define CMDQ_EVENT_VDEC_LAT1_VDEC_FRAME_DONE 853
+#define CMDQ_EVENT_VDEC_LAT1_INI_FETCH_RDY 854
+#define CMDQ_EVENT_VDEC_LAT1_PROCESS_FLAG 855
+#define CMDQ_EVENT_VDEC_LAT1_SEARCH_START_CODE_DONE 856
+#define CMDQ_EVENT_VDEC_LAT1_REF_REORDER_DONE 857
+#define CMDQ_EVENT_VDEC_LAT1_WP_TBLE_DONE 858
+#define CMDQ_EVENT_VDEC_LAT1_COUNT_SRAM_CLR_DONE_AND_CTX_SRAM_CLR_DONE 859
+#define CMDQ_EVENT_VDEC_LAT1_GCE_CNT_OP_THRESHOLD 863
+
+#define CMDQ_EVENT_VDEC_SOC_GLOBAL_CON_250_0 864
+#define CMDQ_EVENT_VDEC_SOC_GLOBAL_CON_250_1 865
+
+#define CMDQ_EVENT_VDEC_SOC_GLOBAL_CON_250_8 872
+#define CMDQ_EVENT_VDEC_SOC_GLOBAL_CON_250_9 873
+
+#define CMDQ_EVENT_VDEC_CORE_LINE_COUNT_THRESHOLD_INTERRUPT 896
+#define CMDQ_EVENT_VDEC_CORE_VDEC_INT 897
+#define CMDQ_EVENT_VDEC_CORE_VDEC_PAUSE 898
+#define CMDQ_EVENT_VDEC_CORE_VDEC_DEC_ERROR 899
+#define CMDQ_EVENT_VDEC_CORE_MC_BUSY_OVERFLOW_MDEC_TIMEOUT 900
+#define CMDQ_EVENT_VDEC_CORE_VDEC_FRAME_DONE 901
+#define CMDQ_EVENT_VDEC_CORE_INI_FETCH_RDY 902
+#define CMDQ_EVENT_VDEC_CORE_PROCESS_FLAG 903
+#define CMDQ_EVENT_VDEC_CORE_SEARCH_START_CODE_DONE 904
+#define CMDQ_EVENT_VDEC_CORE_REF_REORDER_DONE 905
+#define CMDQ_EVENT_VDEC_CORE_WP_TBLE_DONE 906
+#define CMDQ_EVENT_VDEC_CORE_COUNT_SRAM_CLR_DONE_AND_CTX_SRAM_CLR_DONE 907
+#define CMDQ_EVENT_VDEC_CORE_GCE_CNT_OP_THRESHOLD 911
+
+#define CMDQ_EVENT_VDEC_CORE1_LINE_COUNT_THRESHOLD_INTERRUPT 912
+#define CMDQ_EVENT_VDEC_CORE1_VDEC_INT 913
+#define CMDQ_EVENT_VDEC_CORE1_VDEC_PAUSE 914
+#define CMDQ_EVENT_VDEC_CORE1_VDEC_DEC_ERROR 915
+#define CMDQ_EVENT_VDEC_CORE1_MC_BUSY_OVERFLOW_MDEC_TIMEOUT 916
+#define CMDQ_EVENT_VDEC_CORE1_VDEC_FRAME_DONE 917
+#define CMDQ_EVENT_VDEC_CORE1_INI_FETCH_RDY 918
+#define CMDQ_EVENT_VDEC_CORE1_PROCESS_FLAG 919
+#define CMDQ_EVENT_VDEC_CORE1_SEARCH_START_CODE_DONE 920
+#define CMDQ_EVENT_VDEC_CORE1_REF_REORDER_DONE 921
+#define CMDQ_EVENT_VDEC_CORE1_WP_TBLE_DONE 922
+#define CMDQ_EVENT_VDEC_CORE1_COUNT_SRAM_CLR_DONE_AND_CTX_SRAM_CLR_DONE 923
+#define CMDQ_EVENT_VDEC_CORE1_CNT_OP_THRESHOLD 927
+
+#define CMDQ_EVENT_VENC_TOP_FRAME_DONE 929
+#define CMDQ_EVENT_VENC_TOP_PAUSE_DONE 930
+#define CMDQ_EVENT_VENC_TOP_JPGENC_DONE 931
+#define CMDQ_EVENT_VENC_TOP_MB_DONE 932
+#define CMDQ_EVENT_VENC_TOP_128BYTE_DONE 933
+#define CMDQ_EVENT_VENC_TOP_JPGDEC_DONE 934
+#define CMDQ_EVENT_VENC_TOP_JPGDEC_C1_DONE 935
+#define CMDQ_EVENT_VENC_TOP_JPGDEC_INSUFF_DONE 936
+#define CMDQ_EVENT_VENC_TOP_JPGDEC_C1_INSUFF_DONE 937
+#define CMDQ_EVENT_VENC_TOP_WP_2ND_STAGE_DONE 938
+#define CMDQ_EVENT_VENC_TOP_WP_3RD_STAGE_DONE 939
+#define CMDQ_EVENT_VENC_TOP_PPS_HEADER_DONE 940
+#define CMDQ_EVENT_VENC_TOP_SPS_HEADER_DONE 941
+#define CMDQ_EVENT_VENC_TOP_VPS_HEADER_DONE 942
+
+#define CMDQ_EVENT_VENC_CORE1_TOP_FRAME_DONE 945
+#define CMDQ_EVENT_VENC_CORE1_TOP_PAUSE_DONE 946
+#define CMDQ_EVENT_VENC_CORE1_TOP_JPGENC_DONE 947
+#define CMDQ_EVENT_VENC_CORE1_TOP_MB_DONE 948
+#define CMDQ_EVENT_VENC_CORE1_TOP_128BYTE_DONE 949
+#define CMDQ_EVENT_VENC_CORE1_TOP_JPGDEC_DONE 950
+#define CMDQ_EVENT_VENC_CORE1_TOP_JPGDEC_C1_DONE 951
+#define CMDQ_EVENT_VENC_CORE1_TOP_JPGDEC_INSUFF_DONE 952
+#define CMDQ_EVENT_VENC_CORE1_TOP_JPGDEC_C1_INSUFF_DONE 953
+#define CMDQ_EVENT_VENC_CORE1_TOP_WP_2ND_STAGE_DONE 954
+#define CMDQ_EVENT_VENC_CORE1_TOP_WP_3RD_STAGE_DONE 955
+#define CMDQ_EVENT_VENC_CORE1_TOP_PPS_HEADER_DONE 956
+#define CMDQ_EVENT_VENC_CORE1_TOP_SPS_HEADER_DONE 957
+#define CMDQ_EVENT_VENC_CORE1_TOP_VPS_HEADER_DONE 958
+
+#define CMDQ_EVENT_WPE_VPP0_WPE_GCE_FRAME_DONE 962
+#define CMDQ_EVENT_WPE_VPP0_WPE_DONE_SYNC_OUT 963
+
+#define CMDQ_EVENT_WPE_VPP1_WPE_GCE_FRAME_DONE 969
+#define CMDQ_EVENT_WPE_VPP1_WPE_DONE_SYNC_OUT 970
+
+#define CMDQ_EVENT_DP_TX_VBLANK_FALLING 994
+#define CMDQ_EVENT_DP_TX_VSC_FINISH 995
+
+#define CMDQ_EVENT_OUTPIN_0 1018
+#define CMDQ_EVENT_OUTPIN_1 1019
+
+/* end of hw event */
+#define CMDQ_MAX_HW_EVENT 1019
+
+#endif
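The constants above are consumed from board device trees: the thread priorities feed the GCE mailbox specifier and the event IDs feed mediatek,gce-events. A minimal sketch, assuming a hypothetical display-mutex consumer node and a two-cell mailbox specifier of thread index plus priority (the authoritative cell layout is defined by the mediatek,gce-mailbox binding):

	mutex@1c016000 {
		compatible = "mediatek,mt8195-disp-mutex";
		/* GCE thread 0 at the highest scheduling priority */
		mboxes = <&gce0 0 CMDQ_THR_PRIO_HIGHEST>;
		/* wait on the VDO0 stream-done 0 hardware event */
		mediatek,gce-events = <CMDQ_EVENT_VDO0_DISP_STREAM_DONE_0>;
	};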
diff --git a/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h b/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h
new file mode 100644
index 000000000000..4e16d31a71c9
--- /dev/null
+++ b/include/dt-bindings/gpio/amlogic,t7-periphs-pinctrl.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ * Author: Huqiang Qin <huqiang.qin@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_T7_GPIO_H
+#define _DT_BINDINGS_AMLOGIC_T7_GPIO_H
+
+#define GPIOB_0 0
+#define GPIOB_1 1
+#define GPIOB_2 2
+#define GPIOB_3 3
+#define GPIOB_4 4
+#define GPIOB_5 5
+#define GPIOB_6 6
+#define GPIOB_7 7
+#define GPIOB_8 8
+#define GPIOB_9 9
+#define GPIOB_10 10
+#define GPIOB_11 11
+#define GPIOB_12 12
+
+#define GPIOC_0 13
+#define GPIOC_1 14
+#define GPIOC_2 15
+#define GPIOC_3 16
+#define GPIOC_4 17
+#define GPIOC_5 18
+#define GPIOC_6 19
+
+#define GPIOX_0 20
+#define GPIOX_1 21
+#define GPIOX_2 22
+#define GPIOX_3 23
+#define GPIOX_4 24
+#define GPIOX_5 25
+#define GPIOX_6 26
+#define GPIOX_7 27
+#define GPIOX_8 28
+#define GPIOX_9 29
+#define GPIOX_10 30
+#define GPIOX_11 31
+#define GPIOX_12 32
+#define GPIOX_13 33
+#define GPIOX_14 34
+#define GPIOX_15 35
+#define GPIOX_16 36
+#define GPIOX_17 37
+#define GPIOX_18 38
+#define GPIOX_19 39
+
+#define GPIOW_0 40
+#define GPIOW_1 41
+#define GPIOW_2 42
+#define GPIOW_3 43
+#define GPIOW_4 44
+#define GPIOW_5 45
+#define GPIOW_6 46
+#define GPIOW_7 47
+#define GPIOW_8 48
+#define GPIOW_9 49
+#define GPIOW_10 50
+#define GPIOW_11 51
+#define GPIOW_12 52
+#define GPIOW_13 53
+#define GPIOW_14 54
+#define GPIOW_15 55
+#define GPIOW_16 56
+
+#define GPIOD_0 57
+#define GPIOD_1 58
+#define GPIOD_2 59
+#define GPIOD_3 60
+#define GPIOD_4 61
+#define GPIOD_5 62
+#define GPIOD_6 63
+#define GPIOD_7 64
+#define GPIOD_8 65
+#define GPIOD_9 66
+#define GPIOD_10 67
+#define GPIOD_11 68
+#define GPIOD_12 69
+
+#define GPIOE_0 70
+#define GPIOE_1 71
+#define GPIOE_2 72
+#define GPIOE_3 73
+#define GPIOE_4 74
+#define GPIOE_5 75
+#define GPIOE_6 76
+
+#define GPIOZ_0 77
+#define GPIOZ_1 78
+#define GPIOZ_2 79
+#define GPIOZ_3 80
+#define GPIOZ_4 81
+#define GPIOZ_5 82
+#define GPIOZ_6 83
+#define GPIOZ_7 84
+#define GPIOZ_8 85
+#define GPIOZ_9 86
+#define GPIOZ_10 87
+#define GPIOZ_11 88
+#define GPIOZ_12 89
+#define GPIOZ_13 90
+
+#define GPIOT_0 91
+#define GPIOT_1 92
+#define GPIOT_2 93
+#define GPIOT_3 94
+#define GPIOT_4 95
+#define GPIOT_5 96
+#define GPIOT_6 97
+#define GPIOT_7 98
+#define GPIOT_8 99
+#define GPIOT_9 100
+#define GPIOT_10 101
+#define GPIOT_11 102
+#define GPIOT_12 103
+#define GPIOT_13 104
+#define GPIOT_14 105
+#define GPIOT_15 106
+#define GPIOT_16 107
+#define GPIOT_17 108
+#define GPIOT_18 109
+#define GPIOT_19 110
+#define GPIOT_20 111
+#define GPIOT_21 112
+#define GPIOT_22 113
+#define GPIOT_23 114
+
+#define GPIOM_0 115
+#define GPIOM_1 116
+#define GPIOM_2 117
+#define GPIOM_3 118
+#define GPIOM_4 119
+#define GPIOM_5 120
+#define GPIOM_6 121
+#define GPIOM_7 122
+#define GPIOM_8 123
+#define GPIOM_9 124
+#define GPIOM_10 125
+#define GPIOM_11 126
+#define GPIOM_12 127
+#define GPIOM_13 128
+
+#define GPIOY_0 129
+#define GPIOY_1 130
+#define GPIOY_2 131
+#define GPIOY_3 132
+#define GPIOY_4 133
+#define GPIOY_5 134
+#define GPIOY_6 135
+#define GPIOY_7 136
+#define GPIOY_8 137
+#define GPIOY_9 138
+#define GPIOY_10 139
+#define GPIOY_11 140
+#define GPIOY_12 141
+#define GPIOY_13 142
+#define GPIOY_14 143
+#define GPIOY_15 144
+#define GPIOY_16 145
+#define GPIOY_17 146
+#define GPIOY_18 147
+
+#define GPIOH_0 148
+#define GPIOH_1 149
+#define GPIOH_2 150
+#define GPIOH_3 151
+#define GPIOH_4 152
+#define GPIOH_5 153
+#define GPIOH_6 154
+#define GPIOH_7 155
+
+#define GPIO_TEST_N 156
+
+#endif /* _DT_BINDINGS_AMLOGIC_T7_GPIO_H */
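These pin IDs fill the first cell of a GPIO specifier for the T7 pin controller; the second cell takes flags from dt-bindings/gpio/gpio.h. The C3 and S4 headers below are used the same way. A hypothetical consumer fragment:

	leds {
		compatible = "gpio-leds";
		led-0 {
			gpios = <&gpio GPIOD_10 GPIO_ACTIVE_HIGH>;
		};
	};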
diff --git a/include/dt-bindings/gpio/amlogic-c3-gpio.h b/include/dt-bindings/gpio/amlogic-c3-gpio.h
new file mode 100644
index 000000000000..75c8da6f505f
--- /dev/null
+++ b/include/dt-bindings/gpio/amlogic-c3-gpio.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2021 Amlogic, Inc. All rights reserved.
+ * Author: Huqiang Qin <huqiang.qin@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_C3_GPIO_H
+#define _DT_BINDINGS_AMLOGIC_C3_GPIO_H
+
+#define GPIOE_0 0
+#define GPIOE_1 1
+#define GPIOE_2 2
+#define GPIOE_3 3
+#define GPIOE_4 4
+
+#define GPIOB_0 5
+#define GPIOB_1 6
+#define GPIOB_2 7
+#define GPIOB_3 8
+#define GPIOB_4 9
+#define GPIOB_5 10
+#define GPIOB_6 11
+#define GPIOB_7 12
+#define GPIOB_8 13
+#define GPIOB_9 14
+#define GPIOB_10 15
+#define GPIOB_11 16
+#define GPIOB_12 17
+#define GPIOB_13 18
+#define GPIOB_14 19
+
+#define GPIOC_0 20
+#define GPIOC_1 21
+#define GPIOC_2 22
+#define GPIOC_3 23
+#define GPIOC_4 24
+#define GPIOC_5 25
+#define GPIOC_6 26
+
+#define GPIOX_0 27
+#define GPIOX_1 28
+#define GPIOX_2 29
+#define GPIOX_3 30
+#define GPIOX_4 31
+#define GPIOX_5 32
+#define GPIOX_6 33
+#define GPIOX_7 34
+#define GPIOX_8 35
+#define GPIOX_9 36
+#define GPIOX_10 37
+#define GPIOX_11 38
+#define GPIOX_12 39
+#define GPIOX_13 40
+
+#define GPIOD_0 41
+#define GPIOD_1 42
+#define GPIOD_2 43
+#define GPIOD_3 44
+#define GPIOD_4 45
+#define GPIOD_5 46
+#define GPIOD_6 47
+
+#define GPIOA_0 48
+#define GPIOA_1 49
+#define GPIOA_2 50
+#define GPIOA_3 51
+#define GPIOA_4 52
+#define GPIOA_5 53
+
+#define GPIO_TEST_N 54
+
+#endif /* _DT_BINDINGS_AMLOGIC_C3_GPIO_H */
diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h
index c029467e828b..b5d531237448 100644
--- a/include/dt-bindings/gpio/gpio.h
+++ b/include/dt-bindings/gpio/gpio.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
* This header provides constants for most GPIO bindings.
*
@@ -39,4 +39,7 @@
/* Bit 5 expresses pull down */
#define GPIO_PULL_DOWN 32
+/* Bit 6 expresses pull disable */
+#define GPIO_PULL_DISABLE 64
+
#endif
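GPIO_PULL_DISABLE occupies its own bit, so a consumer ORs it into the flags cell alongside the polarity flag. An illustrative fragment (the controller phandle and pin number are hypothetical):

	reset-gpios = <&gpio 7 (GPIO_ACTIVE_LOW | GPIO_PULL_DISABLE)>;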
diff --git a/include/dt-bindings/gpio/meson-g12a-gpio.h b/include/dt-bindings/gpio/meson-g12a-gpio.h
index f7bd69350d18..fa7bb0bbf010 100644
--- a/include/dt-bindings/gpio/meson-g12a-gpio.h
+++ b/include/dt-bindings/gpio/meson-g12a-gpio.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2018 Amlogic, Inc. All rights reserved.
* Author: Xingyu Chen <xingyu.chen@amlogic.com>
diff --git a/include/dt-bindings/gpio/meson-s4-gpio.h b/include/dt-bindings/gpio/meson-s4-gpio.h
new file mode 100644
index 000000000000..35aee21b94f1
--- /dev/null
+++ b/include/dt-bindings/gpio/meson-s4-gpio.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2021 Amlogic, Inc. All rights reserved.
+ * Author: Qianggui Song <qianggui.song@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_S4_GPIO_H
+#define _DT_BINDINGS_MESON_S4_GPIO_H
+
+#define GPIOB_0 0
+#define GPIOB_1 1
+#define GPIOB_2 2
+#define GPIOB_3 3
+#define GPIOB_4 4
+#define GPIOB_5 5
+#define GPIOB_6 6
+#define GPIOB_7 7
+#define GPIOB_8 8
+#define GPIOB_9 9
+#define GPIOB_10 10
+#define GPIOB_11 11
+#define GPIOB_12 12
+#define GPIOB_13 13
+
+#define GPIOC_0 14
+#define GPIOC_1 15
+#define GPIOC_2 16
+#define GPIOC_3 17
+#define GPIOC_4 18
+#define GPIOC_5 19
+#define GPIOC_6 20
+#define GPIOC_7 21
+
+#define GPIOE_0 22
+#define GPIOE_1 23
+
+#define GPIOD_0 24
+#define GPIOD_1 25
+#define GPIOD_2 26
+#define GPIOD_3 27
+#define GPIOD_4 28
+#define GPIOD_5 29
+#define GPIOD_6 30
+#define GPIOD_7 31
+#define GPIOD_8 32
+#define GPIOD_9 33
+#define GPIOD_10 34
+#define GPIOD_11 35
+
+#define GPIOH_0 36
+#define GPIOH_1 37
+#define GPIOH_2 38
+#define GPIOH_3 39
+#define GPIOH_4 40
+#define GPIOH_5 41
+#define GPIOH_6 42
+#define GPIOH_7 43
+#define GPIOH_8 44
+#define GPIOH_9 45
+#define GPIOH_10 46
+#define GPIOH_11 47
+
+#define GPIOX_0 48
+#define GPIOX_1 49
+#define GPIOX_2 50
+#define GPIOX_3 51
+#define GPIOX_4 52
+#define GPIOX_5 53
+#define GPIOX_6 54
+#define GPIOX_7 55
+#define GPIOX_8 56
+#define GPIOX_9 57
+#define GPIOX_10 58
+#define GPIOX_11 59
+#define GPIOX_12 60
+#define GPIOX_13 61
+#define GPIOX_14 62
+#define GPIOX_15 63
+#define GPIOX_16 64
+#define GPIOX_17 65
+#define GPIOX_18 66
+#define GPIOX_19 67
+
+#define GPIOZ_0 68
+#define GPIOZ_1 69
+#define GPIOZ_2 70
+#define GPIOZ_3 71
+#define GPIOZ_4 72
+#define GPIOZ_5 73
+#define GPIOZ_6 74
+#define GPIOZ_7 75
+#define GPIOZ_8 76
+#define GPIOZ_9 77
+#define GPIOZ_10 78
+#define GPIOZ_11 79
+#define GPIOZ_12 80
+
+#define GPIO_TEST_N 81
+#endif /* _DT_BINDINGS_MESON_S4_GPIO_H */
diff --git a/include/dt-bindings/gpio/msc313-gpio.h b/include/dt-bindings/gpio/msc313-gpio.h
index 2dd56683d3c1..5458c6580a02 100644
--- a/include/dt-bindings/gpio/msc313-gpio.h
+++ b/include/dt-bindings/gpio/msc313-gpio.h
@@ -50,4 +50,75 @@
#define MSC313_GPIO_SPI0_DI (MSC313_GPIO_SPI0 + 2)
#define MSC313_GPIO_SPI0_DO (MSC313_GPIO_SPI0 + 3)
+/* SSD20x */
+#define SSD20XD_GPIO_FUART 0
+#define SSD20XD_GPIO_FUART_RX (SSD20XD_GPIO_FUART + 0)
+#define SSD20XD_GPIO_FUART_TX (SSD20XD_GPIO_FUART + 1)
+#define SSD20XD_GPIO_FUART_CTS (SSD20XD_GPIO_FUART + 2)
+#define SSD20XD_GPIO_FUART_RTS (SSD20XD_GPIO_FUART + 3)
+
+#define SSD20XD_GPIO_SD (SSD20XD_GPIO_FUART_RTS + 1)
+#define SSD20XD_GPIO_SD_CLK (SSD20XD_GPIO_SD + 0)
+#define SSD20XD_GPIO_SD_CMD (SSD20XD_GPIO_SD + 1)
+#define SSD20XD_GPIO_SD_D0 (SSD20XD_GPIO_SD + 2)
+#define SSD20XD_GPIO_SD_D1 (SSD20XD_GPIO_SD + 3)
+#define SSD20XD_GPIO_SD_D2 (SSD20XD_GPIO_SD + 4)
+#define SSD20XD_GPIO_SD_D3 (SSD20XD_GPIO_SD + 5)
+
+#define SSD20XD_GPIO_UART0 (SSD20XD_GPIO_SD_D3 + 1)
+#define SSD20XD_GPIO_UART0_RX (SSD20XD_GPIO_UART0 + 0)
+#define SSD20XD_GPIO_UART0_TX (SSD20XD_GPIO_UART0 + 1)
+
+#define SSD20XD_GPIO_UART1 (SSD20XD_GPIO_UART0_TX + 1)
+#define SSD20XD_GPIO_UART1_RX (SSD20XD_GPIO_UART1 + 0)
+#define SSD20XD_GPIO_UART1_TX (SSD20XD_GPIO_UART1 + 1)
+
+#define SSD20XD_GPIO_TTL (SSD20XD_GPIO_UART1_TX + 1)
+#define SSD20XD_GPIO_TTL0 (SSD20XD_GPIO_TTL + 0)
+#define SSD20XD_GPIO_TTL1 (SSD20XD_GPIO_TTL + 1)
+#define SSD20XD_GPIO_TTL2 (SSD20XD_GPIO_TTL + 2)
+#define SSD20XD_GPIO_TTL3 (SSD20XD_GPIO_TTL + 3)
+#define SSD20XD_GPIO_TTL4 (SSD20XD_GPIO_TTL + 4)
+#define SSD20XD_GPIO_TTL5 (SSD20XD_GPIO_TTL + 5)
+#define SSD20XD_GPIO_TTL6 (SSD20XD_GPIO_TTL + 6)
+#define SSD20XD_GPIO_TTL7 (SSD20XD_GPIO_TTL + 7)
+#define SSD20XD_GPIO_TTL8 (SSD20XD_GPIO_TTL + 8)
+#define SSD20XD_GPIO_TTL9 (SSD20XD_GPIO_TTL + 9)
+#define SSD20XD_GPIO_TTL10 (SSD20XD_GPIO_TTL + 10)
+#define SSD20XD_GPIO_TTL11 (SSD20XD_GPIO_TTL + 11)
+#define SSD20XD_GPIO_TTL12 (SSD20XD_GPIO_TTL + 12)
+#define SSD20XD_GPIO_TTL13 (SSD20XD_GPIO_TTL + 13)
+#define SSD20XD_GPIO_TTL14 (SSD20XD_GPIO_TTL + 14)
+#define SSD20XD_GPIO_TTL15 (SSD20XD_GPIO_TTL + 15)
+#define SSD20XD_GPIO_TTL16 (SSD20XD_GPIO_TTL + 16)
+#define SSD20XD_GPIO_TTL17 (SSD20XD_GPIO_TTL + 17)
+#define SSD20XD_GPIO_TTL18 (SSD20XD_GPIO_TTL + 18)
+#define SSD20XD_GPIO_TTL19 (SSD20XD_GPIO_TTL + 19)
+#define SSD20XD_GPIO_TTL20 (SSD20XD_GPIO_TTL + 20)
+#define SSD20XD_GPIO_TTL21 (SSD20XD_GPIO_TTL + 21)
+#define SSD20XD_GPIO_TTL22 (SSD20XD_GPIO_TTL + 22)
+#define SSD20XD_GPIO_TTL23 (SSD20XD_GPIO_TTL + 23)
+#define SSD20XD_GPIO_TTL24 (SSD20XD_GPIO_TTL + 24)
+#define SSD20XD_GPIO_TTL25 (SSD20XD_GPIO_TTL + 25)
+#define SSD20XD_GPIO_TTL26 (SSD20XD_GPIO_TTL + 26)
+#define SSD20XD_GPIO_TTL27 (SSD20XD_GPIO_TTL + 27)
+
+#define SSD20XD_GPIO_GPIO (SSD20XD_GPIO_TTL27 + 1)
+#define SSD20XD_GPIO_GPIO0 (SSD20XD_GPIO_GPIO + 0)
+#define SSD20XD_GPIO_GPIO1 (SSD20XD_GPIO_GPIO + 1)
+#define SSD20XD_GPIO_GPIO2 (SSD20XD_GPIO_GPIO + 2)
+#define SSD20XD_GPIO_GPIO3 (SSD20XD_GPIO_GPIO + 3)
+#define SSD20XD_GPIO_GPIO4 (SSD20XD_GPIO_GPIO + 4)
+#define SSD20XD_GPIO_GPIO5 (SSD20XD_GPIO_GPIO + 5)
+#define SSD20XD_GPIO_GPIO6 (SSD20XD_GPIO_GPIO + 6)
+#define SSD20XD_GPIO_GPIO7 (SSD20XD_GPIO_GPIO + 7)
+#define SSD20XD_GPIO_GPIO10 (SSD20XD_GPIO_GPIO + 8)
+#define SSD20XD_GPIO_GPIO11 (SSD20XD_GPIO_GPIO + 9)
+#define SSD20XD_GPIO_GPIO12 (SSD20XD_GPIO_GPIO + 10)
+#define SSD20XD_GPIO_GPIO13 (SSD20XD_GPIO_GPIO + 11)
+#define SSD20XD_GPIO_GPIO14 (SSD20XD_GPIO_GPIO + 12)
+#define SSD20XD_GPIO_GPIO85 (SSD20XD_GPIO_GPIO + 13)
+#define SSD20XD_GPIO_GPIO86 (SSD20XD_GPIO_GPIO + 14)
+#define SSD20XD_GPIO_GPIO90 (SSD20XD_GPIO_GPIO + 15)
+
#endif /* _DT_BINDINGS_MSC313_GPIO_H */
diff --git a/include/dt-bindings/gpio/tegra234-gpio.h b/include/dt-bindings/gpio/tegra234-gpio.h
new file mode 100644
index 000000000000..784673c2c752
--- /dev/null
+++ b/include/dt-bindings/gpio/tegra234-gpio.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. */
+
+/*
+ * This header provides constants for binding nvidia,tegra234-gpio*.
+ *
+ * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below
+ * provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA234_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA234_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+/* GPIOs implemented by main GPIO controller */
+#define TEGRA234_MAIN_GPIO_PORT_A 0
+#define TEGRA234_MAIN_GPIO_PORT_B 1
+#define TEGRA234_MAIN_GPIO_PORT_C 2
+#define TEGRA234_MAIN_GPIO_PORT_D 3
+#define TEGRA234_MAIN_GPIO_PORT_E 4
+#define TEGRA234_MAIN_GPIO_PORT_F 5
+#define TEGRA234_MAIN_GPIO_PORT_G 6
+#define TEGRA234_MAIN_GPIO_PORT_H 7
+#define TEGRA234_MAIN_GPIO_PORT_I 8
+#define TEGRA234_MAIN_GPIO_PORT_J 9
+#define TEGRA234_MAIN_GPIO_PORT_K 10
+#define TEGRA234_MAIN_GPIO_PORT_L 11
+#define TEGRA234_MAIN_GPIO_PORT_M 12
+#define TEGRA234_MAIN_GPIO_PORT_N 13
+#define TEGRA234_MAIN_GPIO_PORT_P 14
+#define TEGRA234_MAIN_GPIO_PORT_Q 15
+#define TEGRA234_MAIN_GPIO_PORT_R 16
+#define TEGRA234_MAIN_GPIO_PORT_X 17
+#define TEGRA234_MAIN_GPIO_PORT_Y 18
+#define TEGRA234_MAIN_GPIO_PORT_Z 19
+#define TEGRA234_MAIN_GPIO_PORT_AC 20
+#define TEGRA234_MAIN_GPIO_PORT_AD 21
+#define TEGRA234_MAIN_GPIO_PORT_AE 22
+#define TEGRA234_MAIN_GPIO_PORT_AF 23
+#define TEGRA234_MAIN_GPIO_PORT_AG 24
+
+#define TEGRA234_MAIN_GPIO(port, offset) \
+	((TEGRA234_MAIN_GPIO_PORT_##port * 8) + (offset))
+
+/* GPIOs implemented by AON GPIO controller */
+#define TEGRA234_AON_GPIO_PORT_AA 0
+#define TEGRA234_AON_GPIO_PORT_BB 1
+#define TEGRA234_AON_GPIO_PORT_CC 2
+#define TEGRA234_AON_GPIO_PORT_DD 3
+#define TEGRA234_AON_GPIO_PORT_EE 4
+#define TEGRA234_AON_GPIO_PORT_GG 5
+
+#define TEGRA234_AON_GPIO(port, offset) \
+	((TEGRA234_AON_GPIO_PORT_##port * 8) + (offset))
+
+#endif
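As the header comment says, the first specifier cell is a single packed GPIO ID; the macro computes it from a port name and a pin offset at eight pins per port, and the second cell takes gpio.h flags. The tegra241 and tegra256 headers below work identically. A hypothetical consumer fragment:

	/* pin 3 of main-controller port A, i.e. GPIO ID (0 * 8) + 3 */
	enable-gpios = <&gpio TEGRA234_MAIN_GPIO(A, 3) GPIO_ACTIVE_HIGH>;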
diff --git a/include/dt-bindings/gpio/tegra241-gpio.h b/include/dt-bindings/gpio/tegra241-gpio.h
new file mode 100644
index 000000000000..80cee3016be6
--- /dev/null
+++ b/include/dt-bindings/gpio/tegra241-gpio.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. */
+
+/*
+ * This header provides constants for the nvidia,tegra241-gpio DT binding.
+ *
+ * The first cell in Tegra's GPIO specifier is the GPIO ID. The macros below
+ * provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA241_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA241_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+/* GPIOs implemented by main GPIO controller */
+#define TEGRA241_MAIN_GPIO_PORT_A 0
+#define TEGRA241_MAIN_GPIO_PORT_B 1
+#define TEGRA241_MAIN_GPIO_PORT_C 2
+#define TEGRA241_MAIN_GPIO_PORT_D 3
+#define TEGRA241_MAIN_GPIO_PORT_E 4
+#define TEGRA241_MAIN_GPIO_PORT_F 5
+#define TEGRA241_MAIN_GPIO_PORT_G 6
+#define TEGRA241_MAIN_GPIO_PORT_H 7
+#define TEGRA241_MAIN_GPIO_PORT_I 8
+#define TEGRA241_MAIN_GPIO_PORT_J 9
+#define TEGRA241_MAIN_GPIO_PORT_K 10
+#define TEGRA241_MAIN_GPIO_PORT_L 11
+
+#define TEGRA241_MAIN_GPIO(port, offset) \
+ ((TEGRA241_MAIN_GPIO_PORT_##port * 8) + (offset))
+
+/* GPIOs implemented by AON GPIO controller */
+#define TEGRA241_AON_GPIO_PORT_AA 0
+#define TEGRA241_AON_GPIO_PORT_BB 1
+
+#define TEGRA241_AON_GPIO(port, offset) \
+ ((TEGRA241_AON_GPIO_PORT_##port * 8) + (offset))
+
+#endif
diff --git a/include/dt-bindings/gpio/tegra256-gpio.h b/include/dt-bindings/gpio/tegra256-gpio.h
new file mode 100644
index 000000000000..a0353a302aeb
--- /dev/null
+++ b/include/dt-bindings/gpio/tegra256-gpio.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+/*
+ * This header provides constants for the nvidia,tegra256-gpio DT binding.
+ *
+ * The first cell in Tegra's GPIO specifier is the GPIO ID.
+ * The macros below provide names for this.
+ *
+ * The second cell contains standard flag values specified in gpio.h.
+ */
+
+#ifndef _DT_BINDINGS_GPIO_TEGRA256_GPIO_H
+#define _DT_BINDINGS_GPIO_TEGRA256_GPIO_H
+
+#include <dt-bindings/gpio/gpio.h>
+
+/* GPIOs implemented by main GPIO controller */
+#define TEGRA256_MAIN_GPIO_PORT_A 0
+#define TEGRA256_MAIN_GPIO_PORT_B 1
+#define TEGRA256_MAIN_GPIO_PORT_C 2
+#define TEGRA256_MAIN_GPIO_PORT_D 3
+
+#define TEGRA256_MAIN_GPIO(port, offset) \
+ ((TEGRA256_MAIN_GPIO_PORT_##port * 8) + (offset))
+
+#endif
diff --git a/include/dt-bindings/i3c/i3c.h b/include/dt-bindings/i3c/i3c.h
new file mode 100644
index 000000000000..373439218bba
--- /dev/null
+++ b/include/dt-bindings/i3c/i3c.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef _DT_BINDINGS_I3C_I3C_H
+#define _DT_BINDINGS_I3C_I3C_H
+
+#define I2C_FM (1 << 4)
+#define I2C_FM_PLUS (0 << 4)
+
+#define I2C_FILTER (0 << 5)
+#define I2C_NO_FILTER_HIGH_FREQUENCY (1 << 5)
+#define I2C_NO_FILTER_LOW_FREQUENCY (2 << 5)
+
+#endif
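These values build up the Legacy Virtual Register (LVR) that the generic I3C binding carries in the third reg cell of an I2C device sitting on an I3C bus. A sketch under that assumption (the address and compatible are illustrative):

	eeprom@57 {
		compatible = "atmel,24c64";
		/* static address 0x57; LVR marks an FM-mode device with filter */
		reg = <0x57 0x0 (I2C_FM | I2C_FILTER)>;
	};

Note that I2C_FILTER expands to 0, so ORing it in changes nothing numerically; it only documents the intent.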
diff --git a/include/dt-bindings/iio/adc/adi,ad4695.h b/include/dt-bindings/iio/adc/adi,ad4695.h
new file mode 100644
index 000000000000..fea4525d2710
--- /dev/null
+++ b/include/dt-bindings/iio/adc/adi,ad4695.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_ADI_AD4695_H
+#define _DT_BINDINGS_ADI_AD4695_H
+
+#define AD4695_COMMON_MODE_REFGND 0xFF
+#define AD4695_COMMON_MODE_COM 0xFE
+
+#define AD4695_TRIGGER_EVENT_BUSY 0
+#define AD4695_TRIGGER_EVENT_ALERT 1
+
+#define AD4695_TRIGGER_PIN_GP0 0
+#define AD4695_TRIGGER_PIN_GP2 2
+#define AD4695_TRIGGER_PIN_GP3 3
+
+#endif /* _DT_BINDINGS_ADI_AD4695_H */
diff --git a/include/dt-bindings/iio/adc/adi,ad7606.h b/include/dt-bindings/iio/adc/adi,ad7606.h
new file mode 100644
index 000000000000..f38a6d72b6dc
--- /dev/null
+++ b/include/dt-bindings/iio/adc/adi,ad7606.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_ADI_AD7606_H
+#define _DT_BINDINGS_ADI_AD7606_H
+
+#define AD7606_TRIGGER_EVENT_BUSY 0
+#define AD7606_TRIGGER_EVENT_FRSTDATA 1
+
+#endif /* _DT_BINDINGS_ADI_AD7606_H */
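The BUSY/FRSTDATA constants pick which ADC pin is exposed as a trigger source. A heavily hedged sketch, assuming a one-cell trigger-source specifier whose cell is one of these event IDs (verify against the adi,ad7606 binding schema; the adi,ad4695 and adi,ad7768-1 constants nearby follow the same idea):

	trigger-sources = <&adc AD7606_TRIGGER_EVENT_FRSTDATA>;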
diff --git a/include/dt-bindings/iio/adc/adi,ad7768-1.h b/include/dt-bindings/iio/adc/adi,ad7768-1.h
new file mode 100644
index 000000000000..34d92856a50b
--- /dev/null
+++ b/include/dt-bindings/iio/adc/adi,ad7768-1.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_ADI_AD7768_1_H
+#define _DT_BINDINGS_ADI_AD7768_1_H
+
+#define AD7768_TRIGGER_SOURCE_SYNC_OUT 0
+#define AD7768_TRIGGER_SOURCE_GPIO3 1
+#define AD7768_TRIGGER_SOURCE_DRDY 2
+
+#endif /* _DT_BINDINGS_ADI_AD7768_1_H */
diff --git a/include/dt-bindings/iio/adc/at91-sama5d2_adc.h b/include/dt-bindings/iio/adc/at91-sama5d2_adc.h
index 70f99dbdbb42..866d36530583 100644
--- a/include/dt-bindings/iio/adc/at91-sama5d2_adc.h
+++ b/include/dt-bindings/iio/adc/at91-sama5d2_adc.h
@@ -13,4 +13,7 @@
/* pressure channel index */
#define AT91_SAMA5D2_ADC_P_CHANNEL 26
+/* SAMA7G5 temperature sensor channel index */
+#define AT91_SAMA7G5_ADC_TEMP_CHANNEL 31
+
#endif
diff --git a/include/dt-bindings/iio/adc/gehc,pmc-adc.h b/include/dt-bindings/iio/adc/gehc,pmc-adc.h
new file mode 100644
index 000000000000..2f291e3c76ae
--- /dev/null
+++ b/include/dt-bindings/iio/adc/gehc,pmc-adc.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_IIO_ADC_GEHC_PMC_ADC_H
+#define _DT_BINDINGS_IIO_ADC_GEHC_PMC_ADC_H
+
+/* ADC channel type */
+#define GEHC_PMC_ADC_VOLTAGE 0
+#define GEHC_PMC_ADC_CURRENT 1
+
+#endif
diff --git a/include/dt-bindings/iio/adc/ingenic,adc.h b/include/dt-bindings/iio/adc/ingenic,adc.h
index 4627a00e369e..a6ccc031635b 100644
--- a/include/dt-bindings/iio/adc/ingenic,adc.h
+++ b/include/dt-bindings/iio/adc/ingenic,adc.h
@@ -13,5 +13,6 @@
#define INGENIC_ADC_TOUCH_YN 6
#define INGENIC_ADC_TOUCH_XD 7
#define INGENIC_ADC_TOUCH_YD 8
+#define INGENIC_ADC_AUX0 9
#endif
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6357-auxadc.h b/include/dt-bindings/iio/adc/mediatek,mt6357-auxadc.h
new file mode 100644
index 000000000000..03ebb1d23953
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6357-auxadc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_MEDIATEK_MT6357_AUXADC_H
+#define _DT_BINDINGS_MEDIATEK_MT6357_AUXADC_H
+
+/* ADC Channel Index */
+#define MT6357_AUXADC_BATADC 0
+#define MT6357_AUXADC_ISENSE 1
+#define MT6357_AUXADC_VCDT 2
+#define MT6357_AUXADC_BAT_TEMP 3
+#define MT6357_AUXADC_CHIP_TEMP 4
+#define MT6357_AUXADC_ACCDET 5
+#define MT6357_AUXADC_VDCXO 6
+#define MT6357_AUXADC_TSX_TEMP 7
+#define MT6357_AUXADC_HPOFS_CAL 8
+#define MT6357_AUXADC_DCXO_TEMP 9
+#define MT6357_AUXADC_VCORE_TEMP 10
+#define MT6357_AUXADC_VPROC_TEMP 11
+#define MT6357_AUXADC_VBAT 12
+
+#endif
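Channel indices like these are referenced through the standard IIO consumer properties; the MT6358/MT6359/MT6363/MT6370/MT6373 headers that follow are used identically. A hypothetical consumer fragment (the auxadc phandle and consumer node are illustrative):

	charger {
		io-channels = <&pmic_auxadc MT6357_AUXADC_VBAT>,
			      <&pmic_auxadc MT6357_AUXADC_BAT_TEMP>;
		io-channel-names = "vbat", "bat-temp";
	};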
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6358-auxadc.h b/include/dt-bindings/iio/adc/mediatek,mt6358-auxadc.h
new file mode 100644
index 000000000000..efa08398fafd
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6358-auxadc.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_MEDIATEK_MT6358_AUXADC_H
+#define _DT_BINDINGS_MEDIATEK_MT6358_AUXADC_H
+
+/* ADC Channel Index */
+#define MT6358_AUXADC_BATADC 0
+#define MT6358_AUXADC_VCDT 1
+#define MT6358_AUXADC_BAT_TEMP 2
+#define MT6358_AUXADC_CHIP_TEMP 3
+#define MT6358_AUXADC_ACCDET 4
+#define MT6358_AUXADC_VDCXO 5
+#define MT6358_AUXADC_TSX_TEMP 6
+#define MT6358_AUXADC_HPOFS_CAL 7
+#define MT6358_AUXADC_DCXO_TEMP 8
+#define MT6358_AUXADC_VBIF 9
+#define MT6358_AUXADC_VCORE_TEMP 10
+#define MT6358_AUXADC_VPROC_TEMP 11
+#define MT6358_AUXADC_VGPU_TEMP 12
+#define MT6358_AUXADC_VBAT 13
+
+#endif
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6359-auxadc.h b/include/dt-bindings/iio/adc/mediatek,mt6359-auxadc.h
new file mode 100644
index 000000000000..59826393ee7e
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6359-auxadc.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_MEDIATEK_MT6359_AUXADC_H
+#define _DT_BINDINGS_MEDIATEK_MT6359_AUXADC_H
+
+/* ADC Channel Index */
+#define MT6359_AUXADC_BATADC 0
+#define MT6359_AUXADC_BAT_TEMP 1
+#define MT6359_AUXADC_CHIP_TEMP 2
+#define MT6359_AUXADC_ACCDET 3
+#define MT6359_AUXADC_VDCXO 4
+#define MT6359_AUXADC_TSX_TEMP 5
+#define MT6359_AUXADC_HPOFS_CAL 6
+#define MT6359_AUXADC_DCXO_TEMP 7
+#define MT6359_AUXADC_VBIF 8
+#define MT6359_AUXADC_VCORE_TEMP 9
+#define MT6359_AUXADC_VPROC_TEMP 10
+#define MT6359_AUXADC_VGPU_TEMP 11
+#define MT6359_AUXADC_VBAT 12
+#define MT6359_AUXADC_IBAT 13
+
+#endif
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6363-auxadc.h b/include/dt-bindings/iio/adc/mediatek,mt6363-auxadc.h
new file mode 100644
index 000000000000..92d135477d0e
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6363-auxadc.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_MEDIATEK_MT6363_AUXADC_H
+#define _DT_BINDINGS_MEDIATEK_MT6363_AUXADC_H
+
+/* ADC Channel Index */
+#define MT6363_AUXADC_BATADC 0
+#define MT6363_AUXADC_VCDT 1
+#define MT6363_AUXADC_BAT_TEMP 2
+#define MT6363_AUXADC_CHIP_TEMP 3
+#define MT6363_AUXADC_VSYSSNS 4
+#define MT6363_AUXADC_VTREF 5
+#define MT6363_AUXADC_VCORE_TEMP 6
+#define MT6363_AUXADC_VPROC_TEMP 7
+#define MT6363_AUXADC_VGPU_TEMP 8
+#define MT6363_AUXADC_VIN1 9
+#define MT6363_AUXADC_VIN2 10
+#define MT6363_AUXADC_VIN3 11
+#define MT6363_AUXADC_VIN4 12
+#define MT6363_AUXADC_VIN5 13
+#define MT6363_AUXADC_VIN6 14
+#define MT6363_AUXADC_VIN7 15
+
+#endif
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6370_adc.h b/include/dt-bindings/iio/adc/mediatek,mt6370_adc.h
new file mode 100644
index 000000000000..6ee725547763
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6370_adc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef __DT_BINDINGS_MEDIATEK_MT6370_ADC_H__
+#define __DT_BINDINGS_MEDIATEK_MT6370_ADC_H__
+
+/* ADC Channel Index */
+#define MT6370_CHAN_VBUSDIV5 0
+#define MT6370_CHAN_VBUSDIV2 1
+#define MT6370_CHAN_VSYS 2
+#define MT6370_CHAN_VBAT 3
+#define MT6370_CHAN_TS_BAT 4
+#define MT6370_CHAN_IBUS 5
+#define MT6370_CHAN_IBAT 6
+#define MT6370_CHAN_CHG_VDDP 7
+#define MT6370_CHAN_TEMP_JC 8
+#define MT6370_CHAN_MAX 9
+
+#endif
diff --git a/include/dt-bindings/iio/adc/mediatek,mt6373-auxadc.h b/include/dt-bindings/iio/adc/mediatek,mt6373-auxadc.h
new file mode 100644
index 000000000000..17cab86d355e
--- /dev/null
+++ b/include/dt-bindings/iio/adc/mediatek,mt6373-auxadc.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_MEDIATEK_MT6373_AUXADC_H
+#define _DT_BINDINGS_MEDIATEK_MT6373_AUXADC_H
+
+/* ADC Channel Index */
+#define MT6373_AUXADC_CHIP_TEMP 0
+#define MT6373_AUXADC_VCORE_TEMP 1
+#define MT6373_AUXADC_VPROC_TEMP 2
+#define MT6373_AUXADC_VGPU_TEMP 3
+#define MT6373_AUXADC_VIN1 4
+#define MT6373_AUXADC_VIN2 5
+#define MT6373_AUXADC_VIN3 6
+#define MT6373_AUXADC_VIN4 7
+#define MT6373_AUXADC_VIN5 8
+#define MT6373_AUXADC_VIN6 9
+#define MT6373_AUXADC_VIN7 10
+
+#endif
diff --git a/include/dt-bindings/iio/addac/adi,ad74413r.h b/include/dt-bindings/iio/addac/adi,ad74413r.h
new file mode 100644
index 000000000000..204f92bbd79f
--- /dev/null
+++ b/include/dt-bindings/iio/addac/adi,ad74413r.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_ADI_AD74413R_H
+#define _DT_BINDINGS_ADI_AD74413R_H
+
+#define CH_FUNC_HIGH_IMPEDANCE 0x0
+#define CH_FUNC_VOLTAGE_OUTPUT 0x1
+#define CH_FUNC_CURRENT_OUTPUT 0x2
+#define CH_FUNC_VOLTAGE_INPUT 0x3
+#define CH_FUNC_CURRENT_INPUT_EXT_POWER 0x4
+#define CH_FUNC_CURRENT_INPUT_LOOP_POWER 0x5
+#define CH_FUNC_RESISTANCE_INPUT 0x6
+#define CH_FUNC_DIGITAL_INPUT_LOGIC 0x7
+#define CH_FUNC_DIGITAL_INPUT_LOOP_POWER 0x8
+#define CH_FUNC_CURRENT_INPUT_EXT_POWER_HART 0x9
+#define CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART 0xA
+
+#define CH_FUNC_MIN CH_FUNC_HIGH_IMPEDANCE
+#define CH_FUNC_MAX CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART
+
+#endif /* _DT_BINDINGS_ADI_AD74413R_H */
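
A note on the CH_FUNC_MIN/CH_FUNC_MAX pair added at the end: it lets a consumer range-check a requested channel function before programming the part. Below is a minimal, self-contained sketch of such a check; the validate_ch_func() helper is hypothetical, and the defines are restated only so the sketch compiles on its own.

  #include <assert.h>

  #define CH_FUNC_HIGH_IMPEDANCE                  0x0
  #define CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART   0xA
  #define CH_FUNC_MIN  CH_FUNC_HIGH_IMPEDANCE
  #define CH_FUNC_MAX  CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART

  /* Hypothetical helper: accept only channel functions the header defines. */
  static int validate_ch_func(unsigned int func)
  {
      return func <= CH_FUNC_MAX; /* CH_FUNC_MIN is 0, so no lower-bound test */
  }

  int main(void)
  {
      assert(validate_ch_func(0x5));  /* CH_FUNC_CURRENT_INPUT_LOOP_POWER */
      assert(!validate_ch_func(0xB)); /* one past CH_FUNC_MAX */
      return 0;
  }
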
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h
new file mode 100644
index 000000000000..96908014e09e
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm7325.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM7325_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_PM7325_H
+
+#ifndef PM7325_SID
+#define PM7325_SID 1
+#endif
+
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
+/* ADC channels for PM7325_ADC for PMIC7 */
+#define PM7325_ADC7_REF_GND (PM7325_SID << 8 | ADC7_REF_GND)
+#define PM7325_ADC7_1P25VREF (PM7325_SID << 8 | ADC7_1P25VREF)
+#define PM7325_ADC7_VREF_VADC (PM7325_SID << 8 | ADC7_VREF_VADC)
+#define PM7325_ADC7_DIE_TEMP (PM7325_SID << 8 | ADC7_DIE_TEMP)
+
+#define PM7325_ADC7_AMUX_THM1 (PM7325_SID << 8 | ADC7_AMUX_THM1)
+#define PM7325_ADC7_AMUX_THM2 (PM7325_SID << 8 | ADC7_AMUX_THM2)
+#define PM7325_ADC7_AMUX_THM3 (PM7325_SID << 8 | ADC7_AMUX_THM3)
+#define PM7325_ADC7_AMUX_THM4 (PM7325_SID << 8 | ADC7_AMUX_THM4)
+#define PM7325_ADC7_AMUX_THM5 (PM7325_SID << 8 | ADC7_AMUX_THM5)
+#define PM7325_ADC7_GPIO1 (PM7325_SID << 8 | ADC7_GPIO1)
+#define PM7325_ADC7_GPIO2 (PM7325_SID << 8 | ADC7_GPIO2)
+#define PM7325_ADC7_GPIO3 (PM7325_SID << 8 | ADC7_GPIO3)
+#define PM7325_ADC7_GPIO4 (PM7325_SID << 8 | ADC7_GPIO4)
+
+/* 30k pull-up1 */
+#define PM7325_ADC7_AMUX_THM1_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PM7325_ADC7_AMUX_THM2_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PM7325_ADC7_AMUX_THM3_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PM7325_ADC7_AMUX_THM4_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PM7325_ADC7_AMUX_THM5_30K_PU (PM7325_SID << 8 | ADC7_AMUX_THM5_30K_PU)
+#define PM7325_ADC7_GPIO1_30K_PU (PM7325_SID << 8 | ADC7_GPIO1_30K_PU)
+#define PM7325_ADC7_GPIO2_30K_PU (PM7325_SID << 8 | ADC7_GPIO2_30K_PU)
+#define PM7325_ADC7_GPIO3_30K_PU (PM7325_SID << 8 | ADC7_GPIO3_30K_PU)
+#define PM7325_ADC7_GPIO4_30K_PU (PM7325_SID << 8 | ADC7_GPIO4_30K_PU)
+
+/* 100k pull-up2 */
+#define PM7325_ADC7_AMUX_THM1_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PM7325_ADC7_AMUX_THM2_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PM7325_ADC7_AMUX_THM3_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PM7325_ADC7_AMUX_THM4_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PM7325_ADC7_AMUX_THM5_100K_PU (PM7325_SID << 8 | ADC7_AMUX_THM5_100K_PU)
+#define PM7325_ADC7_GPIO1_100K_PU (PM7325_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PM7325_ADC7_GPIO2_100K_PU (PM7325_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PM7325_ADC7_GPIO3_100K_PU (PM7325_SID << 8 | ADC7_GPIO3_100K_PU)
+#define PM7325_ADC7_GPIO4_100K_PU (PM7325_SID << 8 | ADC7_GPIO4_100K_PU)
+
+/* 400k pull-up3 */
+#define PM7325_ADC7_AMUX_THM1_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PM7325_ADC7_AMUX_THM2_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PM7325_ADC7_AMUX_THM3_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PM7325_ADC7_AMUX_THM4_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PM7325_ADC7_AMUX_THM5_400K_PU (PM7325_SID << 8 | ADC7_AMUX_THM5_400K_PU)
+#define PM7325_ADC7_GPIO1_400K_PU (PM7325_SID << 8 | ADC7_GPIO1_400K_PU)
+#define PM7325_ADC7_GPIO2_400K_PU (PM7325_SID << 8 | ADC7_GPIO2_400K_PU)
+#define PM7325_ADC7_GPIO3_400K_PU (PM7325_SID << 8 | ADC7_GPIO3_400K_PU)
+#define PM7325_ADC7_GPIO4_400K_PU (PM7325_SID << 8 | ADC7_GPIO4_400K_PU)
+
+/* 1/3 Divider */
+#define PM7325_ADC7_GPIO4_DIV3 (PM7325_SID << 8 | ADC7_GPIO4_DIV3)
+
+#define PM7325_ADC7_VPH_PWR (PM7325_SID << 8 | ADC7_VPH_PWR)
+
+#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM7325_H */
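
The #ifndef guard at the top of this header makes the slave ID overridable: a board that wires the PM7325 at a different SPMI SID can define PM7325_SID before the #include. Below is a sketch of the resulting encoding under the default SID of 1; ADC7_GPIO1 is 0x0a per qcom,spmi-vadc.h (visible in the removed PM8350 lines below), and the restated defines are for illustration only.

  #include <assert.h>

  #define ADC7_GPIO1  0x0a        /* from qcom,spmi-vadc.h */

  #ifndef PM7325_SID              /* same guard pattern as the header */
  #define PM7325_SID  1
  #endif

  #define PM7325_ADC7_GPIO1  (PM7325_SID << 8 | ADC7_GPIO1)

  int main(void)
  {
      /* Default SID 1: the channel cell is 0x10a. */
      assert(PM7325_ADC7_GPIO1 == 0x10a);
      return 0;
  }
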
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
index 9426f27a1946..5d98f7d48a1e 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
@@ -6,62 +6,60 @@
#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H
#define _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H
-#ifndef PM8350_SID
-#define PM8350_SID 1
-#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
/* ADC channels for PM8350_ADC for PMIC7 */
-#define PM8350_ADC7_REF_GND (PM8350_SID << 8 | 0x0)
-#define PM8350_ADC7_1P25VREF (PM8350_SID << 8 | 0x01)
-#define PM8350_ADC7_VREF_VADC (PM8350_SID << 8 | 0x02)
-#define PM8350_ADC7_DIE_TEMP (PM8350_SID << 8 | 0x03)
+#define PM8350_ADC7_REF_GND(sid) ((sid) << 8 | ADC7_REF_GND)
+#define PM8350_ADC7_1P25VREF(sid) ((sid) << 8 | ADC7_1P25VREF)
+#define PM8350_ADC7_VREF_VADC(sid) ((sid) << 8 | ADC7_VREF_VADC)
+#define PM8350_ADC7_DIE_TEMP(sid) ((sid) << 8 | ADC7_DIE_TEMP)
-#define PM8350_ADC7_AMUX_THM1 (PM8350_SID << 8 | 0x04)
-#define PM8350_ADC7_AMUX_THM2 (PM8350_SID << 8 | 0x05)
-#define PM8350_ADC7_AMUX_THM3 (PM8350_SID << 8 | 0x06)
-#define PM8350_ADC7_AMUX_THM4 (PM8350_SID << 8 | 0x07)
-#define PM8350_ADC7_AMUX_THM5 (PM8350_SID << 8 | 0x08)
-#define PM8350_ADC7_GPIO1 (PM8350_SID << 8 | 0x0a)
-#define PM8350_ADC7_GPIO2 (PM8350_SID << 8 | 0x0b)
-#define PM8350_ADC7_GPIO3 (PM8350_SID << 8 | 0x0c)
-#define PM8350_ADC7_GPIO4 (PM8350_SID << 8 | 0x0d)
+#define PM8350_ADC7_AMUX_THM1(sid) ((sid) << 8 | ADC7_AMUX_THM1)
+#define PM8350_ADC7_AMUX_THM2(sid) ((sid) << 8 | ADC7_AMUX_THM2)
+#define PM8350_ADC7_AMUX_THM3(sid) ((sid) << 8 | ADC7_AMUX_THM3)
+#define PM8350_ADC7_AMUX_THM4(sid) ((sid) << 8 | ADC7_AMUX_THM4)
+#define PM8350_ADC7_AMUX_THM5(sid) ((sid) << 8 | ADC7_AMUX_THM5)
+#define PM8350_ADC7_GPIO1(sid) ((sid) << 8 | ADC7_GPIO1)
+#define PM8350_ADC7_GPIO2(sid) ((sid) << 8 | ADC7_GPIO2)
+#define PM8350_ADC7_GPIO3(sid) ((sid) << 8 | ADC7_GPIO3)
+#define PM8350_ADC7_GPIO4(sid) ((sid) << 8 | ADC7_GPIO4)
/* 30k pull-up1 */
-#define PM8350_ADC7_AMUX_THM1_30K_PU (PM8350_SID << 8 | 0x24)
-#define PM8350_ADC7_AMUX_THM2_30K_PU (PM8350_SID << 8 | 0x25)
-#define PM8350_ADC7_AMUX_THM3_30K_PU (PM8350_SID << 8 | 0x26)
-#define PM8350_ADC7_AMUX_THM4_30K_PU (PM8350_SID << 8 | 0x27)
-#define PM8350_ADC7_AMUX_THM5_30K_PU (PM8350_SID << 8 | 0x28)
-#define PM8350_ADC7_GPIO1_30K_PU (PM8350_SID << 8 | 0x2a)
-#define PM8350_ADC7_GPIO2_30K_PU (PM8350_SID << 8 | 0x2b)
-#define PM8350_ADC7_GPIO3_30K_PU (PM8350_SID << 8 | 0x2c)
-#define PM8350_ADC7_GPIO4_30K_PU (PM8350_SID << 8 | 0x2d)
+#define PM8350_ADC7_AMUX_THM1_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PM8350_ADC7_AMUX_THM2_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PM8350_ADC7_AMUX_THM3_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PM8350_ADC7_AMUX_THM4_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PM8350_ADC7_AMUX_THM5_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM5_30K_PU)
+#define PM8350_ADC7_GPIO1_30K_PU(sid) ((sid) << 8 | ADC7_GPIO1_30K_PU)
+#define PM8350_ADC7_GPIO2_30K_PU(sid) ((sid) << 8 | ADC7_GPIO2_30K_PU)
+#define PM8350_ADC7_GPIO3_30K_PU(sid) ((sid) << 8 | ADC7_GPIO3_30K_PU)
+#define PM8350_ADC7_GPIO4_30K_PU(sid) ((sid) << 8 | ADC7_GPIO4_30K_PU)
/* 100k pull-up2 */
-#define PM8350_ADC7_AMUX_THM1_100K_PU (PM8350_SID << 8 | 0x44)
-#define PM8350_ADC7_AMUX_THM2_100K_PU (PM8350_SID << 8 | 0x45)
-#define PM8350_ADC7_AMUX_THM3_100K_PU (PM8350_SID << 8 | 0x46)
-#define PM8350_ADC7_AMUX_THM4_100K_PU (PM8350_SID << 8 | 0x47)
-#define PM8350_ADC7_AMUX_THM5_100K_PU (PM8350_SID << 8 | 0x48)
-#define PM8350_ADC7_GPIO1_100K_PU (PM8350_SID << 8 | 0x4a)
-#define PM8350_ADC7_GPIO2_100K_PU (PM8350_SID << 8 | 0x4b)
-#define PM8350_ADC7_GPIO3_100K_PU (PM8350_SID << 8 | 0x4c)
-#define PM8350_ADC7_GPIO4_100K_PU (PM8350_SID << 8 | 0x4d)
+#define PM8350_ADC7_AMUX_THM1_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PM8350_ADC7_AMUX_THM2_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PM8350_ADC7_AMUX_THM3_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PM8350_ADC7_AMUX_THM4_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PM8350_ADC7_AMUX_THM5_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM5_100K_PU)
+#define PM8350_ADC7_GPIO1_100K_PU(sid) ((sid) << 8 | ADC7_GPIO1_100K_PU)
+#define PM8350_ADC7_GPIO2_100K_PU(sid) ((sid) << 8 | ADC7_GPIO2_100K_PU)
+#define PM8350_ADC7_GPIO3_100K_PU(sid) ((sid) << 8 | ADC7_GPIO3_100K_PU)
+#define PM8350_ADC7_GPIO4_100K_PU(sid) ((sid) << 8 | ADC7_GPIO4_100K_PU)
/* 400k pull-up3 */
-#define PM8350_ADC7_AMUX_THM1_400K_PU (PM8350_SID << 8 | 0x64)
-#define PM8350_ADC7_AMUX_THM2_400K_PU (PM8350_SID << 8 | 0x65)
-#define PM8350_ADC7_AMUX_THM3_400K_PU (PM8350_SID << 8 | 0x66)
-#define PM8350_ADC7_AMUX_THM4_400K_PU (PM8350_SID << 8 | 0x67)
-#define PM8350_ADC7_AMUX_THM5_400K_PU (PM8350_SID << 8 | 0x68)
-#define PM8350_ADC7_GPIO1_400K_PU (PM8350_SID << 8 | 0x6a)
-#define PM8350_ADC7_GPIO2_400K_PU (PM8350_SID << 8 | 0x6b)
-#define PM8350_ADC7_GPIO3_400K_PU (PM8350_SID << 8 | 0x6c)
-#define PM8350_ADC7_GPIO4_400K_PU (PM8350_SID << 8 | 0x6d)
+#define PM8350_ADC7_AMUX_THM1_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PM8350_ADC7_AMUX_THM2_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PM8350_ADC7_AMUX_THM3_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PM8350_ADC7_AMUX_THM4_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PM8350_ADC7_AMUX_THM5_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM5_400K_PU)
+#define PM8350_ADC7_GPIO1_400K_PU(sid) ((sid) << 8 | ADC7_GPIO1_400K_PU)
+#define PM8350_ADC7_GPIO2_400K_PU(sid) ((sid) << 8 | ADC7_GPIO2_400K_PU)
+#define PM8350_ADC7_GPIO3_400K_PU(sid) ((sid) << 8 | ADC7_GPIO3_400K_PU)
+#define PM8350_ADC7_GPIO4_400K_PU(sid) ((sid) << 8 | ADC7_GPIO4_400K_PU)
/* 1/3 Divider */
-#define PM8350_ADC7_GPIO4_DIV3 (PM8350_SID << 8 | 0x8d)
+#define PM8350_ADC7_GPIO4_DIV3(sid) ((sid) << 8 | ADC7_GPIO4_DIV3)
-#define PM8350_ADC7_VPH_PWR (PM8350_SID << 8 | 0x8e)
+#define PM8350_ADC7_VPH_PWR(sid) ((sid) << 8 | ADC7_VPH_PWR)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H */
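
Unlike its siblings, the reworked PM8350 header drops the fixed PM8350_SID fallback entirely and takes the SID as a macro argument, since a system can carry more than one PM8350 at different slave IDs. A compile-time sketch of the parameterized encoding; ADC7_DIE_TEMP is 0x03 as the removed lines above show, and the restated defines are illustrative only.

  #include <assert.h>

  #define ADC7_DIE_TEMP  0x03     /* from qcom,spmi-vadc.h */
  #define PM8350_ADC7_DIE_TEMP(sid)  ((sid) << 8 | ADC7_DIE_TEMP)

  int main(void)
  {
      /* The same die-temperature channel on two PM8350 instances. */
      assert(PM8350_ADC7_DIE_TEMP(1) == 0x103);
      assert(PM8350_ADC7_DIE_TEMP(3) == 0x303);
      return 0;
  }
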
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
index dc2497c27e16..57c7977666d3 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
@@ -10,79 +10,81 @@
#define PM8350B_SID 3
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PM8350B_ADC for PMIC7 */
-#define PM8350B_ADC7_REF_GND (PM8350B_SID << 8 | 0x0)
-#define PM8350B_ADC7_1P25VREF (PM8350B_SID << 8 | 0x01)
-#define PM8350B_ADC7_VREF_VADC (PM8350B_SID << 8 | 0x02)
-#define PM8350B_ADC7_DIE_TEMP (PM8350B_SID << 8 | 0x03)
+#define PM8350B_ADC7_REF_GND (PM8350B_SID << 8 | ADC7_REF_GND)
+#define PM8350B_ADC7_1P25VREF (PM8350B_SID << 8 | ADC7_1P25VREF)
+#define PM8350B_ADC7_VREF_VADC (PM8350B_SID << 8 | ADC7_VREF_VADC)
+#define PM8350B_ADC7_DIE_TEMP (PM8350B_SID << 8 | ADC7_DIE_TEMP)
-#define PM8350B_ADC7_AMUX_THM1 (PM8350B_SID << 8 | 0x04)
-#define PM8350B_ADC7_AMUX_THM2 (PM8350B_SID << 8 | 0x05)
-#define PM8350B_ADC7_AMUX_THM3 (PM8350B_SID << 8 | 0x06)
-#define PM8350B_ADC7_AMUX_THM4 (PM8350B_SID << 8 | 0x07)
-#define PM8350B_ADC7_AMUX_THM5 (PM8350B_SID << 8 | 0x08)
-#define PM8350B_ADC7_AMUX_THM6 (PM8350B_SID << 8 | 0x09)
-#define PM8350B_ADC7_GPIO1 (PM8350B_SID << 8 | 0x0a)
-#define PM8350B_ADC7_GPIO2 (PM8350B_SID << 8 | 0x0b)
-#define PM8350B_ADC7_GPIO3 (PM8350B_SID << 8 | 0x0c)
-#define PM8350B_ADC7_GPIO4 (PM8350B_SID << 8 | 0x0d)
+#define PM8350B_ADC7_AMUX_THM1 (PM8350B_SID << 8 | ADC7_AMUX_THM1)
+#define PM8350B_ADC7_AMUX_THM2 (PM8350B_SID << 8 | ADC7_AMUX_THM2)
+#define PM8350B_ADC7_AMUX_THM3 (PM8350B_SID << 8 | ADC7_AMUX_THM3)
+#define PM8350B_ADC7_AMUX_THM4 (PM8350B_SID << 8 | ADC7_AMUX_THM4)
+#define PM8350B_ADC7_AMUX_THM5 (PM8350B_SID << 8 | ADC7_AMUX_THM5)
+#define PM8350B_ADC7_AMUX_THM6 (PM8350B_SID << 8 | ADC7_AMUX_THM6)
+#define PM8350B_ADC7_GPIO1 (PM8350B_SID << 8 | ADC7_GPIO1)
+#define PM8350B_ADC7_GPIO2 (PM8350B_SID << 8 | ADC7_GPIO2)
+#define PM8350B_ADC7_GPIO3 (PM8350B_SID << 8 | ADC7_GPIO3)
+#define PM8350B_ADC7_GPIO4 (PM8350B_SID << 8 | ADC7_GPIO4)
-#define PM8350B_ADC7_CHG_TEMP (PM8350B_SID << 8 | 0x10)
-#define PM8350B_ADC7_USB_IN_V_16 (PM8350B_SID << 8 | 0x11)
-#define PM8350B_ADC7_VDC_16 (PM8350B_SID << 8 | 0x12)
-#define PM8350B_ADC7_CC1_ID (PM8350B_SID << 8 | 0x13)
-#define PM8350B_ADC7_VREF_BAT_THERM (PM8350B_SID << 8 | 0x15)
-#define PM8350B_ADC7_IIN_FB (PM8350B_SID << 8 | 0x17)
+#define PM8350B_ADC7_CHG_TEMP (PM8350B_SID << 8 | ADC7_CHG_TEMP)
+#define PM8350B_ADC7_USB_IN_V_16 (PM8350B_SID << 8 | ADC7_USB_IN_V_16)
+#define PM8350B_ADC7_VDC_16 (PM8350B_SID << 8 | ADC7_VDC_16)
+#define PM8350B_ADC7_CC1_ID (PM8350B_SID << 8 | ADC7_CC1_ID)
+#define PM8350B_ADC7_VREF_BAT_THERM (PM8350B_SID << 8 | ADC7_VREF_BAT_THERM)
+#define PM8350B_ADC7_IIN_FB (PM8350B_SID << 8 | ADC7_IIN_FB)
/* 30k pull-up1 */
-#define PM8350B_ADC7_AMUX_THM1_30K_PU (PM8350B_SID << 8 | 0x24)
-#define PM8350B_ADC7_AMUX_THM2_30K_PU (PM8350B_SID << 8 | 0x25)
-#define PM8350B_ADC7_AMUX_THM3_30K_PU (PM8350B_SID << 8 | 0x26)
-#define PM8350B_ADC7_AMUX_THM4_30K_PU (PM8350B_SID << 8 | 0x27)
-#define PM8350B_ADC7_AMUX_THM5_30K_PU (PM8350B_SID << 8 | 0x28)
-#define PM8350B_ADC7_AMUX_THM6_30K_PU (PM8350B_SID << 8 | 0x29)
-#define PM8350B_ADC7_GPIO1_30K_PU (PM8350B_SID << 8 | 0x2a)
-#define PM8350B_ADC7_GPIO2_30K_PU (PM8350B_SID << 8 | 0x2b)
-#define PM8350B_ADC7_GPIO3_30K_PU (PM8350B_SID << 8 | 0x2c)
-#define PM8350B_ADC7_GPIO4_30K_PU (PM8350B_SID << 8 | 0x2d)
-#define PM8350B_ADC7_CC1_ID_30K_PU (PM8350B_SID << 8 | 0x33)
+#define PM8350B_ADC7_AMUX_THM1_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PM8350B_ADC7_AMUX_THM2_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PM8350B_ADC7_AMUX_THM3_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PM8350B_ADC7_AMUX_THM4_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PM8350B_ADC7_AMUX_THM5_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM5_30K_PU)
+#define PM8350B_ADC7_AMUX_THM6_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM6_30K_PU)
+#define PM8350B_ADC7_GPIO1_30K_PU (PM8350B_SID << 8 | ADC7_GPIO1_30K_PU)
+#define PM8350B_ADC7_GPIO2_30K_PU (PM8350B_SID << 8 | ADC7_GPIO2_30K_PU)
+#define PM8350B_ADC7_GPIO3_30K_PU (PM8350B_SID << 8 | ADC7_GPIO3_30K_PU)
+#define PM8350B_ADC7_GPIO4_30K_PU (PM8350B_SID << 8 | ADC7_GPIO4_30K_PU)
+#define PM8350B_ADC7_CC1_ID_30K_PU (PM8350B_SID << 8 | ADC7_CC1_ID_30K_PU)
/* 100k pull-up2 */
-#define PM8350B_ADC7_AMUX_THM1_100K_PU (PM8350B_SID << 8 | 0x44)
-#define PM8350B_ADC7_AMUX_THM2_100K_PU (PM8350B_SID << 8 | 0x45)
-#define PM8350B_ADC7_AMUX_THM3_100K_PU (PM8350B_SID << 8 | 0x46)
-#define PM8350B_ADC7_AMUX_THM4_100K_PU (PM8350B_SID << 8 | 0x47)
-#define PM8350B_ADC7_AMUX_THM5_100K_PU (PM8350B_SID << 8 | 0x48)
-#define PM8350B_ADC7_AMUX_THM6_100K_PU (PM8350B_SID << 8 | 0x49)
-#define PM8350B_ADC7_GPIO1_100K_PU (PM8350B_SID << 8 | 0x4a)
-#define PM8350B_ADC7_GPIO2_100K_PU (PM8350B_SID << 8 | 0x4b)
-#define PM8350B_ADC7_GPIO3_100K_PU (PM8350B_SID << 8 | 0x4c)
-#define PM8350B_ADC7_GPIO4_100K_PU (PM8350B_SID << 8 | 0x4d)
-#define PM8350B_ADC7_CC1_ID_100K_PU (PM8350B_SID << 8 | 0x53)
+#define PM8350B_ADC7_AMUX_THM1_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PM8350B_ADC7_AMUX_THM2_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PM8350B_ADC7_AMUX_THM3_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PM8350B_ADC7_AMUX_THM4_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PM8350B_ADC7_AMUX_THM5_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM5_100K_PU)
+#define PM8350B_ADC7_AMUX_THM6_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM6_100K_PU)
+#define PM8350B_ADC7_GPIO1_100K_PU (PM8350B_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PM8350B_ADC7_GPIO2_100K_PU (PM8350B_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PM8350B_ADC7_GPIO3_100K_PU (PM8350B_SID << 8 | ADC7_GPIO3_100K_PU)
+#define PM8350B_ADC7_GPIO4_100K_PU (PM8350B_SID << 8 | ADC7_GPIO4_100K_PU)
+#define PM8350B_ADC7_CC1_ID_100K_PU (PM8350B_SID << 8 | ADC7_CC1_ID_100K_PU)
/* 400k pull-up3 */
-#define PM8350B_ADC7_AMUX_THM1_400K_PU (PM8350B_SID << 8 | 0x64)
-#define PM8350B_ADC7_AMUX_THM2_400K_PU (PM8350B_SID << 8 | 0x65)
-#define PM8350B_ADC7_AMUX_THM3_400K_PU (PM8350B_SID << 8 | 0x66)
-#define PM8350B_ADC7_AMUX_THM4_400K_PU (PM8350B_SID << 8 | 0x67)
-#define PM8350B_ADC7_AMUX_THM5_400K_PU (PM8350B_SID << 8 | 0x68)
-#define PM8350B_ADC7_AMUX_THM6_400K_PU (PM8350B_SID << 8 | 0x69)
-#define PM8350B_ADC7_GPIO1_400K_PU (PM8350B_SID << 8 | 0x6a)
-#define PM8350B_ADC7_GPIO2_400K_PU (PM8350B_SID << 8 | 0x6b)
-#define PM8350B_ADC7_GPIO3_400K_PU (PM8350B_SID << 8 | 0x6c)
-#define PM8350B_ADC7_GPIO4_400K_PU (PM8350B_SID << 8 | 0x6d)
-#define PM8350B_ADC7_CC1_ID_400K_PU (PM8350B_SID << 8 | 0x73)
+#define PM8350B_ADC7_AMUX_THM1_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PM8350B_ADC7_AMUX_THM2_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PM8350B_ADC7_AMUX_THM3_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PM8350B_ADC7_AMUX_THM4_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PM8350B_ADC7_AMUX_THM5_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM5_400K_PU)
+#define PM8350B_ADC7_AMUX_THM6_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM6_400K_PU)
+#define PM8350B_ADC7_GPIO1_400K_PU (PM8350B_SID << 8 | ADC7_GPIO1_400K_PU)
+#define PM8350B_ADC7_GPIO2_400K_PU (PM8350B_SID << 8 | ADC7_GPIO2_400K_PU)
+#define PM8350B_ADC7_GPIO3_400K_PU (PM8350B_SID << 8 | ADC7_GPIO3_400K_PU)
+#define PM8350B_ADC7_GPIO4_400K_PU (PM8350B_SID << 8 | ADC7_GPIO4_400K_PU)
+#define PM8350B_ADC7_CC1_ID_400K_PU (PM8350B_SID << 8 | ADC7_CC1_ID_400K_PU)
/* 1/3 Divider */
-#define PM8350B_ADC7_GPIO1_DIV3 (PM8350B_SID << 8 | 0x8a)
-#define PM8350B_ADC7_GPIO2_DIV3 (PM8350B_SID << 8 | 0x8b)
-#define PM8350B_ADC7_GPIO3_DIV3 (PM8350B_SID << 8 | 0x8c)
-#define PM8350B_ADC7_GPIO4_DIV3 (PM8350B_SID << 8 | 0x8d)
+#define PM8350B_ADC7_GPIO1_DIV3 (PM8350B_SID << 8 | ADC7_GPIO1_DIV3)
+#define PM8350B_ADC7_GPIO2_DIV3 (PM8350B_SID << 8 | ADC7_GPIO2_DIV3)
+#define PM8350B_ADC7_GPIO3_DIV3 (PM8350B_SID << 8 | ADC7_GPIO3_DIV3)
+#define PM8350B_ADC7_GPIO4_DIV3 (PM8350B_SID << 8 | ADC7_GPIO4_DIV3)
-#define PM8350B_ADC7_VPH_PWR (PM8350B_SID << 8 | 0x8e)
-#define PM8350B_ADC7_VBAT_SNS (PM8350B_SID << 8 | 0x8f)
+#define PM8350B_ADC7_VPH_PWR (PM8350B_SID << 8 | ADC7_VPH_PWR)
+#define PM8350B_ADC7_VBAT_SNS (PM8350B_SID << 8 | ADC7_VBAT_SNS)
-#define PM8350B_ADC7_SBUx (PM8350B_SID << 8 | 0x94)
-#define PM8350B_ADC7_VBAT_2S_MID (PM8350B_SID << 8 | 0x96)
+#define PM8350B_ADC7_SBUx (PM8350B_SID << 8 | ADC7_SBU)
+#define PM8350B_ADC7_VBAT_2S_MID (PM8350B_SID << 8 | ADC7_VBAT_2S_MID)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
index 6c296870e95b..3d1a41a22cef 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
@@ -10,37 +10,39 @@
#define PMK8350_SID 0
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PMK8350_ADC for PMIC7 */
-#define PMK8350_ADC7_REF_GND (PMK8350_SID << 8 | 0x0)
-#define PMK8350_ADC7_1P25VREF (PMK8350_SID << 8 | 0x01)
-#define PMK8350_ADC7_VREF_VADC (PMK8350_SID << 8 | 0x02)
-#define PMK8350_ADC7_DIE_TEMP (PMK8350_SID << 8 | 0x03)
+#define PMK8350_ADC7_REF_GND (PMK8350_SID << 8 | ADC7_REF_GND)
+#define PMK8350_ADC7_1P25VREF (PMK8350_SID << 8 | ADC7_1P25VREF)
+#define PMK8350_ADC7_VREF_VADC (PMK8350_SID << 8 | ADC7_VREF_VADC)
+#define PMK8350_ADC7_DIE_TEMP (PMK8350_SID << 8 | ADC7_DIE_TEMP)
-#define PMK8350_ADC7_AMUX_THM1 (PMK8350_SID << 8 | 0x04)
-#define PMK8350_ADC7_AMUX_THM2 (PMK8350_SID << 8 | 0x05)
-#define PMK8350_ADC7_AMUX_THM3 (PMK8350_SID << 8 | 0x06)
-#define PMK8350_ADC7_AMUX_THM4 (PMK8350_SID << 8 | 0x07)
-#define PMK8350_ADC7_AMUX_THM5 (PMK8350_SID << 8 | 0x08)
+#define PMK8350_ADC7_AMUX_THM1 (PMK8350_SID << 8 | ADC7_AMUX_THM1)
+#define PMK8350_ADC7_AMUX_THM2 (PMK8350_SID << 8 | ADC7_AMUX_THM2)
+#define PMK8350_ADC7_AMUX_THM3 (PMK8350_SID << 8 | ADC7_AMUX_THM3)
+#define PMK8350_ADC7_AMUX_THM4 (PMK8350_SID << 8 | ADC7_AMUX_THM4)
+#define PMK8350_ADC7_AMUX_THM5 (PMK8350_SID << 8 | ADC7_AMUX_THM5)
/* 30k pull-up1 */
-#define PMK8350_ADC7_AMUX_THM1_30K_PU (PMK8350_SID << 8 | 0x24)
-#define PMK8350_ADC7_AMUX_THM2_30K_PU (PMK8350_SID << 8 | 0x25)
-#define PMK8350_ADC7_AMUX_THM3_30K_PU (PMK8350_SID << 8 | 0x26)
-#define PMK8350_ADC7_AMUX_THM4_30K_PU (PMK8350_SID << 8 | 0x27)
-#define PMK8350_ADC7_AMUX_THM5_30K_PU (PMK8350_SID << 8 | 0x28)
+#define PMK8350_ADC7_AMUX_THM1_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PMK8350_ADC7_AMUX_THM2_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PMK8350_ADC7_AMUX_THM3_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PMK8350_ADC7_AMUX_THM4_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PMK8350_ADC7_AMUX_THM5_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM5_30K_PU)
/* 100k pull-up2 */
-#define PMK8350_ADC7_AMUX_THM1_100K_PU (PMK8350_SID << 8 | 0x44)
-#define PMK8350_ADC7_AMUX_THM2_100K_PU (PMK8350_SID << 8 | 0x45)
-#define PMK8350_ADC7_AMUX_THM3_100K_PU (PMK8350_SID << 8 | 0x46)
-#define PMK8350_ADC7_AMUX_THM4_100K_PU (PMK8350_SID << 8 | 0x47)
-#define PMK8350_ADC7_AMUX_THM5_100K_PU (PMK8350_SID << 8 | 0x48)
+#define PMK8350_ADC7_AMUX_THM1_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PMK8350_ADC7_AMUX_THM2_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PMK8350_ADC7_AMUX_THM3_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PMK8350_ADC7_AMUX_THM4_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PMK8350_ADC7_AMUX_THM5_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM5_100K_PU)
/* 400k pull-up3 */
-#define PMK8350_ADC7_AMUX_THM1_400K_PU (PMK8350_SID << 8 | 0x64)
-#define PMK8350_ADC7_AMUX_THM2_400K_PU (PMK8350_SID << 8 | 0x65)
-#define PMK8350_ADC7_AMUX_THM3_400K_PU (PMK8350_SID << 8 | 0x66)
-#define PMK8350_ADC7_AMUX_THM4_400K_PU (PMK8350_SID << 8 | 0x67)
-#define PMK8350_ADC7_AMUX_THM5_400K_PU (PMK8350_SID << 8 | 0x68)
+#define PMK8350_ADC7_AMUX_THM1_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PMK8350_ADC7_AMUX_THM2_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PMK8350_ADC7_AMUX_THM3_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PMK8350_ADC7_AMUX_THM4_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PMK8350_ADC7_AMUX_THM5_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM5_400K_PU)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
index d6df1b19e5ff..c5adfa82b20d 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
@@ -10,19 +10,21 @@
#define PMR735A_SID 4
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PMR735A_ADC for PMIC7 */
-#define PMR735A_ADC7_REF_GND (PMR735A_SID << 8 | 0x0)
-#define PMR735A_ADC7_1P25VREF (PMR735A_SID << 8 | 0x01)
-#define PMR735A_ADC7_VREF_VADC (PMR735A_SID << 8 | 0x02)
-#define PMR735A_ADC7_DIE_TEMP (PMR735A_SID << 8 | 0x03)
+#define PMR735A_ADC7_REF_GND (PMR735A_SID << 8 | ADC7_REF_GND)
+#define PMR735A_ADC7_1P25VREF (PMR735A_SID << 8 | ADC7_1P25VREF)
+#define PMR735A_ADC7_VREF_VADC (PMR735A_SID << 8 | ADC7_VREF_VADC)
+#define PMR735A_ADC7_DIE_TEMP (PMR735A_SID << 8 | ADC7_DIE_TEMP)
-#define PMR735A_ADC7_GPIO1 (PMR735A_SID << 8 | 0x0a)
-#define PMR735A_ADC7_GPIO2 (PMR735A_SID << 8 | 0x0b)
-#define PMR735A_ADC7_GPIO3 (PMR735A_SID << 8 | 0x0c)
+#define PMR735A_ADC7_GPIO1 (PMR735A_SID << 8 | ADC7_GPIO1)
+#define PMR735A_ADC7_GPIO2 (PMR735A_SID << 8 | ADC7_GPIO2)
+#define PMR735A_ADC7_GPIO3 (PMR735A_SID << 8 | ADC7_GPIO3)
/* 100k pull-up2 */
-#define PMR735A_ADC7_GPIO1_100K_PU (PMR735A_SID << 8 | 0x4a)
-#define PMR735A_ADC7_GPIO2_100K_PU (PMR735A_SID << 8 | 0x4b)
-#define PMR735A_ADC7_GPIO3_100K_PU (PMR735A_SID << 8 | 0x4c)
+#define PMR735A_ADC7_GPIO1_100K_PU (PMR735A_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PMR735A_ADC7_GPIO2_100K_PU (PMR735A_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PMR735A_ADC7_GPIO3_100K_PU (PMR735A_SID << 8 | ADC7_GPIO3_100K_PU)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
index 8da0e7dab315..fdb8dd9ae541 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
@@ -10,19 +10,21 @@
#define PMR735B_SID 5
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PMR735B_ADC for PMIC7 */
-#define PMR735B_ADC7_REF_GND (PMR735B_SID << 8 | 0x0)
-#define PMR735B_ADC7_1P25VREF (PMR735B_SID << 8 | 0x01)
-#define PMR735B_ADC7_VREF_VADC (PMR735B_SID << 8 | 0x02)
-#define PMR735B_ADC7_DIE_TEMP (PMR735B_SID << 8 | 0x03)
+#define PMR735B_ADC7_REF_GND (PMR735B_SID << 8 | ADC7_REF_GND)
+#define PMR735B_ADC7_1P25VREF (PMR735B_SID << 8 | ADC7_1P25VREF)
+#define PMR735B_ADC7_VREF_VADC (PMR735B_SID << 8 | ADC7_VREF_VADC)
+#define PMR735B_ADC7_DIE_TEMP (PMR735B_SID << 8 | ADC7_DIE_TEMP)
-#define PMR735B_ADC7_GPIO1 (PMR735B_SID << 8 | 0x0a)
-#define PMR735B_ADC7_GPIO2 (PMR735B_SID << 8 | 0x0b)
-#define PMR735B_ADC7_GPIO3 (PMR735B_SID << 8 | 0x0c)
+#define PMR735B_ADC7_GPIO1 (PMR735B_SID << 8 | ADC7_GPIO1)
+#define PMR735B_ADC7_GPIO2 (PMR735B_SID << 8 | ADC7_GPIO2)
+#define PMR735B_ADC7_GPIO3 (PMR735B_SID << 8 | ADC7_GPIO3)
/* 100k pull-up2 */
-#define PMR735B_ADC7_GPIO1_100K_PU (PMR735B_SID << 8 | 0x4a)
-#define PMR735B_ADC7_GPIO2_100K_PU (PMR735B_SID << 8 | 0x4b)
-#define PMR735B_ADC7_GPIO3_100K_PU (PMR735B_SID << 8 | 0x4c)
+#define PMR735B_ADC7_GPIO1_100K_PU (PMR735B_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PMR735B_ADC7_GPIO2_100K_PU (PMR735B_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PMR735B_ADC7_GPIO3_100K_PU (PMR735B_SID << 8 | ADC7_GPIO3_100K_PU)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h b/include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h
new file mode 100644
index 000000000000..c0680d1285cf
--- /dev/null
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-smb139x.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_SMB139X_H
+#define _DT_BINDINGS_QCOM_SPMI_VADC_SMB139X_H
+
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
+#define SMB139x_1_ADC7_SMB_TEMP (SMB139x_1_SID << 8 | ADC7_SMB_TEMP)
+#define SMB139x_1_ADC7_ICHG_SMB (SMB139x_1_SID << 8 | ADC7_ICHG_SMB)
+#define SMB139x_1_ADC7_IIN_SMB (SMB139x_1_SID << 8 | ADC7_IIN_SMB)
+
+#define SMB139x_2_ADC7_SMB_TEMP (SMB139x_2_SID << 8 | ADC7_SMB_TEMP)
+#define SMB139x_2_ADC7_ICHG_SMB (SMB139x_2_SID << 8 | ADC7_ICHG_SMB)
+#define SMB139x_2_ADC7_IIN_SMB (SMB139x_2_SID << 8 | ADC7_IIN_SMB)
+
+#endif
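
Note that, unlike the PMIC headers above, this file references SMB139x_1_SID and SMB139x_2_SID without defining them, so the including device tree (or another header) is expected to provide them first. A sketch under that assumption; the SID value below is illustrative and not taken from this patch, while ADC7_SMB_TEMP is 0x06 per the qcom,spmi-vadc.h hunk that follows.

  #include <assert.h>

  #define SMB139x_1_SID  0x0b     /* illustrative; expected from the includer */
  #define ADC7_SMB_TEMP  0x06     /* from qcom,spmi-vadc.h */

  #define SMB139x_1_ADC7_SMB_TEMP  (SMB139x_1_SID << 8 | ADC7_SMB_TEMP)

  int main(void)
  {
      assert(SMB139x_1_ADC7_SMB_TEMP == 0xb06);
      return 0;
  }
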
diff --git a/include/dt-bindings/iio/qcom,spmi-vadc.h b/include/dt-bindings/iio/qcom,spmi-vadc.h
index 08adfe25964c..ef07ecd4d585 100644
--- a/include/dt-bindings/iio/qcom,spmi-vadc.h
+++ b/include/dt-bindings/iio/qcom,spmi-vadc.h
@@ -239,12 +239,15 @@
#define ADC7_GPIO3 0x0c
#define ADC7_GPIO4 0x0d
+#define ADC7_SMB_TEMP 0x06
#define ADC7_CHG_TEMP 0x10
#define ADC7_USB_IN_V_16 0x11
#define ADC7_VDC_16 0x12
#define ADC7_CC1_ID 0x13
#define ADC7_VREF_BAT_THERM 0x15
#define ADC7_IIN_FB 0x17
+#define ADC7_ICHG_SMB 0x18
+#define ADC7_IIN_SMB 0x19
/* 30k pull-up1 */
#define ADC7_AMUX_THM1_30K_PU 0x24
diff --git a/include/dt-bindings/input/cros-ec-keyboard.h b/include/dt-bindings/input/cros-ec-keyboard.h
index f0ae03634a96..afc12f6aa642 100644
--- a/include/dt-bindings/input/cros-ec-keyboard.h
+++ b/include/dt-bindings/input/cros-ec-keyboard.h
@@ -100,4 +100,108 @@
MATRIX_KEY(0x07, 0x0b, KEY_UP) \
MATRIX_KEY(0x07, 0x0c, KEY_LEFT)
+/* No numpad */
+#define CROS_TOP_ROW_KEYMAP_V30 \
+ MATRIX_KEY(0x00, 0x01, KEY_F11) /* T11 */ \
+ MATRIX_KEY(0x00, 0x02, KEY_F1) /* T1 */ \
+ MATRIX_KEY(0x00, 0x04, KEY_F10) /* T10 */ \
+ MATRIX_KEY(0x00, 0x0b, KEY_F14) /* T14 */ \
+ MATRIX_KEY(0x00, 0x0c, KEY_F15) /* T15 */ \
+ MATRIX_KEY(0x01, 0x02, KEY_F4) /* T4 */ \
+ MATRIX_KEY(0x01, 0x04, KEY_F7) /* T7 */ \
+ MATRIX_KEY(0x01, 0x05, KEY_F12) /* T12 */ \
+ MATRIX_KEY(0x01, 0x09, KEY_F9) /* T9 */ \
+ MATRIX_KEY(0x02, 0x02, KEY_F3) /* T3 */ \
+ MATRIX_KEY(0x02, 0x04, KEY_F6) /* T6 */ \
+ MATRIX_KEY(0x02, 0x0b, KEY_F8) /* T8 */ \
+ MATRIX_KEY(0x03, 0x02, KEY_F2) /* T2 */ \
+ MATRIX_KEY(0x03, 0x05, KEY_F13) /* T13 */ \
+ MATRIX_KEY(0x04, 0x04, KEY_F5) /* T5 */
+
+#define CROS_MAIN_KEYMAP_V30 /* Keycode */ \
+ MATRIX_KEY(0x00, 0x03, KEY_B) /* 50 */ \
+ MATRIX_KEY(0x00, 0x05, KEY_N) /* 51 */ \
+ MATRIX_KEY(0x00, 0x06, KEY_RO) /* 56 (JIS) */ \
+ MATRIX_KEY(0x00, 0x08, KEY_EQUAL) /* 13 */ \
+ MATRIX_KEY(0x00, 0x09, KEY_HOME) /* 80 (Numpad) */ \
+ MATRIX_KEY(0x00, 0x0a, KEY_RIGHTALT) /* 62 */ \
+ MATRIX_KEY(0x00, 0x10, KEY_FN) /* 127 */ \
+ \
+ MATRIX_KEY(0x01, 0x01, KEY_ESC) /* 110 */ \
+ MATRIX_KEY(0x01, 0x03, KEY_G) /* 35 */ \
+ MATRIX_KEY(0x01, 0x06, KEY_H) /* 36 */ \
+ MATRIX_KEY(0x01, 0x08, KEY_APOSTROPHE) /* 41 */ \
+ MATRIX_KEY(0x01, 0x0b, KEY_BACKSPACE) /* 15 */ \
+ MATRIX_KEY(0x01, 0x0c, KEY_HENKAN) /* 65 (JIS) */ \
+ MATRIX_KEY(0x01, 0x0e, KEY_LEFTCTRL) /* 58 */ \
+ \
+ MATRIX_KEY(0x02, 0x01, KEY_TAB) /* 16 */ \
+ MATRIX_KEY(0x02, 0x03, KEY_T) /* 21 */ \
+ MATRIX_KEY(0x02, 0x05, KEY_RIGHTBRACE) /* 28 */ \
+ MATRIX_KEY(0x02, 0x06, KEY_Y) /* 22 */ \
+ MATRIX_KEY(0x02, 0x08, KEY_LEFTBRACE) /* 27 */ \
+ MATRIX_KEY(0x02, 0x09, KEY_DELETE) /* 76 (Numpad) */ \
+ MATRIX_KEY(0x02, 0x0c, KEY_PAGEUP) /* 85 (Numpad) */ \
+ MATRIX_KEY(0x02, 0x11, KEY_YEN) /* 14 (JIS) */ \
+ \
+ MATRIX_KEY(0x03, 0x00, KEY_LEFTMETA) /* Launcher */ \
+ MATRIX_KEY(0x03, 0x01, KEY_GRAVE) /* 1 */ \
+ MATRIX_KEY(0x03, 0x03, KEY_5) /* 6 */ \
+ MATRIX_KEY(0x03, 0x04, KEY_S) /* 32 */ \
+ MATRIX_KEY(0x03, 0x06, KEY_MINUS) /* 12 */ \
+ MATRIX_KEY(0x03, 0x08, KEY_6) /* 7 */ \
+ MATRIX_KEY(0x03, 0x09, KEY_SLEEP) /* Lock */ \
+ MATRIX_KEY(0x03, 0x0b, KEY_BACKSLASH) /* 29 */ \
+ MATRIX_KEY(0x03, 0x0c, KEY_MUHENKAN) /* 63 (JIS) */ \
+ MATRIX_KEY(0x03, 0x0e, KEY_RIGHTCTRL) /* 64 */ \
+ \
+ MATRIX_KEY(0x04, 0x01, KEY_A) /* 31 */ \
+ MATRIX_KEY(0x04, 0x02, KEY_D) /* 33 */ \
+ MATRIX_KEY(0x04, 0x03, KEY_F) /* 34 */ \
+ MATRIX_KEY(0x04, 0x05, KEY_K) /* 38 */ \
+ MATRIX_KEY(0x04, 0x06, KEY_J) /* 37 */ \
+ MATRIX_KEY(0x04, 0x08, KEY_SEMICOLON) /* 40 */ \
+ MATRIX_KEY(0x04, 0x09, KEY_L) /* 39 */ \
+ MATRIX_KEY(0x04, 0x0b, KEY_ENTER) /* 43 */ \
+ MATRIX_KEY(0x04, 0x0c, KEY_END) /* 81 (Numpad) */ \
+ \
+ MATRIX_KEY(0x05, 0x01, KEY_1) /* 2 */ \
+ MATRIX_KEY(0x05, 0x02, KEY_COMMA) /* 53 */ \
+ MATRIX_KEY(0x05, 0x03, KEY_DOT) /* 54 */ \
+ MATRIX_KEY(0x05, 0x04, KEY_SLASH) /* 55 */ \
+ MATRIX_KEY(0x05, 0x05, KEY_C) /* 48 */ \
+ MATRIX_KEY(0x05, 0x06, KEY_SPACE) /* 61 */ \
+ MATRIX_KEY(0x05, 0x07, KEY_LEFTSHIFT) /* 44 */ \
+ MATRIX_KEY(0x05, 0x08, KEY_X) /* 47 */ \
+ MATRIX_KEY(0x05, 0x09, KEY_V) /* 49 */ \
+ MATRIX_KEY(0x05, 0x0b, KEY_M) /* 52 */ \
+ MATRIX_KEY(0x05, 0x0c, KEY_PAGEDOWN) /* 86 (Numpad) */ \
+ \
+ MATRIX_KEY(0x06, 0x01, KEY_Z) /* 46 */ \
+ MATRIX_KEY(0x06, 0x02, KEY_3) /* 4 */ \
+ MATRIX_KEY(0x06, 0x03, KEY_4) /* 5 */ \
+ MATRIX_KEY(0x06, 0x04, KEY_2) /* 3 */ \
+ MATRIX_KEY(0x06, 0x05, KEY_8) /* 9 */ \
+ MATRIX_KEY(0x06, 0x06, KEY_0) /* 11 */ \
+ MATRIX_KEY(0x06, 0x08, KEY_7) /* 8 */ \
+ MATRIX_KEY(0x06, 0x09, KEY_9) /* 10 */ \
+ MATRIX_KEY(0x06, 0x0b, KEY_DOWN) /* 84 */ \
+ MATRIX_KEY(0x06, 0x0c, KEY_RIGHT) /* 89 */ \
+ MATRIX_KEY(0x06, 0x0d, KEY_LEFTALT) /* 60 */ \
+ MATRIX_KEY(0x06, 0x0f, KEY_ASSISTANT) /* 128 */ \
+ MATRIX_KEY(0x06, 0x11, KEY_BACKSLASH) /* 42 (JIS, ISO) */ \
+ \
+ MATRIX_KEY(0x07, 0x01, KEY_U) /* 23 */ \
+ MATRIX_KEY(0x07, 0x02, KEY_I) /* 24 */ \
+ MATRIX_KEY(0x07, 0x03, KEY_O) /* 25 */ \
+ MATRIX_KEY(0x07, 0x04, KEY_P) /* 26 */ \
+ MATRIX_KEY(0x07, 0x05, KEY_Q) /* 17 */ \
+ MATRIX_KEY(0x07, 0x06, KEY_W) /* 18 */ \
+ MATRIX_KEY(0x07, 0x07, KEY_RIGHTSHIFT) /* 57 */ \
+ MATRIX_KEY(0x07, 0x08, KEY_E) /* 19 */ \
+ MATRIX_KEY(0x07, 0x09, KEY_R) /* 20 */ \
+ MATRIX_KEY(0x07, 0x0b, KEY_UP) /* 83 */ \
+ MATRIX_KEY(0x07, 0x0c, KEY_LEFT) /* 79 */ \
+ MATRIX_KEY(0x07, 0x11, KEY_102ND) /* 45 (ISO) */
+
#endif /* _CROS_EC_KEYBOARD_H */
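
Each MATRIX_KEY() entry packs a row, a column, and a Linux keycode into one keymap cell. The sketch below assumes the packing from <dt-bindings/input/matrix_keymap.h> (row in bits 31:24, column in bits 23:16, keycode in bits 15:0) and the standard KEY_YEN keycode of 124; both are restated so the check stands alone.

  #include <assert.h>

  /* Assumed packing, as in <dt-bindings/input/matrix_keymap.h>. */
  #define MATRIX_KEY(row, col, code) \
      ((((row) & 0xFF) << 24) | (((col) & 0xFF) << 16) | ((code) & 0xFFFF))

  #define KEY_YEN  124            /* from input-event-codes.h */

  int main(void)
  {
      unsigned int cell = MATRIX_KEY(0x02, 0x11, KEY_YEN);

      assert((cell >> 24 & 0xFF) == 0x02);  /* row */
      assert((cell >> 16 & 0xFF) == 0x11);  /* column */
      assert((cell & 0xFFFF) == KEY_YEN);   /* keycode */
      return 0;
  }
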
diff --git a/include/dt-bindings/interconnect/fsl,imx8mp.h b/include/dt-bindings/interconnect/fsl,imx8mp.h
new file mode 100644
index 000000000000..7357d417529a
--- /dev/null
+++ b/include/dt-bindings/interconnect/fsl,imx8mp.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright 2022 NXP
+ * Peng Fan <peng.fan@nxp.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_IMX8MP_H
+#define __DT_BINDINGS_INTERCONNECT_IMX8MP_H
+
+#define IMX8MP_ICN_NOC 0
+#define IMX8MP_ICN_MAIN 1
+#define IMX8MP_ICS_DRAM 2
+#define IMX8MP_ICS_OCRAM 3
+#define IMX8MP_ICM_A53 4
+#define IMX8MP_ICM_SUPERMIX 5
+#define IMX8MP_ICM_GIC 6
+#define IMX8MP_ICM_MLMIX 7
+
+#define IMX8MP_ICN_AUDIO 8
+#define IMX8MP_ICM_DSP 9
+#define IMX8MP_ICM_SDMA2PER 10
+#define IMX8MP_ICM_SDMA2BURST 11
+#define IMX8MP_ICM_SDMA3PER 12
+#define IMX8MP_ICM_SDMA3BURST 13
+#define IMX8MP_ICM_EDMA 14
+
+#define IMX8MP_ICN_GPU 15
+#define IMX8MP_ICM_GPU2D 16
+#define IMX8MP_ICM_GPU3D 17
+
+#define IMX8MP_ICN_HDMI 18
+#define IMX8MP_ICM_HRV 19
+#define IMX8MP_ICM_LCDIF_HDMI 20
+#define IMX8MP_ICM_HDCP 21
+
+#define IMX8MP_ICN_HSIO 22
+#define IMX8MP_ICM_NOC_PCIE 23
+#define IMX8MP_ICM_USB1 24
+#define IMX8MP_ICM_USB2 25
+#define IMX8MP_ICM_PCIE 26
+
+#define IMX8MP_ICN_MEDIA 27
+#define IMX8MP_ICM_LCDIF_RD 28
+#define IMX8MP_ICM_LCDIF_WR 29
+#define IMX8MP_ICM_ISI0 30
+#define IMX8MP_ICM_ISI1 31
+#define IMX8MP_ICM_ISI2 32
+#define IMX8MP_ICM_ISP0 33
+#define IMX8MP_ICM_ISP1 34
+#define IMX8MP_ICM_DWE 35
+
+#define IMX8MP_ICN_VIDEO 36
+#define IMX8MP_ICM_VPU_G1 37
+#define IMX8MP_ICM_VPU_G2 38
+#define IMX8MP_ICM_VPU_H1 39
+
+#endif /* __DT_BINDINGS_INTERCONNECT_IMX8MP_H */
diff --git a/include/dt-bindings/interconnect/mediatek,mt8183.h b/include/dt-bindings/interconnect/mediatek,mt8183.h
new file mode 100644
index 000000000000..1088c350258d
--- /dev/null
+++ b/include/dt-bindings/interconnect/mediatek,mt8183.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Copyright (c) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_MEDIATEK_MT8183_H
+#define __DT_BINDINGS_INTERCONNECT_MEDIATEK_MT8183_H
+
+#define SLAVE_DDR_EMI 0
+#define MASTER_MCUSYS 1
+#define MASTER_MFG 2
+#define MASTER_MMSYS 3
+#define MASTER_MM_VPU 4
+#define MASTER_MM_DISP 5
+#define MASTER_MM_VDEC 6
+#define MASTER_MM_VENC 7
+#define MASTER_MM_CAM 8
+#define MASTER_MM_IMG 9
+#define MASTER_MM_MDP 10
+
+#endif
diff --git a/include/dt-bindings/interconnect/mediatek,mt8195.h b/include/dt-bindings/interconnect/mediatek,mt8195.h
new file mode 100644
index 000000000000..33e0e6cde732
--- /dev/null
+++ b/include/dt-bindings/interconnect/mediatek,mt8195.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_MEDIATEK_MT8195_H
+#define __DT_BINDINGS_INTERCONNECT_MEDIATEK_MT8195_H
+
+#define SLAVE_DDR_EMI 0
+#define MASTER_MCUSYS 1
+#define MASTER_GPUSYS 2
+#define MASTER_MMSYS 3
+#define MASTER_MM_VPU 4
+#define MASTER_MM_DISP 5
+#define MASTER_MM_VDEC 6
+#define MASTER_MM_VENC 7
+#define MASTER_MM_CAM 8
+#define MASTER_MM_IMG 9
+#define MASTER_MM_MDP 10
+#define MASTER_VPUSYS 11
+#define MASTER_VPU_0 12
+#define MASTER_VPU_1 13
+#define MASTER_MDLASYS 14
+#define MASTER_MDLA_0 15
+#define MASTER_UFS 16
+#define MASTER_PCIE_0 17
+#define MASTER_PCIE_1 18
+#define MASTER_USB 19
+#define MASTER_DBGIF 20
+#define SLAVE_HRT_DDR_EMI 21
+#define MASTER_HRT_MMSYS 22
+#define MASTER_HRT_MM_DISP 23
+#define MASTER_HRT_MM_VDEC 24
+#define MASTER_HRT_MM_VENC 25
+#define MASTER_HRT_MM_CAM 26
+#define MASTER_HRT_MM_IMG 27
+#define MASTER_HRT_MM_MDP 28
+#define MASTER_HRT_DBGIF 29
+#define MASTER_WIFI 30
+#define MASTER_BT 31
+#define MASTER_NETSYS 32
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,glymur-rpmh.h b/include/dt-bindings/interconnect/qcom,glymur-rpmh.h
new file mode 100644
index 000000000000..6a0e754345e4
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,glymur-rpmh.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_GLYMUR_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_GLYMUR_H
+
+#define MASTER_CRYPTO 0
+#define MASTER_SOCCP_PROC 1
+#define MASTER_QDSS_ETR 2
+#define MASTER_QDSS_ETR_1 3
+#define SLAVE_A1NOC_SNOC 4
+
+#define MASTER_UFS_MEM 0
+#define MASTER_USB3_2 1
+#define MASTER_USB4_2 2
+#define SLAVE_A2NOC_SNOC 3
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_1 2
+#define MASTER_QUP_2 3
+#define MASTER_SP 4
+#define MASTER_SDCC_2 5
+#define MASTER_SDCC_4 6
+#define MASTER_USB2 7
+#define MASTER_USB3_MP 8
+#define SLAVE_A3NOC_SNOC 9
+
+#define MASTER_USB3_0 0
+#define MASTER_USB3_1 1
+#define MASTER_USB4_0 2
+#define MASTER_USB4_1 3
+#define SLAVE_A4NOC_HSCNOC 4
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_AHB2PHY_2 3
+#define SLAVE_AHB2PHY_3 4
+#define SLAVE_AV1_ENC_CFG 5
+#define SLAVE_CAMERA_CFG 6
+#define SLAVE_CLK_CTL 7
+#define SLAVE_CRYPTO_0_CFG 8
+#define SLAVE_DISPLAY_CFG 9
+#define SLAVE_GFX3D_CFG 10
+#define SLAVE_IMEM_CFG 11
+#define SLAVE_PCIE_0_CFG 12
+#define SLAVE_PCIE_1_CFG 13
+#define SLAVE_PCIE_2_CFG 14
+#define SLAVE_PCIE_3A_CFG 15
+#define SLAVE_PCIE_3B_CFG 16
+#define SLAVE_PCIE_4_CFG 17
+#define SLAVE_PCIE_5_CFG 18
+#define SLAVE_PCIE_6_CFG 19
+#define SLAVE_PCIE_RSCC 20
+#define SLAVE_PDM 21
+#define SLAVE_PRNG 22
+#define SLAVE_QDSS_CFG 23
+#define SLAVE_QSPI_0 24
+#define SLAVE_QUP_0 25
+#define SLAVE_QUP_1 26
+#define SLAVE_QUP_2 27
+#define SLAVE_SDCC_2 28
+#define SLAVE_SDCC_4 29
+#define SLAVE_SMMUV3_CFG 30
+#define SLAVE_TCSR 31
+#define SLAVE_TLMM 32
+#define SLAVE_UFS_MEM_CFG 33
+#define SLAVE_USB2 34
+#define SLAVE_USB3_0 35
+#define SLAVE_USB3_1 36
+#define SLAVE_USB3_2 37
+#define SLAVE_USB3_MP 38
+#define SLAVE_USB4_0 39
+#define SLAVE_USB4_1 40
+#define SLAVE_USB4_2 41
+#define SLAVE_VENUS_CFG 42
+#define SLAVE_CNOC_PCIE_SLAVE_EAST_CFG 43
+#define SLAVE_CNOC_PCIE_SLAVE_WEST_CFG 44
+#define SLAVE_LPASS_QTB_CFG 45
+#define SLAVE_CNOC_MNOC_CFG 46
+#define SLAVE_NSP_QTB_CFG 47
+#define SLAVE_PCIE_EAST_ANOC_CFG 48
+#define SLAVE_PCIE_WEST_ANOC_CFG 49
+#define SLAVE_QDSS_STM 50
+#define SLAVE_TCU 51
+
+#define MASTER_HSCNOC_CNOC 0
+#define SLAVE_AOSS 1
+#define SLAVE_IPC_ROUTER_CFG 2
+#define SLAVE_SOCCP 3
+#define SLAVE_TME_CFG 4
+#define SLAVE_APPSS 5
+#define SLAVE_CNOC_CFG 6
+#define SLAVE_BOOT_IMEM 7
+#define SLAVE_IMEM 8
+
+#define MASTER_GPU_TCU 0
+#define MASTER_PCIE_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_AGGRE_NOC_EAST 4
+#define MASTER_GFX3D 5
+#define MASTER_LPASS_GEM_NOC 6
+#define MASTER_MNOC_HF_MEM_NOC 7
+#define MASTER_MNOC_SF_MEM_NOC 8
+#define MASTER_COMPUTE_NOC 9
+#define MASTER_PCIE_EAST 10
+#define MASTER_PCIE_WEST 11
+#define MASTER_SNOC_SF_MEM_NOC 12
+#define MASTER_WLAN_Q6 13
+#define MASTER_GIC 14
+#define SLAVE_HSCNOC_CNOC 15
+#define SLAVE_LLCC 16
+#define SLAVE_PCIE_EAST 17
+#define SLAVE_PCIE_WEST 18
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_AV1_ENC 0
+#define MASTER_CAMNOC_HF 1
+#define MASTER_CAMNOC_ICP 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_EVA 4
+#define MASTER_MDP 5
+#define MASTER_CDSP_HCP 6
+#define MASTER_VIDEO 7
+#define MASTER_VIDEO_CV_PROC 8
+#define MASTER_VIDEO_V_PROC 9
+#define MASTER_CNOC_MNOC_CFG 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_MNOC_SF_MEM_NOC 12
+#define SLAVE_SERVICE_MNOC 13
+
+#define MASTER_CPUCP 0
+#define SLAVE_NSINOC_SYSTEM_NOC 1
+#define SLAVE_SERVICE_NSINOC 2
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_NSP0_HSC_NOC 1
+
+#define MASTER_OOBMSS_SP_PROC 0
+#define SLAVE_OOBMSS_SNOC 1
+
+#define MASTER_PCIE_EAST_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define MASTER_PCIE_1 2
+#define MASTER_PCIE_5 3
+#define SLAVE_PCIE_EAST_MEM_NOC 4
+#define SLAVE_SERVICE_PCIE_EAST_AGGRE_NOC 5
+
+#define MASTER_HSCNOC_PCIE_EAST 0
+#define MASTER_CNOC_PCIE_EAST_SLAVE_CFG 1
+#define SLAVE_HSCNOC_PCIE_EAST_MS_MPU_CFG 2
+#define SLAVE_SERVICE_PCIE_EAST 3
+#define SLAVE_PCIE_0 4
+#define SLAVE_PCIE_1 5
+#define SLAVE_PCIE_5 6
+
+#define MASTER_PCIE_WEST_ANOC_CFG 0
+#define MASTER_PCIE_2 1
+#define MASTER_PCIE_3A 2
+#define MASTER_PCIE_3B 3
+#define MASTER_PCIE_4 4
+#define MASTER_PCIE_6 5
+#define SLAVE_PCIE_WEST_MEM_NOC 6
+#define SLAVE_SERVICE_PCIE_WEST_AGGRE_NOC 7
+
+#define MASTER_HSCNOC_PCIE_WEST 0
+#define MASTER_CNOC_PCIE_WEST_SLAVE_CFG 1
+#define SLAVE_HSCNOC_PCIE_WEST_MS_MPU_CFG 2
+#define SLAVE_SERVICE_PCIE_WEST 3
+#define SLAVE_PCIE_2 4
+#define SLAVE_PCIE_3A 5
+#define SLAVE_PCIE_3B 6
+#define SLAVE_PCIE_4 7
+#define SLAVE_PCIE_6 8
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_A3NOC_SNOC 2
+#define MASTER_NSINOC_SNOC 3
+#define MASTER_OOBMSS 4
+#define SLAVE_SNOC_GEM_NOC_SF 5
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,ipq5332.h b/include/dt-bindings/interconnect/qcom,ipq5332.h
new file mode 100644
index 000000000000..16475bb07a48
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,ipq5332.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef INTERCONNECT_QCOM_IPQ5332_H
+#define INTERCONNECT_QCOM_IPQ5332_H
+
+#define MASTER_SNOC_PCIE3_1_M 0
+#define SLAVE_SNOC_PCIE3_1_M 1
+#define MASTER_ANOC_PCIE3_1_S 2
+#define SLAVE_ANOC_PCIE3_1_S 3
+#define MASTER_SNOC_PCIE3_2_M 4
+#define SLAVE_SNOC_PCIE3_2_M 5
+#define MASTER_ANOC_PCIE3_2_S 6
+#define SLAVE_ANOC_PCIE3_2_S 7
+#define MASTER_SNOC_USB 8
+#define SLAVE_SNOC_USB 9
+#define MASTER_NSSNOC_NSSCC 10
+#define SLAVE_NSSNOC_NSSCC 11
+#define MASTER_NSSNOC_SNOC_0 12
+#define SLAVE_NSSNOC_SNOC_0 13
+#define MASTER_NSSNOC_SNOC_1 14
+#define SLAVE_NSSNOC_SNOC_1 15
+#define MASTER_NSSNOC_ATB 16
+#define SLAVE_NSSNOC_ATB 17
+#define MASTER_NSSNOC_PCNOC_1 18
+#define SLAVE_NSSNOC_PCNOC_1 19
+#define MASTER_NSSNOC_QOSGEN_REF 20
+#define SLAVE_NSSNOC_QOSGEN_REF 21
+#define MASTER_NSSNOC_TIMEOUT_REF 22
+#define SLAVE_NSSNOC_TIMEOUT_REF 23
+#define MASTER_NSSNOC_XO_DCD 24
+#define SLAVE_NSSNOC_XO_DCD 25
+
+#define MASTER_NSSNOC_PPE 0
+#define SLAVE_NSSNOC_PPE 1
+#define MASTER_NSSNOC_PPE_CFG 2
+#define SLAVE_NSSNOC_PPE_CFG 3
+#define MASTER_NSSNOC_NSS_CSR 4
+#define SLAVE_NSSNOC_NSS_CSR 5
+#define MASTER_NSSNOC_CE_APB 6
+#define SLAVE_NSSNOC_CE_APB 7
+#define MASTER_NSSNOC_CE_AXI 8
+#define SLAVE_NSSNOC_CE_AXI 9
+
+#define MASTER_CNOC_AHB 0
+#define SLAVE_CNOC_AHB 1
+
+#endif /* INTERCONNECT_QCOM_IPQ5332_H */
diff --git a/include/dt-bindings/interconnect/qcom,ipq5424.h b/include/dt-bindings/interconnect/qcom,ipq5424.h
new file mode 100644
index 000000000000..07b786bee7d6
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,ipq5424.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef INTERCONNECT_QCOM_IPQ5424_H
+#define INTERCONNECT_QCOM_IPQ5424_H
+
+#define MASTER_ANOC_PCIE0 0
+#define SLAVE_ANOC_PCIE0 1
+#define MASTER_CNOC_PCIE0 2
+#define SLAVE_CNOC_PCIE0 3
+#define MASTER_ANOC_PCIE1 4
+#define SLAVE_ANOC_PCIE1 5
+#define MASTER_CNOC_PCIE1 6
+#define SLAVE_CNOC_PCIE1 7
+#define MASTER_ANOC_PCIE2 8
+#define SLAVE_ANOC_PCIE2 9
+#define MASTER_CNOC_PCIE2 10
+#define SLAVE_CNOC_PCIE2 11
+#define MASTER_ANOC_PCIE3 12
+#define SLAVE_ANOC_PCIE3 13
+#define MASTER_CNOC_PCIE3 14
+#define SLAVE_CNOC_PCIE3 15
+#define MASTER_CNOC_USB 16
+#define SLAVE_CNOC_USB 17
+#define MASTER_NSSNOC_NSSCC 18
+#define SLAVE_NSSNOC_NSSCC 19
+#define MASTER_NSSNOC_SNOC_0 20
+#define SLAVE_NSSNOC_SNOC_0 21
+#define MASTER_NSSNOC_SNOC_1 22
+#define SLAVE_NSSNOC_SNOC_1 23
+#define MASTER_NSSNOC_PCNOC_1 24
+#define SLAVE_NSSNOC_PCNOC_1 25
+#define MASTER_NSSNOC_QOSGEN_REF 26
+#define SLAVE_NSSNOC_QOSGEN_REF 27
+#define MASTER_NSSNOC_TIMEOUT_REF 28
+#define SLAVE_NSSNOC_TIMEOUT_REF 29
+#define MASTER_NSSNOC_XO_DCD 30
+#define SLAVE_NSSNOC_XO_DCD 31
+#define MASTER_NSSNOC_ATB 32
+#define SLAVE_NSSNOC_ATB 33
+#define MASTER_CNOC_LPASS_CFG 34
+#define SLAVE_CNOC_LPASS_CFG 35
+#define MASTER_SNOC_LPASS 36
+#define SLAVE_SNOC_LPASS 37
+
+#define MASTER_CPU 0
+#define SLAVE_L3 1
+
+#define MASTER_NSSNOC_PPE 0
+#define SLAVE_NSSNOC_PPE 1
+#define MASTER_NSSNOC_PPE_CFG 2
+#define SLAVE_NSSNOC_PPE_CFG 3
+#define MASTER_NSSNOC_NSS_CSR 4
+#define SLAVE_NSSNOC_NSS_CSR 5
+#define MASTER_NSSNOC_CE_AXI 6
+#define SLAVE_NSSNOC_CE_AXI 7
+#define MASTER_NSSNOC_CE_APB 8
+#define SLAVE_NSSNOC_CE_APB 9
+#define MASTER_NSSNOC_EIP 10
+#define SLAVE_NSSNOC_EIP 11
+
+#endif /* INTERCONNECT_QCOM_IPQ5424_H */
diff --git a/include/dt-bindings/interconnect/qcom,ipq9574.h b/include/dt-bindings/interconnect/qcom,ipq9574.h
new file mode 100644
index 000000000000..42019335c7dd
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,ipq9574.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef INTERCONNECT_QCOM_IPQ9574_H
+#define INTERCONNECT_QCOM_IPQ9574_H
+
+#define MASTER_ANOC_PCIE0 0
+#define SLAVE_ANOC_PCIE0 1
+#define MASTER_SNOC_PCIE0 2
+#define SLAVE_SNOC_PCIE0 3
+#define MASTER_ANOC_PCIE1 4
+#define SLAVE_ANOC_PCIE1 5
+#define MASTER_SNOC_PCIE1 6
+#define SLAVE_SNOC_PCIE1 7
+#define MASTER_ANOC_PCIE2 8
+#define SLAVE_ANOC_PCIE2 9
+#define MASTER_SNOC_PCIE2 10
+#define SLAVE_SNOC_PCIE2 11
+#define MASTER_ANOC_PCIE3 12
+#define SLAVE_ANOC_PCIE3 13
+#define MASTER_SNOC_PCIE3 14
+#define SLAVE_SNOC_PCIE3 15
+#define MASTER_USB 16
+#define SLAVE_USB 17
+#define MASTER_USB_AXI 18
+#define SLAVE_USB_AXI 19
+#define MASTER_NSSNOC_NSSCC 20
+#define SLAVE_NSSNOC_NSSCC 21
+#define MASTER_NSSNOC_SNOC_0 22
+#define SLAVE_NSSNOC_SNOC_0 23
+#define MASTER_NSSNOC_SNOC_1 24
+#define SLAVE_NSSNOC_SNOC_1 25
+#define MASTER_NSSNOC_PCNOC_1 26
+#define SLAVE_NSSNOC_PCNOC_1 27
+#define MASTER_NSSNOC_QOSGEN_REF 28
+#define SLAVE_NSSNOC_QOSGEN_REF 29
+#define MASTER_NSSNOC_TIMEOUT_REF 30
+#define SLAVE_NSSNOC_TIMEOUT_REF 31
+#define MASTER_NSSNOC_XO_DCD 32
+#define SLAVE_NSSNOC_XO_DCD 33
+#define MASTER_NSSNOC_ATB 34
+#define SLAVE_NSSNOC_ATB 35
+#define MASTER_MEM_NOC_NSSNOC 36
+#define SLAVE_MEM_NOC_NSSNOC 37
+#define MASTER_NSSNOC_MEMNOC 38
+#define SLAVE_NSSNOC_MEMNOC 39
+#define MASTER_NSSNOC_MEM_NOC_1 40
+#define SLAVE_NSSNOC_MEM_NOC_1 41
+
+#define MASTER_NSSNOC_PPE 0
+#define SLAVE_NSSNOC_PPE 1
+#define MASTER_NSSNOC_PPE_CFG 2
+#define SLAVE_NSSNOC_PPE_CFG 3
+#define MASTER_NSSNOC_NSS_CSR 4
+#define SLAVE_NSSNOC_NSS_CSR 5
+#define MASTER_NSSNOC_IMEM_QSB 6
+#define SLAVE_NSSNOC_IMEM_QSB 7
+#define MASTER_NSSNOC_IMEM_AHB 8
+#define SLAVE_NSSNOC_IMEM_AHB 9
+
+#endif /* INTERCONNECT_QCOM_IPQ9574_H */
diff --git a/include/dt-bindings/interconnect/qcom,kaanapali-rpmh.h b/include/dt-bindings/interconnect/qcom,kaanapali-rpmh.h
new file mode 100644
index 000000000000..dde3f9abd677
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,kaanapali-rpmh.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_KAANAPALI_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_KAANAPALI_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_CRYPTO 1
+#define MASTER_QUP_1 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB3 5
+#define MASTER_QUP_2 6
+#define MASTER_QUP_3 7
+#define MASTER_QUP_4 8
+#define MASTER_IPA 9
+#define MASTER_SOCCP_PROC 10
+#define MASTER_SP 11
+#define MASTER_QDSS_ETR 12
+#define MASTER_QDSS_ETR_1 13
+#define MASTER_SDCC_2 14
+#define SLAVE_A1NOC_SNOC 15
+#define SLAVE_A2NOC_SNOC 16
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define MASTER_QUP_CORE_3 3
+#define MASTER_QUP_CORE_4 4
+#define SLAVE_QUP_CORE_0 5
+#define SLAVE_QUP_CORE_1 6
+#define SLAVE_QUP_CORE_2 7
+#define SLAVE_QUP_CORE_3 8
+#define SLAVE_QUP_CORE_4 9
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_CAMERA_CFG 3
+#define SLAVE_CLK_CTL 4
+#define SLAVE_CRYPTO_0_CFG 5
+#define SLAVE_DISPLAY_CFG 6
+#define SLAVE_EVA_CFG 7
+#define SLAVE_GFX3D_CFG 8
+#define SLAVE_I2C 9
+#define SLAVE_I3C_IBI0_CFG 10
+#define SLAVE_I3C_IBI1_CFG 11
+#define SLAVE_IMEM_CFG 12
+#define SLAVE_IPC_ROUTER_CFG 13
+#define SLAVE_CNOC_MSS 14
+#define SLAVE_PCIE_CFG 15
+#define SLAVE_PRNG 16
+#define SLAVE_QDSS_CFG 17
+#define SLAVE_QSPI_0 18
+#define SLAVE_QUP_1 19
+#define SLAVE_QUP_2 20
+#define SLAVE_QUP_3 21
+#define SLAVE_QUP_4 22
+#define SLAVE_SDCC_2 23
+#define SLAVE_SDCC_4 24
+#define SLAVE_SPSS_CFG 25
+#define SLAVE_TCSR 26
+#define SLAVE_TLMM 27
+#define SLAVE_UFS_MEM_CFG 28
+#define SLAVE_USB3 29
+#define SLAVE_VENUS_CFG 30
+#define SLAVE_VSENSE_CTRL_CFG 31
+#define SLAVE_CNOC_MNOC_CFG 32
+#define SLAVE_PCIE_ANOC_CFG 33
+#define SLAVE_QDSS_STM 34
+#define SLAVE_TCU 35
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_IPA_CFG 3
+#define SLAVE_IPC_ROUTER_FENCE 4
+#define SLAVE_SOCCP 5
+#define SLAVE_TME_CFG 6
+#define SLAVE_APPSS 7
+#define SLAVE_CNOC_CFG 8
+#define SLAVE_DDRSS_CFG 9
+#define SLAVE_BOOT_IMEM 10
+#define SLAVE_IMEM 11
+#define SLAVE_PCIE_0 12
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_GFX3D 3
+#define MASTER_LPASS_GEM_NOC 4
+#define MASTER_MSS_PROC 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_COMPUTE_NOC 8
+#define MASTER_ANOC_PCIE_GEM_NOC 9
+#define MASTER_QPACE 10
+#define MASTER_SNOC_SF_MEM_NOC 11
+#define MASTER_WLAN_Q6 12
+#define MASTER_GIC 13
+#define SLAVE_GEM_NOC_CNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_MEM_NOC_PCIE_SNOC 16
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_NRT_ICP_SF 1
+#define MASTER_CAMNOC_RT_CDM_SF 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP 4
+#define MASTER_MDSS_DCP 5
+#define MASTER_CDSP_HCP 6
+#define MASTER_VIDEO_CV_PROC 7
+#define MASTER_VIDEO_EVA 8
+#define MASTER_VIDEO_MVP 9
+#define MASTER_VIDEO_V_PROC 10
+#define MASTER_CNOC_MNOC_CFG 11
+#define SLAVE_MNOC_HF_MEM_NOC 12
+#define SLAVE_MNOC_SF_MEM_NOC 13
+#define SLAVE_SERVICE_MNOC 14
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define SLAVE_ANOC_PCIE_GEM_NOC 2
+#define SLAVE_SERVICE_PCIE_ANOC 3
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_APSS_NOC 2
+#define MASTER_CNOC_SNOC 3
+#define SLAVE_SNOC_GEM_NOC_SF 4
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,milos-rpmh.h b/include/dt-bindings/interconnect/qcom,milos-rpmh.h
new file mode 100644
index 000000000000..9326d7d9c2a3
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,milos-rpmh.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MILOS_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MILOS_H
+
+#define MASTER_QUP_1 0
+#define MASTER_UFS_MEM 1
+#define MASTER_USB3_0 2
+#define SLAVE_A1NOC_SNOC 3
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QSPI_0 1
+#define MASTER_QUP_0 2
+#define MASTER_CRYPTO 3
+#define MASTER_IPA 4
+#define MASTER_QDSS_ETR 5
+#define MASTER_QDSS_ETR_1 6
+#define MASTER_SDCC_1 7
+#define MASTER_SDCC_2 8
+#define SLAVE_A2NOC_SNOC 9
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define SLAVE_QUP_CORE_0 2
+#define SLAVE_QUP_CORE_1 3
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_CAMERA_CFG 3
+#define SLAVE_CLK_CTL 4
+#define SLAVE_RBCPR_CX_CFG 5
+#define SLAVE_RBCPR_MXA_CFG 6
+#define SLAVE_CRYPTO_0_CFG 7
+#define SLAVE_CX_RDPM 8
+#define SLAVE_GFX3D_CFG 9
+#define SLAVE_IMEM_CFG 10
+#define SLAVE_CNOC_MSS 11
+#define SLAVE_MX_2_RDPM 12
+#define SLAVE_MX_RDPM 13
+#define SLAVE_PDM 14
+#define SLAVE_QDSS_CFG 15
+#define SLAVE_QSPI_0 16
+#define SLAVE_QUP_0 17
+#define SLAVE_QUP_1 18
+#define SLAVE_SDC1 19
+#define SLAVE_SDCC_2 20
+#define SLAVE_TCSR 21
+#define SLAVE_TLMM 22
+#define SLAVE_UFS_MEM_CFG 23
+#define SLAVE_USB3_0 24
+#define SLAVE_VENUS_CFG 25
+#define SLAVE_VSENSE_CTRL_CFG 26
+#define SLAVE_WLAN 27
+#define SLAVE_CNOC_MNOC_HF_CFG 28
+#define SLAVE_CNOC_MNOC_SF_CFG 29
+#define SLAVE_NSP_QTB_CFG 30
+#define SLAVE_PCIE_ANOC_CFG 31
+#define SLAVE_WLAN_Q6_THROTTLE_CFG 32
+#define SLAVE_SERVICE_CNOC_CFG 33
+#define SLAVE_QDSS_STM 34
+#define SLAVE_TCU 35
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_DISPLAY_CFG 3
+#define SLAVE_IPA_CFG 4
+#define SLAVE_IPC_ROUTER_CFG 5
+#define SLAVE_PCIE_0_CFG 6
+#define SLAVE_PCIE_1_CFG 7
+#define SLAVE_PRNG 8
+#define SLAVE_TME_CFG 9
+#define SLAVE_APPSS 10
+#define SLAVE_CNOC_CFG 11
+#define SLAVE_DDRSS_CFG 12
+#define SLAVE_IMEM 13
+#define SLAVE_PIMEM 14
+#define SLAVE_SERVICE_CNOC 15
+#define SLAVE_PCIE_0 16
+#define SLAVE_PCIE_1 17
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_GFX3D 3
+#define MASTER_LPASS_GEM_NOC 4
+#define MASTER_MSS_PROC 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_COMPUTE_NOC 8
+#define MASTER_ANOC_PCIE_GEM_NOC 9
+#define MASTER_SNOC_GC_MEM_NOC 10
+#define MASTER_SNOC_SF_MEM_NOC 11
+#define MASTER_WLAN_Q6 12
+#define SLAVE_GEM_NOC_CNOC 13
+#define SLAVE_LLCC 14
+#define SLAVE_MEM_NOC_PCIE_SNOC 15
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP 3
+#define MASTER_VIDEO 4
+#define MASTER_CNOC_MNOC_HF_CFG 5
+#define MASTER_CNOC_MNOC_SF_CFG 6
+#define SLAVE_MNOC_HF_MEM_NOC 7
+#define SLAVE_MNOC_SF_MEM_NOC 8
+#define SLAVE_SERVICE_MNOC_HF 9
+#define SLAVE_SERVICE_MNOC_SF 10
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define MASTER_PCIE_1 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+#define SLAVE_SERVICE_PCIE_ANOC 4
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_APSS_NOC 2
+#define MASTER_CNOC_SNOC 3
+#define MASTER_PIMEM 4
+#define MASTER_GIC 5
+#define SLAVE_SNOC_GEM_NOC_GC 6
+#define SLAVE_SNOC_GEM_NOC_SF 7
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,msm8909.h b/include/dt-bindings/interconnect/qcom,msm8909.h
new file mode 100644
index 000000000000..76365d8aec21
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8909.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm MSM8909 interconnect IDs
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8909_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8909_H
+
+/* BIMC fabric */
+#define MAS_APPS_PROC 0
+#define MAS_OXILI 1
+#define MAS_SNOC_BIMC_0 2
+#define MAS_SNOC_BIMC_1 3
+#define MAS_TCU_0 4
+#define MAS_TCU_1 5
+#define SLV_EBI 6
+#define SLV_BIMC_SNOC 7
+
+/* PCNOC fabric */
+#define MAS_AUDIO 0
+#define MAS_SPDM 1
+#define MAS_DEHR 2
+#define MAS_QPIC 3
+#define MAS_BLSP_1 4
+#define MAS_USB_HS 5
+#define MAS_CRYPTO 6
+#define MAS_SDCC_1 7
+#define MAS_SDCC_2 8
+#define MAS_SNOC_PCNOC 9
+#define PCNOC_M_0 10
+#define PCNOC_M_1 11
+#define PCNOC_INT_0 12
+#define PCNOC_INT_1 13
+#define PCNOC_S_0 14
+#define PCNOC_S_1 15
+#define PCNOC_S_2 16
+#define PCNOC_S_3 17
+#define PCNOC_S_4 18
+#define PCNOC_S_5 19
+#define PCNOC_S_7 20
+#define SLV_TCSR 21
+#define SLV_SDCC_1 22
+#define SLV_BLSP_1 23
+#define SLV_CRYPTO_0_CFG 24
+#define SLV_MESSAGE_RAM 25
+#define SLV_PDM 26
+#define SLV_PRNG 27
+#define SLV_USB_HS 28
+#define SLV_QPIC 29
+#define SLV_SPDM 30
+#define SLV_SDCC_2 31
+#define SLV_AUDIO 32
+#define SLV_DEHR_CFG 33
+#define SLV_SNOC_CFG 34
+#define SLV_QDSS_CFG 35
+#define SLV_USB_PHY 36
+#define SLV_CAMERA_SS_CFG 37
+#define SLV_DISP_SS_CFG 38
+#define SLV_VENUS_CFG 39
+#define SLV_TLMM 40
+#define SLV_GPU_CFG 41
+#define SLV_IMEM_CFG 42
+#define SLV_BIMC_CFG 43
+#define SLV_PMIC_ARB 44
+#define SLV_TCU 45
+#define SLV_PCNOC_SNOC 46
+
+/* SNOC fabric */
+#define MAS_QDSS_BAM 0
+#define MAS_BIMC_SNOC 1
+#define MAS_MDP 2
+#define MAS_PCNOC_SNOC 3
+#define MAS_VENUS 4
+#define MAS_VFE 5
+#define MAS_QDSS_ETR 6
+#define MM_INT_0 7
+#define MM_INT_1 8
+#define MM_INT_2 9
+#define MM_INT_BIMC 10
+#define QDSS_INT 11
+#define SNOC_INT_0 12
+#define SNOC_INT_1 13
+#define SNOC_INT_BIMC 14
+#define SLV_KPSS_AHB 15
+#define SLV_SNOC_BIMC_0 16
+#define SLV_SNOC_BIMC_1 17
+#define SLV_IMEM 18
+#define SLV_SNOC_PCNOC 19
+#define SLV_QDSS_STM 20
+#define SLV_CATS_0 21
+#define SLV_CATS_1 22
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_MSM8909_H */
diff --git a/include/dt-bindings/interconnect/qcom,msm8937.h b/include/dt-bindings/interconnect/qcom,msm8937.h
new file mode 100644
index 000000000000..98b8a4637aab
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8937.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm MSM8937 interconnect IDs
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8937_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8937_H
+
+/* BIMC fabric */
+#define MAS_APPS_PROC 0
+#define MAS_OXILI 1
+#define MAS_SNOC_BIMC_0 2
+#define MAS_SNOC_BIMC_2 3
+#define MAS_SNOC_BIMC_1 4
+#define MAS_TCU_0 5
+#define SLV_EBI 6
+#define SLV_BIMC_SNOC 7
+
+/* PCNOC fabric */
+#define MAS_SPDM 0
+#define MAS_BLSP_1 1
+#define MAS_BLSP_2 2
+#define MAS_USB_HS1 3
+#define MAS_XI_USB_HS1 4
+#define MAS_CRYPTO 5
+#define MAS_SDCC_1 6
+#define MAS_SDCC_2 7
+#define MAS_SNOC_PCNOC 8
+#define PCNOC_M_0 9
+#define PCNOC_M_1 10
+#define PCNOC_INT_0 11
+#define PCNOC_INT_1 12
+#define PCNOC_INT_2 13
+#define PCNOC_INT_3 14
+#define PCNOC_S_0 15
+#define PCNOC_S_1 16
+#define PCNOC_S_2 17
+#define PCNOC_S_3 18
+#define PCNOC_S_4 19
+#define PCNOC_S_6 20
+#define PCNOC_S_7 21
+#define PCNOC_S_8 22
+#define SLV_SDCC_2 23
+#define SLV_SPDM 24
+#define SLV_PDM 25
+#define SLV_PRNG 26
+#define SLV_TCSR 27
+#define SLV_SNOC_CFG 28
+#define SLV_MESSAGE_RAM 29
+#define SLV_CAMERA_SS_CFG 30
+#define SLV_DISP_SS_CFG 31
+#define SLV_VENUS_CFG 32
+#define SLV_GPU_CFG 33
+#define SLV_TLMM 34
+#define SLV_BLSP_1 35
+#define SLV_BLSP_2 36
+#define SLV_PMIC_ARB 37
+#define SLV_SDCC_1 38
+#define SLV_CRYPTO_0_CFG 39
+#define SLV_USB_HS 40
+#define SLV_TCU 41
+#define SLV_PCNOC_SNOC 42
+
+/* SNOC fabric */
+#define MAS_QDSS_BAM 0
+#define MAS_BIMC_SNOC 1
+#define MAS_PCNOC_SNOC 2
+#define MAS_QDSS_ETR 3
+#define QDSS_INT 4
+#define SNOC_INT_0 5
+#define SNOC_INT_1 6
+#define SNOC_INT_2 7
+#define SLV_KPSS_AHB 8
+#define SLV_WCSS 9
+#define SLV_SNOC_BIMC_1 10
+#define SLV_IMEM 11
+#define SLV_SNOC_PCNOC 12
+#define SLV_QDSS_STM 13
+#define SLV_CATS_1 14
+#define SLV_LPASS 15
+
+/* SNOC-MM fabric */
+#define MAS_JPEG 0
+#define MAS_MDP 1
+#define MAS_VENUS 2
+#define MAS_VFE0 3
+#define MAS_VFE1 4
+#define MAS_CPP 5
+#define SLV_SNOC_BIMC_0 6
+#define SLV_SNOC_BIMC_2 7
+#define SLV_CATS_0 8
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_MSM8937_H */
diff --git a/include/dt-bindings/interconnect/qcom,msm8953.h b/include/dt-bindings/interconnect/qcom,msm8953.h
new file mode 100644
index 000000000000..12564c434af7
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8953.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm MSM8953 interconnect IDs
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8953_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8953_H
+
+/* BIMC fabric */
+#define MAS_APPS_PROC 0
+#define MAS_OXILI 1
+#define MAS_SNOC_BIMC_0 2
+#define MAS_SNOC_BIMC_2 3
+#define MAS_SNOC_BIMC_1 4
+#define MAS_TCU_0 5
+#define SLV_EBI 6
+#define SLV_BIMC_SNOC 7
+
+/* PCNOC fabric */
+#define MAS_SPDM 0
+#define MAS_BLSP_1 1
+#define MAS_BLSP_2 2
+#define MAS_USB3 3
+#define MAS_CRYPTO 4
+#define MAS_SDCC_1 5
+#define MAS_SDCC_2 6
+#define MAS_SNOC_PCNOC 7
+#define PCNOC_M_0 8
+#define PCNOC_M_1 9
+#define PCNOC_INT_1 10
+#define PCNOC_INT_2 11
+#define PCNOC_S_0 12
+#define PCNOC_S_1 13
+#define PCNOC_S_2 14
+#define PCNOC_S_3 15
+#define PCNOC_S_4 16
+#define PCNOC_S_6 17
+#define PCNOC_S_7 18
+#define PCNOC_S_8 19
+#define PCNOC_S_9 20
+#define SLV_SPDM 21
+#define SLV_PDM 22
+#define SLV_TCSR 23
+#define SLV_SNOC_CFG 24
+#define SLV_TLMM 25
+#define SLV_MESSAGE_RAM 26
+#define SLV_BLSP_1 27
+#define SLV_BLSP_2 28
+#define SLV_PRNG 29
+#define SLV_CAMERA_SS_CFG 30
+#define SLV_DISP_SS_CFG 31
+#define SLV_VENUS_CFG 32
+#define SLV_GPU_CFG 33
+#define SLV_SDCC_1 34
+#define SLV_SDCC_2 35
+#define SLV_CRYPTO_0_CFG 36
+#define SLV_PMIC_ARB 37
+#define SLV_USB3 38
+#define SLV_IPA_CFG 39
+#define SLV_TCU 40
+#define SLV_PCNOC_SNOC 41
+
+/* SNOC fabric */
+#define MAS_QDSS_BAM 0
+#define MAS_BIMC_SNOC 1
+#define MAS_PCNOC_SNOC 2
+#define MAS_IPA 3
+#define MAS_QDSS_ETR 4
+#define QDSS_INT 5
+#define SNOC_INT_0 6
+#define SNOC_INT_1 7
+#define SNOC_INT_2 8
+#define SLV_KPSS_AHB 9
+#define SLV_WCSS 10
+#define SLV_SNOC_BIMC_1 11
+#define SLV_IMEM 12
+#define SLV_SNOC_PCNOC 13
+#define SLV_QDSS_STM 14
+#define SLV_CATS_1 15
+#define SLV_LPASS 16
+
+/* SNOC-MM fabric */
+#define MAS_JPEG 0
+#define MAS_MDP 1
+#define MAS_VENUS 2
+#define MAS_VFE0 3
+#define MAS_VFE1 4
+#define MAS_CPP 5
+#define SLV_SNOC_BIMC_0 6
+#define SLV_SNOC_BIMC_2 7
+#define SLV_CATS_0 8
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_MSM8953_H */
diff --git a/include/dt-bindings/interconnect/qcom,msm8976.h b/include/dt-bindings/interconnect/qcom,msm8976.h
new file mode 100644
index 000000000000..4ea90f22320e
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8976.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm MSM8976 interconnect IDs
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8976_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8976_H
+
+/* BIMC fabric */
+#define MAS_APPS_PROC 0
+#define MAS_SMMNOC_BIMC 1
+#define MAS_SNOC_BIMC 2
+#define MAS_TCU_0 3
+#define SLV_EBI 4
+#define SLV_BIMC_SNOC 5
+
+/* PCNOC fabric */
+#define MAS_USB_HS2 0
+#define MAS_BLSP_1 1
+#define MAS_USB_HS1 2
+#define MAS_BLSP_2 3
+#define MAS_CRYPTO 4
+#define MAS_SDCC_1 5
+#define MAS_SDCC_2 6
+#define MAS_SDCC_3 7
+#define MAS_SNOC_PCNOC 8
+#define MAS_LPASS_AHB 9
+#define MAS_SPDM 10
+#define MAS_DEHR 11
+#define MAS_XM_USB_HS1 12
+#define PCNOC_M_0 13
+#define PCNOC_M_1 14
+#define PCNOC_INT_0 15
+#define PCNOC_INT_1 16
+#define PCNOC_INT_2 17
+#define PCNOC_S_1 18
+#define PCNOC_S_2 19
+#define PCNOC_S_3 20
+#define PCNOC_S_4 21
+#define PCNOC_S_8 22
+#define PCNOC_S_9 23
+#define SLV_TCSR 24
+#define SLV_TLMM 25
+#define SLV_CRYPTO_0_CFG 26
+#define SLV_MESSAGE_RAM 27
+#define SLV_PDM 28
+#define SLV_PRNG 29
+#define SLV_PMIC_ARB 30
+#define SLV_SNOC_CFG 31
+#define SLV_DCC_CFG 32
+#define SLV_CAMERA_SS_CFG 33
+#define SLV_DISP_SS_CFG 34
+#define SLV_VENUS_CFG 35
+#define SLV_SDCC_1 36
+#define SLV_BLSP_1 37
+#define SLV_USB_HS 38
+#define SLV_SDCC_3 39
+#define SLV_SDCC_2 40
+#define SLV_GPU_CFG 41
+#define SLV_USB_HS2 42
+#define SLV_BLSP_2 43
+#define SLV_PCNOC_SNOC 44
+
+/* SNOC fabric */
+#define MAS_QDSS_BAM 0
+#define MAS_BIMC_SNOC 1
+#define MAS_PCNOC_SNOC 2
+#define MAS_QDSS_ETR 3
+#define MAS_LPASS_PROC 4
+#define MAS_IPA 5
+#define QDSS_INT 6
+#define SNOC_INT_0 7
+#define SNOC_INT_1 8
+#define SNOC_INT_2 9
+#define SLV_KPSS_AHB 10
+#define SLV_SNOC_BIMC 11
+#define SLV_IMEM 12
+#define SLV_SNOC_PCNOC 13
+#define SLV_QDSS_STM 14
+#define SLV_CATS_0 15
+#define SLV_CATS_1 16
+#define SLV_LPASS 17
+
+/* SNOC-MM fabric */
+#define MAS_JPEG 0
+#define MAS_OXILI 1
+#define MAS_MDP0 2
+#define MAS_MDP1 3
+#define MAS_VENUS_0 4
+#define MAS_VENUS_1 5
+#define MAS_VFE_0 6
+#define MAS_VFE_1 7
+#define MAS_CPP 8
+#define MM_INT_0 9
+#define SLV_SMMNOC_BIMC 10
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_MSM8976_H */
diff --git a/include/dt-bindings/interconnect/qcom,msm8996-cbf.h b/include/dt-bindings/interconnect/qcom,msm8996-cbf.h
new file mode 100644
index 000000000000..aac5e69f6bd5
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8996-cbf.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Linaro Ltd. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8996_CBF_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8996_CBF_H
+
+#define MASTER_CBF_M4M 0
+#define SLAVE_CBF_M4M 1
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,msm8996.h b/include/dt-bindings/interconnect/qcom,msm8996.h
new file mode 100644
index 000000000000..a0b7c0ec7bed
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8996.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * Qualcomm MSM8996 interconnect IDs
+ *
+ * Copyright (c) 2021 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8996_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8996_H
+
+/* A0NOC */
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define MASTER_PCIE_2 2
+
+/* A1NOC */
+#define MASTER_CNOC_A1NOC 0
+#define MASTER_CRYPTO_CORE0 1
+#define MASTER_PNOC_A1NOC 2
+
+/* A2NOC */
+#define MASTER_USB3 0
+#define MASTER_IPA 1
+#define MASTER_UFS 2
+
+/* BIMC */
+#define MASTER_AMPSS_M0 0
+#define MASTER_GRAPHICS_3D 1
+#define MASTER_MNOC_BIMC 2
+#define MASTER_SNOC_BIMC 3
+#define SLAVE_EBI_CH0 4
+#define SLAVE_HMSS_L3 5
+#define SLAVE_BIMC_SNOC_0 6
+#define SLAVE_BIMC_SNOC_1 7
+
+/* CNOC */
+#define MASTER_SNOC_CNOC 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_CNOC_A1NOC 2
+#define SLAVE_CLK_CTL 3
+#define SLAVE_TCSR 4
+#define SLAVE_TLMM 5
+#define SLAVE_CRYPTO_0_CFG 6
+#define SLAVE_MPM 7
+#define SLAVE_PIMEM_CFG 8
+#define SLAVE_IMEM_CFG 9
+#define SLAVE_MESSAGE_RAM 10
+#define SLAVE_BIMC_CFG 11
+#define SLAVE_PMIC_ARB 12
+#define SLAVE_PRNG 13
+#define SLAVE_DCC_CFG 14
+#define SLAVE_RBCPR_MX 15
+#define SLAVE_QDSS_CFG 16
+#define SLAVE_RBCPR_CX 17
+#define SLAVE_QDSS_RBCPR_APU 18
+#define SLAVE_CNOC_MNOC_CFG 19
+#define SLAVE_SNOC_CFG 20
+#define SLAVE_SNOC_MPU_CFG 21
+#define SLAVE_EBI1_PHY_CFG 22
+#define SLAVE_A0NOC_CFG 23
+#define SLAVE_PCIE_1_CFG 24
+#define SLAVE_PCIE_2_CFG 25
+#define SLAVE_PCIE_0_CFG 26
+#define SLAVE_PCIE20_AHB2PHY 27
+#define SLAVE_A0NOC_MPU_CFG 28
+#define SLAVE_UFS_CFG 29
+#define SLAVE_A1NOC_CFG 30
+#define SLAVE_A1NOC_MPU_CFG 31
+#define SLAVE_A2NOC_CFG 32
+#define SLAVE_A2NOC_MPU_CFG 33
+#define SLAVE_SSC_CFG 34
+#define SLAVE_A0NOC_SMMU_CFG 35
+#define SLAVE_A1NOC_SMMU_CFG 36
+#define SLAVE_A2NOC_SMMU_CFG 37
+#define SLAVE_LPASS_SMMU_CFG 38
+#define SLAVE_CNOC_MNOC_MMSS_CFG 39
+
+/* MNOC */
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CPP 1
+#define MASTER_JPEG 2
+#define MASTER_MDP_PORT0 3
+#define MASTER_MDP_PORT1 4
+#define MASTER_ROTATOR 5
+#define MASTER_VIDEO_P0 6
+#define MASTER_VFE 7
+#define MASTER_SNOC_VMEM 8
+#define MASTER_VIDEO_P0_OCMEM 9
+#define MASTER_CNOC_MNOC_MMSS_CFG 10
+#define SLAVE_MNOC_BIMC 11
+#define SLAVE_VMEM 12
+#define SLAVE_SERVICE_MNOC 13
+#define SLAVE_MMAGIC_CFG 14
+#define SLAVE_CPR_CFG 15
+#define SLAVE_MISC_CFG 16
+#define SLAVE_VENUS_THROTTLE_CFG 17
+#define SLAVE_VENUS_CFG 18
+#define SLAVE_VMEM_CFG 19
+#define SLAVE_DSA_CFG 20
+#define SLAVE_MMSS_CLK_CFG 21
+#define SLAVE_DSA_MPU_CFG 22
+#define SLAVE_MNOC_MPU_CFG 23
+#define SLAVE_DISPLAY_CFG 24
+#define SLAVE_DISPLAY_THROTTLE_CFG 25
+#define SLAVE_CAMERA_CFG 26
+#define SLAVE_CAMERA_THROTTLE_CFG 27
+#define SLAVE_GRAPHICS_3D_CFG 28
+#define SLAVE_SMMU_MDP_CFG 29
+#define SLAVE_SMMU_ROT_CFG 30
+#define SLAVE_SMMU_VENUS_CFG 31
+#define SLAVE_SMMU_CPP_CFG 32
+#define SLAVE_SMMU_JPEG_CFG 33
+#define SLAVE_SMMU_VFE_CFG 34
+
+/* PNOC */
+#define MASTER_SNOC_PNOC 0
+#define MASTER_SDCC_1 1
+#define MASTER_SDCC_2 2
+#define MASTER_SDCC_4 3
+#define MASTER_USB_HS 4
+#define MASTER_BLSP_1 5
+#define MASTER_BLSP_2 6
+#define MASTER_TSIF 7
+#define SLAVE_PNOC_A1NOC 8
+#define SLAVE_USB_HS 9
+#define SLAVE_SDCC_2 10
+#define SLAVE_SDCC_4 11
+#define SLAVE_TSIF 12
+#define SLAVE_BLSP_2 13
+#define SLAVE_SDCC_1 14
+#define SLAVE_BLSP_1 15
+#define SLAVE_PDM 16
+#define SLAVE_AHB2PHY 17
+
+/* SNOC */
+#define MASTER_HMSS 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_SNOC_CFG 2
+#define MASTER_BIMC_SNOC_0 3
+#define MASTER_BIMC_SNOC_1 4
+#define MASTER_A0NOC_SNOC 5
+#define MASTER_A1NOC_SNOC 6
+#define MASTER_A2NOC_SNOC 7
+#define MASTER_QDSS_ETR 8
+#define SLAVE_A0NOC_SNOC 9
+#define SLAVE_A1NOC_SNOC 10
+#define SLAVE_A2NOC_SNOC 11
+#define SLAVE_HMSS 12
+#define SLAVE_LPASS 13
+#define SLAVE_USB3 14
+#define SLAVE_SNOC_BIMC 15
+#define SLAVE_SNOC_CNOC 16
+#define SLAVE_IMEM 17
+#define SLAVE_PIMEM 18
+#define SLAVE_SNOC_VMEM 19
+#define SLAVE_SNOC_PNOC 20
+#define SLAVE_QDSS_STM 21
+#define SLAVE_PCIE_0 22
+#define SLAVE_PCIE_1 23
+#define SLAVE_PCIE_2 24
+#define SLAVE_SERVICE_SNOC 25
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,qcm2290.h b/include/dt-bindings/interconnect/qcom,qcm2290.h
new file mode 100644
index 000000000000..6cbbb7fe0bd3
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,qcm2290.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* QCM2290 interconnect IDs */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_QCM2290_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_QCM2290_H
+
+/* BIMC */
+#define MASTER_APPSS_PROC 0
+#define MASTER_SNOC_BIMC_RT 1
+#define MASTER_SNOC_BIMC_NRT 2
+#define MASTER_SNOC_BIMC 3
+#define MASTER_TCU_0 4
+#define MASTER_GFX3D 5
+#define SLAVE_EBI1 6
+#define SLAVE_BIMC_SNOC 7
+
+/* CNOC */
+#define MASTER_SNOC_CNOC 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_BIMC_CFG 2
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 3
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 4
+#define SLAVE_CAMERA_CFG 5
+#define SLAVE_CLK_CTL 6
+#define SLAVE_CRYPTO_0_CFG 7
+#define SLAVE_DISPLAY_CFG 8
+#define SLAVE_DISPLAY_THROTTLE_CFG 9
+#define SLAVE_GPU_CFG 10
+#define SLAVE_HWKM 11
+#define SLAVE_IMEM_CFG 12
+#define SLAVE_IPA_CFG 13
+#define SLAVE_LPASS 14
+#define SLAVE_MESSAGE_RAM 15
+#define SLAVE_PDM 16
+#define SLAVE_PIMEM_CFG 17
+#define SLAVE_PKA_WRAPPER 18
+#define SLAVE_PMIC_ARB 19
+#define SLAVE_PRNG 20
+#define SLAVE_QDSS_CFG 21
+#define SLAVE_QM_CFG 22
+#define SLAVE_QM_MPU_CFG 23
+#define SLAVE_QPIC 24
+#define SLAVE_QUP_0 25
+#define SLAVE_SDCC_1 26
+#define SLAVE_SDCC_2 27
+#define SLAVE_SNOC_CFG 28
+#define SLAVE_TCSR 29
+#define SLAVE_USB3 30
+#define SLAVE_VENUS_CFG 31
+#define SLAVE_VENUS_THROTTLE_CFG 32
+#define SLAVE_VSENSE_CTRL_CFG 33
+#define SLAVE_SERVICE_CNOC 34
+
+/* SNOC */
+#define MASTER_CRYPTO_CORE0 0
+#define MASTER_SNOC_CFG 1
+#define MASTER_TIC 2
+#define MASTER_ANOC_SNOC 3
+#define MASTER_BIMC_SNOC 4
+#define MASTER_PIMEM 5
+#define MASTER_QDSS_BAM 6
+#define MASTER_QUP_0 7
+#define MASTER_IPA 8
+#define MASTER_QDSS_ETR 9
+#define MASTER_SDCC_1 10
+#define MASTER_SDCC_2 11
+#define MASTER_QPIC 12
+#define MASTER_USB3_0 13
+#define SLAVE_APPSS 14
+#define SLAVE_SNOC_CNOC 15
+#define SLAVE_IMEM 16
+#define SLAVE_PIMEM 17
+#define SLAVE_SNOC_BIMC 18
+#define SLAVE_SERVICE_SNOC 19
+#define SLAVE_QDSS_STM 20
+#define SLAVE_TCU 21
+#define SLAVE_ANOC_SNOC 22
+
+/* QUP Virtual */
+#define MASTER_QUP_CORE_0 0
+#define SLAVE_QUP_CORE_0 1
+
+/* MMNRT Virtual */
+#define MASTER_CAMNOC_SF 0
+#define MASTER_VIDEO_P0 1
+#define MASTER_VIDEO_PROC 2
+#define SLAVE_SNOC_BIMC_NRT 3
+
+/* MMRT Virtual */
+#define MASTER_CAMNOC_HF 0
+#define MASTER_MDP0 1
+#define SLAVE_SNOC_BIMC_RT 2
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,qcs615-rpmh.h b/include/dt-bindings/interconnect/qcom,qcs615-rpmh.h
new file mode 100644
index 000000000000..84ae0d39e73c
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,qcs615-rpmh.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_QCS615_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_QCS615_H
+
+#define MASTER_A1NOC_CFG 1
+#define MASTER_QDSS_BAM 2
+#define MASTER_QSPI 3
+#define MASTER_QUP_0 4
+#define MASTER_BLSP_1 5
+#define MASTER_CNOC_A2NOC 6
+#define MASTER_CRYPTO 7
+#define MASTER_IPA 8
+#define MASTER_EMAC_EVB 9
+#define MASTER_PCIE 10
+#define MASTER_QDSS_ETR 11
+#define MASTER_SDCC_1 12
+#define MASTER_SDCC_2 13
+#define MASTER_UFS_MEM 14
+#define MASTER_USB2 15
+#define MASTER_USB3_0 16
+#define SLAVE_A1NOC_SNOC 17
+#define SLAVE_LPASS_SNOC 18
+#define SLAVE_ANOC_PCIE_SNOC 19
+#define SLAVE_SERVICE_A2NOC 20
+
+#define MASTER_CAMNOC_HF0_UNCOMP 1
+#define MASTER_CAMNOC_HF1_UNCOMP 2
+#define MASTER_CAMNOC_SF_UNCOMP 3
+#define SLAVE_CAMNOC_UNCOMP 4
+
+#define MASTER_SPDM 1
+#define MASTER_SNOC_CNOC 2
+#define MASTER_QDSS_DAP 3
+#define SLAVE_A1NOC_CFG 4
+#define SLAVE_AHB2PHY_EAST 5
+#define SLAVE_AHB2PHY_WEST 6
+#define SLAVE_AOP 7
+#define SLAVE_AOSS 8
+#define SLAVE_CAMERA_CFG 9
+#define SLAVE_CLK_CTL 10
+#define SLAVE_RBCPR_CX_CFG 11
+#define SLAVE_RBCPR_MX_CFG 12
+#define SLAVE_CRYPTO_0_CFG 13
+#define SLAVE_CNOC_DDRSS 14
+#define SLAVE_DISPLAY_CFG 15
+#define SLAVE_EMAC_AVB_CFG 16
+#define SLAVE_GLM 17
+#define SLAVE_GFX3D_CFG 18
+#define SLAVE_IMEM_CFG 19
+#define SLAVE_IPA_CFG 20
+#define SLAVE_CNOC_MNOC_CFG 21
+#define SLAVE_PCIE_CFG 22
+#define SLAVE_PIMEM_CFG 23
+#define SLAVE_PRNG 24
+#define SLAVE_QDSS_CFG 25
+#define SLAVE_QSPI 26
+#define SLAVE_QUP_0 27
+#define SLAVE_QUP_1 28
+#define SLAVE_SDCC_1 29
+#define SLAVE_SDCC_2 30
+#define SLAVE_SNOC_CFG 31
+#define SLAVE_SPDM_WRAPPER 32
+#define SLAVE_TCSR 33
+#define SLAVE_TLMM_EAST 34
+#define SLAVE_TLMM_SOUTH 35
+#define SLAVE_TLMM_WEST 36
+#define SLAVE_UFS_MEM_CFG 37
+#define SLAVE_USB2 38
+#define SLAVE_USB3 39
+#define SLAVE_VENUS_CFG 40
+#define SLAVE_VSENSE_CTRL_CFG 41
+#define SLAVE_CNOC_A2NOC 42
+#define SLAVE_SERVICE_CNOC 43
+
+#define MASTER_CNOC_DC_NOC 1
+#define SLAVE_DC_NOC_GEMNOC 2
+#define SLAVE_LLCC_CFG 3
+
+#define MASTER_APPSS_PROC 1
+#define MASTER_GPU_TCU 2
+#define MASTER_SYS_TCU 3
+#define MASTER_GEM_NOC_CFG 4
+#define MASTER_GFX3D 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_SNOC_GC_MEM_NOC 8
+#define MASTER_SNOC_SF_MEM_NOC 9
+#define SLAVE_MSS_PROC_MS_MPU_CFG 10
+#define SLAVE_GEM_NOC_SNOC 11
+#define SLAVE_LLCC 12
+#define SLAVE_MEM_NOC_PCIE_SNOC 13
+#define SLAVE_SERVICE_GEM_NOC 14
+
+#define MASTER_IPA_CORE 1
+#define SLAVE_IPA_CORE 2
+
+#define MASTER_LLCC 1
+#define SLAVE_EBI1 2
+
+#define MASTER_CNOC_MNOC_CFG 1
+#define MASTER_CAMNOC_HF0 2
+#define MASTER_CAMNOC_HF1 3
+#define MASTER_CAMNOC_SF 4
+#define MASTER_MDP0 5
+#define MASTER_ROTATOR 6
+#define MASTER_VIDEO_P0 7
+#define MASTER_VIDEO_PROC 8
+#define SLAVE_MNOC_SF_MEM_NOC 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_SERVICE_MNOC 11
+
+#define MASTER_SNOC_CFG 1
+#define MASTER_A1NOC_SNOC 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_GEM_NOC_PCIE_SNOC 4
+#define MASTER_LPASS_ANOC 5
+#define MASTER_ANOC_PCIE_SNOC 6
+#define MASTER_PIMEM 7
+#define MASTER_GIC 8
+#define SLAVE_APPSS 9
+#define SLAVE_SNOC_CNOC 10
+#define SLAVE_SNOC_GEM_NOC_SF 11
+#define SLAVE_SNOC_MEM_NOC_GC 12
+#define SLAVE_IMEM 13
+#define SLAVE_PIMEM 14
+#define SLAVE_SERVICE_SNOC 15
+#define SLAVE_PCIE_0 16
+#define SLAVE_QDSS_STM 17
+#define SLAVE_TCU 18
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,qcs8300-rpmh.h b/include/dt-bindings/interconnect/qcom,qcs8300-rpmh.h
new file mode 100644
index 000000000000..c5eeafa1b1dd
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,qcs8300-rpmh.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_QCS8300_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_QCS8300_H
+
+#define MASTER_QUP_3 0
+#define MASTER_EMAC 1
+#define MASTER_SDC 2
+#define MASTER_UFS_MEM 3
+#define MASTER_USB2 4
+#define MASTER_USB3_0 5
+#define SLAVE_A1NOC_SNOC 6
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_1 2
+#define MASTER_CNOC_A2NOC 3
+#define MASTER_CRYPTO_CORE0 4
+#define MASTER_CRYPTO_CORE1 5
+#define MASTER_IPA 6
+#define MASTER_QDSS_ETR_0 7
+#define MASTER_QDSS_ETR_1 8
+#define SLAVE_A2NOC_SNOC 9
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_3 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_3 5
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AHB2PHY_2 2
+#define SLAVE_AHB2PHY_3 3
+#define SLAVE_ANOC_THROTTLE_CFG 4
+#define SLAVE_AOSS 5
+#define SLAVE_APPSS 6
+#define SLAVE_BOOT_ROM 7
+#define SLAVE_CAMERA_CFG 8
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 9
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 10
+#define SLAVE_CLK_CTL 11
+#define SLAVE_CDSP_CFG 12
+#define SLAVE_RBCPR_CX_CFG 13
+#define SLAVE_RBCPR_MMCX_CFG 14
+#define SLAVE_RBCPR_MX_CFG 15
+#define SLAVE_CPR_NSPCX 16
+#define SLAVE_CPR_NSPHMX 17
+#define SLAVE_CRYPTO_0_CFG 18
+#define SLAVE_CX_RDPM 19
+#define SLAVE_DISPLAY_CFG 20
+#define SLAVE_DISPLAY_RT_THROTTLE_CFG 21
+#define SLAVE_EMAC_CFG 22
+#define SLAVE_GP_DSP0_CFG 23
+#define SLAVE_GPDSP0_THROTTLE_CFG 24
+#define SLAVE_GPU_TCU_THROTTLE_CFG 25
+#define SLAVE_GFX3D_CFG 26
+#define SLAVE_HWKM 27
+#define SLAVE_IMEM_CFG 28
+#define SLAVE_IPA_CFG 29
+#define SLAVE_IPC_ROUTER_CFG 30
+#define SLAVE_LPASS 31
+#define SLAVE_LPASS_THROTTLE_CFG 32
+#define SLAVE_MX_RDPM 33
+#define SLAVE_MXC_RDPM 34
+#define SLAVE_PCIE_0_CFG 35
+#define SLAVE_PCIE_1_CFG 36
+#define SLAVE_PCIE_TCU_THROTTLE_CFG 37
+#define SLAVE_PCIE_THROTTLE_CFG 38
+#define SLAVE_PDM 39
+#define SLAVE_PIMEM_CFG 40
+#define SLAVE_PKA_WRAPPER_CFG 41
+#define SLAVE_QDSS_CFG 42
+#define SLAVE_QM_CFG 43
+#define SLAVE_QM_MPU_CFG 44
+#define SLAVE_QUP_0 45
+#define SLAVE_QUP_1 46
+#define SLAVE_QUP_3 47
+#define SLAVE_SAIL_THROTTLE_CFG 48
+#define SLAVE_SDC1 49
+#define SLAVE_SECURITY 50
+#define SLAVE_SNOC_THROTTLE_CFG 51
+#define SLAVE_TCSR 52
+#define SLAVE_TLMM 53
+#define SLAVE_TSC_CFG 54
+#define SLAVE_UFS_MEM_CFG 55
+#define SLAVE_USB2 56
+#define SLAVE_USB3_0 57
+#define SLAVE_VENUS_CFG 58
+#define SLAVE_VENUS_CVP_THROTTLE_CFG 59
+#define SLAVE_VENUS_V_CPU_THROTTLE_CFG 60
+#define SLAVE_VENUS_VCODEC_THROTTLE_CFG 61
+#define SLAVE_DDRSS_CFG 62
+#define SLAVE_GPDSP_NOC_CFG 63
+#define SLAVE_CNOC_MNOC_HF_CFG 64
+#define SLAVE_CNOC_MNOC_SF_CFG 65
+#define SLAVE_PCIE_ANOC_CFG 66
+#define SLAVE_SNOC_CFG 67
+#define SLAVE_BOOT_IMEM 68
+#define SLAVE_IMEM 69
+#define SLAVE_PIMEM 70
+#define SLAVE_PCIE_0 71
+#define SLAVE_PCIE_1 72
+#define SLAVE_QDSS_STM 73
+#define SLAVE_TCU 74
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+#define MASTER_GPU_TCU 0
+#define MASTER_PCIE_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_COMPUTE_NOC 4
+#define MASTER_GEM_NOC_CFG 5
+#define MASTER_GPDSP_SAIL 6
+#define MASTER_GFX3D 7
+#define MASTER_MNOC_HF_MEM_NOC 8
+#define MASTER_MNOC_SF_MEM_NOC 9
+#define MASTER_ANOC_PCIE_GEM_NOC 10
+#define MASTER_SNOC_GC_MEM_NOC 11
+#define MASTER_SNOC_SF_MEM_NOC 12
+#define SLAVE_GEM_NOC_CNOC 13
+#define SLAVE_LLCC 14
+#define SLAVE_GEM_NOC_PCIE_CNOC 15
+#define SLAVE_SERVICE_GEM_NOC_1 16
+#define SLAVE_SERVICE_GEM_NOC_2 17
+#define SLAVE_SERVICE_GEM_NOC 18
+#define SLAVE_SERVICE_GEM_NOC2 19
+
+#define MASTER_SAILSS_MD0 0
+#define MASTER_DSP0 1
+#define SLAVE_GP_DSP_SAIL_NOC 2
+
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define MASTER_LPASS_PROC 1
+#define SLAVE_LPASS_CORE_CFG 2
+#define SLAVE_LPASS_LPI_CFG 3
+#define SLAVE_LPASS_MPU_CFG 4
+#define SLAVE_LPASS_TOP_CFG 5
+#define SLAVE_LPASS_SNOC 6
+#define SLAVE_SERVICES_LPASS_AML_NOC 7
+#define SLAVE_SERVICE_LPASS_AG_NOC 8
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP0 3
+#define MASTER_MDP1 4
+#define MASTER_CNOC_MNOC_HF_CFG 5
+#define MASTER_CNOC_MNOC_SF_CFG 6
+#define MASTER_VIDEO_P0 7
+#define MASTER_VIDEO_PROC 8
+#define MASTER_VIDEO_V_PROC 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC_HF 12
+#define SLAVE_SERVICE_MNOC_SF 13
+
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_HCP_A 2
+#define SLAVE_CDSP_MEM_NOC 3
+#define SLAVE_SERVICE_NSP_NOC 4
+
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define SLAVE_ANOC_PCIE_GEM_NOC 2
+
+#define MASTER_GIC_AHB 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_LPASS_ANOC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_SNOC_GEM_NOC_GC 7
+#define SLAVE_SNOC_GEM_NOC_SF 8
+#define SLAVE_SERVICE_SNOC 9
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,qdu1000-rpmh.h b/include/dt-bindings/interconnect/qcom,qdu1000-rpmh.h
new file mode 100644
index 000000000000..7f0ad1571128
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,qdu1000-rpmh.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_QDU1000_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_QDU1000_H
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define SLAVE_QUP_CORE_0 2
+#define SLAVE_QUP_CORE_1 3
+
+#define MASTER_SYS_TCU 0
+#define MASTER_APPSS_PROC 1
+#define MASTER_GEMNOC_ECPRI_DMA 2
+#define MASTER_FEC_2_GEMNOC 3
+#define MASTER_ANOC_PCIE_GEM_NOC 4
+#define MASTER_SNOC_GC_MEM_NOC 5
+#define MASTER_SNOC_SF_MEM_NOC 6
+#define MASTER_MSS_PROC 7
+#define SLAVE_GEM_NOC_CNOC 8
+#define SLAVE_LLCC 9
+#define SLAVE_GEMNOC_MODEM_CNOC 10
+#define SLAVE_MEM_NOC_PCIE_SNOC 11
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_GIC_AHB 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QPIC 2
+#define MASTER_QSPI_0 3
+#define MASTER_QUP_0 4
+#define MASTER_QUP_1 5
+#define MASTER_SNOC_CFG 6
+#define MASTER_ANOC_SNOC 7
+#define MASTER_ANOC_GSI 8
+#define MASTER_GEM_NOC_CNOC 9
+#define MASTER_GEMNOC_MODEM_CNOC 10
+#define MASTER_GEM_NOC_PCIE_SNOC 11
+#define MASTER_CRYPTO 12
+#define MASTER_ECPRI_GSI 13
+#define MASTER_PIMEM 14
+#define MASTER_SNOC_ECPRI_DMA 15
+#define MASTER_GIC 16
+#define MASTER_PCIE 17
+#define MASTER_QDSS_ETR 18
+#define MASTER_QDSS_ETR_1 19
+#define MASTER_SDCC_1 20
+#define MASTER_USB3 21
+#define SLAVE_AHB2PHY_SOUTH 22
+#define SLAVE_AHB2PHY_NORTH 23
+#define SLAVE_AHB2PHY_EAST 24
+#define SLAVE_AOSS 25
+#define SLAVE_CLK_CTL 26
+#define SLAVE_RBCPR_CX_CFG 27
+#define SLAVE_RBCPR_MX_CFG 28
+#define SLAVE_CRYPTO_0_CFG 29
+#define SLAVE_ECPRI_CFG 30
+#define SLAVE_IMEM_CFG 31
+#define SLAVE_IPC_ROUTER_CFG 32
+#define SLAVE_CNOC_MSS 33
+#define SLAVE_PCIE_CFG 34
+#define SLAVE_PDM 35
+#define SLAVE_PIMEM_CFG 36
+#define SLAVE_PRNG 37
+#define SLAVE_QDSS_CFG 38
+#define SLAVE_QPIC 40
+#define SLAVE_QSPI_0 41
+#define SLAVE_QUP_0 42
+#define SLAVE_QUP_1 43
+#define SLAVE_SDCC_2 44
+#define SLAVE_SMBUS_CFG 45
+#define SLAVE_SNOC_CFG 46
+#define SLAVE_TCSR 47
+#define SLAVE_TLMM 48
+#define SLAVE_TME_CFG 49
+#define SLAVE_TSC_CFG 50
+#define SLAVE_USB3_0 51
+#define SLAVE_VSENSE_CTRL_CFG 52
+#define SLAVE_A1NOC_SNOC 53
+#define SLAVE_ANOC_SNOC_GSI 54
+#define SLAVE_DDRSS_CFG 55
+#define SLAVE_ECPRI_GEMNOC 56
+#define SLAVE_SNOC_GEM_NOC_GC 57
+#define SLAVE_SNOC_GEM_NOC_SF 58
+#define SLAVE_MODEM_OFFLINE 59
+#define SLAVE_ANOC_PCIE_GEM_NOC 60
+#define SLAVE_IMEM 61
+#define SLAVE_PIMEM 62
+#define SLAVE_SERVICE_SNOC 63
+#define SLAVE_ETHERNET_SS 64
+#define SLAVE_PCIE_0 65
+#define SLAVE_QDSS_STM 66
+#define SLAVE_TCU 67
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,rpm-icc.h b/include/dt-bindings/interconnect/qcom,rpm-icc.h
new file mode 100644
index 000000000000..2cd56f91e5c5
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,rpm-icc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_RPM_ICC_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_RPM_ICC_H
+
+#define RPM_ACTIVE_TAG (1 << 0)
+#define RPM_SLEEP_TAG (1 << 1)
+#define RPM_ALWAYS_TAG (RPM_ACTIVE_TAG | RPM_SLEEP_TAG)
+
+#endif
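The three tags above are bit masks selecting the SMD RPM vote bucket for a path: a request tagged RPM_ACTIVE_TAG is honoured only while the AP subsystem is awake, RPM_SLEEP_TAG applies only to the sleep set, and RPM_ALWAYS_TAG ORs the two. A minimal consumer sketch under that scheme, assuming SMD-RPM provider nodes labelled &snoc and &bimc with #interconnect-cells = <2> (node ID, bucket tag); the node name and unit address are hypothetical, and the IDs reuse the MSM8909 defines above:

	tmc@6048000 {
		/* Vote QDSS ETR -> DDR bandwidth in the active set only */
		interconnects = <&snoc MAS_QDSS_ETR RPM_ACTIVE_TAG
				 &bimc SLV_EBI RPM_ACTIVE_TAG>;
		interconnect-names = "qdss-ddr";
	};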
diff --git a/include/dt-bindings/interconnect/qcom,sa8775p-rpmh.h b/include/dt-bindings/interconnect/qcom,sa8775p-rpmh.h
new file mode 100644
index 000000000000..f21c39d0928e
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sa8775p-rpmh.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SA8775P_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SA8775P_H
+
+/* aggre1_noc */
+#define MASTER_QUP_3 0
+#define MASTER_EMAC 1
+#define MASTER_EMAC_1 2
+#define MASTER_SDC 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB2 5
+#define MASTER_USB3_0 6
+#define MASTER_USB3_1 7
+#define SLAVE_A1NOC_SNOC 8
+
+/* aggre2_noc */
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_1 2
+#define MASTER_QUP_2 3
+#define MASTER_CNOC_A2NOC 4
+#define MASTER_CRYPTO_CORE0 5
+#define MASTER_CRYPTO_CORE1 6
+#define MASTER_IPA 7
+#define MASTER_QDSS_ETR_0 8
+#define MASTER_QDSS_ETR_1 9
+#define MASTER_UFS_CARD 10
+#define SLAVE_A2NOC_SNOC 11
+
+/* clk_virt */
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define MASTER_QUP_CORE_3 3
+#define SLAVE_QUP_CORE_0 4
+#define SLAVE_QUP_CORE_1 5
+#define SLAVE_QUP_CORE_2 6
+#define SLAVE_QUP_CORE_3 7
+
+/* config_noc */
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AHB2PHY_0 2
+#define SLAVE_AHB2PHY_1 3
+#define SLAVE_AHB2PHY_2 4
+#define SLAVE_AHB2PHY_3 5
+#define SLAVE_ANOC_THROTTLE_CFG 6
+#define SLAVE_AOSS 7
+#define SLAVE_APPSS 8
+#define SLAVE_BOOT_ROM 9
+#define SLAVE_CAMERA_CFG 10
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 11
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 12
+#define SLAVE_CLK_CTL 13
+#define SLAVE_CDSP_CFG 14
+#define SLAVE_CDSP1_CFG 15
+#define SLAVE_RBCPR_CX_CFG 16
+#define SLAVE_RBCPR_MMCX_CFG 17
+#define SLAVE_RBCPR_MX_CFG 18
+#define SLAVE_CPR_NSPCX 19
+#define SLAVE_CRYPTO_0_CFG 20
+#define SLAVE_CX_RDPM 21
+#define SLAVE_DISPLAY_CFG 22
+#define SLAVE_DISPLAY_RT_THROTTLE_CFG 23
+#define SLAVE_DISPLAY1_CFG 24
+#define SLAVE_DISPLAY1_RT_THROTTLE_CFG 25
+#define SLAVE_EMAC_CFG 26
+#define SLAVE_EMAC1_CFG 27
+#define SLAVE_GP_DSP0_CFG 28
+#define SLAVE_GP_DSP1_CFG 29
+#define SLAVE_GPDSP0_THROTTLE_CFG 30
+#define SLAVE_GPDSP1_THROTTLE_CFG 31
+#define SLAVE_GPU_TCU_THROTTLE_CFG 32
+#define SLAVE_GFX3D_CFG 33
+#define SLAVE_HWKM 34
+#define SLAVE_IMEM_CFG 35
+#define SLAVE_IPA_CFG 36
+#define SLAVE_IPC_ROUTER_CFG 37
+#define SLAVE_LPASS 38
+#define SLAVE_LPASS_THROTTLE_CFG 39
+#define SLAVE_MX_RDPM 40
+#define SLAVE_MXC_RDPM 41
+#define SLAVE_PCIE_0_CFG 42
+#define SLAVE_PCIE_1_CFG 43
+#define SLAVE_PCIE_RSC_CFG 44
+#define SLAVE_PCIE_TCU_THROTTLE_CFG 45
+#define SLAVE_PCIE_THROTTLE_CFG 46
+#define SLAVE_PDM 47
+#define SLAVE_PIMEM_CFG 48
+#define SLAVE_PKA_WRAPPER_CFG 49
+#define SLAVE_QDSS_CFG 50
+#define SLAVE_QM_CFG 51
+#define SLAVE_QM_MPU_CFG 52
+#define SLAVE_QUP_0 53
+#define SLAVE_QUP_1 54
+#define SLAVE_QUP_2 55
+#define SLAVE_QUP_3 56
+#define SLAVE_SAIL_THROTTLE_CFG 57
+#define SLAVE_SDC1 58
+#define SLAVE_SECURITY 59
+#define SLAVE_SNOC_THROTTLE_CFG 60
+#define SLAVE_TCSR 61
+#define SLAVE_TLMM 62
+#define SLAVE_TSC_CFG 63
+#define SLAVE_UFS_CARD_CFG 64
+#define SLAVE_UFS_MEM_CFG 65
+#define SLAVE_USB2 66
+#define SLAVE_USB3_0 67
+#define SLAVE_USB3_1 68
+#define SLAVE_VENUS_CFG 69
+#define SLAVE_VENUS_CVP_THROTTLE_CFG 70
+#define SLAVE_VENUS_V_CPU_THROTTLE_CFG 71
+#define SLAVE_VENUS_VCODEC_THROTTLE_CFG 72
+#define SLAVE_DDRSS_CFG 73
+#define SLAVE_GPDSP_NOC_CFG 74
+#define SLAVE_CNOC_MNOC_HF_CFG 75
+#define SLAVE_CNOC_MNOC_SF_CFG 76
+#define SLAVE_PCIE_ANOC_CFG 77
+#define SLAVE_SNOC_CFG 78
+#define SLAVE_BOOT_IMEM 79
+#define SLAVE_IMEM 80
+#define SLAVE_PIMEM 81
+#define SLAVE_PCIE_0 82
+#define SLAVE_PCIE_1 83
+#define SLAVE_QDSS_STM 84
+#define SLAVE_TCU 85
+
+/* dc_noc */
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+/* gem_noc */
+#define MASTER_GPU_TCU 0
+#define MASTER_PCIE_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_COMPUTE_NOC 4
+#define MASTER_COMPUTE_NOC_1 5
+#define MASTER_GEM_NOC_CFG 6
+#define MASTER_GPDSP_SAIL 7
+#define MASTER_GFX3D 8
+#define MASTER_MNOC_HF_MEM_NOC 9
+#define MASTER_MNOC_SF_MEM_NOC 10
+#define MASTER_ANOC_PCIE_GEM_NOC 11
+#define MASTER_SNOC_GC_MEM_NOC 12
+#define MASTER_SNOC_SF_MEM_NOC 13
+#define SLAVE_GEM_NOC_CNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_GEM_NOC_PCIE_CNOC 16
+#define SLAVE_SERVICE_GEM_NOC_1 17
+#define SLAVE_SERVICE_GEM_NOC_2 18
+#define SLAVE_SERVICE_GEM_NOC 19
+#define SLAVE_SERVICE_GEM_NOC2 20
+
+/* gpdsp_anoc */
+#define MASTER_DSP0 0
+#define MASTER_DSP1 1
+#define SLAVE_GP_DSP_SAIL_NOC 2
+
+/* lpass_ag_noc */
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define MASTER_LPASS_PROC 1
+#define SLAVE_LPASS_CORE_CFG 2
+#define SLAVE_LPASS_LPI_CFG 3
+#define SLAVE_LPASS_MPU_CFG 4
+#define SLAVE_LPASS_TOP_CFG 5
+#define SLAVE_LPASS_SNOC 6
+#define SLAVE_SERVICES_LPASS_AML_NOC 7
+#define SLAVE_SERVICE_LPASS_AG_NOC 8
+
+/* mc_virt */
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+/* mmss_noc */
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP0 3
+#define MASTER_MDP1 4
+#define MASTER_MDP_CORE1_0 5
+#define MASTER_MDP_CORE1_1 6
+#define MASTER_CNOC_MNOC_HF_CFG 7
+#define MASTER_CNOC_MNOC_SF_CFG 8
+#define MASTER_VIDEO_P0 9
+#define MASTER_VIDEO_P1 10
+#define MASTER_VIDEO_PROC 11
+#define MASTER_VIDEO_V_PROC 12
+#define SLAVE_MNOC_HF_MEM_NOC 13
+#define SLAVE_MNOC_SF_MEM_NOC 14
+#define SLAVE_SERVICE_MNOC_HF 15
+#define SLAVE_SERVICE_MNOC_SF 16
+
+/* nspa_noc */
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_HCP_A 2
+#define SLAVE_CDSP_MEM_NOC 3
+#define SLAVE_SERVICE_NSP_NOC 4
+
+/* nspb_noc */
+#define MASTER_CDSPB_NOC_CFG 0
+#define MASTER_CDSP_PROC_B 1
+#define SLAVE_CDSPB_MEM_NOC 2
+#define SLAVE_HCP_B 3
+#define SLAVE_SERVICE_NSPB_NOC 4
+
+/* pcie_anoc */
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define SLAVE_ANOC_PCIE_GEM_NOC 2
+
+/* system_noc */
+#define MASTER_GIC_AHB 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_LPASS_ANOC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_SNOC_GEM_NOC_GC 7
+#define SLAVE_SNOC_GEM_NOC_SF 8
+#define SLAVE_SERVICE_SNOC 9
+
+#endif /* __DT_BINDINGS_INTERCONNECT_QCOM_SA8775P_H */
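Note that the IDs restart from 0 at every fabric comment (aggre1_noc, config_noc, gem_noc, ...): each NoC is registered as its own interconnect provider, so a define is only meaningful together with the phandle of the provider it belongs to. A sketch of typical consumer paths under that scheme, assuming provider labels &aggre1_noc, &mc_virt, &gem_noc and &config_noc with #interconnect-cells = <2> where the second cell is an RPMh tag (0 meaning the default bucket); the unit address is illustrative:

	ufs@1d84000 {
		interconnects = /* bandwidth path: UFS controller -> DDR */
				<&aggre1_noc MASTER_UFS_MEM 0 &mc_virt SLAVE_EBI1 0>,
				/* register path: CPU -> UFS config space */
				<&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_UFS_MEM_CFG 0>;
		interconnect-names = "ufs-ddr", "cpu-ufs";
	};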
diff --git a/include/dt-bindings/interconnect/qcom,sar2130p-rpmh.h b/include/dt-bindings/interconnect/qcom,sar2130p-rpmh.h
new file mode 100644
index 000000000000..aec7cbb7cd70
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sar2130p-rpmh.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2024, Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SAR2130P_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SAR2130P_H
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define SLAVE_QUP_CORE_0 2
+#define SLAVE_QUP_CORE_1 3
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define MASTER_QDSS_DAP 2
+#define SLAVE_AHB2PHY_SOUTH 3
+#define SLAVE_AOSS 4
+#define SLAVE_CAMERA_CFG 5
+#define SLAVE_CLK_CTL 6
+#define SLAVE_CDSP_CFG 7
+#define SLAVE_RBCPR_CX_CFG 8
+#define SLAVE_RBCPR_MMCX_CFG 9
+#define SLAVE_RBCPR_MXA_CFG 10
+#define SLAVE_RBCPR_MXC_CFG 11
+#define SLAVE_CPR_NSPCX 12
+#define SLAVE_CRYPTO_0_CFG 13
+#define SLAVE_CX_RDPM 14
+#define SLAVE_DISPLAY_CFG 15
+#define SLAVE_GFX3D_CFG 16
+#define SLAVE_IMEM_CFG 17
+#define SLAVE_IPC_ROUTER_CFG 18
+#define SLAVE_LPASS 19
+#define SLAVE_MX_RDPM 20
+#define SLAVE_PCIE_0_CFG 21
+#define SLAVE_PCIE_1_CFG 22
+#define SLAVE_PDM 23
+#define SLAVE_PIMEM_CFG 24
+#define SLAVE_PRNG 25
+#define SLAVE_QDSS_CFG 26
+#define SLAVE_QSPI_0 27
+#define SLAVE_QUP_0 28
+#define SLAVE_QUP_1 29
+#define SLAVE_SDCC_1 30
+#define SLAVE_TCSR 31
+#define SLAVE_TLMM 32
+#define SLAVE_TME_CFG 33
+#define SLAVE_USB3_0 34
+#define SLAVE_VENUS_CFG 35
+#define SLAVE_VSENSE_CTRL_CFG 36
+#define SLAVE_WLAN_Q6_CFG 37
+#define SLAVE_DDRSS_CFG 38
+#define SLAVE_CNOC_MNOC_CFG 39
+#define SLAVE_SNOC_CFG 40
+#define SLAVE_IMEM 41
+#define SLAVE_PIMEM 42
+#define SLAVE_SERVICE_CNOC 43
+#define SLAVE_PCIE_0 44
+#define SLAVE_PCIE_1 45
+#define SLAVE_QDSS_STM 46
+#define SLAVE_TCU 47
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_GFX3D 3
+#define MASTER_MNOC_HF_MEM_NOC 4
+#define MASTER_MNOC_SF_MEM_NOC 5
+#define MASTER_COMPUTE_NOC 6
+#define MASTER_ANOC_PCIE_GEM_NOC 7
+#define MASTER_SNOC_GC_MEM_NOC 8
+#define MASTER_SNOC_SF_MEM_NOC 9
+#define MASTER_WLAN_Q6 10
+#define SLAVE_GEM_NOC_CNOC 11
+#define SLAVE_LLCC 12
+#define SLAVE_MEM_NOC_PCIE_SNOC 13
+
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define MASTER_LPASS_PROC 1
+#define SLAVE_LPASS_CORE_CFG 2
+#define SLAVE_LPASS_LPI_CFG 3
+#define SLAVE_LPASS_MPU_CFG 4
+#define SLAVE_LPASS_TOP_CFG 5
+#define SLAVE_LPASS_SNOC 6
+#define SLAVE_SERVICES_LPASS_AML_NOC 7
+#define SLAVE_SERVICE_LPASS_AG_NOC 8
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_LSR 3
+#define MASTER_MDP 4
+#define MASTER_CNOC_MNOC_CFG 5
+#define MASTER_VIDEO 6
+#define MASTER_VIDEO_CV_PROC 7
+#define MASTER_VIDEO_PROC 8
+#define MASTER_VIDEO_V_PROC 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_CDSP_MEM_NOC 2
+#define SLAVE_SERVICE_NSP_NOC 3
+
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define SLAVE_ANOC_PCIE_GEM_NOC 2
+
+#define MASTER_GIC_AHB 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QSPI_0 2
+#define MASTER_QUP_0 3
+#define MASTER_QUP_1 4
+#define MASTER_A2NOC_SNOC 5
+#define MASTER_CNOC_DATAPATH 6
+#define MASTER_LPASS_ANOC 7
+#define MASTER_SNOC_CFG 8
+#define MASTER_CRYPTO 9
+#define MASTER_PIMEM 10
+#define MASTER_GIC 11
+#define MASTER_QDSS_ETR 12
+#define MASTER_QDSS_ETR_1 13
+#define MASTER_SDCC_1 14
+#define MASTER_USB3_0 15
+#define SLAVE_A2NOC_SNOC 16
+#define SLAVE_SNOC_GEM_NOC_GC 17
+#define SLAVE_SNOC_GEM_NOC_SF 18
+#define SLAVE_SERVICE_SNOC 19
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sc7180.h b/include/dt-bindings/interconnect/qcom,sc7180.h
index f9970f6032eb..de5d5867bd67 100644
--- a/include/dt-bindings/interconnect/qcom,sc7180.h
+++ b/include/dt-bindings/interconnect/qcom,sc7180.h
@@ -108,9 +108,6 @@
 #define SLAVE_LLCC 11
 #define SLAVE_SERVICE_GEM_NOC 12
 
-#define MASTER_IPA_CORE 0
-#define SLAVE_IPA_CORE 1
-
 #define MASTER_LLCC 0
 #define SLAVE_EBI1 1
 
diff --git a/include/dt-bindings/interconnect/qcom,sc7280.h b/include/dt-bindings/interconnect/qcom,sc7280.h
new file mode 100644
index 000000000000..21b000443999
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sc7280.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm SC7280 interconnect IDs
+ *
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC7280_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SC7280_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_1 2
+#define MASTER_A1NOC_CFG 3
+#define MASTER_PCIE_0 4
+#define MASTER_PCIE_1 5
+#define MASTER_SDCC_1 6
+#define MASTER_SDCC_2 7
+#define MASTER_SDCC_4 8
+#define MASTER_UFS_MEM 9
+#define MASTER_USB2 10
+#define MASTER_USB3_0 11
+#define SLAVE_A1NOC_SNOC 12
+#define SLAVE_ANOC_PCIE_GEM_NOC 13
+#define SLAVE_SERVICE_A1NOC 14
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_A2NOC_CFG 1
+#define MASTER_CNOC_A2NOC 2
+#define MASTER_CRYPTO 3
+#define MASTER_IPA 4
+#define MASTER_QDSS_ETR 5
+#define SLAVE_A2NOC_SNOC 6
+#define SLAVE_SERVICE_A2NOC 7
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define SLAVE_QUP_CORE_0 2
+#define SLAVE_QUP_CORE_1 3
+
+#define MASTER_CNOC3_CNOC2 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_AHB2PHY_SOUTH 2
+#define SLAVE_AHB2PHY_NORTH 3
+#define SLAVE_CAMERA_CFG 4
+#define SLAVE_CLK_CTL 5
+#define SLAVE_CDSP_CFG 6
+#define SLAVE_RBCPR_CX_CFG 7
+#define SLAVE_RBCPR_MX_CFG 8
+#define SLAVE_CRYPTO_0_CFG 9
+#define SLAVE_CX_RDPM 10
+#define SLAVE_DCC_CFG 11
+#define SLAVE_DISPLAY_CFG 12
+#define SLAVE_GFX3D_CFG 13
+#define SLAVE_HWKM 14
+#define SLAVE_IMEM_CFG 15
+#define SLAVE_IPA_CFG 16
+#define SLAVE_IPC_ROUTER_CFG 17
+#define SLAVE_LPASS 18
+#define SLAVE_CNOC_MSS 19
+#define SLAVE_MX_RDPM 20
+#define SLAVE_PCIE_0_CFG 21
+#define SLAVE_PCIE_1_CFG 22
+#define SLAVE_PDM 23
+#define SLAVE_PIMEM_CFG 24
+#define SLAVE_PKA_WRAPPER_CFG 25
+#define SLAVE_PMU_WRAPPER_CFG 26
+#define SLAVE_QDSS_CFG 27
+#define SLAVE_QSPI_0 28
+#define SLAVE_QUP_0 29
+#define SLAVE_QUP_1 30
+#define SLAVE_SDCC_1 31
+#define SLAVE_SDCC_2 32
+#define SLAVE_SDCC_4 33
+#define SLAVE_SECURITY 34
+#define SLAVE_TCSR 35
+#define SLAVE_TLMM 36
+#define SLAVE_UFS_MEM_CFG 37
+#define SLAVE_USB2 38
+#define SLAVE_USB3_0 39
+#define SLAVE_VENUS_CFG 40
+#define SLAVE_VSENSE_CTRL_CFG 41
+#define SLAVE_A1NOC_CFG 42
+#define SLAVE_A2NOC_CFG 43
+#define SLAVE_CNOC2_CNOC3 44
+#define SLAVE_CNOC_MNOC_CFG 45
+#define SLAVE_SNOC_CFG 46
+
+#define MASTER_CNOC2_CNOC3 0
+#define MASTER_GEM_NOC_CNOC 1
+#define MASTER_GEM_NOC_PCIE_SNOC 2
+#define SLAVE_AOSS 3
+#define SLAVE_APPSS 4
+#define SLAVE_CNOC3_CNOC2 5
+#define SLAVE_CNOC_A2NOC 6
+#define SLAVE_DDRSS_CFG 7
+#define SLAVE_BOOT_IMEM 8
+#define SLAVE_IMEM 9
+#define SLAVE_PIMEM 10
+#define SLAVE_PCIE_0 11
+#define SLAVE_PCIE_1 12
+#define SLAVE_QDSS_STM 13
+#define SLAVE_TCU 14
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_COMPUTE_NOC 3
+#define MASTER_GEM_NOC_CFG 4
+#define MASTER_GFX3D 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_ANOC_PCIE_GEM_NOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define SLAVE_MSS_PROC_MS_MPU_CFG 11
+#define SLAVE_MCDMA_MS_MPU_CFG 12
+#define SLAVE_GEM_NOC_CNOC 13
+#define SLAVE_LLCC 14
+#define SLAVE_MEM_NOC_PCIE_SNOC 15
+#define SLAVE_SERVICE_GEM_NOC_1 16
+#define SLAVE_SERVICE_GEM_NOC_2 17
+#define SLAVE_SERVICE_GEM_NOC 18
+
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define SLAVE_LPASS_CORE_CFG 1
+#define SLAVE_LPASS_LPI_CFG 2
+#define SLAVE_LPASS_MPU_CFG 3
+#define SLAVE_LPASS_TOP_CFG 4
+#define SLAVE_SERVICES_LPASS_AML_NOC 5
+#define SLAVE_SERVICE_LPASS_AG_NOC 6
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_VIDEO_P0 1
+#define MASTER_VIDEO_PROC 2
+#define MASTER_CAMNOC_HF 3
+#define MASTER_CAMNOC_ICP 4
+#define MASTER_CAMNOC_SF 5
+#define MASTER_MDP0 6
+#define SLAVE_MNOC_HF_MEM_NOC 7
+#define SLAVE_MNOC_SF_MEM_NOC 8
+#define SLAVE_SERVICE_MNOC 9
+
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_CDSP_MEM_NOC 2
+#define SLAVE_SERVICE_NSP_NOC 3
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_SNOC_CFG 2
+#define MASTER_PIMEM 3
+#define MASTER_GIC 4
+#define SLAVE_SNOC_GEM_NOC_GC 5
+#define SLAVE_SNOC_GEM_NOC_SF 6
+#define SLAVE_SERVICE_SNOC 7
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sc8180x.h b/include/dt-bindings/interconnect/qcom,sc8180x.h
new file mode 100644
index 000000000000..0bdc8d6cb401
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sc8180x.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Qualcomm SC8180x interconnect IDs
+ *
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC8180X_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SC8180X_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_UFS_CARD 1
+#define MASTER_UFS_GEN4 2
+#define MASTER_UFS_MEM 3
+#define MASTER_USB3 4
+#define MASTER_USB3_1 5
+#define MASTER_USB3_2 6
+#define A1NOC_SNOC_SLV 7
+#define SLAVE_SERVICE_A1NOC 8
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QSPI_0 2
+#define MASTER_QSPI_1 3
+#define MASTER_QUP_0 4
+#define MASTER_QUP_1 5
+#define MASTER_QUP_2 6
+#define MASTER_SENSORS_AHB 7
+#define MASTER_CRYPTO_CORE_0 8
+#define MASTER_IPA 9
+#define MASTER_EMAC 10
+#define MASTER_PCIE 11
+#define MASTER_PCIE_1 12
+#define MASTER_PCIE_2 13
+#define MASTER_PCIE_3 14
+#define MASTER_QDSS_ETR 15
+#define MASTER_SDCC_2 16
+#define MASTER_SDCC_4 17
+#define A2NOC_SNOC_SLV 18
+#define SLAVE_ANOC_PCIE_GEM_NOC 19
+#define SLAVE_SERVICE_A2NOC 20
+
+#define MASTER_CAMNOC_HF0_UNCOMP 0
+#define MASTER_CAMNOC_HF1_UNCOMP 1
+#define MASTER_CAMNOC_SF_UNCOMP 2
+#define SLAVE_CAMNOC_UNCOMP 3
+
+#define MASTER_NPU 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define SNOC_CNOC_MAS 0
+#define SLAVE_A1NOC_CFG 1
+#define SLAVE_A2NOC_CFG 2
+#define SLAVE_AHB2PHY_CENTER 3
+#define SLAVE_AHB2PHY_EAST 4
+#define SLAVE_AHB2PHY_WEST 5
+#define SLAVE_AHB2PHY_SOUTH 6
+#define SLAVE_AOP 7
+#define SLAVE_AOSS 8
+#define SLAVE_CAMERA_CFG 9
+#define SLAVE_CLK_CTL 10
+#define SLAVE_CDSP_CFG 11
+#define SLAVE_RBCPR_CX_CFG 12
+#define SLAVE_RBCPR_MMCX_CFG 13
+#define SLAVE_RBCPR_MX_CFG 14
+#define SLAVE_CRYPTO_0_CFG 15
+#define SLAVE_CNOC_DDRSS 16
+#define SLAVE_DISPLAY_CFG 17
+#define SLAVE_EMAC_CFG 18
+#define SLAVE_GLM 19
+#define SLAVE_GRAPHICS_3D_CFG 20
+#define SLAVE_IMEM_CFG 21
+#define SLAVE_IPA_CFG 22
+#define SLAVE_CNOC_MNOC_CFG 23
+#define SLAVE_NPU_CFG 24
+#define SLAVE_PCIE_0_CFG 25
+#define SLAVE_PCIE_1_CFG 26
+#define SLAVE_PCIE_2_CFG 27
+#define SLAVE_PCIE_3_CFG 28
+#define SLAVE_PDM 29
+#define SLAVE_PIMEM_CFG 30
+#define SLAVE_PRNG 31
+#define SLAVE_QDSS_CFG 32
+#define SLAVE_QSPI_0 33
+#define SLAVE_QSPI_1 34
+#define SLAVE_QUP_1 35
+#define SLAVE_QUP_2 36
+#define SLAVE_QUP_0 37
+#define SLAVE_SDCC_2 38
+#define SLAVE_SDCC_4 39
+#define SLAVE_SECURITY 40
+#define SLAVE_SNOC_CFG 41
+#define SLAVE_SPSS_CFG 42
+#define SLAVE_TCSR 43
+#define SLAVE_TLMM_EAST 44
+#define SLAVE_TLMM_SOUTH 45
+#define SLAVE_TLMM_WEST 46
+#define SLAVE_TSIF 47
+#define SLAVE_UFS_CARD_CFG 48
+#define SLAVE_UFS_MEM_0_CFG 49
+#define SLAVE_UFS_MEM_1_CFG 50
+#define SLAVE_USB3 51
+#define SLAVE_USB3_1 52
+#define SLAVE_USB3_2 53
+#define SLAVE_VENUS_CFG 54
+#define SLAVE_VSENSE_CTRL_CFG 55
+#define SLAVE_SERVICE_CNOC 56
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_GEM_NOC_CFG 1
+#define SLAVE_LLCC_CFG 2
+
+#define MASTER_AMPSS_M0 0
+#define MASTER_GPU_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_GEM_NOC_CFG 3
+#define MASTER_COMPUTE_NOC 4
+#define MASTER_GRAPHICS_3D 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_GEM_NOC_PCIE_SNOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define MASTER_ECC 11
+#define SLAVE_MSS_PROC_MS_MPU_CFG 12
+#define SLAVE_ECC 13
+#define SLAVE_GEM_NOC_SNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_SERVICE_GEM_NOC 16
+#define SLAVE_SERVICE_GEM_NOC_1 17
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI_CH0 1
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CAMNOC_HF0 1
+#define MASTER_CAMNOC_HF1 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP_PORT0 4
+#define MASTER_MDP_PORT1 5
+#define MASTER_ROTATOR 6
+#define MASTER_VIDEO_P0 7
+#define MASTER_VIDEO_P1 8
+#define MASTER_VIDEO_PROC 9
+#define SLAVE_MNOC_SF_MEM_NOC 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_SNOC_CFG 0
+#define A1NOC_SNOC_MAS 1
+#define A2NOC_SNOC_MAS 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_PIMEM 4
+#define MASTER_GIC 5
+#define SLAVE_APPSS 6
+#define SNOC_CNOC_SLV 7
+#define SLAVE_SNOC_GEM_NOC_GC 8
+#define SLAVE_SNOC_GEM_NOC_SF 9
+#define SLAVE_OCIMEM 10
+#define SLAVE_PIMEM 11
+#define SLAVE_SERVICE_SNOC 12
+#define SLAVE_PCIE_0 13
+#define SLAVE_PCIE_1 14
+#define SLAVE_PCIE_2 15
+#define SLAVE_PCIE_3 16
+#define SLAVE_QDSS_STM 17
+#define SLAVE_TCU 18
+
+#define MASTER_MNOC_HF_MEM_NOC_DISPLAY 0
+#define MASTER_MNOC_SF_MEM_NOC_DISPLAY 1
+#define SLAVE_LLCC_DISPLAY 2
+
+#define MASTER_LLCC_DISPLAY 0
+#define SLAVE_EBI_CH0_DISPLAY 1
+
+#define MASTER_MDP_PORT0_DISPLAY 0
+#define MASTER_MDP_PORT1_DISPLAY 1
+#define MASTER_ROTATOR_DISPLAY 2
+#define SLAVE_MNOC_SF_MEM_NOC_DISPLAY 3
+#define SLAVE_MNOC_HF_MEM_NOC_DISPLAY 4
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sc8280xp.h b/include/dt-bindings/interconnect/qcom,sc8280xp.h
new file mode 100644
index 000000000000..f89f47e99c6d
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sc8280xp.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SC8280XP_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SC8280XP_H
+
+/* aggre1_noc */
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_QUP_2 2
+#define MASTER_A1NOC_CFG 3
+#define MASTER_IPA 4
+#define MASTER_EMAC_1 5
+#define MASTER_SDCC_4 6
+#define MASTER_UFS_MEM 7
+#define MASTER_USB3_0 8
+#define MASTER_USB3_1 9
+#define MASTER_USB3_MP 10
+#define MASTER_USB4_0 11
+#define MASTER_USB4_1 12
+#define SLAVE_A1NOC_SNOC 13
+#define SLAVE_USB_NOC_SNOC 14
+#define SLAVE_SERVICE_A1NOC 15
+
+/* aggre2_noc */
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_0 1
+#define MASTER_A2NOC_CFG 2
+#define MASTER_CRYPTO 3
+#define MASTER_SENSORS_PROC 4
+#define MASTER_SP 5
+#define MASTER_EMAC 6
+#define MASTER_PCIE_0 7
+#define MASTER_PCIE_1 8
+#define MASTER_PCIE_2A 9
+#define MASTER_PCIE_2B 10
+#define MASTER_PCIE_3A 11
+#define MASTER_PCIE_3B 12
+#define MASTER_PCIE_4 13
+#define MASTER_QDSS_ETR 14
+#define MASTER_SDCC_2 15
+#define MASTER_UFS_CARD 16
+#define SLAVE_A2NOC_SNOC 17
+#define SLAVE_ANOC_PCIE_GEM_NOC 18
+#define SLAVE_SERVICE_A2NOC 19
+
+/* clk_virt */
+/* 0 was used by MASTER_IPA_CORE, now represented as RPMh clock */
+#define MASTER_QUP_CORE_0 1
+#define MASTER_QUP_CORE_1 2
+#define MASTER_QUP_CORE_2 3
+/* 4 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
+#define SLAVE_QUP_CORE_0 5
+#define SLAVE_QUP_CORE_1 6
+#define SLAVE_QUP_CORE_2 7
+
+/* config_noc */
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AHB2PHY_0 2
+#define SLAVE_AHB2PHY_1 3
+#define SLAVE_AHB2PHY_2 4
+#define SLAVE_AOSS 5
+#define SLAVE_APPSS 6
+#define SLAVE_CAMERA_CFG 7
+#define SLAVE_CLK_CTL 8
+#define SLAVE_CDSP_CFG 9
+#define SLAVE_CDSP1_CFG 10
+#define SLAVE_RBCPR_CX_CFG 11
+#define SLAVE_RBCPR_MMCX_CFG 12
+#define SLAVE_RBCPR_MX_CFG 13
+#define SLAVE_CPR_NSPCX 14
+#define SLAVE_CRYPTO_0_CFG 15
+#define SLAVE_CX_RDPM 16
+#define SLAVE_DCC_CFG 17
+#define SLAVE_DISPLAY_CFG 18
+#define SLAVE_DISPLAY1_CFG 19
+#define SLAVE_EMAC_CFG 20
+#define SLAVE_EMAC1_CFG 21
+#define SLAVE_GFX3D_CFG 22
+#define SLAVE_HWKM 23
+#define SLAVE_IMEM_CFG 24
+#define SLAVE_IPA_CFG 25
+#define SLAVE_IPC_ROUTER_CFG 26
+#define SLAVE_LPASS 27
+#define SLAVE_MX_RDPM 28
+#define SLAVE_MXC_RDPM 29
+#define SLAVE_PCIE_0_CFG 30
+#define SLAVE_PCIE_1_CFG 31
+#define SLAVE_PCIE_2A_CFG 32
+#define SLAVE_PCIE_2B_CFG 33
+#define SLAVE_PCIE_3A_CFG 34
+#define SLAVE_PCIE_3B_CFG 35
+#define SLAVE_PCIE_4_CFG 36
+#define SLAVE_PCIE_RSC_CFG 37
+#define SLAVE_PDM 38
+#define SLAVE_PIMEM_CFG 39
+#define SLAVE_PKA_WRAPPER_CFG 40
+#define SLAVE_PMU_WRAPPER_CFG 41
+#define SLAVE_QDSS_CFG 42
+#define SLAVE_QSPI_0 43
+#define SLAVE_QUP_0 44
+#define SLAVE_QUP_1 45
+#define SLAVE_QUP_2 46
+#define SLAVE_SDCC_2 47
+#define SLAVE_SDCC_4 48
+#define SLAVE_SECURITY 49
+#define SLAVE_SMMUV3_CFG 50
+#define SLAVE_SMSS_CFG 51
+#define SLAVE_SPSS_CFG 52
+#define SLAVE_TCSR 53
+#define SLAVE_TLMM 54
+#define SLAVE_UFS_CARD_CFG 55
+#define SLAVE_UFS_MEM_CFG 56
+#define SLAVE_USB3_0 57
+#define SLAVE_USB3_1 58
+#define SLAVE_USB3_MP 59
+#define SLAVE_USB4_0 60
+#define SLAVE_USB4_1 61
+#define SLAVE_VENUS_CFG 62
+#define SLAVE_VSENSE_CTRL_CFG 63
+#define SLAVE_VSENSE_CTRL_R_CFG 64
+#define SLAVE_A1NOC_CFG 65
+#define SLAVE_A2NOC_CFG 66
+#define SLAVE_ANOC_PCIE_BRIDGE_CFG 67
+#define SLAVE_DDRSS_CFG 68
+#define SLAVE_CNOC_MNOC_CFG 69
+#define SLAVE_SNOC_CFG 70
+#define SLAVE_SNOC_SF_BRIDGE_CFG 71
+#define SLAVE_IMEM 72
+#define SLAVE_PIMEM 73
+#define SLAVE_SERVICE_CNOC 74
+#define SLAVE_PCIE_0 75
+#define SLAVE_PCIE_1 76
+#define SLAVE_PCIE_2A 77
+#define SLAVE_PCIE_2B 78
+#define SLAVE_PCIE_3A 79
+#define SLAVE_PCIE_3B 80
+#define SLAVE_PCIE_4 81
+#define SLAVE_QDSS_STM 82
+#define SLAVE_SMSS 83
+#define SLAVE_TCU 84
+
+/* dc_noc */
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_GEM_NOC_CFG 2
+
+/* gem_noc */
+#define MASTER_GPU_TCU 0
+#define MASTER_PCIE_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_COMPUTE_NOC 4
+#define MASTER_COMPUTE_NOC_1 5
+#define MASTER_GEM_NOC_CFG 6
+#define MASTER_GFX3D 7
+#define MASTER_MNOC_HF_MEM_NOC 8
+#define MASTER_MNOC_SF_MEM_NOC 9
+#define MASTER_ANOC_PCIE_GEM_NOC 10
+#define MASTER_SNOC_GC_MEM_NOC 11
+#define MASTER_SNOC_SF_MEM_NOC 12
+#define SLAVE_GEM_NOC_CNOC 13
+#define SLAVE_LLCC 14
+#define SLAVE_GEM_NOC_PCIE_CNOC 15
+#define SLAVE_SERVICE_GEM_NOC_1 16
+#define SLAVE_SERVICE_GEM_NOC_2 17
+#define SLAVE_SERVICE_GEM_NOC 18
+
+/* lpass_ag_noc */
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define MASTER_LPASS_PROC 1
+#define SLAVE_LPASS_CORE_CFG 2
+#define SLAVE_LPASS_LPI_CFG 3
+#define SLAVE_LPASS_MPU_CFG 4
+#define SLAVE_LPASS_TOP_CFG 5
+#define SLAVE_LPASS_SNOC 6
+#define SLAVE_SERVICES_LPASS_AML_NOC 7
+#define SLAVE_SERVICE_LPASS_AG_NOC 8
+
+/* mc_virt */
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+/* mmss_noc */
+#define MASTER_CAMNOC_HF 0
+#define MASTER_MDP0 1
+#define MASTER_MDP1 2
+#define MASTER_MDP_CORE1_0 3
+#define MASTER_MDP_CORE1_1 4
+#define MASTER_CNOC_MNOC_CFG 5
+#define MASTER_ROTATOR 6
+#define MASTER_ROTATOR_1 7
+#define MASTER_VIDEO_P0 8
+#define MASTER_VIDEO_P1 9
+#define MASTER_VIDEO_PROC 10
+#define MASTER_CAMNOC_ICP 11
+#define MASTER_CAMNOC_SF 12
+#define SLAVE_MNOC_HF_MEM_NOC 13
+#define SLAVE_MNOC_SF_MEM_NOC 14
+#define SLAVE_SERVICE_MNOC 15
+
+/* nspa_noc */
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_CDSP_MEM_NOC 2
+#define SLAVE_NSP_XFR 3
+#define SLAVE_SERVICE_NSP_NOC 4
+
+/* nspb_noc */
+#define MASTER_CDSPB_NOC_CFG 0
+#define MASTER_CDSP_PROC_B 1
+#define SLAVE_CDSPB_MEM_NOC 2
+#define SLAVE_NSPB_XFR 3
+#define SLAVE_SERVICE_NSPB_NOC 4
+
+/* system_noc */
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_USB_NOC_SNOC 2
+#define MASTER_LPASS_ANOC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_SNOC_GEM_NOC_GC 7
+#define SLAVE_SNOC_GEM_NOC_SF 8
+#define SLAVE_SERVICE_SNOC 9
+
+#endif
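
Consumers pair these per-NoC endpoint IDs with the phandle of the provider node that exports them. A minimal sketch of the usual pattern, assuming &aggre1_noc and &mc_virt provider labels, a UFS consumer node, and a two-cell provider specifier (the trailing 0 is a path tag) — none of which are defined by this patch:

    ufs@1d84000 {
            /* request a bandwidth path from the UFS master to DDR */
            interconnects = <&aggre1_noc MASTER_UFS_MEM 0 &mc_virt SLAVE_EBI1 0>;
            interconnect-names = "ufs-ddr";
    };

Providers with #interconnect-cells = <1> take the same IDs without the tag cell.
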
diff --git a/include/dt-bindings/interconnect/qcom,sdm670-rpmh.h b/include/dt-bindings/interconnect/qcom,sdm670-rpmh.h
new file mode 100644
index 000000000000..9b516cc360bb
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdm670-rpmh.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Qualcomm SDM670 interconnect IDs
+ *
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDM670_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDM670_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_BLSP_1 1
+#define MASTER_TSIF 2
+#define MASTER_EMMC 3
+#define MASTER_SDCC_2 4
+#define MASTER_SDCC_4 5
+#define MASTER_UFS_MEM 6
+#define SLAVE_A1NOC_SNOC 7
+#define SLAVE_SERVICE_A1NOC 8
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_BLSP_2 2
+#define MASTER_CNOC_A2NOC 3
+#define MASTER_CRYPTO_CORE_0 4
+#define MASTER_IPA 5
+#define MASTER_QDSS_ETR 6
+#define MASTER_USB3 7
+#define SLAVE_A2NOC_SNOC 8
+#define SLAVE_SERVICE_A2NOC 9
+
+
+#define MASTER_SPDM 0
+#define MASTER_SNOC_CNOC 1
+#define SLAVE_A1NOC_CFG 2
+#define SLAVE_A2NOC_CFG 3
+#define SLAVE_AOP 4
+#define SLAVE_AOSS 5
+#define SLAVE_CAMERA_CFG 6
+#define SLAVE_CLK_CTL 7
+#define SLAVE_CDSP_CFG 8
+#define SLAVE_RBCPR_CX_CFG 9
+#define SLAVE_CRYPTO_0_CFG 10
+#define SLAVE_DCC_CFG 11
+#define SLAVE_CNOC_DDRSS 12
+#define SLAVE_DISPLAY_CFG 13
+#define SLAVE_EMMC_CFG 14
+#define SLAVE_GLM 15
+#define SLAVE_GRAPHICS_3D_CFG 16
+#define SLAVE_IMEM_CFG 17
+#define SLAVE_IPA_CFG 18
+#define SLAVE_CNOC_MNOC_CFG 19
+#define SLAVE_PDM 20
+#define SLAVE_SOUTH_PHY_CFG 21
+#define SLAVE_PIMEM_CFG 22
+#define SLAVE_PRNG 23
+#define SLAVE_QDSS_CFG 24
+#define SLAVE_BLSP_2 25
+#define SLAVE_BLSP_1 26
+#define SLAVE_SDCC_2 27
+#define SLAVE_SDCC_4 28
+#define SLAVE_SNOC_CFG 29
+#define SLAVE_SPDM_WRAPPER 30
+#define SLAVE_TCSR 31
+#define SLAVE_TLMM_NORTH 32
+#define SLAVE_TLMM_SOUTH 33
+#define SLAVE_TSIF 34
+#define SLAVE_UFS_MEM_CFG 35
+#define SLAVE_USB3 36
+#define SLAVE_VENUS_CFG 37
+#define SLAVE_VSENSE_CTRL_CFG 38
+#define SLAVE_CNOC_A2NOC 39
+#define SLAVE_SERVICE_CNOC 40
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LLCC_CFG 1
+#define SLAVE_MEM_NOC_CFG 2
+
+#define MASTER_AMPSS_M0 0
+#define MASTER_GNOC_CFG 1
+#define SLAVE_GNOC_SNOC 2
+#define SLAVE_GNOC_MEM_NOC 3
+#define SLAVE_SERVICE_GNOC 4
+
+#define MASTER_TCU_0 0
+#define MASTER_MEM_NOC_CFG 1
+#define MASTER_GNOC_MEM_NOC 2
+#define MASTER_MNOC_HF_MEM_NOC 3
+#define MASTER_MNOC_SF_MEM_NOC 4
+#define MASTER_SNOC_GC_MEM_NOC 5
+#define MASTER_SNOC_SF_MEM_NOC 6
+#define MASTER_GRAPHICS_3D 7
+#define SLAVE_MSS_PROC_MS_MPU_CFG 8
+#define SLAVE_MEM_NOC_GNOC 9
+#define SLAVE_LLCC 10
+#define SLAVE_MEM_NOC_SNOC 11
+#define SLAVE_SERVICE_MEM_NOC 12
+#define MASTER_LLCC 13
+#define SLAVE_EBI_CH0 14
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CAMNOC_HF0 1
+#define MASTER_CAMNOC_HF1 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP_PORT0 4
+#define MASTER_MDP_PORT1 5
+#define MASTER_ROTATOR 6
+#define MASTER_VIDEO_P0 7
+#define MASTER_VIDEO_P1 8
+#define MASTER_VIDEO_PROC 9
+#define SLAVE_MNOC_SF_MEM_NOC 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_SNOC_CFG 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_GNOC_SNOC 3
+#define MASTER_MEM_NOC_SNOC 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_APPSS 7
+#define SLAVE_SNOC_CNOC 8
+#define SLAVE_SNOC_MEM_NOC_GC 9
+#define SLAVE_SNOC_MEM_NOC_SF 10
+#define SLAVE_OCIMEM 11
+#define SLAVE_PIMEM 12
+#define SLAVE_SERVICE_SNOC 13
+#define SLAVE_QDSS_STM 14
+#define SLAVE_TCU 15
+#define MASTER_CAMNOC_HF0_UNCOMP 16
+#define MASTER_CAMNOC_HF1_UNCOMP 17
+#define MASTER_CAMNOC_SF_UNCOMP 18
+#define SLAVE_CAMNOC_UNCOMP 19
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sdx55.h b/include/dt-bindings/interconnect/qcom,sdx55.h
index bfb6524a2d90..1925f0784ab2 100644
--- a/include/dt-bindings/interconnect/qcom,sdx55.h
+++ b/include/dt-bindings/interconnect/qcom,sdx55.h
@@ -70,7 +70,5 @@
#define SLAVE_QDSS_STM 48
#define SLAVE_TCU 49
-#define MASTER_IPA_CORE 0
-#define SLAVE_IPA_CORE 1
#endif
diff --git a/include/dt-bindings/interconnect/qcom,sdx65.h b/include/dt-bindings/interconnect/qcom,sdx65.h
new file mode 100644
index 000000000000..b25288aa7d74
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdx65.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX65_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX65_H
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_TCU_0 0
+#define MASTER_SNOC_GC_MEM_NOC 1
+#define MASTER_APPSS_PROC 2
+#define SLAVE_LLCC 3
+#define SLAVE_MEM_NOC_SNOC 4
+#define SLAVE_MEM_NOC_PCIE_SNOC 5
+
+#define MASTER_AUDIO 0
+#define MASTER_BLSP_1 1
+#define MASTER_QDSS_BAM 2
+#define MASTER_QPIC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_SPMI_FETCHER 5
+#define MASTER_ANOC_SNOC 6
+#define MASTER_IPA 7
+#define MASTER_MEM_NOC_SNOC 8
+#define MASTER_MEM_NOC_PCIE_SNOC 9
+#define MASTER_CRYPTO 10
+#define MASTER_IPA_PCIE 11
+#define MASTER_PCIE_0 12
+#define MASTER_QDSS_ETR 13
+#define MASTER_SDCC_1 14
+#define MASTER_USB3 15
+#define SLAVE_AOSS 16
+#define SLAVE_APPSS 17
+#define SLAVE_AUDIO 18
+#define SLAVE_BLSP_1 19
+#define SLAVE_CLK_CTL 20
+#define SLAVE_CRYPTO_0_CFG 21
+#define SLAVE_CNOC_DDRSS 22
+#define SLAVE_ECC_CFG 23
+#define SLAVE_IMEM_CFG 24
+#define SLAVE_IPA_CFG 25
+#define SLAVE_CNOC_MSS 26
+#define SLAVE_PCIE_PARF 27
+#define SLAVE_PDM 28
+#define SLAVE_PRNG 29
+#define SLAVE_QDSS_CFG 30
+#define SLAVE_QPIC 31
+#define SLAVE_SDCC_1 32
+#define SLAVE_SNOC_CFG 33
+#define SLAVE_SPMI_FETCHER 34
+#define SLAVE_SPMI_VGI_COEX 35
+#define SLAVE_TCSR 36
+#define SLAVE_TLMM 37
+#define SLAVE_USB3 38
+#define SLAVE_USB3_PHY_CFG 39
+#define SLAVE_ANOC_SNOC 40
+#define SLAVE_SNOC_MEM_NOC_GC 41
+#define SLAVE_IMEM 42
+#define SLAVE_SERVICE_SNOC 43
+#define SLAVE_PCIE_0 44
+#define SLAVE_QDSS_STM 45
+#define SLAVE_TCU 46
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sdx75.h b/include/dt-bindings/interconnect/qcom,sdx75.h
new file mode 100644
index 000000000000..0e19ee8f1687
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sdx75.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDX75_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SDX75_H
+
+#define MASTER_QUP_CORE_0 1
+#define SLAVE_QUP_CORE_0 3
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_LAGG_CFG 1
+#define SLAVE_MCCC_MASTER 2
+#define SLAVE_GEM_NOC_CFG 3
+#define SLAVE_SNOOP_BWMON 4
+
+#define MASTER_SYS_TCU 0
+#define MASTER_APPSS_PROC 1
+#define MASTER_GEM_NOC_CFG 2
+#define MASTER_MSS_PROC 3
+#define MASTER_ANOC_PCIE_GEM_NOC 4
+#define MASTER_SNOC_SF_MEM_NOC 5
+#define MASTER_GIC 6
+#define MASTER_IPA_PCIE 7
+#define SLAVE_GEM_NOC_CNOC 8
+#define SLAVE_LLCC 9
+#define SLAVE_MEM_NOC_PCIE_SNOC 10
+#define SLAVE_SERVICE_GEM_NOC 11
+
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define MASTER_PCIE_2 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+
+#define MASTER_AUDIO 0
+#define MASTER_GIC_AHB 1
+#define MASTER_PCIE_RSCC 2
+#define MASTER_QDSS_BAM 3
+#define MASTER_QPIC 4
+#define MASTER_QUP_0 5
+#define MASTER_ANOC_SNOC 6
+#define MASTER_GEM_NOC_CNOC 7
+#define MASTER_GEM_NOC_PCIE_SNOC 8
+#define MASTER_SNOC_CFG 9
+#define MASTER_PCIE_ANOC_CFG 10
+#define MASTER_CRYPTO 11
+#define MASTER_IPA 12
+#define MASTER_MVMSS 13
+#define MASTER_EMAC_0 14
+#define MASTER_EMAC_1 15
+#define MASTER_QDSS_ETR 16
+#define MASTER_QDSS_ETR_1 17
+#define MASTER_SDCC_1 18
+#define MASTER_SDCC_4 19
+#define MASTER_USB3_0 20
+#define SLAVE_ETH0_CFG 21
+#define SLAVE_ETH1_CFG 22
+#define SLAVE_AUDIO 23
+#define SLAVE_CLK_CTL 24
+#define SLAVE_CRYPTO_0_CFG 25
+#define SLAVE_IMEM_CFG 26
+#define SLAVE_IPA_CFG 27
+#define SLAVE_IPC_ROUTER_CFG 28
+#define SLAVE_CNOC_MSS 29
+#define SLAVE_ICBDI_MVMSS_CFG 30
+#define SLAVE_PCIE_0_CFG 31
+#define SLAVE_PCIE_1_CFG 32
+#define SLAVE_PCIE_2_CFG 33
+#define SLAVE_PCIE_RSC_CFG 34
+#define SLAVE_PDM 35
+#define SLAVE_PRNG 36
+#define SLAVE_QDSS_CFG 37
+#define SLAVE_QPIC 38
+#define SLAVE_QUP_0 39
+#define SLAVE_SDCC_1 40
+#define SLAVE_SDCC_4 41
+#define SLAVE_SPMI_VGI_COEX 42
+#define SLAVE_TCSR 43
+#define SLAVE_TLMM 44
+#define SLAVE_USB3 45
+#define SLAVE_USB3_PHY_CFG 46
+#define SLAVE_A1NOC_CFG 47
+#define SLAVE_DDRSS_CFG 48
+#define SLAVE_SNOC_GEM_NOC_SF 49
+#define SLAVE_SNOC_CFG 50
+#define SLAVE_PCIE_ANOC_CFG 51
+#define SLAVE_IMEM 52
+#define SLAVE_SERVICE_PCIE_ANOC 53
+#define SLAVE_SERVICE_SNOC 54
+#define SLAVE_PCIE_0 55
+#define SLAVE_PCIE_1 56
+#define SLAVE_PCIE_2 57
+#define SLAVE_QDSS_STM 58
+#define SLAVE_TCU 59
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm6115.h b/include/dt-bindings/interconnect/qcom,sm6115.h
new file mode 100644
index 000000000000..21090e585f05
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm6115.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM6115_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM6115_H
+
+/* BIMC */
+#define MASTER_AMPSS_M0 0
+#define MASTER_SNOC_BIMC_RT 1
+#define MASTER_SNOC_BIMC_NRT 2
+#define SNOC_BIMC_MAS 3
+#define MASTER_GRAPHICS_3D 4
+#define MASTER_TCU_0 5
+#define SLAVE_EBI_CH0 6
+#define BIMC_SNOC_SLV 7
+
+/* CNOC */
+#define SNOC_CNOC_MAS 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_AHB2PHY_USB 2
+#define SLAVE_APSS_THROTTLE_CFG 3
+#define SLAVE_BIMC_CFG 4
+#define SLAVE_BOOT_ROM 5
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 6
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 7
+#define SLAVE_CAMERA_CFG 8
+#define SLAVE_CLK_CTL 9
+#define SLAVE_RBCPR_CX_CFG 10
+#define SLAVE_RBCPR_MX_CFG 11
+#define SLAVE_CRYPTO_0_CFG 12
+#define SLAVE_DCC_CFG 13
+#define SLAVE_DDR_PHY_CFG 14
+#define SLAVE_DDR_SS_CFG 15
+#define SLAVE_DISPLAY_CFG 16
+#define SLAVE_DISPLAY_THROTTLE_CFG 17
+#define SLAVE_GPU_CFG 18
+#define SLAVE_GPU_THROTTLE_CFG 19
+#define SLAVE_HWKM_CORE 20
+#define SLAVE_IMEM_CFG 21
+#define SLAVE_IPA_CFG 22
+#define SLAVE_LPASS 23
+#define SLAVE_MAPSS 24
+#define SLAVE_MDSP_MPU_CFG 25
+#define SLAVE_MESSAGE_RAM 26
+#define SLAVE_CNOC_MSS 27
+#define SLAVE_PDM 28
+#define SLAVE_PIMEM_CFG 29
+#define SLAVE_PKA_CORE 30
+#define SLAVE_PMIC_ARB 31
+#define SLAVE_QDSS_CFG 32
+#define SLAVE_QM_CFG 33
+#define SLAVE_QM_MPU_CFG 34
+#define SLAVE_QPIC 35
+#define SLAVE_QUP_0 36
+#define SLAVE_RPM 37
+#define SLAVE_SDCC_1 38
+#define SLAVE_SDCC_2 39
+#define SLAVE_SECURITY 40
+#define SLAVE_SNOC_CFG 41
+#define SLAVE_TCSR 42
+#define SLAVE_TLMM 43
+#define SLAVE_USB3 44
+#define SLAVE_VENUS_CFG 45
+#define SLAVE_VENUS_THROTTLE_CFG 46
+#define SLAVE_VSENSE_CTRL_CFG 47
+#define SLAVE_SERVICE_CNOC 48
+
+/* SNOC */
+#define MASTER_CRYPTO_CORE0 0
+#define MASTER_SNOC_CFG 1
+#define MASTER_TIC 2
+#define MASTER_ANOC_SNOC 3
+#define BIMC_SNOC_MAS 4
+#define MASTER_PIMEM 5
+#define MASTER_QDSS_BAM 6
+#define MASTER_QPIC 7
+#define MASTER_QUP_0 8
+#define MASTER_IPA 9
+#define MASTER_QDSS_ETR 10
+#define MASTER_SDCC_1 11
+#define MASTER_SDCC_2 12
+#define MASTER_USB3 13
+#define SLAVE_APPSS 14
+#define SNOC_CNOC_SLV 15
+#define SLAVE_OCIMEM 16
+#define SLAVE_PIMEM 17
+#define SNOC_BIMC_SLV 18
+#define SLAVE_SERVICE_SNOC 19
+#define SLAVE_QDSS_STM 20
+#define SLAVE_TCU 21
+#define SLAVE_ANOC_SNOC 22
+
+/* CLK Virtual */
+#define MASTER_QUP_CORE_0 0
+#define SLAVE_QUP_CORE_0 1
+
+/* MMRT Virtual */
+#define MASTER_CAMNOC_HF 0
+#define MASTER_MDP_PORT0 1
+#define SLAVE_SNOC_BIMC_RT 2
+
+/* MMNRT Virtual */
+#define MASTER_CAMNOC_SF 0
+#define MASTER_VIDEO_P0 1
+#define MASTER_VIDEO_PROC 2
+#define SLAVE_SNOC_BIMC_NRT 3
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm6350.h b/include/dt-bindings/interconnect/qcom,sm6350.h
new file mode 100644
index 000000000000..e662cede9aaa
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm6350.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Qualcomm SM6350 interconnect IDs
+ *
+ * Copyright (C) 2022 Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM6350_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM6350_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_QUP_0 1
+#define MASTER_EMMC 2
+#define MASTER_UFS_MEM 3
+#define A1NOC_SNOC_SLV 4
+#define SLAVE_SERVICE_A1NOC 5
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QUP_1 2
+#define MASTER_CRYPTO_CORE_0 3
+#define MASTER_IPA 4
+#define MASTER_QDSS_ETR 5
+#define MASTER_SDCC_2 6
+#define MASTER_USB3 7
+#define A2NOC_SNOC_SLV 8
+#define SLAVE_SERVICE_A2NOC 9
+
+#define MASTER_CAMNOC_HF0_UNCOMP 0
+#define MASTER_CAMNOC_ICP_UNCOMP 1
+#define MASTER_CAMNOC_SF_UNCOMP 2
+#define MASTER_QUP_CORE_0 3
+#define MASTER_QUP_CORE_1 4
+#define MASTER_LLCC 5
+#define SLAVE_CAMNOC_UNCOMP 6
+#define SLAVE_QUP_CORE_0 7
+#define SLAVE_QUP_CORE_1 8
+#define SLAVE_EBI_CH0 9
+
+#define MASTER_NPU 0
+#define MASTER_NPU_PROC 1
+#define SLAVE_CDSP_GEM_NOC 2
+
+#define SNOC_CNOC_MAS 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_A1NOC_CFG 2
+#define SLAVE_A2NOC_CFG 3
+#define SLAVE_AHB2PHY 4
+#define SLAVE_AHB2PHY_2 5
+#define SLAVE_AOSS 6
+#define SLAVE_BOOT_ROM 7
+#define SLAVE_CAMERA_CFG 8
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 9
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 10
+#define SLAVE_CLK_CTL 11
+#define SLAVE_RBCPR_CX_CFG 12
+#define SLAVE_RBCPR_MX_CFG 13
+#define SLAVE_CRYPTO_0_CFG 14
+#define SLAVE_DCC_CFG 15
+#define SLAVE_CNOC_DDRSS 16
+#define SLAVE_DISPLAY_CFG 17
+#define SLAVE_DISPLAY_THROTTLE_CFG 18
+#define SLAVE_EMMC_CFG 19
+#define SLAVE_GLM 20
+#define SLAVE_GRAPHICS_3D_CFG 21
+#define SLAVE_IMEM_CFG 22
+#define SLAVE_IPA_CFG 23
+#define SLAVE_CNOC_MNOC_CFG 24
+#define SLAVE_CNOC_MSS 25
+#define SLAVE_NPU_CFG 26
+#define SLAVE_PDM 27
+#define SLAVE_PIMEM_CFG 28
+#define SLAVE_PRNG 29
+#define SLAVE_QDSS_CFG 30
+#define SLAVE_QM_CFG 31
+#define SLAVE_QM_MPU_CFG 32
+#define SLAVE_QUP_0 33
+#define SLAVE_QUP_1 34
+#define SLAVE_SDCC_2 35
+#define SLAVE_SECURITY 36
+#define SLAVE_SNOC_CFG 37
+#define SLAVE_TCSR 38
+#define SLAVE_UFS_MEM_CFG 39
+#define SLAVE_USB3 40
+#define SLAVE_VENUS_CFG 41
+#define SLAVE_VENUS_THROTTLE_CFG 42
+#define SLAVE_VSENSE_CTRL_CFG 43
+#define SLAVE_SERVICE_CNOC 44
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_GEM_NOC_CFG 1
+#define SLAVE_LLCC_CFG 2
+
+#define MASTER_AMPSS_M0 0
+#define MASTER_SYS_TCU 1
+#define MASTER_GEM_NOC_CFG 2
+#define MASTER_COMPUTE_NOC 3
+#define MASTER_MNOC_HF_MEM_NOC 4
+#define MASTER_MNOC_SF_MEM_NOC 5
+#define MASTER_SNOC_GC_MEM_NOC 6
+#define MASTER_SNOC_SF_MEM_NOC 7
+#define MASTER_GRAPHICS_3D 8
+#define SLAVE_MCDMA_MS_MPU_CFG 9
+#define SLAVE_MSS_PROC_MS_MPU_CFG 10
+#define SLAVE_GEM_NOC_SNOC 11
+#define SLAVE_LLCC 12
+#define SLAVE_SERVICE_GEM_NOC 13
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_VIDEO_P0 1
+#define MASTER_VIDEO_PROC 2
+#define MASTER_CAMNOC_HF 3
+#define MASTER_CAMNOC_ICP 4
+#define MASTER_CAMNOC_SF 5
+#define MASTER_MDP_PORT0 6
+#define SLAVE_MNOC_HF_MEM_NOC 7
+#define SLAVE_MNOC_SF_MEM_NOC 8
+#define SLAVE_SERVICE_MNOC 9
+
+#define MASTER_NPU_SYS 0
+#define MASTER_NPU_NOC_CFG 1
+#define SLAVE_NPU_CAL_DP0 2
+#define SLAVE_NPU_CP 3
+#define SLAVE_NPU_INT_DMA_BWMON_CFG 4
+#define SLAVE_NPU_DPM 5
+#define SLAVE_ISENSE_CFG 6
+#define SLAVE_NPU_LLM_CFG 7
+#define SLAVE_NPU_TCM 8
+#define SLAVE_NPU_COMPUTE_NOC 9
+#define SLAVE_SERVICE_NPU_NOC 10
+
+#define MASTER_SNOC_CFG 0
+#define A1NOC_SNOC_MAS 1
+#define A2NOC_SNOC_MAS 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_PIMEM 4
+#define MASTER_GIC 5
+#define SLAVE_APPSS 6
+#define SNOC_CNOC_SLV 7
+#define SLAVE_SNOC_GEM_NOC_GC 8
+#define SLAVE_SNOC_GEM_NOC_SF 9
+#define SLAVE_OCIMEM 10
+#define SLAVE_PIMEM 11
+#define SLAVE_SERVICE_SNOC 12
+#define SLAVE_QDSS_STM 13
+#define SLAVE_TCU 14
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm7150-rpmh.h b/include/dt-bindings/interconnect/qcom,sm7150-rpmh.h
new file mode 100644
index 000000000000..1f610eb832aa
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm7150-rpmh.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Qualcomm SM7150 interconnect IDs
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM7150_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM7150_H
+
+#define MASTER_A1NOC_CFG 0
+#define MASTER_QUP_0 1
+#define MASTER_TSIF 2
+#define MASTER_EMMC 3
+#define MASTER_SDCC_2 4
+#define MASTER_SDCC_4 5
+#define MASTER_UFS_MEM 6
+#define A1NOC_SNOC_SLV 7
+#define SLAVE_SERVICE_A1NOC 8
+
+#define MASTER_A2NOC_CFG 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_QUP_1 2
+#define MASTER_CNOC_A2NOC 3
+#define MASTER_CRYPTO_CORE_0 4
+#define MASTER_IPA 5
+#define MASTER_PCIE 6
+#define MASTER_QDSS_ETR 7
+#define MASTER_USB3 8
+#define A2NOC_SNOC_SLV 9
+#define SLAVE_ANOC_PCIE_GEM_NOC 10
+#define SLAVE_SERVICE_A2NOC 11
+
+#define MASTER_CAMNOC_HF0_UNCOMP 0
+#define MASTER_CAMNOC_RT_UNCOMP 1
+#define MASTER_CAMNOC_SF_UNCOMP 2
+#define MASTER_CAMNOC_NRT_UNCOMP 3
+#define SLAVE_CAMNOC_UNCOMP 4
+
+#define MASTER_NPU 0
+#define SLAVE_CDSP_GEM_NOC 1
+
+#define MASTER_SPDM 0
+#define SNOC_CNOC_MAS 1
+#define MASTER_QDSS_DAP 2
+#define SLAVE_A1NOC_CFG 3
+#define SLAVE_A2NOC_CFG 4
+#define SLAVE_AHB2PHY_NORTH 5
+#define SLAVE_AHB2PHY_SOUTH 6
+#define SLAVE_AHB2PHY_WEST 7
+#define SLAVE_AOP 8
+#define SLAVE_AOSS 9
+#define SLAVE_CAMERA_CFG 10
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 11
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 12
+#define SLAVE_CLK_CTL 13
+#define SLAVE_CDSP_CFG 14
+#define SLAVE_RBCPR_CX_CFG 15
+#define SLAVE_RBCPR_MX_CFG 16
+#define SLAVE_CRYPTO_0_CFG 17
+#define SLAVE_CNOC_DDRSS 18
+#define SLAVE_DISPLAY_CFG 19
+#define SLAVE_DISPLAY_THROTTLE_CFG 20
+#define SLAVE_EMMC_CFG 21
+#define SLAVE_GLM 22
+#define SLAVE_GRAPHICS_3D_CFG 23
+#define SLAVE_IMEM_CFG 24
+#define SLAVE_IPA_CFG 25
+#define SLAVE_CNOC_MNOC_CFG 26
+#define SLAVE_PCIE_CFG 27
+#define SLAVE_PDM 28
+#define SLAVE_PIMEM_CFG 29
+#define SLAVE_PRNG 30
+#define SLAVE_QDSS_CFG 31
+#define SLAVE_QUP_0 32
+#define SLAVE_QUP_1 33
+#define SLAVE_SDCC_2 34
+#define SLAVE_SDCC_4 35
+#define SLAVE_SNOC_CFG 36
+#define SLAVE_SPDM_WRAPPER 37
+#define SLAVE_TCSR 38
+#define SLAVE_TLMM_NORTH 39
+#define SLAVE_TLMM_SOUTH 40
+#define SLAVE_TLMM_WEST 41
+#define SLAVE_TSIF 42
+#define SLAVE_UFS_MEM_CFG 43
+#define SLAVE_USB3 44
+#define SLAVE_VENUS_CFG 45
+#define SLAVE_VENUS_CVP_THROTTLE_CFG 46
+#define SLAVE_VENUS_THROTTLE_CFG 47
+#define SLAVE_VSENSE_CTRL_CFG 48
+#define SLAVE_CNOC_A2NOC 49
+#define SLAVE_SERVICE_CNOC 50
+
+#define MASTER_CNOC_DC_NOC 0
+#define SLAVE_GEM_NOC_CFG 1
+#define SLAVE_LLCC_CFG 2
+
+#define MASTER_AMPSS_M0 0
+#define MASTER_SYS_TCU 1
+#define MASTER_GEM_NOC_CFG 2
+#define MASTER_COMPUTE_NOC 3
+#define MASTER_MNOC_HF_MEM_NOC 4
+#define MASTER_MNOC_SF_MEM_NOC 5
+#define MASTER_GEM_NOC_PCIE_SNOC 6
+#define MASTER_SNOC_GC_MEM_NOC 7
+#define MASTER_SNOC_SF_MEM_NOC 8
+#define MASTER_GRAPHICS_3D 9
+#define SLAVE_MSS_PROC_MS_MPU_CFG 10
+#define SLAVE_GEM_NOC_SNOC 11
+#define SLAVE_LLCC 12
+#define SLAVE_SERVICE_GEM_NOC 13
+
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI_CH0 1
+
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CAMNOC_HF0 1
+#define MASTER_CAMNOC_NRT 2
+#define MASTER_CAMNOC_RT 3
+#define MASTER_CAMNOC_SF 4
+#define MASTER_MDP_PORT0 5
+#define MASTER_MDP_PORT1 6
+#define MASTER_ROTATOR 7
+#define MASTER_VIDEO_P0 8
+#define MASTER_VIDEO_P1 9
+#define MASTER_VIDEO_PROC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_MNOC_HF_MEM_NOC 12
+#define SLAVE_SERVICE_MNOC 13
+
+#define MASTER_SNOC_CFG 0
+#define A1NOC_SNOC_MAS 1
+#define A2NOC_SNOC_MAS 2
+#define MASTER_GEM_NOC_SNOC 3
+#define MASTER_PIMEM 4
+#define MASTER_GIC 5
+#define SLAVE_APPSS 6
+#define SNOC_CNOC_SLV 7
+#define SLAVE_SNOC_GEM_NOC_GC 8
+#define SLAVE_SNOC_GEM_NOC_SF 9
+#define SLAVE_OCIMEM 10
+#define SLAVE_PIMEM 11
+#define SLAVE_SERVICE_SNOC 12
+#define SLAVE_QDSS_STM 13
+#define SLAVE_TCU 14
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8150.h b/include/dt-bindings/interconnect/qcom,sm8150.h
index a25684680c42..ef292791f52e 100644
--- a/include/dt-bindings/interconnect/qcom,sm8150.h
+++ b/include/dt-bindings/interconnect/qcom,sm8150.h
@@ -121,9 +121,6 @@
#define SLAVE_LLCC 15
#define SLAVE_SERVICE_GEM_NOC 16
-#define MASTER_IPA_CORE 0
-#define SLAVE_IPA_CORE 1
-
#define MASTER_LLCC 0
#define SLAVE_EBI_CH0 1
diff --git a/include/dt-bindings/interconnect/qcom,sm8250.h b/include/dt-bindings/interconnect/qcom,sm8250.h
index 1b4d9fbe888d..2a656c02df4b 100644
--- a/include/dt-bindings/interconnect/qcom,sm8250.h
+++ b/include/dt-bindings/interconnect/qcom,sm8250.h
@@ -115,9 +115,6 @@
#define SLAVE_SERVICE_GEM_NOC_2 15
#define SLAVE_SERVICE_GEM_NOC 16
-#define MASTER_IPA_CORE 0
-#define SLAVE_IPA_CORE 1
-
#define MASTER_LLCC 0
#define SLAVE_EBI_CH0 1
@@ -169,4 +166,11 @@
#define SLAVE_QDSS_STM 17
#define SLAVE_TCU 18
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8350.h b/include/dt-bindings/interconnect/qcom,sm8350.h
index c7f7ed315aeb..2282f93607bc 100644
--- a/include/dt-bindings/interconnect/qcom,sm8350.h
+++ b/include/dt-bindings/interconnect/qcom,sm8350.h
@@ -119,9 +119,6 @@
#define SLAVE_SERVICE_GEM_NOC_1 16
#define SLAVE_SERVICE_GEM_NOC_2 17
#define SLAVE_SERVICE_GEM_NOC 18
-#define MASTER_MNOC_HF_MEM_NOC_DISP 19
-#define MASTER_MNOC_SF_MEM_NOC_DISP 20
-#define SLAVE_LLCC_DISP 21
#define MASTER_CNOC_LPASS_AG_NOC 0
#define SLAVE_LPASS_CORE_CFG 1
@@ -133,8 +130,6 @@
#define MASTER_LLCC 0
#define SLAVE_EBI1 1
-#define MASTER_LLCC_DISP 2
-#define SLAVE_EBI1_DISP 3
#define MASTER_CAMNOC_HF 0
#define MASTER_CAMNOC_ICP 1
@@ -149,11 +144,6 @@
#define SLAVE_MNOC_HF_MEM_NOC 10
#define SLAVE_MNOC_SF_MEM_NOC 11
#define SLAVE_SERVICE_MNOC 12
-#define MASTER_MDP0_DISP 13
-#define MASTER_MDP1_DISP 14
-#define MASTER_ROTATOR_DISP 15
-#define SLAVE_MNOC_HF_MEM_NOC_DISP 16
-#define SLAVE_MNOC_SF_MEM_NOC_DISP 17
#define MASTER_CDSP_NOC_CFG 0
#define MASTER_CDSP_PROC 1
diff --git a/include/dt-bindings/interconnect/qcom,sm8450.h b/include/dt-bindings/interconnect/qcom,sm8450.h
new file mode 100644
index 000000000000..8f3c5e1fb4c4
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8450.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8450_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8450_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_A1NOC_CFG 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB3_0 5
+#define SLAVE_A1NOC_SNOC 6
+#define SLAVE_SERVICE_A1NOC 7
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_2 2
+#define MASTER_A2NOC_CFG 3
+#define MASTER_CRYPTO 4
+#define MASTER_IPA 5
+#define MASTER_SENSORS_PROC 6
+#define MASTER_SP 7
+#define MASTER_QDSS_ETR 8
+#define MASTER_QDSS_ETR_1 9
+#define MASTER_SDCC_2 10
+#define SLAVE_A2NOC_SNOC 11
+#define SLAVE_SERVICE_A2NOC 12
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AHB2PHY_SOUTH 2
+#define SLAVE_AHB2PHY_NORTH 3
+#define SLAVE_AOSS 4
+#define SLAVE_CAMERA_CFG 5
+#define SLAVE_CLK_CTL 6
+#define SLAVE_CDSP_CFG 7
+#define SLAVE_RBCPR_CX_CFG 8
+#define SLAVE_RBCPR_MMCX_CFG 9
+#define SLAVE_RBCPR_MXA_CFG 10
+#define SLAVE_RBCPR_MXC_CFG 11
+#define SLAVE_CRYPTO_0_CFG 12
+#define SLAVE_CX_RDPM 13
+#define SLAVE_DISPLAY_CFG 14
+#define SLAVE_GFX3D_CFG 15
+#define SLAVE_IMEM_CFG 16
+#define SLAVE_IPA_CFG 17
+#define SLAVE_IPC_ROUTER_CFG 18
+#define SLAVE_LPASS 19
+#define SLAVE_CNOC_MSS 20
+#define SLAVE_MX_RDPM 21
+#define SLAVE_PCIE_0_CFG 22
+#define SLAVE_PCIE_1_CFG 23
+#define SLAVE_PDM 24
+#define SLAVE_PIMEM_CFG 25
+#define SLAVE_PRNG 26
+#define SLAVE_QDSS_CFG 27
+#define SLAVE_QSPI_0 28
+#define SLAVE_QUP_0 29
+#define SLAVE_QUP_1 30
+#define SLAVE_QUP_2 31
+#define SLAVE_SDCC_2 32
+#define SLAVE_SDCC_4 33
+#define SLAVE_SPSS_CFG 34
+#define SLAVE_TCSR 35
+#define SLAVE_TLMM 36
+#define SLAVE_TME_CFG 37
+#define SLAVE_UFS_MEM_CFG 38
+#define SLAVE_USB3_0 39
+#define SLAVE_VENUS_CFG 40
+#define SLAVE_VSENSE_CTRL_CFG 41
+#define SLAVE_A1NOC_CFG 42
+#define SLAVE_A2NOC_CFG 43
+#define SLAVE_DDRSS_CFG 44
+#define SLAVE_CNOC_MNOC_CFG 45
+#define SLAVE_PCIE_ANOC_CFG 46
+#define SLAVE_SNOC_CFG 47
+#define SLAVE_IMEM 48
+#define SLAVE_PIMEM 49
+#define SLAVE_SERVICE_CNOC 50
+#define SLAVE_PCIE_0 51
+#define SLAVE_PCIE_1 52
+#define SLAVE_QDSS_STM 53
+#define SLAVE_TCU 54
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_GFX3D 3
+#define MASTER_MSS_PROC 4
+#define MASTER_MNOC_HF_MEM_NOC 5
+#define MASTER_MNOC_SF_MEM_NOC 6
+#define MASTER_COMPUTE_NOC 7
+#define MASTER_ANOC_PCIE_GEM_NOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define SLAVE_GEM_NOC_CNOC 11
+#define SLAVE_LLCC 12
+#define SLAVE_MEM_NOC_PCIE_SNOC 13
+#define MASTER_MNOC_HF_MEM_NOC_DISP 14
+#define MASTER_MNOC_SF_MEM_NOC_DISP 15
+#define MASTER_ANOC_PCIE_GEM_NOC_DISP 16
+#define SLAVE_LLCC_DISP 17
+
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define MASTER_LPASS_PROC 1
+#define SLAVE_LPASS_CORE_CFG 2
+#define SLAVE_LPASS_LPI_CFG 3
+#define SLAVE_LPASS_MPU_CFG 4
+#define SLAVE_LPASS_TOP_CFG 5
+#define SLAVE_LPASS_SNOC 6
+#define SLAVE_SERVICES_LPASS_AML_NOC 7
+#define SLAVE_SERVICE_LPASS_AG_NOC 8
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+#define MASTER_LLCC_DISP 2
+#define SLAVE_EBI1_DISP 3
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP 3
+#define MASTER_CNOC_MNOC_CFG 4
+#define MASTER_ROTATOR 5
+#define MASTER_CDSP_HCP 6
+#define MASTER_VIDEO 7
+#define MASTER_VIDEO_CV_PROC 8
+#define MASTER_VIDEO_PROC 9
+#define MASTER_VIDEO_V_PROC 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_MNOC_SF_MEM_NOC 12
+#define SLAVE_SERVICE_MNOC 13
+#define MASTER_MDP_DISP 14
+#define MASTER_ROTATOR_DISP 15
+#define SLAVE_MNOC_HF_MEM_NOC_DISP 16
+#define SLAVE_MNOC_SF_MEM_NOC_DISP 17
+
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_CDSP_MEM_NOC 2
+#define SLAVE_SERVICE_NSP_NOC 3
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define MASTER_PCIE_1 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+#define SLAVE_SERVICE_PCIE_ANOC 4
+
+#define MASTER_GIC_AHB 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_LPASS_ANOC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_SNOC_GEM_NOC_GC 7
+#define SLAVE_SNOC_GEM_NOC_SF 8
+#define SLAVE_SERVICE_SNOC 9
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8550-rpmh.h b/include/dt-bindings/interconnect/qcom,sm8550-rpmh.h
new file mode 100644
index 000000000000..b38d0da7886f
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8550-rpmh.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8550_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8550_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_SDCC_4 2
+#define MASTER_UFS_MEM 3
+#define MASTER_USB3_0 4
+#define SLAVE_A1NOC_SNOC 5
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_2 1
+#define MASTER_CRYPTO 2
+#define MASTER_IPA 3
+#define MASTER_SP 4
+#define MASTER_QDSS_ETR 5
+#define MASTER_QDSS_ETR_1 6
+#define MASTER_SDCC_2 7
+#define SLAVE_A2NOC_SNOC 8
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_APPSS 3
+#define SLAVE_CAMERA_CFG 4
+#define SLAVE_CLK_CTL 5
+#define SLAVE_RBCPR_CX_CFG 6
+#define SLAVE_RBCPR_MMCX_CFG 7
+#define SLAVE_RBCPR_MXA_CFG 8
+#define SLAVE_RBCPR_MXC_CFG 9
+#define SLAVE_CPR_NSPCX 10
+#define SLAVE_CRYPTO_0_CFG 11
+#define SLAVE_CX_RDPM 12
+#define SLAVE_DISPLAY_CFG 13
+#define SLAVE_GFX3D_CFG 14
+#define SLAVE_I2C 15
+#define SLAVE_IMEM_CFG 16
+#define SLAVE_IPA_CFG 17
+#define SLAVE_IPC_ROUTER_CFG 18
+#define SLAVE_CNOC_MSS 19
+#define SLAVE_MX_RDPM 20
+#define SLAVE_PCIE_0_CFG 21
+#define SLAVE_PCIE_1_CFG 22
+#define SLAVE_PDM 23
+#define SLAVE_PIMEM_CFG 24
+#define SLAVE_PRNG 25
+#define SLAVE_QDSS_CFG 26
+#define SLAVE_QSPI_0 27
+#define SLAVE_QUP_1 28
+#define SLAVE_QUP_2 29
+#define SLAVE_SDCC_2 30
+#define SLAVE_SDCC_4 31
+#define SLAVE_SPSS_CFG 32
+#define SLAVE_TCSR 33
+#define SLAVE_TLMM 34
+#define SLAVE_UFS_MEM_CFG 35
+#define SLAVE_USB3_0 36
+#define SLAVE_VENUS_CFG 37
+#define SLAVE_VSENSE_CTRL_CFG 38
+#define SLAVE_LPASS_QTB_CFG 39
+#define SLAVE_CNOC_MNOC_CFG 40
+#define SLAVE_NSP_QTB_CFG 41
+#define SLAVE_PCIE_ANOC_CFG 42
+#define SLAVE_QDSS_STM 43
+#define SLAVE_TCU 44
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_TME_CFG 3
+#define SLAVE_CNOC_CFG 4
+#define SLAVE_DDRSS_CFG 5
+#define SLAVE_BOOT_IMEM 6
+#define SLAVE_IMEM 7
+#define SLAVE_PCIE_0 8
+#define SLAVE_PCIE_1 9
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_GFX3D 3
+#define MASTER_LPASS_GEM_NOC 4
+#define MASTER_MSS_PROC 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_COMPUTE_NOC 8
+#define MASTER_ANOC_PCIE_GEM_NOC 9
+#define MASTER_SNOC_GC_MEM_NOC 10
+#define MASTER_SNOC_SF_MEM_NOC 11
+#define SLAVE_GEM_NOC_CNOC 12
+#define SLAVE_LLCC 13
+#define SLAVE_MEM_NOC_PCIE_SNOC 14
+#define MASTER_MNOC_HF_MEM_NOC_DISP 15
+#define MASTER_ANOC_PCIE_GEM_NOC_DISP 16
+#define SLAVE_LLCC_DISP 17
+#define MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0 18
+#define MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0 19
+#define MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_0 20
+#define SLAVE_LLCC_CAM_IFE_0 21
+#define MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1 22
+#define MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1 23
+#define MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_1 24
+#define SLAVE_LLCC_CAM_IFE_1 25
+#define MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2 26
+#define MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2 27
+#define MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_2 28
+#define SLAVE_LLCC_CAM_IFE_2 29
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+#define MASTER_LLCC_DISP 2
+#define SLAVE_EBI1_DISP 3
+#define MASTER_LLCC_CAM_IFE_0 4
+#define SLAVE_EBI1_CAM_IFE_0 5
+#define MASTER_LLCC_CAM_IFE_1 6
+#define SLAVE_EBI1_CAM_IFE_1 7
+#define MASTER_LLCC_CAM_IFE_2 8
+#define SLAVE_EBI1_CAM_IFE_2 9
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP 3
+#define MASTER_CDSP_HCP 4
+#define MASTER_VIDEO 5
+#define MASTER_VIDEO_CV_PROC 6
+#define MASTER_VIDEO_PROC 7
+#define MASTER_VIDEO_V_PROC 8
+#define MASTER_CNOC_MNOC_CFG 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+#define MASTER_MDP_DISP 13
+#define SLAVE_MNOC_HF_MEM_NOC_DISP 14
+#define MASTER_CAMNOC_HF_CAM_IFE_0 15
+#define MASTER_CAMNOC_ICP_CAM_IFE_0 16
+#define MASTER_CAMNOC_SF_CAM_IFE_0 17
+#define SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0 18
+#define SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0 19
+#define MASTER_CAMNOC_HF_CAM_IFE_1 20
+#define MASTER_CAMNOC_ICP_CAM_IFE_1 21
+#define MASTER_CAMNOC_SF_CAM_IFE_1 22
+#define SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1 23
+#define SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1 24
+#define MASTER_CAMNOC_HF_CAM_IFE_2 25
+#define MASTER_CAMNOC_ICP_CAM_IFE_2 26
+#define MASTER_CAMNOC_SF_CAM_IFE_2 27
+#define SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2 28
+#define SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2 29
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define MASTER_PCIE_1 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+#define SLAVE_SERVICE_PCIE_ANOC 4
+
+#define MASTER_GIC_AHB 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_GIC 3
+#define SLAVE_SNOC_GEM_NOC_GC 4
+#define SLAVE_SNOC_GEM_NOC_SF 5
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h b/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h
new file mode 100644
index 000000000000..1216aa352d55
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8650-rpmh.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8650_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8650_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_QUP_3 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB3_0 5
+#define SLAVE_A1NOC_SNOC 6
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_2 1
+#define MASTER_CRYPTO 2
+#define MASTER_IPA 3
+#define MASTER_SP 4
+#define MASTER_QDSS_ETR 5
+#define MASTER_QDSS_ETR_1 6
+#define MASTER_SDCC_2 7
+#define SLAVE_A2NOC_SNOC 8
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_CAMERA_CFG 3
+#define SLAVE_CLK_CTL 4
+#define SLAVE_RBCPR_CX_CFG 5
+#define SLAVE_CPR_HMX 6
+#define SLAVE_RBCPR_MMCX_CFG 7
+#define SLAVE_RBCPR_MXA_CFG 8
+#define SLAVE_RBCPR_MXC_CFG 9
+#define SLAVE_CPR_NSPCX 10
+#define SLAVE_CRYPTO_0_CFG 11
+#define SLAVE_CX_RDPM 12
+#define SLAVE_DISPLAY_CFG 13
+#define SLAVE_GFX3D_CFG 14
+#define SLAVE_I2C 15
+#define SLAVE_I3C_IBI0_CFG 16
+#define SLAVE_I3C_IBI1_CFG 17
+#define SLAVE_IMEM_CFG 18
+#define SLAVE_CNOC_MSS 19
+#define SLAVE_MX_2_RDPM 20
+#define SLAVE_MX_RDPM 21
+#define SLAVE_PCIE_0_CFG 22
+#define SLAVE_PCIE_1_CFG 23
+#define SLAVE_PCIE_RSCC 24
+#define SLAVE_PDM 25
+#define SLAVE_PRNG 26
+#define SLAVE_QDSS_CFG 27
+#define SLAVE_QSPI_0 28
+#define SLAVE_QUP_3 29
+#define SLAVE_QUP_1 30
+#define SLAVE_QUP_2 31
+#define SLAVE_SDCC_2 32
+#define SLAVE_SDCC_4 33
+#define SLAVE_SPSS_CFG 34
+#define SLAVE_TCSR 35
+#define SLAVE_TLMM 36
+#define SLAVE_UFS_MEM_CFG 37
+#define SLAVE_USB3_0 38
+#define SLAVE_VENUS_CFG 39
+#define SLAVE_VSENSE_CTRL_CFG 40
+#define SLAVE_CNOC_MNOC_CFG 41
+#define SLAVE_NSP_QTB_CFG 42
+#define SLAVE_PCIE_ANOC_CFG 43
+#define SLAVE_SERVICE_CNOC_CFG 44
+#define SLAVE_QDSS_STM 45
+#define SLAVE_TCU 46
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_IPA_CFG 3
+#define SLAVE_IPC_ROUTER_CFG 4
+#define SLAVE_TME_CFG 5
+#define SLAVE_APPSS 6
+#define SLAVE_CNOC_CFG 7
+#define SLAVE_DDRSS_CFG 8
+#define SLAVE_IMEM 9
+#define SLAVE_SERVICE_CNOC 10
+#define SLAVE_PCIE_0 11
+#define SLAVE_PCIE_1 12
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_UBWC_P_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_GFX3D 4
+#define MASTER_LPASS_GEM_NOC 5
+#define MASTER_MSS_PROC 6
+#define MASTER_MNOC_HF_MEM_NOC 7
+#define MASTER_MNOC_SF_MEM_NOC 8
+#define MASTER_COMPUTE_NOC 9
+#define MASTER_ANOC_PCIE_GEM_NOC 10
+#define MASTER_SNOC_SF_MEM_NOC 11
+#define MASTER_UBWC_P 12
+#define MASTER_GIC 13
+#define SLAVE_GEM_NOC_CNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_MEM_NOC_PCIE_SNOC 16
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP 3
+#define MASTER_CDSP_HCP 4
+#define MASTER_VIDEO 5
+#define MASTER_VIDEO_CV_PROC 6
+#define MASTER_VIDEO_PROC 7
+#define MASTER_VIDEO_V_PROC 8
+#define MASTER_CNOC_MNOC_CFG 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define MASTER_PCIE_1 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+#define SLAVE_SERVICE_PCIE_ANOC 4
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define SLAVE_SNOC_GEM_NOC_SF 2
+#define MASTER_APSS_NOC 3
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8750-rpmh.h b/include/dt-bindings/interconnect/qcom,sm8750-rpmh.h
new file mode 100644
index 000000000000..30563952a646
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8750-rpmh.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8750_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8750_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_QUP_3 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB3_0 5
+#define SLAVE_A1NOC_SNOC 6
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_2 1
+#define MASTER_CRYPTO 2
+#define MASTER_IPA 3
+#define MASTER_SOCCP_AGGR_NOC 4
+#define MASTER_SP 5
+#define MASTER_QDSS_ETR 6
+#define MASTER_QDSS_ETR_1 7
+#define MASTER_SDCC_2 8
+#define SLAVE_A2NOC_SNOC 9
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_CAMERA_CFG 3
+#define SLAVE_CLK_CTL 4
+#define SLAVE_CRYPTO_0_CFG 5
+#define SLAVE_DISPLAY_CFG 6
+#define SLAVE_EVA_CFG 7
+#define SLAVE_GFX3D_CFG 8
+#define SLAVE_I2C 9
+#define SLAVE_I3C_IBI0_CFG 10
+#define SLAVE_I3C_IBI1_CFG 11
+#define SLAVE_IMEM_CFG 12
+#define SLAVE_CNOC_MSS 13
+#define SLAVE_PCIE_CFG 14
+#define SLAVE_PRNG 15
+#define SLAVE_QDSS_CFG 16
+#define SLAVE_QSPI_0 17
+#define SLAVE_QUP_3 18
+#define SLAVE_QUP_1 19
+#define SLAVE_QUP_2 20
+#define SLAVE_SDCC_2 21
+#define SLAVE_SDCC_4 22
+#define SLAVE_SPSS_CFG 23
+#define SLAVE_TCSR 24
+#define SLAVE_TLMM 25
+#define SLAVE_UFS_MEM_CFG 26
+#define SLAVE_USB3_0 27
+#define SLAVE_VENUS_CFG 28
+#define SLAVE_VSENSE_CTRL_CFG 29
+#define SLAVE_CNOC_MNOC_CFG 30
+#define SLAVE_PCIE_ANOC_CFG 31
+#define SLAVE_QDSS_STM 32
+#define SLAVE_TCU 33
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_IPA_CFG 3
+#define SLAVE_IPC_ROUTER_CFG 4
+#define SLAVE_SOCCP 5
+#define SLAVE_TME_CFG 6
+#define SLAVE_APPSS 7
+#define SLAVE_CNOC_CFG 8
+#define SLAVE_DDRSS_CFG 9
+#define SLAVE_BOOT_IMEM 10
+#define SLAVE_IMEM 11
+#define SLAVE_BOOT_IMEM_2 12
+#define SLAVE_SERVICE_CNOC 13
+#define SLAVE_PCIE_0 14
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_GFX3D 3
+#define MASTER_LPASS_GEM_NOC 4
+#define MASTER_MSS_PROC 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_COMPUTE_NOC 8
+#define MASTER_ANOC_PCIE_GEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define MASTER_UBWC_P 11
+#define MASTER_GIC 12
+#define SLAVE_UBWC_P 13
+#define SLAVE_GEM_NOC_CNOC 14
+#define SLAVE_LLCC 15
+#define SLAVE_MEM_NOC_PCIE_SNOC 16
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_NRT_ICP_SF 1
+#define MASTER_CAMNOC_RT_CDM_SF 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_MDP 4
+#define MASTER_CDSP_HCP 5
+#define MASTER_VIDEO_CV_PROC 6
+#define MASTER_VIDEO_EVA 7
+#define MASTER_VIDEO_MVP 8
+#define MASTER_VIDEO_V_PROC 9
+#define MASTER_CNOC_MNOC_CFG 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_MNOC_SF_MEM_NOC 12
+#define SLAVE_SERVICE_MNOC 13
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define SLAVE_ANOC_PCIE_GEM_NOC 2
+#define SLAVE_SERVICE_PCIE_ANOC 3
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define SLAVE_SNOC_GEM_NOC_SF 2
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h b/include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h
new file mode 100644
index 000000000000..7d9710881149
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,x1e80100-rpmh.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_X1E80100_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_X1E80100_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_SDCC_4 2
+#define MASTER_UFS_MEM 3
+#define SLAVE_A1NOC_SNOC 4
+
+#define MASTER_QUP_0 0
+#define MASTER_QUP_2 1
+#define MASTER_CRYPTO 2
+#define MASTER_SP 3
+#define MASTER_QDSS_ETR 4
+#define MASTER_QDSS_ETR_1 5
+#define MASTER_SDCC_2 6
+#define SLAVE_A2NOC_SNOC 7
+
+#define MASTER_DDR_PERF_MODE 0
+#define MASTER_QUP_CORE_0 1
+#define MASTER_QUP_CORE_1 2
+#define MASTER_QUP_CORE_2 3
+#define SLAVE_DDR_PERF_MODE 4
+#define SLAVE_QUP_CORE_0 5
+#define SLAVE_QUP_CORE_1 6
+#define SLAVE_QUP_CORE_2 7
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_AHB2PHY_2 3
+#define SLAVE_AV1_ENC_CFG 4
+#define SLAVE_CAMERA_CFG 5
+#define SLAVE_CLK_CTL 6
+#define SLAVE_CRYPTO_0_CFG 7
+#define SLAVE_DISPLAY_CFG 8
+#define SLAVE_GFX3D_CFG 9
+#define SLAVE_IMEM_CFG 10
+#define SLAVE_IPC_ROUTER_CFG 11
+#define SLAVE_PCIE_0_CFG 12
+#define SLAVE_PCIE_1_CFG 13
+#define SLAVE_PCIE_2_CFG 14
+#define SLAVE_PCIE_3_CFG 15
+#define SLAVE_PCIE_4_CFG 16
+#define SLAVE_PCIE_5_CFG 17
+#define SLAVE_PCIE_6A_CFG 18
+#define SLAVE_PCIE_6B_CFG 19
+#define SLAVE_PCIE_RSC_CFG 20
+#define SLAVE_PDM 21
+#define SLAVE_PRNG 22
+#define SLAVE_QDSS_CFG 23
+#define SLAVE_QSPI_0 24
+#define SLAVE_QUP_0 25
+#define SLAVE_QUP_1 26
+#define SLAVE_QUP_2 27
+#define SLAVE_SDCC_2 28
+#define SLAVE_SDCC_4 29
+#define SLAVE_SMMUV3_CFG 30
+#define SLAVE_TCSR 31
+#define SLAVE_TLMM 32
+#define SLAVE_UFS_MEM_CFG 33
+#define SLAVE_USB2 34
+#define SLAVE_USB3_0 35
+#define SLAVE_USB3_1 36
+#define SLAVE_USB3_2 37
+#define SLAVE_USB3_MP 38
+#define SLAVE_USB4_0 39
+#define SLAVE_USB4_1 40
+#define SLAVE_USB4_2 41
+#define SLAVE_VENUS_CFG 42
+#define SLAVE_LPASS_QTB_CFG 43
+#define SLAVE_CNOC_MNOC_CFG 44
+#define SLAVE_NSP_QTB_CFG 45
+#define SLAVE_QDSS_STM 46
+#define SLAVE_TCU 47
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AOSS 2
+#define SLAVE_TME_CFG 3
+#define SLAVE_APPSS 4
+#define SLAVE_CNOC_CFG 5
+#define SLAVE_BOOT_IMEM 6
+#define SLAVE_IMEM 7
+#define SLAVE_PCIE_0 8
+#define SLAVE_PCIE_1 9
+#define SLAVE_PCIE_2 10
+#define SLAVE_PCIE_3 11
+#define SLAVE_PCIE_4 12
+#define SLAVE_PCIE_5 13
+#define SLAVE_PCIE_6A 14
+#define SLAVE_PCIE_6B 15
+
+#define MASTER_GPU_TCU 0
+#define MASTER_PCIE_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_GFX3D 4
+#define MASTER_LPASS_GEM_NOC 5
+#define MASTER_MNOC_HF_MEM_NOC 6
+#define MASTER_MNOC_SF_MEM_NOC 7
+#define MASTER_COMPUTE_NOC 8
+#define MASTER_ANOC_PCIE_GEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define MASTER_GIC2 11
+#define SLAVE_GEM_NOC_CNOC 12
+#define SLAVE_LLCC 13
+#define SLAVE_MEM_NOC_PCIE_SNOC 14
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_AV1_ENC 0
+#define MASTER_CAMNOC_HF 1
+#define MASTER_CAMNOC_ICP 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_EVA 4
+#define MASTER_MDP 5
+#define MASTER_VIDEO 6
+#define MASTER_VIDEO_CV_PROC 7
+#define MASTER_VIDEO_V_PROC 8
+#define MASTER_CNOC_MNOC_CFG 9
+#define SLAVE_MNOC_HF_MEM_NOC 10
+#define SLAVE_MNOC_SF_MEM_NOC 11
+#define SLAVE_SERVICE_MNOC 12
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_CDSP_MEM_NOC 1
+
+#define MASTER_PCIE_NORTH 0
+#define MASTER_PCIE_SOUTH 1
+#define SLAVE_ANOC_PCIE_GEM_NOC 2
+
+#define MASTER_PCIE_3 0
+#define MASTER_PCIE_4 1
+#define MASTER_PCIE_5 2
+#define SLAVE_PCIE_NORTH 3
+
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define MASTER_PCIE_2 2
+#define MASTER_PCIE_6A 3
+#define MASTER_PCIE_6B 4
+#define SLAVE_PCIE_SOUTH 5
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_GIC1 2
+#define MASTER_USB_NOC_SNOC 3
+#define SLAVE_SNOC_GEM_NOC_SF 4
+
+#define MASTER_AGGRE_USB_NORTH 0
+#define MASTER_AGGRE_USB_SOUTH 1
+#define SLAVE_USB_NOC_SNOC 2
+
+#define MASTER_USB2 0
+#define MASTER_USB3_MP 1
+#define SLAVE_AGGRE_USB_NORTH 2
+
+#define MASTER_USB3_0 0
+#define MASTER_USB3_1 1
+#define MASTER_USB3_2 2
+#define MASTER_USB4_0 3
+#define MASTER_USB4_1 4
+#define MASTER_USB4_2 5
+#define SLAVE_AGGRE_USB_SOUTH 6
+
+#endif
diff --git a/include/dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h b/include/dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h
new file mode 100644
index 000000000000..bd415cb7b669
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ * Author: Huqiang Qin <huqiang.qin@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_IRQ_MESON_G12A_GPIO_H
+#define _DT_BINDINGS_IRQ_MESON_G12A_GPIO_H
+
+/* IRQID[11:0] - GPIOAO[11:0] */
+#define IRQID_GPIOAO_0 0
+#define IRQID_GPIOAO_1 1
+#define IRQID_GPIOAO_2 2
+#define IRQID_GPIOAO_3 3
+#define IRQID_GPIOAO_4 4
+#define IRQID_GPIOAO_5 5
+#define IRQID_GPIOAO_6 6
+#define IRQID_GPIOAO_7 7
+#define IRQID_GPIOAO_8 8
+#define IRQID_GPIOAO_9 9
+#define IRQID_GPIOAO_10 10
+#define IRQID_GPIOAO_11 11
+
+/* IRQID[27:12] - GPIOZ[15:0] */
+#define IRQID_GPIOZ_0 12
+#define IRQID_GPIOZ_1 13
+#define IRQID_GPIOZ_2 14
+#define IRQID_GPIOZ_3 15
+#define IRQID_GPIOZ_4 16
+#define IRQID_GPIOZ_5 17
+#define IRQID_GPIOZ_6 18
+#define IRQID_GPIOZ_7 19
+#define IRQID_GPIOZ_8 20
+#define IRQID_GPIOZ_9 21
+#define IRQID_GPIOZ_10 22
+#define IRQID_GPIOZ_11 23
+#define IRQID_GPIOZ_12 24
+#define IRQID_GPIOZ_13 25
+#define IRQID_GPIOZ_14 26
+#define IRQID_GPIOZ_15 27
+
+/* IRQID[36:28] - GPIOH[8:0] */
+#define IRQID_GPIOH_0 28
+#define IRQID_GPIOH_1 29
+#define IRQID_GPIOH_2 30
+#define IRQID_GPIOH_3 31
+#define IRQID_GPIOH_4 32
+#define IRQID_GPIOH_5 33
+#define IRQID_GPIOH_6 34
+#define IRQID_GPIOH_7 35
+#define IRQID_GPIOH_8 36
+
+/* IRQID[52:37] - BOOT[15:0] */
+#define IRQID_BOOT_0 37
+#define IRQID_BOOT_1 38
+#define IRQID_BOOT_2 39
+#define IRQID_BOOT_3 40
+#define IRQID_BOOT_4 41
+#define IRQID_BOOT_5 42
+#define IRQID_BOOT_6 43
+#define IRQID_BOOT_7 44
+#define IRQID_BOOT_8 45
+#define IRQID_BOOT_9 46
+#define IRQID_BOOT_10 47
+#define IRQID_BOOT_11 48
+#define IRQID_BOOT_12 49
+#define IRQID_BOOT_13 50
+#define IRQID_BOOT_14 51
+#define IRQID_BOOT_15 52
+
+/* IRQID[60:53] - GPIOC[7:0] */
+#define IRQID_GPIOC_0 53
+#define IRQID_GPIOC_1 54
+#define IRQID_GPIOC_2 55
+#define IRQID_GPIOC_3 56
+#define IRQID_GPIOC_4 57
+#define IRQID_GPIOC_5 58
+#define IRQID_GPIOC_6 59
+#define IRQID_GPIOC_7 60
+
+/* IRQID[76:61] - GPIOA[15:0] */
+#define IRQID_GPIOA_0 61
+#define IRQID_GPIOA_1 62
+#define IRQID_GPIOA_2 63
+#define IRQID_GPIOA_3 64
+#define IRQID_GPIOA_4 65
+#define IRQID_GPIOA_5 66
+#define IRQID_GPIOA_6 67
+#define IRQID_GPIOA_7 68
+#define IRQID_GPIOA_8 69
+#define IRQID_GPIOA_9 70
+#define IRQID_GPIOA_10 71
+#define IRQID_GPIOA_11 72
+#define IRQID_GPIOA_12 73
+#define IRQID_GPIOA_13 74
+#define IRQID_GPIOA_14 75
+#define IRQID_GPIOA_15 76
+
+/* IRQID[96:77] - GPIOX[19:0] */
+#define IRQID_GPIOX_0 77
+#define IRQID_GPIOX_1 78
+#define IRQID_GPIOX_2 79
+#define IRQID_GPIOX_3 80
+#define IRQID_GPIOX_4 81
+#define IRQID_GPIOX_5 82
+#define IRQID_GPIOX_6 83
+#define IRQID_GPIOX_7 84
+#define IRQID_GPIOX_8 85
+#define IRQID_GPIOX_9 86
+#define IRQID_GPIOX_10 87
+#define IRQID_GPIOX_11 88
+#define IRQID_GPIOX_12 89
+#define IRQID_GPIOX_13 90
+#define IRQID_GPIOX_14 91
+#define IRQID_GPIOX_15 92
+#define IRQID_GPIOX_16 93
+#define IRQID_GPIOX_17 94
+#define IRQID_GPIOX_18 95
+#define IRQID_GPIOX_19 96
+
+/* IRQID[99:97] - GPIOE[2:0] */
+#define IRQID_GPIOE_0 97
+#define IRQID_GPIOE_1 98
+#define IRQID_GPIOE_2 99
+
+#endif /* _DT_BINDINGS_IRQ_MESON_G12A_GPIO_H */
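
Each IRQID_* constant names the pad this GPIO interrupt controller can route onto one of its parent GIC lines. A sketch of a consumer, assuming a &gpio_intc label for the controller, the binding's two-cell specifier (pad ID, trigger type), and IRQ_TYPE_* flags from dt-bindings/interrupt-controller/irq.h:

    button {
            /* take the falling edge of pad GPIOX_3 through the routing block */
            interrupts-extended = <&gpio_intc IRQID_GPIOX_3 IRQ_TYPE_EDGE_FALLING>;
    };
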
diff --git a/include/dt-bindings/interrupt-controller/apple-aic.h b/include/dt-bindings/interrupt-controller/apple-aic.h
index 604f2bb30ac0..bf3aac0e5491 100644
--- a/include/dt-bindings/interrupt-controller/apple-aic.h
+++ b/include/dt-bindings/interrupt-controller/apple-aic.h
@@ -11,5 +11,7 @@
#define AIC_TMR_HV_VIRT 1
#define AIC_TMR_GUEST_PHYS 2
#define AIC_TMR_GUEST_VIRT 3
+#define AIC_CPU_PMU_E 4
+#define AIC_CPU_PMU_P 5
#endif
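
The two new indices extend the FIQ sources enumerated above. A sketch of a per-cluster PMU consumer, assuming the binding's existing three-cell form (AIC_FIQ, source index, flags) and a purely illustrative compatible string:

    pmu-e {
            compatible = "apple,icestorm-pmu";      /* illustrative */
            interrupt-parent = <&aic>;
            interrupts = <AIC_FIQ AIC_CPU_PMU_E IRQ_TYPE_LEVEL_HIGH>;
    };
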
diff --git a/include/dt-bindings/interrupt-controller/arm-gic.h b/include/dt-bindings/interrupt-controller/arm-gic.h
index 35b6f69b7db6..887f53363e8a 100644
--- a/include/dt-bindings/interrupt-controller/arm-gic.h
+++ b/include/dt-bindings/interrupt-controller/arm-gic.h
@@ -12,6 +12,8 @@
#define GIC_SPI 0
#define GIC_PPI 1
+#define GIC_ESPI 2
+#define GIC_EPPI 3
/*
* Interrupt specifier cell 2.
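
As with GIC_SPI and GIC_PPI, the new selectors go in the first interrupt cell and the per-class index in the second. A sketch, assuming a GICv3.1 platform that implements extended SPIs:

    espi-device {
            /* extended SPI 4; mapped into the GICv3.1 ESPI INTID range */
            interrupts = <GIC_ESPI 4 IRQ_TYPE_LEVEL_HIGH>;
    };
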
diff --git a/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h b/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
index f315d5a7f5ee..7dd04424afcc 100644
--- a/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
+++ b/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
@@ -20,4 +20,18 @@
#define ASPEED_AST2600_SCU_IC1_LPC_RESET_LO_TO_HI 0
#define ASPEED_AST2600_SCU_IC1_LPC_RESET_HI_TO_LO 1
+#define ASPEED_AST2700_SCU_IC0_PCIE_PERST_LO_TO_HI 3
+#define ASPEED_AST2700_SCU_IC0_PCIE_PERST_HI_TO_LO 2
+
+#define ASPEED_AST2700_SCU_IC1_PCIE_RCRST_LO_TO_HI 3
+#define ASPEED_AST2700_SCU_IC1_PCIE_RCRST_HI_TO_LO 2
+
+#define ASPEED_AST2700_SCU_IC2_PCIE_PERST_LO_TO_HI 3
+#define ASPEED_AST2700_SCU_IC2_PCIE_PERST_HI_TO_LO 2
+#define ASPEED_AST2700_SCU_IC2_LPC_RESET_LO_TO_HI 1
+#define ASPEED_AST2700_SCU_IC2_LPC_RESET_HI_TO_LO 0
+
+#define ASPEED_AST2700_SCU_IC3_LPC_RESET_LO_TO_HI 1
+#define ASPEED_AST2700_SCU_IC3_LPC_RESET_HI_TO_LO 0
+
#endif /* _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_SCU_IC_H_ */
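
These values are single-cell interrupt specifiers on the SCU interrupt controllers, matching the one-cell form the AST2600 entries above already use. A sketch, assuming a &scu_ic0 label for the AST2700 SCU IC0 node:

    pcie-phy {
            /* notified when SCU IC0 decodes a PERST# low-to-high transition */
            interrupts-extended = <&scu_ic0 ASPEED_AST2700_SCU_IC0_PCIE_PERST_LO_TO_HI>;
    };
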
diff --git a/include/dt-bindings/interrupt-controller/irqc-rzg2l.h b/include/dt-bindings/interrupt-controller/irqc-rzg2l.h
new file mode 100644
index 000000000000..34ce778885a1
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/irqc-rzg2l.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/G2L family IRQC bindings.
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_IRQC_RZG2L_H
+#define __DT_BINDINGS_IRQC_RZG2L_H
+
+/* NMI maps to SPI0 */
+#define RZG2L_NMI 0
+
+/* IRQ0-7 map to SPI1-8 */
+#define RZG2L_IRQ0 1
+#define RZG2L_IRQ1 2
+#define RZG2L_IRQ2 3
+#define RZG2L_IRQ3 4
+#define RZG2L_IRQ4 5
+#define RZG2L_IRQ5 6
+#define RZG2L_IRQ6 7
+#define RZG2L_IRQ7 8
+
+#endif /* __DT_BINDINGS_IRQC_RZG2L_H */
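
The comments above encode the fixed IRQC routing: the NMI pin maps to SPI0 and external pins IRQ0-IRQ7 map to SPI1-SPI8. Consumers pass one of these indices plus a trigger type; a sketch assuming an &irqc label and a PHY wired to the IRQ0 pin:

    ethernet-phy@7 {
            interrupts-extended = <&irqc RZG2L_IRQ0 IRQ_TYPE_LEVEL_LOW>;
    };
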
diff --git a/include/dt-bindings/leds/common.h b/include/dt-bindings/leds/common.h
index 52b619d44ba2..4f017bea0123 100644
--- a/include/dt-bindings/leds/common.h
+++ b/include/dt-bindings/leds/common.h
@@ -33,7 +33,12 @@
#define LED_COLOR_ID_MULTI 8 /* For multicolor LEDs */
#define LED_COLOR_ID_RGB 9 /* For multicolor LEDs that can do arbitrary color,
so this would include RGBW and similar */
-#define LED_COLOR_ID_MAX 10
+#define LED_COLOR_ID_PURPLE 10
+#define LED_COLOR_ID_ORANGE 11
+#define LED_COLOR_ID_PINK 12
+#define LED_COLOR_ID_CYAN 13
+#define LED_COLOR_ID_LIME 14
+#define LED_COLOR_ID_MAX 15
/* Standard LED functions */
/* Keyboard LEDs, usually it would be input4::capslock etc. */
@@ -41,6 +46,7 @@
#define LED_FUNCTION_CAPSLOCK "capslock"
#define LED_FUNCTION_SCROLLLOCK "scrolllock"
#define LED_FUNCTION_NUMLOCK "numlock"
+#define LED_FUNCTION_FNLOCK "fnlock"
/* Obsolete equivalents: "tpacpi::thinklight" (IBM/Lenovo Thinkpads),
"lp5523:kb{1,2,3,4,5,6}" (Nokia N900) */
#define LED_FUNCTION_KBD_BACKLIGHT "kbd_backlight"
@@ -60,6 +66,13 @@
#define LED_FUNCTION_MICMUTE "micmute"
#define LED_FUNCTION_MUTE "mute"
+/* Used for player LEDs as found on game controllers from e.g. Nintendo, Sony. */
+#define LED_FUNCTION_PLAYER1 "player-1"
+#define LED_FUNCTION_PLAYER2 "player-2"
+#define LED_FUNCTION_PLAYER3 "player-3"
+#define LED_FUNCTION_PLAYER4 "player-4"
+#define LED_FUNCTION_PLAYER5 "player-5"
+
/* Miscellaneous functions. Use functions above if you can. */
#define LED_FUNCTION_ACTIVITY "activity"
#define LED_FUNCTION_ALARM "alarm"
@@ -78,17 +91,24 @@
#define LED_FUNCTION_INDICATOR "indicator"
#define LED_FUNCTION_LAN "lan"
#define LED_FUNCTION_MAIL "mail"
+#define LED_FUNCTION_MOBILE "mobile"
#define LED_FUNCTION_MTD "mtd"
#define LED_FUNCTION_PANIC "panic"
#define LED_FUNCTION_PROGRAMMING "programming"
#define LED_FUNCTION_RX "rx"
#define LED_FUNCTION_SD "sd"
+#define LED_FUNCTION_SPEED_LAN "speed-lan"
+#define LED_FUNCTION_SPEED_WAN "speed-wan"
#define LED_FUNCTION_STANDBY "standby"
#define LED_FUNCTION_TORCH "torch"
#define LED_FUNCTION_TX "tx"
#define LED_FUNCTION_USB "usb"
#define LED_FUNCTION_WAN "wan"
+#define LED_FUNCTION_WAN_ONLINE "wan-online"
#define LED_FUNCTION_WLAN "wlan"
+#define LED_FUNCTION_WLAN_2GHZ "wlan-2ghz"
+#define LED_FUNCTION_WLAN_5GHZ "wlan-5ghz"
+#define LED_FUNCTION_WLAN_6GHZ "wlan-6ghz"
#define LED_FUNCTION_WPS "wps"
#endif /* __DT_BINDINGS_LEDS_H */
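Usage sketch: LED nodes pair a color with a function (plus an optional
function-enumerator); the GPIO controller and pin below are illustrative:

	#include <dt-bindings/gpio/gpio.h>
	#include <dt-bindings/leds/common.h>

	led-wlan-2ghz {
		color = <LED_COLOR_ID_CYAN>;
		function = LED_FUNCTION_WLAN_2GHZ;
		gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
	};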
diff --git a/include/dt-bindings/leds/leds-lp55xx.h b/include/dt-bindings/leds/leds-lp55xx.h
new file mode 100644
index 000000000000..a4fb4567715d
--- /dev/null
+++ b/include/dt-bindings/leds/leds-lp55xx.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef _DT_BINDINGS_LEDS_LP55XX_H
+#define _DT_BINDINGS_LEDS_LP55XX_H
+
+#define LP55XX_CP_OFF 0
+#define LP55XX_CP_BYPASS 1
+#define LP55XX_CP_BOOST 2
+#define LP55XX_CP_AUTO 3
+
+#endif /* _DT_BINDINGS_LEDS_LP55XX_H */
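Usage sketch: these values select the charge-pump mode of an LP55xx LED
controller. Assuming the binding names the property ti,charge-pump-mode (the
chip, I2C address and mode below are illustrative):

	#include <dt-bindings/leds/leds-lp55xx.h>

	led-controller@32 {
		compatible = "ti,lp5562";
		reg = <0x32>;
		ti,charge-pump-mode = <LP55XX_CP_AUTO>;
	};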
diff --git a/include/dt-bindings/leds/rt4831-backlight.h b/include/dt-bindings/leds/rt4831-backlight.h
new file mode 100644
index 000000000000..125c6351bba0
--- /dev/null
+++ b/include/dt-bindings/leds/rt4831-backlight.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for rt4831 backlight bindings.
+ *
+ * Copyright (C) 2020, Richtek Technology Corp.
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#ifndef _DT_BINDINGS_RT4831_BACKLIGHT_H
+#define _DT_BINDINGS_RT4831_BACKLIGHT_H
+
+#define RT4831_BLOVPLVL_17V 0
+#define RT4831_BLOVPLVL_21V 1
+#define RT4831_BLOVPLVL_25V 2
+#define RT4831_BLOVPLVL_29V 3
+
+#define RT4831_BLED_CH1EN (1 << 0)
+#define RT4831_BLED_CH2EN (1 << 1)
+#define RT4831_BLED_CH3EN (1 << 2)
+#define RT4831_BLED_CH4EN (1 << 3)
+#define RT4831_BLED_ALLCHEN ((1 << 4) - 1)
+
+#endif /* _DT_BINDINGS_RT4831_BACKLIGHT_H */
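Usage sketch: the OVP levels and channel-enable bits feed the RT4831
backlight properties; the property names richtek,bled-ovp-sel and
richtek,channel-use, and their 8-bit cell size, are assumptions based on the
backlight binding:

	#include <dt-bindings/leds/rt4831-backlight.h>

	backlight {
		compatible = "richtek,rt4831-backlight";
		richtek,bled-ovp-sel = /bits/ 8 <RT4831_BLOVPLVL_21V>;
		richtek,channel-use = /bits/ 8 <RT4831_BLED_ALLCHEN>;
	};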
diff --git a/include/dt-bindings/mailbox/mediatek,mt8188-gce.h b/include/dt-bindings/mailbox/mediatek,mt8188-gce.h
new file mode 100644
index 000000000000..119865787b47
--- /dev/null
+++ b/include/dt-bindings/mailbox/mediatek,mt8188-gce.h
@@ -0,0 +1,967 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ *
+ */
+#ifndef _DT_BINDINGS_GCE_MT8188_H
+#define _DT_BINDINGS_GCE_MT8188_H
+
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_1 1
+#define CMDQ_THR_PRIO_2 2
+#define CMDQ_THR_PRIO_3 3
+#define CMDQ_THR_PRIO_4 4
+#define CMDQ_THR_PRIO_5 5
+#define CMDQ_THR_PRIO_6 6
+#define CMDQ_THR_PRIO_HIGHEST 7
+
+#define SUBSYS_1400XXXX 0
+#define SUBSYS_1401XXXX 1
+#define SUBSYS_1402XXXX 2
+#define SUBSYS_1c00XXXX 3
+#define SUBSYS_1c01XXXX 4
+#define SUBSYS_1c02XXXX 5
+#define SUBSYS_1c10XXXX 6
+#define SUBSYS_1c11XXXX 7
+#define SUBSYS_1c12XXXX 8
+#define SUBSYS_14f0XXXX 9
+#define SUBSYS_14f1XXXX 10
+#define SUBSYS_14f2XXXX 11
+#define SUBSYS_1800XXXX 12
+#define SUBSYS_1801XXXX 13
+#define SUBSYS_1802XXXX 14
+#define SUBSYS_1803XXXX 15
+#define SUBSYS_1032XXXX 16
+#define SUBSYS_1033XXXX 17
+#define SUBSYS_1600XXXX 18
+#define SUBSYS_1601XXXX 19
+#define SUBSYS_14e0XXXX 20
+#define SUBSYS_1c20XXXX 21
+#define SUBSYS_1c30XXXX 22
+#define SUBSYS_1c40XXXX 23
+#define SUBSYS_1c50XXXX 24
+#define SUBSYS_1c60XXXX 25
+#define SUBSYS_NO_SUPPORT 99
+
+#define CMDQ_EVENT_IMG_SOF 0
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_0 1
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_1 2
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_2 3
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_3 4
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_4 5
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_5 6
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_6 7
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_7 8
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_8 9
+#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_9 10
+#define CMDQ_EVENT_IMG_TRAW0_DMA_ERROR_INT 11
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_0 12
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_1 13
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_2 14
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_3 15
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_4 16
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_5 17
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_6 18
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_7 19
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_8 20
+#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_9 21
+#define CMDQ_EVENT_IMG_TRAW1_DMA_ERROR_INT 22
+#define CMDQ_EVENT_IMG_ADL_RESERVED 23
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_0 24
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_1 25
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_2 26
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_3 27
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_4 28
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_5 29
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_6 30
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_7 31
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_8 32
+#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_9 33
+#define CMDQ_EVENT_IMG_DIP_DMA_ERR 34
+#define CMDQ_EVENT_IMG_DIP_NR_DMA_ERR 35
+#define CMDQ_EVENT_DIP_DUMMY_0 36
+#define CMDQ_EVENT_DIP_DUMMY_1 37
+#define CMDQ_EVENT_DIP_DUMMY_2 38
+#define CMDQ_EVENT_IMG_WPE_EIS_GCE_FRAME_DONE 39
+#define CMDQ_EVENT_IMG_WPE_EIS_DONE_SYNC_OUT 40
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_0 41
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_1 42
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_2 43
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_3 44
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_4 45
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_5 46
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_6 47
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_7 48
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_8 49
+#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_9 50
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_0 51
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_1 52
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_2 53
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_3 54
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_4 55
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_5 56
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_6 57
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_7 58
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_8 59
+#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_9 60
+#define CMDQ_EVENT_IMG_PQDIP_A_DMA_ERR 61
+#define CMDQ_EVENT_WPE0_DUMMY_0 62
+#define CMDQ_EVENT_WPE0_DUMMY_1 63
+#define CMDQ_EVENT_WPE0_DUMMY_2 64
+#define CMDQ_EVENT_IMG_WPE_TNR_GCE_FRAME_DONE 65
+#define CMDQ_EVENT_IMG_WPE_TNR_DONE_SYNC_OUT 66
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_0 67
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_1 68
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_2 69
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_3 70
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_4 71
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_5 72
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_6 73
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_7 74
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_8 75
+#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_9 76
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_0 77
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_1 78
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_2 79
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_3 80
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_4 81
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_5 82
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_6 83
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_7 84
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_8 85
+#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_9 86
+#define CMDQ_EVENT_IMG_PQDIP_B_DMA_ERR 87
+#define CMDQ_EVENT_WPE1_DUMMY_0 88
+#define CMDQ_EVENT_WPE1_DUMMY_1 89
+#define CMDQ_EVENT_WPE1_DUMMY_2 90
+#define CMDQ_EVENT_IMG_WPE_LITE_GCE_FRAME_DONE 91
+#define CMDQ_EVENT_IMG_WPE_LITE_DONE_SYNC_OUT 92
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_0 93
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_1 94
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_2 95
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_3 96
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_4 97
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_5 98
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_6 99
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_7 100
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_8 101
+#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_9 102
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_0 103
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_1 104
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_2 105
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_3 106
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_4 107
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_5 108
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_6 109
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_7 110
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_8 111
+#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_9 112
+#define CMDQ_EVENT_IMG_XTRAW_DMA_ERR_EVENT 113
+#define CMDQ_EVENT_WPE2_DUMMY_0 114
+#define CMDQ_EVENT_WPE2_DUMMY_1 115
+#define CMDQ_EVENT_WPE2_DUMMY_2 116
+#define CMDQ_EVENT_IMG_IMGSYS_IPE_DUMMY 117
+#define CMDQ_EVENT_IMG_IMGSYS_IPE_FDVT_DONE 118
+#define CMDQ_EVENT_IMG_IMGSYS_IPE_ME_DONE 119
+#define CMDQ_EVENT_IMG_IMGSYS_IPE_DVS_DONE 120
+#define CMDQ_EVENT_IMG_IMGSYS_IPE_DVP_DONE 121
+#define CMDQ_EVENT_FDVT1_RESERVED 122
+#define CMDQ_EVENT_IMG_ENG_EVENT 123
+#define CMDQ_EVENT_CAMSUBA_SW_PASS1_DONE 129
+#define CMDQ_EVENT_CAMSUBB_SW_PASS1_DONE 130
+#define CMDQ_EVENT_CAMSUBC_SW_PASS1_DONE 131
+#define CMDQ_EVENT_GCAMSV_A_1_SW_PASS1_DONE 132
+#define CMDQ_EVENT_GCAMSV_A_2_SW_PASS1_DONE 133
+#define CMDQ_EVENT_GCAMSV_B_1_SW_PASS1_DONE 134
+#define CMDQ_EVENT_GCAMSV_B_2_SW_PASS1_DONE 135
+#define CMDQ_EVENT_GCAMSV_C_1_SW_PASS1_DONE 136
+#define CMDQ_EVENT_GCAMSV_C_2_SW_PASS1_DONE 137
+#define CMDQ_EVENT_GCAMSV_D_1_SW_PASS1_DONE 138
+#define CMDQ_EVENT_GCAMSV_D_2_SW_PASS1_DONE 139
+#define CMDQ_EVENT_GCAMSV_E_1_SW_PASS1_DONE 140
+#define CMDQ_EVENT_GCAMSV_E_2_SW_PASS1_DONE 141
+#define CMDQ_EVENT_GCAMSV_F_1_SW_PASS1_DONE 142
+#define CMDQ_EVENT_GCAMSV_F_2_SW_PASS1_DONE 143
+#define CMDQ_EVENT_GCAMSV_G_1_SW_PASS1_DONE 144
+#define CMDQ_EVENT_GCAMSV_G_2_SW_PASS1_DONE 145
+#define CMDQ_EVENT_GCAMSV_H_1_SW_PASS1_DONE 146
+#define CMDQ_EVENT_GCAMSV_H_2_SW_PASS1_DONE 147
+#define CMDQ_EVENT_GCAMSV_I_1_SW_PASS1_DONE 148
+#define CMDQ_EVENT_GCAMSV_I_2_SW_PASS1_DONE 149
+#define CMDQ_EVENT_GCAMSV_J_1_SW_PASS1_DONE 150
+#define CMDQ_EVENT_GCAMSV_J_2_SW_PASS1_DONE 151
+#define CMDQ_EVENT_MRAW_0_SW_PASS1_DONE 152
+#define CMDQ_EVENT_MRAW_1_SW_PASS1_DONE 153
+#define CMDQ_EVENT_MRAW_2_SW_PASS1_DONE 154
+#define CMDQ_EVENT_MRAW_3_SW_PASS1_DONE 155
+#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 156
+#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL 157
+#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 158
+#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 159
+#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 160
+#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 161
+#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 162
+#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 163
+#define CMDQ_EVENT_SENINF_CAM8_FIFO_FULL 164
+#define CMDQ_EVENT_SENINF_CAM9_FIFO_FULL 165
+#define CMDQ_EVENT_SENINF_CAM10_FIFO_FULL 166
+#define CMDQ_EVENT_SENINF_CAM11_FIFO_FULL 167
+#define CMDQ_EVENT_SENINF_CAM12_FIFO_FULL 168
+#define CMDQ_EVENT_SENINF_CAM13_FIFO_FULL 169
+#define CMDQ_EVENT_SENINF_CAM14_FIFO_FULL 170
+#define CMDQ_EVENT_SENINF_CAM15_FIFO_FULL 171
+#define CMDQ_EVENT_SENINF_CAM16_FIFO_FULL 172
+#define CMDQ_EVENT_SENINF_CAM17_FIFO_FULL 173
+#define CMDQ_EVENT_SENINF_CAM18_FIFO_FULL 174
+#define CMDQ_EVENT_SENINF_CAM19_FIFO_FULL 175
+#define CMDQ_EVENT_SENINF_CAM20_FIFO_FULL 176
+#define CMDQ_EVENT_SENINF_CAM21_FIFO_FULL 177
+#define CMDQ_EVENT_SENINF_CAM22_FIFO_FULL 178
+#define CMDQ_EVENT_SENINF_CAM23_FIFO_FULL 179
+#define CMDQ_EVENT_SENINF_CAM24_FIFO_FULL 180
+#define CMDQ_EVENT_SENINF_CAM25_FIFO_FULL 181
+#define CMDQ_EVENT_SENINF_CAM26_FIFO_FULL 182
+#define CMDQ_EVENT_TG_OVRUN_MRAW0_INT 183
+#define CMDQ_EVENT_TG_OVRUN_MRAW1_INT 184
+#define CMDQ_EVENT_TG_OVRUN_MRAW2_INT 185
+#define CMDQ_EVENT_TG_OVRUN_MRAW3_INT 186
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW0_INT 187
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW1_INT 188
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW2_INT 189
+#define CMDQ_EVENT_DMA_R1_ERROR_MRAW3_INT 190
+#define CMDQ_EVENT_PDA0_IRQO_EVENT_DONE_D1 191
+#define CMDQ_EVENT_PDA1_IRQO_EVENT_DONE_D1 192
+#define CMDQ_EVENT_CAM_SUBA_TG_INT1 193
+#define CMDQ_EVENT_CAM_SUBA_TG_INT2 194
+#define CMDQ_EVENT_CAM_SUBA_TG_INT3 195
+#define CMDQ_EVENT_CAM_SUBA_TG_INT4 196
+#define CMDQ_EVENT_CAM_SUBB_TG_INT1 197
+#define CMDQ_EVENT_CAM_SUBB_TG_INT2 198
+#define CMDQ_EVENT_CAM_SUBB_TG_INT3 199
+#define CMDQ_EVENT_CAM_SUBB_TG_INT4 200
+#define CMDQ_EVENT_CAM_SUBC_TG_INT1 201
+#define CMDQ_EVENT_CAM_SUBC_TG_INT2 202
+#define CMDQ_EVENT_CAM_SUBC_TG_INT3 203
+#define CMDQ_EVENT_CAM_SUBC_TG_INT4 204
+#define CMDQ_EVENT_CAM_SUBA_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 205
+#define CMDQ_EVENT_CAM_SUBA_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 206
+#define CMDQ_EVENT_CAM_SUBA_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 207
+#define CMDQ_EVENT_CAM_SUBA_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 208
+#define CMDQ_EVENT_CAM_SUBB_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 209
+#define CMDQ_EVENT_CAM_SUBB_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 210
+#define CMDQ_EVENT_CAM_SUBB_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 211
+#define CMDQ_EVENT_CAM_SUBB_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 212
+#define CMDQ_EVENT_CAM_SUBC_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 213
+#define CMDQ_EVENT_CAM_SUBC_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 214
+#define CMDQ_EVENT_CAM_SUBC_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 215
+#define CMDQ_EVENT_CAM_SUBC_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 216
+#define CMDQ_EVENT_RAW_SEL_SOF_SUBA 217
+#define CMDQ_EVENT_RAW_SEL_SOF_SUBB 218
+#define CMDQ_EVENT_RAW_SEL_SOF_SUBC 219
+#define CMDQ_EVENT_CAM_SUBA_RING_BUFFER_OVERFLOW_INT_IN 220
+#define CMDQ_EVENT_CAM_SUBB_RING_BUFFER_OVERFLOW_INT_IN 221
+#define CMDQ_EVENT_CAM_SUBC_RING_BUFFER_OVERFLOW_INT_IN 222
+#define CMDQ_EVENT_VPP0_MDP_RDMA_SOF 256
+#define CMDQ_EVENT_VPP0_MDP_FG_SOF 257
+#define CMDQ_EVENT_VPP0_STITCH_SOF 258
+#define CMDQ_EVENT_VPP0_MDP_HDR_SOF 259
+#define CMDQ_EVENT_VPP0_MDP_AAL_SOF 260
+#define CMDQ_EVENT_VPP0_MDP_RSZ_IN_RSZ_SOF 261
+#define CMDQ_EVENT_VPP0_MDP_TDSHP_SOF 262
+#define CMDQ_EVENT_VPP0_DISP_COLOR_SOF 263
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_SOF 264
+#define CMDQ_EVENT_VPP0_VPP_PADDING_IN_PADDING_SOF 265
+#define CMDQ_EVENT_VPP0_MDP_TCC_IN_SOF 266
+#define CMDQ_EVENT_VPP0_MDP_WROT_SOF 267
+#define CMDQ_EVENT_VPP0_WARP0_MMSYS_TOP_RELAY_SOF_PRE 269
+#define CMDQ_EVENT_VPP0_WARP1_MMSYS_TOP_RELAY_SOF_PRE 270
+#define CMDQ_EVENT_VPP0_VPP1_MMSYS_TOP_RELAY_SOF 271
+#define CMDQ_EVENT_VPP0_VPP1_IN_MMSYS_TOP_RELAY_SOF_PRE 272
+#define CMDQ_EVENT_VPP0_DISP_RDMA_SOF 273
+#define CMDQ_EVENT_VPP0_DISP_WDMA_SOF 274
+#define CMDQ_EVENT_VPP0_MDP_HMS_SOF 275
+#define CMDQ_EVENT_VPP0_MDP_RDMA_FRAME_DONE 288
+#define CMDQ_EVENT_VPP0_MDP_FG_TILE_DONE 289
+#define CMDQ_EVENT_VPP0_STITCH_FRAME_DONE 290
+#define CMDQ_EVENT_VPP0_MDP_HDR_FRAME_DONE 291
+#define CMDQ_EVENT_VPP0_MDP_AAL_FRAME_DONE 292
+#define CMDQ_EVENT_VPP0_MDP_RSZ_FRAME_DONE 293
+#define CMDQ_EVENT_VPP0_MDP_TDSHP_FRAME_DONE 294
+#define CMDQ_EVENT_VPP0_DISP_COLOR_FRAME_DONE 295
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_FRAME_DONE 296
+#define CMDQ_EVENT_VPP0_VPP_PADDING_IN_PADDING_FRAME_DONE 297
+#define CMDQ_EVENT_VPP0_MDP_TCC_TCC_FRAME_DONE 298
+#define CMDQ_EVENT_VPP0_MDP_WROT_VIDO_WDONE 299
+#define CMDQ_EVENT_VPP0_DISP_RDMA_FRAME_DONE 305
+#define CMDQ_EVENT_VPP0_DISP_WDMA_FRAME_DONE 306
+#define CMDQ_EVENT_VPP0_MDP_HMS_FRAME_DONE 307
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_0 320
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_1 321
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_2 322
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_3 323
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_4 324
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_5 325
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_6 326
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_7 327
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_8 328
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_9 329
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_10 330
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_11 331
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_12 332
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_13 333
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_14 334
+#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_15 335
+#define CMDQ_EVENT_VPP0_DISP_RDMA_0_UNDERRUN 336
+#define CMDQ_EVENT_VPP0_DISP_RDMA_1_UNDERRUN 337
+#define CMDQ_EVENT_VPP0_U_MERGE4_UNDERRUN 338
+#define CMDQ_EVENT_VPP0_U_VPP_SPLIT_VIDEO_0_OVERFLOW 339
+#define CMDQ_EVENT_VPP0_U_VPP_SPLIT_VIDEO_1_OVERFLOW 340
+#define CMDQ_EVENT_VPP0_DSI_0_UNDERRUN 341
+#define CMDQ_EVENT_VPP0_DSI_1_UNDERRUN 342
+#define CMDQ_EVENT_VPP0_DP_INTF_0 343
+#define CMDQ_EVENT_VPP0_DP_INTF_1 344
+#define CMDQ_EVENT_VPP0_DPI_0 345
+#define CMDQ_EVENT_VPP0_DPI_1 346
+#define CMDQ_EVENT_VPP0_MDP_RDMA_SW_RST_DONE 352
+#define CMDQ_EVENT_VPP0_MDP_RDMA_PM_VALID_EVENT 353
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_FRAME_RESET_DONE_PULSE 354
+#define CMDQ_EVENT_VPP0_MDP_WROT_SW_RST_DONE 355
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_0 356
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_1 357
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_2 358
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_3 359
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_4 360
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_5 361
+#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_6 362
+#define CMDQ_EVENT_VPP0_DISP_RDMA_DISP_RDMA_VALID_EVENT 363
+#define CMDQ_EVENT_VPP0_DISP_RDMA_DISP_RDMA_TARGET_LINE_EVENT 364
+#define CMDQ_EVENT_VPP0_DISP_WDMA_SW_RST_DONE 365
+#define CMDQ_EVENT_VPP0_DISP_WDMA_WDMA_VALID_EVENT 366
+#define CMDQ_EVENT_VPP0_DISP_WDMA_WDMA_TARGET_LINE_EVENT 367
+#define CMDQ_EVENT_VPP1_HDMI_META_SOF 384
+#define CMDQ_EVENT_VPP1_DGI_SOF 385
+#define CMDQ_EVENT_VPP1_VPP_SPLIT_SOF 386
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_TCC_SOF 387
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RDMA_SOF 388
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RDMA_SOF 389
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RDMA_SOF 390
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_FG_SOF 391
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_FG_SOF 392
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_FG_SOF 393
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_HDR_SOF 394
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_HDR_SOF 395
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_HDR_SOF 396
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_AAL_SOF 397
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_AAL_SOF 398
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_AAL_SOF 399
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RSZ_SOF 400
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RSZ_SOF 401
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RSZ_SOF 402
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_TDSHP_SOF 403
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_TDSHP_SOF 404
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_TDSHP_SOF 405
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_MERGE_SOF 406
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_MERGE_SOF 407
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_COLOR_SOF 408
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_COLOR_SOF 409
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_COLOR_SOF 410
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_SOF 411
+#define CMDQ_EVENT_VPP1_SVPP1_VPP_PAD_SOF 412
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_PAD_SOF 413
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_PAD_SOF 414
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_SOF 415
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_SOF 416
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_SOF 417
+#define CMDQ_EVENT_VPP1_VPP0_DL_IRLY_SOF 418
+#define CMDQ_EVENT_VPP1_VPP0_DL_ORLY_SOF 419
+#define CMDQ_EVENT_VPP1_VDO0_DL_ORLY_0_SOF 420
+#define CMDQ_EVENT_VPP1_VDO0_DL_ORLY_1_SOF 421
+#define CMDQ_EVENT_VPP1_VDO1_DL_ORLY_0_SOF 422
+#define CMDQ_EVENT_VPP1_VDO1_DL_ORLY_1_SOF 423
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RDMA_FRAME_DONE 424
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RDMA_FRAME_DONE 425
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RDMA_FRAME_DONE 426
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_FRAME_DONE 427
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_FRAME_DONE 428
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_FRAME_DONE 429
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_FRAME_DONE 430
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_RSZ_FRAME_DONE 431
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_RSZ_FRAME_DONE 432
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RSZ_FRAME_DONE 433
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_FG_TILE_DONE 434
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_FG_TILE_DONE 435
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_FG_TILE_DONE 436
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_HDR_FRAME_DONE 437
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_HDR_FRAME_DONE 438
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_HDR_FRAME_DONE 439
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_AAL_FRAME_DONE 440
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_AAL_FRAME_DONE 441
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_AAL_FRAME_DONE 442
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_TDSHP_FRAME_DONE 443
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_TDSHP_FRAME_DONE 444
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_TDSHP_FRAME_DONE 445
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_COLOR_FRAME_DONE 446
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_COLOR_FRAME_DONE 447
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_COLOR_FRAME_DONE 448
+#define CMDQ_EVENT_VPP1_SVPP1_VPP_PAD_FRAME_DONE 449
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_PAD_FRAME_DONE 450
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_PAD_FRAME_DONE 451
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_TCC_FRAME_DONE 452
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_0 456
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_1 457
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_2 458
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_3 459
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_4 460
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_5 461
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_6 462
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_7 463
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_8 464
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_9 465
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_10 466
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_11 467
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_12 468
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_13 469
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_14 470
+#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_15 471
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_0 472
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_1 473
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_2 474
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_3 475
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_4 476
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_5 477
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_6 478
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_7 479
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_8 480
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_9 481
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_10 482
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_11 483
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_12 484
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_13 485
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_14 486
+#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_15 487
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_0 488
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_1 489
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_2 490
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_3 491
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_4 492
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_5 493
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_6 494
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_7 495
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_8 496
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_9 497
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_10 498
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_11 499
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_12 500
+#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_13 501
+#define CMDQ_EVENT_VPP1_SVPP3_VPP_MERGE_GCE_EVENT 502
+#define CMDQ_EVENT_VPP1_SVPP2_VPP_MERGE_GCE_EVENT 503
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_GCE_EVENT 504
+#define CMDQ_EVENT_VPP1_VPP_SPLIT_DGI_GCE_EVENT 505
+#define CMDQ_EVENT_VPP1_VPP_SPLIT_HDMI_GCE_EVENT 506
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_SW_RST_DONE_GCE_EVENT 507
+#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_SW_RST_DONE_GCE_EVENT 508
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_SW_RST_DONE_GCE_EVENT 509
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_NEW_EVENT_0 510
+#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_NEW_EVENT_1 511
+#define CMDQ_EVENT_VDO0_DISP_OVL0_SOF 512
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_SOF 513
+#define CMDQ_EVENT_VDO0_DISP_RDMA0_SOF 514
+#define CMDQ_EVENT_VDO0_DISP_COLOR0_SOF 515
+#define CMDQ_EVENT_VDO0_DISP_CCORR0_SOF 516
+#define CMDQ_EVENT_VDO0_DISP_AAL0_SOF 517
+#define CMDQ_EVENT_VDO0_DISP_GAMMA0_SOF 518
+#define CMDQ_EVENT_VDO0_DISP_DITHER0_SOF 519
+#define CMDQ_EVENT_VDO0_DSI0_SOF 520
+#define CMDQ_EVENT_VDO0_DSC_WRAP0C0_SOF 521
+#define CMDQ_EVENT_VDO0_DISP_OVL1_SOF 522
+#define CMDQ_EVENT_VDO0_DISP_WDMA1_SOF 523
+#define CMDQ_EVENT_VDO0_DISP_RDMA1_SOF 524
+#define CMDQ_EVENT_VDO0_DISP_COLOR1_SOF 525
+#define CMDQ_EVENT_VDO0_DISP_CCORR1_SOF 526
+#define CMDQ_EVENT_VDO0_DISP_AAL1_SOF 527
+#define CMDQ_EVENT_VDO0_DISP_GAMMA1_SOF 528
+#define CMDQ_EVENT_VDO0_DISP_DITHER1_SOF 529
+#define CMDQ_EVENT_VDO0_DSI1_SOF 530
+#define CMDQ_EVENT_VDO0_DSC_WRAP0C1_SOF 531
+#define CMDQ_EVENT_VDO0_VPP_MERGE0_SOF 532
+#define CMDQ_EVENT_VDO0_DP_INTF0_SOF 533
+#define CMDQ_EVENT_VDO0_DISP_DPI0_SOF 534
+#define CMDQ_EVENT_VDO0_DISP_DPI1_SOF 535
+#define CMDQ_EVENT_VDO0_DISP_POSTMASK0_SOF 536
+#define CMDQ_EVENT_VDO0_MDP_WROT0_SOF 537
+#define CMDQ_EVENT_VDO0_DISP_RSZ0_SOF 538
+#define CMDQ_EVENT_VDO0_VPP1_DL_RELAY0_SOF 539
+#define CMDQ_EVENT_VDO0_VPP1_DL_RELAY1_SOF 540
+#define CMDQ_EVENT_VDO0_VDO1_DL_RELAY2_SOF 541
+#define CMDQ_EVENT_VDO0_VDO0_DL_RELAY3_SOF 542
+#define CMDQ_EVENT_VDO0_VDO0_DL_RELAY4_SOF 543
+#define CMDQ_EVENT_VDO0_DISP_PWM0_SOF 544
+#define CMDQ_EVENT_VDO0_DISP_PWM1_SOF 545
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_FRAME_DONE 546
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_FRAME_DONE 547
+#define CMDQ_EVENT_VDO0_DISP_RDMA0_FRAME_DONE 548
+#define CMDQ_EVENT_VDO0_DISP_COLOR0_O_FRAME_DONE 549
+#define CMDQ_EVENT_VDO0_DISP_CCORR0_O_FRAME_DONE 550
+#define CMDQ_EVENT_VDO0_DISP_AAL0_O_FRAME_DONE 551
+#define CMDQ_EVENT_VDO0_DISP_GAMMA0_O_FRAME_DONE 552
+#define CMDQ_EVENT_VDO0_DISP_DITHER0_O_FRAME_DONE 553
+#define CMDQ_EVENT_VDO0_DSI0_FRAME_DONE 554
+#define CMDQ_EVENT_VDO0_DSC_WRAP0_O_FRAME_DONE_0 555
+#define CMDQ_EVENT_VDO0_DISP_OVL1_O_FRAME_DONE 556
+#define CMDQ_EVENT_VDO0_DISP_WDMA1_O_FRAME_DONE 557
+#define CMDQ_EVENT_VDO0_DISP_RDMA1_O_FRAME_DONE 558
+#define CMDQ_EVENT_VDO0_DISP_COLOR1_O_FRAME_DONE 559
+#define CMDQ_EVENT_VDO0_DISP_CCORR1_O_FRAME_DONE 560
+#define CMDQ_EVENT_VDO0_DISP_AAL1_O_FRAME_DONE 561
+#define CMDQ_EVENT_VDO0_DISP_GAMMA1_O_FRAME_DONE 562
+#define CMDQ_EVENT_VDO0_DISP_DITHER1_O_FRAME_DONE 563
+#define CMDQ_EVENT_VDO0_DSI1_FRAME_DONE 564
+#define CMDQ_EVENT_VDO0_DSC_WRAP0_O_FRAME_DONE_1 565
+#define CMDQ_EVENT_VDO0_DP_INTF0_FRAME_DONE 567
+#define CMDQ_EVENT_VDO0_DISP_DPI0_O_FRAME_DONE 568
+#define CMDQ_EVENT_VDO0_DISP_DPI1_O_FRAME_DONE 569
+#define CMDQ_EVENT_VDO0_DISP_POSTMASK0_O_FRAME_DONE 570
+#define CMDQ_EVENT_VDO0_MDP_WROT0_O_FRAME_DONE 571
+#define CMDQ_EVENT_VDO0_DISP_RSZ0_O_FRAME_DONE 572
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_0 574
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_1 575
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_2 576
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_3 577
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_4 578
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_5 579
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_6 580
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_7 581
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_8 582
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_9 583
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_10 584
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_11 585
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_12 586
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_13 587
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_14 588
+#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_15 589
+#define CMDQ_EVENT_VDO0_DISP_RDMA_0_UNDERRUN 590
+#define CMDQ_EVENT_VDO0_DISP_RDMA_1_UNDERRUN 591
+#define CMDQ_EVENT_VDO0_U_MERGE4_UNDERRUN 592
+#define CMDQ_EVENT_VDO0_DSI_0_UNDERRUN 595
+#define CMDQ_EVENT_VDO0_DSI_1_UNDERRUN 596
+#define CMDQ_EVENT_VDO0_DP_INTF_0 597
+#define CMDQ_EVENT_VDO0_DP_INTF_1 598
+#define CMDQ_EVENT_VDO0_DPI_0 599
+#define CMDQ_EVENT_VDO0_DPI_1 600
+#define CMDQ_EVENT_VDO0_DISP_SMIASSERT_ENG_EVENT 606
+#define CMDQ_EVENT_VDO0_DSI0_O_DSI_IRQ_EVENT_MM 607
+#define CMDQ_EVENT_VDO0_DSI0_TE_ENG_EVENT_MM 608
+#define CMDQ_EVENT_VDO0_DSI0_O_DSI_DONE_EVENT_MM 609
+#define CMDQ_EVENT_VDO0_DSI0_O_DSI_VACTL_EVENT_MM 610
+#define CMDQ_EVENT_VDO0_DSI1_O_DSI_IRQ_EVENT_MM 611
+#define CMDQ_EVENT_VDO0_DSI1_TE_ENG_EVENT_MM 612
+#define CMDQ_EVENT_VDO0_DSI1_O_DSI_DONE_EVENT_MM 613
+#define CMDQ_EVENT_VDO0_DSI1_O_DSI_VACTL_EVENT_MM 614
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_DP_VSYNC_START_EVENT_MM 615
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_DP_VSYNC_END_EVENT_MM 616
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_DP_VDE_START_EVENT_MM 617
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_DP_VDE_END_EVENT_MM 618
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_VACT_TARGET_LINE_EVENT_MM 619
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_LAST_SAFE_BLANK_EVENT_MM 620
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_LAST_LINE_EVENT_MM 621
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_TRIGGER_LOOP_CLEAR_EVENT_MM 622
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_TARGET_LINE_0_EVENT_MM 623
+#define CMDQ_EVENT_VDO0_DP_INTF0_O_TARGET_LINE_1_EVENT_MM 624
+#define CMDQ_EVENT_VDO0_DISP_POSTMASK0_O_FRAME_RESET_DONE_PULSE 625
+#define CMDQ_EVENT_VDO0_VPP_MERGE0_O_VPP_MERGE_EVENT 626
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_FRAME_RESET_DONE_PULSE 627
+#define CMDQ_EVENT_VDO0_DISP_RDMA0_O_DISP_RDMA_TARGET_LINE_EVENT 628
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_O_WDMA_TARGET_LINE_EVENT 629
+#define CMDQ_EVENT_VDO0_DISP_WDMA0_O_SW_RST_DONE 630
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_0 631
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_1 632
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_2 633
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_3 634
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_4 635
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_5 636
+#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_6 637
+#define CMDQ_EVENT_VDO0_MDP_WROT0_O_SW_RST_DONE 638
+#define CMDQ_EVENT_VDO0_RESERVED 639
+#define CMDQ_EVENT_VDO1_MDP_RDMA0_SOF 640
+#define CMDQ_EVENT_VDO1_MDP_RDMA1_SOF 641
+#define CMDQ_EVENT_VDO1_MDP_RDMA2_SOF 642
+#define CMDQ_EVENT_VDO1_MDP_RDMA3_SOF 643
+#define CMDQ_EVENT_VDO1_MDP_RDMA4_SOF 644
+#define CMDQ_EVENT_VDO1_MDP_RDMA5_SOF 645
+#define CMDQ_EVENT_VDO1_MDP_RDMA6_SOF 646
+#define CMDQ_EVENT_VDO1_MDP_RDMA7_SOF 647
+#define CMDQ_EVENT_VDO1_DISP_PADDING0_SOF 648
+#define CMDQ_EVENT_VDO1_DISP_PADDING1_SOF 649
+#define CMDQ_EVENT_VDO1_DISP_PADDING2_SOF 650
+#define CMDQ_EVENT_VDO1_DISP_PADDING3_SOF 651
+#define CMDQ_EVENT_VDO1_DISP_PADDING4_SOF 652
+#define CMDQ_EVENT_VDO1_DISP_PADDING5_SOF 653
+#define CMDQ_EVENT_VDO1_DISP_PADDING6_SOF 654
+#define CMDQ_EVENT_VDO1_DISP_PADDING7_SOF 655
+#define CMDQ_EVENT_VDO1_DISP_RSZ0_SOF 656
+#define CMDQ_EVENT_VDO1_DISP_RSZ1_SOF 657
+#define CMDQ_EVENT_VDO1_DISP_RSZ2_SOF 658
+#define CMDQ_EVENT_VDO1_DISP_RSZ3_SOF 659
+#define CMDQ_EVENT_VDO1_VPP_MERGE0_SOF 660
+#define CMDQ_EVENT_VDO1_VPP_MERGE1_SOF 661
+#define CMDQ_EVENT_VDO1_VPP_MERGE2_SOF 662
+#define CMDQ_EVENT_VDO1_VPP_MERGE3_SOF 663
+#define CMDQ_EVENT_VDO1_VPP_MERGE4_SOF 664
+#define CMDQ_EVENT_VDO1_VPP2_DL_RELAY_SOF 665
+#define CMDQ_EVENT_VDO1_VPP3_DL_RELAY_SOF 666
+#define CMDQ_EVENT_VDO0_DSC_DL_ASYNC_SOF 667
+#define CMDQ_EVENT_VDO0_MERGE_DL_ASYNC_SOF 668
+#define CMDQ_EVENT_VDO1_OUT_DL_RELAY_SOF 669
+#define CMDQ_EVENT_VDO1_DISP_MIXER_SOF 670
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE0_SOF 671
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_SOF 672
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_SOF 673
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_SOF 674
+#define CMDQ_EVENT_VDO1_HDR_VDO_BE0_SOF 675
+#define CMDQ_EVENT_VDO1_HDR_MLOAD_SOF 676
+#define CMDQ_EVENT_VDO1_DPI0_EXT_SOF 677
+#define CMDQ_EVENT_VDO1_DPI1_EXT_SOF 678
+#define CMDQ_EVENT_VDO1_DP_INTF_EXT_EXT_SOF 679
+#define CMDQ_EVENT_VDO1_MDP_RDMA0_FRAME_DONE 680
+#define CMDQ_EVENT_VDO1_MDP_RDMA1_FRAME_DONE 681
+#define CMDQ_EVENT_VDO1_MDP_RDMA2_FRAME_DONE 682
+#define CMDQ_EVENT_VDO1_MDP_RDMA3_FRAME_DONE 683
+#define CMDQ_EVENT_VDO1_MDP_RDMA4_FRAME_DONE 684
+#define CMDQ_EVENT_VDO1_MDP_RDMA5_FRAME_DONE 685
+#define CMDQ_EVENT_VDO1_MDP_RDMA6_FRAME_DONE 686
+#define CMDQ_EVENT_VDO1_MDP_RDMA7_FRAME_DONE 687
+#define CMDQ_EVENT_VDO1_DISP_PADDING0_FRAME_DONE 688
+#define CMDQ_EVENT_VDO1_DISP_PADDING1_FRAME_DONE 689
+#define CMDQ_EVENT_VDO1_DISP_PADDING2_FRAME_DONE 690
+#define CMDQ_EVENT_VDO1_DISP_PADDING3_FRAME_DONE 691
+#define CMDQ_EVENT_VDO1_DISP_PADDING4_FRAME_DONE 692
+#define CMDQ_EVENT_VDO1_DISP_PADDING5_FRAME_DONE 693
+#define CMDQ_EVENT_VDO1_DISP_PADDING6_FRAME_DONE 694
+#define CMDQ_EVENT_VDO1_DISP_PADDING7_FRAME_DONE 695
+#define CMDQ_EVENT_VDO1_DISP_RSZ0_FRAME_DONE 696
+#define CMDQ_EVENT_VDO1_DISP_RSZ1_FRAME_DONE 697
+#define CMDQ_EVENT_VDO1_DISP_RSZ2_FRAME_DONE 698
+#define CMDQ_EVENT_VDO1_DISP_RSZ3_FRAME_DONE 699
+#define CMDQ_EVENT_VDO1_VPP_MERGE0_FRAME_DONE 700
+#define CMDQ_EVENT_VDO1_VPP_MERGE1_FRAME_DONE 701
+#define CMDQ_EVENT_VDO1_VPP_MERGE2_FRAME_DONE 702
+#define CMDQ_EVENT_VDO1_VPP_MERGE3_FRAME_DONE 703
+#define CMDQ_EVENT_VDO1_VPP_MERGE4_FRAME_DONE 704
+#define CMDQ_EVENT_VDO1_DPI0_FRAME_DONE 705
+#define CMDQ_EVENT_VDO1_DPI1_FRAME_DONE 706
+#define CMDQ_EVENT_VDO1_DP_INTF0_FRAME_DONE 707
+#define CMDQ_EVENT_VDO1_DISP_MIXER_FRAME_DONE_MM 708
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_0 709
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_1 710
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_2 711
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_3 712
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_4 713
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_5 714
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_6 715
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_7 716
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_8 717
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_9 718
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_10 719
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_11 720
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_12 721
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_13 722
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_14 723
+#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_15 724
+#define CMDQ_EVENT_VDO1_DISP_RDMA_0_UNDERRUN 725
+#define CMDQ_EVENT_VDO1_DISP_RDMA_1_UNDERRUN 726
+#define CMDQ_EVENT_VDO1_U_MERGE4_UNDERRUN 727
+#define CMDQ_EVENT_VDO1_U_VPP_SPLIT_VIDEO_0_OVERFLOW 728
+#define CMDQ_EVENT_VDO1_U_VPP_SPLIT_VIDEO_1_OVERFLOW 729
+#define CMDQ_EVENT_VDO1_DSI_0_UNDERRUN 730
+#define CMDQ_EVENT_VDO1_DSI_1_UNDERRUN 731
+#define CMDQ_EVENT_VDO1_DP_INTF_0 732
+#define CMDQ_EVENT_VDO1_DP_INTF_1 733
+#define CMDQ_EVENT_VDO1_DPI_0 734
+#define CMDQ_EVENT_VDO1_DPI_1 735
+#define CMDQ_EVENT_VDO1_MDP_RDMA0_SW_RST_DONE 741
+#define CMDQ_EVENT_VDO1_MDP_RDMA1_SW_RST_DONE 742
+#define CMDQ_EVENT_VDO1_MDP_RDMA2_SW_RST_DONE 743
+#define CMDQ_EVENT_VDO1_MDP_RDMA3_SW_RST_DONE 744
+#define CMDQ_EVENT_VDO1_MDP_RDMA4_SW_RST_DONE 745
+#define CMDQ_EVENT_VDO1_MDP_RDMA5_SW_RST_DONE 746
+#define CMDQ_EVENT_VDO1_MDP_RDMA6_SW_RST_DONE 747
+#define CMDQ_EVENT_VDO1_MDP_RDMA7_SW_RST_DONE 748
+#define CMDQ_EVENT_VDO1_DP0_VDE_END_ENG_EVENT_MM 749
+#define CMDQ_EVENT_VDO1_DP0_VDE_START_ENG_EVENT_MM 750
+#define CMDQ_EVENT_VDO1_DP0_VSYNC_END_ENG_EVENT_MM 751
+#define CMDQ_EVENT_VDO1_DP0_VSYNC_START_ENG_EVENT_MM 752
+#define CMDQ_EVENT_VDO1_DP0_TARGET_LINE_ENG_EVENT_MM 753
+#define CMDQ_EVENT_VDO1_VPP_MERGE0_EVENT 754
+#define CMDQ_EVENT_VDO1_VPP_MERGE1_EVENT 755
+#define CMDQ_EVENT_VDO1_VPP_MERGE2_EVENT 756
+#define CMDQ_EVENT_VDO1_VPP_MERGE3_EVENT 757
+#define CMDQ_EVENT_VDO1_VPP_MERGE4_EVENT 758
+#define CMDQ_EVENT_VDO1_HDMITX_EVENT 759
+#define CMDQ_EVENT_VDO1_HDR_VDO_BE0_ADL_TRIG_EVENT_MM 760
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_THDR_ADL_TRIG_EVENT_MM 761
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_DM_ADL_TRIG_EVENT_MM 762
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_THDR_ADL_TRIG_EVENT_MM 763
+#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_DM_ADL_TRIG_EVENT_MM 764
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_ADL_TRIG_EVENT_MM 765
+#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_AD0_TRIG_EVENT_MM 766
+#define CMDQ_EVENT_VDO1_DPI0_TARGET_LINE_1_EVENT_MM 767
+#define CMDQ_EVENT_HANDSHAKE_0 768
+#define CMDQ_EVENT_HANDSHAKE_1 769
+#define CMDQ_EVENT_HANDSHAKE_2 770
+#define CMDQ_EVENT_HANDSHAKE_3 771
+#define CMDQ_EVENT_HANDSHAKE_4 772
+#define CMDQ_EVENT_HANDSHAKE_5 773
+#define CMDQ_EVENT_HANDSHAKE_6 774
+#define CMDQ_EVENT_HANDSHAKE_7 775
+#define CMDQ_EVENT_HANDSHAKE_8 776
+#define CMDQ_EVENT_HANDSHAKE_9 777
+#define CMDQ_EVENT_HANDSHAKE_10 778
+#define CMDQ_EVENT_HANDSHAKE_11 779
+#define CMDQ_EVENT_HANDSHAKE_12 780
+#define CMDQ_EVENT_HANDSHAKE_13 781
+#define CMDQ_EVENT_HANDSHAKE_14 782
+#define CMDQ_EVENT_HANDSHAKE_15 783
+#define CMDQ_EVENT_VDEC_SOC_EVENT_0 800
+#define CMDQ_EVENT_VDEC_SOC_EVENT_1 801
+#define CMDQ_EVENT_VDEC_SOC_EVENT_2 802
+#define CMDQ_EVENT_VDEC_SOC_EVENT_3 803
+#define CMDQ_EVENT_VDEC_SOC_EVENT_4 804
+#define CMDQ_EVENT_VDEC_SOC_EVENT_5 805
+#define CMDQ_EVENT_VDEC_SOC_EVENT_6 806
+#define CMDQ_EVENT_VDEC_SOC_EVENT_7 807
+#define CMDQ_EVENT_VDEC_SOC_EVENT_8 808
+#define CMDQ_EVENT_VDEC_SOC_EVENT_9 809
+#define CMDQ_EVENT_VDEC_SOC_EVENT_10 810
+#define CMDQ_EVENT_VDEC_SOC_EVENT_11 811
+#define CMDQ_EVENT_VDEC_SOC_EVENT_12 812
+#define CMDQ_EVENT_VDEC_SOC_EVENT_13 813
+#define CMDQ_EVENT_VDEC_SOC_EVENT_14 814
+#define CMDQ_EVENT_VDEC_SOC_EVENT_15 815
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_0 832
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_1 833
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_2 834
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_3 835
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_4 836
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_5 837
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_6 838
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_7 839
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_8 840
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_9 841
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_10 842
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_11 843
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_12 844
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_13 845
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_14 846
+#define CMDQ_EVENT_VDEC_CORE0_EVENT_15 847
+#define CMDQ_EVENT_VENC_TOP_VENC_FRAME_DONE 865
+#define CMDQ_EVENT_VENC_TOP_VENC_PAUSE_DONE 866
+#define CMDQ_EVENT_VENC_TOP_JPGENC_DONE 867
+#define CMDQ_EVENT_VENC_TOP_VENC_MB_DONE 868
+#define CMDQ_EVENT_VENC_TOP_VENC_128BYTE_DONE 869
+#define CMDQ_EVENT_VENC_TOP_JPGDEC_DONE 870
+#define CMDQ_EVENT_VENC_TOP_VENC_SLICE_DONE 871
+#define CMDQ_EVENT_VENC_TOP_JPGDEC_INSUFF_DONE 872
+#define CMDQ_EVENT_VENC_TOP_WP_2ND_STAGE_DONE 874
+#define CMDQ_EVENT_VENC_TOP_WP_3RD_STAGE_DONE 875
+#define CMDQ_EVENT_VENC_TOP_PPS_HEADER_DONE 876
+#define CMDQ_EVENT_VENC_TOP_SPS_HEADER_DONE 877
+#define CMDQ_EVENT_VENC_TOP_VPS_HEADER_DONE 878
+#define CMDQ_EVENT_WPE_VPP0_WPE_GCE_FRAME_DONE 882
+#define CMDQ_EVENT_WPE_VPP0_WPE_DONE_SYNC_OUT 883
+#define CMDQ_EVENT_SVPP1_MDP_OVL_NEW_EVENT_2 896
+#define CMDQ_EVENT_SVPP1_MDP_OVL_NEW_EVENT_3 897
+#define CMDQ_EVENT_SVPP1_MDP_OVL_NEW_EVENT_4 898
+#define CMDQ_EVENT_SVPP1_MDP_OVL_NEW_EVENT_5 899
+#define CMDQ_EVENT_SVPP1_MDP_OVL_NEW_EVENT_6 900
+#define CMDQ_EVENT_VDO1_DPI0_TARGET_LINE_0_EVENT_MM 928
+#define CMDQ_EVENT_VDO1_DPI0_TRIGGER_LOOP_CLEAR_EVENT_MM 929
+#define CMDQ_EVENT_VDO1_DPI0_LAST_LINE_EVENT_MM 930
+#define CMDQ_EVENT_VDO1_DPI0_LAST_SAFE_BLANK_EVENT_MM 931
+#define CMDQ_EVENT_VDO1_DPI0_VSYNC_START_EVENT_MM 932
+#define CMDQ_EVENT_VDO1_DPI1_TARGET_LINE_1_EVENT_MM 933
+#define CMDQ_EVENT_VDO1_DPI1_TARGET_LINE_0_EVENT_MM 934
+#define CMDQ_EVENT_VDO1_DPI1_TRIGGER_LOOP_CLEAR_EVENT_MM 935
+#define CMDQ_EVENT_VDO1_DPI1_LAST_LINE_EVENT_MM 936
+#define CMDQ_EVENT_VDO1_DPI1_LAST_SAFE_BLANK_EVENT_MM 937
+#define CMDQ_EVENT_VDO1_DPI1_VSYNC_START_EVENT_MM 938
+#define CMDQ_EVENT_VDO1_DP_INTF_TARGET_LINE_1_EVENT_MM 939
+#define CMDQ_EVENT_VDO1_DP_INTF_TARGET_LINE_0_EVENT_MM 940
+#define CMDQ_EVENT_VDO1_DP_INTF_TRIGGER_LOOP_CLEAR_EVENT_MM 941
+#define CMDQ_EVENT_VDO1_DP_INTF_LAST_LINE_EVENT_MM 942
+#define CMDQ_EVENT_VDO1_DP_INTF_LAST_SAFE_BLANK_EVENT_MM 943
+#define CMDQ_EVENT_VBLANK_FALLING 946
+#define CMDQ_EVENT_VSC_FINISH 947
+#define CMDQ_EVENT_TPR_0 962
+#define CMDQ_EVENT_TPR_1 963
+#define CMDQ_EVENT_TPR_2 964
+#define CMDQ_EVENT_TPR_3 965
+#define CMDQ_EVENT_TPR_4 966
+#define CMDQ_EVENT_TPR_5 967
+#define CMDQ_EVENT_TPR_6 968
+#define CMDQ_EVENT_TPR_7 969
+#define CMDQ_EVENT_TPR_8 970
+#define CMDQ_EVENT_TPR_9 971
+#define CMDQ_EVENT_TPR_10 972
+#define CMDQ_EVENT_TPR_11 973
+#define CMDQ_EVENT_TPR_12 974
+#define CMDQ_EVENT_TPR_13 975
+#define CMDQ_EVENT_TPR_14 976
+#define CMDQ_EVENT_TPR_15 977
+#define CMDQ_EVENT_TPR_16 978
+#define CMDQ_EVENT_TPR_17 979
+#define CMDQ_EVENT_TPR_18 980
+#define CMDQ_EVENT_TPR_19 981
+#define CMDQ_EVENT_TPR_20 982
+#define CMDQ_EVENT_TPR_21 983
+#define CMDQ_EVENT_TPR_22 984
+#define CMDQ_EVENT_TPR_23 985
+#define CMDQ_EVENT_TPR_24 986
+#define CMDQ_EVENT_TPR_25 987
+#define CMDQ_EVENT_TPR_26 988
+#define CMDQ_EVENT_TPR_27 989
+#define CMDQ_EVENT_TPR_28 990
+#define CMDQ_EVENT_TPR_29 991
+#define CMDQ_EVENT_TPR_30 992
+#define CMDQ_EVENT_TPR_31 993
+#define CMDQ_EVENT_TPR_TIMEOUT_0 994
+#define CMDQ_EVENT_TPR_TIMEOUT_1 995
+#define CMDQ_EVENT_TPR_TIMEOUT_2 996
+#define CMDQ_EVENT_TPR_TIMEOUT_3 997
+#define CMDQ_EVENT_TPR_TIMEOUT_4 998
+#define CMDQ_EVENT_TPR_TIMEOUT_5 999
+#define CMDQ_EVENT_TPR_TIMEOUT_6 1000
+#define CMDQ_EVENT_TPR_TIMEOUT_7 1001
+#define CMDQ_EVENT_TPR_TIMEOUT_8 1002
+#define CMDQ_EVENT_TPR_TIMEOUT_9 1003
+#define CMDQ_EVENT_TPR_TIMEOUT_10 1004
+#define CMDQ_EVENT_TPR_TIMEOUT_11 1005
+#define CMDQ_EVENT_TPR_TIMEOUT_12 1006
+#define CMDQ_EVENT_TPR_TIMEOUT_13 1007
+#define CMDQ_EVENT_TPR_TIMEOUT_14 1008
+#define CMDQ_EVENT_TPR_TIMEOUT_15 1009
+#define CMDQ_EVENT_OUTPIN_0 1018
+#define CMDQ_EVENT_OUTPIN_1 1019
+
+#define CMDQ_SYNC_TOKEN_IMGSYS_WPE_EIS 124
+#define CMDQ_SYNC_TOKEN_IMGSYS_WPE_TNR 125
+#define CMDQ_SYNC_TOKEN_IMGSYS_WPE_LITE 126
+#define CMDQ_SYNC_TOKEN_IMGSYS_TRAW 127
+#define CMDQ_SYNC_TOKEN_IMGSYS_LTRAW 128
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_1 223
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_2 224
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_3 225
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_4 226
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_5 227
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_6 228
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_7 229
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_8 230
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_9 231
+#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_10 232
+#define CMDQ_SYNC_TOKEN_IMGSYS_XTRAW 233
+#define CMDQ_SYNC_TOKEN_IMGSYS_DIP 234
+#define CMDQ_SYNC_TOKEN_IMGSYS_PQDIP_A 235
+#define CMDQ_SYNC_TOKEN_IMGSYS_PQDIP_B 236
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_1 237
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_2 238
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_3 239
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_4 240
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_5 241
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_6 242
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_7 243
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_8 244
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_9 245
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_10 246
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_11 247
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_12 248
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_13 249
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_14 250
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_15 251
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_16 252
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_17 253
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_18 254
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_19 255
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_20 276
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_21 277
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_22 278
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_23 279
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_24 280
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_25 281
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_26 282
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_27 283
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_28 284
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_29 285
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_30 286
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_31 287
+#define CMDQ_SYNC_TOKEN_IPESYS_ME 300
+#define CMDQ_SYNC_TOKEN_IMGSYS_VSS_TRAW 301
+#define CMDQ_SYNC_TOKEN_IMGSYS_VSS_LTRAW 302
+#define CMDQ_SYNC_TOKEN_IMGSYS_VSS_XTRAW 303
+#define CMDQ_SYNC_TOKEN_IMGSYS_VSS_DIP 304
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_32 308
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_33 309
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_34 310
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_35 311
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_36 312
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_37 313
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_38 314
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_39 315
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_40 316
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_41 370
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_42 371
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_43 372
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_44 373
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_45 374
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_46 375
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_47 376
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_48 377
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_49 378
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_50 379
+#define CMDQ_SYNC_TOKEN_TZMP_ISP_WAIT 380
+#define CMDQ_SYNC_TOKEN_TZMP_ISP_SET 381
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_51 790
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_52 791
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_53 792
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_54 793
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_55 794
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_56 795
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_57 796
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_58 797
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_59 798
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_60 799
+#define CMDQ_SYNC_TOKEN_PREBUILT_MDP_WAIT 816
+#define CMDQ_SYNC_TOKEN_PREBUILT_MDP_SET 817
+#define CMDQ_SYNC_TOKEN_PREBUILT_MDP_LOCK 818
+#define CMDQ_SYNC_TOKEN_PREBUILT_MML_WAIT 819
+#define CMDQ_SYNC_TOKEN_PREBUILT_MML_SET 820
+#define CMDQ_SYNC_TOKEN_PREBUILT_MML_LOCK 821
+#define CMDQ_SYNC_TOKEN_PREBUILT_VFMT_WAIT 822
+#define CMDQ_SYNC_TOKEN_PREBUILT_VFMT_SET 823
+#define CMDQ_SYNC_TOKEN_PREBUILT_VFMT_LOCK 824
+#define CMDQ_SYNC_TOKEN_PREBUILT_DISP_WAIT 825
+#define CMDQ_SYNC_TOKEN_PREBUILT_DISP_SET 826
+#define CMDQ_SYNC_TOKEN_PREBUILT_DISP_LOCK 827
+#define CMDQ_SYNC_TOKEN_CONFIG_DIRTY 848
+#define CMDQ_SYNC_TOKEN_STREAM_EOF 849
+#define CMDQ_SYNC_TOKEN_ESD_EOF 850
+#define CMDQ_SYNC_TOKEN_STREAM_BLOCK 851
+#define CMDQ_SYNC_TOKEN_CABC_EOF 852
+#define CMDQ_SYNC_TOKEN_VENC_INPUT_READY 853
+#define CMDQ_SYNC_TOKEN_VENC_EOF 854
+#define CMDQ_SYNC_TOKEN_SECURE_THR_EOF 855
+#define CMDQ_SYNC_TOKEN_USER_0 856
+#define CMDQ_SYNC_TOKEN_USER_1 857
+#define CMDQ_SYNC_TOKEN_POLL_MONITOR 858
+#define CMDQ_TOKEN_TPR_LOCK 859
+#define CMDQ_SYNC_TOKEN_MSS 860
+#define CMDQ_SYNC_TOKEN_MSF 861
+#define CMDQ_SYNC_TOKEN_GPR_SET_0 884
+#define CMDQ_SYNC_TOKEN_GPR_SET_1 885
+#define CMDQ_SYNC_TOKEN_GPR_SET_2 886
+#define CMDQ_SYNC_TOKEN_GPR_SET_3 887
+#define CMDQ_SYNC_TOKEN_GPR_SET_4 888
+#define CMDQ_SYNC_RESOURCE_WROT0 889
+#define CMDQ_SYNC_RESOURCE_WROT1 890
+#define CMDQ_SYNC_TOKEN_DISP_VA_START 1012
+#define CMDQ_SYNC_TOKEN_DISP_VA_END 1013
+
+#endif
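Usage sketch: GCE clients name a thread and one of the priorities above in a
two-cell mailbox specifier; the client node and thread indices below are
illustrative:

	#include <dt-bindings/mailbox/mediatek,mt8188-gce.h>

	display@14001000 {
		mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>,
			 <&gce 1 CMDQ_THR_PRIO_HIGHEST>;
	};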
diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h
index 4c23eefed5f3..fd85a79381b3 100644
--- a/include/dt-bindings/mailbox/qcom-ipcc.h
+++ b/include/dt-bindings/mailbox/qcom-ipcc.h
@@ -8,6 +8,7 @@
/* Signal IDs for MPROC protocol */
#define IPCC_MPROC_SIGNAL_GLINK_QMP 0
+#define IPCC_MPROC_SIGNAL_TZ 1
#define IPCC_MPROC_SIGNAL_SMP2P 2
#define IPCC_MPROC_SIGNAL_PING 3
@@ -29,5 +30,10 @@
#define IPCC_CLIENT_PCIE1 14
#define IPCC_CLIENT_PCIE2 15
#define IPCC_CLIENT_SPSS 16
+#define IPCC_CLIENT_NSP1 18
+#define IPCC_CLIENT_TME 23
+#define IPCC_CLIENT_WPSS 24
+#define IPCC_CLIENT_GPDSP0 31
+#define IPCC_CLIENT_GPDSP1 32
#endif
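Usage sketch: IPCC consumers encode <client signal> pairs, plus a trigger
type when used as an interrupt; the WPSS client node below is illustrative:

	#include <dt-bindings/interrupt-controller/irq.h>
	#include <dt-bindings/mailbox/qcom-ipcc.h>

	remoteproc {
		interrupts-extended = <&ipcc IPCC_CLIENT_WPSS
				       IPCC_MPROC_SIGNAL_GLINK_QMP
				       IRQ_TYPE_EDGE_RISING>;
		mboxes = <&ipcc IPCC_CLIENT_WPSS IPCC_MPROC_SIGNAL_GLINK_QMP>;
	};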
diff --git a/include/dt-bindings/mailbox/tegra186-hsp.h b/include/dt-bindings/mailbox/tegra186-hsp.h
index 3bdec7a84d35..b9ccae2aa9e2 100644
--- a/include/dt-bindings/mailbox/tegra186-hsp.h
+++ b/include/dt-bindings/mailbox/tegra186-hsp.h
@@ -16,6 +16,11 @@
#define TEGRA_HSP_MBOX_TYPE_AS 0x3
/*
+ * These define the supported shared mailbox types, distinguished by data size.
+ */
+#define TEGRA_HSP_MBOX_TYPE_SM_128BIT (1 << 8)
+
+/*
* These defines represent the bit associated with the given master ID in the
* doorbell registers.
*/
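Usage sketch: the 128-bit flag is ORed into the shared-mailbox type cell of
the HSP mailbox specifier; the client node and channel index below are
illustrative:

	#include <dt-bindings/mailbox/tegra186-hsp.h>

	bpmp {
		mboxes = <&hsp_top0 (TEGRA_HSP_MBOX_TYPE_SM |
			  TEGRA_HSP_MBOX_TYPE_SM_128BIT) 0>;
	};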
diff --git a/include/dt-bindings/media/c8sectpfe.h b/include/dt-bindings/media/c8sectpfe.h
deleted file mode 100644
index 6b1fb6f5413b..000000000000
--- a/include/dt-bindings/media/c8sectpfe.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DT_C8SECTPFE_H
-#define __DT_C8SECTPFE_H
-
-#define STV0367_TDA18212_NIMA_1 0
-#define STV0367_TDA18212_NIMA_2 1
-#define STV0367_TDA18212_NIMB_1 2
-#define STV0367_TDA18212_NIMB_2 3
-
-#define STV0903_6110_LNB24_NIMA 4
-#define STV0903_6110_LNB24_NIMB 5
-
-#endif /* __DT_C8SECTPFE_H */
diff --git a/include/dt-bindings/media/tvp5150.h b/include/dt-bindings/media/tvp5150.h
index dda00c038530..ba34c420c303 100644
--- a/include/dt-bindings/media/tvp5150.h
+++ b/include/dt-bindings/media/tvp5150.h
@@ -2,7 +2,7 @@
/*
tvp5150.h - definition for tvp5150 inputs
- Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/dt-bindings/media/video-interfaces.h b/include/dt-bindings/media/video-interfaces.h
new file mode 100644
index 000000000000..0b19c9b2e627
--- /dev/null
+++ b/include/dt-bindings/media/video-interfaces.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2022 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#ifndef __DT_BINDINGS_MEDIA_VIDEO_INTERFACES_H__
+#define __DT_BINDINGS_MEDIA_VIDEO_INTERFACES_H__
+
+#define MEDIA_BUS_TYPE_CSI2_CPHY 1
+#define MEDIA_BUS_TYPE_CSI1 2
+#define MEDIA_BUS_TYPE_CCP2 3
+#define MEDIA_BUS_TYPE_CSI2_DPHY 4
+#define MEDIA_BUS_TYPE_PARALLEL 5
+#define MEDIA_BUS_TYPE_BT656 6
+
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_ABC 0
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_ACB 1
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_BAC 2
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_BCA 3
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_CAB 4
+#define MEDIA_BUS_CSI2_CPHY_LINE_ORDER_CBA 5
+
+#define MEDIA_PCLK_SAMPLE_FALLING_EDGE 0
+#define MEDIA_PCLK_SAMPLE_RISING_EDGE 1
+#define MEDIA_PCLK_SAMPLE_DUAL_EDGE 2
+
+#endif /* __DT_BINDINGS_MEDIA_VIDEO_INTERFACES_H__ */
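Usage sketch: these values populate the bus-type and related endpoint
properties of the common video-interfaces binding; the sensor graph below is
illustrative:

	#include <dt-bindings/media/video-interfaces.h>

	camera-sensor {
		port {
			endpoint {
				remote-endpoint = <&csi0_in>;
				bus-type = <MEDIA_BUS_TYPE_CSI2_DPHY>;
				data-lanes = <1 2 3 4>;
			};
		};
	};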
diff --git a/include/dt-bindings/memory/mediatek,mt6893-memory-port.h b/include/dt-bindings/memory/mediatek,mt6893-memory-port.h
new file mode 100644
index 000000000000..26e8b400db0d
--- /dev/null
+++ b/include/dt-bindings/memory/mediatek,mt6893-memory-port.h
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2025 Collabora Ltd
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT6893_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT6893_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space.
+ *
+ * The address ranges are preassigned as follows:
+ *
+ * modules dma-address-region larbs-ports
+ * disp 0 ~ 4G larb0/2
+ * vcodec 4G ~ 8G larb4/5/7
+ * cam/mdp 8G ~ 12G larb9/11/13/14/16/17/18/19/20
+ * CCU0 0x4000_0000 ~ 0x43ff_ffff larb13: port 9/10
+ * CCU1 0x4400_0000 ~ 0x47ff_ffff larb14: port 4/5
+ *
+ * larb3/6/8/10/12/15 are null.
+ */
+
+/* larb0 */
+#define M4U_PORT_L0_DISP_POSTMASK0 MTK_M4U_DOM_ID(0, 0)
+#define M4U_PORT_L0_MDP_RDMA4 MTK_M4U_DOM_ID(0, 1)
+#define M4U_PORT_L0_OVL_RDMA0_HDR MTK_M4U_DOM_ID(0, 2)
+#define M4U_PORT_L0_OVL_2L_RDMA1_HDR MTK_M4U_DOM_ID(0, 3)
+#define M4U_PORT_L0_OVL_2L_RDMA3_HDR MTK_M4U_DOM_ID(0, 4)
+#define M4U_PORT_L0_OVL_RDMA0 MTK_M4U_DOM_ID(0, 5)
+#define M4U_PORT_L0_OVL_2L_RDMA1 MTK_M4U_DOM_ID(0, 6)
+#define M4U_PORT_L0_OVL_2L_RDMA3 MTK_M4U_DOM_ID(0, 7)
+#define M4U_PORT_L0_OVL_RDMA1_SYSRAM MTK_M4U_DOM_ID(0, 8)
+#define M4U_PORT_L0_OVL_2L_RDMA0_SYSRAM MTK_M4U_DOM_ID(0, 9)
+#define M4U_PORT_L0_OVL_2L_RDMA2_SYSRAM MTK_M4U_DOM_ID(0, 10)
+#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_DOM_ID(0, 11)
+#define M4U_PORT_L0_DISP_RDMA0 MTK_M4U_DOM_ID(0, 12)
+#define M4U_PORT_L0_DISP_UFBC_WDMA0 MTK_M4U_DOM_ID(0, 13)
+#define M4U_PORT_L0_DISP_FAKE0 MTK_M4U_DOM_ID(0, 14)
+
+/* larb1 */
+#define M4U_PORT_L1_DISP_POSTMASK1 MTK_M4U_DOM_ID(1, 0)
+#define M4U_PORT_L1_MDP_RDMA5 MTK_M4U_DOM_ID(1, 1)
+#define M4U_PORT_L1_OVL_RDMA1_HDR MTK_M4U_DOM_ID(1, 2)
+#define M4U_PORT_L1_OVL_2L_RDMA0_HDR MTK_M4U_DOM_ID(1, 3)
+#define M4U_PORT_L1_OVL_2L_RDMA2_HDR MTK_M4U_DOM_ID(1, 4)
+#define M4U_PORT_L1_OVL_RDMA1 MTK_M4U_DOM_ID(1, 5)
+#define M4U_PORT_L1_OVL_2L_RDMA0 MTK_M4U_DOM_ID(1, 6)
+#define M4U_PORT_L1_OVL_2L_RDMA2 MTK_M4U_DOM_ID(1, 7)
+#define M4U_PORT_L1_OVL_RDMA0_SYSRAM MTK_M4U_DOM_ID(1, 8)
+#define M4U_PORT_L1_OVL_2L_RDMA1_SYSRAM MTK_M4U_DOM_ID(1, 9)
+#define M4U_PORT_L1_OVL_2L_RDMA3_SYSRAM MTK_M4U_DOM_ID(1, 10)
+#define M4U_PORT_L1_DISP_WDMA1 MTK_M4U_DOM_ID(1, 11)
+#define M4U_PORT_L1_DISP_RDMA1 MTK_M4U_DOM_ID(1, 12)
+#define M4U_PORT_L1_DISP_UFBC_WDMA1 MTK_M4U_DOM_ID(1, 13)
+#define M4U_PORT_L1_DISP_FAKE1 MTK_M4U_DOM_ID(1, 14)
+
+/* larb2 */
+#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_DOM_ID(2, 0)
+#define M4U_PORT_L2_MDP_RDMA2 MTK_M4U_DOM_ID(2, 1)
+#define M4U_PORT_L2_MDP_WROT0 MTK_M4U_DOM_ID(2, 2)
+#define M4U_PORT_L2_MDP_WROT2 MTK_M4U_DOM_ID(2, 3)
+#define M4U_PORT_L2_MDP_FILMGRAIN0 MTK_M4U_DOM_ID(2, 4)
+#define M4U_PORT_L2_MDP_FAKE0 MTK_M4U_DOM_ID(2, 5)
+
+/* larb3: null */
+
+/* larb4 */
+#define M4U_PORT_L4_VDEC_MC_EXT_MDP MTK_M4U_DOM_ID(4, 0)
+#define M4U_PORT_L4_VDEC_UFO_EXT_MDP MTK_M4U_DOM_ID(4, 1)
+#define M4U_PORT_L4_VDEC_PP_EXT_MDP MTK_M4U_DOM_ID(4, 2)
+#define M4U_PORT_L4_VDEC_PRED_RD_EXT_MDP MTK_M4U_DOM_ID(4, 3)
+#define M4U_PORT_L4_VDEC_PRED_WR_EXT_MDP MTK_M4U_DOM_ID(4, 4)
+#define M4U_PORT_L4_VDEC_PPWRAP_EXT_MDP MTK_M4U_DOM_ID(4, 5)
+#define M4U_PORT_L4_VDEC_TILE_EXT_MDP MTK_M4U_DOM_ID(4, 6)
+#define M4U_PORT_L4_VDEC_VLD_EXT_MDP MTK_M4U_DOM_ID(4, 7)
+#define M4U_PORT_L4_VDEC_VLD2_EXT_MDP MTK_M4U_DOM_ID(4, 8)
+#define M4U_PORT_L4_VDEC_AVC_MV_EXT_MDP MTK_M4U_DOM_ID(4, 9)
+#define M4U_PORT_L4_VDEC_RG_CTRL_DMA_EXT_MDP MTK_M4U_DOM_ID(4, 10)
+
+/* larb5 */
+#define M4U_PORT_L5_VDEC_LAT0_VLD_EXT_DISP MTK_M4U_DOM_ID(5, 0)
+#define M4U_PORT_L5_VDEC_LAT0_VLD2_EXT_DISP MTK_M4U_DOM_ID(5, 1)
+#define M4U_PORT_L5_VDEC_LAT0_AVC_MV_EXT_DISP MTK_M4U_DOM_ID(5, 2)
+#define M4U_PORT_L5_VDEC_LAT0_PRED_RD_EXT_DISP MTK_M4U_DOM_ID(5, 3)
+#define M4U_PORT_L5_VDEC_LAT0_TILE_EXT_DISP MTK_M4U_DOM_ID(5, 4)
+#define M4U_PORT_L5_VDEC_LAT0_WDMA_EXT_DISP MTK_M4U_DOM_ID(5, 5)
+#define M4U_PORT_L5_VDEC_LAT0_RG_CTRL_DMA_EXT_DISP MTK_M4U_DOM_ID(5, 6)
+#define M4U_PORT_L5_VDEC_UFO_ENC_EXT_DISP MTK_M4U_DOM_ID(5, 7)
+
+/* larb6: null */
+
+/* larb7 */
+#define M4U_PORT_L7_VENC_RCPU_DISP MTK_M4U_DOM_ID(7, 0)
+#define M4U_PORT_L7_VENC_REC_DISP MTK_M4U_DOM_ID(7, 1)
+#define M4U_PORT_L7_VENC_BSDMA_DISP MTK_M4U_DOM_ID(7, 2)
+#define M4U_PORT_L7_VENC_SV_COMV_DISP MTK_M4U_DOM_ID(7, 3)
+#define M4U_PORT_L7_VENC_RD_COMV_DISP MTK_M4U_DOM_ID(7, 4)
+#define M4U_PORT_L7_VENC_NBM_RDMA_DISP MTK_M4U_DOM_ID(7, 5)
+#define M4U_PORT_L7_VENC_NBM_RDMA_LITE_DISP MTK_M4U_DOM_ID(7, 6)
+#define M4U_PORT_L7_JPGENC_Y_RDMA_DISP MTK_M4U_DOM_ID(7, 7)
+#define M4U_PORT_L7_JPGENC_C_RDMA_DISP MTK_M4U_DOM_ID(7, 8)
+#define M4U_PORT_L7_JPGENC_Q_TABLE_DISP MTK_M4U_DOM_ID(7, 9)
+#define M4U_PORT_L7_JPGENC_BSDMA_DISP MTK_M4U_DOM_ID(7, 10)
+#define M4U_PORT_L7_JPGENC_WDMA0_DISP MTK_M4U_DOM_ID(7, 11)
+#define M4U_PORT_L7_JPGENC_BSDMA0_DISP MTK_M4U_DOM_ID(7, 12)
+#define M4U_PORT_L7_VENC_NBM_WDMA_DISP MTK_M4U_DOM_ID(7, 13)
+#define M4U_PORT_L7_VENC_NBM_WDMA_LITE_DISP MTK_M4U_DOM_ID(7, 14)
+#define M4U_PORT_L7_VENC_CUR_LUMA_DISP MTK_M4U_DOM_ID(7, 15)
+#define M4U_PORT_L7_VENC_CUR_CHROMA_DISP MTK_M4U_DOM_ID(7, 16)
+#define M4U_PORT_L7_VENC_REF_LUMA_DISP MTK_M4U_DOM_ID(7, 17)
+#define M4U_PORT_L7_VENC_REF_CHROMA_DISP MTK_M4U_DOM_ID(7, 18)
+#define M4U_PORT_L7_VENC_SUB_R_LUMA_DISP MTK_M4U_DOM_ID(7, 19)
+#define M4U_PORT_L7_VENC_SUB_W_LUMA_DISP MTK_M4U_DOM_ID(7, 20)
+#define M4U_PORT_L7_VENC_FCS_NBM_RDMA_DISP MTK_M4U_DOM_ID(7, 21)
+#define M4U_PORT_L7_VENC_FCS_NBM_WDMA_DISP MTK_M4U_DOM_ID(7, 22)
+#define M4U_PORT_L7_JPGENC_WDMA1_DISP MTK_M4U_DOM_ID(7, 23)
+#define M4U_PORT_L7_JPGENC_BSDMA1_DISP MTK_M4U_DOM_ID(7, 24)
+#define M4U_PORT_L7_JPGENC_HUFF_OFFSET1_DISP MTK_M4U_DOM_ID(7, 25)
+#define M4U_PORT_L7_JPGENC_HUFF_OFFSET0_DISP MTK_M4U_DOM_ID(7, 26)
+
+/* larb8: null */
+
+/* larb9 */
+#define M4U_PORT_L9_IMG_IMGI_D1_MDP MTK_M4U_DOM_ID(9, 0)
+#define M4U_PORT_L9_IMG_IMGBI_D1_MDP MTK_M4U_DOM_ID(9, 1)
+#define M4U_PORT_L9_IMG_DMGI_D1_MDP MTK_M4U_DOM_ID(9, 2)
+#define M4U_PORT_L9_IMG_DEPI_D1_MDP MTK_M4U_DOM_ID(9, 3)
+#define M4U_PORT_L9_IMG_ICE_D1_MDP MTK_M4U_DOM_ID(9, 4)
+#define M4U_PORT_L9_IMG_SMTI_D1_MDP MTK_M4U_DOM_ID(9, 5)
+#define M4U_PORT_L9_IMG_SMTO_D2_MDP MTK_M4U_DOM_ID(9, 6)
+#define M4U_PORT_L9_IMG_SMTO_D1_MDP MTK_M4U_DOM_ID(9, 7)
+#define M4U_PORT_L9_IMG_CRZO_D1_MDP MTK_M4U_DOM_ID(9, 8)
+#define M4U_PORT_L9_IMG_IMG3O_D1_MDP MTK_M4U_DOM_ID(9, 9)
+#define M4U_PORT_L9_IMG_VIPI_D1_MDP MTK_M4U_DOM_ID(9, 10)
+#define M4U_PORT_L9_IMG_SMTI_D5_MDP MTK_M4U_DOM_ID(9, 11)
+#define M4U_PORT_L9_IMG_TIMGO_D1_MDP MTK_M4U_DOM_ID(9, 12)
+#define M4U_PORT_L9_IMG_UFBC_W0_MDP MTK_M4U_DOM_ID(9, 13)
+#define M4U_PORT_L9_IMG_UFBC_R0_MDP MTK_M4U_DOM_ID(9, 14)
+#define M4U_PORT_L9_IMG_WPE_RDMA1_MDP MTK_M4U_DOM_ID(9, 15)
+#define M4U_PORT_L9_IMG_WPE_RDMA0_MDP MTK_M4U_DOM_ID(9, 16)
+#define M4U_PORT_L9_IMG_WPE_WDMA_MDP MTK_M4U_DOM_ID(9, 17)
+#define M4U_PORT_L9_IMG_MFB_RDMA0_MDP MTK_M4U_DOM_ID(9, 18)
+#define M4U_PORT_L9_IMG_MFB_RDMA1_MDP MTK_M4U_DOM_ID(9, 19)
+#define M4U_PORT_L9_IMG_MFB_RDMA2_MDP MTK_M4U_DOM_ID(9, 20)
+#define M4U_PORT_L9_IMG_MFB_RDMA3_MDP MTK_M4U_DOM_ID(9, 21)
+#define M4U_PORT_L9_IMG_MFB_RDMA4_MDP MTK_M4U_DOM_ID(9, 22)
+#define M4U_PORT_L9_IMG_MFB_RDMA5_MDP MTK_M4U_DOM_ID(9, 23)
+#define M4U_PORT_L9_IMG_MFB_WDMA0_MDP MTK_M4U_DOM_ID(9, 24)
+#define M4U_PORT_L9_IMG_MFB_WDMA1_MDP MTK_M4U_DOM_ID(9, 25)
+#define M4U_PORT_L9_IMG_RESERVE6_MDP MTK_M4U_DOM_ID(9, 26)
+#define M4U_PORT_L9_IMG_RESERVE7_MDP MTK_M4U_DOM_ID(9, 27)
+#define M4U_PORT_L9_IMG_RESERVE8_MDP MTK_M4U_DOM_ID(9, 28)
+
+/* larb10: null */
+
+/* larb11 */
+#define M4U_PORT_L11_IMG_IMGI_D1_DISP MTK_M4U_DOM_ID(11, 0)
+#define M4U_PORT_L11_IMG_IMGBI_D1_DISP MTK_M4U_DOM_ID(11, 1)
+#define M4U_PORT_L11_IMG_DMGI_D1_DISP MTK_M4U_DOM_ID(11, 2)
+#define M4U_PORT_L11_IMG_DEPI_D1_DISP MTK_M4U_DOM_ID(11, 3)
+#define M4U_PORT_L11_IMG_ICE_D1_DISP MTK_M4U_DOM_ID(11, 4)
+#define M4U_PORT_L11_IMG_SMTI_D1_DISP MTK_M4U_DOM_ID(11, 5)
+#define M4U_PORT_L11_IMG_SMTO_D2_DISP MTK_M4U_DOM_ID(11, 6)
+#define M4U_PORT_L11_IMG_SMTO_D1_DISP MTK_M4U_DOM_ID(11, 7)
+#define M4U_PORT_L11_IMG_CRZO_D1_DISP MTK_M4U_DOM_ID(11, 8)
+#define M4U_PORT_L11_IMG_IMG3O_D1_DISP MTK_M4U_DOM_ID(11, 9)
+#define M4U_PORT_L11_IMG_VIPI_D1_DISP MTK_M4U_DOM_ID(11, 10)
+#define M4U_PORT_L11_IMG_SMTI_D5_DISP MTK_M4U_DOM_ID(11, 11)
+#define M4U_PORT_L11_IMG_TIMGO_D1_DISP MTK_M4U_DOM_ID(11, 12)
+#define M4U_PORT_L11_IMG_UFBC_W0_DISP MTK_M4U_DOM_ID(11, 13)
+#define M4U_PORT_L11_IMG_UFBC_R0_DISP MTK_M4U_DOM_ID(11, 14)
+#define M4U_PORT_L11_IMG_WPE_RDMA1_DISP MTK_M4U_DOM_ID(11, 15)
+#define M4U_PORT_L11_IMG_WPE_RDMA0_DISP MTK_M4U_DOM_ID(11, 16)
+#define M4U_PORT_L11_IMG_WPE_WDMA_DISP MTK_M4U_DOM_ID(11, 17)
+#define M4U_PORT_L11_IMG_MFB_RDMA0_DISP MTK_M4U_DOM_ID(11, 18)
+#define M4U_PORT_L11_IMG_MFB_RDMA1_DISP MTK_M4U_DOM_ID(11, 19)
+#define M4U_PORT_L11_IMG_MFB_RDMA2_DISP MTK_M4U_DOM_ID(11, 20)
+#define M4U_PORT_L11_IMG_MFB_RDMA3_DISP MTK_M4U_DOM_ID(11, 21)
+#define M4U_PORT_L11_IMG_MFB_RDMA4_DISP MTK_M4U_DOM_ID(11, 22)
+#define M4U_PORT_L11_IMG_MFB_RDMA5_DISP MTK_M4U_DOM_ID(11, 23)
+#define M4U_PORT_L11_IMG_MFB_WDMA0_DISP MTK_M4U_DOM_ID(11, 24)
+#define M4U_PORT_L11_IMG_MFB_WDMA1_DISP MTK_M4U_DOM_ID(11, 25)
+#define M4U_PORT_L11_IMG_RESERVE6_DISP MTK_M4U_DOM_ID(11, 26)
+#define M4U_PORT_L11_IMG_RESERVE7_DISP MTK_M4U_DOM_ID(11, 27)
+#define M4U_PORT_L11_IMG_RESERVE8_DISP MTK_M4U_DOM_ID(11, 28)
+
+/* larb12: null */
+
+/* larb13 */
+#define M4U_PORT_L13_CAM_MRAWI_MDP MTK_M4U_DOM_ID(13, 0)
+#define M4U_PORT_L13_CAM_MRAWO0_MDP MTK_M4U_DOM_ID(13, 1)
+#define M4U_PORT_L13_CAM_MRAWO1_MDP MTK_M4U_DOM_ID(13, 2)
+#define M4U_PORT_L13_CAM_CAMSV1_MDP MTK_M4U_DOM_ID(13, 3)
+#define M4U_PORT_L13_CAM_CAMSV2_MDP MTK_M4U_DOM_ID(13, 4)
+#define M4U_PORT_L13_CAM_CAMSV3_MDP MTK_M4U_DOM_ID(13, 5)
+#define M4U_PORT_L13_CAM_CAMSV4_MDP MTK_M4U_DOM_ID(13, 6)
+#define M4U_PORT_L13_CAM_CAMSV5_MDP MTK_M4U_DOM_ID(13, 7)
+#define M4U_PORT_L13_CAM_CAMSV6_MDP MTK_M4U_DOM_ID(13, 8)
+#define M4U_PORT_L13_CAM_CCUI_MDP MTK_M4U_DOM_ID(13, 9)
+#define M4U_PORT_L13_CAM_CCUO_MDP MTK_M4U_DOM_ID(13, 10)
+#define M4U_PORT_L13_CAM_FAKE_MDP MTK_M4U_DOM_ID(13, 11)
+
+/* larb14 */
+#define M4U_PORT_L14_CAM_MRAWI_DISP MTK_M4U_DOM_ID(14, 0)
+#define M4U_PORT_L14_CAM_MRAWO0_DISP MTK_M4U_DOM_ID(14, 1)
+#define M4U_PORT_L14_CAM_MRAWO1_DISP MTK_M4U_DOM_ID(14, 2)
+#define M4U_PORT_L14_CAM_CAMSV0_DISP MTK_M4U_DOM_ID(14, 3)
+#define M4U_PORT_L14_CAM_CCUI_DISP MTK_M4U_DOM_ID(14, 4)
+#define M4U_PORT_L14_CAM_CCUO_DISP MTK_M4U_DOM_ID(14, 5)
+
+/* larb15: null */
+
+/* larb16 */
+#define M4U_PORT_L16_CAM_IMGO_R1_A_MDP MTK_M4U_DOM_ID(16, 0)
+#define M4U_PORT_L16_CAM_RRZO_R1_A_MDP MTK_M4U_DOM_ID(16, 1)
+#define M4U_PORT_L16_CAM_CQI_R1_A_MDP MTK_M4U_DOM_ID(16, 2)
+#define M4U_PORT_L16_CAM_BPCI_R1_A_MDP MTK_M4U_DOM_ID(16, 3)
+#define M4U_PORT_L16_CAM_YUVO_R1_A_MDP MTK_M4U_DOM_ID(16, 4)
+#define M4U_PORT_L16_CAM_UFDI_R2_A_MDP MTK_M4U_DOM_ID(16, 5)
+#define M4U_PORT_L16_CAM_RAWI_R2_A_MDP MTK_M4U_DOM_ID(16, 6)
+#define M4U_PORT_L16_CAM_RAWI_R3_A_MDP MTK_M4U_DOM_ID(16, 7)
+#define M4U_PORT_L16_CAM_AAO_R1_A_MDP MTK_M4U_DOM_ID(16, 8)
+#define M4U_PORT_L16_CAM_AFO_R1_A_MDP MTK_M4U_DOM_ID(16, 9)
+#define M4U_PORT_L16_CAM_FLKO_R1_A_MDP MTK_M4U_DOM_ID(16, 10)
+#define M4U_PORT_L16_CAM_LCESO_R1_A_MDP MTK_M4U_DOM_ID(16, 11)
+#define M4U_PORT_L16_CAM_CRZO_R1_A_MDP MTK_M4U_DOM_ID(16, 12)
+#define M4U_PORT_L16_CAM_LTMSO_R1_A_MDP MTK_M4U_DOM_ID(16, 13)
+#define M4U_PORT_L16_CAM_RSSO_R1_A_MDP MTK_M4U_DOM_ID(16, 14)
+#define M4U_PORT_L16_CAM_AAHO_R1_A_MDP MTK_M4U_DOM_ID(16, 15)
+#define M4U_PORT_L16_CAM_LSCI_R1_A_MDP MTK_M4U_DOM_ID(16, 16)
+
+/* larb17 */
+#define M4U_PORT_L17_CAM_IMGO_R1_B_DISP MTK_M4U_DOM_ID(17, 0)
+#define M4U_PORT_L17_CAM_RRZO_R1_B_DISP MTK_M4U_DOM_ID(17, 1)
+#define M4U_PORT_L17_CAM_CQI_R1_B_DISP MTK_M4U_DOM_ID(17, 2)
+#define M4U_PORT_L17_CAM_BPCI_R1_B_DISP MTK_M4U_DOM_ID(17, 3)
+#define M4U_PORT_L17_CAM_YUVO_R1_B_DISP MTK_M4U_DOM_ID(17, 4)
+#define M4U_PORT_L17_CAM_UFDI_R2_B_DISP MTK_M4U_DOM_ID(17, 5)
+#define M4U_PORT_L17_CAM_RAWI_R2_B_DISP MTK_M4U_DOM_ID(17, 6)
+#define M4U_PORT_L17_CAM_RAWI_R3_B_DISP MTK_M4U_DOM_ID(17, 7)
+#define M4U_PORT_L17_CAM_AAO_R1_B_DISP MTK_M4U_DOM_ID(17, 8)
+#define M4U_PORT_L17_CAM_AFO_R1_B_DISP MTK_M4U_DOM_ID(17, 9)
+#define M4U_PORT_L17_CAM_FLKO_R1_B_DISP MTK_M4U_DOM_ID(17, 10)
+#define M4U_PORT_L17_CAM_LCESO_R1_B_DISP MTK_M4U_DOM_ID(17, 11)
+#define M4U_PORT_L17_CAM_CRZO_R1_B_DISP MTK_M4U_DOM_ID(17, 12)
+#define M4U_PORT_L17_CAM_LTMSO_R1_B_DISP MTK_M4U_DOM_ID(17, 13)
+#define M4U_PORT_L17_CAM_RSSO_R1_B_DISP MTK_M4U_DOM_ID(17, 14)
+#define M4U_PORT_L17_CAM_AAHO_R1_B_DISP MTK_M4U_DOM_ID(17, 15)
+#define M4U_PORT_L17_CAM_LSCI_R1_B_DISP MTK_M4U_DOM_ID(17, 16)
+
+/* larb18 */
+#define M4U_PORT_L18_CAM_IMGO_R1_C_MDP MTK_M4U_DOM_ID(18, 0)
+#define M4U_PORT_L18_CAM_RRZO_R1_C_MDP MTK_M4U_DOM_ID(18, 1)
+#define M4U_PORT_L18_CAM_CQI_R1_C_MDP MTK_M4U_DOM_ID(18, 2)
+#define M4U_PORT_L18_CAM_BPCI_R1_C_MDP MTK_M4U_DOM_ID(18, 3)
+#define M4U_PORT_L18_CAM_YUVO_R1_C_MDP MTK_M4U_DOM_ID(18, 4)
+#define M4U_PORT_L18_CAM_UFDI_R2_C_MDP MTK_M4U_DOM_ID(18, 5)
+#define M4U_PORT_L18_CAM_RAWI_R2_C_MDP MTK_M4U_DOM_ID(18, 6)
+#define M4U_PORT_L18_CAM_RAWI_R3_C_MDP MTK_M4U_DOM_ID(18, 7)
+#define M4U_PORT_L18_CAM_AAO_R1_C_MDP MTK_M4U_DOM_ID(18, 8)
+#define M4U_PORT_L18_CAM_AFO_R1_C_MDP MTK_M4U_DOM_ID(18, 9)
+#define M4U_PORT_L18_CAM_FLKO_R1_C_MDP MTK_M4U_DOM_ID(18, 10)
+#define M4U_PORT_L18_CAM_LCESO_R1_C_MDP MTK_M4U_DOM_ID(18, 11)
+#define M4U_PORT_L18_CAM_CRZO_R1_C_MDP MTK_M4U_DOM_ID(18, 12)
+#define M4U_PORT_L18_CAM_LTMSO_R1_C_MDP MTK_M4U_DOM_ID(18, 13)
+#define M4U_PORT_L18_CAM_RSSO_R1_C_MDP MTK_M4U_DOM_ID(18, 14)
+#define M4U_PORT_L18_CAM_AAHO_R1_C_MDP MTK_M4U_DOM_ID(18, 15)
+#define M4U_PORT_L18_CAM_LSCI_R1_C_MDP MTK_M4U_DOM_ID(18, 16)
+
+/* larb19 */
+#define M4U_PORT_L19_IPE_DVS_RDMA_DISP MTK_M4U_DOM_ID(19, 0)
+#define M4U_PORT_L19_IPE_DVS_WDMA_DISP MTK_M4U_DOM_ID(19, 1)
+#define M4U_PORT_L19_IPE_DVP_RDMA_DISP MTK_M4U_DOM_ID(19, 2)
+#define M4U_PORT_L19_IPE_DVP_WDMA_DISP MTK_M4U_DOM_ID(19, 3)
+
+/* larb20 */
+#define M4U_PORT_L20_IPE_FDVT_RDA_DISP MTK_M4U_DOM_ID(20, 0)
+#define M4U_PORT_L20_IPE_FDVT_RDB_DISP MTK_M4U_DOM_ID(20, 1)
+#define M4U_PORT_L20_IPE_FDVT_WRA_DISP MTK_M4U_DOM_ID(20, 2)
+#define M4U_PORT_L20_IPE_FDVT_WRB_DISP MTK_M4U_DOM_ID(20, 3)
+#define M4U_PORT_L20_IPE_RSC_RDMA0_DISP MTK_M4U_DOM_ID(20, 4)
+#define M4U_PORT_L20_IPE_RSC_WDMA_DISP MTK_M4U_DOM_ID(20, 5)
+
+#endif
diff --git a/include/dt-bindings/memory/mediatek,mt8188-memory-port.h b/include/dt-bindings/memory/mediatek,mt8188-memory-port.h
new file mode 100644
index 000000000000..337ab11262af
--- /dev/null
+++ b/include/dt-bindings/memory/mediatek,mt8188-memory-port.h
@@ -0,0 +1,489 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chengci Xu <chengci.xu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MEDIATEK_MT8188_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MEDIATEK_MT8188_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * MM IOMMU larbs:
+ * As listed below, some larbs are split into several instances (for
+ * example, larb11 has larb11a/larb11b/larb11c), so the hardware larb
+ * indexes are not contiguous. We therefore reindex the larbs from a
+ * software point of view; see the sketch after the ID list below.
+ */
+#define SMI_L0_ID 0
+#define SMI_L1_ID 1
+#define SMI_L2_ID 2
+#define SMI_L3_ID 3
+#define SMI_L4_ID 4
+#define SMI_L5_ID 5
+#define SMI_L6_ID 6
+#define SMI_L7_ID 7
+#define SMI_L9_ID 8
+#define SMI_L10_ID 9
+#define SMI_L11A_ID 10
+#define SMI_L11B_ID 11
+#define SMI_L11C_ID 12
+#define SMI_L12_ID 13
+#define SMI_L13_ID 14
+#define SMI_L14_ID 15
+#define SMI_L15_ID 16
+#define SMI_L16A_ID 17
+#define SMI_L16B_ID 18
+#define SMI_L17A_ID 19
+#define SMI_L17B_ID 20
+#define SMI_L19_ID 21
+#define SMI_L21_ID 22
+#define SMI_L23_ID 23
+#define SMI_L27_ID 24
+#define SMI_L28_ID 25
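For illustration only, the software reindexing above can be read as a lookup table from software index back to hardware larb name. This C sketch merely mirrors the SMI_L*_ID defines above (the array name is hypothetical and it is not part of the binding):

/* Illustrative sketch: software larb index -> hardware larb name,
 * assuming the SMI_L*_ID defines above are in scope.
 */
static const char *const mt8188_sw_larb_names[] = {
	[SMI_L0_ID]   = "larb0",   [SMI_L1_ID]   = "larb1",
	[SMI_L2_ID]   = "larb2",   [SMI_L3_ID]   = "larb3",
	[SMI_L4_ID]   = "larb4",   [SMI_L5_ID]   = "larb5",
	[SMI_L6_ID]   = "larb6",   [SMI_L7_ID]   = "larb7",
	[SMI_L9_ID]   = "larb9",   [SMI_L10_ID]  = "larb10",
	[SMI_L11A_ID] = "larb11a", [SMI_L11B_ID] = "larb11b",
	[SMI_L11C_ID] = "larb11c", [SMI_L12_ID]  = "larb12",
	[SMI_L13_ID]  = "larb13",  [SMI_L14_ID]  = "larb14",
	[SMI_L15_ID]  = "larb15",  [SMI_L16A_ID] = "larb16a",
	[SMI_L16B_ID] = "larb16b", [SMI_L17A_ID] = "larb17a",
	[SMI_L17B_ID] = "larb17b", [SMI_L19_ID]  = "larb19",
	[SMI_L21_ID]  = "larb21",  [SMI_L23_ID]  = "larb23",
	[SMI_L27_ID]  = "larb27",  [SMI_L28_ID]  = "larb28",
};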
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space. We separate it into
+ * four ranges: 0 ~ 4G, 4G ~ 8G, 8G ~ 12G and 12G ~ 16G. The masters may
+ * be placed in any one of these regions, BUT:
+ * a) Make sure all the ports inside a larb are in one range.
+ * b) The iova of any master can NOT cross the 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping in this SoC:
+ *
+ * modules dma-address-region larbs-ports
+ * disp 0 ~ 4G larb0/1/2/3
+ * vcodec 4G ~ 8G larb19(21)[1]/21(22)/23
+ * cam/mdp 8G ~ 12G the other larbs.
+ * N/A 12G ~ 16G
+ * CCU0 0x24000_0000 ~ 0x243ff_ffff larb27(24): port 0/1
+ * CCU1 0x24400_0000 ~ 0x247ff_ffff larb27(24): port 2/3
+ *
+ * This SoC has two MM IOMMU HWs; the larbs are connected as follows:
+ * iommu-vdo: larb0/2/5/9/10/11A/11C/13/16B/17B/19/21
+ * iommu-vpp: larb1/3/4/6/7/11B/12/14/15/16A/17A/23/27
+ *
+ * [1]: This is larb19, but the index is 21 from the SW view.
+ */
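Constraint (b) above amounts to requiring that a master's whole IOVA window stays inside a single 4G-aligned range. A minimal C sketch of such a check (illustrative only; the helper name is hypothetical, and the real enforcement lives in the mtk-iommu driver):

#include <stdbool.h>
#include <stdint.h>

#define SZ_4G	(1ULL << 32)

/* True iff [iova, iova + size) does not cross a 4G/8G/12G boundary. */
static bool mtk_iova_window_ok(uint64_t iova, uint64_t size)
{
	if (!size)
		return false;
	return (iova / SZ_4G) == ((iova + size - 1) / SZ_4G);
}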
+
+/* MM IOMMU ports */
+/* LARB 0 -- VDO-0 */
+#define M4U_PORT_L0_DISP_RDMA1 MTK_M4U_ID(SMI_L0_ID, 0)
+#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_ID(SMI_L0_ID, 1)
+#define M4U_PORT_L0_DISP_OVL0_RDMA0 MTK_M4U_ID(SMI_L0_ID, 2)
+#define M4U_PORT_L0_DISP_OVL0_RDMA1 MTK_M4U_ID(SMI_L0_ID, 3)
+#define M4U_PORT_L0_DISP_OVL0_HDR MTK_M4U_ID(SMI_L0_ID, 4)
+#define M4U_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(SMI_L0_ID, 5)
+#define M4U_PORT_L0_DISP_FAKE_ENG0 MTK_M4U_ID(SMI_L0_ID, 6)
+
+/* LARB 1 -- VDO-0 */
+#define M4U_PORT_L1_DISP_RDMA0 MTK_M4U_ID(SMI_L1_ID, 0)
+#define M4U_PORT_L1_DISP_WDMA1 MTK_M4U_ID(SMI_L1_ID, 1)
+#define M4U_PORT_L1_DISP_OVL1_RDMA0 MTK_M4U_ID(SMI_L1_ID, 2)
+#define M4U_PORT_L1_DISP_OVL1_RDMA1 MTK_M4U_ID(SMI_L1_ID, 3)
+#define M4U_PORT_L1_DISP_OVL1_HDR MTK_M4U_ID(SMI_L1_ID, 4)
+#define M4U_PORT_L1_DISP_WROT0 MTK_M4U_ID(SMI_L1_ID, 5)
+#define M4U_PORT_L1_DISP_FAKE_ENG1 MTK_M4U_ID(SMI_L1_ID, 6)
+
+/* LARB 2 -- VDO-1 */
+#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_ID(SMI_L2_ID, 0)
+#define M4U_PORT_L2_MDP_RDMA2 MTK_M4U_ID(SMI_L2_ID, 1)
+#define M4U_PORT_L2_MDP_RDMA4 MTK_M4U_ID(SMI_L2_ID, 2)
+#define M4U_PORT_L2_MDP_RDMA6 MTK_M4U_ID(SMI_L2_ID, 3)
+#define M4U_PORT_L2_DISP_FAKE1 MTK_M4U_ID(SMI_L2_ID, 4)
+
+/* LARB 3 -- VDO-1 */
+#define M4U_PORT_L3_MDP_RDMA1 MTK_M4U_ID(SMI_L3_ID, 0)
+#define M4U_PORT_L3_MDP_RDMA3 MTK_M4U_ID(SMI_L3_ID, 1)
+#define M4U_PORT_L3_MDP_RDMA5 MTK_M4U_ID(SMI_L3_ID, 2)
+#define M4U_PORT_L3_MDP_RDMA7 MTK_M4U_ID(SMI_L3_ID, 3)
+#define M4U_PORT_L3_HDR_DS_SMI MTK_M4U_ID(SMI_L3_ID, 4)
+#define M4U_PORT_L3_HDR_ADL_SMI MTK_M4U_ID(SMI_L3_ID, 5)
+#define M4U_PORT_L3_DISP_FAKE1 MTK_M4U_ID(SMI_L3_ID, 6)
+
+/* LARB 4 -- VPP-0 */
+#define M4U_PORT_L4_MDP_RDMA MTK_M4U_ID(SMI_L4_ID, 0)
+#define M4U_PORT_L4_MDP_FG MTK_M4U_ID(SMI_L4_ID, 1)
+#define M4U_PORT_L4_MDP_OVL MTK_M4U_ID(SMI_L4_ID, 2)
+#define M4U_PORT_L4_MDP_WROT MTK_M4U_ID(SMI_L4_ID, 3)
+#define M4U_PORT_L4_FAKE_ENG MTK_M4U_ID(SMI_L4_ID, 4)
+#define M4U_PORT_L4_DISP_RDMA MTK_M4U_ID(SMI_L4_ID, 5)
+#define M4U_PORT_L4_DISP_WDMA MTK_M4U_ID(SMI_L4_ID, 6)
+
+/* LARB 5 -- VPP-1 */
+#define M4U_PORT_L5_SVPP1_MDP_RDMA MTK_M4U_ID(SMI_L5_ID, 0)
+#define M4U_PORT_L5_SVPP1_MDP_FG MTK_M4U_ID(SMI_L5_ID, 1)
+#define M4U_PORT_L5_SVPP1_MDP_OVL MTK_M4U_ID(SMI_L5_ID, 2)
+#define M4U_PORT_L5_SVPP1_MDP_WROT MTK_M4U_ID(SMI_L5_ID, 3)
+#define M4U_PORT_L5_SVPP2_MDP_RDMA MTK_M4U_ID(SMI_L5_ID, 4)
+#define M4U_PORT_L5_SVPP2_MDP_FG MTK_M4U_ID(SMI_L5_ID, 5)
+#define M4U_PORT_L5_SVPP2_MDP_WROT MTK_M4U_ID(SMI_L5_ID, 6)
+#define M4U_PORT_L5_LARB5_FAKE_ENG MTK_M4U_ID(SMI_L5_ID, 7)
+
+/* LARB 6 -- VPP-1 */
+#define M4U_PORT_L6_SVPP3_MDP_RDMA MTK_M4U_ID(SMI_L6_ID, 0)
+#define M4U_PORT_L6_SVPP3_MDP_FG MTK_M4U_ID(SMI_L6_ID, 1)
+#define M4U_PORT_L6_SVPP3_MDP_WROT MTK_M4U_ID(SMI_L6_ID, 2)
+#define M4U_PORT_L6_LARB6_FAKE_ENG MTK_M4U_ID(SMI_L6_ID, 3)
+
+/* LARB 7 -- WPE */
+#define M4U_PORT_L7_WPE_RDMA_0 MTK_M4U_ID(SMI_L7_ID, 0)
+#define M4U_PORT_L7_WPE_RDMA_1 MTK_M4U_ID(SMI_L7_ID, 1)
+#define M4U_PORT_L7_WPE_WDMA_0 MTK_M4U_ID(SMI_L7_ID, 2)
+
+/* LARB 9 -- IMG-M */
+#define M4U_PORT_L9_IMGI_T1_A MTK_M4U_ID(SMI_L9_ID, 0)
+#define M4U_PORT_L9_UFDI_T1_A MTK_M4U_ID(SMI_L9_ID, 1)
+#define M4U_PORT_L9_IMGBI_T1_A MTK_M4U_ID(SMI_L9_ID, 2)
+#define M4U_PORT_L9_IMGCI_T1_A MTK_M4U_ID(SMI_L9_ID, 3)
+#define M4U_PORT_L9_SMTI_T1_A MTK_M4U_ID(SMI_L9_ID, 4)
+#define M4U_PORT_L9_SMTI_T4_A MTK_M4U_ID(SMI_L9_ID, 5)
+#define M4U_PORT_L9_TNCSTI_T1_A MTK_M4U_ID(SMI_L9_ID, 6)
+#define M4U_PORT_L9_TNCSTI_T4_A MTK_M4U_ID(SMI_L9_ID, 7)
+#define M4U_PORT_L9_YUVO_T1_A MTK_M4U_ID(SMI_L9_ID, 8)
+#define M4U_PORT_L9_YUVBO_T1_A MTK_M4U_ID(SMI_L9_ID, 9)
+#define M4U_PORT_L9_YUVCO_T1_A MTK_M4U_ID(SMI_L9_ID, 10)
+#define M4U_PORT_L9_TIMGO_T1_A MTK_M4U_ID(SMI_L9_ID, 11)
+#define M4U_PORT_L9_YUVO_T2_A MTK_M4U_ID(SMI_L9_ID, 12)
+#define M4U_PORT_L9_YUVO_T5_A MTK_M4U_ID(SMI_L9_ID, 13)
+#define M4U_PORT_L9_IMGI_T1_B MTK_M4U_ID(SMI_L9_ID, 14)
+#define M4U_PORT_L9_IMGBI_T1_B MTK_M4U_ID(SMI_L9_ID, 15)
+#define M4U_PORT_L9_IMGCI_T1_B MTK_M4U_ID(SMI_L9_ID, 16)
+#define M4U_PORT_L9_SMTI_T4_B MTK_M4U_ID(SMI_L9_ID, 17)
+#define M4U_PORT_L9_TNCSO_T1_A MTK_M4U_ID(SMI_L9_ID, 18)
+#define M4U_PORT_L9_SMTO_T1_A MTK_M4U_ID(SMI_L9_ID, 19)
+#define M4U_PORT_L9_SMTO_T4_A MTK_M4U_ID(SMI_L9_ID, 20)
+#define M4U_PORT_L9_TNCSTO_T1_A MTK_M4U_ID(SMI_L9_ID, 21)
+#define M4U_PORT_L9_YUVO_T2_B MTK_M4U_ID(SMI_L9_ID, 22)
+#define M4U_PORT_L9_YUVO_T5_B MTK_M4U_ID(SMI_L9_ID, 23)
+#define M4U_PORT_L9_SMTO_T4_B MTK_M4U_ID(SMI_L9_ID, 24)
+
+/* LARB 10 -- IMG-D */
+#define M4U_PORT_L10_IMGI_D1 MTK_M4U_ID(SMI_L10_ID, 0)
+#define M4U_PORT_L10_IMGBI_D1 MTK_M4U_ID(SMI_L10_ID, 1)
+#define M4U_PORT_L10_IMGCI_D1 MTK_M4U_ID(SMI_L10_ID, 2)
+#define M4U_PORT_L10_IMGDI_D1 MTK_M4U_ID(SMI_L10_ID, 3)
+#define M4U_PORT_L10_DEPI_D1 MTK_M4U_ID(SMI_L10_ID, 4)
+#define M4U_PORT_L10_DMGI_D1 MTK_M4U_ID(SMI_L10_ID, 5)
+#define M4U_PORT_L10_SMTI_D1 MTK_M4U_ID(SMI_L10_ID, 6)
+#define M4U_PORT_L10_RECI_D1 MTK_M4U_ID(SMI_L10_ID, 7)
+#define M4U_PORT_L10_RECI_D1_N MTK_M4U_ID(SMI_L10_ID, 8)
+#define M4U_PORT_L10_TNRWI_D1 MTK_M4U_ID(SMI_L10_ID, 9)
+#define M4U_PORT_L10_TNRCI_D1 MTK_M4U_ID(SMI_L10_ID, 10)
+#define M4U_PORT_L10_TNRCI_D1_N MTK_M4U_ID(SMI_L10_ID, 11)
+#define M4U_PORT_L10_IMG4O_D1 MTK_M4U_ID(SMI_L10_ID, 12)
+#define M4U_PORT_L10_IMG4BO_D1 MTK_M4U_ID(SMI_L10_ID, 13)
+#define M4U_PORT_L10_SMTI_D8 MTK_M4U_ID(SMI_L10_ID, 14)
+#define M4U_PORT_L10_SMTO_D1 MTK_M4U_ID(SMI_L10_ID, 15)
+#define M4U_PORT_L10_TNRMO_D1 MTK_M4U_ID(SMI_L10_ID, 16)
+#define M4U_PORT_L10_TNRMO_D1_N MTK_M4U_ID(SMI_L10_ID, 17)
+#define M4U_PORT_L10_SMTO_D8 MTK_M4U_ID(SMI_L10_ID, 18)
+#define M4U_PORT_L10_DBGO_D1 MTK_M4U_ID(SMI_L10_ID, 19)
+
+/* LARB 11A -- IMG-D */
+#define M4U_PORT_L11A_WPE_RDMA_0 MTK_M4U_ID(SMI_L11A_ID, 0)
+#define M4U_PORT_L11A_WPE_RDMA_1 MTK_M4U_ID(SMI_L11A_ID, 1)
+#define M4U_PORT_L11A_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11A_ID, 2)
+#define M4U_PORT_L11A_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11A_ID, 3)
+#define M4U_PORT_L11A_WPE_CQ0 MTK_M4U_ID(SMI_L11A_ID, 4)
+#define M4U_PORT_L11A_WPE_CQ1 MTK_M4U_ID(SMI_L11A_ID, 5)
+#define M4U_PORT_L11A_PIMGI_P1 MTK_M4U_ID(SMI_L11A_ID, 6)
+#define M4U_PORT_L11A_PIMGBI_P1 MTK_M4U_ID(SMI_L11A_ID, 7)
+#define M4U_PORT_L11A_PIMGCI_P1 MTK_M4U_ID(SMI_L11A_ID, 8)
+#define M4U_PORT_L11A_IMGI_T1_C MTK_M4U_ID(SMI_L11A_ID, 9)
+#define M4U_PORT_L11A_IMGBI_T1_C MTK_M4U_ID(SMI_L11A_ID, 10)
+#define M4U_PORT_L11A_IMGCI_T1_C MTK_M4U_ID(SMI_L11A_ID, 11)
+#define M4U_PORT_L11A_SMTI_T1_C MTK_M4U_ID(SMI_L11A_ID, 12)
+#define M4U_PORT_L11A_SMTI_T4_C MTK_M4U_ID(SMI_L11A_ID, 13)
+#define M4U_PORT_L11A_SMTI_T6_C MTK_M4U_ID(SMI_L11A_ID, 14)
+#define M4U_PORT_L11A_YUVO_T1_C MTK_M4U_ID(SMI_L11A_ID, 15)
+#define M4U_PORT_L11A_YUVBO_T1_C MTK_M4U_ID(SMI_L11A_ID, 16)
+#define M4U_PORT_L11A_YUVCO_T1_C MTK_M4U_ID(SMI_L11A_ID, 17)
+#define M4U_PORT_L11A_WPE_WDMA_0 MTK_M4U_ID(SMI_L11A_ID, 18)
+#define M4U_PORT_L11A_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11A_ID, 19)
+#define M4U_PORT_L11A_WROT_P1 MTK_M4U_ID(SMI_L11A_ID, 20)
+#define M4U_PORT_L11A_TCCSO_P1 MTK_M4U_ID(SMI_L11A_ID, 21)
+#define M4U_PORT_L11A_TCCSI_P1 MTK_M4U_ID(SMI_L11A_ID, 22)
+#define M4U_PORT_L11A_TIMGO_T1_C MTK_M4U_ID(SMI_L11A_ID, 23)
+#define M4U_PORT_L11A_YUVO_T2_C MTK_M4U_ID(SMI_L11A_ID, 24)
+#define M4U_PORT_L11A_YUVO_T5_C MTK_M4U_ID(SMI_L11A_ID, 25)
+#define M4U_PORT_L11A_SMTO_T1_C MTK_M4U_ID(SMI_L11A_ID, 26)
+#define M4U_PORT_L11A_SMTO_T4_C MTK_M4U_ID(SMI_L11A_ID, 27)
+#define M4U_PORT_L11A_SMTO_T6_C MTK_M4U_ID(SMI_L11A_ID, 28)
+#define M4U_PORT_L11A_DBGO_T1_C MTK_M4U_ID(SMI_L11A_ID, 29)
+
+/* LARB 11B -- IMG-D */
+#define M4U_PORT_L11B_WPE_RDMA_0 MTK_M4U_ID(SMI_L11B_ID, 0)
+#define M4U_PORT_L11B_WPE_RDMA_1 MTK_M4U_ID(SMI_L11B_ID, 1)
+#define M4U_PORT_L11B_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11B_ID, 2)
+#define M4U_PORT_L11B_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11B_ID, 3)
+#define M4U_PORT_L11B_WPE_CQ0 MTK_M4U_ID(SMI_L11B_ID, 4)
+#define M4U_PORT_L11B_WPE_CQ1 MTK_M4U_ID(SMI_L11B_ID, 5)
+#define M4U_PORT_L11B_PIMGI_P1 MTK_M4U_ID(SMI_L11B_ID, 6)
+#define M4U_PORT_L11B_PIMGBI_P1 MTK_M4U_ID(SMI_L11B_ID, 7)
+#define M4U_PORT_L11B_PIMGCI_P1 MTK_M4U_ID(SMI_L11B_ID, 8)
+#define M4U_PORT_L11B_IMGI_T1_C MTK_M4U_ID(SMI_L11B_ID, 9)
+#define M4U_PORT_L11B_IMGBI_T1_C MTK_M4U_ID(SMI_L11B_ID, 10)
+#define M4U_PORT_L11B_IMGCI_T1_C MTK_M4U_ID(SMI_L11B_ID, 11)
+#define M4U_PORT_L11B_SMTI_T1_C MTK_M4U_ID(SMI_L11B_ID, 12)
+#define M4U_PORT_L11B_SMTI_T4_C MTK_M4U_ID(SMI_L11B_ID, 13)
+#define M4U_PORT_L11B_SMTI_T6_C MTK_M4U_ID(SMI_L11B_ID, 14)
+#define M4U_PORT_L11B_YUVO_T1_C MTK_M4U_ID(SMI_L11B_ID, 15)
+#define M4U_PORT_L11B_YUVBO_T1_C MTK_M4U_ID(SMI_L11B_ID, 16)
+#define M4U_PORT_L11B_YUVCO_T1_C MTK_M4U_ID(SMI_L11B_ID, 17)
+#define M4U_PORT_L11B_WPE_WDMA_0 MTK_M4U_ID(SMI_L11B_ID, 18)
+#define M4U_PORT_L11B_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11B_ID, 19)
+#define M4U_PORT_L11B_WROT_P1 MTK_M4U_ID(SMI_L11B_ID, 20)
+#define M4U_PORT_L11B_TCCSO_P1 MTK_M4U_ID(SMI_L11B_ID, 21)
+#define M4U_PORT_L11B_TCCSI_P1 MTK_M4U_ID(SMI_L11B_ID, 22)
+#define M4U_PORT_L11B_TIMGO_T1_C MTK_M4U_ID(SMI_L11B_ID, 23)
+#define M4U_PORT_L11B_YUVO_T2_C MTK_M4U_ID(SMI_L11B_ID, 24)
+#define M4U_PORT_L11B_YUVO_T5_C MTK_M4U_ID(SMI_L11B_ID, 25)
+#define M4U_PORT_L11B_SMTO_T1_C MTK_M4U_ID(SMI_L11B_ID, 26)
+#define M4U_PORT_L11B_SMTO_T4_C MTK_M4U_ID(SMI_L11B_ID, 27)
+#define M4U_PORT_L11B_SMTO_T6_C MTK_M4U_ID(SMI_L11B_ID, 28)
+#define M4U_PORT_L11B_DBGO_T1_C MTK_M4U_ID(SMI_L11B_ID, 29)
+
+/* LARB 11C -- IMG-D */
+#define M4U_PORT_L11C_WPE_RDMA_0 MTK_M4U_ID(SMI_L11C_ID, 0)
+#define M4U_PORT_L11C_WPE_RDMA_1 MTK_M4U_ID(SMI_L11C_ID, 1)
+#define M4U_PORT_L11C_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11C_ID, 2)
+#define M4U_PORT_L11C_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11C_ID, 3)
+#define M4U_PORT_L11C_WPE_CQ0 MTK_M4U_ID(SMI_L11C_ID, 4)
+#define M4U_PORT_L11C_WPE_CQ1 MTK_M4U_ID(SMI_L11C_ID, 5)
+#define M4U_PORT_L11C_PIMGI_P1 MTK_M4U_ID(SMI_L11C_ID, 6)
+#define M4U_PORT_L11C_PIMGBI_P1 MTK_M4U_ID(SMI_L11C_ID, 7)
+#define M4U_PORT_L11C_PIMGCI_P1 MTK_M4U_ID(SMI_L11C_ID, 8)
+#define M4U_PORT_L11C_IMGI_T1_C MTK_M4U_ID(SMI_L11C_ID, 9)
+#define M4U_PORT_L11C_IMGBI_T1_C MTK_M4U_ID(SMI_L11C_ID, 10)
+#define M4U_PORT_L11C_IMGCI_T1_C MTK_M4U_ID(SMI_L11C_ID, 11)
+#define M4U_PORT_L11C_SMTI_T1_C MTK_M4U_ID(SMI_L11C_ID, 12)
+#define M4U_PORT_L11C_SMTI_T4_C MTK_M4U_ID(SMI_L11C_ID, 13)
+#define M4U_PORT_L11C_SMTI_T6_C MTK_M4U_ID(SMI_L11C_ID, 14)
+#define M4U_PORT_L11C_YUVO_T1_C MTK_M4U_ID(SMI_L11C_ID, 15)
+#define M4U_PORT_L11C_YUVBO_T1_C MTK_M4U_ID(SMI_L11C_ID, 16)
+#define M4U_PORT_L11C_YUVCO_T1_C MTK_M4U_ID(SMI_L11C_ID, 17)
+#define M4U_PORT_L11C_WPE_WDMA_0 MTK_M4U_ID(SMI_L11C_ID, 18)
+#define M4U_PORT_L11C_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11C_ID, 19)
+#define M4U_PORT_L11C_WROT_P1 MTK_M4U_ID(SMI_L11C_ID, 20)
+#define M4U_PORT_L11C_TCCSO_P1 MTK_M4U_ID(SMI_L11C_ID, 21)
+#define M4U_PORT_L11C_TCCSI_P1 MTK_M4U_ID(SMI_L11C_ID, 22)
+#define M4U_PORT_L11C_TIMGO_T1_C MTK_M4U_ID(SMI_L11C_ID, 23)
+#define M4U_PORT_L11C_YUVO_T2_C MTK_M4U_ID(SMI_L11C_ID, 24)
+#define M4U_PORT_L11C_YUVO_T5_C MTK_M4U_ID(SMI_L11C_ID, 25)
+#define M4U_PORT_L11C_SMTO_T1_C MTK_M4U_ID(SMI_L11C_ID, 26)
+#define M4U_PORT_L11C_SMTO_T4_C MTK_M4U_ID(SMI_L11C_ID, 27)
+#define M4U_PORT_L11C_SMTO_T6_C MTK_M4U_ID(SMI_L11C_ID, 28)
+#define M4U_PORT_L11C_DBGO_T1_C MTK_M4U_ID(SMI_L11C_ID, 29)
+
+/* LARB 12 -- IPE */
+#define M4U_PORT_L12_FDVT_RDA_0 MTK_M4U_ID(SMI_L12_ID, 0)
+#define M4U_PORT_L12_FDVT_RDB_0 MTK_M4U_ID(SMI_L12_ID, 1)
+#define M4U_PORT_L12_FDVT_WRA_0 MTK_M4U_ID(SMI_L12_ID, 2)
+#define M4U_PORT_L12_FDVT_WRB_0 MTK_M4U_ID(SMI_L12_ID, 3)
+#define M4U_PORT_L12_ME_RDMA MTK_M4U_ID(SMI_L12_ID, 4)
+#define M4U_PORT_L12_ME_WDMA MTK_M4U_ID(SMI_L12_ID, 5)
+#define M4U_PORT_L12_DVS_RDMA MTK_M4U_ID(SMI_L12_ID, 6)
+#define M4U_PORT_L12_DVS_WDMA MTK_M4U_ID(SMI_L12_ID, 7)
+#define M4U_PORT_L12_DVP_RDMA MTK_M4U_ID(SMI_L12_ID, 8)
+#define M4U_PORT_L12_DVP_WDMA MTK_M4U_ID(SMI_L12_ID, 9)
+#define M4U_PORT_L12_FDVT_2ND_RDA_0 MTK_M4U_ID(SMI_L12_ID, 10)
+#define M4U_PORT_L12_FDVT_2ND_RDB_0 MTK_M4U_ID(SMI_L12_ID, 11)
+#define M4U_PORT_L12_FDVT_2ND_WRA_0 MTK_M4U_ID(SMI_L12_ID, 12)
+#define M4U_PORT_L12_FDVT_2ND_WRB_0 MTK_M4U_ID(SMI_L12_ID, 13)
+#define M4U_PORT_L12_DHZEI_E1 MTK_M4U_ID(SMI_L12_ID, 14)
+#define M4U_PORT_L12_DHZEO_E1 MTK_M4U_ID(SMI_L12_ID, 15)
+
+/* LARB 13 -- CAM-1 */
+#define M4U_PORT_L13_CAMSV_CQI_E1 MTK_M4U_ID(SMI_L13_ID, 0)
+#define M4U_PORT_L13_CAMSV_CQI_E2 MTK_M4U_ID(SMI_L13_ID, 1)
+#define M4U_PORT_L13_GCAMSV_A_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 2)
+#define M4U_PORT_L13_GCAMSV_C_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 3)
+#define M4U_PORT_L13_GCAMSV_A_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 4)
+#define M4U_PORT_L13_GCAMSV_C_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 5)
+#define M4U_PORT_L13_PDAI_A_0 MTK_M4U_ID(SMI_L13_ID, 6)
+#define M4U_PORT_L13_PDAI_A_1 MTK_M4U_ID(SMI_L13_ID, 7)
+#define M4U_PORT_L13_CAMSV_CQI_B_E1 MTK_M4U_ID(SMI_L13_ID, 8)
+#define M4U_PORT_L13_CAMSV_CQI_B_E2 MTK_M4U_ID(SMI_L13_ID, 9)
+#define M4U_PORT_L13_CAMSV_CQI_C_E1 MTK_M4U_ID(SMI_L13_ID, 10)
+#define M4U_PORT_L13_CAMSV_CQI_C_E2 MTK_M4U_ID(SMI_L13_ID, 11)
+#define M4U_PORT_L13_GCAMSV_E_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 12)
+#define M4U_PORT_L13_GCAMSV_E_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 13)
+#define M4U_PORT_L13_GCAMSV_A_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 14)
+#define M4U_PORT_L13_GCAMSV_C_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 15)
+#define M4U_PORT_L13_GCAMSV_A_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 16)
+#define M4U_PORT_L13_GCAMSV_C_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 17)
+#define M4U_PORT_L13_GCAMSV_E_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 18)
+#define M4U_PORT_L13_GCAMSV_E_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 19)
+#define M4U_PORT_L13_GCAMSV_G_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 20)
+#define M4U_PORT_L13_GCAMSV_G_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 21)
+#define M4U_PORT_L13_PDAO_A MTK_M4U_ID(SMI_L13_ID, 22)
+#define M4U_PORT_L13_PDAO_C MTK_M4U_ID(SMI_L13_ID, 23)
+
+/* LARB 14 -- CAM-1 */
+#define M4U_PORT_L14_GCAMSV_B_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 0)
+#define M4U_PORT_L14_GCAMSV_B_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 1)
+#define M4U_PORT_L14_SCAMSV_A_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 2)
+#define M4U_PORT_L14_SCAMSV_A_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 3)
+#define M4U_PORT_L14_SCAMSV_B_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 4)
+#define M4U_PORT_L14_SCAMSV_B_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 5)
+#define M4U_PORT_L14_PDAI_B_0 MTK_M4U_ID(SMI_L14_ID, 6)
+#define M4U_PORT_L14_PDAI_B_1 MTK_M4U_ID(SMI_L14_ID, 7)
+#define M4U_PORT_L14_GCAMSV_D_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 8)
+#define M4U_PORT_L14_GCAMSV_D_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 9)
+#define M4U_PORT_L14_GCAMSV_F_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 10)
+#define M4U_PORT_L14_GCAMSV_F_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 11)
+#define M4U_PORT_L14_GCAMSV_H_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 12)
+#define M4U_PORT_L14_GCAMSV_H_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 13)
+#define M4U_PORT_L14_GCAMSV_B_UFEO_1 MTK_M4U_ID(SMI_L14_ID, 14)
+#define M4U_PORT_L14_GCAMSV_B_UFEO_2 MTK_M4U_ID(SMI_L14_ID, 15)
+#define M4U_PORT_L14_GCAMSV_D_UFEO_1 MTK_M4U_ID(SMI_L14_ID, 16)
+#define M4U_PORT_L14_GCAMSV_D_UFEO_2 MTK_M4U_ID(SMI_L14_ID, 17)
+#define M4U_PORT_L14_PDAO_B MTK_M4U_ID(SMI_L14_ID, 18)
+#define M4U_PORT_L14_IPUI MTK_M4U_ID(SMI_L14_ID, 19)
+#define M4U_PORT_L14_IPUO MTK_M4U_ID(SMI_L14_ID, 20)
+#define M4U_PORT_L14_IPU3O MTK_M4U_ID(SMI_L14_ID, 21)
+#define M4U_PORT_L14_FAKE MTK_M4U_ID(SMI_L14_ID, 22)
+
+/* LARB 15 -- IMG-D */
+#define M4U_PORT_L15_VIPI_D1 MTK_M4U_ID(SMI_L15_ID, 0)
+#define M4U_PORT_L15_VIPBI_D1 MTK_M4U_ID(SMI_L15_ID, 1)
+#define M4U_PORT_L15_SMTI_D6 MTK_M4U_ID(SMI_L15_ID, 2)
+#define M4U_PORT_L15_TNCSTI_D1 MTK_M4U_ID(SMI_L15_ID, 3)
+#define M4U_PORT_L15_TNCSTI_D4 MTK_M4U_ID(SMI_L15_ID, 4)
+#define M4U_PORT_L15_SMTI_D4 MTK_M4U_ID(SMI_L15_ID, 5)
+#define M4U_PORT_L15_IMG3O_D1 MTK_M4U_ID(SMI_L15_ID, 6)
+#define M4U_PORT_L15_IMG3BO_D1 MTK_M4U_ID(SMI_L15_ID, 7)
+#define M4U_PORT_L15_IMG3CO_D1 MTK_M4U_ID(SMI_L15_ID, 8)
+#define M4U_PORT_L15_IMG2O_D1 MTK_M4U_ID(SMI_L15_ID, 9)
+#define M4U_PORT_L15_SMTI_D9 MTK_M4U_ID(SMI_L15_ID, 10)
+#define M4U_PORT_L15_SMTO_D4 MTK_M4U_ID(SMI_L15_ID, 11)
+#define M4U_PORT_L15_FEO_D1 MTK_M4U_ID(SMI_L15_ID, 12)
+#define M4U_PORT_L15_TNCSO_D1 MTK_M4U_ID(SMI_L15_ID, 13)
+#define M4U_PORT_L15_TNCSTO_D1 MTK_M4U_ID(SMI_L15_ID, 14)
+#define M4U_PORT_L15_SMTO_D6 MTK_M4U_ID(SMI_L15_ID, 15)
+#define M4U_PORT_L15_SMTO_D9 MTK_M4U_ID(SMI_L15_ID, 16)
+#define M4U_PORT_L15_TNCO_D1 MTK_M4U_ID(SMI_L15_ID, 17)
+#define M4U_PORT_L15_TNCO_D1_N MTK_M4U_ID(SMI_L15_ID, 18)
+
+/* LARB 16A -- CAM */
+#define M4U_PORT_L16A_IMGO_R1 MTK_M4U_ID(SMI_L16A_ID, 0)
+#define M4U_PORT_L16A_CQI_R1 MTK_M4U_ID(SMI_L16A_ID, 1)
+#define M4U_PORT_L16A_CQI_R2 MTK_M4U_ID(SMI_L16A_ID, 2)
+#define M4U_PORT_L16A_BPCI_R1 MTK_M4U_ID(SMI_L16A_ID, 3)
+#define M4U_PORT_L16A_LSCI_R1 MTK_M4U_ID(SMI_L16A_ID, 4)
+#define M4U_PORT_L16A_RAWI_R2 MTK_M4U_ID(SMI_L16A_ID, 5)
+#define M4U_PORT_L16A_RAWI_R3 MTK_M4U_ID(SMI_L16A_ID, 6)
+#define M4U_PORT_L16A_UFDI_R2 MTK_M4U_ID(SMI_L16A_ID, 7)
+#define M4U_PORT_L16A_UFDI_R3 MTK_M4U_ID(SMI_L16A_ID, 8)
+#define M4U_PORT_L16A_RAWI_R4 MTK_M4U_ID(SMI_L16A_ID, 9)
+#define M4U_PORT_L16A_RAWI_R5 MTK_M4U_ID(SMI_L16A_ID, 10)
+#define M4U_PORT_L16A_AAI_R1 MTK_M4U_ID(SMI_L16A_ID, 11)
+#define M4U_PORT_L16A_UFDI_R5 MTK_M4U_ID(SMI_L16A_ID, 12)
+#define M4U_PORT_L16A_FHO_R1 MTK_M4U_ID(SMI_L16A_ID, 13)
+#define M4U_PORT_L16A_AAO_R1 MTK_M4U_ID(SMI_L16A_ID, 14)
+#define M4U_PORT_L16A_TSFSO_R1 MTK_M4U_ID(SMI_L16A_ID, 15)
+#define M4U_PORT_L16A_FLKO_R1 MTK_M4U_ID(SMI_L16A_ID, 16)
+
+/* LARB 16B -- CAM */
+#define M4U_PORT_L16B_IMGO_R1 MTK_M4U_ID(SMI_L16B_ID, 0)
+#define M4U_PORT_L16B_CQI_R1 MTK_M4U_ID(SMI_L16B_ID, 1)
+#define M4U_PORT_L16B_CQI_R2 MTK_M4U_ID(SMI_L16B_ID, 2)
+#define M4U_PORT_L16B_BPCI_R1 MTK_M4U_ID(SMI_L16B_ID, 3)
+#define M4U_PORT_L16B_LSCI_R1 MTK_M4U_ID(SMI_L16B_ID, 4)
+#define M4U_PORT_L16B_RAWI_R2 MTK_M4U_ID(SMI_L16B_ID, 5)
+#define M4U_PORT_L16B_RAWI_R3 MTK_M4U_ID(SMI_L16B_ID, 6)
+#define M4U_PORT_L16B_UFDI_R2 MTK_M4U_ID(SMI_L16B_ID, 7)
+#define M4U_PORT_L16B_UFDI_R3 MTK_M4U_ID(SMI_L16B_ID, 8)
+#define M4U_PORT_L16B_RAWI_R4 MTK_M4U_ID(SMI_L16B_ID, 9)
+#define M4U_PORT_L16B_RAWI_R5 MTK_M4U_ID(SMI_L16B_ID, 10)
+#define M4U_PORT_L16B_AAI_R1 MTK_M4U_ID(SMI_L16B_ID, 11)
+#define M4U_PORT_L16B_UFDI_R5 MTK_M4U_ID(SMI_L16B_ID, 12)
+#define M4U_PORT_L16B_FHO_R1 MTK_M4U_ID(SMI_L16B_ID, 13)
+#define M4U_PORT_L16B_AAO_R1 MTK_M4U_ID(SMI_L16B_ID, 14)
+#define M4U_PORT_L16B_TSFSO_R1 MTK_M4U_ID(SMI_L16B_ID, 15)
+#define M4U_PORT_L16B_FLKO_R1 MTK_M4U_ID(SMI_L16B_ID, 16)
+
+/* LARB 17A -- CAM */
+#define M4U_PORT_L17A_YUVO_R1 MTK_M4U_ID(SMI_L17A_ID, 0)
+#define M4U_PORT_L17A_YUVO_R3 MTK_M4U_ID(SMI_L17A_ID, 1)
+#define M4U_PORT_L17A_YUVCO_R1 MTK_M4U_ID(SMI_L17A_ID, 2)
+#define M4U_PORT_L17A_YUVO_R2 MTK_M4U_ID(SMI_L17A_ID, 3)
+#define M4U_PORT_L17A_RZH1N2TO_R1 MTK_M4U_ID(SMI_L17A_ID, 4)
+#define M4U_PORT_L17A_DRZS4NO_R1 MTK_M4U_ID(SMI_L17A_ID, 5)
+#define M4U_PORT_L17A_TNCSO_R1 MTK_M4U_ID(SMI_L17A_ID, 6)
+
+/* LARB 17B -- CAM */
+#define M4U_PORT_L17B_YUVO_R1 MTK_M4U_ID(SMI_L17B_ID, 0)
+#define M4U_PORT_L17B_YUVO_R3 MTK_M4U_ID(SMI_L17B_ID, 1)
+#define M4U_PORT_L17B_YUVCO_R1 MTK_M4U_ID(SMI_L17B_ID, 2)
+#define M4U_PORT_L17B_YUVO_R2 MTK_M4U_ID(SMI_L17B_ID, 3)
+#define M4U_PORT_L17B_RZH1N2TO_R1 MTK_M4U_ID(SMI_L17B_ID, 4)
+#define M4U_PORT_L17B_DRZS4NO_R1 MTK_M4U_ID(SMI_L17B_ID, 5)
+#define M4U_PORT_L17B_TNCSO_R1 MTK_M4U_ID(SMI_L17B_ID, 6)
+
+/* LARB 19 -- VENC */
+#define M4U_PORT_L19_VENC_RCPU MTK_M4U_ID(SMI_L19_ID, 0)
+#define M4U_PORT_L19_VENC_REC MTK_M4U_ID(SMI_L19_ID, 1)
+#define M4U_PORT_L19_VENC_BSDMA MTK_M4U_ID(SMI_L19_ID, 2)
+#define M4U_PORT_L19_VENC_SV_COMV MTK_M4U_ID(SMI_L19_ID, 3)
+#define M4U_PORT_L19_VENC_RD_COMV MTK_M4U_ID(SMI_L19_ID, 4)
+#define M4U_PORT_L19_VENC_NBM_RDMA MTK_M4U_ID(SMI_L19_ID, 5)
+#define M4U_PORT_L19_VENC_NBM_RDMA_LITE MTK_M4U_ID(SMI_L19_ID, 6)
+#define M4U_PORT_L19_JPGENC_Y_RDMA MTK_M4U_ID(SMI_L19_ID, 7)
+#define M4U_PORT_L19_JPGENC_C_RDMA MTK_M4U_ID(SMI_L19_ID, 8)
+#define M4U_PORT_L19_JPGENC_Q_TABLE MTK_M4U_ID(SMI_L19_ID, 9)
+#define M4U_PORT_L19_VENC_SUB_W_LUMA MTK_M4U_ID(SMI_L19_ID, 10)
+#define M4U_PORT_L19_VENC_FCS_NBM_RDMA MTK_M4U_ID(SMI_L19_ID, 11)
+#define M4U_PORT_L19_JPGENC_BSDMA MTK_M4U_ID(SMI_L19_ID, 12)
+#define M4U_PORT_L19_JPGDEC_WDMA_0 MTK_M4U_ID(SMI_L19_ID, 13)
+#define M4U_PORT_L19_JPGDEC_BSDMA_0 MTK_M4U_ID(SMI_L19_ID, 14)
+#define M4U_PORT_L19_VENC_NBM_WDMA MTK_M4U_ID(SMI_L19_ID, 15)
+#define M4U_PORT_L19_VENC_NBM_WDMA_LITE MTK_M4U_ID(SMI_L19_ID, 16)
+#define M4U_PORT_L19_VENC_FCS_NBM_WDMA MTK_M4U_ID(SMI_L19_ID, 17)
+#define M4U_PORT_L19_JPGDEC_WDMA_1 MTK_M4U_ID(SMI_L19_ID, 18)
+#define M4U_PORT_L19_JPGDEC_BSDMA_1 MTK_M4U_ID(SMI_L19_ID, 19)
+#define M4U_PORT_L19_JPGDEC_HUFF_OFFSET_1 MTK_M4U_ID(SMI_L19_ID, 20)
+#define M4U_PORT_L19_JPGDEC_HUFF_OFFSET_0 MTK_M4U_ID(SMI_L19_ID, 21)
+#define M4U_PORT_L19_VENC_CUR_LUMA MTK_M4U_ID(SMI_L19_ID, 22)
+#define M4U_PORT_L19_VENC_CUR_CHROMA MTK_M4U_ID(SMI_L19_ID, 23)
+#define M4U_PORT_L19_VENC_REF_LUMA MTK_M4U_ID(SMI_L19_ID, 24)
+#define M4U_PORT_L19_VENC_REF_CHROMA MTK_M4U_ID(SMI_L19_ID, 25)
+#define M4U_PORT_L19_VENC_SUB_R_LUMA MTK_M4U_ID(SMI_L19_ID, 26)
+
+/* LARB 21 -- VDEC-CORE0 */
+#define M4U_PORT_L21_HW_VDEC_MC_EXT MTK_M4U_ID(SMI_L21_ID, 0)
+#define M4U_PORT_L21_HW_VDEC_UFO_EXT MTK_M4U_ID(SMI_L21_ID, 1)
+#define M4U_PORT_L21_HW_VDEC_PP_EXT MTK_M4U_ID(SMI_L21_ID, 2)
+#define M4U_PORT_L21_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(SMI_L21_ID, 3)
+#define M4U_PORT_L21_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(SMI_L21_ID, 4)
+#define M4U_PORT_L21_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(SMI_L21_ID, 5)
+#define M4U_PORT_L21_HW_VDEC_TILE_EXT MTK_M4U_ID(SMI_L21_ID, 6)
+#define M4U_PORT_L21_HW_VDEC_VLD_EXT MTK_M4U_ID(SMI_L21_ID, 7)
+#define M4U_PORT_L21_HW_VDEC_VLD2_EXT MTK_M4U_ID(SMI_L21_ID, 8)
+#define M4U_PORT_L21_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(SMI_L21_ID, 9)
+#define M4U_PORT_L21_HW_VDEC_UFO_EXT_C MTK_M4U_ID(SMI_L21_ID, 10)
+
+/* LARB 23 -- VDEC-SOC */
+#define M4U_PORT_L23_HW_VDEC_LAT0_VLD_EXT MTK_M4U_ID(SMI_L23_ID, 0)
+#define M4U_PORT_L23_HW_VDEC_LAT0_VLD2_EXT MTK_M4U_ID(SMI_L23_ID, 1)
+#define M4U_PORT_L23_HW_VDEC_LAT0_AVC_MV_EXT MTK_M4U_ID(SMI_L23_ID, 2)
+#define M4U_PORT_L23_HW_VDEC_LAT0_PRED_RD_EXT MTK_M4U_ID(SMI_L23_ID, 3)
+#define M4U_PORT_L23_HW_VDEC_LAT0_TILE_EXT MTK_M4U_ID(SMI_L23_ID, 4)
+#define M4U_PORT_L23_HW_VDEC_LAT0_WDMA_EXT MTK_M4U_ID(SMI_L23_ID, 5)
+#define M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(SMI_L23_ID, 6)
+#define M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT_C MTK_M4U_ID(SMI_L23_ID, 7)
+#define M4U_PORT_L23_HW_VDEC_MC_EXT_C MTK_M4U_ID(SMI_L23_ID, 8)
+
+/* LARB 27 -- CCU */
+#define M4U_PORT_L27_CCUI MTK_M4U_ID(SMI_L27_ID, 0)
+#define M4U_PORT_L27_CCUO MTK_M4U_ID(SMI_L27_ID, 1)
+#define M4U_PORT_L27_CCUI2 MTK_M4U_ID(SMI_L27_ID, 2)
+#define M4U_PORT_L27_CCUO2 MTK_M4U_ID(SMI_L27_ID, 3)
+
+/* LARB 28 -- AXI-CCU */
+#define M4U_PORT_L28_CCU_AXI_0 MTK_M4U_ID(SMI_L28_ID, 0)
+
+/* infra/peri */
+#define IFR_IOMMU_PORT_PCIE_0 MTK_IFAIOMMU_PERI_ID(0)
+
+#endif
diff --git a/include/dt-bindings/memory/mediatek,mt8189-memory-port.h b/include/dt-bindings/memory/mediatek,mt8189-memory-port.h
new file mode 100644
index 000000000000..849fead3d0f7
--- /dev/null
+++ b/include/dt-bindings/memory/mediatek,mt8189-memory-port.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Author: Zhengnan Chen <zhengnan.chen@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MEDIATEK_MT8189_MEMORY_PORT_H_
+#define _DT_BINDINGS_MEMORY_MEDIATEK_MT8189_MEMORY_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+#define SMI_L0_ID (0)
+#define SMI_L1_ID (1)
+#define SMI_L2_ID (2)
+#define SMI_L4_ID (3)
+#define SMI_L7_ID (4)
+#define SMI_L9_ID (5)
+#define SMI_L11_ID (6)
+#define SMI_L13_ID (7)
+#define SMI_L14_ID (8)
+#define SMI_L16_ID (9)
+#define SMI_L17_ID (10)
+#define SMI_L19_ID (11)
+#define SMI_L20_ID (12)
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space. We separate it into
+ * four ranges: 0 ~ 4G, 4G ~ 8G, 8G ~ 12G and 12G ~ 16G. The masters may
+ * be placed in any one of these regions, BUT:
+ * a) Make sure all the ports inside a larb are in one range.
+ * b) The iova of any master can NOT cross the 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping in this SoC:
+ *
+ * modules dma-address-region larbs-ports
+ * disp/mdp 0 ~ 4G larb0/1/2
+ * vcodec 4G ~ 8G larb4/7
+ * imgsys/cam/ipesys 8G ~ 12G the other larbs.
+ * N/A 12G ~ 16G
+ */
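Each port define below packs a (larb, port) pair into a single cell via MTK_M4U_ID(). As a rough sketch of the decode, assuming the common (larb << 5 | port) encoding from mtk-memory-port.h (treat the exact bit widths as an assumption):

#include <stdio.h>

/* Assumed encoding, mirroring <dt-bindings/memory/mtk-memory-port.h>;
 * the 5-bit port field is an assumption for this sketch.
 */
#define MTK_M4U_ID(larb, port)	(((larb) << 5) | (port))
#define MTK_M4U_TO_LARB(id)	((id) >> 5)
#define MTK_M4U_TO_PORT(id)	((id) & 0x1f)

int main(void)
{
	unsigned int id = MTK_M4U_ID(4, 2);	/* e.g. SMI_L7_ID, port 2 */

	printf("larb=%u port=%u\n", MTK_M4U_TO_LARB(id), MTK_M4U_TO_PORT(id));
	return 0;
}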
+
+/* Larb0 -- disp */
+#define M4U_L0_P0_DISP_OVL0_4L_HDR MTK_M4U_ID(SMI_L0_ID, 0)
+#define M4U_L0_P1_DISP_OVL0_4L_RDMA0 MTK_M4U_ID(SMI_L0_ID, 1)
+#define M4U_L0_P2_DISP_OVL1_4L_RDMA1 MTK_M4U_ID(SMI_L0_ID, 2)
+#define M4U_L0_P3_DISP_OVL0_4L_RDMA2 MTK_M4U_ID(SMI_L0_ID, 3)
+#define M4U_L0_P4_DISP_OVL1_4L_RDMA3 MTK_M4U_ID(SMI_L0_ID, 4)
+#define M4U_L0_P5_DISP_RDMA0 MTK_M4U_ID(SMI_L0_ID, 5)
+#define M4U_L0_P6_DISP_WDMA0 MTK_M4U_ID(SMI_L0_ID, 6)
+#define M4U_L0_P7_DISP_FAKE_ENG0 MTK_M4U_ID(SMI_L0_ID, 7)
+
+/* Larb1 -- disp */
+#define M4U_L1_P0_DISP_OVL1_4L_HDR MTK_M4U_ID(SMI_L1_ID, 0)
+#define M4U_L1_P1_DISP_OVL1_4L_RDMA0 MTK_M4U_ID(SMI_L1_ID, 1)
+#define M4U_L1_P2_DISP_OVL0_4L_RDMA1 MTK_M4U_ID(SMI_L1_ID, 2)
+#define M4U_L1_P3_DISP_OVL1_4L_RDMA2 MTK_M4U_ID(SMI_L1_ID, 3)
+#define M4U_L1_P4_DISP_OVL0_4L_RDMA3 MTK_M4U_ID(SMI_L1_ID, 4)
+#define M4U_L1_P5_DISP_RDMA1 MTK_M4U_ID(SMI_L1_ID, 5)
+#define M4U_L1_P6_DISP_WDMA1 MTK_M4U_ID(SMI_L1_ID, 6)
+#define M4U_L1_P7_DISP_FAKE_ENG1 MTK_M4U_ID(SMI_L1_ID, 7)
+
+/* Larb2 -- mmlsys(mdp) */
+#define M4U_L2_P0_MDP_RDMA0 MTK_M4U_ID(SMI_L2_ID, 0)
+#define M4U_L2_P1_MDP_RDMA1 MTK_M4U_ID(SMI_L2_ID, 1)
+#define M4U_L2_P2_MDP_WROT0 MTK_M4U_ID(SMI_L2_ID, 2)
+#define M4U_L2_P3_MDP_WROT1 MTK_M4U_ID(SMI_L2_ID, 3)
+#define M4U_L2_P4_MDP_DUMMY0 MTK_M4U_ID(SMI_L2_ID, 4)
+#define M4U_L2_P5_MDP_DUMMY1 MTK_M4U_ID(SMI_L2_ID, 5)
+#define M4U_L2_P6_MDP_RDMA2 MTK_M4U_ID(SMI_L2_ID, 6)
+#define M4U_L2_P7_MDP_RDMA3 MTK_M4U_ID(SMI_L2_ID, 7)
+#define M4U_L2_P8_MDP_WROT2 MTK_M4U_ID(SMI_L2_ID, 8)
+#define M4U_L2_P9_MDP_WROT3 MTK_M4U_ID(SMI_L2_ID, 9)
+#define M4U_L2_P10_DISP_FAKE0 MTK_M4U_ID(SMI_L2_ID, 10)
+
+/* Larb3: null */
+
+/* Larb4 -- vdec */
+#define M4U_L4_P0_HW_VDEC_MC_EXT MTK_M4U_ID(SMI_L4_ID, 0)
+#define M4U_L4_P1_HW_VDEC_UFO_EXT MTK_M4U_ID(SMI_L4_ID, 1)
+#define M4U_L4_P2_HW_VDEC_PP_EXT MTK_M4U_ID(SMI_L4_ID, 2)
+#define M4U_L4_P3_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(SMI_L4_ID, 3)
+#define M4U_L4_P4_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(SMI_L4_ID, 4)
+#define M4U_L4_P5_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(SMI_L4_ID, 5)
+#define M4U_L4_P6_HW_VDEC_TILE_EXT MTK_M4U_ID(SMI_L4_ID, 6)
+#define M4U_L4_P7_HW_VDEC_VLD_EXT MTK_M4U_ID(SMI_L4_ID, 7)
+#define M4U_L4_P8_HW_VDEC_VLD2_EXT MTK_M4U_ID(SMI_L4_ID, 8)
+#define M4U_L4_P9_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(SMI_L4_ID, 9)
+#define M4U_L4_P10_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(SMI_L4_ID, 10)
+#define M4U_L4_P11_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(SMI_L4_ID, 11)
+
+/* Larb5: null */
+
+/* Larb6: null */
+
+/* Larb7 -- venc */
+#define M4U_L7_P0_VENC_RCPU MTK_M4U_ID(SMI_L7_ID, 0)
+#define M4U_L7_P1_VENC_REC MTK_M4U_ID(SMI_L7_ID, 1)
+#define M4U_L7_P2_VENC_BSDMA MTK_M4U_ID(SMI_L7_ID, 2)
+#define M4U_L7_P3_VENC_SV_COMV MTK_M4U_ID(SMI_L7_ID, 3)
+#define M4U_L7_P4_VENC_RD_COMV MTK_M4U_ID(SMI_L7_ID, 4)
+#define M4U_L7_P5_JPGENC_Y_RDMA MTK_M4U_ID(SMI_L7_ID, 5)
+#define M4U_L7_P6_JPGENC_C_RDMA MTK_M4U_ID(SMI_L7_ID, 6)
+#define M4U_L7_P7_JPGENC_Q_RDMA MTK_M4U_ID(SMI_L7_ID, 7)
+#define M4U_L7_P8_VENC_SUB_W_LUMA MTK_M4U_ID(SMI_L7_ID, 8)
+#define M4U_L7_P9_JPGENC_BSDMA MTK_M4U_ID(SMI_L7_ID, 9)
+#define M4U_L7_P10_VENC_CUR_LUMA MTK_M4U_ID(SMI_L7_ID, 10)
+#define M4U_L7_P11_VENC_CUR_CHROMA MTK_M4U_ID(SMI_L7_ID, 11)
+#define M4U_L7_P12_VENC_REF_LUMA MTK_M4U_ID(SMI_L7_ID, 12)
+#define M4U_L7_P13_VENC_REF_CHROMA MTK_M4U_ID(SMI_L7_ID, 13)
+#define M4U_L7_P14_VENC_SUB_R_LUMA MTK_M4U_ID(SMI_L7_ID, 14)
+#define M4U_L7_P15_JPGDEC_WDMA MTK_M4U_ID(SMI_L7_ID, 15)
+#define M4U_L7_P16_JPGDEC_BSDMA MTK_M4U_ID(SMI_L7_ID, 16)
+#define M4U_L7_P17_JPGDEC_HUFF_OFFSET MTK_M4U_ID(SMI_L7_ID, 17)
+
+/* Larb8: null */
+
+/* Larb9 -- imgsys */
+#define M4U_L9_P0_IMGI_D1 MTK_M4U_ID(SMI_L9_ID, 0)
+#define M4U_L9_P1_IMGBI_D1 MTK_M4U_ID(SMI_L9_ID, 1)
+#define M4U_L9_P2_DMGI_D1 MTK_M4U_ID(SMI_L9_ID, 2)
+#define M4U_L9_P3_DEPI_D1 MTK_M4U_ID(SMI_L9_ID, 3)
+#define M4U_L9_P4_LCE_D1 MTK_M4U_ID(SMI_L9_ID, 4)
+#define M4U_L9_P5_SMTI_D1 MTK_M4U_ID(SMI_L9_ID, 5)
+#define M4U_L9_P6_SMTO_D2 MTK_M4U_ID(SMI_L9_ID, 6)
+#define M4U_L9_P7_SMTO_D1 MTK_M4U_ID(SMI_L9_ID, 7)
+#define M4U_L9_P8_CRZO_D1 MTK_M4U_ID(SMI_L9_ID, 8)
+#define M4U_L9_P9_IMG3O_D1 MTK_M4U_ID(SMI_L9_ID, 9)
+#define M4U_L9_P10_VIPI_D1 MTK_M4U_ID(SMI_L9_ID, 10)
+#define M4U_L9_P11_SMTI_D5 MTK_M4U_ID(SMI_L9_ID, 11)
+#define M4U_L9_P12_TIMGO_D1 MTK_M4U_ID(SMI_L9_ID, 12)
+#define M4U_L9_P13_UFBC_W0 MTK_M4U_ID(SMI_L9_ID, 13)
+#define M4U_L9_P14_UFBC_R0 MTK_M4U_ID(SMI_L9_ID, 14)
+#define M4U_L9_P15_WPE_RDMA1 MTK_M4U_ID(SMI_L9_ID, 15)
+#define M4U_L9_P16_WPE_RDMA0 MTK_M4U_ID(SMI_L9_ID, 16)
+#define M4U_L9_P17_WPE_WDMA MTK_M4U_ID(SMI_L9_ID, 17)
+#define M4U_L9_P18_MFB_RDMA0 MTK_M4U_ID(SMI_L9_ID, 18)
+#define M4U_L9_P19_MFB_RDMA1 MTK_M4U_ID(SMI_L9_ID, 19)
+#define M4U_L9_P20_MFB_RDMA2 MTK_M4U_ID(SMI_L9_ID, 20)
+#define M4U_L9_P21_MFB_RDMA3 MTK_M4U_ID(SMI_L9_ID, 21)
+#define M4U_L9_P22_MFB_RDMA4 MTK_M4U_ID(SMI_L9_ID, 22)
+#define M4U_L9_P23_MFB_RDMA5 MTK_M4U_ID(SMI_L9_ID, 23)
+#define M4U_L9_P24_MFB_WDMA0 MTK_M4U_ID(SMI_L9_ID, 24)
+#define M4U_L9_P25_MFB_WDMA1 MTK_M4U_ID(SMI_L9_ID, 25)
+#define M4U_L9_P26_RESERVE6 MTK_M4U_ID(SMI_L9_ID, 26)
+#define M4U_L9_P27_RESERVE7 MTK_M4U_ID(SMI_L9_ID, 27)
+#define M4U_L9_P28_RESERVE8 MTK_M4U_ID(SMI_L9_ID, 28)
+
+/* Larb10: null */
+
+/* Larb11 -- imgsys */
+#define M4U_L11_P0_IMGI_D1 MTK_M4U_ID(SMI_L11_ID, 0)
+#define M4U_L11_P1_IMGBI_D1 MTK_M4U_ID(SMI_L11_ID, 1)
+#define M4U_L11_P2_DMGI_D1 MTK_M4U_ID(SMI_L11_ID, 2)
+#define M4U_L11_P3_DEPI_D1 MTK_M4U_ID(SMI_L11_ID, 3)
+#define M4U_L11_P4_LCE_D1 MTK_M4U_ID(SMI_L11_ID, 4)
+#define M4U_L11_P5_SMTI_D1 MTK_M4U_ID(SMI_L11_ID, 5)
+#define M4U_L11_P6_SMTO_D2 MTK_M4U_ID(SMI_L11_ID, 6)
+#define M4U_L11_P7_SMTO_D1 MTK_M4U_ID(SMI_L11_ID, 7)
+#define M4U_L11_P8_CRZO_D1 MTK_M4U_ID(SMI_L11_ID, 8)
+#define M4U_L11_P9_IMG3O_D1 MTK_M4U_ID(SMI_L11_ID, 9)
+#define M4U_L11_P10_VIPI_D1 MTK_M4U_ID(SMI_L11_ID, 10)
+#define M4U_L11_P11_SMTI_D5 MTK_M4U_ID(SMI_L11_ID, 11)
+#define M4U_L11_P12_TIMGO_D1 MTK_M4U_ID(SMI_L11_ID, 12)
+#define M4U_L11_P13_UFBC_W0 MTK_M4U_ID(SMI_L11_ID, 13)
+#define M4U_L11_P14_UFBC_R0 MTK_M4U_ID(SMI_L11_ID, 14)
+#define M4U_L11_P15_WPE_RDMA1 MTK_M4U_ID(SMI_L11_ID, 15)
+#define M4U_L11_P16_WPE_RDMA0 MTK_M4U_ID(SMI_L11_ID, 16)
+#define M4U_L11_P17_WPE_WDMA MTK_M4U_ID(SMI_L11_ID, 17)
+#define M4U_L11_P18_MFB_RDMA0 MTK_M4U_ID(SMI_L11_ID, 18)
+#define M4U_L11_P19_MFB_RDMA1 MTK_M4U_ID(SMI_L11_ID, 19)
+#define M4U_L11_P20_MFB_RDMA2 MTK_M4U_ID(SMI_L11_ID, 20)
+#define M4U_L11_P21_MFB_RDMA3 MTK_M4U_ID(SMI_L11_ID, 21)
+#define M4U_L11_P22_MFB_RDMA4 MTK_M4U_ID(SMI_L11_ID, 22)
+#define M4U_L11_P23_MFB_RDMA5 MTK_M4U_ID(SMI_L11_ID, 23)
+#define M4U_L11_P24_MFB_WDMA0 MTK_M4U_ID(SMI_L11_ID, 24)
+#define M4U_L11_P25_MFB_WDMA1 MTK_M4U_ID(SMI_L11_ID, 25)
+#define M4U_L11_P26_RESERVE6 MTK_M4U_ID(SMI_L11_ID, 26)
+#define M4U_L11_P27_RESERVE7 MTK_M4U_ID(SMI_L11_ID, 27)
+#define M4U_L11_P28_RESERVE8 MTK_M4U_ID(SMI_L11_ID, 28)
+
+/* Larb12: null */
+
+/* Larb13 -- cam */
+#define M4U_L13_P0_MRAWI MTK_M4U_ID(SMI_L13_ID, 0)
+#define M4U_L13_P1_MRAWO_0 MTK_M4U_ID(SMI_L13_ID, 1)
+#define M4U_L13_P2_MRAWO_1 MTK_M4U_ID(SMI_L13_ID, 2)
+#define M4U_L13_P3_CAMSV_1 MTK_M4U_ID(SMI_L13_ID, 3)
+#define M4U_L13_P4_CAMSV_2 MTK_M4U_ID(SMI_L13_ID, 4)
+#define M4U_L13_P5_CAMSV_3 MTK_M4U_ID(SMI_L13_ID, 5)
+#define M4U_L13_P6_CAMSV_4 MTK_M4U_ID(SMI_L13_ID, 6)
+#define M4U_L13_P7_CAMSV_5 MTK_M4U_ID(SMI_L13_ID, 7)
+#define M4U_L13_P8_CAMSV_6 MTK_M4U_ID(SMI_L13_ID, 8)
+#define M4U_L13_P9_CCUI MTK_M4U_ID(SMI_L13_ID, 9)
+#define M4U_L13_P10_CCUO MTK_M4U_ID(SMI_L13_ID, 10)
+#define M4U_L13_P11_FAKE MTK_M4U_ID(SMI_L13_ID, 11)
+#define M4U_L13_P12_PDAI_0 MTK_M4U_ID(SMI_L13_ID, 12)
+#define M4U_L13_P13_PDAI_1 MTK_M4U_ID(SMI_L13_ID, 13)
+#define M4U_L13_P14_PDAO MTK_M4U_ID(SMI_L13_ID, 14)
+
+/* Larb14 -- cam */
+#define M4U_L14_P0_RESERVE MTK_M4U_ID(SMI_L14_ID, 0)
+#define M4U_L14_P1_RESERVE MTK_M4U_ID(SMI_L14_ID, 1)
+#define M4U_L14_P2_RESERVE MTK_M4U_ID(SMI_L14_ID, 2)
+#define M4U_L14_P3_CAMSV_0 MTK_M4U_ID(SMI_L14_ID, 3)
+#define M4U_L14_P4_CCUI MTK_M4U_ID(SMI_L14_ID, 4)
+#define M4U_L14_P5_CCUO MTK_M4U_ID(SMI_L14_ID, 5)
+#define M4U_L14_P6_CAMSV_7 MTK_M4U_ID(SMI_L14_ID, 6)
+#define M4U_L14_P7_CAMSV_8 MTK_M4U_ID(SMI_L14_ID, 7)
+#define M4U_L14_P8_CAMSV_9 MTK_M4U_ID(SMI_L14_ID, 8)
+#define M4U_L14_P9_CAMSV_10 MTK_M4U_ID(SMI_L14_ID, 9)
+
+/* Larb15: null */
+
+/* Larb16 -- cam */
+#define M4U_L16_P0_IMGO_R1_A MTK_M4U_ID(SMI_L16_ID, 0)
+#define M4U_L16_P1_RRZO_R1_A MTK_M4U_ID(SMI_L16_ID, 1)
+#define M4U_L16_P2_CQI_R1_A MTK_M4U_ID(SMI_L16_ID, 2)
+#define M4U_L16_P3_BPCI_R1_A MTK_M4U_ID(SMI_L16_ID, 3)
+#define M4U_L16_P4_YUVO_R1_A MTK_M4U_ID(SMI_L16_ID, 4)
+#define M4U_L16_P5_UFDI_R2_A MTK_M4U_ID(SMI_L16_ID, 5)
+#define M4U_L16_P6_RAWI_R2_A MTK_M4U_ID(SMI_L16_ID, 6)
+#define M4U_L16_P7_RAWI_R3_A MTK_M4U_ID(SMI_L16_ID, 7)
+#define M4U_L16_P8_AAO_R1_A MTK_M4U_ID(SMI_L16_ID, 8)
+#define M4U_L16_P9_AFO_R1_A MTK_M4U_ID(SMI_L16_ID, 9)
+#define M4U_L16_P10_FLKO_R1_A MTK_M4U_ID(SMI_L16_ID, 10)
+#define M4U_L16_P11_LCESO_R1_A MTK_M4U_ID(SMI_L16_ID, 11)
+#define M4U_L16_P12_CRZO_R1_A MTK_M4U_ID(SMI_L16_ID, 12)
+#define M4U_L16_P13_LTMSO_R1_A MTK_M4U_ID(SMI_L16_ID, 13)
+#define M4U_L16_P14_RSSO_R1_A MTK_M4U_ID(SMI_L16_ID, 14)
+#define M4U_L16_P15_AAHO_R1_A MTK_M4U_ID(SMI_L16_ID, 15)
+#define M4U_L16_P16_LSCI_R1_A MTK_M4U_ID(SMI_L16_ID, 16)
+
+/* Larb17 -- cam */
+#define M4U_L17_P0_IMGO_R1_B MTK_M4U_ID(SMI_L17_ID, 0)
+#define M4U_L17_P1_RRZO_R1_B MTK_M4U_ID(SMI_L17_ID, 1)
+#define M4U_L17_P2_CQI_R1_B MTK_M4U_ID(SMI_L17_ID, 2)
+#define M4U_L17_P3_BPCI_R1_B MTK_M4U_ID(SMI_L17_ID, 3)
+#define M4U_L17_P4_YUVO_R1_B MTK_M4U_ID(SMI_L17_ID, 4)
+#define M4U_L17_P5_UFDI_R2_B MTK_M4U_ID(SMI_L17_ID, 5)
+#define M4U_L17_P6_RAWI_R2_B MTK_M4U_ID(SMI_L17_ID, 6)
+#define M4U_L17_P7_RAWI_R3_B MTK_M4U_ID(SMI_L17_ID, 7)
+#define M4U_L17_P8_AAO_R1_B MTK_M4U_ID(SMI_L17_ID, 8)
+#define M4U_L17_P9_AFO_R1_B MTK_M4U_ID(SMI_L17_ID, 9)
+#define M4U_L17_P10_FLKO_R1_B MTK_M4U_ID(SMI_L17_ID, 10)
+#define M4U_L17_P11_LCESO_R1_B MTK_M4U_ID(SMI_L17_ID, 11)
+#define M4U_L17_P12_CRZO_R1_B MTK_M4U_ID(SMI_L17_ID, 12)
+#define M4U_L17_P13_LTMSO_R1_B MTK_M4U_ID(SMI_L17_ID, 13)
+#define M4U_L17_P14_RSSO_R1_B MTK_M4U_ID(SMI_L17_ID, 14)
+#define M4U_L17_P15_AAHO_R1_B MTK_M4U_ID(SMI_L17_ID, 15)
+#define M4U_L17_P16_LSCI_R1_B MTK_M4U_ID(SMI_L17_ID, 16)
+
+/* Larb19 -- ipesys */
+#define M4U_L19_P0_DVS_RDMA MTK_M4U_ID(SMI_L19_ID, 0)
+#define M4U_L19_P1_DVS_WDMA MTK_M4U_ID(SMI_L19_ID, 1)
+#define M4U_L19_P2_DVP_RDMA MTK_M4U_ID(SMI_L19_ID, 2)
+#define M4U_L19_P3_DVP_WDMA MTK_M4U_ID(SMI_L19_ID, 3)
+
+/* Larb20 -- ipesys */
+#define M4U_L20_P0_FDVT_RDA_0 MTK_M4U_ID(SMI_L20_ID, 0)
+#define M4U_L20_P1_FDVT_RDB_0 MTK_M4U_ID(SMI_L20_ID, 1)
+#define M4U_L20_P2_FDVT_WRA_0 MTK_M4U_ID(SMI_L20_ID, 2)
+#define M4U_L20_P3_FDVT_WRB_0 MTK_M4U_ID(SMI_L20_ID, 3)
+#define M4U_L20_P4_RSC_RDMA MTK_M4U_ID(SMI_L20_ID, 4)
+#define M4U_L20_P5_RSC_WDMA MTK_M4U_ID(SMI_L20_ID, 5)
+
+/* fake larb21 for gce */
+#define M4U_L21_GCE_DM MTK_M4U_ID(21, 0)
+#define M4U_L21_GCE_MM MTK_M4U_ID(21, 1)
+
+/* fake larb & port for svp and dual svp and wfd */
+#define M4U_PORT_SVP_HEAP MTK_M4U_ID(22, 0)
+#define M4U_PORT_DUAL_SVP_HEAP MTK_M4U_ID(22, 1)
+#define M4U_PORT_WFD_HEAP MTK_M4U_ID(22, 2)
+
+/* fake larb0 for apu */
+#define M4U_L0_APU_DATA MTK_M4U_ID(0, 0)
+#define M4U_L0_APU_CODE MTK_M4U_ID(0, 1)
+#define M4U_L0_APU_SECURE MTK_M4U_ID(0, 2)
+#define M4U_L0_APU_VLM MTK_M4U_ID(0, 3)
+
+/* infra/peri */
+#define IFR_IOMMU_PORT_PCIE_0 MTK_IFAIOMMU_PERI_ID(0, 26)
+
+#endif
diff --git a/include/dt-bindings/memory/mediatek,mt8365-larb-port.h b/include/dt-bindings/memory/mediatek,mt8365-larb-port.h
new file mode 100644
index 000000000000..56d5a5dd519e
--- /dev/null
+++ b/include/dt-bindings/memory/mediatek,mt8365-larb-port.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8365_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8365_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+#define M4U_LARB0_ID 0
+#define M4U_LARB1_ID 1
+#define M4U_LARB2_ID 2
+#define M4U_LARB3_ID 3
+
+/* larb0 */
+#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define M4U_PORT_DISP_OVL0_2L MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define M4U_PORT_MDP_WROT1 MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB0_ID, 7)
+#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 8)
+#define M4U_PORT_DISP_FAKE0 MTK_M4U_ID(M4U_LARB0_ID, 9)
+#define M4U_PORT_APU_READ MTK_M4U_ID(M4U_LARB0_ID, 10)
+#define M4U_PORT_APU_WRITE MTK_M4U_ID(M4U_LARB0_ID, 11)
+
+/* larb1 */
+#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define M4U_PORT_VENC_NBM_RDMA MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define M4U_PORT_VENC_NBM_RDMA_LITE MTK_M4U_ID(M4U_LARB1_ID, 6)
+#define M4U_PORT_JPGENC_Y_RDMA MTK_M4U_ID(M4U_LARB1_ID, 7)
+#define M4U_PORT_JPGENC_C_RDMA MTK_M4U_ID(M4U_LARB1_ID, 8)
+#define M4U_PORT_JPGENC_Q_TABLE MTK_M4U_ID(M4U_LARB1_ID, 9)
+#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB1_ID, 10)
+#define M4U_PORT_JPGDEC_WDMA MTK_M4U_ID(M4U_LARB1_ID, 11)
+#define M4U_PORT_JPGDEC_BSDMA MTK_M4U_ID(M4U_LARB1_ID, 12)
+#define M4U_PORT_VENC_NBM_WDMA MTK_M4U_ID(M4U_LARB1_ID, 13)
+#define M4U_PORT_VENC_NBM_WDMA_LITE MTK_M4U_ID(M4U_LARB1_ID, 14)
+#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB1_ID, 15)
+#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB1_ID, 16)
+#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB1_ID, 17)
+#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB1_ID, 18)
+
+/* larb2 */
+#define M4U_PORT_CAM_IMGO MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define M4U_PORT_CAM_RRZO MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define M4U_PORT_CAM_AAO MTK_M4U_ID(M4U_LARB2_ID, 2)
+#define M4U_PORT_CAM_LCS MTK_M4U_ID(M4U_LARB2_ID, 3)
+#define M4U_PORT_CAM_ESFKO MTK_M4U_ID(M4U_LARB2_ID, 4)
+#define M4U_PORT_CAM_CAM_SV0 MTK_M4U_ID(M4U_LARB2_ID, 5)
+#define M4U_PORT_CAM_CAM_SV1 MTK_M4U_ID(M4U_LARB2_ID, 6)
+#define M4U_PORT_CAM_LSCI MTK_M4U_ID(M4U_LARB2_ID, 7)
+#define M4U_PORT_CAM_LSCI_D MTK_M4U_ID(M4U_LARB2_ID, 8)
+#define M4U_PORT_CAM_AFO MTK_M4U_ID(M4U_LARB2_ID, 9)
+#define M4U_PORT_CAM_SPARE MTK_M4U_ID(M4U_LARB2_ID, 10)
+#define M4U_PORT_CAM_BPCI MTK_M4U_ID(M4U_LARB2_ID, 11)
+#define M4U_PORT_CAM_BPCI_D MTK_M4U_ID(M4U_LARB2_ID, 12)
+#define M4U_PORT_CAM_UFDI MTK_M4U_ID(M4U_LARB2_ID, 13)
+#define M4U_PORT_CAM_IMGI MTK_M4U_ID(M4U_LARB2_ID, 14)
+#define M4U_PORT_CAM_IMG2O MTK_M4U_ID(M4U_LARB2_ID, 15)
+#define M4U_PORT_CAM_IMG3O MTK_M4U_ID(M4U_LARB2_ID, 16)
+#define M4U_PORT_CAM_WPE0_I MTK_M4U_ID(M4U_LARB2_ID, 17)
+#define M4U_PORT_CAM_WPE1_I MTK_M4U_ID(M4U_LARB2_ID, 18)
+#define M4U_PORT_CAM_WPE_O MTK_M4U_ID(M4U_LARB2_ID, 19)
+#define M4U_PORT_CAM_FD0_I MTK_M4U_ID(M4U_LARB2_ID, 20)
+#define M4U_PORT_CAM_FD1_I MTK_M4U_ID(M4U_LARB2_ID, 21)
+#define M4U_PORT_CAM_FD0_O MTK_M4U_ID(M4U_LARB2_ID, 22)
+#define M4U_PORT_CAM_FD1_O MTK_M4U_ID(M4U_LARB2_ID, 23)
+
+/* larb3 */
+#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB3_ID, 0)
+#define M4U_PORT_HW_VDEC_UFO_EXT MTK_M4U_ID(M4U_LARB3_ID, 1)
+#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB3_ID, 2)
+#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB3_ID, 3)
+#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB3_ID, 4)
+#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB3_ID, 5)
+#define M4U_PORT_HW_VDEC_TILE_EXT MTK_M4U_ID(M4U_LARB3_ID, 6)
+#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB3_ID, 7)
+#define M4U_PORT_HW_VDEC_VLD2_EXT MTK_M4U_ID(M4U_LARB3_ID, 8)
+#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB3_ID, 9)
+#define M4U_PORT_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(M4U_LARB3_ID, 10)
+
+#endif
diff --git a/include/dt-bindings/memory/mt6795-larb-port.h b/include/dt-bindings/memory/mt6795-larb-port.h
new file mode 100644
index 000000000000..58cf6a6b6372
--- /dev/null
+++ b/include/dt-bindings/memory/mt6795-larb-port.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_MEMORY_MT6795_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT6795_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+#define M4U_LARB0_ID 0
+#define M4U_LARB1_ID 1
+#define M4U_LARB2_ID 2
+#define M4U_LARB3_ID 3
+#define M4U_LARB4_ID 4
+
+/* larb0 */
+#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define M4U_PORT_DISP_RDMA2 MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define M4U_PORT_DISP_WDMA1 MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define M4U_PORT_DISP_OD_R MTK_M4U_ID(M4U_LARB0_ID, 7)
+#define M4U_PORT_DISP_OD_W MTK_M4U_ID(M4U_LARB0_ID, 8)
+#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 9)
+#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 10)
+#define M4U_PORT_MDP_WDMA MTK_M4U_ID(M4U_LARB0_ID, 11)
+#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB0_ID, 12)
+#define M4U_PORT_MDP_WROT1 MTK_M4U_ID(M4U_LARB0_ID, 13)
+
+/* larb1 */
+#define M4U_PORT_VDEC_MC MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define M4U_PORT_VDEC_PP MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define M4U_PORT_VDEC_UFO MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define M4U_PORT_VDEC_VLD MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define M4U_PORT_VDEC_VLD2 MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define M4U_PORT_VDEC_AVC_MV MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define M4U_PORT_VDEC_PRED_RD MTK_M4U_ID(M4U_LARB1_ID, 6)
+#define M4U_PORT_VDEC_PRED_WR MTK_M4U_ID(M4U_LARB1_ID, 7)
+#define M4U_PORT_VDEC_PPWRAP MTK_M4U_ID(M4U_LARB1_ID, 8)
+
+/* larb2 */
+#define M4U_PORT_CAM_IMGO MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define M4U_PORT_CAM_RRZO MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define M4U_PORT_CAM_AAO MTK_M4U_ID(M4U_LARB2_ID, 2)
+#define M4U_PORT_CAM_LCSO MTK_M4U_ID(M4U_LARB2_ID, 3)
+#define M4U_PORT_CAM_ESFKO MTK_M4U_ID(M4U_LARB2_ID, 4)
+#define M4U_PORT_CAM_IMGO_S MTK_M4U_ID(M4U_LARB2_ID, 5)
+#define M4U_PORT_CAM_LSCI MTK_M4U_ID(M4U_LARB2_ID, 6)
+#define M4U_PORT_CAM_LSCI_D MTK_M4U_ID(M4U_LARB2_ID, 7)
+#define M4U_PORT_CAM_BPCI MTK_M4U_ID(M4U_LARB2_ID, 8)
+#define M4U_PORT_CAM_BPCI_D MTK_M4U_ID(M4U_LARB2_ID, 9)
+#define M4U_PORT_CAM_UFDI MTK_M4U_ID(M4U_LARB2_ID, 10)
+#define M4U_PORT_CAM_IMGI MTK_M4U_ID(M4U_LARB2_ID, 11)
+#define M4U_PORT_CAM_IMG2O MTK_M4U_ID(M4U_LARB2_ID, 12)
+#define M4U_PORT_CAM_IMG3O MTK_M4U_ID(M4U_LARB2_ID, 13)
+#define M4U_PORT_CAM_VIPI MTK_M4U_ID(M4U_LARB2_ID, 14)
+#define M4U_PORT_CAM_VIP2I MTK_M4U_ID(M4U_LARB2_ID, 15)
+#define M4U_PORT_CAM_VIP3I MTK_M4U_ID(M4U_LARB2_ID, 16)
+#define M4U_PORT_CAM_LCEI MTK_M4U_ID(M4U_LARB2_ID, 17)
+#define M4U_PORT_CAM_RB MTK_M4U_ID(M4U_LARB2_ID, 18)
+#define M4U_PORT_CAM_RP MTK_M4U_ID(M4U_LARB2_ID, 19)
+#define M4U_PORT_CAM_WR MTK_M4U_ID(M4U_LARB2_ID, 20)
+
+/* larb3 */
+#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0)
+#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1)
+#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 2)
+#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3)
+#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4)
+#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 5)
+#define M4U_PORT_REMDC_SDMA MTK_M4U_ID(M4U_LARB3_ID, 6)
+#define M4U_PORT_REMDC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 7)
+#define M4U_PORT_JPGENC_RDMA MTK_M4U_ID(M4U_LARB3_ID, 8)
+#define M4U_PORT_JPGENC_SDMA MTK_M4U_ID(M4U_LARB3_ID, 9)
+#define M4U_PORT_JPGDEC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 10)
+#define M4U_PORT_JPGDEC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 11)
+#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 12)
+#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 13)
+#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 14)
+#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 15)
+#define M4U_PORT_REMDC_WDMA MTK_M4U_ID(M4U_LARB3_ID, 16)
+#define M4U_PORT_VENC_NBM_RDMA MTK_M4U_ID(M4U_LARB3_ID, 17)
+#define M4U_PORT_VENC_NBM_WDMA MTK_M4U_ID(M4U_LARB3_ID, 18)
+
+/* larb4 */
+#define M4U_PORT_MJC_MV_RD MTK_M4U_ID(M4U_LARB4_ID, 0)
+#define M4U_PORT_MJC_MV_WR MTK_M4U_ID(M4U_LARB4_ID, 1)
+#define M4U_PORT_MJC_DMA_RD MTK_M4U_ID(M4U_LARB4_ID, 2)
+#define M4U_PORT_MJC_DMA_WR MTK_M4U_ID(M4U_LARB4_ID, 3)
+
+#endif
diff --git a/include/dt-bindings/memory/mt8186-memory-port.h b/include/dt-bindings/memory/mt8186-memory-port.h
new file mode 100644
index 000000000000..2bc6e4433048
--- /dev/null
+++ b/include/dt-bindings/memory/mt8186-memory-port.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ *
+ * Author: Anan Sun <anan.sun@mediatek.com>
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8186_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8186_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space, split into four
+ * ranges: 0 ~ 4G, 4G ~ 8G, 8G ~ 12G and 12G ~ 16G. Masters may be
+ * placed in any of these regions, BUT:
+ * a) all the ports inside one larb must stay within a single range;
+ * b) the iova of any master can NOT cross a 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping in this SoC:
+ *
+ * modules dma-address-region larbs-ports
+ * disp 0 ~ 4G larb0/1/2
+ * vcodec 4G ~ 8G larb4/7
+ * cam/mdp 8G ~ 12G the other larbs.
+ * N/A 12G ~ 16G
+ * CCU0 0x24000_0000 ~ 0x243ff_ffff larb13: port 9/10
+ * CCU1 0x24400_0000 ~ 0x247ff_ffff larb14: port 4/5
+ */
+
+/* MM IOMMU ports */
+/* LARB 0 -- MMSYS */
+#define IOMMU_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(0, 0)
+#define IOMMU_PORT_L0_REVERSED MTK_M4U_ID(0, 1)
+#define IOMMU_PORT_L0_OVL_RDMA0 MTK_M4U_ID(0, 2)
+#define IOMMU_PORT_L0_DISP_FAKE0 MTK_M4U_ID(0, 3)
+
+/* LARB 1 -- MMSYS */
+#define IOMMU_PORT_L1_DISP_RDMA1 MTK_M4U_ID(1, 0)
+#define IOMMU_PORT_L1_OVL_2L_RDMA0 MTK_M4U_ID(1, 1)
+#define IOMMU_PORT_L1_DISP_RDMA0 MTK_M4U_ID(1, 2)
+#define IOMMU_PORT_L1_DISP_WDMA0 MTK_M4U_ID(1, 3)
+#define IOMMU_PORT_L1_DISP_FAKE1 MTK_M4U_ID(1, 4)
+
+/* LARB 2 -- MMSYS */
+#define IOMMU_PORT_L2_MDP_RDMA0 MTK_M4U_ID(2, 0)
+#define IOMMU_PORT_L2_MDP_RDMA1 MTK_M4U_ID(2, 1)
+#define IOMMU_PORT_L2_MDP_WROT0 MTK_M4U_ID(2, 2)
+#define IOMMU_PORT_L2_MDP_WROT1 MTK_M4U_ID(2, 3)
+#define IOMMU_PORT_L2_DISP_FAKE0 MTK_M4U_ID(2, 4)
+
+/* LARB 4 -- VDEC */
+#define IOMMU_PORT_L4_HW_VDEC_MC_EXT MTK_M4U_ID(4, 0)
+#define IOMMU_PORT_L4_HW_VDEC_UFO_EXT MTK_M4U_ID(4, 1)
+#define IOMMU_PORT_L4_HW_VDEC_PP_EXT MTK_M4U_ID(4, 2)
+#define IOMMU_PORT_L4_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(4, 3)
+#define IOMMU_PORT_L4_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(4, 4)
+#define IOMMU_PORT_L4_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(4, 5)
+#define IOMMU_PORT_L4_HW_VDEC_TILE_EXT MTK_M4U_ID(4, 6)
+#define IOMMU_PORT_L4_HW_VDEC_VLD_EXT MTK_M4U_ID(4, 7)
+#define IOMMU_PORT_L4_HW_VDEC_VLD2_EXT MTK_M4U_ID(4, 8)
+#define IOMMU_PORT_L4_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(4, 9)
+#define IOMMU_PORT_L4_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(4, 10)
+#define IOMMU_PORT_L4_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(4, 11)
+#define IOMMU_PORT_L4_HW_MINI_MDP_R0_EXT MTK_M4U_ID(4, 12)
+#define IOMMU_PORT_L4_HW_MINI_MDP_W0_EXT MTK_M4U_ID(4, 13)
+
+/* LARB 7 -- VENC */
+#define IOMMU_PORT_L7_VENC_RCPU MTK_M4U_ID(7, 0)
+#define IOMMU_PORT_L7_VENC_REC MTK_M4U_ID(7, 1)
+#define IOMMU_PORT_L7_VENC_BSDMA MTK_M4U_ID(7, 2)
+#define IOMMU_PORT_L7_VENC_SV_COMV MTK_M4U_ID(7, 3)
+#define IOMMU_PORT_L7_VENC_RD_COMV MTK_M4U_ID(7, 4)
+#define IOMMU_PORT_L7_VENC_CUR_LUMA MTK_M4U_ID(7, 5)
+#define IOMMU_PORT_L7_VENC_CUR_CHROMA MTK_M4U_ID(7, 6)
+#define IOMMU_PORT_L7_VENC_REF_LUMA MTK_M4U_ID(7, 7)
+#define IOMMU_PORT_L7_VENC_REF_CHROMA MTK_M4U_ID(7, 8)
+#define IOMMU_PORT_L7_JPGENC_Y_RDMA MTK_M4U_ID(7, 9)
+#define IOMMU_PORT_L7_JPGENC_C_RDMA MTK_M4U_ID(7, 10)
+#define IOMMU_PORT_L7_JPGENC_Q_TABLE MTK_M4U_ID(7, 11)
+#define IOMMU_PORT_L7_JPGENC_BSDMA MTK_M4U_ID(7, 12)
+
+/* LARB 8 -- WPE */
+#define IOMMU_PORT_L8_WPE_RDMA_0 MTK_M4U_ID(8, 0)
+#define IOMMU_PORT_L8_WPE_RDMA_1 MTK_M4U_ID(8, 1)
+#define IOMMU_PORT_L8_WPE_WDMA_0 MTK_M4U_ID(8, 2)
+
+/* LARB 9 -- IMG-1 */
+#define IOMMU_PORT_L9_IMG_IMGI_D1 MTK_M4U_ID(9, 0)
+#define IOMMU_PORT_L9_IMG_IMGBI_D1 MTK_M4U_ID(9, 1)
+#define IOMMU_PORT_L9_IMG_DMGI_D1 MTK_M4U_ID(9, 2)
+#define IOMMU_PORT_L9_IMG_DEPI_D1 MTK_M4U_ID(9, 3)
+#define IOMMU_PORT_L9_IMG_LCE_D1 MTK_M4U_ID(9, 4)
+#define IOMMU_PORT_L9_IMG_SMTI_D1 MTK_M4U_ID(9, 5)
+#define IOMMU_PORT_L9_IMG_SMTO_D2 MTK_M4U_ID(9, 6)
+#define IOMMU_PORT_L9_IMG_SMTO_D1 MTK_M4U_ID(9, 7)
+#define IOMMU_PORT_L9_IMG_CRZO_D1 MTK_M4U_ID(9, 8)
+#define IOMMU_PORT_L9_IMG_IMG3O_D1 MTK_M4U_ID(9, 9)
+#define IOMMU_PORT_L9_IMG_VIPI_D1 MTK_M4U_ID(9, 10)
+#define IOMMU_PORT_L9_IMG_SMTI_D5 MTK_M4U_ID(9, 11)
+#define IOMMU_PORT_L9_IMG_TIMGO_D1 MTK_M4U_ID(9, 12)
+#define IOMMU_PORT_L9_IMG_UFBC_W0 MTK_M4U_ID(9, 13)
+#define IOMMU_PORT_L9_IMG_UFBC_R0 MTK_M4U_ID(9, 14)
+#define IOMMU_PORT_L9_IMG_WPE_RDMA1 MTK_M4U_ID(9, 15)
+#define IOMMU_PORT_L9_IMG_WPE_RDMA0 MTK_M4U_ID(9, 16)
+#define IOMMU_PORT_L9_IMG_WPE_WDMA MTK_M4U_ID(9, 17)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA0 MTK_M4U_ID(9, 18)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA1 MTK_M4U_ID(9, 19)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA2 MTK_M4U_ID(9, 20)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA3 MTK_M4U_ID(9, 21)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA4 MTK_M4U_ID(9, 22)
+#define IOMMU_PORT_L9_IMG_MFB_RDMA5 MTK_M4U_ID(9, 23)
+#define IOMMU_PORT_L9_IMG_MFB_WDMA0 MTK_M4U_ID(9, 24)
+#define IOMMU_PORT_L9_IMG_MFB_WDMA1 MTK_M4U_ID(9, 25)
+#define IOMMU_PORT_L9_IMG_RESERVE6 MTK_M4U_ID(9, 26)
+#define IOMMU_PORT_L9_IMG_RESERVE7 MTK_M4U_ID(9, 27)
+#define IOMMU_PORT_L9_IMG_RESERVE8 MTK_M4U_ID(9, 28)
+
+/* LARB 11 -- IMG-2 */
+#define IOMMU_PORT_L11_IMG_IMGI_D1 MTK_M4U_ID(11, 0)
+#define IOMMU_PORT_L11_IMG_IMGBI_D1 MTK_M4U_ID(11, 1)
+#define IOMMU_PORT_L11_IMG_DMGI_D1 MTK_M4U_ID(11, 2)
+#define IOMMU_PORT_L11_IMG_DEPI_D1 MTK_M4U_ID(11, 3)
+#define IOMMU_PORT_L11_IMG_LCE_D1 MTK_M4U_ID(11, 4)
+#define IOMMU_PORT_L11_IMG_SMTI_D1 MTK_M4U_ID(11, 5)
+#define IOMMU_PORT_L11_IMG_SMTO_D2 MTK_M4U_ID(11, 6)
+#define IOMMU_PORT_L11_IMG_SMTO_D1 MTK_M4U_ID(11, 7)
+#define IOMMU_PORT_L11_IMG_CRZO_D1 MTK_M4U_ID(11, 8)
+#define IOMMU_PORT_L11_IMG_IMG3O_D1 MTK_M4U_ID(11, 9)
+#define IOMMU_PORT_L11_IMG_VIPI_D1 MTK_M4U_ID(11, 10)
+#define IOMMU_PORT_L11_IMG_SMTI_D5 MTK_M4U_ID(11, 11)
+#define IOMMU_PORT_L11_IMG_TIMGO_D1 MTK_M4U_ID(11, 12)
+#define IOMMU_PORT_L11_IMG_UFBC_W0 MTK_M4U_ID(11, 13)
+#define IOMMU_PORT_L11_IMG_UFBC_R0 MTK_M4U_ID(11, 14)
+#define IOMMU_PORT_L11_IMG_WPE_RDMA1 MTK_M4U_ID(11, 15)
+#define IOMMU_PORT_L11_IMG_WPE_RDMA0 MTK_M4U_ID(11, 16)
+#define IOMMU_PORT_L11_IMG_WPE_WDMA MTK_M4U_ID(11, 17)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA0 MTK_M4U_ID(11, 18)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA1 MTK_M4U_ID(11, 19)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA2 MTK_M4U_ID(11, 20)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA3 MTK_M4U_ID(11, 21)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA4 MTK_M4U_ID(11, 22)
+#define IOMMU_PORT_L11_IMG_MFB_RDMA5 MTK_M4U_ID(11, 23)
+#define IOMMU_PORT_L11_IMG_MFB_WDMA0 MTK_M4U_ID(11, 24)
+#define IOMMU_PORT_L11_IMG_MFB_WDMA1 MTK_M4U_ID(11, 25)
+#define IOMMU_PORT_L11_IMG_RESERVE6 MTK_M4U_ID(11, 26)
+#define IOMMU_PORT_L11_IMG_RESERVE7 MTK_M4U_ID(11, 27)
+#define IOMMU_PORT_L11_IMG_RESERVE8 MTK_M4U_ID(11, 28)
+
+/* LARB 13 -- CAM */
+#define IOMMU_PORT_L13_CAM_MRAWI MTK_M4U_ID(13, 0)
+#define IOMMU_PORT_L13_CAM_MRAWO_0 MTK_M4U_ID(13, 1)
+#define IOMMU_PORT_L13_CAM_MRAWO_1 MTK_M4U_ID(13, 2)
+#define IOMMU_PORT_L13_CAM_CAMSV_4 MTK_M4U_ID(13, 6)
+#define IOMMU_PORT_L13_CAM_CAMSV_5 MTK_M4U_ID(13, 7)
+#define IOMMU_PORT_L13_CAM_CAMSV_6 MTK_M4U_ID(13, 8)
+#define IOMMU_PORT_L13_CAM_CCUI MTK_M4U_ID(13, 9)
+#define IOMMU_PORT_L13_CAM_CCUO MTK_M4U_ID(13, 10)
+#define IOMMU_PORT_L13_CAM_FAKE MTK_M4U_ID(13, 11)
+
+/* LARB 14 -- CAM */
+#define IOMMU_PORT_L14_CAM_CCUI MTK_M4U_ID(14, 4)
+#define IOMMU_PORT_L14_CAM_CCUO MTK_M4U_ID(14, 5)
+
+/* LARB 16 -- RAW-A */
+#define IOMMU_PORT_L16_CAM_IMGO_R1_A MTK_M4U_ID(16, 0)
+#define IOMMU_PORT_L16_CAM_RRZO_R1_A MTK_M4U_ID(16, 1)
+#define IOMMU_PORT_L16_CAM_CQI_R1_A MTK_M4U_ID(16, 2)
+#define IOMMU_PORT_L16_CAM_BPCI_R1_A MTK_M4U_ID(16, 3)
+#define IOMMU_PORT_L16_CAM_YUVO_R1_A MTK_M4U_ID(16, 4)
+#define IOMMU_PORT_L16_CAM_UFDI_R2_A MTK_M4U_ID(16, 5)
+#define IOMMU_PORT_L16_CAM_RAWI_R2_A MTK_M4U_ID(16, 6)
+#define IOMMU_PORT_L16_CAM_RAWI_R3_A MTK_M4U_ID(16, 7)
+#define IOMMU_PORT_L16_CAM_AAO_R1_A MTK_M4U_ID(16, 8)
+#define IOMMU_PORT_L16_CAM_AFO_R1_A MTK_M4U_ID(16, 9)
+#define IOMMU_PORT_L16_CAM_FLKO_R1_A MTK_M4U_ID(16, 10)
+#define IOMMU_PORT_L16_CAM_LCESO_R1_A MTK_M4U_ID(16, 11)
+#define IOMMU_PORT_L16_CAM_CRZO_R1_A MTK_M4U_ID(16, 12)
+#define IOMMU_PORT_L16_CAM_LTMSO_R1_A MTK_M4U_ID(16, 13)
+#define IOMMU_PORT_L16_CAM_RSSO_R1_A MTK_M4U_ID(16, 14)
+#define IOMMU_PORT_L16_CAM_AAHO_R1_A MTK_M4U_ID(16, 15)
+#define IOMMU_PORT_L16_CAM_LSCI_R1_A MTK_M4U_ID(16, 16)
+
+/* LARB 17 -- RAW-B */
+#define IOMMU_PORT_L17_CAM_IMGO_R1_B MTK_M4U_ID(17, 0)
+#define IOMMU_PORT_L17_CAM_RRZO_R1_B MTK_M4U_ID(17, 1)
+#define IOMMU_PORT_L17_CAM_CQI_R1_B MTK_M4U_ID(17, 2)
+#define IOMMU_PORT_L17_CAM_BPCI_R1_B MTK_M4U_ID(17, 3)
+#define IOMMU_PORT_L17_CAM_YUVO_R1_B MTK_M4U_ID(17, 4)
+#define IOMMU_PORT_L17_CAM_UFDI_R2_B MTK_M4U_ID(17, 5)
+#define IOMMU_PORT_L17_CAM_RAWI_R2_B MTK_M4U_ID(17, 6)
+#define IOMMU_PORT_L17_CAM_RAWI_R3_B MTK_M4U_ID(17, 7)
+#define IOMMU_PORT_L17_CAM_AAO_R1_B MTK_M4U_ID(17, 8)
+#define IOMMU_PORT_L17_CAM_AFO_R1_B MTK_M4U_ID(17, 9)
+#define IOMMU_PORT_L17_CAM_FLKO_R1_B MTK_M4U_ID(17, 10)
+#define IOMMU_PORT_L17_CAM_LCESO_R1_B MTK_M4U_ID(17, 11)
+#define IOMMU_PORT_L17_CAM_CRZO_R1_B MTK_M4U_ID(17, 12)
+#define IOMMU_PORT_L17_CAM_LTMSO_R1_B MTK_M4U_ID(17, 13)
+#define IOMMU_PORT_L17_CAM_RSSO_R1_B MTK_M4U_ID(17, 14)
+#define IOMMU_PORT_L17_CAM_AAHO_R1_B MTK_M4U_ID(17, 15)
+#define IOMMU_PORT_L17_CAM_LSCI_R1_B MTK_M4U_ID(17, 16)
+
+/* LARB 19 -- IPE */
+#define IOMMU_PORT_L19_IPE_DVS_RDMA MTK_M4U_ID(19, 0)
+#define IOMMU_PORT_L19_IPE_DVS_WDMA MTK_M4U_ID(19, 1)
+#define IOMMU_PORT_L19_IPE_DVP_RDMA MTK_M4U_ID(19, 2)
+#define IOMMU_PORT_L19_IPE_DVP_WDMA MTK_M4U_ID(19, 3)
+
+/* LARB 20 -- IPE */
+#define IOMMU_PORT_L20_IPE_FDVT_RDA MTK_M4U_ID(20, 0)
+#define IOMMU_PORT_L20_IPE_FDVT_RDB MTK_M4U_ID(20, 1)
+#define IOMMU_PORT_L20_IPE_FDVT_WRA MTK_M4U_ID(20, 2)
+#define IOMMU_PORT_L20_IPE_FDVT_WRB MTK_M4U_ID(20, 3)
+#define IOMMU_PORT_L20_IPE_RSC_RDMA0 MTK_M4U_ID(20, 4)
+#define IOMMU_PORT_L20_IPE_RSC_WDMA MTK_M4U_ID(20, 5)
+
+#endif
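The MTK_M4U_ID() helper used throughout these port lists is defined in mtk-memory-port.h (updated later in this patch); from the MTK_M4U_TO_LARB()/MTK_M4U_TO_PORT() decoders there, it evidently packs the larb index into bits 9:5 and the port index into bits 4:0. A minimal sketch, assuming that encoding:

    #include <assert.h>

    /* Assumed encoding, matching the decoders in mtk-memory-port.h:
     * larb index in bits 9:5, port index in bits 4:0.
     */
    #define MTK_M4U_ID(larb, port)	(((larb) << 5) | (port))
    #define MTK_M4U_TO_LARB(id)	(((id) >> 5) & 0x1f)
    #define MTK_M4U_TO_PORT(id)	((id) & 0x1f)

    int main(void)
    {
    	/* IOMMU_PORT_L13_CAM_CCUI above is MTK_M4U_ID(13, 9) */
    	unsigned int id = MTK_M4U_ID(13, 9);

    	assert(MTK_M4U_TO_LARB(id) == 13);	/* larb13 */
    	assert(MTK_M4U_TO_PORT(id) == 9);	/* port 9: CCUI */
    	return 0;
    }

With 5 bits each for larb and port, this is consistent with the per-larb comment that an iova range is assigned larb-by-larb rather than port-by-port.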
diff --git a/include/dt-bindings/memory/mt8195-memory-port.h b/include/dt-bindings/memory/mt8195-memory-port.h
new file mode 100644
index 000000000000..70ba9f498eeb
--- /dev/null
+++ b/include/dt-bindings/memory/mt8195-memory-port.h
@@ -0,0 +1,408 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MT8195_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MT8195_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space, split into four
+ * ranges: 0 ~ 4G, 4G ~ 8G, 8G ~ 12G and 12G ~ 16G. Masters may be
+ * placed in any of these regions, BUT:
+ * a) all the ports inside one larb must stay within a single range;
+ * b) the iova of any master can NOT cross a 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping in this SoC:
+ *
+ * modules dma-address-region larbs-ports
+ * disp 0 ~ 4G larb0/1/2/3
+ * vcodec 4G ~ 8G larb19/20/21/22/23/24
+ * cam/mdp 8G ~ 12G the other larbs.
+ * N/A 12G ~ 16G
+ * CCU0 0x24000_0000 ~ 0x243ff_ffff larb18: port 0/1
+ * CCU1 0x24400_0000 ~ 0x247ff_ffff larb18: port 2/3
+ *
+ * This SoC has two IOMMU HWs; these are the detailed connections:
+ * iommu-vdo: larb0/2/5/7/9/10/11/13/17/19/21/24/25/28
+ * iommu-vpp: larb1/3/4/6/8/12/14/16/18/20/22/23/26/27
+ */
+
+/* MM IOMMU ports */
+/* larb0 */
+#define M4U_PORT_L0_DISP_RDMA0 MTK_M4U_ID(0, 0)
+#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_ID(0, 1)
+#define M4U_PORT_L0_DISP_OVL0_RDMA0 MTK_M4U_ID(0, 2)
+#define M4U_PORT_L0_DISP_OVL0_RDMA1 MTK_M4U_ID(0, 3)
+#define M4U_PORT_L0_DISP_OVL0_HDR MTK_M4U_ID(0, 4)
+#define M4U_PORT_L0_DISP_FAKE0 MTK_M4U_ID(0, 5)
+
+/* larb1 */
+#define M4U_PORT_L1_DISP_RDMA0 MTK_M4U_ID(1, 0)
+#define M4U_PORT_L1_DISP_WDMA0 MTK_M4U_ID(1, 1)
+#define M4U_PORT_L1_DISP_OVL0_RDMA0 MTK_M4U_ID(1, 2)
+#define M4U_PORT_L1_DISP_OVL0_RDMA1 MTK_M4U_ID(1, 3)
+#define M4U_PORT_L1_DISP_OVL0_HDR MTK_M4U_ID(1, 4)
+#define M4U_PORT_L1_DISP_FAKE0 MTK_M4U_ID(1, 5)
+
+/* larb2 */
+#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_ID(2, 0)
+#define M4U_PORT_L2_MDP_RDMA2 MTK_M4U_ID(2, 1)
+#define M4U_PORT_L2_MDP_RDMA4 MTK_M4U_ID(2, 2)
+#define M4U_PORT_L2_MDP_RDMA6 MTK_M4U_ID(2, 3)
+#define M4U_PORT_L2_DISP_FAKE1 MTK_M4U_ID(2, 4)
+
+/* larb3 */
+#define M4U_PORT_L3_MDP_RDMA1 MTK_M4U_ID(3, 0)
+#define M4U_PORT_L3_MDP_RDMA3 MTK_M4U_ID(3, 1)
+#define M4U_PORT_L3_MDP_RDMA5 MTK_M4U_ID(3, 2)
+#define M4U_PORT_L3_MDP_RDMA7 MTK_M4U_ID(3, 3)
+#define M4U_PORT_L3_HDR_DS MTK_M4U_ID(3, 4)
+#define M4U_PORT_L3_HDR_ADL MTK_M4U_ID(3, 5)
+#define M4U_PORT_L3_DISP_FAKE1 MTK_M4U_ID(3, 6)
+
+/* larb4 */
+#define M4U_PORT_L4_MDP_RDMA MTK_M4U_ID(4, 0)
+#define M4U_PORT_L4_MDP_FG MTK_M4U_ID(4, 1)
+#define M4U_PORT_L4_MDP_OVL MTK_M4U_ID(4, 2)
+#define M4U_PORT_L4_MDP_WROT MTK_M4U_ID(4, 3)
+#define M4U_PORT_L4_FAKE MTK_M4U_ID(4, 4)
+
+/* larb5 */
+#define M4U_PORT_L5_SVPP1_MDP_RDMA MTK_M4U_ID(5, 0)
+#define M4U_PORT_L5_SVPP1_MDP_FG MTK_M4U_ID(5, 1)
+#define M4U_PORT_L5_SVPP1_MDP_OVL MTK_M4U_ID(5, 2)
+#define M4U_PORT_L5_SVPP1_MDP_WROT MTK_M4U_ID(5, 3)
+#define M4U_PORT_L5_SVPP2_MDP_RDMA MTK_M4U_ID(5, 4)
+#define M4U_PORT_L5_SVPP2_MDP_FG MTK_M4U_ID(5, 5)
+#define M4U_PORT_L5_SVPP2_MDP_WROT MTK_M4U_ID(5, 6)
+#define M4U_PORT_L5_FAKE MTK_M4U_ID(5, 7)
+
+/* larb6 */
+#define M4U_PORT_L6_SVPP3_MDP_RDMA MTK_M4U_ID(6, 0)
+#define M4U_PORT_L6_SVPP3_MDP_FG MTK_M4U_ID(6, 1)
+#define M4U_PORT_L6_SVPP3_MDP_WROT MTK_M4U_ID(6, 2)
+#define M4U_PORT_L6_FAKE MTK_M4U_ID(6, 3)
+
+/* larb7 */
+#define M4U_PORT_L7_IMG_WPE_RDMA0 MTK_M4U_ID(7, 0)
+#define M4U_PORT_L7_IMG_WPE_RDMA1 MTK_M4U_ID(7, 1)
+#define M4U_PORT_L7_IMG_WPE_WDMA0 MTK_M4U_ID(7, 2)
+
+/* larb8 */
+#define M4U_PORT_L8_IMG_WPE_RDMA0 MTK_M4U_ID(8, 0)
+#define M4U_PORT_L8_IMG_WPE_RDMA1 MTK_M4U_ID(8, 1)
+#define M4U_PORT_L8_IMG_WPE_WDMA0 MTK_M4U_ID(8, 2)
+
+/* larb9 */
+#define M4U_PORT_L9_IMG_IMGI_T1_A MTK_M4U_ID(9, 0)
+#define M4U_PORT_L9_IMG_IMGBI_T1_A MTK_M4U_ID(9, 1)
+#define M4U_PORT_L9_IMG_IMGCI_T1_A MTK_M4U_ID(9, 2)
+#define M4U_PORT_L9_IMG_SMTI_T1_A MTK_M4U_ID(9, 3)
+#define M4U_PORT_L9_IMG_TNCSTI_T1_A MTK_M4U_ID(9, 4)
+#define M4U_PORT_L9_IMG_TNCSTI_T4_A MTK_M4U_ID(9, 5)
+#define M4U_PORT_L9_IMG_YUVO_T1_A MTK_M4U_ID(9, 6)
+#define M4U_PORT_L9_IMG_TIMGO_T1_A MTK_M4U_ID(9, 7)
+#define M4U_PORT_L9_IMG_YUVO_T2_A MTK_M4U_ID(9, 8)
+#define M4U_PORT_L9_IMG_IMGI_T1_B MTK_M4U_ID(9, 9)
+#define M4U_PORT_L9_IMG_IMGBI_T1_B MTK_M4U_ID(9, 10)
+#define M4U_PORT_L9_IMG_IMGCI_T1_B MTK_M4U_ID(9, 11)
+#define M4U_PORT_L9_IMG_YUVO_T5_A MTK_M4U_ID(9, 12)
+#define M4U_PORT_L9_IMG_SMTI_T1_B MTK_M4U_ID(9, 13)
+#define M4U_PORT_L9_IMG_TNCSO_T1_A MTK_M4U_ID(9, 14)
+#define M4U_PORT_L9_IMG_SMTO_T1_A MTK_M4U_ID(9, 15)
+#define M4U_PORT_L9_IMG_TNCSTO_T1_A MTK_M4U_ID(9, 16)
+#define M4U_PORT_L9_IMG_YUVO_T2_B MTK_M4U_ID(9, 17)
+#define M4U_PORT_L9_IMG_YUVO_T5_B MTK_M4U_ID(9, 18)
+#define M4U_PORT_L9_IMG_SMTO_T1_B MTK_M4U_ID(9, 19)
+
+/* larb10 */
+#define M4U_PORT_L10_IMG_IMGI_D1_A MTK_M4U_ID(10, 0)
+#define M4U_PORT_L10_IMG_IMGCI_D1_A MTK_M4U_ID(10, 1)
+#define M4U_PORT_L10_IMG_DEPI_D1_A MTK_M4U_ID(10, 2)
+#define M4U_PORT_L10_IMG_DMGI_D1_A MTK_M4U_ID(10, 3)
+#define M4U_PORT_L10_IMG_VIPI_D1_A MTK_M4U_ID(10, 4)
+#define M4U_PORT_L10_IMG_TNRWI_D1_A MTK_M4U_ID(10, 5)
+#define M4U_PORT_L10_IMG_RECI_D1_A MTK_M4U_ID(10, 6)
+#define M4U_PORT_L10_IMG_SMTI_D1_A MTK_M4U_ID(10, 7)
+#define M4U_PORT_L10_IMG_SMTI_D6_A MTK_M4U_ID(10, 8)
+#define M4U_PORT_L10_IMG_PIMGI_P1_A MTK_M4U_ID(10, 9)
+#define M4U_PORT_L10_IMG_PIMGBI_P1_A MTK_M4U_ID(10, 10)
+#define M4U_PORT_L10_IMG_PIMGCI_P1_A MTK_M4U_ID(10, 11)
+#define M4U_PORT_L10_IMG_PIMGI_P1_B MTK_M4U_ID(10, 12)
+#define M4U_PORT_L10_IMG_PIMGBI_P1_B MTK_M4U_ID(10, 13)
+#define M4U_PORT_L10_IMG_PIMGCI_P1_B MTK_M4U_ID(10, 14)
+#define M4U_PORT_L10_IMG_IMG3O_D1_A MTK_M4U_ID(10, 15)
+#define M4U_PORT_L10_IMG_IMG4O_D1_A MTK_M4U_ID(10, 16)
+#define M4U_PORT_L10_IMG_IMG3CO_D1_A MTK_M4U_ID(10, 17)
+#define M4U_PORT_L10_IMG_FEO_D1_A MTK_M4U_ID(10, 18)
+#define M4U_PORT_L10_IMG_IMG2O_D1_A MTK_M4U_ID(10, 19)
+#define M4U_PORT_L10_IMG_TNRWO_D1_A MTK_M4U_ID(10, 20)
+#define M4U_PORT_L10_IMG_SMTO_D1_A MTK_M4U_ID(10, 21)
+#define M4U_PORT_L10_IMG_WROT_P1_A MTK_M4U_ID(10, 22)
+#define M4U_PORT_L10_IMG_WROT_P1_B MTK_M4U_ID(10, 23)
+
+/* larb11 */
+#define M4U_PORT_L11_IMG_WPE_EIS_RDMA0_A MTK_M4U_ID(11, 0)
+#define M4U_PORT_L11_IMG_WPE_EIS_RDMA1_A MTK_M4U_ID(11, 1)
+#define M4U_PORT_L11_IMG_WPE_EIS_WDMA0_A MTK_M4U_ID(11, 2)
+#define M4U_PORT_L11_IMG_WPE_TNR_RDMA0_A MTK_M4U_ID(11, 3)
+#define M4U_PORT_L11_IMG_WPE_TNR_RDMA1_A MTK_M4U_ID(11, 4)
+#define M4U_PORT_L11_IMG_WPE_TNR_WDMA0_A MTK_M4U_ID(11, 5)
+#define M4U_PORT_L11_IMG_WPE_EIS_CQ0_A MTK_M4U_ID(11, 6)
+#define M4U_PORT_L11_IMG_WPE_EIS_CQ1_A MTK_M4U_ID(11, 7)
+#define M4U_PORT_L11_IMG_WPE_TNR_CQ0_A MTK_M4U_ID(11, 8)
+#define M4U_PORT_L11_IMG_WPE_TNR_CQ1_A MTK_M4U_ID(11, 9)
+
+/* larb12 */
+#define M4U_PORT_L12_IMG_FDVT_RDA MTK_M4U_ID(12, 0)
+#define M4U_PORT_L12_IMG_FDVT_RDB MTK_M4U_ID(12, 1)
+#define M4U_PORT_L12_IMG_FDVT_WRA MTK_M4U_ID(12, 2)
+#define M4U_PORT_L12_IMG_FDVT_WRB MTK_M4U_ID(12, 3)
+#define M4U_PORT_L12_IMG_ME_RDMA MTK_M4U_ID(12, 4)
+#define M4U_PORT_L12_IMG_ME_WDMA MTK_M4U_ID(12, 5)
+#define M4U_PORT_L12_IMG_DVS_RDMA MTK_M4U_ID(12, 6)
+#define M4U_PORT_L12_IMG_DVS_WDMA MTK_M4U_ID(12, 7)
+#define M4U_PORT_L12_IMG_DVP_RDMA MTK_M4U_ID(12, 8)
+#define M4U_PORT_L12_IMG_DVP_WDMA MTK_M4U_ID(12, 9)
+
+/* larb13 */
+#define M4U_PORT_L13_CAM_CAMSV_CQI_E1 MTK_M4U_ID(13, 0)
+#define M4U_PORT_L13_CAM_CAMSV_CQI_E2 MTK_M4U_ID(13, 1)
+#define M4U_PORT_L13_CAM_GCAMSV_A_IMGO_0 MTK_M4U_ID(13, 2)
+#define M4U_PORT_L13_CAM_SCAMSV_A_IMGO_0 MTK_M4U_ID(13, 3)
+#define M4U_PORT_L13_CAM_GCAMSV_B_IMGO_0 MTK_M4U_ID(13, 4)
+#define M4U_PORT_L13_CAM_GCAMSV_B_IMGO_1 MTK_M4U_ID(13, 5)
+#define M4U_PORT_L13_CAM_GCAMSV_A_UFEO_0 MTK_M4U_ID(13, 6)
+#define M4U_PORT_L13_CAM_GCAMSV_B_UFEO_0 MTK_M4U_ID(13, 7)
+#define M4U_PORT_L13_CAM_PDAI_0 MTK_M4U_ID(13, 8)
+#define M4U_PORT_L13_CAM_FAKE MTK_M4U_ID(13, 9)
+
+/* larb14 */
+#define M4U_PORT_L14_CAM_GCAMSV_A_IMGO_1 MTK_M4U_ID(14, 0)
+#define M4U_PORT_L14_CAM_SCAMSV_A_IMGO_1 MTK_M4U_ID(14, 1)
+#define M4U_PORT_L14_CAM_GCAMSV_B_IMGO_0 MTK_M4U_ID(14, 2)
+#define M4U_PORT_L14_CAM_GCAMSV_B_IMGO_1 MTK_M4U_ID(14, 3)
+#define M4U_PORT_L14_CAM_SCAMSV_B_IMGO_0 MTK_M4U_ID(14, 4)
+#define M4U_PORT_L14_CAM_SCAMSV_B_IMGO_1 MTK_M4U_ID(14, 5)
+#define M4U_PORT_L14_CAM_IPUI MTK_M4U_ID(14, 6)
+#define M4U_PORT_L14_CAM_IPU2I MTK_M4U_ID(14, 7)
+#define M4U_PORT_L14_CAM_IPUO MTK_M4U_ID(14, 8)
+#define M4U_PORT_L14_CAM_IPU2O MTK_M4U_ID(14, 9)
+#define M4U_PORT_L14_CAM_IPU3O MTK_M4U_ID(14, 10)
+#define M4U_PORT_L14_CAM_GCAMSV_A_UFEO_1 MTK_M4U_ID(14, 11)
+#define M4U_PORT_L14_CAM_GCAMSV_B_UFEO_1 MTK_M4U_ID(14, 12)
+#define M4U_PORT_L14_CAM_PDAI_1 MTK_M4U_ID(14, 13)
+#define M4U_PORT_L14_CAM_PDAO MTK_M4U_ID(14, 14)
+
+/* larb15: null */
+
+/* larb16 */
+#define M4U_PORT_L16_CAM_IMGO_R1 MTK_M4U_ID(16, 0)
+#define M4U_PORT_L16_CAM_CQI_R1 MTK_M4U_ID(16, 1)
+#define M4U_PORT_L16_CAM_CQI_R2 MTK_M4U_ID(16, 2)
+#define M4U_PORT_L16_CAM_BPCI_R1 MTK_M4U_ID(16, 3)
+#define M4U_PORT_L16_CAM_LSCI_R1 MTK_M4U_ID(16, 4)
+#define M4U_PORT_L16_CAM_RAWI_R2 MTK_M4U_ID(16, 5)
+#define M4U_PORT_L16_CAM_RAWI_R3 MTK_M4U_ID(16, 6)
+#define M4U_PORT_L16_CAM_UFDI_R2 MTK_M4U_ID(16, 7)
+#define M4U_PORT_L16_CAM_UFDI_R3 MTK_M4U_ID(16, 8)
+#define M4U_PORT_L16_CAM_RAWI_R4 MTK_M4U_ID(16, 9)
+#define M4U_PORT_L16_CAM_RAWI_R5 MTK_M4U_ID(16, 10)
+#define M4U_PORT_L16_CAM_AAI_R1 MTK_M4U_ID(16, 11)
+#define M4U_PORT_L16_CAM_FHO_R1 MTK_M4U_ID(16, 12)
+#define M4U_PORT_L16_CAM_AAO_R1 MTK_M4U_ID(16, 13)
+#define M4U_PORT_L16_CAM_TSFSO_R1 MTK_M4U_ID(16, 14)
+#define M4U_PORT_L16_CAM_FLKO_R1 MTK_M4U_ID(16, 15)
+
+/* larb17 */
+#define M4U_PORT_L17_CAM_YUVO_R1 MTK_M4U_ID(17, 0)
+#define M4U_PORT_L17_CAM_YUVO_R3 MTK_M4U_ID(17, 1)
+#define M4U_PORT_L17_CAM_YUVCO_R1 MTK_M4U_ID(17, 2)
+#define M4U_PORT_L17_CAM_YUVO_R2 MTK_M4U_ID(17, 3)
+#define M4U_PORT_L17_CAM_RZH1N2TO_R1 MTK_M4U_ID(17, 4)
+#define M4U_PORT_L17_CAM_DRZS4NO_R1 MTK_M4U_ID(17, 5)
+#define M4U_PORT_L17_CAM_TNCSO_R1 MTK_M4U_ID(17, 6)
+
+/* larb18 */
+#define M4U_PORT_L18_CAM_CCUI MTK_M4U_ID(18, 0)
+#define M4U_PORT_L18_CAM_CCUO MTK_M4U_ID(18, 1)
+#define M4U_PORT_L18_CAM_CCUI2 MTK_M4U_ID(18, 2)
+#define M4U_PORT_L18_CAM_CCUO2 MTK_M4U_ID(18, 3)
+
+/* larb19 */
+#define M4U_PORT_L19_VENC_RCPU MTK_M4U_ID(19, 0)
+#define M4U_PORT_L19_VENC_REC MTK_M4U_ID(19, 1)
+#define M4U_PORT_L19_VENC_BSDMA MTK_M4U_ID(19, 2)
+#define M4U_PORT_L19_VENC_SV_COMV MTK_M4U_ID(19, 3)
+#define M4U_PORT_L19_VENC_RD_COMV MTK_M4U_ID(19, 4)
+#define M4U_PORT_L19_VENC_NBM_RDMA MTK_M4U_ID(19, 5)
+#define M4U_PORT_L19_VENC_NBM_RDMA_LITE MTK_M4U_ID(19, 6)
+#define M4U_PORT_L19_JPGENC_Y_RDMA MTK_M4U_ID(19, 7)
+#define M4U_PORT_L19_JPGENC_C_RDMA MTK_M4U_ID(19, 8)
+#define M4U_PORT_L19_JPGENC_Q_TABLE MTK_M4U_ID(19, 9)
+#define M4U_PORT_L19_VENC_SUB_W_LUMA MTK_M4U_ID(19, 10)
+#define M4U_PORT_L19_VENC_FCS_NBM_RDMA MTK_M4U_ID(19, 11)
+#define M4U_PORT_L19_JPGENC_BSDMA MTK_M4U_ID(19, 12)
+#define M4U_PORT_L19_JPGDEC_WDMA0 MTK_M4U_ID(19, 13)
+#define M4U_PORT_L19_JPGDEC_BSDMA0 MTK_M4U_ID(19, 14)
+#define M4U_PORT_L19_VENC_NBM_WDMA MTK_M4U_ID(19, 15)
+#define M4U_PORT_L19_VENC_NBM_WDMA_LITE MTK_M4U_ID(19, 16)
+#define M4U_PORT_L19_VENC_FCS_NBM_WDMA MTK_M4U_ID(19, 17)
+#define M4U_PORT_L19_JPGDEC_WDMA1 MTK_M4U_ID(19, 18)
+#define M4U_PORT_L19_JPGDEC_BSDMA1 MTK_M4U_ID(19, 19)
+#define M4U_PORT_L19_JPGDEC_BUFF_OFFSET1 MTK_M4U_ID(19, 20)
+#define M4U_PORT_L19_JPGDEC_BUFF_OFFSET0 MTK_M4U_ID(19, 21)
+#define M4U_PORT_L19_VENC_CUR_LUMA MTK_M4U_ID(19, 22)
+#define M4U_PORT_L19_VENC_CUR_CHROMA MTK_M4U_ID(19, 23)
+#define M4U_PORT_L19_VENC_REF_LUMA MTK_M4U_ID(19, 24)
+#define M4U_PORT_L19_VENC_REF_CHROMA MTK_M4U_ID(19, 25)
+#define M4U_PORT_L19_VENC_SUB_R_CHROMA MTK_M4U_ID(19, 26)
+
+/* larb20 */
+#define M4U_PORT_L20_VENC_RCPU MTK_M4U_ID(20, 0)
+#define M4U_PORT_L20_VENC_REC MTK_M4U_ID(20, 1)
+#define M4U_PORT_L20_VENC_BSDMA MTK_M4U_ID(20, 2)
+#define M4U_PORT_L20_VENC_SV_COMV MTK_M4U_ID(20, 3)
+#define M4U_PORT_L20_VENC_RD_COMV MTK_M4U_ID(20, 4)
+#define M4U_PORT_L20_VENC_NBM_RDMA MTK_M4U_ID(20, 5)
+#define M4U_PORT_L20_VENC_NBM_RDMA_LITE MTK_M4U_ID(20, 6)
+#define M4U_PORT_L20_JPGENC_Y_RDMA MTK_M4U_ID(20, 7)
+#define M4U_PORT_L20_JPGENC_C_RDMA MTK_M4U_ID(20, 8)
+#define M4U_PORT_L20_JPGENC_Q_TABLE MTK_M4U_ID(20, 9)
+#define M4U_PORT_L20_VENC_SUB_W_LUMA MTK_M4U_ID(20, 10)
+#define M4U_PORT_L20_VENC_FCS_NBM_RDMA MTK_M4U_ID(20, 11)
+#define M4U_PORT_L20_JPGENC_BSDMA MTK_M4U_ID(20, 12)
+#define M4U_PORT_L20_JPGDEC_WDMA0 MTK_M4U_ID(20, 13)
+#define M4U_PORT_L20_JPGDEC_BSDMA0 MTK_M4U_ID(20, 14)
+#define M4U_PORT_L20_VENC_NBM_WDMA MTK_M4U_ID(20, 15)
+#define M4U_PORT_L20_VENC_NBM_WDMA_LITE MTK_M4U_ID(20, 16)
+#define M4U_PORT_L20_VENC_FCS_NBM_WDMA MTK_M4U_ID(20, 17)
+#define M4U_PORT_L20_JPGDEC_WDMA1 MTK_M4U_ID(20, 18)
+#define M4U_PORT_L20_JPGDEC_BSDMA1 MTK_M4U_ID(20, 19)
+#define M4U_PORT_L20_JPGDEC_BUFF_OFFSET1 MTK_M4U_ID(20, 20)
+#define M4U_PORT_L20_JPGDEC_BUFF_OFFSET0 MTK_M4U_ID(20, 21)
+#define M4U_PORT_L20_VENC_CUR_LUMA MTK_M4U_ID(20, 22)
+#define M4U_PORT_L20_VENC_CUR_CHROMA MTK_M4U_ID(20, 23)
+#define M4U_PORT_L20_VENC_REF_LUMA MTK_M4U_ID(20, 24)
+#define M4U_PORT_L20_VENC_REF_CHROMA MTK_M4U_ID(20, 25)
+#define M4U_PORT_L20_VENC_SUB_R_CHROMA MTK_M4U_ID(20, 26)
+
+/* larb21 */
+#define M4U_PORT_L21_VDEC_MC_EXT MTK_M4U_ID(21, 0)
+#define M4U_PORT_L21_VDEC_UFO_EXT MTK_M4U_ID(21, 1)
+#define M4U_PORT_L21_VDEC_PP_EXT MTK_M4U_ID(21, 2)
+#define M4U_PORT_L21_VDEC_PRED_RD_EXT MTK_M4U_ID(21, 3)
+#define M4U_PORT_L21_VDEC_PRED_WR_EXT MTK_M4U_ID(21, 4)
+#define M4U_PORT_L21_VDEC_PPWRAP_EXT MTK_M4U_ID(21, 5)
+#define M4U_PORT_L21_VDEC_TILE_EXT MTK_M4U_ID(21, 6)
+#define M4U_PORT_L21_VDEC_VLD_EXT MTK_M4U_ID(21, 7)
+#define M4U_PORT_L21_VDEC_VLD2_EXT MTK_M4U_ID(21, 8)
+#define M4U_PORT_L21_VDEC_AVC_MV_EXT MTK_M4U_ID(21, 9)
+
+/* larb22 */
+#define M4U_PORT_L22_VDEC_MC_EXT MTK_M4U_ID(22, 0)
+#define M4U_PORT_L22_VDEC_UFO_EXT MTK_M4U_ID(22, 1)
+#define M4U_PORT_L22_VDEC_PP_EXT MTK_M4U_ID(22, 2)
+#define M4U_PORT_L22_VDEC_PRED_RD_EXT MTK_M4U_ID(22, 3)
+#define M4U_PORT_L22_VDEC_PRED_WR_EXT MTK_M4U_ID(22, 4)
+#define M4U_PORT_L22_VDEC_PPWRAP_EXT MTK_M4U_ID(22, 5)
+#define M4U_PORT_L22_VDEC_TILE_EXT MTK_M4U_ID(22, 6)
+#define M4U_PORT_L22_VDEC_VLD_EXT MTK_M4U_ID(22, 7)
+#define M4U_PORT_L22_VDEC_VLD2_EXT MTK_M4U_ID(22, 8)
+#define M4U_PORT_L22_VDEC_AVC_MV_EXT MTK_M4U_ID(22, 9)
+
+/* larb23 */
+#define M4U_PORT_L23_VDEC_UFO_ENC_EXT MTK_M4U_ID(23, 0)
+#define M4U_PORT_L23_VDEC_RDMA_EXT MTK_M4U_ID(23, 1)
+
+/* larb24 */
+#define M4U_PORT_L24_VDEC_LAT0_VLD_EXT MTK_M4U_ID(24, 0)
+#define M4U_PORT_L24_VDEC_LAT0_VLD2_EXT MTK_M4U_ID(24, 1)
+#define M4U_PORT_L24_VDEC_LAT0_AVC_MC_EXT MTK_M4U_ID(24, 2)
+#define M4U_PORT_L24_VDEC_LAT0_PRED_RD_EXT MTK_M4U_ID(24, 3)
+#define M4U_PORT_L24_VDEC_LAT0_TILE_EXT MTK_M4U_ID(24, 4)
+#define M4U_PORT_L24_VDEC_LAT0_WDMA_EXT MTK_M4U_ID(24, 5)
+#define M4U_PORT_L24_VDEC_LAT1_VLD_EXT MTK_M4U_ID(24, 6)
+#define M4U_PORT_L24_VDEC_LAT1_VLD2_EXT MTK_M4U_ID(24, 7)
+#define M4U_PORT_L24_VDEC_LAT1_AVC_MC_EXT MTK_M4U_ID(24, 8)
+#define M4U_PORT_L24_VDEC_LAT1_PRED_RD_EXT MTK_M4U_ID(24, 9)
+#define M4U_PORT_L24_VDEC_LAT1_TILE_EXT MTK_M4U_ID(24, 10)
+#define M4U_PORT_L24_VDEC_LAT1_WDMA_EXT MTK_M4U_ID(24, 11)
+
+/* larb25 */
+#define M4U_PORT_L25_CAM_MRAW0_LSCI_M1 MTK_M4U_ID(25, 0)
+#define M4U_PORT_L25_CAM_MRAW0_CQI_M1 MTK_M4U_ID(25, 1)
+#define M4U_PORT_L25_CAM_MRAW0_CQI_M2 MTK_M4U_ID(25, 2)
+#define M4U_PORT_L25_CAM_MRAW0_IMGO_M1 MTK_M4U_ID(25, 3)
+#define M4U_PORT_L25_CAM_MRAW0_IMGBO_M1 MTK_M4U_ID(25, 4)
+#define M4U_PORT_L25_CAM_MRAW2_LSCI_M1 MTK_M4U_ID(25, 5)
+#define M4U_PORT_L25_CAM_MRAW2_CQI_M1 MTK_M4U_ID(25, 6)
+#define M4U_PORT_L25_CAM_MRAW2_CQI_M2 MTK_M4U_ID(25, 7)
+#define M4U_PORT_L25_CAM_MRAW2_IMGO_M1 MTK_M4U_ID(25, 8)
+#define M4U_PORT_L25_CAM_MRAW2_IMGBO_M1 MTK_M4U_ID(25, 9)
+#define M4U_PORT_L25_CAM_MRAW0_AFO_M1 MTK_M4U_ID(25, 10)
+#define M4U_PORT_L25_CAM_MRAW2_AFO_M1 MTK_M4U_ID(25, 11)
+
+/* larb26 */
+#define M4U_PORT_L26_CAM_MRAW1_LSCI_M1 MTK_M4U_ID(26, 0)
+#define M4U_PORT_L26_CAM_MRAW1_CQI_M1 MTK_M4U_ID(26, 1)
+#define M4U_PORT_L26_CAM_MRAW1_CQI_M2 MTK_M4U_ID(26, 2)
+#define M4U_PORT_L26_CAM_MRAW1_IMGO_M1 MTK_M4U_ID(26, 3)
+#define M4U_PORT_L26_CAM_MRAW1_IMGBO_M1 MTK_M4U_ID(26, 4)
+#define M4U_PORT_L26_CAM_MRAW3_LSCI_M1 MTK_M4U_ID(26, 5)
+#define M4U_PORT_L26_CAM_MRAW3_CQI_M1 MTK_M4U_ID(26, 6)
+#define M4U_PORT_L26_CAM_MRAW3_CQI_M2 MTK_M4U_ID(26, 7)
+#define M4U_PORT_L26_CAM_MRAW3_IMGO_M1 MTK_M4U_ID(26, 8)
+#define M4U_PORT_L26_CAM_MRAW3_IMGBO_M1 MTK_M4U_ID(26, 9)
+#define M4U_PORT_L26_CAM_MRAW1_AFO_M1 MTK_M4U_ID(26, 10)
+#define M4U_PORT_L26_CAM_MRAW3_AFO_M1 MTK_M4U_ID(26, 11)
+
+/* larb27 */
+#define M4U_PORT_L27_CAM_IMGO_R1 MTK_M4U_ID(27, 0)
+#define M4U_PORT_L27_CAM_CQI_R1 MTK_M4U_ID(27, 1)
+#define M4U_PORT_L27_CAM_CQI_R2 MTK_M4U_ID(27, 2)
+#define M4U_PORT_L27_CAM_BPCI_R1 MTK_M4U_ID(27, 3)
+#define M4U_PORT_L27_CAM_LSCI_R1 MTK_M4U_ID(27, 4)
+#define M4U_PORT_L27_CAM_RAWI_R2 MTK_M4U_ID(27, 5)
+#define M4U_PORT_L27_CAM_RAWI_R3 MTK_M4U_ID(27, 6)
+#define M4U_PORT_L27_CAM_UFDI_R2 MTK_M4U_ID(27, 7)
+#define M4U_PORT_L27_CAM_UFDI_R3 MTK_M4U_ID(27, 8)
+#define M4U_PORT_L27_CAM_RAWI_R4 MTK_M4U_ID(27, 9)
+#define M4U_PORT_L27_CAM_RAWI_R5 MTK_M4U_ID(27, 10)
+#define M4U_PORT_L27_CAM_AAI_R1 MTK_M4U_ID(27, 11)
+#define M4U_PORT_L27_CAM_FHO_R1 MTK_M4U_ID(27, 12)
+#define M4U_PORT_L27_CAM_AAO_R1 MTK_M4U_ID(27, 13)
+#define M4U_PORT_L27_CAM_TSFSO_R1 MTK_M4U_ID(27, 14)
+#define M4U_PORT_L27_CAM_FLKO_R1 MTK_M4U_ID(27, 15)
+
+/* larb28 */
+#define M4U_PORT_L28_CAM_YUVO_R1 MTK_M4U_ID(28, 0)
+#define M4U_PORT_L28_CAM_YUVO_R3 MTK_M4U_ID(28, 1)
+#define M4U_PORT_L28_CAM_YUVCO_R1 MTK_M4U_ID(28, 2)
+#define M4U_PORT_L28_CAM_YUVO_R2 MTK_M4U_ID(28, 3)
+#define M4U_PORT_L28_CAM_RZH1N2TO_R1 MTK_M4U_ID(28, 4)
+#define M4U_PORT_L28_CAM_DRZS4NO_R1 MTK_M4U_ID(28, 5)
+#define M4U_PORT_L28_CAM_TNCSO_R1 MTK_M4U_ID(28, 6)
+
+/* Infra iommu ports */
+/* PCIe1: read: BIT16; write: BIT17. */
+#define IOMMU_PORT_INFRA_PCIE1 MTK_IFAIOMMU_PERI_ID(16)
+/* PCIe0: read: BIT18; write: BIT19. */
+#define IOMMU_PORT_INFRA_PCIE0 MTK_IFAIOMMU_PERI_ID(18)
+#define IOMMU_PORT_INFRA_SSUSB_P3_R MTK_IFAIOMMU_PERI_ID(20)
+#define IOMMU_PORT_INFRA_SSUSB_P3_W MTK_IFAIOMMU_PERI_ID(21)
+#define IOMMU_PORT_INFRA_SSUSB_P2_R MTK_IFAIOMMU_PERI_ID(22)
+#define IOMMU_PORT_INFRA_SSUSB_P2_W MTK_IFAIOMMU_PERI_ID(23)
+#define IOMMU_PORT_INFRA_SSUSB_P1_1_R MTK_IFAIOMMU_PERI_ID(24)
+#define IOMMU_PORT_INFRA_SSUSB_P1_1_W MTK_IFAIOMMU_PERI_ID(25)
+#define IOMMU_PORT_INFRA_SSUSB_P1_0_R MTK_IFAIOMMU_PERI_ID(26)
+#define IOMMU_PORT_INFRA_SSUSB_P1_0_W MTK_IFAIOMMU_PERI_ID(27)
+#define IOMMU_PORT_INFRA_SSUSB2_R MTK_IFAIOMMU_PERI_ID(28)
+#define IOMMU_PORT_INFRA_SSUSB2_W MTK_IFAIOMMU_PERI_ID(29)
+#define IOMMU_PORT_INFRA_SSUSB_R MTK_IFAIOMMU_PERI_ID(30)
+#define IOMMU_PORT_INFRA_SSUSB_W MTK_IFAIOMMU_PERI_ID(31)
+
+#endif
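Per the comments just above, each infra IOMMU master owns a read/write bit pair with the write bit directly following the read bit, and MTK_IFAIOMMU_PERI_ID() (added to mtk-memory-port.h below) routes a bit number through larb 0, so the resulting ID equals the raw bit index. A hedged sketch of that arithmetic:

    #include <assert.h>

    /* Assumptions: infra ports sit on larb 0, so the ID reduces to
     * the raw bit number, and write = read + 1 (per the PCIe comments
     * in the header above).
     */
    #define MTK_M4U_ID(larb, port)		(((larb) << 5) | (port))
    #define MTK_IFAIOMMU_PERI_ID(port)	MTK_M4U_ID(0, port)

    int main(void)
    {
    	unsigned int pcie0_rd = MTK_IFAIOMMU_PERI_ID(18);	/* BIT18: read */
    	unsigned int pcie0_wr = pcie0_rd + 1;			/* BIT19: write */

    	assert(pcie0_rd == 18 && pcie0_wr == 19);
    	return 0;
    }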
diff --git a/include/dt-bindings/memory/mtk-memory-port.h b/include/dt-bindings/memory/mtk-memory-port.h
index 7d64103209af..2f68a0511a25 100644
--- a/include/dt-bindings/memory/mtk-memory-port.h
+++ b/include/dt-bindings/memory/mtk-memory-port.h
@@ -12,4 +12,6 @@
#define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0x1f)
#define MTK_M4U_TO_PORT(id) ((id) & 0x1f)
+#define MTK_IFAIOMMU_PERI_ID(port) MTK_M4U_ID(0, port)
+
#endif
diff --git a/include/dt-bindings/memory/nvidia,tegra264.h b/include/dt-bindings/memory/nvidia,tegra264.h
new file mode 100644
index 000000000000..521405c01f84
--- /dev/null
+++ b/include/dt-bindings/memory/nvidia,tegra264.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef DT_BINDINGS_MEMORY_NVIDIA_TEGRA264_H
+#define DT_BINDINGS_MEMORY_NVIDIA_TEGRA264_H
+
+#define TEGRA264_SID(x) ((x) << 8)
+
+/*
+ * SMMU stream IDs
+ */
+
+#define TEGRA264_SID_AON TEGRA264_SID(0x01)
+#define TEGRA264_SID_APE TEGRA264_SID(0x02)
+#define TEGRA264_SID_ETR TEGRA264_SID(0x03)
+#define TEGRA264_SID_BPMP TEGRA264_SID(0x04)
+#define TEGRA264_SID_DCE TEGRA264_SID(0x05)
+#define TEGRA264_SID_EQOS TEGRA264_SID(0x06)
+#define TEGRA264_SID_GPCDMA TEGRA264_SID(0x08)
+#define TEGRA264_SID_DISP TEGRA264_SID(0x09)
+#define TEGRA264_SID_HDA TEGRA264_SID(0x0a)
+#define TEGRA264_SID_HOST1X TEGRA264_SID(0x0b)
+#define TEGRA264_SID_ISP0 TEGRA264_SID(0x0c)
+#define TEGRA264_SID_ISP1 TEGRA264_SID(0x0d)
+#define TEGRA264_SID_PMA0 TEGRA264_SID(0x0e)
+#define TEGRA264_SID_FSI0 TEGRA264_SID(0x0f)
+#define TEGRA264_SID_FSI1 TEGRA264_SID(0x10)
+#define TEGRA264_SID_PVA TEGRA264_SID(0x11)
+#define TEGRA264_SID_SDMMC0 TEGRA264_SID(0x12)
+#define TEGRA264_SID_MGBE0 TEGRA264_SID(0x13)
+#define TEGRA264_SID_MGBE1 TEGRA264_SID(0x14)
+#define TEGRA264_SID_MGBE2 TEGRA264_SID(0x15)
+#define TEGRA264_SID_MGBE3 TEGRA264_SID(0x16)
+#define TEGRA264_SID_MSSSEQ TEGRA264_SID(0x17)
+#define TEGRA264_SID_SE TEGRA264_SID(0x18)
+#define TEGRA264_SID_SEU1 TEGRA264_SID(0x19)
+#define TEGRA264_SID_SEU2 TEGRA264_SID(0x1a)
+#define TEGRA264_SID_SEU3 TEGRA264_SID(0x1b)
+#define TEGRA264_SID_PSC TEGRA264_SID(0x1c)
+#define TEGRA264_SID_OESP TEGRA264_SID(0x23)
+#define TEGRA264_SID_SB TEGRA264_SID(0x24)
+#define TEGRA264_SID_XSPI0 TEGRA264_SID(0x25)
+#define TEGRA264_SID_TSEC TEGRA264_SID(0x29)
+#define TEGRA264_SID_UFS TEGRA264_SID(0x2a)
+#define TEGRA264_SID_RCE TEGRA264_SID(0x2b)
+#define TEGRA264_SID_RCE1 TEGRA264_SID(0x2c)
+#define TEGRA264_SID_VI TEGRA264_SID(0x2e)
+#define TEGRA264_SID_VI1 TEGRA264_SID(0x2f)
+#define TEGRA264_SID_VIC TEGRA264_SID(0x30)
+#define TEGRA264_SID_XUSB_DEV TEGRA264_SID(0x32)
+#define TEGRA264_SID_XUSB_DEV1 TEGRA264_SID(0x33)
+#define TEGRA264_SID_XUSB_DEV2 TEGRA264_SID(0x34)
+#define TEGRA264_SID_XUSB_DEV3 TEGRA264_SID(0x35)
+#define TEGRA264_SID_XUSB_DEV4 TEGRA264_SID(0x36)
+#define TEGRA264_SID_XUSB_DEV5 TEGRA264_SID(0x37)
+
+/*
+ * memory client IDs
+ */
+
+/* HOST1X read client */
+#define TEGRA264_MEMORY_CLIENT_HOST1XR 0x16
+/* VIC read client */
+#define TEGRA264_MEMORY_CLIENT_VICR 0x6c
+/* VIC Write client */
+#define TEGRA264_MEMORY_CLIENT_VICW 0x6d
+/* VI R5 Write client */
+#define TEGRA264_MEMORY_CLIENT_VIW 0x72
+#define TEGRA264_MEMORY_CLIENT_NVDECSRD2MC 0x78
+#define TEGRA264_MEMORY_CLIENT_NVDECSWR2MC 0x79
+/* Audio processor (APE) read client */
+#define TEGRA264_MEMORY_CLIENT_APER 0x7a
+/* Audio processor (APE) write client */
+#define TEGRA264_MEMORY_CLIENT_APEW 0x7b
+/* Audio DMA Read client */
+#define TEGRA264_MEMORY_CLIENT_APEDMAR 0x9f
+/* Audio DMA Write client */
+#define TEGRA264_MEMORY_CLIENT_APEDMAW 0xa0
+#define TEGRA264_MEMORY_CLIENT_GPUR02MC 0xb6
+#define TEGRA264_MEMORY_CLIENT_GPUW02MC 0xb7
+/* VI Falcon Read client */
+#define TEGRA264_MEMORY_CLIENT_VIFALCONR 0xbc
+/* VI Falcon Write client */
+#define TEGRA264_MEMORY_CLIENT_VIFALCONW 0xbd
+/* Read Client of RCE */
+#define TEGRA264_MEMORY_CLIENT_RCER 0xd2
+/* Write client of RCE */
+#define TEGRA264_MEMORY_CLIENT_RCEW 0xd3
+/* PCIE0/MSI Write clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE0W 0xd9
+/* PCIE1/RPX4 Read clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE1R 0xda
+/* PCIE1/RPX4 Write clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE1W 0xdb
+/* PCIE2/DMX4 Read clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE2AR 0xdc
+/* PCIE2/DMX4 Write clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE2AW 0xdd
+/* PCIE3/RPX4 Read clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE3R 0xde
+/* PCIE3/RPX4 Write clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE3W 0xdf
+/* PCIE4/DMX8 Read clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE4R 0xe0
+/* PCIE4/DMX8 Write clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE4W 0xe1
+/* PCIE5/DMX4 Read clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE5R 0xe2
+/* PCIE5/DMX4 Write clients */
+#define TEGRA264_MEMORY_CLIENT_PCIE5W 0xe3
+/* UFS Read client */
+#define TEGRA264_MEMORY_CLIENT_UFSR 0x15c
+/* UFS write client */
+#define TEGRA264_MEMORY_CLIENT_UFSW 0x15d
+/* HDA Read client */
+#define TEGRA264_MEMORY_CLIENT_HDAR 0x17c
+/* HDA Write client */
+#define TEGRA264_MEMORY_CLIENT_HDAW 0x17d
+/* Disp ISO Read Client */
+#define TEGRA264_MEMORY_CLIENT_DISPR 0x182
+/* MGBE0 Read mccif */
+#define TEGRA264_MEMORY_CLIENT_MGBE0R 0x1a2
+/* MGBE0 Write mccif */
+#define TEGRA264_MEMORY_CLIENT_MGBE0W 0x1a3
+/* MGBE1 Read mccif */
+#define TEGRA264_MEMORY_CLIENT_MGBE1R 0x1a4
+/* MGBE1 Write mccif */
+#define TEGRA264_MEMORY_CLIENT_MGBE1W 0x1a5
+/* VI1 R5 Write client */
+#define TEGRA264_MEMORY_CLIENT_VI1W 0x1a6
+/* SDMMC0 Read mccif */
+#define TEGRA264_MEMORY_CLIENT_SDMMC0R 0x1c2
+/* SDMMC0 Write mccif */
+#define TEGRA264_MEMORY_CLIENT_SDMMC0W 0x1c3
+
+#endif /* DT_BINDINGS_MEMORY_NVIDIA_TEGRA264_H */
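TEGRA264_SID() shifts the raw ID left by 8, so for example TEGRA264_SID_HDA evaluates to 0x0a00; the header does not say what the low byte is reserved for. A small compile-time check, as a sketch using values copied from the header above:

    /* Values copied from the header; the check is illustrative only. */
    #define TEGRA264_SID(x)		((x) << 8)
    #define TEGRA264_SID_HDA	TEGRA264_SID(0x0a)

    _Static_assert(TEGRA264_SID_HDA == 0x0a00,
    	       "stream IDs occupy the second byte");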
diff --git a/include/dt-bindings/memory/tegra210-mc.h b/include/dt-bindings/memory/tegra210-mc.h
index 5e082547f179..881bf78aa8b2 100644
--- a/include/dt-bindings/memory/tegra210-mc.h
+++ b/include/dt-bindings/memory/tegra210-mc.h
@@ -75,4 +75,78 @@
#define TEGRA210_MC_RESET_ETR 28
#define TEGRA210_MC_RESET_TSECB 29
+#define TEGRA210_MC_PTCR 0
+#define TEGRA210_MC_DISPLAY0A 1
+#define TEGRA210_MC_DISPLAY0AB 2
+#define TEGRA210_MC_DISPLAY0B 3
+#define TEGRA210_MC_DISPLAY0BB 4
+#define TEGRA210_MC_DISPLAY0C 5
+#define TEGRA210_MC_DISPLAY0CB 6
+#define TEGRA210_MC_AFIR 14
+#define TEGRA210_MC_AVPCARM7R 15
+#define TEGRA210_MC_DISPLAYHC 16
+#define TEGRA210_MC_DISPLAYHCB 17
+#define TEGRA210_MC_HDAR 21
+#define TEGRA210_MC_HOST1XDMAR 22
+#define TEGRA210_MC_HOST1XR 23
+#define TEGRA210_MC_NVENCSRD 28
+#define TEGRA210_MC_PPCSAHBDMAR 29
+#define TEGRA210_MC_PPCSAHBSLVR 30
+#define TEGRA210_MC_SATAR 31
+#define TEGRA210_MC_MPCORER 39
+#define TEGRA210_MC_NVENCSWR 43
+#define TEGRA210_MC_AFIW 49
+#define TEGRA210_MC_AVPCARM7W 50
+#define TEGRA210_MC_HDAW 53
+#define TEGRA210_MC_HOST1XW 54
+#define TEGRA210_MC_MPCOREW 57
+#define TEGRA210_MC_PPCSAHBDMAW 59
+#define TEGRA210_MC_PPCSAHBSLVW 60
+#define TEGRA210_MC_SATAW 61
+#define TEGRA210_MC_ISPRA 68
+#define TEGRA210_MC_ISPWA 70
+#define TEGRA210_MC_ISPWB 71
+#define TEGRA210_MC_XUSB_HOSTR 74
+#define TEGRA210_MC_XUSB_HOSTW 75
+#define TEGRA210_MC_XUSB_DEVR 76
+#define TEGRA210_MC_XUSB_DEVW 77
+#define TEGRA210_MC_ISPRAB 78
+#define TEGRA210_MC_ISPWAB 80
+#define TEGRA210_MC_ISPWBB 81
+#define TEGRA210_MC_TSECSRD 84
+#define TEGRA210_MC_TSECSWR 85
+#define TEGRA210_MC_A9AVPSCR 86
+#define TEGRA210_MC_A9AVPSCW 87
+#define TEGRA210_MC_GPUSRD 88
+#define TEGRA210_MC_GPUSWR 89
+#define TEGRA210_MC_DISPLAYT 90
+#define TEGRA210_MC_SDMMCRA 96
+#define TEGRA210_MC_SDMMCRAA 97
+#define TEGRA210_MC_SDMMCR 98
+#define TEGRA210_MC_SDMMCRAB 99
+#define TEGRA210_MC_SDMMCWA 100
+#define TEGRA210_MC_SDMMCWAA 101
+#define TEGRA210_MC_SDMMCW 102
+#define TEGRA210_MC_SDMMCWAB 103
+#define TEGRA210_MC_VICSRD 108
+#define TEGRA210_MC_VICSWR 109
+#define TEGRA210_MC_VIW 114
+#define TEGRA210_MC_DISPLAYD 115
+#define TEGRA210_MC_NVDECSRD 120
+#define TEGRA210_MC_NVDECSWR 121
+#define TEGRA210_MC_APER 122
+#define TEGRA210_MC_APEW 123
+#define TEGRA210_MC_NVJPGRD 126
+#define TEGRA210_MC_NVJPGWR 127
+#define TEGRA210_MC_SESRD 128
+#define TEGRA210_MC_SESWR 129
+#define TEGRA210_MC_AXIAPR 130
+#define TEGRA210_MC_AXIAPW 131
+#define TEGRA210_MC_ETRR 132
+#define TEGRA210_MC_ETRW 133
+#define TEGRA210_MC_TSECSRDB 134
+#define TEGRA210_MC_TSECSWRB 135
+#define TEGRA210_MC_GPUSRD2 136
+#define TEGRA210_MC_GPUSWR2 137
+
#endif
diff --git a/include/dt-bindings/memory/tegra234-mc.h b/include/dt-bindings/memory/tegra234-mc.h
new file mode 100644
index 000000000000..6e60d55491b3
--- /dev/null
+++ b/include/dt-bindings/memory/tegra234-mc.h
@@ -0,0 +1,544 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef DT_BINDINGS_MEMORY_TEGRA234_MC_H
+#define DT_BINDINGS_MEMORY_TEGRA234_MC_H
+
+/* special clients */
+#define TEGRA234_SID_INVALID 0x00
+#define TEGRA234_SID_PASSTHROUGH 0x7f
+
+/* ISO stream IDs */
+#define TEGRA234_SID_ISO_NVDISPLAY 0x01
+#define TEGRA234_SID_ISO_VI 0x02
+#define TEGRA234_SID_ISO_VIFALC 0x03
+#define TEGRA234_SID_ISO_VI2 0x04
+#define TEGRA234_SID_ISO_VI2FALC 0x05
+#define TEGRA234_SID_ISO_VI_VM2 0x06
+#define TEGRA234_SID_ISO_VI2_VM2 0x07
+
+/* NISO0 stream IDs */
+#define TEGRA234_SID_AON 0x01
+#define TEGRA234_SID_APE 0x02
+#define TEGRA234_SID_HDA 0x03
+#define TEGRA234_SID_GPCDMA 0x04
+#define TEGRA234_SID_ETR 0x05
+#define TEGRA234_SID_MGBE 0x06
+#define TEGRA234_SID_NVDISPLAY 0x07
+#define TEGRA234_SID_DCE 0x08
+#define TEGRA234_SID_PSC 0x09
+#define TEGRA234_SID_RCE 0x0a
+#define TEGRA234_SID_SCE 0x0b
+#define TEGRA234_SID_UFSHC 0x0c
+#define TEGRA234_SID_APE_1 0x0d
+#define TEGRA234_SID_GPCDMA_1 0x0e
+#define TEGRA234_SID_GPCDMA_2 0x0f
+#define TEGRA234_SID_GPCDMA_3 0x10
+#define TEGRA234_SID_GPCDMA_4 0x11
+#define TEGRA234_SID_PCIE0 0x12
+#define TEGRA234_SID_PCIE4 0x13
+#define TEGRA234_SID_PCIE5 0x14
+#define TEGRA234_SID_PCIE6 0x15
+#define TEGRA234_SID_RCE_VM2 0x16
+#define TEGRA234_SID_RCE_SERVER 0x17
+#define TEGRA234_SID_SMMU_TEST 0x18
+#define TEGRA234_SID_UFS_1 0x19
+#define TEGRA234_SID_UFS_2 0x1a
+#define TEGRA234_SID_UFS_3 0x1b
+#define TEGRA234_SID_UFS_4 0x1c
+#define TEGRA234_SID_UFS_5 0x1d
+#define TEGRA234_SID_UFS_6 0x1e
+#define TEGRA234_SID_PCIE9 0x1f
+#define TEGRA234_SID_VSE_GPCDMA_VM0 0x20
+#define TEGRA234_SID_VSE_GPCDMA_VM1 0x21
+#define TEGRA234_SID_VSE_GPCDMA_VM2 0x22
+#define TEGRA234_SID_NVDLA1 0x23
+#define TEGRA234_SID_NVENC 0x24
+#define TEGRA234_SID_NVJPG1 0x25
+#define TEGRA234_SID_OFA 0x26
+#define TEGRA234_SID_MGBE_VF1 0x49
+#define TEGRA234_SID_MGBE_VF2 0x4a
+#define TEGRA234_SID_MGBE_VF3 0x4b
+#define TEGRA234_SID_MGBE_VF4 0x4c
+#define TEGRA234_SID_MGBE_VF5 0x4d
+#define TEGRA234_SID_MGBE_VF6 0x4e
+#define TEGRA234_SID_MGBE_VF7 0x4f
+#define TEGRA234_SID_MGBE_VF8 0x50
+#define TEGRA234_SID_MGBE_VF9 0x51
+#define TEGRA234_SID_MGBE_VF10 0x52
+#define TEGRA234_SID_MGBE_VF11 0x53
+#define TEGRA234_SID_MGBE_VF12 0x54
+#define TEGRA234_SID_MGBE_VF13 0x55
+#define TEGRA234_SID_MGBE_VF14 0x56
+#define TEGRA234_SID_MGBE_VF15 0x57
+#define TEGRA234_SID_MGBE_VF16 0x58
+#define TEGRA234_SID_MGBE_VF17 0x59
+#define TEGRA234_SID_MGBE_VF18 0x5a
+#define TEGRA234_SID_MGBE_VF19 0x5b
+#define TEGRA234_SID_MGBE_VF20 0x5c
+#define TEGRA234_SID_APE_2 0x5e
+#define TEGRA234_SID_APE_3 0x5f
+#define TEGRA234_SID_UFS_7 0x60
+#define TEGRA234_SID_UFS_8 0x61
+#define TEGRA234_SID_UFS_9 0x62
+#define TEGRA234_SID_UFS_10 0x63
+#define TEGRA234_SID_UFS_11 0x64
+#define TEGRA234_SID_UFS_12 0x65
+#define TEGRA234_SID_UFS_13 0x66
+#define TEGRA234_SID_UFS_14 0x67
+#define TEGRA234_SID_UFS_15 0x68
+#define TEGRA234_SID_UFS_16 0x69
+#define TEGRA234_SID_UFS_17 0x6a
+#define TEGRA234_SID_UFS_18 0x6b
+#define TEGRA234_SID_UFS_19 0x6c
+#define TEGRA234_SID_UFS_20 0x6d
+#define TEGRA234_SID_GPCDMA_5 0x6e
+#define TEGRA234_SID_GPCDMA_6 0x6f
+#define TEGRA234_SID_GPCDMA_7 0x70
+#define TEGRA234_SID_GPCDMA_8 0x71
+#define TEGRA234_SID_GPCDMA_9 0x72
+
+/* NISO1 stream IDs */
+#define TEGRA234_SID_SDMMC1A 0x01
+#define TEGRA234_SID_SDMMC4 0x02
+#define TEGRA234_SID_EQOS 0x03
+#define TEGRA234_SID_HWMP_PMA 0x04
+#define TEGRA234_SID_PCIE1 0x05
+#define TEGRA234_SID_PCIE2 0x06
+#define TEGRA234_SID_PCIE3 0x07
+#define TEGRA234_SID_PCIE7 0x08
+#define TEGRA234_SID_PCIE8 0x09
+#define TEGRA234_SID_PCIE10 0x0b
+#define TEGRA234_SID_QSPI0 0x0c
+#define TEGRA234_SID_QSPI1 0x0d
+#define TEGRA234_SID_XUSB_HOST 0x0e
+#define TEGRA234_SID_XUSB_DEV 0x0f
+#define TEGRA234_SID_BPMP 0x10
+#define TEGRA234_SID_FSI 0x11
+#define TEGRA234_SID_PVA0_VM0 0x12
+#define TEGRA234_SID_PVA0_VM1 0x13
+#define TEGRA234_SID_PVA0_VM2 0x14
+#define TEGRA234_SID_PVA0_VM3 0x15
+#define TEGRA234_SID_PVA0_VM4 0x16
+#define TEGRA234_SID_PVA0_VM5 0x17
+#define TEGRA234_SID_PVA0_VM6 0x18
+#define TEGRA234_SID_PVA0_VM7 0x19
+#define TEGRA234_SID_XUSB_VF0 0x1a
+#define TEGRA234_SID_XUSB_VF1 0x1b
+#define TEGRA234_SID_XUSB_VF2 0x1c
+#define TEGRA234_SID_XUSB_VF3 0x1d
+#define TEGRA234_SID_EQOS_VF1 0x1e
+#define TEGRA234_SID_EQOS_VF2 0x1f
+#define TEGRA234_SID_EQOS_VF3 0x20
+#define TEGRA234_SID_EQOS_VF4 0x21
+#define TEGRA234_SID_ISP_VM2 0x22
+#define TEGRA234_SID_HOST1X 0x27
+#define TEGRA234_SID_ISP 0x28
+#define TEGRA234_SID_NVDEC 0x29
+#define TEGRA234_SID_NVJPG 0x2a
+#define TEGRA234_SID_NVDLA0 0x2b
+#define TEGRA234_SID_PVA0 0x2c
+#define TEGRA234_SID_SES_SE0 0x2d
+#define TEGRA234_SID_SES_SE1 0x2e
+#define TEGRA234_SID_SES_SE2 0x2f
+#define TEGRA234_SID_SEU1_SE0 0x30
+#define TEGRA234_SID_SEU1_SE1 0x31
+#define TEGRA234_SID_SEU1_SE2 0x32
+#define TEGRA234_SID_TSEC 0x33
+#define TEGRA234_SID_VIC 0x34
+#define TEGRA234_SID_HC_VM0 0x3d
+#define TEGRA234_SID_HC_VM1 0x3e
+#define TEGRA234_SID_HC_VM2 0x3f
+#define TEGRA234_SID_HC_VM3 0x40
+#define TEGRA234_SID_HC_VM4 0x41
+#define TEGRA234_SID_HC_VM5 0x42
+#define TEGRA234_SID_HC_VM6 0x43
+#define TEGRA234_SID_HC_VM7 0x44
+#define TEGRA234_SID_SE_VM0 0x45
+#define TEGRA234_SID_SE_VM1 0x46
+#define TEGRA234_SID_SE_VM2 0x47
+#define TEGRA234_SID_ISPFALC 0x48
+#define TEGRA234_SID_NISO1_SMMU_TEST 0x49
+#define TEGRA234_SID_TSEC_VM0 0x4a
+
+/* Shared stream IDs */
+#define TEGRA234_SID_HOST1X_CTX0 0x35
+#define TEGRA234_SID_HOST1X_CTX1 0x36
+#define TEGRA234_SID_HOST1X_CTX2 0x37
+#define TEGRA234_SID_HOST1X_CTX3 0x38
+#define TEGRA234_SID_HOST1X_CTX4 0x39
+#define TEGRA234_SID_HOST1X_CTX5 0x3a
+#define TEGRA234_SID_HOST1X_CTX6 0x3b
+#define TEGRA234_SID_HOST1X_CTX7 0x3c
+
+/*
+ * memory client IDs
+ */
+
+/* Misses from System Memory Management Unit (SMMU) Page Table Cache (PTC) */
+#define TEGRA234_MEMORY_CLIENT_PTCR 0x00
+/* MSS internal memqual MIU7 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU7R 0x01
+/* MSS internal memqual MIU7 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU7W 0x02
+/* MSS internal memqual MIU8 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU8R 0x03
+/* MSS internal memqual MIU8 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU8W 0x04
+/* MSS internal memqual MIU9 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU9R 0x05
+/* MSS internal memqual MIU9 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU9W 0x06
+/* MSS internal memqual MIU10 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU10R 0x07
+/* MSS internal memqual MIU10 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU10W 0x08
+/* MSS internal memqual MIU11 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU11R 0x09
+/* MSS internal memqual MIU11 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU11W 0x0a
+/* MSS internal memqual MIU12 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU12R 0x0b
+/* MSS internal memqual MIU12 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU12W 0x0c
+/* MSS internal memqual MIU13 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU13R 0x0d
+/* MSS internal memqual MIU13 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU13W 0x0e
+#define TEGRA234_MEMORY_CLIENT_NVL5RHP 0x13
+#define TEGRA234_MEMORY_CLIENT_NVL5R 0x14
+/* High-definition audio (HDA) read clients */
+#define TEGRA234_MEMORY_CLIENT_HDAR 0x15
+/* Host channel data read clients */
+#define TEGRA234_MEMORY_CLIENT_HOST1XDMAR 0x16
+#define TEGRA234_MEMORY_CLIENT_NVL5W 0x17
+#define TEGRA234_MEMORY_CLIENT_NVL6RHP 0x18
+#define TEGRA234_MEMORY_CLIENT_NVL6R 0x19
+#define TEGRA234_MEMORY_CLIENT_NVL6W 0x1a
+#define TEGRA234_MEMORY_CLIENT_NVL7RHP 0x1b
+#define TEGRA234_MEMORY_CLIENT_NVENCSRD 0x1c
+#define TEGRA234_MEMORY_CLIENT_NVL7R 0x1d
+#define TEGRA234_MEMORY_CLIENT_NVL7W 0x1e
+#define TEGRA234_MEMORY_CLIENT_NVL8RHP 0x20
+#define TEGRA234_MEMORY_CLIENT_NVL8R 0x21
+#define TEGRA234_MEMORY_CLIENT_NVL8W 0x22
+#define TEGRA234_MEMORY_CLIENT_NVL9RHP 0x23
+#define TEGRA234_MEMORY_CLIENT_NVL9R 0x24
+#define TEGRA234_MEMORY_CLIENT_NVL9W 0x25
+/* PCIE6 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE6AR 0x28
+/* PCIE6 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE6AW 0x29
+/* PCIE7 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE7AR 0x2a
+#define TEGRA234_MEMORY_CLIENT_NVENCSWR 0x2b
+/* DLA0ARDB read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0RDB 0x2c
+/* DLA0ARDB1 read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0RDB1 0x2d
+/* DLA0 writes */
+#define TEGRA234_MEMORY_CLIENT_DLA0WRB 0x2e
+/* DLA1ARDB read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1RDB 0x2f
+/* PCIE7 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE7AW 0x30
+/* PCIE8 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE8AR 0x32
+/* High-definition audio (HDA) write clients */
+#define TEGRA234_MEMORY_CLIENT_HDAW 0x35
+/* Writes from the four Cortex-A9 CPU cores via the L2 cache */
+#define TEGRA234_MEMORY_CLIENT_MPCOREW 0x39
+/* OFAA read client 1 */
+#define TEGRA234_MEMORY_CLIENT_OFAR1 0x3a
+/* PCIE8 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE8AW 0x3b
+/* PCIE9 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE9AR 0x3c
+/* PCIE6r1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE6AR1 0x3d
+/* PCIE9 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE9AW 0x3e
+/* PCIE10 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE10AR 0x3f
+/* PCIE10 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE10AW 0x40
+/* ISP read client for Crossbar A */
+#define TEGRA234_MEMORY_CLIENT_ISPRA 0x44
+/* ISP Falcon read client for Crossbar A */
+#define TEGRA234_MEMORY_CLIENT_ISPFALR 0x45
+/* ISP write client for Crossbar A */
+#define TEGRA234_MEMORY_CLIENT_ISPWA 0x46
+/* ISP write client for Crossbar B */
+#define TEGRA234_MEMORY_CLIENT_ISPWB 0x47
+/* PCIE10r1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE10AR1 0x48
+/* PCIE7r1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE7AR1 0x49
+/* XUSB_HOST read clients */
+#define TEGRA234_MEMORY_CLIENT_XUSB_HOSTR 0x4a
+/* XUSB_HOST write clients */
+#define TEGRA234_MEMORY_CLIENT_XUSB_HOSTW 0x4b
+/* XUSB read clients */
+#define TEGRA234_MEMORY_CLIENT_XUSB_DEVR 0x4c
+/* XUSB_DEV write clients */
+#define TEGRA234_MEMORY_CLIENT_XUSB_DEVW 0x4d
+/* TSEC memory return data (read) client */
+#define TEGRA234_MEMORY_CLIENT_TSECSRD 0x54
+/* TSEC memory write client */
+#define TEGRA234_MEMORY_CLIENT_TSECSWR 0x55
+/* XSPI writes */
+#define TEGRA234_MEMORY_CLIENT_XSPI1W 0x56
+/* MGBE0 read client */
+#define TEGRA234_MEMORY_CLIENT_MGBEARD 0x58
+/* MGBEB read client */
+#define TEGRA234_MEMORY_CLIENT_MGBEBRD 0x59
+/* MGBEC read client */
+#define TEGRA234_MEMORY_CLIENT_MGBECRD 0x5a
+/* MGBED read client */
+#define TEGRA234_MEMORY_CLIENT_MGBEDRD 0x5b
+/* MGBE0 write client */
+#define TEGRA234_MEMORY_CLIENT_MGBEAWR 0x5c
+/* OFAA read client */
+#define TEGRA234_MEMORY_CLIENT_OFAR 0x5d
+/* OFAA write client */
+#define TEGRA234_MEMORY_CLIENT_OFAW 0x5e
+/* MGBEB write client */
+#define TEGRA234_MEMORY_CLIENT_MGBEBWR 0x5f
+/* sdmmca memory read client */
+#define TEGRA234_MEMORY_CLIENT_SDMMCRA 0x60
+/* MGBEC write client */
+#define TEGRA234_MEMORY_CLIENT_MGBECWR 0x61
+/* sdmmcd memory read client */
+#define TEGRA234_MEMORY_CLIENT_SDMMCRAB 0x63
+/* sdmmca memory write client */
+#define TEGRA234_MEMORY_CLIENT_SDMMCWA 0x64
+/* MGBED write client */
+#define TEGRA234_MEMORY_CLIENT_MGBEDWR 0x65
+/* sdmmcd memory write client */
+#define TEGRA234_MEMORY_CLIENT_SDMMCWAB 0x67
+/* SE memory return data (read) client */
+#define TEGRA234_MEMORY_CLIENT_SEU1RD 0x68
+/* SE memory write client */
+#define TEGRA234_MEMORY_CLIENT_SUE1WR 0x69
+#define TEGRA234_MEMORY_CLIENT_VICSRD 0x6c
+#define TEGRA234_MEMORY_CLIENT_VICSWR 0x6d
+/* DLA1ARDB1 read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1RDB1 0x6e
+/* DLA1 writes */
+#define TEGRA234_MEMORY_CLIENT_DLA1WRB 0x6f
+/* VI2 Falcon read client */
+#define TEGRA234_MEMORY_CLIENT_VI2FALR 0x71
+/* VI2 write client */
+#define TEGRA234_MEMORY_CLIENT_VI2W 0x70
+/* VI write client */
+#define TEGRA234_MEMORY_CLIENT_VIW 0x72
+/* NISO display read client */
+#define TEGRA234_MEMORY_CLIENT_NVDISPNISOR 0x73
+/* NVDISPNISO writes */
+#define TEGRA234_MEMORY_CLIENT_NVDISPNISOW 0x74
+/* XSPI client */
+#define TEGRA234_MEMORY_CLIENT_XSPI0R 0x75
+/* XSPI writes */
+#define TEGRA234_MEMORY_CLIENT_XSPI0W 0x76
+/* XSPI client */
+#define TEGRA234_MEMORY_CLIENT_XSPI1R 0x77
+#define TEGRA234_MEMORY_CLIENT_NVDECSRD 0x78
+#define TEGRA234_MEMORY_CLIENT_NVDECSWR 0x79
+/* Audio Processing (APE) engine read clients */
+#define TEGRA234_MEMORY_CLIENT_APER 0x7a
+/* Audio Processing (APE) engine write clients */
+#define TEGRA234_MEMORY_CLIENT_APEW 0x7b
+/* VI2FAL writes */
+#define TEGRA234_MEMORY_CLIENT_VI2FALW 0x7c
+#define TEGRA234_MEMORY_CLIENT_NVJPGSRD 0x7e
+#define TEGRA234_MEMORY_CLIENT_NVJPGSWR 0x7f
+/* SE memory return data (read) client */
+#define TEGRA234_MEMORY_CLIENT_SESRD 0x80
+/* SE memory write client */
+#define TEGRA234_MEMORY_CLIENT_SESWR 0x81
+/* AXI AP and DFD-AUX0/1 read clients; both share the same interface on the MSS */
+#define TEGRA234_MEMORY_CLIENT_AXIAPR 0x82
+/* AXI AP and DFD-AUX0/1 write clients; both share the same interface on the MSS */
+#define TEGRA234_MEMORY_CLIENT_AXIAPW 0x83
+/* ETR read clients */
+#define TEGRA234_MEMORY_CLIENT_ETRR 0x84
+/* ETR write clients */
+#define TEGRA234_MEMORY_CLIENT_ETRW 0x85
+/* AXI Switch read client */
+#define TEGRA234_MEMORY_CLIENT_AXISR 0x8c
+/* AXI Switch write client */
+#define TEGRA234_MEMORY_CLIENT_AXISW 0x8d
+/* EQOS read client */
+#define TEGRA234_MEMORY_CLIENT_EQOSR 0x8e
+/* EQOS write client */
+#define TEGRA234_MEMORY_CLIENT_EQOSW 0x8f
+/* UFSHC read client */
+#define TEGRA234_MEMORY_CLIENT_UFSHCR 0x90
+/* UFSHC write client */
+#define TEGRA234_MEMORY_CLIENT_UFSHCW 0x91
+/* NVDISPLAY read client */
+#define TEGRA234_MEMORY_CLIENT_NVDISPLAYR 0x92
+/* BPMP read client */
+#define TEGRA234_MEMORY_CLIENT_BPMPR 0x93
+/* BPMP write client */
+#define TEGRA234_MEMORY_CLIENT_BPMPW 0x94
+/* BPMPDMA read client */
+#define TEGRA234_MEMORY_CLIENT_BPMPDMAR 0x95
+/* BPMPDMA write client */
+#define TEGRA234_MEMORY_CLIENT_BPMPDMAW 0x96
+/* AON read client */
+#define TEGRA234_MEMORY_CLIENT_AONR 0x97
+/* AON write client */
+#define TEGRA234_MEMORY_CLIENT_AONW 0x98
+/* AONDMA read client */
+#define TEGRA234_MEMORY_CLIENT_AONDMAR 0x99
+/* AONDMA write client */
+#define TEGRA234_MEMORY_CLIENT_AONDMAW 0x9a
+/* SCE read client */
+#define TEGRA234_MEMORY_CLIENT_SCER 0x9b
+/* SCE write client */
+#define TEGRA234_MEMORY_CLIENT_SCEW 0x9c
+/* SCEDMA read client */
+#define TEGRA234_MEMORY_CLIENT_SCEDMAR 0x9d
+/* SCEDMA write client */
+#define TEGRA234_MEMORY_CLIENT_SCEDMAW 0x9e
+/* APEDMA read client */
+#define TEGRA234_MEMORY_CLIENT_APEDMAR 0x9f
+/* APEDMA write client */
+#define TEGRA234_MEMORY_CLIENT_APEDMAW 0xa0
+/* NVDISPLAY read client instance 2 */
+#define TEGRA234_MEMORY_CLIENT_NVDISPLAYR1 0xa1
+#define TEGRA234_MEMORY_CLIENT_VICSRD1 0xa2
+/* MSS internal memqual MIU0 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU0R 0xa6
+/* MSS internal memqual MIU0 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU0W 0xa7
+/* MSS internal memqual MIU1 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU1R 0xa8
+/* MSS internal memqual MIU1 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU1W 0xa9
+/* MSS internal memqual MIU2 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU2R 0xae
+/* MSS internal memqual MIU2 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU2W 0xaf
+/* MSS internal memqual MIU3 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU3R 0xb0
+/* MSS internal memqual MIU3 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU3W 0xb1
+/* MSS internal memqual MIU4 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU4R 0xb2
+/* MSS internal memqual MIU4 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU4W 0xb3
+#define TEGRA234_MEMORY_CLIENT_DPMUR 0xb4
+#define TEGRA234_MEMORY_CLIENT_DPMUW 0xb5
+#define TEGRA234_MEMORY_CLIENT_NVL0R 0xb6
+#define TEGRA234_MEMORY_CLIENT_NVL0W 0xb7
+#define TEGRA234_MEMORY_CLIENT_NVL1R 0xb8
+#define TEGRA234_MEMORY_CLIENT_NVL1W 0xb9
+#define TEGRA234_MEMORY_CLIENT_NVL2R 0xba
+#define TEGRA234_MEMORY_CLIENT_NVL2W 0xbb
+/* VI Falcon read clients */
+#define TEGRA234_MEMORY_CLIENT_VIFALR 0xbc
+/* VIFAL write clients */
+#define TEGRA234_MEMORY_CLIENT_VIFALW 0xbd
+/* DLA0ARDA read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0RDA 0xbe
+/* DLA0 Falcon read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0FALRDB 0xbf
+/* DLA0 write clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0WRA 0xc0
+/* DLA0 write clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0FALWRB 0xc1
+/* DLA1ARDA read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1RDA 0xc2
+/* DLA1 Falcon read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1FALRDB 0xc3
+/* DLA1 write clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1WRA 0xc4
+/* DLA1 write clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1FALWRB 0xc5
+/* PVA0RDA read clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0RDA 0xc6
+/* PVA0RDB read clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0RDB 0xc7
+/* PVA0RDC read clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0RDC 0xc8
+/* PVA0WRA write clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0WRA 0xc9
+/* PVA0WRB write clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0WRB 0xca
+/* PVA0WRC write clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0WRC 0xcb
+/* RCE read client */
+#define TEGRA234_MEMORY_CLIENT_RCER 0xd2
+/* RCE write client */
+#define TEGRA234_MEMORY_CLIENT_RCEW 0xd3
+/* RCEDMA read client */
+#define TEGRA234_MEMORY_CLIENT_RCEDMAR 0xd4
+/* RCEDMA write client */
+#define TEGRA234_MEMORY_CLIENT_RCEDMAW 0xd5
+/* PCIE0 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE0R 0xd8
+/* PCIE0 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE0W 0xd9
+/* PCIE1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE1R 0xda
+/* PCIE1 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE1W 0xdb
+/* PCIE2 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE2AR 0xdc
+/* PCIE2 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE2AW 0xdd
+/* PCIE3 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE3R 0xde
+/* PCIE3 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE3W 0xdf
+/* PCIE4 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE4R 0xe0
+/* PCIE4 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE4W 0xe1
+/* PCIE5 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE5R 0xe2
+/* PCIE5 write clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE5W 0xe3
+/* ISP Falcon write client for Crossbar A */
+#define TEGRA234_MEMORY_CLIENT_ISPFALW 0xe4
+#define TEGRA234_MEMORY_CLIENT_NVL3R 0xe5
+#define TEGRA234_MEMORY_CLIENT_NVL3W 0xe6
+#define TEGRA234_MEMORY_CLIENT_NVL4R 0xe7
+#define TEGRA234_MEMORY_CLIENT_NVL4W 0xe8
+/* DLA0ARDA1 read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA0RDA1 0xe9
+/* DLA1ARDA1 read clients */
+#define TEGRA234_MEMORY_CLIENT_DLA1RDA1 0xea
+/* PVA0RDA1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0RDA1 0xeb
+/* PVA0RDB1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PVA0RDB1 0xec
+/* PCIE5r1 read clients */
+#define TEGRA234_MEMORY_CLIENT_PCIE5R1 0xef
+#define TEGRA234_MEMORY_CLIENT_NVENCSRD1 0xf0
+/* ISP read client 1 for Crossbar A */
+#define TEGRA234_MEMORY_CLIENT_ISPRA1 0xf2
+#define TEGRA234_MEMORY_CLIENT_NVL0RHP 0xf4
+#define TEGRA234_MEMORY_CLIENT_NVL1RHP 0xf5
+#define TEGRA234_MEMORY_CLIENT_NVL2RHP 0xf6
+#define TEGRA234_MEMORY_CLIENT_NVL3RHP 0xf7
+#define TEGRA234_MEMORY_CLIENT_NVL4RHP 0xf8
+/* MSS internal memqual MIU5 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU5R 0xfc
+/* MSS internal memqual MIU5 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU5W 0xfd
+/* MSS internal memqual MIU6 read clients */
+#define TEGRA234_MEMORY_CLIENT_MIU6R 0xfe
+/* MSS internal memqual MIU6 write clients */
+#define TEGRA234_MEMORY_CLIENT_MIU6W 0xff
+#define TEGRA234_MEMORY_CLIENT_NVJPG1SRD 0x123
+#define TEGRA234_MEMORY_CLIENT_NVJPG1SWR 0x124
+
+/* ICC IDs for dummy MC clients used to represent CPU clusters */
+#define TEGRA_ICC_MC_CPU_CLUSTER0 1003
+#define TEGRA_ICC_MC_CPU_CLUSTER1 1004
+#define TEGRA_ICC_MC_CPU_CLUSTER2 1005
+
+#endif
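
For orientation, a hedged sketch of how the dummy-client ICC IDs above are consumed: a CPU node can name the memory controller as its interconnect provider, using one of the TEGRA_ICC_MC_CPU_CLUSTER* IDs as the source endpoint. The &mc/&emc phandles and the node layout are illustrative assumptions, not part of this patch.

	cpu@0 {
		/* Cluster 0 bandwidth requests routed via the dummy MC client */
		interconnects = <&mc TEGRA_ICC_MC_CPU_CLUSTER0 &emc>;
		interconnect-names = "dram";
	};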
diff --git a/include/dt-bindings/mfd/cros_ec.h b/include/dt-bindings/mfd/cros_ec.h
new file mode 100644
index 000000000000..3b29cd049578
--- /dev/null
+++ b/include/dt-bindings/mfd/cros_ec.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * DTS binding definitions used for the Chromium OS Embedded Controller.
+ *
+ * Copyright (c) 2022 The Chromium OS Authors. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_MFD_CROS_EC_H
+#define _DT_BINDINGS_MFD_CROS_EC_H
+
+/* Typed channel for keyboard backlight. */
+#define CROS_EC_PWM_DT_KB_LIGHT 0
+/* Typed channel for display backlight. */
+#define CROS_EC_PWM_DT_DISPLAY_LIGHT 1
+/* Number of typed channels. */
+#define CROS_EC_PWM_DT_COUNT 2
+
+#endif
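
A hedged usage sketch for the typed PWM channels above: a consumer selects a channel by its typed index rather than a raw EC hardware channel number. The &cros_ec_pwm phandle is an assumption for illustration; pwm-backlight is the generic backlight binding.

	backlight {
		compatible = "pwm-backlight";
		/* Typed channel: display backlight, not a raw channel index */
		pwms = <&cros_ec_pwm CROS_EC_PWM_DT_DISPLAY_LIGHT>;
	};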
diff --git a/include/dt-bindings/mfd/st,stpmic1.h b/include/dt-bindings/mfd/st,stpmic1.h
index 321cd08797d9..9dd15b9c743e 100644
--- a/include/dt-bindings/mfd/st,stpmic1.h
+++ b/include/dt-bindings/mfd/st,stpmic1.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Author: Philippe Peurichard <philippe.peurichard@st.com>,
diff --git a/include/dt-bindings/mfd/stm32f4-rcc.h b/include/dt-bindings/mfd/stm32f4-rcc.h
index 309e8c79f27b..36448a5619a1 100644
--- a/include/dt-bindings/mfd/stm32f4-rcc.h
+++ b/include/dt-bindings/mfd/stm32f4-rcc.h
@@ -34,7 +34,6 @@
#define STM32F4_AHB1_RESET(bit) (STM32F4_RCC_AHB1_##bit + (0x10 * 8))
#define STM32F4_AHB1_CLOCK(bit) (STM32F4_RCC_AHB1_##bit)
-
/* AHB2 */
#define STM32F4_RCC_AHB2_DCMI 0
#define STM32F4_RCC_AHB2_CRYP 4
diff --git a/include/dt-bindings/mfd/stm32f7-rcc.h b/include/dt-bindings/mfd/stm32f7-rcc.h
index a90f3613c584..a4e4f9271395 100644
--- a/include/dt-bindings/mfd/stm32f7-rcc.h
+++ b/include/dt-bindings/mfd/stm32f7-rcc.h
@@ -64,6 +64,7 @@
#define STM32F7_RCC_APB1_TIM14 8
#define STM32F7_RCC_APB1_LPTIM1 9
#define STM32F7_RCC_APB1_WWDG 11
+#define STM32F7_RCC_APB1_CAN3 13
#define STM32F7_RCC_APB1_SPI2 14
#define STM32F7_RCC_APB1_SPI3 15
#define STM32F7_RCC_APB1_SPDIFRX 16
@@ -107,6 +108,7 @@
#define STM32F7_RCC_APB2_SAI1 22
#define STM32F7_RCC_APB2_SAI2 23
#define STM32F7_RCC_APB2_LTDC 26
+#define STM32F7_RCC_APB2_DSI 27
#define STM32F7_APB2_RESET(bit) (STM32F7_RCC_APB2_##bit + (0x24 * 8))
#define STM32F7_APB2_CLOCK(bit) (STM32F7_RCC_APB2_##bit + 0xA0)
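
The helper macros fold a peripheral's bit index into a register-relative cell value: with the newly added STM32F7_RCC_APB2_DSI = 27, STM32F7_APB2_RESET(DSI) expands to 27 + (0x24 * 8) = 315 and STM32F7_APB2_CLOCK(DSI) to 27 + 0xA0 = 187. A hedged consumer sketch (the &rcc phandle and node are assumptions):

	dsi {
		/* Reset line index computed from the APB2 bit position */
		resets = <&rcc STM32F7_APB2_RESET(DSI)>;
	};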
diff --git a/include/dt-bindings/mux/ti-serdes.h b/include/dt-bindings/mux/ti-serdes.h
index d417b9268b16..b0b1091aad6d 100644
--- a/include/dt-bindings/mux/ti-serdes.h
+++ b/include/dt-bindings/mux/ti-serdes.h
@@ -6,6 +6,14 @@
#ifndef _DT_BINDINGS_MUX_TI_SERDES
#define _DT_BINDINGS_MUX_TI_SERDES
+/*
+ * These bindings are deprecated: they are not bindings in the usual
+ * sense, but purely constant values that are used only in DTS board
+ * files.
+ * Instead, include the header from the DTS source directory.
+ */
+#warning "These bindings are deprecated. Instead, use the header in the DTS source directory."
+
/* J721E */
#define J721E_SERDES0_LANE0_QSGMII_LANE1 0x0
@@ -95,4 +103,88 @@
#define AM64_SERDES0_LANE0_PCIE0 0x0
#define AM64_SERDES0_LANE0_USB 0x1
+/* J721S2 */
+
+#define J721S2_SERDES0_LANE0_EDP_LANE0 0x0
+#define J721S2_SERDES0_LANE0_PCIE1_LANE0 0x1
+#define J721S2_SERDES0_LANE0_IP3_UNUSED 0x2
+#define J721S2_SERDES0_LANE0_IP4_UNUSED 0x3
+
+#define J721S2_SERDES0_LANE1_EDP_LANE1 0x0
+#define J721S2_SERDES0_LANE1_PCIE1_LANE1 0x1
+#define J721S2_SERDES0_LANE1_USB 0x2
+#define J721S2_SERDES0_LANE1_IP4_UNUSED 0x3
+
+#define J721S2_SERDES0_LANE2_EDP_LANE2 0x0
+#define J721S2_SERDES0_LANE2_PCIE1_LANE2 0x1
+#define J721S2_SERDES0_LANE2_IP3_UNUSED 0x2
+#define J721S2_SERDES0_LANE2_IP4_UNUSED 0x3
+
+#define J721S2_SERDES0_LANE3_EDP_LANE3 0x0
+#define J721S2_SERDES0_LANE3_PCIE1_LANE3 0x1
+#define J721S2_SERDES0_LANE3_USB 0x2
+#define J721S2_SERDES0_LANE3_IP4_UNUSED 0x3
+
+/* J784S4 */
+
+#define J784S4_SERDES0_LANE0_IP1_UNUSED 0x0
+#define J784S4_SERDES0_LANE0_PCIE1_LANE0 0x1
+#define J784S4_SERDES0_LANE0_IP3_UNUSED 0x2
+#define J784S4_SERDES0_LANE0_IP4_UNUSED 0x3
+
+#define J784S4_SERDES0_LANE1_IP1_UNUSED 0x0
+#define J784S4_SERDES0_LANE1_PCIE1_LANE1 0x1
+#define J784S4_SERDES0_LANE1_IP3_UNUSED 0x2
+#define J784S4_SERDES0_LANE1_IP4_UNUSED 0x3
+
+#define J784S4_SERDES0_LANE2_PCIE3_LANE0 0x0
+#define J784S4_SERDES0_LANE2_PCIE1_LANE2 0x1
+#define J784S4_SERDES0_LANE2_IP3_UNUSED 0x2
+#define J784S4_SERDES0_LANE2_IP4_UNUSED 0x3
+
+#define J784S4_SERDES0_LANE3_PCIE3_LANE1 0x0
+#define J784S4_SERDES0_LANE3_PCIE1_LANE3 0x1
+#define J784S4_SERDES0_LANE3_USB 0x2
+#define J784S4_SERDES0_LANE3_IP4_UNUSED 0x3
+
+#define J784S4_SERDES1_LANE0_QSGMII_LANE3 0x0
+#define J784S4_SERDES1_LANE0_PCIE0_LANE0 0x1
+#define J784S4_SERDES1_LANE0_IP3_UNUSED 0x2
+#define J784S4_SERDES1_LANE0_IP4_UNUSED 0x3
+
+#define J784S4_SERDES1_LANE1_QSGMII_LANE4 0x0
+#define J784S4_SERDES1_LANE1_PCIE0_LANE1 0x1
+#define J784S4_SERDES1_LANE1_IP3_UNUSED 0x2
+#define J784S4_SERDES1_LANE1_IP4_UNUSED 0x3
+
+#define J784S4_SERDES1_LANE2_QSGMII_LANE1 0x0
+#define J784S4_SERDES1_LANE2_PCIE0_LANE2 0x1
+#define J784S4_SERDES1_LANE2_PCIE2_LANE0 0x2
+#define J784S4_SERDES1_LANE2_IP4_UNUSED 0x3
+
+#define J784S4_SERDES1_LANE3_QSGMII_LANE2 0x0
+#define J784S4_SERDES1_LANE3_PCIE0_LANE3 0x1
+#define J784S4_SERDES1_LANE3_PCIE2_LANE1 0x2
+#define J784S4_SERDES1_LANE3_IP4_UNUSED 0x3
+
+#define J784S4_SERDES2_LANE0_QSGMII_LANE5 0x0
+#define J784S4_SERDES2_LANE0_IP2_UNUSED 0x1
+#define J784S4_SERDES2_LANE0_IP3_UNUSED 0x2
+#define J784S4_SERDES2_LANE0_IP4_UNUSED 0x3
+
+#define J784S4_SERDES2_LANE1_QSGMII_LANE6 0x0
+#define J784S4_SERDES2_LANE1_IP2_UNUSED 0x1
+#define J784S4_SERDES2_LANE1_IP3_UNUSED 0x2
+#define J784S4_SERDES2_LANE1_IP4_UNUSED 0x3
+
+#define J784S4_SERDES2_LANE2_QSGMII_LANE7 0x0
+#define J784S4_SERDES2_LANE2_QSGMII_LANE1 0x1
+#define J784S4_SERDES2_LANE2_IP3_UNUSED 0x2
+#define J784S4_SERDES2_LANE2_IP4_UNUSED 0x3
+
+#define J784S4_SERDES2_LANE3_QSGMII_LANE8 0x0
+#define J784S4_SERDES2_LANE3_QSGMII_LANE2 0x1
+#define J784S4_SERDES2_LANE3_IP3_UNUSED 0x2
+#define J784S4_SERDES2_LANE3_IP4_UNUSED 0x3
+
#endif /* _DT_BINDINGS_MUX_TI_SERDES */
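
A hedged sketch of how board files consume these lane-mux values: the SERDES lane function is usually selected through a mux-controller's idle-states, one cell per lane. The serdes_ln_ctrl node name follows common TI K3 board files but is an assumption here.

	serdes_ln_ctrl: mux-controller {
		/* SERDES0 lane 0 -> PCIe1 lane 0, lane 1 -> USB */
		idle-states = <J721S2_SERDES0_LANE0_PCIE1_LANE0>,
			      <J721S2_SERDES0_LANE1_USB>;
	};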
diff --git a/include/dt-bindings/net/pcs-rzn1-miic.h b/include/dt-bindings/net/pcs-rzn1-miic.h
new file mode 100644
index 000000000000..784782eaec9e
--- /dev/null
+++ b/include/dt-bindings/net/pcs-rzn1-miic.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 Schneider-Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#ifndef _DT_BINDINGS_PCS_RZN1_MIIC
+#define _DT_BINDINGS_PCS_RZN1_MIIC
+
+/*
+ * Refer to the datasheet [1], section 8.2.1, "Internal Connection of
+ * Ethernet Ports", to check the available combinations.
+ *
+ * [1] REN_r01uh0750ej0140-rzn1-introduction_MAT_20210228.pdf
+ */
+
+#define MIIC_GMAC1_PORT 0
+#define MIIC_GMAC2_PORT 1
+#define MIIC_RTOS_PORT 2
+#define MIIC_SERCOS_PORTA 3
+#define MIIC_SERCOS_PORTB 4
+#define MIIC_ETHERCAT_PORTA 5
+#define MIIC_ETHERCAT_PORTB 6
+#define MIIC_ETHERCAT_PORTC 7
+#define MIIC_SWITCH_PORTA 8
+#define MIIC_SWITCH_PORTB 9
+#define MIIC_SWITCH_PORTC 10
+#define MIIC_SWITCH_PORTD 11
+#define MIIC_HSR_PORTA 12
+#define MIIC_HSR_PORTB 13
+
+#endif
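
A hedged consumer sketch: each converter port of the MIIC is a sub-node addressed by one of the port constants above, so a MAC can be tied to a specific internal connection from the datasheet's matrix. The node layout is an assumption based on the RZ/N1 MIIC binding style.

	mii_conv1: mii-conv@1 {
		/* Route this converter to GMAC1 */
		reg = <MIIC_GMAC1_PORT>;
	};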
diff --git a/include/dt-bindings/net/renesas,r9a09g077-pcs-miic.h b/include/dt-bindings/net/renesas,r9a09g077-pcs-miic.h
new file mode 100644
index 000000000000..43a2b5743a63
--- /dev/null
+++ b/include/dt-bindings/net/renesas,r9a09g077-pcs-miic.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2025 Renesas Electronics Corporation.
+ */
+
+#ifndef _DT_BINDINGS_RENESAS_R9A09G077_PCS_MIIC_H
+#define _DT_BINDINGS_RENESAS_R9A09G077_PCS_MIIC_H
+
+/*
+ * Media Interface Connection Matrix
+ * ===========================================================
+ *
+ * Selects the function of the Media interface of the MAC to be used
+ *
+ * SW_MODE[2:0] | Port 0 | Port 1 | Port 2 | Port 3
+ * -------------|-------------|-------------|-------------|-------------
+ * 000b | ETHSW Port0 | ETHSW Port1 | ETHSW Port2 | GMAC1
+ * 001b | ESC Port0 | ESC Port1 | GMAC2 | GMAC1
+ * 010b | ESC Port0 | ESC Port1 | ETHSW Port2 | GMAC1
+ * 011b | ESC Port0 | ESC Port1 | ESC Port2 | GMAC1
+ * 100b | ETHSW Port0 | ESC Port1 | ESC Port2 | GMAC1
+ * 101b | ETHSW Port0 | ESC Port1 | ETHSW Port2 | GMAC1
+ * 110b | ETHSW Port0 | ETHSW Port1 | GMAC2 | GMAC1
+ * 111b | GMAC0 | GMAC1 | GMAC2 | -
+ */
+#define ETHSS_GMAC0_PORT 0
+#define ETHSS_GMAC1_PORT 1
+#define ETHSS_GMAC2_PORT 2
+#define ETHSS_ESC_PORT0 3
+#define ETHSS_ESC_PORT1 4
+#define ETHSS_ESC_PORT2 5
+#define ETHSS_ETHSW_PORT0 6
+#define ETHSS_ETHSW_PORT1 7
+#define ETHSS_ETHSW_PORT2 8
+
+#endif
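
A hedged sketch: a MAC would reference the PCS by one of the port constants above, and SW_MODE must be programmed so that, per the matrix, the requested function is actually routed to that port. The pcs-handle argument form below is an assumption.

	ethernet@0 {
		pcs-handle = <&miic ETHSS_GMAC1_PORT>;
	};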
diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h
index 6fc4b445d3a1..b8a4f3ff4a3b 100644
--- a/include/dt-bindings/net/ti-dp83867.h
+++ b/include/dt-bindings/net/ti-dp83867.h
@@ -1,10 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
* Device Tree constants for the Texas Instruments DP83867 PHY
*
* Author: Dan Murphy <dmurphy@ti.com>
*
- * Copyright: (C) 2015 Texas Instruments, Inc.
+ * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef _DT_BINDINGS_TI_DP83867_H
diff --git a/include/dt-bindings/net/ti-dp83869.h b/include/dt-bindings/net/ti-dp83869.h
index 218b1a64e975..917114aad7d0 100644
--- a/include/dt-bindings/net/ti-dp83869.h
+++ b/include/dt-bindings/net/ti-dp83869.h
@@ -1,10 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
* Device Tree constants for the Texas Instruments DP83869 PHY
*
* Author: Dan Murphy <dmurphy@ti.com>
*
- * Copyright: (C) 2019 Texas Instruments, Inc.
+ * Copyright (C) 2015-2024 Texas Instruments Incorporated - https://www.ti.com/
*/
#ifndef _DT_BINDINGS_TI_DP83869_H
diff --git a/include/dt-bindings/nvmem/microchip,sama7g5-otpc.h b/include/dt-bindings/nvmem/microchip,sama7g5-otpc.h
new file mode 100644
index 000000000000..f570b23165a2
--- /dev/null
+++ b/include/dt-bindings/nvmem/microchip,sama7g5-otpc.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#ifndef _DT_BINDINGS_NVMEM_MICROCHIP_OTPC_H
+#define _DT_BINDINGS_NVMEM_MICROCHIP_OTPC_H
+
+/*
+ * The packet offset must be a multiple of 4, as the NVMEM memory is
+ * registered with stride = 4.
+ */
+#define OTP_PKT(id) ((id) * 4)
+
+#endif
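
A worked reading of the macro: packet id 3 gives OTP_PKT(3) = 12 (0xc), which satisfies the stride-4 alignment. A hedged NVMEM-cell sketch (cell name and size are assumptions):

	calib@c {
		/* Packet 3, one 4-byte word */
		reg = <OTP_PKT(3) 0x4>;
	};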
diff --git a/include/dt-bindings/phy/phy-cadence.h b/include/dt-bindings/phy/phy-cadence.h
index 4652bcb86265..0671991208fc 100644
--- a/include/dt-bindings/phy/phy-cadence.h
+++ b/include/dt-bindings/phy/phy-cadence.h
@@ -6,15 +6,18 @@
#ifndef _DT_BINDINGS_CADENCE_SERDES_H
#define _DT_BINDINGS_CADENCE_SERDES_H
-/* Torrent */
-#define TORRENT_SERDES_NO_SSC 0
-#define TORRENT_SERDES_EXTERNAL_SSC 1
-#define TORRENT_SERDES_INTERNAL_SSC 2
+#define CDNS_SERDES_NO_SSC 0
+#define CDNS_SERDES_EXTERNAL_SSC 1
+#define CDNS_SERDES_INTERNAL_SSC 2
+/* Torrent */
#define CDNS_TORRENT_REFCLK_DRIVER 0
+#define CDNS_TORRENT_DERIVED_REFCLK 1
+#define CDNS_TORRENT_RECEIVED_REFCLK 2
/* Sierra */
#define CDNS_SIERRA_PLL_CMNLC 0
#define CDNS_SIERRA_PLL_CMNLC1 1
+#define CDNS_SIERRA_DERIVED_REFCLK 2
#endif /* _DT_BINDINGS_CADENCE_SERDES_H */
diff --git a/include/dt-bindings/phy/phy-imx8-pcie.h b/include/dt-bindings/phy/phy-imx8-pcie.h
new file mode 100644
index 000000000000..8bbe2d6538d8
--- /dev/null
+++ b/include/dt-bindings/phy/phy-imx8-pcie.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * This header provides constants for i.MX8 PCIe.
+ */
+
+#ifndef _DT_BINDINGS_IMX8_PCIE_H
+#define _DT_BINDINGS_IMX8_PCIE_H
+
+/* Reference clock PAD mode */
+#define IMX8_PCIE_REFCLK_PAD_UNUSED 0
+#define IMX8_PCIE_REFCLK_PAD_INPUT 1
+#define IMX8_PCIE_REFCLK_PAD_OUTPUT 2
+
+#endif /* _DT_BINDINGS_IMX8_PCIE_H */
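
A hedged sketch of selecting the reference-clock PAD mode on the PHY node; the fsl,refclk-pad-mode property name follows the i.MX8 PCIe PHY binding, but treat the node address and layout as assumptions:

	pcie_phy: phy@32f00000 {
		/* Board feeds the reference clock into the PAD */
		fsl,refclk-pad-mode = <IMX8_PCIE_REFCLK_PAD_INPUT>;
	};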
diff --git a/include/dt-bindings/phy/phy-lan966x-serdes.h b/include/dt-bindings/phy/phy-lan966x-serdes.h
new file mode 100644
index 000000000000..4330269a901e
--- /dev/null
+++ b/include/dt-bindings/phy/phy-lan966x-serdes.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+
+#ifndef __PHY_LAN966X_SERDES_H__
+#define __PHY_LAN966X_SERDES_H__
+
+#define CU(x) (x)
+#define CU_MAX CU(2)
+#define SERDES6G(x) (CU_MAX + 1 + (x))
+#define SERDES6G_MAX SERDES6G(3)
+#define RGMII(x) (SERDES6G_MAX + 1 + (x))
+#define RGMII_MAX RGMII(2)
+#define SERDES_MAX (RGMII_MAX + 1)
+
+#endif
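
The macros stack three index ranges back to back: CU(0..2) occupy indices 0..2, SERDES6G(0..3) occupy CU_MAX + 1 = 3 through 6, RGMII(0..2) occupy SERDES6G_MAX + 1 = 7 through 9, and SERDES_MAX = 10 is one past the last valid index. A hedged consumer sketch (port node and &serdes phandle assumed):

	port {
		/* Second 6G SERDES instance, i.e. index 3 + 1 = 4 */
		phys = <&serdes 0 SERDES6G(1)>;
	};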
diff --git a/include/dt-bindings/phy/phy-qcom-qmp.h b/include/dt-bindings/phy/phy-qcom-qmp.h
new file mode 100644
index 000000000000..6b43ea9e0051
--- /dev/null
+++ b/include/dt-bindings/phy/phy-qcom-qmp.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Qualcomm QMP PHY constants
+ *
+ * Copyright (C) 2022 Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_PHY_QMP
+#define _DT_BINDINGS_PHY_QMP
+
+/* QMP USB4-USB3-DP clocks */
+#define QMP_USB43DP_USB3_PIPE_CLK 0
+#define QMP_USB43DP_DP_LINK_CLK 1
+#define QMP_USB43DP_DP_VCO_DIV_CLK 2
+
+/* QMP USB4-USB3-DP PHYs */
+#define QMP_USB43DP_USB3_PHY 0
+#define QMP_USB43DP_DP_PHY 1
+
+/* QMP PCIE PHYs */
+#define QMP_PCIE_PIPE_CLK 0
+#define QMP_PCIE_PHY_AUX_CLK 1
+
+#endif /* _DT_BINDINGS_PHY_QMP */
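
A hedged sketch of a consumer referencing the combo PHY's outputs by these indices (the &usb_1_qmpphy phandle is an assumption):

	usb {
		clocks = <&usb_1_qmpphy QMP_USB43DP_USB3_PIPE_CLK>;
		phys = <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
	};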
diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
index 887a31b250a8..6b901b342348 100644
--- a/include/dt-bindings/phy/phy.h
+++ b/include/dt-bindings/phy/phy.h
@@ -20,5 +20,8 @@
#define PHY_TYPE_XPCS 7
#define PHY_TYPE_SGMII 8
#define PHY_TYPE_QSGMII 9
+#define PHY_TYPE_DPHY 10
+#define PHY_TYPE_CPHY 11
+#define PHY_TYPE_USXGMII 12
#endif /* _DT_BINDINGS_PHY */
diff --git a/include/dt-bindings/pinctrl/amlogic,pinctrl.h b/include/dt-bindings/pinctrl/amlogic,pinctrl.h
new file mode 100644
index 000000000000..7d40aecc7147
--- /dev/null
+++ b/include/dt-bindings/pinctrl/amlogic,pinctrl.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2024 Amlogic, Inc. All rights reserved.
+ * Author: Xianwei Zhao <xianwei.zhao@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_PINCTRL_H
+#define _DT_BINDINGS_AMLOGIC_PINCTRL_H
+/* Normal PIN bank */
+#define AMLOGIC_GPIO_A 0
+#define AMLOGIC_GPIO_B 1
+#define AMLOGIC_GPIO_C 2
+#define AMLOGIC_GPIO_D 3
+#define AMLOGIC_GPIO_E 4
+#define AMLOGIC_GPIO_F 5
+#define AMLOGIC_GPIO_G 6
+#define AMLOGIC_GPIO_H 7
+#define AMLOGIC_GPIO_I 8
+#define AMLOGIC_GPIO_J 9
+#define AMLOGIC_GPIO_K 10
+#define AMLOGIC_GPIO_L 11
+#define AMLOGIC_GPIO_M 12
+#define AMLOGIC_GPIO_N 13
+#define AMLOGIC_GPIO_O 14
+#define AMLOGIC_GPIO_P 15
+#define AMLOGIC_GPIO_Q 16
+#define AMLOGIC_GPIO_R 17
+#define AMLOGIC_GPIO_S 18
+#define AMLOGIC_GPIO_T 19
+#define AMLOGIC_GPIO_U 20
+#define AMLOGIC_GPIO_V 21
+#define AMLOGIC_GPIO_W 22
+#define AMLOGIC_GPIO_X 23
+#define AMLOGIC_GPIO_Y 24
+#define AMLOGIC_GPIO_Z 25
+
+/* Special PIN bank */
+#define AMLOGIC_GPIO_DV 26
+#define AMLOGIC_GPIO_AO 27
+#define AMLOGIC_GPIO_CC 28
+#define AMLOGIC_GPIO_TEST_N 29
+#define AMLOGIC_GPIO_ANALOG 30
+
+#define AML_PINMUX(bank, offset, mode) (((((bank) << 8) + (offset)) << 8) | (mode))
+
+#endif /* _DT_BINDINGS_AMLOGIC_PINCTRL_H */
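
A worked expansion of AML_PINMUX: bank AMLOGIC_GPIO_B (1), offset 3, mode 2 yields ((((1 << 8) + 3) << 8) | 2) = 0x10302, i.e. bank in bits [23:16], pin offset within the bank in bits [15:8], and mux mode in bits [7:0]. A hedged pinctrl sketch (group node layout assumed):

	uart-pins {
		/* GPIOB bit 3, mux function 2 */
		pinmux = <AML_PINMUX(AMLOGIC_GPIO_B, 3, 2)>;
	};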
diff --git a/include/dt-bindings/pinctrl/apple.h b/include/dt-bindings/pinctrl/apple.h
new file mode 100644
index 000000000000..ea0a6f466592
--- /dev/null
+++ b/include/dt-bindings/pinctrl/apple.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/*
+ * This header provides constants for Apple pinctrl bindings.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_APPLE_H
+#define _DT_BINDINGS_PINCTRL_APPLE_H
+
+#define APPLE_PINMUX(pin, func) ((pin) | ((func) << 16))
+#define APPLE_PIN(pinmux) ((pinmux) & 0xffff)
+#define APPLE_FUNC(pinmux) ((pinmux) >> 16)
+
+#endif /* _DT_BINDINGS_PINCTRL_APPLE_H */
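
Worked numbers for the encode/decode pair: APPLE_PINMUX(47, 1) = 47 | (1 << 16) = 0x1002f, and APPLE_PIN()/APPLE_FUNC() recover 47 and 1 from that value. A hedged pinctrl sketch (node names assumed from typical Apple SoC DTS usage):

	pcie_pins: pcie-pins {
		pinmux = <APPLE_PINMUX(150, 1)>,
			 <APPLE_PINMUX(151, 1)>;
	};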
diff --git a/include/dt-bindings/pinctrl/hisi.h b/include/dt-bindings/pinctrl/hisi.h
index 0359bfdc9119..2175ec89c82f 100644
--- a/include/dt-bindings/pinctrl/hisi.h
+++ b/include/dt-bindings/pinctrl/hisi.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* This header provides constants for hisilicon pinctrl bindings.
*
- * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 HiSilicon Limited.
* Copyright (c) 2015 Linaro Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DT_BINDINGS_PINCTRL_HISI_H
diff --git a/include/dt-bindings/pinctrl/k3.h b/include/dt-bindings/pinctrl/k3.h
deleted file mode 100644
index e085f102b283..000000000000
--- a/include/dt-bindings/pinctrl/k3.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header provides constants for pinctrl bindings for TI's K3 SoC
- * family.
- *
- * Copyright (C) 2018-2021 Texas Instruments Incorporated - https://www.ti.com/
- */
-#ifndef _DT_BINDINGS_PINCTRL_TI_K3_H
-#define _DT_BINDINGS_PINCTRL_TI_K3_H
-
-#define PULLUDEN_SHIFT (16)
-#define PULLTYPESEL_SHIFT (17)
-#define RXACTIVE_SHIFT (18)
-
-#define PULL_DISABLE (1 << PULLUDEN_SHIFT)
-#define PULL_ENABLE (0 << PULLUDEN_SHIFT)
-
-#define PULL_UP (1 << PULLTYPESEL_SHIFT | PULL_ENABLE)
-#define PULL_DOWN (0 << PULLTYPESEL_SHIFT | PULL_ENABLE)
-
-#define INPUT_EN (1 << RXACTIVE_SHIFT)
-#define INPUT_DISABLE (0 << RXACTIVE_SHIFT)
-
-/* Only these macros are expected be used directly in device tree files */
-#define PIN_OUTPUT (INPUT_DISABLE | PULL_DISABLE)
-#define PIN_OUTPUT_PULLUP (INPUT_DISABLE | PULL_UP)
-#define PIN_OUTPUT_PULLDOWN (INPUT_DISABLE | PULL_DOWN)
-#define PIN_INPUT (INPUT_EN | PULL_DISABLE)
-#define PIN_INPUT_PULLUP (INPUT_EN | PULL_UP)
-#define PIN_INPUT_PULLDOWN (INPUT_EN | PULL_DOWN)
-
-#define AM65X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-#define AM65X_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-
-#define J721E_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-#define J721E_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-
-#define AM64X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-#define AM64X_MCU_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode))
-
-#endif
diff --git a/include/dt-bindings/pinctrl/keystone.h b/include/dt-bindings/pinctrl/keystone.h
index 7f97d776a8ff..66f8aecada53 100644
--- a/include/dt-bindings/pinctrl/keystone.h
+++ b/include/dt-bindings/pinctrl/keystone.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* This header provides constants for Keystone pinctrl bindings.
*
* Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DT_BINDINGS_PINCTRL_KEYSTONE_H
diff --git a/include/dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h b/include/dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h
new file mode 100644
index 000000000000..2688da2f621f
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mediatek,mt8188-pinfunc.h
@@ -0,0 +1,1280 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 MediaTek Inc.
+ * Author: Hui Liu <hui.liu@mediatek.com>
+ */
+
+#ifndef __MEDIATEK_MT8188_PINFUNC_H
+#define __MEDIATEK_MT8188_PINFUNC_H
+
+#include "mt65xx.h"
+
+#define PINMUX_GPIO0__FUNC_B_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_B0_TP_GPIO0_AO (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_O_SPIM5_CSB (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_O_UTXD1 (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_O_DMIC3_CLK (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_B0_I2SIN_MCK (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_O_I2SO2_MCK (MTK_PIN_NO(0) | 6)
+#define PINMUX_GPIO0__FUNC_B0_DBG_MON_A0 (MTK_PIN_NO(0) | 7)
+
+#define PINMUX_GPIO1__FUNC_B_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_B0_TP_GPIO1_AO (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_O_SPIM5_CLK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_I1_URXD1 (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_I0_DMIC3_DAT (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_B0_I2SIN_BCK (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_B0_I2SO2_BCK (MTK_PIN_NO(1) | 6)
+#define PINMUX_GPIO1__FUNC_B0_DBG_MON_A1 (MTK_PIN_NO(1) | 7)
+
+#define PINMUX_GPIO2__FUNC_B_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_B0_TP_GPIO2_AO (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_B0_SPIM5_MOSI (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_O_URTS1 (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_I0_DMIC3_DAT_R (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_B0_I2SIN_WS (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_B0_I2SO2_WS (MTK_PIN_NO(2) | 6)
+#define PINMUX_GPIO2__FUNC_B0_DBG_MON_A2 (MTK_PIN_NO(2) | 7)
+
+#define PINMUX_GPIO3__FUNC_B_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_B0_TP_GPIO3_AO (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_B0_SPIM5_MISO (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_I1_UCTS1 (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_O_DMIC4_CLK (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_I0_I2SIN_D0 (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_O_I2SO2_D0 (MTK_PIN_NO(3) | 6)
+#define PINMUX_GPIO3__FUNC_B0_DBG_MON_A3 (MTK_PIN_NO(3) | 7)
+
+#define PINMUX_GPIO4__FUNC_B_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I0_SPDIF_IN2 (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_O_I2SO1_MCK (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_I0_DMIC4_DAT (MTK_PIN_NO(4) | 4)
+#define PINMUX_GPIO4__FUNC_I0_I2SIN_D1 (MTK_PIN_NO(4) | 5)
+#define PINMUX_GPIO4__FUNC_O_I2SO2_D1 (MTK_PIN_NO(4) | 6)
+#define PINMUX_GPIO4__FUNC_B0_DBG_MON_A4 (MTK_PIN_NO(4) | 7)
+
+#define PINMUX_GPIO5__FUNC_B_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I0_SPDIF_IN1 (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_O_I2SO1_BCK (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_I0_DMIC4_DAT_R (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_I0_I2SIN_D2 (MTK_PIN_NO(5) | 5)
+#define PINMUX_GPIO5__FUNC_O_I2SO2_D2 (MTK_PIN_NO(5) | 6)
+#define PINMUX_GPIO5__FUNC_B0_DBG_MON_A5 (MTK_PIN_NO(5) | 7)
+
+#define PINMUX_GPIO6__FUNC_B_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I0_SPDIF_IN0 (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_O_I2SO1_WS (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_O_DMIC1_CLK (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_I0_I2SIN_D3 (MTK_PIN_NO(6) | 5)
+#define PINMUX_GPIO6__FUNC_O_I2SO2_D3 (MTK_PIN_NO(6) | 6)
+#define PINMUX_GPIO6__FUNC_B0_MD32_0_GPIO0 (MTK_PIN_NO(6) | 7)
+
+#define PINMUX_GPIO7__FUNC_B_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_O_SPIM3_CSB (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_I0_DMIC1_DAT (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_O_CMVREF0 (MTK_PIN_NO(7) | 5)
+#define PINMUX_GPIO7__FUNC_O_CLKM0 (MTK_PIN_NO(7) | 6)
+#define PINMUX_GPIO7__FUNC_B0_DBG_MON_A6 (MTK_PIN_NO(7) | 7)
+
+#define PINMUX_GPIO8__FUNC_B_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_B0_TP_GPIO0_AO (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_O_SPIM3_CLK (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_I0_DMIC1_DAT_R (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_O_CMVREF1 (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_O_CLKM1 (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_B0_DBG_MON_A7 (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_B_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_B0_TP_GPIO1_AO (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_B0_SPIM3_MOSI (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_O_DMIC2_CLK (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_O_CMFLASH0 (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_O_PWM_0 (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_B0_DBG_MON_A8 (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_B_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_B0_TP_GPIO2_AO (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_B0_SPIM3_MISO (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_I0_TDMIN_DI (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_I0_DMIC2_DAT (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_O_CMFLASH1 (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_O_PWM_1 (MTK_PIN_NO(10) | 6)
+#define PINMUX_GPIO10__FUNC_B0_DBG_MON_A9 (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_B_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_B0_TP_GPIO3_AO (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_O_SPDIF_OUT (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_O_I2SO1_D0 (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_I0_DMIC2_DAT_R (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_O_CMVREF6 (MTK_PIN_NO(11) | 6)
+#define PINMUX_GPIO11__FUNC_B0_DBG_MON_A10 (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_B_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_O_SPIM4_CSB (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_B1_JTMS_SEL3 (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_B1_APU_JTAG_TMS (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_I0_VPU_UDI_TMS (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_I0_IPU_JTAG_TMS (MTK_PIN_NO(12) | 6)
+#define PINMUX_GPIO12__FUNC_I0_HDMITX20_HTPLG (MTK_PIN_NO(12) | 7)
+
+#define PINMUX_GPIO13__FUNC_B_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_O_SPIM4_CLK (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_I0_JTCK_SEL3 (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_I0_APU_JTAG_TCK (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_I0_VPU_UDI_TCK (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_I0_IPU_JTAG_TCK (MTK_PIN_NO(13) | 6)
+#define PINMUX_GPIO13__FUNC_B1_HDMITX20_CEC (MTK_PIN_NO(13) | 7)
+
+#define PINMUX_GPIO14__FUNC_B_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_B0_SPIM4_MOSI (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_I1_JTDI_SEL3 (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_I1_APU_JTAG_TDI (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_I0_VPU_UDI_TDI (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_I0_IPU_JTAG_TDI (MTK_PIN_NO(14) | 6)
+#define PINMUX_GPIO14__FUNC_B1_HDMITX20_SCL (MTK_PIN_NO(14) | 7)
+
+#define PINMUX_GPIO15__FUNC_B_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_B0_SPIM4_MISO (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_O_JTDO_SEL3 (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_O_APU_JTAG_TDO (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_O_VPU_UDI_TDO (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_O_IPU_JTAG_TDO (MTK_PIN_NO(15) | 6)
+#define PINMUX_GPIO15__FUNC_B1_HDMITX20_SDA (MTK_PIN_NO(15) | 7)
+
+#define PINMUX_GPIO16__FUNC_B_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_B0_TP_GPIO0_AO (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_O_UTXD3 (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_I1_JTRSTn_SEL3 (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_I0_APU_JTAG_TRST (MTK_PIN_NO(16) | 4)
+#define PINMUX_GPIO16__FUNC_I0_VPU_UDI_NTRST (MTK_PIN_NO(16) | 5)
+#define PINMUX_GPIO16__FUNC_I0_IPU_JTAG_TRST (MTK_PIN_NO(16) | 6)
+#define PINMUX_GPIO16__FUNC_O_HDMITX20_PWR5V (MTK_PIN_NO(16) | 7)
+
+#define PINMUX_GPIO17__FUNC_B_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_B0_TP_GPIO1_AO (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_I1_URXD3 (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_O_CMFLASH2 (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(17) | 5)
+#define PINMUX_GPIO17__FUNC_O_CMVREF7 (MTK_PIN_NO(17) | 6)
+#define PINMUX_GPIO17__FUNC_B0_MD32_0_GPIO1 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_B_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_B0_TP_GPIO2_AO (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_O_CMFLASH0 (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_O_CMVREF4 (MTK_PIN_NO(18) | 3)
+#define PINMUX_GPIO18__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(18) | 4)
+#define PINMUX_GPIO18__FUNC_O_UTXD1 (MTK_PIN_NO(18) | 5)
+#define PINMUX_GPIO18__FUNC_O_TP_UTXD1_AO (MTK_PIN_NO(18) | 6)
+#define PINMUX_GPIO18__FUNC_B0_DBG_MON_A11 (MTK_PIN_NO(18) | 7)
+
+#define PINMUX_GPIO19__FUNC_B_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_B0_TP_GPIO3_AO (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_O_CMFLASH1 (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_O_CMVREF5 (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(19) | 4)
+#define PINMUX_GPIO19__FUNC_I1_URXD1 (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_I1_TP_URXD1_AO (MTK_PIN_NO(19) | 6)
+#define PINMUX_GPIO19__FUNC_B0_DBG_MON_A12 (MTK_PIN_NO(19) | 7)
+
+#define PINMUX_GPIO20__FUNC_B_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_O_CMFLASH2 (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_O_CLKM2 (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(20) | 4)
+#define PINMUX_GPIO20__FUNC_O_URTS1 (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_O_TP_URTS1_AO (MTK_PIN_NO(20) | 6)
+#define PINMUX_GPIO20__FUNC_B0_DBG_MON_A13 (MTK_PIN_NO(20) | 7)
+
+#define PINMUX_GPIO21__FUNC_B_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_O_CMFLASH3 (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_O_CLKM3 (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_I0_TDMIN_DI (MTK_PIN_NO(21) | 4)
+#define PINMUX_GPIO21__FUNC_I1_UCTS1 (MTK_PIN_NO(21) | 5)
+#define PINMUX_GPIO21__FUNC_I1_TP_UCTS1_AO (MTK_PIN_NO(21) | 6)
+#define PINMUX_GPIO21__FUNC_B0_DBG_MON_A14 (MTK_PIN_NO(21) | 7)
+
+#define PINMUX_GPIO22__FUNC_B_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_O_CMMCLK0 (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_B0_DBG_MON_A15 (MTK_PIN_NO(22) | 7)
+
+#define PINMUX_GPIO23__FUNC_B_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_O_CMMCLK1 (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_O_PWM_2 (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_B1_PCIE_PHY_I2C_SCL (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(23) | 6)
+#define PINMUX_GPIO23__FUNC_B0_DBG_MON_A16 (MTK_PIN_NO(23) | 7)
+
+#define PINMUX_GPIO24__FUNC_B_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_O_CMMCLK2 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_O_PWM_3 (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_B1_PCIE_PHY_I2C_SDA (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(24) | 6)
+#define PINMUX_GPIO24__FUNC_B0_MD32_0_GPIO2 (MTK_PIN_NO(24) | 7)
+
+#define PINMUX_GPIO25__FUNC_B_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_O_LCM_RST (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_O_LCM1_RST (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(25) | 3)
+
+#define PINMUX_GPIO26__FUNC_B_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_I0_DSI_TE (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_I0_DSI1_TE (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(26) | 3)
+
+#define PINMUX_GPIO27__FUNC_B_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_O_LCM1_RST (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_O_LCM_RST (MTK_PIN_NO(27) | 2)
+#define PINMUX_GPIO27__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(27) | 3)
+#define PINMUX_GPIO27__FUNC_O_CMVREF2 (MTK_PIN_NO(27) | 4)
+#define PINMUX_GPIO27__FUNC_O_mbistwriteen_trigger (MTK_PIN_NO(27) | 5)
+#define PINMUX_GPIO27__FUNC_O_PWM_2 (MTK_PIN_NO(27) | 6)
+#define PINMUX_GPIO27__FUNC_B0_DBG_MON_A17 (MTK_PIN_NO(27) | 7)
+
+#define PINMUX_GPIO28__FUNC_B_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_I0_DSI1_TE (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_I0_DSI_TE (MTK_PIN_NO(28) | 2)
+#define PINMUX_GPIO28__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(28) | 3)
+#define PINMUX_GPIO28__FUNC_O_CMVREF3 (MTK_PIN_NO(28) | 4)
+#define PINMUX_GPIO28__FUNC_O_mbistreaden_trigger (MTK_PIN_NO(28) | 5)
+#define PINMUX_GPIO28__FUNC_O_PWM_3 (MTK_PIN_NO(28) | 6)
+#define PINMUX_GPIO28__FUNC_B0_DBG_MON_A18 (MTK_PIN_NO(28) | 7)
+
+#define PINMUX_GPIO29__FUNC_B_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_O_DISP_PWM0 (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_O_DISP_PWM1 (MTK_PIN_NO(29) | 2)
+
+#define PINMUX_GPIO30__FUNC_B_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_O_DISP_PWM1 (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_O_DISP_PWM0 (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_O_CMFLASH3 (MTK_PIN_NO(30) | 3)
+#define PINMUX_GPIO30__FUNC_O_PWM_1 (MTK_PIN_NO(30) | 4)
+#define PINMUX_GPIO30__FUNC_B0_DBG_MON_A19 (MTK_PIN_NO(30) | 7)
+
+#define PINMUX_GPIO31__FUNC_B_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_O_UTXD0 (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_O_TP_UTXD1_AO (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_O_ADSP_UTXD0 (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(31) | 4)
+#define PINMUX_GPIO31__FUNC_O_MD32_0_TXD (MTK_PIN_NO(31) | 5)
+#define PINMUX_GPIO31__FUNC_O_MD32_1_TXD (MTK_PIN_NO(31) | 6)
+#define PINMUX_GPIO31__FUNC_O_SSPM_UTXD_AO (MTK_PIN_NO(31) | 7)
+
+#define PINMUX_GPIO32__FUNC_B_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_I1_URXD0 (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_I1_TP_URXD1_AO (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_I1_ADSP_URXD0 (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(32) | 4)
+#define PINMUX_GPIO32__FUNC_I1_MD32_0_RXD (MTK_PIN_NO(32) | 5)
+#define PINMUX_GPIO32__FUNC_I1_MD32_1_RXD (MTK_PIN_NO(32) | 6)
+#define PINMUX_GPIO32__FUNC_I1_SSPM_URXD_AO (MTK_PIN_NO(32) | 7)
+
+#define PINMUX_GPIO33__FUNC_B_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_O_UTXD1 (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_O_URTS2 (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_O_ADSP_UTXD0 (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_O_TP_UTXD1_AO (MTK_PIN_NO(33) | 4)
+#define PINMUX_GPIO33__FUNC_O_mbistwriteen_trigger (MTK_PIN_NO(33) | 5)
+#define PINMUX_GPIO33__FUNC_O_MD32_0_TXD (MTK_PIN_NO(33) | 6)
+#define PINMUX_GPIO33__FUNC_O_SSPM_UTXD_AO (MTK_PIN_NO(33) | 7)
+
+#define PINMUX_GPIO34__FUNC_B_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_I1_URXD1 (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_I1_UCTS2 (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_I1_ADSP_URXD0 (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_I1_TP_URXD1_AO (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_O_mbistreaden_trigger (MTK_PIN_NO(34) | 5)
+#define PINMUX_GPIO34__FUNC_I1_MD32_0_RXD (MTK_PIN_NO(34) | 6)
+#define PINMUX_GPIO34__FUNC_I1_SSPM_URXD_AO (MTK_PIN_NO(34) | 7)
+
+#define PINMUX_GPIO35__FUNC_B_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_O_UTXD2 (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_O_URTS1 (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_O_ADSP_UTXD0 (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_O_TP_URTS1_AO (MTK_PIN_NO(35) | 4)
+#define PINMUX_GPIO35__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(35) | 5)
+#define PINMUX_GPIO35__FUNC_O_MD32_1_TXD (MTK_PIN_NO(35) | 6)
+#define PINMUX_GPIO35__FUNC_B0_DBG_MON_A20 (MTK_PIN_NO(35) | 7)
+
+#define PINMUX_GPIO36__FUNC_B_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_I1_URXD2 (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_I1_UCTS1 (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_I1_ADSP_URXD0 (MTK_PIN_NO(36) | 3)
+#define PINMUX_GPIO36__FUNC_I1_TP_UCTS1_AO (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_I1_MD32_1_RXD (MTK_PIN_NO(36) | 6)
+#define PINMUX_GPIO36__FUNC_B0_DBG_MON_A21 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_B_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_B1_JTMS_SEL1 (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_I0_UDI_TMS (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_I1_SPM_JTAG_TMS (MTK_PIN_NO(37) | 3)
+#define PINMUX_GPIO37__FUNC_I1_ADSP_JTAG0_TMS (MTK_PIN_NO(37) | 4)
+#define PINMUX_GPIO37__FUNC_I1_SCP_JTAG0_TMS (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_I1_CCU0_JTAG_TMS (MTK_PIN_NO(37) | 6)
+#define PINMUX_GPIO37__FUNC_I1_MCUPM_JTAG_TMS (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_B_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_I0_JTCK_SEL1 (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_I0_UDI_TCK (MTK_PIN_NO(38) | 2)
+#define PINMUX_GPIO38__FUNC_I1_SPM_JTAG_TCK (MTK_PIN_NO(38) | 3)
+#define PINMUX_GPIO38__FUNC_I0_ADSP_JTAG0_TCK (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_I1_SCP_JTAG0_TCK (MTK_PIN_NO(38) | 5)
+#define PINMUX_GPIO38__FUNC_I1_CCU0_JTAG_TCK (MTK_PIN_NO(38) | 6)
+#define PINMUX_GPIO38__FUNC_I1_MCUPM_JTAG_TCK (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_B_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_I1_JTDI_SEL1 (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_I0_UDI_TDI (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_I1_SPM_JTAG_TDI (MTK_PIN_NO(39) | 3)
+#define PINMUX_GPIO39__FUNC_I1_ADSP_JTAG0_TDI (MTK_PIN_NO(39) | 4)
+#define PINMUX_GPIO39__FUNC_I1_SCP_JTAG0_TDI (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_I1_CCU0_JTAG_TDI (MTK_PIN_NO(39) | 6)
+#define PINMUX_GPIO39__FUNC_I1_MCUPM_JTAG_TDI (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_B_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_O_JTDO_SEL1 (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_O_UDI_TDO (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_O_SPM_JTAG_TDO (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_O_ADSP_JTAG0_TDO (MTK_PIN_NO(40) | 4)
+#define PINMUX_GPIO40__FUNC_O_SCP_JTAG0_TDO (MTK_PIN_NO(40) | 5)
+#define PINMUX_GPIO40__FUNC_O_CCU0_JTAG_TDO (MTK_PIN_NO(40) | 6)
+#define PINMUX_GPIO40__FUNC_O_MCUPM_JTAG_TDO (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_B_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_I1_JTRSTn_SEL1 (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_I0_UDI_NTRST (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_I0_SPM_JTAG_TRSTN (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_I1_ADSP_JTAG0_TRSTN (MTK_PIN_NO(41) | 4)
+#define PINMUX_GPIO41__FUNC_I0_SCP_JTAG0_TRSTN (MTK_PIN_NO(41) | 5)
+#define PINMUX_GPIO41__FUNC_I1_CCU0_JTAG_TRST (MTK_PIN_NO(41) | 6)
+#define PINMUX_GPIO41__FUNC_I0_MCUPM_JTAG_TRSTN (MTK_PIN_NO(41) | 7)
+
+#define PINMUX_GPIO42__FUNC_B_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_B1_KPCOL0 (MTK_PIN_NO(42) | 1)
+
+#define PINMUX_GPIO43__FUNC_B_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_B1_KPCOL1 (MTK_PIN_NO(43) | 1)
+#define PINMUX_GPIO43__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(43) | 2)
+#define PINMUX_GPIO43__FUNC_O_CMFLASH2 (MTK_PIN_NO(43) | 3)
+#define PINMUX_GPIO43__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(43) | 4)
+#define PINMUX_GPIO43__FUNC_O_mbistwriteen_trigger (MTK_PIN_NO(43) | 7)
+
+#define PINMUX_GPIO44__FUNC_B_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_B1_KPROW0 (MTK_PIN_NO(44) | 1)
+
+#define PINMUX_GPIO45__FUNC_B_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_B1_KPROW1 (MTK_PIN_NO(45) | 1)
+#define PINMUX_GPIO45__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(45) | 2)
+#define PINMUX_GPIO45__FUNC_O_CMFLASH3 (MTK_PIN_NO(45) | 3)
+#define PINMUX_GPIO45__FUNC_B0_I2SIN_MCK (MTK_PIN_NO(45) | 4)
+#define PINMUX_GPIO45__FUNC_O_mbistreaden_trigger (MTK_PIN_NO(45) | 7)
+
+#define PINMUX_GPIO46__FUNC_B_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_I0_DP_TX_HPD (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_O_PWM_0 (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_I0_VBUSVALID_2P (MTK_PIN_NO(46) | 3)
+#define PINMUX_GPIO46__FUNC_B0_DBG_MON_A22 (MTK_PIN_NO(46) | 7)
+
+#define PINMUX_GPIO47__FUNC_B_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_I1_WAKEN (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_O_GDU_TROOPS_DET0 (MTK_PIN_NO(47) | 6)
+
+#define PINMUX_GPIO48__FUNC_B_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_O_PERSTN (MTK_PIN_NO(48) | 1)
+#define PINMUX_GPIO48__FUNC_O_GDU_TROOPS_DET1 (MTK_PIN_NO(48) | 6)
+
+#define PINMUX_GPIO49__FUNC_B_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_B1_CLKREQN (MTK_PIN_NO(49) | 1)
+#define PINMUX_GPIO49__FUNC_O_GDU_TROOPS_DET2 (MTK_PIN_NO(49) | 6)
+
+#define PINMUX_GPIO50__FUNC_B_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_O_HDMITX20_PWR5V (MTK_PIN_NO(50) | 1)
+#define PINMUX_GPIO50__FUNC_I1_IDDIG_1P (MTK_PIN_NO(50) | 3)
+#define PINMUX_GPIO50__FUNC_I1_SCP_JTAG1_TMS (MTK_PIN_NO(50) | 4)
+#define PINMUX_GPIO50__FUNC_I1_SSPM_JTAG_TMS (MTK_PIN_NO(50) | 5)
+#define PINMUX_GPIO50__FUNC_I1_MD32_0_JTAG_TMS (MTK_PIN_NO(50) | 6)
+#define PINMUX_GPIO50__FUNC_I1_MD32_1_JTAG_TMS (MTK_PIN_NO(50) | 7)
+
+#define PINMUX_GPIO51__FUNC_B_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_I0_HDMITX20_HTPLG (MTK_PIN_NO(51) | 1)
+#define PINMUX_GPIO51__FUNC_I0_EDP_TX_HPD (MTK_PIN_NO(51) | 2)
+#define PINMUX_GPIO51__FUNC_O_USB_DRVVBUS_1P (MTK_PIN_NO(51) | 3)
+#define PINMUX_GPIO51__FUNC_I1_SCP_JTAG1_TCK (MTK_PIN_NO(51) | 4)
+#define PINMUX_GPIO51__FUNC_I1_SSPM_JTAG_TCK (MTK_PIN_NO(51) | 5)
+#define PINMUX_GPIO51__FUNC_I1_MD32_0_JTAG_TCK (MTK_PIN_NO(51) | 6)
+#define PINMUX_GPIO51__FUNC_I1_MD32_1_JTAG_TCK (MTK_PIN_NO(51) | 7)
+
+#define PINMUX_GPIO52__FUNC_B_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_B1_HDMITX20_CEC (MTK_PIN_NO(52) | 1)
+#define PINMUX_GPIO52__FUNC_I0_VBUSVALID_1P (MTK_PIN_NO(52) | 3)
+#define PINMUX_GPIO52__FUNC_I1_SCP_JTAG1_TDI (MTK_PIN_NO(52) | 4)
+#define PINMUX_GPIO52__FUNC_I1_SSPM_JTAG_TDI (MTK_PIN_NO(52) | 5)
+#define PINMUX_GPIO52__FUNC_I1_MD32_0_JTAG_TDI (MTK_PIN_NO(52) | 6)
+#define PINMUX_GPIO52__FUNC_I1_MD32_1_JTAG_TDI (MTK_PIN_NO(52) | 7)
+
+#define PINMUX_GPIO53__FUNC_B_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_B1_HDMITX20_SCL (MTK_PIN_NO(53) | 1)
+#define PINMUX_GPIO53__FUNC_I1_IDDIG_2P (MTK_PIN_NO(53) | 3)
+#define PINMUX_GPIO53__FUNC_O_SCP_JTAG1_TDO (MTK_PIN_NO(53) | 4)
+#define PINMUX_GPIO53__FUNC_O_SSPM_JTAG_TDO (MTK_PIN_NO(53) | 5)
+#define PINMUX_GPIO53__FUNC_O_MD32_0_JTAG_TDO (MTK_PIN_NO(53) | 6)
+#define PINMUX_GPIO53__FUNC_O_MD32_1_JTAG_TDO (MTK_PIN_NO(53) | 7)
+
+#define PINMUX_GPIO54__FUNC_B_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_B1_HDMITX20_SDA (MTK_PIN_NO(54) | 1)
+#define PINMUX_GPIO54__FUNC_O_USB_DRVVBUS_2P (MTK_PIN_NO(54) | 3)
+#define PINMUX_GPIO54__FUNC_I0_SCP_JTAG1_TRSTN (MTK_PIN_NO(54) | 4)
+#define PINMUX_GPIO54__FUNC_I0_SSPM_JTAG_TRSTN (MTK_PIN_NO(54) | 5)
+#define PINMUX_GPIO54__FUNC_I1_MD32_0_JTAG_TRST (MTK_PIN_NO(54) | 6)
+#define PINMUX_GPIO54__FUNC_I1_MD32_1_JTAG_TRST (MTK_PIN_NO(54) | 7)
+
+#define PINMUX_GPIO55__FUNC_B_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_B1_SCL0 (MTK_PIN_NO(55) | 1)
+#define PINMUX_GPIO55__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(55) | 2)
+#define PINMUX_GPIO55__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(55) | 3)
+#define PINMUX_GPIO55__FUNC_B1_PCIE_PHY_I2C_SCL (MTK_PIN_NO(55) | 4)
+
+#define PINMUX_GPIO56__FUNC_B_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_B1_SDA0 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(56) | 2)
+#define PINMUX_GPIO56__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(56) | 3)
+#define PINMUX_GPIO56__FUNC_B1_PCIE_PHY_I2C_SDA (MTK_PIN_NO(56) | 4)
+
+#define PINMUX_GPIO57__FUNC_B_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_B1_SCL1 (MTK_PIN_NO(57) | 1)
+
+#define PINMUX_GPIO58__FUNC_B_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_B1_SDA1 (MTK_PIN_NO(58) | 1)
+
+#define PINMUX_GPIO59__FUNC_B_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_B1_SCL2 (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(59) | 2)
+#define PINMUX_GPIO59__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(59) | 3)
+
+#define PINMUX_GPIO60__FUNC_B_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_B1_SDA2 (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(60) | 2)
+#define PINMUX_GPIO60__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(60) | 3)
+
+#define PINMUX_GPIO61__FUNC_B_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_B1_SCL3 (MTK_PIN_NO(61) | 1)
+#define PINMUX_GPIO61__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(61) | 2)
+#define PINMUX_GPIO61__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(61) | 3)
+#define PINMUX_GPIO61__FUNC_B1_PCIE_PHY_I2C_SCL (MTK_PIN_NO(61) | 4)
+
+#define PINMUX_GPIO62__FUNC_B_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_B1_SDA3 (MTK_PIN_NO(62) | 1)
+#define PINMUX_GPIO62__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(62) | 2)
+#define PINMUX_GPIO62__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(62) | 3)
+#define PINMUX_GPIO62__FUNC_B1_PCIE_PHY_I2C_SDA (MTK_PIN_NO(62) | 4)
+
+#define PINMUX_GPIO63__FUNC_B_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_B1_SCL4 (MTK_PIN_NO(63) | 1)
+
+#define PINMUX_GPIO64__FUNC_B_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_B1_SDA4 (MTK_PIN_NO(64) | 1)
+
+#define PINMUX_GPIO65__FUNC_B_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_B1_SCL5 (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(65) | 2)
+#define PINMUX_GPIO65__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(65) | 3)
+
+#define PINMUX_GPIO66__FUNC_B_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_B1_SDA5 (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(66) | 2)
+#define PINMUX_GPIO66__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(66) | 3)
+
+#define PINMUX_GPIO67__FUNC_B_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_B1_SCL6 (MTK_PIN_NO(67) | 1)
+#define PINMUX_GPIO67__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(67) | 2)
+#define PINMUX_GPIO67__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(67) | 3)
+#define PINMUX_GPIO67__FUNC_B1_PCIE_PHY_I2C_SCL (MTK_PIN_NO(67) | 4)
+
+#define PINMUX_GPIO68__FUNC_B_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_B1_SDA6 (MTK_PIN_NO(68) | 1)
+#define PINMUX_GPIO68__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(68) | 2)
+#define PINMUX_GPIO68__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(68) | 3)
+#define PINMUX_GPIO68__FUNC_B1_PCIE_PHY_I2C_SDA (MTK_PIN_NO(68) | 4)
+
+#define PINMUX_GPIO69__FUNC_B_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_O_SPIM0_CSB (MTK_PIN_NO(69) | 1)
+#define PINMUX_GPIO69__FUNC_O_SCP_SPI0_CS (MTK_PIN_NO(69) | 2)
+#define PINMUX_GPIO69__FUNC_O_DMIC3_CLK (MTK_PIN_NO(69) | 3)
+#define PINMUX_GPIO69__FUNC_B0_MD32_1_GPIO0 (MTK_PIN_NO(69) | 4)
+#define PINMUX_GPIO69__FUNC_O_CMVREF0 (MTK_PIN_NO(69) | 5)
+#define PINMUX_GPIO69__FUNC_O_GDU_SUM_TROOP0_0 (MTK_PIN_NO(69) | 6)
+#define PINMUX_GPIO69__FUNC_B0_DBG_MON_A23 (MTK_PIN_NO(69) | 7)
+
+#define PINMUX_GPIO70__FUNC_B_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_O_SPIM0_CLK (MTK_PIN_NO(70) | 1)
+#define PINMUX_GPIO70__FUNC_O_SCP_SPI0_CK (MTK_PIN_NO(70) | 2)
+#define PINMUX_GPIO70__FUNC_I0_DMIC3_DAT (MTK_PIN_NO(70) | 3)
+#define PINMUX_GPIO70__FUNC_B0_MD32_1_GPIO1 (MTK_PIN_NO(70) | 4)
+#define PINMUX_GPIO70__FUNC_O_CMVREF1 (MTK_PIN_NO(70) | 5)
+#define PINMUX_GPIO70__FUNC_O_GDU_SUM_TROOP0_1 (MTK_PIN_NO(70) | 6)
+#define PINMUX_GPIO70__FUNC_B0_DBG_MON_A24 (MTK_PIN_NO(70) | 7)
+
+#define PINMUX_GPIO71__FUNC_B_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_B0_SPIM0_MOSI (MTK_PIN_NO(71) | 1)
+#define PINMUX_GPIO71__FUNC_O_SCP_SPI0_MO (MTK_PIN_NO(71) | 2)
+#define PINMUX_GPIO71__FUNC_I0_DMIC3_DAT_R (MTK_PIN_NO(71) | 3)
+#define PINMUX_GPIO71__FUNC_B0_MD32_1_GPIO2 (MTK_PIN_NO(71) | 4)
+#define PINMUX_GPIO71__FUNC_O_CMVREF2 (MTK_PIN_NO(71) | 5)
+#define PINMUX_GPIO71__FUNC_O_GDU_SUM_TROOP0_2 (MTK_PIN_NO(71) | 6)
+#define PINMUX_GPIO71__FUNC_B0_DBG_MON_A25 (MTK_PIN_NO(71) | 7)
+
+#define PINMUX_GPIO72__FUNC_B_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_B0_SPIM0_MISO (MTK_PIN_NO(72) | 1)
+#define PINMUX_GPIO72__FUNC_I0_SCP_SPI0_MI (MTK_PIN_NO(72) | 2)
+#define PINMUX_GPIO72__FUNC_O_DMIC4_CLK (MTK_PIN_NO(72) | 3)
+#define PINMUX_GPIO72__FUNC_O_CMVREF3 (MTK_PIN_NO(72) | 5)
+#define PINMUX_GPIO72__FUNC_O_GDU_SUM_TROOP1_0 (MTK_PIN_NO(72) | 6)
+#define PINMUX_GPIO72__FUNC_B0_DBG_MON_A26 (MTK_PIN_NO(72) | 7)
+
+#define PINMUX_GPIO73__FUNC_B_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_B0_SPIM0_MIO2 (MTK_PIN_NO(73) | 1)
+#define PINMUX_GPIO73__FUNC_O_UTXD3 (MTK_PIN_NO(73) | 2)
+#define PINMUX_GPIO73__FUNC_I0_DMIC4_DAT (MTK_PIN_NO(73) | 3)
+#define PINMUX_GPIO73__FUNC_O_CLKM0 (MTK_PIN_NO(73) | 4)
+#define PINMUX_GPIO73__FUNC_O_CMVREF4 (MTK_PIN_NO(73) | 5)
+#define PINMUX_GPIO73__FUNC_O_GDU_SUM_TROOP1_1 (MTK_PIN_NO(73) | 6)
+#define PINMUX_GPIO73__FUNC_B0_DBG_MON_A27 (MTK_PIN_NO(73) | 7)
+
+#define PINMUX_GPIO74__FUNC_B_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_B0_SPIM0_MIO3 (MTK_PIN_NO(74) | 1)
+#define PINMUX_GPIO74__FUNC_I1_URXD3 (MTK_PIN_NO(74) | 2)
+#define PINMUX_GPIO74__FUNC_I0_DMIC4_DAT_R (MTK_PIN_NO(74) | 3)
+#define PINMUX_GPIO74__FUNC_O_CLKM1 (MTK_PIN_NO(74) | 4)
+#define PINMUX_GPIO74__FUNC_O_CMVREF5 (MTK_PIN_NO(74) | 5)
+#define PINMUX_GPIO74__FUNC_O_GDU_SUM_TROOP1_2 (MTK_PIN_NO(74) | 6)
+#define PINMUX_GPIO74__FUNC_B0_DBG_MON_A28 (MTK_PIN_NO(74) | 7)
+
+#define PINMUX_GPIO75__FUNC_B_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_O_SPIM1_CSB (MTK_PIN_NO(75) | 1)
+#define PINMUX_GPIO75__FUNC_O_SCP_SPI1_A_CS (MTK_PIN_NO(75) | 2)
+#define PINMUX_GPIO75__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(75) | 3)
+#define PINMUX_GPIO75__FUNC_B1_SCP_SCL0 (MTK_PIN_NO(75) | 4)
+#define PINMUX_GPIO75__FUNC_O_CMVREF6 (MTK_PIN_NO(75) | 5)
+#define PINMUX_GPIO75__FUNC_O_GDU_SUM_TROOP2_0 (MTK_PIN_NO(75) | 6)
+#define PINMUX_GPIO75__FUNC_B0_DBG_MON_A29 (MTK_PIN_NO(75) | 7)
+
+#define PINMUX_GPIO76__FUNC_B_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_O_SPIM1_CLK (MTK_PIN_NO(76) | 1)
+#define PINMUX_GPIO76__FUNC_O_SCP_SPI1_A_CK (MTK_PIN_NO(76) | 2)
+#define PINMUX_GPIO76__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(76) | 3)
+#define PINMUX_GPIO76__FUNC_B1_SCP_SDA0 (MTK_PIN_NO(76) | 4)
+#define PINMUX_GPIO76__FUNC_O_CMVREF7 (MTK_PIN_NO(76) | 5)
+#define PINMUX_GPIO76__FUNC_O_GDU_SUM_TROOP2_1 (MTK_PIN_NO(76) | 6)
+#define PINMUX_GPIO76__FUNC_B0_DBG_MON_A30 (MTK_PIN_NO(76) | 7)
+
+#define PINMUX_GPIO77__FUNC_B_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_B0_SPIM1_MOSI (MTK_PIN_NO(77) | 1)
+#define PINMUX_GPIO77__FUNC_O_SCP_SPI1_A_MO (MTK_PIN_NO(77) | 2)
+#define PINMUX_GPIO77__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(77) | 3)
+#define PINMUX_GPIO77__FUNC_B1_SCP_SCL1 (MTK_PIN_NO(77) | 4)
+#define PINMUX_GPIO77__FUNC_O_GDU_SUM_TROOP2_2 (MTK_PIN_NO(77) | 6)
+#define PINMUX_GPIO77__FUNC_B0_DBG_MON_A31 (MTK_PIN_NO(77) | 7)
+
+#define PINMUX_GPIO78__FUNC_B_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_B0_SPIM1_MISO (MTK_PIN_NO(78) | 1)
+#define PINMUX_GPIO78__FUNC_I0_SCP_SPI1_A_MI (MTK_PIN_NO(78) | 2)
+#define PINMUX_GPIO78__FUNC_I0_TDMIN_DI (MTK_PIN_NO(78) | 3)
+#define PINMUX_GPIO78__FUNC_B1_SCP_SDA1 (MTK_PIN_NO(78) | 4)
+#define PINMUX_GPIO78__FUNC_B0_DBG_MON_A32 (MTK_PIN_NO(78) | 7)
+
+#define PINMUX_GPIO79__FUNC_B_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_O_SPIM2_CSB (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_O_SCP_SPI2_CS (MTK_PIN_NO(79) | 2)
+#define PINMUX_GPIO79__FUNC_O_I2SO1_MCK (MTK_PIN_NO(79) | 3)
+#define PINMUX_GPIO79__FUNC_O_UTXD2 (MTK_PIN_NO(79) | 4)
+#define PINMUX_GPIO79__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(79) | 5)
+#define PINMUX_GPIO79__FUNC_B0_PCM_SYNC (MTK_PIN_NO(79) | 6)
+#define PINMUX_GPIO79__FUNC_B0_DBG_MON_B0 (MTK_PIN_NO(79) | 7)
+
+#define PINMUX_GPIO80__FUNC_B_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_O_SPIM2_CLK (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_O_SCP_SPI2_CK (MTK_PIN_NO(80) | 2)
+#define PINMUX_GPIO80__FUNC_O_I2SO1_BCK (MTK_PIN_NO(80) | 3)
+#define PINMUX_GPIO80__FUNC_I1_URXD2 (MTK_PIN_NO(80) | 4)
+#define PINMUX_GPIO80__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(80) | 5)
+#define PINMUX_GPIO80__FUNC_B0_PCM_CLK (MTK_PIN_NO(80) | 6)
+#define PINMUX_GPIO80__FUNC_B0_DBG_MON_B1 (MTK_PIN_NO(80) | 7)
+
+#define PINMUX_GPIO81__FUNC_B_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_B0_SPIM2_MOSI (MTK_PIN_NO(81) | 1)
+#define PINMUX_GPIO81__FUNC_O_SCP_SPI2_MO (MTK_PIN_NO(81) | 2)
+#define PINMUX_GPIO81__FUNC_O_I2SO1_WS (MTK_PIN_NO(81) | 3)
+#define PINMUX_GPIO81__FUNC_O_URTS2 (MTK_PIN_NO(81) | 4)
+#define PINMUX_GPIO81__FUNC_O_TP_URTS2_AO (MTK_PIN_NO(81) | 5)
+#define PINMUX_GPIO81__FUNC_O_PCM_DO (MTK_PIN_NO(81) | 6)
+#define PINMUX_GPIO81__FUNC_B0_DBG_MON_B2 (MTK_PIN_NO(81) | 7)
+
+#define PINMUX_GPIO82__FUNC_B_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_B0_SPIM2_MISO (MTK_PIN_NO(82) | 1)
+#define PINMUX_GPIO82__FUNC_I0_SCP_SPI2_MI (MTK_PIN_NO(82) | 2)
+#define PINMUX_GPIO82__FUNC_O_I2SO1_D0 (MTK_PIN_NO(82) | 3)
+#define PINMUX_GPIO82__FUNC_I1_UCTS2 (MTK_PIN_NO(82) | 4)
+#define PINMUX_GPIO82__FUNC_I1_TP_UCTS2_AO (MTK_PIN_NO(82) | 5)
+#define PINMUX_GPIO82__FUNC_I0_PCM_DI (MTK_PIN_NO(82) | 6)
+#define PINMUX_GPIO82__FUNC_B0_DBG_MON_B3 (MTK_PIN_NO(82) | 7)
+
+#define PINMUX_GPIO83__FUNC_B_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_I1_IDDIG (MTK_PIN_NO(83) | 1)
+
+#define PINMUX_GPIO84__FUNC_B_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_O_USB_DRVVBUS (MTK_PIN_NO(84) | 1)
+
+#define PINMUX_GPIO85__FUNC_B_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_I0_VBUSVALID (MTK_PIN_NO(85) | 1)
+
+#define PINMUX_GPIO86__FUNC_B_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_I1_IDDIG_1P (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_O_UTXD1 (MTK_PIN_NO(86) | 2)
+#define PINMUX_GPIO86__FUNC_O_URTS2 (MTK_PIN_NO(86) | 3)
+#define PINMUX_GPIO86__FUNC_O_PWM_2 (MTK_PIN_NO(86) | 4)
+#define PINMUX_GPIO86__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(86) | 5)
+#define PINMUX_GPIO86__FUNC_O_AUXIF_ST0 (MTK_PIN_NO(86) | 6)
+#define PINMUX_GPIO86__FUNC_B0_DBG_MON_B4 (MTK_PIN_NO(86) | 7)
+
+#define PINMUX_GPIO87__FUNC_B_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_O_USB_DRVVBUS_1P (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_I1_URXD1 (MTK_PIN_NO(87) | 2)
+#define PINMUX_GPIO87__FUNC_I1_UCTS2 (MTK_PIN_NO(87) | 3)
+#define PINMUX_GPIO87__FUNC_O_PWM_3 (MTK_PIN_NO(87) | 4)
+#define PINMUX_GPIO87__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(87) | 5)
+#define PINMUX_GPIO87__FUNC_O_AUXIF_CLK0 (MTK_PIN_NO(87) | 6)
+#define PINMUX_GPIO87__FUNC_B0_DBG_MON_B5 (MTK_PIN_NO(87) | 7)
+
+#define PINMUX_GPIO88__FUNC_B_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_I0_VBUSVALID_1P (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_O_UTXD2 (MTK_PIN_NO(88) | 2)
+#define PINMUX_GPIO88__FUNC_O_URTS1 (MTK_PIN_NO(88) | 3)
+#define PINMUX_GPIO88__FUNC_O_CLKM2 (MTK_PIN_NO(88) | 4)
+#define PINMUX_GPIO88__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(88) | 5)
+#define PINMUX_GPIO88__FUNC_O_AUXIF_ST1 (MTK_PIN_NO(88) | 6)
+#define PINMUX_GPIO88__FUNC_B0_DBG_MON_B6 (MTK_PIN_NO(88) | 7)
+
+#define PINMUX_GPIO89__FUNC_B_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_I1_IDDIG_2P (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_I1_URXD2 (MTK_PIN_NO(89) | 2)
+#define PINMUX_GPIO89__FUNC_I1_UCTS1 (MTK_PIN_NO(89) | 3)
+#define PINMUX_GPIO89__FUNC_O_CLKM3 (MTK_PIN_NO(89) | 4)
+#define PINMUX_GPIO89__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(89) | 5)
+#define PINMUX_GPIO89__FUNC_O_AUXIF_CLK1 (MTK_PIN_NO(89) | 6)
+#define PINMUX_GPIO89__FUNC_B0_DBG_MON_B7 (MTK_PIN_NO(89) | 7)
+
+#define PINMUX_GPIO90__FUNC_B_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_O_USB_DRVVBUS_2P (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_O_UTXD3 (MTK_PIN_NO(90) | 2)
+#define PINMUX_GPIO90__FUNC_O_ADSP_UTXD0 (MTK_PIN_NO(90) | 3)
+#define PINMUX_GPIO90__FUNC_O_SSPM_UTXD_AO (MTK_PIN_NO(90) | 4)
+#define PINMUX_GPIO90__FUNC_O_MD32_0_TXD (MTK_PIN_NO(90) | 5)
+#define PINMUX_GPIO90__FUNC_O_MD32_1_TXD (MTK_PIN_NO(90) | 6)
+#define PINMUX_GPIO90__FUNC_B0_DBG_MON_B8 (MTK_PIN_NO(90) | 7)
+
+#define PINMUX_GPIO91__FUNC_B_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_I0_VBUSVALID_2P (MTK_PIN_NO(91) | 1)
+#define PINMUX_GPIO91__FUNC_I1_URXD3 (MTK_PIN_NO(91) | 2)
+#define PINMUX_GPIO91__FUNC_I1_ADSP_URXD0 (MTK_PIN_NO(91) | 3)
+#define PINMUX_GPIO91__FUNC_I1_SSPM_URXD_AO (MTK_PIN_NO(91) | 4)
+#define PINMUX_GPIO91__FUNC_I1_MD32_0_RXD (MTK_PIN_NO(91) | 5)
+#define PINMUX_GPIO91__FUNC_I1_MD32_1_RXD (MTK_PIN_NO(91) | 6)
+#define PINMUX_GPIO91__FUNC_B0_DBG_MON_B9 (MTK_PIN_NO(91) | 7)
+
+#define PINMUX_GPIO92__FUNC_B_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_O_PWRAP_SPI0_CSN (MTK_PIN_NO(92) | 1)
+
+#define PINMUX_GPIO93__FUNC_B_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_O_PWRAP_SPI0_CK (MTK_PIN_NO(93) | 1)
+
+#define PINMUX_GPIO94__FUNC_B_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_B0_PWRAP_SPI0_MO (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_B0_PWRAP_SPI0_MI (MTK_PIN_NO(94) | 2)
+
+#define PINMUX_GPIO95__FUNC_B_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_B0_PWRAP_SPI0_MI (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_B0_PWRAP_SPI0_MO (MTK_PIN_NO(95) | 2)
+
+#define PINMUX_GPIO96__FUNC_B_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_O_SRCLKENA0 (MTK_PIN_NO(96) | 1)
+
+#define PINMUX_GPIO97__FUNC_B_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_O_SRCLKENA1 (MTK_PIN_NO(97) | 1)
+
+#define PINMUX_GPIO98__FUNC_B_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_O_SCP_VREQ_VAO (MTK_PIN_NO(98) | 1)
+#define PINMUX_GPIO98__FUNC_I0_DVFSRC_EXT_REQ (MTK_PIN_NO(98) | 2)
+
+#define PINMUX_GPIO99__FUNC_B_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_I0_RTC32K_CK (MTK_PIN_NO(99) | 1)
+
+#define PINMUX_GPIO100__FUNC_B_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_O_WATCHDOG (MTK_PIN_NO(100) | 1)
+
+#define PINMUX_GPIO101__FUNC_B_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_O_AUD_CLK_MOSI (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_O_I2SO1_MCK (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_B0_I2SIN_BCK (MTK_PIN_NO(101) | 3)
+
+#define PINMUX_GPIO102__FUNC_B_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_O_AUD_SYNC_MOSI (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_O_I2SO1_BCK (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_B0_I2SIN_WS (MTK_PIN_NO(102) | 3)
+
+#define PINMUX_GPIO103__FUNC_B_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_O_AUD_DAT_MOSI0 (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_O_I2SO1_WS (MTK_PIN_NO(103) | 2)
+#define PINMUX_GPIO103__FUNC_I0_I2SIN_D0 (MTK_PIN_NO(103) | 3)
+
+#define PINMUX_GPIO104__FUNC_B_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_O_AUD_DAT_MOSI1 (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_O_I2SO1_D0 (MTK_PIN_NO(104) | 2)
+#define PINMUX_GPIO104__FUNC_I0_I2SIN_D1 (MTK_PIN_NO(104) | 3)
+
+#define PINMUX_GPIO105__FUNC_B_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_I0_AUD_DAT_MISO0 (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_I0_VOW_DAT_MISO (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_I0_I2SIN_D2 (MTK_PIN_NO(105) | 3)
+
+#define PINMUX_GPIO106__FUNC_B_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_I0_AUD_DAT_MISO1 (MTK_PIN_NO(106) | 1)
+#define PINMUX_GPIO106__FUNC_I0_VOW_CLK_MISO (MTK_PIN_NO(106) | 2)
+#define PINMUX_GPIO106__FUNC_I0_I2SIN_D3 (MTK_PIN_NO(106) | 3)
+
+#define PINMUX_GPIO107__FUNC_B_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_B0_I2SIN_MCK (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_I0_SPLIN_MCK (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_I0_SPDIF_IN0 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_O_CMVREF4 (MTK_PIN_NO(107) | 4)
+#define PINMUX_GPIO107__FUNC_O_AUXIF_ST0 (MTK_PIN_NO(107) | 5)
+#define PINMUX_GPIO107__FUNC_O_PGD_LV_LSC_PWR0 (MTK_PIN_NO(107) | 6)
+
+#define PINMUX_GPIO108__FUNC_B_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_B0_I2SIN_BCK (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_I0_SPLIN_LRCK (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_O_DMIC4_CLK (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_O_CMVREF5 (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_O_AUXIF_CLK0 (MTK_PIN_NO(108) | 5)
+#define PINMUX_GPIO108__FUNC_O_PGD_LV_LSC_PWR1 (MTK_PIN_NO(108) | 6)
+#define PINMUX_GPIO108__FUNC_B0_DBG_MON_B10 (MTK_PIN_NO(108) | 7)
+
+#define PINMUX_GPIO109__FUNC_B_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_B0_I2SIN_WS (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_I0_SPLIN_BCK (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_I0_DMIC4_DAT (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_O_CMVREF6 (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_O_AUXIF_ST1 (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_O_PGD_LV_LSC_PWR2 (MTK_PIN_NO(109) | 6)
+#define PINMUX_GPIO109__FUNC_B0_DBG_MON_B11 (MTK_PIN_NO(109) | 7)
+
+#define PINMUX_GPIO110__FUNC_B_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_I0_I2SIN_D0 (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_I0_SPLIN_D0 (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_I0_DMIC4_DAT_R (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_O_CMVREF7 (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_O_AUXIF_CLK1 (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_O_PGD_LV_LSC_PWR3 (MTK_PIN_NO(110) | 6)
+#define PINMUX_GPIO110__FUNC_B0_DBG_MON_B12 (MTK_PIN_NO(110) | 7)
+
+#define PINMUX_GPIO111__FUNC_B_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_I0_I2SIN_D1 (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_I0_SPLIN_D1 (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_O_DMIC3_CLK (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_O_SPDIF_OUT (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_O_PGD_LV_LSC_PWR4 (MTK_PIN_NO(111) | 6)
+#define PINMUX_GPIO111__FUNC_B0_DBG_MON_B13 (MTK_PIN_NO(111) | 7)
+
+#define PINMUX_GPIO112__FUNC_B_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_I0_I2SIN_D2 (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_I0_SPLIN_D2 (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_I0_DMIC3_DAT (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_O_I2SO1_WS (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_O_PGD_LV_LSC_PWR5 (MTK_PIN_NO(112) | 6)
+#define PINMUX_GPIO112__FUNC_B0_DBG_MON_B14 (MTK_PIN_NO(112) | 7)
+
+#define PINMUX_GPIO113__FUNC_B_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_I0_I2SIN_D3 (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_I0_SPLIN_D3 (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_I0_DMIC3_DAT_R (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_O_I2SO1_D0 (MTK_PIN_NO(113) | 5)
+#define PINMUX_GPIO113__FUNC_B0_DBG_MON_B15 (MTK_PIN_NO(113) | 7)
+
+#define PINMUX_GPIO114__FUNC_B_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_O_I2SO2_MCK (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_B0_I2SIN_MCK (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_I1_MCUPM_JTAG_TMS (MTK_PIN_NO(114) | 3)
+#define PINMUX_GPIO114__FUNC_B1_APU_JTAG_TMS (MTK_PIN_NO(114) | 4)
+#define PINMUX_GPIO114__FUNC_I1_SCP_JTAG1_TMS (MTK_PIN_NO(114) | 5)
+#define PINMUX_GPIO114__FUNC_I1_SPM_JTAG_TMS (MTK_PIN_NO(114) | 6)
+#define PINMUX_GPIO114__FUNC_B0_DBG_MON_B16 (MTK_PIN_NO(114) | 7)
+
+#define PINMUX_GPIO115__FUNC_B_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_B0_I2SO2_BCK (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_B0_I2SIN_BCK (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_I1_MCUPM_JTAG_TCK (MTK_PIN_NO(115) | 3)
+#define PINMUX_GPIO115__FUNC_I0_APU_JTAG_TCK (MTK_PIN_NO(115) | 4)
+#define PINMUX_GPIO115__FUNC_I1_SCP_JTAG1_TCK (MTK_PIN_NO(115) | 5)
+#define PINMUX_GPIO115__FUNC_I1_SPM_JTAG_TCK (MTK_PIN_NO(115) | 6)
+#define PINMUX_GPIO115__FUNC_B0_DBG_MON_B17 (MTK_PIN_NO(115) | 7)
+
+#define PINMUX_GPIO116__FUNC_B_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_B0_I2SO2_WS (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_B0_I2SIN_WS (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_I1_MCUPM_JTAG_TDI (MTK_PIN_NO(116) | 3)
+#define PINMUX_GPIO116__FUNC_I1_APU_JTAG_TDI (MTK_PIN_NO(116) | 4)
+#define PINMUX_GPIO116__FUNC_I1_SCP_JTAG1_TDI (MTK_PIN_NO(116) | 5)
+#define PINMUX_GPIO116__FUNC_I1_SPM_JTAG_TDI (MTK_PIN_NO(116) | 6)
+#define PINMUX_GPIO116__FUNC_B0_DBG_MON_B18 (MTK_PIN_NO(116) | 7)
+
+#define PINMUX_GPIO117__FUNC_B_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_O_I2SO2_D0 (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_I0_I2SIN_D0 (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_O_MCUPM_JTAG_TDO (MTK_PIN_NO(117) | 3)
+#define PINMUX_GPIO117__FUNC_O_APU_JTAG_TDO (MTK_PIN_NO(117) | 4)
+#define PINMUX_GPIO117__FUNC_O_SCP_JTAG1_TDO (MTK_PIN_NO(117) | 5)
+#define PINMUX_GPIO117__FUNC_O_SPM_JTAG_TDO (MTK_PIN_NO(117) | 6)
+#define PINMUX_GPIO117__FUNC_B0_DBG_MON_B19 (MTK_PIN_NO(117) | 7)
+
+#define PINMUX_GPIO118__FUNC_B_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_O_I2SO2_D1 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_I0_I2SIN_D1 (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_I0_MCUPM_JTAG_TRSTN (MTK_PIN_NO(118) | 3)
+#define PINMUX_GPIO118__FUNC_I0_APU_JTAG_TRST (MTK_PIN_NO(118) | 4)
+#define PINMUX_GPIO118__FUNC_I0_SCP_JTAG1_TRSTN (MTK_PIN_NO(118) | 5)
+#define PINMUX_GPIO118__FUNC_I0_SPM_JTAG_TRSTN (MTK_PIN_NO(118) | 6)
+#define PINMUX_GPIO118__FUNC_B0_DBG_MON_B20 (MTK_PIN_NO(118) | 7)
+
+#define PINMUX_GPIO119__FUNC_B_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_O_I2SO2_D2 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_I0_I2SIN_D2 (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_O_UTXD3 (MTK_PIN_NO(119) | 3)
+#define PINMUX_GPIO119__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(119) | 4)
+#define PINMUX_GPIO119__FUNC_O_I2SO1_MCK (MTK_PIN_NO(119) | 5)
+#define PINMUX_GPIO119__FUNC_O_SSPM_UTXD_AO (MTK_PIN_NO(119) | 6)
+#define PINMUX_GPIO119__FUNC_B0_DBG_MON_B21 (MTK_PIN_NO(119) | 7)
+
+#define PINMUX_GPIO120__FUNC_B_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_O_I2SO2_D3 (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_I0_I2SIN_D3 (MTK_PIN_NO(120) | 2)
+#define PINMUX_GPIO120__FUNC_I1_URXD3 (MTK_PIN_NO(120) | 3)
+#define PINMUX_GPIO120__FUNC_I0_TDMIN_DI (MTK_PIN_NO(120) | 4)
+#define PINMUX_GPIO120__FUNC_O_I2SO1_BCK (MTK_PIN_NO(120) | 5)
+#define PINMUX_GPIO120__FUNC_I1_SSPM_URXD_AO (MTK_PIN_NO(120) | 6)
+#define PINMUX_GPIO120__FUNC_B0_DBG_MON_B22 (MTK_PIN_NO(120) | 7)
+
+#define PINMUX_GPIO121__FUNC_B_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_B0_PCM_CLK (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_O_SPIM4_CSB (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_O_SCP_SPI1_B_CS (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(121) | 4)
+#define PINMUX_GPIO121__FUNC_O_AUXIF_ST0 (MTK_PIN_NO(121) | 5)
+#define PINMUX_GPIO121__FUNC_O_PGD_DA_EFUSE_RDY (MTK_PIN_NO(121) | 6)
+#define PINMUX_GPIO121__FUNC_B0_DBG_MON_B23 (MTK_PIN_NO(121) | 7)
+
+#define PINMUX_GPIO122__FUNC_B_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_B0_PCM_SYNC (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_O_SPIM4_CLK (MTK_PIN_NO(122) | 2)
+#define PINMUX_GPIO122__FUNC_O_SCP_SPI1_B_CK (MTK_PIN_NO(122) | 3)
+#define PINMUX_GPIO122__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(122) | 4)
+#define PINMUX_GPIO122__FUNC_O_AUXIF_CLK0 (MTK_PIN_NO(122) | 5)
+#define PINMUX_GPIO122__FUNC_O_PGD_DA_EFUSE_RDY_PRE (MTK_PIN_NO(122) | 6)
+#define PINMUX_GPIO122__FUNC_B0_DBG_MON_B24 (MTK_PIN_NO(122) | 7)
+
+#define PINMUX_GPIO123__FUNC_B_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_O_PCM_DO (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_B0_SPIM4_MOSI (MTK_PIN_NO(123) | 2)
+#define PINMUX_GPIO123__FUNC_O_SCP_SPI1_B_MO (MTK_PIN_NO(123) | 3)
+#define PINMUX_GPIO123__FUNC_O_TP_URTS2_AO (MTK_PIN_NO(123) | 4)
+#define PINMUX_GPIO123__FUNC_O_AUXIF_ST1 (MTK_PIN_NO(123) | 5)
+#define PINMUX_GPIO123__FUNC_O_PGD_DA_PWRGD_RESET (MTK_PIN_NO(123) | 6)
+#define PINMUX_GPIO123__FUNC_B0_DBG_MON_B25 (MTK_PIN_NO(123) | 7)
+
+#define PINMUX_GPIO124__FUNC_B_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_I0_PCM_DI (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_B0_SPIM4_MISO (MTK_PIN_NO(124) | 2)
+#define PINMUX_GPIO124__FUNC_I0_SCP_SPI1_B_MI (MTK_PIN_NO(124) | 3)
+#define PINMUX_GPIO124__FUNC_I1_TP_UCTS2_AO (MTK_PIN_NO(124) | 4)
+#define PINMUX_GPIO124__FUNC_O_AUXIF_CLK1 (MTK_PIN_NO(124) | 5)
+#define PINMUX_GPIO124__FUNC_O_PGD_DA_PWRGD_ENB (MTK_PIN_NO(124) | 6)
+#define PINMUX_GPIO124__FUNC_B0_DBG_MON_B26 (MTK_PIN_NO(124) | 7)
+
+#define PINMUX_GPIO125__FUNC_B_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_O_DMIC1_CLK (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_O_SPINOR_CK (MTK_PIN_NO(125) | 2)
+#define PINMUX_GPIO125__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(125) | 3)
+#define PINMUX_GPIO125__FUNC_O_LVTS_FOUT (MTK_PIN_NO(125) | 6)
+#define PINMUX_GPIO125__FUNC_B0_DBG_MON_B27 (MTK_PIN_NO(125) | 7)
+
+#define PINMUX_GPIO126__FUNC_B_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_I0_DMIC1_DAT (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_O_SPINOR_CS (MTK_PIN_NO(126) | 2)
+#define PINMUX_GPIO126__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(126) | 3)
+#define PINMUX_GPIO126__FUNC_O_LVTS_SDO (MTK_PIN_NO(126) | 6)
+#define PINMUX_GPIO126__FUNC_B0_DBG_MON_B28 (MTK_PIN_NO(126) | 7)
+
+#define PINMUX_GPIO127__FUNC_B_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_I0_DMIC1_DAT_R (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_B0_SPINOR_IO0 (MTK_PIN_NO(127) | 2)
+#define PINMUX_GPIO127__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(127) | 3)
+#define PINMUX_GPIO127__FUNC_I0_LVTS_26M (MTK_PIN_NO(127) | 6)
+#define PINMUX_GPIO127__FUNC_B0_DBG_MON_B29 (MTK_PIN_NO(127) | 7)
+
+#define PINMUX_GPIO128__FUNC_B_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_O_DMIC2_CLK (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_B0_SPINOR_IO1 (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_I0_TDMIN_DI (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_I0_LVTS_SCF (MTK_PIN_NO(128) | 6)
+#define PINMUX_GPIO128__FUNC_B0_DBG_MON_B30 (MTK_PIN_NO(128) | 7)
+
+#define PINMUX_GPIO129__FUNC_B_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_I0_DMIC2_DAT (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_B0_SPINOR_IO2 (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_I0_SPDIF_IN1 (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_I0_LVTS_SCK (MTK_PIN_NO(129) | 6)
+#define PINMUX_GPIO129__FUNC_B0_DBG_MON_B31 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_B_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_I0_DMIC2_DAT_R (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_B0_SPINOR_IO3 (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_I0_SPDIF_IN2 (MTK_PIN_NO(130) | 3)
+#define PINMUX_GPIO130__FUNC_I0_LVTS_SDI (MTK_PIN_NO(130) | 6)
+#define PINMUX_GPIO130__FUNC_B0_DBG_MON_B32 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_B_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_O_DPI_D0 (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_O_GBE_TXD3 (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_O_DMIC1_CLK (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_O_I2SO2_MCK (MTK_PIN_NO(131) | 4)
+#define PINMUX_GPIO131__FUNC_B0_TP_GPIO0_AO (MTK_PIN_NO(131) | 5)
+#define PINMUX_GPIO131__FUNC_O_SPIM5_CSB (MTK_PIN_NO(131) | 6)
+#define PINMUX_GPIO131__FUNC_O_PGD_LV_HSC_PWR0 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_B_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_O_DPI_D1 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_O_GBE_TXD2 (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_I0_DMIC1_DAT (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_B0_I2SO2_BCK (MTK_PIN_NO(132) | 4)
+#define PINMUX_GPIO132__FUNC_B0_TP_GPIO1_AO (MTK_PIN_NO(132) | 5)
+#define PINMUX_GPIO132__FUNC_O_SPIM5_CLK (MTK_PIN_NO(132) | 6)
+#define PINMUX_GPIO132__FUNC_O_PGD_LV_HSC_PWR1 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_B_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_O_DPI_D2 (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_O_GBE_TXD1 (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_I0_DMIC1_DAT_R (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_B0_I2SO2_WS (MTK_PIN_NO(133) | 4)
+#define PINMUX_GPIO133__FUNC_B0_TP_GPIO2_AO (MTK_PIN_NO(133) | 5)
+#define PINMUX_GPIO133__FUNC_B0_SPIM5_MOSI (MTK_PIN_NO(133) | 6)
+#define PINMUX_GPIO133__FUNC_O_PGD_LV_HSC_PWR2 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_B_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_O_DPI_D3 (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_O_GBE_TXD0 (MTK_PIN_NO(134) | 2)
+#define PINMUX_GPIO134__FUNC_O_DMIC2_CLK (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_O_I2SO2_D0 (MTK_PIN_NO(134) | 4)
+#define PINMUX_GPIO134__FUNC_B0_TP_GPIO3_AO (MTK_PIN_NO(134) | 5)
+#define PINMUX_GPIO134__FUNC_B0_SPIM5_MISO (MTK_PIN_NO(134) | 6)
+#define PINMUX_GPIO134__FUNC_O_PGD_LV_HSC_PWR3 (MTK_PIN_NO(134) | 7)
+
+#define PINMUX_GPIO135__FUNC_B_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_O_DPI_D4 (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_I0_GBE_RXD3 (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_I0_DMIC2_DAT (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_O_I2SO2_D1 (MTK_PIN_NO(135) | 4)
+#define PINMUX_GPIO135__FUNC_B0_TP_GPIO4_AO (MTK_PIN_NO(135) | 5)
+#define PINMUX_GPIO135__FUNC_I1_WAKEN (MTK_PIN_NO(135) | 6)
+#define PINMUX_GPIO135__FUNC_O_PGD_LV_HSC_PWR4 (MTK_PIN_NO(135) | 7)
+
+#define PINMUX_GPIO136__FUNC_B_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_O_DPI_D5 (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_I0_GBE_RXD2 (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_I0_DMIC2_DAT_R (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_O_I2SO2_D2 (MTK_PIN_NO(136) | 4)
+#define PINMUX_GPIO136__FUNC_B0_TP_GPIO5_AO (MTK_PIN_NO(136) | 5)
+#define PINMUX_GPIO136__FUNC_O_PERSTN (MTK_PIN_NO(136) | 6)
+#define PINMUX_GPIO136__FUNC_O_PGD_LV_HSC_PWR5 (MTK_PIN_NO(136) | 7)
+
+#define PINMUX_GPIO137__FUNC_B_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_O_DPI_D6 (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_I0_GBE_RXD1 (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_O_DMIC3_CLK (MTK_PIN_NO(137) | 3)
+#define PINMUX_GPIO137__FUNC_O_I2SO2_D3 (MTK_PIN_NO(137) | 4)
+#define PINMUX_GPIO137__FUNC_B0_TP_GPIO6_AO (MTK_PIN_NO(137) | 5)
+#define PINMUX_GPIO137__FUNC_B1_CLKREQN (MTK_PIN_NO(137) | 6)
+#define PINMUX_GPIO137__FUNC_O_PWM_0 (MTK_PIN_NO(137) | 7)
+
+#define PINMUX_GPIO138__FUNC_B_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_O_DPI_D7 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_I0_GBE_RXD0 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_I0_DMIC3_DAT (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_O_CLKM2 (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_B0_TP_GPIO7_AO (MTK_PIN_NO(138) | 5)
+#define PINMUX_GPIO138__FUNC_B0_MD32_0_GPIO0 (MTK_PIN_NO(138) | 7)
+
+#define PINMUX_GPIO139__FUNC_B_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_O_DPI_D8 (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_B0_GBE_TXC (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_I0_DMIC3_DAT_R (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_O_CLKM3 (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_O_TP_UTXD2_AO (MTK_PIN_NO(139) | 5)
+#define PINMUX_GPIO139__FUNC_O_UTXD2 (MTK_PIN_NO(139) | 6)
+#define PINMUX_GPIO139__FUNC_B0_MD32_0_GPIO1 (MTK_PIN_NO(139) | 7)
+
+#define PINMUX_GPIO140__FUNC_B_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_O_DPI_D9 (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_I0_GBE_RXC (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_O_DMIC4_CLK (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_O_PWM_2 (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_I1_TP_URXD2_AO (MTK_PIN_NO(140) | 5)
+#define PINMUX_GPIO140__FUNC_I1_URXD2 (MTK_PIN_NO(140) | 6)
+#define PINMUX_GPIO140__FUNC_B0_MD32_0_GPIO2 (MTK_PIN_NO(140) | 7)
+
+#define PINMUX_GPIO141__FUNC_B_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_O_DPI_D10 (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_I0_GBE_RXDV (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_I0_DMIC4_DAT (MTK_PIN_NO(141) | 3)
+#define PINMUX_GPIO141__FUNC_O_PWM_3 (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_O_TP_URTS2_AO (MTK_PIN_NO(141) | 5)
+#define PINMUX_GPIO141__FUNC_O_URTS2 (MTK_PIN_NO(141) | 6)
+#define PINMUX_GPIO141__FUNC_B0_MD32_1_GPIO0 (MTK_PIN_NO(141) | 7)
+
+#define PINMUX_GPIO142__FUNC_B_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_O_DPI_D11 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_O_GBE_TXEN (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_I0_DMIC4_DAT_R (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_O_PWM_1 (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_I1_TP_UCTS2_AO (MTK_PIN_NO(142) | 5)
+#define PINMUX_GPIO142__FUNC_I1_UCTS2 (MTK_PIN_NO(142) | 6)
+#define PINMUX_GPIO142__FUNC_B0_MD32_1_GPIO1 (MTK_PIN_NO(142) | 7)
+
+#define PINMUX_GPIO143__FUNC_B_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_O_DPI_D12 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_O_GBE_MDC (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_B0_MD32_0_GPIO0 (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_O_CLKM0 (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_O_SPIM3_CSB (MTK_PIN_NO(143) | 5)
+#define PINMUX_GPIO143__FUNC_O_UTXD1 (MTK_PIN_NO(143) | 6)
+#define PINMUX_GPIO143__FUNC_B0_MD32_1_GPIO2 (MTK_PIN_NO(143) | 7)
+
+#define PINMUX_GPIO144__FUNC_B_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_O_DPI_D13 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_B1_GBE_MDIO (MTK_PIN_NO(144) | 2)
+#define PINMUX_GPIO144__FUNC_B0_MD32_0_GPIO1 (MTK_PIN_NO(144) | 3)
+#define PINMUX_GPIO144__FUNC_O_CLKM1 (MTK_PIN_NO(144) | 4)
+#define PINMUX_GPIO144__FUNC_O_SPIM3_CLK (MTK_PIN_NO(144) | 5)
+#define PINMUX_GPIO144__FUNC_I1_URXD1 (MTK_PIN_NO(144) | 6)
+#define PINMUX_GPIO144__FUNC_O_PGD_HV_HSC_PWR0 (MTK_PIN_NO(144) | 7)
+
+#define PINMUX_GPIO145__FUNC_B_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_O_DPI_D14 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_O_GBE_TXER (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_B0_MD32_1_GPIO0 (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_O_CMFLASH0 (MTK_PIN_NO(145) | 4)
+#define PINMUX_GPIO145__FUNC_B0_SPIM3_MOSI (MTK_PIN_NO(145) | 5)
+#define PINMUX_GPIO145__FUNC_B0_GBE_AUX_PPS2 (MTK_PIN_NO(145) | 6)
+#define PINMUX_GPIO145__FUNC_O_PGD_HV_HSC_PWR1 (MTK_PIN_NO(145) | 7)
+
+#define PINMUX_GPIO146__FUNC_B_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_O_DPI_D15 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_I0_GBE_RXER (MTK_PIN_NO(146) | 2)
+#define PINMUX_GPIO146__FUNC_B0_MD32_1_GPIO1 (MTK_PIN_NO(146) | 3)
+#define PINMUX_GPIO146__FUNC_O_CMFLASH1 (MTK_PIN_NO(146) | 4)
+#define PINMUX_GPIO146__FUNC_B0_SPIM3_MISO (MTK_PIN_NO(146) | 5)
+#define PINMUX_GPIO146__FUNC_B0_GBE_AUX_PPS3 (MTK_PIN_NO(146) | 6)
+#define PINMUX_GPIO146__FUNC_O_PGD_HV_HSC_PWR2 (MTK_PIN_NO(146) | 7)
+
+#define PINMUX_GPIO147__FUNC_B_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_O_DPI_HSYNC (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_I0_GBE_COL (MTK_PIN_NO(147) | 2)
+#define PINMUX_GPIO147__FUNC_O_I2SO1_MCK (MTK_PIN_NO(147) | 3)
+#define PINMUX_GPIO147__FUNC_O_CMVREF0 (MTK_PIN_NO(147) | 4)
+#define PINMUX_GPIO147__FUNC_O_SPDIF_OUT (MTK_PIN_NO(147) | 5)
+#define PINMUX_GPIO147__FUNC_O_URTS1 (MTK_PIN_NO(147) | 6)
+#define PINMUX_GPIO147__FUNC_O_PGD_HV_HSC_PWR3 (MTK_PIN_NO(147) | 7)
+
+#define PINMUX_GPIO148__FUNC_B_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_O_DPI_VSYNC (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_I0_GBE_INTR (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_O_I2SO1_BCK (MTK_PIN_NO(148) | 3)
+#define PINMUX_GPIO148__FUNC_O_CMVREF1 (MTK_PIN_NO(148) | 4)
+#define PINMUX_GPIO148__FUNC_I0_SPDIF_IN0 (MTK_PIN_NO(148) | 5)
+#define PINMUX_GPIO148__FUNC_I1_UCTS1 (MTK_PIN_NO(148) | 6)
+#define PINMUX_GPIO148__FUNC_O_PGD_HV_HSC_PWR4 (MTK_PIN_NO(148) | 7)
+
+#define PINMUX_GPIO149__FUNC_B_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_O_DPI_DE (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_B0_GBE_AUX_PPS0 (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_O_I2SO1_WS (MTK_PIN_NO(149) | 3)
+#define PINMUX_GPIO149__FUNC_O_CMVREF2 (MTK_PIN_NO(149) | 4)
+#define PINMUX_GPIO149__FUNC_I0_SPDIF_IN1 (MTK_PIN_NO(149) | 5)
+#define PINMUX_GPIO149__FUNC_O_UTXD3 (MTK_PIN_NO(149) | 6)
+#define PINMUX_GPIO149__FUNC_O_PGD_HV_HSC_PWR5 (MTK_PIN_NO(149) | 7)
+
+#define PINMUX_GPIO150__FUNC_B_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_O_DPI_CK (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_B0_GBE_AUX_PPS1 (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_O_I2SO1_D0 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_O_CMVREF3 (MTK_PIN_NO(150) | 4)
+#define PINMUX_GPIO150__FUNC_I0_SPDIF_IN2 (MTK_PIN_NO(150) | 5)
+#define PINMUX_GPIO150__FUNC_I1_URXD3 (MTK_PIN_NO(150) | 6)
+
+#define PINMUX_GPIO151__FUNC_B_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_B1_MSDC0_DAT7 (MTK_PIN_NO(151) | 1)
+
+#define PINMUX_GPIO152__FUNC_B_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_B1_MSDC0_DAT6 (MTK_PIN_NO(152) | 1)
+
+#define PINMUX_GPIO153__FUNC_B_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_B1_MSDC0_DAT5 (MTK_PIN_NO(153) | 1)
+
+#define PINMUX_GPIO154__FUNC_B_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_B1_MSDC0_DAT4 (MTK_PIN_NO(154) | 1)
+
+#define PINMUX_GPIO155__FUNC_B_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_O_MSDC0_RSTB (MTK_PIN_NO(155) | 1)
+
+#define PINMUX_GPIO156__FUNC_B_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_B1_MSDC0_CMD (MTK_PIN_NO(156) | 1)
+
+#define PINMUX_GPIO157__FUNC_B_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_B1_MSDC0_CLK (MTK_PIN_NO(157) | 1)
+
+#define PINMUX_GPIO158__FUNC_B_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_B1_MSDC0_DAT3 (MTK_PIN_NO(158) | 1)
+
+#define PINMUX_GPIO159__FUNC_B_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_B1_MSDC0_DAT2 (MTK_PIN_NO(159) | 1)
+
+#define PINMUX_GPIO160__FUNC_B_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_B1_MSDC0_DAT1 (MTK_PIN_NO(160) | 1)
+
+#define PINMUX_GPIO161__FUNC_B_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_B1_MSDC0_DAT0 (MTK_PIN_NO(161) | 1)
+
+#define PINMUX_GPIO162__FUNC_B_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_B0_MSDC0_DSL (MTK_PIN_NO(162) | 1)
+
+#define PINMUX_GPIO163__FUNC_B_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_B1_MSDC1_CMD (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_O_SPDIF_OUT (MTK_PIN_NO(163) | 2)
+#define PINMUX_GPIO163__FUNC_I1_MD32_0_JTAG_TMS (MTK_PIN_NO(163) | 3)
+#define PINMUX_GPIO163__FUNC_I1_ADSP_JTAG0_TMS (MTK_PIN_NO(163) | 4)
+#define PINMUX_GPIO163__FUNC_I1_SCP_JTAG0_TMS (MTK_PIN_NO(163) | 5)
+#define PINMUX_GPIO163__FUNC_I1_CCU0_JTAG_TMS (MTK_PIN_NO(163) | 6)
+#define PINMUX_GPIO163__FUNC_I0_IPU_JTAG_TMS (MTK_PIN_NO(163) | 7)
+
+#define PINMUX_GPIO164__FUNC_B_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_B1_MSDC1_CLK (MTK_PIN_NO(164) | 1)
+#define PINMUX_GPIO164__FUNC_I0_SPDIF_IN0 (MTK_PIN_NO(164) | 2)
+#define PINMUX_GPIO164__FUNC_I1_MD32_0_JTAG_TCK (MTK_PIN_NO(164) | 3)
+#define PINMUX_GPIO164__FUNC_I0_ADSP_JTAG0_TCK (MTK_PIN_NO(164) | 4)
+#define PINMUX_GPIO164__FUNC_I1_SCP_JTAG0_TCK (MTK_PIN_NO(164) | 5)
+#define PINMUX_GPIO164__FUNC_I1_CCU0_JTAG_TCK (MTK_PIN_NO(164) | 6)
+#define PINMUX_GPIO164__FUNC_I0_IPU_JTAG_TCK (MTK_PIN_NO(164) | 7)
+
+#define PINMUX_GPIO165__FUNC_B_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_B1_MSDC1_DAT0 (MTK_PIN_NO(165) | 1)
+#define PINMUX_GPIO165__FUNC_I0_SPDIF_IN1 (MTK_PIN_NO(165) | 2)
+#define PINMUX_GPIO165__FUNC_I1_MD32_0_JTAG_TDI (MTK_PIN_NO(165) | 3)
+#define PINMUX_GPIO165__FUNC_I1_ADSP_JTAG0_TDI (MTK_PIN_NO(165) | 4)
+#define PINMUX_GPIO165__FUNC_I1_SCP_JTAG0_TDI (MTK_PIN_NO(165) | 5)
+#define PINMUX_GPIO165__FUNC_I1_CCU0_JTAG_TDI (MTK_PIN_NO(165) | 6)
+#define PINMUX_GPIO165__FUNC_I0_IPU_JTAG_TDI (MTK_PIN_NO(165) | 7)
+
+#define PINMUX_GPIO166__FUNC_B_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_B1_MSDC1_DAT1 (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_I0_SPDIF_IN2 (MTK_PIN_NO(166) | 2)
+#define PINMUX_GPIO166__FUNC_O_MD32_0_JTAG_TDO (MTK_PIN_NO(166) | 3)
+#define PINMUX_GPIO166__FUNC_O_ADSP_JTAG0_TDO (MTK_PIN_NO(166) | 4)
+#define PINMUX_GPIO166__FUNC_O_SCP_JTAG0_TDO (MTK_PIN_NO(166) | 5)
+#define PINMUX_GPIO166__FUNC_O_CCU0_JTAG_TDO (MTK_PIN_NO(166) | 6)
+#define PINMUX_GPIO166__FUNC_O_IPU_JTAG_TDO (MTK_PIN_NO(166) | 7)
+
+#define PINMUX_GPIO167__FUNC_B_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_B1_MSDC1_DAT2 (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_O_PWM_0 (MTK_PIN_NO(167) | 2)
+#define PINMUX_GPIO167__FUNC_I1_MD32_0_JTAG_TRST (MTK_PIN_NO(167) | 3)
+#define PINMUX_GPIO167__FUNC_I1_ADSP_JTAG0_TRSTN (MTK_PIN_NO(167) | 4)
+#define PINMUX_GPIO167__FUNC_I0_SCP_JTAG0_TRSTN (MTK_PIN_NO(167) | 5)
+#define PINMUX_GPIO167__FUNC_I1_CCU0_JTAG_TRST (MTK_PIN_NO(167) | 6)
+#define PINMUX_GPIO167__FUNC_I0_IPU_JTAG_TRST (MTK_PIN_NO(167) | 7)
+
+#define PINMUX_GPIO168__FUNC_B_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_B1_MSDC1_DAT3 (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_O_PWM_1 (MTK_PIN_NO(168) | 2)
+#define PINMUX_GPIO168__FUNC_O_CLKM0 (MTK_PIN_NO(168) | 3)
+
+#define PINMUX_GPIO169__FUNC_B_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_B1_MSDC2_CMD (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_O_LVTS_FOUT (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_I1_MD32_1_JTAG_TMS (MTK_PIN_NO(169) | 3)
+#define PINMUX_GPIO169__FUNC_I0_UDI_TMS (MTK_PIN_NO(169) | 4)
+#define PINMUX_GPIO169__FUNC_I0_VPU_UDI_TMS (MTK_PIN_NO(169) | 5)
+#define PINMUX_GPIO169__FUNC_B0_TDMIN_MCK (MTK_PIN_NO(169) | 6)
+#define PINMUX_GPIO169__FUNC_I1_SSPM_JTAG_TMS (MTK_PIN_NO(169) | 7)
+
+#define PINMUX_GPIO170__FUNC_B_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_B1_MSDC2_CLK (MTK_PIN_NO(170) | 1)
+#define PINMUX_GPIO170__FUNC_O_LVTS_SDO (MTK_PIN_NO(170) | 2)
+#define PINMUX_GPIO170__FUNC_I1_MD32_1_JTAG_TCK (MTK_PIN_NO(170) | 3)
+#define PINMUX_GPIO170__FUNC_I0_UDI_TCK (MTK_PIN_NO(170) | 4)
+#define PINMUX_GPIO170__FUNC_I0_VPU_UDI_TCK (MTK_PIN_NO(170) | 5)
+#define PINMUX_GPIO170__FUNC_B0_TDMIN_BCK (MTK_PIN_NO(170) | 6)
+#define PINMUX_GPIO170__FUNC_I1_SSPM_JTAG_TCK (MTK_PIN_NO(170) | 7)
+
+#define PINMUX_GPIO171__FUNC_B_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_B1_MSDC2_DAT0 (MTK_PIN_NO(171) | 1)
+#define PINMUX_GPIO171__FUNC_I0_LVTS_26M (MTK_PIN_NO(171) | 2)
+#define PINMUX_GPIO171__FUNC_I1_MD32_1_JTAG_TDI (MTK_PIN_NO(171) | 3)
+#define PINMUX_GPIO171__FUNC_I0_UDI_TDI (MTK_PIN_NO(171) | 4)
+#define PINMUX_GPIO171__FUNC_I0_VPU_UDI_TDI (MTK_PIN_NO(171) | 5)
+#define PINMUX_GPIO171__FUNC_B0_TDMIN_LRCK (MTK_PIN_NO(171) | 6)
+#define PINMUX_GPIO171__FUNC_I1_SSPM_JTAG_TDI (MTK_PIN_NO(171) | 7)
+
+#define PINMUX_GPIO172__FUNC_B_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_B1_MSDC2_DAT1 (MTK_PIN_NO(172) | 1)
+#define PINMUX_GPIO172__FUNC_I0_LVTS_SCF (MTK_PIN_NO(172) | 2)
+#define PINMUX_GPIO172__FUNC_O_MD32_1_JTAG_TDO (MTK_PIN_NO(172) | 3)
+#define PINMUX_GPIO172__FUNC_O_UDI_TDO (MTK_PIN_NO(172) | 4)
+#define PINMUX_GPIO172__FUNC_O_VPU_UDI_TDO (MTK_PIN_NO(172) | 5)
+#define PINMUX_GPIO172__FUNC_I0_TDMIN_DI (MTK_PIN_NO(172) | 6)
+#define PINMUX_GPIO172__FUNC_O_SSPM_JTAG_TDO (MTK_PIN_NO(172) | 7)
+
+#define PINMUX_GPIO173__FUNC_B_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_B1_MSDC2_DAT2 (MTK_PIN_NO(173) | 1)
+#define PINMUX_GPIO173__FUNC_I0_LVTS_SCK (MTK_PIN_NO(173) | 2)
+#define PINMUX_GPIO173__FUNC_I1_MD32_1_JTAG_TRST (MTK_PIN_NO(173) | 3)
+#define PINMUX_GPIO173__FUNC_I0_UDI_NTRST (MTK_PIN_NO(173) | 4)
+#define PINMUX_GPIO173__FUNC_I0_VPU_UDI_NTRST (MTK_PIN_NO(173) | 5)
+#define PINMUX_GPIO173__FUNC_I0_SSPM_JTAG_TRSTN (MTK_PIN_NO(173) | 7)
+
+#define PINMUX_GPIO174__FUNC_B_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_B1_MSDC2_DAT3 (MTK_PIN_NO(174) | 1)
+#define PINMUX_GPIO174__FUNC_I0_LVTS_SDI (MTK_PIN_NO(174) | 2)
+
+#define PINMUX_GPIO175__FUNC_B_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_B0_SPMI_M_SCL (MTK_PIN_NO(175) | 1)
+
+#define PINMUX_GPIO176__FUNC_B_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_B0_SPMI_M_SDA (MTK_PIN_NO(176) | 1)
+
+#endif /* __MEDIATEK_MT8188_PINFUNC_H */
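
Usage note (outside the diff): each PINMUX_* constant above packs the pin
number and the mux selector into a single cell. MTK_PIN_NO(x), defined as
((x) << 8) in mt65xx.h, carries the pin index in the upper bits, and the low
bits select one of the eight functions (0..7). Board device trees consume
these constants through the "pinmux" property of the MediaTek pinctrl
bindings. A minimal sketch, assuming a board .dts that includes this header
and a pin controller labelled &pio (the label and node names here are
illustrative, not taken from the patch):

	&pio {
		spi2_pins: spi2-pins {
			pins-spi {
				pinmux = <PINMUX_GPIO80__FUNC_O_SPIM2_CLK>,
					 <PINMUX_GPIO81__FUNC_B0_SPIM2_MOSI>,
					 <PINMUX_GPIO82__FUNC_B0_SPIM2_MISO>;
			};
		};
	};

The driver decodes each cell back into a (pin, function) pair, so one
generated header keeps the per-SoC mux tables out of every board file.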
diff --git a/include/dt-bindings/pinctrl/mt65xx.h b/include/dt-bindings/pinctrl/mt65xx.h
index 7e16e58fe1f7..f5934abcd1bd 100644
--- a/include/dt-bindings/pinctrl/mt65xx.h
+++ b/include/dt-bindings/pinctrl/mt65xx.h
@@ -16,6 +16,15 @@
#define MTK_PUPD_SET_R1R0_10 102
#define MTK_PUPD_SET_R1R0_11 103

+#define MTK_PULL_SET_RSEL_000 200
+#define MTK_PULL_SET_RSEL_001 201
+#define MTK_PULL_SET_RSEL_010 202
+#define MTK_PULL_SET_RSEL_011 203
+#define MTK_PULL_SET_RSEL_100 204
+#define MTK_PULL_SET_RSEL_101 205
+#define MTK_PULL_SET_RSEL_110 206
+#define MTK_PULL_SET_RSEL_111 207
+
#define MTK_DRIVE_2mA 2
#define MTK_DRIVE_4mA 4
#define MTK_DRIVE_6mA 6
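
Usage note (outside the diff): the new MTK_PULL_SET_RSEL_* values are meant
as arguments to the bias-pull-up / bias-pull-down properties on pins whose
pull resistance is selectable (RSEL), alongside the existing
MTK_PUPD_SET_R1R0_* codes. A minimal sketch, assuming an SoC with RSEL
support (the pin group and pinmux constant are hypothetical):

	pins-i2c {
		pinmux = <PINMUX_GPIO0__FUNC_SDA0>;	/* hypothetical pin/function */
		bias-pull-up = <MTK_PULL_SET_RSEL_001>;
	};

The 200..207 range deliberately sits apart from the R1R0 codes (100..103) so
the driver can tell the two pull encodings apart from a single cell.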
diff --git a/include/dt-bindings/pinctrl/mt6795-pinfunc.h b/include/dt-bindings/pinctrl/mt6795-pinfunc.h
new file mode 100644
index 000000000000..dfd3f6f13e0d
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt6795-pinfunc.h
@@ -0,0 +1,908 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __DTS_MT6795_PINFUNC_H
+#define __DTS_MT6795_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_IRDA_PDN (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_I2S1_WS (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_TDD_TMS (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_UTXD0 (MTK_PIN_NO(0) | 5)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_IRDA_RXD (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_I2S1_BCK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_SDA4 (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_TDD_TCK (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_URXD0 (MTK_PIN_NO(1) | 5)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_IRDA_TXD (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_I2S1_MCK (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_SCL4 (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_TDD_TDI (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_UTXD3 (MTK_PIN_NO(2) | 5)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_DSI1_TE (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_I2S1_DO_1 (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_SDA3 (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_TDD_TDO (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_URXD3 (MTK_PIN_NO(3) | 5)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_DISP_PWM1 (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S1_DO_2 (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_SCL3 (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_TDD_TRSTN (MTK_PIN_NO(4) | 4)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_PCM1_CLK (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I2S2_WS (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_SPI_CK_3 (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_LTE_MD32_JTAG_TMS (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_AP_MD32_JTAG_TMS (MTK_PIN_NO(5) | 5)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_PCM1_SYNC (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I2S2_BCK (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_SPI_MI_3 (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_LTE_MD32_JTAG_TCK (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_AP_MD32_JTAG_TCK (MTK_PIN_NO(6) | 5)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_PCM1_DI (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_I2S2_DI_1 (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_SPI_MO_3 (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_LTE_MD32_JTAG_TDI (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_AP_MD32_JTAG_TDI (MTK_PIN_NO(7) | 5)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_PCM1_DO (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_I2S2_DI_2 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_SPI_CS_3 (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_LTE_MD32_JTAG_TDO (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_AP_MD32_JTAG_TDO (MTK_PIN_NO(8) | 5)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_USB_DRVVBUS (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_I2S2_MCK (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_LTE_MD32_JTAG_TRST (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_AP_MD32_JTAG_TRST (MTK_PIN_NO(9) | 5)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_I2S0_WS (MTK_PIN_NO(10) | 2)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_I2S0_BCK (MTK_PIN_NO(11) | 2)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_I2S0_MCK (MTK_PIN_NO(12) | 2)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_I2S0_DO (MTK_PIN_NO(13) | 2)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_I2S0_DI (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_DISP_PWM1 (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_PWM4 (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_IRDA_RXD (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_I2S1_BCK (MTK_PIN_NO(14) | 6)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_DSI1_TE (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_USB_DRVVBUS (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_PWM5 (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_IRDA_TXD (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_I2S1_MCK (MTK_PIN_NO(15) | 6)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_IDDIG (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_FLASH (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_PWM5 (MTK_PIN_NO(16) | 4)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_SIM1_SCLK (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_SIM2_SCLK (MTK_PIN_NO(17) | 2)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_SIM1_SRST (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_SIM2_SRST (MTK_PIN_NO(18) | 2)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_SIM1_SDAT (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_SIM2_SDAT (MTK_PIN_NO(19) | 2)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_SIM2_SCLK (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_SIM1_SCLK (MTK_PIN_NO(20) | 2)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_SIM2_SRST (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_SIM1_SRST (MTK_PIN_NO(21) | 2)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_SIM2_SDAT (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_SIM1_SDAT (MTK_PIN_NO(22) | 2)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_MSDC3_DAT0 (MTK_PIN_NO(23) | 1)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_MSDC3_DAT1 (MTK_PIN_NO(24) | 1)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_MSDC3_DAT2 (MTK_PIN_NO(25) | 1)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_MSDC3_DAT3 (MTK_PIN_NO(26) | 1)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_MSDC3_CLK (MTK_PIN_NO(27) | 1)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_MSDC3_CMD (MTK_PIN_NO(28) | 1)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_PTA_RXD (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_UCTS2 (MTK_PIN_NO(29) | 2)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_PTA_TXD (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_URTS2 (MTK_PIN_NO(30) | 2)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_URXD2 (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_UTXD2 (MTK_PIN_NO(31) | 2)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_UTXD2 (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_URXD2 (MTK_PIN_NO(32) | 2)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_MRG_CLK (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_PCM0_CLK (MTK_PIN_NO(33) | 2)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_MRG_DI (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_PCM0_DI (MTK_PIN_NO(34) | 2)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_MRG_DO (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_PCM0_DO (MTK_PIN_NO(35) | 2)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_MRG_SYNC (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_PCM0_SYNC (MTK_PIN_NO(36) | 2)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_GPS_SYNC (MTK_PIN_NO(37) | 1)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_DAIRSTB (MTK_PIN_NO(38) | 1)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_CM2MCLK (MTK_PIN_NO(39) | 1)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_CM3MCLK (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_IRDA_PDN (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_PWM6 (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_I2S1_WS (MTK_PIN_NO(40) | 4)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_CMPCLK (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_CMCSK (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_FLASH (MTK_PIN_NO(41) | 3)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_CMMCLK (MTK_PIN_NO(42) | 1)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_SDA2 (MTK_PIN_NO(43) | 1)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_SCL2 (MTK_PIN_NO(44) | 1)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_SDA0 (MTK_PIN_NO(45) | 1)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_SCL0 (MTK_PIN_NO(46) | 1)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_BPI_BUS0 (MTK_PIN_NO(47) | 1)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_BPI_BUS1 (MTK_PIN_NO(48) | 1)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_BPI_BUS2 (MTK_PIN_NO(49) | 1)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_BPI_BUS3 (MTK_PIN_NO(50) | 1)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_BPI_BUS4 (MTK_PIN_NO(51) | 1)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_BPI_BUS5 (MTK_PIN_NO(52) | 1)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_BPI_BUS6 (MTK_PIN_NO(53) | 1)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_BPI_BUS7 (MTK_PIN_NO(54) | 1)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_BPI_BUS8 (MTK_PIN_NO(55) | 1)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_BPI_BUS9 (MTK_PIN_NO(56) | 1)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_BPI_BUS10 (MTK_PIN_NO(57) | 1)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_BPI_BUS11 (MTK_PIN_NO(58) | 1)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_BPI_BUS12 (MTK_PIN_NO(59) | 1)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_BPI_BUS13 (MTK_PIN_NO(60) | 1)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_BPI_BUS14 (MTK_PIN_NO(61) | 1)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_RFIC1_BSI_CK (MTK_PIN_NO(62) | 1)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_RFIC1_BSI_D0 (MTK_PIN_NO(63) | 1)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_RFIC1_BSI_D1 (MTK_PIN_NO(64) | 1)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_RFIC1_BSI_D2 (MTK_PIN_NO(65) | 1)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_RFIC1_BSI_CS (MTK_PIN_NO(66) | 1)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_TD_TXBPI (MTK_PIN_NO(67) | 1)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(68) | 1)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(69) | 1)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(70) | 1)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(71) | 1)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_RFIC0_BSI_CS (MTK_PIN_NO(72) | 1)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_MISC_BSI_DO (MTK_PIN_NO(73) | 1)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_MISC_BSI_CK (MTK_PIN_NO(74) | 1)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_MISC_BSI_CS0B (MTK_PIN_NO(75) | 1)
+#define PINMUX_GPIO75__FUNC_MIPI1_SCLK (MTK_PIN_NO(75) | 2)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_MISC_BSI_CS1B (MTK_PIN_NO(76) | 1)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_MISC_BSI_DI (MTK_PIN_NO(77) | 1)
+#define PINMUX_GPIO77__FUNC_MIPI1_SDATA (MTK_PIN_NO(77) | 2)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_LTE_TXBPI (MTK_PIN_NO(78) | 1)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_BPI_BUS15 (MTK_PIN_NO(79) | 1)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_BPI_BUS16 (MTK_PIN_NO(80) | 1)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_BPI_BUS17 (MTK_PIN_NO(81) | 1)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_BPI_BUS18 (MTK_PIN_NO(82) | 1)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_BPI_BUS19 (MTK_PIN_NO(83) | 1)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_BPI_BUS20 (MTK_PIN_NO(84) | 1)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_BPI_BUS21 (MTK_PIN_NO(85) | 1)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_BPI_BUS22 (MTK_PIN_NO(86) | 1)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_BPI_BUS23 (MTK_PIN_NO(87) | 1)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_BPI_BUS24 (MTK_PIN_NO(88) | 1)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_BPI_BUS25 (MTK_PIN_NO(89) | 1)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_BPI_BUS26 (MTK_PIN_NO(90) | 1)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_BPI_BUS27 (MTK_PIN_NO(91) | 1)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_PCM1_CLK (MTK_PIN_NO(92) | 1)
+#define PINMUX_GPIO92__FUNC_I2S0_BCK (MTK_PIN_NO(92) | 2)
+#define PINMUX_GPIO92__FUNC_NLD6 (MTK_PIN_NO(92) | 3)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_PCM1_SYNC (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_I2S0_WS (MTK_PIN_NO(93) | 2)
+#define PINMUX_GPIO93__FUNC_NLD7 (MTK_PIN_NO(93) | 3)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_PCM1_DI (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_I2S0_DI (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_NREB (MTK_PIN_NO(94) | 3)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_PCM1_DO (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_I2S0_DO (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_NRNB0 (MTK_PIN_NO(95) | 3)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_URXD1 (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_UTXD1 (MTK_PIN_NO(96) | 2)
+#define PINMUX_GPIO96__FUNC_NWEB (MTK_PIN_NO(96) | 3)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_UTXD1 (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_URXD1 (MTK_PIN_NO(97) | 2)
+#define PINMUX_GPIO97__FUNC_NCEB0 (MTK_PIN_NO(97) | 3)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_URTS1 (MTK_PIN_NO(98) | 1)
+#define PINMUX_GPIO98__FUNC_UCTS1 (MTK_PIN_NO(98) | 2)
+#define PINMUX_GPIO98__FUNC_NALE (MTK_PIN_NO(98) | 3)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_UCTS1 (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_URTS1 (MTK_PIN_NO(99) | 2)
+#define PINMUX_GPIO99__FUNC_NCLE (MTK_PIN_NO(99) | 3)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_MSDC2_DAT0 (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_URXD1 (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_USB_DRVVBUS (MTK_PIN_NO(100) | 3)
+#define PINMUX_GPIO100__FUNC_SDA4 (MTK_PIN_NO(100) | 4)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_MSDC2_DAT1 (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_UTXD1 (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_SCL4 (MTK_PIN_NO(101) | 4)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_MSDC2_DAT2 (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_URTS1 (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_UTXD0 (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_PWM0 (MTK_PIN_NO(102) | 5)
+#define PINMUX_GPIO102__FUNC_SPI_CK_1 (MTK_PIN_NO(102) | 6)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_MSDC2_DAT3 (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_UCTS1 (MTK_PIN_NO(103) | 2)
+#define PINMUX_GPIO103__FUNC_URXD0 (MTK_PIN_NO(103) | 3)
+#define PINMUX_GPIO103__FUNC_PWM1 (MTK_PIN_NO(103) | 5)
+#define PINMUX_GPIO103__FUNC_SPI_MI_1 (MTK_PIN_NO(103) | 6)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_MSDC2_CLK (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_NLD4 (MTK_PIN_NO(104) | 2)
+#define PINMUX_GPIO104__FUNC_UTXD3 (MTK_PIN_NO(104) | 3)
+#define PINMUX_GPIO104__FUNC_SDA3 (MTK_PIN_NO(104) | 4)
+#define PINMUX_GPIO104__FUNC_PWM2 (MTK_PIN_NO(104) | 5)
+#define PINMUX_GPIO104__FUNC_SPI_MO_1 (MTK_PIN_NO(104) | 6)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_MSDC2_CMD (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_NLD5 (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_URXD3 (MTK_PIN_NO(105) | 3)
+#define PINMUX_GPIO105__FUNC_SCL3 (MTK_PIN_NO(105) | 4)
+#define PINMUX_GPIO105__FUNC_PWM3 (MTK_PIN_NO(105) | 5)
+#define PINMUX_GPIO105__FUNC_SPI_CS_1 (MTK_PIN_NO(105) | 6)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_LCM_RST (MTK_PIN_NO(106) | 1)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_DSI_TE (MTK_PIN_NO(107) | 1)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_JTMS (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_MFG_JTAG_TMS (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_TDD_TMS (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_LTE_MD32_JTAG_TMS (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_AP_MD32_JTAG_TMS (MTK_PIN_NO(108) | 5)
+#define PINMUX_GPIO108__FUNC_DFD_TMS (MTK_PIN_NO(108) | 6)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_JTCK (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_MFG_JTAG_TCK (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_TDD_TCK (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_LTE_MD32_JTAG_TCK (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_AP_MD32_JTAG_TCK (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_DFD_TCK (MTK_PIN_NO(109) | 6)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_JTDI (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_MFG_JTAG_TDI (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_TDD_TDI (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_LTE_MD32_JTAG_TDI (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_AP_MD32_JTAG_TDI (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_DFD_TDI (MTK_PIN_NO(110) | 6)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_JTDO (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_MFG_JTAG_TDO (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_TDD_TDO (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_LTE_MD32_JTAG_TDO (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_AP_MD32_JTAG_TDO (MTK_PIN_NO(111) | 5)
+#define PINMUX_GPIO111__FUNC_DFD_TDO (MTK_PIN_NO(111) | 6)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_JTRST_B (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_MFG_JTAG_TRSTN (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_TDD_TRSTN (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_LTE_MD32_JTAG_TRST (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_AP_MD32_JTAG_TRST (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_DFD_NTRST (MTK_PIN_NO(112) | 6)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_URXD0 (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_UTXD0 (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_MD_URXD (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_LTE_URXD (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_TDD_TXD (MTK_PIN_NO(113) | 5)
+#define PINMUX_GPIO113__FUNC_I2S2_WS (MTK_PIN_NO(113) | 6)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_UTXD0 (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_URXD0 (MTK_PIN_NO(114) | 2)
+#define PINMUX_GPIO114__FUNC_MD_UTXD (MTK_PIN_NO(114) | 3)
+#define PINMUX_GPIO114__FUNC_LTE_UTXD (MTK_PIN_NO(114) | 4)
+#define PINMUX_GPIO114__FUNC_TDD_TXD (MTK_PIN_NO(114) | 5)
+#define PINMUX_GPIO114__FUNC_I2S2_BCK (MTK_PIN_NO(114) | 6)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_URTS0 (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_UCTS0 (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_MD_URXD (MTK_PIN_NO(115) | 3)
+#define PINMUX_GPIO115__FUNC_LTE_URXD (MTK_PIN_NO(115) | 4)
+#define PINMUX_GPIO115__FUNC_TDD_TXD (MTK_PIN_NO(115) | 5)
+#define PINMUX_GPIO115__FUNC_I2S2_MCK (MTK_PIN_NO(115) | 6)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_UCTS0 (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_URTS0 (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_MD_UTXD (MTK_PIN_NO(116) | 3)
+#define PINMUX_GPIO116__FUNC_LTE_UTXD (MTK_PIN_NO(116) | 4)
+#define PINMUX_GPIO116__FUNC_TDD_TXD (MTK_PIN_NO(116) | 5)
+#define PINMUX_GPIO116__FUNC_I2S2_DI_1 (MTK_PIN_NO(116) | 6)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_URXD3 (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_UTXD3 (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_MD_URXD (MTK_PIN_NO(117) | 3)
+#define PINMUX_GPIO117__FUNC_LTE_URXD (MTK_PIN_NO(117) | 4)
+#define PINMUX_GPIO117__FUNC_TDD_TXD (MTK_PIN_NO(117) | 5)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_UTXD3 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_URXD3 (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_MD_UTXD (MTK_PIN_NO(118) | 3)
+#define PINMUX_GPIO118__FUNC_LTE_UTXD (MTK_PIN_NO(118) | 4)
+#define PINMUX_GPIO118__FUNC_TDD_TXD (MTK_PIN_NO(118) | 5)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_KROW0 (MTK_PIN_NO(119) | 1)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_KROW1 (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_PWM6 (MTK_PIN_NO(120) | 3)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_KROW2 (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_IRDA_PDN (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_I2S1_DO_1 (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_USB_DRVVBUS (MTK_PIN_NO(121) | 4)
+#define PINMUX_GPIO121__FUNC_SPI_CK_2 (MTK_PIN_NO(121) | 5)
+#define PINMUX_GPIO121__FUNC_PWM4 (MTK_PIN_NO(121) | 6)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_KCOL0 (MTK_PIN_NO(122) | 1)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_KCOL1 (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_IRDA_RXD (MTK_PIN_NO(123) | 2)
+#define PINMUX_GPIO123__FUNC_I2S2_DI_2 (MTK_PIN_NO(123) | 3)
+#define PINMUX_GPIO123__FUNC_PWM5 (MTK_PIN_NO(123) | 4)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_KCOL2 (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_IRDA_TXD (MTK_PIN_NO(124) | 2)
+#define PINMUX_GPIO124__FUNC_I2S1_DO_2 (MTK_PIN_NO(124) | 3)
+#define PINMUX_GPIO124__FUNC_USB_DRVVBUS (MTK_PIN_NO(124) | 4)
+#define PINMUX_GPIO124__FUNC_SPI_MI_2 (MTK_PIN_NO(124) | 5)
+#define PINMUX_GPIO124__FUNC_PWM3 (MTK_PIN_NO(124) | 6)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_SDA1 (MTK_PIN_NO(125) | 1)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_SCL1 (MTK_PIN_NO(126) | 1)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_MD_EINT1 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_DISP_PWM1 (MTK_PIN_NO(127) | 2)
+#define PINMUX_GPIO127__FUNC_SPI_MO_2 (MTK_PIN_NO(127) | 3)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_MD_EINT2 (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_DSI1_TE (MTK_PIN_NO(128) | 2)
+#define PINMUX_GPIO128__FUNC_SPI_CS_2 (MTK_PIN_NO(128) | 3)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_I2S3_WS (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_I2S2_WS (MTK_PIN_NO(129) | 2)
+#define PINMUX_GPIO129__FUNC_PWM0 (MTK_PIN_NO(129) | 3)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_I2S3_BCK (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_I2S2_BCK (MTK_PIN_NO(130) | 2)
+#define PINMUX_GPIO130__FUNC_PWM1 (MTK_PIN_NO(130) | 3)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_I2S3_MCK (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_I2S2_MCK (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_PWM2 (MTK_PIN_NO(131) | 3)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_I2S3_DO_1 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_I2S2_DI_1 (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_PWM3 (MTK_PIN_NO(132) | 3)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_I2S3_DO_2 (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_I2S2_DI_2 (MTK_PIN_NO(133) | 2)
+#define PINMUX_GPIO133__FUNC_PWM4 (MTK_PIN_NO(133) | 3)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_I2S3_DO_3 (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_DISP_PWM1 (MTK_PIN_NO(134) | 2)
+#define PINMUX_GPIO134__FUNC_I2S1_DO_1 (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_PWM5 (MTK_PIN_NO(134) | 4)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_I2S3_DO_4 (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_DSI1_TE (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_I2S1_DO_2 (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_PWM6 (MTK_PIN_NO(135) | 4)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_SDA3 (MTK_PIN_NO(136) | 1)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_SCL3 (MTK_PIN_NO(137) | 1)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_DPI_CK (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_NLD6 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_UTXD0 (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_USB_DRVVBUS (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_IRDA_PDN (MTK_PIN_NO(138) | 5)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_DPI_DE (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_NLD7 (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_URXD0 (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_MD_UTXD (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_IRDA_RXD (MTK_PIN_NO(139) | 5)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_DPI_D0 (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_NREB (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_UCTS0 (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_MD_URXD (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_IRDA_TXD (MTK_PIN_NO(140) | 5)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_DPI_D1 (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_NRNB0 (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_URTS0 (MTK_PIN_NO(141) | 3)
+#define PINMUX_GPIO141__FUNC_LTE_UTXD (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_I2S2_WS (MTK_PIN_NO(141) | 5)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_DPI_D2 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_NWEB (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_UTXD1 (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_LTE_URXD (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_I2S2_BCK (MTK_PIN_NO(142) | 5)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_DPI_D3 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_NCEB0 (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_URXD1 (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_TDD_TXD (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_I2S2_MCK (MTK_PIN_NO(143) | 5)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_DPI_D4 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_NALE (MTK_PIN_NO(144) | 2)
+#define PINMUX_GPIO144__FUNC_UCTS1 (MTK_PIN_NO(144) | 3)
+#define PINMUX_GPIO144__FUNC_TDD_TMS (MTK_PIN_NO(144) | 4)
+#define PINMUX_GPIO144__FUNC_I2S2_DI_1 (MTK_PIN_NO(144) | 5)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_DPI_D5 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_NCLE (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_URTS1 (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_TDD_TCK (MTK_PIN_NO(145) | 4)
+#define PINMUX_GPIO145__FUNC_I2S2_DI_2 (MTK_PIN_NO(145) | 5)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_DPI_D6 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_NLD8 (MTK_PIN_NO(146) | 2)
+#define PINMUX_GPIO146__FUNC_UTXD2 (MTK_PIN_NO(146) | 3)
+#define PINMUX_GPIO146__FUNC_TDD_TDI (MTK_PIN_NO(146) | 4)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_DPI_D7 (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_NLD9 (MTK_PIN_NO(147) | 2)
+#define PINMUX_GPIO147__FUNC_URXD2 (MTK_PIN_NO(147) | 3)
+#define PINMUX_GPIO147__FUNC_TDD_TDO (MTK_PIN_NO(147) | 4)
+#define PINMUX_GPIO147__FUNC_I2S1_WS (MTK_PIN_NO(147) | 5)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_DPI_D8 (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_NLD10 (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_UCTS2 (MTK_PIN_NO(148) | 3)
+#define PINMUX_GPIO148__FUNC_TDD_TRSTN (MTK_PIN_NO(148) | 4)
+#define PINMUX_GPIO148__FUNC_I2S1_BCK (MTK_PIN_NO(148) | 5)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_DPI_D9 (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_NLD11 (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_URTS2 (MTK_PIN_NO(149) | 3)
+#define PINMUX_GPIO149__FUNC_LTE_MD32_JTAG_TMS (MTK_PIN_NO(149) | 4)
+#define PINMUX_GPIO149__FUNC_I2S1_MCK (MTK_PIN_NO(149) | 5)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_DPI_D10 (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_NLD12 (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_UTXD3 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_LTE_MD32_JTAG_TCK (MTK_PIN_NO(150) | 4)
+#define PINMUX_GPIO150__FUNC_I2S1_DO_1 (MTK_PIN_NO(150) | 5)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_DPI_D11 (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_NLD13 (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_URXD3 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_LTE_MD32_JTAG_TDI (MTK_PIN_NO(151) | 4)
+#define PINMUX_GPIO151__FUNC_I2S1_DO_2 (MTK_PIN_NO(151) | 5)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_DPI_HSYNC (MTK_PIN_NO(152) | 1)
+#define PINMUX_GPIO152__FUNC_NLD14 (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_UCTS3 (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_LTE_MD32_JTAG_TDO (MTK_PIN_NO(152) | 4)
+#define PINMUX_GPIO152__FUNC_DSI1_TE (MTK_PIN_NO(152) | 5)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_DPI_VSYNC (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_NLD15 (MTK_PIN_NO(153) | 2)
+#define PINMUX_GPIO153__FUNC_URTS3 (MTK_PIN_NO(153) | 3)
+#define PINMUX_GPIO153__FUNC_LTE_MD32_JTAG_TRST (MTK_PIN_NO(153) | 4)
+#define PINMUX_GPIO153__FUNC_DISP_PWM1 (MTK_PIN_NO(153) | 5)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_MSDC0_DAT0 (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_NLD8 (MTK_PIN_NO(154) | 2)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_MSDC0_DAT1 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_NLD9 (MTK_PIN_NO(155) | 2)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_MSDC0_DAT2 (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_NLD10 (MTK_PIN_NO(156) | 2)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_MSDC0_DAT3 (MTK_PIN_NO(157) | 1)
+#define PINMUX_GPIO157__FUNC_NLD11 (MTK_PIN_NO(157) | 2)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_MSDC0_DAT4 (MTK_PIN_NO(158) | 1)
+#define PINMUX_GPIO158__FUNC_NLD12 (MTK_PIN_NO(158) | 2)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_MSDC0_DAT5 (MTK_PIN_NO(159) | 1)
+#define PINMUX_GPIO159__FUNC_NLD13 (MTK_PIN_NO(159) | 2)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_MSDC0_DAT6 (MTK_PIN_NO(160) | 1)
+#define PINMUX_GPIO160__FUNC_NLD14 (MTK_PIN_NO(160) | 2)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_MSDC0_DAT7 (MTK_PIN_NO(161) | 1)
+#define PINMUX_GPIO161__FUNC_NLD15 (MTK_PIN_NO(161) | 2)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_MSDC0_CMD (MTK_PIN_NO(162) | 1)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_MSDC0_CLK (MTK_PIN_NO(163) | 1)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_MSDC0_DSL (MTK_PIN_NO(164) | 1)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_MSDC0_RSTB (MTK_PIN_NO(165) | 1)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_SPI_CK_0 (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_PWM0 (MTK_PIN_NO(166) | 3)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_SPI_MI_0 (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_PWM1 (MTK_PIN_NO(167) | 3)
+#define PINMUX_GPIO167__FUNC_SPI_MO_0 (MTK_PIN_NO(167) | 4)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_SPI_MO_0 (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_MD_EINT3 (MTK_PIN_NO(168) | 2)
+#define PINMUX_GPIO168__FUNC_PWM2 (MTK_PIN_NO(168) | 3)
+#define PINMUX_GPIO168__FUNC_SPI_MI_0 (MTK_PIN_NO(168) | 4)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_SPI_CS_0 (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_MD_EINT4 (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_PWM3 (MTK_PIN_NO(169) | 3)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_MSDC1_CMD (MTK_PIN_NO(170) | 1)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_MSDC1_DAT0 (MTK_PIN_NO(171) | 1)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_MSDC1_DAT1 (MTK_PIN_NO(172) | 1)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_MSDC1_DAT2 (MTK_PIN_NO(173) | 1)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_MSDC1_DAT3 (MTK_PIN_NO(174) | 1)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_MSDC1_CLK (MTK_PIN_NO(175) | 1)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_PWRAP_SPIMI (MTK_PIN_NO(176) | 1)
+#define PINMUX_GPIO176__FUNC_PWRAP_SPIMO (MTK_PIN_NO(176) | 2)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define PINMUX_GPIO177__FUNC_PWRAP_SPIMO (MTK_PIN_NO(177) | 1)
+#define PINMUX_GPIO177__FUNC_PWRAP_SPIMI (MTK_PIN_NO(177) | 2)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define PINMUX_GPIO178__FUNC_PWRAP_SPICK (MTK_PIN_NO(178) | 1)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define PINMUX_GPIO179__FUNC_PWRAP_SPICS (MTK_PIN_NO(179) | 1)
+
+#define PINMUX_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define PINMUX_GPIO180__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(180) | 1)
+#define PINMUX_GPIO180__FUNC_I2S1_WS (MTK_PIN_NO(180) | 2)
+#define PINMUX_GPIO180__FUNC_I2S2_WS (MTK_PIN_NO(180) | 3)
+#define PINMUX_GPIO180__FUNC_I2S0_WS (MTK_PIN_NO(180) | 4)
+
+#define PINMUX_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define PINMUX_GPIO181__FUNC_AUD_DAT_MISO_1 (MTK_PIN_NO(181) | 1)
+#define PINMUX_GPIO181__FUNC_I2S1_BCK (MTK_PIN_NO(181) | 2)
+#define PINMUX_GPIO181__FUNC_I2S2_BCK (MTK_PIN_NO(181) | 3)
+#define PINMUX_GPIO181__FUNC_I2S0_BCK (MTK_PIN_NO(181) | 4)
+
+#define PINMUX_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define PINMUX_GPIO182__FUNC_AUD_DAT_MOSI_1 (MTK_PIN_NO(182) | 1)
+#define PINMUX_GPIO182__FUNC_I2S1_MCK (MTK_PIN_NO(182) | 2)
+#define PINMUX_GPIO182__FUNC_I2S2_MCK (MTK_PIN_NO(182) | 3)
+#define PINMUX_GPIO182__FUNC_I2S0_MCK (MTK_PIN_NO(182) | 4)
+
+#define PINMUX_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define PINMUX_GPIO183__FUNC_AUD_DAT_MISO_2 (MTK_PIN_NO(183) | 1)
+#define PINMUX_GPIO183__FUNC_I2S1_DO_1 (MTK_PIN_NO(183) | 2)
+#define PINMUX_GPIO183__FUNC_I2S2_DI_1 (MTK_PIN_NO(183) | 3)
+#define PINMUX_GPIO183__FUNC_I2S0_DO (MTK_PIN_NO(183) | 4)
+
+#define PINMUX_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define PINMUX_GPIO184__FUNC_AUD_DAT_MOSI_2 (MTK_PIN_NO(184) | 1)
+#define PINMUX_GPIO184__FUNC_I2S1_DO_2 (MTK_PIN_NO(184) | 2)
+#define PINMUX_GPIO184__FUNC_I2S2_DI_2 (MTK_PIN_NO(184) | 3)
+#define PINMUX_GPIO184__FUNC_I2S0_DI (MTK_PIN_NO(184) | 4)
+
+#define PINMUX_GPIO185__FUNC_GPIO185 (MTK_PIN_NO(185) | 0)
+#define PINMUX_GPIO185__FUNC_RTC32K_CK (MTK_PIN_NO(185) | 1)
+
+#define PINMUX_GPIO186__FUNC_GPIO186 (MTK_PIN_NO(186) | 0)
+#define PINMUX_GPIO186__FUNC_DISP_PWM0 (MTK_PIN_NO(186) | 1)
+#define PINMUX_GPIO186__FUNC_DISP_PWM1 (MTK_PIN_NO(186) | 2)
+
+#define PINMUX_GPIO187__FUNC_GPIO187 (MTK_PIN_NO(187) | 0)
+#define PINMUX_GPIO187__FUNC_SRCLKENAI (MTK_PIN_NO(187) | 1)
+
+#define PINMUX_GPIO188__FUNC_GPIO188 (MTK_PIN_NO(188) | 0)
+#define PINMUX_GPIO188__FUNC_SRCLKENAI2 (MTK_PIN_NO(188) | 1)
+
+#define PINMUX_GPIO189__FUNC_GPIO189 (MTK_PIN_NO(189) | 0)
+#define PINMUX_GPIO189__FUNC_SRCLKENA0 (MTK_PIN_NO(189) | 1)
+
+#define PINMUX_GPIO190__FUNC_GPIO190 (MTK_PIN_NO(190) | 0)
+#define PINMUX_GPIO190__FUNC_SRCLKENA1 (MTK_PIN_NO(190) | 1)
+
+#define PINMUX_GPIO191__FUNC_GPIO191 (MTK_PIN_NO(191) | 0)
+#define PINMUX_GPIO191__FUNC_WATCHDOG_AO (MTK_PIN_NO(191) | 1)
+
+#define PINMUX_GPIO192__FUNC_GPIO192 (MTK_PIN_NO(192) | 0)
+#define PINMUX_GPIO192__FUNC_I2S0_WS (MTK_PIN_NO(192) | 1)
+#define PINMUX_GPIO192__FUNC_I2S1_WS (MTK_PIN_NO(192) | 2)
+#define PINMUX_GPIO192__FUNC_I2S2_WS (MTK_PIN_NO(192) | 3)
+#define PINMUX_GPIO192__FUNC_NCEB1 (MTK_PIN_NO(192) | 4)
+
+#define PINMUX_GPIO193__FUNC_GPIO193 (MTK_PIN_NO(193) | 0)
+#define PINMUX_GPIO193__FUNC_I2S0_BCK (MTK_PIN_NO(193) | 1)
+#define PINMUX_GPIO193__FUNC_I2S1_BCK (MTK_PIN_NO(193) | 2)
+#define PINMUX_GPIO193__FUNC_I2S2_BCK (MTK_PIN_NO(193) | 3)
+#define PINMUX_GPIO193__FUNC_NRNB1 (MTK_PIN_NO(193) | 4)
+
+#define PINMUX_GPIO194__FUNC_GPIO194 (MTK_PIN_NO(194) | 0)
+#define PINMUX_GPIO194__FUNC_I2S0_MCK (MTK_PIN_NO(194) | 1)
+#define PINMUX_GPIO194__FUNC_I2S1_MCK (MTK_PIN_NO(194) | 2)
+#define PINMUX_GPIO194__FUNC_I2S2_MCK (MTK_PIN_NO(194) | 3)
+
+#define PINMUX_GPIO195__FUNC_GPIO195 (MTK_PIN_NO(195) | 0)
+#define PINMUX_GPIO195__FUNC_I2S0_DO (MTK_PIN_NO(195) | 1)
+#define PINMUX_GPIO195__FUNC_I2S1_DO_1 (MTK_PIN_NO(195) | 2)
+#define PINMUX_GPIO195__FUNC_I2S2_DI_1 (MTK_PIN_NO(195) | 3)
+
+#define PINMUX_GPIO196__FUNC_GPIO196 (MTK_PIN_NO(196) | 0)
+#define PINMUX_GPIO196__FUNC_I2S0_DI (MTK_PIN_NO(196) | 1)
+#define PINMUX_GPIO196__FUNC_I2S1_DO_2 (MTK_PIN_NO(196) | 2)
+#define PINMUX_GPIO196__FUNC_I2S2_DI_2 (MTK_PIN_NO(196) | 3)
+
+#endif
diff --git a/include/dt-bindings/pinctrl/mt8135-pinfunc.h b/include/dt-bindings/pinctrl/mt8135-pinfunc.h
new file mode 100644
index 000000000000..ce0cb5a440eb
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt8135-pinfunc.h
@@ -0,0 +1,1294 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Hongzhou.Yang <hongzhou.yang@mediatek.com>
+ */
+
+#ifndef __DTS_MT8135_PINFUNC_H
+#define __DTS_MT8135_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
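+
+/*
+ * Each PINMUX_* macro below packs a pin number and a mux function index
+ * into one value: MTK_PIN_NO(pin) (the pin number shifted left by 8, as
+ * defined in mt65xx.h) OR'd with a function selector 0-7, where function
+ * 0 is plain GPIO mode. A board device tree selects a function with the
+ * pinmux property; the snippet below is illustrative only and its node
+ * name is an example, not taken from any particular board file:
+ *
+ *	pins_cmd_dat {
+ *		pinmux = <MT8135_PIN_0_MSDC0_DAT7__FUNC_MSDC0_DAT7>;
+ *	};
+ */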
+
+#define MT8135_PIN_0_MSDC0_DAT7__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT8135_PIN_0_MSDC0_DAT7__FUNC_MSDC0_DAT7 (MTK_PIN_NO(0) | 1)
+#define MT8135_PIN_0_MSDC0_DAT7__FUNC_EINT49 (MTK_PIN_NO(0) | 2)
+#define MT8135_PIN_0_MSDC0_DAT7__FUNC_I2SOUT_DAT (MTK_PIN_NO(0) | 3)
+#define MT8135_PIN_0_MSDC0_DAT7__FUNC_DAC_DAT_OUT (MTK_PIN_NO(0) | 4)
+#define MT8135_PIN_0_MSDC0_DAT7__FUNC_PCM1_DO (MTK_PIN_NO(0) | 5)
+#define MT8135_PIN_0_MSDC0_DAT7__FUNC_SPI1_MO (MTK_PIN_NO(0) | 6)
+#define MT8135_PIN_0_MSDC0_DAT7__FUNC_NALE (MTK_PIN_NO(0) | 7)
+
+#define MT8135_PIN_1_MSDC0_DAT6__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT8135_PIN_1_MSDC0_DAT6__FUNC_MSDC0_DAT6 (MTK_PIN_NO(1) | 1)
+#define MT8135_PIN_1_MSDC0_DAT6__FUNC_EINT48 (MTK_PIN_NO(1) | 2)
+#define MT8135_PIN_1_MSDC0_DAT6__FUNC_I2SIN_WS (MTK_PIN_NO(1) | 3)
+#define MT8135_PIN_1_MSDC0_DAT6__FUNC_DAC_WS (MTK_PIN_NO(1) | 4)
+#define MT8135_PIN_1_MSDC0_DAT6__FUNC_PCM1_WS (MTK_PIN_NO(1) | 5)
+#define MT8135_PIN_1_MSDC0_DAT6__FUNC_SPI1_CSN (MTK_PIN_NO(1) | 6)
+#define MT8135_PIN_1_MSDC0_DAT6__FUNC_NCLE (MTK_PIN_NO(1) | 7)
+
+#define MT8135_PIN_2_MSDC0_DAT5__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT8135_PIN_2_MSDC0_DAT5__FUNC_MSDC0_DAT5 (MTK_PIN_NO(2) | 1)
+#define MT8135_PIN_2_MSDC0_DAT5__FUNC_EINT47 (MTK_PIN_NO(2) | 2)
+#define MT8135_PIN_2_MSDC0_DAT5__FUNC_I2SIN_CK (MTK_PIN_NO(2) | 3)
+#define MT8135_PIN_2_MSDC0_DAT5__FUNC_DAC_CK (MTK_PIN_NO(2) | 4)
+#define MT8135_PIN_2_MSDC0_DAT5__FUNC_PCM1_CK (MTK_PIN_NO(2) | 5)
+#define MT8135_PIN_2_MSDC0_DAT5__FUNC_SPI1_CLK (MTK_PIN_NO(2) | 6)
+#define MT8135_PIN_2_MSDC0_DAT5__FUNC_NLD4 (MTK_PIN_NO(2) | 7)
+
+#define MT8135_PIN_3_MSDC0_DAT4__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT8135_PIN_3_MSDC0_DAT4__FUNC_MSDC0_DAT4 (MTK_PIN_NO(3) | 1)
+#define MT8135_PIN_3_MSDC0_DAT4__FUNC_EINT46 (MTK_PIN_NO(3) | 2)
+#define MT8135_PIN_3_MSDC0_DAT4__FUNC_A_FUNC_CK (MTK_PIN_NO(3) | 3)
+#define MT8135_PIN_3_MSDC0_DAT4__FUNC_LSCE1B_2X (MTK_PIN_NO(3) | 6)
+#define MT8135_PIN_3_MSDC0_DAT4__FUNC_NLD5 (MTK_PIN_NO(3) | 7)
+
+#define MT8135_PIN_4_MSDC0_CMD__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT8135_PIN_4_MSDC0_CMD__FUNC_MSDC0_CMD (MTK_PIN_NO(4) | 1)
+#define MT8135_PIN_4_MSDC0_CMD__FUNC_EINT41 (MTK_PIN_NO(4) | 2)
+#define MT8135_PIN_4_MSDC0_CMD__FUNC_A_FUNC_DOUT_0 (MTK_PIN_NO(4) | 3)
+#define MT8135_PIN_4_MSDC0_CMD__FUNC_USB_TEST_IO_0 (MTK_PIN_NO(4) | 5)
+#define MT8135_PIN_4_MSDC0_CMD__FUNC_LRSTB_2X (MTK_PIN_NO(4) | 6)
+#define MT8135_PIN_4_MSDC0_CMD__FUNC_NRNB (MTK_PIN_NO(4) | 7)
+
+#define MT8135_PIN_5_MSDC0_CLK__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT8135_PIN_5_MSDC0_CLK__FUNC_MSDC0_CLK (MTK_PIN_NO(5) | 1)
+#define MT8135_PIN_5_MSDC0_CLK__FUNC_EINT40 (MTK_PIN_NO(5) | 2)
+#define MT8135_PIN_5_MSDC0_CLK__FUNC_A_FUNC_DOUT_1 (MTK_PIN_NO(5) | 3)
+#define MT8135_PIN_5_MSDC0_CLK__FUNC_USB_TEST_IO_1 (MTK_PIN_NO(5) | 5)
+#define MT8135_PIN_5_MSDC0_CLK__FUNC_LPTE (MTK_PIN_NO(5) | 6)
+#define MT8135_PIN_5_MSDC0_CLK__FUNC_NREB (MTK_PIN_NO(5) | 7)
+
+#define MT8135_PIN_6_MSDC0_DAT3__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT8135_PIN_6_MSDC0_DAT3__FUNC_MSDC0_DAT3 (MTK_PIN_NO(6) | 1)
+#define MT8135_PIN_6_MSDC0_DAT3__FUNC_EINT45 (MTK_PIN_NO(6) | 2)
+#define MT8135_PIN_6_MSDC0_DAT3__FUNC_A_FUNC_DOUT_2 (MTK_PIN_NO(6) | 3)
+#define MT8135_PIN_6_MSDC0_DAT3__FUNC_USB_TEST_IO_2 (MTK_PIN_NO(6) | 5)
+#define MT8135_PIN_6_MSDC0_DAT3__FUNC_LSCE0B_2X (MTK_PIN_NO(6) | 6)
+#define MT8135_PIN_6_MSDC0_DAT3__FUNC_NLD7 (MTK_PIN_NO(6) | 7)
+
+#define MT8135_PIN_7_MSDC0_DAT2__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define MT8135_PIN_7_MSDC0_DAT2__FUNC_MSDC0_DAT2 (MTK_PIN_NO(7) | 1)
+#define MT8135_PIN_7_MSDC0_DAT2__FUNC_EINT44 (MTK_PIN_NO(7) | 2)
+#define MT8135_PIN_7_MSDC0_DAT2__FUNC_A_FUNC_DOUT_3 (MTK_PIN_NO(7) | 3)
+#define MT8135_PIN_7_MSDC0_DAT2__FUNC_USB_TEST_IO_3 (MTK_PIN_NO(7) | 5)
+#define MT8135_PIN_7_MSDC0_DAT2__FUNC_LSA0_2X (MTK_PIN_NO(7) | 6)
+#define MT8135_PIN_7_MSDC0_DAT2__FUNC_NLD14 (MTK_PIN_NO(7) | 7)
+
+#define MT8135_PIN_8_MSDC0_DAT1__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define MT8135_PIN_8_MSDC0_DAT1__FUNC_MSDC0_DAT1 (MTK_PIN_NO(8) | 1)
+#define MT8135_PIN_8_MSDC0_DAT1__FUNC_EINT43 (MTK_PIN_NO(8) | 2)
+#define MT8135_PIN_8_MSDC0_DAT1__FUNC_USB_TEST_IO_4 (MTK_PIN_NO(8) | 5)
+#define MT8135_PIN_8_MSDC0_DAT1__FUNC_LSCK_2X (MTK_PIN_NO(8) | 6)
+#define MT8135_PIN_8_MSDC0_DAT1__FUNC_NLD11 (MTK_PIN_NO(8) | 7)
+
+#define MT8135_PIN_9_MSDC0_DAT0__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define MT8135_PIN_9_MSDC0_DAT0__FUNC_MSDC0_DAT0 (MTK_PIN_NO(9) | 1)
+#define MT8135_PIN_9_MSDC0_DAT0__FUNC_EINT42 (MTK_PIN_NO(9) | 2)
+#define MT8135_PIN_9_MSDC0_DAT0__FUNC_USB_TEST_IO_5 (MTK_PIN_NO(9) | 5)
+#define MT8135_PIN_9_MSDC0_DAT0__FUNC_LSDA_2X (MTK_PIN_NO(9) | 6)
+
+#define MT8135_PIN_10_NCEB0__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define MT8135_PIN_10_NCEB0__FUNC_NCEB0 (MTK_PIN_NO(10) | 1)
+#define MT8135_PIN_10_NCEB0__FUNC_EINT139 (MTK_PIN_NO(10) | 2)
+#define MT8135_PIN_10_NCEB0__FUNC_TESTA_OUT4 (MTK_PIN_NO(10) | 7)
+
+#define MT8135_PIN_11_NCEB1__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define MT8135_PIN_11_NCEB1__FUNC_NCEB1 (MTK_PIN_NO(11) | 1)
+#define MT8135_PIN_11_NCEB1__FUNC_EINT140 (MTK_PIN_NO(11) | 2)
+#define MT8135_PIN_11_NCEB1__FUNC_USB_DRVVBUS (MTK_PIN_NO(11) | 6)
+#define MT8135_PIN_11_NCEB1__FUNC_TESTA_OUT5 (MTK_PIN_NO(11) | 7)
+
+#define MT8135_PIN_12_NRNB__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define MT8135_PIN_12_NRNB__FUNC_NRNB (MTK_PIN_NO(12) | 1)
+#define MT8135_PIN_12_NRNB__FUNC_EINT141 (MTK_PIN_NO(12) | 2)
+#define MT8135_PIN_12_NRNB__FUNC_A_FUNC_DOUT_4 (MTK_PIN_NO(12) | 3)
+#define MT8135_PIN_12_NRNB__FUNC_TESTA_OUT6 (MTK_PIN_NO(12) | 7)
+
+#define MT8135_PIN_13_NCLE__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define MT8135_PIN_13_NCLE__FUNC_NCLE (MTK_PIN_NO(13) | 1)
+#define MT8135_PIN_13_NCLE__FUNC_EINT142 (MTK_PIN_NO(13) | 2)
+#define MT8135_PIN_13_NCLE__FUNC_A_FUNC_DOUT_5 (MTK_PIN_NO(13) | 3)
+#define MT8135_PIN_13_NCLE__FUNC_CM2PDN_1X (MTK_PIN_NO(13) | 4)
+#define MT8135_PIN_13_NCLE__FUNC_NALE (MTK_PIN_NO(13) | 6)
+#define MT8135_PIN_13_NCLE__FUNC_TESTA_OUT7 (MTK_PIN_NO(13) | 7)
+
+#define MT8135_PIN_14_NALE__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define MT8135_PIN_14_NALE__FUNC_NALE (MTK_PIN_NO(14) | 1)
+#define MT8135_PIN_14_NALE__FUNC_EINT143 (MTK_PIN_NO(14) | 2)
+#define MT8135_PIN_14_NALE__FUNC_A_FUNC_DOUT_6 (MTK_PIN_NO(14) | 3)
+#define MT8135_PIN_14_NALE__FUNC_CM2MCLK_1X (MTK_PIN_NO(14) | 4)
+#define MT8135_PIN_14_NALE__FUNC_IRDA_RXD (MTK_PIN_NO(14) | 5)
+#define MT8135_PIN_14_NALE__FUNC_NCLE (MTK_PIN_NO(14) | 6)
+#define MT8135_PIN_14_NALE__FUNC_TESTA_OUT8 (MTK_PIN_NO(14) | 7)
+
+#define MT8135_PIN_15_NREB__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define MT8135_PIN_15_NREB__FUNC_NREB (MTK_PIN_NO(15) | 1)
+#define MT8135_PIN_15_NREB__FUNC_EINT144 (MTK_PIN_NO(15) | 2)
+#define MT8135_PIN_15_NREB__FUNC_A_FUNC_DOUT_7 (MTK_PIN_NO(15) | 3)
+#define MT8135_PIN_15_NREB__FUNC_CM2RST_1X (MTK_PIN_NO(15) | 4)
+#define MT8135_PIN_15_NREB__FUNC_IRDA_TXD (MTK_PIN_NO(15) | 5)
+#define MT8135_PIN_15_NREB__FUNC_TESTA_OUT9 (MTK_PIN_NO(15) | 7)
+
+#define MT8135_PIN_16_NWEB__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define MT8135_PIN_16_NWEB__FUNC_NWEB (MTK_PIN_NO(16) | 1)
+#define MT8135_PIN_16_NWEB__FUNC_EINT145 (MTK_PIN_NO(16) | 2)
+#define MT8135_PIN_16_NWEB__FUNC_A_FUNC_DIN_0 (MTK_PIN_NO(16) | 3)
+#define MT8135_PIN_16_NWEB__FUNC_CM2PCLK_1X (MTK_PIN_NO(16) | 4)
+#define MT8135_PIN_16_NWEB__FUNC_IRDA_PDN (MTK_PIN_NO(16) | 5)
+#define MT8135_PIN_16_NWEB__FUNC_TESTA_OUT10 (MTK_PIN_NO(16) | 7)
+
+#define MT8135_PIN_17_NLD0__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define MT8135_PIN_17_NLD0__FUNC_NLD0 (MTK_PIN_NO(17) | 1)
+#define MT8135_PIN_17_NLD0__FUNC_EINT146 (MTK_PIN_NO(17) | 2)
+#define MT8135_PIN_17_NLD0__FUNC_A_FUNC_DIN_1 (MTK_PIN_NO(17) | 3)
+#define MT8135_PIN_17_NLD0__FUNC_CM2DAT_1X_0 (MTK_PIN_NO(17) | 4)
+#define MT8135_PIN_17_NLD0__FUNC_I2SIN_CK (MTK_PIN_NO(17) | 5)
+#define MT8135_PIN_17_NLD0__FUNC_DAC_CK (MTK_PIN_NO(17) | 6)
+#define MT8135_PIN_17_NLD0__FUNC_TESTA_OUT11 (MTK_PIN_NO(17) | 7)
+
+#define MT8135_PIN_18_NLD1__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define MT8135_PIN_18_NLD1__FUNC_NLD1 (MTK_PIN_NO(18) | 1)
+#define MT8135_PIN_18_NLD1__FUNC_EINT147 (MTK_PIN_NO(18) | 2)
+#define MT8135_PIN_18_NLD1__FUNC_A_FUNC_DIN_2 (MTK_PIN_NO(18) | 3)
+#define MT8135_PIN_18_NLD1__FUNC_CM2DAT_1X_1 (MTK_PIN_NO(18) | 4)
+#define MT8135_PIN_18_NLD1__FUNC_I2SIN_WS (MTK_PIN_NO(18) | 5)
+#define MT8135_PIN_18_NLD1__FUNC_DAC_WS (MTK_PIN_NO(18) | 6)
+#define MT8135_PIN_18_NLD1__FUNC_TESTA_OUT12 (MTK_PIN_NO(18) | 7)
+
+#define MT8135_PIN_19_NLD2__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define MT8135_PIN_19_NLD2__FUNC_NLD2 (MTK_PIN_NO(19) | 1)
+#define MT8135_PIN_19_NLD2__FUNC_EINT148 (MTK_PIN_NO(19) | 2)
+#define MT8135_PIN_19_NLD2__FUNC_A_FUNC_DIN_3 (MTK_PIN_NO(19) | 3)
+#define MT8135_PIN_19_NLD2__FUNC_CM2DAT_1X_2 (MTK_PIN_NO(19) | 4)
+#define MT8135_PIN_19_NLD2__FUNC_I2SOUT_DAT (MTK_PIN_NO(19) | 5)
+#define MT8135_PIN_19_NLD2__FUNC_DAC_DAT_OUT (MTK_PIN_NO(19) | 6)
+#define MT8135_PIN_19_NLD2__FUNC_TESTA_OUT13 (MTK_PIN_NO(19) | 7)
+
+#define MT8135_PIN_20_NLD3__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define MT8135_PIN_20_NLD3__FUNC_NLD3 (MTK_PIN_NO(20) | 1)
+#define MT8135_PIN_20_NLD3__FUNC_EINT149 (MTK_PIN_NO(20) | 2)
+#define MT8135_PIN_20_NLD3__FUNC_A_FUNC_DIN_4 (MTK_PIN_NO(20) | 3)
+#define MT8135_PIN_20_NLD3__FUNC_CM2DAT_1X_3 (MTK_PIN_NO(20) | 4)
+#define MT8135_PIN_20_NLD3__FUNC_TESTA_OUT14 (MTK_PIN_NO(20) | 7)
+
+#define MT8135_PIN_21_NLD4__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define MT8135_PIN_21_NLD4__FUNC_NLD4 (MTK_PIN_NO(21) | 1)
+#define MT8135_PIN_21_NLD4__FUNC_EINT150 (MTK_PIN_NO(21) | 2)
+#define MT8135_PIN_21_NLD4__FUNC_A_FUNC_DIN_5 (MTK_PIN_NO(21) | 3)
+#define MT8135_PIN_21_NLD4__FUNC_CM2DAT_1X_4 (MTK_PIN_NO(21) | 4)
+#define MT8135_PIN_21_NLD4__FUNC_TESTA_OUT15 (MTK_PIN_NO(21) | 7)
+
+#define MT8135_PIN_22_NLD5__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define MT8135_PIN_22_NLD5__FUNC_NLD5 (MTK_PIN_NO(22) | 1)
+#define MT8135_PIN_22_NLD5__FUNC_EINT151 (MTK_PIN_NO(22) | 2)
+#define MT8135_PIN_22_NLD5__FUNC_A_FUNC_DIN_6 (MTK_PIN_NO(22) | 3)
+#define MT8135_PIN_22_NLD5__FUNC_CM2DAT_1X_5 (MTK_PIN_NO(22) | 4)
+#define MT8135_PIN_22_NLD5__FUNC_TESTA_OUT16 (MTK_PIN_NO(22) | 7)
+
+#define MT8135_PIN_23_NLD6__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define MT8135_PIN_23_NLD6__FUNC_NLD6 (MTK_PIN_NO(23) | 1)
+#define MT8135_PIN_23_NLD6__FUNC_EINT152 (MTK_PIN_NO(23) | 2)
+#define MT8135_PIN_23_NLD6__FUNC_A_FUNC_DIN_7 (MTK_PIN_NO(23) | 3)
+#define MT8135_PIN_23_NLD6__FUNC_CM2DAT_1X_6 (MTK_PIN_NO(23) | 4)
+#define MT8135_PIN_23_NLD6__FUNC_TESTA_OUT17 (MTK_PIN_NO(23) | 7)
+
+#define MT8135_PIN_24_NLD7__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define MT8135_PIN_24_NLD7__FUNC_NLD7 (MTK_PIN_NO(24) | 1)
+#define MT8135_PIN_24_NLD7__FUNC_EINT153 (MTK_PIN_NO(24) | 2)
+#define MT8135_PIN_24_NLD7__FUNC_A_FUNC_DIN_8 (MTK_PIN_NO(24) | 3)
+#define MT8135_PIN_24_NLD7__FUNC_CM2DAT_1X_7 (MTK_PIN_NO(24) | 4)
+#define MT8135_PIN_24_NLD7__FUNC_TESTA_OUT18 (MTK_PIN_NO(24) | 7)
+
+#define MT8135_PIN_25_NLD8__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define MT8135_PIN_25_NLD8__FUNC_NLD8 (MTK_PIN_NO(25) | 1)
+#define MT8135_PIN_25_NLD8__FUNC_EINT154 (MTK_PIN_NO(25) | 2)
+#define MT8135_PIN_25_NLD8__FUNC_CM2DAT_1X_8 (MTK_PIN_NO(25) | 4)
+
+#define MT8135_PIN_26_NLD9__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define MT8135_PIN_26_NLD9__FUNC_NLD9 (MTK_PIN_NO(26) | 1)
+#define MT8135_PIN_26_NLD9__FUNC_EINT155 (MTK_PIN_NO(26) | 2)
+#define MT8135_PIN_26_NLD9__FUNC_CM2DAT_1X_9 (MTK_PIN_NO(26) | 4)
+#define MT8135_PIN_26_NLD9__FUNC_PWM1 (MTK_PIN_NO(26) | 5)
+
+#define MT8135_PIN_27_NLD10__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define MT8135_PIN_27_NLD10__FUNC_NLD10 (MTK_PIN_NO(27) | 1)
+#define MT8135_PIN_27_NLD10__FUNC_EINT156 (MTK_PIN_NO(27) | 2)
+#define MT8135_PIN_27_NLD10__FUNC_CM2VSYNC_1X (MTK_PIN_NO(27) | 4)
+#define MT8135_PIN_27_NLD10__FUNC_PWM2 (MTK_PIN_NO(27) | 5)
+
+#define MT8135_PIN_28_NLD11__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define MT8135_PIN_28_NLD11__FUNC_NLD11 (MTK_PIN_NO(28) | 1)
+#define MT8135_PIN_28_NLD11__FUNC_EINT157 (MTK_PIN_NO(28) | 2)
+#define MT8135_PIN_28_NLD11__FUNC_CM2HSYNC_1X (MTK_PIN_NO(28) | 4)
+#define MT8135_PIN_28_NLD11__FUNC_PWM3 (MTK_PIN_NO(28) | 5)
+
+#define MT8135_PIN_29_NLD12__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define MT8135_PIN_29_NLD12__FUNC_NLD12 (MTK_PIN_NO(29) | 1)
+#define MT8135_PIN_29_NLD12__FUNC_EINT158 (MTK_PIN_NO(29) | 2)
+#define MT8135_PIN_29_NLD12__FUNC_I2SIN_CK (MTK_PIN_NO(29) | 3)
+#define MT8135_PIN_29_NLD12__FUNC_DAC_CK (MTK_PIN_NO(29) | 4)
+#define MT8135_PIN_29_NLD12__FUNC_PCM1_CK (MTK_PIN_NO(29) | 5)
+
+#define MT8135_PIN_30_NLD13__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define MT8135_PIN_30_NLD13__FUNC_NLD13 (MTK_PIN_NO(30) | 1)
+#define MT8135_PIN_30_NLD13__FUNC_EINT159 (MTK_PIN_NO(30) | 2)
+#define MT8135_PIN_30_NLD13__FUNC_I2SIN_WS (MTK_PIN_NO(30) | 3)
+#define MT8135_PIN_30_NLD13__FUNC_DAC_WS (MTK_PIN_NO(30) | 4)
+#define MT8135_PIN_30_NLD13__FUNC_PCM1_WS (MTK_PIN_NO(30) | 5)
+
+#define MT8135_PIN_31_NLD14__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define MT8135_PIN_31_NLD14__FUNC_NLD14 (MTK_PIN_NO(31) | 1)
+#define MT8135_PIN_31_NLD14__FUNC_EINT160 (MTK_PIN_NO(31) | 2)
+#define MT8135_PIN_31_NLD14__FUNC_I2SOUT_DAT (MTK_PIN_NO(31) | 3)
+#define MT8135_PIN_31_NLD14__FUNC_DAC_DAT_OUT (MTK_PIN_NO(31) | 4)
+#define MT8135_PIN_31_NLD14__FUNC_PCM1_DO (MTK_PIN_NO(31) | 5)
+
+#define MT8135_PIN_32_NLD15__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define MT8135_PIN_32_NLD15__FUNC_NLD15 (MTK_PIN_NO(32) | 1)
+#define MT8135_PIN_32_NLD15__FUNC_EINT161 (MTK_PIN_NO(32) | 2)
+#define MT8135_PIN_32_NLD15__FUNC_DISP_PWM (MTK_PIN_NO(32) | 3)
+#define MT8135_PIN_32_NLD15__FUNC_PWM4 (MTK_PIN_NO(32) | 4)
+#define MT8135_PIN_32_NLD15__FUNC_PCM1_DI (MTK_PIN_NO(32) | 5)
+
+#define MT8135_PIN_33_MSDC0_RSTB__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define MT8135_PIN_33_MSDC0_RSTB__FUNC_MSDC0_RSTB (MTK_PIN_NO(33) | 1)
+#define MT8135_PIN_33_MSDC0_RSTB__FUNC_EINT50 (MTK_PIN_NO(33) | 2)
+#define MT8135_PIN_33_MSDC0_RSTB__FUNC_I2SIN_DAT (MTK_PIN_NO(33) | 3)
+#define MT8135_PIN_33_MSDC0_RSTB__FUNC_PCM1_DI (MTK_PIN_NO(33) | 5)
+#define MT8135_PIN_33_MSDC0_RSTB__FUNC_SPI1_MI (MTK_PIN_NO(33) | 6)
+#define MT8135_PIN_33_MSDC0_RSTB__FUNC_NLD10 (MTK_PIN_NO(33) | 7)
+
+#define MT8135_PIN_34_IDDIG__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define MT8135_PIN_34_IDDIG__FUNC_IDDIG (MTK_PIN_NO(34) | 1)
+#define MT8135_PIN_34_IDDIG__FUNC_EINT34 (MTK_PIN_NO(34) | 2)
+
+#define MT8135_PIN_35_SCL3__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define MT8135_PIN_35_SCL3__FUNC_SCL3 (MTK_PIN_NO(35) | 1)
+#define MT8135_PIN_35_SCL3__FUNC_EINT96 (MTK_PIN_NO(35) | 2)
+#define MT8135_PIN_35_SCL3__FUNC_CLKM6 (MTK_PIN_NO(35) | 3)
+#define MT8135_PIN_35_SCL3__FUNC_PWM6 (MTK_PIN_NO(35) | 4)
+
+#define MT8135_PIN_36_SDA3__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define MT8135_PIN_36_SDA3__FUNC_SDA3 (MTK_PIN_NO(36) | 1)
+#define MT8135_PIN_36_SDA3__FUNC_EINT97 (MTK_PIN_NO(36) | 2)
+
+#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_AUD_CLK (MTK_PIN_NO(37) | 1)
+#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_ADC_CK (MTK_PIN_NO(37) | 2)
+#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_HDMI_SDATA0 (MTK_PIN_NO(37) | 3)
+#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_EINT19 (MTK_PIN_NO(37) | 4)
+#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_USB_TEST_IO_6 (MTK_PIN_NO(37) | 5)
+#define MT8135_PIN_37_AUD_CLK_MOSI__FUNC_TESTA_OUT19 (MTK_PIN_NO(37) | 7)
+
+#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_AUD_DAT_MOSI (MTK_PIN_NO(38) | 1)
+#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_ADC_WS (MTK_PIN_NO(38) | 2)
+#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_AUD_DAT_MISO (MTK_PIN_NO(38) | 3)
+#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_EINT21 (MTK_PIN_NO(38) | 4)
+#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_USB_TEST_IO_7 (MTK_PIN_NO(38) | 5)
+#define MT8135_PIN_38_AUD_DAT_MOSI__FUNC_TESTA_OUT20 (MTK_PIN_NO(38) | 7)
+
+#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_AUD_DAT_MISO (MTK_PIN_NO(39) | 1)
+#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_ADC_DAT_IN (MTK_PIN_NO(39) | 2)
+#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_AUD_DAT_MOSI (MTK_PIN_NO(39) | 3)
+#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_EINT20 (MTK_PIN_NO(39) | 4)
+#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_USB_TEST_IO_8 (MTK_PIN_NO(39) | 5)
+#define MT8135_PIN_39_AUD_DAT_MISO__FUNC_TESTA_OUT21 (MTK_PIN_NO(39) | 7)
+
+#define MT8135_PIN_40_DAC_CLK__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define MT8135_PIN_40_DAC_CLK__FUNC_DAC_CK (MTK_PIN_NO(40) | 1)
+#define MT8135_PIN_40_DAC_CLK__FUNC_EINT22 (MTK_PIN_NO(40) | 2)
+#define MT8135_PIN_40_DAC_CLK__FUNC_HDMI_SDATA1 (MTK_PIN_NO(40) | 3)
+#define MT8135_PIN_40_DAC_CLK__FUNC_USB_TEST_IO_9 (MTK_PIN_NO(40) | 5)
+#define MT8135_PIN_40_DAC_CLK__FUNC_TESTA_OUT22 (MTK_PIN_NO(40) | 7)
+
+#define MT8135_PIN_41_DAC_WS__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define MT8135_PIN_41_DAC_WS__FUNC_DAC_WS (MTK_PIN_NO(41) | 1)
+#define MT8135_PIN_41_DAC_WS__FUNC_EINT24 (MTK_PIN_NO(41) | 2)
+#define MT8135_PIN_41_DAC_WS__FUNC_HDMI_SDATA2 (MTK_PIN_NO(41) | 3)
+#define MT8135_PIN_41_DAC_WS__FUNC_USB_TEST_IO_10 (MTK_PIN_NO(41) | 5)
+#define MT8135_PIN_41_DAC_WS__FUNC_TESTA_OUT23 (MTK_PIN_NO(41) | 7)
+
+#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_DAC_DAT_OUT (MTK_PIN_NO(42) | 1)
+#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_EINT23 (MTK_PIN_NO(42) | 2)
+#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_HDMI_SDATA3 (MTK_PIN_NO(42) | 3)
+#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_USB_TEST_IO_11 (MTK_PIN_NO(42) | 5)
+#define MT8135_PIN_42_DAC_DAT_OUT__FUNC_TESTA_OUT24 (MTK_PIN_NO(42) | 7)
+
+#define MT8135_PIN_43_PWRAP_SPI0_MO__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define MT8135_PIN_43_PWRAP_SPI0_MO__FUNC_PWRAP_SPIDI (MTK_PIN_NO(43) | 1)
+#define MT8135_PIN_43_PWRAP_SPI0_MO__FUNC_EINT29 (MTK_PIN_NO(43) | 2)
+
+#define MT8135_PIN_44_PWRAP_SPI0_MI__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define MT8135_PIN_44_PWRAP_SPI0_MI__FUNC_PWRAP_SPIDO (MTK_PIN_NO(44) | 1)
+#define MT8135_PIN_44_PWRAP_SPI0_MI__FUNC_EINT28 (MTK_PIN_NO(44) | 2)
+
+#define MT8135_PIN_45_PWRAP_SPI0_CSN__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define MT8135_PIN_45_PWRAP_SPI0_CSN__FUNC_PWRAP_SPICS_B_I (MTK_PIN_NO(45) | 1)
+#define MT8135_PIN_45_PWRAP_SPI0_CSN__FUNC_EINT27 (MTK_PIN_NO(45) | 2)
+
+#define MT8135_PIN_46_PWRAP_SPI0_CLK__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define MT8135_PIN_46_PWRAP_SPI0_CLK__FUNC_PWRAP_SPICK_I (MTK_PIN_NO(46) | 1)
+#define MT8135_PIN_46_PWRAP_SPI0_CLK__FUNC_EINT26 (MTK_PIN_NO(46) | 2)
+
+#define MT8135_PIN_47_PWRAP_EVENT__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define MT8135_PIN_47_PWRAP_EVENT__FUNC_PWRAP_EVENT_IN (MTK_PIN_NO(47) | 1)
+#define MT8135_PIN_47_PWRAP_EVENT__FUNC_EINT25 (MTK_PIN_NO(47) | 2)
+#define MT8135_PIN_47_PWRAP_EVENT__FUNC_TESTA_OUT2 (MTK_PIN_NO(47) | 7)
+
+#define MT8135_PIN_48_RTC32K_CK__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define MT8135_PIN_48_RTC32K_CK__FUNC_RTC32K_CK (MTK_PIN_NO(48) | 1)
+
+#define MT8135_PIN_49_WATCHDOG__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define MT8135_PIN_49_WATCHDOG__FUNC_WATCHDOG (MTK_PIN_NO(49) | 1)
+#define MT8135_PIN_49_WATCHDOG__FUNC_EINT36 (MTK_PIN_NO(49) | 2)
+
+#define MT8135_PIN_50_SRCLKENA__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define MT8135_PIN_50_SRCLKENA__FUNC_SRCLKENA (MTK_PIN_NO(50) | 1)
+#define MT8135_PIN_50_SRCLKENA__FUNC_EINT38 (MTK_PIN_NO(50) | 2)
+
+#define MT8135_PIN_51_SRCVOLTEN__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define MT8135_PIN_51_SRCVOLTEN__FUNC_SRCVOLTEN (MTK_PIN_NO(51) | 1)
+#define MT8135_PIN_51_SRCVOLTEN__FUNC_EINT37 (MTK_PIN_NO(51) | 2)
+
+#define MT8135_PIN_52_EINT0__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define MT8135_PIN_52_EINT0__FUNC_EINT0 (MTK_PIN_NO(52) | 1)
+#define MT8135_PIN_52_EINT0__FUNC_PWM1 (MTK_PIN_NO(52) | 2)
+#define MT8135_PIN_52_EINT0__FUNC_CLKM0 (MTK_PIN_NO(52) | 3)
+#define MT8135_PIN_52_EINT0__FUNC_SPDIF_OUT (MTK_PIN_NO(52) | 4)
+#define MT8135_PIN_52_EINT0__FUNC_USB_TEST_IO_12 (MTK_PIN_NO(52) | 5)
+#define MT8135_PIN_52_EINT0__FUNC_USB_SCL (MTK_PIN_NO(52) | 7)
+
+#define MT8135_PIN_53_URXD2__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define MT8135_PIN_53_URXD2__FUNC_URXD2 (MTK_PIN_NO(53) | 1)
+#define MT8135_PIN_53_URXD2__FUNC_EINT83 (MTK_PIN_NO(53) | 2)
+#define MT8135_PIN_53_URXD2__FUNC_HDMI_LRCK (MTK_PIN_NO(53) | 4)
+#define MT8135_PIN_53_URXD2__FUNC_CLKM3 (MTK_PIN_NO(53) | 5)
+#define MT8135_PIN_53_URXD2__FUNC_UTXD2 (MTK_PIN_NO(53) | 7)
+
+#define MT8135_PIN_54_UTXD2__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define MT8135_PIN_54_UTXD2__FUNC_UTXD2 (MTK_PIN_NO(54) | 1)
+#define MT8135_PIN_54_UTXD2__FUNC_EINT82 (MTK_PIN_NO(54) | 2)
+#define MT8135_PIN_54_UTXD2__FUNC_HDMI_BCK_OUT (MTK_PIN_NO(54) | 4)
+#define MT8135_PIN_54_UTXD2__FUNC_CLKM2 (MTK_PIN_NO(54) | 5)
+#define MT8135_PIN_54_UTXD2__FUNC_URXD2 (MTK_PIN_NO(54) | 7)
+
+#define MT8135_PIN_55_UCTS2__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define MT8135_PIN_55_UCTS2__FUNC_UCTS2 (MTK_PIN_NO(55) | 1)
+#define MT8135_PIN_55_UCTS2__FUNC_EINT84 (MTK_PIN_NO(55) | 2)
+#define MT8135_PIN_55_UCTS2__FUNC_PWM1 (MTK_PIN_NO(55) | 5)
+#define MT8135_PIN_55_UCTS2__FUNC_URTS2 (MTK_PIN_NO(55) | 7)
+
+#define MT8135_PIN_56_URTS2__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define MT8135_PIN_56_URTS2__FUNC_URTS2 (MTK_PIN_NO(56) | 1)
+#define MT8135_PIN_56_URTS2__FUNC_EINT85 (MTK_PIN_NO(56) | 2)
+#define MT8135_PIN_56_URTS2__FUNC_PWM2 (MTK_PIN_NO(56) | 5)
+#define MT8135_PIN_56_URTS2__FUNC_UCTS2 (MTK_PIN_NO(56) | 7)
+
+#define MT8135_PIN_57_JTCK__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define MT8135_PIN_57_JTCK__FUNC_JTCK (MTK_PIN_NO(57) | 1)
+#define MT8135_PIN_57_JTCK__FUNC_EINT188 (MTK_PIN_NO(57) | 2)
+#define MT8135_PIN_57_JTCK__FUNC_DSP1_ICK (MTK_PIN_NO(57) | 3)
+
+#define MT8135_PIN_58_JTDO__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define MT8135_PIN_58_JTDO__FUNC_JTDO (MTK_PIN_NO(58) | 1)
+#define MT8135_PIN_58_JTDO__FUNC_EINT190 (MTK_PIN_NO(58) | 2)
+#define MT8135_PIN_58_JTDO__FUNC_DSP2_IMS (MTK_PIN_NO(58) | 3)
+
+#define MT8135_PIN_59_JTRST_B__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define MT8135_PIN_59_JTRST_B__FUNC_JTRST_B (MTK_PIN_NO(59) | 1)
+#define MT8135_PIN_59_JTRST_B__FUNC_EINT0 (MTK_PIN_NO(59) | 2)
+#define MT8135_PIN_59_JTRST_B__FUNC_DSP2_ICK (MTK_PIN_NO(59) | 3)
+
+#define MT8135_PIN_60_JTDI__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define MT8135_PIN_60_JTDI__FUNC_JTDI (MTK_PIN_NO(60) | 1)
+#define MT8135_PIN_60_JTDI__FUNC_EINT189 (MTK_PIN_NO(60) | 2)
+#define MT8135_PIN_60_JTDI__FUNC_DSP1_IMS (MTK_PIN_NO(60) | 3)
+
+#define MT8135_PIN_61_JRTCK__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define MT8135_PIN_61_JRTCK__FUNC_JRTCK (MTK_PIN_NO(61) | 1)
+#define MT8135_PIN_61_JRTCK__FUNC_EINT187 (MTK_PIN_NO(61) | 2)
+#define MT8135_PIN_61_JRTCK__FUNC_DSP1_ID (MTK_PIN_NO(61) | 3)
+
+#define MT8135_PIN_62_JTMS__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define MT8135_PIN_62_JTMS__FUNC_JTMS (MTK_PIN_NO(62) | 1)
+#define MT8135_PIN_62_JTMS__FUNC_EINT191 (MTK_PIN_NO(62) | 2)
+#define MT8135_PIN_62_JTMS__FUNC_DSP2_ID (MTK_PIN_NO(62) | 3)
+
+#define MT8135_PIN_63_MSDC1_INSI__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define MT8135_PIN_63_MSDC1_INSI__FUNC_MSDC1_INSI (MTK_PIN_NO(63) | 1)
+#define MT8135_PIN_63_MSDC1_INSI__FUNC_SCL5 (MTK_PIN_NO(63) | 3)
+#define MT8135_PIN_63_MSDC1_INSI__FUNC_PWM6 (MTK_PIN_NO(63) | 4)
+#define MT8135_PIN_63_MSDC1_INSI__FUNC_CLKM5 (MTK_PIN_NO(63) | 5)
+#define MT8135_PIN_63_MSDC1_INSI__FUNC_TESTB_OUT6 (MTK_PIN_NO(63) | 7)
+
+#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_MSDC1_SDWPI (MTK_PIN_NO(64) | 1)
+#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_EINT58 (MTK_PIN_NO(64) | 2)
+#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_SDA5 (MTK_PIN_NO(64) | 3)
+#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_PWM7 (MTK_PIN_NO(64) | 4)
+#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_CLKM6 (MTK_PIN_NO(64) | 5)
+#define MT8135_PIN_64_MSDC1_SDWPI__FUNC_TESTB_OUT7 (MTK_PIN_NO(64) | 7)
+
+#define MT8135_PIN_65_MSDC2_INSI__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define MT8135_PIN_65_MSDC2_INSI__FUNC_MSDC2_INSI (MTK_PIN_NO(65) | 1)
+#define MT8135_PIN_65_MSDC2_INSI__FUNC_USB_TEST_IO_27 (MTK_PIN_NO(65) | 5)
+#define MT8135_PIN_65_MSDC2_INSI__FUNC_TESTA_OUT3 (MTK_PIN_NO(65) | 7)
+
+#define MT8135_PIN_66_MSDC2_SDWPI__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define MT8135_PIN_66_MSDC2_SDWPI__FUNC_MSDC2_SDWPI (MTK_PIN_NO(66) | 1)
+#define MT8135_PIN_66_MSDC2_SDWPI__FUNC_EINT66 (MTK_PIN_NO(66) | 2)
+#define MT8135_PIN_66_MSDC2_SDWPI__FUNC_USB_TEST_IO_28 (MTK_PIN_NO(66) | 5)
+
+#define MT8135_PIN_67_URXD4__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define MT8135_PIN_67_URXD4__FUNC_URXD4 (MTK_PIN_NO(67) | 1)
+#define MT8135_PIN_67_URXD4__FUNC_EINT89 (MTK_PIN_NO(67) | 2)
+#define MT8135_PIN_67_URXD4__FUNC_URXD1 (MTK_PIN_NO(67) | 3)
+#define MT8135_PIN_67_URXD4__FUNC_UTXD4 (MTK_PIN_NO(67) | 6)
+#define MT8135_PIN_67_URXD4__FUNC_TESTB_OUT10 (MTK_PIN_NO(67) | 7)
+
+#define MT8135_PIN_68_UTXD4__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define MT8135_PIN_68_UTXD4__FUNC_UTXD4 (MTK_PIN_NO(68) | 1)
+#define MT8135_PIN_68_UTXD4__FUNC_EINT88 (MTK_PIN_NO(68) | 2)
+#define MT8135_PIN_68_UTXD4__FUNC_UTXD1 (MTK_PIN_NO(68) | 3)
+#define MT8135_PIN_68_UTXD4__FUNC_URXD4 (MTK_PIN_NO(68) | 6)
+#define MT8135_PIN_68_UTXD4__FUNC_TESTB_OUT11 (MTK_PIN_NO(68) | 7)
+
+#define MT8135_PIN_69_URXD1__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define MT8135_PIN_69_URXD1__FUNC_URXD1 (MTK_PIN_NO(69) | 1)
+#define MT8135_PIN_69_URXD1__FUNC_EINT79 (MTK_PIN_NO(69) | 2)
+#define MT8135_PIN_69_URXD1__FUNC_URXD4 (MTK_PIN_NO(69) | 3)
+#define MT8135_PIN_69_URXD1__FUNC_UTXD1 (MTK_PIN_NO(69) | 6)
+#define MT8135_PIN_69_URXD1__FUNC_TESTB_OUT24 (MTK_PIN_NO(69) | 7)
+
+#define MT8135_PIN_70_UTXD1__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define MT8135_PIN_70_UTXD1__FUNC_UTXD1 (MTK_PIN_NO(70) | 1)
+#define MT8135_PIN_70_UTXD1__FUNC_EINT78 (MTK_PIN_NO(70) | 2)
+#define MT8135_PIN_70_UTXD1__FUNC_UTXD4 (MTK_PIN_NO(70) | 3)
+#define MT8135_PIN_70_UTXD1__FUNC_URXD1 (MTK_PIN_NO(70) | 6)
+#define MT8135_PIN_70_UTXD1__FUNC_TESTB_OUT25 (MTK_PIN_NO(70) | 7)
+
+#define MT8135_PIN_71_UCTS1__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define MT8135_PIN_71_UCTS1__FUNC_UCTS1 (MTK_PIN_NO(71) | 1)
+#define MT8135_PIN_71_UCTS1__FUNC_EINT80 (MTK_PIN_NO(71) | 2)
+#define MT8135_PIN_71_UCTS1__FUNC_CLKM0 (MTK_PIN_NO(71) | 5)
+#define MT8135_PIN_71_UCTS1__FUNC_URTS1 (MTK_PIN_NO(71) | 6)
+#define MT8135_PIN_71_UCTS1__FUNC_TESTB_OUT31 (MTK_PIN_NO(71) | 7)
+
+#define MT8135_PIN_72_URTS1__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define MT8135_PIN_72_URTS1__FUNC_URTS1 (MTK_PIN_NO(72) | 1)
+#define MT8135_PIN_72_URTS1__FUNC_EINT81 (MTK_PIN_NO(72) | 2)
+#define MT8135_PIN_72_URTS1__FUNC_CLKM1 (MTK_PIN_NO(72) | 5)
+#define MT8135_PIN_72_URTS1__FUNC_UCTS1 (MTK_PIN_NO(72) | 6)
+#define MT8135_PIN_72_URTS1__FUNC_TESTB_OUT21 (MTK_PIN_NO(72) | 7)
+
+#define MT8135_PIN_73_PWM1__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define MT8135_PIN_73_PWM1__FUNC_PWM1 (MTK_PIN_NO(73) | 1)
+#define MT8135_PIN_73_PWM1__FUNC_EINT73 (MTK_PIN_NO(73) | 2)
+#define MT8135_PIN_73_PWM1__FUNC_USB_DRVVBUS (MTK_PIN_NO(73) | 5)
+#define MT8135_PIN_73_PWM1__FUNC_DISP_PWM (MTK_PIN_NO(73) | 6)
+#define MT8135_PIN_73_PWM1__FUNC_TESTB_OUT8 (MTK_PIN_NO(73) | 7)
+
+#define MT8135_PIN_74_PWM2__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define MT8135_PIN_74_PWM2__FUNC_PWM2 (MTK_PIN_NO(74) | 1)
+#define MT8135_PIN_74_PWM2__FUNC_EINT74 (MTK_PIN_NO(74) | 2)
+#define MT8135_PIN_74_PWM2__FUNC_DPI33_CK (MTK_PIN_NO(74) | 3)
+#define MT8135_PIN_74_PWM2__FUNC_PWM5 (MTK_PIN_NO(74) | 4)
+#define MT8135_PIN_74_PWM2__FUNC_URXD2 (MTK_PIN_NO(74) | 5)
+#define MT8135_PIN_74_PWM2__FUNC_DISP_PWM (MTK_PIN_NO(74) | 6)
+#define MT8135_PIN_74_PWM2__FUNC_TESTB_OUT9 (MTK_PIN_NO(74) | 7)
+
+#define MT8135_PIN_75_PWM3__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define MT8135_PIN_75_PWM3__FUNC_PWM3 (MTK_PIN_NO(75) | 1)
+#define MT8135_PIN_75_PWM3__FUNC_EINT75 (MTK_PIN_NO(75) | 2)
+#define MT8135_PIN_75_PWM3__FUNC_DPI33_D0 (MTK_PIN_NO(75) | 3)
+#define MT8135_PIN_75_PWM3__FUNC_PWM6 (MTK_PIN_NO(75) | 4)
+#define MT8135_PIN_75_PWM3__FUNC_UTXD2 (MTK_PIN_NO(75) | 5)
+#define MT8135_PIN_75_PWM3__FUNC_DISP_PWM (MTK_PIN_NO(75) | 6)
+#define MT8135_PIN_75_PWM3__FUNC_TESTB_OUT12 (MTK_PIN_NO(75) | 7)
+
+#define MT8135_PIN_76_PWM4__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define MT8135_PIN_76_PWM4__FUNC_PWM4 (MTK_PIN_NO(76) | 1)
+#define MT8135_PIN_76_PWM4__FUNC_EINT76 (MTK_PIN_NO(76) | 2)
+#define MT8135_PIN_76_PWM4__FUNC_DPI33_D1 (MTK_PIN_NO(76) | 3)
+#define MT8135_PIN_76_PWM4__FUNC_PWM7 (MTK_PIN_NO(76) | 4)
+#define MT8135_PIN_76_PWM4__FUNC_DISP_PWM (MTK_PIN_NO(76) | 6)
+#define MT8135_PIN_76_PWM4__FUNC_TESTB_OUT13 (MTK_PIN_NO(76) | 7)
+
+#define MT8135_PIN_77_MSDC2_DAT2__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define MT8135_PIN_77_MSDC2_DAT2__FUNC_MSDC2_DAT2 (MTK_PIN_NO(77) | 1)
+#define MT8135_PIN_77_MSDC2_DAT2__FUNC_EINT63 (MTK_PIN_NO(77) | 2)
+#define MT8135_PIN_77_MSDC2_DAT2__FUNC_DSP2_IMS (MTK_PIN_NO(77) | 4)
+#define MT8135_PIN_77_MSDC2_DAT2__FUNC_DPI33_D6 (MTK_PIN_NO(77) | 6)
+#define MT8135_PIN_77_MSDC2_DAT2__FUNC_TESTA_OUT25 (MTK_PIN_NO(77) | 7)
+
+#define MT8135_PIN_78_MSDC2_DAT3__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define MT8135_PIN_78_MSDC2_DAT3__FUNC_MSDC2_DAT3 (MTK_PIN_NO(78) | 1)
+#define MT8135_PIN_78_MSDC2_DAT3__FUNC_EINT64 (MTK_PIN_NO(78) | 2)
+#define MT8135_PIN_78_MSDC2_DAT3__FUNC_DSP2_ID (MTK_PIN_NO(78) | 4)
+#define MT8135_PIN_78_MSDC2_DAT3__FUNC_DPI33_D7 (MTK_PIN_NO(78) | 6)
+#define MT8135_PIN_78_MSDC2_DAT3__FUNC_TESTA_OUT26 (MTK_PIN_NO(78) | 7)
+
+#define MT8135_PIN_79_MSDC2_CMD__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define MT8135_PIN_79_MSDC2_CMD__FUNC_MSDC2_CMD (MTK_PIN_NO(79) | 1)
+#define MT8135_PIN_79_MSDC2_CMD__FUNC_EINT60 (MTK_PIN_NO(79) | 2)
+#define MT8135_PIN_79_MSDC2_CMD__FUNC_DSP1_IMS (MTK_PIN_NO(79) | 4)
+#define MT8135_PIN_79_MSDC2_CMD__FUNC_PCM1_WS (MTK_PIN_NO(79) | 5)
+#define MT8135_PIN_79_MSDC2_CMD__FUNC_DPI33_D3 (MTK_PIN_NO(79) | 6)
+#define MT8135_PIN_79_MSDC2_CMD__FUNC_TESTA_OUT0 (MTK_PIN_NO(79) | 7)
+
+#define MT8135_PIN_80_MSDC2_CLK__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define MT8135_PIN_80_MSDC2_CLK__FUNC_MSDC2_CLK (MTK_PIN_NO(80) | 1)
+#define MT8135_PIN_80_MSDC2_CLK__FUNC_EINT59 (MTK_PIN_NO(80) | 2)
+#define MT8135_PIN_80_MSDC2_CLK__FUNC_DSP1_ICK (MTK_PIN_NO(80) | 4)
+#define MT8135_PIN_80_MSDC2_CLK__FUNC_PCM1_CK (MTK_PIN_NO(80) | 5)
+#define MT8135_PIN_80_MSDC2_CLK__FUNC_DPI33_D2 (MTK_PIN_NO(80) | 6)
+#define MT8135_PIN_80_MSDC2_CLK__FUNC_TESTA_OUT1 (MTK_PIN_NO(80) | 7)
+
+#define MT8135_PIN_81_MSDC2_DAT1__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define MT8135_PIN_81_MSDC2_DAT1__FUNC_MSDC2_DAT1 (MTK_PIN_NO(81) | 1)
+#define MT8135_PIN_81_MSDC2_DAT1__FUNC_EINT62 (MTK_PIN_NO(81) | 2)
+#define MT8135_PIN_81_MSDC2_DAT1__FUNC_DSP2_ICK (MTK_PIN_NO(81) | 4)
+#define MT8135_PIN_81_MSDC2_DAT1__FUNC_PCM1_DO (MTK_PIN_NO(81) | 5)
+#define MT8135_PIN_81_MSDC2_DAT1__FUNC_DPI33_D5 (MTK_PIN_NO(81) | 6)
+
+#define MT8135_PIN_82_MSDC2_DAT0__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define MT8135_PIN_82_MSDC2_DAT0__FUNC_MSDC2_DAT0 (MTK_PIN_NO(82) | 1)
+#define MT8135_PIN_82_MSDC2_DAT0__FUNC_EINT61 (MTK_PIN_NO(82) | 2)
+#define MT8135_PIN_82_MSDC2_DAT0__FUNC_DSP1_ID (MTK_PIN_NO(82) | 4)
+#define MT8135_PIN_82_MSDC2_DAT0__FUNC_PCM1_DI (MTK_PIN_NO(82) | 5)
+#define MT8135_PIN_82_MSDC2_DAT0__FUNC_DPI33_D4 (MTK_PIN_NO(82) | 6)
+
+#define MT8135_PIN_83_MSDC1_DAT0__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define MT8135_PIN_83_MSDC1_DAT0__FUNC_MSDC1_DAT0 (MTK_PIN_NO(83) | 1)
+#define MT8135_PIN_83_MSDC1_DAT0__FUNC_EINT53 (MTK_PIN_NO(83) | 2)
+#define MT8135_PIN_83_MSDC1_DAT0__FUNC_SCL1 (MTK_PIN_NO(83) | 3)
+#define MT8135_PIN_83_MSDC1_DAT0__FUNC_PWM2 (MTK_PIN_NO(83) | 4)
+#define MT8135_PIN_83_MSDC1_DAT0__FUNC_CLKM1 (MTK_PIN_NO(83) | 5)
+#define MT8135_PIN_83_MSDC1_DAT0__FUNC_TESTB_OUT2 (MTK_PIN_NO(83) | 7)
+
+#define MT8135_PIN_84_MSDC1_DAT1__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define MT8135_PIN_84_MSDC1_DAT1__FUNC_MSDC1_DAT1 (MTK_PIN_NO(84) | 1)
+#define MT8135_PIN_84_MSDC1_DAT1__FUNC_EINT54 (MTK_PIN_NO(84) | 2)
+#define MT8135_PIN_84_MSDC1_DAT1__FUNC_SDA1 (MTK_PIN_NO(84) | 3)
+#define MT8135_PIN_84_MSDC1_DAT1__FUNC_PWM3 (MTK_PIN_NO(84) | 4)
+#define MT8135_PIN_84_MSDC1_DAT1__FUNC_CLKM2 (MTK_PIN_NO(84) | 5)
+#define MT8135_PIN_84_MSDC1_DAT1__FUNC_TESTB_OUT3 (MTK_PIN_NO(84) | 7)
+
+#define MT8135_PIN_85_MSDC1_CMD__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define MT8135_PIN_85_MSDC1_CMD__FUNC_MSDC1_CMD (MTK_PIN_NO(85) | 1)
+#define MT8135_PIN_85_MSDC1_CMD__FUNC_EINT52 (MTK_PIN_NO(85) | 2)
+#define MT8135_PIN_85_MSDC1_CMD__FUNC_SDA0 (MTK_PIN_NO(85) | 3)
+#define MT8135_PIN_85_MSDC1_CMD__FUNC_PWM1 (MTK_PIN_NO(85) | 4)
+#define MT8135_PIN_85_MSDC1_CMD__FUNC_CLKM0 (MTK_PIN_NO(85) | 5)
+#define MT8135_PIN_85_MSDC1_CMD__FUNC_TESTB_OUT1 (MTK_PIN_NO(85) | 7)
+
+#define MT8135_PIN_86_MSDC1_CLK__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define MT8135_PIN_86_MSDC1_CLK__FUNC_MSDC1_CLK (MTK_PIN_NO(86) | 1)
+#define MT8135_PIN_86_MSDC1_CLK__FUNC_EINT51 (MTK_PIN_NO(86) | 2)
+#define MT8135_PIN_86_MSDC1_CLK__FUNC_SCL0 (MTK_PIN_NO(86) | 3)
+#define MT8135_PIN_86_MSDC1_CLK__FUNC_DISP_PWM (MTK_PIN_NO(86) | 4)
+#define MT8135_PIN_86_MSDC1_CLK__FUNC_TESTB_OUT0 (MTK_PIN_NO(86) | 7)
+
+#define MT8135_PIN_87_MSDC1_DAT2__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define MT8135_PIN_87_MSDC1_DAT2__FUNC_MSDC1_DAT2 (MTK_PIN_NO(87) | 1)
+#define MT8135_PIN_87_MSDC1_DAT2__FUNC_EINT55 (MTK_PIN_NO(87) | 2)
+#define MT8135_PIN_87_MSDC1_DAT2__FUNC_SCL4 (MTK_PIN_NO(87) | 3)
+#define MT8135_PIN_87_MSDC1_DAT2__FUNC_PWM4 (MTK_PIN_NO(87) | 4)
+#define MT8135_PIN_87_MSDC1_DAT2__FUNC_CLKM3 (MTK_PIN_NO(87) | 5)
+#define MT8135_PIN_87_MSDC1_DAT2__FUNC_TESTB_OUT4 (MTK_PIN_NO(87) | 7)
+
+#define MT8135_PIN_88_MSDC1_DAT3__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define MT8135_PIN_88_MSDC1_DAT3__FUNC_MSDC1_DAT3 (MTK_PIN_NO(88) | 1)
+#define MT8135_PIN_88_MSDC1_DAT3__FUNC_EINT56 (MTK_PIN_NO(88) | 2)
+#define MT8135_PIN_88_MSDC1_DAT3__FUNC_SDA4 (MTK_PIN_NO(88) | 3)
+#define MT8135_PIN_88_MSDC1_DAT3__FUNC_PWM5 (MTK_PIN_NO(88) | 4)
+#define MT8135_PIN_88_MSDC1_DAT3__FUNC_CLKM4 (MTK_PIN_NO(88) | 5)
+#define MT8135_PIN_88_MSDC1_DAT3__FUNC_TESTB_OUT5 (MTK_PIN_NO(88) | 7)
+
+#define MT8135_PIN_89_MSDC4_DAT0__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define MT8135_PIN_89_MSDC4_DAT0__FUNC_MSDC4_DAT0 (MTK_PIN_NO(89) | 1)
+#define MT8135_PIN_89_MSDC4_DAT0__FUNC_EINT133 (MTK_PIN_NO(89) | 2)
+#define MT8135_PIN_89_MSDC4_DAT0__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(89) | 4)
+#define MT8135_PIN_89_MSDC4_DAT0__FUNC_USB_DRVVBUS (MTK_PIN_NO(89) | 5)
+#define MT8135_PIN_89_MSDC4_DAT0__FUNC_A_FUNC_DIN_9 (MTK_PIN_NO(89) | 6)
+#define MT8135_PIN_89_MSDC4_DAT0__FUNC_LPTE (MTK_PIN_NO(89) | 7)
+
+#define MT8135_PIN_90_MSDC4_DAT1__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define MT8135_PIN_90_MSDC4_DAT1__FUNC_MSDC4_DAT1 (MTK_PIN_NO(90) | 1)
+#define MT8135_PIN_90_MSDC4_DAT1__FUNC_EINT134 (MTK_PIN_NO(90) | 2)
+#define MT8135_PIN_90_MSDC4_DAT1__FUNC_A_FUNC_DIN_10 (MTK_PIN_NO(90) | 6)
+#define MT8135_PIN_90_MSDC4_DAT1__FUNC_LRSTB_1X (MTK_PIN_NO(90) | 7)
+
+#define MT8135_PIN_91_MSDC4_DAT5__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define MT8135_PIN_91_MSDC4_DAT5__FUNC_MSDC4_DAT5 (MTK_PIN_NO(91) | 1)
+#define MT8135_PIN_91_MSDC4_DAT5__FUNC_EINT136 (MTK_PIN_NO(91) | 2)
+#define MT8135_PIN_91_MSDC4_DAT5__FUNC_I2SIN_WS (MTK_PIN_NO(91) | 3)
+#define MT8135_PIN_91_MSDC4_DAT5__FUNC_DAC_WS (MTK_PIN_NO(91) | 4)
+#define MT8135_PIN_91_MSDC4_DAT5__FUNC_PCM1_WS (MTK_PIN_NO(91) | 5)
+#define MT8135_PIN_91_MSDC4_DAT5__FUNC_A_FUNC_DIN_11 (MTK_PIN_NO(91) | 6)
+#define MT8135_PIN_91_MSDC4_DAT5__FUNC_SPI1_CSN (MTK_PIN_NO(91) | 7)
+
+#define MT8135_PIN_92_MSDC4_DAT6__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define MT8135_PIN_92_MSDC4_DAT6__FUNC_MSDC4_DAT6 (MTK_PIN_NO(92) | 1)
+#define MT8135_PIN_92_MSDC4_DAT6__FUNC_EINT137 (MTK_PIN_NO(92) | 2)
+#define MT8135_PIN_92_MSDC4_DAT6__FUNC_I2SOUT_DAT (MTK_PIN_NO(92) | 3)
+#define MT8135_PIN_92_MSDC4_DAT6__FUNC_DAC_DAT_OUT (MTK_PIN_NO(92) | 4)
+#define MT8135_PIN_92_MSDC4_DAT6__FUNC_PCM1_DO (MTK_PIN_NO(92) | 5)
+#define MT8135_PIN_92_MSDC4_DAT6__FUNC_A_FUNC_DIN_12 (MTK_PIN_NO(92) | 6)
+#define MT8135_PIN_92_MSDC4_DAT6__FUNC_SPI1_MO (MTK_PIN_NO(92) | 7)
+
+#define MT8135_PIN_93_MSDC4_DAT7__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define MT8135_PIN_93_MSDC4_DAT7__FUNC_MSDC4_DAT7 (MTK_PIN_NO(93) | 1)
+#define MT8135_PIN_93_MSDC4_DAT7__FUNC_EINT138 (MTK_PIN_NO(93) | 2)
+#define MT8135_PIN_93_MSDC4_DAT7__FUNC_I2SIN_DAT (MTK_PIN_NO(93) | 3)
+#define MT8135_PIN_93_MSDC4_DAT7__FUNC_PCM1_DI (MTK_PIN_NO(93) | 5)
+#define MT8135_PIN_93_MSDC4_DAT7__FUNC_A_FUNC_DIN_13 (MTK_PIN_NO(93) | 6)
+#define MT8135_PIN_93_MSDC4_DAT7__FUNC_SPI1_MI (MTK_PIN_NO(93) | 7)
+
+#define MT8135_PIN_94_MSDC4_DAT4__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define MT8135_PIN_94_MSDC4_DAT4__FUNC_MSDC4_DAT4 (MTK_PIN_NO(94) | 1)
+#define MT8135_PIN_94_MSDC4_DAT4__FUNC_EINT135 (MTK_PIN_NO(94) | 2)
+#define MT8135_PIN_94_MSDC4_DAT4__FUNC_I2SIN_CK (MTK_PIN_NO(94) | 3)
+#define MT8135_PIN_94_MSDC4_DAT4__FUNC_DAC_CK (MTK_PIN_NO(94) | 4)
+#define MT8135_PIN_94_MSDC4_DAT4__FUNC_PCM1_CK (MTK_PIN_NO(94) | 5)
+#define MT8135_PIN_94_MSDC4_DAT4__FUNC_A_FUNC_DIN_14 (MTK_PIN_NO(94) | 6)
+#define MT8135_PIN_94_MSDC4_DAT4__FUNC_SPI1_CLK (MTK_PIN_NO(94) | 7)
+
+#define MT8135_PIN_95_MSDC4_DAT2__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define MT8135_PIN_95_MSDC4_DAT2__FUNC_MSDC4_DAT2 (MTK_PIN_NO(95) | 1)
+#define MT8135_PIN_95_MSDC4_DAT2__FUNC_EINT131 (MTK_PIN_NO(95) | 2)
+#define MT8135_PIN_95_MSDC4_DAT2__FUNC_I2SIN_WS (MTK_PIN_NO(95) | 3)
+#define MT8135_PIN_95_MSDC4_DAT2__FUNC_CM2PDN_2X (MTK_PIN_NO(95) | 4)
+#define MT8135_PIN_95_MSDC4_DAT2__FUNC_DAC_WS (MTK_PIN_NO(95) | 5)
+#define MT8135_PIN_95_MSDC4_DAT2__FUNC_PCM1_WS (MTK_PIN_NO(95) | 6)
+#define MT8135_PIN_95_MSDC4_DAT2__FUNC_LSCE0B_1X (MTK_PIN_NO(95) | 7)
+
+#define MT8135_PIN_96_MSDC4_CLK__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define MT8135_PIN_96_MSDC4_CLK__FUNC_MSDC4_CLK (MTK_PIN_NO(96) | 1)
+#define MT8135_PIN_96_MSDC4_CLK__FUNC_EINT129 (MTK_PIN_NO(96) | 2)
+#define MT8135_PIN_96_MSDC4_CLK__FUNC_DPI1_CK_2X (MTK_PIN_NO(96) | 3)
+#define MT8135_PIN_96_MSDC4_CLK__FUNC_CM2PCLK_2X (MTK_PIN_NO(96) | 4)
+#define MT8135_PIN_96_MSDC4_CLK__FUNC_PWM4 (MTK_PIN_NO(96) | 5)
+#define MT8135_PIN_96_MSDC4_CLK__FUNC_PCM1_DI (MTK_PIN_NO(96) | 6)
+#define MT8135_PIN_96_MSDC4_CLK__FUNC_LSCK_1X (MTK_PIN_NO(96) | 7)
+
+#define MT8135_PIN_97_MSDC4_DAT3__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define MT8135_PIN_97_MSDC4_DAT3__FUNC_MSDC4_DAT3 (MTK_PIN_NO(97) | 1)
+#define MT8135_PIN_97_MSDC4_DAT3__FUNC_EINT132 (MTK_PIN_NO(97) | 2)
+#define MT8135_PIN_97_MSDC4_DAT3__FUNC_I2SOUT_DAT (MTK_PIN_NO(97) | 3)
+#define MT8135_PIN_97_MSDC4_DAT3__FUNC_CM2RST_2X (MTK_PIN_NO(97) | 4)
+#define MT8135_PIN_97_MSDC4_DAT3__FUNC_DAC_DAT_OUT (MTK_PIN_NO(97) | 5)
+#define MT8135_PIN_97_MSDC4_DAT3__FUNC_PCM1_DO (MTK_PIN_NO(97) | 6)
+#define MT8135_PIN_97_MSDC4_DAT3__FUNC_LSCE1B_1X (MTK_PIN_NO(97) | 7)
+
+#define MT8135_PIN_98_MSDC4_CMD__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define MT8135_PIN_98_MSDC4_CMD__FUNC_MSDC4_CMD (MTK_PIN_NO(98) | 1)
+#define MT8135_PIN_98_MSDC4_CMD__FUNC_EINT128 (MTK_PIN_NO(98) | 2)
+#define MT8135_PIN_98_MSDC4_CMD__FUNC_DPI1_DE_2X (MTK_PIN_NO(98) | 3)
+#define MT8135_PIN_98_MSDC4_CMD__FUNC_PWM3 (MTK_PIN_NO(98) | 5)
+#define MT8135_PIN_98_MSDC4_CMD__FUNC_LSDA_1X (MTK_PIN_NO(98) | 7)
+
+#define MT8135_PIN_99_MSDC4_RSTB__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define MT8135_PIN_99_MSDC4_RSTB__FUNC_MSDC4_RSTB (MTK_PIN_NO(99) | 1)
+#define MT8135_PIN_99_MSDC4_RSTB__FUNC_EINT130 (MTK_PIN_NO(99) | 2)
+#define MT8135_PIN_99_MSDC4_RSTB__FUNC_I2SIN_CK (MTK_PIN_NO(99) | 3)
+#define MT8135_PIN_99_MSDC4_RSTB__FUNC_CM2MCLK_2X (MTK_PIN_NO(99) | 4)
+#define MT8135_PIN_99_MSDC4_RSTB__FUNC_DAC_CK (MTK_PIN_NO(99) | 5)
+#define MT8135_PIN_99_MSDC4_RSTB__FUNC_PCM1_CK (MTK_PIN_NO(99) | 6)
+#define MT8135_PIN_99_MSDC4_RSTB__FUNC_LSA0_1X (MTK_PIN_NO(99) | 7)
+
+#define MT8135_PIN_100_SDA0__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define MT8135_PIN_100_SDA0__FUNC_SDA0 (MTK_PIN_NO(100) | 1)
+#define MT8135_PIN_100_SDA0__FUNC_EINT91 (MTK_PIN_NO(100) | 2)
+#define MT8135_PIN_100_SDA0__FUNC_CLKM1 (MTK_PIN_NO(100) | 3)
+#define MT8135_PIN_100_SDA0__FUNC_PWM1 (MTK_PIN_NO(100) | 4)
+#define MT8135_PIN_100_SDA0__FUNC_A_FUNC_DIN_15 (MTK_PIN_NO(100) | 7)
+
+#define MT8135_PIN_101_SCL0__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define MT8135_PIN_101_SCL0__FUNC_SCL0 (MTK_PIN_NO(101) | 1)
+#define MT8135_PIN_101_SCL0__FUNC_EINT90 (MTK_PIN_NO(101) | 2)
+#define MT8135_PIN_101_SCL0__FUNC_CLKM0 (MTK_PIN_NO(101) | 3)
+#define MT8135_PIN_101_SCL0__FUNC_DISP_PWM (MTK_PIN_NO(101) | 4)
+#define MT8135_PIN_101_SCL0__FUNC_A_FUNC_DIN_16 (MTK_PIN_NO(101) | 7)
+
+#define MT8135_PIN_102_EINT10_AUXIN2__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define MT8135_PIN_102_EINT10_AUXIN2__FUNC_EINT10 (MTK_PIN_NO(102) | 1)
+#define MT8135_PIN_102_EINT10_AUXIN2__FUNC_USB_TEST_IO_16 (MTK_PIN_NO(102) | 5)
+#define MT8135_PIN_102_EINT10_AUXIN2__FUNC_TESTB_OUT16 (MTK_PIN_NO(102) | 6)
+#define MT8135_PIN_102_EINT10_AUXIN2__FUNC_A_FUNC_DIN_17 (MTK_PIN_NO(102) | 7)
+
+#define MT8135_PIN_103_EINT11_AUXIN3__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define MT8135_PIN_103_EINT11_AUXIN3__FUNC_EINT11 (MTK_PIN_NO(103) | 1)
+#define MT8135_PIN_103_EINT11_AUXIN3__FUNC_USB_TEST_IO_17 (MTK_PIN_NO(103) | 5)
+#define MT8135_PIN_103_EINT11_AUXIN3__FUNC_TESTB_OUT17 (MTK_PIN_NO(103) | 6)
+#define MT8135_PIN_103_EINT11_AUXIN3__FUNC_A_FUNC_DIN_18 (MTK_PIN_NO(103) | 7)
+
+#define MT8135_PIN_104_EINT16_AUXIN4__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define MT8135_PIN_104_EINT16_AUXIN4__FUNC_EINT16 (MTK_PIN_NO(104) | 1)
+#define MT8135_PIN_104_EINT16_AUXIN4__FUNC_USB_TEST_IO_18 (MTK_PIN_NO(104) | 5)
+#define MT8135_PIN_104_EINT16_AUXIN4__FUNC_TESTB_OUT18 (MTK_PIN_NO(104) | 6)
+#define MT8135_PIN_104_EINT16_AUXIN4__FUNC_A_FUNC_DIN_19 (MTK_PIN_NO(104) | 7)
+
+#define MT8135_PIN_105_I2S_CLK__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define MT8135_PIN_105_I2S_CLK__FUNC_I2SIN_CK (MTK_PIN_NO(105) | 1)
+#define MT8135_PIN_105_I2S_CLK__FUNC_EINT10 (MTK_PIN_NO(105) | 2)
+#define MT8135_PIN_105_I2S_CLK__FUNC_DAC_CK (MTK_PIN_NO(105) | 3)
+#define MT8135_PIN_105_I2S_CLK__FUNC_PCM1_CK (MTK_PIN_NO(105) | 4)
+#define MT8135_PIN_105_I2S_CLK__FUNC_USB_TEST_IO_19 (MTK_PIN_NO(105) | 5)
+#define MT8135_PIN_105_I2S_CLK__FUNC_TESTB_OUT19 (MTK_PIN_NO(105) | 6)
+#define MT8135_PIN_105_I2S_CLK__FUNC_A_FUNC_DIN_20 (MTK_PIN_NO(105) | 7)
+
+#define MT8135_PIN_106_I2S_WS__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define MT8135_PIN_106_I2S_WS__FUNC_I2SIN_WS (MTK_PIN_NO(106) | 1)
+#define MT8135_PIN_106_I2S_WS__FUNC_EINT13 (MTK_PIN_NO(106) | 2)
+#define MT8135_PIN_106_I2S_WS__FUNC_DAC_WS (MTK_PIN_NO(106) | 3)
+#define MT8135_PIN_106_I2S_WS__FUNC_PCM1_WS (MTK_PIN_NO(106) | 4)
+#define MT8135_PIN_106_I2S_WS__FUNC_USB_TEST_IO_20 (MTK_PIN_NO(106) | 5)
+#define MT8135_PIN_106_I2S_WS__FUNC_TESTB_OUT20 (MTK_PIN_NO(106) | 6)
+#define MT8135_PIN_106_I2S_WS__FUNC_A_FUNC_DIN_21 (MTK_PIN_NO(106) | 7)
+
+#define MT8135_PIN_107_I2S_DATA_IN__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define MT8135_PIN_107_I2S_DATA_IN__FUNC_I2SIN_DAT (MTK_PIN_NO(107) | 1)
+#define MT8135_PIN_107_I2S_DATA_IN__FUNC_EINT11 (MTK_PIN_NO(107) | 2)
+#define MT8135_PIN_107_I2S_DATA_IN__FUNC_PCM1_DI (MTK_PIN_NO(107) | 4)
+#define MT8135_PIN_107_I2S_DATA_IN__FUNC_USB_TEST_IO_21 (MTK_PIN_NO(107) | 5)
+#define MT8135_PIN_107_I2S_DATA_IN__FUNC_TESTB_OUT22 (MTK_PIN_NO(107) | 6)
+#define MT8135_PIN_107_I2S_DATA_IN__FUNC_A_FUNC_DIN_22 (MTK_PIN_NO(107) | 7)
+
+#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_I2SOUT_DAT (MTK_PIN_NO(108) | 1)
+#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_EINT12 (MTK_PIN_NO(108) | 2)
+#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_DAC_DAT_OUT (MTK_PIN_NO(108) | 3)
+#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_PCM1_DO (MTK_PIN_NO(108) | 4)
+#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_USB_TEST_IO_22 (MTK_PIN_NO(108) | 5)
+#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_TESTB_OUT23 (MTK_PIN_NO(108) | 6)
+#define MT8135_PIN_108_I2S_DATA_OUT__FUNC_A_FUNC_DIN_23 (MTK_PIN_NO(108) | 7)
+
+#define MT8135_PIN_109_EINT5__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define MT8135_PIN_109_EINT5__FUNC_EINT5 (MTK_PIN_NO(109) | 1)
+#define MT8135_PIN_109_EINT5__FUNC_PWM5 (MTK_PIN_NO(109) | 2)
+#define MT8135_PIN_109_EINT5__FUNC_CLKM3 (MTK_PIN_NO(109) | 3)
+#define MT8135_PIN_109_EINT5__FUNC_GPU_JTRSTB (MTK_PIN_NO(109) | 4)
+#define MT8135_PIN_109_EINT5__FUNC_USB_TEST_IO_23 (MTK_PIN_NO(109) | 5)
+#define MT8135_PIN_109_EINT5__FUNC_TESTB_OUT26 (MTK_PIN_NO(109) | 6)
+#define MT8135_PIN_109_EINT5__FUNC_A_FUNC_DIN_24 (MTK_PIN_NO(109) | 7)
+
+#define MT8135_PIN_110_EINT6__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define MT8135_PIN_110_EINT6__FUNC_EINT6 (MTK_PIN_NO(110) | 1)
+#define MT8135_PIN_110_EINT6__FUNC_PWM6 (MTK_PIN_NO(110) | 2)
+#define MT8135_PIN_110_EINT6__FUNC_CLKM4 (MTK_PIN_NO(110) | 3)
+#define MT8135_PIN_110_EINT6__FUNC_GPU_JTMS (MTK_PIN_NO(110) | 4)
+#define MT8135_PIN_110_EINT6__FUNC_USB_TEST_IO_24 (MTK_PIN_NO(110) | 5)
+#define MT8135_PIN_110_EINT6__FUNC_TESTB_OUT27 (MTK_PIN_NO(110) | 6)
+#define MT8135_PIN_110_EINT6__FUNC_A_FUNC_DIN_25 (MTK_PIN_NO(110) | 7)
+
+#define MT8135_PIN_111_EINT7__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define MT8135_PIN_111_EINT7__FUNC_EINT7 (MTK_PIN_NO(111) | 1)
+#define MT8135_PIN_111_EINT7__FUNC_PWM7 (MTK_PIN_NO(111) | 2)
+#define MT8135_PIN_111_EINT7__FUNC_CLKM5 (MTK_PIN_NO(111) | 3)
+#define MT8135_PIN_111_EINT7__FUNC_GPU_JTDO (MTK_PIN_NO(111) | 4)
+#define MT8135_PIN_111_EINT7__FUNC_USB_TEST_IO_25 (MTK_PIN_NO(111) | 5)
+#define MT8135_PIN_111_EINT7__FUNC_TESTB_OUT28 (MTK_PIN_NO(111) | 6)
+#define MT8135_PIN_111_EINT7__FUNC_A_FUNC_DIN_26 (MTK_PIN_NO(111) | 7)
+
+#define MT8135_PIN_112_EINT8__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define MT8135_PIN_112_EINT8__FUNC_EINT8 (MTK_PIN_NO(112) | 1)
+#define MT8135_PIN_112_EINT8__FUNC_DISP_PWM (MTK_PIN_NO(112) | 2)
+#define MT8135_PIN_112_EINT8__FUNC_CLKM6 (MTK_PIN_NO(112) | 3)
+#define MT8135_PIN_112_EINT8__FUNC_GPU_JTDI (MTK_PIN_NO(112) | 4)
+#define MT8135_PIN_112_EINT8__FUNC_USB_TEST_IO_26 (MTK_PIN_NO(112) | 5)
+#define MT8135_PIN_112_EINT8__FUNC_TESTB_OUT29 (MTK_PIN_NO(112) | 6)
+#define MT8135_PIN_112_EINT8__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(112) | 7)
+
+#define MT8135_PIN_113_EINT9__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define MT8135_PIN_113_EINT9__FUNC_EINT9 (MTK_PIN_NO(113) | 1)
+#define MT8135_PIN_113_EINT9__FUNC_GPU_JTCK (MTK_PIN_NO(113) | 4)
+#define MT8135_PIN_113_EINT9__FUNC_USB_DRVVBUS (MTK_PIN_NO(113) | 5)
+#define MT8135_PIN_113_EINT9__FUNC_TESTB_OUT30 (MTK_PIN_NO(113) | 6)
+#define MT8135_PIN_113_EINT9__FUNC_A_FUNC_DIN_27 (MTK_PIN_NO(113) | 7)
+
+#define MT8135_PIN_114_LPCE1B__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define MT8135_PIN_114_LPCE1B__FUNC_LPCE1B (MTK_PIN_NO(114) | 1)
+#define MT8135_PIN_114_LPCE1B__FUNC_EINT127 (MTK_PIN_NO(114) | 2)
+#define MT8135_PIN_114_LPCE1B__FUNC_PWM2 (MTK_PIN_NO(114) | 5)
+#define MT8135_PIN_114_LPCE1B__FUNC_TESTB_OUT14 (MTK_PIN_NO(114) | 6)
+#define MT8135_PIN_114_LPCE1B__FUNC_A_FUNC_DIN_28 (MTK_PIN_NO(114) | 7)
+
+#define MT8135_PIN_115_LPCE0B__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define MT8135_PIN_115_LPCE0B__FUNC_LPCE0B (MTK_PIN_NO(115) | 1)
+#define MT8135_PIN_115_LPCE0B__FUNC_EINT126 (MTK_PIN_NO(115) | 2)
+#define MT8135_PIN_115_LPCE0B__FUNC_PWM1 (MTK_PIN_NO(115) | 5)
+#define MT8135_PIN_115_LPCE0B__FUNC_TESTB_OUT15 (MTK_PIN_NO(115) | 6)
+#define MT8135_PIN_115_LPCE0B__FUNC_A_FUNC_DIN_29 (MTK_PIN_NO(115) | 7)
+
+#define MT8135_PIN_116_DISP_PWM__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define MT8135_PIN_116_DISP_PWM__FUNC_DISP_PWM (MTK_PIN_NO(116) | 1)
+#define MT8135_PIN_116_DISP_PWM__FUNC_EINT77 (MTK_PIN_NO(116) | 2)
+#define MT8135_PIN_116_DISP_PWM__FUNC_LSDI (MTK_PIN_NO(116) | 3)
+#define MT8135_PIN_116_DISP_PWM__FUNC_PWM1 (MTK_PIN_NO(116) | 4)
+#define MT8135_PIN_116_DISP_PWM__FUNC_PWM2 (MTK_PIN_NO(116) | 5)
+#define MT8135_PIN_116_DISP_PWM__FUNC_PWM3 (MTK_PIN_NO(116) | 7)
+
+#define MT8135_PIN_117_EINT1__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define MT8135_PIN_117_EINT1__FUNC_EINT1 (MTK_PIN_NO(117) | 1)
+#define MT8135_PIN_117_EINT1__FUNC_PWM2 (MTK_PIN_NO(117) | 2)
+#define MT8135_PIN_117_EINT1__FUNC_CLKM1 (MTK_PIN_NO(117) | 3)
+#define MT8135_PIN_117_EINT1__FUNC_USB_TEST_IO_13 (MTK_PIN_NO(117) | 5)
+#define MT8135_PIN_117_EINT1__FUNC_USB_SDA (MTK_PIN_NO(117) | 7)
+
+#define MT8135_PIN_118_EINT2__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define MT8135_PIN_118_EINT2__FUNC_EINT2 (MTK_PIN_NO(118) | 1)
+#define MT8135_PIN_118_EINT2__FUNC_PWM3 (MTK_PIN_NO(118) | 2)
+#define MT8135_PIN_118_EINT2__FUNC_CLKM2 (MTK_PIN_NO(118) | 3)
+#define MT8135_PIN_118_EINT2__FUNC_USB_TEST_IO_14 (MTK_PIN_NO(118) | 5)
+#define MT8135_PIN_118_EINT2__FUNC_SRCLKENAI2 (MTK_PIN_NO(118) | 6)
+#define MT8135_PIN_118_EINT2__FUNC_A_FUNC_DIN_30 (MTK_PIN_NO(118) | 7)
+
+#define MT8135_PIN_119_EINT3__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define MT8135_PIN_119_EINT3__FUNC_EINT3 (MTK_PIN_NO(119) | 1)
+#define MT8135_PIN_119_EINT3__FUNC_USB_TEST_IO_15 (MTK_PIN_NO(119) | 5)
+#define MT8135_PIN_119_EINT3__FUNC_SRCLKENAI1 (MTK_PIN_NO(119) | 6)
+#define MT8135_PIN_119_EINT3__FUNC_EXT_26M_CK (MTK_PIN_NO(119) | 7)
+
+#define MT8135_PIN_120_EINT4__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define MT8135_PIN_120_EINT4__FUNC_EINT4 (MTK_PIN_NO(120) | 1)
+#define MT8135_PIN_120_EINT4__FUNC_PWM4 (MTK_PIN_NO(120) | 2)
+#define MT8135_PIN_120_EINT4__FUNC_USB_DRVVBUS (MTK_PIN_NO(120) | 5)
+#define MT8135_PIN_120_EINT4__FUNC_A_FUNC_DIN_31 (MTK_PIN_NO(120) | 7)
+
+#define MT8135_PIN_121_DPIDE__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define MT8135_PIN_121_DPIDE__FUNC_DPI0_DE (MTK_PIN_NO(121) | 1)
+#define MT8135_PIN_121_DPIDE__FUNC_EINT100 (MTK_PIN_NO(121) | 2)
+#define MT8135_PIN_121_DPIDE__FUNC_I2SOUT_DAT (MTK_PIN_NO(121) | 3)
+#define MT8135_PIN_121_DPIDE__FUNC_DAC_DAT_OUT (MTK_PIN_NO(121) | 4)
+#define MT8135_PIN_121_DPIDE__FUNC_PCM1_DO (MTK_PIN_NO(121) | 5)
+#define MT8135_PIN_121_DPIDE__FUNC_IRDA_TXD (MTK_PIN_NO(121) | 6)
+
+#define MT8135_PIN_122_DPICK__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define MT8135_PIN_122_DPICK__FUNC_DPI0_CK (MTK_PIN_NO(122) | 1)
+#define MT8135_PIN_122_DPICK__FUNC_EINT101 (MTK_PIN_NO(122) | 2)
+#define MT8135_PIN_122_DPICK__FUNC_I2SIN_DAT (MTK_PIN_NO(122) | 3)
+#define MT8135_PIN_122_DPICK__FUNC_PCM1_DI (MTK_PIN_NO(122) | 5)
+#define MT8135_PIN_122_DPICK__FUNC_IRDA_PDN (MTK_PIN_NO(122) | 6)
+
+#define MT8135_PIN_123_DPIG4__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define MT8135_PIN_123_DPIG4__FUNC_DPI0_G4 (MTK_PIN_NO(123) | 1)
+#define MT8135_PIN_123_DPIG4__FUNC_EINT114 (MTK_PIN_NO(123) | 2)
+#define MT8135_PIN_123_DPIG4__FUNC_CM2DAT_2X_0 (MTK_PIN_NO(123) | 4)
+#define MT8135_PIN_123_DPIG4__FUNC_DSP2_ID (MTK_PIN_NO(123) | 5)
+
+#define MT8135_PIN_124_DPIG5__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define MT8135_PIN_124_DPIG5__FUNC_DPI0_G5 (MTK_PIN_NO(124) | 1)
+#define MT8135_PIN_124_DPIG5__FUNC_EINT115 (MTK_PIN_NO(124) | 2)
+#define MT8135_PIN_124_DPIG5__FUNC_CM2DAT_2X_1 (MTK_PIN_NO(124) | 4)
+#define MT8135_PIN_124_DPIG5__FUNC_DSP2_ICK (MTK_PIN_NO(124) | 5)
+
+#define MT8135_PIN_125_DPIR3__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define MT8135_PIN_125_DPIR3__FUNC_DPI0_R3 (MTK_PIN_NO(125) | 1)
+#define MT8135_PIN_125_DPIR3__FUNC_EINT121 (MTK_PIN_NO(125) | 2)
+#define MT8135_PIN_125_DPIR3__FUNC_CM2DAT_2X_7 (MTK_PIN_NO(125) | 4)
+
+#define MT8135_PIN_126_DPIG1__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define MT8135_PIN_126_DPIG1__FUNC_DPI0_G1 (MTK_PIN_NO(126) | 1)
+#define MT8135_PIN_126_DPIG1__FUNC_EINT111 (MTK_PIN_NO(126) | 2)
+#define MT8135_PIN_126_DPIG1__FUNC_DSP1_ICK (MTK_PIN_NO(126) | 5)
+
+#define MT8135_PIN_127_DPIVSYNC__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define MT8135_PIN_127_DPIVSYNC__FUNC_DPI0_VSYNC (MTK_PIN_NO(127) | 1)
+#define MT8135_PIN_127_DPIVSYNC__FUNC_EINT98 (MTK_PIN_NO(127) | 2)
+#define MT8135_PIN_127_DPIVSYNC__FUNC_I2SIN_CK (MTK_PIN_NO(127) | 3)
+#define MT8135_PIN_127_DPIVSYNC__FUNC_DAC_CK (MTK_PIN_NO(127) | 4)
+#define MT8135_PIN_127_DPIVSYNC__FUNC_PCM1_CK (MTK_PIN_NO(127) | 5)
+
+#define MT8135_PIN_128_DPIHSYNC__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define MT8135_PIN_128_DPIHSYNC__FUNC_DPI0_HSYNC (MTK_PIN_NO(128) | 1)
+#define MT8135_PIN_128_DPIHSYNC__FUNC_EINT99 (MTK_PIN_NO(128) | 2)
+#define MT8135_PIN_128_DPIHSYNC__FUNC_I2SIN_WS (MTK_PIN_NO(128) | 3)
+#define MT8135_PIN_128_DPIHSYNC__FUNC_DAC_WS (MTK_PIN_NO(128) | 4)
+#define MT8135_PIN_128_DPIHSYNC__FUNC_PCM1_WS (MTK_PIN_NO(128) | 5)
+#define MT8135_PIN_128_DPIHSYNC__FUNC_IRDA_RXD (MTK_PIN_NO(128) | 6)
+
+#define MT8135_PIN_129_DPIB0__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define MT8135_PIN_129_DPIB0__FUNC_DPI0_B0 (MTK_PIN_NO(129) | 1)
+#define MT8135_PIN_129_DPIB0__FUNC_EINT102 (MTK_PIN_NO(129) | 2)
+#define MT8135_PIN_129_DPIB0__FUNC_SCL0 (MTK_PIN_NO(129) | 4)
+#define MT8135_PIN_129_DPIB0__FUNC_DISP_PWM (MTK_PIN_NO(129) | 5)
+
+#define MT8135_PIN_130_DPIB1__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define MT8135_PIN_130_DPIB1__FUNC_DPI0_B1 (MTK_PIN_NO(130) | 1)
+#define MT8135_PIN_130_DPIB1__FUNC_EINT103 (MTK_PIN_NO(130) | 2)
+#define MT8135_PIN_130_DPIB1__FUNC_CLKM0 (MTK_PIN_NO(130) | 3)
+#define MT8135_PIN_130_DPIB1__FUNC_SDA0 (MTK_PIN_NO(130) | 4)
+#define MT8135_PIN_130_DPIB1__FUNC_PWM1 (MTK_PIN_NO(130) | 5)
+
+#define MT8135_PIN_131_DPIB2__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define MT8135_PIN_131_DPIB2__FUNC_DPI0_B2 (MTK_PIN_NO(131) | 1)
+#define MT8135_PIN_131_DPIB2__FUNC_EINT104 (MTK_PIN_NO(131) | 2)
+#define MT8135_PIN_131_DPIB2__FUNC_CLKM1 (MTK_PIN_NO(131) | 3)
+#define MT8135_PIN_131_DPIB2__FUNC_SCL1 (MTK_PIN_NO(131) | 4)
+#define MT8135_PIN_131_DPIB2__FUNC_PWM2 (MTK_PIN_NO(131) | 5)
+
+#define MT8135_PIN_132_DPIB3__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define MT8135_PIN_132_DPIB3__FUNC_DPI0_B3 (MTK_PIN_NO(132) | 1)
+#define MT8135_PIN_132_DPIB3__FUNC_EINT105 (MTK_PIN_NO(132) | 2)
+#define MT8135_PIN_132_DPIB3__FUNC_CLKM2 (MTK_PIN_NO(132) | 3)
+#define MT8135_PIN_132_DPIB3__FUNC_SDA1 (MTK_PIN_NO(132) | 4)
+#define MT8135_PIN_132_DPIB3__FUNC_PWM3 (MTK_PIN_NO(132) | 5)
+
+#define MT8135_PIN_133_DPIB4__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define MT8135_PIN_133_DPIB4__FUNC_DPI0_B4 (MTK_PIN_NO(133) | 1)
+#define MT8135_PIN_133_DPIB4__FUNC_EINT106 (MTK_PIN_NO(133) | 2)
+#define MT8135_PIN_133_DPIB4__FUNC_CLKM3 (MTK_PIN_NO(133) | 3)
+#define MT8135_PIN_133_DPIB4__FUNC_SCL2 (MTK_PIN_NO(133) | 4)
+#define MT8135_PIN_133_DPIB4__FUNC_PWM4 (MTK_PIN_NO(133) | 5)
+
+#define MT8135_PIN_134_DPIB5__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define MT8135_PIN_134_DPIB5__FUNC_DPI0_B5 (MTK_PIN_NO(134) | 1)
+#define MT8135_PIN_134_DPIB5__FUNC_EINT107 (MTK_PIN_NO(134) | 2)
+#define MT8135_PIN_134_DPIB5__FUNC_CLKM4 (MTK_PIN_NO(134) | 3)
+#define MT8135_PIN_134_DPIB5__FUNC_SDA2 (MTK_PIN_NO(134) | 4)
+#define MT8135_PIN_134_DPIB5__FUNC_PWM5 (MTK_PIN_NO(134) | 5)
+
+#define MT8135_PIN_135_DPIB6__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define MT8135_PIN_135_DPIB6__FUNC_DPI0_B6 (MTK_PIN_NO(135) | 1)
+#define MT8135_PIN_135_DPIB6__FUNC_EINT108 (MTK_PIN_NO(135) | 2)
+#define MT8135_PIN_135_DPIB6__FUNC_CLKM5 (MTK_PIN_NO(135) | 3)
+#define MT8135_PIN_135_DPIB6__FUNC_SCL3 (MTK_PIN_NO(135) | 4)
+#define MT8135_PIN_135_DPIB6__FUNC_PWM6 (MTK_PIN_NO(135) | 5)
+
+#define MT8135_PIN_136_DPIB7__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define MT8135_PIN_136_DPIB7__FUNC_DPI0_B7 (MTK_PIN_NO(136) | 1)
+#define MT8135_PIN_136_DPIB7__FUNC_EINT109 (MTK_PIN_NO(136) | 2)
+#define MT8135_PIN_136_DPIB7__FUNC_CLKM6 (MTK_PIN_NO(136) | 3)
+#define MT8135_PIN_136_DPIB7__FUNC_SDA3 (MTK_PIN_NO(136) | 4)
+#define MT8135_PIN_136_DPIB7__FUNC_PWM7 (MTK_PIN_NO(136) | 5)
+
+#define MT8135_PIN_137_DPIG0__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define MT8135_PIN_137_DPIG0__FUNC_DPI0_G0 (MTK_PIN_NO(137) | 1)
+#define MT8135_PIN_137_DPIG0__FUNC_EINT110 (MTK_PIN_NO(137) | 2)
+#define MT8135_PIN_137_DPIG0__FUNC_DSP1_ID (MTK_PIN_NO(137) | 5)
+
+#define MT8135_PIN_138_DPIG2__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define MT8135_PIN_138_DPIG2__FUNC_DPI0_G2 (MTK_PIN_NO(138) | 1)
+#define MT8135_PIN_138_DPIG2__FUNC_EINT112 (MTK_PIN_NO(138) | 2)
+#define MT8135_PIN_138_DPIG2__FUNC_DSP1_IMS (MTK_PIN_NO(138) | 5)
+
+#define MT8135_PIN_139_DPIG3__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define MT8135_PIN_139_DPIG3__FUNC_DPI0_G3 (MTK_PIN_NO(139) | 1)
+#define MT8135_PIN_139_DPIG3__FUNC_EINT113 (MTK_PIN_NO(139) | 2)
+#define MT8135_PIN_139_DPIG3__FUNC_DSP2_IMS (MTK_PIN_NO(139) | 5)
+
+#define MT8135_PIN_140_DPIG6__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define MT8135_PIN_140_DPIG6__FUNC_DPI0_G6 (MTK_PIN_NO(140) | 1)
+#define MT8135_PIN_140_DPIG6__FUNC_EINT116 (MTK_PIN_NO(140) | 2)
+#define MT8135_PIN_140_DPIG6__FUNC_CM2DAT_2X_2 (MTK_PIN_NO(140) | 4)
+
+#define MT8135_PIN_141_DPIG7__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define MT8135_PIN_141_DPIG7__FUNC_DPI0_G7 (MTK_PIN_NO(141) | 1)
+#define MT8135_PIN_141_DPIG7__FUNC_EINT117 (MTK_PIN_NO(141) | 2)
+#define MT8135_PIN_141_DPIG7__FUNC_CM2DAT_2X_3 (MTK_PIN_NO(141) | 4)
+
+#define MT8135_PIN_142_DPIR0__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define MT8135_PIN_142_DPIR0__FUNC_DPI0_R0 (MTK_PIN_NO(142) | 1)
+#define MT8135_PIN_142_DPIR0__FUNC_EINT118 (MTK_PIN_NO(142) | 2)
+#define MT8135_PIN_142_DPIR0__FUNC_CM2DAT_2X_4 (MTK_PIN_NO(142) | 4)
+
+#define MT8135_PIN_143_DPIR1__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define MT8135_PIN_143_DPIR1__FUNC_DPI0_R1 (MTK_PIN_NO(143) | 1)
+#define MT8135_PIN_143_DPIR1__FUNC_EINT119 (MTK_PIN_NO(143) | 2)
+#define MT8135_PIN_143_DPIR1__FUNC_CM2DAT_2X_5 (MTK_PIN_NO(143) | 4)
+
+#define MT8135_PIN_144_DPIR2__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define MT8135_PIN_144_DPIR2__FUNC_DPI0_R2 (MTK_PIN_NO(144) | 1)
+#define MT8135_PIN_144_DPIR2__FUNC_EINT120 (MTK_PIN_NO(144) | 2)
+#define MT8135_PIN_144_DPIR2__FUNC_CM2DAT_2X_6 (MTK_PIN_NO(144) | 4)
+
+#define MT8135_PIN_145_DPIR4__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define MT8135_PIN_145_DPIR4__FUNC_DPI0_R4 (MTK_PIN_NO(145) | 1)
+#define MT8135_PIN_145_DPIR4__FUNC_EINT122 (MTK_PIN_NO(145) | 2)
+#define MT8135_PIN_145_DPIR4__FUNC_CM2DAT_2X_8 (MTK_PIN_NO(145) | 4)
+
+#define MT8135_PIN_146_DPIR5__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define MT8135_PIN_146_DPIR5__FUNC_DPI0_R5 (MTK_PIN_NO(146) | 1)
+#define MT8135_PIN_146_DPIR5__FUNC_EINT123 (MTK_PIN_NO(146) | 2)
+#define MT8135_PIN_146_DPIR5__FUNC_CM2DAT_2X_9 (MTK_PIN_NO(146) | 4)
+
+#define MT8135_PIN_147_DPIR6__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define MT8135_PIN_147_DPIR6__FUNC_DPI0_R6 (MTK_PIN_NO(147) | 1)
+#define MT8135_PIN_147_DPIR6__FUNC_EINT124 (MTK_PIN_NO(147) | 2)
+#define MT8135_PIN_147_DPIR6__FUNC_CM2VSYNC_2X (MTK_PIN_NO(147) | 4)
+
+#define MT8135_PIN_148_DPIR7__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define MT8135_PIN_148_DPIR7__FUNC_DPI0_R7 (MTK_PIN_NO(148) | 1)
+#define MT8135_PIN_148_DPIR7__FUNC_EINT125 (MTK_PIN_NO(148) | 2)
+#define MT8135_PIN_148_DPIR7__FUNC_CM2HSYNC_2X (MTK_PIN_NO(148) | 4)
+
+#define MT8135_PIN_149_TDN3__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define MT8135_PIN_149_TDN3__FUNC_EINT36 (MTK_PIN_NO(149) | 2)
+
+#define MT8135_PIN_150_TDP3__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define MT8135_PIN_150_TDP3__FUNC_EINT35 (MTK_PIN_NO(150) | 2)
+
+#define MT8135_PIN_151_TDN2__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define MT8135_PIN_151_TDN2__FUNC_EINT169 (MTK_PIN_NO(151) | 2)
+
+#define MT8135_PIN_152_TDP2__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define MT8135_PIN_152_TDP2__FUNC_EINT168 (MTK_PIN_NO(152) | 2)
+
+#define MT8135_PIN_153_TCN__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define MT8135_PIN_153_TCN__FUNC_EINT163 (MTK_PIN_NO(153) | 2)
+
+#define MT8135_PIN_154_TCP__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define MT8135_PIN_154_TCP__FUNC_EINT162 (MTK_PIN_NO(154) | 2)
+
+#define MT8135_PIN_155_TDN1__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define MT8135_PIN_155_TDN1__FUNC_EINT167 (MTK_PIN_NO(155) | 2)
+
+#define MT8135_PIN_156_TDP1__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define MT8135_PIN_156_TDP1__FUNC_EINT166 (MTK_PIN_NO(156) | 2)
+
+#define MT8135_PIN_157_TDN0__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define MT8135_PIN_157_TDN0__FUNC_EINT165 (MTK_PIN_NO(157) | 2)
+
+#define MT8135_PIN_158_TDP0__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define MT8135_PIN_158_TDP0__FUNC_EINT164 (MTK_PIN_NO(158) | 2)
+
+#define MT8135_PIN_159_RDN3__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define MT8135_PIN_159_RDN3__FUNC_EINT18 (MTK_PIN_NO(159) | 2)
+
+#define MT8135_PIN_160_RDP3__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define MT8135_PIN_160_RDP3__FUNC_EINT30 (MTK_PIN_NO(160) | 2)
+
+#define MT8135_PIN_161_RDN2__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define MT8135_PIN_161_RDN2__FUNC_EINT31 (MTK_PIN_NO(161) | 2)
+
+#define MT8135_PIN_162_RDP2__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define MT8135_PIN_162_RDP2__FUNC_EINT32 (MTK_PIN_NO(162) | 2)
+
+#define MT8135_PIN_163_RCN__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define MT8135_PIN_163_RCN__FUNC_EINT33 (MTK_PIN_NO(163) | 2)
+
+#define MT8135_PIN_164_RCP__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define MT8135_PIN_164_RCP__FUNC_EINT39 (MTK_PIN_NO(164) | 2)
+
+#define MT8135_PIN_165_RDN1__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+
+#define MT8135_PIN_166_RDP1__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+
+#define MT8135_PIN_167_RDN0__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+
+#define MT8135_PIN_168_RDP0__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+
+#define MT8135_PIN_169_RDN1_A__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define MT8135_PIN_169_RDN1_A__FUNC_CMDAT6 (MTK_PIN_NO(169) | 1)
+#define MT8135_PIN_169_RDN1_A__FUNC_EINT175 (MTK_PIN_NO(169) | 2)
+
+#define MT8135_PIN_170_RDP1_A__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define MT8135_PIN_170_RDP1_A__FUNC_CMDAT7 (MTK_PIN_NO(170) | 1)
+#define MT8135_PIN_170_RDP1_A__FUNC_EINT174 (MTK_PIN_NO(170) | 2)
+
+#define MT8135_PIN_171_RCN_A__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define MT8135_PIN_171_RCN_A__FUNC_CMDAT8 (MTK_PIN_NO(171) | 1)
+#define MT8135_PIN_171_RCN_A__FUNC_EINT171 (MTK_PIN_NO(171) | 2)
+
+#define MT8135_PIN_172_RCP_A__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define MT8135_PIN_172_RCP_A__FUNC_CMDAT9 (MTK_PIN_NO(172) | 1)
+#define MT8135_PIN_172_RCP_A__FUNC_EINT170 (MTK_PIN_NO(172) | 2)
+
+#define MT8135_PIN_173_RDN0_A__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define MT8135_PIN_173_RDN0_A__FUNC_CMHSYNC (MTK_PIN_NO(173) | 1)
+#define MT8135_PIN_173_RDN0_A__FUNC_EINT173 (MTK_PIN_NO(173) | 2)
+
+#define MT8135_PIN_174_RDP0_A__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define MT8135_PIN_174_RDP0_A__FUNC_CMVSYNC (MTK_PIN_NO(174) | 1)
+#define MT8135_PIN_174_RDP0_A__FUNC_EINT172 (MTK_PIN_NO(174) | 2)
+
+#define MT8135_PIN_175_RDN1_B__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define MT8135_PIN_175_RDN1_B__FUNC_CMDAT2 (MTK_PIN_NO(175) | 1)
+#define MT8135_PIN_175_RDN1_B__FUNC_EINT181 (MTK_PIN_NO(175) | 2)
+#define MT8135_PIN_175_RDN1_B__FUNC_CMCSD2 (MTK_PIN_NO(175) | 3)
+
+#define MT8135_PIN_176_RDP1_B__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define MT8135_PIN_176_RDP1_B__FUNC_CMDAT3 (MTK_PIN_NO(176) | 1)
+#define MT8135_PIN_176_RDP1_B__FUNC_EINT180 (MTK_PIN_NO(176) | 2)
+#define MT8135_PIN_176_RDP1_B__FUNC_CMCSD3 (MTK_PIN_NO(176) | 3)
+
+#define MT8135_PIN_177_RCN_B__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define MT8135_PIN_177_RCN_B__FUNC_CMDAT4 (MTK_PIN_NO(177) | 1)
+#define MT8135_PIN_177_RCN_B__FUNC_EINT177 (MTK_PIN_NO(177) | 2)
+
+#define MT8135_PIN_178_RCP_B__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define MT8135_PIN_178_RCP_B__FUNC_CMDAT5 (MTK_PIN_NO(178) | 1)
+#define MT8135_PIN_178_RCP_B__FUNC_EINT176 (MTK_PIN_NO(178) | 2)
+
+#define MT8135_PIN_179_RDN0_B__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define MT8135_PIN_179_RDN0_B__FUNC_CMDAT0 (MTK_PIN_NO(179) | 1)
+#define MT8135_PIN_179_RDN0_B__FUNC_EINT179 (MTK_PIN_NO(179) | 2)
+#define MT8135_PIN_179_RDN0_B__FUNC_CMCSD0 (MTK_PIN_NO(179) | 3)
+
+#define MT8135_PIN_180_RDP0_B__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define MT8135_PIN_180_RDP0_B__FUNC_CMDAT1 (MTK_PIN_NO(180) | 1)
+#define MT8135_PIN_180_RDP0_B__FUNC_EINT178 (MTK_PIN_NO(180) | 2)
+#define MT8135_PIN_180_RDP0_B__FUNC_CMCSD1 (MTK_PIN_NO(180) | 3)
+
+#define MT8135_PIN_181_CMPCLK__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define MT8135_PIN_181_CMPCLK__FUNC_CMPCLK (MTK_PIN_NO(181) | 1)
+#define MT8135_PIN_181_CMPCLK__FUNC_EINT182 (MTK_PIN_NO(181) | 2)
+#define MT8135_PIN_181_CMPCLK__FUNC_CMCSK (MTK_PIN_NO(181) | 3)
+#define MT8135_PIN_181_CMPCLK__FUNC_CM2MCLK_4X (MTK_PIN_NO(181) | 4)
+#define MT8135_PIN_181_CMPCLK__FUNC_TS_AUXADC_SEL_3 (MTK_PIN_NO(181) | 5)
+#define MT8135_PIN_181_CMPCLK__FUNC_VENC_TEST_CK (MTK_PIN_NO(181) | 6)
+#define MT8135_PIN_181_CMPCLK__FUNC_TESTA_OUT27 (MTK_PIN_NO(181) | 7)
+
+#define MT8135_PIN_182_CMMCLK__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define MT8135_PIN_182_CMMCLK__FUNC_CMMCLK (MTK_PIN_NO(182) | 1)
+#define MT8135_PIN_182_CMMCLK__FUNC_EINT183 (MTK_PIN_NO(182) | 2)
+#define MT8135_PIN_182_CMMCLK__FUNC_TS_AUXADC_SEL_2 (MTK_PIN_NO(182) | 5)
+#define MT8135_PIN_182_CMMCLK__FUNC_TESTA_OUT28 (MTK_PIN_NO(182) | 7)
+
+#define MT8135_PIN_183_CMRST__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define MT8135_PIN_183_CMRST__FUNC_CMRST (MTK_PIN_NO(183) | 1)
+#define MT8135_PIN_183_CMRST__FUNC_EINT185 (MTK_PIN_NO(183) | 2)
+#define MT8135_PIN_183_CMRST__FUNC_TS_AUXADC_SEL_1 (MTK_PIN_NO(183) | 5)
+#define MT8135_PIN_183_CMRST__FUNC_TESTA_OUT30 (MTK_PIN_NO(183) | 7)
+
+#define MT8135_PIN_184_CMPDN__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define MT8135_PIN_184_CMPDN__FUNC_CMPDN (MTK_PIN_NO(184) | 1)
+#define MT8135_PIN_184_CMPDN__FUNC_EINT184 (MTK_PIN_NO(184) | 2)
+#define MT8135_PIN_184_CMPDN__FUNC_TS_AUXADC_SEL_0 (MTK_PIN_NO(184) | 5)
+#define MT8135_PIN_184_CMPDN__FUNC_TESTA_OUT29 (MTK_PIN_NO(184) | 7)
+
+#define MT8135_PIN_185_CMFLASH__FUNC_GPIO185 (MTK_PIN_NO(185) | 0)
+#define MT8135_PIN_185_CMFLASH__FUNC_CMFLASH (MTK_PIN_NO(185) | 1)
+#define MT8135_PIN_185_CMFLASH__FUNC_EINT186 (MTK_PIN_NO(185) | 2)
+#define MT8135_PIN_185_CMFLASH__FUNC_CM2MCLK_3X (MTK_PIN_NO(185) | 3)
+#define MT8135_PIN_185_CMFLASH__FUNC_MFG_TEST_CK_1 (MTK_PIN_NO(185) | 6)
+#define MT8135_PIN_185_CMFLASH__FUNC_TESTA_OUT31 (MTK_PIN_NO(185) | 7)
+
+#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_GPIO186 (MTK_PIN_NO(186) | 0)
+#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_MRG_I2S_P_CLK (MTK_PIN_NO(186) | 1)
+#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_EINT14 (MTK_PIN_NO(186) | 2)
+#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_I2SIN_CK (MTK_PIN_NO(186) | 3)
+#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_PCM0_CK (MTK_PIN_NO(186) | 4)
+#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_DSP2_ICK (MTK_PIN_NO(186) | 5)
+#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_IMG_TEST_CK (MTK_PIN_NO(186) | 6)
+#define MT8135_PIN_186_MRG_I2S_PCM_CLK__FUNC_USB_SCL (MTK_PIN_NO(186) | 7)
+
+#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_GPIO187 (MTK_PIN_NO(187) | 0)
+#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_MRG_I2S_SYNC (MTK_PIN_NO(187) | 1)
+#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_EINT16 (MTK_PIN_NO(187) | 2)
+#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_I2SIN_WS (MTK_PIN_NO(187) | 3)
+#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_PCM0_WS (MTK_PIN_NO(187) | 4)
+#define MT8135_PIN_187_MRG_I2S_PCM_SYNC__FUNC_DISP_TEST_CK (MTK_PIN_NO(187) | 6)
+
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_GPIO188 (MTK_PIN_NO(188) | 0)
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_MRG_I2S_PCM_RX (MTK_PIN_NO(188) | 1)
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_EINT15 (MTK_PIN_NO(188) | 2)
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_I2SIN_DAT (MTK_PIN_NO(188) | 3)
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_PCM0_DI (MTK_PIN_NO(188) | 4)
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_DSP2_ID (MTK_PIN_NO(188) | 5)
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_MFG_TEST_CK (MTK_PIN_NO(188) | 6)
+#define MT8135_PIN_188_MRG_I2S_PCM_RX__FUNC_USB_SDA (MTK_PIN_NO(188) | 7)
+
+#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_GPIO189 (MTK_PIN_NO(189) | 0)
+#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_MRG_I2S_PCM_TX (MTK_PIN_NO(189) | 1)
+#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_EINT17 (MTK_PIN_NO(189) | 2)
+#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_I2SOUT_DAT (MTK_PIN_NO(189) | 3)
+#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_PCM0_DO (MTK_PIN_NO(189) | 4)
+#define MT8135_PIN_189_MRG_I2S_PCM_TX__FUNC_VDEC_TEST_CK (MTK_PIN_NO(189) | 6)
+
+#define MT8135_PIN_190_SRCLKENAI__FUNC_GPIO190 (MTK_PIN_NO(190) | 0)
+#define MT8135_PIN_190_SRCLKENAI__FUNC_SRCLKENAI (MTK_PIN_NO(190) | 1)
+
+#define MT8135_PIN_191_URXD3__FUNC_GPIO191 (MTK_PIN_NO(191) | 0)
+#define MT8135_PIN_191_URXD3__FUNC_URXD3 (MTK_PIN_NO(191) | 1)
+#define MT8135_PIN_191_URXD3__FUNC_EINT87 (MTK_PIN_NO(191) | 2)
+#define MT8135_PIN_191_URXD3__FUNC_UTXD3 (MTK_PIN_NO(191) | 3)
+#define MT8135_PIN_191_URXD3__FUNC_TS_AUX_ST (MTK_PIN_NO(191) | 5)
+#define MT8135_PIN_191_URXD3__FUNC_PWM4 (MTK_PIN_NO(191) | 6)
+
+#define MT8135_PIN_192_UTXD3__FUNC_GPIO192 (MTK_PIN_NO(192) | 0)
+#define MT8135_PIN_192_UTXD3__FUNC_UTXD3 (MTK_PIN_NO(192) | 1)
+#define MT8135_PIN_192_UTXD3__FUNC_EINT86 (MTK_PIN_NO(192) | 2)
+#define MT8135_PIN_192_UTXD3__FUNC_URXD3 (MTK_PIN_NO(192) | 3)
+#define MT8135_PIN_192_UTXD3__FUNC_TS_AUX_CS_B (MTK_PIN_NO(192) | 5)
+#define MT8135_PIN_192_UTXD3__FUNC_PWM3 (MTK_PIN_NO(192) | 6)
+
+#define MT8135_PIN_193_SDA2__FUNC_GPIO193 (MTK_PIN_NO(193) | 0)
+#define MT8135_PIN_193_SDA2__FUNC_SDA2 (MTK_PIN_NO(193) | 1)
+#define MT8135_PIN_193_SDA2__FUNC_EINT95 (MTK_PIN_NO(193) | 2)
+#define MT8135_PIN_193_SDA2__FUNC_CLKM5 (MTK_PIN_NO(193) | 3)
+#define MT8135_PIN_193_SDA2__FUNC_PWM5 (MTK_PIN_NO(193) | 4)
+#define MT8135_PIN_193_SDA2__FUNC_TS_AUX_PWDB (MTK_PIN_NO(193) | 5)
+
+#define MT8135_PIN_194_SCL2__FUNC_GPIO194 (MTK_PIN_NO(194) | 0)
+#define MT8135_PIN_194_SCL2__FUNC_SCL2 (MTK_PIN_NO(194) | 1)
+#define MT8135_PIN_194_SCL2__FUNC_EINT94 (MTK_PIN_NO(194) | 2)
+#define MT8135_PIN_194_SCL2__FUNC_CLKM4 (MTK_PIN_NO(194) | 3)
+#define MT8135_PIN_194_SCL2__FUNC_PWM4 (MTK_PIN_NO(194) | 4)
+#define MT8135_PIN_194_SCL2__FUNC_TS_AUXADC_TEST_CK (MTK_PIN_NO(194) | 5)
+
+#define MT8135_PIN_195_SDA1__FUNC_GPIO195 (MTK_PIN_NO(195) | 0)
+#define MT8135_PIN_195_SDA1__FUNC_SDA1 (MTK_PIN_NO(195) | 1)
+#define MT8135_PIN_195_SDA1__FUNC_EINT93 (MTK_PIN_NO(195) | 2)
+#define MT8135_PIN_195_SDA1__FUNC_CLKM3 (MTK_PIN_NO(195) | 3)
+#define MT8135_PIN_195_SDA1__FUNC_PWM3 (MTK_PIN_NO(195) | 4)
+#define MT8135_PIN_195_SDA1__FUNC_TS_AUX_SCLK_PWDB (MTK_PIN_NO(195) | 5)
+
+#define MT8135_PIN_196_SCL1__FUNC_GPIO196 (MTK_PIN_NO(196) | 0)
+#define MT8135_PIN_196_SCL1__FUNC_SCL1 (MTK_PIN_NO(196) | 1)
+#define MT8135_PIN_196_SCL1__FUNC_EINT92 (MTK_PIN_NO(196) | 2)
+#define MT8135_PIN_196_SCL1__FUNC_CLKM2 (MTK_PIN_NO(196) | 3)
+#define MT8135_PIN_196_SCL1__FUNC_PWM2 (MTK_PIN_NO(196) | 4)
+#define MT8135_PIN_196_SCL1__FUNC_TS_AUX_DIN (MTK_PIN_NO(196) | 5)
+
+#define MT8135_PIN_197_MSDC3_DAT2__FUNC_GPIO197 (MTK_PIN_NO(197) | 0)
+#define MT8135_PIN_197_MSDC3_DAT2__FUNC_MSDC3_DAT2 (MTK_PIN_NO(197) | 1)
+#define MT8135_PIN_197_MSDC3_DAT2__FUNC_EINT71 (MTK_PIN_NO(197) | 2)
+#define MT8135_PIN_197_MSDC3_DAT2__FUNC_SCL6 (MTK_PIN_NO(197) | 3)
+#define MT8135_PIN_197_MSDC3_DAT2__FUNC_PWM5 (MTK_PIN_NO(197) | 4)
+#define MT8135_PIN_197_MSDC3_DAT2__FUNC_CLKM4 (MTK_PIN_NO(197) | 5)
+#define MT8135_PIN_197_MSDC3_DAT2__FUNC_MFG_TEST_CK_2 (MTK_PIN_NO(197) | 6)
+
+#define MT8135_PIN_198_MSDC3_DAT3__FUNC_GPIO198 (MTK_PIN_NO(198) | 0)
+#define MT8135_PIN_198_MSDC3_DAT3__FUNC_MSDC3_DAT3 (MTK_PIN_NO(198) | 1)
+#define MT8135_PIN_198_MSDC3_DAT3__FUNC_EINT72 (MTK_PIN_NO(198) | 2)
+#define MT8135_PIN_198_MSDC3_DAT3__FUNC_SDA6 (MTK_PIN_NO(198) | 3)
+#define MT8135_PIN_198_MSDC3_DAT3__FUNC_PWM6 (MTK_PIN_NO(198) | 4)
+#define MT8135_PIN_198_MSDC3_DAT3__FUNC_CLKM5 (MTK_PIN_NO(198) | 5)
+#define MT8135_PIN_198_MSDC3_DAT3__FUNC_MFG_TEST_CK_3 (MTK_PIN_NO(198) | 6)
+
+#define MT8135_PIN_199_MSDC3_CMD__FUNC_GPIO199 (MTK_PIN_NO(199) | 0)
+#define MT8135_PIN_199_MSDC3_CMD__FUNC_MSDC3_CMD (MTK_PIN_NO(199) | 1)
+#define MT8135_PIN_199_MSDC3_CMD__FUNC_EINT68 (MTK_PIN_NO(199) | 2)
+#define MT8135_PIN_199_MSDC3_CMD__FUNC_SDA2 (MTK_PIN_NO(199) | 3)
+#define MT8135_PIN_199_MSDC3_CMD__FUNC_PWM2 (MTK_PIN_NO(199) | 4)
+#define MT8135_PIN_199_MSDC3_CMD__FUNC_CLKM1 (MTK_PIN_NO(199) | 5)
+#define MT8135_PIN_199_MSDC3_CMD__FUNC_MFG_TEST_CK_4 (MTK_PIN_NO(199) | 6)
+
+#define MT8135_PIN_200_MSDC3_CLK__FUNC_GPIO200 (MTK_PIN_NO(200) | 0)
+#define MT8135_PIN_200_MSDC3_CLK__FUNC_MSDC3_CLK (MTK_PIN_NO(200) | 1)
+#define MT8135_PIN_200_MSDC3_CLK__FUNC_EINT67 (MTK_PIN_NO(200) | 2)
+#define MT8135_PIN_200_MSDC3_CLK__FUNC_SCL2 (MTK_PIN_NO(200) | 3)
+#define MT8135_PIN_200_MSDC3_CLK__FUNC_PWM1 (MTK_PIN_NO(200) | 4)
+#define MT8135_PIN_200_MSDC3_CLK__FUNC_CLKM0 (MTK_PIN_NO(200) | 5)
+
+#define MT8135_PIN_201_MSDC3_DAT1__FUNC_GPIO201 (MTK_PIN_NO(201) | 0)
+#define MT8135_PIN_201_MSDC3_DAT1__FUNC_MSDC3_DAT1 (MTK_PIN_NO(201) | 1)
+#define MT8135_PIN_201_MSDC3_DAT1__FUNC_EINT70 (MTK_PIN_NO(201) | 2)
+#define MT8135_PIN_201_MSDC3_DAT1__FUNC_SDA3 (MTK_PIN_NO(201) | 3)
+#define MT8135_PIN_201_MSDC3_DAT1__FUNC_PWM4 (MTK_PIN_NO(201) | 4)
+#define MT8135_PIN_201_MSDC3_DAT1__FUNC_CLKM3 (MTK_PIN_NO(201) | 5)
+
+#define MT8135_PIN_202_MSDC3_DAT0__FUNC_GPIO202 (MTK_PIN_NO(202) | 0)
+#define MT8135_PIN_202_MSDC3_DAT0__FUNC_MSDC3_DAT0 (MTK_PIN_NO(202) | 1)
+#define MT8135_PIN_202_MSDC3_DAT0__FUNC_EINT69 (MTK_PIN_NO(202) | 2)
+#define MT8135_PIN_202_MSDC3_DAT0__FUNC_SCL3 (MTK_PIN_NO(202) | 3)
+#define MT8135_PIN_202_MSDC3_DAT0__FUNC_PWM3 (MTK_PIN_NO(202) | 4)
+#define MT8135_PIN_202_MSDC3_DAT0__FUNC_CLKM2 (MTK_PIN_NO(202) | 5)
+
+#endif /* __DTS_MT8135_PINFUNC_H */
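
Each pinfunc constant above packs a pin number and a mux mode into a single
integer: the `| 0` .. `| 7` suffix selects one of up to eight functions per
pad. A minimal C sketch of the encoding, assuming MTK_PIN_NO(x) expands to
((x) << 8) and the decode helpers are named as in
<dt-bindings/pinctrl/mt65xx.h> (both taken as assumptions here, not quoted
from this patch):

/* assumed definitions, per <dt-bindings/pinctrl/mt65xx.h> */
#define MTK_PIN_NO(x)        ((x) << 8)   /* pin number in the high bits */
#define MTK_GET_PIN_NO(x)    ((x) >> 8)   /* recover the pin number */
#define MTK_GET_PIN_FUNC(x)  ((x) & 0xf)  /* recover the mux mode (0..7) */

/*
 * Example: MT8135_PIN_100_SDA0__FUNC_PWM1 is (MTK_PIN_NO(100) | 4),
 * i.e. (100 << 8) | 4, so:
 *   MTK_GET_PIN_NO(MT8135_PIN_100_SDA0__FUNC_PWM1)   -> 100
 *   MTK_GET_PIN_FUNC(MT8135_PIN_100_SDA0__FUNC_PWM1) -> 4  (PWM1)
 */

Device trees list these constants in pinmux properties; the pinctrl driver
then splits each value back into a (pin, mode) pair with helpers of this
shape.
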
diff --git a/include/dt-bindings/pinctrl/mt8183-pinfunc.h b/include/dt-bindings/pinctrl/mt8183-pinfunc.h
new file mode 100644
index 000000000000..6221cd712718
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt8183-pinfunc.h
@@ -0,0 +1,1120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ * Author: Zhiyong Tao <zhiyong.tao@mediatek.com>
+ *
+ */
+
+#ifndef __MT8183_PINFUNC_H
+#define __MT8183_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_MRG_SYNC (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_PCM0_SYNC (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_SRCLKENAI0 (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_SCP_SPI2_CS (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_I2S3_MCK (MTK_PIN_NO(0) | 6)
+#define PINMUX_GPIO0__FUNC_SPI2_CSB (MTK_PIN_NO(0) | 7)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_MRG_CLK (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_PCM0_CLK (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_CLKM3 (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_SCP_SPI2_MO (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_I2S3_BCK (MTK_PIN_NO(1) | 6)
+#define PINMUX_GPIO1__FUNC_SPI2_MO (MTK_PIN_NO(1) | 7)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_MRG_DO (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_PCM0_DO (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_SCL6 (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_SCP_SPI2_CK (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_I2S3_LRCK (MTK_PIN_NO(2) | 6)
+#define PINMUX_GPIO2__FUNC_SPI2_CLK (MTK_PIN_NO(2) | 7)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_MRG_DI (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_PCM0_DI (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_SDA6 (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_TDM_MCK (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_I2S3_DO (MTK_PIN_NO(3) | 6)
+#define PINMUX_GPIO3__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(3) | 7)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_PWM_B (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S0_MCK (MTK_PIN_NO(4) | 2)
+#define PINMUX_GPIO4__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_MD_URXD1 (MTK_PIN_NO(4) | 4)
+#define PINMUX_GPIO4__FUNC_TDM_BCK (MTK_PIN_NO(4) | 5)
+#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 6)
+#define PINMUX_GPIO4__FUNC_DAP_MD32_SWD (MTK_PIN_NO(4) | 7)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_PWM_C (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_I2S0_BCK (MTK_PIN_NO(5) | 2)
+#define PINMUX_GPIO5__FUNC_SSPM_URXD_AO (MTK_PIN_NO(5) | 3)
+#define PINMUX_GPIO5__FUNC_MD_UTXD1 (MTK_PIN_NO(5) | 4)
+#define PINMUX_GPIO5__FUNC_TDM_LRCK (MTK_PIN_NO(5) | 5)
+#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 6)
+#define PINMUX_GPIO5__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(5) | 7)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_PWM_A (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_I2S0_LRCK (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_IDDIG (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_MD_URXD0 (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_TDM_DATA0 (MTK_PIN_NO(6) | 5)
+#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 6)
+#define PINMUX_GPIO6__FUNC_CMFLASH (MTK_PIN_NO(6) | 7)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_SPI1_B_MI (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_I2S0_DI (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_USB_DRVVBUS (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_MD_UTXD0 (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_TDM_DATA1 (MTK_PIN_NO(7) | 5)
+#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 6)
+#define PINMUX_GPIO7__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(7) | 7)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_SPI1_B_CSB (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_ANT_SEL3 (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_SCL7 (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_TDM_DATA2 (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_MD_INT0 (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_JTRSTN_SEL1 (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_SPI1_B_MO (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_ANT_SEL4 (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_CMMCLK2 (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_DBG_MON_B10 (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_SPI1_B_CLK (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_ANT_SEL5 (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_CMMCLK3 (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_TDM_DATA3 (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(10) | 6)
+#define PINMUX_GPIO10__FUNC_DBG_MON_B11 (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_TP_URXD1_AO (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_IDDIG (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_SCL6 (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_UCTS1 (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_UCTS0 (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_SRCLKENAI1 (MTK_PIN_NO(11) | 6)
+#define PINMUX_GPIO11__FUNC_I2S5_MCK (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_TP_UTXD1_AO (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_USB_DRVVBUS (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_SDA6 (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_URTS1 (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_URTS0 (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_I2S2_DI2 (MTK_PIN_NO(12) | 6)
+#define PINMUX_GPIO12__FUNC_I2S5_BCK (MTK_PIN_NO(12) | 7)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_DBPI_D0 (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_SPI5_MI (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_PCM0_SYNC (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_MD_URXD0 (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_ANT_SEL3 (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_I2S0_MCK (MTK_PIN_NO(13) | 6)
+#define PINMUX_GPIO13__FUNC_DBG_MON_B15 (MTK_PIN_NO(13) | 7)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_DBPI_D1 (MTK_PIN_NO(14) | 1)
+#define PINMUX_GPIO14__FUNC_SPI5_CSB (MTK_PIN_NO(14) | 2)
+#define PINMUX_GPIO14__FUNC_PCM0_CLK (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_MD_UTXD0 (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_ANT_SEL4 (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_I2S0_BCK (MTK_PIN_NO(14) | 6)
+#define PINMUX_GPIO14__FUNC_DBG_MON_B16 (MTK_PIN_NO(14) | 7)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_DBPI_D2 (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_SPI5_MO (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_PCM0_DO (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_MD_URXD1 (MTK_PIN_NO(15) | 4)
+#define PINMUX_GPIO15__FUNC_ANT_SEL5 (MTK_PIN_NO(15) | 5)
+#define PINMUX_GPIO15__FUNC_I2S0_LRCK (MTK_PIN_NO(15) | 6)
+#define PINMUX_GPIO15__FUNC_DBG_MON_B17 (MTK_PIN_NO(15) | 7)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_DBPI_D3 (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_SPI5_CLK (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_PCM0_DI (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_MD_UTXD1 (MTK_PIN_NO(16) | 4)
+#define PINMUX_GPIO16__FUNC_ANT_SEL6 (MTK_PIN_NO(16) | 5)
+#define PINMUX_GPIO16__FUNC_I2S0_DI (MTK_PIN_NO(16) | 6)
+#define PINMUX_GPIO16__FUNC_DBG_MON_B23 (MTK_PIN_NO(16) | 7)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_DBPI_D4 (MTK_PIN_NO(17) | 1)
+#define PINMUX_GPIO17__FUNC_SPI4_MI (MTK_PIN_NO(17) | 2)
+#define PINMUX_GPIO17__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_MD_INT0 (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_ANT_SEL7 (MTK_PIN_NO(17) | 5)
+#define PINMUX_GPIO17__FUNC_I2S3_MCK (MTK_PIN_NO(17) | 6)
+#define PINMUX_GPIO17__FUNC_DBG_MON_A1 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_DBPI_D5 (MTK_PIN_NO(18) | 1)
+#define PINMUX_GPIO18__FUNC_SPI4_CSB (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(18) | 3)
+#define PINMUX_GPIO18__FUNC_MD_INT0 (MTK_PIN_NO(18) | 4)
+#define PINMUX_GPIO18__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(18) | 5)
+#define PINMUX_GPIO18__FUNC_I2S3_BCK (MTK_PIN_NO(18) | 6)
+#define PINMUX_GPIO18__FUNC_DBG_MON_A2 (MTK_PIN_NO(18) | 7)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_DBPI_D6 (MTK_PIN_NO(19) | 1)
+#define PINMUX_GPIO19__FUNC_SPI4_MO (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_CONN_MCU_TDO (MTK_PIN_NO(19) | 3)
+#define PINMUX_GPIO19__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(19) | 4)
+#define PINMUX_GPIO19__FUNC_URXD1 (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_I2S3_LRCK (MTK_PIN_NO(19) | 6)
+#define PINMUX_GPIO19__FUNC_DBG_MON_A3 (MTK_PIN_NO(19) | 7)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_DBPI_D7 (MTK_PIN_NO(20) | 1)
+#define PINMUX_GPIO20__FUNC_SPI4_CLK (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(20) | 3)
+#define PINMUX_GPIO20__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(20) | 4)
+#define PINMUX_GPIO20__FUNC_UTXD1 (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_I2S3_DO (MTK_PIN_NO(20) | 6)
+#define PINMUX_GPIO20__FUNC_DBG_MON_A19 (MTK_PIN_NO(20) | 7)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_DBPI_D8 (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_SPI3_MI (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_CONN_MCU_TMS (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_DAP_MD32_SWD (MTK_PIN_NO(21) | 4)
+#define PINMUX_GPIO21__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(21) | 5)
+#define PINMUX_GPIO21__FUNC_I2S2_MCK (MTK_PIN_NO(21) | 6)
+#define PINMUX_GPIO21__FUNC_DBG_MON_B5 (MTK_PIN_NO(21) | 7)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_DBPI_D9 (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_SPI3_CSB (MTK_PIN_NO(22) | 2)
+#define PINMUX_GPIO22__FUNC_CONN_MCU_TCK (MTK_PIN_NO(22) | 3)
+#define PINMUX_GPIO22__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(22) | 4)
+#define PINMUX_GPIO22__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_I2S2_BCK (MTK_PIN_NO(22) | 6)
+#define PINMUX_GPIO22__FUNC_DBG_MON_B6 (MTK_PIN_NO(22) | 7)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_DBPI_D10 (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_SPI3_MO (MTK_PIN_NO(23) | 2)
+#define PINMUX_GPIO23__FUNC_CONN_MCU_TDI (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_UCTS1 (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_I2S2_LRCK (MTK_PIN_NO(23) | 6)
+#define PINMUX_GPIO23__FUNC_DBG_MON_B7 (MTK_PIN_NO(23) | 7)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_DBPI_D11 (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_SPI3_CLK (MTK_PIN_NO(24) | 2)
+#define PINMUX_GPIO24__FUNC_SRCLKENAI0 (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_URTS1 (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_IO_JTAG_TCK (MTK_PIN_NO(24) | 5)
+#define PINMUX_GPIO24__FUNC_I2S2_DI (MTK_PIN_NO(24) | 6)
+#define PINMUX_GPIO24__FUNC_DBG_MON_B31 (MTK_PIN_NO(24) | 7)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_DBPI_HSYNC (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_ANT_SEL0 (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_SCL6 (MTK_PIN_NO(25) | 3)
+#define PINMUX_GPIO25__FUNC_KPCOL2 (MTK_PIN_NO(25) | 4)
+#define PINMUX_GPIO25__FUNC_IO_JTAG_TMS (MTK_PIN_NO(25) | 5)
+#define PINMUX_GPIO25__FUNC_I2S1_MCK (MTK_PIN_NO(25) | 6)
+#define PINMUX_GPIO25__FUNC_DBG_MON_B0 (MTK_PIN_NO(25) | 7)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_DBPI_VSYNC (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_ANT_SEL1 (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_SDA6 (MTK_PIN_NO(26) | 3)
+#define PINMUX_GPIO26__FUNC_KPROW2 (MTK_PIN_NO(26) | 4)
+#define PINMUX_GPIO26__FUNC_IO_JTAG_TDI (MTK_PIN_NO(26) | 5)
+#define PINMUX_GPIO26__FUNC_I2S1_BCK (MTK_PIN_NO(26) | 6)
+#define PINMUX_GPIO26__FUNC_DBG_MON_B1 (MTK_PIN_NO(26) | 7)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_DBPI_DE (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_ANT_SEL2 (MTK_PIN_NO(27) | 2)
+#define PINMUX_GPIO27__FUNC_SCL7 (MTK_PIN_NO(27) | 3)
+#define PINMUX_GPIO27__FUNC_DMIC_CLK (MTK_PIN_NO(27) | 4)
+#define PINMUX_GPIO27__FUNC_IO_JTAG_TDO (MTK_PIN_NO(27) | 5)
+#define PINMUX_GPIO27__FUNC_I2S1_LRCK (MTK_PIN_NO(27) | 6)
+#define PINMUX_GPIO27__FUNC_DBG_MON_B9 (MTK_PIN_NO(27) | 7)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_DBPI_CK (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(28) | 2)
+#define PINMUX_GPIO28__FUNC_SDA7 (MTK_PIN_NO(28) | 3)
+#define PINMUX_GPIO28__FUNC_DMIC_DAT (MTK_PIN_NO(28) | 4)
+#define PINMUX_GPIO28__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(28) | 5)
+#define PINMUX_GPIO28__FUNC_I2S1_DO (MTK_PIN_NO(28) | 6)
+#define PINMUX_GPIO28__FUNC_DBG_MON_B32 (MTK_PIN_NO(28) | 7)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_MSDC1_CLK (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_IO_JTAG_TCK (MTK_PIN_NO(29) | 2)
+#define PINMUX_GPIO29__FUNC_UDI_TCK (MTK_PIN_NO(29) | 3)
+#define PINMUX_GPIO29__FUNC_CONN_DSP_JCK (MTK_PIN_NO(29) | 4)
+#define PINMUX_GPIO29__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(29) | 5)
+#define PINMUX_GPIO29__FUNC_PCM1_CLK (MTK_PIN_NO(29) | 6)
+#define PINMUX_GPIO29__FUNC_DBG_MON_A6 (MTK_PIN_NO(29) | 7)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_MSDC1_DAT3 (MTK_PIN_NO(30) | 1)
+#define PINMUX_GPIO30__FUNC_DAP_MD32_SWD (MTK_PIN_NO(30) | 2)
+#define PINMUX_GPIO30__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(30) | 3)
+#define PINMUX_GPIO30__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(30) | 4)
+#define PINMUX_GPIO30__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(30) | 5)
+#define PINMUX_GPIO30__FUNC_PCM1_DI (MTK_PIN_NO(30) | 6)
+#define PINMUX_GPIO30__FUNC_DBG_MON_A7 (MTK_PIN_NO(30) | 7)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_MSDC1_CMD (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_IO_JTAG_TMS (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_UDI_TMS (MTK_PIN_NO(31) | 3)
+#define PINMUX_GPIO31__FUNC_CONN_DSP_JMS (MTK_PIN_NO(31) | 4)
+#define PINMUX_GPIO31__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(31) | 5)
+#define PINMUX_GPIO31__FUNC_PCM1_SYNC (MTK_PIN_NO(31) | 6)
+#define PINMUX_GPIO31__FUNC_DBG_MON_A8 (MTK_PIN_NO(31) | 7)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_MSDC1_DAT0 (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_IO_JTAG_TDI (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_UDI_TDI (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_CONN_DSP_JDI (MTK_PIN_NO(32) | 4)
+#define PINMUX_GPIO32__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(32) | 5)
+#define PINMUX_GPIO32__FUNC_PCM1_DO0 (MTK_PIN_NO(32) | 6)
+#define PINMUX_GPIO32__FUNC_DBG_MON_A9 (MTK_PIN_NO(32) | 7)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_MSDC1_DAT2 (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_IO_JTAG_TRSTN (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_UDI_NTRST (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(33) | 4)
+#define PINMUX_GPIO33__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(33) | 5)
+#define PINMUX_GPIO33__FUNC_PCM1_DO2 (MTK_PIN_NO(33) | 6)
+#define PINMUX_GPIO33__FUNC_DBG_MON_A10 (MTK_PIN_NO(33) | 7)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_MSDC1_DAT1 (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_IO_JTAG_TDO (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_UDI_TDO (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_CONN_DSP_JDO (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(34) | 5)
+#define PINMUX_GPIO34__FUNC_PCM1_DO1 (MTK_PIN_NO(34) | 6)
+#define PINMUX_GPIO34__FUNC_DBG_MON_A11 (MTK_PIN_NO(34) | 7)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_CCU_JTAG_TDO (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(35) | 5)
+#define PINMUX_GPIO35__FUNC_CONN_DSP_JMS (MTK_PIN_NO(35) | 6)
+#define PINMUX_GPIO35__FUNC_DBG_MON_A28 (MTK_PIN_NO(35) | 7)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_CCU_JTAG_TMS (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(36) | 3)
+#define PINMUX_GPIO36__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(36) | 6)
+#define PINMUX_GPIO36__FUNC_DBG_MON_A29 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_CCU_JTAG_TDI (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(37) | 3)
+#define PINMUX_GPIO37__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_CONN_DSP_JDO (MTK_PIN_NO(37) | 6)
+#define PINMUX_GPIO37__FUNC_DBG_MON_A30 (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_MD1_SIM1_SCLK (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_MD1_SIM2_SCLK (MTK_PIN_NO(38) | 3)
+#define PINMUX_GPIO38__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_DBG_MON_A20 (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_MD1_SIM1_SRST (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_CCU_JTAG_TCK (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_MD1_SIM2_SRST (MTK_PIN_NO(39) | 3)
+#define PINMUX_GPIO39__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_CONN_DSP_JCK (MTK_PIN_NO(39) | 6)
+#define PINMUX_GPIO39__FUNC_DBG_MON_A31 (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_MD1_SIM1_SIO (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_CCU_JTAG_TRST (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_MD1_SIM2_SIO (MTK_PIN_NO(40) | 3)
+#define PINMUX_GPIO40__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(40) | 5)
+#define PINMUX_GPIO40__FUNC_CONN_DSP_JDI (MTK_PIN_NO(40) | 6)
+#define PINMUX_GPIO40__FUNC_DBG_MON_A32 (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_IDDIG (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_URXD1 (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_UCTS0 (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(41) | 4)
+#define PINMUX_GPIO41__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(41) | 5)
+#define PINMUX_GPIO41__FUNC_DMIC_CLK (MTK_PIN_NO(41) | 6)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_USB_DRVVBUS (MTK_PIN_NO(42) | 1)
+#define PINMUX_GPIO42__FUNC_UTXD1 (MTK_PIN_NO(42) | 2)
+#define PINMUX_GPIO42__FUNC_URTS0 (MTK_PIN_NO(42) | 3)
+#define PINMUX_GPIO42__FUNC_SSPM_URXD_AO (MTK_PIN_NO(42) | 4)
+#define PINMUX_GPIO42__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(42) | 5)
+#define PINMUX_GPIO42__FUNC_DMIC_DAT (MTK_PIN_NO(42) | 6)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_DISP_PWM (MTK_PIN_NO(43) | 1)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_DSI_TE (MTK_PIN_NO(44) | 1)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_LCM_RST (MTK_PIN_NO(45) | 1)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_URXD1 (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_UCTS1 (MTK_PIN_NO(46) | 3)
+#define PINMUX_GPIO46__FUNC_CCU_UTXD_AO (MTK_PIN_NO(46) | 4)
+#define PINMUX_GPIO46__FUNC_TP_UCTS1_AO (MTK_PIN_NO(46) | 5)
+#define PINMUX_GPIO46__FUNC_IDDIG (MTK_PIN_NO(46) | 6)
+#define PINMUX_GPIO46__FUNC_I2S5_LRCK (MTK_PIN_NO(46) | 7)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_UTXD1 (MTK_PIN_NO(47) | 2)
+#define PINMUX_GPIO47__FUNC_URTS1 (MTK_PIN_NO(47) | 3)
+#define PINMUX_GPIO47__FUNC_CCU_URXD_AO (MTK_PIN_NO(47) | 4)
+#define PINMUX_GPIO47__FUNC_TP_URTS1_AO (MTK_PIN_NO(47) | 5)
+#define PINMUX_GPIO47__FUNC_USB_DRVVBUS (MTK_PIN_NO(47) | 6)
+#define PINMUX_GPIO47__FUNC_I2S5_DO (MTK_PIN_NO(47) | 7)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_SCL5 (MTK_PIN_NO(48) | 1)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_SDA5 (MTK_PIN_NO(49) | 1)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_SCL3 (MTK_PIN_NO(50) | 1)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_SDA3 (MTK_PIN_NO(51) | 1)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_BPI_ANT2 (MTK_PIN_NO(52) | 1)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_BPI_ANT0 (MTK_PIN_NO(53) | 1)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_BPI_OLAT1 (MTK_PIN_NO(54) | 1)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_BPI_BUS8 (MTK_PIN_NO(55) | 1)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_BPI_BUS9 (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_SCL_6306 (MTK_PIN_NO(56) | 2)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_BPI_BUS10 (MTK_PIN_NO(57) | 1)
+#define PINMUX_GPIO57__FUNC_SDA_6306 (MTK_PIN_NO(57) | 2)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_RFIC0_BSI_D2 (MTK_PIN_NO(58) | 1)
+#define PINMUX_GPIO58__FUNC_SPM_BSI_D2 (MTK_PIN_NO(58) | 2)
+#define PINMUX_GPIO58__FUNC_PWM_B (MTK_PIN_NO(58) | 3)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_RFIC0_BSI_D1 (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_SPM_BSI_D1 (MTK_PIN_NO(59) | 2)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_RFIC0_BSI_D0 (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_SPM_BSI_D0 (MTK_PIN_NO(60) | 2)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_MIPI1_SDATA (MTK_PIN_NO(61) | 1)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_MIPI1_SCLK (MTK_PIN_NO(62) | 1)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_MIPI0_SDATA (MTK_PIN_NO(63) | 1)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_MIPI0_SCLK (MTK_PIN_NO(64) | 1)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_MIPI3_SDATA (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_BPI_OLAT2 (MTK_PIN_NO(65) | 2)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_MIPI3_SCLK (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_BPI_OLAT3 (MTK_PIN_NO(66) | 2)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_MIPI2_SDATA (MTK_PIN_NO(67) | 1)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_MIPI2_SCLK (MTK_PIN_NO(68) | 1)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_BPI_BUS7 (MTK_PIN_NO(69) | 1)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_BPI_BUS6 (MTK_PIN_NO(70) | 1)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_BPI_BUS5 (MTK_PIN_NO(71) | 1)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_BPI_BUS4 (MTK_PIN_NO(72) | 1)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_BPI_BUS3 (MTK_PIN_NO(73) | 1)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_BPI_BUS2 (MTK_PIN_NO(74) | 1)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_BPI_BUS1 (MTK_PIN_NO(75) | 1)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_BPI_BUS0 (MTK_PIN_NO(76) | 1)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_BPI_ANT1 (MTK_PIN_NO(77) | 1)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_BPI_OLAT0 (MTK_PIN_NO(78) | 1)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_BPI_PA_VM1 (MTK_PIN_NO(79) | 1)
+#define PINMUX_GPIO79__FUNC_MIPI4_SDATA (MTK_PIN_NO(79) | 2)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_BPI_PA_VM0 (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_MIPI4_SCLK (MTK_PIN_NO(80) | 2)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_SDA1 (MTK_PIN_NO(81) | 1)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_SDA0 (MTK_PIN_NO(82) | 1)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_SCL0 (MTK_PIN_NO(83) | 1)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_SCL1 (MTK_PIN_NO(84) | 1)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_SPI0_MI (MTK_PIN_NO(85) | 1)
+#define PINMUX_GPIO85__FUNC_SCP_SPI0_MI (MTK_PIN_NO(85) | 2)
+#define PINMUX_GPIO85__FUNC_CLKM3 (MTK_PIN_NO(85) | 3)
+#define PINMUX_GPIO85__FUNC_I2S1_BCK (MTK_PIN_NO(85) | 4)
+#define PINMUX_GPIO85__FUNC_MFG_DFD_JTAG_TDO (MTK_PIN_NO(85) | 5)
+#define PINMUX_GPIO85__FUNC_DFD_TDO (MTK_PIN_NO(85) | 6)
+#define PINMUX_GPIO85__FUNC_JTDO_SEL1 (MTK_PIN_NO(85) | 7)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_SPI0_CSB (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_SCP_SPI0_CS (MTK_PIN_NO(86) | 2)
+#define PINMUX_GPIO86__FUNC_CLKM0 (MTK_PIN_NO(86) | 3)
+#define PINMUX_GPIO86__FUNC_I2S1_LRCK (MTK_PIN_NO(86) | 4)
+#define PINMUX_GPIO86__FUNC_MFG_DFD_JTAG_TMS (MTK_PIN_NO(86) | 5)
+#define PINMUX_GPIO86__FUNC_DFD_TMS (MTK_PIN_NO(86) | 6)
+#define PINMUX_GPIO86__FUNC_JTMS_SEL1 (MTK_PIN_NO(86) | 7)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_SPI0_MO (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_SCP_SPI0_MO (MTK_PIN_NO(87) | 2)
+#define PINMUX_GPIO87__FUNC_SDA1 (MTK_PIN_NO(87) | 3)
+#define PINMUX_GPIO87__FUNC_I2S1_DO (MTK_PIN_NO(87) | 4)
+#define PINMUX_GPIO87__FUNC_MFG_DFD_JTAG_TDI (MTK_PIN_NO(87) | 5)
+#define PINMUX_GPIO87__FUNC_DFD_TDI (MTK_PIN_NO(87) | 6)
+#define PINMUX_GPIO87__FUNC_JTDI_SEL1 (MTK_PIN_NO(87) | 7)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_SPI0_CLK (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_SCP_SPI0_CK (MTK_PIN_NO(88) | 2)
+#define PINMUX_GPIO88__FUNC_SCL1 (MTK_PIN_NO(88) | 3)
+#define PINMUX_GPIO88__FUNC_I2S1_MCK (MTK_PIN_NO(88) | 4)
+#define PINMUX_GPIO88__FUNC_MFG_DFD_JTAG_TCK (MTK_PIN_NO(88) | 5)
+#define PINMUX_GPIO88__FUNC_DFD_TCK_XI (MTK_PIN_NO(88) | 6)
+#define PINMUX_GPIO88__FUNC_JTCK_SEL1 (MTK_PIN_NO(88) | 7)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_SRCLKENAI0 (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_PWM_C (MTK_PIN_NO(89) | 2)
+#define PINMUX_GPIO89__FUNC_I2S5_BCK (MTK_PIN_NO(89) | 3)
+#define PINMUX_GPIO89__FUNC_ANT_SEL6 (MTK_PIN_NO(89) | 4)
+#define PINMUX_GPIO89__FUNC_SDA8 (MTK_PIN_NO(89) | 5)
+#define PINMUX_GPIO89__FUNC_CMVREF0 (MTK_PIN_NO(89) | 6)
+#define PINMUX_GPIO89__FUNC_DBG_MON_A21 (MTK_PIN_NO(89) | 7)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_PWM_A (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_CMMCLK2 (MTK_PIN_NO(90) | 2)
+#define PINMUX_GPIO90__FUNC_I2S5_LRCK (MTK_PIN_NO(90) | 3)
+#define PINMUX_GPIO90__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(90) | 4)
+#define PINMUX_GPIO90__FUNC_SCL8 (MTK_PIN_NO(90) | 5)
+#define PINMUX_GPIO90__FUNC_PTA_RXD (MTK_PIN_NO(90) | 6)
+#define PINMUX_GPIO90__FUNC_DBG_MON_A22 (MTK_PIN_NO(90) | 7)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_KPROW1 (MTK_PIN_NO(91) | 1)
+#define PINMUX_GPIO91__FUNC_PWM_B (MTK_PIN_NO(91) | 2)
+#define PINMUX_GPIO91__FUNC_I2S5_DO (MTK_PIN_NO(91) | 3)
+#define PINMUX_GPIO91__FUNC_ANT_SEL7 (MTK_PIN_NO(91) | 4)
+#define PINMUX_GPIO91__FUNC_CMMCLK3 (MTK_PIN_NO(91) | 5)
+#define PINMUX_GPIO91__FUNC_PTA_TXD (MTK_PIN_NO(91) | 6)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_KPROW0 (MTK_PIN_NO(92) | 1)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_KPCOL0 (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_DBG_MON_B27 (MTK_PIN_NO(93) | 7)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_KPCOL1 (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_I2S2_DI2 (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_I2S5_MCK (MTK_PIN_NO(94) | 3)
+#define PINMUX_GPIO94__FUNC_CMMCLK2 (MTK_PIN_NO(94) | 4)
+#define PINMUX_GPIO94__FUNC_SCP_SPI2_MI (MTK_PIN_NO(94) | 5)
+#define PINMUX_GPIO94__FUNC_SRCLKENAI1 (MTK_PIN_NO(94) | 6)
+#define PINMUX_GPIO94__FUNC_SPI2_MI (MTK_PIN_NO(94) | 7)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_URXD0 (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_UTXD0 (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_MD_URXD0 (MTK_PIN_NO(95) | 3)
+#define PINMUX_GPIO95__FUNC_MD_URXD1 (MTK_PIN_NO(95) | 4)
+#define PINMUX_GPIO95__FUNC_SSPM_URXD_AO (MTK_PIN_NO(95) | 5)
+#define PINMUX_GPIO95__FUNC_CCU_URXD_AO (MTK_PIN_NO(95) | 6)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_UTXD0 (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_URXD0 (MTK_PIN_NO(96) | 2)
+#define PINMUX_GPIO96__FUNC_MD_UTXD0 (MTK_PIN_NO(96) | 3)
+#define PINMUX_GPIO96__FUNC_MD_UTXD1 (MTK_PIN_NO(96) | 4)
+#define PINMUX_GPIO96__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(96) | 5)
+#define PINMUX_GPIO96__FUNC_CCU_UTXD_AO (MTK_PIN_NO(96) | 6)
+#define PINMUX_GPIO96__FUNC_DBG_MON_B2 (MTK_PIN_NO(96) | 7)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_UCTS0 (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_I2S2_MCK (MTK_PIN_NO(97) | 2)
+#define PINMUX_GPIO97__FUNC_IDDIG (MTK_PIN_NO(97) | 3)
+#define PINMUX_GPIO97__FUNC_CONN_MCU_TDO (MTK_PIN_NO(97) | 4)
+#define PINMUX_GPIO97__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(97) | 5)
+#define PINMUX_GPIO97__FUNC_IO_JTAG_TDO (MTK_PIN_NO(97) | 6)
+#define PINMUX_GPIO97__FUNC_DBG_MON_B3 (MTK_PIN_NO(97) | 7)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_URTS0 (MTK_PIN_NO(98) | 1)
+#define PINMUX_GPIO98__FUNC_I2S2_BCK (MTK_PIN_NO(98) | 2)
+#define PINMUX_GPIO98__FUNC_USB_DRVVBUS (MTK_PIN_NO(98) | 3)
+#define PINMUX_GPIO98__FUNC_CONN_MCU_TMS (MTK_PIN_NO(98) | 4)
+#define PINMUX_GPIO98__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(98) | 5)
+#define PINMUX_GPIO98__FUNC_IO_JTAG_TMS (MTK_PIN_NO(98) | 6)
+#define PINMUX_GPIO98__FUNC_DBG_MON_B4 (MTK_PIN_NO(98) | 7)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_CMMCLK0 (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(99) | 4)
+#define PINMUX_GPIO99__FUNC_DBG_MON_B28 (MTK_PIN_NO(99) | 7)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_CMMCLK1 (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_PWM_C (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_MD_INT1_C2K_UIM0_HOT_PLUG (MTK_PIN_NO(100) | 3)
+#define PINMUX_GPIO100__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(100) | 4)
+#define PINMUX_GPIO100__FUNC_DBG_MON_B29 (MTK_PIN_NO(100) | 7)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_CLKM2 (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_I2S2_LRCK (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_CMVREF1 (MTK_PIN_NO(101) | 3)
+#define PINMUX_GPIO101__FUNC_CONN_MCU_TCK (MTK_PIN_NO(101) | 4)
+#define PINMUX_GPIO101__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(101) | 5)
+#define PINMUX_GPIO101__FUNC_IO_JTAG_TCK (MTK_PIN_NO(101) | 6)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_CLKM1 (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_I2S2_DI (MTK_PIN_NO(102) | 2)
+#define PINMUX_GPIO102__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_CONN_MCU_TDI (MTK_PIN_NO(102) | 4)
+#define PINMUX_GPIO102__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(102) | 5)
+#define PINMUX_GPIO102__FUNC_IO_JTAG_TDI (MTK_PIN_NO(102) | 6)
+#define PINMUX_GPIO102__FUNC_DBG_MON_B8 (MTK_PIN_NO(102) | 7)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_SCL2 (MTK_PIN_NO(103) | 1)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_SDA2 (MTK_PIN_NO(104) | 1)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_SCL4 (MTK_PIN_NO(105) | 1)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_SDA4 (MTK_PIN_NO(106) | 1)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_DMIC_CLK (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_ANT_SEL0 (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_CLKM0 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_SDA7 (MTK_PIN_NO(107) | 4)
+#define PINMUX_GPIO107__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(107) | 5)
+#define PINMUX_GPIO107__FUNC_PWM_A (MTK_PIN_NO(107) | 6)
+#define PINMUX_GPIO107__FUNC_DBG_MON_B12 (MTK_PIN_NO(107) | 7)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_CMMCLK2 (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_ANT_SEL1 (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_CLKM1 (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_SCL8 (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_DAP_MD32_SWD (MTK_PIN_NO(108) | 5)
+#define PINMUX_GPIO108__FUNC_PWM_B (MTK_PIN_NO(108) | 6)
+#define PINMUX_GPIO108__FUNC_DBG_MON_B13 (MTK_PIN_NO(108) | 7)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_DMIC_DAT (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_ANT_SEL2 (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_CLKM2 (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_SDA8 (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_PWM_C (MTK_PIN_NO(109) | 6)
+#define PINMUX_GPIO109__FUNC_DBG_MON_B14 (MTK_PIN_NO(109) | 7)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_SCL7 (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_ANT_SEL0 (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_TP_URXD1_AO (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_USB_DRVVBUS (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_SRCLKENAI1 (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_KPCOL2 (MTK_PIN_NO(110) | 6)
+#define PINMUX_GPIO110__FUNC_URXD1 (MTK_PIN_NO(110) | 7)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_CMMCLK3 (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_ANT_SEL1 (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_SRCLKENAI0 (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_MD_INT2_C2K_UIM1_HOT_PLUG (MTK_PIN_NO(111) | 5)
+#define PINMUX_GPIO111__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(111) | 7)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_SDA7 (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_ANT_SEL2 (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_TP_UTXD1_AO (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_IDDIG (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_AGPS_SYNC (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_KPROW2 (MTK_PIN_NO(112) | 6)
+#define PINMUX_GPIO112__FUNC_UTXD1 (MTK_PIN_NO(112) | 7)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_CONN_TOP_CLK (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_SCL6 (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_AUXIF_CLK0 (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_TP_UCTS1_AO (MTK_PIN_NO(113) | 6)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_CONN_TOP_DATA (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_SDA6 (MTK_PIN_NO(114) | 3)
+#define PINMUX_GPIO114__FUNC_AUXIF_ST0 (MTK_PIN_NO(114) | 4)
+#define PINMUX_GPIO114__FUNC_TP_URTS1_AO (MTK_PIN_NO(114) | 6)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_CONN_BT_CLK (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_UTXD1 (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_PTA_TXD (MTK_PIN_NO(115) | 3)
+#define PINMUX_GPIO115__FUNC_AUXIF_CLK1 (MTK_PIN_NO(115) | 4)
+#define PINMUX_GPIO115__FUNC_DAP_MD32_SWD (MTK_PIN_NO(115) | 5)
+#define PINMUX_GPIO115__FUNC_TP_UTXD1_AO (MTK_PIN_NO(115) | 6)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_CONN_BT_DATA (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_IPU_JTAG_TRST (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_AUXIF_ST1 (MTK_PIN_NO(116) | 4)
+#define PINMUX_GPIO116__FUNC_DAP_MD32_SWCK (MTK_PIN_NO(116) | 5)
+#define PINMUX_GPIO116__FUNC_TP_URXD2_AO (MTK_PIN_NO(116) | 6)
+#define PINMUX_GPIO116__FUNC_DBG_MON_A0 (MTK_PIN_NO(116) | 7)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_CONN_WF_HB0 (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_IPU_JTAG_TDO (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_TP_UTXD2_AO (MTK_PIN_NO(117) | 6)
+#define PINMUX_GPIO117__FUNC_DBG_MON_A4 (MTK_PIN_NO(117) | 7)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_CONN_WF_HB1 (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_IPU_JTAG_TDI (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_SSPM_URXD_AO (MTK_PIN_NO(118) | 5)
+#define PINMUX_GPIO118__FUNC_TP_UCTS2_AO (MTK_PIN_NO(118) | 6)
+#define PINMUX_GPIO118__FUNC_DBG_MON_A5 (MTK_PIN_NO(118) | 7)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_CONN_WF_HB2 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_IPU_JTAG_TCK (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(119) | 5)
+#define PINMUX_GPIO119__FUNC_TP_URTS2_AO (MTK_PIN_NO(119) | 6)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_CONN_WB_PTA (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_IPU_JTAG_TMS (MTK_PIN_NO(120) | 2)
+#define PINMUX_GPIO120__FUNC_CCU_URXD_AO (MTK_PIN_NO(120) | 5)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_CONN_HRST_B (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_URXD1 (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_PTA_RXD (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_CCU_UTXD_AO (MTK_PIN_NO(121) | 5)
+#define PINMUX_GPIO121__FUNC_TP_URXD1_AO (MTK_PIN_NO(121) | 6)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_MSDC0_CMD (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_SSPM_URXD2_AO (MTK_PIN_NO(122) | 2)
+#define PINMUX_GPIO122__FUNC_ANT_SEL1 (MTK_PIN_NO(122) | 3)
+#define PINMUX_GPIO122__FUNC_DBG_MON_A12 (MTK_PIN_NO(122) | 7)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_MSDC0_DAT0 (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_ANT_SEL0 (MTK_PIN_NO(123) | 3)
+#define PINMUX_GPIO123__FUNC_DBG_MON_A13 (MTK_PIN_NO(123) | 7)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_MSDC0_CLK (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_DBG_MON_A14 (MTK_PIN_NO(124) | 7)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_MSDC0_DAT2 (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_MRG_CLK (MTK_PIN_NO(125) | 3)
+#define PINMUX_GPIO125__FUNC_DBG_MON_A15 (MTK_PIN_NO(125) | 7)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_MSDC0_DAT4 (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_ANT_SEL5 (MTK_PIN_NO(126) | 3)
+#define PINMUX_GPIO126__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(126) | 6)
+#define PINMUX_GPIO126__FUNC_DBG_MON_A16 (MTK_PIN_NO(126) | 7)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_MSDC0_DAT6 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_ANT_SEL4 (MTK_PIN_NO(127) | 3)
+#define PINMUX_GPIO127__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(127) | 6)
+#define PINMUX_GPIO127__FUNC_DBG_MON_A17 (MTK_PIN_NO(127) | 7)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_MSDC0_DAT1 (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_ANT_SEL2 (MTK_PIN_NO(128) | 3)
+#define PINMUX_GPIO128__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(128) | 6)
+#define PINMUX_GPIO128__FUNC_DBG_MON_A18 (MTK_PIN_NO(128) | 7)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_MSDC0_DAT5 (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_ANT_SEL3 (MTK_PIN_NO(129) | 3)
+#define PINMUX_GPIO129__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(129) | 6)
+#define PINMUX_GPIO129__FUNC_DBG_MON_A23 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_MSDC0_DAT7 (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_MRG_DO (MTK_PIN_NO(130) | 3)
+#define PINMUX_GPIO130__FUNC_DBG_MON_A24 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_MSDC0_DSL (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_MRG_SYNC (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_DBG_MON_A25 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_MSDC0_DAT3 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_MRG_DI (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_DBG_MON_A26 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_MSDC0_RSTB (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_AGPS_SYNC (MTK_PIN_NO(133) | 3)
+#define PINMUX_GPIO133__FUNC_DBG_MON_A27 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_RTC32K_CK (MTK_PIN_NO(134) | 1)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_WATCHDOG (MTK_PIN_NO(135) | 1)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_AUD_CLK_MISO (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_I2S1_MCK (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_UFS_UNIPRO_SCL (MTK_PIN_NO(136) | 6)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_I2S1_BCK (MTK_PIN_NO(137) | 3)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_I2S1_LRCK (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_DBG_MON_B24 (MTK_PIN_NO(138) | 7)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_I2S1_DO (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_UFS_MPHY_SDA (MTK_PIN_NO(139) | 6)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_AUD_CLK_MISO (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_I2S0_MCK (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_UFS_UNIPRO_SDA (MTK_PIN_NO(140) | 6)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_I2S0_BCK (MTK_PIN_NO(141) | 3)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_I2S0_LRCK (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_VOW_DAT_MISO (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_DBG_MON_B25 (MTK_PIN_NO(142) | 7)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(143) | 2)
+#define PINMUX_GPIO143__FUNC_I2S0_DI (MTK_PIN_NO(143) | 3)
+#define PINMUX_GPIO143__FUNC_VOW_CLK_MISO (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_UFS_MPHY_SCL (MTK_PIN_NO(143) | 6)
+#define PINMUX_GPIO143__FUNC_DBG_MON_B26 (MTK_PIN_NO(143) | 7)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(144) | 2)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(145) | 1)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(146) | 2)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(147) | 1)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_SRCLKENA0 (MTK_PIN_NO(148) | 1)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_SRCLKENA1 (MTK_PIN_NO(149) | 1)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_PWM_A (MTK_PIN_NO(150) | 1)
+#define PINMUX_GPIO150__FUNC_CMFLASH (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_CLKM0 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_DBG_MON_B30 (MTK_PIN_NO(150) | 7)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_PWM_B (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_CMVREF0 (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_CLKM1 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_DBG_MON_B20 (MTK_PIN_NO(151) | 7)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_PWM_C (MTK_PIN_NO(152) | 1)
+#define PINMUX_GPIO152__FUNC_CMFLASH (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_CLKM2 (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_DBG_MON_B21 (MTK_PIN_NO(152) | 7)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_PWM_A (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_CMVREF0 (MTK_PIN_NO(153) | 2)
+#define PINMUX_GPIO153__FUNC_CLKM3 (MTK_PIN_NO(153) | 3)
+#define PINMUX_GPIO153__FUNC_DBG_MON_B22 (MTK_PIN_NO(153) | 7)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(154) | 2)
+#define PINMUX_GPIO154__FUNC_DBG_MON_B18 (MTK_PIN_NO(154) | 7)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_ANT_SEL0 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(155) | 2)
+#define PINMUX_GPIO155__FUNC_CMVREF1 (MTK_PIN_NO(155) | 3)
+#define PINMUX_GPIO155__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(155) | 7)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_ANT_SEL1 (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_SRCLKENAI0 (MTK_PIN_NO(156) | 2)
+#define PINMUX_GPIO156__FUNC_SCL6 (MTK_PIN_NO(156) | 3)
+#define PINMUX_GPIO156__FUNC_KPCOL2 (MTK_PIN_NO(156) | 4)
+#define PINMUX_GPIO156__FUNC_IDDIG (MTK_PIN_NO(156) | 5)
+#define PINMUX_GPIO156__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(156) | 7)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_ANT_SEL2 (MTK_PIN_NO(157) | 1)
+#define PINMUX_GPIO157__FUNC_SRCLKENAI1 (MTK_PIN_NO(157) | 2)
+#define PINMUX_GPIO157__FUNC_SDA6 (MTK_PIN_NO(157) | 3)
+#define PINMUX_GPIO157__FUNC_KPROW2 (MTK_PIN_NO(157) | 4)
+#define PINMUX_GPIO157__FUNC_USB_DRVVBUS (MTK_PIN_NO(157) | 5)
+#define PINMUX_GPIO157__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(157) | 7)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_ANT_SEL3 (MTK_PIN_NO(158) | 1)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_ANT_SEL4 (MTK_PIN_NO(159) | 1)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_ANT_SEL5 (MTK_PIN_NO(160) | 1)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_SPI1_A_MI (MTK_PIN_NO(161) | 1)
+#define PINMUX_GPIO161__FUNC_SCP_SPI1_MI (MTK_PIN_NO(161) | 2)
+#define PINMUX_GPIO161__FUNC_IDDIG (MTK_PIN_NO(161) | 3)
+#define PINMUX_GPIO161__FUNC_ANT_SEL6 (MTK_PIN_NO(161) | 4)
+#define PINMUX_GPIO161__FUNC_KPCOL2 (MTK_PIN_NO(161) | 5)
+#define PINMUX_GPIO161__FUNC_PTA_RXD (MTK_PIN_NO(161) | 6)
+#define PINMUX_GPIO161__FUNC_DBG_MON_B19 (MTK_PIN_NO(161) | 7)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_SPI1_A_CSB (MTK_PIN_NO(162) | 1)
+#define PINMUX_GPIO162__FUNC_SCP_SPI1_CS (MTK_PIN_NO(162) | 2)
+#define PINMUX_GPIO162__FUNC_USB_DRVVBUS (MTK_PIN_NO(162) | 3)
+#define PINMUX_GPIO162__FUNC_ANT_SEL5 (MTK_PIN_NO(162) | 4)
+#define PINMUX_GPIO162__FUNC_KPROW2 (MTK_PIN_NO(162) | 5)
+#define PINMUX_GPIO162__FUNC_PTA_TXD (MTK_PIN_NO(162) | 6)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_SPI1_A_MO (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_SCP_SPI1_MO (MTK_PIN_NO(163) | 2)
+#define PINMUX_GPIO163__FUNC_SDA1 (MTK_PIN_NO(163) | 3)
+#define PINMUX_GPIO163__FUNC_ANT_SEL4 (MTK_PIN_NO(163) | 4)
+#define PINMUX_GPIO163__FUNC_CMMCLK2 (MTK_PIN_NO(163) | 5)
+#define PINMUX_GPIO163__FUNC_DMIC_CLK (MTK_PIN_NO(163) | 6)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_SPI1_A_CLK (MTK_PIN_NO(164) | 1)
+#define PINMUX_GPIO164__FUNC_SCP_SPI1_CK (MTK_PIN_NO(164) | 2)
+#define PINMUX_GPIO164__FUNC_SCL1 (MTK_PIN_NO(164) | 3)
+#define PINMUX_GPIO164__FUNC_ANT_SEL3 (MTK_PIN_NO(164) | 4)
+#define PINMUX_GPIO164__FUNC_CMMCLK3 (MTK_PIN_NO(164) | 5)
+#define PINMUX_GPIO164__FUNC_DMIC_DAT (MTK_PIN_NO(164) | 6)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_PWM_B (MTK_PIN_NO(165) | 1)
+#define PINMUX_GPIO165__FUNC_CMMCLK2 (MTK_PIN_NO(165) | 2)
+#define PINMUX_GPIO165__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(165) | 3)
+#define PINMUX_GPIO165__FUNC_TDM_MCK_2ND (MTK_PIN_NO(165) | 6)
+#define PINMUX_GPIO165__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(165) | 7)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_ANT_SEL6 (MTK_PIN_NO(166) | 1)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_RFIC0_BSI_EN (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_SPM_BSI_EN (MTK_PIN_NO(167) | 2)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_RFIC0_BSI_CK (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_SPM_BSI_CK (MTK_PIN_NO(168) | 2)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_PWM_C (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_CMMCLK3 (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_CMVREF1 (MTK_PIN_NO(169) | 3)
+#define PINMUX_GPIO169__FUNC_ANT_SEL7 (MTK_PIN_NO(169) | 4)
+#define PINMUX_GPIO169__FUNC_AGPS_SYNC (MTK_PIN_NO(169) | 5)
+#define PINMUX_GPIO169__FUNC_TDM_BCK_2ND (MTK_PIN_NO(169) | 6)
+#define PINMUX_GPIO169__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(169) | 7)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_I2S1_BCK (MTK_PIN_NO(170) | 1)
+#define PINMUX_GPIO170__FUNC_I2S3_BCK (MTK_PIN_NO(170) | 2)
+#define PINMUX_GPIO170__FUNC_SCL7 (MTK_PIN_NO(170) | 3)
+#define PINMUX_GPIO170__FUNC_I2S5_BCK (MTK_PIN_NO(170) | 4)
+#define PINMUX_GPIO170__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(170) | 5)
+#define PINMUX_GPIO170__FUNC_TDM_LRCK_2ND (MTK_PIN_NO(170) | 6)
+#define PINMUX_GPIO170__FUNC_ANT_SEL3 (MTK_PIN_NO(170) | 7)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_I2S1_LRCK (MTK_PIN_NO(171) | 1)
+#define PINMUX_GPIO171__FUNC_I2S3_LRCK (MTK_PIN_NO(171) | 2)
+#define PINMUX_GPIO171__FUNC_SDA7 (MTK_PIN_NO(171) | 3)
+#define PINMUX_GPIO171__FUNC_I2S5_LRCK (MTK_PIN_NO(171) | 4)
+#define PINMUX_GPIO171__FUNC_URXD1 (MTK_PIN_NO(171) | 5)
+#define PINMUX_GPIO171__FUNC_TDM_DATA0_2ND (MTK_PIN_NO(171) | 6)
+#define PINMUX_GPIO171__FUNC_ANT_SEL4 (MTK_PIN_NO(171) | 7)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_I2S1_DO (MTK_PIN_NO(172) | 1)
+#define PINMUX_GPIO172__FUNC_I2S3_DO (MTK_PIN_NO(172) | 2)
+#define PINMUX_GPIO172__FUNC_SCL8 (MTK_PIN_NO(172) | 3)
+#define PINMUX_GPIO172__FUNC_I2S5_DO (MTK_PIN_NO(172) | 4)
+#define PINMUX_GPIO172__FUNC_UTXD1 (MTK_PIN_NO(172) | 5)
+#define PINMUX_GPIO172__FUNC_TDM_DATA1_2ND (MTK_PIN_NO(172) | 6)
+#define PINMUX_GPIO172__FUNC_ANT_SEL5 (MTK_PIN_NO(172) | 7)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_I2S1_MCK (MTK_PIN_NO(173) | 1)
+#define PINMUX_GPIO173__FUNC_I2S3_MCK (MTK_PIN_NO(173) | 2)
+#define PINMUX_GPIO173__FUNC_SDA8 (MTK_PIN_NO(173) | 3)
+#define PINMUX_GPIO173__FUNC_I2S5_MCK (MTK_PIN_NO(173) | 4)
+#define PINMUX_GPIO173__FUNC_UCTS0 (MTK_PIN_NO(173) | 5)
+#define PINMUX_GPIO173__FUNC_TDM_DATA2_2ND (MTK_PIN_NO(173) | 6)
+#define PINMUX_GPIO173__FUNC_ANT_SEL6 (MTK_PIN_NO(173) | 7)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_I2S2_DI (MTK_PIN_NO(174) | 1)
+#define PINMUX_GPIO174__FUNC_I2S0_DI (MTK_PIN_NO(174) | 2)
+#define PINMUX_GPIO174__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(174) | 3)
+#define PINMUX_GPIO174__FUNC_I2S2_DI2 (MTK_PIN_NO(174) | 4)
+#define PINMUX_GPIO174__FUNC_URTS0 (MTK_PIN_NO(174) | 5)
+#define PINMUX_GPIO174__FUNC_TDM_DATA3_2ND (MTK_PIN_NO(174) | 6)
+#define PINMUX_GPIO174__FUNC_ANT_SEL7 (MTK_PIN_NO(174) | 7)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_ANT_SEL7 (MTK_PIN_NO(175) | 1)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+
+#endif /* __MT8183_PINFUNC_H */
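
For context, board device trees consume these constants through the MediaTek
pinctrl binding; a minimal, hypothetical .dts fragment (the node label and
group names are illustrative, only the PINMUX_* macros come from the header
added above) might look like:

	#include <dt-bindings/pinctrl/mt8183-pinfunc.h>

	&pio {
		uart0_pins: uart0-pins {
			pins-rx {
				/* GPIO95, mux 1: URXD0 */
				pinmux = <PINMUX_GPIO95__FUNC_URXD0>;
				input-enable;
			};
			pins-tx {
				/* GPIO96, mux 1: UTXD0 */
				pinmux = <PINMUX_GPIO96__FUNC_UTXD0>;
			};
		};
	};
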
diff --git a/include/dt-bindings/pinctrl/mt8186-pinfunc.h b/include/dt-bindings/pinctrl/mt8186-pinfunc.h
new file mode 100644
index 000000000000..18d6683c6f65
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt8186-pinfunc.h
@@ -0,0 +1,1174 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2021 MediaTek Inc.
+ * Author: Guodong Liu <Guodong.Liu@mediatek.com>
+ */
+
+#ifndef __MT8186_PINFUNC_H
+#define __MT8186_PINFUNC_H
+
+#include "mt65xx.h"
+
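+/*
+ * Each PINMUX_GPIOn__FUNC_<signal> constant below ORs the pin number,
+ * packed into the upper bits by MTK_PIN_NO() from mt65xx.h, with the
+ * mux function index (0-7) in the low bits; function 0 always selects
+ * plain GPIO mode.
+ */
+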
+#define PINMUX_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define PINMUX_GPIO0__FUNC_I2S0_MCK (MTK_PIN_NO(0) | 1)
+#define PINMUX_GPIO0__FUNC_SPI0_CLK_B (MTK_PIN_NO(0) | 2)
+#define PINMUX_GPIO0__FUNC_I2S2_MCK (MTK_PIN_NO(0) | 3)
+#define PINMUX_GPIO0__FUNC_CMFLASH0 (MTK_PIN_NO(0) | 4)
+#define PINMUX_GPIO0__FUNC_SCP_SPI0_CK (MTK_PIN_NO(0) | 5)
+#define PINMUX_GPIO0__FUNC_TP_GPIO0_AO (MTK_PIN_NO(0) | 6)
+#define PINMUX_GPIO0__FUNC_DBG_MON_A0 (MTK_PIN_NO(0) | 7)
+
+#define PINMUX_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define PINMUX_GPIO1__FUNC_I2S0_BCK (MTK_PIN_NO(1) | 1)
+#define PINMUX_GPIO1__FUNC_SPI0_CSB_B (MTK_PIN_NO(1) | 2)
+#define PINMUX_GPIO1__FUNC_I2S2_BCK (MTK_PIN_NO(1) | 3)
+#define PINMUX_GPIO1__FUNC_CMFLASH1 (MTK_PIN_NO(1) | 4)
+#define PINMUX_GPIO1__FUNC_SCP_SPI0_CS (MTK_PIN_NO(1) | 5)
+#define PINMUX_GPIO1__FUNC_TP_GPIO1_AO (MTK_PIN_NO(1) | 6)
+
+#define PINMUX_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define PINMUX_GPIO2__FUNC_I2S0_LRCK (MTK_PIN_NO(2) | 1)
+#define PINMUX_GPIO2__FUNC_SPI0_MO_B (MTK_PIN_NO(2) | 2)
+#define PINMUX_GPIO2__FUNC_I2S2_LRCK (MTK_PIN_NO(2) | 3)
+#define PINMUX_GPIO2__FUNC_CMFLASH2 (MTK_PIN_NO(2) | 4)
+#define PINMUX_GPIO2__FUNC_SCP_SPI0_MO (MTK_PIN_NO(2) | 5)
+#define PINMUX_GPIO2__FUNC_TP_GPIO2_AO (MTK_PIN_NO(2) | 6)
+
+#define PINMUX_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define PINMUX_GPIO3__FUNC_I2S0_DI (MTK_PIN_NO(3) | 1)
+#define PINMUX_GPIO3__FUNC_SPI0_MI_B (MTK_PIN_NO(3) | 2)
+#define PINMUX_GPIO3__FUNC_I2S2_DI (MTK_PIN_NO(3) | 3)
+#define PINMUX_GPIO3__FUNC_SRCLKENAI1 (MTK_PIN_NO(3) | 4)
+#define PINMUX_GPIO3__FUNC_SCP_SPI0_MI (MTK_PIN_NO(3) | 5)
+#define PINMUX_GPIO3__FUNC_TP_GPIO3_AO (MTK_PIN_NO(3) | 6)
+
+#define PINMUX_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define PINMUX_GPIO4__FUNC_I2S3_DO (MTK_PIN_NO(4) | 1)
+#define PINMUX_GPIO4__FUNC_I2S1_DO (MTK_PIN_NO(4) | 3)
+#define PINMUX_GPIO4__FUNC_TP_GPIO4_AO (MTK_PIN_NO(4) | 6)
+
+#define PINMUX_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define PINMUX_GPIO5__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(5) | 1)
+#define PINMUX_GPIO5__FUNC_TP_GPIO5_AO (MTK_PIN_NO(5) | 6)
+
+#define PINMUX_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define PINMUX_GPIO6__FUNC_I2S3_MCK (MTK_PIN_NO(6) | 1)
+#define PINMUX_GPIO6__FUNC_SPI1_CLK_B (MTK_PIN_NO(6) | 2)
+#define PINMUX_GPIO6__FUNC_I2S1_MCK (MTK_PIN_NO(6) | 3)
+#define PINMUX_GPIO6__FUNC_DPI_DATA22 (MTK_PIN_NO(6) | 4)
+#define PINMUX_GPIO6__FUNC_TP_GPIO6_AO (MTK_PIN_NO(6) | 6)
+
+#define PINMUX_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define PINMUX_GPIO7__FUNC_I2S3_BCK (MTK_PIN_NO(7) | 1)
+#define PINMUX_GPIO7__FUNC_SPI1_CSB_B (MTK_PIN_NO(7) | 2)
+#define PINMUX_GPIO7__FUNC_I2S1_BCK (MTK_PIN_NO(7) | 3)
+#define PINMUX_GPIO7__FUNC_DPI_DATA23 (MTK_PIN_NO(7) | 4)
+#define PINMUX_GPIO7__FUNC_TP_GPIO7_AO (MTK_PIN_NO(7) | 6)
+
+#define PINMUX_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define PINMUX_GPIO8__FUNC_I2S3_LRCK (MTK_PIN_NO(8) | 1)
+#define PINMUX_GPIO8__FUNC_SPI1_MO_B (MTK_PIN_NO(8) | 2)
+#define PINMUX_GPIO8__FUNC_I2S1_LRCK (MTK_PIN_NO(8) | 3)
+#define PINMUX_GPIO8__FUNC_CONN_UART0_RXD (MTK_PIN_NO(8) | 4)
+#define PINMUX_GPIO8__FUNC_SSPM_URXD_AO (MTK_PIN_NO(8) | 5)
+#define PINMUX_GPIO8__FUNC_ADSP_UART_RX (MTK_PIN_NO(8) | 6)
+#define PINMUX_GPIO8__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(8) | 7)
+
+#define PINMUX_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define PINMUX_GPIO9__FUNC_I2S3_DO (MTK_PIN_NO(9) | 1)
+#define PINMUX_GPIO9__FUNC_SPI1_MI_B (MTK_PIN_NO(9) | 2)
+#define PINMUX_GPIO9__FUNC_I2S1_DO (MTK_PIN_NO(9) | 3)
+#define PINMUX_GPIO9__FUNC_CONN_UART0_TXD (MTK_PIN_NO(9) | 4)
+#define PINMUX_GPIO9__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(9) | 5)
+#define PINMUX_GPIO9__FUNC_ADSP_UART_TX (MTK_PIN_NO(9) | 6)
+#define PINMUX_GPIO9__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(9) | 7)
+
+#define PINMUX_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define PINMUX_GPIO10__FUNC_I2S0_MCK (MTK_PIN_NO(10) | 1)
+#define PINMUX_GPIO10__FUNC_SPI4_CLK_A (MTK_PIN_NO(10) | 2)
+#define PINMUX_GPIO10__FUNC_I2S2_MCK (MTK_PIN_NO(10) | 3)
+#define PINMUX_GPIO10__FUNC_SPM_JTAG_TDI (MTK_PIN_NO(10) | 4)
+#define PINMUX_GPIO10__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(10) | 5)
+#define PINMUX_GPIO10__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(10) | 6)
+#define PINMUX_GPIO10__FUNC_CONN_MCU_TDI (MTK_PIN_NO(10) | 7)
+
+#define PINMUX_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define PINMUX_GPIO11__FUNC_I2S0_BCK (MTK_PIN_NO(11) | 1)
+#define PINMUX_GPIO11__FUNC_SPI4_CSB_A (MTK_PIN_NO(11) | 2)
+#define PINMUX_GPIO11__FUNC_I2S2_BCK (MTK_PIN_NO(11) | 3)
+#define PINMUX_GPIO11__FUNC_SPM_JTAG_TRSTN (MTK_PIN_NO(11) | 4)
+#define PINMUX_GPIO11__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(11) | 5)
+#define PINMUX_GPIO11__FUNC_ADSP_JTAG_TRSTN (MTK_PIN_NO(11) | 6)
+#define PINMUX_GPIO11__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(11) | 7)
+
+#define PINMUX_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define PINMUX_GPIO12__FUNC_I2S0_LRCK (MTK_PIN_NO(12) | 1)
+#define PINMUX_GPIO12__FUNC_SPI4_MO_A (MTK_PIN_NO(12) | 2)
+#define PINMUX_GPIO12__FUNC_I2S2_LRCK (MTK_PIN_NO(12) | 3)
+#define PINMUX_GPIO12__FUNC_SPM_JTAG_TCK (MTK_PIN_NO(12) | 4)
+#define PINMUX_GPIO12__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(12) | 5)
+#define PINMUX_GPIO12__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(12) | 6)
+#define PINMUX_GPIO12__FUNC_CONN_MCU_TCK (MTK_PIN_NO(12) | 7)
+
+#define PINMUX_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define PINMUX_GPIO13__FUNC_I2S0_DI (MTK_PIN_NO(13) | 1)
+#define PINMUX_GPIO13__FUNC_SPI4_MI_A (MTK_PIN_NO(13) | 2)
+#define PINMUX_GPIO13__FUNC_I2S2_DI (MTK_PIN_NO(13) | 3)
+#define PINMUX_GPIO13__FUNC_SPM_JTAG_TDO (MTK_PIN_NO(13) | 4)
+#define PINMUX_GPIO13__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(13) | 5)
+#define PINMUX_GPIO13__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(13) | 6)
+#define PINMUX_GPIO13__FUNC_CONN_MCU_TDO (MTK_PIN_NO(13) | 7)
+
+#define PINMUX_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define PINMUX_GPIO14__FUNC_CLKM0 (MTK_PIN_NO(14) | 3)
+#define PINMUX_GPIO14__FUNC_SPM_JTAG_TMS (MTK_PIN_NO(14) | 4)
+#define PINMUX_GPIO14__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(14) | 5)
+#define PINMUX_GPIO14__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(14) | 6)
+#define PINMUX_GPIO14__FUNC_CONN_MCU_TMS (MTK_PIN_NO(14) | 7)
+
+#define PINMUX_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define PINMUX_GPIO15__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(15) | 1)
+#define PINMUX_GPIO15__FUNC_SRCLKENAI1 (MTK_PIN_NO(15) | 2)
+#define PINMUX_GPIO15__FUNC_CLKM1 (MTK_PIN_NO(15) | 3)
+#define PINMUX_GPIO15__FUNC_PWM0 (MTK_PIN_NO(15) | 4)
+
+#define PINMUX_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define PINMUX_GPIO16__FUNC_CONN_WIFI_TXD (MTK_PIN_NO(16) | 1)
+#define PINMUX_GPIO16__FUNC_SRCLKENAI0 (MTK_PIN_NO(16) | 2)
+#define PINMUX_GPIO16__FUNC_CLKM2 (MTK_PIN_NO(16) | 3)
+#define PINMUX_GPIO16__FUNC_PWM1 (MTK_PIN_NO(16) | 4)
+
+#define PINMUX_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define PINMUX_GPIO17__FUNC_CLKM3 (MTK_PIN_NO(17) | 3)
+#define PINMUX_GPIO17__FUNC_PWM2 (MTK_PIN_NO(17) | 4)
+#define PINMUX_GPIO17__FUNC_DBG_MON_A32 (MTK_PIN_NO(17) | 7)
+
+#define PINMUX_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define PINMUX_GPIO18__FUNC_CMVREF0 (MTK_PIN_NO(18) | 2)
+#define PINMUX_GPIO18__FUNC_SPI2_CLK_B (MTK_PIN_NO(18) | 6)
+#define PINMUX_GPIO18__FUNC_DBG_MON_A26 (MTK_PIN_NO(18) | 7)
+
+#define PINMUX_GPIO19__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define PINMUX_GPIO19__FUNC_CMVREF1 (MTK_PIN_NO(19) | 2)
+#define PINMUX_GPIO19__FUNC_ANT_SEL3 (MTK_PIN_NO(19) | 5)
+#define PINMUX_GPIO19__FUNC_SPI2_CSB_B (MTK_PIN_NO(19) | 6)
+#define PINMUX_GPIO19__FUNC_DBG_MON_A2 (MTK_PIN_NO(19) | 7)
+
+#define PINMUX_GPIO20__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define PINMUX_GPIO20__FUNC_CMVREF2 (MTK_PIN_NO(20) | 2)
+#define PINMUX_GPIO20__FUNC_ANT_SEL4 (MTK_PIN_NO(20) | 5)
+#define PINMUX_GPIO20__FUNC_SPI2_MO_B (MTK_PIN_NO(20) | 6)
+#define PINMUX_GPIO20__FUNC_DBG_MON_A3 (MTK_PIN_NO(20) | 7)
+
+#define PINMUX_GPIO21__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define PINMUX_GPIO21__FUNC_I2S0_MCK (MTK_PIN_NO(21) | 1)
+#define PINMUX_GPIO21__FUNC_I2S1_MCK (MTK_PIN_NO(21) | 2)
+#define PINMUX_GPIO21__FUNC_I2S3_MCK (MTK_PIN_NO(21) | 3)
+#define PINMUX_GPIO21__FUNC_ANT_SEL5 (MTK_PIN_NO(21) | 5)
+#define PINMUX_GPIO21__FUNC_SPI2_MI_B (MTK_PIN_NO(21) | 6)
+#define PINMUX_GPIO21__FUNC_DBG_MON_A4 (MTK_PIN_NO(21) | 7)
+
+#define PINMUX_GPIO22__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define PINMUX_GPIO22__FUNC_I2S0_BCK (MTK_PIN_NO(22) | 1)
+#define PINMUX_GPIO22__FUNC_I2S1_BCK (MTK_PIN_NO(22) | 2)
+#define PINMUX_GPIO22__FUNC_I2S3_BCK (MTK_PIN_NO(22) | 3)
+#define PINMUX_GPIO22__FUNC_TDM_RX_LRCK (MTK_PIN_NO(22) | 4)
+#define PINMUX_GPIO22__FUNC_ANT_SEL6 (MTK_PIN_NO(22) | 5)
+#define PINMUX_GPIO22__FUNC_DBG_MON_A5 (MTK_PIN_NO(22) | 7)
+
+#define PINMUX_GPIO23__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define PINMUX_GPIO23__FUNC_I2S0_LRCK (MTK_PIN_NO(23) | 1)
+#define PINMUX_GPIO23__FUNC_I2S1_LRCK (MTK_PIN_NO(23) | 2)
+#define PINMUX_GPIO23__FUNC_I2S3_LRCK (MTK_PIN_NO(23) | 3)
+#define PINMUX_GPIO23__FUNC_TDM_RX_BCK (MTK_PIN_NO(23) | 4)
+#define PINMUX_GPIO23__FUNC_ANT_SEL7 (MTK_PIN_NO(23) | 5)
+#define PINMUX_GPIO23__FUNC_DBG_MON_A6 (MTK_PIN_NO(23) | 7)
+
+#define PINMUX_GPIO24__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define PINMUX_GPIO24__FUNC_I2S0_DI (MTK_PIN_NO(24) | 1)
+#define PINMUX_GPIO24__FUNC_I2S1_DO (MTK_PIN_NO(24) | 2)
+#define PINMUX_GPIO24__FUNC_I2S3_DO (MTK_PIN_NO(24) | 3)
+#define PINMUX_GPIO24__FUNC_TDM_RX_MCK (MTK_PIN_NO(24) | 4)
+#define PINMUX_GPIO24__FUNC_DBG_MON_A7 (MTK_PIN_NO(24) | 7)
+
+#define PINMUX_GPIO25__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define PINMUX_GPIO25__FUNC_I2S2_MCK (MTK_PIN_NO(25) | 1)
+#define PINMUX_GPIO25__FUNC_PCM_CLK (MTK_PIN_NO(25) | 2)
+#define PINMUX_GPIO25__FUNC_SPI4_CLK_B (MTK_PIN_NO(25) | 3)
+#define PINMUX_GPIO25__FUNC_TDM_RX_DATA0 (MTK_PIN_NO(25) | 4)
+#define PINMUX_GPIO25__FUNC_DBG_MON_A8 (MTK_PIN_NO(25) | 7)
+
+#define PINMUX_GPIO26__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define PINMUX_GPIO26__FUNC_I2S2_BCK (MTK_PIN_NO(26) | 1)
+#define PINMUX_GPIO26__FUNC_PCM_SYNC (MTK_PIN_NO(26) | 2)
+#define PINMUX_GPIO26__FUNC_SPI4_CSB_B (MTK_PIN_NO(26) | 3)
+#define PINMUX_GPIO26__FUNC_TDM_RX_DATA1 (MTK_PIN_NO(26) | 4)
+#define PINMUX_GPIO26__FUNC_DBG_MON_A9 (MTK_PIN_NO(26) | 7)
+
+#define PINMUX_GPIO27__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define PINMUX_GPIO27__FUNC_I2S2_LRCK (MTK_PIN_NO(27) | 1)
+#define PINMUX_GPIO27__FUNC_PCM_DI (MTK_PIN_NO(27) | 2)
+#define PINMUX_GPIO27__FUNC_SPI4_MO_B (MTK_PIN_NO(27) | 3)
+#define PINMUX_GPIO27__FUNC_TDM_RX_DATA2 (MTK_PIN_NO(27) | 4)
+#define PINMUX_GPIO27__FUNC_DBG_MON_A10 (MTK_PIN_NO(27) | 7)
+
+#define PINMUX_GPIO28__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define PINMUX_GPIO28__FUNC_I2S2_DI (MTK_PIN_NO(28) | 1)
+#define PINMUX_GPIO28__FUNC_PCM_DO (MTK_PIN_NO(28) | 2)
+#define PINMUX_GPIO28__FUNC_SPI4_MI_B (MTK_PIN_NO(28) | 3)
+#define PINMUX_GPIO28__FUNC_TDM_RX_DATA3 (MTK_PIN_NO(28) | 4)
+
+#define PINMUX_GPIO29__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define PINMUX_GPIO29__FUNC_ANT_SEL0 (MTK_PIN_NO(29) | 1)
+#define PINMUX_GPIO29__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(29) | 2)
+
+#define PINMUX_GPIO30__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define PINMUX_GPIO30__FUNC_ANT_SEL1 (MTK_PIN_NO(30) | 1)
+
+#define PINMUX_GPIO31__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define PINMUX_GPIO31__FUNC_ANT_SEL2 (MTK_PIN_NO(31) | 1)
+#define PINMUX_GPIO31__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(31) | 2)
+#define PINMUX_GPIO31__FUNC_SRCLKENAI1 (MTK_PIN_NO(31) | 3)
+
+#define PINMUX_GPIO32__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define PINMUX_GPIO32__FUNC_URXD0 (MTK_PIN_NO(32) | 1)
+#define PINMUX_GPIO32__FUNC_UTXD0 (MTK_PIN_NO(32) | 2)
+#define PINMUX_GPIO32__FUNC_ADSP_UART_RX (MTK_PIN_NO(32) | 3)
+#define PINMUX_GPIO32__FUNC_TP_URXD1_AO (MTK_PIN_NO(32) | 4)
+
+#define PINMUX_GPIO33__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define PINMUX_GPIO33__FUNC_UTXD0 (MTK_PIN_NO(33) | 1)
+#define PINMUX_GPIO33__FUNC_URXD0 (MTK_PIN_NO(33) | 2)
+#define PINMUX_GPIO33__FUNC_ADSP_UART_TX (MTK_PIN_NO(33) | 3)
+#define PINMUX_GPIO33__FUNC_TP_UTXD1_AO (MTK_PIN_NO(33) | 4)
+
+#define PINMUX_GPIO34__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define PINMUX_GPIO34__FUNC_URXD1 (MTK_PIN_NO(34) | 1)
+#define PINMUX_GPIO34__FUNC_TP_URXD2_AO (MTK_PIN_NO(34) | 2)
+#define PINMUX_GPIO34__FUNC_SSPM_URXD_AO (MTK_PIN_NO(34) | 3)
+#define PINMUX_GPIO34__FUNC_ADSP_UART_RX (MTK_PIN_NO(34) | 4)
+#define PINMUX_GPIO34__FUNC_CONN_UART0_RXD (MTK_PIN_NO(34) | 5)
+
+#define PINMUX_GPIO35__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define PINMUX_GPIO35__FUNC_UTXD1 (MTK_PIN_NO(35) | 1)
+#define PINMUX_GPIO35__FUNC_TP_UTXD2_AO (MTK_PIN_NO(35) | 2)
+#define PINMUX_GPIO35__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(35) | 3)
+#define PINMUX_GPIO35__FUNC_ADSP_UART_TX (MTK_PIN_NO(35) | 4)
+#define PINMUX_GPIO35__FUNC_CONN_UART0_TXD (MTK_PIN_NO(35) | 5)
+#define PINMUX_GPIO35__FUNC_CONN_WIFI_TXD (MTK_PIN_NO(35) | 6)
+
+#define PINMUX_GPIO36__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define PINMUX_GPIO36__FUNC_SPI0_CLK_A (MTK_PIN_NO(36) | 1)
+#define PINMUX_GPIO36__FUNC_CLKM0 (MTK_PIN_NO(36) | 2)
+#define PINMUX_GPIO36__FUNC_SCP_SPI0_CK (MTK_PIN_NO(36) | 4)
+#define PINMUX_GPIO36__FUNC_SPINOR_CK (MTK_PIN_NO(36) | 5)
+#define PINMUX_GPIO36__FUNC_DBG_MON_A11 (MTK_PIN_NO(36) | 7)
+
+#define PINMUX_GPIO37__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define PINMUX_GPIO37__FUNC_SPI0_CSB_A (MTK_PIN_NO(37) | 1)
+#define PINMUX_GPIO37__FUNC_CLKM1 (MTK_PIN_NO(37) | 2)
+#define PINMUX_GPIO37__FUNC_PWM0 (MTK_PIN_NO(37) | 3)
+#define PINMUX_GPIO37__FUNC_SCP_SPI0_CS (MTK_PIN_NO(37) | 4)
+#define PINMUX_GPIO37__FUNC_SPINOR_CS (MTK_PIN_NO(37) | 5)
+#define PINMUX_GPIO37__FUNC_DBG_MON_A12 (MTK_PIN_NO(37) | 7)
+
+#define PINMUX_GPIO38__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define PINMUX_GPIO38__FUNC_SPI0_MO_A (MTK_PIN_NO(38) | 1)
+#define PINMUX_GPIO38__FUNC_CLKM2 (MTK_PIN_NO(38) | 2)
+#define PINMUX_GPIO38__FUNC_PWM1 (MTK_PIN_NO(38) | 3)
+#define PINMUX_GPIO38__FUNC_SCP_SPI0_MO (MTK_PIN_NO(38) | 4)
+#define PINMUX_GPIO38__FUNC_SPINOR_IO0 (MTK_PIN_NO(38) | 5)
+#define PINMUX_GPIO38__FUNC_DBG_MON_A13 (MTK_PIN_NO(38) | 7)
+
+#define PINMUX_GPIO39__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define PINMUX_GPIO39__FUNC_SPI0_MI_A (MTK_PIN_NO(39) | 1)
+#define PINMUX_GPIO39__FUNC_CLKM3 (MTK_PIN_NO(39) | 2)
+#define PINMUX_GPIO39__FUNC_PWM2 (MTK_PIN_NO(39) | 3)
+#define PINMUX_GPIO39__FUNC_SCP_SPI0_MI (MTK_PIN_NO(39) | 4)
+#define PINMUX_GPIO39__FUNC_SPINOR_IO1 (MTK_PIN_NO(39) | 5)
+#define PINMUX_GPIO39__FUNC_DBG_MON_A14 (MTK_PIN_NO(39) | 7)
+
+#define PINMUX_GPIO40__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define PINMUX_GPIO40__FUNC_SPI1_CLK_A (MTK_PIN_NO(40) | 1)
+#define PINMUX_GPIO40__FUNC_SCP_SPI1_CK (MTK_PIN_NO(40) | 2)
+#define PINMUX_GPIO40__FUNC_UCTS0 (MTK_PIN_NO(40) | 4)
+#define PINMUX_GPIO40__FUNC_SPINOR_IO2 (MTK_PIN_NO(40) | 5)
+#define PINMUX_GPIO40__FUNC_TP_UCTS1_AO (MTK_PIN_NO(40) | 6)
+#define PINMUX_GPIO40__FUNC_DBG_MON_A15 (MTK_PIN_NO(40) | 7)
+
+#define PINMUX_GPIO41__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define PINMUX_GPIO41__FUNC_SPI1_CSB_A (MTK_PIN_NO(41) | 1)
+#define PINMUX_GPIO41__FUNC_SCP_SPI1_CS (MTK_PIN_NO(41) | 2)
+#define PINMUX_GPIO41__FUNC_PWM0 (MTK_PIN_NO(41) | 3)
+#define PINMUX_GPIO41__FUNC_URTS0 (MTK_PIN_NO(41) | 4)
+#define PINMUX_GPIO41__FUNC_SPINOR_IO3 (MTK_PIN_NO(41) | 5)
+#define PINMUX_GPIO41__FUNC_TP_URTS1_AO (MTK_PIN_NO(41) | 6)
+#define PINMUX_GPIO41__FUNC_DBG_MON_A16 (MTK_PIN_NO(41) | 7)
+
+#define PINMUX_GPIO42__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define PINMUX_GPIO42__FUNC_SPI1_MO_A (MTK_PIN_NO(42) | 1)
+#define PINMUX_GPIO42__FUNC_SCP_SPI1_MO (MTK_PIN_NO(42) | 2)
+#define PINMUX_GPIO42__FUNC_PWM1 (MTK_PIN_NO(42) | 3)
+#define PINMUX_GPIO42__FUNC_UCTS1 (MTK_PIN_NO(42) | 4)
+#define PINMUX_GPIO42__FUNC_TP_UCTS2_AO (MTK_PIN_NO(42) | 6)
+#define PINMUX_GPIO42__FUNC_DBG_MON_A17 (MTK_PIN_NO(42) | 7)
+
+#define PINMUX_GPIO43__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define PINMUX_GPIO43__FUNC_SPI1_MI_A (MTK_PIN_NO(43) | 1)
+#define PINMUX_GPIO43__FUNC_SCP_SPI1_MI (MTK_PIN_NO(43) | 2)
+#define PINMUX_GPIO43__FUNC_PWM2 (MTK_PIN_NO(43) | 3)
+#define PINMUX_GPIO43__FUNC_URTS1 (MTK_PIN_NO(43) | 4)
+#define PINMUX_GPIO43__FUNC_TP_URTS2_AO (MTK_PIN_NO(43) | 6)
+#define PINMUX_GPIO43__FUNC_DBG_MON_A18 (MTK_PIN_NO(43) | 7)
+
+#define PINMUX_GPIO44__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define PINMUX_GPIO44__FUNC_SPI2_CLK_A (MTK_PIN_NO(44) | 1)
+#define PINMUX_GPIO44__FUNC_SCP_SPI0_CK (MTK_PIN_NO(44) | 2)
+#define PINMUX_GPIO44__FUNC_DBG_MON_A19 (MTK_PIN_NO(44) | 7)
+
+#define PINMUX_GPIO45__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define PINMUX_GPIO45__FUNC_SPI2_CSB_A (MTK_PIN_NO(45) | 1)
+#define PINMUX_GPIO45__FUNC_SCP_SPI0_CS (MTK_PIN_NO(45) | 2)
+#define PINMUX_GPIO45__FUNC_DBG_MON_A20 (MTK_PIN_NO(45) | 7)
+
+#define PINMUX_GPIO46__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define PINMUX_GPIO46__FUNC_SPI2_MO_A (MTK_PIN_NO(46) | 1)
+#define PINMUX_GPIO46__FUNC_SCP_SPI0_MO (MTK_PIN_NO(46) | 2)
+#define PINMUX_GPIO46__FUNC_DBG_MON_A21 (MTK_PIN_NO(46) | 7)
+
+#define PINMUX_GPIO47__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define PINMUX_GPIO47__FUNC_SPI2_MI_A (MTK_PIN_NO(47) | 1)
+#define PINMUX_GPIO47__FUNC_SCP_SPI0_MI (MTK_PIN_NO(47) | 2)
+#define PINMUX_GPIO47__FUNC_DBG_MON_A22 (MTK_PIN_NO(47) | 7)
+
+#define PINMUX_GPIO48__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define PINMUX_GPIO48__FUNC_SPI3_CLK (MTK_PIN_NO(48) | 1)
+#define PINMUX_GPIO48__FUNC_TP_URXD1_AO (MTK_PIN_NO(48) | 2)
+#define PINMUX_GPIO48__FUNC_TP_URXD2_AO (MTK_PIN_NO(48) | 3)
+#define PINMUX_GPIO48__FUNC_URXD1 (MTK_PIN_NO(48) | 4)
+#define PINMUX_GPIO48__FUNC_I2S2_MCK (MTK_PIN_NO(48) | 5)
+#define PINMUX_GPIO48__FUNC_SCP_SPI0_CK (MTK_PIN_NO(48) | 6)
+
+#define PINMUX_GPIO49__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define PINMUX_GPIO49__FUNC_SPI3_CSB (MTK_PIN_NO(49) | 1)
+#define PINMUX_GPIO49__FUNC_TP_UTXD1_AO (MTK_PIN_NO(49) | 2)
+#define PINMUX_GPIO49__FUNC_TP_UTXD2_AO (MTK_PIN_NO(49) | 3)
+#define PINMUX_GPIO49__FUNC_UTXD1 (MTK_PIN_NO(49) | 4)
+#define PINMUX_GPIO49__FUNC_I2S2_BCK (MTK_PIN_NO(49) | 5)
+#define PINMUX_GPIO49__FUNC_SCP_SPI0_CS (MTK_PIN_NO(49) | 6)
+
+#define PINMUX_GPIO50__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define PINMUX_GPIO50__FUNC_SPI3_MO (MTK_PIN_NO(50) | 1)
+#define PINMUX_GPIO50__FUNC_I2S2_LRCK (MTK_PIN_NO(50) | 5)
+#define PINMUX_GPIO50__FUNC_SCP_SPI0_MO (MTK_PIN_NO(50) | 6)
+
+#define PINMUX_GPIO51__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define PINMUX_GPIO51__FUNC_SPI3_MI (MTK_PIN_NO(51) | 1)
+#define PINMUX_GPIO51__FUNC_I2S2_DI (MTK_PIN_NO(51) | 5)
+#define PINMUX_GPIO51__FUNC_SCP_SPI0_MI (MTK_PIN_NO(51) | 6)
+
+#define PINMUX_GPIO52__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define PINMUX_GPIO52__FUNC_SPI5_CLK (MTK_PIN_NO(52) | 1)
+#define PINMUX_GPIO52__FUNC_I2S2_MCK (MTK_PIN_NO(52) | 2)
+#define PINMUX_GPIO52__FUNC_I2S1_MCK (MTK_PIN_NO(52) | 3)
+#define PINMUX_GPIO52__FUNC_SCP_SPI1_CK (MTK_PIN_NO(52) | 4)
+#define PINMUX_GPIO52__FUNC_LVTS_26M (MTK_PIN_NO(52) | 5)
+#define PINMUX_GPIO52__FUNC_DFD_TCK_XI (MTK_PIN_NO(52) | 6)
+#define PINMUX_GPIO52__FUNC_DBG_MON_B30 (MTK_PIN_NO(52) | 7)
+
+#define PINMUX_GPIO53__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define PINMUX_GPIO53__FUNC_SPI5_CSB (MTK_PIN_NO(53) | 1)
+#define PINMUX_GPIO53__FUNC_I2S2_BCK (MTK_PIN_NO(53) | 2)
+#define PINMUX_GPIO53__FUNC_I2S1_BCK (MTK_PIN_NO(53) | 3)
+#define PINMUX_GPIO53__FUNC_SCP_SPI1_CS (MTK_PIN_NO(53) | 4)
+#define PINMUX_GPIO53__FUNC_LVTS_FOUT (MTK_PIN_NO(53) | 5)
+#define PINMUX_GPIO53__FUNC_DFD_TDI (MTK_PIN_NO(53) | 6)
+#define PINMUX_GPIO53__FUNC_DBG_MON_B31 (MTK_PIN_NO(53) | 7)
+
+#define PINMUX_GPIO54__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define PINMUX_GPIO54__FUNC_SPI5_MO (MTK_PIN_NO(54) | 1)
+#define PINMUX_GPIO54__FUNC_I2S2_LRCK (MTK_PIN_NO(54) | 2)
+#define PINMUX_GPIO54__FUNC_I2S1_LRCK (MTK_PIN_NO(54) | 3)
+#define PINMUX_GPIO54__FUNC_SCP_SPI1_MO (MTK_PIN_NO(54) | 4)
+#define PINMUX_GPIO54__FUNC_LVTS_SCK (MTK_PIN_NO(54) | 5)
+#define PINMUX_GPIO54__FUNC_DFD_TDO (MTK_PIN_NO(54) | 6)
+#define PINMUX_GPIO54__FUNC_DBG_MON_A1 (MTK_PIN_NO(54) | 7)
+
+#define PINMUX_GPIO55__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define PINMUX_GPIO55__FUNC_SPI5_MI (MTK_PIN_NO(55) | 1)
+#define PINMUX_GPIO55__FUNC_I2S2_DI (MTK_PIN_NO(55) | 2)
+#define PINMUX_GPIO55__FUNC_I2S1_DO (MTK_PIN_NO(55) | 3)
+#define PINMUX_GPIO55__FUNC_SCP_SPI1_MI (MTK_PIN_NO(55) | 4)
+#define PINMUX_GPIO55__FUNC_LVTS_SDO (MTK_PIN_NO(55) | 5)
+#define PINMUX_GPIO55__FUNC_DFD_TMS (MTK_PIN_NO(55) | 6)
+#define PINMUX_GPIO55__FUNC_DBG_MON_B32 (MTK_PIN_NO(55) | 7)
+
+#define PINMUX_GPIO56__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define PINMUX_GPIO56__FUNC_I2S1_DO (MTK_PIN_NO(56) | 1)
+#define PINMUX_GPIO56__FUNC_I2S3_DO (MTK_PIN_NO(56) | 2)
+#define PINMUX_GPIO56__FUNC_DBG_MON_A23 (MTK_PIN_NO(56) | 7)
+
+#define PINMUX_GPIO57__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define PINMUX_GPIO57__FUNC_I2S1_BCK (MTK_PIN_NO(57) | 1)
+#define PINMUX_GPIO57__FUNC_I2S3_BCK (MTK_PIN_NO(57) | 2)
+#define PINMUX_GPIO57__FUNC_DBG_MON_A24 (MTK_PIN_NO(57) | 7)
+
+#define PINMUX_GPIO58__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define PINMUX_GPIO58__FUNC_I2S1_LRCK (MTK_PIN_NO(58) | 1)
+#define PINMUX_GPIO58__FUNC_I2S3_LRCK (MTK_PIN_NO(58) | 2)
+#define PINMUX_GPIO58__FUNC_DBG_MON_A25 (MTK_PIN_NO(58) | 7)
+
+#define PINMUX_GPIO59__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define PINMUX_GPIO59__FUNC_I2S1_MCK (MTK_PIN_NO(59) | 1)
+#define PINMUX_GPIO59__FUNC_I2S3_MCK (MTK_PIN_NO(59) | 2)
+#define PINMUX_GPIO59__FUNC_DBG_MON_A27 (MTK_PIN_NO(59) | 7)
+
+#define PINMUX_GPIO60__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define PINMUX_GPIO60__FUNC_TDM_RX_LRCK (MTK_PIN_NO(60) | 1)
+#define PINMUX_GPIO60__FUNC_ANT_SEL3 (MTK_PIN_NO(60) | 2)
+#define PINMUX_GPIO60__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(60) | 5)
+
+#define PINMUX_GPIO61__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define PINMUX_GPIO61__FUNC_TDM_RX_BCK (MTK_PIN_NO(61) | 1)
+#define PINMUX_GPIO61__FUNC_ANT_SEL4 (MTK_PIN_NO(61) | 2)
+#define PINMUX_GPIO61__FUNC_SPINOR_CK (MTK_PIN_NO(61) | 4)
+#define PINMUX_GPIO61__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(61) | 5)
+
+#define PINMUX_GPIO62__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define PINMUX_GPIO62__FUNC_TDM_RX_MCK (MTK_PIN_NO(62) | 1)
+#define PINMUX_GPIO62__FUNC_ANT_SEL5 (MTK_PIN_NO(62) | 2)
+#define PINMUX_GPIO62__FUNC_SPINOR_CS (MTK_PIN_NO(62) | 4)
+#define PINMUX_GPIO62__FUNC_CONN_MCU_TDI (MTK_PIN_NO(62) | 5)
+
+#define PINMUX_GPIO63__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define PINMUX_GPIO63__FUNC_TDM_RX_DATA0 (MTK_PIN_NO(63) | 1)
+#define PINMUX_GPIO63__FUNC_ANT_SEL6 (MTK_PIN_NO(63) | 2)
+#define PINMUX_GPIO63__FUNC_SPINOR_IO0 (MTK_PIN_NO(63) | 4)
+#define PINMUX_GPIO63__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(63) | 5)
+
+#define PINMUX_GPIO64__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define PINMUX_GPIO64__FUNC_TDM_RX_DATA1 (MTK_PIN_NO(64) | 1)
+#define PINMUX_GPIO64__FUNC_ANT_SEL7 (MTK_PIN_NO(64) | 2)
+#define PINMUX_GPIO64__FUNC_PWM0 (MTK_PIN_NO(64) | 3)
+#define PINMUX_GPIO64__FUNC_SPINOR_IO1 (MTK_PIN_NO(64) | 4)
+#define PINMUX_GPIO64__FUNC_CONN_MCU_TCK (MTK_PIN_NO(64) | 5)
+
+#define PINMUX_GPIO65__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define PINMUX_GPIO65__FUNC_TDM_RX_DATA2 (MTK_PIN_NO(65) | 1)
+#define PINMUX_GPIO65__FUNC_UCTS0 (MTK_PIN_NO(65) | 2)
+#define PINMUX_GPIO65__FUNC_PWM1 (MTK_PIN_NO(65) | 3)
+#define PINMUX_GPIO65__FUNC_SPINOR_IO2 (MTK_PIN_NO(65) | 4)
+#define PINMUX_GPIO65__FUNC_CONN_MCU_TDO (MTK_PIN_NO(65) | 5)
+#define PINMUX_GPIO65__FUNC_TP_UCTS1_AO (MTK_PIN_NO(65) | 6)
+#define PINMUX_GPIO65__FUNC_TP_UCTS2_AO (MTK_PIN_NO(65) | 7)
+
+#define PINMUX_GPIO66__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define PINMUX_GPIO66__FUNC_TDM_RX_DATA3 (MTK_PIN_NO(66) | 1)
+#define PINMUX_GPIO66__FUNC_URTS0 (MTK_PIN_NO(66) | 2)
+#define PINMUX_GPIO66__FUNC_PWM2 (MTK_PIN_NO(66) | 3)
+#define PINMUX_GPIO66__FUNC_SPINOR_IO3 (MTK_PIN_NO(66) | 4)
+#define PINMUX_GPIO66__FUNC_CONN_MCU_TMS (MTK_PIN_NO(66) | 5)
+#define PINMUX_GPIO66__FUNC_TP_URTS1_AO (MTK_PIN_NO(66) | 6)
+#define PINMUX_GPIO66__FUNC_TP_URTS2_AO (MTK_PIN_NO(66) | 7)
+
+#define PINMUX_GPIO67__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define PINMUX_GPIO67__FUNC_MSDC0_DSL (MTK_PIN_NO(67) | 1)
+
+#define PINMUX_GPIO68__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define PINMUX_GPIO68__FUNC_MSDC0_CLK (MTK_PIN_NO(68) | 1)
+
+#define PINMUX_GPIO69__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define PINMUX_GPIO69__FUNC_MSDC0_CMD (MTK_PIN_NO(69) | 1)
+
+#define PINMUX_GPIO70__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define PINMUX_GPIO70__FUNC_MSDC0_RSTB (MTK_PIN_NO(70) | 1)
+
+#define PINMUX_GPIO71__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define PINMUX_GPIO71__FUNC_MSDC0_DAT0 (MTK_PIN_NO(71) | 1)
+
+#define PINMUX_GPIO72__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define PINMUX_GPIO72__FUNC_MSDC0_DAT1 (MTK_PIN_NO(72) | 1)
+
+#define PINMUX_GPIO73__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define PINMUX_GPIO73__FUNC_MSDC0_DAT2 (MTK_PIN_NO(73) | 1)
+
+#define PINMUX_GPIO74__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define PINMUX_GPIO74__FUNC_MSDC0_DAT3 (MTK_PIN_NO(74) | 1)
+
+#define PINMUX_GPIO75__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define PINMUX_GPIO75__FUNC_MSDC0_DAT4 (MTK_PIN_NO(75) | 1)
+
+#define PINMUX_GPIO76__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define PINMUX_GPIO76__FUNC_MSDC0_DAT5 (MTK_PIN_NO(76) | 1)
+
+#define PINMUX_GPIO77__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define PINMUX_GPIO77__FUNC_MSDC0_DAT6 (MTK_PIN_NO(77) | 1)
+
+#define PINMUX_GPIO78__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define PINMUX_GPIO78__FUNC_MSDC0_DAT7 (MTK_PIN_NO(78) | 1)
+
+#define PINMUX_GPIO79__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define PINMUX_GPIO79__FUNC_KPCOL0 (MTK_PIN_NO(79) | 1)
+
+#define PINMUX_GPIO80__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define PINMUX_GPIO80__FUNC_KPCOL1 (MTK_PIN_NO(80) | 1)
+#define PINMUX_GPIO80__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(80) | 2)
+#define PINMUX_GPIO80__FUNC_PWM0 (MTK_PIN_NO(80) | 3)
+#define PINMUX_GPIO80__FUNC_CLKM0 (MTK_PIN_NO(80) | 4)
+
+#define PINMUX_GPIO81__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define PINMUX_GPIO81__FUNC_KPROW0 (MTK_PIN_NO(81) | 1)
+#define PINMUX_GPIO81__FUNC_PWM1 (MTK_PIN_NO(81) | 3)
+#define PINMUX_GPIO81__FUNC_CLKM1 (MTK_PIN_NO(81) | 4)
+
+#define PINMUX_GPIO82__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define PINMUX_GPIO82__FUNC_KPROW1 (MTK_PIN_NO(82) | 1)
+#define PINMUX_GPIO82__FUNC_PWM2 (MTK_PIN_NO(82) | 3)
+#define PINMUX_GPIO82__FUNC_CLKM2 (MTK_PIN_NO(82) | 4)
+
+#define PINMUX_GPIO83__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define PINMUX_GPIO83__FUNC_AP_GOOD (MTK_PIN_NO(83) | 1)
+#define PINMUX_GPIO83__FUNC_GPS_PPS (MTK_PIN_NO(83) | 2)
+#define PINMUX_GPIO83__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(83) | 4)
+#define PINMUX_GPIO83__FUNC_DBG_MON_A28 (MTK_PIN_NO(83) | 7)
+
+#define PINMUX_GPIO84__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define PINMUX_GPIO84__FUNC_MSDC1_CLK (MTK_PIN_NO(84) | 1)
+#define PINMUX_GPIO84__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(84) | 2)
+#define PINMUX_GPIO84__FUNC_UDI_TCK (MTK_PIN_NO(84) | 4)
+#define PINMUX_GPIO84__FUNC_CONN_DSP_JCK (MTK_PIN_NO(84) | 5)
+#define PINMUX_GPIO84__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(84) | 6)
+#define PINMUX_GPIO84__FUNC_DFD_TCK_XI (MTK_PIN_NO(84) | 7)
+
+#define PINMUX_GPIO85__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define PINMUX_GPIO85__FUNC_MSDC1_CMD (MTK_PIN_NO(85) | 1)
+#define PINMUX_GPIO85__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(85) | 2)
+#define PINMUX_GPIO85__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(85) | 3)
+#define PINMUX_GPIO85__FUNC_UDI_TMS (MTK_PIN_NO(85) | 4)
+#define PINMUX_GPIO85__FUNC_CONN_DSP_JMS (MTK_PIN_NO(85) | 5)
+#define PINMUX_GPIO85__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(85) | 6)
+#define PINMUX_GPIO85__FUNC_DFD_TMS (MTK_PIN_NO(85) | 7)
+
+#define PINMUX_GPIO86__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define PINMUX_GPIO86__FUNC_MSDC1_DAT0 (MTK_PIN_NO(86) | 1)
+#define PINMUX_GPIO86__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(86) | 2)
+#define PINMUX_GPIO86__FUNC_UDI_TDI (MTK_PIN_NO(86) | 4)
+#define PINMUX_GPIO86__FUNC_CONN_DSP_JDI (MTK_PIN_NO(86) | 5)
+#define PINMUX_GPIO86__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(86) | 6)
+#define PINMUX_GPIO86__FUNC_DFD_TDI (MTK_PIN_NO(86) | 7)
+
+#define PINMUX_GPIO87__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define PINMUX_GPIO87__FUNC_MSDC1_DAT1 (MTK_PIN_NO(87) | 1)
+#define PINMUX_GPIO87__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(87) | 2)
+#define PINMUX_GPIO87__FUNC_UDI_TDO (MTK_PIN_NO(87) | 4)
+#define PINMUX_GPIO87__FUNC_CONN_DSP_JDO (MTK_PIN_NO(87) | 5)
+#define PINMUX_GPIO87__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(87) | 6)
+#define PINMUX_GPIO87__FUNC_DFD_TDO (MTK_PIN_NO(87) | 7)
+
+#define PINMUX_GPIO88__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define PINMUX_GPIO88__FUNC_MSDC1_DAT2 (MTK_PIN_NO(88) | 1)
+#define PINMUX_GPIO88__FUNC_ADSP_JTAG_TRSTN (MTK_PIN_NO(88) | 2)
+#define PINMUX_GPIO88__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(88) | 3)
+#define PINMUX_GPIO88__FUNC_UDI_NTRST (MTK_PIN_NO(88) | 4)
+#define PINMUX_GPIO88__FUNC_CONN_WIFI_TXD (MTK_PIN_NO(88) | 5)
+#define PINMUX_GPIO88__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(88) | 6)
+
+#define PINMUX_GPIO89__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define PINMUX_GPIO89__FUNC_MSDC1_DAT3 (MTK_PIN_NO(89) | 1)
+#define PINMUX_GPIO89__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(89) | 5)
+
+#define PINMUX_GPIO90__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define PINMUX_GPIO90__FUNC_IDDIG_P0 (MTK_PIN_NO(90) | 1)
+#define PINMUX_GPIO90__FUNC_PGD_HV_HSC_PWR4 (MTK_PIN_NO(90) | 4)
+#define PINMUX_GPIO90__FUNC_GDU_SUM_TROOP2_2 (MTK_PIN_NO(90) | 5)
+
+#define PINMUX_GPIO91__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define PINMUX_GPIO91__FUNC_USB_DRVVBUS_P0 (MTK_PIN_NO(91) | 1)
+#define PINMUX_GPIO91__FUNC_PGD_HV_HSC_PWR5 (MTK_PIN_NO(91) | 4)
+#define PINMUX_GPIO91__FUNC_GDU_TROOPS_DET0 (MTK_PIN_NO(91) | 5)
+
+#define PINMUX_GPIO92__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define PINMUX_GPIO92__FUNC_VBUS_VALID_P0 (MTK_PIN_NO(92) | 1)
+#define PINMUX_GPIO92__FUNC_PGD_DA_EFUSE_RDY (MTK_PIN_NO(92) | 4)
+#define PINMUX_GPIO92__FUNC_GDU_TROOPS_DET1 (MTK_PIN_NO(92) | 5)
+
+#define PINMUX_GPIO93__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define PINMUX_GPIO93__FUNC_IDDIG_P1 (MTK_PIN_NO(93) | 1)
+#define PINMUX_GPIO93__FUNC_PWM0 (MTK_PIN_NO(93) | 2)
+#define PINMUX_GPIO93__FUNC_CLKM0 (MTK_PIN_NO(93) | 3)
+#define PINMUX_GPIO93__FUNC_PGD_DA_EFUSE_RDY_PRE (MTK_PIN_NO(93) | 4)
+#define PINMUX_GPIO93__FUNC_GDU_TROOPS_DET2 (MTK_PIN_NO(93) | 5)
+
+#define PINMUX_GPIO94__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define PINMUX_GPIO94__FUNC_USB_DRVVBUS_P1 (MTK_PIN_NO(94) | 1)
+#define PINMUX_GPIO94__FUNC_PWM1 (MTK_PIN_NO(94) | 2)
+#define PINMUX_GPIO94__FUNC_CLKM1 (MTK_PIN_NO(94) | 3)
+#define PINMUX_GPIO94__FUNC_PGD_DA_PWRGD_RESET (MTK_PIN_NO(94) | 4)
+
+#define PINMUX_GPIO95__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define PINMUX_GPIO95__FUNC_VBUS_VALID_P1 (MTK_PIN_NO(95) | 1)
+#define PINMUX_GPIO95__FUNC_PWM2 (MTK_PIN_NO(95) | 2)
+#define PINMUX_GPIO95__FUNC_CLKM2 (MTK_PIN_NO(95) | 3)
+#define PINMUX_GPIO95__FUNC_PGD_DA_PWRGD_ENB (MTK_PIN_NO(95) | 4)
+
+#define PINMUX_GPIO96__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define PINMUX_GPIO96__FUNC_DSI_TE (MTK_PIN_NO(96) | 1)
+#define PINMUX_GPIO96__FUNC_DBG_MON_A29 (MTK_PIN_NO(96) | 7)
+
+#define PINMUX_GPIO97__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define PINMUX_GPIO97__FUNC_DISP_PWM (MTK_PIN_NO(97) | 1)
+#define PINMUX_GPIO97__FUNC_DBG_MON_A30 (MTK_PIN_NO(97) | 7)
+
+#define PINMUX_GPIO98__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define PINMUX_GPIO98__FUNC_LCM_RST (MTK_PIN_NO(98) | 1)
+
+#define PINMUX_GPIO99__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define PINMUX_GPIO99__FUNC_DPI_PCLK (MTK_PIN_NO(99) | 1)
+#define PINMUX_GPIO99__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(99) | 2)
+#define PINMUX_GPIO99__FUNC_SSPM_JTAG_TCK (MTK_PIN_NO(99) | 3)
+#define PINMUX_GPIO99__FUNC_ANT_SEL0 (MTK_PIN_NO(99) | 5)
+#define PINMUX_GPIO99__FUNC_TP_GPIO0_AO (MTK_PIN_NO(99) | 6)
+#define PINMUX_GPIO99__FUNC_PGD_LV_LSC_PWR0 (MTK_PIN_NO(99) | 7)
+
+#define PINMUX_GPIO100__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define PINMUX_GPIO100__FUNC_DPI_VSYNC (MTK_PIN_NO(100) | 1)
+#define PINMUX_GPIO100__FUNC_KPCOL2 (MTK_PIN_NO(100) | 2)
+#define PINMUX_GPIO100__FUNC_SSPM_JTAG_TMS (MTK_PIN_NO(100) | 3)
+#define PINMUX_GPIO100__FUNC_ANT_SEL1 (MTK_PIN_NO(100) | 5)
+#define PINMUX_GPIO100__FUNC_TP_GPIO1_AO (MTK_PIN_NO(100) | 6)
+#define PINMUX_GPIO100__FUNC_PGD_LV_LSC_PWR1 (MTK_PIN_NO(100) | 7)
+
+#define PINMUX_GPIO101__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define PINMUX_GPIO101__FUNC_DPI_HSYNC (MTK_PIN_NO(101) | 1)
+#define PINMUX_GPIO101__FUNC_KPROW2 (MTK_PIN_NO(101) | 2)
+#define PINMUX_GPIO101__FUNC_SSPM_JTAG_TDI (MTK_PIN_NO(101) | 3)
+#define PINMUX_GPIO101__FUNC_ANT_SEL2 (MTK_PIN_NO(101) | 5)
+#define PINMUX_GPIO101__FUNC_TP_GPIO2_AO (MTK_PIN_NO(101) | 6)
+#define PINMUX_GPIO101__FUNC_PGD_LV_LSC_PWR2 (MTK_PIN_NO(101) | 7)
+
+#define PINMUX_GPIO102__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define PINMUX_GPIO102__FUNC_DPI_DE (MTK_PIN_NO(102) | 1)
+#define PINMUX_GPIO102__FUNC_SSPM_JTAG_TDO (MTK_PIN_NO(102) | 3)
+#define PINMUX_GPIO102__FUNC_ANT_SEL3 (MTK_PIN_NO(102) | 5)
+#define PINMUX_GPIO102__FUNC_TP_GPIO3_AO (MTK_PIN_NO(102) | 6)
+#define PINMUX_GPIO102__FUNC_PGD_LV_LSC_PWR3 (MTK_PIN_NO(102) | 7)
+
+#define PINMUX_GPIO103__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define PINMUX_GPIO103__FUNC_DPI_DATA0 (MTK_PIN_NO(103) | 1)
+#define PINMUX_GPIO103__FUNC_SSPM_JTAG_TRSTN (MTK_PIN_NO(103) | 3)
+#define PINMUX_GPIO103__FUNC_CLKM0 (MTK_PIN_NO(103) | 4)
+#define PINMUX_GPIO103__FUNC_ANT_SEL4 (MTK_PIN_NO(103) | 5)
+#define PINMUX_GPIO103__FUNC_TP_GPIO4_AO (MTK_PIN_NO(103) | 6)
+#define PINMUX_GPIO103__FUNC_PGD_LV_LSC_PWR4 (MTK_PIN_NO(103) | 7)
+
+#define PINMUX_GPIO104__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define PINMUX_GPIO104__FUNC_DPI_DATA1 (MTK_PIN_NO(104) | 1)
+#define PINMUX_GPIO104__FUNC_GPS_PPS (MTK_PIN_NO(104) | 2)
+#define PINMUX_GPIO104__FUNC_UCTS2 (MTK_PIN_NO(104) | 3)
+#define PINMUX_GPIO104__FUNC_CLKM1 (MTK_PIN_NO(104) | 4)
+#define PINMUX_GPIO104__FUNC_ANT_SEL5 (MTK_PIN_NO(104) | 5)
+#define PINMUX_GPIO104__FUNC_TP_GPIO5_AO (MTK_PIN_NO(104) | 6)
+#define PINMUX_GPIO104__FUNC_PGD_LV_LSC_PWR5 (MTK_PIN_NO(104) | 7)
+
+#define PINMUX_GPIO105__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define PINMUX_GPIO105__FUNC_DPI_DATA2 (MTK_PIN_NO(105) | 1)
+#define PINMUX_GPIO105__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(105) | 2)
+#define PINMUX_GPIO105__FUNC_URTS2 (MTK_PIN_NO(105) | 3)
+#define PINMUX_GPIO105__FUNC_CLKM2 (MTK_PIN_NO(105) | 4)
+#define PINMUX_GPIO105__FUNC_ANT_SEL6 (MTK_PIN_NO(105) | 5)
+#define PINMUX_GPIO105__FUNC_TP_GPIO6_AO (MTK_PIN_NO(105) | 6)
+#define PINMUX_GPIO105__FUNC_PGD_LV_HSC_PWR0 (MTK_PIN_NO(105) | 7)
+
+#define PINMUX_GPIO106__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define PINMUX_GPIO106__FUNC_DPI_DATA3 (MTK_PIN_NO(106) | 1)
+#define PINMUX_GPIO106__FUNC_TP_UTXD1_AO (MTK_PIN_NO(106) | 2)
+#define PINMUX_GPIO106__FUNC_UTXD2 (MTK_PIN_NO(106) | 3)
+#define PINMUX_GPIO106__FUNC_PWM0 (MTK_PIN_NO(106) | 4)
+#define PINMUX_GPIO106__FUNC_ANT_SEL7 (MTK_PIN_NO(106) | 5)
+#define PINMUX_GPIO106__FUNC_TP_GPIO7_AO (MTK_PIN_NO(106) | 6)
+#define PINMUX_GPIO106__FUNC_PGD_LV_HSC_PWR1 (MTK_PIN_NO(106) | 7)
+
+#define PINMUX_GPIO107__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define PINMUX_GPIO107__FUNC_DPI_DATA4 (MTK_PIN_NO(107) | 1)
+#define PINMUX_GPIO107__FUNC_TP_URXD1_AO (MTK_PIN_NO(107) | 2)
+#define PINMUX_GPIO107__FUNC_URXD2 (MTK_PIN_NO(107) | 3)
+#define PINMUX_GPIO107__FUNC_PWM1 (MTK_PIN_NO(107) | 4)
+#define PINMUX_GPIO107__FUNC_GDU_SUM_TROOP0_0 (MTK_PIN_NO(107) | 6)
+#define PINMUX_GPIO107__FUNC_PGD_LV_HSC_PWR2 (MTK_PIN_NO(107) | 7)
+
+#define PINMUX_GPIO108__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define PINMUX_GPIO108__FUNC_DPI_DATA5 (MTK_PIN_NO(108) | 1)
+#define PINMUX_GPIO108__FUNC_TP_UCTS1_AO (MTK_PIN_NO(108) | 2)
+#define PINMUX_GPIO108__FUNC_UCTS0 (MTK_PIN_NO(108) | 3)
+#define PINMUX_GPIO108__FUNC_PWM2 (MTK_PIN_NO(108) | 4)
+#define PINMUX_GPIO108__FUNC_GDU_SUM_TROOP0_1 (MTK_PIN_NO(108) | 6)
+#define PINMUX_GPIO108__FUNC_PGD_LV_HSC_PWR3 (MTK_PIN_NO(108) | 7)
+
+#define PINMUX_GPIO109__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define PINMUX_GPIO109__FUNC_DPI_DATA6 (MTK_PIN_NO(109) | 1)
+#define PINMUX_GPIO109__FUNC_TP_URTS1_AO (MTK_PIN_NO(109) | 2)
+#define PINMUX_GPIO109__FUNC_URTS0 (MTK_PIN_NO(109) | 3)
+#define PINMUX_GPIO109__FUNC_I2S0_DI (MTK_PIN_NO(109) | 4)
+#define PINMUX_GPIO109__FUNC_I2S2_DI (MTK_PIN_NO(109) | 5)
+#define PINMUX_GPIO109__FUNC_GDU_SUM_TROOP0_2 (MTK_PIN_NO(109) | 6)
+#define PINMUX_GPIO109__FUNC_PGD_LV_HSC_PWR4 (MTK_PIN_NO(109) | 7)
+
+#define PINMUX_GPIO110__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define PINMUX_GPIO110__FUNC_DPI_DATA7 (MTK_PIN_NO(110) | 1)
+#define PINMUX_GPIO110__FUNC_TP_UCTS2_AO (MTK_PIN_NO(110) | 2)
+#define PINMUX_GPIO110__FUNC_UCTS1 (MTK_PIN_NO(110) | 3)
+#define PINMUX_GPIO110__FUNC_I2S3_BCK (MTK_PIN_NO(110) | 4)
+#define PINMUX_GPIO110__FUNC_I2S1_BCK (MTK_PIN_NO(110) | 5)
+#define PINMUX_GPIO110__FUNC_GDU_SUM_TROOP1_0 (MTK_PIN_NO(110) | 6)
+#define PINMUX_GPIO110__FUNC_PGD_LV_HSC_PWR5 (MTK_PIN_NO(110) | 7)
+
+#define PINMUX_GPIO111__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define PINMUX_GPIO111__FUNC_DPI_DATA8 (MTK_PIN_NO(111) | 1)
+#define PINMUX_GPIO111__FUNC_TP_URTS2_AO (MTK_PIN_NO(111) | 2)
+#define PINMUX_GPIO111__FUNC_URTS1 (MTK_PIN_NO(111) | 3)
+#define PINMUX_GPIO111__FUNC_I2S3_MCK (MTK_PIN_NO(111) | 4)
+#define PINMUX_GPIO111__FUNC_I2S1_MCK (MTK_PIN_NO(111) | 5)
+#define PINMUX_GPIO111__FUNC_GDU_SUM_TROOP1_1 (MTK_PIN_NO(111) | 6)
+#define PINMUX_GPIO111__FUNC_PGD_HV_HSC_PWR0 (MTK_PIN_NO(111) | 7)
+
+#define PINMUX_GPIO112__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define PINMUX_GPIO112__FUNC_DPI_DATA9 (MTK_PIN_NO(112) | 1)
+#define PINMUX_GPIO112__FUNC_TP_URXD2_AO (MTK_PIN_NO(112) | 2)
+#define PINMUX_GPIO112__FUNC_URXD1 (MTK_PIN_NO(112) | 3)
+#define PINMUX_GPIO112__FUNC_I2S3_LRCK (MTK_PIN_NO(112) | 4)
+#define PINMUX_GPIO112__FUNC_I2S1_LRCK (MTK_PIN_NO(112) | 5)
+#define PINMUX_GPIO112__FUNC_GDU_SUM_TROOP1_2 (MTK_PIN_NO(112) | 6)
+#define PINMUX_GPIO112__FUNC_PGD_HV_HSC_PWR1 (MTK_PIN_NO(112) | 7)
+
+#define PINMUX_GPIO113__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define PINMUX_GPIO113__FUNC_DPI_DATA10 (MTK_PIN_NO(113) | 1)
+#define PINMUX_GPIO113__FUNC_TP_UTXD2_AO (MTK_PIN_NO(113) | 2)
+#define PINMUX_GPIO113__FUNC_UTXD1 (MTK_PIN_NO(113) | 3)
+#define PINMUX_GPIO113__FUNC_I2S3_DO (MTK_PIN_NO(113) | 4)
+#define PINMUX_GPIO113__FUNC_I2S1_DO (MTK_PIN_NO(113) | 5)
+#define PINMUX_GPIO113__FUNC_GDU_SUM_TROOP2_0 (MTK_PIN_NO(113) | 6)
+#define PINMUX_GPIO113__FUNC_PGD_HV_HSC_PWR2 (MTK_PIN_NO(113) | 7)
+
+#define PINMUX_GPIO114__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define PINMUX_GPIO114__FUNC_DPI_DATA11 (MTK_PIN_NO(114) | 1)
+#define PINMUX_GPIO114__FUNC_GDU_SUM_TROOP2_1 (MTK_PIN_NO(114) | 6)
+#define PINMUX_GPIO114__FUNC_PGD_HV_HSC_PWR3 (MTK_PIN_NO(114) | 7)
+
+#define PINMUX_GPIO115__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define PINMUX_GPIO115__FUNC_PCM_CLK (MTK_PIN_NO(115) | 1)
+#define PINMUX_GPIO115__FUNC_I2S0_BCK (MTK_PIN_NO(115) | 2)
+#define PINMUX_GPIO115__FUNC_I2S2_BCK (MTK_PIN_NO(115) | 3)
+
+#define PINMUX_GPIO116__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define PINMUX_GPIO116__FUNC_PCM_SYNC (MTK_PIN_NO(116) | 1)
+#define PINMUX_GPIO116__FUNC_I2S0_LRCK (MTK_PIN_NO(116) | 2)
+#define PINMUX_GPIO116__FUNC_I2S2_LRCK (MTK_PIN_NO(116) | 3)
+
+#define PINMUX_GPIO117__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define PINMUX_GPIO117__FUNC_PCM_DI (MTK_PIN_NO(117) | 1)
+#define PINMUX_GPIO117__FUNC_I2S0_DI (MTK_PIN_NO(117) | 2)
+#define PINMUX_GPIO117__FUNC_I2S2_DI (MTK_PIN_NO(117) | 3)
+
+#define PINMUX_GPIO118__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define PINMUX_GPIO118__FUNC_PCM_DO (MTK_PIN_NO(118) | 1)
+#define PINMUX_GPIO118__FUNC_I2S0_MCK (MTK_PIN_NO(118) | 2)
+#define PINMUX_GPIO118__FUNC_I2S2_MCK (MTK_PIN_NO(118) | 3)
+#define PINMUX_GPIO118__FUNC_I2S3_DO (MTK_PIN_NO(118) | 4)
+#define PINMUX_GPIO118__FUNC_I2S1_DO (MTK_PIN_NO(118) | 5)
+
+#define PINMUX_GPIO119__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define PINMUX_GPIO119__FUNC_JTMS_SEL1 (MTK_PIN_NO(119) | 1)
+#define PINMUX_GPIO119__FUNC_UDI_TMS (MTK_PIN_NO(119) | 2)
+#define PINMUX_GPIO119__FUNC_DFD_TMS (MTK_PIN_NO(119) | 3)
+#define PINMUX_GPIO119__FUNC_SPM_JTAG_TMS (MTK_PIN_NO(119) | 4)
+#define PINMUX_GPIO119__FUNC_SCP_JTAG_TMS (MTK_PIN_NO(119) | 5)
+#define PINMUX_GPIO119__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(119) | 6)
+
+#define PINMUX_GPIO120__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define PINMUX_GPIO120__FUNC_JTCK_SEL1 (MTK_PIN_NO(120) | 1)
+#define PINMUX_GPIO120__FUNC_UDI_TCK (MTK_PIN_NO(120) | 2)
+#define PINMUX_GPIO120__FUNC_DFD_TCK_XI (MTK_PIN_NO(120) | 3)
+#define PINMUX_GPIO120__FUNC_SPM_JTAG_TCK (MTK_PIN_NO(120) | 4)
+#define PINMUX_GPIO120__FUNC_SCP_JTAG_TCK (MTK_PIN_NO(120) | 5)
+#define PINMUX_GPIO120__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(120) | 6)
+
+#define PINMUX_GPIO121__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define PINMUX_GPIO121__FUNC_JTDI_SEL1 (MTK_PIN_NO(121) | 1)
+#define PINMUX_GPIO121__FUNC_UDI_TDI (MTK_PIN_NO(121) | 2)
+#define PINMUX_GPIO121__FUNC_DFD_TDI (MTK_PIN_NO(121) | 3)
+#define PINMUX_GPIO121__FUNC_SPM_JTAG_TDI (MTK_PIN_NO(121) | 4)
+#define PINMUX_GPIO121__FUNC_SCP_JTAG_TDI (MTK_PIN_NO(121) | 5)
+#define PINMUX_GPIO121__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(121) | 6)
+
+#define PINMUX_GPIO122__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define PINMUX_GPIO122__FUNC_JTDO_SEL1 (MTK_PIN_NO(122) | 1)
+#define PINMUX_GPIO122__FUNC_UDI_TDO (MTK_PIN_NO(122) | 2)
+#define PINMUX_GPIO122__FUNC_DFD_TDO (MTK_PIN_NO(122) | 3)
+#define PINMUX_GPIO122__FUNC_SPM_JTAG_TDO (MTK_PIN_NO(122) | 4)
+#define PINMUX_GPIO122__FUNC_SCP_JTAG_TDO (MTK_PIN_NO(122) | 5)
+#define PINMUX_GPIO122__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(122) | 6)
+
+#define PINMUX_GPIO123__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define PINMUX_GPIO123__FUNC_JTRSTN_SEL1 (MTK_PIN_NO(123) | 1)
+#define PINMUX_GPIO123__FUNC_UDI_NTRST (MTK_PIN_NO(123) | 2)
+#define PINMUX_GPIO123__FUNC_SPM_JTAG_TRSTN (MTK_PIN_NO(123) | 4)
+#define PINMUX_GPIO123__FUNC_SCP_JTAG_TRSTN (MTK_PIN_NO(123) | 5)
+#define PINMUX_GPIO123__FUNC_ADSP_JTAG_TRSTN (MTK_PIN_NO(123) | 6)
+
+#define PINMUX_GPIO124__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define PINMUX_GPIO124__FUNC_CMMCLK0 (MTK_PIN_NO(124) | 1)
+#define PINMUX_GPIO124__FUNC_CLKM0 (MTK_PIN_NO(124) | 2)
+#define PINMUX_GPIO124__FUNC_PWM0 (MTK_PIN_NO(124) | 3)
+
+#define PINMUX_GPIO125__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define PINMUX_GPIO125__FUNC_CMMCLK1 (MTK_PIN_NO(125) | 1)
+#define PINMUX_GPIO125__FUNC_CLKM1 (MTK_PIN_NO(125) | 2)
+#define PINMUX_GPIO125__FUNC_PWM1 (MTK_PIN_NO(125) | 3)
+#define PINMUX_GPIO125__FUNC_DBG_MON_B0 (MTK_PIN_NO(125) | 7)
+
+#define PINMUX_GPIO126__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define PINMUX_GPIO126__FUNC_CMMCLK2 (MTK_PIN_NO(126) | 1)
+#define PINMUX_GPIO126__FUNC_CLKM2 (MTK_PIN_NO(126) | 2)
+#define PINMUX_GPIO126__FUNC_PWM2 (MTK_PIN_NO(126) | 3)
+#define PINMUX_GPIO126__FUNC_DBG_MON_B1 (MTK_PIN_NO(126) | 7)
+
+#define PINMUX_GPIO127__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define PINMUX_GPIO127__FUNC_SCL0 (MTK_PIN_NO(127) | 1)
+#define PINMUX_GPIO127__FUNC_SCP_SCL0 (MTK_PIN_NO(127) | 4)
+#define PINMUX_GPIO127__FUNC_SCP_SCL1 (MTK_PIN_NO(127) | 5)
+
+#define PINMUX_GPIO128__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define PINMUX_GPIO128__FUNC_SDA0 (MTK_PIN_NO(128) | 1)
+#define PINMUX_GPIO128__FUNC_SCP_SDA0 (MTK_PIN_NO(128) | 4)
+#define PINMUX_GPIO128__FUNC_SCP_SDA1 (MTK_PIN_NO(128) | 5)
+
+#define PINMUX_GPIO129__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define PINMUX_GPIO129__FUNC_SCL1 (MTK_PIN_NO(129) | 1)
+#define PINMUX_GPIO129__FUNC_SCP_SCL0 (MTK_PIN_NO(129) | 4)
+#define PINMUX_GPIO129__FUNC_SCP_SCL1 (MTK_PIN_NO(129) | 5)
+#define PINMUX_GPIO129__FUNC_DBG_MON_B4 (MTK_PIN_NO(129) | 7)
+
+#define PINMUX_GPIO130__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define PINMUX_GPIO130__FUNC_SDA1 (MTK_PIN_NO(130) | 1)
+#define PINMUX_GPIO130__FUNC_SCP_SDA0 (MTK_PIN_NO(130) | 4)
+#define PINMUX_GPIO130__FUNC_SCP_SDA1 (MTK_PIN_NO(130) | 5)
+#define PINMUX_GPIO130__FUNC_DBG_MON_B5 (MTK_PIN_NO(130) | 7)
+
+#define PINMUX_GPIO131__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define PINMUX_GPIO131__FUNC_SCL2 (MTK_PIN_NO(131) | 1)
+#define PINMUX_GPIO131__FUNC_SSPM_UTXD_AO (MTK_PIN_NO(131) | 2)
+#define PINMUX_GPIO131__FUNC_CONN_UART0_TXD (MTK_PIN_NO(131) | 3)
+#define PINMUX_GPIO131__FUNC_SCP_SCL0 (MTK_PIN_NO(131) | 4)
+#define PINMUX_GPIO131__FUNC_SCP_SCL1 (MTK_PIN_NO(131) | 5)
+#define PINMUX_GPIO131__FUNC_DBG_MON_B6 (MTK_PIN_NO(131) | 7)
+
+#define PINMUX_GPIO132__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define PINMUX_GPIO132__FUNC_SDA2 (MTK_PIN_NO(132) | 1)
+#define PINMUX_GPIO132__FUNC_SSPM_URXD_AO (MTK_PIN_NO(132) | 2)
+#define PINMUX_GPIO132__FUNC_CONN_UART0_RXD (MTK_PIN_NO(132) | 3)
+#define PINMUX_GPIO132__FUNC_SCP_SDA0 (MTK_PIN_NO(132) | 4)
+#define PINMUX_GPIO132__FUNC_SCP_SDA1 (MTK_PIN_NO(132) | 5)
+#define PINMUX_GPIO132__FUNC_DBG_MON_B7 (MTK_PIN_NO(132) | 7)
+
+#define PINMUX_GPIO133__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define PINMUX_GPIO133__FUNC_SCL3 (MTK_PIN_NO(133) | 1)
+#define PINMUX_GPIO133__FUNC_SCP_SCL0 (MTK_PIN_NO(133) | 4)
+#define PINMUX_GPIO133__FUNC_SCP_SCL1 (MTK_PIN_NO(133) | 5)
+#define PINMUX_GPIO133__FUNC_DBG_MON_B8 (MTK_PIN_NO(133) | 7)
+
+#define PINMUX_GPIO134__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define PINMUX_GPIO134__FUNC_SDA3 (MTK_PIN_NO(134) | 1)
+#define PINMUX_GPIO134__FUNC_GPS_PPS (MTK_PIN_NO(134) | 3)
+#define PINMUX_GPIO134__FUNC_SCP_SDA0 (MTK_PIN_NO(134) | 4)
+#define PINMUX_GPIO134__FUNC_SCP_SDA1 (MTK_PIN_NO(134) | 5)
+#define PINMUX_GPIO134__FUNC_DBG_MON_B9 (MTK_PIN_NO(134) | 7)
+
+#define PINMUX_GPIO135__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define PINMUX_GPIO135__FUNC_SCL4 (MTK_PIN_NO(135) | 1)
+#define PINMUX_GPIO135__FUNC_TP_UTXD1_AO (MTK_PIN_NO(135) | 2)
+#define PINMUX_GPIO135__FUNC_UTXD1 (MTK_PIN_NO(135) | 3)
+#define PINMUX_GPIO135__FUNC_SCP_SCL0 (MTK_PIN_NO(135) | 4)
+#define PINMUX_GPIO135__FUNC_SCP_SCL1 (MTK_PIN_NO(135) | 5)
+#define PINMUX_GPIO135__FUNC_DBG_MON_B10 (MTK_PIN_NO(135) | 7)
+
+#define PINMUX_GPIO136__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define PINMUX_GPIO136__FUNC_SDA4 (MTK_PIN_NO(136) | 1)
+#define PINMUX_GPIO136__FUNC_TP_URXD1_AO (MTK_PIN_NO(136) | 2)
+#define PINMUX_GPIO136__FUNC_URXD1 (MTK_PIN_NO(136) | 3)
+#define PINMUX_GPIO136__FUNC_SCP_SDA0 (MTK_PIN_NO(136) | 4)
+#define PINMUX_GPIO136__FUNC_SCP_SDA1 (MTK_PIN_NO(136) | 5)
+#define PINMUX_GPIO136__FUNC_DBG_MON_B11 (MTK_PIN_NO(136) | 7)
+
+#define PINMUX_GPIO137__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define PINMUX_GPIO137__FUNC_SCL5 (MTK_PIN_NO(137) | 1)
+#define PINMUX_GPIO137__FUNC_UTXD2 (MTK_PIN_NO(137) | 2)
+#define PINMUX_GPIO137__FUNC_UCTS1 (MTK_PIN_NO(137) | 3)
+#define PINMUX_GPIO137__FUNC_SCP_SCL0 (MTK_PIN_NO(137) | 4)
+#define PINMUX_GPIO137__FUNC_SCP_SCL1 (MTK_PIN_NO(137) | 5)
+
+#define PINMUX_GPIO138__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define PINMUX_GPIO138__FUNC_SDA5 (MTK_PIN_NO(138) | 1)
+#define PINMUX_GPIO138__FUNC_URXD2 (MTK_PIN_NO(138) | 2)
+#define PINMUX_GPIO138__FUNC_URTS1 (MTK_PIN_NO(138) | 3)
+#define PINMUX_GPIO138__FUNC_SCP_SDA0 (MTK_PIN_NO(138) | 4)
+#define PINMUX_GPIO138__FUNC_SCP_SDA1 (MTK_PIN_NO(138) | 5)
+
+#define PINMUX_GPIO139__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define PINMUX_GPIO139__FUNC_SCL6 (MTK_PIN_NO(139) | 1)
+#define PINMUX_GPIO139__FUNC_UTXD1 (MTK_PIN_NO(139) | 2)
+#define PINMUX_GPIO139__FUNC_TP_UTXD1_AO (MTK_PIN_NO(139) | 3)
+#define PINMUX_GPIO139__FUNC_SCP_SCL0 (MTK_PIN_NO(139) | 4)
+#define PINMUX_GPIO139__FUNC_SCP_SCL1 (MTK_PIN_NO(139) | 5)
+#define PINMUX_GPIO139__FUNC_DBG_MON_B12 (MTK_PIN_NO(139) | 7)
+
+#define PINMUX_GPIO140__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define PINMUX_GPIO140__FUNC_SDA6 (MTK_PIN_NO(140) | 1)
+#define PINMUX_GPIO140__FUNC_URXD1 (MTK_PIN_NO(140) | 2)
+#define PINMUX_GPIO140__FUNC_TP_URXD1_AO (MTK_PIN_NO(140) | 3)
+#define PINMUX_GPIO140__FUNC_SCP_SDA0 (MTK_PIN_NO(140) | 4)
+#define PINMUX_GPIO140__FUNC_SCP_SDA1 (MTK_PIN_NO(140) | 5)
+#define PINMUX_GPIO140__FUNC_DBG_MON_B13 (MTK_PIN_NO(140) | 7)
+
+#define PINMUX_GPIO141__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define PINMUX_GPIO141__FUNC_SCL7 (MTK_PIN_NO(141) | 1)
+#define PINMUX_GPIO141__FUNC_URTS0 (MTK_PIN_NO(141) | 2)
+#define PINMUX_GPIO141__FUNC_TP_URTS1_AO (MTK_PIN_NO(141) | 3)
+#define PINMUX_GPIO141__FUNC_SCP_SCL0 (MTK_PIN_NO(141) | 4)
+#define PINMUX_GPIO141__FUNC_SCP_SCL1 (MTK_PIN_NO(141) | 5)
+#define PINMUX_GPIO141__FUNC_UDI_TCK (MTK_PIN_NO(141) | 6)
+#define PINMUX_GPIO141__FUNC_DBG_MON_B14 (MTK_PIN_NO(141) | 7)
+
+#define PINMUX_GPIO142__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define PINMUX_GPIO142__FUNC_SDA7 (MTK_PIN_NO(142) | 1)
+#define PINMUX_GPIO142__FUNC_UCTS0 (MTK_PIN_NO(142) | 2)
+#define PINMUX_GPIO142__FUNC_TP_UCTS1_AO (MTK_PIN_NO(142) | 3)
+#define PINMUX_GPIO142__FUNC_SCP_SDA0 (MTK_PIN_NO(142) | 4)
+#define PINMUX_GPIO142__FUNC_SCP_SDA1 (MTK_PIN_NO(142) | 5)
+
+#define PINMUX_GPIO143__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define PINMUX_GPIO143__FUNC_SCL8 (MTK_PIN_NO(143) | 1)
+#define PINMUX_GPIO143__FUNC_SCP_SCL0 (MTK_PIN_NO(143) | 4)
+#define PINMUX_GPIO143__FUNC_SCP_SCL1 (MTK_PIN_NO(143) | 5)
+#define PINMUX_GPIO143__FUNC_DBG_MON_B16 (MTK_PIN_NO(143) | 7)
+
+#define PINMUX_GPIO144__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define PINMUX_GPIO144__FUNC_SDA8 (MTK_PIN_NO(144) | 1)
+#define PINMUX_GPIO144__FUNC_SCP_SDA0 (MTK_PIN_NO(144) | 4)
+#define PINMUX_GPIO144__FUNC_SCP_SDA1 (MTK_PIN_NO(144) | 5)
+#define PINMUX_GPIO144__FUNC_DBG_MON_B17 (MTK_PIN_NO(144) | 7)
+
+#define PINMUX_GPIO145__FUNC_GPIO145 (MTK_PIN_NO(145) | 0)
+#define PINMUX_GPIO145__FUNC_SCL9 (MTK_PIN_NO(145) | 1)
+#define PINMUX_GPIO145__FUNC_CMVREF1 (MTK_PIN_NO(145) | 2)
+#define PINMUX_GPIO145__FUNC_GPS_PPS (MTK_PIN_NO(145) | 3)
+#define PINMUX_GPIO145__FUNC_SCP_SCL0 (MTK_PIN_NO(145) | 4)
+#define PINMUX_GPIO145__FUNC_SCP_SCL1 (MTK_PIN_NO(145) | 5)
+#define PINMUX_GPIO145__FUNC_DBG_MON_B18 (MTK_PIN_NO(145) | 7)
+
+#define PINMUX_GPIO146__FUNC_GPIO146 (MTK_PIN_NO(146) | 0)
+#define PINMUX_GPIO146__FUNC_SDA9 (MTK_PIN_NO(146) | 1)
+#define PINMUX_GPIO146__FUNC_CMVREF0 (MTK_PIN_NO(146) | 2)
+#define PINMUX_GPIO146__FUNC_SCP_SDA0 (MTK_PIN_NO(146) | 4)
+#define PINMUX_GPIO146__FUNC_SCP_SDA1 (MTK_PIN_NO(146) | 5)
+#define PINMUX_GPIO146__FUNC_DBG_MON_B19 (MTK_PIN_NO(146) | 7)
+
+#define PINMUX_GPIO147__FUNC_GPIO147 (MTK_PIN_NO(147) | 0)
+#define PINMUX_GPIO147__FUNC_CMFLASH0 (MTK_PIN_NO(147) | 1)
+#define PINMUX_GPIO147__FUNC_LVTS_SDI (MTK_PIN_NO(147) | 2)
+#define PINMUX_GPIO147__FUNC_DPI_DATA12 (MTK_PIN_NO(147) | 3)
+#define PINMUX_GPIO147__FUNC_TP_GPIO0_AO (MTK_PIN_NO(147) | 4)
+#define PINMUX_GPIO147__FUNC_ANT_SEL3 (MTK_PIN_NO(147) | 5)
+#define PINMUX_GPIO147__FUNC_DFD_TCK_XI (MTK_PIN_NO(147) | 6)
+#define PINMUX_GPIO147__FUNC_DBG_MON_B20 (MTK_PIN_NO(147) | 7)
+
+#define PINMUX_GPIO148__FUNC_GPIO148 (MTK_PIN_NO(148) | 0)
+#define PINMUX_GPIO148__FUNC_CMFLASH1 (MTK_PIN_NO(148) | 1)
+#define PINMUX_GPIO148__FUNC_LVTS_SCF (MTK_PIN_NO(148) | 2)
+#define PINMUX_GPIO148__FUNC_DPI_DATA13 (MTK_PIN_NO(148) | 3)
+#define PINMUX_GPIO148__FUNC_TP_GPIO1_AO (MTK_PIN_NO(148) | 4)
+#define PINMUX_GPIO148__FUNC_ANT_SEL4 (MTK_PIN_NO(148) | 5)
+#define PINMUX_GPIO148__FUNC_DFD_TMS (MTK_PIN_NO(148) | 6)
+#define PINMUX_GPIO148__FUNC_DBG_MON_B21 (MTK_PIN_NO(148) | 7)
+
+#define PINMUX_GPIO149__FUNC_GPIO149 (MTK_PIN_NO(149) | 0)
+#define PINMUX_GPIO149__FUNC_CMFLASH2 (MTK_PIN_NO(149) | 1)
+#define PINMUX_GPIO149__FUNC_CLKM0 (MTK_PIN_NO(149) | 2)
+#define PINMUX_GPIO149__FUNC_DPI_DATA14 (MTK_PIN_NO(149) | 3)
+#define PINMUX_GPIO149__FUNC_TP_GPIO2_AO (MTK_PIN_NO(149) | 4)
+#define PINMUX_GPIO149__FUNC_ANT_SEL5 (MTK_PIN_NO(149) | 5)
+#define PINMUX_GPIO149__FUNC_DFD_TDI (MTK_PIN_NO(149) | 6)
+#define PINMUX_GPIO149__FUNC_DBG_MON_B22 (MTK_PIN_NO(149) | 7)
+
+#define PINMUX_GPIO150__FUNC_GPIO150 (MTK_PIN_NO(150) | 0)
+#define PINMUX_GPIO150__FUNC_CLKM1 (MTK_PIN_NO(150) | 2)
+#define PINMUX_GPIO150__FUNC_DPI_DATA15 (MTK_PIN_NO(150) | 3)
+#define PINMUX_GPIO150__FUNC_TP_GPIO3_AO (MTK_PIN_NO(150) | 4)
+#define PINMUX_GPIO150__FUNC_ANT_SEL6 (MTK_PIN_NO(150) | 5)
+#define PINMUX_GPIO150__FUNC_DFD_TDO (MTK_PIN_NO(150) | 6)
+#define PINMUX_GPIO150__FUNC_DBG_MON_B23 (MTK_PIN_NO(150) | 7)
+
+#define PINMUX_GPIO151__FUNC_GPIO151 (MTK_PIN_NO(151) | 0)
+#define PINMUX_GPIO151__FUNC_GPS_L1_ELNA_EN (MTK_PIN_NO(151) | 1)
+#define PINMUX_GPIO151__FUNC_CLKM2 (MTK_PIN_NO(151) | 2)
+#define PINMUX_GPIO151__FUNC_DPI_DATA16 (MTK_PIN_NO(151) | 3)
+#define PINMUX_GPIO151__FUNC_TP_GPIO4_AO (MTK_PIN_NO(151) | 4)
+#define PINMUX_GPIO151__FUNC_ANT_SEL7 (MTK_PIN_NO(151) | 5)
+#define PINMUX_GPIO151__FUNC_UDI_TMS (MTK_PIN_NO(151) | 6)
+#define PINMUX_GPIO151__FUNC_DBG_MON_B24 (MTK_PIN_NO(151) | 7)
+
+#define PINMUX_GPIO152__FUNC_GPIO152 (MTK_PIN_NO(152) | 0)
+#define PINMUX_GPIO152__FUNC_CLKM3 (MTK_PIN_NO(152) | 2)
+#define PINMUX_GPIO152__FUNC_DPI_DATA17 (MTK_PIN_NO(152) | 3)
+#define PINMUX_GPIO152__FUNC_TP_GPIO5_AO (MTK_PIN_NO(152) | 4)
+
+#define PINMUX_GPIO153__FUNC_GPIO153 (MTK_PIN_NO(153) | 0)
+#define PINMUX_GPIO153__FUNC_CONN_TCXOENA_REQ (MTK_PIN_NO(153) | 1)
+#define PINMUX_GPIO153__FUNC_DPI_DATA18 (MTK_PIN_NO(153) | 3)
+#define PINMUX_GPIO153__FUNC_TP_GPIO6_AO (MTK_PIN_NO(153) | 4)
+#define PINMUX_GPIO153__FUNC_UDI_TDI (MTK_PIN_NO(153) | 6)
+#define PINMUX_GPIO153__FUNC_DBG_MON_B26 (MTK_PIN_NO(153) | 7)
+
+#define PINMUX_GPIO154__FUNC_GPIO154 (MTK_PIN_NO(154) | 0)
+#define PINMUX_GPIO154__FUNC_PWM0 (MTK_PIN_NO(154) | 1)
+#define PINMUX_GPIO154__FUNC_CMVREF2 (MTK_PIN_NO(154) | 2)
+#define PINMUX_GPIO154__FUNC_DPI_DATA19 (MTK_PIN_NO(154) | 3)
+#define PINMUX_GPIO154__FUNC_TP_GPIO7_AO (MTK_PIN_NO(154) | 4)
+#define PINMUX_GPIO154__FUNC_UDI_TDO (MTK_PIN_NO(154) | 6)
+#define PINMUX_GPIO154__FUNC_DBG_MON_B27 (MTK_PIN_NO(154) | 7)
+
+#define PINMUX_GPIO155__FUNC_GPIO155 (MTK_PIN_NO(155) | 0)
+#define PINMUX_GPIO155__FUNC_PWM1 (MTK_PIN_NO(155) | 1)
+#define PINMUX_GPIO155__FUNC_CMVREF1 (MTK_PIN_NO(155) | 2)
+#define PINMUX_GPIO155__FUNC_DPI_DATA20 (MTK_PIN_NO(155) | 3)
+#define PINMUX_GPIO155__FUNC_UDI_NTRST (MTK_PIN_NO(155) | 6)
+#define PINMUX_GPIO155__FUNC_DBG_MON_B28 (MTK_PIN_NO(155) | 7)
+
+#define PINMUX_GPIO156__FUNC_GPIO156 (MTK_PIN_NO(156) | 0)
+#define PINMUX_GPIO156__FUNC_PWM2 (MTK_PIN_NO(156) | 1)
+#define PINMUX_GPIO156__FUNC_CMVREF0 (MTK_PIN_NO(156) | 2)
+#define PINMUX_GPIO156__FUNC_DPI_DATA21 (MTK_PIN_NO(156) | 3)
+
+#define PINMUX_GPIO157__FUNC_GPIO157 (MTK_PIN_NO(157) | 0)
+#define PINMUX_GPIO157__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(157) | 1)
+
+#define PINMUX_GPIO158__FUNC_GPIO158 (MTK_PIN_NO(158) | 0)
+#define PINMUX_GPIO158__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(158) | 1)
+
+#define PINMUX_GPIO159__FUNC_GPIO159 (MTK_PIN_NO(159) | 0)
+#define PINMUX_GPIO159__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(159) | 1)
+#define PINMUX_GPIO159__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(159) | 2)
+
+#define PINMUX_GPIO160__FUNC_GPIO160 (MTK_PIN_NO(160) | 0)
+#define PINMUX_GPIO160__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(160) | 1)
+#define PINMUX_GPIO160__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(160) | 2)
+
+#define PINMUX_GPIO161__FUNC_GPIO161 (MTK_PIN_NO(161) | 0)
+#define PINMUX_GPIO161__FUNC_SRCLKENA0 (MTK_PIN_NO(161) | 1)
+
+#define PINMUX_GPIO162__FUNC_GPIO162 (MTK_PIN_NO(162) | 0)
+#define PINMUX_GPIO162__FUNC_SRCLKENA1 (MTK_PIN_NO(162) | 1)
+#define PINMUX_GPIO162__FUNC_DBG_MON_A31 (MTK_PIN_NO(162) | 7)
+
+#define PINMUX_GPIO163__FUNC_GPIO163 (MTK_PIN_NO(163) | 0)
+#define PINMUX_GPIO163__FUNC_SCP_VREQ_VAO (MTK_PIN_NO(163) | 1)
+#define PINMUX_GPIO163__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(163) | 2)
+
+#define PINMUX_GPIO164__FUNC_GPIO164 (MTK_PIN_NO(164) | 0)
+#define PINMUX_GPIO164__FUNC_RTC32K_CK (MTK_PIN_NO(164) | 1)
+
+#define PINMUX_GPIO165__FUNC_GPIO165 (MTK_PIN_NO(165) | 0)
+#define PINMUX_GPIO165__FUNC_WATCHDOG (MTK_PIN_NO(165) | 1)
+
+#define PINMUX_GPIO166__FUNC_GPIO166 (MTK_PIN_NO(166) | 0)
+#define PINMUX_GPIO166__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(166) | 1)
+#define PINMUX_GPIO166__FUNC_AUD_CLK_MISO (MTK_PIN_NO(166) | 2)
+#define PINMUX_GPIO166__FUNC_I2S1_MCK (MTK_PIN_NO(166) | 3)
+
+#define PINMUX_GPIO167__FUNC_GPIO167 (MTK_PIN_NO(167) | 0)
+#define PINMUX_GPIO167__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(167) | 1)
+#define PINMUX_GPIO167__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(167) | 2)
+#define PINMUX_GPIO167__FUNC_I2S1_BCK (MTK_PIN_NO(167) | 3)
+
+#define PINMUX_GPIO168__FUNC_GPIO168 (MTK_PIN_NO(168) | 0)
+#define PINMUX_GPIO168__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(168) | 1)
+#define PINMUX_GPIO168__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(168) | 2)
+#define PINMUX_GPIO168__FUNC_I2S1_LRCK (MTK_PIN_NO(168) | 3)
+
+#define PINMUX_GPIO169__FUNC_GPIO169 (MTK_PIN_NO(169) | 0)
+#define PINMUX_GPIO169__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(169) | 1)
+#define PINMUX_GPIO169__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(169) | 2)
+#define PINMUX_GPIO169__FUNC_I2S1_DO (MTK_PIN_NO(169) | 3)
+
+#define PINMUX_GPIO170__FUNC_GPIO170 (MTK_PIN_NO(170) | 0)
+#define PINMUX_GPIO170__FUNC_AUD_CLK_MISO (MTK_PIN_NO(170) | 1)
+#define PINMUX_GPIO170__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(170) | 2)
+#define PINMUX_GPIO170__FUNC_I2S2_MCK (MTK_PIN_NO(170) | 3)
+
+#define PINMUX_GPIO171__FUNC_GPIO171 (MTK_PIN_NO(171) | 0)
+#define PINMUX_GPIO171__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(171) | 1)
+#define PINMUX_GPIO171__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(171) | 2)
+#define PINMUX_GPIO171__FUNC_I2S2_BCK (MTK_PIN_NO(171) | 3)
+
+#define PINMUX_GPIO172__FUNC_GPIO172 (MTK_PIN_NO(172) | 0)
+#define PINMUX_GPIO172__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(172) | 1)
+#define PINMUX_GPIO172__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(172) | 2)
+#define PINMUX_GPIO172__FUNC_I2S2_LRCK (MTK_PIN_NO(172) | 3)
+#define PINMUX_GPIO172__FUNC_VOW_DAT_MISO (MTK_PIN_NO(172) | 4)
+
+#define PINMUX_GPIO173__FUNC_GPIO173 (MTK_PIN_NO(173) | 0)
+#define PINMUX_GPIO173__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(173) | 1)
+#define PINMUX_GPIO173__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(173) | 2)
+#define PINMUX_GPIO173__FUNC_I2S2_DI (MTK_PIN_NO(173) | 3)
+#define PINMUX_GPIO173__FUNC_VOW_CLK_MISO (MTK_PIN_NO(173) | 4)
+
+#define PINMUX_GPIO174__FUNC_GPIO174 (MTK_PIN_NO(174) | 0)
+#define PINMUX_GPIO174__FUNC_CONN_TOP_CLK (MTK_PIN_NO(174) | 1)
+#define PINMUX_GPIO174__FUNC_AUXIF_CLK (MTK_PIN_NO(174) | 2)
+#define PINMUX_GPIO174__FUNC_DFD_TCK_XI (MTK_PIN_NO(174) | 3)
+#define PINMUX_GPIO174__FUNC_DBG_MON_B3 (MTK_PIN_NO(174) | 7)
+
+#define PINMUX_GPIO175__FUNC_GPIO175 (MTK_PIN_NO(175) | 0)
+#define PINMUX_GPIO175__FUNC_CONN_TOP_DATA (MTK_PIN_NO(175) | 1)
+#define PINMUX_GPIO175__FUNC_AUXIF_ST (MTK_PIN_NO(175) | 2)
+#define PINMUX_GPIO175__FUNC_DFD_TMS (MTK_PIN_NO(175) | 3)
+#define PINMUX_GPIO175__FUNC_DBG_MON_B15 (MTK_PIN_NO(175) | 7)
+
+#define PINMUX_GPIO176__FUNC_GPIO176 (MTK_PIN_NO(176) | 0)
+#define PINMUX_GPIO176__FUNC_CONN_BT_CLK (MTK_PIN_NO(176) | 1)
+#define PINMUX_GPIO176__FUNC_DFD_TDI (MTK_PIN_NO(176) | 3)
+#define PINMUX_GPIO176__FUNC_DBG_MON_B2 (MTK_PIN_NO(176) | 7)
+
+#define PINMUX_GPIO177__FUNC_GPIO177 (MTK_PIN_NO(177) | 0)
+#define PINMUX_GPIO177__FUNC_CONN_BT_DATA (MTK_PIN_NO(177) | 1)
+#define PINMUX_GPIO177__FUNC_DFD_TDO (MTK_PIN_NO(177) | 3)
+
+#define PINMUX_GPIO178__FUNC_GPIO178 (MTK_PIN_NO(178) | 0)
+#define PINMUX_GPIO178__FUNC_CONN_HRST_B (MTK_PIN_NO(178) | 1)
+#define PINMUX_GPIO178__FUNC_UDI_TMS (MTK_PIN_NO(178) | 3)
+#define PINMUX_GPIO178__FUNC_DBG_MON_B25 (MTK_PIN_NO(178) | 7)
+
+#define PINMUX_GPIO179__FUNC_GPIO179 (MTK_PIN_NO(179) | 0)
+#define PINMUX_GPIO179__FUNC_CONN_WB_PTA (MTK_PIN_NO(179) | 1)
+#define PINMUX_GPIO179__FUNC_UDI_TCK (MTK_PIN_NO(179) | 3)
+#define PINMUX_GPIO179__FUNC_DBG_MON_B29 (MTK_PIN_NO(179) | 7)
+
+#define PINMUX_GPIO180__FUNC_GPIO180 (MTK_PIN_NO(180) | 0)
+#define PINMUX_GPIO180__FUNC_CONN_WF_CTRL0 (MTK_PIN_NO(180) | 1)
+#define PINMUX_GPIO180__FUNC_UDI_TDI (MTK_PIN_NO(180) | 3)
+
+#define PINMUX_GPIO181__FUNC_GPIO181 (MTK_PIN_NO(181) | 0)
+#define PINMUX_GPIO181__FUNC_CONN_WF_CTRL1 (MTK_PIN_NO(181) | 1)
+#define PINMUX_GPIO181__FUNC_UDI_TDO (MTK_PIN_NO(181) | 3)
+
+#define PINMUX_GPIO182__FUNC_GPIO182 (MTK_PIN_NO(182) | 0)
+#define PINMUX_GPIO182__FUNC_CONN_WF_CTRL2 (MTK_PIN_NO(182) | 1)
+#define PINMUX_GPIO182__FUNC_UDI_NTRST (MTK_PIN_NO(182) | 3)
+
+#define PINMUX_GPIO183__FUNC_GPIO183 (MTK_PIN_NO(183) | 0)
+#define PINMUX_GPIO183__FUNC_SPMI_SCL (MTK_PIN_NO(183) | 1)
+
+#define PINMUX_GPIO184__FUNC_GPIO184 (MTK_PIN_NO(184) | 0)
+#define PINMUX_GPIO184__FUNC_SPMI_SDA (MTK_PIN_NO(184) | 1)
+
+#endif /* __MT8186_PINFUNC_H */
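
The macros above all follow the same packing scheme: MTK_PIN_NO(x), defined as ((x) << 8) in <dt-bindings/pinctrl/mt65xx.h>, carries the pin index in the upper bits, while the low bits select one of up to eight mux functions, so PINMUX_GPIO45__FUNC_SPI2_CSB_A evaluates to (45 << 8) | 1. Below is a minimal sketch of how a board .dts might consume these definitions, assuming the usual MediaTek pinctrl binding with a "pinmux" property; the node and group names are illustrative only, not taken from any in-tree board file:

	#include <dt-bindings/pinctrl/mt8186-pinfunc.h>

	&pio {
		/* Route GPIOs 45-47 to the SPI2 controller (mux mode 1). */
		spi2_pins: spi2-pins {
			pins-spi {
				pinmux = <PINMUX_GPIO45__FUNC_SPI2_CSB_A>,
					 <PINMUX_GPIO46__FUNC_SPI2_MO_A>,
					 <PINMUX_GPIO47__FUNC_SPI2_MI_A>;
				bias-disable;
			};
		};
	};

Selecting function 0 (e.g. PINMUX_GPIO45__FUNC_GPIO45) returns a pin to plain GPIO mode; the driver decodes the pin number and mux value back out of each cell at parse time.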
diff --git a/include/dt-bindings/pinctrl/mt8365-pinfunc.h b/include/dt-bindings/pinctrl/mt8365-pinfunc.h
new file mode 100644
index 000000000000..e2ec8af57dcf
--- /dev/null
+++ b/include/dt-bindings/pinctrl/mt8365-pinfunc.h
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 MediaTek Inc.
+ */
+#ifndef __MT8365_PINFUNC_H
+#define __MT8365_PINFUNC_H
+
+#include <dt-bindings/pinctrl/mt65xx.h>
+
+#define MT8365_PIN_0_GPIO0__FUNC_GPIO0 (MTK_PIN_NO(0) | 0)
+#define MT8365_PIN_0_GPIO0__FUNC_DPI_D0 (MTK_PIN_NO(0) | 1)
+#define MT8365_PIN_0_GPIO0__FUNC_PWM_A (MTK_PIN_NO(0) | 2)
+#define MT8365_PIN_0_GPIO0__FUNC_I2S2_BCK (MTK_PIN_NO(0) | 3)
+#define MT8365_PIN_0_GPIO0__FUNC_EXT_TXD0 (MTK_PIN_NO(0) | 4)
+#define MT8365_PIN_0_GPIO0__FUNC_CONN_MCU_TDO (MTK_PIN_NO(0) | 5)
+#define MT8365_PIN_0_GPIO0__FUNC_DBG_MON_A0 (MTK_PIN_NO(0) | 7)
+
+#define MT8365_PIN_1_GPIO1__FUNC_GPIO1 (MTK_PIN_NO(1) | 0)
+#define MT8365_PIN_1_GPIO1__FUNC_DPI_D1 (MTK_PIN_NO(1) | 1)
+#define MT8365_PIN_1_GPIO1__FUNC_PWM_B (MTK_PIN_NO(1) | 2)
+#define MT8365_PIN_1_GPIO1__FUNC_I2S2_LRCK (MTK_PIN_NO(1) | 3)
+#define MT8365_PIN_1_GPIO1__FUNC_EXT_TXD1 (MTK_PIN_NO(1) | 4)
+#define MT8365_PIN_1_GPIO1__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(1) | 5)
+#define MT8365_PIN_1_GPIO1__FUNC_DBG_MON_A1 (MTK_PIN_NO(1) | 7)
+
+#define MT8365_PIN_2_GPIO2__FUNC_GPIO2 (MTK_PIN_NO(2) | 0)
+#define MT8365_PIN_2_GPIO2__FUNC_DPI_D2 (MTK_PIN_NO(2) | 1)
+#define MT8365_PIN_2_GPIO2__FUNC_PWM_C (MTK_PIN_NO(2) | 2)
+#define MT8365_PIN_2_GPIO2__FUNC_I2S2_MCK (MTK_PIN_NO(2) | 3)
+#define MT8365_PIN_2_GPIO2__FUNC_EXT_TXD2 (MTK_PIN_NO(2) | 4)
+#define MT8365_PIN_2_GPIO2__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(2) | 5)
+#define MT8365_PIN_2_GPIO2__FUNC_DBG_MON_A2 (MTK_PIN_NO(2) | 7)
+
+#define MT8365_PIN_3_GPIO3__FUNC_GPIO3 (MTK_PIN_NO(3) | 0)
+#define MT8365_PIN_3_GPIO3__FUNC_DPI_D3 (MTK_PIN_NO(3) | 1)
+#define MT8365_PIN_3_GPIO3__FUNC_CLKM0 (MTK_PIN_NO(3) | 2)
+#define MT8365_PIN_3_GPIO3__FUNC_I2S2_DI (MTK_PIN_NO(3) | 3)
+#define MT8365_PIN_3_GPIO3__FUNC_EXT_TXD3 (MTK_PIN_NO(3) | 4)
+#define MT8365_PIN_3_GPIO3__FUNC_CONN_MCU_TCK (MTK_PIN_NO(3) | 5)
+#define MT8365_PIN_3_GPIO3__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(3) | 6)
+#define MT8365_PIN_3_GPIO3__FUNC_DBG_MON_A3 (MTK_PIN_NO(3) | 7)
+
+#define MT8365_PIN_4_GPIO4__FUNC_GPIO4 (MTK_PIN_NO(4) | 0)
+#define MT8365_PIN_4_GPIO4__FUNC_DPI_D4 (MTK_PIN_NO(4) | 1)
+#define MT8365_PIN_4_GPIO4__FUNC_CLKM1 (MTK_PIN_NO(4) | 2)
+#define MT8365_PIN_4_GPIO4__FUNC_I2S1_BCK (MTK_PIN_NO(4) | 3)
+#define MT8365_PIN_4_GPIO4__FUNC_EXT_TXC (MTK_PIN_NO(4) | 4)
+#define MT8365_PIN_4_GPIO4__FUNC_CONN_MCU_TDI (MTK_PIN_NO(4) | 5)
+#define MT8365_PIN_4_GPIO4__FUNC_VDEC_TEST_CK (MTK_PIN_NO(4) | 6)
+#define MT8365_PIN_4_GPIO4__FUNC_DBG_MON_A4 (MTK_PIN_NO(4) | 7)
+
+#define MT8365_PIN_5_GPIO5__FUNC_GPIO5 (MTK_PIN_NO(5) | 0)
+#define MT8365_PIN_5_GPIO5__FUNC_DPI_D5 (MTK_PIN_NO(5) | 1)
+#define MT8365_PIN_5_GPIO5__FUNC_CLKM2 (MTK_PIN_NO(5) | 2)
+#define MT8365_PIN_5_GPIO5__FUNC_I2S1_LRCK (MTK_PIN_NO(5) | 3)
+#define MT8365_PIN_5_GPIO5__FUNC_EXT_RXER (MTK_PIN_NO(5) | 4)
+#define MT8365_PIN_5_GPIO5__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(5) | 5)
+#define MT8365_PIN_5_GPIO5__FUNC_MM_TEST_CK (MTK_PIN_NO(5) | 6)
+#define MT8365_PIN_5_GPIO5__FUNC_DBG_MON_A5 (MTK_PIN_NO(5) | 7)
+
+#define MT8365_PIN_6_GPIO6__FUNC_GPIO6 (MTK_PIN_NO(6) | 0)
+#define MT8365_PIN_6_GPIO6__FUNC_DPI_D6 (MTK_PIN_NO(6) | 1)
+#define MT8365_PIN_6_GPIO6__FUNC_CLKM3 (MTK_PIN_NO(6) | 2)
+#define MT8365_PIN_6_GPIO6__FUNC_I2S1_MCK (MTK_PIN_NO(6) | 3)
+#define MT8365_PIN_6_GPIO6__FUNC_EXT_RXC (MTK_PIN_NO(6) | 4)
+#define MT8365_PIN_6_GPIO6__FUNC_CONN_MCU_TMS (MTK_PIN_NO(6) | 5)
+#define MT8365_PIN_6_GPIO6__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(6) | 6)
+#define MT8365_PIN_6_GPIO6__FUNC_DBG_MON_A6 (MTK_PIN_NO(6) | 7)
+
+#define MT8365_PIN_7_GPIO7__FUNC_GPIO7 (MTK_PIN_NO(7) | 0)
+#define MT8365_PIN_7_GPIO7__FUNC_DPI_D7 (MTK_PIN_NO(7) | 1)
+#define MT8365_PIN_7_GPIO7__FUNC_I2S1_DO (MTK_PIN_NO(7) | 3)
+#define MT8365_PIN_7_GPIO7__FUNC_EXT_RXDV (MTK_PIN_NO(7) | 4)
+#define MT8365_PIN_7_GPIO7__FUNC_CONN_DSP_JCK (MTK_PIN_NO(7) | 5)
+#define MT8365_PIN_7_GPIO7__FUNC_DBG_MON_A7 (MTK_PIN_NO(7) | 7)
+
+#define MT8365_PIN_8_GPIO8__FUNC_GPIO8 (MTK_PIN_NO(8) | 0)
+#define MT8365_PIN_8_GPIO8__FUNC_DPI_D8 (MTK_PIN_NO(8) | 1)
+#define MT8365_PIN_8_GPIO8__FUNC_SPI_CLK (MTK_PIN_NO(8) | 2)
+#define MT8365_PIN_8_GPIO8__FUNC_I2S0_BCK (MTK_PIN_NO(8) | 3)
+#define MT8365_PIN_8_GPIO8__FUNC_EXT_RXD0 (MTK_PIN_NO(8) | 4)
+#define MT8365_PIN_8_GPIO8__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(8) | 5)
+#define MT8365_PIN_8_GPIO8__FUNC_DBG_MON_A8 (MTK_PIN_NO(8) | 7)
+
+#define MT8365_PIN_9_GPIO9__FUNC_GPIO9 (MTK_PIN_NO(9) | 0)
+#define MT8365_PIN_9_GPIO9__FUNC_DPI_D9 (MTK_PIN_NO(9) | 1)
+#define MT8365_PIN_9_GPIO9__FUNC_SPI_CSB (MTK_PIN_NO(9) | 2)
+#define MT8365_PIN_9_GPIO9__FUNC_I2S0_LRCK (MTK_PIN_NO(9) | 3)
+#define MT8365_PIN_9_GPIO9__FUNC_EXT_RXD1 (MTK_PIN_NO(9) | 4)
+#define MT8365_PIN_9_GPIO9__FUNC_CONN_DSP_JDI (MTK_PIN_NO(9) | 5)
+#define MT8365_PIN_9_GPIO9__FUNC_DBG_MON_A9 (MTK_PIN_NO(9) | 7)
+
+#define MT8365_PIN_10_GPIO10__FUNC_GPIO10 (MTK_PIN_NO(10) | 0)
+#define MT8365_PIN_10_GPIO10__FUNC_DPI_D10 (MTK_PIN_NO(10) | 1)
+#define MT8365_PIN_10_GPIO10__FUNC_SPI_MI (MTK_PIN_NO(10) | 2)
+#define MT8365_PIN_10_GPIO10__FUNC_I2S0_MCK (MTK_PIN_NO(10) | 3)
+#define MT8365_PIN_10_GPIO10__FUNC_EXT_RXD2 (MTK_PIN_NO(10) | 4)
+#define MT8365_PIN_10_GPIO10__FUNC_CONN_DSP_JMS (MTK_PIN_NO(10) | 5)
+#define MT8365_PIN_10_GPIO10__FUNC_DBG_MON_A10 (MTK_PIN_NO(10) | 7)
+
+#define MT8365_PIN_11_GPIO11__FUNC_GPIO11 (MTK_PIN_NO(11) | 0)
+#define MT8365_PIN_11_GPIO11__FUNC_DPI_D11 (MTK_PIN_NO(11) | 1)
+#define MT8365_PIN_11_GPIO11__FUNC_SPI_MO (MTK_PIN_NO(11) | 2)
+#define MT8365_PIN_11_GPIO11__FUNC_I2S0_DI (MTK_PIN_NO(11) | 3)
+#define MT8365_PIN_11_GPIO11__FUNC_EXT_RXD3 (MTK_PIN_NO(11) | 4)
+#define MT8365_PIN_11_GPIO11__FUNC_CONN_DSP_JDO (MTK_PIN_NO(11) | 5)
+#define MT8365_PIN_11_GPIO11__FUNC_DBG_MON_A11 (MTK_PIN_NO(11) | 7)
+
+#define MT8365_PIN_12_GPIO12__FUNC_GPIO12 (MTK_PIN_NO(12) | 0)
+#define MT8365_PIN_12_GPIO12__FUNC_DPI_DE (MTK_PIN_NO(12) | 1)
+#define MT8365_PIN_12_GPIO12__FUNC_UCTS1 (MTK_PIN_NO(12) | 2)
+#define MT8365_PIN_12_GPIO12__FUNC_I2S3_BCK (MTK_PIN_NO(12) | 3)
+#define MT8365_PIN_12_GPIO12__FUNC_EXT_TXEN (MTK_PIN_NO(12) | 4)
+#define MT8365_PIN_12_GPIO12__FUNC_O_WIFI_TXD (MTK_PIN_NO(12) | 5)
+#define MT8365_PIN_12_GPIO12__FUNC_DBG_MON_A12 (MTK_PIN_NO(12) | 7)
+
+#define MT8365_PIN_13_GPIO13__FUNC_GPIO13 (MTK_PIN_NO(13) | 0)
+#define MT8365_PIN_13_GPIO13__FUNC_DPI_VSYNC (MTK_PIN_NO(13) | 1)
+#define MT8365_PIN_13_GPIO13__FUNC_URTS1 (MTK_PIN_NO(13) | 2)
+#define MT8365_PIN_13_GPIO13__FUNC_I2S3_LRCK (MTK_PIN_NO(13) | 3)
+#define MT8365_PIN_13_GPIO13__FUNC_EXT_COL (MTK_PIN_NO(13) | 4)
+#define MT8365_PIN_13_GPIO13__FUNC_SPDIF_IN (MTK_PIN_NO(13) | 5)
+#define MT8365_PIN_13_GPIO13__FUNC_DBG_MON_A13 (MTK_PIN_NO(13) | 7)
+
+#define MT8365_PIN_14_GPIO14__FUNC_GPIO14 (MTK_PIN_NO(14) | 0)
+#define MT8365_PIN_14_GPIO14__FUNC_DPI_CK (MTK_PIN_NO(14) | 1)
+#define MT8365_PIN_14_GPIO14__FUNC_UCTS2 (MTK_PIN_NO(14) | 2)
+#define MT8365_PIN_14_GPIO14__FUNC_I2S3_MCK (MTK_PIN_NO(14) | 3)
+#define MT8365_PIN_14_GPIO14__FUNC_EXT_MDIO (MTK_PIN_NO(14) | 4)
+#define MT8365_PIN_14_GPIO14__FUNC_SPDIF_OUT (MTK_PIN_NO(14) | 5)
+#define MT8365_PIN_14_GPIO14__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(14) | 6)
+#define MT8365_PIN_14_GPIO14__FUNC_DBG_MON_A14 (MTK_PIN_NO(14) | 7)
+
+#define MT8365_PIN_15_GPIO15__FUNC_GPIO15 (MTK_PIN_NO(15) | 0)
+#define MT8365_PIN_15_GPIO15__FUNC_DPI_HSYNC (MTK_PIN_NO(15) | 1)
+#define MT8365_PIN_15_GPIO15__FUNC_URTS2 (MTK_PIN_NO(15) | 2)
+#define MT8365_PIN_15_GPIO15__FUNC_I2S3_DO (MTK_PIN_NO(15) | 3)
+#define MT8365_PIN_15_GPIO15__FUNC_EXT_MDC (MTK_PIN_NO(15) | 4)
+#define MT8365_PIN_15_GPIO15__FUNC_IRRX (MTK_PIN_NO(15) | 5)
+#define MT8365_PIN_15_GPIO15__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(15) | 6)
+#define MT8365_PIN_15_GPIO15__FUNC_DBG_MON_A15 (MTK_PIN_NO(15) | 7)
+
+#define MT8365_PIN_16_GPIO16__FUNC_GPIO16 (MTK_PIN_NO(16) | 0)
+#define MT8365_PIN_16_GPIO16__FUNC_DPI_D12 (MTK_PIN_NO(16) | 1)
+#define MT8365_PIN_16_GPIO16__FUNC_USB_DRVVBUS (MTK_PIN_NO(16) | 2)
+#define MT8365_PIN_16_GPIO16__FUNC_PWM_A (MTK_PIN_NO(16) | 3)
+#define MT8365_PIN_16_GPIO16__FUNC_CLKM0 (MTK_PIN_NO(16) | 4)
+#define MT8365_PIN_16_GPIO16__FUNC_ANT_SEL0 (MTK_PIN_NO(16) | 5)
+#define MT8365_PIN_16_GPIO16__FUNC_TSF_IN (MTK_PIN_NO(16) | 6)
+#define MT8365_PIN_16_GPIO16__FUNC_DBG_MON_A16 (MTK_PIN_NO(16) | 7)
+
+#define MT8365_PIN_17_GPIO17__FUNC_GPIO17 (MTK_PIN_NO(17) | 0)
+#define MT8365_PIN_17_GPIO17__FUNC_DPI_D13 (MTK_PIN_NO(17) | 1)
+#define MT8365_PIN_17_GPIO17__FUNC_IDDIG (MTK_PIN_NO(17) | 2)
+#define MT8365_PIN_17_GPIO17__FUNC_PWM_B (MTK_PIN_NO(17) | 3)
+#define MT8365_PIN_17_GPIO17__FUNC_CLKM1 (MTK_PIN_NO(17) | 4)
+#define MT8365_PIN_17_GPIO17__FUNC_ANT_SEL1 (MTK_PIN_NO(17) | 5)
+#define MT8365_PIN_17_GPIO17__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(17) | 6)
+#define MT8365_PIN_17_GPIO17__FUNC_DBG_MON_A17 (MTK_PIN_NO(17) | 7)
+
+#define MT8365_PIN_18_GPIO18__FUNC_GPIO18 (MTK_PIN_NO(18) | 0)
+#define MT8365_PIN_18_GPIO18__FUNC_DPI_D14 (MTK_PIN_NO(18) | 1)
+#define MT8365_PIN_18_GPIO18__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(18) | 2)
+#define MT8365_PIN_18_GPIO18__FUNC_PWM_C (MTK_PIN_NO(18) | 3)
+#define MT8365_PIN_18_GPIO18__FUNC_CLKM2 (MTK_PIN_NO(18) | 4)
+#define MT8365_PIN_18_GPIO18__FUNC_ANT_SEL2 (MTK_PIN_NO(18) | 5)
+#define MT8365_PIN_18_GPIO18__FUNC_MFG_TEST_CK (MTK_PIN_NO(18) | 6)
+#define MT8365_PIN_18_GPIO18__FUNC_DBG_MON_A18 (MTK_PIN_NO(18) | 7)
+
+#define MT8365_PIN_19_DISP_PWM__FUNC_GPIO19 (MTK_PIN_NO(19) | 0)
+#define MT8365_PIN_19_DISP_PWM__FUNC_DISP_PWM (MTK_PIN_NO(19) | 1)
+#define MT8365_PIN_19_DISP_PWM__FUNC_PWM_A (MTK_PIN_NO(19) | 2)
+#define MT8365_PIN_19_DISP_PWM__FUNC_DBG_MON_A19 (MTK_PIN_NO(19) | 7)
+
+#define MT8365_PIN_20_LCM_RST__FUNC_GPIO20 (MTK_PIN_NO(20) | 0)
+#define MT8365_PIN_20_LCM_RST__FUNC_LCM_RST (MTK_PIN_NO(20) | 1)
+#define MT8365_PIN_20_LCM_RST__FUNC_PWM_B (MTK_PIN_NO(20) | 2)
+#define MT8365_PIN_20_LCM_RST__FUNC_DBG_MON_A20 (MTK_PIN_NO(20) | 7)
+
+#define MT8365_PIN_21_DSI_TE__FUNC_GPIO21 (MTK_PIN_NO(21) | 0)
+#define MT8365_PIN_21_DSI_TE__FUNC_DSI_TE (MTK_PIN_NO(21) | 1)
+#define MT8365_PIN_21_DSI_TE__FUNC_PWM_C (MTK_PIN_NO(21) | 2)
+#define MT8365_PIN_21_DSI_TE__FUNC_ANT_SEL0 (MTK_PIN_NO(21) | 3)
+#define MT8365_PIN_21_DSI_TE__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(21) | 4)
+#define MT8365_PIN_21_DSI_TE__FUNC_DBG_MON_A21 (MTK_PIN_NO(21) | 7)
+
+#define MT8365_PIN_22_KPROW0__FUNC_GPIO22 (MTK_PIN_NO(22) | 0)
+#define MT8365_PIN_22_KPROW0__FUNC_KPROW0 (MTK_PIN_NO(22) | 1)
+#define MT8365_PIN_22_KPROW0__FUNC_DBG_MON_A22 (MTK_PIN_NO(22) | 7)
+
+#define MT8365_PIN_23_KPROW1__FUNC_GPIO23 (MTK_PIN_NO(23) | 0)
+#define MT8365_PIN_23_KPROW1__FUNC_KPROW1 (MTK_PIN_NO(23) | 1)
+#define MT8365_PIN_23_KPROW1__FUNC_IDDIG (MTK_PIN_NO(23) | 2)
+#define MT8365_PIN_23_KPROW1__FUNC_WIFI_TXD (MTK_PIN_NO(23) | 3)
+#define MT8365_PIN_23_KPROW1__FUNC_CLKM3 (MTK_PIN_NO(23) | 4)
+#define MT8365_PIN_23_KPROW1__FUNC_ANT_SEL1 (MTK_PIN_NO(23) | 5)
+#define MT8365_PIN_23_KPROW1__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(23) | 6)
+#define MT8365_PIN_23_KPROW1__FUNC_DBG_MON_B0 (MTK_PIN_NO(23) | 7)
+
+#define MT8365_PIN_24_KPCOL0__FUNC_GPIO24 (MTK_PIN_NO(24) | 0)
+#define MT8365_PIN_24_KPCOL0__FUNC_KPCOL0 (MTK_PIN_NO(24) | 1)
+#define MT8365_PIN_24_KPCOL0__FUNC_DBG_MON_A23 (MTK_PIN_NO(24) | 7)
+
+#define MT8365_PIN_25_KPCOL1__FUNC_GPIO25 (MTK_PIN_NO(25) | 0)
+#define MT8365_PIN_25_KPCOL1__FUNC_KPCOL1 (MTK_PIN_NO(25) | 1)
+#define MT8365_PIN_25_KPCOL1__FUNC_USB_DRVVBUS (MTK_PIN_NO(25) | 2)
+#define MT8365_PIN_25_KPCOL1__FUNC_APU_JTAG_TRST (MTK_PIN_NO(25) | 3)
+#define MT8365_PIN_25_KPCOL1__FUNC_UDI_NTRST_XI (MTK_PIN_NO(25) | 4)
+#define MT8365_PIN_25_KPCOL1__FUNC_DFD_NTRST_XI (MTK_PIN_NO(25) | 5)
+#define MT8365_PIN_25_KPCOL1__FUNC_CONN_TEST_CK (MTK_PIN_NO(25) | 6)
+#define MT8365_PIN_25_KPCOL1__FUNC_DBG_MON_B1 (MTK_PIN_NO(25) | 7)
+
+#define MT8365_PIN_26_SPI_CS__FUNC_GPIO26 (MTK_PIN_NO(26) | 0)
+#define MT8365_PIN_26_SPI_CS__FUNC_SPI_CSB (MTK_PIN_NO(26) | 1)
+#define MT8365_PIN_26_SPI_CS__FUNC_APU_JTAG_TMS (MTK_PIN_NO(26) | 3)
+#define MT8365_PIN_26_SPI_CS__FUNC_UDI_TMS_XI (MTK_PIN_NO(26) | 4)
+#define MT8365_PIN_26_SPI_CS__FUNC_DFD_TMS_XI (MTK_PIN_NO(26) | 5)
+#define MT8365_PIN_26_SPI_CS__FUNC_CONN_TEST_CK (MTK_PIN_NO(26) | 6)
+#define MT8365_PIN_26_SPI_CS__FUNC_DBG_MON_A24 (MTK_PIN_NO(26) | 7)
+
+#define MT8365_PIN_27_SPI_CK__FUNC_GPIO27 (MTK_PIN_NO(27) | 0)
+#define MT8365_PIN_27_SPI_CK__FUNC_SPI_CLK (MTK_PIN_NO(27) | 1)
+#define MT8365_PIN_27_SPI_CK__FUNC_APU_JTAG_TCK (MTK_PIN_NO(27) | 3)
+#define MT8365_PIN_27_SPI_CK__FUNC_UDI_TCK_XI (MTK_PIN_NO(27) | 4)
+#define MT8365_PIN_27_SPI_CK__FUNC_DFD_TCK_XI (MTK_PIN_NO(27) | 5)
+#define MT8365_PIN_27_SPI_CK__FUNC_APU_TEST_CK (MTK_PIN_NO(27) | 6)
+#define MT8365_PIN_27_SPI_CK__FUNC_DBG_MON_A25 (MTK_PIN_NO(27) | 7)
+
+#define MT8365_PIN_28_SPI_MI__FUNC_GPIO28 (MTK_PIN_NO(28) | 0)
+#define MT8365_PIN_28_SPI_MI__FUNC_SPI_MI (MTK_PIN_NO(28) | 1)
+#define MT8365_PIN_28_SPI_MI__FUNC_SPI_MO (MTK_PIN_NO(28) | 2)
+#define MT8365_PIN_28_SPI_MI__FUNC_APU_JTAG_TDI (MTK_PIN_NO(28) | 3)
+#define MT8365_PIN_28_SPI_MI__FUNC_UDI_TDI_XI (MTK_PIN_NO(28) | 4)
+#define MT8365_PIN_28_SPI_MI__FUNC_DFD_TDI_XI (MTK_PIN_NO(28) | 5)
+#define MT8365_PIN_28_SPI_MI__FUNC_DSP_TEST_CK (MTK_PIN_NO(28) | 6)
+#define MT8365_PIN_28_SPI_MI__FUNC_DBG_MON_A26 (MTK_PIN_NO(28) | 7)
+
+#define MT8365_PIN_29_SPI_MO__FUNC_GPIO29 (MTK_PIN_NO(29) | 0)
+#define MT8365_PIN_29_SPI_MO__FUNC_SPI_MO (MTK_PIN_NO(29) | 1)
+#define MT8365_PIN_29_SPI_MO__FUNC_SPI_MI (MTK_PIN_NO(29) | 2)
+#define MT8365_PIN_29_SPI_MO__FUNC_APU_JTAG_TDO (MTK_PIN_NO(29) | 3)
+#define MT8365_PIN_29_SPI_MO__FUNC_UDI_TDO (MTK_PIN_NO(29) | 4)
+#define MT8365_PIN_29_SPI_MO__FUNC_DFD_TDO (MTK_PIN_NO(29) | 5)
+#define MT8365_PIN_29_SPI_MO__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(29) | 6)
+#define MT8365_PIN_29_SPI_MO__FUNC_DBG_MON_A27 (MTK_PIN_NO(29) | 7)
+
+#define MT8365_PIN_30_JTMS__FUNC_GPIO30 (MTK_PIN_NO(30) | 0)
+#define MT8365_PIN_30_JTMS__FUNC_JTMS (MTK_PIN_NO(30) | 1)
+#define MT8365_PIN_30_JTMS__FUNC_DFD_TMS_XI (MTK_PIN_NO(30) | 2)
+#define MT8365_PIN_30_JTMS__FUNC_UDI_TMS_XI (MTK_PIN_NO(30) | 3)
+#define MT8365_PIN_30_JTMS__FUNC_MCU_SPM_TMS (MTK_PIN_NO(30) | 4)
+#define MT8365_PIN_30_JTMS__FUNC_CONN_MCU_TMS (MTK_PIN_NO(30) | 5)
+#define MT8365_PIN_30_JTMS__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(30) | 6)
+
+#define MT8365_PIN_31_JTCK__FUNC_GPIO31 (MTK_PIN_NO(31) | 0)
+#define MT8365_PIN_31_JTCK__FUNC_JTCK (MTK_PIN_NO(31) | 1)
+#define MT8365_PIN_31_JTCK__FUNC_DFD_TCK_XI (MTK_PIN_NO(31) | 2)
+#define MT8365_PIN_31_JTCK__FUNC_UDI_TCK_XI (MTK_PIN_NO(31) | 3)
+#define MT8365_PIN_31_JTCK__FUNC_MCU_SPM_TCK (MTK_PIN_NO(31) | 4)
+#define MT8365_PIN_31_JTCK__FUNC_CONN_MCU_TCK (MTK_PIN_NO(31) | 5)
+#define MT8365_PIN_31_JTCK__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(31) | 6)
+
+#define MT8365_PIN_32_JTDI__FUNC_GPIO32 (MTK_PIN_NO(32) | 0)
+#define MT8365_PIN_32_JTDI__FUNC_JTDI (MTK_PIN_NO(32) | 1)
+#define MT8365_PIN_32_JTDI__FUNC_DFD_TDI_XI (MTK_PIN_NO(32) | 2)
+#define MT8365_PIN_32_JTDI__FUNC_UDI_TDI_XI (MTK_PIN_NO(32) | 3)
+#define MT8365_PIN_32_JTDI__FUNC_MCU_SPM_TDI (MTK_PIN_NO(32) | 4)
+#define MT8365_PIN_32_JTDI__FUNC_CONN_MCU_TDI (MTK_PIN_NO(32) | 5)
+
+#define MT8365_PIN_33_JTDO__FUNC_GPIO33 (MTK_PIN_NO(33) | 0)
+#define MT8365_PIN_33_JTDO__FUNC_JTDO (MTK_PIN_NO(33) | 1)
+#define MT8365_PIN_33_JTDO__FUNC_DFD_TDO (MTK_PIN_NO(33) | 2)
+#define MT8365_PIN_33_JTDO__FUNC_UDI_TDO (MTK_PIN_NO(33) | 3)
+#define MT8365_PIN_33_JTDO__FUNC_MCU_SPM_TDO (MTK_PIN_NO(33) | 4)
+#define MT8365_PIN_33_JTDO__FUNC_CONN_MCU_TDO (MTK_PIN_NO(33) | 5)
+
+#define MT8365_PIN_34_JTRST__FUNC_GPIO34 (MTK_PIN_NO(34) | 0)
+#define MT8365_PIN_34_JTRST__FUNC_JTRST (MTK_PIN_NO(34) | 1)
+#define MT8365_PIN_34_JTRST__FUNC_DFD_NTRST_XI (MTK_PIN_NO(34) | 2)
+#define MT8365_PIN_34_JTRST__FUNC_UDI_NTRST_XI (MTK_PIN_NO(34) | 3)
+#define MT8365_PIN_34_JTRST__FUNC_MCU_SPM_NTRST (MTK_PIN_NO(34) | 4)
+#define MT8365_PIN_34_JTRST__FUNC_CONN_MCU_TRST_B (MTK_PIN_NO(34) | 5)
+
+#define MT8365_PIN_35_URXD0__FUNC_GPIO35 (MTK_PIN_NO(35) | 0)
+#define MT8365_PIN_35_URXD0__FUNC_URXD0 (MTK_PIN_NO(35) | 1)
+#define MT8365_PIN_35_URXD0__FUNC_UTXD0 (MTK_PIN_NO(35) | 2)
+#define MT8365_PIN_35_URXD0__FUNC_DSP_URXD0 (MTK_PIN_NO(35) | 7)
+
+#define MT8365_PIN_36_UTXD0__FUNC_GPIO36 (MTK_PIN_NO(36) | 0)
+#define MT8365_PIN_36_UTXD0__FUNC_UTXD0 (MTK_PIN_NO(36) | 1)
+#define MT8365_PIN_36_UTXD0__FUNC_URXD0 (MTK_PIN_NO(36) | 2)
+#define MT8365_PIN_36_UTXD0__FUNC_DSP_UTXD0 (MTK_PIN_NO(36) | 7)
+
+#define MT8365_PIN_37_URXD1__FUNC_GPIO37 (MTK_PIN_NO(37) | 0)
+#define MT8365_PIN_37_URXD1__FUNC_URXD1 (MTK_PIN_NO(37) | 1)
+#define MT8365_PIN_37_URXD1__FUNC_UTXD1 (MTK_PIN_NO(37) | 2)
+#define MT8365_PIN_37_URXD1__FUNC_UCTS2 (MTK_PIN_NO(37) | 3)
+#define MT8365_PIN_37_URXD1__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(37) | 4)
+#define MT8365_PIN_37_URXD1__FUNC_CONN_UART0_RXD (MTK_PIN_NO(37) | 5)
+#define MT8365_PIN_37_URXD1__FUNC_I2S0_MCK (MTK_PIN_NO(37) | 6)
+#define MT8365_PIN_37_URXD1__FUNC_DSP_URXD0 (MTK_PIN_NO(37) | 7)
+
+#define MT8365_PIN_38_UTXD1__FUNC_GPIO38 (MTK_PIN_NO(38) | 0)
+#define MT8365_PIN_38_UTXD1__FUNC_UTXD1 (MTK_PIN_NO(38) | 1)
+#define MT8365_PIN_38_UTXD1__FUNC_URXD1 (MTK_PIN_NO(38) | 2)
+#define MT8365_PIN_38_UTXD1__FUNC_URTS2 (MTK_PIN_NO(38) | 3)
+#define MT8365_PIN_38_UTXD1__FUNC_ANT_SEL2 (MTK_PIN_NO(38) | 4)
+#define MT8365_PIN_38_UTXD1__FUNC_CONN_UART0_TXD (MTK_PIN_NO(38) | 5)
+#define MT8365_PIN_38_UTXD1__FUNC_I2S1_MCK (MTK_PIN_NO(38) | 6)
+#define MT8365_PIN_38_UTXD1__FUNC_DSP_UTXD0 (MTK_PIN_NO(38) | 7)
+
+#define MT8365_PIN_39_URXD2__FUNC_GPIO39 (MTK_PIN_NO(39) | 0)
+#define MT8365_PIN_39_URXD2__FUNC_URXD2 (MTK_PIN_NO(39) | 1)
+#define MT8365_PIN_39_URXD2__FUNC_UTXD2 (MTK_PIN_NO(39) | 2)
+#define MT8365_PIN_39_URXD2__FUNC_UCTS1 (MTK_PIN_NO(39) | 3)
+#define MT8365_PIN_39_URXD2__FUNC_IDDIG (MTK_PIN_NO(39) | 4)
+#define MT8365_PIN_39_URXD2__FUNC_CONN_MCU_DBGACK_N (MTK_PIN_NO(39) | 5)
+#define MT8365_PIN_39_URXD2__FUNC_I2S2_MCK (MTK_PIN_NO(39) | 6)
+#define MT8365_PIN_39_URXD2__FUNC_DSP_URXD0 (MTK_PIN_NO(39) | 7)
+
+#define MT8365_PIN_40_UTXD2__FUNC_GPIO40 (MTK_PIN_NO(40) | 0)
+#define MT8365_PIN_40_UTXD2__FUNC_UTXD2 (MTK_PIN_NO(40) | 1)
+#define MT8365_PIN_40_UTXD2__FUNC_URXD2 (MTK_PIN_NO(40) | 2)
+#define MT8365_PIN_40_UTXD2__FUNC_URTS1 (MTK_PIN_NO(40) | 3)
+#define MT8365_PIN_40_UTXD2__FUNC_USB_DRVVBUS (MTK_PIN_NO(40) | 4)
+#define MT8365_PIN_40_UTXD2__FUNC_CONN_MCU_DBGI_N (MTK_PIN_NO(40) | 5)
+#define MT8365_PIN_40_UTXD2__FUNC_I2S3_MCK (MTK_PIN_NO(40) | 6)
+#define MT8365_PIN_40_UTXD2__FUNC_DSP_UTXD0 (MTK_PIN_NO(40) | 7)
+
+#define MT8365_PIN_41_PWRAP_SPI0_MI__FUNC_GPIO41 (MTK_PIN_NO(41) | 0)
+#define MT8365_PIN_41_PWRAP_SPI0_MI__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(41) | 1)
+#define MT8365_PIN_41_PWRAP_SPI0_MI__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(41) | 2)
+
+#define MT8365_PIN_42_PWRAP_SPI0_MO__FUNC_GPIO42 (MTK_PIN_NO(42) | 0)
+#define MT8365_PIN_42_PWRAP_SPI0_MO__FUNC_PWRAP_SPI0_MO (MTK_PIN_NO(42) | 1)
+#define MT8365_PIN_42_PWRAP_SPI0_MO__FUNC_PWRAP_SPI0_MI (MTK_PIN_NO(42) | 2)
+
+#define MT8365_PIN_43_PWRAP_SPI0_CK__FUNC_GPIO43 (MTK_PIN_NO(43) | 0)
+#define MT8365_PIN_43_PWRAP_SPI0_CK__FUNC_PWRAP_SPI0_CK (MTK_PIN_NO(43) | 1)
+
+#define MT8365_PIN_44_PWRAP_SPI0_CSN__FUNC_GPIO44 (MTK_PIN_NO(44) | 0)
+#define MT8365_PIN_44_PWRAP_SPI0_CSN__FUNC_PWRAP_SPI0_CSN (MTK_PIN_NO(44) | 1)
+
+#define MT8365_PIN_45_RTC32K_CK__FUNC_GPIO45 (MTK_PIN_NO(45) | 0)
+#define MT8365_PIN_45_RTC32K_CK__FUNC_RTC32K_CK (MTK_PIN_NO(45) | 1)
+
+#define MT8365_PIN_46_WATCHDOG__FUNC_GPIO46 (MTK_PIN_NO(46) | 0)
+#define MT8365_PIN_46_WATCHDOG__FUNC_WATCHDOG (MTK_PIN_NO(46) | 1)
+
+#define MT8365_PIN_47_SRCLKENA0__FUNC_GPIO47 (MTK_PIN_NO(47) | 0)
+#define MT8365_PIN_47_SRCLKENA0__FUNC_SRCLKENA0 (MTK_PIN_NO(47) | 1)
+#define MT8365_PIN_47_SRCLKENA0__FUNC_SRCLKENA1 (MTK_PIN_NO(47) | 2)
+
+#define MT8365_PIN_48_SRCLKENA1__FUNC_GPIO48 (MTK_PIN_NO(48) | 0)
+#define MT8365_PIN_48_SRCLKENA1__FUNC_SRCLKENA1 (MTK_PIN_NO(48) | 1)
+
+#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_GPIO49 (MTK_PIN_NO(49) | 0)
+#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(49) | 1)
+#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_AUD_CLK_MISO (MTK_PIN_NO(49) | 2)
+#define MT8365_PIN_49_AUD_CLK_MOSI__FUNC_I2S1_MCK (MTK_PIN_NO(49) | 3)
+
+#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_GPIO50 (MTK_PIN_NO(50) | 0)
+#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(50) | 1)
+#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(50) | 2)
+#define MT8365_PIN_50_AUD_SYNC_MOSI__FUNC_I2S1_BCK (MTK_PIN_NO(50) | 3)
+
+#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_GPIO51 (MTK_PIN_NO(51) | 0)
+#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(51) | 1)
+#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(51) | 2)
+#define MT8365_PIN_51_AUD_DAT_MOSI0__FUNC_I2S1_LRCK (MTK_PIN_NO(51) | 3)
+
+#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_GPIO52 (MTK_PIN_NO(52) | 0)
+#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(52) | 1)
+#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(52) | 2)
+#define MT8365_PIN_52_AUD_DAT_MOSI1__FUNC_I2S1_DO (MTK_PIN_NO(52) | 3)
+
+#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_GPIO53 (MTK_PIN_NO(53) | 0)
+#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_AUD_CLK_MISO (MTK_PIN_NO(53) | 1)
+#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_AUD_CLK_MOSI (MTK_PIN_NO(53) | 2)
+#define MT8365_PIN_53_AUD_CLK_MISO__FUNC_I2S2_MCK (MTK_PIN_NO(53) | 3)
+
+#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_GPIO54 (MTK_PIN_NO(54) | 0)
+#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_AUD_SYNC_MISO (MTK_PIN_NO(54) | 1)
+#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_AUD_SYNC_MOSI (MTK_PIN_NO(54) | 2)
+#define MT8365_PIN_54_AUD_SYNC_MISO__FUNC_I2S2_BCK (MTK_PIN_NO(54) | 3)
+
+#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_GPIO55 (MTK_PIN_NO(55) | 0)
+#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_AUD_DAT_MISO0 (MTK_PIN_NO(55) | 1)
+#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_AUD_DAT_MOSI0 (MTK_PIN_NO(55) | 2)
+#define MT8365_PIN_55_AUD_DAT_MISO0__FUNC_I2S2_LRCK (MTK_PIN_NO(55) | 3)
+
+#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_GPIO56 (MTK_PIN_NO(56) | 0)
+#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_AUD_DAT_MISO1 (MTK_PIN_NO(56) | 1)
+#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_AUD_DAT_MOSI1 (MTK_PIN_NO(56) | 2)
+#define MT8365_PIN_56_AUD_DAT_MISO1__FUNC_I2S2_DI (MTK_PIN_NO(56) | 3)
+
+#define MT8365_PIN_57_SDA0__FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define MT8365_PIN_57_SDA0__FUNC_SDA0_0 (MTK_PIN_NO(57) | 1)
+
+#define MT8365_PIN_58_SCL0__FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define MT8365_PIN_58_SCL0__FUNC_SCL0_0 (MTK_PIN_NO(58) | 1)
+
+#define MT8365_PIN_59_SDA1__FUNC_GPIO59 (MTK_PIN_NO(59) | 0)
+#define MT8365_PIN_59_SDA1__FUNC_SDA1_0 (MTK_PIN_NO(59) | 1)
+#define MT8365_PIN_59_SDA1__FUNC_USB_SDA (MTK_PIN_NO(59) | 6)
+#define MT8365_PIN_59_SDA1__FUNC_DBG_SDA (MTK_PIN_NO(59) | 7)
+
+#define MT8365_PIN_60_SCL1__FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
+#define MT8365_PIN_60_SCL1__FUNC_SCL1_0 (MTK_PIN_NO(60) | 1)
+#define MT8365_PIN_60_SCL1__FUNC_USB_SCL (MTK_PIN_NO(60) | 6)
+#define MT8365_PIN_60_SCL1__FUNC_DBG_SCL (MTK_PIN_NO(60) | 7)
+
+#define MT8365_PIN_61_SDA2__FUNC_GPIO61 (MTK_PIN_NO(61) | 0)
+#define MT8365_PIN_61_SDA2__FUNC_SDA2_0 (MTK_PIN_NO(61) | 1)
+
+#define MT8365_PIN_62_SCL2__FUNC_GPIO62 (MTK_PIN_NO(62) | 0)
+#define MT8365_PIN_62_SCL2__FUNC_SCL2_0 (MTK_PIN_NO(62) | 1)
+
+#define MT8365_PIN_63_SDA3__FUNC_GPIO63 (MTK_PIN_NO(63) | 0)
+#define MT8365_PIN_63_SDA3__FUNC_SDA3_0 (MTK_PIN_NO(63) | 1)
+
+#define MT8365_PIN_64_SCL3__FUNC_GPIO64 (MTK_PIN_NO(64) | 0)
+#define MT8365_PIN_64_SCL3__FUNC_SCL3_0 (MTK_PIN_NO(64) | 1)
+
+#define MT8365_PIN_65_CMMCLK0__FUNC_GPIO65 (MTK_PIN_NO(65) | 0)
+#define MT8365_PIN_65_CMMCLK0__FUNC_CMMCLK0 (MTK_PIN_NO(65) | 1)
+#define MT8365_PIN_65_CMMCLK0__FUNC_CMMCLK1 (MTK_PIN_NO(65) | 2)
+#define MT8365_PIN_65_CMMCLK0__FUNC_DBG_MON_A28 (MTK_PIN_NO(65) | 7)
+
+#define MT8365_PIN_66_CMMCLK1__FUNC_GPIO66 (MTK_PIN_NO(66) | 0)
+#define MT8365_PIN_66_CMMCLK1__FUNC_CMMCLK1 (MTK_PIN_NO(66) | 1)
+#define MT8365_PIN_66_CMMCLK1__FUNC_CMMCLK0 (MTK_PIN_NO(66) | 2)
+#define MT8365_PIN_66_CMMCLK1__FUNC_DBG_MON_B2 (MTK_PIN_NO(66) | 7)
+
+#define MT8365_PIN_67_CMPCLK__FUNC_GPIO67 (MTK_PIN_NO(67) | 0)
+#define MT8365_PIN_67_CMPCLK__FUNC_CMPCLK (MTK_PIN_NO(67) | 1)
+#define MT8365_PIN_67_CMPCLK__FUNC_ANT_SEL0 (MTK_PIN_NO(67) | 2)
+#define MT8365_PIN_67_CMPCLK__FUNC_TDM_RX_BCK (MTK_PIN_NO(67) | 4)
+#define MT8365_PIN_67_CMPCLK__FUNC_I2S0_BCK (MTK_PIN_NO(67) | 5)
+#define MT8365_PIN_67_CMPCLK__FUNC_DBG_MON_B3 (MTK_PIN_NO(67) | 7)
+
+#define MT8365_PIN_68_CMDAT0__FUNC_GPIO68 (MTK_PIN_NO(68) | 0)
+#define MT8365_PIN_68_CMDAT0__FUNC_CMDAT0 (MTK_PIN_NO(68) | 1)
+#define MT8365_PIN_68_CMDAT0__FUNC_ANT_SEL1 (MTK_PIN_NO(68) | 2)
+#define MT8365_PIN_68_CMDAT0__FUNC_TDM_RX_LRCK (MTK_PIN_NO(68) | 4)
+#define MT8365_PIN_68_CMDAT0__FUNC_I2S0_LRCK (MTK_PIN_NO(68) | 5)
+#define MT8365_PIN_68_CMDAT0__FUNC_DBG_MON_B4 (MTK_PIN_NO(68) | 7)
+
+#define MT8365_PIN_69_CMDAT1__FUNC_GPIO69 (MTK_PIN_NO(69) | 0)
+#define MT8365_PIN_69_CMDAT1__FUNC_CMDAT1 (MTK_PIN_NO(69) | 1)
+#define MT8365_PIN_69_CMDAT1__FUNC_ANT_SEL2 (MTK_PIN_NO(69) | 2)
+#define MT8365_PIN_69_CMDAT1__FUNC_DVFSRC_EXT_REQ (MTK_PIN_NO(69) | 3)
+#define MT8365_PIN_69_CMDAT1__FUNC_TDM_RX_MCK (MTK_PIN_NO(69) | 4)
+#define MT8365_PIN_69_CMDAT1__FUNC_I2S0_MCK (MTK_PIN_NO(69) | 5)
+#define MT8365_PIN_69_CMDAT1__FUNC_DBG_MON_B5 (MTK_PIN_NO(69) | 7)
+
+#define MT8365_PIN_70_CMDAT2__FUNC_GPIO70 (MTK_PIN_NO(70) | 0)
+#define MT8365_PIN_70_CMDAT2__FUNC_CMDAT2 (MTK_PIN_NO(70) | 1)
+#define MT8365_PIN_70_CMDAT2__FUNC_ANT_SEL3 (MTK_PIN_NO(70) | 2)
+#define MT8365_PIN_70_CMDAT2__FUNC_TDM_RX_DI (MTK_PIN_NO(70) | 4)
+#define MT8365_PIN_70_CMDAT2__FUNC_I2S0_DI (MTK_PIN_NO(70) | 5)
+#define MT8365_PIN_70_CMDAT2__FUNC_DBG_MON_B6 (MTK_PIN_NO(70) | 7)
+
+#define MT8365_PIN_71_CMDAT3__FUNC_GPIO71 (MTK_PIN_NO(71) | 0)
+#define MT8365_PIN_71_CMDAT3__FUNC_CMDAT3 (MTK_PIN_NO(71) | 1)
+#define MT8365_PIN_71_CMDAT3__FUNC_ANT_SEL4 (MTK_PIN_NO(71) | 2)
+#define MT8365_PIN_71_CMDAT3__FUNC_DBG_MON_B7 (MTK_PIN_NO(71) | 7)
+
+#define MT8365_PIN_72_CMDAT4__FUNC_GPIO72 (MTK_PIN_NO(72) | 0)
+#define MT8365_PIN_72_CMDAT4__FUNC_CMDAT4 (MTK_PIN_NO(72) | 1)
+#define MT8365_PIN_72_CMDAT4__FUNC_ANT_SEL5 (MTK_PIN_NO(72) | 2)
+#define MT8365_PIN_72_CMDAT4__FUNC_I2S3_BCK (MTK_PIN_NO(72) | 5)
+#define MT8365_PIN_72_CMDAT4__FUNC_DBG_MON_B8 (MTK_PIN_NO(72) | 7)
+
+#define MT8365_PIN_73_CMDAT5__FUNC_GPIO73 (MTK_PIN_NO(73) | 0)
+#define MT8365_PIN_73_CMDAT5__FUNC_CMDAT5 (MTK_PIN_NO(73) | 1)
+#define MT8365_PIN_73_CMDAT5__FUNC_ANT_SEL6 (MTK_PIN_NO(73) | 2)
+#define MT8365_PIN_73_CMDAT5__FUNC_I2S3_LRCK (MTK_PIN_NO(73) | 5)
+#define MT8365_PIN_73_CMDAT5__FUNC_DBG_MON_B9 (MTK_PIN_NO(73) | 7)
+
+#define MT8365_PIN_74_CMDAT6__FUNC_GPIO74 (MTK_PIN_NO(74) | 0)
+#define MT8365_PIN_74_CMDAT6__FUNC_CMDAT6 (MTK_PIN_NO(74) | 1)
+#define MT8365_PIN_74_CMDAT6__FUNC_ANT_SEL7 (MTK_PIN_NO(74) | 2)
+#define MT8365_PIN_74_CMDAT6__FUNC_I2S3_MCK (MTK_PIN_NO(74) | 5)
+#define MT8365_PIN_74_CMDAT6__FUNC_DBG_MON_B10 (MTK_PIN_NO(74) | 7)
+
+#define MT8365_PIN_75_CMDAT7__FUNC_GPIO75 (MTK_PIN_NO(75) | 0)
+#define MT8365_PIN_75_CMDAT7__FUNC_CMDAT7 (MTK_PIN_NO(75) | 1)
+#define MT8365_PIN_75_CMDAT7__FUNC_I2S3_DO (MTK_PIN_NO(75) | 5)
+#define MT8365_PIN_75_CMDAT7__FUNC_DBG_MON_B11 (MTK_PIN_NO(75) | 7)
+
+#define MT8365_PIN_76_CMDAT8__FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
+#define MT8365_PIN_76_CMDAT8__FUNC_CMDAT8 (MTK_PIN_NO(76) | 1)
+#define MT8365_PIN_76_CMDAT8__FUNC_PCM_CLK (MTK_PIN_NO(76) | 5)
+#define MT8365_PIN_76_CMDAT8__FUNC_DBG_MON_A29 (MTK_PIN_NO(76) | 7)
+
+#define MT8365_PIN_77_CMDAT9__FUNC_GPIO77 (MTK_PIN_NO(77) | 0)
+#define MT8365_PIN_77_CMDAT9__FUNC_CMDAT9 (MTK_PIN_NO(77) | 1)
+#define MT8365_PIN_77_CMDAT9__FUNC_PCM_SYNC (MTK_PIN_NO(77) | 5)
+#define MT8365_PIN_77_CMDAT9__FUNC_DBG_MON_A30 (MTK_PIN_NO(77) | 7)
+
+#define MT8365_PIN_78_CMHSYNC__FUNC_GPIO78 (MTK_PIN_NO(78) | 0)
+#define MT8365_PIN_78_CMHSYNC__FUNC_CMHSYNC (MTK_PIN_NO(78) | 1)
+#define MT8365_PIN_78_CMHSYNC__FUNC_PCM_RX (MTK_PIN_NO(78) | 5)
+#define MT8365_PIN_78_CMHSYNC__FUNC_DBG_MON_A31 (MTK_PIN_NO(78) | 7)
+
+#define MT8365_PIN_79_CMVSYNC__FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define MT8365_PIN_79_CMVSYNC__FUNC_CMVSYNC (MTK_PIN_NO(79) | 1)
+#define MT8365_PIN_79_CMVSYNC__FUNC_PCM_TX (MTK_PIN_NO(79) | 5)
+#define MT8365_PIN_79_CMVSYNC__FUNC_DBG_MON_A32 (MTK_PIN_NO(79) | 7)
+
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_MSDC2_CMD (MTK_PIN_NO(80) | 1)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_TDM_TX_LRCK (MTK_PIN_NO(80) | 2)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_UTXD1 (MTK_PIN_NO(80) | 3)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_DPI_D19 (MTK_PIN_NO(80) | 4)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_UDI_TMS_XI (MTK_PIN_NO(80) | 5)
+#define MT8365_PIN_80_MSDC2_CMD__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(80) | 6)
+
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_MSDC2_CLK (MTK_PIN_NO(81) | 1)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_TDM_TX_BCK (MTK_PIN_NO(81) | 2)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_URXD1 (MTK_PIN_NO(81) | 3)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_DPI_D20 (MTK_PIN_NO(81) | 4)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_UDI_TCK_XI (MTK_PIN_NO(81) | 5)
+#define MT8365_PIN_81_MSDC2_CLK__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(81) | 6)
+
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_MSDC2_DAT0 (MTK_PIN_NO(82) | 1)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_TDM_TX_DATA0 (MTK_PIN_NO(82) | 2)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_UTXD2 (MTK_PIN_NO(82) | 3)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_DPI_D21 (MTK_PIN_NO(82) | 4)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_UDI_TDI_XI (MTK_PIN_NO(82) | 5)
+#define MT8365_PIN_82_MSDC2_DAT0__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(82) | 6)
+
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_MSDC2_DAT1 (MTK_PIN_NO(83) | 1)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_TDM_TX_DATA1 (MTK_PIN_NO(83) | 2)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_URXD2 (MTK_PIN_NO(83) | 3)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_DPI_D22 (MTK_PIN_NO(83) | 4)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_UDI_TDO (MTK_PIN_NO(83) | 5)
+#define MT8365_PIN_83_MSDC2_DAT1__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(83) | 6)
+
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_GPIO84 (MTK_PIN_NO(84) | 0)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_MSDC2_DAT2 (MTK_PIN_NO(84) | 1)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_TDM_TX_DATA2 (MTK_PIN_NO(84) | 2)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_PWM_A (MTK_PIN_NO(84) | 3)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_DPI_D23 (MTK_PIN_NO(84) | 4)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_UDI_NTRST_XI (MTK_PIN_NO(84) | 5)
+#define MT8365_PIN_84_MSDC2_DAT2__FUNC_ADSP_JTAG_TRST (MTK_PIN_NO(84) | 6)
+
+#define MT8365_PIN_85_MSDC2_DAT3__FUNC_GPIO85 (MTK_PIN_NO(85) | 0)
+#define MT8365_PIN_85_MSDC2_DAT3__FUNC_MSDC2_DAT3 (MTK_PIN_NO(85) | 1)
+#define MT8365_PIN_85_MSDC2_DAT3__FUNC_TDM_TX_DATA3 (MTK_PIN_NO(85) | 2)
+#define MT8365_PIN_85_MSDC2_DAT3__FUNC_PWM_B (MTK_PIN_NO(85) | 3)
+#define MT8365_PIN_85_MSDC2_DAT3__FUNC_EXT_FRAME_SYNC (MTK_PIN_NO(85) | 5)
+
+#define MT8365_PIN_86_MSDC2_DSL__FUNC_GPIO86 (MTK_PIN_NO(86) | 0)
+#define MT8365_PIN_86_MSDC2_DSL__FUNC_MSDC2_DSL (MTK_PIN_NO(86) | 1)
+#define MT8365_PIN_86_MSDC2_DSL__FUNC_TDM_TX_MCK (MTK_PIN_NO(86) | 2)
+#define MT8365_PIN_86_MSDC2_DSL__FUNC_PWM_C (MTK_PIN_NO(86) | 3)
+
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_GPIO87 (MTK_PIN_NO(87) | 0)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_MSDC1_CMD (MTK_PIN_NO(87) | 1)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_CONN_MCU_AICE_TMSC (MTK_PIN_NO(87) | 2)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_DFD_TMS_XI (MTK_PIN_NO(87) | 3)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_APU_JTAG_TMS (MTK_PIN_NO(87) | 4)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_MCU_SPM_TMS (MTK_PIN_NO(87) | 5)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_CONN_DSP_JMS (MTK_PIN_NO(87) | 6)
+#define MT8365_PIN_87_MSDC1_CMD__FUNC_ADSP_JTAG_TMS (MTK_PIN_NO(87) | 7)
+
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_GPIO88 (MTK_PIN_NO(88) | 0)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_MSDC1_CLK (MTK_PIN_NO(88) | 1)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_CONN_MCU_AICE_TCKC (MTK_PIN_NO(88) | 2)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_DFD_TCK_XI (MTK_PIN_NO(88) | 3)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_APU_JTAG_TCK (MTK_PIN_NO(88) | 4)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_MCU_SPM_TCK (MTK_PIN_NO(88) | 5)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_CONN_DSP_JCK (MTK_PIN_NO(88) | 6)
+#define MT8365_PIN_88_MSDC1_CLK__FUNC_ADSP_JTAG_TCK (MTK_PIN_NO(88) | 7)
+
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_GPIO89 (MTK_PIN_NO(89) | 0)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_MSDC1_DAT0 (MTK_PIN_NO(89) | 1)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_PWM_C (MTK_PIN_NO(89) | 2)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_DFD_TDI_XI (MTK_PIN_NO(89) | 3)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_APU_JTAG_TDI (MTK_PIN_NO(89) | 4)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_MCU_SPM_TDI (MTK_PIN_NO(89) | 5)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_CONN_DSP_JDI (MTK_PIN_NO(89) | 6)
+#define MT8365_PIN_89_MSDC1_DAT0__FUNC_ADSP_JTAG_TDI (MTK_PIN_NO(89) | 7)
+
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_GPIO90 (MTK_PIN_NO(90) | 0)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_MSDC1_DAT1 (MTK_PIN_NO(90) | 1)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_SPDIF_IN (MTK_PIN_NO(90) | 2)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_DFD_TDO (MTK_PIN_NO(90) | 3)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_APU_JTAG_TDO (MTK_PIN_NO(90) | 4)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_MCU_SPM_TDO (MTK_PIN_NO(90) | 5)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_CONN_DSP_JDO (MTK_PIN_NO(90) | 6)
+#define MT8365_PIN_90_MSDC1_DAT1__FUNC_ADSP_JTAG_TDO (MTK_PIN_NO(90) | 7)
+
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_GPIO91 (MTK_PIN_NO(91) | 0)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_MSDC1_DAT2 (MTK_PIN_NO(91) | 1)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_SPDIF_OUT (MTK_PIN_NO(91) | 2)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_DFD_NTRST_XI (MTK_PIN_NO(91) | 3)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_APU_JTAG_TRST (MTK_PIN_NO(91) | 4)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_MCU_SPM_NTRST (MTK_PIN_NO(91) | 5)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_CONN_DSP_JINTP (MTK_PIN_NO(91) | 6)
+#define MT8365_PIN_91_MSDC1_DAT2__FUNC_ADSP_JTAG_TRST (MTK_PIN_NO(91) | 7)
+
+#define MT8365_PIN_92_MSDC1_DAT3__FUNC_GPIO92 (MTK_PIN_NO(92) | 0)
+#define MT8365_PIN_92_MSDC1_DAT3__FUNC_MSDC1_DAT3 (MTK_PIN_NO(92) | 1)
+#define MT8365_PIN_92_MSDC1_DAT3__FUNC_IRRX (MTK_PIN_NO(92) | 2)
+#define MT8365_PIN_92_MSDC1_DAT3__FUNC_PWM_A (MTK_PIN_NO(92) | 3)
+
+#define MT8365_PIN_93_MSDC0_DAT7__FUNC_GPIO93 (MTK_PIN_NO(93) | 0)
+#define MT8365_PIN_93_MSDC0_DAT7__FUNC_MSDC0_DAT7 (MTK_PIN_NO(93) | 1)
+#define MT8365_PIN_93_MSDC0_DAT7__FUNC_NLD7 (MTK_PIN_NO(93) | 2)
+
+#define MT8365_PIN_94_MSDC0_DAT6__FUNC_GPIO94 (MTK_PIN_NO(94) | 0)
+#define MT8365_PIN_94_MSDC0_DAT6__FUNC_MSDC0_DAT6 (MTK_PIN_NO(94) | 1)
+#define MT8365_PIN_94_MSDC0_DAT6__FUNC_NLD6 (MTK_PIN_NO(94) | 2)
+
+#define MT8365_PIN_95_MSDC0_DAT5__FUNC_GPIO95 (MTK_PIN_NO(95) | 0)
+#define MT8365_PIN_95_MSDC0_DAT5__FUNC_MSDC0_DAT5 (MTK_PIN_NO(95) | 1)
+#define MT8365_PIN_95_MSDC0_DAT5__FUNC_NLD4 (MTK_PIN_NO(95) | 2)
+
+#define MT8365_PIN_96_MSDC0_DAT4__FUNC_GPIO96 (MTK_PIN_NO(96) | 0)
+#define MT8365_PIN_96_MSDC0_DAT4__FUNC_MSDC0_DAT4 (MTK_PIN_NO(96) | 1)
+#define MT8365_PIN_96_MSDC0_DAT4__FUNC_NLD3 (MTK_PIN_NO(96) | 2)
+
+#define MT8365_PIN_97_MSDC0_RSTB__FUNC_GPIO97 (MTK_PIN_NO(97) | 0)
+#define MT8365_PIN_97_MSDC0_RSTB__FUNC_MSDC0_RSTB (MTK_PIN_NO(97) | 1)
+#define MT8365_PIN_97_MSDC0_RSTB__FUNC_NLD0 (MTK_PIN_NO(97) | 2)
+
+#define MT8365_PIN_98_MSDC0_CMD__FUNC_GPIO98 (MTK_PIN_NO(98) | 0)
+#define MT8365_PIN_98_MSDC0_CMD__FUNC_MSDC0_CMD (MTK_PIN_NO(98) | 1)
+#define MT8365_PIN_98_MSDC0_CMD__FUNC_NALE (MTK_PIN_NO(98) | 2)
+
+#define MT8365_PIN_99_MSDC0_CLK__FUNC_GPIO99 (MTK_PIN_NO(99) | 0)
+#define MT8365_PIN_99_MSDC0_CLK__FUNC_MSDC0_CLK (MTK_PIN_NO(99) | 1)
+#define MT8365_PIN_99_MSDC0_CLK__FUNC_NWEB (MTK_PIN_NO(99) | 2)
+
+#define MT8365_PIN_100_MSDC0_DAT3__FUNC_GPIO100 (MTK_PIN_NO(100) | 0)
+#define MT8365_PIN_100_MSDC0_DAT3__FUNC_MSDC0_DAT3 (MTK_PIN_NO(100) | 1)
+#define MT8365_PIN_100_MSDC0_DAT3__FUNC_NLD1 (MTK_PIN_NO(100) | 2)
+
+#define MT8365_PIN_101_MSDC0_DAT2__FUNC_GPIO101 (MTK_PIN_NO(101) | 0)
+#define MT8365_PIN_101_MSDC0_DAT2__FUNC_MSDC0_DAT2 (MTK_PIN_NO(101) | 1)
+#define MT8365_PIN_101_MSDC0_DAT2__FUNC_NLD5 (MTK_PIN_NO(101) | 2)
+
+#define MT8365_PIN_102_MSDC0_DAT1__FUNC_GPIO102 (MTK_PIN_NO(102) | 0)
+#define MT8365_PIN_102_MSDC0_DAT1__FUNC_MSDC0_DAT1 (MTK_PIN_NO(102) | 1)
+#define MT8365_PIN_102_MSDC0_DAT1__FUNC_NDQS (MTK_PIN_NO(102) | 2)
+
+#define MT8365_PIN_103_MSDC0_DAT0__FUNC_GPIO103 (MTK_PIN_NO(103) | 0)
+#define MT8365_PIN_103_MSDC0_DAT0__FUNC_MSDC0_DAT0 (MTK_PIN_NO(103) | 1)
+#define MT8365_PIN_103_MSDC0_DAT0__FUNC_NLD2 (MTK_PIN_NO(103) | 2)
+
+#define MT8365_PIN_104_MSDC0_DSL__FUNC_GPIO104 (MTK_PIN_NO(104) | 0)
+#define MT8365_PIN_104_MSDC0_DSL__FUNC_MSDC0_DSL (MTK_PIN_NO(104) | 1)
+
+#define MT8365_PIN_105_NCLE__FUNC_GPIO105 (MTK_PIN_NO(105) | 0)
+#define MT8365_PIN_105_NCLE__FUNC_NCLE (MTK_PIN_NO(105) | 1)
+#define MT8365_PIN_105_NCLE__FUNC_TDM_RX_MCK (MTK_PIN_NO(105) | 2)
+#define MT8365_PIN_105_NCLE__FUNC_DBG_MON_B12 (MTK_PIN_NO(105) | 7)
+
+#define MT8365_PIN_106_NCEB1__FUNC_GPIO106 (MTK_PIN_NO(106) | 0)
+#define MT8365_PIN_106_NCEB1__FUNC_NCEB1 (MTK_PIN_NO(106) | 1)
+#define MT8365_PIN_106_NCEB1__FUNC_TDM_RX_BCK (MTK_PIN_NO(106) | 2)
+#define MT8365_PIN_106_NCEB1__FUNC_DBG_MON_B13 (MTK_PIN_NO(106) | 7)
+
+#define MT8365_PIN_107_NCEB0__FUNC_GPIO107 (MTK_PIN_NO(107) | 0)
+#define MT8365_PIN_107_NCEB0__FUNC_NCEB0 (MTK_PIN_NO(107) | 1)
+#define MT8365_PIN_107_NCEB0__FUNC_TDM_RX_LRCK (MTK_PIN_NO(107) | 2)
+#define MT8365_PIN_107_NCEB0__FUNC_DBG_MON_B14 (MTK_PIN_NO(107) | 7)
+
+#define MT8365_PIN_108_NREB__FUNC_GPIO108 (MTK_PIN_NO(108) | 0)
+#define MT8365_PIN_108_NREB__FUNC_NREB (MTK_PIN_NO(108) | 1)
+#define MT8365_PIN_108_NREB__FUNC_TDM_RX_DI (MTK_PIN_NO(108) | 2)
+#define MT8365_PIN_108_NREB__FUNC_DBG_MON_B15 (MTK_PIN_NO(108) | 7)
+
+#define MT8365_PIN_109_NRNB__FUNC_GPIO109 (MTK_PIN_NO(109) | 0)
+#define MT8365_PIN_109_NRNB__FUNC_NRNB (MTK_PIN_NO(109) | 1)
+#define MT8365_PIN_109_NRNB__FUNC_TSF_IN (MTK_PIN_NO(109) | 2)
+#define MT8365_PIN_109_NRNB__FUNC_DBG_MON_B16 (MTK_PIN_NO(109) | 7)
+
+#define MT8365_PIN_110_PCM_CLK__FUNC_GPIO110 (MTK_PIN_NO(110) | 0)
+#define MT8365_PIN_110_PCM_CLK__FUNC_PCM_CLK (MTK_PIN_NO(110) | 1)
+#define MT8365_PIN_110_PCM_CLK__FUNC_I2S0_BCK (MTK_PIN_NO(110) | 2)
+#define MT8365_PIN_110_PCM_CLK__FUNC_I2S3_BCK (MTK_PIN_NO(110) | 3)
+#define MT8365_PIN_110_PCM_CLK__FUNC_SPDIF_IN (MTK_PIN_NO(110) | 4)
+#define MT8365_PIN_110_PCM_CLK__FUNC_DPI_D15 (MTK_PIN_NO(110) | 5)
+
+#define MT8365_PIN_111_PCM_SYNC__FUNC_GPIO111 (MTK_PIN_NO(111) | 0)
+#define MT8365_PIN_111_PCM_SYNC__FUNC_PCM_SYNC (MTK_PIN_NO(111) | 1)
+#define MT8365_PIN_111_PCM_SYNC__FUNC_I2S0_LRCK (MTK_PIN_NO(111) | 2)
+#define MT8365_PIN_111_PCM_SYNC__FUNC_I2S3_LRCK (MTK_PIN_NO(111) | 3)
+#define MT8365_PIN_111_PCM_SYNC__FUNC_SPDIF_OUT (MTK_PIN_NO(111) | 4)
+#define MT8365_PIN_111_PCM_SYNC__FUNC_DPI_D16 (MTK_PIN_NO(111) | 5)
+
+#define MT8365_PIN_112_PCM_RX__FUNC_GPIO112 (MTK_PIN_NO(112) | 0)
+#define MT8365_PIN_112_PCM_RX__FUNC_PCM_RX (MTK_PIN_NO(112) | 1)
+#define MT8365_PIN_112_PCM_RX__FUNC_I2S0_DI (MTK_PIN_NO(112) | 2)
+#define MT8365_PIN_112_PCM_RX__FUNC_I2S3_MCK (MTK_PIN_NO(112) | 3)
+#define MT8365_PIN_112_PCM_RX__FUNC_IRRX (MTK_PIN_NO(112) | 4)
+#define MT8365_PIN_112_PCM_RX__FUNC_DPI_D17 (MTK_PIN_NO(112) | 5)
+
+#define MT8365_PIN_113_PCM_TX__FUNC_GPIO113 (MTK_PIN_NO(113) | 0)
+#define MT8365_PIN_113_PCM_TX__FUNC_PCM_TX (MTK_PIN_NO(113) | 1)
+#define MT8365_PIN_113_PCM_TX__FUNC_I2S0_MCK (MTK_PIN_NO(113) | 2)
+#define MT8365_PIN_113_PCM_TX__FUNC_I2S3_DO (MTK_PIN_NO(113) | 3)
+#define MT8365_PIN_113_PCM_TX__FUNC_PWM_B (MTK_PIN_NO(113) | 4)
+#define MT8365_PIN_113_PCM_TX__FUNC_DPI_D18 (MTK_PIN_NO(113) | 5)
+
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_GPIO114 (MTK_PIN_NO(114) | 0)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S0_DI (MTK_PIN_NO(114) | 1)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S1_DO (MTK_PIN_NO(114) | 2)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S2_DI (MTK_PIN_NO(114) | 3)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_I2S3_DO (MTK_PIN_NO(114) | 4)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_PWM_A (MTK_PIN_NO(114) | 5)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_SPDIF_IN (MTK_PIN_NO(114) | 6)
+#define MT8365_PIN_114_I2S_DATA_IN__FUNC_DBG_MON_B17 (MTK_PIN_NO(114) | 7)
+
+#define MT8365_PIN_115_I2S_LRCK__FUNC_GPIO115 (MTK_PIN_NO(115) | 0)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S0_LRCK (MTK_PIN_NO(115) | 1)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S1_LRCK (MTK_PIN_NO(115) | 2)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S2_LRCK (MTK_PIN_NO(115) | 3)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_I2S3_LRCK (MTK_PIN_NO(115) | 4)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_PWM_B (MTK_PIN_NO(115) | 5)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_SPDIF_OUT (MTK_PIN_NO(115) | 6)
+#define MT8365_PIN_115_I2S_LRCK__FUNC_DBG_MON_B18 (MTK_PIN_NO(115) | 7)
+
+#define MT8365_PIN_116_I2S_BCK__FUNC_GPIO116 (MTK_PIN_NO(116) | 0)
+#define MT8365_PIN_116_I2S_BCK__FUNC_I2S0_BCK (MTK_PIN_NO(116) | 1)
+#define MT8365_PIN_116_I2S_BCK__FUNC_I2S1_BCK (MTK_PIN_NO(116) | 2)
+#define MT8365_PIN_116_I2S_BCK__FUNC_I2S2_BCK (MTK_PIN_NO(116) | 3)
+#define MT8365_PIN_116_I2S_BCK__FUNC_I2S3_BCK (MTK_PIN_NO(116) | 4)
+#define MT8365_PIN_116_I2S_BCK__FUNC_PWM_C (MTK_PIN_NO(116) | 5)
+#define MT8365_PIN_116_I2S_BCK__FUNC_IRRX (MTK_PIN_NO(116) | 6)
+#define MT8365_PIN_116_I2S_BCK__FUNC_DBG_MON_B19 (MTK_PIN_NO(116) | 7)
+
+#define MT8365_PIN_117_DMIC0_CLK__FUNC_GPIO117 (MTK_PIN_NO(117) | 0)
+#define MT8365_PIN_117_DMIC0_CLK__FUNC_DMIC0_CLK (MTK_PIN_NO(117) | 1)
+#define MT8365_PIN_117_DMIC0_CLK__FUNC_I2S2_BCK (MTK_PIN_NO(117) | 2)
+#define MT8365_PIN_117_DMIC0_CLK__FUNC_DBG_MON_B20 (MTK_PIN_NO(117) | 7)
+
+#define MT8365_PIN_118_DMIC0_DAT0__FUNC_GPIO118 (MTK_PIN_NO(118) | 0)
+#define MT8365_PIN_118_DMIC0_DAT0__FUNC_DMIC0_DAT0 (MTK_PIN_NO(118) | 1)
+#define MT8365_PIN_118_DMIC0_DAT0__FUNC_I2S2_DI (MTK_PIN_NO(118) | 2)
+#define MT8365_PIN_118_DMIC0_DAT0__FUNC_DBG_MON_B21 (MTK_PIN_NO(118) | 7)
+
+#define MT8365_PIN_119_DMIC0_DAT1__FUNC_GPIO119 (MTK_PIN_NO(119) | 0)
+#define MT8365_PIN_119_DMIC0_DAT1__FUNC_DMIC0_DAT1 (MTK_PIN_NO(119) | 1)
+#define MT8365_PIN_119_DMIC0_DAT1__FUNC_I2S2_LRCK (MTK_PIN_NO(119) | 2)
+#define MT8365_PIN_119_DMIC0_DAT1__FUNC_DBG_MON_B22 (MTK_PIN_NO(119) | 7)
+
+#define MT8365_PIN_120_DMIC1_CLK__FUNC_GPIO120 (MTK_PIN_NO(120) | 0)
+#define MT8365_PIN_120_DMIC1_CLK__FUNC_DMIC1_CLK (MTK_PIN_NO(120) | 1)
+#define MT8365_PIN_120_DMIC1_CLK__FUNC_I2S2_MCK (MTK_PIN_NO(120) | 2)
+#define MT8365_PIN_120_DMIC1_CLK__FUNC_DBG_MON_B23 (MTK_PIN_NO(120) | 7)
+
+#define MT8365_PIN_121_DMIC1_DAT0__FUNC_GPIO121 (MTK_PIN_NO(121) | 0)
+#define MT8365_PIN_121_DMIC1_DAT0__FUNC_DMIC1_DAT0 (MTK_PIN_NO(121) | 1)
+#define MT8365_PIN_121_DMIC1_DAT0__FUNC_I2S1_BCK (MTK_PIN_NO(121) | 2)
+#define MT8365_PIN_121_DMIC1_DAT0__FUNC_DBG_MON_B24 (MTK_PIN_NO(121) | 7)
+
+#define MT8365_PIN_122_DMIC1_DAT1__FUNC_GPIO122 (MTK_PIN_NO(122) | 0)
+#define MT8365_PIN_122_DMIC1_DAT1__FUNC_DMIC1_DAT1 (MTK_PIN_NO(122) | 1)
+#define MT8365_PIN_122_DMIC1_DAT1__FUNC_I2S1_LRCK (MTK_PIN_NO(122) | 2)
+#define MT8365_PIN_122_DMIC1_DAT1__FUNC_DBG_MON_B25 (MTK_PIN_NO(122) | 7)
+
+#define MT8365_PIN_123_DMIC2_CLK__FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define MT8365_PIN_123_DMIC2_CLK__FUNC_DMIC2_CLK (MTK_PIN_NO(123) | 1)
+#define MT8365_PIN_123_DMIC2_CLK__FUNC_I2S1_MCK (MTK_PIN_NO(123) | 2)
+#define MT8365_PIN_123_DMIC2_CLK__FUNC_DBG_MON_B26 (MTK_PIN_NO(123) | 7)
+
+#define MT8365_PIN_124_DMIC2_DAT0__FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
+#define MT8365_PIN_124_DMIC2_DAT0__FUNC_DMIC2_DAT0 (MTK_PIN_NO(124) | 1)
+#define MT8365_PIN_124_DMIC2_DAT0__FUNC_I2S1_DO (MTK_PIN_NO(124) | 2)
+#define MT8365_PIN_124_DMIC2_DAT0__FUNC_DBG_MON_B27 (MTK_PIN_NO(124) | 7)
+
+#define MT8365_PIN_125_DMIC2_DAT1__FUNC_GPIO125 (MTK_PIN_NO(125) | 0)
+#define MT8365_PIN_125_DMIC2_DAT1__FUNC_DMIC2_DAT1 (MTK_PIN_NO(125) | 1)
+#define MT8365_PIN_125_DMIC2_DAT1__FUNC_TDM_RX_BCK (MTK_PIN_NO(125) | 2)
+#define MT8365_PIN_125_DMIC2_DAT1__FUNC_DBG_MON_B28 (MTK_PIN_NO(125) | 7)
+
+#define MT8365_PIN_126_DMIC3_CLK__FUNC_GPIO126 (MTK_PIN_NO(126) | 0)
+#define MT8365_PIN_126_DMIC3_CLK__FUNC_DMIC3_CLK (MTK_PIN_NO(126) | 1)
+#define MT8365_PIN_126_DMIC3_CLK__FUNC_TDM_RX_LRCK (MTK_PIN_NO(126) | 2)
+
+#define MT8365_PIN_127_DMIC3_DAT0__FUNC_GPIO127 (MTK_PIN_NO(127) | 0)
+#define MT8365_PIN_127_DMIC3_DAT0__FUNC_DMIC3_DAT0 (MTK_PIN_NO(127) | 1)
+#define MT8365_PIN_127_DMIC3_DAT0__FUNC_TDM_RX_DI (MTK_PIN_NO(127) | 2)
+
+#define MT8365_PIN_128_DMIC3_DAT1__FUNC_GPIO128 (MTK_PIN_NO(128) | 0)
+#define MT8365_PIN_128_DMIC3_DAT1__FUNC_DMIC3_DAT1 (MTK_PIN_NO(128) | 1)
+#define MT8365_PIN_128_DMIC3_DAT1__FUNC_TDM_RX_MCK (MTK_PIN_NO(128) | 2)
+#define MT8365_PIN_128_DMIC3_DAT1__FUNC_VAD_CLK (MTK_PIN_NO(128) | 3)
+
+#define MT8365_PIN_129_TDM_TX_BCK__FUNC_GPIO129 (MTK_PIN_NO(129) | 0)
+#define MT8365_PIN_129_TDM_TX_BCK__FUNC_TDM_TX_BCK (MTK_PIN_NO(129) | 1)
+#define MT8365_PIN_129_TDM_TX_BCK__FUNC_I2S3_BCK (MTK_PIN_NO(129) | 2)
+#define MT8365_PIN_129_TDM_TX_BCK__FUNC_ckmon1_ck (MTK_PIN_NO(129) | 3)
+
+#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_GPIO130 (MTK_PIN_NO(130) | 0)
+#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_TDM_TX_LRCK (MTK_PIN_NO(130) | 1)
+#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_I2S3_LRCK (MTK_PIN_NO(130) | 2)
+#define MT8365_PIN_130_TDM_TX_LRCK__FUNC_ckmon2_ck (MTK_PIN_NO(130) | 3)
+
+#define MT8365_PIN_131_TDM_TX_MCK__FUNC_GPIO131 (MTK_PIN_NO(131) | 0)
+#define MT8365_PIN_131_TDM_TX_MCK__FUNC_TDM_TX_MCK (MTK_PIN_NO(131) | 1)
+#define MT8365_PIN_131_TDM_TX_MCK__FUNC_I2S3_MCK (MTK_PIN_NO(131) | 2)
+#define MT8365_PIN_131_TDM_TX_MCK__FUNC_ckmon3_ck (MTK_PIN_NO(131) | 3)
+
+#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_GPIO132 (MTK_PIN_NO(132) | 0)
+#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_TDM_TX_DATA0 (MTK_PIN_NO(132) | 1)
+#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_I2S3_DO (MTK_PIN_NO(132) | 2)
+#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_ckmon4_ck (MTK_PIN_NO(132) | 3)
+#define MT8365_PIN_132_TDM_TX_DATA0__FUNC_DBG_MON_B29 (MTK_PIN_NO(132) | 7)
+
+#define MT8365_PIN_133_TDM_TX_DATA1__FUNC_GPIO133 (MTK_PIN_NO(133) | 0)
+#define MT8365_PIN_133_TDM_TX_DATA1__FUNC_TDM_TX_DATA1 (MTK_PIN_NO(133) | 1)
+#define MT8365_PIN_133_TDM_TX_DATA1__FUNC_DBG_MON_B30 (MTK_PIN_NO(133) | 7)
+
+#define MT8365_PIN_134_TDM_TX_DATA2__FUNC_GPIO134 (MTK_PIN_NO(134) | 0)
+#define MT8365_PIN_134_TDM_TX_DATA2__FUNC_TDM_TX_DATA2 (MTK_PIN_NO(134) | 1)
+#define MT8365_PIN_134_TDM_TX_DATA2__FUNC_DBG_MON_B31 (MTK_PIN_NO(134) | 7)
+
+#define MT8365_PIN_135_TDM_TX_DATA3__FUNC_GPIO135 (MTK_PIN_NO(135) | 0)
+#define MT8365_PIN_135_TDM_TX_DATA3__FUNC_TDM_TX_DATA3 (MTK_PIN_NO(135) | 1)
+#define MT8365_PIN_135_TDM_TX_DATA3__FUNC_DBG_MON_B32 (MTK_PIN_NO(135) | 7)
+
+#define MT8365_PIN_136_CONN_TOP_CLK__FUNC_GPIO136 (MTK_PIN_NO(136) | 0)
+#define MT8365_PIN_136_CONN_TOP_CLK__FUNC_CONN_TOP_CLK (MTK_PIN_NO(136) | 1)
+
+#define MT8365_PIN_137_CONN_TOP_DATA__FUNC_GPIO137 (MTK_PIN_NO(137) | 0)
+#define MT8365_PIN_137_CONN_TOP_DATA__FUNC_CONN_TOP_DATA (MTK_PIN_NO(137) | 1)
+
+#define MT8365_PIN_138_CONN_HRST_B__FUNC_GPIO138 (MTK_PIN_NO(138) | 0)
+#define MT8365_PIN_138_CONN_HRST_B__FUNC_CONN_HRST_B (MTK_PIN_NO(138) | 1)
+
+#define MT8365_PIN_139_CONN_WB_PTA__FUNC_GPIO139 (MTK_PIN_NO(139) | 0)
+#define MT8365_PIN_139_CONN_WB_PTA__FUNC_CONN_WB_PTA (MTK_PIN_NO(139) | 1)
+
+#define MT8365_PIN_140_CONN_BT_CLK__FUNC_GPIO140 (MTK_PIN_NO(140) | 0)
+#define MT8365_PIN_140_CONN_BT_CLK__FUNC_CONN_BT_CLK (MTK_PIN_NO(140) | 1)
+
+#define MT8365_PIN_141_CONN_BT_DATA__FUNC_GPIO141 (MTK_PIN_NO(141) | 0)
+#define MT8365_PIN_141_CONN_BT_DATA__FUNC_CONN_BT_DATA (MTK_PIN_NO(141) | 1)
+
+#define MT8365_PIN_142_CONN_WF_CTRL0__FUNC_GPIO142 (MTK_PIN_NO(142) | 0)
+#define MT8365_PIN_142_CONN_WF_CTRL0__FUNC_CONN_WF_CTRL0 (MTK_PIN_NO(142) | 1)
+
+#define MT8365_PIN_143_CONN_WF_CTRL1__FUNC_GPIO143 (MTK_PIN_NO(143) | 0)
+#define MT8365_PIN_143_CONN_WF_CTRL1__FUNC_CONN_WF_CTRL1 (MTK_PIN_NO(143) | 1)
+
+#define MT8365_PIN_144_CONN_WF_CTRL2__FUNC_GPIO144 (MTK_PIN_NO(144) | 0)
+#define MT8365_PIN_144_CONN_WF_CTRL2__FUNC_CONN_WF_CTRL2 (MTK_PIN_NO(144) | 1)
+
+#endif /* __MT8365_PINFUNC_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-cv1800b.h b/include/dt-bindings/pinctrl/pinctrl-cv1800b.h
new file mode 100644
index 000000000000..0593fc33d470
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-cv1800b.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_CV1800B_H
+#define _DT_BINDINGS_PINCTRL_CV1800B_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PIN_AUD_AOUTR 1
+#define PIN_SD0_CLK 3
+#define PIN_SD0_CMD 4
+#define PIN_SD0_D0 5
+#define PIN_SD0_D1 7
+#define PIN_SD0_D2 8
+#define PIN_SD0_D3 9
+#define PIN_SD0_CD 11
+#define PIN_SD0_PWR_EN 12
+#define PIN_SPK_EN 14
+#define PIN_UART0_TX 15
+#define PIN_UART0_RX 16
+#define PIN_SPINOR_HOLD_X 17
+#define PIN_SPINOR_SCK 18
+#define PIN_SPINOR_MOSI 19
+#define PIN_SPINOR_WP_X 20
+#define PIN_SPINOR_MISO 21
+#define PIN_SPINOR_CS_X 22
+#define PIN_IIC0_SCL 23
+#define PIN_IIC0_SDA 24
+#define PIN_AUX0 25
+#define PIN_PWR_VBAT_DET 30
+#define PIN_PWR_SEQ2 31
+#define PIN_XTAL_XIN 33
+#define PIN_SD1_GPIO0 35
+#define PIN_SD1_GPIO1 36
+#define PIN_SD1_D3 38
+#define PIN_SD1_D2 39
+#define PIN_SD1_D1 40
+#define PIN_SD1_D0 41
+#define PIN_SD1_CMD 42
+#define PIN_SD1_CLK 43
+#define PIN_ADC1 44
+#define PIN_USB_VBUS_DET 45
+#define PIN_ETH_TXP 47
+#define PIN_ETH_TXM 48
+#define PIN_ETH_RXP 49
+#define PIN_ETH_RXM 50
+#define PIN_MIPIRX4N 56
+#define PIN_MIPIRX4P 57
+#define PIN_MIPIRX3N 58
+#define PIN_MIPIRX3P 59
+#define PIN_MIPIRX2N 60
+#define PIN_MIPIRX2P 61
+#define PIN_MIPIRX1N 62
+#define PIN_MIPIRX1P 63
+#define PIN_MIPIRX0N 64
+#define PIN_MIPIRX0P 65
+#define PIN_AUD_AINL_MIC 67
+
+#endif /* _DT_BINDINGS_PINCTRL_CV1800B_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-cv1812h.h b/include/dt-bindings/pinctrl/pinctrl-cv1812h.h
new file mode 100644
index 000000000000..2908de347919
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-cv1812h.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_CV1812H_H
+#define _DT_BINDINGS_PINCTRL_CV1812H_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PINPOS(row, col) \
+ ((((row) - 'A' + 1) << 8) + ((col) - 1))
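+/*
+ * PINPOS() packs a BGA ball position into one pin number: the 1-based
+ * row index goes in the high byte, the 0-based column in the low byte.
+ * Worked example (arithmetic follows directly from the macro, shown for
+ * illustration only):
+ *
+ *   PINPOS('A', 2) = (1 << 8) + 1 = 0x101   ->  PIN_MIPI_TXM4 below
+ */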
+
+#define PIN_MIPI_TXM4 PINPOS('A', 2)
+#define PIN_MIPIRX0N PINPOS('A', 4)
+#define PIN_MIPIRX3P PINPOS('A', 6)
+#define PIN_MIPIRX4P PINPOS('A', 7)
+#define PIN_VIVO_D2 PINPOS('A', 9)
+#define PIN_VIVO_D3 PINPOS('A', 10)
+#define PIN_VIVO_D10 PINPOS('A', 12)
+#define PIN_USB_VBUS_DET PINPOS('A', 13)
+#define PIN_MIPI_TXP3 PINPOS('B', 1)
+#define PIN_MIPI_TXM3 PINPOS('B', 2)
+#define PIN_MIPI_TXP4 PINPOS('B', 3)
+#define PIN_MIPIRX0P PINPOS('B', 4)
+#define PIN_MIPIRX1N PINPOS('B', 5)
+#define PIN_MIPIRX2N PINPOS('B', 6)
+#define PIN_MIPIRX4N PINPOS('B', 7)
+#define PIN_MIPIRX5N PINPOS('B', 8)
+#define PIN_VIVO_D1 PINPOS('B', 9)
+#define PIN_VIVO_D5 PINPOS('B', 10)
+#define PIN_VIVO_D7 PINPOS('B', 11)
+#define PIN_VIVO_D9 PINPOS('B', 12)
+#define PIN_USB_ID PINPOS('B', 13)
+#define PIN_ETH_RXM PINPOS('B', 15)
+#define PIN_MIPI_TXP2 PINPOS('C', 1)
+#define PIN_MIPI_TXM2 PINPOS('C', 2)
+#define PIN_CAM_PD0 PINPOS('C', 3)
+#define PIN_CAM_MCLK0 PINPOS('C', 4)
+#define PIN_MIPIRX1P PINPOS('C', 5)
+#define PIN_MIPIRX2P PINPOS('C', 6)
+#define PIN_MIPIRX3N PINPOS('C', 7)
+#define PIN_MIPIRX5P PINPOS('C', 8)
+#define PIN_VIVO_CLK PINPOS('C', 9)
+#define PIN_VIVO_D6 PINPOS('C', 10)
+#define PIN_VIVO_D8 PINPOS('C', 11)
+#define PIN_USB_VBUS_EN PINPOS('C', 12)
+#define PIN_ETH_RXP PINPOS('C', 14)
+#define PIN_GPIO_RTX PINPOS('C', 15)
+#define PIN_MIPI_TXP1 PINPOS('D', 1)
+#define PIN_MIPI_TXM1 PINPOS('D', 2)
+#define PIN_CAM_MCLK1 PINPOS('D', 3)
+#define PIN_IIC3_SCL PINPOS('D', 4)
+#define PIN_VIVO_D4 PINPOS('D', 10)
+#define PIN_ETH_TXM PINPOS('D', 14)
+#define PIN_ETH_TXP PINPOS('D', 15)
+#define PIN_MIPI_TXP0 PINPOS('E', 1)
+#define PIN_MIPI_TXM0 PINPOS('E', 2)
+#define PIN_CAM_PD1 PINPOS('E', 4)
+#define PIN_CAM_RST0 PINPOS('E', 5)
+#define PIN_VIVO_D0 PINPOS('E', 10)
+#define PIN_ADC1 PINPOS('E', 13)
+#define PIN_ADC2 PINPOS('E', 14)
+#define PIN_ADC3 PINPOS('E', 15)
+#define PIN_AUD_AOUTL PINPOS('F', 2)
+#define PIN_IIC3_SDA PINPOS('F', 4)
+#define PIN_SD1_D2 PINPOS('F', 14)
+#define PIN_AUD_AOUTR PINPOS('G', 2)
+#define PIN_SD1_D3 PINPOS('G', 13)
+#define PIN_SD1_CLK PINPOS('G', 14)
+#define PIN_SD1_CMD PINPOS('G', 15)
+#define PIN_AUD_AINL_MIC PINPOS('H', 1)
+#define PIN_RSTN PINPOS('H', 12)
+#define PIN_PWM0_BUCK PINPOS('H', 13)
+#define PIN_SD1_D1 PINPOS('H', 14)
+#define PIN_SD1_D0 PINPOS('H', 15)
+#define PIN_AUD_AINR_MIC PINPOS('J', 1)
+#define PIN_IIC2_SCL PINPOS('J', 13)
+#define PIN_IIC2_SDA PINPOS('J', 14)
+#define PIN_SD0_CD PINPOS('K', 2)
+#define PIN_SD0_D1 PINPOS('K', 3)
+#define PIN_UART2_RX PINPOS('K', 13)
+#define PIN_UART2_CTS PINPOS('K', 14)
+#define PIN_UART2_TX PINPOS('K', 15)
+#define PIN_SD0_CLK PINPOS('L', 1)
+#define PIN_SD0_D0 PINPOS('L', 2)
+#define PIN_SD0_CMD PINPOS('L', 3)
+#define PIN_CLK32K PINPOS('L', 14)
+#define PIN_UART2_RTS PINPOS('L', 15)
+#define PIN_SD0_D3 PINPOS('M', 1)
+#define PIN_SD0_D2 PINPOS('M', 2)
+#define PIN_UART0_RX PINPOS('M', 4)
+#define PIN_UART0_TX PINPOS('M', 5)
+#define PIN_JTAG_CPU_TRST PINPOS('M', 6)
+#define PIN_PWR_ON PINPOS('M', 11)
+#define PIN_PWR_GPIO2 PINPOS('M', 12)
+#define PIN_PWR_GPIO0 PINPOS('M', 13)
+#define PIN_CLK25M PINPOS('M', 14)
+#define PIN_SD0_PWR_EN PINPOS('N', 1)
+#define PIN_SPK_EN PINPOS('N', 3)
+#define PIN_JTAG_CPU_TCK PINPOS('N', 4)
+#define PIN_JTAG_CPU_TMS PINPOS('N', 6)
+#define PIN_PWR_WAKEUP1 PINPOS('N', 11)
+#define PIN_PWR_WAKEUP0 PINPOS('N', 12)
+#define PIN_PWR_GPIO1 PINPOS('N', 13)
+#define PIN_EMMC_DAT3 PINPOS('P', 1)
+#define PIN_EMMC_DAT0 PINPOS('P', 2)
+#define PIN_EMMC_DAT2 PINPOS('P', 3)
+#define PIN_EMMC_RSTN PINPOS('P', 4)
+#define PIN_AUX0 PINPOS('P', 5)
+#define PIN_IIC0_SDA PINPOS('P', 6)
+#define PIN_PWR_SEQ3 PINPOS('P', 10)
+#define PIN_PWR_VBAT_DET PINPOS('P', 11)
+#define PIN_PWR_SEQ1 PINPOS('P', 12)
+#define PIN_PWR_BUTTON1 PINPOS('P', 13)
+#define PIN_EMMC_DAT1 PINPOS('R', 2)
+#define PIN_EMMC_CMD PINPOS('R', 3)
+#define PIN_EMMC_CLK PINPOS('R', 4)
+#define PIN_IIC0_SCL PINPOS('R', 6)
+#define PIN_GPIO_ZQ PINPOS('R', 10)
+#define PIN_PWR_RSTN PINPOS('R', 11)
+#define PIN_PWR_SEQ2 PINPOS('R', 12)
+#define PIN_XTAL_XIN PINPOS('R', 13)
+
+#endif /* _DT_BINDINGS_PINCTRL_CV1812H_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-cv18xx.h b/include/dt-bindings/pinctrl/pinctrl-cv18xx.h
new file mode 100644
index 000000000000..bc92ad1067ec
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-cv18xx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Ltd.
+ *
+ * Author: Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_CV18XX_H
+#define _DT_BINDINGS_PINCTRL_CV18XX_H
+
+#define PIN_MUX_INVALD 0xff
+
+#define PINMUX2(pin, mux, mux2) \
+ (((pin) & 0xffff) | (((mux) & 0xff) << 16) | (((mux2) & 0xff) << 24))
+
+#define PINMUX(pin, mux) \
+ PINMUX2(pin, mux, PIN_MUX_INVALD)
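+/*
+ * Illustrative encoding (pin and mux indices here are placeholders, not
+ * taken from SoC documentation): PINMUX() selects a single function and
+ * leaves the second mux byte at PIN_MUX_INVALD, so for pin 15, mux 0:
+ *
+ *   PINMUX(15, 0) = 0x0f | (0 << 16) | (0xff << 24) = 0xff00000f
+ */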
+
+#endif /* _DT_BINDINGS_PINCTRL_CV18XX_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-sg2000.h b/include/dt-bindings/pinctrl/pinctrl-sg2000.h
new file mode 100644
index 000000000000..4871f9a7c6c1
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-sg2000.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_SG2000_H
+#define _DT_BINDINGS_PINCTRL_SG2000_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PINPOS(row, col) \
+ ((((row) - 'A' + 1) << 8) + ((col) - 1))
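+/*
+ * Same row/column packing as the other CV18xx-family headers, e.g.
+ * PINPOS('L', 1) = (12 << 8) + 0 = 0xc00 for PIN_SD0_CLK below.
+ */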
+
+#define PIN_MIPI_TXM4 PINPOS('A', 2)
+#define PIN_MIPIRX0N PINPOS('A', 4)
+#define PIN_MIPIRX3P PINPOS('A', 6)
+#define PIN_MIPIRX4P PINPOS('A', 7)
+#define PIN_VIVO_D2 PINPOS('A', 9)
+#define PIN_VIVO_D3 PINPOS('A', 10)
+#define PIN_VIVO_D10 PINPOS('A', 12)
+#define PIN_USB_VBUS_DET PINPOS('A', 13)
+#define PIN_MIPI_TXP3 PINPOS('B', 1)
+#define PIN_MIPI_TXM3 PINPOS('B', 2)
+#define PIN_MIPI_TXP4 PINPOS('B', 3)
+#define PIN_MIPIRX0P PINPOS('B', 4)
+#define PIN_MIPIRX1N PINPOS('B', 5)
+#define PIN_MIPIRX2N PINPOS('B', 6)
+#define PIN_MIPIRX4N PINPOS('B', 7)
+#define PIN_MIPIRX5N PINPOS('B', 8)
+#define PIN_VIVO_D1 PINPOS('B', 9)
+#define PIN_VIVO_D5 PINPOS('B', 10)
+#define PIN_VIVO_D7 PINPOS('B', 11)
+#define PIN_VIVO_D9 PINPOS('B', 12)
+#define PIN_USB_ID PINPOS('B', 13)
+#define PIN_ETH_RXM PINPOS('B', 15)
+#define PIN_MIPI_TXP2 PINPOS('C', 1)
+#define PIN_MIPI_TXM2 PINPOS('C', 2)
+#define PIN_CAM_PD0 PINPOS('C', 3)
+#define PIN_CAM_MCLK0 PINPOS('C', 4)
+#define PIN_MIPIRX1P PINPOS('C', 5)
+#define PIN_MIPIRX2P PINPOS('C', 6)
+#define PIN_MIPIRX3N PINPOS('C', 7)
+#define PIN_MIPIRX5P PINPOS('C', 8)
+#define PIN_VIVO_CLK PINPOS('C', 9)
+#define PIN_VIVO_D6 PINPOS('C', 10)
+#define PIN_VIVO_D8 PINPOS('C', 11)
+#define PIN_USB_VBUS_EN PINPOS('C', 12)
+#define PIN_ETH_RXP PINPOS('C', 14)
+#define PIN_GPIO_RTX PINPOS('C', 15)
+#define PIN_MIPI_TXP1 PINPOS('D', 1)
+#define PIN_MIPI_TXM1 PINPOS('D', 2)
+#define PIN_CAM_MCLK1 PINPOS('D', 3)
+#define PIN_IIC3_SCL PINPOS('D', 4)
+#define PIN_VIVO_D4 PINPOS('D', 10)
+#define PIN_ETH_TXM PINPOS('D', 14)
+#define PIN_ETH_TXP PINPOS('D', 15)
+#define PIN_MIPI_TXP0 PINPOS('E', 1)
+#define PIN_MIPI_TXM0 PINPOS('E', 2)
+#define PIN_CAM_PD1 PINPOS('E', 4)
+#define PIN_CAM_RST0 PINPOS('E', 5)
+#define PIN_VIVO_D0 PINPOS('E', 10)
+#define PIN_ADC1 PINPOS('E', 13)
+#define PIN_ADC2 PINPOS('E', 14)
+#define PIN_ADC3 PINPOS('E', 15)
+#define PIN_AUD_AOUTL PINPOS('F', 2)
+#define PIN_IIC3_SDA PINPOS('F', 4)
+#define PIN_SD1_D2 PINPOS('F', 14)
+#define PIN_AUD_AOUTR PINPOS('G', 2)
+#define PIN_SD1_D3 PINPOS('G', 13)
+#define PIN_SD1_CLK PINPOS('G', 14)
+#define PIN_SD1_CMD PINPOS('G', 15)
+#define PIN_AUD_AINL_MIC PINPOS('H', 1)
+#define PIN_RSTN PINPOS('H', 12)
+#define PIN_PWM0_BUCK PINPOS('H', 13)
+#define PIN_SD1_D1 PINPOS('H', 14)
+#define PIN_SD1_D0 PINPOS('H', 15)
+#define PIN_AUD_AINR_MIC PINPOS('J', 1)
+#define PIN_IIC2_SCL PINPOS('J', 13)
+#define PIN_IIC2_SDA PINPOS('J', 14)
+#define PIN_SD0_CD PINPOS('K', 2)
+#define PIN_SD0_D1 PINPOS('K', 3)
+#define PIN_UART2_RX PINPOS('K', 13)
+#define PIN_UART2_CTS PINPOS('K', 14)
+#define PIN_UART2_TX PINPOS('K', 15)
+#define PIN_SD0_CLK PINPOS('L', 1)
+#define PIN_SD0_D0 PINPOS('L', 2)
+#define PIN_SD0_CMD PINPOS('L', 3)
+#define PIN_CLK32K PINPOS('L', 14)
+#define PIN_UART2_RTS PINPOS('L', 15)
+#define PIN_SD0_D3 PINPOS('M', 1)
+#define PIN_SD0_D2 PINPOS('M', 2)
+#define PIN_UART0_RX PINPOS('M', 4)
+#define PIN_UART0_TX PINPOS('M', 5)
+#define PIN_JTAG_CPU_TRST PINPOS('M', 6)
+#define PIN_PWR_ON PINPOS('M', 11)
+#define PIN_PWR_GPIO2 PINPOS('M', 12)
+#define PIN_PWR_GPIO0 PINPOS('M', 13)
+#define PIN_CLK25M PINPOS('M', 14)
+#define PIN_SD0_PWR_EN PINPOS('N', 1)
+#define PIN_SPK_EN PINPOS('N', 3)
+#define PIN_JTAG_CPU_TCK PINPOS('N', 4)
+#define PIN_JTAG_CPU_TMS PINPOS('N', 6)
+#define PIN_PWR_WAKEUP1 PINPOS('N', 11)
+#define PIN_PWR_WAKEUP0 PINPOS('N', 12)
+#define PIN_PWR_GPIO1 PINPOS('N', 13)
+#define PIN_EMMC_DAT3 PINPOS('P', 1)
+#define PIN_EMMC_DAT0 PINPOS('P', 2)
+#define PIN_EMMC_DAT2 PINPOS('P', 3)
+#define PIN_EMMC_RSTN PINPOS('P', 4)
+#define PIN_AUX0 PINPOS('P', 5)
+#define PIN_IIC0_SDA PINPOS('P', 6)
+#define PIN_PWR_SEQ3 PINPOS('P', 10)
+#define PIN_PWR_VBAT_DET PINPOS('P', 11)
+#define PIN_PWR_SEQ1 PINPOS('P', 12)
+#define PIN_PWR_BUTTON1 PINPOS('P', 13)
+#define PIN_EMMC_DAT1 PINPOS('R', 2)
+#define PIN_EMMC_CMD PINPOS('R', 3)
+#define PIN_EMMC_CLK PINPOS('R', 4)
+#define PIN_IIC0_SCL PINPOS('R', 6)
+#define PIN_GPIO_ZQ PINPOS('R', 10)
+#define PIN_PWR_RSTN PINPOS('R', 11)
+#define PIN_PWR_SEQ2 PINPOS('R', 12)
+#define PIN_XTAL_XIN PINPOS('R', 13)
+
+#endif /* _DT_BINDINGS_PINCTRL_SG2000_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-sg2002.h b/include/dt-bindings/pinctrl/pinctrl-sg2002.h
new file mode 100644
index 000000000000..3c36cfa0a550
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-sg2002.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ * This file is generated from vendor pinout definition.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_SG2002_H
+#define _DT_BINDINGS_PINCTRL_SG2002_H
+
+#include <dt-bindings/pinctrl/pinctrl-cv18xx.h>
+
+#define PIN_AUD_AINL_MIC 2
+#define PIN_AUD_AOUTR 4
+#define PIN_SD0_CLK 6
+#define PIN_SD0_CMD 7
+#define PIN_SD0_D0 8
+#define PIN_SD0_D1 10
+#define PIN_SD0_D2 11
+#define PIN_SD0_D3 12
+#define PIN_SD0_CD 14
+#define PIN_SD0_PWR_EN 15
+#define PIN_SPK_EN 17
+#define PIN_UART0_TX 18
+#define PIN_UART0_RX 19
+#define PIN_EMMC_DAT2 20
+#define PIN_EMMC_CLK 21
+#define PIN_EMMC_DAT0 22
+#define PIN_EMMC_DAT3 23
+#define PIN_EMMC_CMD 24
+#define PIN_EMMC_DAT1 25
+#define PIN_JTAG_CPU_TMS 26
+#define PIN_JTAG_CPU_TCK 27
+#define PIN_IIC0_SCL 28
+#define PIN_IIC0_SDA 29
+#define PIN_AUX0 30
+#define PIN_GPIO_ZQ 35
+#define PIN_PWR_VBAT_DET 38
+#define PIN_PWR_RSTN 39
+#define PIN_PWR_SEQ1 40
+#define PIN_PWR_SEQ2 41
+#define PIN_PWR_WAKEUP0 43
+#define PIN_PWR_BUTTON1 44
+#define PIN_XTAL_XIN 45
+#define PIN_PWR_GPIO0 47
+#define PIN_PWR_GPIO1 48
+#define PIN_PWR_GPIO2 49
+#define PIN_SD1_D3 51
+#define PIN_SD1_D2 52
+#define PIN_SD1_D1 53
+#define PIN_SD1_D0 54
+#define PIN_SD1_CMD 55
+#define PIN_SD1_CLK 56
+#define PIN_PWM0_BUCK 58
+#define PIN_ADC1 59
+#define PIN_USB_VBUS_DET 60
+#define PIN_ETH_TXP 62
+#define PIN_ETH_TXM 63
+#define PIN_ETH_RXP 64
+#define PIN_ETH_RXM 65
+#define PIN_GPIO_RTX 67
+#define PIN_MIPIRX4N 72
+#define PIN_MIPIRX4P 73
+#define PIN_MIPIRX3N 74
+#define PIN_MIPIRX3P 75
+#define PIN_MIPIRX2N 76
+#define PIN_MIPIRX2P 77
+#define PIN_MIPIRX1N 78
+#define PIN_MIPIRX1P 79
+#define PIN_MIPIRX0N 80
+#define PIN_MIPIRX0P 81
+#define PIN_MIPI_TXM2 83
+#define PIN_MIPI_TXP2 84
+#define PIN_MIPI_TXM1 85
+#define PIN_MIPI_TXP1 86
+#define PIN_MIPI_TXM0 87
+#define PIN_MIPI_TXP0 88
+
+#endif /* _DT_BINDINGS_PINCTRL_SG2002_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-sg2042.h b/include/dt-bindings/pinctrl/pinctrl-sg2042.h
new file mode 100644
index 000000000000..79d5bb8e04f8
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-sg2042.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_SG2042_H
+#define _DT_BINDINGS_PINCTRL_SG2042_H
+
+#define PINMUX(pin, mux) \
+ (((pin) & 0xffff) | (((mux) & 0xff) << 16))
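+/*
+ * PINMUX() packs the pin index into the low 16 bits and the selected
+ * function into bits 23-16. Worked example (the mux index 1 is an
+ * assumed placeholder, not taken from SoC documentation):
+ *
+ *   PINMUX(PIN_UART0_TX, 1) = 81 | (1 << 16) = 0x10051
+ */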
+
+#define PIN_LPC_LCLK 0
+#define PIN_LPC_LFRAME 1
+#define PIN_LPC_LAD0 2
+#define PIN_LPC_LAD1 3
+#define PIN_LPC_LAD2 4
+#define PIN_LPC_LAD3 5
+#define PIN_LPC_LDRQ0 6
+#define PIN_LPC_LDRQ1 7
+#define PIN_LPC_SERIRQ 8
+#define PIN_LPC_CLKRUN 9
+#define PIN_LPC_LPME 10
+#define PIN_LPC_LPCPD 11
+#define PIN_LPC_LSMI 12
+#define PIN_PCIE0_L0_RESET 13
+#define PIN_PCIE0_L1_RESET 14
+#define PIN_PCIE0_L0_WAKEUP 15
+#define PIN_PCIE0_L1_WAKEUP 16
+#define PIN_PCIE0_L0_CLKREQ_IN 17
+#define PIN_PCIE0_L1_CLKREQ_IN 18
+#define PIN_PCIE1_L0_RESET 19
+#define PIN_PCIE1_L1_RESET 20
+#define PIN_PCIE1_L0_WAKEUP 21
+#define PIN_PCIE1_L1_WAKEUP 22
+#define PIN_PCIE1_L0_CLKREQ_IN 23
+#define PIN_PCIE1_L1_CLKREQ_IN 24
+#define PIN_SPIF0_CLK_SEL1 25
+#define PIN_SPIF0_CLK_SEL0 26
+#define PIN_SPIF0_WP 27
+#define PIN_SPIF0_HOLD 28
+#define PIN_SPIF0_SDI 29
+#define PIN_SPIF0_CS 30
+#define PIN_SPIF0_SCK 31
+#define PIN_SPIF0_SDO 32
+#define PIN_SPIF1_CLK_SEL1 33
+#define PIN_SPIF1_CLK_SEL0 34
+#define PIN_SPIF1_WP 35
+#define PIN_SPIF1_HOLD 36
+#define PIN_SPIF1_SDI 37
+#define PIN_SPIF1_CS 38
+#define PIN_SPIF1_SCK 39
+#define PIN_SPIF1_SDO 40
+#define PIN_EMMC_WP 41
+#define PIN_EMMC_CD 42
+#define PIN_EMMC_RST 43
+#define PIN_EMMC_PWR_EN 44
+#define PIN_SDIO_CD 45
+#define PIN_SDIO_WP 46
+#define PIN_SDIO_RST 47
+#define PIN_SDIO_PWR_EN 48
+#define PIN_RGMII0_TXD0 49
+#define PIN_RGMII0_TXD1 50
+#define PIN_RGMII0_TXD2 51
+#define PIN_RGMII0_TXD3 52
+#define PIN_RGMII0_TXCTRL 53
+#define PIN_RGMII0_RXD0 54
+#define PIN_RGMII0_RXD1 55
+#define PIN_RGMII0_RXD2 56
+#define PIN_RGMII0_RXD3 57
+#define PIN_RGMII0_RXCTRL 58
+#define PIN_RGMII0_TXC 59
+#define PIN_RGMII0_RXC 60
+#define PIN_RGMII0_REFCLKO 61
+#define PIN_RGMII0_IRQ 62
+#define PIN_RGMII0_MDC 63
+#define PIN_RGMII0_MDIO 64
+#define PIN_PWM0 65
+#define PIN_PWM1 66
+#define PIN_PWM2 67
+#define PIN_PWM3 68
+#define PIN_FAN0 69
+#define PIN_FAN1 70
+#define PIN_FAN2 71
+#define PIN_FAN3 72
+#define PIN_IIC0_SDA 73
+#define PIN_IIC0_SCL 74
+#define PIN_IIC1_SDA 75
+#define PIN_IIC1_SCL 76
+#define PIN_IIC2_SDA 77
+#define PIN_IIC2_SCL 78
+#define PIN_IIC3_SDA 79
+#define PIN_IIC3_SCL 80
+#define PIN_UART0_TX 81
+#define PIN_UART0_RX 82
+#define PIN_UART0_RTS 83
+#define PIN_UART0_CTS 84
+#define PIN_UART1_TX 85
+#define PIN_UART1_RX 86
+#define PIN_UART1_RTS 87
+#define PIN_UART1_CTS 88
+#define PIN_UART2_TX 89
+#define PIN_UART2_RX 90
+#define PIN_UART2_RTS 91
+#define PIN_UART2_CTS 92
+#define PIN_UART3_TX 93
+#define PIN_UART3_RX 94
+#define PIN_UART3_RTS 95
+#define PIN_UART3_CTS 96
+#define PIN_SPI0_CS0 97
+#define PIN_SPI0_CS1 98
+#define PIN_SPI0_SDI 99
+#define PIN_SPI0_SDO 100
+#define PIN_SPI0_SCK 101
+#define PIN_SPI1_CS0 102
+#define PIN_SPI1_CS1 103
+#define PIN_SPI1_SDI 104
+#define PIN_SPI1_SDO 105
+#define PIN_SPI1_SCK 106
+#define PIN_JTAG0_TDO 107
+#define PIN_JTAG0_TCK 108
+#define PIN_JTAG0_TDI 109
+#define PIN_JTAG0_TMS 110
+#define PIN_JTAG0_TRST 111
+#define PIN_JTAG0_SRST 112
+#define PIN_JTAG1_TDO 113
+#define PIN_JTAG1_TCK 114
+#define PIN_JTAG1_TDI 115
+#define PIN_JTAG1_TMS 116
+#define PIN_JTAG1_TRST 117
+#define PIN_JTAG1_SRST 118
+#define PIN_JTAG2_TDO 119
+#define PIN_JTAG2_TCK 120
+#define PIN_JTAG2_TDI 121
+#define PIN_JTAG2_TMS 122
+#define PIN_JTAG2_TRST 123
+#define PIN_JTAG2_SRST 124
+#define PIN_GPIO0 125
+#define PIN_GPIO1 126
+#define PIN_GPIO2 127
+#define PIN_GPIO3 128
+#define PIN_GPIO4 129
+#define PIN_GPIO5 130
+#define PIN_GPIO6 131
+#define PIN_GPIO7 132
+#define PIN_GPIO8 133
+#define PIN_GPIO9 134
+#define PIN_GPIO10 135
+#define PIN_GPIO11 136
+#define PIN_GPIO12 137
+#define PIN_GPIO13 138
+#define PIN_GPIO14 139
+#define PIN_GPIO15 140
+#define PIN_GPIO16 141
+#define PIN_GPIO17 142
+#define PIN_GPIO18 143
+#define PIN_GPIO19 144
+#define PIN_GPIO20 145
+#define PIN_GPIO21 146
+#define PIN_GPIO22 147
+#define PIN_GPIO23 148
+#define PIN_GPIO24 149
+#define PIN_GPIO25 150
+#define PIN_GPIO26 151
+#define PIN_GPIO27 152
+#define PIN_GPIO28 153
+#define PIN_GPIO29 154
+#define PIN_GPIO30 155
+#define PIN_GPIO31 156
+#define PIN_MODE_SEL0 157
+#define PIN_MODE_SEL1 158
+#define PIN_MODE_SEL2 159
+#define PIN_BOOT_SEL0 160
+#define PIN_BOOT_SEL1 161
+#define PIN_BOOT_SEL2 162
+#define PIN_BOOT_SEL3 163
+#define PIN_BOOT_SEL4 164
+#define PIN_BOOT_SEL5 165
+#define PIN_BOOT_SEL6 166
+#define PIN_BOOT_SEL7 167
+#define PIN_MULTI_SCKT 168
+#define PIN_SCKT_ID0 169
+#define PIN_SCKT_ID1 170
+#define PIN_PLL_CLK_IN_MAIN 171
+#define PIN_PLL_CLK_IN_DDR_L 172
+#define PIN_PLL_CLK_IN_DDR_R 173
+#define PIN_XTAL_32K 174
+#define PIN_SYS_RST 175
+#define PIN_PWR_BUTTON 176
+#define PIN_TEST_EN 177
+#define PIN_TEST_MODE_MBIST 178
+#define PIN_TEST_MODE_SCAN 179
+#define PIN_TEST_MODE_BSD 180
+#define PIN_BISR_BYP 181
+
+#endif /* _DT_BINDINGS_PINCTRL_SG2042_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-sg2044.h b/include/dt-bindings/pinctrl/pinctrl-sg2044.h
new file mode 100644
index 000000000000..2a619f681c39
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-sg2044.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2024 Inochi Amaoto <inochiama@outlook.com>
+ *
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_SG2044_H
+#define _DT_BINDINGS_PINCTRL_SG2044_H
+
+#define PINMUX(pin, mux) \
+ (((pin) & 0xffff) | (((mux) & 0xff) << 16))
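+/*
+ * Same 16/8-bit packing as the SG2042 header, e.g. (mux index assumed):
+ *
+ *   PINMUX(PIN_GPIO0, 0) = 148 | (0 << 16) = 0x94
+ */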
+
+#define PIN_IIC0_SMBSUS_IN 0
+#define PIN_IIC0_SMBSUS_OUT 1
+#define PIN_IIC0_SMBALERT 2
+#define PIN_IIC1_SMBSUS_IN 3
+#define PIN_IIC1_SMBSUS_OUT 4
+#define PIN_IIC1_SMBALERT 5
+#define PIN_IIC2_SMBSUS_IN 6
+#define PIN_IIC2_SMBSUS_OUT 7
+#define PIN_IIC2_SMBALERT 8
+#define PIN_IIC3_SMBSUS_IN 9
+#define PIN_IIC3_SMBSUS_OUT 10
+#define PIN_IIC3_SMBALERT 11
+#define PIN_PCIE0_L0_RESET 12
+#define PIN_PCIE0_L1_RESET 13
+#define PIN_PCIE0_L0_WAKEUP 14
+#define PIN_PCIE0_L1_WAKEUP 15
+#define PIN_PCIE0_L0_CLKREQ_IN 16
+#define PIN_PCIE0_L1_CLKREQ_IN 17
+#define PIN_PCIE1_L0_RESET 18
+#define PIN_PCIE1_L1_RESET 19
+#define PIN_PCIE1_L0_WAKEUP 20
+#define PIN_PCIE1_L1_WAKEUP 21
+#define PIN_PCIE1_L0_CLKREQ_IN 22
+#define PIN_PCIE1_L1_CLKREQ_IN 23
+#define PIN_PCIE2_L0_RESET 24
+#define PIN_PCIE2_L1_RESET 25
+#define PIN_PCIE2_L0_WAKEUP 26
+#define PIN_PCIE2_L1_WAKEUP 27
+#define PIN_PCIE2_L0_CLKREQ_IN 28
+#define PIN_PCIE2_L1_CLKREQ_IN 29
+#define PIN_PCIE3_L0_RESET 30
+#define PIN_PCIE3_L1_RESET 31
+#define PIN_PCIE3_L0_WAKEUP 32
+#define PIN_PCIE3_L1_WAKEUP 33
+#define PIN_PCIE3_L0_CLKREQ_IN 34
+#define PIN_PCIE3_L1_CLKREQ_IN 35
+#define PIN_PCIE4_L0_RESET 36
+#define PIN_PCIE4_L1_RESET 37
+#define PIN_PCIE4_L0_WAKEUP 38
+#define PIN_PCIE4_L1_WAKEUP 39
+#define PIN_PCIE4_L0_CLKREQ_IN 40
+#define PIN_PCIE4_L1_CLKREQ_IN 41
+#define PIN_SPIF0_CLK_SEL1 42
+#define PIN_SPIF0_CLK_SEL0 43
+#define PIN_SPIF0_WP 44
+#define PIN_SPIF0_HOLD 45
+#define PIN_SPIF0_SDI 46
+#define PIN_SPIF0_CS 47
+#define PIN_SPIF0_SCK 48
+#define PIN_SPIF0_SDO 49
+#define PIN_SPIF1_CLK_SEL1 50
+#define PIN_SPIF1_CLK_SEL0 51
+#define PIN_SPIF1_WP 52
+#define PIN_SPIF1_HOLD 53
+#define PIN_SPIF1_SDI 54
+#define PIN_SPIF1_CS 55
+#define PIN_SPIF1_SCK 56
+#define PIN_SPIF1_SDO 57
+#define PIN_EMMC_WP 58
+#define PIN_EMMC_CD 59
+#define PIN_EMMC_RST 60
+#define PIN_EMMC_PWR_EN 61
+#define PIN_SDIO_CD 62
+#define PIN_SDIO_WP 63
+#define PIN_SDIO_RST 64
+#define PIN_SDIO_PWR_EN 65
+#define PIN_RGMII0_TXD0 66
+#define PIN_RGMII0_TXD1 67
+#define PIN_RGMII0_TXD2 68
+#define PIN_RGMII0_TXD3 69
+#define PIN_RGMII0_TXCTRL 70
+#define PIN_RGMII0_RXD0 71
+#define PIN_RGMII0_RXD1 72
+#define PIN_RGMII0_RXD2 73
+#define PIN_RGMII0_RXD3 74
+#define PIN_RGMII0_RXCTRL 75
+#define PIN_RGMII0_TXC 76
+#define PIN_RGMII0_RXC 77
+#define PIN_RGMII0_REFCLKO 78
+#define PIN_RGMII0_IRQ 79
+#define PIN_RGMII0_MDC 80
+#define PIN_RGMII0_MDIO 81
+#define PIN_PWM0 82
+#define PIN_PWM1 83
+#define PIN_PWM2 84
+#define PIN_PWM3 85
+#define PIN_FAN0 86
+#define PIN_FAN1 87
+#define PIN_FAN2 88
+#define PIN_FAN3 89
+#define PIN_IIC0_SDA 90
+#define PIN_IIC0_SCL 91
+#define PIN_IIC1_SDA 92
+#define PIN_IIC1_SCL 93
+#define PIN_IIC2_SDA 94
+#define PIN_IIC2_SCL 95
+#define PIN_IIC3_SDA 96
+#define PIN_IIC3_SCL 97
+#define PIN_UART0_TX 98
+#define PIN_UART0_RX 99
+#define PIN_UART0_RTS 100
+#define PIN_UART0_CTS 101
+#define PIN_UART1_TX 102
+#define PIN_UART1_RX 103
+#define PIN_UART1_RTS 104
+#define PIN_UART1_CTS 105
+#define PIN_UART2_TX 106
+#define PIN_UART2_RX 107
+#define PIN_UART2_RTS 108
+#define PIN_UART2_CTS 109
+#define PIN_UART3_TX 110
+#define PIN_UART3_RX 111
+#define PIN_UART3_RTS 112
+#define PIN_UART3_CTS 113
+#define PIN_SPI0_CS0 114
+#define PIN_SPI0_CS1 115
+#define PIN_SPI0_SDI 116
+#define PIN_SPI0_SDO 117
+#define PIN_SPI0_SCK 118
+#define PIN_SPI1_CS0 119
+#define PIN_SPI1_CS1 120
+#define PIN_SPI1_SDI 121
+#define PIN_SPI1_SDO 122
+#define PIN_SPI1_SCK 123
+#define PIN_JTAG0_TDO 124
+#define PIN_JTAG0_TCK 125
+#define PIN_JTAG0_TDI 126
+#define PIN_JTAG0_TMS 127
+#define PIN_JTAG0_TRST 128
+#define PIN_JTAG0_SRST 129
+#define PIN_JTAG1_TDO 130
+#define PIN_JTAG1_TCK 131
+#define PIN_JTAG1_TDI 132
+#define PIN_JTAG1_TMS 133
+#define PIN_JTAG1_TRST 134
+#define PIN_JTAG1_SRST 135
+#define PIN_JTAG2_TDO 136
+#define PIN_JTAG2_TCK 137
+#define PIN_JTAG2_TDI 138
+#define PIN_JTAG2_TMS 139
+#define PIN_JTAG2_TRST 140
+#define PIN_JTAG2_SRST 141
+#define PIN_JTAG3_TDO 142
+#define PIN_JTAG3_TCK 143
+#define PIN_JTAG3_TDI 144
+#define PIN_JTAG3_TMS 145
+#define PIN_JTAG3_TRST 146
+#define PIN_JTAG3_SRST 147
+#define PIN_GPIO0 148
+#define PIN_GPIO1 149
+#define PIN_GPIO2 150
+#define PIN_GPIO3 151
+#define PIN_GPIO4 152
+#define PIN_GPIO5 153
+#define PIN_GPIO6 154
+#define PIN_GPIO7 155
+#define PIN_GPIO8 156
+#define PIN_GPIO9 157
+#define PIN_GPIO10 158
+#define PIN_GPIO11 159
+#define PIN_GPIO12 160
+#define PIN_GPIO13 161
+#define PIN_GPIO14 162
+#define PIN_GPIO15 163
+#define PIN_GPIO16 164
+#define PIN_GPIO17 165
+#define PIN_GPIO18 166
+#define PIN_GPIO19 167
+#define PIN_GPIO20 168
+#define PIN_GPIO21 169
+#define PIN_GPIO22 170
+#define PIN_GPIO23 171
+#define PIN_GPIO24 172
+#define PIN_GPIO25 173
+#define PIN_GPIO26 174
+#define PIN_GPIO27 175
+#define PIN_GPIO28 176
+#define PIN_GPIO29 177
+#define PIN_GPIO30 178
+#define PIN_GPIO31 179
+#define PIN_MODE_SEL0 180
+#define PIN_MODE_SEL1 181
+#define PIN_MODE_SEL2 182
+#define PIN_BOOT_SEL0 183
+#define PIN_BOOT_SEL1 184
+#define PIN_BOOT_SEL2 185
+#define PIN_BOOT_SEL3 186
+#define PIN_BOOT_SEL4 187
+#define PIN_BOOT_SEL5 188
+#define PIN_BOOT_SEL6 189
+#define PIN_BOOT_SEL7 190
+#define PIN_MULTI_SCKT 191
+#define PIN_SCKT_ID0 192
+#define PIN_SCKT_ID1 193
+#define PIN_PLL_CLK_IN_MAIN 194
+#define PIN_PLL_CLK_IN_DDR_0 195
+#define PIN_PLL_CLK_IN_DDR_1 196
+#define PIN_PLL_CLK_IN_DDR_2 197
+#define PIN_PLL_CLK_IN_DDR_3 198
+#define PIN_XTAL_32K 199
+#define PIN_SYS_RST 200
+#define PIN_PWR_BUTTON 201
+#define PIN_TEST_EN 202
+#define PIN_TEST_MODE_MBIST 203
+#define PIN_TEST_MODE_SCAN 204
+#define PIN_TEST_MODE_BSD 205
+#define PIN_BISR_BYP 206
+
+#endif /* _DT_BINDINGS_PINCTRL_SG2044_H */
diff --git a/include/dt-bindings/pinctrl/pinctrl-starfive-jh7100.h b/include/dt-bindings/pinctrl/pinctrl-starfive-jh7100.h
new file mode 100644
index 000000000000..a200f546d078
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-starfive-jh7100.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2021 Emil Renner Berthing <kernel@esmil.dk>
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_STARFIVE_JH7100_H__
+#define __DT_BINDINGS_PINCTRL_STARFIVE_JH7100_H__
+
+#define PAD_GPIO_OFFSET 0
+#define PAD_FUNC_SHARE_OFFSET 64
+#define PAD_GPIO(x) (PAD_GPIO_OFFSET + (x))
+#define PAD_FUNC_SHARE(x) (PAD_FUNC_SHARE_OFFSET + (x))
+
+/*
+ * GPIOMUX bits:
+ * | 31 - 24 | 23 - 16 | 15 - 8 | 7 | 6 | 5 - 0 |
+ * | dout | doen | din | dout rev | doen rev | gpio nr |
+ *
+ * dout: output signal
+ * doen: output enable signal
+ * din: optional input signal, 0xff = none
+ * dout rev: output signal reverse bit
+ * doen rev: output enable signal reverse bit
+ * gpio nr: gpio number, 0 - 63
+ */
+#define GPIOMUX(n, dout, doen, din) ( \
+ (((dout) & 0x80000000) >> (31 - 7)) | (((dout) & 0xff) << 24) | \
+ (((doen) & 0x80000000) >> (31 - 6)) | (((doen) & 0xff) << 16) | \
+ (((din) & 0xff) << 8) | \
+ ((n) & 0x3f))
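
As a sanity check on the packing above, here is a hypothetical entry (the pad number 40 is made up; the signal defines are the ones declared later in this header) routing the UART0 serial input to a pad whose output driver is parked low and disabled:

    GPIOMUX(40, GPO_LOW, GPO_ENABLE, GPI_UART0_PAD_SIN)
    /* dout = GPO_LOW           -> bits 31-24 = 0x00      */
    /* doen = GPO_ENABLE        -> bits 23-16 = 0x00      */
    /* din  = GPI_UART0_PAD_SIN -> bits 15-8  = 0x42 (66) */
    /* gpio nr = 40             -> bits 5-0   = 0x28      */
    /* packed value: 0x00004228                           */

Passing one of the *_OEN defines (an *_OE value with GPO_REVERSE set) lands the reverse flag in bit 6 through the ">> (31 - 6)" shift, so inverted output enables need no separate encoding.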
+
+#define GPO_REVERSE 0x80000000
+
+#define GPO_LOW 0
+#define GPO_HIGH 1
+#define GPO_ENABLE 0
+#define GPO_DISABLE 1
+#define GPO_CLK_GMAC_PAPHYREF 2
+#define GPO_JTAG_TDO 3
+#define GPO_JTAG_TDO_OEN 4
+#define GPO_DMIC_CLK_OUT 5
+#define GPO_DSP_JTDOEN_PAD 6
+#define GPO_DSP_JTDO_PAD 7
+#define GPO_I2C0_PAD_SCK_OE 8
+#define GPO_I2C0_PAD_SCK_OEN (GPO_I2C0_PAD_SCK_OE | GPO_REVERSE)
+#define GPO_I2C0_PAD_SDA_OE 9
+#define GPO_I2C0_PAD_SDA_OEN (GPO_I2C0_PAD_SDA_OE | GPO_REVERSE)
+#define GPO_I2C1_PAD_SCK_OE 10
+#define GPO_I2C1_PAD_SCK_OEN (GPO_I2C1_PAD_SCK_OE | GPO_REVERSE)
+#define GPO_I2C1_PAD_SDA_OE 11
+#define GPO_I2C1_PAD_SDA_OEN (GPO_I2C1_PAD_SDA_OE | GPO_REVERSE)
+#define GPO_I2C2_PAD_SCK_OE 12
+#define GPO_I2C2_PAD_SCK_OEN (GPO_I2C2_PAD_SCK_OE | GPO_REVERSE)
+#define GPO_I2C2_PAD_SDA_OE 13
+#define GPO_I2C2_PAD_SDA_OEN (GPO_I2C2_PAD_SDA_OE | GPO_REVERSE)
+#define GPO_I2C3_PAD_SCK_OE 14
+#define GPO_I2C3_PAD_SCK_OEN (GPO_I2C3_PAD_SCK_OE | GPO_REVERSE)
+#define GPO_I2C3_PAD_SDA_OE 15
+#define GPO_I2C3_PAD_SDA_OEN (GPO_I2C3_PAD_SDA_OE | GPO_REVERSE)
+#define GPO_I2SRX_BCLK_OUT 16
+#define GPO_I2SRX_BCLK_OUT_OEN 17
+#define GPO_I2SRX_LRCK_OUT 18
+#define GPO_I2SRX_LRCK_OUT_OEN 19
+#define GPO_I2SRX_MCLK_OUT 20
+#define GPO_I2STX_BCLK_OUT 21
+#define GPO_I2STX_BCLK_OUT_OEN 22
+#define GPO_I2STX_LRCK_OUT 23
+#define GPO_I2STX_LRCK_OUT_OEN 24
+#define GPO_I2STX_MCLK_OUT 25
+#define GPO_I2STX_SDOUT0 26
+#define GPO_I2STX_SDOUT1 27
+#define GPO_LCD_PAD_CSM_N 28
+#define GPO_PWM_PAD_OE_N_BIT0 29
+#define GPO_PWM_PAD_OE_N_BIT1 30
+#define GPO_PWM_PAD_OE_N_BIT2 31
+#define GPO_PWM_PAD_OE_N_BIT3 32
+#define GPO_PWM_PAD_OE_N_BIT4 33
+#define GPO_PWM_PAD_OE_N_BIT5 34
+#define GPO_PWM_PAD_OE_N_BIT6 35
+#define GPO_PWM_PAD_OE_N_BIT7 36
+#define GPO_PWM_PAD_OUT_BIT0 37
+#define GPO_PWM_PAD_OUT_BIT1 38
+#define GPO_PWM_PAD_OUT_BIT2 39
+#define GPO_PWM_PAD_OUT_BIT3 40
+#define GPO_PWM_PAD_OUT_BIT4 41
+#define GPO_PWM_PAD_OUT_BIT5 42
+#define GPO_PWM_PAD_OUT_BIT6 43
+#define GPO_PWM_PAD_OUT_BIT7 44
+#define GPO_PWMDAC_LEFT_OUT 45
+#define GPO_PWMDAC_RIGHT_OUT 46
+#define GPO_QSPI_CSN1_OUT 47
+#define GPO_QSPI_CSN2_OUT 48
+#define GPO_QSPI_CSN3_OUT 49
+#define GPO_REGISTER23_SCFG_CMSENSOR_RST0 50
+#define GPO_REGISTER23_SCFG_CMSENSOR_RST1 51
+#define GPO_REGISTER32_SCFG_GMAC_PHY_RSTN 52
+#define GPO_SDIO0_PAD_CARD_POWER_EN 53
+#define GPO_SDIO0_PAD_CCLK_OUT 54
+#define GPO_SDIO0_PAD_CCMD_OE 55
+#define GPO_SDIO0_PAD_CCMD_OEN (GPO_SDIO0_PAD_CCMD_OE | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CCMD_OUT 56
+#define GPO_SDIO0_PAD_CDATA_OE_BIT0 57
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT0 (GPO_SDIO0_PAD_CDATA_OE_BIT0 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT1 58
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT1 (GPO_SDIO0_PAD_CDATA_OE_BIT1 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT2 59
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT2 (GPO_SDIO0_PAD_CDATA_OE_BIT2 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT3 60
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT3 (GPO_SDIO0_PAD_CDATA_OE_BIT3 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT4 61
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT4 (GPO_SDIO0_PAD_CDATA_OE_BIT4 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT5 62
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT5 (GPO_SDIO0_PAD_CDATA_OE_BIT5 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT6 63
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT6 (GPO_SDIO0_PAD_CDATA_OE_BIT6 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OE_BIT7 64
+#define GPO_SDIO0_PAD_CDATA_OEN_BIT7 (GPO_SDIO0_PAD_CDATA_OE_BIT7 | GPO_REVERSE)
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT0 65
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT1 66
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT2 67
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT3 68
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT4 69
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT5 70
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT6 71
+#define GPO_SDIO0_PAD_CDATA_OUT_BIT7 72
+#define GPO_SDIO0_PAD_RST_N 73
+#define GPO_SDIO1_PAD_CARD_POWER_EN 74
+#define GPO_SDIO1_PAD_CCLK_OUT 75
+#define GPO_SDIO1_PAD_CCMD_OE 76
+#define GPO_SDIO1_PAD_CCMD_OEN (GPO_SDIO1_PAD_CCMD_OE | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CCMD_OUT 77
+#define GPO_SDIO1_PAD_CDATA_OE_BIT0 78
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT0 (GPO_SDIO1_PAD_CDATA_OE_BIT0 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT1 79
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT1 (GPO_SDIO1_PAD_CDATA_OE_BIT1 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT2 80
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT2 (GPO_SDIO1_PAD_CDATA_OE_BIT2 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT3 81
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT3 (GPO_SDIO1_PAD_CDATA_OE_BIT3 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT4 82
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT4 (GPO_SDIO1_PAD_CDATA_OE_BIT4 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT5 83
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT5 (GPO_SDIO1_PAD_CDATA_OE_BIT5 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT6 84
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT6 (GPO_SDIO1_PAD_CDATA_OE_BIT6 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OE_BIT7 85
+#define GPO_SDIO1_PAD_CDATA_OEN_BIT7 (GPO_SDIO1_PAD_CDATA_OE_BIT7 | GPO_REVERSE)
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT0 86
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT1 87
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT2 88
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT3 89
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT4 90
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT5 91
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT6 92
+#define GPO_SDIO1_PAD_CDATA_OUT_BIT7 93
+#define GPO_SDIO1_PAD_RST_N 94
+#define GPO_SPDIF_TX_SDOUT 95
+#define GPO_SPDIF_TX_SDOUT_OEN 96
+#define GPO_SPI0_PAD_OE_N 97
+#define GPO_SPI0_PAD_SCK_OUT 98
+#define GPO_SPI0_PAD_SS_0_N 99
+#define GPO_SPI0_PAD_SS_1_N 100
+#define GPO_SPI0_PAD_TXD 101
+#define GPO_SPI1_PAD_OE_N 102
+#define GPO_SPI1_PAD_SCK_OUT 103
+#define GPO_SPI1_PAD_SS_0_N 104
+#define GPO_SPI1_PAD_SS_1_N 105
+#define GPO_SPI1_PAD_TXD 106
+#define GPO_SPI2_PAD_OE_N 107
+#define GPO_SPI2_PAD_SCK_OUT 108
+#define GPO_SPI2_PAD_SS_0_N 109
+#define GPO_SPI2_PAD_SS_1_N 110
+#define GPO_SPI2_PAD_TXD 111
+#define GPO_SPI2AHB_PAD_OE_N_BIT0 112
+#define GPO_SPI2AHB_PAD_OE_N_BIT1 113
+#define GPO_SPI2AHB_PAD_OE_N_BIT2 114
+#define GPO_SPI2AHB_PAD_OE_N_BIT3 115
+#define GPO_SPI2AHB_PAD_TXD_BIT0 116
+#define GPO_SPI2AHB_PAD_TXD_BIT1 117
+#define GPO_SPI2AHB_PAD_TXD_BIT2 118
+#define GPO_SPI2AHB_PAD_TXD_BIT3 119
+#define GPO_SPI3_PAD_OE_N 120
+#define GPO_SPI3_PAD_SCK_OUT 121
+#define GPO_SPI3_PAD_SS_0_N 122
+#define GPO_SPI3_PAD_SS_1_N 123
+#define GPO_SPI3_PAD_TXD 124
+#define GPO_UART0_PAD_DTRN 125
+#define GPO_UART0_PAD_RTSN 126
+#define GPO_UART0_PAD_SOUT 127
+#define GPO_UART1_PAD_SOUT 128
+#define GPO_UART2_PAD_DTR_N 129
+#define GPO_UART2_PAD_RTS_N 130
+#define GPO_UART2_PAD_SOUT 131
+#define GPO_UART3_PAD_SOUT 132
+#define GPO_USB_DRV_BUS 133
+
+#define GPI_CPU_JTAG_TCK 0
+#define GPI_CPU_JTAG_TDI 1
+#define GPI_CPU_JTAG_TMS 2
+#define GPI_CPU_JTAG_TRST 3
+#define GPI_DMIC_SDIN_BIT0 4
+#define GPI_DMIC_SDIN_BIT1 5
+#define GPI_DSP_JTCK_PAD 6
+#define GPI_DSP_JTDI_PAD 7
+#define GPI_DSP_JTMS_PAD 8
+#define GPI_DSP_TRST_PAD 9
+#define GPI_I2C0_PAD_SCK_IN 10
+#define GPI_I2C0_PAD_SDA_IN 11
+#define GPI_I2C1_PAD_SCK_IN 12
+#define GPI_I2C1_PAD_SDA_IN 13
+#define GPI_I2C2_PAD_SCK_IN 14
+#define GPI_I2C2_PAD_SDA_IN 15
+#define GPI_I2C3_PAD_SCK_IN 16
+#define GPI_I2C3_PAD_SDA_IN 17
+#define GPI_I2SRX_BCLK_IN 18
+#define GPI_I2SRX_LRCK_IN 19
+#define GPI_I2SRX_SDIN_BIT0 20
+#define GPI_I2SRX_SDIN_BIT1 21
+#define GPI_I2SRX_SDIN_BIT2 22
+#define GPI_I2STX_BCLK_IN 23
+#define GPI_I2STX_LRCK_IN 24
+#define GPI_SDIO0_PAD_CARD_DETECT_N 25
+#define GPI_SDIO0_PAD_CARD_WRITE_PRT 26
+#define GPI_SDIO0_PAD_CCMD_IN 27
+#define GPI_SDIO0_PAD_CDATA_IN_BIT0 28
+#define GPI_SDIO0_PAD_CDATA_IN_BIT1 29
+#define GPI_SDIO0_PAD_CDATA_IN_BIT2 30
+#define GPI_SDIO0_PAD_CDATA_IN_BIT3 31
+#define GPI_SDIO0_PAD_CDATA_IN_BIT4 32
+#define GPI_SDIO0_PAD_CDATA_IN_BIT5 33
+#define GPI_SDIO0_PAD_CDATA_IN_BIT6 34
+#define GPI_SDIO0_PAD_CDATA_IN_BIT7 35
+#define GPI_SDIO1_PAD_CARD_DETECT_N 36
+#define GPI_SDIO1_PAD_CARD_WRITE_PRT 37
+#define GPI_SDIO1_PAD_CCMD_IN 38
+#define GPI_SDIO1_PAD_CDATA_IN_BIT0 39
+#define GPI_SDIO1_PAD_CDATA_IN_BIT1 40
+#define GPI_SDIO1_PAD_CDATA_IN_BIT2 41
+#define GPI_SDIO1_PAD_CDATA_IN_BIT3 42
+#define GPI_SDIO1_PAD_CDATA_IN_BIT4 43
+#define GPI_SDIO1_PAD_CDATA_IN_BIT5 44
+#define GPI_SDIO1_PAD_CDATA_IN_BIT6 45
+#define GPI_SDIO1_PAD_CDATA_IN_BIT7 46
+#define GPI_SPDIF_RX_SDIN 47
+#define GPI_SPI0_PAD_RXD 48
+#define GPI_SPI0_PAD_SS_IN_N 49
+#define GPI_SPI1_PAD_RXD 50
+#define GPI_SPI1_PAD_SS_IN_N 51
+#define GPI_SPI2_PAD_RXD 52
+#define GPI_SPI2_PAD_SS_IN_N 53
+#define GPI_SPI2AHB_PAD_RXD_BIT0 54
+#define GPI_SPI2AHB_PAD_RXD_BIT1 55
+#define GPI_SPI2AHB_PAD_RXD_BIT2 56
+#define GPI_SPI2AHB_PAD_RXD_BIT3 57
+#define GPI_SPI2AHB_PAD_SS_N 58
+#define GPI_SPI2AHB_SLV_SCLKIN 59
+#define GPI_SPI3_PAD_RXD 60
+#define GPI_SPI3_PAD_SS_IN_N 61
+#define GPI_UART0_PAD_CTSN 62
+#define GPI_UART0_PAD_DCDN 63
+#define GPI_UART0_PAD_DSRN 64
+#define GPI_UART0_PAD_RIN 65
+#define GPI_UART0_PAD_SIN 66
+#define GPI_UART1_PAD_SIN 67
+#define GPI_UART2_PAD_CTS_N 68
+#define GPI_UART2_PAD_DCD_N 69
+#define GPI_UART2_PAD_DSR_N 70
+#define GPI_UART2_PAD_RI_N 71
+#define GPI_UART2_PAD_SIN 72
+#define GPI_UART3_PAD_SIN 73
+#define GPI_USB_OVER_CURRENT 74
+
+#define GPI_NONE 0xff
+
+#endif /* __DT_BINDINGS_PINCTRL_STARFIVE_JH7100_H__ */
diff --git a/include/dt-bindings/pinctrl/pinctrl-zynq.h b/include/dt-bindings/pinctrl/pinctrl-zynq.h
new file mode 100644
index 000000000000..bbfc345f017d
--- /dev/null
+++ b/include/dt-bindings/pinctrl/pinctrl-zynq.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MIO pin configuration defines for Xilinx Zynq
+ *
+ * Copyright (C) 2021 Xilinx, Inc.
+ */
+
+#ifndef _DT_BINDINGS_PINCTRL_ZYNQ_H
+#define _DT_BINDINGS_PINCTRL_ZYNQ_H
+
+/* Configuration options for different power supplies */
+#define IO_STANDARD_LVCMOS18 1
+#define IO_STANDARD_LVCMOS25 2
+#define IO_STANDARD_LVCMOS33 3
+#define IO_STANDARD_HSTL 4
+
+#endif /* _DT_BINDINGS_PINCTRL_ZYNQ_H */
diff --git a/include/dt-bindings/pinctrl/r7s9210-pinctrl.h b/include/dt-bindings/pinctrl/r7s9210-pinctrl.h
index 2d0c23e5d3a7..8736ce038eca 100644
--- a/include/dt-bindings/pinctrl/r7s9210-pinctrl.h
+++ b/include/dt-bindings/pinctrl/r7s9210-pinctrl.h
@@ -42,6 +42,6 @@
/*
* Convert a port and pin label to its global pin index
*/
- #define RZA2_PIN(port, pin) ((port) * RZA2_PINS_PER_PORT + (pin))
+#define RZA2_PIN(port, pin) ((port) * RZA2_PINS_PER_PORT + (pin))
#endif /* __DT_BINDINGS_PINCTRL_RENESAS_RZA2_H */
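
For reference, the macro simply flattens a (port, pin) pair into one index, e.g.:

    RZA2_PIN(3, 4) /* = 3 * RZA2_PINS_PER_PORT + 4; RZA2_PINS_PER_PORT is defined earlier in this header */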
diff --git a/include/dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h b/include/dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h
new file mode 100644
index 000000000000..5917096720bd
--- /dev/null
+++ b/include/dt-bindings/pinctrl/renesas,r9a09g047-pinctrl.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/G3E family pinctrl bindings.
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_RENESAS_R9A09G047_PINCTRL_H__
+#define __DT_BINDINGS_PINCTRL_RENESAS_R9A09G047_PINCTRL_H__
+
+#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
+
+/* RZG3E_Px = Offset address of PFC_P_mn - 0x20 */
+#define RZG3E_P0 0
+#define RZG3E_P1 1
+#define RZG3E_P2 2
+#define RZG3E_P3 3
+#define RZG3E_P4 4
+#define RZG3E_P5 5
+#define RZG3E_P6 6
+#define RZG3E_P7 7
+#define RZG3E_P8 8
+#define RZG3E_PA 10
+#define RZG3E_PB 11
+#define RZG3E_PC 12
+#define RZG3E_PD 13
+#define RZG3E_PE 14
+#define RZG3E_PF 15
+#define RZG3E_PG 16
+#define RZG3E_PH 17
+#define RZG3E_PJ 19
+#define RZG3E_PK 20
+#define RZG3E_PL 21
+#define RZG3E_PM 22
+#define RZG3E_PS 28
+
+#define RZG3E_PORT_PINMUX(b, p, f) RZG2L_PORT_PINMUX(RZG3E_P##b, p, f)
+#define RZG3E_GPIO(port, pin) RZG2L_GPIO(RZG3E_P##port, pin)
+
+#endif /* __DT_BINDINGS_PINCTRL_RENESAS_R9A09G047_PINCTRL_H__ */
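
Because the port argument is token-pasted onto RZG3E_P, the letter-named banks work directly; a sketch with made-up pin and function numbers:

    RZG3E_PORT_PINMUX(A, 2, 5) /* -> RZG2L_PORT_PINMUX(RZG3E_PA, 2, 5) = (10 * 8 + 2) | (5 << 16) = 0x50052 */
    RZG3E_GPIO(A, 2)           /* -> RZG2L_GPIO(RZG3E_PA, 2) = 82 */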
diff --git a/include/dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h b/include/dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h
new file mode 100644
index 000000000000..2e83bf43160b
--- /dev/null
+++ b/include/dt-bindings/pinctrl/renesas,r9a09g057-pinctrl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/V2H family pinctrl bindings.
+ *
+ * Copyright (C) 2024 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_RENESAS_R9A09G057_PINCTRL_H__
+#define __DT_BINDINGS_PINCTRL_RENESAS_R9A09G057_PINCTRL_H__
+
+#include <dt-bindings/pinctrl/rzg2l-pinctrl.h>
+
+/* RZV2H_Px = Offset address of PFC_P_mn - 0x20 */
+#define RZV2H_P0 0
+#define RZV2H_P1 1
+#define RZV2H_P2 2
+#define RZV2H_P3 3
+#define RZV2H_P4 4
+#define RZV2H_P5 5
+#define RZV2H_P6 6
+#define RZV2H_P7 7
+#define RZV2H_P8 8
+#define RZV2H_P9 9
+#define RZV2H_PA 10
+#define RZV2H_PB 11
+
+#define RZV2H_PORT_PINMUX(b, p, f) RZG2L_PORT_PINMUX(RZV2H_P##b, p, f)
+#define RZV2H_GPIO(port, pin) RZG2L_GPIO(RZV2H_P##port, pin)
+
+#endif /* __DT_BINDINGS_PINCTRL_RENESAS_R9A09G057_PINCTRL_H__ */
diff --git a/include/dt-bindings/pinctrl/renesas,r9a09g077-pinctrl.h b/include/dt-bindings/pinctrl/renesas,r9a09g077-pinctrl.h
new file mode 100644
index 000000000000..f088793f23ee
--- /dev/null
+++ b/include/dt-bindings/pinctrl/renesas,r9a09g077-pinctrl.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/T2H family pinctrl bindings.
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_RENESAS_R9A09G077_PINCTRL_H__
+#define __DT_BINDINGS_PINCTRL_RENESAS_R9A09G077_PINCTRL_H__
+
+#define RZT2H_PINS_PER_PORT 8
+
+/*
+ * Create the pin index from its bank and position numbers, and store
+ * the alternate function identifier in the upper 16 bits
+ */
+#define RZT2H_PORT_PINMUX(b, p, f) ((b) * RZT2H_PINS_PER_PORT + (p) | ((f) << 16))
+
+/* Convert a port and pin label to its global pin index */
+#define RZT2H_GPIO(port, pin) ((port) * RZT2H_PINS_PER_PORT + (pin))
+
+#endif /* __DT_BINDINGS_PINCTRL_RENESAS_R9A09G077_PINCTRL_H__ */
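
The two helpers are inverses of a simple split: the pin index sits in the low 16 bits and the function in the high 16. A hypothetical entry:

    RZT2H_PORT_PINMUX(5, 1, 3) /* = (5 * 8 + 1) | (3 << 16) = 0x30029 */
    /* a consumer can take it back apart: pin = v & 0xffff; func = v >> 16; */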
diff --git a/include/dt-bindings/pinctrl/rzg2l-pinctrl.h b/include/dt-bindings/pinctrl/rzg2l-pinctrl.h
new file mode 100644
index 000000000000..c78ed5e5efb7
--- /dev/null
+++ b/include/dt-bindings/pinctrl/rzg2l-pinctrl.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/G2L family pinctrl bindings.
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_RZG2L_PINCTRL_H
+#define __DT_BINDINGS_RZG2L_PINCTRL_H
+
+#define RZG2L_PINS_PER_PORT 8
+
+/*
+ * Create the pin index from its bank and position numbers, and store
+ * the alternate function identifier in the upper 16 bits
+ */
+#define RZG2L_PORT_PINMUX(b, p, f) ((b) * RZG2L_PINS_PER_PORT + (p) | ((f) << 16))
+
+/* Convert a port and pin label to its global pin index */
+#define RZG2L_GPIO(port, pin) ((port) * RZG2L_PINS_PER_PORT + (pin))
+
+#endif /* __DT_BINDINGS_RZG2L_PINCTRL_H */
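
Same scheme as the RZ/T2H header above; with made-up numbers, alternate function 2 on pin 3 of port 4 packs as:

    RZG2L_PORT_PINMUX(4, 3, 2) /* = (4 * 8 + 3) | (2 << 16) = 0x20023 */
    RZG2L_GPIO(4, 3)           /* = 35, the same pad used as plain GPIO */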
diff --git a/include/dt-bindings/pinctrl/rzv2m-pinctrl.h b/include/dt-bindings/pinctrl/rzv2m-pinctrl.h
new file mode 100644
index 000000000000..525532cd15da
--- /dev/null
+++ b/include/dt-bindings/pinctrl/rzv2m-pinctrl.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/V2M pinctrl bindings.
+ *
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __DT_BINDINGS_RZV2M_PINCTRL_H
+#define __DT_BINDINGS_RZV2M_PINCTRL_H
+
+#define RZV2M_PINS_PER_PORT 16
+
+/*
+ * Create the pin index from its bank and position numbers, and store
+ * the alternate function identifier in the upper 16 bits
+ */
+#define RZV2M_PORT_PINMUX(b, p, f) ((b) * RZV2M_PINS_PER_PORT + (p) | ((f) << 16))
+
+/* Convert a port and pin label to its global pin index */
+#define RZV2M_GPIO(port, pin) ((port) * RZV2M_PINS_PER_PORT + (pin))
+
+#endif /* __DT_BINDINGS_RZV2M_PINCTRL_H */
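
The only difference from the RZ/G2L variant is the port width, 16 pins instead of 8, so with made-up numbers:

    RZV2M_GPIO(2, 10)           /* = 2 * 16 + 10 = 42 */
    RZV2M_PORT_PINMUX(2, 10, 1) /* = 42 | (1 << 16) = 0x1002a */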
diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h
deleted file mode 100644
index b1832506b923..000000000000
--- a/include/dt-bindings/pinctrl/samsung.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Samsung's Exynos pinctrl bindings
- *
- * Copyright (c) 2016 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- * Author: Krzysztof Kozlowski <krzk@kernel.org>
- */
-
-#ifndef __DT_BINDINGS_PINCTRL_SAMSUNG_H__
-#define __DT_BINDINGS_PINCTRL_SAMSUNG_H__
-
-#define EXYNOS_PIN_PULL_NONE 0
-#define EXYNOS_PIN_PULL_DOWN 1
-#define EXYNOS_PIN_PULL_UP 3
-
-#define S3C64XX_PIN_PULL_NONE 0
-#define S3C64XX_PIN_PULL_DOWN 1
-#define S3C64XX_PIN_PULL_UP 2
-
-/* Pin function in power down mode */
-#define EXYNOS_PIN_PDN_OUT0 0
-#define EXYNOS_PIN_PDN_OUT1 1
-#define EXYNOS_PIN_PDN_INPUT 2
-#define EXYNOS_PIN_PDN_PREV 3
-
-/* Drive strengths for Exynos3250, Exynos4 (all) and Exynos5250 */
-#define EXYNOS4_PIN_DRV_LV1 0
-#define EXYNOS4_PIN_DRV_LV2 2
-#define EXYNOS4_PIN_DRV_LV3 1
-#define EXYNOS4_PIN_DRV_LV4 3
-
-/* Drive strengths for Exynos5260 */
-#define EXYNOS5260_PIN_DRV_LV1 0
-#define EXYNOS5260_PIN_DRV_LV2 1
-#define EXYNOS5260_PIN_DRV_LV4 2
-#define EXYNOS5260_PIN_DRV_LV6 3
-
-/* Drive strengths for Exynos5410, Exynos542x and Exynos5800 */
-#define EXYNOS5420_PIN_DRV_LV1 0
-#define EXYNOS5420_PIN_DRV_LV2 1
-#define EXYNOS5420_PIN_DRV_LV3 2
-#define EXYNOS5420_PIN_DRV_LV4 3
-
-/* Drive strengths for Exynos5433 */
-#define EXYNOS5433_PIN_DRV_FAST_SR1 0
-#define EXYNOS5433_PIN_DRV_FAST_SR2 1
-#define EXYNOS5433_PIN_DRV_FAST_SR3 2
-#define EXYNOS5433_PIN_DRV_FAST_SR4 3
-#define EXYNOS5433_PIN_DRV_FAST_SR5 4
-#define EXYNOS5433_PIN_DRV_FAST_SR6 5
-#define EXYNOS5433_PIN_DRV_SLOW_SR1 8
-#define EXYNOS5433_PIN_DRV_SLOW_SR2 9
-#define EXYNOS5433_PIN_DRV_SLOW_SR3 0xa
-#define EXYNOS5433_PIN_DRV_SLOW_SR4 0xb
-#define EXYNOS5433_PIN_DRV_SLOW_SR5 0xc
-#define EXYNOS5433_PIN_DRV_SLOW_SR6 0xf
-
-#define EXYNOS_PIN_FUNC_INPUT 0
-#define EXYNOS_PIN_FUNC_OUTPUT 1
-#define EXYNOS_PIN_FUNC_2 2
-#define EXYNOS_PIN_FUNC_3 3
-#define EXYNOS_PIN_FUNC_4 4
-#define EXYNOS_PIN_FUNC_5 5
-#define EXYNOS_PIN_FUNC_6 6
-#define EXYNOS_PIN_FUNC_EINT 0xf
-#define EXYNOS_PIN_FUNC_F EXYNOS_PIN_FUNC_EINT
-
-/* Drive strengths for Exynos7 FSYS1 block */
-#define EXYNOS7_FSYS1_PIN_DRV_LV1 0
-#define EXYNOS7_FSYS1_PIN_DRV_LV2 4
-#define EXYNOS7_FSYS1_PIN_DRV_LV3 2
-#define EXYNOS7_FSYS1_PIN_DRV_LV4 6
-#define EXYNOS7_FSYS1_PIN_DRV_LV5 1
-#define EXYNOS7_FSYS1_PIN_DRV_LV6 5
-
-#endif /* __DT_BINDINGS_PINCTRL_SAMSUNG_H__ */
diff --git a/include/dt-bindings/pinctrl/sppctl-sp7021.h b/include/dt-bindings/pinctrl/sppctl-sp7021.h
new file mode 100644
index 000000000000..629aa9b5ffbc
--- /dev/null
+++ b/include/dt-bindings/pinctrl/sppctl-sp7021.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Sunplus SP7021 dt-bindings Pinctrl header file
+ * Copyright (C) Sunplus Tech/Tibbo Tech.
+ * Author: Dvorkin Dmitry <dvorkin@tibbo.com>
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_SPPCTL_SP7021_H__
+#define __DT_BINDINGS_PINCTRL_SPPCTL_SP7021_H__
+
+#include <dt-bindings/pinctrl/sppctl.h>
+
+/*
+ * Please don't change the order of the following defines.
+ * They follow the order of the 'hardware' control register
+ * fields defined in the MOON2 ~ MOON3 registers.
+ */
+#define MUXF_GPIO 0
+#define MUXF_IOP 1
+#define MUXF_L2SW_CLK_OUT 2
+#define MUXF_L2SW_MAC_SMI_MDC 3
+#define MUXF_L2SW_LED_FLASH0 4
+#define MUXF_L2SW_LED_FLASH1 5
+#define MUXF_L2SW_LED_ON0 6
+#define MUXF_L2SW_LED_ON1 7
+#define MUXF_L2SW_MAC_SMI_MDIO 8
+#define MUXF_L2SW_P0_MAC_RMII_TXEN 9
+#define MUXF_L2SW_P0_MAC_RMII_TXD0 10
+#define MUXF_L2SW_P0_MAC_RMII_TXD1 11
+#define MUXF_L2SW_P0_MAC_RMII_CRSDV 12
+#define MUXF_L2SW_P0_MAC_RMII_RXD0 13
+#define MUXF_L2SW_P0_MAC_RMII_RXD1 14
+#define MUXF_L2SW_P0_MAC_RMII_RXER 15
+#define MUXF_L2SW_P1_MAC_RMII_TXEN 16
+#define MUXF_L2SW_P1_MAC_RMII_TXD0 17
+#define MUXF_L2SW_P1_MAC_RMII_TXD1 18
+#define MUXF_L2SW_P1_MAC_RMII_CRSDV 19
+#define MUXF_L2SW_P1_MAC_RMII_RXD0 20
+#define MUXF_L2SW_P1_MAC_RMII_RXD1 21
+#define MUXF_L2SW_P1_MAC_RMII_RXER 22
+#define MUXF_DAISY_MODE 23
+#define MUXF_SDIO_CLK 24
+#define MUXF_SDIO_CMD 25
+#define MUXF_SDIO_D0 26
+#define MUXF_SDIO_D1 27
+#define MUXF_SDIO_D2 28
+#define MUXF_SDIO_D3 29
+#define MUXF_PWM0 30
+#define MUXF_PWM1 31
+#define MUXF_PWM2 32
+#define MUXF_PWM3 33
+#define MUXF_PWM4 34
+#define MUXF_PWM5 35
+#define MUXF_PWM6 36
+#define MUXF_PWM7 37
+#define MUXF_ICM0_D 38
+#define MUXF_ICM1_D 39
+#define MUXF_ICM2_D 40
+#define MUXF_ICM3_D 41
+#define MUXF_ICM0_CLK 42
+#define MUXF_ICM1_CLK 43
+#define MUXF_ICM2_CLK 44
+#define MUXF_ICM3_CLK 45
+#define MUXF_SPIM0_INT 46
+#define MUXF_SPIM0_CLK 47
+#define MUXF_SPIM0_EN 48
+#define MUXF_SPIM0_DO 49
+#define MUXF_SPIM0_DI 50
+#define MUXF_SPIM1_INT 51
+#define MUXF_SPIM1_CLK 52
+#define MUXF_SPIM1_EN 53
+#define MUXF_SPIM1_DO 54
+#define MUXF_SPIM1_DI 55
+#define MUXF_SPIM2_INT 56
+#define MUXF_SPIM2_CLK 57
+#define MUXF_SPIM2_EN 58
+#define MUXF_SPIM2_DO 59
+#define MUXF_SPIM2_DI 60
+#define MUXF_SPIM3_INT 61
+#define MUXF_SPIM3_CLK 62
+#define MUXF_SPIM3_EN 63
+#define MUXF_SPIM3_DO 64
+#define MUXF_SPIM3_DI 65
+#define MUXF_SPI0S_INT 66
+#define MUXF_SPI0S_CLK 67
+#define MUXF_SPI0S_EN 68
+#define MUXF_SPI0S_DO 69
+#define MUXF_SPI0S_DI 70
+#define MUXF_SPI1S_INT 71
+#define MUXF_SPI1S_CLK 72
+#define MUXF_SPI1S_EN 73
+#define MUXF_SPI1S_DO 74
+#define MUXF_SPI1S_DI 75
+#define MUXF_SPI2S_INT 76
+#define MUXF_SPI2S_CLK 77
+#define MUXF_SPI2S_EN 78
+#define MUXF_SPI2S_DO 79
+#define MUXF_SPI2S_DI 80
+#define MUXF_SPI3S_INT 81
+#define MUXF_SPI3S_CLK 82
+#define MUXF_SPI3S_EN 83
+#define MUXF_SPI3S_DO 84
+#define MUXF_SPI3S_DI 85
+#define MUXF_I2CM0_CLK 86
+#define MUXF_I2CM0_DAT 87
+#define MUXF_I2CM1_CLK 88
+#define MUXF_I2CM1_DAT 89
+#define MUXF_I2CM2_CLK 90
+#define MUXF_I2CM2_DAT 91
+#define MUXF_I2CM3_CLK 92
+#define MUXF_I2CM3_DAT 93
+#define MUXF_UA1_TX 94
+#define MUXF_UA1_RX 95
+#define MUXF_UA1_CTS 96
+#define MUXF_UA1_RTS 97
+#define MUXF_UA2_TX 98
+#define MUXF_UA2_RX 99
+#define MUXF_UA2_CTS 100
+#define MUXF_UA2_RTS 101
+#define MUXF_UA3_TX 102
+#define MUXF_UA3_RX 103
+#define MUXF_UA3_CTS 104
+#define MUXF_UA3_RTS 105
+#define MUXF_UA4_TX 106
+#define MUXF_UA4_RX 107
+#define MUXF_UA4_CTS 108
+#define MUXF_UA4_RTS 109
+#define MUXF_TIMER0_INT 110
+#define MUXF_TIMER1_INT 111
+#define MUXF_TIMER2_INT 112
+#define MUXF_TIMER3_INT 113
+#define MUXF_GPIO_INT0 114
+#define MUXF_GPIO_INT1 115
+#define MUXF_GPIO_INT2 116
+#define MUXF_GPIO_INT3 117
+#define MUXF_GPIO_INT4 118
+#define MUXF_GPIO_INT5 119
+#define MUXF_GPIO_INT6 120
+#define MUXF_GPIO_INT7 121
+
+/*
+ * Please don't change the order of the following defines.
+ * They follow the order of the entries in the 'sppctl_list_funcs'
+ * array in the Sunplus pinctrl driver.
+ */
+#define GROP_SPI_FLASH 122
+#define GROP_SPI_FLASH_4BIT 123
+#define GROP_SPI_NAND 124
+#define GROP_CARD0_EMMC 125
+#define GROP_SD_CARD 126
+#define GROP_UA0 127
+#define GROP_ACHIP_DEBUG 128
+#define GROP_ACHIP_UA2AXI 129
+#define GROP_FPGA_IFX 130
+#define GROP_HDMI_TX 131
+#define GROP_AUD_EXT_ADC_IFX0 132
+#define GROP_AUD_EXT_DAC_IFX0 133
+#define GROP_SPDIF_RX 134
+#define GROP_SPDIF_TX 135
+#define GROP_TDMTX_IFX0 136
+#define GROP_TDMRX_IFX0 137
+#define GROP_PDMRX_IFX0 138
+#define GROP_PCM_IEC_TX 139
+#define GROP_LCDIF 140
+#define GROP_DVD_DSP_DEBUG 141
+#define GROP_I2C_DEBUG 142
+#define GROP_I2C_SLAVE 143
+#define GROP_WAKEUP 144
+#define GROP_UART2AXI 145
+#define GROP_USB0_I2C 146
+#define GROP_USB1_I2C 147
+#define GROP_USB0_OTG 148
+#define GROP_USB1_OTG 149
+#define GROP_UPHY0_DEBUG 150
+#define GROP_UPHY1_DEBUG 151
+#define GROP_UPHY0_EXT 152
+#define GROP_PROBE_PORT 153
+
+#endif
diff --git a/include/dt-bindings/pinctrl/sppctl.h b/include/dt-bindings/pinctrl/sppctl.h
new file mode 100644
index 000000000000..50557265dbfc
--- /dev/null
+++ b/include/dt-bindings/pinctrl/sppctl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Sunplus dt-bindings Pinctrl header file
+ * Copyright (C) Sunplus Tech / Tibbo Tech.
+ * Author: Dvorkin Dmitry <dvorkin@tibbo.com>
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_SPPCTL_H__
+#define __DT_BINDINGS_PINCTRL_SPPCTL_H__
+
+#define IOP_G_MASTE (0x01 << 0)
+#define IOP_G_FIRST (0x01 << 1)
+
+#define SPPCTL_PCTL_G_PMUX (0x00 | IOP_G_MASTE)
+#define SPPCTL_PCTL_G_GPIO (IOP_G_FIRST | IOP_G_MASTE)
+#define SPPCTL_PCTL_G_IOPP (IOP_G_FIRST | 0x00)
+
+#define SPPCTL_PCTL_L_OUT (0x01 << 0) /* Output LOW */
+#define SPPCTL_PCTL_L_OU1 (0x01 << 1) /* Output HIGH */
+#define SPPCTL_PCTL_L_INV (0x01 << 2) /* Input Invert */
+#define SPPCTL_PCTL_L_ONV (0x01 << 3) /* Output Invert */
+#define SPPCTL_PCTL_L_ODR (0x01 << 4) /* Output Open Drain */
+
+/*
+ * Pack a pad description into one 32-bit value:
+ * pin# (8 bits), typ (8 bits), function (8 bits), flag (8 bits)
+ */
+#define SPPCTL_IOPAD(pin, typ, fun, flg) (((pin) << 24) | ((typ) << 16) | \
+ ((fun) << 8) | (flg))
+
+#endif
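
Combining this with the SP7021 function list above, one 32-bit cell fully describes a pad; a hypothetical assignment (the pad number 11 is made up, the other names come from these two headers):

    SPPCTL_IOPAD(11, SPPCTL_PCTL_G_PMUX, MUXF_UA1_TX, 0)
    /* = (11 << 24) | (1 << 16) | (94 << 8) | 0 = 0x0b015e00 */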
diff --git a/include/dt-bindings/pinctrl/starfive,jh7110-pinctrl.h b/include/dt-bindings/pinctrl/starfive,jh7110-pinctrl.h
new file mode 100644
index 000000000000..3865f0139639
--- /dev/null
+++ b/include/dt-bindings/pinctrl/starfive,jh7110-pinctrl.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2022 Emil Renner Berthing <kernel@esmil.dk>
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_STARFIVE_JH7110_H__
+#define __DT_BINDINGS_PINCTRL_STARFIVE_JH7110_H__
+
+/* sys_iomux pins */
+#define PAD_GPIO0 0
+#define PAD_GPIO1 1
+#define PAD_GPIO2 2
+#define PAD_GPIO3 3
+#define PAD_GPIO4 4
+#define PAD_GPIO5 5
+#define PAD_GPIO6 6
+#define PAD_GPIO7 7
+#define PAD_GPIO8 8
+#define PAD_GPIO9 9
+#define PAD_GPIO10 10
+#define PAD_GPIO11 11
+#define PAD_GPIO12 12
+#define PAD_GPIO13 13
+#define PAD_GPIO14 14
+#define PAD_GPIO15 15
+#define PAD_GPIO16 16
+#define PAD_GPIO17 17
+#define PAD_GPIO18 18
+#define PAD_GPIO19 19
+#define PAD_GPIO20 20
+#define PAD_GPIO21 21
+#define PAD_GPIO22 22
+#define PAD_GPIO23 23
+#define PAD_GPIO24 24
+#define PAD_GPIO25 25
+#define PAD_GPIO26 26
+#define PAD_GPIO27 27
+#define PAD_GPIO28 28
+#define PAD_GPIO29 29
+#define PAD_GPIO30 30
+#define PAD_GPIO31 31
+#define PAD_GPIO32 32
+#define PAD_GPIO33 33
+#define PAD_GPIO34 34
+#define PAD_GPIO35 35
+#define PAD_GPIO36 36
+#define PAD_GPIO37 37
+#define PAD_GPIO38 38
+#define PAD_GPIO39 39
+#define PAD_GPIO40 40
+#define PAD_GPIO41 41
+#define PAD_GPIO42 42
+#define PAD_GPIO43 43
+#define PAD_GPIO44 44
+#define PAD_GPIO45 45
+#define PAD_GPIO46 46
+#define PAD_GPIO47 47
+#define PAD_GPIO48 48
+#define PAD_GPIO49 49
+#define PAD_GPIO50 50
+#define PAD_GPIO51 51
+#define PAD_GPIO52 52
+#define PAD_GPIO53 53
+#define PAD_GPIO54 54
+#define PAD_GPIO55 55
+#define PAD_GPIO56 56
+#define PAD_GPIO57 57
+#define PAD_GPIO58 58
+#define PAD_GPIO59 59
+#define PAD_GPIO60 60
+#define PAD_GPIO61 61
+#define PAD_GPIO62 62
+#define PAD_GPIO63 63
+#define PAD_SD0_CLK 64
+#define PAD_SD0_CMD 65
+#define PAD_SD0_DATA0 66
+#define PAD_SD0_DATA1 67
+#define PAD_SD0_DATA2 68
+#define PAD_SD0_DATA3 69
+#define PAD_SD0_DATA4 70
+#define PAD_SD0_DATA5 71
+#define PAD_SD0_DATA6 72
+#define PAD_SD0_DATA7 73
+#define PAD_SD0_STRB 74
+#define PAD_GMAC1_MDC 75
+#define PAD_GMAC1_MDIO 76
+#define PAD_GMAC1_RXD0 77
+#define PAD_GMAC1_RXD1 78
+#define PAD_GMAC1_RXD2 79
+#define PAD_GMAC1_RXD3 80
+#define PAD_GMAC1_RXDV 81
+#define PAD_GMAC1_RXC 82
+#define PAD_GMAC1_TXD0 83
+#define PAD_GMAC1_TXD1 84
+#define PAD_GMAC1_TXD2 85
+#define PAD_GMAC1_TXD3 86
+#define PAD_GMAC1_TXEN 87
+#define PAD_GMAC1_TXC 88
+#define PAD_QSPI_SCLK 89
+#define PAD_QSPI_CS0 90
+#define PAD_QSPI_DATA0 91
+#define PAD_QSPI_DATA1 92
+#define PAD_QSPI_DATA2 93
+#define PAD_QSPI_DATA3 94
+
+/* aon_iomux pins */
+#define PAD_TESTEN 0
+#define PAD_RGPIO0 1
+#define PAD_RGPIO1 2
+#define PAD_RGPIO2 3
+#define PAD_RGPIO3 4
+#define PAD_RSTN 5
+#define PAD_GMAC0_MDC 6
+#define PAD_GMAC0_MDIO 7
+#define PAD_GMAC0_RXD0 8
+#define PAD_GMAC0_RXD1 9
+#define PAD_GMAC0_RXD2 10
+#define PAD_GMAC0_RXD3 11
+#define PAD_GMAC0_RXDV 12
+#define PAD_GMAC0_RXC 13
+#define PAD_GMAC0_TXD0 14
+#define PAD_GMAC0_TXD1 15
+#define PAD_GMAC0_TXD2 16
+#define PAD_GMAC0_TXD3 17
+#define PAD_GMAC0_TXEN 18
+#define PAD_GMAC0_TXC 19
+
+#define GPOUT_LOW 0
+#define GPOUT_HIGH 1
+
+#define GPOEN_ENABLE 0
+#define GPOEN_DISABLE 1
+
+#define GPI_NONE 255
+
+#endif
diff --git a/include/dt-bindings/pinctrl/stm32-pinfunc.h b/include/dt-bindings/pinctrl/stm32-pinfunc.h
index e6fb8ada3f4d..af3fd388329a 100644
--- a/include/dt-bindings/pinctrl/stm32-pinfunc.h
+++ b/include/dt-bindings/pinctrl/stm32-pinfunc.h
@@ -26,6 +26,7 @@
#define AF14 0xf
#define AF15 0x10
#define ANALOG 0x11
+#define RSVD 0x12
/* define Pins number*/
#define PIN_NO(port, line) (((port) - 'A') * 0x10 + (line))
@@ -37,6 +38,9 @@
#define STM32MP_PKG_AB 0x2
#define STM32MP_PKG_AC 0x4
#define STM32MP_PKG_AD 0x8
+#define STM32MP_PKG_AI 0x100
+#define STM32MP_PKG_AK 0x400
+#define STM32MP_PKG_AL 0x800
#endif /* _DT_BINDINGS_STM32_PINFUNC_H */
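
A quick check of the encodings touched here: PIN_NO folds a bank letter and line number into one index, and the package defines are bitmasks, so the new AI/AK/AL packages can be combined where a pin exists on several of them (example values made up):

    PIN_NO('C', 5)                                     /* = ('C' - 'A') * 0x10 + 5 = 0x25 */
    (STM32MP_PKG_AI | STM32MP_PKG_AK | STM32MP_PKG_AL) /* = 0x100 | 0x400 | 0x800 = 0xd00 */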
diff --git a/include/dt-bindings/power/allwinner,sun20i-d1-ppu.h b/include/dt-bindings/power/allwinner,sun20i-d1-ppu.h
new file mode 100644
index 000000000000..23cfb57256d6
--- /dev/null
+++ b/include/dt-bindings/power/allwinner,sun20i-d1-ppu.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_POWER_SUN20I_D1_PPU_H_
+#define _DT_BINDINGS_POWER_SUN20I_D1_PPU_H_
+
+#define PD_CPU 0
+#define PD_VE 1
+#define PD_DSP 2
+
+#endif /* _DT_BINDINGS_POWER_SUN20I_D1_PPU_H_ */
diff --git a/include/dt-bindings/power/allwinner,sun55i-a523-pck-600.h b/include/dt-bindings/power/allwinner,sun55i-a523-pck-600.h
new file mode 100644
index 000000000000..6b3d8ea7bb69
--- /dev/null
+++ b/include/dt-bindings/power/allwinner,sun55i-a523-pck-600.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_POWER_SUN55I_A523_PCK600_H_
+#define _DT_BINDINGS_POWER_SUN55I_A523_PCK600_H_
+
+#define PD_VE 0
+#define PD_GPU 1
+#define PD_VI 2
+#define PD_VO0 3
+#define PD_VO1 4
+#define PD_DE 5
+#define PD_NAND 6
+#define PD_PCIE 7
+
+#endif /* _DT_BINDINGS_POWER_SUN55I_A523_PCK600_H_ */
diff --git a/include/dt-bindings/power/allwinner,sun55i-a523-ppu.h b/include/dt-bindings/power/allwinner,sun55i-a523-ppu.h
new file mode 100644
index 000000000000..bc9aba73c19a
--- /dev/null
+++ b/include/dt-bindings/power/allwinner,sun55i-a523-ppu.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_POWER_SUN55I_A523_PPU_H_
+#define _DT_BINDINGS_POWER_SUN55I_A523_PPU_H_
+
+#define PD_DSP 0
+#define PD_NPU 1
+#define PD_AUDIO 2
+#define PD_SRAM 3
+#define PD_RISCV 4
+
+#endif /* _DT_BINDINGS_POWER_SUN55I_A523_PPU_H_ */
diff --git a/include/dt-bindings/power/allwinner,sun8i-v853-ppu.h b/include/dt-bindings/power/allwinner,sun8i-v853-ppu.h
new file mode 100644
index 000000000000..b1c18a490613
--- /dev/null
+++ b/include/dt-bindings/power/allwinner,sun8i-v853-ppu.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_POWER_SUN8I_V853_PPU_H_
+#define _DT_BINDINGS_POWER_SUN8I_V853_PPU_H_
+
+#define PD_RISCV 0
+#define PD_NPU 1
+#define PD_VE 2
+
+#endif
diff --git a/include/dt-bindings/power/amlogic,a4-pwrc.h b/include/dt-bindings/power/amlogic,a4-pwrc.h
new file mode 100644
index 000000000000..bd2f9c558d22
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,a4-pwrc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_A4_POWER_H
+#define _DT_BINDINGS_AMLOGIC_A4_POWER_H
+
+#define PWRC_A4_AUDIO_ID 0
+#define PWRC_A4_SDIOA_ID 1
+#define PWRC_A4_EMMC_ID 2
+#define PWRC_A4_USB_COMB_ID 3
+#define PWRC_A4_ETH_ID 4
+#define PWRC_A4_VOUT_ID 5
+#define PWRC_A4_AUDIO_PDM_ID 6
+#define PWRC_A4_DMC_ID 7
+#define PWRC_A4_SYS_WRAP_ID 8
+#define PWRC_A4_AO_I2C_S_ID 9
+#define PWRC_A4_AO_UART_ID 10
+#define PWRC_A4_AO_IR_ID 11
+
+#endif
diff --git a/include/dt-bindings/power/amlogic,a5-pwrc.h b/include/dt-bindings/power/amlogic,a5-pwrc.h
new file mode 100644
index 000000000000..3a6f53eb959f
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,a5-pwrc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2024 Amlogic, Inc. All rights reserved
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_A5_POWER_H
+#define _DT_BINDINGS_AMLOGIC_A5_POWER_H
+
+#define PWRC_A5_NNA_ID 0
+#define PWRC_A5_AUDIO_ID 1
+#define PWRC_A5_SDIOA_ID 2
+#define PWRC_A5_EMMC_ID 3
+#define PWRC_A5_USB_COMB_ID 4
+#define PWRC_A5_ETH_ID 5
+#define PWRC_A5_RSA_ID 6
+#define PWRC_A5_AUDIO_PDM_ID 7
+#define PWRC_A5_DMC_ID 8
+#define PWRC_A5_SYS_WRAP_ID 9
+#define PWRC_A5_DSPA_ID 10
+
+#endif
diff --git a/include/dt-bindings/power/amlogic,c3-pwrc.h b/include/dt-bindings/power/amlogic,c3-pwrc.h
new file mode 100644
index 000000000000..61759df4b2e7
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,c3-pwrc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc.
+ * Author: hongyu chen1 <hongyu.chen1@amlogic.com>
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_C3_POWER_H
+#define _DT_BINDINGS_AMLOGIC_C3_POWER_H
+
+#define PWRC_C3_NNA_ID 0
+#define PWRC_C3_AUDIO_ID 1
+#define PWRC_C3_RESV_SEC_ID 2
+#define PWRC_C3_SDIOA_ID 3
+#define PWRC_C3_EMMC_ID 4
+#define PWRC_C3_USB_COMB_ID 5
+#define PWRC_C3_SDCARD_ID 6
+#define PWRC_C3_ETH_ID 7
+#define PWRC_C3_RESV0_ID 8
+#define PWRC_C3_GE2D_ID 9
+#define PWRC_C3_CVE_ID 10
+#define PWRC_C3_GDC_WRAP_ID 11
+#define PWRC_C3_ISP_TOP_ID 12
+#define PWRC_C3_MIPI_ISP_WRAP_ID 13
+#define PWRC_C3_VCODEC_ID 14
+
+#endif
diff --git a/include/dt-bindings/power/amlogic,s6-pwrc.h b/include/dt-bindings/power/amlogic,s6-pwrc.h
new file mode 100644
index 000000000000..2c005864ae73
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,s6-pwrc.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2025 Amlogic, Inc. All rights reserved
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_S6_POWER_H
+#define _DT_BINDINGS_AMLOGIC_S6_POWER_H
+
+#define PWRC_S6_DSPA_ID 0
+#define PWRC_S6_DOS_HEVC_ID 1
+#define PWRC_S6_DOS_VDEC_ID 2
+#define PWRC_S6_VPU_HDMI_ID 3
+#define PWRC_S6_U2DRD_ID 4
+#define PWRC_S6_U3DRD_ID 5
+#define PWRC_S6_SD_EMMC_C_ID 6
+#define PWRC_S6_GE2D_ID 7
+#define PWRC_S6_AMFC_ID 8
+#define PWRC_S6_VC9000E_ID 9
+#define PWRC_S6_DEWARP_ID 10
+#define PWRC_S6_VICP_ID 11
+#define PWRC_S6_SD_EMMC_A_ID 12
+#define PWRC_S6_SD_EMMC_B_ID 13
+#define PWRC_S6_ETH_ID 14
+#define PWRC_S6_PCIE_ID 15
+#define PWRC_S6_NNA_4T_ID 16
+#define PWRC_S6_AUDIO_ID 17
+#define PWRC_S6_AUCPU_ID 18
+#define PWRC_S6_ADAPT_ID 19
+
+#endif
diff --git a/include/dt-bindings/power/amlogic,s7-pwrc.h b/include/dt-bindings/power/amlogic,s7-pwrc.h
new file mode 100644
index 000000000000..3f21d095f784
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,s7-pwrc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2025 Amlogic, Inc. All rights reserved
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_S7_POWER_H
+#define _DT_BINDINGS_AMLOGIC_S7_POWER_H
+
+#define PWRC_S7_DOS_HEVC_ID 0
+#define PWRC_S7_DOS_VDEC_ID 1
+#define PWRC_S7_VPU_HDMI_ID 2
+#define PWRC_S7_USB_COMB_ID 3
+#define PWRC_S7_SD_EMMC_C_ID 4
+#define PWRC_S7_GE2D_ID 5
+#define PWRC_S7_SD_EMMC_A_ID 6
+#define PWRC_S7_SD_EMMC_B_ID 7
+#define PWRC_S7_ETH_ID 8
+#define PWRC_S7_AUCPU_ID 9
+#define PWRC_S7_AUDIO_ID 10
+
+#endif
diff --git a/include/dt-bindings/power/amlogic,s7d-pwrc.h b/include/dt-bindings/power/amlogic,s7d-pwrc.h
new file mode 100644
index 000000000000..c6998553670a
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,s7d-pwrc.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2025 Amlogic, Inc. All rights reserved
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_S7D_POWER_H
+#define _DT_BINDINGS_AMLOGIC_S7D_POWER_H
+
+#define PWRC_S7D_DOS_HCODEC_ID 0
+#define PWRC_S7D_DOS_HEVC_ID 1
+#define PWRC_S7D_DOS_VDEC_ID 2
+#define PWRC_S7D_VPU_HDMI_ID 3
+#define PWRC_S7D_USB_U2DRD_ID 4
+#define PWRC_S7D_USB_U2H_ID 5
+#define PWRC_S7D_SSD_EMMC_C_ID 6
+#define PWRC_S7D_GE2D_ID 7
+#define PWRC_S7D_AMFC_ID 8
+#define PWRC_S7D_EMMC_A_ID 9
+#define PWRC_S7D_EMMC_B_ID 10
+#define PWRC_S7D_ETH_ID 11
+#define PWRC_S7D_AUCPU_ID 12
+#define PWRC_S7D_AUDIO_ID 13
+#define PWRC_S7D_SRAMA_ID 14
+#define PWRC_S7D_DMC0_ID 15
+#define PWRC_S7D_DMC1_ID 16
+#define PWRC_S7D_DDR_ID 17
+
+#endif
diff --git a/include/dt-bindings/power/amlogic,t7-pwrc.h b/include/dt-bindings/power/amlogic,t7-pwrc.h
new file mode 100644
index 000000000000..1f1f2739cc26
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,t7-pwrc.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc.
+ * Author: Hongyu Chen <hongyu.chen1@amlogic.com>
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_T7_POWER_H
+#define _DT_BINDINGS_AMLOGIC_T7_POWER_H
+
+#define PWRC_T7_DSPA_ID 0
+#define PWRC_T7_DSPB_ID 1
+#define PWRC_T7_DOS_HCODEC_ID 2
+#define PWRC_T7_DOS_HEVC_ID 3
+#define PWRC_T7_DOS_VDEC_ID 4
+#define PWRC_T7_DOS_WAVE_ID 5
+#define PWRC_T7_VPU_HDMI_ID 6
+#define PWRC_T7_USB_COMB_ID 7
+#define PWRC_T7_PCIE_ID 8
+#define PWRC_T7_GE2D_ID 9
+#define PWRC_T7_SRAMA_ID 10
+#define PWRC_T7_SRAMB_ID 11
+#define PWRC_T7_HDMIRX_ID 12
+#define PWRC_T7_VI_CLK1_ID 13
+#define PWRC_T7_VI_CLK2_ID 14
+#define PWRC_T7_ETH_ID 15
+#define PWRC_T7_ISP_ID 16
+#define PWRC_T7_MIPI_ISP_ID 17
+#define PWRC_T7_GDC_ID 18
+#define PWRC_T7_CVE_ID 18
+#define PWRC_T7_DEWARP_ID 19
+#define PWRC_T7_SDIO_A_ID 20
+#define PWRC_T7_SDIO_B_ID 21
+#define PWRC_T7_EMMC_ID 22
+#define PWRC_T7_MALI_SC0_ID 23
+#define PWRC_T7_MALI_SC1_ID 24
+#define PWRC_T7_MALI_SC2_ID 25
+#define PWRC_T7_MALI_SC3_ID 26
+#define PWRC_T7_MALI_TOP_ID 27
+#define PWRC_T7_NNA_CORE0_ID 28
+#define PWRC_T7_NNA_CORE1_ID 29
+#define PWRC_T7_NNA_CORE2_ID 30
+#define PWRC_T7_NNA_CORE3_ID 31
+#define PWRC_T7_NNA_TOP_ID 32
+#define PWRC_T7_DDR0_ID 33
+#define PWRC_T7_DDR1_ID 34
+#define PWRC_T7_DMC0_ID 35
+#define PWRC_T7_DMC1_ID 36
+#define PWRC_T7_NOC_ID 37
+#define PWRC_T7_NIC2_ID 38
+#define PWRC_T7_NIC3_ID 39
+#define PWRC_T7_CCI_ID 40
+#define PWRC_T7_MIPI_DSI0_ID 41
+#define PWRC_T7_SPICC0_ID 42
+#define PWRC_T7_SPICC1_ID 43
+#define PWRC_T7_SPICC2_ID 44
+#define PWRC_T7_SPICC3_ID 45
+#define PWRC_T7_SPICC4_ID 46
+#define PWRC_T7_SPICC5_ID 47
+#define PWRC_T7_EDP0_ID 48
+#define PWRC_T7_EDP1_ID 49
+#define PWRC_T7_MIPI_DSI1_ID 50
+#define PWRC_T7_AUDIO_ID 51
+
+#endif
diff --git a/include/dt-bindings/power/fsl,imx93-power.h b/include/dt-bindings/power/fsl,imx93-power.h
new file mode 100644
index 000000000000..17f9f015bf7d
--- /dev/null
+++ b/include/dt-bindings/power/fsl,imx93-power.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright 2022 NXP
+ */
+
+#ifndef __DT_BINDINGS_IMX93_POWER_H__
+#define __DT_BINDINGS_IMX93_POWER_H__
+
+#define IMX93_MEDIABLK_PD_MIPI_DSI 0
+#define IMX93_MEDIABLK_PD_MIPI_CSI 1
+#define IMX93_MEDIABLK_PD_PXP 2
+#define IMX93_MEDIABLK_PD_LCDIF 3
+#define IMX93_MEDIABLK_PD_ISI 4
+
+#endif
diff --git a/include/dt-bindings/power/imx8mm-power.h b/include/dt-bindings/power/imx8mm-power.h
new file mode 100644
index 000000000000..648938f24c8e
--- /dev/null
+++ b/include/dt-bindings/power/imx8mm-power.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2020 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#ifndef __DT_BINDINGS_IMX8MM_POWER_H__
+#define __DT_BINDINGS_IMX8MM_POWER_H__
+
+#define IMX8MM_POWER_DOMAIN_HSIOMIX 0
+#define IMX8MM_POWER_DOMAIN_PCIE 1
+#define IMX8MM_POWER_DOMAIN_OTG1 2
+#define IMX8MM_POWER_DOMAIN_OTG2 3
+#define IMX8MM_POWER_DOMAIN_GPUMIX 4
+#define IMX8MM_POWER_DOMAIN_GPU 5
+#define IMX8MM_POWER_DOMAIN_VPUMIX 6
+#define IMX8MM_POWER_DOMAIN_VPUG1 7
+#define IMX8MM_POWER_DOMAIN_VPUG2 8
+#define IMX8MM_POWER_DOMAIN_VPUH1 9
+#define IMX8MM_POWER_DOMAIN_DISPMIX 10
+#define IMX8MM_POWER_DOMAIN_MIPI 11
+
+#define IMX8MM_VPUBLK_PD_G1 0
+#define IMX8MM_VPUBLK_PD_G2 1
+#define IMX8MM_VPUBLK_PD_H1 2
+
+#define IMX8MM_DISPBLK_PD_CSI_BRIDGE 0
+#define IMX8MM_DISPBLK_PD_LCDIF 1
+#define IMX8MM_DISPBLK_PD_MIPI_DSI 2
+#define IMX8MM_DISPBLK_PD_MIPI_CSI 3
+
+#endif
diff --git a/include/dt-bindings/power/imx8mn-power.h b/include/dt-bindings/power/imx8mn-power.h
new file mode 100644
index 000000000000..eedd0e581939
--- /dev/null
+++ b/include/dt-bindings/power/imx8mn-power.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2020 Compass Electronics Group, LLC
+ */
+
+#ifndef __DT_BINDINGS_IMX8MN_POWER_H__
+#define __DT_BINDINGS_IMX8MN_POWER_H__
+
+#define IMX8MN_POWER_DOMAIN_HSIOMIX 0
+#define IMX8MN_POWER_DOMAIN_OTG1 1
+#define IMX8MN_POWER_DOMAIN_GPUMIX 2
+#define IMX8MN_POWER_DOMAIN_DISPMIX 3
+#define IMX8MN_POWER_DOMAIN_MIPI 4
+
+#define IMX8MN_DISPBLK_PD_MIPI_DSI 0
+#define IMX8MN_DISPBLK_PD_MIPI_CSI 1
+#define IMX8MN_DISPBLK_PD_LCDIF 2
+#define IMX8MN_DISPBLK_PD_ISI 3
+
+#endif
diff --git a/include/dt-bindings/power/imx8mp-power.h b/include/dt-bindings/power/imx8mp-power.h
new file mode 100644
index 000000000000..2fe3c2abad13
--- /dev/null
+++ b/include/dt-bindings/power/imx8mp-power.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2020 Pengutronix, Sascha Hauer <kernel@pengutronix.de>
+ */
+
+#ifndef __DT_BINDINGS_IMX8MP_POWER_DOMAIN_POWER_H__
+#define __DT_BINDINGS_IMX8MP_POWER_DOMAIN_POWER_H__
+
+#define IMX8MP_POWER_DOMAIN_MIPI_PHY1 0
+#define IMX8MP_POWER_DOMAIN_PCIE_PHY 1
+#define IMX8MP_POWER_DOMAIN_USB1_PHY 2
+#define IMX8MP_POWER_DOMAIN_USB2_PHY 3
+#define IMX8MP_POWER_DOMAIN_MLMIX 4
+#define IMX8MP_POWER_DOMAIN_AUDIOMIX 5
+#define IMX8MP_POWER_DOMAIN_GPU2D 6
+#define IMX8MP_POWER_DOMAIN_GPUMIX 7
+#define IMX8MP_POWER_DOMAIN_VPUMIX 8
+#define IMX8MP_POWER_DOMAIN_GPU3D 9
+#define IMX8MP_POWER_DOMAIN_MEDIAMIX 10
+#define IMX8MP_POWER_DOMAIN_VPU_G1 11
+#define IMX8MP_POWER_DOMAIN_VPU_G2 12
+#define IMX8MP_POWER_DOMAIN_VPU_VC8000E 13
+#define IMX8MP_POWER_DOMAIN_HDMIMIX 14
+#define IMX8MP_POWER_DOMAIN_HDMI_PHY 15
+#define IMX8MP_POWER_DOMAIN_MIPI_PHY2 16
+#define IMX8MP_POWER_DOMAIN_HSIOMIX 17
+#define IMX8MP_POWER_DOMAIN_MEDIAMIX_ISPDWP 18
+
+#define IMX8MP_HSIOBLK_PD_USB 0
+#define IMX8MP_HSIOBLK_PD_USB_PHY1 1
+#define IMX8MP_HSIOBLK_PD_USB_PHY2 2
+#define IMX8MP_HSIOBLK_PD_PCIE 3
+#define IMX8MP_HSIOBLK_PD_PCIE_PHY 4
+
+#define IMX8MP_MEDIABLK_PD_MIPI_DSI_1 0
+#define IMX8MP_MEDIABLK_PD_MIPI_CSI2_1 1
+#define IMX8MP_MEDIABLK_PD_LCDIF_1 2
+#define IMX8MP_MEDIABLK_PD_ISI 3
+#define IMX8MP_MEDIABLK_PD_MIPI_CSI2_2 4
+#define IMX8MP_MEDIABLK_PD_LCDIF_2 5
+#define IMX8MP_MEDIABLK_PD_ISP 6
+#define IMX8MP_MEDIABLK_PD_DWE 7
+#define IMX8MP_MEDIABLK_PD_MIPI_DSI_2 8
+
+#define IMX8MP_HDMIBLK_PD_IRQSTEER 0
+#define IMX8MP_HDMIBLK_PD_LCDIF 1
+#define IMX8MP_HDMIBLK_PD_PAI 2
+#define IMX8MP_HDMIBLK_PD_PVI 3
+#define IMX8MP_HDMIBLK_PD_TRNG 4
+#define IMX8MP_HDMIBLK_PD_HDMI_TX 5
+#define IMX8MP_HDMIBLK_PD_HDMI_TX_PHY 6
+#define IMX8MP_HDMIBLK_PD_HDCP 7
+#define IMX8MP_HDMIBLK_PD_HRV 8
+
+#define IMX8MP_VPUBLK_PD_G1 0
+#define IMX8MP_VPUBLK_PD_G2 1
+#define IMX8MP_VPUBLK_PD_VC8000E 2
+
+#endif
diff --git a/include/dt-bindings/power/imx8mq-power.h b/include/dt-bindings/power/imx8mq-power.h
index 8a513bd9166e..9f7d0f1e7c32 100644
--- a/include/dt-bindings/power/imx8mq-power.h
+++ b/include/dt-bindings/power/imx8mq-power.h
@@ -18,4 +18,7 @@
#define IMX8M_POWER_DOMAIN_MIPI_CSI2 9
#define IMX8M_POWER_DOMAIN_PCIE2 10
+#define IMX8MQ_VPUBLK_PD_G1 0
+#define IMX8MQ_VPUBLK_PD_G2 1
+
#endif
diff --git a/include/dt-bindings/power/imx8ulp-power.h b/include/dt-bindings/power/imx8ulp-power.h
new file mode 100644
index 000000000000..a556b2e96df1
--- /dev/null
+++ b/include/dt-bindings/power/imx8ulp-power.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright 2021 NXP
+ */
+
+#ifndef __DT_BINDINGS_IMX8ULP_POWER_H__
+#define __DT_BINDINGS_IMX8ULP_POWER_H__
+
+#define IMX8ULP_PD_DMA1 0
+#define IMX8ULP_PD_FLEXSPI2 1
+#define IMX8ULP_PD_USB0 2
+#define IMX8ULP_PD_USDHC0 3
+#define IMX8ULP_PD_USDHC1 4
+#define IMX8ULP_PD_USDHC2_USB1 5
+#define IMX8ULP_PD_DCNANO 6
+#define IMX8ULP_PD_EPDC 7
+#define IMX8ULP_PD_DMA2 8
+#define IMX8ULP_PD_GPU2D 9
+#define IMX8ULP_PD_GPU3D 10
+#define IMX8ULP_PD_HIFI4 11
+#define IMX8ULP_PD_ISI 12
+#define IMX8ULP_PD_MIPI_CSI 13
+#define IMX8ULP_PD_MIPI_DSI 14
+#define IMX8ULP_PD_PXP 15
+
+#endif
diff --git a/include/dt-bindings/power/marvell,pxa1908-power.h b/include/dt-bindings/power/marvell,pxa1908-power.h
new file mode 100644
index 000000000000..19b088351af1
--- /dev/null
+++ b/include/dt-bindings/power/marvell,pxa1908-power.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Marvell PXA1908 power domains
+ *
+ * Copyright 2025, Duje Mihanović <duje@dujemihanovic.xyz>
+ */
+
+#ifndef __DTS_MARVELL_PXA1908_POWER_H
+#define __DTS_MARVELL_PXA1908_POWER_H
+
+#define PXA1908_POWER_DOMAIN_VPU 0
+#define PXA1908_POWER_DOMAIN_GPU 1
+#define PXA1908_POWER_DOMAIN_GPU2D 2
+#define PXA1908_POWER_DOMAIN_DSI 3
+#define PXA1908_POWER_DOMAIN_ISP 4
+
+#endif
diff --git a/include/dt-bindings/power/mediatek,mt6735-power-controller.h b/include/dt-bindings/power/mediatek,mt6735-power-controller.h
new file mode 100644
index 000000000000..6957075fcb9e
--- /dev/null
+++ b/include/dt-bindings/power/mediatek,mt6735-power-controller.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_POWER_MT6735_POWER_CONTROLLER_H
+#define _DT_BINDINGS_POWER_MT6735_POWER_CONTROLLER_H
+
+#define MT6735_POWER_DOMAIN_MD1 0
+#define MT6735_POWER_DOMAIN_CONN 1
+#define MT6735_POWER_DOMAIN_DIS 2
+#define MT6735_POWER_DOMAIN_MFG 3
+#define MT6735_POWER_DOMAIN_ISP 4
+#define MT6735_POWER_DOMAIN_VDE 5
+#define MT6735_POWER_DOMAIN_VEN 6
+
+#endif
diff --git a/include/dt-bindings/power/mediatek,mt6893-power.h b/include/dt-bindings/power/mediatek,mt6893-power.h
new file mode 100644
index 000000000000..aeab51bb2ad8
--- /dev/null
+++ b/include/dt-bindings/power/mediatek,mt6893-power.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2025 Collabora Ltd
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT6893_POWER_H
+#define _DT_BINDINGS_POWER_MT6893_POWER_H
+
+#define MT6893_POWER_DOMAIN_CONN 0
+#define MT6893_POWER_DOMAIN_MFG0 1
+#define MT6893_POWER_DOMAIN_MFG1 2
+#define MT6893_POWER_DOMAIN_MFG2 3
+#define MT6893_POWER_DOMAIN_MFG3 4
+#define MT6893_POWER_DOMAIN_MFG4 5
+#define MT6893_POWER_DOMAIN_MFG5 6
+#define MT6893_POWER_DOMAIN_MFG6 7
+#define MT6893_POWER_DOMAIN_ISP 8
+#define MT6893_POWER_DOMAIN_ISP2 9
+#define MT6893_POWER_DOMAIN_IPE 10
+#define MT6893_POWER_DOMAIN_VDEC0 11
+#define MT6893_POWER_DOMAIN_VDEC1 12
+#define MT6893_POWER_DOMAIN_VENC0 13
+#define MT6893_POWER_DOMAIN_VENC1 14
+#define MT6893_POWER_DOMAIN_MDP 15
+#define MT6893_POWER_DOMAIN_DISP 16
+#define MT6893_POWER_DOMAIN_AUDIO 17
+#define MT6893_POWER_DOMAIN_ADSP 18
+#define MT6893_POWER_DOMAIN_CAM 19
+#define MT6893_POWER_DOMAIN_CAM_RAWA 20
+#define MT6893_POWER_DOMAIN_CAM_RAWB 21
+#define MT6893_POWER_DOMAIN_CAM_RAWC 22
+#define MT6893_POWER_DOMAIN_DP_TX 23
+
+#endif /* _DT_BINDINGS_POWER_MT6893_POWER_H */
diff --git a/include/dt-bindings/power/mediatek,mt8188-power.h b/include/dt-bindings/power/mediatek,mt8188-power.h
new file mode 100644
index 000000000000..57e75cf3aa2c
--- /dev/null
+++ b/include/dt-bindings/power/mediatek,mt8188-power.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Garmin Chang <garmin.chang@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8188_POWER_H
+#define _DT_BINDINGS_POWER_MT8188_POWER_H
+
+#define MT8188_POWER_DOMAIN_MFG0 0
+#define MT8188_POWER_DOMAIN_MFG1 1
+#define MT8188_POWER_DOMAIN_MFG2 2
+#define MT8188_POWER_DOMAIN_MFG3 3
+#define MT8188_POWER_DOMAIN_MFG4 4
+#define MT8188_POWER_DOMAIN_PEXTP_MAC_P0 5
+#define MT8188_POWER_DOMAIN_PEXTP_PHY_TOP 6
+#define MT8188_POWER_DOMAIN_CSIRX_TOP 7
+#define MT8188_POWER_DOMAIN_ETHER 8
+#define MT8188_POWER_DOMAIN_HDMI_TX 9
+#define MT8188_POWER_DOMAIN_ADSP_AO 10
+#define MT8188_POWER_DOMAIN_ADSP_INFRA 11
+#define MT8188_POWER_DOMAIN_ADSP 12
+#define MT8188_POWER_DOMAIN_AUDIO 13
+#define MT8188_POWER_DOMAIN_AUDIO_ASRC 14
+#define MT8188_POWER_DOMAIN_VPPSYS0 15
+#define MT8188_POWER_DOMAIN_VDOSYS0 16
+#define MT8188_POWER_DOMAIN_VDOSYS1 17
+#define MT8188_POWER_DOMAIN_DP_TX 18
+#define MT8188_POWER_DOMAIN_EDP_TX 19
+#define MT8188_POWER_DOMAIN_VPPSYS1 20
+#define MT8188_POWER_DOMAIN_WPE 21
+#define MT8188_POWER_DOMAIN_VDEC0 22
+#define MT8188_POWER_DOMAIN_VDEC1 23
+#define MT8188_POWER_DOMAIN_VENC 24
+#define MT8188_POWER_DOMAIN_IMG_VCORE 25
+#define MT8188_POWER_DOMAIN_IMG_MAIN 26
+#define MT8188_POWER_DOMAIN_DIP 27
+#define MT8188_POWER_DOMAIN_IPE 28
+#define MT8188_POWER_DOMAIN_CAM_VCORE 29
+#define MT8188_POWER_DOMAIN_CAM_MAIN 30
+#define MT8188_POWER_DOMAIN_CAM_SUBA 31
+#define MT8188_POWER_DOMAIN_CAM_SUBB 32
+
+#endif /* _DT_BINDINGS_POWER_MT8188_POWER_H */
diff --git a/include/dt-bindings/power/mediatek,mt8196-power.h b/include/dt-bindings/power/mediatek,mt8196-power.h
new file mode 100644
index 000000000000..0f622a93c807
--- /dev/null
+++ b/include/dt-bindings/power/mediatek,mt8196-power.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2025 Collabora Ltd
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8196_POWER_H
+#define _DT_BINDINGS_POWER_MT8196_POWER_H
+
+/* SCPSYS Secure Power Manager - Direct Control */
+#define MT8196_POWER_DOMAIN_MD 0
+#define MT8196_POWER_DOMAIN_CONN 1
+#define MT8196_POWER_DOMAIN_SSUSB_P0 2
+#define MT8196_POWER_DOMAIN_SSUSB_DP_PHY_P0 3
+#define MT8196_POWER_DOMAIN_SSUSB_P1 4
+#define MT8196_POWER_DOMAIN_SSUSB_P23 5
+#define MT8196_POWER_DOMAIN_SSUSB_PHY_P2 6
+#define MT8196_POWER_DOMAIN_PEXTP_MAC0 7
+#define MT8196_POWER_DOMAIN_PEXTP_MAC1 8
+#define MT8196_POWER_DOMAIN_PEXTP_MAC2 9
+#define MT8196_POWER_DOMAIN_PEXTP_PHY0 10
+#define MT8196_POWER_DOMAIN_PEXTP_PHY1 11
+#define MT8196_POWER_DOMAIN_PEXTP_PHY2 12
+#define MT8196_POWER_DOMAIN_AUDIO 13
+#define MT8196_POWER_DOMAIN_ADSP_TOP_DORMANT 14
+#define MT8196_POWER_DOMAIN_ADSP_INFRA 15
+#define MT8196_POWER_DOMAIN_ADSP_AO 16
+
+/* SCPSYS Secure Power Manager - HW Voter */
+#define MT8196_POWER_DOMAIN_MM_PROC_DORMANT 0
+#define MT8196_POWER_DOMAIN_SSR 1
+
+/* HFRPSYS MultiMedia Power Control (MMPC) - HW Voter */
+#define MT8196_POWER_DOMAIN_VDE0 0
+#define MT8196_POWER_DOMAIN_VDE1 1
+#define MT8196_POWER_DOMAIN_VDE_VCORE0 2
+#define MT8196_POWER_DOMAIN_VEN0 3
+#define MT8196_POWER_DOMAIN_VEN1 4
+#define MT8196_POWER_DOMAIN_VEN2 5
+#define MT8196_POWER_DOMAIN_DISP_VCORE 6
+#define MT8196_POWER_DOMAIN_DIS0_DORMANT 7
+#define MT8196_POWER_DOMAIN_DIS1_DORMANT 8
+#define MT8196_POWER_DOMAIN_OVL0_DORMANT 9
+#define MT8196_POWER_DOMAIN_OVL1_DORMANT 10
+#define MT8196_POWER_DOMAIN_DISP_EDPTX_DORMANT 11
+#define MT8196_POWER_DOMAIN_DISP_DPTX_DORMANT 12
+#define MT8196_POWER_DOMAIN_MML0_SHUTDOWN 13
+#define MT8196_POWER_DOMAIN_MML1_SHUTDOWN 14
+#define MT8196_POWER_DOMAIN_MM_INFRA0 15
+#define MT8196_POWER_DOMAIN_MM_INFRA1 16
+#define MT8196_POWER_DOMAIN_MM_INFRA_AO 17
+#define MT8196_POWER_DOMAIN_CSI_BS_RX 18
+#define MT8196_POWER_DOMAIN_CSI_LS_RX 19
+#define MT8196_POWER_DOMAIN_DSI_PHY0 20
+#define MT8196_POWER_DOMAIN_DSI_PHY1 21
+#define MT8196_POWER_DOMAIN_DSI_PHY2 22
+
+#endif /* _DT_BINDINGS_POWER_MT8196_POWER_H */
diff --git a/include/dt-bindings/power/mediatek,mt8365-power.h b/include/dt-bindings/power/mediatek,mt8365-power.h
new file mode 100644
index 000000000000..e6cfd0ec7871
--- /dev/null
+++ b/include/dt-bindings/power/mediatek,mt8365-power.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8365_POWER_H
+#define _DT_BINDINGS_POWER_MT8365_POWER_H
+
+#define MT8365_POWER_DOMAIN_MM 0
+#define MT8365_POWER_DOMAIN_CONN 1
+#define MT8365_POWER_DOMAIN_MFG 2
+#define MT8365_POWER_DOMAIN_AUDIO 3
+#define MT8365_POWER_DOMAIN_CAM 4
+#define MT8365_POWER_DOMAIN_DSP 5
+#define MT8365_POWER_DOMAIN_VDEC 6
+#define MT8365_POWER_DOMAIN_VENC 7
+#define MT8365_POWER_DOMAIN_APU 8
+
+#endif /* _DT_BINDINGS_POWER_MT8365_POWER_H */
diff --git a/include/dt-bindings/power/meson-a1-power.h b/include/dt-bindings/power/meson-a1-power.h
index 6cf50bfb8ccf..724c370d6853 100644
--- a/include/dt-bindings/power/meson-a1-power.h
+++ b/include/dt-bindings/power/meson-a1-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 Amlogic, Inc.
* Author: Jianxin Pan <jianxin.pan@amlogic.com>
diff --git a/include/dt-bindings/power/meson-axg-power.h b/include/dt-bindings/power/meson-axg-power.h
index e5243884b249..ace0e468ce21 100644
--- a/include/dt-bindings/power/meson-axg-power.h
+++ b/include/dt-bindings/power/meson-axg-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2020 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
diff --git a/include/dt-bindings/power/meson-g12a-power.h b/include/dt-bindings/power/meson-g12a-power.h
index bb5e67a842de..01fd0ac4dd08 100644
--- a/include/dt-bindings/power/meson-g12a-power.h
+++ b/include/dt-bindings/power/meson-g12a-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
@@ -9,5 +9,7 @@
#define PWRC_G12A_VPU_ID 0
#define PWRC_G12A_ETH_ID 1
+#define PWRC_G12A_NNA_ID 2
+#define PWRC_G12A_ISP_ID 3
#endif
diff --git a/include/dt-bindings/power/meson-gxbb-power.h b/include/dt-bindings/power/meson-gxbb-power.h
index 1262dac696c0..8d0b32b6c02c 100644
--- a/include/dt-bindings/power/meson-gxbb-power.h
+++ b/include/dt-bindings/power/meson-gxbb-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
diff --git a/include/dt-bindings/power/meson-s4-power.h b/include/dt-bindings/power/meson-s4-power.h
new file mode 100644
index 000000000000..f210a524a592
--- /dev/null
+++ b/include/dt-bindings/power/meson-s4-power.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2021 Amlogic, Inc.
+ * Author: Shunzhou Jiang <shunzhou.jiang@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_S4_POWER_H
+#define _DT_BINDINGS_MESON_S4_POWER_H
+
+#define PWRC_S4_DOS_HEVC_ID 0
+#define PWRC_S4_DOS_VDEC_ID 1
+#define PWRC_S4_VPU_HDMI_ID 2
+#define PWRC_S4_USB_COMB_ID 3
+#define PWRC_S4_GE2D_ID 4
+#define PWRC_S4_ETH_ID 5
+#define PWRC_S4_DEMOD_ID 6
+#define PWRC_S4_AUDIO_ID 7
+
+#endif
diff --git a/include/dt-bindings/power/meson-sm1-power.h b/include/dt-bindings/power/meson-sm1-power.h
index a020ab00c134..d78e710dbfff 100644
--- a/include/dt-bindings/power/meson-sm1-power.h
+++ b/include/dt-bindings/power/meson-sm1-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
diff --git a/include/dt-bindings/power/meson8-power.h b/include/dt-bindings/power/meson8-power.h
index dd8b2ddb82a7..7a55ba2cd22e 100644
--- a/include/dt-bindings/power/meson8-power.h
+++ b/include/dt-bindings/power/meson8-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
*/
diff --git a/include/dt-bindings/power/mt6795-power.h b/include/dt-bindings/power/mt6795-power.h
new file mode 100644
index 000000000000..b0fc26cb1da4
--- /dev/null
+++ b/include/dt-bindings/power/mt6795-power.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+#ifndef _DT_BINDINGS_POWER_MT6795_POWER_H
+#define _DT_BINDINGS_POWER_MT6795_POWER_H
+
+#define MT6795_POWER_DOMAIN_MM 0
+#define MT6795_POWER_DOMAIN_VDEC 1
+#define MT6795_POWER_DOMAIN_VENC 2
+#define MT6795_POWER_DOMAIN_ISP 3
+#define MT6795_POWER_DOMAIN_MJC 4
+#define MT6795_POWER_DOMAIN_AUDIO 5
+#define MT6795_POWER_DOMAIN_MFG_ASYNC 6
+#define MT6795_POWER_DOMAIN_MFG_2D 7
+#define MT6795_POWER_DOMAIN_MFG 8
+#define MT6795_POWER_DOMAIN_MODEM 9
+
+#endif /* _DT_BINDINGS_POWER_MT6795_POWER_H */
diff --git a/include/dt-bindings/power/mt6797-power.h b/include/dt-bindings/power/mt6797-power.h
index a60c1d81cf75..bd451d860e6a 100644
--- a/include/dt-bindings/power/mt6797-power.h
+++ b/include/dt-bindings/power/mt6797-power.h
@@ -1,14 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017 MediaTek Inc.
* Author: Mars.C <mars.cheng@mediatek.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _DT_BINDINGS_POWER_MT6797_POWER_H
diff --git a/include/dt-bindings/power/mt8186-power.h b/include/dt-bindings/power/mt8186-power.h
new file mode 100644
index 000000000000..429f7197f6b6
--- /dev/null
+++ b/include/dt-bindings/power/mt8186-power.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8186_POWER_H
+#define _DT_BINDINGS_POWER_MT8186_POWER_H
+
+#define MT8186_POWER_DOMAIN_MFG0 0
+#define MT8186_POWER_DOMAIN_MFG1 1
+#define MT8186_POWER_DOMAIN_MFG2 2
+#define MT8186_POWER_DOMAIN_MFG3 3
+#define MT8186_POWER_DOMAIN_SSUSB 4
+#define MT8186_POWER_DOMAIN_SSUSB_P1 5
+#define MT8186_POWER_DOMAIN_DIS 6
+#define MT8186_POWER_DOMAIN_IMG 7
+#define MT8186_POWER_DOMAIN_IMG2 8
+#define MT8186_POWER_DOMAIN_IPE 9
+#define MT8186_POWER_DOMAIN_CAM 10
+#define MT8186_POWER_DOMAIN_CAM_RAWA 11
+#define MT8186_POWER_DOMAIN_CAM_RAWB 12
+#define MT8186_POWER_DOMAIN_VENC 13
+#define MT8186_POWER_DOMAIN_VDEC 14
+#define MT8186_POWER_DOMAIN_WPE 15
+#define MT8186_POWER_DOMAIN_CONN_ON 16
+#define MT8186_POWER_DOMAIN_CSIRX_TOP 17
+#define MT8186_POWER_DOMAIN_ADSP_AO 18
+#define MT8186_POWER_DOMAIN_ADSP_INFRA 19
+#define MT8186_POWER_DOMAIN_ADSP_TOP 20
+
+#endif /* _DT_BINDINGS_POWER_MT8186_POWER_H */
diff --git a/include/dt-bindings/power/mt8195-power.h b/include/dt-bindings/power/mt8195-power.h
new file mode 100644
index 000000000000..b20ca4b3e3a8
--- /dev/null
+++ b/include/dt-bindings/power/mt8195-power.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_POWER_MT8195_POWER_H
+#define _DT_BINDINGS_POWER_MT8195_POWER_H
+
+#define MT8195_POWER_DOMAIN_PCIE_MAC_P0 0
+#define MT8195_POWER_DOMAIN_PCIE_MAC_P1 1
+#define MT8195_POWER_DOMAIN_PCIE_PHY 2
+#define MT8195_POWER_DOMAIN_SSUSB_PCIE_PHY 3
+#define MT8195_POWER_DOMAIN_CSI_RX_TOP 4
+#define MT8195_POWER_DOMAIN_ETHER 5
+#define MT8195_POWER_DOMAIN_ADSP 6
+#define MT8195_POWER_DOMAIN_AUDIO 7
+#define MT8195_POWER_DOMAIN_MFG0 8
+#define MT8195_POWER_DOMAIN_MFG1 9
+#define MT8195_POWER_DOMAIN_MFG2 10
+#define MT8195_POWER_DOMAIN_MFG3 11
+#define MT8195_POWER_DOMAIN_MFG4 12
+#define MT8195_POWER_DOMAIN_MFG5 13
+#define MT8195_POWER_DOMAIN_MFG6 14
+#define MT8195_POWER_DOMAIN_VPPSYS0 15
+#define MT8195_POWER_DOMAIN_VDOSYS0 16
+#define MT8195_POWER_DOMAIN_VPPSYS1 17
+#define MT8195_POWER_DOMAIN_VDOSYS1 18
+#define MT8195_POWER_DOMAIN_DP_TX 19
+#define MT8195_POWER_DOMAIN_EPD_TX 20
+#define MT8195_POWER_DOMAIN_HDMI_TX 21
+#define MT8195_POWER_DOMAIN_WPESYS 22
+#define MT8195_POWER_DOMAIN_VDEC0 23
+#define MT8195_POWER_DOMAIN_VDEC1 24
+#define MT8195_POWER_DOMAIN_VDEC2 25
+#define MT8195_POWER_DOMAIN_VENC 26
+#define MT8195_POWER_DOMAIN_VENC_CORE1 27
+#define MT8195_POWER_DOMAIN_IMG 28
+#define MT8195_POWER_DOMAIN_DIP 29
+#define MT8195_POWER_DOMAIN_IPE 30
+#define MT8195_POWER_DOMAIN_CAM 31
+#define MT8195_POWER_DOMAIN_CAM_RAWA 32
+#define MT8195_POWER_DOMAIN_CAM_RAWB 33
+#define MT8195_POWER_DOMAIN_CAM_MRAW 34
+
+#endif /* _DT_BINDINGS_POWER_MT8195_POWER_H */
diff --git a/include/dt-bindings/power/nvidia,tegra264-bpmp.h b/include/dt-bindings/power/nvidia,tegra264-bpmp.h
new file mode 100644
index 000000000000..2eef4a2a02b0
--- /dev/null
+++ b/include/dt-bindings/power/nvidia,tegra264-bpmp.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2022-2024, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef DT_BINDINGS_POWER_NVIDIA_TEGRA264_BPMP_H
+#define DT_BINDINGS_POWER_NVIDIA_TEGRA264_BPMP_H
+
+#define TEGRA264_POWER_DOMAIN_DISP 1
+#define TEGRA264_POWER_DOMAIN_AUD 2
+/* reserved 3:9 */
+#define TEGRA264_POWER_DOMAIN_XUSB_SS 10
+#define TEGRA264_POWER_DOMAIN_XUSB_DEV 11
+#define TEGRA264_POWER_DOMAIN_XUSB_HOST 12
+#define TEGRA264_POWER_DOMAIN_MGBE0 13
+#define TEGRA264_POWER_DOMAIN_MGBE1 14
+#define TEGRA264_POWER_DOMAIN_MGBE2 15
+#define TEGRA264_POWER_DOMAIN_MGBE3 16
+#define TEGRA264_POWER_DOMAIN_VI 17
+#define TEGRA264_POWER_DOMAIN_VIC 18
+#define TEGRA264_POWER_DOMAIN_ISP0 19
+#define TEGRA264_POWER_DOMAIN_ISP1 20
+#define TEGRA264_POWER_DOMAIN_PVA0 21
+#define TEGRA264_POWER_DOMAIN_GPU 22
+
+#endif /* DT_BINDINGS_POWER_NVIDIA_TEGRA264_BPMP_H */
diff --git a/include/dt-bindings/power/qcom,rpmhpd.h b/include/dt-bindings/power/qcom,rpmhpd.h
new file mode 100644
index 000000000000..50e7c886709d
--- /dev/null
+++ b/include/dt-bindings/power/qcom,rpmhpd.h
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_POWER_QCOM_RPMHPD_H
+#define _DT_BINDINGS_POWER_QCOM_RPMHPD_H
+
+/* Generic RPMH Power Domain Indexes */
+#define RPMHPD_CX 0
+#define RPMHPD_CX_AO 1
+#define RPMHPD_EBI 2
+#define RPMHPD_GFX 3
+#define RPMHPD_LCX 4
+#define RPMHPD_LMX 5
+#define RPMHPD_MMCX 6
+#define RPMHPD_MMCX_AO 7
+#define RPMHPD_MX 8
+#define RPMHPD_MX_AO 9
+#define RPMHPD_MXC 10
+#define RPMHPD_MXC_AO 11
+#define RPMHPD_MSS 12
+#define RPMHPD_NSP 13
+#define RPMHPD_NSP0 14
+#define RPMHPD_NSP1 15
+#define RPMHPD_QPHY 16
+#define RPMHPD_DDR 17
+#define RPMHPD_XO 18
+#define RPMHPD_NSP2 19
+#define RPMHPD_GMXC 20
+
+/* RPMh Power Domain performance levels */
+#define RPMH_REGULATOR_LEVEL_RETENTION 16
+#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D3 50
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D2_1 51
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D2 52
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D1_1 54
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D1 56
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_D0 60
+#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_P1 72
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_L0 76
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_L1 80
+#define RPMH_REGULATOR_LEVEL_LOW_SVS_L2 96
+#define RPMH_REGULATOR_LEVEL_SVS 128
+#define RPMH_REGULATOR_LEVEL_SVS_L0 144
+#define RPMH_REGULATOR_LEVEL_SVS_L1 192
+#define RPMH_REGULATOR_LEVEL_SVS_L2 224
+#define RPMH_REGULATOR_LEVEL_NOM 256
+#define RPMH_REGULATOR_LEVEL_NOM_L0 288
+#define RPMH_REGULATOR_LEVEL_NOM_L1 320
+#define RPMH_REGULATOR_LEVEL_NOM_L2 336
+#define RPMH_REGULATOR_LEVEL_TURBO 384
+#define RPMH_REGULATOR_LEVEL_TURBO_L0 400
+#define RPMH_REGULATOR_LEVEL_TURBO_L1 416
+#define RPMH_REGULATOR_LEVEL_TURBO_L2 432
+#define RPMH_REGULATOR_LEVEL_TURBO_L3 448
+#define RPMH_REGULATOR_LEVEL_TURBO_L4 452
+#define RPMH_REGULATOR_LEVEL_TURBO_L5 456
+#define RPMH_REGULATOR_LEVEL_SUPER_TURBO 464
+#define RPMH_REGULATOR_LEVEL_SUPER_TURBO_NO_CPR 480
+
+/*
+ * Platform-specific power domain bindings. Don't add new entries here, use
+ * RPMHPD_* above.
+ */
+
+/* SA8775P Power Domain Indexes */
+#define SA8775P_CX 0
+#define SA8775P_CX_AO 1
+#define SA8775P_DDR 2
+#define SA8775P_EBI 3
+#define SA8775P_GFX 4
+#define SA8775P_LCX 5
+#define SA8775P_LMX 6
+#define SA8775P_MMCX 7
+#define SA8775P_MMCX_AO 8
+#define SA8775P_MSS 9
+#define SA8775P_MX 10
+#define SA8775P_MX_AO 11
+#define SA8775P_MXC 12
+#define SA8775P_MXC_AO 13
+#define SA8775P_NSP0 14
+#define SA8775P_NSP1 15
+#define SA8775P_XO 16
+
+/* SDM670 Power Domain Indexes */
+#define SDM670_MX 0
+#define SDM670_MX_AO 1
+#define SDM670_CX 2
+#define SDM670_CX_AO 3
+#define SDM670_LMX 4
+#define SDM670_LCX 5
+#define SDM670_GFX 6
+#define SDM670_MSS 7
+
+/* SDM845 Power Domain Indexes */
+#define SDM845_EBI 0
+#define SDM845_MX 1
+#define SDM845_MX_AO 2
+#define SDM845_CX 3
+#define SDM845_CX_AO 4
+#define SDM845_LMX 5
+#define SDM845_LCX 6
+#define SDM845_GFX 7
+#define SDM845_MSS 8
+
+/* SDX55 Power Domain Indexes */
+#define SDX55_MSS 0
+#define SDX55_MX 1
+#define SDX55_CX 2
+
+/* SDX65 Power Domain Indexes */
+#define SDX65_MSS 0
+#define SDX65_MX 1
+#define SDX65_MX_AO 2
+#define SDX65_CX 3
+#define SDX65_CX_AO 4
+#define SDX65_MXC 5
+
+/* SM6350 Power Domain Indexes */
+#define SM6350_CX 0
+#define SM6350_GFX 1
+#define SM6350_LCX 2
+#define SM6350_LMX 3
+#define SM6350_MSS 4
+#define SM6350_MX 5
+
+/* SM8150 Power Domain Indexes */
+#define SM8150_MSS 0
+#define SM8150_EBI 1
+#define SM8150_LMX 2
+#define SM8150_LCX 3
+#define SM8150_GFX 4
+#define SM8150_MX 5
+#define SM8150_MX_AO 6
+#define SM8150_CX 7
+#define SM8150_CX_AO 8
+#define SM8150_MMCX 9
+#define SM8150_MMCX_AO 10
+
+/* SA8155P is a special case, kept for backwards compatibility */
+#define SA8155P_CX SM8150_CX
+#define SA8155P_CX_AO SM8150_CX_AO
+#define SA8155P_EBI SM8150_EBI
+#define SA8155P_GFX SM8150_GFX
+#define SA8155P_MSS SM8150_MSS
+#define SA8155P_MX SM8150_MX
+#define SA8155P_MX_AO SM8150_MX_AO
+
+/* SM8250 Power Domain Indexes */
+#define SM8250_CX 0
+#define SM8250_CX_AO 1
+#define SM8250_EBI 2
+#define SM8250_GFX 3
+#define SM8250_LCX 4
+#define SM8250_LMX 5
+#define SM8250_MMCX 6
+#define SM8250_MMCX_AO 7
+#define SM8250_MX 8
+#define SM8250_MX_AO 9
+
+/* SM8350 Power Domain Indexes */
+#define SM8350_CX 0
+#define SM8350_CX_AO 1
+#define SM8350_EBI 2
+#define SM8350_GFX 3
+#define SM8350_LCX 4
+#define SM8350_LMX 5
+#define SM8350_MMCX 6
+#define SM8350_MMCX_AO 7
+#define SM8350_MX 8
+#define SM8350_MX_AO 9
+#define SM8350_MXC 10
+#define SM8350_MXC_AO 11
+#define SM8350_MSS 12
+
+/* SM8450 Power Domain Indexes */
+#define SM8450_CX 0
+#define SM8450_CX_AO 1
+#define SM8450_EBI 2
+#define SM8450_GFX 3
+#define SM8450_LCX 4
+#define SM8450_LMX 5
+#define SM8450_MMCX 6
+#define SM8450_MMCX_AO 7
+#define SM8450_MX 8
+#define SM8450_MX_AO 9
+#define SM8450_MXC 10
+#define SM8450_MXC_AO 11
+#define SM8450_MSS 12
+
+/* SM8550 Power Domain Indexes */
+#define SM8550_CX 0
+#define SM8550_CX_AO 1
+#define SM8550_EBI 2
+#define SM8550_GFX 3
+#define SM8550_LCX 4
+#define SM8550_LMX 5
+#define SM8550_MMCX 6
+#define SM8550_MMCX_AO 7
+#define SM8550_MX 8
+#define SM8550_MX_AO 9
+#define SM8550_MXC 10
+#define SM8550_MXC_AO 11
+#define SM8550_MSS 12
+#define SM8550_NSP 13
+
+/* QDU1000/QRU1000 Power Domain Indexes */
+#define QDU1000_EBI 0
+#define QDU1000_MSS 1
+#define QDU1000_CX 2
+#define QDU1000_MX 3
+
+/* SC7180 Power Domain Indexes */
+#define SC7180_CX 0
+#define SC7180_CX_AO 1
+#define SC7180_GFX 2
+#define SC7180_MX 3
+#define SC7180_MX_AO 4
+#define SC7180_LMX 5
+#define SC7180_LCX 6
+#define SC7180_MSS 7
+
+/* SC7280 Power Domain Indexes */
+#define SC7280_CX 0
+#define SC7280_CX_AO 1
+#define SC7280_EBI 2
+#define SC7280_GFX 3
+#define SC7280_MX 4
+#define SC7280_MX_AO 5
+#define SC7280_LMX 6
+#define SC7280_LCX 7
+#define SC7280_MSS 8
+
+/* SC8180X Power Domain Indexes */
+#define SC8180X_CX 0
+#define SC8180X_CX_AO 1
+#define SC8180X_EBI 2
+#define SC8180X_GFX 3
+#define SC8180X_LCX 4
+#define SC8180X_LMX 5
+#define SC8180X_MMCX 6
+#define SC8180X_MMCX_AO 7
+#define SC8180X_MSS 8
+#define SC8180X_MX 9
+#define SC8180X_MX_AO 10
+
+/* SC8280XP Power Domain Indexes */
+#define SC8280XP_CX 0
+#define SC8280XP_CX_AO 1
+#define SC8280XP_DDR 2
+#define SC8280XP_EBI 3
+#define SC8280XP_GFX 4
+#define SC8280XP_LCX 5
+#define SC8280XP_LMX 6
+#define SC8280XP_MMCX 7
+#define SC8280XP_MMCX_AO 8
+#define SC8280XP_MSS 9
+#define SC8280XP_MX 10
+#define SC8280XP_MXC 12
+#define SC8280XP_MX_AO 11
+#define SC8280XP_NSP 13
+#define SC8280XP_QPHY 14
+#define SC8280XP_XO 15
+
+#endif
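As a usage sketch: a consumer votes on these generic power domains through its power-domains property, and OPP tables reference the performance levels via opp-level. The node names, unit addresses, and the &rpmhpd label below are illustrative assumptions, not taken from a real board file.

    /* Hypothetical consumer; &rpmhpd is assumed to be the RPMh PD provider. */
    remoteproc@4080000 {
            power-domains = <&rpmhpd RPMHPD_CX>, <&rpmhpd RPMHPD_MX>;
            power-domain-names = "cx", "mx";
    };

    cx_opp_table: opp-table {
            compatible = "operating-points-v2";

            opp-svs {
                    opp-level = <RPMH_REGULATOR_LEVEL_SVS>;	/* 128 */
            };
    };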
diff --git a/include/dt-bindings/power/qcom-aoss-qmp.h b/include/dt-bindings/power/qcom-aoss-qmp.h
deleted file mode 100644
index ec336d31dee4..000000000000
--- a/include/dt-bindings/power/qcom-aoss-qmp.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Linaro Ltd. */
-
-#ifndef __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H
-#define __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H
-
-#define AOSS_QMP_LS_CDSP 0
-#define AOSS_QMP_LS_LPASS 1
-#define AOSS_QMP_LS_MODEM 2
-#define AOSS_QMP_LS_SLPI 3
-#define AOSS_QMP_LS_SPSS 4
-#define AOSS_QMP_LS_VENUS 5
-
-#endif
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index eedb5d94c020..4371ac941f29 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -4,96 +4,39 @@
#ifndef _DT_BINDINGS_POWER_QCOM_RPMPD_H
#define _DT_BINDINGS_POWER_QCOM_RPMPD_H
-/* SDM845 Power Domain Indexes */
-#define SDM845_EBI 0
-#define SDM845_MX 1
-#define SDM845_MX_AO 2
-#define SDM845_CX 3
-#define SDM845_CX_AO 4
-#define SDM845_LMX 5
-#define SDM845_LCX 6
-#define SDM845_GFX 7
-#define SDM845_MSS 8
-
-/* SDX55 Power Domain Indexes */
-#define SDX55_MSS 0
-#define SDX55_MX 1
-#define SDX55_CX 2
-
-/* SM8150 Power Domain Indexes */
-#define SM8150_MSS 0
-#define SM8150_EBI 1
-#define SM8150_LMX 2
-#define SM8150_LCX 3
-#define SM8150_GFX 4
-#define SM8150_MX 5
-#define SM8150_MX_AO 6
-#define SM8150_CX 7
-#define SM8150_CX_AO 8
-#define SM8150_MMCX 9
-#define SM8150_MMCX_AO 10
-
-/* SM8250 Power Domain Indexes */
-#define SM8250_CX 0
-#define SM8250_CX_AO 1
-#define SM8250_EBI 2
-#define SM8250_GFX 3
-#define SM8250_LCX 4
-#define SM8250_LMX 5
-#define SM8250_MMCX 6
-#define SM8250_MMCX_AO 7
-#define SM8250_MX 8
-#define SM8250_MX_AO 9
-
-/* SM8350 Power Domain Indexes */
-#define SM8350_CX 0
-#define SM8350_CX_AO 1
-#define SM8350_EBI 2
-#define SM8350_GFX 3
-#define SM8350_LCX 4
-#define SM8350_LMX 5
-#define SM8350_MMCX 6
-#define SM8350_MMCX_AO 7
-#define SM8350_MX 8
-#define SM8350_MX_AO 9
-#define SM8350_MXC 10
-#define SM8350_MXC_AO 11
-#define SM8350_MSS 12
-
-/* SC7180 Power Domain Indexes */
-#define SC7180_CX 0
-#define SC7180_CX_AO 1
-#define SC7180_GFX 2
-#define SC7180_MX 3
-#define SC7180_MX_AO 4
-#define SC7180_LMX 5
-#define SC7180_LCX 6
-#define SC7180_MSS 7
-
-/* SC7280 Power Domain Indexes */
-#define SC7280_CX 0
-#define SC7280_CX_AO 1
-#define SC7280_EBI 2
-#define SC7280_GFX 3
-#define SC7280_MX 4
-#define SC7280_MX_AO 5
-#define SC7280_LMX 6
-#define SC7280_LCX 7
-#define SC7280_MSS 8
-
-/* SDM845 Power Domain performance levels */
-#define RPMH_REGULATOR_LEVEL_RETENTION 16
-#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
-#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
-#define RPMH_REGULATOR_LEVEL_SVS 128
-#define RPMH_REGULATOR_LEVEL_SVS_L0 144
-#define RPMH_REGULATOR_LEVEL_SVS_L1 192
-#define RPMH_REGULATOR_LEVEL_SVS_L2 224
-#define RPMH_REGULATOR_LEVEL_NOM 256
-#define RPMH_REGULATOR_LEVEL_NOM_L1 320
-#define RPMH_REGULATOR_LEVEL_NOM_L2 336
-#define RPMH_REGULATOR_LEVEL_TURBO 384
-#define RPMH_REGULATOR_LEVEL_TURBO_L1 416
+#include <dt-bindings/power/qcom,rpmhpd.h>
+
+/* Generic RPM Power Domain Indexes */
+#define RPMPD_VDDCX 0
+#define RPMPD_VDDCX_AO 1
+/* VFC and VFL are mutually exclusive and cannot be present on the same platform */
+#define RPMPD_VDDCX_VFC 2
+#define RPMPD_VDDCX_VFL 2
+#define RPMPD_VDDMX 3
+#define RPMPD_VDDMX_AO 4
+#define RPMPD_VDDMX_VFL 5
+#define RPMPD_SSCCX 6
+#define RPMPD_SSCCX_VFL 7
+#define RPMPD_SSCMX 8
+#define RPMPD_SSCMX_VFL 9
+
+/*
+ * Platform-specific power domain bindings. Don't add new entries here, use
+ * RPMPD_* above.
+ */
+
+/* MDM9607 Power Domains */
+#define MDM9607_VDDCX RPMPD_VDDCX
+#define MDM9607_VDDCX_AO RPMPD_VDDCX_AO
+#define MDM9607_VDDCX_VFL RPMPD_VDDCX_VFL
+#define MDM9607_VDDMX RPMPD_VDDMX
+#define MDM9607_VDDMX_AO RPMPD_VDDMX_AO
+#define MDM9607_VDDMX_VFL RPMPD_VDDMX_VFL
+
+/* MSM8226 Power Domain Indexes */
+#define MSM8226_VDDCX RPMPD_VDDCX
+#define MSM8226_VDDCX_AO RPMPD_VDDCX_AO
+#define MSM8226_VDDCX_VFC RPMPD_VDDCX_VFC
/* MSM8939 Power Domains */
#define MSM8939_VDDMDCX 0
@@ -106,19 +49,63 @@
#define MSM8939_VDDMX_AO 7
/* MSM8916 Power Domain Indexes */
-#define MSM8916_VDDCX 0
-#define MSM8916_VDDCX_AO 1
-#define MSM8916_VDDCX_VFC 2
-#define MSM8916_VDDMX 3
-#define MSM8916_VDDMX_AO 4
+#define MSM8916_VDDCX RPMPD_VDDCX
+#define MSM8916_VDDCX_AO RPMPD_VDDCX_AO
+#define MSM8916_VDDCX_VFC RPMPD_VDDCX_VFC
+#define MSM8916_VDDMX RPMPD_VDDMX
+#define MSM8916_VDDMX_AO RPMPD_VDDMX_AO
+
+/* MSM8909 Power Domain Indexes */
+#define MSM8909_VDDCX MSM8916_VDDCX
+#define MSM8909_VDDCX_AO MSM8916_VDDCX_AO
+#define MSM8909_VDDCX_VFC MSM8916_VDDCX_VFC
+#define MSM8909_VDDMX MSM8916_VDDMX
+#define MSM8909_VDDMX_AO MSM8916_VDDMX_AO
+
+/* MSM8917 Power Domain Indexes */
+#define MSM8917_VDDCX RPMPD_VDDCX
+#define MSM8917_VDDCX_AO RPMPD_VDDCX_AO
+#define MSM8917_VDDCX_VFL RPMPD_VDDCX_VFL
+#define MSM8917_VDDMX RPMPD_VDDMX
+#define MSM8917_VDDMX_AO RPMPD_VDDMX_AO
+
+/* MSM8937 Power Domain Indexes */
+#define MSM8937_VDDCX MSM8917_VDDCX
+#define MSM8937_VDDCX_AO MSM8917_VDDCX_AO
+#define MSM8937_VDDCX_VFL MSM8917_VDDCX_VFL
+#define MSM8937_VDDMX MSM8917_VDDMX
+#define MSM8937_VDDMX_AO MSM8917_VDDMX_AO
+
+/* QM215 Power Domain Indexes */
+#define QM215_VDDCX MSM8917_VDDCX
+#define QM215_VDDCX_AO MSM8917_VDDCX_AO
+#define QM215_VDDCX_VFL MSM8917_VDDCX_VFL
+#define QM215_VDDMX MSM8917_VDDMX
+#define QM215_VDDMX_AO MSM8917_VDDMX_AO
+
+/* MSM8953 Power Domain Indexes */
+#define MSM8953_VDDMD 0
+#define MSM8953_VDDMD_AO 1
+#define MSM8953_VDDCX 2
+#define MSM8953_VDDCX_AO 3
+#define MSM8953_VDDCX_VFL 4
+#define MSM8953_VDDMX 5
+#define MSM8953_VDDMX_AO 6
+
+/* MSM8974 Power Domain Indexes */
+#define MSM8974_VDDCX 0
+#define MSM8974_VDDCX_AO 1
+#define MSM8974_VDDCX_VFC 2
+#define MSM8974_VDDGFX 3
+#define MSM8974_VDDGFX_VFC 4
/* MSM8976 Power Domain Indexes */
-#define MSM8976_VDDCX 0
-#define MSM8976_VDDCX_AO 1
-#define MSM8976_VDDCX_VFL 2
-#define MSM8976_VDDMX 3
-#define MSM8976_VDDMX_AO 4
-#define MSM8976_VDDMX_VFL 5
+#define MSM8976_VDDCX RPMPD_VDDCX
+#define MSM8976_VDDCX_AO RPMPD_VDDCX_AO
+#define MSM8976_VDDCX_VFL RPMPD_VDDCX_VFL
+#define MSM8976_VDDMX RPMPD_VDDMX
+#define MSM8976_VDDMX_AO RPMPD_VDDMX_AO
+#define MSM8976_VDDMX_VFL RPMPD_VDDMX_VFL
/* MSM8994 Power Domain Indexes */
#define MSM8994_VDDCX 0
@@ -139,16 +126,26 @@
#define MSM8996_VDDSSCX_VFC 6
/* MSM8998 Power Domain Indexes */
-#define MSM8998_VDDCX 0
-#define MSM8998_VDDCX_AO 1
-#define MSM8998_VDDCX_VFL 2
-#define MSM8998_VDDMX 3
-#define MSM8998_VDDMX_AO 4
-#define MSM8998_VDDMX_VFL 5
-#define MSM8998_SSCCX 6
-#define MSM8998_SSCCX_VFL 7
-#define MSM8998_SSCMX 8
-#define MSM8998_SSCMX_VFL 9
+#define MSM8998_VDDCX RPMPD_VDDCX
+#define MSM8998_VDDCX_AO RPMPD_VDDCX_AO
+#define MSM8998_VDDCX_VFL RPMPD_VDDCX_VFL
+#define MSM8998_VDDMX RPMPD_VDDMX
+#define MSM8998_VDDMX_AO RPMPD_VDDMX_AO
+#define MSM8998_VDDMX_VFL RPMPD_VDDMX_VFL
+#define MSM8998_SSCCX RPMPD_SSCCX
+#define MSM8998_SSCCX_VFL RPMPD_SSCCX_VFL
+#define MSM8998_SSCMX RPMPD_SSCMX
+#define MSM8998_SSCMX_VFL RPMPD_SSCMX_VFL
+
+/* QCM2290 Power Domains */
+#define QCM2290_VDDCX 0
+#define QCM2290_VDDCX_AO 1
+#define QCM2290_VDDCX_VFL 2
+#define QCM2290_VDDMX 3
+#define QCM2290_VDDMX_AO 4
+#define QCM2290_VDDMX_VFL 5
+#define QCM2290_VDD_LPI_CX 6
+#define QCM2290_VDD_LPI_MX 7
/* QCS404 Power Domains */
#define QCS404_VDDMX 0
@@ -160,16 +157,46 @@
#define QCS404_LPIMX_VFL 6
/* SDM660 Power Domains */
-#define SDM660_VDDCX 0
-#define SDM660_VDDCX_AO 1
-#define SDM660_VDDCX_VFL 2
-#define SDM660_VDDMX 3
-#define SDM660_VDDMX_AO 4
-#define SDM660_VDDMX_VFL 5
-#define SDM660_SSCCX 6
-#define SDM660_SSCCX_VFL 7
-#define SDM660_SSCMX 8
-#define SDM660_SSCMX_VFL 9
+#define SDM660_VDDCX RPMPD_VDDCX
+#define SDM660_VDDCX_AO RPMPD_VDDCX_AO
+#define SDM660_VDDCX_VFL RPMPD_VDDCX_VFL
+#define SDM660_VDDMX RPMPD_VDDMX
+#define SDM660_VDDMX_AO RPMPD_VDDMX_AO
+#define SDM660_VDDMX_VFL RPMPD_VDDMX_VFL
+#define SDM660_SSCCX RPMPD_SSCCX
+#define SDM660_SSCCX_VFL RPMPD_SSCCX_VFL
+#define SDM660_SSCMX RPMPD_SSCMX
+#define SDM660_SSCMX_VFL RPMPD_SSCMX_VFL
+
+/* SM6115 Power Domains */
+#define SM6115_VDDCX 0
+#define SM6115_VDDCX_AO 1
+#define SM6115_VDDCX_VFL 2
+#define SM6115_VDDMX 3
+#define SM6115_VDDMX_AO 4
+#define SM6115_VDDMX_VFL 5
+#define SM6115_VDD_LPI_CX 6
+#define SM6115_VDD_LPI_MX 7
+
+/* SM6125 Power Domains */
+#define SM6125_VDDCX RPMPD_VDDCX
+#define SM6125_VDDCX_AO RPMPD_VDDCX_AO
+#define SM6125_VDDCX_VFL RPMPD_VDDCX_VFL
+#define SM6125_VDDMX RPMPD_VDDMX
+#define SM6125_VDDMX_AO RPMPD_VDDMX_AO
+#define SM6125_VDDMX_VFL RPMPD_VDDMX_VFL
+
+/* SM6375 Power Domain Indexes */
+#define SM6375_VDDCX 0
+#define SM6375_VDDCX_AO 1
+#define SM6375_VDDCX_VFL 2
+#define SM6375_VDDMX 3
+#define SM6375_VDDMX_AO 4
+#define SM6375_VDDMX_VFL 5
+#define SM6375_VDDGX 6
+#define SM6375_VDDGX_AO 7
+#define SM6375_VDD_LPI_CX 8
+#define SM6375_VDD_LPI_MX 9
/* RPM SMD Power Domain performance levels */
#define RPM_SMD_LEVEL_RETENTION 16
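Because the legacy per-SoC macros are now defined in terms of the generic RPMPD_* indexes, both spellings expand to the same cell value and existing device trees keep compiling unchanged. A minimal sketch (the node and the &rpmpd label are illustrative):

    display@1a00000 {
            /* MSM8916_VDDCX == RPMPD_VDDCX == 0 */
            power-domains = <&rpmpd MSM8916_VDDCX>;
    };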
diff --git a/include/dt-bindings/power/r8a7795-sysc.h b/include/dt-bindings/power/r8a7795-sysc.h
index eea6ad69f0b0..ff5323858572 100644
--- a/include/dt-bindings/power/r8a7795-sysc.h
+++ b/include/dt-bindings/power/r8a7795-sysc.h
@@ -30,7 +30,6 @@
#define R8A7795_PD_CA53_SCU 21
#define R8A7795_PD_3DG_E 22
#define R8A7795_PD_A3IR 24
-#define R8A7795_PD_A2VC0 25 /* ES1.x only */
#define R8A7795_PD_A2VC1 26
/* Always-on power area */
diff --git a/include/dt-bindings/power/r8a779f0-sysc.h b/include/dt-bindings/power/r8a779f0-sysc.h
new file mode 100644
index 000000000000..cde1536e9ed0
--- /dev/null
+++ b/include/dt-bindings/power/r8a779f0-sysc.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A779F0_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A779F0_SYSC_H__
+
+/*
+ * These power domain indices match the Power Domain Register Numbers (PDR)
+ */
+
+#define R8A779F0_PD_A1E0D0C0 0
+#define R8A779F0_PD_A1E0D0C1 1
+#define R8A779F0_PD_A1E0D1C0 2
+#define R8A779F0_PD_A1E0D1C1 3
+#define R8A779F0_PD_A1E1D0C0 4
+#define R8A779F0_PD_A1E1D0C1 5
+#define R8A779F0_PD_A1E1D1C0 6
+#define R8A779F0_PD_A1E1D1C1 7
+#define R8A779F0_PD_A2E0D0 16
+#define R8A779F0_PD_A2E0D1 17
+#define R8A779F0_PD_A2E1D0 18
+#define R8A779F0_PD_A2E1D1 19
+#define R8A779F0_PD_A3E0 20
+#define R8A779F0_PD_A3E1 21
+
+/* Always-on power area */
+#define R8A779F0_PD_ALWAYS_ON 64
+
+#endif /* __DT_BINDINGS_POWER_R8A779F0_SYSC_H__ */
diff --git a/include/dt-bindings/power/r8a779g0-sysc.h b/include/dt-bindings/power/r8a779g0-sysc.h
new file mode 100644
index 000000000000..c7b139fb075f
--- /dev/null
+++ b/include/dt-bindings/power/r8a779g0-sysc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A779G0_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A779G0_SYSC_H__
+
+/*
+ * These power domain indices match the Power Domain Register Numbers (PDR)
+ */
+
+#define R8A779G0_PD_A1E0D0C0 0
+#define R8A779G0_PD_A1E0D0C1 1
+#define R8A779G0_PD_A1E0D1C0 2
+#define R8A779G0_PD_A1E0D1C1 3
+#define R8A779G0_PD_A2E0D0 16
+#define R8A779G0_PD_A2E0D1 17
+#define R8A779G0_PD_A3E0 20
+#define R8A779G0_PD_A33DGA 24
+#define R8A779G0_PD_A23DGB 25
+#define R8A779G0_PD_A1DSP0 33
+#define R8A779G0_PD_A2IMP01 34
+#define R8A779G0_PD_A2PSC 35
+#define R8A779G0_PD_A2CV0 36
+#define R8A779G0_PD_A2CV1 37
+#define R8A779G0_PD_A1CNN0 41
+#define R8A779G0_PD_A2CN0 42
+#define R8A779G0_PD_A3IR 43
+#define R8A779G0_PD_A1DSP1 45
+#define R8A779G0_PD_A2IMP23 46
+#define R8A779G0_PD_A2DMA 47
+#define R8A779G0_PD_A2CV2 48
+#define R8A779G0_PD_A2CV3 49
+#define R8A779G0_PD_A1DSP2 53
+#define R8A779G0_PD_A1DSP3 54
+#define R8A779G0_PD_A3VIP0 56
+#define R8A779G0_PD_A3VIP1 57
+#define R8A779G0_PD_A3VIP2 58
+#define R8A779G0_PD_A3ISP0 60
+#define R8A779G0_PD_A3ISP1 61
+#define R8A779G0_PD_A3DUL 62
+
+/* Always-on power area */
+#define R8A779G0_PD_ALWAYS_ON 64
+
+#endif /* __DT_BINDINGS_POWER_R8A779G0_SYSC_H__ */
diff --git a/include/dt-bindings/power/renesas,r8a779h0-sysc.h b/include/dt-bindings/power/renesas,r8a779h0-sysc.h
new file mode 100644
index 000000000000..f27976f523e8
--- /dev/null
+++ b/include/dt-bindings/power/renesas,r8a779h0-sysc.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_RENESAS_R8A779H0_SYSC_H__
+#define __DT_BINDINGS_POWER_RENESAS_R8A779H0_SYSC_H__
+
+/*
+ * These power domain indices match the Power Domain Register Numbers (PDR)
+ */
+
+#define R8A779H0_PD_A1E0D0C0 0
+#define R8A779H0_PD_A1E0D0C1 1
+#define R8A779H0_PD_A1E0D0C2 2
+#define R8A779H0_PD_A1E0D0C3 3
+#define R8A779H0_PD_A2E0D0 16
+#define R8A779H0_PD_A3CR0 21
+#define R8A779H0_PD_A3CR1 22
+#define R8A779H0_PD_A3CR2 23
+#define R8A779H0_PD_A33DGA 24
+#define R8A779H0_PD_A23DGB 25
+#define R8A779H0_PD_C4 31
+#define R8A779H0_PD_A1DSP0 33
+#define R8A779H0_PD_A2IMP01 34
+#define R8A779H0_PD_A2PSC 35
+#define R8A779H0_PD_A2CV0 36
+#define R8A779H0_PD_A2CV1 37
+#define R8A779H0_PD_A3IMR0 38
+#define R8A779H0_PD_A3IMR1 39
+#define R8A779H0_PD_A3VC 40
+#define R8A779H0_PD_A2CN0 42
+#define R8A779H0_PD_A1CN0 44
+#define R8A779H0_PD_A1DSP1 45
+#define R8A779H0_PD_A2DMA 47
+#define R8A779H0_PD_A2CV2 48
+#define R8A779H0_PD_A2CV3 49
+#define R8A779H0_PD_A3IMR2 50
+#define R8A779H0_PD_A3IMR3 51
+#define R8A779H0_PD_A3PCI 52
+#define R8A779H0_PD_A2PCIPHY 53
+#define R8A779H0_PD_A3VIP0 56
+#define R8A779H0_PD_A3VIP2 58
+#define R8A779H0_PD_A3ISP0 60
+#define R8A779H0_PD_A3DUL 62
+
+/* Always-on power area */
+#define R8A779H0_PD_ALWAYS_ON 64
+
+#endif /* __DT_BINDINGS_POWER_RENESAS_R8A779H0_SYSC_H__ */
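On R-Car SoCs these PDR indices are passed straight to the SYSC power-domain controller in consumer nodes; a sketch, assuming the controller node is labelled &sysc:

    i2c0: i2c@e6500000 {
            power-domains = <&sysc R8A779H0_PD_ALWAYS_ON>;
    };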
diff --git a/include/dt-bindings/power/rk3568-power.h b/include/dt-bindings/power/rk3568-power.h
new file mode 100644
index 000000000000..6cc1af1a9d26
--- /dev/null
+++ b/include/dt-bindings/power/rk3568-power.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_POWER_RK3568_POWER_H__
+#define __DT_BINDINGS_POWER_RK3568_POWER_H__
+
+/* VD_CORE */
+#define RK3568_PD_CPU_0 0
+#define RK3568_PD_CPU_1 1
+#define RK3568_PD_CPU_2 2
+#define RK3568_PD_CPU_3 3
+#define RK3568_PD_CORE_ALIVE 4
+
+/* VD_PMU */
+#define RK3568_PD_PMU 5
+
+/* VD_NPU */
+#define RK3568_PD_NPU 6
+
+/* VD_GPU */
+#define RK3568_PD_GPU 7
+
+/* VD_LOGIC */
+#define RK3568_PD_VI 8
+#define RK3568_PD_VO 9
+#define RK3568_PD_RGA 10
+#define RK3568_PD_VPU 11
+#define RK3568_PD_CENTER 12
+#define RK3568_PD_RKVDEC 13
+#define RK3568_PD_RKVENC 14
+#define RK3568_PD_PIPE 15
+#define RK3568_PD_LOGIC_ALIVE 16
+
+#endif
diff --git a/include/dt-bindings/power/rk3588-power.h b/include/dt-bindings/power/rk3588-power.h
new file mode 100644
index 000000000000..6b91a50cc6d6
--- /dev/null
+++ b/include/dt-bindings/power/rk3588-power.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+#ifndef __DT_BINDINGS_POWER_RK3588_POWER_H__
+#define __DT_BINDINGS_POWER_RK3588_POWER_H__
+
+/* VD_LITDSU */
+#define RK3588_PD_CPU_0 0
+#define RK3588_PD_CPU_1 1
+#define RK3588_PD_CPU_2 2
+#define RK3588_PD_CPU_3 3
+
+/* VD_BIGCORE0 */
+#define RK3588_PD_CPU_4 4
+#define RK3588_PD_CPU_5 5
+
+/* VD_BIGCORE1 */
+#define RK3588_PD_CPU_6 6
+#define RK3588_PD_CPU_7 7
+
+/* VD_NPU */
+#define RK3588_PD_NPU 8
+#define RK3588_PD_NPUTOP 9
+#define RK3588_PD_NPU1 10
+#define RK3588_PD_NPU2 11
+
+/* VD_GPU */
+#define RK3588_PD_GPU 12
+
+/* VD_VCODEC */
+#define RK3588_PD_VCODEC 13
+#define RK3588_PD_RKVDEC0 14
+#define RK3588_PD_RKVDEC1 15
+#define RK3588_PD_VENC0 16
+#define RK3588_PD_VENC1 17
+
+/* VD_DD01 */
+#define RK3588_PD_DDR01 18
+
+/* VD_DD23 */
+#define RK3588_PD_DDR23 19
+
+/* VD_LOGIC */
+#define RK3588_PD_CENTER 20
+#define RK3588_PD_VDPU 21
+#define RK3588_PD_RGA30 22
+#define RK3588_PD_AV1 23
+#define RK3588_PD_VOP 24
+#define RK3588_PD_VO0 25
+#define RK3588_PD_VO1 26
+#define RK3588_PD_VI 27
+#define RK3588_PD_ISP1 28
+#define RK3588_PD_FEC 29
+#define RK3588_PD_RGA31 30
+#define RK3588_PD_USB 31
+#define RK3588_PD_PHP 32
+#define RK3588_PD_GMAC 33
+#define RK3588_PD_PCIE 34
+#define RK3588_PD_NVM 35
+#define RK3588_PD_NVM0 36
+#define RK3588_PD_SDIO 37
+#define RK3588_PD_AUDIO 38
+#define RK3588_PD_SECURE 39
+#define RK3588_PD_SDMMC 40
+#define RK3588_PD_CRYPTO 41
+#define RK3588_PD_BUS 42
+
+/* VD_PMU */
+#define RK3588_PD_PMU1 43
+
+#endif
diff --git a/include/dt-bindings/power/rockchip,rk3528-power.h b/include/dt-bindings/power/rockchip,rk3528-power.h
new file mode 100644
index 000000000000..318923cdaaf6
--- /dev/null
+++ b/include/dt-bindings/power/rockchip,rk3528-power.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+#ifndef __DT_BINDINGS_POWER_RK3528_POWER_H__
+#define __DT_BINDINGS_POWER_RK3528_POWER_H__
+
+#define RK3528_PD_PMU 0
+#define RK3528_PD_BUS 1
+#define RK3528_PD_DDR 2
+#define RK3528_PD_MSCH 3
+
+/* VD_GPU */
+#define RK3528_PD_GPU 4
+
+/* VD_LOGIC */
+#define RK3528_PD_RKVDEC 5
+#define RK3528_PD_RKVENC 6
+#define RK3528_PD_VO 7
+#define RK3528_PD_VPU 8
+
+#endif
diff --git a/include/dt-bindings/power/rockchip,rk3562-power.h b/include/dt-bindings/power/rockchip,rk3562-power.h
new file mode 100644
index 000000000000..5182c2427a55
--- /dev/null
+++ b/include/dt-bindings/power/rockchip,rk3562-power.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022-2024 Rockchip Electronics Co., Ltd.
+ */
+#ifndef __DT_BINDINGS_POWER_RK3562_POWER_H__
+#define __DT_BINDINGS_POWER_RK3562_POWER_H__
+
+/* VD_CORE */
+#define RK3562_PD_CPU_0 0
+#define RK3562_PD_CPU_1 1
+#define RK3562_PD_CPU_2 2
+#define RK3562_PD_CPU_3 3
+#define RK3562_PD_CORE_ALIVE 4
+
+/* VD_PMU */
+#define RK3562_PD_PMU 5
+#define RK3562_PD_PMU_ALIVE 6
+
+/* VD_NPU */
+#define RK3562_PD_NPU 7
+
+/* VD_GPU */
+#define RK3562_PD_GPU 8
+
+/* VD_LOGIC */
+#define RK3562_PD_DDR 9
+#define RK3562_PD_VEPU 10
+#define RK3562_PD_VDPU 11
+#define RK3562_PD_VI 12
+#define RK3562_PD_VO 13
+#define RK3562_PD_RGA 14
+#define RK3562_PD_PHP 15
+#define RK3562_PD_LOGIC_ALIVE 16
+
+#endif
diff --git a/include/dt-bindings/power/rockchip,rk3576-power.h b/include/dt-bindings/power/rockchip,rk3576-power.h
new file mode 100644
index 000000000000..324a056aa851
--- /dev/null
+++ b/include/dt-bindings/power/rockchip,rk3576-power.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+#ifndef __DT_BINDINGS_POWER_RK3576_POWER_H__
+#define __DT_BINDINGS_POWER_RK3576_POWER_H__
+
+/* VD_NPU */
+#define RK3576_PD_NPU 0
+#define RK3576_PD_NPUTOP 1
+#define RK3576_PD_NPU0 2
+#define RK3576_PD_NPU1 3
+
+/* VD_GPU */
+#define RK3576_PD_GPU 4
+
+/* VD_LOGIC */
+#define RK3576_PD_NVM 5
+#define RK3576_PD_SDGMAC 6
+#define RK3576_PD_USB 7
+#define RK3576_PD_PHP 8
+#define RK3576_PD_SUBPHP 9
+#define RK3576_PD_AUDIO 10
+#define RK3576_PD_VEPU0 11
+#define RK3576_PD_VEPU1 12
+#define RK3576_PD_VPU 13
+#define RK3576_PD_VDEC 14
+#define RK3576_PD_VI 15
+#define RK3576_PD_VO0 16
+#define RK3576_PD_VO1 17
+#define RK3576_PD_VOP 18
+
+#endif
diff --git a/include/dt-bindings/power/rockchip,rv1126-power.h b/include/dt-bindings/power/rockchip,rv1126-power.h
new file mode 100644
index 000000000000..38a68e000d38
--- /dev/null
+++ b/include/dt-bindings/power/rockchip,rv1126-power.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __DT_BINDINGS_POWER_RV1126_POWER_H__
+#define __DT_BINDINGS_POWER_RV1126_POWER_H__
+
+/* VD_CORE */
+#define RV1126_PD_CPU_0 0
+#define RV1126_PD_CPU_1 1
+#define RV1126_PD_CPU_2 2
+#define RV1126_PD_CPU_3 3
+#define RV1126_PD_CORE_ALIVE 4
+
+/* VD_PMU */
+#define RV1126_PD_PMU 5
+#define RV1126_PD_PMU_ALIVE 6
+
+/* VD_NPU */
+#define RV1126_PD_NPU 7
+
+/* VD_VEPU */
+#define RV1126_PD_VEPU 8
+
+/* VD_LOGIC */
+#define RV1126_PD_VI 9
+#define RV1126_PD_VO 10
+#define RV1126_PD_ISPP 11
+#define RV1126_PD_VDPU 12
+#define RV1126_PD_CRYPTO 13
+#define RV1126_PD_DDR 14
+#define RV1126_PD_NVM 15
+#define RV1126_PD_SDIO 16
+#define RV1126_PD_USB 17
+#define RV1126_PD_LOGIC_ALIVE 18
+
+#endif
diff --git a/include/dt-bindings/power/rockchip,rv1126b-power-controller.h b/include/dt-bindings/power/rockchip,rv1126b-power-controller.h
new file mode 100644
index 000000000000..48ea87a4423c
--- /dev/null
+++ b/include/dt-bindings/power/rockchip,rv1126b-power-controller.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2024 Rockchip Electronics Co., Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#ifndef __DT_BINDINGS_POWER_RV1126B_POWER_CONTROLLER_H__
+#define __DT_BINDINGS_POWER_RV1126B_POWER_CONTROLLER_H__
+
+/* VD_NPU */
+#define RV1126B_PD_NPU 0
+
+/* VD_LOGIC */
+#define RV1126B_PD_VDO 1
+#define RV1126B_PD_AIISP 2
+
+#endif
diff --git a/include/dt-bindings/power/starfive,jh7110-pmu.h b/include/dt-bindings/power/starfive,jh7110-pmu.h
new file mode 100644
index 000000000000..7b4f24927dee
--- /dev/null
+++ b/include/dt-bindings/power/starfive,jh7110-pmu.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022-2023 StarFive Technology Co., Ltd.
+ * Author: Walker Chen <walker.chen@starfivetech.com>
+ */
+#ifndef __DT_BINDINGS_POWER_JH7110_POWER_H__
+#define __DT_BINDINGS_POWER_JH7110_POWER_H__
+
+#define JH7110_PD_SYSTOP 0
+#define JH7110_PD_CPU 1
+#define JH7110_PD_GPUA 2
+#define JH7110_PD_VDEC 3
+#define JH7110_PD_VOUT 4
+#define JH7110_PD_ISP 5
+#define JH7110_PD_VENC 6
+
+/* AON Power Domain */
+#define JH7110_AON_PD_DPHY_TX 0
+#define JH7110_AON_PD_DPHY_RX 1
+
+#endif
diff --git a/include/dt-bindings/power/summit,smb347-charger.h b/include/dt-bindings/power/summit,smb347-charger.h
index d918bf321a71..14f2f9cf2020 100644
--- a/include/dt-bindings/power/summit,smb347-charger.h
+++ b/include/dt-bindings/power/summit,smb347-charger.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0-or-later or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR MIT) */
/*
* Author: David Heidelberg <david@ixit.cz>
*/
@@ -16,4 +16,8 @@
#define SMB3XX_CHG_ENABLE_PIN_ACTIVE_LOW 1
#define SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH 2
+/* Polarity of INOK signal */
+#define SMB3XX_SYSOK_INOK_ACTIVE_LOW 0
+#define SMB3XX_SYSOK_INOK_ACTIVE_HIGH 1
+
#endif
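A hedged sketch for the new INOK constants, assuming the charger binding exposes them through a summit,inok-polarity property (the property name is an assumption here, not taken from this diff):

    charger@7f {
            compatible = "summit,smb347";
            reg = <0x7f>;
            /* property name assumed; check the smb347 charger binding */
            summit,inok-polarity = <SMB3XX_SYSOK_INOK_ACTIVE_LOW>;
    };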
diff --git a/include/dt-bindings/power/tegra234-powergate.h b/include/dt-bindings/power/tegra234-powergate.h
new file mode 100644
index 000000000000..b0fec2ddec84
--- /dev/null
+++ b/include/dt-bindings/power/tegra234-powergate.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef __ABI_MACH_T234_POWERGATE_T234_H_
+#define __ABI_MACH_T234_POWERGATE_T234_H_
+
+#define TEGRA234_POWER_DOMAIN_OFA 1U
+#define TEGRA234_POWER_DOMAIN_AUD 2U
+#define TEGRA234_POWER_DOMAIN_DISP 3U
+#define TEGRA234_POWER_DOMAIN_PCIEX8A 5U
+#define TEGRA234_POWER_DOMAIN_PCIEX4A 6U
+#define TEGRA234_POWER_DOMAIN_PCIEX4BA 7U
+#define TEGRA234_POWER_DOMAIN_PCIEX4BB 8U
+#define TEGRA234_POWER_DOMAIN_PCIEX1A 9U
+#define TEGRA234_POWER_DOMAIN_XUSBA 10U
+#define TEGRA234_POWER_DOMAIN_XUSBB 11U
+#define TEGRA234_POWER_DOMAIN_XUSBC 12U
+#define TEGRA234_POWER_DOMAIN_PCIEX4CA 13U
+#define TEGRA234_POWER_DOMAIN_PCIEX4CB 14U
+#define TEGRA234_POWER_DOMAIN_PCIEX4CC 15U
+#define TEGRA234_POWER_DOMAIN_PCIEX8B 16U
+#define TEGRA234_POWER_DOMAIN_MGBEA 17U
+#define TEGRA234_POWER_DOMAIN_MGBEB 18U
+#define TEGRA234_POWER_DOMAIN_MGBEC 19U
+#define TEGRA234_POWER_DOMAIN_MGBED 20U
+#define TEGRA234_POWER_DOMAIN_ISPA 22U
+#define TEGRA234_POWER_DOMAIN_NVDEC 23U
+#define TEGRA234_POWER_DOMAIN_NVJPGA 24U
+#define TEGRA234_POWER_DOMAIN_NVENC 25U
+#define TEGRA234_POWER_DOMAIN_VI 28U
+#define TEGRA234_POWER_DOMAIN_VIC 29U
+#define TEGRA234_POWER_DOMAIN_PVA 30U
+#define TEGRA234_POWER_DOMAIN_DLAA 32U
+#define TEGRA234_POWER_DOMAIN_DLAB 33U
+#define TEGRA234_POWER_DOMAIN_CV 34U
+#define TEGRA234_POWER_DOMAIN_GPU 35U
+#define TEGRA234_POWER_DOMAIN_NVJPGB 36U
+
+#endif
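These powergate IDs are handed to the BPMP firmware power-domain provider; an illustrative consumer, with the &bpmp label following common Tegra usage:

    pcie@141a0000 {
            power-domains = <&bpmp TEGRA234_POWER_DOMAIN_PCIEX8A>;
    };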
diff --git a/include/dt-bindings/power/thead,th1520-power.h b/include/dt-bindings/power/thead,th1520-power.h
new file mode 100644
index 000000000000..8395bd1459f3
--- /dev/null
+++ b/include/dt-bindings/power/thead,th1520-power.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022 Alibaba Group Holding Limited.
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Michal Wilczynski <m.wilczynski@samsung.com>
+ */
+
+#ifndef __DT_BINDINGS_POWER_TH1520_H
+#define __DT_BINDINGS_POWER_TH1520_H
+
+#define TH1520_AUDIO_PD 0
+#define TH1520_VDEC_PD 1
+#define TH1520_NPU_PD 2
+#define TH1520_VENC_PD 3
+#define TH1520_GPU_PD 4
+#define TH1520_DSP0_PD 5
+#define TH1520_DSP1_PD 6
+
+#endif
diff --git a/include/dt-bindings/power/xlnx-zynqmp-power.h b/include/dt-bindings/power/xlnx-zynqmp-power.h
index 0d9a412fd5e0..618024cbb20d 100644
--- a/include/dt-bindings/power/xlnx-zynqmp-power.h
+++ b/include/dt-bindings/power/xlnx-zynqmp-power.h
@@ -6,6 +6,12 @@
#ifndef _DT_BINDINGS_ZYNQMP_POWER_H
#define _DT_BINDINGS_ZYNQMP_POWER_H
+#define PD_RPU_0 7
+#define PD_RPU_1 8
+#define PD_R5_0_ATCM 15
+#define PD_R5_0_BTCM 16
+#define PD_R5_1_ATCM 17
+#define PD_R5_1_BTCM 18
#define PD_USB_0 22
#define PD_USB_1 23
#define PD_TTC_0 24
diff --git a/include/dt-bindings/regulator/nxp,pca9450-regulator.h b/include/dt-bindings/regulator/nxp,pca9450-regulator.h
new file mode 100644
index 000000000000..08434caef429
--- /dev/null
+++ b/include/dt-bindings/regulator/nxp,pca9450-regulator.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Device Tree binding constants for the NXP PCA9450A/B/C PMIC regulators
+ */
+
+#ifndef _DT_BINDINGS_REGULATORS_NXP_PCA9450_H
+#define _DT_BINDINGS_REGULATORS_NXP_PCA9450_H
+
+/*
+ * Buck mode constants which may be used in devicetree properties (e.g.
+ * regulator-initial-mode, regulator-allowed-modes).
+ * See the manufacturer's datasheet for more information on these modes.
+ */
+
+#define PCA9450_BUCK_MODE_AUTO 0
+#define PCA9450_BUCK_MODE_FORCE_PWM 1
+
+#endif
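The mode constants slot into the generic regulator mode properties named in the header comment; a sketch with an illustrative buck node:

    BUCK1 {
            regulator-name = "buck1";
            regulator-initial-mode = <PCA9450_BUCK_MODE_AUTO>;
            regulator-allowed-modes = <PCA9450_BUCK_MODE_AUTO
                                       PCA9450_BUCK_MODE_FORCE_PWM>;
    };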
diff --git a/include/dt-bindings/regulator/richtek,rt5190a-regulator.h b/include/dt-bindings/regulator/richtek,rt5190a-regulator.h
new file mode 100644
index 000000000000..63f99d4c1cb3
--- /dev/null
+++ b/include/dt-bindings/regulator/richtek,rt5190a-regulator.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __DT_BINDINGS_RICHTEK_RT5190A_REGULATOR_H__
+#define __DT_BINDINGS_RICHTEK_RT5190A_REGULATOR_H__
+
+/*
+ * BUCK/LDO mode constants which may be used in devicetree properties
+ * (e.g. regulator-allowed-modes).
+ * See the manufacturer's datasheet for more information on these modes.
+ */
+
+#define RT5190A_OPMODE_AUTO 0
+#define RT5190A_OPMODE_FPWM 1
+
+#endif
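The RT5190A modes follow the same pattern, again with an illustrative node:

    buck2 {
            regulator-allowed-modes = <RT5190A_OPMODE_AUTO RT5190A_OPMODE_FPWM>;
    };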
diff --git a/include/dt-bindings/regulator/st,stm32mp13-regulator.h b/include/dt-bindings/regulator/st,stm32mp13-regulator.h
new file mode 100644
index 000000000000..b3a974dfc585
--- /dev/null
+++ b/include/dt-bindings/regulator/st,stm32mp13-regulator.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef __DT_BINDINGS_REGULATOR_ST_STM32MP13_REGULATOR_H
+#define __DT_BINDINGS_REGULATOR_ST_STM32MP13_REGULATOR_H
+
+/* SCMI voltage domain identifiers */
+
+/* SOC Internal regulators */
+#define VOLTD_SCMI_REG11 0
+#define VOLTD_SCMI_REG18 1
+#define VOLTD_SCMI_USB33 2
+#define VOLTD_SCMI_SDMMC1_IO 3
+#define VOLTD_SCMI_SDMMC2_IO 4
+#define VOLTD_SCMI_VREFBUF 5
+
+/* STPMIC1 regulators */
+#define VOLTD_SCMI_STPMIC1_BUCK1 6
+#define VOLTD_SCMI_STPMIC1_BUCK2 7
+#define VOLTD_SCMI_STPMIC1_BUCK3 8
+#define VOLTD_SCMI_STPMIC1_BUCK4 9
+#define VOLTD_SCMI_STPMIC1_LDO1 10
+#define VOLTD_SCMI_STPMIC1_LDO2 11
+#define VOLTD_SCMI_STPMIC1_LDO3 12
+#define VOLTD_SCMI_STPMIC1_LDO4 13
+#define VOLTD_SCMI_STPMIC1_LDO5 14
+#define VOLTD_SCMI_STPMIC1_LDO6 15
+#define VOLTD_SCMI_STPMIC1_VREFDDR 16
+#define VOLTD_SCMI_STPMIC1_BOOST 17
+#define VOLTD_SCMI_STPMIC1_PWR_SW1 18
+#define VOLTD_SCMI_STPMIC1_PWR_SW2 19
+
+/* External regulators */
+#define VOLTD_SCMI_REGU0 20
+#define VOLTD_SCMI_REGU1 21
+#define VOLTD_SCMI_REGU2 22
+#define VOLTD_SCMI_REGU3 23
+#define VOLTD_SCMI_REGU4 24
+
+#endif /* __DT_BINDINGS_REGULATOR_ST_STM32MP13_REGULATOR_H */
diff --git a/include/dt-bindings/regulator/st,stm32mp15-regulator.h b/include/dt-bindings/regulator/st,stm32mp15-regulator.h
new file mode 100644
index 000000000000..7052507cb3e5
--- /dev/null
+++ b/include/dt-bindings/regulator/st,stm32mp15-regulator.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2025, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef __DT_BINDINGS_REGULATOR_ST_STM32MP15_REGULATOR_H
+#define __DT_BINDINGS_REGULATOR_ST_STM32MP15_REGULATOR_H
+
+/* SCMI voltage domain identifiers */
+
+/* SOC Internal regulators */
+#define VOLTD_SCMI_REG11 0
+#define VOLTD_SCMI_REG18 1
+#define VOLTD_SCMI_USB33 2
+
+/* STPMIC1 regulators */
+#define VOLTD_SCMI_STPMIC1_BUCK1 3
+#define VOLTD_SCMI_STPMIC1_BUCK2 4
+#define VOLTD_SCMI_STPMIC1_BUCK3 5
+#define VOLTD_SCMI_STPMIC1_BUCK4 6
+#define VOLTD_SCMI_STPMIC1_LDO1 7
+#define VOLTD_SCMI_STPMIC1_LDO2 8
+#define VOLTD_SCMI_STPMIC1_LDO3 9
+#define VOLTD_SCMI_STPMIC1_LDO4 10
+#define VOLTD_SCMI_STPMIC1_LDO5 11
+#define VOLTD_SCMI_STPMIC1_LDO6 12
+#define VOLTD_SCMI_STPMIC1_VREFDDR 13
+#define VOLTD_SCMI_STPMIC1_BOOST 14
+#define VOLTD_SCMI_STPMIC1_PWR_SW1 15
+#define VOLTD_SCMI_STPMIC1_PWR_SW2 16
+#define VOLTD_SCMI_VREFBUF 17
+
+/* External regulators */
+#define VOLTD_SCMI_REGU0 18
+#define VOLTD_SCMI_REGU1 19
+#define VOLTD_SCMI_REGU2 20
+#define VOLTD_SCMI_REGU3 21
+#define VOLTD_SCMI_REGU4 22
+
+#endif /* __DT_BINDINGS_REGULATOR_ST_STM32MP15_REGULATOR_H */
diff --git a/include/dt-bindings/regulator/st,stm32mp25-regulator.h b/include/dt-bindings/regulator/st,stm32mp25-regulator.h
new file mode 100644
index 000000000000..3c3d30911dd0
--- /dev/null
+++ b/include/dt-bindings/regulator/st,stm32mp25-regulator.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2024, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef __DT_BINDINGS_REGULATOR_ST_STM32MP25_REGULATOR_H
+#define __DT_BINDINGS_REGULATOR_ST_STM32MP25_REGULATOR_H
+
+/* SCMI voltage domain identifiers */
+
+/* SOC Internal regulators */
+#define VOLTD_SCMI_VDDIO1 0
+#define VOLTD_SCMI_VDDIO2 1
+#define VOLTD_SCMI_VDDIO3 2
+#define VOLTD_SCMI_VDDIO4 3
+#define VOLTD_SCMI_VDDIO 4
+#define VOLTD_SCMI_UCPD 5
+#define VOLTD_SCMI_USB33 6
+#define VOLTD_SCMI_ADC 7
+#define VOLTD_SCMI_GPU 8
+#define VOLTD_SCMI_VREFBUF 9
+
+/* STPMIC2 regulators */
+#define VOLTD_SCMI_STPMIC2_BUCK1 10
+#define VOLTD_SCMI_STPMIC2_BUCK2 11
+#define VOLTD_SCMI_STPMIC2_BUCK3 12
+#define VOLTD_SCMI_STPMIC2_BUCK4 13
+#define VOLTD_SCMI_STPMIC2_BUCK5 14
+#define VOLTD_SCMI_STPMIC2_BUCK6 15
+#define VOLTD_SCMI_STPMIC2_BUCK7 16
+#define VOLTD_SCMI_STPMIC2_LDO1 17
+#define VOLTD_SCMI_STPMIC2_LDO2 18
+#define VOLTD_SCMI_STPMIC2_LDO3 19
+#define VOLTD_SCMI_STPMIC2_LDO4 20
+#define VOLTD_SCMI_STPMIC2_LDO5 21
+#define VOLTD_SCMI_STPMIC2_LDO6 22
+#define VOLTD_SCMI_STPMIC2_LDO7 23
+#define VOLTD_SCMI_STPMIC2_LDO8 24
+#define VOLTD_SCMI_STPMIC2_REFDDR 25
+
+/* External regulators */
+#define VOLTD_SCMI_REGU0 26
+#define VOLTD_SCMI_REGU1 27
+#define VOLTD_SCMI_REGU2 28
+#define VOLTD_SCMI_REGU3 29
+#define VOLTD_SCMI_REGU4 30
+
+#endif /* __DT_BINDINGS_REGULATOR_ST_STM32MP25_REGULATOR_H */
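The VOLTD_SCMI_* identifiers serve as the reg value (and unit address) of regulator subnodes under the SCMI voltage-domain protocol node; a sketch, with node labels and the vddcore name assumed:

    firmware {
            scmi {
                    protocol@17 {
                            regulators {
                                    #address-cells = <1>;
                                    #size-cells = <0>;

                                    vddcore: regulator@11 {
                                            reg = <VOLTD_SCMI_STPMIC2_BUCK2>;	/* 11 */
                                            regulator-name = "vddcore";
                                    };
                            };
                    };
            };
    };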
diff --git a/include/dt-bindings/regulator/ti,tps62864.h b/include/dt-bindings/regulator/ti,tps62864.h
new file mode 100644
index 000000000000..8db31f23d956
--- /dev/null
+++ b/include/dt-bindings/regulator/ti,tps62864.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_REGULATOR_TI_TPS62864_H
+#define _DT_BINDINGS_REGULATOR_TI_TPS62864_H
+
+#define TPS62864_MODE_NORMAL 0
+#define TPS62864_MODE_FPWM 1
+
+#endif
diff --git a/include/dt-bindings/reset/airoha,en7523-reset.h b/include/dt-bindings/reset/airoha,en7523-reset.h
new file mode 100644
index 000000000000..211e8a23a21c
--- /dev/null
+++ b/include/dt-bindings/reset/airoha,en7523-reset.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 iopsys Software Solutions AB.
+ * Copyright (C) 2025 Genexis AB.
+ *
+ * Author: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
+ *
+ * based on
+ * include/dt-bindings/reset/airoha,en7581-reset.h
+ * by Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7523_H_
+#define __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7523_H_
+
+/* RST_CTRL2 */
+#define EN7523_XPON_PHY_RST 0
+#define EN7523_XSI_MAC_RST 1
+#define EN7523_XSI_PHY_RST 2
+#define EN7523_NPU_RST 3
+#define EN7523_I2S_RST 4
+#define EN7523_TRNG_RST 5
+#define EN7523_TRNG_MSTART_RST 6
+#define EN7523_DUAL_HSI0_RST 7
+#define EN7523_DUAL_HSI1_RST 8
+#define EN7523_HSI_RST 9
+#define EN7523_DUAL_HSI0_MAC_RST 10
+#define EN7523_DUAL_HSI1_MAC_RST 11
+#define EN7523_HSI_MAC_RST 12
+#define EN7523_WDMA_RST 13
+#define EN7523_WOE0_RST 14
+#define EN7523_WOE1_RST 15
+#define EN7523_HSDMA_RST 16
+#define EN7523_I2C2RBUS_RST 17
+#define EN7523_TDMA_RST 18
+/* RST_CTRL1 */
+#define EN7523_PCM1_ZSI_ISI_RST 19
+#define EN7523_FE_PDMA_RST 20
+#define EN7523_FE_QDMA_RST 21
+#define EN7523_PCM_SPIWP_RST 22
+#define EN7523_CRYPTO_RST 23
+#define EN7523_TIMER_RST 24
+#define EN7523_PCM1_RST 25
+#define EN7523_UART_RST 26
+#define EN7523_GPIO_RST 27
+#define EN7523_GDMA_RST 28
+#define EN7523_I2C_MASTER_RST 29
+#define EN7523_PCM2_ZSI_ISI_RST 30
+#define EN7523_SFC_RST 31
+#define EN7523_UART2_RST 32
+#define EN7523_GDMP_RST 33
+#define EN7523_FE_RST 34
+#define EN7523_USB_HOST_P0_RST 35
+#define EN7523_GSW_RST 36
+#define EN7523_SFC2_PCM_RST 37
+#define EN7523_PCIE0_RST 38
+#define EN7523_PCIE1_RST 39
+#define EN7523_PCIE_HB_RST 40
+#define EN7523_XPON_MAC_RST 41
+
+#endif /* __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7523_H_ */
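An illustrative reset consumer; the &scuclk provider label is an assumption:

    pcie0: pcie@1fa91000 {
            resets = <&scuclk EN7523_PCIE0_RST>;
    };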
diff --git a/include/dt-bindings/reset/airoha,en7581-reset.h b/include/dt-bindings/reset/airoha,en7581-reset.h
new file mode 100644
index 000000000000..6544a1790b83
--- /dev/null
+++ b/include/dt-bindings/reset/airoha,en7581-reset.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7581_H_
+#define __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7581_H_
+
+/* RST_CTRL2 */
+#define EN7581_XPON_PHY_RST 0
+#define EN7581_CPU_TIMER2_RST 1
+#define EN7581_HSUART_RST 2
+#define EN7581_UART4_RST 3
+#define EN7581_UART5_RST 4
+#define EN7581_I2C2_RST 5
+#define EN7581_XSI_MAC_RST 6
+#define EN7581_XSI_PHY_RST 7
+#define EN7581_NPU_RST 8
+#define EN7581_I2S_RST 9
+#define EN7581_TRNG_RST 10
+#define EN7581_TRNG_MSTART_RST 11
+#define EN7581_DUAL_HSI0_RST 12
+#define EN7581_DUAL_HSI1_RST 13
+#define EN7581_HSI_RST 14
+#define EN7581_DUAL_HSI0_MAC_RST 15
+#define EN7581_DUAL_HSI1_MAC_RST 16
+#define EN7581_HSI_MAC_RST 17
+#define EN7581_WDMA_RST 18
+#define EN7581_WOE0_RST 19
+#define EN7581_WOE1_RST 20
+#define EN7581_HSDMA_RST 21
+#define EN7581_TDMA_RST 22
+#define EN7581_EMMC_RST 23
+#define EN7581_SOE_RST 24
+#define EN7581_PCIE2_RST 25
+#define EN7581_XFP_MAC_RST 26
+#define EN7581_USB_HOST_P1_RST 27
+#define EN7581_USB_HOST_P1_U3_PHY_RST 28
+/* RST_CTRL1 */
+#define EN7581_PCM1_ZSI_ISI_RST 29
+#define EN7581_FE_PDMA_RST 30
+#define EN7581_FE_QDMA_RST 31
+#define EN7581_PCM_SPIWP_RST 32
+#define EN7581_CRYPTO_RST 33
+#define EN7581_TIMER_RST 34
+#define EN7581_PCM1_RST 35
+#define EN7581_UART_RST 36
+#define EN7581_GPIO_RST 37
+#define EN7581_GDMA_RST 38
+#define EN7581_I2C_MASTER_RST 39
+#define EN7581_PCM2_ZSI_ISI_RST 40
+#define EN7581_SFC_RST 41
+#define EN7581_UART2_RST 42
+#define EN7581_GDMP_RST 43
+#define EN7581_FE_RST 44
+#define EN7581_USB_HOST_P0_RST 45
+#define EN7581_GSW_RST 46
+#define EN7581_SFC2_PCM_RST 47
+#define EN7581_PCIE0_RST 48
+#define EN7581_PCIE1_RST 49
+#define EN7581_CPU_TIMER_RST 50
+#define EN7581_PCIE_HB_RST 51
+#define EN7581_XPON_MAC_RST 52
+
+#endif /* __DT_BINDINGS_RESET_CONTROLLER_AIROHA_EN7581_H_ */
diff --git a/include/dt-bindings/reset/altr,rst-mgr-s10.h b/include/dt-bindings/reset/altr,rst-mgr-s10.h
index 70ea3a09dbe1..04c4d0c6fd34 100644
--- a/include/dt-bindings/reset/altr,rst-mgr-s10.h
+++ b/include/dt-bindings/reset/altr,rst-mgr-s10.h
@@ -63,12 +63,15 @@
#define I2C2_RESET 74
#define I2C3_RESET 75
#define I2C4_RESET 76
-/* 77-79 is empty */
+#define I3C0_RESET 77
+#define I3C1_RESET 78
+/* 79 is empty */
#define UART0_RESET 80
#define UART1_RESET 81
/* 82-87 is empty */
#define GPIO0_RESET 88
#define GPIO1_RESET 89
+#define WATCHDOG4_RESET 90
/* BRGMODRST */
#define SOC2FPGA_RESET 96
diff --git a/include/dt-bindings/reset/amlogic,c3-reset.h b/include/dt-bindings/reset/amlogic,c3-reset.h
new file mode 100644
index 000000000000..d9127863f603
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,c3-reset.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_C3_RESET_H
+#define _DT_BINDINGS_AMLOGIC_C3_RESET_H
+
+/* RESET0 */
+/* 0-3 */
+#define RESET_USBCTRL 4
+/* 5-7 */
+#define RESET_USBPHY20 8
+/* 9 */
+#define RESET_USB2DRD 10
+#define RESET_MIPI_DSI_HOST 11
+#define RESET_MIPI_DSI_PHY 12
+/* 13-20 */
+#define RESET_GE2D 21
+#define RESET_DWAP 22
+/* 23-31 */
+
+/* RESET1 */
+#define RESET_AUDIO 32
+/* 33-34 */
+#define RESET_DDRAPB 35
+#define RESET_DDR 36
+#define RESET_DOS_CAPB3 37
+#define RESET_DOS 38
+/* 39-46 */
+#define RESET_NNA 47
+#define RESET_ETHERNET 48
+#define RESET_ISP 49
+#define RESET_VC9000E_APB 50
+#define RESET_VC9000E_A 51
+/* 52 */
+#define RESET_VC9000E_CORE 53
+/* 54-63 */
+
+/* RESET2 */
+#define RESET_ABUS_ARB 64
+#define RESET_IRCTRL 65
+/* 66 */
+#define RESET_TEMP_PII 67
+/* 68-72 */
+#define RESET_SPICC_0 73
+#define RESET_SPICC_1 74
+#define RESET_RSA 75
+
+/* 76-79 */
+#define RESET_MSR_CLK 80
+#define RESET_SPIFC 81
+#define RESET_SAR_ADC 82
+/* 83-87 */
+#define RESET_ACODEC 88
+/* 89-90 */
+#define RESET_WATCHDOG 91
+/* 92-95 */
+
+/* RESET3 */
+#define RESET_ISP_NIC_GPV 96
+#define RESET_ISP_NIC_MAIN 97
+#define RESET_ISP_NIC_VCLK 98
+#define RESET_ISP_NIC_VOUT 99
+#define RESET_ISP_NIC_ALL 100
+#define RESET_VOUT 101
+#define RESET_VOUT_VENC 102
+/* 103 */
+#define RESET_CVE_NIC_GPV 104
+#define RESET_CVE_NIC_MAIN 105
+#define RESET_CVE_NIC_GE2D 106
+#define RESET_CVE_NIC_DW 107
+#define RESET_CVE_NIC_CVE 108
+#define RESET_CVE_NIC_ALL 109
+#define RESET_CVE 110
+/* 111-127 */
+
+/* RESET4 */
+#define RESET_RTC 128
+#define RESET_PWM_AB 129
+#define RESET_PWM_CD 130
+#define RESET_PWM_EF 131
+#define RESET_PWM_GH 132
+#define RESET_PWM_IJ 133
+#define RESET_PWM_KL 134
+#define RESET_PWM_MN 135
+/* 136-137 */
+#define RESET_UART_A 138
+#define RESET_UART_B 139
+#define RESET_UART_C 140
+#define RESET_UART_D 141
+#define RESET_UART_E 142
+#define RESET_UART_F 143
+#define RESET_I2C_S_A 144
+#define RESET_I2C_M_A 145
+#define RESET_I2C_M_B 146
+#define RESET_I2C_M_C 147
+#define RESET_I2C_M_D 148
+/* 149-151 */
+#define RESET_SD_EMMC_A 152
+#define RESET_SD_EMMC_B 153
+#define RESET_SD_EMMC_C 154
+
+/* RESET5 */
+/* 160-172 */
+#define RESET_BRG_NIC_NNA 173
+#define RESET_BRG_MUX_NIC_MAIN 174
+#define RESET_BRG_AO_NIC_ALL 175
+/* 176-183 */
+#define RESET_BRG_NIC_VAPB 184
+#define RESET_BRG_NIC_SDIO_B 185
+#define RESET_BRG_NIC_SDIO_A 186
+#define RESET_BRG_NIC_EMMC 187
+#define RESET_BRG_NIC_DSU 188
+#define RESET_BRG_NIC_SYSCLK 189
+#define RESET_BRG_NIC_MAIN 190
+#define RESET_BRG_NIC_ALL 191
+
+#endif
diff --git a/include/dt-bindings/reset/amlogic,meson-a1-audio-reset.h b/include/dt-bindings/reset/amlogic,meson-a1-audio-reset.h
new file mode 100644
index 000000000000..7693552f1507
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,meson-a1-audio-reset.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2024, SaluteDevices. All Rights Reserved.
+ *
+ * Author: Jan Dakinevich <jan.dakinevich@salutedevices.com>
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_MESON_A1_AUDIO_RESET_H
+#define _DT_BINDINGS_AMLOGIC_MESON_A1_AUDIO_RESET_H
+
+#define AUD_RESET_DDRARB 0
+#define AUD_RESET_TDMIN_A 1
+#define AUD_RESET_TDMIN_B 2
+#define AUD_RESET_TDMIN_LB 3
+#define AUD_RESET_LOOPBACK 4
+#define AUD_RESET_TDMOUT_A 5
+#define AUD_RESET_TDMOUT_B 6
+#define AUD_RESET_FRDDR_A 7
+#define AUD_RESET_FRDDR_B 8
+#define AUD_RESET_TODDR_A 9
+#define AUD_RESET_TODDR_B 10
+#define AUD_RESET_SPDIFIN 11
+#define AUD_RESET_RESAMPLE 12
+#define AUD_RESET_EQDRC 13
+#define AUD_RESET_LOCKER 14
+#define AUD_RESET_TOACODEC 30
+#define AUD_RESET_CLKTREE 31
+
+#define AUD_VAD_RESET_DDRARB 0
+#define AUD_VAD_RESET_PDM 1
+#define AUD_VAD_RESET_TDMIN_VAD 2
+#define AUD_VAD_RESET_TODDR_VAD 3
+#define AUD_VAD_RESET_TOVAD 4
+#define AUD_VAD_RESET_CLKTREE 5
+
+#endif /* _DT_BINDINGS_AMLOGIC_MESON_A1_AUDIO_RESET_H */
diff --git a/include/dt-bindings/reset/amlogic,meson-g12a-reset.h b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h
index 6d487c5eba2c..45f6b8a951d0 100644
--- a/include/dt-bindings/reset/amlogic,meson-g12a-reset.h
+++ b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h
@@ -69,7 +69,9 @@
#define RESET_PARSER_FETCH 72
#define RESET_CTL 73
#define RESET_PARSER_TOP 74
-/* 75-77 */
+/* 75 */
+#define RESET_NNA 76
+/* 77 */
#define RESET_DVALIN 78
#define RESET_HDMITX 79
/* 80-95 */
diff --git a/include/dt-bindings/reset/amlogic,meson-s4-reset.h b/include/dt-bindings/reset/amlogic,meson-s4-reset.h
new file mode 100644
index 000000000000..eab428eb8ad6
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,meson-s4-reset.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2021 Amlogic, Inc. All rights reserved.
+ * Author: Zelong Dong <zelong.dong@amlogic.com>
+ *
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_MESON_S4_RESET_H
+#define _DT_BINDINGS_AMLOGIC_MESON_S4_RESET_H
+
+/* RESET0 */
+#define RESET_USB_DDR0 0
+#define RESET_USB_DDR1 1
+#define RESET_USB_DDR2 2
+#define RESET_USB_DDR3 3
+#define RESET_USBCTRL 4
+/* 5-7 */
+#define RESET_USBPHY20 8
+#define RESET_USBPHY21 9
+/* 10-15 */
+#define RESET_HDMITX_APB 16
+#define RESET_BRG_VCBUS_DEC 17
+#define RESET_VCBUS 18
+#define RESET_VID_PLL_DIV 19
+#define RESET_VDI6 20
+#define RESET_GE2D 21
+#define RESET_HDMITXPHY 22
+#define RESET_VID_LOCK 23
+#define RESET_VENCL 24
+#define RESET_VDAC 25
+#define RESET_VENCP 26
+#define RESET_VENCI 27
+#define RESET_RDMA 28
+#define RESET_HDMI_TX 29
+#define RESET_VIU 30
+#define RESET_VENC 31
+
+/* RESET1 */
+#define RESET_AUDIO 32
+#define RESET_MALI_APB 33
+#define RESET_MALI 34
+#define RESET_DDR_APB 35
+#define RESET_DDR 36
+#define RESET_DOS_APB 37
+#define RESET_DOS 38
+/* 39-47 */
+#define RESET_ETH 48
+/* 49-51 */
+#define RESET_DEMOD 52
+/* 53-63 */
+
+/* RESET2 */
+#define RESET_ABUS_ARB 64
+#define RESET_IR_CTRL 65
+#define RESET_TEMPSENSOR_DDR 66
+#define RESET_TEMPSENSOR_PLL 67
+/* 68-71 */
+#define RESET_SMART_CARD 72
+#define RESET_SPICC0 73
+/* 74 */
+#define RESET_RSA 75
+/* 76-79 */
+#define RESET_MSR_CLK 80
+#define RESET_SPIFC 81
+#define RESET_SARADC 82
+/* 83-87 */
+#define RESET_ACODEC 88
+#define RESET_CEC 89
+#define RESET_AFIFO 90
+#define RESET_WATCHDOG 91
+/* 92-95 */
+
+/* RESET3 */
+/* 96-127 */
+
+/* RESET4 */
+/* 128-131 */
+#define RESET_PWM_AB 132
+#define RESET_PWM_CD 133
+#define RESET_PWM_EF 134
+#define RESET_PWM_GH 135
+#define RESET_PWM_IJ 136
+/* 137 */
+#define RESET_UART_A 138
+#define RESET_UART_B 139
+#define RESET_UART_C 140
+#define RESET_UART_D 141
+#define RESET_UART_E 142
+/* 143 */
+#define RESET_I2C_S_A 144
+#define RESET_I2C_M_A 145
+#define RESET_I2C_M_B 146
+#define RESET_I2C_M_C 147
+#define RESET_I2C_M_D 148
+#define RESET_I2C_M_E 149
+/* 150-151 */
+#define RESET_SD_EMMC_A 152
+#define RESET_SD_EMMC_B 153
+#define RESET_NAND_EMMC 154
+/* 155-159 */
+
+/* RESET5 */
+#define RESET_BRG_VDEC_PIPL0 160
+#define RESET_BRG_HEVCF_PIPL0 161
+/* 162 */
+#define RESET_BRG_HCODEC_PIPL0 163
+#define RESET_BRG_GE2D_PIPL0 164
+#define RESET_BRG_VPU_PIPL0 165
+#define RESET_BRG_CPU_PIPL0 166
+#define RESET_BRG_MALI_PIPL0 167
+/* 168 */
+#define RESET_BRG_MALI_PIPL1 169
+/* 170-171 */
+#define RESET_BRG_HEVCF_PIPL1 172
+#define RESET_BRG_HEVCB_PIPL1 173
+/* 174-183 */
+#define RESET_RAMA 184
+/* 185-186 */
+#define RESET_BRG_NIC_VAPB 187
+#define RESET_BRG_NIC_DSU 188
+#define RESET_BRG_NIC_SYSCLK 189
+#define RESET_BRG_NIC_MAIN 190
+#define RESET_BRG_NIC_ALL 191
+
+#endif
diff --git a/include/dt-bindings/reset/aspeed,ast2700-scu.h b/include/dt-bindings/reset/aspeed,ast2700-scu.h
new file mode 100644
index 000000000000..d53c719b7a66
--- /dev/null
+++ b/include/dt-bindings/reset/aspeed,ast2700-scu.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Device Tree binding constants for AST2700 reset controller.
+ *
+ * Copyright (c) 2024 Aspeed Technology Inc.
+ */
+
+#ifndef _MACH_ASPEED_AST2700_RESET_H_
+#define _MACH_ASPEED_AST2700_RESET_H_
+
+/* SOC0 */
+#define SCU0_RESET_SDRAM 0
+#define SCU0_RESET_DDRPHY 1
+#define SCU0_RESET_RSA 2
+#define SCU0_RESET_SHA3 3
+#define SCU0_RESET_HACE 4
+#define SCU0_RESET_SOC 5
+#define SCU0_RESET_VIDEO 6
+#define SCU0_RESET_2D 7
+#define SCU0_RESET_PCIS 8
+#define SCU0_RESET_RVAS0 9
+#define SCU0_RESET_RVAS1 10
+#define SCU0_RESET_SM3 11
+#define SCU0_RESET_SM4 12
+#define SCU0_RESET_CRT0 13
+#define SCU0_RESET_ECC 14
+#define SCU0_RESET_DP_PCI 15
+#define SCU0_RESET_UFS 16
+#define SCU0_RESET_EMMC 17
+#define SCU0_RESET_PCIE1RST 18
+#define SCU0_RESET_PCIE1RSTOE 19
+#define SCU0_RESET_PCIE0RST 20
+#define SCU0_RESET_PCIE0RSTOE 21
+#define SCU0_RESET_JTAG 22
+#define SCU0_RESET_MCTP0 23
+#define SCU0_RESET_MCTP1 24
+#define SCU0_RESET_XDMA0 25
+#define SCU0_RESET_XDMA1 26
+#define SCU0_RESET_H2X1 27
+#define SCU0_RESET_DP 28
+#define SCU0_RESET_DP_MCU 29
+#define SCU0_RESET_SSP 30
+#define SCU0_RESET_H2X0 31
+#define SCU0_RESET_PORTA_VHUB 32
+#define SCU0_RESET_PORTA_PHY3 33
+#define SCU0_RESET_PORTA_XHCI 34
+#define SCU0_RESET_PORTB_VHUB 35
+#define SCU0_RESET_PORTB_PHY3 36
+#define SCU0_RESET_PORTB_XHCI 37
+#define SCU0_RESET_PORTA_VHUB_EHCI 38
+#define SCU0_RESET_PORTB_VHUB_EHCI 39
+#define SCU0_RESET_UHCI 40
+#define SCU0_RESET_TSP 41
+#define SCU0_RESET_E2M0 42
+#define SCU0_RESET_E2M1 43
+#define SCU0_RESET_VLINK 44
+
+/* SOC1 */
+#define SCU1_RESET_LPC0 0
+#define SCU1_RESET_LPC1 1
+#define SCU1_RESET_MII 2
+#define SCU1_RESET_PECI 3
+#define SCU1_RESET_PWM 4
+#define SCU1_RESET_MAC0 5
+#define SCU1_RESET_MAC1 6
+#define SCU1_RESET_MAC2 7
+#define SCU1_RESET_ADC 8
+#define SCU1_RESET_SD 9
+#define SCU1_RESET_ESPI0 10
+#define SCU1_RESET_ESPI1 11
+#define SCU1_RESET_JTAG1 12
+#define SCU1_RESET_SPI0 13
+#define SCU1_RESET_SPI1 14
+#define SCU1_RESET_SPI2 15
+#define SCU1_RESET_I3C0 16
+#define SCU1_RESET_I3C1 17
+#define SCU1_RESET_I3C2 18
+#define SCU1_RESET_I3C3 19
+#define SCU1_RESET_I3C4 20
+#define SCU1_RESET_I3C5 21
+#define SCU1_RESET_I3C6 22
+#define SCU1_RESET_I3C7 23
+#define SCU1_RESET_I3C8 24
+#define SCU1_RESET_I3C9 25
+#define SCU1_RESET_I3C10 26
+#define SCU1_RESET_I3C11 27
+#define SCU1_RESET_I3C12 28
+#define SCU1_RESET_I3C13 29
+#define SCU1_RESET_I3C14 30
+#define SCU1_RESET_I3C15 31
+#define SCU1_RESET_MCU0 32
+#define SCU1_RESET_MCU1 33
+#define SCU1_RESET_H2A_SPI1 34
+#define SCU1_RESET_H2A_SPI2 35
+#define SCU1_RESET_UART0 36
+#define SCU1_RESET_UART1 37
+#define SCU1_RESET_UART2 38
+#define SCU1_RESET_UART3 39
+#define SCU1_RESET_I2C_FILTER 40
+#define SCU1_RESET_CALIPTRA 41
+#define SCU1_RESET_XDMA 42
+#define SCU1_RESET_FSI 43
+#define SCU1_RESET_CAN 44
+#define SCU1_RESET_MCTP 45
+#define SCU1_RESET_I2C 46
+#define SCU1_RESET_UART6 47
+#define SCU1_RESET_UART7 48
+#define SCU1_RESET_UART8 49
+#define SCU1_RESET_UART9 50
+#define SCU1_RESET_LTPI0 51
+#define SCU1_RESET_VGAL 52
+#define SCU1_RESET_LTPI1 53
+#define SCU1_RESET_ACE 54
+#define SCU1_RESET_E2M 55
+#define SCU1_RESET_UHCI 56
+#define SCU1_RESET_PORTC_USB2UART 57
+#define SCU1_RESET_PORTC_VHUB_EHCI 58
+#define SCU1_RESET_PORTD_USB2UART 59
+#define SCU1_RESET_PORTD_VHUB_EHCI 60
+#define SCU1_RESET_H2X 61
+#define SCU1_RESET_I3CDMA 62
+#define SCU1_RESET_PCIE2RST 63
+
+#endif /* _MACH_ASPEED_AST2700_RESET_H_ */
diff --git a/include/dt-bindings/reset/bcm63268-reset.h b/include/dt-bindings/reset/bcm63268-reset.h
index 6a6403a4c2d5..d87a7882782a 100644
--- a/include/dt-bindings/reset/bcm63268-reset.h
+++ b/include/dt-bindings/reset/bcm63268-reset.h
@@ -23,4 +23,8 @@
#define BCM63268_RST_PCIE_HARD 17
#define BCM63268_RST_GPHY 18

+#define BCM63268_TRST_SW 29
+#define BCM63268_TRST_HW 30
+#define BCM63268_TRST_POR 31
+
#endif /* __DT_BINDINGS_RESET_BCM63268_H */
diff --git a/include/dt-bindings/reset/bt1-ccu.h b/include/dt-bindings/reset/bt1-ccu.h
index 3578e83026bc..c691efaa678f 100644
--- a/include/dt-bindings/reset/bt1-ccu.h
+++ b/include/dt-bindings/reset/bt1-ccu.h
@@ -21,5 +21,14 @@

#define CCU_SYS_SATA_REF_RST 0
#define CCU_SYS_APB_RST 1
+#define CCU_SYS_DDR_FULL_RST 2
+#define CCU_SYS_DDR_INIT_RST 3
+#define CCU_SYS_PCIE_PCS_PHY_RST 4
+#define CCU_SYS_PCIE_PIPE0_RST 5
+#define CCU_SYS_PCIE_CORE_RST 6
+#define CCU_SYS_PCIE_PWR_RST 7
+#define CCU_SYS_PCIE_STICKY_RST 8
+#define CCU_SYS_PCIE_NSTICKY_RST 9
+#define CCU_SYS_PCIE_HOT_RST 10

#endif /* __DT_BINDINGS_RESET_BT1_CCU_H */
diff --git a/include/dt-bindings/reset/canaan,k230-rst.h b/include/dt-bindings/reset/canaan,k230-rst.h
new file mode 100644
index 000000000000..e4f6612607fe
--- /dev/null
+++ b/include/dt-bindings/reset/canaan,k230-rst.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023-2024 Canaan Bright Sight Co., Ltd
+ * Copyright (C) 2024-2025 Junhui Liu <junhui.liu@pigmoral.tech>
+ */
+#ifndef _DT_BINDINGS_CANAAN_K230_RST_H_
+#define _DT_BINDINGS_CANAAN_K230_RST_H_
+
+#define RST_CPU0 0
+#define RST_CPU1 1
+#define RST_CPU0_FLUSH 2
+#define RST_CPU1_FLUSH 3
+#define RST_AI 4
+#define RST_VPU 5
+#define RST_HISYS 6
+#define RST_HISYS_AHB 7
+#define RST_SDIO0 8
+#define RST_SDIO1 9
+#define RST_SDIO_AXI 10
+#define RST_USB0 11
+#define RST_USB1 12
+#define RST_USB0_AHB 13
+#define RST_USB1_AHB 14
+#define RST_SPI0 15
+#define RST_SPI1 16
+#define RST_SPI2 17
+#define RST_SEC 18
+#define RST_PDMA 19
+#define RST_SDMA 20
+#define RST_DECOMPRESS 21
+#define RST_SRAM 22
+#define RST_SHRM_AXIM 23
+#define RST_SHRM_AXIS 24
+#define RST_NONAI2D 25
+#define RST_MCTL 26
+#define RST_ISP 27
+#define RST_ISP_DW 28
+#define RST_DPU 29
+#define RST_DISP 30
+#define RST_GPU 31
+#define RST_AUDIO 32
+#define RST_TIMER0 33
+#define RST_TIMER1 34
+#define RST_TIMER2 35
+#define RST_TIMER3 36
+#define RST_TIMER4 37
+#define RST_TIMER5 38
+#define RST_TIMER_APB 39
+#define RST_HDI 40
+#define RST_WDT0 41
+#define RST_WDT1 42
+#define RST_WDT0_APB 43
+#define RST_WDT1_APB 44
+#define RST_TS_APB 45
+#define RST_MAILBOX 46
+#define RST_STC 47
+#define RST_PMU 48
+#define RST_LOSYS_APB 49
+#define RST_UART0 50
+#define RST_UART1 51
+#define RST_UART2 52
+#define RST_UART3 53
+#define RST_UART4 54
+#define RST_I2C0 55
+#define RST_I2C1 56
+#define RST_I2C2 57
+#define RST_I2C3 58
+#define RST_I2C4 59
+#define RST_JAMLINK0_APB 60
+#define RST_JAMLINK1_APB 61
+#define RST_JAMLINK2_APB 62
+#define RST_JAMLINK3_APB 63
+#define RST_CODEC_APB 64
+#define RST_GPIO_DB 65
+#define RST_GPIO_APB 66
+#define RST_ADC 67
+#define RST_ADC_APB 68
+#define RST_PWM_APB 69
+#define RST_SHRM_APB 70
+#define RST_CSI0 71
+#define RST_CSI1 72
+#define RST_CSI2 73
+#define RST_CSI_DPHY 74
+#define RST_ISP_AHB 75
+#define RST_M0 76
+#define RST_M1 77
+#define RST_M2 78
+#define RST_SPI2AXI 79
+
+#endif
diff --git a/include/dt-bindings/reset/delta,tn48m-reset.h b/include/dt-bindings/reset/delta,tn48m-reset.h
new file mode 100644
index 000000000000..d4e9ed12de3e
--- /dev/null
+++ b/include/dt-bindings/reset/delta,tn48m-reset.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Delta TN48M CPLD GPIO driver
+ *
+ * Copyright (C) 2021 Sartura Ltd.
+ *
+ * Author: Robert Marko <robert.marko@sartura.hr>
+ */
+
+#ifndef _DT_BINDINGS_RESET_TN48M_H
+#define _DT_BINDINGS_RESET_TN48M_H
+
+#define CPU_88F7040_RESET 0
+#define CPU_88F6820_RESET 1
+#define MAC_98DX3265_RESET 2
+#define PHY_88E1680_RESET 3
+#define PHY_88E1512_RESET 4
+#define POE_RESET 5
+
+#endif /* _DT_BINDINGS_RESET_TN48M_H */
diff --git a/include/dt-bindings/reset/eswin,eic7700-reset.h b/include/dt-bindings/reset/eswin,eic7700-reset.h
new file mode 100644
index 000000000000..a370c9f74307
--- /dev/null
+++ b/include/dt-bindings/reset/eswin,eic7700-reset.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright 2025, Beijing ESWIN Computing Technology Co., Ltd.
+ * All rights reserved.
+ *
+ * Device Tree binding constants for EIC7700 reset controller.
+ *
+ * Authors:
+ * Yifeng Huang <huangyifeng@eswincomputing.com>
+ * Xuyang Dong <dongxuyang@eswincomputing.com>
+ */
+
+#ifndef __DT_ESWIN_EIC7700_RESET_H__
+#define __DT_ESWIN_EIC7700_RESET_H__
+
+#define EIC7700_RESET_NOC_NSP 0
+#define EIC7700_RESET_NOC_CFG 1
+#define EIC7700_RESET_RNOC_NSP 2
+#define EIC7700_RESET_SNOC_TCU 3
+#define EIC7700_RESET_SNOC_U84 4
+#define EIC7700_RESET_SNOC_PCIE_XSR 5
+#define EIC7700_RESET_SNOC_PCIE_XMR 6
+#define EIC7700_RESET_SNOC_PCIE_PR 7
+#define EIC7700_RESET_SNOC_NPU 8
+#define EIC7700_RESET_SNOC_JTAG 9
+#define EIC7700_RESET_SNOC_DSP 10
+#define EIC7700_RESET_SNOC_DDRC1_P2 11
+#define EIC7700_RESET_SNOC_DDRC1_P1 12
+#define EIC7700_RESET_SNOC_DDRC0_P2 13
+#define EIC7700_RESET_SNOC_DDRC0_P1 14
+#define EIC7700_RESET_SNOC_D2D 15
+#define EIC7700_RESET_SNOC_AON 16
+#define EIC7700_RESET_GPU_AXI 17
+#define EIC7700_RESET_GPU_CFG 18
+#define EIC7700_RESET_GPU_GRAY 19
+#define EIC7700_RESET_GPU_JONES 20
+#define EIC7700_RESET_GPU_SPU 21
+#define EIC7700_RESET_DSP_AXI 22
+#define EIC7700_RESET_DSP_CFG 23
+#define EIC7700_RESET_DSP_DIV4 24
+#define EIC7700_RESET_DSP_DIV0 25
+#define EIC7700_RESET_DSP_DIV1 26
+#define EIC7700_RESET_DSP_DIV2 27
+#define EIC7700_RESET_DSP_DIV3 28
+#define EIC7700_RESET_D2D_AXI 29
+#define EIC7700_RESET_D2D_CFG 30
+#define EIC7700_RESET_D2D_PRST 31
+#define EIC7700_RESET_D2D_RAW_PCS 32
+#define EIC7700_RESET_D2D_RX 33
+#define EIC7700_RESET_D2D_TX 34
+#define EIC7700_RESET_D2D_CORE 35
+#define EIC7700_RESET_DDR1_ARST 36
+#define EIC7700_RESET_DDR1_TRACE 37
+#define EIC7700_RESET_DDR0_ARST 38
+#define EIC7700_RESET_DDR_CFG 39
+#define EIC7700_RESET_DDR0_TRACE 40
+#define EIC7700_RESET_DDR_CORE 41
+#define EIC7700_RESET_DDR_PRST 42
+#define EIC7700_RESET_TCU_AXI 43
+#define EIC7700_RESET_TCU_CFG 44
+#define EIC7700_RESET_TCU_TBU0 45
+#define EIC7700_RESET_TCU_TBU1 46
+#define EIC7700_RESET_TCU_TBU2 47
+#define EIC7700_RESET_TCU_TBU3 48
+#define EIC7700_RESET_TCU_TBU4 49
+#define EIC7700_RESET_TCU_TBU5 50
+#define EIC7700_RESET_TCU_TBU6 51
+#define EIC7700_RESET_TCU_TBU7 52
+#define EIC7700_RESET_TCU_TBU8 53
+#define EIC7700_RESET_TCU_TBU9 54
+#define EIC7700_RESET_TCU_TBU10 55
+#define EIC7700_RESET_TCU_TBU11 56
+#define EIC7700_RESET_TCU_TBU12 57
+#define EIC7700_RESET_TCU_TBU13 58
+#define EIC7700_RESET_TCU_TBU14 59
+#define EIC7700_RESET_TCU_TBU15 60
+#define EIC7700_RESET_TCU_TBU16 61
+#define EIC7700_RESET_NPU_AXI 62
+#define EIC7700_RESET_NPU_CFG 63
+#define EIC7700_RESET_NPU_CORE 64
+#define EIC7700_RESET_NPU_E31CORE 65
+#define EIC7700_RESET_NPU_E31BUS 66
+#define EIC7700_RESET_NPU_E31DBG 67
+#define EIC7700_RESET_NPU_LLC 68
+#define EIC7700_RESET_HSP_AXI 69
+#define EIC7700_RESET_HSP_CFG 70
+#define EIC7700_RESET_HSP_POR 71
+#define EIC7700_RESET_MSHC0_PHY 72
+#define EIC7700_RESET_MSHC1_PHY 73
+#define EIC7700_RESET_MSHC2_PHY 74
+#define EIC7700_RESET_MSHC0_TXRX 75
+#define EIC7700_RESET_MSHC1_TXRX 76
+#define EIC7700_RESET_MSHC2_TXRX 77
+#define EIC7700_RESET_SATA_ASIC0 78
+#define EIC7700_RESET_SATA_OOB 79
+#define EIC7700_RESET_SATA_PMALIVE 80
+#define EIC7700_RESET_SATA_RBC 81
+#define EIC7700_RESET_DMA0 82
+#define EIC7700_RESET_HSP_DMA 83
+#define EIC7700_RESET_USB0_VAUX 84
+#define EIC7700_RESET_USB1_VAUX 85
+#define EIC7700_RESET_HSP_SD1_PRST 86
+#define EIC7700_RESET_HSP_SD0_PRST 87
+#define EIC7700_RESET_HSP_EMMC_PRST 88
+#define EIC7700_RESET_HSP_DMA_PRST 89
+#define EIC7700_RESET_HSP_SD1_ARST 90
+#define EIC7700_RESET_HSP_SD0_ARST 91
+#define EIC7700_RESET_HSP_EMMC_ARST 92
+#define EIC7700_RESET_HSP_DMA_ARST 93
+#define EIC7700_RESET_HSP_ETH1_ARST 94
+#define EIC7700_RESET_HSP_ETH0_ARST 95
+#define EIC7700_RESET_SATA_ARST 96
+#define EIC7700_RESET_PCIE_CFG 97
+#define EIC7700_RESET_PCIE_POWEUP 98
+#define EIC7700_RESET_PCIE_PERST 99
+#define EIC7700_RESET_I2C0 100
+#define EIC7700_RESET_I2C1 101
+#define EIC7700_RESET_I2C2 102
+#define EIC7700_RESET_I2C3 103
+#define EIC7700_RESET_I2C4 104
+#define EIC7700_RESET_I2C5 105
+#define EIC7700_RESET_I2C6 106
+#define EIC7700_RESET_I2C7 107
+#define EIC7700_RESET_I2C8 108
+#define EIC7700_RESET_I2C9 109
+#define EIC7700_RESET_FAN 110
+#define EIC7700_RESET_PVT0 111
+#define EIC7700_RESET_PVT1 112
+#define EIC7700_RESET_MBOX0 113
+#define EIC7700_RESET_MBOX1 114
+#define EIC7700_RESET_MBOX2 115
+#define EIC7700_RESET_MBOX3 116
+#define EIC7700_RESET_MBOX4 117
+#define EIC7700_RESET_MBOX5 118
+#define EIC7700_RESET_MBOX6 119
+#define EIC7700_RESET_MBOX7 120
+#define EIC7700_RESET_MBOX8 121
+#define EIC7700_RESET_MBOX9 122
+#define EIC7700_RESET_MBOX10 123
+#define EIC7700_RESET_MBOX11 124
+#define EIC7700_RESET_MBOX12 125
+#define EIC7700_RESET_MBOX13 126
+#define EIC7700_RESET_MBOX14 127
+#define EIC7700_RESET_MBOX15 128
+#define EIC7700_RESET_UART0 129
+#define EIC7700_RESET_UART1 130
+#define EIC7700_RESET_UART2 131
+#define EIC7700_RESET_UART3 132
+#define EIC7700_RESET_UART4 133
+#define EIC7700_RESET_GPIO0 134
+#define EIC7700_RESET_GPIO1 135
+#define EIC7700_RESET_TIMER 136
+#define EIC7700_RESET_SSI0 137
+#define EIC7700_RESET_SSI1 138
+#define EIC7700_RESET_WDT0 139
+#define EIC7700_RESET_WDT1 140
+#define EIC7700_RESET_WDT2 141
+#define EIC7700_RESET_WDT3 142
+#define EIC7700_RESET_LSP_CFG 143
+#define EIC7700_RESET_U84_CORE0 144
+#define EIC7700_RESET_U84_CORE1 145
+#define EIC7700_RESET_U84_CORE2 146
+#define EIC7700_RESET_U84_CORE3 147
+#define EIC7700_RESET_U84_BUS 148
+#define EIC7700_RESET_U84_DBG 149
+#define EIC7700_RESET_U84_TRACECOM 150
+#define EIC7700_RESET_U84_TRACE0 151
+#define EIC7700_RESET_U84_TRACE1 152
+#define EIC7700_RESET_U84_TRACE2 153
+#define EIC7700_RESET_U84_TRACE3 154
+#define EIC7700_RESET_SCPU_CORE 155
+#define EIC7700_RESET_SCPU_BUS 156
+#define EIC7700_RESET_SCPU_DBG 157
+#define EIC7700_RESET_LPCPU_CORE 158
+#define EIC7700_RESET_LPCPU_BUS 159
+#define EIC7700_RESET_LPCPU_DBG 160
+#define EIC7700_RESET_VC_CFG 161
+#define EIC7700_RESET_VC_AXI 162
+#define EIC7700_RESET_VC_MONCFG 163
+#define EIC7700_RESET_JD_CFG 164
+#define EIC7700_RESET_JD_AXI 165
+#define EIC7700_RESET_JE_CFG 166
+#define EIC7700_RESET_JE_AXI 167
+#define EIC7700_RESET_VD_CFG 168
+#define EIC7700_RESET_VD_AXI 169
+#define EIC7700_RESET_VE_AXI 170
+#define EIC7700_RESET_VE_CFG 171
+#define EIC7700_RESET_G2D_CORE 172
+#define EIC7700_RESET_G2D_CFG 173
+#define EIC7700_RESET_G2D_AXI 174
+#define EIC7700_RESET_VI_AXI 175
+#define EIC7700_RESET_VI_CFG 176
+#define EIC7700_RESET_VI_DWE 177
+#define EIC7700_RESET_DVP 178
+#define EIC7700_RESET_ISP0 179
+#define EIC7700_RESET_ISP1 180
+#define EIC7700_RESET_SHUTTR0 181
+#define EIC7700_RESET_SHUTTR1 182
+#define EIC7700_RESET_SHUTTR2 183
+#define EIC7700_RESET_SHUTTR3 184
+#define EIC7700_RESET_SHUTTR4 185
+#define EIC7700_RESET_SHUTTR5 186
+#define EIC7700_RESET_VO_MIPI 187
+#define EIC7700_RESET_VO_PRST 188
+#define EIC7700_RESET_VO_HDMI_PRST 189
+#define EIC7700_RESET_VO_HDMI_PHY 190
+#define EIC7700_RESET_VO_HDMI 191
+#define EIC7700_RESET_VO_I2S 192
+#define EIC7700_RESET_VO_I2S_PRST 193
+#define EIC7700_RESET_VO_AXI 194
+#define EIC7700_RESET_VO_CFG 195
+#define EIC7700_RESET_VO_DC 196
+#define EIC7700_RESET_VO_DC_PRST 197
+#define EIC7700_RESET_BOOTSPI_HRST 198
+#define EIC7700_RESET_BOOTSPI 199
+#define EIC7700_RESET_ANO1 200
+#define EIC7700_RESET_ANO0 201
+#define EIC7700_RESET_DMA1_ARST 202
+#define EIC7700_RESET_DMA1_HRST 203
+#define EIC7700_RESET_FPRT 204
+#define EIC7700_RESET_HBLOCK 205
+#define EIC7700_RESET_SECSR 206
+#define EIC7700_RESET_OTP 207
+#define EIC7700_RESET_PKA 208
+#define EIC7700_RESET_SPACC 209
+#define EIC7700_RESET_TRNG 210
+#define EIC7700_RESET_TIMER0_0 211
+#define EIC7700_RESET_TIMER0_1 212
+#define EIC7700_RESET_TIMER0_2 213
+#define EIC7700_RESET_TIMER0_3 214
+#define EIC7700_RESET_TIMER0_4 215
+#define EIC7700_RESET_TIMER0_5 216
+#define EIC7700_RESET_TIMER0_6 217
+#define EIC7700_RESET_TIMER0_7 218
+#define EIC7700_RESET_TIMER0_N 219
+#define EIC7700_RESET_TIMER1_0 220
+#define EIC7700_RESET_TIMER1_1 221
+#define EIC7700_RESET_TIMER1_2 222
+#define EIC7700_RESET_TIMER1_3 223
+#define EIC7700_RESET_TIMER1_4 224
+#define EIC7700_RESET_TIMER1_5 225
+#define EIC7700_RESET_TIMER1_6 226
+#define EIC7700_RESET_TIMER1_7 227
+#define EIC7700_RESET_TIMER1_N 228
+#define EIC7700_RESET_TIMER2_0 229
+#define EIC7700_RESET_TIMER2_1 230
+#define EIC7700_RESET_TIMER2_2 231
+#define EIC7700_RESET_TIMER2_3 232
+#define EIC7700_RESET_TIMER2_4 233
+#define EIC7700_RESET_TIMER2_5 234
+#define EIC7700_RESET_TIMER2_6 235
+#define EIC7700_RESET_TIMER2_7 236
+#define EIC7700_RESET_TIMER2_N 237
+#define EIC7700_RESET_TIMER3_0 238
+#define EIC7700_RESET_TIMER3_1 239
+#define EIC7700_RESET_TIMER3_2 240
+#define EIC7700_RESET_TIMER3_3 241
+#define EIC7700_RESET_TIMER3_4 242
+#define EIC7700_RESET_TIMER3_5 243
+#define EIC7700_RESET_TIMER3_6 244
+#define EIC7700_RESET_TIMER3_7 245
+#define EIC7700_RESET_TIMER3_N 246
+#define EIC7700_RESET_RTC 247
+#define EIC7700_RESET_MNOC_SNOC_NSP 248
+#define EIC7700_RESET_MNOC_VC 249
+#define EIC7700_RESET_MNOC_CFG 250
+#define EIC7700_RESET_MNOC_HSP 251
+#define EIC7700_RESET_MNOC_GPU 252
+#define EIC7700_RESET_MNOC_DDRC1_P3 253
+#define EIC7700_RESET_MNOC_DDRC0_P3 254
+#define EIC7700_RESET_RNOC_VO 255
+#define EIC7700_RESET_RNOC_VI 256
+#define EIC7700_RESET_RNOC_SNOC_NSP 257
+#define EIC7700_RESET_RNOC_CFG 258
+#define EIC7700_RESET_MNOC_DDRC1_P4 259
+#define EIC7700_RESET_MNOC_DDRC0_P4 260
+#define EIC7700_RESET_CNOC_VO_CFG 261
+#define EIC7700_RESET_CNOC_VI_CFG 262
+#define EIC7700_RESET_CNOC_VC_CFG 263
+#define EIC7700_RESET_CNOC_TCU_CFG 264
+#define EIC7700_RESET_CNOC_PCIE_CFG 265
+#define EIC7700_RESET_CNOC_NPU_CFG 266
+#define EIC7700_RESET_CNOC_LSP_CFG 267
+#define EIC7700_RESET_CNOC_HSP_CFG 268
+#define EIC7700_RESET_CNOC_GPU_CFG 269
+#define EIC7700_RESET_CNOC_DSPT_CFG 270
+#define EIC7700_RESET_CNOC_DDRT1_CFG 271
+#define EIC7700_RESET_CNOC_DDRT0_CFG 272
+#define EIC7700_RESET_CNOC_D2D_CFG 273
+#define EIC7700_RESET_CNOC_CFG 274
+#define EIC7700_RESET_CNOC_CLMM_CFG 275
+#define EIC7700_RESET_CNOC_AON_CFG 276
+#define EIC7700_RESET_LNOC_CFG 277
+#define EIC7700_RESET_LNOC_NPU_LLC 278
+#define EIC7700_RESET_LNOC_DDRC1_P0 279
+#define EIC7700_RESET_LNOC_DDRC0_P0 280
+
+#endif /* __DT_ESWIN_EIC7700_RESET_H__ */
diff --git a/include/dt-bindings/reset/fsl,imx8ulp-sim-lpav.h b/include/dt-bindings/reset/fsl,imx8ulp-sim-lpav.h
new file mode 100644
index 000000000000..adf95bb26d21
--- /dev/null
+++ b/include/dt-bindings/reset/fsl,imx8ulp-sim-lpav.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright 2025 NXP
+ */
+
+#ifndef DT_BINDING_RESET_IMX8ULP_SIM_LPAV_H
+#define DT_BINDING_RESET_IMX8ULP_SIM_LPAV_H
+
+#define IMX8ULP_SIM_LPAV_HIFI4_DSP_DBG_RST 0
+#define IMX8ULP_SIM_LPAV_HIFI4_DSP_RST 1
+#define IMX8ULP_SIM_LPAV_HIFI4_DSP_STALL 2
+#define IMX8ULP_SIM_LPAV_DSI_RST_BYTE_N 3
+#define IMX8ULP_SIM_LPAV_DSI_RST_ESC_N 4
+#define IMX8ULP_SIM_LPAV_DSI_RST_DPI_N 5
+
+#endif /* DT_BINDING_RESET_IMX8ULP_SIM_LPAV_H */
diff --git a/include/dt-bindings/reset/imx8mp-reset-audiomix.h b/include/dt-bindings/reset/imx8mp-reset-audiomix.h
new file mode 100644
index 000000000000..746c1337ed99
--- /dev/null
+++ b/include/dt-bindings/reset/imx8mp-reset-audiomix.h
+@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Copyright 2025 NXP
+ */
+
+#ifndef DT_BINDING_RESET_IMX8MP_AUDIOMIX_H
+#define DT_BINDING_RESET_IMX8MP_AUDIOMIX_H
+
+#define IMX8MP_AUDIOMIX_EARC_RESET 0
+#define IMX8MP_AUDIOMIX_EARC_PHY_RESET 1
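+/* Run-stall control line for the HiFi4 DSP, exposed through the reset interface; asserting it halts the core. */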
+#define IMX8MP_AUDIOMIX_DSP_RUNSTALL 2
+
+#endif /* DT_BINDING_RESET_IMX8MP_AUDIOMIX_H */
diff --git a/include/dt-bindings/reset/imx8ulp-pcc-reset.h b/include/dt-bindings/reset/imx8ulp-pcc-reset.h
new file mode 100644
index 000000000000..e99a4735c3c4
--- /dev/null
+++ b/include/dt-bindings/reset/imx8ulp-pcc-reset.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2021 NXP
+ */
+
+#ifndef DT_BINDING_PCC_RESET_IMX8ULP_H
+#define DT_BINDING_PCC_RESET_IMX8ULP_H
+
+/* PCC3 */
+#define PCC3_WDOG3_SWRST 0
+#define PCC3_WDOG4_SWRST 1
+#define PCC3_LPIT1_SWRST 2
+#define PCC3_TPM4_SWRST 3
+#define PCC3_TPM5_SWRST 4
+#define PCC3_FLEXIO1_SWRST 5
+#define PCC3_I3C2_SWRST 6
+#define PCC3_LPI2C4_SWRST 7
+#define PCC3_LPI2C5_SWRST 8
+#define PCC3_LPUART4_SWRST 9
+#define PCC3_LPUART5_SWRST 10
+#define PCC3_LPSPI4_SWRST 11
+#define PCC3_LPSPI5_SWRST 12
+
+/* PCC4 */
+#define PCC4_FLEXSPI2_SWRST 0
+#define PCC4_TPM6_SWRST 1
+#define PCC4_TPM7_SWRST 2
+#define PCC4_LPI2C6_SWRST 3
+#define PCC4_LPI2C7_SWRST 4
+#define PCC4_LPUART6_SWRST 5
+#define PCC4_LPUART7_SWRST 6
+#define PCC4_SAI4_SWRST 7
+#define PCC4_SAI5_SWRST 8
+#define PCC4_USDHC0_SWRST 9
+#define PCC4_USDHC1_SWRST 10
+#define PCC4_USDHC2_SWRST 11
+#define PCC4_USB0_SWRST 12
+#define PCC4_USB0_PHY_SWRST 13
+#define PCC4_USB1_SWRST 14
+#define PCC4_USB1_PHY_SWRST 15
+#define PCC4_ENET_SWRST 16
+
+/* PCC5 */
+#define PCC5_TPM8_SWRST 0
+#define PCC5_SAI6_SWRST 1
+#define PCC5_SAI7_SWRST 2
+#define PCC5_SPDIF_SWRST 3
+#define PCC5_ISI_SWRST 4
+#define PCC5_CSI_REGS_SWRST 5
+#define PCC5_CSI_SWRST 6
+#define PCC5_DSI_SWRST 7
+#define PCC5_WDOG5_SWRST 8
+#define PCC5_EPDC_SWRST 9
+#define PCC5_PXP_SWRST 10
+#define PCC5_GPU2D_SWRST 11
+#define PCC5_GPU3D_SWRST 12
+#define PCC5_DC_NANO_SWRST 13
+
+#endif /* DT_BINDING_PCC_RESET_IMX8ULP_H */
diff --git a/include/dt-bindings/reset/mediatek,mt6735-infracfg.h b/include/dt-bindings/reset/mediatek,mt6735-infracfg.h
new file mode 100644
index 000000000000..9df969090377
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt6735-infracfg.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_RESET_MT6735_INFRACFG_H
+#define _DT_BINDINGS_RESET_MT6735_INFRACFG_H
+
+#define MT6735_INFRA_RST0_EMI_REG 0
+#define MT6735_INFRA_RST0_DRAMC0_AO 1
+#define MT6735_INFRA_RST0_AP_CIRQ_EINT 2
+#define MT6735_INFRA_RST0_APXGPT 3
+#define MT6735_INFRA_RST0_SCPSYS 4
+#define MT6735_INFRA_RST0_KP 5
+#define MT6735_INFRA_RST0_PMIC_WRAP 6
+#define MT6735_INFRA_RST0_CLDMA_AO_TOP 7
+#define MT6735_INFRA_RST0_USBSIF_TOP 8
+#define MT6735_INFRA_RST0_EMI 9
+#define MT6735_INFRA_RST0_CCIF 10
+#define MT6735_INFRA_RST0_DRAMC0 11
+#define MT6735_INFRA_RST0_EMI_AO_REG 12
+#define MT6735_INFRA_RST0_CCIF_AO 13
+#define MT6735_INFRA_RST0_TRNG 14
+#define MT6735_INFRA_RST0_SYS_CIRQ 15
+#define MT6735_INFRA_RST0_GCE 16
+#define MT6735_INFRA_RST0_M4U 17
+#define MT6735_INFRA_RST0_CCIF1 18
+#define MT6735_INFRA_RST0_CLDMA_TOP_PD 19
+
+#endif
diff --git a/include/dt-bindings/reset/mediatek,mt6735-mfgcfg.h b/include/dt-bindings/reset/mediatek,mt6735-mfgcfg.h
new file mode 100644
index 000000000000..c489242b226e
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt6735-mfgcfg.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_RESET_MT6735_MFGCFG_H
+#define _DT_BINDINGS_RESET_MT6735_MFGCFG_H
+
+#define MT6735_MFG_RST0_AXI 0
+#define MT6735_MFG_RST0_G3D 1
+
+#endif /* _DT_BINDINGS_RESET_MT6735_MFGCFG_H */
diff --git a/include/dt-bindings/reset/mediatek,mt6735-pericfg.h b/include/dt-bindings/reset/mediatek,mt6735-pericfg.h
new file mode 100644
index 000000000000..a62bb192835a
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt6735-pericfg.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_RESET_MT6735_PERICFG_H
+#define _DT_BINDINGS_RESET_MT6735_PERICFG_H
+
+#define MT6735_PERI_RST0_UART0 0
+#define MT6735_PERI_RST0_UART1 1
+#define MT6735_PERI_RST0_UART2 2
+#define MT6735_PERI_RST0_UART3 3
+#define MT6735_PERI_RST0_UART4 4
+#define MT6735_PERI_RST0_BTIF 5
+#define MT6735_PERI_RST0_DISP_PWM_PERI 6
+#define MT6735_PERI_RST0_PWM 7
+#define MT6735_PERI_RST0_AUXADC 8
+#define MT6735_PERI_RST0_DMA 9
+#define MT6735_PERI_RST0_IRDA 10
+#define MT6735_PERI_RST0_IRTX 11
+#define MT6735_PERI_RST0_THERM 12
+#define MT6735_PERI_RST0_MSDC2 13
+#define MT6735_PERI_RST0_MSDC3 14
+#define MT6735_PERI_RST0_MSDC0 15
+#define MT6735_PERI_RST0_MSDC1 16
+#define MT6735_PERI_RST0_I2C0 17
+#define MT6735_PERI_RST0_I2C1 18
+#define MT6735_PERI_RST0_I2C2 19
+#define MT6735_PERI_RST0_I2C3 20
+#define MT6735_PERI_RST0_USB 21
+
+#define MT6735_PERI_RST1_SPI0 22
+
+#endif
diff --git a/include/dt-bindings/reset/mediatek,mt6735-vdecsys.h b/include/dt-bindings/reset/mediatek,mt6735-vdecsys.h
new file mode 100644
index 000000000000..b6ae5d249192
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt6735-vdecsys.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_RESET_MT6735_VDECSYS_H
+#define _DT_BINDINGS_RESET_MT6735_VDECSYS_H
+
+#define MT6735_VDEC_RST0_VDEC 0
+#define MT6735_VDEC_RST1_SMI_LARB1 1
+
+#endif /* _DT_BINDINGS_RESET_MT6735_VDECSYS_H */
diff --git a/include/dt-bindings/reset/mediatek,mt6735-wdt.h b/include/dt-bindings/reset/mediatek,mt6735-wdt.h
new file mode 100644
index 000000000000..c6056e676d46
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt6735-wdt.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_RESET_MEDIATEK_MT6735_WDT_H_
+#define _DT_BINDINGS_RESET_MEDIATEK_MT6735_WDT_H_
+
+#define MT6735_TOPRGU_MM_RST 1
+#define MT6735_TOPRGU_MFG_RST 2
+#define MT6735_TOPRGU_VENC_RST 3
+#define MT6735_TOPRGU_VDEC_RST 4
+#define MT6735_TOPRGU_IMG_RST 5
+#define MT6735_TOPRGU_MD_RST 7
+#define MT6735_TOPRGU_CONN_RST 9
+#define MT6735_TOPRGU_C2K_SW_RST 14
+#define MT6735_TOPRGU_C2K_RST 15
+#define MT6735_TOPRGU_RST_NUM 9
+
+#endif
diff --git a/include/dt-bindings/reset/mediatek,mt6795-resets.h b/include/dt-bindings/reset/mediatek,mt6795-resets.h
new file mode 100644
index 000000000000..5464a4a79a70
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt6795-resets.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT6795
+#define _DT_BINDINGS_RESET_CONTROLLER_MT6795
+
+/* INFRACFG resets */
+#define MT6795_INFRA_RST0_SCPSYS_RST 0
+#define MT6795_INFRA_RST0_PMIC_WRAP_RST 1
+#define MT6795_INFRA_RST1_MIPI_DSI_RST 2
+#define MT6795_INFRA_RST1_MIPI_CSI_RST 3
+#define MT6795_INFRA_RST1_MM_IOMMU_RST 4
+
+/* MMSYS resets */
+#define MT6795_MMSYS_SW0_RST_B_SMI_COMMON 0
+#define MT6795_MMSYS_SW0_RST_B_SMI_LARB 1
+#define MT6795_MMSYS_SW0_RST_B_CAM_MDP 2
+#define MT6795_MMSYS_SW0_RST_B_MDP_RDMA0 3
+#define MT6795_MMSYS_SW0_RST_B_MDP_RDMA1 4
+#define MT6795_MMSYS_SW0_RST_B_MDP_RSZ0 5
+#define MT6795_MMSYS_SW0_RST_B_MDP_RSZ1 6
+#define MT6795_MMSYS_SW0_RST_B_MDP_RSZ2 7
+#define MT6795_MMSYS_SW0_RST_B_MDP_TDSHP0 8
+#define MT6795_MMSYS_SW0_RST_B_MDP_TDSHP1 9
+#define MT6795_MMSYS_SW0_RST_B_MDP_WDMA 10
+#define MT6795_MMSYS_SW0_RST_B_MDP_WROT0 11
+#define MT6795_MMSYS_SW0_RST_B_MDP_WROT1 12
+#define MT6795_MMSYS_SW0_RST_B_MDP_CROP 13
+
+/* PERICFG resets */
+#define MT6795_PERI_NFI_SW_RST 0
+#define MT6795_PERI_THERM_SW_RST 1
+#define MT6795_PERI_MSDC1_SW_RST 2
+
+/* TOPRGU resets */
+#define MT6795_TOPRGU_INFRA_SW_RST 0
+#define MT6795_TOPRGU_MM_SW_RST 1
+#define MT6795_TOPRGU_MFG_SW_RST 2
+#define MT6795_TOPRGU_VENC_SW_RST 3
+#define MT6795_TOPRGU_VDEC_SW_RST 4
+#define MT6795_TOPRGU_IMG_SW_RST 5
+#define MT6795_TOPRGU_DDRPHY_SW_RST 6
+#define MT6795_TOPRGU_MD_SW_RST 7
+#define MT6795_TOPRGU_INFRA_AO_SW_RST 8
+#define MT6795_TOPRGU_MD_LITE_SW_RST 9
+#define MT6795_TOPRGU_APMIXED_SW_RST 10
+#define MT6795_TOPRGU_PWRAP_SPI_CTL_RST 11
+#define MT6795_TOPRGU_SW_RST_NUM 12
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT6795 */
diff --git a/include/dt-bindings/reset/mediatek,mt7988-resets.h b/include/dt-bindings/reset/mediatek,mt7988-resets.h
new file mode 100644
index 000000000000..0eb152889a89
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt7988-resets.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 Daniel Golle <daniel@makrotopia.org>
+ * Author: Daniel Golle <daniel@makrotopia.org>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT7988
+#define _DT_BINDINGS_RESET_CONTROLLER_MT7988
+
+/* ETHWARP resets */
+#define MT7988_ETHWARP_RST_SWITCH 0
+
+/* INFRA resets */
+#define MT7988_INFRA_RST0_PEXTP_MAC_SWRST 0
+#define MT7988_INFRA_RST1_THERM_CTRL_SWRST 1
+
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT7988 */
+
diff --git a/include/dt-bindings/reset/mediatek,mt8196-resets.h b/include/dt-bindings/reset/mediatek,mt8196-resets.h
new file mode 100644
index 000000000000..46ced0850d91
--- /dev/null
+++ b/include/dt-bindings/reset/mediatek,mt8196-resets.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8196
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8196
+
+/* PEXTP0 resets */
+#define MT8196_PEXTP0_RST0_PCIE0_MAC 0
+#define MT8196_PEXTP0_RST0_PCIE0_PHY 1
+
+/* PEXTP1 resets */
+#define MT8196_PEXTP1_RST0_PCIE1_MAC 0
+#define MT8196_PEXTP1_RST0_PCIE1_PHY 1
+#define MT8196_PEXTP1_RST0_PCIE2_MAC 2
+#define MT8196_PEXTP1_RST0_PCIE2_PHY 3
+
+/* UFS resets */
+#define MT8196_UFSAO_RST0_UFS_MPHY 0
+#define MT8196_UFSAO_RST1_UFS_UNIPRO 1
+#define MT8196_UFSAO_RST1_UFS_CRYPTO 2
+#define MT8196_UFSAO_RST1_UFSHCI 3
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8196 */
diff --git a/include/dt-bindings/reset-controller/mt2712-resets.h b/include/dt-bindings/reset/mt2712-resets.h
index 9e7ee762f076..9e7ee762f076 100644
--- a/include/dt-bindings/reset-controller/mt2712-resets.h
+++ b/include/dt-bindings/reset/mt2712-resets.h
diff --git a/include/dt-bindings/reset/mt7621-reset.h b/include/dt-bindings/reset/mt7621-reset.h
new file mode 100644
index 000000000000..7572c6b41453
--- /dev/null
+++ b/include/dt-bindings/reset/mt7621-reset.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 Sergio Paracuellos
+ * Author: Sergio Paracuellos <sergio.paracuellos@gmail.com>
+ */
+
+#ifndef DT_BINDING_MT7621_RESET_H
+#define DT_BINDING_MT7621_RESET_H
+
+#define MT7621_RST_SYS 0
+#define MT7621_RST_MCM 2
+#define MT7621_RST_HSDMA 5
+#define MT7621_RST_FE 6
+#define MT7621_RST_SPDIFTX 7
+#define MT7621_RST_TIMER 8
+#define MT7621_RST_INT 9
+#define MT7621_RST_MC 10
+#define MT7621_RST_PCM 11
+#define MT7621_RST_PIO 13
+#define MT7621_RST_GDMA 14
+#define MT7621_RST_NFI 15
+#define MT7621_RST_I2C 16
+#define MT7621_RST_I2S 17
+#define MT7621_RST_SPI 18
+#define MT7621_RST_UART1 19
+#define MT7621_RST_UART2 20
+#define MT7621_RST_UART3 21
+#define MT7621_RST_ETH 23
+#define MT7621_RST_PCIE0 24
+#define MT7621_RST_PCIE1 25
+#define MT7621_RST_PCIE2 26
+#define MT7621_RST_AUX_STCK 28
+#define MT7621_RST_CRYPTO 29
+#define MT7621_RST_SDXC 30
+#define MT7621_RST_PPE 31
+
+#endif /* DT_BINDING_MT7621_RESET_H */
diff --git a/include/dt-bindings/reset/mt7986-resets.h b/include/dt-bindings/reset/mt7986-resets.h
new file mode 100644
index 000000000000..af3d16c81192
--- /dev/null
+++ b/include/dt-bindings/reset/mt7986-resets.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT7986
+#define _DT_BINDINGS_RESET_CONTROLLER_MT7986
+
+/* INFRACFG resets */
+#define MT7986_INFRACFG_PEXTP_MAC_SW_RST 6
+#define MT7986_INFRACFG_SSUSB_SW_RST 7
+#define MT7986_INFRACFG_EIP97_SW_RST 8
+#define MT7986_INFRACFG_AUDIO_SW_RST 13
+#define MT7986_INFRACFG_CQ_DMA_SW_RST 14
+
+#define MT7986_INFRACFG_TRNG_SW_RST 17
+#define MT7986_INFRACFG_AP_DMA_SW_RST 32
+#define MT7986_INFRACFG_I2C_SW_RST 33
+#define MT7986_INFRACFG_NFI_SW_RST 34
+#define MT7986_INFRACFG_SPI0_SW_RST 35
+#define MT7986_INFRACFG_SPI1_SW_RST 36
+#define MT7986_INFRACFG_UART0_SW_RST 37
+#define MT7986_INFRACFG_UART1_SW_RST 38
+#define MT7986_INFRACFG_UART2_SW_RST 39
+#define MT7986_INFRACFG_AUXADC_SW_RST 43
+
+#define MT7986_INFRACFG_APXGPT_SW_RST 66
+#define MT7986_INFRACFG_PWM_SW_RST 68
+
+#define MT7986_INFRACFG_SW_RST_NUM 69
+
+/* TOPRGU resets */
+#define MT7986_TOPRGU_APMIXEDSYS_SW_RST 0
+#define MT7986_TOPRGU_SGMII0_SW_RST 1
+#define MT7986_TOPRGU_SGMII1_SW_RST 2
+#define MT7986_TOPRGU_INFRA_SW_RST 3
+#define MT7986_TOPRGU_U2PHY_SW_RST 5
+#define MT7986_TOPRGU_PCIE_SW_RST 6
+#define MT7986_TOPRGU_SSUSB_SW_RST 7
+#define MT7986_TOPRGU_ETHDMA_SW_RST 20
+#define MT7986_TOPRGU_CONSYS_SW_RST 23
+
+#define MT7986_TOPRGU_SW_RST_NUM 24
+
+/* ETHSYS Subsystem resets */
+#define MT7986_ETHSYS_FE_SW_RST 6
+#define MT7986_ETHSYS_PMTR_SW_RST 8
+#define MT7986_ETHSYS_GMAC_SW_RST 23
+#define MT7986_ETHSYS_PPE0_SW_RST 30
+#define MT7986_ETHSYS_PPE1_SW_RST 31
+
+#define MT7986_ETHSYS_SW_RST_NUM 32
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT7986 */
diff --git a/include/dt-bindings/reset/mt8173-resets.h b/include/dt-bindings/reset/mt8173-resets.h
index ba8636eda5ae..6a60c7cecc4c 100644
--- a/include/dt-bindings/reset/mt8173-resets.h
+++ b/include/dt-bindings/reset/mt8173-resets.h
@@ -27,6 +27,8 @@
#define MT8173_INFRA_GCE_FAXI_RST 40
#define MT8173_INFRA_MMIOMMURST 47

+/* MMSYS resets */
+#define MT8173_MMSYS_SW0_RST_B_DISP_DSI0 25

/* PERICFG resets */
#define MT8173_PERI_UART0_SW_RST 0
diff --git a/include/dt-bindings/reset-controller/mt8183-resets.h b/include/dt-bindings/reset/mt8183-resets.h
index a1bbd41e0d12..48c5d2de0a38 100644
--- a/include/dt-bindings/reset-controller/mt8183-resets.h
+++ b/include/dt-bindings/reset/mt8183-resets.h
@@ -80,6 +80,9 @@

#define MT8183_INFRACFG_SW_RST_NUM 128

+/* MMSYS resets */
+#define MT8183_MMSYS_SW0_RST_B_DISP_DSI0 25
+
#define MT8183_TOPRGU_MM_SW_RST 1
#define MT8183_TOPRGU_MFG_SW_RST 2
#define MT8183_TOPRGU_VENC_SW_RST 3
diff --git a/include/dt-bindings/reset/mt8186-resets.h b/include/dt-bindings/reset/mt8186-resets.h
new file mode 100644
index 000000000000..2e9029c22f38
--- /dev/null
+++ b/include/dt-bindings/reset/mt8186-resets.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Runyang Chen <runyang.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8186
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8186
+
+/* TOPRGU resets */
+#define MT8186_TOPRGU_INFRA_SW_RST 0
+#define MT8186_TOPRGU_MM_SW_RST 1
+#define MT8186_TOPRGU_MFG_SW_RST 2
+#define MT8186_TOPRGU_VENC_SW_RST 3
+#define MT8186_TOPRGU_VDEC_SW_RST 4
+#define MT8186_TOPRGU_IMG_SW_RST 5
+#define MT8186_TOPRGU_DDR_SW_RST 6
+#define MT8186_TOPRGU_INFRA_AO_SW_RST 8
+#define MT8186_TOPRGU_CONNSYS_SW_RST 9
+#define MT8186_TOPRGU_APMIXED_SW_RST 10
+#define MT8186_TOPRGU_PWRAP_SW_RST 11
+#define MT8186_TOPRGU_CONN_MCU_SW_RST 12
+#define MT8186_TOPRGU_IPNNA_SW_RST 13
+#define MT8186_TOPRGU_WPE_SW_RST 14
+#define MT8186_TOPRGU_ADSP_SW_RST 15
+#define MT8186_TOPRGU_AUDIO_SW_RST 17
+#define MT8186_TOPRGU_CAM_MAIN_SW_RST 18
+#define MT8186_TOPRGU_CAM_RAWA_SW_RST 19
+#define MT8186_TOPRGU_CAM_RAWB_SW_RST 20
+#define MT8186_TOPRGU_IPE_SW_RST 21
+#define MT8186_TOPRGU_IMG2_SW_RST 22
+#define MT8186_TOPRGU_SW_RST_NUM 23
+
+/* MMSYS resets */
+#define MT8186_MMSYS_SW0_RST_B_DISP_DSI0 19
+
+/* INFRA resets */
+#define MT8186_INFRA_THERMAL_CTRL_RST 0
+#define MT8186_INFRA_PTP_CTRL_RST 1
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8186 */
diff --git a/include/dt-bindings/reset/mt8188-resets.h b/include/dt-bindings/reset/mt8188-resets.h
new file mode 100644
index 000000000000..5a58c54e7d20
--- /dev/null
+++ b/include/dt-bindings/reset/mt8188-resets.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Runyang Chen <runyang.chen@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8188
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8188
+
+#define MT8188_TOPRGU_CONN_MCU_SW_RST 0
+#define MT8188_TOPRGU_INFRA_GRST_SW_RST 1
+#define MT8188_TOPRGU_IPU0_SW_RST 2
+#define MT8188_TOPRGU_IPU1_SW_RST 3
+#define MT8188_TOPRGU_IPU2_SW_RST 4
+#define MT8188_TOPRGU_AUD_ASRC_SW_RST 5
+#define MT8188_TOPRGU_INFRA_SW_RST 6
+#define MT8188_TOPRGU_MMSYS_SW_RST 7
+#define MT8188_TOPRGU_MFG_SW_RST 8
+#define MT8188_TOPRGU_VENC_SW_RST 9
+#define MT8188_TOPRGU_VDEC_SW_RST 10
+#define MT8188_TOPRGU_CAM_VCORE_SW_RST 11
+#define MT8188_TOPRGU_SCP_SW_RST 12
+#define MT8188_TOPRGU_APMIXEDSYS_SW_RST 13
+#define MT8188_TOPRGU_AUDIO_SW_RST 14
+#define MT8188_TOPRGU_CAMSYS_SW_RST 15
+#define MT8188_TOPRGU_MJC_SW_RST 16
+#define MT8188_TOPRGU_PERI_SW_RST 17
+#define MT8188_TOPRGU_PERI_AO_SW_RST 18
+#define MT8188_TOPRGU_PCIE_SW_RST 19
+#define MT8188_TOPRGU_ADSPSYS_SW_RST 21
+#define MT8188_TOPRGU_DPTX_SW_RST 22
+#define MT8188_TOPRGU_SPMI_MST_SW_RST 23
+
+#define MT8188_TOPRGU_SW_RST_NUM 24
+
+/* INFRA resets */
+#define MT8188_INFRA_RST1_THERMAL_MCU_RST 0
+#define MT8188_INFRA_RST1_THERMAL_CTRL_RST 1
+#define MT8188_INFRA_RST3_PTP_CTRL_RST 2
+
+#define MT8188_VDO0_RST_DISP_OVL0 0
+#define MT8188_VDO0_RST_FAKE_ENG0 1
+#define MT8188_VDO0_RST_DISP_CCORR0 2
+#define MT8188_VDO0_RST_DISP_MUTEX0 3
+#define MT8188_VDO0_RST_DISP_GAMMA0 4
+#define MT8188_VDO0_RST_DISP_DITHER0 5
+#define MT8188_VDO0_RST_DISP_WDMA0 6
+#define MT8188_VDO0_RST_DISP_RDMA0 7
+#define MT8188_VDO0_RST_DSI0 8
+#define MT8188_VDO0_RST_DSI1 9
+#define MT8188_VDO0_RST_DSC_WRAP0 10
+#define MT8188_VDO0_RST_VPP_MERGE0 11
+#define MT8188_VDO0_RST_DP_INTF0 12
+#define MT8188_VDO0_RST_DISP_AAL0 13
+#define MT8188_VDO0_RST_INLINEROT0 14
+#define MT8188_VDO0_RST_APB_BUS 15
+#define MT8188_VDO0_RST_DISP_COLOR0 16
+#define MT8188_VDO0_RST_MDP_WROT0 17
+#define MT8188_VDO0_RST_DISP_RSZ0 18
+
+#define MT8188_VDO1_RST_SMI_LARB2 0
+#define MT8188_VDO1_RST_SMI_LARB3 1
+#define MT8188_VDO1_RST_GALS 2
+#define MT8188_VDO1_RST_FAKE_ENG0 3
+#define MT8188_VDO1_RST_FAKE_ENG1 4
+#define MT8188_VDO1_RST_MDP_RDMA0 5
+#define MT8188_VDO1_RST_MDP_RDMA1 6
+#define MT8188_VDO1_RST_MDP_RDMA2 7
+#define MT8188_VDO1_RST_MDP_RDMA3 8
+#define MT8188_VDO1_RST_VPP_MERGE0 9
+#define MT8188_VDO1_RST_VPP_MERGE1 10
+#define MT8188_VDO1_RST_VPP_MERGE2 11
+#define MT8188_VDO1_RST_VPP_MERGE3 12
+#define MT8188_VDO1_RST_VPP_MERGE4 13
+#define MT8188_VDO1_RST_VPP2_TO_VDO1_DL_ASYNC 14
+#define MT8188_VDO1_RST_VPP3_TO_VDO1_DL_ASYNC 15
+#define MT8188_VDO1_RST_DISP_MUTEX 16
+#define MT8188_VDO1_RST_MDP_RDMA4 17
+#define MT8188_VDO1_RST_MDP_RDMA5 18
+#define MT8188_VDO1_RST_MDP_RDMA6 19
+#define MT8188_VDO1_RST_MDP_RDMA7 20
+#define MT8188_VDO1_RST_DP_INTF1_MMCK 21
+#define MT8188_VDO1_RST_DPI0_MM_CK 22
+#define MT8188_VDO1_RST_DPI1_MM_CK 23
+#define MT8188_VDO1_RST_MERGE0_DL_ASYNC 24
+#define MT8188_VDO1_RST_MERGE1_DL_ASYNC 25
+#define MT8188_VDO1_RST_MERGE2_DL_ASYNC 26
+#define MT8188_VDO1_RST_MERGE3_DL_ASYNC 27
+#define MT8188_VDO1_RST_MERGE4_DL_ASYNC 28
+#define MT8188_VDO1_RST_VDO0_DSC_TO_VDO1_DL_ASYNC 29
+#define MT8188_VDO1_RST_VDO0_MERGE_TO_VDO1_DL_ASYNC 30
+#define MT8188_VDO1_RST_PADDING0 31
+#define MT8188_VDO1_RST_PADDING1 32
+#define MT8188_VDO1_RST_PADDING2 33
+#define MT8188_VDO1_RST_PADDING3 34
+#define MT8188_VDO1_RST_PADDING4 35
+#define MT8188_VDO1_RST_PADDING5 36
+#define MT8188_VDO1_RST_PADDING6 37
+#define MT8188_VDO1_RST_PADDING7 38
+#define MT8188_VDO1_RST_DISP_RSZ0 39
+#define MT8188_VDO1_RST_DISP_RSZ1 40
+#define MT8188_VDO1_RST_DISP_RSZ2 41
+#define MT8188_VDO1_RST_DISP_RSZ3 42
+#define MT8188_VDO1_RST_HDR_VDO_FE0 43
+#define MT8188_VDO1_RST_HDR_GFX_FE0 44
+#define MT8188_VDO1_RST_HDR_VDO_BE 45
+#define MT8188_VDO1_RST_HDR_VDO_FE1 46
+#define MT8188_VDO1_RST_HDR_GFX_FE1 47
+#define MT8188_VDO1_RST_DISP_MIXER 48
+#define MT8188_VDO1_RST_HDR_VDO_FE0_DL_ASYNC 49
+#define MT8188_VDO1_RST_HDR_VDO_FE1_DL_ASYNC 50
+#define MT8188_VDO1_RST_HDR_GFX_FE0_DL_ASYNC 51
+#define MT8188_VDO1_RST_HDR_GFX_FE1_DL_ASYNC 52
+#define MT8188_VDO1_RST_HDR_VDO_BE_DL_ASYNC 53
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8188 */
diff --git a/include/dt-bindings/reset-controller/mt8192-resets.h b/include/dt-bindings/reset/mt8192-resets.h
index be9a7ca245b9..12e2087c90a3 100644
--- a/include/dt-bindings/reset-controller/mt8192-resets.h
+++ b/include/dt-bindings/reset/mt8192-resets.h
@@ -7,6 +7,7 @@
#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8192
#define _DT_BINDINGS_RESET_CONTROLLER_MT8192

+/* TOPRGU resets */
#define MT8192_TOPRGU_MM_SW_RST 1
#define MT8192_TOPRGU_MFG_SW_RST 2
#define MT8192_TOPRGU_VENC_SW_RST 3
@@ -27,4 +28,14 @@

#define MT8192_TOPRGU_SW_RST_NUM 23

+/* MMSYS resets */
+#define MT8192_MMSYS_SW0_RST_B_DISP_DSI0 15
+
+/* INFRA resets */
+#define MT8192_INFRA_RST0_THERM_CTRL_SWRST 0
+#define MT8192_INFRA_RST2_PEXTP_PHY_SWRST 1
+#define MT8192_INFRA_RST3_THERM_CTRL_PTP_SWRST 2
+#define MT8192_INFRA_RST4_PCIE_TOP_SWRST 3
+#define MT8192_INFRA_RST4_THERM_CTRL_MCU_SWRST 4
+
#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8192 */
diff --git a/include/dt-bindings/reset/mt8195-resets.h b/include/dt-bindings/reset/mt8195-resets.h
new file mode 100644
index 000000000000..e61660438d61
--- /dev/null
+++ b/include/dt-bindings/reset/mt8195-resets.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Christine Zhu <christine.zhu@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8195
+#define _DT_BINDINGS_RESET_CONTROLLER_MT8195
+
+/* TOPRGU resets */
+#define MT8195_TOPRGU_CONN_MCU_SW_RST 0
+#define MT8195_TOPRGU_INFRA_GRST_SW_RST 1
+#define MT8195_TOPRGU_APU_SW_RST 2
+#define MT8195_TOPRGU_INFRA_AO_GRST_SW_RST 6
+#define MT8195_TOPRGU_MMSYS_SW_RST 7
+#define MT8195_TOPRGU_MFG_SW_RST 8
+#define MT8195_TOPRGU_VENC_SW_RST 9
+#define MT8195_TOPRGU_VDEC_SW_RST 10
+#define MT8195_TOPRGU_IMG_SW_RST 11
+#define MT8195_TOPRGU_APMIXEDSYS_SW_RST 13
+#define MT8195_TOPRGU_AUDIO_SW_RST 14
+#define MT8195_TOPRGU_CAMSYS_SW_RST 15
+#define MT8195_TOPRGU_EDPTX_SW_RST 16
+#define MT8195_TOPRGU_ADSPSYS_SW_RST 21
+#define MT8195_TOPRGU_DPTX_SW_RST 22
+#define MT8195_TOPRGU_SPMI_MST_SW_RST 23
+
+#define MT8195_TOPRGU_SW_RST_NUM 16
+
+/* INFRA resets */
+#define MT8195_INFRA_RST0_THERM_CTRL_SWRST 0
+#define MT8195_INFRA_RST3_THERM_CTRL_PTP_SWRST 1
+#define MT8195_INFRA_RST4_THERM_CTRL_MCU_SWRST 2
+#define MT8195_INFRA_RST2_PCIE_P0_SWRST 3
+#define MT8195_INFRA_RST2_PCIE_P1_SWRST 4
+#define MT8195_INFRA_RST2_USBSIF_P1_SWRST 5
+
+/* VDOSYS1 */
+#define MT8195_VDOSYS1_SW0_RST_B_SMI_LARB2 0
+#define MT8195_VDOSYS1_SW0_RST_B_SMI_LARB3 1
+#define MT8195_VDOSYS1_SW0_RST_B_GALS 2
+#define MT8195_VDOSYS1_SW0_RST_B_FAKE_ENG0 3
+#define MT8195_VDOSYS1_SW0_RST_B_FAKE_ENG1 4
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA0 5
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA1 6
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA2 7
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA3 8
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE0 9
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE1 10
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE2 11
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE3 12
+#define MT8195_VDOSYS1_SW0_RST_B_VPP_MERGE4 13
+#define MT8195_VDOSYS1_SW0_RST_B_VPP2_TO_VDO1_DL_ASYNC 14
+#define MT8195_VDOSYS1_SW0_RST_B_VPP3_TO_VDO1_DL_ASYNC 15
+#define MT8195_VDOSYS1_SW0_RST_B_DISP_MUTEX 16
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA4 17
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA5 18
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA6 19
+#define MT8195_VDOSYS1_SW0_RST_B_MDP_RDMA7 20
+#define MT8195_VDOSYS1_SW0_RST_B_DP_INTF0 21
+#define MT8195_VDOSYS1_SW0_RST_B_DPI0 22
+#define MT8195_VDOSYS1_SW0_RST_B_DPI1 23
+#define MT8195_VDOSYS1_SW0_RST_B_DISP_MONITOR 24
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE0_DL_ASYNC 25
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE1_DL_ASYNC 26
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE2_DL_ASYNC 27
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE3_DL_ASYNC 28
+#define MT8195_VDOSYS1_SW0_RST_B_MERGE4_DL_ASYNC 29
+#define MT8195_VDOSYS1_SW0_RST_B_VDO0_DSC_TO_VDO1_DL_ASYNC 30
+#define MT8195_VDOSYS1_SW0_RST_B_VDO0_MERGE_TO_VDO1_DL_ASYNC 31
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE0 32
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE0 33
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_BE 34
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE1 48
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE1 49
+#define MT8195_VDOSYS1_SW1_RST_B_DISP_MIXER 50
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE0_DL_ASYNC 51
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_FE1_DL_ASYNC 52
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE0_DL_ASYNC 53
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_GFX_FE1_DL_ASYNC 54
+#define MT8195_VDOSYS1_SW1_RST_B_HDR_VDO_BE_DL_ASYNC 55
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8195 */
diff --git a/include/dt-bindings/reset/nuvoton,ma35d1-reset.h b/include/dt-bindings/reset/nuvoton,ma35d1-reset.h
new file mode 100644
index 000000000000..2e99ee0d68c5
--- /dev/null
+++ b/include/dt-bindings/reset/nuvoton,ma35d1-reset.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2023 Nuvoton Technologies.
+ * Author: Chi-Fen Li <cfli0@nuvoton.com>
+ *
+ * Device Tree binding constants for MA35D1 reset controller.
+ */
+
+#ifndef __DT_BINDINGS_RESET_MA35D1_H
+#define __DT_BINDINGS_RESET_MA35D1_H
+
+#define MA35D1_RESET_CHIP 0
+#define MA35D1_RESET_CA35CR0 1
+#define MA35D1_RESET_CA35CR1 2
+#define MA35D1_RESET_CM4 3
+#define MA35D1_RESET_PDMA0 4
+#define MA35D1_RESET_PDMA1 5
+#define MA35D1_RESET_PDMA2 6
+#define MA35D1_RESET_PDMA3 7
+#define MA35D1_RESET_DISP 8
+#define MA35D1_RESET_VCAP0 9
+#define MA35D1_RESET_VCAP1 10
+#define MA35D1_RESET_GFX 11
+#define MA35D1_RESET_VDEC 12
+#define MA35D1_RESET_WHC0 13
+#define MA35D1_RESET_WHC1 14
+#define MA35D1_RESET_GMAC0 15
+#define MA35D1_RESET_GMAC1 16
+#define MA35D1_RESET_HWSEM 17
+#define MA35D1_RESET_EBI 18
+#define MA35D1_RESET_HSUSBH0 19
+#define MA35D1_RESET_HSUSBH1 20
+#define MA35D1_RESET_HSUSBD 21
+#define MA35D1_RESET_USBHL 22
+#define MA35D1_RESET_SDH0 23
+#define MA35D1_RESET_SDH1 24
+#define MA35D1_RESET_NAND 25
+#define MA35D1_RESET_GPIO 26
+#define MA35D1_RESET_MCTLP 27
+#define MA35D1_RESET_MCTLC 28
+#define MA35D1_RESET_DDRPUB 29
+#define MA35D1_RESET_TMR0 30
+#define MA35D1_RESET_TMR1 31
+#define MA35D1_RESET_TMR2 32
+#define MA35D1_RESET_TMR3 33
+#define MA35D1_RESET_I2C0 34
+#define MA35D1_RESET_I2C1 35
+#define MA35D1_RESET_I2C2 36
+#define MA35D1_RESET_I2C3 37
+#define MA35D1_RESET_QSPI0 38
+#define MA35D1_RESET_SPI0 39
+#define MA35D1_RESET_SPI1 40
+#define MA35D1_RESET_SPI2 41
+#define MA35D1_RESET_UART0 42
+#define MA35D1_RESET_UART1 43
+#define MA35D1_RESET_UART2 44
+#define MA35D1_RESET_UART3 45
+#define MA35D1_RESET_UART4 46
+#define MA35D1_RESET_UART5 47
+#define MA35D1_RESET_UART6 48
+#define MA35D1_RESET_UART7 49
+#define MA35D1_RESET_CANFD0 50
+#define MA35D1_RESET_CANFD1 51
+#define MA35D1_RESET_EADC0 52
+#define MA35D1_RESET_I2S0 53
+#define MA35D1_RESET_SC0 54
+#define MA35D1_RESET_SC1 55
+#define MA35D1_RESET_QSPI1 56
+#define MA35D1_RESET_SPI3 57
+#define MA35D1_RESET_EPWM0 58
+#define MA35D1_RESET_EPWM1 59
+#define MA35D1_RESET_QEI0 60
+#define MA35D1_RESET_QEI1 61
+#define MA35D1_RESET_ECAP0 62
+#define MA35D1_RESET_ECAP1 63
+#define MA35D1_RESET_CANFD2 64
+#define MA35D1_RESET_ADC0 65
+#define MA35D1_RESET_TMR4 66
+#define MA35D1_RESET_TMR5 67
+#define MA35D1_RESET_TMR6 68
+#define MA35D1_RESET_TMR7 69
+#define MA35D1_RESET_TMR8 70
+#define MA35D1_RESET_TMR9 71
+#define MA35D1_RESET_TMR10 72
+#define MA35D1_RESET_TMR11 73
+#define MA35D1_RESET_UART8 74
+#define MA35D1_RESET_UART9 75
+#define MA35D1_RESET_UART10 76
+#define MA35D1_RESET_UART11 77
+#define MA35D1_RESET_UART12 78
+#define MA35D1_RESET_UART13 79
+#define MA35D1_RESET_UART14 80
+#define MA35D1_RESET_UART15 81
+#define MA35D1_RESET_UART16 82
+#define MA35D1_RESET_I2S1 83
+#define MA35D1_RESET_I2C4 84
+#define MA35D1_RESET_I2C5 85
+#define MA35D1_RESET_EPWM2 86
+#define MA35D1_RESET_ECAP2 87
+#define MA35D1_RESET_QEI2 88
+#define MA35D1_RESET_CANFD3 89
+#define MA35D1_RESET_KPI 90
+#define MA35D1_RESET_GIC 91
+#define MA35D1_RESET_SSMCC 92
+#define MA35D1_RESET_SSPCC 93
+#define MA35D1_RESET_COUNT 94
+
+#endif
diff --git a/include/dt-bindings/reset/nvidia,tegra114-car.h b/include/dt-bindings/reset/nvidia,tegra114-car.h
new file mode 100644
index 000000000000..9b8c320402db
--- /dev/null
+++ b/include/dt-bindings/reset/nvidia,tegra114-car.h
+@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * This header provides Tegra114-specific constants for binding
+ * nvidia,tegra114-car.
+ */
+
+#ifndef _DT_BINDINGS_RESET_NVIDIA_TEGRA114_CAR_H
+#define _DT_BINDINGS_RESET_NVIDIA_TEGRA114_CAR_H
+
+#define TEGRA114_RESET(x) (5 * 32 + (x))
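+/* Reset IDs encode (register bank * 32 + bit): TEGRA114_RESET(0) = 5 * 32 + 0 = 160. */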
+#define TEGRA114_RST_DFLL_DVCO TEGRA114_RESET(0)
+
+#endif /* _DT_BINDINGS_RESET_NVIDIA_TEGRA114_CAR_H */
diff --git a/include/dt-bindings/reset/nvidia,tegra264.h b/include/dt-bindings/reset/nvidia,tegra264.h
new file mode 100644
index 000000000000..a61a56bb232b
--- /dev/null
+++ b/include/dt-bindings/reset/nvidia,tegra264.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2022-2025, NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef DT_BINDINGS_RESET_NVIDIA_TEGRA264_H
+#define DT_BINDINGS_RESET_NVIDIA_TEGRA264_H
+
+#define TEGRA264_RESET_APE_TKE 1
+#define TEGRA264_RESET_CEC 2
+#define TEGRA264_RESET_ADSP_ALL 3
+#define TEGRA264_RESET_RCE_ALL 4
+#define TEGRA264_RESET_UFSHC 5
+#define TEGRA264_RESET_UFSHC_AXI_M 6
+#define TEGRA264_RESET_UFSHC_LP_SEQ 7
+#define TEGRA264_RESET_DPAUX 8
+#define TEGRA264_RESET_EQOS_PCS 9
+#define TEGRA264_RESET_HWPM 10
+#define TEGRA264_RESET_I2C1 11
+#define TEGRA264_RESET_I2C2 12
+#define TEGRA264_RESET_I2C3 13
+#define TEGRA264_RESET_I2C4 14
+#define TEGRA264_RESET_I2C6 15
+#define TEGRA264_RESET_I2C7 16
+#define TEGRA264_RESET_I2C8 17
+#define TEGRA264_RESET_I2C9 18
+#define TEGRA264_RESET_ISP 19
+#define TEGRA264_RESET_LA 20
+#define TEGRA264_RESET_NVCSI 21
+#define TEGRA264_RESET_EQOS_MAC 22
+#define TEGRA264_RESET_PWM10 23
+#define TEGRA264_RESET_PWM2 24
+#define TEGRA264_RESET_PWM3 25
+#define TEGRA264_RESET_PWM4 26
+#define TEGRA264_RESET_PWM5 27
+#define TEGRA264_RESET_PWM9 28
+#define TEGRA264_RESET_QSPI0 29
+#define TEGRA264_RESET_HDA 30
+#define TEGRA264_RESET_HDACODEC 31
+#define TEGRA264_RESET_I2C0 32
+#define TEGRA264_RESET_I2C10 33
+#define TEGRA264_RESET_SDMMC1 34
+#define TEGRA264_RESET_MIPI_CAL 35
+#define TEGRA264_RESET_SPI1 36
+#define TEGRA264_RESET_SPI2 37
+#define TEGRA264_RESET_SPI3 38
+#define TEGRA264_RESET_SPI4 39
+#define TEGRA264_RESET_SPI5 40
+#define TEGRA264_RESET_SPI7 41
+#define TEGRA264_RESET_SPI8 42
+#define TEGRA264_RESET_SPI9 43
+#define TEGRA264_RESET_TACH0 44
+#define TEGRA264_RESET_TSEC 45
+#define TEGRA264_RESET_VI 46
+#define TEGRA264_RESET_VI1 47
+#define TEGRA264_RESET_PVA0_ALL 48
+#define TEGRA264_RESET_VIC 49
+#define TEGRA264_RESET_MPHY_CLK_CTL 50
+#define TEGRA264_RESET_MPHY_L0_RX 51
+#define TEGRA264_RESET_MPHY_L0_TX 52
+#define TEGRA264_RESET_MPHY_L1_RX 53
+#define TEGRA264_RESET_MPHY_L1_TX 54
+#define TEGRA264_RESET_ISP1 55
+#define TEGRA264_RESET_I2C11 56
+#define TEGRA264_RESET_I2C12 57
+#define TEGRA264_RESET_I2C14 58
+#define TEGRA264_RESET_I2C15 59
+#define TEGRA264_RESET_I2C16 60
+#define TEGRA264_RESET_EQOS_MACSEC 61
+#define TEGRA264_RESET_MGBE0_PCS 62
+#define TEGRA264_RESET_MGBE0_MAC 63
+#define TEGRA264_RESET_MGBE0_MACSEC 64
+#define TEGRA264_RESET_MGBE1_PCS 65
+#define TEGRA264_RESET_MGBE1_MAC 66
+#define TEGRA264_RESET_MGBE1_MACSEC 67
+#define TEGRA264_RESET_MGBE2_PCS 68
+#define TEGRA264_RESET_MGBE2_MAC 69
+#define TEGRA264_RESET_MGBE2_MACSEC 70
+#define TEGRA264_RESET_MGBE3_PCS 71
+#define TEGRA264_RESET_MGBE3_MAC 72
+#define TEGRA264_RESET_MGBE3_MACSEC 73
+#define TEGRA264_RESET_ADSP_CORE0 74
+#define TEGRA264_RESET_ADSP_CORE1 75
+#define TEGRA264_RESET_APE 76
+#define TEGRA264_RESET_XUSB1_PADCTL 77
+#define TEGRA264_RESET_AON_CPU_ALL 78
+#define TEGRA264_RESET_AON_HSP 79
+#define TEGRA264_RESET_UART4 80
+#define TEGRA264_RESET_UART5 81
+#define TEGRA264_RESET_UART9 82
+#define TEGRA264_RESET_UART10 83
+#define TEGRA264_RESET_UART8 84
+
+#endif /* DT_BINDINGS_RESET_NVIDIA_TEGRA264_H */
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq5018.h b/include/dt-bindings/reset/qcom,gcc-ipq5018.h
new file mode 100644
index 000000000000..8f03c92fc23b
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-ipq5018.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_GCC_5018_H
+#define _DT_BINDINGS_RESET_IPQ_GCC_5018_H
+
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 0
+#define GCC_BLSP1_BCR 1
+#define GCC_BLSP1_QUP1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_QUP3_BCR 4
+#define GCC_BLSP1_UART1_BCR 5
+#define GCC_BLSP1_UART2_BCR 6
+#define GCC_BOOT_ROM_BCR 7
+#define GCC_BTSS_BCR 8
+#define GCC_CMN_BLK_BCR 9
+#define GCC_CMN_LDO_BCR 10
+#define GCC_CE_BCR 11
+#define GCC_CRYPTO_BCR 12
+#define GCC_DCC_BCR 13
+#define GCC_DCD_BCR 14
+#define GCC_DDRSS_BCR 15
+#define GCC_EDPD_BCR 16
+#define GCC_GEPHY_BCR 17
+#define GCC_GEPHY_MDC_SW_ARES 18
+#define GCC_GEPHY_DSP_HW_ARES 19
+#define GCC_GEPHY_RX_ARES 20
+#define GCC_GEPHY_TX_ARES 21
+#define GCC_GMAC0_BCR 22
+#define GCC_GMAC0_CFG_ARES 23
+#define GCC_GMAC0_SYS_ARES 24
+#define GCC_GMAC1_BCR 25
+#define GCC_GMAC1_CFG_ARES 26
+#define GCC_GMAC1_SYS_ARES 27
+#define GCC_IMEM_BCR 28
+#define GCC_LPASS_BCR 29
+#define GCC_MDIO0_BCR 30
+#define GCC_MDIO1_BCR 31
+#define GCC_MPM_BCR 32
+#define GCC_PCIE0_BCR 33
+#define GCC_PCIE0_LINK_DOWN_BCR 34
+#define GCC_PCIE0_PHY_BCR 35
+#define GCC_PCIE0PHY_PHY_BCR 36
+#define GCC_PCIE0_PIPE_ARES 37
+#define GCC_PCIE0_SLEEP_ARES 38
+#define GCC_PCIE0_CORE_STICKY_ARES 39
+#define GCC_PCIE0_AXI_MASTER_ARES 40
+#define GCC_PCIE0_AXI_SLAVE_ARES 41
+#define GCC_PCIE0_AHB_ARES 42
+#define GCC_PCIE0_AXI_MASTER_STICKY_ARES 43
+#define GCC_PCIE0_AXI_SLAVE_STICKY_ARES 44
+#define GCC_PCIE1_BCR 45
+#define GCC_PCIE1_LINK_DOWN_BCR 46
+#define GCC_PCIE1_PHY_BCR 47
+#define GCC_PCIE1PHY_PHY_BCR 48
+#define GCC_PCIE1_PIPE_ARES 49
+#define GCC_PCIE1_SLEEP_ARES 50
+#define GCC_PCIE1_CORE_STICKY_ARES 51
+#define GCC_PCIE1_AXI_MASTER_ARES 52
+#define GCC_PCIE1_AXI_SLAVE_ARES 53
+#define GCC_PCIE1_AHB_ARES 54
+#define GCC_PCIE1_AXI_MASTER_STICKY_ARES 55
+#define GCC_PCIE1_AXI_SLAVE_STICKY_ARES 56
+#define GCC_PCNOC_BCR 57
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 58
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 59
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 60
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 61
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 62
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 63
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 64
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 65
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 66
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 67
+#define GCC_PCNOC_BUS_TIMEOUT10_BCR 68
+#define GCC_PCNOC_BUS_TIMEOUT11_BCR 69
+#define GCC_PRNG_BCR 70
+#define GCC_Q6SS_DBG_ARES 71
+#define GCC_Q6_AHB_S_ARES 72
+#define GCC_Q6_AHB_ARES 73
+#define GCC_Q6_AXIM2_ARES 74
+#define GCC_Q6_AXIM_ARES 75
+#define GCC_Q6_AXIS_ARES 76
+#define GCC_QDSS_BCR 77
+#define GCC_QPIC_BCR 78
+#define GCC_QUSB2_0_PHY_BCR 79
+#define GCC_SDCC1_BCR 80
+#define GCC_SEC_CTRL_BCR 81
+#define GCC_SPDM_BCR 82
+#define GCC_SYSTEM_NOC_BCR 83
+#define GCC_TCSR_BCR 84
+#define GCC_TLMM_BCR 85
+#define GCC_UBI0_AXI_ARES 86
+#define GCC_UBI0_AHB_ARES 87
+#define GCC_UBI0_NC_AXI_ARES 88
+#define GCC_UBI0_DBG_ARES 89
+#define GCC_UBI0_UTCM_ARES 90
+#define GCC_UBI0_CORE_ARES 91
+#define GCC_UBI32_BCR 92
+#define GCC_UNIPHY_BCR 93
+#define GCC_UNIPHY_AHB_ARES 94
+#define GCC_UNIPHY_SYS_ARES 95
+#define GCC_UNIPHY_RX_ARES 96
+#define GCC_UNIPHY_TX_ARES 97
+#define GCC_USB0_BCR 98
+#define GCC_USB0_PHY_BCR 99
+#define GCC_WCSS_BCR 100
+#define GCC_WCSS_DBG_ARES 101
+#define GCC_WCSS_ECAHB_ARES 102
+#define GCC_WCSS_ACMT_ARES 103
+#define GCC_WCSS_DBG_BDG_ARES 104
+#define GCC_WCSS_AHB_S_ARES 105
+#define GCC_WCSS_AXI_M_ARES 106
+#define GCC_WCSS_AXI_S_ARES 107
+#define GCC_WCSS_Q6_BCR 108
+#define GCC_WCSSAON_RESET 109
+#define GCC_UNIPHY_SOFT_RESET 110
+#define GCC_GEPHY_MISC_ARES 111
+
+#endif
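
These indices only take effect through the reset-controller binding: a consumer lists <phandle index> pairs in its "resets" property, and the index in the second cell must match one of the defines above. A minimal consumer sketch, assuming a hypothetical "gcc" label and UART node (the labels and "reset-names" value are illustrative, not taken from a real board file):

    #include <dt-bindings/reset/qcom,gcc-ipq5018.h>

    &blsp1_uart1 {
            /* Second cell selects the block reset by the index defined above. */
            resets = <&gcc GCC_BLSP1_UART1_BCR>;
            reset-names = "uart";
    };
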
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq806x.h b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
index 26b6f9200620..020c9cf18751 100644
--- a/include/dt-bindings/reset/qcom,gcc-ipq806x.h
+++ b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
@@ -163,5 +163,10 @@
#define NSS_CAL_PRBS_RST_N_RESET 154
#define NSS_LCKDT_RST_N_RESET 155
#define NSS_SRDS_N_RESET 156
+#define CRYPTO_ENG1_RESET 157
+#define CRYPTO_ENG2_RESET 158
+#define CRYPTO_ENG3_RESET 159
+#define CRYPTO_ENG4_RESET 160
+#define CRYPTO_AHB_RESET 161
#endif
diff --git a/include/dt-bindings/reset/qcom,ipq5424-gcc.h b/include/dt-bindings/reset/qcom,ipq5424-gcc.h
new file mode 100644
index 000000000000..16a72771c79a
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,ipq5424-gcc.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_GCC_IPQ5424_H
+#define _DT_BINDINGS_RESET_IPQ_GCC_IPQ5424_H
+
+#define GCC_QUPV3_BCR 0
+#define GCC_QUPV3_I2C0_BCR 1
+#define GCC_QUPV3_UART0_BCR 2
+#define GCC_QUPV3_I2C1_BCR 3
+#define GCC_QUPV3_UART1_BCR 4
+#define GCC_QUPV3_SPI0_BCR 5
+#define GCC_QUPV3_SPI1_BCR 6
+#define GCC_IMEM_BCR 7
+#define GCC_TME_BCR 8
+#define GCC_DDRSS_BCR 9
+#define GCC_PRNG_BCR 10
+#define GCC_BOOT_ROM_BCR 11
+#define GCC_NSS_BCR 12
+#define GCC_MDIO_BCR 13
+#define GCC_UNIPHY0_BCR 14
+#define GCC_UNIPHY1_BCR 15
+#define GCC_UNIPHY2_BCR 16
+#define GCC_WCSS_BCR 17
+#define GCC_SEC_CTRL_BCR 19
+#define GCC_TME_SEC_BUS_BCR 20
+#define GCC_ADSS_BCR 21
+#define GCC_LPASS_BCR 22
+#define GCC_PCIE0_BCR 23
+#define GCC_PCIE0_LINK_DOWN_BCR 24
+#define GCC_PCIE0PHY_PHY_BCR 25
+#define GCC_PCIE0_PHY_BCR 26
+#define GCC_PCIE1_BCR 27
+#define GCC_PCIE1_LINK_DOWN_BCR 28
+#define GCC_PCIE1PHY_PHY_BCR 29
+#define GCC_PCIE1_PHY_BCR 30
+#define GCC_PCIE2_BCR 31
+#define GCC_PCIE2_LINK_DOWN_BCR 32
+#define GCC_PCIE2PHY_PHY_BCR 33
+#define GCC_PCIE2_PHY_BCR 34
+#define GCC_PCIE3_BCR 35
+#define GCC_PCIE3_LINK_DOWN_BCR 36
+#define GCC_PCIE3PHY_PHY_BCR 37
+#define GCC_PCIE3_PHY_BCR 38
+#define GCC_USB_BCR 39
+#define GCC_QUSB2_0_PHY_BCR 40
+#define GCC_USB0_PHY_BCR 41
+#define GCC_USB3PHY_0_PHY_BCR 42
+#define GCC_QDSS_BCR 43
+#define GCC_SNOC_BCR 44
+#define GCC_ANOC_BCR 45
+#define GCC_PCNOC_BCR 46
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 47
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 48
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 49
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 50
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 51
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 52
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 53
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 54
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 55
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 56
+#define GCC_QPIC_BCR 57
+#define GCC_SDCC_BCR 58
+#define GCC_DCC_BCR 59
+#define GCC_SPDM_BCR 60
+#define GCC_MPM_BCR 61
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 62
+#define GCC_RBCPR_BCR 63
+#define GCC_CMN_BLK_BCR 64
+#define GCC_TCSR_BCR 65
+#define GCC_TLMM_BCR 66
+#define GCC_QUPV3_AHB_MST_ARES 67
+#define GCC_QUPV3_CORE_ARES 68
+#define GCC_QUPV3_2X_CORE_ARES 69
+#define GCC_QUPV3_SLEEP_ARES 70
+#define GCC_QUPV3_AHB_SLV_ARES 71
+#define GCC_QUPV3_I2C0_ARES 72
+#define GCC_QUPV3_UART0_ARES 73
+#define GCC_QUPV3_I2C1_ARES 74
+#define GCC_QUPV3_UART1_ARES 75
+#define GCC_QUPV3_SPI0_ARES 76
+#define GCC_QUPV3_SPI1_ARES 77
+#define GCC_DEBUG_ARES 78
+#define GCC_GP1_ARES 79
+#define GCC_GP2_ARES 80
+#define GCC_GP3_ARES 81
+#define GCC_IMEM_AXI_ARES 82
+#define GCC_IMEM_CFG_AHB_ARES 83
+#define GCC_TME_ARES 84
+#define GCC_TME_TS_ARES 85
+#define GCC_TME_SLOW_ARES 86
+#define GCC_TME_RTC_TOGGLE_ARES 87
+#define GCC_TIC_ARES 88
+#define GCC_PRNG_AHB_ARES 89
+#define GCC_BOOT_ROM_AHB_ARES 90
+#define GCC_NSSNOC_ATB_ARES 91
+#define GCC_NSS_TS_ARES 92
+#define GCC_NSSNOC_QOSGEN_REF_ARES 93
+#define GCC_NSSNOC_TIMEOUT_REF_ARES 94
+#define GCC_NSSNOC_MEMNOC_ARES 95
+#define GCC_NSSNOC_SNOC_ARES 96
+#define GCC_NSSCFG_ARES 97
+#define GCC_NSSNOC_NSSCC_ARES 98
+#define GCC_NSSCC_ARES 99
+#define GCC_MDIO_AHB_ARES 100
+#define GCC_UNIPHY0_SYS_ARES 101
+#define GCC_UNIPHY0_AHB_ARES 102
+#define GCC_UNIPHY1_SYS_ARES 103
+#define GCC_UNIPHY1_AHB_ARES 104
+#define GCC_UNIPHY2_SYS_ARES 105
+#define GCC_UNIPHY2_AHB_ARES 106
+#define GCC_NSSNOC_XO_DCD_ARES 107
+#define GCC_NSSNOC_SNOC_1_ARES 108
+#define GCC_NSSNOC_PCNOC_1_ARES 109
+#define GCC_NSSNOC_MEMNOC_1_ARES 110
+#define GCC_DDRSS_ATB_ARES 111
+#define GCC_DDRSS_AHB_ARES 112
+#define GCC_GEMNOC_AHB_ARES 113
+#define GCC_GEMNOC_Q6_AXI_ARES 114
+#define GCC_GEMNOC_NSSNOC_ARES 115
+#define GCC_GEMNOC_SNOC_ARES 116
+#define GCC_GEMNOC_APSS_ARES 117
+#define GCC_GEMNOC_QOSGEN_EXTREF_ARES 118
+#define GCC_GEMNOC_TS_ARES 119
+#define GCC_DDRSS_SMS_SLOW_ARES 120
+#define GCC_GEMNOC_CNOC_ARES 121
+#define GCC_GEMNOC_XO_DBG_ARES 122
+#define GCC_GEMNOC_ANOC_ARES 123
+#define GCC_DDRSS_LLCC_ATB_ARES 124
+#define GCC_LLCC_TPDM_CFG_ARES 125
+#define GCC_TME_BUS_ARES 126
+#define GCC_SEC_CTRL_ACC_ARES 127
+#define GCC_SEC_CTRL_ARES 128
+#define GCC_SEC_CTRL_SENSE_ARES 129
+#define GCC_SEC_CTRL_AHB_ARES 130
+#define GCC_SEC_CTRL_BOOT_ROM_PATCH_ARES 131
+#define GCC_ADSS_PWM_ARES 132
+#define GCC_TME_ATB_ARES 133
+#define GCC_TME_DBGAPB_ARES 134
+#define GCC_TME_DEBUG_ARES 135
+#define GCC_TME_AT_ARES 136
+#define GCC_TME_APB_ARES 137
+#define GCC_TME_DMI_DBG_HS_ARES 138
+#define GCC_APSS_AHB_ARES 139
+#define GCC_APSS_AXI_ARES 140
+#define GCC_CPUSS_TRIG_ARES 141
+#define GCC_APSS_DBG_ARES 142
+#define GCC_APSS_TS_ARES 143
+#define GCC_APSS_ATB_ARES 144
+#define GCC_Q6_AXIM_ARES 145
+#define GCC_Q6_AXIS_ARES 146
+#define GCC_Q6_AHB_ARES 147
+#define GCC_Q6_AHB_S_ARES 148
+#define GCC_Q6SS_ATBM_ARES 149
+#define GCC_Q6_TSCTR_1TO2_ARES 150
+#define GCC_Q6SS_PCLKDBG_ARES 151
+#define GCC_Q6SS_TRIG_ARES 152
+#define GCC_Q6SS_BOOT_CBCR_ARES 153
+#define GCC_WCSS_DBG_IFC_APB_ARES 154
+#define GCC_WCSS_DBG_IFC_ATB_ARES 155
+#define GCC_WCSS_DBG_IFC_NTS_ARES 156
+#define GCC_WCSS_DBG_IFC_DAPBUS_ARES 157
+#define GCC_WCSS_DBG_IFC_APB_BDG_ARES 158
+#define GCC_WCSS_DBG_IFC_NTS_BDG_ARES 159
+#define GCC_WCSS_DBG_IFC_DAPBUS_BDG_ARES 160
+#define GCC_WCSS_ECAHB_ARES 161
+#define GCC_WCSS_ACMT_ARES 162
+#define GCC_WCSS_AHB_S_ARES 163
+#define GCC_WCSS_AXI_M_ARES 164
+#define GCC_PCNOC_WAPSS_ARES 165
+#define GCC_SNOC_WAPSS_ARES 166
+#define GCC_LPASS_SWAY_ARES 167
+#define GCC_LPASS_CORE_AXIM_ARES 168
+#define GCC_PCIE0_AHB_ARES 169
+#define GCC_PCIE0_AXI_M_ARES 170
+#define GCC_PCIE0_AXI_S_ARES 171
+#define GCC_PCIE0_AXI_S_BRIDGE_ARES 172
+#define GCC_PCIE0_PIPE_ARES 173
+#define GCC_PCIE0_AUX_ARES 174
+#define GCC_PCIE1_AHB_ARES 175
+#define GCC_PCIE1_AXI_M_ARES 176
+#define GCC_PCIE1_AXI_S_ARES 177
+#define GCC_PCIE1_AXI_S_BRIDGE_ARES 178
+#define GCC_PCIE1_PIPE_ARES 179
+#define GCC_PCIE1_AUX_ARES 180
+#define GCC_PCIE2_AHB_ARES 181
+#define GCC_PCIE2_AXI_M_ARES 182
+#define GCC_PCIE2_AXI_S_ARES 183
+#define GCC_PCIE2_AXI_S_BRIDGE_ARES 184
+#define GCC_PCIE2_PIPE_ARES 185
+#define GCC_PCIE2_AUX_ARES 186
+#define GCC_PCIE3_AHB_ARES 187
+#define GCC_PCIE3_AXI_M_ARES 188
+#define GCC_PCIE3_AXI_S_ARES 189
+#define GCC_PCIE3_AXI_S_BRIDGE_ARES 190
+#define GCC_PCIE3_PIPE_ARES 191
+#define GCC_PCIE3_AUX_ARES 192
+#define GCC_USB0_MASTER_ARES 193
+#define GCC_USB0_AUX_ARES 194
+#define GCC_USB0_MOCK_UTMI_ARES 195
+#define GCC_USB0_PIPE_ARES 196
+#define GCC_USB0_SLEEP_ARES 197
+#define GCC_USB0_PHY_CFG_AHB_ARES 198
+#define GCC_QDSS_AT_ARES 199
+#define GCC_QDSS_STM_ARES 200
+#define GCC_QDSS_TRACECLKIN_ARES 201
+#define GCC_QDSS_TSCTR_DIV2_ARES 202
+#define GCC_QDSS_TSCTR_DIV3_ARES 203
+#define GCC_QDSS_TSCTR_DIV4_ARES 204
+#define GCC_QDSS_TSCTR_DIV8_ARES 205
+#define GCC_QDSS_TSCTR_DIV16_ARES 206
+#define GCC_QDSS_DAP_ARES 207
+#define GCC_QDSS_APB2JTAG_ARES 208
+#define GCC_QDSS_ETR_USB_ARES 209
+#define GCC_QDSS_DAP_AHB_ARES 210
+#define GCC_QDSS_CFG_AHB_ARES 211
+#define GCC_QDSS_EUD_AT_ARES 212
+#define GCC_QDSS_TS_ARES 213
+#define GCC_QDSS_USB_ARES 214
+#define GCC_SYS_NOC_AXI_ARES 215
+#define GCC_SNOC_QOSGEN_EXTREF_ARES 216
+#define GCC_CNOC_LPASS_CFG_ARES 217
+#define GCC_SYS_NOC_AT_ARES 218
+#define GCC_SNOC_PCNOC_AHB_ARES 219
+#define GCC_SNOC_TME_ARES 220
+#define GCC_SNOC_XO_DCD_ARES 221
+#define GCC_SNOC_TS_ARES 222
+#define GCC_ANOC0_AXI_ARES 223
+#define GCC_ANOC_PCIE0_1LANE_M_ARES 224
+#define GCC_ANOC_PCIE2_2LANE_M_ARES 225
+#define GCC_ANOC_PCIE1_1LANE_M_ARES 226
+#define GCC_ANOC_PCIE3_2LANE_M_ARES 227
+#define GCC_ANOC_PCNOC_AHB_ARES 228
+#define GCC_ANOC_QOSGEN_EXTREF_ARES 229
+#define GCC_ANOC_XO_DCD_ARES 230
+#define GCC_SNOC_XO_DBG_ARES 231
+#define GCC_AGGRNOC_ATB_ARES 232
+#define GCC_AGGRNOC_TS_ARES 233
+#define GCC_USB0_EUD_AT_ARES 234
+#define GCC_PCNOC_TIC_ARES 235
+#define GCC_PCNOC_AHB_ARES 236
+#define GCC_PCNOC_XO_DBG_ARES 237
+#define GCC_SNOC_LPASS_ARES 238
+#define GCC_PCNOC_AT_ARES 239
+#define GCC_PCNOC_XO_DCD_ARES 240
+#define GCC_PCNOC_TS_ARES 241
+#define GCC_PCNOC_BUS_TIMEOUT0_AHB_ARES 242
+#define GCC_PCNOC_BUS_TIMEOUT1_AHB_ARES 243
+#define GCC_PCNOC_BUS_TIMEOUT2_AHB_ARES 244
+#define GCC_PCNOC_BUS_TIMEOUT3_AHB_ARES 245
+#define GCC_PCNOC_BUS_TIMEOUT4_AHB_ARES 246
+#define GCC_PCNOC_BUS_TIMEOUT5_AHB_ARES 247
+#define GCC_PCNOC_BUS_TIMEOUT6_AHB_ARES 248
+#define GCC_PCNOC_BUS_TIMEOUT7_AHB_ARES 249
+#define GCC_Q6_AXIM_RESET 250
+#define GCC_Q6_AXIS_RESET 251
+#define GCC_Q6_AHB_S_RESET 252
+#define GCC_Q6_AHB_RESET 253
+#define GCC_Q6SS_DBG_RESET 254
+#define GCC_WCSS_ECAHB_RESET 255
+#define GCC_WCSS_DBG_BDG_RESET 256
+#define GCC_WCSS_DBG_RESET 257
+#define GCC_WCSS_AXI_M_RESET 258
+#define GCC_WCSS_AHB_S_RESET 259
+#define GCC_WCSS_ACMT_RESET 260
+#define GCC_WCSSAON_RESET 261
+#define GCC_PCIE0_PIPE_RESET 262
+#define GCC_PCIE0_CORE_STICKY_RESET 263
+#define GCC_PCIE0_AXI_S_STICKY_RESET 264
+#define GCC_PCIE0_AXI_S_RESET 265
+#define GCC_PCIE0_AXI_M_STICKY_RESET 266
+#define GCC_PCIE0_AXI_M_RESET 267
+#define GCC_PCIE0_AUX_RESET 268
+#define GCC_PCIE0_AHB_RESET 269
+#define GCC_PCIE1_PIPE_RESET 270
+#define GCC_PCIE1_CORE_STICKY_RESET 271
+#define GCC_PCIE1_AXI_S_STICKY_RESET 272
+#define GCC_PCIE1_AXI_S_RESET 273
+#define GCC_PCIE1_AXI_M_STICKY_RESET 274
+#define GCC_PCIE1_AXI_M_RESET 275
+#define GCC_PCIE1_AUX_RESET 276
+#define GCC_PCIE1_AHB_RESET 277
+#define GCC_PCIE2_PIPE_RESET 278
+#define GCC_PCIE2_CORE_STICKY_RESET 279
+#define GCC_PCIE2_AXI_S_STICKY_RESET 280
+#define GCC_PCIE2_AXI_S_RESET 281
+#define GCC_PCIE2_AXI_M_STICKY_RESET 282
+#define GCC_PCIE2_AXI_M_RESET 283
+#define GCC_PCIE2_AUX_RESET 284
+#define GCC_PCIE2_AHB_RESET 285
+#define GCC_PCIE3_PIPE_RESET 286
+#define GCC_PCIE3_CORE_STICKY_RESET 287
+#define GCC_PCIE3_AXI_S_STICKY_RESET 288
+#define GCC_PCIE3_AXI_S_RESET 289
+#define GCC_PCIE3_AXI_M_STICKY_RESET 290
+#define GCC_PCIE3_AXI_M_RESET 291
+#define GCC_PCIE3_AUX_RESET 292
+#define GCC_PCIE3_AHB_RESET 293
+#define GCC_NSS_PARTIAL_RESET 294
+#define GCC_UNIPHY0_XPCS_ARES 295
+#define GCC_UNIPHY1_XPCS_ARES 296
+#define GCC_UNIPHY2_XPCS_ARES 297
+#define GCC_USB1_BCR 298
+#define GCC_QUSB2_1_PHY_BCR 299
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,ipq5424-nsscc.h b/include/dt-bindings/reset/qcom,ipq5424-nsscc.h
new file mode 100644
index 000000000000..9627e3b0ad30
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,ipq5424-nsscc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_IPQ5424_NSSCC_H
+#define _DT_BINDINGS_RESET_QCOM_IPQ5424_NSSCC_H
+
+#define NSS_CC_CE_APB_CLK_ARES 0
+#define NSS_CC_CE_AXI_CLK_ARES 1
+#define NSS_CC_DEBUG_CLK_ARES 2
+#define NSS_CC_EIP_CLK_ARES 3
+#define NSS_CC_NSS_CSR_CLK_ARES 4
+#define NSS_CC_NSSNOC_CE_APB_CLK_ARES 5
+#define NSS_CC_NSSNOC_CE_AXI_CLK_ARES 6
+#define NSS_CC_NSSNOC_EIP_CLK_ARES 7
+#define NSS_CC_NSSNOC_NSS_CSR_CLK_ARES 8
+#define NSS_CC_NSSNOC_PPE_CLK_ARES 9
+#define NSS_CC_NSSNOC_PPE_CFG_CLK_ARES 10
+#define NSS_CC_PORT1_MAC_CLK_ARES 11
+#define NSS_CC_PORT1_RX_CLK_ARES 12
+#define NSS_CC_PORT1_TX_CLK_ARES 13
+#define NSS_CC_PORT2_MAC_CLK_ARES 14
+#define NSS_CC_PORT2_RX_CLK_ARES 15
+#define NSS_CC_PORT2_TX_CLK_ARES 16
+#define NSS_CC_PORT3_MAC_CLK_ARES 17
+#define NSS_CC_PORT3_RX_CLK_ARES 18
+#define NSS_CC_PORT3_TX_CLK_ARES 19
+#define NSS_CC_PPE_BCR 20
+#define NSS_CC_PPE_EDMA_CLK_ARES 21
+#define NSS_CC_PPE_EDMA_CFG_CLK_ARES 22
+#define NSS_CC_PPE_SWITCH_BTQ_CLK_ARES 23
+#define NSS_CC_PPE_SWITCH_CLK_ARES 24
+#define NSS_CC_PPE_SWITCH_CFG_CLK_ARES 25
+#define NSS_CC_PPE_SWITCH_IPE_CLK_ARES 26
+#define NSS_CC_UNIPHY_PORT1_RX_CLK_ARES 27
+#define NSS_CC_UNIPHY_PORT1_TX_CLK_ARES 28
+#define NSS_CC_UNIPHY_PORT2_RX_CLK_ARES 29
+#define NSS_CC_UNIPHY_PORT2_TX_CLK_ARES 30
+#define NSS_CC_UNIPHY_PORT3_RX_CLK_ARES 31
+#define NSS_CC_UNIPHY_PORT3_TX_CLK_ARES 32
+#define NSS_CC_XGMAC0_PTP_REF_CLK_ARES 33
+#define NSS_CC_XGMAC1_PTP_REF_CLK_ARES 34
+#define NSS_CC_XGMAC2_PTP_REF_CLK_ARES 35
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,ipq9574-gcc.h b/include/dt-bindings/reset/qcom,ipq9574-gcc.h
new file mode 100644
index 000000000000..c709d103673d
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,ipq9574-gcc.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2018-2023, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_GCC_9574_H
+#define _DT_BINDINGS_RESET_IPQ_GCC_9574_H
+
+#define GCC_ADSS_BCR 0
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 1
+#define GCC_BLSP1_BCR 2
+#define GCC_BLSP1_QUP1_BCR 3
+#define GCC_BLSP1_QUP2_BCR 4
+#define GCC_BLSP1_QUP3_BCR 5
+#define GCC_BLSP1_QUP4_BCR 6
+#define GCC_BLSP1_QUP5_BCR 7
+#define GCC_BLSP1_QUP6_BCR 8
+#define GCC_BLSP1_UART1_BCR 9
+#define GCC_BLSP1_UART2_BCR 10
+#define GCC_BLSP1_UART3_BCR 11
+#define GCC_BLSP1_UART4_BCR 12
+#define GCC_BLSP1_UART5_BCR 13
+#define GCC_BLSP1_UART6_BCR 14
+#define GCC_BOOT_ROM_BCR 15
+#define GCC_MDIO_BCR 16
+#define GCC_NSS_BCR 17
+#define GCC_NSS_TBU_BCR 18
+#define GCC_PCIE0_BCR 19
+#define GCC_PCIE0_LINK_DOWN_BCR 20
+#define GCC_PCIE0_PHY_BCR 21
+#define GCC_PCIE0PHY_PHY_BCR 22
+#define GCC_PCIE1_BCR 23
+#define GCC_PCIE1_LINK_DOWN_BCR 24
+#define GCC_PCIE1_PHY_BCR 25
+#define GCC_PCIE1PHY_PHY_BCR 26
+#define GCC_PCIE2_BCR 27
+#define GCC_PCIE2_LINK_DOWN_BCR 28
+#define GCC_PCIE2_PHY_BCR 29
+#define GCC_PCIE2PHY_PHY_BCR 30
+#define GCC_PCIE3_BCR 31
+#define GCC_PCIE3_LINK_DOWN_BCR 32
+#define GCC_PCIE3_PHY_BCR 33
+#define GCC_PCIE3PHY_PHY_BCR 34
+#define GCC_PRNG_BCR 35
+#define GCC_QUSB2_0_PHY_BCR 36
+#define GCC_SDCC_BCR 37
+#define GCC_TLMM_BCR 38
+#define GCC_UNIPHY0_BCR 39
+#define GCC_UNIPHY1_BCR 40
+#define GCC_UNIPHY2_BCR 41
+#define GCC_USB0_PHY_BCR 42
+#define GCC_USB3PHY_0_PHY_BCR 43
+#define GCC_USB_BCR 44
+#define GCC_ANOC0_TBU_BCR 45
+#define GCC_ANOC1_TBU_BCR 46
+#define GCC_ANOC_BCR 47
+#define GCC_APSS_TCU_BCR 48
+#define GCC_CMN_BLK_BCR 49
+#define GCC_CMN_BLK_AHB_ARES 50
+#define GCC_CMN_BLK_SYS_ARES 51
+#define GCC_CMN_BLK_APU_ARES 52
+#define GCC_DCC_BCR 53
+#define GCC_DDRSS_BCR 54
+#define GCC_IMEM_BCR 55
+#define GCC_LPASS_BCR 56
+#define GCC_MPM_BCR 57
+#define GCC_MSG_RAM_BCR 58
+#define GCC_NSSNOC_MEMNOC_1_ARES 59
+#define GCC_NSSNOC_PCNOC_1_ARES 60
+#define GCC_NSSNOC_SNOC_1_ARES 61
+#define GCC_NSSNOC_XO_DCD_ARES 62
+#define GCC_NSSNOC_TS_ARES 63
+#define GCC_NSSCC_ARES 64
+#define GCC_NSSNOC_NSSCC_ARES 65
+#define GCC_NSSNOC_ATB_ARES 66
+#define GCC_NSSNOC_MEMNOC_ARES 67
+#define GCC_NSSNOC_QOSGEN_REF_ARES 68
+#define GCC_NSSNOC_SNOC_ARES 69
+#define GCC_NSSNOC_TIMEOUT_REF_ARES 70
+#define GCC_NSS_CFG_ARES 71
+#define GCC_UBI0_DBG_ARES 72
+#define GCC_PCIE0_AHB_ARES 73
+#define GCC_PCIE0_AUX_ARES 74
+#define GCC_PCIE0_AXI_M_ARES 75
+#define GCC_PCIE0_AXI_M_STICKY_ARES 76
+#define GCC_PCIE0_AXI_S_ARES 77
+#define GCC_PCIE0_AXI_S_STICKY_ARES 78
+#define GCC_PCIE0_CORE_STICKY_ARES 79
+#define GCC_PCIE0_PIPE_ARES 80
+#define GCC_PCIE1_AHB_ARES 81
+#define GCC_PCIE1_AUX_ARES 82
+#define GCC_PCIE1_AXI_M_ARES 83
+#define GCC_PCIE1_AXI_M_STICKY_ARES 84
+#define GCC_PCIE1_AXI_S_ARES 85
+#define GCC_PCIE1_AXI_S_STICKY_ARES 86
+#define GCC_PCIE1_CORE_STICKY_ARES 87
+#define GCC_PCIE1_PIPE_ARES 88
+#define GCC_PCIE2_AHB_ARES 89
+#define GCC_PCIE2_AUX_ARES 90
+#define GCC_PCIE2_AXI_M_ARES 91
+#define GCC_PCIE2_AXI_M_STICKY_ARES 92
+#define GCC_PCIE2_AXI_S_ARES 93
+#define GCC_PCIE2_AXI_S_STICKY_ARES 94
+#define GCC_PCIE2_CORE_STICKY_ARES 95
+#define GCC_PCIE2_PIPE_ARES 96
+#define GCC_PCIE3_AHB_ARES 97
+#define GCC_PCIE3_AUX_ARES 98
+#define GCC_PCIE3_AXI_M_ARES 99
+#define GCC_PCIE3_AXI_M_STICKY_ARES 100
+#define GCC_PCIE3_AXI_S_ARES 101
+#define GCC_PCIE3_AXI_S_STICKY_ARES 102
+#define GCC_PCIE3_CORE_STICKY_ARES 103
+#define GCC_PCIE3_PIPE_ARES 104
+#define GCC_PCNOC_BCR 105
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 106
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 107
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 108
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 109
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 110
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 111
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 112
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 113
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 114
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 115
+#define GCC_PCNOC_TBU_BCR 116
+#define GCC_Q6SS_DBG_ARES 117
+#define GCC_Q6_AHB_ARES 118
+#define GCC_Q6_AHB_S_ARES 119
+#define GCC_Q6_AXIM2_ARES 120
+#define GCC_Q6_AXIM_ARES 121
+#define GCC_QDSS_BCR 122
+#define GCC_QPIC_BCR 123
+#define GCC_QPIC_AHB_ARES 124
+#define GCC_QPIC_ARES 125
+#define GCC_RBCPR_BCR 126
+#define GCC_RBCPR_MX_BCR 127
+#define GCC_SEC_CTRL_BCR 128
+#define GCC_SMMU_CFG_BCR 129
+#define GCC_SNOC_BCR 130
+#define GCC_SPDM_BCR 131
+#define GCC_TME_BCR 132
+#define GCC_UNIPHY0_SYS_RESET 133
+#define GCC_UNIPHY0_AHB_RESET 134
+#define GCC_UNIPHY0_XPCS_RESET 135
+#define GCC_UNIPHY1_SYS_RESET 136
+#define GCC_UNIPHY1_AHB_RESET 137
+#define GCC_UNIPHY1_XPCS_RESET 138
+#define GCC_UNIPHY2_SYS_RESET 139
+#define GCC_UNIPHY2_AHB_RESET 140
+#define GCC_UNIPHY2_XPCS_RESET 141
+#define GCC_USB_MISC_RESET 142
+#define GCC_WCSSAON_RESET 143
+#define GCC_WCSS_ACMT_ARES 144
+#define GCC_WCSS_AHB_S_ARES 145
+#define GCC_WCSS_AXI_M_ARES 146
+#define GCC_WCSS_BCR 147
+#define GCC_WCSS_DBG_ARES 148
+#define GCC_WCSS_DBG_BDG_ARES 149
+#define GCC_WCSS_ECAHB_ARES 150
+#define GCC_WCSS_Q6_BCR 151
+#define GCC_WCSS_Q6_TBU_BCR 152
+#define GCC_TCSR_BCR 153
+#define GCC_CRYPTO_BCR 154
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,ipq9574-nsscc.h b/include/dt-bindings/reset/qcom,ipq9574-nsscc.h
new file mode 100644
index 000000000000..7f152e98b99c
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,ipq9574-nsscc.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, 2025 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_NSSCC_9574_H
+#define _DT_BINDINGS_RESET_IPQ_NSSCC_9574_H
+
+#define EDMA_HW_RESET 0
+#define NSS_CC_CE_BCR 1
+#define NSS_CC_CLC_BCR 2
+#define NSS_CC_EIP197_BCR 3
+#define NSS_CC_HAQ_BCR 4
+#define NSS_CC_IMEM_BCR 5
+#define NSS_CC_MAC_BCR 6
+#define NSS_CC_PPE_BCR 7
+#define NSS_CC_UBI_BCR 8
+#define NSS_CC_UNIPHY_BCR 9
+#define UBI3_CLKRST_CLAMP_ENABLE 10
+#define UBI3_CORE_CLAMP_ENABLE 11
+#define UBI2_CLKRST_CLAMP_ENABLE 12
+#define UBI2_CORE_CLAMP_ENABLE 13
+#define UBI1_CLKRST_CLAMP_ENABLE 14
+#define UBI1_CORE_CLAMP_ENABLE 15
+#define UBI0_CLKRST_CLAMP_ENABLE 16
+#define UBI0_CORE_CLAMP_ENABLE 17
+#define NSSNOC_NSS_CSR_ARES 18
+#define NSS_CSR_ARES 19
+#define PPE_BTQ_ARES 20
+#define PPE_IPE_ARES 21
+#define PPE_ARES 22
+#define PPE_CFG_ARES 23
+#define PPE_EDMA_ARES 24
+#define PPE_EDMA_CFG_ARES 25
+#define CRY_PPE_ARES 26
+#define NSSNOC_PPE_ARES 27
+#define NSSNOC_PPE_CFG_ARES 28
+#define PORT1_MAC_ARES 29
+#define PORT2_MAC_ARES 30
+#define PORT3_MAC_ARES 31
+#define PORT4_MAC_ARES 32
+#define PORT5_MAC_ARES 33
+#define PORT6_MAC_ARES 34
+#define XGMAC0_PTP_REF_ARES 35
+#define XGMAC1_PTP_REF_ARES 36
+#define XGMAC2_PTP_REF_ARES 37
+#define XGMAC3_PTP_REF_ARES 38
+#define XGMAC4_PTP_REF_ARES 39
+#define XGMAC5_PTP_REF_ARES 40
+#define HAQ_AHB_ARES 41
+#define HAQ_AXI_ARES 42
+#define NSSNOC_HAQ_AHB_ARES 43
+#define NSSNOC_HAQ_AXI_ARES 44
+#define CE_APB_ARES 45
+#define CE_AXI_ARES 46
+#define NSSNOC_CE_APB_ARES 47
+#define NSSNOC_CE_AXI_ARES 48
+#define CRYPTO_ARES 49
+#define NSSNOC_CRYPTO_ARES 50
+#define NSSNOC_NC_AXI0_1_ARES 51
+#define UBI0_CORE_ARES 52
+#define UBI1_CORE_ARES 53
+#define UBI2_CORE_ARES 54
+#define UBI3_CORE_ARES 55
+#define NC_AXI0_ARES 56
+#define UTCM0_ARES 57
+#define NC_AXI1_ARES 58
+#define UTCM1_ARES 59
+#define NC_AXI2_ARES 60
+#define UTCM2_ARES 61
+#define NC_AXI3_ARES 62
+#define UTCM3_ARES 63
+#define NSSNOC_NC_AXI0_ARES 64
+#define AHB0_ARES 65
+#define INTR0_AHB_ARES 66
+#define AHB1_ARES 67
+#define INTR1_AHB_ARES 68
+#define AHB2_ARES 69
+#define INTR2_AHB_ARES 70
+#define AHB3_ARES 71
+#define INTR3_AHB_ARES 72
+#define NSSNOC_AHB0_ARES 73
+#define NSSNOC_INT0_AHB_ARES 74
+#define AXI0_ARES 75
+#define AXI1_ARES 76
+#define AXI2_ARES 77
+#define AXI3_ARES 78
+#define NSSNOC_AXI0_ARES 79
+#define IMEM_QSB_ARES 80
+#define NSSNOC_IMEM_QSB_ARES 81
+#define IMEM_AHB_ARES 82
+#define NSSNOC_IMEM_AHB_ARES 83
+#define UNIPHY_PORT1_RX_ARES 84
+#define UNIPHY_PORT1_TX_ARES 85
+#define UNIPHY_PORT2_RX_ARES 86
+#define UNIPHY_PORT2_TX_ARES 87
+#define UNIPHY_PORT3_RX_ARES 88
+#define UNIPHY_PORT3_TX_ARES 89
+#define UNIPHY_PORT4_RX_ARES 90
+#define UNIPHY_PORT4_TX_ARES 91
+#define UNIPHY_PORT5_RX_ARES 92
+#define UNIPHY_PORT5_TX_ARES 93
+#define UNIPHY_PORT6_RX_ARES 94
+#define UNIPHY_PORT6_TX_ARES 95
+#define PORT1_RX_ARES 96
+#define PORT1_TX_ARES 97
+#define PORT2_RX_ARES 98
+#define PORT2_TX_ARES 99
+#define PORT3_RX_ARES 100
+#define PORT3_TX_ARES 101
+#define PORT4_RX_ARES 102
+#define PORT4_TX_ARES 103
+#define PORT5_RX_ARES 104
+#define PORT5_TX_ARES 105
+#define PORT6_RX_ARES 106
+#define PORT6_TX_ARES 107
+#define PPE_FULL_RESET 108
+#define UNIPHY0_SOFT_RESET 109
+#define UNIPHY1_SOFT_RESET 110
+#define UNIPHY2_SOFT_RESET 111
+#define UNIPHY_PORT1_ARES 112
+#define UNIPHY_PORT2_ARES 113
+#define UNIPHY_PORT3_ARES 114
+#define UNIPHY_PORT4_ARES 115
+#define UNIPHY_PORT5_ARES 116
+#define UNIPHY_PORT6_ARES 117
+#define NSSPORT1_RESET 118
+#define NSSPORT2_RESET 119
+#define NSSPORT3_RESET 120
+#define NSSPORT4_RESET 121
+#define NSSPORT5_RESET 122
+#define NSSPORT6_RESET 123
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,qca8k-nsscc.h b/include/dt-bindings/reset/qcom,qca8k-nsscc.h
new file mode 100644
index 000000000000..c71167a3bd41
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,qca8k-nsscc.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_QCA8K_NSS_CC_H
+#define _DT_BINDINGS_RESET_QCOM_QCA8K_NSS_CC_H
+
+#define NSS_CC_SWITCH_CORE_ARES 1
+#define NSS_CC_APB_BRIDGE_ARES 2
+#define NSS_CC_MAC0_TX_ARES 3
+#define NSS_CC_MAC0_TX_SRDS1_ARES 4
+#define NSS_CC_MAC0_RX_ARES 5
+#define NSS_CC_MAC0_RX_SRDS1_ARES 6
+#define NSS_CC_MAC1_SRDS1_CH0_RX_ARES 7
+#define NSS_CC_MAC1_TX_ARES 8
+#define NSS_CC_MAC1_GEPHY0_TX_ARES 9
+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_RX_ARES 10
+#define NSS_CC_MAC1_SRDS1_CH0_TX_ARES 11
+#define NSS_CC_MAC1_RX_ARES 12
+#define NSS_CC_MAC1_GEPHY0_RX_ARES 13
+#define NSS_CC_MAC1_SRDS1_CH0_XGMII_TX_ARES 14
+#define NSS_CC_MAC2_SRDS1_CH1_RX_ARES 15
+#define NSS_CC_MAC2_TX_ARES 16
+#define NSS_CC_MAC2_GEPHY1_TX_ARES 17
+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_RX_ARES 18
+#define NSS_CC_MAC2_SRDS1_CH1_TX_ARES 19
+#define NSS_CC_MAC2_RX_ARES 20
+#define NSS_CC_MAC2_GEPHY1_RX_ARES 21
+#define NSS_CC_MAC2_SRDS1_CH1_XGMII_TX_ARES 22
+#define NSS_CC_MAC3_SRDS1_CH2_RX_ARES 23
+#define NSS_CC_MAC3_TX_ARES 24
+#define NSS_CC_MAC3_GEPHY2_TX_ARES 25
+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_RX_ARES 26
+#define NSS_CC_MAC3_SRDS1_CH2_TX_ARES 27
+#define NSS_CC_MAC3_RX_ARES 28
+#define NSS_CC_MAC3_GEPHY2_RX_ARES 29
+#define NSS_CC_MAC3_SRDS1_CH2_XGMII_TX_ARES 30
+#define NSS_CC_MAC4_SRDS1_CH3_RX_ARES 31
+#define NSS_CC_MAC4_TX_ARES 32
+#define NSS_CC_MAC4_GEPHY3_TX_ARES 33
+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_RX_ARES 34
+#define NSS_CC_MAC4_SRDS1_CH3_TX_ARES 35
+#define NSS_CC_MAC4_RX_ARES 36
+#define NSS_CC_MAC4_GEPHY3_RX_ARES 37
+#define NSS_CC_MAC4_SRDS1_CH3_XGMII_TX_ARES 38
+#define NSS_CC_MAC5_TX_ARES 39
+#define NSS_CC_MAC5_TX_SRDS0_ARES 40
+#define NSS_CC_MAC5_RX_ARES 41
+#define NSS_CC_MAC5_RX_SRDS0_ARES 42
+#define NSS_CC_AHB_ARES 43
+#define NSS_CC_SEC_CTRL_AHB_ARES 44
+#define NSS_CC_TLMM_ARES 45
+#define NSS_CC_TLMM_AHB_ARES 46
+#define NSS_CC_CNOC_AHB_ARES 47
+#define NSS_CC_MDIO_AHB_ARES 48
+#define NSS_CC_MDIO_MASTER_AHB_ARES 49
+#define NSS_CC_SRDS0_SYS_ARES 50
+#define NSS_CC_SRDS1_SYS_ARES 51
+#define NSS_CC_GEPHY0_SYS_ARES 52
+#define NSS_CC_GEPHY1_SYS_ARES 53
+#define NSS_CC_GEPHY2_SYS_ARES 54
+#define NSS_CC_GEPHY3_SYS_ARES 55
+#define NSS_CC_SEC_CTRL_ARES 56
+#define NSS_CC_SEC_CTRL_SENSE_ARES 57
+#define NSS_CC_SLEEP_ARES 58
+#define NSS_CC_DEBUG_ARES 59
+#define NSS_CC_GEPHY0_ARES 60
+#define NSS_CC_GEPHY1_ARES 61
+#define NSS_CC_GEPHY2_ARES 62
+#define NSS_CC_GEPHY3_ARES 63
+#define NSS_CC_DSP_ARES 64
+#define NSS_CC_GEPHY_FULL_ARES 65
+#define NSS_CC_GLOBAL_ARES 66
+#define NSS_CC_XPCS_ARES 67
+#endif
diff --git a/include/dt-bindings/reset/qcom,sar2130p-gpucc.h b/include/dt-bindings/reset/qcom,sar2130p-gpucc.h
new file mode 100644
index 000000000000..99ba5f092e2a
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sar2130p-gpucc.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_GPU_CC_SAR2130P_H
+#define _DT_BINDINGS_RESET_QCOM_GPU_CC_SAR2130P_H
+
+#define GPUCC_GPU_CC_GX_BCR 0
+#define GPUCC_GPU_CC_ACD_BCR 1
+#define GPUCC_GPU_CC_GX_ACD_IROOT_BCR 2
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,sdm845-pdc.h b/include/dt-bindings/reset/qcom,sdm845-pdc.h
index 53c37f9c319a..03a0c0eb8147 100644
--- a/include/dt-bindings/reset/qcom,sdm845-pdc.h
+++ b/include/dt-bindings/reset/qcom,sdm845-pdc.h
@@ -16,5 +16,7 @@
#define PDC_DISPLAY_SYNC_RESET 7
#define PDC_COMPUTE_SYNC_RESET 8
#define PDC_MODEM_SYNC_RESET 9
+#define PDC_WLAN_RF_SYNC_RESET 10
+#define PDC_WPSS_SYNC_RESET 11
#endif
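
Since this hunk extends an already-published header, the two new entries append after the last assigned index (9); existing numbers are device-tree ABI and are never renumbered. A consumer of one of the new indices might look like the following sketch (the remoteproc label and "reset-names" value are assumptions, not taken from a shipped DTS):

    #include <dt-bindings/reset/qcom,sdm845-pdc.h>

    &remoteproc_wpss {
            resets = <&pdc_reset PDC_WPSS_SYNC_RESET>;
            reset-names = "pdc_sync";
    };
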
diff --git a/include/dt-bindings/reset/qcom,sm8350-videocc.h b/include/dt-bindings/reset/qcom,sm8350-videocc.h
new file mode 100644
index 000000000000..cd356b207a4a
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sm8350-videocc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_VIDEO_CC_SM8350_H
+#define _DT_BINDINGS_RESET_QCOM_VIDEO_CC_SM8350_H
+
+#define VIDEO_CC_CVP_INTERFACE_BCR 0
+#define VIDEO_CC_CVP_MVS0_BCR 1
+#define VIDEO_CC_MVS0C_CLK_ARES 2
+#define VIDEO_CC_CVP_MVS0C_BCR 3
+#define VIDEO_CC_CVP_MVS1_BCR 4
+#define VIDEO_CC_MVS1C_CLK_ARES 5
+#define VIDEO_CC_CVP_MVS1C_BCR 6
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,sm8450-gpucc.h b/include/dt-bindings/reset/qcom,sm8450-gpucc.h
new file mode 100644
index 000000000000..58ba8f987107
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sm8450-gpucc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_GPU_CC_SM8450_H
+#define _DT_BINDINGS_RESET_QCOM_GPU_CC_SM8450_H
+
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CX_BCR 1
+#define GPUCC_GPU_CC_FAST_HUB_BCR 2
+#define GPUCC_GPU_CC_FF_BCR 3
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 4
+#define GPUCC_GPU_CC_GMU_BCR 5
+#define GPUCC_GPU_CC_GX_BCR 6
+#define GPUCC_GPU_CC_XO_BCR 7
+#define GPUCC_GPU_CC_GX_ACD_IROOT_BCR 8
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,sm8650-gpucc.h b/include/dt-bindings/reset/qcom,sm8650-gpucc.h
new file mode 100644
index 000000000000..f021a6cccc66
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sm8650-gpucc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_GPU_CC_SM8650_H
+#define _DT_BINDINGS_RESET_QCOM_GPU_CC_SM8650_H
+
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CX_BCR 1
+#define GPUCC_GPU_CC_FAST_HUB_BCR 2
+#define GPUCC_GPU_CC_FF_BCR 3
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 4
+#define GPUCC_GPU_CC_GMU_BCR 5
+#define GPUCC_GPU_CC_GX_BCR 6
+#define GPUCC_GPU_CC_XO_BCR 7
+#define GPUCC_GPU_CC_GX_ACD_IROOT_BCR 8
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,x1e80100-gpucc.h b/include/dt-bindings/reset/qcom,x1e80100-gpucc.h
new file mode 100644
index 000000000000..32b43e71a16f
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,x1e80100-gpucc.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_QCOM_X1E80100_GPU_CC_H
+#define _DT_BINDINGS_RESET_QCOM_X1E80100_GPU_CC_H
+
+#define GPUCC_GPU_CC_ACD_BCR 0
+#define GPUCC_GPU_CC_CB_BCR 1
+#define GPUCC_GPU_CC_CX_BCR 2
+#define GPUCC_GPU_CC_FAST_HUB_BCR 3
+#define GPUCC_GPU_CC_FF_BCR 4
+#define GPUCC_GPU_CC_GFX3D_AON_BCR 5
+#define GPUCC_GPU_CC_GMU_BCR 6
+#define GPUCC_GPU_CC_GX_BCR 7
+#define GPUCC_GPU_CC_XO_BCR 8
+
+#endif
diff --git a/include/dt-bindings/reset/rockchip,rk3506-cru.h b/include/dt-bindings/reset/rockchip,rk3506-cru.h
new file mode 100644
index 000000000000..31c0d4aa410f
--- /dev/null
+++ b/include/dt-bindings/reset/rockchip,rk3506-cru.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023-2025 Rockchip Electronics Co., Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_ROCKCHIP_RK3506_H
+#define _DT_BINDINGS_RESET_ROCKCHIP_RK3506_H
+
+/* CRU-->SOFTRST_CON00 */
+#define SRST_NCOREPORESET0_AC 0
+#define SRST_NCOREPORESET1_AC 1
+#define SRST_NCOREPORESET2_AC 2
+#define SRST_NCORESET0_AC 3
+#define SRST_NCORESET1_AC 4
+#define SRST_NCORESET2_AC 5
+#define SRST_NL2RESET_AC 6
+#define SRST_A_CORE_BIU_AC 7
+#define SRST_H_M0_AC 8
+
+/* CRU-->SOFTRST_CON02 */
+#define SRST_NDBGRESET 9
+#define SRST_P_CORE_BIU 10
+#define SRST_PMU 11
+
+/* CRU-->SOFTRST_CON03 */
+#define SRST_P_DBG 12
+#define SRST_POT_DBG 13
+#define SRST_P_CORE_GRF 14
+#define SRST_CORE_EMA_DETECT 15
+#define SRST_REF_PVTPLL_CORE 16
+#define SRST_P_GPIO1 17
+#define SRST_DB_GPIO1 18
+
+/* CRU-->SOFTRST_CON04 */
+#define SRST_A_CORE_PERI_BIU 19
+#define SRST_A_DSMC 20
+#define SRST_P_DSMC 21
+#define SRST_FLEXBUS 22
+#define SRST_A_FLEXBUS 23
+#define SRST_H_FLEXBUS 24
+#define SRST_A_DSMC_SLV 25
+#define SRST_H_DSMC_SLV 26
+#define SRST_DSMC_SLV 27
+
+/* CRU-->SOFTRST_CON05 */
+#define SRST_A_BUS_BIU 28
+#define SRST_H_BUS_BIU 29
+#define SRST_P_BUS_BIU 30
+#define SRST_A_SYSRAM 31
+#define SRST_H_SYSRAM 32
+#define SRST_A_DMAC0 33
+#define SRST_A_DMAC1 34
+#define SRST_H_M0 35
+#define SRST_M0_JTAG 36
+#define SRST_H_CRYPTO 37
+
+/* CRU-->SOFTRST_CON06 */
+#define SRST_H_RNG 38
+#define SRST_P_BUS_GRF 39
+#define SRST_P_TIMER0 40
+#define SRST_TIMER0_CH0 41
+#define SRST_TIMER0_CH1 42
+#define SRST_TIMER0_CH2 43
+#define SRST_TIMER0_CH3 44
+#define SRST_TIMER0_CH4 45
+#define SRST_TIMER0_CH5 46
+#define SRST_P_WDT0 47
+#define SRST_T_WDT0 48
+#define SRST_P_WDT1 49
+#define SRST_T_WDT1 50
+#define SRST_P_MAILBOX 51
+#define SRST_P_INTMUX 52
+#define SRST_P_SPINLOCK 53
+
+/* CRU-->SOFTRST_CON07 */
+#define SRST_P_DDRC 54
+#define SRST_H_DDRPHY 55
+#define SRST_P_DDRMON 56
+#define SRST_DDRMON_OSC 57
+#define SRST_P_DDR_LPC 58
+#define SRST_H_USBOTG0 59
+#define SRST_USBOTG0_ADP 60
+#define SRST_H_USBOTG1 61
+#define SRST_USBOTG1_ADP 62
+#define SRST_P_USBPHY 63
+#define SRST_USBPHY_POR 64
+#define SRST_USBPHY_OTG0 65
+#define SRST_USBPHY_OTG1 66
+
+/* CRU-->SOFTRST_CON08 */
+#define SRST_A_DMA2DDR 67
+#define SRST_P_DMA2DDR 68
+
+/* CRU-->SOFTRST_CON09 */
+#define SRST_USBOTG0_UTMI 69
+#define SRST_USBOTG1_UTMI 70
+
+/* CRU-->SOFTRST_CON10 */
+#define SRST_A_DDRC_0 71
+#define SRST_A_DDRC_1 72
+#define SRST_A_DDR_BIU 73
+#define SRST_DDRC 74
+#define SRST_DDRMON 75
+
+/* CRU-->SOFTRST_CON11 */
+#define SRST_H_LSPERI_BIU 76
+#define SRST_P_UART0 77
+#define SRST_P_UART1 78
+#define SRST_P_UART2 79
+#define SRST_P_UART3 80
+#define SRST_P_UART4 81
+#define SRST_UART0 82
+#define SRST_UART1 83
+#define SRST_UART2 84
+#define SRST_UART3 85
+#define SRST_UART4 86
+#define SRST_P_I2C0 87
+#define SRST_I2C0 88
+
+/* CRU-->SOFTRST_CON12 */
+#define SRST_P_I2C1 89
+#define SRST_I2C1 90
+#define SRST_P_I2C2 91
+#define SRST_I2C2 92
+#define SRST_P_PWM1 93
+#define SRST_PWM1 94
+#define SRST_P_SPI0 95
+#define SRST_SPI0 96
+#define SRST_P_SPI1 97
+#define SRST_SPI1 98
+#define SRST_P_GPIO2 99
+#define SRST_DB_GPIO2 100
+
+/* CRU-->SOFTRST_CON13 */
+#define SRST_P_GPIO3 101
+#define SRST_DB_GPIO3 102
+#define SRST_P_GPIO4 103
+#define SRST_DB_GPIO4 104
+#define SRST_H_CAN0 105
+#define SRST_CAN0 106
+#define SRST_H_CAN1 107
+#define SRST_CAN1 108
+#define SRST_H_PDM 109
+#define SRST_M_PDM 110
+#define SRST_PDM 111
+#define SRST_SPDIFTX 112
+#define SRST_H_SPDIFTX 113
+#define SRST_H_SPDIFRX 114
+#define SRST_SPDIFRX 115
+#define SRST_M_SAI0 116
+
+/* CRU-->SOFTRST_CON14 */
+#define SRST_H_SAI0 117
+#define SRST_M_SAI1 118
+#define SRST_H_SAI1 119
+#define SRST_H_ASRC0 120
+#define SRST_ASRC0 121
+#define SRST_H_ASRC1 122
+#define SRST_ASRC1 123
+
+/* CRU-->SOFTRST_CON17 */
+#define SRST_H_HSPERI_BIU 124
+#define SRST_H_SDMMC 125
+#define SRST_H_FSPI 126
+#define SRST_S_FSPI 127
+#define SRST_P_SPI2 128
+#define SRST_A_MAC0 129
+#define SRST_A_MAC1 130
+
+/* CRU-->SOFTRST_CON18 */
+#define SRST_M_SAI2 131
+#define SRST_H_SAI2 132
+#define SRST_H_SAI3 133
+#define SRST_M_SAI3 134
+#define SRST_H_SAI4 135
+#define SRST_M_SAI4 136
+#define SRST_H_DSM 137
+#define SRST_M_DSM 138
+#define SRST_P_AUDIO_ADC 139
+#define SRST_M_AUDIO_ADC 140
+
+/* CRU-->SOFTRST_CON19 */
+#define SRST_P_SARADC 141
+#define SRST_SARADC 142
+#define SRST_SARADC_PHY 143
+#define SRST_P_OTPC_NS 144
+#define SRST_SBPI_OTPC_NS 145
+#define SRST_USER_OTPC_NS 146
+#define SRST_P_UART5 147
+#define SRST_UART5 148
+#define SRST_P_GPIO234_IOC 149
+
+/* CRU-->SOFTRST_CON21 */
+#define SRST_A_VIO_BIU 150
+#define SRST_H_VIO_BIU 151
+#define SRST_H_RGA 152
+#define SRST_A_RGA 153
+#define SRST_CORE_RGA 154
+#define SRST_A_VOP 155
+#define SRST_H_VOP 156
+#define SRST_VOP 157
+#define SRST_P_DPHY 158
+#define SRST_P_DSI_HOST 159
+#define SRST_P_TSADC 160
+#define SRST_TSADC 161
+
+/* CRU-->SOFTRST_CON22 */
+#define SRST_P_GPIO1_IOC 162
+
+#endif
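
On the provider side, the CRU node exports these per-SOFTRST_CON indices with #reset-cells = <1>, so each define above becomes a valid second cell in a consumer's "resets" property. A provider sketch follows; the compatible string is inferred from the header's file name, and the register range is illustrative, not taken from the SoC manual:

    cru: clock-controller@ff9a0000 {
            compatible = "rockchip,rk3506-cru";
            reg = <0xff9a0000 0x10000>;
            #clock-cells = <1>;
            #reset-cells = <1>;
    };
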
diff --git a/include/dt-bindings/reset/rockchip,rk3528-cru.h b/include/dt-bindings/reset/rockchip,rk3528-cru.h
new file mode 100644
index 000000000000..6b024c5f2e1c
--- /dev/null
+++ b/include/dt-bindings/reset/rockchip,rk3528-cru.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2024 Yao Zi <ziyao@disroot.org>
+ * Author: Joseph Chen <chenjh@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_ROCKCHIP_RK3528_H
+#define _DT_BINDINGS_RESET_ROCKCHIP_RK3528_H
+
+#define SRST_CORE0_PO 0
+#define SRST_CORE1_PO 1
+#define SRST_CORE2_PO 2
+#define SRST_CORE3_PO 3
+#define SRST_CORE0 4
+#define SRST_CORE1 5
+#define SRST_CORE2 6
+#define SRST_CORE3 7
+#define SRST_NL2 8
+#define SRST_CORE_BIU 9
+#define SRST_CORE_CRYPTO 10
+#define SRST_P_DBG 11
+#define SRST_POT_DBG 12
+#define SRST_NT_DBG 13
+#define SRST_P_CORE_GRF 14
+#define SRST_P_DAPLITE_BIU 15
+#define SRST_P_CPU_BIU 16
+#define SRST_REF_PVTPLL_CORE 17
+#define SRST_A_BUS_VOPGL_BIU 18
+#define SRST_A_BUS_H_BIU 19
+#define SRST_A_SYSMEM_BIU 20
+#define SRST_A_BUS_BIU 21
+#define SRST_H_BUS_BIU 22
+#define SRST_P_BUS_BIU 23
+#define SRST_P_DFT2APB 24
+#define SRST_P_BUS_GRF 25
+#define SRST_A_BUS_M_BIU 26
+#define SRST_A_GIC 27
+#define SRST_A_SPINLOCK 28
+#define SRST_A_DMAC 29
+#define SRST_P_TIMER 30
+#define SRST_TIMER0 31
+#define SRST_TIMER1 32
+#define SRST_TIMER2 33
+#define SRST_TIMER3 34
+#define SRST_TIMER4 35
+#define SRST_TIMER5 36
+#define SRST_P_JDBCK_DAP 37
+#define SRST_JDBCK_DAP 38
+#define SRST_P_WDT_NS 39
+#define SRST_T_WDT_NS 40
+#define SRST_H_TRNG_NS 41
+#define SRST_P_UART0 42
+#define SRST_S_UART0 43
+#define SRST_PKA_CRYPTO 44
+#define SRST_A_CRYPTO 45
+#define SRST_H_CRYPTO 46
+#define SRST_P_DMA2DDR 47
+#define SRST_A_DMA2DDR 48
+#define SRST_P_PWM0 49
+#define SRST_PWM0 50
+#define SRST_P_PWM1 51
+#define SRST_PWM1 52
+#define SRST_P_SCR 53
+#define SRST_A_DCF 54
+#define SRST_P_INTMUX 55
+#define SRST_A_VPU_BIU 56
+#define SRST_H_VPU_BIU 57
+#define SRST_P_VPU_BIU 58
+#define SRST_A_VPU 59
+#define SRST_H_VPU 60
+#define SRST_P_CRU_PCIE 61
+#define SRST_P_VPU_GRF 62
+#define SRST_H_SFC 63
+#define SRST_S_SFC 64
+#define SRST_C_EMMC 65
+#define SRST_H_EMMC 66
+#define SRST_A_EMMC 67
+#define SRST_B_EMMC 68
+#define SRST_T_EMMC 69
+#define SRST_P_GPIO1 70
+#define SRST_DB_GPIO1 71
+#define SRST_A_VPU_L_BIU 72
+#define SRST_P_VPU_IOC 73
+#define SRST_H_SAI_I2S0 74
+#define SRST_M_SAI_I2S0 75
+#define SRST_H_SAI_I2S2 76
+#define SRST_M_SAI_I2S2 77
+#define SRST_P_ACODEC 78
+#define SRST_P_GPIO3 79
+#define SRST_DB_GPIO3 80
+#define SRST_P_SPI1 81
+#define SRST_SPI1 82
+#define SRST_P_UART2 83
+#define SRST_S_UART2 84
+#define SRST_P_UART5 85
+#define SRST_S_UART5 86
+#define SRST_P_UART6 87
+#define SRST_S_UART6 88
+#define SRST_P_UART7 89
+#define SRST_S_UART7 90
+#define SRST_P_I2C3 91
+#define SRST_I2C3 92
+#define SRST_P_I2C5 93
+#define SRST_I2C5 94
+#define SRST_P_I2C6 95
+#define SRST_I2C6 96
+#define SRST_A_MAC 97
+#define SRST_P_PCIE 98
+#define SRST_PCIE_PIPE_PHY 99
+#define SRST_PCIE_POWER_UP 100
+#define SRST_P_PCIE_PHY 101
+#define SRST_P_PIPE_GRF 102
+#define SRST_H_SDIO0 103
+#define SRST_H_SDIO1 104
+#define SRST_TS_0 105
+#define SRST_TS_1 106
+#define SRST_P_CAN2 107
+#define SRST_CAN2 108
+#define SRST_P_CAN3 109
+#define SRST_CAN3 110
+#define SRST_P_SARADC 111
+#define SRST_SARADC 112
+#define SRST_SARADC_PHY 113
+#define SRST_P_TSADC 114
+#define SRST_TSADC 115
+#define SRST_A_USB3OTG 116
+#define SRST_A_GPU_BIU 117
+#define SRST_P_GPU_BIU 118
+#define SRST_A_GPU 119
+#define SRST_REF_PVTPLL_GPU 120
+#define SRST_H_RKVENC_BIU 121
+#define SRST_A_RKVENC_BIU 122
+#define SRST_P_RKVENC_BIU 123
+#define SRST_H_RKVENC 124
+#define SRST_A_RKVENC 125
+#define SRST_CORE_RKVENC 126
+#define SRST_H_SAI_I2S1 127
+#define SRST_M_SAI_I2S1 128
+#define SRST_P_I2C1 129
+#define SRST_I2C1 130
+#define SRST_P_I2C0 131
+#define SRST_I2C0 132
+#define SRST_P_SPI0 133
+#define SRST_SPI0 134
+#define SRST_P_GPIO4 135
+#define SRST_DB_GPIO4 136
+#define SRST_P_RKVENC_IOC 137
+#define SRST_H_SPDIF 138
+#define SRST_M_SPDIF 139
+#define SRST_H_PDM 140
+#define SRST_M_PDM 141
+#define SRST_P_UART1 142
+#define SRST_S_UART1 143
+#define SRST_P_UART3 144
+#define SRST_S_UART3 145
+#define SRST_P_RKVENC_GRF 146
+#define SRST_P_CAN0 147
+#define SRST_CAN0 148
+#define SRST_P_CAN1 149
+#define SRST_CAN1 150
+#define SRST_A_VO_BIU 151
+#define SRST_H_VO_BIU 152
+#define SRST_P_VO_BIU 153
+#define SRST_H_RGA2E 154
+#define SRST_A_RGA2E 155
+#define SRST_CORE_RGA2E 156
+#define SRST_H_VDPP 157
+#define SRST_A_VDPP 158
+#define SRST_CORE_VDPP 159
+#define SRST_P_VO_GRF 160
+#define SRST_P_CRU 161
+#define SRST_A_VOP_BIU 162
+#define SRST_H_VOP 163
+#define SRST_D_VOP0 164
+#define SRST_D_VOP1 165
+#define SRST_A_VOP 166
+#define SRST_P_HDMI 167
+#define SRST_HDMI 168
+#define SRST_P_HDMIPHY 169
+#define SRST_H_HDCP_KEY 170
+#define SRST_A_HDCP 171
+#define SRST_H_HDCP 172
+#define SRST_P_HDCP 173
+#define SRST_H_CVBS 174
+#define SRST_D_CVBS_VOP 175
+#define SRST_D_4X_CVBS_VOP 176
+#define SRST_A_JPEG_DECODER 177
+#define SRST_H_JPEG_DECODER 178
+#define SRST_A_VO_L_BIU 179
+#define SRST_A_MAC_VO 180
+#define SRST_A_JPEG_BIU 181
+#define SRST_H_SAI_I2S3 182
+#define SRST_M_SAI_I2S3 183
+#define SRST_MACPHY 184
+#define SRST_P_VCDCPHY 185
+#define SRST_P_GPIO2 186
+#define SRST_DB_GPIO2 187
+#define SRST_P_VO_IOC 188
+#define SRST_H_SDMMC0 189
+#define SRST_P_OTPC_NS 190
+#define SRST_SBPI_OTPC_NS 191
+#define SRST_USER_OTPC_NS 192
+#define SRST_HDMIHDP0 193
+#define SRST_H_USBHOST 194
+#define SRST_H_USBHOST_ARB 195
+#define SRST_HOST_UTMI 196
+#define SRST_P_UART4 197
+#define SRST_S_UART4 198
+#define SRST_P_I2C4 199
+#define SRST_I2C4 200
+#define SRST_P_I2C7 201
+#define SRST_I2C7 202
+#define SRST_P_USBPHY 203
+#define SRST_USBPHY_POR 204
+#define SRST_USBPHY_OTG 205
+#define SRST_USBPHY_HOST 206
+#define SRST_P_DDRPHY_CRU 207
+#define SRST_H_RKVDEC_BIU 208
+#define SRST_A_RKVDEC_BIU 209
+#define SRST_A_RKVDEC 210
+#define SRST_H_RKVDEC 211
+#define SRST_HEVC_CA_RKVDEC 212
+#define SRST_REF_PVTPLL_RKVDEC 213
+#define SRST_P_DDR_BIU 214
+#define SRST_P_DDRC 215
+#define SRST_P_DDRMON 216
+#define SRST_TIMER_DDRMON 217
+#define SRST_P_MSCH_BIU 218
+#define SRST_P_DDR_GRF 219
+#define SRST_P_DDR_HWLP 220
+#define SRST_P_DDRPHY 221
+#define SRST_MSCH_BIU 222
+#define SRST_A_DDR_UPCTL 223
+#define SRST_DDR_UPCTL 224
+#define SRST_DDRMON 225
+#define SRST_A_DDR_SCRAMBLE 226
+#define SRST_A_SPLIT 227
+#define SRST_DDR_PHY 228
+
+#endif // _DT_BINDINGS_RESET_ROCKCHIP_RK3528_H
diff --git a/include/dt-bindings/reset/rockchip,rk3562-cru.h b/include/dt-bindings/reset/rockchip,rk3562-cru.h
new file mode 100644
index 000000000000..8df95113056e
--- /dev/null
+++ b/include/dt-bindings/reset/rockchip,rk3562-cru.h
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024-2025 Rockchip Electronics Co. Ltd.
+ *
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_ROCKCHIP_RK3562_H
+#define _DT_BINDINGS_RESET_ROCKCHIP_RK3562_H
+
+/********Name=SOFTRST_CON01,Offset=0x404********/
+#define SRST_A_TOP_BIU 0
+#define SRST_A_TOP_VIO_BIU 1
+#define SRST_REF_PVTPLL_LOGIC 2
+/********Name=SOFTRST_CON03,Offset=0x40C********/
+#define SRST_NCOREPORESET0 3
+#define SRST_NCOREPORESET1 4
+#define SRST_NCOREPORESET2 5
+#define SRST_NCOREPORESET3 6
+#define SRST_NCORESET0 7
+#define SRST_NCORESET1 8
+#define SRST_NCORESET2 9
+#define SRST_NCORESET3 10
+#define SRST_NL2RESET 11
+/********Name=SOFTRST_CON04,Offset=0x410********/
+#define SRST_DAP 12
+#define SRST_P_DBG_DAPLITE 13
+#define SRST_REF_PVTPLL_CORE 14
+/********Name=SOFTRST_CON05,Offset=0x414********/
+#define SRST_A_CORE_BIU 15
+#define SRST_P_CORE_BIU 16
+#define SRST_H_CORE_BIU 17
+/********Name=SOFTRST_CON06,Offset=0x418********/
+#define SRST_A_NPU_BIU 18
+#define SRST_H_NPU_BIU 19
+#define SRST_A_RKNN 20
+#define SRST_H_RKNN 21
+#define SRST_REF_PVTPLL_NPU 22
+/********Name=SOFTRST_CON08,Offset=0x420********/
+#define SRST_A_GPU_BIU 23
+#define SRST_GPU 24
+#define SRST_REF_PVTPLL_GPU 25
+#define SRST_GPU_BRG_BIU 26
+/********Name=SOFTRST_CON09,Offset=0x424********/
+#define SRST_RKVENC_CORE 27
+#define SRST_A_VEPU_BIU 28
+#define SRST_H_VEPU_BIU 29
+#define SRST_A_RKVENC 30
+#define SRST_H_RKVENC 31
+/********Name=SOFTRST_CON10,Offset=0x428********/
+#define SRST_RKVDEC_HEVC_CA 32
+#define SRST_A_VDPU_BIU 33
+#define SRST_H_VDPU_BIU 34
+#define SRST_A_RKVDEC 35
+#define SRST_H_RKVDEC 36
+/********Name=SOFTRST_CON11,Offset=0x42C********/
+#define SRST_A_VI_BIU 37
+#define SRST_H_VI_BIU 38
+#define SRST_P_VI_BIU 39
+#define SRST_ISP 40
+#define SRST_A_VICAP 41
+#define SRST_H_VICAP 42
+#define SRST_D_VICAP 43
+#define SRST_I0_VICAP 44
+#define SRST_I1_VICAP 45
+#define SRST_I2_VICAP 46
+#define SRST_I3_VICAP 47
+/********Name=SOFTRST_CON12,Offset=0x430********/
+#define SRST_P_CSIHOST0 48
+#define SRST_P_CSIHOST1 49
+#define SRST_P_CSIHOST2 50
+#define SRST_P_CSIHOST3 51
+#define SRST_P_CSIPHY0 52
+#define SRST_P_CSIPHY1 53
+/********Name=SOFTRST_CON13,Offset=0x434********/
+#define SRST_A_VO_BIU 54
+#define SRST_H_VO_BIU 55
+#define SRST_A_VOP 56
+#define SRST_H_VOP 57
+#define SRST_D_VOP 58
+#define SRST_D_VOP1 59
+/********Name=SOFTRST_CON14,Offset=0x438********/
+#define SRST_A_RGA_BIU 60
+#define SRST_H_RGA_BIU 61
+#define SRST_A_RGA 62
+#define SRST_H_RGA 63
+#define SRST_RGA_CORE 64
+#define SRST_A_JDEC 65
+#define SRST_H_JDEC 66
+/********Name=SOFTRST_CON15,Offset=0x43C********/
+#define SRST_B_EBK_BIU 67
+#define SRST_P_EBK_BIU 68
+#define SRST_AHB2AXI_EBC 69
+#define SRST_H_EBC 70
+#define SRST_D_EBC 71
+#define SRST_H_EINK 72
+#define SRST_P_EINK 73
+/********Name=SOFTRST_CON16,Offset=0x440********/
+#define SRST_P_PHP_BIU 74
+#define SRST_A_PHP_BIU 75
+#define SRST_P_PCIE20 76
+#define SRST_PCIE20_POWERUP 77
+#define SRST_USB3OTG 78
+/********Name=SOFTRST_CON17,Offset=0x444********/
+#define SRST_PIPEPHY 79
+/********Name=SOFTRST_CON18,Offset=0x448********/
+#define SRST_A_BUS_BIU 80
+#define SRST_H_BUS_BIU 81
+#define SRST_P_BUS_BIU 82
+/********Name=SOFTRST_CON19,Offset=0x44C********/
+#define SRST_P_I2C1 83
+#define SRST_P_I2C2 84
+#define SRST_P_I2C3 85
+#define SRST_P_I2C4 86
+#define SRST_P_I2C5 87
+#define SRST_I2C1 88
+#define SRST_I2C2 89
+#define SRST_I2C3 90
+#define SRST_I2C4 91
+#define SRST_I2C5 92
+/********Name=SOFTRST_CON20,Offset=0x450********/
+#define SRST_BUS_GPIO3 93
+#define SRST_BUS_GPIO4 94
+/********Name=SOFTRST_CON21,Offset=0x454********/
+#define SRST_P_TIMER 95
+#define SRST_TIMER0 96
+#define SRST_TIMER1 97
+#define SRST_TIMER2 98
+#define SRST_TIMER3 99
+#define SRST_TIMER4 100
+#define SRST_TIMER5 101
+#define SRST_P_STIMER 102
+#define SRST_STIMER0 103
+#define SRST_STIMER1 104
+/********Name=SOFTRST_CON22,Offset=0x458********/
+#define SRST_P_WDTNS 105
+#define SRST_WDTNS 106
+#define SRST_P_GRF 107
+#define SRST_P_SGRF 108
+#define SRST_P_MAILBOX 109
+#define SRST_P_INTC 110
+#define SRST_A_BUS_GIC400 111
+#define SRST_A_BUS_GIC400_DEBUG 112
+/********Name=SOFTRST_CON23,Offset=0x45C********/
+#define SRST_A_BUS_SPINLOCK 113
+#define SRST_A_DCF 114
+#define SRST_P_DCF 115
+#define SRST_F_BUS_CM0_CORE 116
+#define SRST_T_BUS_CM0_JTAG 117
+#define SRST_H_ICACHE 118
+#define SRST_H_DCACHE 119
+/********Name=SOFTRST_CON24,Offset=0x460********/
+#define SRST_P_TSADC 120
+#define SRST_TSADC 121
+#define SRST_TSADCPHY 122
+#define SRST_P_DFT2APB 123
+/********Name=SOFTRST_CON25,Offset=0x464********/
+#define SRST_A_GMAC 124
+#define SRST_P_APB2ASB_VCCIO156 125
+#define SRST_P_DSIPHY 126
+#define SRST_P_DSITX 127
+#define SRST_P_CPU_EMA_DET 128
+#define SRST_P_HASH 129
+#define SRST_P_TOPCRU 130
+/********Name=SOFTRST_CON26,Offset=0x468********/
+#define SRST_P_ASB2APB_VCCIO156 131
+#define SRST_P_IOC_VCCIO156 132
+#define SRST_P_GPIO3_VCCIO156 133
+#define SRST_P_GPIO4_VCCIO156 134
+#define SRST_P_SARADC_VCCIO156 135
+#define SRST_SARADC_VCCIO156 136
+#define SRST_SARADC_VCCIO156_PHY 137
+/********Name=SOFTRST_CON27,Offset=0x46c********/
+#define SRST_A_MAC100 138
+
+/********Name=PMU0SOFTRST_CON00,Offset=0x10200********/
+#define SRST_P_PMU0_CRU 139
+#define SRST_P_PMU0_PMU 140
+#define SRST_PMU0_PMU 141
+#define SRST_P_PMU0_HP_TIMER 142
+#define SRST_PMU0_HP_TIMER 143
+#define SRST_PMU0_32K_HP_TIMER 144
+#define SRST_P_PMU0_PVTM 145
+#define SRST_PMU0_PVTM 146
+#define SRST_P_IOC_PMUIO 147
+#define SRST_P_PMU0_GPIO0 148
+#define SRST_PMU0_GPIO0 149
+#define SRST_P_PMU0_GRF 150
+#define SRST_P_PMU0_SGRF 151
+/********Name=PMU0SOFTRST_CON01,Offset=0x10204********/
+#define SRST_DDR_FAIL_SAFE 152
+#define SRST_P_PMU0_SCRKEYGEN 153
+/********Name=PMU0SOFTRST_CON02,Offset=0x10208********/
+#define SRST_P_PMU0_I2C0 154
+#define SRST_PMU0_I2C0 155
+
+/********Name=PMU1SOFTRST_CON00,Offset=0x18200********/
+#define SRST_P_PMU1_CRU 156
+#define SRST_H_PMU1_MEM 157
+#define SRST_H_PMU1_BIU 158
+#define SRST_P_PMU1_BIU 159
+#define SRST_P_PMU1_UART0 160
+#define SRST_S_PMU1_UART0 161
+/********Name=PMU1SOFTRST_CON01,Offset=0x18204********/
+#define SRST_P_PMU1_SPI0 162
+#define SRST_PMU1_SPI0 163
+#define SRST_P_PMU1_PWM0 164
+#define SRST_PMU1_PWM0 165
+/********Name=PMU1SOFTRST_CON02,Offset=0x18208********/
+#define SRST_F_PMU1_CM0_CORE 166
+#define SRST_T_PMU1_CM0_JTAG 167
+#define SRST_P_PMU1_WDTNS 168
+#define SRST_PMU1_WDTNS 169
+#define SRST_PMU1_MAILBOX 170
+
+/********Name=DDRSOFTRST_CON00,Offset=0x20200********/
+#define SRST_MSCH_BRG_BIU 171
+#define SRST_P_MSCH_BIU 172
+#define SRST_P_DDR_HWLP 173
+#define SRST_P_DDR_PHY 290
+#define SRST_P_DDR_DFICTL 174
+#define SRST_P_DDR_DMA2DDR 175
+/********Name=DDRSOFTRST_CON01,Offset=0x20204********/
+#define SRST_P_DDR_MON 176
+#define SRST_TM_DDR_MON 177
+#define SRST_P_DDR_GRF 178
+#define SRST_P_DDR_CRU 179
+#define SRST_P_SUBDDR_CRU 180
+
+/********Name=SUBDDRSOFTRST_CON00,Offset=0x28200********/
+#define SRST_MSCH_BIU 181
+#define SRST_DDR_PHY 182
+#define SRST_DDR_DFICTL 183
+#define SRST_DDR_SCRAMBLE 184
+#define SRST_DDR_MON 185
+#define SRST_A_DDR_SPLIT 186
+#define SRST_DDR_DMA2DDR 187
+
+/********Name=PERISOFTRST_CON01,Offset=0x30404********/
+#define SRST_A_PERI_BIU 188
+#define SRST_H_PERI_BIU 189
+#define SRST_P_PERI_BIU 190
+#define SRST_P_PERICRU 191
+/********Name=PERISOFTRST_CON02,Offset=0x30408********/
+#define SRST_H_SAI0_8CH 192
+#define SRST_M_SAI0_8CH 193
+#define SRST_H_SAI1_8CH 194
+#define SRST_M_SAI1_8CH 195
+#define SRST_H_SAI2_2CH 196
+#define SRST_M_SAI2_2CH 197
+/********Name=PERISOFTRST_CON03,Offset=0x3040C********/
+#define SRST_H_DSM 198
+#define SRST_DSM 199
+#define SRST_H_PDM 200
+#define SRST_M_PDM 201
+#define SRST_H_SPDIF 202
+#define SRST_M_SPDIF 203
+/********Name=PERISOFTRST_CON04,Offset=0x30410********/
+#define SRST_H_SDMMC0 204
+#define SRST_H_SDMMC1 205
+#define SRST_H_EMMC 206
+#define SRST_A_EMMC 207
+#define SRST_C_EMMC 208
+#define SRST_B_EMMC 209
+#define SRST_T_EMMC 210
+#define SRST_S_SFC 211
+#define SRST_H_SFC 212
+/********Name=PERISOFTRST_CON05,Offset=0x30414********/
+#define SRST_H_USB2HOST 213
+#define SRST_H_USB2HOST_ARB 214
+#define SRST_USB2HOST_UTMI 215
+/********Name=PERISOFTRST_CON06,Offset=0x30418********/
+#define SRST_P_SPI1 216
+#define SRST_SPI1 217
+#define SRST_P_SPI2 218
+#define SRST_SPI2 219
+/********Name=PERISOFTRST_CON07,Offset=0x3041C********/
+#define SRST_P_UART1 220
+#define SRST_P_UART2 221
+#define SRST_P_UART3 222
+#define SRST_P_UART4 223
+#define SRST_P_UART5 224
+#define SRST_P_UART6 225
+#define SRST_P_UART7 226
+#define SRST_P_UART8 227
+#define SRST_P_UART9 228
+#define SRST_S_UART1 229
+#define SRST_S_UART2 230
+/********Name=PERISOFTRST_CON08,Offset=0x30420********/
+#define SRST_S_UART3 231
+#define SRST_S_UART4 232
+#define SRST_S_UART5 233
+#define SRST_S_UART6 234
+#define SRST_S_UART7 235
+/********Name=PERISOFTRST_CON09,Offset=0x30424********/
+#define SRST_S_UART8 236
+#define SRST_S_UART9 237
+/********Name=PERISOFTRST_CON10,Offset=0x30428********/
+#define SRST_P_PWM1_PERI 238
+#define SRST_PWM1_PERI 239
+#define SRST_P_PWM2_PERI 240
+#define SRST_PWM2_PERI 241
+#define SRST_P_PWM3_PERI 242
+#define SRST_PWM3_PERI 243
+/********Name=PERISOFTRST_CON11,Offset=0x3042C********/
+#define SRST_P_CAN0 244
+#define SRST_CAN0 245
+#define SRST_P_CAN1 246
+#define SRST_CAN1 247
+/********Name=PERISOFTRST_CON12,Offset=0x30430********/
+#define SRST_A_CRYPTO 248
+#define SRST_H_CRYPTO 249
+#define SRST_P_CRYPTO 250
+#define SRST_CORE_CRYPTO 251
+#define SRST_PKA_CRYPTO 252
+#define SRST_H_KLAD 253
+#define SRST_P_KEY_READER 254
+#define SRST_H_RK_RNG_NS 255
+#define SRST_H_RK_RNG_S 256
+#define SRST_H_TRNG_NS 257
+#define SRST_H_TRNG_S 258
+#define SRST_H_CRYPTO_S 259
+/********Name=PERISOFTRST_CON13,Offset=0x30434********/
+#define SRST_P_PERI_WDT 260
+#define SRST_T_PERI_WDT 261
+#define SRST_A_SYSMEM 262
+#define SRST_H_BOOTROM 263
+#define SRST_P_PERI_GRF 264
+#define SRST_A_DMAC 265
+#define SRST_A_RKDMAC 267
+/********Name=PERISOFTRST_CON14,Offset=0x30438********/
+#define SRST_P_OTPC_NS 268
+#define SRST_SBPI_OTPC_NS 269
+#define SRST_USER_OTPC_NS 270
+#define SRST_P_OTPC_S 271
+#define SRST_SBPI_OTPC_S 272
+#define SRST_USER_OTPC_S 273
+#define SRST_OTPC_ARB 274
+#define SRST_P_OTPPHY 275
+#define SRST_OTP_NPOR 276
+/********Name=PERISOFTRST_CON15,Offset=0x3043C********/
+#define SRST_P_USB2PHY 277
+#define SRST_USB2PHY_POR 278
+#define SRST_USB2PHY_OTG 279
+#define SRST_USB2PHY_HOST 280
+#define SRST_P_PIPEPHY 281
+/********Name=PERISOFTRST_CON16,Offset=0x30440********/
+#define SRST_P_SARADC 282
+#define SRST_SARADC 283
+#define SRST_SARADC_PHY 284
+#define SRST_P_IOC_VCCIO234 285
+/********Name=PERISOFTRST_CON17,Offset=0x30444********/
+#define SRST_P_PERI_GPIO1 286
+#define SRST_P_PERI_GPIO2 287
+#define SRST_PERI_GPIO1 288
+#define SRST_PERI_GPIO2 289
+
+#endif
diff --git a/include/dt-bindings/reset/rockchip,rk3576-cru.h b/include/dt-bindings/reset/rockchip,rk3576-cru.h
new file mode 100644
index 000000000000..ae856906f3a3
--- /dev/null
+++ b/include/dt-bindings/reset/rockchip,rk3576-cru.h
@@ -0,0 +1,564 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2023 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2024 Collabora Ltd.
+ *
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ * Author: Detlev Casanova <detlev.casanova@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_ROCKCHIP_RK3576_H
+#define _DT_BINDINGS_RESET_ROCKCHIP_RK3576_H
+
+#define SRST_A_TOP_BIU 0
+#define SRST_P_TOP_BIU 1
+#define SRST_A_TOP_MID_BIU 2
+#define SRST_A_SECURE_HIGH_BIU 3
+#define SRST_H_TOP_BIU 4
+
+#define SRST_H_VO0VOP_CHANNEL_BIU 5
+#define SRST_A_VO0VOP_CHANNEL_BIU 6
+
+#define SRST_BISRINTF 7
+
+#define SRST_H_AUDIO_BIU 8
+#define SRST_H_ASRC_2CH_0 9
+#define SRST_H_ASRC_2CH_1 10
+#define SRST_H_ASRC_4CH_0 11
+#define SRST_H_ASRC_4CH_1 12
+#define SRST_ASRC_2CH_0 13
+#define SRST_ASRC_2CH_1 14
+#define SRST_ASRC_4CH_0 15
+#define SRST_ASRC_4CH_1 16
+#define SRST_M_SAI0_8CH 17
+#define SRST_H_SAI0_8CH 18
+#define SRST_H_SPDIF_RX0 19
+#define SRST_M_SPDIF_RX0 20
+
+#define SRST_H_SPDIF_RX1 21
+#define SRST_M_SPDIF_RX1 22
+#define SRST_M_SAI1_8CH 23
+#define SRST_H_SAI1_8CH 24
+#define SRST_M_SAI2_2CH 25
+#define SRST_H_SAI2_2CH 26
+#define SRST_M_SAI3_2CH 27
+#define SRST_H_SAI3_2CH 28
+
+#define SRST_M_SAI4_2CH 29
+#define SRST_H_SAI4_2CH 30
+#define SRST_H_ACDCDIG_DSM 31
+#define SRST_M_ACDCDIG_DSM 32
+#define SRST_PDM1 33
+#define SRST_H_PDM1 34
+#define SRST_M_PDM1 35
+#define SRST_H_SPDIF_TX0 36
+#define SRST_M_SPDIF_TX0 37
+#define SRST_H_SPDIF_TX1 38
+#define SRST_M_SPDIF_TX1 39
+
+#define SRST_A_BUS_BIU 40
+#define SRST_P_BUS_BIU 41
+#define SRST_P_CRU 42
+#define SRST_H_CAN0 43
+#define SRST_CAN0 44
+#define SRST_H_CAN1 45
+#define SRST_CAN1 46
+#define SRST_P_INTMUX2BUS 47
+#define SRST_P_VCCIO_IOC 48
+#define SRST_H_BUS_BIU 49
+#define SRST_KEY_SHIFT 50
+
+#define SRST_P_I2C1 51
+#define SRST_P_I2C2 52
+#define SRST_P_I2C3 53
+#define SRST_P_I2C4 54
+#define SRST_P_I2C5 55
+#define SRST_P_I2C6 56
+#define SRST_P_I2C7 57
+#define SRST_P_I2C8 58
+#define SRST_P_I2C9 59
+#define SRST_P_WDT_BUSMCU 60
+#define SRST_T_WDT_BUSMCU 61
+#define SRST_A_GIC 62
+#define SRST_I2C1 63
+#define SRST_I2C2 64
+#define SRST_I2C3 65
+#define SRST_I2C4 66
+
+#define SRST_I2C5 67
+#define SRST_I2C6 68
+#define SRST_I2C7 69
+#define SRST_I2C8 70
+#define SRST_I2C9 71
+#define SRST_P_SARADC 72
+#define SRST_SARADC 73
+#define SRST_P_TSADC 74
+#define SRST_TSADC 75
+#define SRST_P_UART0 76
+#define SRST_P_UART2 77
+#define SRST_P_UART3 78
+#define SRST_P_UART4 79
+#define SRST_P_UART5 80
+#define SRST_P_UART6 81
+
+#define SRST_P_UART7 82
+#define SRST_P_UART8 83
+#define SRST_P_UART9 84
+#define SRST_P_UART10 85
+#define SRST_P_UART11 86
+#define SRST_S_UART0 87
+#define SRST_S_UART2 88
+#define SRST_S_UART3 89
+#define SRST_S_UART4 90
+#define SRST_S_UART5 91
+
+#define SRST_S_UART6 92
+#define SRST_S_UART7 93
+#define SRST_S_UART8 94
+#define SRST_S_UART9 95
+#define SRST_S_UART10 96
+#define SRST_S_UART11 97
+#define SRST_P_SPI0 98
+#define SRST_P_SPI1 99
+#define SRST_P_SPI2 100
+
+#define SRST_P_SPI3 101
+#define SRST_P_SPI4 102
+#define SRST_SPI0 103
+#define SRST_SPI1 104
+#define SRST_SPI2 105
+#define SRST_SPI3 106
+#define SRST_SPI4 107
+#define SRST_P_WDT0 108
+#define SRST_T_WDT0 109
+#define SRST_P_SYS_GRF 110
+#define SRST_P_PWM1 111
+#define SRST_PWM1 112
+
+#define SRST_P_BUSTIMER0 113
+#define SRST_P_BUSTIMER1 114
+#define SRST_TIMER0 115
+#define SRST_TIMER1 116
+#define SRST_TIMER2 117
+#define SRST_TIMER3 118
+#define SRST_TIMER4 119
+#define SRST_TIMER5 120
+#define SRST_P_BUSIOC 121
+#define SRST_P_MAILBOX0 122
+#define SRST_P_GPIO1 123
+
+#define SRST_GPIO1 124
+#define SRST_P_GPIO2 125
+#define SRST_GPIO2 126
+#define SRST_P_GPIO3 127
+#define SRST_GPIO3 128
+#define SRST_P_GPIO4 129
+#define SRST_GPIO4 130
+#define SRST_A_DECOM 131
+#define SRST_P_DECOM 132
+#define SRST_D_DECOM 133
+#define SRST_TIMER6 134
+#define SRST_TIMER7 135
+#define SRST_TIMER8 136
+#define SRST_TIMER9 137
+#define SRST_TIMER10 138
+
+#define SRST_TIMER11 139
+#define SRST_A_DMAC0 140
+#define SRST_A_DMAC1 141
+#define SRST_A_DMAC2 142
+#define SRST_A_SPINLOCK 143
+#define SRST_REF_PVTPLL_BUS 144
+#define SRST_H_I3C0 145
+#define SRST_H_I3C1 146
+#define SRST_H_BUS_CM0_BIU 147
+#define SRST_F_BUS_CM0_CORE 148
+#define SRST_T_BUS_CM0_JTAG 149
+
+#define SRST_P_INTMUX2PMU 150
+#define SRST_P_INTMUX2DDR 151
+#define SRST_P_PVTPLL_BUS 152
+#define SRST_P_PWM2 153
+#define SRST_PWM2 154
+#define SRST_FREQ_PWM1 155
+#define SRST_COUNTER_PWM1 156
+#define SRST_I3C0 157
+#define SRST_I3C1 158
+
+#define SRST_P_DDR_MON_CH0 159
+#define SRST_P_DDR_BIU 160
+#define SRST_P_DDR_UPCTL_CH0 161
+#define SRST_TM_DDR_MON_CH0 162
+#define SRST_A_DDR_BIU 163
+#define SRST_DFI_CH0 164
+#define SRST_DDR_MON_CH0 165
+#define SRST_P_DDR_HWLP_CH0 166
+#define SRST_P_DDR_MON_CH1 167
+#define SRST_P_DDR_HWLP_CH1 168
+
+#define SRST_P_DDR_UPCTL_CH1 169
+#define SRST_TM_DDR_MON_CH1 170
+#define SRST_DFI_CH1 171
+#define SRST_A_DDR01_MSCH0 172
+#define SRST_A_DDR01_MSCH1 173
+#define SRST_DDR_MON_CH1 174
+#define SRST_DDR_SCRAMBLE_CH0 175
+#define SRST_DDR_SCRAMBLE_CH1 176
+#define SRST_P_AHB2APB 177
+#define SRST_H_AHB2APB 178
+#define SRST_H_DDR_BIU 179
+#define SRST_F_DDR_CM0_CORE 180
+
+#define SRST_P_DDR01_MSCH0 181
+#define SRST_P_DDR01_MSCH1 182
+#define SRST_DDR_TIMER0 183
+#define SRST_DDR_TIMER1 184
+#define SRST_T_WDT_DDR 185
+#define SRST_P_WDT 186
+#define SRST_P_TIMER 187
+#define SRST_T_DDR_CM0_JTAG 188
+#define SRST_P_DDR_GRF 189
+
+#define SRST_DDR_UPCTL_CH0 190
+#define SRST_A_DDR_UPCTL_0_CH0 191
+#define SRST_A_DDR_UPCTL_1_CH0 192
+#define SRST_A_DDR_UPCTL_2_CH0 193
+#define SRST_A_DDR_UPCTL_3_CH0 194
+#define SRST_A_DDR_UPCTL_4_CH0 195
+
+#define SRST_DDR_UPCTL_CH1 196
+#define SRST_A_DDR_UPCTL_0_CH1 197
+#define SRST_A_DDR_UPCTL_1_CH1 198
+#define SRST_A_DDR_UPCTL_2_CH1 199
+#define SRST_A_DDR_UPCTL_3_CH1 200
+#define SRST_A_DDR_UPCTL_4_CH1 201
+
+#define SRST_REF_PVTPLL_DDR 202
+#define SRST_P_PVTPLL_DDR 203
+
+#define SRST_A_RKNN0 204
+#define SRST_A_RKNN0_BIU 205
+#define SRST_L_RKNN0_BIU 206
+
+#define SRST_A_RKNN1 207
+#define SRST_A_RKNN1_BIU 208
+#define SRST_L_RKNN1_BIU 209
+
+#define SRST_NPU_DAP 210
+#define SRST_L_NPUSUBSYS_BIU 211
+#define SRST_P_NPUTOP_BIU 212
+#define SRST_P_NPU_TIMER 213
+#define SRST_NPUTIMER0 214
+#define SRST_NPUTIMER1 215
+#define SRST_P_NPU_WDT 216
+#define SRST_T_NPU_WDT 217
+
+#define SRST_A_RKNN_CBUF 218
+#define SRST_A_RVCORE0 219
+#define SRST_P_NPU_GRF 220
+#define SRST_P_PVTPLL_NPU 221
+#define SRST_NPU_PVTPLL 222
+#define SRST_H_NPU_CM0_BIU 223
+#define SRST_F_NPU_CM0_CORE 224
+#define SRST_T_NPU_CM0_JTAG 225
+#define SRST_A_RKNNTOP_BIU 226
+#define SRST_H_RKNN_CBUF 227
+#define SRST_H_RKNNTOP_BIU 228
+
+#define SRST_H_NVM_BIU 229
+#define SRST_A_NVM_BIU 230
+#define SRST_S_FSPI 231
+#define SRST_H_FSPI 232
+#define SRST_C_EMMC 233
+#define SRST_H_EMMC 234
+#define SRST_A_EMMC 235
+#define SRST_B_EMMC 236
+#define SRST_T_EMMC 237
+
+#define SRST_P_GRF 238
+#define SRST_P_PHP_BIU 239
+#define SRST_A_PHP_BIU 240
+#define SRST_P_PCIE0 241
+#define SRST_PCIE0_POWER_UP 242
+
+#define SRST_A_USB3OTG1 243
+#define SRST_A_MMU0 244
+#define SRST_A_SLV_MMU0 245
+#define SRST_A_MMU1 246
+
+#define SRST_A_SLV_MMU1 247
+#define SRST_P_PCIE1 248
+#define SRST_PCIE1_POWER_UP 249
+
+#define SRST_RXOOB0 250
+#define SRST_RXOOB1 251
+#define SRST_PMALIVE0 252
+#define SRST_PMALIVE1 253
+#define SRST_A_SATA0 254
+#define SRST_A_SATA1 255
+#define SRST_ASIC1 256
+#define SRST_ASIC0 257
+
+#define SRST_P_CSIDPHY1 258
+#define SRST_SCAN_CSIDPHY1 259
+
+#define SRST_P_SDGMAC_GRF 260
+#define SRST_P_SDGMAC_BIU 261
+#define SRST_A_SDGMAC_BIU 262
+#define SRST_H_SDGMAC_BIU 263
+#define SRST_A_GMAC0 264
+#define SRST_A_GMAC1 265
+#define SRST_P_GMAC0 266
+#define SRST_P_GMAC1 267
+#define SRST_H_SDIO 268
+
+#define SRST_H_SDMMC0 269
+#define SRST_S_FSPI1 270
+#define SRST_H_FSPI1 271
+#define SRST_A_DSMC_BIU 272
+#define SRST_A_DSMC 273
+#define SRST_P_DSMC 274
+#define SRST_H_HSGPIO 275
+#define SRST_HSGPIO 276
+#define SRST_A_HSGPIO 277
+
+#define SRST_H_RKVDEC 278
+#define SRST_H_RKVDEC_BIU 279
+#define SRST_A_RKVDEC_BIU 280
+#define SRST_RKVDEC_HEVC_CA 281
+#define SRST_RKVDEC_CORE 282
+
+#define SRST_A_USB_BIU 283
+#define SRST_P_USBUFS_BIU 284
+#define SRST_A_USB3OTG0 285
+#define SRST_A_UFS_BIU 286
+#define SRST_A_MMU2 287
+#define SRST_A_SLV_MMU2 288
+#define SRST_A_UFS_SYS 289
+
+#define SRST_A_UFS 290
+#define SRST_P_USBUFS_GRF 291
+#define SRST_P_UFS_GRF 292
+
+#define SRST_H_VPU_BIU 293
+#define SRST_A_JPEG_BIU 294
+#define SRST_A_RGA_BIU 295
+#define SRST_A_VDPP_BIU 296
+#define SRST_A_EBC_BIU 297
+#define SRST_H_RGA2E_0 298
+#define SRST_A_RGA2E_0 299
+#define SRST_CORE_RGA2E_0 300
+
+#define SRST_A_JPEG 301
+#define SRST_H_JPEG 302
+#define SRST_H_VDPP 303
+#define SRST_A_VDPP 304
+#define SRST_CORE_VDPP 305
+#define SRST_H_RGA2E_1 306
+#define SRST_A_RGA2E_1 307
+#define SRST_CORE_RGA2E_1 308
+#define SRST_H_EBC 309
+#define SRST_A_EBC 310
+#define SRST_D_EBC 311
+
+#define SRST_H_VEPU0_BIU 312
+#define SRST_A_VEPU0_BIU 313
+#define SRST_H_VEPU0 314
+#define SRST_A_VEPU0 315
+#define SRST_VEPU0_CORE 316
+
+#define SRST_A_VI_BIU 317
+#define SRST_H_VI_BIU 318
+#define SRST_P_VI_BIU 319
+#define SRST_D_VICAP 320
+#define SRST_A_VICAP 321
+#define SRST_H_VICAP 322
+#define SRST_ISP0 323
+#define SRST_ISP0_VICAP 324
+
+#define SRST_CORE_VPSS 325
+#define SRST_P_CSI_HOST_0 326
+#define SRST_P_CSI_HOST_1 327
+#define SRST_P_CSI_HOST_2 328
+#define SRST_P_CSI_HOST_3 329
+#define SRST_P_CSI_HOST_4 330
+
+#define SRST_CIFIN 331
+#define SRST_VICAP_I0CLK 332
+#define SRST_VICAP_I1CLK 333
+#define SRST_VICAP_I2CLK 334
+#define SRST_VICAP_I3CLK 335
+#define SRST_VICAP_I4CLK 336
+
+#define SRST_A_VOP_BIU 337
+#define SRST_A_VOP2_BIU 338
+#define SRST_H_VOP_BIU 339
+#define SRST_P_VOP_BIU 340
+#define SRST_H_VOP 341
+#define SRST_A_VOP 342
+#define SRST_D_VP0 343
+
+#define SRST_D_VP1 344
+#define SRST_D_VP2 345
+#define SRST_P_VOP2_BIU 346
+#define SRST_P_VOPGRF 347
+
+#define SRST_H_VO0_BIU 348
+#define SRST_P_VO0_BIU 349
+#define SRST_A_HDCP0_BIU 350
+#define SRST_P_VO0_GRF 351
+#define SRST_A_HDCP0 352
+#define SRST_H_HDCP0 353
+#define SRST_HDCP0 354
+
+#define SRST_P_DSIHOST0 355
+#define SRST_DSIHOST0 356
+#define SRST_P_HDMITX0 357
+#define SRST_HDMITX0_REF 358
+#define SRST_P_EDP0 359
+#define SRST_EDP0_24M 360
+
+#define SRST_M_SAI5_8CH 361
+#define SRST_H_SAI5_8CH 362
+#define SRST_M_SAI6_8CH 363
+#define SRST_H_SAI6_8CH 364
+#define SRST_H_SPDIF_TX2 365
+#define SRST_M_SPDIF_TX2 366
+#define SRST_H_SPDIF_RX2 367
+#define SRST_M_SPDIF_RX2 368
+
+#define SRST_H_SAI8_8CH 369
+#define SRST_M_SAI8_8CH 370
+
+#define SRST_H_VO1_BIU 371
+#define SRST_P_VO1_BIU 372
+#define SRST_M_SAI7_8CH 373
+#define SRST_H_SAI7_8CH 374
+#define SRST_H_SPDIF_TX3 375
+#define SRST_H_SPDIF_TX4 376
+#define SRST_H_SPDIF_TX5 377
+#define SRST_M_SPDIF_TX3 378
+
+#define SRST_DP0 379
+#define SRST_P_VO1_GRF 380
+#define SRST_A_HDCP1_BIU 381
+#define SRST_A_HDCP1 382
+#define SRST_H_HDCP1 383
+#define SRST_HDCP1 384
+#define SRST_H_SAI9_8CH 385
+#define SRST_M_SAI9_8CH 386
+#define SRST_M_SPDIF_TX4 387
+#define SRST_M_SPDIF_TX5 388
+
+#define SRST_GPU 389
+#define SRST_A_S_GPU_BIU 390
+#define SRST_A_M0_GPU_BIU 391
+#define SRST_P_GPU_BIU 392
+#define SRST_P_GPU_GRF 393
+#define SRST_GPU_PVTPLL 394
+#define SRST_P_PVTPLL_GPU 395
+
+#define SRST_A_CENTER_BIU 396
+#define SRST_A_DMA2DDR 397
+#define SRST_A_DDR_SHAREMEM 398
+#define SRST_A_DDR_SHAREMEM_BIU 399
+#define SRST_H_CENTER_BIU 400
+#define SRST_P_CENTER_GRF 401
+#define SRST_P_DMA2DDR 402
+#define SRST_P_SHAREMEM 403
+#define SRST_P_CENTER_BIU 404
+
+#define SRST_LINKSYM_HDMITXPHY0 405
+
+#define SRST_DP0_PIXELCLK 406
+#define SRST_PHY_DP0_TX 407
+#define SRST_DP1_PIXELCLK 408
+#define SRST_DP2_PIXELCLK 409
+
+#define SRST_H_VEPU1_BIU 410
+#define SRST_A_VEPU1_BIU 411
+#define SRST_H_VEPU1 412
+#define SRST_A_VEPU1 413
+#define SRST_VEPU1_CORE 414
+
+#define SRST_P_PHPPHY_CRU 415
+#define SRST_P_APB2ASB_SLV_CHIP_TOP 416
+#define SRST_P_PCIE2_COMBOPHY0 417
+#define SRST_P_PCIE2_COMBOPHY0_GRF 418
+#define SRST_P_PCIE2_COMBOPHY1 419
+#define SRST_P_PCIE2_COMBOPHY1_GRF 420
+
+#define SRST_PCIE0_PIPE_PHY 421
+#define SRST_PCIE1_PIPE_PHY 422
+
+#define SRST_H_CRYPTO_NS 423
+#define SRST_H_TRNG_NS 424
+#define SRST_P_OTPC_NS 425
+#define SRST_OTPC_NS 426
+
+#define SRST_P_HDPTX_GRF 427
+#define SRST_P_HDPTX_APB 428
+#define SRST_P_MIPI_DCPHY 429
+#define SRST_P_DCPHY_GRF 430
+#define SRST_P_BOT0_APB2ASB 431
+#define SRST_P_BOT1_APB2ASB 432
+#define SRST_USB2DEBUG 433
+#define SRST_P_CSIPHY_GRF 434
+#define SRST_P_CSIPHY 435
+#define SRST_P_USBPHY_GRF_0 436
+#define SRST_P_USBPHY_GRF_1 437
+#define SRST_P_USBDP_GRF 438
+#define SRST_P_USBDPPHY 439
+#define SRST_USBDP_COMBO_PHY_INIT 440
+
+#define SRST_USBDP_COMBO_PHY_CMN 441
+#define SRST_USBDP_COMBO_PHY_LANE 442
+#define SRST_USBDP_COMBO_PHY_PCS 443
+#define SRST_M_MIPI_DCPHY 444
+#define SRST_S_MIPI_DCPHY 445
+#define SRST_SCAN_CSIPHY 446
+#define SRST_P_VCCIO6_IOC 447
+#define SRST_OTGPHY_0 448
+#define SRST_OTGPHY_1 449
+#define SRST_HDPTX_INIT 450
+#define SRST_HDPTX_CMN 451
+#define SRST_HDPTX_LANE 452
+#define SRST_HDMITXHDP 453
+
+#define SRST_MPHY_INIT 454
+#define SRST_P_MPHY_GRF 455
+#define SRST_P_VCCIO7_IOC 456
+
+#define SRST_H_PMU1_BIU 457
+#define SRST_P_PMU1_NIU 458
+#define SRST_H_PMU_CM0_BIU 459
+#define SRST_PMU_CM0_CORE 460
+#define SRST_PMU_CM0_JTAG 461
+
+#define SRST_P_CRU_PMU1 462
+#define SRST_P_PMU1_GRF 463
+#define SRST_P_PMU1_IOC 464
+#define SRST_P_PMU1WDT 465
+#define SRST_T_PMU1WDT 466
+#define SRST_P_PMUTIMER 467
+#define SRST_PMUTIMER0 468
+#define SRST_PMUTIMER1 469
+#define SRST_P_PMU1PWM 470
+#define SRST_PMU1PWM 471
+
+#define SRST_P_I2C0 472
+#define SRST_I2C0 473
+#define SRST_S_UART1 474
+#define SRST_P_UART1 475
+#define SRST_PDM0 476
+#define SRST_H_PDM0 477
+
+#define SRST_M_PDM0 478
+#define SRST_H_VAD 479
+
+#define SRST_P_PMU0GRF 480
+#define SRST_P_PMU0IOC 481
+#define SRST_P_GPIO0 482
+#define SRST_DB_GPIO0 483
+
+#endif
diff --git a/include/dt-bindings/reset/rockchip,rk3588-cru.h b/include/dt-bindings/reset/rockchip,rk3588-cru.h
new file mode 100644
index 000000000000..878beae6dc3b
--- /dev/null
+++ b/include/dt-bindings/reset/rockchip,rk3588-cru.h
@@ -0,0 +1,795 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2021, 2024 Rockchip Electronics Co. Ltd.
+ * Copyright (c) 2022 Collabora Ltd.
+ *
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ * Author: Sebastian Reichel <sebastian.reichel@collabora.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_ROCKCHIP_RK3588_H
+#define _DT_BINDINGS_RESET_ROCKCHIP_RK3588_H
+
+#define SRST_A_TOP_BIU 0
+#define SRST_P_TOP_BIU 1
+#define SRST_P_CSIPHY0 2
+#define SRST_CSIPHY0 3
+#define SRST_P_CSIPHY1 4
+#define SRST_CSIPHY1 5
+#define SRST_A_TOP_M500_BIU 6
+
+#define SRST_A_TOP_M400_BIU 7
+#define SRST_A_TOP_S200_BIU 8
+#define SRST_A_TOP_S400_BIU 9
+#define SRST_A_TOP_M300_BIU 10
+#define SRST_USBDP_COMBO_PHY0_INIT 11
+#define SRST_USBDP_COMBO_PHY0_CMN 12
+#define SRST_USBDP_COMBO_PHY0_LANE 13
+#define SRST_USBDP_COMBO_PHY0_PCS 14
+#define SRST_USBDP_COMBO_PHY1_INIT 15
+
+#define SRST_USBDP_COMBO_PHY1_CMN 16
+#define SRST_USBDP_COMBO_PHY1_LANE 17
+#define SRST_USBDP_COMBO_PHY1_PCS 18
+#define SRST_DCPHY0 19
+#define SRST_P_MIPI_DCPHY0 20
+#define SRST_P_MIPI_DCPHY0_GRF 21
+
+#define SRST_DCPHY1 22
+#define SRST_P_MIPI_DCPHY1 23
+#define SRST_P_MIPI_DCPHY1_GRF 24
+#define SRST_P_APB2ASB_SLV_CDPHY 25
+#define SRST_P_APB2ASB_SLV_CSIPHY 26
+#define SRST_P_APB2ASB_SLV_VCCIO3_5 27
+#define SRST_P_APB2ASB_SLV_VCCIO6 28
+#define SRST_P_APB2ASB_SLV_EMMCIO 29
+#define SRST_P_APB2ASB_SLV_IOC_TOP 30
+#define SRST_P_APB2ASB_SLV_IOC_RIGHT 31
+
+#define SRST_P_CRU 32
+#define SRST_A_CHANNEL_SECURE2VO1USB 33
+#define SRST_A_CHANNEL_SECURE2CENTER 34
+#define SRST_H_CHANNEL_SECURE2VO1USB 35
+#define SRST_H_CHANNEL_SECURE2CENTER 36
+
+#define SRST_P_CHANNEL_SECURE2VO1USB 37
+#define SRST_P_CHANNEL_SECURE2CENTER 38
+
+#define SRST_H_AUDIO_BIU 39
+#define SRST_P_AUDIO_BIU 40
+#define SRST_H_I2S0_8CH 41
+#define SRST_M_I2S0_8CH_TX 42
+#define SRST_M_I2S0_8CH_RX 43
+#define SRST_P_ACDCDIG 44
+#define SRST_H_I2S2_2CH 45
+#define SRST_H_I2S3_2CH 46
+
+#define SRST_M_I2S2_2CH 47
+#define SRST_M_I2S3_2CH 48
+#define SRST_DAC_ACDCDIG 49
+#define SRST_H_SPDIF0 50
+
+#define SRST_M_SPDIF0 51
+#define SRST_H_SPDIF1 52
+#define SRST_M_SPDIF1 53
+#define SRST_H_PDM1 54
+#define SRST_PDM1 55
+
+#define SRST_A_BUS_BIU 56
+#define SRST_P_BUS_BIU 57
+#define SRST_A_GIC 58
+#define SRST_A_GIC_DBG 59
+#define SRST_A_DMAC0 60
+#define SRST_A_DMAC1 61
+#define SRST_A_DMAC2 62
+#define SRST_P_I2C1 63
+#define SRST_P_I2C2 64
+#define SRST_P_I2C3 65
+#define SRST_P_I2C4 66
+#define SRST_P_I2C5 67
+#define SRST_P_I2C6 68
+#define SRST_P_I2C7 69
+#define SRST_P_I2C8 70
+
+#define SRST_I2C1 71
+#define SRST_I2C2 72
+#define SRST_I2C3 73
+#define SRST_I2C4 74
+#define SRST_I2C5 75
+#define SRST_I2C6 76
+#define SRST_I2C7 77
+#define SRST_I2C8 78
+#define SRST_P_CAN0 79
+#define SRST_CAN0 80
+#define SRST_P_CAN1 81
+#define SRST_CAN1 82
+#define SRST_P_CAN2 83
+#define SRST_CAN2 84
+#define SRST_P_SARADC 85
+
+#define SRST_P_TSADC 86
+#define SRST_TSADC 87
+#define SRST_P_UART1 88
+#define SRST_P_UART2 89
+#define SRST_P_UART3 90
+#define SRST_P_UART4 91
+#define SRST_P_UART5 92
+#define SRST_P_UART6 93
+#define SRST_P_UART7 94
+#define SRST_P_UART8 95
+#define SRST_P_UART9 96
+#define SRST_S_UART1 97
+
+#define SRST_S_UART2 98
+#define SRST_S_UART3 99
+#define SRST_S_UART4 100
+#define SRST_S_UART5 101
+#define SRST_S_UART6 102
+#define SRST_S_UART7 103
+
+#define SRST_S_UART8 104
+#define SRST_S_UART9 105
+#define SRST_P_SPI0 106
+#define SRST_P_SPI1 107
+#define SRST_P_SPI2 108
+#define SRST_P_SPI3 109
+#define SRST_P_SPI4 110
+#define SRST_SPI0 111
+#define SRST_SPI1 112
+#define SRST_SPI2 113
+#define SRST_SPI3 114
+#define SRST_SPI4 115
+
+#define SRST_P_WDT0 116
+#define SRST_T_WDT0 117
+#define SRST_P_SYS_GRF 118
+#define SRST_P_PWM1 119
+#define SRST_PWM1 120
+#define SRST_P_PWM2 121
+#define SRST_PWM2 122
+#define SRST_P_PWM3 123
+#define SRST_PWM3 124
+#define SRST_P_BUSTIMER0 125
+#define SRST_P_BUSTIMER1 126
+#define SRST_BUSTIMER0 127
+
+#define SRST_BUSTIMER1 128
+#define SRST_BUSTIMER2 129
+#define SRST_BUSTIMER3 130
+#define SRST_BUSTIMER4 131
+#define SRST_BUSTIMER5 132
+#define SRST_BUSTIMER6 133
+#define SRST_BUSTIMER7 134
+#define SRST_BUSTIMER8 135
+#define SRST_BUSTIMER9 136
+#define SRST_BUSTIMER10 137
+#define SRST_BUSTIMER11 138
+#define SRST_P_MAILBOX0 139
+#define SRST_P_MAILBOX1 140
+#define SRST_P_MAILBOX2 141
+#define SRST_P_GPIO1 142
+#define SRST_GPIO1 143
+
+#define SRST_P_GPIO2 144
+#define SRST_GPIO2 145
+#define SRST_P_GPIO3 146
+#define SRST_GPIO3 147
+#define SRST_P_GPIO4 148
+#define SRST_GPIO4 149
+#define SRST_A_DECOM 150
+#define SRST_P_DECOM 151
+#define SRST_D_DECOM 152
+#define SRST_P_TOP 153
+#define SRST_A_GICADB_GIC2CORE_BUS 154
+#define SRST_P_DFT2APB 155
+#define SRST_P_APB2ASB_MST_TOP 156
+#define SRST_P_APB2ASB_MST_CDPHY 157
+#define SRST_P_APB2ASB_MST_BOT_RIGHT 158
+
+#define SRST_P_APB2ASB_MST_IOC_TOP 159
+#define SRST_P_APB2ASB_MST_IOC_RIGHT 160
+#define SRST_P_APB2ASB_MST_CSIPHY 161
+#define SRST_P_APB2ASB_MST_VCCIO3_5 162
+#define SRST_P_APB2ASB_MST_VCCIO6 163
+#define SRST_P_APB2ASB_MST_EMMCIO 164
+#define SRST_A_SPINLOCK 165
+#define SRST_P_OTPC_NS 166
+#define SRST_OTPC_NS 167
+#define SRST_OTPC_ARB 168
+
+#define SRST_P_BUSIOC 169
+#define SRST_P_PMUCM0_INTMUX 170
+#define SRST_P_DDRCM0_INTMUX 171
+
+#define SRST_P_DDR_DFICTL_CH0 172
+#define SRST_P_DDR_MON_CH0 173
+#define SRST_P_DDR_STANDBY_CH0 174
+#define SRST_P_DDR_UPCTL_CH0 175
+#define SRST_TM_DDR_MON_CH0 176
+#define SRST_P_DDR_GRF_CH01 177
+#define SRST_DFI_CH0 178
+#define SRST_SBR_CH0 179
+#define SRST_DDR_UPCTL_CH0 180
+#define SRST_DDR_DFICTL_CH0 181
+#define SRST_DDR_MON_CH0 182
+#define SRST_DDR_STANDBY_CH0 183
+#define SRST_A_DDR_UPCTL_CH0 184
+#define SRST_P_DDR_DFICTL_CH1 185
+#define SRST_P_DDR_MON_CH1 186
+#define SRST_P_DDR_STANDBY_CH1 187
+
+#define SRST_P_DDR_UPCTL_CH1 188
+#define SRST_TM_DDR_MON_CH1 189
+#define SRST_DFI_CH1 190
+#define SRST_SBR_CH1 191
+#define SRST_DDR_UPCTL_CH1 192
+#define SRST_DDR_DFICTL_CH1 193
+#define SRST_DDR_MON_CH1 194
+#define SRST_DDR_STANDBY_CH1 195
+#define SRST_A_DDR_UPCTL_CH1 196
+#define SRST_A_DDR01_MSCH0 197
+#define SRST_A_DDR01_RS_MSCH0 198
+#define SRST_A_DDR01_FRS_MSCH0 199
+
+#define SRST_A_DDR01_SCRAMBLE0 200
+#define SRST_A_DDR01_FRS_SCRAMBLE0 201
+#define SRST_A_DDR01_MSCH1 202
+#define SRST_A_DDR01_RS_MSCH1 203
+#define SRST_A_DDR01_FRS_MSCH1 204
+#define SRST_A_DDR01_SCRAMBLE1 205
+#define SRST_A_DDR01_FRS_SCRAMBLE1 206
+#define SRST_P_DDR01_MSCH0 207
+#define SRST_P_DDR01_MSCH1 208
+
+#define SRST_P_DDR_DFICTL_CH2 209
+#define SRST_P_DDR_MON_CH2 210
+#define SRST_P_DDR_STANDBY_CH2 211
+#define SRST_P_DDR_UPCTL_CH2 212
+#define SRST_TM_DDR_MON_CH2 213
+#define SRST_P_DDR_GRF_CH23 214
+#define SRST_DFI_CH2 215
+#define SRST_SBR_CH2 216
+#define SRST_DDR_UPCTL_CH2 217
+#define SRST_DDR_DFICTL_CH2 218
+#define SRST_DDR_MON_CH2 219
+#define SRST_DDR_STANDBY_CH2 220
+#define SRST_A_DDR_UPCTL_CH2 221
+#define SRST_P_DDR_DFICTL_CH3 222
+#define SRST_P_DDR_MON_CH3 223
+#define SRST_P_DDR_STANDBY_CH3 224
+
+#define SRST_P_DDR_UPCTL_CH3 225
+#define SRST_TM_DDR_MON_CH3 226
+#define SRST_DFI_CH3 227
+#define SRST_SBR_CH3 228
+#define SRST_DDR_UPCTL_CH3 229
+#define SRST_DDR_DFICTL_CH3 230
+#define SRST_DDR_MON_CH3 231
+#define SRST_DDR_STANDBY_CH3 232
+#define SRST_A_DDR_UPCTL_CH3 233
+#define SRST_A_DDR23_MSCH2 234
+#define SRST_A_DDR23_RS_MSCH2 235
+#define SRST_A_DDR23_FRS_MSCH2 236
+
+#define SRST_A_DDR23_SCRAMBLE2 237
+#define SRST_A_DDR23_FRS_SCRAMBLE2 238
+#define SRST_A_DDR23_MSCH3 239
+#define SRST_A_DDR23_RS_MSCH3 240
+#define SRST_A_DDR23_FRS_MSCH3 241
+#define SRST_A_DDR23_SCRAMBLE3 242
+#define SRST_A_DDR23_FRS_SCRAMBLE3 243
+#define SRST_P_DDR23_MSCH2 244
+#define SRST_P_DDR23_MSCH3 245
+
+#define SRST_ISP1 246
+#define SRST_ISP1_VICAP 247
+#define SRST_A_ISP1_BIU 248
+#define SRST_H_ISP1_BIU 249
+
+#define SRST_A_RKNN1 250
+#define SRST_A_RKNN1_BIU 251
+#define SRST_H_RKNN1 252
+#define SRST_H_RKNN1_BIU 253
+
+#define SRST_A_RKNN2 254
+#define SRST_A_RKNN2_BIU 255
+#define SRST_H_RKNN2 256
+#define SRST_H_RKNN2_BIU 257
+
+#define SRST_A_RKNN_DSU0 258
+#define SRST_P_NPUTOP_BIU 259
+#define SRST_P_NPU_TIMER 260
+#define SRST_NPUTIMER0 261
+#define SRST_NPUTIMER1 262
+#define SRST_P_NPU_WDT 263
+#define SRST_T_NPU_WDT 264
+#define SRST_P_NPU_PVTM 265
+#define SRST_P_NPU_GRF 266
+#define SRST_NPU_PVTM 267
+
+#define SRST_NPU_PVTPLL 268
+#define SRST_H_NPU_CM0_BIU 269
+#define SRST_F_NPU_CM0_CORE 270
+#define SRST_T_NPU_CM0_JTAG 271
+#define SRST_A_RKNN0 272
+#define SRST_A_RKNN0_BIU 273
+#define SRST_H_RKNN0 274
+#define SRST_H_RKNN0_BIU 275
+
+#define SRST_H_NVM_BIU 276
+#define SRST_A_NVM_BIU 277
+#define SRST_H_EMMC 278
+#define SRST_A_EMMC 279
+#define SRST_C_EMMC 280
+#define SRST_B_EMMC 281
+#define SRST_T_EMMC 282
+#define SRST_S_SFC 283
+#define SRST_H_SFC 284
+#define SRST_H_SFC_XIP 285
+
+#define SRST_P_GRF 286
+#define SRST_P_DEC_BIU 287
+#define SRST_P_PHP_BIU 288
+#define SRST_A_PCIE_GRIDGE 289
+#define SRST_A_PHP_BIU 290
+#define SRST_A_GMAC0 291
+#define SRST_A_GMAC1 292
+#define SRST_A_PCIE_BIU 293
+#define SRST_PCIE0_POWER_UP 294
+#define SRST_PCIE1_POWER_UP 295
+#define SRST_PCIE2_POWER_UP 296
+
+#define SRST_PCIE3_POWER_UP 297
+#define SRST_PCIE4_POWER_UP 298
+#define SRST_P_PCIE0 299
+#define SRST_P_PCIE1 300
+#define SRST_P_PCIE2 301
+#define SRST_P_PCIE3 302
+
+#define SRST_P_PCIE4 303
+#define SRST_A_PHP_GIC_ITS 304
+#define SRST_A_MMU_PCIE 305
+#define SRST_A_MMU_PHP 306
+#define SRST_A_MMU_BIU 307
+
+#define SRST_A_USB3OTG2 308
+
+#define SRST_PMALIVE0 309
+#define SRST_PMALIVE1 310
+#define SRST_PMALIVE2 311
+#define SRST_A_SATA0 312
+#define SRST_A_SATA1 313
+#define SRST_A_SATA2 314
+#define SRST_RXOOB0 315
+#define SRST_RXOOB1 316
+#define SRST_RXOOB2 317
+#define SRST_ASIC0 318
+#define SRST_ASIC1 319
+#define SRST_ASIC2 320
+
+#define SRST_A_RKVDEC_CCU 321
+#define SRST_H_RKVDEC0 322
+#define SRST_A_RKVDEC0 323
+#define SRST_H_RKVDEC0_BIU 324
+#define SRST_A_RKVDEC0_BIU 325
+#define SRST_RKVDEC0_CA 326
+#define SRST_RKVDEC0_HEVC_CA 327
+#define SRST_RKVDEC0_CORE 328
+
+#define SRST_H_RKVDEC1 329
+#define SRST_A_RKVDEC1 330
+#define SRST_H_RKVDEC1_BIU 331
+#define SRST_A_RKVDEC1_BIU 332
+#define SRST_RKVDEC1_CA 333
+#define SRST_RKVDEC1_HEVC_CA 334
+#define SRST_RKVDEC1_CORE 335
+
+#define SRST_A_USB_BIU 336
+#define SRST_H_USB_BIU 337
+#define SRST_A_USB3OTG0 338
+#define SRST_A_USB3OTG1 339
+#define SRST_H_HOST0 340
+#define SRST_H_HOST_ARB0 341
+#define SRST_H_HOST1 342
+#define SRST_H_HOST_ARB1 343
+#define SRST_A_USB_GRF 344
+#define SRST_C_USB2P0_HOST0 345
+
+#define SRST_C_USB2P0_HOST1 346
+#define SRST_HOST_UTMI0 347
+#define SRST_HOST_UTMI1 348
+
+#define SRST_A_VDPU_BIU 349
+#define SRST_A_VDPU_LOW_BIU 350
+#define SRST_H_VDPU_BIU 351
+#define SRST_A_JPEG_DECODER_BIU 352
+#define SRST_A_VPU 353
+#define SRST_H_VPU 354
+#define SRST_A_JPEG_ENCODER0 355
+#define SRST_H_JPEG_ENCODER0 356
+#define SRST_A_JPEG_ENCODER1 357
+#define SRST_H_JPEG_ENCODER1 358
+#define SRST_A_JPEG_ENCODER2 359
+#define SRST_H_JPEG_ENCODER2 360
+
+#define SRST_A_JPEG_ENCODER3 361
+#define SRST_H_JPEG_ENCODER3 362
+#define SRST_A_JPEG_DECODER 363
+#define SRST_H_JPEG_DECODER 364
+#define SRST_H_IEP2P0 365
+#define SRST_A_IEP2P0 366
+#define SRST_IEP2P0_CORE 367
+#define SRST_H_RGA2 368
+#define SRST_A_RGA2 369
+#define SRST_RGA2_CORE 370
+#define SRST_H_RGA3_0 371
+#define SRST_A_RGA3_0 372
+#define SRST_RGA3_0_CORE 373
+
+#define SRST_H_RKVENC0_BIU 374
+#define SRST_A_RKVENC0_BIU 375
+#define SRST_H_RKVENC0 376
+#define SRST_A_RKVENC0 377
+#define SRST_RKVENC0_CORE 378
+
+#define SRST_H_RKVENC1_BIU 379
+#define SRST_A_RKVENC1_BIU 380
+#define SRST_H_RKVENC1 381
+#define SRST_A_RKVENC1 382
+#define SRST_RKVENC1_CORE 383
+
+#define SRST_A_VI_BIU 384
+#define SRST_H_VI_BIU 385
+#define SRST_P_VI_BIU 386
+#define SRST_D_VICAP 387
+#define SRST_A_VICAP 388
+#define SRST_H_VICAP 389
+#define SRST_ISP0 390
+#define SRST_ISP0_VICAP 391
+
+#define SRST_FISHEYE0 392
+#define SRST_FISHEYE1 393
+#define SRST_P_CSI_HOST_0 394
+#define SRST_P_CSI_HOST_1 395
+#define SRST_P_CSI_HOST_2 396
+#define SRST_P_CSI_HOST_3 397
+#define SRST_P_CSI_HOST_4 398
+#define SRST_P_CSI_HOST_5 399
+
+#define SRST_CSIHOST0_VICAP 400
+#define SRST_CSIHOST1_VICAP 401
+#define SRST_CSIHOST2_VICAP 402
+#define SRST_CSIHOST3_VICAP 403
+#define SRST_CSIHOST4_VICAP 404
+#define SRST_CSIHOST5_VICAP 405
+#define SRST_CIFIN 406
+
+#define SRST_A_VOP_BIU 407
+#define SRST_A_VOP_LOW_BIU 408
+#define SRST_H_VOP_BIU 409
+#define SRST_P_VOP_BIU 410
+#define SRST_H_VOP 411
+#define SRST_A_VOP 412
+#define SRST_D_VOP0 413
+#define SRST_D_VOP2HDMI_BRIDGE0 414
+#define SRST_D_VOP2HDMI_BRIDGE1 415
+
+#define SRST_D_VOP1 416
+#define SRST_D_VOP2 417
+#define SRST_D_VOP3 418
+#define SRST_P_VOPGRF 419
+#define SRST_P_DSIHOST0 420
+#define SRST_P_DSIHOST1 421
+#define SRST_DSIHOST0 422
+#define SRST_DSIHOST1 423
+#define SRST_VOP_PMU 424
+#define SRST_P_VOP_CHANNEL_BIU 425
+
+#define SRST_H_VO0_BIU 426
+#define SRST_H_VO0_S_BIU 427
+#define SRST_P_VO0_BIU 428
+#define SRST_P_VO0_S_BIU 429
+#define SRST_A_HDCP0_BIU 430
+#define SRST_P_VO0GRF 431
+#define SRST_H_HDCP_KEY0 432
+#define SRST_A_HDCP0 433
+#define SRST_H_HDCP0 434
+#define SRST_HDCP0 435
+
+#define SRST_P_TRNG0 436
+#define SRST_DP0 437
+#define SRST_DP1 438
+#define SRST_H_I2S4_8CH 439
+#define SRST_M_I2S4_8CH_TX 440
+#define SRST_H_I2S8_8CH 441
+
+#define SRST_M_I2S8_8CH_TX 442
+#define SRST_H_SPDIF2_DP0 443
+#define SRST_M_SPDIF2_DP0 444
+#define SRST_H_SPDIF5_DP1 445
+#define SRST_M_SPDIF5_DP1 446
+
+#define SRST_A_HDCP1_BIU 447
+#define SRST_A_VO1_BIU 448
+#define SRST_H_VOP1_BIU 449
+#define SRST_H_VOP1_S_BIU 450
+#define SRST_P_VOP1_BIU 451
+#define SRST_P_VO1GRF 452
+#define SRST_P_VO1_S_BIU 453
+
+#define SRST_H_I2S7_8CH 454
+#define SRST_M_I2S7_8CH_RX 455
+#define SRST_H_HDCP_KEY1 456
+#define SRST_A_HDCP1 457
+#define SRST_H_HDCP1 458
+#define SRST_HDCP1 459
+#define SRST_P_TRNG1 460
+#define SRST_P_HDMITX0 461
+
+#define SRST_HDMITX0_REF 462
+#define SRST_P_HDMITX1 463
+#define SRST_HDMITX1_REF 464
+#define SRST_A_HDMIRX 465
+#define SRST_P_HDMIRX 466
+#define SRST_HDMIRX_REF 467
+
+#define SRST_P_EDP0 468
+#define SRST_EDP0_24M 469
+#define SRST_P_EDP1 470
+#define SRST_EDP1_24M 471
+#define SRST_M_I2S5_8CH_TX 472
+#define SRST_H_I2S5_8CH 473
+#define SRST_M_I2S6_8CH_TX 474
+
+#define SRST_M_I2S6_8CH_RX 475
+#define SRST_H_I2S6_8CH 476
+#define SRST_H_SPDIF3 477
+#define SRST_M_SPDIF3 478
+#define SRST_H_SPDIF4 479
+#define SRST_M_SPDIF4 480
+#define SRST_H_SPDIFRX0 481
+#define SRST_M_SPDIFRX0 482
+#define SRST_H_SPDIFRX1 483
+#define SRST_M_SPDIFRX1 484
+
+#define SRST_H_SPDIFRX2 485
+#define SRST_M_SPDIFRX2 486
+#define SRST_LINKSYM_HDMITXPHY0 487
+#define SRST_LINKSYM_HDMITXPHY1 488
+#define SRST_VO1_BRIDGE0 489
+#define SRST_VO1_BRIDGE1 490
+
+#define SRST_H_I2S9_8CH 491
+#define SRST_M_I2S9_8CH_RX 492
+#define SRST_H_I2S10_8CH 493
+#define SRST_M_I2S10_8CH_RX 494
+#define SRST_P_S_HDMIRX 495
+
+#define SRST_GPU 496
+#define SRST_SYS_GPU 497
+#define SRST_A_S_GPU_BIU 498
+#define SRST_A_M0_GPU_BIU 499
+#define SRST_A_M1_GPU_BIU 500
+#define SRST_A_M2_GPU_BIU 501
+#define SRST_A_M3_GPU_BIU 502
+#define SRST_P_GPU_BIU 503
+#define SRST_P_GPU_PVTM 504
+
+#define SRST_GPU_PVTM 505
+#define SRST_P_GPU_GRF 506
+#define SRST_GPU_PVTPLL 507
+#define SRST_GPU_JTAG 508
+
+#define SRST_A_AV1_BIU 509
+#define SRST_A_AV1 510
+#define SRST_P_AV1_BIU 511
+#define SRST_P_AV1 512
+
+#define SRST_A_DDR_BIU 513
+#define SRST_A_DMA2DDR 514
+#define SRST_A_DDR_SHAREMEM 515
+#define SRST_A_DDR_SHAREMEM_BIU 516
+#define SRST_A_CENTER_S200_BIU 517
+#define SRST_A_CENTER_S400_BIU 518
+#define SRST_H_AHB2APB 519
+#define SRST_H_CENTER_BIU 520
+#define SRST_F_DDR_CM0_CORE 521
+
+#define SRST_DDR_TIMER0 522
+#define SRST_DDR_TIMER1 523
+#define SRST_T_WDT_DDR 524
+#define SRST_T_DDR_CM0_JTAG 525
+#define SRST_P_CENTER_GRF 526
+#define SRST_P_AHB2APB 527
+#define SRST_P_WDT 528
+#define SRST_P_TIMER 529
+#define SRST_P_DMA2DDR 530
+#define SRST_P_SHAREMEM 531
+#define SRST_P_CENTER_BIU 532
+#define SRST_P_CENTER_CHANNEL_BIU 533
+
+#define SRST_P_USBDPGRF0 534
+#define SRST_P_USBDPPHY0 535
+#define SRST_P_USBDPGRF1 536
+#define SRST_P_USBDPPHY1 537
+#define SRST_P_HDPTX0 538
+#define SRST_P_HDPTX1 539
+#define SRST_P_APB2ASB_SLV_BOT_RIGHT 540
+#define SRST_P_USB2PHY_U3_0_GRF0 541
+#define SRST_P_USB2PHY_U3_1_GRF0 542
+#define SRST_P_USB2PHY_U2_0_GRF0 543
+#define SRST_P_USB2PHY_U2_1_GRF0 544
+#define SRST_HDPTX0_ROPLL 545
+#define SRST_HDPTX0_LCPLL 546
+#define SRST_HDPTX0 547
+#define SRST_HDPTX1_ROPLL 548
+
+#define SRST_HDPTX1_LCPLL 549
+#define SRST_HDPTX1 550
+#define SRST_HDPTX0_HDMIRXPHY_SET 551
+#define SRST_USBDP_COMBO_PHY0 552
+#define SRST_USBDP_COMBO_PHY0_LCPLL 553
+#define SRST_USBDP_COMBO_PHY0_ROPLL 554
+#define SRST_USBDP_COMBO_PHY0_PCS_HS 555
+#define SRST_USBDP_COMBO_PHY1 556
+#define SRST_USBDP_COMBO_PHY1_LCPLL 557
+#define SRST_USBDP_COMBO_PHY1_ROPLL 558
+#define SRST_USBDP_COMBO_PHY1_PCS_HS 559
+#define SRST_HDMIHDP0 560
+#define SRST_HDMIHDP1 561
+
+#define SRST_A_VO1USB_TOP_BIU 562
+#define SRST_H_VO1USB_TOP_BIU 563
+
+#define SRST_H_SDIO_BIU 564
+#define SRST_H_SDIO 565
+#define SRST_SDIO 566
+
+#define SRST_H_RGA3_BIU 567
+#define SRST_A_RGA3_BIU 568
+#define SRST_H_RGA3_1 569
+#define SRST_A_RGA3_1 570
+#define SRST_RGA3_1_CORE 571
+
+#define SRST_REF_PIPE_PHY0 572
+#define SRST_REF_PIPE_PHY1 573
+#define SRST_REF_PIPE_PHY2 574
+
+#define SRST_P_PHPTOP_CRU 575
+#define SRST_P_PCIE2_GRF0 576
+#define SRST_P_PCIE2_GRF1 577
+#define SRST_P_PCIE2_GRF2 578
+#define SRST_P_PCIE2_PHY0 579
+#define SRST_P_PCIE2_PHY1 580
+#define SRST_P_PCIE2_PHY2 581
+#define SRST_P_PCIE3_PHY 582
+#define SRST_P_APB2ASB_SLV_CHIP_TOP 583
+#define SRST_PCIE30_PHY 584
+
+#define SRST_H_PMU1_BIU 585
+#define SRST_P_PMU1_BIU 586
+#define SRST_H_PMU_CM0_BIU 587
+#define SRST_F_PMU_CM0_CORE 588
+#define SRST_T_PMU1_CM0_JTAG 589
+
+#define SRST_DDR_FAIL_SAFE 590
+#define SRST_P_CRU_PMU1 591
+#define SRST_P_PMU1_GRF 592
+#define SRST_P_PMU1_IOC 593
+#define SRST_P_PMU1WDT 594
+#define SRST_T_PMU1WDT 595
+#define SRST_P_PMU1TIMER 596
+#define SRST_PMU1TIMER0 597
+#define SRST_PMU1TIMER1 598
+#define SRST_P_PMU1PWM 599
+#define SRST_PMU1PWM 600
+
+#define SRST_P_I2C0 601
+#define SRST_I2C0 602
+#define SRST_S_UART0 603
+#define SRST_P_UART0 604
+#define SRST_H_I2S1_8CH 605
+#define SRST_M_I2S1_8CH_TX 606
+#define SRST_M_I2S1_8CH_RX 607
+#define SRST_H_PDM0 608
+#define SRST_PDM0 609
+
+#define SRST_H_VAD 610
+#define SRST_HDPTX0_INIT 611
+#define SRST_HDPTX0_CMN 612
+#define SRST_HDPTX0_LANE 613
+#define SRST_HDPTX1_INIT 614
+
+#define SRST_HDPTX1_CMN 615
+#define SRST_HDPTX1_LANE 616
+#define SRST_M_MIPI_DCPHY0 617
+#define SRST_S_MIPI_DCPHY0 618
+#define SRST_M_MIPI_DCPHY1 619
+#define SRST_S_MIPI_DCPHY1 620
+#define SRST_OTGPHY_U3_0 621
+#define SRST_OTGPHY_U3_1 622
+#define SRST_OTGPHY_U2_0 623
+#define SRST_OTGPHY_U2_1 624
+
+#define SRST_P_PMU0GRF 625
+#define SRST_P_PMU0IOC 626
+#define SRST_P_GPIO0 627
+#define SRST_GPIO0 628
+
+#define SRST_A_SECURE_NS_BIU 629
+#define SRST_H_SECURE_NS_BIU 630
+#define SRST_A_SECURE_S_BIU 631
+#define SRST_H_SECURE_S_BIU 632
+#define SRST_P_SECURE_S_BIU 633
+#define SRST_CRYPTO_CORE 634
+
+#define SRST_CRYPTO_PKA 635
+#define SRST_CRYPTO_RNG 636
+#define SRST_A_CRYPTO 637
+#define SRST_H_CRYPTO 638
+#define SRST_KEYLADDER_CORE 639
+#define SRST_KEYLADDER_RNG 640
+#define SRST_A_KEYLADDER 641
+#define SRST_H_KEYLADDER 642
+#define SRST_P_OTPC_S 643
+#define SRST_OTPC_S 644
+#define SRST_WDT_S 645
+
+#define SRST_T_WDT_S 646
+#define SRST_H_BOOTROM 647
+#define SRST_A_DCF 648
+#define SRST_P_DCF 649
+#define SRST_H_BOOTROM_NS 650
+#define SRST_P_KEYLADDER 651
+#define SRST_H_TRNG_S 652
+
+#define SRST_H_TRNG_NS 653
+#define SRST_D_SDMMC_BUFFER 654
+#define SRST_H_SDMMC 655
+#define SRST_H_SDMMC_BUFFER 656
+#define SRST_SDMMC 657
+#define SRST_P_TRNG_CHK 658
+#define SRST_TRNG_S 659
+
+#define SRST_A_HDMIRX_BIU 660
+
+/* SCMI Secure Resets */
+
+/* Name=SECURE_SOFTRST_CON00,Offset=0xA00 */
+#define SCMI_SRST_A_SECURE_NS_BIU 10
+#define SCMI_SRST_H_SECURE_NS_BIU 11
+#define SCMI_SRST_A_SECURE_S_BIU 12
+#define SCMI_SRST_H_SECURE_S_BIU 13
+#define SCMI_SRST_P_SECURE_S_BIU 14
+#define SCMI_SRST_CRYPTO_CORE 15
+/* Name=SECURE_SOFTRST_CON01,Offset=0xA04 */
+#define SCMI_SRST_CRYPTO_PKA 16
+#define SCMI_SRST_CRYPTO_RNG 17
+#define SCMI_SRST_A_CRYPTO 18
+#define SCMI_SRST_H_CRYPTO 19
+#define SCMI_SRST_KEYLADDER_CORE 25
+#define SCMI_SRST_KEYLADDER_RNG 26
+#define SCMI_SRST_A_KEYLADDER 27
+#define SCMI_SRST_H_KEYLADDER 28
+#define SCMI_SRST_P_OTPC_S 29
+#define SCMI_SRST_OTPC_S 30
+#define SCMI_SRST_WDT_S 31
+/* Name=SECURE_SOFTRST_CON02,Offset=0xA08 */
+#define SCMI_SRST_T_WDT_S 32
+#define SCMI_SRST_H_BOOTROM 33
+#define SCMI_SRST_A_DCF 34
+#define SCMI_SRST_P_DCF 35
+#define SCMI_SRST_H_BOOTROM_NS 37
+#define SCMI_SRST_P_KEYLADDER 46
+#define SCMI_SRST_H_TRNG_S 47
+/* Name=SECURE_SOFTRST_CON03,Offset=0xA0C */
+#define SCMI_SRST_H_TRNG_NS 48
+#define SCMI_SRST_D_SDMMC_BUFFER 49
+#define SCMI_SRST_H_SDMMC 50
+#define SCMI_SRST_H_SDMMC_BUFFER 51
+#define SCMI_SRST_SDMMC 52
+#define SCMI_SRST_P_TRNG_CHK 53
+#define SCMI_SRST_TRNG_S 54
+
+#endif
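
This header carries two ID spaces: SRST_* values addressed to the CRU reset controller, and SCMI_SRST_* values for secure resets reachable only through SCMI firmware (as the register comments show, the SCMI IDs track SECURE_SOFTRST_CON bit positions, so they are not contiguous). A hedged sketch, labels assumed:

    #include <dt-bindings/reset/rockchip,rk3588-cru.h>

    /* Sketch: &cru, &scmi_reset and the consumer labels are assumptions. */
    &i2c1 {
            resets = <&cru SRST_I2C1>, <&cru SRST_P_I2C1>;
            reset-names = "i2c", "apb";
    };

    &crypto {
            /* Secure-world reset, requested via SCMI rather than the CRU. */
            resets = <&scmi_reset SCMI_SRST_CRYPTO_CORE>;
    };
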
diff --git a/include/dt-bindings/reset/rockchip,rv1126b-cru.h b/include/dt-bindings/reset/rockchip,rv1126b-cru.h
new file mode 100644
index 000000000000..a7712db319d0
--- /dev/null
+++ b/include/dt-bindings/reset/rockchip,rv1126b-cru.h
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_ROCKCHIP_RV1126B_H
+#define _DT_BINDINGS_RESET_ROCKCHIP_RV1126B_H
+
+/* ========================= List of all reset field IDs ========================= */
+/* TOPCRU-->SOFTRST_CON00 */
+
+/* TOPCRU-->SOFTRST_CON15 */
+#define SRST_P_CRU 0
+#define SRST_P_CRU_BIU 1
+
+/* BUSCRU-->SOFTRST_CON00 */
+#define SRST_A_TOP_BIU 2
+#define SRST_A_RKCE_BIU 3
+#define SRST_A_BUS_BIU 4
+#define SRST_H_BUS_BIU 5
+#define SRST_P_BUS_BIU 6
+#define SRST_P_CRU_BUS 7
+#define SRST_P_SYS_GRF 8
+#define SRST_H_BOOTROM 9
+#define SRST_A_GIC400 10
+#define SRST_A_SPINLOCK 11
+#define SRST_P_WDT_NS 12
+#define SRST_T_WDT_NS 13
+
+/* BUSCRU-->SOFTRST_CON01 */
+#define SRST_P_WDT_HPMCU 14
+#define SRST_T_WDT_HPMCU 15
+#define SRST_H_CACHE 16
+#define SRST_P_HPMCU_MAILBOX 17
+#define SRST_P_HPMCU_INTMUX 18
+#define SRST_HPMCU_FULL_CLUSTER 19
+#define SRST_HPMCU_PWUP 20
+#define SRST_HPMCU_ONLY_CORE 21
+#define SRST_T_HPMCU_JTAG 22
+#define SRST_P_RKDMA 23
+#define SRST_A_RKDMA 24
+
+/* BUSCRU-->SOFTRST_CON02 */
+#define SRST_P_DCF 25
+#define SRST_A_DCF 26
+#define SRST_H_RGA 27
+#define SRST_A_RGA 28
+#define SRST_CORE_RGA 29
+#define SRST_P_TIMER 30
+#define SRST_TIMER0 31
+#define SRST_TIMER1 32
+#define SRST_TIMER2 33
+#define SRST_TIMER3 34
+#define SRST_TIMER4 35
+#define SRST_TIMER5 36
+#define SRST_A_RKCE 37
+#define SRST_PKA_RKCE 38
+#define SRST_H_RKRNG_S 39
+#define SRST_H_RKRNG_NS 40
+
+/* BUSCRU-->SOFTRST_CON03 */
+#define SRST_P_I2C0 41
+#define SRST_I2C0 42
+#define SRST_P_I2C1 43
+#define SRST_I2C1 44
+#define SRST_P_I2C3 45
+#define SRST_I2C3 46
+#define SRST_P_I2C4 47
+#define SRST_I2C4 48
+#define SRST_P_I2C5 49
+#define SRST_I2C5 50
+#define SRST_P_SPI0 51
+#define SRST_SPI0 52
+#define SRST_P_SPI1 53
+#define SRST_SPI1 54
+
+/* BUSCRU-->SOFTRST_CON04 */
+#define SRST_P_PWM0 55
+#define SRST_PWM0 56
+#define SRST_P_PWM2 57
+#define SRST_PWM2 58
+#define SRST_P_PWM3 59
+#define SRST_PWM3 60
+
+/* BUSCRU-->SOFTRST_CON05 */
+#define SRST_P_UART1 61
+#define SRST_S_UART1 62
+#define SRST_P_UART2 63
+#define SRST_S_UART2 64
+#define SRST_P_UART3 65
+#define SRST_S_UART3 66
+#define SRST_P_UART4 67
+#define SRST_S_UART4 68
+#define SRST_P_UART5 69
+#define SRST_S_UART5 70
+#define SRST_P_UART6 71
+#define SRST_S_UART6 72
+#define SRST_P_UART7 73
+#define SRST_S_UART7 74
+
+/* BUSCRU-->SOFTRST_CON06 */
+#define SRST_P_TSADC 75
+#define SRST_TSADC 76
+#define SRST_H_SAI0 77
+#define SRST_M_SAI0 78
+#define SRST_H_SAI1 79
+#define SRST_M_SAI1 80
+#define SRST_H_SAI2 81
+#define SRST_M_SAI2 82
+#define SRST_H_RKDSM 83
+#define SRST_M_RKDSM 84
+#define SRST_H_PDM 85
+#define SRST_M_PDM 86
+#define SRST_PDM 87
+
+/* BUSCRU-->SOFTRST_CON07 */
+#define SRST_H_ASRC0 88
+#define SRST_ASRC0 89
+#define SRST_H_ASRC1 90
+#define SRST_ASRC1 91
+#define SRST_P_AUDIO_ADC_BUS 92
+#define SRST_M_AUDIO_ADC_BUS 93
+#define SRST_P_RKCE 94
+#define SRST_H_NS_RKCE 95
+#define SRST_P_OTPC_NS 96
+#define SRST_SBPI_OTPC_NS 97
+#define SRST_USER_OTPC_NS 98
+#define SRST_OTPC_ARB 99
+#define SRST_P_OTP_MASK 100
+
+/* PERICRU-->SOFTRST_CON00 */
+#define SRST_A_PERI_BIU 101
+#define SRST_P_PERI_BIU 102
+#define SRST_P_RTC_BIU 103
+#define SRST_P_CRU_PERI 104
+#define SRST_P_PERI_GRF 105
+#define SRST_P_GPIO1 106
+#define SRST_DB_GPIO1 107
+#define SRST_P_IOC_VCCIO1 108
+#define SRST_A_USB3OTG 109
+#define SRST_H_USB2HOST 110
+#define SRST_H_ARB_USB2HOST 111
+#define SRST_P_RTC_TEST 112
+
+/* PERICRU-->SOFTRST_CON01 */
+#define SRST_H_EMMC 113
+#define SRST_H_FSPI0 114
+#define SRST_H_XIP_FSPI0 115
+#define SRST_S_2X_FSPI0 116
+#define SRST_UTMI_USB2HOST 117
+#define SRST_REF_PIPEPHY 118
+#define SRST_P_PIPEPHY 119
+#define SRST_P_PIPEPHY_GRF 120
+#define SRST_P_USB2PHY 121
+#define SRST_POR_USB2PHY 122
+#define SRST_OTG_USB2PHY 123
+#define SRST_HOST_USB2PHY 124
+
+/* CORECRU-->SOFTRST_CON00 */
+#define SRST_REF_PVTPLL_CORE 125
+#define SRST_NCOREPORESET0 126
+#define SRST_NCORESET0 127
+#define SRST_NCOREPORESET1 128
+#define SRST_NCORESET1 129
+#define SRST_NCOREPORESET2 130
+#define SRST_NCORESET2 131
+#define SRST_NCOREPORESET3 132
+#define SRST_NCORESET3 133
+#define SRST_NDBGRESET 134
+#define SRST_NL2RESET 135
+
+/* CORECRU-->SOFTRST_CON01 */
+#define SRST_A_CORE_BIU 136
+#define SRST_P_CORE_BIU 137
+#define SRST_H_CORE_BIU 138
+#define SRST_P_DBG 139
+#define SRST_POT_DBG 140
+#define SRST_NT_DBG 141
+#define SRST_P_CORE_PVTPLL 142
+#define SRST_P_CRU_CORE 143
+#define SRST_P_CORE_GRF 144
+#define SRST_P_DFT2APB 145
+
+/* PMUCRU-->SOFTRST_CON00 */
+#define SRST_H_PMU_BIU 146
+#define SRST_P_PMU_GPIO0 147
+#define SRST_DB_PMU_GPIO0 148
+#define SRST_P_PMU_HP_TIMER 149
+#define SRST_PMU_HP_TIMER 150
+#define SRST_PMU_32K_HP_TIMER 151
+
+/* PMUCRU-->SOFTRST_CON01 */
+#define SRST_P_PWM1 152
+#define SRST_PWM1 153
+#define SRST_P_I2C2 154
+#define SRST_I2C2 155
+#define SRST_P_UART0 156
+#define SRST_S_UART0 157
+
+/* PMUCRU-->SOFTRST_CON02 */
+#define SRST_P_RCOSC_CTRL 158
+#define SRST_REF_RCOSC_CTRL 159
+#define SRST_P_IOC_PMUIO0 160
+#define SRST_P_CRU_PMU 161
+#define SRST_P_PMU_GRF 162
+#define SRST_PREROLL 163
+#define SRST_PREROLL_32K 164
+#define SRST_H_PMU_SRAM 165
+
+/* PMUCRU-->SOFTRST_CON03 */
+#define SRST_P_WDT_LPMCU 166
+#define SRST_T_WDT_LPMCU 167
+#define SRST_LPMCU_FULL_CLUSTER 168
+#define SRST_LPMCU_PWUP 169
+#define SRST_LPMCU_ONLY_CORE 170
+#define SRST_T_LPMCU_JTAG 171
+#define SRST_P_LPMCU_MAILBOX 172
+
+/* PMU1CRU-->SOFTRST_CON00 */
+#define SRST_P_SPI2AHB 173
+#define SRST_H_SPI2AHB 174
+#define SRST_H_FSPI1 175
+#define SRST_H_XIP_FSPI1 176
+#define SRST_S_1X_FSPI1 177
+#define SRST_P_IOC_PMUIO1 178
+#define SRST_P_CRU_PMU1 179
+#define SRST_P_AUDIO_ADC_PMU 180
+#define SRST_M_AUDIO_ADC_PMU 181
+#define SRST_H_PMU1_BIU 182
+
+/* PMU1CRU-->SOFTRST_CON01 */
+#define SRST_P_LPDMA 183
+#define SRST_A_LPDMA 184
+#define SRST_H_LPSAI 185
+#define SRST_M_LPSAI 186
+#define SRST_P_AOA_TDD 187
+#define SRST_P_AOA_FE 188
+#define SRST_P_AOA_AAD 189
+#define SRST_P_AOA_APB 190
+#define SRST_P_AOA_SRAM 191
+
+/* DDRCRU-->SOFTRST_CON00 */
+#define SRST_P_DDR_BIU 192
+#define SRST_P_DDRC 193
+#define SRST_P_DDRMON 194
+#define SRST_TIMER_DDRMON 195
+#define SRST_P_DFICTRL 196
+#define SRST_P_DDR_GRF 197
+#define SRST_P_CRU_DDR 198
+#define SRST_P_DDRPHY 199
+#define SRST_P_DMA2DDR 200
+
+/* SUBDDRCRU-->SOFTRST_CON00 */
+#define SRST_A_SYSMEM_BIU 201
+#define SRST_A_SYSMEM 202
+#define SRST_A_DDR_BIU 203
+#define SRST_A_DDRSCH0_CPU 204
+#define SRST_A_DDRSCH1_NPU 205
+#define SRST_A_DDRSCH2_POE 206
+#define SRST_A_DDRSCH3_VI 207
+#define SRST_CORE_DDRC 208
+#define SRST_DDRMON 209
+#define SRST_DFICTRL 210
+#define SRST_RS 211
+#define SRST_A_DMA2DDR 212
+#define SRST_DDRPHY 213
+
+/* VICRU-->SOFTRST_CON00 */
+#define SRST_REF_PVTPLL_ISP 214
+#define SRST_A_GMAC_BIU 215
+#define SRST_A_VI_BIU 216
+#define SRST_H_VI_BIU 217
+#define SRST_P_VI_BIU 218
+#define SRST_P_CRU_VI 219
+#define SRST_P_VI_GRF 220
+#define SRST_P_VI_PVTPLL 221
+#define SRST_P_DSMC 222
+#define SRST_A_DSMC 223
+#define SRST_H_CAN0 224
+#define SRST_CAN0 225
+#define SRST_H_CAN1 226
+#define SRST_CAN1 227
+
+/* VICRU-->SOFTRST_CON01 */
+#define SRST_P_GPIO2 228
+#define SRST_DB_GPIO2 229
+#define SRST_P_GPIO4 230
+#define SRST_DB_GPIO4 231
+#define SRST_P_GPIO5 232
+#define SRST_DB_GPIO5 233
+#define SRST_P_GPIO6 234
+#define SRST_DB_GPIO6 235
+#define SRST_P_GPIO7 236
+#define SRST_DB_GPIO7 237
+#define SRST_P_IOC_VCCIO2 238
+#define SRST_P_IOC_VCCIO4 239
+#define SRST_P_IOC_VCCIO5 240
+#define SRST_P_IOC_VCCIO6 241
+#define SRST_P_IOC_VCCIO7 242
+
+/* VICRU-->SOFTRST_CON02 */
+#define SRST_CORE_ISP 243
+#define SRST_H_VICAP 244
+#define SRST_A_VICAP 245
+#define SRST_D_VICAP 246
+#define SRST_ISP0_VICAP 247
+#define SRST_CORE_VPSS 248
+#define SRST_CORE_VPSL 249
+#define SRST_P_CSI2HOST0 250
+#define SRST_P_CSI2HOST1 251
+#define SRST_P_CSI2HOST2 252
+#define SRST_P_CSI2HOST3 253
+#define SRST_H_SDMMC0 254
+#define SRST_A_GMAC 255
+#define SRST_P_CSIPHY0 256
+#define SRST_P_CSIPHY1 257
+
+/* VICRU-->SOFTRST_CON03 */
+#define SRST_P_MACPHY 258
+#define SRST_MACPHY 259
+#define SRST_P_SARADC1 260
+#define SRST_SARADC1 261
+#define SRST_P_SARADC2 262
+#define SRST_SARADC2 263
+
+/* VEPUCRU-->SOFTRST_CON00 */
+#define SRST_REF_PVTPLL_VEPU 264
+#define SRST_A_VEPU_BIU 265
+#define SRST_H_VEPU_BIU 266
+#define SRST_P_VEPU_BIU 267
+#define SRST_P_CRU_VEPU 268
+#define SRST_P_VEPU_GRF 269
+#define SRST_P_GPIO3 270
+#define SRST_DB_GPIO3 271
+#define SRST_P_IOC_VCCIO3 272
+#define SRST_P_SARADC0 273
+#define SRST_SARADC0 274
+#define SRST_H_SDMMC1 275
+
+/* VEPUCRU-->SOFTRST_CON01 */
+#define SRST_P_VEPU_PVTPLL 276
+#define SRST_H_VEPU 277
+#define SRST_A_VEPU 278
+#define SRST_CORE_VEPU 279
+
+/* NPUCRU-->SOFTRST_CON00 */
+#define SRST_REF_PVTPLL_NPU 280
+#define SRST_A_NPU_BIU 281
+#define SRST_H_NPU_BIU 282
+#define SRST_P_NPU_BIU 283
+#define SRST_P_CRU_NPU 284
+#define SRST_P_NPU_GRF 285
+#define SRST_P_NPU_PVTPLL 286
+#define SRST_H_RKNN 287
+#define SRST_A_RKNN 288
+
+/* VDOCRU-->SOFTRST_CON00 */
+#define SRST_A_RKVDEC_BIU 289
+#define SRST_A_VDO_BIU 290
+#define SRST_H_VDO_BIU 291
+#define SRST_P_VDO_BIU 292
+#define SRST_P_CRU_VDO 293
+#define SRST_P_VDO_GRF 294
+#define SRST_A_RKVDEC 295
+#define SRST_H_RKVDEC 296
+#define SRST_HEVC_CA_RKVDEC 297
+#define SRST_A_VOP 298
+#define SRST_H_VOP 299
+#define SRST_D_VOP 300
+#define SRST_A_OOC 301
+#define SRST_H_OOC 302
+#define SRST_D_OOC 303
+
+/* VDOCRU-->SOFTRST_CON01 */
+#define SRST_H_RKJPEG 304
+#define SRST_A_RKJPEG 305
+#define SRST_A_RKMMU_DECOM 306
+#define SRST_H_RKMMU_DECOM 307
+#define SRST_D_DECOM 308
+#define SRST_A_DECOM 309
+#define SRST_P_DECOM 310
+#define SRST_P_MIPI_DSI 311
+#define SRST_P_DSIPHY 312
+
+/* VCPCRU-->SOFTRST_CON00 */
+#define SRST_REF_PVTPLL_VCP 313
+#define SRST_A_VCP_BIU 314
+#define SRST_H_VCP_BIU 315
+#define SRST_P_VCP_BIU 316
+#define SRST_P_CRU_VCP 317
+#define SRST_P_VCP_GRF 318
+#define SRST_P_VCP_PVTPLL 319
+#define SRST_A_AISP_BIU 320
+#define SRST_H_AISP_BIU 321
+#define SRST_CORE_AISP 322
+
+/* VCPCRU-->SOFTRST_CON01 */
+#define SRST_H_FEC 323
+#define SRST_A_FEC 324
+#define SRST_CORE_FEC 325
+#define SRST_H_AVSP 326
+#define SRST_A_AVSP 327
+
+#endif
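
The bank comments here (BUSCRU-->SOFTRST_CONxx and so on) only document where each line sits in hardware; consumers still pass the flat index. Sketch with assumed labels:

    #include <dt-bindings/reset/rockchip,rv1126b-cru.h>

    /* Sketch: &cru and &pwm0 are assumed labels. SRST_PWM0 and
     * SRST_P_PWM0 come from BUSCRU SOFTRST_CON04 above. */
    &pwm0 {
            resets = <&cru SRST_PWM0>, <&cru SRST_P_PWM0>;
            reset-names = "pwm", "pclk";
    };
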
diff --git a/include/dt-bindings/reset/sama7g5-reset.h b/include/dt-bindings/reset/sama7g5-reset.h
new file mode 100644
index 000000000000..2116f41d04e0
--- /dev/null
+++ b/include/dt-bindings/reset/sama7g5-reset.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef __DT_BINDINGS_RESET_SAMA7G5_H
+#define __DT_BINDINGS_RESET_SAMA7G5_H
+
+#define SAMA7G5_RESET_USB_PHY1 4
+#define SAMA7G5_RESET_USB_PHY2 5
+#define SAMA7G5_RESET_USB_PHY3 6
+
+#endif /* __DT_BINDINGS_RESET_SAMA7G5_H */
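
Only the three USB PHY lines are exposed here. A hypothetical consumer sketch (both labels assumed):

    #include <dt-bindings/reset/sama7g5-reset.h>

    /* Sketch: &reset_controller and &usb_phy1 are assumed labels. */
    &usb_phy1 {
            resets = <&reset_controller SAMA7G5_RESET_USB_PHY1>;
    };
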
diff --git a/include/dt-bindings/reset/sophgo,sg2042-reset.h b/include/dt-bindings/reset/sophgo,sg2042-reset.h
new file mode 100644
index 000000000000..9ab0980625c1
--- /dev/null
+++ b/include/dt-bindings/reset/sophgo,sg2042-reset.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023 Sophgo Technology Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_RESET_SOPHGO_SG2042_H_
+#define __DT_BINDINGS_RESET_SOPHGO_SG2042_H_
+
+#define RST_MAIN_AP 0
+#define RST_RISCV_CPU 1
+#define RST_RISCV_LOW_SPEED_LOGIC 2
+#define RST_RISCV_CMN 3
+#define RST_HSDMA 4
+#define RST_SYSDMA 5
+#define RST_EFUSE0 6
+#define RST_EFUSE1 7
+#define RST_RTC 8
+#define RST_TIMER 9
+#define RST_WDT 10
+#define RST_AHB_ROM0 11
+#define RST_AHB_ROM1 12
+#define RST_I2C0 13
+#define RST_I2C1 14
+#define RST_I2C2 15
+#define RST_I2C3 16
+#define RST_GPIO0 17
+#define RST_GPIO1 18
+#define RST_GPIO2 19
+#define RST_PWM 20
+#define RST_AXI_SRAM0 21
+#define RST_AXI_SRAM1 22
+#define RST_SF0 23
+#define RST_SF1 24
+#define RST_LPC 25
+#define RST_ETH0 26
+#define RST_EMMC 27
+#define RST_SD 28
+#define RST_UART0 29
+#define RST_UART1 30
+#define RST_UART2 31
+#define RST_UART3 32
+#define RST_SPI0 33
+#define RST_SPI1 34
+#define RST_DBG_I2C 35
+#define RST_PCIE0 36
+#define RST_PCIE1 37
+#define RST_DDR0 38
+#define RST_DDR1 39
+#define RST_DDR2 40
+#define RST_DDR3 41
+#define RST_FAU0 42
+#define RST_FAU1 43
+#define RST_FAU2 44
+#define RST_RXU0 45
+#define RST_RXU1 46
+#define RST_RXU2 47
+#define RST_RXU3 48
+#define RST_RXU4 49
+#define RST_RXU5 50
+#define RST_RXU6 51
+#define RST_RXU7 52
+#define RST_RXU8 53
+#define RST_RXU9 54
+#define RST_RXU10 55
+#define RST_RXU11 56
+#define RST_RXU12 57
+#define RST_RXU13 58
+#define RST_RXU14 59
+#define RST_RXU15 60
+#define RST_RXU16 61
+#define RST_RXU17 62
+#define RST_RXU18 63
+#define RST_RXU19 64
+#define RST_RXU20 65
+#define RST_RXU21 66
+#define RST_RXU22 67
+#define RST_RXU23 68
+#define RST_RXU24 69
+#define RST_RXU25 70
+#define RST_RXU26 71
+#define RST_RXU27 72
+#define RST_RXU28 73
+#define RST_RXU29 74
+#define RST_RXU30 75
+#define RST_RXU31 76
+
+#endif /* __DT_BINDINGS_RESET_SOPHGO_SG2042_H_ */
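
The SG2042 IDs form one flat space covering the CPU complex, peripherals, and the 32 RXU blocks. A hedged sketch, labels assumed:

    #include <dt-bindings/reset/sophgo,sg2042-reset.h>

    /* Sketch: &rstgen and &uart0 are assumed labels. */
    &uart0 {
            resets = <&rstgen RST_UART0>;
    };
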
diff --git a/include/dt-bindings/reset/st,stm32mp21-rcc.h b/include/dt-bindings/reset/st,stm32mp21-rcc.h
new file mode 100644
index 000000000000..6463bd73d025
--- /dev/null
+++ b/include/dt-bindings/reset/st,stm32mp21-rcc.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com>
+ */
+
+#ifndef _DT_BINDINGS_STM32MP21_RESET_H_
+#define _DT_BINDINGS_STM32MP21_RESET_H_
+
+#define TIM1_R 0
+#define TIM2_R 1
+#define TIM3_R 2
+#define TIM4_R 3
+#define TIM5_R 4
+#define TIM6_R 5
+#define TIM7_R 6
+#define TIM8_R 7
+#define TIM10_R 8
+#define TIM11_R 9
+#define TIM12_R 10
+#define TIM13_R 11
+#define TIM14_R 12
+#define TIM15_R 13
+#define TIM16_R 14
+#define TIM17_R 15
+#define LPTIM1_R 16
+#define LPTIM2_R 17
+#define LPTIM3_R 18
+#define LPTIM4_R 19
+#define LPTIM5_R 20
+#define SPI1_R 21
+#define SPI2_R 22
+#define SPI3_R 23
+#define SPI4_R 24
+#define SPI5_R 25
+#define SPI6_R 26
+#define SPDIFRX_R 27
+#define USART1_R 28
+#define USART2_R 29
+#define USART3_R 30
+#define UART4_R 31
+#define UART5_R 32
+#define USART6_R 33
+#define UART7_R 34
+#define LPUART1_R 35
+#define I2C1_R 36
+#define I2C2_R 37
+#define I2C3_R 38
+#define SAI1_R 39
+#define SAI2_R 40
+#define SAI3_R 41
+#define SAI4_R 42
+#define MDF1_R 43
+#define FDCAN_R 44
+#define HDP_R 45
+#define ADC1_R 46
+#define ADC2_R 47
+#define ETH1_R 48
+#define ETH2_R 49
+#define USBH_R 50
+#define USB2PHY1_R 51
+#define USB2PHY2_R 52
+#define SDMMC1_R 53
+#define SDMMC1DLL_R 54
+#define SDMMC2_R 55
+#define SDMMC2DLL_R 56
+#define SDMMC3_R 57
+#define SDMMC3DLL_R 58
+#define LTDC_R 59
+#define CSI_R 60
+#define DCMIPP_R 61
+#define DCMIPSSI_R 62
+#define WWDG1_R 63
+#define VREF_R 64
+#define DTS_R 65
+#define CRC_R 66
+#define SERC_R 67
+#define I3C1_R 68
+#define I3C2_R 69
+#define I3C3_R 70
+#define IWDG2_KER_R 71
+#define IWDG4_KER_R 72
+#define RNG1_R 73
+#define RNG2_R 74
+#define PKA_R 75
+#define SAES_R 76
+#define HASH1_R 77
+#define HASH2_R 78
+#define CRYP1_R 79
+#define CRYP2_R 80
+#define OSPI1_R 81
+#define OSPI1DLL_R 82
+#define OTG_R 83
+#define FMC_R 84
+#define DBG_R 85
+#define GPIOA_R 86
+#define GPIOB_R 87
+#define GPIOC_R 88
+#define GPIOD_R 89
+#define GPIOE_R 90
+#define GPIOF_R 91
+#define GPIOG_R 92
+#define GPIOH_R 93
+#define GPIOI_R 94
+#define GPIOZ_R 95
+#define HPDMA1_R 96
+#define HPDMA2_R 97
+#define HPDMA3_R 98
+#define IPCC1_R 99
+#define C2_HOLDBOOT_R 100
+#define C1_HOLDBOOT_R 101
+#define C1_R 102
+#define C1P1POR_R 103
+#define C1P1_R 104
+#define C2_R 105
+#define SYS_R 106
+#define VSW_R 107
+#define C1MS_R 108
+#define DDRCP_R 109
+#define DDRCAPB_R 110
+#define DDRPHYCAPB_R 111
+#define DDRCFG_R 112
+#define DDR_R 113
+#define DDRPERFM_R 114
+#define IWDG1_SYS_R 116
+#define IWDG2_SYS_R 117
+#define IWDG3_SYS_R 118
+#define IWDG4_SYS_R 119
+
+#define RST_SCMI_C1_R 0
+#define RST_SCMI_C2_R 1
+#define RST_SCMI_C1_HOLDBOOT_R 2
+#define RST_SCMI_C2_HOLDBOOT_R 3
+#define RST_SCMI_FMC 4
+#define RST_SCMI_OSPI1 5
+#define RST_SCMI_OSPI1DLL 6
+
+#endif /* _DT_BINDINGS_STM32MP21_RESET_H_ */
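
As on other STM32MP parts, the *_R peripheral IDs are consumed against the RCC node, while the RST_SCMI_* values index resets proxied by SCMI firmware. A hedged sketch, labels assumed:

    #include <dt-bindings/reset/st,stm32mp21-rcc.h>

    /* Sketch: &rcc and &usart1 are assumed labels. */
    &usart1 {
            resets = <&rcc USART1_R>;
    };
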
diff --git a/include/dt-bindings/reset/st,stm32mp25-rcc.h b/include/dt-bindings/reset/st,stm32mp25-rcc.h
new file mode 100644
index 000000000000..748e78ae20bd
--- /dev/null
+++ b/include/dt-bindings/reset/st,stm32mp25-rcc.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2023 - All Rights Reserved
+ * Author(s): Gabriel Fernandez <gabriel.fernandez@foss.st.com>
+ */
+
+#ifndef _DT_BINDINGS_STM32MP25_RESET_H_
+#define _DT_BINDINGS_STM32MP25_RESET_H_
+
+#define TIM1_R 0
+#define TIM2_R 1
+#define TIM3_R 2
+#define TIM4_R 3
+#define TIM5_R 4
+#define TIM6_R 5
+#define TIM7_R 6
+#define TIM8_R 7
+#define TIM10_R 8
+#define TIM11_R 9
+#define TIM12_R 10
+#define TIM13_R 11
+#define TIM14_R 12
+#define TIM15_R 13
+#define TIM16_R 14
+#define TIM17_R 15
+#define TIM20_R 16
+#define LPTIM1_R 17
+#define LPTIM2_R 18
+#define LPTIM3_R 19
+#define LPTIM4_R 20
+#define LPTIM5_R 21
+#define SPI1_R 22
+#define SPI2_R 23
+#define SPI3_R 24
+#define SPI4_R 25
+#define SPI5_R 26
+#define SPI6_R 27
+#define SPI7_R 28
+#define SPI8_R 29
+#define SPDIFRX_R 30
+#define USART1_R 31
+#define USART2_R 32
+#define USART3_R 33
+#define UART4_R 34
+#define UART5_R 35
+#define USART6_R 36
+#define UART7_R 37
+#define UART8_R 38
+#define UART9_R 39
+#define LPUART1_R 40
+#define IS2M_R 41
+#define I2C1_R 42
+#define I2C2_R 43
+#define I2C3_R 44
+#define I2C4_R 45
+#define I2C5_R 46
+#define I2C6_R 47
+#define I2C7_R 48
+#define I2C8_R 49
+#define SAI1_R 50
+#define SAI2_R 51
+#define SAI3_R 52
+#define SAI4_R 53
+#define MDF1_R 54
+#define MDF2_R 55
+#define FDCAN_R 56
+#define HDP_R 57
+#define ADC12_R 58
+#define ADC3_R 59
+#define ETH1_R 60
+#define ETH2_R 61
+#define USBH_R 62
+#define USB2PHY1_R 63
+#define USB2PHY2_R 64
+#define USB3DR_R 65
+#define USB3PCIEPHY_R 66
+#define USBTC_R 67
+#define ETHSW_R 68
+#define SDMMC1_R 69
+#define SDMMC1DLL_R 70
+#define SDMMC2_R 71
+#define SDMMC2DLL_R 72
+#define SDMMC3_R 73
+#define SDMMC3DLL_R 74
+#define GPU_R 75
+#define LTDC_R 76
+#define DSI_R 77
+#define LVDS_R 78
+#define CSI_R 79
+#define DCMIPP_R 80
+#define CCI_R 81
+#define VDEC_R 82
+#define VENC_R 83
+#define WWDG1_R 84
+#define WWDG2_R 85
+#define VREF_R 86
+#define DTS_R 87
+#define CRC_R 88
+#define SERC_R 89
+#define OSPIIOM_R 90
+#define I3C1_R 91
+#define I3C2_R 92
+#define I3C3_R 93
+#define I3C4_R 94
+#define IWDG2_KER_R 95
+#define IWDG4_KER_R 96
+#define RNG_R 97
+#define PKA_R 98
+#define SAES_R 99
+#define HASH_R 100
+#define CRYP1_R 101
+#define CRYP2_R 102
+#define PCIE_R 103
+#define OSPI1_R 104
+#define OSPI1DLL_R 105
+#define OSPI2_R 106
+#define OSPI2DLL_R 107
+#define FMC_R 108
+#define DBG_R 109
+#define GPIOA_R 110
+#define GPIOB_R 111
+#define GPIOC_R 112
+#define GPIOD_R 113
+#define GPIOE_R 114
+#define GPIOF_R 115
+#define GPIOG_R 116
+#define GPIOH_R 117
+#define GPIOI_R 118
+#define GPIOJ_R 119
+#define GPIOK_R 120
+#define GPIOZ_R 121
+#define HPDMA1_R 122
+#define HPDMA2_R 123
+#define HPDMA3_R 124
+#define LPDMA_R 125
+#define HSEM_R 126
+#define IPCC1_R 127
+#define IPCC2_R 128
+#define C2_HOLDBOOT_R 129
+#define C1_HOLDBOOT_R 130
+#define C1_R 131
+#define C1P1POR_R 132
+#define C1P1_R 133
+#define C2_R 134
+#define C3_R 135
+#define SYS_R 136
+#define VSW_R 137
+#define C1MS_R 138
+#define DDRCP_R 139
+#define DDRCAPB_R 140
+#define DDRPHYCAPB_R 141
+#define DDRCFG_R 142
+#define DDR_R 143
+
+#define STM32MP25_LAST_RESET 144
+
+#define RST_SCMI_C1_R 0
+#define RST_SCMI_C2_R 1
+#define RST_SCMI_C1_HOLDBOOT_R 2
+#define RST_SCMI_C2_HOLDBOOT_R 3
+#define RST_SCMI_FMC 4
+#define RST_SCMI_OSPI1 5
+#define RST_SCMI_OSPI1DLL 6
+#define RST_SCMI_OSPI2 7
+#define RST_SCMI_OSPI2DLL 8
+
+#endif /* _DT_BINDINGS_STM32MP25_RESET_H_ */
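
The MP25 header follows the same RCC/SCMI split and additionally exports STM32MP25_LAST_RESET, presumably so a driver can size its reset table. Sketch with assumed labels:

    #include <dt-bindings/reset/st,stm32mp25-rcc.h>

    /* Sketch: &rcc, &scmi_reset and the consumer labels are assumptions. */
    &sdmmc1 {
            resets = <&rcc SDMMC1_R>;
    };

    &fmc {
            /* Firmware-owned reset, requested through SCMI. */
            resets = <&scmi_reset RST_SCMI_FMC>;
    };
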
diff --git a/include/dt-bindings/reset/starfive,jh7110-crg.h b/include/dt-bindings/reset/starfive,jh7110-crg.h
new file mode 100644
index 000000000000..eaf4a0d84f6a
--- /dev/null
+++ b/include/dt-bindings/reset/starfive,jh7110-crg.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2022 Emil Renner Berthing <kernel@esmil.dk>
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ */
+
+#ifndef __DT_BINDINGS_RESET_STARFIVE_JH7110_CRG_H__
+#define __DT_BINDINGS_RESET_STARFIVE_JH7110_CRG_H__
+
+/* SYSCRG resets */
+#define JH7110_SYSRST_JTAG_APB 0
+#define JH7110_SYSRST_SYSCON_APB 1
+#define JH7110_SYSRST_IOMUX_APB 2
+#define JH7110_SYSRST_BUS 3
+#define JH7110_SYSRST_DEBUG 4
+#define JH7110_SYSRST_CORE0 5
+#define JH7110_SYSRST_CORE1 6
+#define JH7110_SYSRST_CORE2 7
+#define JH7110_SYSRST_CORE3 8
+#define JH7110_SYSRST_CORE4 9
+#define JH7110_SYSRST_CORE0_ST 10
+#define JH7110_SYSRST_CORE1_ST 11
+#define JH7110_SYSRST_CORE2_ST 12
+#define JH7110_SYSRST_CORE3_ST 13
+#define JH7110_SYSRST_CORE4_ST 14
+#define JH7110_SYSRST_TRACE0 15
+#define JH7110_SYSRST_TRACE1 16
+#define JH7110_SYSRST_TRACE2 17
+#define JH7110_SYSRST_TRACE3 18
+#define JH7110_SYSRST_TRACE4 19
+#define JH7110_SYSRST_TRACE_COM 20
+#define JH7110_SYSRST_GPU_APB 21
+#define JH7110_SYSRST_GPU_DOMA 22
+#define JH7110_SYSRST_NOC_BUS_APB 23
+#define JH7110_SYSRST_NOC_BUS_AXICFG0_AXI 24
+#define JH7110_SYSRST_NOC_BUS_CPU_AXI 25
+#define JH7110_SYSRST_NOC_BUS_DISP_AXI 26
+#define JH7110_SYSRST_NOC_BUS_GPU_AXI 27
+#define JH7110_SYSRST_NOC_BUS_ISP_AXI 28
+#define JH7110_SYSRST_NOC_BUS_DDRC 29
+#define JH7110_SYSRST_NOC_BUS_STG_AXI 30
+#define JH7110_SYSRST_NOC_BUS_VDEC_AXI 31
+
+#define JH7110_SYSRST_NOC_BUS_VENC_AXI 32
+#define JH7110_SYSRST_AXI_CFG1_AHB 33
+#define JH7110_SYSRST_AXI_CFG1_MAIN 34
+#define JH7110_SYSRST_AXI_CFG0_MAIN 35
+#define JH7110_SYSRST_AXI_CFG0_MAIN_DIV 36
+#define JH7110_SYSRST_AXI_CFG0_HIFI4 37
+#define JH7110_SYSRST_DDR_AXI 38
+#define JH7110_SYSRST_DDR_OSC 39
+#define JH7110_SYSRST_DDR_APB 40
+#define JH7110_SYSRST_ISP_TOP 41
+#define JH7110_SYSRST_ISP_TOP_AXI 42
+#define JH7110_SYSRST_VOUT_TOP_SRC 43
+#define JH7110_SYSRST_CODAJ12_AXI 44
+#define JH7110_SYSRST_CODAJ12_CORE 45
+#define JH7110_SYSRST_CODAJ12_APB 46
+#define JH7110_SYSRST_WAVE511_AXI 47
+#define JH7110_SYSRST_WAVE511_BPU 48
+#define JH7110_SYSRST_WAVE511_VCE 49
+#define JH7110_SYSRST_WAVE511_APB 50
+#define JH7110_SYSRST_VDEC_JPG 51
+#define JH7110_SYSRST_VDEC_MAIN 52
+#define JH7110_SYSRST_AXIMEM0_AXI 53
+#define JH7110_SYSRST_WAVE420L_AXI 54
+#define JH7110_SYSRST_WAVE420L_BPU 55
+#define JH7110_SYSRST_WAVE420L_VCE 56
+#define JH7110_SYSRST_WAVE420L_APB 57
+#define JH7110_SYSRST_AXIMEM1_AXI 58
+#define JH7110_SYSRST_AXIMEM2_AXI 59
+#define JH7110_SYSRST_INTMEM 60
+#define JH7110_SYSRST_QSPI_AHB 61
+#define JH7110_SYSRST_QSPI_APB 62
+#define JH7110_SYSRST_QSPI_REF 63
+
+#define JH7110_SYSRST_SDIO0_AHB 64
+#define JH7110_SYSRST_SDIO1_AHB 65
+#define JH7110_SYSRST_GMAC1_AXI 66
+#define JH7110_SYSRST_GMAC1_AHB 67
+#define JH7110_SYSRST_MAILBOX_APB 68
+#define JH7110_SYSRST_SPI0_APB 69
+#define JH7110_SYSRST_SPI1_APB 70
+#define JH7110_SYSRST_SPI2_APB 71
+#define JH7110_SYSRST_SPI3_APB 72
+#define JH7110_SYSRST_SPI4_APB 73
+#define JH7110_SYSRST_SPI5_APB 74
+#define JH7110_SYSRST_SPI6_APB 75
+#define JH7110_SYSRST_I2C0_APB 76
+#define JH7110_SYSRST_I2C1_APB 77
+#define JH7110_SYSRST_I2C2_APB 78
+#define JH7110_SYSRST_I2C3_APB 79
+#define JH7110_SYSRST_I2C4_APB 80
+#define JH7110_SYSRST_I2C5_APB 81
+#define JH7110_SYSRST_I2C6_APB 82
+#define JH7110_SYSRST_UART0_APB 83
+#define JH7110_SYSRST_UART0_CORE 84
+#define JH7110_SYSRST_UART1_APB 85
+#define JH7110_SYSRST_UART1_CORE 86
+#define JH7110_SYSRST_UART2_APB 87
+#define JH7110_SYSRST_UART2_CORE 88
+#define JH7110_SYSRST_UART3_APB 89
+#define JH7110_SYSRST_UART3_CORE 90
+#define JH7110_SYSRST_UART4_APB 91
+#define JH7110_SYSRST_UART4_CORE 92
+#define JH7110_SYSRST_UART5_APB 93
+#define JH7110_SYSRST_UART5_CORE 94
+#define JH7110_SYSRST_SPDIF_APB 95
+
+#define JH7110_SYSRST_PWMDAC_APB 96
+#define JH7110_SYSRST_PDM_DMIC 97
+#define JH7110_SYSRST_PDM_APB 98
+#define JH7110_SYSRST_I2SRX_APB 99
+#define JH7110_SYSRST_I2SRX_BCLK 100
+#define JH7110_SYSRST_I2STX0_APB 101
+#define JH7110_SYSRST_I2STX0_BCLK 102
+#define JH7110_SYSRST_I2STX1_APB 103
+#define JH7110_SYSRST_I2STX1_BCLK 104
+#define JH7110_SYSRST_TDM_AHB 105
+#define JH7110_SYSRST_TDM_CORE 106
+#define JH7110_SYSRST_TDM_APB 107
+#define JH7110_SYSRST_PWM_APB 108
+#define JH7110_SYSRST_WDT_APB 109
+#define JH7110_SYSRST_WDT_CORE 110
+#define JH7110_SYSRST_CAN0_APB 111
+#define JH7110_SYSRST_CAN0_CORE 112
+#define JH7110_SYSRST_CAN0_TIMER 113
+#define JH7110_SYSRST_CAN1_APB 114
+#define JH7110_SYSRST_CAN1_CORE 115
+#define JH7110_SYSRST_CAN1_TIMER 116
+#define JH7110_SYSRST_TIMER_APB 117
+#define JH7110_SYSRST_TIMER0 118
+#define JH7110_SYSRST_TIMER1 119
+#define JH7110_SYSRST_TIMER2 120
+#define JH7110_SYSRST_TIMER3 121
+#define JH7110_SYSRST_INT_CTRL_APB 122
+#define JH7110_SYSRST_TEMP_APB 123
+#define JH7110_SYSRST_TEMP_CORE 124
+#define JH7110_SYSRST_JTAG_CERTIFICATION 125
+
+#define JH7110_SYSRST_END 126
+
+/* AONCRG resets */
+#define JH7110_AONRST_GMAC0_AXI 0
+#define JH7110_AONRST_GMAC0_AHB 1
+#define JH7110_AONRST_IOMUX 2
+#define JH7110_AONRST_PMU_APB 3
+#define JH7110_AONRST_PMU_WKUP 4
+#define JH7110_AONRST_RTC_APB 5
+#define JH7110_AONRST_RTC_CAL 6
+#define JH7110_AONRST_RTC_32K 7
+
+#define JH7110_AONRST_END 8
+
+/* STGCRG resets */
+#define JH7110_STGRST_SYSCON 0
+#define JH7110_STGRST_HIFI4_CORE 1
+#define JH7110_STGRST_HIFI4_AXI 2
+#define JH7110_STGRST_SEC_AHB 3
+#define JH7110_STGRST_E24_CORE 4
+#define JH7110_STGRST_DMA1P_AXI 5
+#define JH7110_STGRST_DMA1P_AHB 6
+#define JH7110_STGRST_USB0_AXI 7
+#define JH7110_STGRST_USB0_APB 8
+#define JH7110_STGRST_USB0_UTMI_APB 9
+#define JH7110_STGRST_USB0_PWRUP 10
+#define JH7110_STGRST_PCIE0_AXI_MST0 11
+#define JH7110_STGRST_PCIE0_AXI_SLV0 12
+#define JH7110_STGRST_PCIE0_AXI_SLV 13
+#define JH7110_STGRST_PCIE0_BRG 14
+#define JH7110_STGRST_PCIE0_CORE 15
+#define JH7110_STGRST_PCIE0_APB 16
+#define JH7110_STGRST_PCIE1_AXI_MST0 17
+#define JH7110_STGRST_PCIE1_AXI_SLV0 18
+#define JH7110_STGRST_PCIE1_AXI_SLV 19
+#define JH7110_STGRST_PCIE1_BRG 20
+#define JH7110_STGRST_PCIE1_CORE 21
+#define JH7110_STGRST_PCIE1_APB 22
+
+#define JH7110_STGRST_END 23
+
+/* ISPCRG resets */
+#define JH7110_ISPRST_ISPV2_TOP_WRAPPER_P 0
+#define JH7110_ISPRST_ISPV2_TOP_WRAPPER_C 1
+#define JH7110_ISPRST_M31DPHY_HW 2
+#define JH7110_ISPRST_M31DPHY_B09_AON 3
+#define JH7110_ISPRST_VIN_APB 4
+#define JH7110_ISPRST_VIN_PIXEL_IF0 5
+#define JH7110_ISPRST_VIN_PIXEL_IF1 6
+#define JH7110_ISPRST_VIN_PIXEL_IF2 7
+#define JH7110_ISPRST_VIN_PIXEL_IF3 8
+#define JH7110_ISPRST_VIN_SYS 9
+#define JH7110_ISPRST_VIN_P_AXI_RD 10
+#define JH7110_ISPRST_VIN_P_AXI_WR 11
+
+#define JH7110_ISPRST_END 12
+
+/* VOUTCRG resets */
+#define JH7110_VOUTRST_DC8200_AXI 0
+#define JH7110_VOUTRST_DC8200_AHB 1
+#define JH7110_VOUTRST_DC8200_CORE 2
+#define JH7110_VOUTRST_DSITX_DPI 3
+#define JH7110_VOUTRST_DSITX_APB 4
+#define JH7110_VOUTRST_DSITX_RXESC 5
+#define JH7110_VOUTRST_DSITX_SYS 6
+#define JH7110_VOUTRST_DSITX_TXBYTEHS 7
+#define JH7110_VOUTRST_DSITX_TXESC 8
+#define JH7110_VOUTRST_HDMI_TX_HDMI 9
+#define JH7110_VOUTRST_MIPITX_DPHY_SYS 10
+#define JH7110_VOUTRST_MIPITX_DPHY_TXBYTEHS 11
+
+#define JH7110_VOUTRST_END 12
+
+#endif /* __DT_BINDINGS_RESET_STARFIVE_JH7110_CRG_H__ */
diff --git a/include/dt-bindings/reset/starfive-jh7100.h b/include/dt-bindings/reset/starfive-jh7100.h
new file mode 100644
index 000000000000..540e19254f39
--- /dev/null
+++ b/include/dt-bindings/reset/starfive-jh7100.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2021 Ahmad Fatoum, Pengutronix
+ */
+
+#ifndef __DT_BINDINGS_RESET_STARFIVE_JH7100_H__
+#define __DT_BINDINGS_RESET_STARFIVE_JH7100_H__
+
+#define JH7100_RSTN_DOM3AHB_BUS 0
+#define JH7100_RSTN_DOM7AHB_BUS 1
+#define JH7100_RST_U74 2
+#define JH7100_RSTN_U74_AXI 3
+#define JH7100_RSTN_SGDMA2P_AHB 4
+#define JH7100_RSTN_SGDMA2P_AXI 5
+#define JH7100_RSTN_DMA2PNOC_AXI 6
+#define JH7100_RSTN_DLA_AXI 7
+#define JH7100_RSTN_DLANOC_AXI 8
+#define JH7100_RSTN_DLA_APB 9
+#define JH7100_RST_VP6_DRESET 10
+#define JH7100_RST_VP6_BRESET 11
+#define JH7100_RSTN_VP6_AXI 12
+#define JH7100_RSTN_VDECBRG_MAIN 13
+#define JH7100_RSTN_VDEC_AXI 14
+#define JH7100_RSTN_VDEC_BCLK 15
+#define JH7100_RSTN_VDEC_CCLK 16
+#define JH7100_RSTN_VDEC_APB 17
+#define JH7100_RSTN_JPEG_AXI 18
+#define JH7100_RSTN_JPEG_CCLK 19
+#define JH7100_RSTN_JPEG_APB 20
+#define JH7100_RSTN_JPCGC300_MAIN 21
+#define JH7100_RSTN_GC300_2X 22
+#define JH7100_RSTN_GC300_AXI 23
+#define JH7100_RSTN_GC300_AHB 24
+#define JH7100_RSTN_VENC_AXI 25
+#define JH7100_RSTN_VENCBRG_MAIN 26
+#define JH7100_RSTN_VENC_BCLK 27
+#define JH7100_RSTN_VENC_CCLK 28
+#define JH7100_RSTN_VENC_APB 29
+#define JH7100_RSTN_DDRPHY_APB 30
+#define JH7100_RSTN_NOC_ROB 31
+#define JH7100_RSTN_NOC_COG 32
+#define JH7100_RSTN_HIFI4_AXI 33
+#define JH7100_RSTN_HIFI4NOC_AXI 34
+#define JH7100_RST_HIFI4_DRESET 35
+#define JH7100_RST_HIFI4_BRESET 36
+#define JH7100_RSTN_USB_AXI 37
+#define JH7100_RSTN_USBNOC_AXI 38
+#define JH7100_RSTN_SGDMA1P_AXI 39
+#define JH7100_RSTN_DMA1P_AXI 40
+#define JH7100_RSTN_X2C_AXI 41
+#define JH7100_RSTN_NNE_AHB 42
+#define JH7100_RSTN_NNE_AXI 43
+#define JH7100_RSTN_NNENOC_AXI 44
+#define JH7100_RSTN_DLASLV_AXI 45
+#define JH7100_RSTN_DSPX2C_AXI 46
+#define JH7100_RSTN_VIN_SRC 47
+#define JH7100_RSTN_ISPSLV_AXI 48
+#define JH7100_RSTN_VIN_AXI 49
+#define JH7100_RSTN_VINNOC_AXI 50
+#define JH7100_RSTN_ISP0_AXI 51
+#define JH7100_RSTN_ISP0NOC_AXI 52
+#define JH7100_RSTN_ISP1_AXI 53
+#define JH7100_RSTN_ISP1NOC_AXI 54
+#define JH7100_RSTN_VOUT_SRC 55
+#define JH7100_RSTN_DISP_AXI 56
+#define JH7100_RSTN_DISPNOC_AXI 57
+#define JH7100_RSTN_SDIO0_AHB 58
+#define JH7100_RSTN_SDIO1_AHB 59
+#define JH7100_RSTN_GMAC_AHB 60
+#define JH7100_RSTN_SPI2AHB_AHB 61
+#define JH7100_RSTN_SPI2AHB_CORE 62
+#define JH7100_RSTN_EZMASTER_AHB 63
+#define JH7100_RST_E24 64
+#define JH7100_RSTN_QSPI_AHB 65
+#define JH7100_RSTN_QSPI_CORE 66
+#define JH7100_RSTN_QSPI_APB 67
+#define JH7100_RSTN_SEC_AHB 68
+#define JH7100_RSTN_AES 69
+#define JH7100_RSTN_PKA 70
+#define JH7100_RSTN_SHA 71
+#define JH7100_RSTN_TRNG_APB 72
+#define JH7100_RSTN_OTP_APB 73
+#define JH7100_RSTN_UART0_APB 74
+#define JH7100_RSTN_UART0_CORE 75
+#define JH7100_RSTN_UART1_APB 76
+#define JH7100_RSTN_UART1_CORE 77
+#define JH7100_RSTN_SPI0_APB 78
+#define JH7100_RSTN_SPI0_CORE 79
+#define JH7100_RSTN_SPI1_APB 80
+#define JH7100_RSTN_SPI1_CORE 81
+#define JH7100_RSTN_I2C0_APB 82
+#define JH7100_RSTN_I2C0_CORE 83
+#define JH7100_RSTN_I2C1_APB 84
+#define JH7100_RSTN_I2C1_CORE 85
+#define JH7100_RSTN_GPIO_APB 86
+#define JH7100_RSTN_UART2_APB 87
+#define JH7100_RSTN_UART2_CORE 88
+#define JH7100_RSTN_UART3_APB 89
+#define JH7100_RSTN_UART3_CORE 90
+#define JH7100_RSTN_SPI2_APB 91
+#define JH7100_RSTN_SPI2_CORE 92
+#define JH7100_RSTN_SPI3_APB 93
+#define JH7100_RSTN_SPI3_CORE 94
+#define JH7100_RSTN_I2C2_APB 95
+#define JH7100_RSTN_I2C2_CORE 96
+#define JH7100_RSTN_I2C3_APB 97
+#define JH7100_RSTN_I2C3_CORE 98
+#define JH7100_RSTN_WDTIMER_APB 99
+#define JH7100_RSTN_WDT 100
+#define JH7100_RSTN_TIMER0 101
+#define JH7100_RSTN_TIMER1 102
+#define JH7100_RSTN_TIMER2 103
+#define JH7100_RSTN_TIMER3 104
+#define JH7100_RSTN_TIMER4 105
+#define JH7100_RSTN_TIMER5 106
+#define JH7100_RSTN_TIMER6 107
+#define JH7100_RSTN_VP6INTC_APB 108
+#define JH7100_RSTN_PWM_APB 109
+#define JH7100_RSTN_MSI_APB 110
+#define JH7100_RSTN_TEMP_APB 111
+#define JH7100_RSTN_TEMP_SENSE 112
+#define JH7100_RSTN_SYSERR_APB 113
+
+#define JH7100_RSTN_END 114
+
+#endif /* __DT_BINDINGS_RESET_STARFIVE_JH7100_H__ */
diff --git a/include/dt-bindings/reset/stericsson,db8500-prcc-reset.h b/include/dt-bindings/reset/stericsson,db8500-prcc-reset.h
new file mode 100644
index 000000000000..ea906896c70f
--- /dev/null
+++ b/include/dt-bindings/reset/stericsson,db8500-prcc-reset.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_STE_PRCC_RESET
+#define _DT_BINDINGS_STE_PRCC_RESET
+
+#define DB8500_PRCC_1 1
+#define DB8500_PRCC_2 2
+#define DB8500_PRCC_3 3
+#define DB8500_PRCC_6 6
+
+/* Reset lines on PRCC 1 */
+#define DB8500_PRCC_1_RESET_UART0 0
+#define DB8500_PRCC_1_RESET_UART1 1
+#define DB8500_PRCC_1_RESET_I2C1 2
+#define DB8500_PRCC_1_RESET_MSP0 3
+#define DB8500_PRCC_1_RESET_MSP1 4
+#define DB8500_PRCC_1_RESET_SDI0 5
+#define DB8500_PRCC_1_RESET_I2C2 6
+#define DB8500_PRCC_1_RESET_SPI3 7
+#define DB8500_PRCC_1_RESET_SLIMBUS0 8
+#define DB8500_PRCC_1_RESET_I2C4 9
+#define DB8500_PRCC_1_RESET_MSP3 10
+#define DB8500_PRCC_1_RESET_PER_MSP3 11
+#define DB8500_PRCC_1_RESET_PER_MSP1 12
+#define DB8500_PRCC_1_RESET_PER_MSP0 13
+#define DB8500_PRCC_1_RESET_PER_SLIMBUS 14
+
+/* Reset lines on PRCC 2 */
+#define DB8500_PRCC_2_RESET_I2C3 0
+#define DB8500_PRCC_2_RESET_PWL 1
+#define DB8500_PRCC_2_RESET_SDI4 2
+#define DB8500_PRCC_2_RESET_MSP2 3
+#define DB8500_PRCC_2_RESET_SDI1 4
+#define DB8500_PRCC_2_RESET_SDI3 5
+#define DB8500_PRCC_2_RESET_HSIRX 6
+#define DB8500_PRCC_2_RESET_HSITX 7
+#define DB8500_PRCC_1_RESET_PER_MSP2 8
+
+/* Reset lines on PRCC 3 */
+#define DB8500_PRCC_3_RESET_SSP0 1
+#define DB8500_PRCC_3_RESET_SSP1 2
+#define DB8500_PRCC_3_RESET_I2C0 3
+#define DB8500_PRCC_3_RESET_SDI2 4
+#define DB8500_PRCC_3_RESET_SKE 5
+#define DB8500_PRCC_3_RESET_UART2 6
+#define DB8500_PRCC_3_RESET_SDI5 7
+
+/* Reset lines on PRCC 6 */
+#define DB8500_PRCC_3_RESET_RNG 0
+
+#endif
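The first block of constants selects a PRCC instance and the later blocks index a line within that instance, which suggests a two-cell reset specifier. A hedged sketch, assuming a &prcc_reset controller with #reset-cells = <2> and a &serial0 consumer:

    #include <dt-bindings/reset/stericsson,db8500-prcc-reset.h>

    &serial0 {
            resets = <&prcc_reset DB8500_PRCC_1 DB8500_PRCC_1_RESET_UART0>;
    };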
diff --git a/include/dt-bindings/reset/stih415-resets.h b/include/dt-bindings/reset/stih415-resets.h
deleted file mode 100644
index 96f7831a1db0..000000000000
--- a/include/dt-bindings/reset/stih415-resets.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header provides constants for the reset controller
- * based peripheral powerdown requests on the STMicroelectronics
- * STiH415 SoC.
- */
-#ifndef _DT_BINDINGS_RESET_CONTROLLER_STIH415
-#define _DT_BINDINGS_RESET_CONTROLLER_STIH415
-
-#define STIH415_EMISS_POWERDOWN 0
-#define STIH415_NAND_POWERDOWN 1
-#define STIH415_KEYSCAN_POWERDOWN 2
-#define STIH415_USB0_POWERDOWN 3
-#define STIH415_USB1_POWERDOWN 4
-#define STIH415_USB2_POWERDOWN 5
-#define STIH415_SATA0_POWERDOWN 6
-#define STIH415_SATA1_POWERDOWN 7
-#define STIH415_PCIE_POWERDOWN 8
-
-#define STIH415_ETH0_SOFTRESET 0
-#define STIH415_ETH1_SOFTRESET 1
-#define STIH415_IRB_SOFTRESET 2
-#define STIH415_USB0_SOFTRESET 3
-#define STIH415_USB1_SOFTRESET 4
-#define STIH415_USB2_SOFTRESET 5
-#define STIH415_KEYSCAN_SOFTRESET 6
-
-#endif /* _DT_BINDINGS_RESET_CONTROLLER_STIH415 */
diff --git a/include/dt-bindings/reset/stih416-resets.h b/include/dt-bindings/reset/stih416-resets.h
deleted file mode 100644
index f682c906ed5a..000000000000
--- a/include/dt-bindings/reset/stih416-resets.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This header provides constants for the reset controller
- * based peripheral powerdown requests on the STMicroelectronics
- * STiH416 SoC.
- */
-#ifndef _DT_BINDINGS_RESET_CONTROLLER_STIH416
-#define _DT_BINDINGS_RESET_CONTROLLER_STIH416
-
-#define STIH416_EMISS_POWERDOWN 0
-#define STIH416_NAND_POWERDOWN 1
-#define STIH416_KEYSCAN_POWERDOWN 2
-#define STIH416_USB0_POWERDOWN 3
-#define STIH416_USB1_POWERDOWN 4
-#define STIH416_USB2_POWERDOWN 5
-#define STIH416_USB3_POWERDOWN 6
-#define STIH416_SATA0_POWERDOWN 7
-#define STIH416_SATA1_POWERDOWN 8
-#define STIH416_PCIE0_POWERDOWN 9
-#define STIH416_PCIE1_POWERDOWN 10
-
-#define STIH416_ETH0_SOFTRESET 0
-#define STIH416_ETH1_SOFTRESET 1
-#define STIH416_IRB_SOFTRESET 2
-#define STIH416_USB0_SOFTRESET 3
-#define STIH416_USB1_SOFTRESET 4
-#define STIH416_USB2_SOFTRESET 5
-#define STIH416_USB3_SOFTRESET 6
-#define STIH416_SATA0_SOFTRESET 7
-#define STIH416_SATA1_SOFTRESET 8
-#define STIH416_PCIE0_SOFTRESET 9
-#define STIH416_PCIE1_SOFTRESET 10
-#define STIH416_AUD_DAC_SOFTRESET 11
-#define STIH416_HDTVOUT_SOFTRESET 12
-#define STIH416_VTAC_M_RX_SOFTRESET 13
-#define STIH416_VTAC_A_RX_SOFTRESET 14
-#define STIH416_SYNC_HD_SOFTRESET 15
-#define STIH416_SYNC_SD_SOFTRESET 16
-#define STIH416_BLITTER_SOFTRESET 17
-#define STIH416_GPU_SOFTRESET 18
-#define STIH416_VTAC_M_TX_SOFTRESET 19
-#define STIH416_VTAC_A_TX_SOFTRESET 20
-#define STIH416_VTG_AUX_SOFTRESET 21
-#define STIH416_JPEG_DEC_SOFTRESET 22
-#define STIH416_HVA_SOFTRESET 23
-#define STIH416_COMPO_M_SOFTRESET 24
-#define STIH416_COMPO_A_SOFTRESET 25
-#define STIH416_VP8_DEC_SOFTRESET 26
-#define STIH416_VTG_MAIN_SOFTRESET 27
-#define STIH416_KEYSCAN_SOFTRESET 28
-
-#endif /* _DT_BINDINGS_RESET_CONTROLLER_STIH416 */
diff --git a/include/dt-bindings/reset/stm32mp1-resets.h b/include/dt-bindings/reset/stm32mp1-resets.h
index f0c3aaef67a0..9071f139649f 100644
--- a/include/dt-bindings/reset/stm32mp1-resets.h
+++ b/include/dt-bindings/reset/stm32mp1-resets.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
@@ -7,6 +7,7 @@
#ifndef _DT_BINDINGS_STM32MP1_RESET_H_
#define _DT_BINDINGS_STM32MP1_RESET_H_
+#define MCU_HOLD_BOOT_R 2144
#define LTDC_R 3072
#define DSI_R 3076
#define DDRPERFM_R 3080
@@ -105,4 +106,18 @@
#define GPIOJ_R 19785
#define GPIOK_R 19786
+/* SCMI reset domain identifiers */
+#define RST_SCMI_SPI6 0
+#define RST_SCMI_I2C4 1
+#define RST_SCMI_I2C6 2
+#define RST_SCMI_USART1 3
+#define RST_SCMI_STGEN 4
+#define RST_SCMI_GPIOZ 5
+#define RST_SCMI_CRYP1 6
+#define RST_SCMI_HASH1 7
+#define RST_SCMI_RNG1 8
+#define RST_SCMI_MDMA 9
+#define RST_SCMI_MCU 10
+#define RST_SCMI_MCU_HOLD_BOOT 11
+
#endif /* _DT_BINDINGS_STM32MP1_RESET_H_ */
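The new RST_SCMI_* values index reset domains exposed by SCMI firmware rather than raw RCC registers (the ARM SCMI reset protocol is protocol ID 0x16). A sketch of the consumer side, with the &scmi and &m4_rproc labels assumed:

    &scmi {
            scmi_reset: protocol@16 {
                    reg = <0x16>;
                    #reset-cells = <1>;
            };
    };

    &m4_rproc {
            resets = <&scmi_reset RST_SCMI_MCU_HOLD_BOOT>;
    };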
diff --git a/include/dt-bindings/reset/stm32mp13-resets.h b/include/dt-bindings/reset/stm32mp13-resets.h
new file mode 100644
index 000000000000..ecb37c7ddde1
--- /dev/null
+++ b/include/dt-bindings/reset/stm32mp13-resets.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#ifndef _DT_BINDINGS_STM32MP13_RESET_H_
+#define _DT_BINDINGS_STM32MP13_RESET_H_
+
+#define TIM2_R 13568
+#define TIM3_R 13569
+#define TIM4_R 13570
+#define TIM5_R 13571
+#define TIM6_R 13572
+#define TIM7_R 13573
+#define LPTIM1_R 13577
+#define SPI2_R 13579
+#define SPI3_R 13580
+#define USART3_R 13583
+#define UART4_R 13584
+#define UART5_R 13585
+#define UART7_R 13586
+#define UART8_R 13587
+#define I2C1_R 13589
+#define I2C2_R 13590
+#define SPDIF_R 13594
+#define TIM1_R 13632
+#define TIM8_R 13633
+#define SPI1_R 13640
+#define USART6_R 13645
+#define SAI1_R 13648
+#define SAI2_R 13649
+#define DFSDM_R 13652
+#define FDCAN_R 13656
+#define LPTIM2_R 13696
+#define LPTIM3_R 13697
+#define LPTIM4_R 13698
+#define LPTIM5_R 13699
+#define SYSCFG_R 13707
+#define VREF_R 13709
+#define DTS_R 13712
+#define PMBCTRL_R 13713
+#define LTDC_R 13760
+#define DCMIPP_R 13761
+#define DDRPERFM_R 13768
+#define USBPHY_R 13776
+#define STGEN_R 13844
+#define USART1_R 13888
+#define USART2_R 13889
+#define SPI4_R 13890
+#define SPI5_R 13891
+#define I2C3_R 13892
+#define I2C4_R 13893
+#define I2C5_R 13894
+#define TIM12_R 13895
+#define TIM13_R 13896
+#define TIM14_R 13897
+#define TIM15_R 13898
+#define TIM16_R 13899
+#define TIM17_R 13900
+#define DMA1_R 13952
+#define DMA2_R 13953
+#define DMAMUX1_R 13954
+#define DMA3_R 13955
+#define DMAMUX2_R 13956
+#define ADC1_R 13957
+#define ADC2_R 13958
+#define USBO_R 13960
+#define GPIOA_R 14080
+#define GPIOB_R 14081
+#define GPIOC_R 14082
+#define GPIOD_R 14083
+#define GPIOE_R 14084
+#define GPIOF_R 14085
+#define GPIOG_R 14086
+#define GPIOH_R 14087
+#define GPIOI_R 14088
+#define TSC_R 14095
+#define PKA_R 14146
+#define SAES_R 14147
+#define CRYP1_R 14148
+#define HASH1_R 14149
+#define RNG1_R 14150
+#define AXIMC_R 14160
+#define MDMA_R 14208
+#define MCE_R 14209
+#define ETH1MAC_R 14218
+#define FMC_R 14220
+#define QSPI_R 14222
+#define SDMMC1_R 14224
+#define SDMMC2_R 14225
+#define CRC1_R 14228
+#define USBH_R 14232
+#define ETH2MAC_R 14238
+
+/* SCMI reset domain identifiers */
+#define RST_SCMI_LTDC 0
+#define RST_SCMI_MDMA 1
+
+#endif /* _DT_BINDINGS_STM32MP13_RESET_H_ */
diff --git a/include/dt-bindings/reset/sun20i-d1-ccu.h b/include/dt-bindings/reset/sun20i-d1-ccu.h
new file mode 100644
index 000000000000..79e52aca5912
--- /dev/null
+++ b/include/dt-bindings/reset/sun20i-d1-ccu.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2020 huangzhenwei@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN20I_D1_CCU_H_
+#define _DT_BINDINGS_RST_SUN20I_D1_CCU_H_
+
+#define RST_MBUS 0
+#define RST_BUS_DE 1
+#define RST_BUS_DI 2
+#define RST_BUS_G2D 3
+#define RST_BUS_CE 4
+#define RST_BUS_VE 5
+#define RST_BUS_DMA 6
+#define RST_BUS_MSGBOX0 7
+#define RST_BUS_MSGBOX1 8
+#define RST_BUS_MSGBOX2 9
+#define RST_BUS_SPINLOCK 10
+#define RST_BUS_HSTIMER 11
+#define RST_BUS_DBG 12
+#define RST_BUS_PWM 13
+#define RST_BUS_DRAM 14
+#define RST_BUS_MMC0 15
+#define RST_BUS_MMC1 16
+#define RST_BUS_MMC2 17
+#define RST_BUS_UART0 18
+#define RST_BUS_UART1 19
+#define RST_BUS_UART2 20
+#define RST_BUS_UART3 21
+#define RST_BUS_UART4 22
+#define RST_BUS_UART5 23
+#define RST_BUS_I2C0 24
+#define RST_BUS_I2C1 25
+#define RST_BUS_I2C2 26
+#define RST_BUS_I2C3 27
+#define RST_BUS_SPI0 28
+#define RST_BUS_SPI1 29
+#define RST_BUS_EMAC 30
+#define RST_BUS_IR_TX 31
+#define RST_BUS_GPADC 32
+#define RST_BUS_THS 33
+#define RST_BUS_I2S0 34
+#define RST_BUS_I2S1 35
+#define RST_BUS_I2S2 36
+#define RST_BUS_SPDIF 37
+#define RST_BUS_DMIC 38
+#define RST_BUS_AUDIO 39
+#define RST_USB_PHY0 40
+#define RST_USB_PHY1 41
+#define RST_BUS_OHCI0 42
+#define RST_BUS_OHCI1 43
+#define RST_BUS_EHCI0 44
+#define RST_BUS_EHCI1 45
+#define RST_BUS_OTG 46
+#define RST_BUS_LRADC 47
+#define RST_BUS_DPSS_TOP 48
+#define RST_BUS_HDMI_SUB 49
+#define RST_BUS_HDMI_MAIN 50
+#define RST_BUS_MIPI_DSI 51
+#define RST_BUS_TCON_LCD0 52
+#define RST_BUS_TCON_TV 53
+#define RST_BUS_LVDS0 54
+#define RST_BUS_TVE 55
+#define RST_BUS_TVE_TOP 56
+#define RST_BUS_TVD 57
+#define RST_BUS_TVD_TOP 58
+#define RST_BUS_LEDC 59
+#define RST_BUS_CSI 60
+#define RST_BUS_TPADC 61
+#define RST_DSP 62
+#define RST_BUS_DSP_CFG 63
+#define RST_BUS_DSP_DBG 64
+#define RST_BUS_RISCV_CFG 65
+#define RST_BUS_CAN0 66
+#define RST_BUS_CAN1 67
+
+#endif /* _DT_BINDINGS_RST_SUN20I_D1_CCU_H_ */
diff --git a/include/dt-bindings/reset/sun20i-d1-r-ccu.h b/include/dt-bindings/reset/sun20i-d1-r-ccu.h
new file mode 100644
index 000000000000..e20babc990af
--- /dev/null
+++ b/include/dt-bindings/reset/sun20i-d1-r-ccu.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN20I_D1_R_CCU_H_
+#define _DT_BINDINGS_RST_SUN20I_D1_R_CCU_H_
+
+#define RST_BUS_R_TIMER 0
+#define RST_BUS_R_TWD 1
+#define RST_BUS_R_PPU 2
+#define RST_BUS_R_IR_RX 3
+#define RST_BUS_R_RTC 4
+#define RST_BUS_R_CPUCFG 5
+
+#endif /* _DT_BINDINGS_RST_SUN20I_D1_R_CCU_H_ */
diff --git a/include/dt-bindings/reset/sun50i-a100-ccu.h b/include/dt-bindings/reset/sun50i-a100-ccu.h
index 55c0ada99885..d13764bc1860 100644
--- a/include/dt-bindings/reset/sun50i-a100-ccu.h
+++ b/include/dt-bindings/reset/sun50i-a100-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
*/
diff --git a/include/dt-bindings/reset/sun50i-a100-r-ccu.h b/include/dt-bindings/reset/sun50i-a100-r-ccu.h
index 737bf6f66626..1e7c4431f03c 100644
--- a/include/dt-bindings/reset/sun50i-a100-r-ccu.h
+++ b/include/dt-bindings/reset/sun50i-a100-r-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
*/
diff --git a/include/dt-bindings/reset/sun50i-h6-ccu.h b/include/dt-bindings/reset/sun50i-h6-ccu.h
index 81106f455097..d038ddfa4818 100644
--- a/include/dt-bindings/reset/sun50i-h6-ccu.h
+++ b/include/dt-bindings/reset/sun50i-h6-ccu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ or MIT)
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
*/
diff --git a/include/dt-bindings/reset/sun50i-h6-r-ccu.h b/include/dt-bindings/reset/sun50i-h6-r-ccu.h
index 7950e799c76d..d541ade884fc 100644
--- a/include/dt-bindings/reset/sun50i-h6-r-ccu.h
+++ b/include/dt-bindings/reset/sun50i-h6-r-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2016 Icenowy Zheng <icenowy@aosc.xyz>
*/
diff --git a/include/dt-bindings/reset/sun50i-h616-ccu.h b/include/dt-bindings/reset/sun50i-h616-ccu.h
index cb6285a8d128..ba626f7015b5 100644
--- a/include/dt-bindings/reset/sun50i-h616-ccu.h
+++ b/include/dt-bindings/reset/sun50i-h616-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2020 Arm Ltd.
*/
@@ -66,5 +66,9 @@
#define RST_BUS_TVE0 57
#define RST_BUS_HDCP 58
#define RST_BUS_KEYADC 59
+#define RST_BUS_GPADC 60
+#define RST_BUS_TCON_LCD0 61
+#define RST_BUS_TCON_LCD1 62
+#define RST_BUS_LVDS 63
#endif /* _DT_BINDINGS_RESET_SUN50I_H616_H_ */
diff --git a/include/dt-bindings/reset/sun55i-a523-ccu.h b/include/dt-bindings/reset/sun55i-a523-ccu.h
new file mode 100644
index 000000000000..70df503f34fe
--- /dev/null
+++ b/include/dt-bindings/reset/sun55i-a523-ccu.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2024 Arm Ltd.
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN55I_A523_CCU_H_
+#define _DT_BINDINGS_RST_SUN55I_A523_CCU_H_
+
+#define RST_MBUS 0
+#define RST_BUS_NSI 1
+#define RST_BUS_DE 2
+#define RST_BUS_DI 3
+#define RST_BUS_G2D 4
+#define RST_BUS_SYS 5
+#define RST_BUS_GPU 6
+#define RST_BUS_CE 7
+#define RST_BUS_SYS_CE 8
+#define RST_BUS_VE 9
+#define RST_BUS_DMA 10
+#define RST_BUS_MSGBOX 11
+#define RST_BUS_SPINLOCK 12
+#define RST_BUS_CPUXTIMER 13
+#define RST_BUS_DBG 14
+#define RST_BUS_PWM0 15
+#define RST_BUS_PWM1 16
+#define RST_BUS_DRAM 17
+#define RST_BUS_NAND 18
+#define RST_BUS_MMC0 19
+#define RST_BUS_MMC1 20
+#define RST_BUS_MMC2 21
+#define RST_BUS_SYSDAP 22
+#define RST_BUS_UART0 23
+#define RST_BUS_UART1 24
+#define RST_BUS_UART2 25
+#define RST_BUS_UART3 26
+#define RST_BUS_UART4 27
+#define RST_BUS_UART5 28
+#define RST_BUS_UART6 29
+#define RST_BUS_UART7 30
+#define RST_BUS_I2C0 31
+#define RST_BUS_I2C1 32
+#define RST_BUS_I2C2 33
+#define RST_BUS_I2C3 34
+#define RST_BUS_I2C4 35
+#define RST_BUS_I2C5 36
+#define RST_BUS_CAN 37
+#define RST_BUS_SPI0 38
+#define RST_BUS_SPI1 39
+#define RST_BUS_SPI2 40
+#define RST_BUS_SPIFC 41
+#define RST_BUS_EMAC0 42
+#define RST_BUS_EMAC1 43
+#define RST_BUS_IR_RX 44
+#define RST_BUS_IR_TX 45
+#define RST_BUS_GPADC0 46
+#define RST_BUS_GPADC1 47
+#define RST_BUS_THS 48
+#define RST_USB_PHY0 49
+#define RST_USB_PHY1 50
+#define RST_BUS_OHCI0 51
+#define RST_BUS_OHCI1 52
+#define RST_BUS_EHCI0 53
+#define RST_BUS_EHCI1 54
+#define RST_BUS_OTG 55
+#define RST_BUS_3 56
+#define RST_BUS_LRADC 57
+#define RST_BUS_PCIE_USB3 58
+#define RST_BUS_DISPLAY0_TOP 59
+#define RST_BUS_DISPLAY1_TOP 60
+#define RST_BUS_HDMI_MAIN 61
+#define RST_BUS_HDMI_SUB 62
+#define RST_BUS_MIPI_DSI0 63
+#define RST_BUS_MIPI_DSI1 64
+#define RST_BUS_TCON_LCD0 65
+#define RST_BUS_TCON_LCD1 66
+#define RST_BUS_TCON_LCD2 67
+#define RST_BUS_TCON_TV0 68
+#define RST_BUS_TCON_TV1 69
+#define RST_BUS_LVDS0 70
+#define RST_BUS_LVDS1 71
+#define RST_BUS_EDP 72
+#define RST_BUS_VIDEO_OUT0 73
+#define RST_BUS_VIDEO_OUT1 74
+#define RST_BUS_LEDC 75
+#define RST_BUS_CSI 76
+#define RST_BUS_ISP 77
+
+#endif /* _DT_BINDINGS_RST_SUN55I_A523_CCU_H_ */
diff --git a/include/dt-bindings/reset/sun55i-a523-mcu-ccu.h b/include/dt-bindings/reset/sun55i-a523-mcu-ccu.h
new file mode 100644
index 000000000000..a89a0b44f08b
--- /dev/null
+++ b/include/dt-bindings/reset/sun55i-a523-mcu-ccu.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2025 Chen-Yu Tsai <wens@csie.org>
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN55I_A523_MCU_CCU_H_
+#define _DT_BINDINGS_RST_SUN55I_A523_MCU_CCU_H_
+
+#define RST_BUS_MCU_I2S0 0
+#define RST_BUS_MCU_I2S1 1
+#define RST_BUS_MCU_I2S2 2
+#define RST_BUS_MCU_I2S3 3
+#define RST_BUS_MCU_SPDIF 4
+#define RST_BUS_MCU_DMIC 5
+#define RST_BUS_MCU_AUDIO_CODEC 6
+#define RST_BUS_MCU_DSP_MSGBOX 7
+#define RST_BUS_MCU_DSP_CFG 8
+#define RST_BUS_MCU_NPU 9
+#define RST_BUS_MCU_TIMER 10
+#define RST_BUS_MCU_DSP_DEBUG 11
+#define RST_BUS_MCU_DSP 12
+#define RST_BUS_MCU_DMA 13
+#define RST_BUS_MCU_PUBSRAM 14
+#define RST_BUS_MCU_RISCV_CFG 15
+#define RST_BUS_MCU_RISCV_DEBUG 16
+#define RST_BUS_MCU_RISCV_CORE 17
+#define RST_BUS_MCU_RISCV_MSGBOX 18
+#define RST_BUS_MCU_PWM0 19
+
+#endif /* _DT_BINDINGS_RST_SUN55I_A523_MCU_CCU_H_ */
diff --git a/include/dt-bindings/reset/sun55i-a523-r-ccu.h b/include/dt-bindings/reset/sun55i-a523-r-ccu.h
new file mode 100644
index 000000000000..eb31ae9958d6
--- /dev/null
+++ b/include/dt-bindings/reset/sun55i-a523-r-ccu.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2024 Arm Ltd.
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN55I_A523_R_CCU_H_
+#define _DT_BINDINGS_RST_SUN55I_A523_R_CCU_H_
+
+#define RST_BUS_R_TIMER 0
+#define RST_BUS_R_TWD 1
+#define RST_BUS_R_PWMCTRL 2
+#define RST_BUS_R_SPI 3
+#define RST_BUS_R_SPINLOCK 4
+#define RST_BUS_R_MSGBOX 5
+#define RST_BUS_R_UART0 6
+#define RST_BUS_R_UART1 7
+#define RST_BUS_R_I2C0 8
+#define RST_BUS_R_I2C1 9
+#define RST_BUS_R_I2C2 10
+#define RST_BUS_R_PPU1 11
+#define RST_BUS_R_IR_RX 12
+#define RST_BUS_R_RTC 13
+#define RST_BUS_R_CPUCFG 14
+#define RST_BUS_R_PPU0 15
+
+#endif /* _DT_BINDINGS_RST_SUN55I_A523_R_CCU_H_ */
diff --git a/include/dt-bindings/reset/sunplus,sp7021-reset.h b/include/dt-bindings/reset/sunplus,sp7021-reset.h
new file mode 100644
index 000000000000..ab486707387f
--- /dev/null
+++ b/include/dt-bindings/reset/sunplus,sp7021-reset.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) Sunplus Technology Co., Ltd.
+ * All rights reserved.
+ */
+#ifndef _DT_BINDINGS_RST_SUNPLUS_SP7021_H
+#define _DT_BINDINGS_RST_SUNPLUS_SP7021_H
+
+#define RST_SYSTEM 0
+#define RST_RTC 1
+#define RST_IOCTL 2
+#define RST_IOP 3
+#define RST_OTPRX 4
+#define RST_NOC 5
+#define RST_BR 6
+#define RST_RBUS_L00 7
+#define RST_SPIFL 8
+#define RST_SDCTRL0 9
+#define RST_PERI0 10
+#define RST_A926 11
+#define RST_UMCTL2 12
+#define RST_PERI1 13
+#define RST_DDR_PHY0 14
+#define RST_ACHIP 15
+#define RST_STC0 16
+#define RST_STC_AV0 17
+#define RST_STC_AV1 18
+#define RST_STC_AV2 19
+#define RST_UA0 20
+#define RST_UA1 21
+#define RST_UA2 22
+#define RST_UA3 23
+#define RST_UA4 24
+#define RST_HWUA 25
+#define RST_DDC0 26
+#define RST_UADMA 27
+#define RST_CBDMA0 28
+#define RST_CBDMA1 29
+#define RST_SPI_COMBO_0 30
+#define RST_SPI_COMBO_1 31
+#define RST_SPI_COMBO_2 32
+#define RST_SPI_COMBO_3 33
+#define RST_AUD 34
+#define RST_USBC0 35
+#define RST_USBC1 36
+#define RST_UPHY0 37
+#define RST_UPHY1 38
+#define RST_I2CM0 39
+#define RST_I2CM1 40
+#define RST_I2CM2 41
+#define RST_I2CM3 42
+#define RST_PMC 43
+#define RST_CARD_CTL0 44
+#define RST_CARD_CTL1 45
+#define RST_CARD_CTL4 46
+#define RST_BCH 47
+#define RST_DDFCH 48
+#define RST_CSIIW0 49
+#define RST_CSIIW1 50
+#define RST_MIPICSI0 51
+#define RST_MIPICSI1 52
+#define RST_HDMI_TX 53
+#define RST_VPOST 54
+#define RST_TGEN 55
+#define RST_DMIX 56
+#define RST_TCON 57
+#define RST_INTERRUPT 58
+#define RST_RGST 59
+#define RST_GPIO 60
+#define RST_RBUS_TOP 61
+#define RST_MAILBOX 62
+#define RST_SPIND 63
+#define RST_I2C2CBUS 64
+#define RST_SEC 65
+#define RST_DVE 66
+#define RST_GPOST0 67
+#define RST_OSD0 68
+#define RST_DISP_PWM 69
+#define RST_UADBG 70
+#define RST_DUMMY_MASTER 71
+#define RST_FIO_CTL 72
+#define RST_FPGA 73
+#define RST_L2SW 74
+#define RST_ICM 75
+#define RST_AXI_GLOBAL 76
+
+#endif
diff --git a/include/dt-bindings/reset/tegra234-reset.h b/include/dt-bindings/reset/tegra234-reset.h
index b3c63be06d2d..85cc423a7bdf 100644
--- a/include/dt-bindings/reset/tegra234-reset.h
+++ b/include/dt-bindings/reset/tegra234-reset.h
@@ -1,10 +1,182 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. */
+/* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. */
#ifndef DT_BINDINGS_RESET_TEGRA234_RESET_H
#define DT_BINDINGS_RESET_TEGRA234_RESET_H
-#define TEGRA234_RESET_SDMMC4 85
-#define TEGRA234_RESET_UARTA 100
+/**
+ * @file
+ * @defgroup bpmp_reset_ids Reset IDs
+ * @brief Identifiers for resets controllable by firmware
+ * @{
+ */
+#define TEGRA234_RESET_ACTMON 1U
+#define TEGRA234_RESET_ADSP_ALL 2U
+#define TEGRA234_RESET_DSI_CORE 3U
+#define TEGRA234_RESET_CAN1 4U
+#define TEGRA234_RESET_CAN2 5U
+#define TEGRA234_RESET_DLA0 6U
+#define TEGRA234_RESET_DLA1 7U
+#define TEGRA234_RESET_DPAUX 8U
+#define TEGRA234_RESET_OFA 9U
+#define TEGRA234_RESET_NVJPG1 10U
+#define TEGRA234_RESET_PEX1_CORE_6 11U
+#define TEGRA234_RESET_PEX1_CORE_6_APB 12U
+#define TEGRA234_RESET_PEX1_COMMON_APB 13U
+#define TEGRA234_RESET_PEX2_CORE_7 14U
+#define TEGRA234_RESET_PEX2_CORE_7_APB 15U
+#define TEGRA234_RESET_NVDISPLAY 16U
+#define TEGRA234_RESET_EQOS 17U
+#define TEGRA234_RESET_GPCDMA 18U
+#define TEGRA234_RESET_GPU 19U
+#define TEGRA234_RESET_HDA 20U
+#define TEGRA234_RESET_HDACODEC 21U
+#define TEGRA234_RESET_EQOS_MACSEC 22U
+#define TEGRA234_RESET_EQOS_MACSEC_SECURE 23U
+#define TEGRA234_RESET_I2C1 24U
+#define TEGRA234_RESET_PEX2_CORE_8 25U
+#define TEGRA234_RESET_PEX2_CORE_8_APB 26U
+#define TEGRA234_RESET_PEX2_CORE_9 27U
+#define TEGRA234_RESET_PEX2_CORE_9_APB 28U
+#define TEGRA234_RESET_I2C2 29U
+#define TEGRA234_RESET_I2C3 30U
+#define TEGRA234_RESET_I2C4 31U
+#define TEGRA234_RESET_I2C6 32U
+#define TEGRA234_RESET_I2C7 33U
+#define TEGRA234_RESET_I2C8 34U
+#define TEGRA234_RESET_I2C9 35U
+#define TEGRA234_RESET_ISP 36U
+#define TEGRA234_RESET_MIPI_CAL 37U
+#define TEGRA234_RESET_MPHY_CLK_CTL 38U
+#define TEGRA234_RESET_MPHY_L0_RX 39U
+#define TEGRA234_RESET_MPHY_L0_TX 40U
+#define TEGRA234_RESET_MPHY_L1_RX 41U
+#define TEGRA234_RESET_MPHY_L1_TX 42U
+#define TEGRA234_RESET_NVCSI 43U
+#define TEGRA234_RESET_NVDEC 44U
+#define TEGRA234_RESET_MGBE0_PCS 45U
+#define TEGRA234_RESET_MGBE0_MAC 46U
+#define TEGRA234_RESET_MGBE0_MACSEC 47U
+#define TEGRA234_RESET_MGBE0_MACSEC_SECURE 48U
+#define TEGRA234_RESET_MGBE1_PCS 49U
+#define TEGRA234_RESET_MGBE1_MAC 50U
+#define TEGRA234_RESET_MGBE1_MACSEC 51U
+#define TEGRA234_RESET_MGBE1_MACSEC_SECURE 52U
+#define TEGRA234_RESET_MGBE2_PCS 53U
+#define TEGRA234_RESET_MGBE2_MAC 54U
+#define TEGRA234_RESET_MGBE2_MACSEC 55U
+#define TEGRA234_RESET_PEX2_CORE_10 56U
+#define TEGRA234_RESET_PEX2_CORE_10_APB 57U
+#define TEGRA234_RESET_PEX2_COMMON_APB 58U
+#define TEGRA234_RESET_NVENC 59U
+#define TEGRA234_RESET_MGBE2_MACSEC_SECURE 60U
+#define TEGRA234_RESET_NVJPG 61U
+#define TEGRA234_RESET_LA 64U
+#define TEGRA234_RESET_HWPM 65U
+#define TEGRA234_RESET_PVA0_ALL 66U
+#define TEGRA234_RESET_CEC 67U
+#define TEGRA234_RESET_PWM1 68U
+#define TEGRA234_RESET_PWM2 69U
+#define TEGRA234_RESET_PWM3 70U
+#define TEGRA234_RESET_PWM4 71U
+#define TEGRA234_RESET_PWM5 72U
+#define TEGRA234_RESET_PWM6 73U
+#define TEGRA234_RESET_PWM7 74U
+#define TEGRA234_RESET_PWM8 75U
+#define TEGRA234_RESET_QSPI0 76U
+#define TEGRA234_RESET_QSPI1 77U
+#define TEGRA234_RESET_I2S7 78U
+#define TEGRA234_RESET_I2S8 79U
+#define TEGRA234_RESET_SCE_ALL 80U
+#define TEGRA234_RESET_RCE_ALL 81U
+#define TEGRA234_RESET_SDMMC1 82U
+#define TEGRA234_RESET_RSVD_83 83U
+#define TEGRA234_RESET_RSVD_84 84U
+#define TEGRA234_RESET_SDMMC4 85U
+#define TEGRA234_RESET_MGBE3_PCS 87U
+#define TEGRA234_RESET_MGBE3_MAC 88U
+#define TEGRA234_RESET_MGBE3_MACSEC 89U
+#define TEGRA234_RESET_MGBE3_MACSEC_SECURE 90U
+#define TEGRA234_RESET_SPI1 91U
+#define TEGRA234_RESET_SPI2 92U
+#define TEGRA234_RESET_SPI3 93U
+#define TEGRA234_RESET_SPI4 94U
+#define TEGRA234_RESET_TACH0 95U
+#define TEGRA234_RESET_TACH1 96U
+#define TEGRA234_RESET_SPI5 97U
+#define TEGRA234_RESET_TSEC 98U
+#define TEGRA234_RESET_UARTI 99U
+#define TEGRA234_RESET_UARTA 100U
+#define TEGRA234_RESET_UARTB 101U
+#define TEGRA234_RESET_UARTC 102U
+#define TEGRA234_RESET_UARTD 103U
+#define TEGRA234_RESET_UARTE 104U
+#define TEGRA234_RESET_UARTF 105U
+#define TEGRA234_RESET_UARTJ 106U
+#define TEGRA234_RESET_UARTH 107U
+#define TEGRA234_RESET_UFSHC 108U
+#define TEGRA234_RESET_UFSHC_AXI_M 109U
+#define TEGRA234_RESET_UFSHC_LP_SEQ 110U
+#define TEGRA234_RESET_RSVD_111 111U
+#define TEGRA234_RESET_VI 112U
+#define TEGRA234_RESET_VIC 113U
+#define TEGRA234_RESET_XUSB_PADCTL 114U
+#define TEGRA234_RESET_VI2 115U
+#define TEGRA234_RESET_PEX0_CORE_0 116U
+#define TEGRA234_RESET_PEX0_CORE_1 117U
+#define TEGRA234_RESET_PEX0_CORE_2 118U
+#define TEGRA234_RESET_PEX0_CORE_3 119U
+#define TEGRA234_RESET_PEX0_CORE_4 120U
+#define TEGRA234_RESET_PEX0_CORE_0_APB 121U
+#define TEGRA234_RESET_PEX0_CORE_1_APB 122U
+#define TEGRA234_RESET_PEX0_CORE_2_APB 123U
+#define TEGRA234_RESET_PEX0_CORE_3_APB 124U
+#define TEGRA234_RESET_PEX0_CORE_4_APB 125U
+#define TEGRA234_RESET_PEX0_COMMON_APB 126U
+#define TEGRA234_RESET_RSVD_127 127U
+#define TEGRA234_RESET_NVHS_UPHY_PLL1 128U
+#define TEGRA234_RESET_PEX1_CORE_5 129U
+#define TEGRA234_RESET_PEX1_CORE_5_APB 130U
+#define TEGRA234_RESET_GBE_UPHY 131U
+#define TEGRA234_RESET_GBE_UPHY_PM 132U
+#define TEGRA234_RESET_NVHS_UPHY 133U
+#define TEGRA234_RESET_NVHS_UPHY_PLL0 134U
+#define TEGRA234_RESET_NVHS_UPHY_L0 135U
+#define TEGRA234_RESET_NVHS_UPHY_L1 136U
+#define TEGRA234_RESET_NVHS_UPHY_L2 137U
+#define TEGRA234_RESET_NVHS_UPHY_L3 138U
+#define TEGRA234_RESET_NVHS_UPHY_L4 139U
+#define TEGRA234_RESET_NVHS_UPHY_L5 140U
+#define TEGRA234_RESET_NVHS_UPHY_L6 141U
+#define TEGRA234_RESET_NVHS_UPHY_L7 142U
+#define TEGRA234_RESET_NVHS_UPHY_PM 143U
+#define TEGRA234_RESET_DMIC5 144U
+#define TEGRA234_RESET_APE 145U
+#define TEGRA234_RESET_PEX_USB_UPHY 146U
+#define TEGRA234_RESET_PEX_USB_UPHY_L0 147U
+#define TEGRA234_RESET_PEX_USB_UPHY_L1 148U
+#define TEGRA234_RESET_PEX_USB_UPHY_L2 149U
+#define TEGRA234_RESET_PEX_USB_UPHY_L3 150U
+#define TEGRA234_RESET_PEX_USB_UPHY_L4 151U
+#define TEGRA234_RESET_PEX_USB_UPHY_L5 152U
+#define TEGRA234_RESET_PEX_USB_UPHY_L6 153U
+#define TEGRA234_RESET_PEX_USB_UPHY_L7 154U
+#define TEGRA234_RESET_PEX_USB_UPHY_PLL0 159U
+#define TEGRA234_RESET_PEX_USB_UPHY_PLL1 160U
+#define TEGRA234_RESET_PEX_USB_UPHY_PLL2 161U
+#define TEGRA234_RESET_PEX_USB_UPHY_PLL3 162U
+#define TEGRA234_RESET_GBE_UPHY_L0 163U
+#define TEGRA234_RESET_GBE_UPHY_L1 164U
+#define TEGRA234_RESET_GBE_UPHY_L2 165U
+#define TEGRA234_RESET_GBE_UPHY_L3 166U
+#define TEGRA234_RESET_GBE_UPHY_L4 167U
+#define TEGRA234_RESET_GBE_UPHY_L5 168U
+#define TEGRA234_RESET_GBE_UPHY_L6 169U
+#define TEGRA234_RESET_GBE_UPHY_L7 170U
+#define TEGRA234_RESET_GBE_UPHY_PLL0 171U
+#define TEGRA234_RESET_GBE_UPHY_PLL1 172U
+#define TEGRA234_RESET_GBE_UPHY_PLL2 173U
+
+/** @} */
#endif
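On Tegra234 these IDs are handed to the BPMP firmware, which performs the actual reset on the device's behalf. A minimal consumer sketch; the &bpmp label and the UART unit address are assumptions:

    #include <dt-bindings/reset/tegra234-reset.h>

    serial@3100000 {
            resets = <&bpmp TEGRA234_RESET_UARTA>;
            reset-names = "serial";
    };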
diff --git a/include/dt-bindings/reset/thead,th1520-reset.h b/include/dt-bindings/reset/thead,th1520-reset.h
new file mode 100644
index 000000000000..ba6805b6b12a
--- /dev/null
+++ b/include/dt-bindings/reset/thead,th1520-reset.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd.
+ * Author: Michal Wilczynski <m.wilczynski@samsung.com>
+ */
+
+#ifndef _DT_BINDINGS_TH1520_RESET_H
+#define _DT_BINDINGS_TH1520_RESET_H
+
+/* AO Subsystem */
+#define TH1520_RESET_ID_SYSTEM 0
+#define TH1520_RESET_ID_RTC_APB 1
+#define TH1520_RESET_ID_RTC_REF 2
+#define TH1520_RESET_ID_AOGPIO_DB 3
+#define TH1520_RESET_ID_AOGPIO_APB 4
+#define TH1520_RESET_ID_AOI2C_APB 5
+#define TH1520_RESET_ID_PVT_APB 6
+#define TH1520_RESET_ID_E902_CORE 7
+#define TH1520_RESET_ID_E902_HAD 8
+#define TH1520_RESET_ID_AOTIMER_APB 9
+#define TH1520_RESET_ID_AOTIMER_CORE 10
+#define TH1520_RESET_ID_AOWDT_APB 11
+#define TH1520_RESET_ID_APSYS 12
+#define TH1520_RESET_ID_NPUSYS 13
+#define TH1520_RESET_ID_DDRSYS 14
+#define TH1520_RESET_ID_AXI_AP2CP 15
+#define TH1520_RESET_ID_AXI_CP2AP 16
+#define TH1520_RESET_ID_AXI_CP2SRAM 17
+#define TH1520_RESET_ID_AUDSYS_CORE 18
+#define TH1520_RESET_ID_AUDSYS_IOPMP 19
+#define TH1520_RESET_ID_AUDSYS 20
+#define TH1520_RESET_ID_DSP0 21
+#define TH1520_RESET_ID_DSP1 22
+#define TH1520_RESET_ID_GPU_MODULE 23
+#define TH1520_RESET_ID_VDEC 24
+#define TH1520_RESET_ID_VENC 25
+#define TH1520_RESET_ID_ADC_APB 26
+#define TH1520_RESET_ID_AUDGPIO_DB 27
+#define TH1520_RESET_ID_AUDGPIO_APB 28
+#define TH1520_RESET_ID_AOUART_IF 29
+#define TH1520_RESET_ID_AOUART_APB 30
+#define TH1520_RESET_ID_SRAM_AXI_P0 31
+#define TH1520_RESET_ID_SRAM_AXI_P1 32
+#define TH1520_RESET_ID_SRAM_AXI_P2 33
+#define TH1520_RESET_ID_SRAM_AXI_P3 34
+#define TH1520_RESET_ID_SRAM_AXI_P4 35
+#define TH1520_RESET_ID_SRAM_AXI_CORE 36
+#define TH1520_RESET_ID_SE 37
+
+/* AP Subsystem */
+#define TH1520_RESET_ID_BROM 0
+#define TH1520_RESET_ID_C910_TOP 1
+#define TH1520_RESET_ID_NPU 2
+#define TH1520_RESET_ID_WDT0 3
+#define TH1520_RESET_ID_WDT1 4
+#define TH1520_RESET_ID_C910_C0 5
+#define TH1520_RESET_ID_C910_C1 6
+#define TH1520_RESET_ID_C910_C2 7
+#define TH1520_RESET_ID_C910_C3 8
+#define TH1520_RESET_ID_CHIP_DBG_CORE 9
+#define TH1520_RESET_ID_CHIP_DBG_AXI 10
+#define TH1520_RESET_ID_AXI4_CPUSYS2_AXI 11
+#define TH1520_RESET_ID_AXI4_CPUSYS2_APB 12
+#define TH1520_RESET_ID_X2H_CPUSYS 13
+#define TH1520_RESET_ID_AHB2_CPUSYS 14
+#define TH1520_RESET_ID_APB3_CPUSYS 15
+#define TH1520_RESET_ID_MBOX0_APB 16
+#define TH1520_RESET_ID_MBOX1_APB 17
+#define TH1520_RESET_ID_MBOX2_APB 18
+#define TH1520_RESET_ID_MBOX3_APB 19
+#define TH1520_RESET_ID_TIMER0_APB 20
+#define TH1520_RESET_ID_TIMER0_CORE 21
+#define TH1520_RESET_ID_TIMER1_APB 22
+#define TH1520_RESET_ID_TIMER1_CORE 23
+#define TH1520_RESET_ID_PERISYS_AHB 24
+#define TH1520_RESET_ID_PERISYS_APB1 25
+#define TH1520_RESET_ID_PERISYS_APB2 26
+#define TH1520_RESET_ID_GMAC0_APB 27
+#define TH1520_RESET_ID_GMAC0_AHB 28
+#define TH1520_RESET_ID_GMAC0_CLKGEN 29
+#define TH1520_RESET_ID_GMAC0_AXI 30
+#define TH1520_RESET_ID_UART0_APB 31
+#define TH1520_RESET_ID_UART0_IF 32
+#define TH1520_RESET_ID_UART1_APB 33
+#define TH1520_RESET_ID_UART1_IF 34
+#define TH1520_RESET_ID_UART2_APB 35
+#define TH1520_RESET_ID_UART2_IF 36
+#define TH1520_RESET_ID_UART3_APB 37
+#define TH1520_RESET_ID_UART3_IF 38
+#define TH1520_RESET_ID_UART4_APB 39
+#define TH1520_RESET_ID_UART4_IF 40
+#define TH1520_RESET_ID_UART5_APB 41
+#define TH1520_RESET_ID_UART5_IF 42
+#define TH1520_RESET_ID_QSPI0_IF 43
+#define TH1520_RESET_ID_QSPI0_APB 44
+#define TH1520_RESET_ID_QSPI1_IF 45
+#define TH1520_RESET_ID_QSPI1_APB 46
+#define TH1520_RESET_ID_SPI_IF 47
+#define TH1520_RESET_ID_SPI_APB 48
+#define TH1520_RESET_ID_I2C0_APB 49
+#define TH1520_RESET_ID_I2C0_CORE 50
+#define TH1520_RESET_ID_I2C1_APB 51
+#define TH1520_RESET_ID_I2C1_CORE 52
+#define TH1520_RESET_ID_I2C2_APB 53
+#define TH1520_RESET_ID_I2C2_CORE 54
+#define TH1520_RESET_ID_I2C3_APB 55
+#define TH1520_RESET_ID_I2C3_CORE 56
+#define TH1520_RESET_ID_I2C4_APB 57
+#define TH1520_RESET_ID_I2C4_CORE 58
+#define TH1520_RESET_ID_I2C5_APB 59
+#define TH1520_RESET_ID_I2C5_CORE 60
+#define TH1520_RESET_ID_GPIO0_DB 61
+#define TH1520_RESET_ID_GPIO0_APB 62
+#define TH1520_RESET_ID_GPIO1_DB 63
+#define TH1520_RESET_ID_GPIO1_APB 64
+#define TH1520_RESET_ID_GPIO2_DB 65
+#define TH1520_RESET_ID_GPIO2_APB 66
+#define TH1520_RESET_ID_PWM_COUNTER 67
+#define TH1520_RESET_ID_PWM_APB 68
+#define TH1520_RESET_ID_PADCTRL0_APB 69
+#define TH1520_RESET_ID_CPU2PERI_X2H 70
+#define TH1520_RESET_ID_CPU2AON_X2H 71
+#define TH1520_RESET_ID_AON2CPU_A2X 72
+#define TH1520_RESET_ID_NPUSYS_AXI 73
+#define TH1520_RESET_ID_NPUSYS_AXI_APB 74
+#define TH1520_RESET_ID_CPU2VP_X2P 75
+#define TH1520_RESET_ID_CPU2VI_X2H 76
+#define TH1520_RESET_ID_BMU_AXI 77
+#define TH1520_RESET_ID_BMU_APB 78
+#define TH1520_RESET_ID_DMAC_CPUSYS_AXI 79
+#define TH1520_RESET_ID_DMAC_CPUSYS_AHB 80
+#define TH1520_RESET_ID_SPINLOCK 81
+#define TH1520_RESET_ID_CFG2TEE 82
+#define TH1520_RESET_ID_DSMART 83
+#define TH1520_RESET_ID_GPIO3_DB 84
+#define TH1520_RESET_ID_GPIO3_APB 85
+#define TH1520_RESET_ID_PERI_I2S 86
+#define TH1520_RESET_ID_PERI_APB3 87
+#define TH1520_RESET_ID_PERI2PERI1_APB 88
+#define TH1520_RESET_ID_VPSYS_APB 89
+#define TH1520_RESET_ID_PERISYS_APB4 90
+#define TH1520_RESET_ID_GMAC1_APB 91
+#define TH1520_RESET_ID_GMAC1_AHB 92
+#define TH1520_RESET_ID_GMAC1_CLKGEN 93
+#define TH1520_RESET_ID_GMAC1_AXI 94
+#define TH1520_RESET_ID_GMAC_AXI 95
+#define TH1520_RESET_ID_GMAC_AXI_APB 96
+#define TH1520_RESET_ID_PADCTRL1_APB 97
+#define TH1520_RESET_ID_VOSYS_AXI 98
+#define TH1520_RESET_ID_VOSYS_AXI_APB 99
+#define TH1520_RESET_ID_VOSYS_AXI_X2X 100
+#define TH1520_RESET_ID_MISC2VP_X2X 101
+#define TH1520_RESET_ID_DSPSYS 102
+#define TH1520_RESET_ID_VISYS 103
+#define TH1520_RESET_ID_VOSYS 104
+#define TH1520_RESET_ID_VPSYS 105
+
+/* DSP Subsystem */
+#define TH1520_RESET_ID_X2X_DSP1 0
+#define TH1520_RESET_ID_X2X_DSP0 1
+#define TH1520_RESET_ID_X2X_SLAVE_DSP1 2
+#define TH1520_RESET_ID_X2X_SLAVE_DSP0 3
+#define TH1520_RESET_ID_DSP0_CORE 4
+#define TH1520_RESET_ID_DSP0_DEBUG 5
+#define TH1520_RESET_ID_DSP0_APB 6
+#define TH1520_RESET_ID_DSP1_CORE 7
+#define TH1520_RESET_ID_DSP1_DEBUG 8
+#define TH1520_RESET_ID_DSP1_APB 9
+#define TH1520_RESET_ID_DSPSYS_APB 10
+#define TH1520_RESET_ID_AXI4_DSPSYS_SLV 11
+#define TH1520_RESET_ID_AXI4_DSPSYS 12
+#define TH1520_RESET_ID_AXI4_DSP_RS 13
+
+/* MISC Subsystem */
+#define TH1520_RESET_ID_EMMC_SDIO_CLKGEN 0
+#define TH1520_RESET_ID_EMMC 1
+#define TH1520_RESET_ID_MISCSYS_AXI 2
+#define TH1520_RESET_ID_MISCSYS_AXI_APB 3
+#define TH1520_RESET_ID_SDIO0 4
+#define TH1520_RESET_ID_SDIO1 5
+#define TH1520_RESET_ID_USB3_APB 6
+#define TH1520_RESET_ID_USB3_PHY 7
+#define TH1520_RESET_ID_USB3_VCC 8
+
+/* VI Subsystem */
+#define TH1520_RESET_ID_ISP0 0
+#define TH1520_RESET_ID_ISP1 1
+#define TH1520_RESET_ID_CSI0_APB 2
+#define TH1520_RESET_ID_CSI1_APB 3
+#define TH1520_RESET_ID_CSI2_APB 4
+#define TH1520_RESET_ID_MIPI_FIFO 5
+#define TH1520_RESET_ID_ISP_VENC_APB 6
+#define TH1520_RESET_ID_VIPRE_APB 7
+#define TH1520_RESET_ID_VIPRE_AXI 8
+#define TH1520_RESET_ID_DW200_APB 9
+#define TH1520_RESET_ID_VISYS3_AXI 10
+#define TH1520_RESET_ID_VISYS2_AXI 11
+#define TH1520_RESET_ID_VISYS1_AXI 12
+#define TH1520_RESET_ID_VISYS_AXI 13
+#define TH1520_RESET_ID_VISYS_APB 14
+#define TH1520_RESET_ID_ISP_VENC_AXI 15
+
+/* VO Subsystem */
+#define TH1520_RESET_ID_GPU 0
+#define TH1520_RESET_ID_GPU_CLKGEN 1
+#define TH1520_RESET_ID_DPU_AHB 5
+#define TH1520_RESET_ID_DPU_AXI 6
+#define TH1520_RESET_ID_DPU_CORE 7
+#define TH1520_RESET_ID_DSI0_APB 8
+#define TH1520_RESET_ID_DSI1_APB 9
+#define TH1520_RESET_ID_HDMI 10
+#define TH1520_RESET_ID_HDMI_APB 11
+#define TH1520_RESET_ID_VOAXI 12
+#define TH1520_RESET_ID_VOAXI_APB 13
+#define TH1520_RESET_ID_X2H_DPU_AXI 14
+#define TH1520_RESET_ID_X2H_DPU_AHB 15
+#define TH1520_RESET_ID_X2H_DPU1_AXI 16
+#define TH1520_RESET_ID_X2H_DPU1_AHB 17
+
+/* VP Subsystem */
+#define TH1520_RESET_ID_VPSYS_AXI_APB 0
+#define TH1520_RESET_ID_VPSYS_AXI 1
+#define TH1520_RESET_ID_FCE_APB 2
+#define TH1520_RESET_ID_FCE_CORE 3
+#define TH1520_RESET_ID_FCE_X2X_MASTER 4
+#define TH1520_RESET_ID_FCE_X2X_SLAVE 5
+#define TH1520_RESET_ID_G2D_APB 6
+#define TH1520_RESET_ID_G2D_ACLK 7
+#define TH1520_RESET_ID_G2D_CORE 8
+#define TH1520_RESET_ID_VDEC_APB 9
+#define TH1520_RESET_ID_VDEC_ACLK 10
+#define TH1520_RESET_ID_VDEC_CORE 11
+#define TH1520_RESET_ID_VENC_APB 12
+#define TH1520_RESET_ID_VENC_CORE 13
+
+#endif /* _DT_BINDINGS_TH1520_RESET_H */
diff --git a/include/dt-bindings/reset/toshiba,tmpv770x.h b/include/dt-bindings/reset/toshiba,tmpv770x.h
new file mode 100644
index 000000000000..9452bef31425
--- /dev/null
+++ b/include/dt-bindings/reset/toshiba,tmpv770x.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_RESET_TOSHIBA_TMPV770X_H_
+#define _DT_BINDINGS_RESET_TOSHIBA_TMPV770X_H_
+
+/* Reset */
+#define TMPV770X_RESET_PIETHER_2P5M 0
+#define TMPV770X_RESET_PIETHER_25M 1
+#define TMPV770X_RESET_PIETHER_50M 2
+#define TMPV770X_RESET_PIETHER_125M 3
+#define TMPV770X_RESET_HOX 4
+#define TMPV770X_RESET_PCIE_MSTR 5
+#define TMPV770X_RESET_PCIE_AUX 6
+#define TMPV770X_RESET_PIINTC 7
+#define TMPV770X_RESET_PIETHER_BUS 8
+#define TMPV770X_RESET_PISPI0 9
+#define TMPV770X_RESET_PISPI1 10
+#define TMPV770X_RESET_PISPI2 11
+#define TMPV770X_RESET_PISPI3 12
+#define TMPV770X_RESET_PISPI4 13
+#define TMPV770X_RESET_PISPI5 14
+#define TMPV770X_RESET_PISPI6 15
+#define TMPV770X_RESET_PIUART0 16
+#define TMPV770X_RESET_PIUART1 17
+#define TMPV770X_RESET_PIUART2 18
+#define TMPV770X_RESET_PIUART3 19
+#define TMPV770X_RESET_PII2C0 20
+#define TMPV770X_RESET_PII2C1 21
+#define TMPV770X_RESET_PII2C2 22
+#define TMPV770X_RESET_PII2C3 23
+#define TMPV770X_RESET_PII2C4 24
+#define TMPV770X_RESET_PII2C5 25
+#define TMPV770X_RESET_PII2C6 26
+#define TMPV770X_RESET_PII2C7 27
+#define TMPV770X_RESET_PII2C8 28
+#define TMPV770X_RESET_PIPCMIF 29
+#define TMPV770X_RESET_PICKMON 30
+#define TMPV770X_RESET_SBUSCLK 31
+#define TMPV770X_RESET_VIIFBS0 32
+#define TMPV770X_RESET_VIIFBS0_APB 33
+#define TMPV770X_RESET_VIIFBS0_L2ISP 34
+#define TMPV770X_RESET_VIIFBS0_L1ISP 35
+#define TMPV770X_RESET_VIIFBS1 36
+#define TMPV770X_RESET_VIIFBS1_APB 37
+#define TMPV770X_RESET_VIIFBS1_L2ISP 38
+#define TMPV770X_RESET_VIIFBS1_L1ISP 39
+
+#endif /* _DT_BINDINGS_RESET_TOSHIBA_TMPV770X_H_ */
diff --git a/include/dt-bindings/soc/cpm1-fsl,tsa.h b/include/dt-bindings/soc/cpm1-fsl,tsa.h
new file mode 100644
index 000000000000..2cc44e867dbe
--- /dev/null
+++ b/include/dt-bindings/soc/cpm1-fsl,tsa.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef __DT_BINDINGS_SOC_FSL_TSA_H
+#define __DT_BINDINGS_SOC_FSL_TSA_H
+
+#define FSL_CPM_TSA_NU 0 /* Pseudo cell ID for an unused item */
+#define FSL_CPM_TSA_SCC2 1
+#define FSL_CPM_TSA_SCC3 2
+#define FSL_CPM_TSA_SCC4 3
+#define FSL_CPM_TSA_SMC1 4
+#define FSL_CPM_TSA_SMC2 5
+
+#endif
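The cell IDs name the serial controller each group of time slots is routed to. A heavily hedged sketch, assuming the TSA binding takes (timeslot-count, cell-ID) pairs in fsl,tx-ts-routes / fsl,rx-ts-routes and that a &tsa label exists:

    &tsa {
            fsl,tx-ts-routes = <2 FSL_CPM_TSA_NU>,    /* first 2 slots unused */
                               <3 FSL_CPM_TSA_SCC2>;  /* next 3 go to SCC2 */
            fsl,rx-ts-routes = <2 FSL_CPM_TSA_NU>,
                               <3 FSL_CPM_TSA_SCC2>;
    };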
diff --git a/include/dt-bindings/soc/qcom,gpr.h b/include/dt-bindings/soc/qcom,gpr.h
new file mode 100644
index 000000000000..3107da59319c
--- /dev/null
+++ b/include/dt-bindings/soc/qcom,gpr.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#ifndef __DT_BINDINGS_QCOM_GPR_H
+#define __DT_BINDINGS_QCOM_GPR_H
+
+/* DOMAINS */
+
+#define GPR_DOMAIN_ID_MODEM 1
+#define GPR_DOMAIN_ID_ADSP 2
+#define GPR_DOMAIN_ID_APPS 3
+
+/* Static Services */
+
+#define GPR_APM_MODULE_IID 1
+#define GPR_PRM_MODULE_IID 2
+#define GPR_AMDB_MODULE_IID 3
+#define GPR_VCPM_MODULE_IID 4
+
+#endif /* __DT_BINDINGS_QCOM_GPR_H */
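The domain IDs identify the processor hosting a GPR endpoint and the module IIDs identify static services within it. A sketch of a GPR node on the ADSP domain; the node layout and the qcom,gpr-domain property name are assumptions drawn from typical Qualcomm audio DTs:

    gpr {
            compatible = "qcom,gpr";
            qcom,gpr-domain = <GPR_DOMAIN_ID_ADSP>;

            service@1 {
                    reg = <GPR_APM_MODULE_IID>;
            };
    };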
diff --git a/include/dt-bindings/soc/qe-fsl,tsa.h b/include/dt-bindings/soc/qe-fsl,tsa.h
new file mode 100644
index 000000000000..3cf3df9c0968
--- /dev/null
+++ b/include/dt-bindings/soc/qe-fsl,tsa.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+
+#ifndef __DT_BINDINGS_SOC_FSL_QE_TSA_H
+#define __DT_BINDINGS_SOC_FSL_QE_TSA_H
+
+#define FSL_QE_TSA_NU 0
+#define FSL_QE_TSA_UCC1 1
+#define FSL_QE_TSA_UCC2 2
+#define FSL_QE_TSA_UCC3 3
+#define FSL_QE_TSA_UCC4 4
+#define FSL_QE_TSA_UCC5 5
+
+#endif
diff --git a/include/dt-bindings/soc/rockchip,vop2.h b/include/dt-bindings/soc/rockchip,vop2.h
new file mode 100644
index 000000000000..668f199df9f0
--- /dev/null
+++ b/include/dt-bindings/soc/rockchip,vop2.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#ifndef __DT_BINDINGS_ROCKCHIP_VOP2_H
+#define __DT_BINDINGS_ROCKCHIP_VOP2_H
+
+#define ROCKCHIP_VOP2_EP_RGB0 1
+#define ROCKCHIP_VOP2_EP_HDMI0 2
+#define ROCKCHIP_VOP2_EP_EDP0 3
+#define ROCKCHIP_VOP2_EP_MIPI0 4
+#define ROCKCHIP_VOP2_EP_LVDS0 5
+#define ROCKCHIP_VOP2_EP_MIPI1 6
+#define ROCKCHIP_VOP2_EP_LVDS1 7
+#define ROCKCHIP_VOP2_EP_HDMI1 8
+#define ROCKCHIP_VOP2_EP_EDP1 9
+#define ROCKCHIP_VOP2_EP_DP0 10
+#define ROCKCHIP_VOP2_EP_DP1 11
+
+#endif /* __DT_BINDINGS_ROCKCHIP_VOP2_H */
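Each ID doubles as the reg of a VOP2 output-port endpoint, so the graph link to an encoder is self-describing. Sketch, with the &vop and &hdmi0_in_vp0 labels assumed:

    &vop {
            port@0 {
                    reg = <0>;

                    vp0_out_hdmi0: endpoint@ROCKCHIP_VOP2_EP_HDMI0 {
                            reg = <ROCKCHIP_VOP2_EP_HDMI0>;
                            remote-endpoint = <&hdmi0_in_vp0>;
                    };
            };
    };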
diff --git a/include/dt-bindings/soc/samsung,boot-mode.h b/include/dt-bindings/soc/samsung,boot-mode.h
new file mode 100644
index 000000000000..47ef1cdd3916
--- /dev/null
+++ b/include/dt-bindings/soc/samsung,boot-mode.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 Samsung Electronics Co., Ltd.
+ * Author: Chanho Park <chanho61.park@samsung.com>
+ *
+ * Device Tree bindings for Samsung Boot Mode.
+ */
+
+#ifndef __DT_BINDINGS_SAMSUNG_BOOT_MODE_H
+#define __DT_BINDINGS_SAMSUNG_BOOT_MODE_H
+
+/* Boot mode definitions for Exynos Auto v9 SoC */
+
+#define EXYNOSAUTOV9_BOOT_FASTBOOT 0xfa
+#define EXYNOSAUTOV9_BOOT_BOOTLOADER 0xfc
+#define EXYNOSAUTOV9_BOOT_RECOVERY 0xff
+
+#endif /* __DT_BINDINGS_SAMSUNG_BOOT_MODE_H */
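These magic values are written to a PMU scratch register so the bootloader can pick a boot path after reset. A sketch built on the generic syscon-reboot-mode binding; the register offset is an assumption:

    reboot-mode {
            compatible = "syscon-reboot-mode";
            offset = <0x0810>;                        /* PMU offset: assumption */
            mode-fastboot = <EXYNOSAUTOV9_BOOT_FASTBOOT>;
            mode-bootloader = <EXYNOSAUTOV9_BOOT_BOOTLOADER>;
            mode-recovery = <EXYNOSAUTOV9_BOOT_RECOVERY>;
    };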
diff --git a/include/dt-bindings/soc/samsung,exynos-usi.h b/include/dt-bindings/soc/samsung,exynos-usi.h
new file mode 100644
index 000000000000..b46de214dd09
--- /dev/null
+++ b/include/dt-bindings/soc/samsung,exynos-usi.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 Linaro Ltd.
+ * Author: Sam Protsenko <semen.protsenko@linaro.org>
+ *
+ * Device Tree bindings for Samsung Exynos USI (Universal Serial Interface).
+ */
+
+#ifndef __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H
+#define __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H
+
+#define USI_MODE_NONE 0
+#define USI_MODE_UART 1
+#define USI_MODE_SPI 2
+#define USI_MODE_I2C 3
+#define USI_MODE_I2C1 4
+#define USI_MODE_I2C0_1 5
+#define USI_MODE_UART_I2C1 6
+
+/* Deprecated */
+#define USI_V2_NONE USI_MODE_NONE
+#define USI_V2_UART USI_MODE_UART
+#define USI_V2_SPI USI_MODE_SPI
+#define USI_V2_I2C USI_MODE_I2C
+
+#endif /* __DT_BINDINGS_SAMSUNG_EXYNOS_USI_H */
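A board selects the protocol of a USI serial engine through one of these modes. Minimal sketch, assuming a &usi0 label and the samsung,mode property from the USI binding:

    &usi0 {
            samsung,mode = <USI_MODE_UART>;
            status = "okay";
    };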
diff --git a/include/dt-bindings/soc/zte,pm_domains.h b/include/dt-bindings/soc/zte,pm_domains.h
deleted file mode 100644
index df044705a5ec..000000000000
--- a/include/dt-bindings/soc/zte,pm_domains.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 Linaro Ltd.
- *
- * Author: Baoyou Xie <baoyou.xie@linaro.org>
- */
-
-#ifndef _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H
-#define _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H
-
-#define DM_ZX296718_SAPPU 0
-#define DM_ZX296718_VDE 1 /* g1v6 */
-#define DM_ZX296718_VCE 2 /* h1v6 */
-#define DM_ZX296718_HDE 3 /* g2v2 */
-#define DM_ZX296718_VIU 4
-#define DM_ZX296718_USB20 5
-#define DM_ZX296718_USB21 6
-#define DM_ZX296718_USB30 7
-#define DM_ZX296718_HSIC 8
-#define DM_ZX296718_GMAC 9
-#define DM_ZX296718_TS 10
-#define DM_ZX296718_VOU 11
-
-#endif /* _DT_BINDINGS_SOC_ZTE_PM_DOMAINS_H */
diff --git a/include/dt-bindings/sound/audio-graph.h b/include/dt-bindings/sound/audio-graph.h
new file mode 100644
index 000000000000..bdb70c6b7332
--- /dev/null
+++ b/include/dt-bindings/sound/audio-graph.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * audio-graph.h
+ *
+ * Copyright (c) 2024 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ */
+#ifndef __AUDIO_GRAPH_H
+#define __AUDIO_GRAPH_H
+
+/*
+ * used in
+ * link-trigger-order
+ * link-trigger-order-start
+ * link-trigger-order-stop
+ *
+ * default is
+ * link-trigger-order = <SND_SOC_TRIGGER_LINK
+ * SND_SOC_TRIGGER_COMPONENT
+ * SND_SOC_TRIGGER_DAI>;
+ */
+#define SND_SOC_TRIGGER_LINK 0
+#define SND_SOC_TRIGGER_COMPONENT 1
+#define SND_SOC_TRIGGER_DAI 2
+#define SND_SOC_TRIGGER_SIZE 3 /* should be last */
+
+#endif /* __AUDIO_GRAPH_H */
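A card can use these to reorder the start/stop trigger callbacks per DAI link, as the comment above describes. Sketch; the sound-card compatible is an assumption:

    sound {
            compatible = "audio-graph-card2";
            link-trigger-order-start = <SND_SOC_TRIGGER_COMPONENT
                                        SND_SOC_TRIGGER_LINK
                                        SND_SOC_TRIGGER_DAI>;
    };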
diff --git a/include/dt-bindings/sound/cs35l45.h b/include/dt-bindings/sound/cs35l45.h
new file mode 100644
index 000000000000..25386af18445
--- /dev/null
+++ b/include/dt-bindings/sound/cs35l45.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * cs35l45.h -- CS35L45 ALSA SoC audio driver DT bindings header
+ *
+ * Copyright 2022 Cirrus Logic, Inc.
+ */
+
+#ifndef DT_CS35L45_H
+#define DT_CS35L45_H
+
+/*
+ * cirrus,asp-sdout-hiz-ctrl
+ *
+ * TX_HIZ_UNUSED: TX pin high-impedance during unused slots.
+ * TX_HIZ_DISABLED: TX pin high-impedance when all channels disabled.
+ */
+#define CS35L45_ASP_TX_HIZ_UNUSED 0x1
+#define CS35L45_ASP_TX_HIZ_DISABLED 0x2
+
+/*
+ * Optional GPIOX Sub-nodes:
+ * The cs35l45 node can have up to three "cirrus,gpio-ctrlX" ('X' = [1,2,3])
+ * sub-nodes for configuring the GPIO pins.
+ *
+ * - gpio-dir : GPIO pin direction. Valid only when 'gpio-ctrl'
+ * is 1.
+ * 0 = Output
+ * 1 = Input (Default)
+ *
+ * - gpio-lvl : GPIO level. Valid only when 'gpio-ctrl' is 1 and 'gpio-dir' is 0.
+ *
+ * 0 = Low (Default)
+ * 1 = High
+ *
+ * - gpio-op-cfg : GPIO output configuration. Valid only when 'gpio-ctrl' is 1
+ * and 'gpio-dir' is 0.
+ *
+ * 0 = CMOS (Default)
+ * 1 = Open Drain
+ *
+ * - gpio-pol : GPIO output polarity select. Valid only when 'gpio-ctrl' is 1
+ * and 'gpio-dir' is 0.
+ *
+ * 0 = Non-inverted, Active High (Default)
+ * 1 = Inverted, Active Low
+ *
+ * - gpio-invert : Defines the polarity of the GPIO pin if configured
+ * as input.
+ *
+ * 0 = Not inverted (Default)
+ * 1 = Inverted
+ *
+ * - gpio-ctrl : Defines the function of the GPIO pin.
+ *
+ * GPIO1:
+ * 0 = High impedance input (Default)
+ * 1 = Pin acts as a GPIO, direction controlled by 'gpio-dir'
+ * 2 = Pin acts as MDSYNC, direction controlled by MDSYNC
+ * 3-7 = Reserved
+ *
+ * GPIO2:
+ * 0 = High impedance input (Default)
+ * 1 = Pin acts as a GPIO, direction controlled by 'gpio-dir'
+ * 2 = Pin acts as open drain INT
+ * 3 = Reserved
+ * 4 = Pin acts as push-pull output INT. Active low.
+ * 5 = Pin acts as push-pull output INT. Active high.
+ * 6,7 = Reserved
+ *
+ * GPIO3:
+ * 0 = High impedance input (Default)
+ * 1 = Pin acts as a GPIO, direction controlled by 'gpio-dir'
+ * 2-7 = Reserved
+ */
+#define CS35L45_NUM_GPIOS 0x3
+
+#endif /* DT_CS35L45_H */
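Putting the documented sub-node properties together, a sketch that routes GPIO2 as a push-pull, active-low interrupt output; the device's I2C address is an assumption:

    cs35l45: speaker-amp@30 {
            compatible = "cirrus,cs35l45";
            reg = <0x30>;

            cirrus,gpio-ctrl2 {
                    gpio-ctrl = <0x4>;   /* push-pull INT output, active low */
            };
    };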
diff --git a/include/dt-bindings/sound/cs48l32.h b/include/dt-bindings/sound/cs48l32.h
new file mode 100644
index 000000000000..4e82260fff67
--- /dev/null
+++ b/include/dt-bindings/sound/cs48l32.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Device Tree defines for CS48L32 DSP.
+ *
+ * Copyright (C) 2016-2018, 2022, 2025 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef DT_BINDINGS_SOUND_CS48L32_H
+#define DT_BINDINGS_SOUND_CS48L32_H
+
+/* Values for cirrus,in-type */
+#define CS48L32_IN_TYPE_DIFF 0
+#define CS48L32_IN_TYPE_SE 1
+
+/* Values for cirrus,pdm-sup */
+#define CS48L32_PDM_SUP_VOUT_MIC 0
+#define CS48L32_PDM_SUP_MICBIAS1 1
+
+#endif
diff --git a/include/dt-bindings/sound/microchip,pdmc.h b/include/dt-bindings/sound/microchip,pdmc.h
new file mode 100644
index 000000000000..96cde94ce74f
--- /dev/null
+++ b/include/dt-bindings/sound/microchip,pdmc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_MICROCHIP_PDMC_H__
+#define __DT_BINDINGS_MICROCHIP_PDMC_H__
+
+/* PDM microphone's pin placement */
+#define MCHP_PDMC_DS0 0
+#define MCHP_PDMC_DS1 1
+
+/* PDM microphone clock edge sampling */
+#define MCHP_PDMC_CLK_POSITIVE 0
+#define MCHP_PDMC_CLK_NEGATIVE 1
+
+#endif /* __DT_BINDINGS_MICROCHIP_PDMC_H__ */
diff --git a/include/dt-bindings/sound/qcom,lpass.h b/include/dt-bindings/sound/qcom,lpass.h
index 7b0b80b38699..a9404c3b8884 100644
--- a/include/dt-bindings/sound/qcom,lpass.h
+++ b/include/dt-bindings/sound/qcom,lpass.h
@@ -10,6 +10,37 @@
#define LPASS_DP_RX 5
+#define LPASS_CDC_DMA_RX0 6
+#define LPASS_CDC_DMA_RX1 7
+#define LPASS_CDC_DMA_RX2 8
+#define LPASS_CDC_DMA_RX3 9
+#define LPASS_CDC_DMA_RX4 10
+#define LPASS_CDC_DMA_RX5 11
+#define LPASS_CDC_DMA_RX6 12
+#define LPASS_CDC_DMA_RX7 13
+#define LPASS_CDC_DMA_RX8 14
+#define LPASS_CDC_DMA_RX9 15
+
+#define LPASS_CDC_DMA_TX0 16
+#define LPASS_CDC_DMA_TX1 17
+#define LPASS_CDC_DMA_TX2 18
+#define LPASS_CDC_DMA_TX3 19
+#define LPASS_CDC_DMA_TX4 20
+#define LPASS_CDC_DMA_TX5 21
+#define LPASS_CDC_DMA_TX6 22
+#define LPASS_CDC_DMA_TX7 23
+#define LPASS_CDC_DMA_TX8 24
+
+#define LPASS_CDC_DMA_VA_TX0 25
+#define LPASS_CDC_DMA_VA_TX1 26
+#define LPASS_CDC_DMA_VA_TX2 27
+#define LPASS_CDC_DMA_VA_TX3 28
+#define LPASS_CDC_DMA_VA_TX4 29
+#define LPASS_CDC_DMA_VA_TX5 30
+#define LPASS_CDC_DMA_VA_TX6 31
+#define LPASS_CDC_DMA_VA_TX7 32
+#define LPASS_CDC_DMA_VA_TX8 33
+
#define LPASS_MCLK0 0
#endif /* __DT_QCOM_LPASS_H */
diff --git a/include/dt-bindings/sound/qcom,q6afe.h b/include/dt-bindings/sound/qcom,q6afe.h
index f64b5d2e6efd..9d5d89cfabcf 100644
--- a/include/dt-bindings/sound/qcom,q6afe.h
+++ b/include/dt-bindings/sound/qcom,q6afe.h
@@ -2,205 +2,8 @@
#ifndef __DT_BINDINGS_Q6_AFE_H__
#define __DT_BINDINGS_Q6_AFE_H__
-/* Audio Front End (AFE) virtual ports IDs */
-#define HDMI_RX 1
-#define SLIMBUS_0_RX 2
-#define SLIMBUS_0_TX 3
-#define SLIMBUS_1_RX 4
-#define SLIMBUS_1_TX 5
-#define SLIMBUS_2_RX 6
-#define SLIMBUS_2_TX 7
-#define SLIMBUS_3_RX 8
-#define SLIMBUS_3_TX 9
-#define SLIMBUS_4_RX 10
-#define SLIMBUS_4_TX 11
-#define SLIMBUS_5_RX 12
-#define SLIMBUS_5_TX 13
-#define SLIMBUS_6_RX 14
-#define SLIMBUS_6_TX 15
-#define PRIMARY_MI2S_RX 16
-#define PRIMARY_MI2S_TX 17
-#define SECONDARY_MI2S_RX 18
-#define SECONDARY_MI2S_TX 19
-#define TERTIARY_MI2S_RX 20
-#define TERTIARY_MI2S_TX 21
-#define QUATERNARY_MI2S_RX 22
-#define QUATERNARY_MI2S_TX 23
-#define PRIMARY_TDM_RX_0 24
-#define PRIMARY_TDM_TX_0 25
-#define PRIMARY_TDM_RX_1 26
-#define PRIMARY_TDM_TX_1 27
-#define PRIMARY_TDM_RX_2 28
-#define PRIMARY_TDM_TX_2 29
-#define PRIMARY_TDM_RX_3 30
-#define PRIMARY_TDM_TX_3 31
-#define PRIMARY_TDM_RX_4 32
-#define PRIMARY_TDM_TX_4 33
-#define PRIMARY_TDM_RX_5 34
-#define PRIMARY_TDM_TX_5 35
-#define PRIMARY_TDM_RX_6 36
-#define PRIMARY_TDM_TX_6 37
-#define PRIMARY_TDM_RX_7 38
-#define PRIMARY_TDM_TX_7 39
-#define SECONDARY_TDM_RX_0 40
-#define SECONDARY_TDM_TX_0 41
-#define SECONDARY_TDM_RX_1 42
-#define SECONDARY_TDM_TX_1 43
-#define SECONDARY_TDM_RX_2 44
-#define SECONDARY_TDM_TX_2 45
-#define SECONDARY_TDM_RX_3 46
-#define SECONDARY_TDM_TX_3 47
-#define SECONDARY_TDM_RX_4 48
-#define SECONDARY_TDM_TX_4 49
-#define SECONDARY_TDM_RX_5 50
-#define SECONDARY_TDM_TX_5 51
-#define SECONDARY_TDM_RX_6 52
-#define SECONDARY_TDM_TX_6 53
-#define SECONDARY_TDM_RX_7 54
-#define SECONDARY_TDM_TX_7 55
-#define TERTIARY_TDM_RX_0 56
-#define TERTIARY_TDM_TX_0 57
-#define TERTIARY_TDM_RX_1 58
-#define TERTIARY_TDM_TX_1 59
-#define TERTIARY_TDM_RX_2 60
-#define TERTIARY_TDM_TX_2 61
-#define TERTIARY_TDM_RX_3 62
-#define TERTIARY_TDM_TX_3 63
-#define TERTIARY_TDM_RX_4 64
-#define TERTIARY_TDM_TX_4 65
-#define TERTIARY_TDM_RX_5 66
-#define TERTIARY_TDM_TX_5 67
-#define TERTIARY_TDM_RX_6 68
-#define TERTIARY_TDM_TX_6 69
-#define TERTIARY_TDM_RX_7 70
-#define TERTIARY_TDM_TX_7 71
-#define QUATERNARY_TDM_RX_0 72
-#define QUATERNARY_TDM_TX_0 73
-#define QUATERNARY_TDM_RX_1 74
-#define QUATERNARY_TDM_TX_1 75
-#define QUATERNARY_TDM_RX_2 76
-#define QUATERNARY_TDM_TX_2 77
-#define QUATERNARY_TDM_RX_3 78
-#define QUATERNARY_TDM_TX_3 79
-#define QUATERNARY_TDM_RX_4 80
-#define QUATERNARY_TDM_TX_4 81
-#define QUATERNARY_TDM_RX_5 82
-#define QUATERNARY_TDM_TX_5 83
-#define QUATERNARY_TDM_RX_6 84
-#define QUATERNARY_TDM_TX_6 85
-#define QUATERNARY_TDM_RX_7 86
-#define QUATERNARY_TDM_TX_7 87
-#define QUINARY_TDM_RX_0 88
-#define QUINARY_TDM_TX_0 89
-#define QUINARY_TDM_RX_1 90
-#define QUINARY_TDM_TX_1 91
-#define QUINARY_TDM_RX_2 92
-#define QUINARY_TDM_TX_2 93
-#define QUINARY_TDM_RX_3 94
-#define QUINARY_TDM_TX_3 95
-#define QUINARY_TDM_RX_4 96
-#define QUINARY_TDM_TX_4 97
-#define QUINARY_TDM_RX_5 98
-#define QUINARY_TDM_TX_5 99
-#define QUINARY_TDM_RX_6 100
-#define QUINARY_TDM_TX_6 101
-#define QUINARY_TDM_RX_7 102
-#define QUINARY_TDM_TX_7 103
-#define DISPLAY_PORT_RX 104
-#define WSA_CODEC_DMA_RX_0 105
-#define WSA_CODEC_DMA_TX_0 106
-#define WSA_CODEC_DMA_RX_1 107
-#define WSA_CODEC_DMA_TX_1 108
-#define WSA_CODEC_DMA_TX_2 109
-#define VA_CODEC_DMA_TX_0 110
-#define VA_CODEC_DMA_TX_1 111
-#define VA_CODEC_DMA_TX_2 112
-#define RX_CODEC_DMA_RX_0 113
-#define TX_CODEC_DMA_TX_0 114
-#define RX_CODEC_DMA_RX_1 115
-#define TX_CODEC_DMA_TX_1 116
-#define RX_CODEC_DMA_RX_2 117
-#define TX_CODEC_DMA_TX_2 118
-#define RX_CODEC_DMA_RX_3 119
-#define TX_CODEC_DMA_TX_3 120
-#define RX_CODEC_DMA_RX_4 121
-#define TX_CODEC_DMA_TX_4 122
-#define RX_CODEC_DMA_RX_5 123
-#define TX_CODEC_DMA_TX_5 124
-#define RX_CODEC_DMA_RX_6 125
-#define RX_CODEC_DMA_RX_7 126
+/* This file exists for backward-compatibility reasons; please do not delete it! */
-#define LPASS_CLK_ID_PRI_MI2S_IBIT 1
-#define LPASS_CLK_ID_PRI_MI2S_EBIT 2
-#define LPASS_CLK_ID_SEC_MI2S_IBIT 3
-#define LPASS_CLK_ID_SEC_MI2S_EBIT 4
-#define LPASS_CLK_ID_TER_MI2S_IBIT 5
-#define LPASS_CLK_ID_TER_MI2S_EBIT 6
-#define LPASS_CLK_ID_QUAD_MI2S_IBIT 7
-#define LPASS_CLK_ID_QUAD_MI2S_EBIT 8
-#define LPASS_CLK_ID_SPEAKER_I2S_IBIT 9
-#define LPASS_CLK_ID_SPEAKER_I2S_EBIT 10
-#define LPASS_CLK_ID_SPEAKER_I2S_OSR 11
-#define LPASS_CLK_ID_QUI_MI2S_IBIT 12
-#define LPASS_CLK_ID_QUI_MI2S_EBIT 13
-#define LPASS_CLK_ID_SEN_MI2S_IBIT 14
-#define LPASS_CLK_ID_SEN_MI2S_EBIT 15
-#define LPASS_CLK_ID_INT0_MI2S_IBIT 16
-#define LPASS_CLK_ID_INT1_MI2S_IBIT 17
-#define LPASS_CLK_ID_INT2_MI2S_IBIT 18
-#define LPASS_CLK_ID_INT3_MI2S_IBIT 19
-#define LPASS_CLK_ID_INT4_MI2S_IBIT 20
-#define LPASS_CLK_ID_INT5_MI2S_IBIT 21
-#define LPASS_CLK_ID_INT6_MI2S_IBIT 22
-#define LPASS_CLK_ID_QUI_MI2S_OSR 23
-#define LPASS_CLK_ID_PRI_PCM_IBIT 24
-#define LPASS_CLK_ID_PRI_PCM_EBIT 25
-#define LPASS_CLK_ID_SEC_PCM_IBIT 26
-#define LPASS_CLK_ID_SEC_PCM_EBIT 27
-#define LPASS_CLK_ID_TER_PCM_IBIT 28
-#define LPASS_CLK_ID_TER_PCM_EBIT 29
-#define LPASS_CLK_ID_QUAD_PCM_IBIT 30
-#define LPASS_CLK_ID_QUAD_PCM_EBIT 31
-#define LPASS_CLK_ID_QUIN_PCM_IBIT 32
-#define LPASS_CLK_ID_QUIN_PCM_EBIT 33
-#define LPASS_CLK_ID_QUI_PCM_OSR 34
-#define LPASS_CLK_ID_PRI_TDM_IBIT 35
-#define LPASS_CLK_ID_PRI_TDM_EBIT 36
-#define LPASS_CLK_ID_SEC_TDM_IBIT 37
-#define LPASS_CLK_ID_SEC_TDM_EBIT 38
-#define LPASS_CLK_ID_TER_TDM_IBIT 39
-#define LPASS_CLK_ID_TER_TDM_EBIT 40
-#define LPASS_CLK_ID_QUAD_TDM_IBIT 41
-#define LPASS_CLK_ID_QUAD_TDM_EBIT 42
-#define LPASS_CLK_ID_QUIN_TDM_IBIT 43
-#define LPASS_CLK_ID_QUIN_TDM_EBIT 44
-#define LPASS_CLK_ID_QUIN_TDM_OSR 45
-#define LPASS_CLK_ID_MCLK_1 46
-#define LPASS_CLK_ID_MCLK_2 47
-#define LPASS_CLK_ID_MCLK_3 48
-#define LPASS_CLK_ID_MCLK_4 49
-#define LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE 50
-#define LPASS_CLK_ID_INT_MCLK_0 51
-#define LPASS_CLK_ID_INT_MCLK_1 52
-#define LPASS_CLK_ID_MCLK_5 53
-#define LPASS_CLK_ID_WSA_CORE_MCLK 54
-#define LPASS_CLK_ID_WSA_CORE_NPL_MCLK 55
-#define LPASS_CLK_ID_VA_CORE_MCLK 56
-#define LPASS_CLK_ID_TX_CORE_MCLK 57
-#define LPASS_CLK_ID_TX_CORE_NPL_MCLK 58
-#define LPASS_CLK_ID_RX_CORE_MCLK 59
-#define LPASS_CLK_ID_RX_CORE_NPL_MCLK 60
-#define LPASS_CLK_ID_VA_CORE_2X_MCLK 61
-
-#define LPASS_HW_AVTIMER_VOTE 101
-#define LPASS_HW_MACRO_VOTE 102
-#define LPASS_HW_DCODEC_VOTE 103
-
-#define Q6AFE_MAX_CLK_ID 104
-
-#define LPASS_CLK_ATTRIBUTE_INVALID 0x0
-#define LPASS_CLK_ATTRIBUTE_COUPLE_NO 0x1
-#define LPASS_CLK_ATTRIBUTE_COUPLE_DIVIDEND 0x2
-#define LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR 0x3
+#include <dt-bindings/sound/qcom,q6dsp-lpass-ports.h>
#endif /* __DT_BINDINGS_Q6_AFE_H__ */
diff --git a/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
new file mode 100644
index 000000000000..6d1ce7f5da51
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_BINDINGS_Q6_AUDIO_PORTS_H__
+#define __DT_BINDINGS_Q6_AUDIO_PORTS_H__
+
+/* LPASS Audio virtual ports IDs */
+#define HDMI_RX 1
+#define SLIMBUS_0_RX 2
+#define SLIMBUS_0_TX 3
+#define SLIMBUS_1_RX 4
+#define SLIMBUS_1_TX 5
+#define SLIMBUS_2_RX 6
+#define SLIMBUS_2_TX 7
+#define SLIMBUS_3_RX 8
+#define SLIMBUS_3_TX 9
+#define SLIMBUS_4_RX 10
+#define SLIMBUS_4_TX 11
+#define SLIMBUS_5_RX 12
+#define SLIMBUS_5_TX 13
+#define SLIMBUS_6_RX 14
+#define SLIMBUS_6_TX 15
+#define PRIMARY_MI2S_RX 16
+#define PRIMARY_MI2S_TX 17
+#define SECONDARY_MI2S_RX 18
+#define SECONDARY_MI2S_TX 19
+#define TERTIARY_MI2S_RX 20
+#define TERTIARY_MI2S_TX 21
+#define QUATERNARY_MI2S_RX 22
+#define QUATERNARY_MI2S_TX 23
+#define PRIMARY_TDM_RX_0 24
+#define PRIMARY_TDM_TX_0 25
+#define PRIMARY_TDM_RX_1 26
+#define PRIMARY_TDM_TX_1 27
+#define PRIMARY_TDM_RX_2 28
+#define PRIMARY_TDM_TX_2 29
+#define PRIMARY_TDM_RX_3 30
+#define PRIMARY_TDM_TX_3 31
+#define PRIMARY_TDM_RX_4 32
+#define PRIMARY_TDM_TX_4 33
+#define PRIMARY_TDM_RX_5 34
+#define PRIMARY_TDM_TX_5 35
+#define PRIMARY_TDM_RX_6 36
+#define PRIMARY_TDM_TX_6 37
+#define PRIMARY_TDM_RX_7 38
+#define PRIMARY_TDM_TX_7 39
+#define SECONDARY_TDM_RX_0 40
+#define SECONDARY_TDM_TX_0 41
+#define SECONDARY_TDM_RX_1 42
+#define SECONDARY_TDM_TX_1 43
+#define SECONDARY_TDM_RX_2 44
+#define SECONDARY_TDM_TX_2 45
+#define SECONDARY_TDM_RX_3 46
+#define SECONDARY_TDM_TX_3 47
+#define SECONDARY_TDM_RX_4 48
+#define SECONDARY_TDM_TX_4 49
+#define SECONDARY_TDM_RX_5 50
+#define SECONDARY_TDM_TX_5 51
+#define SECONDARY_TDM_RX_6 52
+#define SECONDARY_TDM_TX_6 53
+#define SECONDARY_TDM_RX_7 54
+#define SECONDARY_TDM_TX_7 55
+#define TERTIARY_TDM_RX_0 56
+#define TERTIARY_TDM_TX_0 57
+#define TERTIARY_TDM_RX_1 58
+#define TERTIARY_TDM_TX_1 59
+#define TERTIARY_TDM_RX_2 60
+#define TERTIARY_TDM_TX_2 61
+#define TERTIARY_TDM_RX_3 62
+#define TERTIARY_TDM_TX_3 63
+#define TERTIARY_TDM_RX_4 64
+#define TERTIARY_TDM_TX_4 65
+#define TERTIARY_TDM_RX_5 66
+#define TERTIARY_TDM_TX_5 67
+#define TERTIARY_TDM_RX_6 68
+#define TERTIARY_TDM_TX_6 69
+#define TERTIARY_TDM_RX_7 70
+#define TERTIARY_TDM_TX_7 71
+#define QUATERNARY_TDM_RX_0 72
+#define QUATERNARY_TDM_TX_0 73
+#define QUATERNARY_TDM_RX_1 74
+#define QUATERNARY_TDM_TX_1 75
+#define QUATERNARY_TDM_RX_2 76
+#define QUATERNARY_TDM_TX_2 77
+#define QUATERNARY_TDM_RX_3 78
+#define QUATERNARY_TDM_TX_3 79
+#define QUATERNARY_TDM_RX_4 80
+#define QUATERNARY_TDM_TX_4 81
+#define QUATERNARY_TDM_RX_5 82
+#define QUATERNARY_TDM_TX_5 83
+#define QUATERNARY_TDM_RX_6 84
+#define QUATERNARY_TDM_TX_6 85
+#define QUATERNARY_TDM_RX_7 86
+#define QUATERNARY_TDM_TX_7 87
+#define QUINARY_TDM_RX_0 88
+#define QUINARY_TDM_TX_0 89
+#define QUINARY_TDM_RX_1 90
+#define QUINARY_TDM_TX_1 91
+#define QUINARY_TDM_RX_2 92
+#define QUINARY_TDM_TX_2 93
+#define QUINARY_TDM_RX_3 94
+#define QUINARY_TDM_TX_3 95
+#define QUINARY_TDM_RX_4 96
+#define QUINARY_TDM_TX_4 97
+#define QUINARY_TDM_RX_5 98
+#define QUINARY_TDM_TX_5 99
+#define QUINARY_TDM_RX_6 100
+#define QUINARY_TDM_TX_6 101
+#define QUINARY_TDM_RX_7 102
+#define QUINARY_TDM_TX_7 103
+#define DISPLAY_PORT_RX 104
+#define WSA_CODEC_DMA_RX_0 105
+#define WSA_CODEC_DMA_TX_0 106
+#define WSA_CODEC_DMA_RX_1 107
+#define WSA_CODEC_DMA_TX_1 108
+#define WSA_CODEC_DMA_TX_2 109
+#define VA_CODEC_DMA_TX_0 110
+#define VA_CODEC_DMA_TX_1 111
+#define VA_CODEC_DMA_TX_2 112
+#define RX_CODEC_DMA_RX_0 113
+#define TX_CODEC_DMA_TX_0 114
+#define RX_CODEC_DMA_RX_1 115
+#define TX_CODEC_DMA_TX_1 116
+#define RX_CODEC_DMA_RX_2 117
+#define TX_CODEC_DMA_TX_2 118
+#define RX_CODEC_DMA_RX_3 119
+#define TX_CODEC_DMA_TX_3 120
+#define RX_CODEC_DMA_RX_4 121
+#define TX_CODEC_DMA_TX_4 122
+#define RX_CODEC_DMA_RX_5 123
+#define TX_CODEC_DMA_TX_5 124
+#define RX_CODEC_DMA_RX_6 125
+#define RX_CODEC_DMA_RX_7 126
+#define QUINARY_MI2S_RX 127
+#define QUINARY_MI2S_TX 128
+#define DISPLAY_PORT_RX_0 DISPLAY_PORT_RX
+#define DISPLAY_PORT_RX_1 129
+#define DISPLAY_PORT_RX_2 130
+#define DISPLAY_PORT_RX_3 131
+#define DISPLAY_PORT_RX_4 132
+#define DISPLAY_PORT_RX_5 133
+#define DISPLAY_PORT_RX_6 134
+#define DISPLAY_PORT_RX_7 135
+#define USB_RX 136
+
+#define LPASS_CLK_ID_PRI_MI2S_IBIT 1
+#define LPASS_CLK_ID_PRI_MI2S_EBIT 2
+#define LPASS_CLK_ID_SEC_MI2S_IBIT 3
+#define LPASS_CLK_ID_SEC_MI2S_EBIT 4
+#define LPASS_CLK_ID_TER_MI2S_IBIT 5
+#define LPASS_CLK_ID_TER_MI2S_EBIT 6
+#define LPASS_CLK_ID_QUAD_MI2S_IBIT 7
+#define LPASS_CLK_ID_QUAD_MI2S_EBIT 8
+#define LPASS_CLK_ID_SPEAKER_I2S_IBIT 9
+#define LPASS_CLK_ID_SPEAKER_I2S_EBIT 10
+#define LPASS_CLK_ID_SPEAKER_I2S_OSR 11
+#define LPASS_CLK_ID_QUI_MI2S_IBIT 12
+#define LPASS_CLK_ID_QUI_MI2S_EBIT 13
+#define LPASS_CLK_ID_SEN_MI2S_IBIT 14
+#define LPASS_CLK_ID_SEN_MI2S_EBIT 15
+#define LPASS_CLK_ID_INT0_MI2S_IBIT 16
+#define LPASS_CLK_ID_INT1_MI2S_IBIT 17
+#define LPASS_CLK_ID_INT2_MI2S_IBIT 18
+#define LPASS_CLK_ID_INT3_MI2S_IBIT 19
+#define LPASS_CLK_ID_INT4_MI2S_IBIT 20
+#define LPASS_CLK_ID_INT5_MI2S_IBIT 21
+#define LPASS_CLK_ID_INT6_MI2S_IBIT 22
+#define LPASS_CLK_ID_QUI_MI2S_OSR 23
+#define LPASS_CLK_ID_PRI_PCM_IBIT 24
+#define LPASS_CLK_ID_PRI_PCM_EBIT 25
+#define LPASS_CLK_ID_SEC_PCM_IBIT 26
+#define LPASS_CLK_ID_SEC_PCM_EBIT 27
+#define LPASS_CLK_ID_TER_PCM_IBIT 28
+#define LPASS_CLK_ID_TER_PCM_EBIT 29
+#define LPASS_CLK_ID_QUAD_PCM_IBIT 30
+#define LPASS_CLK_ID_QUAD_PCM_EBIT 31
+#define LPASS_CLK_ID_QUIN_PCM_IBIT 32
+#define LPASS_CLK_ID_QUIN_PCM_EBIT 33
+#define LPASS_CLK_ID_QUI_PCM_OSR 34
+#define LPASS_CLK_ID_PRI_TDM_IBIT 35
+#define LPASS_CLK_ID_PRI_TDM_EBIT 36
+#define LPASS_CLK_ID_SEC_TDM_IBIT 37
+#define LPASS_CLK_ID_SEC_TDM_EBIT 38
+#define LPASS_CLK_ID_TER_TDM_IBIT 39
+#define LPASS_CLK_ID_TER_TDM_EBIT 40
+#define LPASS_CLK_ID_QUAD_TDM_IBIT 41
+#define LPASS_CLK_ID_QUAD_TDM_EBIT 42
+#define LPASS_CLK_ID_QUIN_TDM_IBIT 43
+#define LPASS_CLK_ID_QUIN_TDM_EBIT 44
+#define LPASS_CLK_ID_QUIN_TDM_OSR 45
+#define LPASS_CLK_ID_MCLK_1 46
+#define LPASS_CLK_ID_MCLK_2 47
+#define LPASS_CLK_ID_MCLK_3 48
+#define LPASS_CLK_ID_MCLK_4 49
+#define LPASS_CLK_ID_INTERNAL_DIGITAL_CODEC_CORE 50
+#define LPASS_CLK_ID_INT_MCLK_0 51
+#define LPASS_CLK_ID_INT_MCLK_1 52
+#define LPASS_CLK_ID_MCLK_5 53
+#define LPASS_CLK_ID_WSA_CORE_MCLK 54
+#define LPASS_CLK_ID_WSA_CORE_NPL_MCLK 55
+#define LPASS_CLK_ID_VA_CORE_MCLK 56
+#define LPASS_CLK_ID_TX_CORE_MCLK 57
+#define LPASS_CLK_ID_TX_CORE_NPL_MCLK 58
+#define LPASS_CLK_ID_RX_CORE_MCLK 59
+#define LPASS_CLK_ID_RX_CORE_NPL_MCLK 60
+#define LPASS_CLK_ID_VA_CORE_2X_MCLK 61
+/* Clock ID for MCLK for WSA2 core */
+#define LPASS_CLK_ID_WSA2_CORE_MCLK 62
+/* Clock ID for 2X (NPL) MCLK for WSA2 core */
+#define LPASS_CLK_ID_WSA2_CORE_2X_MCLK 63
+/* Clock ID for RX Core TX MCLK */
+#define LPASS_CLK_ID_RX_CORE_TX_MCLK 64
+/* Clock ID for RX CORE TX 2X MCLK */
+#define LPASS_CLK_ID_RX_CORE_TX_2X_MCLK 65
+/* Clock ID for WSA core TX MCLK */
+#define LPASS_CLK_ID_WSA_CORE_TX_MCLK 66
+/* Clock ID for WSA core TX 2X MCLK */
+#define LPASS_CLK_ID_WSA_CORE_TX_2X_MCLK 67
+/* Clock ID for WSA2 core TX MCLK */
+#define LPASS_CLK_ID_WSA2_CORE_TX_MCLK 68
+/* Clock ID for WSA2 core TX 2X MCLK */
+#define LPASS_CLK_ID_WSA2_CORE_TX_2X_MCLK 69
+/* Clock ID for RX CORE MCLK2 2X MCLK */
+#define LPASS_CLK_ID_RX_CORE_MCLK2_2X_MCLK 70
+
+#define LPASS_HW_AVTIMER_VOTE 101
+#define LPASS_HW_MACRO_VOTE 102
+#define LPASS_HW_DCODEC_VOTE 103
+
+#define Q6AFE_MAX_CLK_ID 104
+
+#define LPASS_CLK_ATTRIBUTE_INVALID 0x0
+#define LPASS_CLK_ATTRIBUTE_COUPLE_NO 0x1
+#define LPASS_CLK_ATTRIBUTE_COUPLE_DIVIDEND 0x2
+#define LPASS_CLK_ATTRIBUTE_COUPLE_DIVISOR 0x3
+
+#endif /* __DT_BINDINGS_Q6_AUDIO_PORTS_H__ */
diff --git a/include/dt-bindings/sound/qcom,wcd9335.h b/include/dt-bindings/sound/qcom,wcd9335.h
new file mode 100644
index 000000000000..4fc68aeb9e04
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,wcd9335.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef __DT_SOUND_QCOM_WCD9335_H
+#define __DT_SOUND_QCOM_WCD9335_H
+
+#define AIF1_PB 0
+#define AIF1_CAP 1
+#define AIF2_PB 2
+#define AIF2_CAP 3
+#define AIF3_PB 4
+#define AIF3_CAP 5
+#define AIF4_PB 6
+
+#endif
diff --git a/include/dt-bindings/sound/qcom,wcd934x.h b/include/dt-bindings/sound/qcom,wcd934x.h
new file mode 100644
index 000000000000..8b30d34fcc87
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,wcd934x.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef __DT_SOUND_QCOM_WCD934x_H
+#define __DT_SOUND_QCOM_WCD934x_H
+
+#define AIF1_PB 0
+#define AIF1_CAP 1
+#define AIF2_PB 2
+#define AIF2_CAP 3
+#define AIF3_PB 4
+#define AIF3_CAP 5
+#define AIF4_PB 6
+#define AIF4_VIFEED 7
+#define AIF4_MAD_TX 8
+
+#endif
diff --git a/include/dt-bindings/sound/rt5640.h b/include/dt-bindings/sound/rt5640.h
index 154c9b4414f2..655f6946388a 100644
--- a/include/dt-bindings/sound/rt5640.h
+++ b/include/dt-bindings/sound/rt5640.h
@@ -16,6 +16,7 @@
#define RT5640_JD_SRC_GPIO2 4
#define RT5640_JD_SRC_GPIO3 5
#define RT5640_JD_SRC_GPIO4 6
+#define RT5640_JD_SRC_HDA_HEADER 7
#define RT5640_OVCD_SF_0P5 0
#define RT5640_OVCD_SF_0P75 1
diff --git a/include/dt-bindings/sound/tlv320adc3xxx.h b/include/dt-bindings/sound/tlv320adc3xxx.h
new file mode 100644
index 000000000000..ec988439da20
--- /dev/null
+++ b/include/dt-bindings/sound/tlv320adc3xxx.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Devicetree bindings definitions for tlv320adc3xxx driver.
+ *
+ * Copyright (C) 2021 Axis Communications AB
+ */
+#ifndef __DT_TLV320ADC3XXX_H
+#define __DT_TLV320ADC3XXX_H
+
+#define ADC3XXX_GPIO_DISABLED 0 /* I/O buffers powered down */
+#define ADC3XXX_GPIO_INPUT 1 /* Various non-GPIO inputs */
+#define ADC3XXX_GPIO_GPI 2 /* General purpose input */
+#define ADC3XXX_GPIO_GPO 3 /* General purpose output */
+#define ADC3XXX_GPIO_CLKOUT 4 /* Source set in reg. CLKOUT_MUX */
+#define ADC3XXX_GPIO_INT1 5 /* INT1 output */
+#define ADC3XXX_GPIO_INT2 6 /* INT2 output */
+/* value 7 is reserved */
+#define ADC3XXX_GPIO_SECONDARY_BCLK 8 /* Codec interface secondary BCLK */
+#define ADC3XXX_GPIO_SECONDARY_WCLK 9 /* Codec interface secondary WCLK */
+#define ADC3XXX_GPIO_ADC_MOD_CLK 10 /* Clock output for digital mics */
+/* values 11-15 reserved */
+
+#define ADC3XXX_MICBIAS_OFF 0 /* Micbias pin powered off */
+#define ADC3XXX_MICBIAS_2_0V 1 /* Micbias pin set to 2.0V */
+#define ADC3XXX_MICBIAS_2_5V 2 /* Micbias pin set to 2.5V */
+#define ADC3XXX_MICBIAS_AVDD 3 /* Use AVDD voltage for micbias pin */
+
+#endif /* __DT_TLV320ADC3XXX_H */
diff --git a/include/dt-bindings/sound/tlv320aic31xx-micbias.h b/include/dt-bindings/sound/tlv320aic31xx-micbias.h
deleted file mode 100644
index c6895a18a455..000000000000
--- a/include/dt-bindings/sound/tlv320aic31xx-micbias.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DT_TLV320AIC31XX_MICBIAS_H
-#define __DT_TLV320AIC31XX_MICBIAS_H
-
-#define MICBIAS_2_0V 1
-#define MICBIAS_2_5V 2
-#define MICBIAS_AVDDV 3
-
-#endif /* __DT_TLV320AIC31XX_MICBIAS_H */
diff --git a/include/dt-bindings/sound/tlv320aic31xx.h b/include/dt-bindings/sound/tlv320aic31xx.h
new file mode 100644
index 000000000000..4a80238ab250
--- /dev/null
+++ b/include/dt-bindings/sound/tlv320aic31xx.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_TLV320AIC31XX_H
+#define __DT_TLV320AIC31XX_H
+
+#define MICBIAS_2_0V 1
+#define MICBIAS_2_5V 2
+#define MICBIAS_AVDDV 3
+
+#define PLL_CLKIN_MCLK 0x00
+#define PLL_CLKIN_BCLK 0x01
+#define PLL_CLKIN_GPIO1 0x02
+#define PLL_CLKIN_DIN 0x03
+
+#endif /* __DT_TLV320AIC31XX_H */
diff --git a/include/dt-bindings/thermal/mediatek,lvts-thermal.h b/include/dt-bindings/thermal/mediatek,lvts-thermal.h
new file mode 100644
index 000000000000..ddc7302a510a
--- /dev/null
+++ b/include/dt-bindings/thermal/mediatek,lvts-thermal.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Balsam CHIHI <bchihi@baylibre.com>
+ */
+
+#ifndef __MEDIATEK_LVTS_DT_H
+#define __MEDIATEK_LVTS_DT_H
+
+#define MT7988_CPU_0 0
+#define MT7988_CPU_1 1
+#define MT7988_ETH2P5G_0 2
+#define MT7988_ETH2P5G_1 3
+#define MT7988_TOPS_0 4
+#define MT7988_TOPS_1 5
+#define MT7988_ETHWARP_0 6
+#define MT7988_ETHWARP_1 7
+
+#define MT8186_LITTLE_CPU0 0
+#define MT8186_LITTLE_CPU1 1
+#define MT8186_LITTLE_CPU2 2
+#define MT8186_CAM 3
+#define MT8186_BIG_CPU0 4
+#define MT8186_BIG_CPU1 5
+#define MT8186_NNA 6
+#define MT8186_ADSP 7
+#define MT8186_GPU 8
+
+#define MT8188_MCU_LITTLE_CPU0 0
+#define MT8188_MCU_LITTLE_CPU1 1
+#define MT8188_MCU_LITTLE_CPU2 2
+#define MT8188_MCU_LITTLE_CPU3 3
+#define MT8188_MCU_BIG_CPU0 4
+#define MT8188_MCU_BIG_CPU1 5
+
+#define MT8188_AP_APU 0
+#define MT8188_AP_GPU0 1
+#define MT8188_AP_GPU1 2
+#define MT8188_AP_ADSP 3
+#define MT8188_AP_VDO 4
+#define MT8188_AP_INFRA 5
+#define MT8188_AP_CAM1 6
+#define MT8188_AP_CAM2 7
+
+#define MT8195_MCU_BIG_CPU0 0
+#define MT8195_MCU_BIG_CPU1 1
+#define MT8195_MCU_BIG_CPU2 2
+#define MT8195_MCU_BIG_CPU3 3
+#define MT8195_MCU_LITTLE_CPU0 4
+#define MT8195_MCU_LITTLE_CPU1 5
+#define MT8195_MCU_LITTLE_CPU2 6
+#define MT8195_MCU_LITTLE_CPU3 7
+
+#define MT8195_AP_VPU0 8
+#define MT8195_AP_VPU1 9
+#define MT8195_AP_GPU0 10
+#define MT8195_AP_GPU1 11
+#define MT8195_AP_VDEC 12
+#define MT8195_AP_IMG 13
+#define MT8195_AP_INFRA 14
+#define MT8195_AP_CAM0 15
+#define MT8195_AP_CAM1 16
+
+#define MT8192_MCU_BIG_CPU0 0
+#define MT8192_MCU_BIG_CPU1 1
+#define MT8192_MCU_BIG_CPU2 2
+#define MT8192_MCU_BIG_CPU3 3
+#define MT8192_MCU_LITTLE_CPU0 4
+#define MT8192_MCU_LITTLE_CPU1 5
+#define MT8192_MCU_LITTLE_CPU2 6
+#define MT8192_MCU_LITTLE_CPU3 7
+
+#define MT8192_AP_VPU0 8
+#define MT8192_AP_VPU1 9
+#define MT8192_AP_GPU0 10
+#define MT8192_AP_GPU1 11
+#define MT8192_AP_INFRA 12
+#define MT8192_AP_CAM 13
+#define MT8192_AP_MD0 14
+#define MT8192_AP_MD1 15
+#define MT8192_AP_MD2 16
+
+#endif /* __MEDIATEK_LVTS_DT_H */
diff --git a/include/dt-bindings/thermal/tegra114-soctherm.h b/include/dt-bindings/thermal/tegra114-soctherm.h
new file mode 100644
index 000000000000..b766a61cd1ce
--- /dev/null
+++ b/include/dt-bindings/thermal/tegra114-soctherm.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for binding nvidia,tegra114-soctherm.
+ */
+
+#ifndef _DT_BINDINGS_THERMAL_TEGRA114_SOCTHERM_H
+#define _DT_BINDINGS_THERMAL_TEGRA114_SOCTHERM_H
+
+#define TEGRA114_SOCTHERM_SENSOR_CPU 0
+#define TEGRA114_SOCTHERM_SENSOR_MEM 1
+#define TEGRA114_SOCTHERM_SENSOR_GPU 2
+#define TEGRA114_SOCTHERM_SENSOR_PLLX 3
+
+#define TEGRA114_SOCTHERM_THROT_LEVEL_NONE 0
+#define TEGRA114_SOCTHERM_THROT_LEVEL_LOW 1
+#define TEGRA114_SOCTHERM_THROT_LEVEL_MED 2
+#define TEGRA114_SOCTHERM_THROT_LEVEL_HIGH 3
+
+#endif
diff --git a/include/dt-bindings/thermal/tegra234-bpmp-thermal.h b/include/dt-bindings/thermal/tegra234-bpmp-thermal.h
new file mode 100644
index 000000000000..934787950932
--- /dev/null
+++ b/include/dt-bindings/thermal/tegra234-bpmp-thermal.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for binding nvidia,tegra234-bpmp-thermal.
+ */
+
+#ifndef _DT_BINDINGS_THERMAL_TEGRA234_BPMP_THERMAL_H
+#define _DT_BINDINGS_THERMAL_TEGRA234_BPMP_THERMAL_H
+
+#define TEGRA234_BPMP_THERMAL_ZONE_CPU 0
+#define TEGRA234_BPMP_THERMAL_ZONE_GPU 1
+#define TEGRA234_BPMP_THERMAL_ZONE_CV0 2
+#define TEGRA234_BPMP_THERMAL_ZONE_CV1 3
+#define TEGRA234_BPMP_THERMAL_ZONE_CV2 4
+#define TEGRA234_BPMP_THERMAL_ZONE_SOC0 5
+#define TEGRA234_BPMP_THERMAL_ZONE_SOC1 6
+#define TEGRA234_BPMP_THERMAL_ZONE_SOC2 7
+#define TEGRA234_BPMP_THERMAL_ZONE_TJ_MAX 8
+
+#endif
diff --git a/include/dt-bindings/usb/pd.h b/include/dt-bindings/usb/pd.h
index fef3ef65967f..e6526b138174 100644
--- a/include/dt-bindings/usb/pd.h
+++ b/include/dt-bindings/usb/pd.h
@@ -106,6 +106,10 @@
* <20:16> :: Reserved, Shall be set to zero
* <15:0> :: USB-IF assigned VID for this cable vendor
*/
+
+/* PD Rev2.0 definition */
+#define IDH_PTYPE_UNDEF 0
+
/* SOP Product Type (UFP) */
#define IDH_PTYPE_NOT_UFP 0
#define IDH_PTYPE_HUB 1
@@ -163,10 +167,10 @@
#define UFP_VDO_VER1_2 2
/* Device Capability */
-#define DEV_USB2_CAPABLE BIT(0)
-#define DEV_USB2_BILLBOARD BIT(1)
-#define DEV_USB3_CAPABLE BIT(2)
-#define DEV_USB4_CAPABLE BIT(3)
+#define DEV_USB2_CAPABLE (1 << 0)
+#define DEV_USB2_BILLBOARD (1 << 1)
+#define DEV_USB3_CAPABLE (1 << 2)
+#define DEV_USB4_CAPABLE (1 << 3)
/* Connector Type */
#define UFP_RECEPTACLE 2
@@ -191,9 +195,9 @@
/* Alternate Modes */
#define UFP_ALTMODE_NOT_SUPP 0
-#define UFP_ALTMODE_TBT3 BIT(0)
-#define UFP_ALTMODE_RECFG BIT(1)
-#define UFP_ALTMODE_NO_RECFG BIT(2)
+#define UFP_ALTMODE_TBT3 (1 << 0)
+#define UFP_ALTMODE_RECFG (1 << 1)
+#define UFP_ALTMODE_NO_RECFG (1 << 2)
/* USB Highest Speed */
#define UFP_USB2_ONLY 0
@@ -217,9 +221,9 @@
* <4:0> :: Port number
*/
#define DFP_VDO_VER1_1 1
-#define HOST_USB2_CAPABLE BIT(0)
-#define HOST_USB3_CAPABLE BIT(1)
-#define HOST_USB4_CAPABLE BIT(2)
+#define HOST_USB2_CAPABLE (1 << 0)
+#define HOST_USB3_CAPABLE (1 << 1)
+#define HOST_USB4_CAPABLE (1 << 2)
#define DFP_RECEPTACLE 2
#define DFP_CAPTIVE 3
@@ -228,7 +232,25 @@
| ((pnum) & 0x1f))
/*
- * Passive Cable VDO
+ * Cable VDO (for both Passive and Active Cable VDO in PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:20> :: Reserved, Shall be set to zero
+ * <19:18> :: Type-C to Type-A/B/C/Captive (00b == A, 01b == B, 10b == C, 11b == Captive)
+ * <17> :: Reserved, Shall be set to zero
+ * <16:13> :: cable latency (0001b == <10 ns (~1 m length))
+ * <12:11> :: cable termination type (11b == both ends active VCONN req)
+ * <10> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <9> :: SSTX2 Directionality support
+ * <8> :: SSRX1 Directionality support
+ * <7> :: SSRX2 Directionality support
+ * <6:5> :: Vbus current handling capability (01b == 3A, 10b == 5A)
+ * <4> :: Vbus through cable (0b == no, 1b == yes)
+ * <3> :: SOP" controller present? (0b == no, 1b == yes)
+ * <2:0> :: USB SS Signaling support
+ *
+ * Passive Cable VDO (PD Rev3.0+)
* ---------
* <31:28> :: Cable HW version
* <27:24> :: Cable FW version
@@ -244,7 +266,7 @@
* <4:3> :: Reserved, Shall be set to zero
* <2:0> :: USB highest speed
*
- * Active Cable VDO 1
+ * Active Cable VDO 1 (PD Rev3.0+)
* ---------
* <31:28> :: Cable HW version
* <27:24> :: Cable FW version
@@ -266,7 +288,9 @@
#define CABLE_VDO_VER1_0 0
#define CABLE_VDO_VER1_3 3
-/* Connector Type */
+/* Connector Type (_ATYPE and _BTYPE are for PD Rev2.0 only) */
+#define CABLE_ATYPE 0
+#define CABLE_BTYPE 1
#define CABLE_CTYPE 2
#define CABLE_CAPTIVE 3
@@ -303,12 +327,22 @@
#define CABLE_CURR_3A 1
#define CABLE_CURR_5A 2
+/* USB SuperSpeed Signaling Support (PD Rev2.0) */
+#define CABLE_USBSS_U2_ONLY 0
+#define CABLE_USBSS_U31_GEN1 1
+#define CABLE_USBSS_U31_GEN2 2
+
/* USB Highest Speed */
#define CABLE_USB2_ONLY 0
#define CABLE_USB32_GEN1 1
#define CABLE_USB32_4_GEN2 2
#define CABLE_USB4_GEN3 3
+#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18 \
+ | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10 \
+ | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5 \
+ | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7))
#define VDO_PCABLE(hw, fw, ver, conn, lat, term, vbm, cur, spd) \
(((hw) & 0xf) << 28 | ((fw) & 0xf) << 24 | ((ver) & 0x7) << 21 \
| ((conn) & 0x3) << 18 | ((lat) & 0xf) << 13 | ((term) & 0x3) << 11 \
@@ -374,6 +408,35 @@
| (iso) << 2 | (gen))
/*
+ * AMA VDO (PD Rev2.0)
+ * ---------
+ * <31:28> :: Cable HW version
+ * <27:24> :: Cable FW version
+ * <23:12> :: Reserved, Shall be set to zero
+ * <11> :: SSTX1 Directionality support (0b == fixed, 1b == cfgable)
+ * <10> :: SSTX2 Directionality support
+ * <9> :: SSRX1 Directionality support
+ * <8> :: SSRX2 Directionality support
+ * <7:5> :: Vconn power
+ * <4> :: Vconn power required
+ * <3> :: Vbus power required
+ * <2:0> :: USB SS Signaling support
+ */
+#define VDO_AMA(hw, fw, tx1d, tx2d, rx1d, rx2d, vcpwr, vcr, vbr, usbss) \
+ (((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 \
+ | (tx1d) << 11 | (tx2d) << 10 | (rx1d) << 9 | (rx2d) << 8 \
+ | ((vcpwr) & 0x7) << 5 | (vcr) << 4 | (vbr) << 3 \
+ | ((usbss) & 0x7))
+
+#define PD_VDO_AMA_VCONN_REQ(vdo) (((vdo) >> 4) & 1)
+#define PD_VDO_AMA_VBUS_REQ(vdo) (((vdo) >> 3) & 1)
+
+#define AMA_USBSS_U2_ONLY 0
+#define AMA_USBSS_U31_GEN1 1
+#define AMA_USBSS_U31_GEN2 2
+#define AMA_USBSS_BBONLY 3
+
+/*
* VPD VDO
* ---------
* <31:28> :: HW version
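
To make the Cable VDO bit layout above concrete, here is a self-contained C sketch that packs a Rev2.0 C-to-C cable VDO with the VDO_CABLE() macro exactly as defined in this diff and then decodes the connector-type field back out of bits <19:18>; the chosen field values are illustrative:

#include <stdio.h>
#include <stdint.h>

/* Macro and constants copied from dt-bindings/usb/pd.h above */
#define CABLE_CTYPE		2
#define CABLE_CURR_3A		1
#define CABLE_USBSS_U31_GEN1	1

#define VDO_CABLE(hw, fw, cbl, lat, term, tx1d, tx2d, rx1d, rx2d, cur, vps, sopp, usbss) \
	(((hw) & 0x7) << 28 | ((fw) & 0x7) << 24 | ((cbl) & 0x3) << 18	\
	 | ((lat) & 0x7) << 13 | ((term) & 0x3) << 11 | (tx1d) << 10	\
	 | (tx2d) << 9 | (rx1d) << 8 | (rx2d) << 7 | ((cur) & 0x3) << 5	\
	 | (vps) << 4 | (sopp) << 3 | ((usbss) & 0x7))

int main(void)
{
	/* A C-to-C cable: HW version 1, latency code 1 (<10 ns), passive
	 * termination (0), fixed SS lane directions, 3A Vbus handling,
	 * Vbus through cable, no SOP" controller, USB 3.1 Gen1 signaling. */
	uint32_t vdo = VDO_CABLE(1, 0, CABLE_CTYPE, 1, 0, 0, 0, 0, 0,
				 CABLE_CURR_3A, 1, 0, CABLE_USBSS_U31_GEN1);

	/* Decode connector type from bits <19:18> per the layout above. */
	printf("vdo = 0x%08x, connector type = %u\n",
	       (unsigned int)vdo, (unsigned int)((vdo >> 18) & 0x3));
	return 0;
}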
diff --git a/include/dt-bindings/watchdog/aspeed-wdt.h b/include/dt-bindings/watchdog/aspeed-wdt.h
new file mode 100644
index 000000000000..89fa31ffce2d
--- /dev/null
+++ b/include/dt-bindings/watchdog/aspeed-wdt.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef DT_BINDINGS_ASPEED_WDT_H
+#define DT_BINDINGS_ASPEED_WDT_H
+
+#define AST2500_WDT_RESET_CPU (1 << 0)
+#define AST2500_WDT_RESET_COPROC (1 << 1)
+#define AST2500_WDT_RESET_SDRAM (1 << 2)
+#define AST2500_WDT_RESET_AHB (1 << 3)
+#define AST2500_WDT_RESET_I2C (1 << 4)
+#define AST2500_WDT_RESET_MAC0 (1 << 5)
+#define AST2500_WDT_RESET_MAC1 (1 << 6)
+#define AST2500_WDT_RESET_GRAPHICS (1 << 7)
+#define AST2500_WDT_RESET_USB2_HOST_HUB (1 << 8)
+#define AST2500_WDT_RESET_USB_HOST (1 << 9)
+#define AST2500_WDT_RESET_HID_EHCI (1 << 10)
+#define AST2500_WDT_RESET_VIDEO (1 << 11)
+#define AST2500_WDT_RESET_HAC (1 << 12)
+#define AST2500_WDT_RESET_LPC (1 << 13)
+#define AST2500_WDT_RESET_SDIO (1 << 14)
+#define AST2500_WDT_RESET_MIC (1 << 15)
+#define AST2500_WDT_RESET_CRT (1 << 16)
+#define AST2500_WDT_RESET_PWM (1 << 17)
+#define AST2500_WDT_RESET_PECI (1 << 18)
+#define AST2500_WDT_RESET_JTAG (1 << 19)
+#define AST2500_WDT_RESET_ADC (1 << 20)
+#define AST2500_WDT_RESET_GPIO (1 << 21)
+#define AST2500_WDT_RESET_MCTP (1 << 22)
+#define AST2500_WDT_RESET_XDMA (1 << 23)
+#define AST2500_WDT_RESET_SPI (1 << 24)
+#define AST2500_WDT_RESET_SOC_MISC (1 << 25)
+
+#define AST2500_WDT_RESET_DEFAULT 0x023ffff3
+
+#define AST2600_WDT_RESET1_CPU (1 << 0)
+#define AST2600_WDT_RESET1_SDRAM (1 << 1)
+#define AST2600_WDT_RESET1_AHB (1 << 2)
+#define AST2600_WDT_RESET1_SLI (1 << 3)
+#define AST2600_WDT_RESET1_SOC_MISC0 (1 << 4)
+#define AST2600_WDT_RESET1_COPROC (1 << 5)
+#define AST2600_WDT_RESET1_USB_A (1 << 6)
+#define AST2600_WDT_RESET1_USB_B (1 << 7)
+#define AST2600_WDT_RESET1_UHCI (1 << 8)
+#define AST2600_WDT_RESET1_GRAPHICS (1 << 9)
+#define AST2600_WDT_RESET1_CRT (1 << 10)
+#define AST2600_WDT_RESET1_VIDEO (1 << 11)
+#define AST2600_WDT_RESET1_HAC (1 << 12)
+#define AST2600_WDT_RESET1_DP (1 << 13)
+#define AST2600_WDT_RESET1_DP_MCU (1 << 14)
+#define AST2600_WDT_RESET1_GP_MCU (1 << 15)
+#define AST2600_WDT_RESET1_MAC0 (1 << 16)
+#define AST2600_WDT_RESET1_MAC1 (1 << 17)
+#define AST2600_WDT_RESET1_SDIO0 (1 << 18)
+#define AST2600_WDT_RESET1_JTAG0 (1 << 19)
+#define AST2600_WDT_RESET1_MCTP0 (1 << 20)
+#define AST2600_WDT_RESET1_MCTP1 (1 << 21)
+#define AST2600_WDT_RESET1_XDMA0 (1 << 22)
+#define AST2600_WDT_RESET1_XDMA1 (1 << 23)
+#define AST2600_WDT_RESET1_GPIO0 (1 << 24)
+#define AST2600_WDT_RESET1_RVAS (1 << 25)
+
+#define AST2600_WDT_RESET1_DEFAULT 0x030f1ff1
+
+#define AST2600_WDT_RESET2_CPU (1 << 0)
+#define AST2600_WDT_RESET2_SPI (1 << 1)
+#define AST2600_WDT_RESET2_AHB2 (1 << 2)
+#define AST2600_WDT_RESET2_SLI2 (1 << 3)
+#define AST2600_WDT_RESET2_SOC_MISC1 (1 << 4)
+#define AST2600_WDT_RESET2_MAC2 (1 << 5)
+#define AST2600_WDT_RESET2_MAC3 (1 << 6)
+#define AST2600_WDT_RESET2_SDIO1 (1 << 7)
+#define AST2600_WDT_RESET2_JTAG1 (1 << 8)
+#define AST2600_WDT_RESET2_GPIO1 (1 << 9)
+#define AST2600_WDT_RESET2_MDIO (1 << 10)
+#define AST2600_WDT_RESET2_LPC (1 << 11)
+#define AST2600_WDT_RESET2_PECI (1 << 12)
+#define AST2600_WDT_RESET2_PWM (1 << 13)
+#define AST2600_WDT_RESET2_ADC (1 << 14)
+#define AST2600_WDT_RESET2_FSI (1 << 15)
+#define AST2600_WDT_RESET2_I2C (1 << 16)
+#define AST2600_WDT_RESET2_I3C_GLOBAL (1 << 17)
+#define AST2600_WDT_RESET2_I3C0 (1 << 18)
+#define AST2600_WDT_RESET2_I3C1 (1 << 19)
+#define AST2600_WDT_RESET2_I3C2 (1 << 20)
+#define AST2600_WDT_RESET2_I3C3 (1 << 21)
+#define AST2600_WDT_RESET2_I3C4 (1 << 22)
+#define AST2600_WDT_RESET2_I3C5 (1 << 23)
+#define AST2600_WDT_RESET2_ESPI (1 << 26)
+
+#define AST2600_WDT_RESET2_DEFAULT 0x03fffff1
+
+#define AST2700_WDT_RESET1_CPU (1 << 0)
+#define AST2700_WDT_RESET1_DRAM (1 << 1)
+#define AST2700_WDT_RESET1_SLI0 (1 << 2)
+#define AST2700_WDT_RESET1_EHCI (1 << 3)
+#define AST2700_WDT_RESET1_HACE (1 << 4)
+#define AST2700_WDT_RESET1_SOC_MISC0 (1 << 5)
+#define AST2700_WDT_RESET1_VIDEO (1 << 6)
+#define AST2700_WDT_RESET1_2D_GRAPHIC (1 << 7)
+#define AST2700_WDT_RESET1_RAVS0 (1 << 8)
+#define AST2700_WDT_RESET1_RAVS1 (1 << 9)
+#define AST2700_WDT_RESET1_GPIO0 (1 << 10)
+#define AST2700_WDT_RESET1_SSP (1 << 11)
+#define AST2700_WDT_RESET1_TSP (1 << 12)
+#define AST2700_WDT_RESET1_CRT (1 << 13)
+#define AST2700_WDT_RESET1_USB20_HOST (1 << 14)
+#define AST2700_WDT_RESET1_USB11_HOST (1 << 15)
+#define AST2700_WDT_RESET1_UFS (1 << 16)
+#define AST2700_WDT_RESET1_EMMC (1 << 17)
+#define AST2700_WDT_RESET1_AHB_TO_PCIE1 (1 << 18)
+#define AST2700_WDT_RESET1_XDMA0 (1 << 22)
+#define AST2700_WDT_RESET1_MCTP1 (1 << 23)
+#define AST2700_WDT_RESET1_MCTP0 (1 << 24)
+#define AST2700_WDT_RESET1_JTAG0 (1 << 25)
+#define AST2700_WDT_RESET1_ECC (1 << 26)
+#define AST2700_WDT_RESET1_XDMA1 (1 << 27)
+#define AST2700_WDT_RESET1_DP (1 << 28)
+#define AST2700_WDT_RESET1_DP_MCU (1 << 29)
+#define AST2700_WDT_RESET1_AHB_TO_PCIE0 (1 << 31)
+
+#define AST2700_WDT_RESET1_DEFAULT 0x8207ff71
+
+#define AST2700_WDT_RESET2_USB3_A_HOST (1 << 0)
+#define AST2700_WDT_RESET2_USB3_A_VHUB3 (1 << 1)
+#define AST2700_WDT_RESET2_USB3_A_VHUB2 (1 << 2)
+#define AST2700_WDT_RESET2_USB3_B_HOST (1 << 3)
+#define AST2700_WDT_RESET2_USB3_B_VHUB3 (1 << 4)
+#define AST2700_WDT_RESET2_USB3_B_VHUB2 (1 << 5)
+#define AST2700_WDT_RESET2_SM3 (1 << 6)
+#define AST2700_WDT_RESET2_SM4 (1 << 7)
+#define AST2700_WDT_RESET2_SHA3 (1 << 8)
+#define AST2700_WDT_RESET2_RSA (1 << 9)
+
+#define AST2700_WDT_RESET2_DEFAULT 0x000003f6
+
+#define AST2700_WDT_RESET3_LPC0 (1 << 0)
+#define AST2700_WDT_RESET3_LPC1 (1 << 1)
+#define AST2700_WDT_RESET3_MDIO (1 << 2)
+#define AST2700_WDT_RESET3_PECI (1 << 3)
+#define AST2700_WDT_RESET3_PWM (1 << 4)
+#define AST2700_WDT_RESET3_MAC0 (1 << 5)
+#define AST2700_WDT_RESET3_MAC1 (1 << 6)
+#define AST2700_WDT_RESET3_MAC2 (1 << 7)
+#define AST2700_WDT_RESET3_ADC (1 << 8)
+#define AST2700_WDT_RESET3_SDC (1 << 9)
+#define AST2700_WDT_RESET3_ESPI0 (1 << 10)
+#define AST2700_WDT_RESET3_ESPI1 (1 << 11)
+#define AST2700_WDT_RESET3_JTAG1 (1 << 12)
+#define AST2700_WDT_RESET3_SPI0 (1 << 13)
+#define AST2700_WDT_RESET3_SPI1 (1 << 14)
+#define AST2700_WDT_RESET3_SPI2 (1 << 15)
+#define AST2700_WDT_RESET3_I3C0 (1 << 16)
+#define AST2700_WDT_RESET3_I3C1 (1 << 17)
+#define AST2700_WDT_RESET3_I3C2 (1 << 18)
+#define AST2700_WDT_RESET3_I3C3 (1 << 19)
+#define AST2700_WDT_RESET3_I3C4 (1 << 20)
+#define AST2700_WDT_RESET3_I3C5 (1 << 21)
+#define AST2700_WDT_RESET3_I3C6 (1 << 22)
+#define AST2700_WDT_RESET3_I3C7 (1 << 23)
+#define AST2700_WDT_RESET3_I3C8 (1 << 24)
+#define AST2700_WDT_RESET3_I3C9 (1 << 25)
+#define AST2700_WDT_RESET3_I3C10 (1 << 26)
+#define AST2700_WDT_RESET3_I3C11 (1 << 27)
+#define AST2700_WDT_RESET3_I3C12 (1 << 28)
+#define AST2700_WDT_RESET3_I3C13 (1 << 29)
+#define AST2700_WDT_RESET3_I3C14 (1 << 30)
+#define AST2700_WDT_RESET3_I3C15 (1 << 31)
+
+#define AST2700_WDT_RESET3_DEFAULT 0x000093ec
+
+#define AST2700_WDT_RESET4_FMC (1 << 0)
+#define AST2700_WDT_RESET4_SOC_MISC1 (1 << 1)
+#define AST2700_WDT_RESET4_AHB (1 << 2)
+#define AST2700_WDT_RESET4_SLI1 (1 << 3)
+#define AST2700_WDT_RESET4_UART0 (1 << 4)
+#define AST2700_WDT_RESET4_UART1 (1 << 5)
+#define AST2700_WDT_RESET4_UART2 (1 << 6)
+#define AST2700_WDT_RESET4_UART3 (1 << 7)
+#define AST2700_WDT_RESET4_I2C_MONITOR (1 << 8)
+#define AST2700_WDT_RESET4_HOST_TO_SPI1 (1 << 9)
+#define AST2700_WDT_RESET4_HOST_TO_SPI2 (1 << 10)
+#define AST2700_WDT_RESET4_GPIO1 (1 << 11)
+#define AST2700_WDT_RESET4_FSI (1 << 12)
+#define AST2700_WDT_RESET4_CANBUS (1 << 13)
+#define AST2700_WDT_RESET4_MCTP (1 << 14)
+#define AST2700_WDT_RESET4_XDMA (1 << 15)
+#define AST2700_WDT_RESET4_UART5 (1 << 16)
+#define AST2700_WDT_RESET4_UART6 (1 << 17)
+#define AST2700_WDT_RESET4_UART7 (1 << 18)
+#define AST2700_WDT_RESET4_UART8 (1 << 19)
+#define AST2700_WDT_RESET4_BOOT_MCU (1 << 20)
+#define AST2700_WDT_RESET4_IO_MCU (1 << 21)
+#define AST2700_WDT_RESET4_LTPI0 (1 << 22)
+#define AST2700_WDT_RESET4_VGA_LINK (1 << 23)
+#define AST2700_WDT_RESET4_LTPI1 (1 << 24)
+#define AST2700_WDT_RESET4_LTPI_PHY (1 << 25)
+#define AST2700_WDT_RESET4_ACE (1 << 26)
+#define AST2700_WDT_RESET4_LTPI_GPIO0 (1 << 28)
+#define AST2700_WDT_RESET4_LTPI_GPIO1 (1 << 29)
+#define AST2700_WDT_RESET4_AHB_TO_PCIE1 (1 << 30)
+#define AST2700_WDT_RESET4_I3C_DMA (1 << 31)
+
+#define AST2700_WDT_RESET4_DEFAULT 0x40303803
+
+#define AST2700_WDT_RESET5_I2C_GLOBAL (1 << 0)
+#define AST2700_WDT_RESET5_I2C0 (1 << 1)
+#define AST2700_WDT_RESET5_I2C1 (1 << 2)
+#define AST2700_WDT_RESET5_I2C2 (1 << 3)
+#define AST2700_WDT_RESET5_I2C3 (1 << 4)
+#define AST2700_WDT_RESET5_I2C4 (1 << 5)
+#define AST2700_WDT_RESET5_I2C5 (1 << 6)
+#define AST2700_WDT_RESET5_I2C6 (1 << 7)
+#define AST2700_WDT_RESET5_I2C7 (1 << 8)
+#define AST2700_WDT_RESET5_I2C8 (1 << 9)
+#define AST2700_WDT_RESET5_I2C9 (1 << 10)
+#define AST2700_WDT_RESET5_I2C10 (1 << 11)
+#define AST2700_WDT_RESET5_I2C11 (1 << 12)
+#define AST2700_WDT_RESET5_I2C12 (1 << 13)
+#define AST2700_WDT_RESET5_I2C13 (1 << 14)
+#define AST2700_WDT_RESET5_I2C14 (1 << 15)
+#define AST2700_WDT_RESET5_I2C15 (1 << 16)
+#define AST2700_WDT_RESET5_UHCI (1 << 17)
+#define AST2700_WDT_RESET5_USB2_C_UART (1 << 18)
+#define AST2700_WDT_RESET5_USB2_C (1 << 19)
+#define AST2700_WDT_RESET5_USB2_D_UART (1 << 20)
+#define AST2700_WDT_RESET5_USB2_D (1 << 21)
+
+#define AST2700_WDT_RESET5_DEFAULT 0x00320000
+
+#endif
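
The *_DEFAULT values above are simply the OR of a chosen set of the per-peripheral bits, so a board can compose its own reset mask the same way. A minimal, self-contained C sketch using a subset of the AST2500 bits copied from above; which peripherals to include is the illustrative part:

#include <stdio.h>
#include <stdint.h>

/* A few of the AST2500 reset bits copied from aspeed-wdt.h above */
#define AST2500_WDT_RESET_CPU      (1 << 0)
#define AST2500_WDT_RESET_COPROC   (1 << 1)
#define AST2500_WDT_RESET_I2C      (1 << 4)
#define AST2500_WDT_RESET_MAC0     (1 << 5)
#define AST2500_WDT_RESET_MAC1     (1 << 6)

int main(void)
{
	/* A board that only wants the CPU and both MACs reset on watchdog
	 * expiry ORs together exactly those bits. */
	uint32_t mask = AST2500_WDT_RESET_CPU |
			AST2500_WDT_RESET_MAC0 |
			AST2500_WDT_RESET_MAC1;

	printf("reset mask = 0x%08x\n", (unsigned int)mask); /* 0x00000061 */
	return 0;
}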
diff --git a/include/hyperv/hvgdk.h b/include/hyperv/hvgdk.h
new file mode 100644
index 000000000000..dd6d4939ea29
--- /dev/null
+++ b/include/hyperv/hvgdk.h
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Type definitions for the Microsoft Hypervisor.
+ */
+#ifndef _HV_HVGDK_H
+#define _HV_HVGDK_H
+
+#include "hvgdk_mini.h"
+#include "hvgdk_ext.h"
+
+/*
+ * The guest OS needs to register the guest ID with the hypervisor.
+ * The guest ID is a 64-bit entity whose structure is specified in
+ * the Hyper-V TLFS.
+ *
+ * While the current guideline does not specify how Linux guest IDs
+ * need to be generated, our plan is to publish guidelines for Linux
+ * and other guest operating systems that are currently hosted on
+ * Hyper-V. The implementation here conforms to these as-yet
+ * unpublished guidelines.
+ *
+ * Bit(s)
+ * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
+ * 62:56 - OS type; Linux is 0x100
+ * 55:48 - Distro specific identification
+ * 47:16 - Linux kernel version number
+ * 15:0 - Distro specific identification
+ */
+
+#define HV_LINUX_VENDOR_ID 0x8100
+
+/* HV_VMX_ENLIGHTENED_VMCS */
+struct hv_enlightened_vmcs {
+ u32 revision_id;
+ u32 abort;
+
+ u16 host_es_selector;
+ u16 host_cs_selector;
+ u16 host_ss_selector;
+ u16 host_ds_selector;
+ u16 host_fs_selector;
+ u16 host_gs_selector;
+ u16 host_tr_selector;
+
+ u16 padding16_1;
+
+ u64 host_ia32_pat;
+ u64 host_ia32_efer;
+
+ u64 host_cr0;
+ u64 host_cr3;
+ u64 host_cr4;
+
+ u64 host_ia32_sysenter_esp;
+ u64 host_ia32_sysenter_eip;
+ u64 host_rip;
+ u32 host_ia32_sysenter_cs;
+
+ u32 pin_based_vm_exec_control;
+ u32 vm_exit_controls;
+ u32 secondary_vm_exec_control;
+
+ u64 io_bitmap_a;
+ u64 io_bitmap_b;
+ u64 msr_bitmap;
+
+ u16 guest_es_selector;
+ u16 guest_cs_selector;
+ u16 guest_ss_selector;
+ u16 guest_ds_selector;
+ u16 guest_fs_selector;
+ u16 guest_gs_selector;
+ u16 guest_ldtr_selector;
+ u16 guest_tr_selector;
+
+ u32 guest_es_limit;
+ u32 guest_cs_limit;
+ u32 guest_ss_limit;
+ u32 guest_ds_limit;
+ u32 guest_fs_limit;
+ u32 guest_gs_limit;
+ u32 guest_ldtr_limit;
+ u32 guest_tr_limit;
+ u32 guest_gdtr_limit;
+ u32 guest_idtr_limit;
+
+ u32 guest_es_ar_bytes;
+ u32 guest_cs_ar_bytes;
+ u32 guest_ss_ar_bytes;
+ u32 guest_ds_ar_bytes;
+ u32 guest_fs_ar_bytes;
+ u32 guest_gs_ar_bytes;
+ u32 guest_ldtr_ar_bytes;
+ u32 guest_tr_ar_bytes;
+
+ u64 guest_es_base;
+ u64 guest_cs_base;
+ u64 guest_ss_base;
+ u64 guest_ds_base;
+ u64 guest_fs_base;
+ u64 guest_gs_base;
+ u64 guest_ldtr_base;
+ u64 guest_tr_base;
+ u64 guest_gdtr_base;
+ u64 guest_idtr_base;
+
+ u64 padding64_1[3];
+
+ u64 vm_exit_msr_store_addr;
+ u64 vm_exit_msr_load_addr;
+ u64 vm_entry_msr_load_addr;
+
+ u64 cr3_target_value0;
+ u64 cr3_target_value1;
+ u64 cr3_target_value2;
+ u64 cr3_target_value3;
+
+ u32 page_fault_error_code_mask;
+ u32 page_fault_error_code_match;
+
+ u32 cr3_target_count;
+ u32 vm_exit_msr_store_count;
+ u32 vm_exit_msr_load_count;
+ u32 vm_entry_msr_load_count;
+
+ u64 tsc_offset;
+ u64 virtual_apic_page_addr;
+ u64 vmcs_link_pointer;
+
+ u64 guest_ia32_debugctl;
+ u64 guest_ia32_pat;
+ u64 guest_ia32_efer;
+
+ u64 guest_pdptr0;
+ u64 guest_pdptr1;
+ u64 guest_pdptr2;
+ u64 guest_pdptr3;
+
+ u64 guest_pending_dbg_exceptions;
+ u64 guest_sysenter_esp;
+ u64 guest_sysenter_eip;
+
+ u32 guest_activity_state;
+ u32 guest_sysenter_cs;
+
+ u64 cr0_guest_host_mask;
+ u64 cr4_guest_host_mask;
+ u64 cr0_read_shadow;
+ u64 cr4_read_shadow;
+ u64 guest_cr0;
+ u64 guest_cr3;
+ u64 guest_cr4;
+ u64 guest_dr7;
+
+ u64 host_fs_base;
+ u64 host_gs_base;
+ u64 host_tr_base;
+ u64 host_gdtr_base;
+ u64 host_idtr_base;
+ u64 host_rsp;
+
+ u64 ept_pointer;
+
+ u16 virtual_processor_id;
+ u16 padding16_2[3];
+
+ u64 padding64_2[5];
+ u64 guest_physical_address;
+
+ u32 vm_instruction_error;
+ u32 vm_exit_reason;
+ u32 vm_exit_intr_info;
+ u32 vm_exit_intr_error_code;
+ u32 idt_vectoring_info_field;
+ u32 idt_vectoring_error_code;
+ u32 vm_exit_instruction_len;
+ u32 vmx_instruction_info;
+
+ u64 exit_qualification;
+ u64 exit_io_instruction_ecx;
+ u64 exit_io_instruction_esi;
+ u64 exit_io_instruction_edi;
+ u64 exit_io_instruction_eip;
+
+ u64 guest_linear_address;
+ u64 guest_rsp;
+ u64 guest_rflags;
+
+ u32 guest_interruptibility_info;
+ u32 cpu_based_vm_exec_control;
+ u32 exception_bitmap;
+ u32 vm_entry_controls;
+ u32 vm_entry_intr_info_field;
+ u32 vm_entry_exception_error_code;
+ u32 vm_entry_instruction_len;
+ u32 tpr_threshold;
+
+ u64 guest_rip;
+
+ u32 hv_clean_fields;
+ u32 padding32_1;
+ u32 hv_synthetic_controls;
+ struct {
+ u32 nested_flush_hypercall:1;
+ u32 msr_bitmap:1;
+ u32 reserved:30;
+ } __packed hv_enlightenments_control;
+ u32 hv_vp_id;
+ u32 padding32_2;
+ u64 hv_vm_id;
+ u64 partition_assist_page;
+ u64 padding64_4[4];
+ u64 guest_bndcfgs;
+ u64 guest_ia32_perf_global_ctrl;
+ u64 guest_ia32_s_cet;
+ u64 guest_ssp;
+ u64 guest_ia32_int_ssp_table_addr;
+ u64 guest_ia32_lbr_ctl;
+ u64 padding64_5[2];
+ u64 xss_exit_bitmap;
+ u64 encls_exiting_bitmap;
+ u64 host_ia32_perf_global_ctrl;
+ u64 tsc_multiplier;
+ u64 host_ia32_s_cet;
+ u64 host_ssp;
+ u64 host_ia32_int_ssp_table_addr;
+ u64 padding64_6;
+} __packed;
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE 0
+
+
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP BIT(0)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP BIT(1)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2 BIT(2)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1 BIT(3)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC BIT(4)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT BIT(5)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY BIT(6)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN BIT(7)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR BIT(8)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT BIT(9)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC BIT(10)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1 BIT(11)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2 BIT(12)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER BIT(13)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1 BIT(14)
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL BIT(15)
+
+#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL 0xFFFF
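
The clean-field bits implement a dirty-tracking contract: the guest clears a group's bit after modifying any field in that group, and the hypervisor may skip re-reading groups whose bits are still set. A self-contained sketch of the guest side of that contract, using a reduced stand-in struct rather than the full hv_enlightened_vmcs (illustrative, not the KVM code):

#include <stdio.h>
#include <stdint.h>

/* Clean-field bits copied from hvgdk.h above (subset) */
#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP (1u << 1)
#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL        0xFFFFu

/* Reduced stand-in for struct hv_enlightened_vmcs */
struct evmcs_stub {
	uint64_t msr_bitmap;
	uint32_t hv_clean_fields;
};

int main(void)
{
	struct evmcs_stub evmcs = {
		.hv_clean_fields = HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL,
	};

	/* After modifying a field group, clear its clean bit so the
	 * hypervisor knows to re-read that group on the next VM entry. */
	evmcs.msr_bitmap = 0x1000;
	evmcs.hv_clean_fields &= ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;

	printf("clean fields now 0x%04x (MSR bitmap group marked dirty)\n",
	       evmcs.hv_clean_fields); /* prints 0xfffd */
	return 0;
}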
+
+/*
+ * Note, Hyper-V isn't actually stealing bit 28 from Intel, just abusing it by
+ * pairing it with architecturally impossible exit reasons. Bit 28 is set only
+ * on SMI exits to an SMI transfer monitor (STM) and if and only if an MTF VM-Exit
+ * is pending. I.e. it will never be set by hardware for non-SMI exits (there
+ * are only three), nor will it ever be set unless the VMM is an STM.
+ */
+#define HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH 0x10000031
+
+/*
+ * Hyper-V uses the 32 software-reserved bytes in the VMCB control area to
+ * expose SVM enlightenments to guests, as documented in the TLFS.
+ * Note on naming: SVM_NESTED_ENLIGHTENED_VMCB_FIELDS
+ */
+struct hv_vmcb_enlightenments {
+ struct __packed hv_enlightenments_control {
+ u32 nested_flush_hypercall : 1;
+ u32 msr_bitmap : 1;
+ u32 enlightened_npt_tlb: 1;
+ u32 reserved : 29;
+ } __packed hv_enlightenments_control;
+ u32 hv_vp_id;
+ u64 hv_vm_id;
+ u64 partition_assist_page;
+ u64 reserved;
+} __packed;
+
+/*
+ * Hyper-V uses the software reserved clean bit in VMCB.
+ */
+#define HV_VMCB_NESTED_ENLIGHTENMENTS 31
+
+/* Synthetic VM-Exit */
+#define HV_SVM_EXITCODE_ENL 0xf0000000
+#define HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH (1)
+
+/* VM_PARTITION_ASSIST_PAGE */
+struct hv_partition_assist_pg {
+ u32 tlb_lock_count;
+};
+
+/* Define connection identifier type. */
+union hv_connection_id {
+ u32 asu32;
+ struct {
+ u32 id : 24;
+ u32 reserved : 8;
+ } __packed u;
+};
+
+struct hv_input_unmap_gpa_pages {
+ u64 target_partition_id;
+ u64 target_gpa_base;
+ u32 unmap_flags;
+ u32 padding;
+} __packed;
+
+#endif /* #ifndef _HV_HVGDK_H */
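
A minimal sketch of composing a guest ID according to the bit layout documented at the top of this header; the helper name and the sample version encoding are illustrative (the kernel carries a similar hv_generate_guest_id() helper):

#include <stdio.h>
#include <stdint.h>

#define HV_LINUX_VENDOR_ID 0x8100 /* from hvgdk.h above */

/* Compose a guest ID per the documented layout: vendor/OS information in
 * bits 63:48, the kernel version in bits 47:16, distro-specific
 * identification in bits 15:0. */
static uint64_t generate_guest_id(uint64_t kernel_version, uint64_t distro_id)
{
	return ((uint64_t)HV_LINUX_VENDOR_ID << 48) |
	       (kernel_version << 16) |
	       (distro_id & 0xffff);
}

int main(void)
{
	/* e.g. kernel 6.8.0, encoded KERNEL_VERSION()-style (an assumption
	 * for the demo, not a mandated encoding) */
	uint64_t id = generate_guest_id((6 << 16) | (8 << 8) | 0, 0);

	printf("guest id = 0x%016llx\n", (unsigned long long)id);
	return 0;
}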
diff --git a/include/hyperv/hvgdk_ext.h b/include/hyperv/hvgdk_ext.h
new file mode 100644
index 000000000000..641b591ee61f
--- /dev/null
+++ b/include/hyperv/hvgdk_ext.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Type definitions for the Microsoft Hypervisor.
+ */
+#ifndef _HV_HVGDK_EXT_H
+#define _HV_HVGDK_EXT_H
+
+#include "hvgdk_mini.h"
+
+/* Extended hypercalls */
+#define HV_EXT_CALL_QUERY_CAPABILITIES 0x8001
+#define HV_EXT_CALL_MEMORY_HEAT_HINT 0x8003
+
+/* The same extended hypercall codes, in enum form */
+enum { /* HV_EXT_CALL */
+ HV_EXTCALL_QUERY_CAPABILITIES = 0x8001,
+ HV_EXTCALL_MEMORY_HEAT_HINT = 0x8003,
+};
+
+/* HV_EXT_OUTPUT_QUERY_CAPABILITIES */
+#define HV_EXT_CAPABILITY_MEMORY_COLD_DISCARD_HINT BIT(8)
+
+enum { /* HV_EXT_MEMORY_HEAT_HINT_TYPE */
+ HV_EXTMEM_HEAT_HINT_COLD = 0,
+ HV_EXTMEM_HEAT_HINT_HOT = 1,
+ HV_EXTMEM_HEAT_HINT_COLD_DISCARD = 2,
+ HV_EXTMEM_HEAT_HINT_MAX
+};
+
+/*
+ * The whole argument must fit in a single page so that it can be passed to
+ * the hypervisor in one hypercall.
+ */
+#define HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES \
+ ((HV_HYP_PAGE_SIZE - sizeof(struct hv_memory_hint)) / \
+ sizeof(union hv_gpa_page_range))
+
+/* HvExtCallMemoryHeatHint hypercall */
+#define HV_EXT_MEMORY_HEAT_HINT_TYPE_COLD_DISCARD 2
+struct hv_memory_hint { /* HV_EXT_INPUT_MEMORY_HEAT_HINT */
+ u64 heat_type : 2; /* HV_EXTMEM_HEAT_HINT_* */
+ u64 reserved : 62;
+ union hv_gpa_page_range ranges[];
+} __packed;
+
+#endif /* _HV_HVGDK_EXT_H */
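
The HV_MEMORY_HINT_MAX_GPA_PAGE_RANGES arithmetic above can be checked with a small stand-alone program; the stand-in types below assume, as in the kernel definitions, that the hint header is one u64 of bitfields and that each union hv_gpa_page_range encodes as a single u64:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define HV_HYP_PAGE_SIZE 4096 /* from hvgdk_mini.h */

/* Stand-in for struct hv_memory_hint: one u64 header (heat_type:2,
 * reserved:62), followed by a flexible array of 8-byte ranges. */
struct hv_memory_hint_stub {
	uint64_t heat_type_and_reserved;
};

int main(void)
{
	size_t max_ranges = (HV_HYP_PAGE_SIZE -
			     sizeof(struct hv_memory_hint_stub)) /
			    sizeof(uint64_t);

	/* (4096 - 8) / 8 = 511 ranges per hypercall page */
	printf("max GPA page ranges per hypercall: %zu\n", max_ranges);
	return 0;
}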
diff --git a/include/hyperv/hvgdk_mini.h b/include/hyperv/hvgdk_mini.h
new file mode 100644
index 000000000000..04b18d0e37af
--- /dev/null
+++ b/include/hyperv/hvgdk_mini.h
@@ -0,0 +1,1528 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Type definitions for the Microsoft hypervisor.
+ */
+#ifndef _HV_HVGDK_MINI_H
+#define _HV_HVGDK_MINI_H
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+struct hv_u128 {
+ u64 low_part;
+ u64 high_part;
+} __packed;
+
+/* NOTE: when adding below, update hv_result_to_string() */
+#define HV_STATUS_SUCCESS 0x0
+#define HV_STATUS_INVALID_HYPERCALL_CODE 0x2
+#define HV_STATUS_INVALID_HYPERCALL_INPUT 0x3
+#define HV_STATUS_INVALID_ALIGNMENT 0x4
+#define HV_STATUS_INVALID_PARAMETER 0x5
+#define HV_STATUS_ACCESS_DENIED 0x6
+#define HV_STATUS_INVALID_PARTITION_STATE 0x7
+#define HV_STATUS_OPERATION_DENIED 0x8
+#define HV_STATUS_UNKNOWN_PROPERTY 0x9
+#define HV_STATUS_PROPERTY_VALUE_OUT_OF_RANGE 0xA
+#define HV_STATUS_INSUFFICIENT_MEMORY 0xB
+#define HV_STATUS_INVALID_PARTITION_ID 0xD
+#define HV_STATUS_INVALID_VP_INDEX 0xE
+#define HV_STATUS_NOT_FOUND 0x10
+#define HV_STATUS_INVALID_PORT_ID 0x11
+#define HV_STATUS_INVALID_CONNECTION_ID 0x12
+#define HV_STATUS_INSUFFICIENT_BUFFERS 0x13
+#define HV_STATUS_NOT_ACKNOWLEDGED 0x14
+#define HV_STATUS_INVALID_VP_STATE 0x15
+#define HV_STATUS_NO_RESOURCES 0x1D
+#define HV_STATUS_PROCESSOR_FEATURE_NOT_SUPPORTED 0x20
+#define HV_STATUS_INVALID_LP_INDEX 0x41
+#define HV_STATUS_INVALID_REGISTER_VALUE 0x50
+#define HV_STATUS_OPERATION_FAILED 0x71
+#define HV_STATUS_TIME_OUT 0x78
+#define HV_STATUS_CALL_PENDING 0x79
+#define HV_STATUS_VTL_ALREADY_ENABLED 0x86
+
+/*
+ * The Hyper-V TimeRefCount register and the TSC
+ * page provide a guest VM clock with 100ns tick rate
+ */
+#define HV_CLOCK_HZ (NSEC_PER_SEC / 100)
+
+#define HV_HYP_PAGE_SHIFT 12
+#define HV_HYP_PAGE_SIZE BIT(HV_HYP_PAGE_SHIFT)
+#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1))
+#define HV_HYP_LARGE_PAGE_SHIFT 21
+
+#define HV_PARTITION_ID_INVALID ((u64)0)
+#define HV_PARTITION_ID_SELF ((u64)-1)
+
+/* Hyper-V specific model specific registers (MSRs) */
+
+#if defined(CONFIG_X86)
+/* HV_X64_SYNTHETIC_MSR */
+#define HV_X64_MSR_GUEST_OS_ID 0x40000000
+#define HV_X64_MSR_HYPERCALL 0x40000001
+#define HV_X64_MSR_VP_INDEX 0x40000002
+#define HV_X64_MSR_RESET 0x40000003
+#define HV_X64_MSR_VP_RUNTIME 0x40000010
+#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
+#define HV_X64_MSR_REFERENCE_TSC 0x40000021
+#define HV_X64_MSR_TSC_FREQUENCY 0x40000022
+#define HV_X64_MSR_APIC_FREQUENCY 0x40000023
+
+/* Define the virtual APIC registers */
+#define HV_X64_MSR_EOI 0x40000070
+#define HV_X64_MSR_ICR 0x40000071
+#define HV_X64_MSR_TPR 0x40000072
+#define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073
+
+/* Define synthetic interrupt controller model specific registers. */
+#define HV_X64_MSR_SCONTROL 0x40000080
+#define HV_X64_MSR_SVERSION 0x40000081
+#define HV_X64_MSR_SIEFP 0x40000082
+#define HV_X64_MSR_SIMP 0x40000083
+#define HV_X64_MSR_EOM 0x40000084
+#define HV_X64_MSR_SIRBP 0x40000085
+#define HV_X64_MSR_SINT0 0x40000090
+#define HV_X64_MSR_SINT1 0x40000091
+#define HV_X64_MSR_SINT2 0x40000092
+#define HV_X64_MSR_SINT3 0x40000093
+#define HV_X64_MSR_SINT4 0x40000094
+#define HV_X64_MSR_SINT5 0x40000095
+#define HV_X64_MSR_SINT6 0x40000096
+#define HV_X64_MSR_SINT7 0x40000097
+#define HV_X64_MSR_SINT8 0x40000098
+#define HV_X64_MSR_SINT9 0x40000099
+#define HV_X64_MSR_SINT10 0x4000009A
+#define HV_X64_MSR_SINT11 0x4000009B
+#define HV_X64_MSR_SINT12 0x4000009C
+#define HV_X64_MSR_SINT13 0x4000009D
+#define HV_X64_MSR_SINT14 0x4000009E
+#define HV_X64_MSR_SINT15 0x4000009F
+
+/* Define synthetic interrupt controller model specific registers for nested hypervisor */
+#define HV_X64_MSR_NESTED_SCONTROL 0x40001080
+#define HV_X64_MSR_NESTED_SVERSION 0x40001081
+#define HV_X64_MSR_NESTED_SIEFP 0x40001082
+#define HV_X64_MSR_NESTED_SIMP 0x40001083
+#define HV_X64_MSR_NESTED_EOM 0x40001084
+#define HV_X64_MSR_NESTED_SINT0 0x40001090
+
+/*
+ * Synthetic Timer MSRs. Four timers per vcpu.
+ */
+#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0
+#define HV_X64_MSR_STIMER0_COUNT 0x400000B1
+#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2
+#define HV_X64_MSR_STIMER1_COUNT 0x400000B3
+#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4
+#define HV_X64_MSR_STIMER2_COUNT 0x400000B5
+#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6
+#define HV_X64_MSR_STIMER3_COUNT 0x400000B7
+
+/* Hyper-V guest idle MSR */
+#define HV_X64_MSR_GUEST_IDLE 0x400000F0
+
+/* Hyper-V guest crash notification MSR's */
+#define HV_X64_MSR_CRASH_P0 0x40000100
+#define HV_X64_MSR_CRASH_P1 0x40000101
+#define HV_X64_MSR_CRASH_P2 0x40000102
+#define HV_X64_MSR_CRASH_P3 0x40000103
+#define HV_X64_MSR_CRASH_P4 0x40000104
+#define HV_X64_MSR_CRASH_CTL 0x40000105
+
+#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001
+#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12
+#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \
+ (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
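
The enable bit and page-address mask above combine as follows when a guest programs HV_X64_MSR_HYPERCALL; the guest-physical address in this self-contained sketch is hypothetical:

#include <stdio.h>
#include <stdint.h>

/* From hvgdk_mini.h above */
#define HV_X64_MSR_HYPERCALL_ENABLE		0x00000001
#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT	12
#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \
	(~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))

int main(void)
{
	/* Hypothetical guest-physical address of the hypercall page */
	uint64_t gpa = 0x12345000;

	/* Program the page frame into bits 63:12 and set the enable bit;
	 * bits 11:1 stay reserved. */
	uint64_t msr = (gpa & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK) |
		       HV_X64_MSR_HYPERCALL_ENABLE;

	printf("HV_X64_MSR_HYPERCALL value: 0x%016llx\n",
	       (unsigned long long)msr);
	return 0;
}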
+
+#define HV_X64_MSR_CRASH_PARAMS \
+ (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0))
+
+#define HV_IPI_LOW_VECTOR 0x10
+#define HV_IPI_HIGH_VECTOR 0xff
+
+#define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE 0x00000001
+#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT 12
+#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK \
+ (~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
+
+/* Hyper-V Enlightened VMCS version mask in nested features CPUID */
+#define HV_X64_ENLIGHTENED_VMCS_VERSION 0xff
+
+#define HV_X64_MSR_TSC_REFERENCE_ENABLE 0x00000001
+#define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT 12
+
+/* Number of XMM registers used in hypercall input/output */
+#define HV_HYPERCALL_MAX_XMM_REGISTERS 6
+
+struct hv_reenlightenment_control {
+ u64 vector : 8;
+ u64 reserved1 : 8;
+ u64 enabled : 1;
+ u64 reserved2 : 15;
+ u64 target_vp : 32;
+} __packed;
+
+struct hv_tsc_emulation_status { /* HV_TSC_EMULATION_STATUS */
+ u64 inprogress : 1;
+ u64 reserved : 63;
+} __packed;
+
+struct hv_tsc_emulation_control { /* HV_TSC_INVARIANT_CONTROL */
+ u64 enabled : 1;
+ u64 reserved : 63;
+} __packed;
+
+/* TSC emulation after migration */
+#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
+#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107
+#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108
+#define HV_X64_MSR_TSC_INVARIANT_CONTROL 0x40000118
+#define HV_EXPOSE_INVARIANT_TSC BIT_ULL(0)
+
+#endif /* CONFIG_X86 */
+
+struct hv_output_get_partition_id {
+ u64 partition_id;
+} __packed;
+
+/* HV_CRASH_CTL_REG_CONTENTS */
+#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62)
+#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63)
+
+union hv_reference_tsc_msr {
+ u64 as_uint64;
+ struct {
+ u64 enable : 1;
+ u64 reserved : 11;
+ u64 pfn : 52;
+ } __packed;
+};
+
+/* The maximum number of sparse vCPU banks which can be encoded by 'struct hv_vpset' */
+#define HV_MAX_SPARSE_VCPU_BANKS (64)
+/* The number of vCPUs in one sparse bank */
+#define HV_VCPUS_PER_SPARSE_BANK (64)
+
+/*
+ * Some Hyper-V structs do not use hv_vpset even where Linux does.
+ *
+ * struct hv_vpset is usually used as part of hypercall input. The portion
+ * that counts as "fixed size input header" vs. "variable size input header"
+ * varies per hypercall. See comments at relevant hypercall call sites as to
+ * how the "valid_bank_mask" field should be accounted.
+ */
+struct hv_vpset { /* HV_VP_SET */
+ u64 format;
+ u64 valid_bank_mask;
+ u64 bank_contents[];
+} __packed;
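
A worked example of the sparse bank encoding: each bank covers 64 vCPUs, valid_bank_mask says which banks are present, and bank_contents[] carries only those banks, in bank order. The demo below fixes the flexible array to two banks and assumes format 0 denotes the sparse 4k-bank set format described in the TLFS:

#include <stdio.h>
#include <stdint.h>

/* Layout mirrors struct hv_vpset above; fixed to two banks for the demo. */
struct vpset_demo {
	uint64_t format;		/* assumed: 0 == sparse bank format */
	uint64_t valid_bank_mask;	/* bit n set => bank n is present */
	uint64_t bank_contents[2];
};

int main(void)
{
	/* Encode vCPUs 1 and 65: vCPU 1 lives in bank 0 (bit 1), vCPU 65
	 * lives in bank 1 (bit 65 - 64 = 1). */
	struct vpset_demo set = {
		.format = 0,
		.valid_bank_mask = (1ull << 0) | (1ull << 1),
		.bank_contents = { 1ull << 1, 1ull << (65 - 64) },
	};

	printf("valid_bank_mask=0x%llx bank0=0x%llx bank1=0x%llx\n",
	       (unsigned long long)set.valid_bank_mask,
	       (unsigned long long)set.bank_contents[0],
	       (unsigned long long)set.bank_contents[1]);
	return 0;
}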
+
+/*
+ * Version info reported by the hypervisor.
+ * Defined as a union for convenient access to the raw CPUID register values.
+ */
+union hv_hypervisor_version_info {
+ struct {
+ u32 build_number;
+
+ u32 minor_version : 16;
+ u32 major_version : 16;
+
+ u32 service_pack;
+
+ u32 service_number : 24;
+ u32 service_branch : 8;
+ };
+ struct {
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+ };
+};
+
+/* HV_CPUID_FUNCTION */
+#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000
+#define HYPERV_CPUID_INTERFACE 0x40000001
+#define HYPERV_CPUID_VERSION 0x40000002
+#define HYPERV_CPUID_FEATURES 0x40000003
+#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004
+#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005
+#define HYPERV_CPUID_CPU_MANAGEMENT_FEATURES 0x40000007
+#define HYPERV_CPUID_NESTED_FEATURES 0x4000000A
+#define HYPERV_CPUID_ISOLATION_CONFIG 0x4000000C
+
+#define HYPERV_CPUID_VIRT_STACK_INTERFACE 0x40000081
+#define HYPERV_VS_INTERFACE_EAX_SIGNATURE 0x31235356 /* "VS#1" */
+
+#define HYPERV_CPUID_VIRT_STACK_PROPERTIES 0x40000082
+/* Support for the extended IOAPIC RTE format */
+#define HYPERV_VS_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE BIT(2)
+#define HYPERV_VS_PROPERTIES_EAX_CONFIDENTIAL_VMBUS_AVAILABLE BIT(3)
+
+#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000
+#define HYPERV_CPUID_MIN 0x40000005
+#define HYPERV_CPUID_MAX 0x4000ffff
+
+/*
+ * HV_X64_HYPERVISOR_FEATURES (EAX), or
+ * HV_PARTITION_PRIVILEGE_MASK [31-0]
+ */
+#define HV_MSR_VP_RUNTIME_AVAILABLE BIT(0)
+#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1)
+#define HV_MSR_SYNIC_AVAILABLE BIT(2)
+#define HV_MSR_SYNTIMER_AVAILABLE BIT(3)
+#define HV_MSR_APIC_ACCESS_AVAILABLE BIT(4)
+#define HV_MSR_HYPERCALL_AVAILABLE BIT(5)
+#define HV_MSR_VP_INDEX_AVAILABLE BIT(6)
+#define HV_MSR_RESET_AVAILABLE BIT(7)
+#define HV_MSR_STAT_PAGES_AVAILABLE BIT(8)
+#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9)
+#define HV_MSR_GUEST_IDLE_AVAILABLE BIT(10)
+#define HV_ACCESS_FREQUENCY_MSRS BIT(11)
+#define HV_ACCESS_REENLIGHTENMENT BIT(13)
+#define HV_ACCESS_TSC_INVARIANT BIT(15)
+
+/*
+ * HV_X64_HYPERVISOR_FEATURES (EBX), or
+ * HV_PARTITION_PRIVILEGE_MASK [63-32]
+ */
+#define HV_CREATE_PARTITIONS BIT(0)
+#define HV_ACCESS_PARTITION_ID BIT(1)
+#define HV_ACCESS_MEMORY_POOL BIT(2)
+#define HV_ADJUST_MESSAGE_BUFFERS BIT(3)
+#define HV_POST_MESSAGES BIT(4)
+#define HV_SIGNAL_EVENTS BIT(5)
+#define HV_CREATE_PORT BIT(6)
+#define HV_CONNECT_PORT BIT(7)
+#define HV_ACCESS_STATS BIT(8)
+#define HV_DEBUGGING BIT(11)
+#define HV_CPU_MANAGEMENT BIT(12)
+#define HV_ENABLE_EXTENDED_HYPERCALLS BIT(20)
+#define HV_ISOLATION BIT(22)
+
+#if defined(CONFIG_X86)
+/* HV_X64_HYPERVISOR_FEATURES (EDX) */
+#define HV_X64_MWAIT_AVAILABLE BIT(0)
+#define HV_X64_GUEST_DEBUGGING_AVAILABLE BIT(1)
+#define HV_X64_PERF_MONITOR_AVAILABLE BIT(2)
+#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE BIT(3)
+#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE BIT(4)
+#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5)
+#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8)
+#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10)
+#define HV_FEATURE_DEBUG_MSRS_AVAILABLE BIT(11)
+#define HV_FEATURE_EXT_GVA_RANGES_FLUSH BIT(14)
+/*
+ * Support for returning hypercall output block via XMM
+ * registers is available
+ */
+#define HV_X64_HYPERCALL_XMM_OUTPUT_AVAILABLE BIT(15)
+/* stimer Direct Mode is available */
+#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19)
+
+/*
+ * Implementation recommendations. Indicates which behaviors the hypervisor
+ * recommends the OS implement for optimal performance.
+ * These are HYPERV_CPUID_ENLIGHTMENT_INFO.EAX bits.
+ */
+/* HV_X64_ENLIGHTENMENT_INFORMATION */
+#define HV_X64_AS_SWITCH_RECOMMENDED BIT(0)
+#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED BIT(1)
+#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED BIT(2)
+#define HV_X64_APIC_ACCESS_RECOMMENDED BIT(3)
+#define HV_X64_SYSTEM_RESET_RECOMMENDED BIT(4)
+#define HV_X64_RELAXED_TIMING_RECOMMENDED BIT(5)
+#define HV_DEPRECATING_AEOI_RECOMMENDED BIT(9)
+#define HV_X64_CLUSTER_IPI_RECOMMENDED BIT(10)
+#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED BIT(11)
+#define HV_X64_HYPERV_NESTED BIT(12)
+#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14)
+#define HV_X64_USE_MMIO_HYPERCALLS BIT(21)
+
+/*
+ * CPU management features identification.
+ * These are HYPERV_CPUID_CPU_MANAGEMENT_FEATURES.EAX bits.
+ */
+#define HV_X64_START_LOGICAL_PROCESSOR BIT(0)
+#define HV_X64_CREATE_ROOT_VIRTUAL_PROCESSOR BIT(1)
+#define HV_X64_PERFORMANCE_COUNTER_SYNC BIT(2)
+#define HV_X64_RESERVED_IDENTITY_BIT BIT(31)
+
+/*
+ * Virtual processor will never share a physical core with another virtual
+ * processor, except for virtual processors that are reported as sibling SMT
+ * threads.
+ */
+#define HV_X64_NO_NONARCH_CORESHARING BIT(18)
+
+/* Nested features. These are HYPERV_CPUID_NESTED_FEATURES.EAX bits. */
+#define HV_X64_NESTED_DIRECT_FLUSH BIT(17)
+#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18)
+#define HV_X64_NESTED_MSR_BITMAP BIT(19)
+
+/* Nested features #2. These are HYPERV_CPUID_NESTED_FEATURES.EBX bits. */
+#define HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL BIT(0)
+
+/*
+ * This is specific to AMD and specifies that enlightened TLB flush is
+ * supported. If the guest opts in to this feature, ASID invalidations
+ * flush only gva -> hpa mapping entries. To flush TLB entries derived
+ * from NPT, hypercalls must be used (HvFlushGuestPhysicalAddressSpace
+ * or HvFlushGuestPhysicalAddressList).
+ */
+#define HV_X64_NESTED_ENLIGHTENED_TLB BIT(22)
+
+/* HYPERV_CPUID_ISOLATION_CONFIG.EAX bits. */
+#define HV_PARAVISOR_PRESENT BIT(0)
+
+/* HYPERV_CPUID_ISOLATION_CONFIG.EBX bits. */
+#define HV_ISOLATION_TYPE GENMASK(3, 0)
+#define HV_SHARED_GPA_BOUNDARY_ACTIVE BIT(5)
+#define HV_SHARED_GPA_BOUNDARY_BITS GENMASK(11, 6)
+
+/* HYPERV_CPUID_FEATURES.ECX bits. */
+#define HV_VP_DISPATCH_INTERRUPT_INJECTION_AVAILABLE BIT(9)
+#define HV_VP_GHCB_ROOT_MAPPING_AVAILABLE BIT(10)
+
+enum hv_isolation_type {
+ HV_ISOLATION_TYPE_NONE = 0, /* HV_PARTITION_ISOLATION_TYPE_NONE */
+ HV_ISOLATION_TYPE_VBS = 1,
+ HV_ISOLATION_TYPE_SNP = 2,
+ HV_ISOLATION_TYPE_TDX = 3
+};
+
+union hv_x64_msr_hypercall_contents {
+ u64 as_uint64;
+ struct {
+ u64 enable : 1;
+ u64 reserved : 11;
+ u64 guest_physical_address : 52;
+ } __packed;
+};
+#endif /* CONFIG_X86 */
+
+#if defined(CONFIG_ARM64)
+#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(8)
+#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(13)
+#endif /* CONFIG_ARM64 */
+
+#if defined(CONFIG_X86)
+#define HV_MAXIMUM_PROCESSORS 2048
+#elif defined(CONFIG_ARM64) /* CONFIG_X86 */
+#define HV_MAXIMUM_PROCESSORS 320
+#endif /* CONFIG_ARM64 */
+
+#define HV_MAX_VP_INDEX (HV_MAXIMUM_PROCESSORS - 1)
+#define HV_VP_INDEX_SELF ((u32)-2)
+#define HV_ANY_VP ((u32)-1)
+
+union hv_vp_assist_msr_contents { /* HV_REGISTER_VP_ASSIST_PAGE */
+ u64 as_uint64;
+ struct {
+ u64 enable : 1;
+ u64 reserved : 11;
+ u64 pfn : 52;
+ } __packed;
+};
+
+/* Declare the various hypercall operations. */
+/* HV_CALL_CODE */
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
+#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
+#define HVCALL_SEND_IPI 0x000b
+#define HVCALL_ENABLE_VP_VTL 0x000f
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
+#define HVCALL_SEND_IPI_EX 0x0015
+#define HVCALL_CREATE_PARTITION 0x0040
+#define HVCALL_INITIALIZE_PARTITION 0x0041
+#define HVCALL_FINALIZE_PARTITION 0x0042
+#define HVCALL_DELETE_PARTITION 0x0043
+#define HVCALL_GET_PARTITION_PROPERTY 0x0044
+#define HVCALL_SET_PARTITION_PROPERTY 0x0045
+#define HVCALL_GET_PARTITION_ID 0x0046
+#define HVCALL_DEPOSIT_MEMORY 0x0048
+#define HVCALL_WITHDRAW_MEMORY 0x0049
+#define HVCALL_MAP_GPA_PAGES 0x004b
+#define HVCALL_UNMAP_GPA_PAGES 0x004c
+#define HVCALL_INSTALL_INTERCEPT 0x004d
+#define HVCALL_CREATE_VP 0x004e
+#define HVCALL_DELETE_VP 0x004f
+#define HVCALL_GET_VP_REGISTERS 0x0050
+#define HVCALL_SET_VP_REGISTERS 0x0051
+#define HVCALL_TRANSLATE_VIRTUAL_ADDRESS 0x0052
+#define HVCALL_CLEAR_VIRTUAL_INTERRUPT 0x0056
+#define HVCALL_DELETE_PORT 0x0058
+#define HVCALL_DISCONNECT_PORT 0x005b
+#define HVCALL_POST_MESSAGE 0x005c
+#define HVCALL_SIGNAL_EVENT 0x005d
+#define HVCALL_POST_DEBUG_DATA 0x0069
+#define HVCALL_RETRIEVE_DEBUG_DATA 0x006a
+#define HVCALL_RESET_DEBUG_SESSION 0x006b
+#define HVCALL_MAP_STATS_PAGE 0x006c
+#define HVCALL_UNMAP_STATS_PAGE 0x006d
+#define HVCALL_SET_SYSTEM_PROPERTY 0x006f
+#define HVCALL_ADD_LOGICAL_PROCESSOR 0x0076
+#define HVCALL_GET_SYSTEM_PROPERTY 0x007b
+#define HVCALL_MAP_DEVICE_INTERRUPT 0x007c
+#define HVCALL_UNMAP_DEVICE_INTERRUPT 0x007d
+#define HVCALL_RETARGET_INTERRUPT 0x007e
+#define HVCALL_NOTIFY_PARTITION_EVENT 0x0087
+#define HVCALL_ENTER_SLEEP_STATE 0x0084
+#define HVCALL_NOTIFY_PORT_RING_EMPTY 0x008b
+#define HVCALL_REGISTER_INTERCEPT_RESULT 0x0091
+#define HVCALL_ASSERT_VIRTUAL_INTERRUPT 0x0094
+#define HVCALL_CREATE_PORT 0x0095
+#define HVCALL_CONNECT_PORT 0x0096
+#define HVCALL_START_VP 0x0099
+#define HVCALL_GET_VP_INDEX_FROM_APIC_ID 0x009a
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
+#define HVCALL_SIGNAL_EVENT_DIRECT 0x00c0
+#define HVCALL_POST_MESSAGE_DIRECT 0x00c1
+#define HVCALL_DISPATCH_VP 0x00c2
+#define HVCALL_GET_GPA_PAGES_ACCESS_STATES 0x00c9
+#define HVCALL_ACQUIRE_SPARSE_SPA_PAGE_HOST_ACCESS 0x00d7
+#define HVCALL_RELEASE_SPARSE_SPA_PAGE_HOST_ACCESS 0x00d8
+#define HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY 0x00db
+#define HVCALL_MAP_VP_STATE_PAGE 0x00e1
+#define HVCALL_UNMAP_VP_STATE_PAGE 0x00e2
+#define HVCALL_GET_VP_STATE 0x00e3
+#define HVCALL_SET_VP_STATE 0x00e4
+#define HVCALL_GET_VP_CPUID_VALUES 0x00f4
+#define HVCALL_GET_PARTITION_PROPERTY_EX 0x0101
+#define HVCALL_MMIO_READ 0x0106
+#define HVCALL_MMIO_WRITE 0x0107
+#define HVCALL_DISABLE_HYP_EX 0x010f
+#define HVCALL_MAP_STATS_PAGE2 0x0131
+
+/* HV_HYPERCALL_INPUT */
+#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0)
+#define HV_HYPERCALL_FAST_BIT BIT(16)
+#define HV_HYPERCALL_VARHEAD_OFFSET 17
+#define HV_HYPERCALL_VARHEAD_MASK GENMASK_ULL(26, 17)
+#define HV_HYPERCALL_RSVD0_MASK GENMASK_ULL(31, 27)
+#define HV_HYPERCALL_NESTED BIT_ULL(31)
+#define HV_HYPERCALL_REP_COMP_OFFSET 32
+#define HV_HYPERCALL_REP_COMP_1 BIT_ULL(32)
+#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32)
+#define HV_HYPERCALL_RSVD1_MASK GENMASK_ULL(47, 44)
+#define HV_HYPERCALL_REP_START_OFFSET 48
+#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48)
+#define HV_HYPERCALL_RSVD2_MASK GENMASK_ULL(63, 60)
+#define HV_HYPERCALL_RSVD_MASK (HV_HYPERCALL_RSVD0_MASK | \
+ HV_HYPERCALL_RSVD1_MASK | \
+ HV_HYPERCALL_RSVD2_MASK)
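+
+/*
+ * Illustrative sketch, not part of the original header: composing the
+ * 64-bit hypercall input value for a rep hypercall. "code" is one of the
+ * HVCALL_* values; the helper name is hypothetical.
+ */
+static inline u64 hv_compose_rep_control(u64 code, u64 varhead_size,
+ u64 rep_count)
+{
+ return code |
+        (varhead_size << HV_HYPERCALL_VARHEAD_OFFSET) |
+        (rep_count << HV_HYPERCALL_REP_COMP_OFFSET);
+}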
+
+/* HvFlushGuestPhysicalAddressSpace hypercalls */
+struct hv_guest_mapping_flush {
+ u64 address_space;
+ u64 flags;
+} __packed;
+
+/*
+ * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It is limited by the
+ * bit width of "additional_pages" in union hv_gpa_page_range.
+ */
+#define HV_MAX_FLUSH_PAGES (2048)
+#define HV_GPA_PAGE_RANGE_PAGE_SIZE_2MB 0
+#define HV_GPA_PAGE_RANGE_PAGE_SIZE_1GB 1
+
+#define HV_FLUSH_ALL_PROCESSORS BIT(0)
+#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
+#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
+#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
+
+/* HvFlushGuestPhysicalAddressList, HvExtCallMemoryHeatHint hypercall */
+union hv_gpa_page_range {
+ u64 address_space;
+ struct {
+ u64 additional_pages : 11;
+ u64 largepage : 1;
+ u64 basepfn : 52;
+ } page;
+ struct {
+ u64 reserved : 12;
+ u64 page_size : 1;
+ u64 reserved1 : 8;
+ u64 base_large_pfn : 43;
+ };
+};
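+
+/*
+ * Illustrative sketch, not part of the original header: encoding a 4K-page
+ * GPA range for HvFlushGuestPhysicalAddressList. "pages" must not exceed
+ * HV_MAX_FLUSH_PAGES (defined above); the helper name is hypothetical.
+ */
+static inline union hv_gpa_page_range hv_encode_gpa_range(u64 gpa, u64 pages)
+{
+ union hv_gpa_page_range range = {};
+
+ range.page.largepage = 0; /* 4K base pages */
+ range.page.basepfn = gpa >> 12; /* 4K page frame number */
+ range.page.additional_pages = pages - 1;
+ return range;
+}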
+
+/*
+ * All input flush parameters must fit in a single page. The maximum flush
+ * count equals the number of union hv_gpa_page_range entries that can be
+ * populated into the input parameter page.
+ */
+#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) / \
+ sizeof(union hv_gpa_page_range))
+
+struct hv_guest_mapping_flush_list {
+ u64 address_space;
+ u64 flags;
+ union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT];
+};
+
+struct hv_tlb_flush { /* HV_INPUT_FLUSH_VIRTUAL_ADDRESS_LIST */
+ u64 address_space;
+ u64 flags;
+ u64 processor_mask;
+ u64 gva_list[];
+} __packed;
+
+/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
+struct hv_tlb_flush_ex {
+ u64 address_space;
+ u64 flags;
+ struct hv_vpset hv_vp_set;
+ u64 gva_list[];
+} __packed;
+
+struct ms_hyperv_tsc_page { /* HV_REFERENCE_TSC_PAGE */
+ volatile u32 tsc_sequence;
+ u32 reserved1;
+ volatile u64 tsc_scale;
+ volatile s64 tsc_offset;
+} __packed;
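+
+/*
+ * Illustrative sketch, not part of the original header: the lock-free read
+ * protocol for the reference TSC page. The sequence is re-read to detect a
+ * concurrent update by the hypervisor (a sequence of 0 marks the page
+ * invalid, not handled here). rdtsc_ordered() and mul_u64_u64_shr() are
+ * the usual x86/Linux helpers.
+ */
+static inline u64 hv_read_reference_time(struct ms_hyperv_tsc_page *tsc_pg)
+{
+ u64 scale, tsc;
+ s64 offset;
+ u32 seq;
+
+ do {
+  seq = READ_ONCE(tsc_pg->tsc_sequence);
+  scale = READ_ONCE(tsc_pg->tsc_scale);
+  offset = READ_ONCE(tsc_pg->tsc_offset);
+  tsc = rdtsc_ordered();
+ } while (READ_ONCE(tsc_pg->tsc_sequence) != seq);
+
+ /* reference time = ((tsc * scale) >> 64) + offset, in 100ns units */
+ return mul_u64_u64_shr(tsc, scale, 64) + offset;
+}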
+
+/* Define the number of synthetic interrupt sources. */
+#define HV_SYNIC_SINT_COUNT (16)
+
+/* Define the expected SynIC version. */
+#define HV_SYNIC_VERSION_1 (0x1)
+/* Valid SynIC vectors are 16-255. */
+#define HV_SYNIC_FIRST_VALID_VECTOR (16)
+
+#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0)
+#define HV_SYNIC_SIMP_ENABLE (1ULL << 0)
+#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0)
+#define HV_SYNIC_SINT_MASKED (1ULL << 16)
+#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17)
+#define HV_SYNIC_SINT_VECTOR_MASK (0xFF)
+
+/* Hyper-V defined statically assigned SINTs */
+#define HV_SYNIC_INTERCEPTION_SINT_INDEX 0x00000000
+#define HV_SYNIC_IOMMU_FAULT_SINT_INDEX 0x00000001
+#define HV_SYNIC_VMBUS_SINT_INDEX 0x00000002
+#define HV_SYNIC_FIRST_UNUSED_SINT_INDEX 0x00000005
+
+/* mshv assigned SINT for doorbell */
+#define HV_SYNIC_DOORBELL_SINT_INDEX HV_SYNIC_FIRST_UNUSED_SINT_INDEX
+
+enum hv_interrupt_type {
+ HV_X64_INTERRUPT_TYPE_FIXED = 0x0000,
+ HV_X64_INTERRUPT_TYPE_LOWESTPRIORITY = 0x0001,
+ HV_X64_INTERRUPT_TYPE_SMI = 0x0002,
+ HV_X64_INTERRUPT_TYPE_REMOTEREAD = 0x0003,
+ HV_X64_INTERRUPT_TYPE_NMI = 0x0004,
+ HV_X64_INTERRUPT_TYPE_INIT = 0x0005,
+ HV_X64_INTERRUPT_TYPE_SIPI = 0x0006,
+ HV_X64_INTERRUPT_TYPE_EXTINT = 0x0007,
+ HV_X64_INTERRUPT_TYPE_LOCALINT0 = 0x0008,
+ HV_X64_INTERRUPT_TYPE_LOCALINT1 = 0x0009,
+ HV_X64_INTERRUPT_TYPE_MAXIMUM = 0x000A,
+};
+
+/* Define synthetic interrupt source. */
+union hv_synic_sint {
+ u64 as_uint64;
+ struct {
+ u64 vector : 8;
+ u64 reserved1 : 8;
+ u64 masked : 1;
+ u64 auto_eoi : 1;
+ u64 polling : 1;
+ u64 as_intercept : 1;
+ u64 proxy : 1;
+ u64 reserved2 : 43;
+ } __packed;
+};
+
+union hv_x64_xsave_xfem_register {
+ u64 as_uint64;
+ struct {
+ u32 low_uint32;
+ u32 high_uint32;
+ } __packed;
+ struct {
+ u64 legacy_x87 : 1;
+ u64 legacy_sse : 1;
+ u64 avx : 1;
+ u64 mpx_bndreg : 1;
+ u64 mpx_bndcsr : 1;
+ u64 avx_512_op_mask : 1;
+ u64 avx_512_zmmhi : 1;
+ u64 avx_512_zmm16_31 : 1;
+ u64 rsvd8_9 : 2;
+ u64 pasid : 1;
+ u64 cet_u : 1;
+ u64 cet_s : 1;
+ u64 rsvd13_16 : 4;
+ u64 xtile_cfg : 1;
+ u64 xtile_data : 1;
+ u64 rsvd19_63 : 45;
+ } __packed;
+};
+
+/* Synthetic timer configuration */
+union hv_stimer_config { /* HV_X64_MSR_STIMER_CONFIG_CONTENTS */
+ u64 as_uint64;
+ struct {
+ u64 enable : 1;
+ u64 periodic : 1;
+ u64 lazy : 1;
+ u64 auto_enable : 1;
+ u64 apic_vector : 8;
+ u64 direct_mode : 1;
+ u64 reserved_z0 : 3;
+ u64 sintx : 4;
+ u64 reserved_z1 : 44;
+ } __packed;
+};
+
+/* Define the number of synthetic timers */
+#define HV_SYNIC_STIMER_COUNT (4)
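+
+/*
+ * Illustrative sketch, not part of the original header: arming stimer 0 as
+ * a one-shot in direct mode. The helper name is hypothetical; wrmsrq() is
+ * the x86 MSR write accessor, and "expiration" is assumed to be an
+ * absolute time in 100ns units of the partition reference time.
+ */
+static inline void hv_arm_stimer0_direct(u8 vector, u64 expiration)
+{
+ union hv_stimer_config config = { .as_uint64 = 0 };
+
+ config.enable = 1;
+ config.auto_enable = 1;
+ config.direct_mode = 1; /* deliver as a direct interrupt, no SINT */
+ config.apic_vector = vector;
+
+ wrmsrq(HV_X64_MSR_STIMER0_CONFIG, config.as_uint64);
+ wrmsrq(HV_X64_MSR_STIMER0_COUNT, expiration);
+}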
+
+/* Define port identifier type. */
+union hv_port_id {
+ u32 asu32;
+ struct {
+ u32 id : 24;
+ u32 reserved : 8;
+ } __packed u;
+};
+
+#define HV_MESSAGE_SIZE (256)
+#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
+#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30)
+
+/* Define hypervisor message types. */
+enum hv_message_type {
+ HVMSG_NONE = 0x00000000,
+
+ /* Memory access messages. */
+ HVMSG_UNMAPPED_GPA = 0x80000000,
+ HVMSG_GPA_INTERCEPT = 0x80000001,
+
+ /* Timer notification messages. */
+ HVMSG_TIMER_EXPIRED = 0x80000010,
+
+ /* Error messages. */
+ HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
+ HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021,
+ HVMSG_UNSUPPORTED_FEATURE = 0x80000022,
+
+ /*
+ * Opaque intercept message. The original intercept message is only
+ * accessible from the mapped intercept message page.
+ */
+ HVMSG_OPAQUE_INTERCEPT = 0x8000003F,
+
+ /* Trace buffer complete messages. */
+ HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040,
+
+ /* Hypercall intercept */
+ HVMSG_HYPERCALL_INTERCEPT = 0x80000050,
+
+ /* SynIC intercepts */
+ HVMSG_SYNIC_EVENT_INTERCEPT = 0x80000060,
+ HVMSG_SYNIC_SINT_INTERCEPT = 0x80000061,
+ HVMSG_SYNIC_SINT_DELIVERABLE = 0x80000062,
+
+ /* Async call completion intercept */
+ HVMSG_ASYNC_CALL_COMPLETION = 0x80000070,
+
+ /* Root scheduler messages */
+ HVMSG_SCHEDULER_VP_SIGNAL_BITSET = 0x80000100,
+ HVMSG_SCHEDULER_VP_SIGNAL_PAIR = 0x80000101,
+
+ /* Platform-specific processor intercept messages. */
+ HVMSG_X64_IO_PORT_INTERCEPT = 0x80010000,
+ HVMSG_X64_MSR_INTERCEPT = 0x80010001,
+ HVMSG_X64_CPUID_INTERCEPT = 0x80010002,
+ HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003,
+ HVMSG_X64_APIC_EOI = 0x80010004,
+ HVMSG_X64_LEGACY_FP_ERROR = 0x80010005,
+ HVMSG_X64_IOMMU_PRQ = 0x80010006,
+ HVMSG_X64_HALT = 0x80010007,
+ HVMSG_X64_INTERRUPTION_DELIVERABLE = 0x80010008,
+ HVMSG_X64_SIPI_INTERCEPT = 0x80010009,
+};
+
+/* Define the format of the SIMP register */
+union hv_synic_simp {
+ u64 as_uint64;
+ struct {
+ u64 simp_enabled : 1;
+ u64 preserved : 11;
+ u64 base_simp_gpa : 52;
+ } __packed;
+};
+
+union hv_message_flags {
+ u8 asu8;
+ struct {
+ u8 msg_pending : 1;
+ u8 reserved : 7;
+ } __packed;
+};
+
+struct hv_message_header {
+ u32 message_type;
+ u8 payload_size;
+ union hv_message_flags message_flags;
+ u8 reserved[2];
+ union {
+ u64 sender;
+ union hv_port_id port;
+ };
+} __packed;
+
+/*
+ * Message format for notifications delivered via an
+ * intercept message (as_intercept = 1).
+ */
+struct hv_notification_message_payload {
+ u32 sint_index;
+} __packed;
+
+struct hv_message {
+ struct hv_message_header header;
+ union {
+ u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
+ } u;
+} __packed;
+
+/* Define the synthetic interrupt message page layout. */
+struct hv_message_page {
+ struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
+} __packed;
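+
+/*
+ * Illustrative sketch, not part of the original header: consuming one
+ * message from a SINT's slot. The slot must be emptied (message_type set
+ * to HVMSG_NONE) before end-of-message is signalled; hv_write_eom() is a
+ * hypothetical helper that writes the EOM register so the hypervisor
+ * redelivers any pending message.
+ */
+static inline void hv_consume_message(struct hv_message_page *msg_page,
+ u32 sint_index)
+{
+ struct hv_message *msg = &msg_page->sint_message[sint_index];
+ u8 pending = msg->header.message_flags.msg_pending;
+
+ /* ... handle msg->u.payload based on msg->header.message_type ... */
+
+ WRITE_ONCE(msg->header.message_type, HVMSG_NONE);
+ mb(); /* empty the slot before asking for redelivery */
+ if (pending)
+  hv_write_eom(); /* hypothetical EOM write */
+}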
+
+/* Define timer message payload structure. */
+struct hv_timer_message_payload {
+ u32 timer_index;
+ u32 reserved;
+ u64 expiration_time; /* When the timer expired */
+ u64 delivery_time; /* When the message was delivered */
+} __packed;
+
+struct hv_x64_segment_register {
+ u64 base;
+ u32 limit;
+ u16 selector;
+ union {
+ struct {
+ u16 segment_type : 4;
+ u16 non_system_segment : 1;
+ u16 descriptor_privilege_level : 2;
+ u16 present : 1;
+ u16 reserved : 4;
+ u16 available : 1;
+ u16 _long : 1;
+ u16 _default : 1;
+ u16 granularity : 1;
+ } __packed;
+ u16 attributes;
+ };
+} __packed;
+
+struct hv_x64_table_register {
+ u16 pad[3];
+ u16 limit;
+ u64 base;
+} __packed;
+
+#define HV_NORMAL_VTL 0
+
+union hv_input_vtl {
+ u8 as_uint8;
+ struct {
+ u8 target_vtl : 4;
+ u8 use_target_vtl : 1;
+ u8 reserved_z : 3;
+ };
+} __packed;
+
+struct hv_init_vp_context {
+ u64 rip;
+ u64 rsp;
+ u64 rflags;
+
+ struct hv_x64_segment_register cs;
+ struct hv_x64_segment_register ds;
+ struct hv_x64_segment_register es;
+ struct hv_x64_segment_register fs;
+ struct hv_x64_segment_register gs;
+ struct hv_x64_segment_register ss;
+ struct hv_x64_segment_register tr;
+ struct hv_x64_segment_register ldtr;
+
+ struct hv_x64_table_register idtr;
+ struct hv_x64_table_register gdtr;
+
+ u64 efer;
+ u64 cr0;
+ u64 cr3;
+ u64 cr4;
+ u64 msr_cr_pat;
+} __packed;
+
+struct hv_enable_vp_vtl {
+ u64 partition_id;
+ u32 vp_index;
+ union hv_input_vtl target_vtl;
+ u8 mbz0;
+ u16 mbz1;
+ struct hv_init_vp_context vp_context;
+} __packed;
+
+struct hv_get_vp_from_apic_id_in {
+ u64 partition_id;
+ union hv_input_vtl target_vtl;
+ u8 res[7];
+ u32 apic_ids[];
+} __packed;
+
+union hv_register_vsm_partition_config {
+ u64 as_uint64;
+ struct {
+ u64 enable_vtl_protection : 1;
+ u64 default_vtl_protection_mask : 4;
+ u64 zero_memory_on_reset : 1;
+ u64 deny_lower_vtl_startup : 1;
+ u64 intercept_acceptance : 1;
+ u64 intercept_enable_vtl_protection : 1;
+ u64 intercept_vp_startup : 1;
+ u64 intercept_cpuid_unimplemented : 1;
+ u64 intercept_unrecoverable_exception : 1;
+ u64 intercept_page : 1;
+ u64 mbz : 51;
+ } __packed;
+};
+
+union hv_register_vsm_capabilities {
+ u64 as_uint64;
+ struct {
+ u64 dr6_shared: 1;
+ u64 mbec_vtl_mask: 16;
+ u64 deny_lower_vtl_startup: 1;
+ u64 supervisor_shadow_stack: 1;
+ u64 hardware_hvpt_available: 1;
+ u64 software_hvpt_available: 1;
+ u64 hardware_hvpt_range_bits: 6;
+ u64 intercept_page_available: 1;
+ u64 return_action_available: 1;
+ u64 reserved: 35;
+ } __packed;
+};
+
+union hv_register_vsm_page_offsets {
+ struct {
+ u64 vtl_call_offset : 12;
+ u64 vtl_return_offset : 12;
+ u64 reserved_mbz : 40;
+ } __packed;
+ u64 as_uint64;
+};
+
+struct hv_nested_enlightenments_control {
+ struct {
+ u32 directhypercall : 1;
+ u32 reserved : 31;
+ } __packed features;
+ struct {
+ u32 inter_partition_comm : 1;
+ u32 reserved : 31;
+ } __packed hypercall_controls;
+} __packed;
+
+/* Define virtual processor assist page structure. */
+struct hv_vp_assist_page {
+ u32 apic_assist;
+ u32 reserved1;
+ u32 vtl_entry_reason;
+ u32 vtl_reserved;
+ u64 vtl_ret_x64rax;
+ u64 vtl_ret_x64rcx;
+ struct hv_nested_enlightenments_control nested_control;
+ u8 enlighten_vmentry;
+ u8 reserved2[7];
+ u64 current_nested_vmcs;
+ u8 synthetic_time_unhalted_timer_expired;
+ u8 reserved3[7];
+ u8 virtualization_fault_information[40];
+ u8 reserved4[8];
+ u8 intercept_message[256];
+ u8 vtl_ret_actions[256];
+} __packed;
+
+enum hv_register_name {
+ /* Suspend Registers */
+ HV_REGISTER_EXPLICIT_SUSPEND = 0x00000000,
+ HV_REGISTER_INTERCEPT_SUSPEND = 0x00000001,
+ HV_REGISTER_DISPATCH_SUSPEND = 0x00000003,
+
+ /* Version - 128-bit result same as CPUID 0x40000002 */
+ HV_REGISTER_HYPERVISOR_VERSION = 0x00000100,
+
+ /* Feature Access (registers are 128 bits) - same as CPUID 0x40000003 - 0x4000000B */
+ HV_REGISTER_PRIVILEGES_AND_FEATURES_INFO = 0x00000200,
+ HV_REGISTER_FEATURES_INFO = 0x00000201,
+ HV_REGISTER_IMPLEMENTATION_LIMITS_INFO = 0x00000202,
+ HV_REGISTER_HARDWARE_FEATURES_INFO = 0x00000203,
+ HV_REGISTER_CPU_MANAGEMENT_FEATURES_INFO = 0x00000204,
+ HV_REGISTER_SVM_FEATURES_INFO = 0x00000205,
+ HV_REGISTER_SKIP_LEVEL_FEATURES_INFO = 0x00000206,
+ HV_REGISTER_NESTED_VIRT_FEATURES_INFO = 0x00000207,
+ HV_REGISTER_IPT_FEATURES_INFO = 0x00000208,
+
+ /* Guest Crash Registers */
+ HV_REGISTER_GUEST_CRASH_P0 = 0x00000210,
+ HV_REGISTER_GUEST_CRASH_P1 = 0x00000211,
+ HV_REGISTER_GUEST_CRASH_P2 = 0x00000212,
+ HV_REGISTER_GUEST_CRASH_P3 = 0x00000213,
+ HV_REGISTER_GUEST_CRASH_P4 = 0x00000214,
+ HV_REGISTER_GUEST_CRASH_CTL = 0x00000215,
+
+ /* Misc */
+ HV_REGISTER_VP_RUNTIME = 0x00090000,
+ HV_REGISTER_GUEST_OS_ID = 0x00090002,
+ HV_REGISTER_VP_INDEX = 0x00090003,
+ HV_REGISTER_TIME_REF_COUNT = 0x00090004,
+ HV_REGISTER_CPU_MANAGEMENT_VERSION = 0x00090007,
+ HV_REGISTER_VP_ASSIST_PAGE = 0x00090013,
+ HV_REGISTER_VP_ROOT_SIGNAL_COUNT = 0x00090014,
+ HV_REGISTER_REFERENCE_TSC = 0x00090017,
+
+ /* Hypervisor-defined Registers (Synic) */
+ HV_REGISTER_SINT0 = 0x000A0000,
+ HV_REGISTER_SINT1 = 0x000A0001,
+ HV_REGISTER_SINT2 = 0x000A0002,
+ HV_REGISTER_SINT3 = 0x000A0003,
+ HV_REGISTER_SINT4 = 0x000A0004,
+ HV_REGISTER_SINT5 = 0x000A0005,
+ HV_REGISTER_SINT6 = 0x000A0006,
+ HV_REGISTER_SINT7 = 0x000A0007,
+ HV_REGISTER_SINT8 = 0x000A0008,
+ HV_REGISTER_SINT9 = 0x000A0009,
+ HV_REGISTER_SINT10 = 0x000A000A,
+ HV_REGISTER_SINT11 = 0x000A000B,
+ HV_REGISTER_SINT12 = 0x000A000C,
+ HV_REGISTER_SINT13 = 0x000A000D,
+ HV_REGISTER_SINT14 = 0x000A000E,
+ HV_REGISTER_SINT15 = 0x000A000F,
+ HV_REGISTER_SCONTROL = 0x000A0010,
+ HV_REGISTER_SVERSION = 0x000A0011,
+ HV_REGISTER_SIEFP = 0x000A0012,
+ HV_REGISTER_SIMP = 0x000A0013,
+ HV_REGISTER_EOM = 0x000A0014,
+ HV_REGISTER_SIRBP = 0x000A0015,
+
+ HV_REGISTER_NESTED_SINT0 = 0x000A1000,
+ HV_REGISTER_NESTED_SINT1 = 0x000A1001,
+ HV_REGISTER_NESTED_SINT2 = 0x000A1002,
+ HV_REGISTER_NESTED_SINT3 = 0x000A1003,
+ HV_REGISTER_NESTED_SINT4 = 0x000A1004,
+ HV_REGISTER_NESTED_SINT5 = 0x000A1005,
+ HV_REGISTER_NESTED_SINT6 = 0x000A1006,
+ HV_REGISTER_NESTED_SINT7 = 0x000A1007,
+ HV_REGISTER_NESTED_SINT8 = 0x000A1008,
+ HV_REGISTER_NESTED_SINT9 = 0x000A1009,
+ HV_REGISTER_NESTED_SINT10 = 0x000A100A,
+ HV_REGISTER_NESTED_SINT11 = 0x000A100B,
+ HV_REGISTER_NESTED_SINT12 = 0x000A100C,
+ HV_REGISTER_NESTED_SINT13 = 0x000A100D,
+ HV_REGISTER_NESTED_SINT14 = 0x000A100E,
+ HV_REGISTER_NESTED_SINT15 = 0x000A100F,
+ HV_REGISTER_NESTED_SCONTROL = 0x000A1010,
+ HV_REGISTER_NESTED_SVERSION = 0x000A1011,
+ HV_REGISTER_NESTED_SIFP = 0x000A1012,
+ HV_REGISTER_NESTED_SIPP = 0x000A1013,
+ HV_REGISTER_NESTED_EOM = 0x000A1014,
+ HV_REGISTER_NESTED_SIRBP = 0x000A1015,
+
+ /* Hypervisor-defined Registers (Synthetic Timers) */
+ HV_REGISTER_STIMER0_CONFIG = 0x000B0000,
+ HV_REGISTER_STIMER0_COUNT = 0x000B0001,
+
+ /* VSM */
+ HV_REGISTER_VSM_VP_STATUS = 0x000D0003,
+
+ /* Synthetic VSM registers */
+ HV_REGISTER_VSM_CODE_PAGE_OFFSETS = 0x000D0002,
+ HV_REGISTER_VSM_CAPABILITIES = 0x000D0006,
+ HV_REGISTER_VSM_PARTITION_CONFIG = 0x000D0007,
+
+#if defined(CONFIG_X86)
+ /* X64 Debug Registers */
+ HV_X64_REGISTER_DR0 = 0x00050000,
+ HV_X64_REGISTER_DR1 = 0x00050001,
+ HV_X64_REGISTER_DR2 = 0x00050002,
+ HV_X64_REGISTER_DR3 = 0x00050003,
+ HV_X64_REGISTER_DR6 = 0x00050004,
+ HV_X64_REGISTER_DR7 = 0x00050005,
+
+ /* X64 Cache control MSRs */
+ HV_X64_REGISTER_MSR_MTRR_CAP = 0x0008000D,
+ HV_X64_REGISTER_MSR_MTRR_DEF_TYPE = 0x0008000E,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE0 = 0x00080010,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE1 = 0x00080011,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE2 = 0x00080012,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE3 = 0x00080013,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE4 = 0x00080014,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE5 = 0x00080015,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE6 = 0x00080016,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE7 = 0x00080017,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE8 = 0x00080018,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE9 = 0x00080019,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASEA = 0x0008001A,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASEB = 0x0008001B,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASEC = 0x0008001C,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASED = 0x0008001D,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASEE = 0x0008001E,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASEF = 0x0008001F,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK0 = 0x00080040,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK1 = 0x00080041,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK2 = 0x00080042,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK3 = 0x00080043,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK4 = 0x00080044,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK5 = 0x00080045,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK6 = 0x00080046,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK7 = 0x00080047,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK8 = 0x00080048,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK9 = 0x00080049,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKA = 0x0008004A,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKB = 0x0008004B,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKC = 0x0008004C,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKD = 0x0008004D,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKE = 0x0008004E,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKF = 0x0008004F,
+ HV_X64_REGISTER_MSR_MTRR_FIX64K00000 = 0x00080070,
+ HV_X64_REGISTER_MSR_MTRR_FIX16K80000 = 0x00080071,
+ HV_X64_REGISTER_MSR_MTRR_FIX16KA0000 = 0x00080072,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KC0000 = 0x00080073,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KC8000 = 0x00080074,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KD0000 = 0x00080075,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KD8000 = 0x00080076,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KE0000 = 0x00080077,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KE8000 = 0x00080078,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KF0000 = 0x00080079,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KF8000 = 0x0008007A,
+
+ HV_X64_REGISTER_REG_PAGE = 0x0009001C,
+#endif
+};
+
+/*
+ * Arch compatibility regs for use with hv_set/get_register
+ */
+#if defined(CONFIG_X86)
+
+/*
+ * To support arch-generic code calling hv_set/get_register:
+ * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrq/wrmsrq
+ * - On ARM, HV_MSR_ indicates a VP register accessed via hypercall
+ */
+#define HV_MSR_CRASH_P0 (HV_X64_MSR_CRASH_P0)
+#define HV_MSR_CRASH_P1 (HV_X64_MSR_CRASH_P1)
+#define HV_MSR_CRASH_P2 (HV_X64_MSR_CRASH_P2)
+#define HV_MSR_CRASH_P3 (HV_X64_MSR_CRASH_P3)
+#define HV_MSR_CRASH_P4 (HV_X64_MSR_CRASH_P4)
+#define HV_MSR_CRASH_CTL (HV_X64_MSR_CRASH_CTL)
+
+#define HV_MSR_VP_INDEX (HV_X64_MSR_VP_INDEX)
+#define HV_MSR_TIME_REF_COUNT (HV_X64_MSR_TIME_REF_COUNT)
+#define HV_MSR_REFERENCE_TSC (HV_X64_MSR_REFERENCE_TSC)
+
+#define HV_MSR_SINT0 (HV_X64_MSR_SINT0)
+#define HV_MSR_SVERSION (HV_X64_MSR_SVERSION)
+#define HV_MSR_SCONTROL (HV_X64_MSR_SCONTROL)
+#define HV_MSR_SIEFP (HV_X64_MSR_SIEFP)
+#define HV_MSR_SIMP (HV_X64_MSR_SIMP)
+#define HV_MSR_EOM (HV_X64_MSR_EOM)
+#define HV_MSR_SIRBP (HV_X64_MSR_SIRBP)
+
+#define HV_MSR_NESTED_SCONTROL (HV_X64_MSR_NESTED_SCONTROL)
+#define HV_MSR_NESTED_SVERSION (HV_X64_MSR_NESTED_SVERSION)
+#define HV_MSR_NESTED_SIEFP (HV_X64_MSR_NESTED_SIEFP)
+#define HV_MSR_NESTED_SIMP (HV_X64_MSR_NESTED_SIMP)
+#define HV_MSR_NESTED_EOM (HV_X64_MSR_NESTED_EOM)
+#define HV_MSR_NESTED_SINT0 (HV_X64_MSR_NESTED_SINT0)
+
+#define HV_MSR_STIMER0_CONFIG (HV_X64_MSR_STIMER0_CONFIG)
+#define HV_MSR_STIMER0_COUNT (HV_X64_MSR_STIMER0_COUNT)
+
+#elif defined(CONFIG_ARM64) /* CONFIG_X86 */
+
+#define HV_MSR_CRASH_P0 (HV_REGISTER_GUEST_CRASH_P0)
+#define HV_MSR_CRASH_P1 (HV_REGISTER_GUEST_CRASH_P1)
+#define HV_MSR_CRASH_P2 (HV_REGISTER_GUEST_CRASH_P2)
+#define HV_MSR_CRASH_P3 (HV_REGISTER_GUEST_CRASH_P3)
+#define HV_MSR_CRASH_P4 (HV_REGISTER_GUEST_CRASH_P4)
+#define HV_MSR_CRASH_CTL (HV_REGISTER_GUEST_CRASH_CTL)
+
+#define HV_MSR_VP_INDEX (HV_REGISTER_VP_INDEX)
+#define HV_MSR_TIME_REF_COUNT (HV_REGISTER_TIME_REF_COUNT)
+#define HV_MSR_REFERENCE_TSC (HV_REGISTER_REFERENCE_TSC)
+
+#define HV_MSR_SINT0 (HV_REGISTER_SINT0)
+#define HV_MSR_SCONTROL (HV_REGISTER_SCONTROL)
+#define HV_MSR_SIEFP (HV_REGISTER_SIEFP)
+#define HV_MSR_SIMP (HV_REGISTER_SIMP)
+#define HV_MSR_EOM (HV_REGISTER_EOM)
+#define HV_MSR_SIRBP (HV_REGISTER_SIRBP)
+
+#define HV_MSR_STIMER0_CONFIG (HV_REGISTER_STIMER0_CONFIG)
+#define HV_MSR_STIMER0_COUNT (HV_REGISTER_STIMER0_COUNT)
+
+#endif /* CONFIG_ARM64 */
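+
+/*
+ * Illustrative sketch, not part of the original header: arch-generic code
+ * can use the HV_MSR_* aliases without caring how the register is reached.
+ * hv_get_register() is a hypothetical accessor; on x86 it would be an
+ * rdmsrq(), on ARM64 a GetVpRegisters hypercall.
+ */
+static inline u32 hv_query_vp_index(void)
+{
+ return (u32)hv_get_register(HV_MSR_VP_INDEX);
+}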
+
+union hv_explicit_suspend_register {
+ u64 as_uint64;
+ struct {
+ u64 suspended : 1;
+ u64 reserved : 63;
+ } __packed;
+};
+
+union hv_intercept_suspend_register {
+ u64 as_uint64;
+ struct {
+ u64 suspended : 1;
+ u64 reserved : 63;
+ } __packed;
+};
+
+union hv_dispatch_suspend_register {
+ u64 as_uint64;
+ struct {
+ u64 suspended : 1;
+ u64 reserved : 63;
+ } __packed;
+};
+
+union hv_arm64_pending_interruption_register {
+ u64 as_uint64;
+ struct {
+ u64 interruption_pending : 1;
+ u64 interruption_type: 1;
+ u64 reserved : 30;
+ u64 error_code : 32;
+ } __packed;
+};
+
+union hv_arm64_interrupt_state_register {
+ u64 as_uint64;
+ struct {
+ u64 interrupt_shadow : 1;
+ u64 reserved : 63;
+ } __packed;
+};
+
+union hv_arm64_pending_synthetic_exception_event {
+ u64 as_uint64[2];
+ struct {
+ u8 event_pending : 1;
+ u8 event_type : 3;
+ u8 reserved : 4;
+ u8 rsvd[3];
+ u32 exception_type;
+ u64 context;
+ } __packed;
+};
+
+union hv_x64_interrupt_state_register {
+ u64 as_uint64;
+ struct {
+ u64 interrupt_shadow : 1;
+ u64 nmi_masked : 1;
+ u64 reserved : 62;
+ } __packed;
+};
+
+union hv_x64_pending_interruption_register {
+ u64 as_uint64;
+ struct {
+ u32 interruption_pending : 1;
+ u32 interruption_type : 3;
+ u32 deliver_error_code : 1;
+ u32 instruction_length : 4;
+ u32 nested_event : 1;
+ u32 reserved : 6;
+ u32 interruption_vector : 16;
+ u32 error_code;
+ } __packed;
+};
+
+union hv_register_value {
+ struct hv_u128 reg128;
+ u64 reg64;
+ u32 reg32;
+ u16 reg16;
+ u8 reg8;
+
+ struct hv_x64_segment_register segment;
+ struct hv_x64_table_register table;
+ union hv_explicit_suspend_register explicit_suspend;
+ union hv_intercept_suspend_register intercept_suspend;
+ union hv_dispatch_suspend_register dispatch_suspend;
+#ifdef CONFIG_ARM64
+ union hv_arm64_interrupt_state_register interrupt_state;
+ union hv_arm64_pending_interruption_register pending_interruption;
+#endif
+#ifdef CONFIG_X86
+ union hv_x64_interrupt_state_register interrupt_state;
+ union hv_x64_pending_interruption_register pending_interruption;
+#endif
+ union hv_arm64_pending_synthetic_exception_event pending_synthetic_exception_event;
+};
+
+/* NOTE: Linux helper struct - NOT from Hyper-V code. */
+struct hv_output_get_vp_registers {
+ DECLARE_FLEX_ARRAY(union hv_register_value, values);
+};
+
+#if defined(CONFIG_ARM64)
+/* HvGetVpRegisters returns an array of these output elements */
+struct hv_get_vp_registers_output {
+ union {
+ struct {
+ u32 a;
+ u32 b;
+ u32 c;
+ u32 d;
+ } as32 __packed;
+ struct {
+ u64 low;
+ u64 high;
+ } as64 __packed;
+ };
+};
+
+#endif /* CONFIG_ARM64 */
+
+struct hv_register_assoc {
+ u32 name; /* enum hv_register_name */
+ u32 reserved1;
+ u64 reserved2;
+ union hv_register_value value;
+} __packed;
+
+struct hv_input_get_vp_registers {
+ u64 partition_id;
+ u32 vp_index;
+ union hv_input_vtl input_vtl;
+ u8 rsvd_z8;
+ u16 rsvd_z16;
+ u32 names[];
+} __packed;
+
+struct hv_input_set_vp_registers {
+ u64 partition_id;
+ u32 vp_index;
+ union hv_input_vtl input_vtl;
+ u8 rsvd_z8;
+ u16 rsvd_z16;
+ struct hv_register_assoc elements[];
+} __packed;
+
+#define HV_UNMAP_GPA_LARGE_PAGE 0x2
+
+/* HvCallSendSyntheticClusterIpi hypercall */
+struct hv_send_ipi { /* HV_INPUT_SEND_SYNTHETIC_CLUSTER_IPI */
+ u32 vector;
+ u32 reserved;
+ u64 cpu_mask;
+} __packed;
+
+#define HV_VTL_MASK GENMASK(3, 0)
+
+/* Hyper-V memory host visibility */
+enum hv_mem_host_visibility {
+ VMBUS_PAGE_NOT_VISIBLE = 0,
+ VMBUS_PAGE_VISIBLE_READ_ONLY = 1,
+ VMBUS_PAGE_VISIBLE_READ_WRITE = 3
+};
+
+/* HvCallModifySparseGpaPageHostVisibility hypercall */
+#define HV_MAX_MODIFY_GPA_REP_COUNT ((HV_HYP_PAGE_SIZE / sizeof(u64)) - 2)
+struct hv_gpa_range_for_visibility {
+ u64 partition_id;
+ u32 host_visibility : 2;
+ u32 reserved0 : 30;
+ u32 reserved1;
+ u64 gpa_page_list[HV_MAX_MODIFY_GPA_REP_COUNT];
+} __packed;
+
+#if defined(CONFIG_X86)
+union hv_msi_address_register { /* HV_MSI_ADDRESS */
+ u32 as_uint32;
+ struct {
+ u32 reserved1 : 2;
+ u32 destination_mode : 1;
+ u32 redirection_hint : 1;
+ u32 reserved2 : 8;
+ u32 destination_id : 8;
+ u32 msi_base : 12;
+ };
+} __packed;
+
+union hv_msi_data_register { /* HV_MSI_ENTRY.Data */
+ u32 as_uint32;
+ struct {
+ u32 vector : 8;
+ u32 delivery_mode : 3;
+ u32 reserved1 : 3;
+ u32 level_assert : 1;
+ u32 trigger_mode : 1;
+ u32 reserved2 : 16;
+ };
+} __packed;
+
+union hv_msi_entry { /* HV_MSI_ENTRY */
+
+ u64 as_uint64;
+ struct {
+ union hv_msi_address_register address;
+ union hv_msi_data_register data;
+ } __packed;
+};
+
+#elif defined(CONFIG_ARM64) /* CONFIG_X86 */
+
+union hv_msi_entry {
+ u64 as_uint64[2];
+ struct {
+ u64 address;
+ u32 data;
+ u32 reserved;
+ } __packed;
+};
+#endif /* CONFIG_ARM64 */
+
+union hv_ioapic_rte {
+ u64 as_uint64;
+
+ struct {
+ u32 vector : 8;
+ u32 delivery_mode : 3;
+ u32 destination_mode : 1;
+ u32 delivery_status : 1;
+ u32 interrupt_polarity : 1;
+ u32 remote_irr : 1;
+ u32 trigger_mode : 1;
+ u32 interrupt_mask : 1;
+ u32 reserved1 : 15;
+
+ u32 reserved2 : 24;
+ u32 destination_id : 8;
+ };
+
+ struct {
+ u32 low_uint32;
+ u32 high_uint32;
+ };
+} __packed;
+
+enum hv_interrupt_source { /* HV_INTERRUPT_SOURCE */
+ HV_INTERRUPT_SOURCE_MSI = 1, /* MSI and MSI-X */
+ HV_INTERRUPT_SOURCE_IOAPIC,
+};
+
+struct hv_interrupt_entry { /* HV_INTERRUPT_ENTRY */
+ u32 source;
+ u32 reserved1;
+ union {
+ union hv_msi_entry msi_entry;
+ union hv_ioapic_rte ioapic_rte;
+ };
+} __packed;
+
+#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
+#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
+
+struct hv_device_interrupt_target { /* HV_DEVICE_INTERRUPT_TARGET */
+ u32 vector;
+ u32 flags; /* HV_DEVICE_INTERRUPT_TARGET_* above */
+ union {
+ u64 vp_mask;
+ struct hv_vpset vp_set;
+ };
+} __packed;
+
+struct hv_retarget_device_interrupt { /* HV_INPUT_RETARGET_DEVICE_INTERRUPT */
+ u64 partition_id; /* use "self" */
+ u64 device_id;
+ struct hv_interrupt_entry int_entry;
+ u64 reserved2;
+ struct hv_device_interrupt_target int_target;
+} __packed __aligned(8);
+
+enum hv_intercept_type {
+#if defined(CONFIG_X86)
+ HV_INTERCEPT_TYPE_X64_IO_PORT = 0x00000000,
+ HV_INTERCEPT_TYPE_X64_MSR = 0x00000001,
+ HV_INTERCEPT_TYPE_X64_CPUID = 0x00000002,
+#endif
+ HV_INTERCEPT_TYPE_EXCEPTION = 0x00000003,
+ /* Used to be HV_INTERCEPT_TYPE_REGISTER */
+ HV_INTERCEPT_TYPE_RESERVED0 = 0x00000004,
+ HV_INTERCEPT_TYPE_MMIO = 0x00000005,
+#if defined(CONFIG_X86)
+ HV_INTERCEPT_TYPE_X64_GLOBAL_CPUID = 0x00000006,
+ HV_INTERCEPT_TYPE_X64_APIC_SMI = 0x00000007,
+#endif
+ HV_INTERCEPT_TYPE_HYPERCALL = 0x00000008,
+#if defined(CONFIG_X86)
+ HV_INTERCEPT_TYPE_X64_APIC_INIT_SIPI = 0x00000009,
+ HV_INTERCEPT_MC_UPDATE_PATCH_LEVEL_MSR_READ = 0x0000000A,
+ HV_INTERCEPT_TYPE_X64_APIC_WRITE = 0x0000000B,
+ HV_INTERCEPT_TYPE_X64_MSR_INDEX = 0x0000000C,
+#endif
+ HV_INTERCEPT_TYPE_MAX,
+ HV_INTERCEPT_TYPE_INVALID = 0xFFFFFFFF,
+};
+
+union hv_intercept_parameters {
+ /* HV_INTERCEPT_PARAMETERS is defined to be an 8-byte field. */
+ u64 as_uint64;
+#if defined(CONFIG_X86)
+ /* HV_INTERCEPT_TYPE_X64_IO_PORT */
+ u16 io_port;
+ /* HV_INTERCEPT_TYPE_X64_CPUID */
+ u32 cpuid_index;
+ /* HV_INTERCEPT_TYPE_X64_APIC_WRITE */
+ u32 apic_write_mask;
+ /* HV_INTERCEPT_TYPE_EXCEPTION */
+ u16 exception_vector;
+ /* HV_INTERCEPT_TYPE_X64_MSR_INDEX */
+ u32 msr_index;
+#endif
+ /* N.B. Other intercept types do not have any parameters. */
+};
+
+/* Data structures for HVCALL_MMIO_READ and HVCALL_MMIO_WRITE */
+#define HV_HYPERCALL_MMIO_MAX_DATA_LENGTH 64
+
+struct hv_mmio_read_input { /* HV_INPUT_MEMORY_MAPPED_IO_READ */
+ u64 gpa;
+ u32 size;
+ u32 reserved;
+} __packed;
+
+struct hv_mmio_read_output {
+ u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH];
+} __packed;
+
+struct hv_mmio_write_input {
+ u64 gpa;
+ u32 size;
+ u32 reserved;
+ u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH];
+} __packed;
+
+#endif /* _HV_HVGDK_MINI_H */
diff --git a/include/hyperv/hvhdk.h b/include/hyperv/hvhdk.h
new file mode 100644
index 000000000000..469186df7826
--- /dev/null
+++ b/include/hyperv/hvhdk.h
@@ -0,0 +1,899 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Type definitions for the Microsoft hypervisor.
+ */
+#ifndef _HV_HVHDK_H
+#define _HV_HVHDK_H
+
+#include <linux/build_bug.h>
+
+#include "hvhdk_mini.h"
+#include "hvgdk.h"
+
+/* Bits for dirty mask of hv_vp_register_page */
+#define HV_X64_REGISTER_CLASS_GENERAL 0
+#define HV_X64_REGISTER_CLASS_IP 1
+#define HV_X64_REGISTER_CLASS_XMM 2
+#define HV_X64_REGISTER_CLASS_SEGMENT 3
+#define HV_X64_REGISTER_CLASS_FLAGS 4
+
+#define HV_VP_REGISTER_PAGE_VERSION_1 1u
+
+#define HV_VP_REGISTER_PAGE_MAX_VECTOR_COUNT 7
+
+union hv_vp_register_page_interrupt_vectors {
+ u64 as_uint64;
+ struct {
+ u8 vector_count;
+ u8 vector[HV_VP_REGISTER_PAGE_MAX_VECTOR_COUNT];
+ } __packed;
+};
+
+struct hv_vp_register_page {
+ u16 version;
+ u8 isvalid;
+ u8 rsvdz;
+ u32 dirty;
+
+#if IS_ENABLED(CONFIG_X86)
+
+ union {
+ struct {
+ /* General purpose registers
+ * (HV_X64_REGISTER_CLASS_GENERAL)
+ */
+ union {
+ struct {
+ u64 rax;
+ u64 rcx;
+ u64 rdx;
+ u64 rbx;
+ u64 rsp;
+ u64 rbp;
+ u64 rsi;
+ u64 rdi;
+ u64 r8;
+ u64 r9;
+ u64 r10;
+ u64 r11;
+ u64 r12;
+ u64 r13;
+ u64 r14;
+ u64 r15;
+ } __packed;
+
+ u64 gp_registers[16];
+ };
+ /* Instruction pointer (HV_X64_REGISTER_CLASS_IP) */
+ u64 rip;
+ /* Flags (HV_X64_REGISTER_CLASS_FLAGS) */
+ u64 rflags;
+ } __packed;
+
+ u64 registers[18];
+ };
+ /* Volatile XMM registers (HV_X64_REGISTER_CLASS_XMM) */
+ union {
+ struct {
+ struct hv_u128 xmm0;
+ struct hv_u128 xmm1;
+ struct hv_u128 xmm2;
+ struct hv_u128 xmm3;
+ struct hv_u128 xmm4;
+ struct hv_u128 xmm5;
+ } __packed;
+
+ struct hv_u128 xmm_registers[6];
+ };
+ /* Segment registers (HV_X64_REGISTER_CLASS_SEGMENT) */
+ union {
+ struct {
+ struct hv_x64_segment_register es;
+ struct hv_x64_segment_register cs;
+ struct hv_x64_segment_register ss;
+ struct hv_x64_segment_register ds;
+ struct hv_x64_segment_register fs;
+ struct hv_x64_segment_register gs;
+ } __packed;
+
+ struct hv_x64_segment_register segment_registers[6];
+ };
+ /* Misc. control registers (cannot be set via this interface) */
+ u64 cr0;
+ u64 cr3;
+ u64 cr4;
+ u64 cr8;
+ u64 efer;
+ u64 dr7;
+ union hv_x64_pending_interruption_register pending_interruption;
+ union hv_x64_interrupt_state_register interrupt_state;
+ u64 instruction_emulation_hints;
+ u64 xfem;
+
+ /*
+ * Fields from this point are not included in the register page save chunk.
+ * The reserved field is intended to maintain alignment for unsaved fields.
+ */
+ u8 reserved1[0x100];
+
+ /*
+ * Interrupts injected as part of HvCallDispatchVp.
+ */
+ union hv_vp_register_page_interrupt_vectors interrupt_vectors;
+
+#elif IS_ENABLED(CONFIG_ARM64)
+ /* Not yet supported on ARM64 */
+#endif
+} __packed;
+
+#define HV_PARTITION_PROCESSOR_FEATURES_BANKS 2
+
+union hv_partition_processor_features {
+ u64 as_uint64[HV_PARTITION_PROCESSOR_FEATURES_BANKS];
+ struct {
+ u64 sse3_support : 1;
+ u64 lahf_sahf_support : 1;
+ u64 ssse3_support : 1;
+ u64 sse4_1_support : 1;
+ u64 sse4_2_support : 1;
+ u64 sse4a_support : 1;
+ u64 xop_support : 1;
+ u64 pop_cnt_support : 1;
+ u64 cmpxchg16b_support : 1;
+ u64 altmovcr8_support : 1;
+ u64 lzcnt_support : 1;
+ u64 mis_align_sse_support : 1;
+ u64 mmx_ext_support : 1;
+ u64 amd3dnow_support : 1;
+ u64 extended_amd3dnow_support : 1;
+ u64 page_1gb_support : 1;
+ u64 aes_support : 1;
+ u64 pclmulqdq_support : 1;
+ u64 pcid_support : 1;
+ u64 fma4_support : 1;
+ u64 f16c_support : 1;
+ u64 rd_rand_support : 1;
+ u64 rd_wr_fs_gs_support : 1;
+ u64 smep_support : 1;
+ u64 enhanced_fast_string_support : 1;
+ u64 bmi1_support : 1;
+ u64 bmi2_support : 1;
+ u64 hle_support_deprecated : 1;
+ u64 rtm_support_deprecated : 1;
+ u64 movbe_support : 1;
+ u64 npiep1_support : 1;
+ u64 dep_x87_fpu_save_support : 1;
+ u64 rd_seed_support : 1;
+ u64 adx_support : 1;
+ u64 intel_prefetch_support : 1;
+ u64 smap_support : 1;
+ u64 hle_support : 1;
+ u64 rtm_support : 1;
+ u64 rdtscp_support : 1;
+ u64 clflushopt_support : 1;
+ u64 clwb_support : 1;
+ u64 sha_support : 1;
+ u64 x87_pointers_saved_support : 1;
+ u64 invpcid_support : 1;
+ u64 ibrs_support : 1;
+ u64 stibp_support : 1;
+ u64 ibpb_support: 1;
+ u64 unrestricted_guest_support : 1;
+ u64 mdd_support : 1;
+ u64 fast_short_rep_mov_support : 1;
+ u64 l1dcache_flush_support : 1;
+ u64 rdcl_no_support : 1;
+ u64 ibrs_all_support : 1;
+ u64 skip_l1df_support : 1;
+ u64 ssb_no_support : 1;
+ u64 rsb_a_no_support : 1;
+ u64 virt_spec_ctrl_support : 1;
+ u64 rd_pid_support : 1;
+ u64 umip_support : 1;
+ u64 mbs_no_support : 1;
+ u64 mb_clear_support : 1;
+ u64 taa_no_support : 1;
+ u64 tsx_ctrl_support : 1;
+ /*
+ * N.B. The final processor feature bit in bank 0 is reserved to
+ * simplify potential downlevel backports.
+ */
+ u64 reserved_bank0 : 1;
+
+ /* N.B. Begin bank 1 processor features. */
+ u64 acount_mcount_support : 1;
+ u64 tsc_invariant_support : 1;
+ u64 cl_zero_support : 1;
+ u64 rdpru_support : 1;
+ u64 la57_support : 1;
+ u64 mbec_support : 1;
+ u64 nested_virt_support : 1;
+ u64 psfd_support : 1;
+ u64 cet_ss_support : 1;
+ u64 cet_ibt_support : 1;
+ u64 vmx_exception_inject_support : 1;
+ u64 enqcmd_support : 1;
+ u64 umwait_tpause_support : 1;
+ u64 movdiri_support : 1;
+ u64 movdir64b_support : 1;
+ u64 cldemote_support : 1;
+ u64 serialize_support : 1;
+ u64 tsc_deadline_tmr_support : 1;
+ u64 tsc_adjust_support : 1;
+ u64 fzlrep_movsb : 1;
+ u64 fsrep_stosb : 1;
+ u64 fsrep_cmpsb : 1;
+ u64 reserved_bank1 : 42;
+ } __packed;
+};
+
+union hv_partition_processor_xsave_features {
+ struct {
+ u64 xsave_support : 1;
+ u64 xsaveopt_support : 1;
+ u64 avx_support : 1;
+ u64 reserved1 : 61;
+ } __packed;
+ u64 as_uint64;
+};
+
+struct hv_partition_creation_properties {
+ union hv_partition_processor_features disabled_processor_features;
+ union hv_partition_processor_xsave_features
+ disabled_processor_xsave_features;
+} __packed;
+
+#define HV_PARTITION_SYNTHETIC_PROCESSOR_FEATURES_BANKS 1
+
+union hv_partition_synthetic_processor_features {
+ u64 as_uint64[HV_PARTITION_SYNTHETIC_PROCESSOR_FEATURES_BANKS];
+
+ struct {
+ u64 hypervisor_present : 1;
+ /* Support for HV#1 (CPUID leaves 0x40000000 - 0x40000006) */
+ u64 hv1 : 1;
+ u64 access_vp_run_time_reg : 1; /* HV_X64_MSR_VP_RUNTIME */
+ u64 access_partition_reference_counter : 1; /* HV_X64_MSR_TIME_REF_COUNT */
+ u64 access_synic_regs : 1; /* SINT-related registers */
+ /*
+ * Access to HV_X64_MSR_STIMER0_CONFIG through
+ * HV_X64_MSR_STIMER3_COUNT.
+ */
+ u64 access_synthetic_timer_regs : 1;
+ u64 access_intr_ctrl_regs : 1; /* APIC MSRs and VP assist page */
+ /* HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL */
+ u64 access_hypercall_regs : 1;
+ u64 access_vp_index : 1;
+ u64 access_partition_reference_tsc : 1;
+ u64 access_guest_idle_reg : 1;
+ u64 access_frequency_regs : 1;
+ u64 reserved_z12 : 1;
+ u64 reserved_z13 : 1;
+ u64 reserved_z14 : 1;
+ u64 enable_extended_gva_ranges_for_flush_virtual_address_list : 1;
+ u64 reserved_z16 : 1;
+ u64 reserved_z17 : 1;
+ /* Use fast hypercall output. Corresponds to privilege. */
+ u64 fast_hypercall_output : 1;
+ u64 reserved_z19 : 1;
+ u64 start_virtual_processor : 1; /* Can start VPs */
+ u64 reserved_z21 : 1;
+ /* Synthetic timers in direct mode. */
+ u64 direct_synthetic_timers : 1;
+ u64 reserved_z23 : 1;
+ u64 extended_processor_masks : 1;
+
+ /* Enable various hypercalls */
+ u64 tb_flush_hypercalls : 1;
+ u64 synthetic_cluster_ipi : 1;
+ u64 notify_long_spin_wait : 1;
+ u64 query_numa_distance : 1;
+ u64 signal_events : 1;
+ u64 retarget_device_interrupt : 1;
+ u64 restore_time : 1;
+
+ /* EnlightenedVmcs nested enlightenment is supported. */
+ u64 enlightened_vmcs : 1;
+ u64 reserved : 31;
+ } __packed;
+};
+
+#define HV_MAKE_COMPATIBILITY_VERSION(major_, minor_) \
+ ((u32)((major_) << 8 | (minor_)))
+
+#define HV_COMPATIBILITY_21_H2 HV_MAKE_COMPATIBILITY_VERSION(0X6, 0X9)
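+
+/*
+ * Worked example (not part of the original header): 21H2 packs major 0x6
+ * and minor 0x9 into 0x0609.
+ */
+static_assert(HV_COMPATIBILITY_21_H2 == 0x0609);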
+
+union hv_partition_isolation_properties {
+ u64 as_uint64;
+ struct {
+ u64 isolation_type: 5;
+ u64 isolation_host_type : 2;
+ u64 rsvd_z: 5;
+ u64 shared_gpa_boundary_page_number: 52;
+ } __packed;
+};
+
+/*
+ * Various isolation types supported by MSHV.
+ */
+#define HV_PARTITION_ISOLATION_TYPE_NONE 0
+#define HV_PARTITION_ISOLATION_TYPE_SNP 2
+#define HV_PARTITION_ISOLATION_TYPE_TDX 3
+
+/*
+ * Various host isolation types supported by MSHV.
+ */
+#define HV_PARTITION_ISOLATION_HOST_TYPE_NONE 0x0
+#define HV_PARTITION_ISOLATION_HOST_TYPE_HARDWARE 0x1
+#define HV_PARTITION_ISOLATION_HOST_TYPE_RESERVED 0x2
+
+/* Note: Exo partition is enabled by default */
+#define HV_PARTITION_CREATION_FLAG_GPA_SUPER_PAGES_ENABLED BIT(4)
+#define HV_PARTITION_CREATION_FLAG_EXO_PARTITION BIT(8)
+#define HV_PARTITION_CREATION_FLAG_LAPIC_ENABLED BIT(13)
+#define HV_PARTITION_CREATION_FLAG_INTERCEPT_MESSAGE_PAGE_ENABLED BIT(19)
+#define HV_PARTITION_CREATION_FLAG_X2APIC_CAPABLE BIT(22)
+
+struct hv_input_create_partition {
+ u64 flags;
+ struct hv_proximity_domain_info proximity_domain_info;
+ u32 compatibility_version;
+ u32 padding;
+ struct hv_partition_creation_properties partition_creation_properties;
+ union hv_partition_isolation_properties isolation_properties;
+} __packed;
+
+struct hv_output_create_partition {
+ u64 partition_id;
+} __packed;
+
+struct hv_input_initialize_partition {
+ u64 partition_id;
+} __packed;
+
+struct hv_input_finalize_partition {
+ u64 partition_id;
+} __packed;
+
+struct hv_input_delete_partition {
+ u64 partition_id;
+} __packed;
+
+struct hv_input_get_partition_property {
+ u64 partition_id;
+ u32 property_code; /* enum hv_partition_property_code */
+ u32 padding;
+} __packed;
+
+struct hv_output_get_partition_property {
+ u64 property_value;
+} __packed;
+
+struct hv_input_set_partition_property {
+ u64 partition_id;
+ u32 property_code; /* enum hv_partition_property_code */
+ u32 padding;
+ u64 property_value;
+} __packed;
+
+union hv_partition_property_arg {
+ u64 as_uint64;
+ struct {
+ union {
+ u32 arg;
+ u32 vp_index;
+ };
+ u16 reserved0;
+ u8 reserved1;
+ u8 object_type;
+ } __packed;
+};
+
+struct hv_input_get_partition_property_ex {
+ u64 partition_id;
+ u32 property_code; /* enum hv_partition_property_code */
+ u32 padding;
+ union {
+ union hv_partition_property_arg arg_data;
+ u64 arg;
+ };
+} __packed;
+
+/*
+ * NOTE: This size should be computed from hv_input_set_partition_property_ex_header,
+ * but hv_input_get_partition_property_ex has an identical layout, so it suffices.
+ */
+#define HV_PARTITION_PROPERTY_EX_MAX_VAR_SIZE \
+ (HV_HYP_PAGE_SIZE - sizeof(struct hv_input_get_partition_property_ex))
+
+union hv_partition_property_ex {
+ u8 buffer[HV_PARTITION_PROPERTY_EX_MAX_VAR_SIZE];
+ struct hv_partition_property_vmm_capabilities vmm_capabilities;
+ /* More fields to be filled in when needed */
+};
+
+struct hv_output_get_partition_property_ex {
+ union hv_partition_property_ex property_value;
+} __packed;
+
+enum hv_vp_state_page_type {
+ HV_VP_STATE_PAGE_REGISTERS = 0,
+ HV_VP_STATE_PAGE_INTERCEPT_MESSAGE = 1,
+ HV_VP_STATE_PAGE_GHCB = 2,
+ HV_VP_STATE_PAGE_COUNT
+};
+
+struct hv_input_map_vp_state_page {
+ u64 partition_id;
+ u32 vp_index;
+ u16 type; /* enum hv_vp_state_page_type */
+ union hv_input_vtl input_vtl;
+ union {
+ u8 as_uint8;
+ struct {
+ u8 map_location_provided : 1;
+ u8 reserved : 7;
+ };
+ } flags;
+ u64 requested_map_location;
+} __packed;
+
+struct hv_output_map_vp_state_page {
+ u64 map_location; /* GPA page number */
+} __packed;
+
+struct hv_input_unmap_vp_state_page {
+ u64 partition_id;
+ u32 vp_index;
+ u16 type; /* enum hv_vp_state_page_type */
+ union hv_input_vtl input_vtl;
+ u8 reserved0;
+} __packed;
+
+struct hv_x64_apic_eoi_message {
+ u32 vp_index;
+ u32 interrupt_vector;
+} __packed;
+
+struct hv_opaque_intercept_message {
+ u32 vp_index;
+} __packed;
+
+enum hv_port_type {
+ HV_PORT_TYPE_MESSAGE = 1,
+ HV_PORT_TYPE_EVENT = 2,
+ HV_PORT_TYPE_MONITOR = 3,
+ HV_PORT_TYPE_DOORBELL = 4 /* Root Partition only */
+};
+
+struct hv_port_info {
+ u32 port_type; /* enum hv_port_type */
+ u32 padding;
+ union {
+ struct {
+ u32 target_sint;
+ u32 target_vp;
+ u64 rsvdz;
+ } message_port_info;
+ struct {
+ u32 target_sint;
+ u32 target_vp;
+ u16 base_flag_number;
+ u16 flag_count;
+ u32 rsvdz;
+ } event_port_info;
+ struct {
+ u64 monitor_address;
+ u64 rsvdz;
+ } monitor_port_info;
+ struct {
+ u32 target_sint;
+ u32 target_vp;
+ u64 rsvdz;
+ } doorbell_port_info;
+ };
+} __packed;
+
+struct hv_connection_info {
+ u32 port_type;
+ u32 padding;
+ union {
+ struct {
+ u64 rsvdz;
+ } message_connection_info;
+ struct {
+ u64 rsvdz;
+ } event_connection_info;
+ struct {
+ u64 monitor_address;
+ } monitor_connection_info;
+ struct {
+ u64 gpa;
+ u64 trigger_value;
+ u64 flags;
+ } doorbell_connection_info;
+ };
+} __packed;
+
+/* Define synthetic interrupt controller flag constants. */
+#define HV_EVENT_FLAGS_COUNT (256 * 8)
+#define HV_EVENT_FLAGS_BYTE_COUNT (256)
+#define HV_EVENT_FLAGS32_COUNT (256 / sizeof(u32))
+
+/* On the Linux side, the flags are also exposed as longs so that long bit ops can be used on them */
+#define HV_EVENT_FLAGS_UL_COUNT (256 / sizeof(ulong))
+
+/* Define the synthetic interrupt controller event flags format. */
+union hv_synic_event_flags {
+ unsigned char flags8[HV_EVENT_FLAGS_BYTE_COUNT];
+ u32 flags32[HV_EVENT_FLAGS32_COUNT];
+ ulong flags[HV_EVENT_FLAGS_UL_COUNT]; /* linux only */
+};
+
+struct hv_synic_event_flags_page {
+ volatile union hv_synic_event_flags event_flags[HV_SYNIC_SINT_COUNT];
+};
+
+#define HV_SYNIC_EVENT_RING_MESSAGE_COUNT 63
+
+struct hv_synic_event_ring {
+ u8 signal_masked;
+ u8 ring_full;
+ u16 reserved_z;
+ u32 data[HV_SYNIC_EVENT_RING_MESSAGE_COUNT];
+} __packed;
+
+struct hv_synic_event_ring_page {
+ struct hv_synic_event_ring sint_event_ring[HV_SYNIC_SINT_COUNT];
+};
+
+/* Define SynIC control register. */
+union hv_synic_scontrol {
+ u64 as_uint64;
+ struct {
+ u64 enable : 1;
+ u64 reserved : 63;
+ } __packed;
+};
+
+/* Define the format of the SIEFP register */
+union hv_synic_siefp {
+ u64 as_uint64;
+ struct {
+ u64 siefp_enabled : 1;
+ u64 preserved : 11;
+ u64 base_siefp_gpa : 52;
+ } __packed;
+};
+
+union hv_synic_sirbp {
+ u64 as_uint64;
+ struct {
+ u64 sirbp_enabled : 1;
+ u64 preserved : 11;
+ u64 base_sirbp_gpa : 52;
+ } __packed;
+};
+
+union hv_interrupt_control {
+ u64 as_uint64;
+ struct {
+ u32 interrupt_type; /* enum hv_interrupt_type */
+#if IS_ENABLED(CONFIG_X86)
+ u32 level_triggered : 1;
+ u32 logical_dest_mode : 1;
+ u32 rsvd : 30;
+#elif IS_ENABLED(CONFIG_ARM64)
+ u32 rsvd1 : 2;
+ u32 asserted : 1;
+ u32 rsvd2 : 29;
+#endif
+ } __packed;
+};
+
+struct hv_stimer_state {
+ struct {
+ u32 undelivered_msg_pending : 1;
+ u32 reserved : 31;
+ } __packed flags;
+ u32 resvd;
+ u64 config;
+ u64 count;
+ u64 adjustment;
+ u64 undelivered_exp_time;
+} __packed;
+
+struct hv_synthetic_timers_state {
+ struct hv_stimer_state timers[HV_SYNIC_STIMER_COUNT];
+ u64 reserved[5];
+} __packed;
+
+struct hv_async_completion_message_payload {
+ u64 partition_id;
+ u32 status;
+ u32 completion_count;
+ u64 sub_status;
+} __packed;
+
+union hv_input_delete_vp {
+ u64 as_uint64[2];
+ struct {
+ u64 partition_id;
+ u32 vp_index;
+ u8 reserved[4];
+ } __packed;
+} __packed;
+
+struct hv_input_assert_virtual_interrupt {
+ u64 partition_id;
+ union hv_interrupt_control control;
+ u64 dest_addr; /* cpu's apic id */
+ u32 vector;
+ u8 target_vtl;
+ u8 rsvd_z0;
+ u16 rsvd_z1;
+} __packed;
+
+struct hv_input_create_port {
+ u64 port_partition_id;
+ union hv_port_id port_id;
+ u8 port_vtl;
+ u8 min_connection_vtl;
+ u16 padding;
+ u64 connection_partition_id;
+ struct hv_port_info port_info;
+ struct hv_proximity_domain_info proximity_domain_info;
+} __packed;
+
+union hv_input_delete_port {
+ u64 as_uint64[2];
+ struct {
+ u64 port_partition_id;
+ union hv_port_id port_id;
+ u32 reserved;
+ };
+} __packed;
+
+struct hv_input_connect_port {
+ u64 connection_partition_id;
+ union hv_connection_id connection_id;
+ u8 connection_vtl;
+ u8 rsvdz0;
+ u16 rsvdz1;
+ u64 port_partition_id;
+ union hv_port_id port_id;
+ u32 reserved2;
+ struct hv_connection_info connection_info;
+ struct hv_proximity_domain_info proximity_domain_info;
+} __packed;
+
+union hv_input_disconnect_port {
+ u64 as_uint64[2];
+ struct {
+ u64 connection_partition_id;
+ union hv_connection_id connection_id;
+ u32 is_doorbell: 1;
+ u32 reserved: 31;
+ } __packed;
+} __packed;
+
+union hv_input_notify_port_ring_empty {
+ u64 as_uint64;
+ struct {
+ u32 sint_index;
+ u32 reserved;
+ };
+} __packed;
+
+struct hv_vp_state_data_xsave {
+ u64 flags;
+ union hv_x64_xsave_xfem_register states;
+} __packed;
+
+/*
+ * For getting and setting VP state, there are two options based on the state type:
+ *
+ * 1.) Data that is accessed by PFNs in the input hypercall page. This is used
+ * for state which may not fit into the hypercall pages.
+ * 2.) Data that is accessed directly in the input/output hypercall pages.
+ * This is used for state that will always fit into the hypercall pages.
+ *
+ * In the future this could be dynamic based on the size if needed.
+ *
+ * Note: these hypercalls have an 8-byte-aligned variable header size, as per the TLFS.
+ */
+
+#define HV_GET_SET_VP_STATE_TYPE_PFN BIT(31)
+
+enum hv_get_set_vp_state_type {
+ /* HvGetSetVpStateLocalInterruptControllerState - APIC/GIC state */
+ HV_GET_SET_VP_STATE_LAPIC_STATE = 0 | HV_GET_SET_VP_STATE_TYPE_PFN,
+ HV_GET_SET_VP_STATE_XSAVE = 1 | HV_GET_SET_VP_STATE_TYPE_PFN,
+ HV_GET_SET_VP_STATE_SIM_PAGE = 2 | HV_GET_SET_VP_STATE_TYPE_PFN,
+ HV_GET_SET_VP_STATE_SIEF_PAGE = 3 | HV_GET_SET_VP_STATE_TYPE_PFN,
+ HV_GET_SET_VP_STATE_SYNTHETIC_TIMERS = 4,
+};
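+
+/*
+ * Illustrative sketch, not part of the original header: option 1 vs.
+ * option 2 above is distinguished by bit 31 of the state type.
+ */
+static inline bool hv_vp_state_uses_pfns(u32 type)
+{
+ return type & HV_GET_SET_VP_STATE_TYPE_PFN;
+}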
+
+struct hv_vp_state_data {
+ u32 type;
+ u32 rsvd;
+ struct hv_vp_state_data_xsave xsave;
+} __packed;
+
+struct hv_input_get_vp_state {
+ u64 partition_id;
+ u32 vp_index;
+ u8 input_vtl;
+ u8 rsvd0;
+ u16 rsvd1;
+ struct hv_vp_state_data state_data;
+ u64 output_data_pfns[];
+} __packed;
+
+union hv_output_get_vp_state {
+ struct hv_synthetic_timers_state synthetic_timers_state;
+} __packed;
+
+union hv_input_set_vp_state_data {
+ u64 pfns;
+ u8 bytes;
+} __packed;
+
+struct hv_input_set_vp_state {
+ u64 partition_id;
+ u32 vp_index;
+ u8 input_vtl;
+ u8 rsvd0;
+ u16 rsvd1;
+ struct hv_vp_state_data state_data;
+ union hv_input_set_vp_state_data data[];
+} __packed;
+
+union hv_x64_vp_execution_state {
+ u16 as_uint16;
+ struct {
+ u16 cpl:2;
+ u16 cr0_pe:1;
+ u16 cr0_am:1;
+ u16 efer_lma:1;
+ u16 debug_active:1;
+ u16 interruption_pending:1;
+ u16 vtl:4;
+ u16 enclave_mode:1;
+ u16 interrupt_shadow:1;
+ u16 virtualization_fault_active:1;
+ u16 reserved:2;
+ } __packed;
+};
+
+struct hv_x64_intercept_message_header {
+ u32 vp_index;
+ u8 instruction_length:4;
+ u8 cr8:4; /* Only set for exo partitions */
+ u8 intercept_access_type;
+ union hv_x64_vp_execution_state execution_state;
+ struct hv_x64_segment_register cs_segment;
+ u64 rip;
+ u64 rflags;
+} __packed;
+
+union hv_x64_memory_access_info {
+ u8 as_uint8;
+ struct {
+ u8 gva_valid:1;
+ u8 gva_gpa_valid:1;
+ u8 hypercall_output_pending:1;
+ u8 tlb_locked_no_overlay:1;
+ u8 reserved:4;
+ } __packed;
+};
+
+struct hv_x64_memory_intercept_message {
+ struct hv_x64_intercept_message_header header;
+ u32 cache_type; /* enum hv_cache_type */
+ u8 instruction_byte_count;
+ union hv_x64_memory_access_info memory_access_info;
+ u8 tpr_priority;
+ u8 reserved1;
+ u64 guest_virtual_address;
+ u64 guest_physical_address;
+ u8 instruction_bytes[16];
+} __packed;
+
+/*
+ * Dispatch state for the VP communicated by the hypervisor to the
+ * VP-dispatching thread in the root on return from HVCALL_DISPATCH_VP.
+ */
+enum hv_vp_dispatch_state {
+ HV_VP_DISPATCH_STATE_INVALID = 0,
+ HV_VP_DISPATCH_STATE_BLOCKED = 1,
+ HV_VP_DISPATCH_STATE_READY = 2,
+};
+
+/*
+ * Dispatch event that caused the current dispatch state on return from
+ * HVCALL_DISPATCH_VP.
+ */
+enum hv_vp_dispatch_event {
+ HV_VP_DISPATCH_EVENT_INVALID = 0x00000000,
+ HV_VP_DISPATCH_EVENT_SUSPEND = 0x00000001,
+ HV_VP_DISPATCH_EVENT_INTERCEPT = 0x00000002,
+};
+
+#define HV_ROOT_SCHEDULER_MAX_VPS_PER_CHILD_PARTITION 1024
+/* The maximum array size of an HV_GENERIC_SET (vp_set) buffer */
+#define HV_GENERIC_SET_QWORD_COUNT(max) (((((max) - 1) >> 6) + 1) + 2)
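+/*
+ * Worked example (illustrative): for max = 1024 VPs the bitmap needs
+ * ((1024 - 1) >> 6) + 1 = 16 qwords, plus 2 qwords for the hv_vpset
+ * format and valid_bank_mask header fields, i.e. 18 qwords in total.
+ */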
+
+struct hv_vp_signal_bitset_scheduler_message {
+ u64 partition_id;
+ u32 overflow_count;
+ u16 vp_count;
+ u16 reserved;
+
+#define BITSET_BUFFER_SIZE \
+ HV_GENERIC_SET_QWORD_COUNT(HV_ROOT_SCHEDULER_MAX_VPS_PER_CHILD_PARTITION)
+ union {
+ struct hv_vpset bitset;
+ u64 bitset_buffer[BITSET_BUFFER_SIZE];
+ } vp_bitset;
+#undef BITSET_BUFFER_SIZE
+} __packed;
+
+static_assert(sizeof(struct hv_vp_signal_bitset_scheduler_message) <=
+ (sizeof(struct hv_message) - sizeof(struct hv_message_header)));
+
+#define HV_MESSAGE_MAX_PARTITION_VP_PAIR_COUNT \
+ (((sizeof(struct hv_message) - sizeof(struct hv_message_header)) / \
+ (sizeof(u64 /* partition id */) + sizeof(u32 /* vp index */))) - 1)
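+
+/*
+ * Illustrative arithmetic, assuming the usual 256-byte hv_message with a
+ * 16-byte header: 240 payload bytes / 12 bytes per (partition id, vp index)
+ * pair = 20; one pair's worth is then reserved for the 8-byte count prefix
+ * and the 4-byte trailing reserved field, leaving 19.
+ */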
+
+struct hv_vp_signal_pair_scheduler_message {
+ u32 overflow_count;
+ u8 vp_count;
+ u8 reserved1[3];
+
+ u64 partition_ids[HV_MESSAGE_MAX_PARTITION_VP_PAIR_COUNT];
+ u32 vp_indexes[HV_MESSAGE_MAX_PARTITION_VP_PAIR_COUNT];
+
+ u8 reserved2[4];
+} __packed;
+
+static_assert(sizeof(struct hv_vp_signal_pair_scheduler_message) ==
+ (sizeof(struct hv_message) - sizeof(struct hv_message_header)));
+
+/* Input and output structures for HVCALL_DISPATCH_VP */
+#define HV_DISPATCH_VP_FLAG_CLEAR_INTERCEPT_SUSPEND 0x1
+#define HV_DISPATCH_VP_FLAG_ENABLE_CALLER_INTERRUPTS 0x2
+#define HV_DISPATCH_VP_FLAG_SET_CALLER_SPEC_CTRL 0x4
+#define HV_DISPATCH_VP_FLAG_SKIP_VP_SPEC_FLUSH 0x8
+#define HV_DISPATCH_VP_FLAG_SKIP_CALLER_SPEC_FLUSH 0x10
+#define HV_DISPATCH_VP_FLAG_SKIP_CALLER_USER_SPEC_FLUSH 0x20
+#define HV_DISPATCH_VP_FLAG_SCAN_INTERRUPT_INJECTION 0x40
+
+struct hv_input_dispatch_vp {
+ u64 partition_id;
+ u32 vp_index;
+ u32 flags;
+ u64 time_slice; /* in 100ns */
+ u64 spec_ctrl;
+} __packed;
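+
+/*
+ * Illustrative: time_slice is in 100ns units, so e.g. a 10ms slice would be
+ * passed as 100000.
+ */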
+
+struct hv_output_dispatch_vp {
+ u32 dispatch_state; /* enum hv_vp_dispatch_state */
+ u32 dispatch_event; /* enum hv_vp_dispatch_event */
+} __packed;
+
+struct hv_input_modify_sparse_spa_page_host_access {
+ u32 host_access : 2;
+ u32 reserved : 30;
+ u32 flags;
+ u64 partition_id;
+ u64 spa_page_list[];
+} __packed;
+
+/* hv_input_modify_sparse_spa_page_host_access flags */
+#define HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE 0x1
+#define HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_SHARED 0x2
+#define HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE 0x4
+#define HV_MODIFY_SPA_PAGE_HOST_ACCESS_HUGE_PAGE 0x8
+
+#endif /* _HV_HVHDK_H */
diff --git a/include/hyperv/hvhdk_mini.h b/include/hyperv/hvhdk_mini.h
new file mode 100644
index 000000000000..41a29bf8ec14
--- /dev/null
+++ b/include/hyperv/hvhdk_mini.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Type definitions for the Microsoft Hypervisor.
+ */
+#ifndef _HV_HVHDK_MINI_H
+#define _HV_HVHDK_MINI_H
+
+#include "hvgdk_mini.h"
+
+/*
+ * Doorbell connection_info flags.
+ */
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_MASK 0x00000007
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_ANY 0x00000000
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_BYTE 0x00000001
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_WORD 0x00000002
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_DWORD 0x00000003
+#define HV_DOORBELL_FLAG_TRIGGER_SIZE_QWORD 0x00000004
+#define HV_DOORBELL_FLAG_TRIGGER_ANY_VALUE 0x80000000
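+
+/*
+ * Sketch (an assumption from the masks above): the trigger size is selected
+ * with (flags & HV_DOORBELL_FLAG_TRIGGER_SIZE_MASK); e.g. a doorbell that
+ * fires on a write of any size and any value would use
+ * HV_DOORBELL_FLAG_TRIGGER_SIZE_ANY | HV_DOORBELL_FLAG_TRIGGER_ANY_VALUE.
+ */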
+
+/* Each generic set contains 64 elements */
+#define HV_GENERIC_SET_SHIFT (6)
+#define HV_GENERIC_SET_MASK (63)
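+/*
+ * Sketch (inferred from the constants above): for element n of a generic
+ * set, the containing 64-bit bank is n >> HV_GENERIC_SET_SHIFT and the bit
+ * within that bank is n & HV_GENERIC_SET_MASK.
+ */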
+
+enum hv_generic_set_format {
+ HV_GENERIC_SET_SPARSE_4K,
+ HV_GENERIC_SET_ALL,
+};
+#define HV_GENERIC_SET_FORMAT hv_generic_set_format
+
+enum hv_scheduler_type {
+ HV_SCHEDULER_TYPE_LP = 1, /* Classic scheduler w/o SMT */
+ HV_SCHEDULER_TYPE_LP_SMT = 2, /* Classic scheduler w/ SMT */
+ HV_SCHEDULER_TYPE_CORE_SMT = 3, /* Core scheduler */
+ HV_SCHEDULER_TYPE_ROOT = 4, /* Root / integrated scheduler */
+ HV_SCHEDULER_TYPE_MAX
+};
+
+/* HV_STATS_AREA_TYPE */
+enum hv_stats_area_type {
+ HV_STATS_AREA_SELF = 0,
+ HV_STATS_AREA_PARENT = 1,
+ HV_STATS_AREA_INTERNAL = 2,
+ HV_STATS_AREA_COUNT
+};
+
+enum hv_stats_object_type {
+ HV_STATS_OBJECT_HYPERVISOR = 0x00000001,
+ HV_STATS_OBJECT_LOGICAL_PROCESSOR = 0x00000002,
+ HV_STATS_OBJECT_PARTITION = 0x00010001,
+ HV_STATS_OBJECT_VP = 0x00010002
+};
+
+union hv_stats_object_identity {
+ /* hv_stats_hypervisor */
+ struct {
+ u8 reserved[15];
+ u8 stats_area_type;
+ } __packed hv;
+
+ /* hv_stats_logical_processor */
+ struct {
+ u32 lp_index;
+ u8 reserved[11];
+ u8 stats_area_type;
+ } __packed lp;
+
+ /* hv_stats_partition */
+ struct {
+ u64 partition_id;
+ u8 reserved[7];
+ u8 stats_area_type;
+ } __packed partition;
+
+ /* hv_stats_vp */
+ struct {
+ u64 partition_id;
+ u32 vp_index;
+ u16 flags;
+ u8 reserved;
+ u8 stats_area_type;
+ } __packed vp;
+};
+
+enum hv_partition_property_code {
+ /* Privilege properties */
+ HV_PARTITION_PROPERTY_PRIVILEGE_FLAGS = 0x00010000,
+ HV_PARTITION_PROPERTY_SYNTHETIC_PROC_FEATURES = 0x00010001,
+
+ /* Resource properties */
+ HV_PARTITION_PROPERTY_GPA_PAGE_ACCESS_TRACKING = 0x00050005,
+ HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION = 0x00050017,
+
+ /* Compatibility properties */
+ HV_PARTITION_PROPERTY_PROCESSOR_XSAVE_FEATURES = 0x00060002,
+ HV_PARTITION_PROPERTY_XSAVE_STATES = 0x00060007,
+ HV_PARTITION_PROPERTY_MAX_XSAVE_DATA_SIZE = 0x00060008,
+ HV_PARTITION_PROPERTY_PROCESSOR_CLOCK_FREQUENCY = 0x00060009,
+
+ /* Extended properties with larger property values */
+ HV_PARTITION_PROPERTY_VMM_CAPABILITIES = 0x00090007,
+};
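+
+/*
+ * Observation (an inference from the values above, not a documented rule):
+ * the upper 16 bits of a property code appear to encode its class, e.g.
+ * 0x0001xxxx for privilege, 0x0005xxxx for resource and 0x0006xxxx for
+ * compatibility properties.
+ */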
+
+#define HV_PARTITION_VMM_CAPABILITIES_BANK_COUNT 1
+#define HV_PARTITION_VMM_CAPABILITIES_RESERVED_BITFIELD_COUNT 59
+
+struct hv_partition_property_vmm_capabilities {
+ u16 bank_count;
+ u16 reserved[3];
+ union {
+ u64 as_uint64[HV_PARTITION_VMM_CAPABILITIES_BANK_COUNT];
+ struct {
+ u64 map_gpa_preserve_adjustable: 1;
+ u64 vmm_can_provide_overlay_gpfn: 1;
+ u64 vp_affinity_property: 1;
+#if IS_ENABLED(CONFIG_ARM64)
+ u64 vmm_can_provide_gic_overlay_locations: 1;
+#else
+ u64 reservedbit3: 1;
+#endif
+ u64 assignable_synthetic_proc_features: 1;
+ u64 reserved0: HV_PARTITION_VMM_CAPABILITIES_RESERVED_BITFIELD_COUNT;
+ } __packed;
+ };
+} __packed;
+
+enum hv_snp_status {
+ HV_SNP_STATUS_NONE = 0,
+ HV_SNP_STATUS_AVAILABLE = 1,
+ HV_SNP_STATUS_INCOMPATIBLE = 2,
+ HV_SNP_STATUS_PSP_UNAVAILABLE = 3,
+ HV_SNP_STATUS_PSP_INIT_FAILED = 4,
+ HV_SNP_STATUS_PSP_BAD_FW_VERSION = 5,
+ HV_SNP_STATUS_BAD_CONFIGURATION = 6,
+ HV_SNP_STATUS_PSP_FW_UPDATE_IN_PROGRESS = 7,
+ HV_SNP_STATUS_PSP_RB_INIT_FAILED = 8,
+ HV_SNP_STATUS_PSP_PLATFORM_STATUS_FAILED = 9,
+ HV_SNP_STATUS_PSP_INIT_LATE_FAILED = 10,
+};
+
+enum hv_system_property {
+ /* Add more values when needed */
+ HV_SYSTEM_PROPERTY_SLEEP_STATE = 3,
+ HV_SYSTEM_PROPERTY_SCHEDULER_TYPE = 15,
+ HV_DYNAMIC_PROCESSOR_FEATURE_PROPERTY = 21,
+ HV_SYSTEM_PROPERTY_CRASHDUMPAREA = 47,
+};
+
+#define HV_PFN_RANGE_PGBITS 24 /* HV_SPA_PAGE_RANGE_ADDITIONAL_PAGES_BITS */
+union hv_pfn_range { /* HV_SPA_PAGE_RANGE */
+ u64 as_uint64;
+ struct {
+ /* 39:0: base pfn. 63:40: additional pages */
+ u64 base_pfn : 64 - HV_PFN_RANGE_PGBITS;
+ u64 add_pfns : HV_PFN_RANGE_PGBITS;
+ } __packed;
+};
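+
+/*
+ * Example (illustrative): with HV_PFN_RANGE_PGBITS = 24, base_pfn occupies
+ * bits 39:0 and add_pfns bits 63:40, so a range presumably covers base_pfn
+ * through base_pfn + add_pfns, i.e. add_pfns + 1 pages.
+ */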
+
+enum hv_sleep_state {
+ HV_SLEEP_STATE_S1 = 1,
+ HV_SLEEP_STATE_S2 = 2,
+ HV_SLEEP_STATE_S3 = 3,
+ HV_SLEEP_STATE_S4 = 4,
+ HV_SLEEP_STATE_S5 = 5,
+ /*
+	 * After the hypervisor has received this, any follow-up sleep
+	 * state registration requests will be rejected.
+ */
+ HV_SLEEP_STATE_LOCK = 6
+};
+
+enum hv_dynamic_processor_feature_property {
+ /* Add more values when needed */
+ HV_X64_DYNAMIC_PROCESSOR_FEATURE_MAX_ENCRYPTED_PARTITIONS = 13,
+ HV_X64_DYNAMIC_PROCESSOR_FEATURE_SNP_STATUS = 16,
+};
+
+struct hv_input_get_system_property {
+ u32 property_id; /* enum hv_system_property */
+ union {
+ u32 as_uint32;
+#if IS_ENABLED(CONFIG_X86)
+ /* enum hv_dynamic_processor_feature_property */
+ u32 hv_processor_feature;
+#endif
+ /* More fields to be filled in when needed */
+ };
+} __packed;
+
+struct hv_output_get_system_property {
+ union {
+ u32 scheduler_type; /* enum hv_scheduler_type */
+#if IS_ENABLED(CONFIG_X86)
+ u64 hv_processor_feature_value;
+#endif
+ union hv_pfn_range hv_cda_info; /* CrashdumpAreaAddress */
+ u64 hv_tramp_pa; /* CrashdumpTrampolineAddress */
+ };
+} __packed;
+
+struct hv_sleep_state_info {
+ u32 sleep_state; /* enum hv_sleep_state */
+ u8 pm1a_slp_typ;
+ u8 pm1b_slp_typ;
+} __packed;
+
+struct hv_input_set_system_property {
+ u32 property_id; /* enum hv_system_property */
+ u32 reserved;
+ union {
+ /* More fields to be filled in when needed */
+ struct hv_sleep_state_info set_sleep_state_info;
+
+ /*
+		 * Add a reserved field to ensure the union is 8-byte aligned,
+		 * as the existing members may not be. This is a temporary
+		 * measure
+ * until all remaining members are added.
+ */
+ u64 reserved0[8];
+ };
+} __packed;
+
+struct hv_input_enter_sleep_state { /* HV_INPUT_ENTER_SLEEP_STATE */
+ u32 sleep_state; /* enum hv_sleep_state */
+} __packed;
+
+struct hv_input_map_stats_page {
+ u32 type; /* enum hv_stats_object_type */
+ u32 padding;
+ union hv_stats_object_identity identity;
+} __packed;
+
+struct hv_input_map_stats_page2 {
+ u32 type; /* enum hv_stats_object_type */
+ u32 padding;
+ union hv_stats_object_identity identity;
+ u64 map_location;
+} __packed;
+
+struct hv_output_map_stats_page {
+ u64 map_location;
+} __packed;
+
+struct hv_input_unmap_stats_page {
+ u32 type; /* enum hv_stats_object_type */
+ u32 padding;
+ union hv_stats_object_identity identity;
+} __packed;
+
+struct hv_proximity_domain_flags {
+ u32 proximity_preferred : 1;
+ u32 reserved : 30;
+ u32 proximity_info_valid : 1;
+} __packed;
+
+struct hv_proximity_domain_info {
+ u32 domain_id;
+ struct hv_proximity_domain_flags flags;
+} __packed;
+
+/* HvDepositMemory hypercall */
+struct hv_deposit_memory { /* HV_INPUT_DEPOSIT_MEMORY */
+ u64 partition_id;
+ u64 gpa_page_list[];
+} __packed;
+
+struct hv_input_withdraw_memory {
+ u64 partition_id;
+ struct hv_proximity_domain_info proximity_domain_info;
+} __packed;
+
+struct hv_output_withdraw_memory {
+ DECLARE_FLEX_ARRAY(u64, gpa_page_list);
+} __packed;
+
+/* HV Map GPA (Guest Physical Address) Flags */
+#define HV_MAP_GPA_PERMISSIONS_NONE 0x0
+#define HV_MAP_GPA_READABLE 0x1
+#define HV_MAP_GPA_WRITABLE 0x2
+#define HV_MAP_GPA_KERNEL_EXECUTABLE 0x4
+#define HV_MAP_GPA_USER_EXECUTABLE 0x8
+#define HV_MAP_GPA_EXECUTABLE 0xC
+#define HV_MAP_GPA_PERMISSIONS_MASK 0xF
+#define HV_MAP_GPA_ADJUSTABLE 0x8000
+#define HV_MAP_GPA_NO_ACCESS 0x10000
+#define HV_MAP_GPA_NOT_CACHED 0x200000
+#define HV_MAP_GPA_LARGE_PAGE 0x80000000
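+
+/*
+ * Illustrative composition (not a defined constant): a conventional
+ * read/write/kernel-execute mapping would pass
+ * HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE | HV_MAP_GPA_KERNEL_EXECUTABLE
+ * (0x7) in map_flags; note HV_MAP_GPA_EXECUTABLE (0xC) is simply the OR of
+ * the kernel- and user-executable bits.
+ */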
+
+struct hv_input_map_gpa_pages {
+ u64 target_partition_id;
+ u64 target_gpa_base;
+ u32 map_flags;
+ u32 padding;
+ u64 source_gpa_page_list[];
+} __packed;
+
+union hv_gpa_page_access_state_flags {
+ struct {
+ u64 clear_accessed : 1;
+ u64 set_accessed : 1;
+ u64 clear_dirty : 1;
+ u64 set_dirty : 1;
+ u64 reserved : 60;
+ } __packed;
+ u64 as_uint64;
+};
+
+struct hv_input_get_gpa_pages_access_state {
+ u64 partition_id;
+ union hv_gpa_page_access_state_flags flags;
+ u64 hv_gpa_page_number;
+} __packed;
+
+union hv_gpa_page_access_state {
+ struct {
+ u8 accessed : 1;
+ u8 dirty : 1;
+ u8 reserved: 6;
+ };
+ u8 as_uint8;
+} __packed;
+
+enum hv_crashdump_action {
+ HV_CRASHDUMP_NONE = 0,
+ HV_CRASHDUMP_SUSPEND_ALL_VPS,
+ HV_CRASHDUMP_PREPARE_FOR_STATE_SAVE,
+ HV_CRASHDUMP_STATE_SAVED,
+ HV_CRASHDUMP_ENTRY,
+};
+
+struct hv_partition_event_root_crashdump_input {
+ u32 crashdump_action; /* enum hv_crashdump_action */
+} __packed;
+
+struct hv_input_disable_hyp_ex { /* HV_X64_INPUT_DISABLE_HYPERVISOR_EX */
+ u64 rip;
+ u64 arg;
+} __packed;
+
+struct hv_crashdump_area { /* HV_CRASHDUMP_AREA */
+ u32 version;
+ union {
+ u32 flags_as_uint32;
+ struct {
+ u32 cda_valid : 1;
+ u32 cda_unused : 31;
+ } __packed;
+ };
+ /* more unused fields */
+} __packed;
+
+union hv_partition_event_input {
+ struct hv_partition_event_root_crashdump_input crashdump_input;
+};
+
+enum hv_partition_event {
+ HV_PARTITION_EVENT_ROOT_CRASHDUMP = 2,
+};
+
+struct hv_input_notify_partition_event {
+ u32 event; /* enum hv_partition_event */
+ union hv_partition_event_input input;
+} __packed;
+
+struct hv_lp_startup_status {
+ u64 hv_status;
+ u64 substatus1;
+ u64 substatus2;
+ u64 substatus3;
+ u64 substatus4;
+ u64 substatus5;
+ u64 substatus6;
+} __packed;
+
+struct hv_input_add_logical_processor {
+ u32 lp_index;
+ u32 apic_id;
+ struct hv_proximity_domain_info proximity_domain_info;
+} __packed;
+
+struct hv_output_add_logical_processor {
+ struct hv_lp_startup_status startup_status;
+} __packed;
+
+enum { /* HV_SUBNODE_TYPE */
+ HV_SUBNODE_ANY = 0,
+ HV_SUBNODE_SOCKET,
+ HV_SUBNODE_CLUSTER,
+ HV_SUBNODE_L3,
+ HV_SUBNODE_COUNT,
+ HV_SUBNODE_INVALID = -1
+};
+
+struct hv_create_vp { /* HV_INPUT_CREATE_VP */
+ u64 partition_id;
+ u32 vp_index;
+ u8 padding[3];
+ u8 subnode_type;
+ u64 subnode_id;
+ struct hv_proximity_domain_info proximity_domain_info;
+ u64 flags;
+} __packed;
+
+/* HV_INTERRUPT_TRIGGER_MODE */
+enum hv_interrupt_trigger_mode {
+ HV_INTERRUPT_TRIGGER_MODE_EDGE = 0,
+ HV_INTERRUPT_TRIGGER_MODE_LEVEL = 1,
+};
+
+/* HV_DEVICE_INTERRUPT_DESCRIPTOR */
+struct hv_device_interrupt_descriptor {
+ u32 interrupt_type;
+ u32 trigger_mode;
+ u32 vector_count;
+ u32 reserved;
+ struct hv_device_interrupt_target target;
+} __packed;
+
+/* HV_INPUT_MAP_DEVICE_INTERRUPT */
+struct hv_input_map_device_interrupt {
+ u64 partition_id;
+ u64 device_id;
+ u32 flags;
+ u32 base_irt_idx;
+ struct hv_interrupt_entry logical_interrupt_entry;
+ struct hv_device_interrupt_descriptor interrupt_descriptor;
+} __packed;
+
+/* HV_OUTPUT_MAP_DEVICE_INTERRUPT */
+struct hv_output_map_device_interrupt {
+ struct hv_interrupt_entry interrupt_entry;
+ u64 ext_status_deprecated[5];
+} __packed;
+
+/* HV_INPUT_UNMAP_DEVICE_INTERRUPT */
+struct hv_input_unmap_device_interrupt {
+ u64 partition_id;
+ u64 device_id;
+ struct hv_interrupt_entry interrupt_entry;
+ u32 flags;
+} __packed;
+
+#define HV_SOURCE_SHADOW_NONE 0x0
+#define HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE 0x1
+
+struct hv_send_ipi_ex { /* HV_INPUT_SEND_SYNTHETIC_CLUSTER_IPI_EX */
+ u32 vector;
+ u32 reserved;
+ struct hv_vpset vp_set;
+} __packed;
+
+typedef u16 hv_pci_rid; /* HV_PCI_RID */
+typedef u16 hv_pci_segment; /* HV_PCI_SEGMENT */
+typedef u64 hv_logical_device_id;
+union hv_pci_bdf { /* HV_PCI_BDF */
+ u16 as_uint16;
+
+ struct {
+ u8 function : 3;
+ u8 device : 5;
+ u8 bus;
+ };
+} __packed;
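+
+/*
+ * Example (illustrative): PCI 03:1d.2 (bus 0x03, device 0x1d, function 2)
+ * encodes as as_uint16 = (0x03 << 8) | (0x1d << 3) | 2 = 0x03ea, given the
+ * usual low-to-high bitfield layout.
+ */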
+
+union hv_pci_bus_range {
+ u16 as_uint16;
+
+ struct {
+ u8 subordinate_bus;
+ u8 secondary_bus;
+ };
+} __packed;
+
+enum hv_device_type { /* HV_DEVICE_TYPE */
+ HV_DEVICE_TYPE_LOGICAL = 0,
+ HV_DEVICE_TYPE_PCI = 1,
+ HV_DEVICE_TYPE_IOAPIC = 2,
+ HV_DEVICE_TYPE_ACPI = 3,
+};
+
+union hv_device_id { /* HV_DEVICE_ID */
+ u64 as_uint64;
+
+ struct {
+ u64 reserved0 : 62;
+ u64 device_type : 2;
+ };
+
+ /* HV_DEVICE_TYPE_LOGICAL */
+ struct {
+ u64 id : 62;
+ u64 device_type : 2;
+ } logical;
+
+ /* HV_DEVICE_TYPE_PCI */
+ struct {
+ union {
+ hv_pci_rid rid;
+ union hv_pci_bdf bdf;
+ };
+
+ hv_pci_segment segment;
+ union hv_pci_bus_range shadow_bus_range;
+
+ u16 phantom_function_bits : 2;
+ u16 source_shadow : 1;
+
+ u16 rsvdz0 : 11;
+ u16 device_type : 2;
+ } pci;
+
+ /* HV_DEVICE_TYPE_IOAPIC */
+ struct {
+ u8 ioapic_id;
+ u8 rsvdz0;
+ u16 rsvdz1;
+ u16 rsvdz2;
+
+ u16 rsvdz3 : 14;
+ u16 device_type : 2;
+ } ioapic;
+
+ /* HV_DEVICE_TYPE_ACPI */
+ struct {
+ u32 input_mapping_base;
+ u32 input_mapping_count : 30;
+ u32 device_type : 2;
+ } acpi;
+} __packed;
+
+#endif /* _HV_HVHDK_MINI_H */
diff --git a/include/keys/asymmetric-parser.h b/include/keys/asymmetric-parser.h
index c47dc5405f79..516a3f51179e 100644
--- a/include/keys/asymmetric-parser.h
+++ b/include/keys/asymmetric-parser.h
@@ -10,6 +10,8 @@
#ifndef _KEYS_ASYMMETRIC_PARSER_H
#define _KEYS_ASYMMETRIC_PARSER_H
+struct key_preparsed_payload;
+
/*
* Key data parser. Called during key instantiation.
*/
diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h
index c432fdb8547f..1b91c8f98688 100644
--- a/include/keys/asymmetric-type.h
+++ b/include/keys/asymmetric-type.h
@@ -49,11 +49,11 @@ enum asymmetric_payload_bits {
*/
struct asymmetric_key_id {
unsigned short len;
- unsigned char data[];
+ unsigned char data[] __counted_by(len);
};
struct asymmetric_key_ids {
- void *id[2];
+ void *id[3];
};
extern bool asymmetric_key_id_same(const struct asymmetric_key_id *kid1,
@@ -81,8 +81,12 @@ const struct public_key *asymmetric_key_public_key(const struct key *key)
extern struct key *find_asymmetric_key(struct key *keyring,
const struct asymmetric_key_id *id_0,
const struct asymmetric_key_id *id_1,
+ const struct asymmetric_key_id *id_2,
bool partial);
+int x509_load_certificate_list(const u8 cert_list[], const unsigned long list_size,
+ const struct key *keyring);
+
/*
* The payload is at the discretion of the subtype.
*/
diff --git a/include/keys/dns_resolver-type.h b/include/keys/dns_resolver-type.h
index 218ca22fb056..1b89088a2837 100644
--- a/include/keys/dns_resolver-type.h
+++ b/include/keys/dns_resolver-type.h
@@ -12,8 +12,4 @@
extern struct key_type key_type_dns_resolver;
-extern int request_dns_resolver_key(const char *description,
- const char *callout_info,
- char **data);
-
#endif /* _KEYS_DNS_RESOLVER_TYPE_H */
diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h
index 333c0f49a9cd..0ddbe197a261 100644
--- a/include/keys/rxrpc-type.h
+++ b/include/keys/rxrpc-type.h
@@ -9,6 +9,7 @@
#define _KEYS_RXRPC_TYPE_H
#include <linux/key.h>
+#include <crypto/krb5.h>
/*
* key type for AF_RXRPC keys
@@ -32,6 +33,21 @@ struct rxkad_key {
};
/*
+ * RxRPC key for YFS-RxGK (type-6 security)
+ */
+struct rxgk_key {
+ s64 begintime; /* Time at which the ticket starts */
+ s64 endtime; /* Time at which the ticket ends */
+ u64 lifetime; /* Maximum lifespan of a connection (seconds) */
+ u64 bytelife; /* Maximum number of bytes on a connection */
+ unsigned int enctype; /* Encoding type */
+ s8 level; /* Negotiated security RXRPC_SECURITY_PLAIN/AUTH/ENCRYPT */
+ struct krb5_buffer key; /* Master key, K0 */
+ struct krb5_buffer ticket; /* Ticket to be passed to server */
+ u8 _key[]; /* Key storage */
+};
+
+/*
* list of tokens attached to an rxrpc key
*/
struct rxrpc_key_token {
@@ -40,6 +56,7 @@ struct rxrpc_key_token {
struct rxrpc_key_token *next; /* the next token in the list */
union {
struct rxkad_key *kad;
+ struct rxgk_key *rxgk;
};
};
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index 6acd3cf13a18..a6c2897bcc63 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -10,16 +10,28 @@
#include <linux/key.h>
+enum blacklist_hash_type {
+ /* TBSCertificate hash */
+ BLACKLIST_HASH_X509_TBS = 1,
+ /* Raw data hash */
+ BLACKLIST_HASH_BINARY = 2,
+};
+
#ifdef CONFIG_SYSTEM_TRUSTED_KEYRING
extern int restrict_link_by_builtin_trusted(struct key *keyring,
const struct key_type *type,
const union key_payload *payload,
struct key *restriction_key);
+int restrict_link_by_digsig_builtin(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restriction_key);
extern __init int load_module_cert(struct key *keyring);
#else
#define restrict_link_by_builtin_trusted restrict_link_reject
+#define restrict_link_by_digsig_builtin restrict_link_reject
static inline __init int load_module_cert(struct key *keyring)
{
@@ -34,19 +46,42 @@ extern int restrict_link_by_builtin_and_secondary_trusted(
const struct key_type *type,
const union key_payload *payload,
struct key *restriction_key);
+int restrict_link_by_digsig_builtin_and_secondary(struct key *keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restriction_key);
+void __init add_to_secondary_keyring(const char *source, const void *data, size_t len);
#else
#define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted
+#define restrict_link_by_digsig_builtin_and_secondary restrict_link_by_digsig_builtin
+static inline void __init add_to_secondary_keyring(const char *source, const void *data, size_t len)
+{
+}
+#endif
+
+#ifdef CONFIG_INTEGRITY_MACHINE_KEYRING
+extern int restrict_link_by_builtin_secondary_and_machine(
+ struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restrict_key);
+extern void __init set_machine_trusted_keys(struct key *keyring);
+#else
+#define restrict_link_by_builtin_secondary_and_machine restrict_link_by_builtin_trusted
+static inline void __init set_machine_trusted_keys(struct key *keyring)
+{
+}
#endif
-extern struct pkcs7_message *pkcs7;
#ifdef CONFIG_SYSTEM_BLACKLIST_KEYRING
-extern int mark_hash_blacklisted(const char *hash);
+extern int mark_hash_blacklisted(const u8 *hash, size_t hash_len,
+ enum blacklist_hash_type hash_type);
extern int is_hash_blacklisted(const u8 *hash, size_t hash_len,
- const char *type);
+ enum blacklist_hash_type hash_type);
extern int is_binary_blacklisted(const u8 *hash, size_t hash_len);
#else
static inline int is_hash_blacklisted(const u8 *hash, size_t hash_len,
- const char *type)
+ enum blacklist_hash_type hash_type)
{
return 0;
}
@@ -57,6 +92,7 @@ static inline int is_binary_blacklisted(const u8 *hash, size_t hash_len)
}
#endif
+struct pkcs7_message;
#ifdef CONFIG_SYSTEM_REVOCATION_LIST
extern int add_key_to_revocation_list(const char *data, size_t size);
extern int is_key_on_revocation_list(struct pkcs7_message *pkcs7);
diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
index d89fa2579ac0..4eb64548a74f 100644
--- a/include/keys/trusted-type.h
+++ b/include/keys/trusted-type.h
@@ -64,7 +64,7 @@ struct trusted_key_ops {
/* Unseal a key. */
int (*unseal)(struct trusted_key_payload *p, char *datablob);
- /* Get a randomized key. */
+ /* Optional: Get a randomized key. */
int (*get_random)(unsigned char *key, size_t key_len);
/* Exit key interface. */
diff --git a/include/keys/trusted_caam.h b/include/keys/trusted_caam.h
new file mode 100644
index 000000000000..73fe2f32f65e
--- /dev/null
+++ b/include/keys/trusted_caam.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
+ */
+
+#ifndef __CAAM_TRUSTED_KEY_H
+#define __CAAM_TRUSTED_KEY_H
+
+extern struct trusted_key_ops trusted_key_caam_ops;
+
+#endif
diff --git a/include/keys/trusted_dcp.h b/include/keys/trusted_dcp.h
new file mode 100644
index 000000000000..9aaa42075b40
--- /dev/null
+++ b/include/keys/trusted_dcp.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 sigma star gmbh
+ */
+
+#ifndef TRUSTED_DCP_H
+#define TRUSTED_DCP_H
+
+extern struct trusted_key_ops dcp_trusted_key_ops;
+
+#endif
diff --git a/include/keys/trusted_tpm.h b/include/keys/trusted_tpm.h
index 7769b726863a..0fadc6a4f166 100644
--- a/include/keys/trusted_tpm.h
+++ b/include/keys/trusted_tpm.h
@@ -5,43 +5,8 @@
#include <keys/trusted-type.h>
#include <linux/tpm_command.h>
-/* implementation specific TPM constants */
-#define MAX_BUF_SIZE 1024
-#define TPM_GETRANDOM_SIZE 14
-#define TPM_SIZE_OFFSET 2
-#define TPM_RETURN_OFFSET 6
-#define TPM_DATA_OFFSET 10
-
-#define LOAD32(buffer, offset) (ntohl(*(uint32_t *)&buffer[offset]))
-#define LOAD32N(buffer, offset) (*(uint32_t *)&buffer[offset])
-#define LOAD16(buffer, offset) (ntohs(*(uint16_t *)&buffer[offset]))
-
extern struct trusted_key_ops trusted_key_tpm_ops;
-struct osapsess {
- uint32_t handle;
- unsigned char secret[SHA1_DIGEST_SIZE];
- unsigned char enonce[TPM_NONCE_SIZE];
-};
-
-/* discrete values, but have to store in uint16_t for TPM use */
-enum {
- SEAL_keytype = 1,
- SRK_keytype = 4
-};
-
-int TSS_authhmac(unsigned char *digest, const unsigned char *key,
- unsigned int keylen, unsigned char *h1,
- unsigned char *h2, unsigned int h3, ...);
-int TSS_checkhmac1(unsigned char *buffer,
- const uint32_t command,
- const unsigned char *ononce,
- const unsigned char *key,
- unsigned int keylen, ...);
-
-int trusted_tpm_send(unsigned char *cmd, size_t buflen);
-int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce);
-
int tpm2_seal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options);
@@ -49,50 +14,4 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options);
-#define TPM_DEBUG 0
-
-#if TPM_DEBUG
-static inline void dump_options(struct trusted_key_options *o)
-{
- pr_info("sealing key type %d\n", o->keytype);
- pr_info("sealing key handle %0X\n", o->keyhandle);
- pr_info("pcrlock %d\n", o->pcrlock);
- pr_info("pcrinfo %d\n", o->pcrinfo_len);
- print_hex_dump(KERN_INFO, "pcrinfo ", DUMP_PREFIX_NONE,
- 16, 1, o->pcrinfo, o->pcrinfo_len, 0);
-}
-
-static inline void dump_sess(struct osapsess *s)
-{
- print_hex_dump(KERN_INFO, "trusted-key: handle ", DUMP_PREFIX_NONE,
- 16, 1, &s->handle, 4, 0);
- pr_info("secret:\n");
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
- 16, 1, &s->secret, SHA1_DIGEST_SIZE, 0);
- pr_info("trusted-key: enonce:\n");
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
- 16, 1, &s->enonce, SHA1_DIGEST_SIZE, 0);
-}
-
-static inline void dump_tpm_buf(unsigned char *buf)
-{
- int len;
-
- pr_info("\ntpm buffer\n");
- len = LOAD32(buf, TPM_SIZE_OFFSET);
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, buf, len, 0);
-}
-#else
-static inline void dump_options(struct trusted_key_options *o)
-{
-}
-
-static inline void dump_sess(struct osapsess *s)
-{
-}
-
-static inline void dump_tpm_buf(unsigned char *buf)
-{
-}
-#endif
#endif
diff --git a/include/kunit/assert.h b/include/kunit/assert.h
index ad889b539ab3..bb879389f11d 100644
--- a/include/kunit/assert.h
+++ b/include/kunit/assert.h
@@ -10,7 +10,7 @@
#define _KUNIT_ASSERT_H
#include <linux/err.h>
-#include <linux/kernel.h>
+#include <linux/printk.h>
struct kunit;
struct string_stream;
@@ -29,86 +29,48 @@ enum kunit_assert_type {
};
/**
- * struct kunit_assert - Data for printing a failed assertion or expectation.
- * @test: the test case this expectation/assertion is associated with.
- * @type: the type (either an expectation or an assertion) of this kunit_assert.
- * @line: the source code line number that the expectation/assertion is at.
- * @file: the file path of the source file that the expectation/assertion is in.
- * @message: an optional message to provide additional context.
- * @format: a function which formats the data in this kunit_assert to a string.
- *
- * Represents a failed expectation/assertion. Contains all the data necessary to
- * format a string to a user reporting the failure.
+ * struct kunit_loc - Identifies the source location of a line of code.
+ * @line: the line number in the file.
+ * @file: the file name.
*/
-struct kunit_assert {
- struct kunit *test;
- enum kunit_assert_type type;
+struct kunit_loc {
int line;
const char *file;
- struct va_format message;
- void (*format)(const struct kunit_assert *assert,
- struct string_stream *stream);
};
-/**
- * KUNIT_INIT_VA_FMT_NULL - Default initializer for struct va_format.
- *
- * Used inside a struct initialization block to initialize struct va_format to
- * default values where fmt and va are null.
- */
-#define KUNIT_INIT_VA_FMT_NULL { .fmt = NULL, .va = NULL }
+#define KUNIT_CURRENT_LOC { .file = __FILE__, .line = __LINE__ }
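+
+/*
+ * Example (illustrative): callers can snapshot the current source location
+ * with, e.g.,
+ *
+ *	struct kunit_loc loc = KUNIT_CURRENT_LOC;
+ */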
/**
- * KUNIT_INIT_ASSERT_STRUCT() - Initializer for a &struct kunit_assert.
- * @kunit: The test case that this expectation/assertion is associated with.
- * @assert_type: The type (assertion or expectation) of this kunit_assert.
- * @fmt: The formatting function which builds a string out of this kunit_assert.
+ * struct kunit_assert - Data for printing a failed assertion or expectation.
*
- * The base initializer for a &struct kunit_assert.
+ * Represents a failed expectation/assertion. Contains all the data necessary to
+ * format a string to a user reporting the failure.
*/
-#define KUNIT_INIT_ASSERT_STRUCT(kunit, assert_type, fmt) { \
- .test = kunit, \
- .type = assert_type, \
- .file = __FILE__, \
- .line = __LINE__, \
- .message = KUNIT_INIT_VA_FMT_NULL, \
- .format = fmt \
-}
+struct kunit_assert {};
-void kunit_base_assert_format(const struct kunit_assert *assert,
- struct string_stream *stream);
+typedef void (*assert_format_t)(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream);
-void kunit_assert_print_msg(const struct kunit_assert *assert,
- struct string_stream *stream);
+void kunit_assert_prologue(const struct kunit_loc *loc,
+ enum kunit_assert_type type,
+ struct string_stream *stream);
/**
* struct kunit_fail_assert - Represents a plain fail expectation/assertion.
* @assert: The parent of this type.
*
- * Represents a simple KUNIT_FAIL/KUNIT_ASSERT_FAILURE that always fails.
+ * Represents a simple KUNIT_FAIL/KUNIT_FAIL_AND_ABORT that always fails.
*/
struct kunit_fail_assert {
struct kunit_assert assert;
};
void kunit_fail_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
- * KUNIT_INIT_FAIL_ASSERT_STRUCT() - Initializer for &struct kunit_fail_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
- *
- * Initializes a &struct kunit_fail_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
- */
-#define KUNIT_INIT_FAIL_ASSERT_STRUCT(test, type) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_fail_assert_format) \
-}
-
-/**
* struct kunit_unary_assert - Represents a KUNIT_{EXPECT|ASSERT}_{TRUE|FALSE}
* @assert: The parent of this type.
* @condition: A string representation of a conditional expression.
@@ -125,27 +87,10 @@ struct kunit_unary_assert {
};
void kunit_unary_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
- * KUNIT_INIT_UNARY_ASSERT_STRUCT() - Initializes &struct kunit_unary_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
- * @cond: A string representation of the expression asserted true or false.
- * @expect_true: True if of type KUNIT_{EXPECT|ASSERT}_TRUE, false otherwise.
- *
- * Initializes a &struct kunit_unary_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
- */
-#define KUNIT_INIT_UNARY_ASSERT_STRUCT(test, type, cond, expect_true) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_unary_assert_format), \
- .condition = cond, \
- .expected_true = expect_true \
-}
-
-/**
* struct kunit_ptr_not_err_assert - An expectation/assertion that a pointer is
* not NULL and not a -errno.
* @assert: The parent of this type.
@@ -162,35 +107,28 @@ struct kunit_ptr_not_err_assert {
};
void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
- * KUNIT_INIT_PTR_NOT_ERR_ASSERT_STRUCT() - Initializes a
- * &struct kunit_ptr_not_err_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
- * @txt: A string representation of the expression passed to the expectation.
- * @val: The actual evaluated pointer value of the expression.
- *
- * Initializes a &struct kunit_ptr_not_err_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ * struct kunit_binary_assert_text - holds strings for &struct
+ * kunit_binary_assert and friends to try and make the structs smaller.
+ * @operation: A string representation of the comparison operator (e.g. "==").
+ * @left_text: A string representation of the left expression (e.g. "2+2").
+ * @right_text: A string representation of the right expression (e.g. "2+2").
*/
-#define KUNIT_INIT_PTR_NOT_ERR_STRUCT(test, type, txt, val) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_ptr_not_err_assert_format), \
- .text = txt, \
- .value = val \
-}
+struct kunit_binary_assert_text {
+ const char *operation;
+ const char *left_text;
+ const char *right_text;
+};
/**
* struct kunit_binary_assert - An expectation/assertion that compares two
* non-pointer values (for example, KUNIT_EXPECT_EQ(test, 1 + 1, 2)).
* @assert: The parent of this type.
- * @operation: A string representation of the comparison operator (e.g. "==").
- * @left_text: A string representation of the expression in the left slot.
+ * @text: Holds the textual representations of the operands and op (e.g. "==").
* @left_value: The actual evaluated value of the expression in the left slot.
- * @right_text: A string representation of the expression in the right slot.
* @right_value: The actual evaluated value of the expression in the right slot.
*
* Represents an expectation/assertion that compares two non-pointer values. For
@@ -199,55 +137,21 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
*/
struct kunit_binary_assert {
struct kunit_assert assert;
- const char *operation;
- const char *left_text;
+ const struct kunit_binary_assert_text *text;
long long left_value;
- const char *right_text;
long long right_value;
};
void kunit_binary_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
- * KUNIT_INIT_BINARY_ASSERT_STRUCT() - Initializes a
- * &struct kunit_binary_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
- * @op_str: A string representation of the comparison operator (e.g. "==").
- * @left_str: A string representation of the expression in the left slot.
- * @left_val: The actual evaluated value of the expression in the left slot.
- * @right_str: A string representation of the expression in the right slot.
- * @right_val: The actual evaluated value of the expression in the right slot.
- *
- * Initializes a &struct kunit_binary_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
- */
-#define KUNIT_INIT_BINARY_ASSERT_STRUCT(test, \
- type, \
- op_str, \
- left_str, \
- left_val, \
- right_str, \
- right_val) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_binary_assert_format), \
- .operation = op_str, \
- .left_text = left_str, \
- .left_value = left_val, \
- .right_text = right_str, \
- .right_value = right_val \
-}
-
-/**
* struct kunit_binary_ptr_assert - An expectation/assertion that compares two
* pointer values (for example, KUNIT_EXPECT_PTR_EQ(test, foo, bar)).
* @assert: The parent of this type.
- * @operation: A string representation of the comparison operator (e.g. "==").
- * @left_text: A string representation of the expression in the left slot.
+ * @text: Holds the textual representations of the operands and op (e.g. "==").
* @left_value: The actual evaluated value of the expression in the left slot.
- * @right_text: A string representation of the expression in the right slot.
* @right_value: The actual evaluated value of the expression in the right slot.
*
* Represents an expectation/assertion that compares two pointer values. For
@@ -256,55 +160,21 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
*/
struct kunit_binary_ptr_assert {
struct kunit_assert assert;
- const char *operation;
- const char *left_text;
+ const struct kunit_binary_assert_text *text;
const void *left_value;
- const char *right_text;
const void *right_value;
};
void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
- * KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT() - Initializes a
- * &struct kunit_binary_ptr_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
- * @op_str: A string representation of the comparison operator (e.g. "==").
- * @left_str: A string representation of the expression in the left slot.
- * @left_val: The actual evaluated value of the expression in the left slot.
- * @right_str: A string representation of the expression in the right slot.
- * @right_val: The actual evaluated value of the expression in the right slot.
- *
- * Initializes a &struct kunit_binary_ptr_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
- */
-#define KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT(test, \
- type, \
- op_str, \
- left_str, \
- left_val, \
- right_str, \
- right_val) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_binary_ptr_assert_format), \
- .operation = op_str, \
- .left_text = left_str, \
- .left_value = left_val, \
- .right_text = right_str, \
- .right_value = right_val \
-}
-
-/**
* struct kunit_binary_str_assert - An expectation/assertion that compares two
* string values (for example, KUNIT_EXPECT_STREQ(test, foo, "bar")).
* @assert: The parent of this type.
- * @operation: A string representation of the comparison operator (e.g. "==").
- * @left_text: A string representation of the expression in the left slot.
+ * @text: Holds the textual representations of the operands and comparator.
* @left_value: The actual evaluated value of the expression in the left slot.
- * @right_text: A string representation of the expression in the right slot.
* @right_value: The actual evaluated value of the expression in the right slot.
*
* Represents an expectation/assertion that compares two string values. For
@@ -313,45 +183,50 @@ void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
*/
struct kunit_binary_str_assert {
struct kunit_assert assert;
- const char *operation;
- const char *left_text;
+ const struct kunit_binary_assert_text *text;
const char *left_value;
- const char *right_text;
const char *right_value;
};
void kunit_binary_str_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
struct string_stream *stream);
/**
- * KUNIT_INIT_BINARY_STR_ASSERT_STRUCT() - Initializes a
- * &struct kunit_binary_str_assert.
- * @test: The test case that this expectation/assertion is associated with.
- * @type: The type (assertion or expectation) of this kunit_assert.
- * @op_str: A string representation of the comparison operator (e.g. "==").
- * @left_str: A string representation of the expression in the left slot.
- * @left_val: The actual evaluated value of the expression in the left slot.
- * @right_str: A string representation of the expression in the right slot.
- * @right_val: The actual evaluated value of the expression in the right slot.
+ * struct kunit_mem_assert - An expectation/assertion that compares two
+ * memory blocks.
+ * @assert: The parent of this type.
+ * @text: Holds the textual representations of the operands and comparator.
+ * @left_value: The actual evaluated value of the expression in the left slot.
+ * @right_value: The actual evaluated value of the expression in the right slot.
+ * @size: Size of the memory block analysed in bytes.
*
- * Initializes a &struct kunit_binary_str_assert. Intended to be used in
- * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ * Represents an expectation/assertion that compares two memory blocks. For
+ * example, to expect that the first three bytes of foo is equal to the
+ * first three bytes of bar, you can use the expectation
+ * KUNIT_EXPECT_MEMEQ(test, foo, bar, 3);
*/
-#define KUNIT_INIT_BINARY_STR_ASSERT_STRUCT(test, \
- type, \
- op_str, \
- left_str, \
- left_val, \
- right_str, \
- right_val) { \
- .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
- type, \
- kunit_binary_str_assert_format), \
- .operation = op_str, \
- .left_text = left_str, \
- .left_value = left_val, \
- .right_text = right_str, \
- .right_value = right_val \
-}
+struct kunit_mem_assert {
+ struct kunit_assert assert;
+ const struct kunit_binary_assert_text *text;
+ const void *left_value;
+ const void *right_value;
+ const size_t size;
+};
+
+void kunit_mem_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream);
+
+#if IS_ENABLED(CONFIG_KUNIT)
+void kunit_assert_print_msg(const struct va_format *message,
+ struct string_stream *stream);
+bool is_literal(const char *text, long long value);
+bool is_str_literal(const char *text, const char *value);
+void kunit_assert_hexdump(struct string_stream *stream,
+ const void *buf,
+ const void *compared_buf,
+ const size_t len);
+#endif
#endif /* _KUNIT_ASSERT_H */
diff --git a/include/kunit/attributes.h b/include/kunit/attributes.h
new file mode 100644
index 000000000000..bc76a0b786d2
--- /dev/null
+++ b/include/kunit/attributes.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit API to save and access test attributes
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: Rae Moar <rmoar@google.com>
+ */
+
+#ifndef _KUNIT_ATTRIBUTES_H
+#define _KUNIT_ATTRIBUTES_H
+
+/*
+ * struct kunit_attr_filter - representation of an attribute filter, holding
+ * the attribute object and its string input
+ */
+struct kunit_attr_filter {
+ struct kunit_attr *attr;
+ char *input;
+};
+
+/*
+ * Returns the name of the filter's attribute.
+ */
+const char *kunit_attr_filter_name(struct kunit_attr_filter filter);
+
+/*
+ * Print all test attributes for a test case or suite.
+ * Output format for test cases: "# <test_name>.<attribute>: <value>"
+ * Output format for test suites: "# <attribute>: <value>"
+ */
+void kunit_print_attr(void *test_or_suite, bool is_test, unsigned int test_level);
+
+/*
+ * Returns the number of filters in the input.
+ */
+int kunit_get_filter_count(char *input);
+
+/*
+ * Parse the attribute filter input and return an object containing the
+ * attribute object and the string input of the next filter.
+ */
+struct kunit_attr_filter kunit_next_attr_filter(char **filters, int *err);
+
+/*
+ * Returns a copy of the suite containing only tests that pass the filter.
+ */
+struct kunit_suite *kunit_filter_attr_tests(const struct kunit_suite *const suite,
+ struct kunit_attr_filter filter, char *action, int *err);
+
+#endif /* _KUNIT_ATTRIBUTES_H */
diff --git a/include/kunit/clk.h b/include/kunit/clk.h
new file mode 100644
index 000000000000..f226044cc78d
--- /dev/null
+++ b/include/kunit/clk.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CLK_KUNIT_H
+#define _CLK_KUNIT_H
+
+struct clk;
+struct clk_hw;
+struct device;
+struct device_node;
+struct of_phandle_args;
+struct kunit;
+
+struct clk *
+clk_get_kunit(struct kunit *test, struct device *dev, const char *con_id);
+struct clk *
+of_clk_get_kunit(struct kunit *test, struct device_node *np, int index);
+
+struct clk *
+clk_hw_get_clk_kunit(struct kunit *test, struct clk_hw *hw, const char *con_id);
+struct clk *
+clk_hw_get_clk_prepared_enabled_kunit(struct kunit *test, struct clk_hw *hw,
+ const char *con_id);
+
+int clk_prepare_enable_kunit(struct kunit *test, struct clk *clk);
+
+int clk_hw_register_kunit(struct kunit *test, struct device *dev, struct clk_hw *hw);
+int of_clk_hw_register_kunit(struct kunit *test, struct device_node *node,
+ struct clk_hw *hw);
+
+int of_clk_add_hw_provider_kunit(struct kunit *test, struct device_node *np,
+ struct clk_hw *(*get)(struct of_phandle_args *clkspec, void *data),
+ void *data);
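+
+/*
+ * Illustrative sketch (the "core" clock name is hypothetical): acquire a
+ * clk that is automatically put when the test finishes, e.g.
+ *
+ * .. code-block:: c
+ *
+ *	struct clk *clk = clk_get_kunit(test, dev, "core");
+ *
+ *	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, clk);
+ *	KUNIT_ASSERT_EQ(test, 0, clk_prepare_enable_kunit(test, clk));
+ */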
+
+#endif
diff --git a/include/kunit/device.h b/include/kunit/device.h
new file mode 100644
index 000000000000..2450110ad64e
--- /dev/null
+++ b/include/kunit/device.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit basic device implementation
+ *
+ * Helpers for creating and managing fake devices for KUnit tests.
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#ifndef _KUNIT_DEVICE_H
+#define _KUNIT_DEVICE_H
+
+#if IS_ENABLED(CONFIG_KUNIT)
+
+#include <kunit/test.h>
+
+struct device;
+struct device_driver;
+
+/**
+ * kunit_driver_create() - Create a struct device_driver attached to the kunit_bus
+ * @test: The test context object.
+ * @name: The name to give the created driver.
+ *
+ * Creates a struct device_driver attached to the kunit_bus, with the name @name.
+ * This driver will automatically be cleaned up on test exit.
+ *
+ * Return: a stub struct device_driver, managed by KUnit, with the name @name.
+ */
+struct device_driver *kunit_driver_create(struct kunit *test, const char *name);
+
+/**
+ * kunit_device_register() - Create a struct device for use in KUnit tests
+ * @test: The test context object.
+ * @name: The name to give the created device.
+ *
+ * Creates a struct kunit_device (which is a struct device) with the given name,
+ * and a corresponding driver. The device and driver will be cleaned up on test
+ * exit, or when kunit_device_unregister is called. See also
+ * kunit_device_register_with_driver, if you wish to provide your own
+ * struct device_driver.
+ *
+ * Return: a pointer to a struct device which will be cleaned up when the test
+ * exits, or an error pointer if the device could not be allocated or registered.
+ */
+struct device *kunit_device_register(struct kunit *test, const char *name);
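+
+/*
+ * Example usage (an illustrative sketch; "my-fake-dev" is an arbitrary
+ * name):
+ *
+ * .. code-block:: c
+ *
+ *	static void my_device_test(struct kunit *test)
+ *	{
+ *		struct device *dev = kunit_device_register(test, "my-fake-dev");
+ *
+ *		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ *	}
+ */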
+
+/**
+ * kunit_device_register_with_driver() - Create a struct device for use in KUnit tests
+ * @test: The test context object.
+ * @name: The name to give the created device.
+ * @drv: The struct device_driver to associate with the device.
+ *
+ * Creates a struct kunit_device (which is a struct device) with the given
+ * name, and driver. The device will be cleaned up on test exit, or when
+ * kunit_device_unregister is called. See also kunit_device_register, if you
+ * wish KUnit to create and manage a driver for you.
+ *
+ * Return: a pointer to a struct device which will be cleaned up when the test
+ * exits, or an error pointer if the device could not be allocated or registered.
+ */
+struct device *kunit_device_register_with_driver(struct kunit *test,
+ const char *name,
+ const struct device_driver *drv);
+
+/**
+ * kunit_device_unregister() - Unregister a KUnit-managed device
+ * @test: The test context object which created the device
+ * @dev: The device.
+ *
+ * Unregisters and destroys a struct device which was created with
+ * kunit_device_register or kunit_device_register_with_driver. If KUnit created
+ * a driver, cleans it up as well.
+ */
+void kunit_device_unregister(struct kunit *test, struct device *dev);
+
+#endif
+
+#endif
diff --git a/include/kunit/of.h b/include/kunit/of.h
new file mode 100644
index 000000000000..75a760a4e2a5
--- /dev/null
+++ b/include/kunit/of.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _KUNIT_OF_H
+#define _KUNIT_OF_H
+
+#include <kunit/test.h>
+
+struct device_node;
+
+#ifdef CONFIG_OF
+
+void of_node_put_kunit(struct kunit *test, struct device_node *node);
+
+#else
+
+static inline
+void of_node_put_kunit(struct kunit *test, struct device_node *node)
+{
+ kunit_skip(test, "requires CONFIG_OF");
+}
+
+#endif /* !CONFIG_OF */
+
+#if defined(CONFIG_OF) && defined(CONFIG_OF_OVERLAY) && defined(CONFIG_OF_EARLY_FLATTREE)
+
+int of_overlay_fdt_apply_kunit(struct kunit *test, void *overlay_fdt,
+ u32 overlay_fdt_size, int *ovcs_id);
+#else
+
+static inline int
+of_overlay_fdt_apply_kunit(struct kunit *test, void *overlay_fdt,
+ u32 overlay_fdt_size, int *ovcs_id)
+{
+ kunit_skip(test, "requires CONFIG_OF and CONFIG_OF_OVERLAY and CONFIG_OF_EARLY_FLATTREE for root node");
+ return -EINVAL;
+}
+
+#endif
+
+/**
+ * __of_overlay_apply_kunit() - Test managed of_overlay_fdt_apply() variant
+ * @test: test context
+ * @overlay_begin: start address of overlay to apply
+ * @overlay_end: end address of overlay to apply
+ *
+ * This is mostly an internal API. See of_overlay_apply_kunit() for the wrapper
+ * that makes this easier to use.
+ *
+ * Similar to of_overlay_fdt_apply(), except the overlay is managed by the test
+ * case and is automatically removed with of_overlay_remove() after the test
+ * case concludes.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static inline int __of_overlay_apply_kunit(struct kunit *test,
+ u8 *overlay_begin,
+ const u8 *overlay_end)
+{
+ int unused;
+
+ return of_overlay_fdt_apply_kunit(test, overlay_begin,
+ overlay_end - overlay_begin,
+ &unused);
+}
+
+#define of_overlay_begin(overlay_name) __dtbo_##overlay_name##_begin
+#define of_overlay_end(overlay_name) __dtbo_##overlay_name##_end
+
+#define OF_OVERLAY_DECLARE(overlay_name) \
+ extern uint8_t of_overlay_begin(overlay_name)[]; \
+ extern uint8_t of_overlay_end(overlay_name)[] \
+
+/**
+ * of_overlay_apply_kunit() - Test managed of_overlay_fdt_apply() for built-in overlays
+ * @test: test context
+ * @overlay_name: name of overlay to apply
+ *
+ * This macro is used to apply a device tree overlay built with the
+ * cmd_dt_S_dtbo rule in scripts/Makefile.lib that has been compiled into the
+ * kernel image or KUnit test module. The overlay is automatically removed when
+ * the test is finished.
+ *
+ * Unit tests that need device tree nodes should compile an overlay file with
+ * @overlay_name\.dtbo.o in their Makefile along with their unit test and then
+ * load the overlay during their test. The @overlay_name matches the filename
+ * of the overlay without the dtbo filename extension. If CONFIG_OF_OVERLAY is
+ * not enabled, the @test will be skipped.
+ *
+ * In the Makefile
+ *
+ * .. code-block:: none
+ *
+ * obj-$(CONFIG_OF_OVERLAY_KUNIT_TEST) += overlay_test.o kunit_overlay_test.dtbo.o
+ *
+ * In the test
+ *
+ * .. code-block:: c
+ *
+ * static void of_overlay_kunit_of_overlay_apply(struct kunit *test)
+ * {
+ * struct device_node *np;
+ *
+ * KUNIT_ASSERT_EQ(test, 0,
+ * of_overlay_apply_kunit(test, kunit_overlay_test));
+ *
+ * np = of_find_node_by_name(NULL, "test-kunit");
+ * KUNIT_EXPECT_NOT_ERR_OR_NULL(test, np);
+ * of_node_put(np);
+ * }
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+#define of_overlay_apply_kunit(test, overlay_name) \
+({ \
+ OF_OVERLAY_DECLARE(overlay_name); \
+ \
+ __of_overlay_apply_kunit((test), \
+ of_overlay_begin(overlay_name), \
+ of_overlay_end(overlay_name)); \
+})
+
+#endif
diff --git a/include/kunit/platform_device.h b/include/kunit/platform_device.h
new file mode 100644
index 000000000000..f8236a8536f7
--- /dev/null
+++ b/include/kunit/platform_device.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _KUNIT_PLATFORM_DRIVER_H
+#define _KUNIT_PLATFORM_DRIVER_H
+
+struct completion;
+struct kunit;
+struct platform_device;
+struct platform_driver;
+
+struct platform_device *
+kunit_platform_device_alloc(struct kunit *test, const char *name, int id);
+int kunit_platform_device_add(struct kunit *test, struct platform_device *pdev);
+
+int kunit_platform_device_prepare_wait_for_probe(struct kunit *test,
+ struct platform_device *pdev,
+ struct completion *x);
+
+int kunit_platform_driver_register(struct kunit *test,
+ struct platform_driver *drv);
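+
+/*
+ * Illustrative sketch ("my-test-dev" is a hypothetical name): a test would
+ * typically allocate and add a device, e.g.
+ *
+ * .. code-block:: c
+ *
+ *	struct platform_device *pdev;
+ *
+ *	pdev = kunit_platform_device_alloc(test, "my-test-dev", -1);
+ *	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+ *	KUNIT_ASSERT_EQ(test, 0, kunit_platform_device_add(test, pdev));
+ */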
+
+#endif
diff --git a/include/kunit/resource.h b/include/kunit/resource.h
new file mode 100644
index 000000000000..4ad69a2642a5
--- /dev/null
+++ b/include/kunit/resource.h
@@ -0,0 +1,503 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit resource API for test managed resources (allocations, etc.).
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: Daniel Latypov <dlatypov@google.com>
+ */
+
+#ifndef _KUNIT_RESOURCE_H
+#define _KUNIT_RESOURCE_H
+
+#include <kunit/test.h>
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+struct kunit_resource;
+
+typedef int (*kunit_resource_init_t)(struct kunit_resource *, void *);
+typedef void (*kunit_resource_free_t)(struct kunit_resource *);
+
+/**
+ * struct kunit_resource - represents a *test managed resource*
+ * @data: for the user to store arbitrary data.
+ * @name: optional name
+ * @free: a user supplied function to free the resource.
+ *
+ * Represents a *test managed resource*, a resource which will automatically be
+ * cleaned up at the end of a test case. This cleanup is performed by the 'free'
+ * function. The struct kunit_resource itself is freed automatically with
+ * kfree() if it was allocated by KUnit (e.g., by kunit_alloc_resource()), but
+ * must be freed by the user otherwise.
+ *
+ * Resources are reference counted so if a resource is retrieved via
+ * kunit_alloc_and_get_resource() or kunit_find_resource(), we need
+ * to call kunit_put_resource() to reduce the resource reference count
+ * when finished with it. Note that kunit_alloc_resource() does not require a
+ * kunit_put_resource() because it does not retrieve the resource itself.
+ *
+ * Example:
+ *
+ * .. code-block:: c
+ *
+ * struct kunit_kmalloc_params {
+ * size_t size;
+ * gfp_t gfp;
+ * };
+ *
+ * static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
+ * {
+ * struct kunit_kmalloc_params *params = context;
+ * res->data = kmalloc(params->size, params->gfp);
+ *
+ * if (!res->data)
+ * return -ENOMEM;
+ *
+ * return 0;
+ * }
+ *
+ * static void kunit_kmalloc_free(struct kunit_resource *res)
+ * {
+ * kfree(res->data);
+ * }
+ *
+ * void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
+ * {
+ * struct kunit_kmalloc_params params;
+ *
+ * params.size = size;
+ * params.gfp = gfp;
+ *
+ * return kunit_alloc_resource(test, kunit_kmalloc_init,
+ * kunit_kmalloc_free, gfp, &params);
+ * }
+ *
+ * Resources can also be named, with lookup/removal done on a name
+ * basis also. kunit_add_named_resource(), kunit_find_named_resource()
+ * and kunit_destroy_named_resource(). Resource names must be
+ * unique within the test instance.
+ */
+struct kunit_resource {
+ void *data;
+ const char *name;
+ kunit_resource_free_t free;
+
+ /* private: internal use only. */
+ struct kref refcount;
+ struct list_head node;
+ bool should_kfree;
+};
+
+/**
+ * kunit_get_resource() - Hold resource for use. Most users do not need to
+ * call this directly, as resources retrieved via
+ * kunit_find_resource*() are already held.
+ * @res: resource
+ */
+static inline void kunit_get_resource(struct kunit_resource *res)
+{
+ kref_get(&res->refcount);
+}
+
+/*
+ * Called when refcount reaches zero via kunit_put_resource();
+ * should not be called directly.
+ */
+static inline void kunit_release_resource(struct kref *kref)
+{
+ struct kunit_resource *res = container_of(kref, struct kunit_resource,
+ refcount);
+
+ if (res->free)
+ res->free(res);
+
+	/*
+	 * 'res' is still valid here: even when should_kfree is set, res->free
+	 * does not free 'res' itself, only res->data.
+	 */
+ if (res->should_kfree)
+ kfree(res);
+}
+
+/**
+ * kunit_put_resource() - When caller is done with retrieved resource,
+ * kunit_put_resource() should be called to drop
+ * reference count. The resource list maintains
+ * a reference count on resources, so if no users
+ * are utilizing a resource and it is removed from
+ * the resource list, it will be freed via the
+ * associated free function (if any). Only
+ * needs to be used if we alloc_and_get() or
+ * find() resource.
+ * @res: resource
+ */
+static inline void kunit_put_resource(struct kunit_resource *res)
+{
+ kref_put(&res->refcount, kunit_release_resource);
+}
+
+/**
+ * __kunit_add_resource() - Internal helper to add a resource.
+ *
+ * res->should_kfree is not initialised.
+ * @test: The test context object.
+ * @init: a user-supplied function to initialize the result (if needed). If
+ * none is supplied, the resource data value is simply set to @data.
+ * If an init function is supplied, @data is passed to it instead.
+ * @free: a user-supplied function to free the resource (if needed).
+ * @res: The resource.
+ * @data: value to pass to init function or set in resource data field.
+ */
+int __kunit_add_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ void *data);
+
+/**
+ * kunit_add_resource() - Add a *test managed resource*.
+ * @test: The test context object.
+ * @init: a user-supplied function to initialize the resource (if needed). If
+ * none is supplied, the resource data value is simply set to @data.
+ * If an init function is supplied, @data is passed to it instead.
+ * @free: a user-supplied function to free the resource (if needed).
+ * @res: The resource.
+ * @data: value to pass to init function or set in resource data field.
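+ *
+ * Example (a minimal sketch: @res must outlive the test case, e.g. a
+ * static struct, and my_data is a hypothetical pointer):
+ *
+ * .. code-block:: c
+ *
+ *	static struct kunit_resource res;
+ *
+ *	kunit_add_resource(test, NULL, NULL, &res, my_data);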
+ */
+static inline int kunit_add_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ void *data)
+{
+ res->should_kfree = false;
+ return __kunit_add_resource(test, init, free, res, data);
+}
+
+static inline struct kunit_resource *
+kunit_find_named_resource(struct kunit *test, const char *name);
+
+/**
+ * kunit_add_named_resource() - Add a named *test managed resource*.
+ * @test: The test context object.
+ * @init: a user-supplied function to initialize the resource data, if needed.
+ * @free: a user-supplied function to free the resource data, if needed.
+ * @res: The resource.
+ * @name: name to be set for resource.
+ * @data: value to pass to init function or set in resource data field.
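+ *
+ * Example (a minimal sketch; "my-res" and my_data are hypothetical):
+ *
+ * .. code-block:: c
+ *
+ *	static struct kunit_resource res;
+ *
+ *	kunit_add_named_resource(test, NULL, NULL, &res, "my-res", my_data);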
+ */
+static inline int kunit_add_named_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ struct kunit_resource *res,
+ const char *name,
+ void *data)
+{
+ struct kunit_resource *existing;
+
+ if (!name)
+ return -EINVAL;
+
+ existing = kunit_find_named_resource(test, name);
+ if (existing) {
+ kunit_put_resource(existing);
+ return -EEXIST;
+ }
+
+ res->name = name;
+ res->should_kfree = false;
+
+ return __kunit_add_resource(test, init, free, res, data);
+}
+
+/**
+ * kunit_alloc_and_get_resource() - Allocates and returns a *test managed resource*.
+ * @test: The test context object.
+ * @init: a user supplied function to initialize the resource.
+ * @free: a user supplied function to free the resource (if needed).
+ * @internal_gfp: gfp to use for internal allocations; if unsure, use GFP_KERNEL.
+ * @context: for the user to pass in arbitrary data to the init function.
+ *
+ * Allocates a *test managed resource*, a resource which will automatically be
+ * cleaned up at the end of a test case. See &struct kunit_resource for an
+ * example.
+ *
+ * This is effectively identical to kunit_alloc_resource(), but returns the
+ * struct kunit_resource pointer, not just the 'data' pointer. It therefore
+ * also increments the resource's refcount, so kunit_put_resource() should be
+ * called when you've finished with it.
+ *
+ * Note: KUnit needs to allocate memory for a kunit_resource object. You must
+ * specify an @internal_gfp that is compatible with the use context of your
+ * resource.
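+ *
+ * Example (a minimal sketch, reusing kunit_kmalloc_init() and
+ * kunit_kmalloc_free() from the &struct kunit_resource example;
+ * use_buffer() is hypothetical):
+ *
+ * .. code-block:: c
+ *
+ *	struct kunit_kmalloc_params params = { .size = 64, .gfp = GFP_KERNEL };
+ *	struct kunit_resource *res;
+ *
+ *	res = kunit_alloc_and_get_resource(test, kunit_kmalloc_init,
+ *					   kunit_kmalloc_free, GFP_KERNEL,
+ *					   &params);
+ *	if (res) {
+ *		use_buffer(res->data);
+ *		kunit_put_resource(res);
+ *	}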
+ */
+static inline struct kunit_resource *
+kunit_alloc_and_get_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ gfp_t internal_gfp,
+ void *context)
+{
+ struct kunit_resource *res;
+ int ret;
+
+ res = kzalloc(sizeof(*res), internal_gfp);
+ if (!res)
+ return NULL;
+
+ res->should_kfree = true;
+
+ ret = __kunit_add_resource(test, init, free, res, context);
+ if (!ret) {
+ /*
+		 * bump refcount for get; kunit_put_resource() should be
+		 * called when done.
+ */
+ kunit_get_resource(res);
+ return res;
+ }
+ return NULL;
+}
+
+/**
+ * kunit_alloc_resource() - Allocates a *test managed resource*.
+ * @test: The test context object.
+ * @init: a user supplied function to initialize the resource.
+ * @free: a user supplied function to free the resource (if needed).
+ * @internal_gfp: gfp to use for internal allocations; if unsure, use GFP_KERNEL.
+ * @context: for the user to pass in arbitrary data to the init function.
+ *
+ * Allocates a *test managed resource*, a resource which will automatically be
+ * cleaned up at the end of a test case. See &struct kunit_resource for an
+ * example.
+ *
+ * Note: KUnit needs to allocate memory for a kunit_resource object. You must
+ * specify an @internal_gfp that is compatible with the use context of your
+ * resource.
+ */
+static inline void *kunit_alloc_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ gfp_t internal_gfp,
+ void *context)
+{
+ struct kunit_resource *res;
+
+ res = kzalloc(sizeof(*res), internal_gfp);
+ if (!res)
+ return NULL;
+
+ res->should_kfree = true;
+ if (!__kunit_add_resource(test, init, free, res, context))
+ return res->data;
+
+ return NULL;
+}
+
+typedef bool (*kunit_resource_match_t)(struct kunit *test,
+ struct kunit_resource *res,
+ void *match_data);
+
+/**
+ * kunit_resource_name_match() - Match a resource with the same name.
+ * @test: Test case to which the resource belongs.
+ * @res: The resource.
+ * @match_name: The name to match against.
+ */
+static inline bool kunit_resource_name_match(struct kunit *test,
+ struct kunit_resource *res,
+ void *match_name)
+{
+ return res->name && strcmp(res->name, match_name) == 0;
+}
+
+/**
+ * kunit_find_resource() - Find a resource using match function/data.
+ * @test: Test case to which the resource belongs.
+ * @match: match function to be applied to resources/match data.
+ * @match_data: data to be used in matching.
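+ *
+ * Example (a minimal sketch; a custom match function that matches a
+ * resource by its data pointer, mirroring the old
+ * kunit_resource_instance_match()):
+ *
+ * .. code-block:: c
+ *
+ *	static bool data_ptr_match(struct kunit *test,
+ *				   struct kunit_resource *res,
+ *				   void *match_data)
+ *	{
+ *		return res->data == match_data;
+ *	}
+ *
+ *	res = kunit_find_resource(test, data_ptr_match, ptr);
+ *	if (res)
+ *		kunit_put_resource(res);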
+ */
+static inline struct kunit_resource *
+kunit_find_resource(struct kunit *test,
+ kunit_resource_match_t match,
+ void *match_data)
+{
+ struct kunit_resource *res, *found = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&test->lock, flags);
+
+ list_for_each_entry_reverse(res, &test->resources, node) {
+ if (match(test, res, (void *)match_data)) {
+ found = res;
+ kunit_get_resource(found);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&test->lock, flags);
+
+ return found;
+}
+
+/**
+ * kunit_find_named_resource() - Find a resource using match name.
+ * @test: Test case to which the resource belongs.
+ * @name: match name.
+ */
+static inline struct kunit_resource *
+kunit_find_named_resource(struct kunit *test,
+ const char *name)
+{
+ return kunit_find_resource(test, kunit_resource_name_match,
+ (void *)name);
+}
+
+/**
+ * kunit_destroy_resource() - Find a kunit_resource and destroy it.
+ * @test: Test case to which the resource belongs.
+ * @match: Match function. Returns whether a given resource matches @match_data.
+ * @match_data: Data passed into @match.
+ *
+ * RETURNS:
+ * 0 if kunit_resource is found and freed, -ENOENT if not found.
+ */
+int kunit_destroy_resource(struct kunit *test,
+ kunit_resource_match_t match,
+ void *match_data);
+
+static inline int kunit_destroy_named_resource(struct kunit *test,
+ const char *name)
+{
+ return kunit_destroy_resource(test, kunit_resource_name_match,
+ (void *)name);
+}
+
+/**
+ * kunit_remove_resource() - remove resource from resource list associated with
+ * test.
+ * @test: The test context object.
+ * @res: The resource to be removed.
+ *
+ * Note that the resource will not be immediately freed since it is likely
+ * the caller has a reference to it via alloc_and_get() or find();
+ * in this case a final call to kunit_put_resource() is required.
+ */
+void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
+
+/* A 'deferred action' function to be used with kunit_add_action. */
+typedef void (kunit_action_t)(void *);
+
+/**
+ * KUNIT_DEFINE_ACTION_WRAPPER() - Wrap a function for use as a deferred action.
+ *
+ * @wrapper: The name of the new wrapper function to define.
+ * @orig: The original function to wrap.
+ * @arg_type: The type of the argument accepted by @orig.
+ *
+ * Defines a wrapper for a function which accepts a single, pointer-sized
+ * argument. This wrapper can then be passed to kunit_add_action() and
+ * similar. This should be used in preference to casting a function
+ * directly to kunit_action_t, as casting function pointers will break
+ * control flow integrity (CFI), leading to crashes.
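+ *
+ * Example (a minimal sketch wrapping kfree()):
+ *
+ * .. code-block:: c
+ *
+ *	KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);
+ *
+ *	kunit_add_action(test, kfree_wrapper, ptr);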
+ */
+#define KUNIT_DEFINE_ACTION_WRAPPER(wrapper, orig, arg_type) \
+ static void wrapper(void *in) \
+ { \
+ arg_type arg = (arg_type)in; \
+ orig(arg); \
+ }
+
+/**
+ * kunit_add_action() - Call a function when the test ends.
+ * @test: Test case to associate the action with.
+ * @action: The function to run on test exit.
+ * @ctx: Data passed into @action.
+ *
+ * Defer the execution of a function until the test exits, either normally or
+ * due to a failure. @ctx is passed as additional context. All functions
+ * registered with kunit_add_action() will execute in the reverse of the
+ * order in which they were registered.
+ *
+ * This is useful for cleaning up allocated memory and resources, as these
+ * functions are called even if the test aborts early due to, e.g., a failed
+ * assertion.
+ *
+ * See also: devm_add_action() for the devres equivalent.
+ *
+ * Returns:
+ * 0 on success, an error if the action could not be deferred.
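+ *
+ * Example (a minimal sketch; error handling of the allocation is elided):
+ *
+ * .. code-block:: c
+ *
+ *	static void free_buffer(void *ctx)
+ *	{
+ *		kfree(ctx);
+ *	}
+ *
+ *	buf = kmalloc(16, GFP_KERNEL);
+ *	kunit_add_action(test, free_buffer, buf);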
+ */
+int kunit_add_action(struct kunit *test, kunit_action_t *action, void *ctx);
+
+/**
+ * kunit_add_action_or_reset() - Call a function when the test ends.
+ * @test: Test case to associate the action with.
+ * @action: The function to run on test exit.
+ * @ctx: Data passed into @action.
+ *
+ * Defer the execution of a function until the test exits, either normally or
+ * due to a failure. @ctx is passed as additional context. All functions
+ * registered with kunit_add_action() will execute in the reverse of the
+ * order in which they were registered.
+ *
+ * This is useful for cleaning up allocated memory and resources, as these
+ * functions are called even if the test aborts early due to, e.g., a failed
+ * assertion.
+ *
+ * If the action cannot be created (e.g., due to the system being out of memory),
+ * then action(ctx) will be called immediately, and an error will be returned.
+ *
+ * See also: devm_add_action_or_reset() for the devres equivalent.
+ *
+ * Returns:
+ * 0 on success, an error if the action could not be deferred.
+ */
+int kunit_add_action_or_reset(struct kunit *test, kunit_action_t *action,
+ void *ctx);
+
+/**
+ * kunit_remove_action() - Cancel a matching deferred action.
+ * @test: Test case the action is associated with.
+ * @action: The deferred function to cancel.
+ * @ctx: The context passed to the deferred function to trigger.
+ *
+ * Prevent an action deferred via kunit_add_action() from executing when the
+ * test terminates.
+ *
+ * If the function/context pair was deferred multiple times, only the most
+ * recent one will be cancelled.
+ *
+ * See also: devm_remove_action() for the devres equivalent.
+ */
+void kunit_remove_action(struct kunit *test,
+ kunit_action_t *action,
+ void *ctx);
+
+/**
+ * kunit_release_action() - Run a matching action call immediately.
+ * @test: Test case the action is associated with.
+ * @action: The deferred function to trigger.
+ * @ctx: The context passed to the deferred function to trigger.
+ *
+ * Execute a function deferred via kunit_add_action() immediately, rather than
+ * when the test ends.
+ *
+ * If the function/context pair was deferred multiple times, it will only be
+ * executed once here. The most recent deferral will no longer execute when
+ * the test ends.
+ *
+ * kunit_release_action(test, func, ctx);
+ * is equivalent to
+ * func(ctx);
+ * kunit_remove_action(test, func, ctx);
+ *
+ * See also: devm_release_action() for the devres equivalent.
+ */
+void kunit_release_action(struct kunit *test,
+ kunit_action_t *action,
+ void *ctx);
+#endif /* _KUNIT_RESOURCE_H */
diff --git a/include/kunit/run-in-irq-context.h b/include/kunit/run-in-irq-context.h
new file mode 100644
index 000000000000..108e96433ea4
--- /dev/null
+++ b/include/kunit/run-in-irq-context.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Helper function for testing code in interrupt contexts
+ *
+ * Copyright 2025 Google LLC
+ */
+#ifndef _KUNIT_RUN_IN_IRQ_CONTEXT_H
+#define _KUNIT_RUN_IN_IRQ_CONTEXT_H
+
+#include <kunit/test.h>
+#include <linux/timekeeping.h>
+#include <linux/hrtimer.h>
+#include <linux/workqueue.h>
+
+#define KUNIT_IRQ_TEST_HRTIMER_INTERVAL us_to_ktime(5)
+
+struct kunit_irq_test_state {
+ bool (*func)(void *test_specific_state);
+ void *test_specific_state;
+ bool task_func_reported_failure;
+ bool hardirq_func_reported_failure;
+ bool softirq_func_reported_failure;
+ unsigned long hardirq_func_calls;
+ unsigned long softirq_func_calls;
+ struct hrtimer timer;
+ struct work_struct bh_work;
+};
+
+static enum hrtimer_restart kunit_irq_test_timer_func(struct hrtimer *timer)
+{
+ struct kunit_irq_test_state *state =
+ container_of(timer, typeof(*state), timer);
+
+ WARN_ON_ONCE(!in_hardirq());
+ state->hardirq_func_calls++;
+
+ if (!state->func(state->test_specific_state))
+ state->hardirq_func_reported_failure = true;
+
+ hrtimer_forward_now(&state->timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL);
+ queue_work(system_bh_wq, &state->bh_work);
+ return HRTIMER_RESTART;
+}
+
+static void kunit_irq_test_bh_work_func(struct work_struct *work)
+{
+ struct kunit_irq_test_state *state =
+ container_of(work, typeof(*state), bh_work);
+
+ WARN_ON_ONCE(!in_serving_softirq());
+ state->softirq_func_calls++;
+
+ if (!state->func(state->test_specific_state))
+ state->softirq_func_reported_failure = true;
+}
+
+/*
+ * Helper function which repeatedly runs the given @func in task, softirq, and
+ * hardirq context concurrently, and reports a failure to KUnit if any
+ * invocation of @func in any context returns false. @func is passed
+ * @test_specific_state as its argument. At most 3 invocations of @func will
+ * run concurrently: one in each of task, softirq, and hardirq context.
+ *
+ * The main purpose of this interrupt context testing is to validate fallback
+ * code paths that run in contexts where the normal code path cannot be used,
+ * typically due to the FPU or vector registers already being in-use in kernel
+ * mode. These code paths aren't covered when the test code is executed only by
+ * the KUnit test runner thread in task context. The reason for the concurrency
+ * is because merely using hardirq context is not sufficient to reach a fallback
+ * code path on some architectures; the hardirq actually has to occur while the
+ * FPU or vector unit was already in-use in kernel mode.
+ *
+ * Another purpose of this testing is to detect issues with the architecture's
+ * irq_fpu_usable() and kernel_fpu_begin/end() or equivalent functions,
+ * especially in softirq context when the softirq may have interrupted a task
+ * already using kernel-mode FPU or vector (if the arch didn't prevent that).
+ * Crypto functions are often executed in softirqs, so this is important.
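+ *
+ * Example usage (a minimal sketch; do_one_op() is a hypothetical operation
+ * returning false on failure):
+ *
+ *	static bool my_irq_func(void *unused)
+ *	{
+ *		return do_one_op();
+ *	}
+ *
+ *	static void my_test(struct kunit *test)
+ *	{
+ *		kunit_run_irq_test(test, my_irq_func, 10000, NULL);
+ *	}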
+ */
+static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *),
+ int max_iterations,
+ void *test_specific_state)
+{
+ struct kunit_irq_test_state state = {
+ .func = func,
+ .test_specific_state = test_specific_state,
+ };
+ unsigned long end_jiffies;
+
+ /*
+ * Set up a hrtimer (the way we access hardirq context) and a work
+ * struct for the BH workqueue (the way we access softirq context).
+ */
+ hrtimer_setup_on_stack(&state.timer, kunit_irq_test_timer_func,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+ INIT_WORK_ONSTACK(&state.bh_work, kunit_irq_test_bh_work_func);
+
+ /* Run for up to max_iterations or 1 second, whichever comes first. */
+ end_jiffies = jiffies + HZ;
+ hrtimer_start(&state.timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL,
+ HRTIMER_MODE_REL_HARD);
+ for (int i = 0; i < max_iterations && !time_after(jiffies, end_jiffies);
+ i++) {
+ if (!func(test_specific_state))
+ state.task_func_reported_failure = true;
+ }
+
+ /* Cancel the timer and work. */
+ hrtimer_cancel(&state.timer);
+ flush_work(&state.bh_work);
+
+ /* Sanity check: the timer and BH functions should have been run. */
+ KUNIT_EXPECT_GT_MSG(test, state.hardirq_func_calls, 0,
+ "Timer function was not called");
+ KUNIT_EXPECT_GT_MSG(test, state.softirq_func_calls, 0,
+ "BH work function was not called");
+
+	/* Check for failures reported from any context. */
+	KUNIT_EXPECT_FALSE_MSG(
+		test, state.task_func_reported_failure,
+		"Failure reported from task context");
+	KUNIT_EXPECT_FALSE_MSG(
+		test, state.hardirq_func_reported_failure,
+		"Failure reported from hardirq context");
+	KUNIT_EXPECT_FALSE_MSG(
+		test, state.softirq_func_reported_failure,
+		"Failure reported from softirq context");
+}
+
+#endif /* _KUNIT_RUN_IN_IRQ_CONTEXT_H */
diff --git a/include/kunit/skbuff.h b/include/kunit/skbuff.h
new file mode 100644
index 000000000000..07784694357c
--- /dev/null
+++ b/include/kunit/skbuff.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit resource management helpers for SKBs (skbuff).
+ *
+ * Copyright (C) 2023 Intel Corporation
+ */
+
+#ifndef _KUNIT_SKBUFF_H
+#define _KUNIT_SKBUFF_H
+
+#include <kunit/resource.h>
+#include <linux/skbuff.h>
+
+static void kunit_action_kfree_skb(void *p)
+{
+ kfree_skb((struct sk_buff *)p);
+}
+
+/**
+ * kunit_zalloc_skb() - Allocate and initialize a resource managed skb.
+ * @test: The test case to which the skb belongs
+ * @len: size to allocate
+ * @gfp: allocation flags
+ *
+ * Allocate a new struct sk_buff with the given gfp flags, zero-fill it to
+ * the given length, and register it with the kunit test for automatic cleanup.
+ *
+ * Returns: newly allocated SKB, or %NULL on error
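+ *
+ * Example (a minimal sketch):
+ *
+ * .. code-block:: c
+ *
+ *	struct sk_buff *skb = kunit_zalloc_skb(test, 64, GFP_KERNEL);
+ *
+ *	KUNIT_ASSERT_NOT_NULL(test, skb);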
+ */
+static inline struct sk_buff *kunit_zalloc_skb(struct kunit *test, int len,
+ gfp_t gfp)
+{
+ struct sk_buff *res = alloc_skb(len, gfp);
+
+ if (!res || skb_pad(res, len))
+ return NULL;
+
+ if (kunit_add_action_or_reset(test, kunit_action_kfree_skb, res))
+ return NULL;
+
+ return res;
+}
+
+/**
+ * kunit_kfree_skb() - Like kfree_skb except for allocations managed by KUnit.
+ * @test: The test case to which the resource belongs.
+ * @skb: The SKB to free.
+ */
+static inline void kunit_kfree_skb(struct kunit *test, struct sk_buff *skb)
+{
+ if (!skb)
+ return;
+
+ kunit_release_action(test, kunit_action_kfree_skb, (void *)skb);
+}
+
+#endif /* _KUNIT_SKBUFF_H */
diff --git a/include/kunit/static_stub.h b/include/kunit/static_stub.h
new file mode 100644
index 000000000000..bf940322dfc0
--- /dev/null
+++ b/include/kunit/static_stub.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit function redirection (static stubbing) API.
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+#ifndef _KUNIT_STATIC_STUB_H
+#define _KUNIT_STATIC_STUB_H
+
+#if !IS_ENABLED(CONFIG_KUNIT)
+
+/* If CONFIG_KUNIT is not enabled, these stubs quietly disappear. */
+#define KUNIT_STATIC_STUB_REDIRECT(real_fn_name, args...) do {} while (0)
+
+#else
+
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include <linux/compiler.h> /* for {un,}likely() */
+#include <linux/sched.h> /* for task_struct */
+
+
+/**
+ * KUNIT_STATIC_STUB_REDIRECT() - call a replacement 'static stub' if one exists
+ * @real_fn_name: The name of this function (as an identifier, not a string)
+ * @args: All of the arguments passed to this function
+ *
+ * This is a function prologue which is used to allow calls to the current
+ * function to be redirected by a KUnit test. KUnit tests can call
+ * kunit_activate_static_stub() to pass a replacement function in. The
+ * replacement function will be called by KUNIT_STATIC_STUB_REDIRECT(), which
+ * will then return from the function. If the caller is not in a KUnit context,
+ * the function will continue execution as normal.
+ *
+ * Example:
+ *
+ * .. code-block:: c
+ *
+ * int real_func(int n)
+ * {
+ * KUNIT_STATIC_STUB_REDIRECT(real_func, n);
+ * return 0;
+ * }
+ *
+ * int replacement_func(int n)
+ * {
+ * return 42;
+ * }
+ *
+ * void example_test(struct kunit *test)
+ * {
+ * kunit_activate_static_stub(test, real_func, replacement_func);
+ * KUNIT_EXPECT_EQ(test, real_func(1), 42);
+ * }
+ *
+ */
+#define KUNIT_STATIC_STUB_REDIRECT(real_fn_name, args...) \
+do { \
+ typeof(&real_fn_name) replacement; \
+ struct kunit *current_test = kunit_get_current_test(); \
+ \
+ if (likely(!current_test)) \
+ break; \
+ \
+ replacement = kunit_hooks.get_static_stub_address(current_test, \
+ &real_fn_name); \
+ \
+ if (unlikely(replacement)) \
+ return replacement(args); \
+} while (0)
+
+/* Helper function for kunit_activate_static_stub(). The macro does
+ * typechecking, so use the macro instead of calling this directly.
+ */
+void __kunit_activate_static_stub(struct kunit *test,
+ void *real_fn_addr,
+ void *replacement_addr);
+
+/**
+ * kunit_activate_static_stub() - replace a function using static stubs.
+ * @test: A pointer to the 'struct kunit' test context for the current test.
+ * @real_fn_addr: The address of the function to replace.
+ * @replacement_addr: The address of the function to replace it with.
+ *
+ * When activated, calls to real_fn_addr from within this test (even if called
+ * indirectly) will instead call replacement_addr. The function pointed to by
+ * real_fn_addr must begin with the static stub prologue in
+ * KUNIT_STATIC_STUB_REDIRECT() for this to work. real_fn_addr and
+ * replacement_addr must have the same type.
+ *
+ * The redirection can be disabled again with kunit_deactivate_static_stub().
+ */
+#define kunit_activate_static_stub(test, real_fn_addr, replacement_addr) do { \
+ typecheck_fn(typeof(&replacement_addr), real_fn_addr); \
+ __kunit_activate_static_stub(test, real_fn_addr, replacement_addr); \
+} while (0)
+
+
+/**
+ * kunit_deactivate_static_stub() - disable a function redirection
+ * @test: A pointer to the 'struct kunit' test context for the current test.
+ * @real_fn_addr: The address of the function to no longer redirect.
+ *
+ * Deactivates a redirection configured with kunit_activate_static_stub(). After
+ * this function returns, calls to real_fn_addr() will execute the original
+ * real_fn, not any previously-configured replacement.
+ */
+void kunit_deactivate_static_stub(struct kunit *test, void *real_fn_addr);
+
+#endif
+#endif
diff --git a/include/kunit/test-bug.h b/include/kunit/test-bug.h
index 5fc58081d511..47aa8f21ccce 100644
--- a/include/kunit/test-bug.h
+++ b/include/kunit/test-bug.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * KUnit API allowing dynamic analysis tools to interact with KUnit tests
+ * KUnit API providing hooks for non-test code to interact with tests.
*
* Copyright (C) 2020, Google LLC.
* Author: Uriel Guajardo <urielguajardo@google.com>
@@ -9,21 +9,63 @@
#ifndef _KUNIT_TEST_BUG_H
#define _KUNIT_TEST_BUG_H
-#define kunit_fail_current_test(fmt, ...) \
- __kunit_fail_current_test(__FILE__, __LINE__, fmt, ##__VA_ARGS__)
+#include <linux/stddef.h> /* for NULL */
-#if IS_BUILTIN(CONFIG_KUNIT)
+#if IS_ENABLED(CONFIG_KUNIT)
-extern __printf(3, 4) void __kunit_fail_current_test(const char *file, int line,
- const char *fmt, ...);
+#include <linux/jump_label.h> /* For static branch */
+#include <linux/sched.h>
-#else
+/* Static key: true if KUnit is currently running any tests. */
+DECLARE_STATIC_KEY_FALSE(kunit_running);
+
+/* Hooks table: a table of function pointers filled in when KUnit loads. */
+extern struct kunit_hooks_table {
+ __printf(3, 4) void (*fail_current_test)(const char*, int, const char*, ...);
+ void *(*get_static_stub_address)(struct kunit *test, void *real_fn_addr);
+} kunit_hooks;
-static inline __printf(3, 4) void __kunit_fail_current_test(const char *file, int line,
- const char *fmt, ...)
+/**
+ * kunit_get_current_test() - Return a pointer to the currently running
+ * KUnit test.
+ *
+ * If a KUnit test is running in the current task, returns a pointer to its
+ * associated struct kunit. This pointer can then be passed to any KUnit
+ * function or assertion. If no test is running (or a test is running in a
+ * different task), returns NULL.
+ *
+ * This function is safe to call even when KUnit is disabled. If CONFIG_KUNIT
+ * is not enabled, it will compile down to nothing and will quickly return
+ * NULL, as no test can be running.
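+ *
+ * Example (a minimal sketch; some_invariant_holds() is hypothetical
+ * instrumentation in non-test code):
+ *
+ * .. code-block:: c
+ *
+ *	struct kunit *test = kunit_get_current_test();
+ *
+ *	if (test)
+ *		KUNIT_EXPECT_TRUE(test, some_invariant_holds());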
+ */
+static inline struct kunit *kunit_get_current_test(void)
{
+ if (!static_branch_unlikely(&kunit_running))
+ return NULL;
+
+ return current->kunit_test;
}
+
+/**
+ * kunit_fail_current_test() - If a KUnit test is running, fail it.
+ *
+ * If a KUnit test is running in the current task, mark that test as failed.
+ */
+#define kunit_fail_current_test(fmt, ...) do { \
+ if (static_branch_unlikely(&kunit_running)) { \
+		/* Guaranteed to be non-NULL when kunit_running is true. */ \
+ kunit_hooks.fail_current_test(__FILE__, __LINE__, \
+ fmt, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+#else
+
+static inline struct kunit *kunit_get_current_test(void) { return NULL; }
+
+#define kunit_fail_current_test(fmt, ...) do {} while (0)
+
#endif
#endif /* _KUNIT_TEST_BUG_H */
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 49601c4b98b8..5ec5182b5e57 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -11,106 +11,89 @@
#include <kunit/assert.h>
#include <kunit/try-catch.h>
-#include <linux/kernel.h>
+
+#include <linux/args.h>
+#include <linux/compiler.h>
+#include <linux/container_of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/jump_label.h>
+#include <linux/kconfig.h>
+#include <linux/kref.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/types.h>
-#include <linux/kref.h>
-struct kunit_resource;
+#include <asm/rwonce.h>
+#include <asm/sections.h>
-typedef int (*kunit_resource_init_t)(struct kunit_resource *, void *);
-typedef void (*kunit_resource_free_t)(struct kunit_resource *);
-
-/**
- * struct kunit_resource - represents a *test managed resource*
- * @data: for the user to store arbitrary data.
- * @name: optional name
- * @free: a user supplied function to free the resource. Populated by
- * kunit_resource_alloc().
- *
- * Represents a *test managed resource*, a resource which will automatically be
- * cleaned up at the end of a test case.
- *
- * Resources are reference counted so if a resource is retrieved via
- * kunit_alloc_and_get_resource() or kunit_find_resource(), we need
- * to call kunit_put_resource() to reduce the resource reference count
- * when finished with it. Note that kunit_alloc_resource() does not require a
- * kunit_resource_put() because it does not retrieve the resource itself.
- *
- * Example:
- *
- * .. code-block:: c
- *
- * struct kunit_kmalloc_params {
- * size_t size;
- * gfp_t gfp;
- * };
- *
- * static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
- * {
- * struct kunit_kmalloc_params *params = context;
- * res->data = kmalloc(params->size, params->gfp);
- *
- * if (!res->data)
- * return -ENOMEM;
- *
- * return 0;
- * }
- *
- * static void kunit_kmalloc_free(struct kunit_resource *res)
- * {
- * kfree(res->data);
- * }
- *
- * void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
- * {
- * struct kunit_kmalloc_params params;
- *
- * params.size = size;
- * params.gfp = gfp;
- *
- * return kunit_alloc_resource(test, kunit_kmalloc_init,
- * kunit_kmalloc_free, &params);
- * }
- *
- * Resources can also be named, with lookup/removal done on a name
- * basis also. kunit_add_named_resource(), kunit_find_named_resource()
- * and kunit_destroy_named_resource(). Resource names must be
- * unique within the test instance.
- */
-struct kunit_resource {
- void *data;
- const char *name;
- kunit_resource_free_t free;
-
- /* private: internal use only. */
- struct kref refcount;
- struct list_head node;
-};
+/* Static key: true if any KUnit tests are currently running */
+DECLARE_STATIC_KEY_FALSE(kunit_running);
struct kunit;
-
-/* Size of log associated with test. */
-#define KUNIT_LOG_SIZE 512
+struct string_stream;
/* Maximum size of parameter description string. */
#define KUNIT_PARAM_DESC_SIZE 128
+/* Maximum size of a status comment. */
+#define KUNIT_STATUS_COMMENT_SIZE 256
+
/*
* TAP specifies subtest stream indentation of 4 spaces, 8 spaces for a
* sub-subtest. See the "Subtests" section in
* https://node-tap.org/tap-protocol/
*/
+#define KUNIT_INDENT_LEN 4
#define KUNIT_SUBTEST_INDENT " "
#define KUNIT_SUBSUBTEST_INDENT " "
/**
+ * enum kunit_status - Type of result for a test or test suite
+ * @KUNIT_SUCCESS: Denotes the test has neither failed nor been skipped.
+ * @KUNIT_FAILURE: Denotes the test has failed.
+ * @KUNIT_SKIPPED: Denotes the test has been skipped.
+ */
+enum kunit_status {
+ KUNIT_SUCCESS,
+ KUNIT_FAILURE,
+ KUNIT_SKIPPED,
+};
+
+/* Attribute struct/enum definitions */
+
+/*
+ * The speed attribute is stored as an enum and separated into categories of
+ * speed: very_slow, slow, and normal. These speeds are relative to
+ * other KUnit tests.
+ *
+ * Note: an unset speed attribute defaults to KUNIT_SPEED_NORMAL.
+ */
+enum kunit_speed {
+ KUNIT_SPEED_UNSET,
+ KUNIT_SPEED_VERY_SLOW,
+ KUNIT_SPEED_SLOW,
+ KUNIT_SPEED_NORMAL,
+ KUNIT_SPEED_MAX = KUNIT_SPEED_NORMAL,
+};
+
+/* Holds attributes for each test case and suite */
+struct kunit_attributes {
+ enum kunit_speed speed;
+};
+
+/**
* struct kunit_case - represents an individual test case.
*
* @run_case: the function representing the actual test case.
* @name: the name of the test case.
* @generate_params: the generator function for parameterized tests.
+ * @attr: the attributes associated with the test
+ * @param_init: The init function to run before a parameterized test.
+ * @param_exit: The exit function to run after a parameterized test.
*
 * A test case is a function with the signature
* ``void (*)(struct kunit *)``
@@ -145,16 +128,28 @@ struct kunit;
struct kunit_case {
void (*run_case)(struct kunit *test);
const char *name;
- const void* (*generate_params)(const void *prev, char *desc);
+ const void* (*generate_params)(struct kunit *test,
+ const void *prev, char *desc);
+ struct kunit_attributes attr;
+ int (*param_init)(struct kunit *test);
+ void (*param_exit)(struct kunit *test);
/* private: internal use only. */
- bool success;
- char *log;
+ enum kunit_status status;
+ char *module_name;
+ struct string_stream *log;
};
-static inline char *kunit_status_to_string(bool status)
+static inline char *kunit_status_to_ok_not_ok(enum kunit_status status)
{
- return status ? "ok" : "not ok";
+ switch (status) {
+ case KUNIT_SKIPPED:
+ case KUNIT_SUCCESS:
+ return "ok";
+ case KUNIT_FAILURE:
+ return "not ok";
+ }
+ return "invalid";
}
/**
@@ -166,7 +161,32 @@ static inline char *kunit_status_to_string(bool status)
* &struct kunit_case object from it. See the documentation for
* &struct kunit_case for an example on how to use it.
*/
-#define KUNIT_CASE(test_name) { .run_case = test_name, .name = #test_name }
+#define KUNIT_CASE(test_name) \
+ { .run_case = test_name, .name = #test_name, \
+ .module_name = KBUILD_MODNAME}
+
+/**
+ * KUNIT_CASE_ATTR - A helper for creating a &struct kunit_case
+ * with attributes
+ *
+ * @test_name: a reference to a test case function.
+ * @attributes: a reference to a struct kunit_attributes object containing
+ * test attributes
+ */
+#define KUNIT_CASE_ATTR(test_name, attributes) \
+ { .run_case = test_name, .name = #test_name, \
+ .attr = attributes, .module_name = KBUILD_MODNAME}
+
+/**
+ * KUNIT_CASE_SLOW - A helper for creating a &struct kunit_case
+ * with the slow attribute
+ *
+ * @test_name: a reference to a test case function.
+ */
+#define KUNIT_CASE_SLOW(test_name) \
+ { .run_case = test_name, .name = #test_name, \
+ .attr.speed = KUNIT_SPEED_SLOW, .module_name = KBUILD_MODNAME}
/**
* KUNIT_CASE_PARAM - A helper for creation a parameterized &struct kunit_case
@@ -187,33 +207,104 @@ static inline char *kunit_status_to_string(bool status)
*/
#define KUNIT_CASE_PARAM(test_name, gen_params) \
{ .run_case = test_name, .name = #test_name, \
- .generate_params = gen_params }
+ .generate_params = gen_params, .module_name = KBUILD_MODNAME}
+
+/**
+ * KUNIT_CASE_PARAM_ATTR - A helper for creating a parameterized &struct
+ * kunit_case with attributes
+ *
+ * @test_name: a reference to a test case function.
+ * @gen_params: a reference to a parameter generator function.
+ * @attributes: a reference to a struct kunit_attributes object containing
+ * test attributes
+ */
+#define KUNIT_CASE_PARAM_ATTR(test_name, gen_params, attributes) \
+ { .run_case = test_name, .name = #test_name, \
+ .generate_params = gen_params, \
+ .attr = attributes, .module_name = KBUILD_MODNAME}
+
+/**
+ * KUNIT_CASE_PARAM_WITH_INIT - Define a parameterized KUnit test case with custom
+ * param_init() and param_exit() functions.
+ * @test_name: The function implementing the test case.
+ * @gen_params: The function to generate parameters for the test case.
+ * @init: A reference to the param_init() function to run before a parameterized test.
+ * @exit: A reference to the param_exit() function to run after a parameterized test.
+ *
+ * Provides the option to register param_init() and param_exit() functions.
+ * param_init/exit will be passed the parameterized test context and run once
+ * before and once after the parameterized test. The init function can be used
+ * to add resources shared between parameter runs, to pass parameter arrays,
+ * and to perform any other setup logic. The exit function can be used to clean
+ * up resources that were not managed by the parameterized test, and to perform
+ * any other teardown logic.
+ *
+ * Note: If you are registering a parameter array in param_init() with
+ * kunit_register_params_array() then you need to pass kunit_array_gen_params()
+ * to this as the generator function.
+ */
+#define KUNIT_CASE_PARAM_WITH_INIT(test_name, gen_params, init, exit) \
+ { .run_case = test_name, .name = #test_name, \
+ .generate_params = gen_params, \
+ .param_init = init, .param_exit = exit, \
+ .module_name = KBUILD_MODNAME}
/**
* struct kunit_suite - describes a related collection of &struct kunit_case
*
* @name: the name of the test. Purely informational.
+ * @suite_init: called once per test suite before the test cases.
+ * @suite_exit: called once per test suite after all test cases.
* @init: called before every test case.
* @exit: called after every test case.
* @test_cases: a null terminated array of test cases.
+ * @attr: the attributes associated with the test suite
*
* A kunit_suite is a collection of related &struct kunit_case s, such that
* @init is called before every test case and @exit is called after every
* test case, similar to the notion of a *test fixture* or a *test class*
* in other unit testing frameworks like JUnit or Googletest.
*
+ * Note that @exit and @suite_exit will run even if @init or @suite_init
+ * fail: make sure they can handle any inconsistent state which may result.
+ *
* Every &struct kunit_case must be associated with a kunit_suite for KUnit
* to run it.
*/
struct kunit_suite {
const char name[256];
+ int (*suite_init)(struct kunit_suite *suite);
+ void (*suite_exit)(struct kunit_suite *suite);
int (*init)(struct kunit *test);
void (*exit)(struct kunit *test);
struct kunit_case *test_cases;
+ struct kunit_attributes attr;
/* private: internal use only */
+ char status_comment[KUNIT_STATUS_COMMENT_SIZE];
struct dentry *debugfs;
- char *log;
+ struct string_stream *log;
+ int suite_init_err;
+ bool is_init;
+};
+
+/* Stores an array of suites, end points one past the end */
+struct kunit_suite_set {
+ struct kunit_suite * const *start;
+ struct kunit_suite * const *end;
+};
+
+/* Stores the pointer to the parameter array and its metadata. */
+struct kunit_params {
+ /*
+ * Reference to the parameter array for a parameterized test. This
+ * is NULL if a parameter array wasn't directly passed to the
+ * parameterized test context struct kunit via kunit_register_params_array().
+ */
+ const void *params;
+ /* Reference to a function that gets the description of a parameter. */
+ void (*get_description)(struct kunit *test, const void *param, char *desc);
+ size_t num_params;
+ size_t elem_size;
};
/**
@@ -221,18 +312,24 @@ struct kunit_suite {
*
* @priv: for user to store arbitrary data. Commonly used to pass data
* created in the init function (see &struct kunit_suite).
+ * @parent: reference to the parent context of type struct kunit that can
+ * be used for storing shared resources.
+ * @params_array: for storing the parameter array.
*
* Used to store information about the current context under which the test
* is running. Most of this data is private and should only be accessed
- * indirectly via public functions; the one exception is @priv which can be
- * used by the test writer to store arbitrary data.
+ * indirectly via public functions; the exceptions are @priv, @parent and
+ * @params_array which can be used by the test writer to store arbitrary data,
+ * access the parent context, and to store the parameter array, respectively.
*/
struct kunit {
void *priv;
+ struct kunit *parent;
+ struct kunit_params params_array;
/* private: internal use only. */
const char *name; /* Read only after initialization! */
- char *log; /* Points at case log after initialization */
+ struct string_stream *log; /* Points at case log after initialization */
struct kunit_try_catch try_catch;
/* param_value is the current parameter value for a test case. */
const void *param_value;
@@ -245,22 +342,33 @@ struct kunit {
* be read after the test case finishes once all threads associated
* with the test case have terminated.
*/
- bool success; /* Read only after test_case finishes! */
spinlock_t lock; /* Guards all mutable test state. */
+ enum kunit_status status; /* Read only after test_case finishes! */
/*
* Because resources is a list that may be updated multiple times (with
* new resources) from any thread associated with a test case, we must
* protect it with some type of lock.
*/
struct list_head resources; /* Protected by lock. */
+
+ char status_comment[KUNIT_STATUS_COMMENT_SIZE];
+ /* Saves the last seen test. Useful to help with faults. */
+ struct kunit_loc last_seen;
};
static inline void kunit_set_failure(struct kunit *test)
{
- WRITE_ONCE(test->success, false);
+ WRITE_ONCE(test->status, KUNIT_FAILURE);
}
-void kunit_init_test(struct kunit *test, const char *name, char *log);
+bool kunit_enabled(void);
+bool kunit_autorun(void);
+const char *kunit_action(void);
+const char *kunit_filter_glob(void);
+char *kunit_filter(void);
+char *kunit_filter_action(void);
+
+void kunit_init_test(struct kunit *test, const char *name, struct string_stream *log);
int kunit_run_tests(struct kunit_suite *suite);
@@ -269,9 +377,26 @@ size_t kunit_suite_num_test_cases(struct kunit_suite *suite);
unsigned int kunit_test_case_num(struct kunit_suite *suite,
struct kunit_case *test_case);
-int __kunit_test_suites_init(struct kunit_suite * const * const suites);
+struct kunit_suite_set
+kunit_filter_suites(const struct kunit_suite_set *suite_set,
+ const char *filter_glob,
+ char *filters,
+ char *filter_action,
+ int *err);
+void kunit_free_suite_set(struct kunit_suite_set suite_set);
+
+int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_suites,
+ bool run_tests);
+
+void __kunit_test_suites_exit(struct kunit_suite **suites, int num_suites);
+
+void kunit_exec_run_tests(struct kunit_suite_set *suite_set, bool builtin);
+void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr);
+
+struct kunit_suite_set kunit_merge_suite_sets(struct kunit_suite_set init_suite_set,
+ struct kunit_suite_set suite_set);
-void __kunit_test_suites_exit(struct kunit_suite **suites);
+const void *kunit_array_gen_params(struct kunit *test, const void *prev, char *desc);
#if IS_BUILTIN(CONFIG_KUNIT)
int kunit_run_all_tests(void);
@@ -282,335 +407,250 @@ static inline int kunit_run_all_tests(void)
}
#endif /* IS_BUILTIN(CONFIG_KUNIT) */
-#ifdef MODULE
+#define __kunit_test_suites(unique_array, ...) \
+ static struct kunit_suite *unique_array[] \
+ __aligned(sizeof(struct kunit_suite *)) \
+ __used __section(".kunit_test_suites") = { __VA_ARGS__ }
+
/**
- * kunit_test_suites_for_module() - used to register one or more
- * &struct kunit_suite with KUnit.
+ * kunit_test_suites() - used to register one or more &struct kunit_suite
+ * with KUnit.
*
* @__suites: a statically allocated list of &struct kunit_suite.
*
- * Registers @__suites with the test framework. See &struct kunit_suite for
- * more information.
+ * Registers @__suites with the test framework.
+ * This is done by placing the array of struct kunit_suite * in the
+ * .kunit_test_suites ELF section.
*
- * If a test suite is built-in, module_init() gets translated into
- * an initcall which we don't want as the idea is that for builtins
- * the executor will manage execution. So ensure we do not define
- * module_{init|exit} functions for the builtin case when registering
- * suites via kunit_test_suites() below.
- */
-#define kunit_test_suites_for_module(__suites) \
- static int __init kunit_test_suites_init(void) \
- { \
- return __kunit_test_suites_init(__suites); \
- } \
- module_init(kunit_test_suites_init); \
- \
- static void __exit kunit_test_suites_exit(void) \
- { \
- return __kunit_test_suites_exit(__suites); \
- } \
- module_exit(kunit_test_suites_exit)
-#else
-#define kunit_test_suites_for_module(__suites)
-#endif /* MODULE */
+ * When builtin, KUnit tests are all run via the executor at boot, and when
+ * built as a module, they run on module load.
+ *
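+ * Example (a minimal sketch; my_test is a hypothetical test function):
+ *
+ * .. code-block:: c
+ *
+ *	static struct kunit_case my_test_cases[] = {
+ *		KUNIT_CASE(my_test),
+ *		{}
+ *	};
+ *
+ *	static struct kunit_suite my_suite = {
+ *		.name = "my-suite",
+ *		.test_cases = my_test_cases,
+ *	};
+ *
+ *	kunit_test_suites(&my_suite);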
+ */
+#define kunit_test_suites(__suites...) \
+ __kunit_test_suites(__UNIQUE_ID(array), \
+ ##__suites)
+
+#define kunit_test_suite(suite) kunit_test_suites(&suite)
-#define __kunit_test_suites(unique_array, unique_suites, ...) \
- static struct kunit_suite *unique_array[] = { __VA_ARGS__, NULL }; \
- kunit_test_suites_for_module(unique_array); \
- static struct kunit_suite **unique_suites \
- __used __section(".kunit_test_suites") = unique_array
+#define __kunit_init_test_suites(unique_array, ...) \
+ static struct kunit_suite *unique_array[] \
+ __aligned(sizeof(struct kunit_suite *)) \
+ __used __section(".kunit_init_test_suites") = { __VA_ARGS__ }
/**
- * kunit_test_suites() - used to register one or more &struct kunit_suite
- * with KUnit.
+ * kunit_test_init_section_suites() - used to register one or more &struct
+ * kunit_suite containing init functions or
+ * init data.
*
* @__suites: a statically allocated list of &struct kunit_suite.
*
- * Registers @suites with the test framework. See &struct kunit_suite for
- * more information.
+ * This functions similarly to kunit_test_suites(), except that it compiles the
+ * list of suites during the init phase.
*
- * When builtin, KUnit tests are all run via executor; this is done
- * by placing the array of struct kunit_suite * in the .kunit_test_suites
- * ELF section.
+ * This macro also suffixes the array and suite declarations it makes with
+ * _probe, so that modpost suppresses warnings about referencing init data
+ * for symbols named in this manner.
*
- * An alternative is to build the tests as a module. Because modules do not
- * support multiple initcall()s, we need to initialize an array of suites for a
- * module.
+ * Note: these init tests cannot be run after boot, so there is no
+ * "run" debugfs file generated for these tests.
*
+ * Also, do not mark the suite or test case structs with __initdata because
+ * they will be used after the init phase with debugfs.
*/
-#define kunit_test_suites(__suites...) \
- __kunit_test_suites(__UNIQUE_ID(array), \
- __UNIQUE_ID(suites), \
+#define kunit_test_init_section_suites(__suites...) \
+ __kunit_init_test_suites(CONCATENATE(__UNIQUE_ID(array), _probe), \
##__suites)
-#define kunit_test_suite(suite) kunit_test_suites(&suite)
+#define kunit_test_init_section_suite(suite) \
+ kunit_test_init_section_suites(&suite)
#define kunit_suite_for_each_test_case(suite, test_case) \
for (test_case = suite->test_cases; test_case->run_case; test_case++)
-bool kunit_suite_has_succeeded(struct kunit_suite *suite);
-
-/*
- * Like kunit_alloc_resource() below, but returns the struct kunit_resource
- * object that contains the allocation. This is mostly for testing purposes.
- */
-struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- gfp_t internal_gfp,
- void *context);
+enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite);
/**
- * kunit_get_resource() - Hold resource for use. Should not need to be used
- * by most users as we automatically get resources
- * retrieved by kunit_find_resource*().
- * @res: resource
- */
-static inline void kunit_get_resource(struct kunit_resource *res)
-{
- kref_get(&res->refcount);
-}
-
-/*
- * Called when refcount reaches zero via kunit_put_resources();
- * should not be called directly.
+ * kunit_kmalloc_array() - Like kmalloc_array() except the allocation is *test managed*.
+ * @test: The test context object.
+ * @n: number of elements.
+ * @size: The size in bytes of the desired memory.
+ * @gfp: flags passed to underlying kmalloc().
+ *
+ * Just like `kmalloc_array(...)`, except the allocation is managed by the test case
+ * and is automatically cleaned up after the test case concludes. See kunit_add_action()
+ * for more information.
+ *
+ * Note that some internal context data is also allocated with GFP_KERNEL,
+ * regardless of the gfp passed in.
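+ *
+ * Example (a minimal sketch):
+ *
+ * .. code-block:: c
+ *
+ *	u32 *vals = kunit_kmalloc_array(test, 16, sizeof(*vals), GFP_KERNEL);
+ *
+ *	KUNIT_ASSERT_NOT_NULL(test, vals);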
*/
-static inline void kunit_release_resource(struct kref *kref)
-{
- struct kunit_resource *res = container_of(kref, struct kunit_resource,
- refcount);
-
- /* If free function is defined, resource was dynamically allocated. */
- if (res->free) {
- res->free(res);
- kfree(res);
- }
-}
+void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp);
/**
- * kunit_put_resource() - When caller is done with retrieved resource,
- * kunit_put_resource() should be called to drop
- * reference count. The resource list maintains
- * a reference count on resources, so if no users
- * are utilizing a resource and it is removed from
- * the resource list, it will be freed via the
- * associated free function (if any). Only
- * needs to be used if we alloc_and_get() or
- * find() resource.
- * @res: resource
+ * kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*.
+ * @test: The test context object.
+ * @size: The size in bytes of the desired memory.
+ * @gfp: flags passed to underlying kmalloc().
+ *
+ * See kmalloc() and kunit_kmalloc_array() for more information.
+ *
+ * Note that some internal context data is also allocated with GFP_KERNEL,
+ * regardless of the gfp passed in.
*/
-static inline void kunit_put_resource(struct kunit_resource *res)
+static inline void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
{
- kref_put(&res->refcount, kunit_release_resource);
+ return kunit_kmalloc_array(test, 1, size, gfp);
}
/**
- * kunit_add_resource() - Add a *test managed resource*.
- * @test: The test context object.
- * @init: a user-supplied function to initialize the result (if needed). If
- * none is supplied, the resource data value is simply set to @data.
- * If an init function is supplied, @data is passed to it instead.
- * @free: a user-supplied function to free the resource (if needed).
- * @res: The resource.
- * @data: value to pass to init function or set in resource data field.
+ * kunit_kfree() - Like kfree except for allocations managed by KUnit.
+ * @test: The test case to which the resource belongs.
+ * @ptr: The memory allocation to free.
*/
-int kunit_add_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- struct kunit_resource *res,
- void *data);
+void kunit_kfree(struct kunit *test, const void *ptr);
/**
- * kunit_add_named_resource() - Add a named *test managed resource*.
+ * kunit_kzalloc() - Just like kunit_kmalloc(), but zeroes the allocation.
* @test: The test context object.
- * @init: a user-supplied function to initialize the resource data, if needed.
- * @free: a user-supplied function to free the resource data, if needed.
- * @res: The resource.
- * @name: name to be set for resource.
- * @data: value to pass to init function or set in resource data field.
+ * @size: The size in bytes of the desired memory.
+ * @gfp: flags passed to underlying kmalloc().
+ *
+ * See kzalloc() and kunit_kmalloc_array() for more information.
*/
-int kunit_add_named_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- struct kunit_resource *res,
- const char *name,
- void *data);
-
-/**
- * kunit_alloc_resource() - Allocates a *test managed resource*.
- * @test: The test context object.
- * @init: a user supplied function to initialize the resource.
- * @free: a user supplied function to free the resource.
- * @internal_gfp: gfp to use for internal allocations, if unsure, use GFP_KERNEL
- * @context: for the user to pass in arbitrary data to the init function.
- *
- * Allocates a *test managed resource*, a resource which will automatically be
- * cleaned up at the end of a test case. See &struct kunit_resource for an
- * example.
- *
- * Note: KUnit needs to allocate memory for a kunit_resource object. You must
- * specify an @internal_gfp that is compatible with the use context of your
- * resource.
- */
-static inline void *kunit_alloc_resource(struct kunit *test,
- kunit_resource_init_t init,
- kunit_resource_free_t free,
- gfp_t internal_gfp,
- void *context)
+static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp)
{
- struct kunit_resource *res;
-
- res = kzalloc(sizeof(*res), internal_gfp);
- if (!res)
- return NULL;
-
- if (!kunit_add_resource(test, init, free, res, context))
- return res->data;
-
- return NULL;
+ return kunit_kmalloc(test, size, gfp | __GFP_ZERO);
}
-typedef bool (*kunit_resource_match_t)(struct kunit *test,
- struct kunit_resource *res,
- void *match_data);
-
/**
- * kunit_resource_instance_match() - Match a resource with the same instance.
- * @test: Test case to which the resource belongs.
- * @res: The resource.
- * @match_data: The resource pointer to match against.
+ * kunit_kcalloc() - Just like kunit_kmalloc_array(), but zeroes the allocation.
+ * @test: The test context object.
+ * @n: number of elements.
+ * @size: The size in bytes of the desired memory.
+ * @gfp: flags passed to underlying kmalloc().
*
- * An instance of kunit_resource_match_t that matches a resource whose
- * allocation matches @match_data.
+ * See kcalloc() and kunit_kmalloc_array() for more information.
*/
-static inline bool kunit_resource_instance_match(struct kunit *test,
- struct kunit_resource *res,
- void *match_data)
+static inline void *kunit_kcalloc(struct kunit *test, size_t n, size_t size, gfp_t gfp)
{
- return res->data == match_data;
+ return kunit_kmalloc_array(test, n, size, gfp | __GFP_ZERO);
}
+
/**
- * kunit_resource_name_match() - Match a resource with the same name.
- * @test: Test case to which the resource belongs.
- * @res: The resource.
- * @match_name: The name to match against.
+ * kunit_kfree_const() - conditionally free test managed memory
+ * @test: The test context object.
+ * @x: pointer to the memory
+ *
+ * Calls kunit_kfree() only if @x is not in the .rodata section.
+ * See kunit_kstrdup_const() for more information.
*/
-static inline bool kunit_resource_name_match(struct kunit *test,
- struct kunit_resource *res,
- void *match_name)
-{
- return res->name && strcmp(res->name, match_name) == 0;
-}
+void kunit_kfree_const(struct kunit *test, const void *x);
/**
- * kunit_find_resource() - Find a resource using match function/data.
- * @test: Test case to which the resource belongs.
- * @match: match function to be applied to resources/match data.
- * @match_data: data to be used in matching.
+ * kunit_kstrdup() - Duplicates a string into a test managed allocation.
+ *
+ * @test: The test context object.
+ * @str: The NULL-terminated string to duplicate.
+ * @gfp: flags passed to underlying kmalloc().
+ *
+ * See kstrdup() and kunit_kmalloc_array() for more information.
*/
-static inline struct kunit_resource *
-kunit_find_resource(struct kunit *test,
- kunit_resource_match_t match,
- void *match_data)
+static inline char *kunit_kstrdup(struct kunit *test, const char *str, gfp_t gfp)
{
- struct kunit_resource *res, *found = NULL;
-
- spin_lock(&test->lock);
+ size_t len;
+ char *buf;
- list_for_each_entry_reverse(res, &test->resources, node) {
- if (match(test, res, (void *)match_data)) {
- found = res;
- kunit_get_resource(found);
- break;
- }
- }
-
- spin_unlock(&test->lock);
+ if (!str)
+ return NULL;
- return found;
+ len = strlen(str) + 1;
+ buf = kunit_kmalloc(test, len, gfp);
+ if (buf)
+ memcpy(buf, str, len);
+ return buf;
}
/**
- * kunit_find_named_resource() - Find a resource using match name.
- * @test: Test case to which the resource belongs.
- * @name: match name.
+ * kunit_kstrdup_const() - Conditionally duplicates a string into a test managed allocation.
+ *
+ * @test: The test context object.
+ * @str: The NULL-terminated string to duplicate.
+ * @gfp: flags passed to underlying kmalloc().
+ *
+ * Calls kunit_kstrdup() only if @str is not in the .rodata section. Must be freed with
+ * kunit_kfree_const() -- not kunit_kfree().
+ * See kstrdup_const() and kunit_kmalloc_array() for more information.
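+ *
+ * Example (a minimal sketch):
+ *
+ * .. code-block:: c
+ *
+ *	const char *name = kunit_kstrdup_const(test, "my-name", GFP_KERNEL);
+ *
+ *	KUNIT_ASSERT_NOT_NULL(test, name);
+ *	kunit_kfree_const(test, name);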
*/
-static inline struct kunit_resource *
-kunit_find_named_resource(struct kunit *test,
- const char *name)
-{
- return kunit_find_resource(test, kunit_resource_name_match,
- (void *)name);
-}
+const char *kunit_kstrdup_const(struct kunit *test, const char *str, gfp_t gfp);
/**
- * kunit_destroy_resource() - Find a kunit_resource and destroy it.
- * @test: Test case to which the resource belongs.
- * @match: Match function. Returns whether a given resource matches @match_data.
- * @match_data: Data passed into @match.
+ * kunit_attach_mm() - Create and attach a new mm if it doesn't already exist.
*
- * RETURNS:
- * 0 if kunit_resource is found and freed, -ENOENT if not found.
+ * Allocates a &struct mm_struct and attaches it to @current. In most cases, call
+ * kunit_vm_mmap() without calling kunit_attach_mm() directly. Only necessary when
+ * code under test accesses the mm before executing the mmap (e.g., to perform
+ * additional initialization beforehand).
+ *
+ * Return: 0 on success, -errno on failure.
*/
-int kunit_destroy_resource(struct kunit *test,
- kunit_resource_match_t match,
- void *match_data);
-
-static inline int kunit_destroy_named_resource(struct kunit *test,
- const char *name)
-{
- return kunit_destroy_resource(test, kunit_resource_name_match,
- (void *)name);
-}
+int kunit_attach_mm(void);
/**
- * kunit_remove_resource() - remove resource from resource list associated with
- * test.
+ * kunit_vm_mmap() - Allocate KUnit-tracked vm_mmap() area
* @test: The test context object.
- * @res: The resource to be removed.
- *
- * Note that the resource will not be immediately freed since it is likely
- * the caller has a reference to it via alloc_and_get() or find();
- * in this case a final call to kunit_put_resource() is required.
+ * @file: struct file pointer to map from, if any
+ * @addr: desired address, if any
+ * @len: how many bytes to allocate
+ * @prot: mmap PROT_* bits
+ * @flag: mmap flags
+ * @offset: offset into @file to start mapping from.
+ *
+ * See vm_mmap() for more information.
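+ *
+ * Example (a minimal sketch mapping one page of anonymous memory):
+ *
+ * .. code-block:: c
+ *
+ *	unsigned long addr;
+ *
+ *	addr = kunit_vm_mmap(test, NULL, 0, PAGE_SIZE,
+ *			     PROT_READ | PROT_WRITE,
+ *			     MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ *	KUNIT_ASSERT_FALSE(test, IS_ERR_VALUE(addr));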
*/
-void kunit_remove_resource(struct kunit *test, struct kunit_resource *res);
+unsigned long kunit_vm_mmap(struct kunit *test, struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flag,
+ unsigned long offset);
+
+void kunit_cleanup(struct kunit *test);
+
+void __printf(2, 3) kunit_log_append(struct string_stream *log, const char *fmt, ...);
/**
- * kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*.
+ * kunit_mark_skipped() - Marks @test as skipped
+ *
* @test: The test context object.
- * @size: The size in bytes of the desired memory.
- * @gfp: flags passed to underlying kmalloc().
+ * @fmt: A printk() style format string.
*
- * Just like `kmalloc(...)`, except the allocation is managed by the test case
- * and is automatically cleaned up after the test case concludes. See &struct
- * kunit_resource for more information.
- */
-void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp);
-
-/**
- * kunit_kfree() - Like kfree except for allocations managed by KUnit.
- * @test: The test case to which the resource belongs.
- * @ptr: The memory allocation to free.
+ * Marks the test as skipped. @fmt is output as the test status
+ * comment, typically the reason the test was skipped.
+ *
+ * Test execution continues after kunit_mark_skipped() is called.
*/
-void kunit_kfree(struct kunit *test, const void *ptr);
+#define kunit_mark_skipped(test, fmt, ...) \
+ do { \
+ WRITE_ONCE((test)->status, KUNIT_SKIPPED); \
+ scnprintf((test)->status_comment, \
+ KUNIT_STATUS_COMMENT_SIZE, \
+ fmt, ##__VA_ARGS__); \
+ } while (0)
/**
- * kunit_kzalloc() - Just like kunit_kmalloc(), but zeroes the allocation.
+ * kunit_skip() - Marks @test as skipped
+ *
* @test: The test context object.
- * @size: The size in bytes of the desired memory.
- * @gfp: flags passed to underlying kmalloc().
+ * @fmt: A printk() style format string.
+ *
+ * Skips the test. @fmt is output as the test status
+ * comment, typically the reason the test was skipped.
*
- * See kzalloc() and kunit_kmalloc() for more information.
+ * Test execution is halted after kunit_skip() is called.
*/
-static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp)
-{
- return kunit_kmalloc(test, size, gfp | __GFP_ZERO);
-}
-
-void kunit_cleanup(struct kunit *test);
-
-void kunit_log_append(char *log, const char *fmt, ...);
+#define kunit_skip(test, fmt, ...) \
+ do { \
+ kunit_mark_skipped((test), fmt, ##__VA_ARGS__); \
+ kunit_try_catch_throw(&((test)->try_catch)); \
+ } while (0)
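A minimal sketch of skipping a test from its body, assuming the precondition
shown (my_feature_test and the CONFIG_OF check are hypothetical):

	static void my_feature_test(struct kunit *test)
	{
		if (!IS_ENABLED(CONFIG_OF))
			kunit_skip(test, "requires CONFIG_OF");

		/* Runs only when the feature is available. */
	}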
/*
* printk and log to per-test or per-suite log buffer. Logging only done
@@ -619,7 +659,7 @@ void kunit_log_append(char *log, const char *fmt, ...);
#define kunit_log(lvl, test_or_suite, fmt, ...) \
do { \
printk(lvl fmt, ##__VA_ARGS__); \
- kunit_log_append((test_or_suite)->log, fmt "\n", \
+ kunit_log_append((test_or_suite)->log, fmt, \
##__VA_ARGS__); \
} while (0)
@@ -661,6 +701,15 @@ void kunit_log_append(char *log, const char *fmt, ...);
#define kunit_err(test, fmt, ...) \
kunit_printk(KERN_ERR, test, fmt, ##__VA_ARGS__)
+/*
+ * Must be called at the beginning of each KUNIT_*_ASSERTION().
+ * Cf. KUNIT_CURRENT_LOC.
+ */
+#define _KUNIT_SAVE_LOC(test) do { \
+ WRITE_ONCE(test->last_seen.file, __FILE__); \
+ WRITE_ONCE(test->last_seen.line, __LINE__); \
+} while (0)
+
/**
* KUNIT_SUCCEED() - A no-op expectation. Only exists for code clarity.
* @test: The test context object.
@@ -669,30 +718,42 @@ void kunit_log_append(char *log, const char *fmt, ...);
* words, it does nothing and only exists for code clarity. See
* KUNIT_EXPECT_TRUE() for more information.
*/
-#define KUNIT_SUCCEED(test) do {} while (0)
-
-void kunit_do_assertion(struct kunit *test,
- struct kunit_assert *assert,
- bool pass,
- const char *fmt, ...);
-
-#define KUNIT_ASSERTION(test, pass, assert_class, INITIALIZER, fmt, ...) do { \
- struct assert_class __assertion = INITIALIZER; \
- kunit_do_assertion(test, \
- &__assertion.assert, \
- pass, \
- fmt, \
- ##__VA_ARGS__); \
+#define KUNIT_SUCCEED(test) _KUNIT_SAVE_LOC(test)
+
+void __noreturn __kunit_abort(struct kunit *test);
+
+void __printf(6, 7) __kunit_do_failed_assertion(struct kunit *test,
+ const struct kunit_loc *loc,
+ enum kunit_assert_type type,
+ const struct kunit_assert *assert,
+ assert_format_t assert_format,
+ const char *fmt, ...);
+
+#define _KUNIT_FAILED(test, assert_type, assert_class, assert_format, INITIALIZER, fmt, ...) do { \
+ static const struct kunit_loc __loc = KUNIT_CURRENT_LOC; \
+ const struct assert_class __assertion = INITIALIZER; \
+ __kunit_do_failed_assertion(test, \
+ &__loc, \
+ assert_type, \
+ &__assertion.assert, \
+ assert_format, \
+ fmt, \
+ ##__VA_ARGS__); \
+ if (assert_type == KUNIT_ASSERTION) \
+ __kunit_abort(test); \
} while (0)
-#define KUNIT_FAIL_ASSERTION(test, assert_type, fmt, ...) \
- KUNIT_ASSERTION(test, \
- false, \
- kunit_fail_assert, \
- KUNIT_INIT_FAIL_ASSERT_STRUCT(test, assert_type), \
- fmt, \
- ##__VA_ARGS__)
+#define KUNIT_FAIL_ASSERTION(test, assert_type, fmt, ...) do { \
+ _KUNIT_SAVE_LOC(test); \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_fail_assert, \
+ kunit_fail_assert_format, \
+ {}, \
+ fmt, \
+ ##__VA_ARGS__); \
+} while (0)
/**
* KUNIT_FAIL() - Always causes a test to fail when evaluated.
@@ -711,21 +772,29 @@ void kunit_do_assertion(struct kunit *test,
fmt, \
##__VA_ARGS__)
+/* Helper to safely pass around an initializer list to other macros. */
+#define KUNIT_INIT_ASSERT(initializers...) { initializers }
+
#define KUNIT_UNARY_ASSERTION(test, \
assert_type, \
- condition, \
- expected_true, \
+ condition_, \
+ expected_true_, \
fmt, \
...) \
- KUNIT_ASSERTION(test, \
- !!(condition) == !!expected_true, \
- kunit_unary_assert, \
- KUNIT_INIT_UNARY_ASSERT_STRUCT(test, \
- assert_type, \
- #condition, \
- expected_true), \
- fmt, \
- ##__VA_ARGS__)
+do { \
+ _KUNIT_SAVE_LOC(test); \
+ if (likely(!!(condition_) == !!expected_true_)) \
+ break; \
+ \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_unary_assert, \
+ kunit_unary_assert_format, \
+ KUNIT_INIT_ASSERT(.condition = #condition_, \
+ .expected_true = expected_true_), \
+ fmt, \
+ ##__VA_ARGS__); \
+} while (0)
#define KUNIT_TRUE_MSG_ASSERTION(test, assert_type, condition, fmt, ...) \
KUNIT_UNARY_ASSERTION(test, \
@@ -735,9 +804,6 @@ void kunit_do_assertion(struct kunit *test,
fmt, \
##__VA_ARGS__)
-#define KUNIT_TRUE_ASSERTION(test, assert_type, condition) \
- KUNIT_TRUE_MSG_ASSERTION(test, assert_type, condition, NULL)
-
#define KUNIT_FALSE_MSG_ASSERTION(test, assert_type, condition, fmt, ...) \
KUNIT_UNARY_ASSERTION(test, \
assert_type, \
@@ -746,9 +812,6 @@ void kunit_do_assertion(struct kunit *test,
fmt, \
##__VA_ARGS__)
-#define KUNIT_FALSE_ASSERTION(test, assert_type, condition) \
- KUNIT_FALSE_MSG_ASSERTION(test, assert_type, condition, NULL)
-
/*
* A factory macro for defining the assertions and expectations for the basic
* comparisons defined for the built in types.
@@ -765,7 +828,7 @@ void kunit_do_assertion(struct kunit *test,
*/
#define KUNIT_BASE_BINARY_ASSERTION(test, \
assert_class, \
- ASSERT_CLASS_INIT, \
+ format_func, \
assert_type, \
left, \
op, \
@@ -773,354 +836,59 @@ void kunit_do_assertion(struct kunit *test,
fmt, \
...) \
do { \
- typeof(left) __left = (left); \
- typeof(right) __right = (right); \
- ((void)__typecheck(__left, __right)); \
+ const typeof(left) __left = (left); \
+ const typeof(right) __right = (right); \
+ static const struct kunit_binary_assert_text __text = { \
+ .operation = #op, \
+ .left_text = #left, \
+ .right_text = #right, \
+ }; \
+ \
+ _KUNIT_SAVE_LOC(test); \
+ if (likely(__left op __right)) \
+ break; \
\
- KUNIT_ASSERTION(test, \
- __left op __right, \
- assert_class, \
- ASSERT_CLASS_INIT(test, \
- assert_type, \
- #op, \
- #left, \
- __left, \
- #right, \
- __right), \
- fmt, \
- ##__VA_ARGS__); \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ assert_class, \
+ format_func, \
+ KUNIT_INIT_ASSERT(.text = &__text, \
+ .left_value = __left, \
+ .right_value = __right), \
+ fmt, \
+ ##__VA_ARGS__); \
} while (0)
-#define KUNIT_BASE_EQ_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, ==, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BASE_NE_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, !=, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BASE_LT_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, <, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BASE_LE_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, <=, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BASE_GT_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
+#define KUNIT_BINARY_INT_ASSERTION(test, \
+ assert_type, \
+ left, \
+ op, \
+ right, \
+ fmt, \
...) \
KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
+ kunit_binary_assert, \
+ kunit_binary_assert_format, \
assert_type, \
- left, >, right, \
+ left, op, right, \
fmt, \
##__VA_ARGS__)
-#define KUNIT_BASE_GE_MSG_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, \
- right, \
- fmt, \
+#define KUNIT_BINARY_PTR_ASSERTION(test, \
+ assert_type, \
+ left, \
+ op, \
+ right, \
+ fmt, \
...) \
KUNIT_BASE_BINARY_ASSERTION(test, \
- assert_class, \
- ASSERT_CLASS_INIT, \
- assert_type, \
- left, >=, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_EQ_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_EQ_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_EQ_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_EQ_MSG_ASSERTION(test, \
- kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_PTR_EQ_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_NE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_NE_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_NE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_NE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_NE_MSG_ASSERTION(test, \
- kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_PTR_NE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_LT_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_LT_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_LT_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_LT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_LT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_LT_MSG_ASSERTION(test, \
- kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_PTR_LT_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_LT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_LE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_LE_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_LE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_LE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_LE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_LE_MSG_ASSERTION(test, \
- kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_PTR_LE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_LE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_GT_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_GT_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_GT_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_GT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_GT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_GT_MSG_ASSERTION(test, \
kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
+ kunit_binary_ptr_assert_format, \
assert_type, \
- left, \
- right, \
+ left, op, right, \
fmt, \
##__VA_ARGS__)
-#define KUNIT_BINARY_PTR_GT_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_GT_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_GE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
- KUNIT_BASE_GE_MSG_ASSERTION(test, \
- kunit_binary_assert, \
- KUNIT_INIT_BINARY_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_GE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_GE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_PTR_GE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BASE_GE_MSG_ASSERTION(test, \
- kunit_binary_ptr_assert, \
- KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_PTR_GE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_PTR_GE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
#define KUNIT_BINARY_STR_ASSERTION(test, \
assert_type, \
left, \
@@ -1129,60 +897,64 @@ do { \
fmt, \
...) \
do { \
- typeof(left) __left = (left); \
- typeof(right) __right = (right); \
+ const char *__left = (left); \
+ const char *__right = (right); \
+ static const struct kunit_binary_assert_text __text = { \
+ .operation = #op, \
+ .left_text = #left, \
+ .right_text = #right, \
+ }; \
+ \
+ _KUNIT_SAVE_LOC(test); \
+ if (likely((__left) && (__right) && (strcmp(__left, __right) op 0))) \
+ break; \
\
- KUNIT_ASSERTION(test, \
- strcmp(__left, __right) op 0, \
- kunit_binary_str_assert, \
- KUNIT_INIT_BINARY_STR_ASSERT_STRUCT(test, \
- assert_type, \
- #op, \
- #left, \
- __left, \
- #right, \
- __right), \
- fmt, \
- ##__VA_ARGS__); \
+ \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_binary_str_assert, \
+ kunit_binary_str_assert_format, \
+ KUNIT_INIT_ASSERT(.text = &__text, \
+ .left_value = __left, \
+ .right_value = __right), \
+ fmt, \
+ ##__VA_ARGS__); \
} while (0)
-#define KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BINARY_STR_ASSERTION(test, \
- assert_type, \
- left, ==, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_STR_EQ_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
-
-#define KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- fmt, \
- ...) \
- KUNIT_BINARY_STR_ASSERTION(test, \
- assert_type, \
- left, !=, right, \
- fmt, \
- ##__VA_ARGS__)
-
-#define KUNIT_BINARY_STR_NE_ASSERTION(test, assert_type, left, right) \
- KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
- assert_type, \
- left, \
- right, \
- NULL)
+#define KUNIT_MEM_ASSERTION(test, \
+ assert_type, \
+ left, \
+ op, \
+ right, \
+ size_, \
+ fmt, \
+ ...) \
+do { \
+ const void *__left = (left); \
+ const void *__right = (right); \
+ const size_t __size = (size_); \
+ static const struct kunit_binary_assert_text __text = { \
+ .operation = #op, \
+ .left_text = #left, \
+ .right_text = #right, \
+ }; \
+ \
+ _KUNIT_SAVE_LOC(test); \
+ if (likely(__left && __right)) \
+ if (likely(memcmp(__left, __right, __size) op 0)) \
+ break; \
+ \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_mem_assert, \
+ kunit_mem_assert_format, \
+ KUNIT_INIT_ASSERT(.text = &__text, \
+ .left_value = __left, \
+ .right_value = __right, \
+ .size = __size), \
+ fmt, \
+ ##__VA_ARGS__); \
+} while (0)
#define KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
assert_type, \
@@ -1190,25 +962,21 @@ do { \
fmt, \
...) \
do { \
- typeof(ptr) __ptr = (ptr); \
+ const typeof(ptr) __ptr = (ptr); \
+ \
+ _KUNIT_SAVE_LOC(test); \
+ if (!IS_ERR_OR_NULL(__ptr)) \
+ break; \
\
- KUNIT_ASSERTION(test, \
- !IS_ERR_OR_NULL(__ptr), \
- kunit_ptr_not_err_assert, \
- KUNIT_INIT_PTR_NOT_ERR_STRUCT(test, \
- assert_type, \
- #ptr, \
- __ptr), \
- fmt, \
- ##__VA_ARGS__); \
+ _KUNIT_FAILED(test, \
+ assert_type, \
+ kunit_ptr_not_err_assert, \
+ kunit_ptr_not_err_assert_format, \
+ KUNIT_INIT_ASSERT(.text = #ptr, .value = __ptr), \
+ fmt, \
+ ##__VA_ARGS__); \
} while (0)
-#define KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, assert_type, ptr) \
- KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
- assert_type, \
- ptr, \
- NULL)
-
/**
* KUNIT_EXPECT_TRUE() - Causes a test failure when the expression is not true.
* @test: The test context object.
@@ -1221,7 +989,7 @@ do { \
* *expectation failure*.
*/
#define KUNIT_EXPECT_TRUE(test, condition) \
- KUNIT_TRUE_ASSERTION(test, KUNIT_EXPECTATION, condition)
+ KUNIT_EXPECT_TRUE_MSG(test, condition, NULL)
#define KUNIT_EXPECT_TRUE_MSG(test, condition, fmt, ...) \
KUNIT_TRUE_MSG_ASSERTION(test, \
@@ -1240,7 +1008,7 @@ do { \
* KUNIT_EXPECT_TRUE() for more information.
*/
#define KUNIT_EXPECT_FALSE(test, condition) \
- KUNIT_FALSE_ASSERTION(test, KUNIT_EXPECTATION, condition)
+ KUNIT_EXPECT_FALSE_MSG(test, condition, NULL)
#define KUNIT_EXPECT_FALSE_MSG(test, condition, fmt, ...) \
KUNIT_FALSE_MSG_ASSERTION(test, \
@@ -1261,15 +1029,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_EQ(test, left, right) \
- KUNIT_BINARY_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_EQ_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_EQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
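A minimal sketch of both forms, assuming a hypothetical add() under test:

	KUNIT_EXPECT_EQ(test, add(1, 1), 2);
	KUNIT_EXPECT_EQ_MSG(test, add(1, 1), 2, "add(1, 1) = %d", add(1, 1));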
/**
* KUNIT_EXPECT_PTR_EQ() - Expects that pointers @left and @right are equal.
@@ -1283,18 +1050,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_PTR_EQ(test, left, right) \
- KUNIT_BINARY_PTR_EQ_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right)
+ KUNIT_EXPECT_PTR_EQ_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_PTR_EQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_NE() - An expectation that @left and @right are not equal.
@@ -1308,15 +1071,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_NE(test, left, right) \
- KUNIT_BINARY_NE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_NE_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_NE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_NE_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_PTR_NE() - Expects that pointers @left and @right are not equal.
@@ -1330,18 +1092,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_PTR_NE(test, left, right) \
- KUNIT_BINARY_PTR_NE_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right)
+ KUNIT_EXPECT_PTR_NE_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_PTR_NE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_LT() - An expectation that @left is less than @right.
@@ -1355,15 +1113,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_LT(test, left, right) \
- KUNIT_BINARY_LT_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_LT_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_LT_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_LT_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, <, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_LE() - Expects that @left is less than or equal to @right.
@@ -1377,15 +1134,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_LE(test, left, right) \
- KUNIT_BINARY_LE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_LE_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_LE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_LE_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, <=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_GT() - An expectation that @left is greater than @right.
@@ -1399,15 +1155,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_GT(test, left, right) \
- KUNIT_BINARY_GT_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_GT_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_GT_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_GT_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, >, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_GE() - Expects that @left is greater than or equal to @right.
@@ -1421,15 +1176,14 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_GE(test, left, right) \
- KUNIT_BINARY_GE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_GE_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_GE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_GE_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, >=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_STREQ() - Expects that strings @left and @right are equal.
@@ -1443,15 +1197,14 @@ do { \
* for more information.
*/
#define KUNIT_EXPECT_STREQ(test, left, right) \
- KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_STREQ_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_STREQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_STR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_EXPECT_STRNEQ() - Expects that strings @left and @right are not equal.
@@ -1465,15 +1218,110 @@ do { \
* for more information.
*/
#define KUNIT_EXPECT_STRNEQ(test, left, right) \
- KUNIT_BINARY_STR_NE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+ KUNIT_EXPECT_STRNEQ_MSG(test, left, right, NULL)
#define KUNIT_EXPECT_STRNEQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
- KUNIT_EXPECTATION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_STR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
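A minimal sketch of the string expectations, assuming a hypothetical buffer
filled by the code under test:

	KUNIT_EXPECT_STREQ(test, buf, "expected contents");
	KUNIT_EXPECT_STRNEQ(test, buf, "");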
+/**
+ * KUNIT_EXPECT_MEMEQ() - Expects that the first @size bytes of @left and @right are equal.
+ * @test: The test context object.
+ * @left: An arbitrary expression that evaluates to the specified size.
+ * @right: An arbitrary expression that evaluates to the specified size.
+ * @size: Number of bytes compared.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are
+ * equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, !memcmp((@left), (@right), (@size))). See
+ * KUNIT_EXPECT_TRUE() for more information.
+ *
+ * Although this expectation works for any memory block, it is not recommended
+ * for comparing more structured data, such as structs. This expectation is
+ * recommended for comparing, for example, data arrays.
+ */
+#define KUNIT_EXPECT_MEMEQ(test, left, right, size) \
+ KUNIT_EXPECT_MEMEQ_MSG(test, left, right, size, NULL)
+
+#define KUNIT_EXPECT_MEMEQ_MSG(test, left, right, size, fmt, ...) \
+ KUNIT_MEM_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, ==, right, \
+ size, \
+ fmt, \
+ ##__VA_ARGS__)
+
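A minimal sketch, assuming a hypothetical fill_buffer() under test:

	static void my_mem_test(struct kunit *test)
	{
		static const u8 expected[] = { 0xde, 0xad, 0xbe, 0xef };
		u8 actual[sizeof(expected)];

		fill_buffer(actual);	/* hypothetical code under test */
		KUNIT_EXPECT_MEMEQ(test, actual, expected, sizeof(expected));
	}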
+/**
+ * KUNIT_EXPECT_MEMNEQ() - Expects that the first @size bytes of @left and @right are not equal.
+ * @test: The test context object.
+ * @left: An arbitrary expression that evaluates to the specified size.
+ * @right: An arbitrary expression that evaluates to the specified size.
+ * @size: Number of bytes compared.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are
+ * not equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, memcmp((@left), (@right), (@size))). See
+ * KUNIT_EXPECT_TRUE() for more information.
+ *
+ * Although this expectation works for any memory block, it is not recommended
+ * for comparing more structured data, such as structs. This expectation is
+ * recommended for comparing, for example, data arrays.
+ */
+#define KUNIT_EXPECT_MEMNEQ(test, left, right, size) \
+ KUNIT_EXPECT_MEMNEQ_MSG(test, left, right, size, NULL)
+
+#define KUNIT_EXPECT_MEMNEQ_MSG(test, left, right, size, fmt, ...) \
+ KUNIT_MEM_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, !=, right, \
+ size, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_NULL() - Expects that @ptr is null.
+ * @test: The test context object.
+ * @ptr: an arbitrary pointer.
+ *
+ * Sets an expectation that the value that @ptr evaluates to is null. This is
+ * semantically equivalent to KUNIT_EXPECT_PTR_EQ(@test, ptr, NULL).
+ * See KUNIT_EXPECT_TRUE() for more information.
+ */
+#define KUNIT_EXPECT_NULL(test, ptr) \
+ KUNIT_EXPECT_NULL_MSG(test, \
+ ptr, \
+ NULL)
+
+#define KUNIT_EXPECT_NULL_MSG(test, ptr, fmt, ...) \
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ ptr, ==, NULL, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_NOT_NULL() - Expects that @ptr is not null.
+ * @test: The test context object.
+ * @ptr: an arbitrary pointer.
+ *
+ * Sets an expectation that the value that @ptr evaluates to is not null. This
+ * is semantically equivalent to KUNIT_EXPECT_PTR_NE(@test, ptr, NULL).
+ * See KUNIT_EXPECT_TRUE() for more information.
+ */
+#define KUNIT_EXPECT_NOT_NULL(test, ptr) \
+ KUNIT_EXPECT_NOT_NULL_MSG(test, \
+ ptr, \
+ NULL)
+
+#define KUNIT_EXPECT_NOT_NULL_MSG(test, ptr, fmt, ...) \
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ ptr, !=, NULL, \
+ fmt, \
+ ##__VA_ARGS__)
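A minimal sketch of the null-pointer expectations, assuming a test-managed
allocation helper such as kunit_kzalloc() remains available:

	void *buf = kunit_kzalloc(test, 16, GFP_KERNEL);

	KUNIT_EXPECT_NOT_NULL(test, buf);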
/**
* KUNIT_EXPECT_NOT_ERR_OR_NULL() - Expects that @ptr is not null and not err.
@@ -1486,7 +1334,7 @@ do { \
* more information.
*/
#define KUNIT_EXPECT_NOT_ERR_OR_NULL(test, ptr) \
- KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, KUNIT_EXPECTATION, ptr)
+ KUNIT_EXPECT_NOT_ERR_OR_NULL_MSG(test, ptr, NULL)
#define KUNIT_EXPECT_NOT_ERR_OR_NULL_MSG(test, ptr, fmt, ...) \
KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
@@ -1495,7 +1343,18 @@ do { \
fmt, \
##__VA_ARGS__)
-#define KUNIT_ASSERT_FAILURE(test, fmt, ...) \
+/**
+ * KUNIT_FAIL_AND_ABORT() - Always causes a test to fail and abort when evaluated.
+ * @test: The test context object.
+ * @fmt: an informational message to be printed when the assertion is made.
+ * @...: string format arguments.
+ *
+ * The opposite of KUNIT_SUCCEED(), it is an assertion that always fails. In
+ * other words, it always results in a failed assertion, and consequently
+ * always causes the test case to fail and abort when evaluated.
+ * See KUNIT_ASSERT_TRUE() for more information.
+ */
+#define KUNIT_FAIL_AND_ABORT(test, fmt, ...) \
KUNIT_FAIL_ASSERTION(test, KUNIT_ASSERTION, fmt, ##__VA_ARGS__)
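A minimal sketch of failing hard on a case that should be unreachable (the
switch and its states are hypothetical):

	switch (state) {
	case STATE_A:
		break;
	default:
		KUNIT_FAIL_AND_ABORT(test, "unexpected state %d", state);
	}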
/**
@@ -1510,7 +1369,7 @@ do { \
* this is otherwise known as an *assertion failure*.
*/
#define KUNIT_ASSERT_TRUE(test, condition) \
- KUNIT_TRUE_ASSERTION(test, KUNIT_ASSERTION, condition)
+ KUNIT_ASSERT_TRUE_MSG(test, condition, NULL)
#define KUNIT_ASSERT_TRUE_MSG(test, condition, fmt, ...) \
KUNIT_TRUE_MSG_ASSERTION(test, \
@@ -1529,7 +1388,7 @@ do { \
* (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_FALSE(test, condition) \
- KUNIT_FALSE_ASSERTION(test, KUNIT_ASSERTION, condition)
+ KUNIT_ASSERT_FALSE_MSG(test, condition, NULL)
#define KUNIT_ASSERT_FALSE_MSG(test, condition, fmt, ...) \
KUNIT_FALSE_MSG_ASSERTION(test, \
@@ -1549,15 +1408,14 @@ do { \
* failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_EQ(test, left, right) \
- KUNIT_BINARY_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_EQ_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_EQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_PTR_EQ() - Asserts that pointers @left and @right are equal.
@@ -1570,15 +1428,14 @@ do { \
* failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_PTR_EQ(test, left, right) \
- KUNIT_BINARY_PTR_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_PTR_EQ_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_PTR_EQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_NE() - An assertion that @left and @right are not equal.
@@ -1591,15 +1448,14 @@ do { \
* failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_NE(test, left, right) \
- KUNIT_BINARY_NE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_NE_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_NE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_NE_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_PTR_NE() - Asserts that pointers @left and @right are not equal.
@@ -1613,15 +1469,14 @@ do { \
* failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_PTR_NE(test, left, right) \
- KUNIT_BINARY_PTR_NE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_PTR_NE_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_PTR_NE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_LT() - An assertion that @left is less than @right.
* @test: The test context object.
@@ -1634,15 +1489,14 @@ do { \
* is not met.
*/
#define KUNIT_ASSERT_LT(test, left, right) \
- KUNIT_BINARY_LT_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_LT_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_LT_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_LT_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, <, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_LE() - An assertion that @left is less than or equal to @right.
* @test: The test context object.
@@ -1655,15 +1509,14 @@ do { \
* KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_LE(test, left, right) \
- KUNIT_BINARY_LE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_LE_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_LE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_LE_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, <=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_GT() - An assertion that @left is greater than @right.
@@ -1677,15 +1530,14 @@ do { \
* is not met.
*/
#define KUNIT_ASSERT_GT(test, left, right) \
- KUNIT_BINARY_GT_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_GT_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_GT_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_GT_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, >, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_GE() - Assertion that @left is greater than or equal to @right.
@@ -1699,15 +1551,14 @@ do { \
* is not met.
*/
#define KUNIT_ASSERT_GE(test, left, right) \
- KUNIT_BINARY_GE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_GE_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_GE_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_GE_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_INT_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, >=, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_STREQ() - An assertion that strings @left and @right are equal.
@@ -1720,37 +1571,131 @@ do { \
* assertion failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_STREQ(test, left, right) \
- KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_STREQ_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_STREQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_STR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
/**
- * KUNIT_ASSERT_STRNEQ() - Expects that strings @left and @right are not equal.
+ * KUNIT_ASSERT_STRNEQ() - An assertion that strings @left and @right are not equal.
* @test: The test context object.
* @left: an arbitrary expression that evaluates to a null terminated string.
* @right: an arbitrary expression that evaluates to a null terminated string.
*
- * Sets an expectation that the values that @left and @right evaluate to are
+ * Sets an assertion that the values that @left and @right evaluate to are
* not equal. This is semantically equivalent to
* KUNIT_ASSERT_TRUE(@test, strcmp((@left), (@right))). See KUNIT_ASSERT_TRUE()
* for more information.
*/
#define KUNIT_ASSERT_STRNEQ(test, left, right) \
- KUNIT_BINARY_STR_NE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+ KUNIT_ASSERT_STRNEQ_MSG(test, left, right, NULL)
#define KUNIT_ASSERT_STRNEQ_MSG(test, left, right, fmt, ...) \
- KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
- KUNIT_ASSERTION, \
- left, \
- right, \
- fmt, \
- ##__VA_ARGS__)
+ KUNIT_BINARY_STR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_MEMEQ() - Asserts that the first @size bytes of @left and @right are equal.
+ * @test: The test context object.
+ * @left: An arbitrary expression that evaluates to the specified size.
+ * @right: An arbitrary expression that evaluates to the specified size.
+ * @size: Number of bytes compared.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are
+ * equal. This is semantically equivalent to
+ * KUNIT_ASSERT_TRUE(@test, !memcmp((@left), (@right), (@size))). See
+ * KUNIT_ASSERT_TRUE() for more information.
+ *
+ * Although this assertion works for any memory block, it is not recommended
+ * for comparing more structured data, such as structs. This assertion is
+ * recommended for comparing, for example, data arrays.
+ */
+#define KUNIT_ASSERT_MEMEQ(test, left, right, size) \
+ KUNIT_ASSERT_MEMEQ_MSG(test, left, right, size, NULL)
+
+#define KUNIT_ASSERT_MEMEQ_MSG(test, left, right, size, fmt, ...) \
+ KUNIT_MEM_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, ==, right, \
+ size, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_MEMNEQ() - Asserts that the first @size bytes of @left and @right are not equal.
+ * @test: The test context object.
+ * @left: An arbitrary expression that evaluates to the specified size.
+ * @right: An arbitrary expression that evaluates to the specified size.
+ * @size: Number of bytes compared.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are
+ * not equal. This is semantically equivalent to
+ * KUNIT_ASSERT_TRUE(@test, memcmp((@left), (@right), (@size))). See
+ * KUNIT_ASSERT_TRUE() for more information.
+ *
+ * Although this assertion works for any memory block, it is not recommended
+ * for comparing more structured data, such as structs. This assertion is
+ * recommended for comparing, for example, data arrays.
+ */
+#define KUNIT_ASSERT_MEMNEQ(test, left, right, size) \
+ KUNIT_ASSERT_MEMNEQ_MSG(test, left, right, size, NULL)
+
+#define KUNIT_ASSERT_MEMNEQ_MSG(test, left, right, size, fmt, ...) \
+ KUNIT_MEM_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, !=, right, \
+ size, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_NULL() - Asserts that the pointer @ptr is null.
+ * @test: The test context object.
+ * @ptr: an arbitrary pointer.
+ *
+ * Sets an assertion that the value that @ptr evaluates to is null. This is
+ * the same as KUNIT_EXPECT_NULL(), except it causes an assertion
+ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_NULL(test, ptr) \
+ KUNIT_ASSERT_NULL_MSG(test, \
+ ptr, \
+ NULL)
+
+#define KUNIT_ASSERT_NULL_MSG(test, ptr, fmt, ...) \
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ ptr, ==, NULL, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_NOT_NULL() - Asserts that the pointer @ptr is not null.
+ * @test: The test context object.
+ * @ptr: an arbitrary pointer.
+ *
+ * Sets an assertion that the value that @ptr evaluates to is not null. This
+ * is the same as KUNIT_EXPECT_NOT_NULL(), except it causes an assertion
+ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_NOT_NULL(test, ptr) \
+ KUNIT_ASSERT_NOT_NULL_MSG(test, \
+ ptr, \
+ NULL)
+
+#define KUNIT_ASSERT_NOT_NULL_MSG(test, ptr, fmt, ...) \
+ KUNIT_BINARY_PTR_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ ptr, !=, NULL, \
+ fmt, \
+ ##__VA_ARGS__)
/**
* KUNIT_ASSERT_NOT_ERR_OR_NULL() - Assertion that @ptr is not null and not err.
@@ -1763,7 +1708,7 @@ do { \
* KUNIT_ASSERT_TRUE()) when the assertion is not met.
*/
#define KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr) \
- KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, KUNIT_ASSERTION, ptr)
+ KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, NULL)
#define KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, fmt, ...) \
KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
@@ -1781,9 +1726,12 @@ do { \
* Define function @name_gen_params which uses @array to generate parameters.
*/
#define KUNIT_ARRAY_PARAM(name, array, get_desc) \
- static const void *name##_gen_params(const void *prev, char *desc) \
+ static const void *name##_gen_params(struct kunit *test, \
+ const void *prev, char *desc) \
{ \
typeof((array)[0]) *__next = prev ? ((typeof(__next)) prev) + 1 : (array); \
+ if (!prev) \
+ kunit_register_params_array(test, array, ARRAY_SIZE(array), NULL); \
if (__next - (array) < ARRAY_SIZE((array))) { \
void (*__get_desc)(typeof(__next), char *) = get_desc; \
if (__get_desc) \
@@ -1793,4 +1741,57 @@ do { \
return NULL; \
}
+/**
+ * KUNIT_ARRAY_PARAM_DESC() - Define test parameter generator from an array.
+ * @name: prefix for the test parameter generator function.
+ * @array: array of test parameters.
+ * @desc_member: structure member from array element to use as description
+ *
+ * Define function @name_gen_params which uses @array to generate parameters.
+ */
+#define KUNIT_ARRAY_PARAM_DESC(name, array, desc_member) \
+ static const void *name##_gen_params(struct kunit *test, \
+ const void *prev, char *desc) \
+ { \
+ typeof((array)[0]) *__next = prev ? ((typeof(__next)) prev) + 1 : (array); \
+ if (!prev) \
+ kunit_register_params_array(test, array, ARRAY_SIZE(array), NULL); \
+ if (__next - (array) < ARRAY_SIZE((array))) { \
+ strscpy(desc, __next->desc_member, KUNIT_PARAM_DESC_SIZE); \
+ return __next; \
+ } \
+ return NULL; \
+ }
+
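A minimal sketch of defining a parameter generator from an array
(struct my_case and my_cases are hypothetical):

	struct my_case { int in, out; const char *desc; };

	static const struct my_case my_cases[] = {
		{ .in = 1, .out = 2, .desc = "doubles one" },
		{ .in = 3, .out = 6, .desc = "doubles three" },
	};
	KUNIT_ARRAY_PARAM_DESC(my, my_cases, desc);
	/* my_gen_params can then be passed to KUNIT_CASE_PARAM(). */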
+/**
+ * kunit_register_params_array() - Register parameter array for a KUnit test.
+ * @test: The KUnit test structure to which parameters will be added.
+ * @array: An array of test parameters.
+ * @param_count: Number of parameters.
+ * @get_desc: Function that generates a string description for a given parameter
+ * element.
+ *
+ * This macro initializes @test's parameter array data, storing information
+ * including the parameter array, its count, the element size, and the parameter
+ * description function within `test->params_array`.
+ *
+ * Note: If using this macro in param_init(), kunit_array_gen_params()
+ * will then need to be manually provided as the parameter generator function to
+ * KUNIT_CASE_PARAM_WITH_INIT(). kunit_array_gen_params() is a KUnit
+ * function that uses the registered array to generate parameters.
+ */
+#define kunit_register_params_array(test, array, param_count, get_desc) \
+ do { \
+ struct kunit *_test = (test); \
+ const typeof((array)[0]) * _params_ptr = &(array)[0]; \
+ _test->params_array.params = _params_ptr; \
+ _test->params_array.num_params = (param_count); \
+ _test->params_array.elem_size = sizeof(*_params_ptr); \
+ _test->params_array.get_description = (get_desc); \
+ } while (0)
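A minimal sketch of registering the array from the param_init hook mentioned
in the note above (my_param_init, its int-returning signature, and my_cases
are assumptions, not defined by this patch):

	static int my_param_init(struct kunit *test)
	{
		kunit_register_params_array(test, my_cases,
					    ARRAY_SIZE(my_cases), NULL);
		return 0;
	}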
+
+// TODO(dlatypov@google.com): consider eventually migrating users to explicitly
+// include resource.h themselves if they need it.
+#include <kunit/resource.h>
+
#endif /* _KUNIT_TEST_H */
diff --git a/include/kunit/try-catch.h b/include/kunit/try-catch.h
index c507dd43119d..d4e1a5b98ed6 100644
--- a/include/kunit/try-catch.h
+++ b/include/kunit/try-catch.h
@@ -14,13 +14,11 @@
typedef void (*kunit_try_catch_func_t)(void *);
-struct completion;
struct kunit;
/**
* struct kunit_try_catch - provides a generic way to run code which might fail.
* @test: The test case that is currently being executed.
- * @try_completion: Completion that the control thread waits on while test runs.
* @try_result: Contains any errno obtained while running test case.
* @try: The function, the test case, to attempt to run.
* @catch: The function called if @try bails out.
@@ -46,10 +44,10 @@ struct kunit;
struct kunit_try_catch {
/* private: internal use only. */
struct kunit *test;
- struct completion *try_completion;
int try_result;
kunit_try_catch_func_t try;
kunit_try_catch_func_t catch;
+ unsigned long timeout;
void *context;
};
diff --git a/include/kunit/visibility.h b/include/kunit/visibility.h
new file mode 100644
index 000000000000..7c34c8ffcf3b
--- /dev/null
+++ b/include/kunit/visibility.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit API to allow symbols to be conditionally visible during KUnit
+ * testing
+ *
+ * Copyright (C) 2022, Google LLC.
+ * Author: Rae Moar <rmoar@google.com>
+ */
+
+#ifndef _KUNIT_VISIBILITY_H
+#define _KUNIT_VISIBILITY_H
+
+#if IS_ENABLED(CONFIG_KUNIT)
+ /**
+ * VISIBLE_IF_KUNIT - A macro that sets symbols to be static if
+ * CONFIG_KUNIT is not enabled. Otherwise, if CONFIG_KUNIT is enabled,
+ * there is no change to the symbol definition.
+ */
+ #define VISIBLE_IF_KUNIT
+ /**
+ * EXPORT_SYMBOL_IF_KUNIT(symbol) - Exports symbol into
+ * EXPORTED_FOR_KUNIT_TESTING namespace only if CONFIG_KUNIT is
+ * enabled. Must use MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING")
+ * in the test file in order to use the symbols.
+ * @symbol: the symbol identifier to export
+ */
+ #define EXPORT_SYMBOL_IF_KUNIT(symbol) EXPORT_SYMBOL_NS(symbol, "EXPORTED_FOR_KUNIT_TESTING")
+#else
+ #define VISIBLE_IF_KUNIT static
+ #define EXPORT_SYMBOL_IF_KUNIT(symbol)
+#endif
+
+#endif /* _KUNIT_VISIBILITY_H */
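A minimal sketch of how these two macros are typically paired, assuming a
hypothetical internal helper do_internal_thing():

	/* In the code under test: */
	VISIBLE_IF_KUNIT int do_internal_thing(int x)
	{
		return x + 1;
	}
	EXPORT_SYMBOL_IF_KUNIT(do_internal_thing);

	/* In the test module: */
	MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");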
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 51c19381108c..7310841f4512 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -13,6 +13,9 @@
enum kvm_arch_timers {
TIMER_PTIMER,
TIMER_VTIMER,
+ NR_KVM_EL0_TIMERS,
+ TIMER_HVTIMER = NR_KVM_EL0_TIMERS,
+ TIMER_HPTIMER,
NR_KVM_TIMERS
};
@@ -21,17 +24,39 @@ enum kvm_arch_timer_regs {
TIMER_REG_CVAL,
TIMER_REG_TVAL,
TIMER_REG_CTL,
+ TIMER_REG_VOFF,
};
-struct arch_timer_context {
- struct kvm_vcpu *vcpu;
+struct arch_timer_offset {
+ /*
+ * If set, pointer to one of the offsets in the kvm's offset
+ * structure. If NULL, assume a zero offset.
+ */
+ u64 *vm_offset;
+ /*
+ * If set, pointer to one of the offsets in the vcpu's sysreg
+ * array. If NULL, assume a zero offset.
+ */
+ u64 *vcpu_offset;
+};
- /* Timer IRQ */
- struct kvm_irq_level irq;
+struct arch_timer_vm_data {
+ /* Offset applied to the virtual timer/counter */
+ u64 voffset;
+ /* Offset applied to the physical timer/counter */
+ u64 poffset;
+ /* The PPI for each timer, global to the VM */
+ u8 ppi[NR_KVM_TIMERS];
+};
+
+struct arch_timer_context {
/* Emulated Timer (may be unused) */
struct hrtimer hrtimer;
+ u64 ns_frac;
+ /* Offset for this counter/timer */
+ struct arch_timer_offset offset;
/*
* We have multiple paths which can save/restore the timer state onto
* the hardware, so we need some way of keeping track of where the
@@ -39,17 +64,27 @@ struct arch_timer_context {
*/
bool loaded;
+ /* Output level of the timer IRQ */
+ struct {
+ bool level;
+ } irq;
+
+ /* Who am I? */
+ enum kvm_arch_timers timer_id;
+
/* Duplicated state from arch_timer.c for convenience */
u32 host_timer_irq;
- u32 host_timer_irq_flags;
};
struct timer_map {
struct arch_timer_context *direct_vtimer;
struct arch_timer_context *direct_ptimer;
+ struct arch_timer_context *emul_vtimer;
struct arch_timer_context *emul_ptimer;
};
+void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map);
+
struct arch_timer_cpu {
struct arch_timer_context timers[NR_KVM_TIMERS];
@@ -60,24 +95,22 @@ struct arch_timer_cpu {
bool enabled;
};
-int kvm_timer_hyp_init(bool);
+int __init kvm_timer_hyp_init(bool has_gic);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
-int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_timer_sync_nested(struct kvm_vcpu *vcpu);
void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_timer_update_run(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
-u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
-int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
+void kvm_timer_init_vm(struct kvm *kvm);
int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
-bool kvm_timer_is_pending(struct kvm_vcpu *vcpu);
-
u64 kvm_phys_timer_read(void);
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu);
@@ -85,14 +118,17 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
void kvm_timer_init_vhe(void);
-bool kvm_arch_timer_get_input_level(int vintid);
-
#define vcpu_timer(v) (&(v)->arch.timer_cpu)
#define vcpu_get_timer(v,t) (&vcpu_timer(v)->timers[(t)])
#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_VTIMER])
#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_PTIMER])
+#define vcpu_hvtimer(v) (&(v)->arch.timer_cpu.timers[TIMER_HVTIMER])
+#define vcpu_hptimer(v) (&(v)->arch.timer_cpu.timers[TIMER_HPTIMER])
-#define arch_timer_ctx_index(ctx) ((ctx) - vcpu_timer((ctx)->vcpu)->timers)
+#define arch_timer_ctx_index(ctx) ((ctx)->timer_id)
+#define timer_context_to_vcpu(ctx) container_of((ctx), struct kvm_vcpu, arch.timer_cpu.timers[(ctx)->timer_id])
+#define timer_vm_data(ctx) (&(timer_context_to_vcpu(ctx)->kvm->arch.timer_data))
+#define timer_irq(ctx) (timer_vm_data(ctx)->ppi[arch_timer_ctx_index(ctx)])
u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
enum kvm_arch_timers tmr,
@@ -106,4 +142,48 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
u32 timer_get_ctl(struct arch_timer_context *ctxt);
u64 timer_get_cval(struct arch_timer_context *ctxt);
+/* CPU HP callbacks */
+void kvm_timer_cpu_up(void);
+void kvm_timer_cpu_down(void);
+
+/* CNTKCTL_EL1 valid bits as of DDI0487J.a */
+#define CNTKCTL_VALID_BITS (BIT(17) | GENMASK_ULL(9, 0))
+
+DECLARE_STATIC_KEY_FALSE(broken_cntvoff_key);
+
+static inline bool has_broken_cntvoff(void)
+{
+ return static_branch_unlikely(&broken_cntvoff_key);
+}
+
+static inline bool has_cntpoff(void)
+{
+ return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
+}
+
+static inline u64 timer_get_offset(struct arch_timer_context *ctxt)
+{
+ u64 offset = 0;
+
+ if (!ctxt)
+ return 0;
+
+ if (ctxt->offset.vm_offset)
+ offset += *ctxt->offset.vm_offset;
+ if (ctxt->offset.vcpu_offset)
+ offset += *ctxt->offset.vcpu_offset;
+
+ return offset;
+}
+
+static inline void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
+{
+ if (!ctxt->offset.vm_offset) {
+ WARN(offset, "timer %d\n", arch_timer_ctx_index(ctxt));
+ return;
+ }
+
+ WRITE_ONCE(*ctxt->offset.vm_offset, offset);
+}
+
#endif
diff --git a/include/kvm/arm_hypercalls.h b/include/kvm/arm_hypercalls.h
index 0e2509d27910..2df152207ccd 100644
--- a/include/kvm/arm_hypercalls.h
+++ b/include/kvm/arm_hypercalls.h
@@ -6,7 +6,7 @@
#include <asm/kvm_emulate.h>
-int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+int kvm_smccc_call_handler(struct kvm_vcpu *vcpu);
static inline u32 smccc_get_function(struct kvm_vcpu *vcpu)
{
@@ -40,4 +40,16 @@ static inline void smccc_set_retval(struct kvm_vcpu *vcpu,
vcpu_set_reg(vcpu, 3, a3);
}
+struct kvm_one_reg;
+
+void kvm_arm_init_hypercalls(struct kvm *kvm);
+void kvm_arm_teardown_hypercalls(struct kvm *kvm);
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+
+int kvm_vm_smccc_has_attr(struct kvm *kvm, struct kvm_device_attr *attr);
+int kvm_vm_smccc_set_attr(struct kvm *kvm, struct kvm_device_attr *attr);
+
#endif
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 864b9997efb2..96754b51b411 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -8,44 +8,46 @@
#define __ASM_ARM_KVM_PMU_H
#include <linux/perf_event.h>
-#include <asm/perf_event.h>
+#include <linux/perf/arm_pmuv3.h>
-#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
-#define ARMV8_PMU_MAX_COUNTER_PAIRS ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
-
-DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
-
-static __always_inline bool kvm_arm_support_pmu_v3(void)
-{
- return static_branch_likely(&kvm_arm_pmu_available);
-}
-
-#ifdef CONFIG_HW_PERF_EVENTS
+#define KVM_ARMV8_PMU_MAX_COUNTERS 32
+#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
u8 idx; /* index into the pmu->pmc array */
struct perf_event *perf_event;
};
+struct kvm_pmu_events {
+ u64 events_host;
+ u64 events_guest;
+};
+
struct kvm_pmu {
+ struct irq_work overflow_work;
+ struct kvm_pmu_events events;
+ struct kvm_pmc pmc[KVM_ARMV8_PMU_MAX_COUNTERS];
int irq_num;
- struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
- DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
bool created;
bool irq_level;
- struct irq_work overflow_work;
};
+struct arm_pmu_entry {
+ struct list_head entry;
+ struct arm_pmu *arm_pmu;
+};
+
+bool kvm_supports_guest_pmuv3(void);
#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
-u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
+u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu);
+u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
@@ -54,6 +56,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
u64 select_idx);
+void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
@@ -61,11 +64,43 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
-int kvm_pmu_probe_pmuver(void);
+
+struct kvm_pmu_events *kvm_get_pmu_events(void);
+void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_resync_el0(void);
+
+#define kvm_vcpu_has_pmu(vcpu) \
+ (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))
+
+/*
+ * Updates the vcpu's view of the pmu events for this cpu.
+ * Must be called before every vcpu run after disabling interrupts, to ensure
+ * that an interrupt cannot fire and update the structure.
+ */
+#define kvm_pmu_update_vcpu_events(vcpu) \
+ do { \
+ if (!has_vhe() && system_supports_pmuv3()) \
+ vcpu->arch.pmu.events = *kvm_get_pmu_events(); \
+ } while (0)
+
+u8 kvm_arm_pmu_get_pmuver_limit(void);
+u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
+int kvm_arm_set_default_pmu(struct kvm *kvm);
+u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);
+
+u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
+bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx);
+void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
#else
struct kvm_pmu {
};
+static inline bool kvm_supports_guest_pmuv3(void)
+{
+ return false;
+}
+
#define kvm_arm_pmu_irq_initialized(v) (false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
u64 select_idx)
@@ -74,15 +109,19 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
u64 select_idx, u64 val) {}
-static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+static inline void kvm_pmu_set_counter_value_user(struct kvm_vcpu *vcpu,
+ u64 select_idx, u64 val) {}
+static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
{
return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
-static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
-static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
-static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
@@ -118,7 +157,42 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
return 0;
}
-static inline int kvm_pmu_probe_pmuver(void) { return 0xf; }
+#define kvm_vcpu_has_pmu(vcpu) ({ false; })
+static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
+static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
+{
+ return 0;
+}
+static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
+{
+ return 0;
+}
+static inline void kvm_vcpu_pmu_resync_el0(void) {}
+
+static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
+{
+ return -ENODEV;
+}
+
+static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
+{
+ return 0;
+}
+
+static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
+{
+ return false;
+}
+
+static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {}
#endif
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
index 5b58bd2fe088..cbaec804eb83 100644
--- a/include/kvm/arm_psci.h
+++ b/include/kvm/arm_psci.h
@@ -13,14 +13,13 @@
#define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1)
#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2)
#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0)
+#define KVM_ARM_PSCI_1_1 PSCI_VERSION(1, 1)
+#define KVM_ARM_PSCI_1_2 PSCI_VERSION(1, 2)
+#define KVM_ARM_PSCI_1_3 PSCI_VERSION(1, 3)
-#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0
+#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_3
-/*
- * We need the KVM pointer independently from the vcpu as we can call
- * this from HYP, and need to apply kern_hyp_va on it...
- */
-static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
+static inline int kvm_psci_version(struct kvm_vcpu *vcpu)
{
/*
* Our PSCI implementation stays the same across versions from
@@ -29,7 +28,7 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
* revisions. It is thus safe to return the latest, unless
* userspace has instructed us otherwise.
*/
- if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
+ if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2)) {
if (vcpu->kvm->arch.psci_version)
return vcpu->kvm->arch.psci_version;
@@ -42,11 +41,4 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
int kvm_psci_call(struct kvm_vcpu *vcpu);
-struct kvm_one_reg;
-
-int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
-int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
-int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-
#endif /* __KVM_ARM_PSCI_H__ */
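Since kvm_psci_version() returns a PSCI_VERSION()-encoded value, callers can compare it directly against the KVM_ARM_PSCI_* constants. A minimal sketch, with the demo_ name hypothetical:

/* Minimal sketch: encoded PSCI versions compare numerically. */
static bool demo_has_psci_1_0(struct kvm_vcpu *vcpu)
{
	/* KVM_ARM_PSCI_LATEST is reported unless userspace overrode it. */
	return kvm_psci_version(vcpu) >= KVM_ARM_PSCI_1_0;
}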
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index ec621180ef09..b261fb3968d0 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -5,12 +5,15 @@
#ifndef __KVM_ARM_VGIC_H
#define __KVM_ARM_VGIC_H
-#include <linux/kernel.h>
+#include <linux/bits.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
+#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/static_key.h>
#include <linux/types.h>
+#include <linux/xarray.h>
#include <kvm/iodev.h>
#include <linux/list.h>
#include <linux/jump_label.h>
@@ -23,7 +26,6 @@
#define VGIC_NR_SGIS 16
#define VGIC_NR_PPIS 16
#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS)
-#define VGIC_MAX_PRIVATE (VGIC_NR_PRIVATE_IRQS - 1)
#define VGIC_MAX_SPI 1019
#define VGIC_MAX_RESERVED 1023
#define VGIC_MIN_LPI 8192
@@ -36,6 +38,7 @@
enum vgic_type {
VGIC_V2, /* Good ol' GICv2 */
VGIC_V3, /* New fancy GICv3 */
+ VGIC_V5, /* Newer, fancier GICv5 */
};
/* same for all guests, as depending only on the _host's_ GIC model */
@@ -56,6 +59,9 @@ struct vgic_global {
/* virtual control interface mapping, HYP VA */
void __iomem *vctrl_hyp;
+ /* Physical CPU interface, kernel VA */
+ void __iomem *gicc_base;
+
/* Number of implemented list registers */
int nr_lr;
@@ -72,9 +78,15 @@ struct vgic_global {
bool has_gicv4;
bool has_gicv4_1;
- /* GIC system register CPU interface */
+ /* Pseudo GICv3 from outer space */
+ bool no_hw_deactivation;
+
+ /* GICv3 system register CPU interface */
struct static_key_false gicv3_cpuif;
+ /* GICv3 compat mode on a GICv5 host */
+ bool has_gcie_v3_compat;
+
u32 ich_vtr_el2;
};
@@ -89,9 +101,30 @@ enum vgic_irq_config {
VGIC_CONFIG_LEVEL
};
+/*
+ * Per-irq ops overriding some common behaviours.
+ *
+ * Always called in a non-preemptible section; the functions can use
+ * kvm_arm_get_running_vcpu() to get the vcpu pointer for private IRQs.
+ */
+struct irq_ops {
+ /* Per interrupt flags for special-cased interrupts */
+ unsigned long flags;
+
+#define VGIC_IRQ_SW_RESAMPLE BIT(0) /* Clear the active state for resampling */
+
+ /*
+ * Callback function pointer to in-kernel devices that can tell us the
+ * state of the input level of a mapped level-triggered IRQ faster than
+ * peeking into the physical GIC.
+ */
+ bool (*get_input_level)(int vintid);
+};
+
struct vgic_irq {
raw_spinlock_t irq_lock; /* Protects the content of the struct */
- struct list_head lpi_list; /* Used to link all LPIs together */
+ u32 intid; /* Guest visible INTID */
+ struct rcu_head rcu;
struct list_head ap_list;
struct kvm_vcpu *vcpu; /* SGIs and PPIs: The VCPU
@@ -105,15 +138,19 @@ struct vgic_irq {
* affinity reg (v3).
*/
- u32 intid; /* Guest visible INTID */
- bool line_level; /* Level only */
- bool pending_latch; /* The pending latch state used to calculate
+ bool pending_release:1; /* Used for LPIs only, unreferenced IRQ
+ * pending a release */
+
+ bool pending_latch:1; /* The pending latch state used to calculate
* the pending state for both level
* and edge triggered IRQs. */
- bool active; /* not used for LPIs */
- bool enabled;
- bool hw; /* Tied to HW IRQ */
- struct kref refcount; /* Used for LPIs */
+ enum vgic_irq_config config:1; /* Level or edge */
+ bool line_level:1; /* Level only */
+ bool enabled:1;
+ bool active:1;
+ bool hw:1; /* Tied to HW IRQ */
+ bool on_lr:1; /* Present in a CPU LR */
+ refcount_t refcount; /* Used for LPIs */
u32 hwintid; /* HW INTID number */
unsigned int host_irq; /* linux irq corresponding to hwintid */
union {
@@ -124,23 +161,18 @@ struct vgic_irq {
u8 active_source; /* GICv2 SGIs only */
u8 priority;
u8 group; /* 0 == group 0, 1 == group 1 */
- enum vgic_irq_config config; /* Level or edge */
- /*
- * Callback function pointer to in-kernel devices that can tell us the
- * state of the input level of mapped level-triggered IRQ faster than
- * peaking into the physical GIC.
- *
- * Always called in non-preemptible section and the functions can use
- * kvm_arm_get_running_vcpu() to get the vcpu pointer for private
- * IRQs.
- */
- bool (*get_input_level)(int vintid);
+ struct irq_ops *ops;
void *owner; /* Opaque pointer to reserve an interrupt
for in-kernel devices. */
};
+static inline bool vgic_irq_needs_resampling(struct vgic_irq *irq)
+{
+ return irq->ops && (irq->ops->flags & VGIC_IRQ_SW_RESAMPLE);
+}
+
struct vgic_register_region;
struct vgic_its;
@@ -188,6 +220,12 @@ struct vgic_its {
struct mutex its_lock;
struct list_head device_list;
struct list_head collection_list;
+
+ /*
+ * Caches the (device_id, event_id) -> vgic_irq translation for
+ * LPIs that are mapped and enabled.
+ */
+ struct xarray translation_cache;
};
struct vgic_state_iter;
@@ -210,6 +248,9 @@ struct vgic_dist {
/* Implementation revision as reported in the GICD_IIDR */
u32 implementation_rev;
+#define KVM_VGIC_IMP_REV_2 2 /* GICv2 restorable groups */
+#define KVM_VGIC_IMP_REV_3 3 /* GICv3 GICR_CTLR.{IW,CES,RWP} */
+#define KVM_VGIC_IMP_REV_LATEST KVM_VGIC_IMP_REV_3
/* Userspace can write to GICv2 IGROUPR */
bool v2_groups_user_writable;
@@ -219,6 +260,12 @@ struct vgic_dist {
int nr_spis;
+ /* The GIC maintenance IRQ for nested hypervisors. */
+ u32 mi_intid;
+
+ /* Track the number of in-flight active SPIs */
+ atomic_t active_spis;
+
/* base addresses in guest physical address space: */
gpa_t vgic_dist_base; /* distributor */
union {
@@ -231,14 +278,19 @@ struct vgic_dist {
/* distributor enabled */
bool enabled;
+ /* Supports SGIs without active state */
+ bool nassgicap;
+
/* Wants SGIs without active state */
bool nassgireq;
struct vgic_irq *spis;
struct vgic_io_device dist_iodev;
+ struct vgic_io_device cpuif_iodev;
bool has_its;
+ bool table_write_in_progress;
/*
* Contains the attributes and gpa of the LPI configuration table.
@@ -248,13 +300,8 @@ struct vgic_dist {
*/
u64 propbaser;
- /* Protects the lpi_list and the count value below. */
- raw_spinlock_t lpi_list_lock;
- struct list_head lpi_list_head;
- int lpi_list_count;
-
- /* LPI translation cache */
- struct list_head lpi_translation_cache;
+#define LPI_XA_MARK_DEBUG_ITER XA_MARK_0
+ struct xarray lpi_xa;
/* used by vgic-debug */
struct vgic_state_iter *iter;
@@ -304,7 +351,7 @@ struct vgic_cpu {
struct vgic_v3_cpu_if vgic_v3;
};
- struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
+ struct vgic_irq *private_irqs;
raw_spinlock_t ap_list_lock; /* Protects the ap_list */
@@ -323,11 +370,12 @@ struct vgic_cpu {
struct vgic_io_device rd_iodev;
struct vgic_redist_region *rdreg;
u32 rdreg_index;
+ atomic_t syncr_busy;
/* Contains the attributes and gpa of the LPI pending tables. */
u64 pendbaser;
-
- bool lpis_enabled;
+ /* GICR_CTLR.{ENABLE_LPIS,RWP} */
+ atomic_t ctlr;
/* Cache guest priority bits */
u32 num_pri_bits;
@@ -338,10 +386,12 @@ struct vgic_cpu {
extern struct static_key_false vgic_v2_cpuif_trap;
extern struct static_key_false vgic_v3_cpuif_trap;
+extern struct static_key_false vgic_v3_has_v2_compat;
-int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
+int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr);
void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
+int kvm_vgic_vcpu_nv_init(struct kvm_vcpu *vcpu);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
@@ -349,22 +399,25 @@ int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_hyp_init(void);
void kvm_vgic_init_cpu_hardware(void);
-int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
- bool level, void *owner);
+int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ unsigned int intid, bool level, void *owner);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
- u32 vintid, bool (*get_input_level)(int vindid));
+ u32 vintid, struct irq_ops *ops);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
+int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
void kvm_vgic_load(struct kvm_vcpu *vcpu);
void kvm_vgic_put(struct kvm_vcpu *vcpu);
-void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
+
+u16 vgic_v3_get_eisr(struct kvm_vcpu *vcpu);
+u16 vgic_v3_get_elrsr(struct kvm_vcpu *vcpu);
+u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) ((k)->arch.vgic.initialized)
-#define vgic_ready(k) ((k)->arch.vgic.ready)
#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \
((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))
@@ -372,6 +425,7 @@ bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
+void kvm_vgic_process_async_update(struct kvm_vcpu *vcpu);
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1);
@@ -399,11 +453,16 @@ struct kvm_kernel_irq_routing_entry;
int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
struct kvm_kernel_irq_routing_entry *irq_entry);
-int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
- struct kvm_kernel_irq_routing_entry *irq_entry);
+void kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq);
int vgic_v4_load(struct kvm_vcpu *vcpu);
void vgic_v4_commit(struct kvm_vcpu *vcpu);
-int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);
+int vgic_v4_put(struct kvm_vcpu *vcpu);
+
+bool vgic_state_is_nested(struct kvm_vcpu *vcpu);
+
+/* CPU HP callbacks */
+void kvm_vgic_cpu_up(void);
+void kvm_vgic_cpu_down(void);
#endif /* __KVM_ARM_VGIC_H */
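The kvm_vgic_map_phys_irq() signature above now takes a struct irq_ops rather than a bare get_input_level callback. A hedged sketch of a caller, with all demo_ names hypothetical:

/* Minimal sketch: mapping a HW IRQ with per-irq ops. */
static bool demo_get_input_level(int vintid)
{
	/* An in-kernel device would report its line state here. */
	return false;
}

static struct irq_ops demo_irq_ops = {
	.flags = VGIC_IRQ_SW_RESAMPLE,	/* clear active state for resampling */
	.get_input_level = demo_get_input_level,
};

static int demo_map_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, u32 vintid)
{
	return kvm_vgic_map_phys_irq(vcpu, host_irq, vintid, &demo_irq_ops);
}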
diff --git a/include/kvm/iodev.h b/include/kvm/iodev.h
index d75fc4365746..56619e33251e 100644
--- a/include/kvm/iodev.h
+++ b/include/kvm/iodev.h
@@ -55,10 +55,4 @@ static inline int kvm_iodevice_write(struct kvm_vcpu *vcpu,
: -EOPNOTSUPP;
}
-static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
-{
- if (dev->ops->destructor)
- dev->ops->destructor(dev);
-}
-
#endif /* __KVM_IODEV_H__ */
diff --git a/include/linux/a.out.h b/include/linux/a.out.h
deleted file mode 100644
index 600cf45645c6..000000000000
--- a/include/linux/a.out.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __A_OUT_GNU_H__
-#define __A_OUT_GNU_H__
-
-#include <uapi/linux/a.out.h>
-
-#ifndef __ASSEMBLY__
-#ifdef linux
-#include <asm/page.h>
-#if defined(__i386__) || defined(__mc68000__)
-#else
-#ifndef SEGMENT_SIZE
-#define SEGMENT_SIZE PAGE_SIZE
-#endif
-#endif
-#endif
-#endif /*__ASSEMBLY__ */
-#endif /* __A_OUT_GNU_H__ */
diff --git a/include/linux/acct.h b/include/linux/acct.h
index bc70e81895c0..2718c4854815 100644
--- a/include/linux/acct.h
+++ b/include/linux/acct.h
@@ -21,7 +21,6 @@
#ifdef CONFIG_BSD_PROCESS_ACCT
struct pid_namespace;
-extern int acct_parm[]; /* for sysctl */
extern void acct_collect(long exitcode, int group_dead);
extern void acct_process(void);
extern void acct_exit_ns(struct pid_namespace *);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c60745f657e9..fbf0c3a65f59 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -8,33 +8,48 @@
#ifndef _LINUX_ACPI_H
#define _LINUX_ACPI_H
+#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/ioport.h> /* for struct resource */
-#include <linux/irqdomain.h>
#include <linux/resource_ext.h>
#include <linux/device.h>
+#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/uuid.h>
+#include <linux/node.h>
+
+struct irq_domain;
+struct irq_domain_ops;
#ifndef _LINUX
#define _LINUX
#endif
#include <acpi/acpi.h>
+#include <acpi/acpi_numa.h>
#ifdef CONFIG_ACPI
#include <linux/list.h>
-#include <linux/mod_devicetable.h>
#include <linux/dynamic_debug.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/fw_table.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
-#include <acpi/acpi_numa.h>
#include <acpi/acpi_io.h>
#include <asm/acpi.h>
+#ifdef CONFIG_ACPI_TABLE_LIB
+#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, "ACPI")
+#define __init_or_acpilib
+#define __initdata_or_acpilib
+#else
+#define EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_acpilib __init
+#define __initdata_or_acpilib __initdata
+#endif
+
static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
{
return adev ? adev->handle : NULL;
@@ -68,19 +83,6 @@ static inline void acpi_free_fwnode_static(struct fwnode_handle *fwnode)
kfree(fwnode);
}
-/**
- * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
- * the PCI-defined class-code information
- *
- * @_cls : the class, subclass, prog-if triple for this device
- * @_msk : the class mask for this device
- *
- * This macro is used to create a struct acpi_device_id that matches a
- * specific PCI class. The .id and .driver_data fields will be left
- * initialized with the default value.
- */
-#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk),
-
static inline bool has_acpi_companion(struct device *dev)
{
return is_acpi_device_node(dev->fwnode);
@@ -105,6 +107,8 @@ enum acpi_irq_model_id {
ACPI_IRQ_MODEL_IOSAPIC,
ACPI_IRQ_MODEL_PLATFORM,
ACPI_IRQ_MODEL_GIC,
+ ACPI_IRQ_MODEL_LPIC,
+ ACPI_IRQ_MODEL_RINTC,
ACPI_IRQ_MODEL_COUNT
};
@@ -129,16 +133,8 @@ enum acpi_address_range_id {
/* Table Handlers */
-union acpi_subtable_headers {
- struct acpi_subtable_header common;
- struct acpi_hmat_structure hmat;
-};
-
typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
-typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
- const unsigned long end);
-
/* Debugger support */
struct acpi_debugger_ops {
@@ -212,12 +208,6 @@ static inline int acpi_debugger_notify_command_complete(void)
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
-struct acpi_subtable_proc {
- int id;
- acpi_tbl_entry_handler handler;
- int count;
-};
-
void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
void __acpi_unmap_table(void __iomem *map, unsigned long size);
int early_acpi_boot_init(void);
@@ -231,24 +221,36 @@ int acpi_locate_initial_tables (void);
void acpi_reserve_initial_tables (void);
void acpi_table_init_complete (void);
int acpi_table_init (void);
+
+static inline struct acpi_table_header *acpi_get_table_pointer(char *signature, u32 instance)
+{
+ struct acpi_table_header *table;
+ acpi_status status = acpi_get_table(signature, instance, &table);
+
+ if (ACPI_FAILURE(status))
+ return ERR_PTR(-ENOENT);
+ return table;
+}
+DEFINE_FREE(acpi_put_table, struct acpi_table_header *, if (!IS_ERR_OR_NULL(_T)) acpi_put_table(_T))
+
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
-int __init acpi_table_parse_entries(char *id, unsigned long table_size,
- int entry_id,
- acpi_tbl_entry_handler handler,
- unsigned int max_entries);
-int __init acpi_table_parse_entries_array(char *id, unsigned long table_size,
- struct acpi_subtable_proc *proc, int proc_num,
- unsigned int max_entries);
+int __init_or_acpilib acpi_table_parse_entries(char *id,
+ unsigned long table_size, int entry_id,
+ acpi_tbl_entry_handler handler, unsigned int max_entries);
+int __init_or_acpilib acpi_table_parse_entries_array(char *id,
+ unsigned long table_size, struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries);
int acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler,
unsigned int max_entries);
+int __init_or_acpilib
+acpi_table_parse_cedt(enum acpi_cedt_type id,
+ acpi_tbl_entry_handler_arg handler_arg, void *arg);
+
int acpi_parse_mcfg (struct acpi_table_header *header);
void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
-/* the following numa functions are architecture-dependent */
-void acpi_numa_slit_init (struct acpi_table_slit *slit);
-
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
#else
static inline void
@@ -257,6 +259,12 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { }
void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
+#if defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH)
+void acpi_arch_dma_setup(struct device *dev);
+#else
+static inline void acpi_arch_dma_setup(struct device *dev) { }
+#endif
+
#ifdef CONFIG_ARM64
void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa);
#else
@@ -264,7 +272,11 @@ static inline void
acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { }
#endif
-int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
+#ifdef CONFIG_RISCV
+void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa);
+#else
+static inline void acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa) { }
+#endif
#ifndef PHYS_CPUID_INVALID
typedef u32 phys_cpuid_t;
@@ -281,6 +293,9 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
return phys_id == PHYS_CPUID_INVALID;
}
+
+int __init acpi_get_madt_revision(void);
+
/* Validate the processor object's proc_id */
bool acpi_duplicate_processor_id(int proc_id);
/* Processor _CTS control */
@@ -306,6 +321,8 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
int acpi_unmap_cpu(int cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+acpi_handle acpi_get_processor_handle(int cpu);
+
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
#endif
@@ -325,14 +342,17 @@ static inline bool acpi_sci_irq_valid(void)
}
extern int sbf_port;
-extern unsigned long acpi_realmode_flags;
int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
+typedef struct fwnode_handle *(*acpi_gsi_domain_disp_fn)(u32);
+
void acpi_set_irq_model(enum acpi_irq_model_id model,
- struct fwnode_handle *fwnode);
+ acpi_gsi_domain_disp_fn fn);
+acpi_gsi_domain_disp_fn acpi_get_gsi_dispatcher(void);
+void acpi_set_gsi_to_irq_fallback(u32 (*)(u32));
struct irq_domain *acpi_irq_create_hierarchy(unsigned int flags,
unsigned int size,
@@ -357,6 +377,7 @@ void acpi_unregister_gsi (u32 gsi);
struct pci_dev;
+struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin);
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
@@ -381,7 +402,9 @@ extern bool acpi_is_pnp_device(struct acpi_device *);
#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
-typedef void (*wmi_notify_handler) (u32 value, void *context);
+typedef void (*wmi_notify_handler) (union acpi_object *data, void *context);
+
+int wmi_instance_count(const char *guid);
extern acpi_status wmi_evaluate_method(const char *guid, u8 instance,
u32 method_id,
@@ -394,7 +417,6 @@ extern acpi_status wmi_set_block(const char *guid, u8 instance,
extern acpi_status wmi_install_notify_handler(const char *guid,
wmi_notify_handler handler, void *data);
extern acpi_status wmi_remove_notify_handler(const char *guid);
-extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out);
extern bool wmi_has_guid(const char *guid);
extern char *wmi_get_acpi_device_uid(const char *guid);
@@ -415,10 +437,27 @@ extern char *wmi_get_acpi_device_uid(const char *guid);
extern char acpi_video_backlight_string[];
extern long acpi_is_video_device(acpi_handle handle);
-extern int acpi_blacklisted(void);
+
extern void acpi_osi_setup(char *str);
extern bool acpi_osi_is_win8(void);
+#ifdef CONFIG_ACPI_THERMAL_LIB
+int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp);
+int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp);
+int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp);
+int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
+#endif
+
+#ifdef CONFIG_ACPI_HMAT
+int acpi_get_genport_coordinates(u32 uid, struct access_coordinate *coord);
+#else
+static inline int acpi_get_genport_coordinates(u32 uid,
+ struct access_coordinate *coord)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
#ifdef CONFIG_ACPI_NUMA
int acpi_map_pxm_to_node(int pxm);
int acpi_get_node(acpi_handle handle);
@@ -457,8 +496,6 @@ static inline int acpi_get_node(acpi_handle handle)
return 0;
}
#endif
-extern int acpi_paddr_to_node(u64 start_addr, u64 size);
-
extern int pnpacpi_disabled;
#define PXM_INVAL (-1)
@@ -469,7 +506,7 @@ bool acpi_dev_resource_address_space(struct acpi_resource *ares,
struct resource_win *win);
bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
struct resource_win *win);
-unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable);
+unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable, u8 wake_capable);
unsigned int acpi_dev_get_irq_type(int triggering, int polarity);
bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
struct resource *res);
@@ -480,6 +517,7 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
void *preproc_data);
int acpi_dev_get_dma_resources(struct acpi_device *adev,
struct list_head *list);
+int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list);
int acpi_dev_filter_resource_type(struct acpi_resource *ares,
unsigned long types);
@@ -496,13 +534,10 @@ int acpi_check_resource_conflict(const struct resource *res);
int acpi_check_region(resource_size_t start, resource_size_t n,
const char *name);
-acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
- u32 level);
-
int acpi_resources_are_enforced(void);
#ifdef CONFIG_HIBERNATION
-void __init acpi_no_s4_hw_signature(void);
+extern int acpi_check_s4_hw_signature;
#endif
#ifdef CONFIG_PM_SLEEP
@@ -526,10 +561,16 @@ struct acpi_osc_context {
acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
-/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
+/* Number of _OSC capability DWORDs depends on bridge type */
+#define OSC_PCI_CAPABILITY_DWORDS 3
+#define OSC_CXL_CAPABILITY_DWORDS 5
+
+/* Indexes into _OSC Capabilities Buffer (DWORDs 2 to 5 are device-specific) */
#define OSC_QUERY_DWORD 0 /* DWORD 1 */
#define OSC_SUPPORT_DWORD 1 /* DWORD 2 */
#define OSC_CONTROL_DWORD 2 /* DWORD 3 */
+#define OSC_EXT_SUPPORT_DWORD 3 /* DWORD 4 */
+#define OSC_EXT_CONTROL_DWORD 4 /* DWORD 5 */
/* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */
#define OSC_QUERY_ENABLE 0x00000001 /* input */
@@ -548,13 +589,23 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_CPCV2_SUPPORT 0x00000040
#define OSC_SB_PCLPI_SUPPORT 0x00000080
#define OSC_SB_OSLPI_SUPPORT 0x00000100
+#define OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT 0x00000200
+#define OSC_SB_OVER_16_PSTATES_SUPPORT 0x00000400
+#define OSC_SB_GED_SUPPORT 0x00000800
#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000
-#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000
+#define OSC_SB_IRQ_RESOURCE_SOURCE_SUPPORT 0x00002000
+#define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000
+#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00020000
#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
+#define OSC_SB_BATTERY_CHARGE_LIMITING_SUPPORT 0x00080000
+#define OSC_SB_PRM_SUPPORT 0x00200000
+#define OSC_SB_FFH_OPR_SUPPORT 0x00400000
extern bool osc_sb_apei_support_acked;
extern bool osc_pc_lpi_support_confirmed;
extern bool osc_sb_native_usb4_support_confirmed;
+extern bool osc_sb_cppc2_support_acked;
+extern bool osc_cpc_flexible_adr_space_confirmed;
/* USB4 Capabilities */
#define OSC_USB_USB3_TUNNELING 0x00000001
@@ -572,7 +623,6 @@ extern u32 osc_sb_native_usb4_control;
#define OSC_PCI_MSI_SUPPORT 0x00000010
#define OSC_PCI_EDR_SUPPORT 0x00000080
#define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100
-#define OSC_PCI_SUPPORT_MASKS 0x0000019f
/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */
#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001
@@ -582,7 +632,29 @@ extern u32 osc_sb_native_usb4_control;
#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
#define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020
#define OSC_PCI_EXPRESS_DPC_CONTROL 0x00000080
-#define OSC_PCI_CONTROL_MASKS 0x000000bf
+
+/* CXL _OSC: Capabilities DWORD 4: Support Field */
+#define OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT 0x00000001
+#define OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT 0x00000002
+#define OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT 0x00000004
+#define OSC_CXL_NATIVE_HP_SUPPORT 0x00000008
+
+/* CXL _OSC: Capabilities DWORD 5: Control Field */
+#define OSC_CXL_ERROR_REPORTING_CONTROL 0x00000001
+
+static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context)
+{
+ u32 *ret = context->ret.pointer;
+
+ return ret[OSC_CONTROL_DWORD];
+}
+
+static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context)
+{
+ u32 *ret = context->ret.pointer;
+
+ return ret[OSC_EXT_CONTROL_DWORD];
+}
#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002
#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004
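The acpi_osc_ctx_get_pci_control()/acpi_osc_ctx_get_cxl_control() helpers above simply index the _OSC return buffer. A sketch of testing one granted control bit after acpi_run_osc(), with the demo_ name hypothetical:

/* Minimal sketch: checking a granted control after _OSC negotiation. */
static bool demo_native_hp_granted(struct acpi_osc_context *context)
{
	/* DWORD 3 of the return buffer carries the granted PCI controls. */
	return acpi_osc_ctx_get_pci_control(context) &
	       OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
}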
@@ -651,25 +723,26 @@ int acpi_match_platform_list(const struct acpi_platform_list *plat);
extern void acpi_early_init(void);
extern void acpi_subsystem_init(void);
-extern void arch_post_acpi_subsys_init(void);
extern int acpi_nvs_register(__u64 start, __u64 size);
extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
void *data);
+const struct acpi_device_id *acpi_match_acpi_device(const struct acpi_device_id *ids,
+ const struct acpi_device *adev);
+
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev);
const void *acpi_device_get_match_data(const struct device *dev);
extern bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv);
-int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
+int acpi_device_uevent_modalias(const struct device *, struct kobj_uevent_env *);
int acpi_device_modalias(struct device *, char *, int);
-void acpi_walk_dep_device_list(acpi_handle handle);
struct platform_device *acpi_create_platform_device(struct acpi_device *,
- struct property_entry *);
+ const struct property_entry *);
#define ACPI_PTR(_ptr) (_ptr)
static inline void acpi_device_set_enumerated(struct acpi_device *adev)
@@ -694,22 +767,29 @@ int acpi_reconfig_notifier_unregister(struct notifier_block *nb);
int acpi_gtdt_init(struct acpi_table_header *table, int *platform_timer_count);
int acpi_gtdt_map_ppi(int type);
bool acpi_gtdt_c3stop(int type);
-int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count);
#endif
#ifndef ACPI_HAVE_ARCH_SET_ROOT_POINTER
-static inline void acpi_arch_set_root_pointer(u64 addr)
+static __always_inline void acpi_arch_set_root_pointer(u64 addr)
{
}
#endif
#ifndef ACPI_HAVE_ARCH_GET_ROOT_POINTER
-static inline u64 acpi_arch_get_root_pointer(void)
+static __always_inline u64 acpi_arch_get_root_pointer(void)
{
return 0;
}
#endif
+int acpi_get_local_u64_address(acpi_handle handle, u64 *addr);
+int acpi_get_local_address(acpi_handle handle, u32 *addr);
+const char *acpi_get_subsystem_id(acpi_handle handle);
+
+#ifdef CONFIG_ACPI_MRRM
+int acpi_mrrm_max_mem_region(void);
+#endif
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
@@ -718,9 +798,10 @@ static inline u64 acpi_arch_get_root_pointer(void)
#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
#define ACPI_HANDLE(dev) (NULL)
#define ACPI_HANDLE_FWNODE(fwnode) (NULL)
-#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),
-#include <acpi/acpi_numa.h>
+/* Get rid of the -Wunused-variable warning for adev */
+#define acpi_dev_uid_match(adev, uid2) (adev && false)
+#define acpi_dev_hid_uid_match(adev, hid2, uid2) (adev && false)
struct fwnode_handle;
@@ -736,10 +817,9 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
struct acpi_device;
-static inline bool
-acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2)
+static inline int acpi_dev_uid_to_integer(struct acpi_device *adev, u64 *integer)
{
- return false;
+ return -ENODEV;
}
static inline struct acpi_device *
@@ -765,7 +845,7 @@ static inline bool is_acpi_device_node(const struct fwnode_handle *fwnode)
return false;
}
-static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
+static inline struct acpi_device *to_acpi_device_node(const struct fwnode_handle *fwnode)
{
return NULL;
}
@@ -775,12 +855,12 @@ static inline bool is_acpi_data_node(const struct fwnode_handle *fwnode)
return false;
}
-static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode)
+static inline struct acpi_data_node *to_acpi_data_node(const struct fwnode_handle *fwnode)
{
return NULL;
}
-static inline bool acpi_data_node_match(struct fwnode_handle *fwnode,
+static inline bool acpi_data_node_match(const struct fwnode_handle *fwnode,
const char *name)
{
return false;
@@ -791,6 +871,11 @@ static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
return NULL;
}
+static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
+{
+ return NULL;
+}
+
static inline bool has_acpi_companion(struct device *dev)
{
return false;
@@ -867,6 +952,12 @@ static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
struct acpi_device_id;
+static inline const struct acpi_device_id *acpi_match_acpi_device(
+ const struct acpi_device_id *ids, const struct acpi_device *adev)
+{
+ return NULL;
+}
+
static inline const struct acpi_device_id *acpi_match_device(
const struct acpi_device_id *ids, const struct device *dev)
{
@@ -884,6 +975,12 @@ static inline bool acpi_driver_match_device(struct device *dev,
return false;
}
+static inline bool acpi_check_dsm(acpi_handle handle, const guid_t *guid,
+ u64 rev, u64 funcs)
+{
+ return false;
+}
+
static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
const guid_t *guid,
u64 rev, u64 func,
@@ -892,7 +989,16 @@ static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle,
return NULL;
}
-static inline int acpi_device_uevent_modalias(struct device *dev,
+static inline union acpi_object *acpi_evaluate_dsm_typed(acpi_handle handle,
+ const guid_t *guid,
+ u64 rev, u64 func,
+ union acpi_object *argv4,
+ acpi_object_type type)
+{
+ return NULL;
+}
+
+static inline int acpi_device_uevent_modalias(const struct device *dev,
struct kobj_uevent_env *env)
{
return -ENODEV;
@@ -906,12 +1012,12 @@ static inline int acpi_device_modalias(struct device *dev,
static inline struct platform_device *
acpi_create_platform_device(struct acpi_device *adev,
- struct property_entry *properties)
+ const struct property_entry *properties)
{
return NULL;
}
-static inline bool acpi_dma_supported(struct acpi_device *adev)
+static inline bool acpi_dma_supported(const struct acpi_device *adev)
{
return false;
}
@@ -921,8 +1027,7 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NOT_SUPPORTED;
}
-static inline int acpi_dma_get_range(struct device *dev, u64 *dma_addr,
- u64 *offset, u64 *size)
+static inline int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
{
return -ENODEV;
}
@@ -965,8 +1070,66 @@ static inline struct acpi_device *acpi_resource_consumer(struct resource *res)
return NULL;
}
+static inline int acpi_get_local_address(acpi_handle handle, u32 *addr)
+{
+ return -ENODEV;
+}
+
+static inline const char *acpi_get_subsystem_id(acpi_handle handle)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int acpi_register_wakeup_handler(int wake_irq,
+ bool (*wakeup)(void *context), void *context)
+{
+ return -ENXIO;
+}
+
+static inline void acpi_unregister_wakeup_handler(
+ bool (*wakeup)(void *context), void *context) { }
+
+struct acpi_osc_context;
+static inline u32 acpi_osc_ctx_get_pci_control(struct acpi_osc_context *context)
+{
+ return 0;
+}
+
+static inline u32 acpi_osc_ctx_get_cxl_control(struct acpi_osc_context *context)
+{
+ return 0;
+}
+
+static inline bool acpi_sleep_state_supported(u8 sleep_state)
+{
+ return false;
+}
+
+static inline acpi_handle acpi_get_processor_handle(int cpu)
+{
+ return NULL;
+}
+
+static inline int acpi_mrrm_max_mem_region(void)
+{
+ return 1;
+}
+
#endif /* !CONFIG_ACPI */
+#ifdef CONFIG_ACPI_HMAT
+int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid,
+ resource_size_t *size);
+#else
+static inline int hmat_get_extended_linear_cache_size(struct resource *backing_res,
+ int nid, resource_size_t *size)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+extern void arch_post_acpi_subsys_init(void);
+
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
int acpi_ioapic_add(acpi_handle root);
#else
@@ -985,15 +1148,25 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
u32 val_a, u32 val_b);
-
-#ifndef CONFIG_IA64
-void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
-#else
-static inline void arch_reserve_mem_area(acpi_physical_address addr,
- size_t size)
+struct acpi_s2idle_dev_ops {
+ struct list_head list_node;
+ void (*prepare)(void);
+ void (*check)(void);
+ void (*restore)(void);
+};
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_X86)
+int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg);
+void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg);
+#else /* CONFIG_SUSPEND && CONFIG_X86 */
+static inline int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg)
+{
+ return -ENODEV;
+}
+static inline void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg)
{
}
-#endif /* CONFIG_X86 */
+#endif /* CONFIG_SUSPEND && CONFIG_X86 */
+void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
#else
#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
#endif
@@ -1004,6 +1177,8 @@ int acpi_dev_resume(struct device *dev);
int acpi_subsys_runtime_suspend(struct device *dev);
int acpi_subsys_runtime_resume(struct device *dev);
int acpi_dev_pm_attach(struct device *dev, bool power_on);
+bool acpi_storage_d3(struct device *dev);
+bool acpi_dev_state_d0(struct device *dev);
#else
static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
@@ -1011,6 +1186,14 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
return 0;
}
+static inline bool acpi_storage_d3(struct device *dev)
+{
+ return false;
+}
+static inline bool acpi_dev_state_d0(struct device *dev)
+{
+ return true;
+}
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
@@ -1021,8 +1204,7 @@ int acpi_subsys_suspend_noirq(struct device *dev);
int acpi_subsys_suspend(struct device *dev);
int acpi_subsys_freeze(struct device *dev);
int acpi_subsys_poweroff(struct device *dev);
-void acpi_ec_mark_gpe_for_wake(void);
-void acpi_ec_set_gpe_wake_mask(u8 action);
+int acpi_subsys_restore_early(struct device *dev);
#else
static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
static inline void acpi_subsys_complete(struct device *dev) {}
@@ -1031,11 +1213,19 @@ static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; }
static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
static inline int acpi_subsys_poweroff(struct device *dev) { return 0; }
+static inline int acpi_subsys_restore_early(struct device *dev) { return 0; }
+#endif
+
+#if defined(CONFIG_ACPI_EC) && defined(CONFIG_PM_SLEEP)
+void acpi_ec_mark_gpe_for_wake(void);
+void acpi_ec_set_gpe_wake_mask(u8 action);
+#else
static inline void acpi_ec_mark_gpe_for_wake(void) {}
static inline void acpi_ec_set_gpe_wake_mask(u8 action) {}
#endif
#ifdef CONFIG_ACPI
+char *acpi_handle_path(acpi_handle handle);
__printf(3, 4)
void acpi_handle_printk(const char *level, acpi_handle handle,
const char *fmt, ...);
@@ -1096,23 +1286,43 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c
#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
struct acpi_resource_gpio **agpio);
-int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index);
+bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio);
+int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index,
+ bool *wake_capable);
#else
static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
struct acpi_resource_gpio **agpio)
{
return false;
}
-static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev,
- const char *name, int index)
+static inline bool acpi_gpio_get_io_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio)
+{
+ return false;
+}
+static inline int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id,
+ int index, bool *wake_capable)
{
return -ENXIO;
}
#endif
+static inline int acpi_dev_gpio_irq_wake_get(struct acpi_device *adev, int index,
+ bool *wake_capable)
+{
+ return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, wake_capable);
+}
+
+static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *con_id,
+ int index)
+{
+ return acpi_dev_gpio_irq_wake_get_by(adev, con_id, index, NULL);
+}
+
static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
- return acpi_dev_gpio_irq_get_by(adev, NULL, index);
+ return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, NULL);
}
/* Device properties */
@@ -1140,15 +1350,11 @@ static inline bool acpi_dev_has_props(const struct acpi_device *adev)
struct acpi_device_properties *
acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
- const union acpi_object *properties);
+ union acpi_object *properties);
int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
void **valptr);
-struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
- struct fwnode_handle *child);
-struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode);
-
struct acpi_probe_entry;
typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
struct acpi_probe_entry *);
@@ -1179,6 +1385,8 @@ struct acpi_probe_entry {
kernel_ulong_t driver_data;
};
+void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr);
+
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \
valid, data, fn) \
static const struct acpi_probe_entry __acpi_probe_##name \
@@ -1246,19 +1454,6 @@ static inline int acpi_node_prop_get(const struct fwnode_handle *fwnode,
}
static inline struct fwnode_handle *
-acpi_get_next_subnode(const struct fwnode_handle *fwnode,
- struct fwnode_handle *child)
-{
- return NULL;
-}
-
-static inline struct fwnode_handle *
-acpi_node_get_parent(const struct fwnode_handle *fwnode)
-{
- return NULL;
-}
-
-static inline struct fwnode_handle *
acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
@@ -1304,18 +1499,25 @@ int acpi_parse_spcr(bool enable_earlycon, bool enable_console);
#else
static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console)
{
- return 0;
+ return -ENODEV;
}
#endif
#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI)
int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res);
+const struct cpumask *acpi_irq_get_affinity(acpi_handle handle,
+ unsigned int index);
#else
static inline
int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
{
return -EINVAL;
}
+static inline const struct cpumask *acpi_irq_get_affinity(acpi_handle handle,
+ unsigned int index)
+{
+ return NULL;
+}
#endif
#ifdef CONFIG_ACPI_LPIT
@@ -1327,12 +1529,24 @@ static inline int lpit_read_residency_count_address(u64 *address)
}
#endif
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
+#ifndef arch_get_idle_state_flags
+static inline unsigned int arch_get_idle_state_flags(u32 arch_flags)
+{
+ return 0;
+}
+#endif
+#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
+
#ifdef CONFIG_ACPI_PPTT
int acpi_pptt_cpu_is_thread(unsigned int cpu);
int find_acpi_cpu_topology(unsigned int cpu, int level);
+int find_acpi_cpu_topology_cluster(unsigned int cpu);
int find_acpi_cpu_topology_package(unsigned int cpu);
int find_acpi_cpu_topology_hetero_id(unsigned int cpu);
-int find_acpi_cpu_cache_topology(unsigned int cpu, int level);
+void acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus);
+int find_acpi_cache_level_from_id(u32 cache_id);
+int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus);
#else
static inline int acpi_pptt_cpu_is_thread(unsigned int cpu)
{
@@ -1342,6 +1556,10 @@ static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
{
return -EINVAL;
}
+static inline int find_acpi_cpu_topology_cluster(unsigned int cpu)
+{
+ return -EINVAL;
+}
static inline int find_acpi_cpu_topology_package(unsigned int cpu)
{
return -EINVAL;
@@ -1350,19 +1568,56 @@ static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
{
return -EINVAL;
}
-static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
+static inline void acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id,
+ cpumask_t *cpus) { }
+static inline int find_acpi_cache_level_from_id(u32 cache_id)
{
- return -EINVAL;
+ return -ENOENT;
+}
+static inline int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id,
+ cpumask_t *cpus)
+{
+ return -ENOENT;
}
#endif
+void acpi_arch_init(void);
+
+#ifdef CONFIG_ACPI_PCC
+void acpi_init_pcc(void);
+#else
+static inline void acpi_init_pcc(void) { }
+#endif
+
+#ifdef CONFIG_ACPI_FFH
+void acpi_init_ffh(void);
+extern int acpi_ffh_address_space_arch_setup(void *handler_ctxt,
+ void **region_ctxt);
+extern int acpi_ffh_address_space_arch_handler(acpi_integer *value,
+ void *region_context);
+#else
+static inline void acpi_init_ffh(void) { }
+#endif
+
#ifdef CONFIG_ACPI
-extern int acpi_platform_notify(struct device *dev, enum kobject_action action);
+extern void acpi_device_notify(struct device *dev);
+extern void acpi_device_notify_remove(struct device *dev);
#else
-static inline int
-acpi_platform_notify(struct device *dev, enum kobject_action action)
+static inline void acpi_device_notify(struct device *dev) { }
+static inline void acpi_device_notify_remove(struct device *dev) { }
+#endif
+
+static inline void acpi_use_parent_companion(struct device *dev)
{
- return 0;
+ ACPI_COMPANION_SET(dev, ACPI_COMPANION(dev->parent));
+}
+
+#ifdef CONFIG_ACPI_NUMA
+bool acpi_node_backed_by_real_pxm(int nid);
+#else
+static inline bool acpi_node_backed_by_real_pxm(int nid)
+{
+ return false;
}
#endif
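The acpi_get_table_pointer() helper and the DEFINE_FREE(acpi_put_table, ...) definition earlier in this header pair with <linux/cleanup.h> to give scope-based table release. A hedged sketch; the demo_ name is hypothetical and the MADT is chosen arbitrarily:

/* Minimal sketch: the table is put automatically when it leaves scope. */
static int demo_madt_length(void)
{
	struct acpi_table_header *table __free(acpi_put_table) =
		acpi_get_table_pointer(ACPI_SIG_MADT, 0);

	if (IS_ERR(table))
		return PTR_ERR(table);

	return table->length;
}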
diff --git a/include/linux/acpi_amd_wbrf.h b/include/linux/acpi_amd_wbrf.h
new file mode 100644
index 000000000000..898f31d536d4
--- /dev/null
+++ b/include/linux/acpi_amd_wbrf.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Wifi Band Exclusion Interface (AMD ACPI Implementation)
+ * Copyright (C) 2023 Advanced Micro Devices
+ */
+
+#ifndef _ACPI_AMD_WBRF_H
+#define _ACPI_AMD_WBRF_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+
+/* The maximum number of frequency band ranges */
+#define MAX_NUM_OF_WBRF_RANGES 11
+
+/* Record actions */
+#define WBRF_RECORD_ADD 0x0
+#define WBRF_RECORD_REMOVE 0x1
+
+/**
+ * struct freq_band_range - Wifi frequency band range definition
+ * @start: start frequency point (in Hz)
+ * @end: end frequency point (in Hz)
+ */
+struct freq_band_range {
+ u64 start;
+ u64 end;
+};
+
+/**
+ * struct wbrf_ranges_in_out - wbrf ranges info
+ * @num_of_ranges: total number of band ranges in this struct
+ * @band_list: array of Wifi band ranges
+ */
+struct wbrf_ranges_in_out {
+ u64 num_of_ranges;
+ struct freq_band_range band_list[MAX_NUM_OF_WBRF_RANGES];
+};
+
+/**
+ * enum wbrf_notifier_actions - wbrf notifier actions index
+ * @WBRF_CHANGED: there were frequency band updates. The consumers
+ * should retrieve the latest active frequency bands.
+ */
+enum wbrf_notifier_actions {
+ WBRF_CHANGED,
+};
+
+#if IS_ENABLED(CONFIG_AMD_WBRF)
+bool acpi_amd_wbrf_supported_producer(struct device *dev);
+int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in);
+bool acpi_amd_wbrf_supported_consumer(struct device *dev);
+int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out);
+int amd_wbrf_register_notifier(struct notifier_block *nb);
+int amd_wbrf_unregister_notifier(struct notifier_block *nb);
+#else
+static inline
+bool acpi_amd_wbrf_supported_consumer(struct device *dev)
+{
+ return false;
+}
+
+static inline
+int acpi_amd_wbrf_add_remove(struct device *dev, uint8_t action, struct wbrf_ranges_in_out *in)
+{
+ return -ENODEV;
+}
+
+static inline
+bool acpi_amd_wbrf_supported_producer(struct device *dev)
+{
+ return false;
+}
+static inline
+int amd_wbrf_retrieve_freq_band(struct device *dev, struct wbrf_ranges_in_out *out)
+{
+ return -ENODEV;
+}
+static inline
+int amd_wbrf_register_notifier(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+static inline
+int amd_wbrf_unregister_notifier(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_AMD_WBRF */
+
+#endif /* _ACPI_AMD_WBRF_H */
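A hedged usage sketch for the producer side of the WBRF interface above; the band values are made up and the demo_ name is hypothetical:

/* Minimal sketch: a producer reporting one in-use frequency band. */
static int demo_wbrf_report_band(struct device *dev)
{
	struct wbrf_ranges_in_out in = {
		.num_of_ranges = 1,
		.band_list[0] = {
			.start = 2400000000ULL,	/* Hz */
			.end = 2500000000ULL,	/* Hz */
		},
	};

	if (!acpi_amd_wbrf_supported_producer(dev))
		return -ENODEV;

	return acpi_amd_wbrf_add_remove(dev, WBRF_RECORD_ADD, &in);
}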
diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h
index 72cedb916a9c..e748b2877602 100644
--- a/include/linux/acpi_dma.h
+++ b/include/linux/acpi_dma.h
@@ -11,10 +11,11 @@
#ifndef __LINUX_ACPI_DMA_H
#define __LINUX_ACPI_DMA_H
-#include <linux/list.h>
-#include <linux/device.h>
#include <linux/err.h>
#include <linux/dmaengine.h>
+#include <linux/types.h>
+
+struct device;
/**
* struct acpi_dma_spec - slave device DMA resources
@@ -65,7 +66,6 @@ int devm_acpi_dma_controller_register(struct device *dev,
struct dma_chan *(*acpi_dma_xlate)
(struct acpi_dma_spec *, struct acpi_dma *),
void *data);
-void devm_acpi_dma_controller_free(struct device *dev);
struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
size_t index);
@@ -94,9 +94,6 @@ static inline int devm_acpi_dma_controller_register(struct device *dev,
{
return -ENODEV;
}
-static inline void devm_acpi_dma_controller_free(struct device *dev)
-{
-}
static inline struct dma_chan *acpi_dma_request_slave_chan_by_index(
struct device *dev, size_t index)
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 1a12baa58e40..d4ed5622cf2b 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -21,41 +21,47 @@
*/
#define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */
#define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */
+#define IORT_SMMU_V3_PMCG_HISI_HIP09 0x00000002 /* HiSilicon HIP09 PMCG */
int iort_register_domain_token(int trans_id, phys_addr_t base,
struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
struct fwnode_handle *iort_find_domain_token(int trans_id);
+int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
+
#ifdef CONFIG_ACPI_IORT
-void acpi_iort_init(void);
u32 iort_msi_map_id(struct device *dev, u32 id);
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
enum irq_domain_bus_token bus_token);
void acpi_configure_pmsi_domain(struct device *dev);
-int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
+void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head);
+void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
+ struct list_head *head);
/* IOMMU interface */
-void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
-const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
- const u32 *id_in);
-int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head);
+int iort_dma_get_ranges(struct device *dev, u64 *limit);
+int iort_iommu_configure_id(struct device *dev, const u32 *id_in);
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head);
phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
#else
-static inline void acpi_iort_init(void) { }
static inline u32 iort_msi_map_id(struct device *dev, u32 id)
{ return id; }
static inline struct irq_domain *iort_get_device_domain(
struct device *dev, u32 id, enum irq_domain_bus_token bus_token)
{ return NULL; }
static inline void acpi_configure_pmsi_domain(struct device *dev) { }
+static inline
+void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
+static inline
+void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
/* IOMMU interface */
-static inline void iort_dma_setup(struct device *dev, u64 *dma_addr,
- u64 *size) { }
-static inline const struct iommu_ops *iort_iommu_configure_id(
- struct device *dev, const u32 *id_in)
-{ return NULL; }
+static inline int iort_dma_get_ranges(struct device *dev, u64 *limit)
+{ return -ENODEV; }
+static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
+{ return -ENODEV; }
static inline
-int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
-{ return 0; }
+void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
+{ }
static inline phys_addr_t acpi_iort_dma_get_max_cpu_address(void)
{ return PHYS_ADDR_MAX; }
diff --git a/include/linux/acpi_mdio.h b/include/linux/acpi_mdio.h
new file mode 100644
index 000000000000..8e2eefa9fbc0
--- /dev/null
+++ b/include/linux/acpi_mdio.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ACPI helper for the MDIO (Ethernet PHY) API
+ */
+
+#ifndef __LINUX_ACPI_MDIO_H
+#define __LINUX_ACPI_MDIO_H
+
+#include <linux/phy.h>
+
+#if IS_ENABLED(CONFIG_ACPI_MDIO)
+int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode,
+ struct module *owner);
+
+static inline int
+acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *handle)
+{
+ return __acpi_mdiobus_register(mdio, handle, THIS_MODULE);
+}
+#else /* CONFIG_ACPI_MDIO */
+static inline int
+acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+{
+ /*
+ * Fall back to the mdiobus_register() function to register the bus.
+ * This way, we don't have to keep compat bits around in drivers.
+ */
+
+ return mdiobus_register(mdio);
+}
+#endif
+
+#endif /* __LINUX_ACPI_MDIO_H */
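A short sketch of the registration path above, assuming the caller has already allocated a struct mii_bus; dev_fwnode() comes from the driver core and the demo_ name is hypothetical:

/* Minimal sketch: register an MDIO bus from a device's ACPI fwnode. */
static int demo_acpi_mdio_register(struct device *dev, struct mii_bus *mdio)
{
	/* Without CONFIG_ACPI_MDIO this falls back to mdiobus_register(). */
	return acpi_mdiobus_register(mdio, dev_fwnode(dev));
}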
diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h
index 50d88bf1498d..0ded9220d379 100644
--- a/include/linux/acpi_pmtmr.h
+++ b/include/linux/acpi_pmtmr.h
@@ -26,6 +26,19 @@ static inline u32 acpi_pm_read_early(void)
return acpi_pm_read_verified() & ACPI_PM_MASK;
}
+/**
+ * Register a callback for suspend and resume events
+ *
+ * @cb: Callback triggered on suspend and resume
+ * @data: Data passed with the callback
+ */
+void acpi_pmtmr_register_suspend_resume_callback(void (*cb)(void *data, bool suspend), void *data);
+
+/**
+ * Remove the registered callback for suspend and resume events
+ */
+void acpi_pmtmr_unregister_suspend_resume_callback(void);
+
#else
static inline u32 acpi_pm_read_early(void)
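A hedged sketch of the suspend/resume callback registration declared above; the demo_ names are hypothetical:

/* Minimal sketch: hook PM-timer users into suspend/resume transitions. */
static void demo_pmtmr_cb(void *data, bool suspend)
{
	/* Re-validate any cached PM-timer state on resume. */
}

static void demo_pmtmr_init(void *data)
{
	acpi_pmtmr_register_suspend_resume_callback(demo_pmtmr_cb, data);
}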
diff --git a/include/linux/acpi_rimt.h b/include/linux/acpi_rimt.h
new file mode 100644
index 000000000000..fad3adc4d899
--- /dev/null
+++ b/include/linux/acpi_rimt.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024-2025, Ventana Micro Systems Inc.
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ */
+
+#ifndef _ACPI_RIMT_H
+#define _ACPI_RIMT_H
+
+#ifdef CONFIG_ACPI_RIMT
+int rimt_iommu_register(struct device *dev);
+#else
+static inline int rimt_iommu_register(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+#if defined(CONFIG_IOMMU_API) && defined(CONFIG_ACPI_RIMT)
+int rimt_iommu_configure_id(struct device *dev, const u32 *id_in);
+#else
+static inline int rimt_iommu_configure_id(struct device *dev, const u32 *id_in)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* _ACPI_RIMT_H */
diff --git a/include/linux/acpi_viot.h b/include/linux/acpi_viot.h
new file mode 100644
index 000000000000..a5a122431563
--- /dev/null
+++ b/include/linux/acpi_viot.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ACPI_VIOT_H__
+#define __ACPI_VIOT_H__
+
+#include <linux/acpi.h>
+
+#ifdef CONFIG_ACPI_VIOT
+void __init acpi_viot_early_init(void);
+void __init acpi_viot_init(void);
+int viot_iommu_configure(struct device *dev);
+#else
+static inline void acpi_viot_early_init(void) {}
+static inline void acpi_viot_init(void) {}
+static inline int viot_iommu_configure(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __ACPI_VIOT_H__ */
diff --git a/include/linux/adi-axi-common.h b/include/linux/adi-axi-common.h
new file mode 100644
index 000000000000..37962ba530df
--- /dev/null
+++ b/include/linux/adi-axi-common.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Analog Devices AXI common registers & definitions
+ *
+ * Copyright 2019 Analog Devices Inc.
+ *
+ * https://wiki.analog.com/resources/fpga/docs/axi_ip
+ * https://wiki.analog.com/resources/fpga/docs/hdl/regmap
+ */
+
+#include <linux/types.h>
+
+#ifndef ADI_AXI_COMMON_H_
+#define ADI_AXI_COMMON_H_
+
+#define ADI_AXI_REG_VERSION 0x0000
+#define ADI_AXI_REG_FPGA_INFO 0x001C
+
+#define ADI_AXI_PCORE_VER(major, minor, patch) \
+ (((major) << 16) | ((minor) << 8) | (patch))
+
+#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff)
+#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff)
+#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff)
+
+/**
+ * adi_axi_pcore_ver_gteq() - check if a version requirement is satisfied
+ * @version: the full version read from the hardware
+ * @major: the major version to compare against
+ * @minor: the minor version to compare against
+ *
+ * ADI AXI IP Cores use semantic versioning, so this can be used to check for
+ * feature availability.
+ *
+ * Return: true if the version is greater than or equal to the specified
+ * major and minor version, false otherwise.
+ */
+static inline bool adi_axi_pcore_ver_gteq(u32 version, u32 major, u32 minor)
+{
+ return ADI_AXI_PCORE_VER_MAJOR(version) > (major) ||
+ (ADI_AXI_PCORE_VER_MAJOR(version) == (major) &&
+ ADI_AXI_PCORE_VER_MINOR(version) >= (minor));
+}
+
+#define ADI_AXI_INFO_FPGA_TECH(info) (((info) >> 24) & 0xff)
+#define ADI_AXI_INFO_FPGA_FAMILY(info) (((info) >> 16) & 0xff)
+#define ADI_AXI_INFO_FPGA_SPEED_GRADE(info) (((info) >> 8) & 0xff)
+
+enum adi_axi_fpga_technology {
+ ADI_AXI_FPGA_TECH_UNKNOWN = 0,
+ ADI_AXI_FPGA_TECH_SERIES7,
+ ADI_AXI_FPGA_TECH_ULTRASCALE,
+ ADI_AXI_FPGA_TECH_ULTRASCALE_PLUS,
+};
+
+enum adi_axi_fpga_family {
+ ADI_AXI_FPGA_FAMILY_UNKNOWN = 0,
+ ADI_AXI_FPGA_FAMILY_ARTIX,
+ ADI_AXI_FPGA_FAMILY_KINTEX,
+ ADI_AXI_FPGA_FAMILY_VIRTEX,
+ ADI_AXI_FPGA_FAMILY_ZYNQ,
+};
+
+enum adi_axi_fpga_speed_grade {
+ ADI_AXI_FPGA_SPEED_UNKNOWN = 0,
+ ADI_AXI_FPGA_SPEED_1 = 10,
+ ADI_AXI_FPGA_SPEED_1L = 11,
+ ADI_AXI_FPGA_SPEED_1H = 12,
+ ADI_AXI_FPGA_SPEED_1HV = 13,
+ ADI_AXI_FPGA_SPEED_1LV = 14,
+ ADI_AXI_FPGA_SPEED_2 = 20,
+ ADI_AXI_FPGA_SPEED_2L = 21,
+ ADI_AXI_FPGA_SPEED_2LV = 22,
+ ADI_AXI_FPGA_SPEED_3 = 30,
+};
+
+#endif /* ADI_AXI_COMMON_H_ */
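
As the kernel-doc above notes, adi_axi_pcore_ver_gteq() acts as a
semantic-version feature gate. A hedged sketch of the expected use, assuming
an already-mapped register base and an illustrative 4.2 feature cut-off:

#include <linux/adi-axi-common.h>
#include <linux/io.h>

static bool example_has_feature(void __iomem *base)
{
	u32 version = ioread32(base + ADI_AXI_REG_VERSION);

	/* illustrative: pretend the feature appeared in IP core 4.2 */
	return adi_axi_pcore_ver_gteq(version, 4, 2);
}
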
diff --git a/include/linux/adreno-smmu-priv.h b/include/linux/adreno-smmu-priv.h
index a889f28afb42..d83c9175828f 100644
--- a/include/linux/adreno-smmu-priv.h
+++ b/include/linux/adreno-smmu-priv.h
@@ -9,6 +9,32 @@
#include <linux/io-pgtable.h>
/**
+ * struct adreno_smmu_fault_info - container for key fault information
+ *
+ * @far: The faulting IOVA from ARM_SMMU_CB_FAR
+ * @ttbr0: The current TTBR0 pagetable from ARM_SMMU_CB_TTBR0
+ * @contextidr: The value of ARM_SMMU_CB_CONTEXTIDR
+ * @fsr: The fault status from ARM_SMMU_CB_FSR
+ * @fsynr0: The value of FSYNR0 from ARM_SMMU_CB_FSYNR0
+ * @fsynr1: The value of FSYNR1 from ARM_SMMU_CB_FSYNR1
+ * @cbfrsynra: The value of CBFRSYNRA from ARM_SMMU_GR1_CBFRSYNRA(idx)
+ *
+ * This struct passes back key page fault information to the GPU driver
+ * through the get_fault_info function pointer.
+ * The GPU driver can use this information to print informative
+ * log messages and provide deeper, GPU-specific insight into the fault.
+ */
+struct adreno_smmu_fault_info {
+ u64 far;
+ u64 ttbr0;
+ u32 contextidr;
+ u32 fsr;
+ u32 fsynr0;
+ u32 fsynr1;
+ u32 cbfrsynra;
+};
+
+/**
* struct adreno_smmu_priv - private interface between adreno-smmu and GPU
*
 * @cookie: An opaque token provided by adreno-smmu and passed
@@ -17,6 +43,18 @@
* @set_ttbr0_cfg: Set the TTBR0 config for the GPUs context bank. A
* NULL config disables TTBR0 translation, otherwise
* TTBR0 translation is enabled with the specified cfg
+ * @get_fault_info: Called by the GPU fault handler to get information about
+ * the fault
+ * @set_stall: Configure whether stall on fault (CFCFG) is enabled. If
+ * stalling on fault is enabled, the GPU driver must call
+ * resume_translation()
+ * @resume_translation: Resume translation after a fault
+ *
+ * @set_prr_bit: [optional] Configure the GPU's Partially Resident
+ * Region (PRR) bit in the ACTLR register.
+ * @set_prr_addr: [optional] Configure the PRR_CFG_*ADDR register with
+ * the physical address of PRR page passed from GPU
+ * driver.
*
* The GPU driver (drm/msm) and adreno-smmu work together for controlling
* the GPU's SMMU instance. This is by necessity, as the GPU is directly
@@ -31,6 +69,11 @@ struct adreno_smmu_priv {
const void *cookie;
const struct io_pgtable_cfg *(*get_ttbr1_cfg)(const void *cookie);
int (*set_ttbr0_cfg)(const void *cookie, const struct io_pgtable_cfg *cfg);
+ void (*get_fault_info)(const void *cookie, struct adreno_smmu_fault_info *info);
+ void (*set_stall)(const void *cookie, bool enabled);
+ void (*resume_translation)(const void *cookie, bool terminate);
+ void (*set_prr_bit)(const void *cookie, bool set);
+ void (*set_prr_addr)(const void *cookie, phys_addr_t page_addr);
};
-#endif /* __ADRENO_SMMU_PRIV_H */
\ No newline at end of file
+#endif /* __ADRENO_SMMU_PRIV_H */
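
On the GPU-driver side, the new callbacks are reached through the struct
above, which drm/msm obtains via its SMMU's platform data. A hedged sketch of
a fault-handler fragment (function name illustrative):

#include <linux/adreno-smmu-priv.h>
#include <linux/printk.h>

static void example_log_gpu_fault(struct adreno_smmu_priv *priv)
{
	struct adreno_smmu_fault_info info;

	if (!priv->get_fault_info)
		return;

	priv->get_fault_info(priv->cookie, &info);
	pr_err("gpu fault: far=%llx fsr=%x fsynr0=%x cbfrsynra=%x\n",
	       info.far, info.fsr, info.fsynr0, info.cbfrsynra);

	/* with set_stall() enabled, translation must be resumed afterwards */
	if (priv->resume_translation)
		priv->resume_translation(priv->cookie, false);
}
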
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 97f64ba1b34a..02940be66324 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -16,13 +16,26 @@
#define AER_CORRECTABLE 2
#define DPC_FATAL 3
+/*
+ * AER and DPC capabilities TLP Logging register sizes (PCIe r6.2, sec 7.8.4
+ * & 7.9.14).
+ */
+#define PCIE_STD_NUM_TLP_HEADERLOG 4
+#define PCIE_STD_MAX_TLP_PREFIXLOG 4
+#define PCIE_STD_MAX_TLP_HEADERLOG (PCIE_STD_NUM_TLP_HEADERLOG + 10)
+
struct pci_dev;
-struct aer_header_log_regs {
- unsigned int dw0;
- unsigned int dw1;
- unsigned int dw2;
- unsigned int dw3;
+struct pcie_tlp_log {
+ union {
+ u32 dw[PCIE_STD_MAX_TLP_HEADERLOG];
+ struct {
+ u32 _do_not_use[PCIE_STD_NUM_TLP_HEADERLOG];
+ u32 prefix[PCIE_STD_MAX_TLP_PREFIXLOG];
+ };
+ };
+ u8 header_len; /* Length of the Logged TLP Header in DWORDs */
+ bool flit; /* TLP was logged when in Flit mode */
};
struct aer_capability_regs {
@@ -33,7 +46,7 @@ struct aer_capability_regs {
u32 cor_status;
u32 cor_mask;
u32 cap_control;
- struct aer_header_log_regs header_log;
+ struct pcie_tlp_log header_log;
u32 root_command;
u32 root_status;
u16 cor_err_source;
@@ -41,30 +54,17 @@ struct aer_capability_regs {
};
#if defined(CONFIG_PCIEAER)
-/* PCIe port driver needs this function to enable AER */
-int pci_enable_pcie_error_reporting(struct pci_dev *dev);
-int pci_disable_pcie_error_reporting(struct pci_dev *dev);
int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
-void pci_save_aer_state(struct pci_dev *dev);
-void pci_restore_aer_state(struct pci_dev *dev);
+int pcie_aer_is_native(struct pci_dev *dev);
#else
-static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
-{
- return -EINVAL;
-}
-static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
-{
- return -EINVAL;
-}
static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
return -EINVAL;
}
-static inline void pci_save_aer_state(struct pci_dev *dev) {}
-static inline void pci_restore_aer_state(struct pci_dev *dev) {}
+static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
#endif
-void cper_print_aer(struct pci_dev *dev, int aer_severity,
+void pci_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer);
int cper_severity_to_aer(int cper_severity);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
		       int severity, struct aer_capability_regs *aer_regs);
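
The union in struct pcie_tlp_log lets consumers either walk the raw dw[]
array or address the prefix log by name. A hedged dumping sketch (helper name
illustrative):

#include <linux/aer.h>
#include <linux/minmax.h>
#include <linux/printk.h>

static void example_dump_tlp(const struct pcie_tlp_log *log)
{
	int i, n = min_t(int, log->header_len, PCIE_STD_MAX_TLP_HEADERLOG);

	/* header_len is in DWORDs; in Flit mode it can exceed the classic 4 */
	for (i = 0; i < n; i++)
		pr_info("TLP log dw%d: %#010x\n", i, log->dw[i]);
}
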
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 49e5383d4222..fe0760ce34c8 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -13,6 +13,7 @@
#include <linux/compiler.h>
+struct clk;
struct device;
struct ata_port_info;
struct ahci_host_priv;
@@ -21,8 +22,12 @@ struct scsi_host_template;
int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
+struct clk *ahci_platform_find_clk(struct ahci_host_priv *hpriv,
+ const char *con_id);
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
+int ahci_platform_deassert_rsts(struct ahci_host_priv *hpriv);
+int ahci_platform_assert_rsts(struct ahci_host_priv *hpriv);
int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv);
int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
@@ -32,7 +37,7 @@ struct ahci_host_priv *ahci_platform_get_resources(
int ahci_platform_init_host(struct platform_device *pdev,
struct ahci_host_priv *hpriv,
const struct ata_port_info *pi_template,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
void ahci_platform_shutdown(struct platform_device *pdev);
@@ -41,6 +46,7 @@ int ahci_platform_resume_host(struct device *dev);
int ahci_platform_suspend(struct device *dev);
int ahci_platform_resume(struct device *dev);
-#define AHCI_PLATFORM_GET_RESETS 0x01
+#define AHCI_PLATFORM_GET_RESETS BIT(0)
+#define AHCI_PLATFORM_RST_TRIGGER BIT(1)
#endif /* _AHCI_PLATFORM_H */
diff --git a/include/linux/aio.h b/include/linux/aio.h
index b83e68dd006f..86892a4fe7c8 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -20,8 +20,4 @@ static inline void kiocb_set_cancel_fn(struct kiocb *req,
kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */
-/* for sysctl: */
-extern unsigned long aio_nr;
-extern unsigned long aio_max_nr;
-
#endif /* __LINUX__AIO_H */
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index 05e758b8b894..3ffa5341dce2 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -20,12 +20,6 @@ enum alarmtimer_type {
ALARM_BOOTTIME_FREEZER,
};
-enum alarmtimer_restart {
- ALARMTIMER_NORESTART,
- ALARMTIMER_RESTART,
-};
-
-
#define ALARMTIMER_STATE_INACTIVE 0x00
#define ALARMTIMER_STATE_ENQUEUED 0x01
@@ -42,14 +36,14 @@ enum alarmtimer_restart {
struct alarm {
struct timerqueue_node node;
struct hrtimer timer;
- enum alarmtimer_restart (*function)(struct alarm *, ktime_t now);
+ void (*function)(struct alarm *, ktime_t now);
enum alarmtimer_type type;
int state;
void *data;
};
void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
- enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
+ void (*function)(struct alarm *, ktime_t));
void alarm_start(struct alarm *alarm, ktime_t start);
void alarm_start_relative(struct alarm *alarm, ktime_t start);
void alarm_restart(struct alarm *alarm);
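
With the return type dropped, periodic behaviour is no longer signalled via
ALARMTIMER_RESTART; a handler that wants to fire again re-arms itself. A
hedged sketch under the new signature (names illustrative):

#include <linux/alarmtimer.h>
#include <linux/ktime.h>

static void example_alarm_fn(struct alarm *alarm, ktime_t now)
{
	/* do the periodic work, then explicitly re-arm one second out */
	alarm_start(alarm, ktime_add(now, ktime_set(1, 0)));
}

static void example_arm(struct alarm *alarm)
{
	alarm_init(alarm, ALARM_REALTIME, example_alarm_fn);
	alarm_start_relative(alarm, ktime_set(1, 0));
}
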
diff --git a/include/linux/alcor_pci.h b/include/linux/alcor_pci.h
index 8274ed525e9f..dcb1d37dabc2 100644
--- a/include/linux/alcor_pci.h
+++ b/include/linux/alcor_pci.h
@@ -11,6 +11,7 @@
#define ALCOR_SD_CARD 0
#define ALCOR_MS_CARD 1
+#define DRV_NAME_ALCOR_PCI "alcor_pci"
#define DRV_NAME_ALCOR_PCI_SDMMC "alcor_sdmmc"
#define DRV_NAME_ALCOR_PCI_MS "alcor_ms"
@@ -268,13 +269,6 @@ struct alcor_pci_priv {
unsigned long id; /* idr id */
struct alcor_dev_cfg *cfg;
-
- /* PCI ASPM related vars */
- int pdev_cap_off;
- u8 pdev_aspm_cap;
- int parent_cap_off;
- u8 parent_aspm_cap;
- u8 ext_config_dev_aspm;
};
void alcor_write8(struct alcor_pci_priv *priv, u8 val, unsigned int addr);
diff --git a/include/linux/align.h b/include/linux/align.h
index 2b4acec7b95a..55debf105a5d 100644
--- a/include/linux/align.h
+++ b/include/linux/align.h
@@ -2,14 +2,6 @@
#ifndef _LINUX_ALIGN_H
#define _LINUX_ALIGN_H
-#include <linux/const.h>
-
-/* @a is a power of 2 value */
-#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
-#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
-#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
-#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
-#define PTR_ALIGN_DOWN(p, a) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (a)))
-#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+#include <vdso/align.h>
#endif /* _LINUX_ALIGN_H */
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
new file mode 100644
index 000000000000..d40ac39bfbe8
--- /dev/null
+++ b/include/linux/alloc_tag.h
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * allocation tagging
+ */
+#ifndef _LINUX_ALLOC_TAG_H
+#define _LINUX_ALLOC_TAG_H
+
+#include <linux/bug.h>
+#include <linux/codetag.h>
+#include <linux/container_of.h>
+#include <linux/preempt.h>
+#include <asm/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+#include <linux/static_key.h>
+#include <linux/irqflags.h>
+
+struct alloc_tag_counters {
+ u64 bytes;
+ u64 calls;
+};
+
+/*
+ * An instance of this structure is created in a special ELF section at every
+ * allocation callsite. At runtime, the special section is treated as
+ * an array of these. The embedded codetag uses the codetag framework.
+ */
+struct alloc_tag {
+ struct codetag ct;
+ struct alloc_tag_counters __percpu *counters;
+} __aligned(8);
+
+struct alloc_tag_kernel_section {
+ struct alloc_tag *first_tag;
+ unsigned long count;
+};
+
+struct alloc_tag_module_section {
+ union {
+ unsigned long start_addr;
+ struct alloc_tag *first_tag;
+ };
+ unsigned long end_addr;
+ /* used size */
+ unsigned long size;
+};
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
+
+#define CODETAG_EMPTY ((void *)1)
+
+static inline bool is_codetag_empty(union codetag_ref *ref)
+{
+ return ref->ct == CODETAG_EMPTY;
+}
+
+static inline void set_codetag_empty(union codetag_ref *ref)
+{
+ if (ref)
+ ref->ct = CODETAG_EMPTY;
+}
+
+#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
+
+static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }
+
+static inline void set_codetag_empty(union codetag_ref *ref)
+{
+ if (ref)
+ ref->ct = NULL;
+}
+
+#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+
+#define ALLOC_TAG_SECTION_NAME "alloc_tags"
+
+struct codetag_bytes {
+ struct codetag *ct;
+ s64 bytes;
+};
+
+size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep);
+
+static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
+{
+ return container_of(ct, struct alloc_tag, ct);
+}
+
+#if defined(CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU) && defined(MODULE)
+/*
+ * When percpu variables are required to be defined as weak, static percpu
+ * variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
+ * Instead we will account all module allocations to a single counter.
+ */
+DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
+
+#define DEFINE_ALLOC_TAG(_alloc_tag) \
+ static struct alloc_tag _alloc_tag __used __aligned(8) \
+ __section(ALLOC_TAG_SECTION_NAME) = { \
+ .ct = CODE_TAG_INIT, \
+ .counters = &_shared_alloc_tag };
+
+#else /* CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU && MODULE */
+
+#ifdef MODULE
+
+#define DEFINE_ALLOC_TAG(_alloc_tag) \
+ static struct alloc_tag _alloc_tag __used __aligned(8) \
+ __section(ALLOC_TAG_SECTION_NAME) = { \
+ .ct = CODE_TAG_INIT, \
+ .counters = NULL };
+
+#else /* MODULE */
+
+#define DEFINE_ALLOC_TAG(_alloc_tag) \
+ static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \
+ static struct alloc_tag _alloc_tag __used __aligned(8) \
+ __section(ALLOC_TAG_SECTION_NAME) = { \
+ .ct = CODE_TAG_INIT, \
+ .counters = &_alloc_tag_cntr };
+
+#endif /* MODULE */
+
+#endif /* CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU && MODULE */
+
+DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
+ mem_alloc_profiling_key);
+
+static inline bool mem_alloc_profiling_enabled(void)
+{
+ return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
+ &mem_alloc_profiling_key);
+}
+
+static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
+{
+ struct alloc_tag_counters v = { 0, 0 };
+ struct alloc_tag_counters *counter;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ counter = per_cpu_ptr(tag->counters, cpu);
+ v.bytes += counter->bytes;
+ v.calls += counter->calls;
+ }
+
+ return v;
+}
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
+static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
+{
+ WARN_ONCE(ref && ref->ct && !is_codetag_empty(ref),
+ "alloc_tag was not cleared (got tag for %s:%u)\n",
+ ref->ct->filename, ref->ct->lineno);
+
+ WARN_ONCE(!tag, "current->alloc_tag not set\n");
+}
+
+static inline void alloc_tag_sub_check(union codetag_ref *ref)
+{
+ WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
+}
+#else
+static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {}
+static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
+#endif
+
+/* Caller should verify both ref and tag to be valid */
+static inline bool __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
+{
+ alloc_tag_add_check(ref, tag);
+ if (!ref || !tag)
+ return false;
+
+ ref->ct = &tag->ct;
+ return true;
+}
+
+static inline bool alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
+{
+ if (unlikely(!__alloc_tag_ref_set(ref, tag)))
+ return false;
+
+ /*
+	 * We need to increment the call counter every time we have a new
+	 * allocation or when we split a large allocation into smaller ones.
+	 * Each new reference for every sub-allocation needs to increment the
+	 * call counter, because when we free each part the counter will be
+	 * decremented.
+ */
+ this_cpu_inc(tag->counters->calls);
+ return true;
+}
+
+static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
+{
+ if (likely(alloc_tag_ref_set(ref, tag)))
+ this_cpu_add(tag->counters->bytes, bytes);
+}
+
+static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
+{
+ struct alloc_tag *tag;
+
+ alloc_tag_sub_check(ref);
+ if (!ref || !ref->ct)
+ return;
+
+ if (is_codetag_empty(ref)) {
+ ref->ct = NULL;
+ return;
+ }
+
+ tag = ct_to_alloc_tag(ref->ct);
+
+ this_cpu_sub(tag->counters->bytes, bytes);
+ this_cpu_dec(tag->counters->calls);
+
+ ref->ct = NULL;
+}
+
+static inline void alloc_tag_set_inaccurate(struct alloc_tag *tag)
+{
+ tag->ct.flags |= CODETAG_FLAG_INACCURATE;
+}
+
+static inline bool alloc_tag_is_inaccurate(struct alloc_tag *tag)
+{
+ return !!(tag->ct.flags & CODETAG_FLAG_INACCURATE);
+}
+
+#define alloc_tag_record(p) ((p) = current->alloc_tag)
+
+#else /* CONFIG_MEM_ALLOC_PROFILING */
+
+#define DEFINE_ALLOC_TAG(_alloc_tag)
+static inline bool mem_alloc_profiling_enabled(void) { return false; }
+static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
+ size_t bytes) {}
+static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
+static inline void alloc_tag_set_inaccurate(struct alloc_tag *tag) {}
+static inline bool alloc_tag_is_inaccurate(struct alloc_tag *tag) { return false; }
+#define alloc_tag_record(p) do {} while (0)
+
+#endif /* CONFIG_MEM_ALLOC_PROFILING */
+
+#define alloc_hooks_tag(_tag, _do_alloc) \
+({ \
+ typeof(_do_alloc) _res; \
+ if (mem_alloc_profiling_enabled()) { \
+ struct alloc_tag * __maybe_unused _old; \
+ _old = alloc_tag_save(_tag); \
+ _res = _do_alloc; \
+ alloc_tag_restore(_tag, _old); \
+ } else \
+ _res = _do_alloc; \
+ _res; \
+})
+
+#define alloc_hooks(_do_alloc) \
+({ \
+ DEFINE_ALLOC_TAG(_alloc_tag); \
+ alloc_hooks_tag(&_alloc_tag, _do_alloc); \
+})
+
+#endif /* _LINUX_ALLOC_TAG_H */
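
The intended pattern for alloc_hooks() is that an allocation API keeps a
_noprof worker while the public name becomes a macro wrapping it, so every
call site materialises its own struct alloc_tag in the "alloc_tags" section.
A hedged sketch with illustrative names (alloc_tag_save()/restore() come from
the task_struct side of the framework):

#include <linux/alloc_tag.h>

void *example_alloc_noprof(size_t size);	/* the real allocator */

/*
 * Every expansion of example_alloc() defines a per-callsite tag via
 * DEFINE_ALLOC_TAG() and charges bytes/calls to it while the allocation
 * runs.
 */
#define example_alloc(...)	alloc_hooks(example_alloc_noprof(__VA_ARGS__))
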
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index c68d87b87283..9946276aff73 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -67,10 +67,15 @@ struct amba_device {
struct clk *pclk;
struct device_dma_parameters dma_parms;
unsigned int periphid;
+ struct mutex periphid_lock;
unsigned int cid;
struct amba_cs_uci_id uci;
unsigned int irq[AMBA_NR_IRQS];
- char *driver_override;
+ /*
+	 * Driver name to force a match. Do not set directly, because the core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
};
struct amba_driver {
@@ -79,6 +84,14 @@ struct amba_driver {
void (*remove)(struct amba_device *);
void (*shutdown)(struct amba_device *);
const struct amba_id *id_table;
+ /*
+	 * Most device drivers need not care about this flag as long as all
+	 * DMA is handled through the kernel DMA API. Some special ones, for
+	 * example VFIO drivers, know how to manage DMA themselves and set
+	 * this flag so that the IOMMU layer will allow them to set up and
+	 * manage their own I/O address space.
+ */
+ bool driver_managed_dma;
};
/*
@@ -90,81 +103,48 @@ enum amba_vendor {
AMBA_VENDOR_ST = 0x80,
AMBA_VENDOR_QCOM = 0x51,
AMBA_VENDOR_LSI = 0xb6,
- AMBA_VENDOR_LINUX = 0xfe, /* This value is not official */
};
-/* This is used to generate pseudo-ID for AMBA device */
-#define AMBA_LINUX_ID(conf, rev, part) \
- (((conf) & 0xff) << 24 | ((rev) & 0xf) << 20 | \
- AMBA_VENDOR_LINUX << 12 | ((part) & 0xfff))
+extern const struct bus_type amba_bustype;
-extern struct bus_type amba_bustype;
-
-#define to_amba_device(d) container_of(d, struct amba_device, dev)
+#define to_amba_device(d) container_of_const(d, struct amba_device, dev)
#define amba_get_drvdata(d) dev_get_drvdata(&d->dev)
#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p)
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE
+ */
+#define amba_driver_register(drv) \
+ __amba_driver_register(drv, THIS_MODULE)
+
#ifdef CONFIG_ARM_AMBA
-int amba_driver_register(struct amba_driver *);
+int __amba_driver_register(struct amba_driver *, struct module *);
void amba_driver_unregister(struct amba_driver *);
+bool dev_is_amba(const struct device *dev);
#else
-static inline int amba_driver_register(struct amba_driver *drv)
+static inline int __amba_driver_register(struct amba_driver *drv,
+ struct module *owner)
{
return -EINVAL;
}
static inline void amba_driver_unregister(struct amba_driver *drv)
{
}
+static inline bool dev_is_amba(const struct device *dev)
+{
+ return false;
+}
#endif
struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t);
void amba_device_put(struct amba_device *);
int amba_device_add(struct amba_device *, struct resource *);
int amba_device_register(struct amba_device *, struct resource *);
-struct amba_device *amba_apb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size,
- int irq1, int irq2, void *pdata,
- unsigned int periphid);
-struct amba_device *amba_ahb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size,
- int irq1, int irq2, void *pdata,
- unsigned int periphid);
-struct amba_device *
-amba_apb_device_add_res(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1,
- int irq2, void *pdata, unsigned int periphid,
- struct resource *resbase);
-struct amba_device *
-amba_ahb_device_add_res(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1,
- int irq2, void *pdata, unsigned int periphid,
- struct resource *resbase);
void amba_device_unregister(struct amba_device *);
-struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int);
int amba_request_regions(struct amba_device *, const char *);
void amba_release_regions(struct amba_device *);
-static inline int amba_pclk_enable(struct amba_device *dev)
-{
- return clk_enable(dev->pclk);
-}
-
-static inline void amba_pclk_disable(struct amba_device *dev)
-{
- clk_disable(dev->pclk);
-}
-
-static inline int amba_pclk_prepare(struct amba_device *dev)
-{
- return clk_prepare(dev->pclk);
-}
-
-static inline void amba_pclk_unprepare(struct amba_device *dev)
-{
- clk_unprepare(dev->pclk);
-}
-
/* Some drivers don't use the struct amba_device */
#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
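
Since amba_driver_register() is now a macro supplying THIS_MODULE, existing
drivers keep compiling unchanged. A hedged registration sketch (IDs and names
illustrative):

#include <linux/amba/bus.h>
#include <linux/module.h>

static int example_probe(struct amba_device *adev, const struct amba_id *id)
{
	return 0;
}

static void example_remove(struct amba_device *adev)
{
}

static const struct amba_id example_ids[] = {
	{ .id = 0x00041011, .mask = 0x000fffff },	/* illustrative PL011 ID */
	{ 0, 0 },
};

static struct amba_driver example_driver = {
	.drv		= { .name = "example-amba" },
	.probe		= example_probe,
	.remove		= example_remove,
	.id_table	= example_ids,
};

static int __init example_init(void)
{
	/* expands to __amba_driver_register(&example_driver, THIS_MODULE) */
	return amba_driver_register(&example_driver);
}
module_init(example_init);
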
diff --git a/include/linux/amba/clcd-regs.h b/include/linux/amba/clcd-regs.h
deleted file mode 100644
index 421b0fa90d6a..000000000000
--- a/include/linux/amba/clcd-regs.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * David A Rusling
- *
- * Copyright (C) 2001 ARM Limited
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#ifndef AMBA_CLCD_REGS_H
-#define AMBA_CLCD_REGS_H
-
-/*
- * CLCD Controller Internal Register addresses
- */
-#define CLCD_TIM0 0x00000000
-#define CLCD_TIM1 0x00000004
-#define CLCD_TIM2 0x00000008
-#define CLCD_TIM3 0x0000000c
-#define CLCD_UBAS 0x00000010
-#define CLCD_LBAS 0x00000014
-
-#define CLCD_PL110_IENB 0x00000018
-#define CLCD_PL110_CNTL 0x0000001c
-#define CLCD_PL110_STAT 0x00000020
-#define CLCD_PL110_INTR 0x00000024
-#define CLCD_PL110_UCUR 0x00000028
-#define CLCD_PL110_LCUR 0x0000002C
-
-#define CLCD_PL111_CNTL 0x00000018
-#define CLCD_PL111_IENB 0x0000001c
-#define CLCD_PL111_RIS 0x00000020
-#define CLCD_PL111_MIS 0x00000024
-#define CLCD_PL111_ICR 0x00000028
-#define CLCD_PL111_UCUR 0x0000002c
-#define CLCD_PL111_LCUR 0x00000030
-
-#define CLCD_PALL 0x00000200
-#define CLCD_PALETTE 0x00000200
-
-#define TIM2_PCD_LO_MASK GENMASK(4, 0)
-#define TIM2_PCD_LO_BITS 5
-#define TIM2_CLKSEL (1 << 5)
-#define TIM2_ACB_MASK GENMASK(10, 6)
-#define TIM2_IVS (1 << 11)
-#define TIM2_IHS (1 << 12)
-#define TIM2_IPC (1 << 13)
-#define TIM2_IOE (1 << 14)
-#define TIM2_BCD (1 << 26)
-#define TIM2_PCD_HI_MASK GENMASK(31, 27)
-#define TIM2_PCD_HI_BITS 5
-#define TIM2_PCD_HI_SHIFT 27
-
-#define CNTL_LCDEN (1 << 0)
-#define CNTL_LCDBPP1 (0 << 1)
-#define CNTL_LCDBPP2 (1 << 1)
-#define CNTL_LCDBPP4 (2 << 1)
-#define CNTL_LCDBPP8 (3 << 1)
-#define CNTL_LCDBPP16 (4 << 1)
-#define CNTL_LCDBPP16_565 (6 << 1)
-#define CNTL_LCDBPP16_444 (7 << 1)
-#define CNTL_LCDBPP24 (5 << 1)
-#define CNTL_LCDBW (1 << 4)
-#define CNTL_LCDTFT (1 << 5)
-#define CNTL_LCDMONO8 (1 << 6)
-#define CNTL_LCDDUAL (1 << 7)
-#define CNTL_BGR (1 << 8)
-#define CNTL_BEBO (1 << 9)
-#define CNTL_BEPO (1 << 10)
-#define CNTL_LCDPWR (1 << 11)
-#define CNTL_LCDVCOMP(x) ((x) << 12)
-#define CNTL_LDMAFIFOTIME (1 << 15)
-#define CNTL_WATERMARK (1 << 16)
-
-/* ST Microelectronics variant bits */
-#define CNTL_ST_1XBPP_444 0x0
-#define CNTL_ST_1XBPP_5551 (1 << 17)
-#define CNTL_ST_1XBPP_565 (1 << 18)
-#define CNTL_ST_CDWID_12 0x0
-#define CNTL_ST_CDWID_16 (1 << 19)
-#define CNTL_ST_CDWID_18 (1 << 20)
-#define CNTL_ST_CDWID_24 ((1 << 19)|(1 << 20))
-#define CNTL_ST_CEAEN (1 << 21)
-#define CNTL_ST_LCDBPP24_PACKED (6 << 1)
-
-#endif /* AMBA_CLCD_REGS_H */
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h
deleted file mode 100644
index b6e0cbeaf533..000000000000
--- a/include/linux/amba/clcd.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * linux/include/asm-arm/hardware/amba_clcd.h -- Integrator LCD panel.
- *
- * David A Rusling
- *
- * Copyright (C) 2001 ARM Limited
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-#include <linux/fb.h>
-#include <linux/amba/clcd-regs.h>
-
-enum {
- /* individual formats */
- CLCD_CAP_RGB444 = (1 << 0),
- CLCD_CAP_RGB5551 = (1 << 1),
- CLCD_CAP_RGB565 = (1 << 2),
- CLCD_CAP_RGB888 = (1 << 3),
- CLCD_CAP_BGR444 = (1 << 4),
- CLCD_CAP_BGR5551 = (1 << 5),
- CLCD_CAP_BGR565 = (1 << 6),
- CLCD_CAP_BGR888 = (1 << 7),
-
- /* connection layouts */
- CLCD_CAP_444 = CLCD_CAP_RGB444 | CLCD_CAP_BGR444,
- CLCD_CAP_5551 = CLCD_CAP_RGB5551 | CLCD_CAP_BGR5551,
- CLCD_CAP_565 = CLCD_CAP_RGB565 | CLCD_CAP_BGR565,
- CLCD_CAP_888 = CLCD_CAP_RGB888 | CLCD_CAP_BGR888,
-
- /* red/blue ordering */
- CLCD_CAP_RGB = CLCD_CAP_RGB444 | CLCD_CAP_RGB5551 |
- CLCD_CAP_RGB565 | CLCD_CAP_RGB888,
- CLCD_CAP_BGR = CLCD_CAP_BGR444 | CLCD_CAP_BGR5551 |
- CLCD_CAP_BGR565 | CLCD_CAP_BGR888,
-
- CLCD_CAP_ALL = CLCD_CAP_BGR | CLCD_CAP_RGB,
-};
-
-struct backlight_device;
-
-struct clcd_panel {
- struct fb_videomode mode;
- signed short width; /* width in mm */
- signed short height; /* height in mm */
- u32 tim2;
- u32 tim3;
- u32 cntl;
- u32 caps;
- unsigned int bpp:8,
- fixedtimings:1,
- grayscale:1;
- unsigned int connector;
- struct backlight_device *backlight;
- /*
- * If the B/R lines are switched between the CLCD
- * and the panel we need to know this and not try to
- * compensate with the BGR bit in the control register.
- */
- bool bgr_connection;
-};
-
-struct clcd_regs {
- u32 tim0;
- u32 tim1;
- u32 tim2;
- u32 tim3;
- u32 cntl;
- unsigned long pixclock;
-};
-
-struct clcd_fb;
-
-/*
- * the board-type specific routines
- */
-struct clcd_board {
- const char *name;
-
- /*
- * Optional. Hardware capability flags.
- */
- u32 caps;
-
- /*
- * Optional. Check whether the var structure is acceptable
- * for this display.
- */
- int (*check)(struct clcd_fb *fb, struct fb_var_screeninfo *var);
-
- /*
- * Compulsory. Decode fb->fb.var into regs->*. In the case of
- * fixed timing, set regs->* to the register values required.
- */
- void (*decode)(struct clcd_fb *fb, struct clcd_regs *regs);
-
- /*
- * Optional. Disable any extra display hardware.
- */
- void (*disable)(struct clcd_fb *);
-
- /*
- * Optional. Enable any extra display hardware.
- */
- void (*enable)(struct clcd_fb *);
-
- /*
- * Setup platform specific parts of CLCD driver
- */
- int (*setup)(struct clcd_fb *);
-
- /*
- * mmap the framebuffer memory
- */
- int (*mmap)(struct clcd_fb *, struct vm_area_struct *);
-
- /*
- * Remove platform specific parts of CLCD driver
- */
- void (*remove)(struct clcd_fb *);
-};
-
-struct amba_device;
-struct clk;
-
-/* this data structure describes each frame buffer device we find */
-struct clcd_fb {
- struct fb_info fb;
- struct amba_device *dev;
- struct clk *clk;
- struct clcd_panel *panel;
- struct clcd_board *board;
- void *board_data;
- void __iomem *regs;
- u16 off_ienb;
- u16 off_cntl;
- u32 clcd_cntl;
- u32 cmap[16];
- bool clk_enabled;
-};
-
-static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
-{
- struct fb_var_screeninfo *var = &fb->fb.var;
- u32 val, cpl;
-
- /*
- * Program the CLCD controller registers and start the CLCD
- */
- val = ((var->xres / 16) - 1) << 2;
- val |= (var->hsync_len - 1) << 8;
- val |= (var->right_margin - 1) << 16;
- val |= (var->left_margin - 1) << 24;
- regs->tim0 = val;
-
- val = var->yres;
- if (fb->panel->cntl & CNTL_LCDDUAL)
- val /= 2;
- val -= 1;
- val |= (var->vsync_len - 1) << 10;
- val |= var->lower_margin << 16;
- val |= var->upper_margin << 24;
- regs->tim1 = val;
-
- val = fb->panel->tim2;
- val |= var->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : TIM2_IHS;
- val |= var->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : TIM2_IVS;
-
- cpl = var->xres_virtual;
- if (fb->panel->cntl & CNTL_LCDTFT) /* TFT */
- /* / 1 */;
- else if (!var->grayscale) /* STN color */
- cpl = cpl * 8 / 3;
- else if (fb->panel->cntl & CNTL_LCDMONO8) /* STN monochrome, 8bit */
- cpl /= 8;
- else /* STN monochrome, 4bit */
- cpl /= 4;
-
- regs->tim2 = val | ((cpl - 1) << 16);
-
- regs->tim3 = fb->panel->tim3;
-
- val = fb->panel->cntl;
- if (var->grayscale)
- val |= CNTL_LCDBW;
-
- if (fb->panel->caps && fb->board->caps && var->bits_per_pixel >= 16) {
- /*
- * if board and panel supply capabilities, we can support
- * changing BGR/RGB depending on supplied parameters. Here
- * we switch to what the framebuffer is providing if need
- * be, so if the framebuffer is BGR but the display connection
- * is RGB (first case) we switch it around. Vice versa mutatis
- * mutandis if the framebuffer is RGB but the display connection
- * is BGR, we flip it around.
- */
- if (var->red.offset == 0)
- val &= ~CNTL_BGR;
- else
- val |= CNTL_BGR;
- if (fb->panel->bgr_connection)
- val ^= CNTL_BGR;
- }
-
- switch (var->bits_per_pixel) {
- case 1:
- val |= CNTL_LCDBPP1;
- break;
- case 2:
- val |= CNTL_LCDBPP2;
- break;
- case 4:
- val |= CNTL_LCDBPP4;
- break;
- case 8:
- val |= CNTL_LCDBPP8;
- break;
- case 16:
- /*
- * PL110 cannot choose between 5551 and 565 modes in its
- * control register. It is possible to use 565 with
- * custom external wiring.
- */
- if (amba_part(fb->dev) == 0x110 ||
- var->green.length == 5)
- val |= CNTL_LCDBPP16;
- else if (var->green.length == 6)
- val |= CNTL_LCDBPP16_565;
- else
- val |= CNTL_LCDBPP16_444;
- break;
- case 32:
- val |= CNTL_LCDBPP24;
- break;
- }
-
- regs->cntl = val;
- regs->pixclock = var->pixclock;
-}
-
-static inline int clcdfb_check(struct clcd_fb *fb, struct fb_var_screeninfo *var)
-{
- var->xres_virtual = var->xres = (var->xres + 15) & ~15;
- var->yres_virtual = var->yres = (var->yres + 1) & ~1;
-
-#define CHECK(e,l,h) (var->e < l || var->e > h)
- if (CHECK(right_margin, (5+1), 256) || /* back porch */
- CHECK(left_margin, (5+1), 256) || /* front porch */
- CHECK(hsync_len, (5+1), 256) ||
- var->xres > 4096 ||
- var->lower_margin > 255 || /* back porch */
- var->upper_margin > 255 || /* front porch */
- var->vsync_len > 32 ||
- var->yres > 1024)
- return -EINVAL;
-#undef CHECK
-
- /* single panel mode: PCD = max(PCD, 1) */
- /* dual panel mode: PCD = max(PCD, 5) */
-
- /*
- * You can't change the grayscale setting, and
- * we can only do non-interlaced video.
- */
- if (var->grayscale != fb->fb.var.grayscale ||
- (var->vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED)
- return -EINVAL;
-
-#define CHECK(e) (var->e != fb->fb.var.e)
- if (fb->panel->fixedtimings &&
- (CHECK(xres) ||
- CHECK(yres) ||
- CHECK(bits_per_pixel) ||
- CHECK(pixclock) ||
- CHECK(left_margin) ||
- CHECK(right_margin) ||
- CHECK(upper_margin) ||
- CHECK(lower_margin) ||
- CHECK(hsync_len) ||
- CHECK(vsync_len) ||
- CHECK(sync)))
- return -EINVAL;
-#undef CHECK
-
- var->nonstd = 0;
- var->accel_flags = 0;
-
- return 0;
-}
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index c92ebc39fc1f..6f96dc2209c0 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -13,17 +13,11 @@
* @ocr_mask: available voltages on the 4 pins from the block, this
* is ignored if a regulator is used, see the MMC_VDD_* masks in
* mmc/host.h
- * @ios_handler: a callback function to act on specfic ios changes,
- * used for example to control a levelshifter
- * mask into a value to be binary (or set some other custom bits
- * in MMCIPWR) or:ed and written into the MMCIPWR register of the
- * block. May also control external power based on the power_mode.
 * @status: if no GPIO line was given to the block, this function will
 * be called to determine whether a card is present in the MMC slot or not
*/
struct mmci_platform_data {
unsigned int ocr_mask;
- int (*ios_handler)(struct device *, struct mmc_ios *);
unsigned int (*status)(struct device *);
};
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
index 9bf58aac0df2..d7b07d0311e1 100644
--- a/include/linux/amba/pl022.h
+++ b/include/linux/amba/pl022.h
@@ -16,6 +16,7 @@
#ifndef _SSP_PL022_H
#define _SSP_PL022_H
+#include <linux/dmaengine.h>
#include <linux/types.h>
/**
@@ -224,6 +225,7 @@ struct dma_chan;
* struct pl022_ssp_master - device.platform_data for SPI controller devices.
* @bus_id: identifier for this bus
* @enable_dma: if true enables DMA driven transfers.
+ * @dma_filter: callback filter for dma_request_channel.
* @dma_rx_param: parameter to locate an RX DMA channel.
* @dma_tx_param: parameter to locate a TX DMA channel.
* @autosuspend_delay: delay in ms following transfer completion before the
@@ -235,7 +237,7 @@ struct dma_chan;
struct pl022_ssp_controller {
u16 bus_id;
u8 enable_dma:1;
- bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+ dma_filter_fn dma_filter;
void *dma_rx_param;
void *dma_tx_param;
int autosuspend_delay;
diff --git a/include/linux/amba/pl093.h b/include/linux/amba/pl093.h
deleted file mode 100644
index b17166e3b49a..000000000000
--- a/include/linux/amba/pl093.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* linux/amba/pl093.h
- *
- * Copyright (c) 2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * AMBA PL093 SSMC (synchronous static memory controller)
- * See DDI0236.pdf (r0p4) for more details
-*/
-
-#define SMB_BANK(x) ((x) * 0x20) /* each bank control set is 0x20 apart */
-
-/* Offsets for SMBxxxxRy registers */
-
-#define SMBIDCYR (0x00)
-#define SMBWSTRDR (0x04)
-#define SMBWSTWRR (0x08)
-#define SMBWSTOENR (0x0C)
-#define SMBWSTWENR (0x10)
-#define SMBCR (0x14)
-#define SMBSR (0x18)
-#define SMBWSTBRDR (0x1C)
-
-/* Masks for SMB registers */
-#define IDCY_MASK (0xf)
-#define WSTRD_MASK (0xf)
-#define WSTWR_MASK (0xf)
-#define WSTOEN_MASK (0xf)
-#define WSTWEN_MASK (0xf)
-
-/* Notes from datasheet:
- * WSTOEN <= WSTRD
- * WSTWEN <= WSTWR
- *
- * WSTOEN is not used with nWAIT
- */
-
-/* SMBCR bit definitions */
-#define SMBCR_BIWRITEEN (1 << 21)
-#define SMBCR_ADDRVALIDWRITEEN (1 << 20)
-#define SMBCR_SYNCWRITE (1 << 17)
-#define SMBCR_BMWRITE (1 << 16)
-#define SMBCR_WRAPREAD (1 << 14)
-#define SMBCR_BIREADEN (1 << 13)
-#define SMBCR_ADDRVALIDREADEN (1 << 12)
-#define SMBCR_SYNCREAD (1 << 9)
-#define SMBCR_BMREAD (1 << 8)
-#define SMBCR_SMBLSPOL (1 << 6)
-#define SMBCR_WP (1 << 3)
-#define SMBCR_WAITEN (1 << 2)
-#define SMBCR_WAITPOL (1 << 1)
-#define SMBCR_RBLE (1 << 0)
-
-#define SMBCR_BURSTLENWRITE_MASK (3 << 18)
-#define SMBCR_BURSTLENWRITE_4 (0 << 18)
-#define SMBCR_BURSTLENWRITE_8 (1 << 18)
-#define SMBCR_BURSTLENWRITE_RESERVED (2 << 18)
-#define SMBCR_BURSTLENWRITE_CONTINUOUS (3 << 18)
-
-#define SMBCR_BURSTLENREAD_MASK (3 << 10)
-#define SMBCR_BURSTLENREAD_4 (0 << 10)
-#define SMBCR_BURSTLENREAD_8 (1 << 10)
-#define SMBCR_BURSTLENREAD_16 (2 << 10)
-#define SMBCR_BURSTLENREAD_CONTINUOUS (3 << 10)
-
-#define SMBCR_MW_MASK (3 << 4)
-#define SMBCR_MW_8BIT (0 << 4)
-#define SMBCR_MW_16BIT (1 << 4)
-#define SMBCR_MW_M32BIT (2 << 4)
-
-/* SSMC status registers */
-#define SSMCCSR (0x200)
-#define SSMCCR (0x204)
-#define SSMCITCR (0x208)
-#define SSMCITIP (0x20C)
-#define SSMCITIOP (0x210)
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index a1307b58cc2c..9120de05ead0 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -10,6 +10,11 @@
#ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H
#define ASM_ARM_HARDWARE_SERIAL_AMBA_H
+#ifndef __ASSEMBLY__
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#endif
+
#include <linux/types.h>
/* -------------------------------------------------------------------------------
@@ -70,141 +75,145 @@
#define ZX_UART011_ICR 0x4c
#define ZX_UART011_DMACR 0x50
-#define UART011_DR_OE (1 << 11)
-#define UART011_DR_BE (1 << 10)
-#define UART011_DR_PE (1 << 9)
-#define UART011_DR_FE (1 << 8)
-
-#define UART01x_RSR_OE 0x08
-#define UART01x_RSR_BE 0x04
-#define UART01x_RSR_PE 0x02
-#define UART01x_RSR_FE 0x01
-
-#define UART011_FR_RI 0x100
-#define UART011_FR_TXFE 0x080
-#define UART011_FR_RXFF 0x040
-#define UART01x_FR_TXFF 0x020
-#define UART01x_FR_RXFE 0x010
-#define UART01x_FR_BUSY 0x008
-#define UART01x_FR_DCD 0x004
-#define UART01x_FR_DSR 0x002
-#define UART01x_FR_CTS 0x001
+#define UART011_DR_OE BIT(11)
+#define UART011_DR_BE BIT(10)
+#define UART011_DR_PE BIT(9)
+#define UART011_DR_FE BIT(8)
+
+#define UART01x_RSR_OE BIT(3)
+#define UART01x_RSR_BE BIT(2)
+#define UART01x_RSR_PE BIT(1)
+#define UART01x_RSR_FE BIT(0)
+
+#define UART011_FR_RI BIT(8)
+#define UART011_FR_TXFE BIT(7)
+#define UART011_FR_RXFF BIT(6)
+#define UART01x_FR_TXFF (1 << 5) /* used in ASM */
+#define UART01x_FR_RXFE BIT(4)
+#define UART01x_FR_BUSY (1 << 3) /* used in ASM */
+#define UART01x_FR_DCD BIT(2)
+#define UART01x_FR_DSR BIT(1)
+#define UART01x_FR_CTS BIT(0)
#define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY)
/*
 * Some bits of the Flag Register on ZTE devices sit at different positions
 * from the standard ones.
*/
-#define ZX_UART01x_FR_BUSY 0x100
-#define ZX_UART01x_FR_DSR 0x008
-#define ZX_UART01x_FR_CTS 0x002
-#define ZX_UART011_FR_RI 0x001
-
-#define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */
-#define UART011_CR_RTSEN 0x4000 /* RTS hardware flow control */
-#define UART011_CR_OUT2 0x2000 /* OUT2 */
-#define UART011_CR_OUT1 0x1000 /* OUT1 */
-#define UART011_CR_RTS 0x0800 /* RTS */
-#define UART011_CR_DTR 0x0400 /* DTR */
-#define UART011_CR_RXE 0x0200 /* receive enable */
-#define UART011_CR_TXE 0x0100 /* transmit enable */
-#define UART011_CR_LBE 0x0080 /* loopback enable */
-#define UART010_CR_RTIE 0x0040
-#define UART010_CR_TIE 0x0020
-#define UART010_CR_RIE 0x0010
-#define UART010_CR_MSIE 0x0008
-#define ST_UART011_CR_OVSFACT 0x0008 /* Oversampling factor */
-#define UART01x_CR_IIRLP 0x0004 /* SIR low power mode */
-#define UART01x_CR_SIREN 0x0002 /* SIR enable */
-#define UART01x_CR_UARTEN 0x0001 /* UART enable */
-
-#define UART011_LCRH_SPS 0x80
+#define ZX_UART01x_FR_BUSY BIT(8)
+#define ZX_UART01x_FR_DSR BIT(3)
+#define ZX_UART01x_FR_CTS BIT(1)
+#define ZX_UART011_FR_RI BIT(0)
+
+#define UART011_CR_CTSEN BIT(15) /* CTS hardware flow control */
+#define UART011_CR_RTSEN BIT(14) /* RTS hardware flow control */
+#define UART011_CR_OUT2 BIT(13) /* OUT2 */
+#define UART011_CR_OUT1 BIT(12) /* OUT1 */
+#define UART011_CR_RTS BIT(11) /* RTS */
+#define UART011_CR_DTR BIT(10) /* DTR */
+#define UART011_CR_RXE BIT(9) /* receive enable */
+#define UART011_CR_TXE BIT(8) /* transmit enable */
+#define UART011_CR_LBE BIT(7) /* loopback enable */
+#define UART010_CR_RTIE BIT(6)
+#define UART010_CR_TIE BIT(5)
+#define UART010_CR_RIE BIT(4)
+#define UART010_CR_MSIE BIT(3)
+#define ST_UART011_CR_OVSFACT BIT(3) /* Oversampling factor */
+#define UART01x_CR_IIRLP BIT(2) /* SIR low power mode */
+#define UART01x_CR_SIREN BIT(1) /* SIR enable */
+#define UART01x_CR_UARTEN BIT(0) /* UART enable */
+
+#define UART011_LCRH_SPS BIT(7)
#define UART01x_LCRH_WLEN_8 0x60
#define UART01x_LCRH_WLEN_7 0x40
#define UART01x_LCRH_WLEN_6 0x20
#define UART01x_LCRH_WLEN_5 0x00
-#define UART01x_LCRH_FEN 0x10
-#define UART01x_LCRH_STP2 0x08
-#define UART01x_LCRH_EPS 0x04
-#define UART01x_LCRH_PEN 0x02
-#define UART01x_LCRH_BRK 0x01
-
-#define ST_UART011_DMAWM_RX_1 (0 << 3)
-#define ST_UART011_DMAWM_RX_2 (1 << 3)
-#define ST_UART011_DMAWM_RX_4 (2 << 3)
-#define ST_UART011_DMAWM_RX_8 (3 << 3)
-#define ST_UART011_DMAWM_RX_16 (4 << 3)
-#define ST_UART011_DMAWM_RX_32 (5 << 3)
-#define ST_UART011_DMAWM_RX_48 (6 << 3)
-#define ST_UART011_DMAWM_TX_1 0
-#define ST_UART011_DMAWM_TX_2 1
-#define ST_UART011_DMAWM_TX_4 2
-#define ST_UART011_DMAWM_TX_8 3
-#define ST_UART011_DMAWM_TX_16 4
-#define ST_UART011_DMAWM_TX_32 5
-#define ST_UART011_DMAWM_TX_48 6
-
-#define UART010_IIR_RTIS 0x08
-#define UART010_IIR_TIS 0x04
-#define UART010_IIR_RIS 0x02
-#define UART010_IIR_MIS 0x01
-
-#define UART011_IFLS_RX1_8 (0 << 3)
-#define UART011_IFLS_RX2_8 (1 << 3)
-#define UART011_IFLS_RX4_8 (2 << 3)
-#define UART011_IFLS_RX6_8 (3 << 3)
-#define UART011_IFLS_RX7_8 (4 << 3)
-#define UART011_IFLS_TX1_8 (0 << 0)
-#define UART011_IFLS_TX2_8 (1 << 0)
-#define UART011_IFLS_TX4_8 (2 << 0)
-#define UART011_IFLS_TX6_8 (3 << 0)
-#define UART011_IFLS_TX7_8 (4 << 0)
+#define UART01x_LCRH_FEN BIT(4)
+#define UART01x_LCRH_STP2 BIT(3)
+#define UART01x_LCRH_EPS BIT(2)
+#define UART01x_LCRH_PEN BIT(1)
+#define UART01x_LCRH_BRK BIT(0)
+
+#define ST_UART011_DMAWM_RX GENMASK(5, 3)
+#define ST_UART011_DMAWM_RX_1 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 0)
+#define ST_UART011_DMAWM_RX_2 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 1)
+#define ST_UART011_DMAWM_RX_4 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 2)
+#define ST_UART011_DMAWM_RX_8 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 3)
+#define ST_UART011_DMAWM_RX_16 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 4)
+#define ST_UART011_DMAWM_RX_32 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 5)
+#define ST_UART011_DMAWM_RX_48 FIELD_PREP_CONST(ST_UART011_DMAWM_RX, 6)
+#define ST_UART011_DMAWM_TX GENMASK(2, 0)
+#define ST_UART011_DMAWM_TX_1 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 0)
+#define ST_UART011_DMAWM_TX_2 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 1)
+#define ST_UART011_DMAWM_TX_4 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 2)
+#define ST_UART011_DMAWM_TX_8 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 3)
+#define ST_UART011_DMAWM_TX_16 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 4)
+#define ST_UART011_DMAWM_TX_32 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 5)
+#define ST_UART011_DMAWM_TX_48 FIELD_PREP_CONST(ST_UART011_DMAWM_TX, 6)
+
+#define UART010_IIR_RTIS BIT(3)
+#define UART010_IIR_TIS BIT(2)
+#define UART010_IIR_RIS BIT(1)
+#define UART010_IIR_MIS BIT(0)
+
+#define UART011_IFLS_RXIFLSEL GENMASK(5, 3)
+#define UART011_IFLS_RX1_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 0)
+#define UART011_IFLS_RX2_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 1)
+#define UART011_IFLS_RX4_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 2)
+#define UART011_IFLS_RX6_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 3)
+#define UART011_IFLS_RX7_8 FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 4)
+#define UART011_IFLS_TXIFLSEL GENMASK(2, 0)
+#define UART011_IFLS_TX1_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 0)
+#define UART011_IFLS_TX2_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 1)
+#define UART011_IFLS_TX4_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 2)
+#define UART011_IFLS_TX6_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 3)
+#define UART011_IFLS_TX7_8 FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 4)
/* special values for ST vendor with deeper fifo */
-#define UART011_IFLS_RX_HALF (5 << 3)
-#define UART011_IFLS_TX_HALF (5 << 0)
-
-#define UART011_OEIM (1 << 10) /* overrun error interrupt mask */
-#define UART011_BEIM (1 << 9) /* break error interrupt mask */
-#define UART011_PEIM (1 << 8) /* parity error interrupt mask */
-#define UART011_FEIM (1 << 7) /* framing error interrupt mask */
-#define UART011_RTIM (1 << 6) /* receive timeout interrupt mask */
-#define UART011_TXIM (1 << 5) /* transmit interrupt mask */
-#define UART011_RXIM (1 << 4) /* receive interrupt mask */
-#define UART011_DSRMIM (1 << 3) /* DSR interrupt mask */
-#define UART011_DCDMIM (1 << 2) /* DCD interrupt mask */
-#define UART011_CTSMIM (1 << 1) /* CTS interrupt mask */
-#define UART011_RIMIM (1 << 0) /* RI interrupt mask */
-
-#define UART011_OEIS (1 << 10) /* overrun error interrupt status */
-#define UART011_BEIS (1 << 9) /* break error interrupt status */
-#define UART011_PEIS (1 << 8) /* parity error interrupt status */
-#define UART011_FEIS (1 << 7) /* framing error interrupt status */
-#define UART011_RTIS (1 << 6) /* receive timeout interrupt status */
-#define UART011_TXIS (1 << 5) /* transmit interrupt status */
-#define UART011_RXIS (1 << 4) /* receive interrupt status */
-#define UART011_DSRMIS (1 << 3) /* DSR interrupt status */
-#define UART011_DCDMIS (1 << 2) /* DCD interrupt status */
-#define UART011_CTSMIS (1 << 1) /* CTS interrupt status */
-#define UART011_RIMIS (1 << 0) /* RI interrupt status */
-
-#define UART011_OEIC (1 << 10) /* overrun error interrupt clear */
-#define UART011_BEIC (1 << 9) /* break error interrupt clear */
-#define UART011_PEIC (1 << 8) /* parity error interrupt clear */
-#define UART011_FEIC (1 << 7) /* framing error interrupt clear */
-#define UART011_RTIC (1 << 6) /* receive timeout interrupt clear */
-#define UART011_TXIC (1 << 5) /* transmit interrupt clear */
-#define UART011_RXIC (1 << 4) /* receive interrupt clear */
-#define UART011_DSRMIC (1 << 3) /* DSR interrupt clear */
-#define UART011_DCDMIC (1 << 2) /* DCD interrupt clear */
-#define UART011_CTSMIC (1 << 1) /* CTS interrupt clear */
-#define UART011_RIMIC (1 << 0) /* RI interrupt clear */
-
-#define UART011_DMAONERR (1 << 2) /* disable dma on error */
-#define UART011_TXDMAE (1 << 1) /* enable transmit dma */
-#define UART011_RXDMAE (1 << 0) /* enable receive dma */
-
-#define UART01x_RSR_ANY (UART01x_RSR_OE|UART01x_RSR_BE|UART01x_RSR_PE|UART01x_RSR_FE)
-#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS)
+#define UART011_IFLS_RX_HALF FIELD_PREP_CONST(UART011_IFLS_RXIFLSEL, 5)
+#define UART011_IFLS_TX_HALF FIELD_PREP_CONST(UART011_IFLS_TXIFLSEL, 5)
+
+#define UART011_OEIM BIT(10) /* overrun error interrupt mask */
+#define UART011_BEIM BIT(9) /* break error interrupt mask */
+#define UART011_PEIM BIT(8) /* parity error interrupt mask */
+#define UART011_FEIM BIT(7) /* framing error interrupt mask */
+#define UART011_RTIM BIT(6) /* receive timeout interrupt mask */
+#define UART011_TXIM BIT(5) /* transmit interrupt mask */
+#define UART011_RXIM BIT(4) /* receive interrupt mask */
+#define UART011_DSRMIM BIT(3) /* DSR interrupt mask */
+#define UART011_DCDMIM BIT(2) /* DCD interrupt mask */
+#define UART011_CTSMIM BIT(1) /* CTS interrupt mask */
+#define UART011_RIMIM BIT(0) /* RI interrupt mask */
+
+#define UART011_OEIS BIT(10) /* overrun error interrupt status */
+#define UART011_BEIS BIT(9) /* break error interrupt status */
+#define UART011_PEIS BIT(8) /* parity error interrupt status */
+#define UART011_FEIS BIT(7) /* framing error interrupt status */
+#define UART011_RTIS BIT(6) /* receive timeout interrupt status */
+#define UART011_TXIS BIT(5) /* transmit interrupt status */
+#define UART011_RXIS BIT(4) /* receive interrupt status */
+#define UART011_DSRMIS BIT(3) /* DSR interrupt status */
+#define UART011_DCDMIS BIT(2) /* DCD interrupt status */
+#define UART011_CTSMIS BIT(1) /* CTS interrupt status */
+#define UART011_RIMIS BIT(0) /* RI interrupt status */
+
+#define UART011_OEIC BIT(10) /* overrun error interrupt clear */
+#define UART011_BEIC BIT(9) /* break error interrupt clear */
+#define UART011_PEIC BIT(8) /* parity error interrupt clear */
+#define UART011_FEIC BIT(7) /* framing error interrupt clear */
+#define UART011_RTIC BIT(6) /* receive timeout interrupt clear */
+#define UART011_TXIC BIT(5) /* transmit interrupt clear */
+#define UART011_RXIC BIT(4) /* receive interrupt clear */
+#define UART011_DSRMIC BIT(3) /* DSR interrupt clear */
+#define UART011_DCDMIC BIT(2) /* DCD interrupt clear */
+#define UART011_CTSMIC BIT(1) /* CTS interrupt clear */
+#define UART011_RIMIC BIT(0) /* RI interrupt clear */
+
+#define UART011_DMAONERR BIT(2) /* disable dma on error */
+#define UART011_TXDMAE BIT(1) /* enable transmit dma */
+#define UART011_RXDMAE BIT(0) /* enable receive dma */
+
+#define UART01x_RSR_ANY (UART01x_RSR_OE | UART01x_RSR_BE | UART01x_RSR_PE | UART01x_RSR_FE)
+#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD | UART01x_FR_DSR | UART01x_FR_CTS)
#ifndef __ASSEMBLY__
struct amba_device; /* in uncompress this is included but amba/bus.h is not */
@@ -220,8 +229,8 @@ struct amba_pl011_data {
bool dma_rx_poll_enable;
unsigned int dma_rx_poll_rate;
unsigned int dma_rx_poll_timeout;
- void (*init) (void);
- void (*exit) (void);
+ void (*init)(void);
+ void (*exit)(void);
};
#endif
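
The BIT()/GENMASK()/FIELD_PREP_CONST() conversion is meant to be
value-preserving. A few compile-time spot checks (a sketch, not part of the
patch) confirming that the new forms encode the old constants:

#include <linux/amba/serial.h>
#include <linux/build_bug.h>

static_assert(UART01x_RSR_OE == 0x08);			/* BIT(3) */
static_assert(UART011_CR_CTSEN == 0x8000);		/* BIT(15) */
static_assert(UART011_IFLS_RX4_8 == (2 << 3));		/* FIELD_PREP_CONST */
static_assert(ST_UART011_DMAWM_TX_16 == 4);		/* FIELD_PREP_CONST */
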
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 58e6c3806c09..edcee9f5335a 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -12,151 +12,16 @@
struct amd_iommu;
-/*
- * This is mainly used to communicate information back-and-forth
- * between SVM and IOMMU for setting up and tearing down posted
- * interrupt
- */
-struct amd_iommu_pi_data {
- u32 ga_tag;
- u32 prev_ga_tag;
- u64 base;
- bool is_guest_mode;
- struct vcpu_data *vcpu_data;
- void *ir_data;
-};
-
#ifdef CONFIG_AMD_IOMMU
struct task_struct;
struct pci_dev;
-extern int amd_iommu_detect(void);
-extern int amd_iommu_init_hardware(void);
-
-/**
- * amd_iommu_init_device() - Init device for use with IOMMUv2 driver
- * @pdev: The PCI device to initialize
- * @pasids: Number of PASIDs to support for this device
- *
- * This function does all setup for the device pdev so that it can be
- * used with IOMMUv2.
- * Returns 0 on success or negative value on error.
- */
-extern int amd_iommu_init_device(struct pci_dev *pdev, int pasids);
-
-/**
- * amd_iommu_free_device() - Free all IOMMUv2 related device resources
- * and disable IOMMUv2 usage for this device
- * @pdev: The PCI device to disable IOMMUv2 usage for'
- */
-extern void amd_iommu_free_device(struct pci_dev *pdev);
-
-/**
- * amd_iommu_bind_pasid() - Bind a given task to a PASID on a device
- * @pdev: The PCI device to bind the task to
- * @pasid: The PASID on the device the task should be bound to
- * @task: the task to bind
- *
- * The function returns 0 on success or a negative value on error.
- */
-extern int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
- struct task_struct *task);
-
-/**
- * amd_iommu_unbind_pasid() - Unbind a PASID from its task on
- * a device
- * @pdev: The device of the PASID
- * @pasid: The PASID to unbind
- *
- * When this function returns the device is no longer using the PASID
- * and the PASID is no longer bound to its task.
- */
-extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid);
-
-/**
- * amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed
- * PRI requests
- * @pdev: The PCI device the call-back should be registered for
- * @cb: The call-back function
- *
- * The IOMMUv2 driver invokes this call-back when it is unable to
- * successfully handle a PRI request. The device driver can then decide
- * which PRI response the device should see. Possible return values for
- * the call-back are:
- *
- * - AMD_IOMMU_INV_PRI_RSP_SUCCESS - Send SUCCESS back to the device
- * - AMD_IOMMU_INV_PRI_RSP_INVALID - Send INVALID back to the device
- * - AMD_IOMMU_INV_PRI_RSP_FAIL - Send Failure back to the device,
- * the device is required to disable
- * PRI when it receives this response
- *
- * The function returns 0 on success or negative value on error.
- */
-#define AMD_IOMMU_INV_PRI_RSP_SUCCESS 0
-#define AMD_IOMMU_INV_PRI_RSP_INVALID 1
-#define AMD_IOMMU_INV_PRI_RSP_FAIL 2
-
-typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev,
- u32 pasid,
- unsigned long address,
- u16);
-
-extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
- amd_iommu_invalid_ppr_cb cb);
-
-#define PPR_FAULT_EXEC (1 << 1)
-#define PPR_FAULT_READ (1 << 2)
-#define PPR_FAULT_WRITE (1 << 5)
-#define PPR_FAULT_USER (1 << 6)
-#define PPR_FAULT_RSVD (1 << 7)
-#define PPR_FAULT_GN (1 << 8)
-
-/**
- * amd_iommu_device_info() - Get information about IOMMUv2 support of a
- * PCI device
- * @pdev: PCI device to query information from
- * @info: A pointer to an amd_iommu_device_info structure which will contain
- * the information about the PCI device
- *
- * Returns 0 on success, negative value on error
- */
-
-#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP 0x1 /* ATS feature supported */
-#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP 0x2 /* PRI feature supported */
-#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP 0x4 /* PASID context supported */
-#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP 0x8 /* Device may request execution
- on memory pages */
-#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP 0x10 /* Device may request
- super-user privileges */
-
-struct amd_iommu_device_info {
- int max_pasids;
- u32 flags;
-};
-
-extern int amd_iommu_device_info(struct pci_dev *pdev,
- struct amd_iommu_device_info *info);
-
-/**
- * amd_iommu_set_invalidate_ctx_cb() - Register a call-back for invalidating
- * a pasid context. This call-back is
- * invoked when the IOMMUv2 driver needs to
- * invalidate a PASID context, for example
- * because the task that is bound to that
- * context is about to exit.
- *
- * @pdev: The PCI device the call-back should be registered for
- * @cb: The call-back function
- */
-
-typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, u32 pasid);
+extern void amd_iommu_detect(void);
-extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
- amd_iommu_invalidate_ctx cb);
#else /* CONFIG_AMD_IOMMU */
-static inline int amd_iommu_detect(void) { return -ENODEV; }
+static inline void amd_iommu_detect(void) { }
#endif /* CONFIG_AMD_IOMMU */
@@ -165,10 +30,8 @@ static inline int amd_iommu_detect(void) { return -ENODEV; }
/* IOMMU AVIC Function */
extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));
-extern int
-amd_iommu_update_ga(int cpu, bool is_run, void *data);
-
-extern int amd_iommu_activate_guest_mode(void *data);
+extern int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr);
+extern int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr);
extern int amd_iommu_deactivate_guest_mode(void *data);
#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
@@ -179,13 +42,12 @@ amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
return 0;
}
-static inline int
-amd_iommu_update_ga(int cpu, bool is_run, void *data)
+static inline int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr)
{
return 0;
}
-static inline int amd_iommu_activate_guest_mode(void *data)
+static inline int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr)
{
return 0;
}
@@ -206,4 +68,12 @@ int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn,
u64 *value);
struct amd_iommu *get_amd_iommu(unsigned int idx);
+#ifdef CONFIG_KVM_AMD_SEV
+int amd_iommu_snp_disable(void);
+extern bool amd_iommu_sev_tio_supported(void);
+#else
+static inline int amd_iommu_snp_disable(void) { return 0; }
+static inline bool amd_iommu_sev_tio_supported(void) { return false; }
+#endif
+
#endif /* _ASM_X86_AMD_IOMMU_H */
diff --git a/include/linux/amd-pmf-io.h b/include/linux/amd-pmf-io.h
new file mode 100644
index 000000000000..6fa510f419c0
--- /dev/null
+++ b/include/linux/amd-pmf-io.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Platform Management Framework Interface
+ *
+ * Copyright (c) 2023, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Authors: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ * Basavaraj Natikar <Basavaraj.Natikar@amd.com>
+ */
+
+#ifndef AMD_PMF_IO_H
+#define AMD_PMF_IO_H
+
+#include <linux/types.h>
+
+/**
+ * enum sfh_message_type - SFH message types used to query MP2 FW
+ * @MT_HPD: Message ID to query human presence info from MP2 FW
+ * @MT_ALS: Message ID to query ambient light info from MP2 FW
+ * @MT_SRA: Message ID to query SRA data from MP2 FW
+ */
+enum sfh_message_type {
+ MT_HPD,
+ MT_ALS,
+ MT_SRA,
+};
+
+/**
+ * enum sfh_hpd_info - Human presence information from the HPD sensor
+ * @SFH_NOT_DETECTED: No HPD sensor connection detected by MP2 FW
+ * @SFH_USER_PRESENT: HPD sensor reports the user as present
+ * @SFH_USER_AWAY: HPD sensor reports the user as away
+ */
+enum sfh_hpd_info {
+ SFH_NOT_DETECTED,
+ SFH_USER_PRESENT,
+ SFH_USER_AWAY,
+};
+
+/**
+ * struct amd_sfh_info - sensor information reported by MP2 FW
+ * @ambient_light: Populates the ambient light information
+ * @user_present: Populates the user presence information
+ * @platform_type: Operating modes (clamshell, flat, tent, etc.)
+ * @laptop_placement: Device states (ontable, onlap, outbag)
+ */
+struct amd_sfh_info {
+ u32 ambient_light;
+ u8 user_present;
+ u32 platform_type;
+ u32 laptop_placement;
+};
+
+enum laptop_placement {
+ LP_UNKNOWN = 0,
+ ON_TABLE,
+ ON_LAP_MOTION,
+ IN_BAG,
+ OUT_OF_BAG,
+ LP_UNDEFINED,
+};
+
+int amd_get_sfh_info(struct amd_sfh_info *sfh_info, enum sfh_message_type op);
+#endif
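
A hedged consumer sketch of the query API above (caller name illustrative);
amd_get_sfh_info() fills the struct for the requested message type:

#include <linux/amd-pmf-io.h>

static bool example_user_present(void)
{
	struct amd_sfh_info info = {};

	if (amd_get_sfh_info(&info, MT_HPD))
		return false;

	return info.user_present == SFH_USER_PRESENT;
}
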
diff --git a/include/linux/annotate.h b/include/linux/annotate.h
new file mode 100644
index 000000000000..2f1599c9e573
--- /dev/null
+++ b/include/linux/annotate.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ANNOTATE_H
+#define _LINUX_ANNOTATE_H
+
+#include <linux/objtool_types.h>
+
+#ifdef CONFIG_OBJTOOL
+
+#define __ASM_ANNOTATE(section, label, type) \
+ .pushsection section, "M", @progbits, 8; \
+ .long label - ., type; \
+ .popsection
+
+#ifndef __ASSEMBLY__
+
+#define ASM_ANNOTATE_LABEL(label, type) \
+ __stringify(__ASM_ANNOTATE(.discard.annotate_insn, label, type))
+
+#define ASM_ANNOTATE(type) \
+ "911: " \
+ __stringify(__ASM_ANNOTATE(.discard.annotate_insn, 911b, type))
+
+#define ASM_ANNOTATE_DATA(type) \
+ "912: " \
+ __stringify(__ASM_ANNOTATE(.discard.annotate_data, 912b, type))
+
+#else /* __ASSEMBLY__ */
+
+.macro ANNOTATE type
+.Lhere_\@:
+ __ASM_ANNOTATE(.discard.annotate_insn, .Lhere_\@, \type)
+.endm
+
+.macro ANNOTATE_DATA type
+.Lhere_\@:
+ __ASM_ANNOTATE(.discard.annotate_data, .Lhere_\@, \type)
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#else /* !CONFIG_OBJTOOL */
+#ifndef __ASSEMBLY__
+#define ASM_ANNOTATE_LABEL(label, type) ""
+#define ASM_ANNOTATE(type)
+#define ASM_ANNOTATE_DATA(type)
+#else /* __ASSEMBLY__ */
+.macro ANNOTATE type
+.endm
+.macro ANNOTATE_DATA type
+.endm
+#endif /* __ASSEMBLY__ */
+#endif /* !CONFIG_OBJTOOL */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Annotate away the various 'relocation to !ENDBR' complaints, knowing that
+ * these relocations will never be used for indirect calls.
+ */
+#define ANNOTATE_NOENDBR ASM_ANNOTATE(ANNOTYPE_NOENDBR)
+#define ANNOTATE_NOENDBR_SYM(sym) asm(ASM_ANNOTATE_LABEL(sym, ANNOTYPE_NOENDBR))
+
+/*
+ * This should be used immediately before an indirect jump/call. It tells
+ * objtool the subsequent indirect jump/call is vouched safe for retpoline
+ * builds.
+ */
+#define ANNOTATE_RETPOLINE_SAFE ASM_ANNOTATE(ANNOTYPE_RETPOLINE_SAFE)
+/*
+ * See linux/instrumentation.h
+ */
+#define ANNOTATE_INSTR_BEGIN(label) ASM_ANNOTATE_LABEL(label, ANNOTYPE_INSTR_BEGIN)
+#define ANNOTATE_INSTR_END(label) ASM_ANNOTATE_LABEL(label, ANNOTYPE_INSTR_END)
+/*
+ * objtool annotation to ignore the alternatives and only consider the original
+ * instruction(s).
+ */
+#define ANNOTATE_IGNORE_ALTERNATIVE ASM_ANNOTATE(ANNOTYPE_IGNORE_ALTS)
+/*
+ * This macro indicates that the following intra-function call is valid.
+ * Any non-annotated intra-function call will cause objtool to issue a warning.
+ */
+#define ANNOTATE_INTRA_FUNCTION_CALL ASM_ANNOTATE(ANNOTYPE_INTRA_FUNCTION_CALL)
+/*
+ * Use objtool to validate the entry requirement that all code paths do
+ * VALIDATE_UNRET_END before RET.
+ *
+ * NOTE: The macro must be used at the beginning of a global symbol, otherwise
+ * it will be ignored.
+ */
+#define ANNOTATE_UNRET_BEGIN ASM_ANNOTATE(ANNOTYPE_UNRET_BEGIN)
+/*
+ * This should be used to refer to an instruction that is considered
+ * terminating, like a noreturn CALL or UD2 when we know they are not -- eg
+ * WARN using UD2.
+ */
+#define ANNOTATE_REACHABLE(label) ASM_ANNOTATE_LABEL(label, ANNOTYPE_REACHABLE)
+/*
+ * This should not be used; it annotates away CFI violations. There are a few
+ * valid use cases like kexec handover to the next kernel image, and there is
+ * no security concern there.
+ *
+ * There are also a few real issues annotated away, like EFI because we can't
+ * control the EFI code.
+ */
+#define ANNOTATE_NOCFI_SYM(sym) asm(ASM_ANNOTATE_LABEL(sym, ANNOTYPE_NOCFI))
+
+/*
+ * Annotate a special section entry. This enables livepatch module generation
+ * to find and extract individual special section entries as needed.
+ */
+#define ANNOTATE_DATA_SPECIAL ASM_ANNOTATE_DATA(ANNOTYPE_DATA_SPECIAL)
+
+#else /* __ASSEMBLY__ */
+#define ANNOTATE_NOENDBR ANNOTATE type=ANNOTYPE_NOENDBR
+#define ANNOTATE_RETPOLINE_SAFE ANNOTATE type=ANNOTYPE_RETPOLINE_SAFE
+/* ANNOTATE_INSTR_BEGIN ANNOTATE type=ANNOTYPE_INSTR_BEGIN */
+/* ANNOTATE_INSTR_END ANNOTATE type=ANNOTYPE_INSTR_END */
+#define ANNOTATE_IGNORE_ALTERNATIVE ANNOTATE type=ANNOTYPE_IGNORE_ALTS
+#define ANNOTATE_INTRA_FUNCTION_CALL ANNOTATE type=ANNOTYPE_INTRA_FUNCTION_CALL
+#define ANNOTATE_UNRET_BEGIN ANNOTATE type=ANNOTYPE_UNRET_BEGIN
+#define ANNOTATE_REACHABLE ANNOTATE type=ANNOTYPE_REACHABLE
+#define ANNOTATE_NOCFI_SYM ANNOTATE type=ANNOTYPE_NOCFI
+#define ANNOTATE_DATA_SPECIAL ANNOTATE_DATA type=ANNOTYPE_DATA_SPECIAL
+#endif /* __ASSEMBLY__ */
+
+#endif /* _LINUX_ANNOTATE_H */
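
To illustrate how the C-side annotations are consumed, a hedged x86 sketch (the function is hypothetical; ANNOTATE_RETPOLINE_SAFE is the macro defined above, and it degrades to nothing when CONFIG_OBJTOOL is off):

    #include <linux/annotate.h>

    /* Hypothetical: vouch for one indirect call so objtool stays quiet. */
    static void call_vetted(void (*fn)(void))
    {
            asm volatile(ANNOTATE_RETPOLINE_SAFE
                         "call *%0\n\t"
                         : : "r" (fn) : "memory");
    }
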
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
index 71881a2b6f78..edef565c2a1a 100644
--- a/include/linux/anon_inodes.h
+++ b/include/linux/anon_inodes.h
@@ -9,15 +9,24 @@
#ifndef _LINUX_ANON_INODES_H
#define _LINUX_ANON_INODES_H
+#include <linux/types.h>
+
struct file_operations;
struct inode;
struct file *anon_inode_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags);
+struct file *anon_inode_getfile_fmode(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags, fmode_t f_mode);
+struct file *anon_inode_create_getfile(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags,
+ const struct inode *context_inode);
int anon_inode_getfd(const char *name, const struct file_operations *fops,
void *priv, int flags);
-int anon_inode_getfd_secure(const char *name,
+int anon_inode_create_getfd(const char *name,
const struct file_operations *fops,
void *priv, int flags,
const struct inode *context_inode);
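
For context, a typical caller of the fd helpers looks roughly like this sketch (the fops and subsystem name are hypothetical; anon_inode_getfd() is the API declared above):

    #include <linux/anon_inodes.h>
    #include <linux/fs.h>
    #include <linux/module.h>

    static const struct file_operations my_fops = {
            .owner = THIS_MODULE,
    };

    /* Hypothetical: wrap @priv in an anonymous-inode fd for userspace. */
    static int my_create_fd(void *priv)
    {
            return anon_inode_getfd("[my-subsys]", &my_fops, priv,
                                    O_RDWR | O_CLOEXEC);
    }
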
diff --git a/include/linux/aperture.h b/include/linux/aperture.h
new file mode 100644
index 000000000000..1a9a88b11584
--- /dev/null
+++ b/include/linux/aperture.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _LINUX_APERTURE_H_
+#define _LINUX_APERTURE_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+struct platform_device;
+
+#if defined(CONFIG_APERTURE_HELPERS)
+int devm_aperture_acquire_for_platform_device(struct platform_device *pdev,
+ resource_size_t base,
+ resource_size_t size);
+
+int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
+ const char *name);
+
+int __aperture_remove_legacy_vga_devices(struct pci_dev *pdev);
+
+int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name);
+#else
+static inline int devm_aperture_acquire_for_platform_device(struct platform_device *pdev,
+ resource_size_t base,
+ resource_size_t size)
+{
+ return 0;
+}
+
+static inline int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size,
+ const char *name)
+{
+ return 0;
+}
+
+static inline int __aperture_remove_legacy_vga_devices(struct pci_dev *pdev)
+{
+ return 0;
+}
+
+static inline int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name)
+{
+ return 0;
+}
+#endif
+
+/**
+ * aperture_remove_all_conflicting_devices - remove all existing framebuffers
+ * @name: a descriptive name of the requesting driver
+ *
+ * This function removes all graphics device drivers. Use this function on systems
+ * that can have their framebuffer located anywhere in memory.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise
+ */
+static inline int aperture_remove_all_conflicting_devices(const char *name)
+{
+ return aperture_remove_conflicting_devices(0, (resource_size_t)-1, name);
+}
+
+#endif
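
A sketch of the intended call site (the driver and its name are hypothetical; only the aperture helpers come from this header):

    #include <linux/aperture.h>
    #include <linux/pci.h>

    /* Hypothetical PCI GPU probe: evict firmware framebuffers first. */
    static int my_gpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            int ret;

            ret = aperture_remove_conflicting_pci_devices(pdev, "my-gpu");
            if (ret)
                    return ret;

            /* ... proceed with normal device setup ... */
            return 0;
    }
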
diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h
index ddb10aa67b14..206d97ffda79 100644
--- a/include/linux/apple-gmux.h
+++ b/include/linux/apple-gmux.h
@@ -8,18 +8,154 @@
#define LINUX_APPLE_GMUX_H
#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/pnp.h>
#define GMUX_ACPI_HID "APP000B"
+/*
+ * gmux port offsets. Many of these are not yet used, but may be in the
+ * future, and it's useful to have them documented here anyhow.
+ */
+#define GMUX_PORT_VERSION_MAJOR 0x04
+#define GMUX_PORT_VERSION_MINOR 0x05
+#define GMUX_PORT_VERSION_RELEASE 0x06
+#define GMUX_PORT_SWITCH_DISPLAY 0x10
+#define GMUX_PORT_SWITCH_GET_DISPLAY 0x11
+#define GMUX_PORT_INTERRUPT_ENABLE 0x14
+#define GMUX_PORT_INTERRUPT_STATUS 0x16
+#define GMUX_PORT_SWITCH_DDC 0x28
+#define GMUX_PORT_SWITCH_EXTERNAL 0x40
+#define GMUX_PORT_SWITCH_GET_EXTERNAL 0x41
+#define GMUX_PORT_DISCRETE_POWER 0x50
+#define GMUX_PORT_MAX_BRIGHTNESS 0x70
+#define GMUX_PORT_BRIGHTNESS 0x74
+#define GMUX_PORT_VALUE 0xc2
+#define GMUX_PORT_READ 0xd0
+#define GMUX_PORT_WRITE 0xd4
+
+#define GMUX_MMIO_PORT_SELECT 0x0e
+#define GMUX_MMIO_COMMAND_SEND 0x0f
+
+#define GMUX_MMIO_READ 0x00
+#define GMUX_MMIO_WRITE 0x40
+
+#define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4)
+
+enum apple_gmux_type {
+ APPLE_GMUX_TYPE_PIO,
+ APPLE_GMUX_TYPE_INDEXED,
+ APPLE_GMUX_TYPE_MMIO,
+};
+
#if IS_ENABLED(CONFIG_APPLE_GMUX)
+static inline bool apple_gmux_is_indexed(unsigned long iostart)
+{
+ u16 val;
+
+ outb(0xaa, iostart + 0xcc);
+ outb(0x55, iostart + 0xcd);
+ outb(0x00, iostart + 0xce);
+
+ val = inb(iostart + 0xcc) | (inb(iostart + 0xcd) << 8);
+ if (val == 0x55aa)
+ return true;
+
+ return false;
+}
+
+static inline bool apple_gmux_is_mmio(unsigned long iostart)
+{
+ u8 __iomem *iomem_base = ioremap(iostart, 16);
+ u8 val;
+
+ if (!iomem_base)
+ return false;
+
+ /*
+ * If this is 0xff, then gmux must not be present, as the gmux would
+ * reset it to 0x00, or it would be one of 0x1, 0x4, 0x41, 0x44 if a
+ * command is currently being processed.
+ */
+ val = ioread8(iomem_base + GMUX_MMIO_COMMAND_SEND);
+ iounmap(iomem_base);
+ return (val != 0xff);
+}
/**
- * apple_gmux_present() - detect if gmux is built into the machine
+ * apple_gmux_detect() - detect if gmux is built into the machine
+ *
+ * @pnp_dev: Device to probe or NULL to use the first matching device
+ * @type_ret: Returns (by reference) the apple_gmux_type of the device
+ *
+ * Detect if a supported gmux device is present by actually probing it.
+ * This avoids the false positives returned on some models by
+ * apple_gmux_present().
+ *
+ * Return: %true if a supported gmux ACPI device is detected and the kernel
+ * was configured with CONFIG_APPLE_GMUX, %false otherwise.
+ */
+static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, enum apple_gmux_type *type_ret)
+{
+ u8 ver_major, ver_minor, ver_release;
+ struct device *dev = NULL;
+ struct acpi_device *adev;
+ struct resource *res;
+ enum apple_gmux_type type = APPLE_GMUX_TYPE_PIO;
+ bool ret = false;
+
+ if (!pnp_dev) {
+ adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
+ if (!adev)
+ return false;
+
+ dev = get_device(acpi_get_first_physical_node(adev));
+ acpi_dev_put(adev);
+ if (!dev)
+ return false;
+
+ pnp_dev = to_pnp_dev(dev);
+ }
+
+ res = pnp_get_resource(pnp_dev, IORESOURCE_IO, 0);
+ if (res && resource_size(res) >= GMUX_MIN_IO_LEN) {
+ /*
+ * Invalid version information may indicate either that the gmux
+ * device isn't present or that it's a new one that uses indexed io.
+ */
+ ver_major = inb(res->start + GMUX_PORT_VERSION_MAJOR);
+ ver_minor = inb(res->start + GMUX_PORT_VERSION_MINOR);
+ ver_release = inb(res->start + GMUX_PORT_VERSION_RELEASE);
+ if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
+ if (apple_gmux_is_indexed(res->start))
+ type = APPLE_GMUX_TYPE_INDEXED;
+ else
+ goto out;
+ }
+ } else {
+ res = pnp_get_resource(pnp_dev, IORESOURCE_MEM, 0);
+ if (res && apple_gmux_is_mmio(res->start))
+ type = APPLE_GMUX_TYPE_MMIO;
+ else
+ goto out;
+ }
+
+ if (type_ret)
+ *type_ret = type;
+
+ ret = true;
+out:
+ put_device(dev);
+ return ret;
+}
+
+/**
+ * apple_gmux_present() - check if gmux ACPI device is present
*
* Drivers may use this to activate quirks specific to dual GPU MacBook Pros
* and Mac Pros, e.g. for deferred probing, runtime pm and backlight.
*
- * Return: %true if gmux is present and the kernel was configured
+ * Return: %true if gmux ACPI device is present and the kernel was configured
* with CONFIG_APPLE_GMUX, %false otherwise.
*/
static inline bool apple_gmux_present(void)
@@ -34,6 +170,11 @@ static inline bool apple_gmux_present(void)
return false;
}
+static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, enum apple_gmux_type *type_ret)
+{
+ return false;
+}
+
#endif /* !CONFIG_APPLE_GMUX */
#endif /* LINUX_APPLE_GMUX_H */
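
A short usage sketch (the helper is hypothetical; apple_gmux_detect() and enum apple_gmux_type come from this header):

    #include <linux/apple-gmux.h>

    /* Hypothetical: apply a quirk only when an MMIO gmux is present. */
    static bool have_mmio_gmux(void)
    {
            enum apple_gmux_type type;

            return apple_gmux_detect(NULL, &type) &&
                   type == APPLE_GMUX_TYPE_MMIO;
    }
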
diff --git a/include/linux/apple_bl.h b/include/linux/apple_bl.h
deleted file mode 100644
index 445af2e3cc21..000000000000
--- a/include/linux/apple_bl.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * apple_bl exported symbols
- */
-
-#ifndef _LINUX_APPLE_BL_H
-#define _LINUX_APPLE_BL_H
-
-#if defined(CONFIG_BACKLIGHT_APPLE) || defined(CONFIG_BACKLIGHT_APPLE_MODULE)
-
-extern int apple_bl_register(void);
-extern void apple_bl_unregister(void);
-
-#else /* !CONFIG_BACKLIGHT_APPLE */
-
-static inline int apple_bl_register(void)
-{
- return 0;
-}
-
-static inline void apple_bl_unregister(void)
-{
-}
-
-#endif /* !CONFIG_BACKLIGHT_APPLE */
-
-#endif /* _LINUX_APPLE_BL_H */
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index f180240dc95f..ebd7f8935f96 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -14,15 +14,14 @@ int topology_update_cpu_topology(void);
struct device_node;
bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
-DECLARE_PER_CPU(unsigned long, cpu_scale);
-static inline unsigned long topology_get_cpu_scale(int cpu)
+DECLARE_PER_CPU(unsigned long, capacity_freq_ref);
+
+static inline unsigned long topology_get_freq_ref(int cpu)
{
- return per_cpu(cpu_scale, cpu);
+ return per_cpu(capacity_freq_ref, cpu);
}
-void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
-
DECLARE_PER_CPU(unsigned long, arch_freq_scale);
static inline unsigned long topology_get_freq_scale(int cpu)
@@ -38,6 +37,7 @@ enum scale_freq_source {
SCALE_FREQ_SOURCE_CPUFREQ = 0,
SCALE_FREQ_SOURCE_ARCH,
SCALE_FREQ_SOURCE_CPPC,
+ SCALE_FREQ_SOURCE_VIRT,
};
struct scale_freq_data {
@@ -49,23 +49,24 @@ void topology_scale_freq_tick(void);
void topology_set_scale_freq_source(struct scale_freq_data *data, const struct cpumask *cpus);
void topology_clear_scale_freq_source(enum scale_freq_source source, const struct cpumask *cpus);
-DECLARE_PER_CPU(unsigned long, thermal_pressure);
+DECLARE_PER_CPU(unsigned long, hw_pressure);
-static inline unsigned long topology_get_thermal_pressure(int cpu)
+static inline unsigned long topology_get_hw_pressure(int cpu)
{
- return per_cpu(thermal_pressure, cpu);
+ return per_cpu(hw_pressure, cpu);
}
-void topology_set_thermal_pressure(const struct cpumask *cpus,
- unsigned long th_pressure);
+void topology_update_hw_pressure(const struct cpumask *cpus,
+ unsigned long capped_freq);
struct cpu_topology {
int thread_id;
int core_id;
+ int cluster_id;
int package_id;
- int llc_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
+ cpumask_t cluster_sibling;
cpumask_t llc_sibling;
};
@@ -73,17 +74,41 @@ struct cpu_topology {
extern struct cpu_topology cpu_topology[NR_CPUS];
#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id)
+#define topology_cluster_id(cpu) (cpu_topology[cpu].cluster_id)
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+#define topology_cluster_cpumask(cpu) (&cpu_topology[cpu].cluster_sibling)
#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
+
+#ifndef arch_cpu_is_threaded
+#define arch_cpu_is_threaded() (0)
+#endif
+
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
+const struct cpumask *cpu_clustergroup_mask(int cpu);
void update_siblings_masks(unsigned int cpu);
void remove_cpu_topology(unsigned int cpuid);
void reset_cpu_topology(void);
int parse_acpi_topology(void);
-#endif
+void freq_inv_set_max_ratio(int cpu, u64 max_rate);
+
+/*
+ * Architectures like ARM64 don't have a reliable architectural way to get SMT
+ * information and depend on the firmware (ACPI/OF) report. Non-SMT cores won't
+ * initialize thread_id, so we can use this to detect an SMT implementation.
+ */
+static inline bool topology_core_has_smt(int cpu)
+{
+ return cpu_topology[cpu].thread_id != -1;
+}
+
+#else
+
+static inline bool topology_core_has_smt(int cpu) { return false; }
+
+#endif /* CONFIG_GENERIC_ARCH_TOPOLOGY */
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
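
To make the new accessors concrete, a hypothetical debug helper (only the topology_* interfaces come from this header; cpumask_pr_args() is from <linux/cpumask.h>):

    #include <linux/arch_topology.h>
    #include <linux/cpumask.h>
    #include <linux/printk.h>

    /* Hypothetical: log cluster and SMT details for one CPU. */
    static void dump_cpu_topology(int cpu)
    {
            pr_info("cpu%d: cluster %d, smt=%d, cluster siblings %*pbl\n",
                    cpu, topology_cluster_id(cpu),
                    topology_core_has_smt(cpu),
                    cpumask_pr_args(topology_cluster_cpumask(cpu)));
    }
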
diff --git a/include/linux/args.h b/include/linux/args.h
new file mode 100644
index 000000000000..2e8e65d975c7
--- /dev/null
+++ b/include/linux/args.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_ARGS_H
+#define _LINUX_ARGS_H
+
+/*
+ * How do these macros work?
+ *
+ * In __COUNT_ARGS() _0 to _15 are just placeholders from the start
+ * in order to make sure _n is positioned over the correct number
+ * from 15 to 0 (depending on X, which is a variadic argument list).
+ * They serve no purpose other than occupying a position. Since each
+ * macro parameter must have a distinct identifier, those identifiers
+ * are as good as any.
+ *
+ * In COUNT_ARGS() we use actual integers, so __COUNT_ARGS() returns
+ * that as _n.
+ */
+
+/* This counts up to 15 arguments; any more and it returns the 16th argument. */
+#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _n, X...) _n
+#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+
+/* Concatenate two parameters, but allow them to be expanded beforehand. */
+#define __CONCAT(a, b) a ## b
+#define CONCATENATE(a, b) __CONCAT(a, b)
+
+#endif /* _LINUX_ARGS_H */
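
A few compile-time checks make the counting behaviour concrete (a sketch; static_assert() comes from <linux/build_bug.h>, and the MY_* names are made up):

    #include <linux/args.h>
    #include <linux/build_bug.h>

    /* COUNT_ARGS() yields the number of arguments passed, up to 15: */
    static_assert(COUNT_ARGS() == 0);
    static_assert(COUNT_ARGS(x) == 1);
    static_assert(COUNT_ARGS(x, y, z) == 3);

    /* CONCATENATE() pastes tokens after fully expanding both sides: */
    #define MY_PREFIX my_dev
    #define MY_NAME(suffix) CONCATENATE(MY_PREFIX, suffix)
    /* MY_NAME(_init) expands to the identifier my_dev_init */
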
diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h
index d0e44201d855..7f7a576267bc 100644
--- a/include/linux/arm-cci.h
+++ b/include/linux/arm-cci.h
@@ -43,6 +43,8 @@ static inline int __cci_control_port_by_index(u32 port, bool enable)
}
#endif
+void cci_enable_port_for_self(void);
+
#define cci_disable_port_by_device(dev) \
__cci_control_port_by_device(dev, false)
#define cci_enable_port_by_device(dev) \
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 6861489a1890..50b47eba7d01 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -5,7 +5,13 @@
#ifndef __LINUX_ARM_SMCCC_H
#define __LINUX_ARM_SMCCC_H
+#include <linux/args.h>
#include <linux/init.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/uuid.h>
+#endif
+
#include <uapi/linux/const.h>
/*
@@ -63,6 +69,11 @@
#define ARM_SMCCC_VERSION_1_0 0x10000
#define ARM_SMCCC_VERSION_1_1 0x10001
#define ARM_SMCCC_VERSION_1_2 0x10002
+#define ARM_SMCCC_VERSION_1_3 0x10003
+
+#define ARM_SMCCC_1_3_SVE_HINT 0x10000
+#define ARM_SMCCC_CALL_HINTS ARM_SMCCC_1_3_SVE_HINT
+
#define ARM_SMCCC_VERSION_FUNC_ID \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
@@ -89,6 +100,11 @@
ARM_SMCCC_SMC_32, \
0, 0x7fff)
+#define ARM_SMCCC_ARCH_WORKAROUND_3 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x3fff)
+
#define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
ARM_SMCCC_SMC_32, \
@@ -96,14 +112,81 @@
ARM_SMCCC_FUNC_QUERY_CALL_UID)
/* KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 */
-#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 0xb66fb428U
-#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 0xe911c52eU
-#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 0x564bcaa9U
-#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3 0x743a004dU
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM UUID_INIT(\
+ 0x28b46fb6, 0x2ec5, 0x11e9, \
+ 0xa9, 0xca, 0x4b, 0x56, \
+ 0x4d, 0x00, 0x3a, 0x74)
/* KVM "vendor specific" services */
#define ARM_SMCCC_KVM_FUNC_FEATURES 0
#define ARM_SMCCC_KVM_FUNC_PTP 1
+/* Start of pKVM hypercall range */
+#define ARM_SMCCC_KVM_FUNC_HYP_MEMINFO 2
+#define ARM_SMCCC_KVM_FUNC_MEM_SHARE 3
+#define ARM_SMCCC_KVM_FUNC_MEM_UNSHARE 4
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_5 5
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_6 6
+#define ARM_SMCCC_KVM_FUNC_MMIO_GUARD 7
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_8 8
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_9 9
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_10 10
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_11 11
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_12 12
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_13 13
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_14 14
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_15 15
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_16 16
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_17 17
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_18 18
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_19 19
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_20 20
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_21 21
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_22 22
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_23 23
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_24 24
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_25 25
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_26 26
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_27 27
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_28 28
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_29 29
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_30 30
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_31 31
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_32 32
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_33 33
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_34 34
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_35 35
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_36 36
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_37 37
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_38 38
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_39 39
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_40 40
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_41 41
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_42 42
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_43 43
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_44 44
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_45 45
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_46 46
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_47 47
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_48 48
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_49 49
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_50 50
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_51 51
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_52 52
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_53 53
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_54 54
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_55 55
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_56 56
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_57 57
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_58 58
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_59 59
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_60 60
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_61 61
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_62 62
+#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_63 63
+/* End of pKVM hypercall range */
+#define ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER 64
+#define ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS 65
+
#define ARM_SMCCC_KVM_FUNC_FEATURES_2 127
#define ARM_SMCCC_KVM_NUM_FUNCS 128
@@ -126,6 +209,42 @@
ARM_SMCCC_OWNER_VENDOR_HYP, \
ARM_SMCCC_KVM_FUNC_PTP)
+#define ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_HYP_MEMINFO)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_MEM_SHARE)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_MEM_UNSHARE)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_MMIO_GUARD)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_VER_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_VER)
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_DISCOVER_IMPL_CPUS_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_DISCOVER_IMPL_CPUS)
+
/* ptp_kvm counter type ID */
#define KVM_PTP_VIRT_COUNTER 0
#define KVM_PTP_PHYS_COUNTER 1
@@ -217,6 +336,75 @@ u32 arm_smccc_get_version(void);
void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit);
/**
+ * arm_smccc_get_soc_id_version()
+ *
+ * Returns the SOC ID version.
+ *
+ * When ARM_SMCCC_ARCH_SOC_ID is not present, returns SMCCC_RET_NOT_SUPPORTED.
+ */
+s32 arm_smccc_get_soc_id_version(void);
+
+/**
+ * arm_smccc_get_soc_id_revision()
+ *
+ * Returns the SOC ID revision.
+ *
+ * When ARM_SMCCC_ARCH_SOC_ID is not present, returns SMCCC_RET_NOT_SUPPORTED.
+ */
+s32 arm_smccc_get_soc_id_revision(void);
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Returns whether a specific hypervisor UUID is advertised for the
+ * Vendor Specific Hypervisor Service range.
+ */
+bool arm_smccc_hypervisor_has_uuid(const uuid_t *uuid);
+
+static inline uuid_t smccc_res_to_uuid(u32 r0, u32 r1, u32 r2, u32 r3)
+{
+ uuid_t uuid = {
+ .b = {
+ [0] = (r0 >> 0) & 0xff,
+ [1] = (r0 >> 8) & 0xff,
+ [2] = (r0 >> 16) & 0xff,
+ [3] = (r0 >> 24) & 0xff,
+
+ [4] = (r1 >> 0) & 0xff,
+ [5] = (r1 >> 8) & 0xff,
+ [6] = (r1 >> 16) & 0xff,
+ [7] = (r1 >> 24) & 0xff,
+
+ [8] = (r2 >> 0) & 0xff,
+ [9] = (r2 >> 8) & 0xff,
+ [10] = (r2 >> 16) & 0xff,
+ [11] = (r2 >> 24) & 0xff,
+
+ [12] = (r3 >> 0) & 0xff,
+ [13] = (r3 >> 8) & 0xff,
+ [14] = (r3 >> 16) & 0xff,
+ [15] = (r3 >> 24) & 0xff,
+ },
+ };
+
+ return uuid;
+}
+
+static inline u32 smccc_uuid_to_reg(const uuid_t *uuid, int reg)
+{
+ u32 val = 0;
+
+ val |= (u32)(uuid->b[4 * reg + 0] << 0);
+ val |= (u32)(uuid->b[4 * reg + 1] << 8);
+ val |= (u32)(uuid->b[4 * reg + 2] << 16);
+ val |= (u32)(uuid->b[4 * reg + 3] << 24);
+
+ return val;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+/**
* struct arm_smccc_res - Result from SMC/HVC call
* @a0-a3 result values from registers 0 to 3
*/
@@ -227,6 +415,61 @@ struct arm_smccc_res {
unsigned long a3;
};
+#ifdef CONFIG_ARM64
+/**
+ * struct arm_smccc_1_2_regs - Arguments for or Results from SMC/HVC call
+ * @a0-a17 argument values from registers 0 to 17
+ */
+struct arm_smccc_1_2_regs {
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long a8;
+ unsigned long a9;
+ unsigned long a10;
+ unsigned long a11;
+ unsigned long a12;
+ unsigned long a13;
+ unsigned long a14;
+ unsigned long a15;
+ unsigned long a16;
+ unsigned long a17;
+};
+
+/**
+ * arm_smccc_1_2_hvc() - make HVC calls
+ * @args: arguments passed via struct arm_smccc_1_2_regs
+ * @res: result values via struct arm_smccc_1_2_regs
+ *
+ * This function is used to make HVC calls following SMC Calling Convention
+ * v1.2 or above. The contents of the supplied @args are copied from
+ * the structure to registers prior to the HVC instruction. On return
+ * from the HVC instruction, @res is updated with the contents of the
+ * registers.
+ */
+asmlinkage void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res);
+
+/**
+ * arm_smccc_1_2_smc() - make SMC calls
+ * @args: arguments passed via struct arm_smccc_1_2_regs
+ * @res: result values via struct arm_smccc_1_2_regs
+ *
+ * This function is used to make SMC calls following SMC Calling Convention
+ * v1.2 or above. The contents of the supplied @args are copied from
+ * the structure to registers prior to the SMC instruction. On return
+ * from the SMC instruction, @res is updated with the contents of the
+ * registers.
+ */
+asmlinkage void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res);
+#endif
+
/**
* struct arm_smccc_quirk - Contains quirk information
* @id: quirk identification
@@ -252,10 +495,20 @@ struct arm_smccc_quirk {
* from register 0 to 3 on return from the SMC instruction. An optional
* quirk structure provides vendor specific behavior.
*/
+#ifdef CONFIG_HAVE_ARM_SMCCC
asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long a6, unsigned long a7,
struct arm_smccc_res *res, struct arm_smccc_quirk *quirk);
+#else
+static inline void __arm_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3, unsigned long a4,
+ unsigned long a5, unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res, struct arm_smccc_quirk *quirk)
+{
+ *res = (struct arm_smccc_res){};
+}
+#endif
/**
* __arm_smccc_hvc() - make HVC calls
@@ -297,31 +550,26 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#endif
-#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
+#define __constraint_read_2 "r" (arg0)
+#define __constraint_read_3 __constraint_read_2, "r" (arg1)
+#define __constraint_read_4 __constraint_read_3, "r" (arg2)
+#define __constraint_read_5 __constraint_read_4, "r" (arg3)
+#define __constraint_read_6 __constraint_read_5, "r" (arg4)
+#define __constraint_read_7 __constraint_read_6, "r" (arg5)
+#define __constraint_read_8 __constraint_read_7, "r" (arg6)
+#define __constraint_read_9 __constraint_read_8, "r" (arg7)
-#define __count_args(...) \
- ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
-
-#define __constraint_read_0 "r" (arg0)
-#define __constraint_read_1 __constraint_read_0, "r" (arg1)
-#define __constraint_read_2 __constraint_read_1, "r" (arg2)
-#define __constraint_read_3 __constraint_read_2, "r" (arg3)
-#define __constraint_read_4 __constraint_read_3, "r" (arg4)
-#define __constraint_read_5 __constraint_read_4, "r" (arg5)
-#define __constraint_read_6 __constraint_read_5, "r" (arg6)
-#define __constraint_read_7 __constraint_read_6, "r" (arg7)
-
-#define __declare_arg_0(a0, res) \
+#define __declare_arg_2(a0, res) \
struct arm_smccc_res *___res = res; \
register unsigned long arg0 asm("r0") = (u32)a0
-#define __declare_arg_1(a0, a1, res) \
+#define __declare_arg_3(a0, a1, res) \
typeof(a1) __a1 = a1; \
struct arm_smccc_res *___res = res; \
register unsigned long arg0 asm("r0") = (u32)a0; \
register typeof(a1) arg1 asm("r1") = __a1
-#define __declare_arg_2(a0, a1, a2, res) \
+#define __declare_arg_4(a0, a1, a2, res) \
typeof(a1) __a1 = a1; \
typeof(a2) __a2 = a2; \
struct arm_smccc_res *___res = res; \
@@ -329,7 +577,7 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
register typeof(a1) arg1 asm("r1") = __a1; \
register typeof(a2) arg2 asm("r2") = __a2
-#define __declare_arg_3(a0, a1, a2, a3, res) \
+#define __declare_arg_5(a0, a1, a2, a3, res) \
typeof(a1) __a1 = a1; \
typeof(a2) __a2 = a2; \
typeof(a3) __a3 = a3; \
@@ -339,34 +587,26 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
register typeof(a2) arg2 asm("r2") = __a2; \
register typeof(a3) arg3 asm("r3") = __a3
-#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
+#define __declare_arg_6(a0, a1, a2, a3, a4, res) \
typeof(a4) __a4 = a4; \
- __declare_arg_3(a0, a1, a2, a3, res); \
+ __declare_arg_5(a0, a1, a2, a3, res); \
register typeof(a4) arg4 asm("r4") = __a4
-#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
+#define __declare_arg_7(a0, a1, a2, a3, a4, a5, res) \
typeof(a5) __a5 = a5; \
- __declare_arg_4(a0, a1, a2, a3, a4, res); \
+ __declare_arg_6(a0, a1, a2, a3, a4, res); \
register typeof(a5) arg5 asm("r5") = __a5
-#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
+#define __declare_arg_8(a0, a1, a2, a3, a4, a5, a6, res) \
typeof(a6) __a6 = a6; \
- __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
+ __declare_arg_7(a0, a1, a2, a3, a4, a5, res); \
register typeof(a6) arg6 asm("r6") = __a6
-#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
+#define __declare_arg_9(a0, a1, a2, a3, a4, a5, a6, a7, res) \
typeof(a7) __a7 = a7; \
- __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
+ __declare_arg_8(a0, a1, a2, a3, a4, a5, a6, res); \
register typeof(a7) arg7 asm("r7") = __a7
-#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
-#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
-
-#define ___constraints(count) \
- : __constraint_read_ ## count \
- : "memory"
-#define __constraints(count) ___constraints(count)
-
/*
* We have an output list that is not necessarily used, and GCC feels
* entitled to optimise the whole sequence away. "volatile" is what
@@ -378,10 +618,13 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
register unsigned long r1 asm("r1"); \
register unsigned long r2 asm("r2"); \
register unsigned long r3 asm("r3"); \
- __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
+ CONCATENATE(__declare_arg_, \
+ COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); \
asm volatile(inst "\n" : \
"=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \
- __constraints(__count_args(__VA_ARGS__))); \
+ : CONCATENATE(__constraint_read_, \
+ COUNT_ARGS(__VA_ARGS__)) \
+ : "memory"); \
if (___res) \
*___res = (typeof(*___res)){r0, r1, r2, r3}; \
} while (0)
@@ -425,8 +668,12 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
*/
#define __fail_smccc_1_1(...) \
do { \
- __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
- asm ("" : __constraints(__count_args(__VA_ARGS__))); \
+ CONCATENATE(__declare_arg_, \
+ COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); \
+ asm ("" : \
+ : CONCATENATE(__constraint_read_, \
+ COUNT_ARGS(__VA_ARGS__)) \
+ : "memory"); \
if (___res) \
___res->a0 = SMCCC_RET_NOT_SUPPORTED; \
} while (0)
@@ -463,5 +710,45 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
method; \
})
+#ifdef CONFIG_ARM64
+
+#define __fail_smccc_1_2(___res) \
+ do { \
+ if (___res) \
+ ___res->a0 = SMCCC_RET_NOT_SUPPORTED; \
+ } while (0)
+
+/*
+ * arm_smccc_1_2_invoke() - make an SMCCC v1.2 compliant call
+ *
+ * @args: SMC args are in the a0..a17 fields of the arm_smccc_1_2_regs structure
+ * @res: result values from registers 0 to 17
+ *
+ * This macro will make either an HVC call or an SMC call depending on the
+ * current SMCCC conduit. If no valid conduit is available then -1
+ * (SMCCC_RET_NOT_SUPPORTED) is returned in @res.a0 (if supplied).
+ *
+ * The return value also provides the conduit that was used.
+ */
+#define arm_smccc_1_2_invoke(args, res) ({ \
+ struct arm_smccc_1_2_regs *__args = args; \
+ struct arm_smccc_1_2_regs *__res = res; \
+ int method = arm_smccc_1_1_get_conduit(); \
+ switch (method) { \
+ case SMCCC_CONDUIT_HVC: \
+ arm_smccc_1_2_hvc(__args, __res); \
+ break; \
+ case SMCCC_CONDUIT_SMC: \
+ arm_smccc_1_2_smc(__args, __res); \
+ break; \
+ default: \
+ __fail_smccc_1_2(__res); \
+ method = SMCCC_CONDUIT_NONE; \
+ break; \
+ } \
+ method; \
+ })
+#endif /*CONFIG_ARM64*/
+
#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
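
A sketch of conduit-agnostic usage (the caller is hypothetical; arm_smccc_1_1_invoke() and the function ID are from this header):

    #include <linux/arm_smccc.h>
    #include <linux/types.h>

    /* Hypothetical: query the SMCCC version via the current conduit. */
    static u32 smccc_version(void)
    {
            struct arm_smccc_res res;

            arm_smccc_1_1_invoke(ARM_SMCCC_VERSION_FUNC_ID, &res);
            return res.a0;
    }
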
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
new file mode 100644
index 000000000000..81e603839c4a
--- /dev/null
+++ b/include/linux/arm_ffa.h
@@ -0,0 +1,515 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#ifndef _LINUX_ARM_FFA_H
+#define _LINUX_ARM_FFA_H
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+
+#define FFA_SMC(calling_convention, func_num) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, (calling_convention), \
+ ARM_SMCCC_OWNER_STANDARD, (func_num))
+
+#define FFA_SMC_32(func_num) FFA_SMC(ARM_SMCCC_SMC_32, (func_num))
+#define FFA_SMC_64(func_num) FFA_SMC(ARM_SMCCC_SMC_64, (func_num))
+
+#define FFA_ERROR FFA_SMC_32(0x60)
+#define FFA_SUCCESS FFA_SMC_32(0x61)
+#define FFA_FN64_SUCCESS FFA_SMC_64(0x61)
+#define FFA_INTERRUPT FFA_SMC_32(0x62)
+#define FFA_VERSION FFA_SMC_32(0x63)
+#define FFA_FEATURES FFA_SMC_32(0x64)
+#define FFA_RX_RELEASE FFA_SMC_32(0x65)
+#define FFA_RXTX_MAP FFA_SMC_32(0x66)
+#define FFA_FN64_RXTX_MAP FFA_SMC_64(0x66)
+#define FFA_RXTX_UNMAP FFA_SMC_32(0x67)
+#define FFA_PARTITION_INFO_GET FFA_SMC_32(0x68)
+#define FFA_ID_GET FFA_SMC_32(0x69)
+#define FFA_MSG_POLL FFA_SMC_32(0x6A)
+#define FFA_MSG_WAIT FFA_SMC_32(0x6B)
+#define FFA_YIELD FFA_SMC_32(0x6C)
+#define FFA_RUN FFA_SMC_32(0x6D)
+#define FFA_MSG_SEND FFA_SMC_32(0x6E)
+#define FFA_MSG_SEND_DIRECT_REQ FFA_SMC_32(0x6F)
+#define FFA_FN64_MSG_SEND_DIRECT_REQ FFA_SMC_64(0x6F)
+#define FFA_MSG_SEND_DIRECT_RESP FFA_SMC_32(0x70)
+#define FFA_FN64_MSG_SEND_DIRECT_RESP FFA_SMC_64(0x70)
+#define FFA_MEM_DONATE FFA_SMC_32(0x71)
+#define FFA_FN64_MEM_DONATE FFA_SMC_64(0x71)
+#define FFA_MEM_LEND FFA_SMC_32(0x72)
+#define FFA_FN64_MEM_LEND FFA_SMC_64(0x72)
+#define FFA_MEM_SHARE FFA_SMC_32(0x73)
+#define FFA_FN64_MEM_SHARE FFA_SMC_64(0x73)
+#define FFA_MEM_RETRIEVE_REQ FFA_SMC_32(0x74)
+#define FFA_FN64_MEM_RETRIEVE_REQ FFA_SMC_64(0x74)
+#define FFA_MEM_RETRIEVE_RESP FFA_SMC_32(0x75)
+#define FFA_MEM_RELINQUISH FFA_SMC_32(0x76)
+#define FFA_MEM_RECLAIM FFA_SMC_32(0x77)
+#define FFA_MEM_OP_PAUSE FFA_SMC_32(0x78)
+#define FFA_MEM_OP_RESUME FFA_SMC_32(0x79)
+#define FFA_MEM_FRAG_RX FFA_SMC_32(0x7A)
+#define FFA_MEM_FRAG_TX FFA_SMC_32(0x7B)
+#define FFA_NORMAL_WORLD_RESUME FFA_SMC_32(0x7C)
+#define FFA_NOTIFICATION_BITMAP_CREATE FFA_SMC_32(0x7D)
+#define FFA_NOTIFICATION_BITMAP_DESTROY FFA_SMC_32(0x7E)
+#define FFA_NOTIFICATION_BIND FFA_SMC_32(0x7F)
+#define FFA_NOTIFICATION_UNBIND FFA_SMC_32(0x80)
+#define FFA_NOTIFICATION_SET FFA_SMC_32(0x81)
+#define FFA_NOTIFICATION_GET FFA_SMC_32(0x82)
+#define FFA_NOTIFICATION_INFO_GET FFA_SMC_32(0x83)
+#define FFA_FN64_NOTIFICATION_INFO_GET FFA_SMC_64(0x83)
+#define FFA_RX_ACQUIRE FFA_SMC_32(0x84)
+#define FFA_SPM_ID_GET FFA_SMC_32(0x85)
+#define FFA_MSG_SEND2 FFA_SMC_32(0x86)
+#define FFA_SECONDARY_EP_REGISTER FFA_SMC_32(0x87)
+#define FFA_FN64_SECONDARY_EP_REGISTER FFA_SMC_64(0x87)
+#define FFA_MEM_PERM_GET FFA_SMC_32(0x88)
+#define FFA_FN64_MEM_PERM_GET FFA_SMC_64(0x88)
+#define FFA_MEM_PERM_SET FFA_SMC_32(0x89)
+#define FFA_FN64_MEM_PERM_SET FFA_SMC_64(0x89)
+#define FFA_CONSOLE_LOG FFA_SMC_32(0x8A)
+#define FFA_PARTITION_INFO_GET_REGS FFA_SMC_64(0x8B)
+#define FFA_EL3_INTR_HANDLE FFA_SMC_32(0x8C)
+#define FFA_MSG_SEND_DIRECT_REQ2 FFA_SMC_64(0x8D)
+#define FFA_MSG_SEND_DIRECT_RESP2 FFA_SMC_64(0x8E)
+
+/*
+ * For some calls it is necessary to use SMC64 to pass or return 64-bit values.
+ * For such calls FFA_FN_NATIVE(name) will choose the appropriate
+ * (native-width) function ID.
+ */
+#ifdef CONFIG_64BIT
+#define FFA_FN_NATIVE(name) FFA_FN64_##name
+#else
+#define FFA_FN_NATIVE(name) FFA_##name
+#endif
+
+/* FFA error codes. */
+#define FFA_RET_SUCCESS (0)
+#define FFA_RET_NOT_SUPPORTED (-1)
+#define FFA_RET_INVALID_PARAMETERS (-2)
+#define FFA_RET_NO_MEMORY (-3)
+#define FFA_RET_BUSY (-4)
+#define FFA_RET_INTERRUPTED (-5)
+#define FFA_RET_DENIED (-6)
+#define FFA_RET_RETRY (-7)
+#define FFA_RET_ABORTED (-8)
+#define FFA_RET_NO_DATA (-9)
+
+/* FFA version encoding */
+#define FFA_MAJOR_VERSION_MASK GENMASK(30, 16)
+#define FFA_MINOR_VERSION_MASK GENMASK(15, 0)
+#define FFA_MAJOR_VERSION(x) ((u16)(FIELD_GET(FFA_MAJOR_VERSION_MASK, (x))))
+#define FFA_MINOR_VERSION(x) ((u16)(FIELD_GET(FFA_MINOR_VERSION_MASK, (x))))
+#define FFA_PACK_VERSION_INFO(major, minor) \
+ (FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) | \
+ FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor)))
+#define FFA_VERSION_1_0 FFA_PACK_VERSION_INFO(1, 0)
+#define FFA_VERSION_1_1 FFA_PACK_VERSION_INFO(1, 1)
+#define FFA_VERSION_1_2 FFA_PACK_VERSION_INFO(1, 2)
+
+/*
+ * The FF-A specification explicitly mentions '4K pages'. This should
+ * not be confused with the kernel PAGE_SIZE, which is the translation
+ * granule the kernel is configured with and may be one of 4K, 16K or 64K.
+ */
+#define FFA_PAGE_SIZE SZ_4K
+
+/*
+ * Minimum buffer size/alignment encodings returned by an FFA_FEATURES
+ * query for FFA_RXTX_MAP.
+ */
+#define FFA_FEAT_RXTX_MIN_SZ_4K 0
+#define FFA_FEAT_RXTX_MIN_SZ_64K 1
+#define FFA_FEAT_RXTX_MIN_SZ_16K 2
+#define FFA_FEAT_RXTX_MIN_SZ_MASK GENMASK(1, 0)
+
+/* FFA Bus/Device/Driver related */
+struct ffa_device {
+ u32 id;
+ u32 properties;
+ int vm_id;
+ bool mode_32bit;
+ uuid_t uuid;
+ struct device dev;
+ const struct ffa_ops *ops;
+};
+
+#define to_ffa_dev(d) container_of(d, struct ffa_device, dev)
+
+struct ffa_device_id {
+ uuid_t uuid;
+};
+
+struct ffa_driver {
+ const char *name;
+ int (*probe)(struct ffa_device *sdev);
+ void (*remove)(struct ffa_device *sdev);
+ const struct ffa_device_id *id_table;
+
+ struct device_driver driver;
+};
+
+#define to_ffa_driver(d) container_of_const(d, struct ffa_driver, driver)
+
+static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data)
+{
+ dev_set_drvdata(&fdev->dev, data);
+}
+
+static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev)
+{
+ return dev_get_drvdata(&fdev->dev);
+}
+
+struct ffa_partition_info;
+
+#if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)
+struct ffa_device *
+ffa_device_register(const struct ffa_partition_info *part_info,
+ const struct ffa_ops *ops);
+void ffa_device_unregister(struct ffa_device *ffa_dev);
+int ffa_driver_register(struct ffa_driver *driver, struct module *owner,
+ const char *mod_name);
+void ffa_driver_unregister(struct ffa_driver *driver);
+void ffa_devices_unregister(void);
+bool ffa_device_is_valid(struct ffa_device *ffa_dev);
+
+#else
+static inline struct ffa_device *
+ffa_device_register(const struct ffa_partition_info *part_info,
+ const struct ffa_ops *ops)
+{
+ return NULL;
+}
+
+static inline void ffa_device_unregister(struct ffa_device *dev) {}
+
+static inline void ffa_devices_unregister(void) {}
+
+static inline int
+ffa_driver_register(struct ffa_driver *driver, struct module *owner,
+ const char *mod_name)
+{
+ return -EINVAL;
+}
+
+static inline void ffa_driver_unregister(struct ffa_driver *driver) {}
+
+static inline
+bool ffa_device_is_valid(struct ffa_device *ffa_dev) { return false; }
+
+#endif /* CONFIG_ARM_FFA_TRANSPORT */
+
+#define ffa_register(driver) \
+ ffa_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+#define ffa_unregister(driver) \
+ ffa_driver_unregister(driver)
+
+/**
+ * module_ffa_driver() - Helper macro for registering a psa_ffa driver
+ * @__ffa_driver: ffa_driver structure
+ *
+ * Helper macro for psa_ffa drivers to set up proper module init / exit
+ * functions. Replaces module_init() and module_exit() and keeps people from
+ * printing pointless things to the kernel log when their driver is loaded.
+ */
+#define module_ffa_driver(__ffa_driver) \
+ module_driver(__ffa_driver, ffa_register, ffa_unregister)
+
+extern const struct bus_type ffa_bus_type;
+
+/* The FF-A 1.0 partition info structure lacks the trailing uuid field */
+#define FFA_1_0_PARTITON_INFO_SZ (8)
+
+/* FFA transport related */
+struct ffa_partition_info {
+ u16 id;
+ u16 exec_ctxt;
+/* partition supports receipt of direct requests */
+#define FFA_PARTITION_DIRECT_RECV BIT(0)
+/* partition can send direct requests. */
+#define FFA_PARTITION_DIRECT_SEND BIT(1)
+/* partition can send and receive indirect messages. */
+#define FFA_PARTITION_INDIRECT_MSG BIT(2)
+/* partition can receive notifications */
+#define FFA_PARTITION_NOTIFICATION_RECV BIT(3)
+/* partition runs in the AArch64 execution state. */
+#define FFA_PARTITION_AARCH64_EXEC BIT(8)
+/* partition supports receipt of direct request2 */
+#define FFA_PARTITION_DIRECT_REQ2_RECV BIT(9)
+/* partition can send direct request2. */
+#define FFA_PARTITION_DIRECT_REQ2_SEND BIT(10)
+ u32 properties;
+ uuid_t uuid;
+};
+
+static inline
+bool ffa_partition_check_property(struct ffa_device *dev, u32 property)
+{
+ return dev->properties & property;
+}
+
+#define ffa_partition_supports_notify_recv(dev) \
+ ffa_partition_check_property(dev, FFA_PARTITION_NOTIFICATION_RECV)
+
+#define ffa_partition_supports_indirect_msg(dev) \
+ ffa_partition_check_property(dev, FFA_PARTITION_INDIRECT_MSG)
+
+#define ffa_partition_supports_direct_recv(dev) \
+ ffa_partition_check_property(dev, FFA_PARTITION_DIRECT_RECV)
+
+#define ffa_partition_supports_direct_req2_recv(dev) \
+ (ffa_partition_check_property(dev, FFA_PARTITION_DIRECT_REQ2_RECV) && \
+ !dev->mode_32bit)
+
+/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */
+struct ffa_send_direct_data {
+ unsigned long data0; /* w3/x3 */
+ unsigned long data1; /* w4/x4 */
+ unsigned long data2; /* w5/x5 */
+ unsigned long data3; /* w6/x6 */
+ unsigned long data4; /* w7/x7 */
+};
+
+struct ffa_indirect_msg_hdr {
+ u32 flags;
+ u32 res0;
+ u32 offset;
+ u32 send_recv_id;
+ u32 size;
+ u32 res1;
+ uuid_t uuid;
+};
+
+/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP}2 which pass data via registers */
+struct ffa_send_direct_data2 {
+ unsigned long data[14]; /* x4-x17 */
+};
+
+struct ffa_mem_region_addr_range {
+ /* The base IPA of the constituent memory region, aligned to 4 kiB */
+ u64 address;
+ /* The number of 4 kiB pages in the constituent memory region. */
+ u32 pg_cnt;
+ u32 reserved;
+};
+
+struct ffa_composite_mem_region {
+ /*
+ * The total number of 4 kiB pages included in this memory region. This
+ * must be equal to the sum of page counts specified in each
+ * `struct ffa_mem_region_addr_range`.
+ */
+ u32 total_pg_cnt;
+ /* The number of constituents included in this memory region range */
+ u32 addr_range_cnt;
+ u64 reserved;
+	/* An array of `addr_range_cnt` memory region constituents. */
+ struct ffa_mem_region_addr_range constituents[];
+};
+
+struct ffa_mem_region_attributes {
+ /* The ID of the VM to which the memory is being given or shared. */
+ u16 receiver;
+ /*
+ * The permissions with which the memory region should be mapped in the
+ * receiver's page table.
+ */
+#define FFA_MEM_EXEC BIT(3)
+#define FFA_MEM_NO_EXEC BIT(2)
+#define FFA_MEM_RW BIT(1)
+#define FFA_MEM_RO BIT(0)
+ u8 attrs;
+ /*
+ * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP
+ * for memory regions with multiple borrowers.
+ */
+#define FFA_MEM_RETRIEVE_SELF_BORROWER BIT(0)
+ u8 flag;
+ /*
+	 * Offset in bytes from the start of the outer `ffa_memory_region` to
+	 * a `struct ffa_composite_mem_region`.
+ */
+ u32 composite_off;
+ u8 impdef_val[16];
+ u64 reserved;
+};
+
+struct ffa_mem_region {
+ /* The ID of the VM/owner which originally sent the memory region */
+ u16 sender_id;
+#define FFA_MEM_NORMAL BIT(5)
+#define FFA_MEM_DEVICE BIT(4)
+
+#define FFA_MEM_WRITE_BACK (3 << 2)
+#define FFA_MEM_NON_CACHEABLE (1 << 2)
+
+#define FFA_DEV_nGnRnE (0 << 2)
+#define FFA_DEV_nGnRE (1 << 2)
+#define FFA_DEV_nGRE (2 << 2)
+#define FFA_DEV_GRE (3 << 2)
+
+#define FFA_MEM_NON_SHAREABLE (0)
+#define FFA_MEM_OUTER_SHAREABLE (2)
+#define FFA_MEM_INNER_SHAREABLE (3)
+ /* Memory region attributes, upper byte MBZ pre v1.1 */
+ u16 attributes;
+/*
+ * Clear memory region contents after unmapping it from the sender and
+ * before mapping it for any receiver.
+ */
+#define FFA_MEM_CLEAR BIT(0)
+/*
+ * Whether the hypervisor may time slice the memory sharing or retrieval
+ * operation.
+ */
+#define FFA_TIME_SLICE_ENABLE BIT(1)
+
+#define FFA_MEM_RETRIEVE_TYPE_IN_RESP (0 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_SHARE (1 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_LEND (2 << 3)
+#define FFA_MEM_RETRIEVE_TYPE_DONATE (3 << 3)
+
+#define FFA_MEM_RETRIEVE_ADDR_ALIGN_HINT BIT(9)
+#define FFA_MEM_RETRIEVE_ADDR_ALIGN(x) ((x) << 5)
+ /* Flags to control behaviour of the transaction. */
+ u32 flags;
+#define HANDLE_LOW_MASK GENMASK_ULL(31, 0)
+#define HANDLE_HIGH_MASK GENMASK_ULL(63, 32)
+#define HANDLE_LOW(x) ((u32)(FIELD_GET(HANDLE_LOW_MASK, (x))))
+#define HANDLE_HIGH(x) ((u32)(FIELD_GET(HANDLE_HIGH_MASK, (x))))
+
+#define PACK_HANDLE(l, h) \
+ (FIELD_PREP(HANDLE_LOW_MASK, (l)) | FIELD_PREP(HANDLE_HIGH_MASK, (h)))
+ /*
+ * A globally-unique ID assigned by the hypervisor for a region
+ * of memory being sent between VMs.
+ */
+ u64 handle;
+ /*
+ * An implementation defined value associated with the receiver and the
+ * memory region.
+ */
+ u64 tag;
+ /* Size of each endpoint memory access descriptor, MBZ pre v1.1 */
+ u32 ep_mem_size;
+ /*
+ * The number of `ffa_mem_region_attributes` entries included in this
+ * transaction.
+ */
+ u32 ep_count;
+ /*
+ * 16-byte aligned offset from the base address of this descriptor
+ * to the first element of the endpoint memory access descriptor array
+ * Valid only from v1.1
+ */
+ u32 ep_mem_offset;
+ /* MBZ, valid only from v1.1 */
+ u32 reserved[3];
+};
+
+#define CONSTITUENTS_OFFSET(x) \
+ (offsetof(struct ffa_composite_mem_region, constituents[x]))
+
+#define FFA_EMAD_HAS_IMPDEF_FIELD(version) ((version) >= FFA_VERSION_1_2)
+#define FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version) ((version) > FFA_VERSION_1_0)
+
+static inline u32 ffa_emad_size_get(u32 ffa_version)
+{
+ u32 sz;
+ struct ffa_mem_region_attributes *ep_mem_access;
+
+ if (FFA_EMAD_HAS_IMPDEF_FIELD(ffa_version))
+ sz = sizeof(*ep_mem_access);
+ else
+ sz = sizeof(*ep_mem_access) - sizeof(ep_mem_access->impdef_val);
+
+ return sz;
+}
+
+static inline u32
+ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version)
+{
+ u32 offset = count * ffa_emad_size_get(ffa_version);
+ /*
+	 * Prior to v1.1, the endpoint memory descriptor array started at
+	 * offset 32 (i.e. the offset of ep_mem_offset in the current structure).
+ */
+ if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(ffa_version))
+ offset += offsetof(struct ffa_mem_region, ep_mem_offset);
+ else
+ offset += sizeof(struct ffa_mem_region);
+
+ return offset;
+}
+
+struct ffa_mem_ops_args {
+ bool use_txbuf;
+ u32 nattrs;
+ u32 flags;
+ u64 tag;
+ u64 g_handle;
+ struct scatterlist *sg;
+ struct ffa_mem_region_attributes *attrs;
+};
+
+struct ffa_info_ops {
+ u32 (*api_version_get)(void);
+ int (*partition_info_get)(const char *uuid_str,
+ struct ffa_partition_info *buffer);
+};
+
+struct ffa_msg_ops {
+ void (*mode_32bit_set)(struct ffa_device *dev);
+ int (*sync_send_receive)(struct ffa_device *dev,
+ struct ffa_send_direct_data *data);
+ int (*indirect_send)(struct ffa_device *dev, void *buf, size_t sz);
+ int (*sync_send_receive2)(struct ffa_device *dev,
+ struct ffa_send_direct_data2 *data);
+};
+
+struct ffa_mem_ops {
+ int (*memory_reclaim)(u64 g_handle, u32 flags);
+ int (*memory_share)(struct ffa_mem_ops_args *args);
+ int (*memory_lend)(struct ffa_mem_ops_args *args);
+};
+
+struct ffa_cpu_ops {
+ int (*run)(struct ffa_device *dev, u16 vcpu);
+};
+
+typedef void (*ffa_sched_recv_cb)(u16 vcpu, bool is_per_vcpu, void *cb_data);
+typedef void (*ffa_notifier_cb)(int notify_id, void *cb_data);
+typedef void (*ffa_fwk_notifier_cb)(int notify_id, void *cb_data, void *buf);
+
+struct ffa_notifier_ops {
+ int (*sched_recv_cb_register)(struct ffa_device *dev,
+ ffa_sched_recv_cb cb, void *cb_data);
+ int (*sched_recv_cb_unregister)(struct ffa_device *dev);
+ int (*notify_request)(struct ffa_device *dev, bool per_vcpu,
+ ffa_notifier_cb cb, void *cb_data, int notify_id);
+ int (*notify_relinquish)(struct ffa_device *dev, int notify_id);
+ int (*fwk_notify_request)(struct ffa_device *dev,
+ ffa_fwk_notifier_cb cb, void *cb_data,
+ int notify_id);
+ int (*fwk_notify_relinquish)(struct ffa_device *dev, int notify_id);
+ int (*notify_send)(struct ffa_device *dev, int notify_id, bool per_vcpu,
+ u16 vcpu);
+};
+
+struct ffa_ops {
+ const struct ffa_info_ops *info_ops;
+ const struct ffa_msg_ops *msg_ops;
+ const struct ffa_mem_ops *mem_ops;
+ const struct ffa_cpu_ops *cpu_ops;
+ const struct ffa_notifier_ops *notifier_ops;
+};
+
+#endif /* _LINUX_ARM_FFA_H */
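
A minimal driver skeleton tying the bus pieces together (the UUID and all my_* names are hypothetical; struct ffa_driver, struct ffa_device_id and module_ffa_driver() come from this header):

    #include <linux/arm_ffa.h>
    #include <linux/errno.h>

    static int my_ffa_probe(struct ffa_device *ffa_dev)
    {
            if (!ffa_partition_supports_direct_recv(ffa_dev))
                    return -EOPNOTSUPP;
            return 0;
    }

    static void my_ffa_remove(struct ffa_device *ffa_dev) { }

    /* Hypothetical partition UUID; a real driver matches its own partition. */
    static const struct ffa_device_id my_ffa_ids[] = {
            { UUID_INIT(0x12345678, 0x1234, 0x5678,
                        0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88) },
            {}
    };

    static struct ffa_driver my_ffa_driver = {
            .name     = "my-ffa",
            .probe    = my_ffa_probe,
            .remove   = my_ffa_remove,
            .id_table = my_ffa_ids,
    };
    module_ffa_driver(my_ffa_driver);
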
diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h
new file mode 100644
index 000000000000..7f00c5285a32
--- /dev/null
+++ b/include/linux/arm_mpam.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2025 Arm Ltd. */
+
+#ifndef __LINUX_ARM_MPAM_H
+#define __LINUX_ARM_MPAM_H
+
+#include <linux/acpi.h>
+#include <linux/types.h>
+
+struct mpam_msc;
+
+enum mpam_msc_iface {
+ MPAM_IFACE_MMIO, /* a real MPAM MSC */
+ MPAM_IFACE_PCC, /* a fake MPAM MSC */
+};
+
+enum mpam_class_types {
+ MPAM_CLASS_CACHE, /* Caches, e.g. L2, L3 */
+ MPAM_CLASS_MEMORY, /* Main memory */
+ MPAM_CLASS_UNKNOWN, /* Everything else, e.g. SMMU */
+};
+
+#define MPAM_CLASS_ID_DEFAULT 255
+
+#ifdef CONFIG_ACPI_MPAM
+int acpi_mpam_parse_resources(struct mpam_msc *msc,
+ struct acpi_mpam_msc_node *tbl_msc);
+
+int acpi_mpam_count_msc(void);
+#else
+static inline int acpi_mpam_parse_resources(struct mpam_msc *msc,
+ struct acpi_mpam_msc_node *tbl_msc)
+{
+ return -EINVAL;
+}
+
+static inline int acpi_mpam_count_msc(void) { return -EINVAL; }
+#endif
+
+#ifdef CONFIG_ARM64_MPAM_DRIVER
+int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx,
+ enum mpam_class_types type, u8 class_id, int component_id);
+#else
+static inline int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx,
+ enum mpam_class_types type, u8 class_id,
+ int component_id)
+{
+ return -EINVAL;
+}
+#endif
+
+/**
+ * mpam_register_requestor() - Register a requestor with the MPAM driver
+ * @partid_max: The maximum PARTID value the requestor can generate.
+ * @pmg_max: The maximum PMG value the requestor can generate.
+ *
+ * Registers a requestor with the MPAM driver to ensure the chosen system-wide
+ * minimum PARTID and PMG values will allow the requestor's features to be used.
+ *
+ * Returns an error if the registration is too late, and a larger PARTID/PMG
+ * value has been advertised to user-space. In this case the requestor should
+ * not use its MPAM features. Returns 0 on success.
+ */
+int mpam_register_requestor(u16 partid_max, u8 pmg_max);
+
+#endif /* __LINUX_ARM_MPAM_H */
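
A usage sketch for mpam_register_requestor() (the values are made up; real PARTID/PMG widths would come from the requestor's ID registers):

    #include <linux/arm_mpam.h>

    static int my_requestor_init(void)
    {
            int ret;

            /* Hypothetical hardware limits: 64 PARTIDs, 4 PMGs. */
            ret = mpam_register_requestor(63, 3);
            if (ret)
                    return ret;     /* registered too late: skip MPAM use */

            return 0;
    }
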
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
index 0a241c5c911d..f652a5028b59 100644
--- a/include/linux/arm_sdei.h
+++ b/include/linux/arm_sdei.h
@@ -46,9 +46,13 @@ int sdei_unregister_ghes(struct ghes *ghes);
/* For use by arch code when CPU hotplug notifiers are not appropriate. */
int sdei_mask_local_cpu(void);
int sdei_unmask_local_cpu(void);
+void __init acpi_sdei_init(void);
+void sdei_handler_abort(void);
#else
static inline int sdei_mask_local_cpu(void) { return 0; }
static inline int sdei_unmask_local_cpu(void) { return 0; }
+static inline void acpi_sdei_init(void) { }
+static inline void sdei_handler_abort(void) { }
#endif /* CONFIG_ARM_SDE_INTERFACE */
diff --git a/include/linux/array_size.h b/include/linux/array_size.h
new file mode 100644
index 000000000000..06d7d83196ca
--- /dev/null
+++ b/include/linux/array_size.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ARRAY_SIZE_H
+#define _LINUX_ARRAY_SIZE_H
+
+#include <linux/compiler.h>
+
+/**
+ * ARRAY_SIZE - get the number of elements in array @arr
+ * @arr: array to be sized
+ */
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+
+#endif /* _LINUX_ARRAY_SIZE_H */
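
For completeness, a trivial sketch of the macro in use (hypothetical data):

    #include <linux/array_size.h>
    #include <linux/types.h>

    static const u16 primes[] = { 2, 3, 5, 7, 11 };

    static int sum_primes(void)
    {
            size_t i;
            int sum = 0;

            for (i = 0; i < ARRAY_SIZE(primes); i++)
                    sum += primes[i];
            return sum;     /* 28; ARRAY_SIZE(primes) is 5 at compile time */
    }
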
diff --git a/include/linux/ascii85.h b/include/linux/ascii85.h
index 4cc40201273e..83ad775ad0aa 100644
--- a/include/linux/ascii85.h
+++ b/include/linux/ascii85.h
@@ -8,7 +8,8 @@
#ifndef _ASCII85_H_
#define _ASCII85_H_
-#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/types.h>
#define ASCII85_BUFSZ 6
diff --git a/include/linux/asn1_decoder.h b/include/linux/asn1_decoder.h
index 83f9c6e1e5e9..b41bce82a191 100644
--- a/include/linux/asn1_decoder.h
+++ b/include/linux/asn1_decoder.h
@@ -9,6 +9,7 @@
#define _LINUX_ASN1_DECODER_H
#include <linux/asn1.h>
+#include <linux/types.h>
struct asn1_decoder;
diff --git a/include/linux/asn1_encoder.h b/include/linux/asn1_encoder.h
index 08cd0c2ad34f..d17484dffb74 100644
--- a/include/linux/asn1_encoder.h
+++ b/include/linux/asn1_encoder.h
@@ -6,7 +6,6 @@
#include <linux/types.h>
#include <linux/asn1.h>
#include <linux/asn1_ber_bytecode.h>
-#include <linux/bug.h>
#define asn1_oid_len(oid) (sizeof(oid)/sizeof(u32))
unsigned char *
diff --git a/include/linux/async.h b/include/linux/async.h
index cce4ad31e8fc..19b778d08600 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -90,6 +90,8 @@ async_schedule_dev(async_func_t func, struct device *dev)
return async_schedule_node(func, dev, dev_to_node(dev));
}
+bool async_schedule_dev_nocall(async_func_t func, struct device *dev);
+
/**
* async_schedule_dev_domain - A device specific version of async_schedule_domain
* @func: function to execute asynchronously
@@ -118,4 +120,5 @@ extern void async_synchronize_cookie(async_cookie_t cookie);
extern void async_synchronize_cookie_domain(async_cookie_t cookie,
struct async_domain *domain);
extern bool current_is_async(void);
+extern void async_init(void);
#endif
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 5cc73d7e5b52..1ca9f9e05f4f 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -168,11 +168,6 @@ async_xor_offs(struct page *dest, unsigned int offset,
int src_cnt, size_t len, struct async_submit_ctl *submit);
struct dma_async_tx_descriptor *
-async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
- int src_cnt, size_t len, enum sum_check_flags *result,
- struct async_submit_ctl *submit);
-
-struct dma_async_tx_descriptor *
async_xor_val_offs(struct page *dest, unsigned int offset,
struct page **src_list, unsigned int *src_offset,
int src_cnt, size_t len, enum sum_check_flags *result,
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 6e67aded28f8..54b416e26995 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -13,10 +13,9 @@
#ifndef __LINUX_ATA_H__
#define __LINUX_ATA_H__
-#include <linux/kernel.h>
+#include <linux/bits.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <asm/byteorder.h>
/* defines only for the constants which don't work well as enums */
#define ATA_DMA_BOUNDARY 0xffffUL
@@ -30,6 +29,7 @@ enum {
ATA_MAX_SECTORS_128 = 128,
ATA_MAX_SECTORS = 256,
ATA_MAX_SECTORS_1024 = 1024,
+ ATA_MAX_SECTORS_8191 = 8191,
ATA_MAX_SECTORS_LBA48 = 65535,/* avoid count to be 0000h */
ATA_MAX_SECTORS_TAPE = 65535,
ATA_MAX_TRIM_RNUM = 64, /* 512-byte payload / (6-byte LBA + 2-byte range per entry) */
@@ -323,14 +323,21 @@ enum {
ATA_LOG_SATA_NCQ = 0x10,
ATA_LOG_NCQ_NON_DATA = 0x12,
ATA_LOG_NCQ_SEND_RECV = 0x13,
+ ATA_LOG_CDL = 0x18,
+ ATA_LOG_CDL_SIZE = ATA_SECT_SIZE,
ATA_LOG_IDENTIFY_DEVICE = 0x30,
+ ATA_LOG_SENSE_NCQ = 0x0F,
+ ATA_LOG_SENSE_NCQ_SIZE = ATA_SECT_SIZE * 2,
+ ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,
/* Identify device log pages: */
+ ATA_LOG_SUPPORTED_CAPABILITIES = 0x03,
+ ATA_LOG_CURRENT_SETTINGS = 0x04,
ATA_LOG_SECURITY = 0x06,
ATA_LOG_SATA_SETTINGS = 0x08,
ATA_LOG_ZONED_INFORMATION = 0x09,
- /* Identify device SATA settings log:*/
+ /* Identify device SATA settings log: */
ATA_LOG_DEVSLP_OFFSET = 0x30,
ATA_LOG_DEVSLP_SIZE = 0x08,
ATA_LOG_DEVSLP_MDAT = 0x00,
@@ -415,6 +422,8 @@ enum {
SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */
SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */
+ SETFEATURES_CDL = 0x0d, /* Enable/disable cmd duration limits */
+
/* SETFEATURE Sector counts for SATA features */
SATA_FPDMA_OFFSET = 0x01, /* FPDMA non-zero buffer offsets */
SATA_FPDMA_AA = 0x02, /* FPDMA Setup FIS Auto-Activate */
@@ -425,6 +434,7 @@ enum {
SATA_DEVSLP = 0x09, /* Device Sleep */
SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
+ SETFEATURE_SENSE_DATA_SUCC_NCQ = 0xC4, /* Sense Data for successful NCQ commands */
/* feature values for SET_MAX */
ATA_SET_MAX_ADDR = 0x00,
@@ -557,6 +567,7 @@ struct ata_bmdma_prd {
#define ata_id_has_ncq(id) ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8))
#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1)
#define ata_id_removable(id) ((id)[ATA_ID_CONFIG] & (1 << 7))
+#define ata_id_is_locked(id) (((id)[ATA_ID_DLF] & 0x7) == 0x7)
#define ata_id_has_atapi_AN(id) \
((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
@@ -565,6 +576,18 @@ struct ata_bmdma_prd {
((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
((id)[ATA_ID_FEATURE_SUPP] & (1 << 2)))
+#define ata_id_has_devslp(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8)))
+#define ata_id_has_ncq_autosense(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7)))
+#define ata_id_has_dipm(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 3)))
#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10))
#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11))
#define ata_id_u32(id,n) \
@@ -577,9 +600,6 @@ struct ata_bmdma_prd {
#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
-#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
- ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
static inline bool ata_id_has_hipm(const u16 *id)
{
@@ -591,17 +611,6 @@ static inline bool ata_id_has_hipm(const u16 *id)
return val & (1 << 9);
}
-static inline bool ata_id_has_dipm(const u16 *id)
-{
- u16 val = id[ATA_ID_FEATURE_SUPP];
-
- if (val == 0 || val == 0xffff)
- return false;
-
- return val & (1 << 3);
-}
-
-
static inline bool ata_id_has_fua(const u16 *id)
{
if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000)
@@ -616,15 +625,6 @@ static inline bool ata_id_has_flush(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 12);
}
-static inline bool ata_id_flush_enabled(const u16 *id)
-{
- if (ata_id_has_flush(id) == 0)
- return false;
- if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
- return false;
- return id[ATA_ID_CFS_ENABLE_2] & (1 << 12);
-}
-
static inline bool ata_id_has_flush_ext(const u16 *id)
{
if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
@@ -632,19 +632,6 @@ static inline bool ata_id_has_flush_ext(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 13);
}
-static inline bool ata_id_flush_ext_enabled(const u16 *id)
-{
- if (ata_id_has_flush_ext(id) == 0)
- return false;
- if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
- return false;
- /*
- * some Maxtor disks have bit 13 defined incorrectly
- * so check bit 10 too
- */
- return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400;
-}
-
static inline u32 ata_id_logical_sector_size(const u16 *id)
{
/* T13/1699-D Revision 6a, Sep 6, 2008. Page 128.
@@ -699,15 +686,6 @@ static inline bool ata_id_has_lba48(const u16 *id)
return id[ATA_ID_COMMAND_SET_2] & (1 << 10);
}
-static inline bool ata_id_lba48_enabled(const u16 *id)
-{
- if (ata_id_has_lba48(id) == 0)
- return false;
- if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
- return false;
- return id[ATA_ID_CFS_ENABLE_2] & (1 << 10);
-}
-
static inline bool ata_id_hpa_enabled(const u16 *id)
{
/* Yes children, word 83 valid bits cover word 82 data */
@@ -770,16 +748,21 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
static inline bool ata_id_has_sense_reporting(const u16 *id)
{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ if (!(id[ATA_ID_CFS_ENABLE_2] & BIT(15)))
+ return false;
+ if ((id[ATA_ID_COMMAND_SET_3] & (BIT(15) | BIT(14))) != BIT(14))
return false;
- return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
+ return id[ATA_ID_COMMAND_SET_3] & BIT(6);
}
static inline bool ata_id_sense_reporting_enabled(const u16 *id)
{
- if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ if (!ata_id_has_sense_reporting(id))
+ return false;
+ /* ata_id_has_sense_reporting() == true, word 86 must have bit 15 set */
+ if ((id[ATA_ID_COMMAND_SET_4] & (BIT(15) | BIT(14))) != BIT(14))
return false;
- return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
+ return id[ATA_ID_COMMAND_SET_4] & BIT(6);
}
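Both rewritten helpers lean on the same IDENTIFY convention: for words such as 86, 119 and 120, bits 15:14 reading binary 01 (bit 14 set, bit 15 clear) mark the word's contents as valid, the same test the 0xC000/0x4000 comparisons elsewhere in this file perform. A hypothetical helper, not part of this patch, capturing that check:

/* Sketch: true when an IDENTIFY word's 15:14 validity bits read 01. */
static inline bool ata_id_word_valid(u16 word)
{
	return (word & (BIT(15) | BIT(14))) == BIT(14);
}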
/**
@@ -1044,76 +1027,6 @@ static inline bool atapi_id_dmadir(const u16 *dev_id)
return ata_id_major_version(dev_id) >= 7 && (dev_id[62] & 0x8000);
}
-/*
- * ata_id_is_lba_capacity_ok() performs a sanity check on
- * the claimed LBA capacity value for the device.
- *
- * Returns true if the LBA capacity looks sensible, false otherwise.
- *
- * It is called only once for each device.
- */
-static inline bool ata_id_is_lba_capacity_ok(u16 *id)
-{
- unsigned long lba_sects, chs_sects, head, tail;
-
- /* No non-LBA info .. so valid! */
- if (id[ATA_ID_CYLS] == 0)
- return true;
-
- lba_sects = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
-
- /*
- * The ATA spec tells large drives to return
- * C/H/S = 16383/16/63 independent of their size.
- * Some drives can be jumpered to use 15 heads instead of 16.
- * Some drives can be jumpered to use 4092 cyls instead of 16383.
- */
- if ((id[ATA_ID_CYLS] == 16383 ||
- (id[ATA_ID_CYLS] == 4092 && id[ATA_ID_CUR_CYLS] == 16383)) &&
- id[ATA_ID_SECTORS] == 63 &&
- (id[ATA_ID_HEADS] == 15 || id[ATA_ID_HEADS] == 16) &&
- (lba_sects >= 16383 * 63 * id[ATA_ID_HEADS]))
- return true;
-
- chs_sects = id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * id[ATA_ID_SECTORS];
-
- /* perform a rough sanity check on lba_sects: within 10% is OK */
- if (lba_sects - chs_sects < chs_sects/10)
- return true;
-
- /* some drives have the word order reversed */
- head = (lba_sects >> 16) & 0xffff;
- tail = lba_sects & 0xffff;
- lba_sects = head | (tail << 16);
-
- if (lba_sects - chs_sects < chs_sects/10) {
- *(__le32 *)&id[ATA_ID_LBA_CAPACITY] = __cpu_to_le32(lba_sects);
- return true; /* LBA capacity is (now) good */
- }
-
- return false; /* LBA capacity value may be bad */
-}
-
-static inline void ata_id_to_hd_driveid(u16 *id)
-{
-#ifdef __BIG_ENDIAN
- /* accessed in struct hd_driveid as 8-bit values */
- id[ATA_ID_MAX_MULTSECT] = __cpu_to_le16(id[ATA_ID_MAX_MULTSECT]);
- id[ATA_ID_CAPABILITY] = __cpu_to_le16(id[ATA_ID_CAPABILITY]);
- id[ATA_ID_OLD_PIO_MODES] = __cpu_to_le16(id[ATA_ID_OLD_PIO_MODES]);
- id[ATA_ID_OLD_DMA_MODES] = __cpu_to_le16(id[ATA_ID_OLD_DMA_MODES]);
- id[ATA_ID_MULTSECT] = __cpu_to_le16(id[ATA_ID_MULTSECT]);
-
- /* as 32-bit values */
- *(u32 *)&id[ATA_ID_LBA_CAPACITY] = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
- *(u32 *)&id[ATA_ID_SPG] = ata_id_u32(id, ATA_ID_SPG);
-
- /* as 64-bit value */
- *(u64 *)&id[ATA_ID_LBA_CAPACITY_2] =
- ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
-#endif
-}
-
static inline bool ata_ok(u8 status)
{
return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h
index 9cafec92282d..b9745cc08e38 100644
--- a/include/linux/ata_platform.h
+++ b/include/linux/ata_platform.h
@@ -19,7 +19,7 @@ extern int __pata_platform_probe(struct device *dev,
struct resource *irq_res,
unsigned int ioport_shift,
int __pio_mask,
- struct scsi_host_template *sht,
+ const struct scsi_host_template *sht,
bool use16bit);
/*
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index f6034ba774be..a55bfc6567d0 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -113,7 +113,7 @@ extern int aarp_proto_init(void);
/* Inter module exports */
/* Given a device, find its atif control structure */
-#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
+#if IS_ENABLED(CONFIG_ATALK)
static inline struct atalk_iface *atalk_find_dev(struct net_device *dev)
{
return dev->atalk_ptr;
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
deleted file mode 100644
index 76860a461ed2..000000000000
--- a/include/linux/ath9k_platform.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2008 Atheros Communications Inc.
- * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _LINUX_ATH9K_PLATFORM_H
-#define _LINUX_ATH9K_PLATFORM_H
-
-#define ATH9K_PLAT_EEP_MAX_WORDS 2048
-
-struct ath9k_platform_data {
- const char *eeprom_name;
-
- u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS];
- u8 *macaddr;
-
- int led_pin;
- u32 gpio_mask;
- u32 gpio_val;
-
- u32 bt_active_pin;
- u32 bt_priority_pin;
- u32 wlan_active_pin;
-
- bool endian_check;
- bool is_clk_25mhz;
- bool tx_gain_buffalo;
- bool disable_2ghz;
- bool disable_5ghz;
- bool led_active_high;
-
- int (*get_mac_revision)(void);
- int (*external_reset)(void);
-
- bool use_eeprom;
-};
-
-#endif /* _LINUX_ATH9K_PLATFORM_H */
diff --git a/include/linux/atm_tcp.h b/include/linux/atm_tcp.h
index c8ecf6f68fb5..2558439d849b 100644
--- a/include/linux/atm_tcp.h
+++ b/include/linux/atm_tcp.h
@@ -9,6 +9,8 @@
#include <uapi/linux/atm_tcp.h>
+struct atm_vcc;
+struct module;
struct atm_tcp_ops {
int (*attach)(struct atm_vcc *vcc,int itf);
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 9b02961d65ee..70807c679f1a 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -185,6 +185,7 @@ struct atmdev_ops { /* only send is required */
int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd,
void __user *arg);
#endif
+ int (*pre_send)(struct atm_vcc *vcc, struct sk_buff *skb);
int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
int (*send_bh)(struct atm_vcc *vcc, struct sk_buff *skb);
int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags);
@@ -249,6 +250,12 @@ static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
ATM_SKB(skb)->atm_options = vcc->atm_options;
}
+static inline void atm_return_tx(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ WARN_ON_ONCE(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize,
+ &sk_atm(vcc)->sk_wmem_alloc));
+}
+
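atm_return_tx() is the refund half of atm_account_tx(): it subtracts the acct_truesize recorded at charge time from the socket's sk_wmem_alloc, so the pair must bracket any failed transmit. A hedged driver-side sketch (example_send and hw_queue_full are hypothetical):

/* Illustrative send path: charge on entry, refund on failure. */
static int example_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	atm_account_tx(vcc, skb);		/* charge sk_wmem_alloc */

	if (hw_queue_full()) {			/* hypothetical condition */
		atm_return_tx(vcc, skb);	/* refund the same amount */
		return -EBUSY;
	}
	return 0;
}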
static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
{
atomic_add(truesize, &sk_atm(vcc)->sk_rmem_alloc);
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
deleted file mode 100644
index 1491af38cc6e..000000000000
--- a/include/linux/atmel-mci.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_ATMEL_MCI_H
-#define __LINUX_ATMEL_MCI_H
-
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-
-#define ATMCI_MAX_NR_SLOTS 2
-
-/**
- * struct mci_slot_pdata - board-specific per-slot configuration
- * @bus_width: Number of data lines wired up the slot
- * @detect_pin: GPIO pin wired to the card detect switch
- * @wp_pin: GPIO pin wired to the write protect sensor
- * @detect_is_active_high: The state of the detect pin when it is active
- * @non_removable: The slot is not removable, only detect once
- *
- * If a given slot is not present on the board, @bus_width should be
- * set to 0. The other fields are ignored in this case.
- *
- * Any pins that aren't available should be set to a negative value.
- *
- * Note that support for multiple slots is experimental -- some cards
- * might get upset if we don't get the clock management exactly right.
- * But in most cases, it should work just fine.
- */
-struct mci_slot_pdata {
- unsigned int bus_width;
- int detect_pin;
- int wp_pin;
- bool detect_is_active_high;
- bool non_removable;
-};
-
-/**
- * struct mci_platform_data - board-specific MMC/SDcard configuration
- * @dma_slave: DMA slave interface to use in data transfers.
- * @slot: Per-slot configuration data.
- */
-struct mci_platform_data {
- void *dma_slave;
- dma_filter_fn dma_filter;
- struct mci_slot_pdata slot[ATMCI_MAX_NR_SLOTS];
-};
-
-#endif /* __LINUX_ATMEL_MCI_H */
diff --git a/include/linux/atomic-arch-fallback.h b/include/linux/atomic-arch-fallback.h
deleted file mode 100644
index a3dba31df01e..000000000000
--- a/include/linux/atomic-arch-fallback.h
+++ /dev/null
@@ -1,2361 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-fallback.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _LINUX_ATOMIC_FALLBACK_H
-#define _LINUX_ATOMIC_FALLBACK_H
-
-#include <linux/compiler.h>
-
-#ifndef arch_xchg_relaxed
-#define arch_xchg_acquire arch_xchg
-#define arch_xchg_release arch_xchg
-#define arch_xchg_relaxed arch_xchg
-#else /* arch_xchg_relaxed */
-
-#ifndef arch_xchg_acquire
-#define arch_xchg_acquire(...) \
- __atomic_op_acquire(arch_xchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_xchg_release
-#define arch_xchg_release(...) \
- __atomic_op_release(arch_xchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_xchg
-#define arch_xchg(...) \
- __atomic_op_fence(arch_xchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_xchg_relaxed */
-
-#ifndef arch_cmpxchg_relaxed
-#define arch_cmpxchg_acquire arch_cmpxchg
-#define arch_cmpxchg_release arch_cmpxchg
-#define arch_cmpxchg_relaxed arch_cmpxchg
-#else /* arch_cmpxchg_relaxed */
-
-#ifndef arch_cmpxchg_acquire
-#define arch_cmpxchg_acquire(...) \
- __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg_release
-#define arch_cmpxchg_release(...) \
- __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg
-#define arch_cmpxchg(...) \
- __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_cmpxchg_relaxed */
-
-#ifndef arch_cmpxchg64_relaxed
-#define arch_cmpxchg64_acquire arch_cmpxchg64
-#define arch_cmpxchg64_release arch_cmpxchg64
-#define arch_cmpxchg64_relaxed arch_cmpxchg64
-#else /* arch_cmpxchg64_relaxed */
-
-#ifndef arch_cmpxchg64_acquire
-#define arch_cmpxchg64_acquire(...) \
- __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg64_release
-#define arch_cmpxchg64_release(...) \
- __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef arch_cmpxchg64
-#define arch_cmpxchg64(...) \
- __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
-#endif
-
-#endif /* arch_cmpxchg64_relaxed */
-
-#ifndef arch_try_cmpxchg_relaxed
-#ifdef arch_try_cmpxchg
-#define arch_try_cmpxchg_acquire arch_try_cmpxchg
-#define arch_try_cmpxchg_release arch_try_cmpxchg
-#define arch_try_cmpxchg_relaxed arch_try_cmpxchg
-#endif /* arch_try_cmpxchg */
-
-#ifndef arch_try_cmpxchg
-#define arch_try_cmpxchg(_ptr, _oldp, _new) \
-({ \
- typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg((_ptr), ___o, (_new)); \
- if (unlikely(___r != ___o)) \
- *___op = ___r; \
- likely(___r == ___o); \
-})
-#endif /* arch_try_cmpxchg */
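The fallback above is what makes try_cmpxchg() cheaper to loop on than raw cmpxchg(): on failure the observed value is written back through *_oldp, so the caller never has to re-read. A hedged consumer sketch (add_capped is a hypothetical name):

/* Retry loop: add 'delta' unless the value is already at 'cap'. */
static int add_capped(int *p, int delta, int cap)
{
	int old = READ_ONCE(*p);

	do {
		if (old >= cap)
			return old;	/* no update performed */
	} while (!arch_try_cmpxchg(p, &old, old + delta));

	return old;			/* value seen before our update */
}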
-
-#ifndef arch_try_cmpxchg_acquire
-#define arch_try_cmpxchg_acquire(_ptr, _oldp, _new) \
-({ \
- typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg_acquire((_ptr), ___o, (_new)); \
- if (unlikely(___r != ___o)) \
- *___op = ___r; \
- likely(___r == ___o); \
-})
-#endif /* arch_try_cmpxchg_acquire */
-
-#ifndef arch_try_cmpxchg_release
-#define arch_try_cmpxchg_release(_ptr, _oldp, _new) \
-({ \
- typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg_release((_ptr), ___o, (_new)); \
- if (unlikely(___r != ___o)) \
- *___op = ___r; \
- likely(___r == ___o); \
-})
-#endif /* arch_try_cmpxchg_release */
-
-#ifndef arch_try_cmpxchg_relaxed
-#define arch_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
-({ \
- typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = arch_cmpxchg_relaxed((_ptr), ___o, (_new)); \
- if (unlikely(___r != ___o)) \
- *___op = ___r; \
- likely(___r == ___o); \
-})
-#endif /* arch_try_cmpxchg_relaxed */
-
-#else /* arch_try_cmpxchg_relaxed */
-
-#ifndef arch_try_cmpxchg_acquire
-#define arch_try_cmpxchg_acquire(...) \
- __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_try_cmpxchg_release
-#define arch_try_cmpxchg_release(...) \
- __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef arch_try_cmpxchg
-#define arch_try_cmpxchg(...) \
- __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* arch_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic_read_acquire
-static __always_inline int
-arch_atomic_read_acquire(const atomic_t *v)
-{
- return smp_load_acquire(&(v)->counter);
-}
-#define arch_atomic_read_acquire arch_atomic_read_acquire
-#endif
-
-#ifndef arch_atomic_set_release
-static __always_inline void
-arch_atomic_set_release(atomic_t *v, int i)
-{
- smp_store_release(&(v)->counter, i);
-}
-#define arch_atomic_set_release arch_atomic_set_release
-#endif
-
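read_acquire/set_release above are the message-passing pair; a hedged sketch of what they buy callers (payload and ready are hypothetical):

static int payload;		/* hypothetical shared data */
static atomic_t ready;

static void producer(void)
{
	payload = 42;				/* plain store */
	arch_atomic_set_release(&ready, 1);	/* publish: orders 'payload' first */
}

static void consumer(void)
{
	if (arch_atomic_read_acquire(&ready))	/* pairs with the release */
		WARN_ON(payload != 42);		/* 'payload' is guaranteed visible */
}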
-#ifndef arch_atomic_add_return_relaxed
-#define arch_atomic_add_return_acquire arch_atomic_add_return
-#define arch_atomic_add_return_release arch_atomic_add_return
-#define arch_atomic_add_return_relaxed arch_atomic_add_return
-#else /* arch_atomic_add_return_relaxed */
-
-#ifndef arch_atomic_add_return_acquire
-static __always_inline int
-arch_atomic_add_return_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_add_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire
-#endif
-
-#ifndef arch_atomic_add_return_release
-static __always_inline int
-arch_atomic_add_return_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_add_return_relaxed(i, v);
-}
-#define arch_atomic_add_return_release arch_atomic_add_return_release
-#endif
-
-#ifndef arch_atomic_add_return
-static __always_inline int
-arch_atomic_add_return(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_add_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_add_return arch_atomic_add_return
-#endif
-
-#endif /* arch_atomic_add_return_relaxed */
-
-#ifndef arch_atomic_fetch_add_relaxed
-#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
-#define arch_atomic_fetch_add_release arch_atomic_fetch_add
-#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add
-#else /* arch_atomic_fetch_add_relaxed */
-
-#ifndef arch_atomic_fetch_add_acquire
-static __always_inline int
-arch_atomic_fetch_add_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_fetch_add_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire
-#endif
-
-#ifndef arch_atomic_fetch_add_release
-static __always_inline int
-arch_atomic_fetch_add_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_fetch_add_relaxed(i, v);
-}
-#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release
-#endif
-
-#ifndef arch_atomic_fetch_add
-static __always_inline int
-arch_atomic_fetch_add(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_add_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_add arch_atomic_fetch_add
-#endif
-
-#endif /* arch_atomic_fetch_add_relaxed */
-
-#ifndef arch_atomic_sub_return_relaxed
-#define arch_atomic_sub_return_acquire arch_atomic_sub_return
-#define arch_atomic_sub_return_release arch_atomic_sub_return
-#define arch_atomic_sub_return_relaxed arch_atomic_sub_return
-#else /* arch_atomic_sub_return_relaxed */
-
-#ifndef arch_atomic_sub_return_acquire
-static __always_inline int
-arch_atomic_sub_return_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_sub_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire
-#endif
-
-#ifndef arch_atomic_sub_return_release
-static __always_inline int
-arch_atomic_sub_return_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_sub_return_relaxed(i, v);
-}
-#define arch_atomic_sub_return_release arch_atomic_sub_return_release
-#endif
-
-#ifndef arch_atomic_sub_return
-static __always_inline int
-arch_atomic_sub_return(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_sub_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_sub_return arch_atomic_sub_return
-#endif
-
-#endif /* arch_atomic_sub_return_relaxed */
-
-#ifndef arch_atomic_fetch_sub_relaxed
-#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
-#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub
-#else /* arch_atomic_fetch_sub_relaxed */
-
-#ifndef arch_atomic_fetch_sub_acquire
-static __always_inline int
-arch_atomic_fetch_sub_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_fetch_sub_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire
-#endif
-
-#ifndef arch_atomic_fetch_sub_release
-static __always_inline int
-arch_atomic_fetch_sub_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_fetch_sub_relaxed(i, v);
-}
-#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release
-#endif
-
-#ifndef arch_atomic_fetch_sub
-static __always_inline int
-arch_atomic_fetch_sub(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_sub_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_sub arch_atomic_fetch_sub
-#endif
-
-#endif /* arch_atomic_fetch_sub_relaxed */
-
-#ifndef arch_atomic_inc
-static __always_inline void
-arch_atomic_inc(atomic_t *v)
-{
- arch_atomic_add(1, v);
-}
-#define arch_atomic_inc arch_atomic_inc
-#endif
-
-#ifndef arch_atomic_inc_return_relaxed
-#ifdef arch_atomic_inc_return
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return
-#define arch_atomic_inc_return_release arch_atomic_inc_return
-#define arch_atomic_inc_return_relaxed arch_atomic_inc_return
-#endif /* arch_atomic_inc_return */
-
-#ifndef arch_atomic_inc_return
-static __always_inline int
-arch_atomic_inc_return(atomic_t *v)
-{
- return arch_atomic_add_return(1, v);
-}
-#define arch_atomic_inc_return arch_atomic_inc_return
-#endif
-
-#ifndef arch_atomic_inc_return_acquire
-static __always_inline int
-arch_atomic_inc_return_acquire(atomic_t *v)
-{
- return arch_atomic_add_return_acquire(1, v);
-}
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
-#endif
-
-#ifndef arch_atomic_inc_return_release
-static __always_inline int
-arch_atomic_inc_return_release(atomic_t *v)
-{
- return arch_atomic_add_return_release(1, v);
-}
-#define arch_atomic_inc_return_release arch_atomic_inc_return_release
-#endif
-
-#ifndef arch_atomic_inc_return_relaxed
-static __always_inline int
-arch_atomic_inc_return_relaxed(atomic_t *v)
-{
- return arch_atomic_add_return_relaxed(1, v);
-}
-#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
-#endif
-
-#else /* arch_atomic_inc_return_relaxed */
-
-#ifndef arch_atomic_inc_return_acquire
-static __always_inline int
-arch_atomic_inc_return_acquire(atomic_t *v)
-{
- int ret = arch_atomic_inc_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire
-#endif
-
-#ifndef arch_atomic_inc_return_release
-static __always_inline int
-arch_atomic_inc_return_release(atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_inc_return_relaxed(v);
-}
-#define arch_atomic_inc_return_release arch_atomic_inc_return_release
-#endif
-
-#ifndef arch_atomic_inc_return
-static __always_inline int
-arch_atomic_inc_return(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_inc_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_inc_return arch_atomic_inc_return
-#endif
-
-#endif /* arch_atomic_inc_return_relaxed */
-
-#ifndef arch_atomic_fetch_inc_relaxed
-#ifdef arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc
-#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc
-#endif /* arch_atomic_fetch_inc */
-
-#ifndef arch_atomic_fetch_inc
-static __always_inline int
-arch_atomic_fetch_inc(atomic_t *v)
-{
- return arch_atomic_fetch_add(1, v);
-}
-#define arch_atomic_fetch_inc arch_atomic_fetch_inc
-#endif
-
-#ifndef arch_atomic_fetch_inc_acquire
-static __always_inline int
-arch_atomic_fetch_inc_acquire(atomic_t *v)
-{
- return arch_atomic_fetch_add_acquire(1, v);
-}
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic_fetch_inc_release
-static __always_inline int
-arch_atomic_fetch_inc_release(atomic_t *v)
-{
- return arch_atomic_fetch_add_release(1, v);
-}
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
-#endif
-
-#ifndef arch_atomic_fetch_inc_relaxed
-static __always_inline int
-arch_atomic_fetch_inc_relaxed(atomic_t *v)
-{
- return arch_atomic_fetch_add_relaxed(1, v);
-}
-#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed
-#endif
-
-#else /* arch_atomic_fetch_inc_relaxed */
-
-#ifndef arch_atomic_fetch_inc_acquire
-static __always_inline int
-arch_atomic_fetch_inc_acquire(atomic_t *v)
-{
- int ret = arch_atomic_fetch_inc_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic_fetch_inc_release
-static __always_inline int
-arch_atomic_fetch_inc_release(atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_fetch_inc_relaxed(v);
-}
-#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release
-#endif
-
-#ifndef arch_atomic_fetch_inc
-static __always_inline int
-arch_atomic_fetch_inc(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_inc_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_inc arch_atomic_fetch_inc
-#endif
-
-#endif /* arch_atomic_fetch_inc_relaxed */
-
-#ifndef arch_atomic_dec
-static __always_inline void
-arch_atomic_dec(atomic_t *v)
-{
- arch_atomic_sub(1, v);
-}
-#define arch_atomic_dec arch_atomic_dec
-#endif
-
-#ifndef arch_atomic_dec_return_relaxed
-#ifdef arch_atomic_dec_return
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return
-#define arch_atomic_dec_return_release arch_atomic_dec_return
-#define arch_atomic_dec_return_relaxed arch_atomic_dec_return
-#endif /* arch_atomic_dec_return */
-
-#ifndef arch_atomic_dec_return
-static __always_inline int
-arch_atomic_dec_return(atomic_t *v)
-{
- return arch_atomic_sub_return(1, v);
-}
-#define arch_atomic_dec_return arch_atomic_dec_return
-#endif
-
-#ifndef arch_atomic_dec_return_acquire
-static __always_inline int
-arch_atomic_dec_return_acquire(atomic_t *v)
-{
- return arch_atomic_sub_return_acquire(1, v);
-}
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
-#endif
-
-#ifndef arch_atomic_dec_return_release
-static __always_inline int
-arch_atomic_dec_return_release(atomic_t *v)
-{
- return arch_atomic_sub_return_release(1, v);
-}
-#define arch_atomic_dec_return_release arch_atomic_dec_return_release
-#endif
-
-#ifndef arch_atomic_dec_return_relaxed
-static __always_inline int
-arch_atomic_dec_return_relaxed(atomic_t *v)
-{
- return arch_atomic_sub_return_relaxed(1, v);
-}
-#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
-#endif
-
-#else /* arch_atomic_dec_return_relaxed */
-
-#ifndef arch_atomic_dec_return_acquire
-static __always_inline int
-arch_atomic_dec_return_acquire(atomic_t *v)
-{
- int ret = arch_atomic_dec_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire
-#endif
-
-#ifndef arch_atomic_dec_return_release
-static __always_inline int
-arch_atomic_dec_return_release(atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_dec_return_relaxed(v);
-}
-#define arch_atomic_dec_return_release arch_atomic_dec_return_release
-#endif
-
-#ifndef arch_atomic_dec_return
-static __always_inline int
-arch_atomic_dec_return(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_dec_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_dec_return arch_atomic_dec_return
-#endif
-
-#endif /* arch_atomic_dec_return_relaxed */
-
-#ifndef arch_atomic_fetch_dec_relaxed
-#ifdef arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec
-#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec
-#endif /* arch_atomic_fetch_dec */
-
-#ifndef arch_atomic_fetch_dec
-static __always_inline int
-arch_atomic_fetch_dec(atomic_t *v)
-{
- return arch_atomic_fetch_sub(1, v);
-}
-#define arch_atomic_fetch_dec arch_atomic_fetch_dec
-#endif
-
-#ifndef arch_atomic_fetch_dec_acquire
-static __always_inline int
-arch_atomic_fetch_dec_acquire(atomic_t *v)
-{
- return arch_atomic_fetch_sub_acquire(1, v);
-}
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic_fetch_dec_release
-static __always_inline int
-arch_atomic_fetch_dec_release(atomic_t *v)
-{
- return arch_atomic_fetch_sub_release(1, v);
-}
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
-#endif
-
-#ifndef arch_atomic_fetch_dec_relaxed
-static __always_inline int
-arch_atomic_fetch_dec_relaxed(atomic_t *v)
-{
- return arch_atomic_fetch_sub_relaxed(1, v);
-}
-#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed
-#endif
-
-#else /* arch_atomic_fetch_dec_relaxed */
-
-#ifndef arch_atomic_fetch_dec_acquire
-static __always_inline int
-arch_atomic_fetch_dec_acquire(atomic_t *v)
-{
- int ret = arch_atomic_fetch_dec_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic_fetch_dec_release
-static __always_inline int
-arch_atomic_fetch_dec_release(atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_fetch_dec_relaxed(v);
-}
-#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release
-#endif
-
-#ifndef arch_atomic_fetch_dec
-static __always_inline int
-arch_atomic_fetch_dec(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_dec_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_dec arch_atomic_fetch_dec
-#endif
-
-#endif /* arch_atomic_fetch_dec_relaxed */
-
-#ifndef arch_atomic_fetch_and_relaxed
-#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
-#define arch_atomic_fetch_and_release arch_atomic_fetch_and
-#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and
-#else /* arch_atomic_fetch_and_relaxed */
-
-#ifndef arch_atomic_fetch_and_acquire
-static __always_inline int
-arch_atomic_fetch_and_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_fetch_and_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire
-#endif
-
-#ifndef arch_atomic_fetch_and_release
-static __always_inline int
-arch_atomic_fetch_and_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_fetch_and_relaxed(i, v);
-}
-#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release
-#endif
-
-#ifndef arch_atomic_fetch_and
-static __always_inline int
-arch_atomic_fetch_and(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_and_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_and arch_atomic_fetch_and
-#endif
-
-#endif /* arch_atomic_fetch_and_relaxed */
-
-#ifndef arch_atomic_andnot
-static __always_inline void
-arch_atomic_andnot(int i, atomic_t *v)
-{
- arch_atomic_and(~i, v);
-}
-#define arch_atomic_andnot arch_atomic_andnot
-#endif
-
-#ifndef arch_atomic_fetch_andnot_relaxed
-#ifdef arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
-#endif /* arch_atomic_fetch_andnot */
-
-#ifndef arch_atomic_fetch_andnot
-static __always_inline int
-arch_atomic_fetch_andnot(int i, atomic_t *v)
-{
- return arch_atomic_fetch_and(~i, v);
-}
-#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
-#endif
-
-#ifndef arch_atomic_fetch_andnot_acquire
-static __always_inline int
-arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- return arch_atomic_fetch_and_acquire(~i, v);
-}
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic_fetch_andnot_release
-static __always_inline int
-arch_atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- return arch_atomic_fetch_and_release(~i, v);
-}
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic_fetch_andnot_relaxed
-static __always_inline int
-arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
- return arch_atomic_fetch_and_relaxed(~i, v);
-}
-#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
-#endif
-
-#else /* arch_atomic_fetch_andnot_relaxed */
-
-#ifndef arch_atomic_fetch_andnot_acquire
-static __always_inline int
-arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_fetch_andnot_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic_fetch_andnot_release
-static __always_inline int
-arch_atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_fetch_andnot_relaxed(i, v);
-}
-#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic_fetch_andnot
-static __always_inline int
-arch_atomic_fetch_andnot(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_andnot_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
-#endif
-
-#endif /* arch_atomic_fetch_andnot_relaxed */
-
-#ifndef arch_atomic_fetch_or_relaxed
-#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
-#define arch_atomic_fetch_or_release arch_atomic_fetch_or
-#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or
-#else /* arch_atomic_fetch_or_relaxed */
-
-#ifndef arch_atomic_fetch_or_acquire
-static __always_inline int
-arch_atomic_fetch_or_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_fetch_or_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire
-#endif
-
-#ifndef arch_atomic_fetch_or_release
-static __always_inline int
-arch_atomic_fetch_or_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_fetch_or_relaxed(i, v);
-}
-#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release
-#endif
-
-#ifndef arch_atomic_fetch_or
-static __always_inline int
-arch_atomic_fetch_or(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_or_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_or arch_atomic_fetch_or
-#endif
-
-#endif /* arch_atomic_fetch_or_relaxed */
-
-#ifndef arch_atomic_fetch_xor_relaxed
-#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
-#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor
-#else /* arch_atomic_fetch_xor_relaxed */
-
-#ifndef arch_atomic_fetch_xor_acquire
-static __always_inline int
-arch_atomic_fetch_xor_acquire(int i, atomic_t *v)
-{
- int ret = arch_atomic_fetch_xor_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire
-#endif
-
-#ifndef arch_atomic_fetch_xor_release
-static __always_inline int
-arch_atomic_fetch_xor_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return arch_atomic_fetch_xor_relaxed(i, v);
-}
-#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release
-#endif
-
-#ifndef arch_atomic_fetch_xor
-static __always_inline int
-arch_atomic_fetch_xor(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_fetch_xor_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_fetch_xor arch_atomic_fetch_xor
-#endif
-
-#endif /* arch_atomic_fetch_xor_relaxed */
-
-#ifndef arch_atomic_xchg_relaxed
-#define arch_atomic_xchg_acquire arch_atomic_xchg
-#define arch_atomic_xchg_release arch_atomic_xchg
-#define arch_atomic_xchg_relaxed arch_atomic_xchg
-#else /* arch_atomic_xchg_relaxed */
-
-#ifndef arch_atomic_xchg_acquire
-static __always_inline int
-arch_atomic_xchg_acquire(atomic_t *v, int i)
-{
- int ret = arch_atomic_xchg_relaxed(v, i);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
-#endif
-
-#ifndef arch_atomic_xchg_release
-static __always_inline int
-arch_atomic_xchg_release(atomic_t *v, int i)
-{
- __atomic_release_fence();
- return arch_atomic_xchg_relaxed(v, i);
-}
-#define arch_atomic_xchg_release arch_atomic_xchg_release
-#endif
-
-#ifndef arch_atomic_xchg
-static __always_inline int
-arch_atomic_xchg(atomic_t *v, int i)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_xchg_relaxed(v, i);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_xchg arch_atomic_xchg
-#endif
-
-#endif /* arch_atomic_xchg_relaxed */
-
-#ifndef arch_atomic_cmpxchg_relaxed
-#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg
-#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg
-#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg
-#else /* arch_atomic_cmpxchg_relaxed */
-
-#ifndef arch_atomic_cmpxchg_acquire
-static __always_inline int
-arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
-{
- int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic_cmpxchg_release
-static __always_inline int
-arch_atomic_cmpxchg_release(atomic_t *v, int old, int new)
-{
- __atomic_release_fence();
- return arch_atomic_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
-#endif
-
-#ifndef arch_atomic_cmpxchg
-static __always_inline int
-arch_atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_cmpxchg arch_atomic_cmpxchg
-#endif
-
-#endif /* arch_atomic_cmpxchg_relaxed */
-
-#ifndef arch_atomic_try_cmpxchg_relaxed
-#ifdef arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg
-#endif /* arch_atomic_try_cmpxchg */
-
-#ifndef arch_atomic_try_cmpxchg
-static __always_inline bool
-arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = arch_atomic_cmpxchg(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = arch_atomic_cmpxchg_acquire(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_release
-static __always_inline bool
-arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = arch_atomic_cmpxchg_release(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_relaxed
-static __always_inline bool
-arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = arch_atomic_cmpxchg_relaxed(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed
-#endif
-
-#else /* arch_atomic_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic_try_cmpxchg_release
-static __always_inline bool
-arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- __atomic_release_fence();
- return arch_atomic_try_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic_try_cmpxchg
-static __always_inline bool
-arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
-#endif
-
-#endif /* arch_atomic_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic_sub_and_test
-/**
- * arch_atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic_sub_and_test(int i, atomic_t *v)
-{
- return arch_atomic_sub_return(i, v) == 0;
-}
-#define arch_atomic_sub_and_test arch_atomic_sub_and_test
-#endif
-
-#ifndef arch_atomic_dec_and_test
-/**
- * arch_atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool
-arch_atomic_dec_and_test(atomic_t *v)
-{
- return arch_atomic_dec_return(v) == 0;
-}
-#define arch_atomic_dec_and_test arch_atomic_dec_and_test
-#endif
-
-#ifndef arch_atomic_inc_and_test
-/**
- * arch_atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic_inc_and_test(atomic_t *v)
-{
- return arch_atomic_inc_return(v) == 0;
-}
-#define arch_atomic_inc_and_test arch_atomic_inc_and_test
-#endif
-
-#ifndef arch_atomic_add_negative
-/**
- * arch_atomic_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __always_inline bool
-arch_atomic_add_negative(int i, atomic_t *v)
-{
- return arch_atomic_add_return(i, v) < 0;
-}
-#define arch_atomic_add_negative arch_atomic_add_negative
-#endif
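These *_and_test helpers exist so callers can act exactly once on the boundary value. A hedged sketch of the canonical dec_and_test() consumer (struct obj, put_obj and obj_free are hypothetical):

/* Last-put-frees: only the caller that drops the count to zero frees. */
static void put_obj(struct obj *o)
{
	if (arch_atomic_dec_and_test(&o->refcnt))
		obj_free(o);
}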
-
-#ifndef arch_atomic_fetch_add_unless
-/**
- * arch_atomic_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns original value of @v
- */
-static __always_inline int
-arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- int c = arch_atomic_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!arch_atomic_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
-#endif
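Because fetch_add_unless() returns the value observed before the (possibly skipped) addition, callers can tell whether the add happened by comparing against @u; arch_atomic_inc_not_zero() below is exactly that specialization. A hedged sketch of the classic take-a-reference-if-still-live use (get_ref is a hypothetical name):

/* Take a reference only if the count has not already dropped to zero. */
static bool get_ref(atomic_t *refcnt)
{
	/* old value != 0 means our increment was performed */
	return arch_atomic_fetch_add_unless(refcnt, 1, 0) != 0;
}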
-
-#ifndef arch_atomic_add_unless
-/**
- * arch_atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-arch_atomic_add_unless(atomic_t *v, int a, int u)
-{
- return arch_atomic_fetch_add_unless(v, a, u) != u;
-}
-#define arch_atomic_add_unless arch_atomic_add_unless
-#endif
-
-#ifndef arch_atomic_inc_not_zero
-/**
- * arch_atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-arch_atomic_inc_not_zero(atomic_t *v)
-{
- return arch_atomic_add_unless(v, 1, 0);
-}
-#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero
-#endif
-
-#ifndef arch_atomic_inc_unless_negative
-static __always_inline bool
-arch_atomic_inc_unless_negative(atomic_t *v)
-{
- int c = arch_atomic_read(v);
-
- do {
- if (unlikely(c < 0))
- return false;
- } while (!arch_atomic_try_cmpxchg(v, &c, c + 1));
-
- return true;
-}
-#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
-#endif
-
-#ifndef arch_atomic_dec_unless_positive
-static __always_inline bool
-arch_atomic_dec_unless_positive(atomic_t *v)
-{
- int c = arch_atomic_read(v);
-
- do {
- if (unlikely(c > 0))
- return false;
- } while (!arch_atomic_try_cmpxchg(v, &c, c - 1));
-
- return true;
-}
-#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
-#endif
-
-#ifndef arch_atomic_dec_if_positive
-static __always_inline int
-arch_atomic_dec_if_positive(atomic_t *v)
-{
- int dec, c = arch_atomic_read(v);
-
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!arch_atomic_try_cmpxchg(v, &c, dec));
-
- return dec;
-}
-#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
-#endif
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
-#endif
-
-#ifndef arch_atomic64_read_acquire
-static __always_inline s64
-arch_atomic64_read_acquire(const atomic64_t *v)
-{
- return smp_load_acquire(&(v)->counter);
-}
-#define arch_atomic64_read_acquire arch_atomic64_read_acquire
-#endif
-
-#ifndef arch_atomic64_set_release
-static __always_inline void
-arch_atomic64_set_release(atomic64_t *v, s64 i)
-{
- smp_store_release(&(v)->counter, i);
-}
-#define arch_atomic64_set_release arch_atomic64_set_release
-#endif
-
-#ifndef arch_atomic64_add_return_relaxed
-#define arch_atomic64_add_return_acquire arch_atomic64_add_return
-#define arch_atomic64_add_return_release arch_atomic64_add_return
-#define arch_atomic64_add_return_relaxed arch_atomic64_add_return
-#else /* arch_atomic64_add_return_relaxed */
-
-#ifndef arch_atomic64_add_return_acquire
-static __always_inline s64
-arch_atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_add_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire
-#endif
-
-#ifndef arch_atomic64_add_return_release
-static __always_inline s64
-arch_atomic64_add_return_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_add_return_relaxed(i, v);
-}
-#define arch_atomic64_add_return_release arch_atomic64_add_return_release
-#endif
-
-#ifndef arch_atomic64_add_return
-static __always_inline s64
-arch_atomic64_add_return(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_add_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_add_return arch_atomic64_add_return
-#endif
-
-#endif /* arch_atomic64_add_return_relaxed */
-
-#ifndef arch_atomic64_fetch_add_relaxed
-#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
-#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add
-#else /* arch_atomic64_fetch_add_relaxed */
-
-#ifndef arch_atomic64_fetch_add_acquire
-static __always_inline s64
-arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_add_release
-static __always_inline s64
-arch_atomic64_fetch_add_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_fetch_add_relaxed(i, v);
-}
-#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release
-#endif
-
-#ifndef arch_atomic64_fetch_add
-static __always_inline s64
-arch_atomic64_fetch_add(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_add_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_add arch_atomic64_fetch_add
-#endif
-
-#endif /* arch_atomic64_fetch_add_relaxed */
-
-#ifndef arch_atomic64_sub_return_relaxed
-#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
-#define arch_atomic64_sub_return_release arch_atomic64_sub_return
-#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return
-#else /* arch_atomic64_sub_return_relaxed */
-
-#ifndef arch_atomic64_sub_return_acquire
-static __always_inline s64
-arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_sub_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire
-#endif
-
-#ifndef arch_atomic64_sub_return_release
-static __always_inline s64
-arch_atomic64_sub_return_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_sub_return_relaxed(i, v);
-}
-#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release
-#endif
-
-#ifndef arch_atomic64_sub_return
-static __always_inline s64
-arch_atomic64_sub_return(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_sub_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_sub_return arch_atomic64_sub_return
-#endif
-
-#endif /* arch_atomic64_sub_return_relaxed */
-
-#ifndef arch_atomic64_fetch_sub_relaxed
-#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub
-#else /* arch_atomic64_fetch_sub_relaxed */
-
-#ifndef arch_atomic64_fetch_sub_acquire
-static __always_inline s64
-arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_sub_release
-static __always_inline s64
-arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_fetch_sub_relaxed(i, v);
-}
-#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release
-#endif
-
-#ifndef arch_atomic64_fetch_sub
-static __always_inline s64
-arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_sub_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
-#endif
-
-#endif /* arch_atomic64_fetch_sub_relaxed */
-
-#ifndef arch_atomic64_inc
-static __always_inline void
-arch_atomic64_inc(atomic64_t *v)
-{
- arch_atomic64_add(1, v);
-}
-#define arch_atomic64_inc arch_atomic64_inc
-#endif
-
-#ifndef arch_atomic64_inc_return_relaxed
-#ifdef arch_atomic64_inc_return
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return
-#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return
-#endif /* arch_atomic64_inc_return */
-
-#ifndef arch_atomic64_inc_return
-static __always_inline s64
-arch_atomic64_inc_return(atomic64_t *v)
-{
- return arch_atomic64_add_return(1, v);
-}
-#define arch_atomic64_inc_return arch_atomic64_inc_return
-#endif
-
-#ifndef arch_atomic64_inc_return_acquire
-static __always_inline s64
-arch_atomic64_inc_return_acquire(atomic64_t *v)
-{
- return arch_atomic64_add_return_acquire(1, v);
-}
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
-#endif
-
-#ifndef arch_atomic64_inc_return_release
-static __always_inline s64
-arch_atomic64_inc_return_release(atomic64_t *v)
-{
- return arch_atomic64_add_return_release(1, v);
-}
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
-#endif
-
-#ifndef arch_atomic64_inc_return_relaxed
-static __always_inline s64
-arch_atomic64_inc_return_relaxed(atomic64_t *v)
-{
- return arch_atomic64_add_return_relaxed(1, v);
-}
-#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
-#endif
-
-#else /* arch_atomic64_inc_return_relaxed */
-
-#ifndef arch_atomic64_inc_return_acquire
-static __always_inline s64
-arch_atomic64_inc_return_acquire(atomic64_t *v)
-{
- s64 ret = arch_atomic64_inc_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire
-#endif
-
-#ifndef arch_atomic64_inc_return_release
-static __always_inline s64
-arch_atomic64_inc_return_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_inc_return_relaxed(v);
-}
-#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release
-#endif
-
-#ifndef arch_atomic64_inc_return
-static __always_inline s64
-arch_atomic64_inc_return(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_inc_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_inc_return arch_atomic64_inc_return
-#endif
-
-#endif /* arch_atomic64_inc_return_relaxed */
-
-#ifndef arch_atomic64_fetch_inc_relaxed
-#ifdef arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc
-#endif /* arch_atomic64_fetch_inc */
-
-#ifndef arch_atomic64_fetch_inc
-static __always_inline s64
-arch_atomic64_fetch_inc(atomic64_t *v)
-{
- return arch_atomic64_fetch_add(1, v);
-}
-#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
-#endif
-
-#ifndef arch_atomic64_fetch_inc_acquire
-static __always_inline s64
-arch_atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- return arch_atomic64_fetch_add_acquire(1, v);
-}
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_inc_release
-static __always_inline s64
-arch_atomic64_fetch_inc_release(atomic64_t *v)
-{
- return arch_atomic64_fetch_add_release(1, v);
-}
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
-#endif
-
-#ifndef arch_atomic64_fetch_inc_relaxed
-static __always_inline s64
-arch_atomic64_fetch_inc_relaxed(atomic64_t *v)
-{
- return arch_atomic64_fetch_add_relaxed(1, v);
-}
-#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed
-#endif
-
-#else /* arch_atomic64_fetch_inc_relaxed */
-
-#ifndef arch_atomic64_fetch_inc_acquire
-static __always_inline s64
-arch_atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- s64 ret = arch_atomic64_fetch_inc_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_inc_release
-static __always_inline s64
-arch_atomic64_fetch_inc_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_fetch_inc_relaxed(v);
-}
-#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release
-#endif
-
-#ifndef arch_atomic64_fetch_inc
-static __always_inline s64
-arch_atomic64_fetch_inc(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_inc_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc
-#endif
-
-#endif /* arch_atomic64_fetch_inc_relaxed */
-
-#ifndef arch_atomic64_dec
-static __always_inline void
-arch_atomic64_dec(atomic64_t *v)
-{
- arch_atomic64_sub(1, v);
-}
-#define arch_atomic64_dec arch_atomic64_dec
-#endif
-
-#ifndef arch_atomic64_dec_return_relaxed
-#ifdef arch_atomic64_dec_return
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return
-#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return
-#endif /* arch_atomic64_dec_return */
-
-#ifndef arch_atomic64_dec_return
-static __always_inline s64
-arch_atomic64_dec_return(atomic64_t *v)
-{
- return arch_atomic64_sub_return(1, v);
-}
-#define arch_atomic64_dec_return arch_atomic64_dec_return
-#endif
-
-#ifndef arch_atomic64_dec_return_acquire
-static __always_inline s64
-arch_atomic64_dec_return_acquire(atomic64_t *v)
-{
- return arch_atomic64_sub_return_acquire(1, v);
-}
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
-#endif
-
-#ifndef arch_atomic64_dec_return_release
-static __always_inline s64
-arch_atomic64_dec_return_release(atomic64_t *v)
-{
- return arch_atomic64_sub_return_release(1, v);
-}
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
-#endif
-
-#ifndef arch_atomic64_dec_return_relaxed
-static __always_inline s64
-arch_atomic64_dec_return_relaxed(atomic64_t *v)
-{
- return arch_atomic64_sub_return_relaxed(1, v);
-}
-#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
-#endif
-
-#else /* arch_atomic64_dec_return_relaxed */
-
-#ifndef arch_atomic64_dec_return_acquire
-static __always_inline s64
-arch_atomic64_dec_return_acquire(atomic64_t *v)
-{
- s64 ret = arch_atomic64_dec_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire
-#endif
-
-#ifndef arch_atomic64_dec_return_release
-static __always_inline s64
-arch_atomic64_dec_return_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_dec_return_relaxed(v);
-}
-#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release
-#endif
-
-#ifndef arch_atomic64_dec_return
-static __always_inline s64
-arch_atomic64_dec_return(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_dec_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_dec_return arch_atomic64_dec_return
-#endif
-
-#endif /* arch_atomic64_dec_return_relaxed */
-
-#ifndef arch_atomic64_fetch_dec_relaxed
-#ifdef arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec
-#endif /* arch_atomic64_fetch_dec */
-
-#ifndef arch_atomic64_fetch_dec
-static __always_inline s64
-arch_atomic64_fetch_dec(atomic64_t *v)
-{
- return arch_atomic64_fetch_sub(1, v);
-}
-#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
-#endif
-
-#ifndef arch_atomic64_fetch_dec_acquire
-static __always_inline s64
-arch_atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- return arch_atomic64_fetch_sub_acquire(1, v);
-}
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_dec_release
-static __always_inline s64
-arch_atomic64_fetch_dec_release(atomic64_t *v)
-{
- return arch_atomic64_fetch_sub_release(1, v);
-}
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
-#endif
-
-#ifndef arch_atomic64_fetch_dec_relaxed
-static __always_inline s64
-arch_atomic64_fetch_dec_relaxed(atomic64_t *v)
-{
- return arch_atomic64_fetch_sub_relaxed(1, v);
-}
-#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed
-#endif
-
-#else /* arch_atomic64_fetch_dec_relaxed */
-
-#ifndef arch_atomic64_fetch_dec_acquire
-static __always_inline s64
-arch_atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- s64 ret = arch_atomic64_fetch_dec_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_dec_release
-static __always_inline s64
-arch_atomic64_fetch_dec_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_fetch_dec_relaxed(v);
-}
-#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release
-#endif
-
-#ifndef arch_atomic64_fetch_dec
-static __always_inline s64
-arch_atomic64_fetch_dec(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_dec_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec
-#endif
-
-#endif /* arch_atomic64_fetch_dec_relaxed */
-
-#ifndef arch_atomic64_fetch_and_relaxed
-#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
-#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and
-#else /* arch_atomic64_fetch_and_relaxed */
-
-#ifndef arch_atomic64_fetch_and_acquire
-static __always_inline s64
-arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_and_release
-static __always_inline s64
-arch_atomic64_fetch_and_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_fetch_and_relaxed(i, v);
-}
-#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release
-#endif
-
-#ifndef arch_atomic64_fetch_and
-static __always_inline s64
-arch_atomic64_fetch_and(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_and_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_and arch_atomic64_fetch_and
-#endif
-
-#endif /* arch_atomic64_fetch_and_relaxed */
-
-#ifndef arch_atomic64_andnot
-static __always_inline void
-arch_atomic64_andnot(s64 i, atomic64_t *v)
-{
- arch_atomic64_and(~i, v);
-}
-#define arch_atomic64_andnot arch_atomic64_andnot
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_relaxed
-#ifdef arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot
-#endif /* arch_atomic64_fetch_andnot */
-
-#ifndef arch_atomic64_fetch_andnot
-static __always_inline s64
-arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- return arch_atomic64_fetch_and(~i, v);
-}
-#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_acquire
-static __always_inline s64
-arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- return arch_atomic64_fetch_and_acquire(~i, v);
-}
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_release
-static __always_inline s64
-arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- return arch_atomic64_fetch_and_release(~i, v);
-}
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_relaxed
-static __always_inline s64
-arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
- return arch_atomic64_fetch_and_relaxed(~i, v);
-}
-#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
-#endif
-
-#else /* arch_atomic64_fetch_andnot_relaxed */
-
-#ifndef arch_atomic64_fetch_andnot_acquire
-static __always_inline s64
-arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_andnot_release
-static __always_inline s64
-arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_fetch_andnot_relaxed(i, v);
-}
-#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release
-#endif
-
-#ifndef arch_atomic64_fetch_andnot
-static __always_inline s64
-arch_atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_andnot_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
-#endif
-
-#endif /* arch_atomic64_fetch_andnot_relaxed */
-
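Since fetch_andnot(i, v) is defined as fetch_and(~i, v), its natural use is
clearing a mask atomically while learning the prior state. A hedged sketch
(OBJ_DIRTY and obj_clear_dirty() are illustrative names):

    #define OBJ_DIRTY (1ULL << 0) /* illustrative flag bit */

    /* Clear OBJ_DIRTY; true iff this caller was the one to clear it. */
    static __always_inline bool obj_clear_dirty(atomic64_t *state)
    {
            return arch_atomic64_fetch_andnot(OBJ_DIRTY, state) & OBJ_DIRTY;
    }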
-#ifndef arch_atomic64_fetch_or_relaxed
-#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
-#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or
-#else /* arch_atomic64_fetch_or_relaxed */
-
-#ifndef arch_atomic64_fetch_or_acquire
-static __always_inline s64
-arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_or_release
-static __always_inline s64
-arch_atomic64_fetch_or_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_fetch_or_relaxed(i, v);
-}
-#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release
-#endif
-
-#ifndef arch_atomic64_fetch_or
-static __always_inline s64
-arch_atomic64_fetch_or(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_or_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_or arch_atomic64_fetch_or
-#endif
-
-#endif /* arch_atomic64_fetch_or_relaxed */
-
-#ifndef arch_atomic64_fetch_xor_relaxed
-#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor
-#else /* arch_atomic64_fetch_xor_relaxed */
-
-#ifndef arch_atomic64_fetch_xor_acquire
-static __always_inline s64
-arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire
-#endif
-
-#ifndef arch_atomic64_fetch_xor_release
-static __always_inline s64
-arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return arch_atomic64_fetch_xor_relaxed(i, v);
-}
-#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release
-#endif
-
-#ifndef arch_atomic64_fetch_xor
-static __always_inline s64
-arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_fetch_xor_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
-#endif
-
-#endif /* arch_atomic64_fetch_xor_relaxed */
-
-#ifndef arch_atomic64_xchg_relaxed
-#define arch_atomic64_xchg_acquire arch_atomic64_xchg
-#define arch_atomic64_xchg_release arch_atomic64_xchg
-#define arch_atomic64_xchg_relaxed arch_atomic64_xchg
-#else /* arch_atomic64_xchg_relaxed */
-
-#ifndef arch_atomic64_xchg_acquire
-static __always_inline s64
-arch_atomic64_xchg_acquire(atomic64_t *v, s64 i)
-{
- s64 ret = arch_atomic64_xchg_relaxed(v, i);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire
-#endif
-
-#ifndef arch_atomic64_xchg_release
-static __always_inline s64
-arch_atomic64_xchg_release(atomic64_t *v, s64 i)
-{
- __atomic_release_fence();
- return arch_atomic64_xchg_relaxed(v, i);
-}
-#define arch_atomic64_xchg_release arch_atomic64_xchg_release
-#endif
-
-#ifndef arch_atomic64_xchg
-static __always_inline s64
-arch_atomic64_xchg(atomic64_t *v, s64 i)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_xchg_relaxed(v, i);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_xchg arch_atomic64_xchg
-#endif
-
-#endif /* arch_atomic64_xchg_relaxed */
-
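A typical consumer of the fully ordered xchg above is read-and-reset of an
accumulated statistic: swap in zero and keep whatever total was there. A
hedged usage sketch (stat_drain() is an illustrative name):

    /* Drain a 64-bit statistic; concurrent adders land in the next epoch. */
    static __always_inline s64 stat_drain(atomic64_t *stat)
    {
            /* Fully ordered, so updates made before the swap are visible
             * to whoever consumes the returned total. */
            return arch_atomic64_xchg(stat, 0);
    }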
-#ifndef arch_atomic64_cmpxchg_relaxed
-#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg
-#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg
-#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg
-#else /* arch_atomic64_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_cmpxchg_acquire
-static __always_inline s64
-arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
- s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic64_cmpxchg_release
-static __always_inline s64
-arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
-{
- __atomic_release_fence();
- return arch_atomic64_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release
-#endif
-
-#ifndef arch_atomic64_cmpxchg
-static __always_inline s64
-arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
-#endif
-
-#endif /* arch_atomic64_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_try_cmpxchg_relaxed
-#ifdef arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg
-#endif /* arch_atomic64_try_cmpxchg */
-
-#ifndef arch_atomic64_try_cmpxchg
-static __always_inline bool
-arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = arch_atomic64_cmpxchg(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = arch_atomic64_cmpxchg_acquire(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_release
-static __always_inline bool
-arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = arch_atomic64_cmpxchg_release(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_relaxed
-static __always_inline bool
-arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = arch_atomic64_cmpxchg_relaxed(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed
-#endif
-
-#else /* arch_atomic64_try_cmpxchg_relaxed */
-
-#ifndef arch_atomic64_try_cmpxchg_acquire
-static __always_inline bool
-arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg_release
-static __always_inline bool
-arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- __atomic_release_fence();
- return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release
-#endif
-
-#ifndef arch_atomic64_try_cmpxchg
-static __always_inline bool
-arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
-#endif
-
-#endif /* arch_atomic64_try_cmpxchg_relaxed */
-
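What try_cmpxchg() buys over cmpxchg() is its failure path: the value actually
observed is written back through *old, so a retry loop needs no explicit
re-read. A hedged sketch of the canonical loop (CEIL and atomic64_inc_below()
are illustrative):

    #define CEIL ((s64)1 << 40) /* illustrative upper bound */

    /* Increment v unless it already reached CEIL; true if incremented. */
    static __always_inline bool atomic64_inc_below(atomic64_t *v)
    {
            s64 old = arch_atomic64_read(v);

            do {
                    if (old >= CEIL)
                            return false;
                    /* on failure, 'old' was refreshed; just retry */
            } while (!arch_atomic64_try_cmpxchg(v, &old, old + 1));

            return true;
    }

The same loop shape appears verbatim in the fetch_add_unless() and
inc_unless_negative() fallbacks further down.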
-#ifndef arch_atomic64_sub_and_test
-/**
- * arch_atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
-{
- return arch_atomic64_sub_return(i, v) == 0;
-}
-#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
-#endif
-
-#ifndef arch_atomic64_dec_and_test
-/**
- * arch_atomic64_dec_and_test - decrement and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool
-arch_atomic64_dec_and_test(atomic64_t *v)
-{
- return arch_atomic64_dec_return(v) == 0;
-}
-#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
-#endif
-
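dec_and_test() is the put side of the classic reference-count idiom: exactly
one caller, the one dropping the last reference, sees true and may free the
object. A hedged sketch (struct obj and obj_free() are illustrative):

    struct obj {
            atomic64_t refs;
            /* ... payload ... */
    };

    void obj_free(struct obj *o); /* illustrative destructor */

    static __always_inline void obj_put(struct obj *o)
    {
            /* The fully ordered RMW guarantees all prior stores to *o are
             * visible to the single thread that sees the count hit zero. */
            if (arch_atomic64_dec_and_test(&o->refs))
                    obj_free(o);
    }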
-#ifndef arch_atomic64_inc_and_test
-/**
- * arch_atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-arch_atomic64_inc_and_test(atomic64_t *v)
-{
- return arch_atomic64_inc_return(v) == 0;
-}
-#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
-#endif
-
-#ifndef arch_atomic64_add_negative
-/**
- * arch_atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when the
- * result is greater than or equal to zero.
- */
-static __always_inline bool
-arch_atomic64_add_negative(s64 i, atomic64_t *v)
-{
- return arch_atomic64_add_return(i, v) < 0;
-}
-#define arch_atomic64_add_negative arch_atomic64_add_negative
-#endif
-
-#ifndef arch_atomic64_fetch_add_unless
-/**
- * arch_atomic64_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the original value of @v.
- */
-static __always_inline s64
-arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- s64 c = arch_atomic64_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
-#endif
-
-#ifndef arch_atomic64_add_unless
-/**
- * arch_atomic64_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- return arch_atomic64_fetch_add_unless(v, a, u) != u;
-}
-#define arch_atomic64_add_unless arch_atomic64_add_unless
-#endif
-
-#ifndef arch_atomic64_inc_not_zero
-/**
- * arch_atomic64_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-arch_atomic64_inc_not_zero(atomic64_t *v)
-{
- return arch_atomic64_add_unless(v, 1, 0);
-}
-#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
-#endif
-
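The matching get side is inc_not_zero(): during a lockless lookup, take a
reference only if the object has not already begun dying. A hedged sketch
pairing with the obj_put() sketch above:

    /* True if a reference was taken; false if refs had already hit zero. */
    static __always_inline bool obj_tryget(struct obj *o)
    {
            return arch_atomic64_inc_not_zero(&o->refs);
    }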
-#ifndef arch_atomic64_inc_unless_negative
-static __always_inline bool
-arch_atomic64_inc_unless_negative(atomic64_t *v)
-{
- s64 c = arch_atomic64_read(v);
-
- do {
- if (unlikely(c < 0))
- return false;
- } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1));
-
- return true;
-}
-#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
-#endif
-
-#ifndef arch_atomic64_dec_unless_positive
-static __always_inline bool
-arch_atomic64_dec_unless_positive(atomic64_t *v)
-{
- s64 c = arch_atomic64_read(v);
-
- do {
- if (unlikely(c > 0))
- return false;
- } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1));
-
- return true;
-}
-#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
-#endif
-
-#ifndef arch_atomic64_dec_if_positive
-static __always_inline s64
-arch_atomic64_dec_if_positive(atomic64_t *v)
-{
- s64 dec, c = arch_atomic64_read(v);
-
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!arch_atomic64_try_cmpxchg(v, &c, dec));
-
- return dec;
-}
-#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#endif
-
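Unlike the boolean helpers, dec_if_positive() returns the would-be new value,
so a negative result signals that the decrement was refused. A hedged sketch
treating the counter as a token pool (token_take() is illustrative):

    /* Take one token; false means the pool was already empty. */
    static __always_inline bool token_take(atomic64_t *pool)
    {
            return arch_atomic64_dec_if_positive(pool) >= 0;
    }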
-#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// cca554917d7ea73d5e3e7397dd70c484cad9b2c4
diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h
deleted file mode 100644
index 2a3f55d98be9..000000000000
--- a/include/linux/atomic-fallback.h
+++ /dev/null
@@ -1,2595 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// Generated by scripts/atomic/gen-atomic-fallback.sh
-// DO NOT MODIFY THIS FILE DIRECTLY
-
-#ifndef _LINUX_ATOMIC_FALLBACK_H
-#define _LINUX_ATOMIC_FALLBACK_H
-
-#include <linux/compiler.h>
-
-#ifndef xchg_relaxed
-#define xchg_acquire xchg
-#define xchg_release xchg
-#define xchg_relaxed xchg
-#else /* xchg_relaxed */
-
-#ifndef xchg_acquire
-#define xchg_acquire(...) \
- __atomic_op_acquire(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg_release
-#define xchg_release(...) \
- __atomic_op_release(xchg, __VA_ARGS__)
-#endif
-
-#ifndef xchg
-#define xchg(...) \
- __atomic_op_fence(xchg, __VA_ARGS__)
-#endif
-
-#endif /* xchg_relaxed */
-
-#ifndef cmpxchg_relaxed
-#define cmpxchg_acquire cmpxchg
-#define cmpxchg_release cmpxchg
-#define cmpxchg_relaxed cmpxchg
-#else /* cmpxchg_relaxed */
-
-#ifndef cmpxchg_acquire
-#define cmpxchg_acquire(...) \
- __atomic_op_acquire(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg_release
-#define cmpxchg_release(...) \
- __atomic_op_release(cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg
-#define cmpxchg(...) \
- __atomic_op_fence(cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* cmpxchg_relaxed */
-
-#ifndef cmpxchg64_relaxed
-#define cmpxchg64_acquire cmpxchg64
-#define cmpxchg64_release cmpxchg64
-#define cmpxchg64_relaxed cmpxchg64
-#else /* cmpxchg64_relaxed */
-
-#ifndef cmpxchg64_acquire
-#define cmpxchg64_acquire(...) \
- __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64_release
-#define cmpxchg64_release(...) \
- __atomic_op_release(cmpxchg64, __VA_ARGS__)
-#endif
-
-#ifndef cmpxchg64
-#define cmpxchg64(...) \
- __atomic_op_fence(cmpxchg64, __VA_ARGS__)
-#endif
-
-#endif /* cmpxchg64_relaxed */
-
-#ifndef try_cmpxchg_relaxed
-#ifdef try_cmpxchg
-#define try_cmpxchg_acquire try_cmpxchg
-#define try_cmpxchg_release try_cmpxchg
-#define try_cmpxchg_relaxed try_cmpxchg
-#endif /* try_cmpxchg */
-
-#ifndef try_cmpxchg
-#define try_cmpxchg(_ptr, _oldp, _new) \
-({ \
- typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = cmpxchg((_ptr), ___o, (_new)); \
- if (unlikely(___r != ___o)) \
- *___op = ___r; \
- likely(___r == ___o); \
-})
-#endif /* try_cmpxchg */
-
-#ifndef try_cmpxchg_acquire
-#define try_cmpxchg_acquire(_ptr, _oldp, _new) \
-({ \
- typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = cmpxchg_acquire((_ptr), ___o, (_new)); \
- if (unlikely(___r != ___o)) \
- *___op = ___r; \
- likely(___r == ___o); \
-})
-#endif /* try_cmpxchg_acquire */
-
-#ifndef try_cmpxchg_release
-#define try_cmpxchg_release(_ptr, _oldp, _new) \
-({ \
- typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = cmpxchg_release((_ptr), ___o, (_new)); \
- if (unlikely(___r != ___o)) \
- *___op = ___r; \
- likely(___r == ___o); \
-})
-#endif /* try_cmpxchg_release */
-
-#ifndef try_cmpxchg_relaxed
-#define try_cmpxchg_relaxed(_ptr, _oldp, _new) \
-({ \
- typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
- ___r = cmpxchg_relaxed((_ptr), ___o, (_new)); \
- if (unlikely(___r != ___o)) \
- *___op = ___r; \
- likely(___r == ___o); \
-})
-#endif /* try_cmpxchg_relaxed */
-
-#else /* try_cmpxchg_relaxed */
-
-#ifndef try_cmpxchg_acquire
-#define try_cmpxchg_acquire(...) \
- __atomic_op_acquire(try_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef try_cmpxchg_release
-#define try_cmpxchg_release(...) \
- __atomic_op_release(try_cmpxchg, __VA_ARGS__)
-#endif
-
-#ifndef try_cmpxchg
-#define try_cmpxchg(...) \
- __atomic_op_fence(try_cmpxchg, __VA_ARGS__)
-#endif
-
-#endif /* try_cmpxchg_relaxed */
-
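Because these forms are macros built on typeof(), they apply to any scalar
lvalue, not just atomic_t. A hedged sketch bumping a plain counter (seq and
seq_bump() are illustrative; READ_ONCE() is the usual accessor from
<linux/compiler.h>):

    static unsigned long seq; /* illustrative shared counter */

    static __always_inline unsigned long seq_bump(void)
    {
            unsigned long old = READ_ONCE(seq);

            /* on failure the macro refreshes 'old' from memory, as above */
            while (!try_cmpxchg(&seq, &old, old + 1))
                    ;
            return old + 1;
    }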
-#define arch_atomic_read atomic_read
-#define arch_atomic_read_acquire atomic_read_acquire
-
-#ifndef atomic_read_acquire
-static __always_inline int
-atomic_read_acquire(const atomic_t *v)
-{
- return smp_load_acquire(&(v)->counter);
-}
-#define atomic_read_acquire atomic_read_acquire
-#endif
-
-#define arch_atomic_set atomic_set
-#define arch_atomic_set_release atomic_set_release
-
-#ifndef atomic_set_release
-static __always_inline void
-atomic_set_release(atomic_t *v, int i)
-{
- smp_store_release(&(v)->counter, i);
-}
-#define atomic_set_release atomic_set_release
-#endif
-
-#define arch_atomic_add atomic_add
-
-#define arch_atomic_add_return atomic_add_return
-#define arch_atomic_add_return_acquire atomic_add_return_acquire
-#define arch_atomic_add_return_release atomic_add_return_release
-#define arch_atomic_add_return_relaxed atomic_add_return_relaxed
-
-#ifndef atomic_add_return_relaxed
-#define atomic_add_return_acquire atomic_add_return
-#define atomic_add_return_release atomic_add_return
-#define atomic_add_return_relaxed atomic_add_return
-#else /* atomic_add_return_relaxed */
-
-#ifndef atomic_add_return_acquire
-static __always_inline int
-atomic_add_return_acquire(int i, atomic_t *v)
-{
- int ret = atomic_add_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_add_return_acquire atomic_add_return_acquire
-#endif
-
-#ifndef atomic_add_return_release
-static __always_inline int
-atomic_add_return_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_add_return_relaxed(i, v);
-}
-#define atomic_add_return_release atomic_add_return_release
-#endif
-
-#ifndef atomic_add_return
-static __always_inline int
-atomic_add_return(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_add_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_add_return atomic_add_return
-#endif
-
-#endif /* atomic_add_return_relaxed */
-
-#define arch_atomic_fetch_add atomic_fetch_add
-#define arch_atomic_fetch_add_acquire atomic_fetch_add_acquire
-#define arch_atomic_fetch_add_release atomic_fetch_add_release
-#define arch_atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-
-#ifndef atomic_fetch_add_relaxed
-#define atomic_fetch_add_acquire atomic_fetch_add
-#define atomic_fetch_add_release atomic_fetch_add
-#define atomic_fetch_add_relaxed atomic_fetch_add
-#else /* atomic_fetch_add_relaxed */
-
-#ifndef atomic_fetch_add_acquire
-static __always_inline int
-atomic_fetch_add_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_add_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_add_acquire atomic_fetch_add_acquire
-#endif
-
-#ifndef atomic_fetch_add_release
-static __always_inline int
-atomic_fetch_add_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_add_relaxed(i, v);
-}
-#define atomic_fetch_add_release atomic_fetch_add_release
-#endif
-
-#ifndef atomic_fetch_add
-static __always_inline int
-atomic_fetch_add(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_add_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_add atomic_fetch_add
-#endif
-
-#endif /* atomic_fetch_add_relaxed */
-
-#define arch_atomic_sub atomic_sub
-
-#define arch_atomic_sub_return atomic_sub_return
-#define arch_atomic_sub_return_acquire atomic_sub_return_acquire
-#define arch_atomic_sub_return_release atomic_sub_return_release
-#define arch_atomic_sub_return_relaxed atomic_sub_return_relaxed
-
-#ifndef atomic_sub_return_relaxed
-#define atomic_sub_return_acquire atomic_sub_return
-#define atomic_sub_return_release atomic_sub_return
-#define atomic_sub_return_relaxed atomic_sub_return
-#else /* atomic_sub_return_relaxed */
-
-#ifndef atomic_sub_return_acquire
-static __always_inline int
-atomic_sub_return_acquire(int i, atomic_t *v)
-{
- int ret = atomic_sub_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_sub_return_acquire atomic_sub_return_acquire
-#endif
-
-#ifndef atomic_sub_return_release
-static __always_inline int
-atomic_sub_return_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_sub_return_relaxed(i, v);
-}
-#define atomic_sub_return_release atomic_sub_return_release
-#endif
-
-#ifndef atomic_sub_return
-static __always_inline int
-atomic_sub_return(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_sub_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_sub_return atomic_sub_return
-#endif
-
-#endif /* atomic_sub_return_relaxed */
-
-#define arch_atomic_fetch_sub atomic_fetch_sub
-#define arch_atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#define arch_atomic_fetch_sub_release atomic_fetch_sub_release
-#define arch_atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-
-#ifndef atomic_fetch_sub_relaxed
-#define atomic_fetch_sub_acquire atomic_fetch_sub
-#define atomic_fetch_sub_release atomic_fetch_sub
-#define atomic_fetch_sub_relaxed atomic_fetch_sub
-#else /* atomic_fetch_sub_relaxed */
-
-#ifndef atomic_fetch_sub_acquire
-static __always_inline int
-atomic_fetch_sub_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_sub_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
-#endif
-
-#ifndef atomic_fetch_sub_release
-static __always_inline int
-atomic_fetch_sub_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_sub_relaxed(i, v);
-}
-#define atomic_fetch_sub_release atomic_fetch_sub_release
-#endif
-
-#ifndef atomic_fetch_sub
-static __always_inline int
-atomic_fetch_sub(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_sub_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_sub atomic_fetch_sub
-#endif
-
-#endif /* atomic_fetch_sub_relaxed */
-
-#define arch_atomic_inc atomic_inc
-
-#ifndef atomic_inc
-static __always_inline void
-atomic_inc(atomic_t *v)
-{
- atomic_add(1, v);
-}
-#define atomic_inc atomic_inc
-#endif
-
-#define arch_atomic_inc_return atomic_inc_return
-#define arch_atomic_inc_return_acquire atomic_inc_return_acquire
-#define arch_atomic_inc_return_release atomic_inc_return_release
-#define arch_atomic_inc_return_relaxed atomic_inc_return_relaxed
-
-#ifndef atomic_inc_return_relaxed
-#ifdef atomic_inc_return
-#define atomic_inc_return_acquire atomic_inc_return
-#define atomic_inc_return_release atomic_inc_return
-#define atomic_inc_return_relaxed atomic_inc_return
-#endif /* atomic_inc_return */
-
-#ifndef atomic_inc_return
-static __always_inline int
-atomic_inc_return(atomic_t *v)
-{
- return atomic_add_return(1, v);
-}
-#define atomic_inc_return atomic_inc_return
-#endif
-
-#ifndef atomic_inc_return_acquire
-static __always_inline int
-atomic_inc_return_acquire(atomic_t *v)
-{
- return atomic_add_return_acquire(1, v);
-}
-#define atomic_inc_return_acquire atomic_inc_return_acquire
-#endif
-
-#ifndef atomic_inc_return_release
-static __always_inline int
-atomic_inc_return_release(atomic_t *v)
-{
- return atomic_add_return_release(1, v);
-}
-#define atomic_inc_return_release atomic_inc_return_release
-#endif
-
-#ifndef atomic_inc_return_relaxed
-static __always_inline int
-atomic_inc_return_relaxed(atomic_t *v)
-{
- return atomic_add_return_relaxed(1, v);
-}
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#endif
-
-#else /* atomic_inc_return_relaxed */
-
-#ifndef atomic_inc_return_acquire
-static __always_inline int
-atomic_inc_return_acquire(atomic_t *v)
-{
- int ret = atomic_inc_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_inc_return_acquire atomic_inc_return_acquire
-#endif
-
-#ifndef atomic_inc_return_release
-static __always_inline int
-atomic_inc_return_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_inc_return_relaxed(v);
-}
-#define atomic_inc_return_release atomic_inc_return_release
-#endif
-
-#ifndef atomic_inc_return
-static __always_inline int
-atomic_inc_return(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_inc_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_inc_return atomic_inc_return
-#endif
-
-#endif /* atomic_inc_return_relaxed */
-
-#define arch_atomic_fetch_inc atomic_fetch_inc
-#define arch_atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#define arch_atomic_fetch_inc_release atomic_fetch_inc_release
-#define arch_atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-
-#ifndef atomic_fetch_inc_relaxed
-#ifdef atomic_fetch_inc
-#define atomic_fetch_inc_acquire atomic_fetch_inc
-#define atomic_fetch_inc_release atomic_fetch_inc
-#define atomic_fetch_inc_relaxed atomic_fetch_inc
-#endif /* atomic_fetch_inc */
-
-#ifndef atomic_fetch_inc
-static __always_inline int
-atomic_fetch_inc(atomic_t *v)
-{
- return atomic_fetch_add(1, v);
-}
-#define atomic_fetch_inc atomic_fetch_inc
-#endif
-
-#ifndef atomic_fetch_inc_acquire
-static __always_inline int
-atomic_fetch_inc_acquire(atomic_t *v)
-{
- return atomic_fetch_add_acquire(1, v);
-}
-#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#endif
-
-#ifndef atomic_fetch_inc_release
-static __always_inline int
-atomic_fetch_inc_release(atomic_t *v)
-{
- return atomic_fetch_add_release(1, v);
-}
-#define atomic_fetch_inc_release atomic_fetch_inc_release
-#endif
-
-#ifndef atomic_fetch_inc_relaxed
-static __always_inline int
-atomic_fetch_inc_relaxed(atomic_t *v)
-{
- return atomic_fetch_add_relaxed(1, v);
-}
-#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
-#endif
-
-#else /* atomic_fetch_inc_relaxed */
-
-#ifndef atomic_fetch_inc_acquire
-static __always_inline int
-atomic_fetch_inc_acquire(atomic_t *v)
-{
- int ret = atomic_fetch_inc_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
-#endif
-
-#ifndef atomic_fetch_inc_release
-static __always_inline int
-atomic_fetch_inc_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_inc_relaxed(v);
-}
-#define atomic_fetch_inc_release atomic_fetch_inc_release
-#endif
-
-#ifndef atomic_fetch_inc
-static __always_inline int
-atomic_fetch_inc(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_inc_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_inc atomic_fetch_inc
-#endif
-
-#endif /* atomic_fetch_inc_relaxed */
-
-#define arch_atomic_dec atomic_dec
-
-#ifndef atomic_dec
-static __always_inline void
-atomic_dec(atomic_t *v)
-{
- atomic_sub(1, v);
-}
-#define atomic_dec atomic_dec
-#endif
-
-#define arch_atomic_dec_return atomic_dec_return
-#define arch_atomic_dec_return_acquire atomic_dec_return_acquire
-#define arch_atomic_dec_return_release atomic_dec_return_release
-#define arch_atomic_dec_return_relaxed atomic_dec_return_relaxed
-
-#ifndef atomic_dec_return_relaxed
-#ifdef atomic_dec_return
-#define atomic_dec_return_acquire atomic_dec_return
-#define atomic_dec_return_release atomic_dec_return
-#define atomic_dec_return_relaxed atomic_dec_return
-#endif /* atomic_dec_return */
-
-#ifndef atomic_dec_return
-static __always_inline int
-atomic_dec_return(atomic_t *v)
-{
- return atomic_sub_return(1, v);
-}
-#define atomic_dec_return atomic_dec_return
-#endif
-
-#ifndef atomic_dec_return_acquire
-static __always_inline int
-atomic_dec_return_acquire(atomic_t *v)
-{
- return atomic_sub_return_acquire(1, v);
-}
-#define atomic_dec_return_acquire atomic_dec_return_acquire
-#endif
-
-#ifndef atomic_dec_return_release
-static __always_inline int
-atomic_dec_return_release(atomic_t *v)
-{
- return atomic_sub_return_release(1, v);
-}
-#define atomic_dec_return_release atomic_dec_return_release
-#endif
-
-#ifndef atomic_dec_return_relaxed
-static __always_inline int
-atomic_dec_return_relaxed(atomic_t *v)
-{
- return atomic_sub_return_relaxed(1, v);
-}
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
-#endif
-
-#else /* atomic_dec_return_relaxed */
-
-#ifndef atomic_dec_return_acquire
-static __always_inline int
-atomic_dec_return_acquire(atomic_t *v)
-{
- int ret = atomic_dec_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_dec_return_acquire atomic_dec_return_acquire
-#endif
-
-#ifndef atomic_dec_return_release
-static __always_inline int
-atomic_dec_return_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_dec_return_relaxed(v);
-}
-#define atomic_dec_return_release atomic_dec_return_release
-#endif
-
-#ifndef atomic_dec_return
-static __always_inline int
-atomic_dec_return(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_dec_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_dec_return atomic_dec_return
-#endif
-
-#endif /* atomic_dec_return_relaxed */
-
-#define arch_atomic_fetch_dec atomic_fetch_dec
-#define arch_atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#define arch_atomic_fetch_dec_release atomic_fetch_dec_release
-#define arch_atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-
-#ifndef atomic_fetch_dec_relaxed
-#ifdef atomic_fetch_dec
-#define atomic_fetch_dec_acquire atomic_fetch_dec
-#define atomic_fetch_dec_release atomic_fetch_dec
-#define atomic_fetch_dec_relaxed atomic_fetch_dec
-#endif /* atomic_fetch_dec */
-
-#ifndef atomic_fetch_dec
-static __always_inline int
-atomic_fetch_dec(atomic_t *v)
-{
- return atomic_fetch_sub(1, v);
-}
-#define atomic_fetch_dec atomic_fetch_dec
-#endif
-
-#ifndef atomic_fetch_dec_acquire
-static __always_inline int
-atomic_fetch_dec_acquire(atomic_t *v)
-{
- return atomic_fetch_sub_acquire(1, v);
-}
-#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#endif
-
-#ifndef atomic_fetch_dec_release
-static __always_inline int
-atomic_fetch_dec_release(atomic_t *v)
-{
- return atomic_fetch_sub_release(1, v);
-}
-#define atomic_fetch_dec_release atomic_fetch_dec_release
-#endif
-
-#ifndef atomic_fetch_dec_relaxed
-static __always_inline int
-atomic_fetch_dec_relaxed(atomic_t *v)
-{
- return atomic_fetch_sub_relaxed(1, v);
-}
-#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
-#endif
-
-#else /* atomic_fetch_dec_relaxed */
-
-#ifndef atomic_fetch_dec_acquire
-static __always_inline int
-atomic_fetch_dec_acquire(atomic_t *v)
-{
- int ret = atomic_fetch_dec_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
-#endif
-
-#ifndef atomic_fetch_dec_release
-static __always_inline int
-atomic_fetch_dec_release(atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_dec_relaxed(v);
-}
-#define atomic_fetch_dec_release atomic_fetch_dec_release
-#endif
-
-#ifndef atomic_fetch_dec
-static __always_inline int
-atomic_fetch_dec(atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_dec_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_dec atomic_fetch_dec
-#endif
-
-#endif /* atomic_fetch_dec_relaxed */
-
-#define arch_atomic_and atomic_and
-
-#define arch_atomic_fetch_and atomic_fetch_and
-#define arch_atomic_fetch_and_acquire atomic_fetch_and_acquire
-#define arch_atomic_fetch_and_release atomic_fetch_and_release
-#define arch_atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-
-#ifndef atomic_fetch_and_relaxed
-#define atomic_fetch_and_acquire atomic_fetch_and
-#define atomic_fetch_and_release atomic_fetch_and
-#define atomic_fetch_and_relaxed atomic_fetch_and
-#else /* atomic_fetch_and_relaxed */
-
-#ifndef atomic_fetch_and_acquire
-static __always_inline int
-atomic_fetch_and_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_and_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_and_acquire atomic_fetch_and_acquire
-#endif
-
-#ifndef atomic_fetch_and_release
-static __always_inline int
-atomic_fetch_and_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_and_relaxed(i, v);
-}
-#define atomic_fetch_and_release atomic_fetch_and_release
-#endif
-
-#ifndef atomic_fetch_and
-static __always_inline int
-atomic_fetch_and(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_and_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_and atomic_fetch_and
-#endif
-
-#endif /* atomic_fetch_and_relaxed */
-
-#define arch_atomic_andnot atomic_andnot
-
-#ifndef atomic_andnot
-static __always_inline void
-atomic_andnot(int i, atomic_t *v)
-{
- atomic_and(~i, v);
-}
-#define atomic_andnot atomic_andnot
-#endif
-
-#define arch_atomic_fetch_andnot atomic_fetch_andnot
-#define arch_atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#define arch_atomic_fetch_andnot_release atomic_fetch_andnot_release
-#define arch_atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-
-#ifndef atomic_fetch_andnot_relaxed
-#ifdef atomic_fetch_andnot
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot
-#define atomic_fetch_andnot_release atomic_fetch_andnot
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
-#endif /* atomic_fetch_andnot */
-
-#ifndef atomic_fetch_andnot
-static __always_inline int
-atomic_fetch_andnot(int i, atomic_t *v)
-{
- return atomic_fetch_and(~i, v);
-}
-#define atomic_fetch_andnot atomic_fetch_andnot
-#endif
-
-#ifndef atomic_fetch_andnot_acquire
-static __always_inline int
-atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- return atomic_fetch_and_acquire(~i, v);
-}
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#endif
-
-#ifndef atomic_fetch_andnot_release
-static __always_inline int
-atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- return atomic_fetch_and_release(~i, v);
-}
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#endif
-
-#ifndef atomic_fetch_andnot_relaxed
-static __always_inline int
-atomic_fetch_andnot_relaxed(int i, atomic_t *v)
-{
- return atomic_fetch_and_relaxed(~i, v);
-}
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#endif
-
-#else /* atomic_fetch_andnot_relaxed */
-
-#ifndef atomic_fetch_andnot_acquire
-static __always_inline int
-atomic_fetch_andnot_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_andnot_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
-#endif
-
-#ifndef atomic_fetch_andnot_release
-static __always_inline int
-atomic_fetch_andnot_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_andnot_relaxed(i, v);
-}
-#define atomic_fetch_andnot_release atomic_fetch_andnot_release
-#endif
-
-#ifndef atomic_fetch_andnot
-static __always_inline int
-atomic_fetch_andnot(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_andnot_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_andnot atomic_fetch_andnot
-#endif
-
-#endif /* atomic_fetch_andnot_relaxed */
-
-#define arch_atomic_or atomic_or
-
-#define arch_atomic_fetch_or atomic_fetch_or
-#define arch_atomic_fetch_or_acquire atomic_fetch_or_acquire
-#define arch_atomic_fetch_or_release atomic_fetch_or_release
-#define arch_atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-
-#ifndef atomic_fetch_or_relaxed
-#define atomic_fetch_or_acquire atomic_fetch_or
-#define atomic_fetch_or_release atomic_fetch_or
-#define atomic_fetch_or_relaxed atomic_fetch_or
-#else /* atomic_fetch_or_relaxed */
-
-#ifndef atomic_fetch_or_acquire
-static __always_inline int
-atomic_fetch_or_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_or_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_or_acquire atomic_fetch_or_acquire
-#endif
-
-#ifndef atomic_fetch_or_release
-static __always_inline int
-atomic_fetch_or_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_or_relaxed(i, v);
-}
-#define atomic_fetch_or_release atomic_fetch_or_release
-#endif
-
-#ifndef atomic_fetch_or
-static __always_inline int
-atomic_fetch_or(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_or_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_or atomic_fetch_or
-#endif
-
-#endif /* atomic_fetch_or_relaxed */
-
-#define arch_atomic_xor atomic_xor
-
-#define arch_atomic_fetch_xor atomic_fetch_xor
-#define arch_atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#define arch_atomic_fetch_xor_release atomic_fetch_xor_release
-#define arch_atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-
-#ifndef atomic_fetch_xor_relaxed
-#define atomic_fetch_xor_acquire atomic_fetch_xor
-#define atomic_fetch_xor_release atomic_fetch_xor
-#define atomic_fetch_xor_relaxed atomic_fetch_xor
-#else /* atomic_fetch_xor_relaxed */
-
-#ifndef atomic_fetch_xor_acquire
-static __always_inline int
-atomic_fetch_xor_acquire(int i, atomic_t *v)
-{
- int ret = atomic_fetch_xor_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
-#endif
-
-#ifndef atomic_fetch_xor_release
-static __always_inline int
-atomic_fetch_xor_release(int i, atomic_t *v)
-{
- __atomic_release_fence();
- return atomic_fetch_xor_relaxed(i, v);
-}
-#define atomic_fetch_xor_release atomic_fetch_xor_release
-#endif
-
-#ifndef atomic_fetch_xor
-static __always_inline int
-atomic_fetch_xor(int i, atomic_t *v)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_fetch_xor_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_fetch_xor atomic_fetch_xor
-#endif
-
-#endif /* atomic_fetch_xor_relaxed */
-
-#define arch_atomic_xchg atomic_xchg
-#define arch_atomic_xchg_acquire atomic_xchg_acquire
-#define arch_atomic_xchg_release atomic_xchg_release
-#define arch_atomic_xchg_relaxed atomic_xchg_relaxed
-
-#ifndef atomic_xchg_relaxed
-#define atomic_xchg_acquire atomic_xchg
-#define atomic_xchg_release atomic_xchg
-#define atomic_xchg_relaxed atomic_xchg
-#else /* atomic_xchg_relaxed */
-
-#ifndef atomic_xchg_acquire
-static __always_inline int
-atomic_xchg_acquire(atomic_t *v, int i)
-{
- int ret = atomic_xchg_relaxed(v, i);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_xchg_acquire atomic_xchg_acquire
-#endif
-
-#ifndef atomic_xchg_release
-static __always_inline int
-atomic_xchg_release(atomic_t *v, int i)
-{
- __atomic_release_fence();
- return atomic_xchg_relaxed(v, i);
-}
-#define atomic_xchg_release atomic_xchg_release
-#endif
-
-#ifndef atomic_xchg
-static __always_inline int
-atomic_xchg(atomic_t *v, int i)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_xchg_relaxed(v, i);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_xchg atomic_xchg
-#endif
-
-#endif /* atomic_xchg_relaxed */
-
-#define arch_atomic_cmpxchg atomic_cmpxchg
-#define arch_atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#define arch_atomic_cmpxchg_release atomic_cmpxchg_release
-#define arch_atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-
-#ifndef atomic_cmpxchg_relaxed
-#define atomic_cmpxchg_acquire atomic_cmpxchg
-#define atomic_cmpxchg_release atomic_cmpxchg
-#define atomic_cmpxchg_relaxed atomic_cmpxchg
-#else /* atomic_cmpxchg_relaxed */
-
-#ifndef atomic_cmpxchg_acquire
-static __always_inline int
-atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
-{
- int ret = atomic_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#endif
-
-#ifndef atomic_cmpxchg_release
-static __always_inline int
-atomic_cmpxchg_release(atomic_t *v, int old, int new)
-{
- __atomic_release_fence();
- return atomic_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#endif
-
-#ifndef atomic_cmpxchg
-static __always_inline int
-atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- int ret;
- __atomic_pre_full_fence();
- ret = atomic_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_cmpxchg atomic_cmpxchg
-#endif
-
-#endif /* atomic_cmpxchg_relaxed */
-
-#define arch_atomic_try_cmpxchg atomic_try_cmpxchg
-#define arch_atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#define arch_atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#define arch_atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
-
-#ifndef atomic_try_cmpxchg_relaxed
-#ifdef atomic_try_cmpxchg
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg
-#endif /* atomic_try_cmpxchg */
-
-#ifndef atomic_try_cmpxchg
-static __always_inline bool
-atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-#endif
-
-#ifndef atomic_try_cmpxchg_acquire
-static __always_inline bool
-atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg_acquire(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic_try_cmpxchg_release
-static __always_inline bool
-atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg_release(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#endif
-
-#ifndef atomic_try_cmpxchg_relaxed
-static __always_inline bool
-atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
- r = atomic_cmpxchg_relaxed(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
-#endif
-
-#else /* atomic_try_cmpxchg_relaxed */
-
-#ifndef atomic_try_cmpxchg_acquire
-static __always_inline bool
-atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
-{
- bool ret = atomic_try_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic_try_cmpxchg_release
-static __always_inline bool
-atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
-{
- __atomic_release_fence();
- return atomic_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
-#endif
-
-#ifndef atomic_try_cmpxchg
-static __always_inline bool
-atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = atomic_try_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-#endif
-
-#endif /* atomic_try_cmpxchg_relaxed */
-
-#define arch_atomic_sub_and_test atomic_sub_and_test
-
-#ifndef atomic_sub_and_test
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-atomic_sub_and_test(int i, atomic_t *v)
-{
- return atomic_sub_return(i, v) == 0;
-}
-#define atomic_sub_and_test atomic_sub_and_test
-#endif
-
-#define arch_atomic_dec_and_test atomic_dec_and_test
-
-#ifndef atomic_dec_and_test
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool
-atomic_dec_and_test(atomic_t *v)
-{
- return atomic_dec_return(v) == 0;
-}
-#define atomic_dec_and_test atomic_dec_and_test
-#endif
-
-#define arch_atomic_inc_and_test atomic_inc_and_test
-
-#ifndef atomic_inc_and_test
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-atomic_inc_and_test(atomic_t *v)
-{
- return atomic_inc_return(v) == 0;
-}
-#define atomic_inc_and_test atomic_inc_and_test
-#endif
-
-#define arch_atomic_add_negative atomic_add_negative
-
-#ifndef atomic_add_negative
-/**
- * atomic_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when the
- * result is greater than or equal to zero.
- */
-static __always_inline bool
-atomic_add_negative(int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-#define atomic_add_negative atomic_add_negative
-#endif
-
-#define arch_atomic_fetch_add_unless atomic_fetch_add_unless
-
-#ifndef atomic_fetch_add_unless
-/**
- * atomic_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the original value of @v.
- */
-static __always_inline int
-atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!atomic_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-#endif
-
-#define arch_atomic_add_unless atomic_add_unless
-
-#ifndef atomic_add_unless
-/**
- * atomic_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-atomic_add_unless(atomic_t *v, int a, int u)
-{
- return atomic_fetch_add_unless(v, a, u) != u;
-}
-#define atomic_add_unless atomic_add_unless
-#endif
-
-#define arch_atomic_inc_not_zero atomic_inc_not_zero
-
-#ifndef atomic_inc_not_zero
-/**
- * atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-atomic_inc_not_zero(atomic_t *v)
-{
- return atomic_add_unless(v, 1, 0);
-}
-#define atomic_inc_not_zero atomic_inc_not_zero
-#endif
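
atomic_inc_not_zero() is the usual building block for lookups that must
not resurrect a dying object, e.g. under RCU. A hypothetical sketch:

	static struct obj *obj_tryget(struct obj *o)
	{
		/* Fails once the last reference is gone (count == 0). */
		if (!atomic_inc_not_zero(&o->refcnt))
			return NULL;
		return o;
	}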
-
-#define arch_atomic_inc_unless_negative atomic_inc_unless_negative
-
-#ifndef atomic_inc_unless_negative
-static __always_inline bool
-atomic_inc_unless_negative(atomic_t *v)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c < 0))
- return false;
- } while (!atomic_try_cmpxchg(v, &c, c + 1));
-
- return true;
-}
-#define atomic_inc_unless_negative atomic_inc_unless_negative
-#endif
-
-#define arch_atomic_dec_unless_positive atomic_dec_unless_positive
-
-#ifndef atomic_dec_unless_positive
-static __always_inline bool
-atomic_dec_unless_positive(atomic_t *v)
-{
- int c = atomic_read(v);
-
- do {
- if (unlikely(c > 0))
- return false;
- } while (!atomic_try_cmpxchg(v, &c, c - 1));
-
- return true;
-}
-#define atomic_dec_unless_positive atomic_dec_unless_positive
-#endif
-
-#define arch_atomic_dec_if_positive atomic_dec_if_positive
-
-#ifndef atomic_dec_if_positive
-static __always_inline int
-atomic_dec_if_positive(atomic_t *v)
-{
- int dec, c = atomic_read(v);
-
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!atomic_try_cmpxchg(v, &c, dec));
-
- return dec;
-}
-#define atomic_dec_if_positive atomic_dec_if_positive
-#endif
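
Note the contract of atomic_dec_if_positive(): it returns the
decremented value when it succeeds and a negative result (the would-be
value) when it declines, so callers test the sign rather than a bool.
A hypothetical token-bucket consumer:

	static bool take_token(atomic_t *tokens)
	{
		return atomic_dec_if_positive(tokens) >= 0;
	}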
-
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
-#endif
-
-#define arch_atomic64_read atomic64_read
-#define arch_atomic64_read_acquire atomic64_read_acquire
-
-#ifndef atomic64_read_acquire
-static __always_inline s64
-atomic64_read_acquire(const atomic64_t *v)
-{
- return smp_load_acquire(&(v)->counter);
-}
-#define atomic64_read_acquire atomic64_read_acquire
-#endif
-
-#define arch_atomic64_set atomic64_set
-#define arch_atomic64_set_release atomic64_set_release
-
-#ifndef atomic64_set_release
-static __always_inline void
-atomic64_set_release(atomic64_t *v, s64 i)
-{
- smp_store_release(&(v)->counter, i);
-}
-#define atomic64_set_release atomic64_set_release
-#endif
-
-#define arch_atomic64_add atomic64_add
-
-#define arch_atomic64_add_return atomic64_add_return
-#define arch_atomic64_add_return_acquire atomic64_add_return_acquire
-#define arch_atomic64_add_return_release atomic64_add_return_release
-#define arch_atomic64_add_return_relaxed atomic64_add_return_relaxed
-
-#ifndef atomic64_add_return_relaxed
-#define atomic64_add_return_acquire atomic64_add_return
-#define atomic64_add_return_release atomic64_add_return
-#define atomic64_add_return_relaxed atomic64_add_return
-#else /* atomic64_add_return_relaxed */
-
-#ifndef atomic64_add_return_acquire
-static __always_inline s64
-atomic64_add_return_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_add_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_add_return_acquire atomic64_add_return_acquire
-#endif
-
-#ifndef atomic64_add_return_release
-static __always_inline s64
-atomic64_add_return_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_add_return_relaxed(i, v);
-}
-#define atomic64_add_return_release atomic64_add_return_release
-#endif
-
-#ifndef atomic64_add_return
-static __always_inline s64
-atomic64_add_return(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_add_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_add_return atomic64_add_return
-#endif
-
-#endif /* atomic64_add_return_relaxed */
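
Every ordered atomic64 fallback in this file follows the template just
shown for add_return: given only a _relaxed primitive, the acquire form
appends __atomic_acquire_fence(), the release form prepends
__atomic_release_fence(), and the fully ordered form brackets the
relaxed op with the pre/post full fences. Schematically:

	/* op_acquire(...) := ret = op_relaxed(...); acquire fence   */
	/* op_release(...) := release fence; ret = op_relaxed(...)   */
	/* op(...)         := pre full fence; ret = op_relaxed(...);
	 *                    post full fence                         */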
-
-#define arch_atomic64_fetch_add atomic64_fetch_add
-#define arch_atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#define arch_atomic64_fetch_add_release atomic64_fetch_add_release
-#define arch_atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-
-#ifndef atomic64_fetch_add_relaxed
-#define atomic64_fetch_add_acquire atomic64_fetch_add
-#define atomic64_fetch_add_release atomic64_fetch_add
-#define atomic64_fetch_add_relaxed atomic64_fetch_add
-#else /* atomic64_fetch_add_relaxed */
-
-#ifndef atomic64_fetch_add_acquire
-static __always_inline s64
-atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_add_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
-#endif
-
-#ifndef atomic64_fetch_add_release
-static __always_inline s64
-atomic64_fetch_add_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_add_relaxed(i, v);
-}
-#define atomic64_fetch_add_release atomic64_fetch_add_release
-#endif
-
-#ifndef atomic64_fetch_add
-static __always_inline s64
-atomic64_fetch_add(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_add_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_add atomic64_fetch_add
-#endif
-
-#endif /* atomic64_fetch_add_relaxed */
-
-#define arch_atomic64_sub atomic64_sub
-
-#define arch_atomic64_sub_return atomic64_sub_return
-#define arch_atomic64_sub_return_acquire atomic64_sub_return_acquire
-#define arch_atomic64_sub_return_release atomic64_sub_return_release
-#define arch_atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-
-#ifndef atomic64_sub_return_relaxed
-#define atomic64_sub_return_acquire atomic64_sub_return
-#define atomic64_sub_return_release atomic64_sub_return
-#define atomic64_sub_return_relaxed atomic64_sub_return
-#else /* atomic64_sub_return_relaxed */
-
-#ifndef atomic64_sub_return_acquire
-static __always_inline s64
-atomic64_sub_return_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_sub_return_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_sub_return_acquire atomic64_sub_return_acquire
-#endif
-
-#ifndef atomic64_sub_return_release
-static __always_inline s64
-atomic64_sub_return_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_sub_return_relaxed(i, v);
-}
-#define atomic64_sub_return_release atomic64_sub_return_release
-#endif
-
-#ifndef atomic64_sub_return
-static __always_inline s64
-atomic64_sub_return(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_sub_return_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_sub_return atomic64_sub_return
-#endif
-
-#endif /* atomic64_sub_return_relaxed */
-
-#define arch_atomic64_fetch_sub atomic64_fetch_sub
-#define arch_atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#define arch_atomic64_fetch_sub_release atomic64_fetch_sub_release
-#define arch_atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-
-#ifndef atomic64_fetch_sub_relaxed
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub
-#define atomic64_fetch_sub_release atomic64_fetch_sub
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub
-#else /* atomic64_fetch_sub_relaxed */
-
-#ifndef atomic64_fetch_sub_acquire
-static __always_inline s64
-atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_sub_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
-#endif
-
-#ifndef atomic64_fetch_sub_release
-static __always_inline s64
-atomic64_fetch_sub_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_sub_relaxed(i, v);
-}
-#define atomic64_fetch_sub_release atomic64_fetch_sub_release
-#endif
-
-#ifndef atomic64_fetch_sub
-static __always_inline s64
-atomic64_fetch_sub(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_sub_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_sub atomic64_fetch_sub
-#endif
-
-#endif /* atomic64_fetch_sub_relaxed */
-
-#define arch_atomic64_inc atomic64_inc
-
-#ifndef atomic64_inc
-static __always_inline void
-atomic64_inc(atomic64_t *v)
-{
- atomic64_add(1, v);
-}
-#define atomic64_inc atomic64_inc
-#endif
-
-#define arch_atomic64_inc_return atomic64_inc_return
-#define arch_atomic64_inc_return_acquire atomic64_inc_return_acquire
-#define arch_atomic64_inc_return_release atomic64_inc_return_release
-#define arch_atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-
-#ifndef atomic64_inc_return_relaxed
-#ifdef atomic64_inc_return
-#define atomic64_inc_return_acquire atomic64_inc_return
-#define atomic64_inc_return_release atomic64_inc_return
-#define atomic64_inc_return_relaxed atomic64_inc_return
-#endif /* atomic64_inc_return */
-
-#ifndef atomic64_inc_return
-static __always_inline s64
-atomic64_inc_return(atomic64_t *v)
-{
- return atomic64_add_return(1, v);
-}
-#define atomic64_inc_return atomic64_inc_return
-#endif
-
-#ifndef atomic64_inc_return_acquire
-static __always_inline s64
-atomic64_inc_return_acquire(atomic64_t *v)
-{
- return atomic64_add_return_acquire(1, v);
-}
-#define atomic64_inc_return_acquire atomic64_inc_return_acquire
-#endif
-
-#ifndef atomic64_inc_return_release
-static __always_inline s64
-atomic64_inc_return_release(atomic64_t *v)
-{
- return atomic64_add_return_release(1, v);
-}
-#define atomic64_inc_return_release atomic64_inc_return_release
-#endif
-
-#ifndef atomic64_inc_return_relaxed
-static __always_inline s64
-atomic64_inc_return_relaxed(atomic64_t *v)
-{
- return atomic64_add_return_relaxed(1, v);
-}
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#endif
-
-#else /* atomic64_inc_return_relaxed */
-
-#ifndef atomic64_inc_return_acquire
-static __always_inline s64
-atomic64_inc_return_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_inc_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_inc_return_acquire atomic64_inc_return_acquire
-#endif
-
-#ifndef atomic64_inc_return_release
-static __always_inline s64
-atomic64_inc_return_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_inc_return_relaxed(v);
-}
-#define atomic64_inc_return_release atomic64_inc_return_release
-#endif
-
-#ifndef atomic64_inc_return
-static __always_inline s64
-atomic64_inc_return(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_inc_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_inc_return atomic64_inc_return
-#endif
-
-#endif /* atomic64_inc_return_relaxed */
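
The inc_return family carries one more layer than add_return:
atomic64_inc_return() itself may be missing, in which case it is
derived from add_return before the ordering variants are filled in.
For an architecture providing only atomic64_add_return(), the
preprocessor effectively resolves:

	/* atomic64_inc_return(v)         -> atomic64_add_return(1, v)
	 * atomic64_inc_return_acquire(v) -> atomic64_add_return_acquire(1, v)
	 *                                -> atomic64_add_return(1, v) */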
-
-#define arch_atomic64_fetch_inc atomic64_fetch_inc
-#define arch_atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#define arch_atomic64_fetch_inc_release atomic64_fetch_inc_release
-#define arch_atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-
-#ifndef atomic64_fetch_inc_relaxed
-#ifdef atomic64_fetch_inc
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc
-#define atomic64_fetch_inc_release atomic64_fetch_inc
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc
-#endif /* atomic64_fetch_inc */
-
-#ifndef atomic64_fetch_inc
-static __always_inline s64
-atomic64_fetch_inc(atomic64_t *v)
-{
- return atomic64_fetch_add(1, v);
-}
-#define atomic64_fetch_inc atomic64_fetch_inc
-#endif
-
-#ifndef atomic64_fetch_inc_acquire
-static __always_inline s64
-atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- return atomic64_fetch_add_acquire(1, v);
-}
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#endif
-
-#ifndef atomic64_fetch_inc_release
-static __always_inline s64
-atomic64_fetch_inc_release(atomic64_t *v)
-{
- return atomic64_fetch_add_release(1, v);
-}
-#define atomic64_fetch_inc_release atomic64_fetch_inc_release
-#endif
-
-#ifndef atomic64_fetch_inc_relaxed
-static __always_inline s64
-atomic64_fetch_inc_relaxed(atomic64_t *v)
-{
- return atomic64_fetch_add_relaxed(1, v);
-}
-#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
-#endif
-
-#else /* atomic64_fetch_inc_relaxed */
-
-#ifndef atomic64_fetch_inc_acquire
-static __always_inline s64
-atomic64_fetch_inc_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_fetch_inc_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
-#endif
-
-#ifndef atomic64_fetch_inc_release
-static __always_inline s64
-atomic64_fetch_inc_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_inc_relaxed(v);
-}
-#define atomic64_fetch_inc_release atomic64_fetch_inc_release
-#endif
-
-#ifndef atomic64_fetch_inc
-static __always_inline s64
-atomic64_fetch_inc(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_inc_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_inc atomic64_fetch_inc
-#endif
-
-#endif /* atomic64_fetch_inc_relaxed */
-
-#define arch_atomic64_dec atomic64_dec
-
-#ifndef atomic64_dec
-static __always_inline void
-atomic64_dec(atomic64_t *v)
-{
- atomic64_sub(1, v);
-}
-#define atomic64_dec atomic64_dec
-#endif
-
-#define arch_atomic64_dec_return atomic64_dec_return
-#define arch_atomic64_dec_return_acquire atomic64_dec_return_acquire
-#define arch_atomic64_dec_return_release atomic64_dec_return_release
-#define arch_atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-
-#ifndef atomic64_dec_return_relaxed
-#ifdef atomic64_dec_return
-#define atomic64_dec_return_acquire atomic64_dec_return
-#define atomic64_dec_return_release atomic64_dec_return
-#define atomic64_dec_return_relaxed atomic64_dec_return
-#endif /* atomic64_dec_return */
-
-#ifndef atomic64_dec_return
-static __always_inline s64
-atomic64_dec_return(atomic64_t *v)
-{
- return atomic64_sub_return(1, v);
-}
-#define atomic64_dec_return atomic64_dec_return
-#endif
-
-#ifndef atomic64_dec_return_acquire
-static __always_inline s64
-atomic64_dec_return_acquire(atomic64_t *v)
-{
- return atomic64_sub_return_acquire(1, v);
-}
-#define atomic64_dec_return_acquire atomic64_dec_return_acquire
-#endif
-
-#ifndef atomic64_dec_return_release
-static __always_inline s64
-atomic64_dec_return_release(atomic64_t *v)
-{
- return atomic64_sub_return_release(1, v);
-}
-#define atomic64_dec_return_release atomic64_dec_return_release
-#endif
-
-#ifndef atomic64_dec_return_relaxed
-static __always_inline s64
-atomic64_dec_return_relaxed(atomic64_t *v)
-{
- return atomic64_sub_return_relaxed(1, v);
-}
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
-#endif
-
-#else /* atomic64_dec_return_relaxed */
-
-#ifndef atomic64_dec_return_acquire
-static __always_inline s64
-atomic64_dec_return_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_dec_return_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_dec_return_acquire atomic64_dec_return_acquire
-#endif
-
-#ifndef atomic64_dec_return_release
-static __always_inline s64
-atomic64_dec_return_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_dec_return_relaxed(v);
-}
-#define atomic64_dec_return_release atomic64_dec_return_release
-#endif
-
-#ifndef atomic64_dec_return
-static __always_inline s64
-atomic64_dec_return(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_dec_return_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_dec_return atomic64_dec_return
-#endif
-
-#endif /* atomic64_dec_return_relaxed */
-
-#define arch_atomic64_fetch_dec atomic64_fetch_dec
-#define arch_atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#define arch_atomic64_fetch_dec_release atomic64_fetch_dec_release
-#define arch_atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-
-#ifndef atomic64_fetch_dec_relaxed
-#ifdef atomic64_fetch_dec
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec
-#define atomic64_fetch_dec_release atomic64_fetch_dec
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec
-#endif /* atomic64_fetch_dec */
-
-#ifndef atomic64_fetch_dec
-static __always_inline s64
-atomic64_fetch_dec(atomic64_t *v)
-{
- return atomic64_fetch_sub(1, v);
-}
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#ifndef atomic64_fetch_dec_acquire
-static __always_inline s64
-atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- return atomic64_fetch_sub_acquire(1, v);
-}
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#endif
-
-#ifndef atomic64_fetch_dec_release
-static __always_inline s64
-atomic64_fetch_dec_release(atomic64_t *v)
-{
- return atomic64_fetch_sub_release(1, v);
-}
-#define atomic64_fetch_dec_release atomic64_fetch_dec_release
-#endif
-
-#ifndef atomic64_fetch_dec_relaxed
-static __always_inline s64
-atomic64_fetch_dec_relaxed(atomic64_t *v)
-{
- return atomic64_fetch_sub_relaxed(1, v);
-}
-#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
-#endif
-
-#else /* atomic64_fetch_dec_relaxed */
-
-#ifndef atomic64_fetch_dec_acquire
-static __always_inline s64
-atomic64_fetch_dec_acquire(atomic64_t *v)
-{
- s64 ret = atomic64_fetch_dec_relaxed(v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
-#endif
-
-#ifndef atomic64_fetch_dec_release
-static __always_inline s64
-atomic64_fetch_dec_release(atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_dec_relaxed(v);
-}
-#define atomic64_fetch_dec_release atomic64_fetch_dec_release
-#endif
-
-#ifndef atomic64_fetch_dec
-static __always_inline s64
-atomic64_fetch_dec(atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_dec_relaxed(v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_dec atomic64_fetch_dec
-#endif
-
-#endif /* atomic64_fetch_dec_relaxed */
-
-#define arch_atomic64_and atomic64_and
-
-#define arch_atomic64_fetch_and atomic64_fetch_and
-#define arch_atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#define arch_atomic64_fetch_and_release atomic64_fetch_and_release
-#define arch_atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-
-#ifndef atomic64_fetch_and_relaxed
-#define atomic64_fetch_and_acquire atomic64_fetch_and
-#define atomic64_fetch_and_release atomic64_fetch_and
-#define atomic64_fetch_and_relaxed atomic64_fetch_and
-#else /* atomic64_fetch_and_relaxed */
-
-#ifndef atomic64_fetch_and_acquire
-static __always_inline s64
-atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_and_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
-#endif
-
-#ifndef atomic64_fetch_and_release
-static __always_inline s64
-atomic64_fetch_and_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_and_relaxed(i, v);
-}
-#define atomic64_fetch_and_release atomic64_fetch_and_release
-#endif
-
-#ifndef atomic64_fetch_and
-static __always_inline s64
-atomic64_fetch_and(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_and_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_and atomic64_fetch_and
-#endif
-
-#endif /* atomic64_fetch_and_relaxed */
-
-#define arch_atomic64_andnot atomic64_andnot
-
-#ifndef atomic64_andnot
-static __always_inline void
-atomic64_andnot(s64 i, atomic64_t *v)
-{
- atomic64_and(~i, v);
-}
-#define atomic64_andnot atomic64_andnot
-#endif
-
-#define arch_atomic64_fetch_andnot atomic64_fetch_andnot
-#define arch_atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#define arch_atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#define arch_atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-
-#ifndef atomic64_fetch_andnot_relaxed
-#ifdef atomic64_fetch_andnot
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
-#endif /* atomic64_fetch_andnot */
-
-#ifndef atomic64_fetch_andnot
-static __always_inline s64
-atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and(~i, v);
-}
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-#endif
-
-#ifndef atomic64_fetch_andnot_acquire
-static __always_inline s64
-atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and_acquire(~i, v);
-}
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef atomic64_fetch_andnot_release
-static __always_inline s64
-atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and_release(~i, v);
-}
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#endif
-
-#ifndef atomic64_fetch_andnot_relaxed
-static __always_inline s64
-atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
-{
- return atomic64_fetch_and_relaxed(~i, v);
-}
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#endif
-
-#else /* atomic64_fetch_andnot_relaxed */
-
-#ifndef atomic64_fetch_andnot_acquire
-static __always_inline s64
-atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_andnot_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
-#endif
-
-#ifndef atomic64_fetch_andnot_release
-static __always_inline s64
-atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_andnot_relaxed(i, v);
-}
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
-#endif
-
-#ifndef atomic64_fetch_andnot
-static __always_inline s64
-atomic64_fetch_andnot(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_andnot_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-#endif
-
-#endif /* atomic64_fetch_andnot_relaxed */
-
-#define arch_atomic64_or atomic64_or
-
-#define arch_atomic64_fetch_or atomic64_fetch_or
-#define arch_atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#define arch_atomic64_fetch_or_release atomic64_fetch_or_release
-#define arch_atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-
-#ifndef atomic64_fetch_or_relaxed
-#define atomic64_fetch_or_acquire atomic64_fetch_or
-#define atomic64_fetch_or_release atomic64_fetch_or
-#define atomic64_fetch_or_relaxed atomic64_fetch_or
-#else /* atomic64_fetch_or_relaxed */
-
-#ifndef atomic64_fetch_or_acquire
-static __always_inline s64
-atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_or_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
-#endif
-
-#ifndef atomic64_fetch_or_release
-static __always_inline s64
-atomic64_fetch_or_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_or_relaxed(i, v);
-}
-#define atomic64_fetch_or_release atomic64_fetch_or_release
-#endif
-
-#ifndef atomic64_fetch_or
-static __always_inline s64
-atomic64_fetch_or(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_or_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_or atomic64_fetch_or
-#endif
-
-#endif /* atomic64_fetch_or_relaxed */
-
-#define arch_atomic64_xor atomic64_xor
-
-#define arch_atomic64_fetch_xor atomic64_fetch_xor
-#define arch_atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#define arch_atomic64_fetch_xor_release atomic64_fetch_xor_release
-#define arch_atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-
-#ifndef atomic64_fetch_xor_relaxed
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor
-#define atomic64_fetch_xor_release atomic64_fetch_xor
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor
-#else /* atomic64_fetch_xor_relaxed */
-
-#ifndef atomic64_fetch_xor_acquire
-static __always_inline s64
-atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
-{
- s64 ret = atomic64_fetch_xor_relaxed(i, v);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
-#endif
-
-#ifndef atomic64_fetch_xor_release
-static __always_inline s64
-atomic64_fetch_xor_release(s64 i, atomic64_t *v)
-{
- __atomic_release_fence();
- return atomic64_fetch_xor_relaxed(i, v);
-}
-#define atomic64_fetch_xor_release atomic64_fetch_xor_release
-#endif
-
-#ifndef atomic64_fetch_xor
-static __always_inline s64
-atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_fetch_xor_relaxed(i, v);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_fetch_xor atomic64_fetch_xor
-#endif
-
-#endif /* atomic64_fetch_xor_relaxed */
-
-#define arch_atomic64_xchg atomic64_xchg
-#define arch_atomic64_xchg_acquire atomic64_xchg_acquire
-#define arch_atomic64_xchg_release atomic64_xchg_release
-#define arch_atomic64_xchg_relaxed atomic64_xchg_relaxed
-
-#ifndef atomic64_xchg_relaxed
-#define atomic64_xchg_acquire atomic64_xchg
-#define atomic64_xchg_release atomic64_xchg
-#define atomic64_xchg_relaxed atomic64_xchg
-#else /* atomic64_xchg_relaxed */
-
-#ifndef atomic64_xchg_acquire
-static __always_inline s64
-atomic64_xchg_acquire(atomic64_t *v, s64 i)
-{
- s64 ret = atomic64_xchg_relaxed(v, i);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_xchg_acquire atomic64_xchg_acquire
-#endif
-
-#ifndef atomic64_xchg_release
-static __always_inline s64
-atomic64_xchg_release(atomic64_t *v, s64 i)
-{
- __atomic_release_fence();
- return atomic64_xchg_relaxed(v, i);
-}
-#define atomic64_xchg_release atomic64_xchg_release
-#endif
-
-#ifndef atomic64_xchg
-static __always_inline s64
-atomic64_xchg(atomic64_t *v, s64 i)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_xchg_relaxed(v, i);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_xchg atomic64_xchg
-#endif
-
-#endif /* atomic64_xchg_relaxed */
-
-#define arch_atomic64_cmpxchg atomic64_cmpxchg
-#define arch_atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
-#define arch_atomic64_cmpxchg_release atomic64_cmpxchg_release
-#define arch_atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
-
-#ifndef atomic64_cmpxchg_relaxed
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg
-#define atomic64_cmpxchg_release atomic64_cmpxchg
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
-#else /* atomic64_cmpxchg_relaxed */
-
-#ifndef atomic64_cmpxchg_acquire
-static __always_inline s64
-atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
- s64 ret = atomic64_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
-#endif
-
-#ifndef atomic64_cmpxchg_release
-static __always_inline s64
-atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
-{
- __atomic_release_fence();
- return atomic64_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_cmpxchg_release atomic64_cmpxchg_release
-#endif
-
-#ifndef atomic64_cmpxchg
-static __always_inline s64
-atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
- s64 ret;
- __atomic_pre_full_fence();
- ret = atomic64_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_cmpxchg atomic64_cmpxchg
-#endif
-
-#endif /* atomic64_cmpxchg_relaxed */
-
-#define arch_atomic64_try_cmpxchg atomic64_try_cmpxchg
-#define arch_atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#define arch_atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#define arch_atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
-
-#ifndef atomic64_try_cmpxchg_relaxed
-#ifdef atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg
-#endif /* atomic64_try_cmpxchg */
-
-#ifndef atomic64_try_cmpxchg
-static __always_inline bool
-atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-#endif
-
-#ifndef atomic64_try_cmpxchg_acquire
-static __always_inline bool
-atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg_acquire(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic64_try_cmpxchg_release
-static __always_inline bool
-atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg_release(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#endif
-
-#ifndef atomic64_try_cmpxchg_relaxed
-static __always_inline bool
-atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
-{
- s64 r, o = *old;
- r = atomic64_cmpxchg_relaxed(v, o, new);
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
-#endif
-
-#else /* atomic64_try_cmpxchg_relaxed */
-
-#ifndef atomic64_try_cmpxchg_acquire
-static __always_inline bool
-atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
-{
- bool ret = atomic64_try_cmpxchg_relaxed(v, old, new);
- __atomic_acquire_fence();
- return ret;
-}
-#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
-#endif
-
-#ifndef atomic64_try_cmpxchg_release
-static __always_inline bool
-atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
-{
- __atomic_release_fence();
- return atomic64_try_cmpxchg_relaxed(v, old, new);
-}
-#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
-#endif
-
-#ifndef atomic64_try_cmpxchg
-static __always_inline bool
-atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
- bool ret;
- __atomic_pre_full_fence();
- ret = atomic64_try_cmpxchg_relaxed(v, old, new);
- __atomic_post_full_fence();
- return ret;
-}
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-#endif
-
-#endif /* atomic64_try_cmpxchg_relaxed */
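
Unlike cmpxchg, try_cmpxchg returns a bool and writes the observed
value back through *old on failure, which is what makes the loops
above (and in callers) read-free on retry. A hypothetical caller-side
loop:

	static s64 bump(atomic64_t *v)
	{
		s64 old = atomic64_read(v);

		/* On failure 'old' holds the current value; just retry. */
		while (!atomic64_try_cmpxchg(v, &old, old + 1))
			;
		return old;
	}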
-
-#define arch_atomic64_sub_and_test atomic64_sub_and_test
-
-#ifndef atomic64_sub_and_test
-/**
- * atomic64_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-atomic64_sub_and_test(s64 i, atomic64_t *v)
-{
- return atomic64_sub_return(i, v) == 0;
-}
-#define atomic64_sub_and_test atomic64_sub_and_test
-#endif
-
-#define arch_atomic64_dec_and_test atomic64_dec_and_test
-
-#ifndef atomic64_dec_and_test
-/**
- * atomic64_dec_and_test - decrement and test
- * @v: pointer of type atomic64_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __always_inline bool
-atomic64_dec_and_test(atomic64_t *v)
-{
- return atomic64_dec_return(v) == 0;
-}
-#define atomic64_dec_and_test atomic64_dec_and_test
-#endif
-
-#define arch_atomic64_inc_and_test atomic64_inc_and_test
-
-#ifndef atomic64_inc_and_test
-/**
- * atomic64_inc_and_test - increment and test
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __always_inline bool
-atomic64_inc_and_test(atomic64_t *v)
-{
- return atomic64_inc_return(v) == 0;
-}
-#define atomic64_inc_and_test atomic64_inc_and_test
-#endif
-
-#define arch_atomic64_add_negative atomic64_add_negative
-
-#ifndef atomic64_add_negative
-/**
- * atomic64_add_negative - add and test if negative
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * the result is greater than or equal to zero.
- */
-static __always_inline bool
-atomic64_add_negative(s64 i, atomic64_t *v)
-{
- return atomic64_add_return(i, v) < 0;
-}
-#define atomic64_add_negative atomic64_add_negative
-#endif
-
-#define arch_atomic64_fetch_add_unless atomic64_fetch_add_unless
-
-#ifndef atomic64_fetch_add_unless
-/**
- * atomic64_fetch_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the original value of @v.
- */
-static __always_inline s64
-atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- s64 c = atomic64_read(v);
-
- do {
- if (unlikely(c == u))
- break;
- } while (!atomic64_try_cmpxchg(v, &c, c + a));
-
- return c;
-}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
-#endif
-
-#define arch_atomic64_add_unless atomic64_add_unless
-
-#ifndef atomic64_add_unless
-/**
- * atomic64_add_unless - add unless the number is already a given value
- * @v: pointer of type atomic64_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, if @v was not already @u.
- * Returns true if the addition was done.
- */
-static __always_inline bool
-atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
-{
- return atomic64_fetch_add_unless(v, a, u) != u;
-}
-#define atomic64_add_unless atomic64_add_unless
-#endif
-
-#define arch_atomic64_inc_not_zero atomic64_inc_not_zero
-
-#ifndef atomic64_inc_not_zero
-/**
- * atomic64_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic64_t
- *
- * Atomically increments @v by 1, if @v is non-zero.
- * Returns true if the increment was done.
- */
-static __always_inline bool
-atomic64_inc_not_zero(atomic64_t *v)
-{
- return atomic64_add_unless(v, 1, 0);
-}
-#define atomic64_inc_not_zero atomic64_inc_not_zero
-#endif
-
-#define arch_atomic64_inc_unless_negative atomic64_inc_unless_negative
-
-#ifndef atomic64_inc_unless_negative
-static __always_inline bool
-atomic64_inc_unless_negative(atomic64_t *v)
-{
- s64 c = atomic64_read(v);
-
- do {
- if (unlikely(c < 0))
- return false;
- } while (!atomic64_try_cmpxchg(v, &c, c + 1));
-
- return true;
-}
-#define atomic64_inc_unless_negative atomic64_inc_unless_negative
-#endif
-
-#define arch_atomic64_dec_unless_positive atomic64_dec_unless_positive
-
-#ifndef atomic64_dec_unless_positive
-static __always_inline bool
-atomic64_dec_unless_positive(atomic64_t *v)
-{
- s64 c = atomic64_read(v);
-
- do {
- if (unlikely(c > 0))
- return false;
- } while (!atomic64_try_cmpxchg(v, &c, c - 1));
-
- return true;
-}
-#define atomic64_dec_unless_positive atomic64_dec_unless_positive
-#endif
-
-#define arch_atomic64_dec_if_positive atomic64_dec_if_positive
-
-#ifndef atomic64_dec_if_positive
-static __always_inline s64
-atomic64_dec_if_positive(atomic64_t *v)
-{
- s64 dec, c = atomic64_read(v);
-
- do {
- dec = c - 1;
- if (unlikely(dec < 0))
- break;
- } while (!atomic64_try_cmpxchg(v, &c, dec));
-
- return dec;
-}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-#endif
-
-#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// d78e6c293c661c15188f0ec05bce45188c8d5892
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 571a11008ab5..8dd57c3a99e9 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -77,13 +77,8 @@
__ret; \
})
-#ifdef ARCH_ATOMIC
-#include <linux/atomic-arch-fallback.h>
-#include <asm-generic/atomic-instrumented.h>
-#else
-#include <linux/atomic-fallback.h>
-#endif
-
-#include <asm-generic/atomic-long.h>
+#include <linux/atomic/atomic-arch-fallback.h>
+#include <linux/atomic/atomic-long.h>
+#include <linux/atomic/atomic-instrumented.h>
#endif /* _LINUX_ATOMIC_H */
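
With this change include/linux/atomic.h composes three generated
headers in a fixed order: atomic-arch-fallback.h fills out a complete
raw_*/arch_* operation set from whatever the architecture defines,
atomic-long.h layers atomic_long_t on top of it, and
atomic-instrumented.h wraps the result with the sanitizer hooks used by
ordinary kernel code. A sketch of that top layer (abridged, not
verbatim from the generated file):

	static __always_inline int
	atomic_read(const atomic_t *v)
	{
		instrument_atomic_read(v, sizeof(*v));	/* KASAN/KCSAN hook */
		return raw_atomic_read(v);		/* fallback layer below */
	}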
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
new file mode 100644
index 000000000000..2f9d36b72bd8
--- /dev/null
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -0,0 +1,4693 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-fallback.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_FALLBACK_H
+#define _LINUX_ATOMIC_FALLBACK_H
+
+#include <linux/compiler.h>
+
+#if defined(arch_xchg)
+#define raw_xchg arch_xchg
+#elif defined(arch_xchg_relaxed)
+#define raw_xchg(...) \
+ __atomic_op_fence(arch_xchg, __VA_ARGS__)
+#else
+extern void raw_xchg_not_implemented(void);
+#define raw_xchg(...) raw_xchg_not_implemented()
+#endif
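
Each raw_<op> in the new header is picked by a fixed preference ladder:
use the architecture's exact primitive if present, otherwise synthesize
the ordering from the _relaxed form, otherwise fall back to an extern
function that is never defined, so use of a genuinely unavailable
operation fails at link time rather than silently. The synthesis relies
on __atomic_op_fence() and friends from include/linux/atomic.h, which
token-paste _relaxed onto the op; abridged here from memory:

	#define __atomic_op_fence(op, args...)			\
	({							\
		typeof(op##_relaxed(args)) __ret;		\
		__atomic_pre_full_fence();			\
		__ret = op##_relaxed(args);			\
		__atomic_post_full_fence();			\
		__ret;						\
	})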
+
+#if defined(arch_xchg_acquire)
+#define raw_xchg_acquire arch_xchg_acquire
+#elif defined(arch_xchg_relaxed)
+#define raw_xchg_acquire(...) \
+ __atomic_op_acquire(arch_xchg, __VA_ARGS__)
+#elif defined(arch_xchg)
+#define raw_xchg_acquire arch_xchg
+#else
+extern void raw_xchg_acquire_not_implemented(void);
+#define raw_xchg_acquire(...) raw_xchg_acquire_not_implemented()
+#endif
+
+#if defined(arch_xchg_release)
+#define raw_xchg_release arch_xchg_release
+#elif defined(arch_xchg_relaxed)
+#define raw_xchg_release(...) \
+ __atomic_op_release(arch_xchg, __VA_ARGS__)
+#elif defined(arch_xchg)
+#define raw_xchg_release arch_xchg
+#else
+extern void raw_xchg_release_not_implemented(void);
+#define raw_xchg_release(...) raw_xchg_release_not_implemented()
+#endif
+
+#if defined(arch_xchg_relaxed)
+#define raw_xchg_relaxed arch_xchg_relaxed
+#elif defined(arch_xchg)
+#define raw_xchg_relaxed arch_xchg
+#else
+extern void raw_xchg_relaxed_not_implemented(void);
+#define raw_xchg_relaxed(...) raw_xchg_relaxed_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg)
+#define raw_cmpxchg arch_cmpxchg
+#elif defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg(...) \
+ __atomic_op_fence(arch_cmpxchg, __VA_ARGS__)
+#else
+extern void raw_cmpxchg_not_implemented(void);
+#define raw_cmpxchg(...) raw_cmpxchg_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg_acquire)
+#define raw_cmpxchg_acquire arch_cmpxchg_acquire
+#elif defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__)
+#elif defined(arch_cmpxchg)
+#define raw_cmpxchg_acquire arch_cmpxchg
+#else
+extern void raw_cmpxchg_acquire_not_implemented(void);
+#define raw_cmpxchg_acquire(...) raw_cmpxchg_acquire_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg_release)
+#define raw_cmpxchg_release arch_cmpxchg_release
+#elif defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg_release(...) \
+ __atomic_op_release(arch_cmpxchg, __VA_ARGS__)
+#elif defined(arch_cmpxchg)
+#define raw_cmpxchg_release arch_cmpxchg
+#else
+extern void raw_cmpxchg_release_not_implemented(void);
+#define raw_cmpxchg_release(...) raw_cmpxchg_release_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg_relaxed)
+#define raw_cmpxchg_relaxed arch_cmpxchg_relaxed
+#elif defined(arch_cmpxchg)
+#define raw_cmpxchg_relaxed arch_cmpxchg
+#else
+extern void raw_cmpxchg_relaxed_not_implemented(void);
+#define raw_cmpxchg_relaxed(...) raw_cmpxchg_relaxed_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg64)
+#define raw_cmpxchg64 arch_cmpxchg64
+#elif defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64(...) \
+ __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__)
+#else
+extern void raw_cmpxchg64_not_implemented(void);
+#define raw_cmpxchg64(...) raw_cmpxchg64_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg64_acquire)
+#define raw_cmpxchg64_acquire arch_cmpxchg64_acquire
+#elif defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_cmpxchg64)
+#define raw_cmpxchg64_acquire arch_cmpxchg64
+#else
+extern void raw_cmpxchg64_acquire_not_implemented(void);
+#define raw_cmpxchg64_acquire(...) raw_cmpxchg64_acquire_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg64_release)
+#define raw_cmpxchg64_release arch_cmpxchg64_release
+#elif defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64_release(...) \
+ __atomic_op_release(arch_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_cmpxchg64)
+#define raw_cmpxchg64_release arch_cmpxchg64
+#else
+extern void raw_cmpxchg64_release_not_implemented(void);
+#define raw_cmpxchg64_release(...) raw_cmpxchg64_release_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg64_relaxed)
+#define raw_cmpxchg64_relaxed arch_cmpxchg64_relaxed
+#elif defined(arch_cmpxchg64)
+#define raw_cmpxchg64_relaxed arch_cmpxchg64
+#else
+extern void raw_cmpxchg64_relaxed_not_implemented(void);
+#define raw_cmpxchg64_relaxed(...) raw_cmpxchg64_relaxed_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128)
+#define raw_cmpxchg128 arch_cmpxchg128
+#elif defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128(...) \
+ __atomic_op_fence(arch_cmpxchg128, __VA_ARGS__)
+#else
+extern void raw_cmpxchg128_not_implemented(void);
+#define raw_cmpxchg128(...) raw_cmpxchg128_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128_acquire)
+#define raw_cmpxchg128_acquire arch_cmpxchg128_acquire
+#elif defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128_acquire(...) \
+ __atomic_op_acquire(arch_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_cmpxchg128)
+#define raw_cmpxchg128_acquire arch_cmpxchg128
+#else
+extern void raw_cmpxchg128_acquire_not_implemented(void);
+#define raw_cmpxchg128_acquire(...) raw_cmpxchg128_acquire_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128_release)
+#define raw_cmpxchg128_release arch_cmpxchg128_release
+#elif defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128_release(...) \
+ __atomic_op_release(arch_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_cmpxchg128)
+#define raw_cmpxchg128_release arch_cmpxchg128
+#else
+extern void raw_cmpxchg128_release_not_implemented(void);
+#define raw_cmpxchg128_release(...) raw_cmpxchg128_release_not_implemented()
+#endif
+
+#if defined(arch_cmpxchg128_relaxed)
+#define raw_cmpxchg128_relaxed arch_cmpxchg128_relaxed
+#elif defined(arch_cmpxchg128)
+#define raw_cmpxchg128_relaxed arch_cmpxchg128
+#else
+extern void raw_cmpxchg128_relaxed_not_implemented(void);
+#define raw_cmpxchg128_relaxed(...) raw_cmpxchg128_relaxed_not_implemented()
+#endif
+
+#if defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg arch_try_cmpxchg
+#elif defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg(...) \
+ __atomic_op_fence(arch_try_cmpxchg, __VA_ARGS__)
+#else
+#define raw_try_cmpxchg(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
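
Where no arch_try_cmpxchg exists, raw_try_cmpxchg() is a
statement-expression built on raw_cmpxchg(), so it works on any scalar
whose size cmpxchg supports, not just atomic_t. A hypothetical use on a
plain word:

	static bool set_flag(unsigned long *word, unsigned long flag)
	{
		unsigned long old = READ_ONCE(*word);

		/* 'old' is refreshed on failure; a caller may loop on it. */
		return raw_try_cmpxchg(word, &old, old | flag);
	}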
+
+#if defined(arch_try_cmpxchg_acquire)
+#define raw_try_cmpxchg_acquire arch_try_cmpxchg_acquire
+#elif defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg_acquire arch_try_cmpxchg
+#else
+#define raw_try_cmpxchg_acquire(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg_acquire((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg_release)
+#define raw_try_cmpxchg_release arch_try_cmpxchg_release
+#elif defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg_release(...) \
+ __atomic_op_release(arch_try_cmpxchg, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg_release arch_try_cmpxchg
+#else
+#define raw_try_cmpxchg_release(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg_release((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg_relaxed)
+#define raw_try_cmpxchg_relaxed arch_try_cmpxchg_relaxed
+#elif defined(arch_try_cmpxchg)
+#define raw_try_cmpxchg_relaxed arch_try_cmpxchg
+#else
+#define raw_try_cmpxchg_relaxed(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg_relaxed((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64 arch_try_cmpxchg64
+#elif defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64(...) \
+ __atomic_op_fence(arch_try_cmpxchg64, __VA_ARGS__)
+#else
+#define raw_try_cmpxchg64(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg64_acquire)
+#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64_acquire
+#elif defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64_acquire arch_try_cmpxchg64
+#else
+#define raw_try_cmpxchg64_acquire(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64_acquire((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg64_release)
+#define raw_try_cmpxchg64_release arch_try_cmpxchg64_release
+#elif defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64_release(...) \
+ __atomic_op_release(arch_try_cmpxchg64, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64_release arch_try_cmpxchg64
+#else
+#define raw_try_cmpxchg64_release(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64_release((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg64_relaxed)
+#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64_relaxed
+#elif defined(arch_try_cmpxchg64)
+#define raw_try_cmpxchg64_relaxed arch_try_cmpxchg64
+#else
+#define raw_try_cmpxchg64_relaxed(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64_relaxed((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128 arch_try_cmpxchg128
+#elif defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128(...) \
+ __atomic_op_fence(arch_try_cmpxchg128, __VA_ARGS__)
+#else
+#define raw_try_cmpxchg128(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg128((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg128_acquire)
+#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128_acquire
+#elif defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128_acquire(...) \
+ __atomic_op_acquire(arch_try_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128_acquire arch_try_cmpxchg128
+#else
+#define raw_try_cmpxchg128_acquire(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg128_acquire((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg128_release)
+#define raw_try_cmpxchg128_release arch_try_cmpxchg128_release
+#elif defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128_release(...) \
+ __atomic_op_release(arch_try_cmpxchg128, __VA_ARGS__)
+#elif defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128_release arch_try_cmpxchg128
+#else
+#define raw_try_cmpxchg128_release(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg128_release((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#if defined(arch_try_cmpxchg128_relaxed)
+#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128_relaxed
+#elif defined(arch_try_cmpxchg128)
+#define raw_try_cmpxchg128_relaxed arch_try_cmpxchg128
+#else
+#define raw_try_cmpxchg128_relaxed(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg128_relaxed((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#define raw_cmpxchg_local arch_cmpxchg_local
+
+#ifdef arch_try_cmpxchg_local
+#define raw_try_cmpxchg_local arch_try_cmpxchg_local
+#else
+#define raw_try_cmpxchg_local(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg_local((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#define raw_cmpxchg64_local arch_cmpxchg64_local
+
+#ifdef arch_try_cmpxchg64_local
+#define raw_try_cmpxchg64_local arch_try_cmpxchg64_local
+#else
+#define raw_try_cmpxchg64_local(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg64_local((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#define raw_cmpxchg128_local arch_cmpxchg128_local
+
+#ifdef arch_try_cmpxchg128_local
+#define raw_try_cmpxchg128_local arch_try_cmpxchg128_local
+#else
+#define raw_try_cmpxchg128_local(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_cmpxchg128_local((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+#define raw_sync_cmpxchg arch_sync_cmpxchg
+
+#ifdef arch_sync_try_cmpxchg
+#define raw_sync_try_cmpxchg arch_sync_try_cmpxchg
+#else
+#define raw_sync_try_cmpxchg(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_sync_cmpxchg((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
+/**
+ * raw_atomic_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_read() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline int
+raw_atomic_read(const atomic_t *v)
+{
+ return arch_atomic_read(v);
+}
+
+/**
+ * raw_atomic_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_read_acquire() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline int
+raw_atomic_read_acquire(const atomic_t *v)
+{
+#if defined(arch_atomic_read_acquire)
+ return arch_atomic_read_acquire(v);
+#else
+ int ret;
+
+ if (__native_word(atomic_t)) {
+ ret = smp_load_acquire(&(v)->counter);
+ } else {
+ ret = raw_atomic_read(v);
+ __atomic_acquire_fence();
+ }
+
+ return ret;
+#endif
+}
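
The __native_word() test gates smp_load_acquire(), which only handles
machine-word-sized accesses; oversized atomics fall back to a relaxed
read followed by an explicit acquire fence. For reference, the test is
(include/linux/compiler_types.h, quoted from memory):

	#define __native_word(t) \
		(sizeof(t) == sizeof(char)  || sizeof(t) == sizeof(short) || \
		 sizeof(t) == sizeof(int)   || sizeof(t) == sizeof(long))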
+
+/**
+ * raw_atomic_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic_t
+ * @i: int value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_set() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_set(atomic_t *v, int i)
+{
+ arch_atomic_set(v, i);
+}
+
+/**
+ * raw_atomic_set_release() - atomic set with release ordering
+ * @v: pointer to atomic_t
+ * @i: int value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_set_release() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_set_release(atomic_t *v, int i)
+{
+#if defined(arch_atomic_set_release)
+ arch_atomic_set_release(v, i);
+#else
+ if (__native_word(atomic_t)) {
+ smp_store_release(&(v)->counter, i);
+ } else {
+ __atomic_release_fence();
+ raw_atomic_set(v, i);
+ }
+#endif
+}
+
+/**
+ * raw_atomic_add() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_add(int i, atomic_t *v)
+{
+ arch_atomic_add(i, v);
+}
+
+/**
+ * raw_atomic_add_return() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_add_return(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_return)
+ return arch_atomic_add_return(i, v);
+#elif defined(arch_atomic_add_return_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_add_return"
+#endif
+}
+
+/**
+ * raw_atomic_add_return_acquire() - atomic add with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_add_return_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_return_acquire)
+ return arch_atomic_add_return_acquire(i, v);
+#elif defined(arch_atomic_add_return_relaxed)
+ int ret = arch_atomic_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_add_return)
+ return arch_atomic_add_return(i, v);
+#else
+#error "Unable to define raw_atomic_add_return_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_add_return_release() - atomic add with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_add_return_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_return_release)
+ return arch_atomic_add_return_release(i, v);
+#elif defined(arch_atomic_add_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_add_return_relaxed(i, v);
+#elif defined(arch_atomic_add_return)
+ return arch_atomic_add_return(i, v);
+#else
+#error "Unable to define raw_atomic_add_return_release"
+#endif
+}
+
+/**
+ * raw_atomic_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_add_return_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_return_relaxed)
+ return arch_atomic_add_return_relaxed(i, v);
+#elif defined(arch_atomic_add_return)
+ return arch_atomic_add_return(i, v);
+#else
+#error "Unable to define raw_atomic_add_return_relaxed"
+#endif
+}
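
Compared with the removed header, which defined missing names through
#ifndef blocks, each raw_ wrapper selects among the arch_ variants
inline and turns a hole in the architecture's implementation into a
hard build failure. The selection order, common to every operation
family above and below:

	/* full:    arch op -> relaxed between full fences     -> #error
	 * acquire: arch op -> relaxed + acquire fence -> full -> #error
	 * release: arch op -> release fence + relaxed -> full -> #error
	 * relaxed: arch op -> full arch op                    -> #error */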
+
+/**
+ * raw_atomic_fetch_add() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_add(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_add)
+ return arch_atomic_fetch_add(i, v);
+#elif defined(arch_atomic_fetch_add_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_fetch_add"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_add_acquire)
+ return arch_atomic_fetch_add_acquire(i, v);
+#elif defined(arch_atomic_fetch_add_relaxed)
+ int ret = arch_atomic_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_add)
+ return arch_atomic_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_add_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_add_release() - atomic add with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_add_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_add_release)
+ return arch_atomic_fetch_add_release(i, v);
+#elif defined(arch_atomic_fetch_add_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_add_relaxed(i, v);
+#elif defined(arch_atomic_fetch_add)
+ return arch_atomic_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_add_release"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_add_relaxed)
+ return arch_atomic_fetch_add_relaxed(i, v);
+#elif defined(arch_atomic_fetch_add)
+ return arch_atomic_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_add_relaxed"
+#endif
+}
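+
+/*
+ * Illustrative sketch: the add_return and fetch_add families differ only
+ * in which value they return. Assuming a counter initialized to zero:
+ *
+ *	atomic_t c = ATOMIC_INIT(0);
+ *
+ *	raw_atomic_add_return(5, &c);	returns 5 (the updated value)
+ *	raw_atomic_fetch_add(5, &c);	returns 5 (the original value)
+ *	raw_atomic_read(&c);		returns 10
+ */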
+
+/**
+ * raw_atomic_sub() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_sub(int i, atomic_t *v)
+{
+ arch_atomic_sub(i, v);
+}
+
+/**
+ * raw_atomic_sub_return() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_sub_return(int i, atomic_t *v)
+{
+#if defined(arch_atomic_sub_return)
+ return arch_atomic_sub_return(i, v);
+#elif defined(arch_atomic_sub_return_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_sub_return"
+#endif
+}
+
+/**
+ * raw_atomic_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_sub_return_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_sub_return_acquire)
+ return arch_atomic_sub_return_acquire(i, v);
+#elif defined(arch_atomic_sub_return_relaxed)
+ int ret = arch_atomic_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_sub_return)
+ return arch_atomic_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic_sub_return_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_sub_return_release() - atomic subtract with release ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_sub_return_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_sub_return_release)
+ return arch_atomic_sub_return_release(i, v);
+#elif defined(arch_atomic_sub_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_sub_return_relaxed(i, v);
+#elif defined(arch_atomic_sub_return)
+ return arch_atomic_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic_sub_return_release"
+#endif
+}
+
+/**
+ * raw_atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_sub_return_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_sub_return_relaxed)
+ return arch_atomic_sub_return_relaxed(i, v);
+#elif defined(arch_atomic_sub_return)
+ return arch_atomic_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic_sub_return_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_sub() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_sub() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_sub(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_sub)
+ return arch_atomic_fetch_sub(i, v);
+#elif defined(arch_atomic_fetch_sub_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_fetch_sub"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_sub_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_sub_acquire)
+ return arch_atomic_fetch_sub_acquire(i, v);
+#elif defined(arch_atomic_fetch_sub_relaxed)
+ int ret = arch_atomic_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_sub)
+ return arch_atomic_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_sub_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_sub_release() - atomic subtract with release ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_sub_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_sub_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_sub_release)
+ return arch_atomic_fetch_sub_release(i, v);
+#elif defined(arch_atomic_fetch_sub_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_sub_relaxed(i, v);
+#elif defined(arch_atomic_fetch_sub)
+ return arch_atomic_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_sub_release"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_sub_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_sub_relaxed)
+ return arch_atomic_fetch_sub_relaxed(i, v);
+#elif defined(arch_atomic_fetch_sub)
+ return arch_atomic_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_sub_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_inc(atomic_t *v)
+{
+#if defined(arch_atomic_inc)
+ arch_atomic_inc(v);
+#else
+ raw_atomic_add(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_inc_return(atomic_t *v)
+{
+#if defined(arch_atomic_inc_return)
+ return arch_atomic_inc_return(v);
+#elif defined(arch_atomic_inc_return_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_add_return(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_inc_return_acquire(atomic_t *v)
+{
+#if defined(arch_atomic_inc_return_acquire)
+ return arch_atomic_inc_return_acquire(v);
+#elif defined(arch_atomic_inc_return_relaxed)
+ int ret = arch_atomic_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_inc_return)
+ return arch_atomic_inc_return(v);
+#else
+ return raw_atomic_add_return_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_inc_return_release(atomic_t *v)
+{
+#if defined(arch_atomic_inc_return_release)
+ return arch_atomic_inc_return_release(v);
+#elif defined(arch_atomic_inc_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_inc_return_relaxed(v);
+#elif defined(arch_atomic_inc_return)
+ return arch_atomic_inc_return(v);
+#else
+ return raw_atomic_add_return_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_inc_return_relaxed(atomic_t *v)
+{
+#if defined(arch_atomic_inc_return_relaxed)
+ return arch_atomic_inc_return_relaxed(v);
+#elif defined(arch_atomic_inc_return)
+ return arch_atomic_inc_return(v);
+#else
+ return raw_atomic_add_return_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_inc() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_inc(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_inc)
+ return arch_atomic_fetch_inc(v);
+#elif defined(arch_atomic_fetch_inc_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_fetch_add(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_inc_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_inc_acquire(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_inc_acquire)
+ return arch_atomic_fetch_inc_acquire(v);
+#elif defined(arch_atomic_fetch_inc_relaxed)
+ int ret = arch_atomic_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_inc)
+ return arch_atomic_fetch_inc(v);
+#else
+ return raw_atomic_fetch_add_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_inc_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_inc_release(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_inc_release)
+ return arch_atomic_fetch_inc_release(v);
+#elif defined(arch_atomic_fetch_inc_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_inc_relaxed(v);
+#elif defined(arch_atomic_fetch_inc)
+ return arch_atomic_fetch_inc(v);
+#else
+ return raw_atomic_fetch_add_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_inc_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_inc_relaxed(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_inc_relaxed)
+ return arch_atomic_fetch_inc_relaxed(v);
+#elif defined(arch_atomic_fetch_inc)
+ return arch_atomic_fetch_inc(v);
+#else
+ return raw_atomic_fetch_add_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_dec(atomic_t *v)
+{
+#if defined(arch_atomic_dec)
+ arch_atomic_dec(v);
+#else
+ raw_atomic_sub(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_dec_return(atomic_t *v)
+{
+#if defined(arch_atomic_dec_return)
+ return arch_atomic_dec_return(v);
+#elif defined(arch_atomic_dec_return_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_sub_return(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_dec_return_acquire(atomic_t *v)
+{
+#if defined(arch_atomic_dec_return_acquire)
+ return arch_atomic_dec_return_acquire(v);
+#elif defined(arch_atomic_dec_return_relaxed)
+ int ret = arch_atomic_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_dec_return)
+ return arch_atomic_dec_return(v);
+#else
+ return raw_atomic_sub_return_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_dec_return_release(atomic_t *v)
+{
+#if defined(arch_atomic_dec_return_release)
+ return arch_atomic_dec_return_release(v);
+#elif defined(arch_atomic_dec_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_dec_return_relaxed(v);
+#elif defined(arch_atomic_dec_return)
+ return arch_atomic_dec_return(v);
+#else
+ return raw_atomic_sub_return_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+raw_atomic_dec_return_relaxed(atomic_t *v)
+{
+#if defined(arch_atomic_dec_return_relaxed)
+ return arch_atomic_dec_return_relaxed(v);
+#elif defined(arch_atomic_dec_return)
+ return arch_atomic_dec_return(v);
+#else
+ return raw_atomic_sub_return_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_dec() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_dec(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_dec)
+ return arch_atomic_fetch_dec(v);
+#elif defined(arch_atomic_fetch_dec_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_fetch_sub(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_dec_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_dec_acquire(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_dec_acquire)
+ return arch_atomic_fetch_dec_acquire(v);
+#elif defined(arch_atomic_fetch_dec_relaxed)
+ int ret = arch_atomic_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_dec)
+ return arch_atomic_fetch_dec(v);
+#else
+ return raw_atomic_fetch_sub_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_dec_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_dec_release(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_dec_release)
+ return arch_atomic_fetch_dec_release(v);
+#elif defined(arch_atomic_fetch_dec_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_dec_relaxed(v);
+#elif defined(arch_atomic_fetch_dec)
+ return arch_atomic_fetch_dec(v);
+#else
+ return raw_atomic_fetch_sub_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_dec_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_dec_relaxed(atomic_t *v)
+{
+#if defined(arch_atomic_fetch_dec_relaxed)
+ return arch_atomic_fetch_dec_relaxed(v);
+#elif defined(arch_atomic_fetch_dec)
+ return arch_atomic_fetch_dec(v);
+#else
+ return raw_atomic_fetch_sub_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic_and() - atomic bitwise AND with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_and() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_and(int i, atomic_t *v)
+{
+ arch_atomic_and(i, v);
+}
+
+/**
+ * raw_atomic_fetch_and() - atomic bitwise AND with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_and() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_and(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_and)
+ return arch_atomic_fetch_and(i, v);
+#elif defined(arch_atomic_fetch_and_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_fetch_and"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_and_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_and_acquire)
+ return arch_atomic_fetch_and_acquire(i, v);
+#elif defined(arch_atomic_fetch_and_relaxed)
+ int ret = arch_atomic_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_and)
+ return arch_atomic_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_and_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_and_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_and_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_and_release)
+ return arch_atomic_fetch_and_release(i, v);
+#elif defined(arch_atomic_fetch_and_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_and_relaxed(i, v);
+#elif defined(arch_atomic_fetch_and)
+ return arch_atomic_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_and_release"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_and_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_and_relaxed)
+ return arch_atomic_fetch_and_relaxed(i, v);
+#elif defined(arch_atomic_fetch_and)
+ return arch_atomic_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_and_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_andnot() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_andnot(int i, atomic_t *v)
+{
+#if defined(arch_atomic_andnot)
+ arch_atomic_andnot(i, v);
+#else
+ raw_atomic_and(~i, v);
+#endif
+}
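+
+/*
+ * Illustrative sketch: raw_atomic_andnot() clears the bits that are set
+ * in @i, so the two calls below are equivalent (FLAG_PENDING is a
+ * hypothetical mask; the fallback above relies on the same identity):
+ *
+ *	raw_atomic_andnot(FLAG_PENDING, &flags);
+ *	raw_atomic_and(~FLAG_PENDING, &flags);
+ */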
+
+/**
+ * raw_atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_andnot() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_andnot(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_andnot)
+ return arch_atomic_fetch_andnot(i, v);
+#elif defined(arch_atomic_fetch_andnot_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_fetch_and(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_andnot_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_andnot_acquire)
+ return arch_atomic_fetch_andnot_acquire(i, v);
+#elif defined(arch_atomic_fetch_andnot_relaxed)
+ int ret = arch_atomic_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_andnot)
+ return arch_atomic_fetch_andnot(i, v);
+#else
+ return raw_atomic_fetch_and_acquire(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_andnot_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_andnot_release)
+ return arch_atomic_fetch_andnot_release(i, v);
+#elif defined(arch_atomic_fetch_andnot_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+#elif defined(arch_atomic_fetch_andnot)
+ return arch_atomic_fetch_andnot(i, v);
+#else
+ return raw_atomic_fetch_and_release(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_andnot_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_andnot_relaxed)
+ return arch_atomic_fetch_andnot_relaxed(i, v);
+#elif defined(arch_atomic_fetch_andnot)
+ return arch_atomic_fetch_andnot(i, v);
+#else
+ return raw_atomic_fetch_and_relaxed(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic_or() - atomic bitwise OR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_or() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_or(int i, atomic_t *v)
+{
+ arch_atomic_or(i, v);
+}
+
+/**
+ * raw_atomic_fetch_or() - atomic bitwise OR with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_or() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_or(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_or)
+ return arch_atomic_fetch_or(i, v);
+#elif defined(arch_atomic_fetch_or_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_fetch_or"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_or_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_or_acquire)
+ return arch_atomic_fetch_or_acquire(i, v);
+#elif defined(arch_atomic_fetch_or_relaxed)
+ int ret = arch_atomic_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_or)
+ return arch_atomic_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_or_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_or_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_or_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_or_release)
+ return arch_atomic_fetch_or_release(i, v);
+#elif defined(arch_atomic_fetch_or_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_or_relaxed(i, v);
+#elif defined(arch_atomic_fetch_or)
+ return arch_atomic_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_or_release"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_or_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_or_relaxed)
+ return arch_atomic_fetch_or_relaxed(i, v);
+#elif defined(arch_atomic_fetch_or)
+ return arch_atomic_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_or_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xor() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_xor(int i, atomic_t *v)
+{
+ arch_atomic_xor(i, v);
+}
+
+/**
+ * raw_atomic_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_xor() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_xor(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_xor)
+ return arch_atomic_fetch_xor(i, v);
+#elif defined(arch_atomic_fetch_xor_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic_fetch_xor"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_xor_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_xor_acquire)
+ return arch_atomic_fetch_xor_acquire(i, v);
+#elif defined(arch_atomic_fetch_xor_relaxed)
+ int ret = arch_atomic_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_fetch_xor)
+ return arch_atomic_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_xor_acquire"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_xor_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_xor_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_xor_release)
+ return arch_atomic_fetch_xor_release(i, v);
+#elif defined(arch_atomic_fetch_xor_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_fetch_xor_relaxed(i, v);
+#elif defined(arch_atomic_fetch_xor)
+ return arch_atomic_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_xor_release"
+#endif
+}
+
+/**
+ * raw_atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_xor_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_fetch_xor_relaxed)
+ return arch_atomic_fetch_xor_relaxed(i, v);
+#elif defined(arch_atomic_fetch_xor)
+ return arch_atomic_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic_fetch_xor_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_xchg(atomic_t *v, int new)
+{
+#if defined(arch_atomic_xchg)
+ return arch_atomic_xchg(v, new);
+#elif defined(arch_atomic_xchg_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_xchg_relaxed(v, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_xchg(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_xchg_acquire(atomic_t *v, int new)
+{
+#if defined(arch_atomic_xchg_acquire)
+ return arch_atomic_xchg_acquire(v, new);
+#elif defined(arch_atomic_xchg_relaxed)
+ int ret = arch_atomic_xchg_relaxed(v, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_xchg)
+ return arch_atomic_xchg(v, new);
+#else
+ return raw_xchg_acquire(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_xchg_release(atomic_t *v, int new)
+{
+#if defined(arch_atomic_xchg_release)
+ return arch_atomic_xchg_release(v, new);
+#elif defined(arch_atomic_xchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_xchg_relaxed(v, new);
+#elif defined(arch_atomic_xchg)
+ return arch_atomic_xchg(v, new);
+#else
+ return raw_xchg_release(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_xchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_xchg_relaxed(atomic_t *v, int new)
+{
+#if defined(arch_atomic_xchg_relaxed)
+ return arch_atomic_xchg_relaxed(v, new);
+#elif defined(arch_atomic_xchg)
+ return arch_atomic_xchg(v, new);
+#else
+ return raw_xchg_relaxed(&v->counter, new);
+#endif
+}
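+
+/*
+ * Illustrative sketch (hypothetical names): draining a pending-work mask
+ * with a full-ordering exchange, so that the consumer observes every
+ * store made before a bit was set:
+ *
+ *	int pending = raw_atomic_xchg(&work_pending, 0);
+ *
+ *	if (pending)
+ *		process(pending);
+ */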
+
+/**
+ * raw_atomic_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_cmpxchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+#if defined(arch_atomic_cmpxchg)
+ return arch_atomic_cmpxchg(v, old, new);
+#elif defined(arch_atomic_cmpxchg_relaxed)
+ int ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_cmpxchg(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_cmpxchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+#if defined(arch_atomic_cmpxchg_acquire)
+ return arch_atomic_cmpxchg_acquire(v, old, new);
+#elif defined(arch_atomic_cmpxchg_relaxed)
+ int ret = arch_atomic_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_cmpxchg)
+ return arch_atomic_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_acquire(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_cmpxchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+#if defined(arch_atomic_cmpxchg_release)
+ return arch_atomic_cmpxchg_release(v, old, new);
+#elif defined(arch_atomic_cmpxchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic_cmpxchg)
+ return arch_atomic_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_release(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
+{
+#if defined(arch_atomic_cmpxchg_relaxed)
+ return arch_atomic_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic_cmpxchg)
+ return arch_atomic_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_relaxed(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+#if defined(arch_atomic_try_cmpxchg)
+ return arch_atomic_try_cmpxchg(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg_relaxed)
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ int r, o = *old;
+ r = raw_atomic_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
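+
+/*
+ * Illustrative sketch: because @old is updated on failure, the intended
+ * calling convention is a loop with no explicit re-read, e.g. a
+ * saturating increment (names hypothetical):
+ *
+ *	int old = raw_atomic_read(&v);
+ *
+ *	do {
+ *		if (old == INT_MAX)
+ *			break;
+ *	} while (!raw_atomic_try_cmpxchg(&v, &old, old + 1));
+ */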
+
+/**
+ * raw_atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+#if defined(arch_atomic_try_cmpxchg_acquire)
+ return arch_atomic_try_cmpxchg_acquire(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg_relaxed)
+ bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_try_cmpxchg)
+ return arch_atomic_try_cmpxchg(v, old, new);
+#else
+ int r, o = *old;
+ r = raw_atomic_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+#if defined(arch_atomic_try_cmpxchg_release)
+ return arch_atomic_try_cmpxchg_release(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg)
+ return arch_atomic_try_cmpxchg(v, old, new);
+#else
+ int r, o = *old;
+ r = raw_atomic_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: @true if the exchange occured, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+#if defined(arch_atomic_try_cmpxchg_relaxed)
+ return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic_try_cmpxchg)
+ return arch_atomic_try_cmpxchg(v, old, new);
+#else
+ int r, o = *old;
+ r = raw_atomic_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_sub_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_sub_and_test(int i, atomic_t *v)
+{
+#if defined(arch_atomic_sub_and_test)
+ return arch_atomic_sub_and_test(i, v);
+#else
+ return raw_atomic_sub_return(i, v) == 0;
+#endif
+}
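+
+/*
+ * Illustrative sketch: dropping @i references at once and freeing on the
+ * final put, relying on the full ordering noted above (names
+ * hypothetical):
+ *
+ *	if (raw_atomic_sub_and_test(nr_refs, &obj->refs))
+ *		free_obj(obj);
+ */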
+
+/**
+ * raw_atomic_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_dec_and_test(atomic_t *v)
+{
+#if defined(arch_atomic_dec_and_test)
+ return arch_atomic_dec_and_test(v);
+#else
+ return raw_atomic_dec_return(v) == 0;
+#endif
+}
+
+/**
+ * raw_atomic_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_inc_and_test(atomic_t *v)
+{
+#if defined(arch_atomic_inc_and_test)
+ return arch_atomic_inc_and_test(v);
+#else
+ return raw_atomic_inc_return(v) == 0;
+#endif
+}
+
+/**
+ * raw_atomic_add_negative() - atomic add and test if negative with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_negative() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_add_negative(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_negative)
+ return arch_atomic_add_negative(i, v);
+#elif defined(arch_atomic_add_negative_relaxed)
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic_add_negative_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic_add_return(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_negative_acquire() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_add_negative_acquire(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_negative_acquire)
+ return arch_atomic_add_negative_acquire(i, v);
+#elif defined(arch_atomic_add_negative_relaxed)
+ bool ret = arch_atomic_add_negative_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic_add_negative)
+ return arch_atomic_add_negative(i, v);
+#else
+ return raw_atomic_add_return_acquire(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_negative_release() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_add_negative_release(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_negative_release)
+ return arch_atomic_add_negative_release(i, v);
+#elif defined(arch_atomic_add_negative_relaxed)
+ __atomic_release_fence();
+ return arch_atomic_add_negative_relaxed(i, v);
+#elif defined(arch_atomic_add_negative)
+ return arch_atomic_add_negative(i, v);
+#else
+ return raw_atomic_add_return_release(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_negative_relaxed() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_add_negative_relaxed(int i, atomic_t *v)
+{
+#if defined(arch_atomic_add_negative_relaxed)
+ return arch_atomic_add_negative_relaxed(i, v);
+#elif defined(arch_atomic_add_negative)
+ return arch_atomic_add_negative(i, v);
+#else
+ return raw_atomic_add_return_relaxed(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_t
+ * @a: int value to add
+ * @u: int value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_fetch_add_unless() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+#if defined(arch_atomic_fetch_add_unless)
+ return arch_atomic_fetch_add_unless(v, a, u);
+#else
+ int c = raw_atomic_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!raw_atomic_try_cmpxchg(v, &c, c + a));
+
+ return c;
+#endif
+}
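+
+/*
+ * Illustrative usage: comparing the returned value with @u tells the
+ * caller whether the add happened, which is exactly how
+ * raw_atomic_add_unless() below is built on this primitive:
+ *
+ *	if (raw_atomic_fetch_add_unless(&v, 1, 0) != 0)
+ *		... @v was non-zero and has been incremented ...
+ */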
+
+/**
+ * raw_atomic_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_t
+ * @a: int value to add
+ * @u: int value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_add_unless() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_add_unless(atomic_t *v, int a, int u)
+{
+#if defined(arch_atomic_add_unless)
+ return arch_atomic_add_unless(v, a, u);
+#else
+ return raw_atomic_fetch_add_unless(v, a, u) != u;
+#endif
+}
+
+/**
+ * raw_atomic_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_not_zero() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_inc_not_zero(atomic_t *v)
+{
+#if defined(arch_atomic_inc_not_zero)
+ return arch_atomic_inc_not_zero(v);
+#else
+ return raw_atomic_add_unless(v, 1, 0);
+#endif
+}
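+
+/*
+ * Illustrative sketch: the classic tryget pattern for an object whose
+ * refcount may concurrently drop to zero (names hypothetical):
+ *
+ *	if (raw_atomic_inc_not_zero(&obj->refs)) {
+ *		use(obj);
+ *		if (raw_atomic_dec_and_test(&obj->refs))
+ *			free_obj(obj);
+ *	}
+ */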
+
+/**
+ * raw_atomic_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_inc_unless_negative() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_inc_unless_negative(atomic_t *v)
+{
+#if defined(arch_atomic_inc_unless_negative)
+ return arch_atomic_inc_unless_negative(v);
+#else
+ int c = raw_atomic_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!raw_atomic_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+#endif
+}
+
+/**
+ * raw_atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_unless_positive() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_dec_unless_positive(atomic_t *v)
+{
+#if defined(arch_atomic_dec_unless_positive)
+ return arch_atomic_dec_unless_positive(v);
+#else
+ int c = raw_atomic_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!raw_atomic_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+#endif
+}
+
+/**
+ * raw_atomic_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_dec_if_positive() elsewhere.
+ *
+ * Return: The value of (@v - 1) at the time of the operation, regardless
+ * of whether @v was updated.
+ */
+static __always_inline int
+raw_atomic_dec_if_positive(atomic_t *v)
+{
+#if defined(arch_atomic_dec_if_positive)
+ return arch_atomic_dec_if_positive(v);
+#else
+ int dec, c = raw_atomic_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!raw_atomic_try_cmpxchg(v, &c, dec));
+
+ return dec;
+#endif
+}
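+
+/*
+ * Illustrative sketch: a non-blocking, semaphore-style "trydown" (names
+ * hypothetical); a negative return value means no credit was available
+ * and @v was left untouched:
+ *
+ *	if (raw_atomic_dec_if_positive(&sem_count) >= 0)
+ *		... credit acquired ...
+ *	else
+ *		... would block ...
+ */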
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
+/**
+ * raw_atomic64_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_read() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline s64
+raw_atomic64_read(const atomic64_t *v)
+{
+ return arch_atomic64_read(v);
+}
+
+/**
+ * raw_atomic64_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_read_acquire() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline s64
+raw_atomic64_read_acquire(const atomic64_t *v)
+{
+#if defined(arch_atomic64_read_acquire)
+ return arch_atomic64_read_acquire(v);
+#else
+ s64 ret;
+
+ if (__native_word(atomic64_t)) {
+ ret = smp_load_acquire(&(v)->counter);
+ } else {
+ ret = raw_atomic64_read(v);
+ __atomic_acquire_fence();
+ }
+
+ return ret;
+#endif
+}
+
+/**
+ * raw_atomic64_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @i: s64 value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_set() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_set(atomic64_t *v, s64 i)
+{
+ arch_atomic64_set(v, i);
+}
+
+/**
+ * raw_atomic64_set_release() - atomic set with release ordering
+ * @v: pointer to atomic64_t
+ * @i: s64 value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_set_release() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_set_release(atomic64_t *v, s64 i)
+{
+#if defined(arch_atomic64_set_release)
+ arch_atomic64_set_release(v, i);
+#else
+ if (__native_word(atomic64_t)) {
+ smp_store_release(&(v)->counter, i);
+ } else {
+ __atomic_release_fence();
+ raw_atomic64_set(v, i);
+ }
+#endif
+}
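+
+/*
+ * Illustrative sketch: raw_atomic64_set_release() pairs with
+ * raw_atomic64_read_acquire() for message passing (names hypothetical):
+ *
+ *	producer:
+ *		data = compute();
+ *		raw_atomic64_set_release(&ready, 1);
+ *
+ *	consumer:
+ *		if (raw_atomic64_read_acquire(&ready))
+ *			... data from compute() is visible here ...
+ */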
+
+/**
+ * raw_atomic64_add() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_add(s64 i, atomic64_t *v)
+{
+ arch_atomic64_add(i, v);
+}
+
+/**
+ * raw_atomic64_add_return() - atomic add with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_add_return(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_return)
+ return arch_atomic64_add_return(i, v);
+#elif defined(arch_atomic64_add_return_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_add_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_add_return"
+#endif
+}
+
+/**
+ * raw_atomic64_add_return_acquire() - atomic add with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_return_acquire)
+ return arch_atomic64_add_return_acquire(i, v);
+#elif defined(arch_atomic64_add_return_relaxed)
+ s64 ret = arch_atomic64_add_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_add_return)
+ return arch_atomic64_add_return(i, v);
+#else
+#error "Unable to define raw_atomic64_add_return_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_add_return_release() - atomic add with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_return_release)
+ return arch_atomic64_add_return_release(i, v);
+#elif defined(arch_atomic64_add_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_add_return_relaxed(i, v);
+#elif defined(arch_atomic64_add_return)
+ return arch_atomic64_add_return(i, v);
+#else
+#error "Unable to define raw_atomic64_add_return_release"
+#endif
+}
+
+/**
+ * raw_atomic64_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_return_relaxed)
+ return arch_atomic64_add_return_relaxed(i, v);
+#elif defined(arch_atomic64_add_return)
+ return arch_atomic64_add_return(i, v);
+#else
+#error "Unable to define raw_atomic64_add_return_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_add() - atomic add with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_add)
+ return arch_atomic64_fetch_add(i, v);
+#elif defined(arch_atomic64_fetch_add_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_add_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_fetch_add"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_add_acquire)
+ return arch_atomic64_fetch_add_acquire(i, v);
+#elif defined(arch_atomic64_fetch_add_relaxed)
+ s64 ret = arch_atomic64_fetch_add_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_add)
+ return arch_atomic64_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_add_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_add_release() - atomic add with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_add_release)
+ return arch_atomic64_fetch_add_release(i, v);
+#elif defined(arch_atomic64_fetch_add_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_add_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_add)
+ return arch_atomic64_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_add_release"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_add_relaxed)
+ return arch_atomic64_fetch_add_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_add)
+ return arch_atomic64_fetch_add(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_add_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_sub() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_sub(s64 i, atomic64_t *v)
+{
+ arch_atomic64_sub(i, v);
+}
+
+/**
+ * raw_atomic64_sub_return() - atomic subtract with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_sub_return(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_sub_return)
+ return arch_atomic64_sub_return(i, v);
+#elif defined(arch_atomic64_sub_return_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_sub_return_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_sub_return"
+#endif
+}
+
+/**
+ * raw_atomic64_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_sub_return_acquire)
+ return arch_atomic64_sub_return_acquire(i, v);
+#elif defined(arch_atomic64_sub_return_relaxed)
+ s64 ret = arch_atomic64_sub_return_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_sub_return)
+ return arch_atomic64_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic64_sub_return_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_sub_return_release() - atomic subtract with release ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_sub_return_release)
+ return arch_atomic64_sub_return_release(i, v);
+#elif defined(arch_atomic64_sub_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_sub_return_relaxed(i, v);
+#elif defined(arch_atomic64_sub_return)
+ return arch_atomic64_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic64_sub_return_release"
+#endif
+}
+
+/**
+ * raw_atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_sub_return_relaxed)
+ return arch_atomic64_sub_return_relaxed(i, v);
+#elif defined(arch_atomic64_sub_return)
+ return arch_atomic64_sub_return(i, v);
+#else
+#error "Unable to define raw_atomic64_sub_return_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_sub() - atomic subtract with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_sub() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_sub)
+ return arch_atomic64_fetch_sub(i, v);
+#elif defined(arch_atomic64_fetch_sub_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_fetch_sub"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_sub_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_sub_acquire)
+ return arch_atomic64_fetch_sub_acquire(i, v);
+#elif defined(arch_atomic64_fetch_sub_relaxed)
+ s64 ret = arch_atomic64_fetch_sub_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_sub)
+ return arch_atomic64_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_sub_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_sub_release() - atomic subtract with release ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_sub_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_sub_release)
+ return arch_atomic64_fetch_sub_release(i, v);
+#elif defined(arch_atomic64_fetch_sub_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_sub)
+ return arch_atomic64_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_sub_release"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_sub_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_sub_relaxed)
+ return arch_atomic64_fetch_sub_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_sub)
+ return arch_atomic64_fetch_sub(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_sub_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_inc(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc)
+ arch_atomic64_inc(v);
+#else
+ raw_atomic64_add(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_inc_return(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_return)
+ return arch_atomic64_inc_return(v);
+#elif defined(arch_atomic64_inc_return_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_add_return(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_inc_return_acquire(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_return_acquire)
+ return arch_atomic64_inc_return_acquire(v);
+#elif defined(arch_atomic64_inc_return_relaxed)
+ s64 ret = arch_atomic64_inc_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_inc_return)
+ return arch_atomic64_inc_return(v);
+#else
+ return raw_atomic64_add_return_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_inc_return_release(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_return_release)
+ return arch_atomic64_inc_return_release(v);
+#elif defined(arch_atomic64_inc_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_inc_return_relaxed(v);
+#elif defined(arch_atomic64_inc_return)
+ return arch_atomic64_inc_return(v);
+#else
+ return raw_atomic64_add_return_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_inc_return_relaxed(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_return_relaxed)
+ return arch_atomic64_inc_return_relaxed(v);
+#elif defined(arch_atomic64_inc_return)
+ return arch_atomic64_inc_return(v);
+#else
+ return raw_atomic64_add_return_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_inc() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_inc(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_inc)
+ return arch_atomic64_fetch_inc(v);
+#elif defined(arch_atomic64_fetch_inc_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_fetch_add(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_inc_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_inc_acquire)
+ return arch_atomic64_fetch_inc_acquire(v);
+#elif defined(arch_atomic64_fetch_inc_relaxed)
+ s64 ret = arch_atomic64_fetch_inc_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_inc)
+ return arch_atomic64_fetch_inc(v);
+#else
+ return raw_atomic64_fetch_add_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_inc_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_inc_release(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_inc_release)
+ return arch_atomic64_fetch_inc_release(v);
+#elif defined(arch_atomic64_fetch_inc_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_inc_relaxed(v);
+#elif defined(arch_atomic64_fetch_inc)
+ return arch_atomic64_fetch_inc(v);
+#else
+ return raw_atomic64_fetch_add_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_inc_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_inc_relaxed)
+ return arch_atomic64_fetch_inc_relaxed(v);
+#elif defined(arch_atomic64_fetch_inc)
+ return arch_atomic64_fetch_inc(v);
+#else
+ return raw_atomic64_fetch_add_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_dec(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec)
+ arch_atomic64_dec(v);
+#else
+ raw_atomic64_sub(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_dec_return(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_return)
+ return arch_atomic64_dec_return(v);
+#elif defined(arch_atomic64_dec_return_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_sub_return(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_dec_return_acquire(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_return_acquire)
+ return arch_atomic64_dec_return_acquire(v);
+#elif defined(arch_atomic64_dec_return_relaxed)
+ s64 ret = arch_atomic64_dec_return_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_dec_return)
+ return arch_atomic64_dec_return(v);
+#else
+ return raw_atomic64_sub_return_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_dec_return_release(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_return_release)
+ return arch_atomic64_dec_return_release(v);
+#elif defined(arch_atomic64_dec_return_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_dec_return_relaxed(v);
+#elif defined(arch_atomic64_dec_return)
+ return arch_atomic64_dec_return(v);
+#else
+ return raw_atomic64_sub_return_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+raw_atomic64_dec_return_relaxed(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_return_relaxed)
+ return arch_atomic64_dec_return_relaxed(v);
+#elif defined(arch_atomic64_dec_return)
+ return arch_atomic64_dec_return(v);
+#else
+ return raw_atomic64_sub_return_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_dec() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_dec(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_dec)
+ return arch_atomic64_fetch_dec(v);
+#elif defined(arch_atomic64_fetch_dec_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_fetch_sub(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_dec_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_dec_acquire)
+ return arch_atomic64_fetch_dec_acquire(v);
+#elif defined(arch_atomic64_fetch_dec_relaxed)
+ s64 ret = arch_atomic64_fetch_dec_relaxed(v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_dec)
+ return arch_atomic64_fetch_dec(v);
+#else
+ return raw_atomic64_fetch_sub_acquire(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_dec_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_dec_release(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_dec_release)
+ return arch_atomic64_fetch_dec_release(v);
+#elif defined(arch_atomic64_fetch_dec_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_dec_relaxed(v);
+#elif defined(arch_atomic64_fetch_dec)
+ return arch_atomic64_fetch_dec(v);
+#else
+ return raw_atomic64_fetch_sub_release(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_dec_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_dec_relaxed)
+ return arch_atomic64_fetch_dec_relaxed(v);
+#elif defined(arch_atomic64_fetch_dec)
+ return arch_atomic64_fetch_dec(v);
+#else
+ return raw_atomic64_fetch_sub_relaxed(1, v);
+#endif
+}
+
+/**
+ * raw_atomic64_and() - atomic bitwise AND with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_and() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_and(s64 i, atomic64_t *v)
+{
+ arch_atomic64_and(i, v);
+}
+
+/**
+ * raw_atomic64_fetch_and() - atomic bitwise AND with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_and() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_and)
+ return arch_atomic64_fetch_and(i, v);
+#elif defined(arch_atomic64_fetch_and_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_and_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_fetch_and"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_and_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_and_acquire)
+ return arch_atomic64_fetch_and_acquire(i, v);
+#elif defined(arch_atomic64_fetch_and_relaxed)
+ s64 ret = arch_atomic64_fetch_and_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_and)
+ return arch_atomic64_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_and_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_and_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_and_release)
+ return arch_atomic64_fetch_and_release(i, v);
+#elif defined(arch_atomic64_fetch_and_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_and_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_and)
+ return arch_atomic64_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_and_release"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_and_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_and_relaxed)
+ return arch_atomic64_fetch_and_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_and)
+ return arch_atomic64_fetch_and(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_and_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_andnot() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_andnot(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_andnot)
+ arch_atomic64_andnot(i, v);
+#else
+ raw_atomic64_and(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_andnot() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_andnot)
+ return arch_atomic64_fetch_andnot(i, v);
+#elif defined(arch_atomic64_fetch_andnot_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_fetch_and(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_andnot_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_andnot_acquire)
+ return arch_atomic64_fetch_andnot_acquire(i, v);
+#elif defined(arch_atomic64_fetch_andnot_relaxed)
+ s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_andnot)
+ return arch_atomic64_fetch_andnot(i, v);
+#else
+ return raw_atomic64_fetch_and_acquire(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_andnot_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_andnot_release)
+ return arch_atomic64_fetch_andnot_release(i, v);
+#elif defined(arch_atomic64_fetch_andnot_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_andnot)
+ return arch_atomic64_fetch_andnot(i, v);
+#else
+ return raw_atomic64_fetch_and_release(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_andnot_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_andnot_relaxed)
+ return arch_atomic64_fetch_andnot_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_andnot)
+ return arch_atomic64_fetch_andnot(i, v);
+#else
+ return raw_atomic64_fetch_and_relaxed(~i, v);
+#endif
+}
+
+/**
+ * raw_atomic64_or() - atomic bitwise OR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_or() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_or(s64 i, atomic64_t *v)
+{
+ arch_atomic64_or(i, v);
+}
+
+/**
+ * raw_atomic64_fetch_or() - atomic bitwise OR with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_or() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_or)
+ return arch_atomic64_fetch_or(i, v);
+#elif defined(arch_atomic64_fetch_or_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_or_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_fetch_or"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_or_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_or_acquire)
+ return arch_atomic64_fetch_or_acquire(i, v);
+#elif defined(arch_atomic64_fetch_or_relaxed)
+ s64 ret = arch_atomic64_fetch_or_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_or)
+ return arch_atomic64_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_or_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_or_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_or_release)
+ return arch_atomic64_fetch_or_release(i, v);
+#elif defined(arch_atomic64_fetch_or_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_or_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_or)
+ return arch_atomic64_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_or_release"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_or_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_or_relaxed)
+ return arch_atomic64_fetch_or_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_or)
+ return arch_atomic64_fetch_or(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_or_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xor() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic64_xor(s64 i, atomic64_t *v)
+{
+ arch_atomic64_xor(i, v);
+}
+
+/**
+ * raw_atomic64_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_xor() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_xor)
+ return arch_atomic64_fetch_xor(i, v);
+#elif defined(arch_atomic64_fetch_xor_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+#error "Unable to define raw_atomic64_fetch_xor"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_xor_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_xor_acquire)
+ return arch_atomic64_fetch_xor_acquire(i, v);
+#elif defined(arch_atomic64_fetch_xor_relaxed)
+ s64 ret = arch_atomic64_fetch_xor_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_fetch_xor)
+ return arch_atomic64_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_xor_acquire"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_xor_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_xor_release)
+ return arch_atomic64_fetch_xor_release(i, v);
+#elif defined(arch_atomic64_fetch_xor_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_xor)
+ return arch_atomic64_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_xor_release"
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_xor_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_fetch_xor_relaxed)
+ return arch_atomic64_fetch_xor_relaxed(i, v);
+#elif defined(arch_atomic64_fetch_xor)
+ return arch_atomic64_fetch_xor(i, v);
+#else
+#error "Unable to define raw_atomic64_fetch_xor_relaxed"
+#endif
+}
+
+/**
+ * raw_atomic64_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_xchg(atomic64_t *v, s64 new)
+{
+#if defined(arch_atomic64_xchg)
+ return arch_atomic64_xchg(v, new);
+#elif defined(arch_atomic64_xchg_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_xchg_relaxed(v, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_xchg(&v->counter, new);
+#endif
+}
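+
+/*
+ * Illustrative sketch (names hypothetical): a full-ordering exchange is a
+ * natural way to drain an accumulator atomically while concurrent updaters
+ * keep adding to it.
+ *
+ *	static atomic64_t pending_bytes = ATOMIC64_INIT(0);
+ *
+ *	static inline s64 drain_pending(void)
+ *	{
+ *		return raw_atomic64_xchg(&pending_bytes, 0);	// read and reset
+ *	}
+ */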
+
+/**
+ * raw_atomic64_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_xchg_acquire(atomic64_t *v, s64 new)
+{
+#if defined(arch_atomic64_xchg_acquire)
+ return arch_atomic64_xchg_acquire(v, new);
+#elif defined(arch_atomic64_xchg_relaxed)
+ s64 ret = arch_atomic64_xchg_relaxed(v, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_xchg)
+ return arch_atomic64_xchg(v, new);
+#else
+ return raw_xchg_acquire(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic64_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_xchg_release(atomic64_t *v, s64 new)
+{
+#if defined(arch_atomic64_xchg_release)
+ return arch_atomic64_xchg_release(v, new);
+#elif defined(arch_atomic64_xchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_xchg_relaxed(v, new);
+#elif defined(arch_atomic64_xchg)
+ return arch_atomic64_xchg(v, new);
+#else
+ return raw_xchg_release(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_xchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
+{
+#if defined(arch_atomic64_xchg_relaxed)
+ return arch_atomic64_xchg_relaxed(v, new);
+#elif defined(arch_atomic64_xchg)
+ return arch_atomic64_xchg(v, new);
+#else
+ return raw_xchg_relaxed(&v->counter, new);
+#endif
+}
+
+/**
+ * raw_atomic64_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+#if defined(arch_atomic64_cmpxchg)
+ return arch_atomic64_cmpxchg(v, old, new);
+#elif defined(arch_atomic64_cmpxchg_relaxed)
+ s64 ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_cmpxchg(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+#if defined(arch_atomic64_cmpxchg_acquire)
+ return arch_atomic64_cmpxchg_acquire(v, old, new);
+#elif defined(arch_atomic64_cmpxchg_relaxed)
+ s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_cmpxchg)
+ return arch_atomic64_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_acquire(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_cmpxchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+#if defined(arch_atomic64_cmpxchg_release)
+ return arch_atomic64_cmpxchg_release(v, old, new);
+#elif defined(arch_atomic64_cmpxchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic64_cmpxchg)
+ return arch_atomic64_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_release(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
+{
+#if defined(arch_atomic64_cmpxchg_relaxed)
+ return arch_atomic64_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic64_cmpxchg)
+ return arch_atomic64_cmpxchg(v, old, new);
+#else
+ return raw_cmpxchg_relaxed(&v->counter, old, new);
+#endif
+}
+
+/**
+ * raw_atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+#if defined(arch_atomic64_try_cmpxchg)
+ return arch_atomic64_try_cmpxchg(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg_relaxed)
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ s64 r, o = *old;
+ r = raw_atomic64_cmpxchg(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
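+
+/*
+ * Illustrative sketch (helper name hypothetical): try_cmpxchg() is built
+ * for retry loops; on failure it refreshes @old with the current value,
+ * so the loop needs no separate re-read.
+ *
+ *	static inline void track_max(atomic64_t *max, s64 sample)
+ *	{
+ *		s64 cur = raw_atomic64_read(max);
+ *
+ *		do {
+ *			if (sample <= cur)
+ *				return;		// current max already larger
+ *		} while (!raw_atomic64_try_cmpxchg(max, &cur, sample));
+ *	}
+ */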
+
+/**
+ * raw_atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+#if defined(arch_atomic64_try_cmpxchg_acquire)
+ return arch_atomic64_try_cmpxchg_acquire(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg_relaxed)
+ bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_try_cmpxchg)
+ return arch_atomic64_try_cmpxchg(v, old, new);
+#else
+ s64 r, o = *old;
+ r = raw_atomic64_cmpxchg_acquire(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+#if defined(arch_atomic64_try_cmpxchg_release)
+ return arch_atomic64_try_cmpxchg_release(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg)
+ return arch_atomic64_try_cmpxchg(v, old, new);
+#else
+ s64 r, o = *old;
+ r = raw_atomic64_cmpxchg_release(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+#if defined(arch_atomic64_try_cmpxchg_relaxed)
+ return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+#elif defined(arch_atomic64_try_cmpxchg)
+ return arch_atomic64_try_cmpxchg(v, old, new);
+#else
+ s64 r, o = *old;
+ r = raw_atomic64_cmpxchg_relaxed(v, o, new);
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
+#endif
+}
+
+/**
+ * raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_sub_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_sub_and_test)
+ return arch_atomic64_sub_and_test(i, v);
+#else
+ return raw_atomic64_sub_return(i, v) == 0;
+#endif
+}
+
+/**
+ * raw_atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_dec_and_test(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_and_test)
+ return arch_atomic64_dec_and_test(v);
+#else
+ return raw_atomic64_dec_return(v) == 0;
+#endif
+}
+
+/**
+ * raw_atomic64_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_inc_and_test(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_and_test)
+ return arch_atomic64_inc_and_test(v);
+#else
+ return raw_atomic64_inc_return(v) == 0;
+#endif
+}
+
+/**
+ * raw_atomic64_add_negative() - atomic add and test if negative with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_negative() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_add_negative(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_negative)
+ return arch_atomic64_add_negative(i, v);
+#elif defined(arch_atomic64_add_negative_relaxed)
+ bool ret;
+ __atomic_pre_full_fence();
+ ret = arch_atomic64_add_negative_relaxed(i, v);
+ __atomic_post_full_fence();
+ return ret;
+#else
+ return raw_atomic64_add_return(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_negative_acquire() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_negative_acquire)
+ return arch_atomic64_add_negative_acquire(i, v);
+#elif defined(arch_atomic64_add_negative_relaxed)
+ bool ret = arch_atomic64_add_negative_relaxed(i, v);
+ __atomic_acquire_fence();
+ return ret;
+#elif defined(arch_atomic64_add_negative)
+ return arch_atomic64_add_negative(i, v);
+#else
+ return raw_atomic64_add_return_acquire(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic64_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_negative_release() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_negative_release)
+ return arch_atomic64_add_negative_release(i, v);
+#elif defined(arch_atomic64_add_negative_relaxed)
+ __atomic_release_fence();
+ return arch_atomic64_add_negative_relaxed(i, v);
+#elif defined(arch_atomic64_add_negative)
+ return arch_atomic64_add_negative(i, v);
+#else
+ return raw_atomic64_add_return_release(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_negative_relaxed() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
+{
+#if defined(arch_atomic64_add_negative_relaxed)
+ return arch_atomic64_add_negative_relaxed(i, v);
+#elif defined(arch_atomic64_add_negative)
+ return arch_atomic64_add_negative(i, v);
+#else
+ return raw_atomic64_add_return_relaxed(i, v) < 0;
+#endif
+}
+
+/**
+ * raw_atomic64_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic64_t
+ * @a: s64 value to add
+ * @u: s64 value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+#if defined(arch_atomic64_fetch_add_unless)
+ return arch_atomic64_fetch_add_unless(v, a, u);
+#else
+ s64 c = raw_atomic64_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!raw_atomic64_try_cmpxchg(v, &c, c + a));
+
+ return c;
+#endif
+}
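+
+/*
+ * Illustrative sketch (the sentinel and helper are hypothetical):
+ * fetch_add_unless() can freeze a counter at a sentinel value, here -1,
+ * which later increments will not disturb.
+ *
+ *	#define COUNTER_FROZEN	(-1LL)
+ *
+ *	static inline bool count_event(atomic64_t *ctr)
+ *	{
+ *		// add 1 unless frozen; report whether the event was counted
+ *		return raw_atomic64_fetch_add_unless(ctr, 1, COUNTER_FROZEN) != COUNTER_FROZEN;
+ *	}
+ */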
+
+/**
+ * raw_atomic64_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic64_t
+ * @a: s64 value to add
+ * @u: s64 value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+#if defined(arch_atomic64_add_unless)
+ return arch_atomic64_add_unless(v, a, u);
+#else
+ return raw_atomic64_fetch_add_unless(v, a, u) != u;
+#endif
+}
+
+/**
+ * raw_atomic64_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_inc_not_zero(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_not_zero)
+ return arch_atomic64_inc_not_zero(v);
+#else
+ return raw_atomic64_add_unless(v, 1, 0);
+#endif
+}
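+
+/*
+ * Illustrative sketch (struct and names hypothetical): inc_not_zero() is
+ * the classic "take a reference only if the object is still live" idiom;
+ * a count that has reached zero must never be resurrected.
+ *
+ *	struct obj {
+ *		atomic64_t refs;
+ *	};
+ *
+ *	static inline bool obj_tryget(struct obj *o)
+ *	{
+ *		return raw_atomic64_inc_not_zero(&o->refs);
+ *	}
+ */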
+
+/**
+ * raw_atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_inc_unless_negative(atomic64_t *v)
+{
+#if defined(arch_atomic64_inc_unless_negative)
+ return arch_atomic64_inc_unless_negative(v);
+#else
+ s64 c = raw_atomic64_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!raw_atomic64_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+#endif
+}
+
+/**
+ * raw_atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic64_dec_unless_positive(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_unless_positive)
+ return arch_atomic64_dec_unless_positive(v);
+#else
+ s64 c = raw_atomic64_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!raw_atomic64_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+#endif
+}
+
+/**
+ * raw_atomic64_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere.
+ *
+ * Return: The original value of @v minus one, regardless of whether @v was updated.
+ */
+static __always_inline s64
+raw_atomic64_dec_if_positive(atomic64_t *v)
+{
+#if defined(arch_atomic64_dec_if_positive)
+ return arch_atomic64_dec_if_positive(v);
+#else
+ s64 dec, c = raw_atomic64_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!raw_atomic64_try_cmpxchg(v, &c, dec));
+
+ return dec;
+#endif
+}
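+
+/*
+ * Illustrative sketch (names hypothetical): dec_if_positive() implements
+ * "take one credit if any remain"; a negative return means no credit was
+ * available and @v was left unchanged.
+ *
+ *	static inline bool take_credit(atomic64_t *credits)
+ *	{
+ *		return raw_atomic64_dec_if_positive(credits) >= 0;
+ *	}
+ */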
+
+#endif /* _LINUX_ATOMIC_FALLBACK_H */
+// b565db590afeeff0d7c9485ccbca5bb6e155749f
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
new file mode 100644
index 000000000000..37ab6314a9f7
--- /dev/null
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -0,0 +1,5053 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-instrumented.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+/*
+ * This file provides atomic operations with explicit instrumentation (e.g.
+ * KASAN, KCSAN), which should be used unless it is necessary to avoid
+ * instrumentation. Where it is necessary to avoid instrumentation, the
+ * raw_atomic*() operations should be used.
+ */
+#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
+#define _LINUX_ATOMIC_INSTRUMENTED_H
+
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/instrumented.h>
+
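+/*
+ * Illustrative sketch (function names hypothetical): ordinary kernel code
+ * uses the instrumented atomic_*() wrappers so that KASAN/KCSAN observe
+ * the accesses; code marked noinstr must use the raw_atomic_*() forms.
+ *
+ *	static atomic_t events;
+ *
+ *	void ordinary_path(void)
+ *	{
+ *		atomic_inc(&events);		// instrumented access
+ *	}
+ *
+ *	noinstr void early_entry_path(void)
+ *	{
+ *		raw_atomic_inc(&events);	// no instrumentation emitted
+ *	}
+ */
+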
+/**
+ * atomic_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_read() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline int
+atomic_read(const atomic_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic_read(v);
+}
+
+/**
+ * atomic_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_read_acquire() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline int
+atomic_read_acquire(const atomic_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic_read_acquire(v);
+}
+
+/**
+ * atomic_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic_t
+ * @i: int value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_set() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_set(atomic_t *v, int i)
+{
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic_set(v, i);
+}
+
+/**
+ * atomic_set_release() - atomic set with release ordering
+ * @v: pointer to atomic_t
+ * @i: int value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_set_release() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_set_release(atomic_t *v, int i)
+{
+ kcsan_release();
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic_set_release(v, i);
+}
+
+/**
+ * atomic_add() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_add(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_add(i, v);
+}
+
+/**
+ * atomic_add_return() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_add_return(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_return(i, v);
+}
+
+/**
+ * atomic_add_return_acquire() - atomic add with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_add_return_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_return_acquire(i, v);
+}
+
+/**
+ * atomic_add_return_release() - atomic add with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_add_return_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_return_release(i, v);
+}
+
+/**
+ * atomic_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_add_return_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_return_relaxed(i, v);
+}
+
+/**
+ * atomic_fetch_add() - atomic add with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_add(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_add(i, v);
+}
+
+/**
+ * atomic_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_add_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_add_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_add_release() - atomic add with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_add_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_add_release(i, v);
+}
+
+/**
+ * atomic_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_add_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_add_relaxed(i, v);
+}
+
+/**
+ * atomic_sub() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_sub(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_sub(i, v);
+}
+
+/**
+ * atomic_sub_return() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_sub_return(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_sub_return(i, v);
+}
+
+/**
+ * atomic_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_sub_return_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_sub_return_acquire(i, v);
+}
+
+/**
+ * atomic_sub_return_release() - atomic subtract with release ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_sub_return_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_sub_return_release(i, v);
+}
+
+/**
+ * atomic_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_sub_return_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_sub_return_relaxed(i, v);
+}
+
+/**
+ * atomic_fetch_sub() - atomic subtract with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_sub(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_sub(i, v);
+}
+
+/**
+ * atomic_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_sub_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_sub_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_sub_release() - atomic subtract with release ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_sub_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_sub_release(i, v);
+}
+
+/**
+ * atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_sub_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_sub_relaxed(i, v);
+}
+
+/**
+ * atomic_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_inc(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_inc(v);
+}
+
+/**
+ * atomic_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_inc_return(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_return(v);
+}
+
+/**
+ * atomic_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_inc_return_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_return_acquire(v);
+}
+
+/**
+ * atomic_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_inc_return_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_return_release(v);
+}
+
+/**
+ * atomic_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_inc_return_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_return_relaxed(v);
+}
+
+/**
+ * atomic_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_inc() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_inc(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_inc(v);
+}
+
+/**
+ * atomic_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_inc_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_inc_acquire(v);
+}
+
+/**
+ * atomic_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_inc_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_inc_release(v);
+}
+
+/**
+ * atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_inc_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_inc_relaxed(v);
+}
+
+/**
+ * atomic_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_dec(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_dec(v);
+}
+
+/**
+ * atomic_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_dec_return(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_return(v);
+}
+
+/**
+ * atomic_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_dec_return_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_return_acquire(v);
+}
+
+/**
+ * atomic_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_dec_return_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_return_release(v);
+}
+
+/**
+ * atomic_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline int
+atomic_dec_return_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_return_relaxed(v);
+}
+
+/**
+ * atomic_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_dec() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_dec(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_dec(v);
+}
+
+/**
+ * atomic_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_dec_acquire(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_dec_acquire(v);
+}
+
+/**
+ * atomic_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_dec_release(atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_dec_release(v);
+}
+
+/**
+ * atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_dec_relaxed(atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_dec_relaxed(v);
+}
+
+/**
+ * atomic_and() - atomic bitwise AND with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_and() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_and(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_and(i, v);
+}
+
+/**
+ * atomic_fetch_and() - atomic bitwise AND with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_and(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_and(i, v);
+}
+
+/**
+ * atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_and_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_and_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_and_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_and_release(i, v);
+}
+
+/**
+ * atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_and_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_and_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_and_relaxed(i, v);
+}
+
+/**
+ * atomic_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_andnot() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_andnot(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_andnot(i, v);
+}
+
+/**
+ * atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_andnot(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_andnot(i, v);
+}
+
+/**
+ * atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_andnot_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_andnot_release(i, v);
+}
+
+/**
+ * atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_andnot_relaxed(i, v);
+}
+
+/**
+ * atomic_or() - atomic bitwise OR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_or() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_or(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_or(i, v);
+}
+
+/**
+ * atomic_fetch_or() - atomic bitwise OR with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_or() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_or(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_or(i, v);
+}
+
+/**
+ * atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_or_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_or_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_or_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_or_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_or_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_or_release(i, v);
+}
+
+/**
+ * atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_or_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_or_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_or_relaxed(i, v);
+}
+
+/**
+ * atomic_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xor() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_xor(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_xor(i, v);
+}
+
+/**
+ * atomic_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_xor() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_xor(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_xor(i, v);
+}
+
+/**
+ * atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_xor_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_xor_acquire(i, v);
+}
+
+/**
+ * atomic_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_xor_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_xor_release(i, v);
+}
+
+/**
+ * atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: int value
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_xor_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_xor_relaxed(i, v);
+}
+
+/**
+ * atomic_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_xchg(atomic_t *v, int new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_xchg(v, new);
+}
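+
+/*
+ * Usage sketch (editorial addition; the @pending_events field and
+ * process_events() are hypothetical): atomic_xchg() commonly drains an
+ * accumulated value, claiming it and resetting it in a single atomic step.
+ *
+ *	int pending = atomic_xchg(&dev->pending_events, 0);
+ *
+ *	if (pending)
+ *		process_events(dev, pending);
+ */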
+
+/**
+ * atomic_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_xchg_acquire(atomic_t *v, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_xchg_acquire(v, new);
+}
+
+/**
+ * atomic_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_xchg_release(atomic_t *v, int new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_xchg_release(v, new);
+}
+
+/**
+ * atomic_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @new: int value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_xchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_xchg_relaxed(atomic_t *v, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_xchg_relaxed(v, new);
+}
+
+/**
+ * atomic_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_cmpxchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_cmpxchg(v, old, new);
+}
+
+/**
+ * atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @old: int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_cmpxchg_relaxed(v, old, new);
+}
+
+/**
+ * atomic_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic_try_cmpxchg(v, old, new);
+}
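+
+/*
+ * Usage sketch (editorial addition; counter_inc_below() is hypothetical):
+ * the canonical atomic_try_cmpxchg() retry loop, here implementing a
+ * bounded increment. Because @old is refreshed on failure, the loop body
+ * needs no explicit re-read of @v.
+ *
+ *	static bool counter_inc_below(atomic_t *v, int limit)
+ *	{
+ *		int old = atomic_read(v);
+ *
+ *		do {
+ *			if (old >= limit)
+ *				return false;
+ *		} while (!atomic_try_cmpxchg(v, &old, old + 1));
+ *
+ *		return true;
+ *	}
+ */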
+
+/**
+ * atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_acquire() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic_try_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_release() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic_try_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_t
+ * @old: pointer to int value to compare with
+ * @new: int value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_relaxed() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+
+/**
+ * atomic_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: int value to subtract
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_sub_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_sub_and_test(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_sub_and_test(i, v);
+}
+
+/**
+ * atomic_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_dec_and_test(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_and_test(v);
+}
+
+/**
+ * atomic_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_inc_and_test(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_and_test(v);
+}
+
+/**
+ * atomic_add_negative() - atomic add and test if negative with full ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_negative() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_add_negative(int i, atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_negative(i, v);
+}
+
+/**
+ * atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_negative_acquire() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_add_negative_acquire(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_negative_acquire(i, v);
+}
+
+/**
+ * atomic_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_negative_release() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_add_negative_release(int i, atomic_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_negative_release(i, v);
+}
+
+/**
+ * atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: int value to add
+ * @v: pointer to atomic_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_negative_relaxed() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_add_negative_relaxed(int i, atomic_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_negative_relaxed(i, v);
+}
+
+/**
+ * atomic_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_t
+ * @a: int value to add
+ * @u: int value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_fetch_add_unless() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline int
+atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_fetch_add_unless(v, a, u);
+}
+
+/**
+ * atomic_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_t
+ * @a: int value to add
+ * @u: int value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_add_unless() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_add_unless(atomic_t *v, int a, int u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_add_unless(v, a, u);
+}
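+
+/*
+ * Usage sketch (editorial addition; the @usage counter is hypothetical):
+ * atomic_add_unless() is the building block for counters that must never
+ * cross a sentinel value @u, e.g. saturating at INT_MAX.
+ *
+ *	if (!atomic_add_unless(&usage, 1, INT_MAX))
+ *		pr_warn("usage counter saturated\n");
+ */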
+
+/**
+ * atomic_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_not_zero() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_inc_not_zero(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_not_zero(v);
+}
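+
+/*
+ * Usage sketch (editorial addition; @obj, its @refcount field and the
+ * @objects IDR are hypothetical): atomic_inc_not_zero() is the usual way
+ * to take a reference to an object found under RCU, where a count of zero
+ * means the object is already on its way to being freed.
+ *
+ *	rcu_read_lock();
+ *	obj = idr_find(&objects, id);
+ *	if (obj && !atomic_inc_not_zero(&obj->refcount))
+ *		obj = NULL;	// already dying; treat as not found
+ *	rcu_read_unlock();
+ */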
+
+/**
+ * atomic_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_inc_unless_negative() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_inc_unless_negative(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_inc_unless_negative(v);
+}
+
+/**
+ * atomic_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_unless_positive() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_dec_unless_positive(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_unless_positive(v);
+}
+
+/**
+ * atomic_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_dec_if_positive() there.
+ *
+ * Return: The original value of @v minus one, regardless of whether @v was
+ * updated.
+ */
+static __always_inline int
+atomic_dec_if_positive(atomic_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_dec_if_positive(v);
+}
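+
+/*
+ * Usage sketch (editorial addition; @pool->available is hypothetical):
+ * atomic_dec_if_positive() suits semaphore-like counters, where the return
+ * value distinguishes a successful grab (>= 0) from an empty pool (< 0)
+ * without ever letting the counter underflow.
+ *
+ *	if (atomic_dec_if_positive(&pool->available) < 0)
+ *		return -EBUSY;
+ */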
+
+/**
+ * atomic64_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_read() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline s64
+atomic64_read(const atomic64_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic64_read(v);
+}
+
+/**
+ * atomic64_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_read_acquire() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline s64
+atomic64_read_acquire(const atomic64_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic64_read_acquire(v);
+}
+
+/**
+ * atomic64_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @i: s64 value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_set() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_set(atomic64_t *v, s64 i)
+{
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic64_set(v, i);
+}
+
+/**
+ * atomic64_set_release() - atomic set with release ordering
+ * @v: pointer to atomic64_t
+ * @i: s64 value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_set_release() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_set_release(atomic64_t *v, s64 i)
+{
+ kcsan_release();
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic64_set_release(v, i);
+}
+
+/**
+ * atomic64_add() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_add(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_add(i, v);
+}
+
+/**
+ * atomic64_add_return() - atomic add with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_add_return(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_return(i, v);
+}
+
+/**
+ * atomic64_add_return_acquire() - atomic add with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_return_acquire(i, v);
+}
+
+/**
+ * atomic64_add_return_release() - atomic add with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_return_release(i, v);
+}
+
+/**
+ * atomic64_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_add_return_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_return_relaxed(i, v);
+}
+
+/**
+ * atomic64_fetch_add() - atomic add with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_add(i, v);
+}
+
+/**
+ * atomic64_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_add_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_add_release() - atomic add with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_add_release(i, v);
+}
+
+/**
+ * atomic64_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_add_relaxed(i, v);
+}
+
+/**
+ * atomic64_sub() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_sub(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_sub(i, v);
+}
+
+/**
+ * atomic64_sub_return() - atomic subtract with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_sub_return(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_sub_return(i, v);
+}
+
+/**
+ * atomic64_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_sub_return_acquire(i, v);
+}
+
+/**
+ * atomic64_sub_return_release() - atomic subtract with release ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_sub_return_release(i, v);
+}
+
+/**
+ * atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_sub_return_relaxed(i, v);
+}
+
+/**
+ * atomic64_fetch_sub() - atomic subtract with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_sub(i, v);
+}
+
+/**
+ * atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_sub_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_sub_release() - atomic subtract with release ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_sub_release(i, v);
+}
+
+/**
+ * atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_sub_relaxed(i, v);
+}
+
+/**
+ * atomic64_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_inc(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_inc(v);
+}
+
+/**
+ * atomic64_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_inc_return(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_return(v);
+}
+
+/**
+ * atomic64_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_inc_return_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_return_acquire(v);
+}
+
+/**
+ * atomic64_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_inc_return_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_return_release(v);
+}
+
+/**
+ * atomic64_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_inc_return_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_return_relaxed(v);
+}
+
+/**
+ * atomic64_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_inc(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_inc(v);
+}
+
+/**
+ * atomic64_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_inc_acquire(v);
+}
+
+/**
+ * atomic64_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_inc_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_inc_release(v);
+}
+
+/**
+ * atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_inc_relaxed(v);
+}
+
+/**
+ * atomic64_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_dec(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_dec(v);
+}
+
+/**
+ * atomic64_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_dec_return(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_return(v);
+}
+
+/**
+ * atomic64_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_dec_return_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_return_acquire(v);
+}
+
+/**
+ * atomic64_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_dec_return_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_return_release(v);
+}
+
+/**
+ * atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline s64
+atomic64_dec_return_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_return_relaxed(v);
+}
+
+/**
+ * atomic64_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_dec(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_dec(v);
+}
+
+/**
+ * atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_dec_acquire(v);
+}
+
+/**
+ * atomic64_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_dec_release(atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_dec_release(v);
+}
+
+/**
+ * atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_dec_relaxed(v);
+}
+
+/**
+ * atomic64_and() - atomic bitwise AND with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_and() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_and(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_and(i, v);
+}
+
+/**
+ * atomic64_fetch_and() - atomic bitwise AND with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_and() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_and(i, v);
+}
+
+/**
+ * atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_and_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_and_release(i, v);
+}
+
+/**
+ * atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_and_relaxed(i, v);
+}
+
+/**
+ * atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_andnot() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_andnot(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_andnot(i, v);
+}
+
+/**
+ * atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_andnot(i, v);
+}
+
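+/*
+ * Usage sketch (illustrative; example_clear_flag() and its flag layout
+ * are hypothetical): fetch_andnot clears the requested bits and returns
+ * the previous value, letting the caller detect whether the flag had
+ * been set.
+ *
+ *    static bool example_clear_flag(atomic64_t *flags, s64 flag)
+ *    {
+ *            return atomic64_fetch_andnot(flag, flags) & flag;
+ *    }
+ */
+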
+/**
+ * atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_andnot_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_andnot_release(i, v);
+}
+
+/**
+ * atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_andnot_relaxed(i, v);
+}
+
+/**
+ * atomic64_or() - atomic bitwise OR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_or() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_or(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_or(i, v);
+}
+
+/**
+ * atomic64_fetch_or() - atomic bitwise OR with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_or() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_or(i, v);
+}
+
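+/*
+ * Usage sketch (illustrative; example_test_and_set_flag() is
+ * hypothetical): because fetch_or returns the previous value, it doubles
+ * as a test-and-set: a nonzero result means @flag was already set.
+ *
+ *    static bool example_test_and_set_flag(atomic64_t *flags, s64 flag)
+ *    {
+ *            return atomic64_fetch_or(flag, flags) & flag;
+ *    }
+ */
+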
+/**
+ * atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_or_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_or_release(i, v);
+}
+
+/**
+ * atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_or_relaxed(i, v);
+}
+
+/**
+ * atomic64_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xor() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic64_xor(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic64_xor(i, v);
+}
+
+/**
+ * atomic64_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_xor(i, v);
+}
+
+/**
+ * atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_xor_acquire(i, v);
+}
+
+/**
+ * atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_xor_release(i, v);
+}
+
+/**
+ * atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: s64 value
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_xor_relaxed(i, v);
+}
+
+/**
+ * atomic64_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_xchg(atomic64_t *v, s64 new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_xchg(v, new);
+}
+
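+/*
+ * Usage sketch (illustrative; example_drain() is hypothetical): xchg
+ * atomically takes the accumulated value and resets it to zero, so no
+ * concurrent update is lost or counted twice.
+ *
+ *    static s64 example_drain(atomic64_t *accumulated)
+ *    {
+ *            return atomic64_xchg(accumulated, 0);
+ *    }
+ */
+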
+/**
+ * atomic64_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_xchg_acquire(atomic64_t *v, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_xchg_acquire(v, new);
+}
+
+/**
+ * atomic64_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_xchg_release(atomic64_t *v, s64 new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_xchg_release(v, new);
+}
+
+/**
+ * atomic64_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @new: s64 value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_xchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_xchg_relaxed(atomic64_t *v, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_xchg_relaxed(v, new);
+}
+
+/**
+ * atomic64_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_cmpxchg(v, old, new);
+}
+
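+/*
+ * Usage sketch (illustrative; example_add_clamped() and its clamping
+ * policy are hypothetical): the classic cmpxchg loop retries with the
+ * value observed on failure. atomic64_try_cmpxchg() below expresses the
+ * same loop more compactly.
+ *
+ *    static s64 example_add_clamped(atomic64_t *v, s64 i, s64 max)
+ *    {
+ *            s64 old = atomic64_read(v);
+ *            s64 new, seen;
+ *
+ *            for (;;) {
+ *                    new = min(old + i, max);
+ *                    seen = atomic64_cmpxchg(v, old, new);
+ *                    if (seen == old)
+ *                            return new;
+ *                    old = seen;
+ *            }
+ *    }
+ */
+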
+/**
+ * atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic64_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @old: s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_cmpxchg_relaxed(v, old, new);
+}
+
+/**
+ * atomic64_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic64_try_cmpxchg(v, old, new);
+}
+
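+/*
+ * Usage sketch (illustrative; example_add_clamped() is hypothetical):
+ * the same loop as the cmpxchg() example above, but try_cmpxchg updates
+ * @old with the current value on failure, saving the explicit re-read.
+ *
+ *    static s64 example_add_clamped(atomic64_t *v, s64 i, s64 max)
+ *    {
+ *            s64 old = atomic64_read(v);
+ *            s64 new;
+ *
+ *            do {
+ *                    new = min(old + i, max);
+ *            } while (!atomic64_try_cmpxchg(v, &old, new));
+ *
+ *            return new;
+ *    }
+ */
+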
+/**
+ * atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_acquire() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic64_try_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_release() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic64_try_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic64_t
+ * @old: pointer to s64 value to compare with
+ * @new: s64 value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_relaxed() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+
+/**
+ * atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: s64 value to subtract
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_sub_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_sub_and_test(i, v);
+}
+
+/**
+ * atomic64_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic64_dec_and_test(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_and_test(v);
+}
+
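+/*
+ * Usage sketch (illustrative; struct example_obj, example_put() and
+ * example_free() are hypothetical, and new reference counts should
+ * normally use refcount_t): exactly one caller observes the transition
+ * to zero and performs the teardown.
+ *
+ *    static void example_put(struct example_obj *obj)
+ *    {
+ *            if (atomic64_dec_and_test(&obj->refs))
+ *                    example_free(obj);
+ *    }
+ */
+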
+/**
+ * atomic64_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic64_inc_and_test(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_and_test(v);
+}
+
+/**
+ * atomic64_add_negative() - atomic add and test if negative with full ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_negative() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic64_add_negative(s64 i, atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_negative(i, v);
+}
+
+/**
+ * atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_negative_acquire() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic64_add_negative_acquire(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_negative_acquire(i, v);
+}
+
+/**
+ * atomic64_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_negative_release() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic64_add_negative_release(s64 i, atomic64_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_negative_release(i, v);
+}
+
+/**
+ * atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: s64 value to add
+ * @v: pointer to atomic64_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_negative_relaxed() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_negative_relaxed(i, v);
+}
+
+/**
+ * atomic64_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic64_t
+ * @a: s64 value to add
+ * @u: s64 value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_unless() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline s64
+atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_fetch_add_unless(v, a, u);
+}
+
+/**
+ * atomic64_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic64_t
+ * @a: s64 value to add
+ * @u: s64 value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_add_unless() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_add_unless(v, a, u);
+}
+
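+/*
+ * Usage sketch (illustrative; EXAMPLE_FROZEN and example_charge() are
+ * hypothetical): skip the update while the counter holds a sentinel
+ * value. atomic64_inc_not_zero() below is the (@a == 1, @u == 0)
+ * special case of this operation.
+ *
+ *    #define EXAMPLE_FROZEN  (-1)
+ *
+ *    static bool example_charge(atomic64_t *usage, s64 amount)
+ *    {
+ *            return atomic64_add_unless(usage, amount, EXAMPLE_FROZEN);
+ *    }
+ */
+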
+/**
+ * atomic64_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_not_zero() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic64_inc_not_zero(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_not_zero(v);
+}
+
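+/*
+ * Usage sketch (illustrative; struct example_obj and example_tryget()
+ * are hypothetical): the canonical lookup-then-get pattern, e.g. for
+ * objects found under RCU, where a zero count means the object is
+ * already on its way to being freed.
+ *
+ *    static bool example_tryget(struct example_obj *obj)
+ *    {
+ *            return atomic64_inc_not_zero(&obj->refs);
+ *    }
+ */
+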
+/**
+ * atomic64_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_inc_unless_negative() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic64_inc_unless_negative(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_inc_unless_negative(v);
+}
+
+/**
+ * atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_unless_positive() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic64_dec_unless_positive(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_unless_positive(v);
+}
+
+/**
+ * atomic64_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic64_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic64_dec_if_positive() there.
+ *
+ * Return: The original value of @v minus one, regardless of whether
+ * @v was updated.
+ */
+static __always_inline s64
+atomic64_dec_if_positive(atomic64_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic64_dec_if_positive(v);
+}
+
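+/*
+ * Usage sketch (illustrative; example_take_token() is hypothetical):
+ * consume one token only if any remain; a negative return means none
+ * were available and the count was left untouched.
+ *
+ *    static bool example_take_token(atomic64_t *tokens)
+ *    {
+ *            return atomic64_dec_if_positive(tokens) >= 0;
+ *    }
+ */
+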
+/**
+ * atomic_long_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_read() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline long
+atomic_long_read(const atomic_long_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic_long_read(v);
+}
+
+/**
+ * atomic_long_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_read_acquire() there.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline long
+atomic_long_read_acquire(const atomic_long_t *v)
+{
+ instrument_atomic_read(v, sizeof(*v));
+ return raw_atomic_long_read_acquire(v);
+}
+
+/**
+ * atomic_long_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @i: long value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_set() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_set(atomic_long_t *v, long i)
+{
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic_long_set(v, i);
+}
+
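+/*
+ * Usage sketch (illustrative; example_stat and example_read_and_reset()
+ * are hypothetical): plain read/set suit statistics that tolerate
+ * relaxed ordering while still avoiding torn accesses. Note that the
+ * read/set pair below is not atomic as a whole; use atomic_long_xchg()
+ * for a lossless drain.
+ *
+ *    static atomic_long_t example_stat = ATOMIC_LONG_INIT(0);
+ *
+ *    static long example_read_and_reset(void)
+ *    {
+ *            long seen = atomic_long_read(&example_stat);
+ *
+ *            atomic_long_set(&example_stat, 0);
+ *            return seen;
+ *    }
+ */
+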
+/**
+ * atomic_long_set_release() - atomic set with release ordering
+ * @v: pointer to atomic_long_t
+ * @i: long value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_set_release() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_set_release(atomic_long_t *v, long i)
+{
+ kcsan_release();
+ instrument_atomic_write(v, sizeof(*v));
+ raw_atomic_long_set_release(v, i);
+}
+
+/**
+ * atomic_long_add() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_add(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_add(i, v);
+}
+
+/**
+ * atomic_long_add_return() - atomic add with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_add_return(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_return(i, v);
+}
+
+/**
+ * atomic_long_add_return_acquire() - atomic add with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_return_acquire(i, v);
+}
+
+/**
+ * atomic_long_add_return_release() - atomic add with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_return_release(i, v);
+}
+
+/**
+ * atomic_long_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_return_relaxed(i, v);
+}
+
+/**
+ * atomic_long_fetch_add() - atomic add with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_add(i, v);
+}
+
+/**
+ * atomic_long_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_add_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_add_release() - atomic add with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_add_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_add_relaxed(i, v);
+}
+
+/**
+ * atomic_long_sub() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_sub(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_sub(i, v);
+}
+
+/**
+ * atomic_long_sub_return() - atomic subtract with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_sub_return(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_sub_return(i, v);
+}
+
+/**
+ * atomic_long_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_sub_return_acquire(i, v);
+}
+
+/**
+ * atomic_long_sub_return_release() - atomic subtract with release ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_sub_return_release(i, v);
+}
+
+/**
+ * atomic_long_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_sub_return_relaxed(i, v);
+}
+
+/**
+ * atomic_long_fetch_sub() - atomic subtract with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_sub(i, v);
+}
+
+/**
+ * atomic_long_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_sub_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_sub_release() - atomic subtract with release ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_sub_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_sub_relaxed(i, v);
+}
+
+/**
+ * atomic_long_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_inc(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_inc(v);
+}
+
+/**
+ * atomic_long_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_inc_return(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_return(v);
+}
+
+/**
+ * atomic_long_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_return_acquire(v);
+}
+
+/**
+ * atomic_long_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_inc_return_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_return_release(v);
+}
+
+/**
+ * atomic_long_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_return_relaxed(v);
+}
+
+/**
+ * atomic_long_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_inc(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_inc(v);
+}
+
+/**
+ * atomic_long_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_inc_acquire(v);
+}
+
+/**
+ * atomic_long_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_inc_release(v);
+}
+
+/**
+ * atomic_long_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_inc_relaxed(v);
+}
+
+/**
+ * atomic_long_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_dec(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_dec(v);
+}
+
+/**
+ * atomic_long_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_return() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_dec_return(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_return(v);
+}
+
+/**
+ * atomic_long_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_acquire() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_return_acquire(v);
+}
+
+/**
+ * atomic_long_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_release() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_dec_return_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_return_release(v);
+}
+
+/**
+ * atomic_long_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_relaxed() there.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_return_relaxed(v);
+}
+
+/**
+ * atomic_long_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_dec(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_dec(v);
+}
+
+/**
+ * atomic_long_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_dec_acquire(v);
+}
+
+/**
+ * atomic_long_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_dec_release(v);
+}
+
+/**
+ * atomic_long_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_dec_relaxed(v);
+}
+
+/**
+ * atomic_long_and() - atomic bitwise AND with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_and() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_and(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_and(i, v);
+}
+
+/**
+ * atomic_long_fetch_and() - atomic bitwise AND with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_and(i, v);
+}
+
+/**
+ * atomic_long_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_and_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_and_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_and_relaxed(i, v);
+}
+
+/**
+ * atomic_long_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_andnot() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_andnot(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_andnot(i, v);
+}
+
+/**
+ * atomic_long_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_andnot(i, v);
+}
+
+/**
+ * atomic_long_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_andnot_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_andnot_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_andnot_relaxed(i, v);
+}
+
+/**
+ * atomic_long_or() - atomic bitwise OR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_or() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_or(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_or(i, v);
+}
+
+/**
+ * atomic_long_fetch_or() - atomic bitwise OR with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_or(i, v);
+}
+
+/**
+ * atomic_long_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_or_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_or_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_or_relaxed(i, v);
+}
+
+/**
+ * atomic_long_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xor() there.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+atomic_long_xor(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ raw_atomic_long_xor(i, v);
+}
+
+/**
+ * atomic_long_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_xor(i, v);
+}
+
+/**
+ * atomic_long_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_xor_acquire(i, v);
+}
+
+/**
+ * atomic_long_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_xor_release(i, v);
+}
+
+/**
+ * atomic_long_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_xor_relaxed(i, v);
+}
+
+/**
+ * atomic_long_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_xchg(atomic_long_t *v, long new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_xchg(v, new);
+}
+
+/**
+ * atomic_long_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_xchg_acquire(atomic_long_t *v, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_xchg_acquire(v, new);
+}
+
+/**
+ * atomic_long_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_xchg_release(atomic_long_t *v, long new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_xchg_release(v, new);
+}
+
+/**
+ * atomic_long_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_xchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_xchg_relaxed(atomic_long_t *v, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_xchg_relaxed(v, new);
+}
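+
+/*
+ * Usage sketch: xchg() is handy for atomically stealing a value while
+ * leaving a sentinel behind, e.g. draining a single-slot mailbox
+ * (hypothetical example; @mailbox and process() are illustrative only):
+ *
+ *	long pending = atomic_long_xchg(&mailbox, 0);
+ *	if (pending)
+ *		process(pending);
+ */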
+
+/**
+ * atomic_long_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_cmpxchg(v, old, new);
+}
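+
+/*
+ * Usage sketch: a classic cmpxchg() retry loop, here atomically doubling a
+ * counter. A hypothetical example, not a pattern mandated by this API;
+ * try_cmpxchg() below avoids the explicit comparison on failure:
+ *
+ *	long old = atomic_long_read(&v);
+ *	for (;;) {
+ *		long seen = atomic_long_cmpxchg(&v, old, old * 2);
+ *		if (seen == old)
+ *			break;		// exchange happened
+ *		old = seen;		// lost the race; retry with new value
+ *	}
+ */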
+
+/**
+ * atomic_long_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_acquire() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic_long_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_release() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic_long_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_relaxed() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_cmpxchg_relaxed(v, old, new);
+}
+
+/**
+ * atomic_long_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic_long_try_cmpxchg(v, old, new);
+}
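+
+/*
+ * Usage sketch: the equivalent try_cmpxchg() loop. On failure @old is
+ * refreshed to the current value of @v, so no explicit re-read or
+ * comparison is needed (hypothetical example):
+ *
+ *	long old = atomic_long_read(&v);
+ *	do {
+ *		// retry until no other CPU raced with us
+ *	} while (!atomic_long_try_cmpxchg(&v, &old, old * 2));
+ */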
+
+/**
+ * atomic_long_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_acquire() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic_long_try_cmpxchg_acquire(v, old, new);
+}
+
+/**
+ * atomic_long_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_release() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic_long_try_cmpxchg_release(v, old, new);
+}
+
+/**
+ * atomic_long_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_relaxed() there.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ instrument_read_write(old, sizeof(*old));
+ return raw_atomic_long_try_cmpxchg_relaxed(v, old, new);
+}
+
+/**
+ * atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_sub_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_sub_and_test(i, v);
+}
+
+/**
+ * atomic_long_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_dec_and_test(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_and_test(v);
+}
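+
+/*
+ * Usage sketch: dec_and_test() is the building block of reference-count
+ * "put" operations; the full ordering pairs the final decrement with the
+ * subsequent free. Hypothetical example (@obj->refs is illustrative; real
+ * reference counts should normally use refcount_t):
+ *
+ *	if (atomic_long_dec_and_test(&obj->refs))
+ *		kfree(obj);	// last reference dropped
+ */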
+
+/**
+ * atomic_long_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_and_test() there.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_inc_and_test(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_and_test(v);
+}
+
+/**
+ * atomic_long_add_negative() - atomic add and test if negative with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_negative() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_add_negative(long i, atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_negative(i, v);
+}
+
+/**
+ * atomic_long_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_acquire() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_add_negative_acquire(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_negative_acquire(i, v);
+}
+
+/**
+ * atomic_long_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_release() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_add_negative_release(long i, atomic_long_t *v)
+{
+ kcsan_release();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_negative_release(i, v);
+}
+
+/**
+ * atomic_long_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_relaxed() there.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
+{
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_negative_relaxed(i, v);
+}
+
+/**
+ * atomic_long_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_long_t
+ * @a: long value to add
+ * @u: long value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_unless() there.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_fetch_add_unless(v, a, u);
+}
+
+/**
+ * atomic_long_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_long_t
+ * @a: long value to add
+ * @u: long value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_add_unless() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_add_unless(v, a, u);
+}
+
+/**
+ * atomic_long_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_not_zero() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_inc_not_zero(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_not_zero(v);
+}
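+
+/*
+ * Usage sketch: inc_not_zero() implements "take a reference unless the
+ * object is already dying", e.g. in an RCU-protected lookup. A hypothetical
+ * example; lookup() and @obj->refs are illustrative only:
+ *
+ *	rcu_read_lock();
+ *	obj = lookup(key);
+ *	if (obj && !atomic_long_inc_not_zero(&obj->refs))
+ *		obj = NULL;	// found, but the last reference is gone
+ *	rcu_read_unlock();
+ */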
+
+/**
+ * atomic_long_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_inc_unless_negative() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_inc_unless_negative(v);
+}
+
+/**
+ * atomic_long_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_unless_positive() there.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_unless_positive(v);
+}
+
+/**
+ * atomic_long_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Unsafe to use in noinstr code; use raw_atomic_long_dec_if_positive() there.
+ *
+ * Return: (@v - 1) computed from the original value of @v, regardless of
+ * whether @v was updated.
+ */
+static __always_inline long
+atomic_long_dec_if_positive(atomic_long_t *v)
+{
+ kcsan_mb();
+ instrument_atomic_read_write(v, sizeof(*v));
+ return raw_atomic_long_dec_if_positive(v);
+}
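+
+/*
+ * Usage sketch: dec_if_positive() suits counting-semaphore style "consume
+ * one unit if any remain" logic (hypothetical example; @available is
+ * illustrative only):
+ *
+ *	if (atomic_long_dec_if_positive(&available) < 0)
+ *		return -EBUSY;	// nothing left; @available unchanged
+ */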
+
+#define xchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_xchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_xchg_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_xchg_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define xchg_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg64(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128_acquire(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128_acquire(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128_release(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128_release(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128_relaxed(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128_relaxed(__ai_ptr, __VA_ARGS__); \
+})
+
+#define try_cmpxchg(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
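+
+/*
+ * Usage sketch: unlike the atomic_long_*() functions above, these macros
+ * operate on plain scalar lvalues of any size the architecture supports
+ * (hypothetical example; @word is an ordinary unsigned long):
+ *
+ *	unsigned long flags = READ_ONCE(word);
+ *	do {
+ *		// drop the lowest set bit, retrying on contention
+ *	} while (!try_cmpxchg(&word, &flags, flags & (flags - 1)));
+ */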
+
+#define try_cmpxchg_acquire(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_release(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_relaxed(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg64(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_acquire(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg64_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_release(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg64_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_relaxed(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128_acquire(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128_release(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ kcsan_release(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128_relaxed(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define cmpxchg_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg64_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define cmpxchg128_local(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_cmpxchg128_local(__ai_ptr, __VA_ARGS__); \
+})
+
+#define sync_cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#define try_cmpxchg_local(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_local(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg64_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg128_local(ptr, oldp, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ typeof(oldp) __ai_oldp = (oldp); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+ raw_try_cmpxchg128_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define sync_try_cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_sync_try_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
+#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
+// f618ac667f868941a84ce0ab2242f1786e049ed4
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
new file mode 100644
index 000000000000..f86b29d90877
--- /dev/null
+++ b/include/linux/atomic/atomic-long.h
@@ -0,0 +1,1812 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-long.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+#ifndef _LINUX_ATOMIC_LONG_H
+#define _LINUX_ATOMIC_LONG_H
+
+#include <linux/compiler.h>
+#include <asm/types.h>
+
+#ifdef CONFIG_64BIT
+typedef atomic64_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+#define atomic_long_cond_read_acquire atomic64_cond_read_acquire
+#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed
+#else
+typedef atomic_t atomic_long_t;
+#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+#define atomic_long_cond_read_acquire atomic_cond_read_acquire
+#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed
+#endif
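+
+/*
+ * On 64-bit kernels atomic_long_t therefore aliases atomic64_t, and on
+ * 32-bit kernels it aliases atomic_t; either way its width matches
+ * sizeof(long). Static initialization is identical on both (hypothetical
+ * example):
+ *
+ *	static atomic_long_t nr_events = ATOMIC_LONG_INIT(0);
+ */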
+
+/**
+ * raw_atomic_long_read() - atomic load with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically loads the value of @v with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_read() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline long
+raw_atomic_long_read(const atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_read(v);
+#else
+ return raw_atomic_read(v);
+#endif
+}
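+
+/*
+ * Usage sketch: the raw_*() forms exist for noinstr code, where the
+ * KASAN/KCSAN instrumentation emitted by the plain wrappers must be
+ * avoided (hypothetical example; handle_event() and @event_seq are
+ * illustrative only):
+ *
+ *	noinstr void handle_event(void)
+ *	{
+ *		long seq = raw_atomic_long_read(&event_seq);
+ *		...
+ *	}
+ */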
+
+/**
+ * raw_atomic_long_read_acquire() - atomic load with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically loads the value of @v with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_read_acquire() elsewhere.
+ *
+ * Return: The value loaded from @v.
+ */
+static __always_inline long
+raw_atomic_long_read_acquire(const atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_read_acquire(v);
+#else
+ return raw_atomic_read_acquire(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_set() - atomic set with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @i: long value to assign
+ *
+ * Atomically sets @v to @i with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_set() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_set(atomic_long_t *v, long i)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_set(v, i);
+#else
+ raw_atomic_set(v, i);
+#endif
+}
+
+/**
+ * raw_atomic_long_set_release() - atomic set with release ordering
+ * @v: pointer to atomic_long_t
+ * @i: long value to assign
+ *
+ * Atomically sets @v to @i with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_set_release() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_set_release(atomic_long_t *v, long i)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_set_release(v, i);
+#else
+ raw_atomic_set_release(v, i);
+#endif
+}
+
+/**
+ * raw_atomic_long_add() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_add(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_add(i, v);
+#else
+ raw_atomic_add(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_return() - atomic add with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_add_return(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_return(i, v);
+#else
+ return raw_atomic_add_return(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_return_acquire() - atomic add with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_return_acquire(i, v);
+#else
+ return raw_atomic_add_return_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_return_release() - atomic add with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_return_release(i, v);
+#else
+ return raw_atomic_add_return_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_return_relaxed() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_return_relaxed(i, v);
+#else
+ return raw_atomic_add_return_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_add() - atomic add with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add(i, v);
+#else
+ return raw_atomic_fetch_add(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_add_acquire() - atomic add with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add_acquire(i, v);
+#else
+ return raw_atomic_fetch_add_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_add_release() - atomic add with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add_release(i, v);
+#else
+ return raw_atomic_fetch_add_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_add_relaxed() - atomic add with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add_relaxed(i, v);
+#else
+ return raw_atomic_fetch_add_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_sub(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_sub(i, v);
+#else
+ raw_atomic_sub(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub_return() - atomic subtract with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_sub_return(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_return(i, v);
+#else
+ return raw_atomic_sub_return(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub_return_acquire() - atomic subtract with acquire ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_return_acquire(i, v);
+#else
+ return raw_atomic_sub_return_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub_return_release() - atomic subtract with release ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_return_release(i, v);
+#else
+ return raw_atomic_sub_return_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub_return_relaxed() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_return_relaxed(i, v);
+#else
+ return raw_atomic_sub_return_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_sub() - atomic subtract with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_sub() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_sub(i, v);
+#else
+ return raw_atomic_fetch_sub(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_sub_acquire() - atomic subtract with acquire ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_sub_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_sub_acquire(i, v);
+#else
+ return raw_atomic_fetch_sub_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_sub_release() - atomic subtract with release ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_sub_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_sub_release(i, v);
+#else
+ return raw_atomic_fetch_sub_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_sub_relaxed() - atomic subtract with relaxed ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_sub_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_sub_relaxed(i, v);
+#else
+ return raw_atomic_fetch_sub_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_inc(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_inc(v);
+#else
+ raw_atomic_inc(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_return() - atomic increment with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_inc_return(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_return(v);
+#else
+ return raw_atomic_inc_return(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_return_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_return_acquire(v);
+#else
+ return raw_atomic_inc_return_acquire(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_return_release() - atomic increment with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_inc_return_release(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_return_release(v);
+#else
+ return raw_atomic_inc_return_release(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_return_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_return_relaxed(v);
+#else
+ return raw_atomic_inc_return_relaxed(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_inc() - atomic increment with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_inc() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_inc(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_inc(v);
+#else
+ return raw_atomic_fetch_inc(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_inc_acquire() - atomic increment with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_inc_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_inc_acquire(v);
+#else
+ return raw_atomic_fetch_inc_acquire(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_inc_release() - atomic increment with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_inc_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_inc_release(v);
+#else
+ return raw_atomic_fetch_inc_release(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_inc_relaxed() - atomic increment with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_inc_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_inc_relaxed(v);
+#else
+ return raw_atomic_fetch_inc_relaxed(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_dec(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_dec(v);
+#else
+ raw_atomic_dec(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_return() - atomic decrement with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_return() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_dec_return(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_return(v);
+#else
+ return raw_atomic_dec_return(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_return_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_return_acquire() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_return_acquire(v);
+#else
+ return raw_atomic_dec_return_acquire(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_return_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_return_release() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_dec_return_release(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_return_release(v);
+#else
+ return raw_atomic_dec_return_release(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_return_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_return_relaxed() elsewhere.
+ *
+ * Return: The updated value of @v.
+ */
+static __always_inline long
+raw_atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_return_relaxed(v);
+#else
+ return raw_atomic_dec_return_relaxed(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_dec() - atomic decrement with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_dec() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_dec(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_dec(v);
+#else
+ return raw_atomic_fetch_dec(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_dec_acquire() - atomic decrement with acquire ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_dec_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_dec_acquire(v);
+#else
+ return raw_atomic_fetch_dec_acquire(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_dec_release() - atomic decrement with release ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_dec_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_dec_release(v);
+#else
+ return raw_atomic_fetch_dec_release(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_dec_relaxed() - atomic decrement with relaxed ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_dec_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_dec_relaxed(v);
+#else
+ return raw_atomic_fetch_dec_relaxed(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_and() - atomic bitwise AND with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_and() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_and(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_and(i, v);
+#else
+ raw_atomic_and(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_and() - atomic bitwise AND with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_and() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_and(i, v);
+#else
+ return raw_atomic_fetch_and(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_and_acquire() - atomic bitwise AND with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_and_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_and_acquire(i, v);
+#else
+ return raw_atomic_fetch_and_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_and_release() - atomic bitwise AND with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_and_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_and_release(i, v);
+#else
+ return raw_atomic_fetch_and_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_and_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_and_relaxed(i, v);
+#else
+ return raw_atomic_fetch_and_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_andnot() - atomic bitwise AND NOT with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_andnot() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_andnot(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_andnot(i, v);
+#else
+ raw_atomic_andnot(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_andnot() - atomic bitwise AND NOT with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_andnot() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_andnot(i, v);
+#else
+ return raw_atomic_fetch_andnot(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_andnot_acquire(i, v);
+#else
+ return raw_atomic_fetch_andnot_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_andnot_release() - atomic bitwise AND NOT with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_andnot_release(i, v);
+#else
+ return raw_atomic_fetch_andnot_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v & ~@i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_andnot_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_andnot_relaxed(i, v);
+#else
+ return raw_atomic_fetch_andnot_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_or() - atomic bitwise OR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_or() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_or(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_or(i, v);
+#else
+ raw_atomic_or(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_or() - atomic bitwise OR with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_or() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_or(i, v);
+#else
+ return raw_atomic_fetch_or(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_or_acquire() - atomic bitwise OR with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_or_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_or_acquire(i, v);
+#else
+ return raw_atomic_fetch_or_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_or_release() - atomic bitwise OR with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_or_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_or_release(i, v);
+#else
+ return raw_atomic_fetch_or_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v | @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_or_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_or_relaxed(i, v);
+#else
+ return raw_atomic_fetch_or_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_xor() - atomic bitwise XOR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xor() elsewhere.
+ *
+ * Return: Nothing.
+ */
+static __always_inline void
+raw_atomic_long_xor(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ raw_atomic64_xor(i, v);
+#else
+ raw_atomic_xor(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_xor() - atomic bitwise XOR with full ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_xor() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_xor(i, v);
+#else
+ return raw_atomic_fetch_xor(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_xor_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_xor_acquire(i, v);
+#else
+ return raw_atomic_fetch_xor_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_xor_release() - atomic bitwise XOR with release ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_xor_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_xor_release(i, v);
+#else
+ return raw_atomic_fetch_xor_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering
+ * @i: long value
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v ^ @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_xor_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_xor_relaxed(i, v);
+#else
+ return raw_atomic_fetch_xor_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_xchg() - atomic exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_xchg(atomic_long_t *v, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_xchg(v, new);
+#else
+ return raw_atomic_xchg(v, new);
+#endif
+}
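+
+/*
+ * Editorial sketch (not part of the generated header): xchg makes
+ * "steal the current value while publishing a replacement" a single
+ * atomic step, e.g. draining an accumulated counter (stats is a
+ * hypothetical structure):
+ *
+ *	long total = raw_atomic_long_xchg(&stats->accumulated, 0);
+ */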
+
+/**
+ * raw_atomic_long_xchg_acquire() - atomic exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_xchg_acquire(atomic_long_t *v, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_xchg_acquire(v, new);
+#else
+ return raw_atomic_xchg_acquire(v, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_xchg_release() - atomic exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_xchg_release(atomic_long_t *v, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_xchg_release(v, new);
+#else
+ return raw_atomic_xchg_release(v, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_xchg_relaxed() - atomic exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @new: long value to assign
+ *
+ * Atomically updates @v to @new with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_xchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_xchg_relaxed(atomic_long_t *v, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_xchg_relaxed(v, new);
+#else
+ return raw_atomic_xchg_relaxed(v, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_cmpxchg() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_cmpxchg(v, old, new);
+#else
+ return raw_atomic_cmpxchg(v, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_cmpxchg_acquire() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_cmpxchg_acquire(v, old, new);
+#else
+ return raw_atomic_cmpxchg_acquire(v, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_cmpxchg_release() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_cmpxchg_release(v, old, new);
+#else
+ return raw_atomic_cmpxchg_release(v, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @old: long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_cmpxchg_relaxed(v, old, new);
+#else
+ return raw_atomic_cmpxchg_relaxed(v, old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_try_cmpxchg() - atomic compare and exchange with full ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_try_cmpxchg(v, (s64 *)old, new);
+#else
+ return raw_atomic_try_cmpxchg(v, (int *)old, new);
+#endif
+}
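+
+/*
+ * Editorial sketch (not part of the generated header): the usual
+ * compare-and-swap retry loop. On failure, raw_atomic_long_try_cmpxchg()
+ * refreshes @old with the current value of @v, so the loop only
+ * recomputes and retries (compute_new() is hypothetical):
+ *
+ *	long new, old = raw_atomic_long_read(v);
+ *
+ *	do {
+ *		new = compute_new(old);
+ *	} while (!raw_atomic_long_try_cmpxchg(v, &old, new));
+ */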
+
+/**
+ * raw_atomic_long_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_acquire() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
+#else
+ return raw_atomic_try_cmpxchg_acquire(v, (int *)old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_try_cmpxchg_release() - atomic compare and exchange with release ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_release() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_try_cmpxchg_release(v, (s64 *)old, new);
+#else
+ return raw_atomic_try_cmpxchg_release(v, (int *)old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering
+ * @v: pointer to atomic_long_t
+ * @old: pointer to long value to compare with
+ * @new: long value to assign
+ *
+ * If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_relaxed() elsewhere.
+ *
+ * Return: @true if the exchange occurred, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
+#else
+ return raw_atomic_try_cmpxchg_relaxed(v, (int *)old, new);
+#endif
+}
+
+/**
+ * raw_atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
+ * @i: long value to subtract
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_sub_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_sub_and_test(i, v);
+#else
+ return raw_atomic_sub_and_test(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_and_test() - atomic decrement and test if zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v - 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_dec_and_test(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_and_test(v);
+#else
+ return raw_atomic_dec_and_test(v);
+#endif
+}
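+
+/*
+ * Editorial sketch (not part of the generated header): the canonical
+ * reference-count "put" pattern; only the thread that drops the final
+ * reference sees true and frees the object (obj and obj_free() are
+ * hypothetical):
+ *
+ *	if (raw_atomic_long_dec_and_test(&obj->refs))
+ *		obj_free(obj);
+ */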
+
+/**
+ * raw_atomic_long_inc_and_test() - atomic increment and test if zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + 1) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_and_test() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is zero, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_inc_and_test(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_and_test(v);
+#else
+ return raw_atomic_inc_and_test(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_negative() - atomic add and test if negative with full ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with full ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_negative() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_add_negative(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_negative(i, v);
+#else
+ return raw_atomic_add_negative(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_negative_acquire() - atomic add and test if negative with acquire ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with acquire ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_negative_acquire() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_add_negative_acquire(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_negative_acquire(i, v);
+#else
+ return raw_atomic_add_negative_acquire(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_negative_release() - atomic add and test if negative with release ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with release ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_negative_release() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_add_negative_release(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_negative_release(i, v);
+#else
+ return raw_atomic_add_negative_release(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_negative_relaxed() - atomic add and test if negative with relaxed ordering
+ * @i: long value to add
+ * @v: pointer to atomic_long_t
+ *
+ * Atomically updates @v to (@v + @i) with relaxed ordering.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_negative_relaxed() elsewhere.
+ *
+ * Return: @true if the resulting value of @v is negative, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_negative_relaxed(i, v);
+#else
+ return raw_atomic_add_negative_relaxed(i, v);
+#endif
+}
+
+/**
+ * raw_atomic_long_fetch_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_long_t
+ * @a: long value to add
+ * @u: long value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_fetch_add_unless() elsewhere.
+ *
+ * Return: The original value of @v.
+ */
+static __always_inline long
+raw_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_fetch_add_unless(v, a, u);
+#else
+ return raw_atomic_fetch_add_unless(v, a, u);
+#endif
+}
+
+/**
+ * raw_atomic_long_add_unless() - atomic add unless value with full ordering
+ * @v: pointer to atomic_long_t
+ * @a: long value to add
+ * @u: long value to compare with
+ *
+ * If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_add_unless() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_add_unless(v, a, u);
+#else
+ return raw_atomic_add_unless(v, a, u);
+#endif
+}
+
+/**
+ * raw_atomic_long_inc_not_zero() - atomic increment unless zero with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_not_zero() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_inc_not_zero(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_not_zero(v);
+#else
+ return raw_atomic_inc_not_zero(v);
+#endif
+}
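+
+/*
+ * Editorial sketch (not part of the generated header): the lookup-side
+ * companion to the "put" pattern; a reference is taken only while the
+ * count is non-zero, so an object racing with its final put is never
+ * resurrected (obj is hypothetical):
+ *
+ *	if (!raw_atomic_long_inc_not_zero(&obj->refs))
+ *		return NULL;	// object already on its way out
+ *	return obj;
+ */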
+
+/**
+ * raw_atomic_long_inc_unless_negative() - atomic increment unless negative with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_inc_unless_negative() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_inc_unless_negative(v);
+#else
+ return raw_atomic_inc_unless_negative(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_unless_positive() - atomic decrement unless positive with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_unless_positive() elsewhere.
+ *
+ * Return: @true if @v was updated, @false otherwise.
+ */
+static __always_inline bool
+raw_atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_unless_positive(v);
+#else
+ return raw_atomic_dec_unless_positive(v);
+#endif
+}
+
+/**
+ * raw_atomic_long_dec_if_positive() - atomic decrement if positive with full ordering
+ * @v: pointer to atomic_long_t
+ *
+ * If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
+ *
+ * Safe to use in noinstr code; prefer atomic_long_dec_if_positive() elsewhere.
+ *
+ * Return: The original value of @v minus one, regardless of whether @v was
+ * updated.
+ */
+static __always_inline long
+raw_atomic_long_dec_if_positive(atomic_long_t *v)
+{
+#ifdef CONFIG_64BIT
+ return raw_atomic64_dec_if_positive(v);
+#else
+ return raw_atomic_dec_if_positive(v);
+#endif
+}
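+
+/*
+ * Editorial sketch (not part of the generated header): consuming one
+ * unit of a counted resource only when one is available, e.g. a token
+ * bucket (pool is hypothetical). A negative return means @v was left
+ * unmodified:
+ *
+ *	if (raw_atomic_long_dec_if_positive(&pool->tokens) < 0)
+ *		return -EBUSY;	// no tokens left
+ */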
+
+#endif /* _LINUX_ATOMIC_LONG_H */
+// eadf183c3600b8b92b91839dd3be6bcc560c752d
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h
index e4004d1e6725..b3643de9931d 100644
--- a/include/linux/attribute_container.h
+++ b/include/linux/attribute_container.h
@@ -61,14 +61,8 @@ int attribute_container_device_trigger_safe(struct device *dev,
int (*undo)(struct attribute_container *,
struct device *,
struct device *));
-void attribute_container_trigger(struct device *dev,
- int (*fn)(struct attribute_container *,
- struct device *));
int attribute_container_add_attrs(struct device *classdev);
int attribute_container_add_class_device(struct device *classdev);
-int attribute_container_add_class_device_adapter(struct attribute_container *cont,
- struct device *dev,
- struct device *classdev);
void attribute_container_remove_attrs(struct device *classdev);
void attribute_container_class_device_del(struct device *classdev);
struct attribute_container *attribute_container_classdev_to_container(struct device *);
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 82b7c1116a85..536f8ee8da81 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -11,8 +11,10 @@
#include <linux/sched.h>
#include <linux/ptrace.h>
+#include <linux/audit_arch.h>
#include <uapi/linux/audit.h>
#include <uapi/linux/netfilter/nf_tables.h>
+#include <uapi/linux/fanotify.h>
#define AUDIT_INO_UNSET ((unsigned long)-1)
#define AUDIT_DEV_UNSET ((dev_t)-1)
@@ -34,6 +36,9 @@ struct mqstat;
struct audit_watch;
struct audit_tree;
struct sk_buff;
+struct kern_ipc_perm;
+struct lsm_id;
+struct lsm_prop;
struct audit_krule {
u32 pflags;
@@ -115,11 +120,11 @@ enum audit_nfcfgop {
AUDIT_NFT_OP_OBJ_RESET,
AUDIT_NFT_OP_FLOWTABLE_REGISTER,
AUDIT_NFT_OP_FLOWTABLE_UNREGISTER,
+ AUDIT_NFT_OP_SETELEM_RESET,
+ AUDIT_NFT_OP_RULE_RESET,
AUDIT_NFT_OP_INVALID,
};
-extern int is_audit_feature_set(int which);
-
extern int __init audit_register_class(int class, unsigned *list);
extern int audit_classify_syscall(int abi, unsigned syscall);
extern int audit_classify_arch(int arch);
@@ -130,8 +135,6 @@ extern unsigned compat_dir_class[];
extern unsigned compat_chattr_class[];
extern unsigned compat_signal_class[];
-extern int audit_classify_compat_syscall(int abi, unsigned syscall);
-
/* audit_names->type values */
#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
#define AUDIT_TYPE_NORMAL 1 /* a "normal" audit record */
@@ -146,6 +149,10 @@ extern int audit_classify_compat_syscall(int abi, unsigned syscall);
#define AUDIT_TTY_ENABLE BIT(0)
#define AUDIT_TTY_LOG_PASSWD BIT(1)
+/* bit values for audit_cfg_lsm */
+#define AUDIT_CFG_LSM_SECCTX_SUBJECT BIT(0)
+#define AUDIT_CFG_LSM_SECCTX_OBJECT BIT(1)
+
struct filename;
#define AUDIT_OFF 0
@@ -184,6 +191,8 @@ extern void audit_log_path_denied(int type,
const char *operation);
extern void audit_log_lost(const char *message);
+extern int audit_log_subj_ctx(struct audit_buffer *ab, struct lsm_prop *prop);
+extern int audit_log_obj_ctx(struct audit_buffer *ab, struct lsm_prop *prop);
extern int audit_log_task_context(struct audit_buffer *ab);
extern void audit_log_task_info(struct audit_buffer *ab);
@@ -209,6 +218,8 @@ extern u32 audit_enabled;
extern int audit_signal_info(int sig, struct task_struct *t);
+extern void audit_cfg_lsm(const struct lsm_id *lsmid, int flags);
+
#else /* CONFIG_AUDIT */
static inline __printf(4, 5)
void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
@@ -244,6 +255,16 @@ static inline void audit_log_key(struct audit_buffer *ab, char *key)
{ }
static inline void audit_log_path_denied(int type, const char *operation)
{ }
+static inline int audit_log_subj_ctx(struct audit_buffer *ab,
+ struct lsm_prop *prop)
+{
+ return 0;
+}
+static inline int audit_log_obj_ctx(struct audit_buffer *ab,
+ struct lsm_prop *prop)
+{
+ return 0;
+}
static inline int audit_log_task_context(struct audit_buffer *ab)
{
return 0;
@@ -268,6 +289,9 @@ static inline int audit_signal_info(int sig, struct task_struct *t)
return 0;
}
+static inline void audit_cfg_lsm(const struct lsm_id *lsmid, int flags)
+{ }
+
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_AUDIT_COMPAT_GENERIC
@@ -287,6 +311,8 @@ static inline int audit_signal_info(int sig, struct task_struct *t)
/* Public API */
extern int audit_alloc(struct task_struct *task);
extern void __audit_free(struct task_struct *task);
+extern void __audit_uring_entry(u8 op);
+extern void __audit_uring_exit(int success, long code);
extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
unsigned long a2, unsigned long a3);
extern void __audit_syscall_exit(int ret_success, long ret_value);
@@ -323,6 +349,21 @@ static inline void audit_free(struct task_struct *task)
if (unlikely(task->audit_context))
__audit_free(task);
}
+static inline void audit_uring_entry(u8 op)
+{
+ /*
+ * We intentionally check audit_context() before audit_enabled as most
+ * Linux systems (as of ~2021) rely on systemd, which forces audit to
+ * be enabled regardless of the user's audit configuration.
+ */
+ if (unlikely(audit_context() && audit_enabled))
+ __audit_uring_entry(op);
+}
+static inline void audit_uring_exit(int success, long code)
+{
+ if (unlikely(audit_context()))
+ __audit_uring_exit(success, code);
+}
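+
+/*
+ * Editorial sketch (hypothetical call site): an io_uring opcode handler
+ * would bracket its work with the helpers above so that records are only
+ * generated while auditing is active:
+ *
+ *	audit_uring_entry(req->opcode);
+ *	ret = do_op_work(req);		// hypothetical
+ *	audit_uring_exit(ret == 0, ret);
+ */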
static inline void audit_syscall_entry(int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
@@ -398,8 +439,9 @@ extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
const struct cred *old);
extern void __audit_log_capset(const struct cred *new, const struct cred *old);
extern void __audit_mmap_fd(int fd, int flags);
-extern void __audit_log_kern_module(char *name);
-extern void __audit_fanotify(unsigned int response);
+extern void __audit_openat2_how(struct open_how *how);
+extern void __audit_log_kern_module(const char *name);
+extern void __audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar);
extern void __audit_tk_injoffset(struct timespec64 offset);
extern void __audit_ntp_log(const struct audit_ntp_data *ad);
extern void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries,
@@ -494,16 +536,22 @@ static inline void audit_mmap_fd(int fd, int flags)
__audit_mmap_fd(fd, flags);
}
-static inline void audit_log_kern_module(char *name)
+static inline void audit_openat2_how(struct open_how *how)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_openat2_how(how);
+}
+
+static inline void audit_log_kern_module(const char *name)
{
if (!audit_dummy_context())
__audit_log_kern_module(name);
}
-static inline void audit_fanotify(unsigned int response)
+static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
{
- if (!audit_dummy_context())
- __audit_fanotify(response);
+ if (audit_enabled)
+ __audit_fanotify(response, friar);
}
static inline void audit_tk_injoffset(struct timespec64 offset)
@@ -556,6 +604,10 @@ static inline int audit_alloc(struct task_struct *task)
}
static inline void audit_free(struct task_struct *task)
{ }
+static inline void audit_uring_entry(u8 op)
+{ }
+static inline void audit_uring_exit(int success, long code)
+{ }
static inline void audit_syscall_entry(int major, unsigned long a0,
unsigned long a1, unsigned long a2,
unsigned long a3)
@@ -645,11 +697,13 @@ static inline void audit_log_capset(const struct cred *new,
static inline void audit_mmap_fd(int fd, int flags)
{ }
-static inline void audit_log_kern_module(char *name)
-{
-}
+static inline void audit_openat2_how(struct open_how *how)
+{ }
+
+static inline void audit_log_kern_module(const char *name)
+{ }
-static inline void audit_fanotify(unsigned int response)
+static inline void audit_fanotify(u32 response, struct fanotify_response_info_audit_rule *friar)
{ }
static inline void audit_tk_injoffset(struct timespec64 offset)
diff --git a/include/linux/audit_arch.h b/include/linux/audit_arch.h
new file mode 100644
index 000000000000..0e34d673ef17
--- /dev/null
+++ b/include/linux/audit_arch.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* audit_arch.h -- Arch layer specific support for audit
+ *
+ * Copyright 2021 Red Hat Inc., Durham, North Carolina.
+ * All Rights Reserved.
+ *
+ * Author: Richard Guy Briggs <rgb@redhat.com>
+ */
+#ifndef _LINUX_AUDIT_ARCH_H_
+#define _LINUX_AUDIT_ARCH_H_
+
+enum auditsc_class_t {
+ AUDITSC_NATIVE = 0,
+ AUDITSC_COMPAT,
+ AUDITSC_OPEN,
+ AUDITSC_OPENAT,
+ AUDITSC_SOCKETCALL,
+ AUDITSC_EXECVE,
+ AUDITSC_OPENAT2,
+
+ AUDITSC_NVALS /* count */
+};
+
+extern int audit_classify_compat_syscall(int abi, unsigned syscall);
+
+#endif
diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h
index fc51d45f106b..4086afd0cc6b 100644
--- a/include/linux/auxiliary_bus.h
+++ b/include/linux/auxiliary_bus.h
@@ -11,12 +11,181 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+/**
+ * DOC: DEVICE_LIFESPAN
+ *
+ * The registering driver is the entity that allocates memory for the
+ * auxiliary_device and registers it on the auxiliary bus. It is important to
+ * note that, as opposed to the platform bus, the registering driver is wholly
+ * responsible for the management of the memory used for the device object.
+ *
+ * To be clear, the memory for the auxiliary_device is freed in the release()
+ * callback defined by the registering driver. The registering driver should
+ * only call auxiliary_device_delete() and then auxiliary_device_uninit() when
+ * it is done with the device. The release() function is then automatically
+ * called if and when other code releases its reference to the device.
+ *
+ * A parent object, defined in the shared header file, contains the
+ * auxiliary_device. It also contains a pointer to the shared object(s), which
+ * are also defined in the shared header. Both the parent object and the shared
+ * object(s) are allocated by the registering driver. This layout allows the
+ * auxiliary_driver's registering module to perform a container_of() call to go
+ * from the pointer to the auxiliary_device, that is passed during the call to
+ * the auxiliary_driver's probe function, up to the parent object, and then
+ * have access to the shared object(s).
+ *
+ * The memory for the shared object(s) must have a lifespan equal to, or
+ * greater than, the lifespan of the memory for the auxiliary_device. The
+ * auxiliary_driver should only consider that the shared object is valid as
+ * long as the auxiliary_device is still registered on the auxiliary bus. It
+ * is up to the registering driver to manage (e.g. free or keep available) the
+ * memory for the shared object beyond the life of the auxiliary_device.
+ *
+ * The registering driver must unregister all auxiliary devices before its own
+ * driver.remove() is completed. An easy way to ensure this is to use the
+ * devm_add_action_or_reset() call to register a function against the parent
+ * device which unregisters the auxiliary device object(s).
+ *
+ * Finally, any operations which operate on the auxiliary devices must continue
+ * to function (if only to return an error) after the registering driver
+ * unregisters the auxiliary device.
+ */
+
+/**
+ * struct auxiliary_device - auxiliary device object.
+ * @dev: Device. The release and parent fields of the device structure
+ *       must be filled in.
+ * @name: Match name found by the auxiliary device driver.
+ * @id: Unique identifier if multiple devices of the same name are exported.
+ * @sysfs: Embedded struct which holds all sysfs-related fields.
+ * @sysfs.irqs: Xarray containing the irq indices used by the device.
+ * @sysfs.lock: Synchronizes irq sysfs creation.
+ * @sysfs.irq_dir_exists: Whether the "irqs" directory exists.
+ *
+ * An auxiliary_device represents a part of its parent device's functionality.
+ * It is given a name that, combined with the registering driver's
+ * KBUILD_MODNAME, creates a match_name that is used for driver binding, and an
+ * id that, combined with the match_name, provides a unique name to register
+ * with the bus subsystem. For example, a driver registering an auxiliary
+ * device is
+ * named 'foo_mod.ko' and the subdevice is named 'foo_dev'. The match name is
+ * therefore 'foo_mod.foo_dev'.
+ *
+ * Registering an auxiliary_device is a three-step process.
+ *
+ * First, a 'struct auxiliary_device' needs to be defined or allocated for each
+ * sub-device desired. The name, id, dev.release, and dev.parent fields of
+ * this structure must be filled in as follows.
+ *
+ * The 'name' field is to be given a name that is recognized by the auxiliary
+ * driver. If two auxiliary_devices with the same match_name, e.g.
+ * "foo_mod.foo_dev", are registered onto the bus, they must have unique id
+ * values (e.g. "x" and "y") so that the registered devices' names are
+ * "foo_mod.foo_dev.x" and "foo_mod.foo_dev.y". If match_name + id are not
+ * unique, then device_add() fails and generates an error message.
+ *
+ * The auxiliary_device.dev.type.release or auxiliary_device.dev.release must
+ * be populated with a non-NULL pointer to successfully register the
+ * auxiliary_device. This release call is where resources associated with the
+ * auxiliary device must be freed, because once the device is placed on the
+ * bus, the parent driver cannot tell what other code may hold a reference to
+ * this data.
+ *
+ * The auxiliary_device.dev.parent should be set, typically to the
+ * registering driver's device.
+ *
+ * Second, call auxiliary_device_init(), which checks several aspects of the
+ * auxiliary_device struct and performs a device_initialize(). After this step
+ * completes, any error state must have a call to auxiliary_device_uninit() in
+ * its resolution path.
+ *
+ * The third and final step in registering an auxiliary_device is to perform a
+ * call to auxiliary_device_add(), which sets the name of the device and adds
+ * the device to the bus.
+ *
+ * .. code-block:: c
+ *
+ * #define MY_DEVICE_NAME "foo_dev"
+ *
+ * ...
+ *
+ * struct auxiliary_device *my_aux_dev = my_aux_dev_alloc(xxx);
+ *
+ * // Step 1:
+ * my_aux_dev->name = MY_DEVICE_NAME;
+ * my_aux_dev->id = my_unique_id_alloc(xxx);
+ * my_aux_dev->dev.release = my_aux_dev_release;
+ * my_aux_dev->dev.parent = my_dev;
+ *
+ * // Step 2:
+ * if (auxiliary_device_init(my_aux_dev))
+ * goto fail;
+ *
+ * // Step 3:
+ * if (auxiliary_device_add(my_aux_dev)) {
+ * auxiliary_device_uninit(my_aux_dev);
+ * goto fail;
+ * }
+ *
+ * ...
+ *
+ *
+ * Unregistering an auxiliary_device is a two-step process to mirror the
+ * register process. First call auxiliary_device_delete(), then call
+ * auxiliary_device_uninit().
+ *
+ * .. code-block:: c
+ *
+ * auxiliary_device_delete(my_dev->my_aux_dev);
+ * auxiliary_device_uninit(my_dev->my_aux_dev);
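+ *
+ * As an editorial sketch (my_unique_id_free() is hypothetical and the
+ * device is assumed to have been kmalloc()ed), a release() callback
+ * pairing with the allocation above could look like:
+ *
+ * .. code-block:: c
+ *
+ *	static void my_aux_dev_release(struct device *dev)
+ *	{
+ *		struct auxiliary_device *my_aux_dev = to_auxiliary_dev(dev);
+ *
+ *		my_unique_id_free(my_aux_dev->id);
+ *		kfree(my_aux_dev);
+ *	}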
+ */
struct auxiliary_device {
struct device dev;
const char *name;
u32 id;
+ struct {
+ struct xarray irqs;
+ struct mutex lock; /* Synchronize irq sysfs creation */
+ bool irq_dir_exists;
+ } sysfs;
};
+/**
+ * struct auxiliary_driver - Definition of an auxiliary bus driver
+ * @probe: Called when a matching device is added to the bus.
+ * @remove: Called when device is removed from the bus.
+ * @shutdown: Called at shut-down time to quiesce the device.
+ * @suspend: Called to put the device into sleep mode, usually into a
+ *           low-power state.
+ * @resume: Called to bring the device out of sleep mode.
+ * @name: Driver name.
+ * @driver: Core driver structure.
+ * @id_table: Table of devices this driver should match on the bus.
+ *
+ * Auxiliary drivers follow the standard driver model convention, where
+ * discovery/enumeration is handled by the core, and drivers provide probe()
+ * and remove() methods. They support power management and shutdown
+ * notifications using the standard conventions.
+ *
+ * Auxiliary drivers register themselves with the bus by calling
+ * auxiliary_driver_register(). The id_table contains the match_names of
+ * auxiliary devices that a driver can bind with.
+ *
+ * .. code-block:: c
+ *
+ * static const struct auxiliary_device_id my_auxiliary_id_table[] = {
+ * { .name = "foo_mod.foo_dev" },
+ * {},
+ * };
+ *
+ * MODULE_DEVICE_TABLE(auxiliary, my_auxiliary_id_table);
+ *
+ * struct auxiliary_driver my_drv = {
+ * .name = "myauxiliarydrv",
+ * .id_table = my_auxiliary_id_table,
+ * .probe = my_drv_probe,
+ * .remove = my_drv_remove
+ * };
+ */
struct auxiliary_driver {
int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id);
void (*remove)(struct auxiliary_device *auxdev);
@@ -28,12 +197,22 @@ struct auxiliary_driver {
const struct auxiliary_device_id *id_table;
};
+static inline void *auxiliary_get_drvdata(struct auxiliary_device *auxdev)
+{
+ return dev_get_drvdata(&auxdev->dev);
+}
+
+static inline void auxiliary_set_drvdata(struct auxiliary_device *auxdev, void *data)
+{
+ dev_set_drvdata(&auxdev->dev, data);
+}
+
static inline struct auxiliary_device *to_auxiliary_dev(struct device *dev)
{
return container_of(dev, struct auxiliary_device, dev);
}
-static inline struct auxiliary_driver *to_auxiliary_drv(struct device_driver *drv)
+static inline const struct auxiliary_driver *to_auxiliary_drv(const struct device_driver *drv)
{
return container_of(drv, struct auxiliary_driver, driver);
}
@@ -42,8 +221,24 @@ int auxiliary_device_init(struct auxiliary_device *auxdev);
int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname);
#define auxiliary_device_add(auxdev) __auxiliary_device_add(auxdev, KBUILD_MODNAME)
+#ifdef CONFIG_SYSFS
+int auxiliary_device_sysfs_irq_add(struct auxiliary_device *auxdev, int irq);
+void auxiliary_device_sysfs_irq_remove(struct auxiliary_device *auxdev,
+ int irq);
+#else /* CONFIG_SYSFS */
+static inline int
+auxiliary_device_sysfs_irq_add(struct auxiliary_device *auxdev, int irq)
+{
+ return 0;
+}
+
+static inline void
+auxiliary_device_sysfs_irq_remove(struct auxiliary_device *auxdev, int irq) {}
+#endif
+
static inline void auxiliary_device_uninit(struct auxiliary_device *auxdev)
{
+ mutex_destroy(&auxdev->sysfs.lock);
put_device(&auxdev->dev);
}
@@ -59,6 +254,23 @@ int __auxiliary_driver_register(struct auxiliary_driver *auxdrv, struct module *
void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
+struct auxiliary_device *auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id);
+void auxiliary_device_destroy(void *auxdev);
+
+struct auxiliary_device *__devm_auxiliary_device_create(struct device *dev,
+ const char *modname,
+ const char *devname,
+ void *platform_data,
+ int id);
+
+#define devm_auxiliary_device_create(dev, devname, platform_data) \
+ __devm_auxiliary_device_create(dev, KBUILD_MODNAME, devname, \
+ platform_data, 0)
+
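+/*
+ * Editorial sketch: from a parent driver's probe(), sub-functionality can
+ * be handed to the auxiliary bus in one devres-managed call ("foo_dev"
+ * and pdata are hypothetical); the device is deleted and uninitialized
+ * automatically when the parent driver unbinds:
+ *
+ *	aux = devm_auxiliary_device_create(dev, "foo_dev", pdata);
+ *	if (!aux)
+ *		return -ENODEV;
+ */
+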
/**
* module_auxiliary_driver() - Helper macro for registering an auxiliary driver
* @__auxiliary_driver: auxiliary driver struct
@@ -66,12 +278,12 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
* Helper macro for auxiliary drivers which do not do anything special in
* module init/exit. This eliminates a lot of boilerplate. Each module may only
* use this macro once, and calling it replaces module_init() and module_exit()
+ *
+ * .. code-block:: c
+ *
+ * module_auxiliary_driver(my_drv);
*/
#define module_auxiliary_driver(__auxiliary_driver) \
module_driver(__auxiliary_driver, auxiliary_driver_register, auxiliary_driver_unregister)
-struct auxiliary_device *auxiliary_find_device(struct device *start,
- const void *data,
- int (*match)(struct device *dev, const void *data));
-
#endif /* _AUXILIARY_BUS_H_ */
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index f68d0ec2d740..407f7005e6d6 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -4,6 +4,6 @@
#include <uapi/linux/auxvec.h>
-#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
+#define AT_VECTOR_SIZE_BASE 22 /* NEW_AUX_ENT entries in auxiliary table */
/* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
#endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index 565deea6ffe8..11bdab5522fd 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -1,21 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/*******************************************************************************
- *
- * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- ******************************************************************************/
+/* Copyright (c) 2013-2022, Intel Corporation. */
#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/overflow.h>
+#include <uapi/linux/if_ether.h>
+
/* Description:
- * This header file describes the VF-PF communication protocol used
- * by the drivers for all devices starting from our 40G product line
+ * This header file describes the Virtual Function (VF) - Physical Function
+ * (PF) communication protocol used by the drivers for all devices starting
+ * from our 40G product line
*
* Admin queue buffer usage:
* desc->opcode is always aqc_opc_send_msg_to_pf
@@ -29,8 +26,8 @@
* have a maximum of sixteen queues for all of its VSIs.
*
* The PF is required to return a status code in v_retval for all messages
- * except RESET_VF, which does not require any response. The return value
- * is of status_code type, defined in the shared type.h.
+ * except RESET_VF, which does not require any response. The returned value
+ * is of virtchnl_status_code type, defined here.
*
* In general, VF driver initialization should roughly follow the order of
* these opcodes. The VF driver must first validate the API version of the
@@ -92,6 +89,9 @@ enum virtchnl_rx_hsplit {
VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
};
+enum virtchnl_bw_limit_type {
+ VIRTCHNL_BW_SHAPER = 0,
+};
/* END GENERIC DEFINES */
/* Opcodes for VF-PF communication. These are placed in the v_opcode field
@@ -122,13 +122,18 @@ enum virtchnl_ops {
VIRTCHNL_OP_GET_STATS = 15,
VIRTCHNL_OP_RSVD = 16,
VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ VIRTCHNL_OP_CONFIG_RSS_HFUNC = 18,
+ /* opcode 19 is reserved */
VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
+ VIRTCHNL_OP_RDMA = VIRTCHNL_OP_IWARP,
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
+ VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP = VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
+ VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP = VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
- VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
- VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HASHCFG = 26,
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
VIRTCHNL_OP_REQUEST_QUEUES = 29,
@@ -136,11 +141,27 @@ enum virtchnl_ops {
VIRTCHNL_OP_DISABLE_CHANNELS = 31,
VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
- /* opcode 34 - 44 are reserved */
+ /* opcode 34 - 43 are reserved */
+ VIRTCHNL_OP_GET_SUPPORTED_RXDIDS = 44,
VIRTCHNL_OP_ADD_RSS_CFG = 45,
VIRTCHNL_OP_DEL_RSS_CFG = 46,
VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
+ VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
+ VIRTCHNL_OP_ADD_VLAN_V2 = 52,
+ VIRTCHNL_OP_DEL_VLAN_V2 = 53,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55,
+ VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56,
+ VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
+ /* opcode 58 and 59 are reserved */
+ VIRTCHNL_OP_1588_PTP_GET_CAPS = 60,
+ VIRTCHNL_OP_1588_PTP_GET_TIME = 61,
+ /* opcode 62 - 65 are reserved */
+ VIRTCHNL_OP_GET_QOS_CAPS = 66,
+ /* opcode 68 through 111 are reserved */
+ VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
+ VIRTCHNL_OP_CONFIG_QUANTA = 113,
VIRTCHNL_OP_MAX,
};
@@ -154,19 +175,6 @@ enum virtchnl_ops {
#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \
{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
-/* Virtual channel message descriptor. This overlays the admin queue
- * descriptor. All other data is passed in external buffers.
- */
-
-struct virtchnl_msg {
- u8 pad[8]; /* AQ flags/opcode/len/retval fields */
- enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
- enum virtchnl_status_code v_retval; /* ditto for desc->retval */
- u32 vfid; /* used by PF when sending to VF */
-};
-
-VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
-
/* Message descriptions and data structures. */
/* VIRTCHNL_OP_VERSION
@@ -227,7 +235,9 @@ enum virtchnl_vsi_type {
struct virtchnl_vsi_resource {
u16 vsi_id;
u16 num_queue_pairs;
- enum virtchnl_vsi_type vsi_type;
+
+ /* see enum virtchnl_vsi_type */
+ s32 vsi_type;
u16 qset_handle;
u8 default_mac_addr[ETH_ALEN];
};
@@ -238,27 +248,33 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
* VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
* TX/RX Checksum offloading and TSO for non-tunnelled packets.
*/
-#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
-#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
-#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
-#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
-#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
-#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
-#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
-#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
-#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
-#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
-#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
-#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
-#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
-#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
-#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
-#define VIRTCHNL_VF_OFFLOAD_USO 0X02000000
-#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF 0X08000000
-#define VIRTCHNL_VF_OFFLOAD_FDIR_PF 0X10000000
-
-/* Define below the capability flags that are not offloads */
-#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
+#define VIRTCHNL_VF_OFFLOAD_L2 BIT(0)
+#define VIRTCHNL_VF_OFFLOAD_RDMA BIT(1)
+#define VIRTCHNL_VF_CAP_RDMA VIRTCHNL_VF_OFFLOAD_RDMA
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3)
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4)
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5)
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
+/* used to negotiate communicating link speeds in Mbps */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
+#define VIRTCHNL_VF_OFFLOAD_CRC BIT(10)
+#define VIRTCHNL_VF_OFFLOAD_TC_U32 BIT(11)
+#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
+#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 BIT(18)
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF BIT(19)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP BIT(20)
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM BIT(21)
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM BIT(22)
+#define VIRTCHNL_VF_OFFLOAD_ADQ BIT(23)
+#define VIRTCHNL_VF_OFFLOAD_USO BIT(25)
+#define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26)
+#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27)
+#define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28)
+#define VIRTCHNL_VF_OFFLOAD_QOS BIT(29)
+#define VIRTCHNL_VF_CAP_PTP BIT(31)
+
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -273,10 +289,11 @@ struct virtchnl_vf_resource {
u32 rss_key_size;
u32 rss_lut_size;
- struct virtchnl_vsi_resource vsi_res[1];
+ struct virtchnl_vsi_resource vsi_res[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_vf_resource);
+#define virtchnl_vf_resource_LEGACY_SIZEOF 36
/* VIRTCHNL_OP_CONFIG_TX_QUEUE
* VF sends this message to set up parameters for one TX queue.
@@ -296,10 +313,70 @@ struct virtchnl_txq_info {
VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+/* RX descriptor IDs (range from 0 to 63) */
+enum virtchnl_rx_desc_ids {
+ VIRTCHNL_RXDID_0_16B_BASE = 0,
+ VIRTCHNL_RXDID_1_32B_BASE = 1,
+ VIRTCHNL_RXDID_2_FLEX_SQ_NIC = 2,
+ VIRTCHNL_RXDID_3_FLEX_SQ_SW = 3,
+ VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB = 4,
+ VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL = 5,
+ VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2 = 6,
+ VIRTCHNL_RXDID_7_HW_RSVD = 7,
+ /* 8 through 15 are reserved */
+ VIRTCHNL_RXDID_16_COMMS_GENERIC = 16,
+ VIRTCHNL_RXDID_17_COMMS_AUX_VLAN = 17,
+ VIRTCHNL_RXDID_18_COMMS_AUX_IPV4 = 18,
+ VIRTCHNL_RXDID_19_COMMS_AUX_IPV6 = 19,
+ VIRTCHNL_RXDID_20_COMMS_AUX_FLOW = 20,
+ VIRTCHNL_RXDID_21_COMMS_AUX_TCP = 21,
+ /* 22 through 63 are reserved */
+};
+
+#define VIRTCHNL_RXDID_BIT(x) BIT_ULL(VIRTCHNL_RXDID_##x)
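+/* e.g. VIRTCHNL_RXDID_BIT(16_COMMS_GENERIC) expands to
+ * BIT_ULL(VIRTCHNL_RXDID_16_COMMS_GENERIC), i.e. BIT_ULL(16)
+ */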
+
+/* RX descriptor ID bitmasks */
+enum virtchnl_rx_desc_id_bitmasks {
+ VIRTCHNL_RXDID_0_16B_BASE_M = VIRTCHNL_RXDID_BIT(0_16B_BASE),
+ VIRTCHNL_RXDID_1_32B_BASE_M = VIRTCHNL_RXDID_BIT(1_32B_BASE),
+ VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M = VIRTCHNL_RXDID_BIT(2_FLEX_SQ_NIC),
+ VIRTCHNL_RXDID_3_FLEX_SQ_SW_M = VIRTCHNL_RXDID_BIT(3_FLEX_SQ_SW),
+ VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M = VIRTCHNL_RXDID_BIT(4_FLEX_SQ_NIC_VEB),
+ VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL_M = VIRTCHNL_RXDID_BIT(5_FLEX_SQ_NIC_ACL),
+ VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2_M = VIRTCHNL_RXDID_BIT(6_FLEX_SQ_NIC_2),
+ VIRTCHNL_RXDID_7_HW_RSVD_M = VIRTCHNL_RXDID_BIT(7_HW_RSVD),
+ /* 8 through 15 are reserved */
+ VIRTCHNL_RXDID_16_COMMS_GENERIC_M = VIRTCHNL_RXDID_BIT(16_COMMS_GENERIC),
+ VIRTCHNL_RXDID_17_COMMS_AUX_VLAN_M = VIRTCHNL_RXDID_BIT(17_COMMS_AUX_VLAN),
+ VIRTCHNL_RXDID_18_COMMS_AUX_IPV4_M = VIRTCHNL_RXDID_BIT(18_COMMS_AUX_IPV4),
+ VIRTCHNL_RXDID_19_COMMS_AUX_IPV6_M = VIRTCHNL_RXDID_BIT(19_COMMS_AUX_IPV6),
+ VIRTCHNL_RXDID_20_COMMS_AUX_FLOW_M = VIRTCHNL_RXDID_BIT(20_COMMS_AUX_FLOW),
+ VIRTCHNL_RXDID_21_COMMS_AUX_TCP_M = VIRTCHNL_RXDID_BIT(21_COMMS_AUX_TCP),
+ /* 22 through 63 are reserved */
+};
+
+/* virtchnl_rxq_info_flags - definition of bits in the flags field of the
+ * virtchnl_rxq_info structure.
+ *
+ * @VIRTCHNL_PTP_RX_TSTAMP: request to enable Rx timestamping
+ *
+ * Other flag bits are currently reserved and they may be extended in the
+ * future.
+ */
+enum virtchnl_rxq_info_flags {
+ VIRTCHNL_PTP_RX_TSTAMP = BIT(0),
+};
+
/* VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
+ * PF configures requested queue and returns a status code. Setting
+ * the crc_disable flag to 1 disables CRC stripping for that queue;
+ * the VIRTCHNL_VF_OFFLOAD_CRC offload must have been negotiated
+ * beforehand or the PF will ignore the request. The flag should be
+ * set the same for all of the queues of a VF.
*/
/* Rx queue config info */
@@ -311,9 +388,19 @@ struct virtchnl_rxq_info {
u16 splithdr_enabled; /* deprecated with AVF 1.0 */
u32 databuffer_size;
u32 max_pkt_size;
- u32 pad1;
+ u8 crc_disable;
+ /* see enum virtchnl_rx_desc_ids;
+ * only used when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is supported. Note
+ * that when the offload is not supported, the descriptor format aligns
+ * with VIRTCHNL_RXDID_1_32B_BASE.
+ */
+ enum virtchnl_rx_desc_ids rxdid:8;
+ enum virtchnl_rxq_info_flags flags:8; /* see virtchnl_rxq_info_flags */
+ u8 pad1;
u64 dma_ring_addr;
- enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+
+ /* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
+ s32 rx_split_pos;
u32 pad2;
};
@@ -325,6 +412,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
* PF configures queues and returns status.
* If the number of queues specified is greater than the number of queues
* associated with the VSI, an error is returned and no queues are configured.
+ * NOTE: The VF is not required to configure all queues in a single request.
+ * It may send multiple messages. PF drivers must correctly handle all VF
+ * requests.
*/
struct virtchnl_queue_pair_info {
/* NOTE: vsi_id and queue_id should be identical for both queues. */
@@ -338,10 +428,11 @@ struct virtchnl_vsi_queue_config_info {
u16 vsi_id;
u16 num_queue_pairs;
u32 pad;
- struct virtchnl_queue_pair_info qpair[1];
+ struct virtchnl_queue_pair_info qpair[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vsi_queue_config_info);
+#define virtchnl_vsi_queue_config_info_LEGACY_SIZEOF 72
/* VIRTCHNL_OP_REQUEST_QUEUES
* VF sends this message to request the PF to allocate additional queues to
@@ -362,8 +453,13 @@ struct virtchnl_vf_res_request {
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
* are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
+ * The "other" causes are always mapped to vector 0. The VF may not request
+ * that vector 0 be used for traffic.
* PF configures interrupt mapping and returns status.
+ * NOTE: due to hardware requirements, all active queues (both TX and RX)
+ * should be mapped to interrupts, even if the driver intends to operate
+ * only in polling mode. In this case the interrupt may be disabled, but
+ * the ITR timer will still run to trigger writebacks.
*/
struct virtchnl_vector_map {
u16 vsi_id;
@@ -378,10 +474,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
struct virtchnl_irq_map_info {
u16 num_vectors;
- struct virtchnl_vector_map vecmap[1];
+ struct virtchnl_vector_map vecmap[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
+VIRTCHNL_CHECK_STRUCT_LEN(2, virtchnl_irq_map_info);
+#define virtchnl_irq_map_info_LEGACY_SIZEOF 14
/* VIRTCHNL_OP_ENABLE_QUEUES
* VIRTCHNL_OP_DISABLE_QUEUES
@@ -390,6 +487,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
* (Currently, we only support 16 queues per VF, but we make the field
* u32 to allow for expansion.)
* PF performs requested action and returns status.
+ * NOTE: The VF is not required to enable/disable all queues in a single
+ * request. It may send multiple messages.
+ * PF drivers must correctly handle all VF requests.
*/
struct virtchnl_queue_select {
u16 vsi_id;
@@ -412,9 +512,36 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
* PF removes the filters and returns status.
*/
+/* VIRTCHNL_ETHER_ADDR_LEGACY
+ * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
+ * bytes. Moving forward, VF drivers should not set type to
+ * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
+ * behavior. The control plane function (i.e. PF) can use a best effort method
+ * of tracking the primary/device unicast in this case, but there is no
+ * guarantee and functionality depends on the implementation of the PF.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_PRIMARY
+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
+ * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
+ * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
+ * function (i.e. PF) to accurately track and use this MAC address for
+ * displaying on the host and for VM/function reset.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_EXTRA
+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
+ * unicast and/or multicast filters that are being added/deleted via
+ * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
+ */
struct virtchnl_ether_addr {
u8 addr[ETH_ALEN];
- u8 pad[2];
+ u8 type;
+#define VIRTCHNL_ETHER_ADDR_LEGACY 0
+#define VIRTCHNL_ETHER_ADDR_PRIMARY 1
+#define VIRTCHNL_ETHER_ADDR_EXTRA 2
+#define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */
+ u8 pad;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
@@ -422,10 +549,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
struct virtchnl_ether_addr_list {
u16 vsi_id;
u16 num_elements;
- struct virtchnl_ether_addr list[1];
+ struct virtchnl_ether_addr list[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_ether_addr_list);
+#define virtchnl_ether_addr_list_LEGACY_SIZEOF 12
/* VIRTCHNL_OP_ADD_VLAN
* VF sends this message to add one or more VLAN tag filters for receives.
@@ -444,10 +572,357 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
struct virtchnl_vlan_filter_list {
u16 vsi_id;
u16 num_elements;
- u16 vlan_id[1];
+ u16 vlan_id[];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_vlan_filter_list);
+#define virtchnl_vlan_filter_list_LEGACY_SIZEOF 6
+
+/* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
+ * structures and opcodes.
+ *
+ * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
+ * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
+ * by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
+ * would OR the following bits:
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * The VF would interpret this as meaning VLAN filtering is supported on
+ * both 0x8100 and 0x88A8 VLAN ethertypes.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be
+ * supported by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
+ * offload it would OR the following bits:
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * The VF would interpret this as meaning VLAN stripping is supported on either
+ * 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will override
+ * the previously set value.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
+ * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
+ * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
+ * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
+ *
+ * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
+ * VLAN filtering if the underlying PF supports it.
+ *
+ * VIRTCHNL_VLAN_TOGGLE - This field indicates whether a
+ * certain VLAN capability can be toggled. For example if the underlying PF/CP
+ * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
+ * set this bit along with the supported ethertypes.
+ */
+enum virtchnl_vlan_support {
+ VIRTCHNL_VLAN_UNSUPPORTED = 0,
+ VIRTCHNL_VLAN_ETHERTYPE_8100 = BIT(0),
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 = BIT(1),
+ VIRTCHNL_VLAN_ETHERTYPE_9100 = BIT(2),
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 = BIT(8),
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 = BIT(9),
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 = BIT(10),
+ VIRTCHNL_VLAN_PRIO = BIT(24),
+ VIRTCHNL_VLAN_FILTER_MASK = BIT(28),
+ VIRTCHNL_VLAN_ETHERTYPE_AND = BIT(29),
+ VIRTCHNL_VLAN_ETHERTYPE_XOR = BIT(30),
+ VIRTCHNL_VLAN_TOGGLE = BIT(31),
+};
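
As a sketch of how the AND/XOR combination bits above are meant to be read, a VF could reduce a capability word from the PF like this (caps would come from the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS reply):

    /* true only when 0x8100 and 0x88A8 may be active at the same time */
    static bool vlan_caps_concurrent_8100_88a8(u32 caps)
    {
    	u32 both = VIRTCHNL_VLAN_ETHERTYPE_8100 | VIRTCHNL_VLAN_ETHERTYPE_88A8;

    	if ((caps & both) != both)
    		return false;

    	/* with VIRTCHNL_VLAN_ETHERTYPE_XOR the ethertypes are exclusive */
    	return !!(caps & VIRTCHNL_VLAN_ETHERTYPE_AND);
    }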
+
+/* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * for filtering, insertion, and stripping capabilities.
+ *
+ * If only outer capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective.
+ *
+ * If only inner capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective. Functionally this is the same as if only outer capabilities are
+ * supported. The VF driver is just forced to use the inner fields when
+ * adding/deleting filters and enabling/disabling offloads (if supported).
+ *
+ * If both outer and inner capabilities are supported (for filtering, insertion,
+ * and/or stripping) then outer refers to the outer most or single VLAN and
+ * inner refers to the second VLAN, if it exists, in the packet.
+ *
+ * There is no support for tunneled VLAN offloads, so outer and inner never
+ * refer to a tunneled packet from the VF's perspective.
+ */
+struct virtchnl_vlan_supported_caps {
+ u32 outer;
+ u32 inner;
+};
+
+/* The PF populates these fields based on the supported VLAN filtering. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
+ * the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN filtering setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The ethertype(s) specified in the ethertype_init field are the ethertypes
+ * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
+ * most VLAN from the VF's perspective. If both inner and outer filtering are
+ * allowed then ethertype_init only refers to the outer most VLAN, as the only
+ * VLAN ethertype supported for inner VLAN filtering is
+ * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
+ * when both inner and outer filtering are allowed.
+ *
+ * The max_filters field tells the VF how many VLAN filters it's allowed to have
+ * at any one time. If it has reached this limit and tries to add another filter,
+ * then the request will be rejected by the PF. To prevent failures, the VF
+ * should keep track of how many VLAN filters it has added and not attempt to
+ * add more than max_filters.
+ */
+struct virtchnl_vlan_filtering_caps {
+ struct virtchnl_vlan_supported_caps filtering_support;
+ u32 ethertype_init;
+ u16 max_filters;
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
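
Illustrative only: the filter bookkeeping the comment above asks for, assuming a hypothetical driver-private structure that caches the negotiated caps:

    struct vf_vlan_state {			/* hypothetical driver state */
    	struct virtchnl_vlan_filtering_caps caps;
    	u16 num_active_filters;
    };

    static bool vf_can_add_vlan_filter(const struct vf_vlan_state *st)
    {
    	/* stay below the PF-imposed limit to avoid rejected requests */
    	return st->num_active_filters < st->caps.max_filters;
    }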
+
+/* This enum is used for the virtchnl_vlan_offload_caps structure to specify
+ * if the PF supports a different ethertype for stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
+ * for stripping also affect the ethertype(s) specified for insertion, and
+ * vice versa. If the VF tries to configure VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
+ * that will be the ethertype for both stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
+ * stripping do not affect the ethertype(s) specified for insertion, and vice
+ * versa.
+ */
+enum virtchnl_vlan_ethertype_match {
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
+ VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
+};
+
+/* The PF populates these fields based on the supported VLAN offloads. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN offload setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The VF driver needs to be aware of how the tags are stripped by hardware and
+ * inserted by the VF driver based on the level of offload support. The PF will
+ * populate these fields based on where the VLAN tags are expected to be
+ * offloaded via the VIRTHCNL_VLAN_TAG_LOCATION_* bits. The VF will need to
+ * interpret these fields. See the definition of the
+ * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
+ * enumeration.
+ */
+struct virtchnl_vlan_offload_caps {
+ struct virtchnl_vlan_supported_caps stripping_support;
+ struct virtchnl_vlan_supported_caps insertion_support;
+ u32 ethertype_init;
+ u8 ethertype_match;
+ u8 pad[3];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
+
+/* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * VF sends this message to determine its VLAN capabilities.
+ *
+ * PF will mark which capabilities it supports based on hardware support and
+ * current configuration. For example, if a port VLAN is configured the PF will
+ * not allow outer VLAN filtering, stripping, or insertion to be configured so
+ * it will block these features from the VF.
+ *
+ * The VF will need to cross-reference its capabilities with the PF's
+ * capabilities in the response message from the PF to determine the VLAN
+ * support.
+ */
+struct virtchnl_vlan_caps {
+ struct virtchnl_vlan_filtering_caps filtering;
+ struct virtchnl_vlan_offload_caps offloads;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
+
+struct virtchnl_vlan {
+ u16 tci; /* tci[15:13] = PCP and tci[11:0] = VID */
+ u16 tci_mask; /* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
+ * filtering caps
+ */
+ u16 tpid; /* 0x8100, 0x88a8, etc. and only type(s) set in
+ * filtering caps. Note that tpid here does not refer to
+ * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
+ * actual 2-byte VLAN TPID
+ */
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
+
+struct virtchnl_vlan_filter {
+ struct virtchnl_vlan inner;
+ struct virtchnl_vlan outer;
+ u8 pad[16];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
+
+/* VIRTCHNL_OP_ADD_VLAN_V2
+ * VIRTCHNL_OP_DEL_VLAN_V2
+ *
+ * VF sends these messages to add/del one or more VLAN tag filters for Rx
+ * traffic.
+ *
+ * The PF attempts to add the filters and returns status.
+ *
+ * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
+ * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
+ */
+struct virtchnl_vlan_filter_list_v2 {
+ u16 vport_id;
+ u16 num_elements;
+ u8 pad[4];
+ struct virtchnl_vlan_filter filters[];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan_filter_list_v2);
+#define virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF 40
+
+/* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
+ *
+ * VF sends this message to enable or disable VLAN stripping or insertion. It
+ * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
+ * allowed and whether or not it's allowed to enable/disable the specific
+ * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
+ * parse the virtchnl_vlan_caps.offloads fields to determine which offload
+ * messages are allowed.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable and/or disable 0x8100 inner
+ * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
+ * case means the outer most or single VLAN from the VF's perspective. This is
+ * because no outer offloads are supported. See the comments above the
+ * virtchnl_vlan_supported_caps structure for more details.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.inner =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.inner =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * In order to enable inner (again note that in this case inner is the outer
+ * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
+ * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
+ * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.inner_ethertype_setting =
+ * VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * VLAN TPID(s) are not used for the outer_ethertype_setting and
+ * inner_ethertype_setting fields because a device could support VLAN
+ * insertion and/or stripping offload on multiple ethertypes concurrently;
+ * this method allows a VF to request multiple ethertypes in one message
+ * using the virtchnl_vlan_support enumeration.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
+ * VLAN insertion and stripping simultaneously. The
+ * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
+ * populated based on what the PF can support.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
+ * would populate the virtchnl_vlan_setting structure in the following manner
+ * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting =
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * There is also the case where a PF and the underlying hardware can support
+ * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
+ * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
+ * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
+ * offloads. The ethertypes must match for stripping and insertion.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ * VIRTCHNL_VLAN_TOGGLE |
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.ethertype_match =
+ * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ *
+ * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
+ * populate the virtchnl_vlan_setting structure in the following manner and send
+ * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message. Also, this will change the
+ * ethertype for VLAN insertion if it's enabled. So, for completeness, a
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ */
+struct virtchnl_vlan_setting {
+ u32 outer_ethertype_setting;
+ u32 inner_ethertype_setting;
+ u16 vport_id;
+ u8 pad[6];
};
-VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
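
The last walkthrough above, written out as a hedged C sketch: request outer 0x88a8 stripping and, because stripping matches insertion in that capability mode, keep insertion in sync. vf_send_msg() is a hypothetical mailbox helper:

    int vf_send_msg(u32 opcode, u8 *msg, size_t msglen); /* hypothetical */

    static int vf_enable_88a8_outer_stripping(u16 vport_id)
    {
    	struct virtchnl_vlan_setting vs = {
    		.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8,
    		.vport_id = vport_id,
    	};
    	int err;

    	err = vf_send_msg(VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
    			  (u8 *)&vs, sizeof(vs));
    	if (err)
    		return err;

    	/* ethertype_match == STRIPPING_MATCHES_INSERTION, so re-send the
    	 * same ethertype for insertion to keep both offloads consistent
    	 */
    	return vf_send_msg(VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
    			   (u8 *)&vs, sizeof(vs));
    }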
/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
* VF sends VSI id and flags.
@@ -484,31 +959,57 @@ VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
struct virtchnl_rss_key {
u16 vsi_id;
u16 key_len;
- u8 key[1]; /* RSS hash key, packed bytes */
+ u8 key[]; /* RSS hash key, packed bytes */
};
-VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_key);
+#define virtchnl_rss_key_LEGACY_SIZEOF 6
struct virtchnl_rss_lut {
u16 vsi_id;
u16 lut_entries;
- u8 lut[1]; /* RSS lookup table */
+ u8 lut[]; /* RSS lookup table */
};
-VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rss_lut);
+#define virtchnl_rss_lut_LEGACY_SIZEOF 6
-/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
- * VIRTCHNL_OP_SET_RSS_HENA
- * VF sends these messages to get and set the hash filter enable bits for RSS.
+/* VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS
+ * VIRTCHNL_OP_SET_RSS_HASHCFG
+ * VF sends these messages to get and set the hash filter configuration for RSS.
* By default, the PF sets these to all possible traffic types that the
* hardware supports. The VF can query this value if it wants to change the
* traffic types that are hashed by the hardware.
*/
-struct virtchnl_rss_hena {
- u64 hena;
+struct virtchnl_rss_hashcfg {
+ /* Bits defined by enum libie_filter_pctype */
+ u64 hashcfg;
};
-VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hashcfg);
+
+/* Type of RSS algorithm */
+enum virtchnl_rss_algorithm {
+ VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
+ VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
+ VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
+ VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
+};
+
+/* VIRTCHNL_OP_CONFIG_RSS_HFUNC
+ * VF sends this message to configure the RSS hash function. Only supported
+ * if both PF and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation.
+ * The hash function is initialized to VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC
+ * by the PF.
+ */
+struct virtchnl_rss_hfunc {
+ u16 vsi_id;
+ u16 rss_algorithm; /* enum virtchnl_rss_algorithm */
+ u32 reserved;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hfunc);
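
A minimal sketch of selecting a different hash algorithm, assuming VIRTCHNL_VF_OFFLOAD_RSS_PF was negotiated; vf_send_msg() is hypothetical:

    int vf_send_msg(u32 opcode, u8 *msg, size_t msglen); /* hypothetical */

    static int vf_set_symmetric_toeplitz(u16 vsi_id)
    {
    	struct virtchnl_rss_hfunc hfunc = {
    		.vsi_id = vsi_id,
    		.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC,
    	};

    	return vf_send_msg(VIRTCHNL_OP_CONFIG_RSS_HFUNC,
    			   (u8 *)&hfunc, sizeof(hfunc));
    }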
/* VIRTCHNL_OP_ENABLE_CHANNELS
* VIRTCHNL_OP_DISABLE_CHANNELS
@@ -529,10 +1030,11 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
struct virtchnl_tc_info {
u32 num_tc;
u32 pad;
- struct virtchnl_channel_info list[1];
+ struct virtchnl_channel_info list[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_tc_info);
+#define virtchnl_tc_info_LEGACY_SIZEOF 24
/* VIRTCHNL_ADD_CLOUD_FILTER
* VIRTCHNL_DEL_CLOUD_FILTER
@@ -582,8 +1084,12 @@ enum virtchnl_flow_type {
struct virtchnl_filter {
union virtchnl_flow_spec data;
union virtchnl_flow_spec mask;
- enum virtchnl_flow_type flow_type;
- enum virtchnl_action action;
+
+ /* see enum virtchnl_flow_type */
+ s32 flow_type;
+
+ /* see enum virtchnl_action */
+ s32 action;
u32 action_meta;
u8 field_flags;
u8 pad[3];
@@ -607,7 +1113,8 @@ enum virtchnl_event_codes {
#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct virtchnl_pf_event {
- enum virtchnl_event_codes event;
+ /* see enum virtchnl_event_codes */
+ s32 event;
union {
/* If the PF driver does not support the new speed reporting
* capabilities then use link_event else use link_event_adv to
@@ -620,6 +1127,7 @@ struct virtchnl_pf_event {
struct {
enum virtchnl_link_speed link_speed;
bool link_status;
+ u8 pad[3];
} link_event;
struct {
/* link_speed provided in Mbps */
@@ -629,39 +1137,42 @@ struct virtchnl_pf_event {
} link_event_adv;
} event_data;
- int severity;
+ s32 severity;
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
-/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
- * VF uses this message to request PF to map IWARP vectors to IWARP queues.
- * The request for this originates from the VF IWARP driver through
- * a client interface between VF LAN and VF IWARP driver.
+/* used to specify if a ceq_idx or aeq_idx is invalid */
+#define VIRTCHNL_RDMA_INVALID_QUEUE_IDX 0xFFFF
+/* VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP
+ * VF uses this message to request PF to map RDMA vectors to RDMA queues.
+ * The request for this originates from the VF RDMA driver through
+ * a client interface between VF LAN and VF RDMA driver.
* A vector could have an AEQ and CEQ attached to it although
- * there is a single AEQ per VF IWARP instance in which case
- * most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
- * There will never be a case where there will be multiple CEQs attached
- * to a single vector.
+ * there is a single AEQ per VF RDMA instance, in which case
+ * most vectors will have VIRTCHNL_RDMA_INVALID_QUEUE_IDX for aeq and a valid
+ * idx for ceq. There will never be a case where multiple CEQs are
+ * attached to a single vector.
* PF configures interrupt mapping and returns status.
*/
-struct virtchnl_iwarp_qv_info {
+struct virtchnl_rdma_qv_info {
u32 v_idx; /* msix_vector */
- u16 ceq_idx;
- u16 aeq_idx;
+ u16 ceq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
+ u16 aeq_idx; /* set to VIRTCHNL_RDMA_INVALID_QUEUE_IDX if invalid */
u8 itr_idx;
u8 pad[3];
};
-VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_rdma_qv_info);
-struct virtchnl_iwarp_qvlist_info {
+struct virtchnl_rdma_qvlist_info {
u32 num_vectors;
- struct virtchnl_iwarp_qv_info qv_info[1];
+ struct virtchnl_rdma_qv_info qv_info[];
};
-VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_rdma_qvlist_info);
+#define virtchnl_rdma_qvlist_info_LEGACY_SIZEOF 16
/* VF reset states - these are written into the RSTAT register:
* VFGEN_RSTAT on the VF
@@ -680,15 +1191,8 @@ enum virtchnl_vfr_states {
VIRTCHNL_VFR_VFACTIVE,
};
-/* Type of RSS algorithm */
-enum virtchnl_rss_algorithm {
- VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
- VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
- VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
- VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
-};
-
#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
+#define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024
#define PROTO_HDR_SHIFT 5
#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
@@ -720,7 +1224,7 @@ enum virtchnl_rss_algorithm {
#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
(((hdr)->type) >> PROTO_HDR_SHIFT)
#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
- ((hdr)->type == ((val) >> PROTO_HDR_SHIFT))
+ ((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
(VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))
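
A short sketch of the encoding these macros decode: each field constant carries its header type in the bits above PROTO_HDR_SHIFT, so testing a field against a header reduces to comparing the shifted-down type:

    /* VIRTCHNL_PROTO_HDR_IPV4_SRC == VIRTCHNL_PROTO_HDR_IPV4 << PROTO_HDR_SHIFT,
     * so this is equivalent to hdr->type == VIRTCHNL_PROTO_HDR_IPV4
     */
    static bool proto_hdr_is_ipv4(const struct virtchnl_proto_hdr *hdr)
    {
    	return VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, VIRTCHNL_PROTO_HDR_IPV4_SRC);
    }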
@@ -749,6 +1253,17 @@ enum virtchnl_proto_hdr_type {
VIRTCHNL_PROTO_HDR_ESP,
VIRTCHNL_PROTO_HDR_AH,
VIRTCHNL_PROTO_HDR_PFCP,
+ VIRTCHNL_PROTO_HDR_GTPC,
+ VIRTCHNL_PROTO_HDR_ECPRI,
+ VIRTCHNL_PROTO_HDR_L2TPV2,
+ VIRTCHNL_PROTO_HDR_PPP,
+ /* IPv4 and IPv6 Fragment header types are only associated with
+ * VIRTCHNL_PROTO_HDR_IPV4 and VIRTCHNL_PROTO_HDR_IPV6 respectively,
+ * and cannot be used independently.
+ */
+ VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,
+ VIRTCHNL_PROTO_HDR_GRE,
};
/* Protocol header field within a protocol header. */
@@ -771,6 +1286,7 @@ enum virtchnl_proto_hdr_field {
VIRTCHNL_PROTO_HDR_IPV4_DSCP,
VIRTCHNL_PROTO_HDR_IPV4_TTL,
VIRTCHNL_PROTO_HDR_IPV4_PROT,
+ VIRTCHNL_PROTO_HDR_IPV4_CHKSUM,
/* IPV6 */
VIRTCHNL_PROTO_HDR_IPV6_SRC =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
@@ -778,18 +1294,34 @@ enum virtchnl_proto_hdr_field {
VIRTCHNL_PROTO_HDR_IPV6_TC,
VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
VIRTCHNL_PROTO_HDR_IPV6_PROT,
+ /* IPV6 Prefix */
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_SRC,
+ VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_DST,
/* TCP */
VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
+ VIRTCHNL_PROTO_HDR_TCP_CHKSUM,
/* UDP */
VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
+ VIRTCHNL_PROTO_HDR_UDP_CHKSUM,
/* SCTP */
VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
+ VIRTCHNL_PROTO_HDR_SCTP_CHKSUM,
/* GTPU_IP */
VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
@@ -813,10 +1345,33 @@ enum virtchnl_proto_hdr_field {
VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
VIRTCHNL_PROTO_HDR_PFCP_SEID,
+ /* GTPC */
+ VIRTCHNL_PROTO_HDR_GTPC_TEID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPC),
+ /* ECPRI */
+ VIRTCHNL_PROTO_HDR_ECPRI_MSG_TYPE =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ECPRI),
+ VIRTCHNL_PROTO_HDR_ECPRI_PC_RTC_ID,
+ /* IPv4 Dummy Fragment */
+ VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4_FRAG),
+ /* IPv6 Extension Fragment */
+ VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG),
+ /* GTPU_DWN/UP */
+ VIRTCHNL_PROTO_HDR_GTPU_DWN_QFI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN),
+ VIRTCHNL_PROTO_HDR_GTPU_UP_QFI =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP),
+ /* L2TPv2 */
+ VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID =
+ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV2),
+ VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID,
};
struct virtchnl_proto_hdr {
- enum virtchnl_proto_hdr_type type;
+ /* see enum virtchnl_proto_hdr_type */
+ s32 type;
u32 field_selector; /* a bit mask to select field for header type */
u8 buffer[64];
/**
@@ -830,30 +1385,43 @@ VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
struct virtchnl_proto_hdrs {
u8 tunnel_level;
+ u8 pad[3];
/**
* specifies where the protocol headers start from.
+ * must be 0 when sending a raw packet request.
* 0 - from the outer layer
* 1 - from the first inner layer
* 2 - from the second inner layer
* ....
**/
- int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */
- struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+ u32 count; /* number of proto layers, must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
+ union {
+ struct virtchnl_proto_hdr
+ proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
+ struct {
+ u16 pkt_len;
+ u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+ u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
+ } raw;
+ };
};
VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
struct virtchnl_rss_cfg {
struct virtchnl_proto_hdrs proto_hdrs; /* protocol headers */
- enum virtchnl_rss_algorithm rss_algorithm; /* RSS algorithm type */
- u8 reserved[128]; /* reserve for future */
+
+ /* see enum virtchnl_rss_algorithm; rss algorithm type */
+ s32 rss_algorithm;
+ u8 reserved[128]; /* reserve for future */
};
VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
/* action configuration for FDIR */
struct virtchnl_filter_action {
- enum virtchnl_action type;
+ /* see enum virtchnl_action type */
+ s32 type;
union {
/* used for queue and qgroup action */
struct {
@@ -878,7 +1446,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
struct virtchnl_filter_action_set {
/* action number must be less than VIRTCHNL_MAX_NUM_ACTIONS */
- int count;
+ u32 count;
struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
};
@@ -895,7 +1463,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
/* Status returned to VF after VF requests FDIR commands
* VIRTCHNL_FDIR_SUCCESS
* VF FDIR related request is successfully done by PF
- * The request can be OP_ADD/DEL.
+ * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
*
* VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
* OP_ADD_FDIR_FILTER request failed due to lack of hardware resources.
@@ -916,6 +1484,10 @@ VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
* VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
* OP_ADD/DEL_FDIR_FILTER request failed because filter programming
* timed out.
+ *
+ * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
+ * OP_QUERY_FDIR_FILTER request failed parameter validation; for
+ * example, the VF queried the counter of a rule that has no counter action.
*/
enum virtchnl_fdir_prgm_status {
VIRTCHNL_FDIR_SUCCESS = 0,
@@ -925,6 +1497,7 @@ enum virtchnl_fdir_prgm_status {
VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
+ VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
};
/* VIRTCHNL_OP_ADD_FDIR_FILTER
@@ -941,7 +1514,9 @@ struct virtchnl_fdir_add {
u16 validate_only; /* INPUT */
u32 flow_id; /* OUTPUT */
struct virtchnl_fdir_rule rule_cfg; /* INPUT */
- enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+
+ /* see enum virtchnl_fdir_prgm_status; OUTPUT */
+ s32 status;
};
VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
@@ -954,11 +1529,175 @@ struct virtchnl_fdir_del {
u16 vsi_id; /* INPUT */
u16 pad;
u32 flow_id; /* INPUT */
- enum virtchnl_fdir_prgm_status status; /* OUTPUT */
+
+ /* see enum virtchnl_fdir_prgm_status; OUTPUT */
+ s32 status;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
+#define VIRTCHNL_1588_PTP_CAP_RX_TSTAMP BIT(1)
+#define VIRTCHNL_1588_PTP_CAP_READ_PHC BIT(2)
+
+/**
+ * struct virtchnl_ptp_caps - Defines the PTP caps available to the VF.
+ * @caps: On send, VF sets what capabilities it requests. On reply, PF
+ * indicates what has been enabled for this VF. The PF shall not set
+ * bits which were not requested by the VF.
+ * @rsvd: Reserved bits for future extension.
+ *
+ * Structure that defines the PTP capabilities available to the VF. The VF
+ * sends VIRTCHNL_OP_1588_PTP_GET_CAPS, and must fill in the ptp_caps field
+ * indicating what capabilities it is requesting. The PF will respond with the
+ * same message with the virtchnl_ptp_caps structure indicating what is
+ * enabled for the VF.
+ *
+ * VIRTCHNL_1588_PTP_CAP_RX_TSTAMP indicates that the VF receive queues have
+ * receive timestamps enabled in the flexible descriptors. Note that this
+ * requires a VF to also negotiate to enable advanced flexible descriptors in
+ * the receive path instead of the default legacy descriptor format.
+ *
+ * VIRTCHNL_1588_PTP_CAP_READ_PHC indicates that the VF may read the PHC time
+ * via the VIRTCHNL_OP_1588_PTP_GET_TIME command.
+ *
+ * Note that in the future, additional capability flags may be added which
+ * indicate additional extended support. All fields marked as reserved by this
+ * header will be set to zero. VF implementations should verify this to ensure
+ * that future extensions do not break compatibility.
+ */
+struct virtchnl_ptp_caps {
+ u32 caps;
+ u8 rsvd[44];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_ptp_caps);
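
A hedged sketch of the negotiation described above: the VF requests both capabilities and must then honor whatever subset the PF leaves set in the reply. vf_send_recv_msg() is a placeholder for a synchronous mailbox round trip:

    int vf_send_recv_msg(u32 opcode, u8 *req, u16 req_len,
    		     u8 *resp, u16 resp_len); /* hypothetical */

    static int vf_negotiate_ptp_caps(struct virtchnl_ptp_caps *resp)
    {
    	struct virtchnl_ptp_caps req = {
    		.caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
    			VIRTCHNL_1588_PTP_CAP_READ_PHC,
    	};

    	/* the PF replies with the same structure; only bits the VF asked
    	 * for may be set in resp->caps
    	 */
    	return vf_send_recv_msg(VIRTCHNL_OP_1588_PTP_GET_CAPS,
    				(u8 *)&req, sizeof(req),
    				(u8 *)resp, sizeof(*resp));
    }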
+
+/**
+ * struct virtchnl_phc_time - Contains the 64 bits of PHC clock time in ns.
+ * @time: PHC time in nanoseconds
+ * @rsvd: Reserved for future extension
+ *
+ * Structure received with VIRTCHNL_OP_1588_PTP_GET_TIME. Contains the 64 bits
+ * of PHC clock time in nanoseconds.
+ *
+ * VIRTCHNL_OP_1588_PTP_GET_TIME may be sent to request the current time of
+ * the PHC. This op is available in case direct access via the PHC registers
+ * is not available.
+ */
+struct virtchnl_phc_time {
+ u64 time;
+ u8 rsvd[8];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_time);
+
+struct virtchnl_shaper_bw {
+ /* Unit is Kbps */
+ u32 committed;
+ u32 peak;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw);
+
+/* VIRTCHNL_OP_GET_QOS_CAPS
+ * VF sends this message to get its QoS Caps, such as
+ * TC number, Arbiter and Bandwidth.
+ */
+struct virtchnl_qos_cap_elem {
+ u8 tc_num;
+ u8 tc_prio;
+#define VIRTCHNL_ABITER_STRICT 0
+#define VIRTCHNL_ABITER_ETS 2
+ u8 arbiter;
+#define VIRTCHNL_STRICT_WEIGHT 1
+ u8 weight;
+ enum virtchnl_bw_limit_type type;
+ union {
+ struct virtchnl_shaper_bw shaper;
+ u8 pad2[32];
+ };
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem);
+
+struct virtchnl_qos_cap_list {
+ u16 vsi_id;
+ u16 num_elem;
+ struct virtchnl_qos_cap_elem cap[];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_qos_cap_list);
+#define virtchnl_qos_cap_list_LEGACY_SIZEOF 44
+
+/* VIRTCHNL_OP_CONFIG_QUEUE_BW */
+struct virtchnl_queue_bw {
+ u16 queue_id;
+ u8 tc;
+ u8 pad;
+ struct virtchnl_shaper_bw shaper;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);
+
+struct virtchnl_queues_bw_cfg {
+ u16 vsi_id;
+ u16 num_queues;
+ struct virtchnl_queue_bw cfg[];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_queues_bw_cfg);
+#define virtchnl_queues_bw_cfg_LEGACY_SIZEOF 16
+
+enum virtchnl_queue_type {
+ VIRTCHNL_QUEUE_TYPE_TX = 0,
+ VIRTCHNL_QUEUE_TYPE_RX = 1,
+};
+
+/* structure to specify a chunk of contiguous queues */
+struct virtchnl_queue_chunk {
+ /* see enum virtchnl_queue_type */
+ s32 type;
+ u16 start_queue_id;
+ u16 num_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk);
+
+struct virtchnl_quanta_cfg {
+ u16 quanta_size;
+ u16 pad;
+ struct virtchnl_queue_chunk queue_select;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
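
Sketch: configuring the quanta size for a contiguous block of TX queues; vf_send_msg() is hypothetical:

    int vf_send_msg(u32 opcode, u8 *msg, size_t msglen); /* hypothetical */

    static int vf_set_tx_quanta(u16 start_qid, u16 num_queues, u16 quanta)
    {
    	struct virtchnl_quanta_cfg q = {
    		.quanta_size = quanta,
    		.queue_select = {
    			.type = VIRTCHNL_QUEUE_TYPE_TX,
    			.start_queue_id = start_qid,
    			.num_queues = num_queues,
    		},
    	};

    	return vf_send_msg(VIRTCHNL_OP_CONFIG_QUANTA, (u8 *)&q, sizeof(q));
    }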
+
+#define __vss_byone(p, member, count, old) \
+ (struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0)))
+
+#define __vss_byelem(p, member, count, old) \
+ (struct_size(p, member, count - 1) + (old - struct_size(p, member, 0)))
+
+#define __vss_full(p, member, count, old) \
+ (struct_size(p, member, count) + (old - struct_size(p, member, 0)))
+
+#define __vss(type, func, p, member, count) \
+ struct type: func(p, member, count, type##_LEGACY_SIZEOF)
+
+#define virtchnl_struct_size(p, m, c) \
+ _Generic(*p, \
+ __vss(virtchnl_vf_resource, __vss_full, p, m, c), \
+ __vss(virtchnl_vsi_queue_config_info, __vss_full, p, m, c), \
+ __vss(virtchnl_irq_map_info, __vss_full, p, m, c), \
+ __vss(virtchnl_ether_addr_list, __vss_full, p, m, c), \
+ __vss(virtchnl_vlan_filter_list, __vss_full, p, m, c), \
+ __vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c), \
+ __vss(virtchnl_tc_info, __vss_byelem, p, m, c), \
+ __vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c), \
+ __vss(virtchnl_qos_cap_list, __vss_byelem, p, m, c), \
+ __vss(virtchnl_queues_bw_cfg, __vss_byelem, p, m, c), \
+ __vss(virtchnl_rss_key, __vss_byone, p, m, c), \
+ __vss(virtchnl_rss_lut, __vss_byone, p, m, c))
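
These _Generic-based helpers reproduce the three legacy sizing conventions, so validation keeps accepting the message sizes old drivers send. A worked example for virtchnl_rss_key (__vss_byone, legacy size 6): virtchnl_struct_size(vrk, key, 40) = struct_size(vrk, key, 40) + (6 - 1 - struct_size(vrk, key, 0)) = 44 + 1 = 45, matching the old sizeof(struct virtchnl_rss_key) + key_len - 1 arithmetic. __vss_byelem covers structures whose legacy definition embedded one whole element (legacy size plus count - 1 extra elements), and __vss_full covers those where all count elements sit on top of the legacy size:

    /* sketch: compute the exact wire length of an RSS key message */
    static u32 rss_key_msg_len(struct virtchnl_rss_key *vrk)
    {
    	return virtchnl_struct_size(vrk, key, vrk->key_len);
    }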
+
/**
* virtchnl_vc_validate_vf_msg
* @ver: Virtchnl version info
@@ -973,7 +1712,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
u8 *msg, u16 msglen)
{
bool err_msg_format = false;
- int valid_len = 0;
+ u32 valid_len = 0;
/* Validate message length. */
switch (v_opcode) {
@@ -993,24 +1732,23 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
valid_len = sizeof(struct virtchnl_rxq_info);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ valid_len = virtchnl_vsi_queue_config_info_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_vsi_queue_config_info *vqc =
(struct virtchnl_vsi_queue_config_info *)msg;
- valid_len += (vqc->num_queue_pairs *
- sizeof(struct
- virtchnl_queue_pair_info));
+ valid_len = virtchnl_struct_size(vqc, qpair,
+ vqc->num_queue_pairs);
if (vqc->num_queue_pairs == 0)
err_msg_format = true;
}
break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
- valid_len = sizeof(struct virtchnl_irq_map_info);
+ valid_len = virtchnl_irq_map_info_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_irq_map_info *vimi =
(struct virtchnl_irq_map_info *)msg;
- valid_len += (vimi->num_vectors *
- sizeof(struct virtchnl_vector_map));
+ valid_len = virtchnl_struct_size(vimi, vecmap,
+ vimi->num_vectors);
if (vimi->num_vectors == 0)
err_msg_format = true;
}
@@ -1021,23 +1759,24 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
case VIRTCHNL_OP_DEL_ETH_ADDR:
- valid_len = sizeof(struct virtchnl_ether_addr_list);
+ valid_len = virtchnl_ether_addr_list_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_ether_addr_list *veal =
(struct virtchnl_ether_addr_list *)msg;
- valid_len += veal->num_elements *
- sizeof(struct virtchnl_ether_addr);
+ valid_len = virtchnl_struct_size(veal, list,
+ veal->num_elements);
if (veal->num_elements == 0)
err_msg_format = true;
}
break;
case VIRTCHNL_OP_ADD_VLAN:
case VIRTCHNL_OP_DEL_VLAN:
- valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ valid_len = virtchnl_vlan_filter_list_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
- valid_len += vfl->num_elements * sizeof(u16);
+ valid_len = virtchnl_struct_size(vfl, vlan_id,
+ vfl->num_elements);
if (vfl->num_elements == 0)
err_msg_format = true;
}
@@ -1048,7 +1787,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct virtchnl_queue_select);
break;
- case VIRTCHNL_OP_IWARP:
+ case VIRTCHNL_OP_RDMA:
/* These messages are opaque to us and will be validated in
* the RDMA client code. We just need to check for nonzero
* length. The firmware will enforce max length restrictions.
@@ -1058,41 +1797,43 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
else
err_msg_format = true;
break;
- case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+ case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
break;
- case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
- valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
+ case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
+ valid_len = virtchnl_rdma_qvlist_info_LEGACY_SIZEOF;
if (msglen >= valid_len) {
- struct virtchnl_iwarp_qvlist_info *qv =
- (struct virtchnl_iwarp_qvlist_info *)msg;
- if (qv->num_vectors == 0) {
- err_msg_format = true;
- break;
- }
- valid_len += ((qv->num_vectors - 1) *
- sizeof(struct virtchnl_iwarp_qv_info));
+ struct virtchnl_rdma_qvlist_info *qv =
+ (struct virtchnl_rdma_qvlist_info *)msg;
+
+ valid_len = virtchnl_struct_size(qv, qv_info,
+ qv->num_vectors);
}
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
- valid_len = sizeof(struct virtchnl_rss_key);
+ valid_len = virtchnl_rss_key_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
- valid_len += vrk->key_len - 1;
+ valid_len = virtchnl_struct_size(vrk, key,
+ vrk->key_len);
}
break;
case VIRTCHNL_OP_CONFIG_RSS_LUT:
- valid_len = sizeof(struct virtchnl_rss_lut);
+ valid_len = virtchnl_rss_lut_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_rss_lut *vrl =
(struct virtchnl_rss_lut *)msg;
- valid_len += vrl->lut_entries - 1;
+ valid_len = virtchnl_struct_size(vrl, lut,
+ vrl->lut_entries);
}
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ case VIRTCHNL_OP_CONFIG_RSS_HFUNC:
+ valid_len = sizeof(struct virtchnl_rss_hfunc);
+ break;
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
break;
- case VIRTCHNL_OP_SET_RSS_HENA:
- valid_len = sizeof(struct virtchnl_rss_hena);
+ case VIRTCHNL_OP_SET_RSS_HASHCFG:
+ valid_len = sizeof(struct virtchnl_rss_hashcfg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
@@ -1101,12 +1842,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
valid_len = sizeof(struct virtchnl_vf_res_request);
break;
case VIRTCHNL_OP_ENABLE_CHANNELS:
- valid_len = sizeof(struct virtchnl_tc_info);
+ valid_len = virtchnl_tc_info_LEGACY_SIZEOF;
if (msglen >= valid_len) {
struct virtchnl_tc_info *vti =
(struct virtchnl_tc_info *)msg;
- valid_len += (vti->num_tc - 1) *
- sizeof(struct virtchnl_channel_info);
+ valid_len = virtchnl_struct_size(vti, list,
+ vti->num_tc);
if (vti->num_tc == 0)
err_msg_format = true;
}
@@ -1114,11 +1855,11 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_DISABLE_CHANNELS:
break;
case VIRTCHNL_OP_ADD_CLOUD_FILTER:
- valid_len = sizeof(struct virtchnl_filter);
- break;
case VIRTCHNL_OP_DEL_CLOUD_FILTER:
valid_len = sizeof(struct virtchnl_filter);
break;
+ case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ break;
case VIRTCHNL_OP_ADD_RSS_CFG:
case VIRTCHNL_OP_DEL_RSS_CFG:
valid_len = sizeof(struct virtchnl_rss_cfg);
@@ -1129,6 +1870,65 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_DEL_FDIR_FILTER:
valid_len = sizeof(struct virtchnl_fdir_del);
break;
+ case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
+ break;
+ case VIRTCHNL_OP_ADD_VLAN_V2:
+ case VIRTCHNL_OP_DEL_VLAN_V2:
+ valid_len = virtchnl_vlan_filter_list_v2_LEGACY_SIZEOF;
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list_v2 *vfl =
+ (struct virtchnl_vlan_filter_list_v2 *)msg;
+
+ valid_len = virtchnl_struct_size(vfl, filters,
+ vfl->num_elements);
+
+ if (vfl->num_elements == 0) {
+ err_msg_format = true;
+ break;
+ }
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+ case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+ case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+ valid_len = sizeof(struct virtchnl_vlan_setting);
+ break;
+ case VIRTCHNL_OP_GET_QOS_CAPS:
+ break;
+ case VIRTCHNL_OP_CONFIG_QUEUE_BW:
+ valid_len = virtchnl_queues_bw_cfg_LEGACY_SIZEOF;
+ if (msglen >= valid_len) {
+ struct virtchnl_queues_bw_cfg *q_bw =
+ (struct virtchnl_queues_bw_cfg *)msg;
+
+ valid_len = virtchnl_struct_size(q_bw, cfg,
+ q_bw->num_queues);
+ if (q_bw->num_queues == 0) {
+ err_msg_format = true;
+ break;
+ }
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_QUANTA:
+ valid_len = sizeof(struct virtchnl_quanta_cfg);
+ if (msglen >= valid_len) {
+ struct virtchnl_quanta_cfg *q_quanta =
+ (struct virtchnl_quanta_cfg *)msg;
+
+ if (q_quanta->quanta_size == 0 ||
+ q_quanta->queue_select.num_queues == 0) {
+ err_msg_format = true;
+ break;
+ }
+ }
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+ valid_len = sizeof(struct virtchnl_ptp_caps);
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_TIME:
+ valid_len = sizeof(struct virtchnl_phc_time);
+ break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index fff9367a6348..0217c1073735 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -28,11 +28,6 @@ enum wb_state {
WB_start_all, /* nr_pages == 0 (all) work pending */
};
-enum wb_congested_state {
- WB_async_congested, /* The async (write) queue is getting full */
- WB_sync_congested, /* The sync queue is getting full */
-};
-
enum wb_stat_item {
WB_RECLAIMABLE,
WB_WRITEBACK,
@@ -68,6 +63,8 @@ enum wb_reason {
struct wb_completion {
atomic_t cnt;
wait_queue_head_t *waitq;
+ unsigned long progress_stamp; /* The jiffies when slow progress is detected */
+ unsigned long wait_start; /* The jiffies when waiting for the writeback work to finish */
};
#define __WB_COMPLETION_INIT(_waitq) \
@@ -103,6 +100,9 @@ struct wb_completion {
* change as blkcg is disabled and enabled higher up in the hierarchy, a wb
* is tested for blkcg after lookup and removed from index on mismatch so
* that a new wb for the combination can be created.
+ *
+ * Each bdi_writeback that is not embedded into the backing_dev_info must hold
+ * a reference to the parent backing_dev_info. See cgwb_create() for details.
*/
struct bdi_writeback {
struct backing_dev_info *bdi; /* our parent bdi */
@@ -116,10 +116,9 @@ struct bdi_writeback {
struct list_head b_dirty_time; /* time stamps are dirty */
spinlock_t list_lock; /* protects the b_* lists */
+ atomic_t writeback_inodes; /* number of inodes under writeback */
struct percpu_counter stat[NR_WB_STAT_ITEMS];
- unsigned long congested; /* WB_[a]sync_congested flags */
-
unsigned long bw_time_stamp; /* last time write bw is updated */
unsigned long dirtied_stamp;
unsigned long written_stamp; /* pages written at bw_time_stamp */
@@ -142,8 +141,7 @@ struct bdi_writeback {
spinlock_t work_lock; /* protects work_list & dwork scheduling */
struct list_head work_list;
struct delayed_work dwork; /* work item used for writeback */
-
- unsigned long dirty_sleep; /* last wait */
+ struct delayed_work bw_dwork; /* work item used for bandwidth estimate */
struct list_head bdi_node; /* anchored at bdi->wb_list */
@@ -154,6 +152,12 @@ struct bdi_writeback {
struct cgroup_subsys_state *blkcg_css; /* and blkcg */
struct list_head memcg_node; /* anchored at memcg->cgwb_list */
struct list_head blkcg_node; /* anchored at blkcg->cgwb_list */
+ struct list_head b_attached; /* attached inodes, protected by list_lock */
+ struct list_head offline_node; /* anchored at offline_cgwbs */
+ struct work_struct switch_work; /* work used to perform inode switching
+ * to this wb */
+ struct llist_head switch_wbs_ctxs; /* queued contexts for
+ * writeback switching */
union {
struct work_struct release_work;
@@ -166,7 +170,9 @@ struct backing_dev_info {
u64 id;
struct rb_node rb_node; /* keyed by ->id */
struct list_head bdi_list;
- unsigned long ra_pages; /* max readahead in PAGE_SIZE units */
+ /* max readahead in PAGE_SIZE units */
+ unsigned long __data_racy ra_pages;
+
unsigned long io_pages; /* max allowed IO size */
struct kref refcnt; /* Reference counter for the structure */
@@ -179,6 +185,11 @@ struct backing_dev_info {
* any dirty wbs, which is depended upon by bdi_has_dirty().
*/
atomic_long_t tot_write_bandwidth;
+ /*
+ * Jiffies when last process was dirty throttled on this bdi. Used by
+ * blk-wbt.
+ */
+ unsigned long last_bdp_sleep;
struct bdi_writeback wb; /* the root writeback info for this bdi */
struct list_head wb_list; /* list of all wbs */
@@ -200,14 +211,6 @@ struct backing_dev_info {
#endif
};
-enum {
- BLK_RW_ASYNC = 0,
- BLK_RW_SYNC = 1,
-};
-
-void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
-void set_bdi_congested(struct backing_dev_info *bdi, int sync);
-
struct wb_lock_cookie {
bool locked;
unsigned long flags;
@@ -239,8 +242,9 @@ static inline void wb_get(struct bdi_writeback *wb)
/**
- * wb_put - decrement a wb's refcount
+ * wb_put_many - decrement a wb's refcount
* @wb: bdi_writeback to put
+ * @nr: number of references to put
*/
-static inline void wb_put(struct bdi_writeback *wb)
+static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
{
if (WARN_ON_ONCE(!wb->bdi)) {
/*
@@ -251,7 +255,16 @@ static inline void wb_put(struct bdi_writeback *wb)
}
if (wb != &wb->bdi->wb)
- percpu_ref_put(&wb->refcnt);
+ percpu_ref_put_many(&wb->refcnt, nr);
+}
+
+/**
+ * wb_put - decrement a wb's refcount
+ * @wb: bdi_writeback to put
+ */
+static inline void wb_put(struct bdi_writeback *wb)
+{
+ wb_put_many(wb, 1);
}
/**
@@ -280,6 +293,10 @@ static inline void wb_put(struct bdi_writeback *wb)
{
}
+static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
+{
+}
+
static inline bool wb_dying(struct bdi_writeback *wb)
{
return false;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 44df4fcef65c..0c8342747cab 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -12,10 +12,8 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
-#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/writeback.h>
-#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>
@@ -40,7 +38,6 @@ struct backing_dev_info *bdi_alloc(int node_id);
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
-void wb_wakeup_delayed(struct bdi_writeback *wb);
void wb_wait_for_completion(struct wb_completion *done);
@@ -48,7 +45,6 @@ extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct workqueue_struct *bdi_wq;
-extern struct workqueue_struct *bdi_async_bio_wq;
static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
@@ -64,22 +60,12 @@ static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
return atomic_long_read(&bdi->tot_write_bandwidth);
}
-static inline void __add_wb_stat(struct bdi_writeback *wb,
+static inline void wb_stat_mod(struct bdi_writeback *wb,
enum wb_stat_item item, s64 amount)
{
percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}
-static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
-{
- __add_wb_stat(wb, item, 1);
-}
-
-static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
-{
- __add_wb_stat(wb, item, -1);
-}
-
static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
return percpu_counter_read_positive(&wb->stat[item]);
@@ -104,23 +90,33 @@ static inline unsigned long wb_stat_error(void)
#endif
}
+/* BDI ratio is expressed as part per 1000000 for finer granularity. */
+#define BDI_RATIO_SCALE 10000
+
+u64 bdi_get_min_bytes(struct backing_dev_info *bdi);
+u64 bdi_get_max_bytes(struct backing_dev_info *bdi);
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
+int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio);
+int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio);
+int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes);
+int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes);
+int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit);
/*
* Flags in backing_dev_info::capability
*
* BDI_CAP_WRITEBACK: Supports dirty page writeback, and dirty pages
* should contribute to accounting
- * BDI_CAP_WRITEBACK_ACCT: Automatically account writeback pages
* BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold
*/
#define BDI_CAP_WRITEBACK (1 << 0)
-#define BDI_CAP_WRITEBACK_ACCT (1 << 1)
-#define BDI_CAP_STRICTLIMIT (1 << 2)
+#define BDI_CAP_STRICTLIMIT (1 << 1)
extern struct backing_dev_info noop_backing_dev_info;
+int bdi_init(struct backing_dev_info *bdi);
+
/**
* writeback_in_progress - determine whether there is writeback in progress
* @wb: bdi_writeback of interest
@@ -133,40 +129,13 @@ static inline bool writeback_in_progress(struct bdi_writeback *wb)
return test_bit(WB_writeback_running, &wb->state);
}
-static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
-{
- struct super_block *sb;
-
- if (!inode)
- return &noop_backing_dev_info;
-
- sb = inode->i_sb;
-#ifdef CONFIG_BLOCK
- if (sb_is_blkdev_sb(sb))
- return I_BDEV(inode)->bd_bdi;
-#endif
- return sb->s_bdi;
-}
-
-static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
-{
- return wb->congested & cong_bits;
-}
-
-long congestion_wait(int sync, long timeout);
-long wait_iff_congested(int sync, long timeout);
+struct backing_dev_info *inode_to_bdi(struct inode *inode);
static inline bool mapping_can_writeback(struct address_space *mapping)
{
return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}
-static inline int bdi_sched_wait(void *word)
-{
- schedule();
- return 0;
-}
-
#ifdef CONFIG_CGROUP_WRITEBACK
struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
@@ -175,8 +144,7 @@ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
struct cgroup_subsys_state *memcg_css,
gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
-void wb_blkcg_offline(struct blkcg *blkcg);
-int inode_congested(struct inode *inode, int cong_bits);
+void wb_blkcg_offline(struct cgroup_subsys_state *css);
/**
* inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
@@ -258,18 +226,6 @@ wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
}
/**
- * inode_to_wb_is_valid - test whether an inode has a wb associated
- * @inode: inode of interest
- *
- * Returns %true if @inode has a wb associated. May be called without any
- * locking.
- */
-static inline bool inode_to_wb_is_valid(struct inode *inode)
-{
- return inode->i_wb;
-}
-
-/**
* inode_to_wb - determine the wb of an inode
* @inode: inode of interest
*
@@ -281,6 +237,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
WARN_ON_ONCE(debug_locks &&
+ (inode->i_sb->s_iflags & SB_I_CGROUPWB) &&
(!lockdep_is_held(&inode->i_lock) &&
!lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
!lockdep_is_held(&inode->i_wb->list_lock)));
@@ -288,6 +245,17 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
return inode->i_wb;
}
+static inline struct bdi_writeback *inode_to_wb_wbc(
+ struct inode *inode,
+ struct writeback_control *wbc)
+{
+ /*
+ * If wbc does not have inode attached, it means cgroup writeback was
+ * disabled when wbc started. Just use the default wb in that case.
+ */
+ return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
+}
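
An illustrative sketch of the intended call pattern: writeback accounting can use the wbc-attached wb when present and fall back safely when cgroup writeback was disabled (wb_stat_mod() is defined in backing-dev.h below):

    static void account_written_page(struct inode *inode,
    				 struct writeback_control *wbc)
    {
    	struct bdi_writeback *wb = inode_to_wb_wbc(inode, wbc);

    	/* safe either way: resolves to the bdi's root wb when wbc->wb
    	 * is NULL
    	 */
    	wb_stat_mod(wb, WB_WRITEBACK, 1);
    }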
+
/**
* unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
* @inode: target inode
@@ -309,10 +277,11 @@ unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
rcu_read_lock();
/*
- * Paired with store_release in inode_switch_wbs_work_fn() and
+ * Paired with a release fence in inode_do_switch_wbs() and
* ensures that we see the new wb if we see cleared I_WB_SWITCH.
*/
- cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
+ cookie->locked = inode_state_read_once(inode) & I_WB_SWITCH;
+ smp_rmb();
if (unlikely(cookie->locked))
xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);
@@ -356,16 +325,19 @@ wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
return &bdi->wb;
}
-static inline bool inode_to_wb_is_valid(struct inode *inode)
+static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
- return true;
+ return &inode_to_bdi(inode)->wb;
}
-static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+static inline struct bdi_writeback *inode_to_wb_wbc(
+ struct inode *inode,
+ struct writeback_control *wbc)
{
- return &inode_to_bdi(inode)->wb;
+ return inode_to_wb(inode);
}
+
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
@@ -381,54 +353,12 @@ static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}
-static inline void wb_blkcg_offline(struct blkcg *blkcg)
-{
-}
-
-static inline int inode_congested(struct inode *inode, int cong_bits)
+static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
- return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
}
#endif /* CONFIG_CGROUP_WRITEBACK */
-static inline int inode_read_congested(struct inode *inode)
-{
- return inode_congested(inode, 1 << WB_sync_congested);
-}
-
-static inline int inode_write_congested(struct inode *inode)
-{
- return inode_congested(inode, 1 << WB_async_congested);
-}
-
-static inline int inode_rw_congested(struct inode *inode)
-{
- return inode_congested(inode, (1 << WB_sync_congested) |
- (1 << WB_async_congested));
-}
-
-static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits)
-{
- return wb_congested(&bdi->wb, cong_bits);
-}
-
-static inline int bdi_read_congested(struct backing_dev_info *bdi)
-{
- return bdi_congested(bdi, 1 << WB_sync_congested);
-}
-
-static inline int bdi_write_congested(struct backing_dev_info *bdi)
-{
- return bdi_congested(bdi, 1 << WB_async_congested);
-}
-
-static inline int bdi_rw_congested(struct backing_dev_info *bdi)
-{
- return bdi_congested(bdi, (1 << WB_sync_congested) |
- (1 << WB_async_congested));
-}
-
const char *bdi_dev_name(struct backing_dev_info *bdi);
#endif /* _LINUX_BACKING_DEV_H */
diff --git a/include/linux/backing-file.h b/include/linux/backing-file.h
new file mode 100644
index 000000000000..1476a6ed1bfd
--- /dev/null
+++ b/include/linux/backing-file.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common helpers for stackable filesystems and backing files.
+ *
+ * Copyright (C) 2023 CTERA Networks.
+ */
+
+#ifndef _LINUX_BACKING_FILE_H
+#define _LINUX_BACKING_FILE_H
+
+#include <linux/file.h>
+#include <linux/uio.h>
+#include <linux/fs.h>
+
+struct backing_file_ctx {
+ const struct cred *cred;
+ void (*accessed)(struct file *file);
+ void (*end_write)(struct kiocb *iocb, ssize_t);
+};
+
+struct file *backing_file_open(const struct path *user_path, int flags,
+ const struct path *real_path,
+ const struct cred *cred);
+struct file *backing_tmpfile_open(const struct path *user_path, int flags,
+ const struct path *real_parentpath,
+ umode_t mode, const struct cred *cred);
+ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter,
+ struct kiocb *iocb, int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_splice_read(struct file *in, struct kiocb *iocb,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags,
+ struct backing_file_ctx *ctx);
+ssize_t backing_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *out, struct kiocb *iocb,
+ size_t len, unsigned int flags,
+ struct backing_file_ctx *ctx);
+int backing_file_mmap(struct file *file, struct vm_area_struct *vma,
+ struct backing_file_ctx *ctx);
+
+#endif /* _LINUX_BACKING_FILE_H */
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 614653e07e3a..f29a9ef1052e 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -10,9 +10,8 @@
#define _LINUX_BACKLIGHT_H
#include <linux/device.h>
-#include <linux/fb.h>
#include <linux/mutex.h>
-#include <linux/notifier.h>
+#include <linux/types.h>
/**
* enum backlight_update_reason - what method was used to update backlight
@@ -65,24 +64,6 @@ enum backlight_type {
BACKLIGHT_TYPE_MAX,
};
-/**
- * enum backlight_notification - the type of notification
- *
- * The notifications that is used for notification sent to the receiver
- * that registered notifications using backlight_register_notifier().
- */
-enum backlight_notification {
- /**
- * @BACKLIGHT_REGISTERED: The backlight device is registered.
- */
- BACKLIGHT_REGISTERED,
-
- /**
- * @BACKLIGHT_UNREGISTERED: The backlight revice is unregistered.
- */
- BACKLIGHT_UNREGISTERED,
-};
-
/** enum backlight_scale - the type of scale used for brightness values
*
* The type of scale used for brightness values.
@@ -110,7 +91,6 @@ enum backlight_scale {
};
struct backlight_device;
-struct fb_info;
/**
* struct backlight_ops - backlight operations
@@ -160,18 +140,18 @@ struct backlight_ops {
int (*get_brightness)(struct backlight_device *);
/**
- * @check_fb: Check the framebuffer device.
+ * @controls_device: Check against the display device
*
- * Check if given framebuffer device is the one bound to this backlight.
- * This operation is optional and if not implemented it is assumed that the
- * fbdev is always the one bound to the backlight.
+ * Check if the backlight controls the given display device. This
+ * operation is optional and if not implemented it is assumed that
+ * the display is always the one controlled by the backlight.
*
* RETURNS:
*
- * If info is NULL or the info matches the fbdev bound to the backlight return true.
- * If info does not match the fbdev bound to the backlight return false.
+ * If display_dev is NULL or display_dev matches the device controlled by
+ * the backlight, return true. Otherwise return false.
*/
- int (*check_fb)(struct backlight_device *bd, struct fb_info *info);
+ bool (*controls_device)(struct backlight_device *bd, struct device *display_dev);
};
/**
@@ -209,33 +189,18 @@ struct backlight_properties {
* attribute: /sys/class/backlight/<backlight>/bl_power
* When the power property is updated update_status() is called.
*
- * The possible values are: (0: full on, 1 to 3: power saving
- * modes; 4: full off), see FB_BLANK_XXX.
+ * The possible values are: (0: full on, 4: full off), see
+ * BACKLIGHT_POWER constants.
*
- * When the backlight device is enabled @power is set
- * to FB_BLANK_UNBLANK. When the backlight device is disabled
- * @power is set to FB_BLANK_POWERDOWN.
+ * When the backlight device is enabled, @power is set to
+ * BACKLIGHT_POWER_ON. When the backlight device is disabled,
+ * @power is set to BACKLIGHT_POWER_OFF.
*/
int power;
- /**
- * @fb_blank: The power state from the FBIOBLANK ioctl.
- *
- * When the FBIOBLANK ioctl is called @fb_blank is set to the
- * blank parameter and the update_status() operation is called.
- *
- * When the backlight device is enabled @fb_blank is set
- * to FB_BLANK_UNBLANK. When the backlight device is disabled
- * @fb_blank is set to FB_BLANK_POWERDOWN.
- *
- * Backlight drivers should avoid using this property. It has been
- * replaced by state & BL_CORE_FBLANK (although most drivers should
- * use backlight_is_blank() as the preferred means to get the blank
- * state).
- *
- * fb_blank is deprecated and will be removed.
- */
- int fb_blank;
+#define BACKLIGHT_POWER_ON (0)
+#define BACKLIGHT_POWER_OFF (4)
+#define BACKLIGHT_POWER_REDUCED (1) // deprecated; don't use in new code
/**
* @type: The type of backlight supported.
@@ -312,11 +277,6 @@ struct backlight_device {
const struct backlight_ops *ops;
/**
- * @fb_notif: The framebuffer notifier block
- */
- struct notifier_block fb_notif;
-
- /**
* @entry: List entry of all registered backlight devices
*/
struct list_head entry;
@@ -327,15 +287,7 @@ struct backlight_device {
struct device dev;
/**
- * @fb_bl_on: The state of individual fbdev's.
- *
- * Multiple fbdev's may share one backlight device. The fb_bl_on
- * records the state of the individual fbdev.
- */
- bool fb_bl_on[FB_MAX];
-
- /**
- * @use_count: The number of uses of fb_bl_on.
+ * @use_count: The number of unblanked displays.
*/
int use_count;
};
@@ -365,8 +317,7 @@ static inline int backlight_enable(struct backlight_device *bd)
if (!bd)
return 0;
- bd->props.power = FB_BLANK_UNBLANK;
- bd->props.fb_blank = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
bd->props.state &= ~BL_CORE_FBBLANK;
return backlight_update_status(bd);
@@ -381,8 +332,7 @@ static inline int backlight_disable(struct backlight_device *bd)
if (!bd)
return 0;
- bd->props.power = FB_BLANK_POWERDOWN;
- bd->props.fb_blank = FB_BLANK_POWERDOWN;
+ bd->props.power = BACKLIGHT_POWER_OFF;
bd->props.state |= BL_CORE_FBBLANK;
return backlight_update_status(bd);
@@ -395,15 +345,13 @@ static inline int backlight_disable(struct backlight_device *bd)
* Display is expected to be blank if any of these is true::
*
 * 1) if power is not UNBLANK
- * 2) if fb_blank is not UNBLANK
- * 3) if state indicate BLANK or SUSPENDED
+ * 2) if state indicates BLANK or SUSPENDED
*
* Returns true if display is expected to be blank, false otherwise.
*/
static inline bool backlight_is_blank(const struct backlight_device *bd)
{
- return bd->props.power != FB_BLANK_UNBLANK ||
- bd->props.fb_blank != FB_BLANK_UNBLANK ||
+ return bd->props.power != BACKLIGHT_POWER_ON ||
bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK);
}
@@ -440,13 +388,27 @@ void devm_backlight_device_unregister(struct device *dev,
struct backlight_device *bd);
void backlight_force_update(struct backlight_device *bd,
enum backlight_update_reason reason);
-int backlight_register_notifier(struct notifier_block *nb);
-int backlight_unregister_notifier(struct notifier_block *nb);
struct backlight_device *backlight_device_get_by_name(const char *name);
struct backlight_device *backlight_device_get_by_type(enum backlight_type type);
int backlight_device_set_brightness(struct backlight_device *bd,
unsigned long brightness);
+#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
+void backlight_notify_blank(struct backlight_device *bd,
+ struct device *display_dev,
+ bool fb_on, bool prev_fb_on);
+void backlight_notify_blank_all(struct device *display_dev,
+ bool fb_on, bool prev_fb_on);
+#else
+static inline void backlight_notify_blank(struct backlight_device *bd,
+ struct device *display_dev,
+ bool fb_on, bool prev_fb_on)
+{ }
+static inline void backlight_notify_blank_all(struct device *display_dev,
+ bool fb_on, bool prev_fb_on)
+{ }
+#endif
+
#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
/**
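To illustrate the reworked API shape, here is a minimal driver sketch using controls_device and the BACKLIGHT_POWER_* constants. The demo_panel structure and the brightness write are hypothetical; only backlight_ops, bl_get_data() and backlight_get_brightness() are real API.

	#include <linux/backlight.h>

	struct demo_panel {
		struct device *dev;	/* the display this backlight drives */
	};

	static int demo_write_brightness(struct demo_panel *panel, int level)
	{
		/* stand-in for the real PWM/register update */
		return 0;
	}

	static int demo_update_status(struct backlight_device *bd)
	{
		/* returns 0 when props.power != BACKLIGHT_POWER_ON or the
		 * state is blanked, so "off" needs no special-casing here */
		int level = backlight_get_brightness(bd);

		return demo_write_brightness(bl_get_data(bd), level);
	}

	static bool demo_controls_device(struct backlight_device *bd,
					 struct device *display_dev)
	{
		struct demo_panel *panel = bl_get_data(bd);

		/* NULL means "any display": claim it, per the kernel-doc above */
		return !display_dev || display_dev == panel->dev;
	}

	static const struct backlight_ops demo_ops = {
		.update_status	 = demo_update_status,
		.controls_device = demo_controls_device,
	};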
diff --git a/include/linux/badblocks.h b/include/linux/badblocks.h
index 2426276b9bd3..996493917f36 100644
--- a/include/linux/badblocks.h
+++ b/include/linux/badblocks.h
@@ -15,6 +15,7 @@
#define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x) (((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x) (!!((x) & BB_ACK_MASK))
+#define BB_END(x) (BB_OFFSET(x) + BB_LEN(x))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))
/* Bad block numbers are stored sorted in a single page.
@@ -41,11 +42,17 @@ struct badblocks {
sector_t size; /* in sectors */
};
-int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
- sector_t *first_bad, int *bad_sectors);
-int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
- int acknowledged);
-int badblocks_clear(struct badblocks *bb, sector_t s, int sectors);
+struct badblocks_context {
+ sector_t start;
+ sector_t len;
+ int ack;
+};
+
+int badblocks_check(struct badblocks *bb, sector_t s, sector_t sectors,
+ sector_t *first_bad, sector_t *bad_sectors);
+bool badblocks_set(struct badblocks *bb, sector_t s, sector_t sectors,
+ int acknowledged);
+bool badblocks_clear(struct badblocks *bb, sector_t s, sector_t sectors);
void ack_all_badblocks(struct badblocks *bb);
ssize_t badblocks_show(struct badblocks *bb, char *page, int unack);
ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,
@@ -63,4 +70,27 @@ static inline void devm_exit_badblocks(struct device *dev, struct badblocks *bb)
}
badblocks_exit(bb);
}
+
+static inline int badblocks_full(struct badblocks *bb)
+{
+ return (bb->count >= MAX_BADBLOCKS);
+}
+
+static inline int badblocks_empty(struct badblocks *bb)
+{
+ return (bb->count == 0);
+}
+
+static inline void set_changed(struct badblocks *bb)
+{
+ if (bb->changed != 1)
+ bb->changed = 1;
+}
+
+static inline void clear_changed(struct badblocks *bb)
+{
+ if (bb->changed != 0)
+ bb->changed = 0;
+}
+
#endif
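As a usage sketch, a driver consuming the reworked sector_t-based prototypes might gate I/O like this. The return convention assumed in the comment is the long-standing one for badblocks_check().

	#include <linux/badblocks.h>

	/* 0 = range is clean, 1 = acknowledged bad blocks overlap it,
	 * -1 = unacknowledged bad blocks overlap it. */
	static bool demo_range_is_clean(struct badblocks *bb, sector_t s,
					sector_t sectors)
	{
		sector_t first_bad, bad_sectors;

		return badblocks_check(bb, s, sectors,
				       &first_bad, &bad_sectors) == 0;
	}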
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 338aa27e4773..7cfe48769239 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -4,12 +4,13 @@
*
* Common interface definitions for making balloon pages movable by compaction.
*
- * Balloon page migration makes use of the general non-lru movable page
+ * Balloon page migration makes use of the general "movable_ops page migration"
* feature.
*
* page->private is used to reference the responsible balloon device.
- * page->mapping is used in context of non-lru page migration to reference
- * the address space operations for page isolation/migration/compaction.
+ * Whether these pages have movable_ops, and which movable_ops apply,
+ * is derived from the page type (PageOffline()) combined with the
+ * PG_movable_ops flag (PageMovableOps()).
*
* As the page isolation scanning step a compaction thread does is a lockless
* procedure (from a page standpoint), it might bring some racy situations while
@@ -17,12 +18,10 @@
* and safely perform balloon's page compaction and migration we must, always,
* ensure following these simple rules:
*
- * i. when updating a balloon's page ->mapping element, strictly do it under
- * the following lock order, independently of the far superior
- * locking scheme (lru_lock, balloon_lock):
+ * i. Set the PG_movable_ops flag and page->private under the following
+ *    lock order:
* +-page_lock(page);
* +--spin_lock_irq(&b_dev_info->pages_lock);
- * ... page->mapping updates here ...
*
* ii. isolation or dequeueing procedure must remove the page from balloon
* device page list under b_dev_info->pages_lock.
@@ -57,7 +56,6 @@ struct balloon_dev_info {
struct list_head pages; /* Pages enqueued & handled to Host */
int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
struct page *page, enum migrate_mode mode);
- struct inode *inode;
};
extern struct page *balloon_page_alloc(void);
@@ -75,17 +73,19 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
spin_lock_init(&balloon->pages_lock);
INIT_LIST_HEAD(&balloon->pages);
balloon->migratepage = NULL;
- balloon->inode = NULL;
}
#ifdef CONFIG_BALLOON_COMPACTION
-extern const struct address_space_operations balloon_aops;
-extern bool balloon_page_isolate(struct page *page,
- isolate_mode_t mode);
-extern void balloon_page_putback(struct page *page);
-extern int balloon_page_migrate(struct address_space *mapping,
- struct page *newpage,
- struct page *page, enum migrate_mode mode);
+extern const struct movable_operations balloon_mops;
+/*
+ * balloon_page_device - get the b_dev_info descriptor for the balloon device
+ * that enqueues the given page.
+ */
+static inline struct balloon_dev_info *balloon_page_device(struct page *page)
+{
+ return (struct balloon_dev_info *)page_private(page);
+}
+#endif /* CONFIG_BALLOON_COMPACTION */
/*
* balloon_page_insert - insert a page into the balloon's page list and make
@@ -100,84 +100,34 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
__SetPageOffline(page);
- __SetPageMovable(page, balloon->inode->i_mapping);
- set_page_private(page, (unsigned long)balloon);
+ if (IS_ENABLED(CONFIG_BALLOON_COMPACTION)) {
+ SetPageMovableOps(page);
+ set_page_private(page, (unsigned long)balloon);
+ }
list_add(&page->lru, &balloon->pages);
}
-/*
- * balloon_page_delete - delete a page from balloon's page list and clear
- * the page->private assignement accordingly.
- * @page : page to be released from balloon's page list
- *
- * Caller must ensure the page is locked and the spin_lock protecting balloon
- * pages list is held before deleting a page from the balloon device.
- */
-static inline void balloon_page_delete(struct page *page)
+static inline gfp_t balloon_mapping_gfp_mask(void)
{
- __ClearPageOffline(page);
- __ClearPageMovable(page);
- set_page_private(page, 0);
- /*
- * No touch page.lru field once @page has been isolated
- * because VM is using the field.
- */
- if (!PageIsolated(page))
- list_del(&page->lru);
+ if (IS_ENABLED(CONFIG_BALLOON_COMPACTION))
+ return GFP_HIGHUSER_MOVABLE;
+ return GFP_HIGHUSER;
}
/*
- * balloon_page_device - get the b_dev_info descriptor for the balloon device
- * that enqueues the given page.
+ * balloon_page_finalize - prepare a balloon page that was removed from the
+ * balloon list for release to the page allocator
+ * @page: page to be released to the page allocator
+ *
+ * Caller must ensure that the page is locked.
*/
-static inline struct balloon_dev_info *balloon_page_device(struct page *page)
-{
- return (struct balloon_dev_info *)page_private(page);
-}
-
-static inline gfp_t balloon_mapping_gfp_mask(void)
-{
- return GFP_HIGHUSER_MOVABLE;
-}
-
-#else /* !CONFIG_BALLOON_COMPACTION */
-
-static inline void balloon_page_insert(struct balloon_dev_info *balloon,
- struct page *page)
-{
- __SetPageOffline(page);
- list_add(&page->lru, &balloon->pages);
-}
-
-static inline void balloon_page_delete(struct page *page)
-{
- __ClearPageOffline(page);
- list_del(&page->lru);
-}
-
-static inline bool balloon_page_isolate(struct page *page)
+static inline void balloon_page_finalize(struct page *page)
{
- return false;
+ if (IS_ENABLED(CONFIG_BALLOON_COMPACTION))
+ set_page_private(page, 0);
+ /* PageOffline is sticky until the page is freed to the buddy. */
}
-static inline void balloon_page_putback(struct page *page)
-{
- return;
-}
-
-static inline int balloon_page_migrate(struct page *newpage,
- struct page *page, enum migrate_mode mode)
-{
- return 0;
-}
-
-static inline gfp_t balloon_mapping_gfp_mask(void)
-{
- return GFP_HIGHUSER;
-}
-
-#endif /* CONFIG_BALLOON_COMPACTION */
-
/*
* balloon_page_push - insert a page into a page list.
* @head : pointer to list
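Putting the reworked helpers together, a deflate path might now look roughly like the sketch below. balloon_page_dequeue() is declared elsewhere in this header and removes a page under pages_lock per rule ii above; any further locking a real driver needs is elided here.

	#include <linux/balloon_compaction.h>

	static void demo_balloon_deflate_one(struct balloon_dev_info *b_dev_info)
	{
		struct page *page = balloon_page_dequeue(b_dev_info);

		if (!page)
			return;

		balloon_page_finalize(page);	/* clears page->private */
		put_page(page);	/* PageOffline stays set until the page is freed */
	}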
diff --git a/include/linux/base64.h b/include/linux/base64.h
new file mode 100644
index 000000000000..a2c6c9222da3
--- /dev/null
+++ b/include/linux/base64.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * base64 encoding, lifted from fs/crypto/fname.c.
+ */
+
+#ifndef _LINUX_BASE64_H
+#define _LINUX_BASE64_H
+
+#include <linux/types.h>
+
+enum base64_variant {
+ BASE64_STD, /* RFC 4648 (standard) */
+ BASE64_URLSAFE, /* RFC 4648 (base64url) */
+ BASE64_IMAP, /* RFC 3501 */
+};
+
+#define BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)
+
+int base64_encode(const u8 *src, int len, char *dst, bool padding, enum base64_variant variant);
+int base64_decode(const char *src, int len, u8 *dst, bool padding, enum base64_variant variant);
+
+#endif /* _LINUX_BASE64_H */
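A round-trip sketch through this API, sizing the encode buffer with BASE64_CHARS(). It assumes encode/decode return the produced length (negative on a decode error) and uses the unpadded form, for which BASE64_CHARS() is exact.

	#include <linux/base64.h>
	#include <linux/errno.h>
	#include <linux/string.h>

	static int demo_base64_roundtrip(void)
	{
		static const u8 src[] = "kernel";
		char enc[BASE64_CHARS(sizeof(src))];
		u8 dec[sizeof(src)];
		int enc_len, dec_len;

		enc_len = base64_encode(src, sizeof(src), enc, false, BASE64_STD);
		dec_len = base64_decode(enc, enc_len, dec, false, BASE64_STD);
		if (dec_len != (int)sizeof(src) || memcmp(src, dec, dec_len))
			return -EINVAL;

		return 0;
	}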
diff --git a/include/linux/bcd.h b/include/linux/bcd.h
index 118bea36d7d4..abbc8149178e 100644
--- a/include/linux/bcd.h
+++ b/include/linux/bcd.h
@@ -14,8 +14,12 @@
const_bin2bcd(x) : \
_bin2bcd(x))
+#define bcd_is_valid(x) \
+ const_bcd_is_valid(x)
+
#define const_bcd2bin(x) (((x) & 0x0f) + ((x) >> 4) * 10)
#define const_bin2bcd(x) ((((x) / 10) << 4) + (x) % 10)
+#define const_bcd_is_valid(x) (((x) & 0x0f) < 10 && ((x) >> 4) < 10)
unsigned _bcd2bin(unsigned char val) __attribute_const__;
unsigned char _bin2bcd(unsigned val) __attribute_const__;
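The new validity check slots naturally in front of bcd2bin() when reading hardware registers, e.g. in an RTC driver (the register value source is hypothetical):

	#include <linux/bcd.h>
	#include <linux/errno.h>

	static int demo_read_bcd_seconds(u8 reg)
	{
		if (!bcd_is_valid(reg))	/* rejects e.g. 0x5A: a nibble >= 10 */
			return -EINVAL;

		return bcd2bin(reg);	/* 0x59 -> 59 */
	}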
diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h
index 53b31f69b74a..e4b6ce953ddb 100644
--- a/include/linux/bcm47xx_nvram.h
+++ b/include/linux/bcm47xx_nvram.h
@@ -7,10 +7,10 @@
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_BCM47XX_NVRAM
+int bcm47xx_nvram_init_from_iomem(void __iomem *nvram_start, size_t res_size);
int bcm47xx_nvram_init_from_mem(u32 base, u32 lim);
int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len);
int bcm47xx_nvram_gpio_pin(const char *name);
@@ -20,6 +20,11 @@ static inline void bcm47xx_nvram_release_contents(char *nvram)
vfree(nvram);
};
#else
+static inline int bcm47xx_nvram_init_from_iomem(void __iomem *nvram_start,
+ size_t res_size)
+{
+ return -ENOTSUPP;
+}
static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
{
return -ENOTSUPP;
diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h
index f8254fd53e15..40a7da3ef50e 100644
--- a/include/linux/bcm47xx_sprom.h
+++ b/include/linux/bcm47xx_sprom.h
@@ -5,8 +5,8 @@
#ifndef __BCM47XX_SPROM_H
#define __BCM47XX_SPROM_H
+#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/vmalloc.h>
struct ssb_sprom;
diff --git a/include/linux/bcm963xx_nvram.h b/include/linux/bcm963xx_nvram.h
index c8c7f01159fe..48830bf18042 100644
--- a/include/linux/bcm963xx_nvram.h
+++ b/include/linux/bcm963xx_nvram.h
@@ -81,25 +81,21 @@ static int __maybe_unused bcm963xx_nvram_checksum(
const struct bcm963xx_nvram *nvram,
u32 *expected_out, u32 *actual_out)
{
+ const u32 zero = 0;
u32 expected, actual;
size_t len;
if (nvram->version <= 4) {
expected = nvram->checksum_v4;
- len = BCM963XX_NVRAM_V4_SIZE - sizeof(u32);
+ len = BCM963XX_NVRAM_V4_SIZE;
} else {
expected = nvram->checksum_v5;
- len = BCM963XX_NVRAM_V5_SIZE - sizeof(u32);
+ len = BCM963XX_NVRAM_V5_SIZE;
}
- /*
- * Calculate the CRC32 value for the nvram with a checksum value
- * of 0 without modifying or copying the nvram by combining:
- * - The CRC32 of the nvram without the checksum value
- * - The CRC32 of a zero checksum value (which is also 0)
- */
- actual = crc32_le_combine(
- crc32_le(~0, (u8 *)nvram, len), 0, sizeof(u32));
+ /* Calculate the CRC32 of the nvram with the checksum field set to 0. */
+ actual = crc32_le(~0, nvram, len - sizeof(u32));
+ actual = crc32_le(actual, &zero, sizeof(u32));
if (expected_out)
*expected_out = expected;
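A caller-side sketch for the helper above; it assumes the function returns 0 when the stored and computed checksums agree, with the out-parameters used only for diagnostics.

	#include <linux/bcm963xx_nvram.h>
	#include <linux/printk.h>

	static int demo_check_nvram(const struct bcm963xx_nvram *nvram)
	{
		u32 expected, actual;
		int ret = bcm963xx_nvram_checksum(nvram, &expected, &actual);

		if (ret)
			pr_warn("nvram checksum mismatch: 0x%08x != 0x%08x\n",
				expected, actual);
		return ret;
	}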
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index d35b9206096d..0cb6638b55e5 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -3,7 +3,8 @@
#define LINUX_BCMA_DRIVER_CC_H_
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/platform_data/brcmnand.h>
+#include <linux/gpio/driver.h>
/** ChipCommon core registers. **/
#define BCMA_CC_ID 0x0000
@@ -270,6 +271,7 @@
#define BCMA_CC_SROM_CONTROL_OP_WRDIS 0x40000000
#define BCMA_CC_SROM_CONTROL_OP_WREN 0x60000000
#define BCMA_CC_SROM_CONTROL_OTPSEL 0x00000010
+#define BCMA_CC_SROM_CONTROL_OTP_PRESENT 0x00000020
#define BCMA_CC_SROM_CONTROL_LOCK 0x00000008
#define BCMA_CC_SROM_CONTROL_SIZE_MASK 0x00000006
#define BCMA_CC_SROM_CONTROL_SIZE_1K 0x00000000
@@ -599,6 +601,10 @@ struct bcma_sflash {
#ifdef CONFIG_BCMA_NFLASH
struct bcma_nflash {
+ /* Must be the first member for the brcmnand driver to
 * dereference that structure.
+ */
+ struct brcmnand_platform_data brcmnand_info;
bool present;
bool boot; /* This is the flash the SoC boots from */
};
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 68da8dba5162..dba41b65ae0d 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -203,7 +203,7 @@ struct pci_dev;
#define BCMA_CORE_PCI_MDIO_RXCTRL0 0x840
/* PCIE Root Capability Register bits (Host mode only) */
-#define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001
+#define BCMA_CORE_PCI_RC_RRS_VISIBILITY 0x0001
struct bcma_drv_pci;
struct bcma_bus;
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 049cf9421d83..65abd5ab8836 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -8,6 +8,7 @@
#include <uapi/linux/binfmts.h>
struct filename;
+struct coredump_params;
#define CORENAME_MAX_SIZE 128
@@ -18,13 +19,13 @@ struct linux_binprm {
#ifdef CONFIG_MMU
struct vm_area_struct *vma;
unsigned long vma_pages;
+ unsigned long argmin; /* rlimit marker for copy_strings() */
#else
# define MAX_ARG_PAGES 32
struct page *page[MAX_ARG_PAGES];
#endif
struct mm_struct *mm;
unsigned long p; /* current top of mem */
- unsigned long argmin; /* rlimit marker for copy_strings() */
unsigned int
/* Should an execfd be passed to userspace? */
have_execfd:1,
@@ -41,10 +42,14 @@ struct linux_binprm {
* Set when errors can no longer be returned to the
* original userspace.
*/
- point_of_no_return:1;
-#ifdef __alpha__
- unsigned int taso:1;
-#endif
+ point_of_no_return:1,
+ /* Set when "comm" must come from the dentry. */
+ comm_from_dentry:1,
+ /*
+ * Set by user space to check executability according to the
+ * caller's environment.
+ */
+ is_check:1;
struct file *executable; /* Executable to pass to the interpreter */
struct file *interpreter;
struct file *file;
@@ -59,7 +64,7 @@ struct linux_binprm {
const char *fdpath; /* generated filename for execveat */
unsigned interp_flags;
int execfd; /* File descriptor of the executable */
- unsigned long loader, exec;
+ unsigned long exec;
struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. */
@@ -77,18 +82,6 @@ struct linux_binprm {
#define BINPRM_FLAGS_PRESERVE_ARGV0_BIT 3
#define BINPRM_FLAGS_PRESERVE_ARGV0 (1 << BINPRM_FLAGS_PRESERVE_ARGV0_BIT)
-/* Function parameter for binfmt->coredump */
-struct coredump_params {
- const kernel_siginfo_t *siginfo;
- struct pt_regs *regs;
- struct file *file;
- unsigned long limit;
- unsigned long mm_flags;
- loff_t written;
- loff_t pos;
- loff_t to_skip;
-};
-
/*
* This structure defines the functions that are used to load the binary formats that
* linux accepts.
@@ -97,11 +90,22 @@ struct linux_binfmt {
struct list_head lh;
struct module *module;
int (*load_binary)(struct linux_binprm *);
- int (*load_shlib)(struct file *);
+#ifdef CONFIG_COREDUMP
int (*core_dump)(struct coredump_params *cprm);
unsigned long min_coredump; /* minimal dump size */
+#endif
+} __randomize_layout;
+
+#if IS_ENABLED(CONFIG_BINFMT_MISC)
+struct binfmt_misc {
+ struct list_head entries;
+ rwlock_t entries_lock;
+ bool enabled;
} __randomize_layout;
+extern struct binfmt_misc init_binfmt_misc;
+#endif
+
extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
/* Registration of default binfmt handlers */
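For reference, a format handler registering against the restructured linux_binfmt might look like the sketch below; the handler body is omitted and the initcall level is illustrative.

	#include <linux/binfmts.h>
	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/module.h>

	static int demo_load_binary(struct linux_binprm *bprm)
	{
		/* inspect bprm->buf here; -ENOEXEC lets the next handler try */
		return -ENOEXEC;
	}

	static struct linux_binfmt demo_format = {
		.module		= THIS_MODULE,
		.load_binary	= demo_load_binary,
		/* .core_dump/.min_coredump now exist only under CONFIG_COREDUMP */
	};

	static int __init demo_binfmt_init(void)
	{
		register_binfmt(&demo_format);	/* wraps __register_binfmt(fmt, 0) */
		return 0;
	}
	core_initcall(demo_binfmt_init);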
diff --git a/include/linux/bio-integrity.h b/include/linux/bio-integrity.h
new file mode 100644
index 000000000000..21e4652dcfd2
--- /dev/null
+++ b/include/linux/bio-integrity.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BIO_INTEGRITY_H
+#define _LINUX_BIO_INTEGRITY_H
+
+#include <linux/bio.h>
+
+enum bip_flags {
+ BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
+ BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
+ BIP_DISK_NOCHECK = 1 << 2, /* disable disk integrity checking */
+ BIP_IP_CHECKSUM = 1 << 3, /* IP checksum */
+ BIP_COPY_USER = 1 << 4, /* Kernel bounce buffer in use */
+ BIP_CHECK_GUARD = 1 << 5, /* guard check */
+ BIP_CHECK_REFTAG = 1 << 6, /* reftag check */
+ BIP_CHECK_APPTAG = 1 << 7, /* apptag check */
+
+ BIP_MEMPOOL = 1 << 15, /* buffer backed by mempool */
+};
+
+struct bio_integrity_payload {
+ struct bvec_iter bip_iter;
+
+ unsigned short bip_vcnt; /* # of integrity bio_vecs */
+ unsigned short bip_max_vcnt; /* integrity bio_vec slots */
+ unsigned short bip_flags; /* control flags */
+ u16 app_tag; /* application tag value */
+
+ struct bio_vec *bip_vec;
+};
+
+#define BIP_CLONE_FLAGS (BIP_MAPPED_INTEGRITY | BIP_IP_CHECKSUM | \
+ BIP_CHECK_GUARD | BIP_CHECK_REFTAG | BIP_CHECK_APPTAG)
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+
+#define bip_for_each_vec(bvl, bip, iter) \
+ for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
+
+#define bio_for_each_integrity_vec(_bvl, _bio, _iter) \
+ for_each_bio(_bio) \
+ bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
+
+static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+{
+ if (bio->bi_opf & REQ_INTEGRITY)
+ return bio->bi_integrity;
+
+ return NULL;
+}
+
+static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+
+ if (bip)
+ return bip->bip_flags & flag;
+
+ return false;
+}
+
+static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
+{
+ return bip->bip_iter.bi_sector;
+}
+
+static inline void bip_set_seed(struct bio_integrity_payload *bip,
+ sector_t seed)
+{
+ bip->bip_iter.bi_sector = seed;
+}
+
+void bio_integrity_init(struct bio *bio, struct bio_integrity_payload *bip,
+ struct bio_vec *bvecs, unsigned int nr_vecs);
+struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
+ unsigned int nr);
+int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len,
+ unsigned int offset);
+int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter);
+int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta);
+void bio_integrity_unmap_user(struct bio *bio);
+bool bio_integrity_prep(struct bio *bio);
+void bio_integrity_advance(struct bio *bio, unsigned int bytes_done);
+void bio_integrity_trim(struct bio *bio);
+int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask);
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+
+static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+{
+ return NULL;
+}
+
+static inline int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
+{
+ return -EINVAL;
+}
+
+static inline int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta)
+{
+ return -EINVAL;
+}
+
+static inline void bio_integrity_unmap_user(struct bio *bio)
+{
+}
+
+static inline bool bio_integrity_prep(struct bio *bio)
+{
+ return true;
+}
+
+static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
+ gfp_t gfp_mask)
+{
+ return 0;
+}
+
+static inline void bio_integrity_advance(struct bio *bio,
+ unsigned int bytes_done)
+{
+}
+
+static inline void bio_integrity_trim(struct bio *bio)
+{
+}
+
+static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+{
+ return false;
+}
+
+static inline struct bio_integrity_payload *
+bio_integrity_alloc(struct bio *bio, gfp_t gfp, unsigned int nr)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
+ unsigned int len, unsigned int offset)
+{
+ return 0;
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+void bio_integrity_alloc_buf(struct bio *bio, bool zero_buffer);
+void bio_integrity_free_buf(struct bio_integrity_payload *bip);
+
+#endif /* _LINUX_BIO_INTEGRITY_H */
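A consumer should query the payload through the accessors above rather than poking bi_integrity directly; both degrade cleanly when CONFIG_BLK_DEV_INTEGRITY=n, where bio_integrity() is always NULL and bio_integrity_flagged() always false. Sketch:

	#include <linux/bio-integrity.h>

	static bool demo_bio_checks_guard(struct bio *bio)
	{
		/* bio_integrity_flagged() already handles a NULL payload */
		return bio_integrity_flagged(bio, BIP_CHECK_GUARD);
	}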
diff --git a/include/linux/bio.h b/include/linux/bio.h
index a0b4cfdf62a4..ad2d57908c1c 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -5,31 +5,21 @@
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H
-#include <linux/highmem.h>
#include <linux/mempool.h>
-#include <linux/ioprio.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/uio.h>
-#define BIO_DEBUG
-
-#ifdef BIO_DEBUG
-#define BIO_BUG_ON BUG_ON
-#else
-#define BIO_BUG_ON
-#endif
-
#define BIO_MAX_VECS 256U
+#define BIO_MAX_INLINE_VECS UIO_MAXIOV
+
+struct queue_limits;
static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
return min(nr_segs, BIO_MAX_VECS);
}
-#define bio_prio(bio) (bio)->bi_ioprio
-#define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio)
-
#define bio_iter_iovec(bio, iter) \
bvec_iter_bvec((bio)->bi_io_vec, (iter))
@@ -44,9 +34,6 @@ static inline unsigned int bio_max_segs(unsigned int nr_segs)
#define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)
-#define bio_multiple_segments(bio) \
- ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
-
#define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))
@@ -78,26 +65,9 @@ static inline bool bio_no_advance_iter(const struct bio *bio)
{
return bio_op(bio) == REQ_OP_DISCARD ||
bio_op(bio) == REQ_OP_SECURE_ERASE ||
- bio_op(bio) == REQ_OP_WRITE_SAME ||
bio_op(bio) == REQ_OP_WRITE_ZEROES;
}
-static inline bool bio_mergeable(struct bio *bio)
-{
- if (bio->bi_opf & REQ_NOMERGE_FLAGS)
- return false;
-
- return true;
-}
-
-static inline unsigned int bio_cur_bytes(struct bio *bio)
-{
- if (bio_has_data(bio))
- return bio_iovec(bio).bv_len;
- else /* dataless requests such as discard */
- return bio->bi_iter.bi_size;
-}
-
static inline void *bio_data(struct bio *bio)
{
if (bio_has_data(bio))
@@ -106,25 +76,6 @@ static inline void *bio_data(struct bio *bio)
return NULL;
}
-/**
- * bio_full - check if the bio is full
- * @bio: bio to check
- * @len: length of one segment to be added
- *
- * Return true if @bio is full and one segment with @len bytes can't be
- * added to the bio, otherwise return false
- */
-static inline bool bio_full(struct bio *bio, unsigned len)
-{
- if (bio->bi_vcnt >= bio->bi_max_vecs)
- return true;
-
- if (bio->bi_iter.bi_size > UINT_MAX - len)
- return true;
-
- return false;
-}
-
static inline bool bio_next_segment(const struct bio *bio,
struct bvec_iter_all *iter)
{
@@ -167,6 +118,28 @@ static inline void bio_advance_iter_single(const struct bio *bio,
bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}
+void __bio_advance(struct bio *, unsigned bytes);
+
+/**
+ * bio_advance - increment/complete a bio by some number of bytes
+ * @bio: bio to advance
+ * @nbytes: number of bytes to complete
+ *
+ * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
+ * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
+ * be updated on the last bvec as well.
+ *
+ * @bio will then represent the remaining, uncompleted portion of the io.
+ */
+static inline void bio_advance(struct bio *bio, unsigned int nbytes)
+{
+ if (nbytes == bio->bi_iter.bi_size) {
+ bio->bi_iter.bi_size = 0;
+ return;
+ }
+ __bio_advance(bio, nbytes);
+}
+
#define __bio_for_each_segment(bvl, bio, iter, start) \
for (iter = (start); \
(iter).bi_size && \
@@ -192,7 +165,7 @@ static inline void bio_advance_iter_single(const struct bio *bio,
*/
#define bio_for_each_bvec_all(bvl, bio, i) \
for (i = 0, bvl = bio_first_bvec_all(bio); \
- i < (bio)->bi_vcnt; i++, bvl++) \
+ i < (bio)->bi_vcnt; i++, bvl++)
#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
@@ -212,8 +185,6 @@ static inline unsigned bio_segments(struct bio *bio)
case REQ_OP_SECURE_ERASE:
case REQ_OP_WRITE_ZEROES:
return 0;
- case REQ_OP_WRITE_SAME:
- return 1;
default:
break;
}
@@ -256,7 +227,7 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
- return (bio->bi_flags & (1U << bit)) != 0;
+ return bio->bi_flags & (1U << bit);
}
static inline void bio_set_flag(struct bio *bio, unsigned int bit)
@@ -269,38 +240,6 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
bio->bi_flags &= ~(1U << bit);
}
-static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
-{
- *bv = bio_iovec(bio);
-}
-
-static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
-{
- struct bvec_iter iter = bio->bi_iter;
- int idx;
-
- if (unlikely(!bio_multiple_segments(bio))) {
- *bv = bio_iovec(bio);
- return;
- }
-
- bio_advance_iter(bio, &iter, iter.bi_size);
-
- if (!iter.bi_bvec_done)
- idx = iter.bi_idx - 1;
- else /* in the middle of bvec */
- idx = iter.bi_idx;
-
- *bv = bio->bi_io_vec[idx];
-
- /*
- * iter.bi_bvec_done records actual length of the last bvec
- * if this bio ends in the middle of one io vector
- */
- if (iter.bi_bvec_done)
- bv->bv_len = iter.bi_bvec_done;
-}
-
static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
@@ -312,76 +251,81 @@ static inline struct page *bio_first_page_all(struct bio *bio)
return bio_first_bvec_all(bio)->bv_page;
}
+static inline struct folio *bio_first_folio_all(struct bio *bio)
+{
+ return page_folio(bio_first_page_all(bio));
+}
+
static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
return &bio->bi_io_vec[bio->bi_vcnt - 1];
}
-enum bip_flags {
- BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
- BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
- BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */
- BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */
- BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */
-};
-
-/*
- * bio integrity payload
+/**
+ * struct folio_iter - State for iterating all folios in a bio.
+ * @folio: The current folio we're iterating. NULL after the last folio.
+ * @offset: The byte offset within the current folio.
+ * @length: The number of bytes in this iteration (will not cross folio
+ * boundary).
*/
-struct bio_integrity_payload {
- struct bio *bip_bio; /* parent bio */
-
- struct bvec_iter bip_iter;
-
- unsigned short bip_vcnt; /* # of integrity bio_vecs */
- unsigned short bip_max_vcnt; /* integrity bio_vec slots */
- unsigned short bip_flags; /* control flags */
-
- struct bvec_iter bio_iter; /* for rewinding parent bio */
-
- struct work_struct bip_work; /* I/O completion */
-
- struct bio_vec *bip_vec;
- struct bio_vec bip_inline_vecs[];/* embedded bvec array */
+struct folio_iter {
+ struct folio *folio;
+ size_t offset;
+ size_t length;
+ /* private: for use by the iterator */
+ struct folio *_next;
+ size_t _seg_count;
+ int _i;
};
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
+ int i)
{
- if (bio->bi_opf & REQ_INTEGRITY)
- return bio->bi_integrity;
+ struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
- return NULL;
-}
-
-static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
-{
- struct bio_integrity_payload *bip = bio_integrity(bio);
-
- if (bip)
- return bip->bip_flags & flag;
-
- return false;
-}
-
-static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
-{
- return bip->bip_iter.bi_sector;
-}
+ if (unlikely(i >= bio->bi_vcnt)) {
+ fi->folio = NULL;
+ return;
+ }
-static inline void bip_set_seed(struct bio_integrity_payload *bip,
- sector_t seed)
-{
- bip->bip_iter.bi_sector = seed;
+ fi->folio = page_folio(bvec->bv_page);
+ fi->offset = bvec->bv_offset +
+ PAGE_SIZE * folio_page_idx(fi->folio, bvec->bv_page);
+ fi->_seg_count = bvec->bv_len;
+ fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
+ fi->_next = folio_next(fi->folio);
+ fi->_i = i;
+}
+
+static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
+{
+ fi->_seg_count -= fi->length;
+ if (fi->_seg_count) {
+ fi->folio = fi->_next;
+ fi->offset = 0;
+ fi->length = min(folio_size(fi->folio), fi->_seg_count);
+ fi->_next = folio_next(fi->folio);
+ } else {
+ bio_first_folio(fi, bio, fi->_i + 1);
+ }
}
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
+/**
+ * bio_for_each_folio_all - Iterate over each folio in a bio.
+ * @fi: struct folio_iter which is updated for each folio.
+ * @bio: struct bio to iterate over.
+ */
+#define bio_for_each_folio_all(fi, bio) \
+ for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
-extern void bio_trim(struct bio *bio, int offset, int size);
+void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs);
+int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
+ unsigned *segs, unsigned max_bytes, unsigned len_align);
+u8 bio_seg_gap(struct request_queue *q, struct bio *prev, struct bio *next,
+ u8 gaps_bit);
/**
* bio_next_split - get next @sectors from a bio, splitting if necessary
@@ -390,7 +334,7 @@ extern struct bio *bio_split(struct bio *bio, int sectors,
* @gfp: gfp mask
* @bs: bio set to allocate from
*
- * Returns a bio representing the next @sectors of @bio - if the bio is smaller
+ * Return: a bio representing the next @sectors of @bio - if the bio is smaller
* than @sectors, returns the original bio unchanged.
*/
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
@@ -405,28 +349,32 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
enum {
BIOSET_NEED_BVECS = BIT(0),
BIOSET_NEED_RESCUER = BIT(1),
+ BIOSET_PERCPU_CACHE = BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
-extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
-struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs,
- struct bio_set *bs);
-struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
+struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
+ blk_opf_t opf, gfp_t gfp_mask,
+ struct bio_set *bs);
+struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);
-extern void __bio_clone_fast(struct bio *, struct bio *);
-extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
+struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
+ gfp_t gfp, struct bio_set *bs);
+int bio_init_clone(struct block_device *bdev, struct bio *bio,
+ struct bio *bio_src, gfp_t gfp);
extern struct bio_set fs_bio_set;
-static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs)
+static inline struct bio *bio_alloc(struct block_device *bdev,
+ unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
- return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
+ return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}
-extern blk_qc_t submit_bio(struct bio *);
+void submit_bio(struct bio *bio);
extern void bio_endio(struct bio *);
@@ -457,26 +405,54 @@ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
struct request_queue;
-extern int submit_bio_wait(struct bio *bio);
-extern void bio_advance(struct bio *, unsigned);
-
-extern void bio_init(struct bio *bio, struct bio_vec *table,
- unsigned short max_vecs);
+void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
+ unsigned short max_vecs, blk_opf_t opf);
+static inline void bio_init_inline(struct bio *bio, struct block_device *bdev,
+ unsigned short max_vecs, blk_opf_t opf)
+{
+ bio_init(bio, bdev, bio_inline_vecs(bio), max_vecs, opf);
+}
extern void bio_uninit(struct bio *);
-extern void bio_reset(struct bio *);
+void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);
-extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
-extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
- unsigned int, unsigned int);
-int bio_add_zone_append_page(struct bio *bio, struct page *page,
- unsigned int len, unsigned int offset);
-bool __bio_try_merge_page(struct bio *bio, struct page *page,
- unsigned int len, unsigned int off, bool *same_page);
+int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
+ unsigned off);
+bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
+ size_t len, size_t off);
void __bio_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int off);
-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
-void bio_release_pages(struct bio *bio, bool mark_dirty);
+void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
+ size_t off);
+void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len);
+
+/**
+ * bio_add_max_vecs - number of bio_vecs needed to add data to a bio
+ * @kaddr: kernel virtual address to add
+ * @len: length in bytes to add
+ *
+ * Calculate how many bio_vecs need to be allocated to add the kernel virtual
+ * address range in [@kaddr:@len] in the worst case.
+ */
+static inline unsigned int bio_add_max_vecs(void *kaddr, unsigned int len)
+{
+ if (is_vmalloc_addr(kaddr))
+ return DIV_ROUND_UP(offset_in_page(kaddr) + len, PAGE_SIZE);
+ return 1;
+}
+
+unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len);
+bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len);
+
+int submit_bio_wait(struct bio *bio);
+int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
+ size_t len, enum req_op op);
+
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
+ unsigned len_align_mask);
+
+void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter);
+void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
@@ -484,27 +460,19 @@ extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
-void bio_truncate(struct bio *bio, unsigned new_size);
void guard_bio_eod(struct bio *bio);
-void zero_fill_bio(struct bio *bio);
-
-extern const char *bio_devname(struct bio *bio, char *buffer);
-
-#define bio_set_dev(bio, bdev) \
-do { \
- bio_clear_flag(bio, BIO_REMAPPED); \
- if ((bio)->bi_bdev != (bdev)) \
- bio_clear_flag(bio, BIO_THROTTLED); \
- (bio)->bi_bdev = (bdev); \
- bio_associate_blkg(bio); \
-} while (0)
-
-#define bio_copy_dev(dst, src) \
-do { \
- bio_clear_flag(dst, BIO_REMAPPED); \
- (dst)->bi_bdev = (src)->bi_bdev; \
- bio_clone_blkg_association(dst, src); \
-} while (0)
+void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
+
+static inline void zero_fill_bio(struct bio *bio)
+{
+ zero_fill_bio_iter(bio, bio->bi_iter);
+}
+
+static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
+{
+ if (bio_flagged(bio, BIO_PAGE_PINNED))
+ __bio_release_pages(bio, mark_dirty);
+}
#define bio_dev(bio) \
disk_devt((bio)->bi_bdev->bd_disk)
@@ -514,6 +482,7 @@ void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
+void blkcg_punt_bio_submit(struct bio *bio);
#else /* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
@@ -521,48 +490,20 @@ static inline void bio_associate_blkg_from_css(struct bio *bio,
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
struct bio *src) { }
-#endif /* CONFIG_BLK_CGROUP */
-
-#ifdef CONFIG_HIGHMEM
-/*
- * remember never ever reenable interrupts between a bvec_kmap_irq and
- * bvec_kunmap_irq!
- */
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
-{
- unsigned long addr;
-
- /*
- * might not be a highmem page, but the preempt/irq count
- * balancing is a lot nicer this way
- */
- local_irq_save(*flags);
- addr = (unsigned long) kmap_atomic(bvec->bv_page);
-
- BUG_ON(addr & ~PAGE_MASK);
-
- return (char *) addr + bvec->bv_offset;
-}
-
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
-{
- unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
-
- kunmap_atomic((void *) ptr);
- local_irq_restore(*flags);
-}
-
-#else
-static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+static inline void blkcg_punt_bio_submit(struct bio *bio)
{
- return page_address(bvec->bv_page) + bvec->bv_offset;
+ submit_bio(bio);
}
+#endif /* CONFIG_BLK_CGROUP */
-static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
- *flags = 0;
+ bio_clear_flag(bio, BIO_REMAPPED);
+ if (bio->bi_bdev != bdev)
+ bio_clear_flag(bio, BIO_BPS_THROTTLED);
+ bio->bi_bdev = bdev;
+ bio_associate_blkg(bio);
}
-#endif
/*
* BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
@@ -637,6 +578,13 @@ static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
bl->tail = bl2->tail;
}
+static inline void bio_list_merge_init(struct bio_list *bl,
+ struct bio_list *bl2)
+{
+ bio_list_merge(bl, bl2);
+ bio_list_init(bl2);
+}
+
static inline void bio_list_merge_head(struct bio_list *bl,
struct bio_list *bl2)
{
@@ -703,12 +651,13 @@ struct bio_set {
struct kmem_cache *bio_slab;
unsigned int front_pad;
+ /*
+ * per-cpu bio alloc cache
+ */
+ struct bio_alloc_cache __percpu *cache;
+
mempool_t bio_pool;
mempool_t bvec_pool;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
- mempool_t bio_integrity_pool;
- mempool_t bvec_integrity_pool;
-#endif
unsigned int back_pad;
/*
@@ -719,6 +668,11 @@ struct bio_set {
struct bio_list rescue_list;
struct work_struct rescue_work;
struct workqueue_struct *rescue_workqueue;
+
+ /*
+ * Hot un-plug notifier for the per-cpu cache, if used
+ */
+ struct hlist_node cpuhp_dead;
};
static inline bool bioset_initialized(struct bio_set *bs)
@@ -726,88 +680,6 @@ static inline bool bioset_initialized(struct bio_set *bs)
return bs->bio_slab != NULL;
}
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-#define bip_for_each_vec(bvl, bip, iter) \
- for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
-
-#define bio_for_each_integrity_vec(_bvl, _bio, _iter) \
- for_each_bio(_bio) \
- bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
-
-extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
-extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
-extern bool bio_integrity_prep(struct bio *);
-extern void bio_integrity_advance(struct bio *, unsigned int);
-extern void bio_integrity_trim(struct bio *);
-extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
-extern int bioset_integrity_create(struct bio_set *, int);
-extern void bioset_integrity_free(struct bio_set *);
-extern void bio_integrity_init(void);
-
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-
-static inline void *bio_integrity(struct bio *bio)
-{
- return NULL;
-}
-
-static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
-{
- return 0;
-}
-
-static inline void bioset_integrity_free (struct bio_set *bs)
-{
- return;
-}
-
-static inline bool bio_integrity_prep(struct bio *bio)
-{
- return true;
-}
-
-static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
- gfp_t gfp_mask)
-{
- return 0;
-}
-
-static inline void bio_integrity_advance(struct bio *bio,
- unsigned int bytes_done)
-{
- return;
-}
-
-static inline void bio_integrity_trim(struct bio *bio)
-{
- return;
-}
-
-static inline void bio_integrity_init(void)
-{
- return;
-}
-
-static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
-{
- return false;
-}
-
-static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp,
- unsigned int nr)
-{
- return ERR_PTR(-EINVAL);
-}
-
-static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
- unsigned int len, unsigned int offset)
-{
- return 0;
-}
-
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-
/*
* Mark a bio as polled. Note that for async polled IO, the caller must
* expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
@@ -817,9 +689,38 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
*/
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
- bio->bi_opf |= REQ_HIPRI;
- if (!is_sync_kiocb(kiocb))
+ bio->bi_opf |= REQ_POLLED;
+ if (kiocb->ki_flags & IOCB_NOWAIT)
bio->bi_opf |= REQ_NOWAIT;
}
+static inline void bio_clear_polled(struct bio *bio)
+{
+ bio->bi_opf &= ~REQ_POLLED;
+}
+
+/**
+ * bio_is_zone_append - is this a zone append bio?
+ * @bio: bio to check
+ *
+ * Check if @bio is a zone append operation. Core block layer code and end_io
+ * handlers must use this instead of an open coded REQ_OP_ZONE_APPEND check
+ * because the block layer can rewrite REQ_OP_ZONE_APPEND to REQ_OP_WRITE if
+ * it is not natively supported.
+ */
+static inline bool bio_is_zone_append(struct bio *bio)
+{
+ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+ return false;
+ return bio_op(bio) == REQ_OP_ZONE_APPEND ||
+ bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
+}
+
+struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
+ unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
+struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new);
+
+struct bio *blk_alloc_discard_bio(struct block_device *bdev,
+ sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask);
+
#endif /* __LINUX_BIO_H */
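The signature churn above is easiest to see in one place: under the new API the bdev and operation flags are passed at allocation time instead of being set afterwards. A sketch of a simple synchronous single-page read:

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	static int demo_read_page(struct block_device *bdev, sector_t sector,
				  struct page *page)
	{
		struct bio *bio;
		int ret;

		/* bdev + opf now go into bio_alloc() directly */
		bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
		if (!bio)
			return -ENOMEM;

		bio->bi_iter.bi_sector = sector;
		if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
			bio_put(bio);
			return -EIO;
		}

		ret = submit_bio_wait(bio);	/* returns bi_status as an errno */
		bio_put(bio);
		return ret;
	}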
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index bbc4730a6505..c0989b5b0407 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -13,7 +13,7 @@
* Don't use this unless you really need to: spin_lock() and spin_unlock()
* are significantly faster.
*/
-static inline void bit_spin_lock(int bitnum, unsigned long *addr)
+static __always_inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
/*
* Assuming the lock is uncontended, this never enters
@@ -38,7 +38,7 @@ static inline void bit_spin_lock(int bitnum, unsigned long *addr)
/*
* Return true if it was acquired
*/
-static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+static __always_inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
@@ -54,7 +54,7 @@ static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
/*
* bit-based spin_unlock()
*/
-static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+static __always_inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
@@ -71,7 +71,7 @@ static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
* non-atomic version, which can be used eg. if the bit lock itself is
* protecting the rest of the flags in the word.
*/
-static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+static __always_inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
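Typical usage pins the lock bit inside an existing flags word, as in the sketch below (the structure and bit assignment are illustrative):

	#include <linux/bit_spinlock.h>

	#define DEMO_LOCK_BIT	0	/* bit 0 of @flags doubles as a spinlock */

	struct demo_obj {
		unsigned long flags;
		int value;
	};

	static void demo_set_value(struct demo_obj *obj, int v)
	{
		bit_spin_lock(DEMO_LOCK_BIT, &obj->flags);	/* disables preemption */
		obj->value = v;
		bit_spin_unlock(DEMO_LOCK_BIT, &obj->flags);
	}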
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 4e035aca6f7e..126dc5b380af 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -8,6 +8,7 @@
#define _LINUX_BITFIELD_H
#include <linux/build_bug.h>
+#include <linux/typecheck.h>
#include <asm/byteorder.h>
/*
@@ -16,9 +17,13 @@
* FIELD_{GET,PREP} macros take as first parameter shifted mask
* from which they extract the base mask and shift amount.
* Mask must be a compilation time constant.
+ * field_{get,prep} are variants that take a non-const mask.
*
* Example:
*
+ * #include <linux/bitfield.h>
+ * #include <linux/bits.h>
+ *
* #define REG_FIELD_A GENMASK(6, 0)
* #define REG_FIELD_B BIT(7)
* #define REG_FIELD_C GENMASK(15, 8)
@@ -35,26 +40,63 @@
* FIELD_PREP(REG_FIELD_D, 0x40);
*
* Modify:
- * reg &= ~REG_FIELD_C;
- * reg |= FIELD_PREP(REG_FIELD_C, c);
+ * FIELD_MODIFY(REG_FIELD_C, &reg, c);
*/
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
-#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
+#define __scalar_type_to_unsigned_cases(type) \
+ unsigned type: (unsigned type)0, \
+ signed type: (unsigned type)0
+
+#define __unsigned_scalar_typeof(x) typeof( \
+ _Generic((x), \
+ char: (unsigned char)0, \
+ __scalar_type_to_unsigned_cases(char), \
+ __scalar_type_to_unsigned_cases(short), \
+ __scalar_type_to_unsigned_cases(int), \
+ __scalar_type_to_unsigned_cases(long), \
+ __scalar_type_to_unsigned_cases(long long), \
+ default: (x)))
+
+#define __bf_cast_unsigned(type, x) ((__unsigned_scalar_typeof(type))(x))
+
+#define __BF_FIELD_CHECK_MASK(_mask, _val, _pfx) \
({ \
BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
_pfx "mask is not constant"); \
BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero"); \
BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
- ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
+ ~((_mask) >> __bf_shf(_mask)) & \
+ (0 + (_val)) : 0, \
_pfx "value too large for the field"); \
- BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \
- _pfx "type of reg too small for mask"); \
__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
(1ULL << __bf_shf(_mask))); \
})
+#define __BF_FIELD_CHECK_REG(mask, reg, pfx) \
+ BUILD_BUG_ON_MSG(__bf_cast_unsigned(mask, mask) > \
+ __bf_cast_unsigned(reg, ~0ull), \
+ pfx "type of reg too small for mask")
+
+#define __BF_FIELD_CHECK(mask, reg, val, pfx) \
+ ({ \
+ __BF_FIELD_CHECK_MASK(mask, val, pfx); \
+ __BF_FIELD_CHECK_REG(mask, reg, pfx); \
+ })
+
+#define __FIELD_PREP(mask, val, pfx) \
+ ({ \
+ __BF_FIELD_CHECK_MASK(mask, val, pfx); \
+ ((typeof(mask))(val) << __bf_shf(mask)) & (mask); \
+ })
+
+#define __FIELD_GET(mask, reg, pfx) \
+ ({ \
+ __BF_FIELD_CHECK_MASK(mask, 0U, pfx); \
+ (typeof(mask))(((reg) & (mask)) >> __bf_shf(mask)); \
+ })
+
/**
* FIELD_MAX() - produce the maximum value representable by a field
* @_mask: shifted mask defining the field's length and position
@@ -91,10 +133,36 @@
*/
#define FIELD_PREP(_mask, _val) \
({ \
- __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \
- ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
+ __BF_FIELD_CHECK_REG(_mask, 0ULL, "FIELD_PREP: "); \
+ __FIELD_PREP(_mask, _val, "FIELD_PREP: "); \
})
+#define __BF_CHECK_POW2(n) BUILD_BUG_ON_ZERO(((n) & ((n) - 1)) != 0)
+
+/**
+ * FIELD_PREP_CONST() - prepare a constant bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP_CONST() masks and shifts up the value. The result should
+ * be combined with other fields of the bitfield using logical OR.
+ *
+ * Unlike FIELD_PREP() this is a constant expression and can therefore
+ * be used in initializers. Error checking is less comfortable for this
+ * version, and non-constant masks cannot be used.
+ */
+#define FIELD_PREP_CONST(_mask, _val) \
+ ( \
+ /* mask must be non-zero */ \
+ BUILD_BUG_ON_ZERO((_mask) == 0) + \
+ /* check if value fits */ \
+ BUILD_BUG_ON_ZERO(~((_mask) >> __bf_shf(_mask)) & (_val)) + \
+ /* check if mask is contiguous */ \
+ __BF_CHECK_POW2((_mask) + (1ULL << __bf_shf(_mask))) + \
+ /* and create the value */ \
+ (((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask)) \
+ )
+
/**
* FIELD_GET() - extract a bitfield element
* @_mask: shifted mask defining the field's length and position
@@ -105,8 +173,25 @@
*/
#define FIELD_GET(_mask, _reg) \
({ \
- __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \
- (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
+ __BF_FIELD_CHECK_REG(_mask, _reg, "FIELD_GET: "); \
+ __FIELD_GET(_mask, _reg, "FIELD_GET: "); \
+ })
+
+/**
+ * FIELD_MODIFY() - modify a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_reg_p: pointer to the memory that should be updated
+ * @_val: value to store in the bitfield
+ *
+ * FIELD_MODIFY() modifies the set of bits in @_reg_p specified by @_mask,
+ * by replacing them with the bitfield value passed in as @_val.
+ */
+#define FIELD_MODIFY(_mask, _reg_p, _val) \
+ ({ \
+ typecheck_pointer(_reg_p); \
+ __BF_FIELD_CHECK(_mask, *(_reg_p), _val, "FIELD_MODIFY: "); \
+ *(_reg_p) &= ~(_mask); \
+ *(_reg_p) |= (((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask)); \
})
extern void __compiletime_error("value doesn't fit into mask")
@@ -125,14 +210,14 @@ static __always_inline u64 field_mask(u64 field)
}
#define field_max(field) ((typeof(field))field_mask(field))
#define ____MAKE_OP(type,base,to,from) \
-static __always_inline __##type type##_encode_bits(base v, base field) \
+static __always_inline __##type __must_check type##_encode_bits(base v, base field) \
{ \
if (__builtin_constant_p(v) && (v & ~field_mask(field))) \
__field_overflow(); \
return to((v & field_mask(field)) * field_multiplier(field)); \
} \
-static __always_inline __##type type##_replace_bits(__##type old, \
- base val, base field) \
+static __always_inline __##type __must_check type##_replace_bits(__##type old, \
+ base val, base field) \
{ \
return (old & ~to(field)) | type##_encode_bits(val, field); \
} \
@@ -141,7 +226,7 @@ static __always_inline void type##p_replace_bits(__##type *p, \
{ \
*p = (*p & ~to(field)) | type##_encode_bits(val, field); \
} \
-static __always_inline base type##_get_bits(__##type v, base field) \
+static __always_inline base __must_check type##_get_bits(__##type v, base field) \
{ \
return (from(v) & field)/field_multiplier(field); \
}
@@ -156,4 +241,62 @@ __MAKE_OP(64)
#undef __MAKE_OP
#undef ____MAKE_OP
+#define __field_prep(mask, val) \
+ ({ \
+ __auto_type __mask = (mask); \
+ typeof(__mask) __val = (val); \
+ unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \
+ __ffs(__mask) : __ffs64(__mask); \
+ (__val << __shift) & __mask; \
+ })
+
+#define __field_get(mask, reg) \
+ ({ \
+ __auto_type __mask = (mask); \
+ typeof(__mask) __reg = (reg); \
+ unsigned int __shift = BITS_PER_TYPE(__mask) <= 32 ? \
+ __ffs(__mask) : __ffs64(__mask); \
+ (__reg & __mask) >> __shift; \
+ })
+
+/**
+ * field_prep() - prepare a bitfield element
+ * @mask: shifted mask defining the field's length and position, must be
+ * non-zero
+ * @val: value to put in the field
+ *
+ * Return: field value masked and shifted to its final destination
+ *
+ * field_prep() masks and shifts up the value. The result should be
+ * combined with other fields of the bitfield using logical OR.
+ * Unlike FIELD_PREP(), @mask is not limited to a compile-time constant.
+ * Typical usage patterns are a value stored in a table, or calculated by
+ * shifting a constant by a variable number of bits.
+ * If you want to ensure that @mask is a compile-time constant, please use
+ * FIELD_PREP() directly instead.
+ */
+#define field_prep(mask, val) \
+ (__builtin_constant_p(mask) ? __FIELD_PREP(mask, val, "field_prep: ") \
+ : __field_prep(mask, val))
+
+/**
+ * field_get() - extract a bitfield element
+ * @mask: shifted mask defining the field's length and position, must be
+ * non-zero
+ * @reg: value of entire bitfield
+ *
+ * Return: extracted field value
+ *
+ * field_get() extracts the field specified by @mask from the
+ * bitfield passed in as @reg by masking and shifting it down.
+ * Unlike FIELD_GET(), @mask is not limited to a compile-time constant.
+ * Typical usage patterns are a value stored in a table, or calculated by
+ * shifting a constant by a variable number of bits.
+ * If you want to ensure that @mask is a compile-time constant, please use
+ * FIELD_GET() directly instead.
+ */
+#define field_get(mask, reg) \
+ (__builtin_constant_p(mask) ? __FIELD_GET(mask, reg, "field_get: ") \
+ : __field_get(mask, reg))
+
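A sketch of the run-time-mask case these helpers exist for, using a made-up table of per-lane masks:

	/* Hypothetical: the mask is selected at run time, not compile time. */
	static const u32 lane_masks[] = { GENMASK(3, 0), GENMASK(7, 4) };

	u32 encode_lane(unsigned int lane, u32 val)
	{
		return field_prep(lane_masks[lane], val);
	}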
#endif
diff --git a/include/linux/bitmap-str.h b/include/linux/bitmap-str.h
new file mode 100644
index 000000000000..53d3e1b32d3d
--- /dev/null
+++ b/include/linux/bitmap-str.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITMAP_STR_H
+#define __LINUX_BITMAP_STR_H
+
+#include <linux/types.h>
+
+int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits);
+int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits);
+int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp, int nmaskbits,
+ loff_t off, size_t count);
+int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp, int nmaskbits,
+ loff_t off, size_t count);
+int bitmap_parse(const char *buf, unsigned int buflen, unsigned long *dst, int nbits);
+int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits);
+int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
+ unsigned long *dst, int nbits);
+
+#endif /* __LINUX_BITMAP_STR_H */
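For orientation, a hedged sketch of a typical caller of the parsing helpers now declared in this header:

	DECLARE_BITMAP(mask, 64);
	int err;

	/* "3,5-7" sets bits 3, 5, 6 and 7. */
	err = bitmap_parselist("3,5-7", mask, 64);
	if (err)
		return err;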
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index a36cfcec4e77..b0395e4ccf90 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -6,9 +6,13 @@
#include <linux/align.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+#include <linux/find.h>
#include <linux/limits.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/bitmap-str.h>
struct device;
@@ -19,7 +23,7 @@ struct device;
*
* Function implementations generic to all architectures are in
 * lib/bitmap.c. Function implementations that are architecture
- * specific are in various include/asm-<arch>/bitops.h headers
+ * specific are in various arch/<arch>/include/asm/bitops.h headers
* and other arch/<arch> specific files.
*
* See lib/bitmap.c for more details.
@@ -41,6 +45,7 @@ struct device;
* bitmap_copy(dst, src, nbits) *dst = *src
* bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
* bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
+ * bitmap_weighted_or(dst, src1, src2, nbits) *dst = *src1 | *src2. Returns Hamming Weight of dst
* bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
* bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
* bitmap_complement(dst, src, nbits) *dst = ~(*src)
@@ -50,20 +55,18 @@ struct device;
* bitmap_empty(src, nbits) Are all bits zero in *src?
* bitmap_full(src, nbits) Are all bits set in *src?
* bitmap_weight(src, nbits) Hamming Weight: number set bits
+ * bitmap_weight_and(src1, src2, nbits) Hamming Weight of and'ed bitmap
+ * bitmap_weight_andnot(src1, src2, nbits) Hamming Weight of andnot'ed bitmap
* bitmap_set(dst, pos, nbits) Set specified bit area
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
* bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off) as above
- * bitmap_next_clear_region(map, &start, &end, nbits) Find next clear region
- * bitmap_next_set_region(map, &start, &end, nbits) Find next set region
- * bitmap_for_each_clear_region(map, rs, re, start, end)
- * Iterate over all clear regions
- * bitmap_for_each_set_region(map, rs, re, start, end)
- * Iterate over all set regions
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest
* bitmap_replace(dst, old, new, mask, nbits) *dst = (*old & ~(*mask)) | (*new & *mask)
+ * bitmap_scatter(dst, src, mask, nbits) *dst = map(dense, sparse)(src)
+ * bitmap_gather(dst, src, mask, nbits) *dst = map(sparse, dense)(src)
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
* bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
@@ -76,9 +79,15 @@ struct device;
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
* bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst
+ * bitmap_from_arr64(dst, buf, nbits) Copy nbits from u64[] buf to dst
* bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst
+ * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst
* bitmap_get_value8(map, start) Get 8bit value from map at start
* bitmap_set_value8(map, value, start) Set 8bit value to map at start
+ * bitmap_read(map, start, nbits) Read an nbits-sized value from
+ * map at start
+ * bitmap_write(map, value, start, nbits) Write an nbits-sized value to
+ * map at start
*
* Note, bitmap_zero() and bitmap_fill() operate over the region of
* unsigned longs, that is, bits behind bitmap till the unsigned long
@@ -123,8 +132,12 @@ struct device;
*/
unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
+unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node);
+unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node);
void bitmap_free(const unsigned long *bitmap);
+DEFINE_FREE(bitmap, unsigned long *, if (_T) bitmap_free(_T))
+
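The DEFINE_FREE() hook above wires bitmap_free() into <linux/cleanup.h> scope-based cleanup; a minimal sketch:

	unsigned long *bm __free(bitmap) = bitmap_zalloc(128, GFP_KERNEL);

	if (!bm)
		return -ENOMEM;
	/* use bm; bitmap_free(bm) runs automatically on scope exit */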
/* Managed variants of the above. */
unsigned long *devm_bitmap_alloc(struct device *dev,
unsigned int nbits, gfp_t flags);
@@ -135,8 +148,8 @@ unsigned long *devm_bitmap_zalloc(struct device *dev,
* lib/bitmap.c provides these functions:
*/
-int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
+bool __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
bool __pure __bitmap_or_equal(const unsigned long *src1,
const unsigned long *src2,
const unsigned long *src3,
@@ -149,22 +162,28 @@ void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
void bitmap_cut(unsigned long *dst, const unsigned long *src,
unsigned int first, unsigned int cut, unsigned int nbits);
-int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
+unsigned int __bitmap_weighted_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
-int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_replace(unsigned long *dst,
const unsigned long *old, const unsigned long *new,
const unsigned long *mask, unsigned int nbits);
-int __bitmap_intersects(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-int __bitmap_subset(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int nbits);
-int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
+bool __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+bool __bitmap_subset(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
+unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_set(unsigned long *map, unsigned int start, int len);
void __bitmap_clear(unsigned long *map, unsigned int start, int len);
@@ -187,25 +206,17 @@ unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
 * the bit offset of all zero areas this function finds is a multiple of that
* power of 2. A @align_mask of 0 means no alignment is required.
*/
-static inline unsigned long
-bitmap_find_next_zero_area(unsigned long *map,
- unsigned long size,
- unsigned long start,
- unsigned int nr,
- unsigned long align_mask)
+static __always_inline
+unsigned long bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask)
{
return bitmap_find_next_zero_area_off(map, size, start, nr,
align_mask, 0);
}
-int bitmap_parse(const char *buf, unsigned int buflen,
- unsigned long *dst, int nbits);
-int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
- unsigned long *dst, int nbits);
-int bitmap_parselist(const char *buf, unsigned long *maskp,
- int nmaskbits);
-int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
- unsigned long *dst, int nbits);
void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, unsigned int nbits);
int bitmap_bitremap(int oldbit,
@@ -214,55 +225,73 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, unsigned int bits);
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
unsigned int sz, unsigned int nbits);
-int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
-void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
-int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
-
-#ifdef __BIG_ENDIAN
-void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
-#else
-#define bitmap_copy_le bitmap_copy
-#endif
-unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
-int bitmap_print_to_pagebuf(bool list, char *buf,
- const unsigned long *maskp, int nmaskbits);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
-static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE)
+
+static __always_inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0, len);
+ unsigned int len = bitmap_size(nbits);
+
+ if (small_const_nbits(nbits))
+ *dst = 0;
+ else
+ memset(dst, 0, len);
}
-static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+static __always_inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memset(dst, 0xff, len);
+ unsigned int len = bitmap_size(nbits);
+
+ if (small_const_nbits(nbits))
+ *dst = ~0UL;
+ else
+ memset(dst, 0xff, len);
}
-static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
- unsigned int nbits)
+static __always_inline
+void bitmap_copy(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
- unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
- memcpy(dst, src, len);
+ unsigned int len = bitmap_size(nbits);
+
+ if (small_const_nbits(nbits))
+ *dst = *src;
+ else
+ memcpy(dst, src, len);
}
/*
* Copy bitmap and clear tail bits in last word.
*/
-static inline void bitmap_copy_clear_tail(unsigned long *dst,
- const unsigned long *src, unsigned int nbits)
+static __always_inline
+void bitmap_copy_clear_tail(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
bitmap_copy(dst, src, nbits);
if (nbits % BITS_PER_LONG)
dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
}
+static inline void bitmap_copy_and_extend(unsigned long *to,
+ const unsigned long *from,
+ unsigned int count, unsigned int size)
+{
+ unsigned int copy = BITS_TO_LONGS(count);
+
+ memcpy(to, from, copy * sizeof(long));
+ if (count % BITS_PER_LONG)
+ to[copy - 1] &= BITMAP_LAST_WORD_MASK(count);
+ memset(to + copy, 0, bitmap_size(size) - copy * sizeof(long));
+}
+
/*
- * On 32-bit systems bitmaps are represented as u32 arrays internally, and
- * therefore conversion is not needed when copying data from/to arrays of u32.
+ * On 32-bit systems bitmaps are represented as u32 arrays internally. On LE64
+ * machines the order of the hi and lo parts of numbers matches the bitmap
+ * structure. In both cases no conversion is needed when copying data from/to
+ * arrays of u32. But in the LE64 case, the typecast in bitmap_copy_clear_tail()
+ * may lead to out-of-bounds access. To avoid that, neither the LE nor the BE
+ * variant of 64-bit architectures uses bitmap_copy_clear_tail().
*/
#if BITS_PER_LONG == 64
void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
@@ -278,16 +307,32 @@ void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
(const unsigned long *) (bitmap), (nbits))
#endif
-static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+/*
+ * On 64-bit systems bitmaps are represented as u64 arrays internally. So,
+ * the conversion is not needed when copying data from/to arrays of u64.
+ */
+#if BITS_PER_LONG == 32
+void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits);
+void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
+#else
+#define bitmap_from_arr64(bitmap, buf, nbits) \
+ bitmap_copy_clear_tail((unsigned long *)(bitmap), (const unsigned long *)(buf), (nbits))
+#define bitmap_to_arr64(buf, bitmap, nbits) \
+ bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
+#endif
+
+static __always_inline
+bool bitmap_and(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_and(dst, src1, src2, nbits);
}
-static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+void bitmap_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 | *src2;
@@ -295,8 +340,21 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
__bitmap_or(dst, src1, src2, nbits);
}
-static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+unsigned int bitmap_weighted_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits)) {
+ *dst = *src1 | *src2;
+ return hweight_long(*dst & BITMAP_LAST_WORD_MASK(nbits));
+ } else {
+ return __bitmap_weighted_or(dst, src1, src2, nbits);
+ }
+}
+
+static __always_inline
+void bitmap_xor(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 ^ *src2;
@@ -304,16 +362,17 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
__bitmap_xor(dst, src1, src2, nbits);
}
-static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_andnot(dst, src1, src2, nbits);
}
-static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
- unsigned int nbits)
+static __always_inline
+void bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = ~(*src);
@@ -328,8 +387,8 @@ static inline void bitmap_complement(unsigned long *dst, const unsigned long *sr
#endif
#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
-static inline int bitmap_equal(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_equal(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
@@ -348,10 +407,9 @@ static inline int bitmap_equal(const unsigned long *src1,
*
* Returns: True if (*@src1 | *@src2) == *@src3, false otherwise
*/
-static inline bool bitmap_or_equal(const unsigned long *src1,
- const unsigned long *src2,
- const unsigned long *src3,
- unsigned int nbits)
+static __always_inline
+bool bitmap_or_equal(const unsigned long *src1, const unsigned long *src2,
+ const unsigned long *src3, unsigned int nbits)
{
if (!small_const_nbits(nbits))
return __bitmap_or_equal(src1, src2, src3, nbits);
@@ -359,8 +417,8 @@ static inline bool bitmap_or_equal(const unsigned long *src1,
return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits));
}
-static inline int bitmap_intersects(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_intersects(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
@@ -368,8 +426,8 @@ static inline int bitmap_intersects(const unsigned long *src1,
return __bitmap_intersects(src1, src2, nbits);
}
-static inline int bitmap_subset(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static __always_inline
+bool bitmap_subset(const unsigned long *src1, const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
		return !((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
@@ -377,7 +435,8 @@ static inline int bitmap_subset(const unsigned long *src1,
return __bitmap_subset(src1, src2, nbits);
}
-static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
+static __always_inline
+bool bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
		return !(*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -385,7 +444,8 @@ static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
return find_first_bit(src, nbits) == nbits;
}
-static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
+static __always_inline
+bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
		return !(~(*src) & BITMAP_LAST_WORD_MASK(nbits));
@@ -393,18 +453,39 @@ static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
return find_first_zero_bit(src, nbits) == nbits;
}
-static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
+static __always_inline
+unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
return __bitmap_weight(src, nbits);
}
-static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
- unsigned int nbits)
+static __always_inline
+unsigned long bitmap_weight_and(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return hweight_long(*src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits));
+ return __bitmap_weight_and(src1, src2, nbits);
+}
+
+static __always_inline
+unsigned long bitmap_weight_andnot(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return hweight_long(*src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits));
+ return __bitmap_weight_andnot(src1, src2, nbits);
+}
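A small sketch of the new weight helpers (values illustrative):

	DECLARE_BITMAP(a, 8) = { 0x0f };	/* bits 0-3 set */
	DECLARE_BITMAP(b, 8) = { 0x06 };	/* bits 1-2 set */

	/* a & b keeps bits 1-2; a & ~b keeps bits 0 and 3 */
	WARN_ON(bitmap_weight_and(a, b, 8) != 2);
	WARN_ON(bitmap_weight_andnot(a, b, 8) != 2);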
+
+static __always_inline
+void bitmap_set(unsigned long *map, unsigned int start, unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__set_bit(start, map);
+ else if (small_const_nbits(start + nbits))
+ *map |= GENMASK(start + nbits - 1, start);
else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
@@ -414,11 +495,13 @@ static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
__bitmap_set(map, start, nbits);
}
-static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
- unsigned int nbits)
+static __always_inline
+void bitmap_clear(unsigned long *map, unsigned int start, unsigned int nbits)
{
if (__builtin_constant_p(nbits) && nbits == 1)
__clear_bit(start, map);
+ else if (small_const_nbits(start + nbits))
+ *map &= ~GENMASK(start + nbits - 1, start);
else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
@@ -428,8 +511,9 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
__bitmap_clear(map, start, nbits);
}
-static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits)
+static __always_inline
+void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
@@ -437,8 +521,9 @@ static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *s
__bitmap_shift_right(dst, src, shift, nbits);
}
-static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
- unsigned int shift, unsigned int nbits)
+static __always_inline
+void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
@@ -446,11 +531,12 @@ static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *sr
__bitmap_shift_left(dst, src, shift, nbits);
}
-static inline void bitmap_replace(unsigned long *dst,
- const unsigned long *old,
- const unsigned long *new,
- const unsigned long *mask,
- unsigned int nbits)
+static __always_inline
+void bitmap_replace(unsigned long *dst,
+ const unsigned long *old,
+ const unsigned long *new,
+ const unsigned long *mask,
+ unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*old & ~(*mask)) | (*new & *mask);
@@ -458,40 +544,181 @@ static inline void bitmap_replace(unsigned long *dst,
__bitmap_replace(dst, old, new, mask, nbits);
}
-static inline void bitmap_next_clear_region(unsigned long *bitmap,
- unsigned int *rs, unsigned int *re,
- unsigned int end)
+/**
+ * bitmap_scatter - Scatter a bitmap according to the given mask
+ * @dst: scattered bitmap
+ * @src: gathered bitmap
+ * @mask: mask representing bits to assign to in the scattered bitmap
+ * @nbits: number of bits in each of these bitmaps
+ *
+ * Scatters bitmap with sequential bits according to the given @mask.
+ *
+ * Example:
+ * If @src bitmap = 0x005a, with @mask = 0x1313, @dst will be 0x0302.
+ *
+ * Or in binary form
+ * @src @mask @dst
+ * 0000000001011010 0001001100010011 0000001100000010
+ *
+ * (Bits 0, 1, 2, 3, 4, 5 are copied to the bits 0, 1, 4, 8, 9, 12)
+ *
+ * A more 'visual' description of the operation::
+ *
+ * src: 0000000001011010
+ * ||||||
+ * +------+|||||
+ * | +----+||||
+ * | |+----+|||
+ * | || +-+||
+ * | || | ||
+ * mask: ...v..vv...v..vv
+ * ...0..11...0..10
+ * dst: 0000001100000010
+ *
+ * A relationship exists between bitmap_scatter() and bitmap_gather(). See
+ * bitmap_gather() for a detailed description of the gather operation. TL;DR:
+ * bitmap_gather() can be seen as the 'reverse' bitmap_scatter() operation.
+ */
+static __always_inline
+void bitmap_scatter(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
+{
+ unsigned int n = 0;
+ unsigned int bit;
+
+ bitmap_zero(dst, nbits);
+
+ for_each_set_bit(bit, mask, nbits)
+ __assign_bit(bit, dst, test_bit(n++, src));
+}
+
+/**
+ * bitmap_gather - Gather a bitmap according to given mask
+ * @dst: gathered bitmap
+ * @src: scattered bitmap
+ * @mask: mask representing bits to extract from in the scattered bitmap
+ * @nbits: number of bits in each of these bitmaps
+ *
+ * Gathers bitmap with sparse bits according to the given @mask.
+ *
+ * Example:
+ * If @src bitmap = 0x0302, with @mask = 0x1313, @dst will be 0x001a.
+ *
+ * Or in binary form
+ * @src @mask @dst
+ * 0000001100000010 0001001100010011 0000000000011010
+ *
+ * (Bits 0, 1, 4, 8, 9, 12 are copied to the bits 0, 1, 2, 3, 4, 5)
+ *
+ * A more 'visual' description of the operation::
+ *
+ * mask: ...v..vv...v..vv
+ * src: 0000001100000010
+ * ^ ^^ ^ 0
+ * | || | 10
+ * | || > 010
+ * | |+--> 1010
+ * | +--> 11010
+ * +----> 011010
+ * dst: 0000000000011010
+ *
+ * A relationship exists between bitmap_gather() and bitmap_scatter(). See
+ * bitmap_scatter() for a detailed description of the scatter operation. TL;DR:
+ * bitmap_scatter() can be seen as the 'reverse' bitmap_gather() operation.
+ *
+ * Suppose scattered is computed using bitmap_scatter(scattered, src, mask, n).
+ * The operation bitmap_gather(result, scattered, mask, n) leads to a result
+ * equal or equivalent to src.
+ *
+ * The result can be merely 'equivalent' because bitmap_scatter() and
+ * bitmap_gather() are not bijective.
+ * The result and src values are equivalent in the sense that a call to
+ * bitmap_scatter(res, src, mask, n) and a call to
+ * bitmap_scatter(res, result, mask, n) will lead to the same res value.
+ */
+static __always_inline
+void bitmap_gather(unsigned long *dst, const unsigned long *src,
+ const unsigned long *mask, unsigned int nbits)
{
- *rs = find_next_zero_bit(bitmap, end, *rs);
- *re = find_next_bit(bitmap, end, *rs + 1);
+ unsigned int n = 0;
+ unsigned int bit;
+
+ bitmap_zero(dst, nbits);
+
+ for_each_set_bit(bit, mask, nbits)
+ __assign_bit(n++, dst, test_bit(bit, src));
}
-static inline void bitmap_next_set_region(unsigned long *bitmap,
- unsigned int *rs, unsigned int *re,
- unsigned int end)
+static __always_inline
+void bitmap_next_set_region(unsigned long *bitmap, unsigned int *rs,
+ unsigned int *re, unsigned int end)
{
*rs = find_next_bit(bitmap, end, *rs);
*re = find_next_zero_bit(bitmap, end, *rs + 1);
}
-/*
- * Bitmap region iterators. Iterates over the bitmap between [@start, @end).
- * @rs and @re should be integer variables and will be set to start and end
- * index of the current clear or set region.
+/**
+ * bitmap_release_region - release allocated bitmap region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @pos: beginning of bit region to release
+ * @order: region size (log base 2 of number of bits) to release
+ *
+ * This is the complement to __bitmap_find_free_region() and releases
+ * the found region (by clearing it in the bitmap).
*/
-#define bitmap_for_each_clear_region(bitmap, rs, re, start, end) \
- for ((rs) = (start), \
- bitmap_next_clear_region((bitmap), &(rs), &(re), (end)); \
- (rs) < (re); \
- (rs) = (re) + 1, \
- bitmap_next_clear_region((bitmap), &(rs), &(re), (end)))
-
-#define bitmap_for_each_set_region(bitmap, rs, re, start, end) \
- for ((rs) = (start), \
- bitmap_next_set_region((bitmap), &(rs), &(re), (end)); \
- (rs) < (re); \
- (rs) = (re) + 1, \
- bitmap_next_set_region((bitmap), &(rs), &(re), (end)))
+static __always_inline
+void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
+{
+ bitmap_clear(bitmap, pos, BIT(order));
+}
+
+/**
+ * bitmap_allocate_region - allocate bitmap region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @pos: beginning of bit region to allocate
+ * @order: region size (log base 2 of number of bits) to allocate
+ *
+ * Allocate (set bits in) a specified region of a bitmap.
+ *
+ * Returns: 0 on success, or %-EBUSY if specified region wasn't
+ * free (not all bits were zero).
+ */
+static __always_inline
+int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
+{
+ unsigned int len = BIT(order);
+
+ if (find_next_bit(bitmap, pos + len, pos) < pos + len)
+ return -EBUSY;
+ bitmap_set(bitmap, pos, len);
+ return 0;
+}
+
+/**
+ * bitmap_find_free_region - find a contiguous aligned mem region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @bits: number of bits in the bitmap
+ * @order: region size (log base 2 of number of bits) to find
+ *
+ * Find a region of free (zero) bits in a @bitmap of @bits bits and
+ * allocate them (set them to one). Only consider regions of length
+ * a power (@order) of two, aligned to that power of two, which
+ * makes the search algorithm much faster.
+ *
+ * Returns: the bit offset in bitmap of the allocated region,
+ * or -errno on failure.
+ */
+static __always_inline
+int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
+{
+ unsigned int pos, end; /* scans bitmap by regions of size order */
+
+ for (pos = 0; (end = pos + BIT(order)) <= bits; pos = end) {
+ if (!bitmap_allocate_region(bitmap, pos, order))
+ return pos;
+ }
+ return -ENOMEM;
+}
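A sketch tying the three region helpers together:

	DECLARE_BITMAP(pool, 64);
	int pos;

	bitmap_zero(pool, 64);
	pos = bitmap_find_free_region(pool, 64, 2);	/* 4 aligned bits */
	if (pos < 0)
		return pos;
	/* bits pos..pos + 3 now belong to the caller */
	bitmap_release_region(pool, pos, 2);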
/**
* BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
@@ -536,47 +763,89 @@ static inline void bitmap_next_set_region(unsigned long *bitmap,
* That is ``(u32 *)(&val)[0]`` gets the upper 32 bits,
* but we expect the lower 32-bits of u64.
*/
-static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+static __always_inline void bitmap_from_u64(unsigned long *dst, u64 mask)
{
- dst[0] = mask & ULONG_MAX;
-
- if (sizeof(mask) > sizeof(unsigned long))
- dst[1] = mask >> 32;
+ bitmap_from_arr64(dst, &mask, 64);
}
/**
- * bitmap_get_value8 - get an 8-bit value within a memory region
+ * bitmap_read - read a value of n-bits from the memory region
* @map: address to the bitmap memory region
- * @start: bit offset of the 8-bit value; must be a multiple of 8
+ * @start: bit offset of the n-bit value
+ * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG
*
- * Returns the 8-bit value located at the @start bit offset within the @src
- * memory region.
+ * Returns: value of @nbits bits located at the @start bit offset within the
+ * @map memory region. For @nbits = 0 and @nbits > BITS_PER_LONG the return
+ * value is undefined.
*/
-static inline unsigned long bitmap_get_value8(const unsigned long *map,
- unsigned long start)
+static __always_inline
+unsigned long bitmap_read(const unsigned long *map, unsigned long start, unsigned long nbits)
{
- const size_t index = BIT_WORD(start);
- const unsigned long offset = start % BITS_PER_LONG;
+ size_t index = BIT_WORD(start);
+ unsigned long offset = start % BITS_PER_LONG;
+ unsigned long space = BITS_PER_LONG - offset;
+ unsigned long value_low, value_high;
- return (map[index] >> offset) & 0xFF;
+ if (unlikely(!nbits || nbits > BITS_PER_LONG))
+ return 0;
+
+ if (space >= nbits)
+ return (map[index] >> offset) & BITMAP_LAST_WORD_MASK(nbits);
+
+ value_low = map[index] & BITMAP_FIRST_WORD_MASK(start);
+ value_high = map[index + 1] & BITMAP_LAST_WORD_MASK(start + nbits);
+ return (value_low >> offset) | (value_high << space);
}
/**
- * bitmap_set_value8 - set an 8-bit value within a memory region
+ * bitmap_write - write n-bit value within a memory region
* @map: address to the bitmap memory region
- * @value: the 8-bit value; values wider than 8 bits may clobber bitmap
- * @start: bit offset of the 8-bit value; must be a multiple of 8
+ * @value: value to write, clamped to nbits
+ * @start: bit offset of the n-bit value
+ * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG.
+ *
+ * bitmap_write() behaves as-if implemented as @nbits calls of __assign_bit(),
+ * i.e. bits beyond @nbits are ignored:
+ *
+ * for (bit = 0; bit < nbits; bit++)
+ * __assign_bit(start + bit, bitmap, val & BIT(bit));
+ *
+ * For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed.
*/
-static inline void bitmap_set_value8(unsigned long *map, unsigned long value,
- unsigned long start)
+static __always_inline
+void bitmap_write(unsigned long *map, unsigned long value,
+ unsigned long start, unsigned long nbits)
{
- const size_t index = BIT_WORD(start);
- const unsigned long offset = start % BITS_PER_LONG;
-
- map[index] &= ~(0xFFUL << offset);
+ size_t index;
+ unsigned long offset;
+ unsigned long space;
+ unsigned long mask;
+ bool fit;
+
+ if (unlikely(!nbits || nbits > BITS_PER_LONG))
+ return;
+
+ mask = BITMAP_LAST_WORD_MASK(nbits);
+ value &= mask;
+ offset = start % BITS_PER_LONG;
+ space = BITS_PER_LONG - offset;
+ fit = space >= nbits;
+ index = BIT_WORD(start);
+
+ map[index] &= (fit ? (~(mask << offset)) : ~BITMAP_FIRST_WORD_MASK(start));
map[index] |= value << offset;
+ if (fit)
+ return;
+
+ map[index + 1] &= BITMAP_FIRST_WORD_MASK(start + nbits);
+ map[index + 1] |= (value >> space);
}
+#define bitmap_get_value8(map, start) \
+ bitmap_read(map, start, BITS_PER_BYTE)
+#define bitmap_set_value8(map, value, start) \
+ bitmap_write(map, value, start, BITS_PER_BYTE)
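A minimal sketch of the generalized read/write pair:

	DECLARE_BITMAP(map, 64);
	unsigned long v;

	bitmap_zero(map, 64);
	bitmap_write(map, 0x2d, 10, 6);	/* 6-bit value at bit offset 10 */
	v = bitmap_read(map, 10, 6);	/* v == 0x2d */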
+
#endif /* __ASSEMBLY__ */
#endif /* __LINUX_BITMAP_H */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 26bf15e6cd35..ea7898cc5903 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -4,66 +4,84 @@
#include <asm/types.h>
#include <linux/bits.h>
+#include <linux/typecheck.h>
#include <uapi/linux/kernel.h>
-/* Set bits in the first 'n' bytes when loaded from memory */
-#ifdef __LITTLE_ENDIAN
-# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
-#else
-# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
-#endif
-
-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
+#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_BYTE)
+
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
/*
- * Include this here because some architectures need generic_ffs/fls in
- * scope
+ * Defined here because those may be needed by architecture-specific static
+ * inlines.
*/
-#include <asm/bitops.h>
-#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_first_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
+#include <asm-generic/bitops/generic-non-atomic.h>
-/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_from(bit, addr, size) \
- for ((bit) = find_next_bit((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
+/*
+ * Many architecture-specific non-atomic bitops contain inline asm code and,
+ * because of that, the compiler can't optimize them to compile-time expressions
+ * or constants. By contrast, the generic_*() helpers are defined in pure C and
+ * compilers optimize them well.
+ * Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively
+ * equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when
+ * the arguments can be resolved at compile time. That expression itself is a
+ * constant and doesn't bring any functional changes to the rest of cases.
+ * The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
+ * passing a bitmap from .bss or .data (-> `!!addr` is always true).
+ */
+#define bitop(op, nr, addr) \
+ ((__builtin_constant_p(nr) && \
+ __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) && \
+ (uintptr_t)(addr) != (uintptr_t)NULL && \
+ __builtin_constant_p(*(const unsigned long *)(addr))) ? \
+ const##op(nr, addr) : op(nr, addr))
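The effect of bitop() in practice, sketched: with arguments known at compile time, the generic path folds to a constant, exactly as the comment above describes:

	unsigned long foo = 0;

	__set_bit(5, &foo);	/* effectively compiles to foo = BIT(5) */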
-#define for_each_clear_bit(bit, addr, size) \
- for ((bit) = find_first_zero_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+/*
+ * The following macros are non-atomic versions of their non-underscored
+ * counterparts.
+ */
+#define __set_bit(nr, addr) bitop(___set_bit, nr, addr)
+#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr)
+#define __change_bit(nr, addr) bitop(___change_bit, nr, addr)
+#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr)
+#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
+#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
-/* same as for_each_clear_bit() but use bit as value to start with */
-#define for_each_clear_bit_from(bit, addr, size) \
- for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+#define test_bit(nr, addr) bitop(_test_bit, nr, addr)
+#define test_bit_acquire(nr, addr) bitop(_test_bit_acquire, nr, addr)
-/**
- * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
- * @start: bit offset to start search and to store the current iteration offset
- * @clump: location to store copy of current 8-bit clump
- * @bits: bitmap address to base the search on
- * @size: bitmap size in number of bits
+/*
+ * Include this here because some architectures need generic_ffs/fls in
+ * scope
*/
-#define for_each_set_clump8(start, clump, bits, size) \
- for ((start) = find_first_clump8(&(clump), (bits), (size)); \
- (start) < (size); \
- (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
+#include <asm/bitops.h>
+
+/* Check that the bitops prototypes are sane */
+#define __check_bitop_pr(name) \
+ static_assert(__same_type(arch_##name, generic_##name) && \
+ __same_type(const_##name, generic_##name) && \
+ __same_type(_##name, generic_##name))
+
+__check_bitop_pr(__set_bit);
+__check_bitop_pr(__clear_bit);
+__check_bitop_pr(__change_bit);
+__check_bitop_pr(__test_and_set_bit);
+__check_bitop_pr(__test_and_clear_bit);
+__check_bitop_pr(__test_and_change_bit);
+__check_bitop_pr(test_bit);
+__check_bitop_pr(test_bit_acquire);
+
+#undef __check_bitop_pr
static inline int get_bitmask_order(unsigned int count)
{
@@ -182,7 +200,7 @@ static __always_inline __s64 sign_extend64(__u64 value, int index)
return (__s64)(value << shift) >> shift;
}
-static inline unsigned fls_long(unsigned long l)
+static inline unsigned int fls_long(unsigned long l)
{
if (sizeof(l) == 4)
return fls(l);
@@ -211,6 +229,37 @@ static inline int get_count_order_long(unsigned long l)
}
/**
+ * parity8 - get the parity of a u8 value
+ * @value: the value to be examined
+ *
+ * Determine the parity of the u8 argument.
+ *
+ * Returns:
+ * 0 for even parity, 1 for odd parity
+ *
+ * Note: This function informs you about the current parity. Example to bail
+ * out when parity is odd:
+ *
+ * if (parity8(val) == 1)
+ * return -EBADMSG;
+ *
+ * If you need to calculate a parity bit, you need to draw the conclusion from
+ * this result yourself. Example to enforce odd parity, parity bit is bit 7:
+ *
+ * if (parity8(val) == 0)
+ * val ^= BIT(7);
+ */
+static inline int parity8(u8 val)
+{
+ /*
+ * One explanation of this algorithm:
+ * https://funloop.org/codex/problem/parity/README.html
+ */
+ val ^= val >> 4;
+ return (0x6996 >> (val & 0xf)) & 1;
+}
+
+/**
* __ffs64 - find first set bit in a 64 bit word
* @word: The 64 bit word
*
@@ -218,7 +267,7 @@ static inline int get_count_order_long(unsigned long l)
* The result is not defined if no bits are set, so check that @word
* is non-zero before calling this.
*/
-static inline unsigned long __ffs64(u64 word)
+static inline __attribute_const__ unsigned int __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
if (((u32)word) == 0UL)
@@ -230,28 +279,78 @@ static inline unsigned long __ffs64(u64 word)
}
/**
+ * fns - find N'th set bit in a word
+ * @word: The word to search
+ * @n: Bit to find
+ */
+static inline unsigned int fns(unsigned long word, unsigned int n)
+{
+ while (word && n--)
+ word &= word - 1;
+
+ return word ? __ffs(word) : BITS_PER_LONG;
+}
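For illustration (values made up): fns() peels off set bits with word &= word - 1 until the n'th one is the lowest remaining:

	unsigned long w = 0x2c;	/* bits 2, 3 and 5 set */

	/* fns(w, 0) == 2, fns(w, 1) == 3, fns(w, 2) == 5 */
	/* fns(w, 3) == BITS_PER_LONG: there is no fourth set bit */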
+
+/**
* assign_bit - Assign value to a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
* @value: the value to assign
*/
-static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
- bool value)
-{
- if (value)
- set_bit(nr, addr);
- else
- clear_bit(nr, addr);
-}
+#define assign_bit(nr, addr, value) \
+ ((value) ? set_bit((nr), (addr)) : clear_bit((nr), (addr)))
-static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
- bool value)
-{
- if (value)
- __set_bit(nr, addr);
- else
- __clear_bit(nr, addr);
-}
+#define __assign_bit(nr, addr, value) \
+ ((value) ? __set_bit((nr), (addr)) : __clear_bit((nr), (addr)))
+
+/**
+ * __ptr_set_bit - Set bit in a pointer's value
+ * @nr: the bit to set
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * __ptr_set_bit(bit, &p);
+ */
+#define __ptr_set_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ __set_bit(nr, (unsigned long *)(addr)); \
+ })
+
+/**
+ * __ptr_clear_bit - Clear bit in a pointer's value
+ * @nr: the bit to clear
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * __ptr_clear_bit(bit, &p);
+ */
+#define __ptr_clear_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ __clear_bit(nr, (unsigned long *)(addr)); \
+ })
+
+/**
+ * __ptr_test_bit - Test bit in a pointer's value
+ * @nr: the bit to test
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ * void *p = foo();
+ * if (__ptr_test_bit(bit, &p)) {
+ * ...
+ * } else {
+ * ...
+ * }
+ */
+#define __ptr_test_bit(nr, addr) \
+ ({ \
+ typecheck_pointer(*(addr)); \
+ test_bit(nr, (unsigned long *)(addr)); \
+ })
#ifdef __KERNEL__
@@ -261,10 +360,10 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
const typeof(*(ptr)) mask__ = (mask), bits__ = (bits); \
typeof(*(ptr)) old__, new__; \
\
+ old__ = READ_ONCE(*(ptr)); \
do { \
- old__ = READ_ONCE(*(ptr)); \
new__ = (old__ & ~mask__) | bits__; \
- } while (cmpxchg(ptr, old__, new__) != old__); \
+ } while (!try_cmpxchg(ptr, &old__, new__)); \
\
old__; \
})
@@ -276,11 +375,12 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
const typeof(*(ptr)) clear__ = (clear), test__ = (test);\
typeof(*(ptr)) old__, new__; \
\
+ old__ = READ_ONCE(*(ptr)); \
do { \
- old__ = READ_ONCE(*(ptr)); \
+ if (old__ & test__) \
+ break; \
new__ = old__ & ~clear__; \
- } while (!(old__ & test__) && \
- cmpxchg(ptr, old__, new__) != old__); \
+ } while (!try_cmpxchg(ptr, &old__, new__)); \
\
!(old__ & test__); \
})
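Both hunks above are the standard try_cmpxchg() conversion: the initial READ_ONCE() moves ahead of the loop because try_cmpxchg() refreshes old__ with the current memory value on failure. A generic sketch of the idiom (compute_update() is a hypothetical helper):

	unsigned long old, new;

	old = READ_ONCE(*ptr);
	do {
		new = compute_update(old);
	} while (!try_cmpxchg(ptr, &old, new));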
diff --git a/include/linux/bits.h b/include/linux/bits.h
index 7f475d59a097..a40cc861b3a7 100644
--- a/include/linux/bits.h
+++ b/include/linux/bits.h
@@ -2,16 +2,15 @@
#ifndef __LINUX_BITS_H
#define __LINUX_BITS_H
-#include <linux/const.h>
#include <vdso/bits.h>
-#include <asm/bitsperlong.h>
+#include <uapi/linux/bits.h>
-#define BIT_ULL(nr) (ULL(1) << (nr))
#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE 8
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
/*
* Create a contiguous bitmask starting at bit position @l and ending at
@@ -19,28 +18,72 @@
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
#if !defined(__ASSEMBLY__)
+
+/*
+ * Missing asm support
+ *
+ * GENMASK_U*() and BIT_U*() depend on BITS_PER_TYPE(), which relies on sizeof(),
+ * something not available in asm. In any case, fixed-width integers are a C
+ * concept. Assembly code can rely on the long and long long versions instead.
+ */
+
#include <linux/build_bug.h>
-#define GENMASK_INPUT_CHECK(h, l) \
- (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
- __builtin_constant_p((l) > (h)), (l) > (h), 0)))
-#else
+#include <linux/compiler.h>
+#include <linux/overflow.h>
+
+#define GENMASK_INPUT_CHECK(h, l) BUILD_BUG_ON_ZERO(const_true((l) > (h)))
+
+/*
+ * Generate a mask for the specified type @t. Additional checks are made to
+ * guarantee the value returned fits in that type, relying on
+ * -Wshift-count-overflow compiler check to detect incompatible arguments.
+ * For example, all these create build errors or warnings:
+ *
+ * - GENMASK(15, 20): wrong argument order
+ * - GENMASK(72, 15): doesn't fit unsigned long
+ * - GENMASK_U32(33, 15): doesn't fit in a u32
+ */
+#define GENMASK_TYPE(t, h, l) \
+ ((t)(GENMASK_INPUT_CHECK(h, l) + \
+ (type_max(t) << (l) & \
+ type_max(t) >> (BITS_PER_TYPE(t) - 1 - (h)))))
+
+#define GENMASK(h, l) GENMASK_TYPE(unsigned long, h, l)
+#define GENMASK_ULL(h, l) GENMASK_TYPE(unsigned long long, h, l)
+
+#define GENMASK_U8(h, l) GENMASK_TYPE(u8, h, l)
+#define GENMASK_U16(h, l) GENMASK_TYPE(u16, h, l)
+#define GENMASK_U32(h, l) GENMASK_TYPE(u32, h, l)
+#define GENMASK_U64(h, l) GENMASK_TYPE(u64, h, l)
+#define GENMASK_U128(h, l) GENMASK_TYPE(u128, h, l)
+
+/*
+ * Fixed-type variants of BIT(), with additional checks like GENMASK_TYPE(). The
+ * following examples generate compiler warnings due to -Wshift-count-overflow:
+ *
+ * - BIT_U8(8)
+ * - BIT_U32(-1)
+ * - BIT_U32(40)
+ */
+#define BIT_INPUT_CHECK(type, nr) \
+ BUILD_BUG_ON_ZERO(const_true((nr) >= BITS_PER_TYPE(type)))
+
+#define BIT_TYPE(type, nr) ((type)(BIT_INPUT_CHECK(type, nr) + BIT_ULL(nr)))
+
+#define BIT_U8(nr) BIT_TYPE(u8, nr)
+#define BIT_U16(nr) BIT_TYPE(u16, nr)
+#define BIT_U32(nr) BIT_TYPE(u32, nr)
+#define BIT_U64(nr) BIT_TYPE(u64, nr)
+
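A sketch of the fixed-width variants in use:

	u16 flags = GENMASK_U16(11, 8) | BIT_U16(0);

	/* GENMASK_U16(11, 8) == 0x0f00, BIT_U16(0) == 0x0001 */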
+#else /* defined(__ASSEMBLY__) */
+
/*
* BUILD_BUG_ON_ZERO is not available in h files included from asm files,
* disable the input check if that is the case.
*/
-#define GENMASK_INPUT_CHECK(h, l) 0
-#endif
-
-#define __GENMASK(h, l) \
- (((~UL(0)) - (UL(1) << (l)) + 1) & \
- (~UL(0) >> (BITS_PER_LONG - 1 - (h))))
-#define GENMASK(h, l) \
- (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
-
-#define __GENMASK_ULL(h, l) \
- (((~ULL(0)) - (ULL(1) << (l)) + 1) & \
- (~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h))))
-#define GENMASK_ULL(h, l) \
- (GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
+#define GENMASK(h, l) __GENMASK(h, l)
+#define GENMASK_ULL(h, l) __GENMASK_ULL(h, l)
+
+#endif /* !defined(__ASSEMBLY__) */
#endif /* __LINUX_BITS_H */
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index b9f3c246c3c9..dd5841a42c33 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -14,650 +14,38 @@
* Nauman Rafique <nauman@google.com>
*/
-#include <linux/cgroup.h>
-#include <linux/percpu.h>
-#include <linux/percpu_counter.h>
-#include <linux/u64_stats_sync.h>
-#include <linux/seq_file.h>
-#include <linux/radix-tree.h>
-#include <linux/blkdev.h>
-#include <linux/atomic.h>
-#include <linux/kthread.h>
-#include <linux/fs.h>
+#include <linux/types.h>
-/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
-#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
+struct bio;
+struct cgroup_subsys_state;
+struct gendisk;
-/* Max limits for throttle policy */
-#define THROTL_IOPS_MAX UINT_MAX
+#define FC_APPID_LEN 129
#ifdef CONFIG_BLK_CGROUP
-
-enum blkg_iostat_type {
- BLKG_IOSTAT_READ,
- BLKG_IOSTAT_WRITE,
- BLKG_IOSTAT_DISCARD,
-
- BLKG_IOSTAT_NR,
-};
-
-struct blkcg_gq;
-
-struct blkcg {
- struct cgroup_subsys_state css;
- spinlock_t lock;
- refcount_t online_pin;
-
- struct radix_tree_root blkg_tree;
- struct blkcg_gq __rcu *blkg_hint;
- struct hlist_head blkg_list;
-
- struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
-
- struct list_head all_blkcgs_node;
-#ifdef CONFIG_CGROUP_WRITEBACK
- struct list_head cgwb_list;
-#endif
-};
-
-struct blkg_iostat {
- u64 bytes[BLKG_IOSTAT_NR];
- u64 ios[BLKG_IOSTAT_NR];
-};
-
-struct blkg_iostat_set {
- struct u64_stats_sync sync;
- struct blkg_iostat cur;
- struct blkg_iostat last;
-};
-
-/*
- * A blkcg_gq (blkg) is association between a block cgroup (blkcg) and a
- * request_queue (q). This is used by blkcg policies which need to track
- * information per blkcg - q pair.
- *
- * There can be multiple active blkcg policies and each blkg:policy pair is
- * represented by a blkg_policy_data which is allocated and freed by each
- * policy's pd_alloc/free_fn() methods. A policy can allocate private data
- * area by allocating larger data structure which embeds blkg_policy_data
- * at the beginning.
- */
-struct blkg_policy_data {
- /* the blkg and policy id this per-policy data belongs to */
- struct blkcg_gq *blkg;
- int plid;
-};
-
-/*
- * Policies that need to keep per-blkcg data which is independent from any
- * request_queue associated to it should implement cpd_alloc/free_fn()
- * methods. A policy can allocate private data area by allocating larger
- * data structure which embeds blkcg_policy_data at the beginning.
- * cpd_init() is invoked to let each policy handle per-blkcg data.
- */
-struct blkcg_policy_data {
- /* the blkcg and policy id this per-policy data belongs to */
- struct blkcg *blkcg;
- int plid;
-};
-
-/* association between a blk cgroup and a request queue */
-struct blkcg_gq {
- /* Pointer to the associated request_queue */
- struct request_queue *q;
- struct list_head q_node;
- struct hlist_node blkcg_node;
- struct blkcg *blkcg;
-
- /* all non-root blkcg_gq's are guaranteed to have access to parent */
- struct blkcg_gq *parent;
-
- /* reference count */
- struct percpu_ref refcnt;
-
- /* is this blkg online? protected by both blkcg and q locks */
- bool online;
-
- struct blkg_iostat_set __percpu *iostat_cpu;
- struct blkg_iostat_set iostat;
-
- struct blkg_policy_data *pd[BLKCG_MAX_POLS];
-
- spinlock_t async_bio_lock;
- struct bio_list async_bios;
- struct work_struct async_bio_work;
-
- atomic_t use_delay;
- atomic64_t delay_nsec;
- atomic64_t delay_start;
- u64 last_delay;
- int last_use;
-
- struct rcu_head rcu_head;
-};
-
-typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
-typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
-typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
-typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
-typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
- struct request_queue *q, struct blkcg *blkcg);
-typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
-typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
-typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
- size_t size);
-
-struct blkcg_policy {
- int plid;
- /* cgroup files for the policy */
- struct cftype *dfl_cftypes;
- struct cftype *legacy_cftypes;
-
- /* operations */
- blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
- blkcg_pol_init_cpd_fn *cpd_init_fn;
- blkcg_pol_free_cpd_fn *cpd_free_fn;
- blkcg_pol_bind_cpd_fn *cpd_bind_fn;
-
- blkcg_pol_alloc_pd_fn *pd_alloc_fn;
- blkcg_pol_init_pd_fn *pd_init_fn;
- blkcg_pol_online_pd_fn *pd_online_fn;
- blkcg_pol_offline_pd_fn *pd_offline_fn;
- blkcg_pol_free_pd_fn *pd_free_fn;
- blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
- blkcg_pol_stat_pd_fn *pd_stat_fn;
-};
-
-extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
-extern bool blkcg_debug_stats;
-struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
- struct request_queue *q, bool update_hint);
-int blkcg_init_queue(struct request_queue *q);
-void blkcg_exit_queue(struct request_queue *q);
-
-/* Blkio controller policy registration */
-int blkcg_policy_register(struct blkcg_policy *pol);
-void blkcg_policy_unregister(struct blkcg_policy *pol);
-int blkcg_activate_policy(struct request_queue *q,
- const struct blkcg_policy *pol);
-void blkcg_deactivate_policy(struct request_queue *q,
- const struct blkcg_policy *pol);
-
-const char *blkg_dev_name(struct blkcg_gq *blkg);
-void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
- u64 (*prfill)(struct seq_file *,
- struct blkg_policy_data *, int),
- const struct blkcg_policy *pol, int data,
- bool show_total);
-u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
-
-struct blkg_conf_ctx {
- struct block_device *bdev;
- struct blkcg_gq *blkg;
- char *body;
-};
-
-struct block_device *blkcg_conf_open_bdev(char **inputp);
-int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
- char *input, struct blkg_conf_ctx *ctx);
-void blkg_conf_finish(struct blkg_conf_ctx *ctx);
-
-/**
- * blkcg_css - find the current css
- *
- * Find the css associated with either the kthread or the current task.
- * This may return a dying css, so it is up to the caller to use tryget logic
- * to confirm it is alive and well.
- */
-static inline struct cgroup_subsys_state *blkcg_css(void)
-{
- struct cgroup_subsys_state *css;
-
- css = kthread_blkcg();
- if (css)
- return css;
- return task_css(current, io_cgrp_id);
-}
-
-static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
-{
- return css ? container_of(css, struct blkcg, css) : NULL;
-}
-
-/**
- * __bio_blkcg - internal, inconsistent version to get blkcg
- *
- * DO NOT USE.
- * This function is inconsistent and consequently is dangerous to use. The
- * first part of the function returns a blkcg where a reference is owned by the
- * bio. This means it does not need to be rcu protected as it cannot go away
- * with the bio owning a reference to it. However, the latter potentially gets
- * it from task_css(). This can race against task migration and the cgroup
- * dying. It is also semantically different as it must be called rcu protected
- * and is susceptible to failure when trying to get a reference to it.
- * Therefore, it is not ok to assume that *_get() will always succeed on the
- * blkcg returned here.
- */
-static inline struct blkcg *__bio_blkcg(struct bio *bio)
-{
- if (bio && bio->bi_blkg)
- return bio->bi_blkg->blkcg;
- return css_to_blkcg(blkcg_css());
-}
-
-/**
- * bio_blkcg - grab the blkcg associated with a bio
- * @bio: target bio
- *
- * This returns the blkcg associated with a bio, %NULL if not associated.
- * Callers are expected to either handle %NULL or know association has been
- * done prior to calling this.
- */
-static inline struct blkcg *bio_blkcg(struct bio *bio)
-{
- if (bio && bio->bi_blkg)
- return bio->bi_blkg->blkcg;
- return NULL;
-}
-
-static inline bool blk_cgroup_congested(void)
-{
- struct cgroup_subsys_state *css;
- bool ret = false;
-
- rcu_read_lock();
- css = kthread_blkcg();
- if (!css)
- css = task_css(current, io_cgrp_id);
- while (css) {
- if (atomic_read(&css->cgroup->congestion_count)) {
- ret = true;
- break;
- }
- css = css->parent;
- }
- rcu_read_unlock();
- return ret;
-}
-
-/**
- * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
- * @return: true if this bio needs to be submitted with the root blkg context.
- *
- * In order to avoid priority inversions we sometimes need to issue a bio as if
- * it were attached to the root blkg, and then backcharge to the actual owning
- * blkg. The idea is we do bio_blkcg() to look up the actual context for the
- * bio and attach the appropriate blkg to the bio. Then we call this helper and
- * if it is true run with the root blkg for that queue and then do any
- * backcharging to the originating cgroup once the io is complete.
- */
-static inline bool bio_issue_as_root_blkg(struct bio *bio)
-{
- return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
-}
-
-/**
- * blkcg_parent - get the parent of a blkcg
- * @blkcg: blkcg of interest
- *
- * Return the parent blkcg of @blkcg. Can be called anytime.
- */
-static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
-{
- return css_to_blkcg(blkcg->css.parent);
-}
-
-/**
- * __blkg_lookup - internal version of blkg_lookup()
- * @blkcg: blkcg of interest
- * @q: request_queue of interest
- * @update_hint: whether to update lookup hint with the result or not
- *
- * This is internal version and shouldn't be used by policy
- * implementations. Looks up blkgs for the @blkcg - @q pair regardless of
- * @q's bypass state. If @update_hint is %true, the caller should be
- * holding @q->queue_lock and lookup hint is updated on success.
- */
-static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
- struct request_queue *q,
- bool update_hint)
-{
- struct blkcg_gq *blkg;
-
- if (blkcg == &blkcg_root)
- return q->root_blkg;
-
- blkg = rcu_dereference(blkcg->blkg_hint);
- if (blkg && blkg->q == q)
- return blkg;
-
- return blkg_lookup_slowpath(blkcg, q, update_hint);
-}
-
-/**
- * blkg_lookup - lookup blkg for the specified blkcg - q pair
- * @blkcg: blkcg of interest
- * @q: request_queue of interest
- *
- * Lookup blkg for the @blkcg - @q pair. This function should be called
- * under RCU read lock.
- */
-static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
- struct request_queue *q)
-{
- WARN_ON_ONCE(!rcu_read_lock_held());
- return __blkg_lookup(blkcg, q, false);
-}
-
-/**
- * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
- * @q: request_queue of interest
- *
- * Lookup blkg for @q at the root level. See also blkg_lookup().
- */
-static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
-{
- return q->root_blkg;
-}
-
-/**
- * blkg_to_pdata - get policy private data
- * @blkg: blkg of interest
- * @pol: policy of interest
- *
- * Return pointer to private data associated with the @blkg-@pol pair.
- */
-static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
- struct blkcg_policy *pol)
-{
- return blkg ? blkg->pd[pol->plid] : NULL;
-}
-
-static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
- struct blkcg_policy *pol)
-{
- return blkcg ? blkcg->cpd[pol->plid] : NULL;
-}
-
-/**
- * pdata_to_blkg - get blkg associated with policy private data
- * @pd: policy private data of interest
- *
- * @pd is policy private data. Determine the blkg it's associated with.
- */
-static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
-{
- return pd ? pd->blkg : NULL;
-}
-
-static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
-{
- return cpd ? cpd->blkcg : NULL;
-}
-
-extern void blkcg_destroy_blkgs(struct blkcg *blkcg);
-
-/**
- * blkcg_pin_online - pin online state
- * @blkcg: blkcg of interest
- *
- * While pinned, a blkcg is kept online. This is primarily used to
- * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
- * while an associated cgwb is still active.
- */
-static inline void blkcg_pin_online(struct blkcg *blkcg)
-{
- refcount_inc(&blkcg->online_pin);
-}
-
-/**
- * blkcg_unpin_online - unpin online state
- * @blkcg: blkcg of interest
- *
- * This is primarily used to impedance-match blkg and cgwb lifetimes so
- * that blkg doesn't go offline while an associated cgwb is still active.
- * When this count goes to zero, all active cgwbs have finished so the
- * blkcg can continue destruction by calling blkcg_destroy_blkgs().
- */
-static inline void blkcg_unpin_online(struct blkcg *blkcg)
-{
- do {
- if (!refcount_dec_and_test(&blkcg->online_pin))
- break;
- blkcg_destroy_blkgs(blkcg);
- blkcg = blkcg_parent(blkcg);
- } while (blkcg);
-}
-
-/**
- * blkg_path - format cgroup path of blkg
- * @blkg: blkg of interest
- * @buf: target buffer
- * @buflen: target buffer length
- *
- * Format the path of the cgroup of @blkg into @buf.
- */
-static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
-{
- return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
-}
-
-/**
- * blkg_get - get a blkg reference
- * @blkg: blkg to get
- *
- * The caller should be holding an existing reference.
- */
-static inline void blkg_get(struct blkcg_gq *blkg)
-{
- percpu_ref_get(&blkg->refcnt);
-}
-
-/**
- * blkg_tryget - try and get a blkg reference
- * @blkg: blkg to get
- *
- * This is for use when doing an RCU lookup of the blkg. We may be in the midst
- * of freeing this blkg, so we can only use it if the refcnt is not zero.
- */
-static inline bool blkg_tryget(struct blkcg_gq *blkg)
-{
- return blkg && percpu_ref_tryget(&blkg->refcnt);
-}
-
-/**
- * blkg_put - put a blkg reference
- * @blkg: blkg to put
- */
-static inline void blkg_put(struct blkcg_gq *blkg)
-{
- percpu_ref_put(&blkg->refcnt);
-}
-
-/**
- * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
- * @d_blkg: loop cursor pointing to the current descendant
- * @pos_css: used for iteration
- * @p_blkg: target blkg to walk descendants of
- *
- * Walk @c_blkg through the descendants of @p_blkg. Must be used with RCU
- * read locked. If called under either blkcg or queue lock, the iteration
- * is guaranteed to include all and only online blkgs. The caller may
- * update @pos_css by calling css_rightmost_descendant() to skip subtree.
- * @p_blkg is included in the iteration and the first node to be visited.
- */
-#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
- css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
- if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
- (p_blkg)->q, false)))
-
-/**
- * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
- * @d_blkg: loop cursor pointing to the current descendant
- * @pos_css: used for iteration
- * @p_blkg: target blkg to walk descendants of
- *
- * Similar to blkg_for_each_descendant_pre() but performs post-order
- * traversal instead. Synchronization rules are the same. @p_blkg is
- * included in the iteration and the last node to be visited.
- */
-#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
- css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
- if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
- (p_blkg)->q, false)))
-
-bool __blkcg_punt_bio_submit(struct bio *bio);
-
-static inline bool blkcg_punt_bio_submit(struct bio *bio)
-{
- if (bio->bi_opf & REQ_CGROUP_PUNT)
- return __blkcg_punt_bio_submit(bio);
- else
- return false;
-}
-
-static inline void blkcg_bio_issue_init(struct bio *bio)
-{
- bio_issue_init(&bio->bi_issue, bio_sectors(bio));
-}
-
-static inline void blkcg_use_delay(struct blkcg_gq *blkg)
-{
- if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
- return;
- if (atomic_add_return(1, &blkg->use_delay) == 1)
- atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
-}
-
-static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
-{
- int old = atomic_read(&blkg->use_delay);
-
- if (WARN_ON_ONCE(old < 0))
- return 0;
- if (old == 0)
- return 0;
-
- /*
- * We do this song and dance because we can race with somebody else
- * adding or removing delay. If we just did an atomic_dec we'd end up
- * negative and we'd already be in trouble. We need to subtract 1 and
- * then check to see if we were the last delay so we can drop the
- * congestion count on the cgroup.
- */
- while (old) {
- int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
- if (cur == old)
- break;
- old = cur;
- }
-
- if (old == 0)
- return 0;
- if (old == 1)
- atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
- return 1;
-}
-
-/**
- * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
- * @blkg: target blkg
- * @delay: delay duration in nsecs
- *
- * When enabled with this function, the delay is not decayed and must be
- * explicitly cleared with blkcg_clear_delay(). Must not be mixed with
- * blkcg_[un]use_delay() and blkcg_add_delay() usages.
- */
-static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
-{
- int old = atomic_read(&blkg->use_delay);
-
- /* We only want 1 person setting the congestion count for this blkg. */
- if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
- atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
-
- atomic64_set(&blkg->delay_nsec, delay);
-}
-
-/**
- * blkcg_clear_delay - Disable allocator delay mechanism
- * @blkg: target blkg
- *
- * Disable use_delay mechanism. See blkcg_set_delay().
- */
-static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
-{
- int old = atomic_read(&blkg->use_delay);
-
- /* We only want 1 person clearing the congestion count for this blkg. */
- if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
- atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
-}
-
-void blk_cgroup_bio_start(struct bio *bio);
-void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
-void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
+void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
-#else /* CONFIG_BLK_CGROUP */
-
-struct blkcg {
-};
-
-struct blkg_policy_data {
-};
+bool blk_cgroup_congested(void);
+void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css);
+void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css);
+struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio);
-struct blkcg_policy_data {
-};
-
-struct blkcg_gq {
-};
-
-struct blkcg_policy {
-};
+#else /* CONFIG_BLK_CGROUP */
#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }
+static inline struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
+{
+ return NULL;
+}
+#endif /* CONFIG_BLK_CGROUP */
-#ifdef CONFIG_BLOCK
-
-static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
-
-static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
-static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
-{ return NULL; }
-static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
-static inline void blkcg_exit_queue(struct request_queue *q) { }
-static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
-static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
-static inline int blkcg_activate_policy(struct request_queue *q,
- const struct blkcg_policy *pol) { return 0; }
-static inline void blkcg_deactivate_policy(struct request_queue *q,
- const struct blkcg_policy *pol) { }
-
-static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
-static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
-
-static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
- struct blkcg_policy *pol) { return NULL; }
-static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
-static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
-static inline void blkg_get(struct blkcg_gq *blkg) { }
-static inline void blkg_put(struct blkcg_gq *blkg) { }
-
-static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
-static inline void blkcg_bio_issue_init(struct bio *bio) { }
-static inline void blk_cgroup_bio_start(struct bio *bio) { }
-
-#define blk_queue_for_each_rl(rl, q) \
- for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
+int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len);
+char *blkcg_get_fc_appid(struct bio *bio);
-#endif /* CONFIG_BLOCK */
-#endif /* CONFIG_BLK_CGROUP */
#endif /* _BLK_CGROUP_H */
diff --git a/include/linux/blk-crypto-profile.h b/include/linux/blk-crypto-profile.h
new file mode 100644
index 000000000000..4f39e9cd7576
--- /dev/null
+++ b/include/linux/blk-crypto-profile.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef __LINUX_BLK_CRYPTO_PROFILE_H
+#define __LINUX_BLK_CRYPTO_PROFILE_H
+
+#include <linux/bio.h>
+#include <linux/blk-crypto.h>
+
+struct blk_crypto_profile;
+
+/**
+ * struct blk_crypto_ll_ops - functions to control inline encryption hardware
+ *
+ * Low-level operations for controlling inline encryption hardware. This
+ * interface must be implemented by storage drivers that support inline
+ * encryption. All functions may sleep, are serialized by profile->lock, and
+ * are never called while profile->dev (if set) is runtime-suspended.
+ */
+struct blk_crypto_ll_ops {
+
+ /**
+ * @keyslot_program: Program a key into the inline encryption hardware.
+ *
+ * Program @key into the specified @slot in the inline encryption
+ * hardware, overwriting any key that the keyslot may already contain.
+ * The keyslot is guaranteed to not be in-use by any I/O.
+ *
+ * This is required if the device has keyslots. Otherwise (i.e. if the
+ * device is a layered device, or if the device is real hardware that
+ * simply doesn't have the concept of keyslots) it is never called.
+ *
+ * Must return 0 on success, or -errno on failure.
+ */
+ int (*keyslot_program)(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot);
+
+ /**
+ * @keyslot_evict: Evict a key from the inline encryption hardware.
+ *
+ * If the device has keyslots, this function must evict the key from the
+ * specified @slot. The slot will contain @key, but there should be no
+ * need for the @key argument to be used as @slot should be sufficient.
+ * The keyslot is guaranteed to not be in-use by any I/O.
+ *
+ * If the device doesn't have keyslots itself, this function must evict
+ * @key from any underlying devices. @slot won't be valid in this case.
+ *
+ * If there are no keyslots and no underlying devices, this function
+ * isn't required.
+ *
+ * Must return 0 on success, or -errno on failure.
+ */
+ int (*keyslot_evict)(struct blk_crypto_profile *profile,
+ const struct blk_crypto_key *key,
+ unsigned int slot);
+
+ /**
+ * @derive_sw_secret: Derive the software secret from a hardware-wrapped
+ * key in ephemerally-wrapped form.
+ *
+ * This only needs to be implemented if BLK_CRYPTO_KEY_TYPE_HW_WRAPPED
+ * is supported.
+ *
+ * Must return 0 on success, -EBADMSG if the key is invalid, or another
+ * -errno code on other errors.
+ */
+ int (*derive_sw_secret)(struct blk_crypto_profile *profile,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]);
+
+ /**
+ * @import_key: Create a hardware-wrapped key by importing a raw key.
+ *
+ * This only needs to be implemented if BLK_CRYPTO_KEY_TYPE_HW_WRAPPED
+ * is supported.
+ *
+ * On success, must write the new key in long-term wrapped form to
+ * @lt_key and return its size in bytes. On failure, must return a
+ * -errno value.
+ */
+ int (*import_key)(struct blk_crypto_profile *profile,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+ /**
+ * @generate_key: Generate a hardware-wrapped key.
+ *
+ * This only needs to be implemented if BLK_CRYPTO_KEY_TYPE_HW_WRAPPED
+ * is supported.
+ *
+ * On success, must write the new key in long-term wrapped form to
+ * @lt_key and return its size in bytes. On failure, must return a
+ * -errno value.
+ */
+ int (*generate_key)(struct blk_crypto_profile *profile,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+ /**
+ * @prepare_key: Prepare a hardware-wrapped key to be used.
+ *
+ * Prepare a hardware-wrapped key to be used by converting it from
+ * long-term wrapped form to ephemerally-wrapped form. This only needs
+ * to be implemented if BLK_CRYPTO_KEY_TYPE_HW_WRAPPED is supported.
+ *
+ * On success, must write the key in ephemerally-wrapped form to
+ * @eph_key and return its size in bytes. On failure, must return
+ * -EBADMSG if the key is invalid, or another -errno on other error.
+ */
+ int (*prepare_key)(struct blk_crypto_profile *profile,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+};
+
+/**
+ * struct blk_crypto_profile - inline encryption profile for a device
+ *
+ * This struct contains a storage device's inline encryption capabilities (e.g.
+ * the supported crypto algorithms), driver-provided functions to control the
+ * inline encryption hardware (e.g. programming and evicting keys), and optional
+ * device-independent keyslot management data.
+ */
+struct blk_crypto_profile {
+
+ /* public: Drivers must initialize the following fields. */
+
+ /**
+ * @ll_ops: Driver-provided functions to control the inline encryption
+ * hardware, e.g. program and evict keys.
+ */
+ struct blk_crypto_ll_ops ll_ops;
+
+ /**
+ * @max_dun_bytes_supported: The maximum number of bytes supported for
+ * specifying the data unit number (DUN). Specifically, the range of
+ * supported DUNs is 0 through (1 << (8 * max_dun_bytes_supported)) - 1.
+ */
+ unsigned int max_dun_bytes_supported;
+
+ /**
+ * @key_types_supported: A bitmask of the supported key types:
+ * BLK_CRYPTO_KEY_TYPE_RAW and/or BLK_CRYPTO_KEY_TYPE_HW_WRAPPED.
+ */
+ unsigned int key_types_supported;
+
+ /**
+ * @modes_supported: Array of bitmasks that specifies whether each
+ * combination of crypto mode and data unit size is supported.
+ * Specifically, the i'th bit of modes_supported[crypto_mode] is set if
+ * crypto_mode can be used with a data unit size of (1 << i). Note that
+ * only data unit sizes that are powers of 2 can be supported.
+ */
+ unsigned int modes_supported[BLK_ENCRYPTION_MODE_MAX];
+
+ /**
+ * @dev: An optional device for runtime power management. If the driver
+ * provides this device, it will be runtime-resumed before any function
+ * in @ll_ops is called and will remain resumed during the call.
+ */
+ struct device *dev;
+
+ /* private: The following fields shouldn't be accessed by drivers. */
+
+ /* Number of keyslots, or 0 if not applicable */
+ unsigned int num_slots;
+
+ /*
+ * Serializes all calls to functions in @ll_ops as well as all changes
+ * to @slot_hashtable. This can also be taken in read mode to look up
+ * keyslots while ensuring that they can't be changed concurrently.
+ */
+ struct rw_semaphore lock;
+ struct lock_class_key lockdep_key;
+
+ /* List of idle slots, with least recently used slot at front */
+ wait_queue_head_t idle_slots_wait_queue;
+ struct list_head idle_slots;
+ spinlock_t idle_slots_lock;
+
+ /*
+	 * Hash table which maps struct blk_crypto_key * to keyslots, so that we
+ * can find a key's keyslot in O(1) time rather than O(num_slots).
+ * Protected by 'lock'.
+ */
+ struct hlist_head *slot_hashtable;
+ unsigned int log_slot_ht_size;
+
+ /* Per-keyslot data */
+ struct blk_crypto_keyslot *slots;
+};
+
+int blk_crypto_profile_init(struct blk_crypto_profile *profile,
+ unsigned int num_slots);
+
+int devm_blk_crypto_profile_init(struct device *dev,
+ struct blk_crypto_profile *profile,
+ unsigned int num_slots);
+
+unsigned int blk_crypto_keyslot_index(struct blk_crypto_keyslot *slot);
+
+void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile);
+
+void blk_crypto_profile_destroy(struct blk_crypto_profile *profile);
+
+int blk_crypto_import_key(struct blk_crypto_profile *profile,
+ const u8 *raw_key, size_t raw_key_size,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+int blk_crypto_generate_key(struct blk_crypto_profile *profile,
+ u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+int blk_crypto_prepare_key(struct blk_crypto_profile *profile,
+ const u8 *lt_key, size_t lt_key_size,
+ u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent,
+ const struct blk_crypto_profile *child);
+
+bool blk_crypto_has_capabilities(const struct blk_crypto_profile *target,
+ const struct blk_crypto_profile *reference);
+
+void blk_crypto_update_capabilities(struct blk_crypto_profile *dst,
+ const struct blk_crypto_profile *src);
+
+#endif /* __LINUX_BLK_CRYPTO_PROFILE_H */
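For orientation, a minimal sketch of how a keyslot-based driver might wire this up; my_keyslot_program(), my_keyslot_evict() and my_init_crypto() are hypothetical driver names, and only the struct fields and the devm init helper come from this header:

    /* Hypothetical driver glue; actual hardware accesses elided. */
    static int my_keyslot_program(struct blk_crypto_profile *profile,
                                  const struct blk_crypto_key *key,
                                  unsigned int slot)
    {
            /* Write key->bytes (key->size bytes) into hardware slot @slot. */
            return 0;
    }

    static int my_keyslot_evict(struct blk_crypto_profile *profile,
                                const struct blk_crypto_key *key,
                                unsigned int slot)
    {
            /* Zero out hardware slot @slot. */
            return 0;
    }

    static int my_init_crypto(struct device *dev,
                              struct blk_crypto_profile *profile)
    {
            int err = devm_blk_crypto_profile_init(dev, profile, 32);

            if (err)
                    return err;
            profile->ll_ops.keyslot_program = my_keyslot_program;
            profile->ll_ops.keyslot_evict = my_keyslot_evict;
            profile->max_dun_bytes_supported = 8;
            profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
            /* AES-256-XTS with a 4096-byte data unit size (bit 12). */
            profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |= 1 << 12;
            return 0;
    }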
diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
index 69b24fe92cbf..58b0c5254a67 100644
--- a/include/linux/blk-crypto.h
+++ b/include/linux/blk-crypto.h
@@ -6,17 +6,68 @@
#ifndef __LINUX_BLK_CRYPTO_H
#define __LINUX_BLK_CRYPTO_H
+#include <linux/minmax.h>
#include <linux/types.h>
+#include <uapi/linux/blk-crypto.h>
enum blk_crypto_mode_num {
BLK_ENCRYPTION_MODE_INVALID,
BLK_ENCRYPTION_MODE_AES_256_XTS,
BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV,
BLK_ENCRYPTION_MODE_ADIANTUM,
+ BLK_ENCRYPTION_MODE_SM4_XTS,
BLK_ENCRYPTION_MODE_MAX,
};
-#define BLK_CRYPTO_MAX_KEY_SIZE 64
+/*
+ * Supported types of keys. Must be bitflags due to their use in
+ * blk_crypto_profile::key_types_supported.
+ */
+enum blk_crypto_key_type {
+ /*
+ * Raw keys (i.e. "software keys"). These keys are simply kept in raw,
+ * plaintext form in kernel memory.
+ */
+ BLK_CRYPTO_KEY_TYPE_RAW = 0x1,
+
+ /*
+ * Hardware-wrapped keys. These keys are only present in kernel memory
+ * in ephemerally-wrapped form, and they can only be unwrapped by
+ * dedicated hardware. For details, see the "Hardware-wrapped keys"
+ * section of Documentation/block/inline-encryption.rst.
+ */
+ BLK_CRYPTO_KEY_TYPE_HW_WRAPPED = 0x2,
+};
+
+/*
+ * Currently the maximum raw key size is 64 bytes, as that is the key size of
+ * BLK_ENCRYPTION_MODE_AES_256_XTS, which requires the longest key.
+ *
+ * The maximum hardware-wrapped key size depends on the hardware's key wrapping
+ * algorithm, which is a hardware implementation detail, so it isn't precisely
+ * specified. But currently 128 bytes is plenty in practice. Implementations
+ * are recommended to wrap a 32-byte key for the hardware KDF with AES-256-GCM,
+ * which should result in a size closer to 64 bytes than 128.
+ *
+ * Both of these values can trivially be increased if ever needed.
+ */
+#define BLK_CRYPTO_MAX_RAW_KEY_SIZE 64
+#define BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE 128
+
+#define BLK_CRYPTO_MAX_ANY_KEY_SIZE \
+ MAX(BLK_CRYPTO_MAX_RAW_KEY_SIZE, BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE)
+
+/*
+ * Size of the "software secret" which can be derived from a hardware-wrapped
+ * key. This is currently always 32 bytes. Note, the choice of 32 bytes
+ * assumes that the software secret is only used directly for algorithms that
+ * don't require more than a 256-bit key to get the desired security strength.
+ * If it were to be used e.g. directly as an AES-256-XTS key, then this would
+ * need to be increased (which is possible if hardware supports it, but care
+ * would need to be taken to avoid breaking users who need exactly 32 bytes).
+ */
+#define BLK_CRYPTO_SW_SECRET_SIZE 32
+
/**
* struct blk_crypto_config - an inline encryption key's crypto configuration
* @crypto_mode: encryption algorithm this key is for
@@ -25,20 +76,23 @@ enum blk_crypto_mode_num {
* ciphertext. This is always a power of 2. It might be e.g. the
* filesystem block size or the disk sector size.
* @dun_bytes: the maximum number of bytes of DUN used when using this key
+ * @key_type: the type of this key -- either raw or hardware-wrapped
*/
struct blk_crypto_config {
enum blk_crypto_mode_num crypto_mode;
unsigned int data_unit_size;
unsigned int dun_bytes;
+ enum blk_crypto_key_type key_type;
};
/**
* struct blk_crypto_key - an inline encryption key
- * @crypto_cfg: the crypto configuration (like crypto_mode, key size) for this
- * key
+ * @crypto_cfg: the crypto mode, data unit size, key type, and other
+ * characteristics of this key and how it will be used
* @data_unit_size_bits: log2 of data_unit_size
- * @size: size of this key in bytes (determined by @crypto_cfg.crypto_mode)
- * @raw: the raw bytes of this key. Only the first @size bytes are used.
+ * @size: size of this key in bytes. The size of a raw key is fixed for a given
+ * crypto mode, but the size of a hardware-wrapped key can vary.
+ * @bytes: the bytes of this key. Only the first @size bytes are significant.
*
* A blk_crypto_key is immutable once created, and many bios can reference it at
* the same time. It must not be freed until all bios using it have completed
@@ -48,7 +102,7 @@ struct blk_crypto_key {
struct blk_crypto_config crypto_cfg;
unsigned int data_unit_size_bits;
unsigned int size;
- u8 raw[BLK_CRYPTO_MAX_KEY_SIZE];
+ u8 bytes[BLK_CRYPTO_MAX_ANY_KEY_SIZE];
};
#define BLK_CRYPTO_MAX_IV_SIZE 32
@@ -71,9 +125,6 @@ struct bio_crypt_ctx {
#include <linux/blk_types.h>
#include <linux/blkdev.h>
-struct request;
-struct request_queue;
-
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
static inline bool bio_has_crypt_ctx(struct bio *bio)
@@ -89,20 +140,28 @@ bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
unsigned int bytes,
const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]);
-int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
+int blk_crypto_init_key(struct blk_crypto_key *blk_key,
+ const u8 *key_bytes, size_t key_size,
+ enum blk_crypto_key_type key_type,
enum blk_crypto_mode_num crypto_mode,
unsigned int dun_bytes,
unsigned int data_unit_size);
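A hedged usage sketch of the updated initializer for a raw AES-256-XTS key; the 64-byte raw_key buffer and the DUN/data-unit parameters are illustrative:

    /* raw_key: caller-provided 64-byte buffer (illustrative). */
    struct blk_crypto_key blk_key;
    int err = blk_crypto_init_key(&blk_key, raw_key, 64,
                                  BLK_CRYPTO_KEY_TYPE_RAW,
                                  BLK_ENCRYPTION_MODE_AES_256_XTS,
                                  8,        /* dun_bytes */
                                  4096);    /* data_unit_size */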
-int blk_crypto_start_using_key(const struct blk_crypto_key *key,
- struct request_queue *q);
+int blk_crypto_start_using_key(struct block_device *bdev,
+ const struct blk_crypto_key *key);
-int blk_crypto_evict_key(struct request_queue *q,
- const struct blk_crypto_key *key);
+void blk_crypto_evict_key(struct block_device *bdev,
+ const struct blk_crypto_key *key);
-bool blk_crypto_config_supported(struct request_queue *q,
+bool blk_crypto_config_supported_natively(struct block_device *bdev,
+ const struct blk_crypto_config *cfg);
+bool blk_crypto_config_supported(struct block_device *bdev,
const struct blk_crypto_config *cfg);
+int blk_crypto_derive_sw_secret(struct block_device *bdev,
+ const u8 *eph_key, size_t eph_key_size,
+ u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]);
+
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline bool bio_has_crypt_ctx(struct bio *bio)
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
new file mode 100644
index 000000000000..a6b84206eb94
--- /dev/null
+++ b/include/linux/blk-integrity.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BLK_INTEGRITY_H
+#define _LINUX_BLK_INTEGRITY_H
+
+#include <linux/blk-mq.h>
+#include <linux/bio-integrity.h>
+#include <linux/blk-mq-dma.h>
+
+struct request;
+
+/*
+ * Maximum contiguous integrity buffer allocation.
+ */
+#define BLK_INTEGRITY_MAX_SIZE SZ_2M
+
+enum blk_integrity_flags {
+ BLK_INTEGRITY_NOVERIFY = 1 << 0,
+ BLK_INTEGRITY_NOGENERATE = 1 << 1,
+ BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
+ BLK_INTEGRITY_REF_TAG = 1 << 3,
+ BLK_INTEGRITY_STACKED = 1 << 4,
+};
+
+const char *blk_integrity_profile_name(struct blk_integrity *bi);
+bool queue_limits_stack_integrity(struct queue_limits *t,
+ struct queue_limits *b);
+static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
+ struct block_device *bdev)
+{
+ return queue_limits_stack_integrity(t, &bdev->bd_disk->queue->limits);
+}
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
+
+int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
+ ssize_t bytes);
+int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,
+ struct logical_block_metadata_cap __user *argp);
+bool blk_rq_integrity_dma_map_iter_start(struct request *req,
+ struct device *dma_dev, struct dma_iova_state *state,
+ struct blk_dma_iter *iter);
+bool blk_rq_integrity_dma_map_iter_next(struct request *req,
+ struct device *dma_dev, struct blk_dma_iter *iter);
+
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
+{
+ return q->limits.integrity.metadata_size;
+}
+
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+ if (!blk_integrity_queue_supports_integrity(disk->queue))
+ return NULL;
+ return &disk->queue->limits.integrity;
+}
+
+static inline struct blk_integrity *
+bdev_get_integrity(struct block_device *bdev)
+{
+ return blk_get_integrity(bdev->bd_disk);
+}
+
+static inline unsigned short
+queue_max_integrity_segments(const struct request_queue *q)
+{
+ return q->limits.max_integrity_segments;
+}
+
+/**
+ * bio_integrity_intervals - Return number of integrity intervals for a bio
+ * @bi: blk_integrity profile for device
+ * @sectors: Size of the bio in 512-byte sectors
+ *
+ * Description: The block layer calculates everything in 512 byte
+ * sectors but integrity metadata is done in terms of the data integrity
+ * interval size of the storage device. Convert the block layer sectors
+ * to the appropriate number of integrity intervals.
+ */
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return sectors >> (bi->interval_exp - 9);
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return bio_integrity_intervals(bi, sectors) * bi->metadata_size;
+}
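As a quick worked example of the conversion (values illustrative): with a 4096-byte integrity interval (interval_exp = 12) and 8 bytes of metadata per interval, a 64-sector (32 KiB) bio covers 64 >> (12 - 9) = 8 intervals, so bio_integrity_bytes() returns 8 * 8 = 64 bytes of metadata.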
+
+static inline bool blk_integrity_rq(struct request *rq)
+{
+ return rq->cmd_flags & REQ_INTEGRITY;
+}
+
+/*
+ * Return the current bvec that contains the integrity data. bip_iter may be
+ * advanced to iterate over the integrity data.
+ */
+static inline struct bio_vec rq_integrity_vec(struct request *rq)
+{
+ return mp_bvec_iter_bvec(rq->bio->bi_integrity->bip_vec,
+ rq->bio->bi_integrity->bip_iter);
+}
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static inline int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,
+ struct logical_block_metadata_cap __user *argp)
+{
+ return -ENOIOCTLCMD;
+}
+static inline int blk_rq_count_integrity_sg(struct request_queue *q,
+ struct bio *b)
+{
+ return 0;
+}
+static inline int blk_rq_map_integrity_sg(struct request *q,
+ struct scatterlist *s)
+{
+ return 0;
+}
+static inline int blk_rq_integrity_map_user(struct request *rq,
+ void __user *ubuf,
+ ssize_t bytes)
+{
+ return -EINVAL;
+}
+static inline bool blk_rq_integrity_dma_map_iter_start(struct request *req,
+ struct device *dma_dev, struct dma_iova_state *state,
+ struct blk_dma_iter *iter)
+{
+ return false;
+}
+static inline bool blk_rq_integrity_dma_map_iter_next(struct request *req,
+ struct device *dma_dev, struct blk_dma_iter *iter)
+{
+ return false;
+}
+static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
+{
+ return NULL;
+}
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+ return NULL;
+}
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
+{
+ return false;
+}
+static inline unsigned short
+queue_max_integrity_segments(const struct request_queue *q)
+{
+ return 0;
+}
+
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+static inline int blk_integrity_rq(struct request *rq)
+{
+ return 0;
+}
+
+static inline struct bio_vec rq_integrity_vec(struct request *rq)
+{
+ /* the optimizer will remove all calls to this function */
+ return (struct bio_vec){ };
+}
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+#endif /* _LINUX_BLK_INTEGRITY_H */
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
new file mode 100644
index 000000000000..cb88fc791fbd
--- /dev/null
+++ b/include/linux/blk-mq-dma.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef BLK_MQ_DMA_H
+#define BLK_MQ_DMA_H
+
+#include <linux/blk-mq.h>
+#include <linux/pci-p2pdma.h>
+
+struct blk_map_iter {
+ struct bvec_iter iter;
+ struct bio *bio;
+ struct bio_vec *bvecs;
+ bool is_integrity;
+};
+
+struct blk_dma_iter {
+ /* Output address range for this iteration */
+ dma_addr_t addr;
+ u32 len;
+ struct pci_p2pdma_map_state p2pdma;
+
+ /* Status code. Only valid when blk_rq_dma_map_iter_* returned false */
+ blk_status_t status;
+
+ /* Internal to blk_rq_dma_map_iter_* */
+ struct blk_map_iter iter;
+};
+
+bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, struct blk_dma_iter *iter);
+bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, struct blk_dma_iter *iter);
+
+/**
+ * blk_rq_dma_map_coalesce - were all segments coalesced?
+ * @state: DMA state to check
+ *
+ * Returns true if blk_rq_dma_map_iter_start coalesced all segments into a
+ * single DMA range.
+ */
+static inline bool blk_rq_dma_map_coalesce(struct dma_iova_state *state)
+{
+ return dma_use_iova(state);
+}
+
+/**
+ * blk_rq_dma_unmap - try to DMA unmap a request
+ * @req: request to unmap
+ * @dma_dev: device to unmap from
+ * @state: DMA IOVA state
+ * @mapped_len: number of bytes to unmap
+ * @map: peer-to-peer mapping type
+ *
+ * Returns %false if the caller needs to manually unmap every DMA segment
+ * mapped using @iter or %true if no work is left to be done.
+ */
+static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, size_t mapped_len,
+ enum pci_p2pdma_map_type map)
+{
+ if (map == PCI_P2PDMA_MAP_BUS_ADDR)
+ return true;
+
+ if (dma_use_iova(state)) {
+ unsigned int attrs = 0;
+
+ if (map == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
+ attrs |= DMA_ATTR_MMIO;
+
+ dma_iova_destroy(dma_dev, state, mapped_len, rq_dma_dir(req),
+ attrs);
+ return true;
+ }
+
+ return !dma_need_unmap(dma_dev);
+}
+#endif /* BLK_MQ_DMA_H */
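The intended calling pattern for the iterators above, sketched for a driver that programs one descriptor per returned range; my_program_descriptor() is a hypothetical helper:

    struct dma_iova_state state = {};
    struct blk_dma_iter iter;

    if (!blk_rq_dma_map_iter_start(req, dma_dev, &state, &iter))
            return iter.status;     /* status is only valid on failure */

    do {
            /* Program one hardware descriptor from iter.addr / iter.len. */
            my_program_descriptor(iter.addr, iter.len);
    } while (blk_rq_dma_map_iter_next(req, dma_dev, &state, &iter));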
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
deleted file mode 100644
index 0b1f45c62623..000000000000
--- a/include/linux/blk-mq-pci.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_BLK_MQ_PCI_H
-#define _LINUX_BLK_MQ_PCI_H
-
-struct blk_mq_queue_map;
-struct pci_dev;
-
-int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
- int offset);
-
-#endif /* _LINUX_BLK_MQ_PCI_H */
diff --git a/include/linux/blk-mq-rdma.h b/include/linux/blk-mq-rdma.h
deleted file mode 100644
index 5cc5f0f36218..000000000000
--- a/include/linux/blk-mq-rdma.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_BLK_MQ_RDMA_H
-#define _LINUX_BLK_MQ_RDMA_H
-
-struct blk_mq_tag_set;
-struct ib_device;
-
-int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
- struct ib_device *dev, int first_vec);
-
-#endif /* _LINUX_BLK_MQ_RDMA_H */
diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h
deleted file mode 100644
index 687ae287e1dc..000000000000
--- a/include/linux/blk-mq-virtio.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_BLK_MQ_VIRTIO_H
-#define _LINUX_BLK_MQ_VIRTIO_H
-
-struct blk_mq_queue_map;
-struct virtio_device;
-
-int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
- struct virtio_device *vdev, int first_vec);
-
-#endif /* _LINUX_BLK_MQ_VIRTIO_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 359486940fa0..cae9e857aea4 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -4,12 +4,315 @@
#include <linux/blkdev.h>
#include <linux/sbitmap.h>
-#include <linux/srcu.h>
#include <linux/lockdep.h>
+#include <linux/scatterlist.h>
+#include <linux/prefetch.h>
+#include <linux/srcu.h>
+#include <linux/rw_hint.h>
+#include <linux/rwsem.h>
struct blk_mq_tags;
struct blk_flush_queue;
+#define BLKDEV_MIN_RQ 4
+#define BLKDEV_DEFAULT_RQ 128
+
+enum rq_end_io_ret {
+ RQ_END_IO_NONE,
+ RQ_END_IO_FREE,
+};
+
+typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t);
+
+/* request flags */
+typedef __u32 __bitwise req_flags_t;
+
+/* Keep rqf_name[] in sync with the definitions below */
+enum rqf_flags {
+	/* drive may already have started this one */
+ __RQF_STARTED,
+ /* request for flush sequence */
+ __RQF_FLUSH_SEQ,
+ /* merge of different types, fail separately */
+ __RQF_MIXED_MERGE,
+ /* don't call prep for this one */
+ __RQF_DONTPREP,
+ /* use hctx->sched_tags */
+ __RQF_SCHED_TAGS,
+ /* use an I/O scheduler for this request */
+ __RQF_USE_SCHED,
+ /* vaguely specified driver internal error. Ignored by block layer */
+ __RQF_FAILED,
+ /* don't warn about errors */
+ __RQF_QUIET,
+ /* account into disk and partition IO statistics */
+ __RQF_IO_STAT,
+ /* runtime pm request */
+ __RQF_PM,
+ /* on IO scheduler merge hash */
+ __RQF_HASHED,
+ /* track IO completion time */
+ __RQF_STATS,
+	/*
+	 * Look at ->special_vec for the actual data payload instead of the
+	 * bio chain.
+	 */
+ __RQF_SPECIAL_PAYLOAD,
+ /* request completion needs to be signaled to zone write plugging. */
+ __RQF_ZONE_WRITE_PLUGGING,
+ /* ->timeout has been called, don't expire again */
+ __RQF_TIMED_OUT,
+ __RQF_RESV,
+ __RQF_BITS
+};
+
+#define RQF_STARTED ((__force req_flags_t)(1 << __RQF_STARTED))
+#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << __RQF_FLUSH_SEQ))
+#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << __RQF_MIXED_MERGE))
+#define RQF_DONTPREP ((__force req_flags_t)(1 << __RQF_DONTPREP))
+#define RQF_SCHED_TAGS ((__force req_flags_t)(1 << __RQF_SCHED_TAGS))
+#define RQF_USE_SCHED ((__force req_flags_t)(1 << __RQF_USE_SCHED))
+#define RQF_FAILED ((__force req_flags_t)(1 << __RQF_FAILED))
+#define RQF_QUIET ((__force req_flags_t)(1 << __RQF_QUIET))
+#define RQF_IO_STAT ((__force req_flags_t)(1 << __RQF_IO_STAT))
+#define RQF_PM ((__force req_flags_t)(1 << __RQF_PM))
+#define RQF_HASHED ((__force req_flags_t)(1 << __RQF_HASHED))
+#define RQF_STATS ((__force req_flags_t)(1 << __RQF_STATS))
+#define RQF_SPECIAL_PAYLOAD \
+ ((__force req_flags_t)(1 << __RQF_SPECIAL_PAYLOAD))
+#define RQF_ZONE_WRITE_PLUGGING \
+ ((__force req_flags_t)(1 << __RQF_ZONE_WRITE_PLUGGING))
+#define RQF_TIMED_OUT ((__force req_flags_t)(1 << __RQF_TIMED_OUT))
+#define RQF_RESV ((__force req_flags_t)(1 << __RQF_RESV))
+
+/* flags that prevent us from merging requests: */
+#define RQF_NOMERGE_FLAGS \
+ (RQF_STARTED | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
+
+enum mq_rq_state {
+ MQ_RQ_IDLE = 0,
+ MQ_RQ_IN_FLIGHT = 1,
+ MQ_RQ_COMPLETE = 2,
+};
+
+/*
+ * Try to put the fields that are referenced together in the same cacheline.
+ *
+ * If you modify this structure, make sure to update blk_rq_init() and
+ * especially blk_mq_rq_ctx_init() to take care of the added fields.
+ */
+struct request {
+ struct request_queue *q;
+ struct blk_mq_ctx *mq_ctx;
+ struct blk_mq_hw_ctx *mq_hctx;
+
+ blk_opf_t cmd_flags; /* op and common flags */
+ req_flags_t rq_flags;
+
+ int tag;
+ int internal_tag;
+
+ unsigned int timeout;
+
+ /* the following two fields are internal, NEVER access directly */
+ unsigned int __data_len; /* total data len */
+ sector_t __sector; /* sector cursor */
+
+ struct bio *bio;
+ struct bio *biotail;
+
+ union {
+ struct list_head queuelist;
+ struct request *rq_next;
+ };
+
+ struct block_device *part;
+#ifdef CONFIG_BLK_RQ_ALLOC_TIME
+ /* Time that the first bio started allocating this request. */
+ u64 alloc_time_ns;
+#endif
+ /* Time that this request was allocated for this IO. */
+ u64 start_time_ns;
+ /* Time that I/O was submitted to the device. */
+ u64 io_start_time_ns;
+
+#ifdef CONFIG_BLK_WBT
+ unsigned short wbt_flags;
+#endif
+ /*
+	 * rq sectors used for blk stats. It has the same value as
+	 * blk_rq_sectors(rq), except that it is never zeroed by
+	 * completion.
+ */
+ unsigned short stats_sectors;
+
+ /*
+ * Number of scatter-gather DMA addr+len pairs after
+ * physical address coalescing is performed.
+ */
+ unsigned short nr_phys_segments;
+ unsigned short nr_integrity_segments;
+
+ /*
+ * The lowest set bit for address gaps between physical segments. This
+ * provides information necessary for dma optimization opprotunities,
+ * like for testing if the segments can be coalesced against the
+ * device's iommu granule.
+ */
+ unsigned char phys_gap_bit;
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct bio_crypt_ctx *crypt_ctx;
+ struct blk_crypto_keyslot *crypt_keyslot;
+#endif
+
+ enum mq_rq_state state;
+ atomic_t ref;
+
+ unsigned long deadline;
+
+ /*
+ * The hash is used inside the scheduler, and killed once the
+ * request reaches the dispatch list. The ipi_list is only used
+ * to queue the request for softirq completion, which is long
+ * after the request has been unhashed (and even removed from
+ * the dispatch list).
+ */
+ union {
+ struct hlist_node hash; /* merge hash */
+ struct llist_node ipi_list;
+ };
+
+ /*
+ * The rb_node is only used inside the io scheduler, requests
+ * are pruned when moved to the dispatch queue. special_vec must
+	 * only be used if RQF_SPECIAL_PAYLOAD is set, and such requests
+	 * cannot be inserted into an IO scheduler.
+ */
+ union {
+ struct rb_node rb_node; /* sort/lookup */
+ struct bio_vec special_vec;
+ };
+
+ /*
+ * Three pointers are available for the IO schedulers, if they need
+ * more they have to dynamically allocate it.
+ */
+ struct {
+ struct io_cq *icq;
+ void *priv[2];
+ } elv;
+
+ struct {
+ unsigned int seq;
+ rq_end_io_fn *saved_end_io;
+ } flush;
+
+ u64 fifo_time;
+
+ /*
+ * completion callback.
+ */
+ rq_end_io_fn *end_io;
+ void *end_io_data;
+};
+
+/*
+ * Returns a mask with all bits starting at req->phys_gap_bit set to 1.
+ */
+static inline unsigned long req_phys_gap_mask(const struct request *req)
+{
+ return ~(((1 << req->phys_gap_bit) >> 1) - 1);
+}
+
+static inline enum req_op req_op(const struct request *req)
+{
+ return req->cmd_flags & REQ_OP_MASK;
+}
+
+static inline bool blk_rq_is_passthrough(struct request *rq)
+{
+ return blk_op_is_passthrough(rq->cmd_flags);
+}
+
+static inline unsigned short req_get_ioprio(struct request *req)
+{
+ if (req->bio)
+ return req->bio->bi_ioprio;
+ return 0;
+}
+
+#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
+
+#define rq_dma_dir(rq) \
+ (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+static inline int rq_list_empty(const struct rq_list *rl)
+{
+ return rl->head == NULL;
+}
+
+static inline void rq_list_init(struct rq_list *rl)
+{
+ rl->head = NULL;
+ rl->tail = NULL;
+}
+
+static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq)
+{
+ rq->rq_next = NULL;
+ if (rl->tail)
+ rl->tail->rq_next = rq;
+ else
+ rl->head = rq;
+ rl->tail = rq;
+}
+
+static inline void rq_list_add_head(struct rq_list *rl, struct request *rq)
+{
+ rq->rq_next = rl->head;
+ rl->head = rq;
+ if (!rl->tail)
+ rl->tail = rq;
+}
+
+static inline struct request *rq_list_pop(struct rq_list *rl)
+{
+ struct request *rq = rl->head;
+
+ if (rq) {
+ rl->head = rl->head->rq_next;
+ if (!rl->head)
+ rl->tail = NULL;
+ rq->rq_next = NULL;
+ }
+
+ return rq;
+}
+
+static inline struct request *rq_list_peek(struct rq_list *rl)
+{
+ return rl->head;
+}
+
+#define rq_list_for_each(rl, pos) \
+ for (pos = rq_list_peek((rl)); (pos); pos = pos->rq_next)
+
+#define rq_list_for_each_safe(rl, pos, nxt) \
+	for (pos = rq_list_peek((rl)), nxt = pos ? pos->rq_next : NULL; \
+ pos; pos = nxt, nxt = pos ? pos->rq_next : NULL)
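A minimal sketch of the list helpers above in use; rq_a and rq_b stand in for previously allocated requests:

    struct rq_list rl;
    struct request *rq;

    rq_list_init(&rl);
    rq_list_add_tail(&rl, rq_a);
    rq_list_add_tail(&rl, rq_b);

    /* Drain in FIFO order and complete each request. */
    while ((rq = rq_list_pop(&rl)))
            blk_mq_end_request(rq, BLK_STS_OK);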
+
+/**
+ * enum blk_eh_timer_return - How the timeout handler should proceed
+ * @BLK_EH_DONE: The block driver completed the command or will complete it at
+ * a later time.
+ * @BLK_EH_RESET_TIMER: Reset the request timer and continue waiting for the
+ * request to complete.
+ */
+enum blk_eh_timer_return {
+ BLK_EH_DONE,
+ BLK_EH_RESET_TIMER,
+};
+
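A hypothetical .timeout callback illustrating the two return values; my_device_abort() is an assumed driver helper:

    static enum blk_eh_timer_return my_timeout(struct request *rq)
    {
            if (my_device_abort(rq))
                    return BLK_EH_DONE;        /* completion will follow */
            return BLK_EH_RESET_TIMER;         /* give the command more time */
    }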
/**
* struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
* block device
@@ -122,14 +425,6 @@ struct blk_mq_hw_ctx {
*/
struct blk_mq_tags *sched_tags;
- /** @queued: Number of queued requests. */
- unsigned long queued;
- /** @run: Number of dispatched requests. */
- unsigned long run;
-#define BLK_MQ_MAX_DISPATCH_ORDER 7
- /** @dispatched: Number of dispatch requests by queue. */
- unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
-
/** @numa_node: NUMA node the storage adapter has been connected to. */
unsigned int numa_node;
/** @queue_num: Index of this hardware queue. */
@@ -148,13 +443,6 @@ struct blk_mq_hw_ctx {
/** @kobj: Kernel object for sysfs. */
struct kobject kobj;
- /** @poll_considered: Count times blk_poll() was called. */
- unsigned long poll_considered;
- /** @poll_invoked: Count how many requests blk_poll() polled. */
- unsigned long poll_invoked;
- /** @poll_success: Count how many polled requests were completed. */
- unsigned long poll_success;
-
#ifdef CONFIG_BLK_DEBUG_FS
/**
* @debugfs_dir: debugfs directory for this hardware queue. Named
@@ -170,13 +458,6 @@ struct blk_mq_hw_ctx {
* q->unused_hctx_list.
*/
struct list_head hctx_list;
-
- /**
- * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
- * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
- * blk_mq_hw_ctx_size().
- */
- struct srcu_struct srcu[];
};
/**
@@ -212,6 +493,7 @@ enum hctx_type {
/**
* struct blk_mq_tag_set - tag set that can be shared between request queues
+ * @ops: Pointers to functions that implement block driver behavior.
* @map: One or more ctx -> hctx mappings. One map exists for each
* hardware queue type (enum hctx_type) that the driver wishes
* to support. There are no restrictions on maps being of the
@@ -219,7 +501,6 @@ enum hctx_type {
* types.
* @nr_maps: Number of elements in the @map array. A number in the range
* [1, HCTX_MAX_TYPES].
- * @ops: Pointers to functions that implement block driver behavior.
* @nr_hw_queues: Number of hardware queues supported by the block driver that
* owns this data structure.
* @queue_depth: Number of tags per hardware queue, reserved tags included.
@@ -232,21 +513,26 @@ enum hctx_type {
* @flags: Zero or more BLK_MQ_F_* flags.
* @driver_data: Pointer to data owned by the block driver that created this
* tag set.
- * @active_queues_shared_sbitmap:
- * number of active request queues per tag set.
- * @__bitmap_tags: A shared tags sbitmap, used over all hctx's
- * @__breserved_tags:
- * A shared reserved tags sbitmap, used over all hctx's
* @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
* elements.
+ * @shared_tags:
+ * Shared set of tags. Has @nr_hw_queues elements. If set,
+ * shared by all @tags.
* @tag_list_lock: Serializes tag_list accesses.
* @tag_list: List of the request queues that use this tag set. See also
* request_queue.tag_set_list.
+ * @srcu: Use as lock when type of the request queue is blocking
+ * (BLK_MQ_F_BLOCKING).
+ * @tags_srcu: SRCU used to defer freeing of tags page_list to prevent
+ * use-after-free when iterating tags.
+ * @update_nr_hwq_lock:
+ * Synchronize updating nr_hw_queues with add/del disk &
+ * switching elevator.
*/
struct blk_mq_tag_set {
+ const struct blk_mq_ops *ops;
struct blk_mq_queue_map map[HCTX_MAX_TYPES];
unsigned int nr_maps;
- const struct blk_mq_ops *ops;
unsigned int nr_hw_queues;
unsigned int queue_depth;
unsigned int reserved_tags;
@@ -255,14 +541,17 @@ struct blk_mq_tag_set {
unsigned int timeout;
unsigned int flags;
void *driver_data;
- atomic_t active_queues_shared_sbitmap;
- struct sbitmap_queue __bitmap_tags;
- struct sbitmap_queue __breserved_tags;
struct blk_mq_tags **tags;
+ struct blk_mq_tags *shared_tags;
+
struct mutex tag_list_lock;
struct list_head tag_list;
+ struct srcu_struct *srcu;
+ struct srcu_struct tags_srcu;
+
+ struct rw_semaphore update_nr_hwq_lock;
};
/**
@@ -276,9 +565,7 @@ struct blk_mq_queue_data {
bool last;
};
-typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
- bool);
-typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
+typedef bool (busy_tag_iter_fn)(struct request *, void *);
/**
* struct blk_mq_ops - Callback functions that implements block driver
@@ -301,6 +588,14 @@ struct blk_mq_ops {
void (*commit_rqs)(struct blk_mq_hw_ctx *);
/**
+	 * @queue_rqs: Queue a list of new requests. The driver is guaranteed
+	 * that each request belongs to the same queue. If the driver doesn't
+	 * empty the @rqlist completely, then the rest will be queued
+	 * individually by the block layer upon return.
+ */
+ void (*queue_rqs)(struct rq_list *rqlist);
+
+ /**
+	 * @get_budget: Reserve budget before queueing a request; once .queue_rq
+	 * is run, it is the driver's responsibility to release the
+	 * reserved budget. Also we have to handle failure case
@@ -325,12 +620,12 @@ struct blk_mq_ops {
/**
* @timeout: Called on request timeout.
*/
- enum blk_eh_timer_return (*timeout)(struct request *, bool);
+ enum blk_eh_timer_return (*timeout)(struct request *);
/**
* @poll: Called to poll for completion of a specific tag.
*/
- int (*poll)(struct blk_mq_hw_ctx *);
+ int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *);
/**
* @complete: Mark the request as complete.
@@ -364,11 +659,6 @@ struct blk_mq_ops {
unsigned int);
/**
- * @initialize_rq_fn: Called from inside blk_get_request().
- */
- void (*initialize_rq_fn)(struct request *rq);
-
- /**
* @cleanup_rq: Called before freeing one request which isn't completed
* yet, and usually for freeing the driver private data.
*/
@@ -383,7 +673,7 @@ struct blk_mq_ops {
* @map_queues: This allows drivers specify their own queue mapping by
* overriding the setup-time function that builds the mq_map.
*/
- int (*map_queues)(struct blk_mq_tag_set *set);
+ void (*map_queues)(struct blk_mq_tag_set *set);
#ifdef CONFIG_BLK_DEBUG_FS
/**
@@ -394,8 +684,8 @@ struct blk_mq_ops {
#endif
};
+/* Keep hctx_flag_name[] in sync with the definitions below */
enum {
- BLK_MQ_F_SHOULD_MERGE = 1 << 0,
BLK_MQ_F_TAG_QUEUE_SHARED = 1 << 1,
/*
* Set when this device requires underlying blk-mq device for
@@ -403,47 +693,61 @@ enum {
*/
BLK_MQ_F_STACKING = 1 << 2,
BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
- BLK_MQ_F_BLOCKING = 1 << 5,
- BLK_MQ_F_NO_SCHED = 1 << 6,
- BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
- BLK_MQ_F_ALLOC_POLICY_BITS = 1,
+ BLK_MQ_F_BLOCKING = 1 << 4,
- BLK_MQ_S_STOPPED = 0,
- BLK_MQ_S_TAG_ACTIVE = 1,
- BLK_MQ_S_SCHED_RESTART = 2,
+ /*
+	 * Alloc tags on a round-robin basis instead of the first available one.
+ */
+ BLK_MQ_F_TAG_RR = 1 << 5,
- /* hw queue is inactive after all its CPUs become offline */
- BLK_MQ_S_INACTIVE = 3,
+ /*
+	 * Select 'none' instead of 'mq-deadline' during queue registration
+	 * when there is a single hwq or shared hwqs.
+ */
+ BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 6,
+
+ BLK_MQ_F_MAX = 1 << 7,
+};
- BLK_MQ_MAX_DEPTH = 10240,
+#define BLK_MQ_MAX_DEPTH (10240)
+#define BLK_MQ_NO_HCTX_IDX (-1U)
- BLK_MQ_CPU_WORK_BATCH = 8,
+enum {
+ /* Keep hctx_state_name[] in sync with the definitions below */
+ BLK_MQ_S_STOPPED,
+ BLK_MQ_S_TAG_ACTIVE,
+ BLK_MQ_S_SCHED_RESTART,
+ /* hw queue is inactive after all its CPUs become offline */
+ BLK_MQ_S_INACTIVE,
+ BLK_MQ_S_MAX
};
-#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
- ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
- ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
-#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
- ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
- << BLK_MQ_F_ALLOC_POLICY_START_BIT)
-
-struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
-struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
- void *queuedata);
-struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
- struct request_queue *q,
- bool elevator_init);
-struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
- const struct blk_mq_ops *ops,
- unsigned int queue_depth,
- unsigned int set_flags);
-void blk_mq_unregister_dev(struct device *, struct request_queue *);
+
+struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
+ struct queue_limits *lim, void *queuedata,
+ struct lock_class_key *lkclass);
+#define blk_mq_alloc_disk(set, lim, queuedata) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ __blk_mq_alloc_disk(set, lim, queuedata, &__key); \
+})
+struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
+ struct lock_class_key *lkclass);
+struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
+ struct queue_limits *lim, void *queuedata);
+int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ struct request_queue *q);
+void blk_mq_destroy_queue(struct request_queue *);
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
+int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops, unsigned int queue_depth,
+ unsigned int set_flags);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
-void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
-
void blk_mq_free_request(struct request *rq);
+int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
+ unsigned int poll_flags);
bool blk_mq_queue_inflight(struct request_queue *q);
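A hedged setup sketch using the allocation helpers above; set is assumed to point to a zeroed struct blk_mq_tag_set, my_mq_ops and my_driver_data are illustrative, and passing a NULL queue_limits pointer is assumed to select default limits:

    struct gendisk *disk;
    int err;

    set->ops = &my_mq_ops;
    set->nr_hw_queues = 1;
    set->queue_depth = 128;
    err = blk_mq_alloc_tag_set(set);
    if (err)
            return err;

    disk = blk_mq_alloc_disk(set, NULL, my_driver_data);
    if (IS_ERR(disk)) {
            blk_mq_free_tag_set(set);
            return PTR_ERR(disk);
    }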
@@ -456,12 +760,45 @@ enum {
BLK_MQ_REQ_PM = (__force blk_mq_req_flags_t)(1 << 2),
};
-struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
+struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
blk_mq_req_flags_t flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
- unsigned int op, blk_mq_req_flags_t flags,
+ blk_opf_t opf, blk_mq_req_flags_t flags,
unsigned int hctx_idx);
-struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
+
+/*
+ * Tag address space map.
+ */
+struct blk_mq_tags {
+ unsigned int nr_tags;
+ unsigned int nr_reserved_tags;
+ unsigned int active_queues;
+
+ struct sbitmap_queue bitmap_tags;
+ struct sbitmap_queue breserved_tags;
+
+ struct request **rqs;
+ struct request **static_rqs;
+ struct list_head page_list;
+
+ /*
+ * used to clear request reference in rqs[] before freeing one
+ * request pool
+ */
+ spinlock_t lock;
+ struct rcu_head rcu_head;
+};
+
+static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
+ unsigned int tag)
+{
+ if (tag < tags->nr_tags) {
+ prefetch(tags->rqs[tag]);
+ return tags->rqs[tag];
+ }
+
+ return NULL;
+}
enum {
BLK_MQ_UNIQUE_TAG_BITS = 16,
@@ -511,16 +848,84 @@ static inline void blk_mq_set_request_complete(struct request *rq)
WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}
+/*
+ * Complete the request directly instead of deferring it to softirq or
+ * completing it on another CPU. Useful in preemptible context instead of
+ * interrupt context.
+ */
+static inline void blk_mq_complete_request_direct(struct request *rq,
+ void (*complete)(struct request *rq))
+{
+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+ complete(rq);
+}
+
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
+void blk_mq_end_request_batch(struct io_comp_batch *ib);
+
+/*
+ * Only need start/end time stamping if we have iostat or
+ * blk stats enabled, or using an IO scheduler.
+ */
+static inline bool blk_mq_need_time_stamp(struct request *rq)
+{
+ return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED));
+}
+
+static inline bool blk_mq_is_reserved_rq(struct request *rq)
+{
+ return rq->rq_flags & RQF_RESV;
+}
+
+/**
+ * blk_mq_add_to_batch() - add a request to the completion batch
+ * @req: The request to add to the batch
+ * @iob: The batch to add the request to
+ * @is_error: Specify true if the request failed with an error
+ * @complete: The completion handler for the request
+ *
+ * Batched completions only work when there is no I/O error and no special
+ * ->end_io handler.
+ *
+ * Return: true when the request was added to the batch, otherwise false
+ */
+static inline bool blk_mq_add_to_batch(struct request *req,
+ struct io_comp_batch *iob, bool is_error,
+ void (*complete)(struct io_comp_batch *))
+{
+ /*
+ * Check various conditions that exclude batch processing:
+ * 1) No batch container
+ * 2) Has scheduler data attached
+ * 3) Not a passthrough request and end_io set
+ * 4) Not a passthrough request and failed with an error
+ */
+ if (!iob)
+ return false;
+ if (req->rq_flags & RQF_SCHED_TAGS)
+ return false;
+ if (!blk_rq_is_passthrough(req)) {
+ if (req->end_io)
+ return false;
+ if (is_error)
+ return false;
+ }
+
+ if (!iob->complete)
+ iob->complete = complete;
+ else if (iob->complete != complete)
+ return false;
+ iob->need_ts |= blk_mq_need_time_stamp(req);
+ rq_list_add_tail(&iob->req_list, req);
+ return true;
+}
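Typical use in a driver's completion path, sketched with a hypothetical my_complete_batch() handler:

    /* Batch if possible, otherwise fall back to per-request completion. */
    if (!blk_mq_add_to_batch(req, iob, status != BLK_STS_OK,
                             my_complete_batch))
            blk_mq_complete_request(req);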
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
-bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
@@ -528,6 +933,9 @@ void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
+void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
+void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set);
+void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
@@ -536,14 +944,34 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv);
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
-void blk_mq_freeze_queue(struct request_queue *q);
-void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_nomemsave(struct request_queue *q);
+void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q);
+static inline unsigned int __must_check
+blk_mq_freeze_queue(struct request_queue *q)
+{
+ unsigned int memflags = memalloc_noio_save();
+
+ blk_mq_freeze_queue_nomemsave(q);
+ return memflags;
+}
+static inline void
+blk_mq_unfreeze_queue(struct request_queue *q, unsigned int memflags)
+{
+ blk_mq_unfreeze_queue_nomemrestore(q);
+ memalloc_noio_restore(memflags);
+}
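The __must_check return type forces callers to carry the NOIO memflags from the freeze through to the matching unfreeze; a minimal sketch of the pattern, with mydrv_apply_limits standing in for whatever the caller does while no requests are in flight:

static void mydrv_update_config(struct request_queue *q)
{
	unsigned int memflags = blk_mq_freeze_queue(q);	/* enters NOIO scope */

	mydrv_apply_limits(q);		/* queue is frozen, nothing in flight */
	blk_mq_unfreeze_queue(q, memflags);	/* leaves NOIO scope */
}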
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
-
-int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
+void blk_mq_unfreeze_queue_non_owner(struct request_queue *q);
+void blk_freeze_queue_start_non_owner(struct request_queue *q);
+
+unsigned int blk_mq_num_possible_queues(unsigned int max_queues);
+unsigned int blk_mq_num_online_queues(unsigned int max_queues);
+void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
+void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
+ struct device *dev, unsigned int offset);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
@@ -587,44 +1015,230 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
return rq + 1;
}
+static inline struct blk_mq_hw_ctx *queue_hctx(struct request_queue *q, int id)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ rcu_read_lock();
+ hctx = rcu_dereference(q->queue_hw_ctx)[id];
+ rcu_read_unlock();
+
+ return hctx;
+}
+
#define queue_for_each_hw_ctx(q, hctx, i) \
for ((i) = 0; (i) < (q)->nr_hw_queues && \
- ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
+ ({ hctx = queue_hctx((q), i); 1; }); (i)++)
#define hctx_for_each_ctx(hctx, ctx, i) \
for ((i) = 0; (i) < (hctx)->nr_ctx && \
({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
-static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
- struct request *rq)
+static inline void blk_mq_cleanup_rq(struct request *rq)
{
- if (rq->tag != -1)
- return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);
+ if (rq->q->mq_ops->cleanup_rq)
+ rq->q->mq_ops->cleanup_rq(rq);
+}
- return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
- BLK_QC_T_INTERNAL;
+void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
+ struct lock_class_key *key);
+
+static inline bool rq_is_sync(struct request *rq)
+{
+ return op_is_sync(rq->cmd_flags);
}
-static inline void blk_mq_cleanup_rq(struct request *rq)
+void blk_rq_init(struct request_queue *q, struct request *rq);
+int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ struct bio_set *bs, gfp_t gfp_mask,
+ int (*bio_ctr)(struct bio *, struct bio *, void *), void *data);
+void blk_rq_unprep_clone(struct request *rq);
+blk_status_t blk_insert_cloned_request(struct request *rq);
+
+struct rq_map_data {
+ struct page **pages;
+ unsigned long offset;
+ unsigned short page_order;
+ unsigned short nr_entries;
+ bool null_mapped;
+ bool from_user;
+};
+
+int blk_rq_map_user(struct request_queue *, struct request *,
+ struct rq_map_data *, void __user *, unsigned long, gfp_t);
+int blk_rq_map_user_io(struct request *, struct rq_map_data *,
+ void __user *, unsigned long, gfp_t, bool, int, bool, int);
+int blk_rq_map_user_iov(struct request_queue *, struct request *,
+ struct rq_map_data *, const struct iov_iter *, gfp_t);
+int blk_rq_unmap_user(struct bio *);
+int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
+ gfp_t gfp);
+int blk_rq_append_bio(struct request *rq, struct bio *bio);
+void blk_execute_rq_nowait(struct request *rq, bool at_head);
+blk_status_t blk_execute_rq(struct request *rq, bool at_head);
+bool blk_rq_is_poll(struct request *rq);
+
+struct req_iterator {
+ struct bvec_iter iter;
+ struct bio *bio;
+};
+
+#define __rq_for_each_bio(_bio, rq) \
+ if ((rq->bio)) \
+ for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
+
+#define rq_for_each_segment(bvl, _rq, _iter) \
+ __rq_for_each_bio(_iter.bio, _rq) \
+ bio_for_each_segment(bvl, _iter.bio, _iter.iter)
+
+#define rq_for_each_bvec(bvl, _rq, _iter) \
+ __rq_for_each_bio(_iter.bio, _rq) \
+ bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
+
+#define rq_iter_last(bvec, _iter) \
+ (_iter.bio->bi_next == NULL && \
+ bio_iter_last(bvec, _iter.iter))
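A sketch of these iterators in use: a simple PIO-style driver walking every data segment of a request and copying it out. mydrv_pio_write is hypothetical; bvec_kmap_local() is the usual way to get a kernel mapping of a segment.

static void mydrv_transfer(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, rq, iter) {
		void *buf = bvec_kmap_local(&bvec);

		mydrv_pio_write(buf, bvec.bv_len);
		kunmap_local(buf);
	}
}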
+
+/*
+ * blk_rq_pos() : the current sector
+ * blk_rq_bytes() : bytes left in the entire request
+ * blk_rq_cur_bytes() : bytes left in the current segment
+ * blk_rq_sectors() : sectors left in the entire request
+ * blk_rq_cur_sectors() : sectors left in the current segment
+ * blk_rq_stats_sectors() : sectors of the entire request used for stats
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
{
- if (rq->q->mq_ops->cleanup_rq)
- rq->q->mq_ops->cleanup_rq(rq);
+ return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+ return rq->__data_len;
}
-static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
- unsigned int nr_segs)
+static inline int blk_rq_cur_bytes(const struct request *rq)
{
- rq->nr_phys_segments = nr_segs;
- rq->__data_len = bio->bi_iter.bi_size;
- rq->bio = rq->biotail = bio;
- rq->ioprio = bio_prio(bio);
+ if (!rq->bio)
+ return 0;
+ if (!bio_has_data(rq->bio)) /* dataless requests such as discard */
+ return rq->bio->bi_iter.bi_size;
+ return bio_iovec(rq->bio).bv_len;
+}
- if (bio->bi_bdev)
- rq->rq_disk = bio->bi_bdev->bd_disk;
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+ return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}
-blk_qc_t blk_mq_submit_bio(struct bio *bio);
-void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
- struct lock_class_key *key);
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+ return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
+}
-#endif
+static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
+{
+ return rq->stats_sectors;
+}
+
+/*
+ * Some commands like WRITE SAME have a payload or data transfer size which
+ * is different from the size of the request. Any driver that supports such
+ * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
+ * calculate the data transfer size.
+ */
+static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec.bv_len;
+ return blk_rq_bytes(rq);
+}
+
+/*
+ * Return the first full biovec in the request. The caller must ensure that
+ * the request has at least one bvec before calling this helper.
+ */
+static inline struct bio_vec req_bvec(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec;
+ return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
+}
+
+static inline unsigned int blk_rq_count_bios(struct request *rq)
+{
+ unsigned int nr_bios = 0;
+ struct bio *bio;
+
+ __rq_for_each_bio(bio, rq)
+ nr_bios++;
+
+ return nr_bios;
+}
+
+void blk_steal_bios(struct bio_list *list, struct request *rq);
+
+/*
+ * Request completion related functions.
+ *
+ * blk_update_request() completes the given number of bytes and updates
+ * the request without completing it.
+ */
+bool blk_update_request(struct request *rq, blk_status_t error,
+ unsigned int nr_bytes);
+void blk_abort_request(struct request *);
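A sketch of partial completion: a driver that retires a request piecewise calls blk_update_request() per chunk and only ends the request once it reports that no data is left (the error handling a real driver needs is elided):

static void mydrv_complete_bytes(struct request *rq, unsigned int bytes)
{
	/* Returns true while the request still has bytes outstanding. */
	if (!blk_update_request(rq, BLK_STS_OK, bytes))
		__blk_mq_end_request(rq, BLK_STS_OK);
}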
+
+/*
+ * Number of physical segments as sent to the device.
+ *
+ * Normally this is the number of discontiguous data segments sent by the
+ * submitter. But for data-less commands like discard we might have no
+ * actual data segments submitted, but the driver might have to add its
+ * own special payload. In that case we still return 1 here so that this
+ * special payload will be mapped.
+ */
+static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return 1;
+ return rq->nr_phys_segments;
+}
+
+/*
+ * Number of discard segments (or ranges) the driver needs to fill in.
+ * Each discard bio merged into a request is counted as one segment.
+ */
+static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
+{
+ return max_t(unsigned short, rq->nr_phys_segments, 1);
+}
+
+/**
+ * blk_rq_nr_bvec - return number of bvecs in a request
+ * @rq: request to calculate bvecs for
+ *
+ * Returns the number of bvecs.
+ */
+static inline unsigned int blk_rq_nr_bvec(struct request *rq)
+{
+ struct req_iterator rq_iter;
+ struct bio_vec bv;
+ unsigned int nr_bvec = 0;
+
+ rq_for_each_bvec(bv, rq, rq_iter)
+ nr_bvec++;
+
+ return nr_bvec;
+}
+
+int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
+ struct scatterlist **last_sg);
+static inline int blk_rq_map_sg(struct request *rq, struct scatterlist *sglist)
+{
+ struct scatterlist *last_sg = NULL;
+
+ return __blk_rq_map_sg(rq, sglist, &last_sg);
+}
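The common DMA setup path, sketched: size a driver-owned scatterlist from blk_rq_nr_phys_segments(), map the request onto it with blk_rq_map_sg(), then hand it to the DMA API. The mydrv_cmd fields are hypothetical; rq_dma_dir() picks the transfer direction from the request.

static int mydrv_map_data(struct mydrv_cmd *cmd, struct request *rq)
{
	int nents;

	sg_init_table(cmd->sgl, blk_rq_nr_phys_segments(rq));
	nents = blk_rq_map_sg(rq, cmd->sgl);
	if (nents <= 0)
		return -EIO;

	nents = dma_map_sg(cmd->dma_dev, cmd->sgl, nents, rq_dma_dir(rq));
	return nents ? nents : -ENOMEM;
}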
+void blk_dump_rq_flags(struct request *, char *);
+
+#endif /* BLK_MQ_H */
diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h
index b80c65aba249..004b38a538ff 100644
--- a/include/linux/blk-pm.h
+++ b/include/linux/blk-pm.h
@@ -14,8 +14,7 @@ extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
-extern void blk_post_runtime_resume(struct request_queue *q, int err);
-extern void blk_set_runtime_active(struct request_queue *q);
+extern void blk_post_runtime_resume(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
struct device *dev) {}
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index db026b6ec15a..5dc061d318a4 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -10,6 +10,7 @@
#include <linux/bvec.h>
#include <linux/device.h>
#include <linux/ktime.h>
+#include <linux/rw_hint.h>
struct bio_set;
struct bio;
@@ -20,43 +21,64 @@ struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
struct bio_crypt_ctx;
+/*
+ * The basic unit of block I/O is a sector. It is used in a number of contexts
+ * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
+ * bytes. Variables of type sector_t represent an offset or size that is a
+ * multiple of 512 bytes. Hence these two constants.
+ */
+#ifndef SECTOR_SHIFT
+#define SECTOR_SHIFT 9
+#endif
+#ifndef SECTOR_SIZE
+#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+#endif
+
+#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
+#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
+#define SECTOR_MASK (PAGE_SECTORS - 1)
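These constants make byte/sector/page conversions mechanical; a few illustrative helpers (not part of the header):

static inline sector_t bytes_to_sectors(loff_t bytes)
{
	return bytes >> SECTOR_SHIFT;		/* bytes -> 512-byte sectors */
}

static inline pgoff_t sector_to_page_index(sector_t sect)
{
	return sect >> PAGE_SECTORS_SHIFT;	/* sectors -> page-cache index */
}

static inline unsigned int sector_offset_in_page(sector_t sect)
{
	return sect & SECTOR_MASK;		/* sector offset within a page */
}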
+
struct block_device {
sector_t bd_start_sect;
+ sector_t bd_nr_sectors;
+ struct gendisk * bd_disk;
+ struct request_queue * bd_queue;
struct disk_stats __percpu *bd_stats;
unsigned long bd_stamp;
- bool bd_read_only; /* read-only policy */
+ atomic_t __bd_flags; // partition number + flags
+#define BD_PARTNO 255 // lower 8 bits; assign-once
+#define BD_READ_ONLY (1u<<8) // read-only policy
+#define BD_WRITE_HOLDER (1u<<9)
+#define BD_HAS_SUBMIT_BIO (1u<<10)
+#define BD_RO_WARNED (1u<<11)
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+#define BD_MAKE_IT_FAIL (1u<<12)
+#endif
dev_t bd_dev;
- int bd_openers;
- struct inode * bd_inode; /* will die */
- struct super_block * bd_super;
- struct mutex bd_mutex; /* open/close mutex */
+ struct address_space *bd_mapping; /* page cache */
+
+ atomic_t bd_openers;
+ spinlock_t bd_size_lock; /* for bd_inode->i_size updates */
void * bd_claiming;
- struct device bd_device;
void * bd_holder;
+ const struct blk_holder_ops *bd_holder_ops;
+ struct mutex bd_holder_lock;
int bd_holders;
- bool bd_write_holder;
-#ifdef CONFIG_SYSFS
- struct list_head bd_holder_disks;
-#endif
struct kobject *bd_holder_dir;
- u8 bd_partno;
- /* number of times partitions within this device have been opened. */
- unsigned bd_part_count;
- spinlock_t bd_size_lock; /* for bd_inode->i_size updates */
- struct gendisk * bd_disk;
- struct backing_dev_info *bd_bdi;
-
- /* The counter of freeze processes */
- int bd_fsfreeze_count;
- /* Mutex for freeze */
- struct mutex bd_fsfreeze_mutex;
- struct super_block *bd_fsfreeze_sb;
+ atomic_t bd_fsfreeze_count; /* number of freeze requests */
+ struct mutex bd_fsfreeze_mutex; /* serialize freeze/thaw */
struct partition_meta_info *bd_meta_info;
-#ifdef CONFIG_FAIL_MAKE_REQUEST
- bool bd_make_it_fail;
+ int bd_writers;
+#ifdef CONFIG_SECURITY
+ void *bd_security;
#endif
+ /*
+ * keep this out-of-line as it's both big and not needed in the fast
+ * path
+ */
+ struct device bd_device;
} __randomize_layout;
#define bdev_whole(_bdev) \
@@ -70,20 +92,16 @@ struct block_device {
/*
* Block error status values. See block/blk-core:blk_errors for the details.
- * Alpha cannot write a byte atomically, so we need to use 32-bit value.
*/
-#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
-typedef u32 __bitwise blk_status_t;
-#else
typedef u8 __bitwise blk_status_t;
-#endif
+typedef u16 blk_short_t;
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP ((__force blk_status_t)1)
#define BLK_STS_TIMEOUT ((__force blk_status_t)2)
#define BLK_STS_NOSPC ((__force blk_status_t)3)
#define BLK_STS_TRANSPORT ((__force blk_status_t)4)
#define BLK_STS_TARGET ((__force blk_status_t)5)
-#define BLK_STS_NEXUS ((__force blk_status_t)6)
+#define BLK_STS_RESV_CONFLICT ((__force blk_status_t)6)
#define BLK_STS_MEDIUM ((__force blk_status_t)7)
#define BLK_STS_PROTECTION ((__force blk_status_t)8)
#define BLK_STS_RESOURCE ((__force blk_status_t)9)
@@ -92,6 +110,10 @@ typedef u8 __bitwise blk_status_t;
/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE ((__force blk_status_t)11)
+/*
+ * BLK_STS_AGAIN should only be returned if RQF_NOWAIT is set
+ * and the bio would block (cf. bio_wouldblock_error()).
+ */
#define BLK_STS_AGAIN ((__force blk_status_t)12)
/*
@@ -113,25 +135,13 @@ typedef u8 __bitwise blk_status_t;
#define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13)
/*
- * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone
- * related resources are unavailable, but the driver can guarantee the queue
- * will be rerun in the future once the resources become available again.
- *
- * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references
- * a zone specific resource and IO to a different zone on the same device could
- * still be served. Examples of that are zones that are write-locked, but a read
- * to the same zone could be served.
- */
-#define BLK_STS_ZONE_RESOURCE ((__force blk_status_t)14)
-
-/*
* BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion
* path if the device returns a status indicating that too many zone resources
* are currently open. The same command should be successful if resubmitted
* after the number of open zones decreases below the device's limits, which is
* reported in the request_queue's max_open_zones.
*/
-#define BLK_STS_ZONE_OPEN_RESOURCE ((__force blk_status_t)15)
+#define BLK_STS_ZONE_OPEN_RESOURCE ((__force blk_status_t)14)
/*
* BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion
@@ -140,7 +150,25 @@ typedef u8 __bitwise blk_status_t;
* after the number of active zones decreases below the device's limits, which
* is reported in the request_queue's max_active_zones.
*/
-#define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)16)
+#define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)15)
+
+/*
+ * BLK_STS_OFFLINE is returned from the driver when the target device is offline
+ * or is being taken offline. This could help differentiate the case where a
+ * device is intentionally being shut down from a real I/O error.
+ */
+#define BLK_STS_OFFLINE ((__force blk_status_t)16)
+
+/*
+ * BLK_STS_DURATION_LIMIT is returned from the driver when the target device
+ * aborted the command because it exceeded one of its Command Duration Limits.
+ */
+#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)17)
+
+/*
+ * Invalid size or alignment.
+ */
+#define BLK_STS_INVAL ((__force blk_status_t)19)
/**
* blk_path_error - returns true if error may be path related
@@ -160,7 +188,7 @@ static inline bool blk_path_error(blk_status_t error)
case BLK_STS_NOTSUPP:
case BLK_STS_NOSPC:
case BLK_STS_TARGET:
- case BLK_STS_NEXUS:
+ case BLK_STS_RESV_CONFLICT:
case BLK_STS_MEDIUM:
case BLK_STS_PROTECTION:
return false;
@@ -170,51 +198,10 @@ static inline bool blk_path_error(blk_status_t error)
return true;
}
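A sketch of the predicate's consumer side, shaped like a multipath failover decision (the failover helper is hypothetical): path-related errors are worth retrying on another path, target-side errors are not.

static void mypath_end_io(struct request *rq, blk_status_t status)
{
	if (status && blk_path_error(status))
		mypath_retry_on_other_path(rq);	/* transport-level failure */
	else
		blk_mq_end_request(rq, status);	/* target-level, don't retry */
}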
-/*
- * From most significant bit:
- * 1 bit: reserved for other usage, see below
- * 12 bits: original size of bio
- * 51 bits: issue time of bio
- */
-#define BIO_ISSUE_RES_BITS 1
-#define BIO_ISSUE_SIZE_BITS 12
-#define BIO_ISSUE_RES_SHIFT (64 - BIO_ISSUE_RES_BITS)
-#define BIO_ISSUE_SIZE_SHIFT (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
-#define BIO_ISSUE_TIME_MASK ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
-#define BIO_ISSUE_SIZE_MASK \
- (((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
-#define BIO_ISSUE_RES_MASK (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))
-
-/* Reserved bit for blk-throtl */
-#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)
-
-struct bio_issue {
- u64 value;
-};
-
-static inline u64 __bio_issue_time(u64 time)
-{
- return time & BIO_ISSUE_TIME_MASK;
-}
-
-static inline u64 bio_issue_time(struct bio_issue *issue)
-{
- return __bio_issue_time(issue->value);
-}
-
-static inline sector_t bio_issue_size(struct bio_issue *issue)
-{
- return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
-}
+typedef __u32 __bitwise blk_opf_t;
-static inline void bio_issue_init(struct bio_issue *issue,
- sector_t size)
-{
- size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
- issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
- (ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
- ((u64)size << BIO_ISSUE_SIZE_SHIFT));
-}
+typedef unsigned int blk_qc_t;
+#define BLK_QC_T_NONE -1U
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
@@ -223,20 +210,37 @@ static inline void bio_issue_init(struct bio_issue *issue,
struct bio {
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev;
- unsigned int bi_opf; /* bottom bits req flags,
- * top bits REQ_OP. Use
- * accessors.
+ blk_opf_t bi_opf; /* bottom bits REQ_OP, top bits
+ * req_flags.
*/
unsigned short bi_flags; /* BIO_* below */
unsigned short bi_ioprio;
- unsigned short bi_write_hint;
+ enum rw_hint bi_write_hint;
+ u8 bi_write_stream;
blk_status_t bi_status;
+
+ /*
+ * The bvec gap bit indicates the lowest set bit in any address offset
+ * between all bi_io_vecs. This field is initialized only after the bio
+ * is split to the hardware limits (see bio_split_io_at()). The value
+ * may be used to consider DMA optimization when performing that
+ * mapping. The value is compared to a power of two mask where the
+ * result depends on any bit set within the mask, so saving the lowest
+ * bit is sufficient to know if any segment gap collides with the mask.
+ */
+ u8 bi_bvec_gap_bit;
+
atomic_t __bi_remaining;
struct bvec_iter bi_iter;
+ union {
+ /* for polled bios: */
+ blk_qc_t bi_cookie;
+ /* for plugged zoned writes only: */
+ unsigned int __bi_nr_segments;
+ };
bio_end_io_t *bi_end_io;
-
void *bi_private;
#ifdef CONFIG_BLK_CGROUP
/*
@@ -246,7 +250,8 @@ struct bio {
* on release of the bio.
*/
struct blkcg_gq *bi_blkg;
- struct bio_issue bi_issue;
+ /* Time that this bio was issued. */
+ u64 issue_time_ns;
#ifdef CONFIG_BLK_CGROUP_IOCOST
u64 bi_iocost_cost;
#endif
@@ -256,11 +261,9 @@ struct bio {
struct bio_crypt_ctx *bi_crypt_context;
#endif
- union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
- struct bio_integrity_payload *bi_integrity; /* data integrity */
+ struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
- };
unsigned short bi_vcnt; /* how many bio_vec's */
@@ -275,42 +278,54 @@ struct bio {
struct bio_vec *bi_io_vec; /* the actual vec list */
struct bio_set *bi_pool;
-
- /*
- * We can inline a number of vecs at the end of the bio, to avoid
- * double allocations for a small number of bio_vecs. This member
- * MUST obviously be kept at the very end of the bio.
- */
- struct bio_vec bi_inline_vecs[];
};
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
+#define BIO_MAX_SECTORS (UINT_MAX >> SECTOR_SHIFT)
+
+static inline struct bio_vec *bio_inline_vecs(struct bio *bio)
+{
+ return (struct bio_vec *)(bio + 1);
+}
/*
* bio flags
*/
enum {
- BIO_NO_PAGE_REF, /* don't put release vec pages */
+ BIO_PAGE_PINNED, /* Unpin pages in bio_release_pages() */
BIO_CLONED, /* doesn't own data */
- BIO_BOUNCED, /* bio is a bounce bio */
- BIO_WORKINGSET, /* contains userspace workingset pages */
BIO_QUIET, /* Make BIO Quiet */
BIO_CHAIN, /* chained bio, ->bi_remaining in effect */
BIO_REFFED, /* bio has elevated ->bi_cnt */
- BIO_THROTTLED, /* This bio has already been subjected to
+ BIO_BPS_THROTTLED, /* This bio has already been subjected to
* throttling rules. Don't do it again. */
BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion
* of this bio. */
BIO_CGROUP_ACCT, /* has been accounted to a cgroup */
- BIO_TRACKED, /* set if bio goes through the rq_qos path */
+ BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */
+ /*
+ * This bio has completed bps throttling at the single tg granularity,
+ * which is different from BIO_BPS_THROTTLED. When the bio is enqueued
+ * into the sq->queued of the upper tg, or is about to be dispatched,
+ * this flag needs to be cleared. Since blk-throttle and rq_qos are not
+ * on the same hierarchical level, reuse the value.
+ */
+ BIO_TG_BPS_THROTTLED = BIO_QOS_THROTTLED,
+ BIO_QOS_MERGED, /* but went through rq_qos merge path */
BIO_REMAPPED,
+ BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
+ BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */
BIO_FLAG_LAST
};
typedef __u32 __bitwise blk_mq_req_flags_t;
-/*
- * Operations and flags common to the bio and request structures.
+#define REQ_OP_BITS 8
+#define REQ_OP_MASK (__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
+#define REQ_FLAG_BITS 24
+
+/**
+ * enum req_op - Operations common to the bio and request structures.
* We use 8 bits for encoding the operation, and the remaining 24 for flags.
*
* The least significant bit of the operation number indicates the data
@@ -322,48 +337,40 @@ typedef __u32 __bitwise blk_mq_req_flags_t;
 * If an operation does not transfer data the least significant bit has no
* meaning.
*/
-#define REQ_OP_BITS 8
-#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1)
-#define REQ_FLAG_BITS 24
-
-enum req_opf {
+enum req_op {
/* read sectors from the device */
- REQ_OP_READ = 0,
+ REQ_OP_READ = (__force blk_opf_t)0,
/* write sectors to the device */
- REQ_OP_WRITE = 1,
+ REQ_OP_WRITE = (__force blk_opf_t)1,
/* flush the volatile write cache */
- REQ_OP_FLUSH = 2,
+ REQ_OP_FLUSH = (__force blk_opf_t)2,
/* discard sectors */
- REQ_OP_DISCARD = 3,
+ REQ_OP_DISCARD = (__force blk_opf_t)3,
/* securely erase sectors */
- REQ_OP_SECURE_ERASE = 5,
- /* write the same sector many times */
- REQ_OP_WRITE_SAME = 7,
+ REQ_OP_SECURE_ERASE = (__force blk_opf_t)5,
+ /* write data at the current zone write pointer */
+ REQ_OP_ZONE_APPEND = (__force blk_opf_t)7,
/* write the zero filled sector many times */
- REQ_OP_WRITE_ZEROES = 9,
+ REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9,
/* Open a zone */
- REQ_OP_ZONE_OPEN = 10,
+ REQ_OP_ZONE_OPEN = (__force blk_opf_t)11,
/* Close a zone */
- REQ_OP_ZONE_CLOSE = 11,
+ REQ_OP_ZONE_CLOSE = (__force blk_opf_t)13,
/* Transition a zone to full */
- REQ_OP_ZONE_FINISH = 12,
- /* write data at the current zone write pointer */
- REQ_OP_ZONE_APPEND = 13,
+ REQ_OP_ZONE_FINISH = (__force blk_opf_t)15,
/* reset a zone write pointer */
- REQ_OP_ZONE_RESET = 15,
+ REQ_OP_ZONE_RESET = (__force blk_opf_t)17,
/* reset all the zone present on the device */
- REQ_OP_ZONE_RESET_ALL = 17,
+ REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)19,
- /* SCSI passthrough using struct scsi_request */
- REQ_OP_SCSI_IN = 32,
- REQ_OP_SCSI_OUT = 33,
/* Driver private requests */
- REQ_OP_DRV_IN = 34,
- REQ_OP_DRV_OUT = 35,
+ REQ_OP_DRV_IN = (__force blk_opf_t)34,
+ REQ_OP_DRV_OUT = (__force blk_opf_t)35,
- REQ_OP_LAST,
+ REQ_OP_LAST = (__force blk_opf_t)36,
};
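Since the operation occupies the low REQ_OP_BITS and the flags sit above them, an opf value splits cleanly with REQ_OP_MASK; an illustrative decode using only the accessors defined in this file:

static void show_opf(blk_opf_t opf)
{
	enum req_op op = opf & REQ_OP_MASK;
	blk_opf_t flags = opf & ~REQ_OP_MASK;

	pr_info("op=%u write=%d sync=%d flags=0x%x\n",
		(__force unsigned int)op, op_is_write(opf),
		op_is_sync(opf), (__force unsigned int)flags);
}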
+/* Keep cmd_flag_name[] in sync with the definitions below */
enum req_flag_bits {
__REQ_FAILFAST_DEV = /* no driver retries of device errors */
REQ_OP_BITS,
@@ -380,47 +387,46 @@ enum req_flag_bits {
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_BACKGROUND, /* background IO */
__REQ_NOWAIT, /* Don't wait if request will block */
+ __REQ_POLLED, /* caller polls for completion using bio_poll */
+ __REQ_ALLOC_CACHE, /* allocate IO from cache if available */
+ __REQ_SWAP, /* swap I/O */
+ __REQ_DRV, /* for driver use */
+ __REQ_FS_PRIVATE, /* for file system (submitter) use */
+ __REQ_ATOMIC, /* for atomic write operations */
/*
- * When a shared kthread needs to issue a bio for a cgroup, doing
- * so synchronously can lead to priority inversions as the kthread
- * can be trapped waiting for that cgroup. CGROUP_PUNT flag makes
- * submit_bio() punt the actual issuing to a dedicated per-blkcg
- * work item to avoid such priority inversions.
+ * Command specific flags, keep last:
*/
- __REQ_CGROUP_PUNT,
-
- /* command specific flags for REQ_OP_WRITE_ZEROES: */
+ /* for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
- __REQ_HIPRI,
-
- /* for driver use */
- __REQ_DRV,
- __REQ_SWAP, /* swapping request. */
__REQ_NR_BITS, /* stops here */
};
-#define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER)
-#define REQ_SYNC (1ULL << __REQ_SYNC)
-#define REQ_META (1ULL << __REQ_META)
-#define REQ_PRIO (1ULL << __REQ_PRIO)
-#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
-#define REQ_IDLE (1ULL << __REQ_IDLE)
-#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY)
-#define REQ_FUA (1ULL << __REQ_FUA)
-#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
-#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
-#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
-#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
-#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT)
-
-#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
-#define REQ_HIPRI (1ULL << __REQ_HIPRI)
-
-#define REQ_DRV (1ULL << __REQ_DRV)
-#define REQ_SWAP (1ULL << __REQ_SWAP)
+#define REQ_FAILFAST_DEV \
+ (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT \
+ (__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER \
+ (__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
+#define REQ_SYNC (__force blk_opf_t)(1ULL << __REQ_SYNC)
+#define REQ_META (__force blk_opf_t)(1ULL << __REQ_META)
+#define REQ_PRIO (__force blk_opf_t)(1ULL << __REQ_PRIO)
+#define REQ_NOMERGE (__force blk_opf_t)(1ULL << __REQ_NOMERGE)
+#define REQ_IDLE (__force blk_opf_t)(1ULL << __REQ_IDLE)
+#define REQ_INTEGRITY (__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
+#define REQ_FUA (__force blk_opf_t)(1ULL << __REQ_FUA)
+#define REQ_PREFLUSH (__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
+#define REQ_RAHEAD (__force blk_opf_t)(1ULL << __REQ_RAHEAD)
+#define REQ_BACKGROUND (__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
+#define REQ_NOWAIT (__force blk_opf_t)(1ULL << __REQ_NOWAIT)
+#define REQ_POLLED (__force blk_opf_t)(1ULL << __REQ_POLLED)
+#define REQ_ALLOC_CACHE (__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
+#define REQ_SWAP (__force blk_opf_t)(1ULL << __REQ_SWAP)
+#define REQ_DRV (__force blk_opf_t)(1ULL << __REQ_DRV)
+#define REQ_FS_PRIVATE (__force blk_opf_t)(1ULL << __REQ_FS_PRIVATE)
+#define REQ_ATOMIC (__force blk_opf_t)(1ULL << __REQ_ATOMIC)
+
+#define REQ_NOUNMAP (__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -437,28 +443,21 @@ enum stat_group {
NR_STAT_GROUPS
};
-#define bio_op(bio) \
- ((bio)->bi_opf & REQ_OP_MASK)
-#define req_op(req) \
- ((req)->cmd_flags & REQ_OP_MASK)
-
-/* obsolete, don't use in new code */
-static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
- unsigned op_flags)
+static inline enum req_op bio_op(const struct bio *bio)
{
- bio->bi_opf = op | op_flags;
+ return bio->bi_opf & REQ_OP_MASK;
}
-static inline bool op_is_write(unsigned int op)
+static inline bool op_is_write(blk_opf_t op)
{
- return (op & 1);
+ return !!(op & (__force blk_opf_t)1);
}
/*
* Check if the bio or request is one that needs special treatment in the
* flush state machine.
*/
-static inline bool op_is_flush(unsigned int op)
+static inline bool op_is_flush(blk_opf_t op)
{
return op & (REQ_FUA | REQ_PREFLUSH);
}
@@ -468,27 +467,25 @@ static inline bool op_is_flush(unsigned int op)
* PREFLUSH flag. Other operations may be marked as synchronous using the
* REQ_SYNC flag.
*/
-static inline bool op_is_sync(unsigned int op)
+static inline bool op_is_sync(blk_opf_t op)
{
return (op & REQ_OP_MASK) == REQ_OP_READ ||
(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
-static inline bool op_is_discard(unsigned int op)
+static inline bool op_is_discard(blk_opf_t op)
{
return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}
/*
- * Check if a bio or request operation is a zone management operation, with
- * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
- * due to its different handling in the block layer and device response in
- * case of command failure.
+ * Check if a bio or request operation is a zone management operation.
*/
-static inline bool op_is_zone_mgmt(enum req_opf op)
+static inline bool op_is_zone_mgmt(enum req_op op)
{
switch (op & REQ_OP_MASK) {
case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_RESET_ALL:
case REQ_OP_ZONE_OPEN:
case REQ_OP_ZONE_CLOSE:
case REQ_OP_ZONE_FINISH:
@@ -498,38 +495,13 @@ static inline bool op_is_zone_mgmt(enum req_opf op)
}
}
-static inline int op_stat_group(unsigned int op)
+static inline int op_stat_group(enum req_op op)
{
if (op_is_discard(op))
return STAT_DISCARD;
return op_is_write(op);
}
-typedef unsigned int blk_qc_t;
-#define BLK_QC_T_NONE -1U
-#define BLK_QC_T_SHIFT 16
-#define BLK_QC_T_INTERNAL (1U << 31)
-
-static inline bool blk_qc_t_valid(blk_qc_t cookie)
-{
- return cookie != BLK_QC_T_NONE;
-}
-
-static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
-{
- return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
-}
-
-static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
-{
- return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
-}
-
-static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
-{
- return (cookie & BLK_QC_T_INTERNAL) != 0;
-}
-
struct blk_rq_stat {
u64 mean;
u64 min;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f69c75bd6d27..72e34acd439c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1,332 +1,376 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions Copyright (C) 1992 Drew Eckhardt
+ */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
-#include <linux/major.h>
-#include <linux/genhd.h>
+#include <linux/types.h>
+#include <linux/blk_types.h>
+#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
-#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
-#include <linux/mempool.h>
-#include <linux/pfn.h>
#include <linux/bio.h>
-#include <linux/stringify.h>
#include <linux/gfp.h>
-#include <linux/bsg.h>
-#include <linux/smp.h>
+#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
-#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
-#include <linux/pm.h>
+#include <linux/sched.h>
+#include <linux/sbitmap.h>
+#include <linux/uuid.h>
+#include <linux/xarray.h>
+#include <linux/file.h>
+#include <linux/lockdep.h>
struct module;
-struct scsi_ioctl_command;
-
struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
-struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
+struct kiocb;
struct pr_ops;
struct rq_qos;
+struct blk_report_zones_args;
struct blk_queue_stats;
struct blk_stat_callback;
-struct blk_keyslot_manager;
-
-#define BLKDEV_MIN_RQ 4
-#define BLKDEV_MAX_RQ 128 /* Default maximum */
+struct blk_crypto_profile;
-/* Must be consistent with blk_mq_poll_stats_bkt() */
-#define BLK_MQ_POLL_STATS_BKTS 16
-
-/* Doing classic polling */
-#define BLK_MQ_POLL_CLASSIC -1
+extern const struct device_type disk_type;
+extern const struct device_type part_type;
+extern const struct class block_class;
/*
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
-#define BLKCG_MAX_POLS 5
-
-typedef void (rq_end_io_fn)(struct request *, blk_status_t);
+#define BLKCG_MAX_POLS 6
-/*
- * request flags */
-typedef __u32 __bitwise req_flags_t;
-
-/* drive already may have started this one */
-#define RQF_STARTED ((__force req_flags_t)(1 << 1))
-/* may not be passed by ioscheduler */
-#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
-/* request for flush sequence */
-#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
-/* merge of different types, fail separately */
-#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
-/* track inflight for MQ */
-#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
-/* don't call prep for this one */
-#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
-/* vaguely specified driver internal error. Ignored by the block layer */
-#define RQF_FAILED ((__force req_flags_t)(1 << 10))
-/* don't warn about errors */
-#define RQF_QUIET ((__force req_flags_t)(1 << 11))
-/* elevator private data attached */
-#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
-/* account into disk and partition IO statistics */
-#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
-/* runtime pm request */
-#define RQF_PM ((__force req_flags_t)(1 << 15))
-/* on IO scheduler merge hash */
-#define RQF_HASHED ((__force req_flags_t)(1 << 16))
-/* track IO completion time */
-#define RQF_STATS ((__force req_flags_t)(1 << 17))
-/* Look at ->special_vec for the actual data payload instead of the
- bio chain. */
-#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18))
-/* The per-zone write lock is held for this request */
-#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19))
-/* already slept for hybrid poll */
-#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20))
-/* ->timeout has been called, don't expire again */
-#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21))
-
-/* flags that prevent us from merging requests: */
-#define RQF_NOMERGE_FLAGS \
- (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
+#define DISK_MAX_PARTS 256
+#define DISK_NAME_LEN 32
+#define PARTITION_META_INFO_VOLNAMELTH 64
/*
- * Request state for blk-mq.
+ * Enough for the string representation of any kind of UUID plus NULL.
+ * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
*/
-enum mq_rq_state {
- MQ_RQ_IDLE = 0,
- MQ_RQ_IN_FLIGHT = 1,
- MQ_RQ_COMPLETE = 2,
+#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1)
+
+struct partition_meta_info {
+ char uuid[PARTITION_META_INFO_UUIDLTH];
+ u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};
-/*
- * Try to put the fields that are referenced together in the same cacheline.
+/**
+ * DOC: genhd capability flags
+ *
+ * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
+ * removable media. When set, the device remains present even when media is not
+ * inserted. Shall not be set for devices which are removed entirely when the
+ * media is removed.
+ *
+ * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
+ * doesn't appear in sysfs, and can't be opened from userspace or using
+ * blkdev_get*. Used for the underlying components of multipath devices.
+ *
+ * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
+ * scan for partitions from add_disk, and users can't add partitions manually.
*
- * If you modify this structure, make sure to update blk_rq_init() and
- * especially blk_mq_rq_ctx_init() to take care of the added fields.
*/
-struct request {
- struct request_queue *q;
- struct blk_mq_ctx *mq_ctx;
- struct blk_mq_hw_ctx *mq_hctx;
-
- unsigned int cmd_flags; /* op and common flags */
- req_flags_t rq_flags;
-
- int tag;
- int internal_tag;
+enum {
+ GENHD_FL_REMOVABLE = 1 << 0,
+ GENHD_FL_HIDDEN = 1 << 1,
+ GENHD_FL_NO_PART = 1 << 2,
+};
- /* the following two fields are internal, NEVER access directly */
- unsigned int __data_len; /* total data len */
- sector_t __sector; /* sector cursor */
+enum {
+ DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
+ DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
+};
- struct bio *bio;
- struct bio *biotail;
+enum {
+ /* Poll even if events_poll_msecs is unset */
+ DISK_EVENT_FLAG_POLL = 1 << 0,
+ /* Forward events to udev */
+ DISK_EVENT_FLAG_UEVENT = 1 << 1,
+ /* Block event polling when open for exclusive write */
+ DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 1 << 2,
+};
- struct list_head queuelist;
+struct disk_events;
+struct badblocks;
+
+enum blk_integrity_checksum {
+ BLK_INTEGRITY_CSUM_NONE = 0,
+ BLK_INTEGRITY_CSUM_IP = 1,
+ BLK_INTEGRITY_CSUM_CRC = 2,
+ BLK_INTEGRITY_CSUM_CRC64 = 3,
+} __packed;
+
+struct blk_integrity {
+ unsigned char flags;
+ enum blk_integrity_checksum csum_type;
+ unsigned char metadata_size;
+ unsigned char pi_offset;
+ unsigned char interval_exp;
+ unsigned char tag_size;
+ unsigned char pi_tuple_size;
+};
+typedef unsigned int __bitwise blk_mode_t;
+
+/* open for reading */
+#define BLK_OPEN_READ ((__force blk_mode_t)(1 << 0))
+/* open for writing */
+#define BLK_OPEN_WRITE ((__force blk_mode_t)(1 << 1))
+/* open exclusively (vs other exclusive openers) */
+#define BLK_OPEN_EXCL ((__force blk_mode_t)(1 << 2))
+/* opened with O_NDELAY */
+#define BLK_OPEN_NDELAY ((__force blk_mode_t)(1 << 3))
+/* open for "writes" only for ioctls (special hack for floppy.c) */
+#define BLK_OPEN_WRITE_IOCTL ((__force blk_mode_t)(1 << 4))
+/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
+#define BLK_OPEN_RESTRICT_WRITES ((__force blk_mode_t)(1 << 5))
+/* return partition scanning errors */
+#define BLK_OPEN_STRICT_SCAN ((__force blk_mode_t)(1 << 6))
+
+struct gendisk {
/*
- * The hash is used inside the scheduler, and killed once the
- * request reaches the dispatch list. The ipi_list is only used
- * to queue the request for softirq completion, which is long
- * after the request has been unhashed (and even removed from
- * the dispatch list).
+	 * major/first_minor/minors should not be set by any new driver; the
+	 * block core will take care of allocating them automatically.
*/
- union {
- struct hlist_node hash; /* merge hash */
- struct llist_node ipi_list;
- };
+ int major;
+ int first_minor;
+ int minors;
+
+ char disk_name[DISK_NAME_LEN]; /* name of major driver */
+
+ unsigned short events; /* supported events */
+ unsigned short event_flags; /* flags related to event processing */
+
+ struct xarray part_tbl;
+ struct block_device *part0;
+
+ const struct block_device_operations *fops;
+ struct request_queue *queue;
+ void *private_data;
+
+ struct bio_set bio_split;
+
+ int flags;
+ unsigned long state;
+#define GD_NEED_PART_SCAN 0
+#define GD_READ_ONLY 1
+#define GD_DEAD 2
+#define GD_NATIVE_CAPACITY 3
+#define GD_ADDED 4
+#define GD_SUPPRESS_PART_SCAN 5
+#define GD_OWNS_QUEUE 6
+#define GD_ZONE_APPEND_USED 7
+
+ struct mutex open_mutex; /* open/close mutex */
+ unsigned open_partitions; /* number of open partitions */
+
+ struct backing_dev_info *bdi;
+ struct kobject queue_kobj; /* the queue/ directory */
+ struct kobject *slave_dir;
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
+ struct list_head slave_bdevs;
+#endif
+ struct timer_rand_state *random;
+ struct disk_events *ev;
+#ifdef CONFIG_BLK_DEV_ZONED
/*
- * The rb_node is only used inside the io scheduler, requests
- * are pruned when moved to the dispatch queue. So let the
- * completion_data share space with the rb_node.
+ * Zoned block device information. Reads of this information must be
+ * protected with blk_queue_enter() / blk_queue_exit(). Modifying this
+ * information is only allowed while no requests are being processed.
+ * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue().
*/
- union {
- struct rb_node rb_node; /* sort/lookup */
- struct bio_vec special_vec;
- void *completion_data;
- int error_count; /* for legacy drivers, don't use */
- };
+ unsigned int nr_zones;
+ unsigned int zone_capacity;
+ unsigned int last_zone_capacity;
+ u8 __rcu *zones_cond;
+ unsigned int zone_wplugs_hash_bits;
+ atomic_t nr_zone_wplugs;
+ spinlock_t zone_wplugs_lock;
+ struct mempool *zone_wplugs_pool;
+ struct hlist_head *zone_wplugs_hash;
+ struct workqueue_struct *zone_wplugs_wq;
+#endif /* CONFIG_BLK_DEV_ZONED */
- /*
- * Three pointers are available for the IO schedulers, if they need
- * more they have to dynamically allocate it. Flush requests are
- * never put on the IO scheduler. So let the flush fields share
- * space with the elevator data.
- */
- union {
- struct {
- struct io_cq *icq;
- void *priv[2];
- } elv;
-
- struct {
- unsigned int seq;
- struct list_head list;
- rq_end_io_fn *saved_end_io;
- } flush;
- };
-
- struct gendisk *rq_disk;
- struct block_device *part;
-#ifdef CONFIG_BLK_RQ_ALLOC_TIME
- /* Time that the first bio started allocating this request. */
- u64 alloc_time_ns;
+#if IS_ENABLED(CONFIG_CDROM)
+ struct cdrom_device_info *cdi;
#endif
- /* Time that this request was allocated for this IO. */
- u64 start_time_ns;
- /* Time that I/O was submitted to the device. */
- u64 io_start_time_ns;
+ int node_id;
+ struct badblocks *bb;
+ struct lockdep_map lockdep_map;
+ u64 diskseq;
+ blk_mode_t open_mode;
-#ifdef CONFIG_BLK_WBT
- unsigned short wbt_flags;
-#endif
/*
- * rq sectors used for blk stats. It has the same value
- * with blk_rq_sectors(rq), except that it never be zeroed
- * by completion.
+ * Independent sector access ranges. This is always NULL for
+ * devices that do not have multiple independent access ranges.
*/
- unsigned short stats_sectors;
+ struct blk_independent_access_ranges *ia_ranges;
- /*
- * Number of scatter-gather DMA addr+len pairs after
- * physical address coalescing is performed.
- */
- unsigned short nr_phys_segments;
+ struct mutex rqos_state_mutex; /* rqos state change mutex */
+};
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
- unsigned short nr_integrity_segments;
-#endif
+/**
+ * disk_openers - returns how many openers are there for a disk
+ * @disk: disk to check
+ *
+ * This returns the number of openers for a disk. Note that this value is only
+ * stable if disk->open_mutex is held.
+ *
+ * Note: Due to a quirk in the block layer open code, each open partition is
+ * only counted once even if there are multiple openers.
+ */
+static inline unsigned int disk_openers(struct gendisk *disk)
+{
+ return atomic_read(&disk->part0->bd_openers);
+}
-#ifdef CONFIG_BLK_INLINE_ENCRYPTION
- struct bio_crypt_ctx *crypt_ctx;
- struct blk_ksm_keyslot *crypt_keyslot;
-#endif
+/**
+ * disk_has_partscan - return %true if partition scanning is enabled on a disk
+ * @disk: disk to check
+ *
+ * Returns %true if partition scanning is enabled for @disk, or %false if
+ * partition scanning is disabled either permanently or temporarily.
+ */
+static inline bool disk_has_partscan(struct gendisk *disk)
+{
+ return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
+ !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
+}
- unsigned short write_hint;
- unsigned short ioprio;
+/*
+ * The gendisk is refcounted by the part0 block_device, and the bd_device
+ * therein is also used for device model presentation in sysfs.
+ */
+#define dev_to_disk(device) \
+ (dev_to_bdev(device)->bd_disk)
+#define disk_to_dev(disk) \
+ (&((disk)->part0->bd_device))
- enum mq_rq_state state;
- refcount_t ref;
+#if IS_REACHABLE(CONFIG_CDROM)
+#define disk_to_cdi(disk) ((disk)->cdi)
+#else
+#define disk_to_cdi(disk) NULL
+#endif
- unsigned int timeout;
- unsigned long deadline;
+static inline dev_t disk_devt(struct gendisk *disk)
+{
+ return MKDEV(disk->major, disk->first_minor);
+}
- union {
- struct __call_single_data csd;
- u64 fifo_time;
- };
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * We should strive for 1 << (PAGE_SHIFT + MAX_PAGECACHE_ORDER);
+ * however, we constrain this to what we can validate and test.
+ */
+#define BLK_MAX_BLOCK_SIZE SZ_64K
+#else
+#define BLK_MAX_BLOCK_SIZE PAGE_SIZE
+#endif
- /*
- * completion callback.
- */
- rq_end_io_fn *end_io;
- void *end_io_data;
-};
-static inline bool blk_op_is_scsi(unsigned int op)
+/* blk_validate_limits() validates bsize, so drivers don't usually need to */
+static inline int blk_validate_block_size(unsigned long bsize)
{
- return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
+ if (bsize < 512 || bsize > BLK_MAX_BLOCK_SIZE || !is_power_of_2(bsize))
+ return -EINVAL;
+
+ return 0;
}
-static inline bool blk_op_is_private(unsigned int op)
+static inline bool blk_op_is_passthrough(blk_opf_t op)
{
+ op &= REQ_OP_MASK;
return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}
-static inline bool blk_rq_is_scsi(struct request *rq)
-{
- return blk_op_is_scsi(req_op(rq));
-}
+/* flags set by the driver in queue_limits.features */
+typedef unsigned int __bitwise blk_features_t;
-static inline bool blk_rq_is_private(struct request *rq)
-{
- return blk_op_is_private(req_op(rq));
-}
+/* supports a volatile write cache */
+#define BLK_FEAT_WRITE_CACHE ((__force blk_features_t)(1u << 0))
-static inline bool blk_rq_is_passthrough(struct request *rq)
-{
- return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
-}
+/* supports passing on the FUA bit */
+#define BLK_FEAT_FUA ((__force blk_features_t)(1u << 1))
-static inline bool bio_is_passthrough(struct bio *bio)
-{
- unsigned op = bio_op(bio);
+/* rotational device (hard drive or floppy) */
+#define BLK_FEAT_ROTATIONAL ((__force blk_features_t)(1u << 2))
- return blk_op_is_scsi(op) || blk_op_is_private(op);
-}
+/* contributes to the random number pool */
+#define BLK_FEAT_ADD_RANDOM ((__force blk_features_t)(1u << 3))
-static inline bool blk_op_is_passthrough(unsigned int op)
-{
- return (blk_op_is_scsi(op & REQ_OP_MASK) ||
- blk_op_is_private(op & REQ_OP_MASK));
-}
+/* do disk/partitions IO accounting */
+#define BLK_FEAT_IO_STAT ((__force blk_features_t)(1u << 4))
-static inline unsigned short req_get_ioprio(struct request *req)
-{
- return req->ioprio;
-}
+/* don't modify data until writeback is done */
+#define BLK_FEAT_STABLE_WRITES ((__force blk_features_t)(1u << 5))
-#include <linux/elevator.h>
+/* always completes in submit context */
+#define BLK_FEAT_SYNCHRONOUS ((__force blk_features_t)(1u << 6))
-struct blk_queue_ctx;
+/* supports REQ_NOWAIT */
+#define BLK_FEAT_NOWAIT ((__force blk_features_t)(1u << 7))
-struct bio_vec;
+/* supports DAX */
+#define BLK_FEAT_DAX ((__force blk_features_t)(1u << 8))
-enum blk_eh_timer_return {
- BLK_EH_DONE, /* drivers has completed the command */
- BLK_EH_RESET_TIMER, /* reset timer and try again */
-};
+/* supports I/O polling */
+#define BLK_FEAT_POLL ((__force blk_features_t)(1u << 9))
-enum blk_queue_state {
- Queue_down,
- Queue_up,
-};
+/* is a zoned device */
+#define BLK_FEAT_ZONED ((__force blk_features_t)(1u << 10))
-#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
-#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
+/* supports PCI(e) p2p requests */
+#define BLK_FEAT_PCI_P2PDMA ((__force blk_features_t)(1u << 12))
-#define BLK_SCSI_MAX_CMDS (256)
-#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
+/* skip this queue in blk_mq_(un)quiesce_tagset */
+#define BLK_FEAT_SKIP_TAGSET_QUIESCE ((__force blk_features_t)(1u << 13))
-/*
- * Zoned block device models (zoned limit).
- *
- * Note: This needs to be ordered from the least to the most severe
- * restrictions for the inheritance in blk_stack_limits() to work.
- */
-enum blk_zoned_model {
- BLK_ZONED_NONE = 0, /* Regular block device */
- BLK_ZONED_HA, /* Host-aware zoned block device */
- BLK_ZONED_HM, /* Host-managed zoned block device */
-};
+/* undocumented magic for bcache */
+#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
+ ((__force blk_features_t)(1u << 15))
+
+/* atomic writes enabled */
+#define BLK_FEAT_ATOMIC_WRITES \
+ ((__force blk_features_t)(1u << 16))
/*
- * BLK_BOUNCE_NONE: never bounce (default)
- * BLK_BOUNCE_HIGH: bounce all highmem pages
+ * Flags automatically inherited when stacking limits.
*/
-enum blk_bounce {
- BLK_BOUNCE_NONE,
- BLK_BOUNCE_HIGH,
-};
+#define BLK_FEAT_INHERIT_MASK \
+ (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
+ BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | \
+ BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)
+
+/* internal flags in queue_limits.flags */
+typedef unsigned int __bitwise blk_flags_t;
+
+/* do not send FLUSH/FUA commands despite advertising a write cache */
+#define BLK_FLAG_WRITE_CACHE_DISABLED ((__force blk_flags_t)(1u << 0))
+
+/* I/O topology is misaligned */
+#define BLK_FLAG_MISALIGNED ((__force blk_flags_t)(1u << 1))
+
+/* passthrough command IO accounting */
+#define BLK_FLAG_IOSTATS_PASSTHROUGH ((__force blk_flags_t)(1u << 2))
struct queue_limits {
- enum blk_bounce bounce;
+ blk_features_t features;
+ blk_flags_t flags;
unsigned long seg_boundary_mask;
unsigned long virt_boundary_mask;
@@ -334,7 +378,9 @@ struct queue_limits {
unsigned int max_dev_sectors;
unsigned int chunk_sectors;
unsigned int max_sectors;
+ unsigned int max_user_sectors;
unsigned int max_segment_size;
+ unsigned int max_fast_segment_size;
unsigned int physical_block_size;
unsigned int logical_block_size;
unsigned int alignment_offset;
@@ -342,127 +388,144 @@ struct queue_limits {
unsigned int io_opt;
unsigned int max_discard_sectors;
unsigned int max_hw_discard_sectors;
- unsigned int max_write_same_sectors;
+ unsigned int max_user_discard_sectors;
+ unsigned int max_secure_erase_sectors;
unsigned int max_write_zeroes_sectors;
+ unsigned int max_wzeroes_unmap_sectors;
+ unsigned int max_hw_wzeroes_unmap_sectors;
+ unsigned int max_user_wzeroes_unmap_sectors;
+ unsigned int max_hw_zone_append_sectors;
unsigned int max_zone_append_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
unsigned int zone_write_granularity;
+ /* atomic write limits */
+ unsigned int atomic_write_hw_max;
+ unsigned int atomic_write_max_sectors;
+ unsigned int atomic_write_hw_boundary;
+ unsigned int atomic_write_boundary_sectors;
+ unsigned int atomic_write_hw_unit_min;
+ unsigned int atomic_write_unit_min;
+ unsigned int atomic_write_hw_unit_max;
+ unsigned int atomic_write_unit_max;
+
unsigned short max_segments;
unsigned short max_integrity_segments;
unsigned short max_discard_segments;
- unsigned char misaligned;
- unsigned char discard_misaligned;
- unsigned char raid_partial_stripes_expensive;
- enum blk_zoned_model zoned;
+ unsigned short max_write_streams;
+ unsigned int write_stream_granularity;
+
+ unsigned int max_open_zones;
+ unsigned int max_active_zones;
+
+ /*
+ * Drivers that set dma_alignment to less than 511 must be prepared to
+	 * handle individual bvecs that are not a multiple of SECTOR_SIZE
+ * due to possible offsets.
+ */
+ unsigned int dma_alignment;
+ unsigned int dma_pad_mask;
+
+ struct blk_integrity integrity;
};
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
void *data);
-void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model);
+int disk_report_zone(struct gendisk *disk, struct blk_zone *zone,
+ unsigned int idx, struct blk_report_zones_args *args);
-#ifdef CONFIG_BLK_DEV_ZONED
+int blkdev_get_zone_info(struct block_device *bdev, sector_t sector,
+ struct blk_zone *zone);
#define BLK_ALL_ZONES ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data);
-unsigned int blkdev_nr_zones(struct gendisk *disk);
-extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
- sector_t sectors, sector_t nr_sectors,
- gfp_t gfp_mask);
-int blk_revalidate_disk_zones(struct gendisk *disk,
- void (*update_driver_data)(struct gendisk *disk));
-
-extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
-extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
-
-#else /* CONFIG_BLK_DEV_ZONED */
-
-static inline unsigned int blkdev_nr_zones(struct gendisk *disk)
-{
- return 0;
-}
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+int blkdev_report_zones_cached(struct block_device *bdev, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
+ sector_t sectors, sector_t nr_sectors);
+int blk_revalidate_disk_zones(struct gendisk *disk);
-static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
- fmode_t mode, unsigned int cmd,
- unsigned long arg)
-{
- return -ENOTTY;
-}
-
-static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
- fmode_t mode, unsigned int cmd,
- unsigned long arg)
-{
- return -ENOTTY;
-}
+/*
+ * Independent access ranges: struct blk_independent_access_range describes
+ * a range of contiguous sectors that can be accessed using device command
+ * execution resources that are independent from the resources used for
+ * other access ranges. This is typically found with single-LUN multi-actuator
+ * HDDs where each access range is served by a different set of heads.
+ * The set of independent ranges supported by the device is defined using
+ * struct blk_independent_access_ranges. The independent ranges must not overlap
+ * and must include all sectors within the disk capacity (no sector holes
+ * allowed).
+ * For a device with multiple ranges, requests targeting sectors in different
+ * ranges can be executed in parallel. A request can straddle an access range
+ * boundary.
+ */
+struct blk_independent_access_range {
+ struct kobject kobj;
+ sector_t sector;
+ sector_t nr_sectors;
+};
-#endif /* CONFIG_BLK_DEV_ZONED */
+struct blk_independent_access_ranges {
+ struct kobject kobj;
+ bool sysfs_registered;
+ unsigned int nr_ia_ranges;
+ struct blk_independent_access_range ia_range[];
+};
struct request_queue {
- struct request *last_merge;
- struct elevator_queue *elevator;
-
- struct percpu_ref q_usage_counter;
+ /*
+ * The queue owner gets to use this for whatever they like.
+ * ll_rw_blk doesn't touch it.
+ */
+ void *queuedata;
- struct blk_queue_stats *stats;
- struct rq_qos *rq_qos;
+ struct elevator_queue *elevator;
const struct blk_mq_ops *mq_ops;
/* sw queues */
struct blk_mq_ctx __percpu *queue_ctx;
+ /*
+ * various queue flags, see QUEUE_* below
+ */
+ unsigned long queue_flags;
+
+ unsigned int __data_racy rq_timeout;
+
unsigned int queue_depth;
+ refcount_t refs;
+
/* hw dispatch queues */
- struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
+ struct blk_mq_hw_ctx * __rcu *queue_hw_ctx;
- struct backing_dev_info *backing_dev_info;
+ struct percpu_ref q_usage_counter;
+ struct lock_class_key io_lock_cls_key;
+ struct lockdep_map io_lockdep_map;
- /*
- * The queue owner gets to use this for whatever they like.
- * ll_rw_blk doesn't touch it.
- */
- void *queuedata;
+ struct lock_class_key q_lock_cls_key;
+ struct lockdep_map q_lockdep_map;
- /*
- * various queue flags, see QUEUE_* below
- */
- unsigned long queue_flags;
- /*
- * Number of contexts that have called blk_set_pm_only(). If this
- * counter is above zero then only RQF_PM requests are processed.
- */
- atomic_t pm_only;
-
- /*
- * ida allocated id for this queue. Used to index queues from
- * ioctx.
- */
- int id;
+ struct request *last_merge;
spinlock_t queue_lock;
- /*
- * queue kobject
- */
- struct kobject kobj;
+ int quiesce_depth;
+
+ struct gendisk *disk;
/*
* mq queue kobject
*/
struct kobject *mq_kobj;
-#ifdef CONFIG_BLK_DEV_INTEGRITY
- struct blk_integrity integrity;
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
+ struct queue_limits limits;
#ifdef CONFIG_PM
struct device *dev;
@@ -470,72 +533,52 @@ struct request_queue {
#endif
/*
+ * Number of contexts that have called blk_set_pm_only(). If this
+ * counter is above zero then only RQF_PM requests are processed.
+ */
+ atomic_t pm_only;
+
+ struct blk_queue_stats *stats;
+ struct rq_qos *rq_qos;
+ struct mutex rq_qos_mutex;
+
+ /*
+ * ida allocated id for this queue. Used to index queues from
+ * ioctx.
+ */
+ int id;
+
+ /*
* queue settings
*/
unsigned long nr_requests; /* Max # of requests */
- unsigned int dma_pad_mask;
- unsigned int dma_alignment;
-
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
- /* Inline crypto capabilities */
- struct blk_keyslot_manager *ksm;
+ struct blk_crypto_profile *crypto_profile;
+ struct kobject *crypto_kobject;
#endif
- unsigned int rq_timeout;
- int poll_nsec;
-
- struct blk_stat_callback *poll_cb;
- struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];
-
struct timer_list timeout;
struct work_struct timeout_work;
- atomic_t nr_active_requests_shared_sbitmap;
+ atomic_t nr_active_requests_shared_tags;
+
+ struct blk_mq_tags *sched_shared_tags;
struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
+ struct mutex blkcg_mutex;
#endif
- struct queue_limits limits;
-
- unsigned int required_elevator_features;
+ int node;
-#ifdef CONFIG_BLK_DEV_ZONED
- /*
- * Zoned block device information for request dispatch control.
- * nr_zones is the total number of zones of the device. This is always
- * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
- * bits which indicates if a zone is conventional (bit set) or
- * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
- * bits which indicates if a zone is write locked, that is, if a write
- * request targeting the zone was dispatched. All three fields are
- * initialized by the low level device driver (e.g. scsi/sd.c).
- * Stacking drivers (device mappers) may or may not initialize
- * these fields.
- *
- * Reads of this information must be protected with blk_queue_enter() /
- * blk_queue_exit(). Modifying this information is only allowed while
- * no requests are being processed. See also blk_mq_freeze_queue() and
- * blk_mq_unfreeze_queue().
- */
- unsigned int nr_zones;
- unsigned long *conv_zones_bitmap;
- unsigned long *seq_zones_wlock;
- unsigned int max_open_zones;
- unsigned int max_active_zones;
-#endif /* CONFIG_BLK_DEV_ZONED */
+ spinlock_t requeue_lock;
+ struct list_head requeue_list;
+ struct delayed_work requeue_work;
- /*
- * sg stuff
- */
- unsigned int sg_timeout;
- unsigned int sg_reserved_size;
- int node;
- struct mutex debugfs_mutex;
#ifdef CONFIG_BLK_DEV_IO_TRACE
struct blk_trace __rcu *blk_trace;
#endif
@@ -543,13 +586,25 @@ struct request_queue {
* for flush operations
*/
struct blk_flush_queue *fq;
+ struct list_head flush_list;
- struct list_head requeue_list;
- spinlock_t requeue_lock;
- struct delayed_work requeue_work;
+ /*
+ * Protects against I/O scheduler switching, particularly when updating
+ * q->elevator. Since the elevator update code path may also modify q->
+ * nr_requests and wbt latency, this lock also protects the sysfs attrs
+ * nr_requests and wbt_lat_usec. Additionally the nr_hw_queues update
+ * may modify hctx tags, reserved-tags and cpumask, so this lock also
+ * helps protect the hctx sysfs/debugfs attrs. To ensure proper locking
+ * order during an elevator or nr_hw_queues update, first freeze the
+ * queue, then acquire ->elevator_lock.
+ */
+ struct mutex elevator_lock;
struct mutex sysfs_lock;
- struct mutex sysfs_dir_lock;
+ /*
+ * Protects queue limits and also sysfs attribute read_ahead_kb.
+ */
+ struct mutex limits_lock;
/*
* for reusing dead hctx instance in case of updating
@@ -560,15 +615,21 @@ struct request_queue {
int mq_freeze_depth;
-#if defined(CONFIG_BLK_DEV_BSG)
- struct bsg_class_device bsg_dev;
-#endif
-
#ifdef CONFIG_BLK_DEV_THROTTLING
/* Throttle data */
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
+#ifdef CONFIG_LOCKDEP
+ struct task_struct *mq_freeze_owner;
+ int mq_freeze_owner_depth;
+ /*
+	 * Records the disk and queue state in the current context; used when
+	 * unfreezing the queue
+ */
+ bool mq_freeze_disk_dead;
+ bool mq_freeze_queue_dying;
+#endif
wait_queue_head_t mq_freeze_wq;
/*
* Protect concurrent access to q_usage_counter by
@@ -578,85 +639,54 @@ struct request_queue {
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
- struct bio_set bio_split;
struct dentry *debugfs_dir;
-
-#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
-#endif
-
- bool mq_sysfs_init_done;
-
- size_t cmd_size;
-
-#define BLK_MAX_WRITE_HINTS 5
- u64 write_hints[BLK_MAX_WRITE_HINTS];
+ /*
+ * Serializes all debugfs metadata operations using the above dentries.
+ */
+ struct mutex debugfs_mutex;
};
/* Keep blk_queue_flag_name[] in sync with the definitions below */
-#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
-#define QUEUE_FLAG_DYING 1 /* queue being torn down */
-#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
-#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
-#define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */
-#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */
-#define QUEUE_FLAG_DISCARD 8 /* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
-#define QUEUE_FLAG_SECERASE 11 /* supports secure erase */
-#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
-#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */
-#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
-#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
-#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
-#define QUEUE_FLAG_WC 17 /* Write back caching */
-#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
-#define QUEUE_FLAG_DAX 19 /* device supports DAX */
-#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */
-#define QUEUE_FLAG_POLL_STATS 21 /* collecting stats for hybrid polling */
-#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
-#define QUEUE_FLAG_SCSI_PASSTHROUGH 23 /* queue supports SCSI commands */
-#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
-#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
-#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */
-#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
-#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
-#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */
-
-#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_SAME_COMP) | \
- (1 << QUEUE_FLAG_NOWAIT))
+enum {
+ QUEUE_FLAG_DYING, /* queue being torn down */
+ QUEUE_FLAG_NOMERGES, /* disable merge attempts */
+ QUEUE_FLAG_SAME_COMP, /* complete on same CPU-group */
+ QUEUE_FLAG_FAIL_IO, /* fake timeout */
+ QUEUE_FLAG_NOXMERGES, /* No extended merges */
+ QUEUE_FLAG_SAME_FORCE, /* force complete on same CPU */
+ QUEUE_FLAG_INIT_DONE, /* queue is initialized */
+ QUEUE_FLAG_STATS, /* track IO start and completion times */
+ QUEUE_FLAG_REGISTERED, /* queue has been registered to a disk */
+ QUEUE_FLAG_QUIESCED, /* queue has been quiesced */
+ QUEUE_FLAG_RQ_ALLOC_TIME, /* record rq->alloc_time_ns */
+ QUEUE_FLAG_HCTX_ACTIVE, /* at least one blk-mq hctx is active */
+ QUEUE_FLAG_SQ_SCHED, /* single queue style io dispatch */
+ QUEUE_FLAG_DISABLE_WBT_DEF, /* for sched to disable/enable wbt */
+ QUEUE_FLAG_NO_ELV_SWITCH, /* can't switch elevator any more */
+ QUEUE_FLAG_QOS_ENABLED, /* qos is enabled */
+ QUEUE_FLAG_BIO_ISSUE_TIME, /* record bio->issue_time_ns */
+ QUEUE_FLAG_MAX
+};
+
+#define QUEUE_FLAG_MQ_DEFAULT (1UL << QUEUE_FLAG_SAME_COMP)
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
-bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
-#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
-#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
-#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
-#define blk_queue_stable_writes(q) \
- test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
-#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
-#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
-#define blk_queue_zone_resetall(q) \
- test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
-#define blk_queue_secure_erase(q) \
- (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
-#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
-#define blk_queue_scsi_passthrough(q) \
- test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
-#define blk_queue_pci_p2pdma(q) \
- test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
+#define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
+#define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
+#define blk_queue_passthrough_stat(q) \
+ ((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
+#define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX)
+#define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q) \
test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
@@ -669,26 +699,24 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
-#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
-#define blk_queue_nowait(q) test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
+#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
+#define blk_queue_skip_tagset_quiesce(q) \
+ ((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)
+#define blk_queue_disable_wbt(q) \
+ test_bit(QUEUE_FLAG_DISABLE_WBT_DEF, &(q)->queue_flags)
+#define blk_queue_no_elv_switch(q) \
+ test_bit(QUEUE_FLAG_NO_ELV_SWITCH, &(q)->queue_flags)
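
The flags are plain bits in q->queue_flags, so a set followed by one of the test macros above behaves as expected; a tiny sketch (the choice of NOMERGES is arbitrary):

static void example_disable_merges(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	WARN_ON_ONCE(!blk_queue_nomerges(q));
}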
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
-
-#define rq_dma_dir(rq) \
- (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
-
#define dma_map_bvec(dev, bv, dir, attrs) \
dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
(dir), (attrs))
-#define queue_to_disk(q) (dev_to_disk(kobj_to_dev((q)->kobj.parent)))
-
static inline bool queue_is_mq(struct request_queue *q)
{
return q->mq_ops;
@@ -706,512 +734,428 @@ static inline enum rpm_status queue_rpm_status(struct request_queue *q)
}
#endif
-static inline enum blk_zoned_model
-blk_queue_zoned_model(struct request_queue *q)
+static inline bool blk_queue_is_zoned(struct request_queue *q)
{
- if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
- return q->limits.zoned;
- return BLK_ZONED_NONE;
+ return IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ (q->limits.features & BLK_FEAT_ZONED);
}
-static inline bool blk_queue_is_zoned(struct request_queue *q)
+static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
- switch (blk_queue_zoned_model(q)) {
- case BLK_ZONED_HA:
- case BLK_ZONED_HM:
- return true;
- default:
- return false;
- }
+ if (!blk_queue_is_zoned(disk->queue))
+ return 0;
+ return sector >> ilog2(disk->queue->limits.chunk_sectors);
}
-static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
+static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
- return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
+ return bdev->bd_disk->queue->limits.max_open_zones;
}
-#ifdef CONFIG_BLK_DEV_ZONED
-static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
+static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
- return blk_queue_is_zoned(q) ? q->nr_zones : 0;
+ return bdev->bd_disk->queue->limits.max_active_zones;
}
-static inline unsigned int blk_queue_zone_no(struct request_queue *q,
- sector_t sector)
+static inline unsigned int blk_queue_depth(struct request_queue *q)
{
- if (!blk_queue_is_zoned(q))
- return 0;
- return sector >> ilog2(q->limits.chunk_sectors);
+ if (q->queue_depth)
+ return q->queue_depth;
+
+ return q->nr_requests;
}
-static inline bool blk_queue_zone_is_seq(struct request_queue *q,
- sector_t sector)
+/*
+ * default timeout for SG_IO if none specified
+ */
+#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
+#define BLK_MIN_SG_TIMEOUT (7 * HZ)
+
+/* This should not be used directly - use rq_for_each_segment */
+#define for_each_bio(_bio) \
+ for (; _bio; _bio = _bio->bi_next)
+
+int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups,
+ struct fwnode_handle *fwnode);
+int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups);
+static inline int __must_check add_disk(struct gendisk *disk)
{
- if (!blk_queue_is_zoned(q))
- return false;
- if (!q->conv_zones_bitmap)
- return true;
- return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
+ return device_add_disk(NULL, disk, NULL);
}
+void del_gendisk(struct gendisk *gp);
+void invalidate_disk(struct gendisk *disk);
+void set_disk_ro(struct gendisk *disk, bool read_only);
+void disk_uevent(struct gendisk *disk, enum kobject_action action);
-static inline void blk_queue_max_open_zones(struct request_queue *q,
- unsigned int max_open_zones)
+static inline u8 bdev_partno(const struct block_device *bdev)
{
- q->max_open_zones = max_open_zones;
+ return atomic_read(&bdev->__bd_flags) & BD_PARTNO;
}
-static inline unsigned int queue_max_open_zones(const struct request_queue *q)
+static inline bool bdev_test_flag(const struct block_device *bdev, unsigned flag)
{
- return q->max_open_zones;
+ return atomic_read(&bdev->__bd_flags) & flag;
}
-static inline void blk_queue_max_active_zones(struct request_queue *q,
- unsigned int max_active_zones)
+static inline void bdev_set_flag(struct block_device *bdev, unsigned flag)
{
- q->max_active_zones = max_active_zones;
+ atomic_or(flag, &bdev->__bd_flags);
}
-static inline unsigned int queue_max_active_zones(const struct request_queue *q)
+static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag)
{
- return q->max_active_zones;
+ atomic_andnot(flag, &bdev->__bd_flags);
}
-#else /* CONFIG_BLK_DEV_ZONED */
-static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
+
+static inline bool get_disk_ro(struct gendisk *disk)
{
- return 0;
+ return bdev_test_flag(disk->part0, BD_READ_ONLY) ||
+ test_bit(GD_READ_ONLY, &disk->state);
}
-static inline bool blk_queue_zone_is_seq(struct request_queue *q,
- sector_t sector)
+
+static inline bool bdev_read_only(struct block_device *bdev)
{
- return false;
+ return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk);
}
-static inline unsigned int blk_queue_zone_no(struct request_queue *q,
- sector_t sector)
+
+bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
+void disk_force_media_change(struct gendisk *disk);
+void bdev_mark_dead(struct block_device *bdev, bool surprise);
+
+void add_disk_randomness(struct gendisk *disk) __latent_entropy;
+void rand_initialize_disk(struct gendisk *disk);
+
+static inline sector_t get_start_sect(struct block_device *bdev)
{
- return 0;
+ return bdev->bd_start_sect;
}
-static inline unsigned int queue_max_open_zones(const struct request_queue *q)
+
+static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
- return 0;
+ return bdev->bd_nr_sectors;
}
-static inline unsigned int queue_max_active_zones(const struct request_queue *q)
+
+static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
- return 0;
+ return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}
-#endif /* CONFIG_BLK_DEV_ZONED */
-static inline bool rq_is_sync(struct request *rq)
+static inline sector_t get_capacity(struct gendisk *disk)
{
- return op_is_sync(rq->cmd_flags);
+ return bdev_nr_sectors(disk->part0);
}
-static inline bool rq_mergeable(struct request *rq)
+static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
- if (blk_rq_is_passthrough(rq))
- return false;
+ return bdev_nr_sectors(sb->s_bdev) >>
+ (sb->s_blocksize_bits - SECTOR_SHIFT);
+}
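
The size helpers above are simple unit conversions; as a sketch, reading one device's size in all three units (the pr_info() reporting is illustrative only):

static void example_report_size(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;

	pr_info("%llu sectors, %lld bytes, %llu fs blocks\n",
		(unsigned long long)bdev_nr_sectors(bdev),
		(long long)bdev_nr_bytes(bdev),
		(unsigned long long)sb_bdev_nr_blocks(sb));
}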
- if (req_op(rq) == REQ_OP_FLUSH)
- return false;
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+ return disk->nr_zones;
+}
- if (req_op(rq) == REQ_OP_WRITE_ZEROES)
+/**
+ * bio_needs_zone_write_plugging - Check if a BIO needs to be handled with zone
+ * write plugging
+ * @bio: The BIO being submitted
+ *
+ * Return true whenever @bio execution needs to be handled through zone
+ * write plugging (using blk_zone_plug_bio()). Return false otherwise.
+ */
+static inline bool bio_needs_zone_write_plugging(struct bio *bio)
+{
+ enum req_op op = bio_op(bio);
+
+ /*
+	 * Only zoned block devices have a zone write plug hash table, and even
+	 * then not all of them do (e.g. DM devices may not need one).
+ */
+ if (!bio->bi_bdev->bd_disk->zone_wplugs_hash)
return false;
- if (req_op(rq) == REQ_OP_ZONE_APPEND)
+ /* Only write operations need zone write plugging. */
+ if (!op_is_write(op))
return false;
- if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
+ /* Ignore empty flush */
+ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return false;
- if (rq->rq_flags & RQF_NOMERGE_FLAGS)
+
+	/* Ignore BIOs that have already been handled by zone write plugging. */
+ if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
return false;
- return true;
+ /*
+ * All zone write operations must be handled through zone write plugging
+ * using blk_zone_plug_bio().
+ */
+ switch (op) {
+ case REQ_OP_ZONE_APPEND:
+ case REQ_OP_WRITE:
+ case REQ_OP_WRITE_ZEROES:
+ case REQ_OP_ZONE_FINISH:
+ case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_RESET_ALL:
+ return true;
+ default:
+ return false;
+ }
}
-static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
+bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
+
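Taken together with bio_needs_zone_write_plugging(), a zoned driver's submission path could gate writes roughly as follows; the surrounding splitting and issuing logic is elided and assumed:

static void example_submit_bio(struct bio *bio, unsigned int nr_segs)
{
	if (bio_needs_zone_write_plugging(bio) &&
	    blk_zone_plug_bio(bio, nr_segs))
		return;	/* plugged: the BIO will be resubmitted later */

	/* ... issue @bio to the device ... */
}
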
+/**
+ * disk_zone_capacity - returns the zone capacity of the zone containing @sector
+ * @disk: disk to work with
+ * @sector: a sector number within the zone being queried
+ *
+ * Returns the zone capacity of a zone containing @sector. @sector can be any
+ * sector in the zone.
+ */
+static inline unsigned int disk_zone_capacity(struct gendisk *disk,
+ sector_t sector)
{
- if (bio_page(a) == bio_page(b) &&
- bio_offset(a) == bio_offset(b))
- return true;
+ sector_t zone_sectors = disk->queue->limits.chunk_sectors;
- return false;
+ if (sector + zone_sectors >= get_capacity(disk))
+ return disk->last_zone_capacity;
+ return disk->zone_capacity;
}
-
-static inline unsigned int blk_queue_depth(struct request_queue *q)
+static inline unsigned int bdev_zone_capacity(struct block_device *bdev,
+ sector_t pos)
{
- if (q->queue_depth)
- return q->queue_depth;
-
- return q->nr_requests;
+ return disk_zone_capacity(bdev->bd_disk, pos);
}
-/*
- * default timeout for SG_IO if none specified
- */
-#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
-#define BLK_MIN_SG_TIMEOUT (7 * HZ)
+bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector);
-struct rq_map_data {
- struct page **pages;
- int page_order;
- int nr_entries;
- unsigned long offset;
- int null_mapped;
- int from_user;
-};
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
+{
+ return 0;
+}
-struct req_iterator {
- struct bvec_iter iter;
- struct bio *bio;
-};
+static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector)
+{
+ return false;
+}
-/* This should not be used directly - use rq_for_each_segment */
-#define for_each_bio(_bio) \
- for (; _bio; _bio = _bio->bi_next)
-#define __rq_for_each_bio(_bio, rq) \
- if ((rq->bio)) \
- for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
+static inline bool bio_needs_zone_write_plugging(struct bio *bio)
+{
+ return false;
+}
+
+static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+{
+ return false;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
-#define rq_for_each_segment(bvl, _rq, _iter) \
- __rq_for_each_bio(_iter.bio, _rq) \
- bio_for_each_segment(bvl, _iter.bio, _iter.iter)
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
+{
+ return disk_nr_zones(bdev->bd_disk);
+}
-#define rq_for_each_bvec(bvl, _rq, _iter) \
- __rq_for_each_bio(_iter.bio, _rq) \
- bio_for_each_bvec(bvl, _iter.bio, _iter.iter)
+int bdev_disk_changed(struct gendisk *disk, bool invalidate);
-#define rq_iter_last(bvec, _iter) \
- (_iter.bio->bi_next == NULL && \
- bio_iter_last(bvec, _iter.iter))
+void put_disk(struct gendisk *disk);
+struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
+ struct lock_class_key *lkclass);
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
-#endif
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-extern void rq_flush_dcache_pages(struct request *rq);
+/**
+ * blk_alloc_disk - allocate a gendisk structure
+ * @lim: queue limits to be used for this disk.
+ * @node_id: numa node to allocate on
+ *
+ * Allocate and pre-initialize a gendisk structure for use with BIO based
+ * drivers.
+ *
+ * Returns an ERR_PTR on error, else the allocated disk.
+ *
+ * Context: can sleep
+ */
+#define blk_alloc_disk(lim, node_id) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ __blk_alloc_disk(lim, node_id, &__key); \
+})
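
Usage is straightforward for a BIO-based driver; a sketch, where the 4096-byte logical block size is an assumed device property:

static struct gendisk *example_alloc_disk(void)
{
	struct queue_limits lim = {
		.logical_block_size	= 4096,
	};
	struct gendisk *disk = blk_alloc_disk(&lim, NUMA_NO_NODE);

	return IS_ERR(disk) ? NULL : disk;
}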
+
+int __register_blkdev(unsigned int major, const char *name,
+ void (*probe)(dev_t devt));
+#define register_blkdev(major, name) \
+ __register_blkdev(major, name, NULL)
+void unregister_blkdev(unsigned int major, const char *name);
+
+bool disk_check_media_change(struct gendisk *disk);
+void set_capacity(struct gendisk *disk, sector_t size);
+
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
+int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
+void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
-static inline void rq_flush_dcache_pages(struct request *rq)
+static inline int bd_link_disk_holder(struct block_device *bdev,
+ struct gendisk *disk)
{
+ return 0;
}
-#endif
+static inline void bd_unlink_disk_holder(struct block_device *bdev,
+ struct gendisk *disk)
+{
+}
+#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */
+
+dev_t part_devt(struct gendisk *disk, u8 partno);
+void inc_diskseq(struct gendisk *disk);
+void blk_request_module(dev_t devt);
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
-blk_qc_t submit_bio_noacct(struct bio *bio);
-extern void blk_rq_init(struct request_queue *q, struct request *rq);
-extern void blk_put_request(struct request *);
-extern struct request *blk_get_request(struct request_queue *, unsigned int op,
- blk_mq_req_flags_t flags);
-extern int blk_lld_busy(struct request_queue *q);
-extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
- struct bio_set *bs, gfp_t gfp_mask,
- int (*bio_ctr)(struct bio *, struct bio *, void *),
- void *data);
-extern void blk_rq_unprep_clone(struct request *rq);
-extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
- struct request *rq);
-int blk_rq_append_bio(struct request *rq, struct bio *bio);
-extern void blk_queue_split(struct bio **);
-extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
-extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
- unsigned int, void __user *);
-extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
- unsigned int, void __user *);
-extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
- struct scsi_ioctl_command __user *);
-extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
-extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
+void submit_bio_noacct(struct bio *bio);
+struct bio *bio_split_to_limits(struct bio *bio);
+struct bio *bio_submit_split_bioset(struct bio *bio, unsigned int split_sectors,
+ struct bio_set *bs);
+extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
-extern int blk_rq_map_user(struct request_queue *, struct request *,
- struct rq_map_data *, void __user *, unsigned long,
- gfp_t);
-extern int blk_rq_unmap_user(struct bio *);
-extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, const struct iov_iter *,
- gfp_t);
-extern void blk_execute_rq(struct gendisk *, struct request *, int);
-extern void blk_execute_rq_nowait(struct gendisk *,
- struct request *, int, rq_end_io_fn *);
/* Helper to convert REQ_OP_XXX to its string format XXX */
-extern const char *blk_op_str(unsigned int op);
+extern const char *blk_op_str(enum req_op op);
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
+const char *blk_status_to_str(blk_status_t status);
-int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
+/* poll the hardware only once instead of polling until a completion is found */
+#define BLK_POLL_ONESHOT (1 << 0)
+int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
+int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+ unsigned int flags);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
- return bdev->bd_disk->queue; /* this is never NULL */
+ return bdev->bd_queue; /* this is never NULL */
}
-/*
- * The basic unit of block I/O is a sector. It is used in a number of contexts
- * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
- * bytes. Variables of type sector_t represent an offset or size that is a
- * multiple of 512 bytes. Hence these two constants.
- */
-#ifndef SECTOR_SHIFT
-#define SECTOR_SHIFT 9
-#endif
-#ifndef SECTOR_SIZE
-#define SECTOR_SIZE (1 << SECTOR_SHIFT)
-#endif
-
-/*
- * blk_rq_pos() : the current sector
- * blk_rq_bytes() : bytes left in the entire request
- * blk_rq_cur_bytes() : bytes left in the current segment
- * blk_rq_err_bytes() : bytes left till the next error boundary
- * blk_rq_sectors() : sectors left in the entire request
- * blk_rq_cur_sectors() : sectors left in the current segment
- * blk_rq_stats_sectors() : sectors of the entire request used for stats
- */
-static inline sector_t blk_rq_pos(const struct request *rq)
-{
- return rq->__sector;
-}
-
-static inline unsigned int blk_rq_bytes(const struct request *rq)
-{
- return rq->__data_len;
-}
-
-static inline int blk_rq_cur_bytes(const struct request *rq)
-{
- return rq->bio ? bio_cur_bytes(rq->bio) : 0;
-}
-
-extern unsigned int blk_rq_err_bytes(const struct request *rq);
-
-static inline unsigned int blk_rq_sectors(const struct request *rq)
-{
- return blk_rq_bytes(rq) >> SECTOR_SHIFT;
-}
-
-static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
-{
- return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
-}
-
-static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
-{
- return rq->stats_sectors;
-}
-
-#ifdef CONFIG_BLK_DEV_ZONED
-
/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
-static inline unsigned int blk_rq_zone_no(struct request *rq)
+static inline unsigned int bio_zone_no(struct bio *bio)
{
- return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
+ return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}
-static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
+static inline bool bio_straddles_zones(struct bio *bio)
{
- return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
+ return bio_sectors(bio) &&
+ bio_zone_no(bio) !=
+ disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
}
-#endif /* CONFIG_BLK_DEV_ZONED */
/*
- * Some commands like WRITE SAME have a payload or data transfer size which
- * is different from the size of the request. Any driver that supports such
- * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
- * calculate the data transfer size.
+ * Return how many sectors within the boundary are left for I/O at a given
+ * offset.
*/
-static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+static inline unsigned int blk_boundary_sectors_left(sector_t offset,
+ unsigned int boundary_sectors)
{
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- return rq->special_vec.bv_len;
- return blk_rq_bytes(rq);
+ if (unlikely(!is_power_of_2(boundary_sectors)))
+ return boundary_sectors - sector_div(offset, boundary_sectors);
+ return boundary_sectors - (offset & (boundary_sectors - 1));
}
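
As a worked example: with a 128-sector boundary and offset 200, the power-of-two branch yields 128 - (200 & 127) = 128 - 72 = 56 sectors left before the boundary; the sector_div() branch computes the same remainder for non-power-of-two boundaries.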
-/*
- * Return the first full biovec in the request. The caller needs to check that
- * there are any bvecs before calling this helper.
+/**
+ * queue_limits_start_update - start an atomic update of queue limits
+ * @q: queue to update
+ *
+ * This function starts an atomic update of the queue limits. It takes a lock
+ * to prevent other updates and returns a snapshot of the current limits that
+ * the caller can modify. The caller must call queue_limits_commit_update()
+ * to finish the update.
+ *
+ * Context: process context.
*/
-static inline struct bio_vec req_bvec(struct request *rq)
+static inline struct queue_limits
+queue_limits_start_update(struct request_queue *q)
{
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- return rq->special_vec;
- return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
+ mutex_lock(&q->limits_lock);
+ return q->limits;
}
+int queue_limits_commit_update_frozen(struct request_queue *q,
+ struct queue_limits *lim);
+int queue_limits_commit_update(struct request_queue *q,
+ struct queue_limits *lim);
+int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
+int blk_validate_limits(struct queue_limits *lim);
-static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
- int op)
+/**
+ * queue_limits_cancel_update - cancel an atomic update of queue limits
+ * @q: queue to update
+ *
+ * This function cancels an atomic update of the queue limits started by
+ * queue_limits_start_update() and should be used when an error occurs after
+ * starting the update.
+ */
+static inline void queue_limits_cancel_update(struct request_queue *q)
{
- if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
- return min(q->limits.max_discard_sectors,
- UINT_MAX >> SECTOR_SHIFT);
-
- if (unlikely(op == REQ_OP_WRITE_SAME))
- return q->limits.max_write_same_sectors;
-
- if (unlikely(op == REQ_OP_WRITE_ZEROES))
- return q->limits.max_write_zeroes_sectors;
-
- return q->limits.max_sectors;
+ mutex_unlock(&q->limits_lock);
}
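
The intended pattern, sketched with an arbitrary limit as the thing being changed: start the update, modify the snapshot, then either commit or cancel, each of which drops limits_lock exactly once:

static int example_update_max_sectors(struct request_queue *q, unsigned int max)
{
	struct queue_limits lim = queue_limits_start_update(q);

	if (!max) {
		queue_limits_cancel_update(q);
		return -EINVAL;
	}
	lim.max_sectors = max;
	return queue_limits_commit_update(q, &lim);
}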
/*
- * Return maximum size of a request at given offset. Only valid for
- * file system requests.
+ * These helpers are for drivers that have sloppy feature negotiation and might
+ * have to disable DISCARD, WRITE_ZEROES or SECURE_ERASE from the I/O
+ * completion handler when the device returns an indication that the respective
+ * feature is not actually supported. They are racy and the driver needs to
+ * cope with that. Try to avoid this scheme if you can.
*/
-static inline unsigned int blk_max_size_offset(struct request_queue *q,
- sector_t offset,
- unsigned int chunk_sectors)
-{
- if (!chunk_sectors) {
- if (q->limits.chunk_sectors)
- chunk_sectors = q->limits.chunk_sectors;
- else
- return q->limits.max_sectors;
- }
-
- if (likely(is_power_of_2(chunk_sectors)))
- chunk_sectors -= offset & (chunk_sectors - 1);
- else
- chunk_sectors -= sector_div(offset, chunk_sectors);
-
- return min(q->limits.max_sectors, chunk_sectors);
+static inline void blk_queue_disable_discard(struct request_queue *q)
+{
+ q->limits.max_discard_sectors = 0;
}
-static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
- sector_t offset)
+static inline void blk_queue_disable_secure_erase(struct request_queue *q)
{
- struct request_queue *q = rq->q;
-
- if (blk_rq_is_passthrough(rq))
- return q->limits.max_hw_sectors;
-
- if (!q->limits.chunk_sectors ||
- req_op(rq) == REQ_OP_DISCARD ||
- req_op(rq) == REQ_OP_SECURE_ERASE)
- return blk_queue_get_max_sectors(q, req_op(rq));
-
- return min(blk_max_size_offset(q, offset, 0),
- blk_queue_get_max_sectors(q, req_op(rq)));
+ q->limits.max_secure_erase_sectors = 0;
}
-static inline unsigned int blk_rq_count_bios(struct request *rq)
+static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
{
- unsigned int nr_bios = 0;
- struct bio *bio;
-
- __rq_for_each_bio(bio, rq)
- nr_bios++;
-
- return nr_bios;
+ q->limits.max_write_zeroes_sectors = 0;
+ q->limits.max_wzeroes_unmap_sectors = 0;
}
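
As a sketch of the completion-handler scheme described above (the status check and the hook it lives in are assumptions):

static void example_complete_rq(struct request *rq, blk_status_t status)
{
	if (status == BLK_STS_NOTSUPP && req_op(rq) == REQ_OP_DISCARD)
		blk_queue_disable_discard(rq->q);	/* racy by design */
}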
-void blk_steal_bios(struct bio_list *list, struct request *rq);
-
-/*
- * Request completion related functions.
- *
- * blk_update_request() completes given number of bytes and updates
- * the request without completing it.
- */
-extern bool blk_update_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes);
-
-extern void blk_abort_request(struct request *);
-
/*
* Access functions for manipulating queue properties
*/
-extern void blk_cleanup_queue(struct request_queue *);
-void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
-extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
-extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
-extern void blk_queue_max_segments(struct request_queue *, unsigned short);
-extern void blk_queue_max_discard_segments(struct request_queue *,
- unsigned short);
-extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
-extern void blk_queue_max_discard_sectors(struct request_queue *q,
- unsigned int max_discard_sectors);
-extern void blk_queue_max_write_same_sectors(struct request_queue *q,
- unsigned int max_write_same_sectors);
-extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
- unsigned int max_write_same_sectors);
-extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
-extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
- unsigned int max_zone_append_sectors);
-extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
-void blk_queue_zone_write_granularity(struct request_queue *q,
- unsigned int size);
-extern void blk_queue_alignment_offset(struct request_queue *q,
- unsigned int alignment);
-void blk_queue_update_readahead(struct request_queue *q);
-extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
-extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
-extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
-extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
-extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
-extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
- sector_t offset);
-extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
-extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_dma_alignment(struct request_queue *, int);
-extern void blk_queue_update_dma_alignment(struct request_queue *, int);
+void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
+ sector_t offset, const char *pfx);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
-extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
-extern void blk_queue_required_elevator_features(struct request_queue *q,
- unsigned int features);
-extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
- struct device *dev);
-
-/*
- * Number of physical segments as sent to the device.
- *
- * Normally this is the number of discontiguous data segments sent by the
- * submitter. But for data-less command like discard we might have no
- * actual data segments submitted, but the driver might have to add it's
- * own special payload. In that case we still return 1 here so that this
- * special payload will be mapped.
- */
-static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
-{
- if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- return 1;
- return rq->nr_phys_segments;
-}
-/*
- * Number of discard segments (or ranges) the driver needs to fill in.
- * Each discard bio merged into a request is counted as one segment.
- */
-static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
-{
- return max_t(unsigned short, rq->nr_phys_segments, 1);
-}
-
-int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
- struct scatterlist *sglist, struct scatterlist **last_sg);
-static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
- struct scatterlist *sglist)
-{
- struct scatterlist *last_sg = NULL;
-
- return __blk_rq_map_sg(q, rq, sglist, &last_sg);
-}
-extern void blk_dump_rq_flags(struct request *, char *);
+struct blk_independent_access_ranges *
+disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
+void disk_set_independent_access_ranges(struct gendisk *disk,
+ struct blk_independent_access_ranges *iars);
bool __must_check blk_get_queue(struct request_queue *);
-struct request_queue *blk_alloc_queue(int node_id);
extern void blk_put_queue(struct request_queue *);
-extern void blk_set_queue_dying(struct request_queue *);
+
+void blk_mark_disk_dead(struct gendisk *disk);
+
+struct rq_list {
+ struct request *head;
+ struct request *tail;
+};
#ifdef CONFIG_BLOCK
/*
@@ -1222,19 +1166,24 @@ extern void blk_set_queue_dying(struct request_queue *);
* as the lock contention for request_queue lock is reduced.
*
* It is ok not to disable preemption when adding the request to the plug list
- * or when attempting a merge, because blk_schedule_flush_list() will only flush
- * the plug list when the task sleeps by itself. For details, please see
- * schedule() where blk_schedule_flush_plug() is called.
+ * or when attempting a merge. For details, please see schedule() where
+ * blk_flush_plug() is called.
*/
struct blk_plug {
- struct list_head mq_list; /* blk-mq requests */
- struct list_head cb_list; /* md requires an unplug callback */
+ struct rq_list mq_list; /* blk-mq requests */
+
+ /* if ios_left is > 1, we can batch tag/rq allocations */
+ struct rq_list cached_rqs;
+ u64 cur_ktime;
+ unsigned short nr_ios;
+
unsigned short rq_count;
+
bool multiple_queues;
- bool nowait;
+ bool has_elevator;
+
+ struct list_head cb_list; /* md requires an unplug callback */
};
-#define BLK_MAX_REQUEST_COUNT 16
-#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
@@ -1246,32 +1195,26 @@ struct blk_plug_cb {
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
void *data, int size);
extern void blk_start_plug(struct blk_plug *);
+extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);
-extern void blk_flush_plug_list(struct blk_plug *, bool);
-static inline void blk_flush_plug(struct task_struct *tsk)
+void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
+static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
- struct blk_plug *plug = tsk->plug;
-
if (plug)
- blk_flush_plug_list(plug, false);
+ __blk_flush_plug(plug, async);
}
-static inline void blk_schedule_flush_plug(struct task_struct *tsk)
+/*
+ * tsk == current here
+ */
+static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
struct blk_plug *plug = tsk->plug;
if (plug)
- blk_flush_plug_list(plug, true);
-}
-
-static inline bool blk_needs_flush_plug(struct task_struct *tsk)
-{
- struct blk_plug *plug = tsk->plug;
-
- return plug &&
- (!list_empty(&plug->mq_list) ||
- !list_empty(&plug->cb_list));
+ plug->cur_ktime = 0;
+ current->flags &= ~PF_BLOCK_TS;
}
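
A sketch of the usual plugging pattern: batch a burst of submissions under one on-stack plug so the block layer can merge and batch them:

static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);
}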
int blkdev_issue_flush(struct block_device *bdev);
@@ -1280,26 +1223,25 @@ long nr_blockdev_pages(void);
struct blk_plug {
};
-static inline void blk_start_plug(struct blk_plug *plug)
+static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
+ unsigned short nr_ios)
{
}
-static inline void blk_finish_plug(struct blk_plug *plug)
+static inline void blk_start_plug(struct blk_plug *plug)
{
}
-static inline void blk_flush_plug(struct task_struct *task)
+static inline void blk_finish_plug(struct blk_plug *plug)
{
}
-static inline void blk_schedule_flush_plug(struct task_struct *task)
+static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}
-
-static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
- return false;
}
static inline int blkdev_issue_flush(struct block_device *bdev)
@@ -1315,19 +1257,16 @@ static inline long nr_blockdev_pages(void)
extern void blk_io_schedule(void);
-extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, struct page *page);
-
-#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
-
-extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
-extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, int flags,
- struct bio **biop);
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask);
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
+int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp);
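
With the flags argument gone, a full-device discard reduces to the sketch below (GFP_KERNEL is the caller's choice, not a requirement):

static int example_discard_all(struct block_device *bdev)
{
	return blkdev_issue_discard(bdev, 0, bdev_nr_sectors(bdev),
				    GFP_KERNEL);
}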
#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
+#define BLKDEV_ZERO_KILLABLE (1 << 2) /* interruptible by fatal signals */
extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
@@ -1343,7 +1282,7 @@ static inline int sb_issue_discard(struct super_block *sb, sector_t block,
SECTOR_SHIFT),
nr_blocks << (sb->s_blocksize_bits -
SECTOR_SHIFT),
- gfp_mask, flags);
+ gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask)
@@ -1356,21 +1295,23 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
gfp_mask, 0);
}
-extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
-
static inline bool bdev_is_partition(struct block_device *bdev)
{
- return bdev->bd_partno;
+ return bdev_partno(bdev) != 0;
}
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
BLK_SAFE_MAX_SECTORS = 255,
- BLK_DEF_MAX_SECTORS = 2560,
BLK_MAX_SEGMENT_SIZE = 65536,
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
+static inline struct queue_limits *bdev_limits(struct block_device *bdev)
+{
+ return &bdev_get_queue(bdev)->limits;
+}
+
static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
return q->limits.seg_boundary_mask;
@@ -1386,6 +1327,11 @@ static inline unsigned int queue_max_sectors(const struct request_queue *q)
return q->limits.max_sectors;
}
+static inline unsigned int queue_max_bytes(struct request_queue *q)
+{
+ return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
+}
+
static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
return q->limits.max_hw_sectors;
@@ -1406,22 +1352,37 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
return q->limits.max_segment_size;
}
-static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
+static inline bool queue_emulates_zone_append(struct request_queue *q)
{
+ return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors;
+}
- const struct queue_limits *l = &q->limits;
+static inline bool bdev_emulates_zone_append(struct block_device *bdev)
+{
+ return queue_emulates_zone_append(bdev_get_queue(bdev));
+}
- return min(l->max_zone_append_sectors, l->max_sectors);
+static inline unsigned int
+bdev_max_zone_append_sectors(struct block_device *bdev)
+{
+ return bdev_limits(bdev)->max_zone_append_sectors;
}
-static inline unsigned queue_logical_block_size(const struct request_queue *q)
+static inline unsigned int bdev_max_segments(struct block_device *bdev)
{
- int retval = 512;
+ return queue_max_segments(bdev_get_queue(bdev));
+}
- if (q && q->limits.logical_block_size)
- retval = q->limits.logical_block_size;
+static inline unsigned short bdev_max_write_streams(struct block_device *bdev)
+{
+ if (bdev_is_partition(bdev))
+ return 0;
+ return bdev_limits(bdev)->max_write_streams;
+}
- return retval;
+static inline unsigned queue_logical_block_size(const struct request_queue *q)
+{
+ return q->limits.logical_block_size;
}
static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
@@ -1444,7 +1405,7 @@ static inline unsigned int queue_io_min(const struct request_queue *q)
return q->limits.io_min;
}
-static inline int bdev_io_min(struct block_device *bdev)
+static inline unsigned int bdev_io_min(struct block_device *bdev)
{
return queue_io_min(bdev_get_queue(bdev));
}
@@ -1454,7 +1415,7 @@ static inline unsigned int queue_io_opt(const struct request_queue *q)
return q->limits.io_opt;
}
-static inline int bdev_io_opt(struct block_device *bdev)
+static inline unsigned int bdev_io_opt(struct block_device *bdev)
{
return queue_io_opt(bdev_get_queue(bdev));
}
@@ -1471,467 +1432,257 @@ bdev_zone_write_granularity(struct block_device *bdev)
return queue_zone_write_granularity(bdev_get_queue(bdev));
}
-static inline int queue_alignment_offset(const struct request_queue *q)
-{
- if (q->limits.misaligned)
- return -1;
-
- return q->limits.alignment_offset;
-}
+int bdev_alignment_offset(struct block_device *bdev);
+unsigned int bdev_discard_alignment(struct block_device *bdev);
-static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
+static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
- unsigned int granularity = max(lim->physical_block_size, lim->io_min);
- unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
- << SECTOR_SHIFT;
-
- return (granularity + lim->alignment_offset - alignment) % granularity;
+ return bdev_limits(bdev)->max_discard_sectors;
}
-static inline int bdev_alignment_offset(struct block_device *bdev)
+static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q->limits.misaligned)
- return -1;
- if (bdev_is_partition(bdev))
- return queue_limit_alignment_offset(&q->limits,
- bdev->bd_start_sect);
- return q->limits.alignment_offset;
+ return bdev_limits(bdev)->discard_granularity;
}
-static inline int queue_discard_alignment(const struct request_queue *q)
+static inline unsigned int
+bdev_max_secure_erase_sectors(struct block_device *bdev)
{
- if (q->limits.discard_misaligned)
- return -1;
-
- return q->limits.discard_alignment;
+ return bdev_limits(bdev)->max_secure_erase_sectors;
}
-static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
+static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
- unsigned int alignment, granularity, offset;
-
- if (!lim->max_discard_sectors)
- return 0;
-
- /* Why are these in bytes, not sectors? */
- alignment = lim->discard_alignment >> SECTOR_SHIFT;
- granularity = lim->discard_granularity >> SECTOR_SHIFT;
- if (!granularity)
- return 0;
-
- /* Offset of the partition start in 'granularity' sectors */
- offset = sector_div(sector, granularity);
-
- /* And why do we do this modulus *again* in blkdev_issue_discard()? */
- offset = (granularity + alignment - offset) % granularity;
-
- /* Turn it back into bytes, gaah */
- return offset << SECTOR_SHIFT;
+ return bdev_limits(bdev)->max_write_zeroes_sectors;
}
-static inline int bdev_discard_alignment(struct block_device *bdev)
+static inline unsigned int
+bdev_write_zeroes_unmap_sectors(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (bdev_is_partition(bdev))
- return queue_limit_discard_alignment(&q->limits,
- bdev->bd_start_sect);
- return q->limits.discard_alignment;
+ return bdev_limits(bdev)->max_wzeroes_unmap_sectors;
}
-static inline unsigned int bdev_write_same(struct block_device *bdev)
+static inline bool bdev_nonrot(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return q->limits.max_write_same_sectors;
-
- return 0;
+ return blk_queue_nonrot(bdev_get_queue(bdev));
}
-static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
+static inline bool bdev_synchronous(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return q->limits.max_write_zeroes_sectors;
-
- return 0;
+ return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
}
-static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
+static inline bool bdev_stable_writes(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
- if (q)
- return blk_queue_zoned_model(q);
-
- return BLK_ZONED_NONE;
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
+ return true;
+ return q->limits.features & BLK_FEAT_STABLE_WRITES;
}
-static inline bool bdev_is_zoned(struct block_device *bdev)
+static inline bool blk_queue_write_cache(struct request_queue *q)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return blk_queue_is_zoned(q);
-
- return false;
+ return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
+ !(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
}
-static inline sector_t bdev_zone_sectors(struct block_device *bdev)
+static inline bool bdev_write_cache(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return blk_queue_zone_sectors(q);
- return 0;
+ return blk_queue_write_cache(bdev_get_queue(bdev));
}
-static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
+static inline bool bdev_fua(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return queue_max_open_zones(q);
- return 0;
+ return bdev_limits(bdev)->features & BLK_FEAT_FUA;
}
-static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
+static inline bool bdev_nowait(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return queue_max_active_zones(q);
- return 0;
+ return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT;
}
-static inline int queue_dma_alignment(const struct request_queue *q)
+static inline bool bdev_is_zoned(struct block_device *bdev)
{
- return q ? q->dma_alignment : 511;
+ return blk_queue_is_zoned(bdev_get_queue(bdev));
}
-static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
- unsigned int len)
+static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
- unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
- return !(addr & alignment) && !(len & alignment);
+ return disk_zone_no(bdev->bd_disk, sec);
}
-/* assumes size > 256 */
-static inline unsigned int blksize_bits(unsigned int size)
+static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
- unsigned int bits = 8;
- do {
- bits++;
- size >>= 1;
- } while (size > 256);
- return bits;
-}
+ struct request_queue *q = bdev_get_queue(bdev);
-static inline unsigned int block_size(struct block_device *bdev)
-{
- return 1 << bdev->bd_inode->i_blkbits;
+ if (!blk_queue_is_zoned(q))
+ return 0;
+ return q->limits.chunk_sectors;
}
-int kblockd_schedule_work(struct work_struct *work);
-int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
-
-#define MODULE_ALIAS_BLOCKDEV(major,minor) \
- MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
-#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
- MODULE_ALIAS("block-major-" __stringify(major) "-*")
-
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-enum blk_integrity_flags {
- BLK_INTEGRITY_VERIFY = 1 << 0,
- BLK_INTEGRITY_GENERATE = 1 << 1,
- BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
- BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
-};
-
-struct blk_integrity_iter {
- void *prot_buf;
- void *data_buf;
- sector_t seed;
- unsigned int data_size;
- unsigned short interval;
- const char *disk_name;
-};
-
-typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
-typedef void (integrity_prepare_fn) (struct request *);
-typedef void (integrity_complete_fn) (struct request *, unsigned int);
-
-struct blk_integrity_profile {
- integrity_processing_fn *generate_fn;
- integrity_processing_fn *verify_fn;
- integrity_prepare_fn *prepare_fn;
- integrity_complete_fn *complete_fn;
- const char *name;
-};
-
-extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
-extern void blk_integrity_unregister(struct gendisk *);
-extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
-extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
- struct scatterlist *);
-extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
-
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+static inline sector_t bdev_zone_start(struct block_device *bdev,
+ sector_t sector)
{
- struct blk_integrity *bi = &disk->queue->integrity;
-
- if (!bi->profile)
- return NULL;
-
- return bi;
+ return sector & ~(bdev_zone_sectors(bdev) - 1);
}
-static inline
-struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
+static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
+ sector_t sector)
{
- return blk_get_integrity(bdev->bd_disk);
+ return sector & (bdev_zone_sectors(bdev) - 1);
}
-static inline bool
-blk_integrity_queue_supports_integrity(struct request_queue *q)
+static inline sector_t bio_offset_from_zone_start(struct bio *bio)
{
- return q->integrity.profile;
+ return bdev_offset_from_zone_start(bio->bi_bdev,
+ bio->bi_iter.bi_sector);
}
-static inline bool blk_integrity_rq(struct request *rq)
+static inline bool bdev_is_zone_start(struct block_device *bdev,
+ sector_t sector)
{
- return rq->cmd_flags & REQ_INTEGRITY;
+ return bdev_offset_from_zone_start(bdev, sector) == 0;
}
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
+/* Check whether @sector is a multiple of the zone size. */
+static inline bool bdev_is_zone_aligned(struct block_device *bdev,
+ sector_t sector)
{
- q->limits.max_integrity_segments = segs;
+ return bdev_is_zone_start(bdev, sector);
}
-static inline unsigned short
-queue_max_integrity_segments(const struct request_queue *q)
-{
- return q->limits.max_integrity_segments;
-}
+int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask);
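
A worked example of how the zone helpers above compose, assuming a zoned bdev with 524288-sector (256 MiB) zones; the power-of-two zone size is what lets bdev_zone_start() use mask arithmetic:

	sector_t zone_sectors = bdev_zone_sectors(bdev);           /* 524288 */
	unsigned int zno      = bdev_zone_no(bdev, 525000);        /* zone 1 */
	sector_t zstart       = bdev_zone_start(bdev, 525000);     /* 524288 */
	sector_t zoff = bdev_offset_from_zone_start(bdev, 525000); /* 712 */
	bool at_start = bdev_is_zone_start(bdev, 525000);          /* false */
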
-/**
- * bio_integrity_intervals - Return number of integrity intervals for a bio
- * @bi: blk_integrity profile for device
- * @sectors: Size of the bio in 512-byte sectors
- *
- * Description: The block layer calculates everything in 512 byte
- * sectors but integrity metadata is done in terms of the data integrity
- * interval size of the storage device. Convert the block layer sectors
- * to the appropriate number of integrity intervals.
- */
-static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
- unsigned int sectors)
+static inline unsigned int queue_dma_alignment(const struct request_queue *q)
{
- return sectors >> (bi->interval_exp - 9);
+ return q->limits.dma_alignment;
}
-static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
- unsigned int sectors)
+static inline unsigned int
+queue_atomic_write_unit_max_bytes(const struct request_queue *q)
{
- return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
+ return q->limits.atomic_write_unit_max;
}
-/*
- * Return the first bvec that contains integrity data. Only drivers that are
- * limited to a single integrity segment should use this helper.
- */
-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+static inline unsigned int
+queue_atomic_write_unit_min_bytes(const struct request_queue *q)
{
- if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
- return NULL;
- return rq->bio->bi_integrity->bip_vec;
+ return q->limits.atomic_write_unit_min;
}
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-
-struct bio;
-struct block_device;
-struct gendisk;
-struct blk_integrity;
-
-static inline int blk_integrity_rq(struct request *rq)
-{
- return 0;
-}
-static inline int blk_rq_count_integrity_sg(struct request_queue *q,
- struct bio *b)
-{
- return 0;
-}
-static inline int blk_rq_map_integrity_sg(struct request_queue *q,
- struct bio *b,
- struct scatterlist *s)
-{
- return 0;
-}
-static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
-{
- return NULL;
-}
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
-{
- return NULL;
-}
-static inline bool
-blk_integrity_queue_supports_integrity(struct request_queue *q)
-{
- return false;
-}
-static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
-{
- return 0;
-}
-static inline void blk_integrity_register(struct gendisk *d,
- struct blk_integrity *b)
-{
-}
-static inline void blk_integrity_unregister(struct gendisk *d)
+static inline unsigned int
+queue_atomic_write_boundary_bytes(const struct request_queue *q)
{
+ return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT;
}
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
+
+static inline unsigned int
+queue_atomic_write_max_bytes(const struct request_queue *q)
{
+ return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
}
-static inline unsigned short queue_max_integrity_segments(const struct request_queue *q)
+
+static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
- return 0;
+ return queue_dma_alignment(bdev_get_queue(bdev));
}
-static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
- unsigned int sectors)
+static inline unsigned int
+blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
{
- return 0;
+ return lim->dma_alignment | lim->dma_pad_mask;
}
-static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
- unsigned int sectors)
+static inline bool blk_rq_aligned(struct request_queue *q, unsigned long addr,
+ unsigned int len)
{
- return 0;
+ unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits);
+
+ return !(addr & alignment) && !(len & alignment);
}
-static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+/* assumes size > 256 */
+static inline unsigned int blksize_bits(unsigned int size)
{
- return NULL;
+ return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}
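
The loop in the old blksize_bits() is replaced by a closed form; a quick check of the arithmetic, assuming SECTOR_SHIFT == 9:

	/* blksize_bits(4096): 4096 >> 9 == 8, order_base_2(8) == 3,
	 * and 3 + 9 == 12, matching 1 << 12 == 4096.
	 * blksize_bits(512): 512 >> 9 == 1, order_base_2(1) == 0, giving 9.
	 */
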
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
+int kblockd_schedule_work(struct work_struct *work);
+int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
-#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+#define MODULE_ALIAS_BLOCKDEV(major,minor) \
+ MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
+#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
+ MODULE_ALIAS("block-major-" __stringify(major) "-*")
-bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q);
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
-void blk_ksm_unregister(struct request_queue *q);
+bool blk_crypto_register(struct blk_crypto_profile *profile,
+ struct request_queue *q);
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
-static inline bool blk_ksm_register(struct blk_keyslot_manager *ksm,
- struct request_queue *q)
+static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
+ struct request_queue *q)
{
return true;
}
-static inline void blk_ksm_unregister(struct request_queue *q) { }
-
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+enum blk_unique_id {
+ /* these match the Designator Types specified in SPC */
+ BLK_UID_T10 = 1,
+ BLK_UID_EUI64 = 2,
+ BLK_UID_NAA = 3,
+};
struct block_device_operations {
- blk_qc_t (*submit_bio) (struct bio *bio);
- int (*open) (struct block_device *, fmode_t);
- void (*release) (struct gendisk *, fmode_t);
- int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
- int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
- int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+ void (*submit_bio)(struct bio *bio);
+ int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
+ unsigned int flags);
+ int (*open)(struct gendisk *disk, blk_mode_t mode);
+ void (*release)(struct gendisk *disk);
+ int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
+ unsigned cmd, unsigned long arg);
+ int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
+ unsigned cmd, unsigned long arg);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
void (*unlock_native_capacity) (struct gendisk *);
- int (*getgeo)(struct block_device *, struct hd_geometry *);
+ int (*getgeo)(struct gendisk *, struct hd_geometry *);
int (*set_read_only)(struct block_device *bdev, bool ro);
+ void (*free_disk)(struct gendisk *disk);
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
int (*report_zones)(struct gendisk *, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data);
+ unsigned int nr_zones,
+ struct blk_report_zones_args *args);
char *(*devnode)(struct gendisk *disk, umode_t *mode);
+ /* returns the length of the identifier or a negative errno: */
+ int (*get_unique_id)(struct gendisk *disk, u8 id[16],
+ enum blk_unique_id id_type);
struct module *owner;
const struct pr_ops *pr_ops;
+
+ /*
+ * Special callback for probing GPT entry at a given sector.
+ * Needed by Android devices, used by GPT scanner and MMC blk
+ * driver.
+ */
+ int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};
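
A minimal sketch of an ops table against the updated prototypes; the sketch_* names are hypothetical and only a few hooks are wired up:

	static void sketch_submit_bio(struct bio *bio)
	{
		bio_endio(bio);	/* complete immediately; illustrative only */
	}

	static int sketch_open(struct gendisk *disk, blk_mode_t mode)
	{
		return 0;	/* gendisk + blk_mode_t now, not bdev + fmode_t */
	}

	static void sketch_release(struct gendisk *disk)
	{
	}

	static const struct block_device_operations sketch_fops = {
		.owner		= THIS_MODULE,
		.submit_bio	= sketch_submit_bio,
		.open		= sketch_open,
		.release	= sketch_release,
	};
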
#ifdef CONFIG_COMPAT
-extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
+extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif
-extern int bdev_read_page(struct block_device *, sector_t, struct page *);
-extern int bdev_write_page(struct block_device *, sector_t, struct page *,
- struct writeback_control *);
-
-#ifdef CONFIG_BLK_DEV_ZONED
-bool blk_req_needs_zone_write_lock(struct request *rq);
-bool blk_req_zone_write_trylock(struct request *rq);
-void __blk_req_zone_write_lock(struct request *rq);
-void __blk_req_zone_write_unlock(struct request *rq);
-
-static inline void blk_req_zone_write_lock(struct request *rq)
-{
- if (blk_req_needs_zone_write_lock(rq))
- __blk_req_zone_write_lock(rq);
-}
-
-static inline void blk_req_zone_write_unlock(struct request *rq)
-{
- if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
- __blk_req_zone_write_unlock(rq);
-}
-
-static inline bool blk_req_zone_is_write_locked(struct request *rq)
-{
- return rq->q->seq_zones_wlock &&
- test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
-}
-
-static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
-{
- if (!blk_req_needs_zone_write_lock(rq))
- return true;
- return !blk_req_zone_is_write_locked(rq);
-}
-#else
-static inline bool blk_req_needs_zone_write_lock(struct request *rq)
-{
- return false;
-}
-
-static inline void blk_req_zone_write_lock(struct request *rq)
-{
-}
-
-static inline void blk_req_zone_write_unlock(struct request *rq)
-{
-}
-static inline bool blk_req_zone_is_write_locked(struct request *rq)
-{
- return false;
-}
-
-static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
-{
- return true;
-}
-#endif /* CONFIG_BLK_DEV_ZONED */
-
static inline void blk_wake_io_task(struct task_struct *waiter)
{
/*
@@ -1945,10 +1696,10 @@ static inline void blk_wake_io_task(struct task_struct *waiter)
wake_up_process(waiter);
}
-unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
- unsigned int op);
-void disk_end_io_acct(struct gendisk *disk, unsigned int op,
- unsigned long start_time);
+unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
+ unsigned long start_time);
+void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
+ unsigned int sectors, unsigned long start_time);
unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
@@ -1957,17 +1708,16 @@ void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
/**
* bio_end_io_acct - end I/O accounting for bio based drivers
* @bio: bio to end account for
- * @start: start time returned by bio_start_io_acct()
+ * @start_time: start time returned by bio_start_io_acct()
*/
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}
-int bdev_read_only(struct block_device *bdev);
-int set_blocksize(struct block_device *bdev, int size);
+int bdev_validate_blocksize(struct block_device *bdev, int block_size);
+int set_blocksize(struct file *file, int size);
-const char *bdevname(struct block_device *bdev, char *buffer);
int lookup_bdev(const char *pathname, dev_t *dev);
void blkdev_show(struct seq_file *seqf, off_t offset);
@@ -1980,28 +1730,62 @@ void blkdev_show(struct seq_file *seqf, off_t offset);
#define BLKDEV_MAJOR_MAX 0
#endif
-struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
- void *holder);
-struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
-int bd_prepare_to_claim(struct block_device *bdev, void *holder);
-void bd_abort_claiming(struct block_device *bdev, void *holder);
-void blkdev_put(struct block_device *bdev, fmode_t mode);
+struct blk_holder_ops {
+ void (*mark_dead)(struct block_device *bdev, bool surprise);
+
+ /*
+ * Sync the file system mounted on the block device.
+ */
+ void (*sync)(struct block_device *bdev);
+
+ /*
+ * Freeze the file system mounted on the block device.
+ */
+ int (*freeze)(struct block_device *bdev);
-/* just for blk-cgroup, don't use elsewhere */
-struct block_device *blkdev_get_no_open(dev_t dev);
-void blkdev_put_no_open(struct block_device *bdev);
+ /*
+ * Thaw the file system mounted on the block device.
+ */
+ int (*thaw)(struct block_device *bdev);
+};
+
+/*
+ * For filesystems using @fs_holder_ops, the @holder argument passed to
+ * helpers used to open and claim block devices via
+ * bd_prepare_to_claim() must point to a superblock.
+ */
+extern const struct blk_holder_ops fs_holder_ops;
+
+/*
+ * Return the correct open flags for blkdev_get_by_* for super block flags
+ * as stored in sb->s_flags.
+ */
+#define sb_open_mode(flags) \
+ (BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
+ (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
+
+struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
+ const struct blk_holder_ops *hops);
+struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
+ void *holder, const struct blk_holder_ops *hops);
+int bd_prepare_to_claim(struct block_device *bdev, void *holder,
+ const struct blk_holder_ops *hops);
+void bd_abort_claiming(struct block_device *bdev, void *holder);
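
A sketch of the file-based open path that replaces blkdev_get_by_path(); the device path is illustrative, and sb stands for the claiming superblock that fs_holder_ops expects as @holder:

	struct file *bdev_file;
	struct block_device *bdev;

	bdev_file = bdev_file_open_by_path("/dev/vda", sb_open_mode(sb->s_flags),
					   sb, &fs_holder_ops);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);

	bdev = file_bdev(bdev_file);	/* recover the block_device when needed */
	/* ... I/O against bdev ... */
	bdev_fput(bdev_file);		/* drops both the reference and the claim */
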
-struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
-void bdev_add(struct block_device *bdev, dev_t dev);
struct block_device *I_BDEV(struct inode *inode);
-struct block_device *bdgrab(struct block_device *bdev);
-void bdput(struct block_device *);
-int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
- loff_t lend);
+struct block_device *file_bdev(struct file *bdev_file);
+bool disk_live(struct gendisk *disk);
+unsigned int block_size(struct block_device *bdev);
#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
+int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
+int sync_blockdev_nowait(struct block_device *bdev);
+void sync_bdevs(bool wait);
+void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask);
+void printk_all_partitions(void);
+int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
@@ -2010,10 +1794,83 @@ static inline int sync_blockdev(struct block_device *bdev)
{
return 0;
}
-#endif
-int fsync_bdev(struct block_device *bdev);
+static inline int sync_blockdev_nowait(struct block_device *bdev)
+{
+ return 0;
+}
+static inline void sync_bdevs(bool wait)
+{
+}
+static inline void bdev_statx(const struct path *path, struct kstat *stat,
+ u32 request_mask)
+{
+}
+static inline void printk_all_partitions(void)
+{
+}
+static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_BLOCK */
+
+int bdev_freeze(struct block_device *bdev);
+int bdev_thaw(struct block_device *bdev);
+void bdev_fput(struct file *bdev_file);
+
+struct io_comp_batch {
+ struct rq_list req_list;
+ bool need_ts;
+ void (*complete)(struct io_comp_batch *);
+};
+
+static inline bool blk_atomic_write_start_sect_aligned(sector_t sector,
+ struct queue_limits *limits)
+{
+ unsigned int alignment = max(limits->atomic_write_hw_unit_min,
+ limits->atomic_write_hw_boundary);
+
+ return IS_ALIGNED(sector, alignment >> SECTOR_SHIFT);
+}
+
+static inline bool bdev_can_atomic_write(struct block_device *bdev)
+{
+ struct request_queue *bd_queue = bdev->bd_queue;
+ struct queue_limits *limits = &bd_queue->limits;
+
+ if (!limits->atomic_write_unit_min)
+ return false;
+
+ if (bdev_is_partition(bdev))
+ return blk_atomic_write_start_sect_aligned(bdev->bd_start_sect,
+ limits);
+
+ return true;
+}
+
+static inline unsigned int
+bdev_atomic_write_unit_min_bytes(struct block_device *bdev)
+{
+ if (!bdev_can_atomic_write(bdev))
+ return 0;
+ return queue_atomic_write_unit_min_bytes(bdev_get_queue(bdev));
+}
+
+static inline unsigned int
+bdev_atomic_write_unit_max_bytes(struct block_device *bdev)
+{
+ if (!bdev_can_atomic_write(bdev))
+ return 0;
+ return queue_atomic_write_unit_max_bytes(bdev_get_queue(bdev));
+}
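
An illustrative capability probe built from the helpers above; the reported bounds come from the queue limits, and partitions additionally pass through the start-sector alignment check in bdev_can_atomic_write():

	if (bdev_can_atomic_write(bdev)) {
		unsigned int amin = bdev_atomic_write_unit_min_bytes(bdev);
		unsigned int amax = bdev_atomic_write_unit_max_bytes(bdev);

		pr_info("untorn writes supported: %u..%u bytes\n", amin, amax);
	}
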
+
+static inline int bio_split_rw_at(struct bio *bio,
+ const struct queue_limits *lim,
+ unsigned *segs, unsigned max_bytes)
+{
+ return bio_split_io_at(bio, lim, segs, max_bytes, lim->dma_alignment);
+}
-int freeze_bdev(struct block_device *bdev);
-int thaw_bdev(struct block_device *bdev);
+#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
#endif /* _LINUX_BLKDEV_H */
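
Before leaving blkdev.h, a usage sketch for io_comp_batch and its initializer macro above; the driver wiring that accumulates completions on iob.req_list via ->poll_bio() is elided:

	DEFINE_IO_COMP_BATCH(iob);	/* struct io_comp_batch iob = { } */

	/* a poll loop hands &iob down the stack; once requests have been
	 * gathered on iob.req_list, the owner flushes them in one go:
	 */
	if (iob.complete)
		iob.complete(&iob);
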
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index a083e15df608..05c8754456aa 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -2,22 +2,24 @@
#ifndef BLKTRACE_H
#define BLKTRACE_H
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/relay.h>
#include <linux/compat.h>
#include <uapi/linux/blktrace_api.h>
#include <linux/list.h>
+#include <linux/blk_types.h>
#if defined(CONFIG_BLK_DEV_IO_TRACE)
#include <linux/sysfs.h>
struct blk_trace {
+ int version;
int trace_state;
struct rchan *rchan;
unsigned long __percpu *sequence;
unsigned char __percpu *msg_data;
- u16 act_mask;
+ u64 act_mask;
u64 start_lba;
u64 end_lba;
u32 pid;
@@ -27,12 +29,10 @@ struct blk_trace {
atomic_t dropped;
};
-struct blkcg;
-
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
-extern __printf(3, 4)
-void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *fmt, ...);
+__printf(3, 4) void __blk_trace_note_message(struct blk_trace *bt,
+ struct cgroup_subsys_state *css, const char *fmt, ...);
/**
* blk_add_trace_msg - Add a (simple) message to the blktrace stream
@@ -47,14 +47,14 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
 * NOTE: Cannot use 'static inline' due to the presence of var args...
*
**/
-#define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \
+#define blk_add_cgroup_trace_msg(q, css, fmt, ...) \
do { \
struct blk_trace *bt; \
\
rcu_read_lock(); \
bt = rcu_dereference((q)->blk_trace); \
if (unlikely(bt)) \
- __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
+ __blk_trace_note_message(bt, css, fmt, ##__VA_ARGS__);\
rcu_read_unlock(); \
} while (0)
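
Call sites keep the same macro shape after the rename; q, rq and css are assumed in scope, with css now a cgroup_subsys_state pointer rather than a blkcg:

	blk_add_trace_msg(q, "requeued %u sectors", blk_rq_sectors(rq));
	blk_add_cgroup_trace_msg(q, css, "throttled");
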
#define blk_add_trace_msg(q, fmt, ...) \
@@ -79,10 +79,6 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
char __user *arg);
extern int blk_trace_startstop(struct request_queue *q, int start);
extern int blk_trace_remove(struct request_queue *q);
-extern void blk_trace_remove_sysfs(struct device *dev);
-extern int blk_trace_init_sysfs(struct device *dev);
-
-extern struct attribute_group blk_trace_attr_group;
#else /* !CONFIG_BLK_DEV_IO_TRACE */
# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
@@ -90,16 +86,14 @@ extern struct attribute_group blk_trace_attr_group;
# define blk_add_driver_data(rq, data, len) do {} while (0)
# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
# define blk_trace_startstop(q, start) (-ENOTTY)
-# define blk_trace_remove(q) (-ENOTTY)
# define blk_add_trace_msg(q, fmt, ...) do { } while (0)
# define blk_add_cgroup_trace_msg(q, cg, fmt, ...) do { } while (0)
-# define blk_trace_remove_sysfs(dev) do { } while (0)
# define blk_trace_note_message_enabled(q) (false)
-static inline int blk_trace_init_sysfs(struct device *dev)
+
+static inline int blk_trace_remove(struct request_queue *q)
{
- return 0;
+ return -ENOTTY;
}
-
#endif /* CONFIG_BLK_DEV_IO_TRACE */
#ifdef CONFIG_COMPAT
@@ -117,7 +111,7 @@ struct compat_blk_user_trace_setup {
#endif
-void blk_fill_rwbs(char *rwbs, unsigned int op);
+void blk_fill_rwbs(char *rwbs, blk_opf_t opf);
static inline sector_t blk_rq_trace_sector(struct request *rq)
{
diff --git a/include/linux/bma150.h b/include/linux/bma150.h
index 31c9e323a391..4d4a62d49341 100644
--- a/include/linux/bma150.h
+++ b/include/linux/bma150.h
@@ -33,8 +33,8 @@ struct bma150_cfg {
 	unsigned char lg_hyst;		/* Low-G hysteresis */
unsigned char lg_dur; /* Low-G duration */
unsigned char lg_thres; /* Low-G threshold */
- unsigned char range; /* one of BMA0150_RANGE_xxx */
- unsigned char bandwidth; /* one of BMA0150_BW_xxx */
+ unsigned char range; /* one of BMA150_RANGE_xxx */
+ unsigned char bandwidth; /* one of BMA150_BW_xxx */
};
struct bma150_platform_data {
diff --git a/include/linux/bnxt/hsi.h b/include/linux/bnxt/hsi.h
new file mode 100644
index 000000000000..47c34990cf23
--- /dev/null
+++ b/include/linux/bnxt/hsi.h
@@ -0,0 +1,11166 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2014-2018 Broadcom Limited
+ * Copyright (c) 2018-2025 Broadcom Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * DO NOT MODIFY!!! This file is automatically generated.
+ */
+
+#ifndef _BNXT_HSI_H_
+#define _BNXT_HSI_H_
+
+/* hwrm_cmd_hdr (size:128b/16B) */
+struct hwrm_cmd_hdr {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_resp_hdr (size:64b/8B) */
+struct hwrm_resp_hdr {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+#define CMD_DISCR_TLV_ENCAP 0x8000UL
+#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
+
+
+#define TLV_TYPE_HWRM_REQUEST 0x1UL
+#define TLV_TYPE_HWRM_RESPONSE 0x2UL
+#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2 0x6UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 0x7UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1_EXT 0x8UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1_EXT 0x9UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2_EXT 0xaUL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2_EXT 0xbUL
+#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
+#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
+#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
+#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ALGORITHMS 0x8006UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL
+#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
+#define TLV_TYPE_ENGINE_CKV_FW_ECC_PUBLIC_KEY 0x8009UL
+#define TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS 0x800aUL
+#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS
+
+
+/* tlv (size:64b/8B) */
+struct tlv {
+ __le16 cmd_discr;
+ u8 reserved_8b;
+ u8 flags;
+ #define TLV_FLAGS_MORE 0x1UL
+ #define TLV_FLAGS_MORE_LAST 0x0UL
+ #define TLV_FLAGS_MORE_NOT_LAST 0x1UL
+ #define TLV_FLAGS_REQUIRED 0x2UL
+ #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
+ #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
+ #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
+ __le16 tlv_type;
+ __le16 length;
+};
+
+/* input (size:128b/16B) */
+struct input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* output (size:64b/8B) */
+struct output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
+ __le16 req_type;
+ __le16 signature;
+ #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+ #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
+ __le16 target_id;
+ #define SHORT_REQ_TARGET_ID_DEFAULT 0x0UL
+ #define SHORT_REQ_TARGET_ID_TOOLS 0xfffdUL
+ #define SHORT_REQ_TARGET_ID_LAST SHORT_REQ_TARGET_ID_TOOLS
+ __le16 size;
+ __le64 req_addr;
+};
+
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+ __le16 req_type;
+ #define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_ECHO_RESPONSE 0xbUL
+ #define HWRM_ERROR_RECOVERY_QCFG 0xcUL
+ #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
+ #define HWRM_FUNC_BUF_UNRGTR 0xeUL
+ #define HWRM_FUNC_VF_CFG 0xfUL
+ #define HWRM_RESERVED1 0x10UL
+ #define HWRM_FUNC_RESET 0x11UL
+ #define HWRM_FUNC_GETFID 0x12UL
+ #define HWRM_FUNC_VF_ALLOC 0x13UL
+ #define HWRM_FUNC_VF_FREE 0x14UL
+ #define HWRM_FUNC_QCAPS 0x15UL
+ #define HWRM_FUNC_QCFG 0x16UL
+ #define HWRM_FUNC_CFG 0x17UL
+ #define HWRM_FUNC_QSTATS 0x18UL
+ #define HWRM_FUNC_CLR_STATS 0x19UL
+ #define HWRM_FUNC_DRV_UNRGTR 0x1aUL
+ #define HWRM_FUNC_VF_RESC_FREE 0x1bUL
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL
+ #define HWRM_FUNC_DRV_RGTR 0x1dUL
+ #define HWRM_FUNC_DRV_QVER 0x1eUL
+ #define HWRM_FUNC_BUF_RGTR 0x1fUL
+ #define HWRM_PORT_PHY_CFG 0x20UL
+ #define HWRM_PORT_MAC_CFG 0x21UL
+ #define HWRM_PORT_TS_QUERY 0x22UL
+ #define HWRM_PORT_QSTATS 0x23UL
+ #define HWRM_PORT_LPBK_QSTATS 0x24UL
+ #define HWRM_PORT_CLR_STATS 0x25UL
+ #define HWRM_PORT_LPBK_CLR_STATS 0x26UL
+ #define HWRM_PORT_PHY_QCFG 0x27UL
+ #define HWRM_PORT_MAC_QCFG 0x28UL
+ #define HWRM_PORT_MAC_PTP_QCFG 0x29UL
+ #define HWRM_PORT_PHY_QCAPS 0x2aUL
+ #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL
+ #define HWRM_PORT_PHY_I2C_READ 0x2cUL
+ #define HWRM_PORT_LED_CFG 0x2dUL
+ #define HWRM_PORT_LED_QCFG 0x2eUL
+ #define HWRM_PORT_LED_QCAPS 0x2fUL
+ #define HWRM_QUEUE_QPORTCFG 0x30UL
+ #define HWRM_QUEUE_QCFG 0x31UL
+ #define HWRM_QUEUE_CFG 0x32UL
+ #define HWRM_FUNC_VLAN_CFG 0x33UL
+ #define HWRM_FUNC_VLAN_QCFG 0x34UL
+ #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL
+ #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL
+ #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL
+ #define HWRM_QUEUE_PRI2COS_CFG 0x38UL
+ #define HWRM_QUEUE_COS2BW_QCFG 0x39UL
+ #define HWRM_QUEUE_COS2BW_CFG 0x3aUL
+ #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL
+ #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL
+ #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL
+ #define HWRM_VNIC_ALLOC 0x40UL
+ #define HWRM_VNIC_FREE 0x41UL
+ #define HWRM_VNIC_CFG 0x42UL
+ #define HWRM_VNIC_QCFG 0x43UL
+ #define HWRM_VNIC_TPA_CFG 0x44UL
+ #define HWRM_VNIC_TPA_QCFG 0x45UL
+ #define HWRM_VNIC_RSS_CFG 0x46UL
+ #define HWRM_VNIC_RSS_QCFG 0x47UL
+ #define HWRM_VNIC_PLCMODES_CFG 0x48UL
+ #define HWRM_VNIC_PLCMODES_QCFG 0x49UL
+ #define HWRM_VNIC_QCAPS 0x4aUL
+ #define HWRM_VNIC_UPDATE 0x4bUL
+ #define HWRM_RING_ALLOC 0x50UL
+ #define HWRM_RING_FREE 0x51UL
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
+ #define HWRM_RING_AGGINT_QCAPS 0x54UL
+ #define HWRM_RING_SCHQ_ALLOC 0x55UL
+ #define HWRM_RING_SCHQ_CFG 0x56UL
+ #define HWRM_RING_SCHQ_FREE 0x57UL
+ #define HWRM_RING_RESET 0x5eUL
+ #define HWRM_RING_GRP_ALLOC 0x60UL
+ #define HWRM_RING_GRP_FREE 0x61UL
+ #define HWRM_RING_CFG 0x62UL
+ #define HWRM_RING_QCFG 0x63UL
+ #define HWRM_RESERVED5 0x64UL
+ #define HWRM_RESERVED6 0x65UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
+ #define HWRM_QUEUE_MPLS_QCAPS 0x80UL
+ #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
+ #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
+ #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL
+ #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL
+ #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
+ #define HWRM_QUEUE_GLOBAL_CFG 0x86UL
+ #define HWRM_QUEUE_GLOBAL_QCFG 0x87UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG 0x88UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG 0x89UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL
+ #define HWRM_QUEUE_QCAPS 0x8cUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG 0x8dUL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG 0x8eUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG 0x8fUL
+ #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
+ #define HWRM_CFA_L2_FILTER_FREE 0x91UL
+ #define HWRM_CFA_L2_FILTER_CFG 0x92UL
+ #define HWRM_CFA_L2_SET_RX_MASK 0x93UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL
+ #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL
+ #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL
+ #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL
+ #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL
+ #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL
+ #define HWRM_CFA_EM_FLOW_FREE 0x9dUL
+ #define HWRM_CFA_EM_FLOW_CFG 0x9eUL
+ #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
+ #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
+ #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG 0xa3UL
+ #define HWRM_STAT_CTX_ENG_QUERY 0xafUL
+ #define HWRM_STAT_CTX_ALLOC 0xb0UL
+ #define HWRM_STAT_CTX_FREE 0xb1UL
+ #define HWRM_STAT_CTX_QUERY 0xb2UL
+ #define HWRM_STAT_CTX_CLR_STATS 0xb3UL
+ #define HWRM_PORT_QSTATS_EXT 0xb4UL
+ #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL
+ #define HWRM_PORT_PHY_MDIO_READ 0xb6UL
+ #define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
+ #define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
+ #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
+ #define HWRM_RESERVED7 0xbaUL
+ #define HWRM_PORT_TX_FIR_CFG 0xbbUL
+ #define HWRM_PORT_TX_FIR_QCFG 0xbcUL
+ #define HWRM_PORT_ECN_QSTATS 0xbdUL
+ #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL
+ #define HWRM_FW_LIVEPATCH 0xbfUL
+ #define HWRM_FW_RESET 0xc0UL
+ #define HWRM_FW_QSTATUS 0xc1UL
+ #define HWRM_FW_HEALTH_CHECK 0xc2UL
+ #define HWRM_FW_SYNC 0xc3UL
+ #define HWRM_FW_STATE_QCAPS 0xc4UL
+ #define HWRM_FW_STATE_QUIESCE 0xc5UL
+ #define HWRM_FW_STATE_BACKUP 0xc6UL
+ #define HWRM_FW_STATE_RESTORE 0xc7UL
+ #define HWRM_FW_SET_TIME 0xc8UL
+ #define HWRM_FW_GET_TIME 0xc9UL
+ #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
+ #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
+ #define HWRM_FW_IPC_MAILBOX 0xccUL
+ #define HWRM_FW_ECN_CFG 0xcdUL
+ #define HWRM_FW_ECN_QCFG 0xceUL
+ #define HWRM_FW_SECURE_CFG 0xcfUL
+ #define HWRM_EXEC_FWD_RESP 0xd0UL
+ #define HWRM_REJECT_FWD_RESP 0xd1UL
+ #define HWRM_FWD_RESP 0xd2UL
+ #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
+ #define HWRM_OEM_CMD 0xd4UL
+ #define HWRM_PORT_PRBS_TEST 0xd5UL
+ #define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL
+ #define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL
+ #define HWRM_FW_STATE_UNQUIESCE 0xd8UL
+ #define HWRM_PORT_DSC_DUMP 0xd9UL
+ #define HWRM_PORT_EP_TX_QCFG 0xdaUL
+ #define HWRM_PORT_EP_TX_CFG 0xdbUL
+ #define HWRM_PORT_CFG 0xdcUL
+ #define HWRM_PORT_QCFG 0xddUL
+ #define HWRM_PORT_MAC_QCAPS 0xdfUL
+ #define HWRM_TEMP_MONITOR_QUERY 0xe0UL
+ #define HWRM_REG_POWER_QUERY 0xe1UL
+ #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
+ #define HWRM_REG_POWER_HISTOGRAM 0xe3UL
+ #define HWRM_MONITOR_PAX_HISTOGRAM_START 0xe4UL
+ #define HWRM_MONITOR_PAX_HISTOGRAM_COLLECT 0xe5UL
+ #define HWRM_STAT_QUERY_ROCE_STATS 0xe6UL
+ #define HWRM_STAT_QUERY_ROCE_STATS_EXT 0xe7UL
+ #define HWRM_WOL_FILTER_ALLOC 0xf0UL
+ #define HWRM_WOL_FILTER_FREE 0xf1UL
+ #define HWRM_WOL_FILTER_QCFG 0xf2UL
+ #define HWRM_WOL_REASON_QCFG 0xf3UL
+ #define HWRM_CFA_METER_QCAPS 0xf4UL
+ #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
+ #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
+ #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
+ #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL
+ #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL
+ #define HWRM_CFA_METER_INSTANCE_CFG 0xfaUL
+ #define HWRM_CFA_VFR_ALLOC 0xfdUL
+ #define HWRM_CFA_VFR_FREE 0xfeUL
+ #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL
+ #define HWRM_CFA_VF_PAIR_FREE 0x101UL
+ #define HWRM_CFA_VF_PAIR_INFO 0x102UL
+ #define HWRM_CFA_FLOW_ALLOC 0x103UL
+ #define HWRM_CFA_FLOW_FREE 0x104UL
+ #define HWRM_CFA_FLOW_FLUSH 0x105UL
+ #define HWRM_CFA_FLOW_STATS 0x106UL
+ #define HWRM_CFA_FLOW_INFO 0x107UL
+ #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL
+ #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL
+ #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL
+ #define HWRM_CFA_PAIR_ALLOC 0x10dUL
+ #define HWRM_CFA_PAIR_FREE 0x10eUL
+ #define HWRM_CFA_PAIR_INFO 0x10fUL
+ #define HWRM_FW_IPC_MSG 0x110UL
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
+ #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL
+ #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL
+ #define HWRM_CFA_FLOW_AGING_CFG 0x114UL
+ #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL
+ #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL
+ #define HWRM_CFA_CTX_MEM_RGTR 0x117UL
+ #define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL
+ #define HWRM_CFA_CTX_MEM_QCTX 0x119UL
+ #define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL
+ #define HWRM_CFA_COUNTER_QCAPS 0x11bUL
+ #define HWRM_CFA_COUNTER_CFG 0x11cUL
+ #define HWRM_CFA_COUNTER_QCFG 0x11dUL
+ #define HWRM_CFA_COUNTER_QSTATS 0x11eUL
+ #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL
+ #define HWRM_CFA_EEM_QCAPS 0x120UL
+ #define HWRM_CFA_EEM_CFG 0x121UL
+ #define HWRM_CFA_EEM_QCFG 0x122UL
+ #define HWRM_CFA_EEM_OP 0x123UL
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
+ #define HWRM_CFA_TFLIB 0x125UL
+ #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL
+ #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
+ #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL
+ #define HWRM_CFA_TLS_FILTER_FREE 0x129UL
+ #define HWRM_CFA_RELEASE_AFM_FUNC 0x12aUL
+ #define HWRM_ENGINE_CKV_STATUS 0x12eUL
+ #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
+ #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
+ #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL
+ #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL
+ #define HWRM_ENGINE_CKV_FLUSH 0x133UL
+ #define HWRM_ENGINE_CKV_RNG_GET 0x134UL
+ #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
+ #define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL
+ #define HWRM_ENGINE_CKV_KEY_LABEL_QCFG 0x137UL
+ #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
+ #define HWRM_ENGINE_QG_QUERY 0x13dUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL
+ #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL
+ #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL
+ #define HWRM_ENGINE_QG_METER_QUERY 0x142UL
+ #define HWRM_ENGINE_QG_METER_BIND 0x143UL
+ #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL
+ #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL
+ #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL
+ #define HWRM_ENGINE_SG_QUERY 0x147UL
+ #define HWRM_ENGINE_SG_METER_QUERY 0x148UL
+ #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL
+ #define HWRM_ENGINE_SG_QG_BIND 0x14aUL
+ #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL
+ #define HWRM_ENGINE_CONFIG_QUERY 0x154UL
+ #define HWRM_ENGINE_STATS_CONFIG 0x155UL
+ #define HWRM_ENGINE_STATS_CLEAR 0x156UL
+ #define HWRM_ENGINE_STATS_QUERY 0x157UL
+ #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL
+ #define HWRM_ENGINE_RQ_ALLOC 0x15eUL
+ #define HWRM_ENGINE_RQ_FREE 0x15fUL
+ #define HWRM_ENGINE_CQ_ALLOC 0x160UL
+ #define HWRM_ENGINE_CQ_FREE 0x161UL
+ #define HWRM_ENGINE_NQ_ALLOC 0x162UL
+ #define HWRM_ENGINE_NQ_FREE 0x163UL
+ #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
+ #define HWRM_ENGINE_FUNC_QCFG 0x165UL
+ #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
+ #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL
+ #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
+ #define HWRM_FUNC_VF_BW_CFG 0x195UL
+ #define HWRM_FUNC_VF_BW_QCFG 0x196UL
+ #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
+ #define HWRM_FUNC_QSTATS_EXT 0x198UL
+ #define HWRM_STAT_EXT_CTX_QUERY 0x199UL
+ #define HWRM_FUNC_SPD_CFG 0x19aUL
+ #define HWRM_FUNC_SPD_QCFG 0x19bUL
+ #define HWRM_FUNC_PTP_PIN_QCFG 0x19cUL
+ #define HWRM_FUNC_PTP_PIN_CFG 0x19dUL
+ #define HWRM_FUNC_PTP_CFG 0x19eUL
+ #define HWRM_FUNC_PTP_TS_QUERY 0x19fUL
+ #define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
+ #define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
+ #define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2 0x1a3UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2 0x1a4UL
+ #define HWRM_FUNC_DBR_PACING_CFG 0x1a5UL
+ #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL
+ #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL
+ #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL
+ #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL
+ #define HWRM_FUNC_SYNCE_CFG 0x1abUL
+ #define HWRM_FUNC_SYNCE_QCFG 0x1acUL
+ #define HWRM_FUNC_KEY_CTX_FREE 0x1adUL
+ #define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL
+ #define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL
+ #define HWRM_FUNC_LAG_CREATE 0x1b0UL
+ #define HWRM_FUNC_LAG_UPDATE 0x1b1UL
+ #define HWRM_FUNC_LAG_FREE 0x1b2UL
+ #define HWRM_FUNC_LAG_QCFG 0x1b3UL
+ #define HWRM_FUNC_TTX_PACING_RATE_PROF_QUERY 0x1c3UL
+ #define HWRM_FUNC_TTX_PACING_RATE_QUERY 0x1c4UL
+ #define HWRM_SELFTEST_QLIST 0x200UL
+ #define HWRM_SELFTEST_EXEC 0x201UL
+ #define HWRM_SELFTEST_IRQ 0x202UL
+ #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
+ #define HWRM_PCIE_QSTATS 0x204UL
+ #define HWRM_MFG_FRU_WRITE_CONTROL 0x205UL
+ #define HWRM_MFG_TIMERS_QUERY 0x206UL
+ #define HWRM_MFG_OTP_CFG 0x207UL
+ #define HWRM_MFG_OTP_QCFG 0x208UL
+ #define HWRM_MFG_HDMA_TEST 0x209UL
+ #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
+ #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
+ #define HWRM_MFG_SOC_IMAGE 0x20cUL
+ #define HWRM_MFG_SOC_QSTATUS 0x20dUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE 0x20eUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_READ 0x20fUL
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH 0x210UL
+ #define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL
+ #define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
+ #define HWRM_MFG_PRVSN_GET_STATE 0x213UL
+ #define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL
+ #define HWRM_MFG_PSOC_QSTATUS 0x215UL
+ #define HWRM_MFG_SELFTEST_QLIST 0x216UL
+ #define HWRM_MFG_SELFTEST_EXEC 0x217UL
+ #define HWRM_STAT_GENERIC_QSTATS 0x218UL
+ #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
+ #define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
+ #define HWRM_MFG_TESTS 0x21bUL
+ #define HWRM_MFG_WRITE_CERT_NVM 0x21cUL
+ #define HWRM_PORT_POE_CFG 0x230UL
+ #define HWRM_PORT_POE_QCFG 0x231UL
+ #define HWRM_PORT_PHY_FDRSTAT 0x232UL
+ #define HWRM_UDCC_QCAPS 0x258UL
+ #define HWRM_UDCC_CFG 0x259UL
+ #define HWRM_UDCC_QCFG 0x25aUL
+ #define HWRM_UDCC_SESSION_CFG 0x25bUL
+ #define HWRM_UDCC_SESSION_QCFG 0x25cUL
+ #define HWRM_UDCC_SESSION_QUERY 0x25dUL
+ #define HWRM_UDCC_COMP_CFG 0x25eUL
+ #define HWRM_UDCC_COMP_QCFG 0x25fUL
+ #define HWRM_UDCC_COMP_QUERY 0x260UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x261UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x262UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x263UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_QCFG 0x264UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_QCFG 0x265UL
+ #define HWRM_TF 0x2bcUL
+ #define HWRM_TF_VERSION_GET 0x2bdUL
+ #define HWRM_TF_SESSION_OPEN 0x2c6UL
+ #define HWRM_TF_SESSION_REGISTER 0x2c8UL
+ #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
+ #define HWRM_TF_SESSION_CLOSE 0x2caUL
+ #define HWRM_TF_SESSION_QCFG 0x2cbUL
+ #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL
+ #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL
+ #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
+ #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
+ #define HWRM_TF_SESSION_RESC_INFO 0x2d0UL
+ #define HWRM_TF_SESSION_HOTUP_STATE_SET 0x2d1UL
+ #define HWRM_TF_SESSION_HOTUP_STATE_GET 0x2d2UL
+ #define HWRM_TF_TBL_TYPE_GET 0x2daUL
+ #define HWRM_TF_TBL_TYPE_SET 0x2dbUL
+ #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
+ #define HWRM_TF_EM_INSERT 0x2eaUL
+ #define HWRM_TF_EM_DELETE 0x2ebUL
+ #define HWRM_TF_EM_HASH_INSERT 0x2ecUL
+ #define HWRM_TF_EM_MOVE 0x2edUL
+ #define HWRM_TF_TCAM_SET 0x2f8UL
+ #define HWRM_TF_TCAM_GET 0x2f9UL
+ #define HWRM_TF_TCAM_MOVE 0x2faUL
+ #define HWRM_TF_TCAM_FREE 0x2fbUL
+ #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
+ #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
+ #define HWRM_TF_IF_TBL_SET 0x2feUL
+ #define HWRM_TF_IF_TBL_GET 0x2ffUL
+ #define HWRM_TF_RESC_USAGE_SET 0x300UL
+ #define HWRM_TF_RESC_USAGE_QUERY 0x301UL
+ #define HWRM_TF_TBL_TYPE_ALLOC 0x302UL
+ #define HWRM_TF_TBL_TYPE_FREE 0x303UL
+ #define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL
+ #define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL
+ #define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL
+ #define HWRM_TFC_TBL_SCOPE_DECONFIG 0x383UL
+ #define HWRM_TFC_TBL_SCOPE_FID_ADD 0x384UL
+ #define HWRM_TFC_TBL_SCOPE_FID_REM 0x385UL
+ #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC 0x386UL
+ #define HWRM_TFC_TBL_SCOPE_POOL_FREE 0x387UL
+ #define HWRM_TFC_SESSION_ID_ALLOC 0x388UL
+ #define HWRM_TFC_SESSION_FID_ADD 0x389UL
+ #define HWRM_TFC_SESSION_FID_REM 0x38aUL
+ #define HWRM_TFC_IDENT_ALLOC 0x38bUL
+ #define HWRM_TFC_IDENT_FREE 0x38cUL
+ #define HWRM_TFC_IDX_TBL_ALLOC 0x38dUL
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET 0x38eUL
+ #define HWRM_TFC_IDX_TBL_SET 0x38fUL
+ #define HWRM_TFC_IDX_TBL_GET 0x390UL
+ #define HWRM_TFC_IDX_TBL_FREE 0x391UL
+ #define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL
+ #define HWRM_TFC_TCAM_SET 0x393UL
+ #define HWRM_TFC_TCAM_GET 0x394UL
+ #define HWRM_TFC_TCAM_ALLOC 0x395UL
+ #define HWRM_TFC_TCAM_ALLOC_SET 0x396UL
+ #define HWRM_TFC_TCAM_FREE 0x397UL
+ #define HWRM_TFC_IF_TBL_SET 0x398UL
+ #define HWRM_TFC_IF_TBL_GET 0x399UL
+ #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
+ #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
+ #define HWRM_TFC_GLOBAL_ID_FREE 0x39cUL
+ #define HWRM_TFC_TCAM_PRI_UPDATE 0x39dUL
+ #define HWRM_TFC_HOT_UPGRADE_PROCESS 0x3a0UL
+ #define HWRM_SV 0x400UL
+ #define HWRM_DBG_SERDES_TEST 0xff0eUL
+ #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
+ #define HWRM_DBG_READ_DIRECT 0xff10UL
+ #define HWRM_DBG_READ_INDIRECT 0xff11UL
+ #define HWRM_DBG_WRITE_DIRECT 0xff12UL
+ #define HWRM_DBG_WRITE_INDIRECT 0xff13UL
+ #define HWRM_DBG_DUMP 0xff14UL
+ #define HWRM_DBG_ERASE_NVM 0xff15UL
+ #define HWRM_DBG_CFG 0xff16UL
+ #define HWRM_DBG_COREDUMP_LIST 0xff17UL
+ #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
+ #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
+ #define HWRM_DBG_FW_CLI 0xff1aUL
+ #define HWRM_DBG_I2C_CMD 0xff1bUL
+ #define HWRM_DBG_RING_INFO_GET 0xff1cUL
+ #define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
+ #define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
+ #define HWRM_DBG_DRV_TRACE 0xff1fUL
+ #define HWRM_DBG_QCAPS 0xff20UL
+ #define HWRM_DBG_QCFG 0xff21UL
+ #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL
+ #define HWRM_DBG_USEQ_ALLOC 0xff23UL
+ #define HWRM_DBG_USEQ_FREE 0xff24UL
+ #define HWRM_DBG_USEQ_FLUSH 0xff25UL
+ #define HWRM_DBG_USEQ_QCAPS 0xff26UL
+ #define HWRM_DBG_USEQ_CW_CFG 0xff27UL
+ #define HWRM_DBG_USEQ_SCHED_CFG 0xff28UL
+ #define HWRM_DBG_USEQ_RUN 0xff29UL
+ #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
+ #define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
+ #define HWRM_DBG_COREDUMP_CAPTURE 0xff2cUL
+ #define HWRM_DBG_PTRACE 0xff2dUL
+ #define HWRM_DBG_SIM_CABLE_STATE 0xff2eUL
+ #define HWRM_DBG_TOKEN_QUERY_AUTH_IDS 0xff2fUL
+ #define HWRM_DBG_TOKEN_CFG 0xff30UL
+ #define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL
+ #define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL
+ #define HWRM_NVM_DEFRAG 0xffecUL
+ #define HWRM_NVM_REQ_ARBITRATION 0xffedUL
+ #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
+ #define HWRM_NVM_VALIDATE_OPTION 0xffefUL
+ #define HWRM_NVM_FLUSH 0xfff0UL
+ #define HWRM_NVM_GET_VARIABLE 0xfff1UL
+ #define HWRM_NVM_SET_VARIABLE 0xfff2UL
+ #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL
+ #define HWRM_NVM_MODIFY 0xfff4UL
+ #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL
+ #define HWRM_NVM_GET_DEV_INFO 0xfff6UL
+ #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL
+ #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL
+ #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL
+ #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL
+ #define HWRM_NVM_GET_DIR_INFO 0xfffbUL
+ #define HWRM_NVM_RAW_DUMP 0xfffcUL
+ #define HWRM_NVM_READ 0xfffdUL
+ #define HWRM_NVM_WRITE 0xfffeUL
+ #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL
+ #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
+ __le16 unused_0[3];
+};
+
+/* ret_codes (size:64b/8B) */
+struct ret_codes {
+ __le16 error_code;
+ #define HWRM_ERR_CODE_SUCCESS 0x0UL
+ #define HWRM_ERR_CODE_FAIL 0x1UL
+ #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
+ #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
+ #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
+ #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
+ #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
+ #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
+ #define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC 0xcUL
+ #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL
+ #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
+ #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_BUSY 0x10UL
+ #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
+ #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
+ #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL
+ #define HWRM_ERR_CODE_SECURE_SOC_ERROR 0x14UL
+ #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
+ #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
+ #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ __le16 unused_0[3];
+};
+
+/* hwrm_err_output (size:128b/16B) */
+struct hwrm_err_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 opaque_0;
+ __le16 opaque_1;
+ u8 cmd_err;
+ u8 valid;
+};
+#define HWRM_NA_SIGNATURE ((__le32)(-1))
+#define HWRM_MAX_REQ_LEN 128
+#define HWRM_MAX_RESP_LEN 704
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_TARGET_ID_BONO 0xFFF8
+#define HWRM_TARGET_ID_KONG 0xFFF9
+#define HWRM_TARGET_ID_APE 0xFFFA
+#define HWRM_TARGET_ID_TOOLS 0xFFFD
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 10
+#define HWRM_VERSION_UPDATE 3
+#define HWRM_VERSION_RSVD 133
+#define HWRM_VERSION_STR "1.10.3.133"
+
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 hwrm_intf_maj;
+ u8 hwrm_intf_min;
+ u8 hwrm_intf_upd;
+ u8 unused_0[5];
+};
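
A sketch of filling the version request; multi-byte fields are little-endian per the __le types, and the 0xffff cmpl_ring/target_id defaults plus resp_dma_addr are assumptions modeled on how the bnxt driver conventionally initializes request headers:

	struct hwrm_ver_get_input req = {};

	req.req_type  = cpu_to_le16(HWRM_VER_GET);
	req.cmpl_ring = cpu_to_le16(0xffff);	    /* assumed: no completion ring */
	req.target_id = cpu_to_le16(0xffff);	    /* assumed: default target */
	req.resp_addr = cpu_to_le64(resp_dma_addr); /* DMA address of response buf */
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
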
+
+/* hwrm_ver_get_output (size:1408b/176B) */
+struct hwrm_ver_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hwrm_intf_maj_8b;
+ u8 hwrm_intf_min_8b;
+ u8 hwrm_intf_upd_8b;
+ u8 hwrm_intf_rsvd_8b;
+ u8 hwrm_fw_maj_8b;
+ u8 hwrm_fw_min_8b;
+ u8 hwrm_fw_bld_8b;
+ u8 hwrm_fw_rsvd_8b;
+ u8 mgmt_fw_maj_8b;
+ u8 mgmt_fw_min_8b;
+ u8 mgmt_fw_bld_8b;
+ u8 mgmt_fw_rsvd_8b;
+ u8 netctrl_fw_maj_8b;
+ u8 netctrl_fw_min_8b;
+ u8 netctrl_fw_bld_8b;
+ u8 netctrl_fw_rsvd_8b;
+ __le32 dev_caps_cfg;
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_DEBUG_TOKEN_SUPPORTED 0x20000UL
+ u8 roce_fw_maj_8b;
+ u8 roce_fw_min_8b;
+ u8 roce_fw_bld_8b;
+ u8 roce_fw_rsvd_8b;
+ char hwrm_fw_name[16];
+ char mgmt_fw_name[16];
+ char netctrl_fw_name[16];
+ char active_pkg_name[16];
+ char roce_fw_name[16];
+ __le16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+ u8 chip_bond_id;
+ u8 chip_platform_type;
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
+ __le16 max_req_win_len;
+ __le16 max_resp_len;
+ __le16 def_req_timeout;
+ u8 flags;
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
+ #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL
+ u8 unused_0[2];
+ u8 always_1;
+ __le16 hwrm_intf_major;
+ __le16 hwrm_intf_minor;
+ __le16 hwrm_intf_build;
+ __le16 hwrm_intf_patch;
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ __le16 max_ext_req_len;
+ __le16 max_req_timeout;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* eject_cmpl (size:128b/16B) */
+struct eject_cmpl {
+ __le16 type;
+ #define EJECT_CMPL_TYPE_MASK 0x3fUL
+ #define EJECT_CMPL_TYPE_SFT 0
+ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
+ #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ #define EJECT_CMPL_FLAGS_MASK 0xffc0UL
+ #define EJECT_CMPL_FLAGS_SFT 6
+ #define EJECT_CMPL_FLAGS_ERROR 0x40UL
+ __le16 len;
+ __le32 opaque;
+ __le16 v;
+ #define EJECT_CMPL_V 0x1UL
+ #define EJECT_CMPL_ERRORS_MASK 0xfffeUL
+ #define EJECT_CMPL_ERRORS_SFT 1
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH
+ __le16 reserved16;
+ __le32 unused_2;
+};
+
+/* hwrm_cmpl (size:128b/16B) */
+struct hwrm_cmpl {
+ __le16 type;
+ #define CMPL_TYPE_MASK 0x3fUL
+ #define CMPL_TYPE_SFT 0
+ #define CMPL_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE
+ __le16 sequence_id;
+ __le32 unused_1;
+ __le32 v;
+ #define CMPL_V 0x1UL
+ __le32 unused_3;
+};
+
+/* hwrm_fwd_req_cmpl (size:128b/16B) */
+struct hwrm_fwd_req_cmpl {
+ __le16 req_len_type;
+ #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_REQ_CMPL_TYPE_SFT 0
+ #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
+ #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
+ #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+ #define FWD_REQ_CMPL_REQ_LEN_SFT 6
+ __le16 source_id;
+ __le32 unused0;
+ __le32 req_buf_addr_v[2];
+ #define FWD_REQ_CMPL_V 0x1UL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+};
+
+/* hwrm_fwd_resp_cmpl (size:128b/16B) */
+struct hwrm_fwd_resp_cmpl {
+ __le16 type;
+ #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+ #define FWD_RESP_CMPL_TYPE_SFT 0
+ #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
+ #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
+ __le16 source_id;
+ __le16 resp_len;
+ __le16 unused_1;
+ __le32 resp_buf_addr_v[2];
+ #define FWD_RESP_CMPL_V 0x1UL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+ #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+};
+
+/* hwrm_async_event_cmpl (size:128b/16B) */
+struct hwrm_async_event_cmpl {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_REPRESENTOR_PAIR_CHANGE 0x4eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_STAT_CHANGE 0x4fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HOST_COREDUMP 0x50UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ADPTV_QOS 0x51UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x52UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_status_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20
+};
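
A decoding sketch for a received completion; evt is assumed to point at a struct hwrm_async_event_cmpl whose validity bit (V in opaque_v) has already been checked:

	u16 type     = le16_to_cpu(evt->type) & ASYNC_EVENT_CMPL_TYPE_MASK;
	u16 event_id = le16_to_cpu(evt->event_id);

	if (type == ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT &&
	    event_id == ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE) {
		bool link_up = le32_to_cpu(evt->event_data1) &
			ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE;

		/* bit set means up, per the _LINK_CHANGE_UP value above */
		pr_debug("link %s\n", link_up ? "up" : "down");
	}
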
+
+/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+};
+
+/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_speed_cfg_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
+};
+
+/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */
+struct hwrm_async_event_cmpl_reset_notify {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION (0x5UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
+};
+
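/*
 * Illustrative sketch, not part of the patch: the reset-notify payload packs
 * driver action, reason code, and a delay into event_data1. The REASON_CODE_*
 * values above are already shifted, so the masked value compares against them
 * directly; the helper below converts the 100 ms tick count to milliseconds.
 * Helper name is invented.
 */
static inline u32 hwrm_reset_notify_delay_msecs(const struct hwrm_async_event_cmpl_reset_notify *ev)
{
	u32 data1 = le32_to_cpu(ev->event_data1);
	u32 ticks = (data1 & ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK) >>
		    ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT;

	return ticks * 100;	/* each tick is 100 ms */
}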
+/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_recovery {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL
+};
+
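/*
 * Illustrative sketch, not part of the patch: the two recovery indications
 * are plain bit flags in event_data1. Helper names are invented.
 */
static inline bool hwrm_error_recovery_is_master(const struct hwrm_async_event_cmpl_error_recovery *ev)
{
	return !!(le32_to_cpu(ev->event_data1) &
		  ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC);
}

static inline bool hwrm_error_recovery_enabled(const struct hwrm_async_event_cmpl_error_recovery *ev)
{
	return !!(le32_to_cpu(ev->event_data1) &
		  ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED);
}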
+/* hwrm_async_event_cmpl_ring_monitor_msg (size:128b/16B) */
+struct hwrm_async_event_cmpl_ring_monitor_msg {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_TX 0x0UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL 0x2UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_cfg_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE 0x20UL
+};
+
+/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_default_vnic_change {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK 0xffc0UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT 6
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION 0x35UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK 0x3UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE 0x2UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK 0x3fcUL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT 2
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK 0x3fffc00UL
+ #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT 10
+};
+
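/*
 * Illustrative sketch, not part of the patch: event_data1 of the default
 * VNIC change event packs the state (bits 1:0), PF id (bits 9:2), and VF id
 * (bits 25:10) into one word. Helper names are invented.
 */
static inline bool hwrm_def_vnic_is_alloc(const struct hwrm_async_event_cmpl_default_vnic_change *ev)
{
	u32 state = le32_to_cpu(ev->event_data1) &
		    ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK;

	return state == ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC;
}

static inline u16 hwrm_def_vnic_vf_id(const struct hwrm_async_event_cmpl_default_vnic_change *ev)
{
	u32 data1 = le32_to_cpu(ev->event_data1);

	return (data1 & ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK) >>
	       ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT;
}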
+/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */
+struct hwrm_async_event_cmpl_hw_flow_aged {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31)
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31)
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX
+};
+
+/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */
+struct hwrm_async_event_cmpl_eem_cache_flush_req {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */
+struct hwrm_async_event_cmpl_eem_cache_flush_done {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0
+};
+
+/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */
+struct hwrm_async_event_cmpl_deferred_response {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */
+struct hwrm_async_event_cmpl_echo_request {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_phc_update (size:128b/16B) */
+struct hwrm_async_event_cmpl_phc_update {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT 16
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK 0xfUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE 0x4UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT 4
+};
+
+/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
+struct hwrm_async_event_cmpl_pps_timestamp {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL 0x0UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK 0xeUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT 1
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT 4
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT 0
+};
+
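/*
 * Illustrative sketch, not part of the patch: the PPS timestamp is split
 * across two words — the upper 16 bits sit in event_data2 and the lower
 * 32 bits in event_data1 — yielding a 48-bit counter value. Helper name is
 * invented.
 */
static inline u64 hwrm_pps_timestamp_48b(const struct hwrm_async_event_cmpl_pps_timestamp *ev)
{
	u32 data2 = le32_to_cpu(ev->event_data2);
	u64 upper = (data2 & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK) >>
		    ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT;

	return (upper << 32) | le32_to_cpu(ev->event_data1);
}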
+/* hwrm_async_event_cmpl_error_report (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0
+};
+
+/* hwrm_async_event_cmpl_dbg_buf_producer (size:128b/16B) */
+struct hwrm_async_event_cmpl_dbg_buf_producer {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT_TRACE 0x0UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT2_TRACE 0x1UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT_TRACE 0x2UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT2_TRACE 0x3UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP0_TRACE 0x4UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_L2_HWRM_TRACE 0x5UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ROCE_HWRM_TRACE 0x6UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA0_TRACE 0x7UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA1_TRACE 0x8UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA2_TRACE 0x9UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP1_TRACE 0xaUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ERR_QPC_TRACE 0xcUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ERR_QPC_TRACE
+};
+
+/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
+struct hwrm_async_event_cmpl_hwrm_error {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
+};
+
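/*
 * Illustrative sketch, not part of the patch: mapping the severity code in
 * event_data2 (run it through le32_to_cpu() first) to a printable string.
 * Helper name is invented.
 */
static inline const char *hwrm_error_severity_str(u32 data2)
{
	switch (data2 & ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK) {
	case ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING:
		return "warning";
	case ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL:
		return "non-fatal";
	case ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL:
		return "fatal";
	default:
		return "unknown";
	}
}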
+/* hwrm_async_event_cmpl_error_report_base (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_base {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUP_UDCC_SES 0x7UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DB_DROP 0x8UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MD_TEMP 0x9UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_VNIC_ERR 0xaUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_VNIC_ERR
+};
+
+/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_pause_storm {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM
+};
+
+/* hwrm_async_event_cmpl_error_report_invalid_signal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_invalid_signal {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL
+};
+
+/* hwrm_async_event_cmpl_error_report_nvm (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_nvm {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_SFT 8
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
+};
+
+/* hwrm_async_event_cmpl_error_report_doorbell_drop_threshold (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK 0xffffff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8
+};
+
+/* hwrm_async_event_cmpl_error_report_thermal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_thermal {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT 8
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK 0x700UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SFT 8
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN (0x0UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN (0x3UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR 0x800UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_DECREASING (0x0UL << 11)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING (0x1UL << 11)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING
+};
+
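/*
 * Illustrative sketch, not part of the patch: the thermal report carries the
 * current and threshold temperatures in event_data2 and the threshold type
 * plus transition direction in event_data1. The THRESHOLD_TYPE_* values
 * above are pre-shifted, so the masked value compares against them directly.
 * Helper names are invented.
 */
static inline u8 hwrm_thermal_current_temp(const struct hwrm_async_event_cmpl_error_report_thermal *ev)
{
	return le32_to_cpu(ev->event_data2) &
	       ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK;
}

static inline bool hwrm_thermal_temp_increasing(const struct hwrm_async_event_cmpl_error_report_thermal *ev)
{
	return !!(le32_to_cpu(ev->event_data1) &
		  ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR);
}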
+/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
+};
+
+/* hwrm_func_reset_input (size:192b/24B) */
+struct hwrm_func_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
+ __le16 vf_id;
+ u8 func_reset_level;
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
+ u8 unused_0;
+};
+
+/* hwrm_func_reset_output (size:128b/16B) */
+struct hwrm_func_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
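/*
 * Illustrative sketch, not part of the patch: every hwrm_*_input message
 * above begins with the same request header (req_type, cmpl_ring, seq_id,
 * target_id, resp_addr). The helper below shows the shape of filling one by
 * hand for hwrm_func_reset_input; the HWRM_FUNC_RESET opcode name and the
 * 0xffff "none"/"self" conventions are assumptions here, and real drivers
 * go through their own request-building helpers instead.
 */
static inline void hwrm_func_reset_fill(struct hwrm_func_reset_input *req,
					u16 seq_id, u64 resp_dma)
{
	memset(req, 0, sizeof(*req));
	req->req_type = cpu_to_le16(HWRM_FUNC_RESET);	/* assumed opcode name */
	req->cmpl_ring = cpu_to_le16(0xffff);		/* assumed: no cmpl ring */
	req->seq_id = cpu_to_le16(seq_id);
	req->target_id = cpu_to_le16(0xffff);		/* assumed: this function */
	req->resp_addr = cpu_to_le64(resp_dma);		/* DMA addr of response buf */
	req->func_reset_level = FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME;
}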
+/* hwrm_func_getfid_input (size:192b/24B) */
+struct hwrm_func_getfid_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
+ __le16 pci_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_func_getfid_output (size:128b/16B) */
+struct hwrm_func_getfid_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_alloc_input (size:192b/24B) */
+struct hwrm_func_vf_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_alloc_output (size:128b/16B) */
+struct hwrm_func_vf_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 first_vf_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_func_vf_free_input (size:192b/24B) */
+struct hwrm_func_vf_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
+ __le16 first_vf_id;
+ __le16 num_vfs;
+};
+
+/* hwrm_func_vf_free_output (size:128b/16B) */
+struct hwrm_func_vf_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_cfg_input (size:576b/72B) */
+struct hwrm_func_vf_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_TX_KEY_CTXS 0x1000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_RX_KEY_CTXS 0x2000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_TX_KEY_CTXS 0x4000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_RX_KEY_CTXS 0x8000UL
+ __le16 mtu;
+ __le16 guest_vlan;
+ __le16 async_event_cr;
+ u8 dflt_mac_addr[6];
+ __le32 flags;
+ #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL
+ #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL
+ #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL
+ #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
+ #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
+ #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL
+ #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ __le16 num_msix;
+ u8 unused[2];
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+};
+
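/*
 * Illustrative sketch, not part of the patch: a field in an hwrm_*_input
 * message is only honored when the matching bit in 'enables' is set, so a
 * setter conventionally updates both together. Helper name is invented.
 */
static inline void hwrm_vf_cfg_set_mtu(struct hwrm_func_vf_cfg_input *req, u16 mtu)
{
	req->mtu = cpu_to_le16(mtu);
	req->enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_MTU);
}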
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
+struct hwrm_func_vf_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_qcaps_input (size:192b/24B) */
+struct hwrm_func_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_qcaps_output (size:1152b/144B) */
+struct hwrm_func_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le32 flags;
+ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL
+ u8 mac_address[6];
+ __le16 max_rsscos_ctx;
+ __le16 max_cmpl_rings;
+ __le16 max_tx_rings;
+ __le16 max_rx_rings;
+ __le16 max_l2_ctxs;
+ __le16 max_vnics;
+ __le16 first_vf_id;
+ __le16 max_vfs;
+ __le16 max_stat_ctx;
+ __le32 max_encap_records;
+ __le32 max_decap_records;
+ __le32 max_tx_em_flows;
+ __le32 max_tx_wm_flows;
+ __le32 max_rx_em_flows;
+ __le32 max_rx_wm_flows;
+ __le32 max_mcast_filters;
+ __le32 max_flow_id;
+ __le32 max_hw_ring_grps;
+ __le16 max_sp_tx_rings;
+ __le16 max_msix_vfs;
+ __le32 flags_ext;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED 0x80000000UL
+ u8 max_schqs;
+ u8 mpc_chnls_cap;
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE 0x2UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
+ __le16 max_key_ctxs_alloc;
+ __le32 flags_ext2;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_CONCURRENT_KTLS_QUIC_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_CROSS_TC_CAP_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_CAP_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_RESERVATION_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DB_ERROR_STATS_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_VF_STAT_EJECTION_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HOST_COREDUMP_SUPPORTED 0x80000000UL
+ __le16 tunnel_disable_flag;
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE 0x4UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE 0x8UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE 0x10UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
+ __le16 xid_partition_cap;
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_TX_CK 0x1UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL
+ u8 device_serial_number[8];
+ __le16 ctxs_per_partition;
+ __le16 max_tso_segs;
+ __le32 roce_vf_max_av;
+ __le32 roce_vf_max_cq;
+ __le32 roce_vf_max_mrw;
+ __le32 roce_vf_max_qp;
+ __le32 roce_vf_max_srq;
+ __le32 roce_vf_max_gid;
+ __le32 flags_ext3;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RX_RATE_PROFILE_SEL_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_BIDI_OPT_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_CHANGE_UDP_SRCPORT_SUPPORT 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_PCIE_COMPLIANCE_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MULTI_L2_DB_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_PCIE_SECURE_ATS_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MBUF_STATS_SUPPORTED 0x800UL
+ __le16 max_roce_vfs;
+ __le16 max_crypto_rx_flow_filters;
+ u8 unused_3[3];
+ u8 valid;
+};
+
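/*
 * Illustrative sketch, not part of the patch: capability bits in the qcaps
 * response gate optional driver features; a single masked test per flag is
 * all that is needed. Helper name is invented.
 */
static inline bool hwrm_func_supports_udp_gso(const struct hwrm_func_qcaps_output *resp)
{
	return !!(le32_to_cpu(resp->flags_ext2) &
		  FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED);
}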
+/* hwrm_func_qcfg_input (size:192b/24B) */
+struct hwrm_func_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_qcfg_output (size:1408b/176B) */
+struct hwrm_func_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le16 vlan;
+ __le16 flags;
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
+ #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
+ #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
+ #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
+ #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
+ #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
+ #define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
+ #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
+ #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL
+ #define FUNC_QCFG_RESP_FLAGS_ROCE_VNIC_ID_VALID 0x8000UL
+ u8 mac_address[6];
+ __le16 pci_id;
+ __le16 alloc_rsscos_ctx;
+ __le16 alloc_cmpl_rings;
+ __le16 alloc_tx_rings;
+ __le16 alloc_rx_rings;
+ __le16 alloc_l2_ctx;
+ __le16 alloc_vnics;
+ __le16 admin_mtu;
+ __le16 mru;
+ __le16 stat_ctx_id;
+ u8 port_partition_type;
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2 0x5UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
+ u8 port_pf_cnt;
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
+ __le16 dflt_vnic_id;
+ __le16 max_mtu_configured;
+ __le32 min_bw;
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 evb_mode;
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
+ #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
+ u8 options;
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4
+ __le16 alloc_vfs;
+ __le32 alloc_mcast_filters;
+ __le32 alloc_hw_ring_grps;
+ __le16 alloc_sp_tx_rings;
+ __le16 alloc_stat_ctx;
+ __le16 alloc_msix;
+ __le16 registered_vfs;
+ __le16 l2_doorbell_bar_size_kb;
+ u8 active_endpoints;
+ u8 always_1;
+ __le32 reset_addr_poll;
+ __le16 legacy_l2_db_size_kb;
+ __le16 svif_info;
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
+ #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
+ u8 mpc_chnls;
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TCE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RCE_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL
+ u8 db_page_size;
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4KB 0x0UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_8KB 0x1UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_16KB 0x2UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_32KB 0x3UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_64KB 0x4UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_128KB 0x5UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_256KB 0x6UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_512KB 0x7UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_1MB 0x8UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB
+ __le16 roce_vnic_id;
+ __le32 partition_min_bw;
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le32 partition_max_bw;
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le16 host_mtu;
+ __le16 flags2;
+ #define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL
+ __le16 stag_vid;
+ u8 port_kdnet_mode;
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
+ u8 kdnet_pcie_function;
+ __le16 port_kdnet_fid;
+ u8 unused_5;
+ u8 roce_bidi_opt_mode;
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DISABLED 0x1UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED 0x2UL
+ #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_SHARED 0x4UL
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ u8 lag_id;
+ u8 parif;
+ u8 fw_lag_id;
+ u8 unused_6;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL
+ __le16 mirror_vnic_id;
+ u8 max_link_width;
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X1 0x1UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X2 0x2UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X4 0x4UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X8 0x8UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X16 0x10UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_LAST FUNC_QCFG_RESP_MAX_LINK_WIDTH_X16
+ u8 max_link_speed;
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G1 0x1UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G2 0x2UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G3 0x3UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G4 0x4UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G5 0x5UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_LAST FUNC_QCFG_RESP_MAX_LINK_SPEED_G5
+ u8 negotiated_link_width;
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X1 0x1UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X2 0x2UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X4 0x4UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X8 0x8UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X16 0x10UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_LAST FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X16
+ u8 negotiated_link_speed;
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G1 0x1UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G2 0x2UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G3 0x3UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G4 0x4UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G5 0x5UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_LAST FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G5
+ u8 unused_7[2];
+ u8 pcie_compliance;
+ u8 unused_8;
+ __le16 l2_db_multi_page_size_kb;
+ u8 unused_9[5];
+ u8 valid;
+};
+
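+/*
+ * Illustrative sketch, not part of the generated interface: min_bw and
+ * max_bw above pack a 28-bit bandwidth value (bits 0-27), a scale flag
+ * (bit 28, bits vs. bytes) and a 3-bit unit field (bits 29-31).  A
+ * hypothetical helper could extract the raw value with the masks defined
+ * above:
+ */
+static inline u32 bnxt_example_bw_value(__le32 bw)
+{
+	u32 v = le32_to_cpu(bw);
+
+	/* the unit is (v & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK) */
+	return (v & FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK) >>
+	       FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT;
+}
+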
+/* hwrm_func_cfg_input (size:1280b/160B) */
+struct hwrm_func_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 num_msix;
+ __le32 flags;
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
+ #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
+ #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
+ #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
+ #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL
+ #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL
+ #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL
+ #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL
+ #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
+ #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
+ #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
+ #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
+ #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
+ #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
+ #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
+ #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
+ #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
+ #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
+ __le32 enables;
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
+ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+ #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
+ #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL
+ #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
+ #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
+ #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
+ #define FUNC_CFG_REQ_ENABLES_MPC_CHNLS 0x2000000UL
+ #define FUNC_CFG_REQ_ENABLES_PARTITION_MIN_BW 0x4000000UL
+ #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL
+ #define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL
+ #define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_TX_KEY_CTXS 0x40000000UL
+ #define FUNC_CFG_REQ_ENABLES_KTLS_RX_KEY_CTXS 0x80000000UL
+ __le16 admin_mtu;
+ __le16 mru;
+ __le16 num_rsscos_ctxs;
+ __le16 num_cmpl_rings;
+ __le16 num_tx_rings;
+ __le16 num_rx_rings;
+ __le16 num_l2_ctxs;
+ __le16 num_vnics;
+ __le16 num_stat_ctxs;
+ __le16 num_hw_ring_grps;
+ u8 dflt_mac_addr[6];
+ __le16 dflt_vlan;
+ __be32 dflt_ip_addr[4];
+ __le32 min_bw;
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 max_bw;
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ __le16 async_event_cr;
+ u8 vlan_antispoof_mode;
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
+ u8 allowed_vlan_pris;
+ u8 evb_mode;
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
+ #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
+ u8 options;
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2)
+ #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
+ __le16 num_mcast_filters;
+ __le16 schq_id;
+ __le16 mpc_chnls;
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_ENABLE 0x1UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_DISABLE 0x2UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_ENABLE 0x4UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_DISABLE 0x8UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_ENABLE 0x10UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_DISABLE 0x20UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_ENABLE 0x40UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE 0x80UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE 0x100UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE 0x200UL
+ __le32 partition_min_bw;
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100
+ __le32 partition_max_bw;
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE 0x10000000UL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
+ __be16 tpid;
+ __le16 host_mtu;
+ __le32 flags2;
+ #define FUNC_CFG_REQ_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST 0x1UL
+ #define FUNC_CFG_REQ_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST 0x2UL
+ __le32 enables2;
+ #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
+ #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
+ #define FUNC_CFG_REQ_ENABLES2_PHYSICAL_SLOT_NUMBER 0x800UL
+ #define FUNC_CFG_REQ_ENABLES2_PCIE_COMPLIANCE 0x1000UL
+ u8 port_kdnet_mode;
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED
+ u8 db_page_size;
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_4KB 0x0UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_8KB 0x1UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_16KB 0x2UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_32KB 0x3UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_64KB 0x4UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_128KB 0x5UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_256KB 0x6UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_512KB 0x7UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_1MB 0x8UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB
+ __le16 physical_slot_number;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 roce_max_av_per_vf;
+ __le32 roce_max_cq_per_vf;
+ __le32 roce_max_mrw_per_vf;
+ __le32 roce_max_qp_per_vf;
+ __le32 roce_max_srq_per_vf;
+ __le32 roce_max_gid_per_vf;
+ __le16 xid_partition_cfg;
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK 0x2UL
+ u8 pcie_compliance;
+ u8 unused_2;
+};
+
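+/*
+ * Illustrative sketch, not part of the generated interface: each optional
+ * field in hwrm_func_cfg_input is honoured only when its bit in 'enables'
+ * is set.  A hypothetical caller changing just the MRU might fill the
+ * request like this (common request-header fields omitted):
+ */
+static inline void bnxt_example_cfg_mru(struct hwrm_func_cfg_input *req,
+					u16 fid, u16 mru)
+{
+	req->fid = cpu_to_le16(fid);
+	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MRU);
+	req->mru = cpu_to_le16(mru);
+}
+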
+/* hwrm_func_cfg_output (size:128b/16B) */
+struct hwrm_func_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_cfg_cmd_err (size:64b/8B) */
+struct hwrm_func_cfg_cmd_err {
+ u8 code;
+ #define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_OUT_OF_RANGE 0x1UL
+ #define FUNC_CFG_CMD_ERR_CODE_NPAR_PARTITION_DOWN_FAILED 0x2UL
+ #define FUNC_CFG_CMD_ERR_CODE_TPID_SET_DFLT_VLAN_NOT_SET 0x3UL
+ #define FUNC_CFG_CMD_ERR_CODE_RES_ARRAY_ALLOC_FAILED 0x4UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_RING_ASSET_TEST_FAILED 0x5UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_RING_RES_UPDATE_FAILED 0x6UL
+ #define FUNC_CFG_CMD_ERR_CODE_APPLY_MAX_BW_FAILED 0x7UL
+ #define FUNC_CFG_CMD_ERR_CODE_ENABLE_EVB_FAILED 0x8UL
+ #define FUNC_CFG_CMD_ERR_CODE_RSS_CTXT_ASSET_TEST_FAILED 0x9UL
+ #define FUNC_CFG_CMD_ERR_CODE_RSS_CTXT_RES_UPDATE_FAILED 0xaUL
+ #define FUNC_CFG_CMD_ERR_CODE_CMPL_RING_ASSET_TEST_FAILED 0xbUL
+ #define FUNC_CFG_CMD_ERR_CODE_CMPL_RING_RES_UPDATE_FAILED 0xcUL
+ #define FUNC_CFG_CMD_ERR_CODE_NQ_ASSET_TEST_FAILED 0xdUL
+ #define FUNC_CFG_CMD_ERR_CODE_NQ_RES_UPDATE_FAILED 0xeUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_RING_ASSET_TEST_FAILED 0xfUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_RING_RES_UPDATE_FAILED 0x10UL
+ #define FUNC_CFG_CMD_ERR_CODE_VNIC_ASSET_TEST_FAILED 0x11UL
+ #define FUNC_CFG_CMD_ERR_CODE_VNIC_RES_UPDATE_FAILED 0x12UL
+ #define FUNC_CFG_CMD_ERR_CODE_FAILED_TO_START_STATS_THREAD 0x13UL
+ #define FUNC_CFG_CMD_ERR_CODE_RDMA_SRIOV_DISABLED 0x14UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_DISABLED 0x15UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_ASSET_TEST_FAILED 0x16UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_RES_UPDATE_FAILED 0x17UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_DISABLED 0x18UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_ASSET_TEST_FAILED 0x19UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_RES_UPDATE_FAILED 0x1aUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_DISABLED 0x1bUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_ASSET_TEST_FAILED 0x1cUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_RES_UPDATE_FAILED 0x1dUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_DISABLED 0x1eUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_ASSET_TEST_FAILED 0x1fUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_RES_UPDATE_FAILED 0x20UL
+ #define FUNC_CFG_CMD_ERR_CODE_INVALID_KDNET_MODE 0x21UL
+ #define FUNC_CFG_CMD_ERR_CODE_SCHQ_CFG_FAIL 0x22UL
+ #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_SCHQ_CFG_FAIL
+ u8 unused_0[7];
+};
+
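+/*
+ * Illustrative sketch, not part of the generated interface: the cmd_err
+ * structure appears to accompany command-specific failures of
+ * hwrm_func_cfg; a driver might translate a few codes for logging.  The
+ * helper and its (partial) coverage are hypothetical.
+ */
+static inline const char *bnxt_example_func_cfg_err_str(u8 code)
+{
+	switch (code) {
+	case FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_OUT_OF_RANGE:
+		return "partition bandwidth out of range";
+	case FUNC_CFG_CMD_ERR_CODE_TPID_SET_DFLT_VLAN_NOT_SET:
+		return "TPID set while default VLAN is not set";
+	default:
+		return "unknown func_cfg error";
+	}
+}
+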
+/* hwrm_func_qstats_input (size:192b/24B) */
+struct hwrm_func_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_REQ_FLAGS_L2_ONLY 0x4UL
+ u8 unused_0[5];
+};
+
+/* hwrm_func_qstats_output (size:1408b/176B) */
+struct hwrm_func_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 clear_seq;
+ u8 unused_0[6];
+ u8 valid;
+};
+
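+/*
+ * Illustrative sketch, not part of the generated interface: every counter
+ * in the qstats response is a little-endian 64-bit value and needs
+ * conversion before use.  A hypothetical aggregate:
+ */
+static inline u64 bnxt_example_total_rx_pkts(const struct hwrm_func_qstats_output *resp)
+{
+	return le64_to_cpu(resp->rx_ucast_pkts) +
+	       le64_to_cpu(resp->rx_mcast_pkts) +
+	       le64_to_cpu(resp->rx_bcast_pkts);
+}
+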
+/* hwrm_func_qstats_ext_input (size:256b/32B) */
+struct hwrm_func_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 flags;
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
+ u8 unused_0[1];
+ __le32 enables;
+ #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
+ __le16 schq_id;
+ __le16 traffic_class;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_qstats_ext_output (size:1536b/192B) */
+struct hwrm_func_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_clr_stats_input (size:192b/24B) */
+struct hwrm_func_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_clr_stats_output (size:128b/16B) */
+struct hwrm_func_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_vf_resc_free_input (size:192b/24B) */
+struct hwrm_func_vf_resc_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_vf_resc_free_output (size:128b/16B) */
+struct hwrm_func_vf_resc_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_rgtr_input (size:896b/112B) */
+struct hwrm_func_drv_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL
+ __le32 enables;
+ #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
+ #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
+ __le16 os_type;
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
+ u8 ver_maj_8b;
+ u8 ver_min_8b;
+ u8 ver_upd_8b;
+ u8 unused_0[3];
+ __le32 timestamp;
+ u8 unused_1[4];
+ __le32 vf_req_fwd[8];
+ __le32 async_event_fwd[8];
+ __le16 ver_maj;
+ __le16 ver_min;
+ __le16 ver_upd;
+ __le16 ver_patch;
+};
+
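+/*
+ * Illustrative sketch, not part of the generated interface: vf_req_fwd and
+ * async_event_fwd are 256-bit bitmaps stored as eight little-endian 32-bit
+ * words; setting bit N asks firmware to forward request/event ID N.  A
+ * hypothetical helper:
+ */
+static inline void bnxt_example_fwd_async_event(struct hwrm_func_drv_rgtr_input *req,
+						unsigned int event_id)
+{
+	req->async_event_fwd[event_id / 32] |=
+		cpu_to_le32(1U << (event_id % 32));
+}
+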
+/* hwrm_func_drv_rgtr_output (size:128b/16B) */
+struct hwrm_func_drv_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
+struct hwrm_func_drv_unrgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
+ u8 unused_0[4];
+};
+
+/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
+struct hwrm_func_drv_unrgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
+struct hwrm_func_buf_rgtr_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
+ #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
+ __le16 vf_id;
+ __le16 req_buf_num_pages;
+ __le16 req_buf_page_size;
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
+ __le16 req_buf_len;
+ __le16 resp_buf_len;
+ u8 unused_0[2];
+ __le64 req_buf_page_addr0;
+ __le64 req_buf_page_addr1;
+ __le64 req_buf_page_addr2;
+ __le64 req_buf_page_addr3;
+ __le64 req_buf_page_addr4;
+ __le64 req_buf_page_addr5;
+ __le64 req_buf_page_addr6;
+ __le64 req_buf_page_addr7;
+ __le64 req_buf_page_addr8;
+ __le64 req_buf_page_addr9;
+ __le64 error_buf_addr;
+ __le64 resp_buf_addr;
+};
+
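+/*
+ * Illustrative note, not part of the generated interface: the
+ * req_buf_page_size encoding above appears to be log2 of the page size in
+ * bytes (0x4 = 16B, 0xc = 4KB, ..., 0x1e = 1GB).  A hypothetical setter:
+ */
+static inline void bnxt_example_buf_rgtr_page_size(struct hwrm_func_buf_rgtr_input *req,
+						   unsigned int page_shift)
+{
+	/* e.g. page_shift == 12 gives FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K */
+	req->req_buf_page_size = cpu_to_le16(page_shift);
+}
+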
+/* hwrm_func_buf_rgtr_output (size:128b/16B) */
+struct hwrm_func_buf_rgtr_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_drv_qver_input (size:192b/24B) */
+struct hwrm_func_drv_qver_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 reserved;
+ __le16 fid;
+ u8 driver_type;
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_L2 0x0UL
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE 0x1UL
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_LAST FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE
+ u8 unused_0;
+};
+
+/* hwrm_func_drv_qver_output (size:256b/32B) */
+struct hwrm_func_drv_qver_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 os_type;
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
+ u8 ver_maj_8b;
+ u8 ver_min_8b;
+ u8 ver_upd_8b;
+ u8 unused_0[3];
+ __le16 ver_maj;
+ __le16 ver_min;
+ __le16 ver_upd;
+ __le16 ver_patch;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_func_resource_qcaps_output (size:704b/88B) */
+struct hwrm_func_resource_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 max_vfs;
+ __le16 max_msix;
+ __le16 vf_reservation_strategy;
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ __le16 max_tx_scheduler_inputs;
+ __le16 flags;
+ #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_msix;
+ __le32 min_ktls_tx_key_ctxs;
+ __le32 max_ktls_tx_key_ctxs;
+ __le32 min_ktls_rx_key_ctxs;
+ __le32 max_ktls_rx_key_ctxs;
+ __le32 min_quic_tx_key_ctxs;
+ __le32 max_quic_tx_key_ctxs;
+ __le32 min_quic_rx_key_ctxs;
+ __le32 max_quic_rx_key_ctxs;
+ u8 unused_0[3];
+ u8 valid;
+};
+
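+/*
+ * Illustrative sketch, not part of the generated interface: the min/max
+ * pairs above bound what may later be requested per VF through
+ * hwrm_func_vf_resource_cfg.  A hypothetical clamp for one resource:
+ */
+static inline __le16 bnxt_example_clamp_tx_rings(const struct hwrm_func_resource_qcaps_output *caps,
+						 u16 want)
+{
+	u16 lo = le16_to_cpu(caps->min_tx_rings);
+	u16 hi = le16_to_cpu(caps->max_tx_rings);
+
+	if (want < lo)
+		want = lo;
+	if (want > hi)
+		want = hi;
+	return cpu_to_le16(want);
+}
+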
+/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */
+struct hwrm_func_vf_resource_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 max_msix;
+ __le16 min_rsscos_ctx;
+ __le16 max_rsscos_ctx;
+ __le16 min_cmpl_rings;
+ __le16 max_cmpl_rings;
+ __le16 min_tx_rings;
+ __le16 max_tx_rings;
+ __le16 min_rx_rings;
+ __le16 max_rx_rings;
+ __le16 min_l2_ctxs;
+ __le16 max_l2_ctxs;
+ __le16 min_vnics;
+ __le16 max_vnics;
+ __le16 min_stat_ctx;
+ __le16 max_stat_ctx;
+ __le16 min_hw_ring_grps;
+ __le16 max_hw_ring_grps;
+ __le16 flags;
+ #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
+ __le16 min_msix;
+ __le32 min_ktls_tx_key_ctxs;
+ __le32 max_ktls_tx_key_ctxs;
+ __le32 min_ktls_rx_key_ctxs;
+ __le32 max_ktls_rx_key_ctxs;
+ __le32 min_quic_tx_key_ctxs;
+ __le32 max_quic_tx_key_ctxs;
+ __le32 min_quic_rx_key_ctxs;
+ __le32 max_quic_rx_key_ctxs;
+};
+
+/* hwrm_func_vf_resource_cfg_output (size:384b/48B) */
+struct hwrm_func_vf_resource_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reserved_rsscos_ctx;
+ __le16 reserved_cmpl_rings;
+ __le16 reserved_tx_rings;
+ __le16 reserved_rx_rings;
+ __le16 reserved_l2_ctxs;
+ __le16 reserved_vnics;
+ __le16 reserved_stat_ctx;
+ __le16 reserved_hw_ring_grps;
+ __le32 reserved_ktls_tx_key_ctxs;
+ __le32 reserved_ktls_rx_key_ctxs;
+ __le32 reserved_quic_tx_key_ctxs;
+ __le32 reserved_quic_rx_key_ctxs;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
+struct hwrm_func_backing_store_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_backing_store_qcaps_output (size:832b/104B) */
+struct hwrm_func_backing_store_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 qp_max_entries;
+ __le16 qp_min_qp1_entries;
+ __le16 qp_max_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_max_l2_entries;
+ __le32 srq_max_entries;
+ __le16 srq_entry_size;
+ __le16 cq_max_l2_entries;
+ __le32 cq_max_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_max_vnic_entries;
+ __le16 vnic_max_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le32 stat_max_entries;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le32 tqm_min_entries_per_ring;
+ __le32 tqm_max_entries_per_ring;
+ __le32 mrav_max_entries;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+ __le32 tim_max_entries;
+ __le16 mrav_num_entries_units;
+ u8 tqm_entries_multiple;
+ u8 ctx_kind_initializer;
+ __le16 ctx_init_mask;
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_TKC 0x40UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_RKC 0x80UL
+ u8 qp_init_offset;
+ u8 srq_init_offset;
+ u8 cq_init_offset;
+ u8 vnic_init_offset;
+ u8 tqm_fp_rings_count;
+ u8 stat_init_offset;
+ u8 mrav_init_offset;
+ u8 tqm_fp_rings_count_ext;
+ u8 tkc_init_offset;
+ u8 rkc_init_offset;
+ __le16 tkc_entry_size;
+ __le16 rkc_entry_size;
+ __le32 tkc_max_entries;
+ __le32 rkc_max_entries;
+ __le16 fast_qpmd_qp_num_entries;
+ u8 rsvd1[5];
+ u8 valid;
+};
+
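+/*
+ * Illustrative sketch, not part of the generated interface: each backing
+ * store region is sized from the max-entries count and per-entry size
+ * reported above; the host allocates that memory and hands it back via
+ * hwrm_func_backing_store_cfg below.  A hypothetical size calculation for
+ * the QP context:
+ */
+static inline u64 bnxt_example_qp_ctx_bytes(const struct hwrm_func_backing_store_qcaps_output *resp)
+{
+	return (u64)le32_to_cpu(resp->qp_max_entries) *
+	       le16_to_cpu(resp->qp_entry_size);
+}
+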
+/* tqm_fp_ring_cfg (size:128b/16B) */
+struct tqm_fp_ring_cfg {
+ u8 tqm_ring_pg_size_tqm_ring_lvl;
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G
+ u8 unused[3];
+ __le32 tqm_ring_num_entries;
+ __le64 tqm_ring_page_dir;
+};
+
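+/*
+ * Illustrative sketch, not part of the generated interface: the combined
+ * pg_size/lvl byte packs the page-table depth in bits 0-3 and the page
+ * size in bits 4-7, so the two encodings are simply OR'ed together:
+ */
+static inline u8 bnxt_example_tqm_pg_lvl(void)
+{
+	/* one level of indirection, 4KB pages */
+	return TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 |
+	       TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K;
+}
+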
+/* hwrm_func_backing_store_cfg_input (size:2688b/336B) */
+struct hwrm_func_backing_store_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD 0x200000UL
+ u8 qpc_pg_size_qpc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G
+ u8 srq_pg_size_srq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G
+ u8 cq_pg_size_cq_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G
+ u8 vnic_pg_size_vnic_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G
+ u8 stat_pg_size_stat_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G
+ u8 tqm_sp_pg_size_tqm_sp_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G
+ u8 tqm_ring0_pg_size_tqm_ring0_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G
+ u8 tqm_ring1_pg_size_tqm_ring1_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G
+ u8 tqm_ring2_pg_size_tqm_ring2_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G
+ u8 tqm_ring3_pg_size_tqm_ring3_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G
+ u8 tqm_ring4_pg_size_tqm_ring4_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G
+ u8 tqm_ring5_pg_size_tqm_ring5_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G
+ u8 tqm_ring6_pg_size_tqm_ring6_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G
+ u8 tqm_ring7_pg_size_tqm_ring7_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G
+ u8 mrav_pg_size_mrav_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G
+ u8 tim_pg_size_tim_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G
+ __le64 qpc_page_dir;
+ __le64 srq_page_dir;
+ __le64 cq_page_dir;
+ __le64 vnic_page_dir;
+ __le64 stat_page_dir;
+ __le64 tqm_sp_page_dir;
+ __le64 tqm_ring0_page_dir;
+ __le64 tqm_ring1_page_dir;
+ __le64 tqm_ring2_page_dir;
+ __le64 tqm_ring3_page_dir;
+ __le64 tqm_ring4_page_dir;
+ __le64 tqm_ring5_page_dir;
+ __le64 tqm_ring6_page_dir;
+ __le64 tqm_ring7_page_dir;
+ __le64 mrav_page_dir;
+ __le64 tim_page_dir;
+ __le32 qp_num_entries;
+ __le32 srq_num_entries;
+ __le32 cq_num_entries;
+ __le32 stat_num_entries;
+ __le32 tqm_sp_num_entries;
+ __le32 tqm_ring0_num_entries;
+ __le32 tqm_ring1_num_entries;
+ __le32 tqm_ring2_num_entries;
+ __le32 tqm_ring3_num_entries;
+ __le32 tqm_ring4_num_entries;
+ __le32 tqm_ring5_num_entries;
+ __le32 tqm_ring6_num_entries;
+ __le32 tqm_ring7_num_entries;
+ __le32 mrav_num_entries;
+ __le32 tim_num_entries;
+ __le16 qp_num_qp1_entries;
+ __le16 qp_num_l2_entries;
+ __le16 qp_entry_size;
+ __le16 srq_num_l2_entries;
+ __le16 srq_entry_size;
+ __le16 cq_num_l2_entries;
+ __le16 cq_entry_size;
+ __le16 vnic_num_vnic_entries;
+ __le16 vnic_num_ring_table_entries;
+ __le16 vnic_entry_size;
+ __le16 stat_entry_size;
+ __le16 tqm_entry_size;
+ __le16 mrav_entry_size;
+ __le16 tim_entry_size;
+ u8 tqm_ring8_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G
+ u8 ring8_unused[3];
+ __le32 tqm_ring8_num_entries;
+ __le64 tqm_ring8_page_dir;
+ u8 tqm_ring9_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G
+ u8 ring9_unused[3];
+ __le32 tqm_ring9_num_entries;
+ __le64 tqm_ring9_page_dir;
+ u8 tqm_ring10_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G
+ u8 ring10_unused[3];
+ __le32 tqm_ring10_num_entries;
+ __le64 tqm_ring10_page_dir;
+ __le32 tkc_num_entries;
+ __le32 rkc_num_entries;
+ __le64 tkc_page_dir;
+ __le64 rkc_page_dir;
+ __le16 tkc_entry_size;
+ __le16 rkc_entry_size;
+ u8 tkc_pg_size_tkc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G
+ u8 rkc_pg_size_rkc_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G
+ __le16 qp_num_fast_qpmd_entries;
+};
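+
+/*
+ * Illustrative sketch (not part of the generated interface): each
+ * *_pg_size_*_lvl byte above packs the PBL indirection level in bits
+ * 3:0 and the backing-store page size in bits 7:4, so a driver would
+ * compose it by OR-ing one value from each group, e.g.:
+ *
+ *	req->mrav_pg_size_mrav_lvl =
+ *		FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 |
+ *		FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K;
+ *
+ * where "req" is an assumed local pointing at this request structure.
+ */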
+
+/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
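+
+/*
+ * Driver-side note (hedged): HWRM responses share this layout: a
+ * header of error_code/req_type/seq_id/resp_len followed by a trailing
+ * "valid" byte.  Firmware DMAs the response into host memory, so a
+ * driver typically polls that last byte and only trusts the rest of
+ * the buffer once it reads back nonzero, e.g.:
+ *
+ *	if (READ_ONCE(resp->valid))
+ *		rc = le16_to_cpu(resp->error_code);
+ *
+ * "resp" and "rc" are assumed locals, not part of this header.
+ */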
+
+/* hwrm_error_recovery_qcfg_input (size:192b/24B) */
+struct hwrm_error_recovery_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */
+struct hwrm_error_recovery_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU 0x2UL
+ __le32 driver_polling_freq;
+ __le32 master_func_wait_period;
+ __le32 normal_func_wait_period;
+ __le32 master_func_wait_period_after_reset;
+ __le32 max_bailout_time_after_reset;
+ __le32 fw_health_status_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SFT 2
+ __le32 fw_heartbeat_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SFT 2
+ __le32 fw_reset_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SFT 2
+ __le32 reset_inprogress_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SFT 2
+ __le32 reset_inprogress_reg_mask;
+ u8 unused_0[3];
+ u8 reg_array_cnt;
+ __le32 reset_reg[16];
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2
+ __le32 reset_reg_val[16];
+ u8 delay_after_reset[16];
+ __le32 err_recovery_cnt_reg;
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL
+ #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2
+ u8 unused_1[3];
+ u8 valid;
+};
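+
+/*
+ * Illustrative decode (assumed driver-side code, not generated): each
+ * *_reg word above encodes the address space in bits 1:0 and a
+ * 4-byte-aligned register offset in bits 31:2, e.g. for the firmware
+ * health status register:
+ *
+ *	u32 reg = le32_to_cpu(resp->fw_health_status_reg);
+ *	u32 space = reg & ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK;
+ *	u32 offset = reg & ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK;
+ */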
+
+/* hwrm_func_echo_response_input (size:192b/24B) */
+struct hwrm_func_echo_response_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 event_data1;
+ __le32 event_data2;
+};
+
+/* hwrm_func_echo_response_output (size:128b/16B) */
+struct hwrm_func_echo_response_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_pin_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_pin_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_pin_qcfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_pins;
+ u8 state;
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN0_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN1_ENABLED 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN2_ENABLED 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN3_ENABLED 0x8UL
+ u8 pin0_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT
+ u8 pin1_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT
+ u8 pin2_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 pin3_usage;
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 unused_0;
+ u8 valid;
+};
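+
+/*
+ * Illustrative check (hedged; "resp" and "usage" are assumed locals):
+ * state is a bitmask with one enable bit per pin and num_pins reports
+ * how many pins exist, so pin 1 is active and its role known when:
+ *
+ *	if (resp->state & FUNC_PTP_PIN_QCFG_RESP_STATE_PIN1_ENABLED)
+ *		usage = resp->pin1_usage;
+ */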
+
+/* hwrm_func_ptp_pin_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_pin_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_STATE 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_USAGE 0x8UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_STATE 0x10UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_USAGE 0x20UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_STATE 0x40UL
+ #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_USAGE 0x80UL
+ u8 pin0_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED
+ u8 pin0_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT
+ u8 pin1_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED
+ u8 pin1_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT
+ u8 pin2_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED
+ u8 pin2_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 pin3_state;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED
+ u8 pin3_usage;
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
+ u8 unused_0[4];
+};
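+
+/*
+ * Illustrative request setup (hedged; "req" is an assumed local): a
+ * pinN_state/pinN_usage pair is only consumed when the matching bit is
+ * set in enables, so driving a PPS output on pin 0 looks roughly like:
+ *
+ *	req->enables = cpu_to_le32(FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE |
+ *				   FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE);
+ *	req->pin0_state = FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED;
+ *	req->pin0_usage = FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT;
+ */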
+
+/* hwrm_func_ptp_pin_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_pin_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_cfg_input (size:384b/48B) */
+struct hwrm_func_ptp_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT 0x1UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_SOURCE 0x2UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_PHASE 0x4UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME 0x40UL
+ u8 ptp_pps_event;
+ #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL
+ u8 ptp_freq_adj_dll_source;
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_NONE 0x0UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_0 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_1 0x2UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_2 0x3UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_3 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_0 0x5UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_1 0x6UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_2 0x7UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_3 0x8UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID 0xffUL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID
+ u8 ptp_freq_adj_dll_phase;
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_NONE 0x0UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M 0x4UL
+ #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M
+ u8 unused_0[3];
+ __le32 ptp_freq_adj_ext_period;
+ __le32 ptp_freq_adj_ext_up;
+ __le32 ptp_freq_adj_ext_phase_lower;
+ __le32 ptp_freq_adj_ext_phase_upper;
+ __le64 ptp_set_time;
+};
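+
+/*
+ * Illustrative use (hedged; "req" and "ns" are assumed): as with the
+ * pin command, each field is gated by its enables bit, so setting the
+ * PHC to an absolute time reduces to:
+ *
+ *	req->enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME);
+ *	req->ptp_set_time = cpu_to_le64(ns);
+ */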
+
+/* hwrm_func_ptp_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ts_query_input (size:192b/24B) */
+struct hwrm_func_ptp_ts_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PPS_TIME 0x1UL
+ #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME 0x2UL
+ u8 unused_0[4];
+};
+
+/* hwrm_func_ptp_ts_query_output (size:320b/40B) */
+struct hwrm_func_ptp_ts_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 pps_event_ts;
+ __le64 ptm_local_ts;
+ __le64 ptm_system_ts;
+ __le32 ptm_link_delay;
+ u8 unused_0[3];
+ u8 valid;
+};
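+
+/*
+ * Hedged usage sketch: the request flags select which timestamps the
+ * response carries; FLAGS_PPS_TIME fills pps_event_ts, while
+ * FLAGS_PTM_TIME fills the PCIe PTM pair ptm_local_ts/ptm_system_ts
+ * plus ptm_link_delay.  Assuming "req"/"resp"/"local" locals:
+ *
+ *	req->flags = cpu_to_le32(FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME);
+ *	... send and wait for the response ...
+ *	local = le64_to_cpu(resp->ptm_local_ts);
+ */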
+
+/* hwrm_func_ptp_ext_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_ext_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_MASTER_FID 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_FID 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_MODE 0x4UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_FAILOVER_TIMER 0x8UL
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ u8 phc_sec_mode;
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_SWITCH 0x0UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_ALL 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_LAST FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY
+ u8 unused_0;
+ __le32 failover_timer;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_ptp_ext_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_ext_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ext_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_ext_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_ext_qcfg_output (size:256b/32B) */
+struct hwrm_func_ptp_ext_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ __le16 phc_active_fid0;
+ __le16 phc_active_fid1;
+ __le32 last_failover_event;
+ __le16 from_fid;
+ __le16 to_fid;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_cfg_v2_input (size:512b/64B) */
+struct hwrm_func_backing_store_cfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_EXTEND 0x4UL
+ __le64 page_dir;
+ __le32 num_entries;
+ __le16 entry_size;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET 0x1UL
+ __le32 next_bs_offset;
+};
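+
+/*
+ * Illustrative v2 request (hedged; "req", "pbl_dma_addr" and "n" are
+ * assumed): unlike the legacy command above, _cfg_v2 configures one
+ * context type per call, selected by type/instance, with the same
+ * level/page-size nibble packing in page_size_pbl_level:
+ *
+ *	req->type = cpu_to_le16(FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP);
+ *	req->page_dir = cpu_to_le64(pbl_dma_addr);
+ *	req->num_entries = cpu_to_le32(n);
+ *	req->page_size_pbl_level =
+ *		FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 |
+ *		FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K;
+ */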
+
+/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 rsvd0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_qcfg_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ u8 rsvd[4];
+};
+
+/* hwrm_func_backing_store_qcfg_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ERR_QPC_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ __le64 page_dir;
+ __le32 num_entries;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ u8 rsvd[2];
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ u8 rsvd2[7];
+ u8 valid;
+};
+
+/* qpc_split_entries (size:128b/16B) */
+struct qpc_split_entries {
+ __le32 qp_num_l2_entries;
+ __le32 qp_num_qp1_entries;
+ __le32 qp_num_fast_qpmd_entries;
+ __le32 rsvd;
+};
+
+/* srq_split_entries (size:128b/16B) */
+struct srq_split_entries {
+ __le32 srq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* cq_split_entries (size:128b/16B) */
+struct cq_split_entries {
+ __le32 cq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* vnic_split_entries (size:128b/16B) */
+struct vnic_split_entries {
+ __le32 vnic_num_vnic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* mrav_split_entries (size:128b/16B) */
+struct mrav_split_entries {
+ __le32 mrav_num_av_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* ts_split_entries (size:128b/16B) */
+struct ts_split_entries {
+ __le32 region_num_entries;
+ u8 tsid;
+ u8 lkup_static_bkt_cnt_exp[2];
+ u8 locked;
+ __le32 rsvd2[2];
+};
+
+/* ck_split_entries (size:128b/16B) */
+struct ck_split_entries {
+ __le32 num_quic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
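+
+/*
+ * Interpretation note (hedged): the *_split_entries structures above
+ * appear to give the per-type meaning of the generic split_entry_0..3
+ * words in the _cfg_v2/_qcfg_v2 messages, in field order; e.g. for the
+ * QP type, split_entry_0 maps to qp_num_l2_entries, split_entry_1 to
+ * qp_num_qp1_entries and split_entry_2 to qp_num_fast_qpmd_entries.
+ */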
+
+/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcaps_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ u8 rsvd[6];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcaps_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ERR_QPC_TRACE 0x2bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ __le16 entry_size;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_BIN_DBG_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_NEXT_BS_OFFSET 0x40UL
+ __le32 instance_bit_map;
+ u8 ctx_init_value;
+ u8 ctx_init_offset;
+ u8 entry_multiple;
+ u8 rsvd;
+ __le32 max_num_entries;
+ __le32 min_num_entries;
+ __le16 next_valid_type;
+ u8 subtype_valid_cnt;
+ u8 exact_cnt_bit_map;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_1_EXACT 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_2_EXACT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_3_EXACT 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_SFT 4
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ __le16 max_instance_count;
+ u8 rsvd3;
+ u8 valid;
+};
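+
+/*
+ * Hedged discovery sketch: next_valid_type lets a driver enumerate the
+ * supported context types without probing the whole 16-bit type space;
+ * start at TYPE_QP and keep re-issuing the query until the chain ends:
+ *
+ *	u16 type = FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP;
+ *
+ *	do {
+ *		... issue qcaps_v2 for "type" ...
+ *		type = le16_to_cpu(resp->next_valid_type);
+ *	} while (type != FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID);
+ */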
+
+/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */
+struct hwrm_func_dbr_pacing_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */
+struct hwrm_func_dbr_pacing_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
+ u8 unused_0[7];
+ __le32 dbr_stat_db_fifo_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
+ __le32 dbr_stat_db_fifo_reg_watermark_mask;
+ u8 dbr_stat_db_fifo_reg_watermark_shift;
+ u8 unused_1[3];
+ __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
+ u8 dbr_stat_db_fifo_reg_fifo_room_shift;
+ u8 unused_2[3];
+ __le32 dbr_throttling_aeq_arm_reg;
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
+ #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
+ u8 dbr_throttling_aeq_arm_reg_val;
+ u8 unused_3[3];
+ __le32 dbr_stat_db_max_fifo_depth;
+ __le32 primary_nq_id;
+ __le32 pacing_threshold;
+ u8 unused_4[7];
+ u8 valid;
+};
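+
+/*
+ * Illustrative decode (hedged; "resp" and the mapped "db_fifo" address
+ * are assumed): the FIFO room is extracted from the doorbell status
+ * register using the mask and shift the response itself supplies:
+ *
+ *	u32 room = (readl(db_fifo) &
+ *		    le32_to_cpu(resp->dbr_stat_db_fifo_reg_fifo_room_mask)) >>
+ *		   resp->dbr_stat_db_fifo_reg_fifo_room_shift;
+ */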
+
+/* hwrm_func_drv_if_change_input (size:192b/24B) */
+struct hwrm_func_drv_if_change_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL
+ __le32 unused;
+};
+
+/* hwrm_func_drv_if_change_output (size:128b/16B) */
+struct hwrm_func_drv_if_change_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE 0x4UL
+ u8 unused_0[3];
+ u8 valid;
+};
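+
+/*
+ * Hedged usage note: a driver typically issues this with FLAGS_UP set
+ * as the interface comes up; the response flags then report whether a
+ * firmware reset or a resource change happened while it was down:
+ *
+ *	if (le32_to_cpu(resp->flags) &
+ *	    FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
+ *		... re-initialize after the firmware reset ...
+ */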
+
+/* hwrm_port_phy_cfg_input (size:512b/64B) */
+struct hwrm_port_phy_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE 0x20000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE 0x40000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE 0x80000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_LINK_TRAINING_ENABLE 0x800000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_LINK_TRAINING_DISABLE 0x1000000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_PRECODING_ENABLE 0x2000000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_PRECODING_DISABLE 0x4000000UL
+ __le32 enables;
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2 0x2000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK 0x4000UL
+ __le16 port_id;
+ __le16 force_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
+ u8 auto_duplex;
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
+ u8 auto_pause;
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ u8 mgmt_flag;
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_LINK_RELEASE 0x1UL
+ #define PORT_PHY_CFG_REQ_MGMT_FLAG_MGMT_VALID 0x80UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL
+ u8 force_pause;
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
+ u8 unused_1;
+ __le32 preemphasis;
+ __le16 eee_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
+ __le32 tx_lpi_timer;
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
+ __le16 auto_link_pam4_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ __le16 auto_link_speeds2_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_25GB 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_40GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL
+ u8 unused_2[6];
+};
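+
+/*
+ * Illustrative forced-speed request (hedged; "req" and "port_id" are
+ * assumed): link speeds are encoded in units of 100 Mbps (1GB = 0xa,
+ * 100GB = 0x3e8), with 10MB as the 0xffff special case.  Forcing 100G
+ * looks roughly like:
+ *
+ *	req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
+ *	req->force_link_speed =
+ *		cpu_to_le16(PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB);
+ *	req->port_id = cpu_to_le16(port_id);
+ */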
+
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
+struct hwrm_port_phy_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
+struct hwrm_port_phy_cfg_cmd_err {
+ u8 code;
+ #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY
+ u8 unused_0[7];
+};
+
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
+struct hwrm_port_phy_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_phy_qcfg_output (size:832b/104B) */
+struct hwrm_port_phy_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
+ u8 active_fec_signal_mode;
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (0x1UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (0x2UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (0x3UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (0x4UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (0x5UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (0x6UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_LAST PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE
+ __le16 link_speed;
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
+ u8 duplex_cfg;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
+ u8 pause;
+ #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
+ __le16 support_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
+ __le16 force_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
+ u8 auto_mode;
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
+ u8 auto_pause;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+ __le16 auto_link_speed;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
+ __le16 auto_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+ u8 wirespeed;
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
+ u8 lpbk;
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL
+ u8 force_pause;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
+ u8 module_status;
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_OVERHEATED 0x6UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR 0x28UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR 0x29UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR 0x2aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER 0x2bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2 0x2cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2 0x2dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2 0x2eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2 0x2fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8 0x30UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8 0x31UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8 0x32UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8 0x33UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4 0x34UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
+ u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE 0x4UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE
+ u8 xcvr_pkg_type;
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
+ u8 eee_config_phy_addr;
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
+ #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
+ u8 parallel_detect;
+ #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
+ __le16 link_partner_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
+ u8 link_partner_adv_auto_mode;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
+ u8 link_partner_adv_pause;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+ __le16 adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 link_partner_adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPDD (0x18UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP112 (0x1eUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFPDD (0x1fUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP (0x20UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP
+ __le16 fec_cfg;
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_SUPPORTED 0x200UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED 0x400UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED 0x800UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED 0x1000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED 0x2000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED 0x4000UL
+ u8 duplex_state;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
+ u8 option_flags;
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SPEEDS2_SUPPORTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_LINK_TRAINING 0x8UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_PRECODING 0x10UL
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le16 support_pam4_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB
+ __le16 auto_pam4_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
+ u8 link_partner_pam4_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
+ u8 link_down_reason;
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_TX_LASER_DISABLED 0x20UL
+ __le16 support_speeds2;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_800GB_PAM4_112 0x2000UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ __le16 auto_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL
+ u8 active_lanes;
+ u8 valid;
+};
+
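/*
 * Annotation (not part of the patch): active_fec_signal_mode packs the
 * NRZ/PAM4 signal mode and the active FEC clause into one byte, and the
 * link_speed codes are the speed in 100 Mbps units (0xa = 1 Gbps,
 * 0x64 = 10 Gbps, 0x3e8 = 100 Gbps) with 0xffff as the special 10 Mbps
 * code. An illustrative decode using only the masks above; the helper
 * name is hypothetical and kernel types/le16_to_cpu() are assumed.
 */
static u32 sketch_link_speed_mbps(const struct hwrm_port_phy_qcfg_output *resp)
{
	u8 sig = resp->active_fec_signal_mode &
		 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
	u8 fec = (resp->active_fec_signal_mode &
		  PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK) >>
		 PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT;
	u16 code = le16_to_cpu(resp->link_speed);

	(void)sig;	/* NRZ / PAM4 / PAM4-112 */
	(void)fec;	/* none / Clause 74 / Clause 91 / RS544 / RS272 */
	if (code == PORT_PHY_QCFG_RESP_LINK_SPEED_10MB)
		return 10;	/* special encoding for 10 Mbps */
	return code * 100;	/* all other codes are in 100 Mbps units */
}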
+/* hwrm_port_mac_cfg_input (size:448b/56B) */
+struct hwrm_port_mac_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE 0x4000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE 0x8000UL
+ __le32 enables;
+ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_LOAD_CONTROL 0x800UL
+ __le16 port_id;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
+ u8 vlan_pri2cos_map_pri;
+ u8 reserved1;
+ u8 tunnel_pri2cos_map_pri;
+ u8 dscp2pri_map_pri;
+ __le16 rx_ts_capture_ptp_msg_type;
+ __le16 tx_ts_capture_ptp_msg_type;
+ u8 cos_field_cfg;
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ u8 unused_0[3];
+ __le32 ptp_freq_adj_ppb;
+ u8 unused_1[3];
+ u8 ptp_load_control;
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_IMMEDIATE 0x1UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT 0x2UL
+ #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_LAST PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT
+ __le64 ptp_adj_phase;
+};
+
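/*
 * Annotation (not part of the patch): as with the other HWRM *_cfg
 * requests, the 'enables' bitmap tells firmware which optional request
 * fields carry meaningful values; fields whose enable bit is clear are
 * ignored. A sketch (hypothetical helper, kernel context assumed) that
 * adjusts only the PTP frequency and leaves everything else untouched:
 */
static void sketch_set_ptp_freq(struct hwrm_port_mac_cfg_input *req, s32 ppb)
{
	req->enables |= cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
	req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
}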
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
+struct hwrm_port_mac_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ __le16 mtu;
+ u8 ipg;
+ u8 lpbk;
+ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_ptp_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_mac_ptp_qcfg_output (size:704b/88B) */
+struct hwrm_port_mac_ptp_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME 0x40UL
+ u8 unused_0[3];
+ __le32 rx_ts_reg_off_lower;
+ __le32 rx_ts_reg_off_upper;
+ __le32 rx_ts_reg_off_seq_id;
+ __le32 rx_ts_reg_off_src_id_0;
+ __le32 rx_ts_reg_off_src_id_1;
+ __le32 rx_ts_reg_off_src_id_2;
+ __le32 rx_ts_reg_off_domain_id;
+ __le32 rx_ts_reg_off_fifo;
+ __le32 rx_ts_reg_off_fifo_adv;
+ __le32 rx_ts_reg_off_granularity;
+ __le32 tx_ts_reg_off_lower;
+ __le32 tx_ts_reg_off_upper;
+ __le32 tx_ts_reg_off_seq_id;
+ __le32 tx_ts_reg_off_fifo;
+ __le32 tx_ts_reg_off_granularity;
+ __le32 ts_ref_clock_reg_lower;
+ __le32 ts_ref_clock_reg_upper;
+ u8 unused_1[7];
+ u8 valid;
+};
+
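/*
 * Annotation (not part of the patch): when FLAGS_DIRECT_ACCESS is set,
 * the *_reg_off_* fields are register offsets the host may read directly
 * instead of round-tripping through HWRM, with time values split across
 * lower/upper 32-bit registers. A generic rollover-safe read pattern
 * under that assumption (a sketch, not the bnxt PTP code; readl() and a
 * mapped BAR pointer are the assumed access mechanism):
 */
static u64 sketch_read_split_counter(void __iomem *bar, u32 lo_off, u32 hi_off)
{
	u32 hi, hi2, lo;

	do {
		hi = readl(bar + hi_off);
		lo = readl(bar + lo_off);
		hi2 = readl(bar + hi_off);
	} while (hi != hi2);	/* retry if the upper half rolled over */

	return ((u64)hi << 32) | lo;
}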
+/* tx_port_stats (size:3264b/408B) */
+struct tx_port_stats {
+ __le64 tx_64b_frames;
+ __le64 tx_65b_127b_frames;
+ __le64 tx_128b_255b_frames;
+ __le64 tx_256b_511b_frames;
+ __le64 tx_512b_1023b_frames;
+ __le64 tx_1024b_1518b_frames;
+ __le64 tx_good_vlan_frames;
+ __le64 tx_1519b_2047b_frames;
+ __le64 tx_2048b_4095b_frames;
+ __le64 tx_4096b_9216b_frames;
+ __le64 tx_9217b_16383b_frames;
+ __le64 tx_good_frames;
+ __le64 tx_total_frames;
+ __le64 tx_ucast_frames;
+ __le64 tx_mcast_frames;
+ __le64 tx_bcast_frames;
+ __le64 tx_pause_frames;
+ __le64 tx_pfc_frames;
+ __le64 tx_jabber_frames;
+ __le64 tx_fcs_err_frames;
+ __le64 tx_control_frames;
+ __le64 tx_oversz_frames;
+ __le64 tx_single_dfrl_frames;
+ __le64 tx_multi_dfrl_frames;
+ __le64 tx_single_coll_frames;
+ __le64 tx_multi_coll_frames;
+ __le64 tx_late_coll_frames;
+ __le64 tx_excessive_coll_frames;
+ __le64 tx_frag_frames;
+ __le64 tx_err;
+ __le64 tx_tagged_frames;
+ __le64 tx_dbl_tagged_frames;
+ __le64 tx_runt_frames;
+ __le64 tx_fifo_underruns;
+ __le64 tx_pfc_ena_frames_pri0;
+ __le64 tx_pfc_ena_frames_pri1;
+ __le64 tx_pfc_ena_frames_pri2;
+ __le64 tx_pfc_ena_frames_pri3;
+ __le64 tx_pfc_ena_frames_pri4;
+ __le64 tx_pfc_ena_frames_pri5;
+ __le64 tx_pfc_ena_frames_pri6;
+ __le64 tx_pfc_ena_frames_pri7;
+ __le64 tx_eee_lpi_events;
+ __le64 tx_eee_lpi_duration;
+ __le64 tx_llfc_logical_msgs;
+ __le64 tx_hcfc_msgs;
+ __le64 tx_total_collisions;
+ __le64 tx_bytes;
+ __le64 tx_xthol_frames;
+ __le64 tx_stat_discard;
+ __le64 tx_stat_error;
+};
+
+/* rx_port_stats (size:4224b/528B) */
+struct rx_port_stats {
+ __le64 rx_64b_frames;
+ __le64 rx_65b_127b_frames;
+ __le64 rx_128b_255b_frames;
+ __le64 rx_256b_511b_frames;
+ __le64 rx_512b_1023b_frames;
+ __le64 rx_1024b_1518b_frames;
+ __le64 rx_good_vlan_frames;
+ __le64 rx_1519b_2047b_frames;
+ __le64 rx_2048b_4095b_frames;
+ __le64 rx_4096b_9216b_frames;
+ __le64 rx_9217b_16383b_frames;
+ __le64 rx_total_frames;
+ __le64 rx_ucast_frames;
+ __le64 rx_mcast_frames;
+ __le64 rx_bcast_frames;
+ __le64 rx_fcs_err_frames;
+ __le64 rx_ctrl_frames;
+ __le64 rx_pause_frames;
+ __le64 rx_pfc_frames;
+ __le64 rx_unsupported_opcode_frames;
+ __le64 rx_unsupported_da_pausepfc_frames;
+ __le64 rx_wrong_sa_frames;
+ __le64 rx_align_err_frames;
+ __le64 rx_oor_len_frames;
+ __le64 rx_code_err_frames;
+ __le64 rx_false_carrier_frames;
+ __le64 rx_ovrsz_frames;
+ __le64 rx_jbr_frames;
+ __le64 rx_mtu_err_frames;
+ __le64 rx_match_crc_frames;
+ __le64 rx_promiscuous_frames;
+ __le64 rx_tagged_frames;
+ __le64 rx_double_tagged_frames;
+ __le64 rx_trunc_frames;
+ __le64 rx_good_frames;
+ __le64 rx_pfc_xon2xoff_frames_pri0;
+ __le64 rx_pfc_xon2xoff_frames_pri1;
+ __le64 rx_pfc_xon2xoff_frames_pri2;
+ __le64 rx_pfc_xon2xoff_frames_pri3;
+ __le64 rx_pfc_xon2xoff_frames_pri4;
+ __le64 rx_pfc_xon2xoff_frames_pri5;
+ __le64 rx_pfc_xon2xoff_frames_pri6;
+ __le64 rx_pfc_xon2xoff_frames_pri7;
+ __le64 rx_pfc_ena_frames_pri0;
+ __le64 rx_pfc_ena_frames_pri1;
+ __le64 rx_pfc_ena_frames_pri2;
+ __le64 rx_pfc_ena_frames_pri3;
+ __le64 rx_pfc_ena_frames_pri4;
+ __le64 rx_pfc_ena_frames_pri5;
+ __le64 rx_pfc_ena_frames_pri6;
+ __le64 rx_pfc_ena_frames_pri7;
+ __le64 rx_sch_crc_err_frames;
+ __le64 rx_undrsz_frames;
+ __le64 rx_frag_frames;
+ __le64 rx_eee_lpi_events;
+ __le64 rx_eee_lpi_duration;
+ __le64 rx_llfc_physical_msgs;
+ __le64 rx_llfc_logical_msgs;
+ __le64 rx_llfc_msgs_with_crc_err;
+ __le64 rx_hcfc_msgs;
+ __le64 rx_hcfc_msgs_with_crc_err;
+ __le64 rx_bytes;
+ __le64 rx_runt_bytes;
+ __le64 rx_runt_frames;
+ __le64 rx_stat_discard;
+ __le64 rx_stat_err;
+};
+
+/* hwrm_port_qstats_input (size:320b/40B) */
+struct hwrm_port_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 flags;
+ #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
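/*
 * Annotation (not part of the patch): the qstats response does not carry
 * the counters inline; firmware DMAs a struct tx_port_stats and a struct
 * rx_port_stats into host buffers whose bus addresses are passed in
 * tx_stat_host_addr/rx_stat_host_addr (and with FLAGS_COUNTER_MASK set,
 * it is understood to return the counters' maximum-value masks instead of
 * live values). A buffer-setup sketch, assuming a coherent-DMA-capable
 * struct device; error unwinding is elided.
 */
static int sketch_prep_qstats(struct device *dev,
			      struct hwrm_port_qstats_input *req)
{
	dma_addr_t tx_map, rx_map;
	void *tx_buf, *rx_buf;

	tx_buf = dma_alloc_coherent(dev, sizeof(struct tx_port_stats),
				    &tx_map, GFP_KERNEL);
	rx_buf = dma_alloc_coherent(dev, sizeof(struct rx_port_stats),
				    &rx_map, GFP_KERNEL);
	if (!tx_buf || !rx_buf)
		return -ENOMEM;	/* real code would free the partial alloc */

	req->tx_stat_host_addr = cpu_to_le64(tx_map);
	req->rx_stat_host_addr = cpu_to_le64(rx_map);
	return 0;
}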
+/* hwrm_port_qstats_output (size:128b/16B) */
+struct hwrm_port_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 flags;
+ #define PORT_QSTATS_RESP_FLAGS_CLEARED 0x1UL
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* tx_port_stats_ext (size:2048b/256B) */
+struct tx_port_stats_ext {
+ __le64 tx_bytes_cos0;
+ __le64 tx_bytes_cos1;
+ __le64 tx_bytes_cos2;
+ __le64 tx_bytes_cos3;
+ __le64 tx_bytes_cos4;
+ __le64 tx_bytes_cos5;
+ __le64 tx_bytes_cos6;
+ __le64 tx_bytes_cos7;
+ __le64 tx_packets_cos0;
+ __le64 tx_packets_cos1;
+ __le64 tx_packets_cos2;
+ __le64 tx_packets_cos3;
+ __le64 tx_packets_cos4;
+ __le64 tx_packets_cos5;
+ __le64 tx_packets_cos6;
+ __le64 tx_packets_cos7;
+ __le64 pfc_pri0_tx_duration_us;
+ __le64 pfc_pri0_tx_transitions;
+ __le64 pfc_pri1_tx_duration_us;
+ __le64 pfc_pri1_tx_transitions;
+ __le64 pfc_pri2_tx_duration_us;
+ __le64 pfc_pri2_tx_transitions;
+ __le64 pfc_pri3_tx_duration_us;
+ __le64 pfc_pri3_tx_transitions;
+ __le64 pfc_pri4_tx_duration_us;
+ __le64 pfc_pri4_tx_transitions;
+ __le64 pfc_pri5_tx_duration_us;
+ __le64 pfc_pri5_tx_transitions;
+ __le64 pfc_pri6_tx_duration_us;
+ __le64 pfc_pri6_tx_transitions;
+ __le64 pfc_pri7_tx_duration_us;
+ __le64 pfc_pri7_tx_transitions;
+};
+
+/* rx_port_stats_ext (size:3904b/488B) */
+struct rx_port_stats_ext {
+ __le64 link_down_events;
+ __le64 continuous_pause_events;
+ __le64 resume_pause_events;
+ __le64 continuous_roce_pause_events;
+ __le64 resume_roce_pause_events;
+ __le64 rx_bytes_cos0;
+ __le64 rx_bytes_cos1;
+ __le64 rx_bytes_cos2;
+ __le64 rx_bytes_cos3;
+ __le64 rx_bytes_cos4;
+ __le64 rx_bytes_cos5;
+ __le64 rx_bytes_cos6;
+ __le64 rx_bytes_cos7;
+ __le64 rx_packets_cos0;
+ __le64 rx_packets_cos1;
+ __le64 rx_packets_cos2;
+ __le64 rx_packets_cos3;
+ __le64 rx_packets_cos4;
+ __le64 rx_packets_cos5;
+ __le64 rx_packets_cos6;
+ __le64 rx_packets_cos7;
+ __le64 pfc_pri0_rx_duration_us;
+ __le64 pfc_pri0_rx_transitions;
+ __le64 pfc_pri1_rx_duration_us;
+ __le64 pfc_pri1_rx_transitions;
+ __le64 pfc_pri2_rx_duration_us;
+ __le64 pfc_pri2_rx_transitions;
+ __le64 pfc_pri3_rx_duration_us;
+ __le64 pfc_pri3_rx_transitions;
+ __le64 pfc_pri4_rx_duration_us;
+ __le64 pfc_pri4_rx_transitions;
+ __le64 pfc_pri5_rx_duration_us;
+ __le64 pfc_pri5_rx_transitions;
+ __le64 pfc_pri6_rx_duration_us;
+ __le64 pfc_pri6_rx_transitions;
+ __le64 pfc_pri7_rx_duration_us;
+ __le64 pfc_pri7_rx_transitions;
+ __le64 rx_bits;
+ __le64 rx_buffer_passed_threshold;
+ __le64 rx_pcs_symbol_err;
+ __le64 rx_corrected_bits;
+ __le64 rx_discard_bytes_cos0;
+ __le64 rx_discard_bytes_cos1;
+ __le64 rx_discard_bytes_cos2;
+ __le64 rx_discard_bytes_cos3;
+ __le64 rx_discard_bytes_cos4;
+ __le64 rx_discard_bytes_cos5;
+ __le64 rx_discard_bytes_cos6;
+ __le64 rx_discard_bytes_cos7;
+ __le64 rx_discard_packets_cos0;
+ __le64 rx_discard_packets_cos1;
+ __le64 rx_discard_packets_cos2;
+ __le64 rx_discard_packets_cos3;
+ __le64 rx_discard_packets_cos4;
+ __le64 rx_discard_packets_cos5;
+ __le64 rx_discard_packets_cos6;
+ __le64 rx_discard_packets_cos7;
+ __le64 rx_fec_corrected_blocks;
+ __le64 rx_fec_uncorrectable_blocks;
+ __le64 rx_filter_miss;
+ __le64 rx_fec_symbol_err;
+};
+
+/* hwrm_port_qstats_ext_input (size:320b/40B) */
+struct hwrm_port_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 flags;
+ #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0;
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_ext_output (size:128b/16B) */
+struct hwrm_port_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ __le16 total_active_cos_queues;
+ u8 flags;
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEARED 0x2UL
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
+struct hwrm_port_lpbk_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 lpbk_stat_size;
+ u8 flags;
+ #define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 lpbk_stat_host_addr;
+};
+
+/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
+struct hwrm_port_lpbk_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 lpbk_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* port_lpbk_stats (size:640b/80B) */
+struct port_lpbk_stats {
+ __le64 lpbk_ucast_frames;
+ __le64 lpbk_mcast_frames;
+ __le64 lpbk_bcast_frames;
+ __le64 lpbk_ucast_bytes;
+ __le64 lpbk_mcast_bytes;
+ __le64 lpbk_bcast_bytes;
+ __le64 lpbk_tx_discards;
+ __le64 lpbk_tx_errors;
+ __le64 lpbk_rx_discards;
+ __le64 lpbk_rx_errors;
+};
+
+/* hwrm_port_ecn_qstats_input (size:256b/32B) */
+struct hwrm_port_ecn_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 ecn_stat_buf_size;
+ u8 flags;
+ #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+ __le64 ecn_stat_host_addr;
+};
+
+/* hwrm_port_ecn_qstats_output (size:128b/16B) */
+struct hwrm_port_ecn_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ecn_stat_buf_size;
+ u8 mark_en;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* port_stats_ecn (size:512b/64B) */
+struct port_stats_ecn {
+ __le64 mark_cnt_cos0;
+ __le64 mark_cnt_cos1;
+ __le64 mark_cnt_cos2;
+ __le64 mark_cnt_cos3;
+ __le64 mark_cnt_cos4;
+ __le64 mark_cnt_cos5;
+ __le64 mark_cnt_cos6;
+ __le64 mark_cnt_cos7;
+};
+
+/* hwrm_port_clr_stats_input (size:192b/24B) */
+struct hwrm_port_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 flags;
+ #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL
+ u8 unused_0[5];
+};
+
+/* hwrm_port_clr_stats_output (size:128b/16B) */
+struct hwrm_port_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
+struct hwrm_port_lpbk_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
+struct hwrm_port_lpbk_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_ts_query_input (size:320b/40B) */
+struct hwrm_port_ts_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX
+ #define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL
+ __le16 port_id;
+ u8 unused_0[2];
+ __le16 enables;
+ #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL
+ #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET 0x4UL
+ __le16 ts_req_timeout;
+ __le32 ptp_seq_id;
+ __le16 ptp_hdr_offset;
+ u8 unused_1[6];
+};
+
+/* hwrm_port_ts_query_output (size:192b/24B) */
+struct hwrm_port_ts_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ptp_msg_ts;
+ __le16 ptp_msg_seqid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
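/*
 * Annotation (not part of the patch): in the flags word, PATH (bit 0)
 * selects the TX or RX timestamp, while CURRENT_TIME (bit 1) asks for
 * the free-running PHC time instead; the optional enables let the caller
 * match a specific PTP sequence id. A request-building sketch with a
 * hypothetical helper name:
 */
static void sketch_ts_query_rx(struct hwrm_port_ts_query_input *req,
			       u16 port_id, u32 seq)
{
	req->flags = cpu_to_le32(PORT_TS_QUERY_REQ_FLAGS_PATH_RX);
	req->port_id = cpu_to_le16(port_id);
	req->enables = cpu_to_le16(PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID);
	req->ptp_seq_id = cpu_to_le32(seq);
}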
+/* hwrm_port_phy_qcaps_input (size:192b/24B) */
+struct hwrm_port_phy_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_phy_qcaps_output (size:320b/40B) */
+struct hwrm_port_phy_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL
+ u8 port_cnt;
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_12 0xcUL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_12
+ __le16 supported_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+ __le16 supported_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+ __le16 supported_speeds_eee_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+ __le32 tx_lpi_timer_low;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
+ __le32 valid_tx_lpi_timer_high;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24
+ __le16 supported_pam4_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
+ __le16 supported_pam4_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
+ __le16 flags2;
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
+ u8 internal_port_cnt;
+ u8 unused_0;
+ __le16 supported_speeds2_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 0x2000UL
+ __le16 supported_speeds2_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL
+ u8 unused_1[3];
+ u8 valid;
+};
+
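/*
 * Annotation (not part of the patch): a driver is expected to check
 * FLAGS2_SPEEDS2_SUPPORTED before trusting the *_speeds2_* bitmaps here
 * (and support_speeds2/force_link_speeds2 in the qcfg response), since
 * older firmware only fills the legacy speed bitmaps. A gating sketch
 * with a hypothetical helper name:
 */
static u16 sketch_supported_forced_speeds(
		const struct hwrm_port_phy_qcaps_output *resp, bool *speeds2)
{
	*speeds2 = le16_to_cpu(resp->flags2) &
		   PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED;
	return *speeds2 ? le16_to_cpu(resp->supported_speeds2_force_mode)
			: le16_to_cpu(resp->supported_speeds_force_mode);
}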
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER 0x2UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 bank_number;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+ __le32 data[16];
+};
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
+struct hwrm_port_phy_i2c_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER 0x2UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 bank_number;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
+struct hwrm_port_phy_i2c_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 data[16];
+ u8 unused_0[7];
+ u8 valid;
+};
+
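/*
 * Annotation (not part of the patch): this is the usual path for reading
 * pluggable-module EEPROMs: i2c_slave_addr is the module's I2C address
 * (0xa0/0xa2 for SFP by convention), page_number/page_offset select the
 * region, and up to 64 bytes (the 16 __le32 words of data[]) come back
 * per call. A request sketch, with 0xa0 assumed as the conventional SFP
 * EEPROM address:
 */
static void sketch_i2c_eeprom_read(struct hwrm_port_phy_i2c_read_input *req,
				   u16 port_id, u16 offset, u8 len)
{
	req->port_id = cpu_to_le16(port_id);
	req->i2c_slave_addr = 0xa0;	/* conventional SFP EEPROM address */
	req->page_number = cpu_to_le16(0);
	req->enables = cpu_to_le32(PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET);
	req->page_offset = cpu_to_le16(offset);
	req->data_length = len;	/* <= 64 bytes per request */
}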
+/* hwrm_port_phy_mdio_write_input (size:320b/40B) */
+struct hwrm_port_phy_mdio_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[2];
+ __le16 port_id;
+ u8 phy_addr;
+ u8 dev_addr;
+ __le16 reg_addr;
+ __le16 reg_data;
+ u8 cl45_mdio;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_mdio_write_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_mdio_read_input (size:256b/32B) */
+struct hwrm_port_phy_mdio_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[2];
+ __le16 port_id;
+ u8 phy_addr;
+ u8 dev_addr;
+ __le16 reg_addr;
+ u8 cl45_mdio;
+ u8 unused_1;
+};
+
+/* hwrm_port_phy_mdio_read_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reg_data;
+ u8 unused_0[5];
+ u8 valid;
+};
+
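/*
 * Annotation (not part of the patch): cl45_mdio selects IEEE 802.3
 * Clause 45 addressing, in which dev_addr names the MMD device (e.g.
 * PMA/PMD) and reg_addr is a 16-bit register within it; with Clause 22
 * addressing the dev_addr field is unused. A minimal request sketch:
 */
static void sketch_mdio_c45_read(struct hwrm_port_phy_mdio_read_input *req,
				 u16 port_id, u8 phy, u8 mmd, u16 reg)
{
	req->port_id = cpu_to_le16(port_id);
	req->phy_addr = phy;
	req->dev_addr = mmd;	/* MMD device, Clause 45 only */
	req->reg_addr = cpu_to_le16(reg);
	req->cl45_mdio = 1;
}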
+/* hwrm_port_led_cfg_input (size:512b/64B) */
+struct hwrm_port_led_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
+ __le16 port_id;
+ u8 num_leds;
+ u8 rsvd;
+ u8 led0_id;
+ u8 led0_state;
+ #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 rsvd0;
+ u8 led1_id;
+ u8 led1_state;
+ #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 rsvd1;
+ u8 led2_id;
+ u8 led2_state;
+ #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 rsvd2;
+ u8 led3_id;
+ u8 led3_state;
+ #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 rsvd3;
+};
+
+/* hwrm_port_led_cfg_output (size:128b/16B) */
+struct hwrm_port_led_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
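/*
 * Annotation (not part of the patch): each LED is configured
 * independently, with the enables bits gating which per-LED fields are
 * applied; the blink_on/blink_off durations are understood to be in
 * milliseconds (an assumption from HWRM convention, not stated in this
 * header). A sketch that blinks LED0, as an ethtool identify operation
 * might:
 */
static void sketch_blink_led0(struct hwrm_port_led_cfg_input *req, u16 port_id)
{
	req->port_id = cpu_to_le16(port_id);
	req->num_leds = 1;
	req->led0_id = 0;
	req->led0_state = PORT_LED_CFG_REQ_LED0_STATE_BLINK;
	req->led0_blink_on = cpu_to_le16(500);	/* assumed: milliseconds */
	req->led0_blink_off = cpu_to_le16(500);
	req->enables = cpu_to_le32(PORT_LED_CFG_REQ_ENABLES_LED0_ID |
				   PORT_LED_CFG_REQ_ENABLES_LED0_STATE |
				   PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON |
				   PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF);
}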
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
+struct hwrm_port_led_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
+struct hwrm_port_led_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
+ u8 led0_state;
+ #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
+ u8 led0_color;
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
+ u8 led1_state;
+ #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
+ u8 led1_color;
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
+ u8 led2_state;
+ #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
+ u8 led2_color;
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
+ u8 led3_state;
+ #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
+ #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
+ u8 led3_color;
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
+ #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 unused_4[6];
+ u8 valid;
+};
+
+/* hwrm_port_led_qcaps_input (size:192b/24B) */
+struct hwrm_port_led_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_led_qcaps_output (size:384b/48B) */
+struct hwrm_port_led_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 unused[3];
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
+ u8 led0_group_id;
+ u8 unused_0;
+ __le16 led0_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led0_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
+ u8 led1_group_id;
+ u8 unused_1;
+ __le16 led1_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led1_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
+ u8 led2_group_id;
+ u8 unused_2;
+ __le16 led2_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led2_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
+ u8 led3_group_id;
+ u8 unused_3;
+ __le16 led3_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led3_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
+ u8 unused_4[3];
+ u8 valid;
+};
+
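+/*
+ * Usage sketch (illustrative only, not part of the generated interface):
+ * checking whether LED0 exists and supports blinking, given a completed
+ * hwrm_port_led_qcaps_output; "resp" is a hypothetical caller-owned
+ * response buffer.
+ *
+ *   static bool led0_can_blink(const struct hwrm_port_led_qcaps_output *resp)
+ *   {
+ *           u16 caps = le16_to_cpu(resp->led0_state_caps);
+ *
+ *           return (caps & PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED) &&
+ *                  (caps & PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED);
+ *   }
+ */
+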
+/* hwrm_port_mac_qcaps_input (size:192b/24B) */
+struct hwrm_port_mac_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_port_mac_qcaps_output (size:128b/16B) */
+struct hwrm_port_mac_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x1UL
+ #define PORT_MAC_QCAPS_RESP_FLAGS_REMOTE_LPBK_SUPPORTED 0x2UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_queue_qportcfg_input (size:192b/24B) */
+struct hwrm_queue_qportcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
+ __le16 port_id;
+ u8 drv_qmap_cap;
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED
+ u8 unused_0;
+};
+
+/* hwrm_queue_qportcfg_output (size:1344b/168B) */
+struct hwrm_queue_qportcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 max_configurable_queues;
+ u8 max_configurable_lossless_queues;
+ u8 queue_cfg_allowed;
+ u8 queue_cfg_info;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_USE_PROFILE_TYPE 0x2UL
+ u8 queue_pfcenable_cfg_allowed;
+ u8 queue_pri2cos_cfg_allowed;
+ u8 queue_cos2bw_cfg_allowed;
+ u8 queue_id0;
+ u8 queue_id0_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id1;
+ u8 queue_id1_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id2;
+ u8 queue_id2_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id3;
+ u8 queue_id3_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id4;
+ u8 queue_id4_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id5;
+ u8 queue_id5_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id6;
+ u8 queue_id6_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id7;
+ u8 queue_id7_service_profile;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+ u8 queue_id0_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ char qid0_name[16];
+ char qid1_name[16];
+ char qid2_name[16];
+ char qid3_name[16];
+ char qid4_name[16];
+ char qid5_name[16];
+ char qid6_name[16];
+ char qid7_name[16];
+ u8 queue_id1_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id2_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id3_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id4_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id5_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id6_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id7_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 valid;
+};
+
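+/*
+ * Usage sketch (illustrative): the PATH flag selects which direction the
+ * query describes; "req" is a hypothetical zero-initialized request and
+ * "port_id" the caller's port.
+ *
+ *   static void queue_qportcfg_req_prep(struct hwrm_queue_qportcfg_input *req,
+ *                                       u16 port_id, bool rx)
+ *   {
+ *           req->flags = cpu_to_le32(rx ? QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX :
+ *                                         QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX);
+ *           req->port_id = cpu_to_le16(port_id);
+ *   }
+ */
+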
+/* hwrm_queue_qcfg_input (size:192b/24B) */
+struct hwrm_queue_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX
+ __le32 queue_id;
+};
+
+/* hwrm_queue_qcfg_output (size:128b/16B) */
+struct hwrm_queue_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 queue_len;
+ u8 service_profile;
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN
+ u8 queue_cfg_info;
+ #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_queue_cfg_input (size:320b/40B) */
+struct hwrm_queue_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
+ __le32 enables;
+ #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
+ #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
+ __le32 queue_id;
+ __le32 dflt_len;
+ u8 service_profile;
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
+struct hwrm_queue_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL
+ __le16 port_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
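+/*
+ * Usage sketch (illustrative): the eight PRIn_PFC_ENABLED bits are
+ * consecutive from bit 0, so an IEEE 802.1Qbb priority bitmap ("pfc_en",
+ * hypothetical, bit n set to enable PFC on priority n) maps straight onto
+ * the low byte of flags:
+ *
+ *   req->flags = cpu_to_le32(pfc_en & 0xff);
+ *   req->port_id = cpu_to_le16(port_id);
+ */
+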
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
+ u8 port_id;
+ u8 unused_0[3];
+};
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 queue_cfg_info;
+ #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
+struct hwrm_queue_pri2cos_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
+ __le32 enables;
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
+ u8 port_id;
+ u8 pri0_cos_queue_id;
+ u8 pri1_cos_queue_id;
+ u8 pri2_cos_queue_id;
+ u8 pri3_cos_queue_id;
+ u8 pri4_cos_queue_id;
+ u8 pri5_cos_queue_id;
+ u8 pri6_cos_queue_id;
+ u8 pri7_cos_queue_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
+struct hwrm_queue_pri2cos_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
+struct hwrm_queue_cos2bw_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
+struct hwrm_queue_cos2bw_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 queue_id0;
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ struct {
+ u8 queue_id;
+ __le32 queue_id_min_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id_max_bw;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id_tsa_assign;
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id_pri_lvl;
+ u8 queue_id_bw_weight;
+ } __packed cfg[7];
+ u8 unused_2[4];
+ u8 valid;
+};
+
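+/*
+ * Usage sketch (illustrative): each *_min_bw / *_max_bw word packs a 28-bit
+ * value, a scale bit and a 3-bit unit, decoded with the usual MASK/SFT
+ * pattern; "resp" is a hypothetical completed response.
+ *
+ *   u32 bw = le32_to_cpu(resp->queue_id0_min_bw);
+ *   u32 value = (bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK) >>
+ *               QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT;
+ *   bool in_bytes = bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE;
+ *   u32 unit = bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK;
+ */
+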
+/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
+struct hwrm_queue_cos2bw_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
+ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
+ __le16 port_id;
+ u8 queue_id0;
+ u8 unused_0;
+ __le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id0_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id0_pri_lvl;
+ u8 queue_id0_bw_weight;
+ struct {
+ u8 queue_id;
+ __le32 queue_id_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID
+ __le32 queue_id_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 queue_id_tsa_assign;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL
+ u8 queue_id_pri_lvl;
+ u8 queue_id_bw_weight;
+ } __packed cfg[7];
+ u8 unused_1[5];
+};
+
+/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
+struct hwrm_queue_cos2bw_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
+struct hwrm_queue_dscp_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 port_id;
+ u8 unused_0[7];
+};
+
+/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
+struct hwrm_queue_dscp_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_dscp_bits;
+ u8 unused_0;
+ __le16 max_entries;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
+struct hwrm_queue_dscp2pri_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ u8 port_id;
+ u8 unused_0;
+ __le16 dest_data_buffer_size;
+ u8 unused_1[4];
+};
+
+/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 entry_cnt;
+ u8 default_pri;
+ u8 unused_0[4];
+ u8 valid;
+};
+
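+/*
+ * Usage sketch (illustrative): this query returns its table indirectly. The
+ * caller provides a DMA buffer via dest_data_addr, the firmware fills it and
+ * reports entry_cnt in the response. Buffer setup, assuming a struct device
+ * pointer "dev" and hypothetical locals "entry_sz" and "max_entries" (the
+ * latter as reported by hwrm_queue_dscp_qcaps_output):
+ *
+ *   dma_addr_t mapping;
+ *   void *buf = dma_alloc_coherent(dev, entry_sz * max_entries,
+ *                                  &mapping, GFP_KERNEL);
+ *
+ *   req->dest_data_addr = cpu_to_le64(mapping);
+ *   req->dest_data_buffer_size = cpu_to_le16(entry_sz * max_entries);
+ */
+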
+/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
+struct hwrm_queue_dscp2pri_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le32 flags;
+ #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
+ __le32 enables;
+ #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
+ u8 port_id;
+ u8 default_pri;
+ __le16 entry_cnt;
+ u8 unused_0[4];
+};
+
+/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcaps_input (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcaps_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 max_pfcwd_timeout;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcwd_timeout_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcwd_timeout_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 pfcwd_timeout_value;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_pfcwd_timeout_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcfg_input (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pfcwd_timeout_value;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_alloc_input (size:192b/24B) */
+struct hwrm_vnic_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
+ #define VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID 0x4UL
+ __le16 virtio_net_fid;
+ __le16 vnic_id;
+};
+
+/* hwrm_vnic_alloc_output (size:128b/16B) */
+struct hwrm_vnic_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 vnic_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
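+/*
+ * Usage sketch (illustrative): a typical allocate-and-read-id round trip,
+ * assuming the bnxt_en request helpers (hwrm_req_init/hold/send/drop, which
+ * live in the driver, not in this header) and a driver context "bp":
+ *
+ *   struct hwrm_vnic_alloc_output *resp;
+ *   struct hwrm_vnic_alloc_input *req;
+ *   u32 fw_vnic_id;
+ *   int rc;
+ *
+ *   rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
+ *   if (rc)
+ *           return rc;
+ *   req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
+ *   resp = hwrm_req_hold(bp, req);
+ *   rc = hwrm_req_send(bp, req);
+ *   if (!rc)
+ *           fw_vnic_id = le32_to_cpu(resp->vnic_id);
+ *   hwrm_req_drop(bp, req);
+ */
+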
+/* hwrm_vnic_update_input (size:256b/32B) */
+struct hwrm_vnic_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 enables;
+ #define VNIC_UPDATE_REQ_ENABLES_VNIC_STATE_VALID 0x1UL
+ #define VNIC_UPDATE_REQ_ENABLES_MRU_VALID 0x2UL
+ #define VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID 0x4UL
+ u8 vnic_state;
+ #define VNIC_UPDATE_REQ_VNIC_STATE_NORMAL 0x0UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_DROP 0x1UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_LAST VNIC_UPDATE_REQ_VNIC_STATE_DROP
+ u8 metadata_format_type;
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_0 0x0UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_1 0x1UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_2 0x2UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_3 0x3UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 0x4UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_LAST VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4
+ __le16 mru;
+ u8 unused_1[4];
+};
+
+/* hwrm_vnic_update_output (size:128b/16B) */
+struct hwrm_vnic_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_free_input (size:192b/24B) */
+struct hwrm_vnic_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_free_output (size:128b/16B) */
+struct hwrm_vnic_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_cfg_input (size:384b/48B) */
+struct hwrm_vnic_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
+ #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
+ #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
+ #define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL
+ __le32 enables;
+ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL
+ #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
+ #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
+ #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
+ #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
+ #define VNIC_CFG_REQ_ENABLES_RAW_QP_ID 0x400UL
+ __le16 vnic_id;
+ __le16 dflt_ring_grp;
+ __le16 rss_rule;
+ __le16 cos_rule;
+ __le16 lb_rule;
+ __le16 mru;
+ __le16 default_rx_ring_id;
+ __le16 default_cmpl_ring_id;
+ __le16 queue_id;
+ u8 rx_csum_v2_mode;
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
+ #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
+ u8 l2_cqe_mode;
+ #define VNIC_CFG_REQ_L2_CQE_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
+ __le32 raw_qp_id;
+};
+
+/* hwrm_vnic_cfg_output (size:128b/16B) */
+struct hwrm_vnic_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_qcaps_input (size:192b/24B) */
+struct hwrm_vnic_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_qcaps_output (size:192b/24B) */
+struct hwrm_vnic_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 mru;
+ u8 unused_0[2];
+ __le32 flags;
+ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
+ #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
+ #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
+ #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP 0x8000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP 0x10000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP 0x20000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V3_CAP 0x80000UL
+ #define VNIC_QCAPS_RESP_FLAGS_L2_CQE_MODE_CAP 0x100000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP 0x200000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_PORTCOS_MAPPING_MODE 0x4000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP 0x40000000UL
+ __le16 max_aggs_supported;
+ u8 unused_1[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */
+struct hwrm_vnic_tpa_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_PACK_AS_GRO 0x100UL
+ __le32 enables;
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN 0x10UL
+ __le16 vnic_id;
+ __le16 max_agg_segs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
+ u8 unused_0[2];
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+ __le32 tnl_tpa_en_bitmap;
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE 0x8UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 0x10UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6 0x20UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
+ u8 unused_1[4];
+};
+
+/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
+struct hwrm_vnic_tpa_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
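+/*
+ * Usage sketch (illustrative): optional fields take effect only when the
+ * matching enables bit is set, e.g. turning on TPA/GRO while capping
+ * aggregation at 8 segments and 16 aggregations ("req" is a hypothetical
+ * zeroed request, "fw_vnic_id" the id returned by hwrm_vnic_alloc):
+ *
+ *   req->flags = cpu_to_le32(VNIC_TPA_CFG_REQ_FLAGS_TPA |
+ *                            VNIC_TPA_CFG_REQ_FLAGS_GRO);
+ *   req->enables = cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
+ *                              VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS);
+ *   req->vnic_id = cpu_to_le16(fw_vnic_id);
+ *   req->max_agg_segs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8);
+ *   req->max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_16);
+ */
+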
+/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_tpa_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vnic_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_tpa_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL
+ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL
+ __le16 max_agg_segs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
+ __le16 max_aggs;
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
+ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
+ __le32 max_agg_timer;
+ __le32 min_agg_len;
+ __le32 tnl_tpa_en_bitmap;
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE 0x8UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV4 0x10UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV6 0x20UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
+struct hwrm_vnic_rss_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 hash_type;
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6 0x400UL
+ __le16 vnic_id;
+ u8 ring_table_pair_index;
+ u8 hash_mode_flags;
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
+ __le64 ring_grp_tbl_addr;
+ __le64 hash_key_tbl_addr;
+ __le16 rss_ctx_idx;
+ u8 flags;
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT 0x4UL
+ u8 ring_select_mode;
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_LAST VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ u8 unused_1[4];
+};
+
+/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
+struct hwrm_vnic_rss_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_rss_cfg_cmd_err {
+ u8 code;
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_UNABLE_TO_GET_RSS_CFG 0x2UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_TYPE_UNSUPPORTED 0x3UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_TYPE_ERR 0x4UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_MODE_FAIL 0x5UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_RING_GRP_TABLE_ALLOC_ERR 0x6UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_KEY_ALLOC_ERR 0x7UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_DMA_FAILED 0x8UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_RX_RING_ALLOC_ERR 0x9UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_CMPL_RING_ALLOC_ERR 0xaUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HW_SET_RSS_FAILED 0xbUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_CTX_INVALID 0xcUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_INVALID 0xdUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_RING_TABLE_PAIR_INVALID 0xeUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_RING_TABLE_PAIR_INVALID
+ u8 unused_0[7];
+};
+
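+/*
+ * Usage sketch (illustrative): hash_type is an OR of the flow types to hash
+ * on; a common 2-tuple + 4-tuple selection over both IP versions is:
+ *
+ *   req->hash_type = cpu_to_le32(VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ *                                VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ *                                VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
+ *                                VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ *                                VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 |
+ *                                VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6);
+ */
+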
+/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_rss_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_ctx_idx;
+ __le16 vnic_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
+struct hwrm_vnic_rss_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 hash_type;
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV6 0x400UL
+ u8 unused_0[4];
+ __le32 hash_key[10];
+ u8 hash_mode_flags;
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
+ u8 ring_select_mode;
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_LAST VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ u8 unused_1[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
+struct hwrm_vnic_plcmodes_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+ #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL
+ __le32 enables;
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+ #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL
+ __le32 vnic_id;
+ __le16 jumbo_thresh;
+ __le16 hds_offset;
+ __le16 hds_threshold;
+ __le16 max_bds;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
+struct hwrm_vnic_plcmodes_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vnic_plcmodes_cfg_cmd_err (size:64b/8B) */
+struct hwrm_vnic_plcmodes_cfg_cmd_err {
+ u8 code;
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD 0x1UL
+ #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_LAST VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD
+ u8 unused_0[7];
+};
+
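+/*
+ * Usage sketch (illustrative): enabling header-data split for IPv4/IPv6 with
+ * a split threshold; value fields again pair with enables bits ("req" is a
+ * hypothetical zeroed request, "thresh" a caller-chosen byte count):
+ *
+ *   req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
+ *                            VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ *                            VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ *   req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
+ *                              VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+ *   req->vnic_id = cpu_to_le32(fw_vnic_id);
+ *   req->jumbo_thresh = cpu_to_le16(thresh);
+ *   req->hds_threshold = cpu_to_le16(thresh);
+ */
+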
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_cos_lb_ctx_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_alloc_input (size:768b/96B) */
+struct hwrm_ring_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
+ #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
+ #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
+ #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
+ #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
+ #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RATE_PROFILE_VALID 0x1000UL
+ #define RING_ALLOC_REQ_ENABLES_DPI_VALID 0x2000UL
+ u8 ring_type;
+ #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
+ #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
+ u8 cmpl_coal_cnt;
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_OFF 0x0UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_4 0x1UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_8 0x2UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_12 0x3UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_16 0x4UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_24 0x5UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_32 0x6UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_48 0x7UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64 0x8UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_96 0x9UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_128 0xaUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_192 0xbUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_256 0xcUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_320 0xdUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_384 0xeUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX
+ __le16 flags;
+ #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
+ #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL
+ #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL
+ #define RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE 0x8UL
+ __le64 page_tbl_addr;
+ __le32 fbo;
+ u8 page_size;
+ u8 page_tbl_depth;
+ __le16 schq_id;
+ __le32 length;
+ __le16 logical_id;
+ __le16 cmpl_ring_id;
+ __le16 queue_id;
+ __le16 rx_buf_size;
+ __le16 rx_ring_id;
+ __le16 nq_ring_id;
+ __le16 ring_arb_cfg;
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+ __le16 steering_tag;
+ __le32 reserved3;
+ __le32 stat_ctx_id;
+ __le32 reserved4;
+ __le32 max_bw;
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+ #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+ u8 int_mode;
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+ #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
+ #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
+ #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
+ #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
+ u8 mpc_chnls_type;
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE 0x0UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE 0x1UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA 0x2UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
+ u8 rx_rate_profile_sel;
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_DEFAULT 0x0UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE 0x1UL
+ #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_LAST RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE
+ u8 unused_4;
+ __le64 cq_handle;
+ __le16 dpi;
+ __le16 unused_5[3];
+};
+
+/* hwrm_ring_alloc_output (size:128b/16B) */
+struct hwrm_ring_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ring_id;
+ __le16 logical_ring_id;
+ u8 push_buffer_index;
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[2];
+ u8 valid;
+};
+
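+/*
+ * Illustrative sketch, not part of the generated interface: allocating
+ * an RX ring amounts to handing firmware the ring's page table and the
+ * completion context it reports into, roughly:
+ *
+ *    struct hwrm_ring_alloc_input req = {0};
+ *    req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
+ *    req.page_tbl_addr = cpu_to_le64(ring_pg_tbl_dma);
+ *    req.length = cpu_to_le32(ring_entries);
+ *    req.logical_id = cpu_to_le16(map_index);
+ *    req.cmpl_ring_id = cpu_to_le16(cp_ring_id);
+ *    req.enables = cpu_to_le32(RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
+ *    req.rx_buf_size = cpu_to_le16(buf_size);
+ *
+ * All lowercase locals are driver-chosen placeholders. The ring_id in
+ * hwrm_ring_alloc_output is the handle later passed to hwrm_ring_free
+ * and hwrm_ring_reset.
+ */
+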
+/* hwrm_ring_free_input (size:256b/32B) */
+struct hwrm_ring_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
+ #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
+ #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
+ #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
+ #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
+ u8 flags;
+ #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL
+ #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID
+ __le16 ring_id;
+ __le32 prod_idx;
+ __le32 opaque;
+ __le32 unused_1;
+};
+
+/* hwrm_ring_free_output (size:128b/16B) */
+struct hwrm_ring_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
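+/*
+ * Note (sketch, not generated text): ring_type in hwrm_ring_free_input
+ * must name the same type the ring was allocated with, and ring_id is
+ * the handle from hwrm_ring_alloc_output; prod_idx and opaque appear to
+ * matter only when FLAGS_VIRTIO_RING_VALID is set.
+ */
+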
+/* hwrm_ring_reset_input (size:192b/24B) */
+struct hwrm_ring_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+ #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL
+ #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP
+ u8 unused_0;
+ __le16 ring_id;
+ u8 unused_1[4];
+};
+
+/* hwrm_ring_reset_output (size:128b/16B) */
+struct hwrm_ring_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 push_buffer_index;
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[3];
+ u8 consumer_idx[3];
+ u8 valid;
+};
+
+/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */
+struct hwrm_ring_aggint_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */
+struct hwrm_ring_aggint_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 cmpl_params;
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL
+ #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL
+ __le32 nq_params;
+ #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL
+ __le16 num_cmpl_dma_aggr_min;
+ __le16 num_cmpl_dma_aggr_max;
+ __le16 num_cmpl_dma_aggr_during_int_min;
+ __le16 num_cmpl_dma_aggr_during_int_max;
+ __le16 cmpl_aggr_dma_tmr_min;
+ __le16 cmpl_aggr_dma_tmr_max;
+ __le16 cmpl_aggr_dma_tmr_during_int_min;
+ __le16 cmpl_aggr_dma_tmr_during_int_max;
+ __le16 int_lat_tmr_min_min;
+ __le16 int_lat_tmr_min_max;
+ __le16 int_lat_tmr_max_min;
+ __le16 int_lat_tmr_max_max;
+ __le16 num_cmpl_aggr_int_min;
+ __le16 num_cmpl_aggr_int_max;
+ __le16 timer_units;
+ u8 unused_0[1];
+ u8 valid;
+};
+
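+/*
+ * Note (sketch, not generated text): the *_min/*_max pairs reported here
+ * bound the matching fields of
+ * hwrm_ring_cmpl_ring_cfg_aggint_params_input below, and cmpl_params
+ * advertises which of those knobs the firmware honors at all; a driver
+ * would normally clamp its requested coalescing values into these ranges
+ * before issuing the cfg command.
+ */
+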
+/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0
+ #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flags;
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 ring_id;
+ __le16 flags;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL
+ __le16 num_cmpl_dma_aggr;
+ __le16 num_cmpl_dma_aggr_during_int;
+ __le16 cmpl_aggr_dma_tmr;
+ __le16 cmpl_aggr_dma_tmr_during_int;
+ __le16 int_lat_tmr_min;
+ __le16 int_lat_tmr_max;
+ __le16 num_cmpl_aggr_int;
+ __le16 enables;
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL
+ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
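+/*
+ * Illustrative sketch, not part of the generated interface: each
+ * coalescing value is consumed only when its enables bit is set, e.g.:
+ *
+ *    struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+ *    req.ring_id = cpu_to_le16(cp_ring_id);
+ *    req.int_lat_tmr_max = cpu_to_le16(tmr_max);
+ *    req.num_cmpl_aggr_int = cpu_to_le16(aggr_int);
+ *    req.enables =
+ *        cpu_to_le32(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX |
+ *                    RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT);
+ *
+ * tmr_max/aggr_int are driver-chosen values, ideally clamped to the
+ * ranges reported by hwrm_ring_aggint_qcaps above.
+ */
+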
+/* hwrm_ring_grp_alloc_input (size:192b/24B) */
+struct hwrm_ring_grp_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 cr;
+ __le16 rr;
+ __le16 ar;
+ __le16 sc;
+};
+
+/* hwrm_ring_grp_alloc_output (size:128b/16B) */
+struct hwrm_ring_grp_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 ring_group_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_ring_grp_free_input (size:192b/24B) */
+struct hwrm_ring_grp_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 ring_group_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_ring_grp_free_output (size:128b/16B) */
+struct hwrm_ring_grp_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
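+/*
+ * Note (sketch, not generated text): the terse cr/rr/ar/sc fields of
+ * hwrm_ring_grp_alloc_input are conventionally the completion ring, RX
+ * ring, aggregation ring and statistics context IDs being bundled into
+ * one ring group.
+ */
+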
+#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
+#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
+#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
+#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL
+
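+/*
+ * Note (sketch, not generated text): these top-of-range constants appear
+ * to be reserved sentinels for the 32-bit flow_id fields that follow,
+ * marking default and RoCE traffic rather than ordinary allocated flows.
+ */
+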
+/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
+struct hwrm_cfa_l2_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_XDP_DISABLE 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_SOURCE_VALID 0x80UL
+ __le32 enables;
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS 0x20000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_NUM_VLANS 0x40000UL
+ u8 l2_addr[6];
+ u8 num_vlans;
+ u8 t_num_vlans;
+ u8 l2_addr_mask[6];
+ __le16 l2_ovlan;
+ __le16 l2_ovlan_mask;
+ __le16 l2_ivlan;
+ __le16 l2_ivlan_mask;
+ u8 unused_1[2];
+ u8 t_l2_addr[6];
+ u8 unused_2[2];
+ u8 t_l2_addr_mask[6];
+ __le16 t_l2_ovlan;
+ __le16 t_l2_ovlan_mask;
+ __le16 t_l2_ivlan;
+ __le16 t_l2_ivlan_mask;
+ u8 src_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
+ u8 unused_3;
+ __le32 src_id;
+ u8 tunnel_type;
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_4;
+ __le16 dst_id;
+ __le16 mirror_vnic_id;
+ u8 pri_hint;
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
+ u8 unused_5;
+ __le32 unused_6;
+ __le64 l2_filter_id_hint;
+};
+
+/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_l2_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 l2_filter_id;
+ __le32 flow_id;
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
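+/*
+ * Illustrative sketch, not part of the generated interface: a minimal RX
+ * unicast-MAC filter steering into a VNIC could be built as:
+ *
+ *    struct hwrm_cfa_l2_filter_alloc_input req = {0};
+ *    req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+ *    req.enables = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
+ *                              CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK |
+ *                              CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID);
+ *    memcpy(req.l2_addr, mac_addr, ETH_ALEN);
+ *    memset(req.l2_addr_mask, 0xff, ETH_ALEN);
+ *    req.dst_id = cpu_to_le16(vnic_id);
+ *
+ * mac_addr/vnic_id are placeholders. The returned l2_filter_id is the
+ * handle for the free/cfg commands and for anchoring the tunnel and
+ * ntuple filters further down.
+ */
+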
+/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_l2_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 l2_filter_id;
+};
+
+/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_l2_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */
+struct hwrm_cfa_l2_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP (0x3UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP
+ __le32 enables;
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC 0x4UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID 0x8UL
+ __le64 l2_filter_id;
+ __le32 dst_id;
+ __le32 new_mirror_vnic_id;
+ __le32 prof_func;
+ __le32 l2_context_id;
+};
+
+/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
+struct hwrm_cfa_l2_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
+struct hwrm_cfa_l2_set_rx_mask_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 mask;
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
+ __le64 mc_tbl_addr;
+ __le32 num_mc_entries;
+ u8 unused_0[4];
+ __le64 vlan_tag_tbl_addr;
+ __le32 num_vlan_tags;
+ u8 unused_1[4];
+};
+
+/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
+struct hwrm_cfa_l2_set_rx_mask_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
+struct hwrm_cfa_l2_set_rx_mask_cmd_err {
+ u8 code;
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_MAX_VLAN_TAGS 0x2UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_VNIC_ID 0x3UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_ACTION 0x4UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_ACTION
+ u8 unused_0[7];
+};
+
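+/*
+ * Illustrative sketch, not part of the generated interface: receive-mode
+ * changes (broadcast, multicast lists, promiscuous) are expressed as mask
+ * bits plus an optional DMA'd address table, roughly:
+ *
+ *    struct hwrm_cfa_l2_set_rx_mask_input req = {0};
+ *    req.vnic_id = cpu_to_le32(vnic_id);
+ *    req.mask = cpu_to_le32(CFA_L2_SET_RX_MASK_REQ_MASK_BCAST |
+ *                           CFA_L2_SET_RX_MASK_REQ_MASK_MCAST);
+ *    req.mc_tbl_addr = cpu_to_le64(mc_list_dma);
+ *    req.num_mc_entries = cpu_to_le32(mc_count);
+ *
+ * mc_list_dma is a driver-owned array of 6-byte MAC addresses; the
+ * cmd_err codes above name the specific rejection reasons.
+ */
+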
+/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ __le32 enables;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+ __le64 l2_filter_id;
+ u8 l2_addr[6];
+ __le16 l2_ivlan;
+ __le32 l3_addr[4];
+ __le32 t_l3_addr[4];
+ u8 l3_addr_type;
+ u8 t_l3_addr_type;
+ u8 tunnel_type;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 tunnel_flags;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL
+ __le32 vni;
+ __le32 dst_vnic_id;
+ __le32 mirror_vnic_id;
+};
+
+/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tunnel_filter_id;
+ __le32 flow_id;
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 tunnel_filter_id;
+};
+
+/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_tunnel_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
+struct hwrm_vxlan_ipv4_hdr {
+ u8 ver_hlen;
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
+ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
+ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
+ u8 tos;
+ __be16 ip_id;
+ __be16 flags_frag_offset;
+ u8 ttl;
+ u8 protocol;
+ __be32 src_ip_addr;
+ __be32 dest_ip_addr;
+};
+
+/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
+struct hwrm_vxlan_ipv6_hdr {
+ __be32 ver_tc_flow_label;
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
+ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
+ __be16 payload_len;
+ u8 next_hdr;
+ u8 ttl;
+ __be32 src_ip_addr[4];
+ __be32 dest_ip_addr[4];
+};
+
+/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */
+struct hwrm_cfa_encap_data_vxlan {
+ u8 src_mac_addr[6];
+ __le16 unused_0;
+ u8 dst_mac_addr[6];
+ u8 num_vlan_tags;
+ u8 unused_1;
+ __be16 ovlan_tpid;
+ __be16 ovlan_tci;
+ __be16 ivlan_tpid;
+ __be16 ivlan_tci;
+ __le32 l3[10];
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
+ #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 vni;
+ u8 hdr_rsvd0[3];
+ u8 hdr_rsvd1;
+ u8 hdr_flags;
+ u8 unused[3];
+};
+
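+/*
+ * Note (sketch, not generated text): l3[10] above is a 40-byte slot big
+ * enough for either hwrm_vxlan_ipv4_hdr or hwrm_vxlan_ipv6_hdr, with the
+ * version nibble (CFA_ENCAP_DATA_VXLAN_L3_VER_*) selecting which; the
+ * 80-byte structure as a whole fits the encap_data[20] dword array of
+ * hwrm_cfa_encap_record_alloc_input below.
+ */
+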
+/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
+struct hwrm_cfa_encap_record_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_EXTERNAL 0x2UL
+ u8 encap_type;
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE
+ u8 unused_0[3];
+ __le32 encap_data[20];
+};
+
+/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 encap_record_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
+struct hwrm_cfa_encap_record_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_record_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_NO_L2_CONTEXT 0x40UL
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX 0x80000UL
+ __le64 l2_filter_id;
+ u8 src_macaddr[6];
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+ u8 ip_protocol;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMP 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMPV6 0x3aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD 0xffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD
+ __le16 dst_id;
+ __le16 rfs_ring_tbl_idx;
+ u8 tunnel_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 pri_hint;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST
+ __be32 src_ipaddr[4];
+ __be32 src_ipaddr_mask[4];
+ __be32 dst_ipaddr[4];
+ __be32 dst_ipaddr_mask[4];
+ __be16 src_port;
+ __be16 src_port_mask;
+ __be16 dst_port;
+ __be16 dst_port_mask;
+ __le64 ntuple_filter_id_hint;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_ntuple_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ntuple_filter_id;
+ __le32 flow_id;
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
+ u8 code;
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ZERO_MAC 0x65UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_BC_MC_MAC 0x66UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_VNIC 0x67UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_PF_FID 0x68UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_L2_CTXT_ID 0x69UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_NULL_L2_CTXT_CFG 0x6aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_NULL_L2_DATA_FLD 0x6bUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_CFA_LAYOUT 0x6cUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_CTXT_ALLOC_FAIL 0x6dUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ROCE_FLOW_ERR 0x6eUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_OWNER_FID 0x6fUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ZERO_REF_CNT 0x70UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_FLOW_TYPE 0x71UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_IVLAN 0x72UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_MAX_VLAN_ID 0x73UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_TNL_REQ 0x74UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_ADDR 0x75UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_IVLAN 0x76UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L3_ADDR 0x77UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L3_ADDR_TYPE 0x78UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_T_L3_ADDR_TYPE 0x79UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_DST_VNIC_ID 0x7aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VNI 0x7bUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_DST_ID 0x7cUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_FAIL_ROCE_L2_FLOW 0x7dUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_NPAR_VLAN 0x7eUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ATSP_ADD 0x7fUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_DFLT_VLAN_FAIL 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_L3_TYPE 0x81UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VAL_FAIL_TNL_FLOW 0x82UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VAL_FAIL_TNL_FLOW
+ u8 unused_0[7];
+};
+
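+/*
+ * Illustrative sketch, not part of the generated interface: an aRFS-style
+ * TCP/IPv4 4-tuple filter hung off an existing L2 filter might look like:
+ *
+ *    struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
+ *    req.l2_filter_id = l2_filter_id;
+ *    req.ethertype = htons(ETH_P_IP);
+ *    req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
+ *    req.ip_protocol = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP;
+ *    req.src_ipaddr[0] = saddr;
+ *    req.dst_ipaddr[0] = daddr;
+ *    req.src_port = sport;
+ *    req.dst_port = dport;
+ *    req.dst_id = cpu_to_le16(dst);
+ *
+ * ...plus, in req.enables, the ENABLES bit for every field populated,
+ * since unflagged fields are ignored. All lowercase locals are
+ * placeholders; the cmd_err codes above give the precise rejection
+ * reason on failure.
+ */
+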
+/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_ntuple_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 ntuple_filter_id;
+};
+
+/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_ntuple_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
+struct hwrm_cfa_ntuple_filter_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
+ __le32 flags;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_NO_L2_CONTEXT 0x4UL
+ __le64 ntuple_filter_id;
+ __le32 new_dst_id;
+ __le32 new_mirror_vnic_id;
+ __le16 new_meter_instance_id;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
+ u8 unused_1[6];
+};
+
+/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
+struct hwrm_cfa_ntuple_filter_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
+struct hwrm_cfa_decap_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
+ __le32 enables;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ __be32 tunnel_id;
+ u8 tunnel_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ u8 unused_0;
+ __le16 unused_1;
+ u8 src_macaddr[6];
+ u8 unused_2[2];
+ u8 dst_macaddr[6];
+ __be16 ovlan_vid;
+ __be16 ivlan_vid;
+ __be16 t_ovlan_vid;
+ __be16 t_ivlan_vid;
+ __be16 ethertype;
+ u8 ip_addr_type;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
+ u8 ip_protocol;
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+ __le16 unused_3;
+ __le32 unused_4;
+ __be32 src_ipaddr[4];
+ __be32 dst_ipaddr[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __le16 dst_id;
+ __le16 l2_ctxt_ref_id;
+};
+
+/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 decap_filter_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_decap_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 decap_filter_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
+struct hwrm_cfa_flow_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flags;
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_VHOST_ID_USE_VLAN 0x200UL
+ __le16 src_fid;
+ __le32 tunnel_handle;
+ __le16 action_flags;
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NO_FLOW_COUNTER_ALLOC 0x2000UL
+ __le16 dst_fid;
+ __be16 l2_rewrite_vlan_tpid;
+ __be16 l2_rewrite_vlan_tci;
+ __le16 act_meter_id;
+ __le16 ref_flow_handle;
+ __be16 ethertype;
+ __be16 outer_vlan_tci;
+ __be16 dmac[3];
+ __be16 inner_vlan_tci;
+ __be16 smac[3];
+ u8 ip_dst_mask_len;
+ u8 ip_src_mask_len;
+ __be32 ip_dst[4];
+ __be32 ip_src[4];
+ __be16 l4_src_port;
+ __be16 l4_src_port_mask;
+ __be16 l4_dst_port;
+ __be16 l4_dst_port_mask;
+ __be32 nat_ip_address[4];
+ __be16 l2_rewrite_dmac[3];
+ __be16 nat_port;
+ __be16 l2_rewrite_smac[3];
+ u8 ip_proto;
+ u8 tunnel_type;
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+};
+
+/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
+struct hwrm_cfa_flow_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flow_handle;
+ u8 unused_0[2];
+ __le32 flow_id;
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX
+ __le64 ext_flow_handle;
+ __le32 flow_counter_id;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_flow_alloc_cmd_err {
+ u8 code;
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB
+ u8 unused_0[7];
+};
+
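+/*
+ * Note (sketch, not generated text): a flow ends up with two identifiers,
+ * the 16-bit flow_handle and the 64-bit ext_flow_handle; which one a
+ * driver should use is advertised by the FLOW_HND_16BIT/64BIT bits in
+ * hwrm_cfa_adv_flow_mgnt_qcaps_output further down, and
+ * hwrm_cfa_flow_free_input below carries fields for both.
+ */
+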
+/* hwrm_cfa_flow_free_input (size:256b/32B) */
+struct hwrm_cfa_flow_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flow_handle;
+ __le16 unused_0;
+ __le32 flow_counter_id;
+ __le64 ext_flow_handle;
+};
+
+/* hwrm_cfa_flow_free_output (size:256b/32B) */
+struct hwrm_cfa_flow_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet;
+ __le64 byte;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_info_input (size:256b/32B) */
+struct hwrm_cfa_flow_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flow_handle;
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_TX 0x3000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT_RX 0x9000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT_RX 0xa000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_RX 0xb000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX 0xc000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_LAST CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX
+ u8 unused_0[6];
+ __le64 ext_flow_handle;
+};
+
+/* hwrm_cfa_flow_info_output (size:5632b/704B) */
+struct hwrm_cfa_flow_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define CFA_FLOW_INFO_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_FLOW_INFO_RESP_FLAGS_PATH_RX 0x2UL
+ u8 profile;
+ __le16 src_fid;
+ __le16 dst_fid;
+ __le16 l2_ctxt_id;
+ __le64 em_info;
+ __le64 tcam_info;
+ __le64 vfp_tcam_info;
+ __le16 ar_id;
+ __le16 flow_handle;
+ __le32 tunnel_handle;
+ __le16 flow_timer;
+ u8 unused_0[6];
+ __le32 flow_key_data[130];
+ __le32 flow_action_info[30];
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_stats_input (size:640b/80B) */
+struct hwrm_cfa_flow_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 num_flows;
+ __le16 flow_handle_0;
+ __le16 flow_handle_1;
+ __le16 flow_handle_2;
+ __le16 flow_handle_3;
+ __le16 flow_handle_4;
+ __le16 flow_handle_5;
+ __le16 flow_handle_6;
+ __le16 flow_handle_7;
+ __le16 flow_handle_8;
+ __le16 flow_handle_9;
+ u8 unused_0[2];
+ __le32 flow_id_0;
+ __le32 flow_id_1;
+ __le32 flow_id_2;
+ __le32 flow_id_3;
+ __le32 flow_id_4;
+ __le32 flow_id_5;
+ __le32 flow_id_6;
+ __le32 flow_id_7;
+ __le32 flow_id_8;
+ __le32 flow_id_9;
+};
+
+/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
+struct hwrm_cfa_flow_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet_0;
+ __le64 packet_1;
+ __le64 packet_2;
+ __le64 packet_3;
+ __le64 packet_4;
+ __le64 packet_5;
+ __le64 packet_6;
+ __le64 packet_7;
+ __le64 packet_8;
+ __le64 packet_9;
+ __le64 byte_0;
+ __le64 byte_1;
+ __le64 byte_2;
+ __le64 byte_3;
+ __le64 byte_4;
+ __le64 byte_5;
+ __le64 byte_6;
+ __le64 byte_7;
+ __le64 byte_8;
+ __le64 byte_9;
+ __le16 flow_hits;
+ u8 unused_0[5];
+ u8 valid;
+};
+
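+/*
+ * Note (sketch, not generated text): flow stats are batched; up to ten
+ * flows are named per request via num_flows, and packet_N/byte_N in the
+ * response line up positionally with flow_handle_N/flow_id_N in the
+ * request.
+ */
+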
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
+struct hwrm_cfa_vfr_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 reserved;
+ u8 unused_0[4];
+ char vfr_name[32];
+};
+
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vfr_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rx_cfa_code;
+ __le16 tx_cfa_action;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_cfa_vfr_free_input (size:448b/56B) */
+struct hwrm_cfa_vfr_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ char vfr_name[32];
+ __le16 vf_id;
+ __le16 reserved;
+ u8 unused_0[4];
+};
+
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
+struct hwrm_cfa_vfr_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */
+struct hwrm_cfa_eem_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ __le32 unused_0;
+};
+
+/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
+struct hwrm_cfa_eem_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x4UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x8UL
+ __le32 unused_0;
+ __le32 supported;
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL
+ __le32 max_entries_supported;
+ __le16 key_entry_size;
+ __le16 record_entry_size;
+ __le16 efc_entry_size;
+ __le16 fid_entry_size;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_cfg_input (size:384b/48B) */
+struct hwrm_cfa_eem_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ #define CFA_EEM_CFG_REQ_FLAGS_SECONDARY_PF 0x8UL
+ __le16 group_id;
+ __le16 unused_0;
+ __le32 num_entries;
+ __le32 unused_1;
+ __le16 key0_ctx_id;
+ __le16 key1_ctx_id;
+ __le16 record_ctx_id;
+ __le16 efc_ctx_id;
+ __le16 fid_ctx_id;
+ __le16 unused_2;
+ __le32 unused_3;
+};
+
+/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
+struct hwrm_cfa_eem_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */
+struct hwrm_cfa_eem_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_QCFG_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCFG_REQ_FLAGS_PATH_RX 0x2UL
+ __le32 unused_0;
+};
+
+/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
+struct hwrm_cfa_eem_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_EEM_QCFG_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ __le32 num_entries;
+ __le16 key0_ctx_id;
+ __le16 key1_ctx_id;
+ __le16 record_ctx_id;
+ __le16 efc_ctx_id;
+ __le16 fid_ctx_id;
+ u8 unused_2[5];
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_op_input (size:192b/24B) */
+struct hwrm_cfa_eem_op_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_OP_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_OP_REQ_FLAGS_PATH_RX 0x2UL
+ __le16 unused_0;
+ __le16 op;
+ #define CFA_EEM_OP_REQ_OP_RESERVED 0x0UL
+ #define CFA_EEM_OP_REQ_OP_EEM_DISABLE 0x1UL
+ #define CFA_EEM_OP_REQ_OP_EEM_ENABLE 0x2UL
+ #define CFA_EEM_OP_REQ_OP_EEM_CLEANUP 0x3UL
+ #define CFA_EEM_OP_REQ_OP_LAST CFA_EEM_OP_REQ_OP_EEM_CLEANUP
+};
+
+/* hwrm_cfa_eem_op_output (size:128b/16B) */
+struct hwrm_cfa_eem_op_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
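+/*
+ * Note (sketch, not generated text): the EEM commands form a small state
+ * machine; a plausible bring-up order is qcaps (discover entry sizes and
+ * supported tables), cfg (hand firmware the key0/key1/record/efc/fid
+ * context IDs), then op with CFA_EEM_OP_REQ_OP_EEM_ENABLE, with teardown
+ * reversing it via EEM_DISABLE and EEM_CLEANUP.
+ */
+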
+/* hwrm_cfa_adv_flow_mgnt_qcaps_input (size:256b/32B) */
+struct hwrm_cfa_adv_flow_mgnt_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[4];
+};
+
+/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */
+struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED 0x100000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED 0x200000UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ u8 tunnel_next_proto;
+ u8 unused_0[6];
+};
+
+/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ __be16 tunnel_dst_port_val;
+ u8 upar_in_use;
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR0 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR1 0x2UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR2 0x4UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR3 0x8UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR4 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR5 0x20UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR6 0x40UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR7 0x80UL
+ u8 status;
+ #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_CHIP_LEVEL 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_FUNC_LEVEL 0x2UL
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ u8 tunnel_next_proto;
+ __be16 tunnel_dst_port_val;
+ u8 unused_0[4];
+};
+
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tunnel_dst_port_id;
+ u8 error_info;
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED 0x3UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED
+ u8 upar_in_use;
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR2 0x4UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR3 0x8UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR4 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR5 0x20UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR6 0x40UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR7 0x80UL
+ u8 unused_0[3];
+ u8 valid;
+};
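+
+/*
+ * Illustrative use, assuming the bnxt hwrm_req_init()/hwrm_req_hold()/
+ * hwrm_req_send()/hwrm_req_drop() helpers and the HWRM_* request id
+ * defined earlier in this file (a sketch, not a normative sequence).
+ * Note that tunnel_dst_port_val is a UDP port in network byte order
+ * (__be16) while the returned id is little endian:
+ *
+ *	struct hwrm_tunnel_dst_port_alloc_output *resp;
+ *	struct hwrm_tunnel_dst_port_alloc_input *req;
+ *	int rc;
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
+ *	if (rc)
+ *		return rc;
+ *	req->tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ *	req->tunnel_dst_port_val = port;	// already __be16
+ *	resp = hwrm_req_hold(bp, req);
+ *	rc = hwrm_req_send(bp, req);
+ *	if (!rc)
+ *		id = le16_to_cpu(resp->tunnel_dst_port_id);
+ *	hwrm_req_drop(bp, req);
+ */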
+
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 tunnel_type;
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ u8 tunnel_next_proto;
+ __le16 tunnel_dst_port_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 error_info;
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_SUCCESS 0x0UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_OWNER 0x1UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED 0x2UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED
+ u8 unused_1[6];
+ u8 valid;
+};
+
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 tpa_pkts;
+ __le64 tpa_bytes;
+ __le64 tpa_events;
+ __le64 tpa_aborts;
+};
+
+/* ctx_hw_stats_ext (size:1408b/176B) */
+struct ctx_hw_stats_ext {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+};
+
+/* hwrm_stat_ctx_alloc_input (size:384b/48B) */
+struct hwrm_stat_ctx_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 stats_dma_addr;
+ __le32 update_period_ms;
+ u8 stat_ctx_flags;
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_DUP_HOST_BUF 0x2UL
+ u8 unused_0;
+ __le16 stats_dma_length;
+ __le16 flags;
+ #define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL
+ __le16 steering_tag;
+ __le32 stat_ctx_id;
+ __le16 alloc_seq_id;
+ u8 unused_1[6];
+};
+
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
+struct hwrm_stat_ctx_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
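+
+/*
+ * Illustrative allocation sketch (assumes the hwrm_req_* helpers and a
+ * DMA-coherent stats buffer already mapped at `mapping`).  The buffer
+ * behind stats_dma_addr is laid out as ctx_hw_stats or
+ * ctx_hw_stats_ext above, and stats_dma_length should match the size
+ * the firmware is expected to DMA on each update period:
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
+ *	...
+ *	req->stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats));
+ *	req->update_period_ms = cpu_to_le32(1000);
+ *	req->stats_dma_addr = cpu_to_le64(mapping);
+ */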
+
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
+struct hwrm_stat_ctx_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
+struct hwrm_stat_ctx_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 stat_ctx_id;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 flags;
+ #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
+struct hwrm_stat_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 rx_agg_pkts;
+ __le64 rx_agg_bytes;
+ __le64 rx_agg_events;
+ __le64 rx_agg_aborts;
+ u8 unused_0[7];
+ u8 valid;
+};
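+
+/*
+ * When STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK is set in the request,
+ * the firmware is expected to return the width mask of each counter
+ * (all ones up to the implemented bit width) rather than the current
+ * counter values, letting the host size its wrap-around handling.
+ * This interpretation is inferred from the flag name and from the
+ * matching COUNTER_MASK flags on the other HWRM stats queries.
+ */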
+
+/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ext_ctx_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 flags;
+ #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[3];
+};
+
+/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */
+struct hwrm_stat_ext_ctx_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_error_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
+struct hwrm_stat_ctx_clr_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 stat_ctx_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
+struct hwrm_stat_ctx_clr_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_pcie_qstats_input (size:256b/32B) */
+struct hwrm_pcie_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 pcie_stat_size;
+ u8 unused_0[6];
+ __le64 pcie_stat_host_addr;
+};
+
+/* hwrm_pcie_qstats_output (size:128b/16B) */
+struct hwrm_pcie_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pcie_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* pcie_ctx_hw_stats (size:768b/96B) */
+struct pcie_ctx_hw_stats {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+};
+
+/* pcie_ctx_hw_stats_v2 (size:4544b/568B) */
+struct pcie_ctx_hw_stats_v2 {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+ __le32 pcie_tl_credit_nph_histogram[8];
+ __le32 pcie_tl_credit_ph_histogram[8];
+ __le32 pcie_tl_credit_pd_histogram[8];
+ __le32 pcie_cmpl_latest_times[4];
+ __le32 pcie_cmpl_longest_time;
+ __le32 pcie_cmpl_shortest_time;
+ __le32 unused_0[2];
+ __le32 pcie_cmpl_latest_headers[4][4];
+ __le32 pcie_cmpl_longest_headers[4][4];
+ __le32 pcie_cmpl_shortest_headers[4][4];
+ __le32 pcie_wr_latency_histogram[12];
+ __le32 pcie_wr_latency_all_normal_count;
+ __le32 unused_1;
+ __le64 pcie_posted_packet_count;
+ __le64 pcie_non_posted_packet_count;
+ __le64 pcie_other_packet_count;
+ __le64 pcie_blocked_packet_count;
+ __le64 pcie_cmpl_packet_count;
+ __le32 pcie_rd_latency_histogram[12];
+ __le32 pcie_rd_latency_all_normal_count;
+ __le32 unused_2;
+};
+
+/* hwrm_stat_generic_qstats_input (size:256b/32B) */
+struct hwrm_stat_generic_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 generic_stat_size;
+ u8 flags;
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 generic_stat_host_addr;
+};
+
+/* hwrm_stat_generic_qstats_output (size:128b/16B) */
+struct hwrm_stat_generic_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 generic_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* generic_sw_hw_stats (size:1472b/184B) */
+struct generic_sw_hw_stats {
+ __le64 pcie_statistics_tx_tlp;
+ __le64 pcie_statistics_rx_tlp;
+ __le64 pcie_credit_fc_hdr_posted;
+ __le64 pcie_credit_fc_hdr_nonposted;
+ __le64 pcie_credit_fc_hdr_cmpl;
+ __le64 pcie_credit_fc_data_posted;
+ __le64 pcie_credit_fc_data_nonposted;
+ __le64 pcie_credit_fc_data_cmpl;
+ __le64 pcie_credit_fc_tgt_nonposted;
+ __le64 pcie_credit_fc_tgt_data_posted;
+ __le64 pcie_credit_fc_tgt_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_data_posted;
+ __le64 pcie_cmpl_longest;
+ __le64 pcie_cmpl_shortest;
+ __le64 cache_miss_count_cfcq;
+ __le64 cache_miss_count_cfcs;
+ __le64 cache_miss_count_cfcc;
+ __le64 cache_miss_count_cfcm;
+ __le64 hw_db_recov_dbs_dropped;
+ __le64 hw_db_recov_drops_serviced;
+ __le64 hw_db_recov_dbs_recovered;
+ __le64 hw_db_recov_oo_drop_count;
+};
+
+/* hwrm_fw_reset_input (size:192b/24B) */
+struct hwrm_fw_reset_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION 0x8UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION
+ u8 selfrst_status;
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
+ u8 host_idx;
+ u8 flags;
+ #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
+ #define FW_RESET_REQ_FLAGS_FW_ACTIVATION 0x2UL
+ u8 unused_0[4];
+};
+
+/* hwrm_fw_reset_output (size:128b/16B) */
+struct hwrm_fw_reset_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
+ #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE
+ u8 unused_0[6];
+ u8 valid;
+};
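+
+/*
+ * Sketch of a graceful chip-level firmware reset request (illustrative
+ * only; assumes the hwrm_req_* helpers):
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
+ *	...
+ *	req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
+ *	req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
+ *	req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
+ *	rc = hwrm_req_send(bp, req);
+ *
+ * The selfrst_status in the response reports which self-reset action
+ * the firmware actually scheduled.
+ */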
+
+/* hwrm_fw_qstatus_input (size:192b/24B) */
+struct hwrm_fw_qstatus_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_qstatus_output (size:128b/16B) */
+struct hwrm_fw_qstatus_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
+ u8 nvm_option_action_status;
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_set_time_input (size:256b/32B) */
+struct hwrm_fw_set_time_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 year;
+ #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+ #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 unused_0;
+ __le16 millisecond;
+ __le16 zone;
+ #define FW_SET_TIME_REQ_ZONE_UTC 0
+ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 65535
+ #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
+ u8 unused_1[4];
+};
+
+/* hwrm_fw_set_time_output (size:128b/16B) */
+struct hwrm_fw_set_time_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_struct_hdr (size:128b/16B) */
+struct hwrm_struct_hdr {
+ __le16 struct_id;
+ #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
+ #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
+ #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
+ #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
+ #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
+ #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
+ #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_COUNT 0x12cUL
+ #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND 0x12dUL
+ #define STRUCT_HDR_STRUCT_ID_DBG_TOKEN_CLAIMS 0x190UL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_DBG_TOKEN_CLAIMS
+ __le16 len;
+ u8 version;
+ #define STRUCT_HDR_VERSION_0 0x0UL
+ #define STRUCT_HDR_VERSION_1 0x1UL
+ #define STRUCT_HDR_VERSION_LAST STRUCT_HDR_VERSION_1
+ u8 count;
+ __le16 subtype;
+ __le16 next_offset;
+ #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL
+ u8 unused_0[6];
+};
+
+/* hwrm_struct_data_dcbx_app (size:64b/8B) */
+struct hwrm_struct_data_dcbx_app {
+ __be16 protocol_id;
+ u8 protocol_selector;
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT
+ u8 priority;
+ u8 valid;
+ u8 unused_0[3];
+};
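+
+/*
+ * Structured data travels as a host DMA buffer holding an
+ * hwrm_struct_hdr followed by `count` instances of the matching
+ * payload struct; next_offset chains multiple header+payload groups,
+ * with STRUCT_HDR_NEXT_OFFSET_LAST terminating the chain.  A sketch
+ * for a single DCBX app entry (illustrative; `data` is a mapped
+ * buffer and `req` an hwrm_fw_set_structured_data_input):
+ *
+ *	struct hwrm_struct_hdr *hdr = data;
+ *	struct hwrm_struct_data_dcbx_app *app = data + sizeof(*hdr);
+ *
+ *	hdr->struct_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
+ *	hdr->len = cpu_to_le16(sizeof(*app));
+ *	hdr->count = 1;
+ *	app->protocol_id = cpu_to_be16(proto);
+ *	...
+ *	req->src_data_addr = cpu_to_le64(mapping);
+ *	req->data_len = cpu_to_le16(sizeof(*hdr) + sizeof(*app));
+ *	req->hdr_cnt = 1;
+ */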
+
+/* hwrm_fw_set_structured_data_input (size:256b/32B) */
+struct hwrm_fw_set_structured_data_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ u8 hdr_cnt;
+ u8 unused_0[5];
+};
+
+/* hwrm_fw_set_structured_data_output (size:128b/16B) */
+struct hwrm_fw_set_structured_data_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
+struct hwrm_fw_set_structured_data_cmd_err {
+ u8 code;
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_ALREADY_ADDED 0x4UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_INST_IN_PROG 0x5UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_INST_IN_PROG
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_get_structured_data_input (size:256b/32B) */
+struct hwrm_fw_get_structured_data_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 structure_id;
+ __le16 subtype;
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_SUPPORTED 0x320UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_ACTIVE 0x321UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_ACTIVE
+ u8 count;
+ u8 unused_0;
+};
+
+/* hwrm_fw_get_structured_data_output (size:128b/16B) */
+struct hwrm_fw_get_structured_data_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 hdr_cnt;
+ u8 unused_0[6];
+ u8 valid;
+};
+
+/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */
+struct hwrm_fw_get_structured_data_cmd_err {
+ u8 code;
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_livepatch_query_input (size:192b/24B) */
+struct hwrm_fw_livepatch_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 fw_target;
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_LAST FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_livepatch_query_output (size:640b/80B) */
+struct hwrm_fw_livepatch_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ char install_ver[32];
+ char active_ver[32];
+ __le16 status_flags;
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 0x1UL
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 0x2UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_input (size:256b/32B) */
+struct hwrm_fw_livepatch_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 opcode;
+ #define FW_LIVEPATCH_REQ_OPCODE_ACTIVATE 0x1UL
+ #define FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE 0x2UL
+ #define FW_LIVEPATCH_REQ_OPCODE_LAST FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE
+ u8 fw_target;
+ #define FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_LAST FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW
+ u8 loadtype;
+ #define FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL 0x1UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT 0x2UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_LAST FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT
+ u8 flags;
+ __le32 patch_len;
+ __le64 host_addr;
+};
+
+/* hwrm_fw_livepatch_output (size:128b/16B) */
+struct hwrm_fw_livepatch_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_cmd_err (size:64b/8B) */
+struct hwrm_fw_livepatch_cmd_err {
+ u8 code;
+ #define FW_LIVEPATCH_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE 0x1UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_TARGET 0x2UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED 0x3UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED 0x4UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED 0x5UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL 0x6UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER 0x7UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE 0x8UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED 0x9UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_LAST FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED
+ u8 unused_0[7];
+};
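+
+/*
+ * Typical livepatch flow, sketched under the assumption that the patch
+ * image has already been staged in NVM: query status_flags with
+ * hwrm_fw_livepatch_query for each fw_target; if STATUS_FLAGS_INSTALL
+ * is set but STATUS_FLAGS_ACTIVE is not, send hwrm_fw_livepatch with
+ * opcode ACTIVATE and loadtype NVM_INSTALL to apply it.  The cmd_err
+ * code above refines any HWRM error returned by activate/deactivate.
+ */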
+
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
+struct hwrm_exec_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
+struct hwrm_exec_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
+struct hwrm_reject_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 encap_request[26];
+ __le16 encap_resp_target_id;
+ u8 unused_0[6];
+};
+
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
+struct hwrm_reject_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_resp_input (size:1024b/128B) */
+struct hwrm_fwd_resp_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_resp_target_id;
+ __le16 encap_resp_cmpl_ring;
+ __le16 encap_resp_len;
+ u8 unused_0;
+ u8 unused_1;
+ __le64 encap_resp_addr;
+ __le32 encap_resp[24];
+};
+
+/* hwrm_fwd_resp_output (size:128b/16B) */
+struct hwrm_fwd_resp_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
+struct hwrm_fwd_async_event_cmpl_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 encap_async_event_target_id;
+ u8 unused_0[6];
+ __le32 encap_async_event_cmpl[4];
+};
+
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
+struct hwrm_fwd_async_event_cmpl_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_temp_monitor_query_input (size:128b/16B) */
+struct hwrm_temp_monitor_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_temp_monitor_query_output (size:192b/24B) */
+struct hwrm_temp_monitor_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 temp;
+ u8 phy_temp;
+ u8 om_temp;
+ u8 flags;
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE 0x20UL
+ u8 temp2;
+ u8 phy_temp2;
+ u8 om_temp2;
+ u8 warn_threshold;
+ u8 critical_threshold;
+ u8 fatal_threshold;
+ u8 shutdown_threshold;
+ u8 unused_0[4];
+ u8 valid;
+};
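+
+/*
+ * Temperatures are reported in degrees Celsius, and each reading has a
+ * corresponding *_NOT_AVAILABLE/*_NOT_PRESENT flag, so a consumer
+ * should gate on the flags first (illustrative sketch):
+ *
+ *	if (!(resp->flags &
+ *	      TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE))
+ *		millicelsius = resp->temp * 1000;
+ *
+ * The warn/critical/fatal/shutdown thresholds are meaningful only when
+ * FLAGS_THRESHOLD_VALUES_AVAILABLE is set.
+ */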
+
+/* hwrm_wol_filter_alloc_input (size:512b/64B) */
+struct hwrm_wol_filter_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
+ __le16 port_id;
+ u8 wol_type;
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID
+ u8 unused_0[5];
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_buf_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[4];
+ __le64 pattern_buf_addr;
+ __le64 pattern_mask_addr;
+};
+
+/* hwrm_wol_filter_alloc_output (size:128b/16B) */
+struct hwrm_wol_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 unused_0[6];
+ u8 valid;
+};
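+
+/*
+ * Sketch of allocating a magic-packet WoL filter (illustrative only;
+ * assumes the hwrm_req_* helpers):
+ *
+ *	rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
+ *	...
+ *	req->port_id = cpu_to_le16(port_id);
+ *	req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
+ *	req->enables =
+ *		cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
+ *	memcpy(req->mac_address, dev_addr, ETH_ALEN);
+ *	rc = hwrm_req_send(bp, req);
+ *
+ * The wol_filter_id in the response is the handle that
+ * hwrm_wol_filter_free takes back.
+ */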
+
+/* hwrm_wol_filter_free_input (size:256b/32B) */
+struct hwrm_wol_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
+ __le32 enables;
+ #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
+ __le16 port_id;
+ u8 wol_filter_id;
+ u8 unused_0[5];
+};
+
+/* hwrm_wol_filter_free_output (size:128b/16B) */
+struct hwrm_wol_filter_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_wol_filter_qcfg_input (size:448b/56B) */
+struct hwrm_wol_filter_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 handle;
+ u8 unused_0[4];
+ __le64 pattern_buf_addr;
+ __le16 pattern_buf_size;
+ u8 unused_1[6];
+ __le64 pattern_mask_addr;
+ __le16 pattern_mask_size;
+ u8 unused_2[6];
+};
+
+/* hwrm_wol_filter_qcfg_output (size:256b/32B) */
+struct hwrm_wol_filter_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 next_handle;
+ u8 wol_filter_id;
+ u8 wol_type;
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID
+ __le32 unused_0;
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_size;
+ __le16 pattern_mask_size;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_wol_reason_qcfg_input (size:320b/40B) */
+struct hwrm_wol_reason_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
+ __le64 wol_pkt_buf_addr;
+ __le16 wol_pkt_buf_size;
+ u8 unused_1[6];
+};
+
+/* hwrm_wol_reason_qcfg_output (size:128b/16B) */
+struct hwrm_wol_reason_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 wol_reason;
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID
+ u8 wol_pkt_len;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_dbg_read_direct_input (size:256b/32B) */
+struct hwrm_dbg_read_direct_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 read_addr;
+ __le32 read_len32;
+};
+
+/* hwrm_dbg_read_direct_output (size:128b/16B) */
+struct hwrm_dbg_read_direct_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 crc32;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_qcaps_input (size:192b/24B) */
+struct hwrm_dbg_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_dbg_qcaps_output (size:192b/24B) */
+struct hwrm_dbg_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_component_disable_caps;
+ #define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
+ __le32 flags;
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
+ #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_DDR 0x10UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_CAPTURE 0x20UL
+ #define DBG_QCAPS_RESP_FLAGS_PTRACE 0x40UL
+ #define DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED 0x80UL
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_qcfg_input (size:192b/24B) */
+struct hwrm_dbg_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 flags;
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK 0x3UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT 0
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_NVM 0x0UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR 0x1UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR 0x2UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_LAST DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+ __le32 coredump_component_disable_flags;
+ #define DBG_QCFG_REQ_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM 0x1UL
+};
+
+/* hwrm_dbg_qcfg_output (size:256b/32B) */
+struct hwrm_dbg_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_size;
+ __le32 flags;
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG 0x1UL
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG_SECONDARY 0x2UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE 0x4UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE_SECONDARY 0x8UL
+ #define DBG_QCFG_RESP_FLAGS_DEBUG_NOTIFY 0x10UL
+ #define DBG_QCFG_RESP_FLAGS_JTAG_DEBUG 0x20UL
+ __le16 async_cmpl_ring;
+ u8 unused_2[2];
+ __le32 crashdump_size;
+ u8 unused_3[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */
+struct hwrm_dbg_crashdump_medium_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 output_dest_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL
+ __le16 pg_size_lvl;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5
+ __le32 size;
+ __le32 coredump_component_disable_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL
+ __le32 unused_0;
+ __le64 pbl;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */
+struct hwrm_dbg_crashdump_medium_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_1[7];
+ u8 valid;
+};
+
+/* coredump_segment_record (size:128b/16B) */
+struct coredump_segment_record {
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 max_instances;
+ u8 version_hi;
+ u8 version_low;
+ u8 seg_flags;
+ u8 compress_flags;
+ #define SFLAG_COMPRESSED_ZLIB 0x1UL
+ u8 unused_0[2];
+ __le32 segment_len;
+};
+
+/* hwrm_dbg_coredump_list_input (size:256b/32B) */
+struct hwrm_dbg_coredump_list_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le16 seq_no;
+ u8 flags;
+ #define DBG_COREDUMP_LIST_REQ_FLAGS_CRASHDUMP 0x1UL
+ u8 unused_0[1];
+};
+
+/* hwrm_dbg_coredump_list_output (size:128b/16B) */
+struct hwrm_dbg_coredump_list_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 total_segments;
+ __le16 data_len;
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */
+struct hwrm_dbg_coredump_initiate_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_0;
+ u8 seg_flags;
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL
+ u8 unused_1[7];
+};
+
+/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */
+struct hwrm_dbg_coredump_initiate_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* coredump_data_hdr (size:128b/16B) */
+struct coredump_data_hdr {
+ __le32 address;
+ __le32 flags_length;
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL
+ __le32 instance;
+ __le32 next_offset;
+};
+
+/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */
+struct hwrm_dbg_coredump_retrieve_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+ __le32 unused_0;
+ __le16 component_id;
+ __le16 segment_id;
+ __le16 instance;
+ __le16 unused_1;
+ u8 seg_flags;
+ u8 unused_2;
+ __le16 unused_3;
+ __le32 unused_4;
+ __le32 seq_no;
+ __le32 unused_5;
+};
+
+/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */
+struct hwrm_dbg_coredump_retrieve_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL
+ u8 unused_0;
+ __le16 data_len;
+ u8 unused_1[3];
+ u8 valid;
+};
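+
+/*
+ * Coredump collection appears to be a three-step flow: list the
+ * available segments (coredump_segment_record entries are DMA'd to
+ * host_dest_addr, with RESP_FLAGS_MORE indicating a further list
+ * page), initiate capture of a component_id/segment_id/instance, then
+ * retrieve the data in chunks, advancing seq_no while the retrieve
+ * response still has DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE set.
+ */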
+
+/* hwrm_dbg_ring_info_get_input (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 ring_type;
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL
+ #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ
+ u8 unused_0[3];
+ __le32 fw_ring_id;
+};
+
+/* hwrm_dbg_ring_info_get_output (size:192b/24B) */
+struct hwrm_dbg_ring_info_get_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 producer_index;
+ __le32 consumer_index;
+ __le32 cag_vector_ctrl;
+ __le16 st_tag;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_dbg_log_buffer_flush_input (size:192b/24B) */
+struct hwrm_dbg_log_buffer_flush_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE 0x0UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE 0x1UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE 0x2UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE 0x3UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE 0x4UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE 0x5UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE 0x6UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE 0x7UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE 0x8UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE 0x9UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE 0xaUL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE 0xcUL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_LAST DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE
+ u8 unused_1[2];
+ __le32 flags;
+ #define DBG_LOG_BUFFER_FLUSH_REQ_FLAGS_FLUSH_ALL_BUFFERS 0x1UL
+};
+
+/* hwrm_dbg_log_buffer_flush_output (size:128b/16B) */
+struct hwrm_dbg_log_buffer_flush_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 current_buffer_offset;
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_read_input (size:320b/40B) */
+struct hwrm_nvm_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0[2];
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_read_output (size:128b/16B) */
+struct hwrm_nvm_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
+struct hwrm_nvm_get_dir_entries_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+};
+
+/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
+struct hwrm_nvm_get_dir_entries_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
+struct hwrm_nvm_get_dir_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
+struct hwrm_nvm_get_dir_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 entries;
+ __le32 entry_length;
+ u8 unused_0[7];
+ u8 valid;
+};
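+
+/*
+ * Enumerating the NVM directory is a two-step read (illustrative
+ * sketch): hwrm_nvm_get_dir_info returns the entry count and per-entry
+ * length, then the caller allocates entries * entry_length bytes of
+ * DMA memory and passes it as host_dest_addr to
+ * hwrm_nvm_get_dir_entries:
+ *
+ *	len = le32_to_cpu(info->entries) * le32_to_cpu(info->entry_length);
+ *	buf = dma_alloc_coherent(&pdev->dev, len, &mapping, GFP_KERNEL);
+ *	...
+ *	req->host_dest_addr = cpu_to_le64(mapping);
+ */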
+
+/* hwrm_nvm_write_input (size:448b/56B) */
+struct hwrm_nvm_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 dir_data_length;
+ __le16 option;
+ __le16 flags;
+ #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL
+ #define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL
+ #define NVM_WRITE_REQ_FLAGS_SKIP_CRID_CHECK 0x8UL
+ __le32 dir_item_length;
+ __le32 offset;
+ __le32 len;
+ __le32 unused_0;
+};
+
+/* hwrm_nvm_write_output (size:128b/16B) */
+struct hwrm_nvm_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 valid;
+};
+
+/* hwrm_nvm_write_cmd_err (size:64b/8B) */
+struct hwrm_nvm_write_cmd_err {
+ u8 code;
+ #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_WRITE_CMD_ERR_CODE_WRITE_FAILED 0x3UL
+ #define NVM_WRITE_CMD_ERR_CODE_REQD_ERASE_FAILED 0x4UL
+ #define NVM_WRITE_CMD_ERR_CODE_VERIFY_FAILED 0x5UL
+ #define NVM_WRITE_CMD_ERR_CODE_INVALID_HEADER 0x6UL
+ #define NVM_WRITE_CMD_ERR_CODE_UPDATE_DIGEST_FAILED 0x7UL
+ #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_UPDATE_DIGEST_FAILED
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_modify_input (size:320b/40B) */
+struct hwrm_nvm_modify_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_src_addr;
+ __le16 dir_idx;
+ __le16 flags;
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL
+ #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL
+ __le32 offset;
+ __le32 len;
+ u8 unused_1[4];
+};
+
+/* hwrm_nvm_modify_output (size:128b/16B) */
+struct hwrm_nvm_modify_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL
+ __le16 dir_idx;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 opt_ordinal;
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT
+ u8 unused_0[3];
+};
+
+/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 dir_item_length;
+ __le32 dir_data_length;
+ __le32 fw_ver;
+ __le16 dir_ordinal;
+ __le16 dir_idx;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
+struct hwrm_nvm_erase_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_idx;
+ u8 unused_0[6];
+};
+
+/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_erase_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_dev_info_input (size:192b/24B) */
+struct hwrm_nvm_get_dev_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_get_dev_info_output (size:768b/96B) */
+struct hwrm_nvm_get_dev_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 manufacturer_id;
+ __le16 device_id;
+ __le32 sector_size;
+ __le32 nvram_size;
+ __le32 reserved_size;
+ __le32 available_size;
+ u8 nvm_cfg_ver_maj;
+ u8 nvm_cfg_ver_min;
+ u8 nvm_cfg_ver_upd;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID 0x1UL
+ char pkg_name[16];
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ __le16 netctrl_fw_major;
+ __le16 netctrl_fw_minor;
+ __le16 netctrl_fw_build;
+ __le16 netctrl_fw_patch;
+ __le16 srt2_fw_major;
+ __le16 srt2_fw_minor;
+ __le16 srt2_fw_build;
+ __le16 srt2_fw_patch;
+ u8 security_soc_fw_major;
+ u8 security_soc_fw_minor;
+ u8 security_soc_fw_build;
+ u8 security_soc_fw_patch;
+ u8 unused_0[3];
+ u8 valid;
+};
+
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_mod_dir_entry_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL
+ __le16 dir_idx;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ __le16 dir_attr;
+ __le32 checksum;
+};
+
+/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_mod_dir_entry_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_verify_update_input (size:192b/24B) */
+struct hwrm_nvm_verify_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 dir_type;
+ __le16 dir_ordinal;
+ __le16 dir_ext;
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_verify_update_output (size:128b/16B) */
+struct hwrm_nvm_verify_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_input (size:192b/24B) */
+struct hwrm_nvm_install_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 install_type;
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL
+ __le16 flags;
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL
+ #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL
+ u8 unused_0[2];
+};
+
+/* hwrm_nvm_install_update_output (size:192b/24B) */
+struct hwrm_nvm_install_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 installed_items;
+ u8 result;
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_FAILURE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_MALLOC_FAILURE 0xfdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER 0xfbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER 0xf3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE 0xf2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER 0xecUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE 0xebUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM 0xeaUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH 0xe9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST 0xe8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER 0xe7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM 0xe6UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM 0xe5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH 0xe4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE 0xe1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV 0xceUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID 0xcdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR 0xccUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID 0xcbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM 0xc5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM 0xc4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM 0xc3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR 0xb9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR 0xb8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR 0xb7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND 0xb0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED 0xa7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED
+ u8 problem_item;
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE
+ u8 reset_required;
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
+struct hwrm_nvm_install_update_cmd_err {
+ u8 code;
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_DEFRAG_FAILED 0x5UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN_DIR_ERR 0x6UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN_DIR_ERR
+ u8 unused_0[7];
+};
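+
+/*
+ * A typical package update writes the package to NVM (hwrm_nvm_write
+ * with the update directory type) and then commits it with
+ * hwrm_nvm_install_update using install_type ALL.  On failure, result
+ * and problem_item in the response identify the offending item, the
+ * cmd_err code above refines the HWRM error (e.g. anti-rollback), and
+ * reset_required tells the host whether a PCI or power reset is needed
+ * before the new image takes effect.
+ */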
+
+/* hwrm_nvm_get_variable_input (size:320b/40B) */
+struct hwrm_nvm_get_variable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
+ #define NVM_GET_VARIABLE_REQ_FLAGS_VALIDATE_OPT_VALUE 0x2UL
+ u8 unused_0;
+};
+
+/* hwrm_nvm_get_variable_output (size:128b/16B) */
+struct hwrm_nvm_get_variable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
+ u8 flags;
+ #define NVM_GET_VARIABLE_RESP_FLAGS_VALIDATE_OPT_VALUE 0x1UL
+ u8 unused_0[2];
+ u8 valid;
+};
+
+/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_get_variable_cmd_err {
+ u8 code;
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_INDEX_INVALID 0x4UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_ACCESS_DENIED 0x5UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CB_FAILED 0x6UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_INVALID_DATA_LEN 0x7UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_NO_MEM 0x8UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_NO_MEM
+ u8 unused_0[7];
+};
+
+/* hwrm_nvm_set_variable_input (size:320b/40B) */
+struct hwrm_nvm_set_variable_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le16 data_len;
+ __le16 option_num;
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL
+ #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF
+ __le16 dimensions;
+ __le16 index_0;
+ __le16 index_1;
+ __le16 index_2;
+ __le16 index_3;
+ u8 flags;
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
+ #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
+ u8 unused_0;
+};
+
+/* hwrm_nvm_set_variable_output (size:128b/16B) */
+struct hwrm_nvm_set_variable_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_set_variable_cmd_err {
+ u8 code;
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_ACTION_NOT_SUPPORTED 0x4UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_INDEX_INVALID 0x5UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_ACCESS_DENIED 0x6UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CB_FAILED 0x7UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_INVALID_DATA_LEN 0x8UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_NO_MEM 0x9UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_NO_MEM
+ u8 unused_0[7];
+};
+
+/* hwrm_selftest_qlist_input (size:128b/16B) */
+struct hwrm_selftest_qlist_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_selftest_qlist_output (size:2240b/280B) */
+struct hwrm_selftest_qlist_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_tests;
+ u8 available_tests;
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 offline_tests;
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0;
+ __le16 test_timeout;
+ u8 unused_1[2];
+ char test_name[8][32];
+ u8 eyescope_target_BER_support;
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL
+ #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED
+ u8 unused_2[6];
+ u8 valid;
+};
+
+/* hwrm_selftest_exec_input (size:192b/24B) */
+struct hwrm_selftest_exec_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0[7];
+};
+
+/* hwrm_selftest_exec_output (size:128b/16B) */
+struct hwrm_selftest_exec_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 requested_tests;
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL
+ u8 test_success;
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL
+ u8 unused_0[5];
+ u8 valid;
+};
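
Because requested_tests and test_success share one bit layout, a failed test is simply a requested bit whose success bit is clear; a minimal sketch:

static u8 selftest_failed_mask(const struct hwrm_selftest_exec_output *resp)
{
	/* Bits set here were requested but did not report success. */
	return resp->requested_tests & ~resp->test_success;
}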
+
+/* hwrm_selftest_irq_input (size:128b/16B) */
+struct hwrm_selftest_irq_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_selftest_irq_output (size:128b/16B) */
+struct hwrm_selftest_irq_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* dbc_dbc (size:64b/8B) */
+struct dbc_dbc {
+ __le32 index;
+ #define DBC_DBC_INDEX_MASK 0xffffffUL
+ #define DBC_DBC_INDEX_SFT 0
+ #define DBC_DBC_EPOCH 0x1000000UL
+ #define DBC_DBC_TOGGLE_MASK 0x6000000UL
+ #define DBC_DBC_TOGGLE_SFT 25
+ __le32 type_path_xid;
+ #define DBC_DBC_XID_MASK 0xfffffUL
+ #define DBC_DBC_XID_SFT 0
+ #define DBC_DBC_PATH_MASK 0x3000000UL
+ #define DBC_DBC_PATH_SFT 24
+ #define DBC_DBC_PATH_ROCE (0x0UL << 24)
+ #define DBC_DBC_PATH_L2 (0x1UL << 24)
+ #define DBC_DBC_PATH_ENGINE (0x2UL << 24)
+ #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE
+ #define DBC_DBC_VALID 0x4000000UL
+ #define DBC_DBC_DEBUG_TRACE 0x8000000UL
+ #define DBC_DBC_TYPE_MASK 0xf0000000UL
+ #define DBC_DBC_TYPE_SFT 28
+ #define DBC_DBC_TYPE_SQ (0x0UL << 28)
+ #define DBC_DBC_TYPE_RQ (0x1UL << 28)
+ #define DBC_DBC_TYPE_SRQ (0x2UL << 28)
+ #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28)
+ #define DBC_DBC_TYPE_CQ (0x4UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28)
+ #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28)
+ #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28)
+ #define DBC_DBC_TYPE_NQ (0xaUL << 28)
+ #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28)
+ #define DBC_DBC_TYPE_NQ_MASK (0xeUL << 28)
+ #define DBC_DBC_TYPE_NULL (0xfUL << 28)
+ #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL
+};
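
As a sketch of how these masks compose (assuming the usual kernel byte-order helpers are in scope; real drivers also manage the epoch/toggle bits), an L2 send-queue doorbell might be filled like this:

static void dbc_fill_l2_sq(struct dbc_dbc *db, u32 xid, u32 prod_idx)
{
	/* Producer index lives in the low 24 bits of the first word. */
	db->index = cpu_to_le32(prod_idx & DBC_DBC_INDEX_MASK);
	/* Second word: queue id, path, valid bit and doorbell type. */
	db->type_path_xid = cpu_to_le32((xid & DBC_DBC_XID_MASK) |
					DBC_DBC_PATH_L2 |
					DBC_DBC_VALID |
					DBC_DBC_TYPE_SQ);
}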
+
+/* db_push_start (size:64b/8B) */
+struct db_push_start {
+ u64 db;
+ #define DB_PUSH_START_DB_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_START_DB_INDEX_SFT 0
+ #define DB_PUSH_START_DB_PI_LO_MASK 0xff000000UL
+ #define DB_PUSH_START_DB_PI_LO_SFT 24
+ #define DB_PUSH_START_DB_XID_MASK 0xfffff00000000ULL
+ #define DB_PUSH_START_DB_XID_SFT 32
+ #define DB_PUSH_START_DB_PI_HI_MASK 0xf0000000000000ULL
+ #define DB_PUSH_START_DB_PI_HI_SFT 52
+ #define DB_PUSH_START_DB_TYPE_MASK 0xf000000000000000ULL
+ #define DB_PUSH_START_DB_TYPE_SFT 60
+ #define DB_PUSH_START_DB_TYPE_PUSH_START (0xcULL << 60)
+ #define DB_PUSH_START_DB_TYPE_PUSH_END (0xdULL << 60)
+ #define DB_PUSH_START_DB_TYPE_LAST DB_PUSH_START_DB_TYPE_PUSH_END
+};
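
The 12-bit producer index is split across PI_LO (bits 24-31) and PI_HI (bits 52-55); a sketch of composing the 64-bit value from the masks above:

static u64 db_push_start_val(u32 xid, u32 pi, u32 index)
{
	u64 db = index & DB_PUSH_START_DB_INDEX_MASK;

	/* Low 8 bits of the producer index. */
	db |= ((u64)pi << DB_PUSH_START_DB_PI_LO_SFT) &
	      DB_PUSH_START_DB_PI_LO_MASK;
	/* Queue id (20 bits at bit 32). */
	db |= ((u64)xid << DB_PUSH_START_DB_XID_SFT) &
	      DB_PUSH_START_DB_XID_MASK;
	/* High 4 bits of the producer index. */
	db |= ((u64)(pi >> 8) << DB_PUSH_START_DB_PI_HI_SFT) &
	      DB_PUSH_START_DB_PI_HI_MASK;
	return db | DB_PUSH_START_DB_TYPE_PUSH_START;
}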
+
+/* db_push_end (size:64b/8B) */
+struct db_push_end {
+ u64 db;
+ #define DB_PUSH_END_DB_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_END_DB_INDEX_SFT 0
+ #define DB_PUSH_END_DB_PI_LO_MASK 0xff000000UL
+ #define DB_PUSH_END_DB_PI_LO_SFT 24
+ #define DB_PUSH_END_DB_XID_MASK 0xfffff00000000ULL
+ #define DB_PUSH_END_DB_XID_SFT 32
+ #define DB_PUSH_END_DB_PI_HI_MASK 0xf0000000000000ULL
+ #define DB_PUSH_END_DB_PI_HI_SFT 52
+ #define DB_PUSH_END_DB_PATH_MASK 0x300000000000000ULL
+ #define DB_PUSH_END_DB_PATH_SFT 56
+ #define DB_PUSH_END_DB_PATH_ROCE (0x0ULL << 56)
+ #define DB_PUSH_END_DB_PATH_L2 (0x1ULL << 56)
+ #define DB_PUSH_END_DB_PATH_ENGINE (0x2ULL << 56)
+ #define DB_PUSH_END_DB_PATH_LAST DB_PUSH_END_DB_PATH_ENGINE
+ #define DB_PUSH_END_DB_DEBUG_TRACE 0x800000000000000ULL
+ #define DB_PUSH_END_DB_TYPE_MASK 0xf000000000000000ULL
+ #define DB_PUSH_END_DB_TYPE_SFT 60
+ #define DB_PUSH_END_DB_TYPE_PUSH_START (0xcULL << 60)
+ #define DB_PUSH_END_DB_TYPE_PUSH_END (0xdULL << 60)
+ #define DB_PUSH_END_DB_TYPE_LAST DB_PUSH_END_DB_TYPE_PUSH_END
+};
+
+/* db_push_info (size:64b/8B) */
+struct db_push_info {
+ u32 push_size_push_index;
+ #define DB_PUSH_INFO_PUSH_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_INFO_PUSH_INDEX_SFT 0
+ #define DB_PUSH_INFO_PUSH_SIZE_MASK 0x1f000000UL
+ #define DB_PUSH_INFO_PUSH_SIZE_SFT 24
+ u32 reserved32;
+};
+
+/* fw_status_reg (size:32b/4B) */
+struct fw_status_reg {
+ u32 fw_status;
+ #define FW_STATUS_REG_CODE_MASK 0xffffUL
+ #define FW_STATUS_REG_CODE_SFT 0
+ #define FW_STATUS_REG_CODE_READY 0x8000UL
+ #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY
+ #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL
+ #define FW_STATUS_REG_RECOVERABLE 0x20000UL
+ #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
+ #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
+ #define FW_STATUS_REG_SHUTDOWN 0x100000UL
+ #define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL
+ #define FW_STATUS_REG_RECOVERING 0x400000UL
+ #define FW_STATUS_REG_MANU_DEBUG_STATUS 0x800000UL
+};
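
A sketch of interpreting a raw status word read from this register, using only the code and flag bits defined above:

static bool fw_is_healthy(u32 fw_status)
{
	u32 code = fw_status & FW_STATUS_REG_CODE_MASK;

	/* Firmware must report READY and not flag a degraded image. */
	return code == FW_STATUS_REG_CODE_READY &&
	       !(fw_status & FW_STATUS_REG_IMAGE_DEGRADED);
}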
+
+/* hcomm_status (size:64b/8B) */
+struct hcomm_status {
+ u32 sig_ver;
+ #define HCOMM_STATUS_VER_MASK 0xffUL
+ #define HCOMM_STATUS_VER_SFT 0
+ #define HCOMM_STATUS_VER_LATEST 0x1UL
+ #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST
+ #define HCOMM_STATUS_SIGNATURE_MASK 0xffffff00UL
+ #define HCOMM_STATUS_SIGNATURE_SFT 8
+ #define HCOMM_STATUS_SIGNATURE_VAL (0x484353UL << 8)
+ #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL
+ u32 fw_status_loc;
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK 0x3UL
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC 0x1UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 0x2UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 0x3UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1
+ #define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
+ #define HCOMM_STATUS_TRUE_OFFSET_SFT 2
+};
+#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
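
A sketch of validating the signature and decoding where the firmware status word lives; since the true offset is stored 4-byte aligned, masking recovers the byte offset directly:

static bool hcomm_status_decode(const struct hcomm_status *hs,
				u32 *addr_space, u32 *offset)
{
	if ((hs->sig_ver & HCOMM_STATUS_SIGNATURE_MASK) !=
	    HCOMM_STATUS_SIGNATURE_VAL)
		return false;	/* not a valid hcomm_status block */
	*addr_space = hs->fw_status_loc & HCOMM_STATUS_TRUE_ADDR_SPACE_MASK;
	*offset = hs->fw_status_loc & HCOMM_STATUS_TRUE_OFFSET_MASK;
	return true;
}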
+
+#endif /* _BNXT_HSI_H_ */
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index 2696eb0fc149..25df9260d206 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -7,8 +7,19 @@
* Author: Masami Hiramatsu <mhiramat@kernel.org>
*/
+#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/types.h>
+bool __init cmdline_has_extra_options(void);
+#else /* !__KERNEL__ */
+/*
+ * NOTE: This is only for tools/bootconfig, because tools/bootconfig will
+ * run the parser sanity test.
+ * This does NOT mean linux/bootconfig.h is available in user space.
+ * However, if you change this file, please make sure that tools/bootconfig
+ * still builds and runs correctly.
+ */
+#endif
#define BOOTCONFIG_MAGIC "#BOOTCONFIG\n"
#define BOOTCONFIG_MAGIC_LEN 12
@@ -16,12 +27,32 @@
#define BOOTCONFIG_ALIGN (1 << BOOTCONFIG_ALIGN_SHIFT)
#define BOOTCONFIG_ALIGN_MASK (BOOTCONFIG_ALIGN - 1)
+/**
+ * xbc_calc_checksum() - Calculate checksum of bootconfig
+ * @data: Bootconfig data.
+ * @size: The size of the bootconfig data.
+ *
+ * Calculate the checksum value of the bootconfig data.
+ * The checksum will be used with the BOOTCONFIG_MAGIC and the size for
+ * embedding the bootconfig in the initrd image.
+ */
+static inline __init uint32_t xbc_calc_checksum(void *data, uint32_t size)
+{
+ unsigned char *p = data;
+ uint32_t ret = 0;
+
+ while (size--)
+ ret += *p++;
+
+ return ret;
+}
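
The checksum is a plain byte sum, so usage is trivial; a minimal sketch (the resulting value is then written alongside the size and BOOTCONFIG_MAGIC when a bootconfig is appended to an initrd):

static uint32_t __init example_checksum(void)
{
	char data[] = "feature.option = 1\n";

	/* Sum over the payload bytes, excluding the NUL terminator. */
	return xbc_calc_checksum(data, sizeof(data) - 1);
}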
+
/* XBC tree node */
struct xbc_node {
- u16 next;
- u16 child;
- u16 parent;
- u16 data;
+ uint16_t next;
+ uint16_t child;
+ uint16_t parent;
+ uint16_t data;
} __attribute__ ((__packed__));
#define XBC_KEY 0
@@ -29,7 +60,7 @@ struct xbc_node {
/* Maximum size of boot config is 32KB - 1 */
#define XBC_DATA_MAX (XBC_VALUE - 1)
-#define XBC_NODE_MAX 1024
+#define XBC_NODE_MAX 8192
#define XBC_KEYLEN_MAX 256
#define XBC_DEPTH_MAX 16
@@ -71,7 +102,7 @@ static inline __init bool xbc_node_is_key(struct xbc_node *node)
*/
static inline __init bool xbc_node_is_array(struct xbc_node *node)
{
- return xbc_node_is_value(node) && node->next != 0;
+ return xbc_node_is_value(node) && node->child != 0;
}
/**
@@ -80,6 +111,8 @@ static inline __init bool xbc_node_is_array(struct xbc_node *node)
*
 * Test whether @node is a leaf key node, i.e. a key node which has a value
 * node or no child. Returns true if it is a leaf node, or false if not.
+ * Note that the leaf node can have subkey nodes in addition to the
+ * value node.
*/
static inline __init bool xbc_node_is_leaf(struct xbc_node *node)
{
@@ -88,7 +121,7 @@ static inline __init bool xbc_node_is_leaf(struct xbc_node *node)
}
/* Tree-based key-value access APIs */
-struct xbc_node * __init xbc_node_find_child(struct xbc_node *parent,
+struct xbc_node * __init xbc_node_find_subkey(struct xbc_node *parent,
const char *key);
const char * __init xbc_node_find_value(struct xbc_node *parent,
@@ -126,7 +159,24 @@ xbc_find_value(const char *key, struct xbc_node **vnode)
*/
static inline struct xbc_node * __init xbc_find_node(const char *key)
{
- return xbc_node_find_child(NULL, key);
+ return xbc_node_find_subkey(NULL, key);
+}
+
+/**
+ * xbc_node_get_subkey() - Return the first subkey node if one exists
+ * @node: Parent node
+ *
+ * Return the first subkey node of @node. If @node has no child or only
+ * a value node, this will return NULL.
+ */
+static inline struct xbc_node * __init xbc_node_get_subkey(struct xbc_node *node)
+{
+ struct xbc_node *child = xbc_node_get_child(node);
+
+ if (child && xbc_node_is_value(child))
+ return xbc_node_get_next(child);
+ else
+ return child;
}
/**
@@ -140,7 +190,7 @@ static inline struct xbc_node * __init xbc_find_node(const char *key)
*/
#define xbc_array_for_each_value(anode, value) \
for (value = xbc_node_get_data(anode); anode != NULL ; \
- anode = xbc_node_get_next(anode), \
+ anode = xbc_node_get_child(anode), \
value = anode ? xbc_node_get_data(anode) : NULL)
/**
@@ -149,12 +199,25 @@ static inline struct xbc_node * __init xbc_find_node(const char *key)
* @child: Iterated XBC node.
*
 * Iterate child nodes of @parent. Each child node is stored to @child.
+ * The iterated @child may be either the value node or a subkey node.
*/
#define xbc_node_for_each_child(parent, child) \
for (child = xbc_node_get_child(parent); child != NULL ; \
child = xbc_node_get_next(child))
/**
+ * xbc_node_for_each_subkey() - Iterate child subkey nodes
+ * @parent: An XBC node.
+ * @child: Iterated XBC node.
+ *
+ * Iterate subkey nodes of @parent. Each subkey node is stored to @child.
+ * The @child will only be a subkey node.
+ */
+#define xbc_node_for_each_subkey(parent, child) \
+ for (child = xbc_node_get_subkey(parent); child != NULL ; \
+ child = xbc_node_get_next(child))
+
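A usage sketch, assuming pr_info() is available: walk only the subkeys of a node, letting the iterator skip any leading value node.

static void __init dump_subkeys(struct xbc_node *parent)
{
	struct xbc_node *child;

	/* Visits subkey nodes only; a leading value node is skipped. */
	xbc_node_for_each_subkey(parent, child)
		pr_info("subkey: %s\n", xbc_node_get_data(child));
}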
+/**
 * xbc_node_for_each_array_value() - Iterate array entries of given key
* @node: An XBC node.
* @key: A key string searched under @node
@@ -162,16 +225,16 @@ static inline struct xbc_node * __init xbc_find_node(const char *key)
* @value: Iterated value of array entry.
*
* Iterate array entries of given @key under @node. Each array entry node
- * is stroed to @anode and @value. If the @node doesn't have @key node,
+ * is stored to @anode and @value. If the @node doesn't have @key node,
* it does nothing.
* Note that even if the found key node has only one value (not array)
- * this executes block once. Hoever, if the found key node has no value
+ * this executes block once. However, if the found key node has no value
* (key-only node), this does nothing. So don't use this for testing the
* key-value pair existence.
*/
#define xbc_node_for_each_array_value(node, key, anode, value) \
for (value = xbc_node_find_value(node, key, &anode); value != NULL; \
- anode = xbc_node_get_next(anode), \
+ anode = xbc_node_get_child(anode), \
value = anode ? xbc_node_get_data(anode) : NULL)
/**
@@ -219,13 +282,27 @@ static inline int __init xbc_node_compose_key(struct xbc_node *node,
}
/* XBC node initializer */
-int __init xbc_init(char *buf, const char **emsg, int *epos);
+int __init xbc_init(const char *buf, size_t size, const char **emsg, int *epos);
+/* XBC node and size information */
+int __init xbc_get_info(int *node_size, size_t *data_size);
/* XBC cleanup data structures */
-void __init xbc_destroy_all(void);
+void __init _xbc_exit(bool early);
+
+static __always_inline void xbc_exit(void)
+{
+ _xbc_exit(false);
+}
-/* Debug dump functions */
-void __init xbc_debug_dump(void);
+/* XBC embedded bootconfig data in kernel */
+#ifdef CONFIG_BOOT_CONFIG_EMBED
+const char * __init xbc_get_embedded_bootconfig(size_t *size);
+#else
+static inline const char *xbc_get_embedded_bootconfig(size_t *size)
+{
+ return NULL;
+}
+#endif
#endif
diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
new file mode 100644
index 000000000000..4c506e76a808
--- /dev/null
+++ b/include/linux/bootmem_info.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BOOTMEM_INFO_H
+#define __LINUX_BOOTMEM_INFO_H
+
+#include <linux/mm.h>
+#include <linux/kmemleak.h>
+
+/*
+ * Types for free bootmem stored in the low bits of page->private.
+ */
+enum bootmem_type {
+ MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 1,
+ SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
+ MIX_SECTION_INFO,
+ NODE_INFO,
+ MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
+};
+
+#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
+void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
+void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
+ unsigned long nr_pages);
+
+void get_page_bootmem(unsigned long info, struct page *page,
+ enum bootmem_type type);
+void put_page_bootmem(struct page *page);
+
+static inline enum bootmem_type bootmem_type(const struct page *page)
+{
+ return (unsigned long)page->private & 0xf;
+}
+
+static inline unsigned long bootmem_info(const struct page *page)
+{
+ return (unsigned long)page->private >> 4;
+}
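
These two decoders imply that the setter packs the type into the low nibble of page->private with the payload above it; a sketch of that packing, stated as an assumption rather than as the setter's actual implementation:

static inline unsigned long pack_bootmem_private(unsigned long info,
						 enum bootmem_type type)
{
	/* Assumed inverse of bootmem_type()/bootmem_info() above. */
	return (info << 4) | type;
}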
+
+/*
+ * Any memory allocated via the memblock allocator and not via the
+ * buddy will be marked reserved already in the memmap. For such
+ * pages, we can call this function to free them to the buddy allocator.
+ */
+static inline void free_bootmem_page(struct page *page)
+{
+ enum bootmem_type type = bootmem_type(page);
+
+ /*
+ * The reserve_bootmem_region sets the reserved flag on bootmem
+ * pages.
+ */
+ VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);
+
+ if (type == SECTION_INFO || type == MIX_SECTION_INFO)
+ put_page_bootmem(page);
+ else
+ VM_BUG_ON_PAGE(1, page);
+}
+#else
+static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+}
+
+static inline void register_page_bootmem_memmap(unsigned long section_nr,
+ struct page *map, unsigned long nr_pages)
+{
+}
+
+static inline void put_page_bootmem(struct page *page)
+{
+}
+
+static inline enum bootmem_type bootmem_type(const struct page *page)
+{
+ return SECTION_INFO;
+}
+
+static inline unsigned long bootmem_info(const struct page *page)
+{
+ return 0;
+}
+
+static inline void get_page_bootmem(unsigned long info, struct page *page,
+ enum bootmem_type type)
+{
+}
+
+static inline void free_bootmem_page(struct page *page)
+{
+ kmemleak_free_part_phys(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
+ free_reserved_page(page);
+}
+#endif
+
+#endif /* __LINUX_BOOTMEM_INFO_H */
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index eed86eb0a1de..fc53e0ad56d9 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_BH_H
#define _LINUX_BH_H
+#include <linux/instruction_pointer.h>
#include <linux/preempt.h>
#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS)
diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h
new file mode 100644
index 000000000000..c9e6b26abab6
--- /dev/null
+++ b/include/linux/bpf-cgroup-defs.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BPF_CGROUP_DEFS_H
+#define _BPF_CGROUP_DEFS_H
+
+#ifdef CONFIG_CGROUP_BPF
+
+#include <linux/list.h>
+#include <linux/percpu-refcount.h>
+#include <linux/workqueue.h>
+
+struct bpf_prog_array;
+
+#ifdef CONFIG_BPF_LSM
+/* Maximum number of concurrently attachable per-cgroup LSM hooks. */
+#define CGROUP_LSM_NUM 10
+#else
+#define CGROUP_LSM_NUM 0
+#endif
+
+enum cgroup_bpf_attach_type {
+ CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
+ CGROUP_INET_INGRESS = 0,
+ CGROUP_INET_EGRESS,
+ CGROUP_INET_SOCK_CREATE,
+ CGROUP_SOCK_OPS,
+ CGROUP_DEVICE,
+ CGROUP_INET4_BIND,
+ CGROUP_INET6_BIND,
+ CGROUP_INET4_CONNECT,
+ CGROUP_INET6_CONNECT,
+ CGROUP_UNIX_CONNECT,
+ CGROUP_INET4_POST_BIND,
+ CGROUP_INET6_POST_BIND,
+ CGROUP_UDP4_SENDMSG,
+ CGROUP_UDP6_SENDMSG,
+ CGROUP_UNIX_SENDMSG,
+ CGROUP_SYSCTL,
+ CGROUP_UDP4_RECVMSG,
+ CGROUP_UDP6_RECVMSG,
+ CGROUP_UNIX_RECVMSG,
+ CGROUP_GETSOCKOPT,
+ CGROUP_SETSOCKOPT,
+ CGROUP_INET4_GETPEERNAME,
+ CGROUP_INET6_GETPEERNAME,
+ CGROUP_UNIX_GETPEERNAME,
+ CGROUP_INET4_GETSOCKNAME,
+ CGROUP_INET6_GETSOCKNAME,
+ CGROUP_UNIX_GETSOCKNAME,
+ CGROUP_INET_SOCK_RELEASE,
+ CGROUP_LSM_START,
+ CGROUP_LSM_END = CGROUP_LSM_START + CGROUP_LSM_NUM - 1,
+ MAX_CGROUP_BPF_ATTACH_TYPE
+};
+
+struct cgroup_bpf {
+ /* array of effective progs in this cgroup */
+ struct bpf_prog_array __rcu *effective[MAX_CGROUP_BPF_ATTACH_TYPE];
+
+ /* attached progs to this cgroup and attach flags
+ * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
+ * have either zero or one element
+ * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
+ */
+ struct hlist_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
+ u8 flags[MAX_CGROUP_BPF_ATTACH_TYPE];
+ u64 revisions[MAX_CGROUP_BPF_ATTACH_TYPE];
+
+ /* list of cgroup shared storages */
+ struct list_head storages;
+
+ /* temp storage for effective prog array used by prog_attach/detach */
+ struct bpf_prog_array *inactive;
+
+ /* reference counter used to detach bpf programs after cgroup removal */
+ struct percpu_ref refcnt;
+
+ /* cgroup_bpf is released using a work queue */
+ struct work_struct release_work;
+};
+
+#else /* CONFIG_CGROUP_BPF */
+struct cgroup_bpf {};
+#endif /* CONFIG_CGROUP_BPF */
+
+#endif
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 8b77d08d4b47..d1eb5c7729cb 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -3,11 +3,12 @@
#define _BPF_CGROUP_H
#include <linux/bpf.h>
+#include <linux/bpf-cgroup-defs.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
-#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
+#include <net/sock.h>
#include <uapi/linux/bpf.h>
struct sock;
@@ -22,26 +23,59 @@ struct ctl_table;
struct ctl_table_header;
struct task_struct;
-#ifdef CONFIG_CGROUP_BPF
+unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
+ const struct bpf_insn *insn);
+unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
+ const struct bpf_insn *insn);
+unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
+ const struct bpf_insn *insn);
-extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE];
-#define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type])
+#ifdef CONFIG_CGROUP_BPF
-#define BPF_CGROUP_STORAGE_NEST_MAX 8
+#define CGROUP_ATYPE(type) \
+ case BPF_##type: return type
-struct bpf_cgroup_storage_info {
- struct task_struct *task;
- struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
-};
+static inline enum cgroup_bpf_attach_type
+to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
+{
+ switch (attach_type) {
+ CGROUP_ATYPE(CGROUP_INET_INGRESS);
+ CGROUP_ATYPE(CGROUP_INET_EGRESS);
+ CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
+ CGROUP_ATYPE(CGROUP_SOCK_OPS);
+ CGROUP_ATYPE(CGROUP_DEVICE);
+ CGROUP_ATYPE(CGROUP_INET4_BIND);
+ CGROUP_ATYPE(CGROUP_INET6_BIND);
+ CGROUP_ATYPE(CGROUP_INET4_CONNECT);
+ CGROUP_ATYPE(CGROUP_INET6_CONNECT);
+ CGROUP_ATYPE(CGROUP_UNIX_CONNECT);
+ CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
+ CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
+ CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
+ CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
+ CGROUP_ATYPE(CGROUP_UNIX_SENDMSG);
+ CGROUP_ATYPE(CGROUP_SYSCTL);
+ CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
+ CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
+ CGROUP_ATYPE(CGROUP_UNIX_RECVMSG);
+ CGROUP_ATYPE(CGROUP_GETSOCKOPT);
+ CGROUP_ATYPE(CGROUP_SETSOCKOPT);
+ CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_UNIX_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_UNIX_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
+ default:
+ return CGROUP_BPF_ATTACH_TYPE_INVALID;
+ }
+}
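
A usage sketch: translate the uapi attach type before indexing any per-cgroup array, since not every bpf_attach_type has a cgroup slot:

static int example_resolve_atype(enum bpf_attach_type uapi_type)
{
	enum cgroup_bpf_attach_type atype;

	atype = to_cgroup_bpf_attach_type(uapi_type);
	if (atype == CGROUP_BPF_ATTACH_TYPE_INVALID)
		return -EINVAL;
	return atype;	/* safe index into cgroup_bpf arrays */
}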
-/* For each cpu, permit maximum BPF_CGROUP_STORAGE_NEST_MAX number of tasks
- * to use bpf cgroup storage simultaneously.
- */
-DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
- bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);
+#undef CGROUP_ATYPE
-#define for_each_cgroup_storage_type(stype) \
- for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
+extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
+#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
struct bpf_cgroup_storage_map;
@@ -66,97 +100,51 @@ struct bpf_cgroup_storage {
struct bpf_cgroup_link {
struct bpf_link link;
struct cgroup *cgroup;
- enum bpf_attach_type type;
};
struct bpf_prog_list {
- struct list_head node;
+ struct hlist_node node;
struct bpf_prog *prog;
struct bpf_cgroup_link *link;
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+ u32 flags;
};
-struct bpf_prog_array;
-
-struct cgroup_bpf {
- /* array of effective progs in this cgroup */
- struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];
-
- /* attached progs to this cgroup and attach flags
- * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
- * have either zero or one element
- * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
- */
- struct list_head progs[MAX_BPF_ATTACH_TYPE];
- u32 flags[MAX_BPF_ATTACH_TYPE];
-
- /* list of cgroup shared storages */
- struct list_head storages;
-
- /* temp storage for effective prog array used by prog_attach/detach */
- struct bpf_prog_array *inactive;
-
- /* reference counter used to detach bpf programs after cgroup removal */
- struct percpu_ref refcnt;
-
- /* cgroup_bpf is released using a work queue */
- struct work_struct release_work;
-};
-
-int cgroup_bpf_inherit(struct cgroup *cgrp);
-void cgroup_bpf_offline(struct cgroup *cgrp);
-
-int __cgroup_bpf_attach(struct cgroup *cgrp,
- struct bpf_prog *prog, struct bpf_prog *replace_prog,
- struct bpf_cgroup_link *link,
- enum bpf_attach_type type, u32 flags);
-int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
- struct bpf_cgroup_link *link,
- enum bpf_attach_type type);
-int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
- union bpf_attr __user *uattr);
-
-/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
-int cgroup_bpf_attach(struct cgroup *cgrp,
- struct bpf_prog *prog, struct bpf_prog *replace_prog,
- struct bpf_cgroup_link *link, enum bpf_attach_type type,
- u32 flags);
-int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type);
-int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
- union bpf_attr __user *uattr);
+void __init cgroup_bpf_lifetime_notifier_init(void);
int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sk(struct sock *sk,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
- struct sockaddr *uaddr,
- enum bpf_attach_type type,
+ struct sockaddr_unsized *uaddr,
+ int *uaddrlen,
+ enum cgroup_bpf_attach_type atype,
void *t_ctx,
u32 *flags);
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
struct bpf_sock_ops_kern *sock_ops,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
- short access, enum bpf_attach_type type);
+ short access, enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
- struct ctl_table *table, int write,
+ const struct ctl_table *table, int write,
char **buf, size_t *pcount, loff_t *ppos,
- enum bpf_attach_type type);
+ enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
- int *optname, char __user *optval,
+ int *optname, sockptr_t optval,
int *optlen, char **kernel_optval);
+
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
- int optname, char __user *optval,
- int __user *optlen, int max_optlen,
+ int optname, sockptr_t optval,
+ sockptr_t optlen, int max_optlen,
int retval);
int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
@@ -172,44 +160,6 @@ static inline enum bpf_cgroup_storage_type cgroup_storage_type(
return BPF_CGROUP_STORAGE_SHARED;
}
-static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage
- *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
-{
- enum bpf_cgroup_storage_type stype;
- int i, err = 0;
-
- preempt_disable();
- for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
- if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL))
- continue;
-
- this_cpu_write(bpf_cgroup_storage_info[i].task, current);
- for_each_cgroup_storage_type(stype)
- this_cpu_write(bpf_cgroup_storage_info[i].storage[stype],
- storage[stype]);
- goto out;
- }
- err = -EBUSY;
- WARN_ON_ONCE(1);
-
-out:
- preempt_enable();
- return err;
-}
-
-static inline void bpf_cgroup_storage_unset(void)
-{
- int i;
-
- for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
- if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
- continue;
-
- this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
- return;
- }
-}
-
struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
void *key, bool locked);
@@ -226,13 +176,26 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
void *value, u64 flags);
+/* Opportunistic check to see whether we have any BPF program attached */
+static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
+ enum cgroup_bpf_attach_type type)
+{
+ struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+ struct bpf_prog_array *array;
+
+ array = rcu_access_pointer(cgrp->bpf.effective[type]);
+ return array != &bpf_empty_prog_array.hdr;
+}
+
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(BPF_CGROUP_INET_INGRESS)) \
+ if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && \
+ cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS) && sk && \
+ sk_fullsock(sk)) \
__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
- BPF_CGROUP_INET_INGRESS); \
+ CGROUP_INET_INGRESS); \
\
__ret; \
})
@@ -240,56 +203,55 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(BPF_CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
+ if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) { \
typeof(sk) __sk = sk_to_full_sk(sk); \
- if (sk_fullsock(__sk)) \
+ if (__sk && __sk == skb_to_full_sk(skb) && \
+ cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \
__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
- BPF_CGROUP_INET_EGRESS); \
+ CGROUP_INET_EGRESS); \
} \
__ret; \
})
-#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
+#define BPF_CGROUP_RUN_SK_PROG(sk, atype) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(type)) { \
- __ret = __cgroup_bpf_run_filter_sk(sk, type); \
+ if (cgroup_bpf_enabled(atype)) { \
+ __ret = __cgroup_bpf_run_filter_sk(sk, atype); \
} \
__ret; \
})
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
- BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
+ BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
-#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) \
({ \
- u32 __unused_flags; \
int __ret = 0; \
- if (cgroup_bpf_enabled(type)) \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
- NULL, \
- &__unused_flags); \
+ if (cgroup_bpf_enabled(atype)) \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, \
+ (struct sockaddr_unsized *)uaddr, uaddrlen, \
+ atype, NULL, NULL); \
__ret; \
})
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) \
({ \
- u32 __unused_flags; \
int __ret = 0; \
- if (cgroup_bpf_enabled(type)) { \
+ if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
- t_ctx, \
- &__unused_flags); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, \
+ (struct sockaddr_unsized *)uaddr, uaddrlen, \
+ atype, t_ctx, NULL); \
release_sock(sk); \
} \
__ret; \
@@ -300,14 +262,15 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
* (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
* should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
*/
-#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, bind_flags) \
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, bind_flags) \
({ \
u32 __flags = 0; \
int __ret = 0; \
- if (cgroup_bpf_enabled(type)) { \
+ if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
- NULL, &__flags); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, \
+ (struct sockaddr_unsized *)uaddr, uaddrlen, \
+ atype, NULL, &__flags); \
release_sock(sk); \
if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
@@ -316,33 +279,42 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
})
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
- ((cgroup_bpf_enabled(BPF_CGROUP_INET4_CONNECT) || \
- cgroup_bpf_enabled(BPF_CGROUP_INET6_CONNECT)) && \
+ ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \
+ cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \
(sk)->sk_prot->pre_connect)
-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT)
-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT, NULL)
-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT, NULL)
-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)
+#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_CONNECT, NULL)
-#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)
+#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_SENDMSG, t_ctx)
-#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
+#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_SENDMSG, t_ctx)
-#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)
+#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_SENDMSG, t_ctx)
-#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
+#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_RECVMSG, NULL)
+
+#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_RECVMSG, NULL)
+
+#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_RECVMSG, NULL)
/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
* fullsock and its parent fullsock cannot be traced by
@@ -362,33 +334,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS)) \
+ if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \
__ret = __cgroup_bpf_run_filter_sock_ops(sk, \
sock_ops, \
- BPF_CGROUP_SOCK_OPS); \
+ CGROUP_SOCK_OPS); \
__ret; \
})
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
+ if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
if (__sk && sk_fullsock(__sk)) \
__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
sock_ops, \
- BPF_CGROUP_SOCK_OPS); \
+ CGROUP_SOCK_OPS); \
} \
__ret; \
})
-#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(BPF_CGROUP_DEVICE)) \
- __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
+ if (cgroup_bpf_enabled(CGROUP_DEVICE)) \
+ __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
access, \
- BPF_CGROUP_DEVICE); \
+ CGROUP_DEVICE); \
\
__ret; \
})
@@ -397,10 +369,10 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(BPF_CGROUP_SYSCTL)) \
+ if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \
__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
buf, count, pos, \
- BPF_CGROUP_SYSCTL); \
+ CGROUP_SYSCTL); \
__ret; \
})
@@ -408,7 +380,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
kernel_optval) \
({ \
int __ret = 0; \
- if (cgroup_bpf_enabled(BPF_CGROUP_SETSOCKOPT)) \
+ if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) && \
+ cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT)) \
__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
optname, optval, \
optlen, \
@@ -416,19 +389,12 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
__ret; \
})
-#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
-({ \
- int __ret = 0; \
- if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
- get_user(__ret, optlen); \
- __ret; \
-})
-
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
max_optlen, retval) \
({ \
int __ret = retval; \
- if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) && \
+ cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT)) \
if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
!INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
tcp_bpf_bypass_getsockopt, \
@@ -443,7 +409,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
optlen, retval) \
({ \
int __ret = retval; \
- if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
+ if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
__ret = __cgroup_bpf_run_filter_getsockopt_kern( \
sock, level, optname, optval, optlen, retval); \
__ret; \
@@ -456,11 +422,15 @@ int cgroup_bpf_prog_detach(const union bpf_attr *attr,
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr);
+
+const struct bpf_func_proto *
+cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else
-struct cgroup_bpf {};
-static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
-static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}
+static inline void cgroup_bpf_lifetime_notifier_init(void)
+{
+ return;
+}
static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
enum bpf_prog_type ptype,
@@ -487,9 +457,12 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
return -EINVAL;
}
-static inline int bpf_cgroup_storage_set(
- struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; }
-static inline void bpf_cgroup_storage_unset(void) {}
+static inline const struct bpf_func_proto *
+cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ return NULL;
+}
+
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
@@ -505,28 +478,31 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
return 0;
}
-#define cgroup_bpf_enabled(type) (0)
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
+#define cgroup_bpf_enabled(atype) (0)
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, flags) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
-#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
@@ -534,8 +510,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
kernel_optval) ({ 0; })
-#define for_each_cgroup_storage_type(stype) for (; false; )
-
#endif /* CONFIG_CGROUP_BPF */
#endif /* _BPF_CGROUP_H */
diff --git a/include/linux/bpf-netns.h b/include/linux/bpf-netns.h
index 722f799c1a2e..413cfa5e4b07 100644
--- a/include/linux/bpf-netns.h
+++ b/include/linux/bpf-netns.h
@@ -3,15 +3,9 @@
#define _BPF_NETNS_H
#include <linux/mutex.h>
+#include <net/netns/bpf.h>
#include <uapi/linux/bpf.h>
-enum netns_bpf_attach_type {
- NETNS_BPF_INVALID = -1,
- NETNS_BPF_FLOW_DISSECTOR = 0,
- NETNS_BPF_SK_LOOKUP,
- MAX_NETNS_BPF_ATTACH_TYPE
-};
-
static inline enum netns_bpf_attach_type
to_netns_bpf_attach_type(enum bpf_attach_type attach_type)
{
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 02b02cb29ce2..6498be4c44f8 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -5,7 +5,9 @@
#define _LINUX_BPF_H 1
#include <uapi/linux/bpf.h>
+#include <uapi/linux/filter.h>
+#include <crypto/sha2.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
@@ -22,6 +24,14 @@
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
+#include <linux/stddef.h>
+#include <linux/bpfptr.h>
+#include <linux/btf.h>
+#include <linux/rcupdate_trace.h>
+#include <linux/static_call.h>
+#include <linux/memcontrol.h>
+#include <linux/cfi.h>
+#include <asm/rqspinlock.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -29,6 +39,7 @@ struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
+struct bpf_arena;
struct sock;
struct seq_file;
struct btf;
@@ -42,14 +53,25 @@ struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;
+struct ftrace_ops;
+struct cgroup;
+struct bpf_token;
+struct user_namespace;
+struct super_block;
+struct inode;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
+extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
+extern bool bpf_global_ma_set;
+typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
+typedef unsigned int (*bpf_func_t)(const void *,
+ const struct bpf_insn *);
struct bpf_iter_seq_info {
const struct seq_operations *seq_ops;
bpf_iter_init_seq_priv_t init_seq_private;
@@ -69,26 +91,35 @@ struct bpf_map_ops {
void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
union bpf_attr __user *uattr);
+ int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
+ void *value, u64 flags);
int (*map_lookup_and_delete_batch)(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
- int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
+ int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
+ const union bpf_attr *attr,
union bpf_attr __user *uattr);
int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
union bpf_attr __user *uattr);
/* funcs callable from userspace and from eBPF programs */
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
- int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
- int (*map_delete_elem)(struct bpf_map *map, void *key);
- int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
- int (*map_pop_elem)(struct bpf_map *map, void *value);
- int (*map_peek_elem)(struct bpf_map *map, void *value);
+ long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
+ long (*map_delete_elem)(struct bpf_map *map, void *key);
+ long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
+ long (*map_pop_elem)(struct bpf_map *map, void *value);
+ long (*map_peek_elem)(struct bpf_map *map, void *value);
+ void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
+ int (*map_get_hash)(struct bpf_map *map, u32 hash_buf_size, void *hash_buf);
/* funcs called by prog_array and perf_event_array map */
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
int fd);
- void (*map_fd_put_ptr)(void *ptr);
+ /* If need_defer is true, the implementation should guarantee that
+ * the to-be-put element is still alive before the bpf program, which
+ * may manipulate it, exits.
+ */
+ void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
u32 (*map_fd_sys_lookup_elem)(void *ptr);
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
@@ -112,6 +143,9 @@ struct bpf_map_ops {
int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
struct poll_table_struct *pts);
+ unsigned long (*map_get_unmapped_area)(struct file *filep, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
/* Functions called by bpf_local_storage maps */
int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
@@ -121,7 +155,7 @@ struct bpf_map_ops {
struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
/* Misc helpers.*/
- int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);
+ long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);
/* map_meta_equal must be implemented for maps that can be
* used as an inner map. It is a runtime check to ensure
@@ -139,22 +173,128 @@ struct bpf_map_ops {
int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee);
- int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn,
+ long (*map_for_each_callback)(struct bpf_map *map,
+ bpf_callback_t callback_fn,
void *callback_ctx, u64 flags);
- /* BTF name and id of struct allocated by map_alloc */
- const char * const map_btf_name;
+ u64 (*map_mem_usage)(const struct bpf_map *map);
+
+ /* BTF id of struct allocated by map_alloc */
int *map_btf_id;
/* bpf_iter info used to open a seq_file */
const struct bpf_iter_seq_info *iter_seq_info;
};
-struct bpf_map {
- /* The first two cachelines with read-mostly members of which some
- * are also accessed in fast-path (e.g. ops, max_entries).
+enum {
+ /* Support at most 11 fields in a BTF type */
+ BTF_FIELDS_MAX = 11,
+};
+
+enum btf_field_type {
+ BPF_SPIN_LOCK = (1 << 0),
+ BPF_TIMER = (1 << 1),
+ BPF_KPTR_UNREF = (1 << 2),
+ BPF_KPTR_REF = (1 << 3),
+ BPF_KPTR_PERCPU = (1 << 4),
+ BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU,
+ BPF_LIST_HEAD = (1 << 5),
+ BPF_LIST_NODE = (1 << 6),
+ BPF_RB_ROOT = (1 << 7),
+ BPF_RB_NODE = (1 << 8),
+ BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE,
+ BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
+ BPF_REFCOUNT = (1 << 9),
+ BPF_WORKQUEUE = (1 << 10),
+ BPF_UPTR = (1 << 11),
+ BPF_RES_SPIN_LOCK = (1 << 12),
+ BPF_TASK_WORK = (1 << 13),
+};
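
Since these are single-bit flags (plus a few composite masks such as BPF_KPTR and BPF_GRAPH_ROOT), presence checks on a record's field mask are plain mask tests; a small sketch:

static inline bool example_has_kptr(u32 field_mask)
{
	/* True for any kptr flavor: unreferenced, referenced or percpu. */
	return (field_mask & BPF_KPTR) != 0;
}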
+
+enum bpf_cgroup_storage_type {
+ BPF_CGROUP_STORAGE_SHARED,
+ BPF_CGROUP_STORAGE_PERCPU,
+ __BPF_CGROUP_STORAGE_MAX
+#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+};
+
+#ifdef CONFIG_CGROUP_BPF
+# define for_each_cgroup_storage_type(stype) \
+ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
+#else
+# define for_each_cgroup_storage_type(stype) for (; false; )
+#endif /* CONFIG_CGROUP_BPF */
+
+typedef void (*btf_dtor_kfunc_t)(void *);
+
+struct btf_field_kptr {
+ struct btf *btf;
+ struct module *module;
+ /* dtor used if btf_is_kernel(btf), otherwise the type is
+ * program-allocated, dtor is NULL, and __bpf_obj_drop_impl is used
*/
- const struct bpf_map_ops *ops ____cacheline_aligned;
+ btf_dtor_kfunc_t dtor;
+ u32 btf_id;
+};
+
+struct btf_field_graph_root {
+ struct btf *btf;
+ u32 value_btf_id;
+ u32 node_offset;
+ struct btf_record *value_rec;
+};
+
+struct btf_field {
+ u32 offset;
+ u32 size;
+ enum btf_field_type type;
+ union {
+ struct btf_field_kptr kptr;
+ struct btf_field_graph_root graph_root;
+ };
+};
+
+struct btf_record {
+ u32 cnt;
+ u32 field_mask;
+ int spin_lock_off;
+ int res_spin_lock_off;
+ int timer_off;
+ int wq_off;
+ int refcount_off;
+ int task_work_off;
+ struct btf_field fields[];
+};
+
+/* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */
+struct bpf_rb_node_kern {
+ struct rb_node rb_node;
+ void *owner;
+} __attribute__((aligned(8)));
+
+/* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */
+struct bpf_list_node_kern {
+ struct list_head list_head;
+ void *owner;
+} __attribute__((aligned(8)));
+
+/* 'Ownership' of program-containing map is claimed by the first program
+ * that is going to use this map or by the first program whose FD is
+ * stored in the map to make sure that all callers and callees have the
+ * same prog type, JITed flag and xdp_has_frags flag.
+ */
+struct bpf_map_owner {
+ enum bpf_prog_type type;
+ bool jited;
+ bool xdp_has_frags;
+ u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE];
+ const struct btf_type *attach_func_proto;
+ enum bpf_attach_type expected_attach_type;
+};
+
+struct bpf_map {
+ u8 sha[SHA256_DIGEST_SIZE];
+ const struct bpf_map_ops *ops;
struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
void *security;
@@ -163,61 +303,319 @@ struct bpf_map {
u32 key_size;
u32 value_size;
u32 max_entries;
+ u64 map_extra; /* any per-map-type extra fields */
u32 map_flags;
- int spin_lock_off; /* >=0 valid offset, <0 error */
u32 id;
+ struct btf_record *record;
int numa_node;
u32 btf_key_type_id;
u32 btf_value_type_id;
+ u32 btf_vmlinux_value_type_id;
struct btf *btf;
-#ifdef CONFIG_MEMCG_KMEM
- struct mem_cgroup *memcg;
+#ifdef CONFIG_MEMCG
+ struct obj_cgroup *objcg;
#endif
char name[BPF_OBJ_NAME_LEN];
- u32 btf_vmlinux_value_type_id;
+ struct mutex freeze_mutex;
+ atomic64_t refcnt;
+ atomic64_t usercnt;
+ /* rcu is used before freeing and work is only used during freeing */
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
+ atomic64_t writecnt;
+ spinlock_t owner_lock;
+ struct bpf_map_owner *owner;
bool bypass_spec_v1;
bool frozen; /* write-once; write-protected by freeze_mutex */
- /* 22 bytes hole */
-
- /* The 3rd and 4th cacheline with misc members to avoid false sharing
- * particularly with refcounting.
- */
- atomic64_t refcnt ____cacheline_aligned;
- atomic64_t usercnt;
- struct work_struct work;
- struct mutex freeze_mutex;
- u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
+ bool free_after_mult_rcu_gp;
+ bool free_after_rcu_gp;
+ atomic64_t sleepable_refcnt;
+ s64 __percpu *elem_count;
+ u64 cookie; /* write-once */
+ char *excl_prog_sha;
};
-static inline bool map_value_has_spin_lock(const struct bpf_map *map)
+static inline const char *btf_field_type_name(enum btf_field_type type)
+{
+ switch (type) {
+ case BPF_SPIN_LOCK:
+ return "bpf_spin_lock";
+ case BPF_RES_SPIN_LOCK:
+ return "bpf_res_spin_lock";
+ case BPF_TIMER:
+ return "bpf_timer";
+ case BPF_WORKQUEUE:
+ return "bpf_wq";
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ return "kptr";
+ case BPF_KPTR_PERCPU:
+ return "percpu_kptr";
+ case BPF_UPTR:
+ return "uptr";
+ case BPF_LIST_HEAD:
+ return "bpf_list_head";
+ case BPF_LIST_NODE:
+ return "bpf_list_node";
+ case BPF_RB_ROOT:
+ return "bpf_rb_root";
+ case BPF_RB_NODE:
+ return "bpf_rb_node";
+ case BPF_REFCOUNT:
+ return "bpf_refcount";
+ case BPF_TASK_WORK:
+ return "bpf_task_work";
+ default:
+ WARN_ON_ONCE(1);
+ return "unknown";
+ }
+}
+
+#if IS_ENABLED(CONFIG_DEBUG_KERNEL)
+#define BPF_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
+#else
+#define BPF_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
+#endif
+
+static inline u32 btf_field_type_size(enum btf_field_type type)
+{
+ switch (type) {
+ case BPF_SPIN_LOCK:
+ return sizeof(struct bpf_spin_lock);
+ case BPF_RES_SPIN_LOCK:
+ return sizeof(struct bpf_res_spin_lock);
+ case BPF_TIMER:
+ return sizeof(struct bpf_timer);
+ case BPF_WORKQUEUE:
+ return sizeof(struct bpf_wq);
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
+ case BPF_UPTR:
+ return sizeof(u64);
+ case BPF_LIST_HEAD:
+ return sizeof(struct bpf_list_head);
+ case BPF_LIST_NODE:
+ return sizeof(struct bpf_list_node);
+ case BPF_RB_ROOT:
+ return sizeof(struct bpf_rb_root);
+ case BPF_RB_NODE:
+ return sizeof(struct bpf_rb_node);
+ case BPF_REFCOUNT:
+ return sizeof(struct bpf_refcount);
+ case BPF_TASK_WORK:
+ return sizeof(struct bpf_task_work);
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static inline u32 btf_field_type_align(enum btf_field_type type)
+{
+ switch (type) {
+ case BPF_SPIN_LOCK:
+ return __alignof__(struct bpf_spin_lock);
+ case BPF_RES_SPIN_LOCK:
+ return __alignof__(struct bpf_res_spin_lock);
+ case BPF_TIMER:
+ return __alignof__(struct bpf_timer);
+ case BPF_WORKQUEUE:
+ return __alignof__(struct bpf_wq);
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
+ case BPF_UPTR:
+ return __alignof__(u64);
+ case BPF_LIST_HEAD:
+ return __alignof__(struct bpf_list_head);
+ case BPF_LIST_NODE:
+ return __alignof__(struct bpf_list_node);
+ case BPF_RB_ROOT:
+ return __alignof__(struct bpf_rb_root);
+ case BPF_RB_NODE:
+ return __alignof__(struct bpf_rb_node);
+ case BPF_REFCOUNT:
+ return __alignof__(struct bpf_refcount);
+ case BPF_TASK_WORK:
+ return __alignof__(struct bpf_task_work);
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
+{
+ memset(addr, 0, field->size);
+
+ switch (field->type) {
+ case BPF_REFCOUNT:
+ refcount_set((refcount_t *)addr, 1);
+ break;
+ case BPF_RB_NODE:
+ RB_CLEAR_NODE((struct rb_node *)addr);
+ break;
+ case BPF_LIST_HEAD:
+ case BPF_LIST_NODE:
+ INIT_LIST_HEAD((struct list_head *)addr);
+ break;
+ case BPF_RB_ROOT:
+ /* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
+ case BPF_SPIN_LOCK:
+ case BPF_RES_SPIN_LOCK:
+ case BPF_TIMER:
+ case BPF_WORKQUEUE:
+ case BPF_KPTR_UNREF:
+ case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
+ case BPF_UPTR:
+ case BPF_TASK_WORK:
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+}
+
+static inline bool btf_record_has_field(const struct btf_record *rec, enum btf_field_type type)
+{
+ if (IS_ERR_OR_NULL(rec))
+ return false;
+ return rec->field_mask & type;
+}
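/* Illustrative only, not part of the diff: since 'type' is tested against a
 * bitmask, one call can ask about several field kinds at once, as
 * bpf_map_has_internal_structs() further below does for timer, workqueue
 * and task_work fields. A hypothetical example:
 */
static inline bool example_value_has_lock(const struct bpf_map *map)
{
	return btf_record_has_field(map->record,
				    BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK);
}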
+
+static inline void bpf_obj_init(const struct btf_record *rec, void *obj)
+{
+ int i;
+
+ if (IS_ERR_OR_NULL(rec))
+ return;
+ for (i = 0; i < rec->cnt; i++)
+ bpf_obj_init_field(&rec->fields[i], obj + rec->fields[i].offset);
+}
+
+/* 'dst' must be a temporary buffer and should not point to memory that is being
+ * used in parallel by a bpf program or bpf syscall, otherwise the access from
+ * the bpf program or bpf syscall may be corrupted by the reinitialization,
+ * leading to weird problems. Even if 'dst' is newly allocated from the bpf
+ * memory allocator, it is still possible for 'dst' to be used in parallel by
+ * a bpf program or bpf syscall.
+ */
+static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
- return map->spin_lock_off >= 0;
+ bpf_obj_init(map->record, dst);
+}
+
+/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
+ * forced to use 'long' read/writes to try to atomically copy long counters.
+ * Best-effort only. No barriers here, since it _will_ race with concurrent
+ * updates from BPF programs. Called from bpf syscall and mostly used with
+ * size 8 or 16 bytes, so ask compiler to inline it.
+ */
+static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+{
+ const long *lsrc = src;
+ long *ldst = dst;
+
+ size /= sizeof(long);
+ while (size--)
+ data_race(*ldst++ = *lsrc++);
}
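/* Illustrative only, not part of the diff: a hypothetical caller copying a
 * concurrently updated per-CPU value; size is rounded up to a multiple of 8
 * because the helper above divides it by sizeof(long).
 */
static inline void example_read_percpu_value(void *dst, const void *cpu_value,
					     u32 value_size)
{
	bpf_long_memcpy(dst, cpu_value, round_up(value_size, 8));
}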
-static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
+/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
+static inline void bpf_obj_memcpy(struct btf_record *rec,
+ void *dst, void *src, u32 size,
+ bool long_memcpy)
{
- if (likely(!map_value_has_spin_lock(map)))
+ u32 curr_off = 0;
+ int i;
+
+ if (IS_ERR_OR_NULL(rec)) {
+ if (long_memcpy)
+ bpf_long_memcpy(dst, src, round_up(size, 8));
+ else
+ memcpy(dst, src, size);
return;
- *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
- (struct bpf_spin_lock){};
+ }
+
+ for (i = 0; i < rec->cnt; i++) {
+ u32 next_off = rec->fields[i].offset;
+ u32 sz = next_off - curr_off;
+
+ memcpy(dst + curr_off, src + curr_off, sz);
+ curr_off += rec->fields[i].size + sz;
+ }
+ memcpy(dst + curr_off, src + curr_off, size - curr_off);
}
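/* Worked example, illustrative only: for a 32-byte value with one 8-byte
 * special field at offset 16, the loop above copies bytes [0, 16), then
 * advances curr_off by sz plus the field size to 24, skipping the field;
 * the final memcpy copies the remaining bytes [24, 32).
 */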
-/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
- if (unlikely(map_value_has_spin_lock(map))) {
- u32 off = map->spin_lock_off;
+ bpf_obj_memcpy(map->record, dst, src, map->value_size, false);
+}
+
+static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
+{
+ bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
+}
+
+static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src)
+{
+ unsigned long *src_uptr, *dst_uptr;
+ const struct btf_field *field;
+ int i;
+
+ if (!btf_record_has_field(rec, BPF_UPTR))
+ return;
+
+ for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) {
+ if (field->type != BPF_UPTR)
+ continue;
+
+ src_uptr = src + field->offset;
+ dst_uptr = dst + field->offset;
+ swap(*src_uptr, *dst_uptr);
+ }
+}
+
+static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size)
+{
+ u32 curr_off = 0;
+ int i;
+
+ if (IS_ERR_OR_NULL(rec)) {
+ memset(dst, 0, size);
+ return;
+ }
+
+ for (i = 0; i < rec->cnt; i++) {
+ u32 next_off = rec->fields[i].offset;
+ u32 sz = next_off - curr_off;
- memcpy(dst, src, off);
- memcpy(dst + off + sizeof(struct bpf_spin_lock),
- src + off + sizeof(struct bpf_spin_lock),
- map->value_size - off - sizeof(struct bpf_spin_lock));
- } else {
- memcpy(dst, src, map->value_size);
+ memset(dst + curr_off, 0, sz);
+ curr_off += rec->fields[i].size + sz;
}
+ memset(dst + curr_off, 0, size - curr_off);
}
+
+static inline void zero_map_value(struct bpf_map *map, void *dst)
+{
+ bpf_obj_memzero(map->record, dst, map->value_size);
+}
+
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
bool lock_src);
+void bpf_timer_cancel_and_free(void *timer);
+void bpf_wq_cancel_and_free(void *timer);
+void bpf_task_work_cancel_and_free(void *timer);
+void bpf_list_head_free(const struct btf_field *field, void *list_head,
+ struct bpf_spin_lock *spin_lock);
+void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
+ struct bpf_spin_lock *spin_lock);
+u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena);
+u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
struct bpf_offload_dev;
@@ -265,8 +663,154 @@ int map_check_no_btf(const struct bpf_map *map,
bool bpf_map_meta_equal(const struct bpf_map *meta0,
const struct bpf_map *meta1);
+static inline bool bpf_map_has_internal_structs(struct bpf_map *map)
+{
+ return btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE | BPF_TASK_WORK);
+}
+
+void bpf_map_free_internal_structs(struct bpf_map *map, void *obj);
+
+int bpf_dynptr_from_file_sleepable(struct file *file, u32 flags,
+ struct bpf_dynptr *ptr__uninit);
+
extern const struct bpf_map_ops bpf_map_offload_ops;
+/* bpf_type_flag contains a set of flags that are applicable to the values of
+ * arg_type, ret_type and reg_type. For example, a pointer value may be null,
+ * or the memory it points to may be read-only. We classify types into two
+ * categories: base types and extended types. Extended types are base types
+ * combined with a type flag.
+ *
+ * Currently there are no more than 32 base types in arg_type, ret_type and
+ * reg_types.
+ */
+#define BPF_BASE_TYPE_BITS 8
+
+enum bpf_type_flag {
+ /* PTR may be NULL. */
+ PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is read-only. When applied on bpf_arg, it indicates the arg is
+ * compatible with both mutable and immutable memory.
+ */
+ MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS),
+
+ /* MEM points to BPF ring buffer reservation. */
+ MEM_RINGBUF = BIT(2 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is in user address space. */
+ MEM_USER = BIT(3 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
+ * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
+ * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
+ * or bpf_this_cpu_ptr(), which will return the pointer corresponding
+ * to the specified cpu.
+ */
+ MEM_PERCPU = BIT(4 + BPF_BASE_TYPE_BITS),
+
+ /* Indicates that the argument will be released. */
+ OBJ_RELEASE = BIT(5 + BPF_BASE_TYPE_BITS),
+
+ /* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
+ * unreferenced and referenced kptr loaded from map value using a load
+ * instruction, so that they can only be dereferenced but not escape the
+ * BPF program into the kernel (i.e. cannot be passed as arguments to
+ * kfunc or bpf helpers).
+ */
+ PTR_UNTRUSTED = BIT(6 + BPF_BASE_TYPE_BITS),
+
+ /* MEM can be uninitialized. */
+ MEM_UNINIT = BIT(7 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to memory local to the bpf program. */
+ DYNPTR_TYPE_LOCAL = BIT(8 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to a kernel-produced ringbuf record. */
+ DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS),
+
+ /* Size is known at compile time. */
+ MEM_FIXED_SIZE = BIT(10 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is of an allocated object of type in program BTF. This is used to
+ * tag PTR_TO_BTF_ID allocated using bpf_obj_new.
+ */
+ MEM_ALLOC = BIT(11 + BPF_BASE_TYPE_BITS),
+
+ /* PTR was passed from the kernel in a trusted context, and may be
+ * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions.
+ * Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above.
+ * PTR_UNTRUSTED refers to a kptr that was read directly from a map
+ * without invoking bpf_kptr_xchg(). What we really need to know is
+ * whether a pointer is safe to pass to a kfunc or BPF helper function.
+ * While PTR_UNTRUSTED pointers are unsafe to pass to kfuncs and BPF
+ * helpers, they do not cover all possible instances of unsafe
+ * pointers. For example, a pointer that was obtained from walking a
+ * struct will _not_ get the PTR_UNTRUSTED type modifier, despite the
+ * fact that it may be NULL, invalid, etc. This is due to backwards
+ * compatibility requirements, as this was the behavior that was first
+ * introduced when kptrs were added. The behavior is now considered
+ * deprecated, and PTR_UNTRUSTED will eventually be removed.
+ *
+ * PTR_TRUSTED, on the other hand, is a pointer that the kernel
+ * guarantees to be valid and safe to pass to kfuncs and BPF helpers.
+ * For example, pointers passed to tracepoint arguments are considered
+ * PTR_TRUSTED, as are pointers that are passed to struct_ops
+ * callbacks. As alluded to above, pointers that are obtained from
+ * walking PTR_TRUSTED pointers are _not_ trusted. For example, if a
+ * struct task_struct *task is PTR_TRUSTED, then accessing
+ * task->last_wakee will lose the PTR_TRUSTED modifier when it's stored
+ * in a BPF register. Similarly, pointers passed to certain program
+ * types such as kretprobes are not guaranteed to be valid, as they may
+ * for example contain an object that was recently freed.
+ */
+ PTR_TRUSTED = BIT(12 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */
+ MEM_RCU = BIT(13 + BPF_BASE_TYPE_BITS),
+
+ /* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
+ * Currently only valid for linked-list and rbtree nodes. If the nodes
+ * have a bpf_refcount_field, they must be tagged MEM_RCU as well.
+ */
+ NON_OWN_REF = BIT(14 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to sk_buff */
+ DYNPTR_TYPE_SKB = BIT(15 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to xdp_buff */
+ DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS),
+
+ /* Memory must be aligned on some architectures, used in combination with
+ * MEM_FIXED_SIZE.
+ */
+ MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS),
+
+ /* MEM is being written to, often combined with MEM_UNINIT. The absence
+ * of MEM_WRITE means that MEM is only being read. MEM_WRITE without
+ * MEM_UNINIT means that the memory must already be initialized, since it
+ * is also read.
+ */
+ MEM_WRITE = BIT(18 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to skb_metadata_end()-skb_metadata_len() */
+ DYNPTR_TYPE_SKB_META = BIT(19 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to file */
+ DYNPTR_TYPE_FILE = BIT(20 + BPF_BASE_TYPE_BITS),
+
+ __BPF_TYPE_FLAG_MAX,
+ __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
+};
+
+#define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \
+ | DYNPTR_TYPE_XDP | DYNPTR_TYPE_SKB_META | DYNPTR_TYPE_FILE)
+
+/* Max number of base types. */
+#define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS)
+
+/* Max number of all types. */
+#define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
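/* Illustrative only, not part of the diff: a composed type such as
 * ARG_PTR_TO_MAP_VALUE_OR_NULL further below is simply
 * PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE. The base type lives in the low
 * BPF_BASE_TYPE_BITS and the flags sit above it, so the two halves can be
 * recovered by masking; the verifier's base_type()/type_flag() helpers
 * (in bpf_verifier.h) do the equivalent of:
 */
#define EXAMPLE_BASE_TYPE_MASK	(BPF_BASE_TYPE_LIMIT - 1)
#define example_base_type(t)	((t) & EXAMPLE_BASE_TYPE_MASK)
#define example_type_flag(t)	((t) & ~EXAMPLE_BASE_TYPE_MASK)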
+
/* function argument constraints */
enum bpf_arg_type {
ARG_DONTCARE = 0, /* unused argument in helper function */
@@ -277,58 +821,84 @@ enum bpf_arg_type {
ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
- ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */
- ARG_PTR_TO_MAP_VALUE_OR_NULL, /* pointer to stack used as map value or NULL */
- /* the following constraints used to prototype bpf_memcmp() and other
- * functions that access data on eBPF program stack
+ /* Used to prototype bpf_memcmp() and other functions that access data
+ * on eBPF program stack
*/
ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
- ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
- ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
- * helper function must fill all bytes or clear
- * them in error case.
- */
+ ARG_PTR_TO_ARENA,
ARG_CONST_SIZE, /* number of bytes accessed from memory */
ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
ARG_PTR_TO_CTX, /* pointer to context */
- ARG_PTR_TO_CTX_OR_NULL, /* pointer to context or NULL */
ARG_ANYTHING, /* any (initialized) argument is ok */
ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
- ARG_PTR_TO_INT, /* pointer to int */
- ARG_PTR_TO_LONG, /* pointer to long */
ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
- ARG_PTR_TO_SOCKET_OR_NULL, /* pointer to bpf_sock (fullsock) or NULL */
ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
- ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */
- ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */
+ ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */
ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */
ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */
ARG_PTR_TO_FUNC, /* pointer to a bpf program function */
- ARG_PTR_TO_STACK_OR_NULL, /* pointer to stack or NULL */
+ ARG_PTR_TO_STACK, /* pointer to stack */
ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
+ ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
+ ARG_KPTR_XCHG_DEST, /* pointer to destination that kptrs are bpf_kptr_xchg'd into */
+ ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
__BPF_ARG_TYPE_MAX,
+
+ /* Extended arg_types. */
+ ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
+ ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
+ ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
+ ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
+ ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
+ ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
+ /* Pointer to memory does not need to be initialized, since the helper
+ * function fills all bytes or clears them in the error case.
+ */
+ ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | MEM_WRITE | ARG_PTR_TO_MEM,
+ /* Pointer to valid memory of size known at compile time. */
+ ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM,
+
+ /* This must be the last entry. Its purpose is to ensure the enum is
+ * wide enough to hold the higher bits reserved for bpf_type_flag.
+ */
+ __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
+static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* type of values returned from helper functions */
enum bpf_return_type {
RET_INTEGER, /* function returns integer */
RET_VOID, /* function doesn't return anything */
RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
- RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
- RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */
- RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */
- RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */
- RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */
- RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */
- RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
+ RET_PTR_TO_SOCKET, /* returns a pointer to a socket */
+ RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */
+ RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */
+ RET_PTR_TO_MEM, /* returns a pointer to memory */
RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */
RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */
+ __BPF_RET_TYPE_MAX,
+
+ /* Extended ret_types. */
+ RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
+ RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
+ RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
+ RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
+ RET_PTR_TO_RINGBUF_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_RINGBUF | RET_PTR_TO_MEM,
+ RET_PTR_TO_DYNPTR_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MEM,
+ RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
+ RET_PTR_TO_BTF_ID_TRUSTED = PTR_TRUSTED | RET_PTR_TO_BTF_ID,
+
+ /* This must be the last entry. Its purpose is to ensure the enum is
+ * wide enough to hold the higher bits reserved for bpf_type_flag.
+ */
+ __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
+static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
* to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
@@ -338,6 +908,13 @@ struct bpf_func_proto {
u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool gpl_only;
bool pkt_access;
+ bool might_sleep;
+ /* set to true if helper follows contract for llvm
+ * attribute bpf_fastcall:
+ * - void functions do not scratch r0
+ * - functions taking N arguments scratch only registers r1-rN
+ */
+ bool allow_fastcall;
enum bpf_return_type ret_type;
union {
struct {
@@ -358,6 +935,14 @@ struct bpf_func_proto {
u32 *arg5_btf_id;
};
u32 *arg_btf_id[5];
+ struct {
+ size_t arg1_size;
+ size_t arg2_size;
+ size_t arg3_size;
+ size_t arg4_size;
+ size_t arg5_size;
+ };
+ size_t arg_size[5];
};
int *ret_btf_id; /* return value btf_id */
bool (*allowed)(const struct bpf_prog *prog);
@@ -390,18 +975,15 @@ enum bpf_reg_type {
PTR_TO_CTX, /* reg points to bpf_context */
CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
PTR_TO_MAP_VALUE, /* reg points to map element value */
- PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
+ PTR_TO_MAP_KEY, /* reg points to a map element key */
PTR_TO_STACK, /* reg == frame_pointer + offset */
PTR_TO_PACKET_META, /* skb->data - meta_len */
PTR_TO_PACKET, /* reg points to skb->data */
PTR_TO_PACKET_END, /* skb->data + headlen */
PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
PTR_TO_SOCKET, /* reg points to struct bpf_sock */
- PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */
PTR_TO_SOCK_COMMON, /* reg points to sock_common */
- PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
- PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
/* PTR_TO_BTF_ID points to a kernel struct that does not need
@@ -415,36 +997,48 @@ enum bpf_reg_type {
* additional context, assume the value is non-null.
*/
PTR_TO_BTF_ID,
+ PTR_TO_MEM, /* reg points to valid memory region */
+ PTR_TO_ARENA,
+ PTR_TO_BUF, /* reg points to a read/write buffer */
+ PTR_TO_FUNC, /* reg points to a bpf program function */
+ PTR_TO_INSN, /* reg points to a bpf program instruction */
+ CONST_PTR_TO_DYNPTR, /* reg points to a const struct bpf_dynptr */
+ __BPF_REG_TYPE_MAX,
+
+ /* Extended reg_types. */
+ PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
+ PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET,
+ PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
+ PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
* been checked for null. Used primarily to inform the verifier
* an explicit null check is required for this struct.
*/
- PTR_TO_BTF_ID_OR_NULL,
- PTR_TO_MEM, /* reg points to valid memory region */
- PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */
- PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */
- PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
- PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */
- PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
- PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */
- PTR_TO_FUNC, /* reg points to a bpf program function */
- PTR_TO_MAP_KEY, /* reg points to a map element key */
- __BPF_REG_TYPE_MAX,
+ PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID,
+
+ /* This must be the last entry. Its purpose is to ensure the enum is
+ * wide enough to hold the higher bits reserved for bpf_type_flag.
+ */
+ __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT,
};
+static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* The information passed from prog-specific *_is_valid_access
* back to the verifier.
*/
struct bpf_insn_access_aux {
enum bpf_reg_type reg_type;
+ bool is_ldsx;
union {
int ctx_field_size;
struct {
struct btf *btf;
u32 btf_id;
+ u32 ref_obj_id;
};
};
struct bpf_verifier_log *log; /* for verbose logs */
+ bool is_retval; /* is accessing the function return value? */
};
static inline void
@@ -453,11 +1047,37 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
aux->ctx_field_size = size;
}
+static inline bool bpf_is_ldimm64(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
+}
+
+static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
+{
+ return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
+}
+
+/* Given a BPF_ATOMIC instruction @atomic_insn, return true if it is an
+ * atomic load or store, and false if it is a read-modify-write instruction.
+ */
+static inline bool
+bpf_atomic_is_load_store(const struct bpf_insn *atomic_insn)
+{
+ switch (atomic_insn->imm) {
+ case BPF_LOAD_ACQ:
+ case BPF_STORE_REL:
+ return true;
+ default:
+ return false;
+ }
+}
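/* Illustrative only, not part of the diff: a hypothetical verifier-style
 * check built on the predicate above, allowing an acquiring load on
 * read-only memory while rejecting read-modify-write atomics.
 */
static inline bool example_atomic_ok_on_rdonly(const struct bpf_insn *insn)
{
	return bpf_atomic_is_load_store(insn) && insn->imm == BPF_LOAD_ACQ;
}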
+
struct bpf_prog_ops {
int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr);
};
+struct bpf_reg_state;
struct bpf_verifier_ops {
/* return eBPF function prototype for verification */
const struct bpf_func_proto *
@@ -472,6 +1092,8 @@ struct bpf_verifier_ops {
struct bpf_insn_access_aux *info);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
+ int (*gen_epilogue)(struct bpf_insn *insn, const struct bpf_prog *prog,
+ s16 ctx_stack_off);
int (*gen_ld_abs)(const struct bpf_insn *orig,
struct bpf_insn *insn_buf);
u32 (*convert_ctx_access)(enum bpf_access_type type,
@@ -479,11 +1101,8 @@ struct bpf_verifier_ops {
struct bpf_insn *dst,
struct bpf_prog *prog, u32 *target_size);
int (*btf_struct_access)(struct bpf_verifier_log *log,
- const struct btf *btf,
- const struct btf_type *t, int off, int size,
- enum bpf_access_type atype,
- u32 *next_btf_id);
- bool (*check_kfunc_call)(u32 kfunc_btf_id);
+ const struct bpf_reg_state *reg,
+ int off, int size);
};
struct bpf_prog_offload_ops {
@@ -513,14 +1132,6 @@ struct bpf_prog_offload {
u32 jited_len;
};
-enum bpf_cgroup_storage_type {
- BPF_CGROUP_STORAGE_SHARED,
- BPF_CGROUP_STORAGE_PERCPU,
- __BPF_CGROUP_STORAGE_MAX
-};
-
-#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
-
/* The longest tracepoint has 12 args.
* See include/trace/bpf_probe.h
*/
@@ -531,10 +1142,18 @@ enum bpf_cgroup_storage_type {
*/
#define MAX_BPF_FUNC_REG_ARGS 5
+/* The argument is a structure or a union. */
+#define BTF_FMODEL_STRUCT_ARG BIT(0)
+
+/* The argument is signed. */
+#define BTF_FMODEL_SIGNED_ARG BIT(1)
+
struct btf_func_model {
u8 ret_size;
+ u8 ret_flags;
u8 nr_args;
u8 arg_size[MAX_BPF_FUNC_ARGS];
+ u8 arg_flags[MAX_BPF_FUNC_ARGS];
};
/* Restore arguments before returning from trampoline to let original function
@@ -550,17 +1169,57 @@ struct btf_func_model {
* programs only. Should not be used with normal calls and indirect calls.
*/
#define BPF_TRAMP_F_SKIP_FRAME BIT(2)
+/* Store IP address of the caller on the trampoline stack,
+ * so it's available for trampoline's programs.
+ */
+#define BPF_TRAMP_F_IP_ARG BIT(3)
+/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
+#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)
+
+/* Get original function from stack instead of from provided direct address.
+ * Makes sense for trampolines with fexit or fmod_ret programs.
+ */
+#define BPF_TRAMP_F_ORIG_STACK BIT(5)
+
+/* This trampoline is on a function with another ftrace_ops with IPMODIFY,
+ * e.g., a live patch. This flag is set and cleared by ftrace call backs,
+ */
+#define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6)
+
+/* Indicate that the current trampoline is in a tail call context. It then
+ * has to cache and restore tail_call_cnt to avoid an infinite tail call
+ * loop.
+ */
+#define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7)
+
+/*
+ * Indicate the trampoline should be suitable to receive indirect calls;
+ * without this indirectly calling the generated code can result in #UD/#CP,
+ * depending on the CFI options.
+ *
+ * Used by bpf_struct_ops.
+ *
+ * Incompatible with FENTRY usage, overloads @func_addr argument.
+ */
+#define BPF_TRAMP_F_INDIRECT BIT(8)
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
- * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
+ * bytes on x86.
*/
-#define BPF_MAX_TRAMP_PROGS 38
+enum {
+#if defined(__s390x__)
+ BPF_MAX_TRAMP_LINKS = 27,
+#else
+ BPF_MAX_TRAMP_LINKS = 38,
+#endif
+};
-struct bpf_tramp_progs {
- struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
- int nr_progs;
+struct bpf_tramp_links {
+ struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
+ int nr_links;
};
+struct bpf_tramp_run_ctx;
+
/* Different use cases for BPF trampoline:
* 1. replace nop at the function entry (kprobe equivalent)
* flags = BPF_TRAMP_F_RESTORE_REGS
@@ -582,17 +1241,40 @@ struct bpf_tramp_progs {
* fexit = a set of program to run after original function
*/
struct bpf_tramp_image;
-int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
- struct bpf_tramp_progs *tprogs,
- void *orig_call);
-/* these two functions are called from generated trampoline */
-u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
-u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
-void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
+ struct bpf_tramp_links *tlinks,
+ void *func_addr);
+void *arch_alloc_bpf_trampoline(unsigned int size);
+void arch_free_bpf_trampoline(void *image, unsigned int size);
+int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size);
+int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks, void *func_addr);
+
+u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
+typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
+ struct bpf_tramp_run_ctx *run_ctx);
+typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
+ struct bpf_tramp_run_ctx *run_ctx);
+bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
+bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_JMP
+static inline bool bpf_trampoline_use_jmp(u64 flags)
+{
+ return flags & BPF_TRAMP_F_CALL_ORIG && !(flags & BPF_TRAMP_F_SKIP_FRAME);
+}
+#else
+static inline bool bpf_trampoline_use_jmp(u64 flags)
+{
+ return false;
+}
+#endif
struct bpf_ksym {
unsigned long start;
@@ -613,6 +1295,7 @@ enum bpf_tramp_prog_type {
struct bpf_tramp_image {
void *image;
+ int size;
struct bpf_ksym ksym;
struct percpu_ref pcref;
void *ip_after_call;
@@ -626,9 +1309,11 @@ struct bpf_tramp_image {
struct bpf_trampoline {
/* hlist for trampoline_table */
struct hlist_node hlist;
+ struct ftrace_ops *fops;
/* serializes access to fields of this trampoline */
struct mutex mutex;
refcount_t refcnt;
+ u32 flags;
u64 key;
struct {
struct btf_func_model model;
@@ -646,13 +1331,12 @@ struct bpf_trampoline {
int progs_cnt[BPF_TRAMP_MAX];
/* Executable image of trampoline */
struct bpf_tramp_image *cur_image;
- u64 selector;
- struct module *mod;
};
struct bpf_attach_target_info {
struct btf_func_model fmodel;
long tgt_addr;
+ struct module *tgt_mod;
const char *tgt_name;
const struct btf_type *tgt_type;
};
@@ -671,24 +1355,118 @@ struct bpf_dispatcher {
struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
int num_progs;
void *image;
+ void *rw_image;
u32 image_off;
struct bpf_ksym ksym;
+#ifdef CONFIG_HAVE_STATIC_CALL
+ struct static_call_key *sc_key;
+ void *sc_tramp;
+#endif
};
-static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
+#ifndef __bpfcall
+#define __bpfcall __nocfi
+#endif
+
+static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func(
const void *ctx,
const struct bpf_insn *insnsi,
- unsigned int (*bpf_func)(const void *,
- const struct bpf_insn *))
+ bpf_func_t bpf_func)
{
return bpf_func(ctx, insnsi);
}
+
+/* the implementation of the opaque uapi struct bpf_dynptr */
+struct bpf_dynptr_kern {
+ void *data;
+ /* Size represents the number of usable bytes of dynptr data.
+ * If for example the offset is at 4 for a local dynptr whose data is
+ * of type u64, the number of usable bytes is 4.
+ *
+ * The upper 8 bits are reserved. The layout is as follows:
+ * Bits 0 - 23 = size
+ * Bits 24 - 30 = dynptr type
+ * Bit 31 = whether dynptr is read-only
+ */
+ u32 size;
+ u32 offset;
+} __aligned(8);
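/* Illustrative only, not part of the diff: assuming the bit layout in the
 * comment above, the packed 'size' word would be decoded along these lines
 * (the authoritative helpers live in kernel/bpf/helpers.c):
 */
#define EXAMPLE_DYNPTR_SIZE_MASK	0x00ffffffU	/* bits 0-23 */
#define EXAMPLE_DYNPTR_RDONLY_BIT	BIT(31)

static inline u32 example_dynptr_usable_bytes(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & EXAMPLE_DYNPTR_SIZE_MASK;
}

static inline bool example_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & EXAMPLE_DYNPTR_RDONLY_BIT;
}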
+
+enum bpf_dynptr_type {
+ BPF_DYNPTR_TYPE_INVALID,
+ /* Points to memory that is local to the bpf program */
+ BPF_DYNPTR_TYPE_LOCAL,
+ /* Underlying data is a ringbuf record */
+ BPF_DYNPTR_TYPE_RINGBUF,
+ /* Underlying data is a sk_buff */
+ BPF_DYNPTR_TYPE_SKB,
+ /* Underlying data is a xdp_buff */
+ BPF_DYNPTR_TYPE_XDP,
+ /* Points to skb_metadata_end()-skb_metadata_len() */
+ BPF_DYNPTR_TYPE_SKB_META,
+ /* Underlying data is a file */
+ BPF_DYNPTR_TYPE_FILE,
+};
+
+int bpf_dynptr_check_size(u64 size);
+u64 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
+const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u64 len);
+void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u64 len);
+bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
+int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u64 offset,
+ void *src, u64 len, u64 flags);
+void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u64 offset,
+ void *buffer__opt, u64 buffer__szk);
+
+static inline int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u64 offset, u64 len)
+{
+ u64 size = __bpf_dynptr_size(ptr);
+
+ if (len > size || offset > size - len)
+ return -E2BIG;
+
+ return 0;
+}
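/* Note, illustrative only: the check above is deliberately written as
 * 'offset > size - len' instead of 'offset + len > size' so that a huge
 * offset plus len cannot wrap around u64 and falsely pass; testing
 * 'len > size' first guarantees the subtraction cannot underflow.
 */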
+
#ifdef CONFIG_BPF_JIT
-int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
-int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
+int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr,
+ struct bpf_prog *tgt_prog);
+int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr,
+ struct bpf_prog *tgt_prog);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
+int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
+
+/*
+ * When the architecture supports STATIC_CALL, replace the bpf_dispatcher_fn
+ * indirection with a direct call to the bpf program. If the architecture does
+ * not have STATIC_CALL, avoid a double-indirection.
+ */
+#ifdef CONFIG_HAVE_STATIC_CALL
+
+#define __BPF_DISPATCHER_SC_INIT(_name) \
+ .sc_key = &STATIC_CALL_KEY(_name), \
+ .sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),
+
+#define __BPF_DISPATCHER_SC(name) \
+ DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)
+
+#define __BPF_DISPATCHER_CALL(name) \
+ static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)
+
+#define __BPF_DISPATCHER_UPDATE(_d, _new) \
+ __static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))
+
+#else
+#define __BPF_DISPATCHER_SC_INIT(name)
+#define __BPF_DISPATCHER_SC(name)
+#define __BPF_DISPATCHER_CALL(name) bpf_func(ctx, insnsi)
+#define __BPF_DISPATCHER_UPDATE(_d, _new)
+#endif
+
#define BPF_DISPATCHER_INIT(_name) { \
.mutex = __MUTEX_INITIALIZER(_name.mutex), \
.func = &_name##_func, \
@@ -700,54 +1478,59 @@ void bpf_trampoline_put(struct bpf_trampoline *tr);
.name = #_name, \
.lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
}, \
+ __BPF_DISPATCHER_SC_INIT(_name##_call) \
}
#define DEFINE_BPF_DISPATCHER(name) \
- noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \
+ __BPF_DISPATCHER_SC(name); \
+ noinline __bpfcall unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
- unsigned int (*bpf_func)(const void *, \
- const struct bpf_insn *)) \
+ bpf_func_t bpf_func) \
{ \
- return bpf_func(ctx, insnsi); \
+ return __BPF_DISPATCHER_CALL(name); \
} \
EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
struct bpf_dispatcher bpf_dispatcher_##name = \
BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
+
#define DECLARE_BPF_DISPATCHER(name) \
unsigned int bpf_dispatcher_##name##_func( \
const void *ctx, \
const struct bpf_insn *insnsi, \
- unsigned int (*bpf_func)(const void *, \
- const struct bpf_insn *)); \
+ bpf_func_t bpf_func); \
extern struct bpf_dispatcher bpf_dispatcher_##name;
+
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
struct bpf_prog *to);
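/* Illustrative only, not part of the diff: a subsystem instantiates a
 * dispatcher once and then routes program invocations through it, switching
 * the attached program with bpf_dispatcher_change_prog(). XDP, for example,
 * defines one with DEFINE_BPF_DISPATCHER(xdp). A hypothetical sketch:
 *
 *	DEFINE_BPF_DISPATCHER(example)
 *
 *	ret = bpf_dispatcher_example_func(ctx, prog->insnsi, prog->bpf_func);
 *	...
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(example), old, new);
 */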
/* Called only from JIT-enabled code, so there's no need for stubs. */
-void *bpf_jit_alloc_exec_page(void);
-void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
+void bpf_image_ksym_init(void *data, unsigned int size, struct bpf_ksym *ksym);
+void bpf_image_ksym_add(struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
-int bpf_jit_charge_modmem(u32 pages);
-void bpf_jit_uncharge_modmem(u32 pages);
+int bpf_jit_charge_modmem(u32 size);
+void bpf_jit_uncharge_modmem(u32 size);
+bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
-static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
- struct bpf_trampoline *tr)
+static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr,
+ struct bpf_prog *tgt_prog)
{
return -ENOTSUPP;
}
-static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
- struct bpf_trampoline *tr)
+static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
+ struct bpf_trampoline *tr,
+ struct bpf_prog *tgt_prog)
{
return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return NULL;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
@@ -761,11 +1544,17 @@ static inline bool is_bpf_image_address(unsigned long address)
{
return false;
}
+static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
+{
+ return false;
+}
#endif
struct bpf_func_info_aux {
u16 linkage;
bool unreliable;
+ bool called : 1;
+ bool verified : 1;
};
enum bpf_jit_poke_reason {
@@ -777,6 +1566,7 @@ struct bpf_jit_poke_descriptor {
void *tailcall_target;
void *tailcall_bypass;
void *bypass_addr;
+ void *aux;
union {
struct {
struct bpf_map *map;
@@ -793,7 +1583,10 @@ struct bpf_jit_poke_descriptor {
struct bpf_ctx_arg_aux {
u32 offset;
enum bpf_reg_type reg_type;
+ struct btf *btf;
u32 btf_id;
+ u32 ref_obj_id;
+ bool refcounted;
};
struct btf_mod_pair {
@@ -803,6 +1596,37 @@ struct btf_mod_pair {
struct bpf_kfunc_desc_tab;
+enum bpf_stream_id {
+ BPF_STDOUT = 1,
+ BPF_STDERR = 2,
+};
+
+struct bpf_stream_elem {
+ struct llist_node node;
+ int total_len;
+ int consumed_len;
+ char str[];
+};
+
+enum {
+ /* 100k bytes */
+ BPF_STREAM_MAX_CAPACITY = 100000ULL,
+};
+
+struct bpf_stream {
+ atomic_t capacity;
+ struct llist_head log; /* list of in-flight stream elements in LIFO order */
+
+ struct mutex lock; /* lock protecting backlog_{head,tail} */
+ struct llist_node *backlog_head; /* list of in-flight stream elements in FIFO order */
+ struct llist_node *backlog_tail; /* tail of the list above */
+};
+
+struct bpf_stream_stage {
+ struct llist_head log;
+ int len;
+};
+
struct bpf_prog_aux {
atomic64_t refcnt;
u32 used_map_cnt;
@@ -813,47 +1637,74 @@ struct bpf_prog_aux {
u32 stack_depth;
u32 id;
u32 func_cnt; /* used by non-func prog as the number of func progs */
+ u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */
u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
u32 attach_btf_id; /* in-kernel BTF type id to attach to */
+ u32 attach_st_ops_member_off;
u32 ctx_arg_info_size;
u32 max_rdonly_access;
u32 max_rdwr_access;
+ u32 subprog_start;
struct btf *attach_btf;
- const struct bpf_ctx_arg_aux *ctx_arg_info;
+ struct bpf_ctx_arg_aux *ctx_arg_info;
+ void __percpu *priv_stack_ptr;
struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
struct bpf_prog *dst_prog;
struct bpf_trampoline *dst_trampoline;
enum bpf_prog_type saved_dst_prog_type;
enum bpf_attach_type saved_dst_attach_type;
bool verifier_zext; /* Zero extensions has been inserted by verifier. */
- bool offload_requested;
+ bool dev_bound; /* Program is bound to the netdev. */
+ bool offload_requested; /* Program is bound and offloaded to the netdev. */
bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
+ bool attach_tracing_prog; /* true if tracing another tracing program */
bool func_proto_unreliable;
- bool sleepable;
bool tail_call_reachable;
- struct hlist_node tramp_hlist;
+ bool xdp_has_frags;
+ bool exception_cb;
+ bool exception_boundary;
+ bool is_extended; /* true if extended by freplace program */
+ bool jits_use_priv_stack;
+ bool priv_stack_requested;
+ bool changes_pkt_data;
+ bool might_sleep;
+ bool kprobe_write_ctx;
+ u64 prog_array_member_cnt; /* counts how many times as member of prog_array */
+ struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */
+ struct bpf_arena *arena;
+ void (*recursion_detected)(struct bpf_prog *prog); /* callback if recursion is detected */
/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
const struct btf_type *attach_func_proto;
/* function name for valid attach_btf_id */
const char *attach_func_name;
struct bpf_prog **func;
+ struct bpf_prog_aux *main_prog_aux;
void *jit_data; /* JIT specific data. arch dependent */
struct bpf_jit_poke_descriptor *poke_tab;
struct bpf_kfunc_desc_tab *kfunc_tab;
+ struct bpf_kfunc_btf_tab *kfunc_btf_tab;
u32 size_poke_tab;
+#ifdef CONFIG_FINEIBT
+ struct bpf_ksym ksym_prefix;
+#endif
struct bpf_ksym ksym;
const struct bpf_prog_ops *ops;
+ const struct bpf_struct_ops *st_ops;
struct bpf_map **used_maps;
struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
struct btf_mod_pair *used_btfs;
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time; /* ns since boottime */
+ u32 verified_insns;
+ int cgroup_atype; /* enum cgroup_bpf_attach_type */
struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
char name[BPF_OBJ_NAME_LEN];
+ u64 (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp, u64, u64);
#ifdef CONFIG_SECURITY
void *security;
#endif
+ struct bpf_token *token;
struct bpf_prog_offload *offload;
struct btf *btf;
struct bpf_func_info *func_info;
@@ -880,22 +1731,55 @@ struct bpf_prog_aux {
* main prog always has linfo_idx == 0
*/
u32 linfo_idx;
+ struct module *mod;
u32 num_exentries;
struct exception_table_entry *extable;
union {
struct work_struct work;
struct rcu_head rcu;
};
+ struct bpf_stream stream[2];
+};
+
+struct bpf_prog {
+ u16 pages; /* Number of allocated pages */
+ u16 jited:1, /* Is our filter JIT'ed? */
+ jit_requested:1,/* archs need to JIT the prog */
+ gpl_compatible:1, /* Is filter GPL compatible? */
+ cb_access:1, /* Is control block accessed? */
+ dst_needed:1, /* Do we need dst entry? */
+ blinding_requested:1, /* needs constant blinding */
+ blinded:1, /* Was blinded */
+ is_func:1, /* program is a bpf function */
+ kprobe_override:1, /* Do we override a kprobe? */
+ has_callchain_buf:1, /* callchain buffer allocated? */
+ enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
+ call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
+ call_get_func_ip:1, /* Do we call get_func_ip() */
+ tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */
+ sleepable:1; /* BPF program is sleepable */
+ enum bpf_prog_type type; /* Type of BPF program */
+ enum bpf_attach_type expected_attach_type; /* For some prog types */
+ u32 len; /* Number of filter blocks */
+ u32 jited_len; /* Size of jited insns in bytes */
+ union {
+ u8 digest[SHA256_DIGEST_SIZE];
+ u8 tag[BPF_TAG_SIZE];
+ };
+ struct bpf_prog_stats __percpu *stats;
+ int __percpu *active;
+ unsigned int (*bpf_func)(const void *ctx,
+ const struct bpf_insn *insn);
+ struct bpf_prog_aux *aux; /* Auxiliary fields */
+ struct sock_fprog_kern *orig_prog; /* Original BPF program */
+ /* Instructions for interpreter */
+ union {
+ DECLARE_FLEX_ARRAY(struct sock_filter, insns);
+ DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
+ };
};
struct bpf_array_aux {
- /* 'Ownership' of prog array is claimed by the first program that
- * is going to use this map or by the first program which FD is
- * stored in the map to make sure that all callers and callees have
- * the same prog type and JITed flag.
- */
- enum bpf_prog_type type;
- bool jited;
/* Programs with direct jumps into programs part of this array. */
struct list_head poke_progs;
struct bpf_map *map;
@@ -909,18 +1793,69 @@ struct bpf_link {
enum bpf_link_type type;
const struct bpf_link_ops *ops;
struct bpf_prog *prog;
- struct work_struct work;
+
+ u32 flags;
+ enum bpf_attach_type attach_type;
+
+ /* rcu is used before freeing, work can be used to schedule that
+ * RCU-based freeing before that, so they never overlap
+ */
+ union {
+ struct rcu_head rcu;
+ struct work_struct work;
+ };
+ /* whether the BPF link itself has "sleepable" semantics, which can differ
+ * from the underlying BPF program's "sleepable" semantics, as a BPF
+ * link's semantics is determined by the target attach hook
+ */
+ bool sleepable;
};
struct bpf_link_ops {
void (*release)(struct bpf_link *link);
+ /* deallocate link resources callback, called without RCU grace period
+ * waiting
+ */
void (*dealloc)(struct bpf_link *link);
+ /* deallocate link resources callback, called after RCU grace period;
+ * if either the underlying BPF program is sleepable or BPF link's
+ * target hook is sleepable, we'll go through tasks trace RCU GP and
+ * then "classic" RCU GP; this need for chaining tasks trace and
+ * classic RCU GPs is designated by setting the bpf_link->sleepable flag
+ */
+ void (*dealloc_deferred)(struct bpf_link *link);
int (*detach)(struct bpf_link *link);
int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
struct bpf_prog *old_prog);
void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
int (*fill_link_info)(const struct bpf_link *link,
struct bpf_link_info *info);
+ int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
+ struct bpf_map *old_map);
+ __poll_t (*poll)(struct file *file, struct poll_table_struct *pts);
+};
+
+struct bpf_tramp_link {
+ struct bpf_link link;
+ struct hlist_node tramp_hlist;
+ u64 cookie;
+};
+
+struct bpf_shim_tramp_link {
+ struct bpf_tramp_link link;
+ struct bpf_trampoline *trampoline;
+};
+
+struct bpf_tracing_link {
+ struct bpf_tramp_link link;
+ struct bpf_trampoline *trampoline;
+ struct bpf_prog *tgt_prog;
+};
+
+struct bpf_raw_tp_link {
+ struct bpf_link link;
+ struct bpf_raw_event_map *btp;
+ u64 cookie;
};
struct bpf_link_primer {
@@ -930,36 +1865,167 @@ struct bpf_link_primer {
u32 id;
};
+struct bpf_mount_opts {
+ kuid_t uid;
+ kgid_t gid;
+ umode_t mode;
+
+ /* BPF token-related delegation options */
+ u64 delegate_cmds;
+ u64 delegate_maps;
+ u64 delegate_progs;
+ u64 delegate_attachs;
+};
+
+struct bpf_token {
+ struct work_struct work;
+ atomic64_t refcnt;
+ struct user_namespace *userns;
+ u64 allowed_cmds;
+ u64 allowed_maps;
+ u64 allowed_progs;
+ u64 allowed_attachs;
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
+};
+
struct bpf_struct_ops_value;
struct btf_member;
#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
+/**
+ * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to
+ * define a BPF_MAP_TYPE_STRUCT_OPS map type composed
+ * of BPF_PROG_TYPE_STRUCT_OPS progs.
+ * @verifier_ops: A structure of callbacks that are invoked by the verifier
+ * when determining whether the struct_ops progs in the
+ * struct_ops map are valid.
+ * @init: A callback that is invoked a single time, and before any other
+ * callback, to initialize the structure. A nonzero return value means
+ * the subsystem could not be initialized.
+ * @check_member: When defined, a callback invoked by the verifier to allow
+ * the subsystem to determine if an entry in the struct_ops map
+ * is valid. A nonzero return value means that the map is
+ * invalid and should be rejected by the verifier.
+ * @init_member: A callback that is invoked for each member of the struct_ops
+ * map to allow the subsystem to initialize the member. A nonzero
+ * value means the member could not be initialized. This callback
+ * is exclusive with the @type, @type_id, @value_type, and
+ * @value_id fields.
+ * @reg: A callback that is invoked when the struct_ops map has been
+ * initialized and is being attached to. Zero means the struct_ops map
+ * has been successfully registered and is live. A nonzero return value
+ * means the struct_ops map could not be registered.
+ * @unreg: A callback that is invoked when the struct_ops map should be
+ * unregistered.
+ * @update: A callback that is invoked when the live struct_ops map is being
+ * updated to contain new values. This callback is only invoked when
+ * the struct_ops map is loaded with BPF_F_LINK. If not defined, it is
+ * assumed that the struct_ops map cannot be updated.
+ * @validate: A callback that is invoked after all of the members have been
+ * initialized. This callback should perform static checks on the
+ * map, meaning that it should either fail or succeed
+ * deterministically. A struct_ops map that has been validated may
+ * not necessarily succeed in being registered if the call to @reg
+ * fails. For example, a valid struct_ops map may be loaded, but
+ * then fail to be registered because another struct_ops map is
+ * already active in the subsystem. For this
+ * reason, if this callback is not defined, the check is skipped as
+ * the struct_ops map will have final verification performed in
+ * @reg.
+ * @cfi_stubs: Pointer to a structure of stub functions for CFI. These stubs
+ * provide the correct Control Flow Integrity hashes for the
+ * trampolines generated by BPF struct_ops.
+ * @owner: The module that owns this struct_ops. Used for module reference
+ * counting to ensure the module providing the struct_ops cannot be
+ * unloaded while in use.
+ * @name: The name of the struct bpf_struct_ops object.
+ * @func_models: Func models
+ */
struct bpf_struct_ops {
const struct bpf_verifier_ops *verifier_ops;
int (*init)(struct btf *btf);
int (*check_member)(const struct btf_type *t,
- const struct btf_member *member);
+ const struct btf_member *member,
+ const struct bpf_prog *prog);
int (*init_member)(const struct btf_type *t,
const struct btf_member *member,
void *kdata, const void *udata);
- int (*reg)(void *kdata);
- void (*unreg)(void *kdata);
- const struct btf_type *type;
- const struct btf_type *value_type;
+ int (*reg)(void *kdata, struct bpf_link *link);
+ void (*unreg)(void *kdata, struct bpf_link *link);
+ int (*update)(void *kdata, void *old_kdata, struct bpf_link *link);
+ int (*validate)(void *kdata);
+ void *cfi_stubs;
+ struct module *owner;
const char *name;
struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
+};
+
+/* Every member of a struct_ops type has an instance even if a member is not
+ * an operator (function pointer). The "info" field will be assigned to
+ * prog->aux->ctx_arg_info of BPF struct_ops programs to provide the
+ * argument information required by the verifier to verify the program.
+ *
+ * btf_ctx_access() will look up prog->aux->ctx_arg_info to find the
+ * corresponding entry for a given argument.
+ */
+struct bpf_struct_ops_arg_info {
+ struct bpf_ctx_arg_aux *info;
+ u32 cnt;
+};
+
+struct bpf_struct_ops_desc {
+ struct bpf_struct_ops *st_ops;
+
+ const struct btf_type *type;
+ const struct btf_type *value_type;
u32 type_id;
u32 value_id;
+
+ /* Collection of argument information for each member */
+ struct bpf_struct_ops_arg_info *arg_info;
+};
+
+enum bpf_struct_ops_state {
+ BPF_STRUCT_OPS_STATE_INIT,
+ BPF_STRUCT_OPS_STATE_INUSE,
+ BPF_STRUCT_OPS_STATE_TOBEFREE,
+ BPF_STRUCT_OPS_STATE_READY,
+};
+
+struct bpf_struct_ops_common_value {
+ refcount_t refcnt;
+ enum bpf_struct_ops_state state;
};
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
+/* This macro helps a developer register a struct_ops type and generate
+ * type information correctly. Developers should use this macro to register
+ * a struct_ops type instead of calling __register_bpf_struct_ops() directly.
+ */
+#define register_bpf_struct_ops(st_ops, type) \
+ ({ \
+ struct bpf_struct_ops_##type { \
+ struct bpf_struct_ops_common_value common; \
+ struct type data ____cacheline_aligned_in_smp; \
+ }; \
+ BTF_TYPE_EMIT(struct bpf_struct_ops_##type); \
+ __register_bpf_struct_ops(st_ops); \
+ })
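/* Illustrative only, not part of the diff: a subsystem typically registers
 * its ops table from init code; TCP congestion control, for instance, does
 * the equivalent of the following (details elided):
 *
 *	static struct bpf_struct_ops bpf_tcp_congestion_ops = {
 *		.verifier_ops	= &bpf_tcp_ca_verifier_ops,
 *		.init		= ...,
 *		.name		= "tcp_congestion_ops",
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	err = register_bpf_struct_ops(&bpf_tcp_congestion_ops,
 *				      tcp_congestion_ops);
 */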
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
-const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
-void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
+int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
void *value);
+int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
+ struct bpf_tramp_link *link,
+ const struct btf_func_model *model,
+ void *stub_func,
+ void **image, u32 *image_off,
+ bool allow_alloc);
+void bpf_struct_ops_image_free(void *image);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
if (owner == BPF_MODULE_OWNER)
@@ -974,15 +2040,32 @@ static inline void bpf_module_put(const void *data, struct module *owner)
else
module_put(owner);
}
+int bpf_struct_ops_link_create(union bpf_attr *attr);
+u32 bpf_struct_ops_id(const void *kdata);
+
+#ifdef CONFIG_NET
+/* Define it here to avoid the use of a forward declaration */
+struct bpf_dummy_ops_state {
+ int val;
+};
+
+struct bpf_dummy_ops {
+ int (*test_1)(struct bpf_dummy_ops_state *cb);
+ int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
+ char a3, unsigned long a4);
+ int (*test_sleepable)(struct bpf_dummy_ops_state *cb);
+};
+
+int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+#endif
+int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
+ struct btf *btf,
+ struct bpf_verifier_log *log);
+void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map);
+void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc);
#else
-static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
-{
- return NULL;
-}
-static inline void bpf_struct_ops_init(struct btf *btf,
- struct bpf_verifier_log *log)
-{
-}
+#define register_bpf_struct_ops(st_ops, type) ({ (void *)(st_ops); 0; })
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
return try_module_get(owner);
@@ -991,12 +2074,48 @@ static inline void bpf_module_put(const void *data, struct module *owner)
{
module_put(owner);
}
+static inline int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
+{
+ return -ENOTSUPP;
+}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
void *key,
void *value)
{
return -EINVAL;
}
+static inline int bpf_struct_ops_link_create(union bpf_attr *attr)
+{
+ return -EOPNOTSUPP;
+}
+static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
+{
+}
+
+static inline void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
+{
+}
+
+#endif
+
+int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog,
+ const struct bpf_ctx_arg_aux *info, u32 cnt);
+
+#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
+int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+ int cgroup_atype,
+ enum bpf_attach_type attach_type);
+void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
+#else
+static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+ int cgroup_atype,
+ enum bpf_attach_type attach_type)
+{
+ return -EOPNOTSUPP;
+}
+static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
+{
+}
#endif
struct bpf_array {
@@ -1005,14 +2124,28 @@ struct bpf_array {
u32 index_mask;
struct bpf_array_aux *aux;
union {
- char value[0] __aligned(8);
- void *ptrs[0] __aligned(8);
- void __percpu *pptrs[0] __aligned(8);
+ DECLARE_FLEX_ARRAY(char, value) __aligned(8);
+ DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8);
+ DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8);
};
};
+/*
+ * The bpf_array_get_next_key() function may be used for all array-like
+ * maps, i.e., maps with u32 keys in the range [0, ..., max_entries).
+ */
+int bpf_array_get_next_key(struct bpf_map *map, void *key, void *next_key);
+
#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */
-#define MAX_TAIL_CALL_CNT 32
+#define MAX_TAIL_CALL_CNT 33
+
+/* Maximum number of loops for bpf_loop and bpf_iter_num.
+ * It's an enum to expose it (and thus make it discoverable) through BTF.
+ */
+enum {
+ BPF_MAX_LOOPS = 8 * 1024 * 1024,
+ BPF_MAX_TIMED_LOOPS = 0xffff,
+};
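On the program side, bpf_loop() iterations are clamped by BPF_MAX_LOOPS; requesting more makes the helper fail rather than run. A hedged BPF C sketch (assumes libbpf's bpf_helpers.h; the attach point and names are illustrative):

/* Sketch: sum 0..999 via bpf_loop(); an nr_loops above BPF_MAX_LOOPS
 * makes the helper return -E2BIG instead of iterating.
 */
static long accumulate(__u64 index, void *ctx)
{
	*(long *)ctx += index;
	return 0;	/* 0 = continue, 1 = stop early */
}

SEC("fentry/do_nanosleep")
int sum_example(void *ctx)
{
	long sum = 0;

	bpf_loop(1000, accumulate, &sum, 0);
	return 0;
}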
#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \
BPF_F_RDONLY_PROG | \
@@ -1022,6 +2155,11 @@ struct bpf_array {
#define BPF_MAP_CAN_READ BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)
+/* Maximum number of user-producer ring buffer samples that can be drained in
+ * a call to bpf_user_ringbuf_drain().
+ */
+#define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024)
+
static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
@@ -1043,6 +2181,16 @@ static inline bool bpf_map_flags_access_ok(u32 access_flags)
(BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
+static inline struct bpf_map_owner *bpf_map_owner_alloc(struct bpf_map *map)
+{
+ return kzalloc(sizeof(*map->owner), GFP_ATOMIC);
+}
+
+static inline void bpf_map_owner_free(struct bpf_map *map)
+{
+ kfree(map->owner);
+}
+
struct bpf_event_entry {
struct perf_event *event;
struct file *perf_file;
@@ -1050,10 +2198,20 @@ struct bpf_event_entry {
struct rcu_head rcu;
};
-bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+static inline bool map_type_contains_progs(struct bpf_map *map)
+{
+ return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
+ map->map_type == BPF_MAP_TYPE_DEVMAP ||
+ map->map_type == BPF_MAP_TYPE_CPUMAP;
+}
+
+bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
+const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
+
+const struct bpf_func_proto *bpf_get_perf_event_read_value_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
unsigned long off, unsigned long len);
@@ -1069,7 +2227,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
/* an array of programs to be executed under rcu_lock.
*
* Typical usage:
- * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
+ * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run);
*
* the structure returned by bpf_prog_array_alloc() should be populated
* with program pointers and the last pointer must be NULL.
@@ -1080,7 +2238,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
*/
struct bpf_prog_array_item {
struct bpf_prog *prog;
- struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+ union {
+ struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+ u64 bpf_cookie;
+ };
};
struct bpf_prog_array {
@@ -1088,8 +2249,23 @@ struct bpf_prog_array {
struct bpf_prog_array_item items[];
};
+struct bpf_empty_prog_array {
+ struct bpf_prog_array hdr;
+ struct bpf_prog *null_prog;
+};
+
+/* To avoid allocating an empty bpf_prog_array for cgroups that don't
+ * have any bpf program attached, use the single global
+ * 'bpf_empty_prog_array'. It will not be modified by the caller of
+ * bpf_prog_array_alloc() (since the caller requested prog_cnt == 0),
+ * and that pointer should still be 'freed' by bpf_prog_array_free().
+ */
+extern struct bpf_empty_prog_array bpf_empty_prog_array;
+
struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
+/* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
+void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
@@ -1106,115 +2282,137 @@ int bpf_prog_array_copy_info(struct bpf_prog_array *array,
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
struct bpf_prog *exclude_prog,
struct bpf_prog *include_prog,
+ u64 bpf_cookie,
struct bpf_prog_array **new_array);
+struct bpf_run_ctx {};
+
+struct bpf_cg_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ const struct bpf_prog_array_item *prog_item;
+ int retval;
+};
+
+struct bpf_trace_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ u64 bpf_cookie;
+ bool is_uprobe;
+};
+
+struct bpf_tramp_run_ctx {
+ struct bpf_run_ctx run_ctx;
+ u64 bpf_cookie;
+ struct bpf_run_ctx *saved_run_ctx;
+};
+
+static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
+{
+ struct bpf_run_ctx *old_ctx = NULL;
+
+#ifdef CONFIG_BPF_SYSCALL
+ old_ctx = current->bpf_ctx;
+ current->bpf_ctx = new_ctx;
+#endif
+ return old_ctx;
+}
+
+static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
+{
+#ifdef CONFIG_BPF_SYSCALL
+ current->bpf_ctx = old_ctx;
+#endif
+}
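This pair is meant to bracket program invocation so helpers such as bpf_get_attach_cookie() can reach per-run state through current->bpf_ctx. A minimal sketch, assuming cookie, prog and ctx are in scope and the caller already holds the required rcu/migrate protection:

/* Sketch: save/run/restore around a single program invocation */
struct bpf_trace_run_ctx run_ctx = { .bpf_cookie = cookie };
struct bpf_run_ctx *old_run_ctx;
u32 ret;

old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
ret = bpf_prog_run(prog, ctx);
bpf_reset_run_ctx(old_run_ctx);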
+
/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN (1 << 0)
-/* For BPF_PROG_RUN_ARRAY_FLAGS and __BPF_PROG_RUN_ARRAY,
- * if bpf_cgroup_storage_set() failed, the rest of programs
- * will not execute. This should be a really rare scenario
- * as it requires BPF_CGROUP_STORAGE_NEST_MAX number of
- * preemptions all between bpf_cgroup_storage_set() and
- * bpf_cgroup_storage_unset() on the same cpu.
- */
-#define BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, ret_flags) \
- ({ \
- struct bpf_prog_array_item *_item; \
- struct bpf_prog *_prog; \
- struct bpf_prog_array *_array; \
- u32 _ret = 1; \
- u32 func_ret; \
- migrate_disable(); \
- rcu_read_lock(); \
- _array = rcu_dereference(array); \
- _item = &_array->items[0]; \
- while ((_prog = READ_ONCE(_item->prog))) { \
- if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \
- break; \
- func_ret = func(_prog, ctx); \
- _ret &= (func_ret & 1); \
- *(ret_flags) |= (func_ret >> 1); \
- bpf_cgroup_storage_unset(); \
- _item++; \
- } \
- rcu_read_unlock(); \
- migrate_enable(); \
- _ret; \
- })
-
-#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \
- ({ \
- struct bpf_prog_array_item *_item; \
- struct bpf_prog *_prog; \
- struct bpf_prog_array *_array; \
- u32 _ret = 1; \
- migrate_disable(); \
- rcu_read_lock(); \
- _array = rcu_dereference(array); \
- if (unlikely(check_non_null && !_array))\
- goto _out; \
- _item = &_array->items[0]; \
- while ((_prog = READ_ONCE(_item->prog))) { \
- if (!set_cg_storage) { \
- _ret &= func(_prog, ctx); \
- } else { \
- if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \
- break; \
- _ret &= func(_prog, ctx); \
- bpf_cgroup_storage_unset(); \
- } \
- _item++; \
- } \
-_out: \
- rcu_read_unlock(); \
- migrate_enable(); \
- _ret; \
- })
-
-/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
- * so BPF programs can request cwr for TCP packets.
- *
- * Current cgroup skb programs can only return 0 or 1 (0 to drop the
- * packet. This macro changes the behavior so the low order bit
- * indicates whether the packet should be dropped (0) or not (1)
- * and the next bit is a congestion notification bit. This could be
- * used by TCP to call tcp_enter_cwr()
+typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
+
+static __always_inline u32
+bpf_prog_run_array(const struct bpf_prog_array *array,
+ const void *ctx, bpf_prog_run_fn run_prog)
+{
+ const struct bpf_prog_array_item *item;
+ const struct bpf_prog *prog;
+ struct bpf_run_ctx *old_run_ctx;
+ struct bpf_trace_run_ctx run_ctx;
+ u32 ret = 1;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
+
+ if (unlikely(!array))
+ return ret;
+
+ run_ctx.is_uprobe = false;
+
+ migrate_disable();
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ run_ctx.bpf_cookie = item->bpf_cookie;
+ ret &= run_prog(prog, ctx);
+ item++;
+ }
+ bpf_reset_run_ctx(old_run_ctx);
+ migrate_enable();
+ return ret;
+}
+
+/* Notes on RCU design for bpf_prog_arrays containing sleepable programs:
*
- * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
- * 0: drop packet
- * 1: keep packet
- * 2: drop packet and cn
- * 3: keep packet and cn
+ * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array
+ * overall. As a result, the array must be freed with
+ * bpf_prog_array_free_sleepable() so that a tasks_trace rcu grace
+ * period elapses before the memory is reclaimed.
*
- * This macro then converts it to one of the NET_XMIT or an error
- * code that is then interpreted as drop packet (and no cn):
- * 0: NET_XMIT_SUCCESS skb should be transmitted
- * 1: NET_XMIT_DROP skb should be dropped and cn
- * 2: NET_XMIT_CN skb should be transmitted and cn
- * 3: -EPERM skb should be dropped
+ * When a non-sleepable program is inside the array, we take the rcu read
+ * section and disable preemption for that program alone, so it can access
+ * rcu-protected dynamically sized maps.
*/
-#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \
- ({ \
- u32 _flags = 0; \
- bool _cn; \
- u32 _ret; \
- _ret = BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, &_flags); \
- _cn = _flags & BPF_RET_SET_CN; \
- if (_ret) \
- _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \
- else \
- _ret = (_cn ? NET_XMIT_DROP : -EPERM); \
- _ret; \
- })
+static __always_inline u32
+bpf_prog_run_array_uprobe(const struct bpf_prog_array *array,
+ const void *ctx, bpf_prog_run_fn run_prog)
+{
+ const struct bpf_prog_array_item *item;
+ const struct bpf_prog *prog;
+ struct bpf_run_ctx *old_run_ctx;
+ struct bpf_trace_run_ctx run_ctx;
+ u32 ret = 1;
+
+ might_fault();
+ RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(), "no rcu lock held");
-#define BPF_PROG_RUN_ARRAY(array, ctx, func) \
- __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)
+ if (unlikely(!array))
+ return ret;
-#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \
- __BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)
+ migrate_disable();
+
+ run_ctx.is_uprobe = true;
+
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ if (!prog->sleepable)
+ rcu_read_lock();
+
+ run_ctx.bpf_cookie = item->bpf_cookie;
+ ret &= run_prog(prog, ctx);
+ item++;
+
+ if (!prog->sleepable)
+ rcu_read_unlock();
+ }
+ bpf_reset_run_ctx(old_run_ctx);
+ migrate_enable();
+ return ret;
+}
+
+bool bpf_jit_bypass_spec_v1(void);
+bool bpf_jit_bypass_spec_v4(void);
+
+#define bpf_rcu_lock_held() \
+ (rcu_read_lock_held() || rcu_read_lock_trace_held() || rcu_read_lock_bh_held())
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
@@ -1225,34 +2423,24 @@ extern struct mutex bpf_stats_enabled_mutex;
* kprobes, tracepoints) to prevent deadlocks on map operations as any of
* these events can happen inside a region which holds a map bucket lock
* and can deadlock on it.
- *
- * Use the preemption safe inc/dec variants on RT because migrate disable
- * is preemptible on RT and preemption in the middle of the RMW operation
- * might lead to inconsistent state. Use the raw variants for non RT
- * kernels as migrate_disable() maps to preempt_disable() so the slightly
- * more expensive save operation can be avoided.
*/
static inline void bpf_disable_instrumentation(void)
{
migrate_disable();
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- this_cpu_inc(bpf_prog_active);
- else
- __this_cpu_inc(bpf_prog_active);
+ this_cpu_inc(bpf_prog_active);
}
static inline void bpf_enable_instrumentation(void)
{
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- this_cpu_dec(bpf_prog_active);
- else
- __this_cpu_dec(bpf_prog_active);
+ this_cpu_dec(bpf_prog_active);
migrate_enable();
}
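A hedged sketch of the intended usage, bracketing work that kprobe- or tracepoint-attached programs could otherwise re-enter (the guarded operation is hypothetical):

/* Sketch: keep instrumentation-attached programs from recursing
 * into a section that holds map-internal locks.
 */
bpf_disable_instrumentation();
err = my_update_locked_state(map, key, value);	/* hypothetical */
bpf_enable_instrumentation();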
+extern const struct super_operations bpf_super_ops;
extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;
+extern const struct file_operations bpf_token_fops;
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
extern const struct bpf_prog_ops _name ## _prog_ops; \
@@ -1278,25 +2466,65 @@ void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
-void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
-void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+void bpf_prog_free_id(struct bpf_prog *prog);
+void bpf_map_free_id(struct bpf_map *map);
+
+struct btf_field *btf_record_find(const struct btf_record *rec,
+ u32 offset, u32 field_mask);
+void btf_record_free(struct btf_record *rec);
+void bpf_map_free_record(struct bpf_map *map);
+struct btf_record *btf_record_dup(const struct btf_record *rec);
+bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
+void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
+void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj);
+void bpf_obj_free_task_work(const struct btf_record *rec, void *obj);
+void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
+void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
-struct bpf_map *__bpf_map_get(struct fd f);
+
+/*
+ * The __bpf_map_get() and __btf_get_by_fd() functions parse a file
+ * descriptor and return a corresponding map or btf object.
+ * Their names are double underscored to emphasize the fact that they
+ * do not increase the refcnt. To also take a reference, use the
+ * corresponding bpf_map_get() and btf_get_by_fd() functions.
+ */
+
+static inline struct bpf_map *__bpf_map_get(struct fd f)
+{
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+ if (unlikely(fd_file(f)->f_op != &bpf_map_fops))
+ return ERR_PTR(-EINVAL);
+ return fd_file(f)->private_data;
+}
+
+static inline struct btf *__btf_get_by_fd(struct fd f)
+{
+ if (fd_empty(f))
+ return ERR_PTR(-EBADF);
+ if (unlikely(fd_file(f)->f_op != &btf_fops))
+ return ERR_PTR(-EINVAL);
+ return fd_file(f)->private_data;
+}
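A sketch of the intended calling convention: parse the fd, take a real reference if the object must outlive the fd, then drop the fd (classic fdget()/fdput() pairing assumed; newer kernels may use scope-based fd classes instead):

/* Sketch: resolve an fd to a map and keep it past fdput() */
struct fd f = fdget(ufd);
struct bpf_map *map;

map = __bpf_map_get(f);
if (IS_ERR(map)) {
	fdput(f);
	return PTR_ERR(map);
}
bpf_map_inc(map);	/* own reference; map stays valid after fdput() */
fdput(f);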
+
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
+struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
+bool bpf_map_write_active(const struct bpf_map *map);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
-int generic_map_update_batch(struct bpf_map *map,
+int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
const union bpf_attr *attr,
union bpf_attr __user *uattr);
int generic_map_delete_batch(struct bpf_map *map,
@@ -1305,85 +2533,170 @@ int generic_map_delete_batch(struct bpf_map *map,
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
-#ifdef CONFIG_MEMCG_KMEM
+
+int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
+ unsigned long nr_pages, struct page **page_array);
+#ifdef CONFIG_MEMCG
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
int node);
+void *bpf_map_kmalloc_nolock(const struct bpf_map *map, size_t size, gfp_t flags,
+ int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
+void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
+ gfp_t flags);
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
size_t align, gfp_t flags);
#else
-static inline void *
-bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
- int node)
+/*
+ * These specialized allocators have to be macros for their allocations to be
+ * accounted separately (to have separate alloc_tag).
+ */
+#define bpf_map_kmalloc_node(_map, _size, _flags, _node) \
+ kmalloc_node(_size, _flags, _node)
+#define bpf_map_kmalloc_nolock(_map, _size, _flags, _node) \
+ kmalloc_nolock(_size, _flags, _node)
+#define bpf_map_kzalloc(_map, _size, _flags) \
+ kzalloc(_size, _flags)
+#define bpf_map_kvcalloc(_map, _n, _size, _flags) \
+ kvcalloc(_n, _size, _flags)
+#define bpf_map_alloc_percpu(_map, _size, _align, _flags) \
+ __alloc_percpu_gfp(_size, _align, _flags)
+#endif
+
+static inline int
+bpf_map_init_elem_count(struct bpf_map *map)
{
- return kmalloc_node(size, flags, node);
+ size_t size = sizeof(*map->elem_count), align = size;
+ gfp_t flags = GFP_USER | __GFP_NOWARN;
+
+ map->elem_count = bpf_map_alloc_percpu(map, size, align, flags);
+ if (!map->elem_count)
+ return -ENOMEM;
+
+ return 0;
}
-static inline void *
-bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
+static inline void
+bpf_map_free_elem_count(struct bpf_map *map)
{
- return kzalloc(size, flags);
+ free_percpu(map->elem_count);
}
-static inline void __percpu *
-bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
- gfp_t flags)
+static inline void bpf_map_inc_elem_count(struct bpf_map *map)
{
- return __alloc_percpu_gfp(size, align, flags);
+ this_cpu_inc(*map->elem_count);
}
-#endif
-
-extern int sysctl_unprivileged_bpf_disabled;
-static inline bool bpf_allow_ptr_leaks(void)
+static inline void bpf_map_dec_elem_count(struct bpf_map *map)
{
- return perfmon_capable();
+ this_cpu_dec(*map->elem_count);
}
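Map implementations that opt into element counting allocate the per-cpu counter at creation (bpf_map_init_elem_count() in .map_alloc, bpf_map_free_elem_count() in .map_free) and tick it on insert/delete. A hypothetical sketch:

/* Sketch: maintain map->elem_count from a hypothetical map's
 * update path; my_store_elem() stands in for the real insert logic.
 */
static long my_map_update_elem(struct bpf_map *map, void *key,
			       void *value, u64 flags)
{
	int err = my_store_elem(map, key, value, flags);	/* hypothetical */

	if (!err)
		bpf_map_inc_elem_count(map);
	return err;
}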
-static inline bool bpf_allow_uninit_stack(void)
+extern int sysctl_unprivileged_bpf_disabled;
+
+bool bpf_token_capable(const struct bpf_token *token, int cap);
+
+static inline bool bpf_allow_ptr_leaks(const struct bpf_token *token)
{
- return perfmon_capable();
+ return bpf_token_capable(token, CAP_PERFMON);
}
-static inline bool bpf_allow_ptr_to_map_access(void)
+static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
{
- return perfmon_capable();
+ return bpf_token_capable(token, CAP_PERFMON);
}
-static inline bool bpf_bypass_spec_v1(void)
+static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
{
- return perfmon_capable();
+ return bpf_jit_bypass_spec_v1() ||
+ cpu_mitigations_off() ||
+ bpf_token_capable(token, CAP_PERFMON);
}
-static inline bool bpf_bypass_spec_v4(void)
+static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
{
- return perfmon_capable();
+ return bpf_jit_bypass_spec_v4() ||
+ cpu_mitigations_off() ||
+ bpf_token_capable(token, CAP_PERFMON);
}
int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);
void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
- const struct bpf_link_ops *ops, struct bpf_prog *prog);
+ const struct bpf_link_ops *ops, struct bpf_prog *prog,
+ enum bpf_attach_type attach_type);
+void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
+ const struct bpf_link_ops *ops, struct bpf_prog *prog,
+ enum bpf_attach_type attach_type, bool sleepable);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
+struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
-struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
+struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
+
+void bpf_token_inc(struct bpf_token *token);
+void bpf_token_put(struct bpf_token *token);
+int bpf_token_create(union bpf_attr *attr);
+struct bpf_token *bpf_token_get_from_fd(u32 ufd);
+int bpf_token_get_info_by_fd(struct bpf_token *token,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+
+bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
+bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type);
+bool bpf_token_allow_prog_type(const struct bpf_token *token,
+ enum bpf_prog_type prog_type,
+ enum bpf_attach_type attach_type);
-int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
-int bpf_obj_get_user(const char __user *pathname, int flags);
+int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname);
+int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags);
+struct inode *bpf_get_inode(struct super_block *sb, const struct inode *dir,
+ umode_t mode);
#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...) \
extern int bpf_iter_ ## target(args); \
int __init bpf_iter_ ## target(args) { return 0; }
+/*
+ * The task type of iterators.
+ *
+ * BPF task iterators can be parameterized to visit only a subset of
+ * tasks.
+ *
+ * BPF_TASK_ITER_ALL (default)
+ * Iterate over resources of every task.
+ *
+ * BPF_TASK_ITER_TID
+ * Iterate over resources of a task/tid.
+ *
+ * BPF_TASK_ITER_TGID
+ * Iterate over resources of every task of a process / task group.
+ */
+enum bpf_iter_task_type {
+ BPF_TASK_ITER_ALL = 0,
+ BPF_TASK_ITER_TID,
+ BPF_TASK_ITER_TGID,
+};
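From userspace, this parameterization travels in union bpf_iter_link_info when the iterator link is created. A hedged libbpf-style sketch; the uapi field names are from memory and skel->progs.dump_task is a hypothetical iterator program:

/* Sketch: restrict a task iterator to the current process;
 * linfo.task.pid selects BPF_TASK_ITER_TGID on the kernel side.
 */
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
union bpf_iter_link_info linfo = {};
struct bpf_link *link;

linfo.task.pid = getpid();
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);

link = bpf_program__attach_iter(skel->progs.dump_task, &opts);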
+
struct bpf_iter_aux_info {
+ /* for map_elem iter */
struct bpf_map *map;
+
+ /* for cgroup iter */
+ struct {
+ struct cgroup *start; /* starting cgroup */
+ enum bpf_cgroup_iter_order order;
+ } cgroup;
+ struct {
+ enum bpf_iter_task_type type;
+ u32 pid;
+ } task;
};
typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
@@ -1394,6 +2707,9 @@ typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
struct bpf_link_info *info);
+typedef const struct bpf_func_proto *
+(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
+ const struct bpf_prog *prog);
enum bpf_iter_feature {
BPF_ITER_RESCHED = BIT(0),
@@ -1406,6 +2722,7 @@ struct bpf_iter_reg {
bpf_iter_detach_target_t detach_target;
bpf_iter_show_fdinfo_t show_fdinfo;
bpf_iter_fill_link_info_t fill_link_info;
+ bpf_iter_get_func_proto_t get_func_proto;
u32 ctx_arg_info_size;
u32 feature;
struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
@@ -1427,8 +2744,10 @@ struct bpf_iter__bpf_map_elem {
int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
-bool bpf_iter_prog_supported(struct bpf_prog *prog);
-int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+int bpf_iter_prog_supported(struct bpf_prog *prog);
+const struct bpf_func_proto *
+bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
+int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
@@ -1449,7 +2768,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
u64 flags);
-int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+int bpf_stackmap_extract(struct bpf_map *map, void *key, void *value, bool delete);
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);
@@ -1459,28 +2778,11 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_get_file_flag(int flags);
-int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
+int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
size_t actual_size);
-/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
- * forced to use 'long' read/writes to try to atomically copy long counters.
- * Best-effort only. No barriers here, since it _will_ race with concurrent
- * updates from BPF programs. Called from bpf syscall and mostly used with
- * size 8 or 16 bytes, so ask compiler to inline it.
- */
-static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
-{
- const long *lsrc = src;
- long *ldst = dst;
-
- size /= sizeof(long);
- while (size--)
- *ldst++ = *lsrc++;
-}
-
/* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
- union bpf_attr __user *uattr);
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size);
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
@@ -1489,24 +2791,29 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
struct btf *bpf_get_btf_vmlinux(void);
/* Map specifics */
-struct xdp_buff;
+struct xdp_frame;
struct sk_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;
-void __dev_flush(void);
-int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+void __dev_flush(struct list_head *flush_list);
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
struct net_device *dev_rx);
-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
struct net_device *dev_rx);
+int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+ struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
- struct bpf_prog *xdp_prog);
-bool dev_map_can_have_prog(struct bpf_map *map);
+ const struct bpf_prog *xdp_prog);
+int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ const struct bpf_prog *xdp_prog,
+ struct bpf_map *map, bool exclude_ingress);
-void __cpu_map_flush(void);
-int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
+void __cpu_map_flush(struct list_head *flush_list);
+int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
struct net_device *dev_rx);
-bool cpu_map_prog_allowed(struct bpf_map *map);
+int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
+ struct sk_buff *skb);
/* Return map's numa specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
@@ -1534,17 +2841,43 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
-bool bpf_prog_test_check_kfunc_call(u32 kfunc_id);
+int bpf_prog_test_run_nf(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);
-int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
- const struct btf_type *t, int off, int size,
- enum bpf_access_type atype,
- u32 *next_btf_id);
+
+static inline bool bpf_tracing_ctx_access(int off, int size,
+ enum bpf_access_type type)
+{
+ if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
+ return false;
+ if (type != BPF_READ)
+ return false;
+ if (off % size != 0)
+ return false;
+ return true;
+}
+
+static inline bool bpf_tracing_btf_ctx_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ if (!bpf_tracing_ctx_access(off, size, type))
+ return false;
+ return btf_ctx_access(off, size, type, prog, info);
+}
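A tracing-flavored prog type can delegate its .is_valid_access callback entirely to this helper; a minimal sketch with an illustrative name:

/* Sketch: bounds-check the ctx offset, then defer to BTF */
static bool my_prog_is_valid_access(int off, int size,
				    enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}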
+
+int btf_struct_access(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size, enum bpf_access_type atype,
+ u32 *next_btf_id, enum bpf_type_flag *flag, const char **field_name);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
const struct btf *btf, u32 id, int off,
- const struct btf *need_btf, u32 need_type_id);
+ const struct btf *need_btf, u32 need_type_id,
+ bool strict);
int btf_distill_func_proto(struct bpf_verifier_log *log,
struct btf *btf,
@@ -1553,25 +2886,67 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
struct btf_func_model *m);
struct bpf_reg_state;
-int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *regs);
-int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
- const struct btf *btf, u32 func_id,
- struct bpf_reg_state *regs);
-int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *reg);
+int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
struct btf *btf, const struct btf_type *t);
+const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
+ int comp_idx, const char *tag_key);
+int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt,
+ int comp_idx, const char *tag_key, int last_id);
struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);
-const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
+const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id,
+ const struct bpf_prog *prog);
void bpf_task_storage_free(struct task_struct *task);
+void bpf_cgrp_storage_free(struct cgroup *cgroup);
bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
const struct bpf_insn *insn);
+int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
+ u16 btf_fd_idx, u8 **func_addr);
+
+struct bpf_core_ctx {
+ struct bpf_verifier_log *log;
+ const struct btf *btf;
+};
+
+bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ const char *field_name, u32 btf_id, const char *suffix);
+
+bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
+ const struct btf *reg_btf, u32 reg_id,
+ const struct btf *arg_btf, u32 arg_id);
+
+int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
+ int relo_idx, void *insn);
+
+static inline bool unprivileged_ebpf_enabled(void)
+{
+ return !sysctl_unprivileged_bpf_disabled;
+}
+
+/* Not all bpf prog types have the bpf_ctx.
+ * For the bpf prog types that have initialized the bpf_ctx,
+ * this function can be used to decide if a kernel function
+ * is being called by a bpf program.
+ */
+static inline bool has_current_bpf_ctx(void)
+{
+ return !!current->bpf_ctx;
+}
+
+void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
+
+void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
+ enum bpf_dynptr_type type, u32 offset, u32 size);
+void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
+void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
+void bpf_prog_report_arena_violation(bool write, unsigned long addr, unsigned long fault_ip);
+
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -1609,7 +2984,13 @@ bpf_prog_inc_not_zero(struct bpf_prog *prog)
static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
const struct bpf_link_ops *ops,
- struct bpf_prog *prog)
+ struct bpf_prog *prog, enum bpf_attach_type attach_type)
+{
+}
+
+static inline void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
+ const struct bpf_link_ops *ops, struct bpf_prog *prog,
+ enum bpf_attach_type attach_type, bool sleepable)
{
}
@@ -1632,6 +3013,11 @@ static inline void bpf_link_inc(struct bpf_link *link)
{
}
+static inline struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
+{
+ return NULL;
+}
+
static inline void bpf_link_put(struct bpf_link *link)
{
}
@@ -1641,56 +3027,92 @@ static inline int bpf_obj_get_user(const char __user *pathname, int flags)
return -EOPNOTSUPP;
}
-static inline bool dev_map_can_have_prog(struct bpf_map *map)
+static inline bool bpf_token_capable(const struct bpf_token *token, int cap)
{
- return false;
+ return capable(cap) || (cap != CAP_SYS_ADMIN && capable(CAP_SYS_ADMIN));
+}
+
+static inline void bpf_token_inc(struct bpf_token *token)
+{
+}
+
+static inline void bpf_token_put(struct bpf_token *token)
+{
+}
+
+static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int bpf_token_get_info_by_fd(struct bpf_token *token,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EOPNOTSUPP;
}
-static inline void __dev_flush(void)
+static inline void __dev_flush(struct list_head *flush_list)
{
}
-struct xdp_buff;
+struct xdp_frame;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;
static inline
-int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
struct net_device *dev_rx)
{
return 0;
}
static inline
-int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
+int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
struct net_device *dev_rx)
{
return 0;
}
+static inline
+int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
+ struct bpf_map *map, bool exclude_ingress)
+{
+ return 0;
+}
+
struct sk_buff;
static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
struct sk_buff *skb,
- struct bpf_prog *xdp_prog)
+ const struct bpf_prog *xdp_prog)
{
return 0;
}
-static inline void __cpu_map_flush(void)
+static inline
+int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
+ const struct bpf_prog *xdp_prog,
+ struct bpf_map *map, bool exclude_ingress)
+{
+ return 0;
+}
+
+static inline void __cpu_map_flush(struct list_head *flush_list)
{
}
static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
- struct xdp_buff *xdp,
+ struct xdp_frame *xdpf,
struct net_device *dev_rx)
{
return 0;
}
-static inline bool cpu_map_prog_allowed(struct bpf_map *map)
+static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
+ struct sk_buff *skb)
{
- return false;
+ return -EOPNOTSUPP;
}
static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
@@ -1734,11 +3156,6 @@ static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
return -ENOTSUPP;
}
-static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
-{
- return false;
-}
-
static inline void bpf_map_put(struct bpf_map *map)
{
}
@@ -1748,8 +3165,17 @@ static inline struct bpf_prog *bpf_prog_by_id(u32 id)
return ERR_PTR(-ENOTSUPP);
}
+static inline int btf_struct_access(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size, enum bpf_access_type atype,
+ u32 *next_btf_id, enum bpf_type_flag *flag,
+ const char **field_name)
+{
+ return -EACCES;
+}
+
static inline const struct bpf_func_proto *
-bpf_base_func_proto(enum bpf_func_id func_id)
+bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
return NULL;
}
@@ -1769,10 +3195,64 @@ bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
{
return NULL;
}
+
+static inline int
+bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
+ u16 btf_fd_idx, u8 **func_addr)
+{
+ return -ENOTSUPP;
+}
+
+static inline bool unprivileged_ebpf_enabled(void)
+{
+ return false;
+}
+
+static inline bool has_current_bpf_ctx(void)
+{
+ return false;
+}
+
+static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
+{
+}
+
+static inline void bpf_cgrp_storage_free(struct cgroup *cgroup)
+{
+}
+
+static inline void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
+ enum bpf_dynptr_type type, u32 offset, u32 size)
+{
+}
+
+static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
+{
+}
+
+static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
+{
+}
+
+static inline void bpf_prog_report_arena_violation(bool write, unsigned long addr,
+ unsigned long fault_ip)
+{
+}
#endif /* CONFIG_BPF_SYSCALL */
-void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
- struct btf_mod_pair *used_btfs, u32 len);
+static __always_inline int
+bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
+{
+ int ret = -EFAULT;
+
+ if (IS_ENABLED(CONFIG_BPF_EVENTS))
+ ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
+ if (unlikely(ret < 0))
+ memset(dst, 0, size);
+ return ret;
+}
+
+void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len);
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
enum bpf_prog_type type)
@@ -1786,7 +3266,7 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux,
bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
int bpf_prog_offload_compile(struct bpf_prog *prog);
-void bpf_prog_offload_destroy(struct bpf_prog *prog);
+void bpf_prog_dev_bound_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
struct bpf_prog *prog);
@@ -1811,34 +3291,95 @@ void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
+void unpriv_ebpf_notify(int new_state);
+
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
-int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
+int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
+ struct bpf_prog_aux *prog_aux);
+void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id);
+int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr);
+int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog);
+void bpf_dev_bound_netdev_unregister(struct net_device *dev);
static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
+ return aux->dev_bound;
+}
+
+static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
+{
return aux->offload_requested;
}
-static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs);
+
+static inline bool bpf_map_is_offloaded(struct bpf_map *map)
{
return unlikely(map->ops == &bpf_map_offload_ops);
}
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
+u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map);
+int bpf_prog_test_run_syscall(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+
+int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
+int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
+int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
+int sock_map_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog);
+
+void sock_map_unhash(struct sock *sk);
+void sock_map_destroy(struct sock *sk);
+void sock_map_close(struct sock *sk, long timeout);
#else
-static inline int bpf_prog_offload_init(struct bpf_prog *prog,
- union bpf_attr *attr)
+static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
+ struct bpf_prog_aux *prog_aux)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog,
+ u32 func_id)
+{
+ return NULL;
+}
+
+static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog,
+ union bpf_attr *attr)
{
return -EOPNOTSUPP;
}
-static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
+static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog,
+ struct bpf_prog *old_prog)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev)
+{
+}
+
+static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
+{
+ return false;
+}
+
+static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux)
{
return false;
}
-static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
+{
+ return false;
+}
+
+static inline bool bpf_map_is_offloaded(struct bpf_map *map)
{
return false;
}
@@ -1851,23 +3392,17 @@ static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
-#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
-#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
-int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
-int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
-int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
-void sock_map_unhash(struct sock *sk);
-void sock_map_close(struct sock *sk, long timeout);
+static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
+{
+ return 0;
+}
-void bpf_sk_reuseport_detach(struct sock *sk);
-int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
- void *value);
-int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
- void *value, u64 map_flags);
-#else
-static inline void bpf_sk_reuseport_detach(struct sock *sk)
+static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
{
+ return -ENOTSUPP;
}
#ifdef CONFIG_BPF_SYSCALL
@@ -1889,6 +3424,47 @@ static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void
return -EOPNOTSUPP;
}
+static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+
+static inline int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_BPF_SYSCALL */
+#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
+
+static __always_inline void
+bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
+{
+ const struct bpf_prog_array_item *item;
+ struct bpf_prog *prog;
+
+ if (unlikely(!array))
+ return;
+
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ bpf_prog_inc_misses_counter(prog);
+ item++;
+ }
+}
+
+#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
+void bpf_sk_reuseport_detach(struct sock *sk);
+int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
+ void *value);
+int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 map_flags);
+#else
+static inline void bpf_sk_reuseport_detach(struct sock *sk)
+{
+}
+
+#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
void *key, void *value)
{
@@ -1904,6 +3480,38 @@ static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
+#if defined(CONFIG_KEYS) && defined(CONFIG_BPF_SYSCALL)
+
+struct bpf_key *bpf_lookup_user_key(s32 serial, u64 flags);
+struct bpf_key *bpf_lookup_system_key(u64 id);
+void bpf_key_put(struct bpf_key *bkey);
+int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
+ struct bpf_dynptr *sig_p,
+ struct bpf_key *trusted_keyring);
+
+#else
+static inline struct bpf_key *bpf_lookup_user_key(u32 serial, u64 flags)
+{
+ return NULL;
+}
+
+static inline struct bpf_key *bpf_lookup_system_key(u64 id)
+{
+ return NULL;
+}
+
+static inline void bpf_key_put(struct bpf_key *bkey)
+{
+}
+
+static inline int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p,
+ struct bpf_dynptr *sig_p,
+ struct bpf_key *trusted_keyring)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* defined(CONFIG_KEYS) && defined(CONFIG_BPF_SYSCALL) */
+
/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
@@ -1911,6 +3519,7 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;
+extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
@@ -1918,18 +3527,23 @@ extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
+extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
+extern const struct bpf_func_proto bpf_get_stack_sleepable_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
+extern const struct bpf_func_proto bpf_get_task_stack_sleepable_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
+extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
+extern const struct bpf_func_proto bpf_current_task_under_cgroup_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
@@ -1948,11 +3562,16 @@ extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
+extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto;
+extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto;
+extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
@@ -1961,12 +3580,24 @@ extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
+extern const struct bpf_func_proto bpf_task_storage_get_recur_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
+extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
-
-const struct bpf_func_proto *bpf_tracing_func_proto(
- enum bpf_func_id func_id, const struct bpf_prog *prog);
+extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
+extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
+extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
+extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto;
+extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto;
+extern const struct bpf_func_proto bpf_find_vma_proto;
+extern const struct bpf_func_proto bpf_loop_proto;
+extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
+extern const struct bpf_func_proto bpf_set_retval_proto;
+extern const struct bpf_func_proto bpf_get_retval_proto;
+extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto;
+extern const struct bpf_func_proto bpf_cgrp_storage_get_proto;
+extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto;
const struct bpf_func_proto *tracing_prog_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
@@ -1987,6 +3618,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
struct bpf_insn *insn_buf,
struct bpf_prog *prog,
u32 *target_size);
+int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
+ struct bpf_dynptr *ptr);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
enum bpf_access_type type,
@@ -2008,6 +3641,11 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
{
return 0;
}
+static inline int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
+ struct bpf_dynptr *ptr)
+{
+ return -EOPNOTSUPP;
+}
#endif
#ifdef CONFIG_INET
@@ -2015,6 +3653,7 @@ struct sk_reuseport_kern {
struct sk_buff *skb;
struct sock *sk;
struct sock *selected_sk;
+ struct sock *migrating_sk;
void *data_end;
u32 hash;
u32 reuseport_id;
@@ -2071,18 +3710,134 @@ static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
#endif /* CONFIG_INET */
enum bpf_text_poke_type {
+ BPF_MOD_NOP,
BPF_MOD_CALL,
BPF_MOD_JUMP,
};
-int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
- void *addr1, void *addr2);
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
+ enum bpf_text_poke_type new_t, void *old_addr,
+ void *new_addr);
+
+void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
+ struct bpf_prog *new, struct bpf_prog *old);
+
+void *bpf_arch_text_copy(void *dst, void *src, size_t len);
+int bpf_arch_text_invalidate(void *dst, size_t len);
struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
-int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
- u32 **bin_buf, u32 num_args);
-void bpf_bprintf_cleanup(void);
+#define MAX_BPRINTF_VARARGS 12
+#define MAX_BPRINTF_BUF 1024
+
+/* Per-cpu temp buffers used by printf-like helpers to store the binary
+ * representation of the bprintf arguments.
+ */
+#define MAX_BPRINTF_BIN_ARGS 512
+
+struct bpf_bprintf_buffers {
+ char bin_args[MAX_BPRINTF_BIN_ARGS];
+ char buf[MAX_BPRINTF_BUF];
+};
+
+struct bpf_bprintf_data {
+ u32 *bin_args;
+ char *buf;
+ bool get_bin_args;
+ bool get_buf;
+};
+
+int bpf_bprintf_prepare(const char *fmt, u32 fmt_size, const u64 *raw_args,
+ u32 num_args, struct bpf_bprintf_data *data);
+void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
+int bpf_try_get_buffers(struct bpf_bprintf_buffers **bufs);
+void bpf_put_buffers(void);
+
+void bpf_prog_stream_init(struct bpf_prog *prog);
+void bpf_prog_stream_free(struct bpf_prog *prog);
+int bpf_prog_stream_read(struct bpf_prog *prog, enum bpf_stream_id stream_id, void __user *buf, int len);
+void bpf_stream_stage_init(struct bpf_stream_stage *ss);
+void bpf_stream_stage_free(struct bpf_stream_stage *ss);
+__printf(2, 3)
+int bpf_stream_stage_printk(struct bpf_stream_stage *ss, const char *fmt, ...);
+int bpf_stream_stage_commit(struct bpf_stream_stage *ss, struct bpf_prog *prog,
+ enum bpf_stream_id stream_id);
+int bpf_stream_stage_dump_stack(struct bpf_stream_stage *ss);
+
+#define bpf_stream_printk(ss, ...) bpf_stream_stage_printk(&ss, __VA_ARGS__)
+#define bpf_stream_dump_stack(ss) bpf_stream_stage_dump_stack(&ss)
+
+#define bpf_stream_stage(ss, prog, stream_id, expr) \
+ ({ \
+ bpf_stream_stage_init(&ss); \
+ (expr); \
+ bpf_stream_stage_commit(&ss, prog, stream_id); \
+ bpf_stream_stage_free(&ss); \
+ })
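The intended use stages printk output and a stack dump, then commits both to the program's stream in one go; a hedged sketch, assuming BPF_STDERR comes from enum bpf_stream_id and prog/addr are in scope:

/* Sketch: report a fault to the program's stderr stream */
struct bpf_stream_stage ss;

bpf_stream_stage(ss, prog, BPF_STDERR, ({
	bpf_stream_printk(ss, "ERROR: bad access at 0x%lx\n", addr);
	bpf_stream_dump_stack(ss);
}));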
+
+#ifdef CONFIG_BPF_LSM
+void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
+void bpf_cgroup_atype_put(int cgroup_atype);
+#else
+static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
+static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
+#endif /* CONFIG_BPF_LSM */
+
+struct key;
+
+#ifdef CONFIG_KEYS
+struct bpf_key {
+ struct key *key;
+ bool has_ref;
+};
+#endif /* CONFIG_KEYS */
+
+static inline bool type_is_alloc(u32 type)
+{
+ return type & MEM_ALLOC;
+}
+
+static inline gfp_t bpf_memcg_flags(gfp_t flags)
+{
+ if (memcg_bpf_enabled())
+ return flags | __GFP_ACCOUNT;
+ return flags;
+}
+
+static inline bool bpf_is_subprog(const struct bpf_prog *prog)
+{
+ return prog->aux->func_idx != 0;
+}
+
+int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char **filep,
+ const char **linep, int *nump);
+struct bpf_prog *bpf_prog_find_from_stack(void);
+
+int bpf_insn_array_init(struct bpf_map *map, const struct bpf_prog *prog);
+int bpf_insn_array_ready(struct bpf_map *map);
+void bpf_insn_array_release(struct bpf_map *map);
+void bpf_insn_array_adjust(struct bpf_map *map, u32 off, u32 len);
+void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len);
+
+#ifdef CONFIG_BPF_SYSCALL
+void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image);
+#else
+static inline void
+bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
+{
+}
+#endif
+
+static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 allowed_flags)
+{
+ if (flags & ~allowed_flags)
+ return -EINVAL;
+
+ if ((flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))
+ return -EINVAL;
+
+ return 0;
+}
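Callers in the syscall update paths would validate user-supplied flags up front; a sketch, assuming map, attr and err are in scope:

/* Sketch: reject unknown flags, and BPF_F_LOCK on maps without a
 * spin lock field, before doing any work.
 */
err = bpf_map_check_op_flags(map, attr->flags,
			     BPF_ANY | BPF_NOEXIST | BPF_EXIST | BPF_F_LOCK);
if (err)
	return err;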
#endif /* _LINUX_BPF_H */
diff --git a/include/linux/bpf_crypto.h b/include/linux/bpf_crypto.h
new file mode 100644
index 000000000000..a41e71d4e2d9
--- /dev/null
+++ b/include/linux/bpf_crypto.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#ifndef _BPF_CRYPTO_H
+#define _BPF_CRYPTO_H
+
+struct bpf_crypto_type {
+ void *(*alloc_tfm)(const char *algo);
+ void (*free_tfm)(void *tfm);
+ int (*has_algo)(const char *algo);
+ int (*setkey)(void *tfm, const u8 *key, unsigned int keylen);
+ int (*setauthsize)(void *tfm, unsigned int authsize);
+ int (*encrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv);
+ int (*decrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv);
+ unsigned int (*ivsize)(void *tfm);
+ unsigned int (*statesize)(void *tfm);
+ u32 (*get_flags)(void *tfm);
+ struct module *owner;
+ char name[14];
+};
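+
+/* Illustrative sketch (hypothetical provider, names made up): a crypto
+ * backend fills in the ops table and registers it, e.g.
+ *
+ *	static const struct bpf_crypto_type bpf_crypto_example_type = {
+ *		.alloc_tfm	= example_alloc_tfm,
+ *		.free_tfm	= example_free_tfm,
+ *		.setkey		= example_setkey,
+ *		.encrypt	= example_encrypt,
+ *		.decrypt	= example_decrypt,
+ *		.ivsize		= example_ivsize,
+ *		.statesize	= example_statesize,
+ *		.owner		= THIS_MODULE,
+ *		.name		= "example",
+ *	};
+ *
+ *	err = bpf_crypto_register_type(&bpf_crypto_example_type);
+ */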
+
+int bpf_crypto_register_type(const struct bpf_crypto_type *type);
+int bpf_crypto_unregister_type(const struct bpf_crypto_type *type);
+
+#endif /* _BPF_CRYPTO_H */
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index b902c580c48d..66432248cd81 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -8,10 +8,12 @@
#define _BPF_LOCAL_STORAGE_H
#include <linux/bpf.h>
+#include <linux/filter.h>
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
+#include <linux/bpf_mem_alloc.h>
#include <uapi/linux/btf.h>
#define BPF_LOCAL_STORAGE_CACHE_SIZE 16
@@ -51,6 +53,7 @@ struct bpf_local_storage_map {
u32 bucket_log;
u16 elem_size;
u16 cache_idx;
+ bool use_kmalloc_nolock;
};
struct bpf_local_storage_data {
@@ -58,7 +61,7 @@ struct bpf_local_storage_data {
* from the object's bpf_local_storage.
*
* Put it in the same cacheline as the data to minimize
- * the number of cachelines access during the cache hit case.
+ * the number of cachelines accessed during the cache hit case.
*/
struct bpf_local_storage_map __rcu *smap;
u8 data[] __aligned(8);
@@ -69,9 +72,15 @@ struct bpf_local_storage_elem {
struct hlist_node map_node; /* Linked to bpf_local_storage_map */
struct hlist_node snode; /* Linked to bpf_local_storage */
struct bpf_local_storage __rcu *local_storage;
- struct rcu_head rcu;
+ union {
+ struct rcu_head rcu;
+ struct hlist_node free_node; /* used to postpone
+ * bpf_selem_free
+ * after raw_spin_unlock
+ */
+ };
/* 8 bytes hole */
- /* The data is stored in aother cacheline to minimize
+ /* The data is stored in another cacheline to minimize
* the number of cachelines access during a cache hit.
*/
struct bpf_local_storage_data sdata ____cacheline_aligned;
@@ -79,12 +88,14 @@ struct bpf_local_storage_elem {
struct bpf_local_storage {
struct bpf_local_storage_data __rcu *cache[BPF_LOCAL_STORAGE_CACHE_SIZE];
+ struct bpf_local_storage_map __rcu *smap;
struct hlist_head list; /* List of bpf_local_storage_elem */
void *owner; /* The object that owns the above "list" of
* bpf_local_storage_elem.
*/
struct rcu_head rcu;
raw_spinlock_t lock; /* Protect adding/removing from the "list" */
+ bool use_kmalloc_nolock;
};
/* U16_MAX is much more than enough for sk local storage
@@ -112,21 +123,49 @@ static struct bpf_local_storage_cache name = { \
.idx_lock = __SPIN_LOCK_UNLOCKED(name.idx_lock), \
}
-u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache);
-void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
- u16 idx);
-
/* Helper functions for bpf_local_storage */
int bpf_local_storage_map_alloc_check(union bpf_attr *attr);
-struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr);
+struct bpf_map *
+bpf_local_storage_map_alloc(union bpf_attr *attr,
+ struct bpf_local_storage_cache *cache,
+ bool use_kmalloc_nolock);
-struct bpf_local_storage_data *
+void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem);
+/* If cacheit_lockit is false, this lookup function is lockless */
+static inline struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
struct bpf_local_storage_map *smap,
- bool cacheit_lockit);
+ bool cacheit_lockit)
+{
+ struct bpf_local_storage_data *sdata;
+ struct bpf_local_storage_elem *selem;
+
+ /* Fast path (cache hit) */
+ sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
+ bpf_rcu_lock_held());
+ if (sdata && rcu_access_pointer(sdata->smap) == smap)
+ return sdata;
+
+ /* Slow path (cache miss) */
+ hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
+ rcu_read_lock_trace_held())
+ if (rcu_access_pointer(SDATA(selem)->smap) == smap)
+ break;
+
+ if (!selem)
+ return NULL;
+ if (cacheit_lockit)
+ __bpf_local_storage_insert_cache(local_storage, smap, selem);
+ return SDATA(selem);
+}
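+
+/* Illustrative sketch (assumed caller context): lookups must run under RCU
+ * (or RCU-trace) protection, e.g.
+ *
+ *	rcu_read_lock();
+ *	sdata = bpf_local_storage_lookup(local_storage, smap, true);
+ *	if (sdata)
+ *		data = sdata->data;
+ *	rcu_read_unlock();
+ *
+ * With cacheit_lockit == true, a slow-path hit is promoted into the
+ * cache[smap->cache_idx] slot; with false, the lookup stays lockless.
+ */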
-void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
+void bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
+
+void bpf_local_storage_map_free(struct bpf_map *map,
+ struct bpf_local_storage_cache *cache,
int __percpu *busy_counter);
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
@@ -137,28 +176,28 @@ int bpf_local_storage_map_check_btf(const struct bpf_map *map,
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
struct bpf_local_storage_elem *selem);
-bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
- struct bpf_local_storage_elem *selem,
- bool uncharge_omem);
-
-void bpf_selem_unlink(struct bpf_local_storage_elem *selem);
+void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now);
void bpf_selem_link_map(struct bpf_local_storage_map *smap,
struct bpf_local_storage_elem *selem);
-void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem);
-
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
- bool charge_mem);
+ bool swap_uptrs, gfp_t gfp_flags);
+
+void bpf_selem_free(struct bpf_local_storage_elem *selem,
+ bool reuse_now);
int
bpf_local_storage_alloc(void *owner,
struct bpf_local_storage_map *smap,
- struct bpf_local_storage_elem *first_selem);
+ struct bpf_local_storage_elem *first_selem,
+ gfp_t gfp_flags);
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
- void *value, u64 map_flags);
+ void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags);
+
+u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);
#endif /* _BPF_LOCAL_STORAGE_H */
diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
index 479c101546ad..643809cc78c3 100644
--- a/include/linux/bpf_lsm.h
+++ b/include/linux/bpf_lsm.h
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
#include <linux/lsm_hooks.h>
#ifdef CONFIG_BPF_LSM
@@ -28,6 +29,7 @@ int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
const struct bpf_prog *prog);
bool bpf_lsm_is_sleepable_hook(u32 btf_id);
+bool bpf_lsm_is_trusted(const struct bpf_prog *prog);
static inline struct bpf_storage_blob *bpf_inode(
const struct inode *inode)
@@ -42,6 +44,15 @@ extern const struct bpf_func_proto bpf_inode_storage_get_proto;
extern const struct bpf_func_proto bpf_inode_storage_delete_proto;
void bpf_inode_storage_free(struct inode *inode);
+void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);
+
+int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
+ struct bpf_retval_range *range);
+int bpf_set_dentry_xattr_locked(struct dentry *dentry, const char *name__str,
+ const struct bpf_dynptr *value_p, int flags);
+int bpf_remove_dentry_xattr_locked(struct dentry *dentry, const char *name__str);
+bool bpf_lsm_has_d_inode_locked(const struct bpf_prog *prog);
+
#else /* !CONFIG_BPF_LSM */
static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
@@ -49,6 +60,11 @@ static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
return false;
}
+static inline bool bpf_lsm_is_trusted(const struct bpf_prog *prog)
+{
+ return false;
+}
+
static inline int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
const struct bpf_prog *prog)
{
@@ -65,6 +81,29 @@ static inline void bpf_inode_storage_free(struct inode *inode)
{
}
+static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
+ bpf_func_t *bpf_func)
+{
+}
+
+static inline int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
+ struct bpf_retval_range *range)
+{
+ return -EOPNOTSUPP;
+}
+static inline int bpf_set_dentry_xattr_locked(struct dentry *dentry, const char *name__str,
+ const struct bpf_dynptr *value_p, int flags)
+{
+ return -EOPNOTSUPP;
+}
+static inline int bpf_remove_dentry_xattr_locked(struct dentry *dentry, const char *name__str)
+{
+ return -EOPNOTSUPP;
+}
+static inline bool bpf_lsm_has_d_inode_locked(const struct bpf_prog *prog)
+{
+ return false;
+}
#endif /* CONFIG_BPF_LSM */
#endif /* _LINUX_BPF_LSM_H */
diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
new file mode 100644
index 000000000000..e45162ef59bb
--- /dev/null
+++ b/include/linux/bpf_mem_alloc.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#ifndef _BPF_MEM_ALLOC_H
+#define _BPF_MEM_ALLOC_H
+#include <linux/compiler_types.h>
+#include <linux/workqueue.h>
+
+struct bpf_mem_cache;
+struct bpf_mem_caches;
+
+struct bpf_mem_alloc {
+ struct bpf_mem_caches __percpu *caches;
+ struct bpf_mem_cache __percpu *cache;
+ struct obj_cgroup *objcg;
+ bool percpu;
+ struct work_struct work;
+};
+
+/* 'size != 0' is for bpf_mem_alloc which manages fixed-size objects.
+ * Alloc and free are done with bpf_mem_cache_{alloc,free}().
+ *
+ * 'size = 0' is for bpf_mem_alloc which manages many fixed-size objects.
+ * Alloc and free are done with bpf_mem_{alloc,free}() and the size of
+ * the returned object is given by the size argument of bpf_mem_alloc().
+ * If percpu equals true, an error will be returned in order to avoid
+ * large memory consumption; the below bpf_mem_alloc_percpu_unit_init()
+ * should be used to do on-demand per-cpu allocation for each size.
+ */
+int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
+/* Initialize a non-fixed-size percpu memory allocator */
+int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg);
+/* Set up the percpu allocation for a specific unit size. */
+int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size);
+void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
+
+/* Check the allocation size for kmalloc equivalent allocator */
+int bpf_mem_alloc_check_size(bool percpu, size_t size);
+
+/* kmalloc/kfree equivalent: */
+void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size);
+void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);
+void bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr);
+
+/* kmem_cache_alloc/free equivalent: */
+void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma);
+void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr);
+void bpf_mem_cache_free_rcu(struct bpf_mem_alloc *ma, void *ptr);
+void bpf_mem_cache_raw_free(void *ptr);
+void *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags);
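+
+/* Illustrative usage sketch (assumed caller): size == 0 selects the
+ * kmalloc-like mode described above.
+ *
+ *	struct bpf_mem_alloc ma;
+ *	void *obj;
+ *
+ *	err = bpf_mem_alloc_init(&ma, 0, false);
+ *	if (!err) {
+ *		obj = bpf_mem_alloc(&ma, 64);
+ *		if (obj)
+ *			bpf_mem_free(&ma, obj);
+ *		bpf_mem_alloc_destroy(&ma);
+ *	}
+ */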
+
+#endif /* _BPF_MEM_ALLOC_H */
diff --git a/include/linux/bpf_mprog.h b/include/linux/bpf_mprog.h
new file mode 100644
index 000000000000..929225f7b095
--- /dev/null
+++ b/include/linux/bpf_mprog.h
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023 Isovalent */
+#ifndef __BPF_MPROG_H
+#define __BPF_MPROG_H
+
+#include <linux/bpf.h>
+
+/* bpf_mprog framework:
+ *
+ * bpf_mprog is a generic layer for multi-program attachment. In-kernel users
+ * of bpf_mprog don't need to care about the dependency resolution
+ * internals; they can just consume it with a few API calls. Currently available
+ * dependency directives are BPF_F_{BEFORE,AFTER} which enable insertion of
+ * a BPF program or BPF link relative to an existing BPF program or BPF link
+ * inside the multi-program array as well as prepend and append behavior if
+ * no relative object was specified, see corresponding selftests for concrete
+ * examples (e.g. tc_links and tc_opts test cases of test_progs).
+ *
+ * Usage of bpf_mprog_{attach,detach,query}() core APIs with pseudo code:
+ *
+ * Attach case:
+ *
+ * struct bpf_mprog_entry *entry, *entry_new;
+ * int ret;
+ *
+ * // bpf_mprog user-side lock
+ * // fetch active @entry from attach location
+ * [...]
+ * ret = bpf_mprog_attach(entry, &entry_new, [...]);
+ * if (!ret) {
+ * if (entry != entry_new) {
+ * // swap @entry to @entry_new at attach location
+ * // ensure there are no inflight users of @entry:
+ * synchronize_rcu();
+ * }
+ * bpf_mprog_commit(entry);
+ * } else {
+ * // error path, bail out, propagate @ret
+ * }
+ * // bpf_mprog user-side unlock
+ *
+ * Detach case:
+ *
+ * struct bpf_mprog_entry *entry, *entry_new;
+ * int ret;
+ *
+ * // bpf_mprog user-side lock
+ * // fetch active @entry from attach location
+ * [...]
+ * ret = bpf_mprog_detach(entry, &entry_new, [...]);
+ * if (!ret) {
+ *     // all lines marked with (*) are optional; whether the
+ *     // bpf_mprog_bundle should be freed or not depends on the use-case
+ * if (!bpf_mprog_total(entry_new)) (*)
+ * entry_new = NULL (*)
+ * // swap @entry to @entry_new at attach location
+ * // ensure there are no inflight users of @entry:
+ * synchronize_rcu();
+ * bpf_mprog_commit(entry);
+ * if (!entry_new) (*)
+ * // free bpf_mprog_bundle (*)
+ * } else {
+ * // error path, bail out, propagate @ret
+ * }
+ * // bpf_mprog user-side unlock
+ *
+ * Query case:
+ *
+ * struct bpf_mprog_entry *entry;
+ * int ret;
+ *
+ * // bpf_mprog user-side lock
+ * // fetch active @entry from attach location
+ * [...]
+ * ret = bpf_mprog_query(attr, uattr, entry);
+ * // bpf_mprog user-side unlock
+ *
+ * Data/fast path:
+ *
+ * struct bpf_mprog_entry *entry;
+ * struct bpf_mprog_fp *fp;
+ * struct bpf_prog *prog;
+ * int ret = [...];
+ *
+ * rcu_read_lock();
+ * // fetch active @entry from attach location
+ * [...]
+ * bpf_mprog_foreach_prog(entry, fp, prog) {
+ * ret = bpf_prog_run(prog, [...]);
+ * // process @ret from program
+ * }
+ * [...]
+ * rcu_read_unlock();
+ *
+ * bpf_mprog locking considerations:
+ *
+ * bpf_mprog_{attach,detach,query}() must be protected by an external lock
+ * (like RTNL in case of tcx).
+ *
+ * bpf_mprog_entry pointer can be an __rcu annotated pointer (in case of tcx
+ * the netdevice has tcx_ingress and tcx_egress __rcu pointer) which gets
+ * updated via rcu_assign_pointer() pointing to the active bpf_mprog_entry of
+ * the bpf_mprog_bundle.
+ *
+ * Fast path accesses the active bpf_mprog_entry within RCU critical section
+ * (in case of tcx it runs in NAPI which provides RCU protection there,
+ * other users might need explicit rcu_read_lock()). The bpf_mprog_commit()
+ * assumes that for the old bpf_mprog_entry there are no inflight users
+ * anymore.
+ *
+ * The READ_ONCE()/WRITE_ONCE() pairing for bpf_mprog_fp's prog access is for
+ * the replacement case where we don't swap the bpf_mprog_entry.
+ */
+
+#define bpf_mprog_foreach_tuple(entry, fp, cp, t) \
+ for (fp = &entry->fp_items[0], cp = &entry->parent->cp_items[0];\
+ ({ \
+ t.prog = READ_ONCE(fp->prog); \
+ t.link = cp->link; \
+ t.prog; \
+ }); \
+ fp++, cp++)
+
+#define bpf_mprog_foreach_prog(entry, fp, p) \
+ for (fp = &entry->fp_items[0]; \
+ (p = READ_ONCE(fp->prog)); \
+ fp++)
+
+#define BPF_MPROG_MAX 64
+
+struct bpf_mprog_fp {
+ struct bpf_prog *prog;
+};
+
+struct bpf_mprog_cp {
+ struct bpf_link *link;
+};
+
+struct bpf_mprog_entry {
+ struct bpf_mprog_fp fp_items[BPF_MPROG_MAX];
+ struct bpf_mprog_bundle *parent;
+};
+
+struct bpf_mprog_bundle {
+ struct bpf_mprog_entry a;
+ struct bpf_mprog_entry b;
+ struct bpf_mprog_cp cp_items[BPF_MPROG_MAX];
+ struct bpf_prog *ref;
+ atomic64_t revision;
+ u32 count;
+};
+
+struct bpf_tuple {
+ struct bpf_prog *prog;
+ struct bpf_link *link;
+};
+
+static inline struct bpf_mprog_entry *
+bpf_mprog_peer(const struct bpf_mprog_entry *entry)
+{
+ if (entry == &entry->parent->a)
+ return &entry->parent->b;
+ else
+ return &entry->parent->a;
+}
+
+static inline void bpf_mprog_bundle_init(struct bpf_mprog_bundle *bundle)
+{
+ BUILD_BUG_ON(sizeof(bundle->a.fp_items[0]) > sizeof(u64));
+ BUILD_BUG_ON(ARRAY_SIZE(bundle->a.fp_items) !=
+ ARRAY_SIZE(bundle->cp_items));
+
+ memset(bundle, 0, sizeof(*bundle));
+ atomic64_set(&bundle->revision, 1);
+ bundle->a.parent = bundle;
+ bundle->b.parent = bundle;
+}
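+
+/* Illustrative sketch (simplified; the real update paths live in
+ * bpf_mprog_attach/detach): the bundle double-buffers entries a and b, so
+ * an update builds the new state in the inactive peer and then swaps:
+ *
+ *	peer = bpf_mprog_peer(entry);
+ *	bpf_mprog_entry_copy(peer, entry);
+ *	// modify peer
+ *	// swap @entry to @peer at the attach location
+ *	synchronize_rcu();
+ *	bpf_mprog_commit(entry);
+ */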
+
+static inline void bpf_mprog_inc(struct bpf_mprog_entry *entry)
+{
+ entry->parent->count++;
+}
+
+static inline void bpf_mprog_dec(struct bpf_mprog_entry *entry)
+{
+ entry->parent->count--;
+}
+
+static inline int bpf_mprog_max(void)
+{
+ return ARRAY_SIZE(((struct bpf_mprog_entry *)NULL)->fp_items) - 1;
+}
+
+static inline int bpf_mprog_total(struct bpf_mprog_entry *entry)
+{
+ int total = entry->parent->count;
+
+ WARN_ON_ONCE(total > bpf_mprog_max());
+ return total;
+}
+
+static inline bool bpf_mprog_exists(struct bpf_mprog_entry *entry,
+ struct bpf_prog *prog)
+{
+ const struct bpf_mprog_fp *fp;
+ const struct bpf_prog *tmp;
+
+ bpf_mprog_foreach_prog(entry, fp, tmp) {
+ if (tmp == prog)
+ return true;
+ }
+ return false;
+}
+
+static inline void bpf_mprog_mark_for_release(struct bpf_mprog_entry *entry,
+ struct bpf_tuple *tuple)
+{
+ WARN_ON_ONCE(entry->parent->ref);
+ if (!tuple->link)
+ entry->parent->ref = tuple->prog;
+}
+
+static inline void bpf_mprog_complete_release(struct bpf_mprog_entry *entry)
+{
+ /* In the non-link case prog deletions can only drop the reference
+ * to the prog after the bpf_mprog_entry got swapped and the
+ * bpf_mprog ensured that there are no inflight users anymore.
+ *
+ * Paired with bpf_mprog_mark_for_release().
+ */
+ if (entry->parent->ref) {
+ bpf_prog_put(entry->parent->ref);
+ entry->parent->ref = NULL;
+ }
+}
+
+static inline void bpf_mprog_revision_new(struct bpf_mprog_entry *entry)
+{
+ atomic64_inc(&entry->parent->revision);
+}
+
+static inline void bpf_mprog_commit(struct bpf_mprog_entry *entry)
+{
+ bpf_mprog_complete_release(entry);
+ bpf_mprog_revision_new(entry);
+}
+
+static inline u64 bpf_mprog_revision(struct bpf_mprog_entry *entry)
+{
+ return atomic64_read(&entry->parent->revision);
+}
+
+static inline void bpf_mprog_entry_copy(struct bpf_mprog_entry *dst,
+ struct bpf_mprog_entry *src)
+{
+ memcpy(dst->fp_items, src->fp_items, sizeof(src->fp_items));
+}
+
+static inline void bpf_mprog_entry_clear(struct bpf_mprog_entry *dst)
+{
+ memset(dst->fp_items, 0, sizeof(dst->fp_items));
+}
+
+static inline void bpf_mprog_clear_all(struct bpf_mprog_entry *entry,
+ struct bpf_mprog_entry **entry_new)
+{
+ struct bpf_mprog_entry *peer;
+
+ peer = bpf_mprog_peer(entry);
+ bpf_mprog_entry_clear(peer);
+ peer->parent->count = 0;
+ *entry_new = peer;
+}
+
+static inline void bpf_mprog_entry_grow(struct bpf_mprog_entry *entry, int idx)
+{
+ int total = bpf_mprog_total(entry);
+
+ memmove(entry->fp_items + idx + 1,
+ entry->fp_items + idx,
+ (total - idx) * sizeof(struct bpf_mprog_fp));
+
+ memmove(entry->parent->cp_items + idx + 1,
+ entry->parent->cp_items + idx,
+ (total - idx) * sizeof(struct bpf_mprog_cp));
+}
+
+static inline void bpf_mprog_entry_shrink(struct bpf_mprog_entry *entry, int idx)
+{
+	/* Total array size is needed in this case to ensure the NULL
+ * entry is copied at the end.
+ */
+ int total = ARRAY_SIZE(entry->fp_items);
+
+ memmove(entry->fp_items + idx,
+ entry->fp_items + idx + 1,
+ (total - idx - 1) * sizeof(struct bpf_mprog_fp));
+
+ memmove(entry->parent->cp_items + idx,
+ entry->parent->cp_items + idx + 1,
+ (total - idx - 1) * sizeof(struct bpf_mprog_cp));
+}
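+
+/* Worked example (illustrative): with progs [A, B, C] attached and idx == 1,
+ * bpf_mprog_entry_grow() shifts to [A, _, B, C] to make room at index 1,
+ * while bpf_mprog_entry_shrink() collapses to [A, C], pulling the
+ * terminating NULL entry along so bpf_mprog_foreach_prog() stops correctly.
+ */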
+
+static inline void bpf_mprog_read(struct bpf_mprog_entry *entry, u32 idx,
+ struct bpf_mprog_fp **fp,
+ struct bpf_mprog_cp **cp)
+{
+ *fp = &entry->fp_items[idx];
+ *cp = &entry->parent->cp_items[idx];
+}
+
+static inline void bpf_mprog_write(struct bpf_mprog_fp *fp,
+ struct bpf_mprog_cp *cp,
+ struct bpf_tuple *tuple)
+{
+ WRITE_ONCE(fp->prog, tuple->prog);
+ cp->link = tuple->link;
+}
+
+int bpf_mprog_attach(struct bpf_mprog_entry *entry,
+ struct bpf_mprog_entry **entry_new,
+ struct bpf_prog *prog_new, struct bpf_link *link,
+ struct bpf_prog *prog_old,
+ u32 flags, u32 id_or_fd, u64 revision);
+
+int bpf_mprog_detach(struct bpf_mprog_entry *entry,
+ struct bpf_mprog_entry **entry_new,
+ struct bpf_prog *prog, struct bpf_link *link,
+ u32 flags, u32 id_or_fd, u64 revision);
+
+int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
+ struct bpf_mprog_entry *entry);
+
+static inline bool bpf_mprog_supported(enum bpf_prog_type type)
+{
+ switch (type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ return true;
+ default:
+ return false;
+ }
+}
+#endif /* __BPF_MPROG_H */
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index f883f01a5061..b13de31e163f 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -77,6 +77,12 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm,
void *, void *)
#endif /* CONFIG_BPF_LSM */
#endif
+BPF_PROG_TYPE(BPF_PROG_TYPE_SYSCALL, bpf_syscall,
+ void *, void *)
+#ifdef CONFIG_NETFILTER_BPF_LINK
+BPF_PROG_TYPE(BPF_PROG_TYPE_NETFILTER, netfilter,
+ struct bpf_nf_ctx, struct bpf_nf_ctx)
+#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
@@ -84,6 +90,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_PROG_ARRAY, prog_array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops)
#ifdef CONFIG_CGROUPS
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_CGRP_STORAGE, cgrp_storage_map_ops)
#endif
#ifdef CONFIG_CGROUP_BPF
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops)
@@ -99,14 +106,14 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
-#ifdef CONFIG_NET
-BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
-BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
-BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
#ifdef CONFIG_BPF_LSM
BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops)
+#ifdef CONFIG_NET
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
#if defined(CONFIG_XDP_SOCKETS)
BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
@@ -123,6 +130,10 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_RINGBUF, ringbuf_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_BLOOM_FILTER, bloom_filter_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_USER_RINGBUF, user_ringbuf_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_ARENA, arena_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_INSN_ARRAY, insn_array_map_ops)
BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint)
BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing)
@@ -132,4 +143,15 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup)
BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter)
#ifdef CONFIG_NET
BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns)
+BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp)
+BPF_LINK_TYPE(BPF_LINK_TYPE_NETFILTER, netfilter)
+BPF_LINK_TYPE(BPF_LINK_TYPE_TCX, tcx)
+BPF_LINK_TYPE(BPF_LINK_TYPE_NETKIT, netkit)
+BPF_LINK_TYPE(BPF_LINK_TYPE_SOCKMAP, sockmap)
+#endif
+#ifdef CONFIG_PERF_EVENTS
+BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf)
#endif
+BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi)
+BPF_LINK_TYPE(BPF_LINK_TYPE_STRUCT_OPS, struct_ops)
+BPF_LINK_TYPE(BPF_LINK_TYPE_UPROBE_MULTI, uprobe_multi)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 06841517ab1e..130bcbd66f60 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -18,33 +18,29 @@
* that converting umax_value to int cannot overflow.
*/
#define BPF_MAX_VAR_SIZ (1 << 29)
-
-/* Liveness marks, used for registers and spilled-regs (in stack slots).
- * Read marks propagate upwards until they find a write mark; they record that
- * "one of this state's descendants read this reg" (and therefore the reg is
- * relevant for states_equal() checks).
- * Write marks collect downwards and do not propagate; they record that "the
- * straight-line code that reached this state (from its parent) wrote this reg"
- * (and therefore that reads propagated from this state or its descendants
- * should not propagate to its parent).
- * A state with a write mark can receive read marks; it just won't propagate
- * them to its parent, since the write mark is a property, not of the state,
- * but of the link between it and its parent. See mark_reg_read() and
- * mark_stack_slot_read() in kernel/bpf/verifier.c.
+/* size of tmp_str_buf in bpf_verifier_env.
+ * we need at least 306 bytes to fit full stack mask representation
+ * (in the "-8,-16,...,-512" form)
*/
-enum bpf_reg_liveness {
- REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
- REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
- REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
- REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
- REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
- REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
+#define TMP_STR_BUF_LEN 320
+/* Patch buffer size */
+#define INSN_BUF_SIZE 32
+
+#define ITER_PREFIX "bpf_iter_"
+
+enum bpf_iter_state {
+ BPF_ITER_STATE_INVALID, /* for non-first slot */
+ BPF_ITER_STATE_ACTIVE,
+ BPF_ITER_STATE_DRAINED,
};
struct bpf_reg_state {
/* Ordering of fields matters. See states_equal() */
enum bpf_reg_type type;
- /* Fixed part of pointer offset, pointer types only */
+ /*
+ * Fixed part of pointer offset, pointer types only.
+ * Or constant delta between "linked" scalars with the same ID.
+ */
s32 off;
union {
/* valid when type == PTR_TO_PACKET */
@@ -53,7 +49,14 @@ struct bpf_reg_state {
/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
* PTR_TO_MAP_VALUE_OR_NULL
*/
- struct bpf_map *map_ptr;
+ struct {
+ struct bpf_map *map_ptr;
+			/* To distinguish inner maps returned by map
+			 * lookups in an outer map, the map_uid is
+			 * non-zero for registers pointing to inner maps.
+			 */
+ u32 map_uid;
+ };
/* for PTR_TO_BTF_ID */
struct {
@@ -61,7 +64,42 @@ struct bpf_reg_state {
u32 btf_id;
};
- u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
+ struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
+ u32 mem_size;
+ u32 dynptr_id; /* for dynptr slices */
+ };
+
+ /* For dynptr stack slots */
+ struct {
+ enum bpf_dynptr_type type;
+ /* A dynptr is 16 bytes so it takes up 2 stack slots.
+ * We need to track which slot is the first slot
+ * to protect against cases where the user may try to
+ * pass in an address starting at the second slot of the
+ * dynptr.
+ */
+ bool first_slot;
+ } dynptr;
+
+ /* For bpf_iter stack slots */
+ struct {
+ /* BTF container and BTF type ID describing
+ * struct bpf_iter_<type> of an iterator state
+ */
+ struct btf *btf;
+ u32 btf_id;
+ /* packing following two fields to fit iter state into 16 bytes */
+ enum bpf_iter_state state:2;
+ int depth:30;
+ } iter;
+
+ /* For irq stack slots */
+ struct {
+ enum {
+ IRQ_NATIVE_KFUNC,
+ IRQ_LOCK_KFUNC,
+ } kfunc_class;
+ } irq;
/* Max size from any of the above. */
struct {
@@ -71,6 +109,26 @@ struct bpf_reg_state {
u32 subprogno; /* for PTR_TO_FUNC */
};
+ /* For scalar types (SCALAR_VALUE), this represents our knowledge of
+ * the actual value.
+ * For pointer types, this represents the variable part of the offset
+ * from the pointed-to object, and is shared with all bpf_reg_states
+ * with the same id as us.
+ */
+ struct tnum var_off;
+ /* Used to determine if any memory access using this register will
+ * result in a bad access.
+ * These refer to the same value as var_off, not necessarily the actual
+ * contents of the register.
+ */
+ s64 smin_value; /* minimum possible (s64)value */
+ s64 smax_value; /* maximum possible (s64)value */
+ u64 umin_value; /* minimum possible (u64)value */
+ u64 umax_value; /* maximum possible (u64)value */
+ s32 s32_min_value; /* minimum possible (s32)value */
+ s32 s32_max_value; /* maximum possible (s32)value */
+ u32 u32_min_value; /* minimum possible (u32)value */
+ u32 u32_max_value; /* maximum possible (u32)value */
/* For PTR_TO_PACKET, used to find other pointers with the same variable
* offset, so they can share range knowledge.
* For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
@@ -79,7 +137,18 @@ struct bpf_reg_state {
* for the purpose of tracking that it's freed.
* For PTR_TO_SOCKET this is used to share which pointers retain the
* same reference to the socket, to determine proper reference freeing.
+ * For stack slots that are dynptrs, this is used to track references to
+ * the dynptr to determine proper reference freeing.
+ * Similarly to dynptrs, we use ID to track "belonging" of a reference
+ * to a specific instance of bpf_iter.
*/
+ /*
+ * Upper bit of ID is used to remember relationship between "linked"
+ * registers. Example:
+ * r1 = r2; both will have r1->id == r2->id == N
+ * r1 += 10; r1->id == N | BPF_ADD_CONST and r1->off == 10
+ */
+#define BPF_ADD_CONST (1U << 31)
u32 id;
/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
* from a pointer-cast helper, bpf_sk_fullsock() and
@@ -121,28 +190,6 @@ struct bpf_reg_state {
* allowed and has the same effect as bpf_sk_release(sk).
*/
u32 ref_obj_id;
- /* For scalar types (SCALAR_VALUE), this represents our knowledge of
- * the actual value.
- * For pointer types, this represents the variable part of the offset
- * from the pointed-to object, and is shared with all bpf_reg_states
- * with the same id as us.
- */
- struct tnum var_off;
- /* Used to determine if any memory access using this register will
- * result in a bad access.
- * These refer to the same value as var_off, not necessarily the actual
- * contents of the register.
- */
- s64 smin_value; /* minimum possible (s64)value */
- s64 smax_value; /* maximum possible (s64)value */
- u64 umin_value; /* minimum possible (u64)value */
- u64 umax_value; /* maximum possible (u64)value */
- s32 s32_min_value; /* minimum possible (s32)value */
- s32 s32_max_value; /* maximum possible (s32)value */
- u32 u32_min_value; /* minimum possible (u32)value */
- u32 u32_max_value; /* maximum possible (u32)value */
- /* parentage chain for liveness checking */
- struct bpf_reg_state *parent;
/* Inside the callee two registers can be both PTR_TO_STACK like
* R1=fp-8 and R2=fp-8, but one of them points to this function stack
* while another to the caller's stack. To differentiate them 'frameno'
@@ -155,7 +202,6 @@ struct bpf_reg_state {
* patching which only happens after main verification finished.
*/
s32 subreg_def;
- enum bpf_reg_liveness live;
/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
bool precise;
};
@@ -165,16 +211,40 @@ enum bpf_stack_slot_type {
STACK_SPILL, /* register spilled into stack */
STACK_MISC, /* BPF program wrote some data into this slot */
STACK_ZERO, /* BPF program wrote constant zero */
+ /* A dynptr is stored in this stack slot. The type of dynptr
+ * is stored in bpf_stack_state->spilled_ptr.dynptr.type
+ */
+ STACK_DYNPTR,
+ STACK_ITER,
+ STACK_IRQ_FLAG,
};
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
+#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
+ (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
+ (1 << BPF_REG_5))
+
+#define BPF_DYNPTR_SIZE sizeof(struct bpf_dynptr_kern)
+#define BPF_DYNPTR_NR_SLOTS (BPF_DYNPTR_SIZE / BPF_REG_SIZE)
+
struct bpf_stack_state {
struct bpf_reg_state spilled_ptr;
u8 slot_type[BPF_REG_SIZE];
};
struct bpf_reference_state {
+	/* Each reference object has a type; plain acquired pointers
+	 * carry REF_TYPE_PTR.
+	 */
+ enum ref_state_type {
+ REF_TYPE_PTR = (1 << 1),
+ REF_TYPE_IRQ = (1 << 2),
+ REF_TYPE_LOCK = (1 << 3),
+ REF_TYPE_RES_LOCK = (1 << 4),
+ REF_TYPE_RES_LOCK_IRQ = (1 << 5),
+ REF_TYPE_LOCK_MASK = REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
+ } type;
/* Track each reference created with a unique id, even if the same
* instruction creates the reference multiple times (eg, via CALL).
*/
@@ -183,6 +253,15 @@ struct bpf_reference_state {
* is used purely to inform the user of a reference leak.
*/
int insn_idx;
+	/* Used to keep track of the source object of a lock, to ensure
+ * it matches on unlock.
+ */
+ void *ptr;
+};
+
+struct bpf_retval_range {
+ s32 minval;
+ s32 maxval;
};
/* state of the program:
@@ -201,25 +280,87 @@ struct bpf_func_state {
* zero == main subprog
*/
u32 subprogno;
+ /* Every bpf_timer_start will increment async_entry_cnt.
+ * It's used to distinguish:
+ * void foo(void) { for(;;); }
+ * void foo(void) { bpf_timer_set_callback(,foo); }
+ */
+ u32 async_entry_cnt;
+ struct bpf_retval_range callback_ret_range;
+ bool in_callback_fn;
+ bool in_async_callback_fn;
+ bool in_exception_callback_fn;
+ /* For callback calling functions that limit number of possible
+ * callback executions (e.g. bpf_loop) keeps track of current
+ * simulated iteration number.
+ * Value in frame N refers to number of times callback with frame
+ * N+1 was simulated, e.g. for the following call:
+ *
+ * bpf_loop(..., fn, ...); | suppose current frame is N
+ * | fn would be simulated in frame N+1
+ * | number of simulations is tracked in frame N
+ */
+ u32 callback_depth;
/* The following fields should be last. See copy_func_state() */
- int acquired_refs;
- struct bpf_reference_state *refs;
- int allocated_stack;
- bool in_callback_fn;
+ /* The state of the stack. Each element of the array describes BPF_REG_SIZE
+ * (i.e. 8) bytes worth of stack memory.
+ * stack[0] represents bytes [*(r10-8)..*(r10-1)]
+ * stack[1] represents bytes [*(r10-16)..*(r10-9)]
+ * ...
+ * stack[allocated_stack/8 - 1] represents [*(r10-allocated_stack)..*(r10-allocated_stack+7)]
+ */
struct bpf_stack_state *stack;
+	/* Size of the current stack, in bytes. The stack state is tracked above, in
+ * `stack`. allocated_stack is always a multiple of BPF_REG_SIZE.
+ */
+ int allocated_stack;
+};
+
+#define MAX_CALL_FRAMES 8
+
+/* instruction history flags, used in bpf_jmp_history_entry.flags field */
+enum {
+ /* instruction references stack slot through PTR_TO_STACK register;
+ * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
+ * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
+ * 8 bytes per slot, so slot index (spi) is [0, 63])
+ */
+ INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */
+
+ INSN_F_SPI_MASK = 0x3f, /* 6 bits */
+ INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
+
+ INSN_F_STACK_ACCESS = BIT(9),
+
+ INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */
+ INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */
+ /* total 12 bits are used now. */
};
-struct bpf_idx_pair {
- u32 prev_idx;
+static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
+static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
+
+struct bpf_jmp_history_entry {
u32 idx;
+ /* insn idx can't be bigger than 1 million */
+ u32 prev_idx : 20;
+ /* special INSN_F_xxx flags */
+ u32 flags : 12;
+ /* additional registers that need precision tracking when this
+ * jump is backtracked, vector of six 10-bit records
+ */
+ u64 linked_regs;
};
-#define MAX_CALL_FRAMES 8
+/* Maximum number of register states that can exist at once */
+#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
struct bpf_verifier_state {
/* call stack tracking */
struct bpf_func_state *frame[MAX_CALL_FRAMES];
struct bpf_verifier_state *parent;
+ /* Acquired reference states */
+ struct bpf_reference_state *refs;
/*
* 'branches' field is the number of branches left to explore:
* 0 - all possible paths from this state reached bpf_exit or
@@ -256,7 +397,7 @@ struct bpf_verifier_state {
* If is_state_visited() sees a state with branches > 0 it means
* there is a loop. If such state is exactly equal to the current state
* it's an infinite loop. Note states_equal() checks for states
- * equvalency, so two states being 'states_equal' does not mean
+ * equivalency, so two states being 'states_equal' does not mean
* infinite loop. The exact comparison is provided by
* states_maybe_looping() function. It's a stronger pre-check and
* much faster than states_equal().
@@ -268,37 +409,95 @@ struct bpf_verifier_state {
u32 branches;
u32 insn_idx;
u32 curframe;
- u32 active_spin_lock;
+
+ u32 acquired_refs;
+ u32 active_locks;
+ u32 active_preempt_locks;
+ u32 active_irq_id;
+ u32 active_lock_id;
+ void *active_lock_ptr;
+ u32 active_rcu_locks;
+
bool speculative;
+ bool in_sleepable;
+ bool cleaned;
/* first and last insn idx of this verifier state */
u32 first_insn_idx;
u32 last_insn_idx;
+ /* if this state is a backedge state then equal_state
+ * records cached state to which this state is equal.
+ */
+ struct bpf_verifier_state *equal_state;
/* jmp history recorded from first to last.
* backtracking is using it to go from last to first.
* For most states jmp_history_cnt is [0-3].
* For loops can go up to ~40.
*/
- struct bpf_idx_pair *jmp_history;
+ struct bpf_jmp_history_entry *jmp_history;
u32 jmp_history_cnt;
+ u32 dfs_depth;
+ u32 callback_unroll_depth;
+ u32 may_goto_depth;
};
-#define bpf_get_spilled_reg(slot, frame) \
+#define bpf_get_spilled_reg(slot, frame, mask) \
(((slot < frame->allocated_stack / BPF_REG_SIZE) && \
- (frame->stack[slot].slot_type[0] == STACK_SPILL)) \
+ ((1 << frame->stack[slot].slot_type[BPF_REG_SIZE - 1]) & (mask))) \
? &frame->stack[slot].spilled_ptr : NULL)
/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
-#define bpf_for_each_spilled_reg(iter, frame, reg) \
- for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \
+#define bpf_for_each_spilled_reg(iter, frame, reg, mask) \
+ for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask); \
iter < frame->allocated_stack / BPF_REG_SIZE; \
- iter++, reg = bpf_get_spilled_reg(iter, frame))
+ iter++, reg = bpf_get_spilled_reg(iter, frame, mask))
+
+#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr) \
+ ({ \
+ struct bpf_verifier_state *___vstate = __vst; \
+ int ___i, ___j; \
+ for (___i = 0; ___i <= ___vstate->curframe; ___i++) { \
+ struct bpf_reg_state *___regs; \
+ __state = ___vstate->frame[___i]; \
+ ___regs = __state->regs; \
+ for (___j = 0; ___j < MAX_BPF_REG; ___j++) { \
+ __reg = &___regs[___j]; \
+ (void)(__expr); \
+ } \
+ bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \
+ if (!__reg) \
+ continue; \
+ (void)(__expr); \
+ } \
+ } \
+ })
+
+/* Invoke __expr over registers in __vst, setting __state and __reg */
+#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
+ bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr)
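+
+/* Illustrative sketch (assumed caller in verifier.c): e.g. clearing an id
+ * across all registers and spilled slots of a state:
+ *
+ *	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
+ *		if (reg->id == id)
+ *			reg->id = 0;
+ *	}));
+ */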
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
- struct bpf_verifier_state_list *next;
- int miss_cnt, hit_cnt;
+ struct list_head node;
+ u32 miss_cnt;
+ u32 hit_cnt:31;
+ u32 in_free_list:1;
+};
+
+struct bpf_loop_inline_state {
+ unsigned int initialized:1; /* set to true upon first entry */
+ unsigned int fit_for_inline:1; /* true if callback function is the same
+ * at each call and flags are always zero
+ */
+ u32 callback_subprogno; /* valid when fit_for_inline is true */
+};
+
+/* pointer and state for maps */
+struct bpf_map_ptr_state {
+ struct bpf_map *map_ptr;
+ bool poison;
+ bool unpriv;
};
/* Possible states for alu_state member. */
@@ -310,10 +509,19 @@ struct bpf_verifier_state_list {
#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
BPF_ALU_SANITIZE_DST)
+/*
+ * An array of BPF instruction indexes.
+ * Primary usage: return value of bpf_insn_successors.
+ */
+struct bpf_iarray {
+ int cnt;
+ u32 items[];
+};
+
struct bpf_insn_aux_data {
union {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
- unsigned long map_ptr_state; /* pointer/poison value for maps */
+ struct bpf_map_ptr_state map_ptr_state;
s32 call_imm; /* saved imm field of call insn */
u32 alu_limit; /* limit for add/sub register with pointer */
struct {
@@ -330,17 +538,59 @@ struct bpf_insn_aux_data {
u32 mem_size; /* mem_size for non-struct typed var */
};
} btf_var;
+ /* if instruction is a call to bpf_loop this field tracks
+ * the state of the relevant registers to make decision about inlining
+ */
+ struct bpf_loop_inline_state loop_inline_state;
};
+ union {
+ /* remember the size of type passed to bpf_obj_new to rewrite R1 */
+ u64 obj_new_size;
+ /* remember the offset of node field within type to rewrite */
+ u64 insert_off;
+ };
+ struct bpf_iarray *jt; /* jump table for gotox or bpf_tailcall call instruction */
+ struct btf_struct_meta *kptr_struct_meta;
u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
- int sanitize_stack_off; /* stack slot to be cleared */
u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
+ bool nospec; /* do not execute this instruction speculatively */
+ bool nospec_result; /* result is unsafe under speculation, nospec must follow */
bool zext_dst; /* this insn zero extends dst reg */
+ bool needs_zext; /* alu op needs to clear upper bits */
+ bool non_sleepable; /* helper/kfunc may be called from non-sleepable context */
+ bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
+ bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
u8 alu_state; /* used in combination with alu_limit */
+ /* true if STX or LDX instruction is a part of a spill/fill
+ * pattern for a bpf_fastcall call.
+ */
+ u8 fastcall_pattern:1;
+ /* for CALL instructions, a number of spill/fill pairs in the
+ * bpf_fastcall pattern.
+ */
+ u8 fastcall_spills_num:3;
+ u8 arg_prog:4;
/* below fields are initialized once */
unsigned int orig_idx; /* original instruction index */
+ bool jmp_point;
bool prune_point;
+	/* ensure we check state equivalence and save a state checkpoint at
+ * this instruction, regardless of any heuristics
+ */
+ bool force_checkpoint;
+ /* true if instruction is a call to a helper function that
+ * accepts callback function as a parameter.
+ */
+ bool calls_callback;
+ /*
+ * CFG strongly connected component this instruction belongs to,
+ * zero if it is a singleton SCC.
+ */
+ u32 scc;
+ /* registers alive before this instruction. */
+ u16 live_regs_before;
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -349,44 +599,141 @@ struct bpf_insn_aux_data {
#define BPF_VERIFIER_TMP_LOG_SIZE 1024
struct bpf_verifier_log {
- u32 level;
- char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
+ /* Logical start and end positions of a "log window" of the verifier log.
+ * start_pos == 0 means we haven't truncated anything.
+ * Once truncation starts to happen, start_pos + len_total == end_pos,
+ * except during log reset situations, in which (end_pos - start_pos)
+ * might get smaller than len_total (see bpf_vlog_reset()).
+ * Generally, (end_pos - start_pos) gives number of useful data in
+ * user log buffer.
+ */
+ u64 start_pos;
+ u64 end_pos;
char __user *ubuf;
- u32 len_used;
+ u32 level;
u32 len_total;
+ u32 len_max;
+ char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
};
-static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
-{
- return log->len_used >= log->len_total - 1;
-}
-
#define BPF_LOG_LEVEL1 1
#define BPF_LOG_LEVEL2 2
#define BPF_LOG_STATS 4
+#define BPF_LOG_FIXED 8
#define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
-#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS)
+#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
#define BPF_LOG_KERNEL (BPF_LOG_MASK + 1) /* kernel internal flag */
+#define BPF_LOG_MIN_ALIGNMENT 8U
+#define BPF_LOG_ALIGNMENT 40U
static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
- return log &&
- ((log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
- log->level == BPF_LOG_KERNEL);
+ return log && log->level;
}
#define BPF_MAX_SUBPROGS 256
+struct bpf_subprog_arg_info {
+ enum bpf_arg_type arg_type;
+ union {
+ u32 mem_size;
+ u32 btf_id;
+ };
+};
+
+enum priv_stack_mode {
+ PRIV_STACK_UNKNOWN,
+ NO_PRIV_STACK,
+ PRIV_STACK_ADAPTIVE,
+};
+
struct bpf_subprog_info {
/* 'start' has to be the first field otherwise find_subprog() won't work */
u32 start; /* insn idx of function entry point */
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
+ u32 postorder_start; /* The idx to the env->cfg.insn_postorder */
+ u32 exit_idx; /* Index of one of the BPF_EXIT instructions in this subprogram */
u16 stack_depth; /* max. stack depth used by this function */
- bool has_tail_call;
- bool tail_call_reachable;
- bool has_ld_abs;
+ u16 stack_extra;
+ /* offsets in range [stack_depth .. fastcall_stack_off)
+ * are used for bpf_fastcall spills and fills.
+ */
+ s16 fastcall_stack_off;
+ bool has_tail_call: 1;
+ bool tail_call_reachable: 1;
+ bool has_ld_abs: 1;
+ bool is_cb: 1;
+ bool is_async_cb: 1;
+ bool is_exception_cb: 1;
+ bool args_cached: 1;
+ /* true if bpf_fastcall stack region is used by functions that can't be inlined */
+ bool keep_fastcall_stack: 1;
+ bool changes_pkt_data: 1;
+ bool might_sleep: 1;
+ u8 arg_cnt:3;
+
+ enum priv_stack_mode priv_stack_mode;
+ struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
+};
+
+struct bpf_verifier_env;
+
+struct backtrack_state {
+ struct bpf_verifier_env *env;
+ u32 frame;
+ u32 reg_masks[MAX_CALL_FRAMES];
+ u64 stack_masks[MAX_CALL_FRAMES];
+};
+
+struct bpf_id_pair {
+ u32 old;
+ u32 cur;
};
+struct bpf_idmap {
+ u32 tmp_id_gen;
+ struct bpf_id_pair map[BPF_ID_MAP_SIZE];
+};
+
+struct bpf_idset {
+ u32 count;
+ u32 ids[BPF_ID_MAP_SIZE];
+};
+
+/* see verifier.c:compute_scc_callchain() */
+struct bpf_scc_callchain {
+ /* call sites from bpf_verifier_state->frame[*]->callsite leading to this SCC */
+ u32 callsites[MAX_CALL_FRAMES - 1];
+ /* last frame in a chain is identified by SCC id */
+ u32 scc;
+};
+
+/* verifier state waiting for propagate_backedges() */
+struct bpf_scc_backedge {
+ struct bpf_scc_backedge *next;
+ struct bpf_verifier_state state;
+};
+
+struct bpf_scc_visit {
+ struct bpf_scc_callchain callchain;
+ /* first state in current verification path that entered SCC
+ * identified by the callchain
+ */
+ struct bpf_verifier_state *entry_state;
+ struct bpf_scc_backedge *backedges; /* list of backedges */
+ u32 num_backedges;
+};
+
+/* An array of bpf_scc_visit structs sharing the same bpf_scc_callchain->scc
+ * but having different bpf_scc_callchain->callsites.
+ */
+struct bpf_scc_info {
+ u32 num_visits;
+ struct bpf_scc_visit visits[];
+};
+
+struct bpf_liveness;
+
/* single container for all structs
* one verifier_env per bpf_check() call
*/
@@ -395,34 +742,61 @@ struct bpf_verifier_env {
u32 prev_insn_idx;
struct bpf_prog *prog; /* eBPF program being verified */
const struct bpf_verifier_ops *ops;
+ struct module *attach_btf_mod; /* The owner module of prog->aux->attach_btf */
struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
int stack_size; /* number of states to be processed */
bool strict_alignment; /* perform strict pointer alignment checks */
bool test_state_freq; /* test verifier with different pruning frequency */
+ bool test_reg_invariants; /* fail verification on register invariants violations */
struct bpf_verifier_state *cur_state; /* current verifier state */
- struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
- struct bpf_verifier_state_list *free_list;
+ /* Search pruning optimization, array of list_heads for
+ * lists of struct bpf_verifier_state_list.
+ */
+ struct list_head *explored_states;
+ struct list_head free_list; /* list of struct bpf_verifier_state_list */
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */
+ struct bpf_map *insn_array_maps[MAX_USED_MAPS]; /* array of INSN_ARRAY map's to be relocated */
u32 used_map_cnt; /* number of used maps */
u32 used_btf_cnt; /* number of used BTF objects */
+ u32 insn_array_map_cnt; /* number of used maps of type BPF_MAP_TYPE_INSN_ARRAY */
u32 id_gen; /* used to generate unique reg IDs */
+ u32 hidden_subprog_cnt; /* number of hidden subprogs */
+ int exception_callback_subprog;
+ bool explore_alu_limits;
bool allow_ptr_leaks;
+ /* Allow access to uninitialized stack memory. Writes with fixed offset are
+ * always allowed, so this refers to reads (with fixed or variable offset),
+ * to writes with variable offset and to indirect (helper) accesses.
+ */
bool allow_uninit_stack;
- bool allow_ptr_to_map_access;
bool bpf_capable;
bool bypass_spec_v1;
bool bypass_spec_v4;
bool seen_direct_write;
+ bool seen_exception;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
const struct bpf_line_info *prev_linfo;
struct bpf_verifier_log log;
- struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
+ struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
+ union {
+ struct bpf_idmap idmap_scratch;
+ struct bpf_idset idset_scratch;
+ };
struct {
int *insn_state;
int *insn_stack;
+ /*
+ * vector of instruction indexes sorted in post-order, grouped by subprogram,
+ * see bpf_subprog_info->postorder_start.
+ */
+ int *insn_postorder;
int cur_stack;
+ /* current position in the insn_postorder vector */
+ int cur_postorder;
} cfg;
+ struct backtrack_state bt;
+ struct bpf_jmp_history_entry *cur_hist_ent;
u32 pass_cnt; /* number of times do_check() was called */
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
@@ -442,14 +816,72 @@ struct bpf_verifier_env {
u32 peak_states;
/* longest register parentage chain walked for liveness marking */
u32 longest_mark_read_walk;
+ u32 free_list_size;
+ u32 explored_states_size;
+ u32 num_backedges;
+ bpfptr_t fd_array;
+
+ /* bit mask to keep track of whether a register has been accessed
+ * since the last time the function state was printed
+ */
+ u32 scratched_regs;
+ /* Same as scratched_regs but for stack slots */
+ u64 scratched_stack_slots;
+ u64 prev_log_pos, prev_insn_print_pos;
+	/* buffer used to temporarily hold constants as scalar registers */
+ struct bpf_reg_state fake_reg[2];
+ /* buffer used to generate temporary string representations,
+ * e.g., in reg_type_str() to generate reg_type string
+ */
+ char tmp_str_buf[TMP_STR_BUF_LEN];
+ struct bpf_insn insn_buf[INSN_BUF_SIZE];
+ struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
+ struct bpf_scc_callchain callchain_buf;
+ struct bpf_liveness *liveness;
+ /* array of pointers to bpf_scc_info indexed by SCC id */
+ struct bpf_scc_info **scc_info;
+ u32 scc_cnt;
+ struct bpf_iarray *succ;
+ struct bpf_iarray *gotox_tmp_buf;
};
+static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
+{
+ return &env->prog->aux->func_info_aux[subprog];
+}
+
+static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env, int subprog)
+{
+ return &env->subprog_info[subprog];
+}
+
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
const char *fmt, ...);
+int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
+ char __user *log_buf, u32 log_size);
+void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
+int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);
+
+__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
+ u32 insn_off,
+ const char *prefix_fmt, ...);
+
+#define verifier_bug_if(cond, env, fmt, args...) \
+ ({ \
+ bool __cond = (cond); \
+ if (unlikely(__cond)) \
+ verifier_bug(env, fmt " (" #cond ")", ##args); \
+ (__cond); \
+ })
+#define verifier_bug(env, fmt, args...) \
+ ({ \
+ BPF_WARN_ONCE(1, "verifier bug: " fmt "\n", ##args); \
+ bpf_log(&env->log, "verifier bug: " fmt "\n", ##args); \
+ })
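+
+/* Illustrative usage (assumed call site): verifier_bug_if() evaluates to
+ * the condition, so it composes with error handling:
+ *
+ *	if (verifier_bug_if(idx >= env->subprog_cnt, env,
+ *			    "subprog index %d out of range", idx))
+ *		return -EFAULT;
+ */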
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
@@ -473,11 +905,6 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
-int check_ctx_reg(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg, int regno);
-int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
- u32 regno, u32 mem_size);
-
/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
struct btf *btf, u32 btf_id)
@@ -502,5 +929,162 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
const struct bpf_prog *tgt_prog,
u32 btf_id,
struct bpf_attach_target_info *tgt_info);
+void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
+
+int mark_chain_precision(struct bpf_verifier_env *env, int regno);
+
+#define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0)
+
+/* extract base type from bpf_{arg, return, reg}_type. */
+static inline u32 base_type(u32 type)
+{
+ return type & BPF_BASE_TYPE_MASK;
+}
+
+/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
+static inline u32 type_flag(u32 type)
+{
+ return type & ~BPF_BASE_TYPE_MASK;
+}
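+
+/* Example (illustrative): for a register typed
+ * PTR_TO_MAP_VALUE | PTR_MAYBE_NULL, base_type() yields PTR_TO_MAP_VALUE
+ * and type_flag() yields PTR_MAYBE_NULL.
+ */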
+
+/* only use after check_attach_btf_id() */
+static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
+{
+ return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
+ prog->aux->saved_dst_prog_type : prog->type;
+}
+
+static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
+{
+ switch (resolve_prog_type(prog)) {
+ case BPF_PROG_TYPE_TRACING:
+ return prog->expected_attach_type != BPF_TRACE_ITER;
+ case BPF_PROG_TYPE_STRUCT_OPS:
+ return prog->aux->jits_use_priv_stack;
+ case BPF_PROG_TYPE_LSM:
+ case BPF_PROG_TYPE_SYSCALL:
+ return false;
+ default:
+ return true;
+ }
+}
+
+#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)
+
+static inline bool bpf_type_has_unsafe_modifiers(u32 type)
+{
+ return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
+}
+
+static inline bool type_is_ptr_alloc_obj(u32 type)
+{
+ return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
+}
+
+static inline bool type_is_non_owning_ref(u32 type)
+{
+ return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
+}
+
+static inline bool type_is_pkt_pointer(enum bpf_reg_type type)
+{
+ type = base_type(type);
+ return type == PTR_TO_PACKET ||
+ type == PTR_TO_PACKET_META;
+}
+
+static inline bool type_is_sk_pointer(enum bpf_reg_type type)
+{
+ return type == PTR_TO_SOCKET ||
+ type == PTR_TO_SOCK_COMMON ||
+ type == PTR_TO_TCP_SOCK ||
+ type == PTR_TO_XDP_SOCK;
+}
+
+static inline bool type_may_be_null(u32 type)
+{
+ return type & PTR_MAYBE_NULL;
+}
+
+static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
+{
+ env->scratched_regs |= 1U << regno;
+}
+
+static inline void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
+{
+ env->scratched_stack_slots |= 1ULL << spi;
+}
+
+static inline bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
+{
+ return (env->scratched_regs >> regno) & 1;
+}
+
+static inline bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
+{
+ return (env->scratched_stack_slots >> regno) & 1;
+}
+
+static inline bool verifier_state_scratched(const struct bpf_verifier_env *env)
+{
+ return env->scratched_regs || env->scratched_stack_slots;
+}
+
+static inline void mark_verifier_state_clean(struct bpf_verifier_env *env)
+{
+ env->scratched_regs = 0U;
+ env->scratched_stack_slots = 0ULL;
+}
+
+/* Used for printing the entire verifier state. */
+static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
+{
+ env->scratched_regs = ~0U;
+ env->scratched_stack_slots = ~0ULL;
+}
+
+static inline bool bpf_stack_narrow_access_ok(int off, int fill_size, int spill_size)
+{
+#ifdef __BIG_ENDIAN
+ off -= spill_size - fill_size;
+#endif
+
+ return !(off % BPF_REG_SIZE);
+}
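+
+/* Worked example (illustrative): reading back 4 bytes of an 8-byte spill.
+ * On little-endian the low half sits at the slot's base, so off % 8 == 0
+ * suffices; on big-endian it sits at base + 4, hence off is shifted by
+ * spill_size - fill_size before the alignment check.
+ */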
+
+static inline bool insn_is_gotox(struct bpf_insn *insn)
+{
+ return BPF_CLASS(insn->code) == BPF_JMP &&
+ BPF_OP(insn->code) == BPF_JA &&
+ BPF_SRC(insn->code) == BPF_X;
+}
+
+const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
+const char *dynptr_type_str(enum bpf_dynptr_type type);
+const char *iter_type_str(const struct btf *btf, u32 btf_id);
+const char *iter_state_str(enum bpf_iter_state state);
+
+void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
+ u32 frameno, bool print_all);
+void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
+ u32 frameno);
+
+struct bpf_subprog_info *bpf_find_containing_subprog(struct bpf_verifier_env *env, int off);
+int bpf_jmp_offset(struct bpf_insn *insn);
+struct bpf_iarray *bpf_insn_successors(struct bpf_verifier_env *env, u32 idx);
+void bpf_fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask);
+bool bpf_calls_callback(struct bpf_verifier_env *env, int insn_idx);
+
+int bpf_stack_liveness_init(struct bpf_verifier_env *env);
+void bpf_stack_liveness_free(struct bpf_verifier_env *env);
+int bpf_update_live_stack(struct bpf_verifier_env *env);
+int bpf_mark_stack_read(struct bpf_verifier_env *env, u32 frameno, u32 insn_idx, u64 mask);
+void bpf_mark_stack_write(struct bpf_verifier_env *env, u32 frameno, u64 mask);
+int bpf_reset_stack_write_marks(struct bpf_verifier_env *env, u32 insn_idx);
+int bpf_commit_stack_write_marks(struct bpf_verifier_env *env);
+int bpf_live_stack_query_init(struct bpf_verifier_env *env, struct bpf_verifier_state *st);
+bool bpf_stack_slot_alive(struct bpf_verifier_env *env, u32 frameno, u32 spi);
+void bpf_reset_live_stack_callchain(struct bpf_verifier_env *env);
#endif /* _LINUX_BPF_VERIFIER_H */
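The scratched-state helpers above are plain bitmap bookkeeping: one bit per register in a u32, one bit per stack slot in a u64. A minimal standalone sketch of the same pattern (the demo_* names and struct are hypothetical, not the kernel types):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_env {
	uint32_t scratched_regs;	/* bit n set: reg n was written */
	uint64_t scratched_stack_slots;	/* bit n set: slot n was written */
};

static void demo_mark_reg(struct demo_env *env, uint32_t regno)
{
	env->scratched_regs |= 1U << regno;
}

static bool demo_reg_scratched(const struct demo_env *env, uint32_t regno)
{
	return (env->scratched_regs >> regno) & 1;
}

int main(void)
{
	struct demo_env env = { 0 };

	demo_mark_reg(&env, 3);
	printf("r3=%d r4=%d\n", demo_reg_scratched(&env, 3),
	       demo_reg_scratched(&env, 4));	/* prints "r3=1 r4=0" */
	return 0;
}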
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
deleted file mode 100644
index 2ae3c8e1d83c..000000000000
--- a/include/linux/bpfilter.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_BPFILTER_H
-#define _LINUX_BPFILTER_H
-
-#include <uapi/linux/bpfilter.h>
-#include <linux/usermode_driver.h>
-#include <linux/sockptr.h>
-
-struct sock;
-int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval,
- unsigned int optlen);
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
- int __user *optlen);
-void bpfilter_umh_cleanup(struct umd_info *info);
-
-struct bpfilter_umh_ops {
- struct umd_info info;
- /* since ip_getsockopt() can run in parallel, serialize access to umh */
- struct mutex lock;
- int (*sockopt)(struct sock *sk, int optname, sockptr_t optval,
- unsigned int optlen, bool is_set);
- int (*start)(void);
-};
-extern struct bpfilter_umh_ops bpfilter_ops;
-#endif
diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h
new file mode 100644
index 000000000000..f6e0795db484
--- /dev/null
+++ b/include/linux/bpfptr.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* A pointer that can point to either kernel or userspace memory. */
+#ifndef _LINUX_BPFPTR_H
+#define _LINUX_BPFPTR_H
+
+#include <linux/mm.h>
+#include <linux/sockptr.h>
+
+typedef sockptr_t bpfptr_t;
+
+static inline bool bpfptr_is_kernel(bpfptr_t bpfptr)
+{
+ return bpfptr.is_kernel;
+}
+
+static inline bpfptr_t KERNEL_BPFPTR(void *p)
+{
+ return (bpfptr_t) { .kernel = p, .is_kernel = true };
+}
+
+static inline bpfptr_t USER_BPFPTR(void __user *p)
+{
+ return (bpfptr_t) { .user = p };
+}
+
+static inline bpfptr_t make_bpfptr(u64 addr, bool is_kernel)
+{
+ if (is_kernel)
+ return KERNEL_BPFPTR((void*) (uintptr_t) addr);
+ else
+ return USER_BPFPTR(u64_to_user_ptr(addr));
+}
+
+static inline bool bpfptr_is_null(bpfptr_t bpfptr)
+{
+ if (bpfptr_is_kernel(bpfptr))
+ return !bpfptr.kernel;
+ return !bpfptr.user;
+}
+
+static inline void bpfptr_add(bpfptr_t *bpfptr, size_t val)
+{
+ if (bpfptr_is_kernel(*bpfptr))
+ bpfptr->kernel += val;
+ else
+ bpfptr->user += val;
+}
+
+static inline int copy_from_bpfptr_offset(void *dst, bpfptr_t src,
+ size_t offset, size_t size)
+{
+ if (!bpfptr_is_kernel(src))
+ return copy_from_user(dst, src.user + offset, size);
+ return copy_from_kernel_nofault(dst, src.kernel + offset, size);
+}
+
+static inline int copy_from_bpfptr(void *dst, bpfptr_t src, size_t size)
+{
+ return copy_from_bpfptr_offset(dst, src, 0, size);
+}
+
+static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset,
+ const void *src, size_t size)
+{
+ return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size);
+}
+
+static inline void *kvmemdup_bpfptr_noprof(bpfptr_t src, size_t len)
+{
+ void *p = kvmalloc_node_align_noprof(len, 1, GFP_USER | __GFP_NOWARN, NUMA_NO_NODE);
+
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+ if (copy_from_bpfptr(p, src, len)) {
+ kvfree(p);
+ return ERR_PTR(-EFAULT);
+ }
+ return p;
+}
+#define kvmemdup_bpfptr(...) alloc_hooks(kvmemdup_bpfptr_noprof(__VA_ARGS__))
+
+static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count)
+{
+ if (bpfptr_is_kernel(src))
+ return strncpy_from_kernel_nofault(dst, src.kernel, count);
+ return strncpy_from_user(dst, src.user, count);
+}
+
+#endif /* _LINUX_BPFPTR_H */
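A hedged sketch of how a syscall-style caller might use the new header: make_bpfptr() wraps a u64 address from either origin, and copy_from_bpfptr() dispatches to copy_from_user() or copy_from_kernel_nofault() accordingly. The demo_cmd struct and function are hypothetical:

struct demo_cmd {
	__u32 flags;
	__u64 addr;
};

static int demo_copy_cmd(struct demo_cmd *cmd, __u64 uaddr, bool from_kernel)
{
	bpfptr_t p = make_bpfptr(uaddr, from_kernel);

	if (bpfptr_is_null(p))
		return -EINVAL;
	/* a nonzero return means the copy faulted */
	if (copy_from_bpfptr(cmd, p, sizeof(*cmd)))
		return -EFAULT;
	return 0;
}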
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index c2c2147dfeb8..115a964f3006 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -11,11 +11,13 @@
#define PHY_ID_BCM50610 0x0143bd60
#define PHY_ID_BCM50610M 0x0143bd70
+#define PHY_ID_BCM5221 0x004061e0
#define PHY_ID_BCM5241 0x0143bc30
#define PHY_ID_BCMAC131 0x0143bc70
#define PHY_ID_BCM5481 0x0143bca0
#define PHY_ID_BCM5395 0x0143bcf0
#define PHY_ID_BCM53125 0x03625f20
+#define PHY_ID_BCM53128 0x03625e10
#define PHY_ID_BCM54810 0x03625d00
#define PHY_ID_BCM54811 0x03625cc0
#define PHY_ID_BCM5482 0x0143bcb0
@@ -32,6 +34,7 @@
#define PHY_ID_BCM72113 0x35905310
#define PHY_ID_BCM72116 0x35905350
+#define PHY_ID_BCM72165 0x35905340
#define PHY_ID_BCM7250 0xae025280
#define PHY_ID_BCM7255 0xae025120
#define PHY_ID_BCM7260 0xae025190
@@ -42,6 +45,7 @@
#define PHY_ID_BCM7366 0x600d8490
#define PHY_ID_BCM7346 0x600d8650
#define PHY_ID_BCM7362 0x600d84b0
+#define PHY_ID_BCM74165 0x359052c0
#define PHY_ID_BCM7425 0x600d86b0
#define PHY_ID_BCM7429 0x600d8730
#define PHY_ID_BCM7435 0x600d8750
@@ -49,6 +53,7 @@
#define PHY_ID_BCM7439 0x600d8480
#define PHY_ID_BCM7439_2 0xae025080
#define PHY_ID_BCM7445 0x600d8510
+#define PHY_ID_BCM7712 0x35905330
#define PHY_ID_BCM_CYGNUS 0xae025200
#define PHY_ID_BCM_OMEGA 0xae025100
@@ -66,6 +71,7 @@
#define PHY_BRCM_CLEAR_RGMII_MODE 0x00000004
#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00000008
#define PHY_BRCM_EN_MASTER_MODE 0x00000010
+#define PHY_BRCM_IDDQ_SUSPEND 0x00000020
/* Broadcom BCM7xxx specific workarounds */
#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
@@ -83,7 +89,9 @@
#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */
#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
+#define MII_BCM54XX_EXP_SEL_TOP 0x0d00 /* TOP_MISC expansion register select */
#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
+#define MII_BCM54XX_EXP_SEL_WOL 0x0e00 /* Wake-on-LAN expansion select register */
#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
#define MII_BCM54XX_EXP_SEL_ETC 0x0d00 /* Expansion register spare + 2k mem */
@@ -129,6 +137,7 @@
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RSVD 0x0060
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_EN 0x0080
#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100
#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
@@ -155,6 +164,7 @@
#define BCM_LED_SRC_OPENSHORT 0xb
#define BCM_LED_SRC_OFF 0xe /* Tied high */
#define BCM_LED_SRC_ON 0xf /* Tied low */
+#define BCM_LED_SRC_MASK GENMASK(3, 0)
/*
* Broadcom Multicolor LED configurations (expansion register 4)
@@ -174,6 +184,12 @@
#define BCM_LED_MULTICOLOR_PROGRAM 0xa
/*
+ * Broadcom Synchronous Ethernet Controls (expansion register 0x0E)
+ */
+#define BCM_EXP_SYNC_ETHERNET (MII_BCM54XX_EXP_SEL_ER + 0x0E)
+#define BCM_EXP_SYNC_ETHERNET_MII_LITE BIT(11)
+
+/*
* BCM5482: Shadow registers
* Shadow values go into bits [14:10] of register 0x1c to select a shadow
* register to access.
@@ -200,11 +216,13 @@
#define BCM_NO_ANEG_APD_EN 0x0060 /* bits 5 & 6 */
#define BCM_APD_SINGLELP_EN 0x0100 /* Bit 8 */
-#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
+#define BCM54XX_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
/* LED3 / ~LINKSPD[2] selector */
-#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
+#define BCM54XX_SHD_LEDS_SHIFT(led) (4 * (led))
+#define BCM54XX_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
/* LED1 / ~LINKSPD[1] selector */
-#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
+#define BCM54XX_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
+#define BCM54XX_SHD_LEDS2 0x0e /* 01110: LED Selector 2 */
#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */
#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
@@ -233,6 +251,7 @@
#define MII_BCM54XX_EXP_EXP08 0x0F08
#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001
#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200
+#define MII_BCM54XX_EXP_EXP08_FORCE_DAC_WAKE 0x0100
#define MII_BCM54XX_EXP_EXP75 0x0f75
#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c
#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001
@@ -241,6 +260,15 @@
#define MII_BCM54XX_EXP_EXP97 0x0f97
#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c
+/* Top-MISC expansion registers */
+#define BCM54XX_TOP_MISC_IDDQ_CTRL (MII_BCM54XX_EXP_SEL_TOP + 0x06)
+#define BCM54XX_TOP_MISC_IDDQ_LP (1 << 0)
+#define BCM54XX_TOP_MISC_IDDQ_SD (1 << 2)
+#define BCM54XX_TOP_MISC_IDDQ_SR (1 << 3)
+
+#define BCM54XX_TOP_MISC_LED_CTL (MII_BCM54XX_EXP_SEL_TOP + 0x0C)
+#define BCM54XX_LED4_SEL_INTR BIT(1)
+
/*
* BCM5482: Secondary SerDes registers
*/
@@ -250,16 +278,164 @@
#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
+/* BroadR-Reach LRE Registers. */
+#define MII_BCM54XX_LRECR 0x00 /* LRE Control Register */
+#define MII_BCM54XX_LRESR 0x01 /* LRE Status Register */
+#define MII_BCM54XX_LREPHYSID1 0x02 /* LRE PHYS ID 1 */
+#define MII_BCM54XX_LREPHYSID2 0x03 /* LRE PHYS ID 2 */
+#define MII_BCM54XX_LREANAA 0x04 /* LDS Auto-Negotiation Advertised Ability */
+#define MII_BCM54XX_LREANAC 0x05 /* LDS Auto-Negotiation Advertised Control */
+#define MII_BCM54XX_LREANPT 0x06 /* LDS Ability Next Page Transmit */
+#define MII_BCM54XX_LRELPA 0x07 /* LDS Link Partner Ability */
+#define MII_BCM54XX_LRELPNPM 0x08 /* LDS Link Partner Next Page Message */
+#define MII_BCM54XX_LRELPNPC 0x09 /* LDS Link Partner Next Page Control */
+#define MII_BCM54XX_LRELDSE 0x0a /* LDS Expansion Register */
+#define MII_BCM54XX_LREES 0x0f /* LRE Extended Status */
+
+/* LRE control register. */
+#define LRECR_RESET 0x8000 /* Reset to default state */
+#define LRECR_LOOPBACK 0x4000 /* Internal Loopback */
+#define LRECR_LDSRES 0x2000 /* Restart LDS Process */
+#define LRECR_LDSEN 0x1000 /* LDS Enable */
+#define LRECR_PDOWN 0x0800 /* Enable low power state */
+#define LRECR_ISOLATE 0x0400 /* Isolate data paths from MII */
+#define LRECR_SPEED100 0x0200 /* Select 100 Mbps */
+#define LRECR_SPEED10 0x0000 /* Select 10 Mbps */
+#define LRECR_4PAIRS 0x0020 /* Select 4 Pairs */
+#define LRECR_2PAIRS 0x0010 /* Select 2 Pairs */
+#define LRECR_1PAIR 0x0000 /* Select 1 Pair */
+#define LRECR_MASTER 0x0008 /* Force Master when LDS disabled */
+#define LRECR_SLAVE 0x0000 /* Force Slave when LDS disabled */
+
+/* LRE status register. */
+#define LRESR_100_1PAIR 0x2000 /* Can do 100Mbps 1 Pair */
+#define LRESR_100_4PAIR 0x1000 /* Can do 100Mbps 4 Pairs */
+#define LRESR_100_2PAIR 0x0800 /* Can do 100Mbps 2 Pairs */
+#define LRESR_10_2PAIR 0x0400 /* Can do 10Mbps 2 Pairs */
+#define LRESR_10_1PAIR 0x0200 /* Can do 10Mbps 1 Pair */
+#define LRESR_ESTATEN 0x0100 /* Extended Status in R15 */
+#define LRESR_RESV 0x0080 /* Unused... */
+#define LRESR_MFPS 0x0040 /* Can suppress Management Frames Preamble */
+#define LRESR_LDSCOMPLETE 0x0020 /* LDS Auto-negotiation complete */
+#define LRESR_8023 0x0010 /* Has IEEE 802.3 Support */
+#define LRESR_LDSABILITY 0x0008 /* LDS auto-negotiation capable */
+#define LRESR_LSTATUS 0x0004 /* Link status */
+#define LRESR_JCD 0x0002 /* Jabber detected */
+#define LRESR_ERCAP 0x0001 /* Ext-reg capability */
+
+/* LDS Auto-Negotiation Advertised Ability. */
+#define LREANAA_PAUSE_ASYM 0x8000 /* Can pause asymmetrically */
+#define LREANAA_PAUSE 0x4000 /* Can pause */
+#define LREANAA_100_1PAIR 0x0020 /* Can do 100Mbps 1 Pair */
+#define LREANAA_100_4PAIR 0x0010 /* Can do 100Mbps 4 Pair */
+#define LREANAA_100_2PAIR 0x0008 /* Can do 100Mbps 2 Pair */
+#define LREANAA_10_2PAIR 0x0004 /* Can do 10Mbps 2 Pair */
+#define LREANAA_10_1PAIR 0x0002 /* Can do 10Mbps 1 Pair */
+
+#define LRE_ADVERTISE_FULL (LREANAA_100_1PAIR | LREANAA_100_4PAIR | \
+ LREANAA_100_2PAIR | LREANAA_10_2PAIR | \
+ LREANAA_10_1PAIR)
+
+#define LRE_ADVERTISE_ALL LRE_ADVERTISE_FULL
+
+/* LDS Link Partner Ability. */
+#define LRELPA_PAUSE_ASYM 0x8000 /* Supports asymmetric pause */
+#define LRELPA_PAUSE 0x4000 /* Supports pause capability */
+#define LRELPA_100_1PAIR 0x0020 /* 100Mbps 1 Pair capable */
+#define LRELPA_100_4PAIR 0x0010 /* 100Mbps 4 Pair capable */
+#define LRELPA_100_2PAIR 0x0008 /* 100Mbps 2 Pair capable */
+#define LRELPA_10_2PAIR 0x0004 /* 10Mbps 2 Pair capable */
+#define LRELPA_10_1PAIR 0x0002 /* 10Mbps 1 Pair capable */
+
+/* LDS Expansion register. */
+#define LDSE_DOWNGRADE 0x8000 /* Can do LDS Speed Downgrade */
+#define LDSE_MASTER 0x4000 /* Master / Slave */
+#define LDSE_PAIRS_MASK 0x3000 /* Pair Count Mask */
+#define LDSE_PAIRS_SHIFT 12
+#define LDSE_4PAIRS (2 << LDSE_PAIRS_SHIFT) /* 4 Pairs Connection */
+#define LDSE_2PAIRS (1 << LDSE_PAIRS_SHIFT) /* 2 Pairs Connection */
+#define LDSE_1PAIR (0 << LDSE_PAIRS_SHIFT) /* 1 Pair Connection */
+#define LDSE_CABLEN_MASK 0x0FFF /* Cable Length Mask */
+
/* BCM54810 Registers */
#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL (MII_BCM54XX_EXP_SEL_ER + 0x90)
#define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0)
#define BCM54810_SHD_CLK_CTL 0x3
#define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9)
+/* BCM54811 Registers */
+#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL (MII_BCM54XX_EXP_SEL_ER + 0x9A)
+/* Access Control Override Enable */
+#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_EN BIT(15)
+/* Access Control Override Value */
+#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_OVERRIDE_VAL BIT(14)
+/* Access Control Value */
+#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_VAL BIT(13)
+
/* BCM54612E Registers */
#define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34)
#define BCM54612E_LED4_CLK125OUT_EN (1 << 1)
+
+/* Wake-on-LAN registers */
+#define BCM54XX_WOL_MAIN_CTL (MII_BCM54XX_EXP_SEL_WOL + 0x80)
+#define BCM54XX_WOL_EN BIT(0)
+#define BCM54XX_WOL_MODE_SINGLE_MPD 0
+#define BCM54XX_WOL_MODE_SINGLE_MPDSEC 1
+#define BCM54XX_WOL_MODE_DUAL 2
+#define BCM54XX_WOL_MODE_SHIFT 1
+#define BCM54XX_WOL_MODE_MASK 0x3
+#define BCM54XX_WOL_MP_MSB_FF_EN BIT(3)
+#define BCM54XX_WOL_SECKEY_OPT_4B 0
+#define BCM54XX_WOL_SECKEY_OPT_6B 1
+#define BCM54XX_WOL_SECKEY_OPT_8B 2
+#define BCM54XX_WOL_SECKEY_OPT_SHIFT 4
+#define BCM54XX_WOL_SECKEY_OPT_MASK 0x3
+#define BCM54XX_WOL_L2_TYPE_CHK BIT(6)
+#define BCM54XX_WOL_L4IPV4UDP_CHK BIT(7)
+#define BCM54XX_WOL_L4IPV6UDP_CHK BIT(8)
+#define BCM54XX_WOL_UDPPORT_CHK BIT(9)
+#define BCM54XX_WOL_CRC_CHK BIT(10)
+#define BCM54XX_WOL_SECKEY_MODE BIT(11)
+#define BCM54XX_WOL_RST BIT(12)
+#define BCM54XX_WOL_DIR_PKT_EN BIT(13)
+#define BCM54XX_WOL_MASK_MODE_DA_FF 0
+#define BCM54XX_WOL_MASK_MODE_DA_MPD 1
+#define BCM54XX_WOL_MASK_MODE_DA_ONLY 2
+#define BCM54XX_WOL_MASK_MODE_MPD 3
+#define BCM54XX_WOL_MASK_MODE_SHIFT 14
+#define BCM54XX_WOL_MASK_MODE_MASK 0x3
+
+#define BCM54XX_WOL_INNER_PROTO (MII_BCM54XX_EXP_SEL_WOL + 0x81)
+#define BCM54XX_WOL_OUTER_PROTO (MII_BCM54XX_EXP_SEL_WOL + 0x82)
+#define BCM54XX_WOL_OUTER_PROTO2 (MII_BCM54XX_EXP_SEL_WOL + 0x83)
+
+#define BCM54XX_WOL_MPD_DATA1(x) (MII_BCM54XX_EXP_SEL_WOL + 0x84 + (x))
+#define BCM54XX_WOL_MPD_DATA2(x) (MII_BCM54XX_EXP_SEL_WOL + 0x87 + (x))
+#define BCM54XX_WOL_SEC_KEY_8B (MII_BCM54XX_EXP_SEL_WOL + 0x8A)
+#define BCM54XX_WOL_MASK(x) (MII_BCM54XX_EXP_SEL_WOL + 0x8B + (x))
+#define BCM54XX_SEC_KEY_STORE(x) (MII_BCM54XX_EXP_SEL_WOL + 0x8E)
+#define BCM54XX_WOL_SHARED_CNT (MII_BCM54XX_EXP_SEL_WOL + 0x92)
+
+#define BCM54XX_WOL_INT_MASK (MII_BCM54XX_EXP_SEL_WOL + 0x93)
+#define BCM54XX_WOL_PKT1 BIT(0)
+#define BCM54XX_WOL_PKT2 BIT(1)
+#define BCM54XX_WOL_DIR BIT(2)
+#define BCM54XX_WOL_ALL_INTRS (BCM54XX_WOL_PKT1 | \
+ BCM54XX_WOL_PKT2 | \
+ BCM54XX_WOL_DIR)
+
+#define BCM54XX_WOL_INT_STATUS (MII_BCM54XX_EXP_SEL_WOL + 0x94)
+
+/* BCM5221 Registers */
+#define BCM5221_AEGSR 0x1C
+#define BCM5221_AEGSR_MDIX_STATUS BIT(13)
+#define BCM5221_AEGSR_MDIX_MAN_SWAP BIT(12)
+#define BCM5221_AEGSR_MDIX_DIS BIT(11)
+
+#define BCM5221_SHDW_AM4_EN_CLK_LPM BIT(2)
+#define BCM5221_SHDW_AM4_FORCE_LPM BIT(1)
+
/*****************************************************************************/
/* Fast Ethernet Transceiver definitions. */
/*****************************************************************************/
@@ -281,6 +457,7 @@
#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */
#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */
+#define MII_BRCM_FET_SHDW_AM4_STANDBY 0x0008 /* Standby enable */
#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003
#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001
@@ -291,6 +468,8 @@
#define LPI_FEATURE_EN 0x8000
#define LPI_FEATURE_EN_DIG1000X 0x4000
+#define BRCM_CL45VEN_EEE_LPI_CNT 0x803f
+
/* Core register definitions*/
#define MII_BRCM_CORE_BASE12 0x12
#define MII_BRCM_CORE_BASE13 0x13
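A hedged sketch of composing the Wake-on-LAN main control word from the field macros added above. demo_phy_write_exp() stands in for the driver's expansion-register write helper and is hypothetical:

static int demo_wol_enable(struct phy_device *phydev)
{
	u16 ctl = 0;

	ctl |= BCM54XX_WOL_EN;
	/* single magic packet, match on destination address only */
	ctl |= BCM54XX_WOL_MODE_SINGLE_MPD << BCM54XX_WOL_MODE_SHIFT;
	ctl |= BCM54XX_WOL_MASK_MODE_DA_ONLY << BCM54XX_WOL_MASK_MODE_SHIFT;

	return demo_phy_write_exp(phydev, BCM54XX_WOL_MAIN_CTL, ctl);
}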
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 960988d42f77..14fa93268630 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -10,8 +10,8 @@
#define _BLK_BSG_
#include <linux/blkdev.h>
-#include <scsi/scsi_request.h>
+struct bsg_job;
struct request;
struct device;
struct scatterlist;
@@ -65,7 +65,8 @@ struct bsg_job {
void bsg_job_done(struct bsg_job *job, int result,
unsigned int reply_payload_rcv_len);
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
- bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size);
+ struct queue_limits *lim, bsg_job_fn *job_fn,
+ bsg_timeout_fn *timeout, int dd_job_size);
void bsg_remove_queue(struct request_queue *q);
void bsg_job_put(struct bsg_job *job);
int __must_check bsg_job_get(struct bsg_job *job);
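bsg_setup_queue() now takes a struct queue_limits so transport limits are applied at queue allocation instead of being patched in afterwards. A hedged caller-side sketch; everything except bsg_setup_queue() and queue_limits is hypothetical:

static bsg_job_fn demo_bsg_job;		/* hypothetical handlers */
static bsg_timeout_fn demo_bsg_timeout;

static struct request_queue *demo_attach_bsg(struct device *dev)
{
	struct queue_limits lim = {
		.max_hw_sectors = 1024,	/* hypothetical transport cap */
	};

	return bsg_setup_queue(dev, dev_name(dev), &lim,
			       demo_bsg_job, demo_bsg_timeout, 0);
}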
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
index dac37b6e00ec..ee2df73edf83 100644
--- a/include/linux/bsg.h
+++ b/include/linux/bsg.h
@@ -4,36 +4,16 @@
#include <uapi/linux/bsg.h>
-struct request;
+struct bsg_device;
+struct device;
+struct request_queue;
-#ifdef CONFIG_BLK_DEV_BSG
-struct bsg_ops {
- int (*check_proto)(struct sg_io_v4 *hdr);
- int (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr,
- fmode_t mode);
- int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr);
- void (*free_rq)(struct request *rq);
-};
+typedef int (bsg_sg_io_fn)(struct request_queue *, struct sg_io_v4 *hdr,
+ bool open_for_write, unsigned int timeout);
-struct bsg_class_device {
- struct device *class_dev;
- int minor;
- struct request_queue *queue;
- const struct bsg_ops *ops;
-};
+struct bsg_device *bsg_register_queue(struct request_queue *q,
+ struct device *parent, const char *name,
+ bsg_sg_io_fn *sg_io_fn);
+void bsg_unregister_queue(struct bsg_device *bcd);
-int bsg_register_queue(struct request_queue *q, struct device *parent,
- const char *name, const struct bsg_ops *ops);
-int bsg_scsi_register_queue(struct request_queue *q, struct device *parent);
-void bsg_unregister_queue(struct request_queue *q);
-#else
-static inline int bsg_scsi_register_queue(struct request_queue *q,
- struct device *parent)
-{
- return 0;
-}
-static inline void bsg_unregister_queue(struct request_queue *q)
-{
-}
-#endif /* CONFIG_BLK_DEV_BSG */
#endif /* _LINUX_BSG_H */
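With the bsg_ops table gone, a driver now supplies a single sg_io handler at registration. A hedged sketch; the demo_* names are hypothetical:

static int demo_sg_io(struct request_queue *q, struct sg_io_v4 *hdr,
		      bool open_for_write, unsigned int timeout)
{
	/* a real handler would build, issue and complete the request */
	return -EOPNOTSUPP;
}

static struct bsg_device *demo_register(struct request_queue *q,
					struct device *parent)
{
	return bsg_register_queue(q, parent, "demo-bsg", demo_sg_io);
}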
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 3bac66e0183a..f06976ffb63f 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -5,23 +5,147 @@
#define _LINUX_BTF_H 1
#include <linux/types.h>
+#include <linux/bpfptr.h>
+#include <linux/bsearch.h>
+#include <linux/btf_ids.h>
#include <uapi/linux/btf.h>
#include <uapi/linux/bpf.h>
#define BTF_TYPE_EMIT(type) ((void)(type *)0)
#define BTF_TYPE_EMIT_ENUM(enum_val) ((void)enum_val)
+/* These need to be macros, as the expressions are used in assembler input */
+#define KF_ACQUIRE (1 << 0) /* kfunc is an acquire function */
+#define KF_RELEASE (1 << 1) /* kfunc is a release function */
+#define KF_RET_NULL (1 << 2) /* kfunc returns a pointer that may be NULL */
+/* Trusted arguments are those which are guaranteed to be valid when passed to
+ * the kfunc. The flag is used to enforce that pointers obtained from either
+ * acquire kfuncs, or from the main kernel on a tracepoint or struct_ops
+ * callback invocation, remain unmodified when passed to helpers taking
+ * trusted args.
+ *
+ * Consider, for example, the following new task tracepoint:
+ *
+ * SEC("tp_btf/task_newtask")
+ * int BPF_PROG(new_task_tp, struct task_struct *task, u64 clone_flags)
+ * {
+ * ...
+ * }
+ *
+ * And the following kfunc:
+ *
+ * BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
+ *
+ * All invocations of the kfunc must pass the unmodified, unwalked task:
+ *
+ * bpf_task_acquire(task); // Allowed
+ * bpf_task_acquire(task->last_wakee); // Rejected, walked task
+ *
+ * Programs may also pass referenced tasks directly to the kfunc:
+ *
+ * struct task_struct *acquired;
+ *
+ * acquired = bpf_task_acquire(task); // Allowed, same as above
+ * bpf_task_acquire(acquired); // Allowed
+ * bpf_task_acquire(task); // Allowed
+ * bpf_task_acquire(acquired->last_wakee); // Rejected, walked task
+ *
+ * Programs may _not_, however, pass a task from an arbitrary fentry/fexit, or
+ * kprobe/kretprobe to the kfunc, as BPF cannot guarantee that all of these
+ * pointers are safe. For example, the following BPF program
+ * would be rejected:
+ *
+ * SEC("kretprobe/free_task")
+ * int BPF_PROG(free_task_probe, struct task_struct *tsk)
+ * {
+ * struct task_struct *acquired;
+ *
+ * acquired = bpf_task_acquire(tsk); // Rejected, not a trusted pointer
+ * bpf_task_release(acquired);
+ *
+ * return 0;
+ * }
+ */
+#define KF_TRUSTED_ARGS (1 << 4) /* kfunc only takes trusted pointer arguments */
+#define KF_SLEEPABLE (1 << 5) /* kfunc may sleep */
+#define KF_DESTRUCTIVE (1 << 6) /* kfunc performs destructive actions */
+#define KF_RCU (1 << 7) /* kfunc takes either rcu or trusted pointer arguments */
+/* only one of KF_ITER_{NEW,NEXT,DESTROY} may be specified per kfunc */
+#define KF_ITER_NEW (1 << 8) /* kfunc implements BPF iter constructor */
+#define KF_ITER_NEXT (1 << 9) /* kfunc implements BPF iter next method */
+#define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */
+#define KF_RCU_PROTECTED (1 << 11) /* kfunc must be invoked in an RCU critical section */
+#define KF_FASTCALL (1 << 12) /* kfunc supports bpf_fastcall protocol */
+#define KF_ARENA_RET (1 << 13) /* kfunc returns an arena pointer */
+#define KF_ARENA_ARG1 (1 << 14) /* kfunc takes an arena pointer as its first argument */
+#define KF_ARENA_ARG2 (1 << 15) /* kfunc takes an arena pointer as its second argument */
+
+/*
+ * Tag marking a kernel function as a kfunc. This minimizes the boilerplate
+ * that kfunc authors must copy in for correctness, e.g. to keep the compiler
+ * from inlining or eliding either a static kfunc, or a global kfunc in an
+ * LTO build.
+ */
+#define __bpf_kfunc __used __retain __noclone noinline
+
+#define __bpf_kfunc_start_defs() \
+ __diag_push(); \
+ __diag_ignore_all("-Wmissing-declarations", \
+ "Global kfuncs as their definitions will be in BTF");\
+ __diag_ignore_all("-Wmissing-prototypes", \
+ "Global kfuncs as their definitions will be in BTF")
+
+#define __bpf_kfunc_end_defs() __diag_pop()
+#define __bpf_hook_start() __bpf_kfunc_start_defs()
+#define __bpf_hook_end() __bpf_kfunc_end_defs()
+
+/*
+ * Return the name of the passed struct, if exists, or halt the build if for
+ * example the structure gets renamed. In this way, developers have to revisit
+ * the code using that structure name, and update it accordingly.
+ */
+#define stringify_struct(x) \
+ ({ BUILD_BUG_ON(sizeof(struct x) < 0); \
+ __stringify(x); })
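A hedged usage sketch: because stringify_struct() evaluates sizeof(struct x), renaming the structure turns this line into a build failure instead of leaving a stale string behind. struct demo_ops is hypothetical:

/* breaks the build if 'struct demo_ops' is ever renamed */
pr_info("registering %s\n", stringify_struct(demo_ops));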
+
struct btf;
struct btf_member;
struct btf_type;
union bpf_attr;
struct btf_show;
+struct btf_id_set;
+struct bpf_prog;
+
+typedef int (*btf_kfunc_filter_t)(const struct bpf_prog *prog, u32 kfunc_id);
+
+struct btf_kfunc_id_set {
+ struct module *owner;
+ struct btf_id_set8 *set;
+ btf_kfunc_filter_t filter;
+};
+
+struct btf_id_dtor_kfunc {
+ u32 btf_id;
+ u32 kfunc_btf_id;
+};
+
+struct btf_struct_meta {
+ u32 btf_id;
+ struct btf_record *record;
+};
+
+struct btf_struct_metas {
+ u32 cnt;
+ struct btf_struct_meta types[];
+};
extern const struct file_operations btf_fops;
+const char *btf_get_name(const struct btf *btf);
void btf_get(struct btf *btf);
void btf_put(struct btf *btf);
-int btf_new_fd(const union bpf_attr *attr);
+const struct btf_header *btf_header(const struct btf *btf);
+int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz);
struct btf *btf_get_by_fd(int fd);
int btf_get_info_by_fd(const struct btf *btf,
const union bpf_attr *attr,
@@ -93,14 +217,22 @@ int btf_get_fd_by_id(u32 id);
u32 btf_obj_id(const struct btf *btf);
bool btf_is_kernel(const struct btf *btf);
bool btf_is_module(const struct btf *btf);
+bool btf_is_vmlinux(const struct btf *btf);
struct module *btf_try_get_module(const struct btf *btf);
u32 btf_nr_types(const struct btf *btf);
+struct btf *btf_base_btf(const struct btf *btf);
+bool btf_type_is_i32(const struct btf_type *t);
+bool btf_type_is_i64(const struct btf_type *t);
+bool btf_type_is_primitive(const struct btf_type *t);
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
-int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
+struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
+ u32 field_mask, u32 value_size);
+int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec);
bool btf_type_is_void(const struct btf_type *t);
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
+s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p);
const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
u32 id, u32 *res_id);
const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
@@ -137,21 +269,111 @@ static inline bool btf_type_is_small_int(const struct btf_type *t)
return btf_type_is_int(t) && t->size <= sizeof(u64);
}
+static inline u8 btf_int_encoding(const struct btf_type *t)
+{
+ return BTF_INT_ENCODING(*(u32 *)(t + 1));
+}
+
+static inline bool btf_type_is_signed_int(const struct btf_type *t)
+{
+ return btf_type_is_int(t) && (btf_int_encoding(t) & BTF_INT_SIGNED);
+}
+
static inline bool btf_type_is_enum(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
}
+static inline bool btf_is_any_enum(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM ||
+ BTF_INFO_KIND(t->info) == BTF_KIND_ENUM64;
+}
+
+static inline bool btf_kind_core_compat(const struct btf_type *t1,
+ const struct btf_type *t2)
+{
+ return BTF_INFO_KIND(t1->info) == BTF_INFO_KIND(t2->info) ||
+ (btf_is_any_enum(t1) && btf_is_any_enum(t2));
+}
+
+static inline bool str_is_empty(const char *s)
+{
+ return !s || !s[0];
+}
+
+static inline u16 btf_kind(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info);
+}
+
+static inline bool btf_is_enum(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ENUM;
+}
+
+static inline bool btf_is_enum64(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ENUM64;
+}
+
+static inline u64 btf_enum64_value(const struct btf_enum64 *e)
+{
+ return ((u64)e->val_hi32 << 32) | e->val_lo32;
+}
+
+static inline bool btf_is_composite(const struct btf_type *t)
+{
+ u16 kind = btf_kind(t);
+
+ return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
+}
+
+static inline bool btf_is_array(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ARRAY;
+}
+
+static inline bool btf_is_int(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_INT;
+}
+
+static inline bool btf_is_ptr(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_PTR;
+}
+
+static inline u8 btf_int_offset(const struct btf_type *t)
+{
+ return BTF_INT_OFFSET(*(u32 *)(t + 1));
+}
+
+static inline __u8 btf_int_bits(const struct btf_type *t)
+{
+ return BTF_INT_BITS(*(__u32 *)(t + 1));
+}
+
static inline bool btf_type_is_scalar(const struct btf_type *t)
{
return btf_type_is_int(t) || btf_type_is_enum(t);
}
+static inline bool btf_type_is_fwd(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
+}
+
static inline bool btf_type_is_typedef(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF;
}
+static inline bool btf_type_is_volatile(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_VOLATILE;
+}
+
static inline bool btf_type_is_func(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC;
@@ -167,6 +389,11 @@ static inline bool btf_type_is_var(const struct btf_type *t)
return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
}
+static inline bool btf_type_is_type_tag(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG;
+}
+
/* union is only a special case of struct:
* all its offsetof(member) == 0
*/
@@ -177,11 +404,26 @@ static inline bool btf_type_is_struct(const struct btf_type *t)
return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
}
+static inline bool __btf_type_is_struct(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
+}
+
+static inline bool btf_type_is_array(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
+}
+
static inline u16 btf_type_vlen(const struct btf_type *t)
{
return BTF_INFO_VLEN(t->info);
}
+static inline u16 btf_vlen(const struct btf_type *t)
+{
+ return btf_type_vlen(t);
+}
+
static inline u16 btf_func_linkage(const struct btf_type *t)
{
return BTF_INFO_VLEN(t->info);
@@ -192,49 +434,253 @@ static inline bool btf_type_kflag(const struct btf_type *t)
return BTF_INFO_KFLAG(t->info);
}
-static inline u32 btf_member_bit_offset(const struct btf_type *struct_type,
- const struct btf_member *member)
+static inline u32 __btf_member_bit_offset(const struct btf_type *struct_type,
+ const struct btf_member *member)
{
return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
: member->offset;
}
-static inline u32 btf_member_bitfield_size(const struct btf_type *struct_type,
- const struct btf_member *member)
+static inline u32 __btf_member_bitfield_size(const struct btf_type *struct_type,
+ const struct btf_member *member)
{
return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
: 0;
}
+static inline struct btf_member *btf_members(const struct btf_type *t)
+{
+ return (struct btf_member *)(t + 1);
+}
+
+static inline u32 btf_member_bit_offset(const struct btf_type *t, u32 member_idx)
+{
+ const struct btf_member *m = btf_members(t) + member_idx;
+
+ return __btf_member_bit_offset(t, m);
+}
+
+static inline u32 btf_member_bitfield_size(const struct btf_type *t, u32 member_idx)
+{
+ const struct btf_member *m = btf_members(t) + member_idx;
+
+ return __btf_member_bitfield_size(t, m);
+}
+
static inline const struct btf_member *btf_type_member(const struct btf_type *t)
{
return (const struct btf_member *)(t + 1);
}
+static inline struct btf_array *btf_array(const struct btf_type *t)
+{
+ return (struct btf_array *)(t + 1);
+}
+
+static inline struct btf_enum *btf_enum(const struct btf_type *t)
+{
+ return (struct btf_enum *)(t + 1);
+}
+
+static inline struct btf_enum64 *btf_enum64(const struct btf_type *t)
+{
+ return (struct btf_enum64 *)(t + 1);
+}
+
static inline const struct btf_var_secinfo *btf_type_var_secinfo(
const struct btf_type *t)
{
return (const struct btf_var_secinfo *)(t + 1);
}
-#ifdef CONFIG_BPF_SYSCALL
-struct bpf_prog;
+static inline struct btf_param *btf_params(const struct btf_type *t)
+{
+ return (struct btf_param *)(t + 1);
+}
+
+static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t)
+{
+ return (struct btf_decl_tag *)(t + 1);
+}
+
+static inline int btf_id_cmp_func(const void *a, const void *b)
+{
+ const int *pa = a, *pb = b;
+
+ return *pa - *pb;
+}
+
+static inline bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
+{
+ return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
+}
+
+static inline void *btf_id_set8_contains(const struct btf_id_set8 *set, u32 id)
+{
+ return bsearch(&id, set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func);
+}
+
+bool btf_param_match_suffix(const struct btf *btf,
+ const struct btf_param *arg,
+ const char *suffix);
+int btf_ctx_arg_offset(const struct btf *btf, const struct btf_type *func_proto,
+ u32 arg_no);
+u32 btf_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto, int off);
+
+struct bpf_verifier_log;
+
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
+struct bpf_struct_ops;
+int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops);
+const struct bpf_struct_ops_desc *bpf_struct_ops_find_value(struct btf *btf, u32 value_id);
+const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id);
+#else
+static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id)
+{
+ return NULL;
+}
+#endif
+enum btf_field_iter_kind {
+ BTF_FIELD_ITER_IDS,
+ BTF_FIELD_ITER_STRS,
+};
+
+struct btf_field_desc {
+ /* once-per-type offsets */
+ int t_off_cnt, t_offs[2];
+ /* member struct size, or zero, if no members */
+ int m_sz;
+ /* repeated per-member offsets */
+ int m_off_cnt, m_offs[1];
+};
+
+struct btf_field_iter {
+ struct btf_field_desc desc;
+ void *p;
+ int m_idx;
+ int off_idx;
+ int vlen;
+};
+
+#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
+void btf_set_base_btf(struct btf *btf, const struct btf *base_btf);
+int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **map_ids);
+int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
+ enum btf_field_iter_kind iter_kind);
+__u32 *btf_field_iter_next(struct btf_field_iter *it);
+
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
+const char *btf_str_by_offset(const struct btf *btf, u32 offset);
struct btf *btf_parse_vmlinux(void);
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
+u32 *btf_kfunc_id_set_contains(const struct btf *btf, u32 kfunc_btf_id,
+ const struct bpf_prog *prog);
+u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
+ const struct bpf_prog *prog);
+int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
+ const struct btf_kfunc_id_set *s);
+int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset);
+s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id);
+int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
+ struct module *owner);
+struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id);
+bool btf_is_projection_of(const char *pname, const char *tname);
+bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, enum bpf_prog_type prog_type,
+ int arg);
+int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type);
+bool btf_types_are_same(const struct btf *btf1, u32 id1,
+ const struct btf *btf2, u32 id2);
+int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx);
+
+static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t)
+{
+ if (!btf_type_is_ptr(t))
+ return false;
+
+ t = btf_type_skip_modifiers(btf, t->type, NULL);
+
+ return btf_type_is_struct(t);
+}
#else
static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
u32 type_id)
{
return NULL;
}
+
+static inline void btf_set_base_btf(struct btf *btf, const struct btf *base_btf)
+{
+}
+
+static inline int btf_relocate(struct btf *btf, const struct btf *base_btf,
+ __u32 **map_ids)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t,
+ enum btf_field_iter_kind iter_kind)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline __u32 *btf_field_iter_next(struct btf_field_iter *it)
+{
+ return NULL;
+}
+
static inline const char *btf_name_by_offset(const struct btf *btf,
u32 offset)
{
return NULL;
}
-#endif
+static inline u32 *btf_kfunc_id_set_contains(const struct btf *btf,
+ u32 kfunc_btf_id,
+ const struct bpf_prog *prog)
+{
+ return NULL;
+}
+static inline int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
+ const struct btf_kfunc_id_set *s)
+{
+ return 0;
+}
+static inline s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
+{
+ return -ENOENT;
+}
+static inline int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors,
+ u32 add_cnt, struct module *owner)
+{
+ return 0;
+}
+static inline struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id)
+{
+ return NULL;
+}
+static inline bool
+btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
+ const struct btf_type *t, enum bpf_prog_type prog_type,
+ int arg)
+{
+ return false;
+}
+static inline int get_kern_ctx_btf_id(struct bpf_verifier_log *log,
+ enum bpf_prog_type prog_type)
+{
+ return -EINVAL;
+}
+static inline bool btf_types_are_same(const struct btf *btf1, u32 id1,
+ const struct btf *btf2, u32 id2)
+{
+ return false;
+}
+static inline int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx)
+{
+ return -EOPNOTSUPP;
+}
+#endif
#endif
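A hedged sketch tying the new index-based member accessors together: walk a BTF_KIND_STRUCT/UNION type and print each member's offset. demo_dump_members() is hypothetical; the btf_* helpers are the ones declared above:

static void demo_dump_members(const struct btf *btf, const struct btf_type *t)
{
	u16 i;

	for (i = 0; i < btf_type_vlen(t); i++) {
		const struct btf_member *m = btf_type_member(t) + i;

		pr_info("%s: bit_offset=%u bitfield_size=%u\n",
			btf_name_by_offset(btf, m->name_off),
			btf_member_bit_offset(t, i),
			btf_member_bitfield_size(t, i));
	}
}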
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index 57890b357f85..139bdececdcf 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -3,14 +3,30 @@
#ifndef _LINUX_BTF_IDS_H
#define _LINUX_BTF_IDS_H
+#include <linux/types.h> /* for u32 */
+
struct btf_id_set {
u32 cnt;
u32 ids[];
};
+/* This flag implies BTF_SET8 holds kfunc(s) */
+#define BTF_SET8_KFUNCS (1 << 0)
+
+struct btf_id_set8 {
+ u32 cnt;
+ u32 flags;
+ struct {
+ u32 id;
+ u32 flags;
+ } pairs[];
+};
+
#ifdef CONFIG_DEBUG_INFO_BTF
#include <linux/compiler.h> /* for __PASTE */
+#include <linux/compiler_attributes.h> /* for __maybe_unused */
+#include <linux/stringify.h>
/*
* Following macros help to define lists of BTF IDs placed
@@ -24,7 +40,7 @@ struct btf_id_set {
#define BTF_IDS_SECTION ".BTF_ids"
-#define ____BTF_ID(symbol) \
+#define ____BTF_ID(symbol, word) \
asm( \
".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
".local " #symbol " ; \n" \
@@ -32,20 +48,28 @@ asm( \
".size " #symbol ", 4; \n" \
#symbol ": \n" \
".zero 4 \n" \
+word \
".popsection; \n");
-#define __BTF_ID(symbol) \
- ____BTF_ID(symbol)
+#define __BTF_ID(symbol, word) \
+ ____BTF_ID(symbol, word)
#define __ID(prefix) \
- __PASTE(prefix, __COUNTER__)
+ __PASTE(__PASTE(prefix, __COUNTER__), __LINE__)
/*
* The BTF_ID defines unique symbol for each ID pointing
* to 4 zero bytes.
*/
#define BTF_ID(prefix, name) \
- __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__))
+ __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__), "")
+
+#define ____BTF_ID_FLAGS(prefix, name, flags) \
+ __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__), ".long " #flags "\n")
+#define __BTF_ID_FLAGS(prefix, name, flags, ...) \
+ ____BTF_ID_FLAGS(prefix, name, flags)
+#define BTF_ID_FLAGS(prefix, name, ...) \
+ __BTF_ID_FLAGS(prefix, name, ##__VA_ARGS__, 0)
/*
* The BTF_ID_LIST macro defines pure (unsorted) list
@@ -73,7 +97,7 @@ asm( \
__BTF_ID_LIST(name, local) \
extern u32 name[];
-#define BTF_ID_LIST_GLOBAL(name) \
+#define BTF_ID_LIST_GLOBAL(name, n) \
__BTF_ID_LIST(name, globl)
/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
@@ -82,6 +106,9 @@ __BTF_ID_LIST(name, globl)
#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
BTF_ID_LIST(name) \
BTF_ID(prefix, typename)
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST_GLOBAL(name, 1) \
+ BTF_ID(prefix, typename)
/*
* The BTF_ID_UNUSED macro defines 4 zero bytes.
@@ -141,16 +168,69 @@ asm( \
".popsection; \n"); \
extern struct btf_id_set name;
+/*
+ * The BTF_SET8_START/END macro pair defines a sorted list of
+ * BTF IDs and their flags, plus its member count, with the
+ * following layout:
+ *
+ * BTF_SET8_START(list)
+ * BTF_ID_FLAGS(type1, name1, flags)
+ * BTF_ID_FLAGS(type2, name2, flags)
+ * BTF_SET8_END(list)
+ *
+ * __BTF_ID__set8__list:
+ * .zero 8
+ * list:
+ * __BTF_ID__type1__name1__3:
+ * .zero 4
+ * .long (1 << 0) | (1 << 2)
+ * __BTF_ID__type2__name2__5:
+ * .zero 4
+ * .long (1 << 3) | (1 << 1) | (1 << 2)
+ *
+ */
+#define __BTF_SET8_START(name, scope, flags) \
+__BTF_ID_LIST(name, local) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " __BTF_ID__set8__" #name "; \n" \
+"__BTF_ID__set8__" #name ":; \n" \
+".zero 4 \n" \
+".long " __stringify(flags) "\n" \
+".popsection; \n");
+
+#define BTF_SET8_START(name) \
+__BTF_SET8_START(name, local, 0)
+
+#define BTF_SET8_END(name) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".size __BTF_ID__set8__" #name ", .-" #name " \n" \
+".popsection; \n"); \
+extern struct btf_id_set8 name;
+
+#define BTF_KFUNCS_START(name) \
+__BTF_SET8_START(name, local, BTF_SET8_KFUNCS)
+
+#define BTF_KFUNCS_END(name) \
+BTF_SET8_END(name)
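A hedged sketch of the intended usage: declare kfuncs as a set8 and register it for one program type. bpf_demo_acquire()/bpf_demo_release() are hypothetical kfuncs; the macros, flags and registration hook are the ones defined in this patch:

BTF_KFUNCS_START(demo_kfunc_ids)
BTF_ID_FLAGS(func, bpf_demo_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_demo_release, KF_RELEASE)
BTF_KFUNCS_END(demo_kfunc_ids)

static const struct btf_kfunc_id_set demo_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &demo_kfunc_ids,
};

static int __init demo_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					 &demo_kfunc_set);
}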
+
#else
-#define BTF_ID_LIST(name) static u32 name[5];
+#define BTF_ID_LIST(name) static u32 __maybe_unused name[64];
#define BTF_ID(prefix, name)
+#define BTF_ID_FLAGS(prefix, name, ...)
#define BTF_ID_UNUSED
-#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
-#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
-#define BTF_SET_START(name) static struct btf_id_set name = { 0 };
-#define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 };
+#define BTF_ID_LIST_GLOBAL(name, n) u32 __maybe_unused name[n];
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 __maybe_unused name[1];
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 __maybe_unused name[1];
+#define BTF_SET_START(name) static struct btf_id_set __maybe_unused name = { 0 };
+#define BTF_SET_START_GLOBAL(name) static struct btf_id_set __maybe_unused name = { 0 };
#define BTF_SET_END(name)
+#define BTF_SET8_START(name) static struct btf_id_set8 __maybe_unused name = { 0 };
+#define BTF_SET8_END(name)
+#define BTF_KFUNCS_START(name) static struct btf_id_set8 __maybe_unused name = { .flags = BTF_SET8_KFUNCS };
+#define BTF_KFUNCS_END(name)
#endif /* CONFIG_DEBUG_INFO_BTF */
@@ -172,7 +252,10 @@ extern struct btf_id_set name;
BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \
BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \
- BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock)
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_MPTCP, mptcp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCKET, socket)
enum {
#define BTF_SOCK_TYPE(name, str) name,
@@ -184,4 +267,22 @@ MAX_BTF_SOCK_TYPE,
extern u32 btf_sock_ids[];
#endif
+#define BTF_TRACING_TYPE_xxx \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_TASK, task_struct) \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_FILE, file) \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_VMA, vm_area_struct)
+
+enum {
+#define BTF_TRACING_TYPE(name, type) name,
+BTF_TRACING_TYPE_xxx
+#undef BTF_TRACING_TYPE
+MAX_BTF_TRACING_TYPE,
+};
+
+extern u32 btf_tracing_ids[];
+extern u32 bpf_cgroup_btf_id[];
+extern u32 bpf_local_storage_map_btf_id[];
+extern u32 btf_bpf_map_id[];
+extern u32 bpf_kmem_cache_btf_id[];
+
#endif
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index e7e99da31349..b16b88bfbc3e 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -9,14 +9,13 @@
#define _LINUX_BUFFER_HEAD_H
#include <linux/types.h>
+#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>
-#ifdef CONFIG_BLOCK
-
enum bh_state_bits {
BH_Uptodate, /* Contains valid data */
BH_Dirty, /* Is dirty */
@@ -35,6 +34,7 @@ enum bh_state_bits {
BH_Meta, /* Buffer contains metadata */
BH_Prio, /* Buffer should be submitted with REQ_PRIO */
BH_Defer_Completion, /* Defer AIO completion to workqueue */
+ BH_Migrate, /* Buffer is being migrated (norefs) */
BH_PrivateStart,/* not a state bit, but the first bit available
* for private allocation by other entities
@@ -54,13 +54,16 @@ typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
* filesystem and block layers. Nowadays the basic I/O unit
* is the bio, and buffer_heads are used for extracting block
* mappings (via a get_block_t call), for tracking state within
- * a page (via a page_mapping) and for wrapping bio submission
+ * a folio (via a folio_mapping) and for wrapping bio submission
* for backward compatibility reasons (e.g. submit_bh).
*/
struct buffer_head {
unsigned long b_state; /* buffer state bitmap (see above) */
struct buffer_head *b_this_page;/* circular list of page's buffers */
- struct page *b_page; /* the page this bh is mapped to */
+ union {
+ struct page *b_page; /* the page this bh is mapped to */
+ struct folio *b_folio; /* the folio this bh is mapped to */
+ };
sector_t b_blocknr; /* start block number */
size_t b_size; /* size of mapping */
@@ -117,7 +120,6 @@ static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
* of the form "mark_buffer_foo()". These are higher-level functions which
* do something in addition to setting a b_state bit.
*/
-BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
@@ -135,7 +137,45 @@ BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
-#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
+static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
+{
+ /*
+ * If somebody else already set this uptodate, they will
+ * have done the memory barrier, and a reader will thus
+ * see *some* valid buffer state.
+ *
+ * Any other serialization (with I/O errors or anything else that
+ * might clear the bit) has to come from other state (e.g. BH_Lock).
+ */
+ if (test_bit(BH_Uptodate, &bh->b_state))
+ return;
+
+ /*
+ * Make it consistent with folio_mark_uptodate(); this barrier
+ * pairs with the acquire load in buffer_uptodate().
+ */
+ smp_mb__before_atomic();
+ set_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
+{
+ clear_bit(BH_Uptodate, &bh->b_state);
+}
+
+static __always_inline int buffer_uptodate(const struct buffer_head *bh)
+{
+ /*
+ * Make it consistent with folio_test_uptodate(); this acquire
+ * pairs with smp_mb__before_atomic() in set_buffer_uptodate().
+ */
+ return test_bit_acquire(BH_Uptodate, &bh->b_state);
+}
+
+static inline unsigned long bh_offset(const struct buffer_head *bh)
+{
+ return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
+}
/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page) \
@@ -143,9 +183,9 @@ BUFFER_FNS(Defer_Completion, defer_completion)
BUG_ON(!PagePrivate(page)); \
((struct buffer_head *)page_private(page)); \
})
-#define page_has_buffers(page) PagePrivate(page)
+#define folio_buffers(folio) folio_get_private(folio)
-void buffer_check_dirty_writeback(struct page *page,
+void buffer_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback);
/*
@@ -155,23 +195,22 @@ void buffer_check_dirty_writeback(struct page *page,
void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
-void set_bh_page(struct buffer_head *bh,
- struct page *page, unsigned long offset);
-int try_to_free_buffers(struct page *);
-struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
- bool retry);
-void create_empty_buffers(struct page *, unsigned long,
- unsigned long b_state);
+void folio_set_bh(struct buffer_head *bh, struct folio *folio,
+ unsigned long offset);
+struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
+ gfp_t gfp);
+struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size);
+struct buffer_head *create_empty_buffers(struct folio *folio,
+ unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
-void end_buffer_async_write(struct buffer_head *bh, int uptodate);
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
-int inode_has_buffers(struct inode *);
-void invalidate_inode_buffers(struct inode *);
-int remove_inode_buffers(struct inode *inode);
-int sync_mapping_buffers(struct address_space *mapping);
+int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
+ bool datasync);
+int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
+ bool datasync);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
@@ -184,91 +223,69 @@ void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
unsigned size);
-struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
- unsigned size, gfp_t gfp);
+struct buffer_head *__find_get_block_nonatomic(struct block_device *bdev,
+ sector_t block, unsigned size);
+struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
+ unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
-void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
- gfp_t gfp);
struct buffer_head *__bread_gfp(struct block_device *,
sector_t block, unsigned size, gfp_t gfp);
-void invalidate_bh_lrus(void);
-void invalidate_bh_lrus_cpu(int cpu);
-bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
-void ll_rw_block(int, int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
-int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
-void write_dirty_buffer(struct buffer_head *bh, int op_flags);
-int submit_bh(int, int, struct buffer_head *);
+int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
+void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
+void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
-int bh_submit_read(struct buffer_head *bh);
-
-extern int buffer_heads_over_limit;
+int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
+void __bh_read_batch(int nr, struct buffer_head *bhs[],
+ blk_opf_t op_flags, bool force_lock);
/*
* Generic address_space_operations implementations for buffer_head-backed
* address_spaces.
*/
-void block_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
-int block_write_full_page(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc);
-int __block_write_full_page(struct inode *inode, struct page *page,
- get_block_t *get_block, struct writeback_control *wbc,
- bh_end_io_t *handler);
-int block_read_full_page(struct page*, get_block_t*);
-int block_is_partially_uptodate(struct page *page, unsigned long from,
- unsigned long count);
+void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
+int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
+ void *get_block);
+int __block_write_full_folio(struct inode *inode, struct folio *folio,
+ get_block_t *get_block, struct writeback_control *wbc);
+int block_read_full_folio(struct folio *, get_block_t *);
+bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
- unsigned flags, struct page **pagep, get_block_t *get_block);
-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+ struct folio **foliop, get_block_t *get_block);
+int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block);
-int block_write_end(struct file *, struct address_space *,
- loff_t, unsigned, unsigned,
- struct page *, void *);
-int generic_write_end(struct file *, struct address_space *,
- loff_t, unsigned, unsigned,
- struct page *, void *);
-void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
-void clean_page_buffers(struct page *page);
-int cont_write_begin(struct file *, struct address_space *, loff_t,
- unsigned, unsigned, struct page **, void **,
+int block_write_end(loff_t pos, unsigned len, unsigned copied, struct folio *);
+int generic_write_end(const struct kiocb *, struct address_space *,
+ loff_t, unsigned len, unsigned copied,
+ struct folio *, void *);
+void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
+int cont_write_begin(const struct kiocb *, struct address_space *, loff_t,
+ unsigned, struct folio **, void **,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
-int block_commit_write(struct page *page, unsigned from, unsigned to);
+void block_commit_write(struct folio *folio, size_t from, size_t to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
-/* Convert errno to return value from ->page_mkwrite() call */
-static inline vm_fault_t block_page_mkwrite_return(int err)
-{
- if (err == 0)
- return VM_FAULT_LOCKED;
- if (err == -EFAULT || err == -EAGAIN)
- return VM_FAULT_NOPAGE;
- if (err == -ENOMEM)
- return VM_FAULT_OOM;
- /* -ENOSPC, -EDQUOT, -EIO ... */
- return VM_FAULT_SIGBUS;
-}
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
-int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
- struct page **, void **, get_block_t*);
-int nobh_write_end(struct file *, struct address_space *,
- loff_t, unsigned, unsigned,
- struct page *, void *);
-int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
-int nobh_writepage(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc);
-void buffer_init(void);
+#ifdef CONFIG_MIGRATION
+extern int buffer_migrate_folio(struct address_space *,
+ struct folio *dst, struct folio *src, enum migrate_mode);
+extern int buffer_migrate_folio_norefs(struct address_space *,
+ struct folio *dst, struct folio *src, enum migrate_mode);
+#else
+#define buffer_migrate_folio NULL
+#define buffer_migrate_folio_norefs NULL
+#endif
/*
* inline definitions
@@ -285,12 +302,38 @@ static inline void put_bh(struct buffer_head *bh)
atomic_dec(&bh->b_count);
}
+/**
+ * brelse - Release a buffer.
+ * @bh: The buffer to release.
+ *
+ * Decrement a buffer_head's reference count. If @bh is NULL, this
+ * function is a no-op.
+ *
+ * If all buffers on a folio have zero reference count, are clean
+ * and unlocked, and if the folio is unlocked and not under writeback
+ * then try_to_free_buffers() may strip the buffers from the folio in
+ * preparation for freeing it (sometimes, rarely, buffers are removed
+ * from a folio but it ends up not being freed, and buffers may later
+ * be reattached).
+ *
+ * Context: Any context.
+ */
static inline void brelse(struct buffer_head *bh)
{
if (bh)
__brelse(bh);
}
+/**
+ * bforget - Discard any dirty data in a buffer.
+ * @bh: The buffer to forget.
+ *
+ * Call this function instead of brelse() if the data written to a buffer
+ * no longer needs to be written back. It will clear the buffer's dirty
+ * flag so writeback of this buffer will be skipped.
+ *
+ * Context: Any context.
+ */
static inline void bforget(struct buffer_head *bh)
{
if (bh)
@@ -315,23 +358,38 @@ sb_breadahead(struct super_block *sb, sector_t block)
__breadahead(sb->s_bdev, block, sb->s_blocksize);
}
-static inline void
-sb_breadahead_unmovable(struct super_block *sb, sector_t block)
+static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
+ sector_t block, unsigned size)
{
- __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
+ gfp_t gfp;
+
+ gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
+ gfp |= __GFP_NOFAIL;
+
+ return bdev_getblk(bdev, block, size, gfp);
}
-static inline struct buffer_head *
-sb_getblk(struct super_block *sb, sector_t block)
+static inline struct buffer_head *__getblk(struct block_device *bdev,
+ sector_t block, unsigned size)
{
- return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
+ gfp_t gfp;
+
+ gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
+ gfp |= __GFP_MOVABLE | __GFP_NOFAIL;
+
+ return bdev_getblk(bdev, block, size, gfp);
}
+static inline struct buffer_head *sb_getblk(struct super_block *sb,
+ sector_t block)
+{
+ return __getblk(sb->s_bdev, block, sb->s_blocksize);
+}
-static inline struct buffer_head *
-sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
+static inline struct buffer_head *sb_getblk_gfp(struct super_block *sb,
+ sector_t block, gfp_t gfp)
{
- return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
+ return bdev_getblk(sb->s_bdev, block, sb->s_blocksize, gfp);
}
static inline struct buffer_head *
@@ -340,6 +398,12 @@ sb_find_get_block(struct super_block *sb, sector_t block)
return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}
+static inline struct buffer_head *
+sb_find_get_block_nonatomic(struct super_block *sb, sector_t block)
+{
+ return __find_get_block_nonatomic(sb->s_bdev, block, sb->s_blocksize);
+}
+
static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
@@ -368,49 +432,110 @@ static inline void lock_buffer(struct buffer_head *bh)
__lock_buffer(bh);
}
-static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
- sector_t block,
- unsigned size)
+static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
- return __getblk_gfp(bdev, block, size, 0);
+ if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
+ if (!buffer_uptodate(bh))
+ __bh_read(bh, op_flags, false);
+ else
+ unlock_buffer(bh);
+ }
}
-static inline struct buffer_head *__getblk(struct block_device *bdev,
- sector_t block,
- unsigned size)
+static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (!bh_uptodate_or_lock(bh))
+ __bh_read(bh, op_flags, false);
+}
+
+/* Returns 1 if the buffer was already uptodate, 0 on a successful read, and -EIO on error. */
+static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (bh_uptodate_or_lock(bh))
+ return 1;
+ return __bh_read(bh, op_flags, true);
+}
+
+static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
+{
+ __bh_read_batch(nr, bhs, 0, true);
+}
+
+static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
+ blk_opf_t op_flags)
{
- return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
+ __bh_read_batch(nr, bhs, op_flags, false);
}
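
A sketch of how the new read helpers compose with the getblk family above (block number invented; sb_getblk() cannot fail here since it passes __GFP_NOFAIL):

	struct buffer_head *bh = sb_getblk(sb, 42);
	int err = bh_read(bh, 0);	/* 1: already uptodate, 0: read done, <0: -EIO */

	if (err < 0) {
		brelse(bh);
		return err;
	}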
/**
- * __bread() - reads a specified block and returns the bh
- * @bdev: the block_device to read from
- * @block: number of block
- * @size: size (in bytes) to read
+ * __bread() - Read a block.
+ * @bdev: The block device to read from.
+ * @block: Block number in units of block size.
+ * @size: The block size of this device in bytes.
+ *
+ * Read a specified block, and return the buffer head that refers
+ * to it. The memory is allocated from the movable area so that it can
+ * be migrated. The returned buffer head has its refcount increased.
+ * The caller should call brelse() when it has finished with the buffer.
*
- * Reads a specified block, and returns buffer head that contains it.
- * The page cache is allocated from movable area so that it can be migrated.
- * It returns NULL if the block was unreadable.
+ * Context: May sleep waiting for I/O.
+ * Return: NULL if the block was unreadable.
*/
-static inline struct buffer_head *
-__bread(struct block_device *bdev, sector_t block, unsigned size)
+static inline struct buffer_head *__bread(struct block_device *bdev,
+ sector_t block, unsigned size)
{
return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}
-extern int __set_page_dirty_buffers(struct page *page);
+/**
+ * get_nth_bh - Get a reference on the n'th buffer after this one.
+ * @bh: The buffer to start counting from.
+ * @count: How many buffers to skip.
+ *
+ * This is primarily useful for finding the nth buffer in a folio; in
+ * that case you pass the head buffer and the byte offset in the folio
+ * divided by the block size. It can be used for other purposes, but
+ * it will wrap at the end of the folio rather than returning NULL or
+ * proceeding to the next folio for you.
+ *
+ * Return: The requested buffer with an elevated refcount.
+ */
+static inline __must_check
+struct buffer_head *get_nth_bh(struct buffer_head *bh, unsigned int count)
+{
+ while (count--)
+ bh = bh->b_this_page;
+ get_bh(bh);
+ return bh;
+}
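
For example (a sketch assuming the folio already has buffers; "offset" and "blocksize" are illustrative):

	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = get_nth_bh(head, offset / blocksize);

	/* ... use bh ... */
	put_bh(bh);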
+
+bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
+
+#ifdef CONFIG_BUFFER_HEAD
+
+void buffer_init(void);
+bool try_to_free_buffers(struct folio *folio);
+int inode_has_buffers(struct inode *inode);
+void invalidate_inode_buffers(struct inode *inode);
+int remove_inode_buffers(struct inode *inode);
+int sync_mapping_buffers(struct address_space *mapping);
+void invalidate_bh_lrus(void);
+void invalidate_bh_lrus_cpu(void);
+bool has_bh_in_lru(int cpu, void *dummy);
+extern int buffer_heads_over_limit;
-#else /* CONFIG_BLOCK */
+#else /* CONFIG_BUFFER_HEAD */
static inline void buffer_init(void) {}
-static inline int try_to_free_buffers(struct page *page) { return 1; }
+static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
-static inline void invalidate_bh_lrus_cpu(int cpu) {}
-static inline bool has_bh_in_lru(int cpu, void *dummy) { return 0; }
+static inline void invalidate_bh_lrus(void) {}
+static inline void invalidate_bh_lrus_cpu(void) {}
+static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0
-#endif /* CONFIG_BLOCK */
+#endif /* CONFIG_BUFFER_HEAD */
#endif /* _LINUX_BUFFER_HEAD_H */
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 348acf2558f3..17a4933c611b 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -42,6 +42,7 @@ void bug_get_file_line(struct bug_entry *bug, const char **file,
struct bug_entry *find_bug(unsigned long bugaddr);
enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
+enum bug_trap_type report_bug_entry(struct bug_entry *bug, struct pt_regs *regs);
/* These are defined by the architecture */
int is_valid_bugaddr(unsigned long addr);
@@ -62,6 +63,13 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
}
struct bug_entry;
+
+static inline enum bug_trap_type
+report_bug_entry(struct bug_entry *bug, struct pt_regs *regs)
+{
+ return BUG_TRAP_TYPE_BUG;
+}
+
static inline void bug_get_file_line(struct bug_entry *bug, const char **file,
unsigned int *line)
{
@@ -73,15 +81,23 @@ static inline void generic_bug_clear_once(void) {}
#endif /* CONFIG_GENERIC_BUG */
+#ifdef CONFIG_PRINTK
+void mem_dump_obj(void *object);
+#else
+static inline void mem_dump_obj(void *object) {}
+#endif
+
/*
 * Detected data corruption should stop operation on the affected
 * structures. The return value must be checked and sanely acted on by the caller.
*/
static inline __must_check bool check_data_corruption(bool v) { return v; }
-#define CHECK_DATA_CORRUPTION(condition, fmt, ...) \
+#define CHECK_DATA_CORRUPTION(condition, addr, fmt, ...) \
check_data_corruption(({ \
bool corruption = unlikely(condition); \
if (corruption) { \
+ if (addr) \
+ mem_dump_obj(addr); \
if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \
pr_err(fmt, ##__VA_ARGS__); \
BUG(); \
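
A sketch of the new calling convention, modelled on the list-debug style callers (condition and message are illustrative): the suspect object is passed so mem_dump_obj() can describe it before the report.

	if (CHECK_DATA_CORRUPTION(next->prev != entry, next,
				  "list_add corruption: next->prev is %px\n",
				  next->prev))
		return false;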
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
index e3a0be2c90ad..2cfbb4c65c78 100644
--- a/include/linux/build_bug.h
+++ b/include/linux/build_bug.h
@@ -4,17 +4,17 @@
#include <linux/compiler.h>
-#ifdef __CHECKER__
-#define BUILD_BUG_ON_ZERO(e) (0)
-#else /* __CHECKER__ */
/*
* Force a compilation error if condition is true, but also produce a
* result (of value 0 and type int), so the expression can be used
* e.g. in a structure initializer (or where-ever else comma expressions
* aren't permitted).
+ *
+ * Take an error message as an optional second argument. If omitted,
+ * default to the stringification of the tested expression.
*/
-#define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))
-#endif /* __CHECKER__ */
+#define BUILD_BUG_ON_ZERO(e, ...) \
+ __BUILD_BUG_ON_ZERO_MSG(e, ##__VA_ARGS__, #e " is true")
/* Force a compilation error if a constant expression is not a power of 2 */
#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \
@@ -77,4 +77,13 @@
#define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
#define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
+
+/*
+ * Compile time check that field has an expected offset
+ */
+#define ASSERT_STRUCT_OFFSET(type, field, expected_offset) \
+ BUILD_BUG_ON_MSG(offsetof(type, field) != (expected_offset), \
+ "Offset of " #field " in " #type " has changed.")
+
+
#endif /* _LINUX_BUILD_BUG_H */
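
Both additions are compile-time only; a minimal sketch (struct and offsets invented):

	struct hdr {
		u32 magic;
		u32 len;
	};

	#define HDR_SIZE (sizeof(struct hdr) + \
			  BUILD_BUG_ON_ZERO(sizeof(struct hdr) != 8, \
					    "struct hdr resized"))

	static void hdr_layout_check(void)
	{
		ASSERT_STRUCT_OFFSET(struct hdr, len, 4);
	}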
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
index 40232f90db6e..831c1b4b626c 100644
--- a/include/linux/buildid.h
+++ b/include/linux/buildid.h
@@ -2,11 +2,45 @@
#ifndef _LINUX_BUILDID_H
#define _LINUX_BUILDID_H
-#include <linux/mm_types.h>
+#include <linux/types.h>
#define BUILD_ID_SIZE_MAX 20
-int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
- __u32 *size);
+struct vm_area_struct;
+int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
+int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size);
+int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
+
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
+extern unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX];
+void init_vmlinux_build_id(void);
+#else
+static inline void init_vmlinux_build_id(void) { }
+#endif
+
+struct freader {
+ void *buf;
+ u32 buf_sz;
+ int err;
+ union {
+ struct {
+ struct file *file;
+ struct folio *folio;
+ void *addr;
+ loff_t folio_off;
+ bool may_fault;
+ };
+ struct {
+ const char *data;
+ u64 data_sz;
+ };
+ };
+};
+
+void freader_init_from_file(struct freader *r, void *buf, u32 buf_sz,
+ struct file *file, bool may_fault);
+void freader_init_from_mem(struct freader *r, const char *data, u64 data_sz);
+const void *freader_fetch(struct freader *r, loff_t file_off, size_t sz);
+void freader_cleanup(struct freader *r);
#endif
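
A sketch of the freader API above (error handling trimmed; ELFMAG/SELFMAG come from the ELF headers, everything else is illustrative):

	static int example_check_elf_magic(struct file *file)
	{
		char buf[64];
		struct freader r;
		const void *p;
		int ret = 0;

		freader_init_from_file(&r, buf, sizeof(buf), file, false);
		p = freader_fetch(&r, 0, SELFMAG);
		if (!p)
			ret = r.err;
		else if (memcmp(p, ELFMAG, SELFMAG))
			ret = -EINVAL;
		freader_cleanup(&r);
		return ret;
	}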
diff --git a/include/linux/bus/stm32_firewall_device.h b/include/linux/bus/stm32_firewall_device.h
new file mode 100644
index 000000000000..eaa7a3f54450
--- /dev/null
+++ b/include/linux/bus/stm32_firewall_device.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef STM32_FIREWALL_DEVICE_H
+#define STM32_FIREWALL_DEVICE_H
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define STM32_FIREWALL_MAX_EXTRA_ARGS 5
+
+/* Opaque reference to stm32_firewall_controller */
+struct stm32_firewall_controller;
+
+/**
+ * struct stm32_firewall - Information on a device's firewall. Each device can have more than one
+ * firewall.
+ *
+ * @firewall_ctrl: Pointer referencing a firewall controller of the device. It is
+ * opaque so a device cannot manipulate the controller's ops or access
+ * the controller's data
+ * @extra_args: Extra arguments that are implementation dependent
+ * @entry: Name of the firewall entry
+ * @extra_args_size: Number of extra arguments
+ * @firewall_id: Firewall ID associated with the device for this firewall controller
+ */
+struct stm32_firewall {
+ struct stm32_firewall_controller *firewall_ctrl;
+ u32 extra_args[STM32_FIREWALL_MAX_EXTRA_ARGS];
+ const char *entry;
+ size_t extra_args_size;
+ u32 firewall_id;
+};
+
+#if IS_ENABLED(CONFIG_STM32_FIREWALL)
+/**
+ * stm32_firewall_get_firewall - Get the firewall(s) associated to given device.
+ * The firewall controller reference is always the first argument
+ * of each of the access-controller property entries.
+ * The firewall ID is always the second argument of each of the
+ * access-controller property entries.
+ * If there's no argument linked to the phandle, then the firewall ID
+ * field is set to U32_MAX, which is an invalid ID.
+ *
+ * @np: Device node to parse
+ * @firewall: Array of firewall references
+ * @nb_firewall: Number of firewall references to get. Must be at least 1.
+ *
+ * Returns 0 on success, -ENODEV if there's no match with a firewall controller, or an
+ * appropriate errno code if an error occurred.
+ */
+int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall,
+ unsigned int nb_firewall);
+
+/**
+ * stm32_firewall_grant_access - Request firewall access rights and grant access.
+ *
+ * @firewall: Firewall reference containing the ID to check against its firewall
+ * controller
+ *
+ * Returns 0 if access is granted, -EACCES if access is denied, -ENODEV if @firewall is NULL, or
+ * an appropriate errno code if an error occurred.
+ */
+int stm32_firewall_grant_access(struct stm32_firewall *firewall);
+
+/**
+ * stm32_firewall_release_access - Release access granted from a call to
+ * stm32_firewall_grant_access().
+ *
+ * @firewall: Firewall reference containing the ID to check against its firewall
+ * controller
+ */
+void stm32_firewall_release_access(struct stm32_firewall *firewall);
+
+/**
+ * stm32_firewall_grant_access_by_id - Request firewall access rights of a given device
+ * based on a specific firewall ID
+ *
+ * Warnings:
+ * There is no way to ensure that the given ID will correspond to the firewall referenced in the
+ * device node if the ID did not come from stm32_firewall_get_firewall(). In that case, this
+ * function must be used with caution.
+ * This function should be used for subsystem resources that do not have the same firewall ID
+ * as their parent.
+ * U32_MAX is an invalid ID.
+ *
+ * @firewall: Firewall reference containing the firewall controller
+ * @subsystem_id: Firewall ID of the subsystem resource
+ *
+ * Returns 0 if access is granted, -EACCES if access is denied, -ENODEV if @firewall is NULL, or
+ * an appropriate errno code if an error occurred.
+ */
+int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id);
+
+/**
+ * stm32_firewall_release_access_by_id - Release access granted from a call to
+ * stm32_firewall_grant_access_by_id().
+ *
+ * Warnings:
+ * There is no way to ensure that the given ID will correspond to the firewall referenced in the
+ * device node if the ID did not come from stm32_firewall_get_firewall(). In that case, this
+ * function must be used with caution.
+ * This function should be used for subsystem resources that do not have the same firewall ID
+ * as their parent.
+ * U32_MAX is an invalid ID.
+ *
+ * @firewall: Firewall reference containing the firewall controller
+ * @subsystem_id: Firewall ID of the subsystem resource
+ */
+void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id);
+
+#else /* CONFIG_STM32_FIREWALL */
+
+static inline int stm32_firewall_get_firewall(struct device_node *np,
+ struct stm32_firewall *firewall,
+ unsigned int nb_firewall)
+{
+ return -ENODEV;
+}
+
+static inline int stm32_firewall_grant_access(struct stm32_firewall *firewall)
+{
+ return -ENODEV;
+}
+
+static inline void stm32_firewall_release_access(struct stm32_firewall *firewall)
+{
+}
+
+static inline int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall,
+ u32 subsystem_id)
+{
+ return -ENODEV;
+}
+
+static inline void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall,
+ u32 subsystem_id)
+{
+}
+
+#endif /* CONFIG_STM32_FIREWALL */
+#endif /* STM32_FIREWALL_DEVICE_H */
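
A usage sketch for a consumer driver with a single firewall entry (the probe skeleton and error handling are illustrative):

	static int example_probe(struct platform_device *pdev)
	{
		struct stm32_firewall fw;
		int err;

		err = stm32_firewall_get_firewall(pdev->dev.of_node, &fw, 1);
		if (err)
			return err;

		err = stm32_firewall_grant_access(&fw);
		if (err)
			return err;	/* e.g. -EACCES: resource not granted to us */

		/* ... use the protected resource; on teardown: */
		stm32_firewall_release_access(&fw);
		return 0;
	}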
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index ff832e698efb..3fc0efa0825b 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -4,14 +4,14 @@
*
* Copyright (C) 2001 Ming Lei <ming.lei@canonical.com>
*/
-#ifndef __LINUX_BVEC_ITER_H
-#define __LINUX_BVEC_ITER_H
+#ifndef __LINUX_BVEC_H
+#define __LINUX_BVEC_H
+#include <linux/highmem.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/limits.h>
#include <linux/minmax.h>
-#include <linux/mm.h>
#include <linux/types.h>
struct page;
@@ -22,11 +22,8 @@ struct page;
* @bv_len: Number of bytes in the address range.
* @bv_offset: Start of the address range relative to the start of @bv_page.
*
- * The following holds for a bvec if n * PAGE_SIZE < bv_offset + bv_len:
- *
- * nth_page(@bv_page, n) == @bv_page + n
- *
- * This holds because page_is_mergeable() checks the above property.
+ * All pages within a bio_vec starting from @bv_page are contiguous and
+ * can simply be iterated (see bvec_advance()).
*/
struct bio_vec {
struct page *bv_page;
@@ -34,6 +31,49 @@ struct bio_vec {
unsigned int bv_offset;
};
+/**
+ * bvec_set_page - initialize a bvec based off a struct page
+ * @bv: bvec to initialize
+ * @page: page the bvec should point to
+ * @len: length of the bvec
+ * @offset: offset into the page
+ */
+static inline void bvec_set_page(struct bio_vec *bv, struct page *page,
+ unsigned int len, unsigned int offset)
+{
+ bv->bv_page = page;
+ bv->bv_len = len;
+ bv->bv_offset = offset;
+}
+
+/**
+ * bvec_set_folio - initialize a bvec based off a struct folio
+ * @bv: bvec to initialize
+ * @folio: folio the bvec should point to
+ * @len: length of the bvec
+ * @offset: offset into the folio
+ */
+static inline void bvec_set_folio(struct bio_vec *bv, struct folio *folio,
+ size_t len, size_t offset)
+{
+ unsigned long nr = offset / PAGE_SIZE;
+
+ WARN_ON_ONCE(len > UINT_MAX);
+ bvec_set_page(bv, folio_page(folio, nr), len, offset % PAGE_SIZE);
+}
+
+/**
+ * bvec_set_virt - initialize a bvec based on a virtual address
+ * @bv: bvec to initialize
+ * @vaddr: virtual address to set the bvec to
+ * @len: length of the bvec
+ */
+static inline void bvec_set_virt(struct bio_vec *bv, void *vaddr,
+ unsigned int len)
+{
+ bvec_set_page(bv, virt_to_page(vaddr), len, offset_in_page(vaddr));
+}
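
A sketch of the three initializers (page, folio and vaddr are assumed to be held by the caller):

	struct bio_vec bv;

	bvec_set_page(&bv, page, 512, 1024);		  /* 512 bytes at offset 1k */
	bvec_set_folio(&bv, folio, folio_size(folio), 0); /* a whole folio */
	bvec_set_virt(&bv, vaddr, 64);			  /* lowmem kernel memory */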
+
struct bvec_iter {
sector_t bi_sector; /* device address in 512 byte
sectors */
@@ -43,7 +83,7 @@ struct bvec_iter {
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
-};
+} __packed __aligned(4);
struct bvec_iter_all {
struct bio_vec bv;
@@ -144,6 +184,12 @@ static inline void bvec_iter_advance_single(const struct bio_vec *bv,
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
+#define for_each_mp_bvec(bvl, bio_vec, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((bvl = mp_bvec_iter_bvec((bio_vec), (iter))), 1); \
+ bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
+
/* for iterating one bio from start to end */
#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
{ \
@@ -183,4 +229,70 @@ static inline void bvec_advance(const struct bio_vec *bvec,
}
}
-#endif /* __LINUX_BVEC_ITER_H */
+/**
+ * bvec_kmap_local - map a bvec into the kernel virtual address space
+ * @bvec: bvec to map
+ *
+ * Must be called on single-page bvecs only. Call kunmap_local on the returned
+ * address to unmap.
+ */
+static inline void *bvec_kmap_local(struct bio_vec *bvec)
+{
+ return kmap_local_page(bvec->bv_page) + bvec->bv_offset;
+}
+
+/**
+ * memcpy_from_bvec - copy data from a bvec
+ * @to: The memory to copy to.
+ * @bvec: bvec to copy from
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec)
+{
+ memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len);
+}
+
+/**
+ * memcpy_to_bvec - copy data to a bvec
+ * @bvec: bvec to copy to
+ * @from: The memory to copy from.
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from)
+{
+ memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len);
+}
+
+/**
+ * memzero_bvec - zero all data in a bvec
+ * @bvec: bvec to zero
+ *
+ * Must be called on single-page bvecs only.
+ */
+static inline void memzero_bvec(struct bio_vec *bvec)
+{
+ memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
+}
+
+/**
+ * bvec_virt - return the virtual address for a bvec
+ * @bvec: bvec to return the virtual address for
+ *
+ * Note: the caller must ensure that @bvec->bv_page is not a highmem page.
+ */
+static inline void *bvec_virt(struct bio_vec *bvec)
+{
+ WARN_ON_ONCE(PageHighMem(bvec->bv_page));
+ return page_address(bvec->bv_page) + bvec->bv_offset;
+}
+
+/**
+ * bvec_phys - return the physical address for a bvec
+ * @bvec: bvec to return the physical address for
+ */
+static inline phys_addr_t bvec_phys(const struct bio_vec *bvec)
+{
+ return page_to_phys(bvec->bv_page) + bvec->bv_offset;
+}
+
+#endif /* __LINUX_BVEC_H */
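
A sketch of the mapping helper (a single-page bvec is assumed); this is effectively what memcpy_from_bvec() boils down to via memcpy_from_page():

	void *p = bvec_kmap_local(&bv);

	memcpy(buf, p, bv.bv_len);
	kunmap_local(p);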
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index 4b13e0a3e15b..55a44199de87 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -173,6 +173,38 @@ static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
}
}
+static inline void le64_to_cpu_array(u64 *buf, unsigned int words)
+{
+ while (words--) {
+ __le64_to_cpus(buf);
+ buf++;
+ }
+}
+
+static inline void cpu_to_le64_array(u64 *buf, unsigned int words)
+{
+ while (words--) {
+ __cpu_to_le64s(buf);
+ buf++;
+ }
+}
+
+static inline void memcpy_from_le32(u32 *dst, const __le32 *src, size_t words)
+{
+ size_t i;
+
+ for (i = 0; i < words; i++)
+ dst[i] = le32_to_cpu(src[i]);
+}
+
+static inline void memcpy_to_le32(__le32 *dst, const u32 *src, size_t words)
+{
+ size_t i;
+
+ for (i = 0; i < words; i++)
+ dst[i] = cpu_to_le32(src[i]);
+}
+
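
A sketch contrasting the two styles (buffers invented): in-place conversion keeps the u32 type, while the memcpy variants convert into an explicitly endianness-annotated buffer, which keeps sparse happy.

	u32 a[4] = { 1, 2, 3, 4 };
	u32 b[4] = { 5, 6, 7, 8 };
	__le32 wire[4];

	cpu_to_le32_array(a, ARRAY_SIZE(a));	   /* convert in place */
	memcpy_to_le32(wire, b, ARRAY_SIZE(wire)); /* convert while copying */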
static inline void be16_add_cpu(__be16 *var, u16 val)
{
*var = cpu_to_be16(be16_to_cpu(*var) + val);
@@ -190,7 +222,7 @@ static inline void be64_add_cpu(__be64 *var, u64 val)
static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
{
- int i;
+ size_t i;
for (i = 0; i < len; i++)
dst[i] = cpu_to_be32(src[i]);
@@ -198,7 +230,7 @@ static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len)
{
- int i;
+ size_t i;
for (i = 0; i < len; i++)
dst[i] = be32_to_cpu(src[i]);
diff --git a/include/linux/cache.h b/include/linux/cache.h
index d742c57eaee5..e69768f50d53 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -3,14 +3,37 @@
#define __LINUX_CACHE_H
#include <uapi/linux/kernel.h>
+#include <vdso/cache.h>
#include <asm/cache.h>
#ifndef L1_CACHE_ALIGN
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
#endif
-#ifndef SMP_CACHE_BYTES
-#define SMP_CACHE_BYTES L1_CACHE_BYTES
+/**
+ * SMP_CACHE_ALIGN - align a value to the L2 cacheline size
+ * @x: value to align
+ *
+ * On some architectures, the L2 ("SMP") cacheline size is bigger than the L1
+ * one, and this sometimes needs to be accounted for.
+ *
+ * Return: aligned value.
+ */
+#ifndef SMP_CACHE_ALIGN
+#define SMP_CACHE_ALIGN(x) ALIGN(x, SMP_CACHE_BYTES)
+#endif
+
+/*
+ * ``__aligned_largest`` aligns a field to the value most optimal for the
+ * target architecture to perform memory operations. Get the actual value
+ * to be able to use it anywhere else.
+ */
+#ifndef __LARGEST_ALIGN
+#define __LARGEST_ALIGN sizeof(struct { long x; } __aligned_largest)
+#endif
+
+#ifndef LARGEST_ALIGN
+#define LARGEST_ALIGN(x) ALIGN(x, __LARGEST_ALIGN)
#endif
/*
@@ -37,10 +60,6 @@
#define __ro_after_init __section(".data..ro_after_init")
#endif
-#ifndef ____cacheline_aligned
-#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
-#endif
-
#ifndef ____cacheline_aligned_in_smp
#ifdef CONFIG_SMP
#define ____cacheline_aligned_in_smp ____cacheline_aligned
@@ -85,4 +104,81 @@
#define cache_line_size() L1_CACHE_BYTES
#endif
+#ifndef __cacheline_group_begin
+#define __cacheline_group_begin(GROUP) \
+ __u8 __cacheline_group_begin__##GROUP[0]
+#endif
+
+#ifndef __cacheline_group_end
+#define __cacheline_group_end(GROUP) \
+ __u8 __cacheline_group_end__##GROUP[0]
+#endif
+
+/**
+ * __cacheline_group_begin_aligned - declare an aligned group start
+ * @GROUP: name of the group
+ * @...: optional group alignment
+ *
+ * The following block inside a struct:
+ *
+ * __cacheline_group_begin_aligned(grp);
+ * field a;
+ * field b;
+ * __cacheline_group_end_aligned(grp);
+ *
+ * will always be aligned to either the specified alignment or
+ * ``SMP_CACHE_BYTES``.
+ */
+#define __cacheline_group_begin_aligned(GROUP, ...) \
+ __cacheline_group_begin(GROUP) \
+ __aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)
+
+/**
+ * __cacheline_group_end_aligned - declare an aligned group end
+ * @GROUP: name of the group
+ * @...: optional alignment (same as was in __cacheline_group_begin_aligned())
+ *
+ * Note that the end marker is aligned to sizeof(long) to allow more precise
+ * size assertions. It also declares padding at the end to avoid the next
+ * field falling into this cacheline.
+ */
+#define __cacheline_group_end_aligned(GROUP, ...) \
+ __cacheline_group_end(GROUP) __aligned(sizeof(long)); \
+ struct { } __cacheline_group_pad__##GROUP \
+ __aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)
+
+#ifndef CACHELINE_ASSERT_GROUP_MEMBER
+#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \
+ BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \
+ offsetofend(TYPE, __cacheline_group_begin__##GROUP) && \
+ offsetofend(TYPE, MEMBER) <= \
+ offsetof(TYPE, __cacheline_group_end__##GROUP)))
+#endif
+
+#ifndef CACHELINE_ASSERT_GROUP_SIZE
+#define CACHELINE_ASSERT_GROUP_SIZE(TYPE, GROUP, SIZE) \
+ BUILD_BUG_ON(offsetof(TYPE, __cacheline_group_end__##GROUP) - \
+ offsetofend(TYPE, __cacheline_group_begin__##GROUP) > \
+ SIZE)
+#endif
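
A sketch tying the group markers and assertions together (the struct is invented; the assertions are compile-time and typically live in an init function):

	struct example_stats {
		__cacheline_group_begin_aligned(hot);
		u64 rx_packets;
		u64 rx_bytes;
		__cacheline_group_end_aligned(hot);
		u64 rarely_used;
	};

	static void example_layout_checks(void)
	{
		CACHELINE_ASSERT_GROUP_MEMBER(struct example_stats, hot, rx_bytes);
		CACHELINE_ASSERT_GROUP_SIZE(struct example_stats, hot,
					    2 * sizeof(u64));
	}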
+
+/*
+ * Helper to add padding within a struct to ensure data fall into separate
+ * cachelines.
+ */
+#if defined(CONFIG_SMP)
+struct cacheline_padding {
+ char x[0];
+} ____cacheline_internodealigned_in_smp;
+#define CACHELINE_PADDING(name) struct cacheline_padding name
+#else
+#define CACHELINE_PADDING(name)
+#endif
+
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_HAS_DMA_MINALIGN
+#else
+#define ARCH_DMA_MINALIGN __alignof__(unsigned long long)
+#endif
+
#endif /* __LINUX_CACHE_H */
diff --git a/include/linux/cache_coherency.h b/include/linux/cache_coherency.h
new file mode 100644
index 000000000000..cc81c5733e31
--- /dev/null
+++ b/include/linux/cache_coherency.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cache coherency maintenance operation device drivers
+ *
+ * Copyright Huawei 2025
+ */
+#ifndef _LINUX_CACHE_COHERENCY_H_
+#define _LINUX_CACHE_COHERENCY_H_
+
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+
+struct cc_inval_params {
+ phys_addr_t addr;
+ size_t size;
+};
+
+struct cache_coherency_ops_inst;
+
+struct cache_coherency_ops {
+ int (*wbinv)(struct cache_coherency_ops_inst *cci,
+ struct cc_inval_params *invp);
+ int (*done)(struct cache_coherency_ops_inst *cci);
+};
+
+struct cache_coherency_ops_inst {
+ struct kref kref;
+ struct list_head node;
+ const struct cache_coherency_ops *ops;
+};
+
+int cache_coherency_ops_instance_register(struct cache_coherency_ops_inst *cci);
+void cache_coherency_ops_instance_unregister(struct cache_coherency_ops_inst *cci);
+
+struct cache_coherency_ops_inst *
+_cache_coherency_ops_instance_alloc(const struct cache_coherency_ops *ops,
+ size_t size);
+/**
+ * cache_coherency_ops_instance_alloc - Allocate cache coherency ops instance
+ * @ops: Cache maintenance operations
+ * @drv_struct: structure that contains the struct cache_coherency_ops_inst
+ * @member: Name of the struct cache_coherency_ops_inst member in @drv_struct.
+ *
+ * This allocates a driver specific structure and initializes the
+ * cache_coherency_ops_inst embedded in the drv_struct. Upon success the
+ * pointer must be freed via cache_coherency_ops_instance_put().
+ *
+ * Returns a &drv_struct * on success, %NULL on error.
+ */
+#define cache_coherency_ops_instance_alloc(ops, drv_struct, member) \
+ ({ \
+ static_assert(__same_type(struct cache_coherency_ops_inst, \
+ ((drv_struct *)NULL)->member)); \
+ static_assert(offsetof(drv_struct, member) == 0); \
+ (drv_struct *)_cache_coherency_ops_instance_alloc(ops, \
+ sizeof(drv_struct)); \
+ })
+void cache_coherency_ops_instance_put(struct cache_coherency_ops_inst *cci);
+
+#endif
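
A sketch of a driver embedding the ops instance (all names invented); per the macro's static_asserts the member must sit at offset 0:

	static int my_cc_wbinv(struct cache_coherency_ops_inst *cci,
			       struct cc_inval_params *invp)
	{
		/* write back and invalidate [addr, addr + size) */
		return 0;
	}

	static int my_cc_done(struct cache_coherency_ops_inst *cci)
	{
		return 0;
	}

	static const struct cache_coherency_ops my_cc_ops = {
		.wbinv = my_cc_wbinv,
		.done = my_cc_done,
	};

	struct my_cc_driver {
		struct cache_coherency_ops_inst cci;	/* offset 0 */
		void __iomem *regs;
	};

	/* in probe: */
	drv = cache_coherency_ops_instance_alloc(&my_cc_ops,
						 struct my_cc_driver, cci);
	if (!drv)
		return -ENOMEM;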
diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h
new file mode 100644
index 000000000000..55f297b2c23f
--- /dev/null
+++ b/include/linux/cacheflush.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CACHEFLUSH_H
+#define _LINUX_CACHEFLUSH_H
+
+#include <asm/cacheflush.h>
+
+struct folio;
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+#ifndef flush_dcache_folio
+void flush_dcache_folio(struct folio *folio);
+#endif
+#else
+static inline void flush_dcache_folio(struct folio *folio)
+{
+}
+#define flush_dcache_folio flush_dcache_folio
+#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
+
+#ifndef flush_icache_pages
+static inline void flush_icache_pages(struct vm_area_struct *vma,
+ struct page *page, unsigned int nr)
+{
+}
+#endif
+
+#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
+
+#endif /* _LINUX_CACHEFLUSH_H */
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 4f72b47973c3..c8f4f0a0b874 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -3,8 +3,8 @@
#define _LINUX_CACHEINFO_H
#include <linux/bitops.h>
-#include <linux/cpu.h>
-#include <linux/cpumask.h>
+#include <linux/cpuhplock.h>
+#include <linux/cpumask_types.h>
#include <linux/smp.h>
struct device_node;
@@ -74,70 +74,95 @@ struct cacheinfo {
struct cpu_cacheinfo {
struct cacheinfo *info_list;
+ unsigned int per_cpu_data_slice_size;
unsigned int num_levels;
unsigned int num_leaves;
bool cpu_map_populated;
+ bool early_ci_levels;
};
-/*
- * Helpers to make sure "func" is executed on the cpu whose cache
- * attributes are being detected
- */
-#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \
-static inline void _##func(void *ret) \
-{ \
- int cpu = smp_processor_id(); \
- *(int *)ret = __##func(cpu); \
-} \
- \
-int func(unsigned int cpu) \
-{ \
- int ret; \
- smp_call_function_single(cpu, _##func, &ret, true); \
- return ret; \
-}
-
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
+int early_cache_level(unsigned int cpu);
int init_cache_level(unsigned int cpu);
+int init_of_cache_level(unsigned int cpu);
int populate_cache_leaves(unsigned int cpu);
int cache_setup_acpi(unsigned int cpu);
+bool last_level_cache_is_valid(unsigned int cpu);
+bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y);
+int fetch_cache_info(unsigned int cpu);
+int detect_cache_attributes(unsigned int cpu);
#ifndef CONFIG_ACPI_PPTT
/*
- * acpi_find_last_cache_level is only called on ACPI enabled
+ * acpi_get_cache_info() is only called on ACPI enabled
* platforms using the PPTT for topology. This means that if
* the platform supports other firmware configuration methods
* we need to stub out the call when ACPI is disabled.
* ACPI enabled platforms not using PPTT won't be making calls
* to this function so we need not worry about them.
*/
-static inline int acpi_find_last_cache_level(unsigned int cpu)
+static inline
+int acpi_get_cache_info(unsigned int cpu,
+ unsigned int *levels, unsigned int *split_levels)
{
- return 0;
+ return -ENOENT;
}
#else
-int acpi_find_last_cache_level(unsigned int cpu);
+int acpi_get_cache_info(unsigned int cpu,
+ unsigned int *levels, unsigned int *split_levels);
#endif
const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
/*
- * Get the id of the cache associated with @cpu at level @level.
+ * Get the cacheinfo structure for the cache associated with @cpu at
+ * level @level.
* cpuhp lock must be held.
*/
-static inline int get_cpu_cacheinfo_id(int cpu, int level)
+static inline struct cacheinfo *get_cpu_cacheinfo_level(int cpu, int level)
{
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
int i;
+ lockdep_assert_cpus_held();
+
for (i = 0; i < ci->num_leaves; i++) {
if (ci->info_list[i].level == level) {
if (ci->info_list[i].attributes & CACHE_ID)
- return ci->info_list[i].id;
- return -1;
+ return &ci->info_list[i];
+ return NULL;
}
}
- return -1;
+ return NULL;
}
+/*
+ * Get the id of the cache associated with @cpu at level @level.
+ * cpuhp lock must be held.
+ */
+static inline int get_cpu_cacheinfo_id(int cpu, int level)
+{
+ struct cacheinfo *ci = get_cpu_cacheinfo_level(cpu, level);
+
+ return ci ? ci->id : -1;
+}
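
A sketch of the id lookup (the cache level is illustrative; the cpuhp lock requirement is real):

	int l3_id;

	cpus_read_lock();
	l3_id = get_cpu_cacheinfo_id(cpu, 3);	/* -1 if L3 has no CACHE_ID */
	cpus_read_unlock();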
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
+#define use_arch_cache_info() (true)
+#else
+#define use_arch_cache_info() (false)
+#endif
+
+#ifndef CONFIG_ARCH_HAS_CPU_CACHE_ALIASING
+#define cpu_dcache_is_aliasing() false
+#define cpu_icache_is_aliasing() cpu_dcache_is_aliasing()
+#else
+#include <asm/cachetype.h>
+
+#ifndef cpu_icache_is_aliasing
+#define cpu_icache_is_aliasing() cpu_dcache_is_aliasing()
+#endif
+
+#endif
+
#endif /* _LINUX_CACHEINFO_H */
diff --git a/include/linux/call_once.h b/include/linux/call_once.h
new file mode 100644
index 000000000000..13cd6469e7e5
--- /dev/null
+++ b/include/linux/call_once.h
@@ -0,0 +1,66 @@
+#ifndef _LINUX_CALL_ONCE_H
+#define _LINUX_CALL_ONCE_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+#define ONCE_NOT_STARTED 0
+#define ONCE_RUNNING 1
+#define ONCE_COMPLETED 2
+
+struct once {
+ atomic_t state;
+ struct mutex lock;
+};
+
+static inline void __once_init(struct once *once, const char *name,
+ struct lock_class_key *key)
+{
+ atomic_set(&once->state, ONCE_NOT_STARTED);
+ __mutex_init(&once->lock, name, key);
+}
+
+#define once_init(once) \
+do { \
+ static struct lock_class_key __key; \
+ __once_init((once), #once, &__key); \
+} while (0)
+
+/*
+ * call_once - Ensure a function has been called exactly once
+ *
+ * @once: Tracking struct
+ * @cb: Function to be called
+ *
+ * If @once has never completed successfully before, call @cb and, if
+ * it returns a zero or positive value, mark @once as completed. Return
+ * the value returned by @cb.
+ *
+ * If @once has completed successfully before, return 0.
+ *
+ * The call to @cb is implicitly surrounded by a mutex, though for
+ * efficiency the function avoids taking it after the first call.
+ */
+static inline int call_once(struct once *once, int (*cb)(struct once *))
+{
+ int r, state;
+
+ /* Pairs with atomic_set_release() below. */
+ if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
+ return 0;
+
+ guard(mutex)(&once->lock);
+ state = atomic_read(&once->state);
+ if (unlikely(state != ONCE_NOT_STARTED))
+ return WARN_ON_ONCE(state != ONCE_COMPLETED) ? -EINVAL : 0;
+
+ atomic_set(&once->state, ONCE_RUNNING);
+ r = cb(once);
+ if (r < 0)
+ atomic_set(&once->state, ONCE_NOT_STARTED);
+ else
+ atomic_set_release(&once->state, ONCE_COMPLETED);
+ return r;
+}
+
+#endif /* _LINUX_CALL_ONCE_H */
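
A usage sketch (names invented): once_init(&my_once) runs once at setup time, after which any number of callers can race on the fast path.

	static struct once my_once;

	static int my_init_cb(struct once *once)
	{
		/* heavy one-time setup; a negative return permits a retry */
		return 0;
	}

	static int my_fast_path(void)
	{
		return call_once(&my_once, my_init_cb);	/* 0 once completed */
	}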
diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h
index ae7a3411167c..726d909e87ce 100644
--- a/include/linux/can/bittiming.h
+++ b/include/linux/can/bittiming.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2020 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
- * Copyright (c) 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (c) 2021-2025 Vincent Mailhol <mailhol@kernel.org>
*/
#ifndef _CAN_BITTIMING_H
@@ -11,13 +11,17 @@
#define CAN_SYNC_SEG 1
+#define CAN_BITRATE_UNSET 0
+#define CAN_BITRATE_UNKNOWN (-1U)
-/* Kilobits and Megabits per second */
-#define CAN_KBPS 1000UL
-#define CAN_MBPS 1000000UL
-
-/* Megahertz */
-#define CAN_MHZ 1000000UL
+#define CAN_CTRLMODE_FD_TDC_MASK \
+ (CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL)
+#define CAN_CTRLMODE_XL_TDC_MASK \
+ (CAN_CTRLMODE_XL_TDC_AUTO | CAN_CTRLMODE_XL_TDC_MANUAL)
+#define CAN_CTRLMODE_TDC_AUTO_MASK \
+ (CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_XL_TDC_AUTO)
+#define CAN_CTRLMODE_TDC_MANUAL_MASK \
+ (CAN_CTRLMODE_TDC_MANUAL | CAN_CTRLMODE_XL_TDC_MANUAL)
/*
* struct can_tdc - CAN FD Transmission Delay Compensation parameters
@@ -28,34 +32,54 @@
*
* To solve this issue, ISO 11898-1 introduces in section 11.3.3
* "Transmitter delay compensation" a SSP (Secondary Sample Point)
- * equal to the distance, in time quanta, from the start of the bit
- * time on the TX pin to the actual measurement on the RX pin.
+ * equal to the distance from the start of the bit time on the TX pin
+ * to the actual measurement on the RX pin.
*
* This structure contains the parameters to calculate that SSP.
*
- * @tdcv: Transmitter Delay Compensation Value. Distance, in time
- * quanta, from when the bit is sent on the TX pin to when it is
- * received on the RX pin of the transmitter. Possible options:
+ * -+----------- one bit ----------+-- TX pin
+ * |<--- Sample Point --->|
+ *
+ * --+----------- one bit ----------+-- RX pin
+ * |<-------- TDCV -------->|
+ * |<------- TDCO ------->|
+ * |<----------- Secondary Sample Point ---------->|
+ *
+ * To increase precision, contrary to the other bittiming parameters
+ * which are measured in time quanta, the TDC parameters are measured
+ * in clock periods (also referred as "minimum time quantum" in ISO
+ * 11898-1).
*
- * O: automatic mode. The controller dynamically measure @tdcv
- * for each transmitted CAN FD frame.
+ * @tdcv: Transmitter Delay Compensation Value. The time needed for
+ * the signal to propagate, i.e. the distance, in clock periods,
+ * from the start of the bit on the TX pin to when it is received
+ * on the RX pin. @tdcv depends on the controller modes:
*
- * Other values: manual mode. Use the fixed provided value.
+ * CAN_CTRLMODE_TDC_AUTO is set: The transceiver dynamically
+ * measures @tdcv for each transmitted CAN FD frame and the
+ * value provided here should be ignored.
*
- * @tdco: Transmitter Delay Compensation Offset. Offset value, in time
- * quanta, defining the distance between the start of the bit
- * reception on the RX pin of the transceiver and the SSP
- * position such as SSP = @tdcv + @tdco.
+ * CAN_CTRLMODE_TDC_MANUAL is set: use the fixed provided @tdcv
+ * value.
*
- * If @tdco is zero, then TDC is disabled and both @tdcv and
- * @tdcf should be ignored.
+ * N.B. CAN_CTRLMODE_TDC_AUTO and CAN_CTRLMODE_TDC_MANUAL are
+ * mutually exclusive. Only one can be set at a time. If both
+ * CAN_CTRLMODE_TDC_AUTO and CAN_CTRLMODE_TDC_MANUAL are unset,
+ * TDC is disabled and all the values of this structure should be
+ * ignored.
+ *
+ * @tdco: Transmitter Delay Compensation Offset. Offset value, in
+ * clock periods, defining the distance between the start of the
+ * bit reception on the RX pin of the transceiver and the SSP
+ * position such that SSP = @tdcv + @tdco.
*
* @tdcf: Transmitter Delay Compensation Filter window. Defines the
- * minimum value for the SSP position in time quanta. If SSP is
- * less than @tdcf, then no delay compensations occur and the
- * normal sampling point is used instead. The feature is enabled
- * if and only if @tdcv is set to zero (automatic mode) and @tdcf
- * is configured to a value greater than @tdco.
+ * minimum value for the SSP position in clock periods. If the
+ * SSP position is less than @tdcf, then no delay compensations
+ * occur and the normal sampling point is used instead. The
+ * feature is enabled if and only if @tdcv is set to zero
+ * (automatic mode) and @tdcf is configured to a value greater
+ * than @tdco.
*/
struct can_tdc {
u32 tdcv;
@@ -63,49 +87,169 @@ struct can_tdc {
u32 tdcf;
};
+/* The transceiver decoding margin corresponds to t_Decode in ISO 11898-2 */
+#define CAN_PWM_DECODE_NS 5
+/* Maximum PWM symbol duration. Corresponds to t_SymbolNom_MAX - t_Decode */
+#define CAN_PWM_NS_MAX (205 - CAN_PWM_DECODE_NS)
+
/*
* struct can_tdc_const - CAN hardware-dependent constant for
* Transmission Delay Compensation
*
- * @tdcv_max: Transmitter Delay Compensation Value maximum value.
- * Should be set to zero if the controller does not support
- * manual mode for tdcv.
+ * @tdcv_min: Transmitter Delay Compensation Value minimum value. If
+ * the controller does not support manual mode for tdcv
+ * (c.f. flag CAN_CTRLMODE_TDC_MANUAL) then this value is
+ * ignored.
+ * @tdcv_max: Transmitter Delay Compensation Value maximum value. If
+ * the controller does not support manual mode for tdcv
+ * (c.f. flag CAN_CTRLMODE_TDC_MANUAL) then this value is
+ * ignored.
+ *
+ * @tdco_min: Transmitter Delay Compensation Offset minimum value.
* @tdco_max: Transmitter Delay Compensation Offset maximum value.
* Should not be zero. If the controller does not support TDC,
* then the pointer to this structure should be NULL.
+ *
+ * @tdcf_min: Transmitter Delay Compensation Filter window minimum
+ * value. If @tdcf_max is zero, this value is ignored.
* @tdcf_max: Transmitter Delay Compensation Filter window maximum
* value. Should be set to zero if the controller does not
* support this feature.
*/
struct can_tdc_const {
+ u32 tdcv_min;
u32 tdcv_max;
+ u32 tdco_min;
u32 tdco_max;
+ u32 tdcf_min;
u32 tdcf_max;
};
+/*
+ * struct can_pwm - CAN Pulse-Width Modulation (PWM) parameters
+ *
+ * @pwms: pulse width modulation short phase
+ * @pwml: pulse width modulation long phase
+ * @pwmo: pulse width modulation offset
+ */
+struct can_pwm {
+ u32 pwms;
+ u32 pwml;
+ u32 pwmo;
+};
+
+/*
+ * struct can_pwm_const - CAN hardware-dependent constants for Pulse-Width
+ * Modulation (PWM)
+ *
+ * @pwms_min: PWM short phase minimum value. Must be at least 1.
+ * @pwms_max: PWM short phase maximum value
+ * @pwml_min: PWM long phase minimum value. Must be at least 1.
+ * @pwml_max: PWM long phase maximum value
+ * @pwmo_min: PWM offset phase minimum value
+ * @pwmo_max: PWM offset phase maximum value
+ */
+struct can_pwm_const {
+ u32 pwms_min;
+ u32 pwms_max;
+ u32 pwml_min;
+ u32 pwml_max;
+ u32 pwmo_min;
+ u32 pwmo_max;
+};
+
+struct data_bittiming_params {
+ const struct can_bittiming_const *data_bittiming_const;
+ struct can_bittiming data_bittiming;
+ const struct can_tdc_const *tdc_const;
+ const struct can_pwm_const *pwm_const;
+ union {
+ struct can_tdc tdc;
+ struct can_pwm pwm;
+ };
+ const u32 *data_bitrate_const;
+ unsigned int data_bitrate_const_cnt;
+ int (*do_set_data_bittiming)(struct net_device *dev);
+ int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv);
+};
+
#ifdef CONFIG_CAN_CALC_BITTIMING
-int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc);
+int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack);
+
+void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported);
-void can_calc_tdco(struct net_device *dev);
+int can_calc_pwm(struct net_device *dev, struct netlink_ext_ack *extack);
#else /* !CONFIG_CAN_CALC_BITTIMING */
static inline int
-can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
+can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
{
- netdev_err(dev, "bit-timing calculation not available\n");
+ NL_SET_ERR_MSG(extack, "bit-timing calculation not available\n");
return -EINVAL;
}
-static inline void can_calc_tdco(struct net_device *dev)
+static inline void
+can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported)
+{
+}
+
+static inline int
+can_calc_pwm(struct net_device *dev, struct netlink_ext_ack *extack)
{
+ NL_SET_ERR_MSG(extack,
+ "bit-timing calculation not available: manually provide PWML and PWMS\n");
+ return -EINVAL;
}
#endif /* CONFIG_CAN_CALC_BITTIMING */
-int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+void can_sjw_set_default(struct can_bittiming *bt);
+
+int can_sjw_check(const struct net_device *dev, const struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack);
+
+int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc,
const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt);
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack);
+
+int can_validate_pwm_bittiming(const struct net_device *dev,
+ const struct can_pwm *pwm,
+ struct netlink_ext_ack *extack);
+
+/*
+ * can_get_relative_tdco() - TDCO relative to the sample point
+ *
+ * struct can_tdc::tdco represents the absolute offset from TDCV. Some
+ * controllers use instead an offset relative to the Sample Point (SP)
+ * such that:
+ *
+ * SSP = TDCV + absolute TDCO
+ * = TDCV + SP + relative TDCO
+ *
+ * -+----------- one bit ----------+-- TX pin
+ * |<--- Sample Point --->|
+ *
+ * --+----------- one bit ----------+-- RX pin
+ * |<-------- TDCV -------->|
+ * |<------------------------>| absolute TDCO
+ * |<--- Sample Point --->|
+ * | |<->| relative TDCO
+ * |<------------- Secondary Sample Point ------------>|
+ */
+static inline s32 can_get_relative_tdco(const struct data_bittiming_params *dbt_params)
+{
+ const struct can_bittiming *dbt = &dbt_params->data_bittiming;
+ s32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
+ dbt->phase_seg1) * dbt->brp;
+
+ return (s32)dbt_params->tdc.tdco - sample_point_in_tc;
+}
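
A worked example of the relation above (values invented): with brp = 2, prop_seg = 7 and phase_seg1 = 6, the sample point sits at (1 + 7 + 6) * 2 = 28 clock periods; an absolute tdco of 33 then yields a relative TDCO of 33 - 28 = 5.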
/*
* can_bit_time() - Duration of one bit
@@ -120,4 +264,17 @@ static inline unsigned int can_bit_time(const struct can_bittiming *bt)
return CAN_SYNC_SEG + bt->prop_seg + bt->phase_seg1 + bt->phase_seg2;
}
+/* Duration of one bit in units of the minimum time quantum */
+static inline unsigned int can_bit_time_tqmin(const struct can_bittiming *bt)
+{
+ return can_bit_time(bt) * bt->brp;
+}
+
+/* Convert a duration from minimum time quanta to nanoseconds */
+static inline u32 can_tqmin_to_ns(u32 tqmin, u32 clock_freq)
+{
+ return DIV_U64_ROUND_CLOSEST(mul_u32_u32(tqmin, NSEC_PER_SEC),
+ clock_freq);
+}
+
#endif /* !_CAN_BITTIMING_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 27b275e463da..f6416a56e95d 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -17,10 +17,10 @@
#include <linux/can.h>
#include <linux/can/bittiming.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/can/length.h>
#include <linux/can/netlink.h>
#include <linux/can/skb.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
/*
@@ -32,6 +32,12 @@ enum can_mode {
CAN_MODE_SLEEP
};
+enum can_termination_gpio {
+ CAN_TERMINATION_GPIO_DISABLED = 0,
+ CAN_TERMINATION_GPIO_ENABLED,
+ CAN_TERMINATION_GPIO_MAX,
+};
+
/*
* CAN common private data
*/
@@ -39,69 +45,59 @@ struct can_priv {
struct net_device *dev;
struct can_device_stats can_stats;
- const struct can_bittiming_const *bittiming_const,
- *data_bittiming_const;
- struct can_bittiming bittiming, data_bittiming;
- const struct can_tdc_const *tdc_const;
- struct can_tdc tdc;
-
+ const struct can_bittiming_const *bittiming_const;
+ struct can_bittiming bittiming;
+ struct data_bittiming_params fd, xl;
unsigned int bitrate_const_cnt;
const u32 *bitrate_const;
- const u32 *data_bitrate_const;
- unsigned int data_bitrate_const_cnt;
u32 bitrate_max;
struct can_clock clock;
unsigned int termination_const_cnt;
const u16 *termination_const;
u16 termination;
+ struct gpio_desc *termination_gpio;
+ u16 termination_gpio_ohms[CAN_TERMINATION_GPIO_MAX];
+
+ unsigned int echo_skb_max;
+ struct sk_buff **echo_skb;
enum can_state state;
/* CAN controller features - see include/uapi/linux/can/netlink.h */
u32 ctrlmode; /* current options setting */
u32 ctrlmode_supported; /* options that can be modified by netlink */
- u32 ctrlmode_static; /* static enabled options for driver/hardware */
int restart_ms;
struct delayed_work restart_work;
int (*do_set_bittiming)(struct net_device *dev);
- int (*do_set_data_bittiming)(struct net_device *dev);
int (*do_set_mode)(struct net_device *dev, enum can_mode mode);
int (*do_set_termination)(struct net_device *dev, u16 term);
int (*do_get_state)(const struct net_device *dev,
enum can_state *state);
int (*do_get_berr_counter)(const struct net_device *dev,
struct can_berr_counter *bec);
-
- unsigned int echo_skb_max;
- struct sk_buff **echo_skb;
-
-#ifdef CONFIG_CAN_LEDS
- struct led_trigger *tx_led_trig;
- char tx_led_trig_name[CAN_LED_NAME_SZ];
- struct led_trigger *rx_led_trig;
- char rx_led_trig_name[CAN_LED_NAME_SZ];
- struct led_trigger *rxtx_led_trig;
- char rxtx_led_trig_name[CAN_LED_NAME_SZ];
-#endif
};
+static inline bool can_fd_tdc_is_enabled(const struct can_priv *priv)
+{
+ return !!(priv->ctrlmode & CAN_CTRLMODE_FD_TDC_MASK);
+}
-/* helper to define static CAN controller features at device creation time */
-static inline void can_set_static_ctrlmode(struct net_device *dev,
- u32 static_mode)
+static inline bool can_xl_tdc_is_enabled(const struct can_priv *priv)
{
- struct can_priv *priv = netdev_priv(dev);
+ return !!(priv->ctrlmode & CAN_CTRLMODE_XL_TDC_MASK);
+}
- /* alloc_candev() succeeded => netdev_priv() is valid at this point */
- priv->ctrlmode = static_mode;
- priv->ctrlmode_static = static_mode;
+static inline u32 can_get_static_ctrlmode(struct can_priv *priv)
+{
+ return priv->ctrlmode & ~priv->ctrlmode_supported;
+}
- /* override MTU which was set by default in can_setup()? */
- if (static_mode & CAN_CTRLMODE_FD)
- dev->mtu = CANFD_MTU;
+static inline bool can_is_canxl_dev_mtu(unsigned int mtu)
+{
+ return (mtu >= CANXL_MIN_MTU && mtu <= CANXL_MAX_MTU);
}
void can_setup(struct net_device *dev);
@@ -115,11 +111,27 @@ struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
void free_candev(struct net_device *dev);
/* a candev safe wrapper around netdev_priv */
+#if IS_ENABLED(CONFIG_CAN_NETLINK)
struct can_priv *safe_candev_priv(struct net_device *dev);
+#else
+static inline struct can_priv *safe_candev_priv(struct net_device *dev)
+{
+ return NULL;
+}
+#endif
int open_candev(struct net_device *dev);
void close_candev(struct net_device *dev);
-int can_change_mtu(struct net_device *dev, int new_mtu);
+void can_set_default_mtu(struct net_device *dev);
+int __must_check can_set_static_ctrlmode(struct net_device *dev,
+ u32 static_mode);
+int can_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg);
+int can_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack);
+int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info);
int register_candev(struct net_device *dev);
void unregister_candev(struct net_device *dev);
@@ -128,6 +140,57 @@ int can_restart_now(struct net_device *dev);
void can_bus_off(struct net_device *dev);
const char *can_get_state_str(const enum can_state state);
+const char *can_get_ctrlmode_str(u32 ctrlmode);
+
+static inline bool can_dev_in_xl_only_mode(struct can_priv *priv)
+{
+ const u32 mixed_mode = CAN_CTRLMODE_FD | CAN_CTRLMODE_XL;
+
+ /* When CAN XL is enabled but FD is disabled we are running in
+ * the so-called 'CANXL-only mode' where the error signalling is
+ * disabled. This helper function determines the required value
+ * to disable error signalling in the CAN XL controller.
+ * The so-called CC/FD/XL 'mixed mode' requires error signalling.
+ */
+ return ((priv->ctrlmode & mixed_mode) == CAN_CTRLMODE_XL);
+}
+
+/* drop skb if it does not contain a valid CAN frame for sending */
+static inline bool can_dev_dropped_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ u32 silent_mode = priv->ctrlmode & (CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_RESTRICTED);
+
+ if (silent_mode) {
+ netdev_info_once(dev, "interface in %s mode, dropping skb\n",
+ can_get_ctrlmode_str(silent_mode));
+ goto invalid_skb;
+ }
+
+ if (!(priv->ctrlmode & CAN_CTRLMODE_FD) && can_is_canfd_skb(skb)) {
+ netdev_info_once(dev, "CAN FD is disabled, dropping skb\n");
+ goto invalid_skb;
+ }
+
+ if (can_dev_in_xl_only_mode(priv) && !can_is_canxl_skb(skb)) {
+ netdev_info_once(dev,
+ "Error signaling is disabled, dropping skb\n");
+ goto invalid_skb;
+ }
+
+ return can_dropped_invalid_skb(dev, skb);
+
+invalid_skb:
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+}
+
+void can_state_get_by_berr_counter(const struct net_device *dev,
+ const struct can_berr_counter *bec,
+ enum can_state *tx_state,
+ enum can_state *rx_state);
void can_change_state(struct net_device *dev, struct can_frame *cf,
enum can_state tx_state, enum can_state rx_state);
diff --git a/include/linux/can/dev/peak_canfd.h b/include/linux/can/dev/peak_canfd.h
index f38772fd0c07..d3788a3d0942 100644
--- a/include/linux/can/dev/peak_canfd.h
+++ b/include/linux/can/dev/peak_canfd.h
@@ -2,8 +2,8 @@
/*
* CAN driver for PEAK System micro-CAN based adapters
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#ifndef PUCAN_H
#define PUCAN_H
diff --git a/include/linux/can/led.h b/include/linux/can/led.h
deleted file mode 100644
index 7c3cfd798c56..000000000000
--- a/include/linux/can/led.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com>
- */
-
-#ifndef _CAN_LED_H
-#define _CAN_LED_H
-
-#include <linux/if.h>
-#include <linux/leds.h>
-#include <linux/netdevice.h>
-
-enum can_led_event {
- CAN_LED_EVENT_OPEN,
- CAN_LED_EVENT_STOP,
- CAN_LED_EVENT_TX,
- CAN_LED_EVENT_RX,
-};
-
-#ifdef CONFIG_CAN_LEDS
-
-/* keep space for interface name + "-tx"/"-rx"/"-rxtx"
- * suffix and null terminator
- */
-#define CAN_LED_NAME_SZ (IFNAMSIZ + 6)
-
-void can_led_event(struct net_device *netdev, enum can_led_event event);
-void devm_can_led_init(struct net_device *netdev);
-int __init can_led_notifier_init(void);
-void __exit can_led_notifier_exit(void);
-
-#else
-
-static inline void can_led_event(struct net_device *netdev,
- enum can_led_event event)
-{
-}
-static inline void devm_can_led_init(struct net_device *netdev)
-{
-}
-static inline int can_led_notifier_init(void)
-{
- return 0;
-}
-static inline void can_led_notifier_exit(void)
-{
-}
-
-#endif
-
-#endif /* !_CAN_LED_H */
diff --git a/include/linux/can/length.h b/include/linux/can/length.h
index 6995092b774e..abc978b38f79 100644
--- a/include/linux/can/length.h
+++ b/include/linux/can/length.h
@@ -1,126 +1,258 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2020 Oliver Hartkopp <socketcan@hartkopp.net>
* Copyright (C) 2020 Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (C) 2020, 2023 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
#ifndef _CAN_LENGTH_H
#define _CAN_LENGTH_H
+#include <linux/bits.h>
+#include <linux/can.h>
+#include <linux/can/netlink.h>
+#include <linux/math.h>
+
/*
- * Size of a Classical CAN Standard Frame
+ * Size of a Classical CAN Standard Frame header in bits
*
- * Name of Field Bits
+ * Name of Field Bits
* ---------------------------------------------------------
- * Start-of-frame 1
- * Identifier 11
- * Remote transmission request (RTR) 1
- * Identifier extension bit (IDE) 1
- * Reserved bit (r0) 1
- * Data length code (DLC) 4
- * Data field 0...64
- * CRC 15
- * CRC delimiter 1
- * ACK slot 1
- * ACK delimiter 1
- * End-of-frame (EOF) 7
- * Inter frame spacing 3
+ * Start Of Frame (SOF) 1
+ * Arbitration field:
+ * base ID 11
+ * Remote Transmission Request (RTR) 1
+ * Control field:
+ * IDentifier Extension bit (IDE) 1
+ * FD Format indicator (FDF) 1
+ * Data Length Code (DLC) 4
+ *
+ * including all fields preceding the data field, ignoring bitstuffing
+ */
+#define CAN_FRAME_HEADER_SFF_BITS 19
+
+/*
+ * Size of a Classical CAN Extended Frame header in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start Of Frame (SOF) 1
+ * Arbitration field:
+ * base ID 11
+ * Substitute Remote Request (SRR) 1
+ * IDentifier Extension bit (IDE) 1
+ * ID extension 18
+ * Remote Transmission Request (RTR) 1
+ * Control field:
+ * FD Format indicator (FDF) 1
+ * Reserved bit (r0) 1
+ * Data length code (DLC) 4
+ *
+ * including all fields preceding the data field, ignoring bitstuffing
+ */
+#define CAN_FRAME_HEADER_EFF_BITS 39
+
+/*
+ * Size of a CAN-FD Standard Frame header in bits
*
- * rounded up and ignoring bitstuffing
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start Of Frame (SOF) 1
+ * Arbitration field:
+ * base ID 11
+ * Remote Request Substitution (RRS) 1
+ * Control field:
+ * IDentifier Extension bit (IDE) 1
+ * FD Format indicator (FDF) 1
+ * Reserved bit (res) 1
+ * Bit Rate Switch (BRS) 1
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ *
+ * including all fields preceding the data field, ignoring bitstuffing
+ */
+#define CANFD_FRAME_HEADER_SFF_BITS 22
+
+/*
+ * Size of a CAN-FD Extended Frame header in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * Start Of Frame (SOF) 1
+ * Arbitration field:
+ * base ID 11
+ * Substitute Remote Request (SRR) 1
+ * IDentifier Extension bit (IDE) 1
+ * ID extension 18
+ * Remote Request Substitution (RRS) 1
+ * Control field:
+ * FD Format indicator (FDF) 1
+ * Reserved bit (res) 1
+ * Bit Rate Switch (BRS) 1
+ * Error Status Indicator (ESI) 1
+ * Data length code (DLC) 4
+ *
+ * including all fields preceding the data field, ignoring bitstuffing
*/
-#define CAN_FRAME_OVERHEAD_SFF DIV_ROUND_UP(47, 8)
+#define CANFD_FRAME_HEADER_EFF_BITS 41
/*
- * Size of a Classical CAN Extended Frame
+ * Size of a CAN CRC Field in bits
*
* Name of Field Bits
* ---------------------------------------------------------
- * Start-of-frame 1
- * Identifier A 11
- * Substitute remote request (SRR) 1
- * Identifier extension bit (IDE) 1
- * Identifier B 18
- * Remote transmission request (RTR) 1
- * Reserved bits (r1, r0) 2
- * Data length code (DLC) 4
- * Data field 0...64
- * CRC 15
- * CRC delimiter 1
- * ACK slot 1
- * ACK delimiter 1
- * End-of-frame (EOF) 7
- * Inter frame spacing 3
+ * CRC sequence (CRC15) 15
+ * CRC Delimiter 1
*
- * rounded up and ignoring bitstuffing
+ * ignoring bitstuffing
*/
-#define CAN_FRAME_OVERHEAD_EFF DIV_ROUND_UP(67, 8)
+#define CAN_FRAME_CRC_FIELD_BITS 16
/*
- * Size of a CAN-FD Standard Frame
+ * Size of a CAN-FD CRC17 Field in bits (length: 0..16)
*
* Name of Field Bits
* ---------------------------------------------------------
- * Start-of-frame 1
- * Identifier 11
- * Reserved bit (r1) 1
- * Identifier extension bit (IDE) 1
- * Flexible data rate format (FDF) 1
- * Reserved bit (r0) 1
- * Bit Rate Switch (BRS) 1
- * Error Status Indicator (ESI) 1
- * Data length code (DLC) 4
- * Data field 0...512
- * Stuff Bit Count (SBC) 0...16: 4 20...64:5
- * CRC 0...16: 17 20...64:21
- * CRC delimiter (CD) 1
- * ACK slot (AS) 1
- * ACK delimiter (AD) 1
- * End-of-frame (EOF) 7
- * Inter frame spacing 3
- *
- * assuming CRC21, rounded up and ignoring bitstuffing
- */
-#define CANFD_FRAME_OVERHEAD_SFF DIV_ROUND_UP(61, 8)
+ * Stuff Count 4
+ * CRC Sequence (CRC17) 17
+ * CRC Delimiter 1
+ * Fixed stuff bits 6
+ */
+#define CANFD_FRAME_CRC17_FIELD_BITS 28
/*
- * Size of a CAN-FD Extended Frame
+ * Size of a CAN-FD CRC21 Field in bits (length: 20..64)
*
* Name of Field Bits
* ---------------------------------------------------------
- * Start-of-frame 1
- * Identifier A 11
- * Substitute remote request (SRR) 1
- * Identifier extension bit (IDE) 1
- * Identifier B 18
- * Reserved bit (r1) 1
- * Flexible data rate format (FDF) 1
- * Reserved bit (r0) 1
- * Bit Rate Switch (BRS) 1
- * Error Status Indicator (ESI) 1
- * Data length code (DLC) 4
- * Data field 0...512
- * Stuff Bit Count (SBC) 0...16: 4 20...64:5
- * CRC 0...16: 17 20...64:21
- * CRC delimiter (CD) 1
- * ACK slot (AS) 1
- * ACK delimiter (AD) 1
- * End-of-frame (EOF) 7
- * Inter frame spacing 3
- *
- * assuming CRC21, rounded up and ignoring bitstuffing
- */
-#define CANFD_FRAME_OVERHEAD_EFF DIV_ROUND_UP(80, 8)
+ * Stuff Count 4
+ * CRC sequence (CRC21) 21
+ * CRC Delimiter 1
+ * Fixed stuff bits 7
+ */
+#define CANFD_FRAME_CRC21_FIELD_BITS 33
+
+/*
+ * Size of a CAN(-FD) Frame footer in bits
+ *
+ * Name of Field Bits
+ * ---------------------------------------------------------
+ * ACK slot 1
+ * ACK delimiter 1
+ * End Of Frame (EOF) 7
+ *
+ * including all fields following the CRC field
+ */
+#define CAN_FRAME_FOOTER_BITS 9
+
+/*
+ * First part of the Inter Frame Space
+ * (a.k.a. IMF - intermission field)
+ */
+#define CAN_INTERMISSION_BITS 3
+
+/**
+ * can_bitstuffing_len() - Calculate the maximum length with bitstuffing
+ * @destuffed_len: length of a destuffed bit stream
+ *
+ * The worst bit stuffing case is a sequence in which dominant and
+ * recessive bits alternate every four bits:
+ *
+ * Destuffed: 1 1111 0000 1111 0000 1111
+ * Stuffed: 1 1111o 0000i 1111o 0000i 1111o
+ *
+ * Nomenclature
+ *
+ * - "0": dominant bit
+ * - "o": dominant stuff bit
+ * - "1": recessive bit
+ * - "i": recessive stuff bit
+ *
+ * Aside from the first bit, one stuff bit is added every four bits.
+ *
+ * Return: length of the stuffed bit stream in the worst case scenario.
+ */
+#define can_bitstuffing_len(destuffed_len) \
+ (destuffed_len + (destuffed_len - 1) / 4)
+
+#define __can_bitstuffing_len(bitstuffing, destuffed_len) \
+ (bitstuffing ? can_bitstuffing_len(destuffed_len) : \
+ destuffed_len)
+
+#define __can_cc_frame_bits(is_eff, bitstuffing, \
+ intermission, data_len) \
+( \
+ __can_bitstuffing_len(bitstuffing, \
+ (is_eff ? CAN_FRAME_HEADER_EFF_BITS : \
+ CAN_FRAME_HEADER_SFF_BITS) + \
+ (data_len) * BITS_PER_BYTE + \
+ CAN_FRAME_CRC_FIELD_BITS) + \
+ CAN_FRAME_FOOTER_BITS + \
+ (intermission ? CAN_INTERMISSION_BITS : 0) \
+)
+
+#define __can_fd_frame_bits(is_eff, bitstuffing, \
+ intermission, data_len) \
+( \
+ __can_bitstuffing_len(bitstuffing, \
+ (is_eff ? CANFD_FRAME_HEADER_EFF_BITS : \
+ CANFD_FRAME_HEADER_SFF_BITS) + \
+ (data_len) * BITS_PER_BYTE) + \
+ ((data_len) <= 16 ? \
+ CANFD_FRAME_CRC17_FIELD_BITS : \
+ CANFD_FRAME_CRC21_FIELD_BITS) + \
+ CAN_FRAME_FOOTER_BITS + \
+ (intermission ? CAN_INTERMISSION_BITS : 0) \
+)
+
+/**
+ * can_frame_bits() - Calculate the number of bits on the wire in a
+ * CAN frame
+ * @is_fd: true: CAN-FD frame; false: Classical CAN frame.
+ * @is_eff: true: Extended frame; false: Standard frame.
+ * @bitstuffing: true: calculate the bitstuffing worst case; false:
+ * calculate the bitstuffing best case (no dynamic
+ * bitstuffing). CAN-FD's fixed stuff bits are always included.
+ * @intermission: if and only if true, include the inter frame space
+ * assuming no bus idle (i.e. only the intermission). Strictly
+ * speaking, the inter frame space is not part of the
+ * frame. However, it is needed when calculating the delay
+ * between the Start Of Frame of two consecutive frames.
+ * @data_len: length of the data field in bytes. Corresponds to
+ * can(fd)_frame->len. Should be zero for remote frames. No
+ * sanitization is done on @data_len and it shall have no side
+ * effects.
+ *
+ * Return: the number of bits on the wire of a CAN frame.
+ */
+#define can_frame_bits(is_fd, is_eff, bitstuffing, \
+ intermission, data_len) \
+( \
+ is_fd ? __can_fd_frame_bits(is_eff, bitstuffing, \
+ intermission, data_len) : \
+ __can_cc_frame_bits(is_eff, bitstuffing, \
+ intermission, data_len) \
+)
+
+/*
+ * Number of bytes in a CAN frame
+ * (rounded up, including intermission)
+ */
+#define can_frame_bytes(is_fd, is_eff, bitstuffing, data_len) \
+ DIV_ROUND_UP(can_frame_bits(is_fd, is_eff, bitstuffing, \
+ true, data_len), \
+ BITS_PER_BYTE)
/*
* Maximum size of a Classical CAN frame
- * (rounded up and ignoring bitstuffing)
+ * (rounded up, ignoring bitstuffing but including intermission)
*/
-#define CAN_FRAME_LEN_MAX (CAN_FRAME_OVERHEAD_EFF + CAN_MAX_DLEN)
+#define CAN_FRAME_LEN_MAX can_frame_bytes(false, true, false, CAN_MAX_DLEN)
/*
* Maximum size of a CAN-FD frame
- * (rounded up and ignoring bitstuffing)
+ * (rounded up, ignoring dynamic bitstuffing but including intermission)
*/
-#define CANFD_FRAME_LEN_MAX (CANFD_FRAME_OVERHEAD_EFF + CANFD_MAX_DLEN)
+#define CANFD_FRAME_LEN_MAX can_frame_bytes(true, true, false, CANFD_MAX_DLEN)
/*
* can_cc_dlc2len(value) - convert a given data length code (dlc) of a
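
The bit-exact constants above make the macros easy to check by hand. A minimal sketch (not part of the patch) evaluating the worst case for a Classical CAN extended frame carrying 8 data bytes, assuming the reworked <linux/can/length.h> shown above:

    /* Illustrative only: worst-case wire length of a Classical CAN
     * extended frame with 8 data bytes.
     *
     * header 39 + data 64 + CRC field 16 = 119 destuffed bits
     * stuffed: 119 + (119 - 1) / 4      = 148 bits
     * plus footer 9 + intermission 3    = 160 bits on the wire
     */
    #include <linux/can/length.h>

    static const unsigned int can_eff_worst_case_bits =
            can_frame_bits(false, true, true, true, 8);
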
diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h
new file mode 100644
index 000000000000..1b536fb999de
--- /dev/null
+++ b/include/linux/can/platform/flexcan.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Angelo Dureghello <angelo@kernel-space.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAN_PLATFORM_FLEXCAN_H
+#define _CAN_PLATFORM_FLEXCAN_H
+
+struct flexcan_platform_data {
+ u32 clock_frequency;
+ u8 clk_src;
+};
+
+#endif /* _CAN_PLATFORM_FLEXCAN_H */
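
This platform data targets non-devicetree boards; a hypothetical board-file registration, with illustrative clock values and an assumed driver name:

    /* Hypothetical board code: names and values are illustrative only. */
    #include <linux/platform_device.h>
    #include <linux/can/platform/flexcan.h>

    static struct flexcan_platform_data board_flexcan_pdata = {
            .clock_frequency = 120000000,   /* peripheral clock in Hz */
            .clk_src = 1,                   /* controller clock source select */
    };

    static struct platform_device board_flexcan_dev = {
            .name = "flexcan-mcf5441x",     /* assumed platform driver name */
            .id = 0,
            .dev = {
                    .platform_data = &board_flexcan_pdata,
            },
    };
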
diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h
index 5755ae5a4712..6a869682c120 100644
--- a/include/linux/can/platform/sja1000.h
+++ b/include/linux/can/platform/sja1000.h
@@ -14,7 +14,7 @@
#define OCR_MODE_TEST 0x01
#define OCR_MODE_NORMAL 0x02
#define OCR_MODE_CLOCK 0x03
-#define OCR_MODE_MASK 0x07
+#define OCR_MODE_MASK 0x03
#define OCR_TX0_INVERT 0x04
#define OCR_TX0_PULLDOWN 0x08
#define OCR_TX0_PULLUP 0x10
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index 40882df7105e..d29bb4521947 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -3,7 +3,7 @@
* linux/can/rx-offload.h
*
* Copyright (c) 2014 David Jander, Protonic Holland
- * Copyright (c) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2014-2017, 2023 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
*/
#ifndef _CAN_RX_OFFLOAD_H
@@ -20,6 +20,7 @@ struct can_rx_offload {
bool drop);
struct sk_buff_head skb_queue;
+ struct sk_buff_head skb_irq_queue;
u32 skb_queue_len_max;
unsigned int mb_first;
@@ -41,21 +42,21 @@ int can_rx_offload_add_manual(struct net_device *dev,
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
u64 reg);
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
-int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
- struct sk_buff *skb, u32 timestamp);
-unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
- unsigned int idx, u32 timestamp,
- unsigned int *frame_len_ptr);
+int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
+ struct sk_buff *skb, u32 timestamp);
+unsigned int can_rx_offload_get_echo_skb_queue_timestamp(struct can_rx_offload *offload,
+ unsigned int idx, u32 timestamp,
+ unsigned int *frame_len_ptr);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb);
+unsigned int can_rx_offload_get_echo_skb_queue_tail(struct can_rx_offload *offload,
+ unsigned int idx,
+ unsigned int *frame_len_ptr);
+void can_rx_offload_irq_finish(struct can_rx_offload *offload);
+void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);
-static inline void can_rx_offload_schedule(struct can_rx_offload *offload)
-{
- napi_schedule(&offload->napi);
-}
-
static inline void can_rx_offload_disable(struct can_rx_offload *offload)
{
napi_disable(&offload->napi);
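
With can_rx_offload_schedule() removed from the public API, drivers queue their skbs and then kick NAPI exactly once via the new *_irq_finish() helpers at the end of the (threaded) IRQ handler. A hypothetical handler sketch, where the foo_* helpers and struct foo_priv (holding a struct can_rx_offload offload member) stand in for driver specifics:

    /* Hypothetical IRQ handler under the reworked rx-offload API. */
    static irqreturn_t foo_can_irq(int irq, void *dev_id)
    {
            struct foo_priv *priv = dev_id;                  /* assumed */
            struct sk_buff *skb = foo_build_state_skb(priv); /* assumed */
            u32 timestamp = foo_read_timestamp(priv);        /* assumed */

            if (skb)
                    can_rx_offload_queue_timestamp(&priv->offload, skb,
                                                   timestamp);

            /* single NAPI kick after all queueing is done */
            can_rx_offload_irq_finish(&priv->offload);

            return IRQ_HANDLED;
    }
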
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index d311bc369a39..1abc25a8d144 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -20,16 +20,22 @@ void can_flush_echo_skb(struct net_device *dev);
int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int idx, unsigned int frame_len);
struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx,
- u8 *len_ptr, unsigned int *frame_len_ptr);
-unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
- unsigned int *frame_len_ptr);
+ unsigned int *len_ptr,
+ unsigned int *frame_len_ptr);
+unsigned int __must_check can_get_echo_skb(struct net_device *dev,
+ unsigned int idx,
+ unsigned int *frame_len_ptr);
void can_free_echo_skb(struct net_device *dev, unsigned int idx,
unsigned int *frame_len_ptr);
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
struct sk_buff *alloc_canfd_skb(struct net_device *dev,
struct canfd_frame **cfd);
+struct sk_buff *alloc_canxl_skb(struct net_device *dev,
+ struct canxl_frame **cxl,
+ unsigned int data_len);
struct sk_buff *alloc_can_err_skb(struct net_device *dev,
struct can_frame **cf);
+bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb);
/*
* The struct can_skb_priv is used to transport additional information along
@@ -95,68 +101,59 @@ static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
return nskb;
}
-/* Check for outgoing skbs that have not been created by the CAN subsystem */
-static inline bool can_skb_headroom_valid(struct net_device *dev,
- struct sk_buff *skb)
+static inline bool can_is_can_skb(const struct sk_buff *skb)
{
- /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
- if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
- return false;
+ struct can_frame *cf = (struct can_frame *)skb->data;
- /* af_packet does not apply CAN skb specific settings */
- if (skb->ip_summed == CHECKSUM_NONE) {
- /* init headroom */
- can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
+ /* the CAN specific type of skb is identified by its data length */
+ return (skb->len == CAN_MTU && cf->len <= CAN_MAX_DLEN);
+}
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+{
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
- /* perform proper loopback on capable devices */
- if (dev->flags & IFF_ECHO)
- skb->pkt_type = PACKET_LOOPBACK;
- else
- skb->pkt_type = PACKET_HOST;
+ /* the CAN specific type of skb is identified by its data length */
+ return (skb->len == CANFD_MTU && cfd->len <= CANFD_MAX_DLEN);
+}
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
- }
+static inline bool can_is_canxl_skb(const struct sk_buff *skb)
+{
+ const struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
- return true;
+ if (skb->len < CANXL_HDR_SIZE + CANXL_MIN_DLEN || skb->len > CANXL_MTU)
+ return false;
+
+ /* this also checks valid CAN XL data length boundaries */
+ if (skb->len != CANXL_HDR_SIZE + cxl->len)
+ return false;
+
+ return cxl->flags & CANXL_XLF;
}
-/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
-static inline bool can_dropped_invalid_skb(struct net_device *dev,
- struct sk_buff *skb)
+/* get length element value from can[|fd|xl]_frame structure */
+static inline unsigned int can_skb_get_len_val(struct sk_buff *skb)
{
+ const struct canxl_frame *cxl = (struct canxl_frame *)skb->data;
const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
- if (skb->protocol == htons(ETH_P_CAN)) {
- if (unlikely(skb->len != CAN_MTU ||
- cfd->len > CAN_MAX_DLEN))
- goto inval_skb;
- } else if (skb->protocol == htons(ETH_P_CANFD)) {
- if (unlikely(skb->len != CANFD_MTU ||
- cfd->len > CANFD_MAX_DLEN))
- goto inval_skb;
- } else
- goto inval_skb;
-
- if (!can_skb_headroom_valid(dev, skb))
- goto inval_skb;
-
- return false;
-
-inval_skb:
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return true;
+ if (can_is_canxl_skb(skb))
+ return cxl->len;
+
+ return cfd->len;
}
-static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+/* get needed data length inside CAN frame for all frame types (RTR aware) */
+static inline unsigned int can_skb_get_data_len(struct sk_buff *skb)
{
- /* the CAN specific type of skb is identified by its data length */
- return skb->len == CANFD_MTU;
+ unsigned int len = can_skb_get_len_val(skb);
+ const struct can_frame *cf = (struct can_frame *)skb->data;
+
+ /* RTR frames have an actual length of zero */
+ if (can_is_can_skb(skb) && cf->can_id & CAN_RTR_FLAG)
+ return 0;
+
+ return len;
}
#endif /* !_CAN_SKB_H */
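
With CAN XL added, the per-type predicates replace the old MTU-only check; a hypothetical classifier showing how they are meant to be combined:

    /* Hypothetical sketch: classify a CAN skb by frame type. */
    static const char *foo_can_skb_type(const struct sk_buff *skb)
    {
            if (can_is_canxl_skb(skb))
                    return "CAN XL";
            if (can_is_canfd_skb(skb))
                    return "CAN FD";
            if (can_is_can_skb(skb))
                    return "Classical CAN";
            return "invalid";
    }
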
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 65efb74c3585..1fb08922552c 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -15,43 +15,31 @@
#include <uapi/linux/capability.h>
#include <linux/uidgid.h>
+#include <linux/bits.h>
#define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3
-#define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3
extern int file_caps_enabled;
-typedef struct kernel_cap_struct {
- __u32 cap[_KERNEL_CAPABILITY_U32S];
-} kernel_cap_t;
+typedef struct { u64 val; } kernel_cap_t;
/* same as vfs_ns_cap_data but in cpu endian and always filled completely */
struct cpu_vfs_cap_data {
__u32 magic_etc;
+ kuid_t rootid;
kernel_cap_t permitted;
kernel_cap_t inheritable;
- kuid_t rootid;
};
#define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct))
#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t))
-
struct file;
struct inode;
struct dentry;
struct task_struct;
struct user_namespace;
-
-extern const kernel_cap_t __cap_empty_set;
-extern const kernel_cap_t __cap_init_eff_set;
-
-/*
- * Internal kernel functions only
- */
-
-#define CAP_FOR_EACH_U32(__capi) \
- for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi)
+struct mnt_idmap;
/*
* CAP_FS_MASK and CAP_NFSD_MASKS:
@@ -66,94 +54,52 @@ extern const kernel_cap_t __cap_init_eff_set;
* 2. The security.* and trusted.* xattrs are fs-related MAC permissions
*/
-# define CAP_FS_MASK_B0 (CAP_TO_MASK(CAP_CHOWN) \
- | CAP_TO_MASK(CAP_MKNOD) \
- | CAP_TO_MASK(CAP_DAC_OVERRIDE) \
- | CAP_TO_MASK(CAP_DAC_READ_SEARCH) \
- | CAP_TO_MASK(CAP_FOWNER) \
- | CAP_TO_MASK(CAP_FSETID))
-
-# define CAP_FS_MASK_B1 (CAP_TO_MASK(CAP_MAC_OVERRIDE))
-
-#if _KERNEL_CAPABILITY_U32S != 2
-# error Fix up hand-coded capability macro initializers
-#else /* HAND-CODED capability initializers */
+# define CAP_FS_MASK (BIT_ULL(CAP_CHOWN) \
+ | BIT_ULL(CAP_MKNOD) \
+ | BIT_ULL(CAP_DAC_OVERRIDE) \
+ | BIT_ULL(CAP_DAC_READ_SEARCH) \
+ | BIT_ULL(CAP_FOWNER) \
+ | BIT_ULL(CAP_FSETID) \
+ | BIT_ULL(CAP_MAC_OVERRIDE))
+#define CAP_VALID_MASK (BIT_ULL(CAP_LAST_CAP+1)-1)
-#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1)
-#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1)
+# define CAP_EMPTY_SET ((kernel_cap_t) { 0 })
+# define CAP_FULL_SET ((kernel_cap_t) { CAP_VALID_MASK })
+# define CAP_FS_SET ((kernel_cap_t) { CAP_FS_MASK | BIT_ULL(CAP_LINUX_IMMUTABLE) })
+# define CAP_NFSD_SET ((kernel_cap_t) { CAP_FS_MASK | BIT_ULL(CAP_SYS_RESOURCE) })
-# define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
-# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }})
-# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
- | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
- CAP_FS_MASK_B1 } })
-# define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
- | CAP_TO_MASK(CAP_SYS_RESOURCE), \
- CAP_FS_MASK_B1 } })
+# define cap_clear(c) do { (c).val = 0; } while (0)
-#endif /* _KERNEL_CAPABILITY_U32S != 2 */
-
-# define cap_clear(c) do { (c) = __cap_empty_set; } while (0)
-
-#define cap_raise(c, flag) ((c).cap[CAP_TO_INDEX(flag)] |= CAP_TO_MASK(flag))
-#define cap_lower(c, flag) ((c).cap[CAP_TO_INDEX(flag)] &= ~CAP_TO_MASK(flag))
-#define cap_raised(c, flag) ((c).cap[CAP_TO_INDEX(flag)] & CAP_TO_MASK(flag))
-
-#define CAP_BOP_ALL(c, a, b, OP) \
-do { \
- unsigned __capi; \
- CAP_FOR_EACH_U32(__capi) { \
- c.cap[__capi] = a.cap[__capi] OP b.cap[__capi]; \
- } \
-} while (0)
-
-#define CAP_UOP_ALL(c, a, OP) \
-do { \
- unsigned __capi; \
- CAP_FOR_EACH_U32(__capi) { \
- c.cap[__capi] = OP a.cap[__capi]; \
- } \
-} while (0)
+#define cap_raise(c, flag) ((c).val |= BIT_ULL(flag))
+#define cap_lower(c, flag) ((c).val &= ~BIT_ULL(flag))
+#define cap_raised(c, flag) (((c).val & BIT_ULL(flag)) != 0)
static inline kernel_cap_t cap_combine(const kernel_cap_t a,
const kernel_cap_t b)
{
- kernel_cap_t dest;
- CAP_BOP_ALL(dest, a, b, |);
- return dest;
+ return (kernel_cap_t) { a.val | b.val };
}
static inline kernel_cap_t cap_intersect(const kernel_cap_t a,
const kernel_cap_t b)
{
- kernel_cap_t dest;
- CAP_BOP_ALL(dest, a, b, &);
- return dest;
+ return (kernel_cap_t) { a.val & b.val };
}
static inline kernel_cap_t cap_drop(const kernel_cap_t a,
const kernel_cap_t drop)
{
- kernel_cap_t dest;
- CAP_BOP_ALL(dest, a, drop, &~);
- return dest;
+ return (kernel_cap_t) { a.val &~ drop.val };
}
-static inline kernel_cap_t cap_invert(const kernel_cap_t c)
+static inline bool cap_isclear(const kernel_cap_t a)
{
- kernel_cap_t dest;
- CAP_UOP_ALL(dest, c, ~);
- return dest;
+ return !a.val;
}
-static inline bool cap_isclear(const kernel_cap_t a)
+static inline bool cap_isidentical(const kernel_cap_t a, const kernel_cap_t b)
{
- unsigned __capi;
- CAP_FOR_EACH_U32(__capi) {
- if (a.cap[__capi] != 0)
- return false;
- }
- return true;
+ return a.val == b.val;
}
/*
@@ -165,43 +111,34 @@ static inline bool cap_isclear(const kernel_cap_t a)
*/
static inline bool cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
{
- kernel_cap_t dest;
- dest = cap_drop(a, set);
- return cap_isclear(dest);
+ return !(a.val & ~set.val);
}
/* Used to decide between falling back on the old suser() or fsuser(). */
static inline kernel_cap_t cap_drop_fs_set(const kernel_cap_t a)
{
- const kernel_cap_t __cap_fs_set = CAP_FS_SET;
- return cap_drop(a, __cap_fs_set);
+ return cap_drop(a, CAP_FS_SET);
}
static inline kernel_cap_t cap_raise_fs_set(const kernel_cap_t a,
const kernel_cap_t permitted)
{
- const kernel_cap_t __cap_fs_set = CAP_FS_SET;
- return cap_combine(a,
- cap_intersect(permitted, __cap_fs_set));
+ return cap_combine(a, cap_intersect(permitted, CAP_FS_SET));
}
static inline kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
{
- const kernel_cap_t __cap_fs_set = CAP_NFSD_SET;
- return cap_drop(a, __cap_fs_set);
+ return cap_drop(a, CAP_NFSD_SET);
}
static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
const kernel_cap_t permitted)
{
- const kernel_cap_t __cap_nfsd_set = CAP_NFSD_SET;
- return cap_combine(a,
- cap_intersect(permitted, __cap_nfsd_set));
+ return cap_combine(a, cap_intersect(permitted, CAP_NFSD_SET));
}
#ifdef CONFIG_MULTIUSER
-extern bool has_capability(struct task_struct *t, int cap);
extern bool has_ns_capability(struct task_struct *t,
struct user_namespace *ns, int cap);
extern bool has_capability_noaudit(struct task_struct *t, int cap);
@@ -212,10 +149,6 @@ extern bool ns_capable(struct user_namespace *ns, int cap);
extern bool ns_capable_noaudit(struct user_namespace *ns, int cap);
extern bool ns_capable_setid(struct user_namespace *ns, int cap);
#else
-static inline bool has_capability(struct task_struct *t, int cap)
-{
- return true;
-}
static inline bool has_ns_capability(struct task_struct *t,
struct user_namespace *ns, int cap)
{
@@ -248,9 +181,9 @@ static inline bool ns_capable_setid(struct user_namespace *ns, int cap)
}
#endif /* CONFIG_MULTIUSER */
bool privileged_wrt_inode_uidgid(struct user_namespace *ns,
- struct user_namespace *mnt_userns,
+ struct mnt_idmap *idmap,
const struct inode *inode);
-bool capable_wrt_inode_uidgid(struct user_namespace *mnt_userns,
+bool capable_wrt_inode_uidgid(struct mnt_idmap *idmap,
const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);
@@ -271,11 +204,11 @@ static inline bool checkpoint_restore_ns_capable(struct user_namespace *ns)
}
/* audit system wants to get cap info from files as well */
-int get_vfs_caps_from_disk(struct user_namespace *mnt_userns,
+int get_vfs_caps_from_disk(struct mnt_idmap *idmap,
const struct dentry *dentry,
struct cpu_vfs_cap_data *cpu_caps);
-int cap_convert_nscap(struct user_namespace *mnt_userns, struct dentry *dentry,
+int cap_convert_nscap(struct mnt_idmap *idmap, struct dentry *dentry,
const void **ivalue, size_t size);
#endif /* !_LINUX_CAPABILITY_H */
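
Since kernel_cap_t is now a single u64, the former loop-based macros reduce to plain bit arithmetic; a hypothetical illustration of the converted helpers:

    /* Hypothetical demo of the u64-based capability helpers. */
    static void foo_cap_demo(void)
    {
            kernel_cap_t caps = CAP_EMPTY_SET;

            cap_raise(caps, CAP_CHOWN);     /* caps.val |= BIT_ULL(CAP_CHOWN) */
            cap_raise(caps, CAP_NET_ADMIN);

            /* one mask operation instead of a CAP_FOR_EACH_U32 loop */
            if (!cap_issubset(caps, CAP_FS_SET))
                    caps = cap_drop(caps, CAP_FS_SET); /* drops CAP_CHOWN */
    }
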
diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h
new file mode 100644
index 000000000000..559353ad64ac
--- /dev/null
+++ b/include/linux/cc_platform.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Confidential Computing Platform Capability checks
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ */
+
+#ifndef _LINUX_CC_PLATFORM_H
+#define _LINUX_CC_PLATFORM_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+
+/**
+ * enum cc_attr - Confidential computing attributes
+ *
+ * These attributes represent confidential computing features that are
+ * currently active.
+ */
+enum cc_attr {
+ /**
+ * @CC_ATTR_MEM_ENCRYPT: Memory encryption is active
+ *
+ * The platform/OS is running with active memory encryption. This
+ * includes running either as a bare-metal system or a hypervisor
+ * and actively using memory encryption or as a guest/virtual machine
+ * and actively using memory encryption.
+ *
+ * Examples include SME, SEV and SEV-ES.
+ */
+ CC_ATTR_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_HOST_MEM_ENCRYPT: Host memory encryption is active
+ *
+ * The platform/OS is running as a bare-metal system or a hypervisor
+ * and actively using memory encryption.
+ *
+ * Examples include SME.
+ */
+ CC_ATTR_HOST_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_MEM_ENCRYPT: Guest memory encryption is active
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using memory encryption.
+ *
+ * Examples include SEV and SEV-ES.
+ */
+ CC_ATTR_GUEST_MEM_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_STATE_ENCRYPT: Guest state encryption is active
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using memory encryption and register state encryption.
+ *
+ * Examples include SEV-ES.
+ */
+ CC_ATTR_GUEST_STATE_ENCRYPT,
+
+ /**
+ * @CC_ATTR_GUEST_UNROLL_STRING_IO: String I/O is implemented with
+ * IN/OUT instructions
+ *
+ * The platform/OS is running as a guest/virtual machine and uses
+ * IN/OUT instructions in place of string I/O.
+ *
+ * Examples include TDX guest & SEV.
+ */
+ CC_ATTR_GUEST_UNROLL_STRING_IO,
+
+ /**
+ * @CC_ATTR_GUEST_SEV_SNP: Guest SNP is active.
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using AMD SEV-SNP features.
+ */
+ CC_ATTR_GUEST_SEV_SNP,
+
+ /**
+ * @CC_ATTR_GUEST_SNP_SECURE_TSC: SNP Secure TSC is active.
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using AMD SEV-SNP Secure TSC feature.
+ */
+ CC_ATTR_GUEST_SNP_SECURE_TSC,
+
+ /**
+ * @CC_ATTR_HOST_SEV_SNP: AMD SNP enabled on the host.
+ *
+ * The host kernel is running with the necessary features
+ * enabled to run SEV-SNP guests.
+ */
+ CC_ATTR_HOST_SEV_SNP,
+
+ /**
+ * @CC_ATTR_SNP_SECURE_AVIC: Secure AVIC mode is active.
+ *
+ * The host kernel is running with the necessary features enabled
+ * to run SEV-SNP guests with full Secure AVIC capabilities.
+ */
+ CC_ATTR_SNP_SECURE_AVIC,
+};
+
+#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+
+/**
+ * cc_platform_has() - Checks if the specified cc_attr attribute is active
+ * @attr: Confidential computing attribute to check
+ *
+ * The cc_platform_has() function will return an indicator as to whether the
+ * specified Confidential Computing attribute is currently active.
+ *
+ * Context: Any context
+ * Return:
+ * * TRUE - Specified Confidential Computing attribute is active
+ * * FALSE - Specified Confidential Computing attribute is not active
+ */
+bool cc_platform_has(enum cc_attr attr);
+void cc_platform_set(enum cc_attr attr);
+void cc_platform_clear(enum cc_attr attr);
+
+#else /* !CONFIG_ARCH_HAS_CC_PLATFORM */
+
+static inline bool cc_platform_has(enum cc_attr attr) { return false; }
+static inline void cc_platform_set(enum cc_attr attr) { }
+static inline void cc_platform_clear(enum cc_attr attr) { }
+
+#endif /* CONFIG_ARCH_HAS_CC_PLATFORM */
+
+#endif /* _LINUX_CC_PLATFORM_H */
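
A hypothetical caller, showing that the stub lets the check compile away on architectures without CONFIG_ARCH_HAS_CC_PLATFORM:

    /* Hypothetical use of the CC attribute check. */
    #include <linux/cc_platform.h>

    static bool foo_use_unrolled_string_io(void)
    {
            /* constant false when ARCH_HAS_CC_PLATFORM is not set */
            return cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO);
    }
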
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index f48d0a31deae..b907e6c2307d 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -13,6 +13,7 @@
#include <linux/fs.h> /* not really needed, later.. */
#include <linux/list.h>
+#include <linux/blkdev.h>
#include <scsi/scsi_common.h>
#include <uapi/linux/cdrom.h>
@@ -61,9 +62,9 @@ struct cdrom_device_info {
__u8 last_sense;
__u8 media_written; /* dirty flag, DVD+RW bookkeeping */
unsigned short mmc3_profile; /* current MMC3 profile */
- int for_data;
- int (*exit)(struct cdrom_device_info *);
int mrw_mode_page;
+ bool opened_for_data;
+ __s64 last_media_change_ms;
};
struct cdrom_device_ops {
@@ -75,8 +76,7 @@ struct cdrom_device_ops {
unsigned int clearing, int slot);
int (*tray_move) (struct cdrom_device_info *, int);
int (*lock_door) (struct cdrom_device_info *, int);
- int (*select_speed) (struct cdrom_device_info *, int);
- int (*select_disc) (struct cdrom_device_info *, int);
+ int (*select_speed) (struct cdrom_device_info *, unsigned long);
int (*get_last_session) (struct cdrom_device_info *,
struct cdrom_multisession *);
int (*get_mcn) (struct cdrom_device_info *,
@@ -86,11 +86,13 @@ struct cdrom_device_ops {
/* play stuff */
int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *);
-/* driver specifications */
- const int capability; /* capability flags */
/* handle uniform packets for scsi type devices (scsi,atapi) */
int (*generic_packet) (struct cdrom_device_info *,
struct packet_command *);
+ int (*read_cdda_bpc)(struct cdrom_device_info *cdi, void __user *ubuf,
+ u32 lba, u32 nframes, u8 *last_sense);
+/* driver specifications */
+ const int capability; /* capability flags */
};
int cdrom_multisession(struct cdrom_device_info *cdi,
@@ -99,11 +101,10 @@ int cdrom_read_tocentry(struct cdrom_device_info *cdi,
struct cdrom_tocentry *entry);
/* the general block_device operations structure: */
-extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
- fmode_t mode);
-extern void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode);
-extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
- fmode_t mode, unsigned int cmd, unsigned long arg);
+int cdrom_open(struct cdrom_device_info *cdi, blk_mode_t mode);
+void cdrom_release(struct cdrom_device_info *cdi);
+int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
+ unsigned int cmd, unsigned long arg);
extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
unsigned int clearing);
diff --git a/include/linux/cdx/bitfield.h b/include/linux/cdx/bitfield.h
new file mode 100644
index 000000000000..567f8ec47582
--- /dev/null
+++ b/include/linux/cdx/bitfield.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_BITFIELD_H
+#define CDX_BITFIELD_H
+
+#include <linux/bitfield.h>
+
+/* Lowest bit numbers and widths */
+#define CDX_DWORD_LBN 0
+#define CDX_DWORD_WIDTH 32
+
+/* Specified attribute (e.g. LBN) of the specified field */
+#define CDX_VAL(field, attribute) field ## _ ## attribute
+/* Low bit number of the specified field */
+#define CDX_LOW_BIT(field) CDX_VAL(field, LBN)
+/* Bit width of the specified field */
+#define CDX_WIDTH(field) CDX_VAL(field, WIDTH)
+/* High bit number of the specified field */
+#define CDX_HIGH_BIT(field) (CDX_LOW_BIT(field) + CDX_WIDTH(field) - 1)
+
+/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
+struct cdx_dword {
+ __le32 cdx_u32;
+};
+
+/* Value expanders for printk */
+#define CDX_DWORD_VAL(dword) \
+ ((unsigned int)le32_to_cpu((dword).cdx_u32))
+
+/*
+ * Extract bit field portion [low,high) from the 32-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define CDX_DWORD_FIELD(dword, field) \
+ (FIELD_GET(GENMASK(CDX_HIGH_BIT(field), CDX_LOW_BIT(field)), \
+ le32_to_cpu((dword).cdx_u32)))
+
+/*
+ * Creates the portion of the named bit field that lies within the
+ * range [min,max).
+ */
+#define CDX_INSERT_FIELD(field, value) \
+ (FIELD_PREP(GENMASK(CDX_HIGH_BIT(field), \
+ CDX_LOW_BIT(field)), value))
+
+/*
+ * Creates the portion of the named bit fields that lie within the
+ * range [min,max).
+ */
+#define CDX_INSERT_FIELDS(field1, value1, \
+ field2, value2, \
+ field3, value3, \
+ field4, value4, \
+ field5, value5, \
+ field6, value6, \
+ field7, value7) \
+ (CDX_INSERT_FIELD(field1, (value1)) | \
+ CDX_INSERT_FIELD(field2, (value2)) | \
+ CDX_INSERT_FIELD(field3, (value3)) | \
+ CDX_INSERT_FIELD(field4, (value4)) | \
+ CDX_INSERT_FIELD(field5, (value5)) | \
+ CDX_INSERT_FIELD(field6, (value6)) | \
+ CDX_INSERT_FIELD(field7, (value7)))
+
+#define CDX_POPULATE_DWORD(dword, ...) \
+ (dword).cdx_u32 = cpu_to_le32(CDX_INSERT_FIELDS(__VA_ARGS__))
+
+/* Populate a dword field with various numbers of arguments */
+#define CDX_POPULATE_DWORD_7 CDX_POPULATE_DWORD
+#define CDX_POPULATE_DWORD_6(dword, ...) \
+ CDX_POPULATE_DWORD_7(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_5(dword, ...) \
+ CDX_POPULATE_DWORD_6(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_4(dword, ...) \
+ CDX_POPULATE_DWORD_5(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_3(dword, ...) \
+ CDX_POPULATE_DWORD_4(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_2(dword, ...) \
+ CDX_POPULATE_DWORD_3(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_POPULATE_DWORD_1(dword, ...) \
+ CDX_POPULATE_DWORD_2(dword, CDX_DWORD, 0, __VA_ARGS__)
+#define CDX_SET_DWORD(dword) \
+ CDX_POPULATE_DWORD_1(dword, CDX_DWORD, 0xffffffff)
+
+#endif /* CDX_BITFIELD_H */
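
A hypothetical round-trip through the dword helpers; the FOO_FIELD LBN/WIDTH values are illustrative and not taken from any real firmware interface:

    /* Hypothetical field occupying bits [4,12) of a CDX dword. */
    #define FOO_FIELD_LBN   4
    #define FOO_FIELD_WIDTH 8

    static u32 foo_roundtrip(void)
    {
            struct cdx_dword dw;

            CDX_POPULATE_DWORD_1(dw, FOO_FIELD, 0xab);
            return CDX_DWORD_FIELD(dw, FOO_FIELD); /* yields 0xab */
    }
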
diff --git a/include/linux/cdx/cdx_bus.h b/include/linux/cdx/cdx_bus.h
new file mode 100644
index 000000000000..b1ba97f6c9ad
--- /dev/null
+++ b/include/linux/cdx/cdx_bus.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * CDX bus public interface
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ *
+ */
+
+#ifndef _CDX_BUS_H_
+#define _CDX_BUS_H_
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/msi.h>
+
+#define MAX_CDX_DEV_RESOURCES 4
+#define CDX_CONTROLLER_ID_SHIFT 4
+#define CDX_BUS_NUM_MASK 0xF
+
+/* Forward declaration for CDX controller */
+struct cdx_controller;
+
+enum {
+ CDX_DEV_MSI_CONF,
+ CDX_DEV_BUS_MASTER_CONF,
+ CDX_DEV_RESET_CONF,
+ CDX_DEV_MSI_ENABLE,
+};
+
+struct cdx_msi_config {
+ u64 addr;
+ u32 data;
+ u16 msi_index;
+};
+
+struct cdx_device_config {
+ u8 type;
+ union {
+ struct cdx_msi_config msi;
+ bool bus_master_enable;
+ bool msi_enable;
+ };
+};
+
+typedef int (*cdx_bus_enable_cb)(struct cdx_controller *cdx, u8 bus_num);
+
+typedef int (*cdx_bus_disable_cb)(struct cdx_controller *cdx, u8 bus_num);
+
+typedef int (*cdx_scan_cb)(struct cdx_controller *cdx);
+
+typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx,
+ u8 bus_num, u8 dev_num,
+ struct cdx_device_config *dev_config);
+
+/**
+ * CDX_DEVICE - macro used to describe a specific CDX device
+ * @vend: the 16 bit CDX Vendor ID
+ * @dev: the 16 bit CDX Device ID
+ *
+ * This macro is used to create a struct cdx_device_id that matches a
+ * specific device. The subvendor and subdevice fields will be set to
+ * CDX_ANY_ID.
+ */
+#define CDX_DEVICE(vend, dev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = CDX_ANY_ID, .subdevice = CDX_ANY_ID
+
+/**
+ * CDX_DEVICE_DRIVER_OVERRIDE - macro used to describe a CDX device with
+ * override_only flags.
+ * @vend: the 16 bit CDX Vendor ID
+ * @dev: the 16 bit CDX Device ID
+ * @driver_override: the 32 bit CDX Device override_only
+ *
+ * This macro is used to create a struct cdx_device_id that matches only a
+ * driver_override device. The subvendor and subdevice fields will be set to
+ * CDX_ANY_ID.
+ */
+#define CDX_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
+ .vendor = (vend), .device = (dev), .subvendor = CDX_ANY_ID,\
+ .subdevice = CDX_ANY_ID, .override_only = (driver_override)
+
+/**
+ * struct cdx_ops - Callbacks supported by CDX controller.
+ * @bus_enable: enable bus on the controller
+ * @bus_disable: disable bus on the controller
+ * @scan: scan the devices on the controller
+ * @dev_configure: configuration like reset, master_enable,
+ *                 msi_config, etc., for a CDX device
+ */
+struct cdx_ops {
+ cdx_bus_enable_cb bus_enable;
+ cdx_bus_disable_cb bus_disable;
+ cdx_scan_cb scan;
+ cdx_dev_configure_cb dev_configure;
+};
+
+/**
+ * struct cdx_controller: CDX controller object
+ * @dev: Linux device associated with the CDX controller.
+ * @priv: private data
+ * @msi_domain: MSI domain
+ * @id: Controller ID
+ * @controller_registered: controller registered with bus
+ * @ops: CDX controller ops
+ */
+struct cdx_controller {
+ struct device *dev;
+ void *priv;
+ struct irq_domain *msi_domain;
+ u32 id;
+ bool controller_registered;
+ struct cdx_ops *ops;
+};
+
+/**
+ * struct cdx_device - CDX device object
+ * @dev: Linux driver model device object
+ * @cdx: CDX controller associated with the device
+ * @vendor: Vendor ID for CDX device
+ * @device: Device ID for CDX device
+ * @subsystem_vendor: Subsystem Vendor ID for CDX device
+ * @subsystem_device: Subsystem Device ID for CDX device
+ * @class: Class for the CDX device
+ * @revision: Revision of the CDX device
+ * @bus_num: Bus number for this CDX device
+ * @dev_num: Device number for this device
+ * @res: array of MMIO region entries
+ * @res_attr: resource binary attribute
+ * @debugfs_dir: debugfs directory for this device
+ * @res_count: number of valid MMIO regions
+ * @dma_mask: Default DMA mask
+ * @flags: CDX device flags
+ * @req_id: Requestor ID associated with CDX device
+ * @is_bus: whether this device represents a bus
+ * @enabled: whether this bus is enabled
+ * @msi_dev_id: MSI Device ID associated with CDX device
+ * @num_msi: Number of MSIs supported by the device
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
+ * @irqchip_lock: lock to synchronize irq/msi configuration
+ * @msi_write_pending: MSI write pending for this device
+ */
+struct cdx_device {
+ struct device dev;
+ struct cdx_controller *cdx;
+ u16 vendor;
+ u16 device;
+ u16 subsystem_vendor;
+ u16 subsystem_device;
+ u32 class;
+ u8 revision;
+ u8 bus_num;
+ u8 dev_num;
+ struct resource res[MAX_CDX_DEV_RESOURCES];
+ struct bin_attribute *res_attr[MAX_CDX_DEV_RESOURCES];
+ struct dentry *debugfs_dir;
+ u8 res_count;
+ u64 dma_mask;
+ u16 flags;
+ u32 req_id;
+ bool is_bus;
+ bool enabled;
+ u32 msi_dev_id;
+ u32 num_msi;
+ const char *driver_override;
+ struct mutex irqchip_lock;
+ bool msi_write_pending;
+};
+
+#define to_cdx_device(_dev) \
+ container_of(_dev, struct cdx_device, dev)
+
+#define cdx_resource_start(dev, num) ((dev)->res[(num)].start)
+#define cdx_resource_end(dev, num) ((dev)->res[(num)].end)
+#define cdx_resource_flags(dev, num) ((dev)->res[(num)].flags)
+#define cdx_resource_len(dev, num) \
+ ((cdx_resource_start((dev), (num)) == 0 && \
+ cdx_resource_end((dev), (num)) == \
+ cdx_resource_start((dev), (num))) ? 0 : \
+ (cdx_resource_end((dev), (num)) - \
+ cdx_resource_start((dev), (num)) + 1))
+/**
+ * struct cdx_driver - CDX device driver
+ * @driver: Generic device driver
+ * @match_id_table: table of supported device matching Ids
+ * @probe: Function called when a device is added
+ * @remove: Function called when a device is removed
+ * @shutdown: Function called at shutdown time to quiesce the device
+ * @reset_prepare: Function called before the device is reset to notify driver
+ * @reset_done: Function called after reset is complete to notify driver
+ * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
+ * For most device drivers, no need to care about this flag
+ * as long as all DMAs are handled through the kernel DMA API.
+ * For some special ones, for example VFIO drivers, they know
+ * how to manage the DMA themselves and set this flag so that
+ * the IOMMU layer will allow them to setup and manage their
+ * own I/O address space.
+ */
+struct cdx_driver {
+ struct device_driver driver;
+ const struct cdx_device_id *match_id_table;
+ int (*probe)(struct cdx_device *dev);
+ int (*remove)(struct cdx_device *dev);
+ void (*shutdown)(struct cdx_device *dev);
+ void (*reset_prepare)(struct cdx_device *dev);
+ void (*reset_done)(struct cdx_device *dev);
+ bool driver_managed_dma;
+};
+
+#define to_cdx_driver(_drv) \
+ container_of_const(_drv, struct cdx_driver, driver)
+
+/* Macro to avoid include chaining to get THIS_MODULE */
+#define cdx_driver_register(drv) \
+ __cdx_driver_register(drv, THIS_MODULE)
+
+/**
+ * __cdx_driver_register - registers a CDX device driver
+ * @cdx_driver: CDX driver to register
+ * @owner: module owner
+ *
+ * Return: -errno on failure, 0 on success.
+ */
+int __must_check __cdx_driver_register(struct cdx_driver *cdx_driver,
+ struct module *owner);
+
+/**
+ * cdx_driver_unregister - unregisters a device driver from the
+ * CDX bus.
+ * @cdx_driver: CDX driver to register
+ */
+void cdx_driver_unregister(struct cdx_driver *cdx_driver);
+
+extern const struct bus_type cdx_bus_type;
+
+/**
+ * cdx_dev_reset - Reset CDX device
+ * @dev: device pointer
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_dev_reset(struct device *dev);
+
+/**
+ * cdx_set_master - enables bus-mastering for CDX device
+ * @cdx_dev: the CDX device to enable
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_set_master(struct cdx_device *cdx_dev);
+
+/**
+ * cdx_clear_master - disables bus-mastering for CDX device
+ * @cdx_dev: the CDX device to disable
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_clear_master(struct cdx_device *cdx_dev);
+
+#ifdef CONFIG_GENERIC_MSI_IRQ
+/**
+ * cdx_enable_msi - Enable MSI for the CDX device.
+ * @cdx_dev: device pointer
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_enable_msi(struct cdx_device *cdx_dev);
+
+/**
+ * cdx_disable_msi - Disable MSI for the CDX device.
+ * @cdx_dev: device pointer
+ */
+void cdx_disable_msi(struct cdx_device *cdx_dev);
+
+#else /* CONFIG_GENERIC_MSI_IRQ */
+
+static inline int cdx_enable_msi(struct cdx_device *cdx_dev)
+{
+ return -ENODEV;
+}
+
+static inline void cdx_disable_msi(struct cdx_device *cdx_dev)
+{
+}
+
+#endif /* CONFIG_GENERIC_MSI_IRQ */
+
+#endif /* _CDX_BUS_H_ */
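
A hypothetical minimal CDX driver against this interface; the vendor/device IDs and names are illustrative:

    /* Hypothetical CDX driver skeleton. */
    static const struct cdx_device_id foo_cdx_ids[] = {
            { CDX_DEVICE(0x10ee, 0x8084) }, /* illustrative IDs */
            { },
    };

    static int foo_cdx_probe(struct cdx_device *cdx_dev)
    {
            /* enable bus mastering before any DMA */
            return cdx_set_master(cdx_dev);
    }

    static struct cdx_driver foo_cdx_driver = {
            .driver = { .name = "foo-cdx" },
            .match_id_table = foo_cdx_ids,
            .probe = foo_cdx_probe,
    };
    /* registered from module init via cdx_driver_register(&foo_cdx_driver) */
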
diff --git a/include/linux/cdx/edac_cdx_pcol.h b/include/linux/cdx/edac_cdx_pcol.h
new file mode 100644
index 000000000000..749db33bb482
--- /dev/null
+++ b/include/linux/cdx/edac_cdx_pcol.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Driver for AMD network controllers and boards
+ *
+ * Copyright (C) 2021, Xilinx, Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef MC_CDX_PCOL_H
+#define MC_CDX_PCOL_H
+#include <linux/cdx/mcdi.h>
+
+#define MC_CMD_EDAC_GET_DDR_CONFIG_OUT_WORD_LENGTH_LEN 4
+/* Number of registers for the DDR controller */
+#define MC_CMD_GET_DDR_CONFIG_OFST 4
+#define MC_CMD_GET_DDR_CONFIG_LEN 4
+
+/***********************************/
+/* MC_CMD_EDAC_GET_DDR_CONFIG
+ * Provides detailed configuration for the DDR controller of the given index.
+ */
+#define MC_CMD_EDAC_GET_DDR_CONFIG 0x3
+
+/* MC_CMD_EDAC_GET_DDR_CONFIG_IN msgrequest */
+#define MC_CMD_EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX_OFST 0
+#define MC_CMD_EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX_LEN 4
+
+#endif /* MC_CDX_PCOL_H */
diff --git a/include/linux/cdx/mcdi.h b/include/linux/cdx/mcdi.h
new file mode 100644
index 000000000000..74075305cba4
--- /dev/null
+++ b/include/linux/cdx/mcdi.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_MCDI_H
+#define CDX_MCDI_H
+
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/rpmsg.h>
+
+#include "linux/cdx/bitfield.h"
+
+/**
+ * enum cdx_mcdi_mode - MCDI transaction mode
+ * @MCDI_MODE_EVENTS: wait for an mcdi response callback.
+ * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
+ */
+enum cdx_mcdi_mode {
+ MCDI_MODE_EVENTS,
+ MCDI_MODE_FAIL,
+};
+
+#define MCDI_RPC_TIMEOUT (10 * HZ)
+#define MCDI_RPC_LONG_TIMEOUT (60 * HZ)
+#define MCDI_RPC_POST_RST_TIME (10 * HZ)
+
+/**
+ * enum cdx_mcdi_cmd_state - State for an individual MCDI command
+ * @MCDI_STATE_QUEUED: Command not started and is waiting to run.
+ * @MCDI_STATE_RETRY: Command was submitted and MC rejected with no resources,
+ * as the MC has too many outstanding commands. The command will be retried once
+ * another command returns.
+ * @MCDI_STATE_RUNNING: Command was accepted and is running.
+ * @MCDI_STATE_RUNNING_CANCELLED: Command is running but the issuer cancelled
+ * the command.
+ * @MCDI_STATE_FINISHED: Processing of this command has completed.
+ */
+
+enum cdx_mcdi_cmd_state {
+ MCDI_STATE_QUEUED,
+ MCDI_STATE_RETRY,
+ MCDI_STATE_RUNNING,
+ MCDI_STATE_RUNNING_CANCELLED,
+ MCDI_STATE_FINISHED,
+};
+
+/**
+ * struct cdx_mcdi - CDX MCDI firmware interface, used to interact
+ * with the CDX controller.
+ * @mcdi: MCDI interface
+ * @mcdi_ops: MCDI operations
+ * @r5_rproc: R5 remoteproc device handle
+ * @rpdev: RPMsg device
+ * @ept: RPMsg endpoint
+ * @work: Post probe work
+ */
+struct cdx_mcdi {
+ /* MCDI interface */
+ struct cdx_mcdi_data *mcdi;
+ const struct cdx_mcdi_ops *mcdi_ops;
+
+ struct rproc *r5_rproc;
+ struct rpmsg_device *rpdev;
+ struct rpmsg_endpoint *ept;
+ struct work_struct work;
+};
+
+struct cdx_mcdi_ops {
+ void (*mcdi_request)(struct cdx_mcdi *cdx,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len);
+ unsigned int (*mcdi_rpc_timeout)(struct cdx_mcdi *cdx, unsigned int cmd);
+};
+
+typedef void cdx_mcdi_async_completer(struct cdx_mcdi *cdx,
+ unsigned long cookie, int rc,
+ struct cdx_dword *outbuf,
+ size_t outlen_actual);
+
+/**
+ * struct cdx_mcdi_cmd - An outstanding MCDI command
+ * @ref: Reference count. There will be one reference if the command is
+ * in the mcdi_iface cmd_list, another if it's on a cleanup list,
+ * and a third if it's queued in the work queue.
+ * @list: The data for this entry in mcdi->cmd_list
+ * @cleanup_list: The data for this entry in a cleanup list
+ * @work: The work item for this command, queued in mcdi->workqueue
+ * @mcdi: The mcdi_iface for this command
+ * @state: The state of this command
+ * @inlen: inbuf length
+ * @inbuf: Input buffer
+ * @quiet: Whether to silence errors
+ * @reboot_seen: Whether a reboot has been seen during this command,
+ * to prevent duplicates
+ * @seq: Sequence number
+ * @started: Jiffies this command was started at
+ * @cookie: Context for completion function
+ * @completer: Completion function
+ * @handle: Command handle
+ * @cmd: Command number
+ * @rc: Return code
+ * @outlen: Length of output buffer
+ * @outbuf: Output buffer
+ */
+struct cdx_mcdi_cmd {
+ struct kref ref;
+ struct list_head list;
+ struct list_head cleanup_list;
+ struct work_struct work;
+ struct cdx_mcdi_iface *mcdi;
+ enum cdx_mcdi_cmd_state state;
+ size_t inlen;
+ const struct cdx_dword *inbuf;
+ bool quiet;
+ bool reboot_seen;
+ u8 seq;
+ unsigned long started;
+ unsigned long cookie;
+ cdx_mcdi_async_completer *completer;
+ unsigned int handle;
+ unsigned int cmd;
+ int rc;
+ size_t outlen;
+ struct cdx_dword *outbuf;
+ /* followed by inbuf data if necessary */
+};
+
+/**
+ * struct cdx_mcdi_iface - MCDI protocol context
+ * @cdx: The associated CDX MCDI firmware interface
+ * @iface_lock: Serialise access to this structure
+ * @outstanding_cleanups: Count of cleanups
+ * @cmd_list: List of outstanding and running commands
+ * @workqueue: Workqueue used for delayed processing
+ * @cmd_complete_wq: Waitqueue for command completion
+ * @db_held_by: Command the MC doorbell is in use by
+ * @seq_held_by: Command each sequence number is in use by
+ * @prev_handle: The last used command handle
+ * @mode: Poll for mcdi completion, or wait for an mcdi_event
+ * @prev_seq: The last used sequence number
+ * @new_epoch: Indicates start of day or start of MC reboot recovery
+ */
+struct cdx_mcdi_iface {
+ struct cdx_mcdi *cdx;
+ /* Serialise access */
+ struct mutex iface_lock;
+ unsigned int outstanding_cleanups;
+ struct list_head cmd_list;
+ struct workqueue_struct *workqueue;
+ wait_queue_head_t cmd_complete_wq;
+ struct cdx_mcdi_cmd *db_held_by;
+ struct cdx_mcdi_cmd *seq_held_by[16];
+ unsigned int prev_handle;
+ enum cdx_mcdi_mode mode;
+ u8 prev_seq;
+ bool new_epoch;
+};
+
+/**
+ * struct cdx_mcdi_data - extra state for CDX controllers that implement MCDI
+ * @iface: Interface/protocol state
+ * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
+ */
+struct cdx_mcdi_data {
+ struct cdx_mcdi_iface iface;
+ u32 fn_flags;
+};
+
+void cdx_mcdi_finish(struct cdx_mcdi *cdx);
+int cdx_mcdi_init(struct cdx_mcdi *cdx);
+void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len);
+int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ struct cdx_dword *outbuf, size_t outlen, size_t *outlen_actual);
+
+/*
+ * We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned.
+ */
+#define MCDI_DECLARE_BUF(_name, _len) struct cdx_dword _name[DIV_ROUND_UP(_len, 4)] = {{0}}
+#define _MCDI_PTR(_buf, _offset) \
+ ((u8 *)(_buf) + (_offset))
+#define MCDI_PTR(_buf, _field) \
+ _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
+#define _MCDI_CHECK_ALIGN(_ofst, _align) \
+ ((void)BUILD_BUG_ON_ZERO((_ofst) & ((_align) - 1)), \
+ (_ofst))
+#define _MCDI_DWORD(_buf, _field) \
+ ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+
+#define MCDI_SET_DWORD(_buf, _field, _value) \
+ CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), CDX_DWORD, _value)
+#define MCDI_DWORD(_buf, _field) \
+ CDX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), CDX_DWORD)
+#endif /* CDX_MCDI_H */
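
A hypothetical request built with the buffer helpers, using the EDAC command defined in the pcol header above:

    /* Hypothetical MCDI call: fetch DDR config for one controller. */
    static int foo_get_ddr_config(struct cdx_mcdi *cdx, u32 index,
                                  struct cdx_dword *outbuf, size_t outlen)
    {
            MCDI_DECLARE_BUF(inbuf,
                    MC_CMD_EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX_LEN);

            MCDI_SET_DWORD(inbuf, EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX,
                           index);
            return cdx_mcdi_rpc(cdx, MC_CMD_EDAC_GET_DDR_CONFIG,
                                inbuf, sizeof(inbuf), outbuf, outlen, NULL);
    }
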
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 71b5d481c653..6b138fa97db8 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -50,7 +50,7 @@ struct ceph_auth_client_ops {
* another request.
*/
int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end);
- int (*handle_reply)(struct ceph_auth_client *ac, int result,
+ int (*handle_reply)(struct ceph_auth_client *ac, u64 global_id,
void *buf, void *end, u8 *session_key,
int *session_key_len, u8 *con_secret,
int *con_secret_len);
@@ -104,6 +104,8 @@ struct ceph_auth_client {
struct mutex mutex;
};
+void ceph_auth_set_global_id(struct ceph_auth_client *ac, u64 global_id);
+
struct ceph_auth_client *ceph_auth_init(const char *name,
const struct ceph_crypto_key *key,
const int *con_modes);
diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h
index d5a5da838caf..5f904591fa5f 100644
--- a/include/linux/ceph/ceph_debug.h
+++ b/include/linux/ceph/ceph_debug.h
@@ -19,12 +19,21 @@
pr_debug("%.*s %12.12s:%-4d : " fmt, \
8 - (int)sizeof(KBUILD_MODNAME), " ", \
kbasename(__FILE__), __LINE__, ##__VA_ARGS__)
+# define doutc(client, fmt, ...) \
+ pr_debug("%.*s %12.12s:%-4d : [%pU %llu] " fmt, \
+ 8 - (int)sizeof(KBUILD_MODNAME), " ", \
+ kbasename(__FILE__), __LINE__, \
+ &client->fsid, client->monc.auth->global_id, \
+ ##__VA_ARGS__)
# else
/* faux printk call just to see any compiler warnings. */
-# define dout(fmt, ...) do { \
- if (0) \
- printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
- } while (0)
+# define dout(fmt, ...) \
+ no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+# define doutc(client, fmt, ...) \
+ no_printk(KERN_DEBUG "[%pU %llu] " fmt, \
+ &client->fsid, \
+ client->monc.auth->global_id, \
+ ##__VA_ARGS__)
# endif
#else
@@ -33,7 +42,32 @@
* or, just wrap pr_debug
*/
# define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__)
+# define doutc(client, fmt, ...) \
+ pr_debug(" [%pU %llu] %s: " fmt, &client->fsid, \
+ client->monc.auth->global_id, __func__, ##__VA_ARGS__)
#endif
+#define pr_notice_client(client, fmt, ...) \
+ pr_notice("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_info_client(client, fmt, ...) \
+ pr_info("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_client(client, fmt, ...) \
+ pr_warn("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_once_client(client, fmt, ...) \
+ pr_warn_once("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_err_client(client, fmt, ...) \
+ pr_err("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_ratelimited_client(client, fmt, ...) \
+ pr_warn_ratelimited("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_err_ratelimited_client(client, fmt, ...) \
+ pr_err_ratelimited("[%pU %llu]: " fmt, &client->fsid, \
+ client->monc.auth->global_id, ##__VA_ARGS__)
+
#endif
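
The new client-aware macros prefix every message with the client's fsid and global_id; hypothetical call sites, with cl being the usual struct ceph_client pointer:

    /* Hypothetical call sites for the client-aware debug macros. */
    static void foo_report(struct ceph_client *cl, int mds)
    {
            doutc(cl, "mds%d reconnect start\n", mds); /* debug path */
            pr_warn_client(cl, "mds%d hung\n", mds);   /* always tagged */
    }
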
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index e41a811026f6..c7f2c63b3bc3 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -28,8 +28,8 @@
#define CEPH_INO_ROOT 1
-#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
-#define CEPH_INO_DOTDOT 3 /* used by ceph fuse for parent (..) */
+#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
+#define CEPH_INO_GLOBAL_SNAPREALM 3 /* global dummy snaprealm */
/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
#define CEPH_MAX_MON 31
@@ -299,8 +299,11 @@ enum {
CEPH_SESSION_FLUSHMSG_ACK,
CEPH_SESSION_FORCE_RO,
CEPH_SESSION_REJECT,
+ CEPH_SESSION_REQUEST_FLUSH_MDLOG,
};
+#define CEPH_SESSION_BLOCKLISTED (1 << 0) /* session blocklisted */
+
extern const char *ceph_session_op_name(int op);
struct ceph_mds_session_head {
@@ -325,6 +328,7 @@ enum {
CEPH_MDS_OP_LOOKUPPARENT = 0x00103,
CEPH_MDS_OP_LOOKUPINO = 0x00104,
CEPH_MDS_OP_LOOKUPNAME = 0x00105,
+ CEPH_MDS_OP_GETVXATTR = 0x00106,
CEPH_MDS_OP_SETXATTR = 0x01105,
CEPH_MDS_OP_RMXATTR = 0x01106,
@@ -353,16 +357,26 @@ enum {
CEPH_MDS_OP_RENAMESNAP = 0x01403,
};
-extern const char *ceph_mds_op_name(int op);
+#define IS_CEPH_MDS_OP_NEWINODE(op) (op == CEPH_MDS_OP_CREATE || \
+ op == CEPH_MDS_OP_MKNOD || \
+ op == CEPH_MDS_OP_MKDIR || \
+ op == CEPH_MDS_OP_SYMLINK)
+extern const char *ceph_mds_op_name(int op);
-#define CEPH_SETATTR_MODE 1
-#define CEPH_SETATTR_UID 2
-#define CEPH_SETATTR_GID 4
-#define CEPH_SETATTR_MTIME 8
-#define CEPH_SETATTR_ATIME 16
-#define CEPH_SETATTR_SIZE 32
-#define CEPH_SETATTR_CTIME 64
+#define CEPH_SETATTR_MODE (1 << 0)
+#define CEPH_SETATTR_UID (1 << 1)
+#define CEPH_SETATTR_GID (1 << 2)
+#define CEPH_SETATTR_MTIME (1 << 3)
+#define CEPH_SETATTR_ATIME (1 << 4)
+#define CEPH_SETATTR_SIZE (1 << 5)
+#define CEPH_SETATTR_CTIME (1 << 6)
+#define CEPH_SETATTR_MTIME_NOW (1 << 7)
+#define CEPH_SETATTR_ATIME_NOW (1 << 8)
+#define CEPH_SETATTR_BTIME (1 << 9)
+#define CEPH_SETATTR_KILL_SGUID (1 << 10)
+#define CEPH_SETATTR_FSCRYPT_AUTH (1 << 11)
+#define CEPH_SETATTR_FSCRYPT_FILE (1 << 12)
/*
* Ceph setxattr request flags.
@@ -429,9 +443,9 @@ union ceph_mds_request_args {
__le32 stripe_unit; /* layout for newly created file */
__le32 stripe_count; /* ... */
__le32 object_size;
- __le32 file_replication;
- __le32 mask; /* CEPH_CAP_* */
- __le32 old_size;
+ __le32 pool;
+ __le32 mask; /* CEPH_CAP_* */
+ __le64 old_size;
} __attribute__ ((packed)) open;
struct {
__le32 flags;
@@ -475,7 +489,7 @@ union ceph_mds_request_args_ext {
#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */
#define CEPH_MDS_FLAG_ASYNC 4 /* request is asynchronous */
-struct ceph_mds_request_head_old {
+struct ceph_mds_request_head_legacy {
__le64 oldest_client_tid;
__le32 mdsmap_epoch; /* on client */
__le32 flags; /* CEPH_MDS_FLAG_* */
@@ -488,20 +502,26 @@ struct ceph_mds_request_head_old {
union ceph_mds_request_args args;
} __attribute__ ((packed));
-#define CEPH_MDS_REQUEST_HEAD_VERSION 1
+#define CEPH_MDS_REQUEST_HEAD_VERSION 3
struct ceph_mds_request_head {
__le16 version; /* struct version */
__le64 oldest_client_tid;
__le32 mdsmap_epoch; /* on client */
__le32 flags; /* CEPH_MDS_FLAG_* */
- __u8 num_retry, num_fwd; /* count retry, fwd attempts */
+ __u8 num_retry, num_fwd; /* legacy count retry and fwd attempts */
__le16 num_releases; /* # include cap/lease release records */
__le32 op; /* mds op code */
__le32 caller_uid, caller_gid;
__le64 ino; /* use this ino for openc, mkdir, mknod,
etc. (if replaying) */
union ceph_mds_request_args_ext args;
+
+ __le32 ext_num_retry; /* new count retry attempts */
+ __le32 ext_num_fwd; /* new count fwd attempts */
+
+ __le32 struct_len; /* to store size of struct ceph_mds_request_head */
+ __le32 owner_uid, owner_gid; /* used for OPs which create inodes */
} __attribute__ ((packed));
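(A minimal sketch, assuming the obvious compatibility scheme for the version-3 head: a client mirrors the retry/forward counts into both the legacy 8-bit fields and the new 32-bit ext_* fields so older MDS daemons still decode a sane, saturating value. Function and parameter names are assumptions.)

static void fill_retry_counters(struct ceph_mds_request_head *head,
				u32 retries, u32 fwds)
{
	head->ext_num_retry = cpu_to_le32(retries);
	head->ext_num_fwd = cpu_to_le32(fwds);
	/* the legacy 8-bit counters saturate at 255 */
	head->num_retry = min_t(u32, retries, 255);
	head->num_fwd = min_t(u32, fwds, 255);
}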
/* cap/lease release record */
@@ -764,7 +784,7 @@ struct ceph_mds_caps {
__le32 xattr_len;
__le64 xattr_version;
- /* filelock */
+ /* a union of non-export and export bodies. */
__le64 size, max_size, truncate_size;
__le32 truncate_seq;
struct ceph_timespec mtime, atime, ctime;
@@ -774,7 +794,7 @@ struct ceph_mds_caps {
struct ceph_mds_cap_peer {
__le64 cap_id;
- __le32 seq;
+ __le32 issue_seq;
__le32 mseq;
__le32 mds;
__u8 flags;
@@ -788,7 +808,7 @@ struct ceph_mds_cap_release {
struct ceph_mds_cap_item {
__le64 ino;
__le64 cap_id;
- __le32 migrate_seq, seq;
+ __le32 migrate_seq, issue_seq;
} __attribute__ ((packed));
#define CEPH_MDS_LEASE_REVOKE 1 /* mds -> client */
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index 04f3ace5787b..8fc1aed64113 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -6,7 +6,7 @@
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/time.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/ceph/types.h>
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 409d8c29bc4f..63e0e2aa1ce9 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -4,7 +4,7 @@
#include <linux/ceph/ceph_debug.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/backing-dev.h>
#include <linux/completion.h>
#include <linux/exportfs.h>
@@ -35,6 +35,7 @@
#define CEPH_OPT_TCP_NODELAY (1<<4) /* TCP_NODELAY on TCP sockets */
#define CEPH_OPT_NOMSGSIGN (1<<5) /* don't sign msgs (msgr1) */
#define CEPH_OPT_ABORT_ON_FULL (1<<6) /* abort w/ ENOSPC when full */
+#define CEPH_OPT_RXBOUNCE (1<<7) /* double-buffer read data */
#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
@@ -98,16 +99,6 @@ struct ceph_options {
#define CEPH_AUTH_NAME_DEFAULT "guest"
-/* mount state */
-enum {
- CEPH_MOUNT_MOUNTING,
- CEPH_MOUNT_MOUNTED,
- CEPH_MOUNT_UNMOUNTING,
- CEPH_MOUNT_UNMOUNTED,
- CEPH_MOUNT_SHUTDOWN,
- CEPH_MOUNT_RECOVER,
-};
-
static inline unsigned long ceph_timeout_jiffies(unsigned long timeout)
{
return timeout ?: MAX_SCHEDULE_TIMEOUT;
@@ -283,6 +274,7 @@ DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld)
extern struct kmem_cache *ceph_inode_cachep;
extern struct kmem_cache *ceph_cap_cachep;
+extern struct kmem_cache *ceph_cap_snap_cachep;
extern struct kmem_cache *ceph_cap_flush_cachep;
extern struct kmem_cache *ceph_dentry_cachep;
extern struct kmem_cache *ceph_file_cachep;
@@ -295,13 +287,13 @@ extern bool libceph_compatible(void *data);
extern const char *ceph_msg_type_name(int type);
extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
-extern void *ceph_kvmalloc(size_t size, gfp_t flags);
+extern int ceph_parse_fsid(const char *str, struct ceph_fsid *fsid);
struct fs_parameter;
struct fc_log;
struct ceph_options *ceph_alloc_options(void);
int ceph_parse_mon_ips(const char *buf, size_t len, struct ceph_options *opt,
- struct fc_log *l);
+ struct fc_log *l, char delim);
int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
struct fc_log *l);
int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
@@ -314,8 +306,7 @@ struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client);
u64 ceph_client_gid(struct ceph_client *client);
extern void ceph_destroy_client(struct ceph_client *client);
extern void ceph_reset_client_addr(struct ceph_client *client);
-extern int __ceph_open_session(struct ceph_client *client,
- unsigned long started);
+extern int __ceph_open_session(struct ceph_client *client);
extern int ceph_open_session(struct ceph_client *client);
int ceph_wait_for_latest_osdmap(struct ceph_client *client,
unsigned long timeout);
@@ -325,12 +316,6 @@ extern void ceph_release_page_vector(struct page **pages, int num_pages);
extern void ceph_put_page_vector(struct page **pages, int num_pages,
bool dirty);
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
-extern int ceph_copy_user_to_page_vector(struct page **pages,
- const void __user *data,
- loff_t off, size_t len);
-extern void ceph_copy_to_page_vector(struct page **pages,
- const void *data,
- loff_t off, size_t len);
extern void ceph_copy_from_page_vector(struct page **pages,
void *data,
loff_t off, size_t len);
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
deleted file mode 100644
index 523fd0452856..000000000000
--- a/include/linux/ceph/mdsmap.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _FS_CEPH_MDSMAP_H
-#define _FS_CEPH_MDSMAP_H
-
-#include <linux/bug.h>
-#include <linux/ceph/types.h>
-
-/*
- * mds map - describe servers in the mds cluster.
- *
- * we limit fields to those the client actually cares about

- */
-struct ceph_mds_info {
- u64 global_id;
- struct ceph_entity_addr addr;
- s32 state;
- int num_export_targets;
- bool laggy;
- u32 *export_targets;
-};
-
-struct ceph_mdsmap {
- u32 m_epoch, m_client_epoch, m_last_failure;
- u32 m_root;
- u32 m_session_timeout; /* seconds */
- u32 m_session_autoclose; /* seconds */
- u64 m_max_file_size;
- u32 m_max_mds; /* expected up:active mds number */
- u32 m_num_active_mds; /* actual up:active mds number */
- u32 possible_max_rank; /* possible max rank index */
- struct ceph_mds_info *m_info;
-
- /* which object pools file data can be stored in */
- int m_num_data_pg_pools;
- u64 *m_data_pg_pools;
- u64 m_cas_pg_pool;
-
- bool m_enabled;
- bool m_damaged;
- int m_num_laggy;
-};
-
-static inline struct ceph_entity_addr *
-ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
-{
- if (w >= m->possible_max_rank)
- return NULL;
- return &m->m_info[w].addr;
-}
-
-static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
-{
- BUG_ON(w < 0);
- if (w >= m->possible_max_rank)
- return CEPH_MDS_STATE_DNE;
- return m->m_info[w].state;
-}
-
-static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
-{
- if (w >= 0 && w < m->possible_max_rank)
- return m->m_info[w].laggy;
- return false;
-}
-
-extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
-struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2);
-extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
-extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);
-
-#endif
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 0e6e9ad3c3bf..6aa4c6478c9f 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -2,6 +2,7 @@
#ifndef __FS_CEPH_MESSENGER_H
#define __FS_CEPH_MESSENGER_H
+#include <crypto/sha2.h>
#include <linux/bvec.h>
#include <linux/crypto.h>
#include <linux/kref.h>
@@ -17,6 +18,7 @@
struct ceph_msg;
struct ceph_connection;
+struct ceph_msg_data_cursor;
/*
* Ceph defines these callbacks for handling connection events.
@@ -70,6 +72,30 @@ struct ceph_connection_operations {
int used_proto, int result,
const int *allowed_protos, int proto_cnt,
const int *allowed_modes, int mode_cnt);
+
+ /**
+ * sparse_read: read sparse data
+ * @con: connection we're reading from
+ * @cursor: data cursor for reading extents
+ * @buf: optional buffer to read into
+ *
+ * This should be called more than once, each time setting up to
+ * receive an extent into the current cursor position, and zeroing
+ * the holes between them.
+ *
+ * Returns amount of data to be read (in bytes), 0 if reading is
+ * complete, or -errno if there was an error.
+ *
+ * If @buf is set on a >0 return, then the data should be read into
+ * the provided buffer. Otherwise, it should be read into the cursor.
+ *
+ * The sparse read operation is expected to initialize the cursor
+ * with a length covering up to the end of the last extent.
+ */
+ int (*sparse_read)(struct ceph_connection *con,
+ struct ceph_msg_data_cursor *cursor,
+ char **buf);
+
};
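(A minimal sketch of the calling convention the comment above describes: keep invoking ->sparse_read() until it returns 0 (done) or a negative errno. receive_bytes() is an assumed helper standing in for the messenger's actual receive path.)

static int drain_sparse_read(struct ceph_connection *con,
			     struct ceph_msg_data_cursor *cursor)
{
	char *buf = NULL;
	int len;

	while ((len = con->ops->sparse_read(con, cursor, &buf)) > 0) {
		/* data goes into @buf if the op set it, else into the cursor */
		int ret = receive_bytes(con, cursor, buf, len);

		if (ret < 0)
			return ret;
	}
	return len;	/* 0 on completion, -errno on failure */
}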
/* use format string %s%lld */
@@ -98,6 +124,7 @@ enum ceph_msg_data_type {
CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */
#endif /* CONFIG_BLOCK */
CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */
+ CEPH_MSG_DATA_ITER, /* data source/destination is an iov_iter */
};
#ifdef CONFIG_BLOCK
@@ -199,6 +226,7 @@ struct ceph_msg_data {
bool own_pages;
};
struct ceph_pagelist *pagelist;
+ struct iov_iter iter;
};
};
@@ -207,7 +235,7 @@ struct ceph_msg_data_cursor {
struct ceph_msg_data *data; /* current data item */
size_t resid; /* bytes not yet consumed */
- bool last_piece; /* current is last piece */
+ int sr_resid; /* residual sparse_read len */
bool need_crc; /* crc update needed */
union {
#ifdef CONFIG_BLOCK
@@ -223,6 +251,10 @@ struct ceph_msg_data_cursor {
struct page *page; /* page from list */
size_t offset; /* bytes from list */
};
+ struct {
+ struct iov_iter iov_iter;
+ unsigned int lastlen;
+ };
};
};
@@ -252,6 +284,7 @@ struct ceph_msg {
struct kref kref;
bool more_to_follow;
bool needs_out_seq;
+ u64 sparse_read_total;
int front_alloc_len;
struct ceph_msgpool *pool;
@@ -310,6 +343,10 @@ struct ceph_connection_v1_info {
int in_base_pos; /* bytes read */
+ /* sparse reads */
+ struct kvec in_sr_kvec; /* current location to receive into */
+ u64 in_sr_len; /* amount of data in this extent */
+
/* message in temps */
u8 in_tag; /* protocol control byte */
struct ceph_msg_header in_hdr;
@@ -376,13 +413,18 @@ struct ceph_connection_v2_info {
struct ceph_msg_data_cursor in_cursor;
struct ceph_msg_data_cursor out_cursor;
- struct crypto_shash *hmac_tfm; /* post-auth signature */
+ struct hmac_sha256_key hmac_key; /* post-auth signature */
+ bool hmac_key_set;
struct crypto_aead *gcm_tfm; /* on-wire encryption */
struct aead_request *gcm_req;
struct crypto_wait gcm_wait;
struct ceph_gcm_nonce in_gcm_nonce;
struct ceph_gcm_nonce out_gcm_nonce;
+ struct page **in_enc_pages;
+ int in_enc_page_cnt;
+ int in_enc_resid;
+ int in_enc_i;
struct page **out_enc_pages;
int out_enc_page_cnt;
int out_enc_resid;
@@ -392,6 +434,7 @@ struct ceph_connection_v2_info {
void *conn_bufs[16];
int conn_buf_cnt;
+ int data_len_remain;
struct kvec in_sign_kvecs[8];
struct kvec out_sign_kvecs[8];
@@ -457,6 +500,7 @@ struct ceph_connection {
struct ceph_msg *out_msg; /* sending message (== tail of
out_sent) */
+ struct page *bounce_page;
u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */
struct timespec64 last_keepalive_ack; /* keepalive2 ack stamp */
@@ -493,8 +537,7 @@ void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq);
void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor,
struct ceph_msg *msg, size_t length);
struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
- size_t *page_offset, size_t *length,
- bool *last_piece);
+ size_t *page_offset, size_t *length);
void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes);
u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset,
@@ -507,12 +550,12 @@ void ceph_addr_set_port(struct ceph_entity_addr *addr, int p);
void ceph_con_process_message(struct ceph_connection *con);
int ceph_con_in_msg_alloc(struct ceph_connection *con,
struct ceph_msg_header *hdr, int *skip);
-void ceph_con_get_out_msg(struct ceph_connection *con);
+struct ceph_msg *ceph_con_get_out_msg(struct ceph_connection *con);
/* messenger_v1.c */
int ceph_con_v1_try_read(struct ceph_connection *con);
int ceph_con_v1_try_write(struct ceph_connection *con);
-void ceph_con_v1_revoke(struct ceph_connection *con);
+void ceph_con_v1_revoke(struct ceph_connection *con, struct ceph_msg *msg);
void ceph_con_v1_revoke_incoming(struct ceph_connection *con);
bool ceph_con_v1_opened(struct ceph_connection *con);
void ceph_con_v1_reset_session(struct ceph_connection *con);
@@ -521,7 +564,7 @@ void ceph_con_v1_reset_protocol(struct ceph_connection *con);
/* messenger_v2.c */
int ceph_con_v2_try_read(struct ceph_connection *con);
int ceph_con_v2_try_write(struct ceph_connection *con);
-void ceph_con_v2_revoke(struct ceph_connection *con);
+void ceph_con_v2_revoke(struct ceph_connection *con, struct ceph_msg *msg);
void ceph_con_v2_revoke_incoming(struct ceph_connection *con);
bool ceph_con_v2_opened(struct ceph_connection *con);
void ceph_con_v2_reset_session(struct ceph_connection *con);
@@ -532,7 +575,7 @@ extern const char *ceph_pr_addr(const struct ceph_entity_addr *addr);
extern int ceph_parse_ips(const char *c, const char *end,
struct ceph_entity_addr *addr,
- int max_count, int *count);
+ int max_count, int *count, char delim);
extern int ceph_msgr_init(void);
extern void ceph_msgr_exit(void);
@@ -570,6 +613,8 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
#endif /* CONFIG_BLOCK */
void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
struct ceph_bvec_iter *bvec_pos);
+void ceph_msg_data_add_iter(struct ceph_msg *msg,
+ struct iov_iter *iter);
struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
gfp_t flags, bool can_fail);
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index b658961156a0..7a9a40163c0f 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -19,7 +19,7 @@ struct ceph_monmap {
struct ceph_fsid fsid;
u32 epoch;
u32 num_mon;
- struct ceph_entity_inst mon_inst[];
+ struct ceph_entity_inst mon_inst[] __counted_by(num_mon);
};
struct ceph_mon_client;
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 83fa08a06507..50b14a5661c7 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -29,14 +29,63 @@ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *);
#define CEPH_HOMELESS_OSD -1
-/* a given osd we're communicating with */
+/*
+ * A single extent in a SPARSE_READ reply.
+ *
+ * Note that these come from the OSD as little-endian values. On BE arches,
+ * we convert them in-place after receipt.
+ */
+struct ceph_sparse_extent {
+ u64 off;
+ u64 len;
+} __packed;
+
+/* Sparse read state machine state values */
+enum ceph_sparse_read_state {
+ CEPH_SPARSE_READ_HDR = 0,
+ CEPH_SPARSE_READ_EXTENTS,
+ CEPH_SPARSE_READ_DATA_LEN,
+ CEPH_SPARSE_READ_DATA_PRE,
+ CEPH_SPARSE_READ_DATA,
+};
+
+/*
+ * A SPARSE_READ reply is a 32-bit count of extents, followed by an array of
+ * 64-bit offset/length pairs, and then all of the actual file data
+ * concatenated after it (sans holes).
+ *
+ * Unfortunately, we don't know how long the extent array is until we've
+ * started reading the data section of the reply. The caller should send down
+ * a destination buffer for the array, but we'll alloc one if it's too small
+ * or if the caller doesn't.
+ */
+struct ceph_sparse_read {
+ enum ceph_sparse_read_state sr_state; /* state machine state */
+ u64 sr_req_off; /* orig request offset */
+ u64 sr_req_len; /* orig request length */
+ u64 sr_pos; /* current pos in buffer */
+ int sr_index; /* current extent index */
+ u32 sr_datalen; /* length of actual data */
+ u32 sr_count; /* extent count in reply */
+ int sr_ext_len; /* length of extent array */
+ struct ceph_sparse_extent *sr_extent; /* extent array */
+};
+
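(A sketch of the wire layout just described, assuming — unlike the incremental state machine above — that the whole reply is already in memory. get_unaligned_le32/64 come from the linux/unaligned.h header this series switches to.)

static u64 sparse_reply_data_len(const void *p)
{
	u32 i, count = get_unaligned_le32(p);	/* 32-bit extent count */
	const u8 *ext = p + sizeof(__le32);	/* then {off, len} pairs */
	u64 total = 0;

	for (i = 0; i < count; i++)
		/* each extent is 16 bytes; the length lives at offset 8 */
		total += get_unaligned_le64(ext + i * 16 + 8);
	return total;	/* bytes of hole-compressed data that follow */
}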
+/*
+ * A given osd we're communicating with.
+ *
+ * Note that the o_requests tree can be searched while holding the "lock" mutex
+ * or the "o_requests_lock" spinlock. Insertion or removal requires both!
+ */
struct ceph_osd {
refcount_t o_ref;
+ int o_sparse_op_idx;
struct ceph_osd_client *o_osdc;
int o_osd;
int o_incarnation;
struct rb_node o_node;
struct ceph_connection o_con;
+ spinlock_t o_requests_lock;
struct rb_root o_requests;
struct rb_root o_linger_requests;
struct rb_root o_backoff_mappings;
@@ -46,6 +95,7 @@ struct ceph_osd {
unsigned long lru_ttl;
struct list_head o_keepalive_item;
struct mutex lock;
+ struct ceph_sparse_read o_sparse_read;
};
#define CEPH_OSD_SLAB_OPS 2
@@ -59,6 +109,7 @@ enum ceph_osd_data_type {
CEPH_OSD_DATA_TYPE_BIO,
#endif /* CONFIG_BLOCK */
CEPH_OSD_DATA_TYPE_BVECS,
+ CEPH_OSD_DATA_TYPE_ITER,
};
struct ceph_osd_data {
@@ -82,6 +133,7 @@ struct ceph_osd_data {
struct ceph_bvec_iter bvec_pos;
u32 num_bvecs;
};
+ struct iov_iter iter;
};
};
@@ -98,6 +150,8 @@ struct ceph_osd_req_op {
u64 offset, length;
u64 truncate_size;
u32 truncate_seq;
+ int sparse_ext_cnt;
+ struct ceph_sparse_extent *sparse_ext;
struct ceph_osd_data osd_data;
} extent;
struct {
@@ -145,6 +199,9 @@ struct ceph_osd_req_op {
u32 src_fadvise_flags;
struct ceph_osd_data osd_data;
} copy_from;
+ struct {
+ u64 ver;
+ } assert_ver;
};
};
@@ -199,6 +256,7 @@ struct ceph_osd_request {
struct ceph_osd_client *r_osdc;
struct kref r_kref;
bool r_mempool;
+ bool r_linger; /* don't resend on failure */
struct completion r_completion; /* private to osd_client.c */
ceph_osdc_callback_t r_callback;
@@ -211,9 +269,9 @@ struct ceph_osd_request {
struct ceph_snap_context *r_snapc; /* for writes */
struct timespec64 r_mtime; /* ditto */
u64 r_data_offset; /* ditto */
- bool r_linger; /* don't resend on failure */
/* internal */
+ u64 r_version; /* data version sent in reply */
unsigned long r_stamp; /* jiffies, send or check time */
unsigned long r_start_stamp; /* jiffies */
ktime_t r_start_latency; /* ktime_t */
@@ -221,7 +279,7 @@ struct ceph_osd_request {
int r_attempts;
u32 r_map_dne_bound;
- struct ceph_osd_req_op r_ops[];
+ struct ceph_osd_req_op r_ops[] __counted_by(r_num_ops);
};
struct ceph_request_redirect {
@@ -287,6 +345,9 @@ struct ceph_osd_linger_request {
rados_watcherrcb_t errcb;
void *data;
+ struct ceph_pagelist *request_pl;
+ struct page **notify_id_pages;
+
struct page ***preply_pages;
size_t *preply_len;
};
@@ -388,8 +449,6 @@ extern int ceph_osdc_init(struct ceph_osd_client *osdc,
extern void ceph_osdc_stop(struct ceph_osd_client *osdc);
extern void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc);
-extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
- struct ceph_msg *msg);
extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
@@ -431,9 +490,6 @@ extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *,
struct page **pages, u64 length,
u32 alignment, bool pages_from_pool,
bool own_pages);
-extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *,
- unsigned int which,
- struct ceph_pagelist *pagelist);
#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
unsigned int which,
@@ -447,10 +503,9 @@ void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
unsigned int which,
struct ceph_bvec_iter *bvec_pos);
+void osd_req_op_extent_osd_iter(struct ceph_osd_request *osd_req,
+ unsigned int which, struct iov_iter *iter);
-extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *,
- unsigned int which,
- struct ceph_pagelist *pagelist);
extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *,
unsigned int which,
struct page **pages, u64 length,
@@ -475,6 +530,14 @@ extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
u64 expected_object_size,
u64 expected_write_size,
u32 flags);
+extern int osd_req_op_copy_from_init(struct ceph_osd_request *req,
+ u64 src_snapid, u64 src_version,
+ struct ceph_object_id *src_oid,
+ struct ceph_object_locator *src_oloc,
+ u32 src_fadvise_flags,
+ u32 dst_fadvise_flags,
+ u32 truncate_seq, u64 truncate_size,
+ u8 copy_from_flags);
extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
struct ceph_snap_context *snapc,
@@ -493,12 +556,28 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
u32 truncate_seq, u64 truncate_size,
bool use_mempool);
+int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt);
+
+/*
+ * How big an extent array should we preallocate for a sparse read? This is
+ * just a starting value. If we get more than this back from the OSD, the
+ * receiver will reallocate.
+ */
+#define CEPH_SPARSE_EXT_ARRAY_INITIAL 16
+
+static inline int ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt)
+{
+ if (!cnt)
+ cnt = CEPH_SPARSE_EXT_ARRAY_INITIAL;
+
+ return __ceph_alloc_sparse_ext_map(op, cnt);
+}
+
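(A plausible usage sketch for the helper above: preallocate the extent map on the read op before submitting the request; the rest of the request setup is elided.)

static int setup_sparse_read(struct ceph_osd_request *req)
{
	struct ceph_osd_req_op *op = &req->r_ops[0];

	/* 0 selects the CEPH_SPARSE_EXT_ARRAY_INITIAL (16) default */
	return ceph_alloc_sparse_ext_map(op, 0);	/* -ENOMEM on failure */
}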
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
extern void ceph_osdc_put_request(struct ceph_osd_request *req);
-extern int ceph_osdc_start_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req,
- bool nofail);
+void ceph_osdc_start_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req);
extern void ceph_osdc_cancel_request(struct ceph_osd_request *req);
extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
@@ -515,17 +594,6 @@ int ceph_osdc_call(struct ceph_osd_client *osdc,
struct page *req_page, size_t req_len,
struct page **resp_pages, size_t *resp_len);
-int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
- u64 src_snapid, u64 src_version,
- struct ceph_object_id *src_oid,
- struct ceph_object_locator *src_oloc,
- u32 src_fadvise_flags,
- struct ceph_object_id *dst_oid,
- struct ceph_object_locator *dst_oloc,
- u32 dst_fadvise_flags,
- u32 truncate_seq, u64 truncate_size,
- u8 copy_from_flags);
-
/* watch/notify */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
@@ -552,12 +620,24 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
u32 timeout,
struct page ***preply_pages,
size_t *preply_len);
-int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
- struct ceph_osd_linger_request *lreq);
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
struct ceph_watch_item **watchers,
u32 *num_watchers);
-#endif
+/* Find offset into the buffer of the end of the extent map */
+static inline u64 ceph_sparse_ext_map_end(struct ceph_osd_req_op *op)
+{
+ struct ceph_sparse_extent *ext;
+
+ /* No extents? No data */
+ if (op->extent.sparse_ext_cnt == 0)
+ return 0;
+
+ ext = &op->extent.sparse_ext[op->extent.sparse_ext_cnt - 1];
+
+ return ext->off + ext->len - op->extent.offset;
+}
+
+#endif
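A worked instance of ceph_sparse_ext_map_end() under assumed values:

/*
 * Suppose a sparse read was issued at op->extent.offset == 4096 and the
 * reply carried extents {off 4096, len 512} and {off 8192, len 1024}.
 * Then ceph_sparse_ext_map_end(op) == 8192 + 1024 - 4096 == 5120: the
 * mapped range ends 5120 bytes into the destination buffer, with the
 * hole between the extents zero-filled rather than packed out.
 */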
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h
index 5dead8486fd8..879bec0863aa 100644
--- a/include/linux/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
@@ -17,12 +17,6 @@ struct ceph_pagelist {
refcount_t refcnt;
};
-struct ceph_pagelist_cursor {
- struct ceph_pagelist *pl; /* pagelist, for error checking */
- struct list_head *page_lru; /* page in list */
- size_t room; /* room remaining to reset to */
-};
-
struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags);
extern void ceph_pagelist_release(struct ceph_pagelist *pl);
@@ -33,12 +27,6 @@ extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space);
extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl);
-extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
- struct ceph_pagelist_cursor *c);
-
-extern int ceph_pagelist_truncate(struct ceph_pagelist *pl,
- struct ceph_pagelist_cursor *c);
-
static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v)
{
__le64 ev = cpu_to_le64(v);
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 43a7a1573b51..73c3efbec36c 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -524,6 +524,10 @@ struct ceph_osd_op {
__le64 cookie;
} __attribute__ ((packed)) notify;
struct {
+ __le64 unused;
+ __le64 ver;
+ } __attribute__ ((packed)) assert_ver;
+ struct {
__le64 offset, length;
__le64 src_offset;
} __attribute__ ((packed)) clonerange;
diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h
index 6617d9c68d86..83e6613d12ae 100644
--- a/include/linux/cfag12864b.h
+++ b/include/linux/cfag12864b.h
@@ -28,13 +28,6 @@
extern unsigned char * cfag12864b_buffer;
/*
- * Get the refresh rate of the LCD
- *
- * Returns the refresh rate (hertz).
- */
-extern unsigned int cfag12864b_getrate(void);
-
-/*
* Enable refreshing
*
* Returns 0 if successful (anyone was using it),
@@ -50,16 +43,6 @@ extern unsigned char cfag12864b_enable(void);
extern void cfag12864b_disable(void);
/*
- * Is enabled refreshing? (is anyone using the module?)
- *
- * Returns 0 if refreshing is not enabled (anyone is using it),
- * or != 0 if refreshing is enabled (someone is using it).
- *
- * Useful for buffer read-only modules.
- */
-extern unsigned char cfag12864b_isenabled(void);
-
-/*
* Is the module inited?
*/
extern unsigned char cfag12864b_isinited(void);
diff --git a/include/linux/cfi.h b/include/linux/cfi.h
index 879744aaa6e0..1fd22ea6eba4 100644
--- a/include/linux/cfi.h
+++ b/include/linux/cfi.h
@@ -2,40 +2,85 @@
/*
* Clang Control Flow Integrity (CFI) support.
*
- * Copyright (C) 2021 Google LLC
+ * Copyright (C) 2022 Google LLC
*/
#ifndef _LINUX_CFI_H
#define _LINUX_CFI_H
-#ifdef CONFIG_CFI_CLANG
-typedef void (*cfi_check_fn)(uint64_t id, void *ptr, void *diag);
+#include <linux/bug.h>
+#include <linux/module.h>
+#include <asm/cfi.h>
-/* Compiler-generated function in each module, and the kernel */
-extern void __cfi_check(uint64_t id, void *ptr, void *diag);
+#ifdef CONFIG_CFI
+extern bool cfi_warn;
+enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr,
+ unsigned long *target, u32 type);
+
+static inline enum bug_trap_type report_cfi_failure_noaddr(struct pt_regs *regs,
+ unsigned long addr)
+{
+ return report_cfi_failure(regs, addr, NULL, 0);
+}
+
+#ifndef cfi_get_offset
/*
- * Force the compiler to generate a CFI jump table entry for a function
- * and store the jump table address to __cfi_jt_<function>.
+ * Returns the CFI prefix offset. By default, the compiler emits only
+ * a 4-byte CFI type hash before the function. If an architecture
+ * uses -fpatchable-function-entry=N,M where M>0 to change the prefix
+ * offset, they must override this function.
*/
-#define __CFI_ADDRESSABLE(fn, __attr) \
- const void *__cfi_jt_ ## fn __visible __attr = (void *)&fn
+static inline int cfi_get_offset(void)
+{
+ return 4;
+}
+#endif
-#ifdef CONFIG_CFI_CLANG_SHADOW
+#ifndef cfi_get_func_hash
+static inline u32 cfi_get_func_hash(void *func)
+{
+ u32 hash;
-extern void cfi_module_add(struct module *mod, unsigned long base_addr);
-extern void cfi_module_remove(struct module *mod, unsigned long base_addr);
+ if (get_kernel_nofault(hash, func - cfi_get_offset()))
+ return 0;
-#else
+ return hash;
+}
+#endif
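(A minimal sketch of how the accessors above compose into a check. The expected hash would come from a compiler-emitted __kcfi_typeid_* constant; the function name is hypothetical.)

static bool cfi_target_matches(void *target, u32 expected)
{
	/* reads the hash stored cfi_get_offset() bytes before entry */
	return cfi_get_func_hash(target) == expected;
}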
+
+/* CFI type hashes for BPF function types */
+extern u32 cfi_bpf_hash;
+extern u32 cfi_bpf_subprog_hash;
-static inline void cfi_module_add(struct module *mod, unsigned long base_addr) {}
-static inline void cfi_module_remove(struct module *mod, unsigned long base_addr) {}
+#else /* CONFIG_CFI */
-#endif /* CONFIG_CFI_CLANG_SHADOW */
+static inline int cfi_get_offset(void) { return 0; }
+static inline u32 cfi_get_func_hash(void *func) { return 0; }
-#else /* !CONFIG_CFI_CLANG */
+#define cfi_bpf_hash 0U
+#define cfi_bpf_subprog_hash 0U
-#define __CFI_ADDRESSABLE(fn, __attr)
+#endif /* CONFIG_CFI */
+
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+bool is_cfi_trap(unsigned long addr);
+#else
+static inline bool is_cfi_trap(unsigned long addr) { return false; }
+#endif
+
+#ifdef CONFIG_MODULES
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+void module_cfi_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *mod);
+#else
+static inline void module_cfi_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *mod) {}
+#endif /* CONFIG_ARCH_USES_CFI_TRAPS */
+#endif /* CONFIG_MODULES */
-#endif /* CONFIG_CFI_CLANG */
+#ifndef CFI_NOSEAL
+#define CFI_NOSEAL(x)
+#endif
#endif /* _LINUX_CFI_H */
diff --git a/include/linux/cfi_types.h b/include/linux/cfi_types.h
new file mode 100644
index 000000000000..a86af9bc8bdc
--- /dev/null
+++ b/include/linux/cfi_types.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Clang Control Flow Integrity (CFI) type definitions.
+ */
+#ifndef _LINUX_CFI_TYPES_H
+#define _LINUX_CFI_TYPES_H
+
+#ifdef __ASSEMBLY__
+#include <linux/linkage.h>
+
+#ifdef CONFIG_CFI
+/*
+ * Use the __kcfi_typeid_<function> type identifier symbol to
+ * annotate indirectly called assembly functions. The compiler emits
+ * these symbols for all address-taken function declarations in C
+ * code.
+ */
+#ifndef __CFI_TYPE
+#define __CFI_TYPE(name) \
+ .4byte __kcfi_typeid_##name
+#endif
+
+#define SYM_TYPED_ENTRY(name, linkage, align...) \
+ linkage(name) ASM_NL \
+ align ASM_NL \
+ __CFI_TYPE(name) ASM_NL \
+ name:
+
+#define SYM_TYPED_START(name, linkage, align...) \
+ SYM_TYPED_ENTRY(name, linkage, align)
+
+#else /* CONFIG_CFI */
+
+#define SYM_TYPED_START(name, linkage, align...) \
+ SYM_START(name, linkage, align)
+
+#endif /* CONFIG_CFI */
+
+#ifndef SYM_TYPED_FUNC_START
+#define SYM_TYPED_FUNC_START(name) \
+ SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_CFI
+#define DEFINE_CFI_TYPE(name, func) \
+ /* \
+ * Force a reference to the function so the compiler generates \
+ * __kcfi_typeid_<func>. \
+ */ \
+ __ADDRESSABLE(func); \
+ /* u32 name __ro_after_init = __kcfi_typeid_<func> */ \
+ extern u32 name; \
+ asm ( \
+ " .pushsection .data..ro_after_init,\"aw\",\%progbits \n" \
+ " .type " #name ",\%object \n" \
+ " .globl " #name " \n" \
+ " .p2align 2, 0x0 \n" \
+ #name ": \n" \
+ " .4byte __kcfi_typeid_" #func " \n" \
+ " .size " #name ", 4 \n" \
+ " .popsection \n" \
+ );
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* _LINUX_CFI_TYPES_H */
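A hypothetical usage sketch for both halves of this header (all symbol names invented):

/*
 * In assembly, give an indirectly-called function a typed entry so the
 * compiler-emitted __kcfi_typeid_<name> hash precedes it:
 *
 *	SYM_TYPED_FUNC_START(my_asm_helper)
 *		ret
 *	SYM_FUNC_END(my_asm_helper)
 *
 * On the C side, DEFINE_CFI_TYPE materializes a u32 holding the type
 * hash of a real function with the desired signature:
 *
 *	DEFINE_CFI_TYPE(my_type_hash, my_real_func);
 */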
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 559ee05f86b2..b760a3c470a5 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -19,7 +19,7 @@
#include <linux/percpu-rwsem.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
-#include <linux/bpf-cgroup.h>
+#include <linux/bpf-cgroup-defs.h>
#include <linux/psi_types.h>
#ifdef CONFIG_CGROUPS
@@ -86,19 +86,48 @@ enum {
CGRP_ROOT_NS_DELEGATE = (1 << 3),
/*
+ * Reduce latencies on dynamic cgroup modifications such as task
+ * migrations and controller on/offs by disabling percpu operation on
+ * cgroup_threadgroup_rwsem. This makes hot path operations such as
+ * forks and exits into the slow path and more expensive.
+ *
+ * Alleviate the contention between fork, exec, exit operations and
+ * writing to cgroup.procs by taking a per threadgroup rwsem instead of
+ * the global cgroup_threadgroup_rwsem. Fork and other operations
+ * from threads in different thread groups no longer contend with
+ * writing to cgroup.procs.
+ *
+ * The static usage pattern of creating a cgroup, enabling controllers,
+ * and then seeding it with CLONE_INTO_CGROUP doesn't require write
+ * locking cgroup_threadgroup_rwsem and thus doesn't benefit from
+ * favordynmod.
+ */
+ CGRP_ROOT_FAVOR_DYNMODS = (1 << 4),
+
+ /*
* Enable cpuset controller in v1 cgroup to use v2 behavior.
*/
- CGRP_ROOT_CPUSET_V2_MODE = (1 << 4),
+ CGRP_ROOT_CPUSET_V2_MODE = (1 << 16),
/*
* Enable legacy local memory.events.
*/
- CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5),
+ CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 17),
/*
* Enable recursive subtree protection
*/
- CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 6),
+ CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18),
+
+ /*
+ * Enable hugetlb accounting for the memory controller.
+ */
+ CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),
+
+ /*
+ * Enable legacy local pids.events.
+ */
+ CGRP_ROOT_PIDS_LOCAL_EVENTS = (1 << 20),
};
/* cftype->flags */
@@ -114,6 +143,18 @@ enum {
/* internal flags, do not use outside cgroup core proper */
__CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
__CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
+ __CFTYPE_ADDED = (1 << 18),
+};
+
+enum cgroup_attach_lock_mode {
+ /* Default */
+ CGRP_ATTACH_LOCK_GLOBAL,
+
+ /* When pid=0 && threadgroup=false, see comments in cgroup_procs_write_start */
+ CGRP_ATTACH_LOCK_NONE,
+
+ /* When favordynmods is on, see comments above CGRP_ROOT_FAVOR_DYNMODS */
+ CGRP_ATTACH_LOCK_PER_THREADGROUP,
};
/*
@@ -145,13 +186,31 @@ struct cgroup_subsys_state {
/* reference count - access via css_[try]get() and css_put() */
struct percpu_ref refcnt;
- /* siblings list anchored at the parent's ->children */
+ /*
+ * Depending on the context, this field is initialized
+ * via css_rstat_init() at different places:
+ *
+ * when css is associated with cgroup::self
+ * when css->cgroup is the root cgroup
+ * performed in cgroup_init()
+ * when css->cgroup is not the root cgroup
+ * performed in cgroup_create()
+ * when css is associated with a subsystem
+ * when css->cgroup is the root cgroup
+ * performed in cgroup_init_subsys() in the non-early path
+ * when css->cgroup is not the root cgroup
+ * performed in css_create()
+ */
+ struct css_rstat_cpu __percpu *rstat_cpu;
+
+ /*
+ * siblings list anchored at the parent's ->children
+ *
+ * linkage is protected by cgroup_mutex or RCU
+ */
struct list_head sibling;
struct list_head children;
- /* flush target list anchored at cgrp->rstat_css_list */
- struct list_head rstat_css_node;
-
/*
* PI: Subsys-unique ID. 0 is unused and root is always 1. The
* matching css can be looked up using css_from_id().
@@ -183,6 +242,24 @@ struct cgroup_subsys_state {
* fields of the containing structure.
*/
struct cgroup_subsys_state *parent;
+
+ /*
+ * Keep track of total numbers of visible descendant CSSes.
+ * The total number of dying CSSes is tracked in
+ * css->cgroup->nr_dying_subsys[ssid].
+ * Protected by cgroup_mutex.
+ */
+ int nr_descendants;
+
+ /*
+ * A singly-linked list of css structures to be rstat flushed.
+ * This is a scratch field to be used exclusively by
+ * css_rstat_flush().
+ *
+ * Protected by rstat_base_lock when css is cgroup::self.
+ * Protected by css->ss->rstat_ss_lock otherwise.
+ */
+ struct cgroup_subsys_state *rstat_flush_next;
};
/*
@@ -221,7 +298,7 @@ struct css_set {
* Lists running through all tasks using this cgroup group.
* mg_tasks lists tasks which belong to this cset but are in the
* process of being migrated out or in. Protected by
- * css_set_rwsem, but, during migration, once tasks are moved to
+ * css_set_lock, but, during migration, once tasks are moved to
* mg_tasks, it can be read safely while holding cgroup_mutex.
*/
struct list_head tasks;
@@ -232,7 +309,7 @@ struct css_set {
struct list_head task_iters;
/*
- * On the default hierarhcy, ->subsys[ssid] may point to a css
+ * On the default hierarchy, ->subsys[ssid] may point to a css
* attached to an ancestor instead of the cgroup this css_set is
* associated with. The following node is anchored at
* ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
@@ -260,7 +337,8 @@ struct css_set {
* List of csets participating in the on-going migration either as
* source or destination. Protected by cgroup_mutex.
*/
- struct list_head mg_preload_node;
+ struct list_head mg_src_preload_node;
+ struct list_head mg_dst_preload_node;
struct list_head mg_node;
/*
@@ -283,14 +361,19 @@ struct css_set {
struct cgroup_base_stat {
struct task_cputime cputime;
+
+#ifdef CONFIG_SCHED_CORE
+ u64 forceidle_sum;
+#endif
+ u64 ntime;
};
/*
* rstat - cgroup scalable recursive statistics. Accounting is done
- * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
+ * per-cpu in css_rstat_cpu which is then lazily propagated up the
* hierarchy on reads.
*
- * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are
+ * When a stat gets updated, the css_rstat_cpu and its ancestors are
* linked into the updated tree. On the following read, propagation only
* considers and consumes the updated tree. This makes reading O(the
* number of descendants which have been active since last read) instead of
@@ -302,10 +385,26 @@ struct cgroup_base_stat {
* frequency decreases the cost of each read.
*
* This struct hosts both the fields which implement the above -
- * updated_children and updated_next - and the fields which track basic
- * resource statistics on top of it - bsync, bstat and last_bstat.
+ * updated_children and updated_next.
*/
-struct cgroup_rstat_cpu {
+struct css_rstat_cpu {
+ /*
+ * Child cgroups with stat updates on this cpu since the last read
+ * are linked on the parent's ->updated_children through
+ * ->updated_next. updated_children is terminated by its container css.
+ */
+ struct cgroup_subsys_state *updated_children;
+ struct cgroup_subsys_state *updated_next; /* NULL if not on the list */
+
+ struct llist_node lnode; /* lockless list for update */
+ struct cgroup_subsys_state *owner; /* back pointer */
+};
+
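(A simplified, lock-free sketch of the updated-tree linking described above; the real code takes per-cpu locking and handles more cases, so this only shows the shape of the ancestor walk.)

static void rstat_updated_sketch(struct cgroup_subsys_state *css, int cpu)
{
	for (; css; css = css->parent) {
		struct css_rstat_cpu *rc = per_cpu_ptr(css->rstat_cpu, cpu);

		if (rc->updated_next)		/* already linked */
			break;
		if (css->parent) {
			struct css_rstat_cpu *prc =
				per_cpu_ptr(css->parent->rstat_cpu, cpu);

			rc->updated_next = prc->updated_children;
			prc->updated_children = css;
		} else {
			rc->updated_next = css;	/* root terminates at self */
		}
	}
}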
+/*
+ * This struct hosts the fields which track basic resource statistics on
+ * top of it - bsync, bstat and last_bstat.
+ */
+struct cgroup_rstat_base_cpu {
/*
* ->bsync protects ->bstat. These are the only fields which get
* updated in the hot path.
@@ -320,18 +419,18 @@ struct cgroup_rstat_cpu {
struct cgroup_base_stat last_bstat;
/*
- * Child cgroups with stat updates on this cpu since the last read
- * are linked on the parent's ->updated_children through
- * ->updated_next.
- *
- * In addition to being more compact, singly-linked list pointing
- * to the cgroup makes it unnecessary for each per-cpu struct to
- * point back to the associated cgroup.
- *
- * Protected by per-cpu cgroup_rstat_cpu_lock.
+ * This field is used to record the cumulative per-cpu time of
+ * the cgroup and its descendants. Currently it can be read via
+ * eBPF/drgn etc, and we are still trying to determine how to
+ * expose it in the cgroupfs interface.
+ */
+ struct cgroup_base_stat subtree_bstat;
+
+ /*
+ * Snapshots at the last reading. These are used to calculate the
+ * deltas to propagate to the per-cpu subtree_bstat.
*/
- struct cgroup *updated_children; /* terminated by self cgroup */
- struct cgroup *updated_next; /* NULL iff not on the list */
+ struct cgroup_base_stat last_subtree_bstat;
};
struct cgroup_freezer_state {
@@ -339,7 +438,7 @@ struct cgroup_freezer_state {
bool freeze;
/* Should the cgroup actually be frozen? */
- int e_freeze;
+ bool e_freeze;
/* Fields below are protected by css_set_lock */
@@ -351,6 +450,23 @@ struct cgroup_freezer_state {
* frozen, SIGSTOPped, and PTRACEd.
*/
int nr_frozen_tasks;
+
+ /* Freeze time data consistency protection */
+ seqcount_spinlock_t freeze_seq;
+
+ /*
+ * Most recent time the cgroup was requested to freeze.
+ * Accesses guarded by freeze_seq counter. Writes serialized
+ * by css_set_lock.
+ */
+ u64 freeze_start_nsec;
+
+ /*
+ * Total duration the cgroup has spent freezing.
+ * Accesses guarded by freeze_seq counter. Writes serialized
+ * by css_set_lock.
+ */
+ u64 frozen_nsec;
};
struct cgroup {
@@ -362,7 +478,7 @@ struct cgroup {
/*
* The depth this cgroup is at. The root is at depth zero and each
* step down the hierarchy increments the level. This along with
- * ancestor_ids[] can determine whether a given cgroup is a
+ * ancestors[] can determine whether a given cgroup is a
* descendant of another without traversing the hierarchy.
*/
int level;
@@ -402,14 +518,20 @@ struct cgroup {
int nr_threaded_children; /* # of live threaded child cgroups */
+ /* sequence number for cgroup.kill, serialized by css_set_lock. */
+ unsigned int kill_seq;
+
struct kernfs_node *kn; /* cgroup kernfs entry */
struct cgroup_file procs_file; /* handle for "cgroup.procs" */
struct cgroup_file events_file; /* handle for "cgroup.events" */
+ /* handles for "{cpu,memory,io,irq}.pressure" */
+ struct cgroup_file psi_files[NR_PSI_RESOURCES];
+
/*
* The bitmask of subsystems enabled on the child cgroups.
* ->subtree_control is the one configured through
- * "cgroup.subtree_control" while ->child_ss_mask is the effective
+ * "cgroup.subtree_control" while ->subtree_ss_mask is the effective
* one which may have more subsystems enabled. Controller knobs
* are made available iff it's enabled in ->subtree_control.
*/
@@ -421,6 +543,12 @@ struct cgroup {
/* Private pointers for each registered subsystem */
struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
+ /*
+ * Keep track of total number of dying CSSes at and below this cgroup.
+ * Protected by cgroup_mutex.
+ */
+ int nr_dying_subsys[CGROUP_SUBSYS_COUNT];
+
struct cgroup_root *root;
/*
@@ -448,9 +576,23 @@ struct cgroup {
struct cgroup *dom_cgrp;
struct cgroup *old_dom_cgrp; /* used while enabling threaded */
- /* per-cpu recursive resource statistics */
- struct cgroup_rstat_cpu __percpu *rstat_cpu;
- struct list_head rstat_css_list;
+ /*
+ * Depending on the context, this field is initialized via
+ * css_rstat_init() at different places:
+ *
+ * when cgroup is the root cgroup
+ * performed in cgroup_setup_root()
+ * otherwise
+ * performed in cgroup_create()
+ */
+ struct cgroup_rstat_base_cpu __percpu *rstat_base_cpu;
+
+ /*
+ * Add padding to keep the read mostly rstat per-cpu pointer on a
+ * different cacheline than the following *bstat fields which can have
+ * frequent updates.
+ */
+ CACHELINE_PADDING(_pad_);
/* cgroup basic resource statistics */
struct cgroup_base_stat last_bstat;
@@ -471,19 +613,20 @@ struct cgroup {
struct work_struct release_agent_work;
/* used to track pressure stalls */
- struct psi_group psi;
+ struct psi_group *psi;
/* used to store eBPF programs */
struct cgroup_bpf bpf;
- /* If there is block congestion on this cgroup. */
- atomic_t congestion_count;
-
/* Used to store internal freezer state */
struct cgroup_freezer_state freezer;
- /* ids of the ancestors at each level including self */
- u64 ancestor_ids[];
+#ifdef CONFIG_BPF_SYSCALL
+ struct bpf_local_storage __rcu *bpf_cgrp_storage;
+#endif
+
+ /* All ancestors including self */
+ struct cgroup *ancestors[];
};
/*
@@ -500,18 +643,23 @@ struct cgroup_root {
/* Unique id for this hierarchy. */
int hierarchy_id;
- /* The root cgroup. Root is destroyed on its release. */
+ /* A list running through the active hierarchies */
+ struct list_head root_list;
+ struct rcu_head rcu; /* Must be near the top */
+
+ /*
+ * The root cgroup. The containing cgroup_root will be destroyed on its
+ * release. cgrp->ancestors[0] will be used overflowing into the
+ * following field. cgrp_ancestor_storage must immediately follow.
+ */
struct cgroup cgrp;
- /* for cgrp->ancestor_ids[0] */
- u64 cgrp_ancestor_id_storage;
+ /* must follow cgrp for cgrp->ancestors[0], see above */
+ struct cgroup *cgrp_ancestor_storage;
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
- /* A list running through the active hierarchies */
- struct list_head root_list;
-
/* Hierarchy-specific flags */
unsigned int flags;
@@ -531,9 +679,8 @@ struct cgroup_root {
*/
struct cftype {
/*
- * By convention, the name should begin with the name of the
- * subsystem, followed by a period. Zero length string indicates
- * end of cftype array.
+ * Name of the subsystem is prepended in cgroup_file_name().
+ * Zero length string indicates end of cftype array.
*/
char name[MAX_CFTYPE_NAME];
unsigned long private;
@@ -609,9 +756,7 @@ struct cftype {
__poll_t (*poll)(struct kernfs_open_file *of,
struct poll_table_struct *pt);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lock_class_key lockdep_key;
-#endif
};
/*
@@ -625,14 +770,16 @@ struct cgroup_subsys {
void (*css_released)(struct cgroup_subsys_state *css);
void (*css_free)(struct cgroup_subsys_state *css);
void (*css_reset)(struct cgroup_subsys_state *css);
+ void (*css_killed)(struct cgroup_subsys_state *css);
void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
int (*css_extra_stat_show)(struct seq_file *seq,
struct cgroup_subsys_state *css);
+ int (*css_local_stat_show)(struct seq_file *seq,
+ struct cgroup_subsys_state *css);
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
- void (*post_attach)(void);
int (*can_fork)(struct task_struct *task,
struct css_set *cset);
void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
@@ -668,7 +815,7 @@ struct cgroup_subsys {
*/
bool threaded:1;
- /* the following two fields are initialized automtically during boot */
+ /* the following two fields are initialized automatically during boot */
int id;
const char *name;
@@ -702,20 +849,32 @@ struct cgroup_subsys {
* specifies the mask of subsystems that this one depends on.
*/
unsigned int depends_on;
+
+ spinlock_t rstat_ss_lock;
+ struct llist_head __percpu *lhead; /* lockless update list head */
};
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
+extern bool cgroup_enable_per_threadgroup_rwsem;
+
+struct cgroup_of_peak {
+ unsigned long value;
+ struct list_head list;
+};
/**
* cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
* @tsk: target task
*
* Allows cgroup operations to synchronize against threadgroup changes
- * using a percpu_rw_semaphore.
+ * using a global percpu_rw_semaphore and a per threadgroup rw_semaphore when
+ * favordynmods is on. See the comment above CGRP_ROOT_FAVOR_DYNMODS definition.
*/
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
percpu_down_read(&cgroup_threadgroup_rwsem);
+ if (cgroup_enable_per_threadgroup_rwsem)
+ down_read(&tsk->signal->cgroup_threadgroup_rwsem);
}
/**
@@ -726,6 +885,8 @@ static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
*/
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
+ if (cgroup_enable_per_threadgroup_rwsem)
+ up_read(&tsk->signal->cgroup_threadgroup_rwsem);
percpu_up_read(&cgroup_threadgroup_rwsem);
}
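(A minimal pairing sketch, assuming a caller that must exclude threadgroup changes around an update; the body is elided.)

static void update_cgroup_of(struct task_struct *task)
{
	cgroup_threadgroup_change_begin(task);
	/* ... migrate task's threadgroup / rewrite task->cgroups ... */
	cgroup_threadgroup_change_end(task);
}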
@@ -748,108 +909,53 @@ static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}
* sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
* per-socket cgroup information except for memcg association.
*
- * On legacy hierarchies, net_prio and net_cls controllers directly set
- * attributes on each sock which can then be tested by the network layer.
- * On the default hierarchy, each sock is associated with the cgroup it was
- * created in and the networking layer can match the cgroup directly.
- *
- * To avoid carrying all three cgroup related fields separately in sock,
- * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
- * On boot, sock_cgroup_data records the cgroup that the sock was created
- * in so that cgroup2 matches can be made; however, once either net_prio or
- * net_cls starts being used, the area is overriden to carry prioidx and/or
- * classid. The two modes are distinguished by whether the lowest bit is
- * set. Clear bit indicates cgroup pointer while set bit prioidx and
- * classid.
- *
- * While userland may start using net_prio or net_cls at any time, once
- * either is used, cgroup2 matching no longer works. There is no reason to
- * mix the two and this is in line with how legacy and v2 compatibility is
- * handled. On mode switch, cgroup references which are already being
- * pointed to by socks may be leaked. While this can be remedied by adding
- * synchronization around sock_cgroup_data, given that the number of leaked
- * cgroups is bound and highly unlikely to be high, this seems to be the
- * better trade-off.
+ * On legacy hierarchies, net_prio and net_cls controllers directly
+ * set attributes on each sock which can then be tested by the network
+ * layer. On the default hierarchy, each sock is associated with the
+ * cgroup it was created in and the networking layer can match the
+ * cgroup directly.
*/
struct sock_cgroup_data {
- union {
-#ifdef __LITTLE_ENDIAN
- struct {
- u8 is_data : 1;
- u8 no_refcnt : 1;
- u8 unused : 6;
- u8 padding;
- u16 prioidx;
- u32 classid;
- } __packed;
-#else
- struct {
- u32 classid;
- u16 prioidx;
- u8 padding;
- u8 unused : 6;
- u8 no_refcnt : 1;
- u8 is_data : 1;
- } __packed;
+ struct cgroup *cgroup; /* v2 */
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ u32 classid; /* v1 */
+#endif
+#ifdef CONFIG_CGROUP_NET_PRIO
+ u16 prioidx; /* v1 */
#endif
- u64 val;
- };
};
-/*
- * There's a theoretical window where the following accessors race with
- * updaters and return part of the previous pointer as the prioidx or
- * classid. Such races are short-lived and the result isn't critical.
- */
static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
- /* fallback to 1 which is always the ID of the root cgroup */
- return (skcd->is_data & 1) ? skcd->prioidx : 1;
+#ifdef CONFIG_CGROUP_NET_PRIO
+ return READ_ONCE(skcd->prioidx);
+#else
+ return 1;
+#endif
}
+#ifdef CONFIG_CGROUP_NET_CLASSID
static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
- /* fallback to 0 which is the unconfigured default classid */
- return (skcd->is_data & 1) ? skcd->classid : 0;
+ return READ_ONCE(skcd->classid);
}
+#endif
-/*
- * If invoked concurrently, the updaters may clobber each other. The
- * caller is responsible for synchronization.
- */
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
u16 prioidx)
{
- struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
- if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
- return;
-
- if (!(skcd_buf.is_data & 1)) {
- skcd_buf.val = 0;
- skcd_buf.is_data = 1;
- }
-
- skcd_buf.prioidx = prioidx;
- WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
+#ifdef CONFIG_CGROUP_NET_PRIO
+ WRITE_ONCE(skcd->prioidx, prioidx);
+#endif
}
+#ifdef CONFIG_CGROUP_NET_CLASSID
static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
u32 classid)
{
- struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};
-
- if (sock_cgroup_classid(&skcd_buf) == classid)
- return;
-
- if (!(skcd_buf.is_data & 1)) {
- skcd_buf.val = 0;
- skcd_buf.is_data = 1;
- }
-
- skcd_buf.classid = classid;
- WRITE_ONCE(skcd->val, skcd_buf.val); /* see sock_cgroup_ptr() */
+ WRITE_ONCE(skcd->classid, classid);
}
+#endif
#else /* CONFIG_SOCK_CGROUP_DATA */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 4f2f79de083e..bc892e3b37ee 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -10,8 +10,8 @@
*/
#include <linux/sched.h>
-#include <linux/cpumask.h>
#include <linux/nodemask.h>
+#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
@@ -19,6 +19,7 @@
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
+#include <linux/notifier.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
@@ -26,13 +27,12 @@
#include <linux/kernel_stat.h>
#include <linux/cgroup-defs.h>
+#include <linux/cgroup_namespace.h>
struct kernel_clone_args;
-#ifdef CONFIG_CGROUPS
-
/*
- * All weight knobs on the default hierarhcy should use the following min,
+ * All weight knobs on the default hierarchy should use the following min,
* default and max values. The default value is the logarithmic center of
* MIN and MAX and allows 100x to be expressed in both directions.
*/
@@ -40,13 +40,13 @@ struct kernel_clone_args;
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000
-/* walk only threadgroup leaders */
-#define CSS_TASK_ITER_PROCS (1U << 0)
-/* walk all threaded css_sets in the domain */
-#define CSS_TASK_ITER_THREADED (1U << 1)
+#ifdef CONFIG_CGROUPS
-/* internal flags */
-#define CSS_TASK_ITER_SKIPPED (1U << 16)
+enum css_task_iter_flags {
+ CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */
+ CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */
+ CSS_TASK_ITER_SKIPPED = (1U << 16), /* internal flags */
+};
/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
@@ -68,8 +68,16 @@ struct css_task_iter {
struct list_head iters_node; /* css_set->task_iters */
};
+enum cgroup_lifetime_events {
+ CGROUP_LIFETIME_ONLINE,
+ CGROUP_LIFETIME_OFFLINE,
+};
+
+extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
+extern spinlock_t css_set_lock;
+extern struct blocking_notifier_head cgroup_lifetime_notifier;
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
@@ -106,16 +114,18 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);
+struct cgroup *cgroup_v1v2_get_from_fd(int fd);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
+void cgroup_file_show(struct cgroup_file *cfile, bool show);
-int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
@@ -127,9 +137,10 @@ extern void cgroup_cancel_fork(struct task_struct *p,
struct kernel_clone_args *kargs);
extern void cgroup_post_fork(struct task_struct *p,
struct kernel_clone_args *kargs);
-void cgroup_exit(struct task_struct *p);
-void cgroup_release(struct task_struct *p);
-void cgroup_free(struct task_struct *p);
+void cgroup_task_exit(struct task_struct *p);
+void cgroup_task_dead(struct task_struct *p);
+void cgroup_task_release(struct task_struct *p);
+void cgroup_task_free(struct task_struct *p);
int cgroup_init_early(void);
int cgroup_init(void);
@@ -307,72 +318,25 @@ void css_task_iter_end(struct css_task_iter *it);
* Inline functions.
*/
+#ifdef CONFIG_DEBUG_CGROUP_REF
+void css_get(struct cgroup_subsys_state *css);
+void css_get_many(struct cgroup_subsys_state *css, unsigned int n);
+bool css_tryget(struct cgroup_subsys_state *css);
+bool css_tryget_online(struct cgroup_subsys_state *css);
+void css_put(struct cgroup_subsys_state *css);
+void css_put_many(struct cgroup_subsys_state *css, unsigned int n);
+#else
+#define CGROUP_REF_FN_ATTRS static inline
+#define CGROUP_REF_EXPORT(fn)
+#include <linux/cgroup_refcnt.h>
+#endif
+
static inline u64 cgroup_id(const struct cgroup *cgrp)
{
return cgrp->kn->id;
}
/**
- * css_get - obtain a reference on the specified css
- * @css: target css
- *
- * The caller must already have a reference.
- */
-static inline void css_get(struct cgroup_subsys_state *css)
-{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_get(&css->refcnt);
-}
-
-/**
- * css_get_many - obtain references on the specified css
- * @css: target css
- * @n: number of references to get
- *
- * The caller must already have a reference.
- */
-static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
-{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_get_many(&css->refcnt, n);
-}
-
-/**
- * css_tryget - try to obtain a reference on the specified css
- * @css: target css
- *
- * Obtain a reference on @css unless it already has reached zero and is
- * being released. This function doesn't care whether @css is on or
- * offline. The caller naturally needs to ensure that @css is accessible
- * but doesn't have to be holding a reference on it - IOW, RCU protected
- * access is good enough for this function. Returns %true if a reference
- * count was successfully obtained; %false otherwise.
- */
-static inline bool css_tryget(struct cgroup_subsys_state *css)
-{
- if (!(css->flags & CSS_NO_REF))
- return percpu_ref_tryget(&css->refcnt);
- return true;
-}
-
-/**
- * css_tryget_online - try to obtain a reference on the specified css if online
- * @css: target css
- *
- * Obtain a reference on @css if it's online. The caller naturally needs
- * to ensure that @css is accessible but doesn't have to be holding a
- * reference on it - IOW, RCU protected access is good enough for this
- * function. Returns %true if a reference count was successfully obtained;
- * %false otherwise.
- */
-static inline bool css_tryget_online(struct cgroup_subsys_state *css)
-{
- if (!(css->flags & CSS_NO_REF))
- return percpu_ref_tryget_live(&css->refcnt);
- return true;
-}
-
-/**
* css_is_dying - test whether the specified css is dying
* @css: target css
*
@@ -389,32 +353,23 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css)
*/
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
- return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
+ return css->flags & CSS_DYING;
}
-/**
- * css_put - put a css reference
- * @css: target css
- *
- * Put a reference obtained via css_get() and css_tryget_online().
- */
-static inline void css_put(struct cgroup_subsys_state *css)
+static inline bool css_is_online(struct cgroup_subsys_state *css)
{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_put(&css->refcnt);
+ return css->flags & CSS_ONLINE;
}
-/**
- * css_put_many - put css references
- * @css: target css
- * @n: number of references to put
- *
- * Put references obtained via css_get() and css_tryget_online().
- */
-static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
+static inline bool css_is_self(struct cgroup_subsys_state *css)
{
- if (!(css->flags & CSS_NO_REF))
- percpu_ref_put_many(&css->refcnt, n);
+ if (css == &css->cgroup->self) {
+ /* cgroup::self should not have subsystem association */
+ WARN_ON(css->ss != NULL);
+ return true;
+ }
+
+ return false;
}
static inline void cgroup_get(struct cgroup *cgrp)
@@ -432,6 +387,18 @@ static inline void cgroup_put(struct cgroup *cgrp)
css_put(&cgrp->self);
}
+extern struct mutex cgroup_mutex;
+
+static inline void cgroup_lock(void)
+{
+ mutex_lock(&cgroup_mutex);
+}
+
+static inline void cgroup_unlock(void)
+{
+ mutex_unlock(&cgroup_mutex);
+}
+
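A hypothetical caller of the new wrappers, which let code take the global lock without naming cgroup_mutex directly:

	static void inspect_hierarchy(void)
	{
		cgroup_lock();
		/* ... examine cgroup state under cgroup_mutex ... */
		cgroup_unlock();
	}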
/**
* task_css_set_check - obtain a task's css_set with extra access conditions
* @task: the task to obtain css_set for
@@ -446,10 +413,9 @@ static inline void cgroup_put(struct cgroup *cgrp)
* as locks used during the cgroup_subsys::attach() methods.
*/
#ifdef CONFIG_PROVE_RCU
-extern struct mutex cgroup_mutex;
-extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \
+ rcu_read_lock_sched_held() || \
lockdep_is_held(&cgroup_mutex) || \
lockdep_is_held(&css_set_lock) || \
((task)->flags & PF_EXITING) || (__c))
@@ -573,7 +539,7 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
{
if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
return false;
- return cgrp->ancestor_ids[ancestor->level] == cgroup_id(ancestor);
+ return cgrp->ancestors[ancestor->level] == ancestor;
}
/**
@@ -590,11 +556,9 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
int ancestor_level)
{
- if (cgrp->level < ancestor_level)
+ if (ancestor_level < 0 || ancestor_level > cgrp->level)
return NULL;
- while (cgrp && cgrp->level > ancestor_level)
- cgrp = cgroup_parent(cgrp);
- return cgrp;
+ return cgrp->ancestors[ancestor_level];
}
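With the ancestors[] array the lookup is O(1) rather than a parent walk; a sketch of a caller (top_ancestor_id() is illustrative):

	static u64 top_ancestor_id(struct cgroup *cgrp)
	{
		struct cgroup *top = cgroup_ancestor(cgrp, 1);	/* level-1 ancestor */

		return top ? cgroup_id(top) : 0;
	}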
/**
@@ -671,10 +635,7 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}
-static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
-{
- return &cgrp->psi;
-}
+bool cgroup_psi_enabled(void);
static inline void cgroup_init_kthreadd(void)
{
@@ -696,6 +657,8 @@ static inline void cgroup_kthread_ready(void)
}
void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
+struct cgroup *__cgroup_get_from_id(u64 id);
+struct cgroup *cgroup_get_from_id(u64 id);
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
@@ -704,6 +667,8 @@ struct cgroup;
static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
+static inline void cgroup_lock(void) {}
+static inline void cgroup_unlock(void) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
@@ -716,9 +681,10 @@ static inline void cgroup_cancel_fork(struct task_struct *p,
struct kernel_clone_args *kargs) {}
static inline void cgroup_post_fork(struct task_struct *p,
struct kernel_clone_args *kargs) {}
-static inline void cgroup_exit(struct task_struct *p) {}
-static inline void cgroup_release(struct task_struct *p) {}
-static inline void cgroup_free(struct task_struct *p) {}
+static inline void cgroup_task_exit(struct task_struct *p) {}
+static inline void cgroup_task_dead(struct task_struct *p) {}
+static inline void cgroup_task_release(struct task_struct *p) {}
+static inline void cgroup_task_free(struct task_struct *p) {}
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
@@ -730,9 +696,9 @@ static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
return NULL;
}
-static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+static inline bool cgroup_psi_enabled(void)
{
- return NULL;
+ return false;
}
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
@@ -749,11 +715,8 @@ static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
/*
* cgroup scalable recursive statistics.
*/
-void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
-void cgroup_rstat_flush(struct cgroup *cgrp);
-void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
-void cgroup_rstat_flush_hold(struct cgroup *cgrp);
-void cgroup_rstat_flush_release(void);
+void css_rstat_updated(struct cgroup_subsys_state *css, int cpu);
+void css_rstat_flush(struct cgroup_subsys_state *css);
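A sketch of the update/flush pattern implied by the css-granular rstat API; the mysubsys_* hooks are illustrative:

	static void mysubsys_charge(struct cgroup_subsys_state *css)
	{
		/* ... bump per-CPU counters (preemption assumed disabled) ... */
		css_rstat_updated(css, smp_processor_id());
	}

	static void mysubsys_stat_show(struct cgroup_subsys_state *css)
	{
		css_rstat_flush(css);	/* fold pending per-CPU deltas */
		/* ... totals are now coherent ... */
	}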
/*
* Basic resource stats.
@@ -778,11 +741,9 @@ static inline void cgroup_account_cputime(struct task_struct *task,
cpuacct_charge(task, delta_exec);
- rcu_read_lock();
cgrp = task_dfl_cgroup(task);
if (cgroup_parent(cgrp))
__cgroup_account_cputime(cgrp, delta_exec);
- rcu_read_unlock();
}
static inline void cgroup_account_cputime_field(struct task_struct *task,
@@ -793,11 +754,9 @@ static inline void cgroup_account_cputime_field(struct task_struct *task,
cpuacct_account_field(task, index, delta_exec);
- rcu_read_lock();
cgrp = task_dfl_cgroup(task);
if (cgroup_parent(cgrp))
__cgroup_account_cputime_field(cgrp, index, delta_exec);
- rcu_read_unlock();
}
#else /* CONFIG_CGROUPS */
@@ -816,33 +775,13 @@ static inline void cgroup_account_cputime_field(struct task_struct *task,
*/
#ifdef CONFIG_SOCK_CGROUP_DATA
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
-extern spinlock_t cgroup_sk_update_lock;
-#endif
-
-void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);
static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
-#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
- unsigned long v;
-
- /*
- * @skcd->val is 64bit but the following is safe on 32bit too as we
- * just need the lower ulong to be written and read atomically.
- */
- v = READ_ONCE(skcd->val);
-
- if (v & 3)
- return &cgrp_dfl_root.cgrp;
-
- return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
-#else
- return (struct cgroup *)(unsigned long)skcd->val;
-#endif
+ return skcd->cgroup;
}
#else /* CONFIG_SOCK_CGROUP_DATA */
@@ -853,50 +792,6 @@ static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
#endif /* CONFIG_SOCK_CGROUP_DATA */
-struct cgroup_namespace {
- struct ns_common ns;
- struct user_namespace *user_ns;
- struct ucounts *ucounts;
- struct css_set *root_cset;
-};
-
-extern struct cgroup_namespace init_cgroup_ns;
-
-#ifdef CONFIG_CGROUPS
-
-void free_cgroup_ns(struct cgroup_namespace *ns);
-
-struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
- struct user_namespace *user_ns,
- struct cgroup_namespace *old_ns);
-
-int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
- struct cgroup_namespace *ns);
-
-#else /* !CONFIG_CGROUPS */
-
-static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
-static inline struct cgroup_namespace *
-copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
- struct cgroup_namespace *old_ns)
-{
- return old_ns;
-}
-
-#endif /* !CONFIG_CGROUPS */
-
-static inline void get_cgroup_ns(struct cgroup_namespace *ns)
-{
- if (ns)
- refcount_inc(&ns->ns.count);
-}
-
-static inline void put_cgroup_ns(struct cgroup_namespace *ns)
-{
- if (ns && refcount_dec_and_test(&ns->ns.count))
- free_cgroup_ns(ns);
-}
-
#ifdef CONFIG_CGROUPS
void cgroup_enter_frozen(void);
@@ -906,20 +801,6 @@ void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
struct cgroup *dst);
-static inline bool cgroup_task_freeze(struct task_struct *task)
-{
- bool ret;
-
- if (task->flags & PF_KTHREAD)
- return false;
-
- rcu_read_lock();
- ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
- rcu_read_unlock();
-
- return ret;
-}
-
static inline bool cgroup_task_frozen(struct task_struct *task)
{
return task->frozen;
@@ -929,10 +810,6 @@ static inline bool cgroup_task_frozen(struct task_struct *task)
static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
-static inline bool cgroup_task_freeze(struct task_struct *task)
-{
- return false;
-}
static inline bool cgroup_task_frozen(struct task_struct *task)
{
return false;
@@ -958,4 +835,8 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
#endif /* CONFIG_CGROUP_BPF */
+struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);
+
+struct cgroup_of_peak *of_peak(struct kernfs_open_file *of);
+
#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cgroup_api.h b/include/linux/cgroup_api.h
new file mode 100644
index 000000000000..d0cfe8025111
--- /dev/null
+++ b/include/linux/cgroup_api.h
@@ -0,0 +1 @@
+#include <linux/cgroup.h>
diff --git a/include/linux/cgroup_dmem.h b/include/linux/cgroup_dmem.h
new file mode 100644
index 000000000000..dd4869f1d736
--- /dev/null
+++ b/include/linux/cgroup_dmem.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _CGROUP_DMEM_H
+#define _CGROUP_DMEM_H
+
+#include <linux/types.h>
+#include <linux/llist.h>
+
+struct dmem_cgroup_pool_state;
+
+/* Opaque definition of a cgroup region, used internally */
+struct dmem_cgroup_region;
+
+#if IS_ENABLED(CONFIG_CGROUP_DMEM)
+struct dmem_cgroup_region *dmem_cgroup_register_region(u64 size, const char *name_fmt, ...) __printf(2,3);
+void dmem_cgroup_unregister_region(struct dmem_cgroup_region *region);
+int dmem_cgroup_try_charge(struct dmem_cgroup_region *region, u64 size,
+ struct dmem_cgroup_pool_state **ret_pool,
+ struct dmem_cgroup_pool_state **ret_limit_pool);
+void dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size);
+bool dmem_cgroup_state_evict_valuable(struct dmem_cgroup_pool_state *limit_pool,
+ struct dmem_cgroup_pool_state *test_pool,
+ bool ignore_low, bool *ret_hit_low);
+
+void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool);
+#else
+static inline __printf(2,3) struct dmem_cgroup_region *
+dmem_cgroup_register_region(u64 size, const char *name_fmt, ...)
+{
+ return NULL;
+}
+
+static inline void dmem_cgroup_unregister_region(struct dmem_cgroup_region *region)
+{ }
+
+static inline int dmem_cgroup_try_charge(struct dmem_cgroup_region *region, u64 size,
+ struct dmem_cgroup_pool_state **ret_pool,
+ struct dmem_cgroup_pool_state **ret_limit_pool)
+{
+ *ret_pool = NULL;
+
+ if (ret_limit_pool)
+ *ret_limit_pool = NULL;
+
+ return 0;
+}
+
+static inline void dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size)
+{ }
+
+static inline
+bool dmem_cgroup_state_evict_valuable(struct dmem_cgroup_pool_state *limit_pool,
+ struct dmem_cgroup_pool_state *test_pool,
+ bool ignore_low, bool *ret_hit_low)
+{
+ return true;
+}
+
+static inline void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool)
+{ }
+
+#endif
+#endif /* _CGROUP_DMEM_H */
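A hypothetical driver-side use of the charging API above; everything except the dmem_cgroup_* calls is illustrative:

	static int mydrv_alloc_vram(struct dmem_cgroup_region *region, u64 size,
				    struct dmem_cgroup_pool_state **pool)
	{
		struct dmem_cgroup_pool_state *limit_pool;
		int ret;

		ret = dmem_cgroup_try_charge(region, size, pool, &limit_pool);
		if (ret)
			return ret;	/* a cgroup dmem limit was hit */

		/* ... allocate the device memory; on failure undo with: */
		/* dmem_cgroup_uncharge(*pool, size); */
		return 0;
	}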
diff --git a/include/linux/cgroup_namespace.h b/include/linux/cgroup_namespace.h
new file mode 100644
index 000000000000..78a8418558a4
--- /dev/null
+++ b/include/linux/cgroup_namespace.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CGROUP_NAMESPACE_H
+#define _LINUX_CGROUP_NAMESPACE_H
+
+#include <linux/ns_common.h>
+
+struct cgroup_namespace {
+ struct ns_common ns;
+ struct user_namespace *user_ns;
+ struct ucounts *ucounts;
+ struct css_set *root_cset;
+};
+
+extern struct cgroup_namespace init_cgroup_ns;
+
+#ifdef CONFIG_CGROUPS
+
+static inline struct cgroup_namespace *to_cg_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct cgroup_namespace, ns);
+}
+
+void free_cgroup_ns(struct cgroup_namespace *ns);
+
+struct cgroup_namespace *copy_cgroup_ns(u64 flags,
+ struct user_namespace *user_ns,
+ struct cgroup_namespace *old_ns);
+
+int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+ struct cgroup_namespace *ns);
+
+static inline void get_cgroup_ns(struct cgroup_namespace *ns)
+{
+ ns_ref_inc(ns);
+}
+
+static inline void put_cgroup_ns(struct cgroup_namespace *ns)
+{
+ if (ns_ref_put(ns))
+ free_cgroup_ns(ns);
+}
+
+#else /* !CONFIG_CGROUPS */
+
+static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
+static inline struct cgroup_namespace *
+copy_cgroup_ns(u64 flags, struct user_namespace *user_ns,
+ struct cgroup_namespace *old_ns)
+{
+ return old_ns;
+}
+
+static inline void get_cgroup_ns(struct cgroup_namespace *ns) { }
+static inline void put_cgroup_ns(struct cgroup_namespace *ns) { }
+
+#endif /* !CONFIG_CGROUPS */
+
+#endif /* _LINUX_CGROUP_NAMESPACE_H */
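Sketch of the helper lifecycle, with @ns assumed already resolved by the caller:

	static void show_path(struct cgroup *cgrp, struct cgroup_namespace *ns)
	{
		char buf[256];

		get_cgroup_ns(ns);	/* pin across the lookup */
		cgroup_path_ns(cgrp, buf, sizeof(buf), ns);
		put_cgroup_ns(ns);	/* may free_cgroup_ns() on last put */
	}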
diff --git a/include/linux/cgroup_refcnt.h b/include/linux/cgroup_refcnt.h
new file mode 100644
index 000000000000..2eea0a69ecfc
--- /dev/null
+++ b/include/linux/cgroup_refcnt.h
@@ -0,0 +1,96 @@
+/**
+ * css_get - obtain a reference on the specified css
+ * @css: target css
+ *
+ * The caller must already have a reference.
+ */
+CGROUP_REF_FN_ATTRS
+void css_get(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_get(&css->refcnt);
+}
+CGROUP_REF_EXPORT(css_get)
+
+/**
+ * css_get_many - obtain references on the specified css
+ * @css: target css
+ * @n: number of references to get
+ *
+ * The caller must already have a reference.
+ */
+CGROUP_REF_FN_ATTRS
+void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_get_many(&css->refcnt, n);
+}
+CGROUP_REF_EXPORT(css_get_many)
+
+/**
+ * css_tryget - try to obtain a reference on the specified css
+ * @css: target css
+ *
+ * Obtain a reference on @css unless it already has reached zero and is
+ * being released. This function doesn't care whether @css is online or
+ * offline. The caller naturally needs to ensure that @css is accessible
+ * but doesn't have to be holding a reference on it - IOW, RCU protected
+ * access is good enough for this function. Returns %true if a reference
+ * count was successfully obtained; %false otherwise.
+ */
+CGROUP_REF_FN_ATTRS
+bool css_tryget(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ return percpu_ref_tryget(&css->refcnt);
+ return true;
+}
+CGROUP_REF_EXPORT(css_tryget)
+
+/**
+ * css_tryget_online - try to obtain a reference on the specified css if online
+ * @css: target css
+ *
+ * Obtain a reference on @css if it's online. The caller naturally needs
+ * to ensure that @css is accessible but doesn't have to be holding a
+ * reference on it - IOW, RCU protected access is good enough for this
+ * function. Returns %true if a reference count was successfully obtained;
+ * %false otherwise.
+ */
+CGROUP_REF_FN_ATTRS
+bool css_tryget_online(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ return percpu_ref_tryget_live(&css->refcnt);
+ return true;
+}
+CGROUP_REF_EXPORT(css_tryget_online)
+
+/**
+ * css_put - put a css reference
+ * @css: target css
+ *
+ * Put a reference obtained via css_get() and css_tryget_online().
+ */
+CGROUP_REF_FN_ATTRS
+void css_put(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_put(&css->refcnt);
+}
+CGROUP_REF_EXPORT(css_put)
+
+/**
+ * css_put_many - put css references
+ * @css: target css
+ * @n: number of references to put
+ *
+ * Put references obtained via css_get() and css_tryget_online().
+ */
+CGROUP_REF_FN_ATTRS
+void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_put_many(&css->refcnt, n);
+}
+CGROUP_REF_EXPORT(css_put_many)
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 445235487230..3fd0bcbf3080 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -65,6 +65,10 @@ SUBSYS(rdma)
SUBSYS(misc)
#endif
+#if IS_ENABLED(CONFIG_CGROUP_DMEM)
+SUBSYS(dmem)
+#endif
+
/*
* The following subsystems are not supported on the default hierarchy.
*/
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
deleted file mode 100644
index 5f5730c1d324..000000000000
--- a/include/linux/cleancache.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_CLEANCACHE_H
-#define _LINUX_CLEANCACHE_H
-
-#include <linux/fs.h>
-#include <linux/exportfs.h>
-#include <linux/mm.h>
-
-#define CLEANCACHE_NO_POOL -1
-#define CLEANCACHE_NO_BACKEND -2
-#define CLEANCACHE_NO_BACKEND_SHARED -3
-
-#define CLEANCACHE_KEY_MAX 6
-
-/*
- * cleancache requires every file with a page in cleancache to have a
- * unique key unless/until the file is removed/truncated. For some
- * filesystems, the inode number is unique, but for "modern" filesystems
- * an exportable filehandle is required (see exportfs.h)
- */
-struct cleancache_filekey {
- union {
- ino_t ino;
- __u32 fh[CLEANCACHE_KEY_MAX];
- u32 key[CLEANCACHE_KEY_MAX];
- } u;
-};
-
-struct cleancache_ops {
- int (*init_fs)(size_t);
- int (*init_shared_fs)(uuid_t *uuid, size_t);
- int (*get_page)(int, struct cleancache_filekey,
- pgoff_t, struct page *);
- void (*put_page)(int, struct cleancache_filekey,
- pgoff_t, struct page *);
- void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
- void (*invalidate_inode)(int, struct cleancache_filekey);
- void (*invalidate_fs)(int);
-};
-
-extern int cleancache_register_ops(const struct cleancache_ops *ops);
-extern void __cleancache_init_fs(struct super_block *);
-extern void __cleancache_init_shared_fs(struct super_block *);
-extern int __cleancache_get_page(struct page *);
-extern void __cleancache_put_page(struct page *);
-extern void __cleancache_invalidate_page(struct address_space *, struct page *);
-extern void __cleancache_invalidate_inode(struct address_space *);
-extern void __cleancache_invalidate_fs(struct super_block *);
-
-#ifdef CONFIG_CLEANCACHE
-#define cleancache_enabled (1)
-static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
-{
- return mapping->host->i_sb->cleancache_poolid >= 0;
-}
-static inline bool cleancache_fs_enabled(struct page *page)
-{
- return cleancache_fs_enabled_mapping(page->mapping);
-}
-#else
-#define cleancache_enabled (0)
-#define cleancache_fs_enabled(_page) (0)
-#define cleancache_fs_enabled_mapping(_page) (0)
-#endif
-
-/*
- * The shim layer provided by these inline functions allows the compiler
- * to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE
- * is disabled, to a single global variable check if CONFIG_CLEANCACHE
- * is enabled but no cleancache "backend" has dynamically enabled it,
- * and, for the most frequent cleancache ops, to a single global variable
- * check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled
- * and a cleancache backend has dynamically enabled cleancache, but the
- * filesystem referenced by that cleancache op has not enabled cleancache.
- * As a result, CONFIG_CLEANCACHE can be enabled by default with essentially
- * no measurable performance impact.
- */
-
-static inline void cleancache_init_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_init_fs(sb);
-}
-
-static inline void cleancache_init_shared_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_init_shared_fs(sb);
-}
-
-static inline int cleancache_get_page(struct page *page)
-{
- if (cleancache_enabled && cleancache_fs_enabled(page))
- return __cleancache_get_page(page);
- return -1;
-}
-
-static inline void cleancache_put_page(struct page *page)
-{
- if (cleancache_enabled && cleancache_fs_enabled(page))
- __cleancache_put_page(page);
-}
-
-static inline void cleancache_invalidate_page(struct address_space *mapping,
- struct page *page)
-{
- /* careful... page->mapping is NULL sometimes when this is called */
- if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
- __cleancache_invalidate_page(mapping, page);
-}
-
-static inline void cleancache_invalidate_inode(struct address_space *mapping)
-{
- if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
- __cleancache_invalidate_inode(mapping);
-}
-
-static inline void cleancache_invalidate_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_invalidate_fs(sb);
-}
-
-#endif /* _LINUX_CLEANCACHE_H */
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
new file mode 100644
index 000000000000..8d41b917c77d
--- /dev/null
+++ b/include/linux/cleanup.h
@@ -0,0 +1,534 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CLEANUP_H
+#define _LINUX_CLEANUP_H
+
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/args.h>
+
+/**
+ * DOC: scope-based cleanup helpers
+ *
+ * The "goto error" pattern is notorious for introducing subtle resource
+ * leaks. It is tedious and error prone to add new resource acquisition
+ * constraints into code paths that already have several unwind
+ * conditions. The "cleanup" helpers enable the compiler to help with
+ * this tedium and can aid in maintaining LIFO (last in first out)
+ * unwind ordering to avoid unintentional leaks.
+ *
+ * As drivers make up the majority of the kernel code base, here is an
+ * example of using these helpers to clean up PCI drivers. The target of
+ * the cleanups are occasions where a goto is used to unwind a device
+ * reference (pci_dev_put()), or unlock the device (pci_dev_unlock())
+ * before returning.
+ *
+ * The DEFINE_FREE() macro can arrange for PCI device references to be
+ * dropped when the associated variable goes out of scope::
+ *
+ * DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
+ * ...
+ * struct pci_dev *dev __free(pci_dev_put) =
+ * pci_get_slot(parent, PCI_DEVFN(0, 0));
+ *
+ * The above will automatically call pci_dev_put() if @dev is non-NULL
+ * when @dev goes out of scope (automatic variable scope). If a function
+ * wants to invoke pci_dev_put() on error, but return @dev (i.e. without
+ * freeing it) on success, it can do::
+ *
+ * return no_free_ptr(dev);
+ *
+ * ...or::
+ *
+ * return_ptr(dev);
+ *
+ * The DEFINE_GUARD() macro can arrange for the PCI device lock to be
+ * dropped when the scope where guard() is invoked ends::
+ *
+ * DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
+ * ...
+ * guard(pci_dev)(dev);
+ *
+ * The lifetime of the lock obtained by the guard() helper follows the
+ * scope of automatic variable declaration. Take the following example::
+ *
+ * func(...)
+ * {
+ * if (...) {
+ * ...
+ * guard(pci_dev)(dev); // pci_dev_lock() invoked here
+ * ...
+ * } // <- implied pci_dev_unlock() triggered here
+ * }
+ *
+ * Observe the lock is held for the remainder of the "if ()" block not
+ * the remainder of "func()".
+ *
+ * The ACQUIRE() macro can be used in all places that guard() can be
+ * used and additionally support conditional locks::
+ *
+ * DEFINE_GUARD_COND(pci_dev, _try, pci_dev_trylock(_T))
+ * ...
+ * ACQUIRE(pci_dev_try, lock)(dev);
+ * rc = ACQUIRE_ERR(pci_dev_try, &lock);
+ * if (rc)
+ * return rc;
+ * // @lock is held
+ *
+ * Now, when a function uses both __free() and guard()/ACQUIRE(), or
+ * multiple instances of __free(), the LIFO order of variable definition
+ * matters. GCC documentation says:
+ *
+ * "When multiple variables in the same scope have cleanup attributes,
+ * at exit from the scope their associated cleanup functions are run in
+ * reverse order of definition (last defined, first cleanup)."
+ *
+ * When the unwind order matters it requires that variables be defined
+ * mid-function scope rather than at the top of the function. Take the
+ * following example and notice the bug highlighted by "!!"::
+ *
+ * LIST_HEAD(list);
+ * DEFINE_MUTEX(lock);
+ *
+ * struct object {
+ * struct list_head node;
+ * };
+ *
+ * static struct object *alloc_add(void)
+ * {
+ * struct object *obj;
+ *
+ * lockdep_assert_held(&lock);
+ * obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ * if (obj) {
+ *		INIT_LIST_HEAD(&obj->node);
+ *		list_add(&obj->node, &list);
+ * }
+ * return obj;
+ * }
+ *
+ * static void remove_free(struct object *obj)
+ * {
+ * lockdep_assert_held(&lock);
+ * list_del(&obj->node);
+ * kfree(obj);
+ * }
+ *
+ * DEFINE_FREE(remove_free, struct object *, if (_T) remove_free(_T))
+ * static int init(void)
+ * {
+ * struct object *obj __free(remove_free) = NULL;
+ * int err;
+ *
+ * guard(mutex)(&lock);
+ * obj = alloc_add();
+ *
+ * if (!obj)
+ * return -ENOMEM;
+ *
+ * err = other_init(obj);
+ * if (err)
+ * return err; // remove_free() called without the lock!!
+ *
+ * no_free_ptr(obj);
+ * return 0;
+ * }
+ *
+ * That bug is fixed by changing init() to call guard() and define +
+ * initialize @obj in this order::
+ *
+ * guard(mutex)(&lock);
+ * struct object *obj __free(remove_free) = alloc_add();
+ *
+ * Given that the "__free(...) = NULL" pattern for variables defined at
+ * the top of the function poses this potential interdependency problem
+ * the recommendation is to always define and assign variables in one
+ * statement and not group variable definitions at the top of the
+ * function when __free() is used.
+ *
+ * Lastly, given that the benefit of cleanup helpers is removal of
+ * "goto", and that the "goto" statement can jump between scopes, the
+ * expectation is that usage of "goto" and cleanup helpers is never
+ * mixed in the same function. I.e. for a given routine, convert all
+ * resources that need a "goto" cleanup to scope-based cleanup, or
+ * convert none of them.
+ */
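Tying the DOC together, the corrected init() under the recommended ordering (all names besides the cleanup primitives come from the example above):

	static int init(void)
	{
		guard(mutex)(&lock);
		struct object *obj __free(remove_free) = alloc_add();
		int err;

		if (!obj)
			return -ENOMEM;

		err = other_init(obj);
		if (err)
			return err;	/* remove_free() now runs under @lock */

		no_free_ptr(obj);
		return 0;
	}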
+
+/*
+ * DEFINE_FREE(name, type, free):
+ * simple helper macro that defines the required wrapper for a __free()
+ * based cleanup function. @free is an expression using '_T' to access the
+ * variable. @free should typically include a NULL test before calling a
+ * function, see the example below.
+ *
+ * __free(name):
+ * variable attribute to add a scoped based cleanup to the variable.
+ *
+ * no_free_ptr(var):
+ * like a non-atomic xchg(var, NULL), such that the cleanup function will
+ * be inhibited -- provided it sanely deals with a NULL value.
+ *
+ * NOTE: this has __must_check semantics so that it is harder to accidentally
+ * leak the resource.
+ *
+ * return_ptr(p):
+ * returns p while inhibiting the __free().
+ *
+ * Ex.
+ *
+ * DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
+ *
+ * void *alloc_obj(...)
+ * {
+ * struct obj *p __free(kfree) = kmalloc(...);
+ * if (!p)
+ * return NULL;
+ *
+ * if (!init_obj(p))
+ * return NULL;
+ *
+ * return_ptr(p);
+ * }
+ *
+ * NOTE: the DEFINE_FREE()'s @free expression includes a NULL test even though
+ * kfree() is fine to be called with a NULL value. This is on purpose. This way
+ * the compiler sees the end of our alloc_obj() function as:
+ *
+ * tmp = p;
+ * p = NULL;
+ * if (p)
+ * kfree(p);
+ * return tmp;
+ *
+ * And through the magic of value-propagation and dead-code-elimination, it
+ * eliminates the actual cleanup call and compiles into:
+ *
+ * return p;
+ *
+ * Without the NULL test it turns into a mess and the compiler can't help us.
+ */
+
+#define DEFINE_FREE(_name, _type, _free) \
+ static __always_inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }
+
+#define __free(_name) __cleanup(__free_##_name)
+
+#define __get_and_null(p, nullvalue) \
+ ({ \
+ auto __ptr = &(p); \
+ auto __val = *__ptr; \
+ *__ptr = nullvalue; \
+ __val; \
+ })
+
+static __always_inline __must_check
+const volatile void * __must_check_fn(const volatile void *val)
+{ return val; }
+
+#define no_free_ptr(p) \
+ ((typeof(p)) __must_check_fn((__force const volatile void *)__get_and_null(p, NULL)))
+
+#define return_ptr(p) return no_free_ptr(p)
+
+/*
+ * Only for situations where an allocation is handed in to another function
+ * and consumed by that function on success.
+ *
+ * struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);
+ *
+ * setup(f);
+ * if (some_condition)
+ * return -EINVAL;
+ * ....
+ * ret = bar(f);
+ * if (!ret)
+ * retain_and_null_ptr(f);
+ * return ret;
+ *
+ * After retain_and_null_ptr(f) the variable f is NULL and cannot be
+ * dereferenced anymore.
+ */
+#define retain_and_null_ptr(p) ((void)__get_and_null(p, NULL))
+
+/*
+ * DEFINE_CLASS(name, type, exit, init, init_args...):
+ * helper to define the destructor and constructor for a type.
+ * @exit is an expression using '_T' -- similar to FREE above.
+ * @init is an expression in @init_args resulting in @type
+ *
+ * EXTEND_CLASS(name, ext, init, init_args...):
+ * extends class @name to @name@ext with the new constructor
+ *
+ * CLASS(name, var)(args...):
+ * declare the variable @var as an instance of the named class
+ *
+ * CLASS_INIT(name, var, init_expr):
+ *	declare the variable @var as an instance of the named class with a
+ *	custom initialization expression.
+ *
+ * Ex.
+ *
+ * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
+ *
+ * CLASS(fdget, f)(fd);
+ * if (fd_empty(f))
+ * return -EBADF;
+ *
+ * // use 'f' without concern
+ */
+
+#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...) \
+typedef _type class_##_name##_t; \
+static __always_inline void class_##_name##_destructor(_type *p) \
+{ _type _T = *p; _exit; } \
+static __always_inline _type class_##_name##_constructor(_init_args) \
+{ _type t = _init; return t; }
+
+#define EXTEND_CLASS(_name, ext, _init, _init_args...) \
+typedef class_##_name##_t class_##_name##ext##_t; \
+static __always_inline void class_##_name##ext##_destructor(class_##_name##_t *p) \
+{ class_##_name##_destructor(p); } \
+static __always_inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+{ class_##_name##_t t = _init; return t; }
+
+#define CLASS(_name, var) \
+ class_##_name##_t var __cleanup(class_##_name##_destructor) = \
+ class_##_name##_constructor
+
+#define CLASS_INIT(_name, _var, _init_expr) \
+ class_##_name##_t _var __cleanup(class_##_name##_destructor) = (_init_expr)
+
+#define __scoped_class(_name, var, _label, args...) \
+ for (CLASS(_name, var)(args); ; ({ goto _label; })) \
+ if (0) { \
+_label: \
+ break; \
+ } else
+
+#define scoped_class(_name, var, args...) \
+ __scoped_class(_name, var, __UNIQUE_ID(label), args)
+
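A sketch of scoped_class() using the fdget class from the DOC above; 'f' is only visible inside the braces and fdput() runs on every exit path:

	static int use_fd(int fd)
	{
		scoped_class(fdget, f, fd) {
			if (fd_empty(f))
				return -EBADF;	/* fdput() still runs */
			/* ... use f ... */
		}
		return 0;
	}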
+/*
+ * DEFINE_GUARD(name, type, lock, unlock):
+ * trivial wrapper around DEFINE_CLASS() above specifically
+ * for locks.
+ *
+ * DEFINE_GUARD_COND(name, ext, condlock)
+ * wrapper around EXTEND_CLASS above to add conditional lock
+ * variants to a base class, eg. mutex_trylock() or
+ * mutex_lock_interruptible().
+ *
+ * guard(name):
+ * an anonymous instance of the (guard) class, not recommended for
+ * conditional locks.
+ *
+ * scoped_guard (name, args...) { }:
+ * similar to CLASS(name, scope)(args), except the variable (with the
+ *	explicit name 'scope') is declared in a for-loop such that its scope is
+ * bound to the next (compound) statement.
+ *
+ * for conditional locks the loop body is skipped when the lock is not
+ * acquired.
+ *
+ * scoped_cond_guard (name, fail, args...) { }:
+ *	similar to scoped_guard(), except it runs the @fail statement when the
+ *	lock acquire fails.
+ *
+ * Only for conditional locks.
+ *
+ * ACQUIRE(name, var):
+ * a named instance of the (guard) class, suitable for conditional
+ * locks when paired with ACQUIRE_ERR().
+ *
+ * ACQUIRE_ERR(name, &var):
+ * a helper that is effectively a PTR_ERR() conversion of the guard
+ * pointer. Returns 0 when the lock was acquired and a negative
+ * error code otherwise.
+ */
+
+#define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond) \
+static __maybe_unused const bool class_##_name##_is_conditional = _is_cond
+
+#define DEFINE_CLASS_IS_UNCONDITIONAL(_name) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
+ static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
+ { return (void *)1; }
+
+#define __GUARD_IS_ERR(_ptr) \
+ ({ \
+ unsigned long _rc = (__force unsigned long)(_ptr); \
+ unlikely((_rc - 1) >= -MAX_ERRNO - 1); \
+ })
+
+#define __DEFINE_GUARD_LOCK_PTR(_name, _exp) \
+ static __always_inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
+ { \
+ void *_ptr = (void *)(__force unsigned long)*(_exp); \
+ if (IS_ERR(_ptr)) { \
+ _ptr = NULL; \
+ } \
+ return _ptr; \
+ } \
+ static __always_inline int class_##_name##_lock_err(class_##_name##_t *_T) \
+ { \
+ long _rc = (__force unsigned long)*(_exp); \
+ if (!_rc) { \
+ _rc = -EBUSY; \
+ } \
+ if (!IS_ERR_VALUE(_rc)) { \
+ _rc = 0; \
+ } \
+ return _rc; \
+ }
+
+#define DEFINE_CLASS_IS_GUARD(_name) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
+ __DEFINE_GUARD_LOCK_PTR(_name, _T)
+
+#define DEFINE_CLASS_IS_COND_GUARD(_name) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name, true); \
+ __DEFINE_GUARD_LOCK_PTR(_name, _T)
+
+#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
+ DEFINE_CLASS(_name, _type, if (!__GUARD_IS_ERR(_T)) { _unlock; }, ({ _lock; _T; }), _type _T); \
+ DEFINE_CLASS_IS_GUARD(_name)
+
+#define DEFINE_GUARD_COND_4(_name, _ext, _lock, _cond) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
+ EXTEND_CLASS(_name, _ext, \
+ ({ void *_t = _T; int _RET = (_lock); if (_T && !(_cond)) _t = ERR_PTR(_RET); _t; }), \
+ class_##_name##_t _T) \
+ static __always_inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ { return class_##_name##_lock_ptr(_T); } \
+ static __always_inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
+ { return class_##_name##_lock_err(_T); }
+
+/*
+ * Default binary condition; success on 'true'.
+ */
+#define DEFINE_GUARD_COND_3(_name, _ext, _lock) \
+ DEFINE_GUARD_COND_4(_name, _ext, _lock, _RET)
+
+#define DEFINE_GUARD_COND(X...) CONCATENATE(DEFINE_GUARD_COND_, COUNT_ARGS(X))(X)
+
+#define guard(_name) \
+ CLASS(_name, __UNIQUE_ID(guard))
+
+#define __guard_ptr(_name) class_##_name##_lock_ptr
+#define __guard_err(_name) class_##_name##_lock_err
+#define __is_cond_ptr(_name) class_##_name##_is_conditional
+
+#define ACQUIRE(_name, _var) CLASS(_name, _var)
+#define ACQUIRE_ERR(_name, _var) __guard_err(_name)(_var)
+
+/*
+ * Helper macro for scoped_guard().
+ *
+ * Note that the "!__is_cond_ptr(_name)" part of the condition ensures that
+ * the compiler can prove that, for unconditional locks, the body of the
+ * loop (caller-provided code glued to the else clause) cannot be skipped.
+ * It is needed because the other part - "__guard_ptr(_name)(&scope)" - is too
+ * hard to deduce (even if it could be proven true for unconditional locks).
+ */
+#define __scoped_guard(_name, _label, args...) \
+ for (CLASS(_name, scope)(args); \
+ __guard_ptr(_name)(&scope) || !__is_cond_ptr(_name); \
+ ({ goto _label; })) \
+ if (0) { \
+_label: \
+ break; \
+ } else
+
+#define scoped_guard(_name, args...) \
+ __scoped_guard(_name, __UNIQUE_ID(label), args)
+
+#define __scoped_cond_guard(_name, _fail, _label, args...) \
+ for (CLASS(_name, scope)(args); true; ({ goto _label; })) \
+ if (!__guard_ptr(_name)(&scope)) { \
+ BUILD_BUG_ON(!__is_cond_ptr(_name)); \
+ _fail; \
+_label: \
+ break; \
+ } else
+
+#define scoped_cond_guard(_name, _fail, args...) \
+ __scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args)
+
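Usage sketches, assuming the mutex guard classes ('mutex', conditional 'mutex_intr') defined elsewhere in the tree; my_mutex and update_state() are illustrative:

	static int poke_state(void)
	{
		scoped_guard (mutex, &my_mutex)
			update_state();	/* lock held for this statement only */

		scoped_cond_guard (mutex_intr, return -EINTR, &my_mutex) {
			update_state();	/* reached only with the lock held */
		}
		return 0;
	}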
+/*
+ * Additional helper macros for generating lock guards with types, either for
+ * locks that don't have a native type (eg. RCU, preempt) or those that need a
+ * 'fat' pointer (eg. spin_lock_irqsave).
+ *
+ * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
+ * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
+ * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
+ *
+ * will result in the following type:
+ *
+ * typedef struct {
+ * type *lock; // 'type := void' for the _0 variant
+ * __VA_ARGS__;
+ * } class_##name##_t;
+ *
+ * As above, both _lock and _unlock are statements, except this time '_T' will
+ * be a pointer to the above struct.
+ */
+
+#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...) \
+typedef struct { \
+ _type *lock; \
+ __VA_ARGS__; \
+} class_##_name##_t; \
+ \
+static __always_inline void class_##_name##_destructor(class_##_name##_t *_T) \
+{ \
+ if (!__GUARD_IS_ERR(_T->lock)) { _unlock; } \
+} \
+ \
+__DEFINE_GUARD_LOCK_PTR(_name, &_T->lock)
+
+#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock) \
+static __always_inline class_##_name##_t class_##_name##_constructor(_type *l) \
+{ \
+ class_##_name##_t _t = { .lock = l }, *_T = &_t; \
+ _lock; \
+ return _t; \
+}
+
+#define __DEFINE_LOCK_GUARD_0(_name, _lock) \
+static __always_inline class_##_name##_t class_##_name##_constructor(void) \
+{ \
+ class_##_name##_t _t = { .lock = (void*)1 }, \
+ *_T __maybe_unused = &_t; \
+ _lock; \
+ return _t; \
+}
+
+#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
+__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
+__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
+__DEFINE_LOCK_GUARD_1(_name, _type, _lock)
+
+#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...) \
+__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
+__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
+__DEFINE_LOCK_GUARD_0(_name, _lock)
+
+#define DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _cond) \
+ __DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
+ EXTEND_CLASS(_name, _ext, \
+ ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
+ int _RET = (_lock); \
+ if (_T->lock && !(_cond)) _T->lock = ERR_PTR(_RET);\
+ _t; }), \
+ typeof_member(class_##_name##_t, lock) l) \
+ static __always_inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+ { return class_##_name##_lock_ptr(_T); } \
+ static __always_inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
+ { return class_##_name##_lock_err(_T); }
+
+#define DEFINE_LOCK_GUARD_1_COND_3(_name, _ext, _lock) \
+ DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _RET)
+
+#define DEFINE_LOCK_GUARD_1_COND(X...) CONCATENATE(DEFINE_LOCK_GUARD_1_COND_, COUNT_ARGS(X))(X)
+
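As a sketch, the 'fat pointer' case the comment above describes, modeled on the irqsave pattern (the my_ names are illustrative):

	DEFINE_LOCK_GUARD_1(my_spin_irqsave, spinlock_t,
			    spin_lock_irqsave(_T->lock, _T->flags),
			    spin_unlock_irqrestore(_T->lock, _T->flags),
			    unsigned long flags)

	/* guard(my_spin_irqsave)(&some_lock); -- saved flags live in the guard */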
+#endif /* _LINUX_CLEANUP_H */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 162a2e5546a3..630705a47129 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -42,6 +42,9 @@ struct dentry;
* struct clk_rate_request - Structure encoding the clk constraints that
* a clock user might require.
*
+ * Should be initialized by calling clk_hw_init_rate_request().
+ *
+ * @core: Pointer to the struct clk_core affected by this request
* @rate: Requested clock rate. This field will be adjusted by
* clock drivers according to hardware capabilities.
* @min_rate: Minimum rate imposed by clk users.
@@ -53,6 +56,7 @@ struct dentry;
*
*/
struct clk_rate_request {
+ struct clk_core *core;
unsigned long rate;
unsigned long min_rate;
unsigned long max_rate;
@@ -60,8 +64,17 @@ struct clk_rate_request {
struct clk_hw *best_parent_hw;
};
+void clk_hw_init_rate_request(const struct clk_hw *hw,
+ struct clk_rate_request *req,
+ unsigned long rate);
+void clk_hw_forward_rate_request(const struct clk_hw *core,
+ const struct clk_rate_request *old_req,
+ const struct clk_hw *parent,
+ struct clk_rate_request *req,
+ unsigned long parent_rate);
+
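A sketch of how a driver's .determine_rate implementation might use the new helpers; the foo_* names and the /2 relation are illustrative:

	static int foo_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
	{
		struct clk_hw *parent = clk_hw_get_parent(hw);
		struct clk_rate_request parent_req;

		/* Build the parent's request from ours for a /2 divider. */
		clk_hw_forward_rate_request(hw, req, parent, &parent_req,
					    req->rate * 2);
		/* ... have the parent round parent_req, then derive req->rate ... */
		return 0;
	}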
/**
- * struct clk_duty - Struture encoding the duty cycle ratio of a clock
+ * struct clk_duty - Structure encoding the duty cycle ratio of a clock
*
* @num: Numerator of the duty cycle ratio
* @den: Denominator of the duty cycle ratio
@@ -116,10 +129,11 @@ struct clk_duty {
* @restore_context: Restore the context of the clock after a restoration
* of power.
*
- * @recalc_rate Recalculate the rate of this clock, by querying hardware. The
+ * @recalc_rate: Recalculate the rate of this clock, by querying hardware. The
* parent rate is an input parameter. It is up to the caller to
- * ensure that the prepare_mutex is held across this call.
- * Returns the calculated rate. Optional, but recommended - if
+ * ensure that the prepare_mutex is held across this call. If the
+ * driver cannot figure out a rate for this clock, it must return
+ * 0. Returns the calculated rate. Optional, but recommended - if
* this op is not set then clock rate will be initialized to 0.
*
* @round_rate: Given a target rate as input, returns the closest rate actually
@@ -342,7 +356,7 @@ struct clk_fixed_rate {
unsigned long flags;
};
-#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0)
+#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0)
extern const struct clk_ops clk_fixed_rate_ops;
struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
@@ -350,7 +364,7 @@ struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
unsigned long fixed_rate, unsigned long fixed_accuracy,
- unsigned long clk_fixed_flags);
+ unsigned long clk_fixed_flags, bool devm);
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate);
@@ -365,7 +379,34 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
*/
#define clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate) \
__clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
- NULL, (flags), (fixed_rate), 0, 0)
+ NULL, (flags), (fixed_rate), 0, 0, false)
+
+/**
+ * devm_clk_hw_register_fixed_rate - register fixed-rate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define devm_clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (fixed_rate), 0, 0, true)
+/**
+ * devm_clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define devm_clk_hw_register_fixed_rate_parent_data(dev, name, parent_data, flags, \
+ fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (fixed_rate), 0, \
+ 0, true)
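A probe-time sketch of the devres variant; the foo_* names and the 25 MHz rate are illustrative:

	static int foo_probe(struct platform_device *pdev)
	{
		struct clk_hw *hw;

		/* Unregistered automatically when the driver detaches. */
		hw = devm_clk_hw_register_fixed_rate(&pdev->dev, "foo_ref",
						     NULL, 0, 25000000);
		return PTR_ERR_OR_ZERO(hw);
	}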
/**
* clk_hw_register_fixed_rate_parent_hw - register fixed-rate clock with
* the clock framework
@@ -378,7 +419,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
#define clk_hw_register_fixed_rate_parent_hw(dev, name, parent_hw, flags, \
fixed_rate) \
__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
- NULL, (flags), (fixed_rate), 0, 0)
+ NULL, (flags), (fixed_rate), 0, 0, false)
/**
* clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with
* the clock framework
@@ -388,11 +429,11 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
* @flags: framework-specific flags
* @fixed_rate: non-adjustable clock rate
*/
-#define clk_hw_register_fixed_rate_parent_data(dev, name, parent_hw, flags, \
+#define clk_hw_register_fixed_rate_parent_data(dev, name, parent_data, flags, \
fixed_rate) \
__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
(parent_data), (flags), (fixed_rate), 0, \
- 0)
+ 0, false)
/**
* clk_hw_register_fixed_rate_with_accuracy - register fixed-rate clock with
* the clock framework
@@ -408,7 +449,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
fixed_accuracy) \
__clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), \
NULL, NULL, (flags), (fixed_rate), \
- (fixed_accuracy), 0)
+ (fixed_accuracy), 0, false)
/**
* clk_hw_register_fixed_rate_with_accuracy_parent_hw - register fixed-rate
* clock with the clock framework
@@ -421,15 +462,15 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
*/
#define clk_hw_register_fixed_rate_with_accuracy_parent_hw(dev, name, \
parent_hw, flags, fixed_rate, fixed_accuracy) \
- __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw) \
- NULL, NULL, (flags), (fixed_rate), \
- (fixed_accuracy), 0)
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (fixed_rate), \
+ (fixed_accuracy), 0, false)
/**
* clk_hw_register_fixed_rate_with_accuracy_parent_data - register fixed-rate
* clock with the clock framework
* @dev: device that is registering this clock
* @name: name of this clock
- * @parent_name: name of clock's parent
+ * @parent_data: parent clk data
* @flags: framework-specific flags
* @fixed_rate: non-adjustable clock rate
* @fixed_accuracy: non-adjustable clock accuracy
@@ -438,7 +479,21 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
parent_data, flags, fixed_rate, fixed_accuracy) \
__clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
(parent_data), NULL, (flags), \
- (fixed_rate), (fixed_accuracy), 0)
+ (fixed_rate), (fixed_accuracy), 0, false)
+/**
+ * clk_hw_register_fixed_rate_parent_accuracy - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate_parent_accuracy(dev, name, parent_data, \
+ flags, fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (fixed_rate), 0, \
+ CLK_FIXED_RATE_PARENT_ACCURACY, false)
void clk_unregister_fixed_rate(struct clk *clk);
void clk_hw_unregister_fixed_rate(struct clk_hw *hw);
@@ -490,6 +545,13 @@ struct clk_hw *__clk_hw_register_gate(struct device *dev,
unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock);
+struct clk_hw *__devm_clk_hw_register_gate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock);
struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
@@ -544,6 +606,59 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
__clk_hw_register_gate((dev), NULL, (name), NULL, NULL, (parent_data), \
(flags), (reg), (bit_idx), \
(clk_gate_flags), (lock))
+/**
+ * devm_clk_hw_register_gate - register a gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_gate(dev, name, parent_name, flags, reg, bit_idx,\
+ clk_gate_flags, lock) \
+ __devm_clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+/**
+ * devm_clk_hw_register_gate_parent_hw - register a gate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_gate_parent_hw(dev, name, parent_hw, flags, \
+ reg, bit_idx, clk_gate_flags, \
+ lock) \
+ __devm_clk_hw_register_gate((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+/**
+ * devm_clk_hw_register_gate_parent_data - register a gate clock with the
+ * clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_gate_parent_data(dev, name, parent_data, flags, \
+ reg, bit_idx, clk_gate_flags, \
+ lock) \
+ __devm_clk_hw_register_gate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+
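A sketch of the managed gate variant; the register layout, names, and lock are illustrative:

	static int ccm_add_uart_gate(struct device *dev, void __iomem *base,
				     spinlock_t *ccm_lock)
	{
		struct clk_hw *hw;

		/* Gate controlled by bit 3 of the register at base + 0x10. */
		hw = devm_clk_hw_register_gate(dev, "uart_gate", "bus_clk", 0,
					       base + 0x10, 3, 0, ccm_lock);
		return PTR_ERR_OR_ZERO(hw);
	}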
void clk_unregister_gate(struct clk *clk);
void clk_hw_unregister_gate(struct clk_hw *hw);
int clk_gate_is_enabled(struct clk_hw *hw);
@@ -566,7 +681,7 @@ struct clk_div_table {
* Clock with an adjustable divider affecting its output frequency. Implements
* .recalc_rate, .set_rate and .round_rate
*
- * Flags:
+ * @flags:
* CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the
* register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is
* the raw value read from the register, with the value of zero considered
@@ -592,13 +707,15 @@ struct clk_div_table {
* CLK_DIVIDER_BIG_ENDIAN - By default little endian register accesses are used
* for the divider register. Setting this flag makes the register accesses
* big endian.
+ * CLK_DIVIDER_EVEN_INTEGERS - clock divisor is 2, 4, 6, 8, 10, etc.
+ * Formula is 2 * (value read from hardware + 1).
*/
struct clk_divider {
struct clk_hw hw;
void __iomem *reg;
u8 shift;
u8 width;
- u8 flags;
+ u16 flags;
const struct clk_div_table *table;
spinlock_t *lock;
};
@@ -614,6 +731,7 @@ struct clk_divider {
#define CLK_DIVIDER_READ_ONLY BIT(5)
#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
#define CLK_DIVIDER_BIG_ENDIAN BIT(7)
+#define CLK_DIVIDER_EVEN_INTEGERS BIT(8)
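Worked example for the new flag: with CLK_DIVIDER_EVEN_INTEGERS, a raw register value of 2 selects a divisor of 2 * (2 + 1) = 6, so a 48 MHz parent yields 8 MHz.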
extern const struct clk_ops clk_divider_ops;
extern const struct clk_ops clk_divider_ro_ops;
@@ -629,6 +747,12 @@ long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
unsigned long rate, unsigned long *prate,
const struct clk_div_table *table, u8 width,
unsigned long flags, unsigned int val);
+int divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags);
+int divider_ro_determine_rate(struct clk_hw *hw, struct clk_rate_request *req,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags, unsigned int val);
int divider_get_val(unsigned long rate, unsigned long parent_rate,
const struct clk_div_table *table, u8 width,
unsigned long flags);
@@ -637,19 +761,21 @@ struct clk_hw *__clk_hw_register_divider(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ void __iomem *reg, u8 shift, u8 width,
+ unsigned long clk_divider_flags,
const struct clk_div_table *table, spinlock_t *lock);
struct clk_hw *__devm_clk_hw_register_divider(struct device *dev,
struct device_node *np, const char *name,
const char *parent_name, const struct clk_hw *parent_hw,
const struct clk_parent_data *parent_data, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ void __iomem *reg, u8 shift, u8 width,
+ unsigned long clk_divider_flags,
const struct clk_div_table *table, spinlock_t *lock);
struct clk *clk_register_divider_table(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, const struct clk_div_table *table,
- spinlock_t *lock);
+ unsigned long clk_divider_flags,
+ const struct clk_div_table *table, spinlock_t *lock);
/**
* clk_register_divider - register a divider clock with the clock framework
* @dev: device registering this clock
@@ -803,6 +929,25 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
NULL, (flags), (reg), (shift), (width), \
(clk_divider_flags), NULL, (lock))
/**
+ * devm_clk_hw_register_divider_parent_hw - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_divider_parent_hw(dev, name, parent_hw, flags, \
+ reg, shift, width, \
+ clk_divider_flags, lock) \
+ __devm_clk_hw_register_divider((dev), NULL, (name), NULL, \
+ (parent_hw), NULL, (flags), (reg), \
+ (shift), (width), (clk_divider_flags), \
+ NULL, (lock))
+/**
* devm_clk_hw_register_divider_table - register a table based divider clock
* with the clock framework (devres variant)
* @dev: device registering this clock
@@ -859,7 +1004,7 @@ void clk_hw_unregister_divider(struct clk_hw *hw);
struct clk_mux {
struct clk_hw hw;
void __iomem *reg;
- u32 *table;
+ const u32 *table;
u32 mask;
u8 shift;
u8 flags;
@@ -884,18 +1029,18 @@ struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
const struct clk_hw **parent_hws,
const struct clk_parent_data *parent_data,
unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+ u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np,
const char *name, u8 num_parents,
const char * const *parent_names,
const struct clk_hw **parent_hws,
const struct clk_parent_data *parent_data,
unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+ u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+ u8 clk_mux_flags, const u32 *table, spinlock_t *lock);
#define clk_register_mux(dev, name, parent_names, num_parents, flags, reg, \
shift, width, clk_mux_flags, lock) \
@@ -909,6 +1054,13 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
(parent_names), NULL, NULL, (flags), (reg), \
(shift), (mask), (clk_mux_flags), (table), \
(lock))
+#define clk_hw_register_mux_table_parent_data(dev, name, parent_data, \
+ num_parents, flags, reg, shift, mask, \
+ clk_mux_flags, table, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), \
+ NULL, NULL, (parent_data), (flags), (reg), \
+ (shift), (mask), (clk_mux_flags), (table), \
+ (lock))
#define clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
shift, width, clk_mux_flags, lock) \
__clk_hw_register_mux((dev), NULL, (name), (num_parents), \
@@ -926,16 +1078,37 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
__clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \
(parent_data), (flags), (reg), (shift), \
BIT((width)) - 1, (clk_mux_flags), NULL, (lock))
+#define clk_hw_register_mux_parent_data_table(dev, name, parent_data, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, table, \
+ lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \
+ (parent_data), (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), table, (lock))
#define devm_clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
shift, width, clk_mux_flags, lock) \
__devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), \
(parent_names), NULL, NULL, (flags), (reg), \
(shift), BIT((width)) - 1, (clk_mux_flags), \
NULL, (lock))
-
-int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
+#define devm_clk_hw_register_mux_parent_hws(dev, name, parent_hws, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, lock) \
+ __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \
+ (parent_hws), NULL, (flags), (reg), \
+ (shift), BIT((width)) - 1, \
+ (clk_mux_flags), NULL, (lock))
+#define devm_clk_hw_register_mux_parent_data_table(dev, name, parent_data, \
+ num_parents, flags, reg, shift, \
+ width, clk_mux_flags, table, \
+ lock) \
+ __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \
+ NULL, (parent_data), (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), table, (lock))
+
+int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags,
unsigned int val);
-unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index);
+unsigned int clk_mux_index_to_val(const u32 *table, unsigned int flags, u8 index);
void clk_unregister_mux(struct clk *clk);
void clk_hw_unregister_mux(struct clk_hw *hw);
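A minimal registration sketch for the const-table mux variants above. The
device, register base, clock names, and lock are hypothetical; the table can
now live in rodata because the parameter was constified in this change:

	/* Map register values 0x1/0x2/0x4 to parent indices 0/1/2. */
	static const u32 foo_mux_table[] = { 0x1, 0x2, 0x4 };
	static const struct clk_parent_data foo_mux_parents[] = {
		{ .fw_name = "osc" },
		{ .fw_name = "pll_a" },
		{ .fw_name = "pll_b" },
	};
	static DEFINE_SPINLOCK(foo_mux_lock);

	hw = clk_hw_register_mux_table_parent_data(dev, "foo_mux",
			foo_mux_parents, ARRAY_SIZE(foo_mux_parents), 0,
			base + 0x10, 0 /* shift */, 0x7 /* mask */,
			0, foo_mux_table, &foo_mux_lock);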
@@ -948,18 +1121,28 @@ void of_fixed_factor_clk_setup(struct device_node *node);
* @hw: handle between common and hardware-specific interfaces
* @mult: multiplier
* @div: divider
+ * @acc: fixed accuracy in ppb
+ * @flags: behavior modifying flags
*
* Clock with a fixed multiplier and divider. The output frequency is the
* parent clock rate divided by div and multiplied by mult.
- * Implements .recalc_rate, .set_rate and .round_rate
+ * Implements .recalc_rate, .set_rate, .round_rate and .recalc_accuracy
+ *
+ * Flags:
+ * * CLK_FIXED_FACTOR_FIXED_ACCURACY - Use the value in @acc instead of the
+ * parent clk accuracy.
*/
struct clk_fixed_factor {
struct clk_hw hw;
unsigned int mult;
unsigned int div;
+ unsigned long acc;
+ unsigned int flags;
};
+#define CLK_FIXED_FACTOR_FIXED_ACCURACY BIT(0)
+
#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw)
extern const struct clk_ops clk_fixed_factor_ops;
@@ -970,10 +1153,38 @@ void clk_unregister_fixed_factor(struct clk *clk);
struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div);
+struct clk_hw *clk_hw_register_fixed_factor_fwname(struct device *dev,
+ struct device_node *np, const char *name, const char *fw_name,
+ unsigned long flags, unsigned int mult, unsigned int div);
+struct clk_hw *clk_hw_register_fixed_factor_with_accuracy_fwname(struct device *dev,
+ struct device_node *np, const char *name, const char *fw_name,
+ unsigned long flags, unsigned int mult, unsigned int div,
+ unsigned long acc);
+struct clk_hw *clk_hw_register_fixed_factor_index(struct device *dev,
+ const char *name, unsigned int index, unsigned long flags,
+ unsigned int mult, unsigned int div);
void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div);
+struct clk_hw *devm_clk_hw_register_fixed_factor_fwname(struct device *dev,
+ struct device_node *np, const char *name, const char *fw_name,
+ unsigned long flags, unsigned int mult, unsigned int div);
+struct clk_hw *devm_clk_hw_register_fixed_factor_with_accuracy_fwname(struct device *dev,
+ struct device_node *np, const char *name, const char *fw_name,
+ unsigned long flags, unsigned int mult, unsigned int div,
+ unsigned long acc);
+struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev,
+ const char *name, unsigned int index, unsigned long flags,
+ unsigned int mult, unsigned int div);
+
+struct clk_hw *devm_clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div);
+
+struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
+ const char *name, const struct clk_hw *parent_hw,
+ unsigned long flags, unsigned int mult, unsigned int div);
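A brief sketch of the new accuracy-aware fwname variant. The device, node,
clock names, and the 100 ppb figure are hypothetical; @acc is in ppb per the
kernel-doc above:

	/* A /2 post-divider of the DT clock referenced as "xtal", with a
	 * fixed 100 ppb accuracy instead of inheriting the parent's.
	 */
	hw = clk_hw_register_fixed_factor_with_accuracy_fwname(dev,
			dev->of_node, "xtal_div2", "xtal", 0,
			1 /* mult */, 2 /* div */, 100 /* acc, ppb */);
	if (IS_ERR(hw))
		return PTR_ERR(hw);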
/**
* struct clk_fractional_divider - adjustable fractional divider clock
*
@@ -983,11 +1194,12 @@ struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
* @mwidth: width of the numerator bit field
* @nshift: shift to the denominator bit field
* @nwidth: width of the denominator bit field
+ * @approximation: clk driver's callback for calculating the divider's rate approximation
* @lock: register lock
*
* Clock with adjustable fractional divider affecting its output frequency.
*
- * Flags:
+ * @flags:
* CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator
* is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
* is set then the numerator and denominator are both the value read
@@ -995,16 +1207,20 @@ struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev,
* CLK_FRAC_DIVIDER_BIG_ENDIAN - By default little endian register accesses are
* used for the divider register. Setting this flag makes the register
* accesses big endian.
+ * CLK_FRAC_DIVIDER_POWER_OF_TWO_PS - By default the resulting fraction might
+ * be saturated, leaving the caller quite far from a good enough
+ * approximation. By setting this flag, the caller instead requests that
+ * the rate be shifted left by a few bits when the requested rate is too
+ * small to satisfy the desired denominator range. This assumes that a
+ * power-of-two capable prescaler exists on the caller's side.
*/
struct clk_fractional_divider {
struct clk_hw hw;
void __iomem *reg;
u8 mshift;
u8 mwidth;
- u32 mmask;
u8 nshift;
u8 nwidth;
- u32 nmask;
u8 flags;
void (*approximation)(struct clk_hw *hw,
unsigned long rate, unsigned long *parent_rate,
@@ -1016,8 +1232,8 @@ struct clk_fractional_divider {
#define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0)
#define CLK_FRAC_DIVIDER_BIG_ENDIAN BIT(1)
+#define CLK_FRAC_DIVIDER_POWER_OF_TWO_PS BIT(2)
-extern const struct clk_ops clk_fractional_divider_ops;
struct clk *clk_register_fractional_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
@@ -1040,7 +1256,7 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw);
* Clock with an adjustable multiplier affecting its output frequency.
* Implements .recalc_rate, .set_rate and .round_rate
*
- * Flags:
+ * @flags:
* CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read
* from the register, with 0 being a valid value effectively
* zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is
@@ -1063,9 +1279,9 @@ struct clk_multiplier {
#define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw)
-#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
+#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1)
-#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2)
+#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2)
extern const struct clk_ops clk_multiplier_ops;
@@ -1138,14 +1354,38 @@ int __must_check devm_clk_hw_register(struct device *dev, struct clk_hw *hw);
int __must_check of_clk_hw_register(struct device_node *node, struct clk_hw *hw);
void clk_unregister(struct clk *clk);
-void devm_clk_unregister(struct device *dev, struct clk *clk);
void clk_hw_unregister(struct clk_hw *hw);
-void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw);
/* helper functions */
const char *__clk_get_name(const struct clk *clk);
const char *clk_hw_get_name(const struct clk_hw *hw);
+
+/**
+ * clk_hw_get_dev() - get device from a hardware clock.
+ * @hw: the clk_hw pointer to get the struct device from
+ *
+ * This is a helper to get the struct device associated with a hardware
+ * clock. Some clock controllers, such as those registered with
+ * CLK_OF_DECLARE(), may not have provided a device pointer when
+ * registering the clock.
+ *
+ * Return: the struct device associated with the clock, or NULL if there
+ * is none.
+ */
+struct device *clk_hw_get_dev(const struct clk_hw *hw);
+
+/**
+ * clk_hw_get_of_node() - get device_node from a hardware clock.
+ * @hw: the clk_hw pointer to get the struct device_node from
+ *
+ * This is a helper to get the struct device_node associated with a
+ * hardware clock.
+ *
+ * Return: the struct device_node associated with the clock, or NULL
+ * if there is none.
+ */
+struct device_node *clk_hw_get_of_node(const struct clk_hw *hw);
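A sketch of the two helpers inside a hypothetical clk_ops callback; both may
return NULL, e.g. for clocks registered via CLK_OF_DECLARE():

	static unsigned long foo_recalc_rate(struct clk_hw *hw,
					     unsigned long parent_rate)
	{
		struct device *dev = clk_hw_get_dev(hw);
		struct device_node *np = clk_hw_get_of_node(hw);

		if (dev)
			dev_dbg(dev, "recalc for node %pOF\n", np);

		return parent_rate;	/* pass-through, for illustration */
	}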
#ifdef CONFIG_COMMON_CLK
struct clk_hw *__clk_get_hw(struct clk *clk);
#else
@@ -1172,7 +1412,6 @@ unsigned long clk_hw_get_flags(const struct clk_hw *hw);
(clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT)
bool clk_hw_is_prepared(const struct clk_hw *hw);
-bool clk_hw_rate_is_protected(const struct clk_hw *hw);
bool clk_hw_is_enabled(const struct clk_hw *hw);
bool __clk_is_enabled(struct clk *clk);
struct clk *__clk_lookup(const char *name);
@@ -1184,7 +1423,11 @@ int __clk_mux_determine_rate_closest(struct clk_hw *hw,
int clk_mux_determine_rate_flags(struct clk_hw *hw,
struct clk_rate_request *req,
unsigned long flags);
+int clk_hw_determine_rate_no_reparent(struct clk_hw *hw,
+ struct clk_rate_request *req);
void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
+void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
+ unsigned long *max_rate);
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
unsigned long max_rate);
@@ -1226,10 +1469,16 @@ struct clk_onecell_data {
struct clk_hw_onecell_data {
unsigned int num;
- struct clk_hw *hws[];
+ struct clk_hw *hws[] __counted_by(num);
};
-#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn)
+#define CLK_OF_DECLARE(name, compat, fn) \
+ static void __init __##name##_of_clk_init_declare(struct device_node *np) \
+ { \
+ fn(np); \
+ fwnode_dev_initialized(of_fwnode_handle(np), true); \
+ } \
+ OF_DECLARE_1(clk, name, compat, __##name##_of_clk_init_declare)
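Call sites are unchanged by this rework; the wrapper additionally marks the
OF fwnode as initialized once the init function has run, so consumers of the
firmware node are not left waiting on a device that will never bind. A
hypothetical early provider:

	static void __init foo_clk_init(struct device_node *np)
	{
		/* parse @np and register early fixed clocks here */
	}
	CLK_OF_DECLARE(foo_clk, "vendor,foo-clk", foo_clk_init);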
/*
* Use this macro when you have a driver that requires two initialization
@@ -1384,7 +1633,7 @@ int devm_of_clk_add_hw_provider(struct device *dev,
void *data),
void *data);
void of_clk_del_provider(struct device_node *np);
-void devm_of_clk_del_provider(struct device *dev);
+
struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
void *data);
struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec,
@@ -1421,7 +1670,7 @@ static inline int devm_of_clk_add_hw_provider(struct device *dev,
return 0;
}
static inline void of_clk_del_provider(struct device_node *np) {}
-static inline void devm_of_clk_del_provider(struct device *dev) {}
+
static inline struct clk *of_clk_src_simple_get(
struct of_phandle_args *clkspec, void *data)
{
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 266e8de3cb51..b607482ca77e 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -183,6 +183,51 @@ int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
*/
bool clk_is_match(const struct clk *p, const struct clk *q);
+/**
+ * clk_rate_exclusive_get - get exclusivity over the rate control of a
+ * producer
+ * @clk: clock source
+ *
+ * This function allows drivers to get exclusive control over the rate of a
+ * provider. It prevents any other consumer from executing, even indirectly,
+ * operations which could alter the rate of the provider or cause glitches.
+ *
+ * If exclusivity is claimed more than once on a clock, even by the same driver,
+ * the rate effectively gets locked as exclusivity can't be preempted.
+ *
+ * Must not be called from within atomic context.
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_rate_exclusive_get(struct clk *clk);
+
+/**
+ * devm_clk_rate_exclusive_get - devm variant of clk_rate_exclusive_get
+ * @dev: device the exclusivity is bound to
+ * @clk: clock source
+ *
+ * Calls clk_rate_exclusive_get() on @clk and registers a devm cleanup handler
+ * on @dev to call clk_rate_exclusive_put().
+ *
+ * Must not be called from within atomic context.
+ */
+int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk);
+
+/**
+ * clk_rate_exclusive_put - release exclusivity over the rate control of a
+ * producer
+ * @clk: clock source
+ *
+ * This function allows drivers to release the exclusivity they previously got
+ * from clk_rate_exclusive_get().
+ *
+ * The caller must balance the number of clk_rate_exclusive_get() and
+ * clk_rate_exclusive_put() calls.
+ *
+ * Must not be called from within atomic context.
+ */
+void clk_rate_exclusive_put(struct clk *clk);
+
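A sketch of the get/put pairing around a rate-critical section (hypothetical
consumer code; the rate value is arbitrary):

	ret = clk_rate_exclusive_get(clk);
	if (ret)
		return ret;

	ret = clk_set_rate(clk, 48000000);
	if (!ret) {
		/* ... glitch-sensitive work at a guaranteed rate ... */
	}

	clk_rate_exclusive_put(clk);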
#else
static inline int clk_notifier_register(struct clk *clk,
@@ -236,6 +281,18 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
return p == q;
}
+static inline int clk_rate_exclusive_get(struct clk *clk)
+{
+ return 0;
+}
+
+static inline int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk)
+{
+ return 0;
+}
+
+static inline void clk_rate_exclusive_put(struct clk *clk) {}
+
#endif
#ifdef CONFIG_HAVE_CLK_PREPARE
@@ -439,19 +496,38 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk_bulk_data **clks);
/**
+ * devm_clk_bulk_get_all_enabled - Get and enable all clocks of the consumer (managed)
+ * @dev: device for clock "consumer"
+ * @clks: pointer to the clk_bulk_data table of consumer
+ *
+ * Returns a positive value for the number of clocks obtained, while the
+ * clock references are stored in the clk_bulk_data table in the @clks field.
+ * Returns 0 if there are none and a negative value if something failed.
+ *
+ * This helper function allows drivers to get all clocks of the
+ * consumer and enable them in one managed operation. The clocks will
+ * automatically be disabled and freed when the device is unbound.
+ */
+int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
+ struct clk_bulk_data **clks);
+
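Typical probe-time usage (a sketch, with error handling reduced to the
essentials):

	struct clk_bulk_data *clks;
	int num_clks;

	num_clks = devm_clk_bulk_get_all_enabled(dev, &clks);
	if (num_clks < 0)
		return num_clks;
	/* all num_clks clocks stay prepared and enabled until unbind */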
+/**
* devm_clk_get - lookup and obtain a managed reference to a clock producer.
* @dev: device for clock "consumer"
* @id: clock consumer ID
*
- * Returns a struct clk corresponding to the clock producer, or
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
* valid IS_ERR() condition containing errno. The implementation
* uses @dev and @id to determine the clock consumer, and thereby
* the clock producer. (IOW, @id may be identical strings, but
* clk_get may return different clock producers depending on @dev.)
*
- * Drivers must assume that the clock source is not enabled.
- *
- * devm_clk_get should not be called from within interrupt context.
+ * Drivers must assume that the clock source is neither prepared nor
+ * enabled.
*
* The clock will automatically be freed when the device is unbound
* from the bus.
@@ -459,64 +535,155 @@ int __must_check devm_clk_bulk_get_all(struct device *dev,
struct clk *devm_clk_get(struct device *dev, const char *id);
/**
+ * devm_clk_get_prepared - devm_clk_get() + clk_prepare()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * The returned clk (if valid) is prepared. Drivers must however assume
+ * that the clock is not enabled.
+ *
+ * The clock will automatically be unprepared and freed when the device
+ * is unbound from the bus.
+ */
+struct clk *devm_clk_get_prepared(struct device *dev, const char *id);
+
+/**
+ * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * The returned clk (if valid) is prepared and enabled.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
+ */
+struct clk *devm_clk_get_enabled(struct device *dev, const char *id);
+
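A sketch of the usual probe pattern (the "bus" consumer ID is hypothetical):

	struct clk *clk;

	clk = devm_clk_get_enabled(dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	/* no clk_disable_unprepare()/clk_put() needed in remove() */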
+/**
* devm_clk_get_optional - lookup and obtain a managed reference to an optional
* clock producer.
* @dev: device for clock "consumer"
* @id: clock consumer ID
*
- * Behaves the same as devm_clk_get() except where there is no clock producer.
- * In this case, instead of returning -ENOENT, the function returns NULL.
+ * Context: May sleep.
+ *
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get().
+ *
+ * Drivers must assume that the clock source is neither prepared nor
+ * enabled.
+ *
+ * The clock will automatically be freed when the device is unbound
+ * from the bus.
*/
struct clk *devm_clk_get_optional(struct device *dev, const char *id);
/**
- * devm_get_clk_from_child - lookup and obtain a managed reference to a
- * clock producer from child node.
+ * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare()
* @dev: device for clock "consumer"
- * @np: pointer to clock consumer node
- * @con_id: clock consumer ID
+ * @id: clock consumer ID
*
- * This function parses the clocks, and uses them to look up the
- * struct clk from the registered list of clock providers by using
- * @np and @con_id
+ * Context: May sleep.
*
- * The clock will automatically be freed when the device is unbound
- * from the bus.
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_prepared().
+ *
+ * The returned clk (if valid) is prepared. Drivers must however
+ * assume that the clock is not enabled.
+ *
+ * The clock will automatically be unprepared and freed when the
+ * device is unbound from the bus.
*/
-struct clk *devm_get_clk_from_child(struct device *dev,
- struct device_node *np, const char *con_id);
+struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id);
+
/**
- * clk_rate_exclusive_get - get exclusivity over the rate control of a
- * producer
- * @clk: clock source
+ * devm_clk_get_optional_enabled - devm_clk_get_optional() +
+ * clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
*
- * This function allows drivers to get exclusive control over the rate of a
- * provider. It prevents any other consumer to execute, even indirectly,
- * opereation which could alter the rate of the provider or cause glitches
+ * Context: May sleep.
*
- * If exlusivity is claimed more than once on clock, even by the same driver,
- * the rate effectively gets locked as exclusivity can't be preempted.
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_enabled().
*
- * Must not be called from within atomic context.
+ * The returned clk (if valid) is prepared and enabled.
*
- * Returns success (0) or negative errno.
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
*/
-int clk_rate_exclusive_get(struct clk *clk);
+struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id);
/**
- * clk_rate_exclusive_put - release exclusivity over the rate control of a
- * producer
- * @clk: clock source
+ * devm_clk_get_optional_enabled_with_rate - devm_clk_get_optional() +
+ * clk_set_rate() +
+ * clk_prepare_enable()
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ * @rate: new clock rate
*
- * This function allows drivers to release the exclusivity it previously got
- * from clk_rate_exclusive_get()
+ * Context: May sleep.
*
- * The caller must balance the number of clk_rate_exclusive_get() and
- * clk_rate_exclusive_put() calls.
+ * Return: a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. If no such clk is found, it returns NULL
+ * which serves as a dummy clk. That's the only difference compared
+ * to devm_clk_get_enabled().
*
- * Must not be called from within atomic context.
+ * The returned clk (if valid) is prepared and enabled, and its rate has been set.
+ *
+ * The clock will automatically be disabled, unprepared and freed
+ * when the device is unbound from the bus.
*/
-void clk_rate_exclusive_put(struct clk *clk);
+struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev,
+ const char *id,
+ unsigned long rate);
+
+/**
+ * devm_get_clk_from_child - lookup and obtain a managed reference to a
+ * clock producer from child node.
+ * @dev: device for clock "consumer"
+ * @np: pointer to clock consumer node
+ * @con_id: clock consumer ID
+ *
+ * This function parses the clocks, and uses them to look up the
+ * struct clk from the registered list of clock providers by using
+ * @np and @con_id
+ *
+ * The clock will automatically be freed when the device is unbound
+ * from the bus.
+ */
+struct clk *devm_get_clk_from_child(struct device *dev,
+ struct device_node *np, const char *con_id);
/**
* clk_enable - inform the system when the clock source should be running.
@@ -701,7 +868,7 @@ int clk_set_rate_exclusive(struct clk *clk, unsigned long rate);
*
* Returns true if @parent is a possible parent for @clk, false otherwise.
*/
-bool clk_has_parent(struct clk *clk, struct clk *parent);
+bool clk_has_parent(const struct clk *clk, const struct clk *parent);
/**
* clk_set_rate_range - set a rate range for a clock source
@@ -813,12 +980,43 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id)
return NULL;
}
+static inline struct clk *devm_clk_get_prepared(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_enabled(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
static inline struct clk *devm_clk_get_optional(struct device *dev,
const char *id)
{
return NULL;
}
+static inline struct clk *devm_clk_get_optional_prepared(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get_optional_enabled(struct device *dev,
+ const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *
+devm_clk_get_optional_enabled_with_rate(struct device *dev, const char *id,
+ unsigned long rate)
+{
+ return NULL;
+}
+
static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
struct clk_bulk_data *clks)
{
@@ -838,6 +1036,12 @@ static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
return 0;
}
+static inline int __must_check devm_clk_bulk_get_all_enabled(struct device *dev,
+ struct clk_bulk_data **clks)
+{
+ return 0;
+}
+
static inline struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id)
{
@@ -852,14 +1056,6 @@ static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {}
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
-
-static inline int clk_rate_exclusive_get(struct clk *clk)
-{
- return 0;
-}
-
-static inline void clk_rate_exclusive_put(struct clk *clk) {}
-
static inline int clk_enable(struct clk *clk)
{
return 0;
@@ -987,6 +1183,17 @@ static inline void clk_bulk_disable_unprepare(int num_clks,
}
/**
+ * clk_drop_range - Reset any rate range set on the clock
+ * @clk: clock source
+ *
+ * Returns success (0) or negative errno.
+ */
+static inline int clk_drop_range(struct clk *clk)
+{
+ return clk_set_rate_range(clk, 0, ULONG_MAX);
+}
+
+/**
* clk_get_optional - lookup and obtain a reference to an optional clock
* producer.
* @dev: device for clock "consumer"
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index a4f82e836a7c..d60ce9708ea2 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -12,6 +12,8 @@
#ifndef AT91_PMC_H
#define AT91_PMC_H
+#include <linux/bits.h>
+
#define AT91_PMC_V1 (1) /* PMC version 1 */
#define AT91_PMC_V2 (2) /* PMC version 2 [SAM9X60] */
@@ -45,8 +47,6 @@
#define AT91_PMC_PCSR 0x18 /* Peripheral Clock Status Register */
#define AT91_PMC_PLL_ACR 0x18 /* PLL Analog Control Register [for SAM9X60] */
-#define AT91_PMC_PLL_ACR_DEFAULT_UPLL 0x12020010UL /* Default PLL ACR value for UPLL */
-#define AT91_PMC_PLL_ACR_DEFAULT_PLLA 0x00020010UL /* Default PLL ACR value for PLLA */
#define AT91_PMC_PLL_ACR_UTMIVR (1 << 12) /* UPLL Voltage regulator Control */
#define AT91_PMC_PLL_ACR_UTMIBG (1 << 13) /* UPLL Bandgap Control */
@@ -78,6 +78,10 @@
#define AT91_PMC_MAINRDY (1 << 16) /* Main Clock Ready */
#define AT91_CKGR_PLLAR 0x28 /* PLL A Register */
+
+#define AT91_PMC_RATIO 0x2c /* Processor clock ratio register [SAMA7G5 only] */
+#define AT91_PMC_RATIO_RATIO (0xf) /* CPU clock ratio. */
+
#define AT91_CKGR_PLLBR 0x2c /* PLL B Register */
#define AT91_PMC_DIV (0xff << 0) /* Divider */
#define AT91_PMC_PLLCOUNT (0x3f << 8) /* PLL Counter */
@@ -137,6 +141,32 @@
#define AT91_PMC_PLLADIV2_ON (1 << 12)
#define AT91_PMC_H32MXDIV BIT(24)
+#define AT91_PMC_MCR_V2 0x30 /* Master Clock Register [SAMA7G5 only] */
+#define AT91_PMC_MCR_V2_ID_MSK (0xF)
+#define AT91_PMC_MCR_V2_ID(_id) ((_id) & AT91_PMC_MCR_V2_ID_MSK)
+#define AT91_PMC_MCR_V2_CMD (1 << 7)
+#define AT91_PMC_MCR_V2_DIV (7 << 8)
+#define AT91_PMC_MCR_V2_DIV1 (0 << 8)
+#define AT91_PMC_MCR_V2_DIV2 (1 << 8)
+#define AT91_PMC_MCR_V2_DIV4 (2 << 8)
+#define AT91_PMC_MCR_V2_DIV8 (3 << 8)
+#define AT91_PMC_MCR_V2_DIV16 (4 << 8)
+#define AT91_PMC_MCR_V2_DIV32 (5 << 8)
+#define AT91_PMC_MCR_V2_DIV64 (6 << 8)
+#define AT91_PMC_MCR_V2_DIV3 (7 << 8)
+#define AT91_PMC_MCR_V2_CSS (0x1F << 16)
+#define AT91_PMC_MCR_V2_CSS_MD_SLCK (0 << 16)
+#define AT91_PMC_MCR_V2_CSS_TD_SLCK (1 << 16)
+#define AT91_PMC_MCR_V2_CSS_MAINCK (2 << 16)
+#define AT91_PMC_MCR_V2_CSS_MCK0 (3 << 16)
+#define AT91_PMC_MCR_V2_CSS_SYSPLL (5 << 16)
+#define AT91_PMC_MCR_V2_CSS_DDRPLL (6 << 16)
+#define AT91_PMC_MCR_V2_CSS_IMGPLL (7 << 16)
+#define AT91_PMC_MCR_V2_CSS_BAUDPLL (8 << 16)
+#define AT91_PMC_MCR_V2_CSS_AUDIOPLL (9 << 16)
+#define AT91_PMC_MCR_V2_CSS_ETHPLL (10 << 16)
+#define AT91_PMC_MCR_V2_EN (1 << 28)
+
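An illustrative composition of an MCR_V2 value from the fields above,
selecting SYSPLL divided by two for master clock id 1; the actual write
sequence is driver-specific and not shown:

	u32 mcr = AT91_PMC_MCR_V2_ID(1) | AT91_PMC_MCR_V2_CMD |
		  AT91_PMC_MCR_V2_DIV2 | AT91_PMC_MCR_V2_CSS_SYSPLL |
		  AT91_PMC_MCR_V2_EN;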
#define AT91_PMC_XTALF 0x34 /* Main XTAL Frequency Register [SAMA7G5 only] */
#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */
diff --git a/include/linux/clk/davinci.h b/include/linux/clk/davinci.h
index 8a7b5cd7eac0..787a81116b00 100644
--- a/include/linux/clk/davinci.h
+++ b/include/linux/clk/davinci.h
@@ -12,29 +12,6 @@
#include <linux/regmap.h>
/* function for registering clocks in early boot */
-
-#ifdef CONFIG_ARCH_DAVINCI_DA830
-int da830_pll_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DA850
int da850_pll0_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM355
-int dm355_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-int dm355_psc_init(struct device *dev, void __iomem *base);
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM365
-int dm365_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-int dm365_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-int dm365_psc_init(struct device *dev, void __iomem *base);
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM644x
-int dm644x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-int dm644x_psc_init(struct device *dev, void __iomem *base);
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM646x
-int dm646x_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-int dm646x_psc_init(struct device *dev, void __iomem *base);
-#endif
#endif /* __LINUX_CLK_DAVINCI_PLL_H___ */
diff --git a/include/linux/clk/mmp.h b/include/linux/clk/mmp.h
deleted file mode 100644
index 445130460380..000000000000
--- a/include/linux/clk/mmp.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __CLK_MMP_H
-#define __CLK_MMP_H
-
-#include <linux/types.h>
-
-extern void pxa168_clk_init(phys_addr_t mpmu_phys,
- phys_addr_t apmu_phys,
- phys_addr_t apbc_phys);
-extern void pxa910_clk_init(phys_addr_t mpmu_phys,
- phys_addr_t apmu_phys,
- phys_addr_t apbc_phys,
- phys_addr_t apbcp_phys);
-extern void mmp2_clk_init(phys_addr_t mpmu_phys,
- phys_addr_t apmu_phys,
- phys_addr_t apbc_phys);
-
-#endif
diff --git a/include/linux/clk/pxa.h b/include/linux/clk/pxa.h
new file mode 100644
index 000000000000..736b8bb91bd7
--- /dev/null
+++ b/include/linux/clk/pxa.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern int pxa25x_clocks_init(void __iomem *regs);
+extern int pxa27x_clocks_init(void __iomem *regs);
+extern int pxa3xx_clocks_init(void __iomem *regs, void __iomem *oscc_reg);
+
+#ifdef CONFIG_PXA3xx
+extern unsigned pxa3xx_get_clk_frequency_khz(int);
+extern void pxa3xx_clk_update_accr(u32 disable, u32 enable, u32 xclkcfg, u32 mask);
+#else
+#define pxa3xx_get_clk_frequency_khz(x) (0)
+#define pxa3xx_clk_update_accr(disable, enable, xclkcfg, mask) do { } while (0)
+#endif
diff --git a/include/linux/clk/renesas.h b/include/linux/clk/renesas.h
index 0ebbe2f0b45e..69d8159deee3 100644
--- a/include/linux/clk/renesas.h
+++ b/include/linux/clk/renesas.h
@@ -10,7 +10,9 @@
#ifndef __LINUX_CLK_RENESAS_H_
#define __LINUX_CLK_RENESAS_H_
+#include <linux/clk-provider.h>
#include <linux/types.h>
+#include <linux/units.h>
struct device;
struct device_node;
@@ -32,4 +34,147 @@ void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev);
#define cpg_mssr_attach_dev NULL
#define cpg_mssr_detach_dev NULL
#endif
+
+/**
+ * struct rzv2h_pll_limits - PLL parameter constraints
+ *
+ * This structure defines the minimum and maximum allowed values for
+ * various parameters used to configure a PLL. These limits ensure
+ * the PLL operates within valid and stable ranges.
+ *
+ * @fout: Output frequency range (in MHz)
+ * @fout.min: Minimum allowed output frequency
+ * @fout.max: Maximum allowed output frequency
+ *
+ * @fvco: PLL oscillation frequency range (in MHz)
+ * @fvco.min: Minimum allowed VCO frequency
+ * @fvco.max: Maximum allowed VCO frequency
+ *
+ * @m: Main-divider range
+ * @m.min: Minimum main-divider value
+ * @m.max: Maximum main-divider value
+ *
+ * @p: Pre-divider range
+ * @p.min: Minimum pre-divider value
+ * @p.max: Maximum pre-divider value
+ *
+ * @s: Divider range
+ * @s.min: Minimum divider value
+ * @s.max: Maximum divider value
+ *
+ * @k: Delta-sigma modulator range (signed)
+ * @k.min: Minimum delta-sigma value
+ * @k.max: Maximum delta-sigma value
+ */
+struct rzv2h_pll_limits {
+ struct {
+ u32 min;
+ u32 max;
+ } fout;
+
+ struct {
+ u32 min;
+ u32 max;
+ } fvco;
+
+ struct {
+ u16 min;
+ u16 max;
+ } m;
+
+ struct {
+ u8 min;
+ u8 max;
+ } p;
+
+ struct {
+ u8 min;
+ u8 max;
+ } s;
+
+ struct {
+ s16 min;
+ s16 max;
+ } k;
+};
+
+/**
+ * struct rzv2h_pll_pars - PLL configuration parameters
+ *
+ * This structure contains the configuration parameters for the
+ * Phase-Locked Loop (PLL), used to achieve a specific output frequency.
+ *
+ * @m: Main divider value
+ * @p: Pre-divider value
+ * @s: Output divider value
+ * @k: Delta-sigma modulation value
+ * @freq_millihz: Calculated PLL output frequency in millihertz
+ * @error_millihz: Frequency error from target in millihertz (signed)
+ */
+struct rzv2h_pll_pars {
+ u16 m;
+ u8 p;
+ u8 s;
+ s16 k;
+ u64 freq_millihz;
+ s64 error_millihz;
+};
+
+/**
+ * struct rzv2h_pll_div_pars - PLL parameters with post-divider
+ *
+ * This structure is used for PLLs that include an additional post-divider
+ * stage after the main PLL block. It contains both the PLL configuration
+ * parameters and the resulting frequency/error values after the divider.
+ *
+ * @pll: Main PLL configuration parameters (see struct rzv2h_pll_pars)
+ *
+ * @div: Post-divider configuration and result
+ * @div.divider_value: Divider applied to the PLL output
+ * @div.freq_millihz: Output frequency after divider in millihertz
+ * @div.error_millihz: Frequency error from target in millihertz (signed)
+ */
+struct rzv2h_pll_div_pars {
+ struct rzv2h_pll_pars pll;
+ struct {
+ u8 divider_value;
+ u64 freq_millihz;
+ s64 error_millihz;
+ } div;
+};
+
+#define RZV2H_CPG_PLL_DSI_LIMITS(name) \
+ static const struct rzv2h_pll_limits (name) = { \
+ .fout = { .min = 25 * MEGA, .max = 375 * MEGA }, \
+ .fvco = { .min = 1600 * MEGA, .max = 3200 * MEGA }, \
+ .m = { .min = 64, .max = 533 }, \
+ .p = { .min = 1, .max = 4 }, \
+ .s = { .min = 0, .max = 6 }, \
+ .k = { .min = -32768, .max = 32767 }, \
+ } \
+
+#ifdef CONFIG_CLK_RZV2H
+bool rzv2h_get_pll_pars(const struct rzv2h_pll_limits *limits,
+ struct rzv2h_pll_pars *pars, u64 freq_millihz);
+
+bool rzv2h_get_pll_divs_pars(const struct rzv2h_pll_limits *limits,
+ struct rzv2h_pll_div_pars *pars,
+ const u8 *table, u8 table_size, u64 freq_millihz);
+#else
+static inline bool rzv2h_get_pll_pars(const struct rzv2h_pll_limits *limits,
+ struct rzv2h_pll_pars *pars,
+ u64 freq_millihz)
+{
+ return false;
+}
+
+static inline bool rzv2h_get_pll_divs_pars(const struct rzv2h_pll_limits *limits,
+ struct rzv2h_pll_div_pars *pars,
+ const u8 *table, u8 table_size,
+ u64 freq_millihz)
+{
+ return false;
+}
+#endif
+
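A sketch of solving PLL parameters against the DSI limits. The function name
and the target rate are hypothetical; note that the solver takes the target
in millihertz:

	RZV2H_CPG_PLL_DSI_LIMITS(foo_dsi_pll_limits);

	static int foo_dsi_pll_set_rate(u64 rate_hz)
	{
		struct rzv2h_pll_pars pars;

		if (!rzv2h_get_pll_pars(&foo_dsi_pll_limits, &pars,
					rate_hz * 1000 /* Hz -> mHz */))
			return -EINVAL;

		/* program pars.m, pars.p, pars.s and pars.k into the PLL */
		return 0;
	}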
#endif
diff --git a/include/linux/clk/samsung.h b/include/linux/clk/samsung.h
index 38b774001712..0cf7aac83439 100644
--- a/include/linux/clk/samsung.h
+++ b/include/linux/clk/samsung.h
@@ -21,36 +21,4 @@ static inline void s3c64xx_clk_init(struct device_node *np,
bool s3c6400, void __iomem *base) { }
#endif /* CONFIG_S3C64XX_COMMON_CLK */
-#ifdef CONFIG_S3C2410_COMMON_CLK
-void s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
- int current_soc,
- void __iomem *reg_base);
-#else
-static inline void s3c2410_common_clk_init(struct device_node *np,
- unsigned long xti_f,
- int current_soc,
- void __iomem *reg_base) { }
-#endif /* CONFIG_S3C2410_COMMON_CLK */
-
-#ifdef CONFIG_S3C2412_COMMON_CLK
-void s3c2412_common_clk_init(struct device_node *np, unsigned long xti_f,
- unsigned long ext_f, void __iomem *reg_base);
-#else
-static inline void s3c2412_common_clk_init(struct device_node *np,
- unsigned long xti_f,
- unsigned long ext_f,
- void __iomem *reg_base) { }
-#endif /* CONFIG_S3C2412_COMMON_CLK */
-
-#ifdef CONFIG_S3C2443_COMMON_CLK
-void s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f,
- int current_soc,
- void __iomem *reg_base);
-#else
-static inline void s3c2443_common_clk_init(struct device_node *np,
- unsigned long xti_f,
- int current_soc,
- void __iomem *reg_base) { }
-#endif /* CONFIG_S3C2443_COMMON_CLK */
-
#endif /* __LINUX_CLK_SAMSUNG_H_ */
diff --git a/include/linux/clk/spear.h b/include/linux/clk/spear.h
index a64d034ceddd..eaf95ca656f8 100644
--- a/include/linux/clk/spear.h
+++ b/include/linux/clk/spear.h
@@ -8,6 +8,20 @@
#ifndef __LINUX_CLK_SPEAR_H
#define __LINUX_CLK_SPEAR_H
+#ifdef CONFIG_ARCH_SPEAR3XX
+void __init spear3xx_clk_init(void __iomem *misc_base,
+ void __iomem *soc_config_base);
+#else
+static inline void __init spear3xx_clk_init(void __iomem *misc_base,
+ void __iomem *soc_config_base) {}
+#endif
+
+#ifdef CONFIG_ARCH_SPEAR6XX
+void __init spear6xx_clk_init(void __iomem *misc_base);
+#else
+static inline void __init spear6xx_clk_init(void __iomem *misc_base) {}
+#endif
+
#ifdef CONFIG_MACH_SPEAR1310
void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base);
#else
diff --git a/include/linux/clk/sunxi-ng.h b/include/linux/clk/sunxi-ng.h
index 3cd14acde0a1..57c8ec44ab4e 100644
--- a/include/linux/clk/sunxi-ng.h
+++ b/include/linux/clk/sunxi-ng.h
@@ -6,22 +6,9 @@
#ifndef _LINUX_CLK_SUNXI_NG_H_
#define _LINUX_CLK_SUNXI_NG_H_
-#include <linux/errno.h>
-
-#ifdef CONFIG_SUNXI_CCU
int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode);
int sunxi_ccu_get_mmc_timing_mode(struct clk *clk);
-#else
-static inline int sunxi_ccu_set_mmc_timing_mode(struct clk *clk,
- bool new_mode)
-{
- return -ENOTSUPP;
-}
-static inline int sunxi_ccu_get_mmc_timing_mode(struct clk *clk)
-{
- return -ENOTSUPP;
-}
-#endif
+int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg);
#endif
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index f7ff722a03dd..3650e926e93f 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -42,6 +42,7 @@ struct tegra_cpu_car_ops {
#endif
};
+#ifdef CONFIG_ARCH_TEGRA
extern struct tegra_cpu_car_ops *tegra_cpu_car_ops;
static inline void tegra_wait_cpu_in_reset(u32 cpu)
@@ -83,8 +84,29 @@ static inline void tegra_disable_cpu_clock(u32 cpu)
tegra_cpu_car_ops->disable_clock(cpu);
}
+#else
+static inline void tegra_wait_cpu_in_reset(u32 cpu)
+{
+}
-#ifdef CONFIG_PM_SLEEP
+static inline void tegra_put_cpu_in_reset(u32 cpu)
+{
+}
+
+static inline void tegra_cpu_out_of_reset(u32 cpu)
+{
+}
+
+static inline void tegra_enable_cpu_clock(u32 cpu)
+{
+}
+
+static inline void tegra_disable_cpu_clock(u32 cpu)
+{
+}
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_PM_SLEEP)
static inline bool tegra_cpu_rail_off_ready(void)
{
if (WARN_ON(!tegra_cpu_car_ops->rail_off_ready))
@@ -123,20 +145,6 @@ static inline void tegra_cpu_clock_resume(void)
}
#endif
-extern int tegra210_plle_hw_sequence_start(void);
-extern bool tegra210_plle_hw_sequence_is_enabled(void);
-extern void tegra210_xusb_pll_hw_control_enable(void);
-extern void tegra210_xusb_pll_hw_sequence_start(void);
-extern void tegra210_sata_pll_hw_control_enable(void);
-extern void tegra210_sata_pll_hw_sequence_start(void);
-extern void tegra210_set_sata_pll_seq_sw(bool state);
-extern void tegra210_put_utmipll_in_iddq(void);
-extern void tegra210_put_utmipll_out_iddq(void);
-extern int tegra210_clk_handle_mbist_war(unsigned int id);
-extern void tegra210_clk_emc_dll_enable(bool flag);
-extern void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value);
-extern void tegra210_clk_emc_update_setting(u32 emc_src_value);
-
struct clk;
struct tegra_emc;
@@ -144,17 +152,10 @@ typedef long (tegra20_clk_emc_round_cb)(unsigned long rate,
unsigned long min_rate,
unsigned long max_rate,
void *arg);
-
-void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
- void *cb_arg);
-int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same);
-
typedef int (tegra124_emc_prepare_timing_change_cb)(struct tegra_emc *emc,
unsigned long rate);
typedef void (tegra124_emc_complete_timing_change_cb)(struct tegra_emc *emc,
unsigned long rate);
-void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb,
- tegra124_emc_complete_timing_change_cb *complete_cb);
struct tegra210_clk_emc_config {
unsigned long rate;
@@ -176,8 +177,87 @@ struct tegra210_clk_emc_provider {
const struct tegra210_clk_emc_config *config);
};
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
+void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
+ void *cb_arg);
+int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same);
+#else
+static inline void
+tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
+ void *cb_arg)
+{
+}
+
+static inline int
+tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_TEGRA124_CLK_EMC
+void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb,
+ tegra124_emc_complete_timing_change_cb *complete_cb);
+#else
+static inline void
+tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb,
+ tegra124_emc_complete_timing_change_cb *complete_cb)
+{
+}
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+int tegra210_plle_hw_sequence_start(void);
+bool tegra210_plle_hw_sequence_is_enabled(void);
+void tegra210_xusb_pll_hw_control_enable(void);
+void tegra210_xusb_pll_hw_sequence_start(void);
+void tegra210_sata_pll_hw_control_enable(void);
+void tegra210_sata_pll_hw_sequence_start(void);
+void tegra210_set_sata_pll_seq_sw(bool state);
+void tegra210_put_utmipll_in_iddq(void);
+void tegra210_put_utmipll_out_iddq(void);
+int tegra210_clk_handle_mbist_war(unsigned int id);
+void tegra210_clk_emc_dll_enable(bool flag);
+void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value);
+void tegra210_clk_emc_update_setting(u32 emc_src_value);
+
int tegra210_clk_emc_attach(struct clk *clk,
struct tegra210_clk_emc_provider *provider);
void tegra210_clk_emc_detach(struct clk *clk);
+#else
+static inline int tegra210_plle_hw_sequence_start(void)
+{
+ return 0;
+}
+
+static inline bool tegra210_plle_hw_sequence_is_enabled(void)
+{
+ return false;
+}
+
+static inline int tegra210_clk_handle_mbist_war(unsigned int id)
+{
+ return 0;
+}
+
+static inline int
+tegra210_clk_emc_attach(struct clk *clk,
+ struct tegra210_clk_emc_provider *provider)
+{
+ return 0;
+}
+
+static inline void tegra210_xusb_pll_hw_control_enable(void) {}
+static inline void tegra210_xusb_pll_hw_sequence_start(void) {}
+static inline void tegra210_sata_pll_hw_control_enable(void) {}
+static inline void tegra210_sata_pll_hw_sequence_start(void) {}
+static inline void tegra210_set_sata_pll_seq_sw(bool state) {}
+static inline void tegra210_put_utmipll_in_iddq(void) {}
+static inline void tegra210_put_utmipll_out_iddq(void) {}
+static inline void tegra210_clk_emc_dll_enable(bool flag) {}
+static inline void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value) {}
+static inline void tegra210_clk_emc_update_setting(u32 emc_src_value) {}
+static inline void tegra210_clk_emc_detach(struct clk *clk) {}
+#endif
#endif /* __LINUX_CLK_TEGRA_H_ */
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index c62f6fa6763d..54a3fa370004 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI clock drivers support
*
* Copyright (C) 2013 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_CLK_TI_H__
#define __LINUX_CLK_TI_H__
@@ -21,11 +13,14 @@
/**
* struct clk_omap_reg - OMAP register declaration
* @offset: offset from the master IP module base address
+ * @bit: register bit offset
* @index: index of the master IP module
+ * @flags: flags
*/
struct clk_omap_reg {
void __iomem *ptr;
u16 offset;
+ u8 bit;
u8 index;
u8 flags;
};
@@ -39,14 +34,14 @@ struct clk_omap_reg {
* @clk_ref: struct clk_hw pointer to the clock's reference clock input
* @control_reg: register containing the DPLL mode bitfield
* @enable_mask: mask of the DPLL mode bitfield in @control_reg
- * @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate()
- * @last_rounded_m: cache of the last M result of omap2_dpll_round_rate()
+ * @last_rounded_rate: cache of the last rate result of omap2_dpll_determine_rate()
+ * @last_rounded_m: cache of the last M result of omap2_dpll_determine_rate()
* @last_rounded_m4xen: cache of the last M4X result of
- * omap4_dpll_regm4xen_round_rate()
+ * omap4_dpll_regm4xen_determine_rate()
* @last_rounded_lpmode: cache of the last lpmode result of
* omap4_dpll_lpmode_recalc()
* @max_multiplier: maximum valid non-bypass multiplier value (actual)
- * @last_rounded_n: cache of the last N result of omap2_dpll_round_rate()
+ * @last_rounded_n: cache of the last N result of omap2_dpll_determine_rate()
* @min_divider: minimum valid non-bypass divider value (actual)
* @max_divider: maximum valid non-bypass divider value (actual)
* @max_rate: maximum clock rate for the DPLL
@@ -63,6 +58,17 @@ struct clk_omap_reg {
* @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg
* @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs
* @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs
+ * @ssc_deltam_reg: register containing the DPLL SSC frequency spreading
+ * @ssc_modfreq_reg: register containing the DPLL SSC modulation frequency
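+ * @ssc_deltam_int_mask: mask of the integer component in @ssc_deltam_reg
+ * @ssc_deltam_frac_mask: mask of the fractional component in @ssc_deltam_reg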
+ * @ssc_modfreq_mant_mask: mask of the mantissa component in @ssc_modfreq_reg
+ * @ssc_modfreq_exp_mask: mask of the exponent component in @ssc_modfreq_reg
+ * @ssc_enable_mask: mask of the DPLL SSC enable bit in @control_reg
+ * @ssc_downspread_mask: mask of the DPLL SSC low frequency only bit in
+ * @control_reg
+ * @ssc_modfreq: the DPLL SSC frequency modulation in kHz
+ * @ssc_deltam: the DPLL SSC frequency spreading in permille (tenths of a percent)
+ * @ssc_downspread: require only low-frequency (down)spread of the DPLL in
+ * SSC mode
* @flags: DPLL type/features (see below)
*
* Possible values for @flags:
@@ -110,6 +116,17 @@ struct dpll_data {
u8 auto_recal_bit;
u8 recal_en_bit;
u8 recal_st_bit;
+ struct clk_omap_reg ssc_deltam_reg;
+ struct clk_omap_reg ssc_modfreq_reg;
+ u32 ssc_deltam_int_mask;
+ u32 ssc_deltam_frac_mask;
+ u32 ssc_modfreq_mant_mask;
+ u32 ssc_modfreq_exp_mask;
+ u32 ssc_enable_mask;
+ u32 ssc_downspread_mask;
+ u32 ssc_modfreq;
+ u32 ssc_deltam;
+ bool ssc_downspread;
u8 flags;
};
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index fd06b2780a22..45570bc21a43 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -30,11 +30,6 @@ struct clk_lookup {
.clk = c, \
}
-struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
- const char *dev_fmt, ...) __printf(3, 4);
-struct clk_lookup *clkdev_hw_alloc(struct clk_hw *hw, const char *con_id,
- const char *dev_fmt, ...) __printf(3, 4);
-
void clkdev_add(struct clk_lookup *cl);
void clkdev_drop(struct clk_lookup *cl);
@@ -51,6 +46,4 @@ int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *);
int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw,
const char *con_id, const char *dev_id);
-void devm_clk_release_clkdev(struct device *dev, const char *con_id,
- const char *dev_id);
#endif
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 8ae9a95ebf5b..b0df28ddd394 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -12,7 +12,7 @@
#ifdef CONFIG_GENERIC_CLOCKEVENTS
# include <linux/clocksource.h>
-# include <linux/cpumask.h>
+# include <linux/cpumask_types.h>
# include <linux/ktime.h>
# include <linux/notifier.h>
@@ -211,7 +211,7 @@ extern int tick_receive_broadcast(void);
extern void tick_setup_hrtimer_broadcast(void);
extern int tick_check_broadcast_expired(void);
# else
-static inline int tick_check_broadcast_expired(void) { return 0; }
+static __always_inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
# endif
@@ -219,7 +219,7 @@ static inline void tick_setup_hrtimer_broadcast(void) { }
static inline void clockevents_suspend(void) { }
static inline void clockevents_resume(void) { }
-static inline int tick_check_broadcast_expired(void) { return 0; }
+static __always_inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index d6ab416ee2d2..65b7c41471c3 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -21,6 +21,7 @@
#include <asm/div64.h>
#include <asm/io.h>
+struct clocksource_base;
struct clocksource;
struct module;
@@ -43,11 +44,15 @@ struct module;
* @shift: Cycle to nanosecond divisor (power of two)
* @max_idle_ns: Maximum idle time permitted by the clocksource (nsecs)
* @maxadj: Maximum adjustment value to mult (~11%)
+ * @uncertainty_margin: Maximum uncertainty in nanoseconds per half second.
+ * Zero says to use the default WATCHDOG_THRESHOLD.
* @archdata: Optional arch-specific data
* @max_cycles: Maximum safe cycle value which won't overflow on
* multiplication
+ * @max_raw_delta: Maximum safe delta value for negative motion detection
* @name: Pointer to clocksource name
* @list: List head for registration (internal)
+ * @freq_khz: Clocksource frequency in kHz.
* @rating: Rating value for selection (higher is better)
* To avoid rating inflation the following
* list should give you a guide as to how
@@ -68,6 +73,8 @@ struct module;
* validate the clocksource from which the snapshot was
* taken.
* @flags: Flags describing special properties
+ * @base: Hardware abstraction for clock on which a clocksource
+ * is based
* @enable: Optional function to enable the clocksource
* @disable: Optional function to disable the clocksource
* @suspend: Optional suspend function for the clocksource
@@ -98,16 +105,20 @@ struct clocksource {
u32 shift;
u64 max_idle_ns;
u32 maxadj;
+ u32 uncertainty_margin;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
struct arch_clocksource_data archdata;
#endif
u64 max_cycles;
+ u64 max_raw_delta;
const char *name;
struct list_head list;
+ u32 freq_khz;
int rating;
enum clocksource_ids id;
enum vdso_clock_mode vdso_clock_mode;
unsigned long flags;
+ struct clocksource_base *base;
int (*enable)(struct clocksource *cs);
void (*disable)(struct clocksource *cs);
@@ -137,7 +148,7 @@ struct clocksource {
#define CLOCK_SOURCE_UNSTABLE 0x40
#define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80
#define CLOCK_SOURCE_RESELECT 0x100
-
+#define CLOCK_SOURCE_VERIFY_PERCPU 0x200
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
@@ -206,7 +217,6 @@ static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift)
extern int clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
-extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init clocksource_default_clock(void);
@@ -288,4 +298,40 @@ static inline void timer_probe(void) {}
#define TIMER_ACPI_DECLARE(name, table_id, fn) \
ACPI_DECLARE_PROBE_ENTRY(timer, name, table_id, 0, NULL, 0, fn)
+static inline unsigned int clocksource_get_max_watchdog_retry(void)
+{
+ /*
+ * When system is in the boot phase or under heavy workload, there
+ * can be random big latencies during the clocksource/watchdog
+ * read, so allow retries to filter the noise latency. As the
+ * latency's frequency and maximum value goes up with the number of
+ * CPUs, scale the number of retries with the number of online
+ * CPUs.
+ */
+ return (ilog2(num_online_cpus()) / 2) + 1;
+}
+
+void clocksource_verify_percpu(struct clocksource *cs);
+
+/**
+ * struct clocksource_base - hardware abstraction for clock on which a clocksource
+ * is based
+ * @id: Defaults to CSID_GENERIC. The id value is used by conversion
+ * and snapshot functions which require that the current
+ * clocksource is based on a clocksource_base with a particular
+ * ID, allowing callers to validate the clocksource from which
+ * the snapshot was taken.
+ * @freq_khz: Nominal frequency of the base clock in kHz
+ * @offset: Offset between the base clock and the clocksource
+ * @numerator: Numerator of the clock ratio between base clock and the clocksource
+ * @denominator: Denominator of the clock ratio between base clock and the clocksource
+ */
+struct clocksource_base {
+ enum clocksource_ids id;
+ u32 freq_khz;
+ u64 offset;
+ u32 numerator;
+ u32 denominator;
+};
+
#endif /* _LINUX_CLOCKSOURCE_H */
diff --git a/include/linux/clocksource_ids.h b/include/linux/clocksource_ids.h
index 16775d7d8f8d..c4ef4ae2eded 100644
--- a/include/linux/clocksource_ids.h
+++ b/include/linux/clocksource_ids.h
@@ -6,6 +6,11 @@
enum clocksource_ids {
CSID_GENERIC = 0,
CSID_ARM_ARCH_COUNTER,
+ CSID_S390_TOD,
+ CSID_X86_TSC_EARLY,
+ CSID_X86_TSC,
+ CSID_X86_KVM_CLK,
+ CSID_X86_ART,
CSID_MAX,
};
diff --git a/include/linux/closure.h b/include/linux/closure.h
new file mode 100644
index 000000000000..880fe85e35e9
--- /dev/null
+++ b/include/linux/closure.h
@@ -0,0 +1,492 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CLOSURE_H
+#define _LINUX_CLOSURE_H
+
+#include <linux/llist.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/workqueue.h>
+
+/*
+ * Closure is perhaps the most overused and abused term in computer science, but
+ * since I've been unable to come up with anything better you're stuck with it
+ * again.
+ *
+ * What are closures?
+ *
+ * They embed a refcount. The basic idea is they count "things that are in
+ * progress" - in flight bios, some other thread that's doing something else -
+ * anything you might want to wait on.
+ *
+ * The refcount may be manipulated with closure_get() and closure_put().
+ * closure_put() is where many of the interesting things happen, when it causes
+ * the refcount to go to 0.
+ *
+ * Closures can be used to wait on things both synchronously and asynchronously,
+ * and synchronous and asynchronous use can be mixed without restriction. To
+ * wait synchronously, use closure_sync() - you will sleep until your closure's
+ * refcount hits 1.
+ *
+ * To wait asynchronously, use
+ * continue_at(cl, next_function, workqueue);
+ *
+ * passing it, as you might expect, the function to run when nothing is pending
+ * and the workqueue to run that function out of.
+ *
+ * continue_at() also, critically, requires a 'return' immediately following the
+ * location where this macro is referenced, to return to the calling function.
+ * There's good reason for this.
+ *
+ * To safely use closures asynchronously, they must always have a refcount while
+ * they are running owned by the thread that is running them. Otherwise, suppose
+ * you submit some bios and wish to have a function run when they all complete:
+ *
+ * foo_endio(struct bio *bio)
+ * {
+ * closure_put(cl);
+ * }
+ *
+ * closure_init(cl);
+ *
+ * do_stuff();
+ * closure_get(cl);
+ * bio1->bi_endio = foo_endio;
+ * bio_submit(bio1);
+ *
+ * do_more_stuff();
+ * closure_get(cl);
+ * bio2->bi_endio = foo_endio;
+ * bio_submit(bio2);
+ *
+ * continue_at(cl, complete_some_read, system_wq);
+ *
+ * If the closure's refcount started at 0, complete_some_read() could run before the
+ * second bio was submitted - which is almost always not what you want! More
+ * importantly, it wouldn't be possible to say whether the original thread or
+ * complete_some_read()'s thread owned the closure - and whatever state it was
+ * associated with!
+ *
+ * So, closure_init() initializes a closure's refcount to 1 - and when a
+ * closure_fn is run, the refcount will be reset to 1 first.
+ *
+ * Then, the rule is - if you got the refcount with closure_get(), release it
+ * with closure_put() (i.e., in a bio->bi_endio function). If you have a refcount
+ * on a closure because you called closure_init() or you were run out of a
+ * closure - _always_ use continue_at(). Doing so consistently will help
+ * eliminate an entire class of particularly pernicious races.
+ *
+ * Lastly, you might have a wait list dedicated to a specific event, and have no
+ * need for specifying the condition - you just want to wait until someone runs
+ * closure_wake_up() on the appropriate wait list. In that case, just use
+ * closure_wait(). It will return either true or false, depending on whether the
+ * closure was already on a wait list or not - a closure can only be on one wait
+ * list at a time.
+ *
+ * Parents:
+ *
+ * closure_init() takes two arguments - it takes the closure to initialize, and
+ * a (possibly null) parent.
+ *
+ * If parent is non null, the new closure will have a refcount for its lifetime;
+ * a closure is considered to be "finished" when its refcount hits 0 and the
+ * function to run is null. Hence
+ *
+ * continue_at(cl, NULL, NULL);
+ *
+ * returns up the (spaghetti) stack of closures, precisely like normal return
+ * returns up the C stack. continue_at() with non null fn is better thought of
+ * as doing a tail call.
+ *
+ * All this implies that a closure should typically be embedded in a particular
+ * struct (which its refcount will normally control the lifetime of), and that
+ * struct can very much be thought of as a stack frame.
+ */
+
+struct closure;
+struct closure_syncer;
+typedef void (closure_fn) (struct work_struct *);
+extern struct dentry *bcache_debug;
+
+struct closure_waitlist {
+ struct llist_head list;
+};
+
+enum closure_state {
+ /*
+ * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
+ * the thread that owns the closure, and cleared by the thread that's
+ * waking up the closure.
+ *
+ * The rest are for debugging and don't affect behaviour:
+ *
+ * CLOSURE_RUNNING: Set when a closure is running (i.e. by
+ * closure_init() and when closure_put() runs the next function), and
+ * must be cleared before remaining hits 0. Primarily to help guard
+ * against incorrect usage and accidentally transferring references.
+ * continue_at() and closure_return() clear it for you, if you're doing
+ * something unusual you can use closure_set_dead() which also helps
+ * annotate where references are being transferred.
+ */
+
+ CLOSURE_BITS_START = (1U << 26),
+ CLOSURE_DESTRUCTOR = (1U << 26),
+ CLOSURE_WAITING = (1U << 28),
+ CLOSURE_RUNNING = (1U << 30),
+};
+
+#define CLOSURE_GUARD_MASK \
+ ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)
+
+#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
+#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
+
+struct closure {
+ union {
+ struct {
+ struct workqueue_struct *wq;
+ struct closure_syncer *s;
+ struct llist_node list;
+ closure_fn *fn;
+ };
+ struct work_struct work;
+ };
+
+ struct closure *parent;
+
+ atomic_t remaining;
+ bool closure_get_happened;
+
+#ifdef CONFIG_DEBUG_CLOSURES
+#define CLOSURE_MAGIC_DEAD 0xc054dead
+#define CLOSURE_MAGIC_ALIVE 0xc054a11e
+#define CLOSURE_MAGIC_STACK 0xc05451cc
+
+ unsigned int magic;
+ struct list_head all;
+ unsigned long ip;
+ unsigned long waiting_on;
+#endif
+};
+
+void closure_sub(struct closure *cl, int v);
+void closure_put(struct closure *cl);
+void __closure_wake_up(struct closure_waitlist *list);
+bool closure_wait(struct closure_waitlist *list, struct closure *cl);
+void __closure_sync(struct closure *cl);
+
+static inline unsigned closure_nr_remaining(struct closure *cl)
+{
+ return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK;
+}
+
+/**
+ * closure_sync - sleep until a closure has nothing left to wait on
+ *
+ * Sleeps until the refcount hits 1 - the thread that's running the closure owns
+ * the last refcount.
+ */
+static inline void closure_sync(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ BUG_ON(closure_nr_remaining(cl) != 1 && !cl->closure_get_happened);
+#endif
+
+ if (cl->closure_get_happened)
+ __closure_sync(cl);
+}
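+
+/*
+ * Example (a sketch, reusing foo_endio() from the comment above, which calls
+ * closure_put() when the bio completes):
+ *
+ *	struct closure cl;
+ *
+ *	closure_init_stack(&cl);
+ *	closure_get(&cl);
+ *	bio->bi_endio = foo_endio;
+ *	bio_submit(bio);
+ *	closure_sync(&cl);
+ */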
+
+int __closure_sync_timeout(struct closure *cl, unsigned long timeout);
+
+static inline int closure_sync_timeout(struct closure *cl, unsigned long timeout)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ BUG_ON(closure_nr_remaining(cl) != 1 && !cl->closure_get_happened);
+#endif
+ return cl->closure_get_happened
+ ? __closure_sync_timeout(cl, timeout)
+ : 0;
+}
+
+#ifdef CONFIG_DEBUG_CLOSURES
+
+void closure_debug_create(struct closure *cl);
+void closure_debug_destroy(struct closure *cl);
+
+#else
+
+static inline void closure_debug_create(struct closure *cl) {}
+static inline void closure_debug_destroy(struct closure *cl) {}
+
+#endif
+
+static inline void closure_set_ip(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->ip = _THIS_IP_;
+#endif
+}
+
+static inline void closure_set_ret_ip(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->ip = _RET_IP_;
+#endif
+}
+
+static inline void closure_set_waiting(struct closure *cl, unsigned long f)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->waiting_on = f;
+#endif
+}
+
+static inline void closure_set_stopped(struct closure *cl)
+{
+ atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+}
+
+static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
+ struct workqueue_struct *wq)
+{
+ closure_set_ip(cl);
+ cl->fn = fn;
+ cl->wq = wq;
+}
+
+static inline void closure_queue(struct closure *cl)
+{
+ struct workqueue_struct *wq = cl->wq;
+	/*
+	 * Changes made to closure, work_struct, or a couple of other structs
+	 * may cause work.func to point to the wrong location.
+	 */
+ BUILD_BUG_ON(offsetof(struct closure, fn)
+ != offsetof(struct work_struct, func));
+
+ if (wq) {
+ INIT_WORK(&cl->work, cl->work.func);
+ BUG_ON(!queue_work(wq, &cl->work));
+ } else
+ cl->fn(&cl->work);
+}
+
+/**
+ * closure_get - increment a closure's refcount
+ */
+static inline void closure_get(struct closure *cl)
+{
+ cl->closure_get_happened = true;
+
+#ifdef CONFIG_DEBUG_CLOSURES
+ BUG_ON((atomic_inc_return(&cl->remaining) &
+ CLOSURE_REMAINING_MASK) <= 1);
+#else
+ atomic_inc(&cl->remaining);
+#endif
+}
+
+/**
+ * closure_get_not_zero - increment a closure's refcount if it is nonzero
+ *
+ * Returns true on success, false if the refcount was already zero.
+ */
+static inline bool closure_get_not_zero(struct closure *cl)
+{
+ unsigned old = atomic_read(&cl->remaining);
+ do {
+ if (!(old & CLOSURE_REMAINING_MASK))
+ return false;
+
+ } while (!atomic_try_cmpxchg_acquire(&cl->remaining, &old, old + 1));
+
+ return true;
+}
+
+/**
+ * closure_init - Initialize a closure, setting the refcount to 1
+ * @cl: closure to initialize
+ * @parent: parent of the new closure. cl will take a refcount on it for its
+ * lifetime; may be NULL.
+ */
+static inline void closure_init(struct closure *cl, struct closure *parent)
+{
+ cl->fn = NULL;
+ cl->parent = parent;
+ if (parent)
+ closure_get(parent);
+
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+ cl->closure_get_happened = false;
+
+ closure_debug_create(cl);
+ closure_set_ip(cl);
+}
+
+static inline void closure_init_stack(struct closure *cl)
+{
+ memset(cl, 0, sizeof(struct closure));
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->magic = CLOSURE_MAGIC_STACK;
+#endif
+}
+
+static inline void closure_init_stack_release(struct closure *cl)
+{
+ memset(cl, 0, sizeof(struct closure));
+ atomic_set_release(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->magic = CLOSURE_MAGIC_STACK;
+#endif
+}
+
+/**
+ * closure_wake_up - wake up all closures on a wait list,
+ * with memory barrier
+ */
+static inline void closure_wake_up(struct closure_waitlist *list)
+{
+ /* Memory barrier for the wait list */
+ smp_mb();
+ __closure_wake_up(list);
+}
+
+#define CLOSURE_CALLBACK(name) void name(struct work_struct *ws)
+#define closure_type(name, type, member) \
+ struct closure *cl = container_of(ws, struct closure, work); \
+ type *name = container_of(cl, type, member)
+
+/**
+ * continue_at - jump to another function with barrier
+ *
+ * After @cl is no longer waiting on anything (i.e. all outstanding refs have
+ * been dropped with closure_put()), it will resume execution at @fn running out
+ * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
+ *
+ * This is because after calling continue_at() you no longer have a ref on @cl,
+ * and whatever @cl owns may be freed out from under you - a running closure fn
+ * has a ref on its own closure which continue_at() drops.
+ *
+ * Note you are expected to immediately return after using this macro.
+ */
+#define continue_at(_cl, _fn, _wq) \
+do { \
+ set_closure_fn(_cl, _fn, _wq); \
+ closure_sub(_cl, CLOSURE_RUNNING + 1); \
+} while (0)
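+
+/*
+ * Example (a sketch; the callback, type, and next stage are illustrative):
+ *
+ *	CLOSURE_CALLBACK(foo_read_done)
+ *	{
+ *		closure_type(op, struct foo_op, cl);
+ *
+ *		foo_finish_read(op);
+ *		continue_at(&op->cl, foo_write_back, system_wq);
+ *	}
+ */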
+
+/**
+ * closure_return - finish execution of a closure
+ *
+ * This is used to indicate that @cl is finished: when all outstanding refs on
+ * @cl have been dropped @cl's ref on its parent closure (as passed to
+ * closure_init()) will be dropped, if one was specified - thus this can be
+ * thought of as returning to the parent closure.
+ */
+#define closure_return(_cl) continue_at((_cl), NULL, NULL)
+
+void closure_return_sync(struct closure *cl);
+
+/**
+ * continue_at_nobarrier - jump to another function without barrier
+ *
+ * Causes @fn to be executed out of @cl, in @wq context (or called directly if
+ * @wq is NULL).
+ *
+ * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
+ * thus it's not safe to touch anything protected by @cl after a
+ * continue_at_nobarrier().
+ */
+#define continue_at_nobarrier(_cl, _fn, _wq) \
+do { \
+ set_closure_fn(_cl, _fn, _wq); \
+ closure_queue(_cl); \
+} while (0)
+
+/**
+ * closure_return_with_destructor - finish execution of a closure,
+ * with destructor
+ *
+ * Works like closure_return(), except @destructor will be called when all
+ * outstanding refs on @cl have been dropped; @destructor may be used to safely
+ * free the memory occupied by @cl, and it is called with the ref on the parent
+ * closure still held - so @destructor could safely return an item to a
+ * freelist protected by @cl's parent.
+ */
+#define closure_return_with_destructor(_cl, _destructor) \
+do { \
+ set_closure_fn(_cl, _destructor, NULL); \
+ closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \
+} while (0)
+
+/**
+ * closure_call - execute @fn out of a new, uninitialized closure
+ *
+ * Typically used when running out of one closure, and we want to run @fn
+ * asynchronously out of a new closure - @parent will then wait for @cl to
+ * finish.
+ */
+static inline void closure_call(struct closure *cl, closure_fn fn,
+ struct workqueue_struct *wq,
+ struct closure *parent)
+{
+ closure_init(cl, parent);
+ continue_at_nobarrier(cl, fn, wq);
+}
+
+#define __closure_wait_event(waitlist, _cond) \
+do { \
+ struct closure cl; \
+ \
+ closure_init_stack(&cl); \
+ \
+ while (1) { \
+ closure_wait(waitlist, &cl); \
+ if (_cond) \
+ break; \
+ closure_sync(&cl); \
+ } \
+ closure_wake_up(waitlist); \
+ closure_sync(&cl); \
+} while (0)
+
+#define closure_wait_event(waitlist, _cond) \
+do { \
+ if (!(_cond)) \
+ __closure_wait_event(waitlist, _cond); \
+} while (0)
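+
+/*
+ * Example (a sketch; the wait list and flag are illustrative):
+ *
+ *	closure_wait_event(&dev->waitlist, READ_ONCE(dev->ready));
+ */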
+
+#define __closure_wait_event_timeout(waitlist, _cond, _until) \
+({ \
+ struct closure cl; \
+ long _t; \
+ \
+ closure_init_stack(&cl); \
+ \
+ while (1) { \
+ closure_wait(waitlist, &cl); \
+ if (_cond) { \
+ _t = max_t(long, 1L, _until - jiffies); \
+ break; \
+ } \
+ _t = max_t(long, 0L, _until - jiffies); \
+ if (!_t) \
+ break; \
+ closure_sync_timeout(&cl, _t); \
+ } \
+ closure_wake_up(waitlist); \
+ closure_sync(&cl); \
+ _t; \
+})
+
+/*
+ * Returns 0 if timeout expired, remaining time in jiffies (at least 1) if
+ * condition became true
+ */
+#define closure_wait_event_timeout(waitlist, _cond, _timeout) \
+({ \
+ unsigned long _until = jiffies + _timeout; \
+ (_cond) \
+ ? max_t(long, 1L, _until - jiffies) \
+ : __closure_wait_event_timeout(waitlist, _cond, _until);\
+})
+
+#endif /* _LINUX_CLOSURE_H */
diff --git a/include/linux/cm4000_cs.h b/include/linux/cm4000_cs.h
deleted file mode 100644
index ea4958e07a14..000000000000
--- a/include/linux/cm4000_cs.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _CM4000_H_
-#define _CM4000_H_
-
-#include <uapi/linux/cm4000_cs.h>
-
-
-#define DEVICE_NAME "cmm"
-#define MODULE_NAME "cm4000_cs"
-
-#endif /* _CM4000_H_ */
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 53fd8c3cdbd0..62d9c1cf6326 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -6,20 +6,20 @@
#include <linux/types.h>
#include <linux/numa.h>
-/*
- * There is always at least global CMA area and a few optional
- * areas configured in kernel .config.
- */
#ifdef CONFIG_CMA_AREAS
-#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
-
-#else
-#define MAX_CMA_AREAS (0)
-
+#define MAX_CMA_AREAS CONFIG_CMA_AREAS
#endif
#define CMA_MAX_NAME 64
+/*
+ * Since the buddy allocator -- especially pageblock merging and
+ * alloc_contig_range() -- can deal with only some pageblocks of a
+ * higher-order page being MIGRATE_CMA, we can use pageblock_nr_pages
+ * as the minimum CMA area alignment.
+ */
+#define CMA_MIN_ALIGNMENT_PAGES pageblock_nr_pages
+#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
+
struct cma;
extern unsigned long totalcma_pages;
@@ -40,13 +40,41 @@ static inline int __init cma_declare_contiguous(phys_addr_t base,
return cma_declare_contiguous_nid(base, size, limit, alignment,
order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
}
+extern int __init cma_declare_contiguous_multi(phys_addr_t size,
+ phys_addr_t align, unsigned int order_per_bit,
+ const char *name, struct cma **res_cma, int nid);
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
const char *name,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
bool no_warn);
+extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
+extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end);
+
+extern void cma_reserve_pages_on_error(struct cma *cma);
+
+#ifdef CONFIG_CMA
+struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
+bool cma_free_folio(struct cma *cma, const struct folio *folio);
+bool cma_validate_zones(struct cma *cma);
+#else
+static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
+{
+ return NULL;
+}
+
+static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
+{
+ return false;
+}
+static inline bool cma_validate_zones(struct cma *cma)
+{
+ return false;
+}
+#endif
+
#endif
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
deleted file mode 100644
index 68a541807bdf..000000000000
--- a/include/linux/cmdline-parser.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Parsing command line, get the partitions information.
- *
- * Written by Cai Zhiyong <caizhiyong@huawei.com>
- *
- */
-#ifndef CMDLINEPARSEH
-#define CMDLINEPARSEH
-
-#include <linux/blkdev.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-
-/* partition flags */
-#define PF_RDONLY 0x01 /* Device is read only */
-#define PF_POWERUP_LOCK 0x02 /* Always locked after reset */
-
-struct cmdline_subpart {
- char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
- sector_t from;
- sector_t size;
- int flags;
- struct cmdline_subpart *next_subpart;
-};
-
-struct cmdline_parts {
- char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
- unsigned int nr_subparts;
- struct cmdline_subpart *subpart;
- struct cmdline_parts *next_parts;
-};
-
-void cmdline_parts_free(struct cmdline_parts **parts);
-
-int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
-
-struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
- const char *bdev);
-
-int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
- int slot,
- int (*add_part)(int, struct cmdline_subpart *, void *),
- void *param);
-
-#endif /* CMDLINEPARSEH */
diff --git a/include/linux/cmpxchg-emu.h b/include/linux/cmpxchg-emu.h
new file mode 100644
index 000000000000..998deec67740
--- /dev/null
+++ b/include/linux/cmpxchg-emu.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Emulated 1-byte cmpxchg operation for architectures lacking direct
+ * support for this size, implemented in terms of 4-byte cmpxchg
+ * operations.
+ *
+ * Copyright (C) 2024 Paul E. McKenney.
+ */
+
+#ifndef __LINUX_CMPXCHG_EMU_H
+#define __LINUX_CMPXCHG_EMU_H
+
+uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new);
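+
+/*
+ * Example (a sketch; 'flag' is illustrative). As with cmpxchg(), the
+ * previous value is returned, so the caller that sees the old value
+ * won the race:
+ *
+ *	volatile u8 flag;
+ *
+ *	if (cmpxchg_emu_u8(&flag, 0, 1) == 0)
+ *		do_setup();
+ */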
+
+#endif /* __LINUX_CMPXCHG_EMU_H */
diff --git a/include/linux/codetag.h b/include/linux/codetag.h
new file mode 100644
index 000000000000..8ea2a5f7c98a
--- /dev/null
+++ b/include/linux/codetag.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * code tagging framework
+ */
+#ifndef _LINUX_CODETAG_H
+#define _LINUX_CODETAG_H
+
+#include <linux/types.h>
+
+struct codetag_iterator;
+struct codetag_type;
+struct codetag_module;
+struct seq_buf;
+struct module;
+
+#define CODETAG_SECTION_START_PREFIX "__start_"
+#define CODETAG_SECTION_STOP_PREFIX "__stop_"
+
+/* codetag flags */
+#define CODETAG_FLAG_INACCURATE (1 << 0)
+
+/*
+ * An instance of this structure is created in a special ELF section at every
+ * code location being tagged. At runtime, the special section is treated as
+ * an array of these.
+ */
+struct codetag {
+ unsigned int flags;
+ unsigned int lineno;
+ const char *modname;
+ const char *function;
+ const char *filename;
+} __aligned(8);
+
+union codetag_ref {
+ struct codetag *ct;
+};
+
+struct codetag_type_desc {
+ const char *section;
+ size_t tag_size;
+ int (*module_load)(struct module *mod,
+ struct codetag *start, struct codetag *end);
+ void (*module_unload)(struct module *mod,
+ struct codetag *start, struct codetag *end);
+#ifdef CONFIG_MODULES
+ void (*module_replaced)(struct module *mod, struct module *new_mod);
+ bool (*needs_section_mem)(struct module *mod, unsigned long size);
+ void *(*alloc_section_mem)(struct module *mod, unsigned long size,
+ unsigned int prepend, unsigned long align);
+ void (*free_section_mem)(struct module *mod, bool used);
+#endif
+};
+
+struct codetag_iterator {
+ struct codetag_type *cttype;
+ struct codetag_module *cmod;
+ unsigned long mod_id;
+ struct codetag *ct;
+ unsigned long mod_seq;
+};
+
+#ifdef MODULE
+#define CT_MODULE_NAME KBUILD_MODNAME
+#else
+#define CT_MODULE_NAME NULL
+#endif
+
+#define CODE_TAG_INIT { \
+ .modname = CT_MODULE_NAME, \
+ .function = __func__, \
+ .filename = __FILE__, \
+ .lineno = __LINE__, \
+ .flags = 0, \
+}
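+
+/*
+ * Example (a sketch; the section and variable names are illustrative --
+ * users normally wrap this in their own DEFINE_* macro):
+ *
+ *	static struct codetag my_tag __used
+ *		__section("my_tags") = CODE_TAG_INIT;
+ */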
+
+void codetag_lock_module_list(struct codetag_type *cttype, bool lock);
+bool codetag_trylock_module_list(struct codetag_type *cttype);
+struct codetag_iterator codetag_get_ct_iter(struct codetag_type *cttype);
+struct codetag *codetag_next_ct(struct codetag_iterator *iter);
+
+void codetag_to_text(struct seq_buf *out, struct codetag *ct);
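+
+/*
+ * Example (a sketch): dumping every tag of a registered type:
+ *
+ *	struct codetag_iterator iter;
+ *	struct codetag *ct;
+ *
+ *	codetag_lock_module_list(cttype, true);
+ *	iter = codetag_get_ct_iter(cttype);
+ *	while ((ct = codetag_next_ct(&iter)))
+ *		codetag_to_text(out, ct);
+ *	codetag_lock_module_list(cttype, false);
+ */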
+
+struct codetag_type *
+codetag_register_type(const struct codetag_type_desc *desc);
+
+#if defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES)
+
+bool codetag_needs_module_section(struct module *mod, const char *name,
+ unsigned long size);
+void *codetag_alloc_module_section(struct module *mod, const char *name,
+ unsigned long size, unsigned int prepend,
+ unsigned long align);
+void codetag_free_module_sections(struct module *mod);
+void codetag_module_replaced(struct module *mod, struct module *new_mod);
+int codetag_load_module(struct module *mod);
+void codetag_unload_module(struct module *mod);
+
+#else /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */
+
+static inline bool
+codetag_needs_module_section(struct module *mod, const char *name,
+ unsigned long size) { return false; }
+static inline void *
+codetag_alloc_module_section(struct module *mod, const char *name,
+ unsigned long size, unsigned int prepend,
+ unsigned long align) { return NULL; }
+static inline void codetag_free_module_sections(struct module *mod) {}
+static inline void codetag_module_replaced(struct module *mod, struct module *new_mod) {}
+static inline int codetag_load_module(struct module *mod) { return 0; }
+static inline void codetag_unload_module(struct module *mod) {}
+
+#endif /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */
+
+#endif /* _LINUX_CODETAG_H */
diff --git a/include/linux/comedi/comedi_8254.h b/include/linux/comedi/comedi_8254.h
new file mode 100644
index 000000000000..d527f04400df
--- /dev/null
+++ b/include/linux/comedi/comedi_8254.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_8254.h
+ * Generic 8254 timer/counter support
+ * Copyright (C) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_8254_H
+#define _COMEDI_8254_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+
+struct comedi_device;
+struct comedi_insn;
+struct comedi_subdevice;
+
+/*
+ * Common oscillator base values in nanoseconds
+ */
+#define I8254_OSC_BASE_10MHZ 100
+#define I8254_OSC_BASE_5MHZ 200
+#define I8254_OSC_BASE_4MHZ 250
+#define I8254_OSC_BASE_2MHZ 500
+#define I8254_OSC_BASE_1MHZ 1000
+#define I8254_OSC_BASE_100KHZ 10000
+#define I8254_OSC_BASE_10KHZ 100000
+#define I8254_OSC_BASE_1KHZ 1000000
+
+/*
+ * I/O access size used to read/write registers
+ */
+#define I8254_IO8 1
+#define I8254_IO16 2
+#define I8254_IO32 4
+
+/*
+ * Register map for generic 8254 timer (I8254_IO8 with 0 regshift)
+ */
+#define I8254_COUNTER0_REG 0x00
+#define I8254_COUNTER1_REG 0x01
+#define I8254_COUNTER2_REG 0x02
+#define I8254_CTRL_REG 0x03
+#define I8254_CTRL_SEL_CTR(x) ((x) << 6)
+#define I8254_CTRL_READBACK(x) (I8254_CTRL_SEL_CTR(3) | BIT(x))
+#define I8254_CTRL_READBACK_COUNT I8254_CTRL_READBACK(4)
+#define I8254_CTRL_READBACK_STATUS I8254_CTRL_READBACK(5)
+#define I8254_CTRL_READBACK_SEL_CTR(x) (2 << (x))
+#define I8254_CTRL_RW(x) (((x) & 0x3) << 4)
+#define I8254_CTRL_LATCH I8254_CTRL_RW(0)
+#define I8254_CTRL_LSB_ONLY I8254_CTRL_RW(1)
+#define I8254_CTRL_MSB_ONLY I8254_CTRL_RW(2)
+#define I8254_CTRL_LSB_MSB I8254_CTRL_RW(3)
+
+/* counter maps zero to 0x10000 */
+#define I8254_MAX_COUNT 0x10000
+
+struct comedi_8254;
+
+/**
+ * typedef comedi_8254_iocb_fn - call-back function type for 8254 register access
+ * @i8254: pointer to struct comedi_8254
+ * @dir: direction (0 = read, 1 = write)
+ * @reg: register number
+ * @val: value to write
+ *
+ * Return: Register value when reading, 0 when writing.
+ */
+typedef unsigned int comedi_8254_iocb_fn(struct comedi_8254 *i8254, int dir,
+ unsigned int reg, unsigned int val);
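+
+/*
+ * Example (a sketch of a port-I/O call-back, using @context as the base
+ * address and assuming I8254_IO8 with no regshift):
+ *
+ *	static unsigned int my_8254_iocb(struct comedi_8254 *i8254, int dir,
+ *					 unsigned int reg, unsigned int val)
+ *	{
+ *		unsigned long iobase = i8254->context;
+ *
+ *		if (dir) {
+ *			outb(val, iobase + reg);
+ *			return 0;
+ *		}
+ *		return inb(iobase + reg);
+ *	}
+ */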
+
+/**
+ * struct comedi_8254 - private data used by this module
+ * @iocb: I/O call-back function for register access
+ * @context: context for register access (e.g. a base address)
+ * @iosize: I/O size used to access the registers (b/w/l)
+ * @regshift: register gap shift
+ * @osc_base: cascaded oscillator speed in ns
+ * @divisor: divisor for single counter
+ * @divisor1: divisor loaded into first cascaded counter
+ * @divisor2: divisor loaded into second cascaded counter
+ * @next_div: next divisor for single counter
+ * @next_div1: next divisor to use for first cascaded counter
+ * @next_div2: next divisor to use for second cascaded counter
+ * @clock_src: current clock source for each counter (driver specific)
+ * @gate_src: current gate source for each counter (driver specific)
+ * @busy: flags used to indicate that a counter is "busy"
+ * @insn_config: driver specific (*insn_config) callback
+ */
+struct comedi_8254 {
+ comedi_8254_iocb_fn *iocb;
+ unsigned long context;
+ unsigned int iosize;
+ unsigned int regshift;
+ unsigned int osc_base;
+ unsigned int divisor;
+ unsigned int divisor1;
+ unsigned int divisor2;
+ unsigned int next_div;
+ unsigned int next_div1;
+ unsigned int next_div2;
+ unsigned int clock_src[3];
+ unsigned int gate_src[3];
+ bool busy[3];
+
+ int (*insn_config)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+};
+
+unsigned int comedi_8254_status(struct comedi_8254 *i8254,
+ unsigned int counter);
+unsigned int comedi_8254_read(struct comedi_8254 *i8254, unsigned int counter);
+void comedi_8254_write(struct comedi_8254 *i8254,
+ unsigned int counter, unsigned int val);
+
+int comedi_8254_set_mode(struct comedi_8254 *i8254,
+ unsigned int counter, unsigned int mode);
+int comedi_8254_load(struct comedi_8254 *i8254,
+ unsigned int counter, unsigned int val, unsigned int mode);
+
+void comedi_8254_pacer_enable(struct comedi_8254 *i8254,
+ unsigned int counter1, unsigned int counter2,
+ bool enable);
+void comedi_8254_update_divisors(struct comedi_8254 *i8254);
+void comedi_8254_cascade_ns_to_timer(struct comedi_8254 *i8254,
+ unsigned int *nanosec, unsigned int flags);
+void comedi_8254_ns_to_timer(struct comedi_8254 *i8254,
+ unsigned int *nanosec, unsigned int flags);
+
+void comedi_8254_set_busy(struct comedi_8254 *i8254,
+ unsigned int counter, bool busy);
+
+void comedi_8254_subdevice_init(struct comedi_subdevice *s,
+ struct comedi_8254 *i8254);
+
+#ifdef CONFIG_HAS_IOPORT
+struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift);
+#else
+static inline struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift)
+{
+ return ERR_PTR(-ENXIO);
+}
+#endif
+
+struct comedi_8254 *comedi_8254_mm_alloc(void __iomem *mmio,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift);
+
+#endif /* _COMEDI_8254_H */
diff --git a/include/linux/comedi/comedi_8255.h b/include/linux/comedi/comedi_8255.h
new file mode 100644
index 000000000000..d24a69da389b
--- /dev/null
+++ b/include/linux/comedi/comedi_8255.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_8255.h
+ * Generic 8255 digital I/O subdevice support
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_8255_H
+#define _COMEDI_8255_H
+
+#include <linux/errno.h>
+
+#define I8255_SIZE 0x04
+
+#define I8255_DATA_A_REG 0x00
+#define I8255_DATA_B_REG 0x01
+#define I8255_DATA_C_REG 0x02
+#define I8255_CTRL_REG 0x03
+#define I8255_CTRL_C_LO_IO BIT(0)
+#define I8255_CTRL_B_IO BIT(1)
+#define I8255_CTRL_B_MODE BIT(2)
+#define I8255_CTRL_C_HI_IO BIT(3)
+#define I8255_CTRL_A_IO BIT(4)
+#define I8255_CTRL_A_MODE(x) ((x) << 5)
+#define I8255_CTRL_CW BIT(7)
+
+struct comedi_device;
+struct comedi_subdevice;
+
+#ifdef CONFIG_HAS_IOPORT
+int subdev_8255_io_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned long regbase);
+#else
+static inline int subdev_8255_io_init(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned long regbase)
+{
+ return -ENXIO;
+}
+#endif
+
+int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned long regbase);
+
+int subdev_8255_cb_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ int (*io)(struct comedi_device *dev, int dir, int port,
+ int data, unsigned long context),
+ unsigned long context);
+
+unsigned long subdev_8255_regbase(struct comedi_subdevice *s);
+
+#endif
diff --git a/include/linux/comedi/comedi_isadma.h b/include/linux/comedi/comedi_isadma.h
new file mode 100644
index 000000000000..9d2b12db7e6e
--- /dev/null
+++ b/include/linux/comedi/comedi_isadma.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * COMEDI ISA DMA support functions
+ * Copyright (c) 2014 H Hartley Sweeten <hsweeten@visionengravers.com>
+ */
+
+#ifndef _COMEDI_ISADMA_H
+#define _COMEDI_ISADMA_H
+
+#include <linux/types.h>
+
+struct comedi_device;
+struct device;
+
+/*
+ * These are used to avoid issues when <asm/dma.h> and the DMA_MODE_
+ * defines are not available.
+ */
+#define COMEDI_ISADMA_READ 0
+#define COMEDI_ISADMA_WRITE 1
+
+/**
+ * struct comedi_isadma_desc - cookie for ISA DMA
+ * @virt_addr: virtual address of buffer
+ * @hw_addr: hardware (bus) address of buffer
+ * @chan: DMA channel
+ * @maxsize: allocated size of buffer (in bytes)
+ * @size: transfer size (in bytes)
+ * @mode: DMA_MODE_READ or DMA_MODE_WRITE
+ */
+struct comedi_isadma_desc {
+ void *virt_addr;
+ dma_addr_t hw_addr;
+ unsigned int chan;
+ unsigned int maxsize;
+ unsigned int size;
+ char mode;
+};
+
+/**
+ * struct comedi_isadma - ISA DMA data
+ * @dev: device to allocate non-coherent memory for
+ * @desc: cookie for each DMA buffer
+ * @n_desc: the number of cookies
+ * @cur_dma: the current cookie in use
+ * @chan: the first DMA channel requested
+ * @chan2: the second DMA channel requested
+ */
+struct comedi_isadma {
+ struct device *dev;
+ struct comedi_isadma_desc *desc;
+ int n_desc;
+ int cur_dma;
+ unsigned int chan;
+ unsigned int chan2;
+};
+
+#if IS_ENABLED(CONFIG_ISA_DMA_API)
+
+void comedi_isadma_program(struct comedi_isadma_desc *desc);
+unsigned int comedi_isadma_disable(unsigned int dma_chan);
+unsigned int comedi_isadma_disable_on_sample(unsigned int dma_chan,
+ unsigned int size);
+unsigned int comedi_isadma_poll(struct comedi_isadma *dma);
+void comedi_isadma_set_mode(struct comedi_isadma_desc *desc, char dma_dir);
+
+struct comedi_isadma *comedi_isadma_alloc(struct comedi_device *dev,
+ int n_desc, unsigned int dma_chan1,
+ unsigned int dma_chan2,
+ unsigned int maxsize, char dma_dir);
+void comedi_isadma_free(struct comedi_isadma *dma);
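+
+/*
+ * Example (a sketch; the channel numbers and private-data layout are
+ * illustrative):
+ *
+ *	devpriv->dma = comedi_isadma_alloc(dev, 2, 6, 7, PAGE_SIZE,
+ *					   COMEDI_ISADMA_READ);
+ *	...
+ *	comedi_isadma_free(devpriv->dma);
+ */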
+
+#else /* !IS_ENABLED(CONFIG_ISA_DMA_API) */
+
+static inline void comedi_isadma_program(struct comedi_isadma_desc *desc)
+{
+}
+
+static inline unsigned int comedi_isadma_disable(unsigned int dma_chan)
+{
+ return 0;
+}
+
+static inline unsigned int
+comedi_isadma_disable_on_sample(unsigned int dma_chan, unsigned int size)
+{
+ return 0;
+}
+
+static inline unsigned int comedi_isadma_poll(struct comedi_isadma *dma)
+{
+ return 0;
+}
+
+static inline void comedi_isadma_set_mode(struct comedi_isadma_desc *desc,
+ char dma_dir)
+{
+}
+
+static inline struct comedi_isadma *
+comedi_isadma_alloc(struct comedi_device *dev, int n_desc,
+ unsigned int dma_chan1, unsigned int dma_chan2,
+ unsigned int maxsize, char dma_dir)
+{
+ return NULL;
+}
+
+static inline void comedi_isadma_free(struct comedi_isadma *dma)
+{
+}
+
+#endif /* !IS_ENABLED(CONFIG_ISA_DMA_API) */
+
+#endif /* #ifndef _COMEDI_ISADMA_H */
diff --git a/include/linux/comedi/comedi_pci.h b/include/linux/comedi/comedi_pci.h
new file mode 100644
index 000000000000..2fb50663e3ed
--- /dev/null
+++ b/include/linux/comedi/comedi_pci.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_pci.h
+ * header file for Comedi PCI drivers
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_PCI_H
+#define _COMEDI_PCI_H
+
+#include <linux/pci.h>
+#include <linux/comedi/comedidev.h>
+
+/*
+ * PCI Vendor IDs not in <linux/pci_ids.h>
+ */
+#define PCI_VENDOR_ID_KOLTER 0x1001
+#define PCI_VENDOR_ID_ICP 0x104c
+#define PCI_VENDOR_ID_DT 0x1116
+#define PCI_VENDOR_ID_IOTECH 0x1616
+#define PCI_VENDOR_ID_CONTEC 0x1221
+#define PCI_VENDOR_ID_RTD 0x1435
+#define PCI_VENDOR_ID_HUMUSOFT 0x186c
+
+struct pci_dev *comedi_to_pci_dev(struct comedi_device *dev);
+
+int comedi_pci_enable(struct comedi_device *dev);
+void comedi_pci_disable(struct comedi_device *dev);
+void comedi_pci_detach(struct comedi_device *dev);
+
+int comedi_pci_auto_config(struct pci_dev *pcidev, struct comedi_driver *driver,
+ unsigned long context);
+void comedi_pci_auto_unconfig(struct pci_dev *pcidev);
+
+int comedi_pci_driver_register(struct comedi_driver *comedi_driver,
+ struct pci_driver *pci_driver);
+void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
+ struct pci_driver *pci_driver);
+
+/**
+ * module_comedi_pci_driver() - Helper macro for registering a comedi PCI driver
+ * @__comedi_driver: comedi_driver struct
+ * @__pci_driver: pci_driver struct
+ *
+ * Helper macro for comedi PCI drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ */
+#define module_comedi_pci_driver(__comedi_driver, __pci_driver) \
+ module_driver(__comedi_driver, comedi_pci_driver_register, \
+ comedi_pci_driver_unregister, &(__pci_driver))
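+
+/*
+ * Example (a sketch; both driver structures are illustrative):
+ *
+ *	module_comedi_pci_driver(my_comedi_driver, my_pci_driver);
+ */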
+
+#endif /* _COMEDI_PCI_H */
diff --git a/include/linux/comedi/comedi_pcmcia.h b/include/linux/comedi/comedi_pcmcia.h
new file mode 100644
index 000000000000..a33dfb65b869
--- /dev/null
+++ b/include/linux/comedi/comedi_pcmcia.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedi_pcmcia.h
+ * header file for Comedi PCMCIA drivers
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_PCMCIA_H
+#define _COMEDI_PCMCIA_H
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#include <linux/comedi/comedidev.h>
+
+struct pcmcia_device *comedi_to_pcmcia_dev(struct comedi_device *dev);
+
+int comedi_pcmcia_enable(struct comedi_device *dev,
+ int (*conf_check)(struct pcmcia_device *p_dev,
+ void *priv_data));
+void comedi_pcmcia_disable(struct comedi_device *dev);
+
+int comedi_pcmcia_auto_config(struct pcmcia_device *link,
+ struct comedi_driver *driver);
+void comedi_pcmcia_auto_unconfig(struct pcmcia_device *link);
+
+int comedi_pcmcia_driver_register(struct comedi_driver *comedi_driver,
+ struct pcmcia_driver *pcmcia_driver);
+void comedi_pcmcia_driver_unregister(struct comedi_driver *comedi_driver,
+ struct pcmcia_driver *pcmcia_driver);
+
+/**
+ * module_comedi_pcmcia_driver() - Helper macro for registering a comedi
+ * PCMCIA driver
+ * @__comedi_driver: comedi_driver struct
+ * @__pcmcia_driver: pcmcia_driver struct
+ *
+ * Helper macro for comedi PCMCIA drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ */
+#define module_comedi_pcmcia_driver(__comedi_driver, __pcmcia_driver) \
+ module_driver(__comedi_driver, comedi_pcmcia_driver_register, \
+ comedi_pcmcia_driver_unregister, &(__pcmcia_driver))
+
+#endif /* _COMEDI_PCMCIA_H */
diff --git a/include/linux/comedi/comedi_usb.h b/include/linux/comedi/comedi_usb.h
new file mode 100644
index 000000000000..5d17dd425bd2
--- /dev/null
+++ b/include/linux/comedi/comedi_usb.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* comedi_usb.h
+ * header file for USB Comedi drivers
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDI_USB_H
+#define _COMEDI_USB_H
+
+#include <linux/usb.h>
+#include <linux/comedi/comedidev.h>
+
+struct usb_interface *comedi_to_usb_interface(struct comedi_device *dev);
+struct usb_device *comedi_to_usb_dev(struct comedi_device *dev);
+
+int comedi_usb_auto_config(struct usb_interface *intf,
+ struct comedi_driver *driver, unsigned long context);
+void comedi_usb_auto_unconfig(struct usb_interface *intf);
+
+int comedi_usb_driver_register(struct comedi_driver *comedi_driver,
+ struct usb_driver *usb_driver);
+void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
+ struct usb_driver *usb_driver);
+
+/**
+ * module_comedi_usb_driver() - Helper macro for registering a comedi USB driver
+ * @__comedi_driver: comedi_driver struct
+ * @__usb_driver: usb_driver struct
+ *
+ * Helper macro for comedi USB drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ */
+#define module_comedi_usb_driver(__comedi_driver, __usb_driver) \
+ module_driver(__comedi_driver, comedi_usb_driver_register, \
+ comedi_usb_driver_unregister, &(__usb_driver))
+
+#endif /* _COMEDI_USB_H */
diff --git a/include/linux/comedi/comedidev.h b/include/linux/comedi/comedidev.h
new file mode 100644
index 000000000000..35fdc41845ce
--- /dev/null
+++ b/include/linux/comedi/comedidev.h
@@ -0,0 +1,1054 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedidev.h
+ * header file for kernel-only structures, variables, and constants
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _COMEDIDEV_H
+#define _COMEDIDEV_H
+
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
+#include <linux/rwsem.h>
+#include <linux/kref.h>
+#include <linux/completion.h>
+#include <linux/comedi.h>
+
+#define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define COMEDI_VERSION_CODE COMEDI_VERSION(COMEDI_MAJORVERSION, \
+ COMEDI_MINORVERSION, COMEDI_MICROVERSION)
+#define COMEDI_RELEASE VERSION
+
+#define COMEDI_NUM_BOARD_MINORS 0x30
+
+/**
+ * struct comedi_subdevice - Working data for a COMEDI subdevice
+ * @device: COMEDI device to which this subdevice belongs. (Initialized by
+ * comedi_alloc_subdevices().)
+ * @index: Index of this subdevice within device's array of subdevices.
+ * (Initialized by comedi_alloc_subdevices().)
+ * @type: Type of subdevice from &enum comedi_subdevice_type. (Initialized by
+ * the low-level driver.)
+ * @n_chan: Number of channels the subdevice supports. (Initialized by the
+ * low-level driver.)
+ * @subdev_flags: Various "SDF" flags indicating aspects of the subdevice to
+ * the COMEDI core and user application. (Initialized by the low-level
+ * driver.)
+ * @len_chanlist: Maximum length of a channel list if the subdevice supports
+ * asynchronous acquisition commands. (Optionally initialized by the
+ * low-level driver, or changed from 0 to 1 during post-configuration.)
+ * @private: Private data pointer which is either set by the low-level driver
+ * itself, or by a call to comedi_alloc_spriv() which allocates storage.
+ * In the latter case, the storage is automatically freed after the
+ * low-level driver's "detach" handler is called for the device.
+ * (Initialized by the low-level driver.)
+ * @async: Pointer to &struct comedi_async if the subdevice supports
+ * asynchronous acquisition commands. (Allocated and initialized during
+ * post-configuration if needed.)
+ * @lock: Pointer to a file object that performed a %COMEDI_LOCK ioctl on the
+ * subdevice. (Initially NULL.)
+ * @busy: Pointer to a file object that is performing an asynchronous
+ * acquisition command on the subdevice. (Initially NULL.)
+ * @runflags: Internal flags for use by COMEDI core, mostly indicating whether
+ * an asynchronous acquisition command is running.
+ * @spin_lock: Generic spin-lock for use by the COMEDI core and the low-level
+ * driver. (Initialized by comedi_alloc_subdevices().)
+ * @io_bits: Bit-mask indicating the channel directions for a DIO subdevice
+ * with no more than 32 channels. A '1' at a bit position indicates the
+ * corresponding channel is configured as an output. (Initialized by the
+ * low-level driver for a DIO subdevice. Forced to all-outputs during
+ * post-configuration for a digital output subdevice.)
+ * @maxdata: If non-zero, this is the maximum raw data value of each channel.
+ * If zero, the maximum data value is channel-specific. (Initialized by
+ * the low-level driver.)
+ * @maxdata_list: If the maximum data value is channel-specific, this points
+ * to an array of maximum data values indexed by channel index.
+ * (Initialized by the low-level driver.)
+ * @range_table: If non-NULL, this points to a COMEDI range table for the
+ * subdevice. If NULL, the range table is channel-specific. (Initialized
+ * by the low-level driver, will be set to an "invalid" range table during
+ * post-configuration if @range_table and @range_table_list are both
+ * NULL.)
+ * @range_table_list: If the COMEDI range table is channel-specific, this
+ * points to an array of pointers to COMEDI range tables indexed by
+ * channel number. (Initialized by the low-level driver.)
+ * @chanlist: Not used.
+ * @insn_read: Optional pointer to a handler for the %INSN_READ instruction.
+ * (Initialized by the low-level driver, or set to a default handler
+ * during post-configuration.)
+ * @insn_write: Optional pointer to a handler for the %INSN_WRITE instruction.
+ * (Initialized by the low-level driver, or set to a default handler
+ * during post-configuration.)
+ * @insn_bits: Optional pointer to a handler for the %INSN_BITS instruction
+ * for a digital input, digital output or digital input/output subdevice.
+ * (Initialized by the low-level driver, or set to a default handler
+ * during post-configuration.)
+ * @insn_config: Optional pointer to a handler for the %INSN_CONFIG
+ * instruction. (Initialized by the low-level driver, or set to a default
+ * handler during post-configuration.)
+ * @do_cmd: If the subdevice supports asynchronous acquisition commands, this
+ * points to a handler to set it up in hardware. (Initialized by the
+ * low-level driver.)
+ * @do_cmdtest: If the subdevice supports asynchronous acquisition commands,
+ * this points to a handler used to check and possibly tweak a prospective
+ * acquisition command without setting it up in hardware. (Initialized by
+ * the low-level driver.)
+ * @poll: If the subdevice supports asynchronous acquisition commands, this
+ * is an optional pointer to a handler for the %COMEDI_POLL ioctl which
+ * instructs the low-level driver to synchronize buffers. (Initialized by
+ * the low-level driver if needed.)
+ * @cancel: If the subdevice supports asynchronous acquisition commands, this
+ * points to a handler used to terminate a running command. (Initialized
+ * by the low-level driver.)
+ * @buf_change: If the subdevice supports asynchronous acquisition commands,
+ * this is an optional pointer to a handler that is called when the data
+ * buffer for handling asynchronous commands is allocated or reallocated.
+ * (Initialized by the low-level driver if needed.)
+ * @munge: If the subdevice supports asynchronous acquisition commands and
+ * uses DMA to transfer data from the hardware to the acquisition buffer,
+ * this points to a function used to "munge" the data values from the
+ * hardware into the format expected by COMEDI. (Initialized by the
+ * low-level driver if needed.)
+ * @async_dma_dir: If the subdevice supports asynchronous acquisition commands
+ * and uses DMA to transfer data from the hardware to the acquisition
+ * buffer, this sets the DMA direction for the buffer. (initialized to
+ * %DMA_NONE by comedi_alloc_subdevices() and changed by the low-level
+ * driver if necessary.)
+ * @state: Handy bit-mask indicating the output states for a DIO or digital
+ * output subdevice with no more than 32 channels. (Initialized by the
+ * low-level driver.)
+ * @class_dev: If the subdevice supports asynchronous acquisition commands,
+ * this points to a sysfs comediX_subdY device where X is the minor device
+ * number of the COMEDI device and Y is the subdevice number. The minor
+ * device number for the sysfs device is allocated dynamically in the
+ * range 48 to 255. This is used to allow the COMEDI device to be opened
+ * with a different default read or write subdevice. (Allocated during
+ * post-configuration if needed.)
+ * @minor: If @class_dev is set, this is its dynamically allocated minor
+ * device number. (Set during post-configuration if necessary.)
+ * @readback: Optional pointer to memory allocated by
+ * comedi_alloc_subdev_readback() used to hold the values written to
+ * analog output channels so they can be read back. The storage is
+ * automatically freed after the low-level driver's "detach" handler is
+ * called for the device. (Initialized by the low-level driver.)
+ *
+ * This is the main control structure for a COMEDI subdevice. If the subdevice
+ * supports asynchronous acquisition commands, additional information is stored
+ * in the &struct comedi_async pointed to by @async.
+ *
+ * Most of the subdevice is initialized by the low-level driver's "attach" or
+ * "auto_attach" handlers but parts of it are initialized by
+ * comedi_alloc_subdevices(), and other parts are initialized during
+ * post-configuration on return from that handler.
+ *
+ * A low-level driver that sets @insn_bits for a digital input, digital output,
+ * or DIO subdevice may leave @insn_read and @insn_write uninitialized, in
+ * which case they will be set to a default handler during post-configuration
+ * that uses @insn_bits to emulate the %INSN_READ and %INSN_WRITE instructions.
+ */
+struct comedi_subdevice {
+ struct comedi_device *device;
+ int index;
+ int type;
+ int n_chan;
+ int subdev_flags;
+ int len_chanlist; /* maximum length of channel/gain list */
+
+ void *private;
+
+ struct comedi_async *async;
+
+ void *lock;
+ void *busy;
+ unsigned int runflags;
+ spinlock_t spin_lock; /* generic spin-lock for COMEDI and drivers */
+
+ unsigned int io_bits;
+
+ unsigned int maxdata; /* if maxdata==0, use list */
+ const unsigned int *maxdata_list; /* list is channel specific */
+
+ const struct comedi_lrange *range_table;
+ const struct comedi_lrange *const *range_table_list;
+
+ unsigned int *chanlist; /* driver-owned chanlist (not used) */
+
+ int (*insn_read)(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int (*insn_write)(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int (*insn_bits)(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+ int (*insn_config)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data);
+
+ int (*do_cmd)(struct comedi_device *dev, struct comedi_subdevice *s);
+ int (*do_cmdtest)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd);
+ int (*poll)(struct comedi_device *dev, struct comedi_subdevice *s);
+ int (*cancel)(struct comedi_device *dev, struct comedi_subdevice *s);
+
+ /* called when the buffer changes */
+ int (*buf_change)(struct comedi_device *dev,
+ struct comedi_subdevice *s);
+
+ void (*munge)(struct comedi_device *dev, struct comedi_subdevice *s,
+ void *data, unsigned int num_bytes,
+ unsigned int start_chan_index);
+ enum dma_data_direction async_dma_dir;
+
+ unsigned int state;
+
+ struct device *class_dev;
+ int minor;
+
+ unsigned int *readback;
+};
+
+/**
+ * struct comedi_buf_page - Describe a page of a COMEDI buffer
+ * @virt_addr: Kernel address of page.
+ * @dma_addr: DMA address of page if in DMA coherent memory.
+ */
+struct comedi_buf_page {
+ void *virt_addr;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct comedi_buf_map - Describe pages in a COMEDI buffer
+ * @dma_hw_dev: Low-level hardware &struct device pointer copied from the
+ * COMEDI device's hw_dev member.
+ * @page_list: Pointer to array of &struct comedi_buf_page, one for each
+ * page in the buffer.
+ * @n_pages: Number of pages in the buffer.
+ * @dma_dir: DMA direction used to allocate pages of DMA coherent memory,
+ * or %DMA_NONE if pages allocated from regular memory.
+ * @refcount: &struct kref reference counter used to free the buffer.
+ *
+ * A COMEDI data buffer is allocated as individual pages, either in
+ * conventional memory or DMA coherent memory, depending on the attached,
+ * low-level hardware device.
+ *
+ * The buffer is normally freed when the COMEDI device is detached from the
+ * low-level driver (which may happen due to device removal), but if it happens
+ * to be mmapped at the time, the pages cannot be freed until the buffer has
+ * been munmapped. That is what the reference counter is for.
+ */
+struct comedi_buf_map {
+ struct device *dma_hw_dev;
+ struct comedi_buf_page *page_list;
+ unsigned int n_pages;
+ enum dma_data_direction dma_dir;
+ struct kref refcount;
+};
+
+/**
+ * struct comedi_async - Control data for asynchronous COMEDI commands
+ * @prealloc_bufsz: Buffer size (in bytes).
+ * @buf_map: Map of buffer pages.
+ * @max_bufsize: Maximum allowed buffer size (in bytes).
+ * @buf_write_count: "Write completed" count (in bytes, modulo 2**32).
+ * @buf_write_alloc_count: "Allocated for writing" count (in bytes,
+ * modulo 2**32).
+ * @buf_read_count: "Read completed" count (in bytes, modulo 2**32).
+ * @buf_read_alloc_count: "Allocated for reading" count (in bytes,
+ * modulo 2**32).
+ * @buf_write_ptr: Buffer position for writer.
+ * @buf_read_ptr: Buffer position for reader.
+ * @cur_chan: Current position in chanlist for scan (for those drivers that
+ * use it).
+ * @scans_done: The number of scans completed.
+ * @scan_progress: Amount received or sent for current scan (in bytes).
+ * @munge_chan: Current position in chanlist for "munging".
+ * @munge_count: "Munge" count (in bytes, modulo 2**32).
+ * @munge_ptr: Buffer position for "munging".
+ * @events: Bit-vector of events that have occurred.
+ * @cmd: Details of comedi command in progress.
+ * @wait_head: Task wait queue for file reader or writer.
+ * @run_complete: "run complete" completion event.
+ * @run_active: "run active" reference counter.
+ * @cb_mask: Bit-vector of events that should wake waiting tasks.
+ * @inttrig: Software trigger function for command, or NULL.
+ *
+ * Note about the ..._count and ..._ptr members:
+ *
+ * Think of the _Count values being integers of unlimited size, indexing
+ * into a buffer of infinite length (though only an advancing portion
+ * of the buffer of fixed length prealloc_bufsz is accessible at any
+ * time). Then:
+ *
+ * Buf_Read_Count <= Buf_Read_Alloc_Count <= Munge_Count <=
+ * Buf_Write_Count <= Buf_Write_Alloc_Count <=
+ * (Buf_Read_Count + prealloc_bufsz)
+ *
+ * (Those aren't the actual members, apart from prealloc_bufsz.) When the
+ * buffer is reset, those _Count values start at 0 and only increase in value,
+ * maintaining the above inequalities until the next time the buffer is
+ * reset. The buffer is divided into the following regions by the inequalities:
+ *
+ * [0, Buf_Read_Count):
+ * old region no longer accessible
+ *
+ * [Buf_Read_Count, Buf_Read_Alloc_Count):
+ * filled and munged region allocated for reading but not yet read
+ *
+ * [Buf_Read_Alloc_Count, Munge_Count):
+ * filled and munged region not yet allocated for reading
+ *
+ * [Munge_Count, Buf_Write_Count):
+ * filled region not yet munged
+ *
+ * [Buf_Write_Count, Buf_Write_Alloc_Count):
+ * unfilled region allocated for writing but not yet written
+ *
+ * [Buf_Write_Alloc_Count, Buf_Read_Count + prealloc_bufsz):
+ * unfilled region not yet allocated for writing
+ *
+ * [Buf_Read_Count + prealloc_bufsz, infinity):
+ * unfilled region not yet accessible
+ *
+ * Data needs to be written into the buffer before it can be read out,
+ * and may need to be converted (or "munged") between the two
+ * operations. Extra unfilled buffer space may need to be allocated for
+ * writing (advancing Buf_Write_Alloc_Count) before new data is written.
+ * After writing new data, the newly filled space needs to be released
+ * (advancing Buf_Write_Count). This also results in the new data being
+ * "munged" (advancing Munge_Count). Before data is read out of the
+ * buffer, extra space may need to be allocated for reading (advancing
+ * Buf_Read_Alloc_Count). After the data has been read out, the space
+ * needs to be released (advancing Buf_Read_Count).
+ *
+ * The actual members, buf_read_count, buf_read_alloc_count,
+ * munge_count, buf_write_count, and buf_write_alloc_count take the
+ * value of the corresponding capitalized _Count values modulo 2^32
+ * (UINT_MAX+1). Subtracting a "lower" _count value from a "higher"
+ * _count value gives the same answer as subtracting a "lower" _Count
+ * value from a "higher" _Count value because prealloc_bufsz < UINT_MAX+1.
+ * The modulo operation is done implicitly.
+ *
+ * The buf_read_ptr, munge_ptr, and buf_write_ptr members take the value
+ * of the corresponding capitalized _Count values modulo prealloc_bufsz.
+ * These correspond to byte indices in the physical buffer. The modulo
+ * operation is done by subtracting prealloc_bufsz when the value
+ * exceeds prealloc_bufsz (assuming prealloc_bufsz plus the increment is
+ * less than or equal to UINT_MAX).
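+ *
+ * For example (a sketch), the number of bytes that have been written and
+ * munged but not yet read out is just an unsigned difference, which stays
+ * correct even after the 32-bit counters wrap:
+ *
+ *	unsigned int n = async->munge_count - async->buf_read_count;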
+ */
+struct comedi_async {
+ unsigned int prealloc_bufsz;
+ struct comedi_buf_map *buf_map;
+ unsigned int max_bufsize;
+ unsigned int buf_write_count;
+ unsigned int buf_write_alloc_count;
+ unsigned int buf_read_count;
+ unsigned int buf_read_alloc_count;
+ unsigned int buf_write_ptr;
+ unsigned int buf_read_ptr;
+ unsigned int cur_chan;
+ unsigned int scans_done;
+ unsigned int scan_progress;
+ unsigned int munge_chan;
+ unsigned int munge_count;
+ unsigned int munge_ptr;
+ unsigned int events;
+ struct comedi_cmd cmd;
+ wait_queue_head_t wait_head;
+ struct completion run_complete;
+ refcount_t run_active;
+ unsigned int cb_mask;
+ int (*inttrig)(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned int x);
+};
+
+/**
+ * enum comedi_cb - &struct comedi_async callback "events"
+ * @COMEDI_CB_EOS: end-of-scan
+ * @COMEDI_CB_EOA: end-of-acquisition/output
+ * @COMEDI_CB_BLOCK: data has arrived, wakes up read() / write()
+ * @COMEDI_CB_EOBUF: DEPRECATED: end of buffer
+ * @COMEDI_CB_ERROR: card error during acquisition
+ * @COMEDI_CB_OVERFLOW: buffer overflow/underflow
+ * @COMEDI_CB_ERROR_MASK: events that indicate an error has occurred
+ * @COMEDI_CB_CANCEL_MASK: events that will cancel an async command
+ */
+enum comedi_cb {
+ COMEDI_CB_EOS = BIT(0),
+ COMEDI_CB_EOA = BIT(1),
+ COMEDI_CB_BLOCK = BIT(2),
+ COMEDI_CB_EOBUF = BIT(3),
+ COMEDI_CB_ERROR = BIT(4),
+ COMEDI_CB_OVERFLOW = BIT(5),
+ /* masks */
+ COMEDI_CB_ERROR_MASK = (COMEDI_CB_ERROR | COMEDI_CB_OVERFLOW),
+ COMEDI_CB_CANCEL_MASK = (COMEDI_CB_EOA | COMEDI_CB_ERROR_MASK)
+};
+
+/**
+ * struct comedi_driver - COMEDI driver registration
+ * @driver_name: Name of driver.
+ * @module: Owning module.
+ * @attach: The optional "attach" handler for manually configured COMEDI
+ * devices.
+ * @detach: The "detach" handler for deconfiguring COMEDI devices.
+ * @auto_attach: The optional "auto_attach" handler for automatically
+ * configured COMEDI devices.
+ * @num_names: Optional number of "board names" supported.
+ * @board_name: Optional pointer to a pointer to a board name. The pointer
+ * to a board name is embedded in an element of a driver-defined array
+ * of static, read-only board type information.
+ * @offset: Optional size of each element of the driver-defined array of
+ * static, read-only board type information, i.e. the offset between each
+ * pointer to a board name.
+ *
+ * This is used with comedi_driver_register() and comedi_driver_unregister() to
+ * register and unregister a low-level COMEDI driver with the COMEDI core.
+ *
+ * If @num_names is non-zero, @board_name should be non-NULL, and @offset
+ * should be at least sizeof(*board_name). These are used by the handler for
+ * the %COMEDI_DEVCONFIG ioctl to match a hardware device and its driver by
+ * board name. If @num_names is zero, the %COMEDI_DEVCONFIG ioctl matches a
+ * hardware device and its driver by driver name. This is only useful if the
+ * @attach handler is set. If @num_names is non-zero, the driver's @attach
+ * handler will be called with the COMEDI device structure's board_ptr member
+ * pointing to the matched pointer to a board name within the driver's private
+ * array of static, read-only board type information.
+ *
+ * The @detach handler has two roles. If a COMEDI device was successfully
+ * configured by the @attach or @auto_attach handler, it is called when the
+ * device is being deconfigured (by the %COMEDI_DEVCONFIG ioctl, or due to
+ * unloading of the driver, or due to device removal). It is also called when
+ * the @attach or @auto_attach handler returns an error. Therefore, the
+ * @attach or @auto_attach handlers can defer clean-up on error until the
+ * @detach handler is called. If the @attach or @auto_attach handlers free
+ * any resources themselves, they must prevent the @detach handler from
+ * freeing the same resources. The @detach handler must not assume that all
+ * resources requested by the @attach or @auto_attach handler were
+ * successfully allocated.
+ */
+struct comedi_driver {
+ /* private: */
+ struct comedi_driver *next; /* Next in list of COMEDI drivers. */
+ /* public: */
+ const char *driver_name;
+ struct module *module;
+ int (*attach)(struct comedi_device *dev, struct comedi_devconfig *it);
+ void (*detach)(struct comedi_device *dev);
+ int (*auto_attach)(struct comedi_device *dev, unsigned long context);
+ unsigned int num_names;
+ const char *const *board_name;
+ int offset;
+};
+
+/**
+ * struct comedi_device - Working data for a COMEDI device
+ * @use_count: Number of open file objects.
+ * @driver: Low-level COMEDI driver attached to this COMEDI device.
+ * @pacer: Optional pointer to a dynamically allocated acquisition pacer
+ * control. It is freed automatically after the COMEDI device is
+ * detached from the low-level driver.
+ * @private: Optional pointer to private data allocated by the low-level
+ * driver. It is freed automatically after the COMEDI device is
+ * detached from the low-level driver.
+ * @class_dev: Sysfs comediX device.
+ * @minor: Minor device number of COMEDI char device (0-47).
+ * @detach_count: Counter incremented every time the COMEDI device is detached.
+ * Used for checking a previous attachment is still valid.
+ * @hw_dev: Optional pointer to the low-level hardware &struct device. It is
+ * required for automatically configured COMEDI devices and optional for
+ * COMEDI devices configured by the %COMEDI_DEVCONFIG ioctl, although
+ * the bus-specific COMEDI functions only work if it is set correctly.
+ * It is also passed to dma_alloc_coherent() for COMEDI subdevices that
+ * have their 'async_dma_dir' member set to something other than
+ * %DMA_NONE.
+ * @board_name: Pointer to a COMEDI board name or a COMEDI driver name. When
+ * the low-level driver's "attach" handler is called by the handler for
+ * the %COMEDI_DEVCONFIG ioctl, it points to a matched board name
+ * string if the 'num_names' member of the &struct comedi_driver is
+ * non-zero, otherwise it points to the low-level driver name string.
+ * When the low-level driver's "auto_attach" handler is called for an
+ * automatically configured COMEDI device, it points to the low-level
+ * driver name string. The low-level driver is free to change it in its
+ * "attach" or "auto_attach" handler if it wishes.
+ * @board_ptr: Optional pointer to private, read-only board type information in
+ * the low-level driver. If the 'num_names' member of the &struct
+ * comedi_driver is non-zero, the handler for the %COMEDI_DEVCONFIG ioctl
+ * will point it to a pointer to a matched board name string within the
+ * driver's private array of static, read-only board type information when
+ * calling the driver's "attach" handler. The low-level driver is free to
+ * change it.
+ * @attached: Flag indicating that the COMEDI device is attached to a low-level
+ * driver.
+ * @ioenabled: Flag used to indicate that a PCI device has been enabled and
+ * its regions requested.
+ * @spinlock: Generic spin-lock for use by the low-level driver.
+ * @mutex: Generic mutex for use by the COMEDI core module.
+ * @attach_lock: &struct rw_semaphore used to guard against the COMEDI device
+ * being detached while an operation is in progress. The down_write()
+ * operation is only allowed while @mutex is held and is used when
+ * changing @attached and @detach_count and calling the low-level driver's
+ * "detach" handler. The down_read() operation is generally used without
+ * holding @mutex.
+ * @refcount: &struct kref reference counter for freeing COMEDI device.
+ * @n_subdevices: Number of COMEDI subdevices allocated by the low-level
+ * driver for this device.
+ * @subdevices: Dynamically allocated array of COMEDI subdevices.
+ * @mmio: Optional pointer to a remapped MMIO region set by the low-level
+ * driver.
+ * @iobase: Optional base of an I/O port region requested by the low-level
+ * driver.
+ * @iolen: Length of I/O port region requested at @iobase.
+ * @irq: Optional IRQ number requested by the low-level driver.
+ * @read_subdev: Optional pointer to a default COMEDI subdevice operated on by
+ * the read() file operation. Set by the low-level driver.
+ * @write_subdev: Optional pointer to a default COMEDI subdevice operated on by
+ * the write() file operation. Set by the low-level driver.
+ * @async_queue: Storage for fasync_helper().
+ * @open: Optional pointer to a function set by the low-level driver to be
+ * called when @use_count changes from 0 to 1.
+ * @close: Optional pointer to a function set by the low-level driver to be
+ * called when @use_count changes from 1 to 0.
+ * @insn_device_config: Optional pointer to a handler for all sub-instructions
+ * except %INSN_DEVICE_CONFIG_GET_ROUTES of the %INSN_DEVICE_CONFIG
+ * instruction. If this is not initialized by the low-level driver, a
+ * default handler will be set during post-configuration.
+ * @get_valid_routes: Optional pointer to a handler for the
+ * %INSN_DEVICE_CONFIG_GET_ROUTES sub-instruction of the
+ * %INSN_DEVICE_CONFIG instruction set. If this is not initialized by the
+ * low-level driver, a default handler that copies zero routes back to the
+ * user will be used.
+ *
+ * This is the main control data structure for a COMEDI device (as far as the
+ * COMEDI core is concerned). There are two groups of COMEDI devices -
+ * "legacy" devices that are configured by the handler for the
+ * %COMEDI_DEVCONFIG ioctl, and automatically configured devices resulting
+ * from a call to comedi_auto_config() as a result of a bus driver probe in
+ * a low-level COMEDI driver. The "legacy" COMEDI devices are allocated
+ * during module initialization if the "comedi_num_legacy_minors" module
+ * parameter is non-zero and use minor device numbers from 0 to
+ * comedi_num_legacy_minors minus one. The automatically configured COMEDI
+ * devices are allocated on demand and use minor device numbers from
+ * comedi_num_legacy_minors to 47.
+ */
+struct comedi_device {
+ int use_count;
+ struct comedi_driver *driver;
+ struct comedi_8254 *pacer;
+ void *private;
+
+ struct device *class_dev;
+ int minor;
+ unsigned int detach_count;
+ struct device *hw_dev;
+
+ const char *board_name;
+ const void *board_ptr;
+ unsigned int attached:1;
+ unsigned int ioenabled:1;
+ spinlock_t spinlock; /* generic spin-lock for low-level driver */
+ struct mutex mutex; /* generic mutex for COMEDI core */
+ struct rw_semaphore attach_lock;
+ struct kref refcount;
+
+ int n_subdevices;
+ struct comedi_subdevice *subdevices;
+
+ /* dumb */
+ void __iomem *mmio;
+ unsigned long iobase;
+ unsigned long iolen;
+ unsigned int irq;
+
+ struct comedi_subdevice *read_subdev;
+ struct comedi_subdevice *write_subdev;
+
+ struct fasync_struct *async_queue;
+
+ int (*open)(struct comedi_device *dev);
+ void (*close)(struct comedi_device *dev);
+ int (*insn_device_config)(struct comedi_device *dev,
+ struct comedi_insn *insn, unsigned int *data);
+ unsigned int (*get_valid_routes)(struct comedi_device *dev,
+ unsigned int n_pairs,
+ unsigned int *pair_data);
+};
+
+/*
+ * function prototypes
+ */
+
+void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s);
+
+struct comedi_device *comedi_dev_get_from_minor(unsigned int minor);
+int comedi_dev_put(struct comedi_device *dev);
+
+bool comedi_is_subdevice_running(struct comedi_subdevice *s);
+bool comedi_get_is_subdevice_running(struct comedi_subdevice *s);
+void comedi_put_is_subdevice_running(struct comedi_subdevice *s);
+
+void *comedi_alloc_spriv(struct comedi_subdevice *s, size_t size);
+void comedi_set_spriv_auto_free(struct comedi_subdevice *s);
+
+int comedi_check_chanlist(struct comedi_subdevice *s,
+ int n,
+ unsigned int *chanlist);
+
+/* range stuff */
+
+#define RANGE(a, b) {(a) * 1e6, (b) * 1e6, 0}
+#define RANGE_ext(a, b) {(a) * 1e6, (b) * 1e6, RF_EXTERNAL}
+#define RANGE_mA(a, b) {(a) * 1e6, (b) * 1e6, UNIT_mA}
+#define RANGE_unitless(a, b) {(a) * 1e6, (b) * 1e6, 0}
+#define BIP_RANGE(a) {-(a) * 1e6, (a) * 1e6, 0}
+#define UNI_RANGE(a) {0, (a) * 1e6, 0}
+
+extern const struct comedi_lrange range_bipolar10;
+extern const struct comedi_lrange range_bipolar5;
+extern const struct comedi_lrange range_bipolar2_5;
+extern const struct comedi_lrange range_unipolar10;
+extern const struct comedi_lrange range_unipolar5;
+extern const struct comedi_lrange range_unipolar2_5;
+extern const struct comedi_lrange range_0_20mA;
+extern const struct comedi_lrange range_4_20mA;
+extern const struct comedi_lrange range_0_32mA;
+extern const struct comedi_lrange range_unknown;
+
+#define range_digital range_unipolar5
+
+/**
+ * struct comedi_lrange - Describes a COMEDI range table
+ * @length: Number of entries in the range table.
+ * @range: Array of &struct comedi_krange, one for each range.
+ *
+ * Each element of @range[] describes the minimum and maximum physical range
+ * and the type of units. Typically, the type of unit is %UNIT_volt
+ * (i.e. volts) and the minimum and maximum are in millionths of a volt.
+ * There may also be a flag that indicates the minimum and maximum are merely
+ * scale factors for an unknown, external reference.
+ */
+struct comedi_lrange {
+ int length;
+ struct comedi_krange range[] __counted_by(length);
+};
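+
+/*
+ * For illustration (a hypothetical analog input subdevice), a range
+ * table can be built with the helper macros defined above:
+ *
+ *   static const struct comedi_lrange mydrv_ai_range = {
+ *       4, {
+ *           BIP_RANGE(10),
+ *           BIP_RANGE(5),
+ *           UNI_RANGE(10),
+ *           UNI_RANGE(5)
+ *       }
+ *   };
+ */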
+
+/**
+ * comedi_range_is_bipolar() - Test if subdevice range is bipolar
+ * @s: COMEDI subdevice.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is bipolar by checking whether its minimum value
+ * is negative.
+ *
+ * Assumes @range is valid. Does not work for subdevices using a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is bipolar.
+ * %false if the range is unipolar.
+ */
+static inline bool comedi_range_is_bipolar(struct comedi_subdevice *s,
+ unsigned int range)
+{
+ return s->range_table->range[range].min < 0;
+}
+
+/**
+ * comedi_range_is_unipolar() - Test if subdevice range is unipolar
+ * @s: COMEDI subdevice.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is unipolar by checking whether its minimum value
+ * is at least 0.
+ *
+ * Assumes @range is valid. Does not work for subdevices using a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is unipolar.
+ * %false if the range is bipolar.
+ */
+static inline bool comedi_range_is_unipolar(struct comedi_subdevice *s,
+ unsigned int range)
+{
+ return s->range_table->range[range].min >= 0;
+}
+
+/**
+ * comedi_range_is_external() - Test if subdevice range is external
+ * @s: COMEDI subdevice.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is externally referenced by checking whether its
+ * %RF_EXTERNAL flag is set.
+ *
+ * Assumes @range is valid. Does not work for subdevices using a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is external.
+ * %false if the range is internal.
+ */
+static inline bool comedi_range_is_external(struct comedi_subdevice *s,
+ unsigned int range)
+{
+ return !!(s->range_table->range[range].flags & RF_EXTERNAL);
+}
+
+/**
+ * comedi_chan_range_is_bipolar() - Test if channel-specific range is bipolar
+ * @s: COMEDI subdevice.
+ * @chan: The channel number.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is bipolar by checking whether its minimum value
+ * is negative.
+ *
+ * Assumes @chan and @range are valid. Only works for subdevices with a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is bipolar.
+ * %false if the range is unipolar.
+ */
+static inline bool comedi_chan_range_is_bipolar(struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int range)
+{
+ return s->range_table_list[chan]->range[range].min < 0;
+}
+
+/**
+ * comedi_chan_range_is_unipolar() - Test if channel-specific range is unipolar
+ * @s: COMEDI subdevice.
+ * @chan: The channel number.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is unipolar by checking whether its minimum value
+ * is at least 0.
+ *
+ * Assumes @chan and @range are valid. Only works for subdevices with a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is unipolar.
+ * %false if the range is bipolar.
+ */
+static inline bool comedi_chan_range_is_unipolar(struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int range)
+{
+ return s->range_table_list[chan]->range[range].min >= 0;
+}
+
+/**
+ * comedi_chan_range_is_external() - Test if channel-specific range is external
+ * @s: COMEDI subdevice.
+ * @chan: The channel number.
+ * @range: Index of range within a range table.
+ *
+ * Tests whether a range is externally referenced by checking whether its
+ * %RF_EXTERNAL flag is set.
+ *
+ * Assumes @chan and @range are valid. Only works for subdevices with a
+ * channel-specific range table list.
+ *
+ * Return:
+ * %true if the range is external.
+ * %false if the range is internal.
+ */
+static inline bool comedi_chan_range_is_external(struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int range)
+{
+ return !!(s->range_table_list[chan]->range[range].flags & RF_EXTERNAL);
+}
+
+/**
+ * comedi_offset_munge() - Convert between offset binary and 2's complement
+ * @s: COMEDI subdevice.
+ * @val: Value to be converted.
+ *
+ * Toggles the highest bit of a sample value to toggle between offset binary
+ * and 2's complement. Assumes that @s->maxdata is a power of 2 minus 1.
+ *
+ * Return: The converted value.
+ */
+static inline unsigned int comedi_offset_munge(struct comedi_subdevice *s,
+ unsigned int val)
+{
+ return val ^ s->maxdata ^ (s->maxdata >> 1);
+}
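+
+/*
+ * For example, with a 16-bit subdevice (s->maxdata == 0xffff),
+ * s->maxdata ^ (s->maxdata >> 1) == 0x8000, so only bit 15 is toggled:
+ * an offset-binary mid-scale sample of 0x8000 becomes 0x0000 in 2's
+ * complement, and vice versa.
+ */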
+
+/**
+ * comedi_bytes_per_sample() - Determine subdevice sample size
+ * @s: COMEDI subdevice.
+ *
+ * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on
+ * whether the %SDF_LSAMPL subdevice flag is set or not.
+ *
+ * Return: The subdevice sample size.
+ */
+static inline unsigned int comedi_bytes_per_sample(struct comedi_subdevice *s)
+{
+ return s->subdev_flags & SDF_LSAMPL ? sizeof(int) : sizeof(short);
+}
+
+/**
+ * comedi_sample_shift() - Determine log2 of subdevice sample size
+ * @s: COMEDI subdevice.
+ *
+ * The sample size will be 4 (sizeof int) or 2 (sizeof short) depending on
+ * whether the %SDF_LSAMPL subdevice flag is set or not. The log2 of the
+ * sample size will be 2 or 1 and can be used as the right operand of a
+ * bit-shift operator to multiply or divide something by the sample size.
+ *
+ * Return: log2 of the subdevice sample size.
+ */
+static inline unsigned int comedi_sample_shift(struct comedi_subdevice *s)
+{
+ return s->subdev_flags & SDF_LSAMPL ? 2 : 1;
+}
+
+/**
+ * comedi_bytes_to_samples() - Convert a number of bytes to a number of samples
+ * @s: COMEDI subdevice.
+ * @nbytes: Number of bytes
+ *
+ * Return: The number of bytes divided by the subdevice sample size.
+ */
+static inline unsigned int comedi_bytes_to_samples(struct comedi_subdevice *s,
+ unsigned int nbytes)
+{
+ return nbytes >> comedi_sample_shift(s);
+}
+
+/**
+ * comedi_samples_to_bytes() - Convert a number of samples to a number of bytes
+ * @s: COMEDI subdevice.
+ * @nsamples: Number of samples.
+ *
+ * Return: The number of samples multiplied by the subdevice sample size.
+ * (Does not check for arithmetic overflow.)
+ */
+static inline unsigned int comedi_samples_to_bytes(struct comedi_subdevice *s,
+ unsigned int nsamples)
+{
+ return nsamples << comedi_sample_shift(s);
+}
+
+/**
+ * comedi_check_trigger_src() - Trivially validate a comedi_cmd trigger source
+ * @src: Pointer to the trigger source to validate.
+ * @flags: Bitmask of valid %TRIG_* for the trigger.
+ *
+ * This is used in "step 1" of the do_cmdtest functions of comedi drivers
+ * to validate the comedi_cmd triggers. Masking *@src against @flags
+ * allows the userspace comedilib to pass all the comedi_cmd
+ * triggers as %TRIG_ANY and get back a bitmask of the valid trigger sources.
+ *
+ * Return:
+ * 0 if trigger sources in *@src are all supported.
+ * -EINVAL if any trigger source in *@src is unsupported.
+ */
+static inline int comedi_check_trigger_src(unsigned int *src,
+ unsigned int flags)
+{
+ unsigned int orig_src = *src;
+
+ *src = orig_src & flags;
+ if (*src == TRIG_INVALID || *src != orig_src)
+ return -EINVAL;
+ return 0;
+}
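+
+/*
+ * A sketch of typical "step 1" usage in a driver's (*do_cmdtest)
+ * handler; the trigger masks shown are purely illustrative:
+ *
+ *   err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
+ *   err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
+ *   if (err)
+ *       return 1;
+ */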
+
+/**
+ * comedi_check_trigger_is_unique() - Make sure a trigger source is unique
+ * @src: The trigger source to check.
+ *
+ * Return:
+ * 0 if no more than one trigger source is set.
+ * -EINVAL if more than one trigger source is set.
+ */
+static inline int comedi_check_trigger_is_unique(unsigned int src)
+{
+ /* this test is true if more than one _src bit is set */
+ if ((src & (src - 1)) != 0)
+ return -EINVAL;
+ return 0;
+}
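+
+/*
+ * For example, src == (TRIG_TIMER | TRIG_EXT) has two bits set, so
+ * (src & (src - 1)) is non-zero and -EINVAL is returned; a single-bit
+ * source (or zero) passes.
+ */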
+
+/**
+ * comedi_check_trigger_arg_is() - Trivially validate a trigger argument
+ * @arg: Pointer to the trigger arg to validate.
+ * @val: The value the argument should be.
+ *
+ * Forces *@arg to be @val.
+ *
+ * Return:
+ * 0 if *@arg was already @val.
+ * -EINVAL if *@arg differed from @val.
+ */
+static inline int comedi_check_trigger_arg_is(unsigned int *arg,
+ unsigned int val)
+{
+ if (*arg != val) {
+ *arg = val;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * comedi_check_trigger_arg_min() - Trivially validate a trigger argument min
+ * @arg: Pointer to the trigger arg to validate.
+ * @val: The minimum value the argument should be.
+ *
+ * Forces *@arg to be at least @val, setting it to @val if necessary.
+ *
+ * Return:
+ * 0 if *@arg was already at least @val.
+ * -EINVAL if *@arg was less than @val.
+ */
+static inline int comedi_check_trigger_arg_min(unsigned int *arg,
+ unsigned int val)
+{
+ if (*arg < val) {
+ *arg = val;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * comedi_check_trigger_arg_max() - Trivially validate a trigger argument max
+ * @arg: Pointer to the trigger arg to validate.
+ * @val: The maximum value the argument should be.
+ *
+ * Forces *@arg to be no more than @val, setting it to @val if necessary.
+ *
+ * Return:
+ * 0 if *@arg was already no more than @val.
+ * -EINVAL if *@arg was greater than @val.
+ */
+static inline int comedi_check_trigger_arg_max(unsigned int *arg,
+ unsigned int val)
+{
+ if (*arg > val) {
+ *arg = val;
+ return -EINVAL;
+ }
+ return 0;
+}
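+
+/*
+ * A sketch of typical "step 3" usage in a (*do_cmdtest) handler; the
+ * limits shown are illustrative, not taken from any real hardware:
+ *
+ *   err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
+ *   err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, 10000);
+ *   err |= comedi_check_trigger_arg_max(&cmd->chanlist_len, 16);
+ *   if (err)
+ *       return 3;
+ */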
+
+/*
+ * Must set dev->hw_dev if you wish to DMA directly into COMEDI's buffer.
+ * Also useful for retrieving a previously configured hardware device of
+ * known bus type. Set automatically for auto-configured devices.
+ * Automatically set to NULL when detaching the hardware device.
+ */
+int comedi_set_hw_dev(struct comedi_device *dev, struct device *hw_dev);
+
+/**
+ * comedi_buf_n_bytes_ready() - Determine amount of unread data in buffer
+ * @s: COMEDI subdevice.
+ *
+ * Determines the number of bytes of unread data in the asynchronous
+ * acquisition data buffer for a subdevice. The data in question might not
+ * have been fully "munged" yet.
+ *
+ * Return: The amount of unread data in bytes.
+ */
+static inline unsigned int comedi_buf_n_bytes_ready(struct comedi_subdevice *s)
+{
+ return s->async->buf_write_count - s->async->buf_read_count;
+}
+
+unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s, unsigned int n);
+unsigned int comedi_buf_write_free(struct comedi_subdevice *s, unsigned int n);
+
+unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s);
+unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s, unsigned int n);
+unsigned int comedi_buf_read_free(struct comedi_subdevice *s, unsigned int n);
+
+unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
+ const void *data, unsigned int nsamples);
+unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
+ void *data, unsigned int nsamples);
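+
+/*
+ * A hedged sketch of typical use from an interrupt handler;
+ * mydrv_read_sample() is hypothetical, comedi_buf_write_samples() is
+ * declared above and comedi_handle_events() below:
+ *
+ *   unsigned short val = mydrv_read_sample(dev);
+ *
+ *   comedi_buf_write_samples(s, &val, 1);
+ *   comedi_handle_events(dev, s);
+ */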
+
+/* drivers.c - general comedi driver functions */
+
+#define COMEDI_TIMEOUT_MS 1000
+
+int comedi_timeout(struct comedi_device *dev, struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ int (*cb)(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned long context),
+ unsigned long context);
+
+unsigned int comedi_handle_events(struct comedi_device *dev,
+ struct comedi_subdevice *s);
+
+int comedi_dio_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data,
+ unsigned int mask);
+unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
+ unsigned int *data);
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+ struct comedi_cmd *cmd);
+unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
+unsigned int comedi_nscans_left(struct comedi_subdevice *s,
+ unsigned int nscans);
+unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
+ unsigned int nsamples);
+void comedi_inc_scan_progress(struct comedi_subdevice *s,
+ unsigned int num_bytes);
+
+void *comedi_alloc_devpriv(struct comedi_device *dev, size_t size);
+int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices);
+int comedi_alloc_subdev_readback(struct comedi_subdevice *s);
+
+int comedi_readback_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn, unsigned int *data);
+
+int comedi_load_firmware(struct comedi_device *dev, struct device *hw_dev,
+ const char *name,
+ int (*cb)(struct comedi_device *dev,
+ const u8 *data, size_t size,
+ unsigned long context),
+ unsigned long context);
+
+int __comedi_request_region(struct comedi_device *dev,
+ unsigned long start, unsigned long len);
+int comedi_request_region(struct comedi_device *dev,
+ unsigned long start, unsigned long len);
+void comedi_legacy_detach(struct comedi_device *dev);
+
+int comedi_auto_config(struct device *hardware_device,
+ struct comedi_driver *driver, unsigned long context);
+void comedi_auto_unconfig(struct device *hardware_device);
+
+int comedi_driver_register(struct comedi_driver *driver);
+void comedi_driver_unregister(struct comedi_driver *driver);
+
+/**
+ * module_comedi_driver() - Helper macro for registering a comedi driver
+ * @__comedi_driver: comedi_driver struct
+ *
+ * Helper macro for comedi drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only use
+ * this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_comedi_driver(__comedi_driver) \
+ module_driver(__comedi_driver, comedi_driver_register, \
+ comedi_driver_unregister)
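+
+/*
+ * For example, a driver that has filled in a &struct comedi_driver
+ * called mydrv_driver (hypothetical) would register it with:
+ *
+ *   module_comedi_driver(mydrv_driver);
+ */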
+
+#endif /* _COMEDIDEV_H */
diff --git a/include/linux/comedi/comedilib.h b/include/linux/comedi/comedilib.h
new file mode 100644
index 000000000000..1f2b22b383cc
--- /dev/null
+++ b/include/linux/comedi/comedilib.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * comedilib.h
+ * Header file for kcomedilib
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998-2001 David A. Schleef <ds@schleef.org>
+ */
+
+#ifndef _LINUX_COMEDILIB_H
+#define _LINUX_COMEDILIB_H
+
+struct comedi_device *comedi_open_from(const char *path, int from);
+
+/**
+ * comedi_open() - Open a COMEDI device from the kernel
+ * @path: Fake pathname of the form "/dev/comediN".
+ *
+ * Converts @path to a COMEDI device number and "opens" it if it exists
+ * and is attached to a low-level COMEDI driver.
+ *
+ * Return: A pointer to the COMEDI device on success.
+ * Return %NULL on failure.
+ */
+static inline struct comedi_device *comedi_open(const char *path)
+{
+ return comedi_open_from(path, -1);
+}
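+
+/*
+ * A minimal sketch of kcomedilib usage (error handling mostly elided):
+ *
+ *   struct comedi_device *dev = comedi_open("/dev/comedi0");
+ *
+ *   if (!dev)
+ *       return -ENODEV;
+ */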
+
+int comedi_close_from(struct comedi_device *dev, int from);
+
+/**
+ * comedi_close() - Close a COMEDI device from the kernel
+ * @dev: COMEDI device.
+ *
+ * Closes a COMEDI device previously opened by comedi_open().
+ *
+ * Return: 0
+ */
+static inline int comedi_close(struct comedi_device *dev)
+{
+ return comedi_close_from(dev, -1);
+}
+
+int comedi_dio_get_config(struct comedi_device *dev, unsigned int subdev,
+ unsigned int chan, unsigned int *io);
+int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
+ unsigned int chan, unsigned int io);
+int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev,
+ unsigned int mask, unsigned int *bits,
+ unsigned int base_channel);
+int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
+ unsigned int subd);
+int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice);
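+
+/*
+ * A hedged sketch of the DIO helpers above: configure channel 0 of
+ * subdevice 2 as an output, then update the low four channels (the
+ * subdevice and channel numbers are illustrative):
+ *
+ *   unsigned int bits = 0x01;
+ *
+ *   comedi_dio_config(dev, 2, 0, COMEDI_OUTPUT);
+ *   comedi_dio_bitfield2(dev, 2, 0x0f, &bits, 0);
+ */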
+
+#endif
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 4221888bdcd6..173d9c07a895 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -35,12 +35,12 @@ enum compact_result {
COMPACT_CONTINUE,
/*
- * The full zone was compacted scanned but wasn't successfull to compact
+ * The full zone was scanned but it was not possible to compact
* suitable pages.
*/
COMPACT_COMPLETE,
/*
- * direct compaction has scanned part of the zone but wasn't successfull
+ * direct compaction has scanned part of the zone but was unable
* to compact suitable pages.
*/
COMPACT_PARTIAL_SKIPPED,
@@ -80,12 +80,12 @@ static inline unsigned long compact_gap(unsigned int order)
return 2UL << order;
}
+static inline int current_is_kcompactd(void)
+{
+ return current->flags & PF_KCOMPACTD;
+}
+
#ifdef CONFIG_COMPACTION
-extern unsigned int sysctl_compaction_proactiveness;
-extern int sysctl_compaction_handler(struct ctl_table *table, int write,
- void *buffer, size_t *length, loff_t *ppos);
-extern int sysctl_extfrag_threshold;
-extern int sysctl_compact_unevictable_allowed;
extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
extern int fragmentation_index(struct zone *zone, unsigned int order);
@@ -94,89 +94,17 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
const struct alloc_context *ac, enum compact_priority prio,
struct page **page);
extern void reset_isolation_suitable(pg_data_t *pgdat);
-extern enum compact_result compaction_suitable(struct zone *zone, int order,
- unsigned int alloc_flags, int highest_zoneidx);
+extern bool compaction_suitable(struct zone *zone, int order,
+ unsigned long watermark, int highest_zoneidx);
extern void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success);
-/* Compaction has made some progress and retrying makes sense */
-static inline bool compaction_made_progress(enum compact_result result)
-{
- /*
- * Even though this might sound confusing this in fact tells us
- * that the compaction successfully isolated and migrated some
- * pageblocks.
- */
- if (result == COMPACT_SUCCESS)
- return true;
-
- return false;
-}
-
-/* Compaction has failed and it doesn't make much sense to keep retrying. */
-static inline bool compaction_failed(enum compact_result result)
-{
- /* All zones were scanned completely and still not result. */
- if (result == COMPACT_COMPLETE)
- return true;
-
- return false;
-}
-
-/* Compaction needs reclaim to be performed first, so it can continue. */
-static inline bool compaction_needs_reclaim(enum compact_result result)
-{
- /*
- * Compaction backed off due to watermark checks for order-0
- * so the regular reclaim has to try harder and reclaim something.
- */
- if (result == COMPACT_SKIPPED)
- return true;
-
- return false;
-}
-
-/*
- * Compaction has backed off for some reason after doing some work or none
- * at all. It might be throttling or lock contention. Retrying might be still
- * worthwhile, but with a higher priority if allowed.
- */
-static inline bool compaction_withdrawn(enum compact_result result)
-{
- /*
- * If compaction is deferred for high-order allocations, it is
- * because sync compaction recently failed. If this is the case
- * and the caller requested a THP allocation, we do not want
- * to heavily disrupt the system, so we fail the allocation
- * instead of entering direct reclaim.
- */
- if (result == COMPACT_DEFERRED)
- return true;
-
- /*
- * If compaction in async mode encounters contention or blocks higher
- * priority task we back off early rather than cause stalls.
- */
- if (result == COMPACT_CONTENDED)
- return true;
-
- /*
- * Page scanners have met but we haven't scanned full zones so this
- * is a back off in fact.
- */
- if (result == COMPACT_PARTIAL_SKIPPED)
- return true;
-
- return false;
-}
-
-
bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
int alloc_flags);
-extern int kcompactd_run(int nid);
-extern void kcompactd_stop(int nid);
+extern void __meminit kcompactd_run(int nid);
+extern void __meminit kcompactd_stop(int nid);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
#else
@@ -184,35 +112,15 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}
-static inline enum compact_result compaction_suitable(struct zone *zone, int order,
- int alloc_flags, int highest_zoneidx)
-{
- return COMPACT_SKIPPED;
-}
-
-static inline bool compaction_made_progress(enum compact_result result)
+static inline bool compaction_suitable(struct zone *zone, int order,
+ unsigned long watermark,
+ int highest_zoneidx)
{
return false;
}
-static inline bool compaction_failed(enum compact_result result)
+static inline void kcompactd_run(int nid)
{
- return false;
-}
-
-static inline bool compaction_needs_reclaim(enum compact_result result)
-{
- return false;
-}
-
-static inline bool compaction_withdrawn(enum compact_result result)
-{
- return true;
-}
-
-static inline int kcompactd_run(int nid)
-{
- return 0;
}
static inline void kcompactd_stop(int nid)
{
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 98dd7b324c35..56cebaff0c91 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -20,11 +20,8 @@
#include <linux/unistd.h>
#include <asm/compat.h>
-
-#ifdef CONFIG_COMPAT
#include <asm/siginfo.h>
#include <asm/signal.h>
-#endif
#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
/*
@@ -95,8 +92,6 @@ struct compat_iovec {
compat_size_t iov_len;
};
-#ifdef CONFIG_COMPAT
-
#ifndef compat_user_stack_pointer
#define compat_user_stack_pointer() current_user_stack_pointer()
#endif
@@ -213,12 +208,11 @@ typedef struct compat_siginfo {
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */
struct {
compat_uptr_t _addr; /* faulting insn/memory ref. */
-#ifdef __ARCH_SI_TRAPNO
- int _trapno; /* TRAP # which caused the signal */
-#endif
#define __COMPAT_ADDR_BND_PKEY_PAD (__alignof__(compat_uptr_t) < sizeof(short) ? \
sizeof(short) : __alignof__(compat_uptr_t))
union {
+ /* used on alpha and sparc */
+ int _trapno; /* TRAP # which caused the signal */
/*
* used when si_code=BUS_MCEERR_AR or
* used when si_code=BUS_MCEERR_AO
@@ -236,7 +230,11 @@ typedef struct compat_siginfo {
u32 _pkey;
} _addr_pkey;
/* used when si_code=TRAP_PERF */
- compat_ulong_t _perf;
+ struct {
+ compat_ulong_t _data;
+ u32 _type;
+ u32 _flags;
+ } _perf;
};
} _sigfault;
@@ -259,6 +257,37 @@ struct compat_rlimit {
compat_ulong_t rlim_max;
};
+#ifdef __ARCH_NEED_COMPAT_FLOCK64_PACKED
+#define __ARCH_COMPAT_FLOCK64_PACK __attribute__((packed))
+#else
+#define __ARCH_COMPAT_FLOCK64_PACK
+#endif
+
+struct compat_flock {
+ short l_type;
+ short l_whence;
+ compat_off_t l_start;
+ compat_off_t l_len;
+#ifdef __ARCH_COMPAT_FLOCK_EXTRA_SYSID
+ __ARCH_COMPAT_FLOCK_EXTRA_SYSID
+#endif
+ compat_pid_t l_pid;
+#ifdef __ARCH_COMPAT_FLOCK_PAD
+ __ARCH_COMPAT_FLOCK_PAD
+#endif
+};
+
+struct compat_flock64 {
+ short l_type;
+ short l_whence;
+ compat_loff_t l_start;
+ compat_loff_t l_len;
+ compat_pid_t l_pid;
+#ifdef __ARCH_COMPAT_FLOCK64_PAD
+ __ARCH_COMPAT_FLOCK64_PAD
+#endif
+} __ARCH_COMPAT_FLOCK64_PACK;
+
struct compat_rusage {
struct old_timeval32 ru_utime;
struct old_timeval32 ru_stime;
@@ -382,6 +411,7 @@ struct compat_keyctl_kdf_params {
__u32 __spare[8];
};
+struct compat_stat;
struct compat_statfs;
struct compat_statfs64;
struct compat_old_linux_dirent;
@@ -395,14 +425,6 @@ struct compat_kexec_segment;
struct compat_mq_attr;
struct compat_msgbuf;
-#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
-
-#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
-
-long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
- unsigned long bitmap_size);
-long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
- unsigned long bitmap_size);
void copy_siginfo_to_external32(struct compat_siginfo *to,
const struct kernel_siginfo *from);
int copy_siginfo_from_user32(kernel_siginfo_t *to,
@@ -426,7 +448,7 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
unsigned int size)
{
/* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
-#ifdef __BIG_ENDIAN
+#if defined(__BIG_ENDIAN) && defined(CONFIG_64BIT)
compat_sigset_t v;
switch (_NSIG_WORDS) {
case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
@@ -519,8 +541,6 @@ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
struct epoll_event; /* fortunately, this one is fixed-layout */
-extern void __user *compat_alloc_user_space(unsigned long len);
-
int compat_restore_altstack(const compat_stack_t __user *uss);
int __compat_save_altstack(compat_stack_t __user *, unsigned long);
#define unsafe_compat_save_altstack(uss, sp, label) do { \
@@ -530,8 +550,6 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long);
&__uss->ss_sp, label); \
unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \
unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \
- if (t->sas_ss_flags & SS_AUTODISARM) \
- sas_ss_reset(t); \
} while (0);
/*
@@ -563,11 +581,6 @@ asmlinkage long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id,
struct io_event __user *events,
struct __kernel_timespec __user *timeout,
const struct __compat_aio_sigset __user *usig);
-
-/* fs/cookies.c */
-asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
-
-/* fs/eventpoll.c */
asmlinkage long compat_sys_epoll_pwait(int epfd,
struct epoll_event __user *events,
int maxevents, int timeout,
@@ -579,18 +592,12 @@ asmlinkage long compat_sys_epoll_pwait2(int epfd,
const struct __kernel_timespec __user *timeout,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
-
-/* fs/fcntl.c */
asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
compat_ulong_t arg);
asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
compat_ulong_t arg);
-
-/* fs/ioctl.c */
asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
compat_ulong_t arg);
-
-/* fs/open.c */
asmlinkage long compat_sys_statfs(const char __user *pathname,
struct compat_statfs __user *buf);
asmlinkage long compat_sys_statfs64(const char __user *pathname,
@@ -601,17 +608,13 @@ asmlinkage long compat_sys_fstatfs(unsigned int fd,
asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
struct compat_statfs64 __user *buf);
asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
-asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
+asmlinkage long compat_sys_ftruncate(unsigned int, compat_off_t);
/* No generic prototype for truncate64, ftruncate64, fallocate */
asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
int flags, umode_t mode);
-
-/* fs/readdir.c */
asmlinkage long compat_sys_getdents(unsigned int fd,
struct compat_linux_dirent __user *dirent,
unsigned int count);
-
-/* fs/read_write.c */
asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
/* No generic prototype for pread64 and pwrite64 */
asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
@@ -631,14 +634,10 @@ asmlinkage long compat_sys_pwritev64(unsigned long fd,
const struct iovec __user *vec,
unsigned long vlen, loff_t pos);
#endif
-
-/* fs/sendfile.c */
asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
compat_off_t __user *offset, compat_size_t count);
asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
compat_loff_t __user *offset, compat_size_t count);
-
-/* fs/select.c */
asmlinkage long compat_sys_pselect6_time32(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp,
compat_ulong_t __user *exp,
@@ -659,68 +658,45 @@ asmlinkage long compat_sys_ppoll_time64(struct pollfd __user *ufds,
struct __kernel_timespec __user *tsp,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
-
-/* fs/signalfd.c */
asmlinkage long compat_sys_signalfd4(int ufd,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize, int flags);
-
-/* fs/stat.c */
asmlinkage long compat_sys_newfstatat(unsigned int dfd,
const char __user *filename,
struct compat_stat __user *statbuf,
int flag);
asmlinkage long compat_sys_newfstat(unsigned int fd,
struct compat_stat __user *statbuf);
-
-/* fs/sync.c: No generic prototype for sync_file_range and sync_file_range2 */
-
-/* kernel/exit.c */
+/* No generic prototype for sync_file_range and sync_file_range2 */
asmlinkage long compat_sys_waitid(int, compat_pid_t,
struct compat_siginfo __user *, int,
struct compat_rusage __user *);
-
-
-
-/* kernel/futex.c */
asmlinkage long
compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
compat_size_t len);
asmlinkage long
compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
compat_size_t __user *len_ptr);
-
-/* kernel/itimer.c */
asmlinkage long compat_sys_getitimer(int which,
struct old_itimerval32 __user *it);
asmlinkage long compat_sys_setitimer(int which,
struct old_itimerval32 __user *in,
struct old_itimerval32 __user *out);
-
-/* kernel/kexec.c */
asmlinkage long compat_sys_kexec_load(compat_ulong_t entry,
compat_ulong_t nr_segments,
struct compat_kexec_segment __user *,
compat_ulong_t flags);
-
-/* kernel/posix-timers.c */
asmlinkage long compat_sys_timer_create(clockid_t which_clock,
struct compat_sigevent __user *timer_event_spec,
timer_t __user *created_timer_id);
-
-/* kernel/ptrace.c */
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
compat_long_t addr, compat_long_t data);
-
-/* kernel/sched/core.c */
asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
-
-/* kernel/signal.c */
asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
compat_stack_t __user *uoss_ptr);
asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset,
@@ -745,25 +721,17 @@ asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese
asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
/* No generic prototype for rt_sigreturn */
-
-/* kernel/sys.c */
asmlinkage long compat_sys_times(struct compat_tms __user *tbuf);
asmlinkage long compat_sys_getrlimit(unsigned int resource,
struct compat_rlimit __user *rlim);
asmlinkage long compat_sys_setrlimit(unsigned int resource,
struct compat_rlimit __user *rlim);
asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
-
-/* kernel/time.c */
asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
-
-/* kernel/timer.c */
asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info);
-
-/* ipc/mqueue.c */
asmlinkage long compat_sys_mq_open(const char __user *u_name,
int oflag, compat_mode_t mode,
struct compat_mq_attr __user *u_attr);
@@ -772,22 +740,14 @@ asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
const struct compat_mq_attr __user *u_mqstat,
struct compat_mq_attr __user *u_omqstat);
-
-/* ipc/msg.c */
asmlinkage long compat_sys_msgctl(int first, int second, void __user *uptr);
asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg);
asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
compat_ssize_t msgsz, int msgflg);
-
-/* ipc/sem.c */
asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
-
-/* ipc/shm.c */
asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr);
asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
-
-/* net/socket.c */
asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len,
unsigned flags, struct sockaddr __user *addr,
int __user *addrlen);
@@ -795,40 +755,13 @@ asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
unsigned flags);
asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
unsigned int flags);
-
-/* mm/filemap.c: No generic prototype for readahead */
-
-/* security/keys/keyctl.c */
+/* No generic prototype for readahead */
asmlinkage long compat_sys_keyctl(u32 option,
u32 arg2, u32 arg3, u32 arg4, u32 arg5);
-
-/* arch/example/kernel/sys_example.c */
asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
const compat_uptr_t __user *envp);
-
-/* mm/fadvise.c: No generic prototype for fadvise64_64 */
-
-/* mm/, CONFIG_MMU only */
-asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
- compat_ulong_t mode,
- compat_ulong_t __user *nmask,
- compat_ulong_t maxnode, compat_ulong_t flags);
-asmlinkage long compat_sys_get_mempolicy(int __user *policy,
- compat_ulong_t __user *nmask,
- compat_ulong_t maxnode,
- compat_ulong_t addr,
- compat_ulong_t flags);
-asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
- compat_ulong_t maxnode);
-asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
- compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,
- const compat_ulong_t __user *new_nodes);
-asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages,
- __u32 __user *pages,
- const int __user *nodes,
- int __user *status,
- int flags);
-
+/* No generic prototype for fadvise64_64 */
+/* CONFIG_MMU only */
asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
@@ -898,18 +831,18 @@ asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
unsigned flags);
-/* obsolete: fs/readdir.c */
+/* obsolete */
asmlinkage long compat_sys_old_readdir(unsigned int fd,
struct compat_old_linux_dirent __user *,
unsigned int count);
-/* obsolete: fs/select.c */
+/* obsolete */
asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg);
-/* obsolete: ipc */
+/* obsolete */
asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
-/* obsolete: kernel/signal.c */
+/* obsolete */
#ifdef __ARCH_WANT_SYS_SIGPENDING
asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set);
#endif
@@ -924,22 +857,48 @@ asmlinkage long compat_sys_sigaction(int sig,
struct compat_old_sigaction __user *oact);
#endif
-/* obsolete: net/socket.c */
+/* obsolete */
asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
-#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+#ifdef __ARCH_WANT_COMPAT_TRUNCATE64
+asmlinkage long compat_sys_truncate64(const char __user *pathname, compat_arg_u64(len));
+#endif
+#ifdef __ARCH_WANT_COMPAT_FTRUNCATE64
+asmlinkage long compat_sys_ftruncate64(unsigned int fd, compat_arg_u64(len));
+#endif
-/*
- * For most but not all architectures, "am I in a compat syscall?" and
- * "am I a compat task?" are the same question. For architectures on which
- * they aren't the same question, arch code can override in_compat_syscall.
- */
+#ifdef __ARCH_WANT_COMPAT_FALLOCATE
+asmlinkage long compat_sys_fallocate(int fd, int mode, compat_arg_u64(offset),
+ compat_arg_u64(len));
+#endif
-#ifndef in_compat_syscall
-static inline bool in_compat_syscall(void) { return is_compat_task(); }
+#ifdef __ARCH_WANT_COMPAT_PREAD64
+asmlinkage long compat_sys_pread64(unsigned int fd, char __user *buf, size_t count,
+ compat_arg_u64(pos));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_PWRITE64
+asmlinkage long compat_sys_pwrite64(unsigned int fd, const char __user *buf, size_t count,
+ compat_arg_u64(pos));
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYNC_FILE_RANGE
+asmlinkage long compat_sys_sync_file_range(int fd, compat_arg_u64(pos),
+ compat_arg_u64(nbytes), unsigned int flags);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_FADVISE64_64
+asmlinkage long compat_sys_fadvise64_64(int fd, compat_arg_u64(pos),
+ compat_arg_u64(len), int advice);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_READAHEAD
+asmlinkage long compat_sys_readahead(int fd, compat_arg_u64(offset), size_t count);
#endif
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
+
/**
* ns_to_old_timeval32 - Compat version of ns_to_timeval
* @nsec: the nanoseconds value to be converted
@@ -969,6 +928,17 @@ int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz,
int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
struct compat_statfs64 __user * buf);
+#ifdef CONFIG_COMPAT
+
+/*
+ * For most but not all architectures, "am I in a compat syscall?" and
+ * "am I a compat task?" are the same question. For architectures on which
+ * they aren't the same question, arch code can override in_compat_syscall.
+ */
+#ifndef in_compat_syscall
+static inline bool in_compat_syscall(void) { return is_compat_task(); }
+#endif
+
#else /* !CONFIG_COMPAT */
#define is_compat_task() (0)
@@ -978,6 +948,15 @@ static inline bool in_compat_syscall(void) { return false; }
#endif /* CONFIG_COMPAT */
+#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
+
+#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
+
+long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
+ unsigned long bitmap_size);
+long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
+ unsigned long bitmap_size);
+
/*
* Some legacy ABIs like the i386 one use less than natural alignment for 64-bit
* types, and will need special compat treatment for that. Most architectures
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index adbe76b203e2..107ce05bd16e 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -1,30 +1,59 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_TYPES_H
-#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
+#error "Please do not include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
#endif
/* Compiler specific definitions for Clang compiler */
-/* same as gcc, this was present in clang-2.6 so we can assume it works
- * with any version that can compile the kernel
+/*
+ * Clang prior to 17 is being silly and considers many __cleanup() variables
+ * as unused (because they are, their sole purpose is to go out of scope).
+ *
+ * https://github.com/llvm/llvm-project/commit/877210faa447f4cc7db87812f8ed80e398fedd61
*/
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+#undef __cleanup
+#define __cleanup(func) __maybe_unused __attribute__((__cleanup__(func)))
/* all clang versions usable with the kernel support KASAN ABI version 5 */
#define KASAN_ABI_VERSION 5
-#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
-/* Emulate GCC's __SANITIZE_ADDRESS__ flag */
+/*
+ * Clang 22 added preprocessor macros to match GCC, in hopes of eventually
+ * dropping __has_feature support for sanitizers:
+ * https://github.com/llvm/llvm-project/commit/568c23bbd3303518c5056d7f03444dae4fdc8a9c
+ * Create these macros for older versions of clang so that it is easy to clean
+ * up once the minimum supported version of LLVM for building the kernel always
+ * creates these macros.
+ *
+ * Note: Checking __has_feature(*_sanitizer) is only true if the feature is
+ * enabled. Therefore it is not required to additionally check defined(CONFIG_*)
+ * to avoid adding redundant attributes in other configurations.
+ */
+#if __has_feature(address_sanitizer) && !defined(__SANITIZE_ADDRESS__)
+#define __SANITIZE_ADDRESS__
+#endif
+#if __has_feature(hwaddress_sanitizer) && !defined(__SANITIZE_HWADDRESS__)
+#define __SANITIZE_HWADDRESS__
+#endif
+#if __has_feature(thread_sanitizer) && !defined(__SANITIZE_THREAD__)
+#define __SANITIZE_THREAD__
+#endif
+
+/*
+ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel.
+ */
+#ifdef __SANITIZE_HWADDRESS__
#define __SANITIZE_ADDRESS__
+#endif
+
+#ifdef __SANITIZE_ADDRESS__
#define __no_sanitize_address \
__attribute__((no_sanitize("address", "hwaddress")))
#else
#define __no_sanitize_address
#endif
-#if __has_feature(thread_sanitizer)
-/* emulate gcc's __SANITIZE_THREAD__ flag */
-#define __SANITIZE_THREAD__
+#ifdef __SANITIZE_THREAD__
#define __no_sanitize_thread \
__attribute__((no_sanitize("thread")))
#else
@@ -45,22 +74,82 @@
#define __no_sanitize_undefined
#endif
+#if __has_feature(memory_sanitizer)
+#define __SANITIZE_MEMORY__
+/*
+ * Unlike other sanitizers, KMSAN still inserts code into functions marked with
+ * no_sanitize("kernel-memory"). Using disable_sanitizer_instrumentation
+ * provides the behavior consistent with other __no_sanitize_ attributes,
+ * guaranteeing that __no_sanitize_memory functions remain uninstrumented.
+ */
+#define __no_sanitize_memory __disable_sanitizer_instrumentation
+
+/*
+ * The __no_kmsan_checks attribute ensures that a function does not produce
+ * false positive reports by:
+ * - initializing all local variables and memory stores in this function;
+ * - skipping all shadow checks;
+ * - passing initialized arguments to this function's callees.
+ */
+#define __no_kmsan_checks __attribute__((no_sanitize("kernel-memory")))
+#else
+#define __no_sanitize_memory
+#define __no_kmsan_checks
+#endif
+
/*
- * Not all versions of clang implement the type-generic versions
- * of the builtin overflow checkers. Fortunately, clang implements
- * __has_builtin allowing us to avoid awkward version
- * checks. Unfortunately, we don't know which version of gcc clang
- * pretends to be, so the macro may or may not be defined.
+ * Support for __has_feature(coverage_sanitizer) was added in Clang 13 together
+ * with no_sanitize("coverage"). Prior versions of Clang support coverage
+ * instrumentation, but cannot be queried for support by the preprocessor.
*/
-#if __has_builtin(__builtin_mul_overflow) && \
- __has_builtin(__builtin_add_overflow) && \
- __has_builtin(__builtin_sub_overflow)
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+#if __has_feature(coverage_sanitizer)
+#define __no_sanitize_coverage __attribute__((no_sanitize("coverage")))
+#else
+#define __no_sanitize_coverage
#endif
+/* Only Clang needs to disable the coverage sanitizer for kstack_erase. */
+#define __no_kstack_erase __no_sanitize_coverage
+
#if __has_feature(shadow_call_stack)
# define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#endif
-#define __nocfi __attribute__((__no_sanitize__("cfi")))
-#define __cficanonical __attribute__((__cfi_canonical_jump_table__))
+/*
+ * Turn individual warnings and errors on and off locally, depending
+ * on version.
+ */
+#define __diag_clang(version, severity, s) \
+ __diag_clang_ ## version(__diag_clang_ ## severity s)
+
+/* Severity used in pragma directives */
+#define __diag_clang_ignore ignored
+#define __diag_clang_warn warning
+#define __diag_clang_error error
+
+#define __diag_str1(s) #s
+#define __diag_str(s) __diag_str1(s)
+#define __diag(s) _Pragma(__diag_str(clang diagnostic s))
+
+#define __diag_clang_13(s) __diag(s)
+
+#define __diag_ignore_all(option, comment) \
+ __diag_clang(13, ignore, option)
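+
+/*
+ * For instance (illustrative; __diag_push()/__diag_pop() come from
+ * compiler_types.h):
+ *
+ *   __diag_push();
+ *   __diag_ignore_all("-Wmissing-prototypes",
+ *                     "exported only via BTF, no prototypes needed");
+ *   ...
+ *   __diag_pop();
+ */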
+
+/*
+ * clang has horrible behavior with "g" or "rm" constraints for asm
+ * inputs, turning them into something worse than "m". Avoid using
+ * constraints with multiple possible uses (but "ir" seems to be ok):
+ *
+ * https://github.com/llvm/llvm-project/issues/20571
+ */
+#define ASM_INPUT_G "ir"
+#define ASM_INPUT_RM "r"
+
+/*
+ * Declare compiler support for __typeof_unqual__() operator.
+ *
+ * Bindgen uses LLVM even if our C compiler is GCC, so we cannot
+ * rely on the auto-detected CONFIG_CC_HAS_TYPEOF_UNQUAL.
+ */
+#define CC_HAS_TYPEOF_UNQUAL (__clang_major__ >= 19)
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 5d97ef738a57..5de824a0b3d7 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_TYPES_H
-#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
+#error "Please do not include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
#endif
/*
@@ -35,17 +35,6 @@
(typeof(ptr)) (__ptr + (off)); \
})
-#ifdef CONFIG_RETPOLINE
-#define __noretpoline __attribute__((__indirect_branch__("keep")))
-#endif
-
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-
-#define __compiletime_warning(message) __attribute__((__warning__(message)))
-#define __compiletime_error(message) __attribute__((__error__(message)))
-
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
#define __latent_entropy __attribute__((latent_entropy))
#endif
@@ -59,37 +48,6 @@
*/
#define barrier_before_unreachable() asm volatile("")
-/*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
- * control elsewhere.
- */
-#define unreachable() \
- do { \
- annotate_unreachable(); \
- barrier_before_unreachable(); \
- __builtin_unreachable(); \
- } while (0)
-
-#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
-#define __randomize_layout __attribute__((randomize_layout))
-#define __no_randomize_layout __attribute__((no_randomize_layout))
-/* This anon struct can add padding, so only enable it under randstruct. */
-#define randomized_struct_fields_start struct {
-#define randomized_struct_fields_end } __randomize_layout;
-#endif
-
-/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-
#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
@@ -98,35 +56,52 @@
#if GCC_VERSION >= 70000
#define KASAN_ABI_VERSION 5
-#elif GCC_VERSION >= 50000
+#else
#define KASAN_ABI_VERSION 4
-#elif GCC_VERSION >= 40902
-#define KASAN_ABI_VERSION 3
#endif
-#if __has_attribute(__no_sanitize_address__)
-#define __no_sanitize_address __attribute__((no_sanitize_address))
+#ifdef CONFIG_SHADOW_CALL_STACK
+#define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
+#endif
+
+#ifdef __SANITIZE_HWADDRESS__
+#define __no_sanitize_address __attribute__((__no_sanitize__("hwaddress")))
#else
-#define __no_sanitize_address
+#define __no_sanitize_address __attribute__((__no_sanitize_address__))
#endif
-#if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__)
-#define __no_sanitize_thread __attribute__((no_sanitize_thread))
+#if defined(__SANITIZE_THREAD__)
+#define __no_sanitize_thread __attribute__((__no_sanitize_thread__))
#else
#define __no_sanitize_thread
#endif
-#if __has_attribute(__no_sanitize_undefined__)
-#define __no_sanitize_undefined __attribute__((no_sanitize_undefined))
+#define __no_sanitize_undefined __attribute__((__no_sanitize_undefined__))
+
+/*
+ * Only supported since gcc >= 12
+ */
+#if defined(CONFIG_KCOV) && __has_attribute(__no_sanitize_coverage__)
+#define __no_sanitize_coverage __attribute__((__no_sanitize_coverage__))
#else
-#define __no_sanitize_undefined
+#define __no_sanitize_coverage
#endif
-#if GCC_VERSION >= 50100
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+/*
+ * Treat __SANITIZE_HWADDRESS__ the same as __SANITIZE_ADDRESS__ in the kernel,
+ * matching the defines used by Clang.
+ */
+#ifdef __SANITIZE_HWADDRESS__
+#define __SANITIZE_ADDRESS__
#endif
/*
+ * GCC does not support KMSAN.
+ */
+#define __no_sanitize_memory
+#define __no_kmsan_checks
+
+/*
* Turn individual warnings and errors on and off locally, depending
* on version.
*/
@@ -147,3 +122,24 @@
#else
#define __diag_GCC_8(s)
#endif
+
+#define __diag_GCC_all(s) __diag(s)
+
+#define __diag_ignore_all(option, comment) \
+ __diag(__diag_GCC_ignore option)
+
+/*
+ * Prior to 9.1, -Wno-alloc-size-larger-than (and therefore the "alloc_size"
+ * attribute) do not work, and must be disabled.
+ */
+#if GCC_VERSION < 90100
+#undef __alloc_size__
+#endif
+
+/*
+ * Declare compiler support for __typeof_unqual__() operator.
+ *
+ * Bindgen uses LLVM even if our C compiler is GCC, so we cannot
+ * rely on the auto-detected CONFIG_CC_HAS_TYPEOF_UNQUAL.
+ */
+#define CC_HAS_TYPEOF_UNQUAL (__GNUC__ >= 14)
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
deleted file mode 100644
index b17f3cd18334..000000000000
--- a/include/linux/compiler-intel.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_COMPILER_TYPES_H
-#error "Please don't include <linux/compiler-intel.h> directly, include <linux/compiler.h> instead."
-#endif
-
-#ifdef __ECC
-
-/* Compiler specific definitions for Intel ECC compiler */
-
-#include <asm/intrinsics.h>
-
-/* Intel ECC compiler doesn't support gcc specific asm stmts.
- * It uses intrinsics to do the equivalent things.
- */
-
-#define barrier() __memory_barrier()
-#define barrier_data(ptr) barrier()
-
-#define RELOC_HIDE(ptr, off) \
- ({ unsigned long __ptr; \
- __ptr = (unsigned long) (ptr); \
- (typeof(ptr)) (__ptr + (off)); })
-
-/* This should act as an optimization barrier on var.
- * Given that this compiler does not have inline assembly, a compiler barrier
- * is the best we can do.
- */
-#define OPTIMIZER_HIDE_VAR(var) barrier()
-
-#endif
-
-/* icc has this, but it's called _bswap16 */
-#define __HAVE_BUILTIN_BSWAP16__
-#define __builtin_bswap16 _bswap16
diff --git a/include/linux/compiler-version.h b/include/linux/compiler-version.h
index 573fa85b6c0c..ac1665a98a15 100644
--- a/include/linux/compiler-version.h
+++ b/include/linux/compiler-version.h
@@ -12,3 +12,33 @@
* and add dependency on include/config/CC_VERSION_TEXT, which is touched
* by Kconfig when the version string from the compiler changes.
*/
+
+/* Additional tree-wide dependencies start here. */
+
+/*
+ * If any of the GCC plugins change, we need to rebuild everything that
+ * was built with them, as they may have changed their behavior and those
+ * behaviors may need to be synchronized across all translation units.
+ */
+#ifdef GCC_PLUGINS
+#include <generated/gcc-plugins.h>
+#endif
+
+/*
+ * If the randstruct seed itself changes (whether for GCC plugins or
+ * Clang), the entire tree needs to be rebuilt, since the randomization
+ * of structures may otherwise differ between compilation units.
+ */
+#ifdef RANDSTRUCT
+#include <generated/randstruct_hash.h>
+#endif
+
+/*
+ * If any external changes affect Clang's integer wrapping sanitizer
+ * behavior, a full rebuild is needed as the coverage for wrapping types
+ * may have changed, which may impact the expected behaviors that should
+ * not differ between compilation units.
+ */
+#ifdef INTEGER_WRAP
+#include <generated/integer-wrap.h>
+#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index df5b405e6305..04487c9bd751 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -12,11 +12,10 @@
* Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
* to disable branch tracing on a per file basis.
*/
-#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
- && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
int expect, int is_constant);
-
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
+ && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
#define likely_notrace(x) __builtin_expect(!!(x), 1)
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
@@ -109,48 +108,22 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#endif
/* Unreachable code */
-#ifdef CONFIG_STACK_VALIDATION
-/*
- * These macros help objtool understand GCC code flow for unreachable code.
- * The __COUNTER__ based labels are a hack to make each instance of the macros
- * unique, to convince GCC not to merge duplicate inline asm statements.
- */
-#define annotate_reachable() ({ \
- asm volatile("%c0:\n\t" \
- ".pushsection .discard.reachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
-})
-#define annotate_unreachable() ({ \
- asm volatile("%c0:\n\t" \
- ".pushsection .discard.unreachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
-})
-#define ASM_UNREACHABLE \
- "999:\n\t" \
- ".pushsection .discard.unreachable\n\t" \
- ".long 999b - .\n\t" \
- ".popsection\n\t"
-
+#ifdef CONFIG_OBJTOOL
/* Annotate a C jump table to allow objtool to follow the code flow */
-#define __annotate_jump_table __section(".rodata..c_jump_table")
-
-#else
-#define annotate_reachable()
-#define annotate_unreachable()
+#define __annotate_jump_table __section(".data.rel.ro.c_jump_table")
+#else /* !CONFIG_OBJTOOL */
#define __annotate_jump_table
-#endif
+#endif /* CONFIG_OBJTOOL */
-#ifndef ASM_UNREACHABLE
-# define ASM_UNREACHABLE
-#endif
-#ifndef unreachable
-# define unreachable() do { \
- annotate_unreachable(); \
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ */
+#define unreachable() do { \
+ barrier_before_unreachable(); \
__builtin_unreachable(); \
} while (0)
-#endif
/*
* KENTRY - kernel entry point
@@ -182,16 +155,19 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
(typeof(ptr)) (__ptr + (off)); })
#endif
+#define absolute_pointer(val) RELOC_HIDE((void *)(val), 0)
+
#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var) \
__asm__ ("" : "=r" (var) : "0" (var))
#endif
-/* Not-quite-unique ID. */
-#ifndef __UNIQUE_ID
-# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
-#endif
+/* Format: __UNIQUE_ID_<name>_<__COUNTER__> */
+#define __UNIQUE_ID(name) \
+ __PASTE(__UNIQUE_ID_, \
+ __PASTE(name, \
+ __PASTE(_, __COUNTER__)))
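+
+/*
+ * Illustrative sketch (not part of this patch): two expansions of
+ * __UNIQUE_ID(foo) in the same translation unit produce distinct
+ * identifiers, e.g. __UNIQUE_ID_foo_0 and __UNIQUE_ID_foo_1, because
+ * __COUNTER__ increments on every use.
+ */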
/**
* data_race - mark an expression as containing intentional data races
@@ -199,31 +175,92 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* This data_race() macro is useful for situations in which data races
* should be forgiven. One example is diagnostic code that accesses
* shared variables but is not a part of the core synchronization design.
+ * For example, if accesses to a given variable are protected by a lock,
+ * except for diagnostic code, then the accesses under the lock should
+ * be plain C-language accesses and those in the diagnostic code should
+ * use data_race(). This way, KCSAN will complain if buggy lockless
+ * accesses to that variable are introduced, even if the buggy accesses
+ * are protected by READ_ONCE() or WRITE_ONCE().
*
* This macro *does not* affect normal code generation, but is a hint
- * to tooling that data races here are to be ignored.
+ * to tooling that data races here are to be ignored. If the access must
+ * be atomic *and* KCSAN should ignore the access, use both data_race()
+ * and READ_ONCE(), for example, data_race(READ_ONCE(x)).
*/
#define data_race(expr) \
({ \
- __unqual_scalar_typeof(({ expr; })) __v = ({ \
- __kcsan_disable_current(); \
- expr; \
- }); \
+ __kcsan_disable_current(); \
+ auto __v = (expr); \
__kcsan_enable_current(); \
__v; \
})
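+
+/*
+ * Illustrative sketch (hypothetical variable, not part of this patch):
+ * a diagnostic-only read of a lock-protected counter outside the lock:
+ *
+ *	pr_debug("count=%lu\n", data_race(shared_count));
+ *
+ * KCSAN ignores this access but still checks all other accesses to
+ * shared_count.
+ */
+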
+#ifdef __CHECKER__
+#define __BUILD_BUG_ON_ZERO_MSG(e, msg, ...) (0)
+#else /* __CHECKER__ */
+#define __BUILD_BUG_ON_ZERO_MSG(e, msg, ...) ((int)sizeof(struct {_Static_assert(!(e), msg);}))
+#endif /* __CHECKER__ */
+
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __is_array(a) (!__same_type((a), &(a)[0]))
+#define __must_be_array(a) __BUILD_BUG_ON_ZERO_MSG(!__is_array(a), \
+ "must be array")
+
+#define __is_byte_array(a) (__is_array(a) && sizeof((a)[0]) == 1)
+#define __must_be_byte_array(a) __BUILD_BUG_ON_ZERO_MSG(!__is_byte_array(a), \
+ "must be byte array")
+
+/*
+ * If the "nonstring" attribute isn't available, we have to return true
+ * so the __must_*() checks pass when "nonstring" isn't supported.
+ */
+#if __has_attribute(__nonstring__) && defined(__annotated)
+#define __is_cstr(a) (!__annotated(a, nonstring))
+#define __is_noncstr(a) (__annotated(a, nonstring))
+#else
+#define __is_cstr(a) (true)
+#define __is_noncstr(a) (true)
+#endif
+
+/* Require C Strings (i.e. NUL-terminated) lack the "nonstring" attribute. */
+#define __must_be_cstr(p) \
+ __BUILD_BUG_ON_ZERO_MSG(!__is_cstr(p), \
+ "must be C-string (NUL-terminated)")
+#define __must_be_noncstr(p) \
+ __BUILD_BUG_ON_ZERO_MSG(!__is_noncstr(p), \
+ "must be non-C-string (not NUL-terminated)")
+
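+/*
+ * Illustrative sketch (hypothetical wrapper, not part of this patch):
+ * reject "nonstring" arrays at compile time in a string helper:
+ *
+ *	#define my_strlen(s)	(__must_be_cstr(s) + strlen(s))
+ */
+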
+/*
+ * Use __typeof_unqual__() when available.
+ *
+ * XXX: Remove test for __CHECKER__ once
+ * sparse learns about __typeof_unqual__().
+ */
+#if CC_HAS_TYPEOF_UNQUAL && !defined(__CHECKER__)
+# define USE_TYPEOF_UNQUAL 1
+#endif
+
+/*
+ * Define TYPEOF_UNQUAL() to use __typeof_unqual__() as typeof
+ * operator when available, to return an unqualified type of the exp.
+ */
+#if defined(USE_TYPEOF_UNQUAL)
+# define TYPEOF_UNQUAL(exp) __typeof_unqual__(exp)
+#else
+# define TYPEOF_UNQUAL(exp) __typeof__(exp)
+#endif
+
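+/*
+ * Illustrative sketch (not part of this patch): TYPEOF_UNQUAL() drops
+ * qualifiers, so the local copy below has plain type 'int' even though
+ * the source is const volatile:
+ *
+ *	static int read_val(const volatile int *p)
+ *	{
+ *		TYPEOF_UNQUAL(*p) v = *p;
+ *		return v;
+ *	}
+ */
+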
#endif /* __KERNEL__ */
+#if defined(CONFIG_CFI) && !defined(__DISABLE_EXPORTS) && !defined(BUILD_VDSO)
/*
- * Force the compiler to emit 'sym' as a symbol, so that we can reference
- * it from inline assembler. Necessary in case 'sym' could be inlined
- * otherwise, or eliminated entirely due to lack of references that are
- * visible to the compiler.
+ * Force a reference to the external symbol so the compiler generates
+ * __kcfi_typeid.
*/
-#define __ADDRESSABLE(sym) \
- static void * __section(".discard.addressable") __used \
- __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
+#define KCFI_REFERENCE(sym) __ADDRESSABLE(sym)
+#else
+#define KCFI_REFERENCE(sym)
+#endif
/**
* offset_to_ptr - convert a relative memory offset to an absolute pointer
@@ -236,8 +273,103 @@ static inline void *offset_to_ptr(const int *off)
#endif /* __ASSEMBLY__ */
-/* &a[0] degrades to a pointer: a different type from an array */
-#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+/*
+ * Force the compiler to emit 'sym' as a symbol, so that we can reference
+ * it from inline assembler. Necessary in case 'sym' could be inlined
+ * otherwise, or eliminated entirely due to lack of references that are
+ * visible to the compiler.
+ */
+#define ___ADDRESSABLE(sym, __attrs) \
+ static void * __used __attrs \
+ __UNIQUE_ID(__PASTE(addressable_, sym)) = (void *)(uintptr_t)&sym;
+
+#define __ADDRESSABLE(sym) \
+ ___ADDRESSABLE(sym, __section(".discard.addressable"))
+
+/*
+ * This returns a constant expression while determining if an argument is
+ * a constant expression, most importantly without evaluating the argument.
+ * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
+ *
+ * Details:
+ * - sizeof() returns an integer constant expression, and does not evaluate
+ * the value of its operand; it only examines the type of its operand.
+ * - The result of comparing two integer constant expressions is also
+ * an integer constant expression.
+ * - The first literal "8" isn't important. It could be any literal value.
+ * - The second literal "8" is to avoid warnings about unaligned pointers;
+ * this could otherwise just be "1".
+ * - (long)(x) is used to avoid warnings about 64-bit types on 32-bit
+ * architectures.
+ * - The C Standard defines "null pointer constant", "(void *)0", as
+ * distinct from other void pointers.
+ * - If (x) is an integer constant expression, then the "* 0l" resolves
+ * it into an integer constant expression of value 0. Since it is cast to
+ * "void *", this makes the second operand a null pointer constant.
+ * - If (x) is not an integer constant expression, then the second operand
+ * resolves to a void pointer (but not a null pointer constant: the value
+ * is not an integer constant 0).
+ * - The conditional operator's third operand, "(int *)8", is an object
+ * pointer (to type "int").
+ * - The behavior (including the return type) of the conditional operator
+ * ("operand1 ? operand2 : operand3") depends on the kind of expressions
+ * given for the second and third operands. This is the central mechanism
+ * of the macro:
+ * - When one operand is a null pointer constant (i.e. when x is an integer
+ * constant expression) and the other is an object pointer (i.e. our
+ * third operand), the conditional operator returns the type of the
+ * object pointer operand (i.e. "int *"). Here, within the sizeof(), we
+ * would then get:
+ * sizeof(*((int *)(...))) == sizeof(int) == 4
+ * - When one operand is a void pointer (i.e. when x is not an integer
+ * constant expression) and the other is an object pointer (i.e. our
+ * third operand), the conditional operator returns a "void *" type.
+ * Here, within the sizeof(), we would then get:
+ * sizeof(*((void *)(...))) == sizeof(void) == 1
+ * - The equality comparison to "sizeof(int)" therefore depends on (x):
+ * sizeof(int) == sizeof(int) (x) was a constant expression
+ * sizeof(int) != sizeof(void) (x) was not a constant expression
+ */
+#define __is_constexpr(x) \
+ (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+
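+/*
+ * Illustrative sketch (hypothetical variable, not part of this patch):
+ *
+ *	__is_constexpr(8)		-> 1, an integer constant expression
+ *	__is_constexpr(some_int_var)	-> 0, without evaluating it
+ */
+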
+/*
+ * Whether 'type' is a signed type or an unsigned type. Supports scalar types,
+ * bool and also pointer types.
+ */
+#define is_signed_type(type) (((type)(-1)) < (__force type)1)
+#define is_unsigned_type(type) (!is_signed_type(type))
+
+/*
+ * Useful shorthand for "is this condition known at compile-time?"
+ *
+ * Note that the condition may involve non-constant values,
+ * but the compiler may know enough about the details of the
+ * values to determine that the condition is statically true.
+ */
+#define statically_true(x) (__builtin_constant_p(x) && (x))
+
+/*
+ * Similar to statically_true() but produces a constant expression
+ *
+ * To be used in conjunction with macros, such as BUILD_BUG_ON_ZERO(),
+ * which require their input to be a constant expression and for which
+ * statically_true() would otherwise fail.
+ *
+ * This is a trade-off: const_true() requires all its operands to be
+ * compile-time constants. Otherwise, it would always return false, even
+ * for the most trivial cases like:
+ *
+ * true || non_const_var
+ *
+ * By contrast, statically_true() is able to fold more complex
+ * tautologies and will return true on expressions such as:
+ *
+ * !(non_const_var * 8 % 4)
+ *
+ * For the general case, statically_true() is better.
+ */
+#define const_true(x) __builtin_choose_expr(__is_constexpr(x), x, false)
/*
* This is needed in functions which generate the stack canary, see
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index c043b8d2b17b..c16d4199bf92 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -21,25 +21,6 @@
*/
/*
- * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
- * In the meantime, to support gcc < 5, we implement __has_attribute
- * by hand.
- */
-#ifndef __has_attribute
-# define __has_attribute(x) __GCC4_has_attribute_##x
-# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9)
-# define __GCC4_has_attribute___copy__ 0
-# define __GCC4_has_attribute___designated_init__ 0
-# define __GCC4_has_attribute___externally_visible__ 1
-# define __GCC4_has_attribute___no_caller_saved_registers__ 0
-# define __GCC4_has_attribute___noclone__ 1
-# define __GCC4_has_attribute___nonstring__ 0
-# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
-# define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9)
-# define __GCC4_has_attribute___fallthrough__ 0
-#endif
-
-/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute
*/
#define __alias(symbol) __attribute__((__alias__(#symbol)))
@@ -53,6 +34,16 @@
#define __aligned_largest __attribute__((__aligned__))
/*
+ * Note: do not use this directly. Instead, use __alloc_size() since it is conditionally
+ * available and includes other attributes. For GCC < 9.1, __alloc_size__ gets undefined
+ * in compiler-gcc.h, due to misbehaviors.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alloc_005fsize-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#alloc-size
+ */
+#define __alloc_size__(x, ...) __attribute__((__alloc_size__(x, ## __VA_ARGS__)))
+
+/*
* Note: users of __always_inline currently do not write "inline" themselves,
* which seems to be required by gcc to apply the attribute according
* to its docs (and also "warning: always_inline function might not be
@@ -73,23 +64,16 @@
* compiler should see some alignment anyway, when the return value is
* massaged by 'flags = ptr & 3; ptr &= ~3;').
*
- * Optional: only supported since gcc >= 4.9
- * Optional: not supported by icc
- *
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#assume-aligned
*/
-#if __has_attribute(__assume_aligned__)
-# define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
-#else
-# define __assume_aligned(a, ...)
-#endif
+#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
/*
- * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
- * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-cleanup-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#cleanup
*/
-#define __cold __attribute__((__cold__))
+#define __cleanup(func) __attribute__((__cleanup__(func)))
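+
+/*
+ * Illustrative sketch (hypothetical helper and names, not part of this
+ * patch): free an allocation automatically when it goes out of scope:
+ *
+ *	static void free_int(int **p) { kfree(*p); }
+ *
+ *	int *buf __cleanup(free_int) = kmalloc(4, GFP_KERNEL);
+ *
+ * free_int(&buf) runs on every exit from the enclosing scope, including
+ * early returns.
+ */
+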
/*
* Note the long name.
@@ -101,7 +85,6 @@
/*
* Optional: only supported since gcc >= 9
* Optional: not supported by clang
- * Optional: not supported by icc
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute
*/
@@ -112,6 +95,18 @@
#endif
/*
+ * Optional: not supported by gcc
+ * Optional: only supported since clang >= 14.0
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#diagnose_as_builtin
+ */
+#if __has_attribute(__diagnose_as_builtin__)
+# define __diagnose_as(builtin...) __attribute__((__diagnose_as_builtin__(builtin)))
+#else
+# define __diagnose_as(builtin...)
+#endif
+
+/*
* Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
* attribute warnings entirely and for good") for more information.
*
@@ -124,9 +119,7 @@
#define __deprecated
/*
- * Optional: only supported since gcc >= 5.1
* Optional: not supported by clang
- * Optional: not supported by icc
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-designated_005finit-type-attribute
*/
@@ -137,6 +130,17 @@
#endif
/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-error-function-attribute
+ */
+#if __has_attribute(__error__)
+# define __compiletime_error(msg) __attribute__((__error__(msg)))
+#else
+# define __compiletime_error(msg)
+#endif
+
+/*
* Optional: not supported by clang
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-externally_005fvisible-function-attribute
@@ -162,6 +166,7 @@
/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-malloc-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#malloc
*/
#define __malloc __attribute__((__malloc__))
@@ -199,6 +204,7 @@
* must end with any of these keywords:
* break;
* fallthrough;
+ * continue;
* goto <label>;
* return [expression];
*
@@ -227,7 +233,6 @@
/*
* Optional: only supported since gcc >= 8
* Optional: not supported by clang
- * Optional: not supported by icc
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-nonstring-variable-attribute
*/
@@ -238,6 +243,18 @@
#endif
/*
+ * Optional: only supported since GCC >= 7.1, clang >= 13.0.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fprofile_005finstrument_005ffunction-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#no-profile-instrument-function
+ */
+#if __has_attribute(__no_profile_instrument_function__)
+# define __no_profile __attribute__((__no_profile_instrument_function__))
+#else
+# define __no_profile
+#endif
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noreturn-function-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#noreturn
* clang: https://clang.llvm.org/docs/AttributeReference.html#id1
@@ -245,12 +262,53 @@
#define __noreturn __attribute__((__noreturn__))
/*
+ * Optional: only supported since GCC >= 11.1, clang >= 7.0.
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fstack_005fprotector-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#no-stack-protector-safebuffers
+ */
+#if __has_attribute(__no_stack_protector__)
+# define __no_stack_protector __attribute__((__no_stack_protector__))
+#else
+# define __no_stack_protector
+#endif
+
+/*
+ * Optional: not supported by gcc.
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#overloadable
+ */
+#if __has_attribute(__overloadable__)
+# define __overloadable __attribute__((__overloadable__))
+#else
+# define __overloadable
+#endif
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-packed-type-attribute
* clang: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-packed-variable-attribute
*/
#define __packed __attribute__((__packed__))
/*
+ * Note: the "type" argument should match any __builtin_object_size(p, type) usage.
+ *
+ * Optional: not supported by gcc.
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#pass-object-size-pass-dynamic-object-size
+ */
+#if __has_attribute(__pass_dynamic_object_size__)
+# define __pass_dynamic_object_size(type) __attribute__((__pass_dynamic_object_size__(type)))
+#else
+# define __pass_dynamic_object_size(type)
+#endif
+#if __has_attribute(__pass_object_size__)
+# define __pass_object_size(type) __attribute__((__pass_object_size__(type)))
+#else
+# define __pass_object_size(type)
+#endif
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-pure-function-attribute
*/
#define __pure __attribute__((__pure__))
@@ -263,6 +321,18 @@
#define __section(section) __attribute__((__section__(section)))
/*
+ * Optional: only supported since gcc >= 12
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-uninitialized-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#uninitialized
+ */
+#if __has_attribute(__uninitialized__)
+# define __uninitialized __attribute__((__uninitialized__))
+#else
+# define __uninitialized
+#endif
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-unused-type-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-unused-variable-attribute
@@ -279,15 +349,64 @@
#define __used __attribute__((__used__))
/*
+ * The __used attribute guarantees that the attributed variable will
+ * always be emitted by the compiler. It doesn't prevent the compiler
+ * from emitting 'unused' warnings when it can't detect how the variable
+ * is actually used; whether the warning is emitted in that case is a
+ * compiler implementation detail.
+ *
+ * The combination of the 'used' and 'unused' attributes ensures that
+ * the variable is emitted and does not trigger 'unused' warnings.
+ * The attributes are applicable to functions and to static and global
+ * variables.
+ */
+#define __always_used __used __maybe_unused
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warn_005funused_005fresult-function-attribute
* clang: https://clang.llvm.org/docs/AttributeReference.html#nodiscard-warn-unused-result
*/
#define __must_check __attribute__((__warn_unused_result__))
/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warning-function-attribute
+ */
+#if __has_attribute(__warning__)
+# define __compiletime_warning(msg) __attribute__((__warning__(msg)))
+#else
+# define __compiletime_warning(msg)
+#endif
+
+/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#disable-sanitizer-instrumentation
+ *
+ * disable_sanitizer_instrumentation is not equivalent to
+ * no_sanitize(<sanitizer-name>): the latter may still let specific sanitizers
+ * insert code into functions to prevent false positives. Unlike that,
+ * disable_sanitizer_instrumentation prevents all kinds of instrumentation in
+ * functions with the attribute.
+ */
+#if __has_attribute(disable_sanitizer_instrumentation)
+# define __disable_sanitizer_instrumentation \
+ __attribute__((disable_sanitizer_instrumentation))
+#else
+# define __disable_sanitizer_instrumentation
+#endif
+
+/*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute
*/
#define __weak __attribute__((__weak__))
+/*
+ * Used by functions that use '__builtin_return_address'. These functions
+ * must not be split or inlined, as either can cause
+ * '__builtin_return_address' to return an unexpected address.
+ */
+#define __fix_address noinline __noclone
+
#endif /* __LINUX_COMPILER_ATTRIBUTES_H */
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index d29bda7f6ebd..1280693766b9 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -2,8 +2,46 @@
#ifndef __LINUX_COMPILER_TYPES_H
#define __LINUX_COMPILER_TYPES_H
+/*
+ * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
+ * In the meantime, to support gcc < 10, we implement __has_builtin
+ * by hand.
+ */
+#ifndef __has_builtin
+#define __has_builtin(x) (0)
+#endif
+
+/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
+#define ___PASTE(a, b) a##b
+#define __PASTE(a, b) ___PASTE(a, b)
+
#ifndef __ASSEMBLY__
+/*
+ * C23 introduces "auto" as a standard way to define type-inferred
+ * variables, but "auto" has been a (useless) keyword ever since K&R C,
+ * so it has always been "namespace reserved."
+ *
+ * Until we require C23 support at some future time, we need the gcc
+ * extension __auto_type, but there is no reason for it to appear
+ * elsewhere in the source code.
+ */
+#if __STDC_VERSION__ < 202311L
+# define auto __auto_type
+#endif
+
+/*
+ * Skipped when running bindgen due to a libclang issue;
+ * see https://github.com/rust-lang/rust-bindgen/issues/2244.
+ */
+#if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \
+ __has_attribute(btf_type_tag) && !defined(__BINDGEN__)
+# define BTF_TYPE_TAG(value) __attribute__((btf_type_tag(#value)))
+#else
+# define BTF_TYPE_TAG(value) /* nothing */
+#endif
+
+/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
/* address spaces */
# define __kernel __attribute__((address_space(0)))
@@ -16,6 +54,7 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
/* context/locking */
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
+# define __cond_acquires(x) __attribute__((context(x,0,-1)))
# define __releases(x) __attribute__((context(x,1,0)))
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
@@ -32,16 +71,18 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# ifdef STRUCTLEAK_PLUGIN
# define __user __attribute__((user))
# else
-# define __user
+# define __user BTF_TYPE_TAG(user)
# endif
# define __iomem
-# define __percpu
-# define __rcu
+# define __percpu __percpu_qual BTF_TYPE_TAG(percpu)
+# define __rcu BTF_TYPE_TAG(rcu)
+
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
/* context/locking */
# define __must_hold(x)
# define __acquires(x)
+# define __cond_acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
@@ -55,31 +96,92 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __builtin_warning(x, y...) (1)
#endif /* __CHECKER__ */
-/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
-#define ___PASTE(a,b) a##b
-#define __PASTE(a,b) ___PASTE(a,b)
-
#ifdef __KERNEL__
/* Attributes */
#include <linux/compiler_attributes.h>
-/* Builtins */
+#if CONFIG_FUNCTION_ALIGNMENT > 0
+#define __function_aligned __aligned(CONFIG_FUNCTION_ALIGNMENT)
+#else
+#define __function_aligned
+#endif
/*
- * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
- * In the meantime, to support gcc < 10, we implement __has_builtin
- * by hand.
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
+ *
+ * When -falign-functions=N is in use, we must avoid the cold attribute as
+ * GCC drops the alignment for cold functions. Worse, GCC can implicitly mark
+ * callees of cold functions as cold themselves, so it's not sufficient to add
+ * __function_aligned here as that will not ensure that callees are correctly
+ * aligned.
+ *
+ * See:
+ *
+ * https://lore.kernel.org/lkml/Y77%2FqVgvaJidFpYt@FVFF77S0Q05N
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c9
*/
-#ifndef __has_builtin
-#define __has_builtin(x) (0)
+#if defined(CONFIG_CC_HAS_SANE_FUNCTION_ALIGNMENT) || (CONFIG_FUNCTION_ALIGNMENT == 0)
+#define __cold __attribute__((__cold__))
+#else
+#define __cold
+#endif
+
+/*
+ * On x86-64 and arm64 targets, __preserve_most changes the calling convention
+ * of a function to make the code in the caller as unintrusive as possible. This
+ * convention behaves identically to the C calling convention on how arguments
+ * and return values are passed, but uses a different set of caller- and callee-
+ * saved registers.
+ *
+ * The purpose is to alleviate the burden of saving and restoring a large
+ * register set before and after the call in the caller. This is beneficial for
+ * rarely taken slow paths, such as error-reporting functions that may be called
+ * from hot paths.
+ *
+ * Note: This may conflict with instrumentation inserted on function entry which
+ * does not use __preserve_most or equivalent convention (if in assembly). Since
+ * function tracing assumes the normal C calling convention, where the attribute
+ * is supported, __preserve_most implies notrace. It is recommended to restrict
+ * use of the attribute to functions that should or already disable tracing.
+ *
+ * Optional: not supported by gcc.
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#preserve-most
+ */
+#if __has_attribute(__preserve_most__) && (defined(CONFIG_X86_64) || defined(CONFIG_ARM64))
+# define __preserve_most notrace __attribute__((__preserve_most__))
+#else
+# define __preserve_most
+#endif
+
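+/*
+ * Illustrative sketch (hypothetical function, not part of this patch):
+ *
+ *	static __preserve_most void report_corruption(void *addr);
+ *
+ * Callers on the fast path then spill almost no registers around the
+ * call; the cost is paid inside the rarely executed callee.
+ */
+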
+/*
+ * Annotating a function/variable with __retain tells the compiler to place
+ * the object in its own section and set the flag SHF_GNU_RETAIN. This flag
+ * instructs the linker to retain the object during garbage-cleanup or LTO
+ * phases.
+ *
+ * Note that the __used macro is also used to prevent functions or data
+ * being optimized out, but operates at the compiler/IR-level and may still
+ * allow unintended removal of objects during linking.
+ *
+ * Optional: only supported since gcc >= 11, clang >= 13
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-retain-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#retain
+ */
+#if __has_attribute(__retain__) && \
+ (defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || \
+ defined(CONFIG_LTO_CLANG))
+# define __retain __attribute__((__retain__))
+#else
+# define __retain
#endif
/* Compiler specific macros. */
#ifdef __clang__
#include <linux/compiler-clang.h>
-#elif defined(__INTEL_COMPILER)
-#include <linux/compiler-intel.h>
#elif defined(__GNUC__)
/* The above compilers also define __GNUC__, so order is important here. */
#include <linux/compiler-gcc.h>
@@ -137,8 +239,6 @@ struct ftrace_likely_data {
*/
#define __naked __attribute__((__naked__)) notrace
-#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
-
/*
* Prefer gnu_inline, so that extern inline functions do not emit an
* externally visible function. This makes extern inline behave as per gnu89
@@ -163,10 +263,9 @@ struct ftrace_likely_data {
/*
* GCC does not warn about unused static inline functions for -Wunused-function.
* Suppress the warning in clang as well by using __maybe_unused, but enable it
- * for W=1 build. This will allow clang to find unused functions. Remove the
- * __inline_maybe_unused entirely after fixing most of -Wunused-function warnings.
+ * for W=2 build. This will allow clang to find unused functions.
*/
-#ifdef KBUILD_EXTRA_WARN1
+#ifdef KBUILD_EXTRA_WARN2
#define __inline_maybe_unused
#else
#define __inline_maybe_unused __maybe_unused
@@ -179,6 +278,12 @@ struct ftrace_likely_data {
#define noinline_for_stack noinline
/*
+ * Use noinline_for_tracing for functions that must not be inlined,
+ * for tracing reasons.
+ */
+#define noinline_for_tracing noinline
+
+/*
* Sanitizer helper attributes: Because using __always_inline and
* __no_sanitize_* conflict, provide helper attributes that will either expand
* to __no_sanitize_* in compilation units where instrumentation is enabled
@@ -198,19 +303,150 @@ struct ftrace_likely_data {
# define __no_kasan_or_inline __always_inline
#endif
-#define __no_kcsan __no_sanitize_thread
#ifdef __SANITIZE_THREAD__
+/*
+ * Clang still emits instrumentation for __tsan_func_{entry,exit}() and builtin
+ * atomics even with __no_sanitize_thread (to avoid false positives in userspace
+ * ThreadSanitizer). The kernel's requirements are stricter and we really do not
+ * want any instrumentation with __no_kcsan.
+ *
+ * Therefore we add __disable_sanitizer_instrumentation where available to
+ * disable all instrumentation. See Kconfig.kcsan where this is mandatory.
+ */
+# define __no_kcsan __no_sanitize_thread __disable_sanitizer_instrumentation
+/*
+ * Type qualifier to mark variables where all data-racy accesses should be
+ * ignored by KCSAN. Note, the implementation simply marks these variables as
+ * volatile, since KCSAN will treat such accesses as "marked".
+ */
+# define __data_racy volatile
# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
+#else
+# define __no_kcsan
+# define __data_racy
+#endif
+
+#ifdef __SANITIZE_MEMORY__
+/*
+ * Similarly to KASAN and KCSAN, KMSAN loses function attributes of inlined
+ * functions, therefore disabling KMSAN checks also requires disabling inlining.
+ *
+ * __no_sanitize_or_inline effectively prevents KMSAN from reporting errors
+ * within the function and marks all its outputs as initialized.
+ */
+# define __no_sanitize_or_inline __no_kmsan_checks notrace __maybe_unused
#endif
#ifndef __no_sanitize_or_inline
#define __no_sanitize_or_inline __always_inline
#endif
+/*
+ * The assume attribute is used to indicate that a certain condition is
+ * assumed to be true. If this condition is violated at runtime, the behavior
+ * is undefined. Compilers may or may not use this indication to generate
+ * optimized code.
+ *
+ * Note that the clang documentation states that optimizers may react
+ * differently to this attribute, and this may even have a negative
+ * performance impact. Therefore this attribute should be used with care.
+ *
+ * Optional: only supported since gcc >= 13
+ * Optional: only supported since clang >= 19
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Statement-Attributes.html#index-assume-statement-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#id13
+ *
+ */
+#ifdef CONFIG_CC_HAS_ASSUME
+# define __assume(expr) __attribute__((__assume__(expr)))
+#else
+# define __assume(expr)
+#endif
+
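+/*
+ * Illustrative sketch (hypothetical bounds hint, not part of this
+ * patch):
+ *
+ *	__assume(idx < 8);
+ *
+ * lets the optimizer drop later range checks on idx, at the price of
+ * undefined behavior if the assumption is ever false.
+ */
+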
+/*
+ * Optional: only supported since gcc >= 15
+ * Optional: only supported since clang >= 18
+ *
+ * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
+ * clang: https://github.com/llvm/llvm-project/pull/76348
+ *
+ * __bdos on clang < 19.1.2 can erroneously return 0:
+ * https://github.com/llvm/llvm-project/pull/110497
+ *
+ * __bdos on clang < 19.1.3 can be off by 4:
+ * https://github.com/llvm/llvm-project/pull/112636
+ */
+#ifdef CONFIG_CC_HAS_COUNTED_BY
+# define __counted_by(member) __attribute__((__counted_by__(member)))
+#else
+# define __counted_by(member)
+#endif
+
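+/*
+ * Illustrative sketch (hypothetical struct, not part of this patch):
+ *
+ *	struct pkt {
+ *		u16 len;
+ *		u8 data[] __counted_by(len);
+ *	};
+ *
+ * Fortified accessors and __builtin_dynamic_object_size() can then
+ * bounds-check data[] against len.
+ */
+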
+/*
+ * Optional: only supported since gcc >= 15
+ * Optional: not supported by Clang
+ *
+ * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=117178
+ */
+#ifdef CONFIG_CC_HAS_MULTIDIMENSIONAL_NONSTRING
+# define __nonstring_array __attribute__((__nonstring__))
+#else
+# define __nonstring_array
+#endif
+
+/*
+ * Apply __counted_by() when the endianness matches, to increase test coverage.
+ */
+#ifdef __LITTLE_ENDIAN
+#define __counted_by_le(member) __counted_by(member)
+#define __counted_by_be(member)
+#else
+#define __counted_by_le(member)
+#define __counted_by_be(member) __counted_by(member)
+#endif
+
+/*
+ * This designates the minimum number of elements a passed array parameter must
+ * have. For example:
+ *
+ * void some_function(u8 param[at_least 7]);
+ *
+ * If a caller passes an array with fewer than 7 elements, the compiler will
+ * emit a warning.
+ */
+#ifndef __CHECKER__
+#define at_least static
+#else
+#define at_least
+#endif
+
+/* Do not trap wrapping arithmetic within an annotated function. */
+#ifdef CONFIG_UBSAN_INTEGER_WRAP
+# define __signed_wrap __attribute__((no_sanitize("signed-integer-overflow")))
+#else
+# define __signed_wrap
+#endif
+
/* Section for code which can't be instrumented at all */
-#define noinstr \
- noinline notrace __attribute((__section__(".noinstr.text"))) \
- __no_kcsan __no_sanitize_address
+#define __noinstr_section(section) \
+ noinline notrace __attribute((__section__(section))) \
+ __no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage \
+ __no_sanitize_memory __signed_wrap
+
+#define noinstr __noinstr_section(".noinstr.text")
+
+/*
+ * The __cpuidle section is used twofold:
+ *
+ * 1) the original use -- identifying if a CPU is 'stuck' in idle state based
+ * on its instruction pointer. See cpu_in_idle().
+ *
+ * 2) suppressing instrumentation around where cpuidle disables RCU; where the
+ * function isn't strictly required for #1, this is interchangeable with
+ * noinstr.
+ */
+#define __cpuidle __noinstr_section(".cpuidle.text")
#endif /* __KERNEL__ */
@@ -225,33 +461,98 @@ struct ftrace_likely_data {
# define __latent_entropy
#endif
-#ifndef __randomize_layout
+#if defined(RANDSTRUCT) && !defined(__CHECKER__)
+# define __randomize_layout __designated_init __attribute__((randomize_layout))
+# define __no_randomize_layout __attribute__((no_randomize_layout))
+/* This anon struct can add padding, so only enable it under randstruct. */
+# define randomized_struct_fields_start struct {
+# define randomized_struct_fields_end } __randomize_layout;
+#else
# define __randomize_layout __designated_init
-#endif
-
-#ifndef __no_randomize_layout
# define __no_randomize_layout
-#endif
-
-#ifndef randomized_struct_fields_start
# define randomized_struct_fields_start
# define randomized_struct_fields_end
#endif
+#ifndef __no_kstack_erase
+# define __no_kstack_erase
+#endif
+
#ifndef __noscs
# define __noscs
#endif
-#ifndef __nocfi
+#if defined(CONFIG_CFI)
+# define __nocfi __attribute__((__no_sanitize__("kcfi")))
+#else
# define __nocfi
#endif
-#ifndef __cficanonical
-# define __cficanonical
+#if defined(CONFIG_ARCH_USES_CFI_GENERIC_LLVM_PASS)
+# define __nocfi_generic __nocfi
+#else
+# define __nocfi_generic
+#endif
+
+/*
+ * Any place that could be marked with the "alloc_size" attribute is also
+ * a place to be marked with the "malloc" attribute, except those that may
+ * be performing a _reallocation_, as that may alias the existing pointer.
+ * For these, use __realloc_size().
+ */
+#ifdef __alloc_size__
+# define __alloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__) __malloc
+# define __realloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__)
+#else
+# define __alloc_size(x, ...) __malloc
+# define __realloc_size(x, ...)
+#endif
+
+/*
+ * When the size of an allocated object is needed, use the best available
+ * mechanism to find it. (For cases where sizeof() cannot be used.)
+ *
+ * Optional: only supported since gcc >= 12
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Object-Size-Checking.html
+ * clang: https://clang.llvm.org/docs/LanguageExtensions.html#evaluating-object-size
+ */
+#if __has_builtin(__builtin_dynamic_object_size)
+#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
+#define __member_size(p) __builtin_dynamic_object_size(p, 1)
+#else
+#define __struct_size(p) __builtin_object_size(p, 0)
+#define __member_size(p) __builtin_object_size(p, 1)
+#endif
+
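+/*
+ * Illustrative sketch (hypothetical struct, not part of this patch):
+ *
+ *	struct foo { char name[16]; int x; } *f;
+ *
+ *	__struct_size(f)	size of the whole object, when knowable
+ *	__member_size(f->name)	size of just the member (16 here)
+ */
+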
+/*
+ * Determine if an attribute has been applied to a variable.
+ * Using __annotated needs to check for __annotated being available,
+ * or negative tests may fail when annotation cannot be checked. For
+ * example, see the definition of __is_cstr().
+ */
+#if __has_builtin(__builtin_has_attribute)
+#define __annotated(var, attr) __builtin_has_attribute(var, attr)
#endif
-#ifndef asm_volatile_goto
-#define asm_volatile_goto(x...) asm goto(x)
+/*
+ * Some versions of gcc do not mark 'asm goto' volatile:
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979
+ *
+ * We do it here by hand, because it doesn't hurt.
+ */
+#ifndef asm_goto_output
+#define asm_goto_output(x...) asm volatile goto(x)
+#endif
+
+/*
+ * Clang has trouble with constraints with multiple
+ * alternative behaviors (mainly "g" and "rm").
+ */
+#ifndef ASM_INPUT_G
+ #define ASM_INPUT_G "g"
+ #define ASM_INPUT_RM "rm"
#endif
#ifdef CONFIG_CC_HAS_ASM_INLINE
@@ -290,26 +591,27 @@ struct ftrace_likely_data {
(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
-/* Compile time object size, -1 for unknown */
-#ifndef __compiletime_object_size
-# define __compiletime_object_size(obj) -1
-#endif
-#ifndef __compiletime_warning
-# define __compiletime_warning(message)
-#endif
-#ifndef __compiletime_error
-# define __compiletime_error(message)
-#endif
-
#ifdef __OPTIMIZE__
+/*
+ * #ifdef __OPTIMIZE__ is only a good approximation; for instance "make
+ * CFLAGS_foo.o=-Og" defines __OPTIMIZE__, does not elide the conditional code
+ * and can break compilation with wrong error message(s). Combine with
+ * -U__OPTIMIZE__ when needed.
+ */
# define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
- extern void prefix ## suffix(void) __compiletime_error(msg); \
+ /* \
+ * __noreturn is needed to give the compiler enough \
+ * information to avoid certain possibly-uninitialized \
+ * warnings (regardless of the build failing). \
+ */ \
+ __noreturn extern void prefix ## suffix(void) \
+ __compiletime_error(msg); \
if (!(condition)) \
prefix ## suffix(); \
} while (0)
#else
-# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
+# define __compiletime_assert(condition, msg, prefix, suffix) ((void)(condition))
#endif
#define _compiletime_assert(condition, msg, prefix, suffix) \
@@ -350,4 +652,8 @@ struct ftrace_likely_data {
#define __diag_error(compiler, version, option, comment) \
__diag_ ## compiler(version, error, option)
+#ifndef __diag_ignore_all
+#define __diag_ignore_all(option, comment)
+#endif
+
#endif /* __LINUX_COMPILER_TYPES_H */
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 51d9ab079629..fb2915676574 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -103,6 +103,7 @@ extern void wait_for_completion(struct completion *);
extern void wait_for_completion_io(struct completion *);
extern int wait_for_completion_interruptible(struct completion *x);
extern int wait_for_completion_killable(struct completion *x);
+extern int wait_for_completion_state(struct completion *x, unsigned int state);
extern unsigned long wait_for_completion_timeout(struct completion *x,
unsigned long timeout);
extern unsigned long wait_for_completion_io_timeout(struct completion *x,
@@ -115,6 +116,7 @@ extern bool try_wait_for_completion(struct completion *x);
extern bool completion_done(struct completion *x);
extern void complete(struct completion *);
+extern void complete_on_current_cpu(struct completion *x);
extern void complete_all(struct completion *);
#endif
diff --git a/include/linux/component.h b/include/linux/component.h
index 16de18f473d7..9d6c66401280 100644
--- a/include/linux/component.h
+++ b/include/linux/component.h
@@ -3,7 +3,7 @@
#define COMPONENT_H
#include <linux/stddef.h>
-
+#include <linux/types.h>
struct device;
@@ -38,10 +38,10 @@ int component_add_typed(struct device *dev, const struct component_ops *ops,
int subcomponent);
void component_del(struct device *, const struct component_ops *);
-int component_bind_all(struct device *master, void *master_data);
-void component_unbind_all(struct device *master, void *master_data);
+int component_bind_all(struct device *parent, void *data);
+void component_unbind_all(struct device *parent, void *data);
-struct master;
+struct aggregate_device;
/**
* struct component_master_ops - callback for the aggregate driver
@@ -82,29 +82,37 @@ struct component_master_ops {
void (*unbind)(struct device *master);
};
+/* A set of helper functions for component compare/release */
+int component_compare_of(struct device *dev, void *data);
+void component_release_of(struct device *dev, void *data);
+int component_compare_dev(struct device *dev, void *data);
+int component_compare_dev_name(struct device *dev, void *data);
+
void component_master_del(struct device *,
const struct component_master_ops *);
+bool component_master_is_bound(struct device *parent,
+ const struct component_master_ops *ops);
struct component_match;
int component_master_add_with_match(struct device *,
const struct component_master_ops *, struct component_match *);
-void component_match_add_release(struct device *master,
+void component_match_add_release(struct device *parent,
struct component_match **matchptr,
void (*release)(struct device *, void *),
int (*compare)(struct device *, void *), void *compare_data);
-void component_match_add_typed(struct device *master,
+void component_match_add_typed(struct device *parent,
struct component_match **matchptr,
int (*compare_typed)(struct device *, int, void *), void *compare_data);
/**
* component_match_add - add a component match entry
- * @master: device with the aggregate driver
+ * @parent: device with the aggregate driver
* @matchptr: pointer to the list of component matches
* @compare: compare function to match against all components
* @compare_data: opaque pointer passed to the @compare function
*
- * Adds a new component match to the list stored in @matchptr, which the @master
+ * Adds a new component match to the list stored in @matchptr, which the @parent
* aggregate driver needs to function. The list of component matches pointed to
* by @matchptr must be initialized to NULL before adding the first match. This
* only matches against components added with component_add().
@@ -114,11 +122,11 @@ void component_match_add_typed(struct device *master,
*
* See also component_match_add_release() and component_match_add_typed().
*/
-static inline void component_match_add(struct device *master,
+static inline void component_match_add(struct device *parent,
struct component_match **matchptr,
int (*compare)(struct device *, void *), void *compare_data)
{
- component_match_add_release(master, matchptr, NULL, compare,
+ component_match_add_release(parent, matchptr, NULL, compare,
compare_data);
}
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index 97cfd13bae51..ef65c75beeaa 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -64,8 +64,8 @@ extern void config_item_put(struct config_item *);
struct config_item_type {
struct module *ct_owner;
- struct configfs_item_operations *ct_item_ops;
- struct configfs_group_operations *ct_group_ops;
+ const struct configfs_item_operations *ct_item_ops;
+ const struct configfs_group_operations *ct_group_ops;
struct configfs_attribute **ct_attrs;
struct configfs_bin_attribute **ct_bin_attrs;
};
@@ -120,15 +120,19 @@ struct configfs_attribute {
ssize_t (*store)(struct config_item *, const char *, size_t);
};
-#define CONFIGFS_ATTR(_pfx, _name) \
+#define CONFIGFS_ATTR_PERM(_pfx, _name, _perm) \
static struct configfs_attribute _pfx##attr_##_name = { \
.ca_name = __stringify(_name), \
- .ca_mode = S_IRUGO | S_IWUSR, \
+ .ca_mode = _perm, \
.ca_owner = THIS_MODULE, \
.show = _pfx##_name##_show, \
.store = _pfx##_name##_store, \
}
+#define CONFIGFS_ATTR(_pfx, _name) CONFIGFS_ATTR_PERM( \
+ _pfx, _name, S_IRUGO | S_IWUSR \
+)
+
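+/*
+ * Illustrative sketch (hypothetical attribute, not part of this patch):
+ * declare a root-only attribute, given myitem_secret_show() and
+ * myitem_secret_store():
+ *
+ *	CONFIGFS_ATTR_PERM(myitem_, secret, 0600);
+ */
+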
#define CONFIGFS_ATTR_RO(_pfx, _name) \
static struct configfs_attribute _pfx##attr_##_name = { \
.ca_name = __stringify(_name), \
@@ -204,8 +208,6 @@ static struct configfs_bin_attribute _pfx##attr_##_name = { \
 * group children. default_groups may coexist alongside make_group() or
* make_item(), but if the group wishes to have only default_groups
* children (disallowing mkdir(2)), it need not provide either function.
- * If the group has commit(), it supports pending and committed (active)
- * items.
*/
struct configfs_item_operations {
void (*release)(struct config_item *);
@@ -216,9 +218,11 @@ struct configfs_item_operations {
struct configfs_group_operations {
struct config_item *(*make_item)(struct config_group *group, const char *name);
struct config_group *(*make_group)(struct config_group *group, const char *name);
- int (*commit_item)(struct config_item *item);
void (*disconnect_notify)(struct config_group *group, struct config_item *item);
void (*drop_item)(struct config_group *group, struct config_item *item);
+ bool (*is_visible)(struct config_item *item, struct configfs_attribute *attr, int n);
+ bool (*is_bin_visible)(struct config_item *item, struct configfs_bin_attribute *attr,
+ int n);
};
struct configfs_subsystem {
diff --git a/include/linux/connector.h b/include/linux/connector.h
index 487350bb19c3..70bc1160f3d8 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -90,13 +90,18 @@ void cn_del_callback(const struct cb_id *id);
* If @group is not zero, then message will be delivered
* to the specified group.
* @gfp_mask: GFP mask.
+ * @filter: Filter function to be used at the netlink layer.
+ * @filter_data: Filter data to be supplied to the filter function.
*
* It can be safely called from softirq context, but may silently
* fail under strong memory pressure.
*
* If there are no listeners for given group %-ESRCH can be returned.
*/
-int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask);
+int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid,
+ u32 group, gfp_t gfp_mask,
+ netlink_filter_fn filter,
+ void *filter_data);
/**
* cn_netlink_send - Sends message to the specified groups.
diff --git a/include/linux/console.h b/include/linux/console.h
index 20874db50bc8..fc9f5c5c1b04 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -15,7 +15,13 @@
#define _LINUX_CONSOLE_H_ 1
#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/irq_work.h>
+#include <linux/rculist.h>
+#include <linux/rcuwait.h>
+#include <linux/smp.h>
#include <linux/types.h>
+#include <linux/vesa.h>
struct vc_data;
struct console_font_op;
@@ -34,62 +40,91 @@ enum vc_intensity;
/**
* struct consw - callbacks for consoles
*
+ * @owner: the module to get references of when this console is used
+ * @con_startup: set up the console and return its name (like VGA, EGA, ...)
+ * @con_init: initialize the console on @vc. @init is true for the very first
+ * call on this @vc.
+ * @con_deinit: deinitialize the console from @vc.
+ * @con_clear: erase @count characters at [@x, @y] on @vc. @count >= 1.
+ * @con_putc: emit one character with attributes @ca to [@x, @y] on @vc.
+ * (optional -- @con_putcs would be called instead)
+ * @con_putcs: emit @count characters with attributes @s to [@x, @y] on @vc.
+ * @con_cursor: enable/disable cursor depending on @enable
* @con_scroll: move lines from @top to @bottom in direction @dir by @lines.
* Return true if no generic handling should be done.
* Invoked by csi_M and printing to the console.
- * @con_set_palette: sets the palette of the console to @table (optional)
+ * @con_switch: notifier about the console switch; it is supposed to return
+ * true if a redraw is needed.
+ * @con_blank: blank/unblank the console. The target mode is passed in @blank.
+ * @mode_switch is set if changing from/to text/graphics. The hook
+ * is supposed to return true if a redraw is needed.
+ * @con_font_set: set console @vc font to @font with height @vpitch. @flags can
+ * be %KD_FONT_FLAG_DONT_RECALC. (optional)
+ * @con_font_get: fetch the current font on @vc of height @vpitch into @font.
+ * (optional)
+ * @con_font_default: set default font on @vc. @name can be %NULL or font name
+ * to search for. @font can be filled back. (optional)
+ * @con_resize: resize the @vc console to @width x @height. @from_user is true
+ * when this change comes from the user space.
+ * @con_set_palette: sets the palette of the console @vc to @table (optional)
* @con_scrolldelta: the contents of the console should be scrolled by @lines.
* Invoked by user. (optional)
+ * @con_set_origin: set origin (see &vc_data::vc_origin) of the @vc. If not
+ * provided or returns false, the origin is set to
+ * @vc->vc_screenbuf. (optional)
+ * @con_save_screen: save screen content into @vc->vc_screenbuf. Called e.g.
+ * upon entering graphics. (optional)
+ * @con_build_attr: build attributes based on @color, @intensity and other
+ * parameters. The result is used for both normal and erase
+ * characters. (optional)
+ * @con_invert_region: invert a region of length @count on @vc starting at @p.
+ * (optional)
+ * @con_debug_enter: prepare the console for the debugger. This includes, but
+ * is not limited to, unblanking the console, loading an
+ * appropriate palette, and allowing debugger generated output.
+ * (optional)
+ * @con_debug_leave: restore the console to its pre-debug state as closely as
+ * possible. (optional)
*/
struct consw {
struct module *owner;
const char *(*con_startup)(void);
- void (*con_init)(struct vc_data *vc, int init);
+ void (*con_init)(struct vc_data *vc, bool init);
void (*con_deinit)(struct vc_data *vc);
- void (*con_clear)(struct vc_data *vc, int sy, int sx, int height,
- int width);
- void (*con_putc)(struct vc_data *vc, int c, int ypos, int xpos);
- void (*con_putcs)(struct vc_data *vc, const unsigned short *s,
- int count, int ypos, int xpos);
- void (*con_cursor)(struct vc_data *vc, int mode);
+ void (*con_clear)(struct vc_data *vc, unsigned int y,
+ unsigned int x, unsigned int count);
+ void (*con_putc)(struct vc_data *vc, u16 ca, unsigned int y,
+ unsigned int x);
+ void (*con_putcs)(struct vc_data *vc, const u16 *s,
+ unsigned int count, unsigned int ypos,
+ unsigned int xpos);
+ void (*con_cursor)(struct vc_data *vc, bool enable);
bool (*con_scroll)(struct vc_data *vc, unsigned int top,
unsigned int bottom, enum con_scroll dir,
unsigned int lines);
- int (*con_switch)(struct vc_data *vc);
- int (*con_blank)(struct vc_data *vc, int blank, int mode_switch);
- int (*con_font_set)(struct vc_data *vc, struct console_font *font,
- unsigned int flags);
- int (*con_font_get)(struct vc_data *vc, struct console_font *font);
+ bool (*con_switch)(struct vc_data *vc);
+ bool (*con_blank)(struct vc_data *vc, enum vesa_blank_mode blank,
+ bool mode_switch);
+ int (*con_font_set)(struct vc_data *vc,
+ const struct console_font *font,
+ unsigned int vpitch, unsigned int flags);
+ int (*con_font_get)(struct vc_data *vc, struct console_font *font,
+ unsigned int vpitch);
int (*con_font_default)(struct vc_data *vc,
- struct console_font *font, char *name);
+ struct console_font *font, const char *name);
int (*con_resize)(struct vc_data *vc, unsigned int width,
- unsigned int height, unsigned int user);
+ unsigned int height, bool from_user);
void (*con_set_palette)(struct vc_data *vc,
const unsigned char *table);
void (*con_scrolldelta)(struct vc_data *vc, int lines);
- int (*con_set_origin)(struct vc_data *vc);
+ bool (*con_set_origin)(struct vc_data *vc);
void (*con_save_screen)(struct vc_data *vc);
u8 (*con_build_attr)(struct vc_data *vc, u8 color,
enum vc_intensity intensity,
bool blink, bool underline, bool reverse, bool italic);
void (*con_invert_region)(struct vc_data *vc, u16 *p, int count);
- u16 *(*con_screen_pos)(const struct vc_data *vc, int offset);
- unsigned long (*con_getxy)(struct vc_data *vc, unsigned long position,
- int *px, int *py);
- /*
- * Flush the video console driver's scrollback buffer
- */
- void (*con_flush_scrollback)(struct vc_data *vc);
- /*
- * Prepare the console for the debugger. This includes, but is not
- * limited to, unblanking the console, loading an appropriate
- * palette, and allowing debugger generated output.
- */
- int (*con_debug_enter)(struct vc_data *vc);
- /*
- * Restore the console to its pre-debug state as closely as possible.
- */
- int (*con_debug_leave)(struct vc_data *vc);
+ void (*con_debug_enter)(struct vc_data *vc);
+ void (*con_debug_leave)(struct vc_data *vc);
};
extern const struct consw *conswitchp;
@@ -98,66 +133,554 @@ extern const struct consw dummy_con; /* dummy console buffer */
extern const struct consw vga_con; /* VGA text console */
extern const struct consw newport_con; /* SGI Newport console */
+struct screen_info;
+#ifdef CONFIG_VGA_CONSOLE
+void vgacon_register_screen(struct screen_info *si);
+#else
+static inline void vgacon_register_screen(struct screen_info *si) { }
+#endif
+
int con_is_bound(const struct consw *csw);
int do_unregister_con_driver(const struct consw *csw);
int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
void give_up_console(const struct consw *sw);
-#ifdef CONFIG_HW_CONSOLE
-int con_debug_enter(struct vc_data *vc);
-int con_debug_leave(void);
+#ifdef CONFIG_VT
+void con_debug_enter(struct vc_data *vc);
+void con_debug_leave(void);
#else
-static inline int con_debug_enter(struct vc_data *vc)
+static inline void con_debug_enter(struct vc_data *vc) { }
+static inline void con_debug_leave(void) { }
+#endif
+
+/*
+ * The interface for a console, or any other device that wants to capture
+ * console messages (printer driver?)
+ */
+
+/**
+ * enum cons_flags - General console flags
+ * @CON_PRINTBUFFER: Used by newly registered consoles to avoid duplicate
+ * output of messages that were already shown by boot
+ * consoles or read by userspace via syslog() syscall.
+ * @CON_CONSDEV: Indicates that the console driver is backing
+ * /dev/console.
+ * @CON_ENABLED: Indicates if a console is allowed to print records. If
+ * false, the console also will not advance to later
+ * records.
+ * @CON_BOOT: Marks the console driver as early console driver which
+ * is used during boot before the real driver becomes
+ * available. It will be automatically unregistered
+ * when the real console driver is registered unless
+ * "keep_bootcon" parameter is used.
+ * @CON_ANYTIME: A misnomed historical flag which tells the core code
+ * that the legacy @console::write callback can be invoked
+ * on a CPU which is marked OFFLINE. That is misleading as
+ * it suggests that there is no contextual limit for
+ * invoking the callback. The original motivation was
+ * readiness of the per-CPU areas.
+ * @CON_BRL: Indicates a braille device which is exempt from
+ * receiving the printk spam for obvious reasons.
+ * @CON_EXTENDED: The console supports the extended output format of
+ * /dev/kmsg, which requires a larger output buffer.
+ * @CON_SUSPENDED: Indicates if a console is suspended. If true, the
+ * printing callbacks must not be called.
+ * @CON_NBCON: Console can operate outside of the legacy style console_lock
+ * constraints.
+ * @CON_NBCON_ATOMIC_UNSAFE: The write_atomic() callback is not safe and is
+ * therefore only used by nbcon_atomic_flush_unsafe().
+ */
+enum cons_flags {
+ CON_PRINTBUFFER = BIT(0),
+ CON_CONSDEV = BIT(1),
+ CON_ENABLED = BIT(2),
+ CON_BOOT = BIT(3),
+ CON_ANYTIME = BIT(4),
+ CON_BRL = BIT(5),
+ CON_EXTENDED = BIT(6),
+ CON_SUSPENDED = BIT(7),
+ CON_NBCON = BIT(8),
+ CON_NBCON_ATOMIC_UNSAFE = BIT(9),
+};
+
+/**
+ * struct nbcon_state - console state for nbcon consoles
+ * @atom: Compound of the state fields for atomic operations
+ *
+ * @req_prio: The priority of a handover request
+ * @prio: The priority of the current owner
+ * @unsafe: Console is busy in a non-takeover region
+ * @unsafe_takeover: A hostile takeover in an unsafe state happened in the
+ * past. The console cannot be safe until re-initialized.
+ * @cpu: The CPU on which the owner runs
+ *
+ * To be used for reading and preparing of the value stored in the nbcon
+ * state variable @console::nbcon_state.
+ *
+ * The @prio and @req_prio fields are particularly important to allow
+ * spin-waiting to timeout and give up without the risk of a waiter being
+ * assigned the lock after giving up.
+ */
+struct nbcon_state {
+ union {
+ unsigned int atom;
+ struct {
+ unsigned int prio : 2;
+ unsigned int req_prio : 2;
+ unsigned int unsafe : 1;
+ unsigned int unsafe_takeover : 1;
+ unsigned int cpu : 24;
+ };
+ };
+};
+
+/*
+ * The nbcon_state struct is used to easily create and interpret values that
+ * are stored in the @console::nbcon_state variable. Ensure this struct stays
+ * within the size boundaries of the atomic variable's underlying type in
+ * order to avoid any accidental truncation.
+ */
+static_assert(sizeof(struct nbcon_state) <= sizeof(int));
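As a hedged aside (not part of the patch), the union is what lets a single atomic load capture every field in one consistent snapshot; the helper below is hypothetical:

	static inline bool nbcon_state_is_unsafe(atomic_t *state)
	{
		struct nbcon_state cur;

		/* One atomic_read() yields a consistent view of all fields. */
		cur.atom = atomic_read(state);
		return cur.unsafe || cur.unsafe_takeover;
	}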
+
+/**
+ * enum nbcon_prio - console owner priority for nbcon consoles
+ * @NBCON_PRIO_NONE: Unused
+ * @NBCON_PRIO_NORMAL: Normal (non-emergency) usage
+ * @NBCON_PRIO_EMERGENCY: Emergency output (WARN/OOPS...)
+ * @NBCON_PRIO_PANIC: Panic output
+ * @NBCON_PRIO_MAX: The number of priority levels
+ *
+ * A higher priority context can takeover the console when it is
+ * in the safe state. The final attempt to flush consoles in panic()
+ * can be allowed to do so even in an unsafe state (Hope and pray).
+ */
+enum nbcon_prio {
+ NBCON_PRIO_NONE = 0,
+ NBCON_PRIO_NORMAL,
+ NBCON_PRIO_EMERGENCY,
+ NBCON_PRIO_PANIC,
+ NBCON_PRIO_MAX,
+};
+
+struct console;
+struct printk_buffers;
+
+/**
+ * struct nbcon_context - Context for console acquire/release
+ * @console: The associated console
+ * @spinwait_max_us: Limit for spin-wait acquire
+ * @prio: Priority of the context
+ * @allow_unsafe_takeover: Allow performing takeover even if unsafe. Can
+ * be used only with NBCON_PRIO_PANIC @prio. It
+ * might cause a system freeze when the console
+ * is used later.
+ * @backlog: Ringbuffer has pending records
+ * @pbufs: Pointer to the text buffer for this context
+ * @seq: The sequence number to print for this context
+ */
+struct nbcon_context {
+ /* members set by caller */
+ struct console *console;
+ unsigned int spinwait_max_us;
+ enum nbcon_prio prio;
+ unsigned int allow_unsafe_takeover : 1;
+
+ /* members set by emit */
+ unsigned int backlog : 1;
+
+ /* members set by acquire */
+ struct printk_buffers *pbufs;
+ u64 seq;
+};
+
+/**
+ * struct nbcon_write_context - Context handed to the nbcon write callbacks
+ * @ctxt: The core console context
+ * @outbuf: Pointer to the text buffer for output
+ * @len: Length to write
+ * @unsafe_takeover: If a hostile takeover in an unsafe state has occurred
+ */
+struct nbcon_write_context {
+ struct nbcon_context __private ctxt;
+ char *outbuf;
+ unsigned int len;
+ bool unsafe_takeover;
+};
+
+/**
+ * struct console - The console descriptor structure
+ * @name: The name of the console driver
+ * @write: Legacy write callback to output messages (Optional)
+ * @read: Read callback for console input (Optional)
+ * @device: The underlying TTY device driver (Optional)
+ * @unblank: Callback to unblank the console (Optional)
+ * @setup: Callback for initializing the console (Optional)
+ * @exit: Callback for teardown of the console (Optional)
+ * @match: Callback for matching a console (Optional)
+ * @flags: Console flags. See enum cons_flags
+ * @index: Console index, e.g. port number
+ * @cflag: TTY control mode flags
+ * @ispeed: TTY input speed
+ * @ospeed: TTY output speed
+ * @seq: Sequence number of the next ringbuffer record to print
+ * @dropped: Number of unreported dropped ringbuffer records
+ * @data: Driver private data
+ * @node: hlist node for the console list
+ *
+ * @nbcon_state: State for nbcon consoles
+ * @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @nbcon_device_ctxt: Context available for non-printing operations
+ * @nbcon_prev_seq: Seq num the previous nbcon owner was assigned to print
+ * @pbufs: Pointer to nbcon private buffer
+ * @kthread: Printer kthread for this console
+ * @rcuwait: RCU-safe wait object for @kthread waking
+ * @irq_work: Defer @kthread waking to IRQ work context
+ */
+struct console {
+ char name[16];
+ void (*write)(struct console *co, const char *s, unsigned int count);
+ int (*read)(struct console *co, char *s, unsigned int count);
+ struct tty_driver *(*device)(struct console *co, int *index);
+ void (*unblank)(void);
+ int (*setup)(struct console *co, char *options);
+ int (*exit)(struct console *co);
+ int (*match)(struct console *co, char *name, int idx, char *options);
+ short flags;
+ short index;
+ int cflag;
+ uint ispeed;
+ uint ospeed;
+ u64 seq;
+ unsigned long dropped;
+ void *data;
+ struct hlist_node node;
+
+ /* nbcon console specific members */
+
+ /**
+ * @write_atomic:
+ *
+ * NBCON callback to write out text in any context. (Optional)
+ *
+ * This callback is called with the console already acquired. However,
+ * a higher priority context is allowed to take it over by default.
+ *
+ * The callback must call nbcon_enter_unsafe() and nbcon_exit_unsafe()
+ * around any code where the takeover is not safe, for example, when
+ * manipulating the serial port registers.
+ *
+ * nbcon_enter_unsafe() will fail if the context has lost the console
+ * ownership in the meantime. In this case, the callback is no longer
+ * allowed to go forward. It must back out immediately and carefully.
+ * The buffer content is also no longer trusted since it no longer
+ * belongs to the context.
+ *
+ * The callback should allow the takeover whenever it is safe. It
+ * increases the chance to see messages when the system is in trouble.
+ * If the driver must reacquire ownership in order to finalize or
+ * revert hardware changes, nbcon_reacquire_nobuf() can be used.
+ * However, on reacquire the buffer content is no longer available. A
+ * reacquire cannot be used to resume printing.
+ *
+ * The callback can be called from any context (including NMI).
+ * Therefore it must avoid usage of any locking and instead rely
+ * on the console ownership for synchronization.
+ */
+ void (*write_atomic)(struct console *con, struct nbcon_write_context *wctxt);
+
+ /**
+ * @write_thread:
+ *
+ * NBCON callback to write out text in task context.
+ *
+ * This callback must be called only in task context with both
+ * device_lock() and the nbcon console acquired with
+ * NBCON_PRIO_NORMAL.
+ *
+ * The same rules for console ownership verification and unsafe
+ * section handling apply as with write_atomic().
+ *
+ * The console ownership handling is necessary for synchronization
+ * against write_atomic() which is synchronized only via the context.
+ *
+ * The device_lock() provides the primary serialization for operations
+ * on the device. It might be as relaxed (mutex)[*] or as tight
+ * (disabled preemption and interrupts) as needed. It allows
+ * the kthread to operate in the least restrictive mode[**].
+ *
+ * [*] Standalone nbcon_context_try_acquire() is not safe with
+ * preemption enabled, see nbcon_owner_matches(). But it
+ * can be safe when always called in preemptible context
+ * under the device_lock().
+ *
+ * [**] The device_lock() makes sure that nbcon_context_try_acquire()
+ * would never need to spin which is important especially with
+ * PREEMPT_RT.
+ */
+ void (*write_thread)(struct console *con, struct nbcon_write_context *wctxt);
+
+ /**
+ * @device_lock:
+ *
+ * NBCON callback to begin synchronization with driver code.
+ *
+ * Console drivers typically must deal with access to the hardware
+ * via user input/output (such as an interactive login shell) and
+ * output of kernel messages via printk() calls. This callback is
+ * called by the printk-subsystem whenever it needs to synchronize
+ * with hardware access by the driver. It should be implemented to
+ * use whatever synchronization mechanism the driver is using for
+ * itself (for example, the port lock for uart serial consoles).
+ *
+ * The callback is always called from task context. It may use any
+ * synchronization method required by the driver.
+ *
+ * IMPORTANT: The callback MUST disable migration. The console driver
+ * may be using a synchronization mechanism that already takes
+ * care of this (such as spinlocks). Otherwise this function must
+ * explicitly call migrate_disable().
+ *
+ * The flags argument is provided as a convenience to the driver. It
+ * will be passed again to device_unlock(). It can be ignored if the
+ * driver does not need it.
+ */
+ void (*device_lock)(struct console *con, unsigned long *flags);
+
+ /**
+ * @device_unlock:
+ *
+ * NBCON callback to finish synchronization with driver code.
+ *
+ * It is the counterpart to device_lock().
+ *
+ * This callback is always called from task context. It must
+ * appropriately re-enable migration (depending on how device_lock()
+ * disabled migration).
+ *
+ * The flags argument is the value of the same variable that was
+ * passed to device_lock().
+ */
+ void (*device_unlock)(struct console *con, unsigned long flags);
+
+ atomic_t __private nbcon_state;
+ atomic_long_t __private nbcon_seq;
+ struct nbcon_context __private nbcon_device_ctxt;
+ atomic_long_t __private nbcon_prev_seq;
+
+ struct printk_buffers *pbufs;
+ struct task_struct *kthread;
+ struct rcuwait rcuwait;
+ struct irq_work irq_work;
+};
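To make the write_atomic() contract documented above concrete, here is a minimal hedged sketch of a driver callback; my_port_write() is a hypothetical hardware accessor, and real drivers also chunk output and re-check ownership between chunks:

	static void my_write_atomic(struct console *con,
				    struct nbcon_write_context *wctxt)
	{
		/* Fails if ownership was lost; back out immediately. */
		if (!nbcon_enter_unsafe(wctxt))
			return;

		/* Touch the port registers only inside the unsafe section. */
		my_port_write(con->data, wctxt->outbuf, wctxt->len);

		/* May fail after a hostile takeover; the buffer then no
		 * longer belongs to this context. */
		nbcon_exit_unsafe(wctxt);
	}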
+
+#ifdef CONFIG_LOCKDEP
+extern void lockdep_assert_console_list_lock_held(void);
+#else
+static inline void lockdep_assert_console_list_lock_held(void)
{
- return 0;
}
-static inline int con_debug_leave(void)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern bool console_srcu_read_lock_is_held(void);
+#else
+static inline bool console_srcu_read_lock_is_held(void)
{
- return 0;
+ return true;
}
#endif
-/* cursor */
-#define CM_DRAW (1)
-#define CM_ERASE (2)
-#define CM_MOVE (3)
+extern int console_srcu_read_lock(void);
+extern void console_srcu_read_unlock(int cookie);
+
+extern void console_list_lock(void) __acquires(console_mutex);
+extern void console_list_unlock(void) __releases(console_mutex);
+
+extern struct hlist_head console_list;
+
+/**
+ * console_srcu_read_flags - Locklessly read flags of a possibly registered
+ * console
+ * @con: struct console pointer of console to read flags from
+ *
+ * Locklessly reading @con->flags provides a consistent read value because
+ * there is at most one CPU modifying @con->flags and that CPU is using only
+ * read-modify-write operations to do so.
+ *
+ * Requires console_srcu_read_lock to be held, which implies that @con might
+ * be a registered console. The purpose of holding console_srcu_read_lock is
+ * to guarantee that the console state is valid (CON_SUSPENDED/CON_ENABLED)
+ * and that no exit/cleanup routines will run if the console is currently
+ * undergoing unregistration.
+ *
+ * If the caller is holding the console_list_lock or it is _certain_ that
+ * @con is not and will not become registered, the caller may read
+ * @con->flags directly instead.
+ *
+ * Context: Any context.
+ * Return: The current value of the @con->flags field.
+ */
+static inline short console_srcu_read_flags(const struct console *con)
+{
+ WARN_ON_ONCE(!console_srcu_read_lock_is_held());
+
+ /*
+ * The READ_ONCE() matches the WRITE_ONCE() when @flags are modified
+ * for registered consoles with console_srcu_write_flags().
+ */
+ return data_race(READ_ONCE(con->flags));
+}
+
+/**
+ * console_srcu_write_flags - Write flags for a registered console
+ * @con: struct console pointer of console to write flags to
+ * @flags: new flags value to write
+ *
+ * Only use this function to write flags for registered consoles. It
+ * requires holding the console_list_lock.
+ *
+ * Context: Any context.
+ */
+static inline void console_srcu_write_flags(struct console *con, short flags)
+{
+ lockdep_assert_console_list_lock_held();
+
+ /* This matches the READ_ONCE() in console_srcu_read_flags(). */
+ WRITE_ONCE(con->flags, flags);
+}
+
+/* Variant of console_is_registered() when the console_list_lock is held. */
+static inline bool console_is_registered_locked(const struct console *con)
+{
+ lockdep_assert_console_list_lock_held();
+ return !hlist_unhashed(&con->node);
+}
/*
- * The interface for a console, or any other device that wants to capture
- * console messages (printer driver?)
+ * console_is_registered - Check if the console is registered
+ * @con: struct console pointer of console to check
+ *
+ * Context: Process context. May sleep while acquiring console list lock.
+ * Return: true if the console is in the console list, otherwise false.
*
- * If a console driver is marked CON_BOOT then it will be auto-unregistered
- * when the first real console is registered. This is for early-printk drivers.
+ * If false is returned for a console that was previously registered, it
+ * can be assumed that the console's unregistration is fully completed,
+ * including the exit() callback after console list removal.
*/
+static inline bool console_is_registered(const struct console *con)
+{
+ bool ret;
-#define CON_PRINTBUFFER (1)
-#define CON_CONSDEV (2) /* Preferred console, /dev/console */
-#define CON_ENABLED (4)
-#define CON_BOOT (8)
-#define CON_ANYTIME (16) /* Safe to call when cpu is offline */
-#define CON_BRL (32) /* Used for a braille device */
-#define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */
+ console_list_lock();
+ ret = console_is_registered_locked(con);
+ console_list_unlock();
+ return ret;
+}
-struct console {
- char name[16];
- void (*write)(struct console *, const char *, unsigned);
- int (*read)(struct console *, char *, unsigned);
- struct tty_driver *(*device)(struct console *, int *);
- void (*unblank)(void);
- int (*setup)(struct console *, char *);
- int (*exit)(struct console *);
- int (*match)(struct console *, char *name, int idx, char *options);
- short flags;
- short index;
- int cflag;
- void *data;
- struct console *next;
-};
+/**
+ * for_each_console_srcu() - Iterator over registered consoles
+ * @con: struct console pointer used as loop cursor
+ *
+ * Although SRCU guarantees the console list will be consistent, the
+ * struct console fields may be updated by other CPUs while iterating.
+ *
+ * Requires console_srcu_read_lock to be held. Can be invoked from
+ * any context.
+ */
+#define for_each_console_srcu(con) \
+ hlist_for_each_entry_srcu(con, &console_list, node, \
+ console_srcu_read_lock_is_held())
+
+/**
+ * for_each_console() - Iterator over registered consoles
+ * @con: struct console pointer used as loop cursor
+ *
+ * The console list and the &console.flags are immutable while iterating.
+ *
+ * Requires console_list_lock to be held.
+ */
+#define for_each_console(con) \
+ lockdep_assert_console_list_lock_held(); \
+ hlist_for_each_entry(con, &console_list, node)
+
+#ifdef CONFIG_PRINTK
+extern void nbcon_cpu_emergency_enter(void);
+extern void nbcon_cpu_emergency_exit(void);
+extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
+extern void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
+ char *buf, unsigned int len);
+extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
+extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
+extern void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt);
+extern bool nbcon_allow_unsafe_takeover(void);
+extern bool nbcon_kdb_try_acquire(struct console *con,
+ struct nbcon_write_context *wctxt);
+extern void nbcon_kdb_release(struct nbcon_write_context *wctxt);
/*
- * for_each_console() allows you to iterate on each console
+ * Check if the given console is currently capable and allowed to print
+ * records. Note that this function does not consider the current context,
+ * which can also play a role in deciding if @con can be used to print
+ * records.
*/
-#define for_each_console(con) \
- for (con = console_drivers; con != NULL; con = con->next)
+static inline bool console_is_usable(struct console *con, short flags, bool use_atomic)
+{
+ if (!(flags & CON_ENABLED))
+ return false;
+
+ if ((flags & CON_SUSPENDED))
+ return false;
+
+ if (flags & CON_NBCON) {
+ if (use_atomic) {
+ /* The write_atomic() callback is optional. */
+ if (!con->write_atomic)
+ return false;
+
+ /*
+ * An unsafe write_atomic() callback is only usable
+ * when unsafe takeovers are allowed.
+ */
+ if ((flags & CON_NBCON_ATOMIC_UNSAFE) && !nbcon_allow_unsafe_takeover())
+ return false;
+ }
+
+ /*
+ * For the !use_atomic case, @printk_kthreads_running is not
+ * checked because the write_thread() callback is also used
+ * via the legacy loop when the printer threads are not
+ * available.
+ */
+ } else {
+ if (!con->write)
+ return false;
+ }
+
+ /*
+ * Console drivers may assume that per-cpu resources have been
+ * allocated. So unless they're explicitly marked as being able to
+ * cope (CON_ANYTIME) don't call them until this CPU is officially up.
+ */
+ if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
+ return false;
+
+ return true;
+}
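As a hedged illustration of how the pieces above compose, a legacy-style flush loop might look like this; record emission is elided:

	struct console *con;
	int cookie = console_srcu_read_lock();

	for_each_console_srcu(con) {
		short flags = console_srcu_read_flags(con);

		if (!console_is_usable(con, flags, true))
			continue;
		/* ... emit the next pending record for this console ... */
	}
	console_srcu_read_unlock(cookie);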
+
+#else
+static inline void nbcon_cpu_emergency_enter(void) { }
+static inline void nbcon_cpu_emergency_exit(void) { }
+static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
+static inline void nbcon_write_context_set_buf(struct nbcon_write_context *wctxt,
+ char *buf, unsigned int len) { }
+static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
+static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
+static inline void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt) { }
+static inline bool nbcon_kdb_try_acquire(struct console *con,
+ struct nbcon_write_context *wctxt) { return false; }
+static inline void nbcon_kdb_release(struct nbcon_write_context *wctxt) { }
+static inline bool console_is_usable(struct console *con, short flags,
+ bool use_atomic) { return false; }
+#endif
extern int console_set_on_cmdline;
extern struct console *early_console;
@@ -167,10 +690,10 @@ enum con_flush_mode {
CONSOLE_REPLAY_ALL,
};
-extern int add_preferred_console(char *name, int idx, char *options);
+extern int add_preferred_console(const char *name, const short idx, char *options);
+extern void console_force_preferred_locked(struct console *con);
extern void register_console(struct console *);
extern int unregister_console(struct console *);
-extern struct console *console_drivers;
extern void console_lock(void);
extern int console_trylock(void);
extern void console_unlock(void);
@@ -178,8 +701,8 @@ extern void console_conditional_schedule(void);
extern void console_unblank(void);
extern void console_flush_on_panic(enum con_flush_mode mode);
extern struct tty_driver *console_device(int *);
-extern void console_stop(struct console *);
-extern void console_start(struct console *);
+extern void console_suspend(struct console *);
+extern void console_resume(struct console *);
extern int is_console_locked(void);
extern int braille_register_console(struct console *, int index,
char *console_options, char *braille_options);
@@ -193,8 +716,8 @@ static inline void console_sysfs_notify(void)
extern bool console_suspend_enabled;
/* Suspend and resume console messages over PM events */
-extern void suspend_console(void);
-extern void resume_console(void);
+extern void console_suspend_all(void);
+extern void console_resume_all(void);
int mda_console_init(void);
@@ -211,17 +734,7 @@ void vcs_remove_sysfs(int index);
*/
extern atomic_t ignore_console_lock_warning;
-/* VESA Blanking Levels */
-#define VESA_NO_BLANKING 0
-#define VESA_VSYNC_SUSPEND 1
-#define VESA_HSYNC_SUSPEND 2
-#define VESA_POWERDOWN 3
-
-#ifdef CONFIG_VGA_CONSOLE
-extern bool vgacon_text_force(void);
-#else
-static inline bool vgacon_text_force(void) { return false; }
-#endif
+DEFINE_LOCK_GUARD_0(console_lock, console_lock(), console_unlock());
extern void console_init(void);
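The DEFINE_LOCK_GUARD_0() line above makes the console lock usable with the kernel's scope-based cleanup helpers from <linux/cleanup.h>; a hedged sketch of the resulting idiom:

	static void example_with_console_lock(void)
	{
		guard(console_lock)();	/* console_unlock() runs at scope exit */

		/* ... work that must hold the console lock ... */
	}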
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index d5b9c8d40c18..13b35637bd5a 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -17,8 +17,7 @@
#include <linux/vt.h>
#include <linux/workqueue.h>
-struct uni_pagedir;
-struct uni_screen;
+struct uni_pagedict;
#define NPAR 16
#define VC_TABSTOPS_COUNT 256U
@@ -146,20 +145,23 @@ struct vc_data {
unsigned int vc_need_wrap : 1;
unsigned int vc_can_do_color : 1;
unsigned int vc_report_mouse : 2;
+ unsigned int vc_bracketed_paste : 1;
unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */
unsigned char vc_utf_count;
int vc_utf_char;
DECLARE_BITMAP(vc_tab_stop, VC_TABSTOPS_COUNT); /* Tab stops. 256 columns. */
unsigned char vc_palette[16*3]; /* Colour palette for VGA+ */
unsigned short * vc_translate;
- unsigned int vc_resize_user; /* resize request from user */
unsigned int vc_bell_pitch; /* Console bell pitch */
unsigned int vc_bell_duration; /* Console bell duration */
unsigned short vc_cur_blink_ms; /* Cursor blink duration */
struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */
- struct uni_pagedir *vc_uni_pagedir;
- struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
- struct uni_screen *vc_uni_screen; /* unicode screen content */
+ struct uni_pagedict *uni_pagedict;
+ struct uni_pagedict **uni_pagedict_loc; /* [!] Location of uni_pagedict variable for this console */
+ u32 **vc_uni_lines; /* unicode screen content */
+ u16 *vc_saved_screen;
+ unsigned int vc_saved_cols;
+ unsigned int vc_saved_rows;
/* additional information is in vt_kern.h */
};
diff --git a/include/linux/consolemap.h b/include/linux/consolemap.h
index bcfce748c9d8..6180b803795c 100644
--- a/include/linux/consolemap.h
+++ b/include/linux/consolemap.h
@@ -7,30 +7,80 @@
#ifndef __LINUX_CONSOLEMAP_H__
#define __LINUX_CONSOLEMAP_H__
-#define LAT1_MAP 0
-#define GRAF_MAP 1
-#define IBMPC_MAP 2
-#define USER_MAP 3
+enum translation_map {
+ LAT1_MAP,
+ GRAF_MAP,
+ IBMPC_MAP,
+ USER_MAP,
+
+ FIRST_MAP = LAT1_MAP,
+ LAST_MAP = USER_MAP,
+};
#include <linux/types.h>
-#ifdef CONFIG_CONSOLE_TRANSLATIONS
struct vc_data;
-extern u16 inverse_translate(const struct vc_data *conp, int glyph,
- int use_unicode);
-extern unsigned short *set_translate(int m, struct vc_data *vc);
-extern int conv_uni_to_pc(struct vc_data *conp, long ucs);
-extern u32 conv_8bit_to_uni(unsigned char c);
-extern int conv_uni_to_8bit(u32 uni);
+#ifdef CONFIG_CONSOLE_TRANSLATIONS
+u16 inverse_translate(const struct vc_data *conp, u16 glyph, bool use_unicode);
+unsigned short *set_translate(enum translation_map m, struct vc_data *vc);
+int conv_uni_to_pc(struct vc_data *conp, long ucs);
+u32 conv_8bit_to_uni(unsigned char c);
+int conv_uni_to_8bit(u32 uni);
void console_map_init(void);
+bool ucs_is_double_width(uint32_t cp);
+bool ucs_is_zero_width(uint32_t cp);
+u32 ucs_recompose(u32 base, u32 mark);
+u32 ucs_get_fallback(u32 cp);
#else
-#define inverse_translate(conp, glyph, uni) ((uint16_t)glyph)
-#define set_translate(m, vc) ((unsigned short *)NULL)
-#define conv_uni_to_pc(conp, ucs) ((int) (ucs > 0xff ? -1: ucs))
-#define conv_8bit_to_uni(c) ((uint32_t)(c))
-#define conv_uni_to_8bit(c) ((int) ((c) & 0xff))
-#define console_map_init(c) do { ; } while (0)
+static inline u16 inverse_translate(const struct vc_data *conp, u16 glyph,
+ bool use_unicode)
+{
+ return glyph;
+}
+
+static inline unsigned short *set_translate(enum translation_map m,
+ struct vc_data *vc)
+{
+ return NULL;
+}
+
+static inline int conv_uni_to_pc(struct vc_data *conp, long ucs)
+{
+ return ucs > 0xff ? -1 : ucs;
+}
+
+static inline u32 conv_8bit_to_uni(unsigned char c)
+{
+ return c;
+}
+
+static inline int conv_uni_to_8bit(u32 uni)
+{
+ return uni & 0xff;
+}
+
+static inline void console_map_init(void) { }
+
+static inline bool ucs_is_double_width(uint32_t cp)
+{
+ return false;
+}
+
+static inline bool ucs_is_zero_width(uint32_t cp)
+{
+ return false;
+}
+
+static inline u32 ucs_recompose(u32 base, u32 mark)
+{
+ return 0;
+}
+
+static inline u32 ucs_get_fallback(u32 cp)
+{
+ return 0;
+}
#endif /* CONFIG_CONSOLE_TRANSLATIONS */
#endif /* __LINUX_CONSOLEMAP_H__ */
diff --git a/include/linux/container.h b/include/linux/container.h
index 2566a1baa736..dd00cc918a92 100644
--- a/include/linux/container.h
+++ b/include/linux/container.h
@@ -12,7 +12,7 @@
#include <linux/device.h>
/* drivers/base/power/container.c */
-extern struct bus_type container_subsys;
+extern const struct bus_type container_subsys;
struct container_dev {
struct device dev;
diff --git a/include/linux/container_of.h b/include/linux/container_of.h
new file mode 100644
index 000000000000..1f6ebf27d962
--- /dev/null
+++ b/include/linux/container_of.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CONTAINER_OF_H
+#define _LINUX_CONTAINER_OF_H
+
+#include <linux/build_bug.h>
+#include <linux/stddef.h>
+
+#define typeof_member(T, m) typeof(((T*)0)->m)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ * WARNING: any const qualifier of @ptr is lost.
+ * Do not use container_of() in new code.
+ */
+#define container_of(ptr, type, member) ({ \
+ void *__mptr = (void *)(ptr); \
+ static_assert(__same_type(*(ptr), ((type *)0)->member) || \
+ __same_type(*(ptr), void), \
+ "pointer type mismatch in container_of()"); \
+ ((type *)(__mptr - offsetof(type, member))); })
+
+/**
+ * container_of_const - cast a member of a structure out to the containing
+ * structure and preserve the const-ness of the pointer
+ * @ptr: the pointer to the member
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ * Always prefer container_of_const() instead of container_of() in new code.
+ */
+#define container_of_const(ptr, type, member) \
+ _Generic(ptr, \
+ const typeof(*(ptr)) *: ((const type *)container_of(ptr, type, member)),\
+ default: ((type *)container_of(ptr, type, member)) \
+ )
+
+#endif /* _LINUX_CONTAINER_OF_H */
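A hedged usage sketch: container_of() recovers the outer structure from a pointer to an embedded member, and container_of_const() additionally preserves const-ness. The types below are illustrative only:

	struct my_outer {
		int x;
		struct list_head node;	/* embedded member */
	};

	static struct my_outer *outer_from_node(struct list_head *n)
	{
		return container_of(n, struct my_outer, node);
	}

	static const struct my_outer *outer_from_node_const(const struct list_head *n)
	{
		/* _Generic selection yields a const struct my_outer * here. */
		return container_of_const(n, struct my_outer, node);
	}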
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 4d7fced3a39f..af9fe87a0922 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -10,112 +10,163 @@
#include <asm/ptrace.h>
-#ifdef CONFIG_CONTEXT_TRACKING
-extern void context_tracking_cpu_set(int cpu);
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+extern void ct_cpu_track_user(int cpu);
/* Called with interrupts disabled. */
-extern void __context_tracking_enter(enum ctx_state state);
-extern void __context_tracking_exit(enum ctx_state state);
+extern void __ct_user_enter(enum ctx_state state);
+extern void __ct_user_exit(enum ctx_state state);
-extern void context_tracking_enter(enum ctx_state state);
-extern void context_tracking_exit(enum ctx_state state);
-extern void context_tracking_user_enter(void);
-extern void context_tracking_user_exit(void);
+extern void ct_user_enter(enum ctx_state state);
+extern void ct_user_exit(enum ctx_state state);
+
+extern void user_enter_callable(void);
+extern void user_exit_callable(void);
static inline void user_enter(void)
{
if (context_tracking_enabled())
- context_tracking_enter(CONTEXT_USER);
+ ct_user_enter(CT_STATE_USER);
}
static inline void user_exit(void)
{
if (context_tracking_enabled())
- context_tracking_exit(CONTEXT_USER);
+ ct_user_exit(CT_STATE_USER);
}
/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
if (context_tracking_enabled())
- __context_tracking_enter(CONTEXT_USER);
+ __ct_user_enter(CT_STATE_USER);
}
static __always_inline void user_exit_irqoff(void)
{
if (context_tracking_enabled())
- __context_tracking_exit(CONTEXT_USER);
+ __ct_user_exit(CT_STATE_USER);
}
static inline enum ctx_state exception_enter(void)
{
enum ctx_state prev_ctx;
- if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) ||
+ if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) ||
!context_tracking_enabled())
return 0;
- prev_ctx = this_cpu_read(context_tracking.state);
- if (prev_ctx != CONTEXT_KERNEL)
- context_tracking_exit(prev_ctx);
+ prev_ctx = __ct_state();
+ if (prev_ctx != CT_STATE_KERNEL)
+ ct_user_exit(prev_ctx);
return prev_ctx;
}
static inline void exception_exit(enum ctx_state prev_ctx)
{
- if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK) &&
+ if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
context_tracking_enabled()) {
- if (prev_ctx != CONTEXT_KERNEL)
- context_tracking_enter(prev_ctx);
+ if (prev_ctx != CT_STATE_KERNEL)
+ ct_user_enter(prev_ctx);
}
}
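A hedged sketch of the classic arch-side pairing for these helpers; handle_the_exception() is a hypothetical stand-in for real handler work:

	enum ctx_state prev_state = exception_enter();

	handle_the_exception(regs);	/* hypothetical handler body */

	exception_exit(prev_state);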
static __always_inline bool context_tracking_guest_enter(void)
{
if (context_tracking_enabled())
- __context_tracking_enter(CONTEXT_GUEST);
+ __ct_user_enter(CT_STATE_GUEST);
return context_tracking_enabled_this_cpu();
}
-static __always_inline void context_tracking_guest_exit(void)
+static __always_inline bool context_tracking_guest_exit(void)
{
if (context_tracking_enabled())
- __context_tracking_exit(CONTEXT_GUEST);
-}
+ __ct_user_exit(CT_STATE_GUEST);
-/**
- * ct_state() - return the current context tracking state if known
- *
- * Returns the current cpu's context tracking state if context tracking
- * is enabled. If context tracking is disabled, returns
- * CONTEXT_DISABLED. This should be used primarily for debugging.
- */
-static __always_inline enum ctx_state ct_state(void)
-{
- return context_tracking_enabled() ?
- this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
+ return context_tracking_enabled_this_cpu();
}
+
+#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
+
#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
-static inline enum ctx_state exception_enter(void) { return 0; }
+static inline int exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
-static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
-static inline bool context_tracking_guest_enter(void) { return false; }
-static inline void context_tracking_guest_exit(void) { }
+static inline int ct_state(void) { return -1; }
+static inline int __ct_state(void) { return -1; }
+static __always_inline bool context_tracking_guest_enter(void) { return false; }
+static __always_inline bool context_tracking_guest_exit(void) { return false; }
+#define CT_WARN_ON(cond) do { } while (0)
+#endif /* !CONFIG_CONTEXT_TRACKING_USER */
+
+#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
+extern void context_tracking_init(void);
+#else
+static inline void context_tracking_init(void) { }
+#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */
-#endif /* !CONFIG_CONTEXT_TRACKING */
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+extern void ct_idle_enter(void);
+extern void ct_idle_exit(void);
-#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
+/*
+ * Is RCU watching the current CPU (IOW, it is not in an extended quiescent state)?
+ *
+ * Note that this returns the actual boolean data (watching / not watching),
+ * whereas ct_rcu_watching() returns the RCU_WATCHING subvariable of
+ * context_tracking.state.
+ *
+ * No ordering, as we are sampling CPU-local information.
+ */
+static __always_inline bool rcu_is_watching_curr_cpu(void)
+{
+ return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING;
+}
+
+/*
+ * Increment the current CPU's context_tracking structure's ->state field
+ * with ordering. Return the new value.
+ */
+static __always_inline unsigned long ct_state_inc(int incby)
+{
+ return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
+}
+
+static __always_inline bool warn_rcu_enter(void)
+{
+ bool ret = false;
+
+ /*
+ * Horrible hack to shut up recursive "RCU isn't watching" failures, since
+ * lots of the actual reporting also relies on RCU.
+ */
+ preempt_disable_notrace();
+ if (!rcu_is_watching_curr_cpu()) {
+ ret = true;
+ ct_state_inc(CT_RCU_WATCHING);
+ }
+
+ return ret;
+}
+
+static __always_inline void warn_rcu_exit(bool rcu)
+{
+ if (rcu)
+ ct_state_inc(CT_RCU_WATCHING);
+ preempt_enable_notrace();
+}
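Illustratively (not part of the patch), the pair brackets reporting code that may run while RCU is not watching:

	bool rcu_on = warn_rcu_enter();

	WARN_ONCE(1, "report issued while RCU may not be watching\n");

	warn_rcu_exit(rcu_on);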
-#ifdef CONFIG_CONTEXT_TRACKING_FORCE
-extern void context_tracking_init(void);
#else
-static inline void context_tracking_init(void) { }
-#endif /* CONFIG_CONTEXT_TRACKING_FORCE */
+static inline void ct_idle_enter(void) { }
+static inline void ct_idle_exit(void) { }
+
+static __always_inline bool warn_rcu_enter(void) { return false; }
+static __always_inline void warn_rcu_exit(bool rcu) { }
+#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */
#endif
diff --git a/include/linux/context_tracking_irq.h b/include/linux/context_tracking_irq.h
new file mode 100644
index 000000000000..197916ee91a4
--- /dev/null
+++ b/include/linux/context_tracking_irq.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CONTEXT_TRACKING_IRQ_H
+#define _LINUX_CONTEXT_TRACKING_IRQ_H
+
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+void ct_irq_enter(void);
+void ct_irq_exit(void);
+void ct_irq_enter_irqson(void);
+void ct_irq_exit_irqson(void);
+void ct_nmi_enter(void);
+void ct_nmi_exit(void);
+#else
+static __always_inline void ct_irq_enter(void) { }
+static __always_inline void ct_irq_exit(void) { }
+static inline void ct_irq_enter_irqson(void) { }
+static inline void ct_irq_exit_irqson(void) { }
+static __always_inline void ct_nmi_enter(void) { }
+static __always_inline void ct_nmi_exit(void) { }
+#endif
+
+#endif
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 65a60d3313b0..0b81248aa03e 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -4,8 +4,22 @@
#include <linux/percpu.h>
#include <linux/static_key.h>
+#include <linux/context_tracking_irq.h>
+
+/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
+#define CT_NESTING_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
+
+enum ctx_state {
+ CT_STATE_DISABLED = -1, /* returned by ct_state() if unknown */
+ CT_STATE_KERNEL = 0,
+ CT_STATE_IDLE = 1,
+ CT_STATE_USER = 2,
+ CT_STATE_GUEST = 3,
+ CT_STATE_MAX = 4,
+};
struct context_tracking {
+#ifdef CONFIG_CONTEXT_TRACKING_USER
/*
* When active is false, probes are unset in order
* to minimize overhead: TIF flags are cleared
@@ -14,17 +28,110 @@ struct context_tracking {
*/
bool active;
int recursion;
- enum ctx_state {
- CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
- CONTEXT_KERNEL = 0,
- CONTEXT_USER,
- CONTEXT_GUEST,
- } state;
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING
+ atomic_t state;
+#endif
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+ long nesting; /* Track process nesting level. */
+ long nmi_nesting; /* Track irq/NMI nesting level. */
+#endif
};
+/*
+ * We cram two different things within the same atomic variable:
+ *
+ * CT_RCU_WATCHING_START CT_STATE_START
+ * | |
+ * v v
+ * MSB [ RCU watching counter ][ context_state ] LSB
+ * ^ ^
+ * | |
+ * CT_RCU_WATCHING_END CT_STATE_END
+ *
+ * Bits are used from the LSB upwards, so unused bits (if any) will always be in
+ * upper bits of the variable.
+ */
#ifdef CONFIG_CONTEXT_TRACKING
-extern struct static_key_false context_tracking_key;
+#define CT_SIZE (sizeof(((struct context_tracking *)0)->state) * BITS_PER_BYTE)
+
+#define CT_STATE_WIDTH bits_per(CT_STATE_MAX - 1)
+#define CT_STATE_START 0
+#define CT_STATE_END (CT_STATE_START + CT_STATE_WIDTH - 1)
+
+#define CT_RCU_WATCHING_MAX_WIDTH (CT_SIZE - CT_STATE_WIDTH)
+#define CT_RCU_WATCHING_WIDTH (IS_ENABLED(CONFIG_RCU_DYNTICKS_TORTURE) ? 2 : CT_RCU_WATCHING_MAX_WIDTH)
+#define CT_RCU_WATCHING_START (CT_STATE_END + 1)
+#define CT_RCU_WATCHING_END (CT_RCU_WATCHING_START + CT_RCU_WATCHING_WIDTH - 1)
+#define CT_RCU_WATCHING BIT(CT_RCU_WATCHING_START)
+
+#define CT_STATE_MASK GENMASK(CT_STATE_END, CT_STATE_START)
+#define CT_RCU_WATCHING_MASK GENMASK(CT_RCU_WATCHING_END, CT_RCU_WATCHING_START)
+
+#define CT_UNUSED_WIDTH (CT_RCU_WATCHING_MAX_WIDTH - CT_RCU_WATCHING_WIDTH)
+
+static_assert(CT_STATE_WIDTH +
+ CT_RCU_WATCHING_WIDTH +
+ CT_UNUSED_WIDTH ==
+ CT_SIZE);
+
DECLARE_PER_CPU(struct context_tracking, context_tracking);
+#endif /* CONFIG_CONTEXT_TRACKING */
+
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+static __always_inline int __ct_state(void)
+{
+ return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
+}
+#endif
+
+#ifdef CONFIG_CONTEXT_TRACKING_IDLE
+static __always_inline int ct_rcu_watching(void)
+{
+ return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING_MASK;
+}
+
+static __always_inline int ct_rcu_watching_cpu(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return atomic_read(&ct->state) & CT_RCU_WATCHING_MASK;
+}
+
+static __always_inline int ct_rcu_watching_cpu_acquire(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return atomic_read_acquire(&ct->state) & CT_RCU_WATCHING_MASK;
+}
+
+static __always_inline long ct_nesting(void)
+{
+ return __this_cpu_read(context_tracking.nesting);
+}
+
+static __always_inline long ct_nesting_cpu(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return ct->nesting;
+}
+
+static __always_inline long ct_nmi_nesting(void)
+{
+ return __this_cpu_read(context_tracking.nmi_nesting);
+}
+
+static __always_inline long ct_nmi_nesting_cpu(int cpu)
+{
+ struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+
+ return ct->nmi_nesting;
+}
+#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */
+
+#ifdef CONFIG_CONTEXT_TRACKING_USER
+extern struct static_key_false context_tracking_key;
static __always_inline bool context_tracking_enabled(void)
{
@@ -36,20 +143,36 @@ static __always_inline bool context_tracking_enabled_cpu(int cpu)
return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
}
-static inline bool context_tracking_enabled_this_cpu(void)
+static __always_inline bool context_tracking_enabled_this_cpu(void)
{
return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
}
-static __always_inline bool context_tracking_in_user(void)
+/**
+ * ct_state() - return the current context tracking state if known
+ *
+ * Returns the current cpu's context tracking state if context tracking
+ * is enabled. If context tracking is disabled, returns
+ * CT_STATE_DISABLED. This should be used primarily for debugging.
+ */
+static __always_inline int ct_state(void)
{
- return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
+ int ret;
+
+ if (!context_tracking_enabled())
+ return CT_STATE_DISABLED;
+
+ preempt_disable();
+ ret = __ct_state();
+ preempt_enable();
+
+ return ret;
}
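For example, entry-code-style assertions can combine ct_state() with the CT_WARN_ON() helper introduced earlier (a sketch mirroring, but not quoting, the kernel's entry code):

	/* Complain if we are unexpectedly outside kernel context. */
	CT_WARN_ON(ct_state() != CT_STATE_KERNEL);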
+
#else
-static inline bool context_tracking_in_user(void) { return false; }
-static inline bool context_tracking_enabled(void) { return false; }
-static inline bool context_tracking_enabled_cpu(int cpu) { return false; }
-static inline bool context_tracking_enabled_this_cpu(void) { return false; }
-#endif /* CONFIG_CONTEXT_TRACKING */
+static __always_inline bool context_tracking_enabled(void) { return false; }
+static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
+static __always_inline bool context_tracking_enabled_this_cpu(void) { return false; }
+#endif /* CONFIG_CONTEXT_TRACKING_USER */
#endif
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 78fcd776b185..68861da4cf7c 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -10,31 +10,70 @@
#ifdef CONFIG_COREDUMP
struct core_vma_metadata {
unsigned long start, end;
- unsigned long flags;
+ vm_flags_t flags;
unsigned long dump_size;
+ unsigned long pgoff;
+ struct file *file;
};
-extern int core_uses_pid;
-extern char core_pattern[];
-extern unsigned int core_pipe_limit;
+struct coredump_params {
+ const kernel_siginfo_t *siginfo;
+ struct file *file;
+ unsigned long limit;
+ unsigned long mm_flags;
+ int cpu;
+ loff_t written;
+ loff_t pos;
+ loff_t to_skip;
+ int vma_count;
+ size_t vma_data_size;
+ struct core_vma_metadata *vma_meta;
+ struct pid *pid;
+};
+
+extern unsigned int core_file_note_size_limit;
/*
* These are the only things you should do on a core-file: use only these
* functions to write out all the necessary info.
*/
-struct coredump_params;
extern void dump_skip_to(struct coredump_params *cprm, unsigned long to);
extern void dump_skip(struct coredump_params *cprm, size_t nr);
extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
int dump_user_range(struct coredump_params *cprm, unsigned long start,
unsigned long len);
-int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count,
- struct core_vma_metadata **vma_meta,
- size_t *vma_data_size_ptr);
-extern void do_coredump(const kernel_siginfo_t *siginfo);
+extern void vfs_coredump(const kernel_siginfo_t *siginfo);
+
+/*
+ * Logging for the coredump code, ratelimited.
+ * The TGID and comm fields are added to the message.
+ */
+
+#define __COREDUMP_PRINTK(Level, Format, ...) \
+ do { \
+ char comm[TASK_COMM_LEN]; \
+ /* This will always be NUL terminated. */ \
+ memcpy(comm, current->comm, sizeof(comm)); \
+ printk_ratelimited(Level "coredump: %d(%*pE): " Format "\n", \
+ task_tgid_vnr(current), (int)strlen(comm), comm, ##__VA_ARGS__); \
+ } while (0) \
+
+#define coredump_report(fmt, ...) __COREDUMP_PRINTK(KERN_INFO, fmt, ##__VA_ARGS__)
+#define coredump_report_failure(fmt, ...) __COREDUMP_PRINTK(KERN_WARNING, fmt, ##__VA_ARGS__)
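The reporting macros are printf-like, with the TGID and comm prefix added automatically; an illustrative call (the format string and variable are hypothetical):

	coredump_report_failure("failed to dump VMA at %#lx", vma_start);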
+
+#else
+static inline void vfs_coredump(const kernel_siginfo_t *siginfo) {}
+
+#define coredump_report(...)
+#define coredump_report_failure(...)
+
+#endif
+
+#if defined(CONFIG_COREDUMP) && defined(CONFIG_SYSCTL)
+extern void validate_coredump_safety(void);
#else
-static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
+static inline void validate_coredump_safety(void) {}
#endif
#endif /* _LINUX_COREDUMP_H */
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index 4ac5c081af93..89b0ac0014b0 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -7,8 +7,19 @@
#ifndef _LINUX_CORESIGHT_PMU_H
#define _LINUX_CORESIGHT_PMU_H
+#include <linux/bits.h>
+
#define CORESIGHT_ETM_PMU_NAME "cs_etm"
-#define CORESIGHT_ETM_PMU_SEED 0x10
+
+/*
+ * The legacy Trace ID system, based on a fixed calculation from the CPU
+ * number. This has been replaced by drivers using a dynamic allocation
+ * system, but the legacy algorithm must be retained for backward
+ * compatibility in certain situations:
+ * a) new perf running on older systems that generate the legacy mapping
+ * b) older tools that may not update at the same time as the kernel.
+ */
+#define CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) (0x10 + (cpu * 2))
/*
* Below are the definition of bit offsets for perf option, and works as
@@ -18,6 +29,7 @@
* ETMv3.5/PTM doesn't define ETMCR config bits with prefix "ETM3_" and
* directly use below macros as config bits.
*/
+#define ETM_OPT_BRANCH_BROADCAST 8
#define ETM_OPT_CYCACC 12
#define ETM_OPT_CTXTID 14
#define ETM_OPT_CTXTID2 15
@@ -25,6 +37,7 @@
#define ETM_OPT_RETSTK 29
/* ETMv4 CONFIGR programming bits for the ETM OPTs */
+#define ETM4_CFG_BIT_BB 3
#define ETM4_CFG_BIT_CYCACC 4
#define ETM4_CFG_BIT_CTXTID 6
#define ETM4_CFG_BIT_VMID 7
@@ -32,15 +45,25 @@
#define ETM4_CFG_BIT_RETSTK 12
#define ETM4_CFG_BIT_VMID_OPT 15
-static inline int coresight_get_trace_id(int cpu)
-{
- /*
- * A trace ID of value 0 is invalid, so let's start at some
- * random value that fits in 7 bits and go from there. Since
- * the common convention is to have data trace IDs be I(N) + 1,
- * set instruction trace IDs as a function of the CPU number.
- */
- return (CORESIGHT_ETM_PMU_SEED + (cpu * 2));
-}
+/*
+ * Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload.
+ * Used to associate a CPU with the CoreSight Trace ID.
+ * [07:00] - Trace ID - uses 8 bits to make value easy to read in file.
+ * [39:08] - Sink ID - as reported in /sys/bus/event_source/devices/cs_etm/sinks/
+ * Added in minor version 1.
+ * [55:40] - Unused (SBZ)
+ * [59:56] - Minor Version - previously existing fields are compatible with
+ * all minor versions.
+ * [63:60] - Major Version - previously existing fields mean different things
+ * in new major versions.
+ */
+#define CS_AUX_HW_ID_TRACE_ID_MASK GENMASK_ULL(7, 0)
+#define CS_AUX_HW_ID_SINK_ID_MASK GENMASK_ULL(39, 8)
+
+#define CS_AUX_HW_ID_MINOR_VERSION_MASK GENMASK_ULL(59, 56)
+#define CS_AUX_HW_ID_MAJOR_VERSION_MASK GENMASK_ULL(63, 60)
+
+#define CS_AUX_HW_ID_MAJOR_VERSION 0
+#define CS_AUX_HW_ID_MINOR_VERSION 1
#endif
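As a hedged illustration of the two schemes above: the legacy macro still maps CPU n to 0x10 + 2n (so CPU 1 gets trace ID 0x12), while a PERF_RECORD_AUX_OUTPUT_HW_ID payload is decoded with the bitfield masks. FIELD_GET() comes from <linux/bitfield.h>; the helper name is hypothetical:

	#include <linux/bitfield.h>

	static inline u8 cs_aux_hw_id_trace_id(u64 hw_id)
	{
		/* Bits [07:00] of the payload carry the trace ID. */
		return FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);
	}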
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 85008a65e21f..2b48be97fcd0 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -6,10 +6,13 @@
#ifndef _LINUX_CORESIGHT_H
#define _LINUX_CORESIGHT_H
+#include <linux/amba/bus.h>
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <linux/sched.h>
+#include <linux/platform_device.h>
/* Peripheral id registers (0xFD0-0xFEC) */
#define CORESIGHT_PERIPHIDR4 0xfd0
@@ -33,20 +36,19 @@
#define CORESIGHT_UNLOCK 0xc5acce55
-extern struct bus_type coresight_bustype;
+extern const struct bus_type coresight_bustype;
enum coresight_dev_type {
- CORESIGHT_DEV_TYPE_NONE,
CORESIGHT_DEV_TYPE_SINK,
CORESIGHT_DEV_TYPE_LINK,
CORESIGHT_DEV_TYPE_LINKSINK,
CORESIGHT_DEV_TYPE_SOURCE,
CORESIGHT_DEV_TYPE_HELPER,
- CORESIGHT_DEV_TYPE_ECT,
+ CORESIGHT_DEV_TYPE_MAX
};
enum coresight_dev_subtype_sink {
- CORESIGHT_DEV_SUBTYPE_SINK_NONE,
+ CORESIGHT_DEV_SUBTYPE_SINK_DUMMY,
CORESIGHT_DEV_SUBTYPE_SINK_PORT,
CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM,
@@ -54,28 +56,23 @@ enum coresight_dev_subtype_sink {
};
enum coresight_dev_subtype_link {
- CORESIGHT_DEV_SUBTYPE_LINK_NONE,
CORESIGHT_DEV_SUBTYPE_LINK_MERG,
CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
};
enum coresight_dev_subtype_source {
- CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS,
};
enum coresight_dev_subtype_helper {
- CORESIGHT_DEV_SUBTYPE_HELPER_NONE,
CORESIGHT_DEV_SUBTYPE_HELPER_CATU,
-};
-
-/* Embedded Cross Trigger (ECT) sub-types */
-enum coresight_dev_subtype_ect {
- CORESIGHT_DEV_SUBTYPE_ECT_NONE,
- CORESIGHT_DEV_SUBTYPE_ECT_CTI,
+ CORESIGHT_DEV_SUBTYPE_HELPER_ECT_CTI,
+ CORESIGHT_DEV_SUBTYPE_HELPER_CTCU,
};
/**
@@ -88,8 +85,6 @@ enum coresight_dev_subtype_ect {
* by @coresight_dev_subtype_source.
* @helper_subtype: type of helper this component is, as defined
* by @coresight_dev_subtype_helper.
- * @ect_subtype: type of cross trigger this component is, as
- * defined by @coresight_dev_subtype_ect
*/
union coresight_dev_subtype {
/* We have some devices which acts as LINK and SINK */
@@ -99,21 +94,25 @@ union coresight_dev_subtype {
};
enum coresight_dev_subtype_source source_subtype;
enum coresight_dev_subtype_helper helper_subtype;
- enum coresight_dev_subtype_ect ect_subtype;
};
/**
* struct coresight_platform_data - data harvested from the firmware
* specification.
*
- * @nr_inport: Number of elements for the input connections.
- * @nr_outport: Number of elements for the output connections.
- * @conns: Sparse array of nr_outport connections from this component.
+ * @nr_inconns: Number of elements for the input connections.
+ * @nr_outconns: Number of elements for the output connections.
+ * @out_conns: Array of nr_outconns pointers to connections from this
+ * component.
+ * @in_conns: Sparse array of pointers to input connections. Sparse
+ * because the source device owns the connection so when it's
+ * unloaded the connection leaves an empty slot.
*/
struct coresight_platform_data {
- int nr_inport;
- int nr_outport;
- struct coresight_connection *conns;
+ int nr_inconns;
+ int nr_outconns;
+ struct coresight_connection **out_conns;
+ struct coresight_connection **in_conns;
};
/**
@@ -168,19 +167,47 @@ struct coresight_desc {
/**
* struct coresight_connection - representation of a single connection
- * @outport: a connection's output port number.
- * @child_port: remote component's port number @output is connected to.
- * @chid_fwnode: remote component's fwnode handle.
- * @child_dev: a @coresight_device representation of the component
- connected to @outport.
+ * @src_port: a connection's output port number.
+ * @dest_port: destination's input port number @src_port is connected to.
+ * @dest_fwnode: destination component's fwnode handle.
+ * @dest_dev: a @coresight_device representation of the component
+ connected to @src_port. NULL until the device is created
* @link: Representation of the connection as a sysfs link.
+ * @filter_src_fwnode: filter source component's fwnode handle.
+ * @filter_src_dev: a @coresight_device representation of the component that
+ needs to be filtered.
+ *
+ * The full connection structure looks like this, where in_conns store
+ * references to same connection as the source device's out_conns.
+ *
+ *  +-----------------------------+   +-----------------------------+
+ *  |coresight_device             |   |coresight_connection         |
+ *  |-----------------------------|   |-----------------------------|
+ *  |                             |   |                             |
+ *  |                             |   |                    dest_dev*|<--
+ *  |pdata->out_conns[nr_outconns]|<->|src_dev*                     |  |
+ *  |                             |   |                             |  |
+ *  +-----------------------------+   +-----------------------------+  |
+ *                                                                     |
+ *  +-----------------------------+                                    |
+ *  |coresight_device             |                                    |
+ *  |-----------------------------|                                    |
+ *  |                             |                                    |
+ *  |  pdata->in_conns[nr_inconns]|<------------------------------------
+ *  |                             |
+ *  +-----------------------------+
*/
struct coresight_connection {
- int outport;
- int child_port;
- struct fwnode_handle *child_fwnode;
- struct coresight_device *child_dev;
+ int src_port;
+ int dest_port;
+ struct fwnode_handle *dest_fwnode;
+ struct coresight_device *dest_dev;
struct coresight_sysfs_link *link;
+ struct coresight_device *src_dev;
+ struct fwnode_handle *filter_src_fwnode;
+ struct coresight_device *filter_src_dev;
+ int src_refcnt;
+ int dest_refcnt;
};
/**
@@ -197,6 +224,24 @@ struct coresight_sysfs_link {
const char *target_name;
};
+/* architecturally we have 128 IDs some of which are reserved */
+#define CORESIGHT_TRACE_IDS_MAX 128
+
+/**
+ * Trace ID map.
+ *
+ * @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs.
+ * Initialised so that the reserved IDs are permanently marked as
+ * in use.
+ * @perf_cs_etm_session_active: Number of Perf sessions using this ID map.
+ */
+struct coresight_trace_id_map {
+ DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
+ atomic_t __percpu *cpu_map;
+ atomic_t perf_cs_etm_session_active;
+ raw_spinlock_t lock;
+};
+
/**
* struct coresight_device - representation of a device as used by the framework
* @pdata: Platform data with device connections associated to this device.
@@ -206,20 +251,31 @@ struct coresight_sysfs_link {
* by @coresight_ops.
* @access: Device i/o access abstraction for this device.
* @dev: The device entity associated to this component.
- * @refcnt: keep track of what is in use.
+ * @mode: The device mode, i.e. sysFS, Perf or disabled. This is actually
+ * an 'enum cs_mode' but stored in an atomic type. Access is always
+ * through atomic APIs, ensuring SMP-safe synchronisation between
+ * racing from sysFS and Perf mode. A compare-and-exchange
+ * operation is done to atomically claim one mode or the other.
+ * @refcnt: keep track of what is in use. Only access this outside of the
+ * device's spinlock when the coresight_mutex is held and mode ==
+ * CS_MODE_SYSFS. Otherwise it must be accessed from inside the
+ * spinlock.
* @orphan: true if the component has connections that haven't been linked.
- * @enable: 'true' if component is currently part of an active path.
- * @activated: 'true' only if a _sink_ has been activated. A sink can be
- * activated but not yet enabled. Enabling for a _sink_
- * happens when a source has been selected and a path is enabled
- * from source to that sink.
+ * @sysfs_sink_activated: 'true' when a sink has been selected for use via sysfs
+ * by writing a 1 to the 'enable_sink' file. A sink can be
+ * activated but not yet enabled. Enabling for a _sink_ happens
+ * when a source has been selected and a path is enabled from
+ * source to that sink. A sink can also become enabled but not
+ * activated if it's used via Perf.
* @ea: Device attribute for sink representation under PMU directory.
* @def_sink: cached reference to default sink found for this device.
- * @ect_dev: Associated cross trigger device. Not part of the trace data
- * path or connections.
* @nr_links: number of sysfs links created to other components from this
* device. These will appear in the "connections" group.
* @has_conns_grp: Have added a "connections" group for sysfs links.
+ * @feature_csdev_list: List of complex feature programming added to the device.
+ * @config_csdev_list: List of system configurations added to the device.
+ * @cscfg_csdev_lock: Protect the lists of configurations and features.
+ * @active_cscfg_ctxt: Context information for current active system configuration.
*/
struct coresight_device {
struct coresight_platform_data *pdata;
@@ -228,19 +284,22 @@ struct coresight_device {
const struct coresight_ops *ops;
struct csdev_access access;
struct device dev;
- atomic_t *refcnt;
+ atomic_t mode;
+ int refcnt;
bool orphan;
- bool enable; /* true only if configured as part of a path */
/* sink specific fields */
- bool activated; /* true only if a sink is part of a path */
+ bool sysfs_sink_activated;
struct dev_ext_attribute *ea;
struct coresight_device *def_sink;
- /* cross trigger handling */
- struct coresight_device *ect_dev;
+ struct coresight_trace_id_map perf_sink_id_map;
/* sysfs links between components */
int nr_links;
bool has_conns_grp;
- bool ect_enabled; /* true only if associated ect device is enabled */
+ /* system configuration and feature lists */
+ struct list_head feature_csdev_list;
+ struct list_head config_csdev_list;
+ raw_spinlock_t cscfg_csdev_lock;
+ void *active_cscfg_ctxt;
};
/*
@@ -267,11 +326,31 @@ static struct coresight_dev_list (var) = { \
#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
+/**
+ * struct coresight_path - data needed by enable/disable path
+ * @path_list: path from source to sink.
+ * @trace_id: trace_id of the whole path.
+ * @handle: handle of the aux_event.
+ */
+struct coresight_path {
+ struct list_head path_list;
+ u8 trace_id;
+ struct perf_output_handle *handle;
+};
+
+enum cs_mode {
+ CS_MODE_DISABLED,
+ CS_MODE_SYSFS,
+ CS_MODE_PERF,
+};
+
+#define coresight_ops(csdev) csdev->ops
#define source_ops(csdev) csdev->ops->source_ops
#define sink_ops(csdev) csdev->ops->sink_ops
#define link_ops(csdev) csdev->ops->link_ops
#define helper_ops(csdev) csdev->ops->helper_ops
#define ect_ops(csdev) csdev->ops->ect_ops
+#define panic_ops(csdev) csdev->ops->panic_ops
/**
* struct coresight_ops_sink - basic operations for a sink
@@ -283,7 +362,8 @@ static struct coresight_dev_list (var) = { \
* @update_buffer: update buffer pointers after a trace session.
*/
struct coresight_ops_sink {
- int (*enable)(struct coresight_device *csdev, u32 mode, void *data);
+ int (*enable)(struct coresight_device *csdev, enum cs_mode mode,
+ struct coresight_path *path);
int (*disable)(struct coresight_device *csdev);
void *(*alloc_buffer)(struct coresight_device *csdev,
struct perf_event *event, void **pages,
@@ -301,8 +381,12 @@ struct coresight_ops_sink {
* @disable: disables flow between iport and oport.
*/
struct coresight_ops_link {
- int (*enable)(struct coresight_device *csdev, int iport, int oport);
- void (*disable)(struct coresight_device *csdev, int iport, int oport);
+ int (*enable)(struct coresight_device *csdev,
+ struct coresight_connection *in,
+ struct coresight_connection *out);
+ void (*disable)(struct coresight_device *csdev,
+ struct coresight_connection *in,
+ struct coresight_connection *out);
};
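/*
 * Sketch: link enable/disable now take the input and output connections
 * instead of raw port numbers; a funnel, for instance, derives the port
 * to (un)mask from the input connection (names below are hypothetical):
 */
static int example_funnel_enable(struct coresight_device *csdev,
				 struct coresight_connection *in,
				 struct coresight_connection *out)
{
	/* the input connection identifies which funnel port to unmask */
	return 0;
}

static const struct coresight_ops_link example_link_ops = {
	.enable = example_funnel_enable,
};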
/**
@@ -310,18 +394,19 @@ struct coresight_ops_link {
* Operations available for sources.
* @cpu_id: returns the value of the CPU number this component
* is associated to.
- * @trace_id: returns the value of the component's trace ID as known
- * to the HW.
* @enable: enables tracing for a source.
* @disable: disables tracing for a source.
+ * @resume_perf: resumes tracing for a source in a perf session.
+ * @pause_perf: pauses tracing for a source in a perf session.
*/
struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev);
- int (*trace_id)(struct coresight_device *csdev);
- int (*enable)(struct coresight_device *csdev,
- struct perf_event *event, u32 mode);
+ int (*enable)(struct coresight_device *csdev, struct perf_event *event,
+ enum cs_mode mode, struct coresight_path *path);
void (*disable)(struct coresight_device *csdev,
struct perf_event *event);
+ int (*resume_perf)(struct coresight_device *csdev);
+ void (*pause_perf)(struct coresight_device *csdev);
};
/**
@@ -334,31 +419,32 @@ struct coresight_ops_source {
* @disable : Disable the device
*/
struct coresight_ops_helper {
- int (*enable)(struct coresight_device *csdev, void *data);
- int (*disable)(struct coresight_device *csdev, void *data);
+ int (*enable)(struct coresight_device *csdev, enum cs_mode mode,
+ struct coresight_path *path);
+ int (*disable)(struct coresight_device *csdev,
+ struct coresight_path *path);
};
+
/**
- * struct coresight_ops_ect - Ops for an embedded cross trigger device
+ * struct coresight_ops_panic - Generic device ops for panic handling
*
- * @enable : Enable the device
- * @disable : Disable the device
+ * @sync : Sync the device register state/trace data
*/
-struct coresight_ops_ect {
- int (*enable)(struct coresight_device *csdev);
- int (*disable)(struct coresight_device *csdev);
+struct coresight_ops_panic {
+ int (*sync)(struct coresight_device *csdev);
};
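/*
 * Sketch of how a sink driver might populate the panic ops (the driver
 * and function names are hypothetical, not from this patch):
 */
static int example_sink_panic_sync(struct coresight_device *csdev)
{
	/* flush internal FIFOs and sync trace buffer state to memory */
	return 0;
}

static const struct coresight_ops_panic example_panic_ops = {
	.sync = example_sink_panic_sync,
};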
struct coresight_ops {
+ int (*trace_id)(struct coresight_device *csdev, enum cs_mode mode,
+ struct coresight_device *sink);
const struct coresight_ops_sink *sink_ops;
const struct coresight_ops_link *link_ops;
const struct coresight_ops_source *source_ops;
const struct coresight_ops_helper *helper_ops;
- const struct coresight_ops_ect *ect_ops;
+ const struct coresight_ops_panic *panic_ops;
};
-#if IS_ENABLED(CONFIG_CORESIGHT)
-
static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa,
u32 offset)
{
@@ -368,6 +454,60 @@ static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa,
return csa->read(offset, true, false);
}
+#define CORESIGHT_CIDRn(i) (0xFF0 + ((i) * 4))
+
+static inline u32 coresight_get_cid(void __iomem *base)
+{
+ u32 i, cid = 0;
+
+ for (i = 0; i < 4; i++)
+ cid |= readl(base + CORESIGHT_CIDRn(i)) << (i * 8);
+
+ return cid;
+}
+
+static inline bool is_coresight_device(void __iomem *base)
+{
+ u32 cid = coresight_get_cid(base);
+
+ return cid == CORESIGHT_CID;
+}
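/*
 * Sketch: using the CID helpers when probing a candidate memory-mapped
 * component. The physical address and mapping size are hypothetical.
 */
static bool example_probe_component(phys_addr_t pa)
{
	void __iomem *base = ioremap(pa, SZ_4K);
	bool ok;

	if (!base)
		return false;
	ok = is_coresight_device(base);	/* compares CIDR0-3 to CORESIGHT_CID */
	iounmap(base);
	return ok;
}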
+
+#define CORESIGHT_PIDRn(i) (0xFE0 + ((i) * 4))
+
+static inline u32 coresight_get_pid(struct csdev_access *csa)
+{
+ u32 i, pid = 0;
+
+ for (i = 0; i < 4; i++)
+ pid |= csdev_access_relaxed_read32(csa, CORESIGHT_PIDRn(i)) << (i * 8);
+
+ return pid;
+}
+
+static inline u64 csdev_access_relaxed_read_pair(struct csdev_access *csa,
+ u32 lo_offset, u32 hi_offset)
+{
+ if (likely(csa->io_mem)) {
+ return readl_relaxed(csa->base + lo_offset) |
+ ((u64)readl_relaxed(csa->base + hi_offset) << 32);
+ }
+
+ return csa->read(lo_offset, true, false) | (csa->read(hi_offset, true, false) << 32);
+}
+
+static inline void csdev_access_relaxed_write_pair(struct csdev_access *csa, u64 val,
+ u32 lo_offset, u32 hi_offset)
+{
+ if (likely(csa->io_mem)) {
+ writel_relaxed((u32)val, csa->base + lo_offset);
+ writel_relaxed((u32)(val >> 32), csa->base + hi_offset);
+ } else {
+ csa->write((u32)val, lo_offset, true, false);
+ csa->write((u32)(val >> 32), hi_offset, true, false);
+ }
+}
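/*
 * Sketch: a 64-bit quantity split across LO/HI registers, such as a
 * buffer address, can be accessed with the pair helpers. The offsets
 * below are hypothetical placeholders, not real component registers.
 */
#define EXAMPLE_BUFADDR_LO	0x118
#define EXAMPLE_BUFADDR_HI	0x11c

static inline void example_set_buf_addr(struct csdev_access *csa, u64 addr)
{
	csdev_access_relaxed_write_pair(csa, addr, EXAMPLE_BUFADDR_LO,
					EXAMPLE_BUFADDR_HI);
}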
+
static inline u32 csdev_access_read32(struct csdev_access *csa, u32 offset)
{
if (likely(csa->io_mem))
@@ -456,9 +596,14 @@ static inline void csdev_access_write64(struct csdev_access *csa, u64 val, u32 o
}
#endif /* CONFIG_64BIT */
+static inline bool coresight_is_device_source(struct coresight_device *csdev)
+{
+ return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE);
+}
+
static inline bool coresight_is_percpu_source(struct coresight_device *csdev)
{
- return csdev && (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) &&
+ return csdev && coresight_is_device_source(csdev) &&
(csdev->subtype.source_subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_PROC);
}
@@ -468,23 +613,60 @@ static inline bool coresight_is_percpu_sink(struct coresight_device *csdev)
(csdev->subtype.sink_subtype == CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM);
}
-extern struct coresight_device *
-coresight_register(struct coresight_desc *desc);
-extern void coresight_unregister(struct coresight_device *csdev);
-extern int coresight_enable(struct coresight_device *csdev);
-extern void coresight_disable(struct coresight_device *csdev);
-extern int coresight_timeout(struct csdev_access *csa, u32 offset,
- int position, int value);
+/*
+ * Atomically try to take the device and set a new mode. Returns true on
+ * success, false if the device is already taken by someone else.
+ */
+static inline bool coresight_take_mode(struct coresight_device *csdev,
+ enum cs_mode new_mode)
+{
+ int curr = CS_MODE_DISABLED;
+
+ return atomic_try_cmpxchg_acquire(&csdev->mode, &curr, new_mode);
+}
+
+static inline enum cs_mode coresight_get_mode(struct coresight_device *csdev)
+{
+ return atomic_read_acquire(&csdev->mode);
+}
+
+static inline void coresight_set_mode(struct coresight_device *csdev,
+ enum cs_mode new_mode)
+{
+ enum cs_mode current_mode = coresight_get_mode(csdev);
+
+ /*
+ * Changing to a new mode must be done from an already disabled state
+ * unless it's synchronized with coresight_take_mode(). Otherwise the
+ * device is already in use, which indicates a locking issue.
+ */
+ WARN(new_mode != CS_MODE_DISABLED && current_mode != CS_MODE_DISABLED &&
+ current_mode != new_mode, "Device already in use\n");
-extern int coresight_claim_device(struct coresight_device *csdev);
-extern int coresight_claim_device_unlocked(struct coresight_device *csdev);
+ atomic_set_release(&csdev->mode, new_mode);
+}
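/*
 * Sketch of the intended claim/release pattern for the atomic mode (the
 * enable path below is illustrative, not a real driver):
 */
static int example_enable_sysfs(struct coresight_device *csdev)
{
	if (!coresight_take_mode(csdev, CS_MODE_SYSFS))
		return -EBUSY;	/* already claimed by Perf or another user */
	/* ... program the hardware ... */
	return 0;
}

static void example_disable(struct coresight_device *csdev)
{
	/* ... stop the hardware ... */
	coresight_set_mode(csdev, CS_MODE_DISABLED);
}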
-extern void coresight_disclaim_device(struct coresight_device *csdev);
-extern void coresight_disclaim_device_unlocked(struct coresight_device *csdev);
-extern char *coresight_alloc_device_name(struct coresight_dev_list *devs,
+struct coresight_device *coresight_register(struct coresight_desc *desc);
+void coresight_unregister(struct coresight_device *csdev);
+int coresight_enable_sysfs(struct coresight_device *csdev);
+void coresight_disable_sysfs(struct coresight_device *csdev);
+int coresight_timeout(struct csdev_access *csa, u32 offset, int position, int value);
+typedef void (*coresight_timeout_cb_t)(struct csdev_access *, u32, int, int);
+int coresight_timeout_action(struct csdev_access *csa, u32 offset, int position, int value,
+ coresight_timeout_cb_t cb);
+int coresight_claim_device(struct coresight_device *csdev);
+int coresight_claim_device_unlocked(struct coresight_device *csdev);
+
+void coresight_clear_self_claim_tag(struct csdev_access *csa);
+void coresight_clear_self_claim_tag_unlocked(struct csdev_access *csa);
+void coresight_disclaim_device(struct coresight_device *csdev);
+void coresight_disclaim_device_unlocked(struct coresight_device *csdev);
+char *coresight_alloc_device_name(struct coresight_dev_list *devs,
struct device *dev);
-extern bool coresight_loses_context_with_cpu(struct device *dev);
+bool coresight_loses_context_with_cpu(struct device *dev);
u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset);
u32 coresight_read32(struct coresight_device *csdev, u32 offset);
@@ -497,85 +679,31 @@ void coresight_relaxed_write64(struct coresight_device *csdev,
u64 val, u32 offset);
void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset);
-#else
-static inline struct coresight_device *
-coresight_register(struct coresight_desc *desc) { return NULL; }
-static inline void coresight_unregister(struct coresight_device *csdev) {}
-static inline int
-coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
-static inline void coresight_disable(struct coresight_device *csdev) {}
-
-static inline int coresight_timeout(struct csdev_access *csa, u32 offset,
- int position, int value)
-{
- return 1;
-}
-
-static inline int coresight_claim_device_unlocked(struct coresight_device *csdev)
-{
- return -EINVAL;
-}
-
-static inline int coresight_claim_device(struct coresight_device *csdev)
-{
- return -EINVAL;
-}
-
-static inline void coresight_disclaim_device(struct coresight_device *csdev) {}
-static inline void coresight_disclaim_device_unlocked(struct coresight_device *csdev) {}
-
-static inline bool coresight_loses_context_with_cpu(struct device *dev)
-{
- return false;
-}
-
-static inline u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset)
-{
- WARN_ON_ONCE(1);
- return 0;
-}
-
-static inline u32 coresight_read32(struct coresight_device *csdev, u32 offset)
-{
- WARN_ON_ONCE(1);
- return 0;
-}
-
-static inline void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset)
-{
-}
-
-static inline void coresight_relaxed_write32(struct coresight_device *csdev,
- u32 val, u32 offset)
-{
-}
-
-static inline u64 coresight_relaxed_read64(struct coresight_device *csdev,
- u32 offset)
-{
- WARN_ON_ONCE(1);
- return 0;
-}
-
-static inline u64 coresight_read64(struct coresight_device *csdev, u32 offset)
-{
- WARN_ON_ONCE(1);
- return 0;
-}
-
-static inline void coresight_relaxed_write64(struct coresight_device *csdev,
- u64 val, u32 offset)
-{
-}
-
-static inline void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset)
-{
-}
-
-#endif /* IS_ENABLED(CONFIG_CORESIGHT) */
-
-extern int coresight_get_cpu(struct device *dev);
+int coresight_get_cpu(struct device *dev);
+int coresight_get_static_trace_id(struct device *dev, u32 *id);
struct coresight_platform_data *coresight_get_platform_data(struct device *dev);
-
+struct coresight_connection *
+coresight_add_out_conn(struct device *dev,
+ struct coresight_platform_data *pdata,
+ const struct coresight_connection *new_conn);
+int coresight_add_in_conn(struct coresight_connection *conn);
+struct coresight_device *
+coresight_find_input_type(struct coresight_platform_data *pdata,
+ enum coresight_dev_type type,
+ union coresight_dev_subtype subtype);
+struct coresight_device *
+coresight_find_output_type(struct coresight_platform_data *pdata,
+ enum coresight_dev_type type,
+ union coresight_dev_subtype subtype);
+
+int coresight_init_driver(const char *drv, struct amba_driver *amba_drv,
+ struct platform_driver *pdev_drv, struct module *owner);
+
+void coresight_remove_driver(struct amba_driver *amba_drv,
+ struct platform_driver *pdev_drv);
+int coresight_etm_get_trace_id(struct coresight_device *csdev, enum cs_mode mode,
+ struct coresight_device *sink);
+int coresight_get_enable_clocks(struct device *dev, struct clk **pclk,
+ struct clk **atclk);
#endif /* _LINUX_CORESIGHT_H */
diff --git a/include/linux/counter.h b/include/linux/counter.h
index 9dbd5df4cd34..f208e867dd0f 100644
--- a/include/linux/counter.h
+++ b/include/linux/counter.h
@@ -6,417 +6,353 @@
#ifndef _COUNTER_H_
#define _COUNTER_H_
-#include <linux/counter_enum.h>
+#include <linux/array_size.h>
+#include <linux/cdev.h>
#include <linux/device.h>
+#include <linux/kfifo.h>
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
#include <linux/types.h>
+#include <linux/wait.h>
-enum counter_count_direction {
- COUNTER_COUNT_DIRECTION_FORWARD = 0,
- COUNTER_COUNT_DIRECTION_BACKWARD
-};
-extern const char *const counter_count_direction_str[2];
-
-enum counter_count_mode {
- COUNTER_COUNT_MODE_NORMAL = 0,
- COUNTER_COUNT_MODE_RANGE_LIMIT,
- COUNTER_COUNT_MODE_NON_RECYCLE,
- COUNTER_COUNT_MODE_MODULO_N
-};
-extern const char *const counter_count_mode_str[4];
+#include <uapi/linux/counter.h>
struct counter_device;
+struct counter_count;
+struct counter_synapse;
struct counter_signal;
+enum counter_comp_type {
+ COUNTER_COMP_U8,
+ COUNTER_COMP_U64,
+ COUNTER_COMP_BOOL,
+ COUNTER_COMP_SIGNAL_LEVEL,
+ COUNTER_COMP_FUNCTION,
+ COUNTER_COMP_SYNAPSE_ACTION,
+ COUNTER_COMP_ENUM,
+ COUNTER_COMP_COUNT_DIRECTION,
+ COUNTER_COMP_COUNT_MODE,
+ COUNTER_COMP_SIGNAL_POLARITY,
+ COUNTER_COMP_ARRAY,
+};
+
/**
- * struct counter_signal_ext - Counter Signal extensions
- * @name: attribute name
- * @read: read callback for this attribute; may be NULL
- * @write: write callback for this attribute; may be NULL
- * @priv: data private to the driver
+ * struct counter_comp - Counter component node
+ * @type: Counter component data type
+ * @name: device-specific component name
+ * @priv: component-relevant data
+ * @action_read: Synapse action mode read callback. The read value of the
+ * respective Synapse action mode should be passed back via
+ * the action parameter.
+ * @device_u8_read: Device u8 component read callback. The read value of the
+ * respective Device u8 component should be passed back via
+ * the val parameter.
+ * @count_u8_read: Count u8 component read callback. The read value of the
+ * respective Count u8 component should be passed back via
+ * the val parameter.
+ * @signal_u8_read: Signal u8 component read callback. The read value of the
+ * respective Signal u8 component should be passed back via
+ * the val parameter.
+ * @device_u32_read: Device u32 component read callback. The read value of
+ * the respective Device u32 component should be passed
+ * back via the val parameter.
+ * @count_u32_read: Count u32 component read callback. The read value of the
+ * respective Count u32 component should be passed back via
+ * the val parameter.
+ * @signal_u32_read: Signal u32 component read callback. The read value of
+ * the respective Signal u32 component should be passed
+ * back via the val parameter.
+ * @device_u64_read: Device u64 component read callback. The read value of
+ * the respective Device u64 component should be passed
+ * back via the val parameter.
+ * @count_u64_read: Count u64 component read callback. The read value of the
+ * respective Count u64 component should be passed back via
+ * the val parameter.
+ * @signal_u64_read: Signal u64 component read callback. The read value of
+ * the respective Signal u64 component should be passed
+ * back via the val parameter.
+ * @signal_array_u32_read: Signal u32 array component read callback. The
+ * index of the respective Count u32 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Count u32 array component element should be
+ * passed back via the val parameter.
+ * @device_array_u64_read: Device u64 array component read callback. The
+ * index of the respective Device u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Device u64 array component element should be
+ * passed back via the val parameter.
+ * @count_array_u64_read: Count u64 array component read callback. The
+ * index of the respective Count u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Count u64 array component element should be
+ * passed back via the val parameter.
+ * @signal_array_u64_read: Signal u64 array component read callback. The
+ * index of the respective Count u64 array
+ * component element is passed via the idx
+ * parameter. The read value of the respective
+ * Count u64 array component element should be
+ * passed back via the val parameter.
+ * @action_write: Synapse action mode write callback. The write value of
+ * the respective Synapse action mode is passed via the
+ * action parameter.
+ * @device_u8_write: Device u8 component write callback. The write value of
+ * the respective Device u8 component is passed via the val
+ * parameter.
+ * @count_u8_write: Count u8 component write callback. The write value of
+ * the respective Count u8 component is passed via the val
+ * parameter.
+ * @signal_u8_write: Signal u8 component write callback. The write value of
+ * the respective Signal u8 component is passed via the val
+ * parameter.
+ * @device_u32_write: Device u32 component write callback. The write value of
+ * the respective Device u32 component is passed via the
+ * val parameter.
+ * @count_u32_write: Count u32 component write callback. The write value of
+ * the respective Count u32 component is passed via the val
+ * parameter.
+ * @signal_u32_write: Signal u32 component write callback. The write value of
+ * the respective Signal u32 component is passed via the
+ * val parameter.
+ * @device_u64_write: Device u64 component write callback. The write value of
+ * the respective Device u64 component is passed via the
+ * val parameter.
+ * @count_u64_write: Count u64 component write callback. The write value of
+ * the respective Count u64 component is passed via the val
+ * parameter.
+ * @signal_u64_write: Signal u64 component write callback. The write value of
+ * the respective Signal u64 component is passed via the
+ * val parameter.
+ * @signal_array_u32_write: Signal u32 array component write callback. The
+ * index of the respective Signal u32 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Signal u32 array component element is passed via
+ * the val parameter.
+ * @device_array_u64_write: Device u64 array component write callback. The
+ * index of the respective Device u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Device u64 array component element is passed via
+ * the val parameter.
+ * @count_array_u64_write: Count u64 array component write callback. The
+ * index of the respective Count u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Count u64 array component element is passed via
+ * the val parameter.
+ * @signal_array_u64_write: Signal u64 array component write callback. The
+ * index of the respective Signal u64 array
+ * component element is passed via the idx
+ * parameter. The write value of the respective
+ * Signal u64 array component element is passed via
+ * the val parameter.
*/
-struct counter_signal_ext {
+struct counter_comp {
+ enum counter_comp_type type;
const char *name;
- ssize_t (*read)(struct counter_device *counter,
- struct counter_signal *signal, void *priv, char *buf);
- ssize_t (*write)(struct counter_device *counter,
- struct counter_signal *signal, void *priv,
- const char *buf, size_t len);
void *priv;
+ union {
+ int (*action_read)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action);
+ int (*device_u8_read)(struct counter_device *counter, u8 *val);
+ int (*count_u8_read)(struct counter_device *counter,
+ struct counter_count *count, u8 *val);
+ int (*signal_u8_read)(struct counter_device *counter,
+ struct counter_signal *signal, u8 *val);
+ int (*device_u32_read)(struct counter_device *counter,
+ u32 *val);
+ int (*count_u32_read)(struct counter_device *counter,
+ struct counter_count *count, u32 *val);
+ int (*signal_u32_read)(struct counter_device *counter,
+ struct counter_signal *signal, u32 *val);
+ int (*device_u64_read)(struct counter_device *counter,
+ u64 *val);
+ int (*count_u64_read)(struct counter_device *counter,
+ struct counter_count *count, u64 *val);
+ int (*signal_u64_read)(struct counter_device *counter,
+ struct counter_signal *signal, u64 *val);
+ int (*signal_array_u32_read)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u32 *val);
+ int (*device_array_u64_read)(struct counter_device *counter,
+ size_t idx, u64 *val);
+ int (*count_array_u64_read)(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 *val);
+ int (*signal_array_u64_read)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u64 *val);
+ };
+ union {
+ int (*action_write)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action action);
+ int (*device_u8_write)(struct counter_device *counter, u8 val);
+ int (*count_u8_write)(struct counter_device *counter,
+ struct counter_count *count, u8 val);
+ int (*signal_u8_write)(struct counter_device *counter,
+ struct counter_signal *signal, u8 val);
+ int (*device_u32_write)(struct counter_device *counter,
+ u32 val);
+ int (*count_u32_write)(struct counter_device *counter,
+ struct counter_count *count, u32 val);
+ int (*signal_u32_write)(struct counter_device *counter,
+ struct counter_signal *signal, u32 val);
+ int (*device_u64_write)(struct counter_device *counter,
+ u64 val);
+ int (*count_u64_write)(struct counter_device *counter,
+ struct counter_count *count, u64 val);
+ int (*signal_u64_write)(struct counter_device *counter,
+ struct counter_signal *signal, u64 val);
+ int (*signal_array_u32_write)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u32 val);
+ int (*device_array_u64_write)(struct counter_device *counter,
+ size_t idx, u64 val);
+ int (*count_array_u64_write)(struct counter_device *counter,
+ struct counter_count *count,
+ size_t idx, u64 val);
+ int (*signal_array_u64_write)(struct counter_device *counter,
+ struct counter_signal *signal,
+ size_t idx, u64 val);
+ };
};
/**
* struct counter_signal - Counter Signal node
- * @id: unique ID used to identify signal
- * @name: device-specific Signal name; ideally, this should match the name
- * as it appears in the datasheet documentation
- * @ext: optional array of Counter Signal extensions
- * @num_ext: number of Counter Signal extensions specified in @ext
- * @priv: optional private data supplied by driver
+ * @id: unique ID used to identify the Signal
+ * @name: device-specific Signal name
+ * @ext: optional array of Signal extensions
+ * @num_ext: number of Signal extensions specified in @ext
*/
struct counter_signal {
int id;
const char *name;
- const struct counter_signal_ext *ext;
+ struct counter_comp *ext;
size_t num_ext;
-
- void *priv;
-};
-
-/**
- * struct counter_signal_enum_ext - Signal enum extension attribute
- * @items: Array of strings
- * @num_items: Number of items specified in @items
- * @set: Set callback function; may be NULL
- * @get: Get callback function; may be NULL
- *
- * The counter_signal_enum_ext structure can be used to implement enum style
- * Signal extension attributes. Enum style attributes are those which have a set
- * of strings that map to unsigned integer values. The Generic Counter Signal
- * enum extension helper code takes care of mapping between value and string, as
- * well as generating a "_available" file which contains a list of all available
- * items. The get callback is used to query the currently active item; the index
- * of the item within the respective items array is returned via the 'item'
- * parameter. The set callback is called when the attribute is updated; the
- * 'item' parameter contains the index of the newly activated item within the
- * respective items array.
- */
-struct counter_signal_enum_ext {
- const char * const *items;
- size_t num_items;
- int (*get)(struct counter_device *counter,
- struct counter_signal *signal, size_t *item);
- int (*set)(struct counter_device *counter,
- struct counter_signal *signal, size_t item);
-};
-
-/**
- * COUNTER_SIGNAL_ENUM() - Initialize Signal enum extension
- * @_name: Attribute name
- * @_e: Pointer to a counter_signal_enum_ext structure
- *
- * This should usually be used together with COUNTER_SIGNAL_ENUM_AVAILABLE()
- */
-#define COUNTER_SIGNAL_ENUM(_name, _e) \
-{ \
- .name = (_name), \
- .read = counter_signal_enum_read, \
- .write = counter_signal_enum_write, \
- .priv = (_e) \
-}
-
-/**
- * COUNTER_SIGNAL_ENUM_AVAILABLE() - Initialize Signal enum available extension
- * @_name: Attribute name ("_available" will be appended to the name)
- * @_e: Pointer to a counter_signal_enum_ext structure
- *
- * Creates a read only attribute that lists all the available enum items in a
- * newline separated list. This should usually be used together with
- * COUNTER_SIGNAL_ENUM()
- */
-#define COUNTER_SIGNAL_ENUM_AVAILABLE(_name, _e) \
-{ \
- .name = (_name "_available"), \
- .read = counter_signal_enum_available_read, \
- .priv = (_e) \
-}
-
-enum counter_synapse_action {
- COUNTER_SYNAPSE_ACTION_NONE = 0,
- COUNTER_SYNAPSE_ACTION_RISING_EDGE,
- COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
- COUNTER_SYNAPSE_ACTION_BOTH_EDGES
};
/**
* struct counter_synapse - Counter Synapse node
- * @action: index of current action mode
* @actions_list: array of available action modes
* @num_actions: number of action modes specified in @actions_list
- * @signal: pointer to associated signal
+ * @signal: pointer to the associated Signal
*/
struct counter_synapse {
- size_t action;
const enum counter_synapse_action *actions_list;
size_t num_actions;
struct counter_signal *signal;
};
-struct counter_count;
-
-/**
- * struct counter_count_ext - Counter Count extension
- * @name: attribute name
- * @read: read callback for this attribute; may be NULL
- * @write: write callback for this attribute; may be NULL
- * @priv: data private to the driver
- */
-struct counter_count_ext {
- const char *name;
- ssize_t (*read)(struct counter_device *counter,
- struct counter_count *count, void *priv, char *buf);
- ssize_t (*write)(struct counter_device *counter,
- struct counter_count *count, void *priv,
- const char *buf, size_t len);
- void *priv;
-};
-
-enum counter_count_function {
- COUNTER_COUNT_FUNCTION_INCREASE = 0,
- COUNTER_COUNT_FUNCTION_DECREASE,
- COUNTER_COUNT_FUNCTION_PULSE_DIRECTION,
- COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A,
- COUNTER_COUNT_FUNCTION_QUADRATURE_X1_B,
- COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
- COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B,
- COUNTER_COUNT_FUNCTION_QUADRATURE_X4
-};
-
/**
* struct counter_count - Counter Count node
- * @id: unique ID used to identify Count
- * @name: device-specific Count name; ideally, this should match
- * the name as it appears in the datasheet documentation
- * @function: index of current function mode
- * @functions_list: array available function modes
+ * @id: unique ID used to identify the Count
+ * @name: device-specific Count name
+ * @functions_list: array of available function modes
* @num_functions: number of function modes specified in @functions_list
- * @synapses: array of synapses for initialization
- * @num_synapses: number of synapses specified in @synapses
- * @ext: optional array of Counter Count extensions
- * @num_ext: number of Counter Count extensions specified in @ext
- * @priv: optional private data supplied by driver
+ * @synapses: array of Synapses for initialization
+ * @num_synapses: number of Synapses specified in @synapses
+ * @ext: optional array of Count extensions
+ * @num_ext: number of Count extensions specified in @ext
*/
struct counter_count {
int id;
const char *name;
- size_t function;
- const enum counter_count_function *functions_list;
+ const enum counter_function *functions_list;
size_t num_functions;
struct counter_synapse *synapses;
size_t num_synapses;
- const struct counter_count_ext *ext;
+ struct counter_comp *ext;
size_t num_ext;
-
- void *priv;
-};
-
-/**
- * struct counter_count_enum_ext - Count enum extension attribute
- * @items: Array of strings
- * @num_items: Number of items specified in @items
- * @set: Set callback function; may be NULL
- * @get: Get callback function; may be NULL
- *
- * The counter_count_enum_ext structure can be used to implement enum style
- * Count extension attributes. Enum style attributes are those which have a set
- * of strings that map to unsigned integer values. The Generic Counter Count
- * enum extension helper code takes care of mapping between value and string, as
- * well as generating a "_available" file which contains a list of all available
- * items. The get callback is used to query the currently active item; the index
- * of the item within the respective items array is returned via the 'item'
- * parameter. The set callback is called when the attribute is updated; the
- * 'item' parameter contains the index of the newly activated item within the
- * respective items array.
- */
-struct counter_count_enum_ext {
- const char * const *items;
- size_t num_items;
- int (*get)(struct counter_device *counter, struct counter_count *count,
- size_t *item);
- int (*set)(struct counter_device *counter, struct counter_count *count,
- size_t item);
};
/**
- * COUNTER_COUNT_ENUM() - Initialize Count enum extension
- * @_name: Attribute name
- * @_e: Pointer to a counter_count_enum_ext structure
- *
- * This should usually be used together with COUNTER_COUNT_ENUM_AVAILABLE()
- */
-#define COUNTER_COUNT_ENUM(_name, _e) \
-{ \
- .name = (_name), \
- .read = counter_count_enum_read, \
- .write = counter_count_enum_write, \
- .priv = (_e) \
-}
-
-/**
- * COUNTER_COUNT_ENUM_AVAILABLE() - Initialize Count enum available extension
- * @_name: Attribute name ("_available" will be appended to the name)
- * @_e: Pointer to a counter_count_enum_ext structure
- *
- * Creates a read only attribute that lists all the available enum items in a
- * newline separated list. This should usually be used together with
- * COUNTER_COUNT_ENUM()
- */
-#define COUNTER_COUNT_ENUM_AVAILABLE(_name, _e) \
-{ \
- .name = (_name "_available"), \
- .read = counter_count_enum_available_read, \
- .priv = (_e) \
-}
-
-/**
- * struct counter_device_attr_group - internal container for attribute group
- * @attr_group: Counter sysfs attributes group
- * @attr_list: list to keep track of created Counter sysfs attributes
- * @num_attr: number of Counter sysfs attributes
- */
-struct counter_device_attr_group {
- struct attribute_group attr_group;
- struct list_head attr_list;
- size_t num_attr;
-};
-
-/**
- * struct counter_device_state - internal state container for a Counter device
- * @id: unique ID used to identify the Counter
- * @dev: internal device structure
- * @groups_list: attribute groups list (for Signals, Counts, and ext)
- * @num_groups: number of attribute groups containers
- * @groups: Counter sysfs attribute groups (to populate @dev.groups)
+ * struct counter_event_node - Counter Event node
+ * @l: list of current watching Counter events
+ * @event: event that triggers
+ * @channel: event channel
+ * @comp_list: list of components to watch when event triggers
*/
-struct counter_device_state {
- int id;
- struct device dev;
- struct counter_device_attr_group *groups_list;
- size_t num_groups;
- const struct attribute_group **groups;
-};
-
-enum counter_signal_value {
- COUNTER_SIGNAL_LOW = 0,
- COUNTER_SIGNAL_HIGH
+struct counter_event_node {
+ struct list_head l;
+ u8 event;
+ u8 channel;
+ struct list_head comp_list;
};
/**
* struct counter_ops - Callbacks from driver
- * @signal_read: optional read callback for Signal attribute. The read
- * value of the respective Signal should be passed back via
- * the val parameter.
- * @count_read: optional read callback for Count attribute. The read
- * value of the respective Count should be passed back via
- * the val parameter.
- * @count_write: optional write callback for Count attribute. The write
- * value for the respective Count is passed in via the val
+ * @signal_read: optional read callback for Signals. The read level of
+ * the respective Signal should be passed back via the
+ * level parameter.
+ * @count_read: read callback for Counts. The read value of the
+ * respective Count should be passed back via the value
* parameter.
- * @function_get: function to get the current count function mode. Returns
- * 0 on success and negative error code on error. The index
- * of the respective Count's returned function mode should
- * be passed back via the function parameter.
- * @function_set: function to set the count function mode. function is the
- * index of the requested function mode from the respective
- * Count's functions_list array.
- * @action_get: function to get the current action mode. Returns 0 on
- * success and negative error code on error. The index of
- * the respective Synapse's returned action mode should be
+ * @count_write: optional write callback for Counts. The write value for
+ * the respective Count is passed in via the value
+ * parameter.
+ * @function_read: read callback for the Count function modes. The read
+ * function mode of the respective Count should be passed
+ * back via the function parameter.
+ * @function_write: optional write callback for Count function modes. The
+ * function mode to write for the respective Count is
+ * passed in via the function parameter.
+ * @action_read: optional read callback for the Synapse action modes. The
+ * read action mode of the respective Synapse should be
* passed back via the action parameter.
- * @action_set: function to set the action mode. action is the index of
- * the requested action mode from the respective Synapse's
- * actions_list array.
+ * @action_write: optional write callback for Synapse action modes. The
+ * action mode to write for the respective Synapse is
+ * passed in via the action parameter.
+ * @events_configure: optional write callback to configure events. The list of
+ * struct counter_event_node may be accessed via the
+ * events_list member of the counter parameter.
+ * @watch_validate: optional callback to validate a watch. The Counter
+ * component watch configuration is passed in via the watch
+ * parameter. A return value of 0 indicates a valid Counter
+ * component watch configuration.
*/
struct counter_ops {
int (*signal_read)(struct counter_device *counter,
struct counter_signal *signal,
- enum counter_signal_value *val);
+ enum counter_signal_level *level);
int (*count_read)(struct counter_device *counter,
- struct counter_count *count, unsigned long *val);
+ struct counter_count *count, u64 *value);
int (*count_write)(struct counter_device *counter,
- struct counter_count *count, unsigned long val);
- int (*function_get)(struct counter_device *counter,
- struct counter_count *count, size_t *function);
- int (*function_set)(struct counter_device *counter,
- struct counter_count *count, size_t function);
- int (*action_get)(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse, size_t *action);
- int (*action_set)(struct counter_device *counter,
- struct counter_count *count,
- struct counter_synapse *synapse, size_t action);
+ struct counter_count *count, u64 value);
+ int (*function_read)(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function *function);
+ int (*function_write)(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_function function);
+ int (*action_read)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action *action);
+ int (*action_write)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse,
+ enum counter_synapse_action action);
+ int (*events_configure)(struct counter_device *counter);
+ int (*watch_validate)(struct counter_device *counter,
+ const struct counter_watch *watch);
};
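/*
 * Sketch: a minimal counter_ops for a hypothetical up-counter. Only
 * count_read and function_read are wired; the other callbacks are
 * optional.
 */
static int example_count_read(struct counter_device *counter,
			      struct counter_count *count, u64 *value)
{
	*value = 42;	/* would read the hardware count register */
	return 0;
}

static int example_function_read(struct counter_device *counter,
				 struct counter_count *count,
				 enum counter_function *function)
{
	*function = COUNTER_FUNCTION_INCREASE;
	return 0;
}

static const struct counter_ops example_ops = {
	.count_read = example_count_read,
	.function_read = example_function_read,
};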
/**
- * struct counter_device_ext - Counter device extension
- * @name: attribute name
- * @read: read callback for this attribute; may be NULL
- * @write: write callback for this attribute; may be NULL
- * @priv: data private to the driver
- */
-struct counter_device_ext {
- const char *name;
- ssize_t (*read)(struct counter_device *counter, void *priv, char *buf);
- ssize_t (*write)(struct counter_device *counter, void *priv,
- const char *buf, size_t len);
- void *priv;
-};
-
-/**
- * struct counter_device_enum_ext - Counter enum extension attribute
- * @items: Array of strings
- * @num_items: Number of items specified in @items
- * @set: Set callback function; may be NULL
- * @get: Get callback function; may be NULL
- *
- * The counter_device_enum_ext structure can be used to implement enum style
- * Counter extension attributes. Enum style attributes are those which have a
- * set of strings that map to unsigned integer values. The Generic Counter enum
- * extension helper code takes care of mapping between value and string, as well
- * as generating a "_available" file which contains a list of all available
- * items. The get callback is used to query the currently active item; the index
- * of the item within the respective items array is returned via the 'item'
- * parameter. The set callback is called when the attribute is updated; the
- * 'item' parameter contains the index of the newly activated item within the
- * respective items array.
- */
-struct counter_device_enum_ext {
- const char * const *items;
- size_t num_items;
- int (*get)(struct counter_device *counter, size_t *item);
- int (*set)(struct counter_device *counter, size_t item);
-};
-
-/**
- * COUNTER_DEVICE_ENUM() - Initialize Counter enum extension
- * @_name: Attribute name
- * @_e: Pointer to a counter_device_enum_ext structure
- *
- * This should usually be used together with COUNTER_DEVICE_ENUM_AVAILABLE()
- */
-#define COUNTER_DEVICE_ENUM(_name, _e) \
-{ \
- .name = (_name), \
- .read = counter_device_enum_read, \
- .write = counter_device_enum_write, \
- .priv = (_e) \
-}
-
-/**
- * COUNTER_DEVICE_ENUM_AVAILABLE() - Initialize Counter enum available extension
- * @_name: Attribute name ("_available" will be appended to the name)
- * @_e: Pointer to a counter_device_enum_ext structure
- *
- * Creates a read only attribute that lists all the available enum items in a
- * newline separated list. This should usually be used together with
- * COUNTER_DEVICE_ENUM()
- */
-#define COUNTER_DEVICE_ENUM_AVAILABLE(_name, _e) \
-{ \
- .name = (_name "_available"), \
- .read = counter_device_enum_available_read, \
- .priv = (_e) \
-}
-
-/**
* struct counter_device - Counter data structure
- * @name: name of the device as it appears in the datasheet
+ * @name: name of the device
* @parent: optional parent device providing the counters
- * @device_state: internal device state container
* @ops: callbacks from driver
* @signals: array of Signals
* @num_signals: number of Signals specified in @signals
@@ -424,12 +360,21 @@ struct counter_device_enum_ext {
* @num_counts: number of Counts specified in @counts
* @ext: optional array of Counter device extensions
* @num_ext: number of Counter device extensions specified in @ext
- * @priv: optional private data supplied by driver
+ * @dev: internal device structure
+ * @chrdev: internal character device structure
+ * @events_list: list of current watching Counter events
+ * @events_list_lock: lock to protect Counter events list operations
+ * @next_events_list: list of next watching Counter events
+ * @n_events_list_lock: lock to protect Counter next events list operations
+ * @events: queue of detected Counter events
+ * @events_wait: wait queue to allow blocking reads of Counter events
+ * @events_in_lock: lock to protect Counter events queue in operations
+ * @events_out_lock: lock to protect Counter events queue out operations
+ * @ops_exist_lock: lock to prevent use during removal
*/
struct counter_device {
const char *name;
struct device *parent;
- struct counter_device_state *device_state;
const struct counter_ops *ops;
@@ -438,17 +383,256 @@ struct counter_device {
struct counter_count *counts;
size_t num_counts;
- const struct counter_device_ext *ext;
+ struct counter_comp *ext;
size_t num_ext;
- void *priv;
+ struct device dev;
+ struct cdev chrdev;
+ struct list_head events_list;
+ spinlock_t events_list_lock;
+ struct list_head next_events_list;
+ struct mutex n_events_list_lock;
+ DECLARE_KFIFO_PTR(events, struct counter_event);
+ wait_queue_head_t events_wait;
+ spinlock_t events_in_lock;
+ struct mutex events_out_lock;
+ struct mutex ops_exist_lock;
};
-int counter_register(struct counter_device *const counter);
+void *counter_priv(const struct counter_device *const counter) __attribute_const__;
+
+struct counter_device *counter_alloc(size_t sizeof_priv);
+void counter_put(struct counter_device *const counter);
+int counter_add(struct counter_device *const counter);
+
void counter_unregister(struct counter_device *const counter);
-int devm_counter_register(struct device *dev,
- struct counter_device *const counter);
-void devm_counter_unregister(struct device *dev,
- struct counter_device *const counter);
+struct counter_device *devm_counter_alloc(struct device *dev,
+ size_t sizeof_priv);
+int devm_counter_add(struct device *dev,
+ struct counter_device *const counter);
+void counter_push_event(struct counter_device *const counter, const u8 event,
+ const u8 channel);
+
+#define COUNTER_COMP_DEVICE_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .device_u8_read = (_read), \
+ .device_u8_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .count_u8_read = (_read), \
+ .count_u8_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_U8(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U8, \
+ .name = (_name), \
+ .signal_u8_read = (_read), \
+ .signal_u8_write = (_write), \
+}
+
+#define COUNTER_COMP_DEVICE_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .device_u64_read = (_read), \
+ .device_u64_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .count_u64_read = (_read), \
+ .count_u64_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_U64(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_U64, \
+ .name = (_name), \
+ .signal_u64_read = (_read), \
+ .signal_u64_write = (_write), \
+}
+
+#define COUNTER_COMP_DEVICE_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .device_u8_read = (_read), \
+ .device_u8_write = (_write), \
+}
+#define COUNTER_COMP_COUNT_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .count_u8_read = (_read), \
+ .count_u8_write = (_write), \
+}
+#define COUNTER_COMP_SIGNAL_BOOL(_name, _read, _write) \
+{ \
+ .type = COUNTER_COMP_BOOL, \
+ .name = (_name), \
+ .signal_u8_read = (_read), \
+ .signal_u8_write = (_write), \
+}
+
+struct counter_available {
+ union {
+ const u32 *enums;
+ const char *const *strs;
+ };
+ size_t num_items;
+};
+
+#define DEFINE_COUNTER_AVAILABLE(_name, _enums) \
+ struct counter_available _name = { \
+ .enums = (_enums), \
+ .num_items = ARRAY_SIZE(_enums), \
+ }
+
+#define DEFINE_COUNTER_ENUM(_name, _strs) \
+ struct counter_available _name = { \
+ .strs = (_strs), \
+ .num_items = ARRAY_SIZE(_strs), \
+ }
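/*
 * Sketch: wiring an enum-style device extension with the new helpers
 * (the mode names and callbacks below are hypothetical):
 */
static const char *const example_modes[] = { "normal", "differential" };
static DEFINE_COUNTER_ENUM(example_modes_enum, example_modes);

static int example_mode_read(struct counter_device *counter, u32 *mode)
{
	*mode = 0;	/* would read the hardware mode */
	return 0;
}

static int example_mode_write(struct counter_device *counter, u32 mode)
{
	return 0;	/* would program the hardware mode */
}

static struct counter_comp example_ext[] = {
	COUNTER_COMP_DEVICE_ENUM("mode", example_mode_read,
				 example_mode_write, example_modes_enum),
};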
+
+#define COUNTER_COMP_DEVICE_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .device_u32_read = (_get), \
+ .device_u32_write = (_set), \
+ .priv = &(_available), \
+}
+#define COUNTER_COMP_COUNT_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .count_u32_read = (_get), \
+ .count_u32_write = (_set), \
+ .priv = &(_available), \
+}
+#define COUNTER_COMP_SIGNAL_ENUM(_name, _get, _set, _available) \
+{ \
+ .type = COUNTER_COMP_ENUM, \
+ .name = (_name), \
+ .signal_u32_read = (_get), \
+ .signal_u32_write = (_set), \
+ .priv = &(_available), \
+}
+
+struct counter_array {
+ enum counter_comp_type type;
+ const struct counter_available *avail;
+ union {
+ size_t length;
+ size_t idx;
+ };
+};
+
+#define DEFINE_COUNTER_ARRAY_U64(_name, _length) \
+ struct counter_array _name = { \
+ .type = COUNTER_COMP_U64, \
+ .length = (_length), \
+ }
+
+#define DEFINE_COUNTER_ARRAY_CAPTURE(_name, _length) \
+ DEFINE_COUNTER_ARRAY_U64(_name, _length)
+
+#define DEFINE_COUNTER_ARRAY_POLARITY(_name, _available, _length) \
+ struct counter_array _name = { \
+ .type = COUNTER_COMP_SIGNAL_POLARITY, \
+ .avail = &(_available), \
+ .length = (_length), \
+ }
+
+#define COUNTER_COMP_DEVICE_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .device_array_u64_read = (_read), \
+ .device_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+#define COUNTER_COMP_COUNT_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .count_array_u64_read = (_read), \
+ .count_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
+#define COUNTER_COMP_SIGNAL_ARRAY_U64(_name, _read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = (_name), \
+ .signal_array_u64_read = (_read), \
+ .signal_array_u64_write = (_write), \
+ .priv = &(_array), \
+}
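/*
 * Sketch: a four-element capture array component built from the array
 * helpers above (the length and callback are hypothetical):
 */
static DEFINE_COUNTER_ARRAY_U64(example_captures, 4);

static int example_capture_read(struct counter_device *counter,
				struct counter_count *count,
				size_t idx, u64 *val)
{
	*val = 0;	/* would read capture register 'idx' */
	return 0;
}

static struct counter_comp example_count_ext[] = {
	COUNTER_COMP_COUNT_ARRAY_U64("capture", example_capture_read,
				     NULL, example_captures),
};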
+
+#define COUNTER_COMP_CAPTURE(_read, _write) \
+ COUNTER_COMP_COUNT_U64("capture", _read, _write)
+
+#define COUNTER_COMP_CEILING(_read, _write) \
+ COUNTER_COMP_COUNT_U64("ceiling", _read, _write)
+
+#define COUNTER_COMP_COMPARE(_read, _write) \
+ COUNTER_COMP_COUNT_U64("compare", _read, _write)
+
+#define COUNTER_COMP_COUNT_MODE(_read, _write, _available) \
+{ \
+ .type = COUNTER_COMP_COUNT_MODE, \
+ .name = "count_mode", \
+ .count_u32_read = (_read), \
+ .count_u32_write = (_write), \
+ .priv = &(_available), \
+}
+
+#define COUNTER_COMP_DIRECTION(_read) \
+{ \
+ .type = COUNTER_COMP_COUNT_DIRECTION, \
+ .name = "direction", \
+ .count_u32_read = (_read), \
+}
+
+#define COUNTER_COMP_ENABLE(_read, _write) \
+ COUNTER_COMP_COUNT_BOOL("enable", _read, _write)
+
+#define COUNTER_COMP_FLOOR(_read, _write) \
+ COUNTER_COMP_COUNT_U64("floor", _read, _write)
+
+#define COUNTER_COMP_FREQUENCY(_read) \
+ COUNTER_COMP_SIGNAL_U64("frequency", _read, NULL)
+
+#define COUNTER_COMP_POLARITY(_read, _write, _available) \
+{ \
+ .type = COUNTER_COMP_SIGNAL_POLARITY, \
+ .name = "polarity", \
+ .signal_u32_read = (_read), \
+ .signal_u32_write = (_write), \
+ .priv = &(_available), \
+}
+
+#define COUNTER_COMP_PRESET(_read, _write) \
+ COUNTER_COMP_COUNT_U64("preset", _read, _write)
+
+#define COUNTER_COMP_PRESET_ENABLE(_read, _write) \
+ COUNTER_COMP_COUNT_BOOL("preset_enable", _read, _write)
+
+#define COUNTER_COMP_ARRAY_CAPTURE(_read, _write, _array) \
+ COUNTER_COMP_COUNT_ARRAY_U64("capture", _read, _write, _array)
+
+#define COUNTER_COMP_ARRAY_POLARITY(_read, _write, _array) \
+{ \
+ .type = COUNTER_COMP_ARRAY, \
+ .name = "polarity", \
+ .signal_array_u32_read = (_read), \
+ .signal_array_u32_write = (_write), \
+ .priv = &(_array), \
+}
#endif /* _COUNTER_H_ */
diff --git a/include/linux/counter_enum.h b/include/linux/counter_enum.h
deleted file mode 100644
index 9f917298a88f..000000000000
--- a/include/linux/counter_enum.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Counter interface enum functions
- * Copyright (C) 2018 William Breathitt Gray
- */
-#ifndef _COUNTER_ENUM_H_
-#define _COUNTER_ENUM_H_
-
-#include <linux/types.h>
-
-struct counter_device;
-struct counter_signal;
-struct counter_count;
-
-ssize_t counter_signal_enum_read(struct counter_device *counter,
- struct counter_signal *signal, void *priv,
- char *buf);
-ssize_t counter_signal_enum_write(struct counter_device *counter,
- struct counter_signal *signal, void *priv,
- const char *buf, size_t len);
-
-ssize_t counter_signal_enum_available_read(struct counter_device *counter,
- struct counter_signal *signal,
- void *priv, char *buf);
-
-ssize_t counter_count_enum_read(struct counter_device *counter,
- struct counter_count *count, void *priv,
- char *buf);
-ssize_t counter_count_enum_write(struct counter_device *counter,
- struct counter_count *count, void *priv,
- const char *buf, size_t len);
-
-ssize_t counter_count_enum_available_read(struct counter_device *counter,
- struct counter_count *count,
- void *priv, char *buf);
-
-ssize_t counter_device_enum_read(struct counter_device *counter, void *priv,
- char *buf);
-ssize_t counter_device_enum_write(struct counter_device *counter, void *priv,
- const char *buf, size_t len);
-
-ssize_t counter_device_enum_available_read(struct counter_device *counter,
- void *priv, char *buf);
-
-#endif /* _COUNTER_ENUM_H_ */
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 6a511a1078ca..5b1236d8c65b 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -89,6 +89,33 @@ enum {
#define CPER_NOTIFY_DMAR \
GUID_INIT(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
0x72, 0x2D, 0xEB, 0x41)
+/* CXL Protocol Error Section */
+#define CPER_SEC_CXL_PROT_ERR \
+ GUID_INIT(0x80B9EFB4, 0x52B5, 0x4DE3, 0xA7, 0x77, 0x68, 0x78, \
+ 0x4B, 0x77, 0x10, 0x48)
+
+/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */
+/*
+ * General Media Event Record
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
+ */
+#define CPER_SEC_CXL_GEN_MEDIA_GUID \
+ GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \
+ 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6)
+/*
+ * DRAM Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+#define CPER_SEC_CXL_DRAM_GUID \
+ GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \
+ 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24)
+/*
+ * Memory Module Event Record
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+#define CPER_SEC_CXL_MEM_MODULE_GUID \
+ GUID_INIT(0xfe927475, 0xdd59, 0x4339, \
+ 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74)
/*
* Flags bits definitions for flags in struct cper_record_header
@@ -270,11 +297,11 @@ enum {
#define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2)
#define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3)
-#define CPER_ARM_CACHE_ERROR 0
-#define CPER_ARM_TLB_ERROR 1
-#define CPER_ARM_BUS_ERROR 2
-#define CPER_ARM_VENDOR_ERROR 3
-#define CPER_ARM_MAX_TYPE CPER_ARM_VENDOR_ERROR
+#define CPER_ARM_ERR_TYPE_MASK GENMASK(4, 1)
+#define CPER_ARM_CACHE_ERROR BIT(1)
+#define CPER_ARM_TLB_ERROR BIT(2)
+#define CPER_ARM_BUS_ERROR BIT(3)
+#define CPER_ARM_VENDOR_ERROR BIT(4)
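/*
 * Sketch: with the error type now a bit mask rather than an enumeration,
 * several types can be reported at once and are tested individually (the
 * 'type' parameter stands in for the error record field):
 */
static inline bool example_is_cache_or_tlb(u8 type)
{
	return (type & CPER_ARM_ERR_TYPE_MASK) &
	       (CPER_ARM_CACHE_ERROR | CPER_ARM_TLB_ERROR);
}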
#define CPER_ARM_ERR_VALID_TRANSACTION_TYPE BIT(0)
#define CPER_ARM_ERR_VALID_OPERATION_TYPE BIT(1)
@@ -558,8 +585,11 @@ extern const char *const cper_proc_error_type_strs[4];
u64 cper_next_record_id(void);
const char *cper_severity_str(unsigned int);
const char *cper_mem_err_type_str(unsigned int);
+const char *cper_mem_err_status_str(u64 status);
void cper_print_bits(const char *prefix, unsigned int bits,
const char * const strs[], unsigned int strs_size);
+int cper_bits_to_str(char *buf, int buf_size, unsigned long bits,
+ const char * const strs[], unsigned int strs_size);
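/*
 * Sketch: cper_bits_to_str() renders set bits into a caller-supplied
 * buffer rather than printing them, e.g. to build a single log line
 * (the flag string table here is hypothetical):
 */
static void example_decode_flags(unsigned long flags)
{
	static const char * const flag_strs[] = { "first", "overflow" };
	char buf[64];

	cper_bits_to_str(buf, sizeof(buf), flags, flag_strs,
			 ARRAY_SIZE(flag_strs));
	pr_info("error flags: %s\n", buf);
}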
void cper_mem_err_pack(const struct cper_sec_mem_err *,
struct cper_mem_err_compact *);
const char *cper_mem_err_unpack(struct trace_seq *,
@@ -568,5 +598,17 @@ void cper_print_proc_arm(const char *pfx,
const struct cper_sec_proc_arm *proc);
void cper_print_proc_ia(const char *pfx,
const struct cper_sec_proc_ia *proc);
+int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg);
+int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg);
+
+struct acpi_hest_generic_status;
+void cper_estatus_print(const char *pfx,
+ const struct acpi_hest_generic_status *estatus);
+int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus);
+int cper_estatus_check(const struct acpi_hest_generic_status *estatus);
+
+struct cxl_cper_sec_prot_err;
+void cxl_cper_print_prot_err(const char *pfx,
+ const struct cxl_cper_sec_prot_err *prot_err);
#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 94a578a96202..487b3bf2e1ea 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -16,8 +16,9 @@
#include <linux/node.h>
#include <linux/compiler.h>
-#include <linux/cpumask.h>
#include <linux/cpuhotplug.h>
+#include <linux/cpuhplock.h>
+#include <linux/cpu_smt.h>
struct device;
struct device_node;
@@ -65,17 +66,42 @@ extern ssize_t cpu_show_tsx_async_abort(struct device *dev,
extern ssize_t cpu_show_itlb_multihit(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_mmio_stale_data(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+extern ssize_t cpu_show_retbleed(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_gds(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_ghostwrite(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_old_microcode(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
const char *fmt, ...);
+extern bool arch_cpu_is_hotpluggable(int cpu);
+extern int arch_register_cpu(int cpu);
+extern void arch_unregister_cpu(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif
+#ifdef CONFIG_GENERIC_CPU_DEVICES
+DECLARE_PER_CPU(struct cpu, cpu_devices);
+#endif
+
/*
* These states are not related to the core CPU hotplug mechanism. They are
* used by various (sub)architectures to track internal state
@@ -95,7 +121,8 @@ void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
int bringup_hibernate_cpu(unsigned int sleep_cpu);
-void bringup_nonboot_cpus(unsigned int setup_max_cpus);
+void bringup_nonboot_cpus(unsigned int max_cpus);
+int arch_cpu_rescan_dead_smt_siblings(void);
#else /* CONFIG_SMP */
#define cpuhp_tasks_frozen 0
@@ -110,44 +137,10 @@ static inline void cpu_maps_update_done(void)
static inline int add_cpu(unsigned int cpu) { return 0;}
-#endif /* CONFIG_SMP */
-extern struct bus_type cpu_subsys;
+static inline int arch_cpu_rescan_dead_smt_siblings(void) { return 0; }
-extern int lockdep_is_cpus_held(void);
-
-#ifdef CONFIG_HOTPLUG_CPU
-extern void cpus_write_lock(void);
-extern void cpus_write_unlock(void);
-extern void cpus_read_lock(void);
-extern void cpus_read_unlock(void);
-extern int cpus_read_trylock(void);
-extern void lockdep_assert_cpus_held(void);
-extern void cpu_hotplug_disable(void);
-extern void cpu_hotplug_enable(void);
-void clear_tasks_mm_cpumask(int cpu);
-int remove_cpu(unsigned int cpu);
-int cpu_device_down(struct device *dev);
-extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);
-
-#else /* CONFIG_HOTPLUG_CPU */
-
-static inline void cpus_write_lock(void) { }
-static inline void cpus_write_unlock(void) { }
-static inline void cpus_read_lock(void) { }
-static inline void cpus_read_unlock(void) { }
-static inline int cpus_read_trylock(void) { return true; }
-static inline void lockdep_assert_cpus_held(void) { }
-static inline void cpu_hotplug_disable(void) { }
-static inline void cpu_hotplug_enable(void) { }
-static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
-static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
-#endif /* !CONFIG_HOTPLUG_CPU */
-
-/* Wrappers which go away once all code is converted */
-static inline void cpu_hotplug_begin(void) { cpus_write_lock(); }
-static inline void cpu_hotplug_done(void) { cpus_write_unlock(); }
-static inline void get_online_cpus(void) { cpus_read_lock(); }
-static inline void put_online_cpus(void) { cpus_read_unlock(); }
+#endif /* CONFIG_SMP */
+extern const struct bus_type cpu_subsys;
#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
@@ -164,7 +157,7 @@ static inline int suspend_disable_secondary_cpus(void)
}
static inline void suspend_enable_secondary_cpus(void)
{
- return thaw_secondary_cpus();
+ thaw_secondary_cpus();
}
#else /* !CONFIG_PM_SLEEP_SMP */
@@ -173,64 +166,67 @@ static inline int suspend_disable_secondary_cpus(void) { return 0; }
static inline void suspend_enable_secondary_cpus(void) { }
#endif /* !CONFIG_PM_SLEEP_SMP */
-void cpu_startup_entry(enum cpuhp_state state);
+void __noreturn cpu_startup_entry(enum cpuhp_state state);
void cpu_idle_poll_ctrl(bool enable);
-/* Attach to any functions which should be considered cpuidle. */
-#define __cpuidle __section(".cpuidle.text")
-
bool cpu_in_idle(unsigned long pc);
void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
-void arch_cpu_idle_dead(void);
+void arch_tick_broadcast_enter(void);
+void arch_tick_broadcast_exit(void);
+void __noreturn arch_cpu_idle_dead(void);
-int cpu_report_state(int cpu);
-int cpu_check_up_prepare(int cpu);
-void cpu_set_state_online(int cpu);
-void play_idle_precise(u64 duration_ns, u64 latency_ns);
+#ifdef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
+void arch_cpu_finalize_init(void);
+#else
+static inline void arch_cpu_finalize_init(void) { }
+#endif
-static inline void play_idle(unsigned long duration_us)
-{
- play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
-}
+void play_idle_precise(u64 duration_ns, u64 latency_ns);
#ifdef CONFIG_HOTPLUG_CPU
-bool cpu_wait_death(unsigned int cpu, int seconds);
-bool cpu_report_death(void);
void cpuhp_report_idle_dead(void);
#else
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-enum cpuhp_smt_control {
- CPU_SMT_ENABLED,
- CPU_SMT_DISABLED,
- CPU_SMT_FORCE_DISABLED,
- CPU_SMT_NOT_SUPPORTED,
- CPU_SMT_NOT_IMPLEMENTED,
+enum cpu_attack_vectors {
+ CPU_MITIGATE_USER_KERNEL,
+ CPU_MITIGATE_USER_USER,
+ CPU_MITIGATE_GUEST_HOST,
+ CPU_MITIGATE_GUEST_GUEST,
+ NR_CPU_ATTACK_VECTORS,
};
-#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
-extern enum cpuhp_smt_control cpu_smt_control;
-extern void cpu_smt_disable(bool force);
-extern void cpu_smt_check_topology(void);
-extern bool cpu_smt_possible(void);
-extern int cpuhp_smt_enable(void);
-extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
-#else
-# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
-static inline void cpu_smt_disable(bool force) { }
-static inline void cpu_smt_check_topology(void) { }
-static inline bool cpu_smt_possible(void) { return false; }
-static inline int cpuhp_smt_enable(void) { return 0; }
-static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
-#endif
+enum smt_mitigations {
+ SMT_MITIGATIONS_OFF,
+ SMT_MITIGATIONS_AUTO,
+ SMT_MITIGATIONS_ON,
+};
+#ifdef CONFIG_CPU_MITIGATIONS
extern bool cpu_mitigations_off(void);
extern bool cpu_mitigations_auto_nosmt(void);
+extern bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v);
+extern enum smt_mitigations smt_mitigations;
+#else
+static inline bool cpu_mitigations_off(void)
+{
+ return true;
+}
+static inline bool cpu_mitigations_auto_nosmt(void)
+{
+ return false;
+}
+static inline bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v)
+{
+ return false;
+}
+#define smt_mitigations SMT_MITIGATIONS_OFF
+#endif
#endif /* _LINUX_CPU_H_ */
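
/*
 * Usage sketch (illustrative): how mitigation selection code might
 * consult the new attack-vector and SMT controls. The helper name is
 * hypothetical; only the cpu.h interfaces above are real.
 */
#include <linux/cpu.h>

static bool __init example_wants_user_kernel_mitigation(void)
{
	/* Covers mitigations=off and per-vector opt-outs in one query. */
	if (!cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL))
		return false;

	/* An SMT-dependent mitigation could additionally check this. */
	return smt_mitigations != SMT_MITIGATIONS_OFF;
}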
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index a3bdc8a98f2c..2c774fb3c091 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/thermal.h>
-#include <linux/cpumask.h>
struct cpufreq_policy;
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
index be8aea04d023..2fd7ba75362a 100644
--- a/include/linux/cpu_rmap.h
+++ b/include/linux/cpu_rmap.h
@@ -7,7 +7,7 @@
* Copyright 2011 Solarflare Communications Inc.
*/
-#include <linux/cpumask.h>
+#include <linux/cpumask_types.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/kref.h>
@@ -16,14 +16,13 @@
* struct cpu_rmap - CPU affinity reverse-map
* @refcount: kref for object
* @size: Number of objects to be reverse-mapped
- * @used: Number of objects added
* @obj: Pointer to array of object pointers
* @near: For each CPU, the index and distance to the nearest object,
* based on affinity masks
*/
struct cpu_rmap {
struct kref refcount;
- u16 size, used;
+ u16 size;
void **obj;
struct {
u16 index;
@@ -33,6 +32,7 @@ struct cpu_rmap {
#define CPU_RMAP_DIST_INF 0xffff
extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
+extern void cpu_rmap_get(struct cpu_rmap *rmap);
extern int cpu_rmap_put(struct cpu_rmap *rmap);
extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
@@ -61,6 +61,7 @@ static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size)
}
extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);
+int irq_cpu_rmap_remove(struct cpu_rmap *rmap, int irq);
extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);
#endif /* __LINUX_CPU_RMAP_H */
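
/*
 * Usage sketch (illustrative): the reverse-map lifecycle for a
 * multiqueue driver, using the existing alloc/add helpers alongside the
 * newly exported irq_cpu_rmap_remove()/cpu_rmap_get(). "irqs" and
 * "nvec" are assumptions.
 */
#include <linux/cpu_rmap.h>

static struct cpu_rmap *example_setup_queue_rmap(const int *irqs,
						 unsigned int nvec)
{
	struct cpu_rmap *rmap = alloc_irq_cpu_rmap(nvec);
	unsigned int i;

	if (!rmap)
		return NULL;

	for (i = 0; i < nvec; i++) {
		if (irq_cpu_rmap_add(rmap, irqs[i]) < 0) {
			/* Drops the notifiers and the rmap reference. */
			free_irq_cpu_rmap(rmap);
			return NULL;
		}
	}
	return rmap;
}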
diff --git a/include/linux/cpu_smt.h b/include/linux/cpu_smt.h
new file mode 100644
index 000000000000..0c1664294b57
--- /dev/null
+++ b/include/linux/cpu_smt.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CPU_SMT_H_
+#define _LINUX_CPU_SMT_H_
+
+enum cpuhp_smt_control {
+ CPU_SMT_ENABLED,
+ CPU_SMT_DISABLED,
+ CPU_SMT_FORCE_DISABLED,
+ CPU_SMT_NOT_SUPPORTED,
+ CPU_SMT_NOT_IMPLEMENTED,
+};
+
+#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+extern enum cpuhp_smt_control cpu_smt_control;
+extern unsigned int cpu_smt_num_threads;
+extern void cpu_smt_disable(bool force);
+extern void cpu_smt_set_num_threads(unsigned int num_threads,
+ unsigned int max_threads);
+extern bool cpu_smt_possible(void);
+extern int cpuhp_smt_enable(void);
+extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
+#else
+# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
+# define cpu_smt_num_threads 1
+static inline void cpu_smt_disable(bool force) { }
+static inline void cpu_smt_set_num_threads(unsigned int num_threads,
+ unsigned int max_threads) { }
+static inline bool cpu_smt_possible(void) { return false; }
+static inline int cpuhp_smt_enable(void) { return 0; }
+static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
+#endif
+
+#endif /* _LINUX_CPU_SMT_H_ */
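
/*
 * Usage sketch (illustrative): code that only needs the SMT state can
 * now include the lighter header directly instead of <linux/cpu.h>.
 */
#include <linux/cpu_smt.h>

static bool example_smt_forced_off(void)
{
	/* Resolves to a compile-time constant when HOTPLUG_SMT=n. */
	return cpu_smt_control == CPU_SMT_FORCE_DISABLED;
}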
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 353969c7acd3..0465d1e6f72a 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -9,25 +9,27 @@
#define _LINUX_CPUFREQ_H
#include <linux/clk.h>
+#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
+#include <linux/minmax.h>
/*********************************************************************
* CPUFREQ INTERFACE *
*********************************************************************/
/*
* Frequency values here are CPU kHz
- *
- * Maximum transition latency is in nanoseconds - if it's unknown,
- * CPUFREQ_ETERNAL shall be used.
*/
-#define CPUFREQ_ETERNAL (-1)
+#define CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS NSEC_PER_MSEC
+
#define CPUFREQ_NAME_LEN 16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
@@ -115,6 +117,13 @@ struct cpufreq_policy {
bool strict_target;
/*
+ * Set if inefficient frequencies were found in the frequency table.
+ * This indicates if the relation flag CPUFREQ_RELATION_E can be
+ * honored.
+ */
+ bool efficiencies_available;
+
+ /*
* Preferred average time interval between consecutive invocations of
* the driver to set the frequency for this policy. To be set by the
* scaling driver (0, which is the default, means no preference).
@@ -130,6 +139,12 @@ struct cpufreq_policy {
*/
bool dvfs_possible_from_any_cpu;
+ /* Per policy boost enabled flag. */
+ bool boost_enabled;
+
+ /* Per policy boost supported flag. */
+ bool boost_supported;
+
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
unsigned int cached_resolved_idx;
@@ -153,6 +168,12 @@ struct cpufreq_policy {
struct notifier_block nb_max;
};
+DEFINE_GUARD(cpufreq_policy_write, struct cpufreq_policy *,
+ down_write(&_T->rwsem), up_write(&_T->rwsem))
+
+DEFINE_GUARD(cpufreq_policy_read, struct cpufreq_policy *,
+ down_read(&_T->rwsem), up_read(&_T->rwsem))
+
/*
* Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
* callback for sanitization. That callback is only expected to modify the min
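
/*
 * Usage sketch (illustrative, assumes <linux/cleanup.h> guard
 * semantics): scope-based locking with the guards defined above; the
 * rwsem is released on every return path.
 */
static unsigned int example_read_policy_max(struct cpufreq_policy *policy)
{
	guard(cpufreq_policy_read)(policy);

	return policy->max;
}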
@@ -196,6 +217,9 @@ static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
#endif
+/* Scope based cleanup macro for cpufreq_policy kobject reference counting */
+DEFINE_FREE(put_cpufreq_policy, struct cpufreq_policy *, if (_T) cpufreq_cpu_put(_T))
+
static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
return cpumask_empty(policy->cpus);
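
/*
 * Usage sketch (illustrative): the cleanup pattern enabled by the
 * DEFINE_FREE() above — the reference taken by cpufreq_cpu_get() is
 * dropped automatically on every exit path.
 */
static bool example_cpu_policy_is_inactive(unsigned int cpu)
{
	struct cpufreq_policy *policy __free(put_cpufreq_policy) =
		cpufreq_cpu_get(cpu);

	return !policy || policy_is_inactive(policy);
}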
@@ -215,9 +239,6 @@ void disable_cpufreq(void);
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
-struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
-void cpufreq_cpu_release(struct cpufreq_policy *policy);
-int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
@@ -226,6 +247,13 @@ bool cpufreq_supports_freq_invariance(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
+bool has_target_index(void);
+
+DECLARE_PER_CPU(unsigned long, cpufreq_pressure);
+static inline unsigned long cpufreq_get_pressure(int cpu)
+{
+ return READ_ONCE(per_cpu(cpufreq_pressure, cpu));
+}
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
{
@@ -248,6 +276,11 @@ static inline bool cpufreq_supports_freq_invariance(void)
return false;
}
static inline void disable_cpufreq(void) { }
+static inline void cpufreq_update_limits(unsigned int cpu) { }
+static inline unsigned long cpufreq_get_pressure(int cpu)
+{
+ return 0;
+}
#endif
#ifdef CONFIG_CPU_FREQ_STAT
@@ -269,6 +302,12 @@ static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy
#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
#define CPUFREQ_RELATION_C 2 /* closest frequency to target */
+/* relation flags */
+#define CPUFREQ_RELATION_E BIT(2) /* Get if possible an efficient frequency */
+
+#define CPUFREQ_RELATION_LE (CPUFREQ_RELATION_L | CPUFREQ_RELATION_E)
+#define CPUFREQ_RELATION_HE (CPUFREQ_RELATION_H | CPUFREQ_RELATION_E)
+#define CPUFREQ_RELATION_CE (CPUFREQ_RELATION_C | CPUFREQ_RELATION_E)
struct freq_attr {
struct attribute attr;
@@ -323,7 +362,10 @@ struct cpufreq_driver {
/*
* ->fast_switch() replacement for drivers that use an internal
* representation of performance levels and can pass hints other than
- * the target performance level to the hardware.
+ * the target performance level to the hardware. This can only be set
+ * if ->fast_switch is set too, because in those cases (under specific
+ * conditions) scale invariance can be disabled, which causes the
+ * schedutil governor to fall back to the latter.
*/
void (*adjust_perf)(unsigned int cpu,
unsigned long min_perf,
@@ -331,15 +373,6 @@ struct cpufreq_driver {
unsigned long capacity);
/*
- * Caches and returns the lowest driver-supported frequency greater than
- * or equal to the target frequency, subject to any driver limitations.
- * Does not set the frequency. Only to be implemented for drivers with
- * target().
- */
- unsigned int (*resolve_freq)(struct cpufreq_policy *policy,
- unsigned int target_freq);
-
- /*
* Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
* unset.
*
@@ -359,19 +392,18 @@ struct cpufreq_driver {
int (*target_intermediate)(struct cpufreq_policy *policy,
unsigned int index);
- /* should be defined, if possible */
+ /* should be defined, if possible, return 0 on error */
unsigned int (*get)(unsigned int cpu);
/* Called to update policy limits on firmware notifications. */
- void (*update_limits)(unsigned int cpu);
+ void (*update_limits)(struct cpufreq_policy *policy);
/* optional */
int (*bios_limit)(int cpu, unsigned int *limit);
int (*online)(struct cpufreq_policy *policy);
int (*offline)(struct cpufreq_policy *policy);
- int (*exit)(struct cpufreq_policy *policy);
- void (*stop_cpu)(struct cpufreq_policy *policy);
+ void (*exit)(struct cpufreq_policy *policy);
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);
@@ -383,12 +415,18 @@ struct cpufreq_driver {
/* platform specific boost support code */
bool boost_enabled;
int (*set_boost)(struct cpufreq_policy *policy, int state);
+
+ /*
+ * Set by drivers that want to register with the energy model after the
+ * policy is properly initialized, but before the governor is started.
+ */
+ void (*register_em)(struct cpufreq_policy *policy);
};
/* flags */
/*
- * Set by drivers that need to update internale upper and lower boundaries along
+ * Set by drivers that need to update internal upper and lower boundaries along
* with the target frequency and so the core and governors should also invoke
 * the driver if the target frequency does not change, but the policy min or max
* may have changed.
@@ -435,7 +473,7 @@ struct cpufreq_driver {
#define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6)
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
-int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
+void cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
bool cpufreq_driver_test_flags(u16 flags);
const char *cpufreq_get_current_driver(void);
@@ -451,17 +489,8 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *poli
unsigned int min,
unsigned int max)
{
- if (policy->min < min)
- policy->min = min;
- if (policy->max < min)
- policy->max = min;
- if (policy->min > max)
- policy->min = max;
- if (policy->max > max)
- policy->max = max;
- if (policy->min > policy->max)
- policy->min = policy->max;
- return;
+ policy->max = clamp(policy->max, min, max);
+ policy->min = clamp(policy->min, min, policy->max);
}
static inline void
@@ -555,14 +584,6 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
#define CPUFREQ_POLICY_POWERSAVE (1)
#define CPUFREQ_POLICY_PERFORMANCE (2)
-/*
- * The polling frequency depends on the capability of the processor. Default
- * polling frequency is 1000 times the transition latency of the processor. The
- * ondemand governor will work on any processor with transition latency <= 10ms,
- * using appropriate sampling rate.
- */
-#define LATENCY_MULTIPLIER (1000)
-
struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
int (*init)(struct cpufreq_policy *policy);
@@ -627,12 +648,23 @@ module_exit(__governor##_exit)
struct cpufreq_governor *cpufreq_default_governor(void);
struct cpufreq_governor *cpufreq_fallback_governor(void);
+#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
+bool sugov_is_governor(struct cpufreq_policy *policy);
+#else
+static inline bool sugov_is_governor(struct cpufreq_policy *policy)
+{
+ return false;
+}
+#endif
+
static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy)
{
if (policy->max < policy->cur)
- __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
+ __cpufreq_driver_target(policy, policy->max,
+ CPUFREQ_RELATION_HE);
else if (policy->min > policy->cur)
- __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, policy->min,
+ CPUFREQ_RELATION_LE);
}
/* Governor attribute set */
@@ -646,6 +678,11 @@ struct gov_attr_set {
/* sysfs ops for cpufreq governors */
extern const struct sysfs_ops governor_sysfs_ops;
+static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
+{
+ return container_of(kobj, struct gov_attr_set, kobj);
+}
+
void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node);
void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node);
unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
@@ -663,10 +700,11 @@ struct governor_attr {
*********************************************************************/
/* Special Values of .frequency field */
-#define CPUFREQ_ENTRY_INVALID ~0u
-#define CPUFREQ_TABLE_END ~1u
+#define CPUFREQ_ENTRY_INVALID ~0u
+#define CPUFREQ_TABLE_END ~1u
/* Special Values of .flags field */
-#define CPUFREQ_BOOST_FREQ (1 << 0)
+#define CPUFREQ_BOOST_FREQ (1 << 0)
+#define CPUFREQ_INEFFICIENT_FREQ (1 << 1)
struct cpufreq_frequency_table {
unsigned int flags;
@@ -675,26 +713,6 @@ struct cpufreq_frequency_table {
* order */
};
-#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
-int dev_pm_opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table);
-void dev_pm_opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table **table);
-#else
-static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table
- **table)
-{
- return -EINVAL;
-}
-
-static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
- struct cpufreq_frequency_table
- **table)
-{
-}
-#endif
-
/*
* cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
* @pos: the cpufreq_frequency_table * to use as a loop cursor.
@@ -743,38 +761,52 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
continue; \
else
+/**
+ * cpufreq_for_each_efficient_entry_idx - iterate with index over a cpufreq
+ * frequency_table excluding CPUFREQ_ENTRY_INVALID and
+ * CPUFREQ_INEFFICIENT_FREQ frequencies.
+ * @pos: the &struct cpufreq_frequency_table to use as a loop cursor.
+ * @table: the &struct cpufreq_frequency_table to iterate over.
+ * @idx: the table entry currently being processed.
+ * @efficiencies: set to true to only iterate over efficient frequencies.
+ */
+
+#define cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) \
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) \
+ if (efficiencies && (pos->flags & CPUFREQ_INEFFICIENT_FREQ)) \
+ continue; \
+ else
+
-int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table);
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy);
+
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy);
-int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
- struct cpufreq_frequency_table *table);
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation);
+ unsigned int target_freq, unsigned int min,
+ unsigned int max, unsigned int relation);
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
unsigned int freq);
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
#ifdef CONFIG_CPU_FREQ
-int cpufreq_boost_trigger_state(int state);
-int cpufreq_boost_enabled(void);
-int cpufreq_enable_boost_support(void);
-bool policy_has_boost_freq(struct cpufreq_policy *policy);
+bool cpufreq_boost_enabled(void);
+int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state);
/* Find lowest freq at or above target in a table in ascending order */
static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq >= target_freq)
@@ -788,14 +820,15 @@ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
/* Find lowest freq at or above target in a table in descending order */
static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
@@ -816,28 +849,40 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
return best;
}
-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
- unsigned int target_freq)
+static inline int find_index_l(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ bool efficiencies)
{
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_al(policy, target_freq);
+ return cpufreq_table_find_index_al(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dl(policy, target_freq);
+ return cpufreq_table_find_index_dl(policy, target_freq,
+ efficiencies);
+}
+
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ bool efficiencies)
+{
+ return find_index_l(policy, target_freq, policy->min, policy->max, efficiencies);
}
/* Find highest freq at or below target in a table in ascending order */
static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
@@ -860,14 +905,15 @@ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
/* Find highest freq at or below target in a table in descending order */
static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq <= target_freq)
@@ -879,28 +925,40 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
return best;
}
-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
- unsigned int target_freq)
+static inline int find_index_h(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ bool efficiencies)
{
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_ah(policy, target_freq);
+ return cpufreq_table_find_index_ah(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dh(policy, target_freq);
+ return cpufreq_table_find_index_dh(policy, target_freq,
+ efficiencies);
+}
+
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ bool efficiencies)
+{
+ return find_index_h(policy, target_freq, policy->min, policy->max, efficiencies);
}
/* Find closest freq to target in a table in ascending order */
static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
@@ -927,14 +985,15 @@ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
/* Find closest freq to target in a table in descending order */
static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
- unsigned int target_freq)
+ unsigned int target_freq,
+ bool efficiencies)
{
struct cpufreq_frequency_table *table = policy->freq_table;
struct cpufreq_frequency_table *pos;
unsigned int freq;
int idx, best = -1;
- cpufreq_for_each_valid_entry_idx(pos, table, idx) {
+ cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) {
freq = pos->frequency;
if (freq == target_freq)
@@ -959,37 +1018,82 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
return best;
}
-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
- unsigned int target_freq)
+static inline int find_index_c(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int min, unsigned int max,
+ bool efficiencies)
{
- target_freq = clamp_val(target_freq, policy->min, policy->max);
+ target_freq = clamp_val(target_freq, min, max);
if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
- return cpufreq_table_find_index_ac(policy, target_freq);
+ return cpufreq_table_find_index_ac(policy, target_freq,
+ efficiencies);
else
- return cpufreq_table_find_index_dc(policy, target_freq);
+ return cpufreq_table_find_index_dc(policy, target_freq,
+ efficiencies);
+}
+
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ bool efficiencies)
+{
+ return find_index_c(policy, target_freq, policy->min, policy->max, efficiencies);
+}
+
+static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy,
+ unsigned int min, unsigned int max,
+ int idx)
+{
+ unsigned int freq;
+
+ if (idx < 0)
+ return false;
+
+ freq = policy->freq_table[idx].frequency;
+
+ return freq == clamp_val(freq, min, max);
}
static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int target_freq,
+ unsigned int min,
+ unsigned int max,
unsigned int relation)
{
- if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
- return cpufreq_table_index_unsorted(policy, target_freq,
- relation);
+ bool efficiencies = policy->efficiencies_available &&
+ (relation & CPUFREQ_RELATION_E);
+ int idx;
+
+ /* cpufreq_table_index_unsorted() has no use for this flag anyway */
+ relation &= ~CPUFREQ_RELATION_E;
+ if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
+ return cpufreq_table_index_unsorted(policy, target_freq, min,
+ max, relation);
+retry:
switch (relation) {
case CPUFREQ_RELATION_L:
- return cpufreq_table_find_index_l(policy, target_freq);
+ idx = find_index_l(policy, target_freq, min, max, efficiencies);
+ break;
case CPUFREQ_RELATION_H:
- return cpufreq_table_find_index_h(policy, target_freq);
+ idx = find_index_h(policy, target_freq, min, max, efficiencies);
+ break;
case CPUFREQ_RELATION_C:
- return cpufreq_table_find_index_c(policy, target_freq);
+ idx = find_index_c(policy, target_freq, min, max, efficiencies);
+ break;
default:
WARN_ON_ONCE(1);
return 0;
}
+
+ /* Limit frequency index to honor min and max */
+ if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) {
+ efficiencies = false;
+ goto retry;
+ }
+
+ return idx;
}
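
/*
 * Usage sketch (illustrative): resolving a table index with the
 * efficiency hint. Per the retry logic above, the helper transparently
 * drops the hint when the efficient pick falls outside min/max.
 */
static int example_resolve_idx(struct cpufreq_policy *policy,
			       unsigned int target_freq)
{
	return cpufreq_frequency_table_target(policy, target_freq,
					      policy->min, policy->max,
					      CPUFREQ_RELATION_LE);
}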
static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy)
@@ -1005,37 +1109,111 @@ static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy
return count;
}
-#else
-static inline int cpufreq_boost_trigger_state(int state)
+
+/**
+ * cpufreq_table_set_inefficient() - Mark a frequency as inefficient
+ * @policy: the &struct cpufreq_policy containing the inefficient frequency
+ * @frequency: the inefficient frequency
+ *
+ * The &struct cpufreq_policy must use a sorted frequency table
+ *
+ * Return: %0 on success or a negative errno code
+ */
+
+static inline int
+cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
+ unsigned int frequency)
+{
+ struct cpufreq_frequency_table *pos;
+
+ /* Not supported */
+ if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)
+ return -EINVAL;
+
+ cpufreq_for_each_valid_entry(pos, policy->freq_table) {
+ if (pos->frequency == frequency) {
+ pos->flags |= CPUFREQ_INEFFICIENT_FREQ;
+ policy->efficiencies_available = true;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static inline int parse_perf_domain(int cpu, const char *list_name,
+ const char *cell_name,
+ struct of_phandle_args *args)
{
+ int ret;
+
+ struct device_node *cpu_np __free(device_node) = of_cpu_device_node_get(cpu);
+ if (!cpu_np)
+ return -ENODEV;
+
+ ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0,
+ args);
+ if (ret < 0)
+ return ret;
return 0;
}
-static inline int cpufreq_boost_enabled(void)
+
+static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
+ const char *cell_name, struct cpumask *cpumask,
+ struct of_phandle_args *pargs)
{
+ int cpu, ret;
+ struct of_phandle_args args;
+
+ ret = parse_perf_domain(pcpu, list_name, cell_name, pargs);
+ if (ret < 0)
+ return ret;
+
+ cpumask_set_cpu(pcpu, cpumask);
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == pcpu)
+ continue;
+
+ ret = parse_perf_domain(cpu, list_name, cell_name, &args);
+ if (ret < 0)
+ continue;
+
+ if (of_phandle_args_equal(pargs, &args))
+ cpumask_set_cpu(cpu, cpumask);
+
+ of_node_put(args.np);
+ }
+
return 0;
}
+#else
+static inline bool cpufreq_boost_enabled(void)
+{
+ return false;
+}
-static inline int cpufreq_enable_boost_support(void)
+static inline int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
- return -EINVAL;
+ return -EOPNOTSUPP;
}
-static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
+static inline int
+cpufreq_table_set_inefficient(struct cpufreq_policy *policy,
+ unsigned int frequency)
{
- return false;
+ return -EINVAL;
}
-#endif
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
- struct cpufreq_governor *old_gov);
-#else
-static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
- struct cpufreq_governor *old_gov) { }
+static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name,
+ const char *cell_name, struct cpumask *cpumask,
+ struct of_phandle_args *pargs)
+{
+ return -EOPNOTSUPP;
+}
#endif
-extern void arch_freq_prepare_all(void);
-extern unsigned int arch_freq_get_on_cpu(int cpu);
+extern int arch_freq_get_on_cpu(int cpu);
#ifndef arch_set_freq_scale
static __always_inline
@@ -1049,11 +1227,18 @@ void arch_set_freq_scale(const struct cpumask *cpus,
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
-extern struct freq_attr *cpufreq_generic_attr[];
int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy);
unsigned int cpufreq_generic_get(unsigned int cpu);
void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
+
+bool cpufreq_ready_for_eas(const struct cpumask *cpu_mask);
+
+static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
+{
+ dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
+ policy->related_cpus);
+}
#endif /* _LINUX_CPUFREQ_H */
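
/*
 * Usage sketch (illustrative): an init-time consumer of the new
 * interfaces — marking a frequency inefficient and registering the
 * energy model after init but before the governor starts. The driver
 * name and the 1400000 kHz value are made up.
 */
static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	/* May fail if the entry is absent on some SKUs; that is fine. */
	cpufreq_table_set_inefficient(policy, 1400000);
	return 0;
}

static struct cpufreq_driver example_driver = {
	.init		= example_cpufreq_init,
	.register_em	= cpufreq_register_em_with_opp,
};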
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 4a62b3980642..62cd7b35a29c 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -22,19 +22,52 @@
* AP_ACTIVE AP_ACTIVE
*/
+/*
+ * CPU hotplug states. The state machine invokes the installed state
+ * startup callbacks sequentially from CPUHP_OFFLINE + 1 to CPUHP_ONLINE
+ * during a CPU online operation. During a CPU offline operation the
+ * installed teardown callbacks are invoked in the reverse order from
+ * CPUHP_ONLINE - 1 down to CPUHP_OFFLINE.
+ *
+ * The state space has three sections: PREPARE, STARTING and ONLINE.
+ *
+ * PREPARE: The callbacks are invoked on a control CPU before the
+ * hotplugged CPU is started up or after the hotplugged CPU has died.
+ *
+ * STARTING: The callbacks are invoked on the hotplugged CPU from the low level
+ * hotplug startup/teardown code with interrupts disabled.
+ *
+ * ONLINE: The callbacks are invoked on the hotplugged CPU from the per CPU
+ * hotplug thread with interrupts and preemption enabled.
+ *
+ * Adding explicit states to this enum is only necessary when:
+ *
+ * 1) The state is within the STARTING section
+ *
+ * 2) The state has ordering constraints vs. other states in the
+ * same section.
+ *
+ * If neither #1 nor #2 apply, please use the dynamic state space when
+ * setting up a state by using CPUHP_BP_PREPARE_DYN or CPUHP_AP_ONLINE_DYN
+ * for the @state argument of the setup function.
+ *
+ * See Documentation/core-api/cpu_hotplug.rst for further information and
+ * examples.
+ */
enum cpuhp_state {
CPUHP_INVALID = -1,
+
+ /* PREPARE section invoked on a control CPU */
CPUHP_OFFLINE = 0,
CPUHP_CREATE_THREADS,
- CPUHP_PERF_PREPARE,
CPUHP_PERF_X86_PREPARE,
CPUHP_PERF_X86_AMD_UNCORE_PREP,
CPUHP_PERF_POWER,
CPUHP_PERF_SUPERH,
CPUHP_X86_HPET_DEAD,
- CPUHP_X86_APB_DEAD,
CPUHP_X86_MCE_DEAD,
CPUHP_VIRT_NET_DEAD,
+ CPUHP_IBMVNIC_DEAD,
CPUHP_SLUB_DEAD,
CPUHP_DEBUG_OBJ_DEAD,
CPUHP_MM_WRITEBACK_DEAD,
@@ -46,6 +79,7 @@ enum cpuhp_state {
CPUHP_ARM_OMAP_WAKE_DEAD,
CPUHP_IRQ_POLL_DEAD,
CPUHP_BLOCK_SOFTIRQ_DEAD,
+ CPUHP_BIO_DEAD,
CPUHP_ACPI_CPUDRV_DEAD,
CPUHP_S390_PFAULT_DEAD,
CPUHP_BLK_MQ_DEAD,
@@ -54,21 +88,19 @@ enum cpuhp_state {
CPUHP_MM_MEMCQ_DEAD,
CPUHP_PERCPU_CNT_DEAD,
CPUHP_RADIX_DEAD,
- CPUHP_PAGE_ALLOC_DEAD,
+ CPUHP_PAGE_ALLOC,
CPUHP_NET_DEV_DEAD,
- CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_IOVA_DEAD,
- CPUHP_LUSTRE_CFS_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
CPUHP_PADATA_DEAD,
+ CPUHP_AP_DTPM_CPU_DEAD,
+ CPUHP_RANDOM_PREPARE,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
- CPUHP_PROFILE_PREPARE,
CPUHP_X2APIC_PREPARE,
CPUHP_SMPCFD_PREPARE,
CPUHP_RELAY_PREPARE,
- CPUHP_SLAB_PREPARE,
CPUHP_MD_RAID5_PREPARE,
CPUHP_RCUTREE_PREP,
CPUHP_CPUIDLE_COUPLED_PREPARE,
@@ -78,23 +110,28 @@ enum cpuhp_state {
CPUHP_XEN_EVTCHN_PREPARE,
CPUHP_ARM_SHMOBILE_SCU_PREPARE,
CPUHP_SH_SH3X_PREPARE,
- CPUHP_NET_FLOW_PREPARE,
CPUHP_TOPOLOGY_PREPARE,
CPUHP_NET_IUCV_PREPARE,
CPUHP_ARM_BL_PREPARE,
CPUHP_TRACE_RB_PREPARE,
- CPUHP_MM_ZS_PREPARE,
- CPUHP_MM_ZSWP_MEM_PREPARE,
CPUHP_MM_ZSWP_POOL_PREPARE,
CPUHP_KVM_PPC_BOOK3S_PREPARE,
CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_PREPARE,
+ CPUHP_TMIGR_PREPARE,
CPUHP_MIPS_SOC_PREPARE,
CPUHP_BP_PREPARE_DYN,
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
+ CPUHP_BP_KICK_AP,
CPUHP_BRINGUP_CPU,
+
+ /*
+ * STARTING section invoked on the hotplugged CPU in low level
+ * bringup and teardown code.
+ */
CPUHP_AP_IDLE_DEAD,
CPUHP_AP_OFFLINE,
+ CPUHP_AP_CACHECTRL_STARTING,
CPUHP_AP_SCHED_STARTING,
CPUHP_AP_RCUTREE_DYING,
CPUHP_AP_CPU_PM_STARTING,
@@ -104,59 +141,63 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
- CPUHP_AP_IRQ_RISCV_STARTING,
+ CPUHP_AP_IRQ_EIOINTC_STARTING,
+ CPUHP_AP_IRQ_AVECINTC_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
+ CPUHP_AP_IRQ_ACLINT_SSWI_STARTING,
+ CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
+ CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
- CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
- CPUHP_AP_PERF_X86_CQM_STARTING,
- CPUHP_AP_PERF_X86_CSTATE_STARTING,
CPUHP_AP_PERF_XTENSA_STARTING,
- CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
- CPUHP_AP_ARM_SDEI_STARTING,
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
CPUHP_AP_PERF_ARM_ACPI_STARTING,
CPUHP_AP_PERF_ARM_STARTING,
+ CPUHP_AP_PERF_RISCV_STARTING,
CPUHP_AP_ARM_L2X0_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
+ CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_JCORE_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
CPUHP_AP_QCOM_TIMER_STARTING,
CPUHP_AP_TEGRA_TIMER_STARTING,
CPUHP_AP_ARMADA_TIMER_STARTING,
- CPUHP_AP_MARCO_TIMER_STARTING,
+ CPUHP_AP_LOONGARCH_ARCH_TIMER_STARTING,
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
+ CPUHP_AP_REALTEK_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
CPUHP_AP_CLINT_TIMER_STARTING,
CPUHP_AP_CSKY_TIMER_STARTING,
CPUHP_AP_TI_GP_TIMER_STARTING,
CPUHP_AP_HYPERV_TIMER_STARTING,
- CPUHP_AP_KVM_STARTING,
- CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
- CPUHP_AP_KVM_ARM_VGIC_STARTING,
- CPUHP_AP_KVM_ARM_TIMER_STARTING,
/* Must be the last timer callback */
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
+ CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
+ CPUHP_AP_HRTIMERS_DYING,
+ CPUHP_AP_TICK_DYING,
CPUHP_AP_X86_TBOOT_DYING,
CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
CPUHP_AP_ONLINE,
CPUHP_TEARDOWN_CPU,
+
+ /* Online section invoked on the hotplugged CPU from the hotplug thread */
CPUHP_AP_ONLINE_IDLE,
+ CPUHP_AP_HYPERV_ONLINE,
+ CPUHP_AP_KVM_ONLINE,
CPUHP_AP_SCHED_WAIT_EMPTY,
CPUHP_AP_SMPBOOT_THREADS,
- CPUHP_AP_X86_VDSO_VMA_ONLINE,
CPUHP_AP_IRQ_AFFINITY_ONLINE,
CPUHP_AP_BLK_MQ_ONLINE,
CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
@@ -166,25 +207,25 @@ enum cpuhp_state {
CPUHP_AP_PERF_X86_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
- CPUHP_AP_PERF_X86_RAPL_ONLINE,
- CPUHP_AP_PERF_X86_CQM_ONLINE,
- CPUHP_AP_PERF_X86_CSTATE_ONLINE,
- CPUHP_AP_PERF_X86_IDXD_ONLINE,
CPUHP_AP_PERF_S390_CF_ONLINE,
- CPUHP_AP_PERF_S390_CFD_ONLINE,
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE,
+ CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
+ CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
+ CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE,
CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
@@ -192,15 +233,17 @@ enum cpuhp_state {
CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE,
CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
CPUHP_AP_PERF_CSKY_ONLINE,
+ CPUHP_AP_TMIGR_ONLINE,
CPUHP_AP_WATCHDOG_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
+ CPUHP_AP_RANDOM_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
+ CPUHP_AP_KTHREADS_ONLINE,
CPUHP_AP_BASE_CACHEINFO_ONLINE,
CPUHP_AP_ONLINE_DYN,
- CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
+ CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 40,
CPUHP_AP_X86_HPET_ONLINE,
CPUHP_AP_X86_KVM_CLK_ONLINE,
- CPUHP_AP_DTPM_CPU_ONLINE,
CPUHP_AP_ACTIVE,
CPUHP_ONLINE,
};
@@ -215,14 +258,15 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name,
int (*teardown)(unsigned int cpu),
bool multi_instance);
/**
- * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
+ * cpuhp_setup_state - Setup hotplug state callbacks with calling the @startup
+ * callback
* @state: The state for which the calls are installed
* @name: Name of the callback (will be used in debug output)
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
- * Installs the callback functions and invokes the startup callback on
- * the present cpus which have already reached the @state.
+ * Installs the callback functions and invokes the @startup callback on
+ * the online cpus which have already reached the @state.
*/
static inline int cpuhp_setup_state(enum cpuhp_state state,
const char *name,
@@ -232,6 +276,18 @@ static inline int cpuhp_setup_state(enum cpuhp_state state,
return __cpuhp_setup_state(state, name, true, startup, teardown, false);
}
+/**
+ * cpuhp_setup_state_cpuslocked - Setup hotplug state callbacks with calling
+ * @startup callback from a cpus_read_lock()
+ * held region
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback (will be used in debug output)
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
+ *
+ * Same as cpuhp_setup_state() except that it must be invoked from within a
+ * cpus_read_lock() held region.
+ */
static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
@@ -243,14 +299,14 @@ static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
/**
* cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the
- * callbacks
+ * @startup callback
* @state: The state for which the calls are installed
* @name: Name of the callback.
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
- * Same as @cpuhp_setup_state except that no calls are executed are invoked
- * during installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n.
+ * Same as cpuhp_setup_state() except that the @startup callback is not
+ * invoked during installation. NOP if SMP=n or HOTPLUG_CPU=n.
*/
static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
const char *name,
@@ -261,6 +317,19 @@ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
false);
}
+/**
+ * cpuhp_setup_state_nocalls_cpuslocked - Setup hotplug state callbacks without
+ * invoking the @startup callback from
+ * a cpus_read_lock() held region
+ * @state: The state for which the calls are installed
+ * @name: Name of the callback.
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
+ *
+ * Same as cpuhp_setup_state_nocalls() except that it must be invoked from
+ * within a cpus_read_lock() held region.
+ */
static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
@@ -274,13 +343,13 @@ static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
* cpuhp_setup_state_multi - Add callbacks for multi state
* @state: The state for which the calls are installed
* @name: Name of the callback.
- * @startup: startup callback function
- * @teardown: teardown callback function
+ * @startup: startup callback function or NULL if not required
+ * @teardown: teardown callback function or NULL if not required
*
* Sets the internal multi_instance flag and prepares a state to work as a multi
* instance callback. No callbacks are invoked at this point. The callbacks are
* invoked once an instance for this state are registered via
- * @cpuhp_state_add_instance or @cpuhp_state_add_instance_nocalls.
+ * cpuhp_state_add_instance() or cpuhp_state_add_instance_nocalls()
*/
static inline int cpuhp_setup_state_multi(enum cpuhp_state state,
const char *name,
@@ -305,9 +374,10 @@ int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
* @state: The state for which the instance is installed
* @node: The node for this individual state.
*
- * Installs the instance for the @state and invokes the startup callback on
- * the present cpus which have already reached the @state. The @state must have
- * been earlier marked as multi-instance by @cpuhp_setup_state_multi.
+ * Installs the instance for the @state and invokes the registered startup
+ * callback on the online cpus which have already reached the @state. The
+ * @state must have been earlier marked as multi-instance by
+ * cpuhp_setup_state_multi().
*/
static inline int cpuhp_state_add_instance(enum cpuhp_state state,
struct hlist_node *node)
@@ -321,8 +391,9 @@ static inline int cpuhp_state_add_instance(enum cpuhp_state state,
* @state: The state for which the instance is installed
* @node: The node for this individual state.
*
- * Installs the instance for the @state The @state must have been earlier
- * marked as multi-instance by @cpuhp_setup_state_multi.
+ * Installs the instance for the @state. The @state must have been earlier
+ * marked as multi-instance by cpuhp_setup_state_multi. NOP if SMP=n or
+ * HOTPLUG_CPU=n.
*/
static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
struct hlist_node *node)
@@ -330,6 +401,17 @@ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
return __cpuhp_state_add_instance(state, node, false);
}
+/**
+ * cpuhp_state_add_instance_nocalls_cpuslocked - Add an instance for a state
+ * without invoking the startup
+ * callback from a cpus_read_lock()
+ * held region.
+ * @state: The state for which the instance is installed
+ * @node: The node for this individual state.
+ *
+ * Same as cpuhp_state_add_instance_nocalls() except that it must be
+ * invoked from within a cpus_read_lock() held region.
+ */
static inline int
cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state,
struct hlist_node *node)
@@ -345,7 +427,7 @@ void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke);
* @state: The state for which the calls are removed
*
* Removes the callback functions and invokes the teardown callback on
- * the present cpus which have already reached the @state.
+ * the online cpus which have already reached the @state.
*/
static inline void cpuhp_remove_state(enum cpuhp_state state)
{
@@ -354,7 +436,7 @@ static inline void cpuhp_remove_state(enum cpuhp_state state)
/**
* cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking
- * teardown
+ * the teardown callback
* @state: The state for which the calls are removed
*/
static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
@@ -362,6 +444,14 @@ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
__cpuhp_remove_state(state, false);
}
+/**
+ * cpuhp_remove_state_nocalls_cpuslocked - Remove hotplug state callbacks without invoking
+ * teardown from a cpus_read_lock() held region.
+ * @state: The state for which the calls are removed
+ *
+ * Same as cpuhp_remove_state_nocalls() except that it must be invoked
+ * from within a cpus_read_lock() held region.
+ */
static inline void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state)
{
__cpuhp_remove_state_cpuslocked(state, false);
@@ -389,8 +479,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
* @state: The state from which the instance is removed
* @node: The node for this individual state.
*
- * Removes the instance and invokes the teardown callback on the present cpus
- * which have already reached the @state.
+ * Removes the instance and invokes the teardown callback on the online cpus
+ * which have already reached @state.
*/
static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
struct hlist_node *node)
@@ -400,7 +490,7 @@ static inline int cpuhp_state_remove_instance(enum cpuhp_state state,
/**
* cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state
- * without invoking the reatdown callback
+ * without invoking the teardown callback
* @state: The state from which the instance is removed
* @node: The node for this individual state.
*
@@ -418,4 +508,20 @@ void cpuhp_online_idle(enum cpuhp_state state);
static inline void cpuhp_online_idle(enum cpuhp_state state) { }
#endif
+struct task_struct;
+
+void cpuhp_ap_sync_alive(void);
+void arch_cpuhp_sync_state_poll(void);
+void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu);
+int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle);
+bool arch_cpuhp_init_parallel_bringup(void);
+
+#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
+void cpuhp_ap_report_dead(void);
+void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu);
+#else
+static inline void cpuhp_ap_report_dead(void) { }
+static inline void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }
+#endif
+
#endif
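
/*
 * Usage sketch (illustrative): per the state-machine documentation
 * above, a driver with no ordering constraints uses the dynamic ONLINE
 * range instead of adding an explicit enum entry. Callback names are
 * hypothetical.
 */
#include <linux/cpuhotplug.h>

static int example_cpu_online(unsigned int cpu)  { return 0; }
static int example_cpu_offline(unsigned int cpu) { return 0; }

static int __init example_init(void)
{
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example/cpu:online",
				    example_cpu_online, example_cpu_offline);

	/* For dynamic states, a positive return is the allocated state. */
	return ret < 0 ? ret : 0;
}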
diff --git a/include/linux/cpuhplock.h b/include/linux/cpuhplock.h
new file mode 100644
index 000000000000..f7aa20f62b87
--- /dev/null
+++ b/include/linux/cpuhplock.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/linux/cpuhplock.h - CPU hotplug locking
+ *
+ * Locking functions for CPU hotplug.
+ */
+#ifndef _LINUX_CPUHPLOCK_H_
+#define _LINUX_CPUHPLOCK_H_
+
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+
+struct device;
+
+extern int lockdep_is_cpus_held(void);
+
+#ifdef CONFIG_HOTPLUG_CPU
+void cpus_write_lock(void);
+void cpus_write_unlock(void);
+void cpus_read_lock(void);
+void cpus_read_unlock(void);
+int cpus_read_trylock(void);
+void lockdep_assert_cpus_held(void);
+void cpu_hotplug_disable_offlining(void);
+void cpu_hotplug_disable(void);
+void cpu_hotplug_enable(void);
+void clear_tasks_mm_cpumask(int cpu);
+int remove_cpu(unsigned int cpu);
+int cpu_device_down(struct device *dev);
+void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);
+
+#else /* CONFIG_HOTPLUG_CPU */
+
+static inline void cpus_write_lock(void) { }
+static inline void cpus_write_unlock(void) { }
+static inline void cpus_read_lock(void) { }
+static inline void cpus_read_unlock(void) { }
+static inline int cpus_read_trylock(void) { return true; }
+static inline void lockdep_assert_cpus_held(void) { }
+static inline void cpu_hotplug_disable_offlining(void) { }
+static inline void cpu_hotplug_disable(void) { }
+static inline void cpu_hotplug_enable(void) { }
+static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
+static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
+#endif /* !CONFIG_HOTPLUG_CPU */
+
+DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock())
+
+#endif /* _LINUX_CPUHPLOCK_H_ */
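
/*
 * Usage sketch (illustrative): the new scope guard drops the hotplug
 * read lock automatically when the scope ends, on every return path.
 */
#include <linux/cpuhplock.h>
#include <linux/cpumask.h>

static unsigned int example_count_online(void)
{
	guard(cpus_read_lock)();

	/* CPUs cannot come or go while the guard is live. */
	return num_online_cpus();
}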
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index fce476275e16..4073690504a7 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -14,6 +14,7 @@
#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
+#include <linux/context_tracking.h>
#define CPUIDLE_STATE_MAX 10
#define CPUIDLE_NAME_LEN 16
@@ -60,7 +61,7 @@ struct cpuidle_state {
struct cpuidle_driver *drv,
int index);
- int (*enter_dead) (struct cpuidle_device *dev, int index);
+ void (*enter_dead) (struct cpuidle_device *dev, int index);
/*
* CPUs execute ->enter_s2idle with the local tick or entire timekeeping
@@ -115,6 +116,35 @@ struct cpuidle_device {
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
+static __always_inline void ct_cpuidle_enter(void)
+{
+ lockdep_assert_irqs_disabled();
+ /*
+ * Idle is allowed to (temporary) enable IRQs. It
+ * will return with IRQs disabled.
+ *
+ * Trace IRQs enable here, then switch off RCU, and have
+ * arch_cpu_idle() use raw_local_irq_enable(). Note that
+ * ct_idle_enter() relies on lockdep IRQ state, so switch that
+ * last -- this is very similar to the entry code.
+ */
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+ ct_idle_enter();
+ lockdep_hardirqs_on(_RET_IP_);
+}
+
+static __always_inline void ct_cpuidle_exit(void)
+{
+ /*
+ * Carefully undo the above.
+ */
+ lockdep_hardirqs_off(_RET_IP_);
+ ct_idle_exit();
+ instrumentation_begin();
+}
+
/****************************
* CPUIDLE DRIVER INTERFACE *
****************************/
@@ -218,7 +248,8 @@ extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev,
u64 latency_limit_ns);
extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns);
extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
@@ -226,7 +257,8 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
u64 latency_limit_ns)
{return -ENODEV; }
static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns)
{return -ENODEV; }
static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
@@ -277,7 +309,7 @@ extern s64 cpuidle_governor_latency_req(unsigned int cpu);
#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, \
idx, \
state, \
- is_retention) \
+ is_retention, is_rcu) \
({ \
int __ret = 0; \
\
@@ -289,7 +321,11 @@ extern s64 cpuidle_governor_latency_req(unsigned int cpu);
if (!is_retention) \
__ret = cpu_pm_enter(); \
if (!__ret) { \
+ if (!is_rcu) \
+ ct_cpuidle_enter(); \
__ret = low_level_idle_enter(state); \
+ if (!is_rcu) \
+ ct_cpuidle_exit(); \
if (!is_retention) \
cpu_pm_exit(); \
} \
@@ -298,15 +334,21 @@ extern s64 cpuidle_governor_latency_req(unsigned int cpu);
})
#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
- __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0)
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0, 0)
#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \
- __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1)
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1, 0)
#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state) \
- __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0)
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(low_level_idle_enter, idx, state) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 1)
#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state) \
- __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1)
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(low_level_idle_enter, idx, state) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 1)
#endif /* _LINUX_CPUIDLE_H */
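
For illustration, a hedged sketch of the new _RCU wrapper in use: a driver
whose firmware suspend path performs its own context-tracking transitions
picks the _RCU variant, so the wrapper above skips ct_cpuidle_enter() and
ct_cpuidle_exit(). my_fw_suspend() and my_state_ids[] are assumed,
illustrative names:

	static int my_enter_idle(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int idx)
	{
		/*
		 * cpu_pm notifiers still run; the RCU bookkeeping is
		 * left to my_fw_suspend() itself.
		 */
		return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(my_fw_suspend, idx,
						       my_state_ids[idx]);
	}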
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index bfc4690de4f4..80211900f373 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -4,26 +4,19 @@
/*
* Cpumasks provide a bitmap suitable for representing the
- * set of CPU's in a system, one bit position per CPU number. In general,
+ * set of CPUs in a system, one bit position per CPU number. In general,
* only nr_cpu_ids (<= NR_CPUS) bits are valid.
*/
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/bitmap.h>
#include <linux/atomic.h>
-#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/cleanup.h>
+#include <linux/cpumask_types.h>
+#include <linux/gfp_types.h>
+#include <linux/numa.h>
+#include <linux/threads.h>
+#include <linux/types.h>
-/* Don't assign or return these: may not be this big! */
-typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
-
-/**
- * cpumask_bits - get the bits in a cpumask
- * @maskp: the struct cpumask *
- *
- * You should only assume nr_cpu_ids bits of this mask are valid. This is
- * a macro so it's const-correct.
- */
-#define cpumask_bits(maskp) ((maskp)->bits)
+#include <asm/bug.h>
/**
* cpumask_pr_args - printf args to output a cpumask
@@ -33,19 +26,56 @@ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
*/
#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
-#if NR_CPUS == 1
-#define nr_cpu_ids 1U
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+#define nr_cpu_ids ((unsigned int)NR_CPUS)
#else
extern unsigned int nr_cpu_ids;
#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
- * not all bits may be allocated. */
-#define nr_cpumask_bits nr_cpu_ids
+static __always_inline void set_nr_cpu_ids(unsigned int nr)
+{
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+ WARN_ON(nr != nr_cpu_ids);
+#else
+ nr_cpu_ids = nr;
+#endif
+}
+
+/*
+ * We have several different "preferred sizes" for the cpumask
+ * operations, depending on operation.
+ *
+ * For example, the bitmap scanning and operating operations have
+ * optimized routines that work for the single-word case, but only when
+ * the size is constant. So if NR_CPUS fits in one single word, we are
+ * better off using that small constant, in order to trigger the
+ * optimized bit finding. That is 'small_cpumask_bits'.
+ *
+ * The clearing and copying operations will similarly perform better
+ * with a constant size, but we limit that size arbitrarily to four
+ * words. We call this 'large_cpumask_bits'.
+ *
+ * Finally, some operations just want the exact limit, either because
+ * they set bits or just don't have any faster fixed-sized versions. We
+ * call this just 'nr_cpumask_bits'.
+ *
+ * Note that these optional constants are always guaranteed to be at
+ * least as big as 'nr_cpu_ids' itself is, and all our cpumask
+ * allocations are at least that size (see cpumask_size()). The
+ * optimization comes from being able to potentially use a compile-time
+ * constant instead of a run-time generated exact number of CPUs.
+ */
+#if NR_CPUS <= BITS_PER_LONG
+ #define small_cpumask_bits ((unsigned int)NR_CPUS)
+ #define large_cpumask_bits ((unsigned int)NR_CPUS)
+#elif NR_CPUS <= 4*BITS_PER_LONG
+ #define small_cpumask_bits nr_cpu_ids
+ #define large_cpumask_bits ((unsigned int)NR_CPUS)
#else
-#define nr_cpumask_bits ((unsigned int)NR_CPUS)
+ #define small_cpumask_bits nr_cpu_ids
+ #define large_cpumask_bits nr_cpu_ids
#endif
+#define nr_cpumask_bits nr_cpu_ids
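
To make the three limits concrete, an illustrative (non-kernel) helper;
the bound named in each comment is the one the corresponding
implementation further below actually uses:

	static inline void example_mask_ops(struct cpumask *m)
	{
		cpumask_clear(m);	/* clearing: large_cpumask_bits */
		cpumask_setall(m);	/* setting: exact nr_cpumask_bits */
		(void)cpumask_first(m);	/* scanning: small_cpumask_bits */
	}

With NR_CPUS <= BITS_PER_LONG all three limits collapse to the same
compile-time constant, so e.g. cpumask_first() reduces to a single-word
bit search.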
/*
* The following particular system cpumasks and operations manage
@@ -53,22 +83,19 @@ extern unsigned int nr_cpu_ids;
*
* cpu_possible_mask- has bit 'cpu' set iff cpu is populatable
* cpu_present_mask - has bit 'cpu' set iff cpu is populated
+ * cpu_enabled_mask - has bit 'cpu' set iff cpu can be brought online
* cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler
* cpu_active_mask - has bit 'cpu' set iff cpu available to migration
*
* If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
*
- * The cpu_possible_mask is fixed at boot time, as the set of CPU id's
+ * The cpu_possible_mask is fixed at boot time, as the set of CPU IDs
* that might ever be plugged in at any time during the
* life of that system boot. The cpu_present_mask is dynamic(*),
* representing which CPUs are currently plugged in. And
* cpu_online_mask is the dynamic subset of cpu_present_mask,
* indicating those CPUs available for scheduling.
*
- * If HOTPLUG is enabled, then cpu_possible_mask is forced to have
- * all NR_CPUS bits set, otherwise it is just the set of CPUs that
- * ACPI reports present at boot.
- *
* If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
* depending on what ACPI reports as currently plugged in, otherwise
* cpu_present_mask is just a copy of cpu_possible_mask.
@@ -77,7 +104,7 @@ extern unsigned int nr_cpu_ids;
* hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
*
* Subtleties:
- * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
+ * 1) UP ARCHes (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
* assumption that their single CPU is online. The UP
* cpu_{online,possible,present}_masks are placebos. Changing them
will have no useful effect on the following num_*_cpus()
@@ -89,20 +116,23 @@ extern unsigned int nr_cpu_ids;
extern struct cpumask __cpu_possible_mask;
extern struct cpumask __cpu_online_mask;
+extern struct cpumask __cpu_enabled_mask;
extern struct cpumask __cpu_present_mask;
extern struct cpumask __cpu_active_mask;
extern struct cpumask __cpu_dying_mask;
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
#define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)
+#define cpu_enabled_mask ((const struct cpumask *)&__cpu_enabled_mask)
#define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
#define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask)
#define cpu_dying_mask ((const struct cpumask *)&__cpu_dying_mask)
extern atomic_t __num_online_cpus;
+extern unsigned int __num_possible_cpus;
extern cpumask_t cpus_booted_once_mask;
-static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
+static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
WARN_ON_ONCE(cpu >= bits);
@@ -110,125 +140,234 @@ static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
}
/* verify cpu argument to cpumask_* operators */
-static inline unsigned int cpumask_check(unsigned int cpu)
+static __always_inline unsigned int cpumask_check(unsigned int cpu)
{
- cpu_max_bits_warn(cpu, nr_cpumask_bits);
+ cpu_max_bits_warn(cpu, small_cpumask_bits);
return cpu;
}
-#if NR_CPUS == 1
-/* Uniprocessor. Assume all masks are "1". */
-static inline unsigned int cpumask_first(const struct cpumask *srcp)
+/**
+ * cpumask_first - get the first cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Return: >= nr_cpu_ids if no cpus set.
+ */
+static __always_inline unsigned int cpumask_first(const struct cpumask *srcp)
{
- return 0;
+ return find_first_bit(cpumask_bits(srcp), small_cpumask_bits);
}
-static inline unsigned int cpumask_last(const struct cpumask *srcp)
+/**
+ * cpumask_first_zero - get the first unset cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Return: >= nr_cpu_ids if all cpus are set.
+ */
+static __always_inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
- return 0;
+ return find_first_zero_bit(cpumask_bits(srcp), small_cpumask_bits);
+}
+
+/**
+ * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
+ * @srcp1: the first input
+ * @srcp2: the second input
+ *
+ * Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
+ */
+static __always_inline
+unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
+{
+ return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
-/* Valid inputs for n are -1 and 0. */
-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+/**
+ * cpumask_first_andnot - return the first cpu from *srcp1 & ~*srcp2
+ * @srcp1: the first input
+ * @srcp2: the second input
+ *
+ * Return: >= nr_cpu_ids if no such cpu found.
+ */
+static __always_inline
+unsigned int cpumask_first_andnot(const struct cpumask *srcp1, const struct cpumask *srcp2)
{
- return n+1;
+ return find_first_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+/**
+ * cpumask_first_and_and - return the first cpu from *srcp1 & *srcp2 & *srcp3
+ * @srcp1: the first input
+ * @srcp2: the second input
+ * @srcp3: the third input
+ *
+ * Return: >= nr_cpu_ids if no cpus set in all three masks.
+ */
+static __always_inline
+unsigned int cpumask_first_and_and(const struct cpumask *srcp1,
+ const struct cpumask *srcp2,
+ const struct cpumask *srcp3)
{
- return n+1;
+ return find_first_and_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+ cpumask_bits(srcp3), small_cpumask_bits);
}
-static inline unsigned int cpumask_next_and(int n,
- const struct cpumask *srcp,
- const struct cpumask *andp)
+/**
+ * cpumask_last - get the last CPU in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Return: >= nr_cpu_ids if no CPUs set.
+ */
+static __always_inline unsigned int cpumask_last(const struct cpumask *srcp)
{
- return n+1;
+ return find_last_bit(cpumask_bits(srcp), small_cpumask_bits);
}
-static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
- int start, bool wrap)
+/**
+ * cpumask_next - get the next cpu in a cpumask
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
+ * @srcp: the cpumask pointer
+ *
+ * Return: >= nr_cpu_ids if no further cpus set.
+ */
+static __always_inline
+unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
- /* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */
- return (wrap && n == 0);
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_bit(cpumask_bits(srcp), small_cpumask_bits, n + 1);
}
-/* cpu must be a valid cpu, ie 0, so there's no other choice. */
-static inline unsigned int cpumask_any_but(const struct cpumask *mask,
- unsigned int cpu)
+/**
+ * cpumask_next_zero - get the next unset cpu in a cpumask
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
+ * @srcp: the cpumask pointer
+ *
+ * Return: >= nr_cpu_ids if no further cpus unset.
+ */
+static __always_inline
+unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
- return 1;
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_zero_bit(cpumask_bits(srcp), small_cpumask_bits, n+1);
}
-static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+#if NR_CPUS == 1
+/* Uniprocessor: there is only one valid CPU */
+static __always_inline
+unsigned int cpumask_local_spread(unsigned int i, int node)
{
return 0;
}
-static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
- const struct cpumask *src2p) {
- return cpumask_next_and(-1, src1p, src2p);
+static __always_inline
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return cpumask_first_and(src1p, src2p);
}
-static inline int cpumask_any_distribute(const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
return cpumask_first(srcp);
}
-
-#define for_each_cpu(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#define for_each_cpu_not(cpu, mask) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#define for_each_cpu_wrap(cpu, mask, start) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
-#define for_each_cpu_and(cpu, mask1, mask2) \
- for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
#else
+unsigned int cpumask_local_spread(unsigned int i, int node);
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p);
+unsigned int cpumask_any_distribute(const struct cpumask *srcp);
+#endif /* NR_CPUS */
+
/**
- * cpumask_first - get the first cpu in a cpumask
- * @srcp: the cpumask pointer
+ * cpumask_next_and - get the next cpu in *src1p & *src2p
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no further cpus set in both.
*/
-static inline unsigned int cpumask_first(const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
- return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
+ small_cpumask_bits, n + 1);
}
/**
- * cpumask_last - get the last CPU in a cpumask
- * @srcp: - the cpumask pointer
+ * cpumask_next_andnot - get the next cpu in *src1p & ~*src2p
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
*
- * Returns >= nr_cpumask_bits if no CPUs set.
+ * Return: >= nr_cpu_ids if no further cpus set in *src1p & ~*src2p.
*/
-static inline unsigned int cpumask_last(const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_next_andnot(int n, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
- return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits);
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_andnot_bit(cpumask_bits(src1p), cpumask_bits(src2p),
+ small_cpumask_bits, n + 1);
}
-unsigned int __pure cpumask_next(int n, const struct cpumask *srcp);
+/**
+ * cpumask_next_and_wrap - get the next cpu in *src1p & *src2p, starting from
+ * @n+1. If nothing found, wrap around and start from
+ * the beginning
+ * @n: the cpu prior to the place to search (i.e. search starts from @n+1)
+ * @src1p: the first cpumask pointer
+ * @src2p: the second cpumask pointer
+ *
+ * Return: next set bit, wrapped if needed, or >= nr_cpu_ids if @src1p & @src2p is empty.
+ */
+static __always_inline
+unsigned int cpumask_next_and_wrap(int n, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
+ small_cpumask_bits, n + 1);
+}
/**
- * cpumask_next_zero - get the next unset cpu in a cpumask
- * @n: the cpu prior to the place to search (ie. return will be > @n)
- * @srcp: the cpumask pointer
+ * cpumask_next_wrap - get the next cpu in *src, starting from @n+1. If nothing
+ * found, wrap around and start from the beginning
+ * @n: the cpu prior to the place to search (i.e. search starts from @n+1)
+ * @src: cpumask pointer
*
- * Returns >= nr_cpu_ids if no further cpus unset.
+ * Return: next set bit, wrapped if needed, or >= nr_cpu_ids if @src is empty.
*/
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+static __always_inline
+unsigned int cpumask_next_wrap(int n, const struct cpumask *src)
{
/* -1 is a legal arg here. */
if (n != -1)
cpumask_check(n);
- return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+ return find_next_bit_wrap(cpumask_bits(src), small_cpumask_bits, n + 1);
}
-int __pure cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
-int __pure cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
-unsigned int cpumask_local_spread(unsigned int i, int node);
-int cpumask_any_and_distribute(const struct cpumask *src1p,
- const struct cpumask *src2p);
-int cpumask_any_distribute(const struct cpumask *srcp);
+/**
+ * cpumask_random - get random cpu in *src.
+ * @src: cpumask pointer
+ *
+ * Return: random set bit, or >= nr_cpu_ids if @src is empty.
+ */
+static __always_inline
+unsigned int cpumask_random(const struct cpumask *src)
+{
+ return find_random_bit(cpumask_bits(src), nr_cpu_ids);
+}
/**
* for_each_cpu - iterate over every cpu in a mask
@@ -238,58 +377,209 @@ int cpumask_any_distribute(const struct cpumask *srcp);
* After the loop, cpu is >= nr_cpu_ids.
*/
#define for_each_cpu(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next((cpu), (mask)), \
- (cpu) < nr_cpu_ids;)
+ for_each_set_bit(cpu, cpumask_bits(mask), small_cpumask_bits)
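
Usage sketch (illustrative): counting the CPUs set in @mask with the
iterator; after the loop @cpu is >= nr_cpu_ids:

	unsigned int cpu, count = 0;

	for_each_cpu(cpu, mask)
		count++;	/* same result as cpumask_weight(mask) */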
/**
- * for_each_cpu_not - iterate over every cpu in a complemented mask
+ * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
* @cpu: the (optionally unsigned) integer iterator
* @mask: the cpumask pointer
+ * @start: the start location
+ *
+ * The implementation does not assume any bit in @mask is set (including @start).
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_not(cpu, mask) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next_zero((cpu), (mask)), \
- (cpu) < nr_cpu_ids;)
+#define for_each_cpu_wrap(cpu, mask, start) \
+ for_each_set_bit_wrap(cpu, cpumask_bits(mask), small_cpumask_bits, start)
-extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+/**
+ * for_each_cpu_and - iterate over every cpu in both masks
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ * struct cpumask tmp;
+ * cpumask_and(&tmp, &mask1, &mask2);
+ * for_each_cpu(cpu, &tmp)
+ * ...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_and(cpu, mask1, mask2) \
+ for_each_and_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
/**
- * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
+ * for_each_cpu_andnot - iterate over every cpu present in one mask, excluding
+ * those present in another.
* @cpu: the (optionally unsigned) integer iterator
- * @mask: the cpumask poiter
- * @start: the start location
+ * @mask1: the first cpumask pointer
+ * @mask2: the second cpumask pointer
*
- * The implementation does not assume any bit in @mask is set (including @start).
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ * struct cpumask tmp;
+ * cpumask_andnot(&tmp, &mask1, &mask2);
+ * for_each_cpu(cpu, &tmp)
+ * ...
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_wrap(cpu, mask, start) \
- for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \
- (cpu) < nr_cpumask_bits; \
- (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+#define for_each_cpu_andnot(cpu, mask1, mask2) \
+ for_each_andnot_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
/**
- * for_each_cpu_and - iterate over every cpu in both masks
+ * for_each_cpu_or - iterate over every cpu present in either mask
* @cpu: the (optionally unsigned) integer iterator
* @mask1: the first cpumask pointer
* @mask2: the second cpumask pointer
*
* This saves a temporary CPU mask in many places. It is equivalent to:
* struct cpumask tmp;
- * cpumask_and(&tmp, &mask1, &mask2);
+ * cpumask_or(&tmp, &mask1, &mask2);
* for_each_cpu(cpu, &tmp)
* ...
*
* After the loop, cpu is >= nr_cpu_ids.
*/
-#define for_each_cpu_and(cpu, mask1, mask2) \
- for ((cpu) = -1; \
- (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \
- (cpu) < nr_cpu_ids;)
-#endif /* SMP */
+#define for_each_cpu_or(cpu, mask1, mask2) \
+ for_each_or_bit(cpu, cpumask_bits(mask1), cpumask_bits(mask2), small_cpumask_bits)
+
+/**
+ * for_each_cpu_from - iterate over CPUs present in @mask, from @cpu to the end of @mask.
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_from(cpu, mask) \
+ for_each_set_bit_from(cpu, cpumask_bits(mask), small_cpumask_bits)
+
+/**
+ * cpumask_any_but - return an arbitrary cpu in a cpumask, but not this one.
+ * @mask: the cpumask to search
+ * @cpu: the cpu to ignore.
+ *
+ * Often used to find any cpu but smp_processor_id() in a mask.
+ * If @cpu == -1, the function is equivalent to cpumask_any().
+ * Return: >= nr_cpu_ids if no cpus set.
+ */
+static __always_inline
+unsigned int cpumask_any_but(const struct cpumask *mask, int cpu)
+{
+ unsigned int i;
+
+ /* -1 is a legal arg here. */
+ if (cpu != -1)
+ cpumask_check(cpu);
+
+ for_each_cpu(i, mask)
+ if (i != cpu)
+ break;
+ return i;
+}
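
The common pattern mentioned in the comment above, as a sketch
(smp_send_reschedule() is only an illustrative consumer):

	/* Pick any online CPU other than the current one. */
	unsigned int target = cpumask_any_but(cpu_online_mask,
					      smp_processor_id());

	if (target < nr_cpu_ids)
		smp_send_reschedule(target);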
+
+/**
+ * cpumask_any_and_but - pick an arbitrary cpu from *mask1 & *mask2, but not this one.
+ * @mask1: the first input cpumask
+ * @mask2: the second input cpumask
+ * @cpu: the cpu to ignore
+ *
+ * If @cpu == -1, the function is equivalent to cpumask_any_and().
+ * Return: >= nr_cpu_ids if no cpus set.
+ */
+static __always_inline
+unsigned int cpumask_any_and_but(const struct cpumask *mask1,
+ const struct cpumask *mask2,
+ int cpu)
+{
+ unsigned int i;
+
+ /* -1 is a legal arg here. */
+ if (cpu != -1)
+ cpumask_check(cpu);
+
+ i = cpumask_first_and(mask1, mask2);
+ if (i != cpu)
+ return i;
+
+ return cpumask_next_and(cpu, mask1, mask2);
+}
+
+/**
+ * cpumask_any_andnot_but - pick an arbitrary cpu from *mask1 & ~*mask2, but not this one.
+ * @mask1: the first input cpumask
+ * @mask2: the second input cpumask
+ * @cpu: the cpu to ignore
+ *
+ * If @cpu == -1, the function returns the first matching cpu.
+ * Return: >= nr_cpu_ids if no cpus set.
+ */
+static __always_inline
+unsigned int cpumask_any_andnot_but(const struct cpumask *mask1,
+ const struct cpumask *mask2,
+ int cpu)
+{
+ unsigned int i;
+
+ /* -1 is a legal arg here. */
+ if (cpu != -1)
+ cpumask_check(cpu);
+
+ i = cpumask_first_andnot(mask1, mask2);
+ if (i != cpu)
+ return i;
+
+ return cpumask_next_andnot(cpu, mask1, mask2);
+}
+
+/**
+ * cpumask_nth - get the Nth cpu in a cpumask
+ * @srcp: the cpumask pointer
+ * @cpu: the Nth cpu to find, starting from 0
+ *
+ * Return: >= nr_cpu_ids if such cpu doesn't exist.
+ */
+static __always_inline
+unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
+{
+ return find_nth_bit(cpumask_bits(srcp), small_cpumask_bits, cpumask_check(cpu));
+}
+
+/**
+ * cpumask_nth_and - get the Nth cpu in 2 cpumasks
+ * @srcp1: the cpumask pointer
+ * @srcp2: the cpumask pointer
+ * @cpu: the Nth cpu to find, starting from 0
+ *
+ * Return: >= nr_cpu_ids if such cpu doesn't exist.
+ */
+static __always_inline
+unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return find_nth_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
+ small_cpumask_bits, cpumask_check(cpu));
+}
+
+/**
+ * cpumask_nth_and_andnot - get the Nth cpu set in 1st and 2nd cpumask, and clear in 3rd.
+ * @srcp1: the cpumask pointer
+ * @srcp2: the cpumask pointer
+ * @srcp3: the cpumask pointer
+ * @cpu: the Nth cpu to find, starting from 0
+ *
+ * Return: >= nr_cpu_ids if such cpu doesn't exist.
+ */
+static __always_inline
+unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp1,
+ const struct cpumask *srcp2,
+ const struct cpumask *srcp3)
+{
+ return find_nth_and_andnot_bit(cpumask_bits(srcp1),
+ cpumask_bits(srcp2),
+ cpumask_bits(srcp3),
+ small_cpumask_bits, cpumask_check(cpu));
+}
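
Sketch of the typical use of the Nth-bit helpers: spreading some object
index @i evenly over the CPUs of a mask (assumed non-empty):

	unsigned int cpu = cpumask_nth(i % cpumask_weight(mask), mask);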
#define CPU_BITS_NONE \
{ \
@@ -306,28 +596,42 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
-static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline
+void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
-static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+static __always_inline
+void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
+/**
+ * cpumask_clear_cpus - clear cpus in a cpumask
+ * @dstp: the cpumask pointer
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @ncpus: number of cpus to clear (< nr_cpu_ids)
+ */
+static __always_inline void cpumask_clear_cpus(struct cpumask *dstp,
+ unsigned int cpu, unsigned int ncpus)
+{
+ cpumask_check(cpu + ncpus - 1);
+ bitmap_clear(cpumask_bits(dstp), cpumask_check(cpu), ncpus);
+}
/**
* cpumask_clear_cpu - clear a cpu in a cpumask
* @cpu: cpu number (< nr_cpu_ids)
* @dstp: the cpumask pointer
*/
-static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
-static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
@@ -337,9 +641,10 @@ static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in @cpumask, else returns 0
+ * Return: true if @cpu is set in @cpumask, else false
*/
-static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
@@ -349,11 +654,12 @@ static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
- *
* test_and_set_bit wrapper for cpumasks.
+ *
+ * Return: true if @cpu is set in the old bitmap of @cpumask, else false
*/
-static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -363,11 +669,12 @@ static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
- *
* test_and_clear_bit wrapper for cpumasks.
+ *
+ * Return: true if @cpu is set in the old bitmap of @cpumask, else false
*/
-static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
+static __always_inline
+bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
}
@@ -376,8 +683,12 @@ static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
* cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
-static inline void cpumask_setall(struct cpumask *dstp)
+static __always_inline void cpumask_setall(struct cpumask *dstp)
{
+ if (small_const_nbits(small_cpumask_bits)) {
+ cpumask_bits(dstp)[0] = BITMAP_LAST_WORD_MASK(nr_cpumask_bits);
+ return;
+ }
bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -385,9 +696,9 @@ static inline void cpumask_setall(struct cpumask *dstp)
* cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
* @dstp: the cpumask pointer
*/
-static inline void cpumask_clear(struct cpumask *dstp)
+static __always_inline void cpumask_clear(struct cpumask *dstp)
{
- bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
+ bitmap_zero(cpumask_bits(dstp), large_cpumask_bits);
}
/**
@@ -396,14 +707,14 @@ static inline void cpumask_clear(struct cpumask *dstp)
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns 0, else returns 1
+ * Return: false if *@dstp is empty, else true
*/
-static inline int cpumask_and(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_and(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
- cpumask_bits(src2p), nr_cpumask_bits);
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
@@ -412,65 +723,72 @@ static inline int cpumask_and(struct cpumask *dstp,
* @src1p: the first input
* @src2p: the second input
*/
-static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
- cpumask_bits(src2p), nr_cpumask_bits);
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
- * cpumask_xor - *dstp = *src1p ^ *src2p
+ * cpumask_weighted_or - *dstp = *src1p | *src2p and return the weight of the result
* @dstp: the cpumask result
* @src1p: the first input
* @src2p: the second input
+ *
+ * Return: The number of bits set in the resulting cpumask @dstp
*/
-static inline void cpumask_xor(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+unsigned int cpumask_weighted_or(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
- bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
- cpumask_bits(src2p), nr_cpumask_bits);
+ return bitmap_weighted_or(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
- * cpumask_andnot - *dstp = *src1p & ~*src2p
+ * cpumask_xor - *dstp = *src1p ^ *src2p
* @dstp: the cpumask result
* @src1p: the first input
* @src2p: the second input
- *
- * If *@dstp is empty, returns 0, else returns 1
*/
-static inline int cpumask_andnot(struct cpumask *dstp,
- const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+void cpumask_xor(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
- return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
- cpumask_bits(src2p), nr_cpumask_bits);
+ bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
- * cpumask_complement - *dstp = ~*srcp
+ * cpumask_andnot - *dstp = *src1p & ~*src2p
* @dstp: the cpumask result
- * @srcp: the input to invert
+ * @src1p: the first input
+ * @src2p: the second input
+ *
+ * Return: false if *@dstp is empty, else true
*/
-static inline void cpumask_complement(struct cpumask *dstp,
- const struct cpumask *srcp)
+static __always_inline
+bool cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
{
- bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
- nr_cpumask_bits);
+ return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), small_cpumask_bits);
}
/**
* cpumask_equal - *src1p == *src2p
* @src1p: the first input
* @src2p: the second input
+ *
+ * Return: true if the cpumasks are equal, false if not
*/
-static inline bool cpumask_equal(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_equal(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
- nr_cpumask_bits);
+ small_cpumask_bits);
}
/**
@@ -478,25 +796,31 @@ static inline bool cpumask_equal(const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
* @src3p: the third input
+ *
+ * Return: true if first cpumask ORed with second cpumask == third cpumask,
+ * otherwise false
*/
-static inline bool cpumask_or_equal(const struct cpumask *src1p,
- const struct cpumask *src2p,
- const struct cpumask *src3p)
+static __always_inline
+bool cpumask_or_equal(const struct cpumask *src1p, const struct cpumask *src2p,
+ const struct cpumask *src3p)
{
return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p),
- cpumask_bits(src3p), nr_cpumask_bits);
+ cpumask_bits(src3p), small_cpumask_bits);
}
/**
* cpumask_intersects - (*src1p & *src2p) != 0
* @src1p: the first input
* @src2p: the second input
+ *
+ * Return: true if first cpumask ANDed with second cpumask is non-empty,
+ * otherwise false
*/
-static inline bool cpumask_intersects(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_intersects(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
- nr_cpumask_bits);
+ small_cpumask_bits);
}
/**
@@ -504,29 +828,33 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
*
- * Returns 1 if *@src1p is a subset of *@src2p, else returns 0
+ * Return: true if *@src1p is a subset of *@src2p, else false
*/
-static inline int cpumask_subset(const struct cpumask *src1p,
- const struct cpumask *src2p)
+static __always_inline
+bool cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p)
{
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
- nr_cpumask_bits);
+ small_cpumask_bits);
}
/**
* cpumask_empty - *srcp == 0
* @srcp: the cpumask in which to check that all cpus < nr_cpu_ids are clear.
+ *
+ * Return: true if srcp is empty (has no bits set), else false
*/
-static inline bool cpumask_empty(const struct cpumask *srcp)
+static __always_inline bool cpumask_empty(const struct cpumask *srcp)
{
- return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
+ return bitmap_empty(cpumask_bits(srcp), small_cpumask_bits);
}
/**
* cpumask_full - *srcp == 0xFFFFFFFF...
* @srcp: the cpumask in which to check that all cpus < nr_cpu_ids are set.
+ *
+ * Return: true if srcp is full (has all bits set), else false
*/
-static inline bool cpumask_full(const struct cpumask *srcp)
+static __always_inline bool cpumask_full(const struct cpumask *srcp)
{
return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
}
@@ -534,10 +862,39 @@ static inline bool cpumask_full(const struct cpumask *srcp)
/**
* cpumask_weight - Count of bits in *srcp
* @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+ *
+ * Return: count of bits set in *srcp
*/
-static inline unsigned int cpumask_weight(const struct cpumask *srcp)
+static __always_inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
- return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
+ return bitmap_weight(cpumask_bits(srcp), small_cpumask_bits);
+}
+
+/**
+ * cpumask_weight_and - Count of bits in (*srcp1 & *srcp2)
+ * @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
+ * @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
+ *
+ * Return: count of bits set in both *srcp1 and *srcp2
+ */
+static __always_inline
+unsigned int cpumask_weight_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
+{
+ return bitmap_weight_and(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
+}
+
+/**
+ * cpumask_weight_andnot - Count of bits in (*srcp1 & ~*srcp2)
+ * @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
+ * @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
+ *
+ * Return: count of bits set in *srcp1 and cleared in *srcp2
+ */
+static __always_inline
+unsigned int cpumask_weight_andnot(const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return bitmap_weight_andnot(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits);
}
/**
@@ -546,11 +903,11 @@ static inline unsigned int cpumask_weight(const struct cpumask *srcp)
* @srcp: the input to shift
* @n: the number of bits to shift by
*/
-static inline void cpumask_shift_right(struct cpumask *dstp,
- const struct cpumask *srcp, int n)
+static __always_inline
+void cpumask_shift_right(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
- nr_cpumask_bits);
+ small_cpumask_bits);
}
/**
@@ -559,8 +916,8 @@ static inline void cpumask_shift_right(struct cpumask *dstp,
* @srcp: the input to shift
* @n: the number of bits to shift by
*/
-static inline void cpumask_shift_left(struct cpumask *dstp,
- const struct cpumask *srcp, int n)
+static __always_inline
+void cpumask_shift_left(struct cpumask *dstp, const struct cpumask *srcp, int n)
{
bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
nr_cpumask_bits);
@@ -571,35 +928,26 @@ static inline void cpumask_shift_left(struct cpumask *dstp,
* @dstp: the result
* @srcp: the input cpumask
*/
-static inline void cpumask_copy(struct cpumask *dstp,
- const struct cpumask *srcp)
+static __always_inline
+void cpumask_copy(struct cpumask *dstp, const struct cpumask *srcp)
{
- bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
+ bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), large_cpumask_bits);
}
/**
- * cpumask_any - pick a "random" cpu from *srcp
+ * cpumask_any - pick an arbitrary cpu from *srcp
* @srcp: the input cpumask
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
#define cpumask_any(srcp) cpumask_first(srcp)
/**
- * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
- * @src1p: the first input
- * @src2p: the second input
- *
- * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
- */
-#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
-
-/**
- * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
+ * cpumask_any_and - pick an arbitrary cpu from *mask1 & *mask2
* @mask1: the first input cpumask
* @mask2: the second input cpumask
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
@@ -615,10 +963,10 @@ static inline void cpumask_copy(struct cpumask *dstp,
* @len: the length of the buffer
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
-static inline int cpumask_parse_user(const char __user *buf, int len,
- struct cpumask *dstp)
+static __always_inline
+int cpumask_parse_user(const char __user *buf, int len, struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -629,10 +977,10 @@ static inline int cpumask_parse_user(const char __user *buf, int len,
* @len: the length of the buffer
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
-static inline int cpumask_parselist_user(const char __user *buf, int len,
- struct cpumask *dstp)
+static __always_inline
+int cpumask_parselist_user(const char __user *buf, int len, struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
nr_cpumask_bits);
@@ -643,9 +991,9 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
-static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
+static __always_inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
}
@@ -655,128 +1003,120 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
-static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
+static __always_inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
}
/**
- * cpumask_size - size to allocate for a 'struct cpumask' in bytes
+ * cpumask_size - calculate size to allocate for a 'struct cpumask' in bytes
+ *
+ * Return: size to allocate for a &struct cpumask in bytes
*/
-static inline unsigned int cpumask_size(void)
+static __always_inline unsigned int cpumask_size(void)
{
- return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
+ return bitmap_size(large_cpumask_bits);
}
-/*
- * cpumask_var_t: struct cpumask for stack usage.
- *
- * Oh, the wicked games we play! In order to make kernel coding a
- * little more difficult, we typedef cpumask_var_t to an array or a
- * pointer: doing &mask on an array is a noop, so it still works.
- *
- * ie.
- * cpumask_var_t tmpmask;
- * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
- * return -ENOMEM;
- *
- * ... use 'tmpmask' like a normal struct cpumask * ...
- *
- * free_cpumask_var(tmpmask);
- *
- *
- * However, one notable exception is there. alloc_cpumask_var() allocates
- * only nr_cpumask_bits bits (in the other hand, real cpumask_t always has
- * NR_CPUS bits). Therefore you don't have to dereference cpumask_var_t.
- *
- * cpumask_var_t tmpmask;
- * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
- * return -ENOMEM;
- *
- * var = *tmpmask;
- *
- * This code makes NR_CPUS length memcopy and brings to a memory corruption.
- * cpumask_copy() provide safe copy functionality.
- *
- * Note that there is another evil here: If you define a cpumask_var_t
- * as a percpu variable then the way to obtain the address of the cpumask
- * structure differently influences what this_cpu_* operation needs to be
- * used. Please use this_cpu_cpumask_var_t in those cases. The direct use
- * of this_cpu_ptr() or this_cpu_read() will lead to failures when the
- * other type of cpumask_var_t implementation is configured.
- *
- * Please also note that __cpumask_var_read_mostly can be used to declare
- * a cpumask_var_t variable itself (not its content) as read mostly.
- */
#ifdef CONFIG_CPUMASK_OFFSTACK
-typedef struct cpumask *cpumask_var_t;
#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
#define __cpumask_var_read_mostly __read_mostly
+#define CPUMASK_VAR_NULL NULL
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
-bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
-bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
-bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
+
+static __always_inline
+bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
+{
+ return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
+}
+
+/**
+ * alloc_cpumask_var - allocate a struct cpumask
+ * @mask: pointer to cpumask_var_t where the cpumask is returned
+ * @flags: GFP_ flags
+ *
+ * Only performs a real allocation when CONFIG_CPUMASK_OFFSTACK=y;
+ * otherwise it is a no-op that always succeeds (in <linux/cpumask.h>).
+ *
+ * See alloc_cpumask_var_node.
+ *
+ * Return: %true if allocation succeeded, %false if not
+ */
+static __always_inline
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
+}
+
+static __always_inline
+bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return alloc_cpumask_var(mask, flags | __GFP_ZERO);
+}
+
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
-static inline bool cpumask_available(cpumask_var_t mask)
+static __always_inline bool cpumask_available(cpumask_var_t mask)
{
return mask != NULL;
}
#else
-typedef struct cpumask cpumask_var_t[1];
#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
#define __cpumask_var_read_mostly
+#define CPUMASK_VAR_NULL {}
-static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+static __always_inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return true;
}
-static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+static __always_inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
return true;
}
-static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+static __always_inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
cpumask_clear(*mask);
return true;
}
-static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+static __always_inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
cpumask_clear(*mask);
return true;
}
-static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+static __always_inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}
-static inline void free_cpumask_var(cpumask_var_t mask)
+static __always_inline void free_cpumask_var(cpumask_var_t mask)
{
}
-static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
+static __always_inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
-static inline bool cpumask_available(cpumask_var_t mask)
+static __always_inline bool cpumask_available(cpumask_var_t mask)
{
return true;
}
#endif /* CONFIG_CPUMASK_OFFSTACK */
+DEFINE_FREE(free_cpumask_var, struct cpumask *, if (_T) free_cpumask_var(_T));
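
A hedged sketch of this cleanup hook in use (scope-based freeing via
<linux/cleanup.h>; written for the CONFIG_CPUMASK_OFFSTACK case, where
cpumask_var_t is a plain pointer):

	struct cpumask *tmp __free(free_cpumask_var) = CPUMASK_VAR_NULL;

	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;
	/* ... use tmp; it is freed automatically on every return path ... */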
+
/* It's common to want to use cpu_all_mask in struct member initializers,
* so it has to refer to an address rather than a pointer. */
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
@@ -785,60 +1125,48 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
/* First bits of cpu_bit_bitmap are in fact unset. */
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
+#if NR_CPUS == 1
+/* Uniprocessor: the possible/online/present masks are always "1" */
+#define for_each_possible_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_online_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_present_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++)
+
+#define for_each_possible_cpu_wrap(cpu, start) \
+ for ((void)(start), (cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_online_cpu_wrap(cpu, start) \
+ for ((void)(start), (cpu) = 0; (cpu) < 1; (cpu)++)
+#else
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
+#define for_each_enabled_cpu(cpu) for_each_cpu((cpu), cpu_enabled_mask)
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
+#define for_each_possible_cpu_wrap(cpu, start) \
+ for_each_cpu_wrap((cpu), cpu_possible_mask, (start))
+#define for_each_online_cpu_wrap(cpu, start) \
+ for_each_cpu_wrap((cpu), cpu_online_mask, (start))
+#endif
+
/* Wrappers for arch boot code to manipulate normally-constant masks */
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
-void init_cpu_online(const struct cpumask *src);
-static inline void reset_cpu_possible_mask(void)
-{
- bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS);
-}
+#define assign_cpu(cpu, mask, val) \
+ assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
-static inline void
-set_cpu_possible(unsigned int cpu, bool possible)
-{
- if (possible)
- cpumask_set_cpu(cpu, &__cpu_possible_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_possible_mask);
-}
+#define __assign_cpu(cpu, mask, val) \
+ __assign_bit(cpumask_check(cpu), cpumask_bits(mask), (val))
-static inline void
-set_cpu_present(unsigned int cpu, bool present)
-{
- if (present)
- cpumask_set_cpu(cpu, &__cpu_present_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_present_mask);
-}
+#define set_cpu_enabled(cpu, enabled) assign_cpu((cpu), &__cpu_enabled_mask, (enabled))
+#define set_cpu_present(cpu, present) assign_cpu((cpu), &__cpu_present_mask, (present))
+#define set_cpu_active(cpu, active) assign_cpu((cpu), &__cpu_active_mask, (active))
+#define set_cpu_dying(cpu, dying) assign_cpu((cpu), &__cpu_dying_mask, (dying))
void set_cpu_online(unsigned int cpu, bool online);
-
-static inline void
-set_cpu_active(unsigned int cpu, bool active)
-{
- if (active)
- cpumask_set_cpu(cpu, &__cpu_active_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_active_mask);
-}
-
-static inline void
-set_cpu_dying(unsigned int cpu, bool dying)
-{
- if (dying)
- cpumask_set_cpu(cpu, &__cpu_dying_mask);
- else
- cpumask_clear_cpu(cpu, &__cpu_dying_mask);
-}
+void set_cpu_possible(unsigned int cpu, bool possible);
/**
- * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * to_cpumask - convert a NR_CPUS bitmap to a struct cpumask *
* @bitmap: the bitmap
*
* There are a few places where cpumask_var_t isn't appropriate and
@@ -851,7 +1179,7 @@ set_cpu_dying(unsigned int cpu, bool dying)
((struct cpumask *)(1 ? (bitmap) \
: (void *)sizeof(__check_is_bitmap(bitmap))))
-static inline int __check_is_bitmap(const unsigned long *bitmap)
+static __always_inline int __check_is_bitmap(const unsigned long *bitmap)
{
return 1;
}
@@ -866,7 +1194,7 @@ static inline int __check_is_bitmap(const unsigned long *bitmap)
extern const unsigned long
cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
-static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
+static __always_inline const struct cpumask *get_cpu_mask(unsigned int cpu)
{
const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
p -= cpu / BITS_PER_LONG;
@@ -881,36 +1209,49 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
* interface gives only a momentary snapshot and is not protected against
* concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
* region.
+ *
+ * Return: momentary snapshot of the number of online CPUs
*/
-static inline unsigned int num_online_cpus(void)
+static __always_inline unsigned int num_online_cpus(void)
{
- return atomic_read(&__num_online_cpus);
+ return raw_atomic_read(&__num_online_cpus);
}
-#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
+
+static __always_inline unsigned int num_possible_cpus(void)
+{
+ return __num_possible_cpus;
+}
+
+#define num_enabled_cpus() cpumask_weight(cpu_enabled_mask)
#define num_present_cpus() cpumask_weight(cpu_present_mask)
#define num_active_cpus() cpumask_weight(cpu_active_mask)
-static inline bool cpu_online(unsigned int cpu)
+static __always_inline bool cpu_online(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_online_mask);
}
-static inline bool cpu_possible(unsigned int cpu)
+static __always_inline bool cpu_enabled(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_enabled_mask);
+}
+
+static __always_inline bool cpu_possible(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_possible_mask);
}
-static inline bool cpu_present(unsigned int cpu)
+static __always_inline bool cpu_present(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_present_mask);
}
-static inline bool cpu_active(unsigned int cpu)
+static __always_inline bool cpu_active(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_active_mask);
}
-static inline bool cpu_dying(unsigned int cpu)
+static __always_inline bool cpu_dying(unsigned int cpu)
{
return cpumask_test_cpu(cpu, cpu_dying_mask);
}
@@ -919,30 +1260,36 @@ static inline bool cpu_dying(unsigned int cpu)
#define num_online_cpus() 1U
#define num_possible_cpus() 1U
+#define num_enabled_cpus() 1U
#define num_present_cpus() 1U
#define num_active_cpus() 1U
-static inline bool cpu_online(unsigned int cpu)
+static __always_inline bool cpu_online(unsigned int cpu)
+{
+ return cpu == 0;
+}
+
+static __always_inline bool cpu_possible(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_possible(unsigned int cpu)
+static __always_inline bool cpu_enabled(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_present(unsigned int cpu)
+static __always_inline bool cpu_present(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_active(unsigned int cpu)
+static __always_inline bool cpu_active(unsigned int cpu)
{
return cpu == 0;
}
-static inline bool cpu_dying(unsigned int cpu)
+static __always_inline bool cpu_dying(unsigned int cpu)
{
return false;
}
@@ -973,16 +1320,62 @@ static inline bool cpu_dying(unsigned int cpu)
* @mask: the cpumask to copy
* @buf: the buffer to copy into
*
- * Returns the length of the (null-terminated) @buf string, zero if
+ * Return: the length of the (null-terminated) @buf string, zero if
* nothing is copied.
*/
-static inline ssize_t
+static __always_inline ssize_t
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
nr_cpu_ids);
}
+/**
+ * cpumap_print_bitmask_to_buf - copies the cpumask into the buffer as
+ * hex values of cpumask
+ *
+ * @buf: the buffer to copy into
+ * @mask: the cpumask to copy
+ * @off: offset in the printed string from which we copy into @buf
+ * @count: the maximum number of bytes to print
+ *
+ * The function prints the cpumask into the buffer as hex values;
+ * it is typically used by a bin_attribute to export the cpumask
+ * bitmask ABI.
+ *
+ * Return: the number of bytes copied, excluding the terminating '\0'.
+ */
+static __always_inline
+ssize_t cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
+{
+ return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
+ nr_cpu_ids, off, count) - 1;
+}
+
+/**
+ * cpumap_print_list_to_buf - copies the cpumask into the buffer as
+ * comma-separated list of cpus
+ * @buf: the buffer to copy into
+ * @mask: the cpumask to copy
+ * @off: offset in the printed string from which we copy into @buf
+ * @count: the maximum number of bytes to print
+ *
+ * Everything is the same as cpumap_print_bitmask_to_buf() above,
+ * except the print format.
+ *
+ * Return: the number of bytes copied, excluding the terminating '\0'.
+ */
+static __always_inline
+ssize_t cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
+ loff_t off, size_t count)
+{
+ return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
+ nr_cpu_ids, off, count) - 1;
+}
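
A sketch of the bin_attribute use mentioned above; the ->read() prototype
and all names here are assumptions to be checked against the sysfs headers
of the tree at hand:

	static ssize_t my_cpus_read(struct file *file, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
	{
		return cpumap_print_list_to_buf(buf, my_mask, off, count);
	}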
+
#if NR_CPUS <= BITS_PER_LONG
#define CPU_MASK_ALL \
(cpumask_t) { { \
@@ -1006,4 +1399,23 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
[0] = 1UL \
} }
+/*
+ * Provide a valid theoretical max size for cpumap and cpulist sysfs files
+ * to avoid breaking userspace which may allocate a buffer based on the size
+ * reported by e.g. fstat.
+ *
+ * For cpumap, NR_CPUS * 9/32 - 1 should be an exact length.
+ *
+ * For cpulist, 7 is (ceil(log10(NR_CPUS)) + 1), allowing NR_CPUS to be up
+ * to 2 orders of magnitude larger than 8192. We then divide by 2 to
+ * cover a worst-case of every other cpu being on one of two nodes for a
+ * very large NR_CPUS.
+ *
+ * Use PAGE_SIZE as a minimum for smaller configurations while avoiding
+ * unsigned comparison to -1.
+ */
+#define CPUMAP_FILE_MAX_BYTES (((NR_CPUS * 9)/32 > PAGE_SIZE) \
+ ? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
+#define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
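
Worked check of the cpumap bound, taking NR_CPUS = 16384 as an example:
the bitmask prints as 16384/4 = 4096 hex digits plus 16384/32 - 1 = 511
separating commas, i.e. 4607 bytes, which is exactly (16384 * 9)/32 - 1;
and since (16384 * 9)/32 = 4608 exceeds a 4 KiB PAGE_SIZE, here
CPUMAP_FILE_MAX_BYTES evaluates to 4607.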
+
#endif /* __LINUX_CPUMASK_H */
diff --git a/include/linux/cpumask_api.h b/include/linux/cpumask_api.h
new file mode 100644
index 000000000000..83bd3ebe82b0
--- /dev/null
+++ b/include/linux/cpumask_api.h
@@ -0,0 +1 @@
+#include <linux/cpumask.h>
diff --git a/include/linux/cpumask_types.h b/include/linux/cpumask_types.h
new file mode 100644
index 000000000000..461ed1b6bcdb
--- /dev/null
+++ b/include/linux/cpumask_types.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_CPUMASK_TYPES_H
+#define __LINUX_CPUMASK_TYPES_H
+
+#include <linux/bitops.h>
+#include <linux/threads.h>
+
+/* Don't assign or return these: may not be this big! */
+typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+
+/**
+ * cpumask_bits - get the bits in a cpumask
+ * @maskp: the struct cpumask *
+ *
+ * You should only assume nr_cpu_ids bits of this mask are valid. This is
+ * a macro so it's const-correct.
+ */
+#define cpumask_bits(maskp) ((maskp)->bits)
+
+/*
+ * cpumask_var_t: struct cpumask for stack usage.
+ *
+ * Oh, the wicked games we play! In order to make kernel coding a
+ * little more difficult, we typedef cpumask_var_t to an array or a
+ * pointer: doing &mask on an array is a noop, so it still works.
+ *
+ * i.e.
+ * cpumask_var_t tmpmask;
+ * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ * return -ENOMEM;
+ *
+ * ... use 'tmpmask' like a normal struct cpumask * ...
+ *
+ * free_cpumask_var(tmpmask);
+ *
+ *
+ * However, one notable exception is there. alloc_cpumask_var() allocates
+ * only nr_cpumask_bits bits (on the other hand, real cpumask_t always has
+ * NR_CPUS bits). Therefore you must not dereference cpumask_var_t.
+ *
+ * cpumask_var_t tmpmask;
+ * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ * return -ENOMEM;
+ *
+ * var = *tmpmask;
+ *
+ * This code does an NR_CPUS-length memcpy and leads to memory corruption.
+ * cpumask_copy() provides safe copy functionality.
+ *
+ * Note that there is another evil here: if you define a cpumask_var_t
+ * as a percpu variable, the way to obtain the address of the cpumask
+ * structure depends on the configuration, and so does the this_cpu_*
+ * operation that needs to be used. Please use this_cpu_cpumask_var_t in
+ * those cases. Direct use of this_cpu_ptr() or this_cpu_read() will lead
+ * to failures when the other type of cpumask_var_t implementation is
+ * configured.
+ *
+ * Please also note that __cpumask_var_read_mostly can be used to declare
+ * a cpumask_var_t variable itself (not its content) as read mostly.
+ */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+typedef struct cpumask *cpumask_var_t;
+#else
+typedef struct cpumask cpumask_var_t[1];
+#endif /* CONFIG_CPUMASK_OFFSTACK */
+
+#endif /* __LINUX_CPUMASK_TYPES_H */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 04c20de66afc..a98d3330385c 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -15,6 +15,7 @@
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
+#include <linux/mmu_context.h>
#include <linux/jump_label.h>
#ifdef CONFIG_CPUSETS
@@ -33,6 +34,8 @@
*/
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
+extern struct static_key_false cpusets_insane_config_key;
+
static inline bool cpusets_enabled(void)
{
return static_branch_unlikely(&cpusets_enabled_key);
@@ -50,32 +53,41 @@ static inline void cpuset_dec(void)
static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}
+/*
+ * This will get enabled whenever a cpuset configuration is considered
+ * unsupportable in general, e.g. a movable-only node which cannot satisfy
+ * any non-movable allocations (see update_nodemask). The page allocator
+ * needs to make additional checks for those configurations, and this
+ * check is meant to guard those checks without any overhead for sane
+ * configurations.
+ */
+static inline bool cpusets_insane_config(void)
+{
+ return static_branch_unlikely(&cpusets_insane_config_key);
+}
+
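
A sketch of the intended guard pattern, mirroring how the page allocator
pairs this key with __GFP_HARDWALL (extra_config_checks() is a hypothetical
stand-in for the additional validation):

	if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL))
		extra_config_checks(gfp_mask);	/* only paid for on insane configs */
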
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
-extern void cpuset_wait_for_hotplug(void);
-extern void cpuset_read_lock(void);
-extern void cpuset_read_unlock(void);
+extern void inc_dl_tasks_cs(struct task_struct *task);
+extern void dec_dl_tasks_cs(struct task_struct *task);
+extern void cpuset_lock(void);
+extern void cpuset_unlock(void);
+extern void cpuset_cpus_allowed_locked(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern bool cpuset_cpu_is_isolated(int cpu);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
-extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
-
-static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
-{
- if (cpusets_enabled())
- return __cpuset_node_allowed(node, gfp_mask);
- return true;
-}
+extern bool cpuset_current_node_allowed(int node, gfp_t gfp_mask);
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
- return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
+ return cpuset_current_node_allowed(zone_to_nid(z), gfp_mask);
}
static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
@@ -88,6 +100,7 @@ static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
const struct task_struct *tsk2);
+#ifdef CONFIG_CPUSETS_V1
#define cpuset_memory_pressure_bump() \
do { \
if (cpuset_memory_pressure_enabled) \
@@ -95,6 +108,9 @@ extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
+#else
+static inline void cpuset_memory_pressure_bump(void) { }
+#endif
extern void cpuset_task_status_allowed(struct seq_file *m,
struct task_struct *task);
@@ -102,23 +118,19 @@ extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
extern int cpuset_mem_spread_node(void);
-extern int cpuset_slab_spread_node(void);
static inline int cpuset_do_page_mem_spread(void)
{
return task_spread_page(current);
}
-static inline int cpuset_do_slab_mem_spread(void)
-{
- return task_spread_slab(current);
-}
-
extern bool current_cpuset_is_being_rebound(void);
+extern void dl_rebuild_rd_accounting(void);
extern void rebuild_sched_domains(void);
extern void cpuset_print_current_mems_allowed(void);
+extern void cpuset_reset_sched_domains(void);
/*
* read_mems_allowed_begin is required when making decisions involving
@@ -162,10 +174,13 @@ static inline void set_mems_allowed(nodemask_t nodemask)
task_unlock(current);
}
+extern bool cpuset_node_allowed(struct cgroup *cgroup, int nid);
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
+static inline bool cpusets_insane_config(void) { return false; }
+
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
@@ -176,19 +191,31 @@ static inline void cpuset_update_active_cpus(void)
partition_sched_domains(1, NULL, NULL);
}
-static inline void cpuset_wait_for_hotplug(void) { }
+static inline void inc_dl_tasks_cs(struct task_struct *task) { }
+static inline void dec_dl_tasks_cs(struct task_struct *task) { }
+static inline void cpuset_lock(void) { }
+static inline void cpuset_unlock(void) { }
-static inline void cpuset_read_lock(void) { }
-static inline void cpuset_read_unlock(void) { }
+static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
+ struct cpumask *mask)
+{
+ cpumask_copy(mask, task_cpu_possible_mask(p));
+}
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
- cpumask_copy(mask, cpu_possible_mask);
+ cpuset_cpus_allowed_locked(p, mask);
+}
+
+static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
+{
+ return false;
}
-static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
+static inline bool cpuset_cpu_is_isolated(int cpu)
{
+ return false;
}
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -204,11 +231,6 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
return 1;
}
-static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
-{
- return true;
-}
-
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
return true;
@@ -237,27 +259,26 @@ static inline int cpuset_mem_spread_node(void)
return 0;
}
-static inline int cpuset_slab_spread_node(void)
+static inline int cpuset_do_page_mem_spread(void)
{
return 0;
}
-static inline int cpuset_do_page_mem_spread(void)
+static inline bool current_cpuset_is_being_rebound(void)
{
- return 0;
+ return false;
}
-static inline int cpuset_do_slab_mem_spread(void)
+static inline void dl_rebuild_rd_accounting(void)
{
- return 0;
}
-static inline bool current_cpuset_is_being_rebound(void)
+static inline void rebuild_sched_domains(void)
{
- return false;
+ partition_sched_domains(1, NULL, NULL);
}
-static inline void rebuild_sched_domains(void)
+static inline void cpuset_reset_sched_domains(void)
{
partition_sched_domains(1, NULL, NULL);
}
@@ -280,6 +301,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
return false;
}
+static inline bool cpuset_node_allowed(struct cgroup *cgroup, int nid)
+{
+ return true;
+}
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 206bde8308b2..d35726d6a415 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -6,82 +6,94 @@
#include <linux/elfcore.h>
#include <linux/elf.h>
-#define CRASH_CORE_NOTE_NAME "CORE"
-#define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
-#define CRASH_CORE_NOTE_NAME_BYTES ALIGN(sizeof(CRASH_CORE_NOTE_NAME), 4)
-#define CRASH_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
+struct kimage;
+struct crash_mem {
+ unsigned int max_nr_ranges;
+ unsigned int nr_ranges;
+ struct range ranges[] __counted_by(max_nr_ranges);
+};
+
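
Because @ranges is a flexible array annotated with __counted_by(max_nr_ranges),
allocations should size the whole object up front and set @max_nr_ranges before
ranges[] is touched. A minimal sketch (nr is caller-chosen):

	struct crash_mem *mem;

	mem = kzalloc(struct_size(mem, ranges, nr), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	mem->max_nr_ranges = nr;	/* establishes the __counted_by() bound */
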
+#ifdef CONFIG_CRASH_DUMP
+
+int crash_shrink_memory(unsigned long new_size);
+ssize_t crash_get_memory_size(void);
+
+#ifndef arch_kexec_protect_crashkres
/*
- * The per-cpu notes area is a list of notes terminated by a "NULL"
- * note header. For kdump, the code in vmcore.c runs in the context
- * of the second kernel to combine them into one note.
+ * Protection mechanism for crashkernel reserved memory after
+ * the kdump kernel is loaded.
+ *
+ * Provide an empty default implementation here -- architecture
+ * code may override this.
*/
-#define CRASH_CORE_NOTE_BYTES ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
- CRASH_CORE_NOTE_NAME_BYTES + \
- CRASH_CORE_NOTE_DESC_BYTES)
-
-#define VMCOREINFO_BYTES PAGE_SIZE
-#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
-#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
-#define VMCOREINFO_NOTE_SIZE ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
- VMCOREINFO_NOTE_NAME_BYTES + \
- VMCOREINFO_BYTES)
-
-typedef u32 note_buf_t[CRASH_CORE_NOTE_BYTES/4];
-
-void crash_update_vmcoreinfo_safecopy(void *ptr);
-void crash_save_vmcoreinfo(void);
-void arch_crash_save_vmcoreinfo(void);
-__printf(1, 2)
-void vmcoreinfo_append_str(const char *fmt, ...);
-phys_addr_t paddr_vmcoreinfo_note(void);
-
-#define VMCOREINFO_OSRELEASE(value) \
- vmcoreinfo_append_str("OSRELEASE=%s\n", value)
-#define VMCOREINFO_BUILD_ID(value) \
- vmcoreinfo_append_str("BUILD-ID=%s\n", value)
-#define VMCOREINFO_PAGESIZE(value) \
- vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
-#define VMCOREINFO_SYMBOL(name) \
- vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
-#define VMCOREINFO_SYMBOL_ARRAY(name) \
- vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)name)
-#define VMCOREINFO_SIZE(name) \
- vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
- (unsigned long)sizeof(name))
-#define VMCOREINFO_STRUCT_SIZE(name) \
- vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
- (unsigned long)sizeof(struct name))
-#define VMCOREINFO_OFFSET(name, field) \
- vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
- (unsigned long)offsetof(struct name, field))
-#define VMCOREINFO_TYPE_OFFSET(name, field) \
- vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
- (unsigned long)offsetof(name, field))
-#define VMCOREINFO_LENGTH(name, value) \
- vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
-#define VMCOREINFO_NUMBER(name) \
- vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
-#define VMCOREINFO_CONFIG(name) \
- vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
-
-extern unsigned char *vmcoreinfo_data;
-extern size_t vmcoreinfo_size;
-extern u32 *vmcoreinfo_note;
-
-/* raw contents of kernel .notes section */
-extern const void __start_notes __weak;
-extern const void __stop_notes __weak;
-
-Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
- void *data, size_t data_len);
-void final_note(Elf_Word *buf);
-
-int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
-int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
-int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
+static inline void arch_kexec_protect_crashkres(void) { }
+#endif
+
+#ifndef arch_kexec_unprotect_crashkres
+static inline void arch_kexec_unprotect_crashkres(void) { }
+#endif
+
+#ifdef CONFIG_CRASH_DM_CRYPT
+int crash_load_dm_crypt_keys(struct kimage *image);
+ssize_t dm_crypt_keys_read(char *buf, size_t count, u64 *ppos);
+#else
+static inline int crash_load_dm_crypt_keys(struct kimage *image) { return 0; }
+#endif
+
+#ifndef arch_crash_handle_hotplug_event
+static inline void arch_crash_handle_hotplug_event(struct kimage *image, void *arg) { }
+#endif
+
+int crash_check_hotplug_support(void);
+
+#ifndef arch_crash_hotplug_support
+static inline int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags)
+{
+ return 0;
+}
+#endif
+
+#ifndef crash_get_elfcorehdr_size
+static inline unsigned int crash_get_elfcorehdr_size(void) { return 0; }
+#endif
+
+/* Alignment required for elf header segment */
+#define ELF_CORE_HEADER_ALIGN 4096
+
+extern int crash_exclude_mem_range(struct crash_mem *mem,
+ unsigned long long mstart,
+ unsigned long long mend);
+extern int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
+ void **addr, unsigned long *sz);
+
+struct kimage;
+struct kexec_segment;
+
+#define KEXEC_CRASH_HP_NONE 0
+#define KEXEC_CRASH_HP_ADD_CPU 1
+#define KEXEC_CRASH_HP_REMOVE_CPU 2
+#define KEXEC_CRASH_HP_ADD_MEMORY 3
+#define KEXEC_CRASH_HP_REMOVE_MEMORY 4
+#define KEXEC_CRASH_HP_INVALID_CPU -1U
+
+extern void __crash_kexec(struct pt_regs *regs);
+extern void crash_kexec(struct pt_regs *regs);
+int kexec_should_crash(struct task_struct *p);
+int kexec_crash_loaded(void);
+void crash_save_cpu(struct pt_regs *regs, int cpu);
+extern int kimage_crash_copy_vmcoreinfo(struct kimage *image);
+
+#else /* !CONFIG_CRASH_DUMP */
+struct pt_regs;
+struct task_struct;
+struct kimage;
+static inline void __crash_kexec(struct pt_regs *regs) { }
+static inline void crash_kexec(struct pt_regs *regs) { }
+static inline int kexec_should_crash(struct task_struct *p) { return 0; }
+static inline int kexec_crash_loaded(void) { return 0; }
+static inline void crash_save_cpu(struct pt_regs *regs, int cpu) { }
+static inline int kimage_crash_copy_vmcoreinfo(struct kimage *image) { return 0; }
+#endif /* CONFIG_CRASH_DUMP */
#endif /* LINUX_CRASH_CORE_H */
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index a5192b718dbe..dd6fc3b2133b 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -8,28 +8,30 @@
#include <linux/pgtable.h>
#include <uapi/linux/vmcore.h>
-#include <linux/pgtable.h> /* for pgprot_t */
-
-#ifdef CONFIG_CRASH_DUMP
+/* For IS_ENABLED(CONFIG_CRASH_DUMP) */
#define ELFCORE_ADDR_MAX (-1ULL)
#define ELFCORE_ADDR_ERR (-2ULL)
extern unsigned long long elfcorehdr_addr;
extern unsigned long long elfcorehdr_size;
+extern unsigned long long dm_crypt_keys_addr;
+
+#ifdef CONFIG_CRASH_DUMP
extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
extern void elfcorehdr_free(unsigned long long addr);
extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
+void elfcorehdr_fill_device_ram_ptload_elf64(Elf64_Phdr *phdr,
+ unsigned long long paddr, unsigned long long size);
extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
unsigned long from, unsigned long pfn,
unsigned long size, pgprot_t prot);
-extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
- unsigned long, int);
-extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset,
- int userbuf);
+ssize_t copy_oldmem_page(struct iov_iter *i, unsigned long pfn, size_t csize,
+ unsigned long offset);
+ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset);
void vmcore_cleanup(void);
@@ -52,6 +54,7 @@ void vmcore_cleanup(void);
#define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
#endif
+#ifndef is_kdump_kernel
/*
* is_kdump_kernel() checks whether this kernel is booting after a panic of
* previous kernel or not. This is determined by checking if previous kernel
@@ -66,6 +69,7 @@ static inline bool is_kdump_kernel(void)
{
return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}
+#endif
/* is_vmcore_usable() checks if the kernel is booting after a panic and
* the vmcore region is usable.
@@ -77,7 +81,8 @@ static inline bool is_kdump_kernel(void)
static inline int is_vmcore_usable(void)
{
- return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR ? 1 : 0;
+ return elfcorehdr_addr != ELFCORE_ADDR_ERR &&
+ elfcorehdr_addr != ELFCORE_ADDR_MAX ? 1 : 0;
}
/* vmcore_unusable() marks the vmcore as unusable,
@@ -86,16 +91,74 @@ static inline int is_vmcore_usable(void)
static inline void vmcore_unusable(void)
{
- if (is_kdump_kernel())
- elfcorehdr_addr = ELFCORE_ADDR_ERR;
+ elfcorehdr_addr = ELFCORE_ADDR_ERR;
+}
+
+/**
+ * struct vmcore_cb - driver callbacks for /proc/vmcore handling
+ * @pfn_is_ram: check whether a PFN really is RAM and should be accessed when
+ * reading the vmcore. Will return "true" if it is RAM or if the
+ * callback cannot tell. If any callback returns "false", it's not
+ * RAM and the page must not be accessed; zeroes should be
+ * indicated in the vmcore instead. For example, a ballooned page
+ * contains no data and reading from such a page will cause high
+ * load in the hypervisor.
+ * @get_device_ram: query RAM ranges that can only be detected by device
+ * drivers, such as the virtio-mem driver, so they can be included in
+ * the crash dump on architectures that allocate the elfcore hdr in the dump
+ * ("2nd") kernel. Indicated RAM ranges may contain holes to reduce the
+ * total number of ranges; such holes can be detected using the pfn_is_ram
+ * callback just like for other RAM.
+ * @next: List head to manage registered callbacks internally; initialized by
+ * register_vmcore_cb().
+ *
+ * vmcore callbacks allow drivers managing physical memory ranges to
+ * coordinate with vmcore handling code, for example, to prevent accessing
+ * physical memory ranges that should not be accessed when reading the vmcore,
+ * although included in the vmcore header as memory ranges to dump.
+ */
+struct vmcore_cb {
+ bool (*pfn_is_ram)(struct vmcore_cb *cb, unsigned long pfn);
+ int (*get_device_ram)(struct vmcore_cb *cb, struct list_head *list);
+ struct list_head next;
+};
+extern void register_vmcore_cb(struct vmcore_cb *cb);
+extern void unregister_vmcore_cb(struct vmcore_cb *cb);
+
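
A minimal registration sketch. Only the struct, the ->pfn_is_ram() hook and
register_vmcore_cb() come from this header; the balloon naming and the
balloon_page_is_inflated() helper are invented for illustration:

	static bool balloon_vmcore_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
	{
		/* "false" => not RAM; vmcore substitutes zeroes for this page. */
		return !balloon_page_is_inflated(pfn);
	}

	static struct vmcore_cb balloon_vmcore_cb = {
		.pfn_is_ram = balloon_vmcore_pfn_is_ram,
	};

	register_vmcore_cb(&balloon_vmcore_cb);
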
+struct vmcore_range {
+ struct list_head list;
+ unsigned long long paddr;
+ unsigned long long size;
+ loff_t offset;
+};
+
+/* Allocate a vmcore range and add it to the list. */
+static inline int vmcore_alloc_add_range(struct list_head *list,
+ unsigned long long paddr, unsigned long long size)
+{
+ struct vmcore_range *m = kzalloc(sizeof(*m), GFP_KERNEL);
+
+ if (!m)
+ return -ENOMEM;
+ m->paddr = paddr;
+ m->size = size;
+ list_add_tail(&m->list, list);
+ return 0;
}
-#define HAVE_OLDMEM_PFN_IS_RAM 1
-extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn));
-extern void unregister_oldmem_pfn_is_ram(void);
+/* Free a list of vmcore ranges. */
+static inline void vmcore_free_ranges(struct list_head *list)
+{
+ struct vmcore_range *m, *tmp;
+
+ list_for_each_entry_safe(m, tmp, list, list) {
+ list_del(&m->list);
+ kfree(m);
+ }
+}
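
Taken together, code building and tearing down a throwaway range list might
look like this sketch (paddr/size are placeholders):

	LIST_HEAD(ranges);

	if (vmcore_alloc_add_range(&ranges, paddr, size))
		return -ENOMEM;
	/* ... hand the ranges over ... */
	vmcore_free_ranges(&ranges);
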
#else /* !CONFIG_CRASH_DUMP */
-static inline bool is_kdump_kernel(void) { return 0; }
+static inline bool is_kdump_kernel(void) { return false; }
#endif /* CONFIG_CRASH_DUMP */
/* Device Dump information to be filled by drivers */
@@ -116,13 +179,11 @@ static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
#ifdef CONFIG_PROC_VMCORE
-ssize_t read_from_oldmem(char *buf, size_t count,
- u64 *ppos, int userbuf,
- bool encrypted);
+ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
+ u64 *ppos, bool encrypted);
#else
-static inline ssize_t read_from_oldmem(char *buf, size_t count,
- u64 *ppos, int userbuf,
- bool encrypted)
+static inline ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
+ u64 *ppos, bool encrypted)
{
return -EOPNOTSUPP;
}
diff --git a/include/linux/crash_reserve.h b/include/linux/crash_reserve.h
new file mode 100644
index 000000000000..f0dc03d94ca2
--- /dev/null
+++ b/include/linux/crash_reserve.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_CRASH_RESERVE_H
+#define LINUX_CRASH_RESERVE_H
+
+#include <linux/linkage.h>
+#include <linux/elfcore.h>
+#include <linux/elf.h>
+#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+#include <asm/crash_reserve.h>
+#endif
+
+/* Location of a reserved region to hold the crash kernel. */
+extern struct resource crashk_res;
+extern struct resource crashk_low_res;
+extern struct range crashk_cma_ranges[];
+#if defined(CONFIG_CMA) && defined(CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION)
+#define CRASHKERNEL_CMA
+#define CRASHKERNEL_CMA_RANGES_MAX 4
+extern int crashk_cma_cnt;
+#else
+#define crashk_cma_cnt 0
+#define CRASHKERNEL_CMA_RANGES_MAX 0
+#endif
+
+int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
+ unsigned long long *crash_size, unsigned long long *crash_base,
+ unsigned long long *low_size, unsigned long long *cma_size,
+ bool *high);
+
+void __init reserve_crashkernel_cma(unsigned long long cma_size);
+
+#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+#ifndef arch_add_crash_res_to_iomem
+static inline bool arch_add_crash_res_to_iomem(void)
+{
+ return true;
+}
+#endif
+#ifndef DEFAULT_CRASH_KERNEL_LOW_SIZE
+#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
+#endif
+#ifndef CRASH_ALIGN
+#define CRASH_ALIGN SZ_2M
+#endif
+#ifndef CRASH_ADDR_LOW_MAX
+#define CRASH_ADDR_LOW_MAX SZ_4G
+#endif
+#ifndef CRASH_ADDR_HIGH_MAX
+#define CRASH_ADDR_HIGH_MAX memblock_end_of_DRAM()
+#endif
+
+void __init reserve_crashkernel_generic(unsigned long long crash_size,
+ unsigned long long crash_base,
+ unsigned long long crash_low_size,
+ bool high);
+#else
+static inline void __init reserve_crashkernel_generic(
+ unsigned long long crash_size,
+ unsigned long long crash_base,
+ unsigned long long crash_low_size,
+ bool high)
+{}
+#endif
+#endif /* LINUX_CRASH_RESERVE_H */
diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h
index 72c92c396bb8..cd4f420231ba 100644
--- a/include/linux/crc-ccitt.h
+++ b/include/linux/crc-ccitt.h
@@ -5,19 +5,12 @@
#include <linux/types.h>
extern u16 const crc_ccitt_table[256];
-extern u16 const crc_ccitt_false_table[256];
extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
-extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len);
static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
{
return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
}
-static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c)
-{
- return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c];
-}
-
#endif /* _LINUX_CRC_CCITT_H */
diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h
index a4367051e192..2f991a427ade 100644
--- a/include/linux/crc-itu-t.h
+++ b/include/linux/crc-itu-t.h
@@ -4,7 +4,7 @@
*
* Implements the standard CRC ITU-T V.41:
* Width 16
- * Poly 0x1021 (x^16 + x^12 + x^15 + 1)
+ * Poly 0x1021 (x^16 + x^12 + x^5 + 1)
* Init 0
*/
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index 6bb0c0bf357b..ecc8bc2dd7f4 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -4,13 +4,11 @@
#include <linux/types.h>
-#define CRC_T10DIF_DIGEST_SIZE 2
-#define CRC_T10DIF_BLOCK_SIZE 1
-#define CRC_T10DIF_STRING "crct10dif"
+u16 crc_t10dif_update(u16 crc, const u8 *p, size_t len);
-extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
- size_t len);
-extern __u16 crc_t10dif(unsigned char const *, size_t);
-extern __u16 crc_t10dif_update(__u16 crc, unsigned char const *, size_t);
+static inline u16 crc_t10dif(const u8 *p, size_t len)
+{
+ return crc_t10dif_update(0, p, len);
+}
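
The wrapper also documents the incremental property: chaining
crc_t10dif_update() over consecutive chunks yields the same value as a
one-shot crc_t10dif() over their concatenation. For example:

	u16 crc = crc_t10dif_update(0, part1, len1);
	crc = crc_t10dif_update(crc, part2, len2);
	/* equals crc_t10dif() over the two parts back to back */
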
#endif
diff --git a/include/linux/crc16.h b/include/linux/crc16.h
index 9fa74529b317..b861d969b161 100644
--- a/include/linux/crc16.h
+++ b/include/linux/crc16.h
@@ -15,14 +15,7 @@
#include <linux/types.h>
-extern u16 const crc16_table[256];
-
-extern u16 crc16(u16 crc, const u8 *buffer, size_t len);
-
-static inline u16 crc16_byte(u16 crc, const u8 data)
-{
- return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff];
-}
+u16 crc16(u16 crc, const u8 *p, size_t len);
#endif /* __CRC16_H */
diff --git a/include/linux/crc32.h b/include/linux/crc32.h
index 9e8a032c1788..da78b215ff2e 100644
--- a/include/linux/crc32.h
+++ b/include/linux/crc32.h
@@ -1,69 +1,100 @@
-/*
- * crc32.h
- * See linux/lib/crc32.c for license and changes
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_CRC32_H
#define _LINUX_CRC32_H
#include <linux/types.h>
#include <linux/bitrev.h>
-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len);
-u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len);
-
/**
- * crc32_le_combine - Combine two crc32 check values into one. For two
- * sequences of bytes, seq1 and seq2 with lengths len1
- * and len2, crc32_le() check values were calculated
- * for each, crc1 and crc2.
+ * crc32_le() - Compute least-significant-bit-first IEEE CRC-32
+ * @crc: Initial CRC value. ~0 (recommended) or 0 for a new CRC computation, or
+ * the previous CRC value if computing incrementally.
+ * @p: Pointer to the data buffer
+ * @len: Length of data in bytes
+ *
+ * This implements the CRC variant that is often known as the IEEE CRC-32, or
+ * simply CRC-32, and is widely used in Ethernet and other applications:
+ *
+ * - Polynomial: x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 +
+ * x^7 + x^5 + x^4 + x^2 + x^1 + x^0
+ * - Bit order: Least-significant-bit-first
+ * - Polynomial in integer form: 0xedb88320
*
- * @crc1: crc32 of the first block
- * @crc2: crc32 of the second block
- * @len2: length of the second block
+ * This does *not* invert the CRC at the beginning or end. The caller is
+ * expected to do that if it needs to. Inverting at both ends is recommended.
*
- * Return: The crc32_le() check value of seq1 and seq2 concatenated,
- * requiring only crc1, crc2, and len2. Note: If seq_full denotes
- * the concatenated memory area of seq1 with seq2, and crc_full
- * the crc32_le() value of seq_full, then crc_full ==
- * crc32_le_combine(crc1, crc2, len2) when crc_full was seeded
- * with the same initializer as crc1, and crc2 seed was 0. See
- * also crc32_combine_test().
+ * For new applications, prefer to use CRC-32C instead. See crc32c().
+ *
+ * Context: Any context
+ * Return: The new CRC value
*/
-u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len);
+u32 crc32_le(u32 crc, const void *p, size_t len);
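
Since the inversions are left to the caller, the conventional IEEE CRC-32 of a
whole buffer, inverted at both ends as recommended above, is one line (buf/len
are caller-supplied):

	u32 crc = ~crc32_le(~0u, buf, len);
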
-static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
+/* This is just an alias for crc32_le(). */
+static inline u32 crc32(u32 crc, const void *p, size_t len)
{
- return crc32_le_shift(crc1, len2) ^ crc2;
+ return crc32_le(crc, p, len);
}
-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len);
-
/**
- * __crc32c_le_combine - Combine two crc32c check values into one. For two
- * sequences of bytes, seq1 and seq2 with lengths len1
- * and len2, __crc32c_le() check values were calculated
- * for each, crc1 and crc2.
+ * crc32_be() - Compute most-significant-bit-first IEEE CRC-32
+ * @crc: Initial CRC value. ~0 (recommended) or 0 for a new CRC computation, or
+ * the previous CRC value if computing incrementally.
+ * @p: Pointer to the data buffer
+ * @len: Length of data in bytes
*
- * @crc1: crc32c of the first block
- * @crc2: crc32c of the second block
- * @len2: length of the second block
+ * crc32_be() is the same as crc32_le() except that crc32_be() computes the
+ * *most-significant-bit-first* variant of the CRC. I.e., within each byte, the
+ * most significant bit is processed first (treated as highest order polynomial
+ * coefficient). The same bit order is also used for the CRC value itself:
*
- * Return: The __crc32c_le() check value of seq1 and seq2 concatenated,
- * requiring only crc1, crc2, and len2. Note: If seq_full denotes
- * the concatenated memory area of seq1 with seq2, and crc_full
- * the __crc32c_le() value of seq_full, then crc_full ==
- * __crc32c_le_combine(crc1, crc2, len2) when crc_full was
- * seeded with the same initializer as crc1, and crc2 seed
- * was 0. See also crc32c_combine_test().
+ * - Polynomial: x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 +
+ * x^7 + x^5 + x^4 + x^2 + x^1 + x^0
+ * - Bit order: Most-significant-bit-first
+ * - Polynomial in integer form: 0x04c11db7
+ *
+ * Context: Any context
+ * Return: The new CRC value
*/
-u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len);
+u32 crc32_be(u32 crc, const void *p, size_t len);
-static inline u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
-{
- return __crc32c_le_shift(crc1, len2) ^ crc2;
-}
+/**
+ * crc32c() - Compute CRC-32C
+ * @crc: Initial CRC value. ~0 (recommended) or 0 for a new CRC computation, or
+ * the previous CRC value if computing incrementally.
+ * @p: Pointer to the data buffer
+ * @len: Length of data in bytes
+ *
+ * This implements CRC-32C, i.e. the Castagnoli CRC. This is the recommended
+ * CRC variant to use in new applications that want a 32-bit CRC.
+ *
+ * - Polynomial: x^32 + x^28 + x^27 + x^26 + x^25 + x^23 + x^22 + x^20 + x^19 +
+ * x^18 + x^14 + x^13 + x^11 + x^10 + x^9 + x^8 + x^6 + x^0
+ * - Bit order: Least-significant-bit-first
+ * - Polynomial in integer form: 0x82f63b78
+ *
+ * This does *not* invert the CRC at the beginning or end. The caller is
+ * expected to do that if it needs to. Inverting at both ends is recommended.
+ *
+ * Context: Any context
+ * Return: The new CRC value
+ */
+u32 crc32c(u32 crc, const void *p, size_t len);
-#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length)
+/*
+ * crc32_optimizations() returns flags that indicate which CRC32 library
+ * functions are using architecture-specific optimizations. Unlike
+ * IS_ENABLED(CONFIG_CRC32_ARCH) it takes into account the different CRC32
+ * variants and also whether any needed CPU features are available at runtime.
+ */
+#define CRC32_LE_OPTIMIZATION BIT(0) /* crc32_le() is optimized */
+#define CRC32_BE_OPTIMIZATION BIT(1) /* crc32_be() is optimized */
+#define CRC32C_OPTIMIZATION BIT(2) /* crc32c() is optimized */
+#if IS_ENABLED(CONFIG_CRC32_ARCH)
+u32 crc32_optimizations(void);
+#else
+static inline u32 crc32_optimizations(void) { return 0; }
+#endif
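
A caller selecting a variant at runtime might test the flags like this (the
policy is the caller's; the boolean is illustrative):

	/* Prefer CRC-32C when an accelerated implementation is present. */
	bool accel_crc32c = crc32_optimizations() & CRC32C_OPTIMIZATION;
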
/*
* Helpers for hash table generation of ethernet nics:
diff --git a/include/linux/crc32c.h b/include/linux/crc32c.h
index bd21af828ff6..b8cff2f4309a 100644
--- a/include/linux/crc32c.h
+++ b/include/linux/crc32c.h
@@ -2,12 +2,6 @@
#ifndef _LINUX_CRC32C_H
#define _LINUX_CRC32C_H
-#include <linux/types.h>
-
-extern u32 crc32c(u32 crc, const void *address, unsigned int length);
-extern const char *crc32c_impl(void);
-
-/* This macro exists for backwards-compatibility. */
-#define crc32c_le crc32c
+#include <linux/crc32.h>
#endif /* _LINUX_CRC32C_H */
diff --git a/include/linux/crc32poly.h b/include/linux/crc32poly.h
index 62c4b7790a28..ccab711295fa 100644
--- a/include/linux/crc32poly.h
+++ b/include/linux/crc32poly.h
@@ -2,19 +2,13 @@
#ifndef _LINUX_CRC32_POLY_H
#define _LINUX_CRC32_POLY_H
-/*
- * There are multiple 16-bit CRC polynomials in common use, but this is
- * *the* standard CRC-32 polynomial, first popularized by Ethernet.
- * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
- */
+/* The polynomial used by crc32_le(), in integer form. See crc32_le(). */
#define CRC32_POLY_LE 0xedb88320
+
+/* The polynomial used by crc32_be(), in integer form. See crc32_be(). */
#define CRC32_POLY_BE 0x04c11db7
-/*
- * This is the CRC32c polynomial, as outlined by Castagnoli.
- * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
- * x^8+x^6+x^0
- */
-#define CRC32C_POLY_LE 0x82F63B78
+/* The polynomial used by crc32c(), in integer form. See crc32c(). */
+#define CRC32C_POLY_LE 0x82f63b78
#endif /* _LINUX_CRC32_POLY_H */
diff --git a/include/linux/crc64.h b/include/linux/crc64.h
index c756e65a1b58..fc0c06ab1993 100644
--- a/include/linux/crc64.h
+++ b/include/linux/crc64.h
@@ -1,11 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * See lib/crc64.c for the related specification and polynomial arithmetic.
- */
#ifndef _LINUX_CRC64_H
#define _LINUX_CRC64_H
#include <linux/types.h>
-u64 __pure crc64_be(u64 crc, const void *p, size_t len);
+/**
+ * crc64_be - Calculate bitwise big-endian ECMA-182 CRC64
+ * @crc: seed value for computation. 0 or (u64)~0 for a new CRC calculation,
+ * or the previous crc64 value if computing incrementally.
+ * @p: pointer to buffer over which CRC64 is run
+ * @len: length of buffer @p
+ */
+u64 crc64_be(u64 crc, const void *p, size_t len);
+
+/**
+ * crc64_nvme - Calculate CRC64-NVME
+ * @crc: seed value for computation. 0 for a new CRC calculation, or the
+ * previous crc64 value if computing incrementally.
+ * @p: pointer to buffer over which CRC64 is run
+ * @len: length of buffer @p
+ *
+ * This computes the CRC64 defined in the NVME NVM Command Set Specification,
+ * *including the bitwise inversion at the beginning and end*.
+ */
+u64 crc64_nvme(u64 crc, const void *p, size_t len);
+
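
The practical difference between the two variants' conventions, as documented
above: crc64_nvme() performs the spec-mandated inversions itself, while
crc64_be() leaves any inversion to the caller. A sketch (buf/len are
caller-supplied; the variants use different polynomials, so the two results
are unrelated):

	u64 ecma = crc64_be(0, buf, len);	/* caller applies any inversions */
	u64 nvme = crc64_nvme(0, buf, len);	/* inversions handled internally */
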
#endif /* _LINUX_CRC64_H */
diff --git a/include/linux/crc7.h b/include/linux/crc7.h
index b462842f3c32..61d34749e437 100644
--- a/include/linux/crc7.h
+++ b/include/linux/crc7.h
@@ -3,13 +3,6 @@
#define _LINUX_CRC7_H
#include <linux/types.h>
-extern const u8 crc7_be_syndrome_table[256];
-
-static inline u8 crc7_be_byte(u8 crc, u8 data)
-{
- return crc7_be_syndrome_table[crc ^ data];
-}
-
extern u8 crc7_be(u8 crc, const u8 *buffer, size_t len);
#endif
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 14971322e1a0..343a140a6ba2 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/key.h>
#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/uidgid.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
@@ -19,11 +20,13 @@
struct cred;
struct inode;
+extern struct task_struct init_task;
+
/*
* COW Supplementary groups list
*/
struct group_info {
- atomic_t usage;
+ refcount_t usage;
int ngroups;
kgid_t gid[];
} __randomize_layout;
@@ -39,7 +42,7 @@ struct group_info {
*/
static inline struct group_info *get_group_info(struct group_info *gi)
{
- atomic_inc(&gi->usage);
+ refcount_inc(&gi->usage);
return gi;
}
@@ -49,7 +52,7 @@ static inline struct group_info *get_group_info(struct group_info *gi)
*/
#define put_group_info(group_info) \
do { \
- if (atomic_dec_and_test(&(group_info)->usage)) \
+ if (refcount_dec_and_test(&(group_info)->usage)) \
groups_free(group_info); \
} while (0)
@@ -108,14 +111,7 @@ static inline int groups_search(const struct group_info *group_info, kgid_t grp)
* same context as task->real_cred.
*/
struct cred {
- atomic_t usage;
-#ifdef CONFIG_DEBUG_CREDENTIALS
- atomic_t subscribers; /* number of processes subscribed */
- void *put_addr;
- unsigned magic;
-#define CRED_MAGIC 0x43736564
-#define CRED_MAGIC_DEAD 0x44656144
-#endif
+ atomic_long_t usage;
kuid_t uid; /* real UID of the task */
kgid_t gid; /* real GID of the task */
kuid_t suid; /* saved UID of the task */
@@ -143,6 +139,7 @@ struct cred {
#endif
struct user_struct *user; /* real user ID subscription */
struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
+ struct ucounts *ucounts;
struct group_info *group_info; /* supplementary groups for euid/fsgid */
/* RCU deletion */
union {
@@ -153,62 +150,25 @@ struct cred {
extern void __put_cred(struct cred *);
extern void exit_creds(struct task_struct *);
-extern int copy_creds(struct task_struct *, unsigned long);
+extern int copy_creds(struct task_struct *, u64);
extern const struct cred *get_task_cred(struct task_struct *);
extern struct cred *cred_alloc_blank(void);
extern struct cred *prepare_creds(void);
extern struct cred *prepare_exec_creds(void);
extern int commit_creds(struct cred *);
extern void abort_creds(struct cred *);
-extern const struct cred *override_creds(const struct cred *);
-extern void revert_creds(const struct cred *);
extern struct cred *prepare_kernel_cred(struct task_struct *);
-extern int change_create_files_as(struct cred *, struct inode *);
+static inline const struct cred *kernel_cred(void)
+{
+ /* shut up sparse */
+ return rcu_dereference_raw(init_task.cred);
+}
extern int set_security_override(struct cred *, u32);
extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
extern int cred_fscmp(const struct cred *, const struct cred *);
extern void __init cred_init(void);
-
-/*
- * check for validity of credentials
- */
-#ifdef CONFIG_DEBUG_CREDENTIALS
-extern void __invalid_creds(const struct cred *, const char *, unsigned);
-extern void __validate_process_creds(struct task_struct *,
- const char *, unsigned);
-
-extern bool creds_are_invalid(const struct cred *cred);
-
-static inline void __validate_creds(const struct cred *cred,
- const char *file, unsigned line)
-{
- if (unlikely(creds_are_invalid(cred)))
- __invalid_creds(cred, file, line);
-}
-
-#define validate_creds(cred) \
-do { \
- __validate_creds((cred), __FILE__, __LINE__); \
-} while(0)
-
-#define validate_process_creds() \
-do { \
- __validate_process_creds(current, __FILE__, __LINE__); \
-} while(0)
-
-extern void validate_creds_for_do_exit(struct task_struct *);
-#else
-static inline void validate_creds(const struct cred *cred)
-{
-}
-static inline void validate_creds_for_do_exit(struct task_struct *tsk)
-{
-}
-static inline void validate_process_creds(void)
-{
-}
-#endif
+extern int set_cred_ucounts(struct cred *);
static inline bool cap_ambient_invariant_ok(const struct cred *cred)
{
@@ -217,25 +177,33 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
cred->cap_inheritable));
}
-/**
- * get_new_cred - Get a reference on a new set of credentials
- * @cred: The new credentials to reference
- *
- * Get a reference on the specified set of new credentials. The caller must
- * release the reference.
- */
-static inline struct cred *get_new_cred(struct cred *cred)
+static inline const struct cred *override_creds(const struct cred *override_cred)
{
- atomic_inc(&cred->usage);
- return cred;
+ return rcu_replace_pointer(current->cred, override_cred, 1);
+}
+
+static inline const struct cred *revert_creds(const struct cred *revert_cred)
+{
+ return rcu_replace_pointer(current->cred, revert_cred, 1);
}
+DEFINE_CLASS(override_creds,
+ const struct cred *,
+ revert_creds(_T),
+ override_creds(override_cred), const struct cred *override_cred)
+
+#define scoped_with_creds(cred) \
+ scoped_class(override_creds, __UNIQUE_ID(label), cred)
+
+#define scoped_with_kernel_creds() scoped_with_creds(kernel_cred())
+
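
A minimal sketch of the scoped form, assuming the <linux/cleanup.h>
scoped_class machinery; do_io() stands in for whatever work needs the
override:

	scoped_with_creds(cred) {
		/* current->cred is overridden here; reverted on scope exit. */
		do_io();
	}
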
/**
- * get_cred - Get a reference on a set of credentials
+ * get_cred_many - Get references on a set of credentials
* @cred: The credentials to reference
+ * @nr: Number of references to acquire
*
- * Get a reference on the specified set of credentials. The caller must
- * release the reference. If %NULL is passed, it is returned with no action.
+ * Get references on the specified set of credentials. The caller must release
+ * all acquired references. If %NULL is passed, it is returned with no action.
*
* This is used to deal with a committed set of credentials. Although the
* pointer is const, this will temporarily discard the const and increment the
@@ -243,14 +211,28 @@ static inline struct cred *get_new_cred(struct cred *cred)
* accidental alteration of a set of credentials that should be considered
* immutable.
*/
-static inline const struct cred *get_cred(const struct cred *cred)
+static inline const struct cred *get_cred_many(const struct cred *cred, int nr)
{
struct cred *nonconst_cred = (struct cred *) cred;
if (!cred)
return cred;
- validate_creds(cred);
nonconst_cred->non_rcu = 0;
- return get_new_cred(nonconst_cred);
+ atomic_long_add(nr, &nonconst_cred->usage);
+ return cred;
+}
+
+/*
+ * get_cred - Get a reference on a set of credentials
+ * @cred: The credentials to reference
+ *
+ * Get a reference on the specified set of credentials. The caller must
+ * release the reference. If %NULL is passed, it is returned with no action.
+ *
+ * This is used to deal with a committed set of credentials.
+ */
+static inline const struct cred *get_cred(const struct cred *cred)
+{
+ return get_cred_many(cred, 1);
}
static inline const struct cred *get_cred_rcu(const struct cred *cred)
@@ -258,9 +240,8 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
struct cred *nonconst_cred = (struct cred *) cred;
if (!cred)
return NULL;
- if (!atomic_inc_not_zero(&nonconst_cred->usage))
+ if (!atomic_long_inc_not_zero(&nonconst_cred->usage))
return NULL;
- validate_creds(cred);
nonconst_cred->non_rcu = 0;
return cred;
}
@@ -268,6 +249,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
/**
* put_cred - Release a reference to a set of credentials
* @cred: The credentials to release
+ * @nr: Number of references to release
*
* Release a reference to a set of credentials, deleting them when the last ref
* is released. If %NULL is passed, nothing is done.
@@ -276,17 +258,35 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
* on task_struct are attached by const pointers to prevent accidental
* alteration of otherwise immutable credential sets.
*/
-static inline void put_cred(const struct cred *_cred)
+static inline void put_cred_many(const struct cred *_cred, int nr)
{
struct cred *cred = (struct cred *) _cred;
if (cred) {
- validate_creds(cred);
- if (atomic_dec_and_test(&(cred)->usage))
+ if (atomic_long_sub_and_test(nr, &cred->usage))
__put_cred(cred);
}
}
+/*
+ * put_cred - Release a reference to a set of credentials
+ * @cred: The credentials to release
+ *
+ * Release a reference to a set of credentials, deleting them when the last ref
+ * is released. If %NULL is passed, nothing is done.
+ */
+static inline void put_cred(const struct cred *cred)
+{
+ put_cred_many(cred, 1);
+}
+
+DEFINE_CLASS(prepare_creds,
+ struct cred *,
+ if (_T) put_cred(_T),
+ prepare_creds(), void)
+
+DEFINE_FREE(put_cred, struct cred *, if (!IS_ERR_OR_NULL(_T)) put_cred(_T))
+
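
With the DEFINE_FREE() hook above, error paths can rely on an automatic put,
assuming <linux/cleanup.h> semantics (a sketch, not taken from this header):

	struct cred *creds __free(put_cred) = prepare_creds();

	if (!creds)
		return -ENOMEM;
	/* any early return now drops the reference automatically */
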
/**
* current_cred - Access the current task's subjective credentials
*
@@ -369,6 +369,7 @@ static inline void put_cred(const struct cred *_cred)
#define task_uid(task) (task_cred_xxx((task), uid))
#define task_euid(task) (task_cred_xxx((task), euid))
+#define task_ucounts(task) (task_cred_xxx((task), ucounts))
#define current_cred_xxx(xxx) \
({ \
@@ -385,6 +386,7 @@ static inline void put_cred(const struct cred *_cred)
#define current_fsgid() (current_cred_xxx(fsgid))
#define current_cap() (current_cred_xxx(cap_effective))
#define current_user() (current_cred_xxx(user))
+#define current_ucounts() (current_cred_xxx(ucounts))
extern struct user_namespace init_user_ns;
#ifdef CONFIG_USER_NS
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index da5e0d74bb2f..a2137e19be7d 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -12,46 +12,30 @@
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H
-#include <linux/atomic.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/bug.h>
-#include <linux/refcount.h>
-#include <linux/slab.h>
#include <linux/completion.h>
-
-/*
- * Autoloaded crypto modules should only use a prefixed name to avoid allowing
- * arbitrary modules to be loaded. Loading from userspace may still need the
- * unprefixed names, so retains those aliases as well.
- * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
- * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
- * expands twice on the same line. Instead, use a separate base name for the
- * alias.
- */
-#define MODULE_ALIAS_CRYPTO(name) \
- __MODULE_INFO(alias, alias_userspace, name); \
- __MODULE_INFO(alias, alias_crypto, "crypto-" name)
+#include <linux/errno.h>
+#include <linux/refcount_types.h>
+#include <linux/slab.h>
+#include <linux/types.h>
/*
* Algorithm masks and types.
*/
#define CRYPTO_ALG_TYPE_MASK 0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
-#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
#define CRYPTO_ALG_TYPE_AEAD 0x00000003
+#define CRYPTO_ALG_TYPE_LSKCIPHER 0x00000004
#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
+#define CRYPTO_ALG_TYPE_AKCIPHER 0x00000006
+#define CRYPTO_ALG_TYPE_SIG 0x00000007
#define CRYPTO_ALG_TYPE_KPP 0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
-#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
#define CRYPTO_ALG_TYPE_HASH 0x0000000e
#define CRYPTO_ALG_TYPE_SHASH 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH 0x0000000f
-#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
-#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
#define CRYPTO_ALG_LARVAL 0x00000010
@@ -66,6 +50,15 @@
#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
/*
+ * Set if the algorithm data structure should be duplicated into
+ * kmalloc memory before registration. This is useful for hardware
+ * that can be disconnected at will. Do not use this if the data
+ * structure is embedded into a bigger one. Duplicate the overall
+ * data structure in the driver in that case.
+ */
+#define CRYPTO_ALG_DUP_FIRST 0x00000200
+
+/*
* Set if the algorithm has passed automated run-time testing. Note that
* if there is no run-time testing for a given algorithm it is considered
* to have passed.
@@ -126,13 +119,29 @@
* crypto_aead_walksize() (with the remainder going at the end), no chunk
* can cross a page boundary or a scatterlist element boundary.
* ahash:
- * - The result buffer must be aligned to the algorithm's alignmask.
* - crypto_ahash_finup() must not be used unless the algorithm implements
* ->finup() natively.
*/
#define CRYPTO_ALG_ALLOCATES_MEMORY 0x00010000
/*
+ * Mark an algorithm as a service implementation only usable by a
+ * template and never by a normal user of the kernel crypto API.
+ * This is intended to be used by algorithms that are themselves
+ * not FIPS-approved but may instead be used to implement parts of
+ * a FIPS-approved algorithm (e.g., dh vs. ffdhe2048(dh)).
+ */
+#define CRYPTO_ALG_FIPS_INTERNAL 0x00020000
+
+/* Set if the algorithm supports virtual addresses. */
+#define CRYPTO_ALG_REQ_VIRT 0x00040000
+
+/* Set if the algorithm cannot have a fallback (e.g., phmac). */
+#define CRYPTO_ALG_NO_FALLBACK 0x00080000
+
+/* The high bits 0xff000000 are reserved for type-specific flags. */
+
+/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_NEED_KEY 0x00000001
@@ -141,6 +150,7 @@
#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
+#define CRYPTO_TFM_REQ_ON_STACK 0x00000800
/*
* Miscellaneous stuff.
@@ -162,12 +172,11 @@
#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
-struct scatterlist;
-struct crypto_async_request;
struct crypto_tfm;
struct crypto_type;
+struct module;
-typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
+typedef void (*crypto_completion_t)(void *req, int err);
/**
* DOC: Block Cipher Context Data Structures
@@ -248,136 +257,7 @@ struct cipher_alg {
void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};
-/**
- * struct compress_alg - compression/decompression algorithm
- * @coa_compress: Compress a buffer of specified length, storing the resulting
- * data in the specified buffer. Return the length of the
- * compressed data in dlen.
- * @coa_decompress: Decompress the source buffer, storing the uncompressed
- * data in the specified buffer. The length of the data is
- * returned in dlen.
- *
- * All fields are mandatory.
- */
-struct compress_alg {
- int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen);
- int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen);
-};
-
-#ifdef CONFIG_CRYPTO_STATS
-/*
- * struct crypto_istat_aead - statistics for AEAD algorithm
- * @encrypt_cnt: number of encrypt requests
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @err_cnt: number of error for AEAD requests
- */
-struct crypto_istat_aead {
- atomic64_t encrypt_cnt;
- atomic64_t encrypt_tlen;
- atomic64_t decrypt_cnt;
- atomic64_t decrypt_tlen;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_akcipher - statistics for akcipher algorithm
- * @encrypt_cnt: number of encrypt requests
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @verify_cnt: number of verify operation
- * @sign_cnt: number of sign requests
- * @err_cnt: number of error for akcipher requests
- */
-struct crypto_istat_akcipher {
- atomic64_t encrypt_cnt;
- atomic64_t encrypt_tlen;
- atomic64_t decrypt_cnt;
- atomic64_t decrypt_tlen;
- atomic64_t verify_cnt;
- atomic64_t sign_cnt;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_cipher - statistics for cipher algorithm
- * @encrypt_cnt: number of encrypt requests
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @err_cnt: number of error for cipher requests
- */
-struct crypto_istat_cipher {
- atomic64_t encrypt_cnt;
- atomic64_t encrypt_tlen;
- atomic64_t decrypt_cnt;
- atomic64_t decrypt_tlen;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_compress - statistics for compress algorithm
- * @compress_cnt: number of compress requests
- * @compress_tlen: total data size handled by compress requests
- * @decompress_cnt: number of decompress requests
- * @decompress_tlen: total data size handled by decompress requests
- * @err_cnt: number of error for compress requests
- */
-struct crypto_istat_compress {
- atomic64_t compress_cnt;
- atomic64_t compress_tlen;
- atomic64_t decompress_cnt;
- atomic64_t decompress_tlen;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_hash - statistics for has algorithm
- * @hash_cnt: number of hash requests
- * @hash_tlen: total data size hashed
- * @err_cnt: number of error for hash requests
- */
-struct crypto_istat_hash {
- atomic64_t hash_cnt;
- atomic64_t hash_tlen;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_kpp - statistics for KPP algorithm
- * @setsecret_cnt: number of setsecrey operation
- * @generate_public_key_cnt: number of generate_public_key operation
- * @compute_shared_secret_cnt: number of compute_shared_secret operation
- * @err_cnt: number of error for KPP requests
- */
-struct crypto_istat_kpp {
- atomic64_t setsecret_cnt;
- atomic64_t generate_public_key_cnt;
- atomic64_t compute_shared_secret_cnt;
- atomic64_t err_cnt;
-};
-
-/*
- * struct crypto_istat_rng: statistics for RNG algorithm
- * @generate_cnt: number of RNG generate requests
- * @generate_tlen: total data size of generated data by the RNG
- * @seed_cnt: number of times the RNG was seeded
- * @err_cnt: number of error for RNG requests
- */
-struct crypto_istat_rng {
- atomic64_t generate_cnt;
- atomic64_t generate_tlen;
- atomic64_t seed_cnt;
- atomic64_t err_cnt;
-};
-#endif /* CONFIG_CRYPTO_STATS */
-
#define cra_cipher cra_u.cipher
-#define cra_compress cra_u.compress
/**
* struct crypto_alg - definition of a cryptograpic cipher algorithm
@@ -396,18 +276,21 @@ struct crypto_istat_rng {
* @cra_ctxsize: Size of the operational context of the transformation. This
* value informs the kernel crypto API about the memory size
* needed to be allocated for the transformation context.
- * @cra_alignmask: Alignment mask for the input and output data buffer. The data
- * buffer containing the input data for the algorithm must be
- * aligned to this alignment mask. The data buffer for the
- * output data must be aligned to this alignment mask. Note that
- * the Crypto API will do the re-alignment in software, but
- * only under special conditions and there is a performance hit.
- * The re-alignment happens at these occasions for different
- * @cra_u types: cipher -- For both input data and output data
- * buffer; ahash -- For output hash destination buf; shash --
- * For output hash destination buf.
- * This is needed on hardware which is flawed by design and
- * cannot pick data from arbitrary addresses.
+ * @cra_alignmask: For cipher, skcipher, lskcipher, and aead algorithms this is
+ * 1 less than the alignment, in bytes, that the algorithm
+ * implementation requires for input and output buffers. When
+ * the crypto API is invoked with buffers that are not aligned
+ * to this alignment, the crypto API automatically utilizes
+ * appropriately aligned temporary buffers to comply with what
+ * the algorithm needs. (For scatterlists this happens only if
+ * the algorithm uses the skcipher_walk helper functions.) This
+ * misalignment handling carries a performance penalty, so it is
+ * preferred that algorithms do not set a nonzero alignmask.
+ * Also, crypto API users may wish to allocate buffers aligned
+ * to the alignmask of the algorithm being used, in order to
+ * avoid the API having to realign them. Note: the alignmask is
+ * not supported for hash algorithms and is always 0 for them.
+ * @cra_reqsize: Size of the request context for this algorithm.
* @cra_priority: Priority of this transformation implementation. In case
* multiple transformations with same @cra_name are available to
* the Crypto API, the kernel will use the one with highest
@@ -426,42 +309,22 @@ struct crypto_istat_rng {
* transformation types. There are multiple options, such as
* &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type.
* This field might be empty. In that case, there are no common
- * callbacks. This is the case for: cipher, compress, shash.
+ * callbacks. This is the case for: cipher.
* @cra_u: Callbacks implementing the transformation. This is a union of
* multiple structures. Depending on the type of transformation selected
* by @cra_type and @cra_flags above, the associated structure must be
* filled with callbacks. This field might be empty. This is the case
* for ahash, shash.
- * @cra_init: Initialize the cryptographic transformation object. This function
- * is used to initialize the cryptographic transformation object.
- * This function is called only once at the instantiation time, right
- * after the transformation context was allocated. In case the
- * cryptographic hardware has some special requirements which need to
- * be handled by software, this function shall check for the precise
- * requirement of the transformation and put any software fallbacks
- * in place.
- * @cra_exit: Deinitialize the cryptographic transformation object. This is a
- * counterpart to @cra_init, used to remove various changes set in
- * @cra_init.
+ * @cra_init: Deprecated, do not use.
+ * @cra_exit: Deprecated, do not use.
* @cra_u.cipher: Union member which contains a single-block symmetric cipher
* definition. See @struct @cipher_alg.
- * @cra_u.compress: Union member which contains a (de)compression algorithm.
- * See @struct @compress_alg.
* @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
* @cra_list: internally used
* @cra_users: internally used
* @cra_refcnt: internally used
* @cra_destroy: internally used
*
- * @stats: union of all possible crypto_istat_xxx structures
- * @stats.aead: statistics for AEAD algorithm
- * @stats.akcipher: statistics for akcipher algorithm
- * @stats.cipher: statistics for cipher algorithm
- * @stats.compress: statistics for compress algorithm
- * @stats.hash: statistics for hash algorithm
- * @stats.rng: statistics for rng algorithm
- * @stats.kpp: statistics for KPP algorithm
- *
* The struct crypto_alg describes a generic Crypto API algorithm and is common
* for all of the transformations. Any variable not documented here shall not
* be used by a cipher implementation as it is internal to the Crypto API.
@@ -474,6 +337,7 @@ struct crypto_alg {
unsigned int cra_blocksize;
unsigned int cra_ctxsize;
unsigned int cra_alignmask;
+ unsigned int cra_reqsize;
int cra_priority;
refcount_t cra_refcnt;
@@ -485,7 +349,6 @@ struct crypto_alg {
union {
struct cipher_alg cipher;
- struct compress_alg compress;
} cra_u;
int (*cra_init)(struct crypto_tfm *tfm);
@@ -493,81 +356,8 @@ struct crypto_alg {
void (*cra_destroy)(struct crypto_alg *alg);
struct module *cra_module;
-
-#ifdef CONFIG_CRYPTO_STATS
- union {
- struct crypto_istat_aead aead;
- struct crypto_istat_akcipher akcipher;
- struct crypto_istat_cipher cipher;
- struct crypto_istat_compress compress;
- struct crypto_istat_hash hash;
- struct crypto_istat_rng rng;
- struct crypto_istat_kpp kpp;
- } stats;
-#endif /* CONFIG_CRYPTO_STATS */
-
} CRYPTO_MINALIGN_ATTR;
-#ifdef CONFIG_CRYPTO_STATS
-void crypto_stats_init(struct crypto_alg *alg);
-void crypto_stats_get(struct crypto_alg *alg);
-void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
-void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
-void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg);
-void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg);
-void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
-void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
-void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg);
-void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg);
-void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg);
-void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg);
-void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret);
-void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret);
-void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret);
-void crypto_stats_rng_seed(struct crypto_alg *alg, int ret);
-void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret);
-void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
-void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
-#else
-static inline void crypto_stats_init(struct crypto_alg *alg)
-{}
-static inline void crypto_stats_get(struct crypto_alg *alg)
-{}
-static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret)
-{}
-static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
-{}
-#endif
/*
* A helper struct for waiting for completion of async crypto ops
*/
@@ -586,7 +376,7 @@ struct crypto_wait {
/*
 * Async ops completion helper functions
*/
-void crypto_req_done(struct crypto_async_request *req, int err);
+void crypto_req_done(void *req, int err);
static inline int crypto_wait_req(int err, struct crypto_wait *wait)
{
@@ -608,14 +398,6 @@ static inline void crypto_init_wait(struct crypto_wait *wait)
}
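For orientation, these helpers are typically used together to drive an asynchronous operation synchronously: crypto_req_done() is installed as the completion callback and crypto_wait_req() sleeps until it fires. A minimal sketch, assuming an already-prepared request and a hypothetical submit_op() that returns -EINPROGRESS or -EBUSY when the operation is queued (crypto_request_set_callback() is the helper introduced later in this header):

	static int run_op_synchronously(struct crypto_async_request *req)
	{
		struct crypto_wait wait;

		crypto_init_wait(&wait);
		/* completion is routed to crypto_req_done(), which wakes &wait */
		crypto_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					    crypto_req_done, &wait);
		/* turns -EINPROGRESS/-EBUSY into a sleep until completion */
		return crypto_wait_req(submit_op(req), &wait);
	}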
/*
- * Algorithm registration interface.
- */
-int crypto_register_alg(struct crypto_alg *alg);
-void crypto_unregister_alg(struct crypto_alg *alg);
-int crypto_register_algs(struct crypto_alg *algs, int count);
-void crypto_unregister_algs(struct crypto_alg *algs, int count);
-
-/*
* Algorithm query interface.
*/
int crypto_has_alg(const char *name, u32 type, u32 mask);
@@ -627,46 +409,19 @@ int crypto_has_alg(const char *name, u32 type, u32 mask);
*/
struct crypto_tfm {
+ refcount_t refcnt;
u32 crt_flags;
int node;
-
- void (*exit)(struct crypto_tfm *tfm);
-
- struct crypto_alg *__crt_alg;
- void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
-struct crypto_comp {
- struct crypto_tfm base;
-};
-
-enum {
- CRYPTOA_UNSPEC,
- CRYPTOA_ALG,
- CRYPTOA_TYPE,
- CRYPTOA_U32,
- __CRYPTOA_MAX,
-};
+ struct crypto_tfm *fb;
-#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)
-
-/* Maximum number of (rtattr) parameters for each template. */
-#define CRYPTO_MAX_ATTRS 32
-
-struct crypto_attr_alg {
- char name[CRYPTO_MAX_ALG_NAME];
-};
+ void (*exit)(struct crypto_tfm *tfm);
-struct crypto_attr_type {
- u32 type;
- u32 mask;
-};
+ struct crypto_alg *__crt_alg;
-struct crypto_attr_u32 {
- u32 num;
+ void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};
/*
@@ -681,8 +436,6 @@ static inline void crypto_free_tfm(struct crypto_tfm *tfm)
return crypto_destroy_tfm(tfm, tfm);
}
-int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
-
/*
* Transform helpers which query the underlying algorithm.
*/
@@ -696,16 +449,6 @@ static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_driver_name;
}
-static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_priority;
-}
-
-static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
-}
-
static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_blocksize;
@@ -716,6 +459,11 @@ static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_alignmask;
}
+static inline unsigned int crypto_tfm_alg_reqsize(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_reqsize;
+}
+
static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
return tfm->crt_flags;
@@ -731,63 +479,51 @@ static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
tfm->crt_flags &= ~flags;
}
-static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
-{
- return tfm->__crt_ctx;
-}
-
static inline unsigned int crypto_tfm_ctx_alignment(void)
{
struct crypto_tfm *tfm;
return __alignof__(tfm->__crt_ctx);
}
-static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
+static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm)
{
- return (struct crypto_comp *)tfm;
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
}
-static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
- u32 type, u32 mask)
+static inline bool crypto_req_on_stack(struct crypto_async_request *req)
{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_COMPRESS;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
+ return req->flags & CRYPTO_TFM_REQ_ON_STACK;
}
-static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
+static inline void crypto_request_set_callback(
+ struct crypto_async_request *req, u32 flags,
+ crypto_completion_t compl, void *data)
{
- return &tfm->base;
-}
+ u32 keep = CRYPTO_TFM_REQ_ON_STACK;
-static inline void crypto_free_comp(struct crypto_comp *tfm)
-{
- crypto_free_tfm(crypto_comp_tfm(tfm));
+ req->complete = compl;
+ req->data = data;
+ req->flags &= keep;
+ req->flags |= flags & ~keep;
}
-static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
+static inline void crypto_request_set_tfm(struct crypto_async_request *req,
+ struct crypto_tfm *tfm)
{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_COMPRESS;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return crypto_has_alg(alg_name, type, mask);
+ req->tfm = tfm;
+ req->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
}
-static inline const char *crypto_comp_name(struct crypto_comp *tfm)
+struct crypto_async_request *crypto_request_clone(
+ struct crypto_async_request *req, size_t total, gfp_t gfp);
+
+static inline void crypto_stack_request_init(struct crypto_async_request *req,
+ struct crypto_tfm *tfm)
{
- return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
+ req->flags = 0;
+ crypto_request_set_tfm(req, tfm);
+ req->flags |= CRYPTO_TFM_REQ_ON_STACK;
}
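The on-stack request helpers above suggest a pattern along these lines. A hedged sketch, in which `my_tfm` and `total_len` are assumptions and the error-return convention of crypto_request_clone() is assumed to be ERR_PTR():

	static int process_possibly_stack_request(struct crypto_async_request *req,
						  struct crypto_tfm *my_tfm,
						  size_t total_len)
	{
		crypto_stack_request_init(req, my_tfm); /* sets CRYPTO_TFM_REQ_ON_STACK */

		/*
		 * An async engine cannot complete into a stack frame that may
		 * be gone by then, so clone to the heap first in that case.
		 */
		if (crypto_req_on_stack(req) && crypto_tfm_is_async(my_tfm)) {
			req = crypto_request_clone(req, total_len, GFP_KERNEL);
			if (IS_ERR(req))
				return PTR_ERR(req);
		}
		/* ... submit req ... */
		return 0;
	}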
-int crypto_comp_compress(struct crypto_comp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-
-int crypto_comp_decompress(struct crypto_comp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-
#endif /* _LINUX_CRYPTO_H */
diff --git a/include/linux/cuda.h b/include/linux/cuda.h
index 45bfe9d61271..daf3e6f98444 100644
--- a/include/linux/cuda.h
+++ b/include/linux/cuda.h
@@ -12,7 +12,7 @@
#include <uapi/linux/cuda.h>
-extern int find_via_cuda(void);
+extern int __init find_via_cuda(void);
extern int cuda_request(struct adb_request *req,
void (*done)(struct adb_request *), int nbytes, ...);
extern void cuda_poll(void);
diff --git a/include/linux/damon.h b/include/linux/damon.h
new file mode 100644
index 000000000000..3813373a9200
--- /dev/null
+++ b/include/linux/damon.h
@@ -0,0 +1,975 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DAMON API
+ *
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#ifndef _DAMON_H_
+#define _DAMON_H_
+
+#include <linux/memcontrol.h>
+#include <linux/mutex.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+#include <linux/random.h>
+
+/* Minimum region size. Every damon_region is aligned to this. */
+#define DAMON_MIN_REGION PAGE_SIZE
+/* Max priority score for DAMON-based operation schemes */
+#define DAMOS_MAX_SCORE (99)
+
+/* Get a random number in [l, r) */
+static inline unsigned long damon_rand(unsigned long l, unsigned long r)
+{
+ return l + get_random_u32_below(r - l);
+}
+
+/**
+ * struct damon_addr_range - Represents an address region of [@start, @end).
+ * @start: Start address of the region (inclusive).
+ * @end: End address of the region (exclusive).
+ */
+struct damon_addr_range {
+ unsigned long start;
+ unsigned long end;
+};
+
+/**
+ * struct damon_size_range - Represents size for filter to operate on [@min, @max].
+ * @min: Min size (inclusive).
+ * @max: Max size (inclusive).
+ */
+struct damon_size_range {
+ unsigned long min;
+ unsigned long max;
+};
+
+/**
+ * struct damon_region - Represents a monitoring target region.
+ * @ar: The address range of the region.
+ * @sampling_addr: Address of the sample for the next access check.
+ * @nr_accesses: Access frequency of this region.
+ * @nr_accesses_bp: @nr_accesses in basis points (0.01%), updated for
+ * each sampling interval.
+ * @list: List head for siblings.
+ * @age: Age of this region.
+ *
+ * @nr_accesses is reset to zero for every &damon_attrs->aggr_interval and
+ * increased for every &damon_attrs->sample_interval if an access to the region
+ * during the last sampling interval is found. The update of this field should
+ * not be done with direct access but with the helper function,
+ * damon_update_region_access_rate().
+ *
+ * @nr_accesses_bp is another representation of @nr_accesses in basis points
+ * (1 in 10,000) that is updated for every &damon_attrs->sample_interval in a
+ * manner similar to a moving sum. By the algorithm, this value becomes
+ * @nr_accesses * 10000 for every &struct damon_attrs->aggr_interval. This can
+ * be used when the aggregation interval is too long to wait for before getting
+ * the access monitoring results.
+ *
+ * @age is initially zero, increased for each aggregation interval, and reset
+ * to zero again if the access frequency is significantly changed. If two
+ * regions are merged into a new region, both @nr_accesses and @age of the new
+ * region are set as region size-weighted average of those of the two regions.
+ */
+struct damon_region {
+ struct damon_addr_range ar;
+ unsigned long sampling_addr;
+ unsigned int nr_accesses;
+ unsigned int nr_accesses_bp;
+ struct list_head list;
+
+ unsigned int age;
+/* private: Internal value for age calculation. */
+ unsigned int last_nr_accesses;
+};
+
+/**
+ * struct damon_target - Represents a monitoring target.
+ * @pid: The PID of the virtual address space to monitor.
+ * @nr_regions: Number of monitoring target regions of this target.
+ * @regions_list: Head of the monitoring target regions of this target.
+ * @list: List head for siblings.
+ * @obsolete: Whether the commit destination target is obsolete.
+ *
+ * Each monitoring context could have multiple targets. For example, a context
+ * for virtual memory address spaces could have multiple target processes. The
+ * @pid should be set for appropriate &struct damon_operations including the
+ * virtual address spaces monitoring operations.
+ *
+ * @obsolete is used only for damon_commit_targets() source targets, to specify
+ * that the matching destination targets are obsolete. Read
+ * damon_commit_targets() to see how it is handled.
+ */
+struct damon_target {
+ struct pid *pid;
+ unsigned int nr_regions;
+ struct list_head regions_list;
+ struct list_head list;
+ bool obsolete;
+};
+
+/**
+ * enum damos_action - Represents an action of a Data Access Monitoring-based
+ * Operation Scheme.
+ *
+ * @DAMOS_WILLNEED: Call ``madvise()`` for the region with MADV_WILLNEED.
+ * @DAMOS_COLD: Call ``madvise()`` for the region with MADV_COLD.
+ * @DAMOS_PAGEOUT: Reclaim the region.
+ * @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE.
+ * @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE.
+ * @DAMOS_LRU_PRIO: Prioritize the region on its LRU lists.
+ * @DAMOS_LRU_DEPRIO: Deprioritize the region on its LRU lists.
+ * @DAMOS_MIGRATE_HOT: Migrate the regions prioritizing warmer regions.
+ * @DAMOS_MIGRATE_COLD: Migrate the regions prioritizing colder regions.
+ * @DAMOS_STAT: Do nothing but count the stat.
+ * @NR_DAMOS_ACTIONS: Total number of DAMOS actions
+ *
+ * The support of each action is up to the running &struct damon_operations.
+ * Refer to the 'Operation Action' section of Documentation/mm/damon/design.rst
+ * for the support status of each action.
+ *
+ * Note that DAMOS_PAGEOUT doesn't trigger demotions.
+ */
+enum damos_action {
+ DAMOS_WILLNEED,
+ DAMOS_COLD,
+ DAMOS_PAGEOUT,
+ DAMOS_HUGEPAGE,
+ DAMOS_NOHUGEPAGE,
+ DAMOS_LRU_PRIO,
+ DAMOS_LRU_DEPRIO,
+ DAMOS_MIGRATE_HOT,
+ DAMOS_MIGRATE_COLD,
+ DAMOS_STAT, /* Do nothing but only record the stat */
+ NR_DAMOS_ACTIONS,
+};
+
+/**
+ * enum damos_quota_goal_metric - Represents the metric to be used as the goal
+ *
+ * @DAMOS_QUOTA_USER_INPUT: User-input value.
+ * @DAMOS_QUOTA_SOME_MEM_PSI_US: System level some memory PSI in us.
+ * @DAMOS_QUOTA_NODE_MEM_USED_BP: MemUsed ratio of a node.
+ * @DAMOS_QUOTA_NODE_MEM_FREE_BP: MemFree ratio of a node.
+ * @DAMOS_QUOTA_NODE_MEMCG_USED_BP: MemUsed ratio of a node for a cgroup.
+ * @DAMOS_QUOTA_NODE_MEMCG_FREE_BP: MemFree ratio of a node for a cgroup.
+ * @NR_DAMOS_QUOTA_GOAL_METRICS: Number of DAMOS quota goal metrics.
+ *
+ * Metrics equal to or larger than @NR_DAMOS_QUOTA_GOAL_METRICS are unsupported.
+ */
+enum damos_quota_goal_metric {
+ DAMOS_QUOTA_USER_INPUT,
+ DAMOS_QUOTA_SOME_MEM_PSI_US,
+ DAMOS_QUOTA_NODE_MEM_USED_BP,
+ DAMOS_QUOTA_NODE_MEM_FREE_BP,
+ DAMOS_QUOTA_NODE_MEMCG_USED_BP,
+ DAMOS_QUOTA_NODE_MEMCG_FREE_BP,
+ NR_DAMOS_QUOTA_GOAL_METRICS,
+};
+
+/**
+ * struct damos_quota_goal - DAMOS scheme quota auto-tuning goal.
+ * @metric: Metric to be used for representing the goal.
+ * @target_value: Target value of @metric to achieve with the tuning.
+ * @current_value: Current value of @metric.
+ * @last_psi_total: Last measured total PSI
+ * @nid: Node id.
+ * @memcg_id: Memcg id.
+ * @list: List head for siblings.
+ *
+ * Data structure for getting the current score of the quota tuning goal. The
+ * score is calculated by how close @current_value and @target_value are. Then
+ * the score is entered into DAMON's internal feedback loop mechanism to get the
+ * auto-tuned quota.
+ *
+ * If @metric is DAMOS_QUOTA_USER_INPUT, @current_value should be manually
+ * entered by the user, probably inside the kdamond callbacks. Otherwise,
+ * DAMON sets @current_value with self-measured value of @metric.
+ *
+ * If @metric is DAMOS_QUOTA_NODE_MEM_{USED,FREE}_BP, @nid represents the node
+ * id of the target node to account the used/free memory.
+ *
+ * If @metric is DAMOS_QUOTA_NODE_MEMCG_{USED,FREE}_BP, @nid and @memcg_id
+ * represent the node id and the cgroup to account the used memory for.
+ */
+struct damos_quota_goal {
+ enum damos_quota_goal_metric metric;
+ unsigned long target_value;
+ unsigned long current_value;
+ /* metric-dependent fields */
+ union {
+ u64 last_psi_total;
+ struct {
+ int nid;
+ unsigned short memcg_id;
+ };
+ };
+ struct list_head list;
+};
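As a usage sketch built from damos_new_quota_goal() and damos_add_quota_goal() (declared further below in this header), a goal that asks the feedback loop to keep the system-wide "some" memory PSI around 10ms per second might look as follows; the target value is illustrative, and the `scheme` variable and the NULL-on-failure convention are assumptions:

	static int add_psi_goal(struct damos *scheme)
	{
		struct damos_quota_goal *goal;

		/* target ~10,000us of "some" memory pressure (illustrative) */
		goal = damos_new_quota_goal(DAMOS_QUOTA_SOME_MEM_PSI_US, 10000);
		if (!goal)
			return -ENOMEM;
		damos_add_quota_goal(&scheme->quota, goal);
		return 0;
	}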
+
+/**
+ * struct damos_quota - Controls the aggressiveness of the given scheme.
+ * @reset_interval: Charge reset interval in milliseconds.
+ * @ms: Maximum milliseconds that the scheme can use.
+ * @sz: Maximum bytes of memory that the action can be applied to.
+ * @goals: Head of quota tuning goals (&damos_quota_goal) list.
+ * @esz: Effective size quota in bytes.
+ *
+ * @weight_sz: Weight of the region's size for prioritization.
+ * @weight_nr_accesses: Weight of the region's nr_accesses for prioritization.
+ * @weight_age: Weight of the region's age for prioritization.
+ *
+ * To avoid consuming too much CPU time or IO resources for applying the
+ * &struct damos->action to large memory, DAMON allows users to set time and/or
+ * size quotas. The quotas can be set by writing non-zero values to &ms and
+ * &sz, respectively. If the time quota is set, DAMON tries to use only up to
+ * &ms milliseconds within &reset_interval for applying the action. If the
+ * size quota is set, DAMON tries to apply the action only up to &sz bytes
+ * within &reset_interval.
+ *
+ * To reconcile the different types of quotas and goals, DAMON internally
+ * converts those into one single size quota called the "effective quota".
+ * DAMON internally uses it as the only real quota. The conversion is made as
+ * follows.
+ *
+ * The time quota is transformed to a size quota using estimated throughput of
+ * the scheme's action. DAMON then compares it against &sz and uses the
+ * smaller one as the effective quota.
+ *
+ * If @goals is not empty, DAMON calculates yet another size quota based on the
+ * goals using its internal feedback loop algorithm, for every @reset_interval.
+ * Then, if the new size quota is smaller than the effective quota, it uses the
+ * new size quota as the effective quota.
+ *
+ * The resulting effective size quota in bytes is set to @esz.
+ *
+ * For selecting regions within the quota, DAMON prioritizes current scheme's
+ * target memory regions using the &struct damon_operations->get_scheme_score.
+ * You could customize the prioritization logic by setting &weight_sz,
+ * &weight_nr_accesses, and &weight_age, because monitoring operations are
+ * encouraged to respect those.
+ */
+struct damos_quota {
+ unsigned long reset_interval;
+ unsigned long ms;
+ unsigned long sz;
+ struct list_head goals;
+ unsigned long esz;
+
+ unsigned int weight_sz;
+ unsigned int weight_nr_accesses;
+ unsigned int weight_age;
+
+/* private: */
+ /* For throughput estimation */
+ unsigned long total_charged_sz;
+ unsigned long total_charged_ns;
+
+ /* For charging the quota */
+ unsigned long charged_sz;
+ unsigned long charged_from;
+ struct damon_target *charge_target_from;
+ unsigned long charge_addr_from;
+
+ /* For prioritization */
+ unsigned int min_score;
+
+ /* For feedback loop */
+ unsigned long esz_bp;
+};
+
+/**
+ * enum damos_wmark_metric - Represents the watermark metric.
+ *
+ * @DAMOS_WMARK_NONE: Ignore the watermarks of the given scheme.
+ * @DAMOS_WMARK_FREE_MEM_RATE: Free memory rate of the system in [0,1000].
+ * @NR_DAMOS_WMARK_METRICS: Total number of DAMOS watermark metrics
+ */
+enum damos_wmark_metric {
+ DAMOS_WMARK_NONE,
+ DAMOS_WMARK_FREE_MEM_RATE,
+ NR_DAMOS_WMARK_METRICS,
+};
+
+/**
+ * struct damos_watermarks - Controls when a given scheme should be activated.
+ * @metric: Metric for the watermarks.
+ * @interval: Watermarks check time interval in microseconds.
+ * @high: High watermark.
+ * @mid: Middle watermark.
+ * @low: Low watermark.
+ *
+ * If &metric is &DAMOS_WMARK_NONE, the scheme is always active. Being active
+ * means DAMON does the monitoring and applies the action of the scheme to
+ * appropriate memory regions. Otherwise, DAMON checks &metric of the system
+ * at least every &interval microseconds and works as below.
+ *
+ * If &metric is higher than &high, the scheme is deactivated. If &metric is
+ * between &mid and &low, the scheme is activated. If &metric is lower than
+ * &low, the scheme is deactivated.
+ */
+struct damos_watermarks {
+ enum damos_wmark_metric metric;
+ unsigned long interval;
+ unsigned long high;
+ unsigned long mid;
+ unsigned long low;
+
+/* private: */
+ bool activated;
+};
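For illustration, a configuration that keeps a scheme active only while free memory sits in a middle band could be expressed as below; the metric is in [0,1000], and the specific thresholds and interval are illustrative:

	struct damos_watermarks wmarks = {
		.metric = DAMOS_WMARK_FREE_MEM_RATE,
		.interval = 5 * 1000 * 1000,	/* check every 5 seconds */
		.high = 500,	/* deactivate while >50% of memory is free */
		.mid = 400,	/* activate once free memory falls below 40% */
		.low = 50,	/* deactivate again below 5% free */
	};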
+
+/**
+ * struct damos_stat - Statistics on a given scheme.
+ * @nr_tried: Total number of regions that the scheme was tried to be applied to.
+ * @sz_tried: Total size of regions that the scheme was tried to be applied to.
+ * @nr_applied: Total number of regions that the scheme was applied to.
+ * @sz_applied: Total size of regions that the scheme was applied to.
+ * @sz_ops_filter_passed:
+ * Total bytes that passed ops layer-handled DAMOS filters.
+ * @qt_exceeds: Total number of times the quota of the scheme has been exceeded.
+ *
+ * "Tried an action to a region" in this context means the DAMOS core logic
+ * determined the region as eligible to apply the action. The access pattern
+ * (&struct damos_access_pattern), quotas (&struct damos_quota), watermarks
+ * (&struct damos_watermarks) and filters (&struct damos_filter) that handled
+ * on core logic can affect this. The core logic asks the operation set
+ * (&struct damon_operations) to apply the action to the region.
+ *
+ * "Applied an action to a region" in this context means the operation set
+ * (&struct damon_operations) successfully applied the action to the region, at
+ * least to a part of the region. The filters (&struct damos_filter) that
+ * handled on operation set layer and type of the action and pages of the
+ * region can affect this. For example, if a filter is set to exclude
+ * anonymous pages and the region has only anonymous pages, the region will be
+ * failed at applying the action. If the action is &DAMOS_PAGEOUT and all
+ * pages of the region are already paged out, the region will be failed at
+ * applying the action.
+ */
+struct damos_stat {
+ unsigned long nr_tried;
+ unsigned long sz_tried;
+ unsigned long nr_applied;
+ unsigned long sz_applied;
+ unsigned long sz_ops_filter_passed;
+ unsigned long qt_exceeds;
+};
+
+/**
+ * enum damos_filter_type - Type of memory for &struct damos_filter
+ * @DAMOS_FILTER_TYPE_ANON: Anonymous pages.
+ * @DAMOS_FILTER_TYPE_ACTIVE: Active pages.
+ * @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages.
+ * @DAMOS_FILTER_TYPE_YOUNG: Recently accessed pages.
+ * @DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: Page is part of a hugepage.
+ * @DAMOS_FILTER_TYPE_UNMAPPED: Unmapped pages.
+ * @DAMOS_FILTER_TYPE_ADDR: Address range.
+ * @DAMOS_FILTER_TYPE_TARGET: Data Access Monitoring target.
+ * @NR_DAMOS_FILTER_TYPES: Number of filter types.
+ *
+ * The anon pages type and memcg type filters are handled by the underlying
+ * &struct damon_operations as a part of the scheme action trying, and are
+ * therefore accounted as 'tried'. In contrast, other types are handled by the
+ * core layer before trying the action, and are therefore not accounted as
+ * 'tried'.
+ *
+ * The support of the filters that are handled by &struct damon_operations
+ * depends on the running &struct damon_operations.
+ * &enum DAMON_OPS_PADDR supports both anon pages type and memcg type filters,
+ * while &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR don't support any of
+ * the two types.
+ */
+enum damos_filter_type {
+ DAMOS_FILTER_TYPE_ANON,
+ DAMOS_FILTER_TYPE_ACTIVE,
+ DAMOS_FILTER_TYPE_MEMCG,
+ DAMOS_FILTER_TYPE_YOUNG,
+ DAMOS_FILTER_TYPE_HUGEPAGE_SIZE,
+ DAMOS_FILTER_TYPE_UNMAPPED,
+ DAMOS_FILTER_TYPE_ADDR,
+ DAMOS_FILTER_TYPE_TARGET,
+ NR_DAMOS_FILTER_TYPES,
+};
+
+/**
+ * struct damos_filter - DAMOS action target memory filter.
+ * @type: Type of the target memory.
+ * @matching: Whether this is for @type-matching memory.
+ * @allow: Whether to include or exclude the @matching memory.
+ * @memcg_id: Memcg id of interest if @type is DAMOS_FILTER_TYPE_MEMCG.
+ * @addr_range: Address range if @type is DAMOS_FILTER_TYPE_ADDR.
+ * @target_idx: Index of the &struct damon_target of
+ * &damon_ctx->adaptive_targets if @type is
+ * DAMOS_FILTER_TYPE_TARGET.
+ * @sz_range: Size range if @type is DAMOS_FILTER_TYPE_HUGEPAGE_SIZE.
+ * @list: List head for siblings.
+ *
+ * Before applying the &damos->action to a memory region, DAMOS checks if each
+ * byte of the region matches the given condition and avoids applying the
+ * action if so. Support of each filter type depends on the running &struct
+ * damon_operations and the type. Refer to &enum damos_filter_type for more
+ * details.
+ */
+struct damos_filter {
+ enum damos_filter_type type;
+ bool matching;
+ bool allow;
+ union {
+ unsigned short memcg_id;
+ struct damon_addr_range addr_range;
+ int target_idx;
+ struct damon_size_range sz_range;
+ };
+ struct list_head list;
+};
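A usage sketch with damos_new_filter() and damos_add_filter(), which are declared further below; the `scheme` variable and the NULL-on-failure convention are assumptions:

	static int allow_only_anon(struct damos *scheme)
	{
		struct damos_filter *filter;

		/* match anonymous pages (@matching == true) and allow them */
		filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, true);
		if (!filter)
			return -ENOMEM;
		damos_add_filter(scheme, filter);
		return 0;
	}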
+
+struct damon_ctx;
+struct damos;
+
+/**
+ * struct damos_walk_control - Control damos_walk().
+ *
+ * @walk_fn: Function to be called back for each region.
+ * @data: Data that will be passed to walk functions.
+ *
+ * Control damos_walk(), which requests a specific kdamond to invoke the given
+ * function on each region that is eligible for the actions of the kdamond's
+ * schemes. Refer to damos_walk() for more details.
+ */
+struct damos_walk_control {
+ void (*walk_fn)(void *data, struct damon_ctx *ctx,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *s, unsigned long sz_filter_passed);
+ void *data;
+/* private: internal use only */
+ /* informs if the kdamond finished handling of the walk request */
+ struct completion completion;
+ /* informs if the walk is canceled. */
+ bool canceled;
+};
+
+/**
+ * struct damos_access_pattern - Target access pattern of the given scheme.
+ * @min_sz_region: Minimum size of target regions.
+ * @max_sz_region: Maximum size of target regions.
+ * @min_nr_accesses: Minimum ``->nr_accesses`` of target regions.
+ * @max_nr_accesses: Maximum ``->nr_accesses`` of target regions.
+ * @min_age_region: Minimum age of target regions.
+ * @max_age_region: Maximum age of target regions.
+ */
+struct damos_access_pattern {
+ unsigned long min_sz_region;
+ unsigned long max_sz_region;
+ unsigned int min_nr_accesses;
+ unsigned int max_nr_accesses;
+ unsigned int min_age_region;
+ unsigned int max_age_region;
+};
+
+/**
+ * struct damos_migrate_dests - Migration destination nodes and their weights.
+ * @node_id_arr: Array of migration destination node ids.
+ * @weight_arr: Array of migration weights for @node_id_arr.
+ * @nr_dests: Length of the @node_id_arr and @weight_arr arrays.
+ *
+ * @node_id_arr is an array of the ids of migration destination nodes.
+ * @weight_arr is an array of the weights for those. The weights in
+ * @weight_arr are for the nodes in @node_id_arr of the same array index.
+ */
+struct damos_migrate_dests {
+ unsigned int *node_id_arr;
+ unsigned int *weight_arr;
+ size_t nr_dests;
+};
+
+/**
+ * struct damos - Represents a Data Access Monitoring-based Operation Scheme.
+ * @pattern: Access pattern of target regions.
+ * @action: &damos_action to be applied to the target regions.
+ * @apply_interval_us: The time between applying the @action.
+ * @quota: Control the aggressiveness of this scheme.
+ * @wmarks: Watermarks for automated (in)activation of this scheme.
+ * @migrate_dests: Destination nodes if @action is "migrate_{hot,cold}".
+ * @target_nid: Destination node if @action is "migrate_{hot,cold}".
+ * @core_filters: Additional set of &struct damos_filter for &action.
+ * @ops_filters: ops layer handling &struct damos_filter objects list.
+ * @last_applied: Last @action applied ops-managing entity.
+ * @stat: Statistics of this scheme.
+ * @list: List head for siblings.
+ *
+ * For each @apply_interval_us, DAMON finds regions which fit in the
+ * &pattern and applies &action to those. To avoid consuming too much
+ * CPU time or IO resources for the &action, &quota is used.
+ *
+ * If @apply_interval_us is zero, &damon_attrs->aggr_interval is used instead.
+ *
+ * To do the work only when needed, schemes can be activated for specific
+ * system situations using &wmarks. If all schemes that registered to the
+ * monitoring context are inactive, DAMON stops monitoring either, and just
+ * repeatedly checks the watermarks.
+ *
+ * @migrate_dests specifies multiple migration target nodes with different
+ * weights for migrate_hot or migrate_cold actions. @target_nid is ignored if
+ * this is set.
+ *
+ * @target_nid is used to set the migration target node for migrate_hot or
+ * migrate_cold actions when @migrate_dests is unset.
+ *
+ * Before applying the &action to a memory region, &struct damon_operations
+ * implementation could check pages of the region and skip &action to respect
+ * &core_filters.
+ *
+ * The minimum entity that @action can be applied to depends on the underlying
+ * &struct damon_operations. Since it may not be aligned with the core layer
+ * abstraction, namely &struct damon_region, &struct damon_operations could
+ * apply @action to the same entity multiple times. Large folios underlying
+ * multiple &struct damon_region objects could be such examples. The &struct
+ * damon_operations can use @last_applied to avoid that. The DAMOS core logic
+ * unsets @last_applied when each region walk for applying the scheme is
+ * finished.
+ *
+ * After applying the &action to each region, &stat is updated to reflect the
+ * number of regions and the total size of regions that the &action was
+ * applied to.
+ */
+struct damos {
+ struct damos_access_pattern pattern;
+ enum damos_action action;
+ unsigned long apply_interval_us;
+/* private: internal use only */
+ /*
+ * number of sample intervals that should be passed before applying
+ * @action
+ */
+ unsigned long next_apply_sis;
+ /* informs if ongoing DAMOS walk for this scheme is finished */
+ bool walk_completed;
+ /*
+ * If the current region in the filtering stage is allowed by core
+ * layer-handled filters. If true, operations layer allows it, too.
+ */
+ bool core_filters_allowed;
+	/* whether to reject regions unmatched by core/ops filters */
+ bool core_filters_default_reject;
+ bool ops_filters_default_reject;
+/* public: */
+ struct damos_quota quota;
+ struct damos_watermarks wmarks;
+ union {
+ struct {
+ int target_nid;
+ struct damos_migrate_dests migrate_dests;
+ };
+ };
+ struct list_head core_filters;
+ struct list_head ops_filters;
+ void *last_applied;
+ struct damos_stat stat;
+ struct list_head list;
+};
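Tying the structures above together, a scheme that pages out completely idle regions could be built with damon_new_scheme() and damon_add_scheme(), declared further below. A hedged sketch; the thresholds and the `quota` and `wmarks` arguments are illustrative assumptions:

	static int add_pageout_scheme(struct damon_ctx *ctx,
				      struct damos_quota *quota,
				      struct damos_watermarks *wmarks)
	{
		struct damos_access_pattern pattern = {
			.min_sz_region = DAMON_MIN_REGION,
			.max_sz_region = ULONG_MAX,
			.min_nr_accesses = 0,
			.max_nr_accesses = 0,	/* not accessed at all, */
			.min_age_region = 10,	/* for >= 10 aggregation intervals */
			.max_age_region = UINT_MAX,
		};
		struct damos *scheme;

		/* 0 for @apply_interval_us means "use the aggregation interval" */
		scheme = damon_new_scheme(&pattern, DAMOS_PAGEOUT, 0, quota,
					  wmarks, NUMA_NO_NODE);
		if (!scheme)
			return -ENOMEM;
		damon_add_scheme(ctx, scheme);
		return 0;
	}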
+
+/**
+ * enum damon_ops_id - Identifier for each monitoring operations implementation
+ *
+ * @DAMON_OPS_VADDR: Monitoring operations for virtual address spaces
+ * @DAMON_OPS_FVADDR: Monitoring operations for only fixed ranges of virtual
+ * address spaces
+ * @DAMON_OPS_PADDR: Monitoring operations for the physical address space
+ * @NR_DAMON_OPS: Number of monitoring operations implementations
+ */
+enum damon_ops_id {
+ DAMON_OPS_VADDR,
+ DAMON_OPS_FVADDR,
+ DAMON_OPS_PADDR,
+ NR_DAMON_OPS,
+};
+
+/**
+ * struct damon_operations - Monitoring operations for given use cases.
+ *
+ * @id: Identifier of this operations set.
+ * @init: Initialize operations-related data structures.
+ * @update: Update operations-related data structures.
+ * @prepare_access_checks: Prepare next access check of target regions.
+ * @check_accesses: Check the accesses to target regions.
+ * @get_scheme_score: Get the score of a region for a scheme.
+ * @apply_scheme: Apply a DAMON-based operation scheme.
+ * @target_valid: Determine if the target is valid.
+ * @cleanup_target: Clean up each target before deallocation.
+ * @cleanup: Clean up the context.
+ *
+ * DAMON can be extended for various address spaces and usages. For this,
+ * users should register the low-level operations for their target address
+ * space and use case via &damon_ctx.ops. Then, the monitoring thread
+ * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting
+ * the monitoring, @update after each &damon_attrs.ops_update_interval, and
+ * @check_accesses, @target_valid and @prepare_access_checks after each
+ * &damon_attrs.sample_interval.
+ *
+ * Each &struct damon_operations instance having valid @id can be registered
+ * via damon_register_ops() and selected by damon_select_ops() later.
+ * @init should initialize operations-related data structures. For example,
+ * this could be used to construct proper monitoring target regions and link
+ * those to @damon_ctx.adaptive_targets.
+ * @update should update the operations-related data structures. For example,
+ * this could be used to update monitoring target regions for current status.
+ * @prepare_access_checks should manipulate the monitoring regions to be
+ * prepared for the next access check.
+ * @check_accesses should check the accesses to each region that were made
+ * after the last preparation and update the number of observed accesses of
+ * each region. It should also return the max number of observed accesses that
+ * were made as a result of its update. The value will be used as the regions
+ * adjustment threshold.
+ * @get_scheme_score should return the priority score of a region for a scheme
+ * as an integer in [0, &DAMOS_MAX_SCORE].
+ * @apply_scheme is called from @kdamond when a region for a user-provided
+ * DAMON-based operation scheme is found. It should apply the scheme's action
+ * to the region and return the bytes of the region that the action was
+ * successfully applied to. It should also report how many bytes of the region
+ * have passed the filters (&struct damos_filter) that it handled itself.
+ * @target_valid should check whether the target is still valid for the
+ * monitoring.
+ * @cleanup_target is called before the target is deallocated.
+ * @cleanup is called from @kdamond just before its termination.
+ */
+struct damon_operations {
+ enum damon_ops_id id;
+ void (*init)(struct damon_ctx *context);
+ void (*update)(struct damon_ctx *context);
+ void (*prepare_access_checks)(struct damon_ctx *context);
+ unsigned int (*check_accesses)(struct damon_ctx *context);
+ int (*get_scheme_score)(struct damon_ctx *context,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *scheme);
+ unsigned long (*apply_scheme)(struct damon_ctx *context,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *scheme, unsigned long *sz_filter_passed);
+ bool (*target_valid)(struct damon_target *t);
+ void (*cleanup_target)(struct damon_target *t);
+ void (*cleanup)(struct damon_ctx *context);
+};
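A new monitoring backend fills this structure and registers it once. A hedged sketch with hypothetical callback names; per its use here, damon_register_ops() (declared further below) is assumed to reject an @id that is already taken:

	static struct damon_operations my_ops = {
		.id = DAMON_OPS_PADDR,	/* must be a valid, still-unregistered id */
		.init = my_init,
		.update = my_update,
		.prepare_access_checks = my_prepare_access_checks,
		.check_accesses = my_check_accesses,
		.get_scheme_score = my_get_scheme_score,
		.apply_scheme = my_apply_scheme,
		.target_valid = my_target_valid,
		.cleanup = my_cleanup,
	};

	static int __init my_ops_init(void)
	{
		return damon_register_ops(&my_ops);
	}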
+
+/**
+ * struct damon_call_control - Control damon_call().
+ *
+ * @fn: Function to be called back.
+ * @data: Data that will be passed to @fn.
+ * @repeat: Repeat invocations.
+ * @return_code: Return code from @fn invocation.
+ * @dealloc_on_cancel: De-allocate when canceled.
+ *
+ * Control damon_call(), which requests a specific kdamond to invoke a given
+ * function. Refer to damon_call() for more details.
+ */
+struct damon_call_control {
+ int (*fn)(void *data);
+ void *data;
+ bool repeat;
+ int return_code;
+ bool dealloc_on_cancel;
+/* private: internal use only */
+ /* informs if the kdamond finished handling of the request */
+ struct completion completion;
+	/* informs if the kdamond canceled the @fn invocation */
+ bool canceled;
+ /* List head for siblings. */
+ struct list_head list;
+};
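A sketch of running a function once in the kdamond context via damon_call(), declared further below; the `collect_stats()` callback and the use of @data as an opaque stats buffer are hypothetical:

	static int read_stats(struct damon_ctx *ctx, void *stats)
	{
		struct damon_call_control control = {
			.fn = collect_stats,	/* runs in the kdamond context */
			.data = stats,
			.repeat = false,	/* invoke once, then complete */
		};
		int err;

		err = damon_call(ctx, &control);
		return err ? err : control.return_code;
	}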
+
+/**
+ * struct damon_intervals_goal - Monitoring intervals auto-tuning goal.
+ *
+ * @access_bp: Access events observation ratio to achieve in bp.
+ * @aggrs: Number of aggregations to achieve @access_bp within.
+ * @min_sample_us: Minimum resulting sampling interval in microseconds.
+ * @max_sample_us: Maximum resulting sampling interval in microseconds.
+ *
+ * DAMON automatically tunes &damon_attrs->sample_interval and
+ * &damon_attrs->aggr_interval, aiming for the ratio in bp (1/10,000) of
+ * DAMON-observed access events to the theoretical maximum amount within @aggrs
+ * aggregations to be the same as @access_bp. The logic increases
+ * &damon_attrs->aggr_interval and &damon_attrs->sample_interval in the same
+ * ratio if the current access events observation ratio is lower than the
+ * target for each @aggrs aggregations, and vice versa.
+ *
+ * If @aggrs is zero, the tuning is disabled and hence this struct is ignored.
+ */
+struct damon_intervals_goal {
+ unsigned long access_bp;
+ unsigned long aggrs;
+ unsigned long min_sample_us;
+ unsigned long max_sample_us;
+};
+
+/**
+ * struct damon_attrs - Monitoring attributes for accuracy/overhead control.
+ *
+ * @sample_interval: The time between access samplings.
+ * @aggr_interval: The time between monitor results aggregations.
+ * @ops_update_interval: The time between monitoring operations updates.
+ * @intervals_goal: Intervals auto-tuning goal.
+ * @min_nr_regions: The minimum number of adaptive monitoring
+ * regions.
+ * @max_nr_regions: The maximum number of adaptive monitoring
+ * regions.
+ *
+ * For each @sample_interval, DAMON checks whether each region is accessed or
+ * not during the last @sample_interval. If such access is found, DAMON
+ * aggregates the information by increasing &damon_region->nr_accesses for
+ * @aggr_interval time. For each @aggr_interval, the count is reset. DAMON
+ * also checks whether the target memory regions need update (e.g., by
+ * ``mmap()`` calls from the application, in case of virtual memory monitoring)
+ * and applies the changes for each @ops_update_interval. All time intervals
+ * are in microseconds. Please refer to &struct damon_operations and &struct
+ * damon_call_control for more detail.
+ */
+struct damon_attrs {
+ unsigned long sample_interval;
+ unsigned long aggr_interval;
+ unsigned long ops_update_interval;
+ struct damon_intervals_goal intervals_goal;
+ unsigned long min_nr_regions;
+ unsigned long max_nr_regions;
+/* private: internal use only */
+ /*
+ * @aggr_interval to @sample_interval ratio.
+ * Core-external components call damon_set_attrs() with &damon_attrs
+ * in which this field is unset. In that case, damon_set_attrs() sets
+ * this field of the resulting &damon_attrs. Core-internal components
+ * such as kdamond_tune_intervals() call damon_set_attrs() with
+ * &damon_attrs in which this field is set. In that case,
+ * damon_set_attrs() just keeps it.
+ */
+ unsigned long aggr_samples;
+};
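For example, attributes resembling DAMON's usual defaults (5ms sampling, 100ms aggregation; the numbers are illustrative) would be committed to a context with damon_set_attrs(), declared further below:

	static int set_default_like_attrs(struct damon_ctx *ctx)
	{
		struct damon_attrs attrs = {
			.sample_interval = 5000,	/* 5 ms */
			.aggr_interval = 100000,	/* 100 ms */
			.ops_update_interval = 1000000,	/* 1 s */
			.min_nr_regions = 10,
			.max_nr_regions = 1000,
		};

		/* also derives the private aggr_samples field, per the comment above */
		return damon_set_attrs(ctx, &attrs);
	}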
+
+/**
+ * struct damon_ctx - Represents a context for each monitoring. This is the
+ * main interface that allows users to set the attributes and get the results
+ * of the monitoring.
+ *
+ * @attrs: Monitoring attributes for accuracy/overhead control.
+ * @kdamond: Kernel thread who does the monitoring.
+ * @kdamond_lock: Mutex for the synchronizations with @kdamond.
+ *
+ * For each monitoring context, one kernel thread for the monitoring is
+ * created. The pointer to the thread is stored in @kdamond.
+ *
+ * Once started, the monitoring thread runs until explicitly required to be
+ * terminated or every monitoring target is invalid. The validity of the
+ * targets is checked via the &damon_operations.target_valid of @ops. The
+ * termination can also be explicitly requested by calling damon_stop().
+ * The thread sets @kdamond to NULL when it terminates. Therefore, users can
+ * know whether the monitoring is ongoing or terminated by reading @kdamond.
+ * Reads and writes to @kdamond from outside of the monitoring thread must
+ * be protected by @kdamond_lock.
+ *
+ * Note that the monitoring thread protects only @kdamond via @kdamond_lock.
+ * Accesses to other fields must be protected by themselves.
+ *
+ * @ops: Set of monitoring operations for given use cases.
+ * @addr_unit: Scale factor for core to ops address conversion.
+ * @min_sz_region: Minimum region size.
+ * @adaptive_targets: Head of monitoring targets (&damon_target) list.
+ * @schemes: Head of schemes (&damos) list.
+ */
+struct damon_ctx {
+ struct damon_attrs attrs;
+
+/* private: internal use only */
+ /* number of sample intervals that passed since this context started */
+ unsigned long passed_sample_intervals;
+ /*
+ * number of sample intervals that should be passed before next
+ * aggregation
+ */
+ unsigned long next_aggregation_sis;
+ /*
+ * number of sample intervals that should be passed before next ops
+ * update
+ */
+ unsigned long next_ops_update_sis;
+ /*
+ * number of sample intervals that should be passed before next
+ * intervals tuning
+ */
+ unsigned long next_intervals_tune_sis;
+ /* for waiting until the execution of the kdamond_fn is started */
+ struct completion kdamond_started;
+ /* for scheme quotas prioritization */
+ unsigned long *regions_score_histogram;
+
+ /* lists of &struct damon_call_control */
+ struct list_head call_controls;
+ struct mutex call_controls_lock;
+
+ struct damos_walk_control *walk_control;
+ struct mutex walk_control_lock;
+
+/* public: */
+ struct task_struct *kdamond;
+ struct mutex kdamond_lock;
+
+ struct damon_operations ops;
+ unsigned long addr_unit;
+ unsigned long min_sz_region;
+
+ struct list_head adaptive_targets;
+ struct list_head schemes;
+};
+
+static inline struct damon_region *damon_next_region(struct damon_region *r)
+{
+ return container_of(r->list.next, struct damon_region, list);
+}
+
+static inline struct damon_region *damon_prev_region(struct damon_region *r)
+{
+ return container_of(r->list.prev, struct damon_region, list);
+}
+
+static inline struct damon_region *damon_last_region(struct damon_target *t)
+{
+ return list_last_entry(&t->regions_list, struct damon_region, list);
+}
+
+static inline struct damon_region *damon_first_region(struct damon_target *t)
+{
+ return list_first_entry(&t->regions_list, struct damon_region, list);
+}
+
+static inline unsigned long damon_sz_region(struct damon_region *r)
+{
+ return r->ar.end - r->ar.start;
+}
+
+#define damon_for_each_region(r, t) \
+ list_for_each_entry(r, &t->regions_list, list)
+
+#define damon_for_each_region_from(r, t) \
+ list_for_each_entry_from(r, &t->regions_list, list)
+
+#define damon_for_each_region_safe(r, next, t) \
+ list_for_each_entry_safe(r, next, &t->regions_list, list)
+
+#define damon_for_each_target(t, ctx) \
+ list_for_each_entry(t, &(ctx)->adaptive_targets, list)
+
+#define damon_for_each_target_safe(t, next, ctx) \
+ list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list)
+
+#define damon_for_each_scheme(s, ctx) \
+ list_for_each_entry(s, &(ctx)->schemes, list)
+
+#define damon_for_each_scheme_safe(s, next, ctx) \
+ list_for_each_entry_safe(s, next, &(ctx)->schemes, list)
+
+#define damos_for_each_quota_goal(goal, quota) \
+ list_for_each_entry(goal, &quota->goals, list)
+
+#define damos_for_each_quota_goal_safe(goal, next, quota) \
+ list_for_each_entry_safe(goal, next, &(quota)->goals, list)
+
+#define damos_for_each_core_filter(f, scheme) \
+ list_for_each_entry(f, &(scheme)->core_filters, list)
+
+#define damos_for_each_core_filter_safe(f, next, scheme) \
+ list_for_each_entry_safe(f, next, &(scheme)->core_filters, list)
+
+#define damos_for_each_ops_filter(f, scheme) \
+ list_for_each_entry(f, &(scheme)->ops_filters, list)
+
+#define damos_for_each_ops_filter_safe(f, next, scheme) \
+ list_for_each_entry_safe(f, next, &(scheme)->ops_filters, list)
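These iterators follow the usual list_for_each_entry() semantics; for instance, the total monitored size of a target could be summed as below (the function name is hypothetical):

	static unsigned long damon_targeted_bytes(struct damon_target *t)
	{
		struct damon_region *r;
		unsigned long sz = 0;

		damon_for_each_region(r, t)
			sz += damon_sz_region(r);
		return sz;
	}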
+
+#ifdef CONFIG_DAMON
+
+struct damon_region *damon_new_region(unsigned long start, unsigned long end);
+
+/*
+ * Add a region between two other regions
+ */
+static inline void damon_insert_region(struct damon_region *r,
+ struct damon_region *prev, struct damon_region *next,
+ struct damon_target *t)
+{
+ __list_add(&r->list, &prev->list, &next->list);
+ t->nr_regions++;
+}
+
+void damon_add_region(struct damon_region *r, struct damon_target *t);
+void damon_destroy_region(struct damon_region *r, struct damon_target *t);
+int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
+ unsigned int nr_ranges, unsigned long min_sz_region);
+void damon_update_region_access_rate(struct damon_region *r, bool accessed,
+ struct damon_attrs *attrs);
+
+struct damos_filter *damos_new_filter(enum damos_filter_type type,
+ bool matching, bool allow);
+void damos_add_filter(struct damos *s, struct damos_filter *f);
+bool damos_filter_for_ops(enum damos_filter_type type);
+void damos_destroy_filter(struct damos_filter *f);
+
+struct damos_quota_goal *damos_new_quota_goal(
+ enum damos_quota_goal_metric metric,
+ unsigned long target_value);
+void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g);
+void damos_destroy_quota_goal(struct damos_quota_goal *goal);
+
+struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
+ enum damos_action action,
+ unsigned long apply_interval_us,
+ struct damos_quota *quota,
+ struct damos_watermarks *wmarks,
+ int target_nid);
+void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
+void damon_destroy_scheme(struct damos *s);
+int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src);
+
+struct damon_target *damon_new_target(void);
+void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
+bool damon_targets_empty(struct damon_ctx *ctx);
+void damon_free_target(struct damon_target *t);
+void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx);
+unsigned int damon_nr_regions(struct damon_target *t);
+
+struct damon_ctx *damon_new_ctx(void);
+void damon_destroy_ctx(struct damon_ctx *ctx);
+int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs);
+void damon_set_schemes(struct damon_ctx *ctx,
+ struct damos **schemes, ssize_t nr_schemes);
+int damon_commit_ctx(struct damon_ctx *old_ctx, struct damon_ctx *new_ctx);
+int damon_nr_running_ctxs(void);
+bool damon_is_registered_ops(enum damon_ops_id id);
+int damon_register_ops(struct damon_operations *ops);
+int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id);
+
+static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
+{
+ return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR;
+}
+
+static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs)
+{
+ /* {aggr,sample}_interval are unsigned long, hence could overflow */
+ return min(attrs->aggr_interval / attrs->sample_interval,
+ (unsigned long)UINT_MAX);
+}
+
+bool damon_initialized(void);
+int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
+int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
+bool damon_is_running(struct damon_ctx *ctx);
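Taken together, the declarations above imply a context lifecycle along the following lines. A hedged sketch with error handling abbreviated, assuming a valid `pid` reference for the vaddr operations:

	static int monitor_process(struct pid *pid)
	{
		struct damon_ctx *ctx = damon_new_ctx();
		struct damon_target *t = damon_new_target();
		int err;

		if (!ctx || !t)
			return -ENOMEM;	/* a full version would free the survivor */
		t->pid = pid;		/* required for DAMON_OPS_VADDR targets */
		damon_add_target(ctx, t);
		err = damon_select_ops(ctx, DAMON_OPS_VADDR);
		if (!err)
			err = damon_start(&ctx, 1, true);	/* one exclusive kdamond */
		/* ... monitoring results accumulate in the kdamond ... */
		if (!err)
			err = damon_stop(&ctx, 1);
		damon_destroy_ctx(ctx);
		return err;
	}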
+
+int damon_call(struct damon_ctx *ctx, struct damon_call_control *control);
+int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control);
+
+int damon_set_region_biggest_system_ram_default(struct damon_target *t,
+ unsigned long *start, unsigned long *end,
+ unsigned long min_sz_region);
+
+#endif /* CONFIG_DAMON */
+
+#endif /* _DAMON_H */
diff --git a/include/linux/dax.h b/include/linux/dax.h
index b52f084aa643..9d624f4d9df6 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -6,14 +6,19 @@
#include <linux/mm.h>
#include <linux/radix-tree.h>
-/* Flag for synchronous flush */
-#define DAXDEV_F_SYNC (1UL << 0)
-
typedef unsigned long dax_entry_t;
+struct dax_device;
+struct gendisk;
struct iomap_ops;
+struct iomap_iter;
struct iomap;
-struct dax_device;
+
+enum dax_access_mode {
+ DAX_ACCESS,
+ DAX_RECOVERY_WRITE,
+};
+
struct dax_operations {
/*
* direct_access: translate a device-relative
@@ -21,70 +26,64 @@ struct dax_operations {
* number of pages available for DAX at that pfn.
*/
long (*direct_access)(struct dax_device *, pgoff_t, long,
- void **, pfn_t *);
- /*
- * Validate whether this device is usable as an fsdax backing
- * device.
- */
- bool (*dax_supported)(struct dax_device *, struct block_device *, int,
- sector_t, sector_t);
- /* copy_from_iter: required operation for fs-dax direct-i/o */
- size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
- struct iov_iter *);
- /* copy_to_iter: required operation for fs-dax direct-i/o */
- size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
- struct iov_iter *);
+ enum dax_access_mode, void **, unsigned long *);
/* zero_page_range: required operation. Zero page range */
int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
+ /*
+ * recovery_write: recover a poisoned range by DAX device driver
+ * capable of clearing poison.
+ */
+ size_t (*recovery_write)(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *iter);
};
-extern struct attribute_group dax_attribute_group;
+struct dax_holder_operations {
+ /*
+ * notify_failure - notify memory failure into inner holder device
+ * @dax_dev: the dax device which contains the holder
+ * @offset: offset on this dax device where memory failure occurs
+ * @len: length of this memory failure event
+ * @flags: action flags for memory failure handler
+ */
+ int (*notify_failure)(struct dax_device *dax_dev, u64 offset,
+ u64 len, int mf_flags);
+};
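A filesystem that binds to a dax device as its holder supplies this single callback, typically alongside fs_dax_get_by_bdev() (declared later in this header); a hypothetical sketch:

	static int my_fs_notify_failure(struct dax_device *dax_dev, u64 offset,
					u64 len, int mf_flags)
	{
		/* map [offset, offset + len) back to files and unmap the users */
		return 0;
	}

	static const struct dax_holder_operations my_fs_dax_holder_ops = {
		.notify_failure = my_fs_notify_failure,
	};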
#if IS_ENABLED(CONFIG_DAX)
-struct dax_device *dax_get_by_host(const char *host);
-struct dax_device *alloc_dax(void *private, const char *host,
- const struct dax_operations *ops, unsigned long flags);
+struct dax_device *alloc_dax(void *private, const struct dax_operations *ops);
+void *dax_holder(struct dax_device *dax_dev);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
-bool __dax_synchronous(struct dax_device *dax_dev);
-static inline bool dax_synchronous(struct dax_device *dax_dev)
-{
- return __dax_synchronous(dax_dev);
-}
-void __set_dax_synchronous(struct dax_device *dax_dev);
-static inline void set_dax_synchronous(struct dax_device *dax_dev)
-{
- __set_dax_synchronous(dax_dev);
-}
-bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
- int blocksize, sector_t start, sector_t len);
+bool dax_synchronous(struct dax_device *dax_dev);
+void set_dax_nocache(struct dax_device *dax_dev);
+void set_dax_nomc(struct dax_device *dax_dev);
+void set_dax_synchronous(struct dax_device *dax_dev);
+size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i);
/*
* Check if given mapping is supported by the file / underlying device.
*/
-static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
- struct dax_device *dax_dev)
+static inline bool daxdev_mapping_supported(vm_flags_t vm_flags,
+ const struct inode *inode,
+ struct dax_device *dax_dev)
{
- if (!(vma->vm_flags & VM_SYNC))
+ if (!(vm_flags & VM_SYNC))
return true;
- if (!IS_DAX(file_inode(vma->vm_file)))
+ if (!IS_DAX(inode))
return false;
return dax_synchronous(dax_dev);
}
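A filesystem ->mmap() handler would typically gate MAP_SYNC with this check; a sketch in which my_fs_dax_dev() is a hypothetical accessor for the backing dax device:

	static int my_fs_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct inode *inode = file_inode(file);

		/* refuse VM_SYNC mappings the backing device cannot honor */
		if (!daxdev_mapping_supported(vma->vm_flags, inode,
					      my_fs_dax_dev(inode)))
			return -EOPNOTSUPP;

		/* ... install vm_ops and finish the mapping setup ... */
		return 0;
	}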
#else
-static inline struct dax_device *dax_get_by_host(const char *host)
+static inline void *dax_holder(struct dax_device *dax_dev)
{
return NULL;
}
-static inline struct dax_device *alloc_dax(void *private, const char *host,
- const struct dax_operations *ops, unsigned long flags)
+static inline struct dax_device *alloc_dax(void *private,
+ const struct dax_operations *ops)
{
- /*
- * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
- * NULL is an error or expected.
- */
- return NULL;
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void put_dax(struct dax_device *dax_dev)
{
@@ -103,78 +102,67 @@ static inline bool dax_synchronous(struct dax_device *dax_dev)
{
return true;
}
+static inline void set_dax_nocache(struct dax_device *dax_dev)
+{
+}
+static inline void set_dax_nomc(struct dax_device *dax_dev)
+{
+}
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
-static inline bool dax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t len)
+static inline bool daxdev_mapping_supported(vm_flags_t vm_flags,
+ const struct inode *inode,
+ struct dax_device *dax_dev)
{
- return false;
+ return !(vm_flags & VM_SYNC);
}
-static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
- struct dax_device *dax_dev)
+static inline size_t dax_recovery_write(struct dax_device *dax_dev,
+ pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
{
- return !(vma->vm_flags & VM_SYNC);
+ return 0;
}
#endif
struct writeback_control;
-int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
-#if IS_ENABLED(CONFIG_FS_DAX)
-bool __bdev_dax_supported(struct block_device *bdev, int blocksize);
-static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize)
+#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
+int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk);
+void dax_remove_host(struct gendisk *disk);
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off,
+ void *holder, const struct dax_holder_operations *ops);
+void fs_put_dax(struct dax_device *dax_dev, void *holder);
+#else
+static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
+{
+ return 0;
+}
+static inline void dax_remove_host(struct gendisk *disk)
{
- return __bdev_dax_supported(bdev, blocksize);
}
-
-bool __generic_fsdax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t sectors);
-static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t sectors)
+static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
+ u64 *start_off, void *holder,
+ const struct dax_holder_operations *ops)
{
- return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
- sectors);
+ return NULL;
}
-
-static inline void fs_put_dax(struct dax_device *dax_dev)
+static inline void fs_put_dax(struct dax_device *dax_dev, void *holder)
{
- put_dax(dax_dev);
}
+#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
-struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
+#if IS_ENABLED(CONFIG_FS_DAX)
int dax_writeback_mapping_range(struct address_space *mapping,
struct dax_device *dax_dev, struct writeback_control *wbc);
struct page *dax_layout_busy_page(struct address_space *mapping);
struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
-dax_entry_t dax_lock_page(struct page *page);
-void dax_unlock_page(struct page *page, dax_entry_t cookie);
+dax_entry_t dax_lock_folio(struct folio *folio);
+void dax_unlock_folio(struct folio *folio, dax_entry_t cookie);
+dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+ unsigned long index, struct page **page);
+void dax_unlock_mapping_entry(struct address_space *mapping,
+ unsigned long index, dax_entry_t cookie);
#else
-static inline bool bdev_dax_supported(struct block_device *bdev,
- int blocksize)
-{
- return false;
-}
-
-static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t sectors)
-{
- return false;
-}
-
-static inline void fs_put_dax(struct dax_device *dax_dev)
-{
-}
-
-static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
-{
- return NULL;
-}
-
static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
return NULL;
@@ -191,18 +179,41 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
return -EOPNOTSUPP;
}
-static inline dax_entry_t dax_lock_page(struct page *page)
+static inline dax_entry_t dax_lock_folio(struct folio *folio)
{
- if (IS_DAX(page->mapping->host))
+ if (IS_DAX(folio->mapping->host))
return ~0UL;
return 0;
}
-static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
+static inline void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
+{
+}
+
+static inline dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
+ unsigned long index, struct page **page)
+{
+ return 0;
+}
+
+static inline void dax_unlock_mapping_entry(struct address_space *mapping,
+ unsigned long index, dax_entry_t cookie)
{
}
#endif
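/*
 * Editorial sketch, not part of the patch: a memory-failure style
 * consumer of the folio locking API above. The handler name and the
 * elided recovery work are hypothetical.
 */
static int hypothetical_mf_dax_recover(struct folio *folio)
{
	dax_entry_t cookie = dax_lock_folio(folio);

	if (!cookie)
		return -EBUSY;	/* no longer DAX-mapped, nothing to do */
	/* ... unmap the poisoned range from all mappings ... */
	dax_unlock_folio(folio, cookie);
	return 0;
}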
+int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
+ const struct iomap_ops *ops);
+int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
+ const struct iomap_ops *ops);
+int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
+ const struct iomap_ops *ops);
+
+static inline bool dax_page_is_idle(struct page *page)
+{
+ return page && page_ref_count(page) == 0;
+}
+
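/*
 * Editorial sketch (assumed usage, not from the patch): a teardown path
 * could poll dax_page_is_idle() to let outstanding references drain
 * before the backing memory is freed. The helper name is hypothetical.
 */
static inline void hypothetical_wait_until_idle(struct page *page)
{
	while (!dax_page_is_idle(page))
		cond_resched();
}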
#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
@@ -216,39 +227,88 @@ static inline void dax_read_unlock(int id)
{
}
#endif /* CONFIG_DAX */
+
+#if !IS_ENABLED(CONFIG_FS_DAX)
+static inline int __must_check dax_break_layout(struct inode *inode,
+ loff_t start, loff_t end, void (cb)(struct inode *))
+{
+ return 0;
+}
+
+static inline void dax_break_layout_final(struct inode *inode)
+{
+}
+#endif
+
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
- void **kaddr, pfn_t *pfn);
+ enum dax_access_mode mode, void **kaddr, unsigned long *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
size_t nr_pages);
+int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off, u64 len,
+ int mf_flags);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
-vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
- pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
+ unsigned long *pfnp, int *errp,
+ const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
- enum page_entry_size pe_size, pfn_t pfn);
+ unsigned int order, unsigned long pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
+void dax_delete_mapping_range(struct address_space *mapping,
+ loff_t start, loff_t end);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
-s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap);
+int __must_check dax_break_layout(struct inode *inode, loff_t start,
+ loff_t end, void (cb)(struct inode *));
+static inline int __must_check dax_break_layout_inode(struct inode *inode,
+ void (cb)(struct inode *))
+{
+ return dax_break_layout(inode, 0, LLONG_MAX, cb);
+}
+void dax_break_layout_final(struct inode *inode);
+int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+ struct inode *dest, loff_t destoff,
+ loff_t len, bool *is_same,
+ const struct iomap_ops *ops);
+int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags,
+ const struct iomap_ops *ops);
static inline bool dax_mapping(struct address_space *mapping)
{
return mapping->host && IS_DAX(mapping->host);
}
+/*
+ * Due to dax's dual memory and block personalities, hwpoison reporting
+ * takes into consideration which personality is presently visible.
+ * When dax acts like a block device, such as in block IO, an encounter
+ * with dax hwpoison is reported as -EIO.
+ * When dax acts like memory, such as in a page fault, a detection of
+ * hwpoison is reported as -EHWPOISON, which leads to VM_FAULT_HWPOISON.
+ */
+static inline int dax_mem2blk_err(int err)
+{
+ return (err == -EHWPOISON) ? -EIO : err;
+}
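/*
 * Editorial sketch: a hypothetical filesystem read path on the block
 * personality would translate the memory-style error before returning
 * it to userspace, per the comment above.
 */
static ssize_t hypothetical_dax_read_iter(struct kiocb *iocb,
					  struct iov_iter *to,
					  const struct iomap_ops *ops)
{
	ssize_t ret = dax_iomap_rw(iocb, to, ops);

	return ret < 0 ? dax_mem2blk_err(ret) : ret;
}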
+
#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
-void hmem_register_device(int target_nid, struct resource *r);
+void hmem_register_resource(int target_nid, struct resource *r);
#else
-static inline void hmem_register_device(int target_nid, struct resource *r)
+static inline void hmem_register_resource(int target_nid, struct resource *r)
{
}
#endif
+typedef int (*walk_hmem_fn)(struct device *dev, int target_nid,
+ const struct resource *res);
+int walk_hmem_resources(struct device *dev, walk_hmem_fn fn);
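/*
 * Editorial sketch of a walk_hmem_fn callback (names hypothetical):
 * walk_hmem_resources() invokes it once per hmem resource, and a
 * non-zero return stops the walk.
 */
static int hypothetical_hmem_cb(struct device *dev, int target_nid,
				const struct resource *res)
{
	dev_info(dev, "hmem range %pR targets node %d\n", res, target_nid);
	return 0;
}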
#endif
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 9e23d33bb6f1..898c60d21c92 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -16,6 +16,7 @@
#include <linux/wait.h>
struct path;
+struct file;
struct vfsmount;
/*
@@ -56,37 +57,37 @@ struct qstr {
};
#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
+#define QSTR_LEN(n,l) (struct qstr)QSTR_INIT(n,l)
+#define QSTR(n) QSTR_LEN(n, strlen(n))
extern const struct qstr empty_name;
extern const struct qstr slash_name;
extern const struct qstr dotdot_name;
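/*
 * Editorial note: QSTR() builds an on-stack qstr from a C string, which
 * is handy for one-off allocations and lookups, e.g. (illustrative):
 *
 *	struct dentry *child = d_alloc(parent, &QSTR("lost+found"));
 */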
-struct dentry_stat_t {
- long nr_dentry;
- long nr_unused;
- long age_limit; /* age in seconds */
- long want_pages; /* pages requested by system */
- long nr_negative; /* # of unused negative dentries */
- long dummy; /* Reserved for future use */
-};
-extern struct dentry_stat_t dentry_stat;
-
/*
* Try to keep struct dentry aligned on 64 byte cachelines (this will
* give reasonable cacheline footprint with larger lines without the
* large memory footprint increase).
*/
#ifdef CONFIG_64BIT
-# define DNAME_INLINE_LEN 32 /* 192 bytes */
+# define DNAME_INLINE_WORDS 5 /* 192 bytes */
#else
# ifdef CONFIG_SMP
-# define DNAME_INLINE_LEN 36 /* 128 bytes */
+# define DNAME_INLINE_WORDS 9 /* 128 bytes */
# else
-# define DNAME_INLINE_LEN 40 /* 128 bytes */
+# define DNAME_INLINE_WORDS 11 /* 128 bytes */
# endif
#endif
+#define DNAME_INLINE_LEN (DNAME_INLINE_WORDS*sizeof(unsigned long))
+
+union shortname_store {
+ unsigned char string[DNAME_INLINE_LEN];
+ unsigned long words[DNAME_INLINE_WORDS];
+};
+
#define d_lock d_lockref.lock
+#define d_iname d_shortname.string
struct dentry {
/* RCU lookup touched fields */
@@ -94,24 +95,32 @@ struct dentry {
seqcount_spinlock_t d_seq; /* per dentry seqlock */
struct hlist_bl_node d_hash; /* lookup hash list */
struct dentry *d_parent; /* parent directory */
- struct qstr d_name;
+ union {
+ struct qstr __d_name; /* for use ONLY in fs/dcache.c */
+ const struct qstr d_name;
+ };
struct inode *d_inode; /* Where the name belongs to - NULL is
* negative */
- unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */
+ union shortname_store d_shortname;
+ /* --- cacheline 1 boundary (64 bytes) was 32 bytes ago --- */
/* Ref lookup also touches following */
- struct lockref d_lockref; /* per-dentry lock and refcount */
const struct dentry_operations *d_op;
struct super_block *d_sb; /* The root of the dentry tree */
unsigned long d_time; /* used by d_revalidate */
void *d_fsdata; /* fs-specific data */
+ /* --- cacheline 2 boundary (128 bytes) --- */
+ struct lockref d_lockref; /* per-dentry lock and refcount
+ * keep separate from RCU lookup area if
+ * possible!
+ */
union {
struct list_head d_lru; /* LRU list */
wait_queue_head_t *d_wait; /* in-lookup ones only */
};
- struct list_head d_child; /* child of parent list */
- struct list_head d_subdirs; /* our children */
+ struct hlist_node d_sib; /* child of parent list */
+ struct hlist_head d_children; /* our children */
/*
* d_alias and d_rcu can share memory
*/
@@ -120,7 +129,7 @@ struct dentry {
struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */
struct rcu_head d_rcu;
} d_u;
-} __randomize_layout;
+};
/*
* dentry->d_lock spinlock nesting subclasses:
@@ -134,8 +143,14 @@ enum dentry_d_lock_class
DENTRY_D_LOCK_NESTED
};
+enum d_real_type {
+ D_REAL_DATA,
+ D_REAL_METADATA,
+};
+
struct dentry_operations {
- int (*d_revalidate)(struct dentry *, unsigned int);
+ int (*d_revalidate)(struct inode *, const struct qstr *,
+ struct dentry *, unsigned int);
int (*d_weak_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, struct qstr *);
int (*d_compare)(const struct dentry *,
@@ -148,7 +163,9 @@ struct dentry_operations {
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(const struct path *, bool);
- struct dentry *(*d_real)(struct dentry *, const struct inode *);
+ struct dentry *(*d_real)(struct dentry *, enum d_real_type type);
+ bool (*d_unalias_trylock)(const struct dentry *);
+ void (*d_unalias_unlock)(const struct dentry *);
} ____cacheline_aligned;
/*
@@ -160,68 +177,59 @@ struct dentry_operations {
*/
/* d_flags entries */
-#define DCACHE_OP_HASH 0x00000001
-#define DCACHE_OP_COMPARE 0x00000002
-#define DCACHE_OP_REVALIDATE 0x00000004
-#define DCACHE_OP_DELETE 0x00000008
-#define DCACHE_OP_PRUNE 0x00000010
-
-#define DCACHE_DISCONNECTED 0x00000020
- /* This dentry is possibly not currently connected to the dcache tree, in
- * which case its parent will either be itself, or will have this flag as
- * well. nfsd will not use a dentry with this bit set, but will first
- * endeavour to clear the bit either by discovering that it is connected,
- * or by performing lookup operations. Any filesystem which supports
- * nfsd_operations MUST have a lookup function which, if it finds a
- * directory inode with a DCACHE_DISCONNECTED dentry, will d_move that
- * dentry into place and return that dentry rather than the passed one,
- * typically using d_splice_alias. */
-
-#define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */
-
-#define DCACHE_DONTCACHE 0x00000080 /* Purge from memory on final dput() */
-
-#define DCACHE_CANT_MOUNT 0x00000100
-#define DCACHE_GENOCIDE 0x00000200
-#define DCACHE_SHRINK_LIST 0x00000400
-
-#define DCACHE_OP_WEAK_REVALIDATE 0x00000800
-
-#define DCACHE_NFSFS_RENAMED 0x00001000
- /* this dentry has been "silly renamed" and has to be deleted on the last
- * dput() */
-#define DCACHE_COOKIE 0x00002000 /* For use by dcookie subsystem */
-#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x00004000
- /* Parent inode is watched by some fsnotify listener */
-
-#define DCACHE_DENTRY_KILLED 0x00008000
-
-#define DCACHE_MOUNTED 0x00010000 /* is a mountpoint */
-#define DCACHE_NEED_AUTOMOUNT 0x00020000 /* handle automount on this dir */
-#define DCACHE_MANAGE_TRANSIT 0x00040000 /* manage transit from this dirent */
+enum dentry_flags {
+ DCACHE_OP_HASH = BIT(0),
+ DCACHE_OP_COMPARE = BIT(1),
+ DCACHE_OP_REVALIDATE = BIT(2),
+ DCACHE_OP_DELETE = BIT(3),
+ DCACHE_OP_PRUNE = BIT(4),
+ /*
+ * This dentry is possibly not currently connected to the dcache tree,
+ * in which case its parent will either be itself, or will have this
+ * flag as well. nfsd will not use a dentry with this bit set, but will
+ * first endeavour to clear the bit either by discovering that it is
+ * connected, or by performing lookup operations. Any filesystem which
+ * supports nfsd_operations MUST have a lookup function which, if it
+ * finds a directory inode with a DCACHE_DISCONNECTED dentry, will
+ * d_move that dentry into place and return that dentry rather than the
+ * passed one, typically using d_splice_alias.
+ */
+ DCACHE_DISCONNECTED = BIT(5),
+ DCACHE_REFERENCED = BIT(6), /* Recently used, don't discard. */
+ DCACHE_DONTCACHE = BIT(7), /* Purge from memory on final dput() */
+ DCACHE_CANT_MOUNT = BIT(8),
+ DCACHE_SHRINK_LIST = BIT(10),
+ DCACHE_OP_WEAK_REVALIDATE = BIT(11),
+ /*
+ * this dentry has been "silly renamed" and has to be deleted on the
+ * last dput()
+ */
+ DCACHE_NFSFS_RENAMED = BIT(12),
+ DCACHE_FSNOTIFY_PARENT_WATCHED = BIT(13), /* Parent inode is watched by some fsnotify listener */
+ DCACHE_DENTRY_KILLED = BIT(14),
+ DCACHE_MOUNTED = BIT(15), /* is a mountpoint */
+ DCACHE_NEED_AUTOMOUNT = BIT(16), /* handle automount on this dir */
+ DCACHE_MANAGE_TRANSIT = BIT(17), /* manage transit from this dirent */
+ DCACHE_LRU_LIST = BIT(18),
+ DCACHE_ENTRY_TYPE = (7 << 19), /* bits 19..21 are for storing type: */
+ DCACHE_MISS_TYPE = (0 << 19), /* Negative dentry */
+ DCACHE_WHITEOUT_TYPE = (1 << 19), /* Whiteout dentry (stop pathwalk) */
+ DCACHE_DIRECTORY_TYPE = (2 << 19), /* Normal directory */
+ DCACHE_AUTODIR_TYPE = (3 << 19), /* Lookupless directory (presumed automount) */
+ DCACHE_REGULAR_TYPE = (4 << 19), /* Regular file type */
+ DCACHE_SPECIAL_TYPE = (5 << 19), /* Other file type */
+ DCACHE_SYMLINK_TYPE = (6 << 19), /* Symlink */
+ DCACHE_NOKEY_NAME = BIT(22), /* Encrypted name encoded without key */
+ DCACHE_OP_REAL = BIT(23),
+ DCACHE_PAR_LOOKUP = BIT(24), /* being looked up (with parent locked shared) */
+ DCACHE_DENTRY_CURSOR = BIT(25),
+ DCACHE_NORCU = BIT(26), /* No RCU delay for freeing */
+ DCACHE_PERSISTENT = BIT(27)
+};
+
#define DCACHE_MANAGED_DENTRY \
(DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
-#define DCACHE_LRU_LIST 0x00080000
-
-#define DCACHE_ENTRY_TYPE 0x00700000
-#define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry (maybe fallthru to nowhere) */
-#define DCACHE_WHITEOUT_TYPE 0x00100000 /* Whiteout dentry (stop pathwalk) */
-#define DCACHE_DIRECTORY_TYPE 0x00200000 /* Normal directory */
-#define DCACHE_AUTODIR_TYPE 0x00300000 /* Lookupless directory (presumed automount) */
-#define DCACHE_REGULAR_TYPE 0x00400000 /* Regular file type (or fallthru to such) */
-#define DCACHE_SPECIAL_TYPE 0x00500000 /* Other file type (or fallthru to such) */
-#define DCACHE_SYMLINK_TYPE 0x00600000 /* Symlink (or fallthru to such) */
-
-#define DCACHE_MAY_FREE 0x00800000
-#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
-#define DCACHE_NOKEY_NAME 0x02000000 /* Encrypted name encoded without key */
-#define DCACHE_OP_REAL 0x04000000
-
-#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
-#define DCACHE_DENTRY_CURSOR 0x20000000
-#define DCACHE_NORCU 0x40000000 /* No RCU delay for freeing */
-
extern seqlock_t rename_lock;
/*
@@ -229,12 +237,9 @@ extern seqlock_t rename_lock;
*/
extern void d_instantiate(struct dentry *, struct inode *);
extern void d_instantiate_new(struct dentry *, struct inode *);
-extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
-extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
-extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op);
/* allocate/de-allocate */
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
@@ -242,26 +247,29 @@ extern struct dentry * d_alloc_anon(struct super_block *);
extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
wait_queue_head_t *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
+/* weird procfs mess; *NOT* exported */
+extern struct dentry * d_splice_alias_ops(struct inode *, struct dentry *,
+ const struct dentry_operations *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
-extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
+extern bool d_same_name(const struct dentry *dentry, const struct dentry *parent,
+ const struct qstr *name);
extern struct dentry *d_find_any_alias(struct inode *inode);
extern struct dentry * d_obtain_alias(struct inode *);
extern struct dentry * d_obtain_root(struct inode *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
-extern void shrink_dcache_for_umount(struct super_block *);
extern void d_invalidate(struct dentry *);
/* only used at mount-time */
extern struct dentry * d_make_root(struct inode *);
-/* <clickety>-<click> the ramfs-type tree */
-extern void d_genocide(struct dentry *);
-
-extern void d_tmpfile(struct dentry *, struct inode *);
+extern void d_mark_tmpfile(struct file *, struct inode *);
+extern void d_tmpfile(struct file *, struct inode *);
extern struct dentry *d_find_alias(struct inode *);
extern void d_prune_aliases(struct inode *);
+extern void d_dispose_if_unused(struct dentry *, struct list_head *);
+extern void shrink_dentry_list(struct list_head *);
extern struct dentry *d_find_alias_rcu(struct inode *);
@@ -280,23 +288,20 @@ extern void d_move(struct dentry *, struct dentry *);
extern void d_exchange(struct dentry *, struct dentry *);
extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
-/* appendix may either be NULL or be used for transname suffixes */
extern struct dentry *d_lookup(const struct dentry *, const struct qstr *);
-extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
-extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *);
-extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
- const struct qstr *name, unsigned *seq);
static inline unsigned d_count(const struct dentry *dentry)
{
return dentry->d_lockref.count;
}
+ino_t d_parent_ino(struct dentry *dentry);
+
/*
* helper function for dentry_operations.d_dname() members
*/
-extern __printf(4, 5)
-char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern __printf(3, 4)
+char *dynamic_dname(char *, int, const char *, ...);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
@@ -307,20 +312,40 @@ extern char *dentry_path(const struct dentry *, char *, int);
/* Allocation counts.. */
/**
- * dget, dget_dlock - get a reference to a dentry
- * @dentry: dentry to get a reference to
+ * dget_dlock - get a reference to a dentry
+ * @dentry: dentry to get a reference to
*
- * Given a dentry or %NULL pointer increment the reference count
- * if appropriate and return the dentry. A dentry will not be
- * destroyed when it has references.
+ * Given a live dentry, increment the reference count and return the dentry.
+ * Caller must hold @dentry->d_lock. Making sure that dentry is alive is
+ * the caller's responsibility. There are many conditions sufficient to guarantee
+ * that; e.g. anything with non-negative refcount is alive, so's anything
+ * hashed, anything positive, anyone's parent, etc.
*/
static inline struct dentry *dget_dlock(struct dentry *dentry)
{
- if (dentry)
- dentry->d_lockref.count++;
+ dentry->d_lockref.count++;
return dentry;
}
+
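/*
 * Editorial sketch: the typical dget_dlock() pattern takes the
 * reference while ->d_lock is already held for another reason,
 * e.g. (illustrative):
 *
 *	spin_lock(&dentry->d_lock);
 *	if (simple_positive(dentry))
 *		found = dget_dlock(dentry);
 *	spin_unlock(&dentry->d_lock);
 */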
+/**
+ * dget - get a reference to a dentry
+ * @dentry: dentry to get a reference to
+ *
+ * Given a dentry or %NULL pointer increment the reference count
+ * if appropriate and return the dentry. A dentry will not be
+ * destroyed when it has references. Conversely, a dentry with
+ * no references can disappear for any number of reasons, starting
+ * with memory pressure. In other words, that primitive is
+ * used to clone an existing reference; using it on something with
+ * zero refcount is a bug.
+ *
+ * NOTE: it will spin if @dentry->d_lock is held. From the deadlock
+ * avoidance point of view it is equivalent to spin_lock()/increment
+ * refcount/spin_unlock(), so calling it under @dentry->d_lock is
+ * always a bug; so's calling it under ->d_lock on any of its descendants.
+ *
+ */
static inline struct dentry *dget(struct dentry *dentry)
{
if (dentry)
@@ -331,12 +356,11 @@ static inline struct dentry *dget(struct dentry *dentry)
extern struct dentry *dget_parent(struct dentry *dentry);
/**
- * d_unhashed - is dentry hashed
- * @dentry: entry to check
+ * d_unhashed - is dentry hashed
+ * @dentry: entry to check
*
- * Returns true if the dentry passed is not currently hashed.
+ * Returns true if the dentry passed is not currently hashed.
*/
-
static inline int d_unhashed(const struct dentry *dentry)
{
return hlist_bl_unhashed(&dentry->d_hash);
@@ -359,7 +383,7 @@ static inline void dont_mount(struct dentry *dentry)
spin_unlock(&dentry->d_lock);
}
-extern void __d_lookup_done(struct dentry *);
+extern void __d_lookup_unhash_wake(struct dentry *dentry);
static inline int d_in_lookup(const struct dentry *dentry)
{
@@ -368,11 +392,8 @@ static inline int d_in_lookup(const struct dentry *dentry)
static inline void d_lookup_done(struct dentry *dentry)
{
- if (unlikely(d_in_lookup(dentry))) {
- spin_lock(&dentry->d_lock);
- __d_lookup_done(dentry);
- spin_unlock(&dentry->d_lock);
- }
+ if (unlikely(d_in_lookup(dentry)))
+ __d_lookup_unhash_wake(dentry);
}
extern void dput(struct dentry *);
@@ -499,20 +520,7 @@ static inline int simple_positive(const struct dentry *dentry)
return d_really_is_positive(dentry) && !d_unhashed(dentry);
}
-extern void d_set_fallthru(struct dentry *dentry);
-
-static inline bool d_is_fallthru(const struct dentry *dentry)
-{
- return dentry->d_flags & DCACHE_FALLTHRU;
-}
-
-
-extern int sysctl_vfs_cache_pressure;
-
-static inline unsigned long vfs_pressure_ratio(unsigned long val)
-{
- return mult_frac(val, sysctl_vfs_cache_pressure, 100);
-}
+unsigned long vfs_pressure_ratio(unsigned long val);
/**
* d_inode - Get the actual inode of this dentry
@@ -556,41 +564,25 @@ static inline struct inode *d_backing_inode(const struct dentry *upper)
}
/**
- * d_backing_dentry - Get upper or lower dentry we should be using
- * @upper: The upper layer
- *
- * This is the helper that should be used to get the dentry of the inode that
- * will be used if this dentry were opened as a file. It may be the upper
- * dentry or it may be a lower dentry pinned by the upper.
- *
- * Normal filesystems should not use this to access their own dentries.
- */
-static inline struct dentry *d_backing_dentry(struct dentry *upper)
-{
- return upper;
-}
-
-/**
* d_real - Return the real dentry
* @dentry: the dentry to query
- * @inode: inode to select the dentry from multiple layers (can be NULL)
+ * @type: the type of real dentry (data or metadata)
*
* If dentry is on a union/overlay, then return the underlying, real dentry.
* Otherwise return the dentry itself.
*
* See also: Documentation/filesystems/vfs.rst
*/
-static inline struct dentry *d_real(struct dentry *dentry,
- const struct inode *inode)
+static inline struct dentry *d_real(struct dentry *dentry, enum d_real_type type)
{
if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
- return dentry->d_op->d_real(dentry, inode);
+ return dentry->d_op->d_real(dentry, type);
else
return dentry;
}
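/*
 * Editorial note: a caller that needs the dentry actually backing the
 * file contents on an overlay would use (illustrative):
 *
 *	struct dentry *real = d_real(dentry, D_REAL_DATA);
 */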
/**
- * d_real_inode - Return the real inode
+ * d_real_inode - Return the real inode hosting the data
* @dentry: The dentry to query
*
* If dentry is on a union/overlay, then return the underlying, real inode.
@@ -599,14 +591,28 @@ static inline struct dentry *d_real(struct dentry *dentry,
static inline struct inode *d_real_inode(const struct dentry *dentry)
{
/* This usage of d_real() results in const dentry */
- return d_backing_inode(d_real((struct dentry *) dentry, NULL));
+ return d_inode(d_real((struct dentry *) dentry, D_REAL_DATA));
}
struct name_snapshot {
struct qstr name;
- unsigned char inline_name[DNAME_INLINE_LEN];
+ union shortname_store inline_name;
};
void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
void release_dentry_name_snapshot(struct name_snapshot *);
+static inline struct dentry *d_first_child(const struct dentry *dentry)
+{
+ return hlist_entry_safe(dentry->d_children.first, struct dentry, d_sib);
+}
+
+static inline struct dentry *d_next_sibling(const struct dentry *dentry)
+{
+ return hlist_entry_safe(dentry->d_sib.next, struct dentry, d_sib);
+}
+
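/*
 * Editorial sketch: with d_sib/d_children replacing the old
 * d_child/d_subdirs lists, walking all children of a dentry becomes
 * (locking via the parent's ->d_lock or rename_lock is the caller's
 * responsibility and is omitted):
 *
 *	struct dentry *child;
 *
 *	for (child = d_first_child(parent); child;
 *	     child = d_next_sibling(child)) {
 *		...
 *	}
 */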
+void set_default_d_op(struct super_block *, const struct dentry_operations *);
+struct dentry *d_make_persistent(struct dentry *, struct inode *);
+void d_make_discardable(struct dentry *dentry);
+
#endif /* __LINUX_DCACHE_H */
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 07e547c02fd8..0b61b8b996d4 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -2,79 +2,8 @@
#ifndef _LINUX_DCCP_H
#define _LINUX_DCCP_H
-
-#include <linux/in.h>
-#include <linux/interrupt.h>
-#include <linux/ktime.h>
-#include <linux/list.h>
-#include <linux/uio.h>
-#include <linux/workqueue.h>
-
-#include <net/inet_connection_sock.h>
-#include <net/inet_sock.h>
-#include <net/inet_timewait_sock.h>
-#include <net/tcp_states.h>
#include <uapi/linux/dccp.h>
-enum dccp_state {
- DCCP_OPEN = TCP_ESTABLISHED,
- DCCP_REQUESTING = TCP_SYN_SENT,
- DCCP_LISTEN = TCP_LISTEN,
- DCCP_RESPOND = TCP_SYN_RECV,
- /*
- * States involved in closing a DCCP connection:
- * 1) ACTIVE_CLOSEREQ is entered by a server sending a CloseReq.
- *
- * 2) CLOSING can have three different meanings (RFC 4340, 8.3):
- * a. Client has performed active-close, has sent a Close to the server
- * from state OPEN or PARTOPEN, and is waiting for the final Reset
- * (in this case, SOCK_DONE == 1).
- * b. Client is asked to perform passive-close, by receiving a CloseReq
- * in (PART)OPEN state. It sends a Close and waits for final Reset
- * (in this case, SOCK_DONE == 0).
- * c. Server performs an active-close as in (a), keeps TIMEWAIT state.
- *
- * 3) The following intermediate states are employed to give passively
- * closing nodes a chance to process their unread data:
- * - PASSIVE_CLOSE (from OPEN => CLOSED) and
- * - PASSIVE_CLOSEREQ (from (PART)OPEN to CLOSING; case (b) above).
- */
- DCCP_ACTIVE_CLOSEREQ = TCP_FIN_WAIT1,
- DCCP_PASSIVE_CLOSE = TCP_CLOSE_WAIT, /* any node receiving a Close */
- DCCP_CLOSING = TCP_CLOSING,
- DCCP_TIME_WAIT = TCP_TIME_WAIT,
- DCCP_CLOSED = TCP_CLOSE,
- DCCP_NEW_SYN_RECV = TCP_NEW_SYN_RECV,
- DCCP_PARTOPEN = TCP_MAX_STATES,
- DCCP_PASSIVE_CLOSEREQ, /* clients receiving CloseReq */
- DCCP_MAX_STATES
-};
-
-enum {
- DCCPF_OPEN = TCPF_ESTABLISHED,
- DCCPF_REQUESTING = TCPF_SYN_SENT,
- DCCPF_LISTEN = TCPF_LISTEN,
- DCCPF_RESPOND = TCPF_SYN_RECV,
- DCCPF_ACTIVE_CLOSEREQ = TCPF_FIN_WAIT1,
- DCCPF_CLOSING = TCPF_CLOSING,
- DCCPF_TIME_WAIT = TCPF_TIME_WAIT,
- DCCPF_CLOSED = TCPF_CLOSE,
- DCCPF_NEW_SYN_RECV = TCPF_NEW_SYN_RECV,
- DCCPF_PARTOPEN = (1 << DCCP_PARTOPEN),
-};
-
-static inline struct dccp_hdr *dccp_hdr(const struct sk_buff *skb)
-{
- return (struct dccp_hdr *)skb_transport_header(skb);
-}
-
-static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen)
-{
- skb_push(skb, headlen);
- skb_reset_transport_header(skb);
- return memset(skb_transport_header(skb), 0, headlen);
-}
-
static inline struct dccp_hdr_ext *dccp_hdrx(const struct dccp_hdr *dh)
{
return (struct dccp_hdr_ext *)((unsigned char *)dh + sizeof(*dh));
@@ -85,12 +14,6 @@ static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh)
return sizeof(*dh) + (dh->dccph_x ? sizeof(struct dccp_hdr_ext) : 0);
}
-static inline unsigned int dccp_basic_hdr_len(const struct sk_buff *skb)
-{
- const struct dccp_hdr *dh = dccp_hdr(skb);
- return __dccp_basic_hdr_len(dh);
-}
-
static inline __u64 dccp_hdr_seq(const struct dccp_hdr *dh)
{
__u64 seq_nr = ntohs(dh->dccph_seq);
@@ -103,224 +26,10 @@ static inline __u64 dccp_hdr_seq(const struct dccp_hdr *dh)
return seq_nr;
}
-static inline struct dccp_hdr_request *dccp_hdr_request(struct sk_buff *skb)
-{
- return (struct dccp_hdr_request *)(skb_transport_header(skb) +
- dccp_basic_hdr_len(skb));
-}
-
-static inline struct dccp_hdr_ack_bits *dccp_hdr_ack_bits(const struct sk_buff *skb)
-{
- return (struct dccp_hdr_ack_bits *)(skb_transport_header(skb) +
- dccp_basic_hdr_len(skb));
-}
-
-static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb)
-{
- const struct dccp_hdr_ack_bits *dhack = dccp_hdr_ack_bits(skb);
- return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) + ntohl(dhack->dccph_ack_nr_low);
-}
-
-static inline struct dccp_hdr_response *dccp_hdr_response(struct sk_buff *skb)
-{
- return (struct dccp_hdr_response *)(skb_transport_header(skb) +
- dccp_basic_hdr_len(skb));
-}
-
-static inline struct dccp_hdr_reset *dccp_hdr_reset(struct sk_buff *skb)
-{
- return (struct dccp_hdr_reset *)(skb_transport_header(skb) +
- dccp_basic_hdr_len(skb));
-}
-
static inline unsigned int __dccp_hdr_len(const struct dccp_hdr *dh)
{
return __dccp_basic_hdr_len(dh) +
dccp_packet_hdr_len(dh->dccph_type);
}
-static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
-{
- return __dccp_hdr_len(dccp_hdr(skb));
-}
-
-/**
- * struct dccp_request_sock - represent DCCP-specific connection request
- * @dreq_inet_rsk: structure inherited from
- * @dreq_iss: initial sequence number, sent on the first Response (RFC 4340, 7.1)
- * @dreq_gss: greatest sequence number sent (for retransmitted Responses)
- * @dreq_isr: initial sequence number received in the first Request
- * @dreq_gsr: greatest sequence number received (for retransmitted Request(s))
- * @dreq_service: service code present on the Request (there is just one)
- * @dreq_featneg: feature negotiation options for this connection
- * The following two fields are analogous to the ones in dccp_sock:
- * @dreq_timestamp_echo: last received timestamp to echo (13.1)
- * @dreq_timestamp_echo: the time of receiving the last @dreq_timestamp_echo
- */
-struct dccp_request_sock {
- struct inet_request_sock dreq_inet_rsk;
- __u64 dreq_iss;
- __u64 dreq_gss;
- __u64 dreq_isr;
- __u64 dreq_gsr;
- __be32 dreq_service;
- spinlock_t dreq_lock;
- struct list_head dreq_featneg;
- __u32 dreq_timestamp_echo;
- __u32 dreq_timestamp_time;
-};
-
-static inline struct dccp_request_sock *dccp_rsk(const struct request_sock *req)
-{
- return (struct dccp_request_sock *)req;
-}
-
-extern struct inet_timewait_death_row dccp_death_row;
-
-extern int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
- struct sk_buff *skb);
-
-struct dccp_options_received {
- u64 dccpor_ndp:48;
- u32 dccpor_timestamp;
- u32 dccpor_timestamp_echo;
- u32 dccpor_elapsed_time;
-};
-
-struct ccid;
-
-enum dccp_role {
- DCCP_ROLE_UNDEFINED,
- DCCP_ROLE_LISTEN,
- DCCP_ROLE_CLIENT,
- DCCP_ROLE_SERVER,
-};
-
-struct dccp_service_list {
- __u32 dccpsl_nr;
- __be32 dccpsl_list[];
-};
-
-#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
-#define DCCP_SERVICE_CODE_IS_ABSENT 0
-
-static inline bool dccp_list_has_service(const struct dccp_service_list *sl,
- const __be32 service)
-{
- if (likely(sl != NULL)) {
- u32 i = sl->dccpsl_nr;
- while (i--)
- if (sl->dccpsl_list[i] == service)
- return true;
- }
- return false;
-}
-
-struct dccp_ackvec;
-
-/**
- * struct dccp_sock - DCCP socket state
- *
- * @dccps_swl - sequence number window low
- * @dccps_swh - sequence number window high
- * @dccps_awl - acknowledgement number window low
- * @dccps_awh - acknowledgement number window high
- * @dccps_iss - initial sequence number sent
- * @dccps_isr - initial sequence number received
- * @dccps_osr - first OPEN sequence number received
- * @dccps_gss - greatest sequence number sent
- * @dccps_gsr - greatest valid sequence number received
- * @dccps_gar - greatest valid ack number received on a non-Sync; initialized to %dccps_iss
- * @dccps_service - first (passive sock) or unique (active sock) service code
- * @dccps_service_list - second .. last service code on passive socket
- * @dccps_timestamp_echo - latest timestamp received on a TIMESTAMP option
- * @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo
- * @dccps_l_ack_ratio - feature-local Ack Ratio
- * @dccps_r_ack_ratio - feature-remote Ack Ratio
- * @dccps_l_seq_win - local Sequence Window (influences ack number validity)
- * @dccps_r_seq_win - remote Sequence Window (influences seq number validity)
- * @dccps_pcslen - sender partial checksum coverage (via sockopt)
- * @dccps_pcrlen - receiver partial checksum coverage (via sockopt)
- * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2)
- * @dccps_ndp_count - number of Non Data Packets since last data packet
- * @dccps_mss_cache - current value of MSS (path MTU minus header sizes)
- * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4)
- * @dccps_featneg - tracks feature-negotiation state (mostly during handshake)
- * @dccps_hc_rx_ackvec - rx half connection ack vector
- * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
- * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection)
- * @dccps_options_received - parsed set of retrieved options
- * @dccps_qpolicy - TX dequeueing policy, one of %dccp_packet_dequeueing_policy
- * @dccps_tx_qlen - maximum length of the TX queue
- * @dccps_role - role of this sock, one of %dccp_role
- * @dccps_hc_rx_insert_options - receiver wants to add options when acking
- * @dccps_hc_tx_insert_options - sender wants to add options when sending
- * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3)
- * @dccps_sync_scheduled - flag which signals "send out-of-band message soon"
- * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets
- * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing)
- * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs)
- */
-struct dccp_sock {
- /* inet_connection_sock has to be the first member of dccp_sock */
- struct inet_connection_sock dccps_inet_connection;
-#define dccps_syn_rtt dccps_inet_connection.icsk_ack.lrcvtime
- __u64 dccps_swl;
- __u64 dccps_swh;
- __u64 dccps_awl;
- __u64 dccps_awh;
- __u64 dccps_iss;
- __u64 dccps_isr;
- __u64 dccps_osr;
- __u64 dccps_gss;
- __u64 dccps_gsr;
- __u64 dccps_gar;
- __be32 dccps_service;
- __u32 dccps_mss_cache;
- struct dccp_service_list *dccps_service_list;
- __u32 dccps_timestamp_echo;
- __u32 dccps_timestamp_time;
- __u16 dccps_l_ack_ratio;
- __u16 dccps_r_ack_ratio;
- __u64 dccps_l_seq_win:48;
- __u64 dccps_r_seq_win:48;
- __u8 dccps_pcslen:4;
- __u8 dccps_pcrlen:4;
- __u8 dccps_send_ndp_count:1;
- __u64 dccps_ndp_count:48;
- unsigned long dccps_rate_last;
- struct list_head dccps_featneg;
- struct dccp_ackvec *dccps_hc_rx_ackvec;
- struct ccid *dccps_hc_rx_ccid;
- struct ccid *dccps_hc_tx_ccid;
- struct dccp_options_received dccps_options_received;
- __u8 dccps_qpolicy;
- __u32 dccps_tx_qlen;
- enum dccp_role dccps_role:2;
- __u8 dccps_hc_rx_insert_options:1;
- __u8 dccps_hc_tx_insert_options:1;
- __u8 dccps_server_timewait:1;
- __u8 dccps_sync_scheduled:1;
- struct tasklet_struct dccps_xmitlet;
- struct timer_list dccps_xmit_timer;
-};
-
-static inline struct dccp_sock *dccp_sk(const struct sock *sk)
-{
- return (struct dccp_sock *)sk;
-}
-
-static inline const char *dccp_role(const struct sock *sk)
-{
- switch (dccp_sk(sk)->dccps_role) {
- case DCCP_ROLE_UNDEFINED: return "undefined";
- case DCCP_ROLE_LISTEN: return "listen";
- case DCCP_ROLE_SERVER: return "server";
- case DCCP_ROLE_CLIENT: return "client";
- }
- return NULL;
-}
-
-extern void dccp_syn_ack_timeout(const struct request_sock *req);
-
#endif /* _LINUX_DCCP_H */
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 2915f56ad421..dbb409d77d4f 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -3,8 +3,7 @@
#define __LINUX_DEBUG_LOCKING_H
#include <linux/atomic.h>
-#include <linux/bug.h>
-#include <linux/printk.h>
+#include <linux/cache.h>
struct task_struct;
@@ -27,8 +26,10 @@ extern int debug_locks_off(void);
int __ret = 0; \
\
if (!oops_in_progress && unlikely(c)) { \
+ instrumentation_begin(); \
if (debug_locks_off() && !debug_locks_silent) \
WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c); \
+ instrumentation_end(); \
__ret = 1; \
} \
__ret; \
@@ -46,8 +47,6 @@ extern int debug_locks_off(void);
# define locking_selftest() do { } while (0)
#endif
-struct task_struct;
-
#ifdef CONFIG_LOCKDEP
extern void debug_show_all_locks(void);
extern void debug_show_held_locks(struct task_struct *task);
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 1fdb4343af9c..7cecda29447e 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -45,7 +45,7 @@ struct debugfs_u32_array {
extern struct dentry *arch_debugfs_dir;
-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+#define DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
@@ -56,19 +56,88 @@ static const struct file_operations __fops = { \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
- .write = debugfs_attr_write, \
- .llseek = no_llseek, \
+ .write = (__is_signed) ? debugfs_attr_write_signed : debugfs_attr_write, \
}
+#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
+
+#define DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
+ DEFINE_DEBUGFS_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
+
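/*
 * Editorial sketch (driver names made up): a signed attribute is wired
 * up like an unsigned one, but the _SIGNED variant parses negative
 * values written from userspace.
 */
static int example_get(void *data, u64 *val)
{
	*val = *(s64 *)data;
	return 0;
}

static int example_set(void *data, u64 val)
{
	*(s64 *)data = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(example_fops, example_get, example_set, "%lld\n");
/* paired with debugfs_create_file_unsafe("example", 0600, dir, &example_val, &example_fops); */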
typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
+struct debugfs_short_fops {
+ ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
+ ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
+ loff_t (*llseek) (struct file *, loff_t, int);
+};
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
-struct dentry *debugfs_create_file(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops);
+struct dentry *debugfs_create_file_full(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const void *aux,
+ const struct file_operations *fops);
+struct dentry *debugfs_create_file_short(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const void *aux,
+ const struct debugfs_short_fops *fops);
+
+/**
+ * debugfs_create_file - create a file in the debugfs filesystem
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have.
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is NULL, then the
+ * file will be created in the root of the debugfs filesystem.
+ * @data: a pointer to something that the caller will want to get to later
+ * on. The inode.i_private pointer will point to this value on
+ * the open() call.
+ * @fops: a pointer to a struct file_operations or struct debugfs_short_fops that
+ * should be used for this file.
+ *
+ * This is the basic "create a file" function for debugfs. It allows for a
+ * wide range of flexibility in creating a file, or a directory (if you want
+ * to create a directory, the debugfs_create_dir() function is
+ * recommended to be used instead.)
+ *
+ * This function will return a pointer to a dentry if it succeeds. This
+ * pointer must be passed to the debugfs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
+ * returned.
+ *
+ * If debugfs is not enabled in the kernel, the value -%ENODEV will be
+ * returned.
+ *
+ * If fops points to a struct debugfs_short_fops, then simple_open() will be
+ * used for the open, and only read/write/llseek are supported and are proxied,
+ * so no module reference or release hook is needed.
+ *
+ * NOTE: it's expected that most callers should _ignore_ the errors returned
+ * by this function. Other debugfs functions handle the fact that the "dentry"
+ * passed to them could be an error and they don't crash in that case.
+ * Drivers should generally work fine even if debugfs fails to init anyway.
+ */
+#define debugfs_create_file(name, mode, parent, data, fops) \
+ _Generic(fops, \
+ const struct file_operations *: debugfs_create_file_full, \
+ const struct debugfs_short_fops *: debugfs_create_file_short, \
+ struct file_operations *: debugfs_create_file_full, \
+ struct debugfs_short_fops *: debugfs_create_file_short) \
+ (name, mode, parent, data, NULL, fops)
+
+#define debugfs_create_file_aux(name, mode, parent, data, aux, fops) \
+ _Generic(fops, \
+ const struct file_operations *: debugfs_create_file_full, \
+ const struct debugfs_short_fops *: debugfs_create_file_short, \
+ struct file_operations *: debugfs_create_file_full, \
+ struct debugfs_short_fops *: debugfs_create_file_short) \
+ (name, mode, parent, data, aux, fops)
+
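/*
 * Editorial sketch: thanks to the _Generic() dispatch above, a simple
 * read-only file can pass a struct debugfs_short_fops and still call
 * plain debugfs_create_file() (all names below are illustrative).
 */
static ssize_t example_status_read(struct file *file, char __user *buf,
				   size_t count, loff_t *ppos)
{
	return 0; /* ... copy status to buf ... */
}

static const struct debugfs_short_fops example_status_fops = {
	.read = example_status_read,
};
/* debugfs_create_file("status", 0400, dir, priv, &example_status_fops); */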
struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
@@ -91,7 +160,9 @@ struct dentry *debugfs_create_automount(const char *name,
void debugfs_remove(struct dentry *dentry);
#define debugfs_remove_recursive debugfs_remove
-const struct file_operations *debugfs_real_fops(const struct file *filp);
+void debugfs_lookup_and_remove(const char *name, struct dentry *parent);
+
+void *debugfs_get_aux(const struct file *file);
int debugfs_file_get(struct dentry *dentry);
void debugfs_file_put(struct dentry *dentry);
@@ -100,9 +171,10 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos);
ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
+ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
-struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
- struct dentry *new_dir, const char *new_name);
+int debugfs_change_name(struct dentry *dentry, const char *fmt, ...) __printf(2, 3);
void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent,
u8 *value);
@@ -112,8 +184,8 @@ void debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent,
u32 *value);
void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent,
u64 *value);
-struct dentry *debugfs_create_ulong(const char *name, umode_t mode,
- struct dentry *parent, unsigned long *value);
+void debugfs_create_ulong(const char *name, umode_t mode, struct dentry *parent,
+ unsigned long *value);
void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent,
u8 *value);
void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent,
@@ -126,8 +198,8 @@ void debugfs_create_size_t(const char *name, umode_t mode,
struct dentry *parent, size_t *value);
void debugfs_create_atomic_t(const char *name, umode_t mode,
struct dentry *parent, atomic_t *value);
-struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent, bool *value);
+void debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent,
+ bool *value);
void debugfs_create_str(const char *name, umode_t mode,
struct dentry *parent, char **value);
@@ -161,6 +233,25 @@ ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf,
ssize_t debugfs_read_file_str(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos);
+/**
+ * struct debugfs_cancellation - cancellation data
+ * @list: internal, for keeping track
+ * @cancel: callback to call
+ * @cancel_data: extra data for the callback to call
+ */
+struct debugfs_cancellation {
+ struct list_head list;
+ void (*cancel)(struct dentry *, void *);
+ void *cancel_data;
+};
+
+void __acquires(cancellation)
+debugfs_enter_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation);
+void __releases(cancellation)
+debugfs_leave_cancellation(struct file *file,
+ struct debugfs_cancellation *cancellation);
+
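/*
 * Editorial sketch of the intended pairing (callback and helpers below
 * are hypothetical): a long-blocking read registers a cancellation so
 * debugfs removal can abort it.
 */
static ssize_t example_blocking_read(struct file *file, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct debugfs_cancellation c = {
		.cancel = example_cancel,		/* hypothetical */
		.cancel_data = file->private_data,
	};
	ssize_t ret;

	debugfs_enter_cancellation(file, &c);
	ret = example_wait_and_copy(file, buf, count, ppos); /* hypothetical */
	debugfs_leave_cancellation(file, &c);
	return ret;
}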
#else
#include <linux/err.h>
@@ -177,9 +268,17 @@ static inline struct dentry *debugfs_lookup(const char *name,
return ERR_PTR(-ENODEV);
}
+static inline struct dentry *debugfs_create_file_aux(const char *name,
+ umode_t mode, struct dentry *parent,
+ void *data, void *aux,
+ const void *fops)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
- const struct file_operations *fops)
+ const void *fops)
{
return ERR_PTR(-ENODEV);
}
@@ -225,7 +324,11 @@ static inline void debugfs_remove(struct dentry *dentry)
static inline void debugfs_remove_recursive(struct dentry *dentry)
{ }
-const struct file_operations *debugfs_real_fops(const struct file *filp);
+static inline void debugfs_lookup_and_remove(const char *name,
+ struct dentry *parent)
+{ }
+
+void *debugfs_get_aux(const struct file *file);
static inline int debugfs_file_get(struct dentry *dentry)
{
@@ -248,10 +351,17 @@ static inline ssize_t debugfs_attr_write(struct file *file,
return -ENODEV;
}
-static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
- struct dentry *new_dir, char *new_name)
+static inline ssize_t debugfs_attr_write_signed(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
{
- return ERR_PTR(-ENODEV);
+ return -ENODEV;
+}
+
+static inline int __printf(2, 3) debugfs_change_name(struct dentry *dentry,
+ const char *fmt, ...)
+{
+ return -ENODEV;
}
static inline void debugfs_create_u8(const char *name, umode_t mode,
@@ -266,13 +376,9 @@ static inline void debugfs_create_u32(const char *name, umode_t mode,
static inline void debugfs_create_u64(const char *name, umode_t mode,
struct dentry *parent, u64 *value) { }
-static inline struct dentry *debugfs_create_ulong(const char *name,
- umode_t mode,
- struct dentry *parent,
- unsigned long *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_ulong(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value) { }
static inline void debugfs_create_x8(const char *name, umode_t mode,
struct dentry *parent, u8 *value) { }
@@ -295,12 +401,8 @@ static inline void debugfs_create_atomic_t(const char *name, umode_t mode,
atomic_t *value)
{ }
-static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent,
- bool *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_bool(const char *name, umode_t mode,
+ struct dentry *parent, bool *value) { }
static inline void debugfs_create_str(const char *name, umode_t mode,
struct dentry *parent,
@@ -367,6 +469,11 @@ static inline ssize_t debugfs_read_file_str(struct file *file,
#endif
+#define debugfs_create_file_aux_num(name, mode, parent, data, n, fops) \
+ debugfs_create_file_aux(name, mode, parent, data, \
+ (void *)(unsigned long)n, fops)
+#define debugfs_get_aux_num(f) (unsigned long)debugfs_get_aux(f)
+
/**
* debugfs_create_xul - create a debugfs file that is used to read and write an
* unsigned long value, formatted in hexadecimal
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 8d2dde23e9fb..8b95545e7924 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -18,18 +18,22 @@ enum debug_obj_state {
struct debug_obj_descr;
/**
- * struct debug_obj - representaion of an tracked object
+ * struct debug_obj - representation of a tracked object
* @node: hlist node to link the object into the tracker list
* @state: tracked object state
* @astate: current active state
* @object: pointer to the real object
+ * @batch_last: pointer to the last hlist node in a batch
* @descr: pointer to an object type specific debug description structure
*/
struct debug_obj {
- struct hlist_node node;
- enum debug_obj_state state;
- unsigned int astate;
- void *object;
+ struct hlist_node node;
+ enum debug_obj_state state;
+ unsigned int astate;
+ union {
+ void *object;
+ struct hlist_node *batch_last;
+ };
const struct debug_obj_descr *descr;
};
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 868e9eacd69e..ac862422df15 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -25,13 +25,21 @@
#define STATIC_RW_DATA static
#endif
+/*
+ * When an architecture needs to share the malloc()/free() implementation
+ * between compilation units, it needs to have non-local visibility.
+ */
+#ifndef MALLOC_VISIBLE
+#define MALLOC_VISIBLE static
+#endif
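/*
 * Editorial note: an architecture that wants external malloc()/free()
 * symbols would define the macro as empty before including this header
 * (illustrative):
 *
 *	#define MALLOC_VISIBLE
 *	#include <linux/decompress/mm.h>
 */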
+
/* A trivial malloc implementation, adapted from
* malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
*/
STATIC_RW_DATA unsigned long malloc_ptr;
STATIC_RW_DATA int malloc_count;
-static void *malloc(int size)
+MALLOC_VISIBLE void *malloc(int size)
{
void *p;
@@ -40,7 +48,7 @@ static void *malloc(int size)
if (!malloc_ptr)
malloc_ptr = free_mem_ptr;
- malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */
+ malloc_ptr = (malloc_ptr + 7) & ~7; /* Align */
p = (void *)malloc_ptr;
malloc_ptr += size;
@@ -52,7 +60,7 @@ static void *malloc(int size)
return p;
}
-static void free(void *where)
+MALLOC_VISIBLE void free(void *where)
{
malloc_count--;
if (!malloc_count)
diff --git a/include/linux/decompress/unxz.h b/include/linux/decompress/unxz.h
index f764e2a7201e..3dd2658a9dab 100644
--- a/include/linux/decompress/unxz.h
+++ b/include/linux/decompress/unxz.h
@@ -1,10 +1,9 @@
+/* SPDX-License-Identifier: 0BSD */
+
/*
* Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
- *
- * This file has been put into the public domain.
- * You can do whatever you want with this file.
*/
#ifndef DECOMPRESS_UNXZ_H
diff --git a/include/linux/delay.h b/include/linux/delay.h
index 1d0e2ce6b6d9..46412c00033a 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -6,20 +6,12 @@
* Copyright (C) 1993 Linus Torvalds
*
* Delay routines, using a pre-computed "loops_per_jiffy" value.
- *
- * Please note that ndelay(), udelay() and mdelay() may return early for
- * several reasons:
- * 1. computed loops_per_jiffy too low (due to the time taken to
- * execute the timer interrupt.)
- * 2. cache behaviour affecting the time it takes to execute the
- * loop function.
- * 3. CPU clock rate changes.
- *
- * Please see this thread:
- * https://lists.openwall.net/linux-kernel/2011/01/09/56
+ * Sleep routines using timer list timers or hrtimers.
*/
-#include <linux/kernel.h>
+#include <linux/math.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
extern unsigned long loops_per_jiffy;
@@ -34,12 +26,21 @@ extern unsigned long loops_per_jiffy;
* The 2nd mdelay() definition ensures GCC will optimize away the
* while loop for the common cases where n <= MAX_UDELAY_MS -- Paul G.
*/
-
#ifndef MAX_UDELAY_MS
#define MAX_UDELAY_MS 5
#endif
#ifndef mdelay
+/**
+ * mdelay - Insert a busy-wait delay with millisecond granularity
+ * @n: requested delay in milliseconds
+ *
+ * See udelay() for basic information about mdelay() and its variants.
+ *
+ * Please double check whether mdelay() is the right way to go, or whether
+ * refactoring the code so that msleep() can be used instead is the better
+ * option.
+ */
#define mdelay(n) (\
(__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ? udelay((n)*1000) : \
({unsigned long __ms=(n); while (__ms--) udelay(1000);}))
@@ -55,25 +56,82 @@ static inline void ndelay(unsigned long x)
extern unsigned long lpj_fine;
void calibrate_delay(void);
+unsigned long calibrate_delay_is_known(void);
void __attribute__((weak)) calibration_delay_done(void);
void msleep(unsigned int msecs);
unsigned long msleep_interruptible(unsigned int msecs);
-void usleep_range(unsigned long min, unsigned long max);
+void usleep_range_state(unsigned long min, unsigned long max,
+ unsigned int state);
+/**
+ * usleep_range - Sleep for an approximate time
+ * @min: Minimum time in microseconds to sleep
+ * @max: Maximum time in microseconds to sleep
+ *
+ * For basic information please refer to usleep_range_state().
+ *
+ * The task will be in the state TASK_UNINTERRUPTIBLE during the sleep.
+ */
+static inline void usleep_range(unsigned long min, unsigned long max)
+{
+ usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
+}
+
+/**
+ * usleep_range_idle - Sleep for an approximate time with idle time accounting
+ * @min: Minimum time in microseconds to sleep
+ * @max: Maximum time in microseconds to sleep
+ *
+ * For basic information please refer to usleep_range_state().
+ *
+ * The sleeping task has the state TASK_IDLE during the sleep to prevent
+ * contribution to the load average.
+ */
+static inline void usleep_range_idle(unsigned long min, unsigned long max)
+{
+ usleep_range_state(min, max, TASK_IDLE);
+}
+
+/**
+ * ssleep - wrapper for seconds around msleep
+ * @seconds: Requested sleep duration in seconds
+ *
+ * Please refer to msleep() for detailed information.
+ */
static inline void ssleep(unsigned int seconds)
{
msleep(seconds * 1000);
}
-/* see Documentation/timers/timers-howto.rst for the thresholds */
+static const unsigned int max_slack_shift = 2;
+#define USLEEP_RANGE_UPPER_BOUND ((TICK_NSEC << max_slack_shift) / NSEC_PER_USEC)
+
+/**
+ * fsleep - flexible sleep which autoselects the best mechanism
+ * @usecs: requested sleep duration in microseconds
+ *
+ * fsleep() selects the best mechanism that will provide a maximum of 25% slack
+ * to the requested sleep duration. Therefore it uses:
+ *
+ * * udelay() loop for sleep durations <= 10 microseconds to avoid hrtimer
+ * overhead for really short sleep durations.
+ * * usleep_range() for sleep durations which would lead with the usage of
+ * msleep() to a slack larger than 25%. This depends on the granularity of
+ * jiffies.
+ * * msleep() for all other sleep durations.
+ *
+ * Note: When %CONFIG_HIGH_RES_TIMERS is not set, all sleeps are processed with
+ * the granularity of jiffies and the slack might exceed 25%, especially for
+ * short sleep durations.
+ */
static inline void fsleep(unsigned long usecs)
{
if (usecs <= 10)
udelay(usecs);
- else if (usecs <= 20000)
- usleep_range(usecs, 2 * usecs);
+ else if (usecs < USLEEP_RANGE_UPPER_BOUND)
+ usleep_range(usecs, usecs + (usecs >> max_slack_shift));
else
- msleep(DIV_ROUND_UP(usecs, 1000));
+ msleep(DIV_ROUND_UP(usecs, USEC_PER_MSEC));
}
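/*
 * Editorial illustration of the resulting selection, assuming HZ=1000
 * (TICK_NSEC = 1000000, so USLEEP_RANGE_UPPER_BOUND = 4000):
 *
 *	fsleep(5);	// udelay(5)
 *	fsleep(100);	// usleep_range(100, 125)
 *	fsleep(5000);	// msleep(5)
 */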
#endif /* defined(_LINUX_DELAY_H) */
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 21651f946751..800dcc360db2 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -9,18 +9,9 @@
#include <uapi/linux/taskstats.h>
-/*
- * Per-task flags relevant to delay accounting
- * maintained privately to avoid exhausting similar flags in sched.h:PF_*
- * Used to set current->delays->flags
- */
-#define DELAYACCT_PF_SWAPIN 0x00000001 /* I am doing a swapin */
-#define DELAYACCT_PF_BLKIO 0x00000002 /* I am waiting on IO */
-
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
raw_spinlock_t lock;
- unsigned int flags; /* Private per-task flags */
/* For each stat XXX, add following, aligned appropriately
*
@@ -37,62 +28,77 @@ struct task_delay_info {
* associated with the operation is added to XXX_delay.
* XXX_delay contains the accumulated delay time in nanoseconds.
*/
- u64 blkio_start; /* Shared by blkio, swapin */
+ u64 blkio_start;
+ u64 blkio_delay_max;
+ u64 blkio_delay_min;
u64 blkio_delay; /* wait for sync block io completion */
- u64 swapin_delay; /* wait for swapin block io completion */
+ u64 swapin_start;
+ u64 swapin_delay_max;
+ u64 swapin_delay_min;
+ u64 swapin_delay; /* wait for swapin */
u32 blkio_count; /* total count of the number of sync block */
/* io operations performed */
- u32 swapin_count; /* total count of the number of swapin block */
- /* io operations performed */
+ u32 swapin_count; /* total count of swapin */
u64 freepages_start;
+ u64 freepages_delay_max;
+ u64 freepages_delay_min;
u64 freepages_delay; /* wait for memory reclaim */
u64 thrashing_start;
+ u64 thrashing_delay_max;
+ u64 thrashing_delay_min;
u64 thrashing_delay; /* wait for thrashing page */
+ u64 compact_start;
+ u64 compact_delay_max;
+ u64 compact_delay_min;
+ u64 compact_delay; /* wait for memory compact */
+
+ u64 wpcopy_start;
+ u64 wpcopy_delay_max;
+ u64 wpcopy_delay_min;
+ u64 wpcopy_delay; /* wait for write-protect copy */
+
+ u64 irq_delay_max;
+ u64 irq_delay_min;
+ u64 irq_delay; /* wait for IRQ/SOFTIRQ */
+
u32 freepages_count; /* total count of memory reclaim */
u32 thrashing_count; /* total count of thrash waits */
+ u32 compact_count; /* total count of memory compact */
+ u32 wpcopy_count; /* total count of write-protect copy */
+ u32 irq_count; /* total count of IRQ/SOFTIRQ */
};
#endif
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/jump_label.h>
#ifdef CONFIG_TASK_DELAY_ACCT
+DECLARE_STATIC_KEY_FALSE(delayacct_key);
extern int delayacct_on; /* Delay accounting turned on/off */
extern struct kmem_cache *delayacct_cache;
extern void delayacct_init(void);
+
extern void __delayacct_tsk_init(struct task_struct *);
extern void __delayacct_tsk_exit(struct task_struct *);
extern void __delayacct_blkio_start(void);
extern void __delayacct_blkio_end(struct task_struct *);
-extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
+extern int delayacct_add_tsk(struct taskstats *, struct task_struct *);
extern __u64 __delayacct_blkio_ticks(struct task_struct *);
extern void __delayacct_freepages_start(void);
extern void __delayacct_freepages_end(void);
-extern void __delayacct_thrashing_start(void);
-extern void __delayacct_thrashing_end(void);
-
-static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
-{
- if (p->delays)
- return (p->delays->flags & DELAYACCT_PF_BLKIO);
- else
- return 0;
-}
-
-static inline void delayacct_set_flag(struct task_struct *p, int flag)
-{
- if (p->delays)
- p->delays->flags |= flag;
-}
-
-static inline void delayacct_clear_flag(struct task_struct *p, int flag)
-{
- if (p->delays)
- p->delays->flags &= ~flag;
-}
+extern void __delayacct_thrashing_start(bool *in_thrashing);
+extern void __delayacct_thrashing_end(bool *in_thrashing);
+extern void __delayacct_swapin_start(void);
+extern void __delayacct_swapin_end(void);
+extern void __delayacct_compact_start(void);
+extern void __delayacct_compact_end(void);
+extern void __delayacct_wpcopy_start(void);
+extern void __delayacct_wpcopy_end(void);
+extern void __delayacct_irq(struct task_struct *task, u32 delta);
static inline void delayacct_tsk_init(struct task_struct *tsk)
{
@@ -114,24 +120,20 @@ static inline void delayacct_tsk_free(struct task_struct *tsk)
static inline void delayacct_blkio_start(void)
{
- delayacct_set_flag(current, DELAYACCT_PF_BLKIO);
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_blkio_start();
}
static inline void delayacct_blkio_end(struct task_struct *p)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (p->delays)
__delayacct_blkio_end(p);
- delayacct_clear_flag(p, DELAYACCT_PF_BLKIO);
-}
-
-static inline int delayacct_add_tsk(struct taskstats *d,
- struct task_struct *tsk)
-{
- if (!delayacct_on || !tsk->delays)
- return 0;
- return __delayacct_add_tsk(d, tsk);
}
static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
@@ -143,33 +145,104 @@ static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
static inline void delayacct_freepages_start(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_freepages_start();
}
static inline void delayacct_freepages_end(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_freepages_end();
}
-static inline void delayacct_thrashing_start(void)
+static inline void delayacct_thrashing_start(bool *in_thrashing)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_thrashing_start(in_thrashing);
+}
+
+static inline void delayacct_thrashing_end(bool *in_thrashing)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_thrashing_end(in_thrashing);
+}
+
+static inline void delayacct_swapin_start(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_swapin_start();
+}
+
+static inline void delayacct_swapin_end(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
- __delayacct_thrashing_start();
+ __delayacct_swapin_end();
}
-static inline void delayacct_thrashing_end(void)
+static inline void delayacct_compact_start(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_compact_start();
+}
+
+static inline void delayacct_compact_end(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_compact_end();
+}
+
+static inline void delayacct_wpcopy_start(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
- __delayacct_thrashing_end();
+ __delayacct_wpcopy_start();
+}
+
+static inline void delayacct_wpcopy_end(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_wpcopy_end();
+}
+
+static inline void delayacct_irq(struct task_struct *task, u32 delta)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (task->delays)
+ __delayacct_irq(task, delta);
}
#else
-static inline void delayacct_set_flag(struct task_struct *p, int flag)
-{}
-static inline void delayacct_clear_flag(struct task_struct *p, int flag)
-{}
static inline void delayacct_init(void)
{}
static inline void delayacct_tsk_init(struct task_struct *tsk)
@@ -191,9 +264,23 @@ static inline void delayacct_freepages_start(void)
{}
static inline void delayacct_freepages_end(void)
{}
-static inline void delayacct_thrashing_start(void)
+static inline void delayacct_thrashing_start(bool *in_thrashing)
+{}
+static inline void delayacct_thrashing_end(bool *in_thrashing)
+{}
+static inline void delayacct_swapin_start(void)
+{}
+static inline void delayacct_swapin_end(void)
+{}
+static inline void delayacct_compact_start(void)
+{}
+static inline void delayacct_compact_end(void)
+{}
+static inline void delayacct_wpcopy_start(void)
+{}
+static inline void delayacct_wpcopy_end(void)
{}
-static inline void delayacct_thrashing_end(void)
+static inline void delayacct_irq(struct task_struct *task, u32 delta)
{}
#endif /* CONFIG_TASK_DELAY_ACCT */
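
A minimal sketch, not part of the patch, of how a caller pairs the new thrashing hooks: the stack-local bool handed to start/end replaces the removed DELAYACCT_PF_* per-task flags, and with delayacct_key disabled both wrappers reduce to a static branch:

static void wait_for_thrashing_page(void)
{
	bool in_thrashing;

	delayacct_thrashing_start(&in_thrashing);
	/* ... block until the thrashing page becomes usable ... */
	delayacct_thrashing_end(&in_thrashing);
}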
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
index 6f009559ee54..eb2094e43050 100644
--- a/include/linux/dev_printk.h
+++ b/include/linux/dev_printk.h
@@ -38,8 +38,8 @@ __printf(3, 4) __cold
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
__printf(3, 4) __cold
-void dev_printk(const char *level, const struct device *dev,
- const char *fmt, ...);
+void _dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...);
__printf(2, 3) __cold
void _dev_emerg(const struct device *dev, const char *fmt, ...);
__printf(2, 3) __cold
@@ -69,7 +69,7 @@ static inline void __dev_printk(const char *level, const struct device *dev,
struct va_format *vaf)
{}
static inline __printf(3, 4)
-void dev_printk(const char *level, const struct device *dev,
+void _dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
{}
@@ -98,24 +98,66 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
#endif
/*
+ * Need to take variadic arguments even though we don't use them, as dev_fmt()
+ * may only just have been expanded and may result in multiple arguments.
+ */
+#define dev_printk_index_emit(level, fmt, ...) \
+ printk_index_subsys_emit("%s %s: ", level, fmt)
+
+#define dev_printk_index_wrap(_p_func, level, dev, fmt, ...) \
+ ({ \
+ dev_printk_index_emit(level, fmt); \
+ _p_func(dev, fmt, ##__VA_ARGS__); \
+ })
+
+/*
+ * Some callsites directly call dev_printk rather than going through the
+ * dev_<level> infrastructure, so we need to emit here as well as inside those
+ * level-specific macros. Only one index entry will be produced, either way,
+ * since dev_printk's `fmt` isn't known at compile time if going through the
+ * dev_<level> macros.
+ *
+ * dev_fmt() isn't called for dev_printk when used directly, as it's used by
+ * the dev_<level> macros internally which already have dev_fmt() processed.
+ *
+ * We also can't use dev_printk_index_wrap directly, because we have a separate
+ * level to process.
+ */
+#define dev_printk(level, dev, fmt, ...) \
+ ({ \
+ dev_printk_index_emit(level, fmt); \
+ _dev_printk(level, dev, fmt, ##__VA_ARGS__); \
+ })
+
+/*
+ * Dummy dev_printk for disabled debugging statements to use whilst maintaining
+ * gcc's format checking.
+ */
+#define dev_no_printk(level, dev, fmt, ...) \
+ ({ \
+ if (0) \
+ _dev_printk(level, dev, fmt, ##__VA_ARGS__); \
+ })
+
+/*
* #defines for all the dev_<level> macros to prefix with whatever
* possible use of #define dev_fmt(fmt) ...
*/
-#define dev_emerg(dev, fmt, ...) \
- _dev_emerg(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_crit(dev, fmt, ...) \
- _dev_crit(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_alert(dev, fmt, ...) \
- _dev_alert(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_err(dev, fmt, ...) \
- _dev_err(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_warn(dev, fmt, ...) \
- _dev_warn(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_notice(dev, fmt, ...) \
- _dev_notice(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_info(dev, fmt, ...) \
- _dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_emerg(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_emerg, KERN_EMERG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_crit(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_crit, KERN_CRIT, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_alert(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_alert, KERN_ALERT, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_err(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_err, KERN_ERR, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_warn(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_warn, KERN_WARNING, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_notice(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_notice, KERN_NOTICE, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_info(dev, fmt, ...) \
+ dev_printk_index_wrap(_dev_info, KERN_INFO, dev, dev_fmt(fmt), ##__VA_ARGS__)
#if defined(CONFIG_DYNAMIC_DEBUG) || \
(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
@@ -126,10 +168,7 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
#else
#define dev_dbg(dev, fmt, ...) \
-({ \
- if (0) \
- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-})
+ dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
#endif
#ifdef CONFIG_PRINTK
@@ -215,20 +254,14 @@ do { \
} while (0)
#else
#define dev_dbg_ratelimited(dev, fmt, ...) \
-do { \
- if (0) \
- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-} while (0)
+ dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
#endif
#ifdef VERBOSE_DEBUG
#define dev_vdbg dev_dbg
#else
#define dev_vdbg(dev, fmt, ...) \
-({ \
- if (0) \
- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-})
+ dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
#endif
/*
@@ -236,10 +269,21 @@ do { \
* using WARN/WARN_ONCE to include file/line information and a backtrace.
*/
#define dev_WARN(dev, format, arg...) \
- WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
+ WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg)
#define dev_WARN_ONCE(dev, condition, format, arg...) \
WARN_ONCE(condition, "%s %s: " format, \
dev_driver_string(dev), dev_name(dev), ## arg)
+__printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
+__printf(3, 4) int dev_warn_probe(const struct device *dev, int err, const char *fmt, ...);
+
+/* Simple helper for dev_err_probe() when ERR_PTR() is to be returned. */
+#define dev_err_ptr_probe(dev, ___err, fmt, ...) \
+ ERR_PTR(dev_err_probe(dev, ___err, fmt, ##__VA_ARGS__))
+
+/* Simple helper for dev_err_probe() when ERR_CAST() is to be returned. */
+#define dev_err_cast_probe(dev, ___err_ptr, fmt, ...) \
+ ERR_PTR(dev_err_probe(dev, PTR_ERR(___err_ptr), fmt, ##__VA_ARGS__))
+
#endif /* _DEVICE_PRINTK_H_ */
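
A hedged usage sketch for dev_err_probe(): it logs the failure (or records the deferral reason for -EPROBE_DEFER) and returns err, collapsing the usual log-then-return pattern in probe paths. The driver, device, and "vcc" supply below are illustrative only:

#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int foo_probe(struct platform_device *pdev)
{
	struct regulator *reg;

	reg = devm_regulator_get(&pdev->dev, "vcc");
	if (IS_ERR(reg))
		return dev_err_probe(&pdev->dev, PTR_ERR(reg),
				     "failed to get vcc supply\n");

	return 0;
}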
diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h
index c008169ed2c6..377892604ff4 100644
--- a/include/linux/devcoredump.h
+++ b/include/linux/devcoredump.h
@@ -12,6 +12,9 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
+/* if data isn't read by userspace after 5 minutes then delete it */
+#define DEVCD_TIMEOUT (HZ * 60 * 5)
+
/*
* _devcd_free_sgtable - free all the memory of the given scatterlist table
* (i.e. both pages and scatterlist instances)
@@ -50,19 +53,22 @@ static inline void _devcd_free_sgtable(struct scatterlist *table)
kfree(delete_iter);
}
-
#ifdef CONFIG_DEV_COREDUMP
void dev_coredumpv(struct device *dev, void *data, size_t datalen,
gfp_t gfp);
-void dev_coredumpm(struct device *dev, struct module *owner,
- void *data, size_t datalen, gfp_t gfp,
- ssize_t (*read)(char *buffer, loff_t offset, size_t count,
- void *data, size_t datalen),
- void (*free)(void *data));
+void dev_coredumpm_timeout(struct device *dev, struct module *owner,
+ void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset,
+ size_t count, void *data,
+ size_t datalen),
+ void (*free)(void *data),
+ unsigned long timeout);
void dev_coredumpsg(struct device *dev, struct scatterlist *table,
size_t datalen, gfp_t gfp);
+
+void dev_coredump_put(struct device *dev);
#else
static inline void dev_coredumpv(struct device *dev, void *data,
size_t datalen, gfp_t gfp)
@@ -71,11 +77,13 @@ static inline void dev_coredumpv(struct device *dev, void *data,
}
static inline void
-dev_coredumpm(struct device *dev, struct module *owner,
- void *data, size_t datalen, gfp_t gfp,
- ssize_t (*read)(char *buffer, loff_t offset, size_t count,
- void *data, size_t datalen),
- void (*free)(void *data))
+dev_coredumpm_timeout(struct device *dev, struct module *owner,
+ void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset,
+ size_t count, void *data,
+ size_t datalen),
+ void (*free)(void *data),
+ unsigned long timeout)
{
free(data);
}
@@ -85,6 +93,34 @@ static inline void dev_coredumpsg(struct device *dev, struct scatterlist *table,
{
_devcd_free_sgtable(table);
}
+static inline void dev_coredump_put(struct device *dev)
+{
+}
#endif /* CONFIG_DEV_COREDUMP */
+/**
+ * dev_coredumpm - create device coredump with read/free methods
+ * @dev: the struct device for the crashed device
+ * @owner: the module that contains the read/free functions, use %THIS_MODULE
+ * @data: data cookie for the @read/@free functions
+ * @datalen: length of the data
+ * @gfp: allocation flags
+ * @read: function to read from the given buffer
+ * @free: function to free the given buffer
+ *
+ * Creates a new device coredump for the given device. If a previous one hasn't
+ * been read yet, the new coredump is discarded. The data lifetime is determined
+ * by the device coredump framework and when it is no longer needed the @free
+ * function will be called to free the data.
+ */
+static inline void dev_coredumpm(struct device *dev, struct module *owner,
+ void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+ void *data, size_t datalen),
+ void (*free)(void *data))
+{
+ dev_coredumpm_timeout(dev, owner, data, datalen, gfp, read, free,
+ DEVCD_TIMEOUT);
+}
+
#endif /* __DEVCOREDUMP_H */
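
A sketch, with illustrative names, of the compatibility path kept by this change: dev_coredumpm() now forwards to dev_coredumpm_timeout() with the default DEVCD_TIMEOUT, while a driver holding a vmalloc'd buffer can keep using dev_coredumpv(), which takes ownership and frees the buffer once userspace reads the dump or the timeout expires:

static void foo_report_crash(struct device *dev, void *vmalloc_buf,
			     size_t len)
{
	/* ownership of vmalloc_buf passes to the devcoredump framework */
	dev_coredumpv(dev, vmalloc_buf, len, GFP_KERNEL);
}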
diff --git a/include/linux/devfreq-governor.h b/include/linux/devfreq-governor.h
new file mode 100644
index 000000000000..dfdd0160a29f
--- /dev/null
+++ b/include/linux/devfreq-governor.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * governor.h - internal header for devfreq governors.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This header is for devfreq governors
+ */
+
+#ifndef __LINUX_DEVFREQ_DEVFREQ_H__
+#define __LINUX_DEVFREQ_DEVFREQ_H__
+
+#include <linux/devfreq.h>
+
+#define DEVFREQ_NAME_LEN 16
+
+#define to_devfreq(DEV) container_of((DEV), struct devfreq, dev)
+
+/* Devfreq events */
+#define DEVFREQ_GOV_START 0x1
+#define DEVFREQ_GOV_STOP 0x2
+#define DEVFREQ_GOV_UPDATE_INTERVAL 0x3
+#define DEVFREQ_GOV_SUSPEND 0x4
+#define DEVFREQ_GOV_RESUME 0x5
+
+#define DEVFREQ_MIN_FREQ 0
+#define DEVFREQ_MAX_FREQ ULONG_MAX
+
+/*
+ * Definition of the governor feature flags
+ * - DEVFREQ_GOV_FLAG_IMMUTABLE
+ * : This governor can never be replaced by another governor.
+ * - DEVFREQ_GOV_FLAG_IRQ_DRIVEN
+ * : The devfreq won't schedule the work for this governor.
+ */
+#define DEVFREQ_GOV_FLAG_IMMUTABLE BIT(0)
+#define DEVFREQ_GOV_FLAG_IRQ_DRIVEN BIT(1)
+
+/*
+ * Definition of governor attribute flags except for common sysfs attributes
+ * - DEVFREQ_GOV_ATTR_POLLING_INTERVAL
+ * : Indicate polling_interval sysfs attribute
+ * - DEVFREQ_GOV_ATTR_TIMER
+ * : Indicate timer sysfs attribute
+ */
+#define DEVFREQ_GOV_ATTR_POLLING_INTERVAL BIT(0)
+#define DEVFREQ_GOV_ATTR_TIMER BIT(1)
+
+/**
+ * struct devfreq_governor - Devfreq policy governor
+ * @node: list node - contains registered devfreq governors
+ * @name: Governor's name
+ * @attrs: Governor's sysfs attribute flags
+ * @flags: Governor's feature flags
+ * @get_target_freq: Returns desired operating frequency for the device.
+ * Basically, get_target_freq will run
+ * devfreq_dev_profile.get_dev_status() to get the
+ * status of the device (load = busy_time / total_time).
+ * @event_handler: Callback for devfreq core framework to notify events
+ * to governors. Events include per device governor
+ * init and exit, opp changes out of devfreq, suspend
+ * and resume of per device devfreq during device idle.
+ *
+ * Note that the callbacks are called with devfreq->lock locked by devfreq.
+ */
+struct devfreq_governor {
+ struct list_head node;
+
+ const char name[DEVFREQ_NAME_LEN];
+ const u64 attrs;
+ const u64 flags;
+ int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+ int (*event_handler)(struct devfreq *devfreq,
+ unsigned int event, void *data);
+};
+
+void devfreq_monitor_start(struct devfreq *devfreq);
+void devfreq_monitor_stop(struct devfreq *devfreq);
+void devfreq_monitor_suspend(struct devfreq *devfreq);
+void devfreq_monitor_resume(struct devfreq *devfreq);
+void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay);
+
+int devfreq_add_governor(struct devfreq_governor *governor);
+int devfreq_remove_governor(struct devfreq_governor *governor);
+
+int devm_devfreq_add_governor(struct device *dev,
+ struct devfreq_governor *governor);
+
+int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
+int devfreq_update_target(struct devfreq *devfreq, unsigned long freq);
+void devfreq_get_freq_range(struct devfreq *devfreq, unsigned long *min_freq,
+ unsigned long *max_freq);
+
+static inline int devfreq_update_stats(struct devfreq *df)
+{
+ if (!df->profile->get_dev_status)
+ return -EINVAL;
+
+ return df->profile->get_dev_status(df->dev.parent, &df->last_status);
+}
+#endif /* __LINUX_DEVFREQ_DEVFREQ_H__ */
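
A minimal governor skeleton against the interface above, assuming a hypothetical "demo_performance" policy that always requests the maximum frequency and lets the device's limits clamp it; it would be registered with devfreq_add_governor(&demo_governor) from module init:

static int demo_get_target_freq(struct devfreq *df, unsigned long *freq)
{
	*freq = DEVFREQ_MAX_FREQ;	/* clamped by the device's freq limits */
	return 0;
}

static int demo_event_handler(struct devfreq *devfreq,
			      unsigned int event, void *data)
{
	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);
		break;
	case DEVFREQ_GOV_STOP:
		devfreq_monitor_stop(devfreq);
		break;
	case DEVFREQ_GOV_SUSPEND:
		devfreq_monitor_suspend(devfreq);
		break;
	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		break;
	}
	return 0;
}

static struct devfreq_governor demo_governor = {
	.name = "demo_performance",
	.get_target_freq = demo_get_target_freq,
	.event_handler = demo_event_handler,
};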
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 142474b4af96..dc1075dc3446 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -38,6 +38,7 @@ enum devfreq_timer {
struct devfreq;
struct devfreq_governor;
+struct devfreq_cpu_data;
struct thermal_cooling_device;
/**
@@ -102,12 +103,13 @@ struct devfreq_dev_status {
*
* @is_cooling_device: A self-explanatory boolean giving the device a
* cooling effect property.
+ * @dev_groups: Optional device-specific sysfs attribute groups to be
+ * attached to the devfreq device.
*/
struct devfreq_dev_profile {
unsigned long initial_freq;
unsigned int polling_ms;
enum devfreq_timer timer;
- bool is_cooling_device;
int (*target)(struct device *dev, unsigned long *freq, u32 flags);
int (*get_dev_status)(struct device *dev,
@@ -117,6 +119,10 @@ struct devfreq_dev_profile {
unsigned long *freq_table;
unsigned int max_state;
+
+ bool is_cooling_device;
+
+ const struct attribute_group **dev_groups;
};
/**
@@ -147,10 +153,12 @@ struct devfreq_stats {
* reevaluate operable frequencies. Devfreq users may use
* devfreq.nb to the corresponding register notifier call chain.
* @work: delayed work for load monitoring.
+ * @freq_table: current frequency table used by the devfreq driver.
+ * @max_state: number of entries in the frequency table.
* @previous_freq: previously configured frequency value.
* @last_status: devfreq user device info, performance statistics
- * @data: Private data of the governor. The devfreq framework does not
- * touch this.
+ * @data: data the devfreq driver passes to governors; governors should not change it.
+ * @governor_data: private data for governors, devfreq core doesn't touch it.
* @user_min_freq_req: PM QoS minimum frequency request from user (via sysfs)
* @user_max_freq_req: PM QoS maximum frequency request from user (via sysfs)
* @scaling_min_freq: Limit minimum frequency requested by OPP interface
@@ -184,10 +192,14 @@ struct devfreq {
struct notifier_block nb;
struct delayed_work work;
+ unsigned long *freq_table;
+ unsigned int max_state;
+
unsigned long previous_freq;
struct devfreq_dev_status last_status;
- void *data; /* private data for governors */
+ void *data;
+ void *governor_data;
struct dev_pm_qos_request user_min_freq_req;
struct dev_pm_qos_request user_max_freq_req;
@@ -266,8 +278,8 @@ void devm_devfreq_unregister_notifier(struct device *dev,
struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node);
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
const char *phandle_name, int index);
+#endif /* CONFIG_PM_DEVFREQ */
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
/**
* struct devfreq_simple_ondemand_data - ``void *data`` fed to struct devfreq
* and devfreq_add_device
@@ -285,9 +297,12 @@ struct devfreq_simple_ondemand_data {
unsigned int upthreshold;
unsigned int downdifferential;
};
-#endif
-#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
+enum devfreq_parent_dev_type {
+ DEVFREQ_PARENT_DEV,
+ CPUFREQ_PARENT_DEV,
+};
+
/**
* struct devfreq_passive_data - ``void *data`` fed to struct devfreq
* and devfreq_add_device
@@ -299,8 +314,11 @@ struct devfreq_simple_ondemand_data {
* using governors except for passive governor.
 * If the devfreq device has a specific method to decide
 * the next frequency, it should use this callback.
- * @this: the devfreq instance of own device.
- * @nb: the notifier block for DEVFREQ_TRANSITION_NOTIFIER list
+ * @parent_type: the parent type of the device.
+ * @this: the devfreq instance of own device.
+ * @nb: the notifier block for DEVFREQ_TRANSITION_NOTIFIER or
+ * CPUFREQ_TRANSITION_NOTIFIER list.
+ * @cpu_data_list: the list of cpu frequency data for all cpufreq_policy.
*
 * The devfreq_passive_data has to set the devfreq instance of the parent
 * device with governors except for the passive governor. But it doesn't need to
@@ -314,13 +332,16 @@ struct devfreq_passive_data {
	/* Optional callback to decide the next frequency of passive device */
int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+ /* Should set the type of parent device */
+ enum devfreq_parent_dev_type parent_type;
+
/* For passive governor's internal use. Don't need to set them */
struct devfreq *this;
struct notifier_block nb;
+ struct list_head cpu_data_list;
};
-#endif
-#else /* !CONFIG_PM_DEVFREQ */
+#if !defined(CONFIG_PM_DEVFREQ)
static inline struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq_dev_profile *profile,
const char *governor_name,
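
A sketch of the new cpufreq-parent mode for the passive governor: the driver only fills in parent_type (the this/nb/cpu_data_list fields are for internal use) and passes the data when adding the device. The names below are illustrative, assuming the DEVFREQ_GOV_PASSIVE governor-name constant from this header:

static struct devfreq_passive_data foo_passive_data = {
	.parent_type = CPUFREQ_PARENT_DEV,
};

/* foo_devfreq = devfreq_add_device(dev, &foo_profile,
 *				    DEVFREQ_GOV_PASSIVE, &foo_passive_data);
 */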
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ff700fb6ce1d..38f625af6ab4 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2001 Sistina Software (UK) Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -20,6 +21,7 @@ struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
+enum dax_access_mode;
/*
* Type of table, mapped_device's mempool and request_queue
@@ -31,7 +33,7 @@ enum dm_queue_mode {
DM_TYPE_DAX_BIO_BASED = 3,
};
-typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
+typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;
union map_info {
void *ptr;
@@ -86,12 +88,19 @@ typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);
typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
- unsigned status_flags, char *result, unsigned maxlen);
+ unsigned int status_flags, char *result, unsigned int maxlen);
-typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen);
+typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen);
-typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
+/*
+ * Called with *forward == true. If it remains true, the ioctl should be
+ * forwarded to bdev. If it is reset to false, the target already fully handled
+ * the ioctl and the return value is the return value for the whole ioctl.
+ */
+typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward);
#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
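
A sketch of the revised prepare_ioctl contract, with a hypothetical target type: leaving *forward true tells dm core to forward the ioctl to the returned bdev, while clearing it means the target handled the ioctl itself and its return value becomes the result of the whole ioctl:

struct foo_target {			/* hypothetical per-target data */
	struct dm_dev *dev;
};

static int foo_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
			     unsigned int cmd, unsigned long arg,
			     bool *forward)
{
	struct foo_target *ft = ti->private;

	*bdev = ft->dev->bdev;
	return 0;		/* *forward stays true: pass ioctl to *bdev */
}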
@@ -146,33 +155,43 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
* >= 0 : the number of bytes accessible at the address
*/
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn);
-typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i);
+ long nr_pages, enum dax_access_mode node, void **kaddr,
+ unsigned long *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages);
-#define PAGE_SECTORS (PAGE_SIZE / 512)
+
+/*
+ * Returns:
+ * != 0 : number of bytes transferred
+ * 0 : recovery write failed
+ */
+typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i);
void dm_error(const char *message);
struct dm_dev {
struct block_device *bdev;
+ struct file *bdev_file;
struct dax_device *dax_dev;
- fmode_t mode;
+ blk_mode_t mode;
char name[16];
};
-dev_t dm_get_dev_t(const char *path);
-
/*
* Constructors should call these functions to ensure destination devices
* are opened/closed correctly.
*/
-int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
/*
+ * Helper function for getting devices
+ */
+int dm_devt_from_path(const char *path, dev_t *dev_p);
+
+/*
* Information about a target type
*/
@@ -180,7 +199,7 @@ struct target_type {
uint64_t features;
const char *name;
struct module *module;
- unsigned version[3];
+ unsigned int version[3];
dm_ctr_fn ctr;
dm_dtr_fn dtr;
dm_map_fn map;
@@ -201,9 +220,8 @@ struct target_type {
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
dm_dax_direct_access_fn direct_access;
- dm_dax_copy_iter_fn dax_copy_from_iter;
- dm_dax_copy_iter_fn dax_copy_to_iter;
dm_dax_zero_page_range_fn dax_zero_page_range;
+ dm_dax_recovery_write_fn dax_recovery_write;
/* For internal device-mapper use. */
struct list_head list;
@@ -288,6 +306,9 @@ struct target_type {
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif
+#define DM_TARGET_ATOMIC_WRITES 0x00000400
+#define dm_target_supports_atomic_writes(type) ((type)->features & DM_TARGET_ATOMIC_WRITES)
+
struct dm_target {
struct dm_table *table;
struct target_type *type;
@@ -307,37 +328,31 @@ struct dm_target {
* It is a responsibility of the target driver to remap these bios
* to the real underlying devices.
*/
- unsigned num_flush_bios;
+ unsigned int num_flush_bios;
/*
* The number of discard bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_discard_bios;
+ unsigned int num_discard_bios;
/*
* The number of secure erase bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_secure_erase_bios;
-
- /*
- * The number of WRITE SAME bios that will be submitted to the target.
- * The bio number can be accessed with dm_bio_get_target_bio_nr.
- */
- unsigned num_write_same_bios;
+ unsigned int num_secure_erase_bios;
/*
* The number of WRITE ZEROES bios that will be submitted to the target.
* The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_write_zeroes_bios;
+ unsigned int num_write_zeroes_bios;
/*
* The minimum number of extra bytes allocated in each io for the
* target to use.
*/
- unsigned per_io_data_size;
+ unsigned int per_io_data_size;
/* target specific data */
void *private;
@@ -358,14 +373,66 @@ struct dm_target {
bool discards_supported:1;
/*
+ * Automatically set by dm-core if this target supports
+ * REQ_OP_ZONE_RESET_ALL. Otherwise, this operation will be emulated
+ * using REQ_OP_ZONE_RESET. Target drivers must not set this manually.
+ */
+ bool zone_reset_all_supported:1;
+
+ /*
+ * Set if this target requires that discards be split on
+ * 'max_discard_sectors' boundaries.
+ */
+ bool max_discard_granularity:1;
+
+ /*
* Set if we need to limit the number of in-flight bios when swapping.
*/
bool limit_swap_bios:1;
+
+ /*
+ * Set if this target implements a zoned device and needs emulation of
+ * zone append operations using regular writes.
+ */
+ bool emulate_zone_append:1;
+
+ /*
+ * Set if the target will submit IO using dm_submit_bio_remap()
+ * after returning DM_MAPIO_SUBMITTED from its map function.
+ */
+ bool accounts_remapped_io:1;
+
+ /*
+ * Set if the target will submit the DM bio without first calling
+ * bio_set_dev(). NOTE: ideally a target should _not_ need this.
+ */
+ bool needs_bio_set_dev:1;
+
+ /*
+ * Set if the target supports flush optimization. If all the targets in
+ * a table have flush_bypasses_map set, the dm core will not send
+ * flushes to the targets via a ->map method. It will iterate over
+ * dm_table->devices and send flushes to the devices directly. This
+ * optimization reduces the number of flushes being sent when multiple
+ * targets in a table use the same underlying device.
+ *
+ * This optimization may be enabled on targets that just pass the
+ * flushes to the underlying devices without performing any other
+ * actions on the flush request. Currently, dm-linear and dm-stripe
+ * support it.
+ */
+ bool flush_bypasses_map:1;
+
+ /*
+ * Set if the target calls bio_integrity_alloc on bios received
+ * in the map method.
+ */
+ bool mempool_needs_integrity:1;
};
void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
-unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
+unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);
u64 dm_start_time_ns_from_clone(struct bio *bio);
@@ -376,7 +443,7 @@ void dm_unregister_target(struct target_type *t);
* Target argument parsing.
*/
struct dm_arg_set {
- unsigned argc;
+ unsigned int argc;
char **argv;
};
@@ -385,8 +452,8 @@ struct dm_arg_set {
* the error message to use if the number is found to be outside that range.
*/
struct dm_arg {
- unsigned min;
- unsigned max;
+ unsigned int min;
+ unsigned int max;
char *error;
};
@@ -395,7 +462,7 @@ struct dm_arg {
* returning -EINVAL and setting *error.
*/
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
- unsigned *value, char **error);
+ unsigned int *value, char **error);
/*
* Process the next argument as the start of a group containing between
@@ -403,7 +470,7 @@ int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
* *num_args or, if invalid, return -EINVAL and set *error.
*/
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
- unsigned *num_args, char **error);
+ unsigned int *num_args, char **error);
/*
* Return the current argument and shift to the next.
@@ -413,12 +480,14 @@ const char *dm_shift_arg(struct dm_arg_set *as);
/*
* Move through num_args arguments.
*/
-void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
+void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);
-/*-----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
* Functions for creating and manipulating mapped devices.
* Drop the reference with dm_put when you finish with the object.
- *---------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
/*
* DM_ANY_MINOR chooses the next available minor number.
@@ -443,7 +512,7 @@ void *dm_get_mdptr(struct mapped_device *md);
/*
* A device can still be used while suspended, but I/O is deferred.
*/
-int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
+int dm_suspend(struct mapped_device *md, unsigned int suspend_flags);
int dm_resume(struct mapped_device *md);
/*
@@ -463,22 +532,29 @@ struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
-void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
-union map_info *dm_get_rq_mapinfo(struct request *rq);
+void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
+void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
struct dm_target *tgt;
+ struct gendisk *disk;
sector_t next_sector;
- void *orig_data;
- report_zones_cb orig_cb;
unsigned int zone_idx;
+ /* for block layer ->report_zones */
+ struct blk_report_zones_args *rep_args;
+
+ /* for internal users */
+ report_zones_cb cb;
+ void *data;
+
/* must be filled by ->report_zones before calling dm_report_zones_cb */
sector_t start;
};
-int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data);
+int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
+ struct dm_report_zones_args *args, unsigned int nr_zones);
#endif /* CONFIG_BLK_DEV_ZONED */
/*
@@ -489,23 +565,23 @@ int __init dm_early_create(struct dm_ioctl *dmi,
struct dm_target_spec **spec_array,
char **target_params_array);
-struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
-
/*
* Geometry functions.
*/
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Functions for manipulating device-mapper tables.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
/*
* First create an empty table.
*/
-int dm_table_create(struct dm_table **result, fmode_t mode,
- unsigned num_targets, struct mapped_device *md);
+int dm_table_create(struct dm_table **result, blk_mode_t mode,
+ unsigned int num_targets, struct mapped_device *md);
/*
* Then call this once for each target.
@@ -547,8 +623,7 @@ void dm_sync_table(struct mapped_device *md);
* Queries
*/
sector_t dm_table_get_size(struct dm_table *t);
-unsigned int dm_table_get_num_targets(struct dm_table *t);
-fmode_t dm_table_get_mode(struct dm_table *t);
+blk_mode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);
@@ -570,13 +645,15 @@ struct dm_table *dm_swap_table(struct mapped_device *md,
struct dm_table *t);
/*
- * Table keyslot manager functions
+ * Table blk_crypto_profile functions
*/
-void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm);
+void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Macros.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
#define DM_NAME "device-mapper"
#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
@@ -593,8 +670,31 @@ void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm);
#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
-#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
- 0 : scnprintf(result + sz, maxlen - sz, x))
+#define DMEMIT(x...) (sz += ((sz >= maxlen) ? 0 : scnprintf(result + sz, maxlen - sz, x)))
+
+#define DMEMIT_TARGET_NAME_VERSION(y) \
+ DMEMIT("target_name=%s,target_version=%u.%u.%u", \
+ (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])
+
+/**
+ * module_dm() - Helper macro for DM targets that don't do anything
+ * special in their module_init and module_exit.
+ * Each module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ *
+ * @name: DM target's name
+ */
+#define module_dm(name) \
+static int __init dm_##name##_init(void) \
+{ \
+ return dm_register_target(&(name##_target)); \
+} \
+module_init(dm_##name##_init) \
+static void __exit dm_##name##_exit(void) \
+{ \
+ dm_unregister_target(&(name##_target)); \
+} \
+module_exit(dm_##name##_exit)
/*
* Definitions of return values from target end_io function.
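
As a usage sketch, a target with no special module setup can now register itself with the module_dm() helper above; the "foo" target and its callbacks, assumed to be defined elsewhere, are illustrative:

static struct target_type foo_target = {
	.name    = "foo",
	.version = {1, 0, 0},
	.module  = THIS_MODULE,
	.ctr     = foo_ctr,	/* illustrative constructor */
	.dtr     = foo_dtr,	/* illustrative destructor */
	.map     = foo_map,	/* illustrative map function */
};
module_dm(foo);		/* expands to module_init/module_exit for foo_target */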
diff --git a/include/linux/device.h b/include/linux/device.h
index 38a2071cf776..0be95294b6e6 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -26,10 +26,11 @@
#include <linux/atomic.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
-#include <linux/overflow.h>
#include <linux/device/bus.h>
#include <linux/device/class.h>
+#include <linux/device/devres.h>
#include <linux/device/driver.h>
+#include <linux/cleanup.h>
#include <asm/device.h>
struct device;
@@ -41,10 +42,10 @@ struct class;
struct subsys_private;
struct device_node;
struct fwnode_handle;
-struct iommu_ops;
struct iommu_group;
struct dev_pin_info;
struct dev_iommu;
+struct msi_device_data;
/**
* struct subsys_interface - interfaces to device functions
@@ -61,7 +62,7 @@ struct dev_iommu;
*/
struct subsys_interface {
const char *name;
- struct bus_type *subsys;
+ const struct bus_type *subsys;
struct list_head node;
int (*add_dev)(struct device *dev, struct subsys_interface *sif);
void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
@@ -70,9 +71,9 @@ struct subsys_interface {
int subsys_interface_register(struct subsys_interface *sif);
void subsys_interface_unregister(struct subsys_interface *sif);
-int subsys_system_register(struct bus_type *subsys,
+int subsys_system_register(const struct bus_type *subsys,
const struct attribute_group **groups);
-int subsys_virtual_register(struct bus_type *subsys,
+int subsys_virtual_register(const struct bus_type *subsys,
const struct attribute_group **groups);
/*
@@ -87,15 +88,20 @@ int subsys_virtual_register(struct bus_type *subsys,
struct device_type {
const char *name;
const struct attribute_group **groups;
- int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
- char *(*devnode)(struct device *dev, umode_t *mode,
+ int (*uevent)(const struct device *dev, struct kobj_uevent_env *env);
+ char *(*devnode)(const struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid);
void (*release)(struct device *dev);
const struct dev_pm_ops *pm;
};
-/* interface for exporting device attributes */
+/**
+ * struct device_attribute - Interface for exporting device attributes.
+ * @attr: sysfs attribute definition.
+ * @show: Show handler.
+ * @store: Store handler.
+ */
struct device_attribute {
struct attribute attr;
ssize_t (*show)(struct device *dev, struct device_attribute *attr,
@@ -104,6 +110,11 @@ struct device_attribute {
const char *buf, size_t count);
};
+/**
+ * struct dev_ext_attribute - Exported device attribute with extra context.
+ * @attr: Exported device attribute.
+ * @var: Pointer to context.
+ */
struct dev_ext_attribute {
struct device_attribute attr;
void *var;
@@ -121,31 +132,140 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
+ssize_t device_show_string(struct device *dev, struct device_attribute *attr,
+ char *buf);
+/**
+ * DEVICE_ATTR - Define a device attribute.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_show: Show handler. Optional, but mandatory if attribute is readable.
+ * @_store: Store handler. Optional, but mandatory if attribute is writable.
+ *
+ * Convenience macro for defining a struct device_attribute.
+ *
+ * For example, ``DEVICE_ATTR(foo, 0644, foo_show, foo_store);`` expands to:
+ *
+ * .. code-block:: c
+ *
+ * struct device_attribute dev_attr_foo = {
+ * .attr = { .name = "foo", .mode = 0644 },
+ * .show = foo_show,
+ * .store = foo_store,
+ * };
+ */
#define DEVICE_ATTR(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+
+/**
+ * DEVICE_ATTR_PREALLOC - Define a preallocated device attribute.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_show: Show handler. Optional, but mandatory if attribute is readable.
+ * @_store: Store handler. Optional, but mandatory if attribute is writable.
+ *
+ * Like DEVICE_ATTR(), but ``SYSFS_PREALLOC`` is set on @_mode.
+ */
#define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = \
__ATTR_PREALLOC(_name, _mode, _show, _store)
+
+/**
+ * DEVICE_ATTR_RW - Define a read-write device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR(), but @_mode is 0644, @_show is <_name>_show,
+ * and @_store is <_name>_store.
+ */
#define DEVICE_ATTR_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
+
+/**
+ * DEVICE_ATTR_ADMIN_RW - Define an admin-only read-write device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR_RW(), but @_mode is 0600.
+ */
#define DEVICE_ATTR_ADMIN_RW(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600)
+
+/**
+ * DEVICE_ATTR_RO - Define a readable device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR(), but @_mode is 0444 and @_show is <_name>_show.
+ */
#define DEVICE_ATTR_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
+
+/**
+ * DEVICE_ATTR_ADMIN_RO - Define an admin-only readable device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR_RO(), but @_mode is 0400.
+ */
#define DEVICE_ATTR_ADMIN_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400)
+
+/**
+ * DEVICE_ATTR_WO - Define an admin-only writable device attribute.
+ * @_name: Attribute name.
+ *
+ * Like DEVICE_ATTR(), but @_mode is 0200 and @_store is <_name>_store.
+ */
#define DEVICE_ATTR_WO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
+
+/**
+ * DEVICE_ULONG_ATTR - Define a device attribute backed by an unsigned long.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of unsigned long.
+ *
+ * Like DEVICE_ATTR(), but @_show and @_store are automatically provided
+ * such that reads and writes to the attribute from userspace affect @_var.
+ */
#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
+
+/**
+ * DEVICE_INT_ATTR - Define a device attribute backed by an int.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of int.
+ *
+ * Like DEVICE_ULONG_ATTR(), but @_var is an int.
+ */
#define DEVICE_INT_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
+
+/**
+ * DEVICE_BOOL_ATTR - Define a device attribute backed by a bool.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of bool.
+ *
+ * Like DEVICE_ULONG_ATTR(), but @_var is a bool.
+ */
#define DEVICE_BOOL_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
+
+/**
+ * DEVICE_STRING_ATTR_RO - Define a device attribute backed by a r/o string.
+ * @_name: Attribute name.
+ * @_mode: File mode.
+ * @_var: Identifier of string.
+ *
+ * Like DEVICE_ULONG_ATTR(), but @_var is a string. Because the length of the
+ * string allocation is unknown, the attribute must be read-only.
+ */
+#define DEVICE_STRING_ATTR_RO(_name, _mode, _var) \
+ struct dev_ext_attribute dev_attr_##_name = \
+ { __ATTR(_name, (_mode) & ~0222, device_show_string, NULL), (_var) }
+
#define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
struct device_attribute dev_attr_##_name = \
__ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
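
A short sketch of the DEVICE_ATTR_RW() convention documented above (names illustrative): the macro picks up level_show() and level_store() automatically and creates dev_attr_level with mode 0644:

static ssize_t level_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sysfs_emit(buf, "%d\n", 42);	/* illustrative value */
}

static ssize_t level_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	return count;		/* accept and ignore; illustrative only */
}
static DEVICE_ATTR_RW(level);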
@@ -161,130 +281,6 @@ int __must_check device_create_bin_file(struct device *dev,
void device_remove_bin_file(struct device *dev,
const struct bin_attribute *attr);
-/* device resource management */
-typedef void (*dr_release_t)(struct device *dev, void *res);
-typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
-
-#ifdef CONFIG_DEBUG_DEVRES
-void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
- int nid, const char *name) __malloc;
-#define devres_alloc(release, size, gfp) \
- __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
-#define devres_alloc_node(release, size, gfp, nid) \
- __devres_alloc_node(release, size, gfp, nid, #release)
-#else
-void *devres_alloc_node(dr_release_t release, size_t size,
- gfp_t gfp, int nid) __malloc;
-static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
-{
- return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
-}
-#endif
-
-void devres_for_each_res(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data,
- void (*fn)(struct device *, void *, void *),
- void *data);
-void devres_free(void *res);
-void devres_add(struct device *dev, void *res);
-void *devres_find(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-void *devres_get(struct device *dev, void *new_res,
- dr_match_t match, void *match_data);
-void *devres_remove(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-int devres_destroy(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-int devres_release(struct device *dev, dr_release_t release,
- dr_match_t match, void *match_data);
-
-/* devres group */
-void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp);
-void devres_close_group(struct device *dev, void *id);
-void devres_remove_group(struct device *dev, void *id);
-int devres_release_group(struct device *dev, void *id);
-
-/* managed devm_k.alloc/kfree for device drivers */
-void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
-void *devm_krealloc(struct device *dev, void *ptr, size_t size,
- gfp_t gfp) __must_check;
-__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp,
- const char *fmt, va_list ap) __malloc;
-__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp,
- const char *fmt, ...) __malloc;
-static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
-{
- return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
-}
-static inline void *devm_kmalloc_array(struct device *dev,
- size_t n, size_t size, gfp_t flags)
-{
- size_t bytes;
-
- if (unlikely(check_mul_overflow(n, size, &bytes)))
- return NULL;
-
- return devm_kmalloc(dev, bytes, flags);
-}
-static inline void *devm_kcalloc(struct device *dev,
- size_t n, size_t size, gfp_t flags)
-{
- return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
-}
-void devm_kfree(struct device *dev, const void *p);
-char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
-const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
-void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp);
-
-unsigned long devm_get_free_pages(struct device *dev,
- gfp_t gfp_mask, unsigned int order);
-void devm_free_pages(struct device *dev, unsigned long addr);
-
-void __iomem *devm_ioremap_resource(struct device *dev,
- const struct resource *res);
-void __iomem *devm_ioremap_resource_wc(struct device *dev,
- const struct resource *res);
-
-void __iomem *devm_of_iomap(struct device *dev,
- struct device_node *node, int index,
- resource_size_t *size);
-
-/* allows to add/remove a custom action to devres stack */
-int devm_add_action(struct device *dev, void (*action)(void *), void *data);
-void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
-void devm_release_action(struct device *dev, void (*action)(void *), void *data);
-
-static inline int devm_add_action_or_reset(struct device *dev,
- void (*action)(void *), void *data)
-{
- int ret;
-
- ret = devm_add_action(dev, action, data);
- if (ret)
- action(data);
-
- return ret;
-}
-
-/**
- * devm_alloc_percpu - Resource-managed alloc_percpu
- * @dev: Device to allocate per-cpu memory for
- * @type: Type to allocate per-cpu memory for
- *
- * Managed alloc_percpu. Per-cpu memory allocated with this function is
- * automatically freed on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-#define devm_alloc_percpu(dev, type) \
- ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), \
- __alignof__(type)))
-
-void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
- size_t align);
-void devm_free_percpu(struct device *dev, void __percpu *pdata);
-
struct device_dma_parameters {
/*
* a low level driver may set these to teach IOMMU code about
@@ -335,6 +331,7 @@ enum device_link_state {
#define DL_FLAG_MANAGED BIT(6)
#define DL_FLAG_SYNC_STATE_ONLY BIT(7)
#define DL_FLAG_INFERRED BIT(8)
+#define DL_FLAG_CYCLE BIT(9)
/**
* enum dl_dev_state - Device driver presence tracking information.
@@ -351,6 +348,22 @@ enum dl_dev_state {
};
/**
+ * enum device_removable - Whether the device is removable. The criteria for a
+ * device to be classified as removable are determined by its subsystem or bus.
+ * @DEVICE_REMOVABLE_NOT_SUPPORTED: This attribute is not supported for this
+ * device (default).
+ * @DEVICE_REMOVABLE_UNKNOWN: Device location is unknown.
+ * @DEVICE_FIXED: Device is not removable by the user.
+ * @DEVICE_REMOVABLE: Device is removable by the user.
+ */
+enum device_removable {
+ DEVICE_REMOVABLE_NOT_SUPPORTED = 0, /* must be 0 */
+ DEVICE_REMOVABLE_UNKNOWN,
+ DEVICE_FIXED,
+ DEVICE_REMOVABLE,
+};
+
+/**
* struct dev_links_info - Device data related to device links.
* @suppliers: List of links to supplier devices.
* @consumers: List of links to consumer devices.
@@ -365,6 +378,87 @@ struct dev_links_info {
};
/**
+ * struct dev_msi_info - Device data related to MSI
+ * @domain: The MSI interrupt domain associated to the device
+ * @data: Pointer to MSI device data
+ */
+struct dev_msi_info {
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ struct irq_domain *domain;
+ struct msi_device_data *data;
+#endif
+};
+
+/**
+ * enum device_physical_location_panel - Describes which panel surface of the
+ * system's housing the device connection point resides on.
+ * @DEVICE_PANEL_TOP: Device connection point is on the top panel.
+ * @DEVICE_PANEL_BOTTOM: Device connection point is on the bottom panel.
+ * @DEVICE_PANEL_LEFT: Device connection point is on the left panel.
+ * @DEVICE_PANEL_RIGHT: Device connection point is on the right panel.
+ * @DEVICE_PANEL_FRONT: Device connection point is on the front panel.
+ * @DEVICE_PANEL_BACK: Device connection point is on the back panel.
+ * @DEVICE_PANEL_UNKNOWN: The panel with device connection point is unknown.
+ */
+enum device_physical_location_panel {
+ DEVICE_PANEL_TOP,
+ DEVICE_PANEL_BOTTOM,
+ DEVICE_PANEL_LEFT,
+ DEVICE_PANEL_RIGHT,
+ DEVICE_PANEL_FRONT,
+ DEVICE_PANEL_BACK,
+ DEVICE_PANEL_UNKNOWN,
+};
+
+/**
+ * enum device_physical_location_vertical_position - Describes vertical
+ * position of the device connection point on the panel surface.
+ * @DEVICE_VERT_POS_UPPER: Device connection point is at upper part of panel.
+ * @DEVICE_VERT_POS_CENTER: Device connection point is at center part of panel.
+ * @DEVICE_VERT_POS_LOWER: Device connection point is at lower part of panel.
+ */
+enum device_physical_location_vertical_position {
+ DEVICE_VERT_POS_UPPER,
+ DEVICE_VERT_POS_CENTER,
+ DEVICE_VERT_POS_LOWER,
+};
+
+/**
+ * enum device_physical_location_horizontal_position - Describes horizontal
+ * position of the device connection point on the panel surface.
+ * @DEVICE_HORI_POS_LEFT: Device connection point is at left part of panel.
+ * @DEVICE_HORI_POS_CENTER: Device connection point is at center part of panel.
+ * @DEVICE_HORI_POS_RIGHT: Device connection point is at right part of panel.
+ */
+enum device_physical_location_horizontal_position {
+ DEVICE_HORI_POS_LEFT,
+ DEVICE_HORI_POS_CENTER,
+ DEVICE_HORI_POS_RIGHT,
+};
+
+/**
+ * struct device_physical_location - Device data related to physical location
+ * of the device connection point.
+ * @panel: Panel surface of the system's housing that the device connection
+ * point resides on.
+ * @vertical_position: Vertical position of the device connection point within
+ * the panel.
+ * @horizontal_position: Horizontal position of the device connection point
+ * within the panel.
+ * @dock: Set if the device connection point resides in a docking station or
+ * port replicator.
+ * @lid: Set if this device connection point resides on the lid of a laptop
+ * system.
+ */
+struct device_physical_location {
+ enum device_physical_location_panel panel;
+ enum device_physical_location_vertical_position vertical_position;
+ enum device_physical_location_horizontal_position horizontal_position;
+ bool dock;
+ bool lid;
+};
+
+/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
* In most cases, a parent device is some sort of bus or host
@@ -378,8 +472,6 @@ struct dev_links_info {
* This identifies the device type and carries type-specific
* information.
* @mutex: Mutex to synchronize calls to its driver.
- * @lockdep_mutex: An optional debug lock that a subsystem can use as a
- * peer lock to gain localized lockdep coverage of the device_lock.
* @bus: Type of bus device is on.
* @driver: Which driver has allocated this
* @platform_data: Platform data specific to the device.
@@ -399,9 +491,8 @@ struct dev_links_info {
* along with subsystem-level and driver-level callbacks.
* @em_pd: device's energy model performance domain
* @pins: For device pin management.
- * See Documentation/driver-api/pinctl.rst for details.
- * @msi_list: Hosts MSI descriptors
- * @msi_domain: The generic MSI domain this device is using.
+ * See Documentation/driver-api/pin-control.rst for details.
+ * @msi: MSI related data
* @numa_node: NUMA node this device is close to.
* @dma_ops: DMA mapping operations for this device.
* @dma_mask: Dma mask (if dma'ble device).
@@ -416,6 +507,10 @@ struct dev_links_info {
* @dma_pools: Dma pools (if dma'ble device).
* @dma_mem: Internal for coherent mem override.
* @cma_area: Contiguous memory area for dma allocations
+ * @dma_io_tlb_mem: Software IO TLB allocator. Not for driver use.
+ * @dma_io_tlb_pools: List of transient swiotlb memory pools.
+ * @dma_io_tlb_lock: Protects changes to the list of active pools.
+ * @dma_uses_io_tlb: %true if device has used the software IO TLB.
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
* @fwnode: Associated device node supplied by platform firmware.
@@ -423,7 +518,6 @@ struct dev_links_info {
* @id: device instance
* @devres_lock: Spinlock to protect the resource of the device.
* @devres_head: The resources list of the device.
- * @knode_class: The node used to add the device to the class list.
* @class: The class of the device.
* @groups: Optional attribute groups.
* @release: Callback to free the device after all references have
@@ -431,6 +525,11 @@ struct dev_links_info {
* device (i.e. the bus driver that discovered the device).
* @iommu_group: IOMMU group the device belongs to.
* @iommu: Per device generic IOMMU runtime data
+ * @physical_location: Describes physical location of the device connection
+ * point in the system housing.
+ * @removable: Whether the device can be removed from the system. This
+ * should be set by the subsystem / bus driver that discovered
+ * the device.
*
* @offline_disabled: If set, the device is permanently online.
* @offline: Set after successful invocation of bus type's .offline().
@@ -449,6 +548,9 @@ struct dev_links_info {
 * and optionally (if the coherent mask is large enough) also
* for dma allocations. This flag is managed by the dma ops
* instance from ->dma_supported.
+ * @dma_skip_sync: DMA sync operations can be skipped for coherent buffers.
+ * @dma_iommu: Device is using default IOMMU implementation for DMA and
+ * doesn't rely on dma_ops structure.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -467,16 +569,13 @@ struct device {
const char *init_name; /* initial name of the device */
const struct device_type *type;
- struct bus_type *bus; /* type of bus device is on */
+ const struct bus_type *bus; /* type of bus device is on */
struct device_driver *driver; /* which driver has allocated this
device */
void *platform_data; /* Platform specific data, device
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set_drvdata/dev_get_drvdata */
-#ifdef CONFIG_PROVE_LOCKING
- struct mutex lockdep_mutex;
-#endif
struct mutex mutex; /* mutex to synchronize calls to
* its driver.
*/
@@ -489,16 +588,11 @@ struct device {
struct em_perf_domain *em_pd;
#endif
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- struct irq_domain *msi_domain;
-#endif
#ifdef CONFIG_PINCTRL
struct dev_pin_info *pins;
#endif
-#ifdef CONFIG_GENERIC_MSI_IRQ
- struct list_head msi_list;
-#endif
-#ifdef CONFIG_DMA_OPS
+ struct dev_msi_info msi;
+#ifdef CONFIG_ARCH_HAS_DMA_OPS
const struct dma_map_ops *dma_ops;
#endif
u64 *dma_mask; /* dma mask (if dma'able device) */
@@ -522,6 +616,14 @@ struct device {
struct cma *cma_area; /* contiguous memory area for dma
allocations */
#endif
+#ifdef CONFIG_SWIOTLB
+ struct io_tlb_mem *dma_io_tlb_mem;
+#endif
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ struct list_head dma_io_tlb_pools;
+ spinlock_t dma_io_tlb_lock;
+ bool dma_uses_io_tlb;
+#endif
/* arch specific additions */
struct dev_archdata archdata;
@@ -537,13 +639,17 @@ struct device {
spinlock_t devres_lock;
struct list_head devres_head;
- struct class *class;
+ const struct class *class;
const struct attribute_group **groups; /* optional groups */
void (*release)(struct device *dev);
struct iommu_group *iommu_group;
struct dev_iommu *iommu;
+ struct device_physical_location *physical_location;
+
+ enum device_removable removable;
+
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
@@ -557,6 +663,12 @@ struct device {
#ifdef CONFIG_DMA_OPS_BYPASS
bool dma_ops_bypass : 1;
#endif
+#ifdef CONFIG_DMA_NEED_SYNC
+ bool dma_skip_sync:1;
+#endif
+#ifdef CONFIG_IOMMU_DMA
+ bool dma_iommu:1;
+#endif
};
/**
@@ -570,7 +682,7 @@ struct device {
* @flags: Link flags.
* @rpm_active: Whether or not the consumer device is runtime-PM-active.
* @kref: Count repeated addition of the same link.
- * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
+ * @rm_work: Work structure used for removing the link.
* @supplier_preactivated: Supplier has been made active before consumer probe.
*/
struct device_link {
@@ -583,16 +695,11 @@ struct device_link {
u32 flags;
refcount_t rpm_active;
struct kref kref;
-#ifdef CONFIG_SRCU
- struct rcu_head rcu_head;
-#endif
+ struct work_struct rm_work;
bool supplier_preactivated; /* Owned by consumer probe. */
};
-static inline struct device *kobj_to_dev(struct kobject *kobj)
-{
- return container_of(kobj, struct device, kobj);
-}
+#define kobj_to_dev(__kobj) container_of_const(__kobj, struct device, kobj)
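/*
 * Illustrative sketch: a typical consumer of kobj_to_dev() is a sysfs
 * callback that only receives a kobject. "foo_show" and its attribute
 * wiring are hypothetical.
 */
static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	struct device *dev = kobj_to_dev(kobj);	/* kobject -> device */

	return sysfs_emit(buf, "%s\n", dev_name(dev));
}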
/**
* device_iommu_mapped - Returns true when the device DMA is translated
@@ -607,6 +714,11 @@ static inline bool device_iommu_mapped(struct device *dev)
/* Get the wakeup routines, which depend on struct device */
#include <linux/pm_wakeup.h>
+/**
+ * dev_name - Return a device's name.
+ * @dev: Device with name to get.
+ *
+ * Return: The kobject name of the device, or its initial name if unavailable.
+ */
static inline const char *dev_name(const struct device *dev)
{
/* Use the init name until the kobject becomes available */
@@ -651,8 +763,8 @@ static inline void set_dev_node(struct device *dev, int node)
static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
{
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- return dev->msi_domain;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ return dev->msi.domain;
#else
return NULL;
#endif
@@ -660,8 +772,8 @@ static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
{
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- dev->msi_domain = d;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ dev->msi.domain = d;
#endif
}
@@ -720,6 +832,9 @@ static inline bool device_pm_not_required(struct device *dev)
static inline void device_set_pm_not_required(struct device *dev)
{
dev->power.no_pm = true;
+#ifdef CONFIG_PM
+ dev->power.no_callbacks = true;
+#endif
}
static inline void dev_pm_syscore_device(struct device *dev, bool val)
@@ -739,6 +854,42 @@ static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags)
return !!(dev->power.driver_flags & flags);
}
+static inline bool dev_pm_smart_suspend(struct device *dev)
+{
+#ifdef CONFIG_PM_SLEEP
+ return dev->power.smart_suspend;
+#else
+ return false;
+#endif
+}
+
+/**
+ * dev_pm_set_strict_midlayer - Update the device's power.strict_midlayer flag
+ * @dev: Target device.
+ * @val: New flag value.
+ *
+ * When set, power.strict_midlayer means that the middle layer power management
+ * code (typically, a bus type or a PM domain) does not expect its runtime PM
+ * suspend callback to be invoked at all during system-wide PM transitions and
+ * it does not expect its runtime PM resume callback to be invoked at any point
+ * when runtime PM is disabled for the device during system-wide PM transitions.
+ */
+static inline void dev_pm_set_strict_midlayer(struct device *dev, bool val)
+{
+#ifdef CONFIG_PM_SLEEP
+ dev->power.strict_midlayer = val;
+#endif
+}
+
+static inline bool dev_pm_strict_midlayer_is_set(struct device *dev)
+{
+#ifdef CONFIG_PM_SLEEP
+ return dev->power.strict_midlayer;
+#else
+ return false;
+#endif
+}
+
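/*
 * Illustrative sketch: a middle layer (e.g. a PM domain) could opt in at
 * attach time. "example_domain_attach" is hypothetical.
 */
static int example_domain_attach(struct device *dev)
{
	/* no runtime PM callbacks are expected during system-wide PM */
	dev_pm_set_strict_midlayer(dev, true);
	return 0;
}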
static inline void device_lock(struct device *dev)
{
mutex_lock(&dev->mutex);
@@ -759,18 +910,13 @@ static inline void device_unlock(struct device *dev)
mutex_unlock(&dev->mutex);
}
+DEFINE_GUARD(device, struct device *, device_lock(_T), device_unlock(_T))
+
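/*
 * Illustrative sketch: with the guard defined above, a scope-based lock
 * replaces a manual device_lock()/device_unlock() pair. "frob_device" is
 * hypothetical.
 */
static int frob_device(struct device *dev)
{
	guard(device)(dev);	/* device_unlock(dev) runs at scope exit */

	if (!dev->driver)
		return -ENODEV;
	return 0;
}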
static inline void device_lock_assert(struct device *dev)
{
lockdep_assert_held(&dev->mutex);
}
-static inline struct device_node *dev_of_node(struct device *dev)
-{
- if (!IS_ENABLED(CONFIG_OF) || !dev)
- return NULL;
- return dev->of_node;
-}
-
static inline bool dev_has_sync_state(struct device *dev)
{
if (!dev)
@@ -782,6 +928,34 @@ static inline bool dev_has_sync_state(struct device *dev)
return false;
}
+static inline int dev_set_drv_sync_state(struct device *dev,
+ void (*fn)(struct device *dev))
+{
+ if (!dev || !dev->driver)
+ return 0;
+ if (dev->driver->sync_state && dev->driver->sync_state != fn)
+ return -EBUSY;
+ if (!dev->driver->sync_state)
+ dev->driver->sync_state = fn;
+ return 0;
+}
+
+static inline void dev_set_removable(struct device *dev,
+ enum device_removable removable)
+{
+ dev->removable = removable;
+}
+
+static inline bool dev_is_removable(struct device *dev)
+{
+ return dev->removable == DEVICE_REMOVABLE;
+}
+
+static inline bool dev_removable_is_valid(struct device *dev)
+{
+ return dev->removable != DEVICE_REMOVABLE_NOT_SUPPORTED;
+}
+
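/*
 * Illustrative sketch: a bus driver that knows whether a connection point
 * is user-accessible could mark devices during enumeration.
 * "example_bus_add_device" is hypothetical.
 */
static void example_bus_add_device(struct device *dev, bool hotplug_slot)
{
	dev_set_removable(dev, hotplug_slot ? DEVICE_REMOVABLE
					    : DEVICE_FIXED);
}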
/*
* High level routines for use by the bus drivers
*/
@@ -790,35 +964,121 @@ void device_unregister(struct device *dev);
void device_initialize(struct device *dev);
int __must_check device_add(struct device *dev);
void device_del(struct device *dev);
-int device_for_each_child(struct device *dev, void *data,
- int (*fn)(struct device *dev, void *data));
-int device_for_each_child_reverse(struct device *dev, void *data,
- int (*fn)(struct device *dev, void *data));
-struct device *device_find_child(struct device *dev, void *data,
- int (*match)(struct device *dev, void *data));
-struct device *device_find_child_by_name(struct device *parent,
- const char *name);
+
+DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T))
+
+int device_for_each_child(struct device *parent, void *data,
+ device_iter_t fn);
+int device_for_each_child_reverse(struct device *parent, void *data,
+ device_iter_t fn);
+int device_for_each_child_reverse_from(struct device *parent,
+ struct device *from, void *data,
+ device_iter_t fn);
+struct device *device_find_child(struct device *parent, const void *data,
+ device_match_t match);
+/**
+ * device_find_child_by_name - device iterator for locating a child device.
+ * @parent: parent struct device
+ * @name: name of the child device
+ *
+ * This is similar to the device_find_child() function above, but it
+ * returns a reference to a device that has the name @name.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
+ */
+static inline struct device *device_find_child_by_name(struct device *parent,
+ const char *name)
+{
+ return device_find_child(parent, name, device_match_name);
+}
+
+/**
+ * device_find_any_child - device iterator for locating a child device, if any.
+ * @parent: parent struct device
+ *
+ * This is similar to the device_find_child() function above, but it
+ * returns a reference to a child device, if any.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
+ */
+static inline struct device *device_find_any_child(struct device *parent)
+{
+ return device_find_child(parent, NULL, device_match_any);
+}
+
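/*
 * Illustrative sketch: a child lookup and the matching put_device().
 * "parent_has_child_named" is hypothetical.
 */
static bool parent_has_child_named(struct device *parent, const char *name)
{
	struct device *child = device_find_child_by_name(parent, name);

	if (!child)
		return false;
	put_device(child);	/* drop the reference taken by the lookup */
	return true;
}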
int device_rename(struct device *dev, const char *new_name);
int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order);
int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
-const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
- kgid_t *gid, const char **tmp);
-int device_is_dependent(struct device *dev, void *target);
static inline bool device_supports_offline(struct device *dev)
{
return dev->bus && dev->bus->offline && dev->bus->online;
}
+#define __device_lock_set_class(dev, name, key) \
+do { \
+ struct device *__d2 __maybe_unused = dev; \
+ lock_set_class(&__d2->mutex.dep_map, name, key, 0, _THIS_IP_); \
+} while (0)
+
+/**
+ * device_lock_set_class - Specify a temporary lock class while a device
+ * is attached to a driver
+ * @dev: device to modify
+ * @key: lock class key data
+ *
+ * This must be called with the device_lock() already held, for example
+ * from driver ->probe(). Take care to only override the default
+ * lockdep_no_validate class.
+ */
+#ifdef CONFIG_LOCKDEP
+#define device_lock_set_class(dev, key) \
+do { \
+ struct device *__d = dev; \
+ dev_WARN_ONCE(__d, !lockdep_match_class(&__d->mutex, \
+ &__lockdep_no_validate__), \
+ "overriding existing custom lock class\n"); \
+ __device_lock_set_class(__d, #key, key); \
+} while (0)
+#else
+#define device_lock_set_class(dev, key) __device_lock_set_class(dev, #key, key)
+#endif
+
+/**
+ * device_lock_reset_class - Return a device to the default lockdep novalidate state
+ * @dev: device to modify
+ *
+ * This must be called with the device_lock() already held, for example
+ * from driver ->remove().
+ */
+#define device_lock_reset_class(dev) \
+do { \
+ struct device *__d __maybe_unused = dev; \
+ lock_set_novalidate_class(&__d->mutex.dep_map, "&dev->mutex", \
+ _THIS_IP_); \
+} while (0)
+
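/*
 * Illustrative sketch: a driver giving the device lock its own lockdep
 * class while bound. "example_probe"/"example_remove" are hypothetical;
 * both run with the device lock held, as required above.
 */
static struct lock_class_key example_mutex_key;

static int example_probe(struct device *dev)
{
	device_lock_set_class(dev, &example_mutex_key);
	return 0;
}

static void example_remove(struct device *dev)
{
	device_lock_reset_class(dev);
}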
void lock_device_hotplug(void);
void unlock_device_hotplug(void);
int lock_device_hotplug_sysfs(void);
int device_offline(struct device *dev);
int device_online(struct device *dev);
+
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+void device_set_node(struct device *dev, struct fwnode_handle *fwnode);
+int device_add_of_node(struct device *dev, struct device_node *of_node);
+void device_remove_of_node(struct device *dev);
void device_set_of_node_from_dev(struct device *dev, const struct device *dev2);
+struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode);
+
+static inline struct device_node *dev_of_node(struct device *dev)
+{
+ if (!IS_ENABLED(CONFIG_OF) || !dev)
+ return NULL;
+ return dev->of_node;
+}
static inline int dev_num_vf(struct device *dev)
{
@@ -847,10 +1107,12 @@ static inline void *dev_get_platdata(const struct device *dev)
* Manual binding of a device to driver. See drivers/base/bus.c
* for information on use.
*/
+int __must_check device_driver_attach(const struct device_driver *drv,
+ struct device *dev);
int __must_check device_bind_driver(struct device *dev);
void device_release_driver(struct device *dev);
int __must_check device_attach(struct device *dev);
-int __must_check driver_attach(struct device_driver *drv);
+int __must_check driver_attach(const struct device_driver *drv);
void device_initial_probe(struct device *dev);
int __must_check device_reprobe(struct device *dev);
@@ -860,13 +1122,13 @@ bool device_is_bound(struct device *dev);
* Easy functions for dynamically creating devices on the fly
*/
__printf(5, 6) struct device *
-device_create(struct class *cls, struct device *parent, dev_t devt,
+device_create(const struct class *cls, struct device *parent, dev_t devt,
void *drvdata, const char *fmt, ...);
__printf(6, 7) struct device *
-device_create_with_groups(struct class *cls, struct device *parent, dev_t devt,
+device_create_with_groups(const struct class *cls, struct device *parent, dev_t devt,
void *drvdata, const struct attribute_group **groups,
const char *fmt, ...);
-void device_destroy(struct class *cls, dev_t devt);
+void device_destroy(const struct class *cls, dev_t devt);
int __must_check device_add_groups(struct device *dev,
const struct attribute_group **groups);
@@ -886,28 +1148,11 @@ static inline void device_remove_group(struct device *dev,
{
const struct attribute_group *groups[] = { grp, NULL };
- return device_remove_groups(dev, groups);
+ device_remove_groups(dev, groups);
}
-int __must_check devm_device_add_groups(struct device *dev,
- const struct attribute_group **groups);
-void devm_device_remove_groups(struct device *dev,
- const struct attribute_group **groups);
int __must_check devm_device_add_group(struct device *dev,
const struct attribute_group *grp);
-void devm_device_remove_group(struct device *dev,
- const struct attribute_group *grp);
-
-/*
- * Platform "fixup" functions - allow the platform to have their say
- * about devices and actions that the general device layer doesn't
- * know about.
- */
-/* Notify platform of device discovery */
-extern int (*platform_notify)(struct device *dev);
-
-extern int (*platform_notify_remove)(struct device *dev);
-
/*
* get_device - atomically increment the reference count for the device.
@@ -915,6 +1160,9 @@ extern int (*platform_notify_remove)(struct device *dev);
*/
struct device *get_device(struct device *dev);
void put_device(struct device *dev);
+
+DEFINE_FREE(put_device, struct device *, if (_T) put_device(_T))
+
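/*
 * Illustrative sketch: with the cleanup helper above, a looked-up device
 * is put automatically on every return path. "example_bus_type" is a
 * placeholder bus.
 */
static int report_device(const char *name)
{
	struct device *dev __free(put_device) =
		bus_find_device_by_name(&example_bus_type, NULL, name);

	if (!dev)
		return -ENODEV;
	dev_info(dev, "found\n");
	return 0;	/* put_device(dev) runs automatically here */
}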
bool kill_device(struct device *dev);
#ifdef CONFIG_DEVTMPFS
@@ -936,9 +1184,12 @@ void device_link_del(struct device_link *link);
void device_link_remove(void *consumer, struct device *supplier);
void device_links_supplier_sync_state_pause(void);
void device_links_supplier_sync_state_resume(void);
+void device_link_wait_removal(void);
-extern __printf(3, 4)
-int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
+static inline bool device_link_test(const struct device_link *link, u32 flags)
+{
+ return !!(link->flags & flags);
+}
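/*
 * Illustrative sketch: testing a single link flag, e.g. whether the link
 * manages runtime PM. "link_handles_rpm" is hypothetical.
 */
static bool link_handles_rpm(const struct device_link *link)
{
	return device_link_test(link, DL_FLAG_PM_RUNTIME);
}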
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
@@ -946,10 +1197,4 @@ int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
#define MODULE_ALIAS_CHARDEV_MAJOR(major) \
MODULE_ALIAS("char-major-" __stringify(major) "-*")
-#ifdef CONFIG_SYSFS_DEPRECATED
-extern long sysfs_deprecated;
-#else
-#define sysfs_deprecated 0
-#endif
-
#endif /* _DEVICE_H_ */
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index 1ea5e1d1545b..99b1002b3e31 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -26,7 +26,6 @@ struct fwnode_handle;
*
* @name: The name of the bus.
* @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
- * @dev_root: Default device to use as the parent.
* @bus_groups: Default attributes of the bus.
* @dev_groups: Default attributes of the devices on the bus.
* @drv_groups: Default attributes of the device drivers on the bus.
@@ -49,6 +48,7 @@ struct fwnode_handle;
* will never get called until they do.
 * @remove: Called when a device is removed from this bus.
* @shutdown: Called at shut-down time to quiesce the device.
+ * @irq_get_affinity: Get IRQ affinity mask for the device on this bus.
*
* @online: Called to put the device back online (after offlining it).
* @offline: Called to put the device offline for hot-removal. May fail.
@@ -59,14 +59,10 @@ struct fwnode_handle;
* bus supports.
* @dma_configure: Called to setup DMA configuration on a device on
* this bus.
+ * @dma_cleanup: Called to cleanup DMA configuration on a device on
+ * this bus.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
- * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
- * driver implementations to a bus and allow the driver to do
- * bus-specific setup
- * @p: The private data of the driver core, only the driver core can
- * touch this.
- * @lock_key: Lock class key for use by the lock validator
* @need_parent_lock: When probing or removing a device on this bus, the
* device core should lock the device's parent.
*
@@ -82,17 +78,18 @@ struct fwnode_handle;
struct bus_type {
const char *name;
const char *dev_name;
- struct device *dev_root;
const struct attribute_group **bus_groups;
const struct attribute_group **dev_groups;
const struct attribute_group **drv_groups;
- int (*match)(struct device *dev, struct device_driver *drv);
- int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
+ int (*match)(struct device *dev, const struct device_driver *drv);
+ int (*uevent)(const struct device *dev, struct kobj_uevent_env *env);
int (*probe)(struct device *dev);
void (*sync_state)(struct device *dev);
- int (*remove)(struct device *dev);
+ void (*remove)(struct device *dev);
void (*shutdown)(struct device *dev);
+ const struct cpumask *(*irq_get_affinity)(struct device *dev,
+ unsigned int irq_vec);
int (*online)(struct device *dev);
int (*offline)(struct device *dev);
@@ -103,27 +100,23 @@ struct bus_type {
int (*num_vf)(struct device *dev);
int (*dma_configure)(struct device *dev);
+ void (*dma_cleanup)(struct device *dev);
const struct dev_pm_ops *pm;
- const struct iommu_ops *iommu_ops;
-
- struct subsys_private *p;
- struct lock_class_key lock_key;
-
bool need_parent_lock;
};
-extern int __must_check bus_register(struct bus_type *bus);
+int __must_check bus_register(const struct bus_type *bus);
-extern void bus_unregister(struct bus_type *bus);
+void bus_unregister(const struct bus_type *bus);
-extern int __must_check bus_rescan_devices(struct bus_type *bus);
+int __must_check bus_rescan_devices(const struct bus_type *bus);
struct bus_attribute {
struct attribute attr;
- ssize_t (*show)(struct bus_type *bus, char *buf);
- ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
+ ssize_t (*show)(const struct bus_type *bus, char *buf);
+ ssize_t (*store)(const struct bus_type *bus, const char *buf, size_t count);
};
#define BUS_ATTR_RW(_name) \
@@ -133,35 +126,33 @@ struct bus_attribute {
#define BUS_ATTR_WO(_name) \
struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
-extern int __must_check bus_create_file(struct bus_type *,
- struct bus_attribute *);
-extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
+int __must_check bus_create_file(const struct bus_type *bus, struct bus_attribute *attr);
+void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr);
+
+/* Matching function type for drivers/base APIs to find a specific device */
+typedef int (*device_match_t)(struct device *dev, const void *data);
/* Generic device matching functions that all busses can use to match with */
int device_match_name(struct device *dev, const void *name);
+int device_match_type(struct device *dev, const void *type);
int device_match_of_node(struct device *dev, const void *np);
int device_match_fwnode(struct device *dev, const void *fwnode);
int device_match_devt(struct device *dev, const void *pdevt);
int device_match_acpi_dev(struct device *dev, const void *adev);
+int device_match_acpi_handle(struct device *dev, const void *handle);
int device_match_any(struct device *dev, const void *unused);
+/* Device iterating function type for various driver core for_each APIs */
+typedef int (*device_iter_t)(struct device *dev, void *data);
+
/* iterator helpers for buses */
-struct subsys_dev_iter {
- struct klist_iter ki;
- const struct device_type *type;
-};
-void subsys_dev_iter_init(struct subsys_dev_iter *iter,
- struct bus_type *subsys,
- struct device *start,
- const struct device_type *type);
-struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
-void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
-
-int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
- int (*fn)(struct device *dev, void *data));
-struct device *bus_find_device(struct bus_type *bus, struct device *start,
- const void *data,
- int (*match)(struct device *dev, const void *data));
+int bus_for_each_dev(const struct bus_type *bus, struct device *start,
+ void *data, device_iter_t fn);
+struct device *bus_find_device(const struct bus_type *bus, struct device *start,
+ const void *data, device_match_t match);
+struct device *bus_find_device_reverse(const struct bus_type *bus,
+ struct device *start, const void *data,
+ device_match_t match);
/**
* bus_find_device_by_name - device iterator for locating a particular device
* of a specific name.
@@ -169,7 +160,7 @@ struct device *bus_find_device(struct bus_type *bus, struct device *start,
* @start: Device to begin with
* @name: name of the device to match
*/
-static inline struct device *bus_find_device_by_name(struct bus_type *bus,
+static inline struct device *bus_find_device_by_name(const struct bus_type *bus,
struct device *start,
const char *name)
{
@@ -183,7 +174,7 @@ static inline struct device *bus_find_device_by_name(struct bus_type *bus,
* @np: of_node of the device to match.
*/
static inline struct device *
-bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np)
+bus_find_device_by_of_node(const struct bus_type *bus, const struct device_node *np)
{
return bus_find_device(bus, NULL, np, device_match_of_node);
}
@@ -195,7 +186,7 @@ bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np)
* @fwnode: fwnode of the device to match.
*/
static inline struct device *
-bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwnode)
+bus_find_device_by_fwnode(const struct bus_type *bus, const struct fwnode_handle *fwnode)
{
return bus_find_device(bus, NULL, fwnode, device_match_fwnode);
}
@@ -206,7 +197,7 @@ bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwno
* @bus: bus type
* @devt: device type of the device to match.
*/
-static inline struct device *bus_find_device_by_devt(struct bus_type *bus,
+static inline struct device *bus_find_device_by_devt(const struct bus_type *bus,
dev_t devt)
{
return bus_find_device(bus, NULL, &devt, device_match_devt);
@@ -219,7 +210,7 @@ static inline struct device *bus_find_device_by_devt(struct bus_type *bus,
* @cur: device to begin the search with.
*/
static inline struct device *
-bus_find_next_device(struct bus_type *bus,struct device *cur)
+bus_find_next_device(const struct bus_type *bus, struct device *cur)
{
return bus_find_device(bus, cur, NULL, device_match_any);
}
@@ -234,23 +225,21 @@ struct acpi_device;
* @adev: ACPI COMPANION device to match.
*/
static inline struct device *
-bus_find_device_by_acpi_dev(struct bus_type *bus, const struct acpi_device *adev)
+bus_find_device_by_acpi_dev(const struct bus_type *bus, const struct acpi_device *adev)
{
return bus_find_device(bus, NULL, adev, device_match_acpi_dev);
}
#else
static inline struct device *
-bus_find_device_by_acpi_dev(struct bus_type *bus, const void *adev)
+bus_find_device_by_acpi_dev(const struct bus_type *bus, const void *adev)
{
return NULL;
}
#endif
-struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
- struct device *hint);
-int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
+int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start,
void *data, int (*fn)(struct device_driver *, void *));
-void bus_sort_breadthfirst(struct bus_type *bus,
+void bus_sort_breadthfirst(const struct bus_type *bus,
int (*compare)(const struct device *a,
const struct device *b));
/*
@@ -261,28 +250,40 @@ void bus_sort_breadthfirst(struct bus_type *bus,
*/
struct notifier_block;
-extern int bus_register_notifier(struct bus_type *bus,
- struct notifier_block *nb);
-extern int bus_unregister_notifier(struct bus_type *bus,
- struct notifier_block *nb);
+int bus_register_notifier(const struct bus_type *bus, struct notifier_block *nb);
+int bus_unregister_notifier(const struct bus_type *bus, struct notifier_block *nb);
-/* All 4 notifers below get called with the target struct device *
- * as an argument. Note that those functions are likely to be called
- * with the device lock held in the core, so be careful.
+/**
+ * enum bus_notifier_event - Bus Notifier events that have happened
+ * @BUS_NOTIFY_ADD_DEVICE: device is added to this bus
+ * @BUS_NOTIFY_DEL_DEVICE: device is about to be removed from this bus
+ * @BUS_NOTIFY_REMOVED_DEVICE: device is successfully removed from this bus
+ * @BUS_NOTIFY_BIND_DRIVER: a driver is about to be bound to this device on this bus
+ * @BUS_NOTIFY_BOUND_DRIVER: a driver is successfully bound to this device on this bus
+ * @BUS_NOTIFY_UNBIND_DRIVER: a driver is about to be unbound from this device on this bus
+ * @BUS_NOTIFY_UNBOUND_DRIVER: a driver is successfully unbound from this device on this bus
+ * @BUS_NOTIFY_DRIVER_NOT_BOUND: a driver failed to be bound to this device on this bus
+ *
+ * These are the values passed to a bus notifier when a specific event happens.
+ *
+ * Note that bus notifiers are likely to be called with the device lock already
+ * held by the driver core, so be careful in any notifier callback as to what
+ * you do with the device structure.
+ *
+ * All bus notifiers are called with the target struct device * as an argument.
*/
-#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
-#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */
-#define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */
-#define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be
- bound */
-#define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */
-#define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be
- unbound */
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound
- from the device */
-#define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */
-
-extern struct kset *bus_get_kset(struct bus_type *bus);
-extern struct klist *bus_get_device_klist(struct bus_type *bus);
+enum bus_notifier_event {
+ BUS_NOTIFY_ADD_DEVICE,
+ BUS_NOTIFY_DEL_DEVICE,
+ BUS_NOTIFY_REMOVED_DEVICE,
+ BUS_NOTIFY_BIND_DRIVER,
+ BUS_NOTIFY_BOUND_DRIVER,
+ BUS_NOTIFY_UNBIND_DRIVER,
+ BUS_NOTIFY_UNBOUND_DRIVER,
+ BUS_NOTIFY_DRIVER_NOT_BOUND,
+};
+
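/*
 * Illustrative sketch: a notifier watching one bus for driver binding.
 * "example_notifier_call" is hypothetical; register it with
 * bus_register_notifier(bus, &example_nb).
 */
static int example_notifier_call(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;	/* target device, see above */

	if (action == BUS_NOTIFY_BOUND_DRIVER)
		dev_info(dev, "driver bound\n");
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_notifier_call,
};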
+struct kset *bus_get_kset(const struct bus_type *bus);
+struct device *bus_get_dev_root(const struct bus_type *bus);
#endif
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
index e61ec5502019..65880e60c720 100644
--- a/include/linux/device/class.h
+++ b/include/linux/device/class.h
@@ -25,10 +25,8 @@ struct fwnode_handle;
/**
* struct class - device classes
* @name: Name of the class.
- * @owner: The module owner.
* @class_groups: Default attributes of this class.
* @dev_groups: Default attributes of the devices that belong to the class.
- * @dev_kobj: The kobject that represents this class and links it into the hierarchy.
* @dev_uevent: Called when a device is added, removed from this class, or a
* few other things that generate uevents to add the environment
* variables.
@@ -42,8 +40,6 @@ struct fwnode_handle;
* for the devices belonging to the class. Usually tied to
* device's namespace.
* @pm: The default device power management operations of this class.
- * @p: The private data of the driver core, no one other than the
- * driver core can touch this.
*
* A class is a higher-level view of a device that abstracts out low-level
* implementation details. Drivers may see a SCSI disk or an ATA disk, but,
@@ -53,70 +49,51 @@ struct fwnode_handle;
*/
struct class {
const char *name;
- struct module *owner;
const struct attribute_group **class_groups;
const struct attribute_group **dev_groups;
- struct kobject *dev_kobj;
- int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
- char *(*devnode)(struct device *dev, umode_t *mode);
+ int (*dev_uevent)(const struct device *dev, struct kobj_uevent_env *env);
+ char *(*devnode)(const struct device *dev, umode_t *mode);
- void (*class_release)(struct class *class);
+ void (*class_release)(const struct class *class);
void (*dev_release)(struct device *dev);
int (*shutdown_pre)(struct device *dev);
const struct kobj_ns_type_operations *ns_type;
- const void *(*namespace)(struct device *dev);
+ const void *(*namespace)(const struct device *dev);
- void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid);
+ void (*get_ownership)(const struct device *dev, kuid_t *uid, kgid_t *gid);
const struct dev_pm_ops *pm;
-
- struct subsys_private *p;
};
struct class_dev_iter {
struct klist_iter ki;
const struct device_type *type;
+ struct subsys_private *sp;
};
-extern struct kobject *sysfs_dev_block_kobj;
-extern struct kobject *sysfs_dev_char_kobj;
-extern int __must_check __class_register(struct class *class,
- struct lock_class_key *key);
-extern void class_unregister(struct class *class);
-
-/* This is a #define to keep the compiler from merging different
- * instances of the __key variable */
-#define class_register(class) \
-({ \
- static struct lock_class_key __key; \
- __class_register(class, &__key); \
-})
+int __must_check class_register(const struct class *class);
+void class_unregister(const struct class *class);
+bool class_is_registered(const struct class *class);
struct class_compat;
struct class_compat *class_compat_register(const char *name);
void class_compat_unregister(struct class_compat *cls);
-int class_compat_create_link(struct class_compat *cls, struct device *dev,
- struct device *device_link);
-void class_compat_remove_link(struct class_compat *cls, struct device *dev,
- struct device *device_link);
-
-extern void class_dev_iter_init(struct class_dev_iter *iter,
- struct class *class,
- struct device *start,
- const struct device_type *type);
-extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
-extern void class_dev_iter_exit(struct class_dev_iter *iter);
-
-extern int class_for_each_device(struct class *class, struct device *start,
- void *data,
- int (*fn)(struct device *dev, void *data));
-extern struct device *class_find_device(struct class *class,
- struct device *start, const void *data,
- int (*match)(struct device *, const void *));
+int class_compat_create_link(struct class_compat *cls, struct device *dev);
+void class_compat_remove_link(struct class_compat *cls, struct device *dev);
+
+void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
+ const struct device *start, const struct device_type *type);
+struct device *class_dev_iter_next(struct class_dev_iter *iter);
+void class_dev_iter_exit(struct class_dev_iter *iter);
+
+int class_for_each_device(const struct class *class, const struct device *start,
+ void *data, device_iter_t fn);
+struct device *class_find_device(const struct class *class, const struct device *start,
+ const void *data, device_match_t match);
/**
* class_find_device_by_name - device iterator for locating a particular device
@@ -124,7 +101,7 @@ extern struct device *class_find_device(struct class *class,
* @class: class type
* @name: name of the device to match
*/
-static inline struct device *class_find_device_by_name(struct class *class,
+static inline struct device *class_find_device_by_name(const struct class *class,
const char *name)
{
return class_find_device(class, NULL, name, device_match_name);
@@ -136,8 +113,8 @@ static inline struct device *class_find_device_by_name(struct class *class,
* @class: class type
* @np: of_node of the device to match.
*/
-static inline struct device *
-class_find_device_by_of_node(struct class *class, const struct device_node *np)
+static inline struct device *class_find_device_by_of_node(const struct class *class,
+ const struct device_node *np)
{
return class_find_device(class, NULL, np, device_match_of_node);
}
@@ -148,9 +125,8 @@ class_find_device_by_of_node(struct class *class, const struct device_node *np)
* @class: class type
* @fwnode: fwnode of the device to match.
*/
-static inline struct device *
-class_find_device_by_fwnode(struct class *class,
- const struct fwnode_handle *fwnode)
+static inline struct device *class_find_device_by_fwnode(const struct class *class,
+ const struct fwnode_handle *fwnode)
{
return class_find_device(class, NULL, fwnode, device_match_fwnode);
}
@@ -161,7 +137,7 @@ class_find_device_by_fwnode(struct class *class,
* @class: class type
* @devt: device type of the device to match.
*/
-static inline struct device *class_find_device_by_devt(struct class *class,
+static inline struct device *class_find_device_by_devt(const struct class *class,
dev_t devt)
{
return class_find_device(class, NULL, &devt, device_match_devt);
@@ -175,14 +151,14 @@ struct acpi_device;
* @class: class type
* @adev: ACPI_COMPANION device to match.
*/
-static inline struct device *
-class_find_device_by_acpi_dev(struct class *class, const struct acpi_device *adev)
+static inline struct device *class_find_device_by_acpi_dev(const struct class *class,
+ const struct acpi_device *adev)
{
return class_find_device(class, NULL, adev, device_match_acpi_dev);
}
#else
-static inline struct device *
-class_find_device_by_acpi_dev(struct class *class, const void *adev)
+static inline struct device *class_find_device_by_acpi_dev(const struct class *class,
+ const void *adev)
{
return NULL;
}
@@ -190,10 +166,10 @@ class_find_device_by_acpi_dev(struct class *class, const void *adev)
struct class_attribute {
struct attribute attr;
- ssize_t (*show)(struct class *class, struct class_attribute *attr,
+ ssize_t (*show)(const struct class *class, const struct class_attribute *attr,
char *buf);
- ssize_t (*store)(struct class *class, struct class_attribute *attr,
- const char *buf, size_t count);
+ ssize_t (*store)(const struct class *class, const struct class_attribute *attr,
+ const char *buf, size_t count);
};
#define CLASS_ATTR_RW(_name) \
@@ -203,23 +179,21 @@ struct class_attribute {
#define CLASS_ATTR_WO(_name) \
struct class_attribute class_attr_##_name = __ATTR_WO(_name)
-extern int __must_check class_create_file_ns(struct class *class,
- const struct class_attribute *attr,
- const void *ns);
-extern void class_remove_file_ns(struct class *class,
- const struct class_attribute *attr,
- const void *ns);
+int __must_check class_create_file_ns(const struct class *class, const struct class_attribute *attr,
+ const void *ns);
+void class_remove_file_ns(const struct class *class, const struct class_attribute *attr,
+ const void *ns);
-static inline int __must_check class_create_file(struct class *class,
- const struct class_attribute *attr)
+static inline int __must_check class_create_file(const struct class *class,
+ const struct class_attribute *attr)
{
return class_create_file_ns(class, attr, NULL);
}
-static inline void class_remove_file(struct class *class,
+static inline void class_remove_file(const struct class *class,
const struct class_attribute *attr)
{
- return class_remove_file_ns(class, attr, NULL);
+ class_remove_file_ns(class, attr, NULL);
}
/* Simple class attribute that is just a static string */
@@ -235,46 +209,21 @@ struct class_attribute_string {
struct class_attribute_string class_attr_##_name = \
_CLASS_ATTR_STRING(_name, _mode, _str)
-extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
- char *buf);
+ssize_t show_class_attr_string(const struct class *class, const struct class_attribute *attr,
+ char *buf);
struct class_interface {
struct list_head node;
- struct class *class;
+ const struct class *class;
- int (*add_dev) (struct device *, struct class_interface *);
- void (*remove_dev) (struct device *, struct class_interface *);
+ int (*add_dev) (struct device *dev);
+ void (*remove_dev) (struct device *dev);
};
-extern int __must_check class_interface_register(struct class_interface *);
-extern void class_interface_unregister(struct class_interface *);
-
-extern struct class * __must_check __class_create(struct module *owner,
- const char *name,
- struct lock_class_key *key);
-extern void class_destroy(struct class *cls);
-
-/* This is a #define to keep the compiler from merging different
- * instances of the __key variable */
-
-/**
- * class_create - create a struct class structure
- * @owner: pointer to the module that is to "own" this struct class
- * @name: pointer to a string for the name of this class.
- *
- * This is used to create a struct class pointer that can then be used
- * in calls to device_create().
- *
- * Returns &struct class pointer on success, or ERR_PTR() on error.
- *
- * Note, the pointer created here is to be destroyed when finished by
- * making a call to class_destroy().
- */
-#define class_create(owner, name) \
-({ \
- static struct lock_class_key __key; \
- __class_create(owner, name, &__key); \
-})
+int __must_check class_interface_register(struct class_interface *);
+void class_interface_unregister(struct class_interface *);
+struct class * __must_check class_create(const char *name);
+void class_destroy(const struct class *cls);
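/*
 * Illustrative sketch: the owner-less class_create() as declared above.
 * "example" is a placeholder class name.
 */
static struct class *example_class;

static int __init example_class_init(void)
{
	example_class = class_create("example");
	return PTR_ERR_OR_ZERO(example_class);
}

static void __exit example_class_exit(void)
{
	class_destroy(example_class);
}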
#endif /* _DEVICE_CLASS_H_ */
diff --git a/include/linux/device/devres.h b/include/linux/device/devres.h
new file mode 100644
index 000000000000..9c1e3d643d69
--- /dev/null
+++ b/include/linux/device/devres.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DEVICE_DEVRES_H_
+#define _DEVICE_DEVRES_H_
+
+#include <linux/err.h>
+#include <linux/gfp_types.h>
+#include <linux/numa.h>
+#include <linux/overflow.h>
+#include <linux/stdarg.h>
+#include <linux/types.h>
+#include <asm/bug.h>
+#include <asm/percpu.h>
+
+struct device;
+struct device_node;
+struct resource;
+
+/* device resource management */
+typedef void (*dr_release_t)(struct device *dev, void *res);
+typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
+
+void * __malloc
+__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid, const char *name);
+#define devres_alloc(release, size, gfp) \
+ __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
+#define devres_alloc_node(release, size, gfp, nid) \
+ __devres_alloc_node(release, size, gfp, nid, #release)
+
+void devres_for_each_res(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data,
+ void (*fn)(struct device *, void *, void *),
+ void *data);
+void devres_free(void *res);
+void devres_add(struct device *dev, void *res);
+void *devres_find(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
+void *devres_get(struct device *dev, void *new_res, dr_match_t match, void *match_data);
+void *devres_remove(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
+int devres_destroy(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
+int devres_release(struct device *dev, dr_release_t release, dr_match_t match, void *match_data);
+
+/* devres group */
+void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp);
+void devres_close_group(struct device *dev, void *id);
+void devres_remove_group(struct device *dev, void *id);
+int devres_release_group(struct device *dev, void *id);
+
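/*
 * Illustrative sketch: a devres group lets one failed sub-step release
 * its managed resources as a unit. "setup_part" is hypothetical.
 */
static int setup_part(struct device *dev)
{
	void *group = devres_open_group(dev, NULL, GFP_KERNEL);

	if (!group)
		return -ENOMEM;

	if (!devm_kzalloc(dev, 64, GFP_KERNEL)) {
		devres_release_group(dev, group);	/* undo this step only */
		return -ENOMEM;
	}

	devres_close_group(dev, group);
	return 0;
}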
+/* managed devm_k.alloc/kfree for device drivers */
+void * __alloc_size(2)
+devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
+void * __must_check __realloc_size(3)
+devm_krealloc(struct device *dev, void *ptr, size_t size, gfp_t gfp);
+static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
+{
+ return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
+}
+static inline void *devm_kmalloc_array(struct device *dev, size_t n, size_t size, gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
+ return NULL;
+
+ return devm_kmalloc(dev, bytes, flags);
+}
+static inline void *devm_kcalloc(struct device *dev, size_t n, size_t size, gfp_t flags)
+{
+ return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
+}
+static inline __realloc_size(3, 4) void * __must_check
+devm_krealloc_array(struct device *dev, void *p, size_t new_n, size_t new_size, gfp_t flags)
+{
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
+ return NULL;
+
+ return devm_krealloc(dev, p, bytes, flags);
+}
+
+void devm_kfree(struct device *dev, const void *p);
+
+void * __realloc_size(3)
+devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp);
+const void *
+devm_kmemdup_const(struct device *dev, const void *src, size_t len, gfp_t gfp);
+static inline void *devm_kmemdup_array(struct device *dev, const void *src,
+ size_t n, size_t size, gfp_t flags)
+{
+ return devm_kmemdup(dev, src, size_mul(size, n), flags);
+}
+
+char * __malloc
+devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
+const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp);
+char * __printf(3, 0) __malloc
+devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap);
+char * __printf(3, 4) __malloc
+devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
+
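/*
 * Illustrative sketch: device-managed allocations made in probe are freed
 * automatically on detach. "example_priv" and "example_alloc_probe" are
 * hypothetical.
 */
struct example_priv {
	char *label;
	u32 *table;
};

static int example_alloc_probe(struct device *dev)
{
	struct example_priv *p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	p->table = devm_kcalloc(dev, 16, sizeof(*p->table), GFP_KERNEL);
	p->label = devm_kasprintf(dev, GFP_KERNEL, "%s-ctl", dev_name(dev));
	if (!p->table || !p->label)
		return -ENOMEM;
	dev_set_drvdata(dev, p);
	return 0;
}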
+/**
+ * devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @type: Type to allocate per-cpu memory for
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+#define devm_alloc_percpu(dev, type) \
+ ((typeof(type) __percpu *)__devm_alloc_percpu((dev), sizeof(type), __alignof__(type)))
+
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, size_t align);
+
+unsigned long devm_get_free_pages(struct device *dev, gfp_t gfp_mask, unsigned int order);
+void devm_free_pages(struct device *dev, unsigned long addr);
+
+#ifdef CONFIG_HAS_IOMEM
+
+void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res);
+void __iomem *devm_ioremap_resource_wc(struct device *dev, const struct resource *res);
+
+void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
+ resource_size_t *size);
+#else
+
+static inline
+void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res)
+{
+ return IOMEM_ERR_PTR(-EINVAL);
+}
+
+static inline
+void __iomem *devm_ioremap_resource_wc(struct device *dev, const struct resource *res)
+{
+ return IOMEM_ERR_PTR(-EINVAL);
+}
+
+static inline
+void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
+ resource_size_t *size)
+{
+ return IOMEM_ERR_PTR(-EINVAL);
+}
+
+#endif
+
+/* allows to add/remove a custom action to devres stack */
+int devm_remove_action_nowarn(struct device *dev, void (*action)(void *), void *data);
+
+/**
+ * devm_remove_action() - removes previously added custom action
+ * @dev: Device that owns the action
+ * @action: Function implementing the action
+ * @data: Pointer to data passed to @action implementation
+ *
+ * Removes instance of @action previously added by devm_add_action().
+ * Both action and data should match one of the existing entries.
+ */
+static inline
+void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
+{
+ WARN_ON(devm_remove_action_nowarn(dev, action, data));
+}
+
+void devm_release_action(struct device *dev, void (*action)(void *), void *data);
+
+int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name);
+#define devm_add_action(dev, action, data) \
+ __devm_add_action(dev, action, data, #action)
+
+static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *),
+ void *data, const char *name)
+{
+ int ret;
+
+ ret = __devm_add_action(dev, action, data, name);
+ if (ret)
+ action(data);
+
+ return ret;
+}
+#define devm_add_action_or_reset(dev, action, data) \
+ __devm_add_action_or_reset(dev, action, data, #action)
+
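/*
 * Illustrative sketch: tying a custom undo step to the device lifetime.
 * On registration failure the action runs immediately, so no manual
 * cleanup path is needed. "disable_hw" is hypothetical.
 */
static void disable_hw(void *data)
{
	/* undo whatever was enabled before this action was added */
}

static int example_enable(struct device *dev, void *hw)
{
	return devm_add_action_or_reset(dev, disable_hw, hw);
}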
+bool devm_is_action_added(struct device *dev, void (*action)(void *), void *data);
+
+#endif /* _DEVICE_DEVRES_H_ */
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index a498ebcf4993..cd8e0f0a634b 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -18,6 +18,7 @@
#include <linux/klist.h>
#include <linux/pm.h>
#include <linux/device/bus.h>
+#include <linux/module.h>
/**
* enum probe_type - device driver probe type to try
@@ -94,7 +95,7 @@ enum probe_type {
*/
struct device_driver {
const char *name;
- struct bus_type *bus;
+ const struct bus_type *bus;
struct module *owner;
const char *mod_name; /* used for built-in modules */
@@ -121,13 +122,13 @@ struct device_driver {
};
-extern int __must_check driver_register(struct device_driver *drv);
-extern void driver_unregister(struct device_driver *drv);
+int __must_check driver_register(struct device_driver *drv);
+void driver_unregister(struct device_driver *drv);
-extern struct device_driver *driver_find(const char *name,
- struct bus_type *bus);
-extern int driver_probe_done(void);
-extern void wait_for_device_probe(void);
+struct device_driver *driver_find(const char *name, const struct bus_type *bus);
+bool __init driver_probe_done(void);
+void wait_for_device_probe(void);
+void __init wait_for_init_devices_probe(void);
/* sysfs interface for exporting driver attributes */
@@ -145,19 +146,18 @@ struct driver_attribute {
#define DRIVER_ATTR_WO(_name) \
struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
-extern int __must_check driver_create_file(struct device_driver *driver,
- const struct driver_attribute *attr);
-extern void driver_remove_file(struct device_driver *driver,
- const struct driver_attribute *attr);
+int __must_check driver_create_file(const struct device_driver *driver,
+ const struct driver_attribute *attr);
+void driver_remove_file(const struct device_driver *driver,
+ const struct driver_attribute *attr);
-extern int __must_check driver_for_each_device(struct device_driver *drv,
- struct device *start,
- void *data,
- int (*fn)(struct device *dev,
- void *));
-struct device *driver_find_device(struct device_driver *drv,
+int driver_set_override(struct device *dev, const char **override,
+ const char *s, size_t len);
+int __must_check driver_for_each_device(struct device_driver *drv, struct device *start,
+ void *data, device_iter_t fn);
+struct device *driver_find_device(const struct device_driver *drv,
struct device *start, const void *data,
- int (*match)(struct device *dev, const void *data));
+ device_match_t match);
/**
* driver_find_device_by_name - device iterator for locating a particular device
@@ -165,7 +165,7 @@ struct device *driver_find_device(struct device_driver *drv,
* @drv: the driver we're iterating
* @name: name of the device to match
*/
-static inline struct device *driver_find_device_by_name(struct device_driver *drv,
+static inline struct device *driver_find_device_by_name(const struct device_driver *drv,
const char *name)
{
return driver_find_device(drv, NULL, name, device_match_name);
@@ -178,7 +178,7 @@ static inline struct device *driver_find_device_by_name(struct device_driver *dr
* @np: of_node pointer to match.
*/
static inline struct device *
-driver_find_device_by_of_node(struct device_driver *drv,
+driver_find_device_by_of_node(const struct device_driver *drv,
const struct device_node *np)
{
return driver_find_device(drv, NULL, np, device_match_of_node);
@@ -203,13 +203,13 @@ driver_find_device_by_fwnode(struct device_driver *drv,
* @drv: the driver we're iterating
* @devt: devt pointer to match.
*/
-static inline struct device *driver_find_device_by_devt(struct device_driver *drv,
+static inline struct device *driver_find_device_by_devt(const struct device_driver *drv,
dev_t devt)
{
return driver_find_device(drv, NULL, &devt, device_match_devt);
}
-static inline struct device *driver_find_next_device(struct device_driver *drv,
+static inline struct device *driver_find_next_device(const struct device_driver *drv,
struct device *start)
{
return driver_find_device(drv, start, NULL, device_match_any);
@@ -223,20 +223,19 @@ static inline struct device *driver_find_next_device(struct device_driver *drv,
* @adev: ACPI_COMPANION device to match.
*/
static inline struct device *
-driver_find_device_by_acpi_dev(struct device_driver *drv,
+driver_find_device_by_acpi_dev(const struct device_driver *drv,
const struct acpi_device *adev)
{
return driver_find_device(drv, NULL, adev, device_match_acpi_dev);
}
#else
static inline struct device *
-driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev)
+driver_find_device_by_acpi_dev(const struct device_driver *drv, const void *adev)
{
return NULL;
}
#endif
-extern int driver_deferred_probe_timeout;
void driver_deferred_probe_add(struct device *dev);
int driver_deferred_probe_check_state(struct device *dev);
void driver_init(void);
diff --git a/include/linux/device/faux.h b/include/linux/device/faux.h
new file mode 100644
index 000000000000..9f43c0e46aa4
--- /dev/null
+++ b/include/linux/device/faux.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2025 The Linux Foundation
+ *
+ * A "simple" faux bus that allows devices to be created and added
+ * automatically to it. This is to be used whenever you need to create a
+ * device that is not associated with any "real" system resources, and you
+ * do not want to deal with bus/driver binding logic. It is
+ * intended to be very simple, with only a create and a destroy function
+ * available.
+ */
+#ifndef _FAUX_DEVICE_H_
+#define _FAUX_DEVICE_H_
+
+#include <linux/container_of.h>
+#include <linux/device.h>
+
+/**
+ * struct faux_device - a "faux" device
+ * @dev: internal struct device of the object
+ *
+ * A simple faux device that can be created/destroyed. To be used when a
+ * driver only needs to have a device to "hang" something off. This can be
+ * used for downloading firmware or other basic tasks. Use this instead of
+ * a struct platform_device if the device has no resources assigned to
+ * it at all.
+ */
+struct faux_device {
+ struct device dev;
+};
+#define to_faux_device(x) container_of_const((x), struct faux_device, dev)
+
+/**
+ * struct faux_device_ops - a set of callbacks for a struct faux_device
+ * @probe: called when a faux device is probed by the driver core
+ * before the device is fully bound to the internal faux bus
+ * code. If probe succeeds, return 0, otherwise return a
+ * negative error number to stop the probe sequence from
+ * succeeding.
+ * @remove: called when a faux device is removed from the system
+ *
+ * Both @probe and @remove are optional, if not needed, set to NULL.
+ */
+struct faux_device_ops {
+ int (*probe)(struct faux_device *faux_dev);
+ void (*remove)(struct faux_device *faux_dev);
+};
+
+struct faux_device *faux_device_create(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops);
+struct faux_device *faux_device_create_with_groups(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops,
+ const struct attribute_group **groups);
+void faux_device_destroy(struct faux_device *faux_dev);
+
+static inline void *faux_device_get_drvdata(const struct faux_device *faux_dev)
+{
+ return dev_get_drvdata(&faux_dev->dev);
+}
+
+static inline void faux_device_set_drvdata(struct faux_device *faux_dev, void *data)
+{
+ dev_set_drvdata(&faux_dev->dev, data);
+}
+
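/*
 * Illustrative sketch: a faux device with no ops, created just to hang
 * work off. "demo" is a placeholder name; faux_device_create() returns
 * NULL on failure.
 */
static struct faux_device *demo_fdev;

static int __init demo_init(void)
{
	demo_fdev = faux_device_create("demo", NULL, NULL);
	return demo_fdev ? 0 : -ENODEV;
}

static void __exit demo_exit(void)
{
	faux_device_destroy(demo_fdev);
}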
+#endif /* _FAUX_DEVICE_H_ */
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index d02f32b7514e..0864773a57e8 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -18,15 +18,16 @@ static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{
short type, access = 0;
+ if (likely(!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode)))
+ return 0;
+
if (likely(!inode->i_rdev))
return 0;
if (S_ISBLK(inode->i_mode))
type = DEVCG_DEV_BLOCK;
- else if (S_ISCHR(inode->i_mode))
+ else /* S_ISCHR by the test above */
type = DEVCG_DEV_CHAR;
- else
- return 0;
if (mask & MAY_WRITE)
access |= DEVCG_ACC_WRITE;
diff --git a/include/linux/devm-helpers.h b/include/linux/devm-helpers.h
index f40f77717a24..708ca9131402 100644
--- a/include/linux/devm-helpers.h
+++ b/include/linux/devm-helpers.h
@@ -41,7 +41,7 @@ static inline void devm_delayed_work_drop(void *res)
* detached. A few drivers need delayed work which must be cancelled before
* driver is detached to avoid accessing removed resources.
* devm_delayed_work_autocancel() can be used to omit the explicit
- * cancelleation when driver is detached.
+ * cancellation when driver is detached.
*/
static inline int devm_delayed_work_autocancel(struct device *dev,
struct delayed_work *w,
@@ -51,4 +51,29 @@ static inline int devm_delayed_work_autocancel(struct device *dev,
return devm_add_action(dev, devm_delayed_work_drop, w);
}
+static inline void devm_work_drop(void *res)
+{
+ cancel_work_sync(res);
+}
+
+/**
+ * devm_work_autocancel - Resource-managed work allocation
+ * @dev: Device which lifetime work is bound to
+ * @w: Work to be added (and automatically cancelled)
+ * @worker: Worker function
+ *
+ * Initialize work which is automatically cancelled when driver is detached.
+ * A few drivers need to queue work which must be cancelled before driver
+ * is detached to avoid accessing removed resources.
+ * devm_work_autocancel() can be used to omit the explicit
+ * cancellation when driver is detached.
+ */
+static inline int devm_work_autocancel(struct device *dev,
+ struct work_struct *w,
+ work_func_t worker)
+{
+ INIT_WORK(w, worker);
+ return devm_add_action(dev, devm_work_drop, w);
+}
+
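/*
 * Illustrative sketch: work that touches device resources and is
 * cancelled automatically before they are released. "demo_worker" is
 * hypothetical.
 */
static void demo_worker(struct work_struct *w)
{
	/* runs only while the driver is bound */
}

static int demo_bind(struct device *dev, struct work_struct *w)
{
	return devm_work_autocancel(dev, w, demo_worker);
}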
#endif
diff --git a/include/linux/dfl.h b/include/linux/dfl.h
index 6cc10982351a..1f02db0c1897 100644
--- a/include/linux/dfl.h
+++ b/include/linux/dfl.h
@@ -27,22 +27,30 @@ enum dfl_id_type {
* @id: id of the dfl device.
* @type: type of DFL FIU of the device. See enum dfl_id_type.
* @feature_id: feature identifier local to its DFL FIU type.
+ * @revision: revision of this dfl device feature.
* @mmio_res: mmio resource of this dfl device.
* @irqs: list of Linux IRQ numbers of this dfl device.
* @num_irqs: number of IRQs supported by this dfl device.
* @cdev: pointer to DFL FPGA container device this dfl device belongs to.
* @id_entry: matched id entry in dfl driver's id table.
+ * @dfh_version: version of DFH for the device
+ * @param_size: size of the block parameters in bytes
+ * @params: pointer to a copied memory block of parameters
*/
struct dfl_device {
struct device dev;
int id;
u16 type;
u16 feature_id;
+ u8 revision;
struct resource mmio_res;
int *irqs;
unsigned int num_irqs;
struct dfl_fpga_cdev *cdev;
const struct dfl_device_id *id_entry;
+ u8 dfh_version;
+ unsigned int param_size;
+ void *params;
};
/**
@@ -63,7 +71,7 @@ struct dfl_driver {
};
#define to_dfl_dev(d) container_of(d, struct dfl_device, dev)
-#define to_dfl_drv(d) container_of(d, struct dfl_driver, drv)
+#define to_dfl_drv(d) container_of_const(d, struct dfl_driver, drv)
/*
* use a macro to avoid include chaining to get THIS_MODULE.
@@ -83,4 +91,5 @@ void dfl_driver_unregister(struct dfl_driver *dfl_drv);
module_driver(__dfl_driver, dfl_driver_register, \
dfl_driver_unregister)
+void *dfh_find_param(struct dfl_device *dfl_dev, int param_id, size_t *pcount);
#endif /* __LINUX_DFL_H */
diff --git a/include/linux/dibs.h b/include/linux/dibs.h
new file mode 100644
index 000000000000..c75607f8a5cf
--- /dev/null
+++ b/include/linux/dibs.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Direct Internal Buffer Sharing
+ *
+ * Definitions for the DIBS module
+ *
+ * Copyright IBM Corp. 2025
+ */
+#ifndef _DIBS_H
+#define _DIBS_H
+
+#include <linux/device.h>
+#include <linux/uuid.h>
+
+/* DIBS - Direct Internal Buffer Sharing - concept
+ * -----------------------------------------------
+ * In the case of multiple systems sharing the same hardware, dibs fabrics can
+ * provide dibs devices to these systems. The systems use dibs devices of the
+ * same fabric to communicate via dmbs (Direct Memory Buffers). Each dmb has
+ * exactly one owning local dibs device and one remote using dibs device that
+ * is authorized to write into this dmb. This access control is provided by the
+ * dibs fabric.
+ *
+ * Because the access to the dmb is based on access to physical memory, it is
+ * lossless and synchronous. The remote devices can directly access any offset
+ * of the dmb.
+ *
+ * Dibs fabrics, dibs devices and dmbs are identified by tokens and ids.
+ * Dibs fabric id is unique within the same hardware (with the exception of the
+ * dibs loopback fabric), dmb token is unique within the same fabric, dibs
+ * device gids are guaranteed to be unique within the same fabric and
+ * statistically likely to be globally unique. The exchange of these tokens and
+ * ids between the systems is not part of the dibs concept.
+ *
+ * The dibs layer provides an abstraction between dibs device drivers and dibs
+ * clients.
+ */
+
+/* DMB - Direct Memory Buffer
+ * --------------------------
+ * A dibs client provides a dmb as input buffer for a local receiving
+ * dibs device for exactly one (remote) sending dibs device. Only this
+ * sending device can send data into this dmb using move_data(). Sender
+ * and receiver can be the same device. A dmb belongs to exactly one client.
+ */
+struct dibs_dmb {
+ /* tok - Token for this dmb
+ * Used by remote and local devices and clients to address this dmb.
+ * Provided by dibs fabric. Unique per dibs fabric.
+ */
+ u64 dmb_tok;
+ /* rgid - GID of designated remote sending device */
+ uuid_t rgid;
+ /* cpu_addr - buffer address */
+ void *cpu_addr;
+ /* len - buffer length */
+ u32 dmb_len;
+ /* idx - Index of this DMB on this receiving device */
+ u32 idx;
+ /* VLAN support (deprecated)
+ * In order to write into a vlan-tagged dmb, the remote device needs
+ * to belong to this vlan.
+ */
+ u32 vlan_valid;
+ u32 vlan_id;
+ /* optional, used by device driver */
+ dma_addr_t dma_addr;
+};
+
+/* DIBS events
+ * -----------
+ * Dibs devices can optionally notify dibs clients about events that happened
+ * in the fabric or at the remote device or remote dmb.
+ */
+enum dibs_event_type {
+ /* Buffer event, e.g. a remote dmb was unregistered */
+ DIBS_BUF_EVENT,
+ /* Device event, e.g. a remote dibs device was disabled */
+ DIBS_DEV_EVENT,
+ /* Software event, a dibs client can send an event signal to a
+ * remote dibs device.
+ */
+ DIBS_SW_EVENT,
+ DIBS_OTHER_TYPE
+};
+
+enum dibs_event_subtype {
+ DIBS_BUF_UNREGISTERED,
+ DIBS_DEV_DISABLED,
+ DIBS_DEV_ERR_STATE,
+ DIBS_OTHER_SUBTYPE
+};
+
+struct dibs_event {
+ u32 type;
+ u32 subtype;
+ /* uuid_null if invalid */
+ uuid_t gid;
+ /* zero if invalid */
+ u64 buffer_tok;
+ u64 time;
+ /* additional data or zero */
+ u64 data;
+};
+
+struct dibs_dev;
+
+/* DIBS client
+ * -----------
+ */
+#define MAX_DIBS_CLIENTS 8
+#define NO_DIBS_CLIENT 0xff
+/* All dibs clients have access to all dibs devices.
+ * A dibs client provides the following functions to be called by dibs layer or
+ * dibs device drivers:
+ */
+struct dibs_client_ops {
+ /**
+ * add_dev() - add a dibs device
+ * @dev: device that was added
+ *
+ * Will be called during dibs_register_client() for all existing
+ * dibs devices and whenever a new dibs device is registered.
+ * dev is usable until dibs_client_ops.del_dev() is called.
+ * *dev is protected by device refcounting.
+ */
+ void (*add_dev)(struct dibs_dev *dev);
+ /**
+ * del_dev() - remove a dibs device
+ * @dev: device to be removed
+ *
+ * Will be called whenever a dibs device is removed.
+ * Will be called during dibs_unregister_client() for all existing
+ * dibs devices and whenever a dibs device is unregistered.
+ * The device has already stopped initiative for this client:
+ * No new handlers will be started.
+ * The device is no longer usable by this client after this call.
+ */
+ void (*del_dev)(struct dibs_dev *dev);
+ /**
+ * handle_irq() - Handle signaling for a DMB
+ * @dev: device that owns the dmb
+ * @idx: Index of the dmb that got signalled
+ * @dmbemask: signaling mask of the dmb
+ *
+ * Handle signaling for a dmb that was registered by this client
+ * for this device.
+ * The dibs device can coalesce multiple signaling triggers into a
+ * single call of handle_irq(). dmbemask can be used to indicate
+ * different kinds of triggers.
+ *
+ * Context: Called in IRQ context by dibs device driver
+ */
+ void (*handle_irq)(struct dibs_dev *dev, unsigned int idx,
+ u16 dmbemask);
+ /**
+ * handle_event() - Handle control information sent by device
+ * @dev: device reporting the event
+ * @event: ism event structure
+ *
+ * Context: Called in IRQ context by dibs device driver
+ */
+ void (*handle_event)(struct dibs_dev *dev,
+ const struct dibs_event *event);
+};
+
+struct dibs_client {
+ /* client name for logging and debugging purposes */
+ const char *name;
+ const struct dibs_client_ops *ops;
+ /* client index - provided and used by dibs layer */
+ u8 id;
+};
+
+/* Functions to be called by dibs clients:
+ */
+/**
+ * dibs_register_client() - register a client with dibs layer
+ * @client: this client
+ *
+ * Will call client->ops->add_dev() for all existing dibs devices.
+ * Return: zero on success.
+ */
+int dibs_register_client(struct dibs_client *client);
+/**
+ * dibs_unregister_client() - unregister a client with dibs layer
+ * @client: this client
+ *
+ * Will call client->ops->del_dev() for all existing dibs devices.
+ * Return: zero on success.
+ */
+int dibs_unregister_client(struct dibs_client *client);
+
+/* dibs clients can call dibs device ops. */
+
+/* DIBS devices
+ * ------------
+ */
+
+/* Defined fabric id / CHID for all loopback devices:
+ * All dibs loopback devices report this fabric id. In this case devices with
+ * the same fabric id can NOT communicate via dibs. Only loopback devices with
+ * the same dibs device gid can communicate (=same device with itself).
+ */
+#define DIBS_LOOPBACK_FABRIC 0xFFFF
+
+/* A dibs device provides the following functions to be called by dibs clients.
+ * They are mandatory, unless marked 'optional'.
+ */
+struct dibs_dev_ops {
+ /**
+ * get_fabric_id()
+ * @dev: local dibs device
+ *
+ * Only devices on the same dibs fabric can communicate. Fabric_id is
+ * unique inside the same HW system. Use fabric_id for fast negative
+ * checks, but only query_remote_gid() can give a reliable positive
+ * answer:
+ * Different fabric_id: dibs is not possible
+ * Same fabric_id: dibs may be possible or not
+ * (e.g. different HW systems)
+ * EXCEPTION: DIBS_LOOPBACK_FABRIC denotes an ism_loopback device
+ * that can only communicate with itself. Use dibs_dev.gid
+ * or query_remote_gid() to determine whether sender and
+ * receiver use the same ism_loopback device.
+ * Return: 2 byte dibs fabric id
+ */
+ u16 (*get_fabric_id)(struct dibs_dev *dev);
+ /**
+ * query_remote_gid()
+ * @dev: local dibs device
+ * @rgid: gid of remote dibs device
+ * @vid_valid: if zero, vid will be ignored;
+ * deprecated, ignored if device does not support vlan
+ * @vid: VLAN id; deprecated, ignored if device does not support vlan
+ *
+ * Query whether a remote dibs device is reachable via this local device
+ * and this vlan id.
+ * Return: 0 if remote gid is reachable.
+ */
+ int (*query_remote_gid)(struct dibs_dev *dev, const uuid_t *rgid,
+ u32 vid_valid, u32 vid);
+ /**
+ * max_dmbs()
+ * Return: Max number of DMBs that can be registered for this kind of
+ * dibs_dev
+ */
+ int (*max_dmbs)(void);
+ /**
+ * register_dmb() - allocate and register a dmb
+ * @dev: dibs device
+ * @dmb: dmb struct to be registered
+ * @client: dibs client
+ *
+ * The following fields of dmb must provide valid input:
+ * @rgid: gid of remote user device
+ * @dmb_len: buffer length
+ * @idx: Optional: requested idx (if non-zero)
+ * @vlan_valid: if zero, vlan_id will be ignored;
+ * deprecated, ignored if device does not support vlan
+ * @vlan_id: deprecated, ignored if device does not support vlan
+ * Upon return, the following additional fields will be valid:
+ * @dmb_tok: for usage by remote and local devices and clients
+ * @cpu_addr: allocated buffer
+ * @idx: dmb index, unique per dibs device
+ * @dma_addr: to be used by the device driver, if applicable
+ *
+ * Allocate a dmb buffer and register it with this device and for this
+ * client.
+ * Return: zero on success
+ */
+ int (*register_dmb)(struct dibs_dev *dev, struct dibs_dmb *dmb,
+ struct dibs_client *client);
+ /**
+ * unregister_dmb() - unregister and free a dmb
+ * @dev: dibs device
+ * @dmb: dmb struct to be unregistered
+ * The following fields of dmb must provide valid input:
+ * @dmb_tok
+ * @cpu_addr
+ * @idx
+ *
+ * Free dmb.cpu_addr and unregister the dmb from this device.
+ * Return: zero on success
+ */
+ int (*unregister_dmb)(struct dibs_dev *dev, struct dibs_dmb *dmb);
+ /**
+ * move_data() - write into a remote dmb
+ * @dev: Local sending dibs device
+ * @dmb_tok: Token of the remote dmb
+ * @idx: signaling index in dmbemask
+ * @sf: signaling flag;
+ * if true, idx will be turned on at target dmbemask mask
+ * and target device will be signaled.
+ * @offset: offset within target dmb
+ * @data: pointer to data to be sent
+ * @size: length of data to be sent, can be zero.
+ *
+ * Use dev to write data of size at offset into a remote dmb
+ * identified by dmb_tok. Data is moved synchronously, *data can
+ * be freed when this function returns.
+ *
+ * If the signaling flag (sf) is true, bit number idx will be turned
+ * on in the dmbemask mask when handle_irq() is called at the remote
+ * dibs client that owns the target dmb. The target device may choose
+ * to coalesce the signaling triggers of multiple move_data() calls
+ * to the same target dmb into a single handle_irq() call.
+ * Return: zero on success
+ */
+ int (*move_data)(struct dibs_dev *dev, u64 dmb_tok, unsigned int idx,
+ bool sf, unsigned int offset, void *data,
+ unsigned int size);
+ /**
+ * add_vlan_id() - add dibs device to vlan (optional, deprecated)
+ * @dev: dibs device
+ * @vlan_id: vlan id
+ *
+ * In order to write into a vlan-tagged dmb, the remote device needs
+ * to belong to this vlan. A device can belong to more than one vlan.
+ * Any device can access an untagged dmb.
+ * Deprecated, only supported for backwards compatibility.
+ * Return: zero on success
+ */
+ int (*add_vlan_id)(struct dibs_dev *dev, u64 vlan_id);
+ /**
+ * del_vlan_id() - remove dibs device from vlan (optional, deprecated)
+ * @dev: dibs device
+ * @vlan_id: vlan id
+ * Return: zero on success
+ */
+ int (*del_vlan_id)(struct dibs_dev *dev, u64 vlan_id);
+ /**
+ * signal_event() - trigger an event at a remote dibs device (optional)
+ * @dev: local dibs device
+ * @rgid: gid of remote dibs device
+ * @trigger_irq: zero: notification may be coalesced with other events
+ * non-zero: notify immediately
+ * @event_code: 4 byte event code, meaning is defined by dibs client
+ * @info: 8 bytes of additional information,
+ * meaning is defined by dibs client
+ *
+ * dibs devices can offer support for sending a control event of type
+ * DIBS_SW_EVENT to a remote dibs device.
+ * NOTE: handle_event() will be called for all registered dibs clients
+ * at the remote device.
+ * Return: zero on success
+ */
+ int (*signal_event)(struct dibs_dev *dev, const uuid_t *rgid,
+ u32 trigger_irq, u32 event_code, u64 info);
+ /**
+ * support_mmapped_rdmb() - can this device provide memory mapped
+ * remote dmbs? (optional)
+ * @dev: dibs device
+ *
+ * A dibs device can provide a kernel address + length, that represent
+ * a remote target dmb (like MMIO). Alternatively to calling
+ * move_data(), a dibs client can write into such a ghost-send-buffer
+ * (= to this kernel address) and the data will automatically
+ * immediately appear in the target dmb, even without calling
+ * move_data().
+ *
+ * Either all 3 function pointers for support_mmapped_rdmb(),
+ * attach_dmb() and detach_dmb() are defined, or all of them must
+ * be NULL.
+ *
+ * Return: non-zero, if memory mapped remote dmbs are supported.
+ */
+ int (*support_mmapped_rdmb)(struct dibs_dev *dev);
+ /**
+ * attach_dmb() - attach local memory to a remote dmb
+ * @dev: Local sending dibs device
+ * @dmb: all other parameters are passed in the form of a
+ * dmb struct
+ * TODO: (THIS IS CONFUSING, should be changed)
+ * dmb_tok: (in) Token of the remote dmb, we want to attach to
+ * cpu_addr: (out) MMIO address
+ * dma_addr: (out) MMIO address (if applicable, invalid otherwise)
+ * dmb_len: (out) length of local MMIO region,
+ * equal to length of remote DMB.
+ * idx: (out) index of remote dmb (NOT HELPFUL, should be removed)
+ *
+ * Provides a memory address to the sender that can be used to
+ * directly write into the remote dmb.
+ * Memory is available until detach_dmb is called
+ *
+ * Return: Zero upon success, Error code otherwise
+ */
+ int (*attach_dmb)(struct dibs_dev *dev, struct dibs_dmb *dmb);
+ /**
+ * detach_dmb() - Detach the ghost buffer from a remote dmb
+ * @dev: dibs device
+ * @token: dmb token of the remote dmb
+ *
+ * No need to free cpu_addr.
+ *
+ * Return: Zero upon success, Error code otherwise
+ */
+ int (*detach_dmb)(struct dibs_dev *dev, u64 token);
+};
+
+struct dibs_dev {
+ struct list_head list;
+ struct device dev;
+ /* To be filled by device driver, before calling dibs_dev_add(): */
+ const struct dibs_dev_ops *ops;
+ uuid_t gid;
+ /* priv pointer for device driver */
+ void *drv_priv;
+
+ /* priv pointer per client; for client usage only */
+ void *priv[MAX_DIBS_CLIENTS];
+
+ /* get this lock before accessing any of the fields below */
+ spinlock_t lock;
+ /* array of client ids indexed by dmb idx;
+ * can be used as indices into priv and subs arrays
+ */
+ u8 *dmb_clientid_arr;
+ /* Sparse array of all dibs clients */
+ struct dibs_client *subs[MAX_DIBS_CLIENTS];
+};
+
+static inline void dibs_set_priv(struct dibs_dev *dev,
+ struct dibs_client *client, void *priv)
+{
+ dev->priv[client->id] = priv;
+}
+
+static inline void *dibs_get_priv(struct dibs_dev *dev,
+ struct dibs_client *client)
+{
+ return dev->priv[client->id];
+}
+
+/* ------- End of client-only functions ----------- */
+
+/* Functions to be called by dibs device drivers:
+ */
+/**
+ * dibs_dev_alloc() - allocate and reference device structure
+ *
+ * The following fields will be valid upon successful return: dev
+ * NOTE: Use put_device(dibs_get_dev(@dibs)) to give up your reference instead
+ * of freeing @dibs directly once you have successfully called this
+ * function.
+ * Return: Pointer to dibs device structure
+ */
+struct dibs_dev *dibs_dev_alloc(void);
+/**
+ * dibs_dev_add() - register with dibs layer and all clients
+ * @dibs: dibs device
+ *
+ * The following fields must be valid upon entry: dev, ops, drv_priv
+ * All fields will be valid upon successful return.
+ * Return: zero on success
+ */
+int dibs_dev_add(struct dibs_dev *dibs);
+/**
+ * dibs_dev_del() - unregister from dibs layer and all clients
+ * @dibs: dibs device
+ */
+void dibs_dev_del(struct dibs_dev *dibs);
+
+#endif /* _DIBS_H */
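To make the ops flow above concrete, here is a minimal (hypothetical) dibs client skeleton; the my_* names and the 16 KB buffer size are assumptions, and the loopback-style use of the device's own gid as the remote gid stands in for a real out-of-band gid exchange:

#include <linux/dibs.h>
#include <linux/sizes.h>
#include <linux/slab.h>

static struct dibs_client my_client;

static void my_add_dev(struct dibs_dev *dev)
{
	struct dibs_dmb *dmb;

	dmb = kzalloc(sizeof(*dmb), GFP_KERNEL);
	if (!dmb)
		return;

	uuid_copy(&dmb->rgid, &dev->gid);	/* loopback-style: send to self */
	dmb->dmb_len = SZ_16K;
	if (dev->ops->register_dmb(dev, dmb, &my_client)) {
		kfree(dmb);
		return;
	}
	/* remember the dmb for this device via the per-client priv slot */
	dibs_set_priv(dev, &my_client, dmb);
}

static void my_del_dev(struct dibs_dev *dev)
{
	struct dibs_dmb *dmb = dibs_get_priv(dev, &my_client);

	if (dmb) {
		dev->ops->unregister_dmb(dev, dmb);
		kfree(dmb);
	}
}

static void my_handle_irq(struct dibs_dev *dev, unsigned int idx, u16 dmbemask)
{
	/* data has already landed in the dmb with index idx;
	 * dmbemask tells us which signaling bits were set */
}

static void my_handle_event(struct dibs_dev *dev,
			    const struct dibs_event *event)
{
}

static const struct dibs_client_ops my_ops = {
	.add_dev	= my_add_dev,
	.del_dev	= my_del_dev,
	.handle_irq	= my_handle_irq,
	.handle_event	= my_handle_event,
};

static struct dibs_client my_client = {
	.name	= "my_client",
	.ops	= &my_ops,
};

/* in module init: dibs_register_client(&my_client); */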
diff --git a/include/linux/dim.h b/include/linux/dim.h
index b698266d0035..06543fd40fcc 100644
--- a/include/linux/dim.h
+++ b/include/linux/dim.h
@@ -10,6 +10,15 @@
#include <linux/types.h>
#include <linux/workqueue.h>
+struct net_device;
+
+/* Number of DIM profiles and period mode. */
+#define NET_DIM_PARAMS_NUM_PROFILES 5
+#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256
+#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128
+#define NET_DIM_DEF_PROFILE_CQE 1
+#define NET_DIM_DEF_PROFILE_EQE 1
+
/*
* Number of events between DIM iterations.
* Causes a moderation of the algorithm run.
@@ -21,7 +30,7 @@
* We consider 10% difference as significant.
*/
#define IS_SIGNIFICANT_DIFF(val, ref) \
- (((100UL * abs((val) - (ref))) / (ref)) > 10)
+ ((ref) && (((100UL * abs((val) - (ref))) / (ref)) > 10))
/*
* Calculate the gap between two values.
@@ -38,12 +47,45 @@
* @pkts: CQ packet counter suggestion (by DIM)
* @comps: Completion counter
* @cq_period_mode: CQ period count mode (from CQE/EQE)
+ * @rcu: for asynchronous kfree_rcu
*/
struct dim_cq_moder {
u16 usec;
u16 pkts;
u16 comps;
u8 cq_period_mode;
+ struct rcu_head rcu;
+};
+
+#define DIM_PROFILE_RX BIT(0) /* support rx profile modification */
+#define DIM_PROFILE_TX BIT(1) /* support tx profile modification */
+
+#define DIM_COALESCE_USEC BIT(0) /* support usec field modification */
+#define DIM_COALESCE_PKTS BIT(1) /* support pkts field modification */
+#define DIM_COALESCE_COMPS BIT(2) /* support comps field modification */
+
+/**
+ * struct dim_irq_moder - Structure for irq moderation information.
+ * Used to collect irq moderation related information.
+ *
+ * @profile_flags: DIM_PROFILE_*
+ * @coal_flags: DIM_COALESCE_* for Rx and Tx
+ * @dim_rx_mode: Rx DIM period count mode: CQE or EQE
+ * @dim_tx_mode: Tx DIM period count mode: CQE or EQE
+ * @rx_profile: DIM profile list for Rx
+ * @tx_profile: DIM profile list for Tx
+ * @rx_dim_work: Rx DIM worker scheduled by net_dim()
+ * @tx_dim_work: Tx DIM worker scheduled by net_dim()
+ */
+struct dim_irq_moder {
+ u8 profile_flags;
+ u8 coal_flags;
+ u8 dim_rx_mode;
+ u8 dim_tx_mode;
+ struct dim_cq_moder __rcu *rx_profile;
+ struct dim_cq_moder __rcu *tx_profile;
+ void (*rx_dim_work)(struct work_struct *work);
+ void (*tx_dim_work)(struct work_struct *work);
};
/**
@@ -192,6 +234,77 @@ enum dim_step_result {
};
/**
+ * net_dim_init_irq_moder - collect information to initialize irq moderation
+ * @dev: target network device
+ * @profile_flags: Rx or Tx profile modification capability
+ * @coal_flags: irq moderation params flags
+ * @rx_mode: CQ period mode for Rx
+ * @tx_mode: CQ period mode for Tx
+ * @rx_dim_work: Rx worker called after dim decision
+ * @tx_dim_work: Tx worker called after dim decision
+ *
+ * Return: 0 on success or a negative error code.
+ */
+int net_dim_init_irq_moder(struct net_device *dev, u8 profile_flags,
+ u8 coal_flags, u8 rx_mode, u8 tx_mode,
+ void (*rx_dim_work)(struct work_struct *work),
+ void (*tx_dim_work)(struct work_struct *work));
+
+/**
+ * net_dim_free_irq_moder - free fields for irq moderation
+ * @dev: target network device
+ */
+void net_dim_free_irq_moder(struct net_device *dev);
+
+/**
+ * net_dim_setting - initialize DIM's cq mode and schedule worker
+ * @dev: target network device
+ * @dim: DIM context
+ * @is_tx: true indicates the tx direction, false indicates the rx direction
+ */
+void net_dim_setting(struct net_device *dev, struct dim *dim, bool is_tx);
+
+/**
+ * net_dim_work_cancel - synchronously cancel dim's worker
+ * @dim: DIM context
+ */
+void net_dim_work_cancel(struct dim *dim);
+
+/**
+ * net_dim_get_rx_irq_moder - get DIM rx results based on profile_ix
+ * @dev: target network device
+ * @dim: DIM context
+ *
+ * Return: DIM irq moderation
+ */
+struct dim_cq_moder
+net_dim_get_rx_irq_moder(struct net_device *dev, struct dim *dim);
+
+/**
+ * net_dim_get_tx_irq_moder - get DIM tx results based on profile_ix
+ * @dev: target network device
+ * @dim: DIM context
+ *
+ * Return: DIM irq moderation
+ */
+struct dim_cq_moder
+net_dim_get_tx_irq_moder(struct net_device *dev, struct dim *dim);
+
+/**
+ * net_dim_set_rx_mode - set DIM rx cq mode
+ * @dev: target network device
+ * @rx_mode: target rx cq mode
+ */
+void net_dim_set_rx_mode(struct net_device *dev, u8 rx_mode);
+
+/**
+ * net_dim_set_tx_mode - set DIM tx cq mode
+ * @dev: target network device
+ * @tx_mode: target tx cq mode
+ */
+void net_dim_set_tx_mode(struct net_device *dev, u8 tx_mode);
+
+/**
* dim_on_top - check if current state is a good place to stop (top location)
* @dim: DIM context
*
@@ -236,8 +349,10 @@ void dim_park_tired(struct dim *dim);
*
* Calculate the delta between two samples (in data rates).
* Takes into consideration counter wrap-around.
+ * Returned boolean indicates whether curr_stats are reliable.
*/
-void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+bool dim_calc_stats(const struct dim_sample *start,
+ const struct dim_sample *end,
struct dim_stats *curr_stats);
/**
@@ -310,7 +425,7 @@ struct dim_cq_moder net_dim_get_def_tx_moderation(u8 cq_period_mode);
* This is the main logic of the algorithm, where data is processed in order
* to decide on next required action.
*/
-void net_dim(struct dim *dim, struct dim_sample end_sample);
+void net_dim(struct dim *dim, const struct dim_sample *end_sample);
/* RDMA DIM */
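A plausible driver-side wiring of the new irq moderation API looks like the sketch below; my_get_netdev() is a hypothetical helper resolving the owning netdev from the dim context, and the Rx-only setup with a NULL Tx worker is an assumption:

#include <linux/dim.h>
#include <linux/netdevice.h>

/* hypothetical: resolve the owning net_device from the dim context */
static struct net_device *my_get_netdev(struct dim *dim);

static void my_rx_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder moder;

	/* fetch the profile entry DIM selected for this channel */
	moder = net_dim_get_rx_irq_moder(my_get_netdev(dim), dim);

	/* program moder.usec / moder.pkts into the NIC here */

	dim->state = DIM_START_MEASURE;
}

static int my_init_moder(struct net_device *netdev)
{
	return net_dim_init_irq_moder(netdev, DIM_PROFILE_RX,
				      DIM_COALESCE_USEC | DIM_COALESCE_PKTS,
				      DIM_CQ_PERIOD_MODE_START_FROM_EQE,
				      DIM_CQ_PERIOD_MODE_START_FROM_EQE,
				      my_rx_dim_work, NULL);
}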
diff --git a/include/linux/dio.h b/include/linux/dio.h
index 5abd07361eb5..464331c4c4a7 100644
--- a/include/linux/dio.h
+++ b/include/linux/dio.h
@@ -68,7 +68,7 @@ struct dio_bus {
};
extern struct dio_bus dio_bus; /* Single DIO bus */
-extern struct bus_type dio_bus_type;
+extern const struct bus_type dio_bus_type;
/*
* DIO device IDs
@@ -93,7 +93,7 @@ struct dio_driver {
struct device_driver driver;
};
-#define to_dio_driver(drv) container_of(drv, struct dio_driver, driver)
+#define to_dio_driver(drv) container_of_const(drv, struct dio_driver, driver)
/* DIO/DIO-II boards all have the following 8bit registers.
* These are offsets from the base of the device.
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index ff951e9f6f20..7e7b45b0d097 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -35,6 +35,9 @@ struct dlm_lockspace_ops {
int num_slots, int our_slot, uint32_t generation);
};
+/* only relevant for kernel lockspaces, will be removed in future */
+#define DLM_LSFL_SOFTIRQ __DLM_LSFL_RESERVED0
+
/*
* dlm_new_lockspace
*
@@ -53,14 +56,13 @@ struct dlm_lockspace_ops {
* The dlm should not use a resource directory, but statically assign
* resource mastery to nodes based on the name hash that is otherwise
* used to select the directory node. Must be the same on all nodes.
- * DLM_LSFL_TIMEWARN
- * The dlm should emit netlink messages if locks have been waiting
- * for a configurable amount of time. (Unused.)
- * DLM_LSFL_FS
- * The lockspace user is in the kernel (i.e. filesystem). Enables
- * direct bast/cast callbacks.
* DLM_LSFL_NEWEXCL
* dlm_new_lockspace() should return -EEXIST if the lockspace exists.
+ * DLM_LSFL_SOFTIRQ
+ * dlm request callbacks (ast, bast) are softirq safe. This flag should
+ * be preferred by users and will become the default in the future.
+ * If set, the strongest context for the ast/bast callbacks is softirq,
+ * as it avoids an additional context switch.
*
* lvblen: length of lvb in bytes. Must be multiple of 8.
* dlm_new_lockspace() returns an error if this does not match
@@ -86,12 +88,43 @@ int dlm_new_lockspace(const char *name, const char *cluster,
int *ops_result, dlm_lockspace_t **lockspace);
/*
+ * dlm_release_lockspace() release_option values:
+ *
+ * DLM_RELEASE_NO_LOCKS returns -EBUSY if any locks (lkb's)
+ * exist in the local lockspace.
+ *
+ * DLM_RELEASE_UNUSED previous value that is no longer used.
+ *
+ * DLM_RELEASE_NORMAL releases the lockspace regardless of any
+ * locks managed in the local lockspace.
+ *
+ * DLM_RELEASE_NO_EVENT releases the lockspace regardless of any
+ * locks managed in the local lockspace, and does not submit
+ * a leave event to the cluster manager, so other nodes will
+ * not be notified that the node should be removed from the
+ * list of lockspace members.
+ *
+ * DLM_RELEASE_RECOVER like DLM_RELEASE_NORMAL, but the remaining
+ * nodes will handle the removal of the node as if the node
+ * had failed, e.g. the recover_slot() callback would be used.
+ */
+#define DLM_RELEASE_NO_LOCKS 0
+#define DLM_RELEASE_UNUSED 1
+#define DLM_RELEASE_NORMAL 2
+#define DLM_RELEASE_NO_EVENT 3
+#define DLM_RELEASE_RECOVER 4
+#define __DLM_RELEASE_MAX DLM_RELEASE_RECOVER
+
+/*
* dlm_release_lockspace
*
* Stop a lockspace.
+ *
+ * release_option: see DLM_RELEASE values above.
*/
-int dlm_release_lockspace(dlm_lockspace_t *lockspace, int force);
+int dlm_release_lockspace(dlm_lockspace_t *lockspace,
+ unsigned int release_option);
/*
* dlm_lock
@@ -127,14 +160,21 @@ int dlm_release_lockspace(dlm_lockspace_t *lockspace, int force);
* call.
*
* AST routines should not block (at least not for long), but may make
- * any locking calls they please.
+ * any locking calls they please. If kernel users of dlm_new_lockspace()
+ * pass DLM_LSFL_SOFTIRQ, the ast and bast callbacks can be processed
+ * in softirq context. Some callbacks may also run in the same context
+ * as the DLM lock request API, so users must not hold a lock across a
+ * dlm lock request call and then try to acquire the same lock again in
+ * the callback; this would end in lock recursion. New implementations
+ * should use DLM_LSFL_SOFTIRQ.
*/
int dlm_lock(dlm_lockspace_t *lockspace,
int mode,
struct dlm_lksb *lksb,
uint32_t flags,
- void *name,
+ const void *name,
unsigned int namelen,
uint32_t parent_lkid,
void (*lockast) (void *astarg),
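A sketch of the preferred setup with the new flag and release options; the lockspace name and lvblen are arbitrary choices, not requirements:

#include <linux/dlm.h>

static int my_lockspace_demo(void)
{
	dlm_lockspace_t *ls;
	int err;

	/* softirq-safe ast/bast callbacks; fail if the lockspace exists */
	err = dlm_new_lockspace("my_ls", NULL,
				DLM_LSFL_NEWEXCL | DLM_LSFL_SOFTIRQ,
				32, NULL, NULL, NULL, &ls);
	if (err)
		return err;

	/* dlm_lock() / dlm_unlock() usage goes here */

	/* tear down even if local locks still exist */
	return dlm_release_lockspace(ls, DLM_RELEASE_NORMAL);
}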
diff --git a/include/linux/dlm_plock.h b/include/linux/dlm_plock.h
index e6d76e8715a6..15fc856d198c 100644
--- a/include/linux/dlm_plock.h
+++ b/include/linux/dlm_plock.h
@@ -11,6 +11,8 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
int cmd, struct file_lock *fl);
int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
struct file_lock *fl);
+int dlm_posix_cancel(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ struct file_lock *fl);
int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
struct file_lock *fl);
#endif
diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
index 90bd558a17f5..d1503b815a78 100644
--- a/include/linux/dm-bufio.h
+++ b/include/linux/dm-bufio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2009-2011 Red Hat, Inc.
*
@@ -18,19 +19,27 @@ struct dm_bufio_client;
struct dm_buffer;
/*
+ * Flags for dm_bufio_client_create
+ */
+#define DM_BUFIO_CLIENT_NO_SLEEP 0x1
+
+/*
* Create a buffered IO cache on a given device
*/
struct dm_bufio_client *
-dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
- unsigned reserved_buffers, unsigned aux_size,
+dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
+ unsigned int reserved_buffers, unsigned int aux_size,
void (*alloc_callback)(struct dm_buffer *),
- void (*write_callback)(struct dm_buffer *));
+ void (*write_callback)(struct dm_buffer *),
+ unsigned int flags);
/*
* Release a buffered IO cache.
*/
void dm_bufio_client_destroy(struct dm_bufio_client *c);
+void dm_bufio_client_reset(struct dm_bufio_client *c);
+
/*
* Set the sector range.
* When this function is called, there must be no I/O in progress on the bufio
@@ -55,6 +64,9 @@ void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start);
void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
struct dm_buffer **bp);
+void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block,
+ struct dm_buffer **bp, unsigned short ioprio);
+
/*
* Like dm_bufio_read, but return buffer from cache, don't read
* it. If the buffer is not in the cache, return NULL.
@@ -75,7 +87,11 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
* I/O to finish.
*/
void dm_bufio_prefetch(struct dm_bufio_client *c,
- sector_t block, unsigned n_blocks);
+ sector_t block, unsigned int n_blocks);
+
+void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c,
+ sector_t block, unsigned int n_blocks,
+ unsigned short ioprio);
/*
* Release a reference obtained with dm_bufio_{read,get,new}. The data
@@ -100,7 +116,7 @@ void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);
* write the specified part of the buffer or it may write a larger superset.
*/
void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
- unsigned start, unsigned end);
+ unsigned int start, unsigned int end);
/*
* Initiate writing of dirty buffers, without waiting for completion.
@@ -124,12 +140,6 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c);
int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count);
/*
- * Like dm_bufio_release but also move the buffer to the new
- * block. dm_bufio_write_dirty_buffers is needed to commit the new block.
- */
-void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);
-
-/*
* Free the given buffer.
* This is just a hint, if the buffer is in use or dirty, this function
* does nothing.
@@ -146,9 +156,9 @@ void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t
/*
* Set the minimum number of buffers before cleanup happens.
*/
-void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
+void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n);
-unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
+unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
sector_t dm_bufio_get_block_number(struct dm_buffer *b);
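Tying the new client flag and the ioprio variants together, a hypothetical user might do the following (block size and reserve counts are arbitrary choices):

#include <linux/dm-bufio.h>
#include <linux/ioprio.h>

static int my_bufio_demo(struct block_device *bdev, sector_t block)
{
	struct dm_bufio_client *c;
	struct dm_buffer *b;
	void *data;

	/* 4 KiB blocks, 4 reserved buffers, no aux space, no callbacks */
	c = dm_bufio_client_create(bdev, 4096, 4, 0, NULL, NULL,
				   DM_BUFIO_CLIENT_NO_SLEEP);
	if (IS_ERR(c))
		return PTR_ERR(c);

	/* read one block with an explicit I/O priority */
	data = dm_bufio_read_with_ioprio(c, block, &b, IOPRIO_DEFAULT);
	if (!IS_ERR(data)) {
		/* use the block's data, then drop the reference */
		dm_bufio_release(b);
	}

	dm_bufio_client_destroy(c);
	return 0;
}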
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 7084503c3405..0b10faedb26a 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -33,7 +34,7 @@ struct dm_dirty_log_type {
struct list_head list;
int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
- unsigned argc, char **argv);
+ unsigned int argc, char **argv);
void (*dtr)(struct dm_dirty_log *log);
/*
@@ -96,7 +97,7 @@ struct dm_dirty_log_type {
* Do not confuse this function with 'in_sync()', one
* tells you if an area is synchronised, the other
* assigns recovery work.
- */
+ */
int (*get_resync_work)(struct dm_dirty_log *log, region_t *region);
/*
@@ -116,7 +117,7 @@ struct dm_dirty_log_type {
* Support function for mirror status requests.
*/
int (*status)(struct dm_dirty_log *log, status_type_t status_type,
- char *result, unsigned maxlen);
+ char *result, unsigned int maxlen);
/*
* is_remote_recovering is necessary for cluster mirroring. It provides
@@ -139,7 +140,7 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
struct dm_target *ti,
int (*flush_callback_fn)(struct dm_target *ti),
- unsigned argc, char **argv);
+ unsigned int argc, char **argv);
void dm_dirty_log_destroy(struct dm_dirty_log *log);
#endif /* __KERNEL__ */
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index a52c6580cc9a..7b2968612b7e 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software
* Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
@@ -13,6 +14,7 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/blk_types.h>
struct dm_io_region {
struct block_device *bdev;
@@ -25,7 +27,7 @@ struct page_list {
struct page *page;
};
-typedef void (*io_notify_fn)(unsigned long error, void *context);
+typedef void (*io_notify_fn)(unsigned long error, void *context);
enum dm_io_mem_type {
DM_IO_PAGE_LIST,/* Page list */
@@ -37,7 +39,7 @@ enum dm_io_mem_type {
struct dm_io_memory {
enum dm_io_mem_type type;
- unsigned offset;
+ unsigned int offset;
union {
struct page_list *pl;
@@ -57,8 +59,7 @@ struct dm_io_notify {
*/
struct dm_io_client;
struct dm_io_request {
- int bi_op; /* REQ_OP */
- int bi_op_flags; /* req_flag_bits */
+ blk_opf_t bi_opf; /* Request type and flags */
struct dm_io_memory mem; /* Memory to use for io */
struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */
struct dm_io_client *client; /* Client memory handler */
@@ -78,8 +79,9 @@ void dm_io_client_destroy(struct dm_io_client *client);
* Each bit in the optional 'sync_error_bits' bitset indicates whether an
* error occurred doing io to the corresponding region.
*/
-int dm_io(struct dm_io_request *io_req, unsigned num_regions,
- struct dm_io_region *region, unsigned long *sync_error_bits);
+int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
+ struct dm_io_region *region, unsigned long *sync_error_bits,
+ unsigned short ioprio);
#endif /* __KERNEL__ */
#endif /* _LINUX_DM_IO_H */
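Under the reworked interface, a synchronous read request might be built as below; the region size and the DM_IO_KMEM memory kind are arbitrary choices for illustration:

#include <linux/dm-io.h>
#include <linux/ioprio.h>

static int my_dm_io_read(struct dm_io_client *client,
			 struct block_device *bdev, void *buf)
{
	struct dm_io_region region = {
		.bdev	= bdev,
		.sector	= 0,
		.count	= 8,			/* 8 sectors = 4 KiB */
	};
	struct dm_io_request req = {
		.bi_opf		= REQ_OP_READ,	/* replaces bi_op + bi_op_flags */
		.mem.type	= DM_IO_KMEM,
		.mem.ptr.addr	= buf,
		.notify.fn	= NULL,		/* NULL notify => synchronous */
		.client		= client,
	};
	unsigned long error_bits;

	return dm_io(&req, 1, &region, &error_bits, IOPRIO_DEFAULT);
}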
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
index e42de7750c88..51fb1af0b63e 100644
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2001 - 2003 Sistina Software
* Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
@@ -23,11 +24,11 @@
#define DM_KCOPYD_WRITE_SEQ 2
struct dm_kcopyd_throttle {
- unsigned throttle;
- unsigned num_io_jobs;
- unsigned io_period;
- unsigned total_period;
- unsigned last_jiffies;
+ unsigned int throttle;
+ unsigned int num_io_jobs;
+ unsigned int io_period;
+ unsigned int total_period;
+ unsigned int last_jiffies;
};
/*
@@ -51,6 +52,7 @@ MODULE_PARM_DESC(name, description)
struct dm_kcopyd_client;
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle);
void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
+void dm_kcopyd_client_flush(struct dm_kcopyd_client *kc);
/*
* Submit a copy job to kcopyd. This is built on top of the
@@ -59,12 +61,12 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
* read_err is a boolean,
* write_err is a bitset, with 1 bit for each destination region
*/
-typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
+typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
void *context);
void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
- unsigned num_dests, struct dm_io_region *dests,
- unsigned flags, dm_kcopyd_notify_fn fn, void *context);
+ unsigned int num_dests, struct dm_io_region *dests,
+ unsigned int flags, dm_kcopyd_notify_fn fn, void *context);
/*
* Prepare a callback and submit it via the kcopyd thread.
@@ -79,11 +81,11 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
*/
void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
dm_kcopyd_notify_fn fn, void *context);
-void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err);
+void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err);
void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
- unsigned num_dests, struct dm_io_region *dests,
- unsigned flags, dm_kcopyd_notify_fn fn, void *context);
+ unsigned int num_dests, struct dm_io_region *dests,
+ unsigned int flags, dm_kcopyd_notify_fn fn, void *context);
#endif /* __KERNEL__ */
#endif /* _LINUX_DM_KCOPYD_H */
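An asynchronous copy using these signatures could be sketched as follows; the completion-based context and the 128-sector extent are assumptions:

#include <linux/completion.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>

static void my_copy_done(int read_err, unsigned long write_err, void *context)
{
	/* read_err is boolean; write_err has one bit per destination */
	if (read_err || write_err)
		pr_err("kcopyd copy failed\n");
	complete(context);
}

static void my_copy(struct dm_kcopyd_client *kc,
		    struct block_device *src, struct block_device *dst,
		    struct completion *done)
{
	struct dm_io_region from = { .bdev = src, .sector = 0, .count = 128 };
	struct dm_io_region to   = { .bdev = dst, .sector = 0, .count = 128 };

	dm_kcopyd_copy(kc, &from, 1, &to, 0, my_copy_done, done);
}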
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
index 9e2a7a401df5..3079ed93dd2d 100644
--- a/include/linux/dm-region-hash.h
+++ b/include/linux/dm-region-hash.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -12,9 +13,11 @@
#include <linux/dm-dirty-log.h>
-/*-----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
* Region hash
- *----------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
struct dm_region_hash;
struct dm_region;
@@ -37,7 +40,7 @@ struct dm_region_hash *dm_region_hash_create(
struct bio_list *bios),
void (*wakeup_workers)(void *context),
void (*wakeup_all_recovery_waiters)(void *context),
- sector_t target_begin, unsigned max_recovery,
+ sector_t target_begin, unsigned int max_recovery,
struct dm_dirty_log *log, uint32_t region_size,
region_t nr_regions);
void dm_region_hash_destroy(struct dm_region_hash *rh);
diff --git a/include/linux/dm-verity-loadpin.h b/include/linux/dm-verity-loadpin.h
new file mode 100644
index 000000000000..3ac6dbaeaa37
--- /dev/null
+++ b/include/linux/dm-verity-loadpin.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_DM_VERITY_LOADPIN_H
+#define __LINUX_DM_VERITY_LOADPIN_H
+
+#include <linux/list.h>
+
+struct block_device;
+
+extern struct list_head dm_verity_loadpin_trusted_root_digests;
+
+struct dm_verity_loadpin_trusted_root_digest {
+ struct list_head node;
+ unsigned int len;
+ u8 data[] __counted_by(len);
+};
+
+#if IS_ENABLED(CONFIG_SECURITY_LOADPIN_VERITY)
+bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev);
+#else
+static inline bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev)
+{
+ return false;
+}
+#endif
+
+#endif /* __LINUX_DM_VERITY_LOADPIN_H */
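A caller in a module- or firmware-loading path might gate trust like this; the surrounding function is hypothetical:

#include <linux/dm-verity-loadpin.h>

static int my_check_bdev(struct block_device *bdev)
{
	if (!dm_verity_loadpin_is_bdev_trusted(bdev))
		return -EPERM;	/* not backed by a pinned dm-verity root */
	return 0;
}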
diff --git a/include/linux/dma-buf-map.h b/include/linux/dma-buf-map.h
deleted file mode 100644
index 278d489e4bdd..000000000000
--- a/include/linux/dma-buf-map.h
+++ /dev/null
@@ -1,266 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Pointer to dma-buf-mapped memory, plus helpers.
- */
-
-#ifndef __DMA_BUF_MAP_H__
-#define __DMA_BUF_MAP_H__
-
-#include <linux/io.h>
-#include <linux/string.h>
-
-/**
- * DOC: overview
- *
- * Calling dma-buf's vmap operation returns a pointer to the buffer's memory.
- * Depending on the location of the buffer, users may have to access it with
- * I/O operations or memory load/store operations. For example, copying to
- * system memory could be done with memcpy(), copying to I/O memory would be
- * done with memcpy_toio().
- *
- * .. code-block:: c
- *
- * void *vaddr = ...; // pointer to system memory
- * memcpy(vaddr, src, len);
- *
- * void *vaddr_iomem = ...; // pointer to I/O memory
- * memcpy_toio(vaddr_iomem, src, len);
- *
- * When using dma-buf's vmap operation, the returned pointer is encoded as
- * :c:type:`struct dma_buf_map <dma_buf_map>`.
- * :c:type:`struct dma_buf_map <dma_buf_map>` stores the buffer's address in
- * system or I/O memory and a flag that signals the required method of
- * accessing the buffer. Use the returned instance and the helper functions
- * to access the buffer's memory in the correct way.
- *
- * The type :c:type:`struct dma_buf_map <dma_buf_map>` and its helpers are
- * actually independent from the dma-buf infrastructure. When sharing buffers
- * among devices, drivers have to know the location of the memory to access
- * the buffers in a safe way. :c:type:`struct dma_buf_map <dma_buf_map>`
- * solves this problem for dma-buf and its users. If other drivers or
- * sub-systems require similar functionality, the type could be generalized
- * and moved to a more prominent header file.
- *
- * Open-coding access to :c:type:`struct dma_buf_map <dma_buf_map>` is
- * considered bad style. Rather then accessing its fields directly, use one
- * of the provided helper functions, or implement your own. For example,
- * instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be initialized
- * statically with DMA_BUF_MAP_INIT_VADDR(), or at runtime with
- * dma_buf_map_set_vaddr(). These helpers will set an address in system memory.
- *
- * .. code-block:: c
- *
- * struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(0xdeadbeaf);
- *
- * dma_buf_map_set_vaddr(&map, 0xdeadbeaf);
- *
- * To set an address in I/O memory, use dma_buf_map_set_vaddr_iomem().
- *
- * .. code-block:: c
- *
- * dma_buf_map_set_vaddr_iomem(&map, 0xdeadbeaf);
- *
- * Instances of struct dma_buf_map do not have to be cleaned up, but
- * can be cleared to NULL with dma_buf_map_clear(). Cleared mappings
- * always refer to system memory.
- *
- * .. code-block:: c
- *
- * dma_buf_map_clear(&map);
- *
- * Test if a mapping is valid with either dma_buf_map_is_set() or
- * dma_buf_map_is_null().
- *
- * .. code-block:: c
- *
- * if (dma_buf_map_is_set(&map) != dma_buf_map_is_null(&map))
- * // always true
- *
- * Instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be compared
- * for equality with dma_buf_map_is_equal(). Mappings that point to different
- * memory spaces, system or I/O, are never equal. That's even true if both
- * spaces are located in the same address space, both mappings contain the
- * same address value, or both mappings refer to NULL.
- *
- * .. code-block:: c
- *
- * struct dma_buf_map sys_map; // refers to system memory
- * struct dma_buf_map io_map; // refers to I/O memory
- *
- * if (dma_buf_map_is_equal(&sys_map, &io_map))
- * // always false
- *
- * A set up instance of struct dma_buf_map can be used to access or manipulate
- * the buffer memory. Depending on the location of the memory, the provided
- * helpers will pick the correct operations. Data can be copied into the memory
- * with dma_buf_map_memcpy_to(). The address can be manipulated with
- * dma_buf_map_incr().
- *
- * .. code-block:: c
- *
- * const void *src = ...; // source buffer
- * size_t len = ...; // length of src
- *
- * dma_buf_map_memcpy_to(&map, src, len);
- * dma_buf_map_incr(&map, len); // go to first byte after the memcpy
- */
-
-/**
- * struct dma_buf_map - Pointer to vmap'ed dma-buf memory.
- * @vaddr_iomem: The buffer's address if in I/O memory
- * @vaddr: The buffer's address if in system memory
- * @is_iomem: True if the dma-buf memory is located in I/O
- * memory, or false otherwise.
- */
-struct dma_buf_map {
- union {
- void __iomem *vaddr_iomem;
- void *vaddr;
- };
- bool is_iomem;
-};
-
-/**
- * DMA_BUF_MAP_INIT_VADDR - Initializes struct dma_buf_map to an address in system memory
- * @vaddr_: A system-memory address
- */
-#define DMA_BUF_MAP_INIT_VADDR(vaddr_) \
- { \
- .vaddr = (vaddr_), \
- .is_iomem = false, \
- }
-
-/**
- * dma_buf_map_set_vaddr - Sets a dma-buf mapping structure to an address in system memory
- * @map: The dma-buf mapping structure
- * @vaddr: A system-memory address
- *
- * Sets the address and clears the I/O-memory flag.
- */
-static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr)
-{
- map->vaddr = vaddr;
- map->is_iomem = false;
-}
-
-/**
- * dma_buf_map_set_vaddr_iomem - Sets a dma-buf mapping structure to an address in I/O memory
- * @map: The dma-buf mapping structure
- * @vaddr_iomem: An I/O-memory address
- *
- * Sets the address and the I/O-memory flag.
- */
-static inline void dma_buf_map_set_vaddr_iomem(struct dma_buf_map *map,
- void __iomem *vaddr_iomem)
-{
- map->vaddr_iomem = vaddr_iomem;
- map->is_iomem = true;
-}
-
-/**
- * dma_buf_map_is_equal - Compares two dma-buf mapping structures for equality
- * @lhs: The dma-buf mapping structure
- * @rhs: A dma-buf mapping structure to compare with
- *
- * Two dma-buf mapping structures are equal if they both refer to the same type of memory
- * and to the same address within that memory.
- *
- * Returns:
- * True if both structures are equal, or false otherwise.
- */
-static inline bool dma_buf_map_is_equal(const struct dma_buf_map *lhs,
- const struct dma_buf_map *rhs)
-{
- if (lhs->is_iomem != rhs->is_iomem)
- return false;
- else if (lhs->is_iomem)
- return lhs->vaddr_iomem == rhs->vaddr_iomem;
- else
- return lhs->vaddr == rhs->vaddr;
-}
-
-/**
- * dma_buf_map_is_null - Tests for a dma-buf mapping to be NULL
- * @map: The dma-buf mapping structure
- *
- * Depending on the state of struct dma_buf_map.is_iomem, tests if the
- * mapping is NULL.
- *
- * Returns:
- * True if the mapping is NULL, or false otherwise.
- */
-static inline bool dma_buf_map_is_null(const struct dma_buf_map *map)
-{
- if (map->is_iomem)
- return !map->vaddr_iomem;
- return !map->vaddr;
-}
-
-/**
- * dma_buf_map_is_set - Tests if the dma-buf mapping has been set
- * @map: The dma-buf mapping structure
- *
- * Depending on the state of struct dma_buf_map.is_iomem, tests if the
- * mapping has been set.
- *
- * Returns:
- * True if the mapping has been set, or false otherwise.
- */
-static inline bool dma_buf_map_is_set(const struct dma_buf_map *map)
-{
- return !dma_buf_map_is_null(map);
-}
-
-/**
- * dma_buf_map_clear - Clears a dma-buf mapping structure
- * @map: The dma-buf mapping structure
- *
- * Clears all fields to zero; including struct dma_buf_map.is_iomem. So
- * mapping structures that were set to point to I/O memory are reset for
- * system memory. Pointers are cleared to NULL. This is the default.
- */
-static inline void dma_buf_map_clear(struct dma_buf_map *map)
-{
- if (map->is_iomem) {
- map->vaddr_iomem = NULL;
- map->is_iomem = false;
- } else {
- map->vaddr = NULL;
- }
-}
-
-/**
- * dma_buf_map_memcpy_to - Memcpy into dma-buf mapping
- * @dst: The dma-buf mapping structure
- * @src: The source buffer
- * @len: The number of bytes in src
- *
- * Copies data into a dma-buf mapping. The source buffer is in system
- * memory. Depending on the buffer's location, the helper picks the correct
- * method of accessing the memory.
- */
-static inline void dma_buf_map_memcpy_to(struct dma_buf_map *dst, const void *src, size_t len)
-{
- if (dst->is_iomem)
- memcpy_toio(dst->vaddr_iomem, src, len);
- else
- memcpy(dst->vaddr, src, len);
-}
-
-/**
- * dma_buf_map_incr - Increments the address stored in a dma-buf mapping
- * @map: The dma-buf mapping structure
- * @incr: The number of bytes to increment
- *
- * Increments the address stored in a dma-buf mapping. Depending on the
- * buffer's location, the correct value will be updated.
- */
-static inline void dma_buf_map_incr(struct dma_buf_map *map, size_t incr)
-{
- if (map->is_iomem)
- map->vaddr_iomem += incr;
- else
- map->vaddr += incr;
-}
-
-#endif /* __DMA_BUF_MAP_H__ */
diff --git a/include/linux/dma-buf-mapping.h b/include/linux/dma-buf-mapping.h
new file mode 100644
index 000000000000..a3c0ce2d3a42
--- /dev/null
+++ b/include/linux/dma-buf-mapping.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * DMA BUF Mapping Helpers
+ *
+ */
+#ifndef __DMA_BUF_MAPPING_H__
+#define __DMA_BUF_MAPPING_H__
+#include <linux/dma-buf.h>
+
+struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
+ struct p2pdma_provider *provider,
+ struct dma_buf_phys_vec *phys_vec,
+ size_t nr_ranges, size_t size,
+ enum dma_data_direction dir);
+void dma_buf_free_sgt(struct dma_buf_attachment *attach, struct sg_table *sgt,
+ enum dma_data_direction dir);
+#endif
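Based only on the declarations above, an exporter-side sketch might look like this; the physical range, the p2pdma provider, and the ERR_PTR-style error convention are all assumptions not spelled out by this header:

#include <linux/dma-buf-mapping.h>
#include <linux/sizes.h>

static int my_map_ranges(struct dma_buf_attachment *attach,
			 struct p2pdma_provider *provider,
			 phys_addr_t base)
{
	struct dma_buf_phys_vec vec = { .paddr = base, .len = SZ_2M };
	struct sg_table *sgt;

	sgt = dma_buf_phys_vec_to_sgt(attach, provider, &vec, 1, SZ_2M,
				      DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* program the device with sgt here */

	dma_buf_free_sgt(attach, sgt, DMA_BIDIRECTIONAL);
	return 0;
}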
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index efdc56b9d95f..0bc492090237 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -13,7 +13,7 @@
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__
-#include <linux/dma-buf-map.h>
+#include <linux/iosys-map.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
@@ -22,6 +22,7 @@
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>
+#include <linux/pci-p2pdma.h>
struct device;
struct dma_buf;
@@ -35,15 +36,6 @@ struct dma_buf_attachment;
*/
struct dma_buf_ops {
/**
- * @cache_sgt_mapping:
- *
- * If true the framework will cache the first mapping made for each
- * attachment. This avoids creating mappings for attachments multiple
- * times.
- */
- bool cache_sgt_mapping;
-
- /**
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
@@ -54,7 +46,7 @@ struct dma_buf_ops {
* device), and otherwise need to fail the attach operation.
*
* The exporter should also in general check whether the current
- * allocation fullfills the DMA constraints of the new device. If this
+ * allocation fulfills the DMA constraints of the new device. If this
* is not the case, and the allocation cannot be moved, it should also
* fail the attach operation.
*
@@ -86,8 +78,8 @@ struct dma_buf_ops {
* @pin:
*
* This is called by dma_buf_pin() and lets the exporter know that the
- * DMA-buf can't be moved any more. The exporter should pin the buffer
- * into system memory to make sure it is generally accessible by other
+ * DMA-buf can't be moved any more. Ideally, the exporter should
+ * pin the buffer so that it is generally accessible by all
* devices.
*
* This is called with the &dmabuf.resv object locked and is mutual
@@ -96,6 +88,12 @@ struct dma_buf_ops {
* This is called automatically for non-dynamic importers from
* dma_buf_attach().
*
+ * Note that similar to non-dynamic exporters in their @map_dma_buf
+ * callback the driver must guarantee that the memory is available for
+ * use and cleared of any old data by the time this function returns.
+ * Drivers which pipeline their buffer moves internally must wait for
+ * all moves and clears to complete.
+ *
* Returns:
*
* 0 on success, negative error code on failure.
@@ -144,9 +142,18 @@ struct dma_buf_ops {
* This is always called with the dmabuf->resv object locked when
* the dynamic_mapping flag is true.
*
+ * Note that for non-dynamic exporters the driver must guarantee that
+ * that the memory is available for use and cleared of any old data by
+ * the time this function returns. Drivers which pipeline their buffer
+ * moves internally must wait for all moves and clears to complete.
+ * Dynamic exporters do not need to follow this rule: For non-dynamic
+ * importers the buffer is already pinned through @pin, which has the
+ * same requirements. Dynamic importers otoh are required to obey the
+ * dma_resv fences.
+ *
* Returns:
*
- * A &sg_table scatter list of or the backing storage of the DMA buffer,
+ * A &sg_table scatter list of the backing storage of the DMA buffer,
* already mapped into the device address space of the &device attached
* with the provided &dma_buf_attachment. The addresses and lengths in
* the scatter list are PAGE_SIZE aligned.
@@ -168,7 +175,7 @@ struct dma_buf_ops {
*
* This is called by dma_buf_unmap_attachment() and should unmap and
* release the &sg_table allocated in @map_dma_buf, and it is mandatory.
- * For static dma_buf handling this might also unpins the backing
+ * For static dma_buf handling this might also unpin the backing
* storage if this is the last mapping of the DMA buffer.
*/
void (*unmap_dma_buf)(struct dma_buf_attachment *,
@@ -237,7 +244,7 @@ struct dma_buf_ops {
* This callback is used by the dma_buf_mmap() function
*
* Note that the mapping needs to be incoherent, userspace is expected
- * to braket CPU access using the DMA_BUF_IOCTL_SYNC interface.
+ * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
*
* Because dma-buf buffers have invariant size over their lifetime, the
* dma-buf core checks whether a vma is too large and rejects such
@@ -268,33 +275,12 @@ struct dma_buf_ops {
*/
int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
- int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
- void (*vunmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
+ int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map);
+ void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map);
};
/**
* struct dma_buf - shared buffer object
- * @size: size of the buffer; invariant over the lifetime of the buffer.
- * @file: file pointer used for sharing buffers across, and for refcounting.
- * @attachments: list of dma_buf_attachment that denotes all devices attached,
- * protected by dma_resv lock.
- * @ops: dma_buf_ops associated with this buffer object.
- * @lock: used internally to serialize list manipulation, attach/detach and
- * vmap/unmap
- * @vmapping_counter: used internally to refcnt the vmaps
- * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
- * @exp_name: name of the exporter; useful for debugging.
- * @name: userspace-provided name; useful for accounting and debugging,
- * protected by @resv.
- * @name_lock: spinlock to protect name access
- * @owner: pointer to exporter module; used for refcounting when exporter is a
- * kernel module.
- * @list_node: node for dma_buf accounting and debugging.
- * @priv: exporter specific private data for this buffer object.
- * @resv: reservation object linked to this dma-buf
- * @poll: for userspace poll support
- * @cb_excl: for userspace poll support
- * @cb_shared: for userspace poll support
*
* This represents a shared buffer, created by calling dma_buf_export(). The
* userspace representation is a normal file descriptor, which can be created by
@@ -306,30 +292,155 @@ struct dma_buf_ops {
* Device DMA access is handled by the separate &struct dma_buf_attachment.
*/
struct dma_buf {
+ /**
+ * @size:
+ *
+ * Size of the buffer; invariant over the lifetime of the buffer.
+ */
size_t size;
+
+ /**
+ * @file:
+ *
+ * File pointer used for sharing buffers across processes, and for
+ * See dma_buf_get() and dma_buf_put().
+ */
struct file *file;
+
+ /**
+ * @attachments:
+ *
+ * List of dma_buf_attachment that denotes all devices attached,
+ * protected by &dma_resv lock @resv.
+ */
struct list_head attachments;
+
+ /** @ops: dma_buf_ops associated with this buffer object. */
const struct dma_buf_ops *ops;
- struct mutex lock;
+
+ /**
+ * @vmapping_counter:
+ *
+ * Used internally to refcnt the vmaps returned by dma_buf_vmap().
+ * Protected by @lock.
+ */
unsigned vmapping_counter;
- struct dma_buf_map vmap_ptr;
+
+ /**
+ * @vmap_ptr:
+ * The current vmap ptr if @vmapping_counter > 0. Protected by @lock.
+ */
+ struct iosys_map vmap_ptr;
+
+ /**
+ * @exp_name:
+ *
+ * Name of the exporter; useful for debugging. Must not be NULL
+ */
const char *exp_name;
+
+ /**
+ * @name:
+ *
+ * Userspace-provided name. Default value is NULL. If not NULL,
+ * length cannot be longer than DMA_BUF_NAME_LEN, including NIL
+ * char. Useful for accounting and debugging. Read/Write accesses
+ * are protected by @name_lock
+ *
+ * See the IOCTLs DMA_BUF_SET_NAME or DMA_BUF_SET_NAME_A/B
+ */
const char *name;
+
+ /** @name_lock: Spinlock to protect name access for read access. */
spinlock_t name_lock;
+
+ /**
+ * @owner:
+ *
+ * Pointer to exporter module; used for refcounting when exporter is a
+ * kernel module.
+ */
struct module *owner;
+
+ /** @list_node: node for dma_buf accounting and debugging. */
struct list_head list_node;
+
+ /** @priv: exporter specific private data for this buffer object. */
void *priv;
+
+ /**
+ * @resv:
+ *
+ * Reservation object linked to this dma-buf.
+ *
+ * IMPLICIT SYNCHRONIZATION RULES:
+ *
+ * Drivers which support implicit synchronization of buffer access as
+ * e.g. exposed in `Implicit Fence Poll Support`_ must follow the
+ * below rules.
+ *
+ * - Drivers must add a read fence through dma_resv_add_fence() with the
+ * DMA_RESV_USAGE_READ flag for anything the userspace API considers a
+ * read access. This highly depends upon the API and window system.
+ *
+ * - Similarly drivers must add a write fence through
+ * dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for
+ * anything the userspace API considers write access.
+ *
+ * - Drivers may just always add a write fence, since that only
+ * causes unnecessary synchronization, but no correctness issues.
+ *
+ * - Some drivers only expose a synchronous userspace API with no
+ * pipelining across drivers. These do not set any fences for their
+ * access. An example here is v4l.
+ *
+ * - Driver should use dma_resv_usage_rw() when retrieving fences as
+ * dependency for implicit synchronization.
+ *
+ * DYNAMIC IMPORTER RULES:
+ *
+ * Dynamic importers, see dma_buf_attachment_is_dynamic(), have
+ * additional constraints on how they set up fences:
+ *
+ * - Dynamic importers must obey the write fences and wait for them to
+ * signal before allowing access to the buffer's underlying storage
+ * through the device.
+ *
+ * - Dynamic importers should set fences for any access that they can't
+ * disable immediately from their &dma_buf_attach_ops.move_notify
+ * callback.
+ *
+ * IMPORTANT:
+ *
+ * All drivers and memory management related functions must obey the
+ * struct dma_resv rules, specifically the rules for updating and
+ * obeying fences. See enum dma_resv_usage for further descriptions.
+ */
struct dma_resv *resv;
- /* poll support */
+ /** @poll: for userspace poll support */
wait_queue_head_t poll;
+ /** @cb_in: for userspace poll support */
+ /** @cb_out: for userspace poll support */
struct dma_buf_poll_cb_t {
struct dma_fence_cb cb;
wait_queue_head_t *poll;
__poll_t active;
- } cb_excl, cb_shared;
+ } cb_in, cb_out;
+#ifdef CONFIG_DMABUF_SYSFS_STATS
+ /**
+ * @sysfs_entry:
+ *
+ * For exposing information about this buffer in sysfs. See also
+ * `DMA-BUF statistics`_ for the uapi this enables.
+ */
+ struct dma_buf_sysfs_entry {
+ struct kobject kobj;
+ struct dma_buf *dmabuf;
+ } *sysfs_entry;
+#endif
};
/**
@@ -372,8 +483,6 @@ struct dma_buf_attach_ops {
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
* @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
- * @sgt: cached mapping.
- * @dir: direction of cached mapping.
* @peer2peer: true if the importer can handle peer resources without pages.
* @priv: exporter specific attachment data.
* @importer_ops: importer operations for this attachment, if provided
@@ -393,8 +502,6 @@ struct dma_buf_attachment {
struct dma_buf *dmabuf;
struct device *dev;
struct list_head node;
- struct sg_table *sgt;
- enum dma_data_direction dir;
bool peer2peer;
const struct dma_buf_attach_ops *importer_ops;
void *importer_priv;
@@ -425,6 +532,16 @@ struct dma_buf_export_info {
};
/**
+ * struct dma_buf_phys_vec - describe continuous chunk of memory
+ * @paddr: physical address of that chunk
+ * @len: Length of this chunk
+ */
+struct dma_buf_phys_vec {
+ phys_addr_t paddr;
+ size_t len;
+};
+
+/**
* DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
* @name: export-info name
*
@@ -462,20 +579,6 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
return !!dmabuf->ops->pin;
}
-/**
- * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
- * mappinsg
- * @attach: the DMA-buf attachment to check
- *
- * Returns true if a DMA-buf importer wants to call the map/unmap functions with
- * the dma_resv lock held.
- */
-static inline bool
-dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
-{
- return !!attach->importer_ops;
-}
-
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev);
struct dma_buf_attachment *
@@ -502,9 +605,19 @@ int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
+struct sg_table *
+dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction);
+void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
+ struct sg_table *sg_table,
+ enum dma_data_direction direction);
int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
-int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
-void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
+int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
+void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
+int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
+void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
+struct dma_buf *dma_buf_iter_begin(void);
+struct dma_buf *dma_buf_iter_next(struct dma_buf *dmbuf);
#endif /* __DMA_BUF_H__ */
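
For context, a typical importer using the unlocked map helpers declared above might do the following (a sketch; error handling abbreviated):

	struct dma_buf_attachment *att;
	struct sg_table *sgt;

	att = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(att))
		return PTR_ERR(att);

	sgt = dma_buf_map_attachment_unlocked(att, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, att);
		return PTR_ERR(sgt);
	}

	/* ... program the device using sgt ... */

	dma_buf_unmap_attachment_unlocked(att, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, att);
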
diff --git a/include/linux/dma-buf/heaps/cma.h b/include/linux/dma-buf/heaps/cma.h
new file mode 100644
index 000000000000..e751479e21e7
--- /dev/null
+++ b/include/linux/dma-buf/heaps/cma.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef DMA_BUF_HEAP_CMA_H_
+#define DMA_BUF_HEAP_CMA_H_
+
+struct cma;
+
+#ifdef CONFIG_DMABUF_HEAPS_CMA
+int dma_heap_cma_register_heap(struct cma *cma);
+#else
+static inline int dma_heap_cma_register_heap(struct cma *cma)
+{
+ return 0;
+}
+#endif // CONFIG_DMABUF_HEAPS_CMA
+
+#endif // DMA_BUF_HEAP_CMA_H_
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 18aade195884..c249912456f9 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -12,7 +12,7 @@
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>
-extern unsigned int zone_dma_bits;
+extern u64 zone_dma_limit;
/*
* Record the mapping of CPU physical to DMA addresses for a given region.
@@ -21,7 +21,6 @@ struct bus_dma_region {
phys_addr_t cpu_start;
dma_addr_t dma_start;
u64 size;
- u64 offset;
};
static inline dma_addr_t translate_phys_to_dma(struct device *dev,
@@ -29,9 +28,12 @@ static inline dma_addr_t translate_phys_to_dma(struct device *dev,
{
const struct bus_dma_region *m;
- for (m = dev->dma_range_map; m->size; m++)
- if (paddr >= m->cpu_start && paddr - m->cpu_start < m->size)
- return (dma_addr_t)paddr - m->offset;
+ for (m = dev->dma_range_map; m->size; m++) {
+ u64 offset = paddr - m->cpu_start;
+
+ if (paddr >= m->cpu_start && offset < m->size)
+ return m->dma_start + offset;
+ }
/* make sure dma_capable fails when no translation is available */
return DMA_MAPPING_ERROR;
@@ -42,27 +44,52 @@ static inline phys_addr_t translate_dma_to_phys(struct device *dev,
{
const struct bus_dma_region *m;
- for (m = dev->dma_range_map; m->size; m++)
- if (dma_addr >= m->dma_start && dma_addr - m->dma_start < m->size)
- return (phys_addr_t)dma_addr + m->offset;
+ for (m = dev->dma_range_map; m->size; m++) {
+ u64 offset = dma_addr - m->dma_start;
+
+ if (dma_addr >= m->dma_start && offset < m->size)
+ return m->cpu_start + offset;
+ }
return (phys_addr_t)-1;
}
+static inline dma_addr_t dma_range_map_min(const struct bus_dma_region *map)
+{
+ dma_addr_t ret = (dma_addr_t)U64_MAX;
+
+ for (; map->size; map++)
+ ret = min(ret, map->dma_start);
+ return ret;
+}
+
+static inline dma_addr_t dma_range_map_max(const struct bus_dma_region *map)
+{
+ dma_addr_t ret = 0;
+
+ for (; map->size; map++)
+ ret = max(ret, map->dma_start + map->size - 1);
+ return ret;
+}
+
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#ifndef phys_to_dma_unencrypted
#define phys_to_dma_unencrypted phys_to_dma
#endif
#else
-static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
- phys_addr_t paddr)
+static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
if (dev->dma_range_map)
return translate_phys_to_dma(dev, paddr);
return paddr;
}
+static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
+ phys_addr_t paddr)
+{
+ return dma_addr_unencrypted(__phys_to_dma(dev, paddr));
+}
/*
* If memory encryption is supported, phys_to_dma will set the memory encryption
* bit in the DMA address, and dma_to_phys will clear it.
@@ -71,19 +98,20 @@ static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
*/
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
- return __sme_set(phys_to_dma_unencrypted(dev, paddr));
+ return dma_addr_encrypted(__phys_to_dma(dev, paddr));
}
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
phys_addr_t paddr;
+ dma_addr = dma_addr_canonical(dma_addr);
if (dev->dma_range_map)
paddr = translate_dma_to_phys(dev, dma_addr);
else
paddr = dma_addr;
- return __sme_clr(paddr);
+ return paddr;
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
@@ -121,7 +149,5 @@ void dma_direct_free_pages(struct device *dev, size_t size,
struct page *page, dma_addr_t dma_addr,
enum dma_data_direction dir);
int dma_direct_supported(struct device *dev, u64 mask);
-dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir, unsigned long attrs);
#endif /* _LINUX_DMA_DIRECT_H */
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 303dd712220f..079b3dec0a16 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -33,6 +33,7 @@ struct dma_fence_array_cb {
* @num_pending: fences in the array still pending
* @fences: array of the fences
* @work: internal irq_work function
+ * @callbacks: array of callback helpers
*/
struct dma_fence_array {
struct dma_fence base;
@@ -43,20 +44,9 @@ struct dma_fence_array {
struct dma_fence **fences;
struct irq_work work;
-};
-
-extern const struct dma_fence_ops dma_fence_array_ops;
-/**
- * dma_fence_is_array - check if a fence is from the array subsclass
- * @fence: fence to test
- *
- * Return true if it is a dma_fence_array and false otherwise.
- */
-static inline bool dma_fence_is_array(struct dma_fence *fence)
-{
- return fence->ops == &dma_fence_array_ops;
-}
+ struct dma_fence_array_cb callbacks[] __counted_by(num_fences);
+};
/**
* to_dma_fence_array - cast a fence to a dma_fence_array
@@ -68,12 +58,33 @@ static inline bool dma_fence_is_array(struct dma_fence *fence)
static inline struct dma_fence_array *
to_dma_fence_array(struct dma_fence *fence)
{
- if (fence->ops != &dma_fence_array_ops)
+ if (!fence || !dma_fence_is_array(fence))
return NULL;
return container_of(fence, struct dma_fence_array, base);
}
+/**
+ * dma_fence_array_for_each - iterate over all fences in array
+ * @fence: current fence
+ * @index: index into the array
+ * @head: potential dma_fence_array object
+ *
+ * Test if @head is a dma_fence_array object and if so iterate over all fences
+ * in the array. If not, just iterate over the fence in @head itself.
+ *
+ * For a deep dive iterator see dma_fence_unwrap_for_each().
+ */
+#define dma_fence_array_for_each(fence, index, head) \
+ for (index = 0, fence = dma_fence_array_first(head); fence; \
+ ++(index), fence = dma_fence_array_next(head, index))
+
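
A minimal use of the iterator, assuming head is any fence (container or not):

	struct dma_fence *fence;
	unsigned int index;

	dma_fence_array_for_each(fence, index, head) {
		/* fence visits each array member, or head itself
		 * when head is not a dma_fence_array */
	}
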
+struct dma_fence_array *dma_fence_array_alloc(int num_fences);
+void dma_fence_array_init(struct dma_fence_array *array,
+ int num_fences, struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any);
+
struct dma_fence_array *dma_fence_array_create(int num_fences,
struct dma_fence **fences,
u64 context, unsigned seqno,
@@ -81,4 +92,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
bool dma_fence_match_context(struct dma_fence *fence, u64 context);
+struct dma_fence *dma_fence_array_first(struct dma_fence *head);
+struct dma_fence *dma_fence_array_next(struct dma_fence *head,
+ unsigned int index);
+
#endif /* __LINUX_DMA_FENCE_ARRAY_H */
diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h
index 10462a029da2..68c3c1e41014 100644
--- a/include/linux/dma-fence-chain.h
+++ b/include/linux/dma-fence-chain.h
@@ -12,28 +12,43 @@
#include <linux/dma-fence.h>
#include <linux/irq_work.h>
+#include <linux/slab.h>
/**
* struct dma_fence_chain - fence to represent a node of a fence chain
* @base: fence base class
- * @lock: spinlock for fence handling
* @prev: previous fence of the chain
* @prev_seqno: original previous seqno before garbage collection
* @fence: encapsulated fence
- * @cb: callback structure for signaling
- * @work: irq work item for signaling
+ * @lock: spinlock for fence handling
*/
struct dma_fence_chain {
struct dma_fence base;
- spinlock_t lock;
struct dma_fence __rcu *prev;
u64 prev_seqno;
struct dma_fence *fence;
- struct dma_fence_cb cb;
- struct irq_work work;
+ union {
+ /**
+ * @cb: callback for signaling
+ *
+ * This is used to add the callback for signaling the
+		 * completion of the fence chain. Never used at the same time
+ * as the irq work.
+ */
+ struct dma_fence_cb cb;
+
+ /**
+ * @work: irq work item for signaling
+ *
+ * Irq work structure to allow us to add the callback without
+ * running into lock inversion. Never used at the same time as
+ * the callback.
+ */
+ struct irq_work work;
+ };
+ spinlock_t lock;
};
-extern const struct dma_fence_ops dma_fence_chain_ops;
/**
* to_dma_fence_chain - cast a fence to a dma_fence_chain
@@ -45,19 +60,62 @@ extern const struct dma_fence_ops dma_fence_chain_ops;
static inline struct dma_fence_chain *
to_dma_fence_chain(struct dma_fence *fence)
{
- if (!fence || fence->ops != &dma_fence_chain_ops)
+ if (!fence || !dma_fence_is_chain(fence))
return NULL;
return container_of(fence, struct dma_fence_chain, base);
}
/**
+ * dma_fence_chain_contained - return the contained fence
+ * @fence: the fence to test
+ *
+ * If the fence is a dma_fence_chain the function returns the fence contained
+ * inside the chain object, otherwise it returns the fence itself.
+ */
+static inline struct dma_fence *
+dma_fence_chain_contained(struct dma_fence *fence)
+{
+ struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+
+ return chain ? chain->fence : fence;
+}
+
+/**
+ * dma_fence_chain_alloc
+ *
+ * Returns a new struct dma_fence_chain object or NULL on failure.
+ *
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag). The typecast is
+ * intentional to enforce type safety.
+ */
+#define dma_fence_chain_alloc() \
+ ((struct dma_fence_chain *)kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL))
+
+/**
+ * dma_fence_chain_free
+ * @chain: chain node to free
+ *
+ * Frees up an allocated but not used struct dma_fence_chain object. This
+ * doesn't need an RCU grace period since the fence was never initialized nor
+ * published. After dma_fence_chain_init() has been called the fence must be
+ * released by calling dma_fence_put(), and not through this function.
+ */
+static inline void dma_fence_chain_free(struct dma_fence_chain *chain)
+{
+ kfree(chain);
+}
+
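
A sketch of the intended life cycle, assuming prev, fence and seqno come from the caller:

	struct dma_fence_chain *chain = dma_fence_chain_alloc();

	if (!chain)
		return -ENOMEM;
	dma_fence_chain_init(chain, dma_fence_get(prev),
			     dma_fence_get(fence), seqno);
	/* From here on release only via dma_fence_put(&chain->base);
	 * dma_fence_chain_free() would now be wrong. */
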
+/**
* dma_fence_chain_for_each - iterate over all fences in chain
* @iter: current fence
* @head: starting point
*
* Iterate over all fences in the chain. We keep a reference to the current
* fence while inside the loop which must be dropped when breaking out.
+ *
+ * For a deep dive iterator see dma_fence_unwrap_for_each().
*/
#define dma_fence_chain_for_each(iter, head) \
for (iter = dma_fence_get(head); iter; \
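
In particular, breaking out of the loop must drop the reference the iterator holds, e.g. (should_stop() is a hypothetical predicate):

	struct dma_fence *iter;

	dma_fence_chain_for_each(iter, head) {
		if (should_stop(iter)) {
			/* drop the loop's reference when breaking out */
			dma_fence_put(iter);
			break;
		}
	}
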
diff --git a/include/linux/dma-fence-unwrap.h b/include/linux/dma-fence-unwrap.h
new file mode 100644
index 000000000000..62df222fe0f1
--- /dev/null
+++ b/include/linux/dma-fence-unwrap.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ */
+
+#ifndef __LINUX_DMA_FENCE_UNWRAP_H
+#define __LINUX_DMA_FENCE_UNWRAP_H
+
+struct dma_fence;
+
+/**
+ * struct dma_fence_unwrap - cursor into the container structure
+ *
+ * Should be used with dma_fence_unwrap_for_each() iterator macro.
+ */
+struct dma_fence_unwrap {
+ /**
+	 * @chain: potential dma_fence_chain, but can be another fence as well
+ */
+ struct dma_fence *chain;
+ /**
+	 * @array: potential dma_fence_array, but can be another fence as well
+ */
+ struct dma_fence *array;
+ /**
+ * @index: last returned index if @array is really a dma_fence_array
+ */
+ unsigned int index;
+};
+
+struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
+ struct dma_fence_unwrap *cursor);
+struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor);
+
+/**
+ * dma_fence_unwrap_for_each - iterate over all fences in containers
+ * @fence: current fence
+ * @cursor: current position inside the containers
+ * @head: starting point for the iterator
+ *
+ * Unwrap dma_fence_chain and dma_fence_array containers and deep dive into all
+ * potential fences in them. If @head is just a normal fence, only that one is
+ * returned.
+ */
+#define dma_fence_unwrap_for_each(fence, cursor, head) \
+ for (fence = dma_fence_unwrap_first(head, cursor); fence; \
+ fence = dma_fence_unwrap_next(cursor))
+
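
A sketch of deep iteration over arbitrarily nested containers, here simply counting the leaf fences behind head:

	static unsigned int count_fences(struct dma_fence *head)
	{
		struct dma_fence_unwrap cursor;
		struct dma_fence *f;
		unsigned int count = 0;

		dma_fence_unwrap_for_each(f, &cursor, head)
			count++;
		return count;
	}
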
+struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
+ struct dma_fence **fences,
+ struct dma_fence_unwrap *cursors);
+
+int dma_fence_dedup_array(struct dma_fence **array, int num_fences);
+
+/**
+ * dma_fence_unwrap_merge - unwrap and merge fences
+ *
+ * All fences given as parameters are unwrapped and merged back together as a flat
+ * dma_fence_array. Useful if multiple containers need to be merged together.
+ *
+ * Implemented as a macro to allocate the necessary arrays on the stack and
+ * account the stack frame size to the caller.
+ *
+ * Returns NULL on memory allocation failure, otherwise a dma_fence object
+ * representing all the given fences.
+ */
+#define dma_fence_unwrap_merge(...) \
+ ({ \
+ struct dma_fence *__f[] = { __VA_ARGS__ }; \
+ struct dma_fence_unwrap __c[ARRAY_SIZE(__f)]; \
+ \
+ __dma_fence_unwrap_merge(ARRAY_SIZE(__f), __f, __c); \
+ })
+
+#endif
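
Usage of the merge macro is variadic, e.g. (a sketch with hypothetical input fences):

	struct dma_fence *combined;

	combined = dma_fence_unwrap_merge(fence_a, fence_b, fence_c);
	if (!combined)
		return -ENOMEM;
	/* combined now represents every leaf fence of the three inputs */
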
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 6ffb4b2c6371..64639e104110 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -21,10 +21,12 @@
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
+#include <linux/timekeeping.h>
struct dma_fence;
struct dma_fence_ops;
struct dma_fence_cb;
+struct seq_file;
/**
* struct dma_fence - software synchronization primitive
@@ -96,6 +98,7 @@ struct dma_fence {
};
enum dma_fence_flag_bits {
+ DMA_FENCE_FLAG_SEQNO64_BIT,
DMA_FENCE_FLAG_SIGNALED_BIT,
DMA_FENCE_FLAG_TIMESTAMP_BIT,
DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
@@ -124,14 +127,6 @@ struct dma_fence_cb {
*/
struct dma_fence_ops {
/**
- * @use_64bit_seqno:
- *
- * True if this dma_fence implementation uses 64bit seqno, false
- * otherwise.
- */
- bool use_64bit_seqno;
-
- /**
* @get_driver_name:
*
* Returns the driver name. This is a callback to allow drivers to
@@ -168,8 +163,8 @@ struct dma_fence_ops {
* implementation know that there is another driver waiting on the
* signal (ie. hw->sw case).
*
- * This function can be called from atomic context, but not
- * from irq context, so normal spinlocks can be used.
+ * This is called with IRQs disabled, so only spinlocks which disable
+ * IRQs can be used in the code outside of this callback.
*
* A return value of false indicates the fence already passed,
* or some failure occurred that made it impossible to enable
@@ -214,19 +209,15 @@ struct dma_fence_ops {
* Custom wait implementation, defaults to dma_fence_default_wait() if
* not set.
*
- * The dma_fence_default_wait implementation should work for any fence, as long
- * as @enable_signaling works correctly. This hook allows drivers to
- * have an optimized version for the case where a process context is
- * already available, e.g. if @enable_signaling for the general case
- * needs to set up a worker thread.
+ * Deprecated and should not be used by new implementations. Only used
+ * by existing implementations which need special handling for their
+ * hardware reset procedure.
*
* Must return -ERESTARTSYS if the wait is intr = true and the wait was
* interrupted, and remaining jiffies if fence has signaled, or 0 if wait
* timed out. Can also return other error values on custom implementations,
* which should be treated as if the fence is signaled. For example a hardware
* lockup could be reported like that.
- *
- * This callback is optional.
*/
signed long (*wait)(struct dma_fence *fence,
bool intr, signed long timeout);
@@ -242,32 +233,35 @@ struct dma_fence_ops {
void (*release)(struct dma_fence *fence);
/**
- * @fence_value_str:
+ * @set_deadline:
*
- * Callback to fill in free-form debug info specific to this fence, like
- * the sequence number.
+ * Callback to allow a fence waiter to inform the fence signaler of
+ * an upcoming deadline, such as vblank, by which point the waiter
+	 * would prefer the fence to be signaled. This is intended to
+ * give feedback to the fence signaler to aid in power management
+ * decisions, such as boosting GPU frequency.
*
- * This callback is optional.
- */
- void (*fence_value_str)(struct dma_fence *fence, char *str, int size);
-
- /**
- * @timeline_value_str:
+ * This is called without &dma_fence.lock held, it can be called
+ * multiple times and from any context. Locking is up to the callee
+ * if it has some state to manage. If multiple deadlines are set,
+ * the expectation is to track the soonest one. If the deadline is
+ * before the current time, it should be interpreted as an immediate
+ * deadline.
*
- * Fills in the current value of the timeline as a string, like the
- * sequence number. Note that the specific fence passed to this function
- * should not matter, drivers should only use it to look up the
- * corresponding timeline structures.
+ * This callback is optional.
*/
- void (*timeline_value_str)(struct dma_fence *fence,
- char *str, int size);
+ void (*set_deadline)(struct dma_fence *fence, ktime_t deadline);
};
void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, u64 seqno);
+void dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, u64 seqno);
+
void dma_fence_release(struct kref *kref);
void dma_fence_free(struct dma_fence *fence);
+void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq);
/**
* dma_fence_put - decreases refcount of the fence
@@ -385,6 +379,29 @@ bool dma_fence_remove_callback(struct dma_fence *fence,
void dma_fence_enable_sw_signaling(struct dma_fence *fence);
/**
+ * DOC: Safe external access to driver provided object members
+ *
+ * All data not stored directly in the dma-fence object, such as the
+ * &dma_fence.lock and memory potentially accessed by functions in the
+ * &dma_fence.ops table, MUST NOT be accessed after the fence has been signalled
+ * because after that point drivers are allowed to free it.
+ *
+ * All code accessing that data via the dma-fence API (or directly, which is
+ * discouraged), MUST make sure to contain the complete access within a
+ * &rcu_read_lock and &rcu_read_unlock pair.
+ *
+ * Some dma-fence APIs handle this automatically, while others, for example
+ * &dma_fence_driver_name and &dma_fence_timeline_name, leave that
+ * responsibility to the caller.
+ *
+ * To enable this scheme to work, drivers MUST ensure an RCU grace period
+ * elapses between signalling the fence and freeing said data.
+ */
+const char __rcu *dma_fence_driver_name(struct dma_fence *fence);
+const char __rcu *dma_fence_timeline_name(struct dma_fence *fence);
+
+/**
* dma_fence_is_signaled_locked - Return an indication if the fence
* is signaled yet.
* @fence: the fence to check
@@ -444,21 +461,20 @@ dma_fence_is_signaled(struct dma_fence *fence)
/**
* __dma_fence_is_later - return if f1 is chronologically later than f2
+ * @fence: fence in whose context to do the comparison
* @f1: the first fence's seqno
* @f2: the second fence's seqno from the same context
- * @ops: dma_fence_ops associated with the seqno
*
* Returns true if f1 is chronologically later than f2. Both fences must be
* from the same context, since a seqno is not common across contexts.
*/
-static inline bool __dma_fence_is_later(u64 f1, u64 f2,
- const struct dma_fence_ops *ops)
+static inline bool __dma_fence_is_later(struct dma_fence *fence, u64 f1, u64 f2)
{
/* This is for backward compatibility with drivers which can only handle
* 32bit sequence numbers. Use a 64bit compare when the driver says to
* do so.
*/
- if (ops->use_64bit_seqno)
+ if (test_bit(DMA_FENCE_FLAG_SEQNO64_BIT, &fence->flags))
return f1 > f2;
return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
@@ -478,7 +494,22 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
if (WARN_ON(f1->context != f2->context))
return false;
- return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
+ return __dma_fence_is_later(f1, f1->seqno, f2->seqno);
+}
+
+/**
+ * dma_fence_is_later_or_same - return true if f1 is later or same as f2
+ * @f1: the first fence from the same context
+ * @f2: the second fence from the same context
+ *
+ * Returns true if f1 is chronologically later than f2 or the same fence. Both
+ * fences must be from the same context, since a seqno is not re-used across
+ * contexts.
+ */
+static inline bool dma_fence_is_later_or_same(struct dma_fence *f1,
+ struct dma_fence *f2)
+{
+ return f1 == f2 || dma_fence_is_later(f1, f2);
}
/**
@@ -541,6 +572,12 @@ int dma_fence_get_status(struct dma_fence *fence);
* rather than success. This must be set before signaling (so that the value
* is visible before any waiters on the signal callback are woken). This
* helper exists to help catch erroneous setting of #dma_fence.error.
+ *
+ * Examples of error codes which drivers should use:
+ *
+ * * %-ENODATA This operation produced no data, no other operation affected.
+ * * %-ECANCELED All operations from the same context have been canceled.
+ * * %-ETIME Operation caused a timeout and potentially device reset.
*/
static inline void dma_fence_set_error(struct dma_fence *fence,
int error)
@@ -551,6 +588,25 @@ static inline void dma_fence_set_error(struct dma_fence *fence,
fence->error = error;
}
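
For instance, a driver reporting a timeout-induced reset would set the error strictly before signaling:

	dma_fence_set_error(fence, -ETIME);
	dma_fence_signal(fence);
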
+/**
+ * dma_fence_timestamp - helper to get the completion timestamp of a fence
+ * @fence: fence to get the timestamp from.
+ *
+ * After a fence is signaled the timestamp is updated with the signaling time,
+ * but setting the timestamp can race with tasks waiting for the signaling. This
+ * helper busy waits for the correct timestamp to appear.
+ */
+static inline ktime_t dma_fence_timestamp(struct dma_fence *fence)
+{
+ if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
+ return ktime_get();
+
+ while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+ cpu_relax();
+
+ return fence->timestamp;
+}
+
signed long dma_fence_wait_timeout(struct dma_fence *,
bool intr, signed long timeout);
signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
@@ -586,30 +642,48 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
return ret < 0 ? ret : 0;
}
+void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline);
+
struct dma_fence *dma_fence_get_stub(void);
-struct dma_fence *dma_fence_allocate_private_stub(void);
+struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp);
u64 dma_fence_context_alloc(unsigned num);
-#define DMA_FENCE_TRACE(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \
- pr_info("f %llu#%llu: " fmt, \
- __ff->context, __ff->seqno, ##args); \
- } while (0)
-
-#define DMA_FENCE_WARN(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
- ##args); \
- } while (0)
-
-#define DMA_FENCE_ERR(f, fmt, args...) \
- do { \
- struct dma_fence *__ff = (f); \
- pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \
- ##args); \
- } while (0)
+extern const struct dma_fence_ops dma_fence_array_ops;
+extern const struct dma_fence_ops dma_fence_chain_ops;
+
+/**
+ * dma_fence_is_array - check if a fence is from the array subclass
+ * @fence: the fence to test
+ *
+ * Return true if it is a dma_fence_array and false otherwise.
+ */
+static inline bool dma_fence_is_array(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_array_ops;
+}
+
+/**
+ * dma_fence_is_chain - check if a fence is from the chain subclass
+ * @fence: the fence to test
+ *
+ * Return true if it is a dma_fence_chain and false otherwise.
+ */
+static inline bool dma_fence_is_chain(struct dma_fence *fence)
+{
+ return fence->ops == &dma_fence_chain_ops;
+}
+
+/**
+ * dma_fence_is_container - check if a fence is a container for other fences
+ * @fence: the fence to test
+ *
+ * Return true if this fence is a container for other fences, false otherwise.
+ * This is important since we can't build up large fence structures, as
+ * otherwise we run into recursion when operating on those fences.
+ */
+static inline bool dma_fence_is_container(struct dma_fence *fence)
+{
+ return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
+}
#endif /* __LINUX_DMA_FENCE_H */
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 0c05561cad6e..27d15f60950a 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -9,22 +9,21 @@
#ifndef _DMA_HEAPS_H
#define _DMA_HEAPS_H
-#include <linux/cdev.h>
#include <linux/types.h>
struct dma_heap;
/**
* struct dma_heap_ops - ops to operate on a given heap
- * @allocate: allocate dmabuf and return struct dma_buf ptr
+ * @allocate: allocate dmabuf and return struct dma_buf ptr
*
* allocate returns dmabuf on success, ERR_PTR(-errno) on error.
*/
struct dma_heap_ops {
struct dma_buf *(*allocate)(struct dma_heap *heap,
unsigned long len,
- unsigned long fd_flags,
- unsigned long heap_flags);
+ u32 fd_flags,
+ u64 heap_flags);
};
/**
@@ -41,28 +40,10 @@ struct dma_heap_export_info {
void *priv;
};
-/**
- * dma_heap_get_drvdata() - get per-heap driver data
- * @heap: DMA-Heap to retrieve private data for
- *
- * Returns:
- * The per-heap data for the heap.
- */
void *dma_heap_get_drvdata(struct dma_heap *heap);
-/**
- * dma_heap_get_name() - get heap name
- * @heap: DMA-Heap to retrieve private data for
- *
- * Returns:
- * The char* for the heap name.
- */
const char *dma_heap_get_name(struct dma_heap *heap);
-/**
- * dma_heap_add - adds a heap to dmabuf heaps
- * @exp_info: information needed to register this heap
- */
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
#endif /* _DMA_HEAPS_H */
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
deleted file mode 100644
index 6e75a2d689b4..000000000000
--- a/include/linux/dma-iommu.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2014-2015 ARM Ltd.
- */
-#ifndef __DMA_IOMMU_H
-#define __DMA_IOMMU_H
-
-#include <linux/errno.h>
-#include <linux/types.h>
-
-#ifdef CONFIG_IOMMU_DMA
-#include <linux/dma-mapping.h>
-#include <linux/iommu.h>
-#include <linux/msi.h>
-
-/* Domain management interface for IOMMU drivers */
-int iommu_get_dma_cookie(struct iommu_domain *domain);
-int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
-void iommu_put_dma_cookie(struct iommu_domain *domain);
-
-/* Setup call for arch DMA mapping code */
-void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);
-
-/* The DMA API isn't _quite_ the whole story, though... */
-/*
- * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU device
- *
- * The MSI page will be stored in @desc.
- *
- * Return: 0 on success otherwise an error describing the failure.
- */
-int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
-
-/* Update the MSI message if required. */
-void iommu_dma_compose_msi_msg(struct msi_desc *desc,
- struct msi_msg *msg);
-
-void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
-
-void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
- struct iommu_domain *domain);
-
-extern bool iommu_dma_forcedac;
-
-#else /* CONFIG_IOMMU_DMA */
-
-struct iommu_domain;
-struct msi_desc;
-struct msi_msg;
-struct device;
-
-static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size)
-{
-}
-
-static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
-{
- return -ENODEV;
-}
-
-static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
-{
- return -ENODEV;
-}
-
-static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
-{
-}
-
-static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
- phys_addr_t msi_addr)
-{
- return 0;
-}
-
-static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc,
- struct msi_msg *msg)
-{
-}
-
-static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
-{
-}
-
-#endif /* CONFIG_IOMMU_DMA */
-#endif /* __DMA_IOMMU_H */
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 0d53a96a3d64..4809204c674c 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -8,8 +8,10 @@
#include <linux/dma-mapping.h>
#include <linux/pgtable.h>
+#include <linux/slab.h>
struct cma;
+struct iommu_ops;
struct dma_map_ops {
void *(*alloc)(struct device *dev, size_t size,
@@ -17,16 +19,11 @@ struct dma_map_ops {
unsigned long attrs);
void (*free)(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs);
- struct page *(*alloc_pages)(struct device *dev, size_t size,
+ struct page *(*alloc_pages_op)(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir,
gfp_t gfp);
void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
dma_addr_t dma_handle, enum dma_data_direction dir);
- struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
- enum dma_data_direction dir, gfp_t gfp,
- unsigned long attrs);
- void (*free_noncontiguous)(struct device *dev, size_t size,
- struct sg_table *sgt, enum dma_data_direction dir);
int (*mmap)(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t, unsigned long attrs);
@@ -34,26 +31,21 @@ struct dma_map_ops {
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
- dma_addr_t (*map_page)(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir, unsigned long attrs);
- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
+ dma_addr_t (*map_phys)(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ void (*unmap_phys)(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
unsigned long attrs);
/*
- * map_sg returns 0 on error and a value > 0 on success.
- * It should never return a value < 0.
+ * map_sg should return a negative error code on error. See
+ * dma_map_sgtable() for a list of appropriate error codes
+ * and their meanings.
*/
int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs);
void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs);
- dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
- void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir);
void (*sync_single_for_device)(struct device *dev,
@@ -68,17 +60,18 @@ struct dma_map_ops {
int (*dma_supported)(struct device *dev, u64 mask);
u64 (*get_required_mask)(struct device *dev);
size_t (*max_mapping_size)(struct device *dev);
+ size_t (*opt_mapping_size)(void);
unsigned long (*get_merge_boundary)(struct device *dev);
};
-#ifdef CONFIG_DMA_OPS
+#ifdef CONFIG_ARCH_HAS_DMA_OPS
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (dev->dma_ops)
return dev->dma_ops;
- return get_arch_dma_ops(dev->bus);
+ return get_arch_dma_ops();
}
static inline void set_dma_ops(struct device *dev,
@@ -86,7 +79,7 @@ static inline void set_dma_ops(struct device *dev,
{
dev->dma_ops = dma_ops;
}
-#else /* CONFIG_DMA_OPS */
+#else /* CONFIG_ARCH_HAS_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return NULL;
@@ -95,7 +88,7 @@ static inline void set_dma_ops(struct device *dev,
const struct dma_map_ops *dma_ops)
{
}
-#endif /* CONFIG_DMA_OPS */
+#endif /* CONFIG_ARCH_HAS_DMA_OPS */
#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;
@@ -154,39 +147,41 @@ static inline void dma_free_contiguous(struct device *dev, struct page *page,
{
__free_pages(page, get_order(size));
}
+static inline void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+}
#endif /* CONFIG_DMA_CMA*/
-#ifdef CONFIG_DMA_PERNUMA_CMA
-void dma_pernuma_cma_reserve(void);
-#else
-static inline void dma_pernuma_cma_reserve(void) { }
-#endif /* CONFIG_DMA_PERNUMA_CMA */
-
#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size);
+void dma_release_coherent_memory(struct device *dev);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, size_t size, int *ret);
-
-void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle);
-int dma_release_from_global_coherent(int order, void *vaddr);
-int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
- size_t size, int *ret);
-
#else
static inline int dma_declare_coherent_memory(struct device *dev,
phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
return -ENOSYS;
}
+
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
+static inline void dma_release_coherent_memory(struct device *dev) { }
+#endif /* CONFIG_DMA_DECLARE_COHERENT */
+#ifdef CONFIG_DMA_GLOBAL_POOL
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle);
+int dma_release_from_global_coherent(int order, void *vaddr);
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
+ size_t size, int *ret);
+int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
+#else
static inline void *dma_alloc_from_global_coherent(struct device *dev,
ssize_t size, dma_addr_t *dma_handle)
{
@@ -201,21 +196,7 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
{
return 0;
}
-#endif /* CONFIG_DMA_DECLARE_COHERENT */
-
-/*
- * This is the actual return value from the ->alloc_noncontiguous method.
- * The users of the DMA API should only care about the sg_table, but to make
- * the DMA-API internal vmaping and freeing easier we stash away the page
- * array as well (except for the fallback case). This can go away any time,
- * e.g. when a vmap-variant that takes a scatterlist comes along.
- */
-struct dma_sgt_handle {
- struct sg_table sgt;
- struct page **pages;
-};
-#define sgt_handle(sgt) \
- container_of((sgt), struct dma_sgt_handle, sgt)
+#endif /* CONFIG_DMA_GLOBAL_POOL */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
@@ -252,17 +233,94 @@ static inline bool dev_is_dma_coherent(struct device *dev)
return dev->dma_coherent;
}
#else
+#define dma_default_coherent true
+
static inline bool dev_is_dma_coherent(struct device *dev)
{
return true;
}
-#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
+#endif
+
+static inline void dma_reset_need_sync(struct device *dev)
+{
+#ifdef CONFIG_DMA_NEED_SYNC
+ /* Reset it only once so that the function can be called on hotpath */
+ if (unlikely(dev->dma_skip_sync))
+ dev->dma_skip_sync = false;
+#endif
+}
+
+/*
+ * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
+ */
+static inline bool dma_kmalloc_safe(struct device *dev,
+ enum dma_data_direction dir)
+{
+ /*
+ * If DMA bouncing of kmalloc() buffers is disabled, the kmalloc()
+ * caches have already been aligned to a DMA-safe size.
+ */
+ if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
+ return true;
+
+ /*
+ * kmalloc() buffers are DMA-safe irrespective of size if the device
+	 * is coherent or the direction is DMA_TO_DEVICE (non-destructive
+ * cache maintenance and benign cache line evictions).
+ */
+ if (dev_is_dma_coherent(dev) || dir == DMA_TO_DEVICE)
+ return true;
+
+ return false;
+}
+
+/*
+ * Check whether the given size, assuming it is for a kmalloc()'ed buffer, is
+ * sufficiently aligned for non-coherent DMA.
+ */
+static inline bool dma_kmalloc_size_aligned(size_t size)
+{
+ /*
+ * Larger kmalloc() sizes are guaranteed to be aligned to
+ * ARCH_DMA_MINALIGN.
+ */
+ if (size >= 2 * ARCH_DMA_MINALIGN ||
+ IS_ALIGNED(kmalloc_size_roundup(size), dma_get_cache_alignment()))
+ return true;
+
+ return false;
+}
+
+/*
+ * Check whether the given object size may have originated from a kmalloc()
+ * buffer with a slab alignment below the DMA-safe alignment and needs
+ * bouncing for non-coherent DMA. The pointer alignment is not considered and
+ * in-structure DMA-safe offsets are the responsibility of the caller. Such
+ * code should use the static ARCH_DMA_MINALIGN for compiler annotations.
+ *
+ * The heuristics can have false positives, bouncing unnecessarily, though the
+ * buffers would be small. False negatives are theoretically possible if, for
+ * example, multiple small kmalloc() buffers are coalesced into a larger
+ * buffer that passes the alignment check. There are no such known constructs
+ * in the kernel.
+ */
+static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
+ enum dma_data_direction dir)
+{
+ return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
+}
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
+#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
+void arch_dma_set_mask(struct device *dev, u64 mask);
+#else
+#define arch_dma_set_mask(dev, mask) do { } while (0)
+#endif
+
#ifdef CONFIG_MMU
/*
* Page protection so that devices that can't snoop CPU caches can use the
@@ -331,25 +389,23 @@ void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);
#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
-bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
-bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
+bool arch_dma_map_phys_direct(struct device *dev, phys_addr_t addr);
+bool arch_dma_unmap_phys_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
int nents);
#else
-#define arch_dma_map_page_direct(d, a) (false)
-#define arch_dma_unmap_page_direct(d, a) (false)
+#define arch_dma_map_phys_direct(d, a) (false)
+#define arch_dma_unmap_phys_direct(d, a) (false)
#define arch_dma_map_sg_direct(d, s, n) (false)
#define arch_dma_unmap_sg_direct(d, s, n) (false)
#endif
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent);
+void arch_setup_dma_ops(struct device *dev, bool coherent);
#else
-static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
- u64 size, const struct iommu_ops *iommu, bool coherent)
+static inline void arch_setup_dma_ops(struct device *dev, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
@@ -363,10 +419,10 @@ static inline void arch_teardown_dma_ops(struct device *dev)
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
#ifdef CONFIG_DMA_API_DEBUG
-void dma_debug_add_bus(struct bus_type *bus);
+void dma_debug_add_bus(const struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
-static inline void dma_debug_add_bus(struct bus_type *bus)
+static inline void dma_debug_add_bus(const struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
@@ -375,5 +431,4 @@ static inline void debug_dma_dump_mappings(struct device *dev)
#endif /* CONFIG_DMA_API_DEBUG */
extern const struct dma_map_ops dma_dummy_ops;
-
#endif /* _LINUX_DMA_MAP_OPS_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 183e7103a66d..2ceda49c609f 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -2,14 +2,11 @@
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H
-#include <linux/sizes.h>
-#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
-#include <linux/mem_encrypt.h>
/**
* List of possible attributes associated with a DMA mapping. The semantics
@@ -62,6 +59,26 @@
#define DMA_ATTR_PRIVILEGED (1UL << 9)
/*
+ * DMA_ATTR_MMIO - Indicates memory-mapped I/O (MMIO) region for DMA mapping
+ *
+ * This attribute indicates the physical address is not normal system
+ * memory. It may not be used with kmap*()/phys_to_virt()/phys_to_page()
+ * functions, it may not be cacheable, and access using CPU load/store
+ * instructions may not be allowed.
+ *
+ * Usually this will be used to describe MMIO addresses, or other non-cacheable
+ * register addresses. When DMA mapping this sort of address, the operation is
+ * called peer-to-peer, as one device is DMA'ing to another device.
+ * For PCI devices the p2pdma APIs must be used to determine if DMA_ATTR_MMIO
+ * is appropriate.
+ *
+ * For architectures that require cache flushing for DMA coherence,
+ * DMA_ATTR_MMIO will not perform any cache flushing. The address
+ * provided must never be mapped cacheable into the CPU.
+ */
+#define DMA_ATTR_MMIO (1UL << 10)
+
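
A sketch of mapping an MMIO region with this attribute, using the dma_map_phys() interface declared later in this patch (bar_phys and len are hypothetical caller-provided values):

	dma_addr_t addr;

	addr = dma_map_phys(dev, bar_phys, len, DMA_BIDIRECTIONAL,
			    DMA_ATTR_MMIO);
	if (dma_mapping_error(dev, addr))
		return -EIO;
	/* ... peer device DMA ... */
	dma_unmap_phys(dev, addr, len, DMA_BIDIRECTIONAL, DMA_ATTR_MMIO);
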
+/*
* A dma_addr_t can hold any valid DMA or bus address for the platform. It can
* be given to a device to use as a DMA source or target. It is specific to a
* given device and there may be a translation between the CPU physical address
@@ -73,7 +90,23 @@
*/
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
-#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+#define DMA_BIT_MASK(n)	GENMASK_ULL((n) - 1, 0)
+
+struct dma_iova_state {
+ dma_addr_t addr;
+ u64 __size;
+};
+
+/*
+ * Use the high bit to mark if we used swiotlb for one or more ranges.
+ */
+#define DMA_IOVA_USE_SWIOTLB (1ULL << 63)
+
+static inline size_t dma_iova_size(struct dma_iova_state *state)
+{
+	/* Casting is needed on 32-bit systems */
+ return (size_t)(state->__size & ~DMA_IOVA_USE_SWIOTLB);
+}
#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
@@ -105,23 +138,21 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs);
-int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
+dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs);
+unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs);
+int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs);
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
- enum dma_data_direction dir);
-void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir);
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir);
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
@@ -137,12 +168,13 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
bool dma_can_mmap(struct device *dev);
-int dma_supported(struct device *dev, u64 mask);
+bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
+bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
-bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+size_t dma_opt_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
@@ -164,8 +196,18 @@ static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs)
+static inline dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ return DMA_MAPPING_ERROR;
+}
+static inline void dma_unmap_phys(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline unsigned int dma_map_sg_attrs(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir,
+ unsigned long attrs)
{
return 0;
}
@@ -174,6 +216,11 @@ static inline void dma_unmap_sg_attrs(struct device *dev,
unsigned long attrs)
{
}
+static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ return -EOPNOTSUPP;
+}
static inline dma_addr_t dma_map_resource(struct device *dev,
phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
unsigned long attrs)
@@ -184,22 +231,6 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg, int nelems, enum dma_data_direction dir)
-{
-}
-static inline void dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems, enum dma_data_direction dir)
-{
-}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return -ENOMEM;
@@ -238,9 +269,9 @@ static inline bool dma_can_mmap(struct device *dev)
{
return false;
}
-static inline int dma_supported(struct device *dev, u64 mask)
+static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
- return 0;
+ return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
@@ -254,13 +285,17 @@ static inline u64 dma_get_required_mask(struct device *dev)
{
return 0;
}
+static inline bool dma_addressing_limited(struct device *dev)
+{
+ return false;
+}
static inline size_t dma_max_mapping_size(struct device *dev)
{
return 0;
}
-static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+static inline size_t dma_opt_mapping_size(struct device *dev)
{
- return false;
+ return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
@@ -291,6 +326,151 @@ static inline int dma_mmap_noncontiguous(struct device *dev,
}
#endif /* CONFIG_HAS_DMA */
+#ifdef CONFIG_IOMMU_DMA
+/**
+ * dma_use_iova - check if the IOVA API is used for this state
+ * @state: IOVA state
+ *
+ * Return %true if the DMA transfer uses the dma_iova_*() calls, or %false if
+ * they can't be used.
+ */
+static inline bool dma_use_iova(struct dma_iova_state *state)
+{
+ return state->__size != 0;
+}
+
+bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t size);
+void dma_iova_free(struct device *dev, struct dma_iova_state *state);
+void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
+ size_t mapped_len, enum dma_data_direction dir,
+ unsigned long attrs);
+int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size);
+int dma_iova_link(struct device *dev, struct dma_iova_state *state,
+ phys_addr_t phys, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
+ size_t offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+#else /* CONFIG_IOMMU_DMA */
+static inline bool dma_use_iova(struct dma_iova_state *state)
+{
+ return false;
+}
+static inline bool dma_iova_try_alloc(struct device *dev,
+ struct dma_iova_state *state, phys_addr_t phys, size_t size)
+{
+ return false;
+}
+static inline void dma_iova_free(struct device *dev,
+ struct dma_iova_state *state)
+{
+}
+static inline void dma_iova_destroy(struct device *dev,
+ struct dma_iova_state *state, size_t mapped_len,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline int dma_iova_sync(struct device *dev,
+ struct dma_iova_state *state, size_t offset, size_t size)
+{
+ return -EOPNOTSUPP;
+}
+static inline int dma_iova_link(struct device *dev,
+ struct dma_iova_state *state, phys_addr_t phys, size_t offset,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ return -EOPNOTSUPP;
+}
+static inline void dma_iova_unlink(struct device *dev,
+ struct dma_iova_state *state, size_t offset, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+}
+#endif /* CONFIG_IOMMU_DMA */
+
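
Putting the declarations above together, the expected calling sequence is roughly as follows (a sketch; phys, size and dir are caller-provided):

	struct dma_iova_state state = {};
	int ret;

	if (!dma_iova_try_alloc(dev, &state, phys, size))
		return -EOPNOTSUPP;	/* fall back to dma_map_phys() etc. */

	ret = dma_iova_link(dev, &state, phys, 0, size, dir, 0);
	if (!ret)
		ret = dma_iova_sync(dev, &state, 0, size);
	if (ret) {
		dma_iova_destroy(dev, &state, 0, dir, 0);
		return ret;
	}

	/* ... device DMA runs against state.addr ... */

	dma_iova_destroy(dev, &state, size, dir, 0);
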
+#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
+void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir);
+void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir);
+void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir);
+void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir);
+bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+
+static inline bool dma_dev_need_sync(const struct device *dev)
+{
+ /* Always call DMA sync operations when debugging is enabled */
+ return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ if (dma_dev_need_sync(dev))
+ __dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+ if (dma_dev_need_sync(dev))
+ __dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+ if (dma_dev_need_sync(dev))
+ __dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+ if (dma_dev_need_sync(dev))
+ __dma_sync_sg_for_device(dev, sg, nelems, dir);
+}
+
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
+}
+bool dma_need_unmap(struct device *dev);
+#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
+static inline bool dma_dev_need_sync(const struct device *dev)
+{
+ return false;
+}
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems, enum dma_data_direction dir)
+{
+}
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+ return false;
+}
+static inline bool dma_need_unmap(struct device *dev)
+{
+ return false;
+}
+#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
+
struct page *dma_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
@@ -344,34 +524,6 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
}
/**
- * dma_map_sgtable - Map the given buffer for DMA
- * @dev: The device for which to perform the DMA operation
- * @sgt: The sg_table object describing the buffer
- * @dir: DMA direction
- * @attrs: Optional DMA attributes for the map operation
- *
- * Maps a buffer described by a scatterlist stored in the given sg_table
- * object for the @dir DMA operation by the @dev device. After success the
- * ownership for the buffer is transferred to the DMA domain. One has to
- * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
- * ownership of the buffer back to the CPU domain before touching the
- * buffer by the CPU.
- *
- * Returns 0 on success or -EINVAL on error during mapping the buffer.
- */
-static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
- enum dma_data_direction dir, unsigned long attrs)
-{
- int nents;
-
- nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
- if (nents <= 0)
- return -EINVAL;
- sgt->nents = nents;
- return 0;
-}
-
-/**
* dma_unmap_sgtable - Unmap the given buffer for DMA
* @dev: The device for which to perform the DMA operation
* @sgt: The sg_table object describing the buffer
@@ -432,6 +584,8 @@ static inline void dma_sync_sgtable_for_device(struct device *dev,
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
+bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
+
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
@@ -477,20 +631,6 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
return dma_set_mask_and_coherent(dev, mask);
}
-/**
- * dma_addressing_limited - return if the device is addressing limited
- * @dev: device to check
- *
- * Return %true if the devices DMA mask is too small to address all memory in
- * the system, else %false. Lack of addressing bits is the prime reason for
- * bounce buffering, but might not be the only one.
- */
-static inline bool dma_addressing_limited(struct device *dev)
-{
- return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
- dma_get_required_mask(dev);
-}
-
static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
if (dev->dma_parms && dev->dma_parms->max_segment_size)
@@ -498,13 +638,11 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
return SZ_64K;
}
-static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
+static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
- if (dev->dma_parms) {
- dev->dma_parms->max_segment_size = size;
- return 0;
- }
- return -EIO;
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return;
+ dev->dma_parms->max_segment_size = size;
}
static inline unsigned long dma_get_seg_boundary(struct device *dev)
@@ -533,13 +671,11 @@ static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}
-static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
+static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
- if (dev->dma_parms) {
- dev->dma_parms->segment_boundary_mask = mask;
- return 0;
- }
- return -EIO;
+ if (WARN_ON_ONCE(!dev->dma_parms))
+ return;
+ dev->dma_parms->segment_boundary_mask = mask;
}
static inline unsigned int dma_get_min_align_mask(struct device *dev)
@@ -549,22 +685,23 @@ static inline unsigned int dma_get_min_align_mask(struct device *dev)
return 0;
}
-static inline int dma_set_min_align_mask(struct device *dev,
+static inline void dma_set_min_align_mask(struct device *dev,
unsigned int min_align_mask)
{
if (WARN_ON_ONCE(!dev->dma_parms))
- return -EIO;
+ return;
dev->dma_parms->min_align_mask = min_align_mask;
- return 0;
}
+#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
-#ifdef ARCH_DMA_MINALIGN
+#ifdef ARCH_HAS_DMA_MINALIGN
return ARCH_DMA_MINALIGN;
#endif
return 1;
}
+#endif
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
@@ -610,10 +747,14 @@ static inline int dma_mmap_wc(struct device *dev,
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
-#define dma_unmap_addr(PTR, ADDR_NAME) (0)
-#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
-#define dma_unmap_len(PTR, LEN_NAME) (0)
-#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+#define dma_unmap_addr(PTR, ADDR_NAME) \
+ ({ typeof(PTR) __p __maybe_unused = PTR; 0; })
+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) \
+ do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
+#define dma_unmap_len(PTR, LEN_NAME) \
+ ({ typeof(PTR) __p __maybe_unused = PTR; 0; })
+#define dma_unmap_len_set(PTR, LEN_NAME, VAL) \
+ do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#endif
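The rework above makes the no-op variants still evaluate their PTR argument, which avoids "unused variable" warnings when a driver only touches a field through these macros. A hedged sketch of the intended usage pattern; my_tx_desc and the helpers are hypothetical:

/* Hedged sketch: unmap state that compiles away on !CONFIG_NEED_DMA_MAP_STATE. */
struct my_tx_desc {
	void *buf;
	DEFINE_DMA_UNMAP_ADDR(addr);	/* dma_addr_t only when needed */
	DEFINE_DMA_UNMAP_LEN(len);	/* u32 only when needed */
};

static void my_save_mapping(struct my_tx_desc *d, dma_addr_t mapping,
			    size_t len)
{
	dma_unmap_addr_set(d, addr, mapping);
	dma_unmap_len_set(d, len, len);
}

static void my_unmap(struct device *dev, struct my_tx_desc *d)
{
	dma_unmap_single(dev, dma_unmap_addr(d, addr),
			 dma_unmap_len(d, len), DMA_TO_DEVICE);
}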
#endif /* _LINUX_DMA_MAPPING_H */
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index d44a77e8a7e3..c5ab6fd9ebe8 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -47,52 +47,277 @@
extern struct ww_class reservation_ww_class;
+struct dma_resv_list;
+
/**
- * struct dma_resv_list - a list of shared fences
- * @rcu: for internal use
- * @shared_count: table of shared fences
- * @shared_max: for growing shared fence table
- * @shared: shared fence table
+ * enum dma_resv_usage - how the fences from a dma_resv obj are used
+ *
+ * This enum describes the different use cases for a dma_resv object and
+ * controls which fences are returned when queried.
+ *
+ * An important property is the ordering KERNEL < WRITE < READ < BOOKKEEP:
+ * when the dma_resv object is asked for fences of one use case, the fences
+ * of all lower use cases are returned as well.
+ *
+ * For example, when asking for WRITE fences the KERNEL fences are returned
+ * as well. Similarly, when asked for READ fences both WRITE and KERNEL
+ * fences are returned.
+ *
+ * Already used fences can be promoted in the sense that a fence with
+ * DMA_RESV_USAGE_BOOKKEEP could become DMA_RESV_USAGE_READ by adding it again
+ * with this usage. But fences can never be degraded in the sense that a fence
+ * with DMA_RESV_USAGE_WRITE could become DMA_RESV_USAGE_READ.
*/
-struct dma_resv_list {
- struct rcu_head rcu;
- u32 shared_count, shared_max;
- struct dma_fence __rcu *shared[];
+enum dma_resv_usage {
+ /**
+ * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
+ *
+ * This should only be used for things like copying or clearing memory
+ * with a DMA hardware engine for the purpose of kernel memory
+ * management.
+ *
+ * Drivers must *always* wait for those fences before accessing the
+ * resource protected by the dma_resv object. The only exception is
+ * when the resource is known to be locked down in place by having
+ * previously been pinned.
+ */
+ DMA_RESV_USAGE_KERNEL,
+
+ /**
+ * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
+ *
+ * This should only be used for userspace command submissions which add
+ * an implicit write dependency.
+ */
+ DMA_RESV_USAGE_WRITE,
+
+ /**
+ * @DMA_RESV_USAGE_READ: Implicit read synchronization.
+ *
+ * This should only be used for userspace command submissions which add
+ * an implicit read dependency.
+ */
+ DMA_RESV_USAGE_READ,
+
+ /**
+ * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
+ *
+ * This should be used by submissions which don't want to participate in
+ * any implicit synchronization.
+ *
+ * The most common cases are preemption fences, page table updates, TLB
+ * flushes as well as explicitly synced user submissions.
+ *
+ * Explicitly synced user submissions can be promoted to
+ * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using
+ * dma_buf_import_sync_file() when implicit synchronization becomes
+ * necessary after the fence was initially added.
+ */
+ DMA_RESV_USAGE_BOOKKEEP
};
/**
+ * dma_resv_usage_rw - helper for implicit sync
+ * @write: true if we create a new implicit sync write
+ *
+ * This returns the implicit synchronization usage for write or read accesses,
+ * see enum dma_resv_usage and &dma_buf.resv.
+ */
+static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
+{
+ /* This looks confusing at first sight, but is indeed correct.
+ *
+ * The rationale is that new write operations need to wait for the
+ * existing read and write operations to finish.
+ * But a new read operation only needs to wait for the existing write
+ * operations to finish.
+ */
+ return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
+}
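A short hedged example of the helper combined with the usage ordering described above; obj is assumed to be a valid reservation object and the wait call is the one declared later in this header:

/* Hedged sketch: wait according to the implicit-sync rules above. */
static long my_wait_for_access(struct dma_resv *obj, bool write)
{
	/* A writer waits on READ (and therefore WRITE and KERNEL) fences,
	 * a reader only on WRITE (and KERNEL) fences. */
	return dma_resv_wait_timeout(obj, dma_resv_usage_rw(write),
				     true, msecs_to_jiffies(100));
}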
+
+/**
* struct dma_resv - a reservation object manages fences for a buffer
- * @lock: update side lock
- * @seq: sequence count for managing RCU read-side synchronization
- * @fence_excl: the exclusive fence, if there is one currently
- * @fence: list of current shared fences
+ *
+ * This is a container for dma_fence objects which needs to handle multiple use
+ * cases.
+ *
+ * One use is to synchronize cross-driver access to a struct dma_buf, either for
+ * dynamic buffer management or just to handle implicit synchronization between
+ * different users of the buffer in userspace. See &dma_buf.resv for a more
+ * in-depth discussion.
+ *
+ * The other major use is to manage access and locking within a driver in a
+ * buffer based memory manager. struct ttm_buffer_object is the canonical
+ * example here, since this is where reservation objects originated from. But
+ * use in drivers is spreading and some drivers also manage struct
+ * drm_gem_object with the same scheme.
*/
struct dma_resv {
+ /**
+ * @lock:
+ *
+ * Update side lock. Don't use directly, instead use the wrapper
+ * functions like dma_resv_lock() and dma_resv_unlock().
+ *
+ * Drivers which use the reservation object to manage memory dynamically
+ * also use this lock to protect buffer object state like placement and
+ * allocation policies, and hold it throughout command submission.
+ */
struct ww_mutex lock;
- seqcount_ww_mutex_t seq;
- struct dma_fence __rcu *fence_excl;
- struct dma_resv_list __rcu *fence;
+ /**
+ * @fences:
+ *
+ * Array of fences which were added to the dma_resv object.
+ *
+ * A new fence is added by calling dma_resv_add_fence(). Since this
+ * often needs to be done past the point of no return in command
+ * submission it cannot fail, and therefore sufficient slots need to be
+ * reserved by calling dma_resv_reserve_fences().
+ */
+ struct dma_resv_list __rcu *fences;
};
-#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
-#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
+/**
+ * struct dma_resv_iter - current position into the dma_resv fences
+ *
+ * Don't touch this directly in the driver; use the accessor functions instead.
+ *
+ * IMPORTANT
+ *
+ * When using the lockless iterators like dma_resv_iter_next_unlocked() or
+ * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted.
+ * Code which accumulates statistics or similar needs to check for this with
+ * dma_resv_iter_is_restarted().
+ */
+struct dma_resv_iter {
+ /** @obj: The dma_resv object we iterate over */
+ struct dma_resv *obj;
+
+ /** @usage: Return fences with this usage or lower. */
+ enum dma_resv_usage usage;
+
+ /** @fence: the currently handled fence */
+ struct dma_fence *fence;
+
+ /** @fence_usage: the usage of the current fence */
+ enum dma_resv_usage fence_usage;
+
+ /** @index: index into the shared fences */
+ unsigned int index;
+
+ /** @fences: the shared fences; private, *MUST* not dereference */
+ struct dma_resv_list *fences;
+
+ /** @num_fences: number of fences */
+ unsigned int num_fences;
+
+ /** @is_restarted: true if this is the first returned fence */
+ bool is_restarted;
+};
+
+struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
/**
- * dma_resv_get_list - get the reservation object's
- * shared fence list, with update-side lock held
- * @obj: the reservation object
+ * dma_resv_iter_begin - initialize a dma_resv_iter object
+ * @cursor: The dma_resv_iter object to initialize
+ * @obj: The dma_resv object which we want to iterate over
+ * @usage: controls which fences to include, see enum dma_resv_usage.
+ */
+static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
+ struct dma_resv *obj,
+ enum dma_resv_usage usage)
+{
+ cursor->obj = obj;
+ cursor->usage = usage;
+ cursor->fence = NULL;
+}
+
+/**
+ * dma_resv_iter_end - cleanup a dma_resv_iter object
+ * @cursor: the dma_resv_iter object which should be cleaned up
+ *
+ * Make sure that the reference to the fence in the cursor is properly
+ * dropped.
+ */
+static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
+{
+ dma_fence_put(cursor->fence);
+}
+
+/**
+ * dma_resv_iter_usage - Return the usage of the current fence
+ * @cursor: the cursor of the current position
+ *
+ * Returns the usage of the currently processed fence.
+ */
+static inline enum dma_resv_usage
+dma_resv_iter_usage(struct dma_resv_iter *cursor)
+{
+ return cursor->fence_usage;
+}
+
+/**
+ * dma_resv_iter_is_restarted - test if this is the first fence after a restart
+ * @cursor: the cursor with the current position
*
- * Returns the shared fence list. Does NOT take references to
- * the fence. The obj->lock must be held.
+ * Return true if this is the first fence in an iteration after a restart.
*/
-static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
+static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
{
- return rcu_dereference_protected(obj->fence,
- dma_resv_held(obj));
+ return cursor->is_restarted;
}
/**
+ * dma_resv_for_each_fence_unlocked - unlocked fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object without holding the
+ * &dma_resv.lock and using RCU instead. The cursor needs to be initialized
+ * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside
+ * the iterator a reference to the dma_fence is held and the RCU lock dropped.
+ *
+ * Beware that the iterator can be restarted when the struct dma_resv for
+ * @cursor is modified. Code which accumulates statistics or similar needs to
+ * check for this with dma_resv_iter_is_restarted(). For this reason, prefer
+ * the locked iterator dma_resv_for_each_fence() whenever possible.
+ */
+#define dma_resv_for_each_fence_unlocked(cursor, fence) \
+ for (fence = dma_resv_iter_first_unlocked(cursor); \
+ fence; fence = dma_resv_iter_next_unlocked(cursor))
+
+/**
+ * dma_resv_for_each_fence - fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @obj: a dma_resv object pointer
+ * @usage: controls which fences to return
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object while holding the
+ * &dma_resv.lock. @usage controls which fences are returned. The cursor
+ * initialization is part of the iterator, and the fence stays valid as long
+ * as the lock is held, so no extra reference to the fence is taken.
+ */
+#define dma_resv_for_each_fence(cursor, obj, usage, fence) \
+ for (dma_resv_iter_begin(cursor, obj, usage), \
+ fence = dma_resv_iter_first(cursor); fence; \
+ fence = dma_resv_iter_next(cursor))
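A hedged sketch of the restart handling both iterator comments above insist on, counting fences without holding the reservation lock:

/* Hedged sketch: lockless iteration with restart-safe accumulation. */
static unsigned int my_count_fences(struct dma_resv *obj)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_iter_begin(&cursor, obj, DMA_RESV_USAGE_READ);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor))
			count = 0;	/* object changed under us, redo */
		count++;
	}
	dma_resv_iter_end(&cursor);

	return count;
}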
+
+#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
+#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
+
+#ifdef CONFIG_DEBUG_MUTEXES
+void dma_resv_reset_max_fences(struct dma_resv *obj);
+#else
+static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
+#endif
+
+/**
* dma_resv_lock - lock the reservation object
* @obj: the reservation object
* @ctx: the locking context
@@ -106,6 +331,13 @@ static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
* undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
* is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
* object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK, all locks held by
+ * @ctx must be unlocked, and then dma_resv_lock_slow() must be called on @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
+ *
+ * See also dma_resv_lock_interruptible() for the interruptible variant.
*/
static inline int dma_resv_lock(struct dma_resv *obj,
struct ww_acquire_ctx *ctx)
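A hedged sketch of the -EDEADLK dance the comment describes, for two reservation objects; this mirrors the usual ww_mutex slowpath pattern, with swap() keeping the contended object first on retry:

/* Hedged sketch: lock two reservation objects with deadlock backoff. */
static int my_lock_pair(struct dma_resv *a, struct dma_resv *b,
			struct ww_acquire_ctx *ctx)
{
	int ret;

	ww_acquire_init(ctx, &reservation_ww_class);

	ret = dma_resv_lock(a, ctx);
	if (ret)	/* cannot be -EDEADLK while holding nothing */
		return ret;
lock_second:
	ret = dma_resv_lock(b, ctx);
	if (ret == -EDEADLK) {
		dma_resv_unlock(a);
		dma_resv_lock_slow(b, ctx);	/* sleep on the contended lock */
		swap(a, b);			/* now holding "a", retry "b" */
		goto lock_second;
	}
	if (!ret)
		ww_acquire_done(ctx);
	return ret;
}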
@@ -127,6 +359,12 @@ static inline int dma_resv_lock(struct dma_resv *obj,
* undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
* is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
* object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK, all locks held by
+ * @ctx must be unlocked, and then dma_resv_lock_slow_interruptible() must be
+ * called on @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
*/
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
struct ww_acquire_ctx *ctx)
@@ -142,6 +380,8 @@ static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
* Acquires the reservation object after a die case. This function
* will sleep until the lock becomes available. See dma_resv_lock() as
* well.
+ *
+ * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
*/
static inline void dma_resv_lock_slow(struct dma_resv *obj,
struct ww_acquire_ctx *ctx)
@@ -175,13 +415,13 @@ static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
* if they overlap with a writer.
*
* Also note that since no context is provided, no deadlock protection is
- * possible.
+ * possible, which is also not needed for a trylock.
*
* Returns true if the lock was acquired, false otherwise.
*/
static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
- return ww_mutex_trylock(&obj->lock);
+ return ww_mutex_trylock(&obj->lock, NULL);
}
/**
@@ -201,6 +441,11 @@ static inline bool dma_resv_is_locked(struct dma_resv *obj)
*
* Returns the context used to lock a reservation object or NULL if no context
* was used or the object is not locked at all.
+ *
+ * WARNING: This interface is pretty horrible, but TTM needs it because it
+ * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
+ * Everyone else just uses it to check whether they're holding a reservation or
+ * not.
*/
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
@@ -215,79 +460,28 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
*/
static inline void dma_resv_unlock(struct dma_resv *obj)
{
-#ifdef CONFIG_DEBUG_MUTEXES
- /* Test shared fence slot reservation */
- if (rcu_access_pointer(obj->fence)) {
- struct dma_resv_list *fence = dma_resv_get_list(obj);
-
- fence->shared_max = fence->shared_count;
- }
-#endif
+ dma_resv_reset_max_fences(obj);
ww_mutex_unlock(&obj->lock);
}
-/**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any). Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *
-dma_resv_get_excl(struct dma_resv *obj)
-{
- return rcu_dereference_protected(obj->fence_excl,
- dma_resv_held(obj));
-}
-
-/**
- * dma_resv_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments it's
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *
-dma_resv_get_excl_rcu(struct dma_resv *obj)
-{
- struct dma_fence *fence;
-
- if (!rcu_access_pointer(obj->fence_excl))
- return NULL;
-
- rcu_read_lock();
- fence = dma_fence_get_rcu_safe(&obj->fence_excl);
- rcu_read_unlock();
-
- return fence;
-}
-
void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
-int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
- struct dma_fence **pfence_excl,
- unsigned *pshared_count,
- struct dma_fence ***pshared);
-
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+ enum dma_resv_usage usage);
+void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
+ struct dma_fence *fence,
+ enum dma_resv_usage usage);
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
+ unsigned int *num_fences, struct dma_fence ***fences);
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
+ struct dma_fence **fence);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
- unsigned long timeout);
-
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+ bool intr, unsigned long timeout);
+void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage,
+ ktime_t deadline);
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
+void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
#endif /* _LINUX_RESERVATION_H */
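A hedged sketch of the reserve-then-add contract spelled out in the @fences member documentation above: slots are reserved while failure can still be handled, and the add itself cannot fail. The function shape is an assumption:

/* Hedged sketch: publish a fence on a reservation object. */
static int my_publish(struct dma_resv *obj, struct dma_fence *fence)
{
	int ret;

	dma_resv_lock(obj, NULL);
	ret = dma_resv_reserve_fences(obj, 1);
	if (!ret)	/* past this point the add must not fail */
		dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);
	dma_resv_unlock(obj);

	return ret;
}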
diff --git a/include/linux/dma/amd_xdma.h b/include/linux/dma/amd_xdma.h
new file mode 100644
index 000000000000..ceba69ed7cb4
--- /dev/null
+++ b/include/linux/dma/amd_xdma.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _DMAENGINE_AMD_XDMA_H
+#define _DMAENGINE_AMD_XDMA_H
+
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num);
+void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num);
+int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index);
+
+#endif /* _DMAENGINE_AMD_XDMA_H */
diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h
index cab6e18773da..3080747689f6 100644
--- a/include/linux/dma/edma.h
+++ b/include/linux/dma/edma.h
@@ -12,24 +12,97 @@
#include <linux/device.h>
#include <linux/dmaengine.h>
+#define EDMA_MAX_WR_CH 8
+#define EDMA_MAX_RD_CH 8
+
struct dw_edma;
+struct dw_edma_region {
+ u64 paddr;
+ union {
+ void *mem;
+ void __iomem *io;
+ } vaddr;
+ size_t sz;
+};
+
+/**
+ * struct dw_edma_core_ops - platform-specific eDMA methods
+ * @irq_vector: Get IRQ number of the passed eDMA channel. Note the
+ * method accepts the channel id in the end-to-end
+ * numbering, with the eDMA write channels numbered first.
+ * @pci_address: Get PCIe bus address corresponding to the passed CPU
+ * address. Note there is no need to specify this function
+ * if the address translation is performed by the DW PCIe
+ * RP/EP controller hosting the DW eDMA device and
+ * DMA_BYPASS isn't set for all the outbound iATU windows;
+ * in that case the controller performs the translation
+ * automatically.
+ */
+struct dw_edma_plat_ops {
+ int (*irq_vector)(struct device *dev, unsigned int nr);
+ u64 (*pci_address)(struct device *dev, phys_addr_t cpu_addr);
+};
+
+enum dw_edma_map_format {
+ EDMA_MF_EDMA_LEGACY = 0x0,
+ EDMA_MF_EDMA_UNROLL = 0x1,
+ EDMA_MF_HDMA_COMPAT = 0x5,
+ EDMA_MF_HDMA_NATIVE = 0x7,
+};
+
+/**
+ * enum dw_edma_chip_flags - Flags specific to an eDMA chip
+ * @DW_EDMA_CHIP_LOCAL: eDMA is used locally by an endpoint
+ */
+enum dw_edma_chip_flags {
+ DW_EDMA_CHIP_LOCAL = BIT(0),
+};
+
/**
* struct dw_edma_chip - representation of DesignWare eDMA controller hardware
* @dev: struct device of the eDMA controller
* @id: instance ID
- * @irq: irq line
- * @dw: struct dw_edma that is filed by dw_edma_probe()
+ * @nr_irqs: total number of DMA IRQs
+ * @ops: DMA channel to IRQ number mapping
+ * @flags: dw_edma_chip_flags
+ * @reg_base: DMA register base address
+ * @ll_wr_cnt: DMA write link list count
+ * @ll_rd_cnt: DMA read link list count
+ * @ll_region_wr: DMA descriptor link list memory for write channels
+ * @ll_region_rd: DMA descriptor link list memory for read channels
+ * @dt_region_wr: DMA data memory for write channels
+ * @dt_region_rd: DMA data memory for read channels
+ * @mf: DMA register map format
+ * @dw: struct dw_edma that is filled by dw_edma_probe()
*/
struct dw_edma_chip {
struct device *dev;
- int id;
- int irq;
+ int nr_irqs;
+ const struct dw_edma_plat_ops *ops;
+ u32 flags;
+
+ void __iomem *reg_base;
+
+ u16 ll_wr_cnt;
+ u16 ll_rd_cnt;
+ /* link list address */
+ struct dw_edma_region ll_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region ll_region_rd[EDMA_MAX_RD_CH];
+
+ /* data region */
+ struct dw_edma_region dt_region_wr[EDMA_MAX_WR_CH];
+ struct dw_edma_region dt_region_rd[EDMA_MAX_RD_CH];
+
+ enum dw_edma_map_format mf;
+
struct dw_edma *dw;
};
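A hedged sketch of how a glue driver might fill the reworked chip description before probing; the register base, map format, and channel counts below are illustrative only, and a real user must also populate the ll_region_*/dt_region_* memories:

/* Hedged sketch: minimal dw_edma_chip setup (values are made up). */
static int my_edma_init(struct device *dev, void __iomem *regs)
{
	struct dw_edma_chip *chip;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = dev;
	chip->reg_base = regs;
	chip->nr_irqs = 1;
	chip->flags = DW_EDMA_CHIP_LOCAL;	/* used by a local endpoint */
	chip->mf = EDMA_MF_EDMA_UNROLL;
	chip->ll_wr_cnt = 2;
	chip->ll_rd_cnt = 2;
	/* ll_region_wr/rd and dt_region_wr/rd setup omitted in this sketch */

	return dw_edma_probe(chip);
}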
/* Export to the platform drivers */
-#if IS_ENABLED(CONFIG_DW_EDMA)
+#if IS_REACHABLE(CONFIG_DW_EDMA)
int dw_edma_probe(struct dw_edma_chip *chip);
int dw_edma_remove(struct dw_edma_chip *chip);
#else
diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h
index a6b7bc707356..77ea602c287c 100644
--- a/include/linux/dma/hsu.h
+++ b/include/linux/dma/hsu.h
@@ -8,11 +8,13 @@
#ifndef _DMA_HSU_H
#define _DMA_HSU_H
-#include <linux/device.h>
-#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+#include <linux/types.h>
#include <linux/platform_data/dma-hsu.h>
+struct device;
struct hsu_dma;
/**
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/dma/imx-dma.h
index 281adbb26e6b..76a8de9ae151 100644
--- a/include/linux/platform_data/dma-imx.h
+++ b/include/linux/dma/imx-dma.h
@@ -3,8 +3,8 @@
* Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
*/
-#ifndef __ASM_ARCH_MXC_DMA_H__
-#define __ASM_ARCH_MXC_DMA_H__
+#ifndef __LINUX_DMA_IMX_H
+#define __LINUX_DMA_IMX_H
#include <linux/scatterlist.h>
#include <linux/device.h>
@@ -39,6 +39,9 @@ enum sdma_peripheral_type {
IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
IMX_DMATYPE_SAI, /* SAI */
+ IMX_DMATYPE_MULTI_SAI, /* MULTI FIFOs For Audio */
+ IMX_DMATYPE_HDMI, /* HDMI Audio */
+ IMX_DMATYPE_I2C, /* I2C */
};
enum imx_dma_prio {
@@ -65,4 +68,36 @@ static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
!strcmp(chan->device->dev->driver->name, "imx-dma");
}
-#endif
+/**
+ * struct sdma_peripheral_config - SDMA config for audio
+ * @n_fifos_src: Number of FIFOs for recording
+ * @n_fifos_dst: Number of FIFOs for playback
+ * @stride_fifos_src: FIFO address stride for recording, 0 means the FIFOs are
+ * contiguous, 1 means one word of stride between FIFOs. The
+ * stride must be the same between all FIFOs.
+ * @stride_fifos_dst: FIFO address stride for playback
+ * @words_per_fifo: number of words per FIFO fetch/fill, 1 means
+ * one channel per FIFO, 2 means two channels per FIFO.
+ * If 'n_fifos_src = 4' and 'words_per_fifo = 2', the
+ * first two words (channels) are fetched from FIFO0,
+ * the next two from FIFO1, and so on; after the last
+ * FIFO3 is fetched, it rolls back to FIFO0.
+ * @sw_done: Use software done. Needed for PDM (micfil)
+ *
+ * Some i.MX Audio devices (SAI, micfil) have multiple successive FIFO
+ * registers. For multichannel recording/playback the SAI/micfil have
+ * one FIFO register per channel and the SDMA engine has to read/write
+ * the next channel from/to the next register and wrap around to the
+ * first register when all channels are handled. The number of active
+ * channels must be communicated to the SDMA engine using this struct.
+ */
+struct sdma_peripheral_config {
+ int n_fifos_src;
+ int n_fifos_dst;
+ int stride_fifos_src;
+ int stride_fifos_dst;
+ int words_per_fifo;
+ bool sw_done;
+};
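A hedged sketch of how this struct travels to the SDMA driver through dma_slave_config::peripheral_config, e.g. from an audio DMA setup path; chan and the field values are assumptions:

/* Hedged sketch: pass SDMA audio FIFO layout via dma_slave_config. */
static int my_sai_dma_config(struct dma_chan *chan)
{
	struct sdma_peripheral_config sdmacfg = {
		.n_fifos_dst = 4,	/* four successive SAI TX FIFOs */
		.words_per_fifo = 2,	/* two audio channels per FIFO */
	};
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.peripheral_config = &sdmacfg,
		.peripheral_size = sizeof(sdmacfg),
	};

	return dmaengine_slave_config(chan, &cfg);
}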
+
+#endif /* __LINUX_DMA_IMX_H */
diff --git a/include/linux/dma/ipu-dma.h b/include/linux/dma/ipu-dma.h
deleted file mode 100644
index 6969391580d2..000000000000
--- a/include/linux/dma/ipu-dma.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Copyright (C) 2005-2007 Freescale Semiconductor, Inc.
- */
-
-#ifndef __LINUX_DMA_IPU_DMA_H
-#define __LINUX_DMA_IPU_DMA_H
-
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-
-/* IPU DMA Controller channel definitions. */
-enum ipu_channel {
- IDMAC_IC_0 = 0, /* IC (encoding task) to memory */
- IDMAC_IC_1 = 1, /* IC (viewfinder task) to memory */
- IDMAC_ADC_0 = 1,
- IDMAC_IC_2 = 2,
- IDMAC_ADC_1 = 2,
- IDMAC_IC_3 = 3,
- IDMAC_IC_4 = 4,
- IDMAC_IC_5 = 5,
- IDMAC_IC_6 = 6,
- IDMAC_IC_7 = 7, /* IC (sensor data) to memory */
- IDMAC_IC_8 = 8,
- IDMAC_IC_9 = 9,
- IDMAC_IC_10 = 10,
- IDMAC_IC_11 = 11,
- IDMAC_IC_12 = 12,
- IDMAC_IC_13 = 13,
- IDMAC_SDC_0 = 14, /* Background synchronous display data */
- IDMAC_SDC_1 = 15, /* Foreground data (overlay) */
- IDMAC_SDC_2 = 16,
- IDMAC_SDC_3 = 17,
- IDMAC_ADC_2 = 18,
- IDMAC_ADC_3 = 19,
- IDMAC_ADC_4 = 20,
- IDMAC_ADC_5 = 21,
- IDMAC_ADC_6 = 22,
- IDMAC_ADC_7 = 23,
- IDMAC_PF_0 = 24,
- IDMAC_PF_1 = 25,
- IDMAC_PF_2 = 26,
- IDMAC_PF_3 = 27,
- IDMAC_PF_4 = 28,
- IDMAC_PF_5 = 29,
- IDMAC_PF_6 = 30,
- IDMAC_PF_7 = 31,
-};
-
-/* Order significant! */
-enum ipu_channel_status {
- IPU_CHANNEL_FREE,
- IPU_CHANNEL_INITIALIZED,
- IPU_CHANNEL_READY,
- IPU_CHANNEL_ENABLED,
-};
-
-#define IPU_CHANNELS_NUM 32
-
-enum pixel_fmt {
- /* 1 byte */
- IPU_PIX_FMT_GENERIC,
- IPU_PIX_FMT_RGB332,
- IPU_PIX_FMT_YUV420P,
- IPU_PIX_FMT_YUV422P,
- IPU_PIX_FMT_YUV420P2,
- IPU_PIX_FMT_YVU422P,
- /* 2 bytes */
- IPU_PIX_FMT_RGB565,
- IPU_PIX_FMT_RGB666,
- IPU_PIX_FMT_BGR666,
- IPU_PIX_FMT_YUYV,
- IPU_PIX_FMT_UYVY,
- /* 3 bytes */
- IPU_PIX_FMT_RGB24,
- IPU_PIX_FMT_BGR24,
- /* 4 bytes */
- IPU_PIX_FMT_GENERIC_32,
- IPU_PIX_FMT_RGB32,
- IPU_PIX_FMT_BGR32,
- IPU_PIX_FMT_ABGR32,
- IPU_PIX_FMT_BGRA32,
- IPU_PIX_FMT_RGBA32,
-};
-
-enum ipu_color_space {
- IPU_COLORSPACE_RGB,
- IPU_COLORSPACE_YCBCR,
- IPU_COLORSPACE_YUV
-};
-
-/*
- * Enumeration of IPU rotation modes
- */
-enum ipu_rotate_mode {
- /* Note the enum values correspond to BAM value */
- IPU_ROTATE_NONE = 0,
- IPU_ROTATE_VERT_FLIP = 1,
- IPU_ROTATE_HORIZ_FLIP = 2,
- IPU_ROTATE_180 = 3,
- IPU_ROTATE_90_RIGHT = 4,
- IPU_ROTATE_90_RIGHT_VFLIP = 5,
- IPU_ROTATE_90_RIGHT_HFLIP = 6,
- IPU_ROTATE_90_LEFT = 7,
-};
-
-/*
- * Enumeration of DI ports for ADC.
- */
-enum display_port {
- DISP0,
- DISP1,
- DISP2,
- DISP3
-};
-
-struct idmac_video_param {
- unsigned short in_width;
- unsigned short in_height;
- uint32_t in_pixel_fmt;
- unsigned short out_width;
- unsigned short out_height;
- uint32_t out_pixel_fmt;
- unsigned short out_stride;
- bool graphics_combine_en;
- bool global_alpha_en;
- bool key_color_en;
- enum display_port disp;
- unsigned short out_left;
- unsigned short out_top;
-};
-
-/*
- * Union of initialization parameters for a logical channel. So far only video
- * parameters are used.
- */
-union ipu_channel_param {
- struct idmac_video_param video;
-};
-
-struct idmac_tx_desc {
- struct dma_async_tx_descriptor txd;
- struct scatterlist *sg; /* scatterlist for this */
- unsigned int sg_len; /* tx-descriptor. */
- struct list_head list;
-};
-
-struct idmac_channel {
- struct dma_chan dma_chan;
- dma_cookie_t completed; /* last completed cookie */
- union ipu_channel_param params;
- enum ipu_channel link; /* input channel, linked to the output */
- enum ipu_channel_status status;
- void *client; /* Only one client per channel */
- unsigned int n_tx_desc;
- struct idmac_tx_desc *desc; /* allocated tx-descriptors */
- struct scatterlist *sg[2]; /* scatterlist elements in buffer-0 and -1 */
- struct list_head free_list; /* free tx-descriptors */
- struct list_head queue; /* queued tx-descriptors */
- spinlock_t lock; /* protects sg[0,1], queue */
- struct mutex chan_mutex; /* protects status, cookie, free_list */
- bool sec_chan_en;
- int active_buffer;
- unsigned int eof_irq;
- char eof_name[16]; /* EOF IRQ name for request_irq() */
-};
-
-#define to_tx_desc(tx) container_of(tx, struct idmac_tx_desc, txd)
-#define to_idmac_chan(c) container_of(c, struct idmac_channel, dma_chan)
-
-#endif /* __LINUX_DMA_IPU_DMA_H */
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
index e443be4d3b4b..5d43881e6fb7 100644
--- a/include/linux/dma/k3-udma-glue.h
+++ b/include/linux/dma/k3-udma-glue.h
@@ -26,6 +26,11 @@ struct k3_udma_glue_tx_channel;
struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
const char *name, struct k3_udma_glue_tx_channel_cfg *cfg);
+struct k3_udma_glue_tx_channel *
+k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
+ struct k3_udma_glue_tx_channel_cfg *cfg,
+ struct device_node *udmax_np, u32 thread_id);
+
void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
struct cppi5_host_desc_t *desc_tx,
@@ -109,6 +114,11 @@ struct k3_udma_glue_rx_channel *k3_udma_glue_request_rx_chn(
const char *name,
struct k3_udma_glue_rx_channel_cfg *cfg);
+struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev,
+ struct k3_udma_glue_rx_channel_cfg *cfg,
+ struct device_node *udmax_np, u32 thread_id);
+
void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
@@ -126,12 +136,9 @@ u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn);
int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num);
-void k3_udma_glue_rx_put_irq(struct k3_udma_glue_rx_channel *rx_chn,
- u32 flow_num);
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_num, void *data,
- void (*cleanup)(void *data, dma_addr_t desc_dma),
- bool skip_fdq);
+ void (*cleanup)(void *data, dma_addr_t desc_dma));
int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
u32 flow_idx);
int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
diff --git a/include/linux/dma/qcom-gpi-dma.h b/include/linux/dma/qcom-gpi-dma.h
index f46dc3372f11..6680dd1a43c6 100644
--- a/include/linux/dma/qcom-gpi-dma.h
+++ b/include/linux/dma/qcom-gpi-dma.h
@@ -26,7 +26,7 @@ enum spi_transfer_cmd {
* @clk_div: source clock divider
* @clk_src: serial clock
* @cmd: spi cmd
- * @fragmentation: keep CS assserted at end of sequence
+ * @fragmentation: keep CS asserted at end of sequence
* @cs: chip select toggle
* @set_config: set peripheral config
* @rx_len: receive length for buffer
diff --git a/include/linux/dma/qcom_adm.h b/include/linux/dma/qcom_adm.h
new file mode 100644
index 000000000000..af20df674f0c
--- /dev/null
+++ b/include/linux/dma/qcom_adm.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_DMA_QCOM_ADM_H
+#define __LINUX_DMA_QCOM_ADM_H
+
+#include <linux/types.h>
+
+struct qcom_adm_peripheral_config {
+ u32 crci;
+ u32 mux;
+};
+
+#endif /* __LINUX_DMA_QCOM_ADM_H */
diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h
index efa2f0309f00..c53c0f6e3b1a 100644
--- a/include/linux/dma/ti-cppi5.h
+++ b/include/linux/dma/ti-cppi5.h
@@ -616,6 +616,7 @@ static inline void *cppi5_hdesc_get_swdata(struct cppi5_host_desc_t *desc)
#define CPPI5_TR_CSF_SUPR_EVT BIT(2)
#define CPPI5_TR_CSF_EOL_ADV_SHIFT (4U)
#define CPPI5_TR_CSF_EOL_ADV_MASK GENMASK(6, 4)
+#define CPPI5_TR_CSF_EOL_ICNT0 BIT(4)
#define CPPI5_TR_CSF_EOP BIT(7)
/**
diff --git a/include/linux/dma/xilinx_dpdma.h b/include/linux/dma/xilinx_dpdma.h
new file mode 100644
index 000000000000..02a4adf8921b
--- /dev/null
+++ b/include/linux/dma/xilinx_dpdma.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_DMA_XILINX_DPDMA_H
+#define __LINUX_DMA_XILINX_DPDMA_H
+
+#include <linux/types.h>
+
+struct xilinx_dpdma_peripheral_config {
+ bool video_group;
+};
+
+#endif /* __LINUX_DMA_XILINX_DPDMA_H */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 004736b6a9c8..99efe2b9b4ea 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -84,7 +84,7 @@ enum dma_transfer_direction {
DMA_TRANS_NONE,
};
-/**
+/*
* Interleaved Transfer Request
* ----------------------------
* A chunk is collection of contiguous bytes to be transferred.
@@ -161,6 +161,16 @@ struct dma_interleaved_template {
};
/**
+ * struct dma_vec - DMA vector
+ * @addr: Bus address of the start of the vector
+ * @len: Length in bytes of the DMA vector
+ */
+struct dma_vec {
+ dma_addr_t addr;
+ size_t len;
+};
+
+/**
* enum dma_ctrl_flags - DMA flags to augment operation preparation,
* control completion, and communicate status.
* @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
@@ -213,7 +223,7 @@ enum sum_check_bits {
};
/**
- * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
+ * enum sum_check_flags - result of async_{xor,pq}_zero_sum operations
* @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
* @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
*/
@@ -230,12 +240,6 @@ enum sum_check_flags {
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
/**
- * struct dma_chan_percpu - the per-CPU part of struct dma_chan
- * @memcpy_count: transaction counter
- * @bytes_transferred: byte counter
- */
-
-/**
* enum dma_desc_metadata_mode - per descriptor metadata mode types supported
* @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
* client driver and it is attached (via the dmaengine_desc_attach_metadata()
@@ -282,7 +286,7 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
* pointer to the engine's metadata area
* 4. Read out the metadata from the pointer
*
- * Note: the two mode is not compatible and clients must use one mode for a
+ * Warning: the two modes are not compatible and clients must use one mode for a
* descriptor.
*/
enum dma_desc_metadata_mode {
@@ -291,6 +295,11 @@ enum dma_desc_metadata_mode {
DESC_METADATA_ENGINE = BIT(1),
};
+/**
+ * struct dma_chan_percpu - the per-CPU part of struct dma_chan
+ * @memcpy_count: transaction counter
+ * @bytes_transferred: byte counter
+ */
struct dma_chan_percpu {
/* stats */
unsigned long memcpy_count;
@@ -381,6 +390,7 @@ enum dma_slave_buswidth {
DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
+ DMA_SLAVE_BUSWIDTH_128_BYTES = 128,
};
/**
@@ -394,12 +404,12 @@ enum dma_slave_buswidth {
* should be read (RX), if the source is memory this argument is
* ignored.
* @dst_addr: this is the physical address where DMA slave data
- * should be written (TX), if the source is memory this argument
+ * should be written (TX), if the destination is memory this argument
* is ignored.
* @src_addr_width: this is the width in bytes of the source (RX)
* register where DMA data shall be read. If the source
* is memory this may be ignored depending on architecture.
- * Legal values: 1, 2, 3, 4, 8, 16, 32, 64.
+ * Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128.
* @dst_addr_width: same as src_addr_width but for destination
* target (TX) mutatis mutandis.
* @src_maxburst: the maximum number of words (note: words, as in
@@ -418,9 +428,6 @@ enum dma_slave_buswidth {
* @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
* with 'true' if peripheral should be flow controller. Direction will be
* selected at Runtime.
- * @slave_id: Slave requester id. Only valid for slave channels. The dma
- * slave peripheral will have unique id as dma requester which need to be
- * pass as slave config.
* @peripheral_config: peripheral configuration for programming peripheral
* for dmaengine transfer
* @peripheral_size: peripheral configuration buffer size
@@ -448,7 +455,6 @@ struct dma_slave_config {
u32 src_port_window_size;
u32 dst_port_window_size;
bool device_fc;
- unsigned int slave_id;
void *peripheral_config;
size_t peripheral_size;
};
@@ -521,8 +527,6 @@ static inline const char *dma_chan_name(struct dma_chan *chan)
return dev_name(&chan->dev->device);
}
-void dma_chan_cleanup(struct kref *kref);
-
/**
* typedef dma_filter_fn - callback filter for dma_request_channel
* @chan: channel to be reviewed
@@ -590,9 +594,13 @@ struct dma_descriptor_metadata_ops {
* @phys: physical address of the descriptor
* @chan: target channel for this operation
* @tx_submit: accept the descriptor, assign ordered cookie and mark the
- * descriptor pending. To be pushed on .issue_pending() call
+ * descriptor pending. To be pushed on .issue_pending() call
+ * @desc_free: driver's callback function to free a reusable descriptor
+ * after completion
* @callback: routine to call after this operation is complete
+ * @callback_result: error result from a DMA transaction
* @callback_param: general parameter to pass to the callback routine
+ * @unmap: hook for generic DMA unmap data
* @desc_metadata_mode: core managed metadata mode to protect mixed use of
* DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise
* DESC_METADATA_NONE
@@ -777,6 +785,7 @@ struct dma_filter {
/**
* struct dma_device - info on the entity supplying DMA services
+ * @ref: reference is taken and put every time a channel is allocated or freed
* @chancnt: how many DMA channels are supported
* @privatecnt: how many DMA channels are requested by dma_request_channel
* @channels: the list of struct dma_chan
@@ -793,6 +802,7 @@ struct dma_filter {
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
* @owner: owner module (automatically set based on the provided dev)
+ * @chan_ida: unique channel ID
* @src_addr_widths: bit mask of src addr widths the device supports
* Width is specified in bytes, e.g. for a device supporting
* a width of 4 the mask should have BIT(4) set.
@@ -806,6 +816,7 @@ struct dma_filter {
* @max_sg_burst: max number of SG list entries executed in a single burst
* DMA transaction with no software intervention for reinitialization.
* Zero value means unlimited number of entries.
+ * @descriptor_reuse: a submitted transfer can be resubmitted after completion
* @residue_granularity: granularity of the transfer residue reported
* by tx_status
* @device_alloc_chan_resources: allocate resources and return the
@@ -820,12 +831,14 @@ struct dma_filter {
* @device_prep_dma_memset: prepares a memset operation
* @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
* @device_prep_dma_interrupt: prepares an end of chain interrupt operation
+ * @device_prep_peripheral_dma_vec: prepares a scatter-gather DMA transfer,
+ * where the address and size of each segment are given by one entry of
+ * the dma_vec array.
* @device_prep_slave_sg: prepares a slave dma operation
* @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
* The function takes a buffer of size buf_len. The callback function will
* be called after period_len bytes have been transferred.
* @device_prep_interleaved_dma: Transfer expression in a generic way.
- * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
* @device_caps: May be used to override the generic DMA slave capabilities
* with per-channel specific ones
* @device_config: Pushes a new configuration to a channel, return 0 or an error
@@ -843,7 +856,6 @@ struct dma_filter {
* struct with auxiliary transfer status information, otherwise the call
* will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
- * @descriptor_reuse: a submitted transfer can be resubmitted after completion
* @device_release: called sometime after dma_async_device_unregister() is
* called and there are no further references to this structure. This
* must be implemented to free resources however many existing drivers
@@ -851,6 +863,7 @@ struct dma_filter {
* @dbg_summary_show: optional routine to show contents in debugfs; default code
* will be used when this is omitted, but custom code can show extra,
* controller specific information.
+ * @dbg_dev_root: the root folder in debugfs for this device
*/
struct dma_device {
struct kref ref;
@@ -859,7 +872,7 @@ struct dma_device {
struct list_head channels;
struct list_head global_node;
struct dma_filter filter;
- dma_cap_mask_t cap_mask;
+ dma_cap_mask_t cap_mask;
enum dma_desc_metadata_mode desc_metadata_modes;
unsigned short max_xor;
unsigned short max_pq;
@@ -873,7 +886,6 @@ struct dma_device {
struct device *dev;
struct module *owner;
struct ida chan_ida;
- struct mutex chan_mutex; /* to protect chan_ida */
u32 src_addr_widths;
u32 dst_addr_widths;
@@ -914,6 +926,10 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_peripheral_dma_vec)(
+ struct dma_chan *chan, const struct dma_vec *vecs,
+ size_t nents, enum dma_transfer_direction direction,
+ unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -925,14 +941,9 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
struct dma_chan *chan, struct dma_interleaved_template *xt,
unsigned long flags);
- struct dma_async_tx_descriptor *(*device_prep_dma_imm_data)(
- struct dma_chan *chan, dma_addr_t dst, u64 data,
- unsigned long flags);
- void (*device_caps)(struct dma_chan *chan,
- struct dma_slave_caps *caps);
- int (*device_config)(struct dma_chan *chan,
- struct dma_slave_config *config);
+ void (*device_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
+ int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config);
int (*device_pause)(struct dma_chan *chan);
int (*device_resume)(struct dma_chan *chan);
int (*device_terminate_all)(struct dma_chan *chan);
@@ -944,10 +955,8 @@ struct dma_device {
void (*device_issue_pending)(struct dma_chan *chan);
void (*device_release)(struct dma_device *dev);
/* debugfs support */
-#ifdef CONFIG_DEBUG_FS
void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
struct dentry *dbg_dev_root;
-#endif
};
static inline int dmaengine_slave_config(struct dma_chan *chan,
@@ -961,7 +970,8 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
- return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
+ return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM) ||
+ (direction == DMA_DEV_TO_DEV);
}
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
@@ -980,6 +990,25 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
dir, flags, NULL);
}
+/**
+ * dmaengine_prep_peripheral_dma_vec() - Prepare a DMA scatter-gather descriptor
+ * @chan: The channel to be used for this descriptor
+ * @vecs: The array of DMA vectors that should be transferred
+ * @nents: The number of DMA vectors in the array
+ * @dir: Specifies the direction of the data transfer
+ * @flags: DMA engine flags
+ */
+static inline struct dma_async_tx_descriptor *dmaengine_prep_peripheral_dma_vec(
+ struct dma_chan *chan, const struct dma_vec *vecs, size_t nents,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_peripheral_dma_vec)
+ return NULL;
+
+ return chan->device->device_prep_peripheral_dma_vec(chan, vecs, nents,
+ dir, flags);
+}
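A hedged usage sketch for the new vector prep helper, assuming two buffers already mapped to the bus addresses buf0_dma/buf1_dma:

/* Hedged sketch: submit a two-segment transfer via dma_vec. */
struct dma_vec vecs[] = {
	{ .addr = buf0_dma, .len = buf0_len },	/* assumed pre-mapped */
	{ .addr = buf1_dma, .len = buf1_len },
};
struct dma_async_tx_descriptor *desc;

desc = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (desc) {
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
}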
+
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
enum dma_transfer_direction dir, unsigned long flags)
@@ -1031,6 +1060,14 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
+/**
+ * dmaengine_prep_dma_memset() - Prepare a DMA memset descriptor.
+ * @chan: The channel to be used for this descriptor
+ * @dest: Address of buffer to be set
+ * @value: Treated as a single byte value that fills the destination buffer
+ * @len: The total size of dest
+ * @flags: DMA engine flags
+ */
static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
unsigned long flags)
@@ -1487,6 +1524,7 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
struct dma_chan *dma_request_chan(struct device *dev, const char *name);
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);
+struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name);
void dma_release_channel(struct dma_chan *chan);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
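A hedged sketch of the devm_ variant declared above: the channel is released automatically on driver detach, so the unwind paths need no explicit dma_release_channel(). The probe shape and channel name are assumptions:

/* Hedged sketch: managed channel request in a probe routine. */
static int my_probe(struct platform_device *pdev)
{
	struct dma_chan *chan;

	chan = devm_dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ...use chan; it is released automatically on detach... */
	return 0;
}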
@@ -1523,6 +1561,12 @@ static inline struct dma_chan *dma_request_chan_by_mask(
{
return ERR_PTR(-ENODEV);
}
+
+static inline struct dma_chan *devm_dma_request_chan(struct device *dev, const char *name)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline void dma_release_channel(struct dma_chan *chan)
{
}
@@ -1574,7 +1618,8 @@ int dma_async_device_register(struct dma_device *device);
int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
int dma_async_device_channel_register(struct dma_device *device,
- struct dma_chan *chan);
+ struct dma_chan *chan,
+ const char *name);
void dma_async_device_channel_unregister(struct dma_device *device,
struct dma_chan *chan);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
@@ -1597,14 +1642,14 @@ static inline struct dma_chan
{
struct dma_chan *chan;
- chan = dma_request_slave_channel(dev, name);
- if (chan)
+ chan = dma_request_chan(dev, name);
+ if (!IS_ERR(chan))
return chan;
if (!fn || !fn_param)
return NULL;
- return __dma_request_channel(&mask, fn, fn_param, NULL);
+ return dma_request_channel(mask, fn, fn_param);
}
static inline char *
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index f632ecfb4238..7d40b51933d1 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -11,6 +11,7 @@
#ifndef LINUX_DMAPOOL_H
#define LINUX_DMAPOOL_H
+#include <linux/nodemask_types.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
@@ -18,8 +19,8 @@ struct device;
#ifdef CONFIG_HAS_DMA
-struct dma_pool *dma_pool_create(const char *name, struct device *dev,
- size_t size, size_t align, size_t allocation);
+struct dma_pool *dma_pool_create_node(const char *name, struct device *dev,
+ size_t size, size_t align, size_t boundary, int node);
void dma_pool_destroy(struct dma_pool *pool);
@@ -35,9 +36,12 @@ struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
void dmam_pool_destroy(struct dma_pool *pool);
#else /* !CONFIG_HAS_DMA */
-static inline struct dma_pool *dma_pool_create(const char *name,
- struct device *dev, size_t size, size_t align, size_t allocation)
-{ return NULL; }
+static inline struct dma_pool *dma_pool_create_node(const char *name,
+ struct device *dev, size_t size, size_t align, size_t boundary,
+ int node)
+{
+ return NULL;
+}
static inline void dma_pool_destroy(struct dma_pool *pool) { }
static inline void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle) { return NULL; }
@@ -49,6 +53,21 @@ static inline struct dma_pool *dmam_pool_create(const char *name,
static inline void dmam_pool_destroy(struct dma_pool *pool) { }
#endif /* !CONFIG_HAS_DMA */
+static inline struct dma_pool *dma_pool_create(const char *name,
+ struct device *dev, size_t size, size_t align, size_t boundary)
+{
+ return dma_pool_create_node(name, dev, size, align, boundary,
+ NUMA_NO_NODE);
+}
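A hedged sketch of the NUMA-aware creation path added above, allocating the pool near the device's node; the pool geometry is illustrative and pool lifetime management is omitted:

/* Hedged sketch: node-local DMA pool plus a zeroed allocation. */
static void *my_alloc_desc(struct device *dev, dma_addr_t *handle)
{
	struct dma_pool *pool;

	pool = dma_pool_create_node("my-descs", dev, 64, 64, 0,
				    dev_to_node(dev));
	if (!pool)
		return NULL;

	return dma_pool_zalloc(pool, GFP_KERNEL, handle);
}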
+
+/**
+ * dma_pool_zalloc - Get a zero-initialized block of DMA coherent memory.
+ * @pool: dma pool that will produce the block
+ * @mem_flags: GFP_* bitmask
+ * @handle: pointer to dma address of block
+ *
+ * Same as dma_pool_alloc(), but the returned memory is zeroed.
+ */
static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle)
{
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index e04436a7ff27..692b2b445761 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -18,11 +18,7 @@
struct acpi_dmar_header;
-#ifdef CONFIG_X86
-# define DMAR_UNITS_SUPPORTED MAX_IO_APICS
-#else
-# define DMAR_UNITS_SUPPORTED 64
-#endif
+#define DMAR_UNITS_SUPPORTED 1024
/* DMAR Flags */
#define DMAR_INTR_REMAP 0x1
@@ -43,6 +39,7 @@ struct dmar_drhd_unit {
struct list_head list; /* list of drhd units */
struct acpi_dmar_header *hdr; /* ACPI header */
u64 reg_base_addr; /* register base address*/
+ unsigned long reg_size; /* size of register set */
struct dmar_dev_scope *devices;/* target device array */
int devices_cnt; /* target device count */
u16 segment; /* PCI domain */
@@ -109,8 +106,6 @@ static inline bool dmar_rcu_check(void)
extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
extern void dmar_register_bus_notifier(void);
-extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
- struct dmar_dev_scope **devices, u16 segment);
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
@@ -121,8 +116,8 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
u16 segment, struct dmar_dev_scope *devices,
int count);
/* Intel IOMMU detection */
-extern int detect_intel_iommu(void);
-extern int enable_drhd_fault_handling(void);
+void detect_intel_iommu(void);
+extern int enable_drhd_fault_handling(unsigned int cpu);
extern int dmar_device_add(acpi_handle handle);
extern int dmar_device_remove(acpi_handle handle);
@@ -131,6 +126,14 @@ static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
return 0;
}
+#ifdef CONFIG_DMAR_DEBUG
+void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ unsigned long long addr, u32 pasid);
+#else
+static inline void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
+ unsigned long long addr, u32 pasid) {}
+#endif
+
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
@@ -189,71 +192,82 @@ static inline bool dmar_platform_optin(void)
return false;
}
+static inline void detect_intel_iommu(void)
+{
+}
+
#endif /* CONFIG_DMAR_TABLE */
struct irte {
union {
- /* Shared between remapped and posted mode*/
- struct {
- __u64 present : 1, /* 0 */
- fpd : 1, /* 1 */
- __res0 : 6, /* 2 - 6 */
- avail : 4, /* 8 - 11 */
- __res1 : 3, /* 12 - 14 */
- pst : 1, /* 15 */
- vector : 8, /* 16 - 23 */
- __res2 : 40; /* 24 - 63 */
- };
-
- /* Remapped mode */
- struct {
- __u64 r_present : 1, /* 0 */
- r_fpd : 1, /* 1 */
- dst_mode : 1, /* 2 */
- redir_hint : 1, /* 3 */
- trigger_mode : 1, /* 4 */
- dlvry_mode : 3, /* 5 - 7 */
- r_avail : 4, /* 8 - 11 */
- r_res0 : 4, /* 12 - 15 */
- r_vector : 8, /* 16 - 23 */
- r_res1 : 8, /* 24 - 31 */
- dest_id : 32; /* 32 - 63 */
- };
-
- /* Posted mode */
- struct {
- __u64 p_present : 1, /* 0 */
- p_fpd : 1, /* 1 */
- p_res0 : 6, /* 2 - 7 */
- p_avail : 4, /* 8 - 11 */
- p_res1 : 2, /* 12 - 13 */
- p_urgent : 1, /* 14 */
- p_pst : 1, /* 15 */
- p_vector : 8, /* 16 - 23 */
- p_res2 : 14, /* 24 - 37 */
- pda_l : 26; /* 38 - 63 */
- };
- __u64 low;
- };
-
- union {
- /* Shared between remapped and posted mode*/
- struct {
- __u64 sid : 16, /* 64 - 79 */
- sq : 2, /* 80 - 81 */
- svt : 2, /* 82 - 83 */
- __res3 : 44; /* 84 - 127 */
- };
-
- /* Posted mode*/
struct {
- __u64 p_sid : 16, /* 64 - 79 */
- p_sq : 2, /* 80 - 81 */
- p_svt : 2, /* 82 - 83 */
- p_res3 : 12, /* 84 - 95 */
- pda_h : 32; /* 96 - 127 */
+ union {
+ /* Shared between remapped and posted mode*/
+ struct {
+ __u64 present : 1, /* 0 */
+ fpd : 1, /* 1 */
+ __res0 : 6, /* 2 - 6 */
+ avail : 4, /* 8 - 11 */
+ __res1 : 3, /* 12 - 14 */
+ pst : 1, /* 15 */
+ vector : 8, /* 16 - 23 */
+ __res2 : 40; /* 24 - 63 */
+ };
+
+ /* Remapped mode */
+ struct {
+ __u64 r_present : 1, /* 0 */
+ r_fpd : 1, /* 1 */
+ dst_mode : 1, /* 2 */
+ redir_hint : 1, /* 3 */
+ trigger_mode : 1, /* 4 */
+ dlvry_mode : 3, /* 5 - 7 */
+ r_avail : 4, /* 8 - 11 */
+ r_res0 : 4, /* 12 - 15 */
+ r_vector : 8, /* 16 - 23 */
+ r_res1 : 8, /* 24 - 31 */
+ dest_id : 32; /* 32 - 63 */
+ };
+
+ /* Posted mode */
+ struct {
+ __u64 p_present : 1, /* 0 */
+ p_fpd : 1, /* 1 */
+ p_res0 : 6, /* 2 - 7 */
+ p_avail : 4, /* 8 - 11 */
+ p_res1 : 2, /* 12 - 13 */
+ p_urgent : 1, /* 14 */
+ p_pst : 1, /* 15 */
+ p_vector : 8, /* 16 - 23 */
+ p_res2 : 14, /* 24 - 37 */
+ pda_l : 26; /* 38 - 63 */
+ };
+ __u64 low;
+ };
+
+ union {
+ /* Shared between remapped and posted mode*/
+ struct {
+ __u64 sid : 16, /* 64 - 79 */
+ sq : 2, /* 80 - 81 */
+ svt : 2, /* 82 - 83 */
+ __res3 : 44; /* 84 - 127 */
+ };
+
+ /* Posted mode*/
+ struct {
+ __u64 p_sid : 16, /* 64 - 79 */
+ p_sq : 2, /* 80 - 81 */
+ p_svt : 2, /* 82 - 83 */
+ p_res3 : 12, /* 84 - 95 */
+ pda_h : 32; /* 96 - 127 */
+ };
+ __u64 high;
+ };
};
- __u64 high;
+#ifdef CONFIG_IRQ_REMAP
+ __u128 irte;
+#endif
};
};
@@ -278,7 +292,6 @@ static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
struct irq_data;
extern void dmar_msi_unmask(struct irq_data *data);
extern void dmar_msi_mask(struct irq_data *data);
-extern void dmar_msi_read(int irq, struct msi_msg *msg);
extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h
index 0aad774beaec..9f183a679277 100644
--- a/include/linux/dnotify.h
+++ b/include/linux/dnotify.h
@@ -26,12 +26,11 @@ struct dnotify_struct {
FS_MODIFY | FS_MODIFY_CHILD |\
FS_ACCESS | FS_ACCESS_CHILD |\
FS_ATTRIB | FS_ATTRIB_CHILD |\
- FS_CREATE | FS_DN_RENAME |\
+ FS_CREATE | FS_RENAME |\
FS_MOVED_FROM | FS_MOVED_TO)
-extern int dir_notify_enable;
extern void dnotify_flush(struct file *, fl_owner_t);
-extern int fcntl_dirnotify(int, struct file *, unsigned long);
+extern int fcntl_dirnotify(int, struct file *, unsigned int);
#else
@@ -39,7 +38,7 @@ static inline void dnotify_flush(struct file *filp, fl_owner_t id)
{
}
-static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
+static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
{
return -EINVAL;
}
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
new file mode 100644
index 000000000000..562f520b23c2
--- /dev/null
+++ b/include/linux/dpll.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates
+ * Copyright (c) 2023 Intel and affiliates
+ */
+
+#ifndef __DPLL_H__
+#define __DPLL_H__
+
+#include <uapi/linux/dpll.h>
+#include <linux/device.h>
+#include <linux/netlink.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+
+struct dpll_device;
+struct dpll_pin;
+struct dpll_pin_esync;
+
+struct dpll_device_ops {
+ int (*mode_get)(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_mode *mode, struct netlink_ext_ack *extack);
+ int (*lock_status_get)(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_lock_status *status,
+ enum dpll_lock_status_error *status_error,
+ struct netlink_ext_ack *extack);
+ int (*temp_get)(const struct dpll_device *dpll, void *dpll_priv,
+ s32 *temp, struct netlink_ext_ack *extack);
+ int (*clock_quality_level_get)(const struct dpll_device *dpll,
+ void *dpll_priv,
+ unsigned long *qls,
+ struct netlink_ext_ack *extack);
+ int (*phase_offset_monitor_set)(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state state,
+ struct netlink_ext_ack *extack);
+ int (*phase_offset_monitor_get)(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state *state,
+ struct netlink_ext_ack *extack);
+ int (*phase_offset_avg_factor_set)(const struct dpll_device *dpll,
+ void *dpll_priv, u32 factor,
+ struct netlink_ext_ack *extack);
+ int (*phase_offset_avg_factor_get)(const struct dpll_device *dpll,
+ void *dpll_priv, u32 *factor,
+ struct netlink_ext_ack *extack);
+};
+
+struct dpll_pin_ops {
+ int (*frequency_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const u64 frequency,
+ struct netlink_ext_ack *extack);
+ int (*frequency_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack);
+ int (*direction_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack);
+ int (*direction_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack);
+ int (*state_on_pin_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_dpll_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_pin_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_dpll_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack);
+ int (*prio_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack);
+ int (*prio_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const u32 prio, struct netlink_ext_ack *extack);
+ int (*phase_offset_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *phase_offset,
+ struct netlink_ext_ack *extack);
+ int (*phase_adjust_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack);
+ int (*phase_adjust_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const s32 phase_adjust,
+ struct netlink_ext_ack *extack);
+ int (*ffo_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *ffo, struct netlink_ext_ack *extack);
+ int (*esync_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack);
+ int (*esync_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack);
+ int (*ref_sync_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_sync_pin,
+ void *ref_sync_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack);
+ int (*ref_sync_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_sync_pin,
+ void *ref_sync_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack);
+};
+
+struct dpll_pin_frequency {
+ u64 min;
+ u64 max;
+};
+
+#define DPLL_PIN_FREQUENCY_RANGE(_min, _max) \
+ { \
+ .min = _min, \
+ .max = _max, \
+ }
+
+#define DPLL_PIN_FREQUENCY(_val) DPLL_PIN_FREQUENCY_RANGE(_val, _val)
+#define DPLL_PIN_FREQUENCY_1PPS \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_1_HZ)
+#define DPLL_PIN_FREQUENCY_10MHZ \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_10_MHZ)
+#define DPLL_PIN_FREQUENCY_IRIG_B \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_10_KHZ)
+#define DPLL_PIN_FREQUENCY_DCF77 \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_77_5_KHZ)
+
+struct dpll_pin_phase_adjust_range {
+ s32 min;
+ s32 max;
+};
+
+struct dpll_pin_esync {
+ u64 freq;
+ const struct dpll_pin_frequency *range;
+ u8 range_num;
+ u8 pulse;
+};
+
+struct dpll_pin_properties {
+ const char *board_label;
+ const char *panel_label;
+ const char *package_label;
+ enum dpll_pin_type type;
+ unsigned long capabilities;
+ u32 freq_supported_num;
+ struct dpll_pin_frequency *freq_supported;
+ struct dpll_pin_phase_adjust_range phase_range;
+ u32 phase_gran;
+};
+
+#if IS_ENABLED(CONFIG_DPLL)
+void dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin);
+void dpll_netdev_pin_clear(struct net_device *dev);
+
+size_t dpll_netdev_pin_handle_size(const struct net_device *dev);
+int dpll_netdev_add_pin_handle(struct sk_buff *msg,
+ const struct net_device *dev);
+#else
+static inline void
+dpll_netdev_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin) { }
+static inline void dpll_netdev_pin_clear(struct net_device *dev) { }
+
+static inline size_t dpll_netdev_pin_handle_size(const struct net_device *dev)
+{
+ return 0;
+}
+
+static inline int
+dpll_netdev_add_pin_handle(struct sk_buff *msg, const struct net_device *dev)
+{
+ return 0;
+}
+#endif
+
+struct dpll_device *
+dpll_device_get(u64 clock_id, u32 dev_driver_id, struct module *module);
+
+void dpll_device_put(struct dpll_device *dpll);
+
+int dpll_device_register(struct dpll_device *dpll, enum dpll_type type,
+ const struct dpll_device_ops *ops, void *priv);
+
+void dpll_device_unregister(struct dpll_device *dpll,
+ const struct dpll_device_ops *ops, void *priv);
+
+struct dpll_pin *
+dpll_pin_get(u64 clock_id, u32 dev_driver_id, struct module *module,
+ const struct dpll_pin_properties *prop);
+
+int dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_put(struct dpll_pin *pin);
+
+int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+int dpll_pin_ref_sync_pair_add(struct dpll_pin *pin,
+ struct dpll_pin *ref_sync_pin);
+
+int dpll_device_change_ntf(struct dpll_device *dpll);
+
+int dpll_pin_change_ntf(struct dpll_pin *pin);
+
+#endif
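
As a usage sketch for the new dpll API: a driver might register a device and one input pin roughly as follows. The ops callbacks, clock_id, labels, and error-handling shape are placeholders, not defined by this header.

static struct dpll_pin_frequency example_freqs[] = {
	DPLL_PIN_FREQUENCY_1PPS,
	DPLL_PIN_FREQUENCY_10MHZ,
};

static const struct dpll_pin_properties example_pin_prop = {
	.board_label = "REF0",			/* placeholder */
	.type = DPLL_PIN_TYPE_EXT,
	.freq_supported = example_freqs,
	.freq_supported_num = ARRAY_SIZE(example_freqs),
};

static int example_dpll_probe(u64 clock_id, void *priv,
			      const struct dpll_device_ops *dev_ops,
			      const struct dpll_pin_ops *pin_ops)
{
	struct dpll_device *dpll;
	struct dpll_pin *pin;
	int err;

	dpll = dpll_device_get(clock_id, 0, THIS_MODULE);
	if (IS_ERR(dpll))
		return PTR_ERR(dpll);

	err = dpll_device_register(dpll, DPLL_TYPE_EEC, dev_ops, priv);
	if (err)
		goto put_dev;

	pin = dpll_pin_get(clock_id, 0, THIS_MODULE, &example_pin_prop);
	if (IS_ERR(pin)) {
		err = PTR_ERR(pin);
		goto unreg_dev;
	}

	err = dpll_pin_register(dpll, pin, pin_ops, priv);
	if (err)
		goto put_pin;

	return 0;

put_pin:
	dpll_pin_put(pin);
unreg_dev:
	dpll_device_unregister(dpll, dev_ops, priv);
put_dev:
	dpll_device_put(dpll);
	return err;
}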
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 5755537b51b1..5468a2399d48 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -38,13 +38,6 @@
#endif
-extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.4.11"
-#define API_VERSION 1
-#define PRO_VERSION_MIN 86
-#define PRO_VERSION_MAX 101
-
-
enum drbd_io_error_p {
EP_PASS_ON, /* FIXME should the better be named "Ignore"? */
EP_CALL_HELPER,
diff --git a/include/linux/drbd_config.h b/include/linux/drbd_config.h
new file mode 100644
index 000000000000..d215365c6bb1
--- /dev/null
+++ b/include/linux/drbd_config.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * drbd_config.h
+ * DRBD's compile time configuration.
+ */
+
+#ifndef DRBD_CONFIG_H
+#define DRBD_CONFIG_H
+
+extern const char *drbd_buildtag(void);
+
+#define REL_VERSION "8.4.11"
+#define PRO_VERSION_MIN 86
+#define PRO_VERSION_MAX 101
+
+#endif
diff --git a/include/linux/drbd_genl_api.h b/include/linux/drbd_genl_api.h
index bd62efc29002..70682c058027 100644
--- a/include/linux/drbd_genl_api.h
+++ b/include/linux/drbd_genl_api.h
@@ -47,7 +47,7 @@ enum drbd_state_info_bcast_reason {
#undef linux
#include <linux/drbd.h>
-#define GENL_MAGIC_VERSION API_VERSION
+#define GENL_MAGIC_VERSION 1
#define GENL_MAGIC_FAMILY drbd
#define GENL_MAGIC_FAMILY_HDRSZ sizeof(struct drbd_genlmsghdr)
#define GENL_MAGIC_INCLUDE_FILE <linux/drbd_genl.h>
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 9e33f7038bea..5b042fb427e9 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
drbd_limits.h
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
@@ -16,123 +16,123 @@
#define DEBUG_RANGE_CHECK 0
-#define DRBD_MINOR_COUNT_MIN 1
-#define DRBD_MINOR_COUNT_MAX 255
-#define DRBD_MINOR_COUNT_DEF 32
+#define DRBD_MINOR_COUNT_MIN 1U
+#define DRBD_MINOR_COUNT_MAX 255U
+#define DRBD_MINOR_COUNT_DEF 32U
#define DRBD_MINOR_COUNT_SCALE '1'
-#define DRBD_VOLUME_MAX 65535
+#define DRBD_VOLUME_MAX 65534U
-#define DRBD_DIALOG_REFRESH_MIN 0
-#define DRBD_DIALOG_REFRESH_MAX 600
+#define DRBD_DIALOG_REFRESH_MIN 0U
+#define DRBD_DIALOG_REFRESH_MAX 600U
#define DRBD_DIALOG_REFRESH_SCALE '1'
/* valid port number */
-#define DRBD_PORT_MIN 1
-#define DRBD_PORT_MAX 0xffff
+#define DRBD_PORT_MIN 1U
+#define DRBD_PORT_MAX 0xffffU
#define DRBD_PORT_SCALE '1'
/* startup { */
/* if you want more than 3.4 days, disable */
-#define DRBD_WFC_TIMEOUT_MIN 0
-#define DRBD_WFC_TIMEOUT_MAX 300000
-#define DRBD_WFC_TIMEOUT_DEF 0
+#define DRBD_WFC_TIMEOUT_MIN 0U
+#define DRBD_WFC_TIMEOUT_MAX 300000U
+#define DRBD_WFC_TIMEOUT_DEF 0U
#define DRBD_WFC_TIMEOUT_SCALE '1'
-#define DRBD_DEGR_WFC_TIMEOUT_MIN 0
-#define DRBD_DEGR_WFC_TIMEOUT_MAX 300000
-#define DRBD_DEGR_WFC_TIMEOUT_DEF 0
+#define DRBD_DEGR_WFC_TIMEOUT_MIN 0U
+#define DRBD_DEGR_WFC_TIMEOUT_MAX 300000U
+#define DRBD_DEGR_WFC_TIMEOUT_DEF 0U
#define DRBD_DEGR_WFC_TIMEOUT_SCALE '1'
-#define DRBD_OUTDATED_WFC_TIMEOUT_MIN 0
-#define DRBD_OUTDATED_WFC_TIMEOUT_MAX 300000
-#define DRBD_OUTDATED_WFC_TIMEOUT_DEF 0
+#define DRBD_OUTDATED_WFC_TIMEOUT_MIN 0U
+#define DRBD_OUTDATED_WFC_TIMEOUT_MAX 300000U
+#define DRBD_OUTDATED_WFC_TIMEOUT_DEF 0U
#define DRBD_OUTDATED_WFC_TIMEOUT_SCALE '1'
/* }*/
/* net { */
/* timeout, unit centi seconds
* more than one minute timeout is not useful */
-#define DRBD_TIMEOUT_MIN 1
-#define DRBD_TIMEOUT_MAX 600
-#define DRBD_TIMEOUT_DEF 60 /* 6 seconds */
+#define DRBD_TIMEOUT_MIN 1U
+#define DRBD_TIMEOUT_MAX 600U
+#define DRBD_TIMEOUT_DEF 60U /* 6 seconds */
#define DRBD_TIMEOUT_SCALE '1'
/* If backing disk takes longer than disk_timeout, mark the disk as failed */
-#define DRBD_DISK_TIMEOUT_MIN 0 /* 0 = disabled */
-#define DRBD_DISK_TIMEOUT_MAX 6000 /* 10 Minutes */
-#define DRBD_DISK_TIMEOUT_DEF 0 /* disabled */
+#define DRBD_DISK_TIMEOUT_MIN 0U /* 0 = disabled */
+#define DRBD_DISK_TIMEOUT_MAX 6000U /* 10 Minutes */
+#define DRBD_DISK_TIMEOUT_DEF 0U /* disabled */
#define DRBD_DISK_TIMEOUT_SCALE '1'
/* active connection retries when C_WF_CONNECTION */
-#define DRBD_CONNECT_INT_MIN 1
-#define DRBD_CONNECT_INT_MAX 120
-#define DRBD_CONNECT_INT_DEF 10 /* seconds */
+#define DRBD_CONNECT_INT_MIN 1U
+#define DRBD_CONNECT_INT_MAX 120U
+#define DRBD_CONNECT_INT_DEF 10U /* seconds */
#define DRBD_CONNECT_INT_SCALE '1'
/* keep-alive probes when idle */
-#define DRBD_PING_INT_MIN 1
-#define DRBD_PING_INT_MAX 120
-#define DRBD_PING_INT_DEF 10
+#define DRBD_PING_INT_MIN 1U
+#define DRBD_PING_INT_MAX 120U
+#define DRBD_PING_INT_DEF 10U
#define DRBD_PING_INT_SCALE '1'
/* timeout for the ping packets.*/
-#define DRBD_PING_TIMEO_MIN 1
-#define DRBD_PING_TIMEO_MAX 300
-#define DRBD_PING_TIMEO_DEF 5
+#define DRBD_PING_TIMEO_MIN 1U
+#define DRBD_PING_TIMEO_MAX 300U
+#define DRBD_PING_TIMEO_DEF 5U
#define DRBD_PING_TIMEO_SCALE '1'
/* max number of write requests between write barriers */
-#define DRBD_MAX_EPOCH_SIZE_MIN 1
-#define DRBD_MAX_EPOCH_SIZE_MAX 20000
-#define DRBD_MAX_EPOCH_SIZE_DEF 2048
+#define DRBD_MAX_EPOCH_SIZE_MIN 1U
+#define DRBD_MAX_EPOCH_SIZE_MAX 20000U
+#define DRBD_MAX_EPOCH_SIZE_DEF 2048U
#define DRBD_MAX_EPOCH_SIZE_SCALE '1'
/* I don't think that a tcp send buffer of more than 10M is useful */
-#define DRBD_SNDBUF_SIZE_MIN 0
-#define DRBD_SNDBUF_SIZE_MAX (10<<20)
-#define DRBD_SNDBUF_SIZE_DEF 0
+#define DRBD_SNDBUF_SIZE_MIN 0U
+#define DRBD_SNDBUF_SIZE_MAX (10U<<20)
+#define DRBD_SNDBUF_SIZE_DEF 0U
#define DRBD_SNDBUF_SIZE_SCALE '1'
-#define DRBD_RCVBUF_SIZE_MIN 0
-#define DRBD_RCVBUF_SIZE_MAX (10<<20)
-#define DRBD_RCVBUF_SIZE_DEF 0
+#define DRBD_RCVBUF_SIZE_MIN 0U
+#define DRBD_RCVBUF_SIZE_MAX (10U<<20)
+#define DRBD_RCVBUF_SIZE_DEF 0U
#define DRBD_RCVBUF_SIZE_SCALE '1'
/* @4k PageSize -> 128kB - 512MB */
-#define DRBD_MAX_BUFFERS_MIN 32
-#define DRBD_MAX_BUFFERS_MAX 131072
-#define DRBD_MAX_BUFFERS_DEF 2048
+#define DRBD_MAX_BUFFERS_MIN 32U
+#define DRBD_MAX_BUFFERS_MAX 131072U
+#define DRBD_MAX_BUFFERS_DEF 2048U
#define DRBD_MAX_BUFFERS_SCALE '1'
/* @4k PageSize -> 4kB - 512MB */
-#define DRBD_UNPLUG_WATERMARK_MIN 1
-#define DRBD_UNPLUG_WATERMARK_MAX 131072
+#define DRBD_UNPLUG_WATERMARK_MIN 1U
+#define DRBD_UNPLUG_WATERMARK_MAX 131072U
#define DRBD_UNPLUG_WATERMARK_DEF (DRBD_MAX_BUFFERS_DEF/16)
#define DRBD_UNPLUG_WATERMARK_SCALE '1'
/* 0 is disabled.
* 200 should be more than enough even for very short timeouts */
-#define DRBD_KO_COUNT_MIN 0
-#define DRBD_KO_COUNT_MAX 200
-#define DRBD_KO_COUNT_DEF 7
+#define DRBD_KO_COUNT_MIN 0U
+#define DRBD_KO_COUNT_MAX 200U
+#define DRBD_KO_COUNT_DEF 7U
#define DRBD_KO_COUNT_SCALE '1'
/* } */
/* syncer { */
/* FIXME allow rate to be zero? */
-#define DRBD_RESYNC_RATE_MIN 1
+#define DRBD_RESYNC_RATE_MIN 1U
/* channel bonding 10 GbE, or other hardware */
#define DRBD_RESYNC_RATE_MAX (4 << 20)
-#define DRBD_RESYNC_RATE_DEF 250
+#define DRBD_RESYNC_RATE_DEF 250U
#define DRBD_RESYNC_RATE_SCALE 'k' /* kilobytes */
-#define DRBD_AL_EXTENTS_MIN 67
+#define DRBD_AL_EXTENTS_MIN 67U
/* we use u16 as "slot number", (u16)~0 is "FREE".
* If you use >= 292 kB on-disk ring buffer,
* this is the maximum you can use: */
-#define DRBD_AL_EXTENTS_MAX 0xfffe
-#define DRBD_AL_EXTENTS_DEF 1237
+#define DRBD_AL_EXTENTS_MAX 0xfffeU
+#define DRBD_AL_EXTENTS_DEF 1237U
#define DRBD_AL_EXTENTS_SCALE '1'
#define DRBD_MINOR_NUMBER_MIN -1
@@ -147,9 +147,9 @@
* the upper limit with 64bit kernel, enough ram and flexible meta data
* is 1 PiB, currently. */
/* DRBD_MAX_SECTORS */
-#define DRBD_DISK_SIZE_MIN 0
-#define DRBD_DISK_SIZE_MAX (1 * (2LLU << 40))
-#define DRBD_DISK_SIZE_DEF 0 /* = disabled = no user size... */
+#define DRBD_DISK_SIZE_MIN 0LLU
+#define DRBD_DISK_SIZE_MAX (1LLU * (2LLU << 40))
+#define DRBD_DISK_SIZE_DEF 0LLU /* = disabled = no user size... */
#define DRBD_DISK_SIZE_SCALE 's' /* sectors */
#define DRBD_ON_IO_ERROR_DEF EP_DETACH
@@ -162,39 +162,39 @@
#define DRBD_ON_CONGESTION_DEF OC_BLOCK
#define DRBD_READ_BALANCING_DEF RB_PREFER_LOCAL
-#define DRBD_MAX_BIO_BVECS_MIN 0
-#define DRBD_MAX_BIO_BVECS_MAX 128
-#define DRBD_MAX_BIO_BVECS_DEF 0
+#define DRBD_MAX_BIO_BVECS_MIN 0U
+#define DRBD_MAX_BIO_BVECS_MAX 128U
+#define DRBD_MAX_BIO_BVECS_DEF 0U
#define DRBD_MAX_BIO_BVECS_SCALE '1'
-#define DRBD_C_PLAN_AHEAD_MIN 0
-#define DRBD_C_PLAN_AHEAD_MAX 300
-#define DRBD_C_PLAN_AHEAD_DEF 20
+#define DRBD_C_PLAN_AHEAD_MIN 0U
+#define DRBD_C_PLAN_AHEAD_MAX 300U
+#define DRBD_C_PLAN_AHEAD_DEF 20U
#define DRBD_C_PLAN_AHEAD_SCALE '1'
-#define DRBD_C_DELAY_TARGET_MIN 1
-#define DRBD_C_DELAY_TARGET_MAX 100
-#define DRBD_C_DELAY_TARGET_DEF 10
+#define DRBD_C_DELAY_TARGET_MIN 1U
+#define DRBD_C_DELAY_TARGET_MAX 100U
+#define DRBD_C_DELAY_TARGET_DEF 10U
#define DRBD_C_DELAY_TARGET_SCALE '1'
-#define DRBD_C_FILL_TARGET_MIN 0
-#define DRBD_C_FILL_TARGET_MAX (1<<20) /* 500MByte in sec */
-#define DRBD_C_FILL_TARGET_DEF 100 /* Try to place 50KiB in socket send buffer during resync */
+#define DRBD_C_FILL_TARGET_MIN 0U
+#define DRBD_C_FILL_TARGET_MAX (1U<<20) /* 500MByte in sec */
+#define DRBD_C_FILL_TARGET_DEF 100U /* Try to place 50KiB in socket send buffer during resync */
#define DRBD_C_FILL_TARGET_SCALE 's' /* sectors */
-#define DRBD_C_MAX_RATE_MIN 250
-#define DRBD_C_MAX_RATE_MAX (4 << 20)
-#define DRBD_C_MAX_RATE_DEF 102400
+#define DRBD_C_MAX_RATE_MIN 250U
+#define DRBD_C_MAX_RATE_MAX (4U << 20)
+#define DRBD_C_MAX_RATE_DEF 102400U
#define DRBD_C_MAX_RATE_SCALE 'k' /* kilobytes */
-#define DRBD_C_MIN_RATE_MIN 0
-#define DRBD_C_MIN_RATE_MAX (4 << 20)
-#define DRBD_C_MIN_RATE_DEF 250
+#define DRBD_C_MIN_RATE_MIN 0U
+#define DRBD_C_MIN_RATE_MAX (4U << 20)
+#define DRBD_C_MIN_RATE_DEF 250U
#define DRBD_C_MIN_RATE_SCALE 'k' /* kilobytes */
-#define DRBD_CONG_FILL_MIN 0
-#define DRBD_CONG_FILL_MAX (10<<21) /* 10GByte in sectors */
-#define DRBD_CONG_FILL_DEF 0
+#define DRBD_CONG_FILL_MIN 0U
+#define DRBD_CONG_FILL_MAX (10U<<21) /* 10GByte in sectors */
+#define DRBD_CONG_FILL_DEF 0U
#define DRBD_CONG_FILL_SCALE 's' /* sectors */
#define DRBD_CONG_EXTENTS_MIN DRBD_AL_EXTENTS_MIN
@@ -204,48 +204,48 @@
#define DRBD_PROTOCOL_DEF DRBD_PROT_C
-#define DRBD_DISK_BARRIER_DEF 0
-#define DRBD_DISK_FLUSHES_DEF 1
-#define DRBD_DISK_DRAIN_DEF 1
-#define DRBD_MD_FLUSHES_DEF 1
-#define DRBD_TCP_CORK_DEF 1
-#define DRBD_AL_UPDATES_DEF 1
+#define DRBD_DISK_BARRIER_DEF 0U
+#define DRBD_DISK_FLUSHES_DEF 1U
+#define DRBD_DISK_DRAIN_DEF 1U
+#define DRBD_MD_FLUSHES_DEF 1U
+#define DRBD_TCP_CORK_DEF 1U
+#define DRBD_AL_UPDATES_DEF 1U
/* We used to ignore the discard_zeroes_data setting.
* To not change established (and expected) behaviour,
* by default assume that, for discard_zeroes_data=0,
* we can make that an effective discard_zeroes_data=1,
* if we only explicitly zero-out unaligned partial chunks. */
-#define DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF 1
+#define DRBD_DISCARD_ZEROES_IF_ALIGNED_DEF 1U
/* Some backends pretend to support WRITE SAME,
* but fail such requests when they are actually submitted.
* This is to tell DRBD to not even try. */
-#define DRBD_DISABLE_WRITE_SAME_DEF 0
+#define DRBD_DISABLE_WRITE_SAME_DEF 0U
-#define DRBD_ALLOW_TWO_PRIMARIES_DEF 0
-#define DRBD_ALWAYS_ASBP_DEF 0
-#define DRBD_USE_RLE_DEF 1
-#define DRBD_CSUMS_AFTER_CRASH_ONLY_DEF 0
+#define DRBD_ALLOW_TWO_PRIMARIES_DEF 0U
+#define DRBD_ALWAYS_ASBP_DEF 0U
+#define DRBD_USE_RLE_DEF 1U
+#define DRBD_CSUMS_AFTER_CRASH_ONLY_DEF 0U
-#define DRBD_AL_STRIPES_MIN 1
-#define DRBD_AL_STRIPES_MAX 1024
-#define DRBD_AL_STRIPES_DEF 1
+#define DRBD_AL_STRIPES_MIN 1U
+#define DRBD_AL_STRIPES_MAX 1024U
+#define DRBD_AL_STRIPES_DEF 1U
#define DRBD_AL_STRIPES_SCALE '1'
-#define DRBD_AL_STRIPE_SIZE_MIN 4
-#define DRBD_AL_STRIPE_SIZE_MAX 16777216
-#define DRBD_AL_STRIPE_SIZE_DEF 32
+#define DRBD_AL_STRIPE_SIZE_MIN 4U
+#define DRBD_AL_STRIPE_SIZE_MAX 16777216U
+#define DRBD_AL_STRIPE_SIZE_DEF 32U
#define DRBD_AL_STRIPE_SIZE_SCALE 'k' /* kilobytes */
-#define DRBD_SOCKET_CHECK_TIMEO_MIN 0
+#define DRBD_SOCKET_CHECK_TIMEO_MIN 0U
#define DRBD_SOCKET_CHECK_TIMEO_MAX DRBD_PING_TIMEO_MAX
-#define DRBD_SOCKET_CHECK_TIMEO_DEF 0
+#define DRBD_SOCKET_CHECK_TIMEO_DEF 0U
#define DRBD_SOCKET_CHECK_TIMEO_SCALE '1'
-#define DRBD_RS_DISCARD_GRANULARITY_MIN 0
-#define DRBD_RS_DISCARD_GRANULARITY_MAX (1<<20) /* 1MiByte */
-#define DRBD_RS_DISCARD_GRANULARITY_DEF 0 /* disabled by default */
+#define DRBD_RS_DISCARD_GRANULARITY_MIN 0U
+#define DRBD_RS_DISCARD_GRANULARITY_MAX (1U<<20) /* 1MiByte */
+#define DRBD_RS_DISCARD_GRANULARITY_DEF 0U /* disabled by default */
#define DRBD_RS_DISCARD_GRANULARITY_SCALE '1' /* bytes */
#endif
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
index b12b05f1c8b4..d13aabdeb4b2 100644
--- a/include/linux/dsa/8021q.h
+++ b/include/linux/dsa/8021q.h
@@ -5,143 +5,33 @@
#ifndef _NET_DSA_8021Q_H
#define _NET_DSA_8021Q_H
-#include <linux/refcount.h>
+#include <net/dsa.h>
#include <linux/types.h>
-struct dsa_switch;
-struct sk_buff;
-struct net_device;
-struct packet_type;
-struct dsa_8021q_context;
-
-struct dsa_8021q_crosschip_link {
- struct list_head list;
- int port;
- struct dsa_8021q_context *other_ctx;
- int other_port;
- refcount_t refcount;
-};
-
-struct dsa_8021q_ops {
- int (*vlan_add)(struct dsa_switch *ds, int port, u16 vid, u16 flags);
- int (*vlan_del)(struct dsa_switch *ds, int port, u16 vid);
-};
-
-struct dsa_8021q_context {
- const struct dsa_8021q_ops *ops;
- struct dsa_switch *ds;
- struct list_head crosschip_links;
- /* EtherType of RX VID, used for filtering on master interface */
- __be16 proto;
-};
-
-#define DSA_8021Q_N_SUBVLAN 8
-
-#if IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q)
-
-int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled);
+/* VBID is limited to three bits only and zero is reserved.
+ * Only 7 bridges can be enumerated.
+ */
+#define DSA_TAG_8021Q_MAX_NUM_BRIDGES 7
-int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port,
- struct dsa_8021q_context *other_ctx,
- int other_port);
+int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto);
-int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port,
- struct dsa_8021q_context *other_ctx,
- int other_port);
+void dsa_tag_8021q_unregister(struct dsa_switch *ds);
-struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
- u16 tpid, u16 tci);
+int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack);
-u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port);
+void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge);
-u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port);
+u16 dsa_tag_8021q_bridge_vid(unsigned int bridge_num);
-u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan);
+u16 dsa_tag_8021q_standalone_vid(const struct dsa_port *dp);
int dsa_8021q_rx_switch_id(u16 vid);
int dsa_8021q_rx_source_port(u16 vid);
-u16 dsa_8021q_rx_subvlan(u16 vid);
-
-bool vid_is_dsa_8021q_rxvlan(u16 vid);
-
-bool vid_is_dsa_8021q_txvlan(u16 vid);
-
bool vid_is_dsa_8021q(u16 vid);
-#else
-
-int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled)
-{
- return 0;
-}
-
-int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port,
- struct dsa_8021q_context *other_ctx,
- int other_port)
-{
- return 0;
-}
-
-int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port,
- struct dsa_8021q_context *other_ctx,
- int other_port)
-{
- return 0;
-}
-
-struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
- u16 tpid, u16 tci)
-{
- return NULL;
-}
-
-u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port)
-{
- return 0;
-}
-
-u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port)
-{
- return 0;
-}
-
-u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan)
-{
- return 0;
-}
-
-int dsa_8021q_rx_switch_id(u16 vid)
-{
- return 0;
-}
-
-int dsa_8021q_rx_source_port(u16 vid)
-{
- return 0;
-}
-
-u16 dsa_8021q_rx_subvlan(u16 vid)
-{
- return 0;
-}
-
-bool vid_is_dsa_8021q_rxvlan(u16 vid)
-{
- return false;
-}
-
-bool vid_is_dsa_8021q_txvlan(u16 vid)
-{
- return false;
-}
-
-bool vid_is_dsa_8021q(u16 vid)
-{
- return false;
-}
-
-#endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */
-
#endif /* _NET_DSA_8021Q_H */
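
A short sketch of how a switch driver consumes the slimmed-down tag_8021q API above; the setup/teardown hooks are illustrative, and the driver must also implement the .tag_8021q_vlan_add/.tag_8021q_vlan_del dsa_switch_ops (not shown).

static int example_setup(struct dsa_switch *ds)
{
	/* Installs the standalone VIDs on all ports via the driver's
	 * tag_8021q VLAN ops. */
	return dsa_tag_8021q_register(ds, htons(ETH_P_8021Q));
}

static void example_teardown(struct dsa_switch *ds)
{
	dsa_tag_8021q_unregister(ds);
}

/* On receive, the tagger recovers the source of a frame from its VID: */
static void example_decode(u16 vid, int *switch_id, int *port)
{
	if (!vid_is_dsa_8021q(vid))
		return;
	*switch_id = dsa_8021q_rx_switch_id(vid);
	*port = dsa_8021q_rx_source_port(vid);
}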
diff --git a/include/linux/dsa/ksz_common.h b/include/linux/dsa/ksz_common.h
new file mode 100644
index 000000000000..576a99ca698d
--- /dev/null
+++ b/include/linux/dsa/ksz_common.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip switch tag common header
+ *
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#ifndef _NET_DSA_KSZ_COMMON_H_
+#define _NET_DSA_KSZ_COMMON_H_
+
+#include <net/dsa.h>
+
+/* All time stamps from the KSZ consist of 2 bits for seconds and 30 bits for
+ * nanoseconds. This is NOT the same as 32 bits for nanoseconds.
+ */
+#define KSZ_TSTAMP_SEC_MASK GENMASK(31, 30)
+#define KSZ_TSTAMP_NSEC_MASK GENMASK(29, 0)
+
+static inline ktime_t ksz_decode_tstamp(u32 tstamp)
+{
+ u64 ns = FIELD_GET(KSZ_TSTAMP_SEC_MASK, tstamp) * NSEC_PER_SEC +
+ FIELD_GET(KSZ_TSTAMP_NSEC_MASK, tstamp);
+
+ return ns_to_ktime(ns);
+}
+
+struct ksz_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+struct ksz_tagger_data {
+ void (*xmit_work_fn)(struct kthread_work *work);
+ void (*hwtstamp_set_state)(struct dsa_switch *ds, bool on);
+};
+
+struct ksz_skb_cb {
+ struct sk_buff *clone;
+ unsigned int ptp_type;
+ bool update_correction;
+ u32 tstamp;
+};
+
+#define KSZ_SKB_CB(skb) \
+ ((struct ksz_skb_cb *)((skb)->cb))
+
+static inline struct ksz_tagger_data *
+ksz_tagger_data(struct dsa_switch *ds)
+{
+ return ds->tagger_data;
+}
+
+#endif /* _NET_DSA_KSZ_COMMON_H_ */
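
For reference, a sketch of how a tagger RX path might use ksz_decode_tstamp(); since the hardware field carries only 2 bits of seconds, the result wraps every 4 seconds and must be reconciled against the PTP clock elsewhere (not shown). The function name is illustrative.

static void example_ksz_rx_tstamp(struct sk_buff *skb, u32 raw_tstamp)
{
	struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);

	/* Stash the raw value for later correction handling */
	KSZ_SKB_CB(skb)->tstamp = raw_tstamp;

	/* Partial timestamp: 2-bit seconds + 30-bit nanoseconds */
	hwts->hwtstamp = ksz_decode_tstamp(raw_tstamp);
}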
diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h
index b4f22112ba75..3ce7cbcc37a3 100644
--- a/include/linux/dsa/lan9303.h
+++ b/include/linux/dsa/lan9303.h
@@ -5,8 +5,8 @@ struct lan9303;
struct lan9303_phy_ops {
/* PHY 1 and 2 access*/
- int (*phy_read)(struct lan9303 *chip, int port, int regnum);
- int (*phy_write)(struct lan9303 *chip, int port,
+ int (*phy_read)(struct lan9303 *chip, int addr, int regnum);
+ int (*phy_write)(struct lan9303 *chip, int addr,
int regnum, u16 val);
};
diff --git a/include/linux/dsa/loop.h b/include/linux/dsa/loop.h
index 5a3470bcc8a7..b8fef35591aa 100644
--- a/include/linux/dsa/loop.h
+++ b/include/linux/dsa/loop.h
@@ -2,6 +2,7 @@
#ifndef DSA_LOOP_H
#define DSA_LOOP_H
+#include <linux/if_vlan.h>
#include <linux/types.h>
#include <linux/ethtool.h>
#include <net/dsa.h>
diff --git a/include/linux/dsa/mv88e6xxx.h b/include/linux/dsa/mv88e6xxx.h
new file mode 100644
index 000000000000..8c3d45eca46b
--- /dev/null
+++ b/include/linux/dsa/mv88e6xxx.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright 2021 NXP
+ */
+
+#ifndef _NET_DSA_TAG_MV88E6XXX_H
+#define _NET_DSA_TAG_MV88E6XXX_H
+
+#include <linux/if_vlan.h>
+
+#define MV88E6XXX_VID_STANDALONE 0
+#define MV88E6XXX_VID_BRIDGED (VLAN_N_VID - 1)
+
+#endif
diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
index c6bc45ae5e03..620a3260fc08 100644
--- a/include/linux/dsa/ocelot.h
+++ b/include/linux/dsa/ocelot.h
@@ -1,11 +1,37 @@
/* SPDX-License-Identifier: GPL-2.0
- * Copyright 2019-2021 NXP Semiconductors
+ * Copyright 2019-2021 NXP
*/
#ifndef _NET_DSA_TAG_OCELOT_H
#define _NET_DSA_TAG_OCELOT_H
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/kthread.h>
#include <linux/packing.h>
+#include <linux/skbuff.h>
+#include <net/dsa.h>
+
+struct ocelot_skb_cb {
+ struct sk_buff *clone;
+ unsigned int ptp_class; /* valid only for clones */
+ unsigned long ptp_tx_time; /* valid only for clones */
+ u32 tstamp_lo;
+ u8 ptp_cmd;
+ u8 ts_id;
+};
+
+#define OCELOT_SKB_CB(skb) \
+ ((struct ocelot_skb_cb *)((skb)->cb))
+
+#define IFH_TAG_TYPE_C 0
+#define IFH_TAG_TYPE_S 1
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_DSCP 0x1
+#define IFH_REW_OP_ONE_STEP_PTP 0x2
+#define IFH_REW_OP_TWO_STEP_PTP 0x3
+#define IFH_REW_OP_ORIGIN_PTP 0x5
#define OCELOT_TAG_LEN 16
#define OCELOT_SHORT_PREFIX_LEN 4
@@ -140,6 +166,24 @@
* +------+------+------+------+------+------+------+------+
*/
+struct felix_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+struct ocelot_8021q_tagger_data {
+ void (*xmit_work_fn)(struct kthread_work *work);
+};
+
+static inline struct ocelot_8021q_tagger_data *
+ocelot_8021q_tagger_data(struct dsa_switch *ds)
+{
+ BUG_ON(ds->dst->tag_ops->proto != DSA_TAG_PROTO_OCELOT_8021Q);
+
+ return ds->tagger_data;
+}
+
static inline void ocelot_xfh_get_rew_val(void *extraction, u64 *rew_val)
{
packing(extraction, rew_val, 116, 85, OCELOT_TAG_LEN, UNPACK, 0);
@@ -210,9 +254,71 @@ static inline void ocelot_ifh_set_tag_type(void *injection, u64 tag_type)
packing(injection, &tag_type, 16, 16, OCELOT_TAG_LEN, PACK, 0);
}
-static inline void ocelot_ifh_set_vid(void *injection, u64 vid)
+static inline void ocelot_ifh_set_vlan_tci(void *injection, u64 vlan_tci)
+{
+ packing(injection, &vlan_tci, 15, 0, OCELOT_TAG_LEN, PACK, 0);
+}
+
+/* Determine the PTP REW_OP to use for injecting the given skb */
+static inline u32 ocelot_ptp_rew_op(struct sk_buff *skb)
+{
+ struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone;
+ u8 ptp_cmd = OCELOT_SKB_CB(skb)->ptp_cmd;
+ u32 rew_op = 0;
+
+ if (ptp_cmd == IFH_REW_OP_TWO_STEP_PTP && clone) {
+ rew_op = ptp_cmd;
+ rew_op |= OCELOT_SKB_CB(clone)->ts_id << 3;
+ } else if (ptp_cmd == IFH_REW_OP_ORIGIN_PTP) {
+ rew_op = ptp_cmd;
+ }
+
+ return rew_op;
+}
+
+/**
+ * ocelot_xmit_get_vlan_info: Determine VLAN_TCI and TAG_TYPE for injected frame
+ * @skb: Pointer to socket buffer
+ * @br: Pointer to bridge device that the port is under, if any
+ * @vlan_tci: pointer to output VLAN TCI for the injection header
+ * @tag_type: pointer to output tag type (IFH_TAG_TYPE_C or IFH_TAG_TYPE_S)
+ *
+ * If the port is under a VLAN-aware bridge, remove the VLAN header from the
+ * payload and move it into the DSA tag, which will make the switch classify
+ * the packet to the bridge VLAN. Otherwise, leave the classified VLAN at zero,
+ * which is the pvid of standalone ports (OCELOT_STANDALONE_PVID), although not
+ * of VLAN-unaware bridge ports (that would be ocelot_vlan_unaware_pvid()).
+ * Anyway, VID 0 is fine because it is stripped on egress for these port modes,
+ * and source address learning is not performed for packets injected from the
+ * CPU anyway, so it doesn't matter that the VID is "wrong".
+ */
+static inline void ocelot_xmit_get_vlan_info(struct sk_buff *skb,
+ struct net_device *br,
+ u64 *vlan_tci, u64 *tag_type)
{
- packing(injection, &vid, 11, 0, OCELOT_TAG_LEN, PACK, 0);
+ struct vlan_ethhdr *hdr;
+ u16 proto, tci;
+
+ if (!br || !br_vlan_enabled(br)) {
+ *vlan_tci = 0;
+ *tag_type = IFH_TAG_TYPE_C;
+ return;
+ }
+
+ hdr = (struct vlan_ethhdr *)skb_mac_header(skb);
+ br_vlan_get_proto(br, &proto);
+
+ if (ntohs(hdr->h_vlan_proto) == proto) {
+ vlan_remove_tag(skb, &tci);
+ *vlan_tci = tci;
+ } else {
+ rcu_read_lock();
+ br_vlan_get_pvid_rcu(br, &tci);
+ rcu_read_unlock();
+ *vlan_tci = tci;
+ }
+
+ *tag_type = (proto != ETH_P_8021Q) ? IFH_TAG_TYPE_S : IFH_TAG_TYPE_C;
}
#endif
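
Putting the new ocelot helpers together, an injection path could look roughly like this; ocelot_ifh_set_rew_op() is assumed to come from the same header, and the function name here is illustrative.

static void example_ocelot_ifh_fill(void *injection, struct sk_buff *skb,
				    struct net_device *br)
{
	u64 vlan_tci, tag_type;
	u32 rew_op;

	/* Classify the frame's VLAN based on the (optional) bridge */
	ocelot_xmit_get_vlan_info(skb, br, &vlan_tci, &tag_type);
	ocelot_ifh_set_tag_type(injection, tag_type);
	ocelot_ifh_set_vlan_tci(injection, vlan_tci);

	/* Request PTP timestamping from the rewriter if needed */
	rew_op = ocelot_ptp_rew_op(skb);
	if (rew_op)
		ocelot_ifh_set_rew_op(injection, rew_op);
}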
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
index 1eb84562b311..b9dd35d4b8f5 100644
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -14,6 +14,9 @@
#define ETH_P_SJA1105 ETH_P_DSA_8021Q
#define ETH_P_SJA1105_META 0x0008
+#define ETH_P_SJA1110 0xdadc
+
+#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1)
/* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */
#define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull
@@ -25,44 +28,48 @@
/* Source and Destination MAC of follow-up meta frames.
* Whereas the choice of SMAC only affects the unique identification of the
* switch as sender of meta frames, the DMAC must be an address that is present
- * in the DSA master port's multicast MAC filter.
+ * in the DSA conduit port's multicast MAC filter.
* 01-80-C2-00-00-0E is a good choice for this, as all profiles of IEEE 1588
* over L2 use this address for some purpose already.
*/
#define SJA1105_META_SMAC 0x222222222222ull
#define SJA1105_META_DMAC 0x0180C200000Eull
-#define SJA1105_HWTS_RX_EN 0
+enum sja1110_meta_tstamp {
+ SJA1110_META_TSTAMP_TX = 0,
+ SJA1110_META_TSTAMP_RX = 1,
+};
-/* Global tagger data: each struct sja1105_port has a reference to
- * the structure defined in struct sja1105_private.
- */
+struct sja1105_deferred_xmit_work {
+ struct dsa_port *dp;
+ struct sk_buff *skb;
+ struct kthread_work work;
+};
+
+/* Global tagger data */
struct sja1105_tagger_data {
- struct sk_buff *stampable_skb;
- /* Protects concurrent access to the meta state machine
- * from taggers running on multiple ports on SMP systems
- */
- spinlock_t meta_lock;
- unsigned long state;
+ void (*xmit_work_fn)(struct kthread_work *work);
+ void (*meta_tstamp_handler)(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp);
};
struct sja1105_skb_cb {
struct sk_buff *clone;
- u32 meta_tstamp;
+ u64 tstamp;
+ /* Only valid for packets cloned for 2-step TX timestamping */
+ u8 ts_id;
};
#define SJA1105_SKB_CB(skb) \
((struct sja1105_skb_cb *)((skb)->cb))
-struct sja1105_port {
- u16 subvlan_map[DSA_8021Q_N_SUBVLAN];
- struct kthread_worker *xmit_worker;
- struct kthread_work xmit_work;
- struct sk_buff_head xmit_queue;
- struct sja1105_tagger_data *data;
- struct dsa_port *dp;
- bool hwts_tx_en;
- u16 xmit_tpid;
-};
+static inline struct sja1105_tagger_data *
+sja1105_tagger_data(struct dsa_switch *ds)
+{
+ BUG_ON(ds->dst->tag_ops->proto != DSA_TAG_PROTO_SJA1105 &&
+ ds->dst->tag_ops->proto != DSA_TAG_PROTO_SJA1110);
+
+ return ds->tagger_data;
+}
#endif /* _NET_DSA_SJA1105_H */
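
A sketch of the driver side of the reworked sja1105 tagger interface: after the tagger connects, the driver publishes its deferred-xmit and meta-timestamp handlers. The example_* functions are placeholders.

static void example_xmit_work(struct kthread_work *work)
{
	struct sja1105_deferred_xmit_work *xmit_work =
		container_of(work, struct sja1105_deferred_xmit_work, work);

	/* ... transmit xmit_work->skb on xmit_work->dp ... */
}

static void example_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
				enum sja1110_meta_tstamp dir, u64 tstamp)
{
	/* ... match tstamp against a cloned skb by ts_id ... */
}

static int example_connect(struct dsa_switch *ds)
{
	struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);

	tagger_data->xmit_work_fn = example_xmit_work;
	tagger_data->meta_tstamp_handler = example_meta_tstamp;
	return 0;
}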
diff --git a/include/linux/dsa/tag_qca.h b/include/linux/dsa/tag_qca.h
new file mode 100644
index 000000000000..ee657452f122
--- /dev/null
+++ b/include/linux/dsa/tag_qca.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __TAG_QCA_H
+#define __TAG_QCA_H
+
+#include <linux/types.h>
+
+struct dsa_switch;
+struct sk_buff;
+
+#define QCA_HDR_LEN 2
+#define QCA_HDR_VERSION 0x2
+
+#define QCA_HDR_RECV_VERSION GENMASK(15, 14)
+#define QCA_HDR_RECV_PRIORITY GENMASK(13, 11)
+#define QCA_HDR_RECV_TYPE GENMASK(10, 6)
+#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3)
+#define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0)
+
+/* Packet type for recv */
+#define QCA_HDR_RECV_TYPE_NORMAL 0x0
+#define QCA_HDR_RECV_TYPE_MIB 0x1
+#define QCA_HDR_RECV_TYPE_RW_REG_ACK 0x2
+
+#define QCA_HDR_XMIT_VERSION GENMASK(15, 14)
+#define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11)
+#define QCA_HDR_XMIT_CONTROL GENMASK(10, 8)
+#define QCA_HDR_XMIT_FROM_CPU BIT(7)
+#define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0)
+
+/* Packet type for xmit */
+#define QCA_HDR_XMIT_TYPE_NORMAL 0x0
+#define QCA_HDR_XMIT_TYPE_RW_REG 0x1
+
+/* Check code for a valid mgmt packet. The switch will ignore packets
+ * where this field is wrong.
+ */
+#define QCA_HDR_MGMT_CHECK_CODE_VAL 0x5
+
+/* Specific define for in-band MDIO read/write with Ethernet packet */
+#define QCA_HDR_MGMT_SEQ_LEN 4 /* 4 byte for the seq */
+#define QCA_HDR_MGMT_COMMAND_LEN 4 /* 4 byte for the command */
+#define QCA_HDR_MGMT_DATA1_LEN 4 /* First 4 byte for the mdio data */
+#define QCA_HDR_MGMT_HEADER_LEN (QCA_HDR_MGMT_SEQ_LEN + \
+ QCA_HDR_MGMT_COMMAND_LEN + \
+ QCA_HDR_MGMT_DATA1_LEN)
+
+#define QCA_HDR_MGMT_DATA2_LEN 28 /* Other 28 byte for the mdio data */
+#define QCA_HDR_MGMT_PADDING_LEN 18 /* Padding to reach the min Ethernet packet */
+
+#define QCA_HDR_MGMT_PKT_LEN (QCA_HDR_MGMT_HEADER_LEN + \
+ QCA_HDR_LEN + \
+ QCA_HDR_MGMT_DATA2_LEN + \
+ QCA_HDR_MGMT_PADDING_LEN)
+
+#define QCA_HDR_MGMT_SEQ_NUM GENMASK(31, 0) /* 63, 32 */
+#define QCA_HDR_MGMT_CHECK_CODE GENMASK(31, 29) /* 31, 29 */
+#define QCA_HDR_MGMT_CMD BIT(28) /* 28 */
+#define QCA_HDR_MGMT_LENGTH GENMASK(23, 20) /* 23, 20 */
+#define QCA_HDR_MGMT_ADDR GENMASK(18, 0) /* 18, 0 */
+
+/* Special struct emulating an Ethernet header */
+struct qca_mgmt_ethhdr {
+ __le32 command; /* command bit 31:0 */
+ __le32 seq; /* seq 63:32 */
+ __le32 mdio_data; /* first 4byte mdio */
+ __be16 hdr; /* qca hdr */
+} __packed;
+
+enum mdio_cmd {
+ MDIO_WRITE = 0x0,
+ MDIO_READ
+};
+
+struct mib_ethhdr {
+ __le32 data[3]; /* first 3 mib counter */
+ __be16 hdr; /* qca hdr */
+} __packed;
+
+struct qca_tagger_data {
+ void (*rw_reg_ack_handler)(struct dsa_switch *ds,
+ struct sk_buff *skb);
+ void (*mib_autocast_handler)(struct dsa_switch *ds,
+ struct sk_buff *skb);
+};
+
+#endif /* __TAG_QCA_H */
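
To show the receive-side field masks above in action, a minimal sketch of tag parsing with FIELD_GET(); the function and its return conventions are illustrative.

#include <linux/bitfield.h>

static int example_qca_parse(u16 hdr)
{
	if (FIELD_GET(QCA_HDR_RECV_VERSION, hdr) != QCA_HDR_VERSION)
		return -EPROTO;

	switch (FIELD_GET(QCA_HDR_RECV_TYPE, hdr)) {
	case QCA_HDR_RECV_TYPE_NORMAL:
		/* data frame: recover the ingress port */
		return FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, hdr);
	case QCA_HDR_RECV_TYPE_RW_REG_ACK:
	case QCA_HDR_RECV_TYPE_MIB:
		/* dispatched to the qca_tagger_data handlers */
		return -EINPROGRESS;
	default:
		return -EINVAL;
	}
}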
diff --git a/include/linux/dtpm.h b/include/linux/dtpm.h
index e80a332e3d8a..a4a13514b730 100644
--- a/include/linux/dtpm.h
+++ b/include/linux/dtpm.h
@@ -23,55 +23,51 @@ struct dtpm {
u64 power_max;
u64 power_min;
int weight;
- void *private;
};
struct dtpm_ops {
u64 (*set_power_uw)(struct dtpm *, u64);
u64 (*get_power_uw)(struct dtpm *);
+ int (*update_power_uw)(struct dtpm *);
void (*release)(struct dtpm *);
};
-struct dtpm_descr;
+struct device_node;
-typedef int (*dtpm_init_t)(struct dtpm_descr *);
-
-struct dtpm_descr {
- struct dtpm *parent;
+struct dtpm_subsys_ops {
const char *name;
- dtpm_init_t init;
+ int (*init)(void);
+ void (*exit)(void);
+ int (*setup)(struct dtpm *, struct device_node *);
};
-/* Init section thermal table */
-extern struct dtpm_descr *__dtpm_table[];
-extern struct dtpm_descr *__dtpm_table_end[];
-
-#define DTPM_TABLE_ENTRY(name) \
- static typeof(name) *__dtpm_table_entry_##name \
- __used __section("__dtpm_table") = &name
-
-#define DTPM_DECLARE(name) DTPM_TABLE_ENTRY(name)
+enum DTPM_NODE_TYPE {
+ DTPM_NODE_VIRTUAL = 0,
+ DTPM_NODE_DT,
+};
-#define for_each_dtpm_table(__dtpm) \
- for (__dtpm = __dtpm_table; \
- __dtpm < __dtpm_table_end; \
- __dtpm++)
+struct dtpm_node {
+ enum DTPM_NODE_TYPE type;
+ const char *name;
+ struct dtpm_node *parent;
+};
static inline struct dtpm *to_dtpm(struct powercap_zone *zone)
{
return container_of(zone, struct dtpm, zone);
}
-int dtpm_update_power(struct dtpm *dtpm, u64 power_min, u64 power_max);
+int dtpm_update_power(struct dtpm *dtpm);
int dtpm_release_zone(struct powercap_zone *pcz);
-struct dtpm *dtpm_alloc(struct dtpm_ops *ops);
+void dtpm_init(struct dtpm *dtpm, struct dtpm_ops *ops);
void dtpm_unregister(struct dtpm *dtpm);
int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent);
-int dtpm_register_cpu(struct dtpm *parent);
+int dtpm_create_hierarchy(struct of_device_id *dtpm_match_table);
+void dtpm_destroy_hierarchy(void);
#endif
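
A sketch of the new device-tree-driven dtpm flow: the platform describes a node hierarchy and hands a match table to dtpm_create_hierarchy(). The compatible string and node names are placeholders; for DTPM_NODE_DT entries the name is assumed to be a device-tree path.

static struct dtpm_node example_hierarchy[] = {
	{ .name = "package", .type = DTPM_NODE_VIRTUAL },
	{ .name = "/cpus", .type = DTPM_NODE_DT,
	  .parent = &example_hierarchy[0] },
	{ },
};

static struct of_device_id example_match_table[] = {
	{ .compatible = "vendor,board", .data = example_hierarchy },
	{},
};

static int __init example_dtpm_init(void)
{
	return dtpm_create_hierarchy(example_match_table);
}

static void __exit example_dtpm_exit(void)
{
	dtpm_destroy_hierarchy();
}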
diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h
index 82ebf9223948..f8811c46b89e 100644
--- a/include/linux/dw_apb_timer.h
+++ b/include/linux/dw_apb_timer.h
@@ -34,9 +34,6 @@ struct dw_apb_clocksource {
};
void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced);
-void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced);
-void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced);
-void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced);
struct dw_apb_clock_event_device *
dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index a57ee75342cf..05743900a116 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -6,6 +6,8 @@
#include <linux/jump_label.h>
#endif
+#include <linux/build_bug.h>
+
/*
* An instance of this structure is created in a special
* ELF section at every dynamic debug callsite. At runtime,
@@ -21,6 +23,9 @@ struct _ddebug {
const char *filename;
const char *format;
unsigned int lineno:18;
+#define CLS_BITS 6
+ unsigned int class_id:CLS_BITS;
+#define _DPRINTK_CLASS_DFLT ((1 << CLS_BITS) - 1)
/*
* The flags field controls the behaviour at the callsite.
* The bits here are changed dynamically when the user
@@ -32,6 +37,14 @@ struct _ddebug {
#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2)
#define _DPRINTK_FLAGS_INCL_LINENO (1<<3)
#define _DPRINTK_FLAGS_INCL_TID (1<<4)
+#define _DPRINTK_FLAGS_INCL_SOURCENAME (1<<5)
+#define _DPRINTK_FLAGS_INCL_STACK (1<<6)
+
+#define _DPRINTK_FLAGS_INCL_ANY \
+ (_DPRINTK_FLAGS_INCL_MODNAME | _DPRINTK_FLAGS_INCL_FUNCNAME |\
+ _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID |\
+ _DPRINTK_FLAGS_INCL_SOURCENAME | _DPRINTK_FLAGS_INCL_STACK)
+
#if defined DEBUG
#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT
#else
@@ -46,22 +59,88 @@ struct _ddebug {
#endif
} __attribute__((aligned(8)));
+enum class_map_type {
+ DD_CLASS_TYPE_DISJOINT_BITS,
+ /**
+ * DD_CLASS_TYPE_DISJOINT_BITS: classes are independent, one per bit.
+ * expecting hex input. Built for drm.debug, basis for other types.
+ */
+ DD_CLASS_TYPE_LEVEL_NUM,
+ /**
+ * DD_CLASS_TYPE_LEVEL_NUM: input is numeric level, 0-N.
+ * N turns on just bits N-1 .. 0, so N=0 turns all bits off.
+ */
+ DD_CLASS_TYPE_DISJOINT_NAMES,
+ /**
+ * DD_CLASS_TYPE_DISJOINT_NAMES: input is a CSV of [+-]CLASS_NAMES,
+ * classes are independent, like _DISJOINT_BITS.
+ */
+ DD_CLASS_TYPE_LEVEL_NAMES,
+ /**
+ * DD_CLASS_TYPE_LEVEL_NAMES: input is a CSV of [+-]CLASS_NAMES,
+	 * intended for names like: INFO, DEBUG, TRACE, with a module prefix;
+	 * avoid EMERG, ALERT, CRIT, ERR, WARNING: they're not debug levels.
+ */
+};
+
+struct ddebug_class_map {
+ struct list_head link;
+ struct module *mod;
+ const char *mod_name; /* needed for builtins */
+ const char **class_names;
+ const int length;
+ const int base; /* index of 1st .class_id, allows split/shared space */
+ enum class_map_type map_type;
+};
+
+/**
+ * DECLARE_DYNDBG_CLASSMAP - declare classnames known by a module
+ * @_var: a struct ddebug_class_map, passed to module_param_cb
+ * @_maptype: enum class_map_type, chooses bits/verbose, numeric/symbolic
+ * @_base: offset of 1st class-name. splits .class_id space
+ * @classes: class-names used to control class'd prdbgs
+ */
+#define DECLARE_DYNDBG_CLASSMAP(_var, _maptype, _base, ...) \
+ static const char *_var##_classnames[] = { __VA_ARGS__ }; \
+ static struct ddebug_class_map __aligned(8) __used \
+ __section("__dyndbg_classes") _var = { \
+ .mod = THIS_MODULE, \
+ .mod_name = KBUILD_MODNAME, \
+ .base = _base, \
+ .map_type = _maptype, \
+ .length = NUM_TYPE_ARGS(char*, __VA_ARGS__), \
+ .class_names = _var##_classnames, \
+ }
+#define NUM_TYPE_ARGS(eltype, ...) \
+ (sizeof((eltype[]){__VA_ARGS__}) / sizeof(eltype))
+
+/* encapsulate linker provided built-in (or module) dyndbg data */
+struct _ddebug_info {
+ struct _ddebug *descs;
+ struct ddebug_class_map *classes;
+ unsigned int num_descs;
+ unsigned int num_classes;
+};
+
+struct ddebug_class_param {
+ union {
+ unsigned long *bits;
+ unsigned int *lvl;
+ };
+ char flags[8];
+ const struct ddebug_class_map *map;
+};
+/*
+ * pr_debug() and friends are globally enabled or modules have selectively
+ * enabled them.
+ */
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
-#if defined(CONFIG_DYNAMIC_DEBUG_CORE)
-
-/* exported for module authors to exercise >control */
-int dynamic_debug_exec_queries(const char *query, const char *modname);
-
-int ddebug_add_module(struct _ddebug *tab, unsigned int n,
- const char *modname);
-extern int ddebug_remove_module(const char *mod_name);
extern __printf(2, 3)
void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
-extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
- const char *modname);
-
struct device;
extern __printf(3, 4)
@@ -82,7 +161,13 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
const struct ib_device *ibdev,
const char *fmt, ...);
-#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+#define __dynamic_dump_stack(desc) \
+{ \
+ if (desc.flags & _DPRINTK_FLAGS_INCL_STACK) \
+ dump_stack(); \
+}
+
+#define DEFINE_DYNAMIC_DEBUG_METADATA_CLS(name, cls, fmt) \
static struct _ddebug __aligned(8) \
__section("__dyndbg") name = { \
.modname = KBUILD_MODNAME, \
@@ -91,8 +176,14 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
.format = (fmt), \
.lineno = __LINE__, \
.flags = _DPRINTK_FLAGS_DEFAULT, \
+ .class_id = cls, \
_DPRINTK_KEY_INIT \
- }
+ }; \
+ BUILD_BUG_ON_MSG(cls > _DPRINTK_CLASS_DFLT, \
+ "classid value overflow")
+
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(name, _DPRINTK_CLASS_DFLT, fmt)
#ifdef CONFIG_JUMP_LABEL
@@ -123,17 +214,38 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
#endif /* CONFIG_JUMP_LABEL */
-#define __dynamic_func_call(id, fmt, func, ...) do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \
- if (DYNAMIC_DEBUG_BRANCH(id)) \
- func(&id, ##__VA_ARGS__); \
+/*
+ * Factory macros: ($prefix)dynamic_func_call($suffix)
+ *
+ * Lower layer (with __ prefix) gets the callsite metadata, and wraps
+ * the func inside a debug-branch/static-key construct. Upper layer
+ * (with _ prefix) does the UNIQUE_ID once, so that lower can ref the
+ * name/label multiple times, and tie the elements together.
+ * Multiple flavors:
+ * (|_cls): adds in _DPRINTK_CLASS_DFLT as needed
+ * (|_no_desc): former gets callsite descriptor as 1st arg (for prdbgs)
+ */
+#define __dynamic_func_call_cls(id, cls, fmt, func, ...) do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \
+ if (DYNAMIC_DEBUG_BRANCH(id)) { \
+ func(&id, ##__VA_ARGS__); \
+ __dynamic_dump_stack(id); \
+ } \
} while (0)
-
-#define __dynamic_func_call_no_desc(id, fmt, func, ...) do { \
- DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \
- if (DYNAMIC_DEBUG_BRANCH(id)) \
- func(__VA_ARGS__); \
+#define __dynamic_func_call(id, fmt, func, ...) \
+ __dynamic_func_call_cls(id, _DPRINTK_CLASS_DFLT, fmt, \
+ func, ##__VA_ARGS__)
+
+#define __dynamic_func_call_cls_no_desc(id, cls, fmt, func, ...) do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA_CLS(id, cls, fmt); \
+ if (DYNAMIC_DEBUG_BRANCH(id)) { \
+ func(__VA_ARGS__); \
+ __dynamic_dump_stack(id); \
+ } \
} while (0)
+#define __dynamic_func_call_no_desc(id, fmt, func, ...) \
+ __dynamic_func_call_cls_no_desc(id, _DPRINTK_CLASS_DFLT, \
+ fmt, func, ##__VA_ARGS__)
/*
* "Factory macro" for generating a call to func, guarded by a
@@ -143,22 +255,33 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
* the varargs. Note that fmt is repeated in invocations of this
* macro.
*/
+#define _dynamic_func_call_cls(cls, fmt, func, ...) \
+ __dynamic_func_call_cls(__UNIQUE_ID(ddebug), cls, fmt, func, ##__VA_ARGS__)
#define _dynamic_func_call(fmt, func, ...) \
- __dynamic_func_call(__UNIQUE_ID(ddebug), fmt, func, ##__VA_ARGS__)
+ _dynamic_func_call_cls(_DPRINTK_CLASS_DFLT, fmt, func, ##__VA_ARGS__)
+
/*
* A variant that does the same, except that the descriptor is not
* passed as the first argument to the function; it is only called
* with precisely the macro's varargs.
*/
-#define _dynamic_func_call_no_desc(fmt, func, ...) \
- __dynamic_func_call_no_desc(__UNIQUE_ID(ddebug), fmt, func, ##__VA_ARGS__)
+#define _dynamic_func_call_cls_no_desc(cls, fmt, func, ...) \
+ __dynamic_func_call_cls_no_desc(__UNIQUE_ID(ddebug), cls, fmt, \
+ func, ##__VA_ARGS__)
+#define _dynamic_func_call_no_desc(fmt, func, ...) \
+ _dynamic_func_call_cls_no_desc(_DPRINTK_CLASS_DFLT, fmt, \
+ func, ##__VA_ARGS__)
+
+#define dynamic_pr_debug_cls(cls, fmt, ...) \
+ _dynamic_func_call_cls(cls, fmt, __dynamic_pr_debug, \
+ pr_fmt(fmt), ##__VA_ARGS__)
#define dynamic_pr_debug(fmt, ...) \
- _dynamic_func_call(fmt, __dynamic_pr_debug, \
+ _dynamic_func_call(fmt, __dynamic_pr_debug, \
pr_fmt(fmt), ##__VA_ARGS__)
#define dynamic_dev_dbg(dev, fmt, ...) \
- _dynamic_func_call(fmt,__dynamic_dev_dbg, \
+ _dynamic_func_call(fmt, __dynamic_dev_dbg, \
dev, fmt, ##__VA_ARGS__)
#define dynamic_netdev_dbg(dev, fmt, ...) \
@@ -176,27 +299,50 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
KERN_DEBUG, prefix_str, prefix_type, \
rowsize, groupsize, buf, len, ascii)
-#else /* !CONFIG_DYNAMIC_DEBUG_CORE */
+/* for test only, generally expect drm.debug style macro wrappers */
+#define __pr_debug_cls(cls, fmt, ...) do { \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(cls), \
+ "expecting constant class int/enum"); \
+ dynamic_pr_debug_cls(cls, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#else /* !(CONFIG_DYNAMIC_DEBUG || (CONFIG_DYNAMIC_DEBUG_CORE && DYNAMIC_DEBUG_MODULE)) */
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/printk.h>
-static inline int ddebug_add_module(struct _ddebug *tab, unsigned int n,
- const char *modname)
-{
- return 0;
-}
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
+#define DYNAMIC_DEBUG_BRANCH(descriptor) false
-static inline int ddebug_remove_module(const char *mod)
-{
- return 0;
-}
+#define dynamic_pr_debug(fmt, ...) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#define dynamic_dev_dbg(dev, fmt, ...) \
+ dev_no_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__)
+#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ do { if (0) \
+ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii); \
+ } while (0)
+
+#endif /* CONFIG_DYNAMIC_DEBUG || (CONFIG_DYNAMIC_DEBUG_CORE && DYNAMIC_DEBUG_MODULE) */
+
+
+#ifdef CONFIG_DYNAMIC_DEBUG_CORE
+
+extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
+ const char *modname);
+struct kernel_param;
+int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp);
+int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp);
+
+#else
static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
const char *modname)
{
- if (strstr(param, "dyndbg")) {
+ if (!strcmp(param, "dyndbg")) {
/* avoid pr_warn(), which wants pr_fmt() fully defined */
printk(KERN_WARNING "dyndbg param is supported only in "
"CONFIG_DYNAMIC_DEBUG builds\n");
@@ -205,23 +351,15 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
return -EINVAL;
}
-#define dynamic_pr_debug(fmt, ...) \
- do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
-#define dynamic_dev_dbg(dev, fmt, ...) \
- do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
-#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
- groupsize, buf, len, ascii) \
- do { if (0) \
- print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \
- rowsize, groupsize, buf, len, ascii); \
- } while (0)
+struct kernel_param;
+static inline int param_set_dyndbg_classes(const char *instr, const struct kernel_param *kp)
+{ return 0; }
+static inline int param_get_dyndbg_classes(char *buffer, const struct kernel_param *kp)
+{ return 0; }
-static inline int dynamic_debug_exec_queries(const char *query, const char *modname)
-{
- pr_warn("kernel not built with CONFIG_DYNAMIC_DEBUG_CORE\n");
- return 0;
-}
+#endif
-#endif /* !CONFIG_DYNAMIC_DEBUG_CORE */
-#endif
+extern const struct kernel_param_ops param_ops_dyndbg_classes;
+
+#endif /* _DYNAMIC_DEBUG_H */
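
Tying the classmap pieces together, a module could declare verbosity classes and expose them as a module parameter roughly like this; the class names, the "debug" parameter, and the control-file query are illustrative.

/* class ids 0..2, matching the classmap order below */
enum { EX_INFO, EX_DEBUG, EX_TRACE };

DECLARE_DYNDBG_CLASSMAP(example_classes, DD_CLASS_TYPE_LEVEL_NAMES, 0,
			"EX_INFO", "EX_DEBUG", "EX_TRACE");

static unsigned int example_level;
static struct ddebug_class_param example_class_param = {
	.lvl = &example_level,
	.flags = "p",
	.map = &example_classes,
};
module_param_cb(debug, &param_ops_dyndbg_classes, &example_class_param, 0600);

/* callsites then pick a constant class: */
#define ex_dbg(cls, fmt, ...) __pr_debug_cls(cls, fmt, ##__VA_ARGS__)

/* e.g. ex_dbg(EX_TRACE, "entering %s\n", __func__);
 * enabled at runtime with something like:
 *   echo "module example class EX_TRACE +p" > .../dynamic_debug/control
 */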
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index 407c2f281b64..808b1a5102e7 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -38,14 +38,25 @@
#ifdef __KERNEL__
+#include <linux/bitops.h>
#include <asm/bug.h>
+#define DQL_HIST_LEN 4
+#define DQL_HIST_ENT(dql, idx) ((dql)->history[(idx) % DQL_HIST_LEN])
+
struct dql {
/* Fields accessed in enqueue path (dql_queued) */
unsigned int num_queued; /* Total ever queued */
unsigned int adj_limit; /* limit + num_completed */
unsigned int last_obj_cnt; /* Count at last queuing */
+ /* Stall threshold (in jiffies), defined by user */
+ unsigned short stall_thrs;
+
+ unsigned long history_head; /* top 58 bits of jiffies */
+ /* stall entries, a bit per entry */
+ unsigned long history[DQL_HIST_LEN];
+
/* Fields accessed only by completion path (dql_completed) */
unsigned int limit ____cacheline_aligned_in_smp; /* Current limit */
@@ -62,21 +73,61 @@ struct dql {
unsigned int max_limit; /* Max limit */
unsigned int min_limit; /* Minimum limit */
unsigned int slack_hold_time; /* Time to measure slack */
+
+ /* Longest stall detected, reported to user */
+ unsigned short stall_max;
+ unsigned long last_reap; /* Last reap (in jiffies) */
+ unsigned long stall_cnt; /* Number of stalls */
};
/* Set some static maximums */
#define DQL_MAX_OBJECT (UINT_MAX / 16)
#define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT)
+/* Populate the bitmap to be processed later in dql_check_stall() */
+static inline void dql_queue_stall(struct dql *dql)
+{
+ unsigned long map, now, now_hi, i;
+
+ now = jiffies;
+ now_hi = now / BITS_PER_LONG;
+
+	/* The following code sets a bit in the ring buffer, where each
+	 * bit tracks the time the packet was queued. The dql->history buffer
+	 * tracks DQL_HIST_LEN * BITS_PER_LONG time (jiffies) slots.
+ */
+ if (unlikely(now_hi != dql->history_head)) {
+ /* About to reuse slots, clear them */
+ for (i = 0; i < DQL_HIST_LEN; i++) {
+ /* Multiplication masks high bits */
+ if (now_hi * BITS_PER_LONG ==
+ (dql->history_head + i) * BITS_PER_LONG)
+ break;
+ DQL_HIST_ENT(dql, dql->history_head + i + 1) = 0;
+ }
+ /* pairs with smp_rmb() in dql_check_stall() */
+ smp_wmb();
+ WRITE_ONCE(dql->history_head, now_hi);
+ }
+
+ /* __set_bit() does not guarantee WRITE_ONCE() semantics */
+ map = DQL_HIST_ENT(dql, now_hi);
+
+ /* Populate the history with an entry (bit) per queued */
+ if (!(map & BIT_MASK(now)))
+ WRITE_ONCE(DQL_HIST_ENT(dql, now_hi), map | BIT_MASK(now));
+}
+
/*
* Record number of objects queued. Assumes that caller has already checked
* availability in the queue with dql_avail.
*/
static inline void dql_queued(struct dql *dql, unsigned int count)
{
- BUG_ON(count > DQL_MAX_OBJECT);
+ if (WARN_ON_ONCE(count > DQL_MAX_OBJECT))
+ return;
- dql->last_obj_cnt = count;
+ WRITE_ONCE(dql->last_obj_cnt, count);
/* We want to force a write first, so that cpu do not attempt
* to get cache line containing last_obj_cnt, num_queued, adj_limit
@@ -86,6 +137,10 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
barrier();
dql->num_queued += count;
+
+ /* Only populate stall information if the threshold is set */
+ if (READ_ONCE(dql->stall_thrs))
+ dql_queue_stall(dql);
}
/* Returns how many objects can be queued, < 0 indicates over limit. */
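
As a usage sketch for the stall tracking above: a driver pairs dql_queued() on the enqueue side with dql_completed() on the completion side, and the history bits are only populated once a stall threshold is set. dql_avail() and dql_completed() come from this same header; the function names here are placeholders (netdev drivers normally go through the netdev_tx_* wrappers instead of touching struct dql directly).

static int example_tx_enqueue(struct dql *dql, unsigned int count)
{
	if (dql_avail(dql) < 0)
		return -EBUSY;		/* over limit, stop the queue */

	/* ... post 'count' objects to hardware ... */

	/* Also records a stall-history bit when stall_thrs != 0 */
	dql_queued(dql, count);
	return 0;
}

static void example_tx_complete(struct dql *dql, unsigned int count)
{
	/* Recomputes the limit and reaps stall state */
	dql_completed(dql, count);
}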
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 76d3562d3006..fa32f2aca22f 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -30,7 +30,7 @@ struct device;
extern int edac_op_state;
-struct bus_type *edac_get_sysfs_subsys(void);
+const struct bus_type *edac_get_sysfs_subsys(void);
static inline void opstate_init(void)
{
@@ -182,8 +182,12 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_LRDDR4: Load-Reduced DDR4 memory.
* @MEM_LPDDR4: Low-Power DDR4 memory.
* @MEM_DDR5: Unbuffered DDR5 RAM
+ * @MEM_RDDR5: Registered DDR5 RAM
+ * @MEM_LRDDR5: Load-Reduced DDR5 memory.
* @MEM_NVDIMM: Non-volatile RAM
* @MEM_WIO2: Wide I/O 2.
+ * @MEM_HBM2: High bandwidth Memory Gen 2.
+ * @MEM_HBM3: High bandwidth Memory Gen 3.
*/
enum mem_type {
MEM_EMPTY = 0,
@@ -210,8 +214,12 @@ enum mem_type {
MEM_LRDDR4,
MEM_LPDDR4,
MEM_DDR5,
+ MEM_RDDR5,
+ MEM_LRDDR5,
MEM_NVDIMM,
MEM_WIO2,
+ MEM_HBM2,
+ MEM_HBM3,
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -225,20 +233,24 @@ enum mem_type {
#define MEM_FLAG_DDR BIT(MEM_DDR)
#define MEM_FLAG_RDDR BIT(MEM_RDDR)
#define MEM_FLAG_RMBS BIT(MEM_RMBS)
-#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
-#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
-#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
-#define MEM_FLAG_XDR BIT(MEM_XDR)
-#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
-#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
-#define MEM_FLAG_LPDDR3 BIT(MEM_LPDDR3)
-#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
-#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
-#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
-#define MEM_FLAG_LPDDR4 BIT(MEM_LPDDR4)
-#define MEM_FLAG_DDR5 BIT(MEM_DDR5)
-#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
+#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
+#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
+#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
+#define MEM_FLAG_XDR BIT(MEM_XDR)
+#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
+#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
+#define MEM_FLAG_LPDDR3 BIT(MEM_LPDDR3)
+#define MEM_FLAG_DDR4 BIT(MEM_DDR4)
+#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
+#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
+#define MEM_FLAG_LPDDR4 BIT(MEM_LPDDR4)
+#define MEM_FLAG_DDR5 BIT(MEM_DDR5)
+#define MEM_FLAG_RDDR5 BIT(MEM_RDDR5)
+#define MEM_FLAG_LRDDR5 BIT(MEM_LRDDR5)
+#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
#define MEM_FLAG_WIO2 BIT(MEM_WIO2)
+#define MEM_FLAG_HBM2 BIT(MEM_HBM2)
+#define MEM_FLAG_HBM3 BIT(MEM_HBM3)
/**
* enum edac_type - Error Detection and Correction capabilities and mode
@@ -483,7 +495,7 @@ struct edac_raw_error_desc {
*/
struct mem_ctl_info {
struct device dev;
- struct bus_type *bus;
+ const struct bus_type *bus;
struct list_head link; /* for global list of mem_ctl_info structs */
@@ -649,4 +661,226 @@ static inline struct dimm_info *edac_get_dimm(struct mem_ctl_info *mci,
return mci->dimms[index];
}
+
+#define EDAC_FEAT_NAME_LEN 128
+
+/* RAS feature type */
+enum edac_dev_feat {
+ RAS_FEAT_SCRUB,
+ RAS_FEAT_ECS,
+ RAS_FEAT_MEM_REPAIR,
+ RAS_FEAT_MAX
+};
+
+/**
+ * struct edac_scrub_ops - scrub device operations (all elements optional)
+ * @read_addr: read base address of the scrubbing range.
+ * @read_size: read size of the scrubbing range.
+ * @write_addr: set base address of the scrubbing range.
+ * @write_size: set size of the scrubbing range.
+ * @get_enabled_bg: check whether background scrubbing is currently running.
+ * @set_enabled_bg: start or stop background scrubbing.
+ * @get_min_cycle: get minimum supported scrub cycle duration in seconds.
+ * @get_max_cycle: get maximum supported scrub cycle duration in seconds.
+ * @get_cycle_duration: get current scrub cycle duration in seconds.
+ * @set_cycle_duration: set current scrub cycle duration in seconds.
+ */
+struct edac_scrub_ops {
+ int (*read_addr)(struct device *dev, void *drv_data, u64 *base);
+ int (*read_size)(struct device *dev, void *drv_data, u64 *size);
+ int (*write_addr)(struct device *dev, void *drv_data, u64 base);
+ int (*write_size)(struct device *dev, void *drv_data, u64 size);
+ int (*get_enabled_bg)(struct device *dev, void *drv_data, bool *enable);
+ int (*set_enabled_bg)(struct device *dev, void *drv_data, bool enable);
+ int (*get_min_cycle)(struct device *dev, void *drv_data, u32 *min);
+ int (*get_max_cycle)(struct device *dev, void *drv_data, u32 *max);
+ int (*get_cycle_duration)(struct device *dev, void *drv_data, u32 *cycle);
+ int (*set_cycle_duration)(struct device *dev, void *drv_data, u32 cycle);
+};
+
+#if IS_ENABLED(CONFIG_EDAC_SCRUB)
+int edac_scrub_get_desc(struct device *scrub_dev,
+ const struct attribute_group **attr_groups,
+ u8 instance);
+#else
+static inline int edac_scrub_get_desc(struct device *scrub_dev,
+ const struct attribute_group **attr_groups,
+ u8 instance)
+{ return -EOPNOTSUPP; }
+#endif /* CONFIG_EDAC_SCRUB */
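
As a sketch of how a driver might back these hooks (the my_* names and the fixed one-hour cycle are assumptions, not part of this patch):

static int my_get_cycle(struct device *dev, void *drv_data, u32 *cycle)
{
	*cycle = 3600;	/* hypothetical device scrubs once per hour */
	return 0;
}

static const struct edac_scrub_ops my_scrub_ops = {
	/* every hook is optional; provide only what the device supports */
	.get_cycle_duration = my_get_cycle,
};
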
+
+/**
+ * struct edac_ecs_ops - ECS device operations (all elements optional)
+ * @get_log_entry_type: read the log entry type value.
+ * @set_log_entry_type: set the log entry type value.
+ * @get_mode: read the mode value.
+ * @set_mode: set the mode value.
+ * @reset: reset the ECS counter.
+ * @get_threshold: read the threshold count per gigabit of memory cells.
+ * @set_threshold: set the threshold count per gigabit of memory cells.
+ */
+struct edac_ecs_ops {
+ int (*get_log_entry_type)(struct device *dev, void *drv_data, int fru_id, u32 *val);
+ int (*set_log_entry_type)(struct device *dev, void *drv_data, int fru_id, u32 val);
+ int (*get_mode)(struct device *dev, void *drv_data, int fru_id, u32 *val);
+ int (*set_mode)(struct device *dev, void *drv_data, int fru_id, u32 val);
+ int (*reset)(struct device *dev, void *drv_data, int fru_id, u32 val);
+ int (*get_threshold)(struct device *dev, void *drv_data, int fru_id, u32 *threshold);
+ int (*set_threshold)(struct device *dev, void *drv_data, int fru_id, u32 threshold);
+};
+
+struct edac_ecs_ex_info {
+ u16 num_media_frus;
+};
+
+#if IS_ENABLED(CONFIG_EDAC_ECS)
+int edac_ecs_get_desc(struct device *ecs_dev,
+ const struct attribute_group **attr_groups,
+ u16 num_media_frus);
+#else
+static inline int edac_ecs_get_desc(struct device *ecs_dev,
+ const struct attribute_group **attr_groups,
+ u16 num_media_frus)
+{ return -EOPNOTSUPP; }
+#endif /* CONFIG_EDAC_ECS */
+
+enum edac_mem_repair_type {
+ EDAC_REPAIR_PPR,
+ EDAC_REPAIR_CACHELINE_SPARING,
+ EDAC_REPAIR_ROW_SPARING,
+ EDAC_REPAIR_BANK_SPARING,
+ EDAC_REPAIR_RANK_SPARING,
+ EDAC_REPAIR_MAX
+};
+
+extern const char * const edac_repair_type[];
+
+enum edac_mem_repair_cmd {
+ EDAC_DO_MEM_REPAIR = 1,
+};
+
+/**
+ * struct edac_mem_repair_ops - memory repair operations
+ * (all elements optional except @do_repair and at least one of @set_hpa/@set_dpa)
+ * @get_repair_type: get the memory repair type, listed in
+ * enum edac_mem_repair_type.
+ * @get_persist_mode: get the current persist mode.
+ * false - Soft repair type (temporary repair).
+ * true - Hard memory repair type (permanent repair).
+ * @set_persist_mode: set the persist mode of the memory repair instance.
+ * @get_repair_safe_when_in_use: get whether memory media is accessible and
+ * data is retained during repair operation.
+ * @get_hpa: get current host physical address (HPA) of memory to repair.
+ * @set_hpa: set host physical address (HPA) of memory to repair.
+ * @get_min_hpa: get the minimum supported host physical address (HPA).
+ * @get_max_hpa: get the maximum supported host physical address (HPA).
+ * @get_dpa: get current device physical address (DPA) of memory to repair.
+ * @set_dpa: set device physical address (DPA) of memory to repair.
+ * In some states of system configuration (e.g. before address decoders
+ * have been configured), memory devices (e.g. CXL) may not have an active
+ * mapping in the host physical address map. As such, the memory
+ * to repair must be identified by a device-specific physical addressing
+ * scheme using a device physical address (DPA). The DPA and other control
+ * attributes to use for the repair operations will be presented in related
+ * error records.
+ * @get_min_dpa: get the minimum supported device physical address (DPA).
+ * @get_max_dpa: get the maximum supported device physical address (DPA).
+ * @get_nibble_mask: get current nibble mask of memory to repair.
+ * @set_nibble_mask: set nibble mask of memory to repair.
+ * @get_bank_group: get current bank group of memory to repair.
+ * @set_bank_group: set bank group of memory to repair.
+ * @get_bank: get current bank of memory to repair.
+ * @set_bank: set bank of memory to repair.
+ * @get_rank: get current rank of memory to repair.
+ * @set_rank: set rank of memory to repair.
+ * @get_row: get current row of memory to repair.
+ * @set_row: set row of memory to repair.
+ * @get_column: get current column of memory to repair.
+ * @set_column: set column of memory to repair.
+ * @get_channel: get current channel of memory to repair.
+ * @set_channel: set channel of memory to repair.
+ * @get_sub_channel: get current subchannel of memory to repair.
+ * @set_sub_channel: set subchannel of memory to repair.
+ * @do_repair: Issue memory repair operation for the HPA/DPA and
+ * other control attributes set for the memory to repair.
+ *
+ * All elements are optional except do_repair and at least one of set_hpa/set_dpa.
+ */
+struct edac_mem_repair_ops {
+ int (*get_repair_type)(struct device *dev, void *drv_data, const char **type);
+ int (*get_persist_mode)(struct device *dev, void *drv_data, bool *persist);
+ int (*set_persist_mode)(struct device *dev, void *drv_data, bool persist);
+ int (*get_repair_safe_when_in_use)(struct device *dev, void *drv_data, bool *safe);
+ int (*get_hpa)(struct device *dev, void *drv_data, u64 *hpa);
+ int (*set_hpa)(struct device *dev, void *drv_data, u64 hpa);
+ int (*get_min_hpa)(struct device *dev, void *drv_data, u64 *hpa);
+ int (*get_max_hpa)(struct device *dev, void *drv_data, u64 *hpa);
+ int (*get_dpa)(struct device *dev, void *drv_data, u64 *dpa);
+ int (*set_dpa)(struct device *dev, void *drv_data, u64 dpa);
+ int (*get_min_dpa)(struct device *dev, void *drv_data, u64 *dpa);
+ int (*get_max_dpa)(struct device *dev, void *drv_data, u64 *dpa);
+ int (*get_nibble_mask)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_nibble_mask)(struct device *dev, void *drv_data, u32 val);
+ int (*get_bank_group)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_bank_group)(struct device *dev, void *drv_data, u32 val);
+ int (*get_bank)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_bank)(struct device *dev, void *drv_data, u32 val);
+ int (*get_rank)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_rank)(struct device *dev, void *drv_data, u32 val);
+ int (*get_row)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_row)(struct device *dev, void *drv_data, u32 val);
+ int (*get_column)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_column)(struct device *dev, void *drv_data, u32 val);
+ int (*get_channel)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_channel)(struct device *dev, void *drv_data, u32 val);
+ int (*get_sub_channel)(struct device *dev, void *drv_data, u32 *val);
+ int (*set_sub_channel)(struct device *dev, void *drv_data, u32 val);
+ int (*do_repair)(struct device *dev, void *drv_data, u32 val);
+};
+
+#if IS_ENABLED(CONFIG_EDAC_MEM_REPAIR)
+int edac_mem_repair_get_desc(struct device *dev,
+ const struct attribute_group **attr_groups,
+ u8 instance);
+#else
+static inline int edac_mem_repair_get_desc(struct device *dev,
+ const struct attribute_group **attr_groups,
+ u8 instance)
+{ return -EOPNOTSUPP; }
+#endif /* CONFIG_EDAC_MEM_REPAIR */
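
A minimal hedged sketch of a repair-ops instance meeting the stated requirement (do_repair plus at least one of set_hpa/set_dpa); the my_* helpers and the drv_data layout are assumptions:

struct my_repair_ctx {
	u64 dpa;	/* DPA taken from the error record */
};

static int my_set_dpa(struct device *dev, void *drv_data, u64 dpa)
{
	((struct my_repair_ctx *)drv_data)->dpa = dpa;
	return 0;
}

static int my_do_repair(struct device *dev, void *drv_data, u32 val)
{
	struct my_repair_ctx *ctx = drv_data;

	if (val != EDAC_DO_MEM_REPAIR)
		return -EINVAL;
	/* issue the device-specific repair command for ctx->dpa here */
	return 0;
}

static const struct edac_mem_repair_ops my_repair_ops = {
	.set_dpa   = my_set_dpa,
	.do_repair = my_do_repair,
};
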
+
+/* EDAC device feature information structure */
+struct edac_dev_data {
+ union {
+ const struct edac_scrub_ops *scrub_ops;
+ const struct edac_ecs_ops *ecs_ops;
+ const struct edac_mem_repair_ops *mem_repair_ops;
+ };
+ u8 instance;
+ void *private;
+};
+
+struct edac_dev_feat_ctx {
+ struct device dev;
+ void *private;
+ struct edac_dev_data *scrub;
+ struct edac_dev_data ecs;
+ struct edac_dev_data *mem_repair;
+};
+
+struct edac_dev_feature {
+ enum edac_dev_feat ft_type;
+ u8 instance;
+ union {
+ const struct edac_scrub_ops *scrub_ops;
+ const struct edac_ecs_ops *ecs_ops;
+ const struct edac_mem_repair_ops *mem_repair_ops;
+ };
+ void *ctx;
+ struct edac_ecs_ex_info ecs_info;
+};
+
+int edac_dev_register(struct device *parent, char *dev_name,
+ void *parent_pvt_data, int num_features,
+ const struct edac_dev_feature *ras_features);
#endif /* _LINUX_EDAC_H_ */
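
Tying the feature descriptors together, a hedged registration sketch reusing my_scrub_ops from the earlier sketch; the device name and single-feature probe are illustrative only:

static int my_probe(struct device *parent)
{
	static char name[] = "my_ras_dev";	/* dev_name is non-const */
	struct edac_dev_feature feat = {
		.ft_type   = RAS_FEAT_SCRUB,
		.instance  = 0,
		.scrub_ops = &my_scrub_ops,
	};

	/* one feature, no parent private data */
	return edac_dev_register(parent, name, NULL, 1, &feat);
}
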
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
index c860c72a921d..3a485cc0e0fa 100644
--- a/include/linux/eeprom_93cx6.h
+++ b/include/linux/eeprom_93cx6.h
@@ -11,6 +11,8 @@
Supported chipsets: 93c46, 93c56 and 93c66.
*/
+#include <linux/bits.h>
+
/*
* EEPROM operation defines.
*/
@@ -34,6 +36,7 @@
* @register_write(struct eeprom_93cx6 *eeprom): handler to
* write to the eeprom register by using all reg_* fields.
* @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines
+ * @quirks: eeprom or controller quirks
* @drive_data: Set if we're driving the data line.
* @reg_data_in: register field to indicate data input
* @reg_data_out: register field to indicate data output
@@ -50,6 +53,9 @@ struct eeprom_93cx6 {
void (*register_write)(struct eeprom_93cx6 *eeprom);
int width;
+ unsigned int quirks;
+/* Some EEPROMs require an extra clock cycle before reading */
+#define PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE BIT(0)
char drive_data;
char reg_data_in;
@@ -71,3 +77,8 @@ extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable);
extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom,
u8 addr, u16 data);
+
+static inline bool has_quirk_extra_read_cycle(struct eeprom_93cx6 *eeprom)
+{
+ return eeprom->quirks & PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE;
+}
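
A hedged usage sketch: a driver whose EEPROM needs the extra clock cycle sets the quirk before using the existing accessors (eeprom_93cx6_read() is declared just above; example_read_word() is illustrative):

static void example_read_word(struct eeprom_93cx6 *eeprom, u16 *word)
{
	eeprom->quirks |= PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE;
	eeprom_93cx6_read(eeprom, 0, word);	/* word 0, quirk honoured */
}
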
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
deleted file mode 100644
index 99580c22f91a..000000000000
--- a/include/linux/eeprom_93xx46.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Module: eeprom_93xx46
- * platform description for 93xx46 EEPROMs.
- */
-#include <linux/gpio/consumer.h>
-
-struct eeprom_93xx46_platform_data {
- unsigned char flags;
-#define EE_ADDR8 0x01 /* 8 bit addr. cfg */
-#define EE_ADDR16 0x02 /* 16 bit addr. cfg */
-#define EE_READONLY 0x08 /* forbid writing */
-
- unsigned int quirks;
-/* Single word read transfers only; no sequential read. */
-#define EEPROM_93XX46_QUIRK_SINGLE_WORD_READ (1 << 0)
-/* Instructions such as EWEN are (addrlen + 2) in length. */
-#define EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH (1 << 1)
-/* Add extra cycle after address during a read */
-#define EEPROM_93XX46_QUIRK_EXTRA_READ_CYCLE BIT(2)
-
- /*
- * optional hooks to control additional logic
- * before and after spi transfer.
- */
- void (*prepare)(void *);
- void (*finish)(void *);
- struct gpio_desc *select;
-};
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 6b5d36babfcc..2a43094e23f7 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -24,10 +24,11 @@
#include <linux/range.h>
#include <linux/reboot.h>
#include <linux/uuid.h>
-#include <linux/screen_info.h>
#include <asm/page.h>
+struct screen_info;
+
#define EFI_SUCCESS 0
#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
#define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1)))
@@ -39,6 +40,7 @@
#define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1)))
#define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1)))
#define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_ACCESS_DENIED (15 | (1UL << (BITS_PER_LONG-1)))
#define EFI_TIMEOUT (18 | (1UL << (BITS_PER_LONG-1)))
#define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1)))
#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1)))
@@ -72,10 +74,10 @@ typedef void *efi_handle_t;
*/
typedef guid_t efi_guid_t __aligned(__alignof__(u32));
-#define EFI_GUID(a, b, c, d...) (efi_guid_t){ { \
+#define EFI_GUID(a, b, c, d...) ((efi_guid_t){ { \
(a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
(b) & 0xff, ((b) >> 8) & 0xff, \
- (c) & 0xff, ((c) >> 8) & 0xff, d } }
+ (c) & 0xff, ((c) >> 8) & 0xff, d } })
/*
* Generic EFI table header
@@ -108,24 +110,26 @@ typedef struct {
#define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12
#define EFI_PAL_CODE 13
#define EFI_PERSISTENT_MEMORY 14
-#define EFI_MAX_MEMORY_TYPE 15
+#define EFI_UNACCEPTED_MEMORY 15
+#define EFI_MAX_MEMORY_TYPE 16
/* Attribute values: */
-#define EFI_MEMORY_UC ((u64)0x0000000000000001ULL) /* uncached */
-#define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */
-#define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */
-#define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */
-#define EFI_MEMORY_UCE ((u64)0x0000000000000010ULL) /* uncached, exported */
-#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
-#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
-#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
-#define EFI_MEMORY_NV ((u64)0x0000000000008000ULL) /* non-volatile */
-#define EFI_MEMORY_MORE_RELIABLE \
- ((u64)0x0000000000010000ULL) /* higher reliability */
-#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */
-#define EFI_MEMORY_SP ((u64)0x0000000000040000ULL) /* soft reserved */
-#define EFI_MEMORY_CPU_CRYPTO ((u64)0x0000000000080000ULL) /* supports encryption */
-#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
+#define EFI_MEMORY_UC BIT_ULL(0) /* uncached */
+#define EFI_MEMORY_WC BIT_ULL(1) /* write-coalescing */
+#define EFI_MEMORY_WT BIT_ULL(2) /* write-through */
+#define EFI_MEMORY_WB BIT_ULL(3) /* write-back */
+#define EFI_MEMORY_UCE BIT_ULL(4) /* uncached, exported */
+#define EFI_MEMORY_WP BIT_ULL(12) /* write-protect */
+#define EFI_MEMORY_RP BIT_ULL(13) /* read-protect */
+#define EFI_MEMORY_XP BIT_ULL(14) /* execute-protect */
+#define EFI_MEMORY_NV BIT_ULL(15) /* non-volatile */
+#define EFI_MEMORY_MORE_RELIABLE BIT_ULL(16) /* higher reliability */
+#define EFI_MEMORY_RO BIT_ULL(17) /* read-only */
+#define EFI_MEMORY_SP BIT_ULL(18) /* soft reserved */
+#define EFI_MEMORY_CPU_CRYPTO BIT_ULL(19) /* supports encryption */
+#define EFI_MEMORY_HOT_PLUGGABLE BIT_ULL(20) /* supports unplugging at runtime */
+#define EFI_MEMORY_RUNTIME BIT_ULL(63) /* range requires runtime mapping */
+
#define EFI_MEMORY_DESCRIPTOR_VERSION 1
#define EFI_PAGE_SHIFT 12
@@ -148,6 +152,52 @@ typedef struct {
u32 imagesize;
} efi_capsule_header_t;
+/* EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER */
+struct efi_manage_capsule_header {
+ u32 ver;
+ u16 emb_drv_cnt;
+ u16 payload_cnt;
+ /*
+ * Variable-size array of the size given by the sum of
+ * emb_drv_cnt and payload_cnt.
+ */
+ u64 offset_list[];
+} __packed;
+
+/* EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER */
+struct efi_manage_capsule_image_header {
+ u32 ver;
+ efi_guid_t image_type_id;
+ u8 image_index;
+ u8 reserved_bytes[3];
+ u32 image_size;
+ u32 vendor_code_size;
+ /* hw_ins was introduced in version 2 */
+ u64 hw_ins;
+ /* capsule_support was introduced in version 3 */
+ u64 capsule_support;
+} __packed;
+
+/* WIN_CERTIFICATE */
+struct win_cert {
+ u32 len;
+ u16 rev;
+ u16 cert_type;
+};
+
+/* WIN_CERTIFICATE_UEFI_GUID */
+struct win_cert_uefi_guid {
+ struct win_cert hdr;
+ efi_guid_t cert_type;
+ u8 cert_data[];
+};
+
+/* EFI_FIRMWARE_IMAGE_AUTHENTICATION */
+struct efi_image_auth {
+ u64 mon_count;
+ struct win_cert_uefi_guid auth_info;
+};
+
/*
* EFI capsule flags
*/
@@ -167,6 +217,8 @@ struct capsule_info {
size_t page_bytes_remain;
};
+int efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
+ size_t hdr_bytes);
int __efi_capsule_setup_info(struct capsule_info *cap_info);
/*
@@ -238,7 +290,7 @@ typedef efi_status_t efi_get_variable_t (efi_char16_t *name, efi_guid_t *vendor,
unsigned long *data_size, void *data);
typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char16_t *name,
efi_guid_t *vendor);
-typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
+typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size,
void *data);
typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count);
@@ -308,35 +360,40 @@ void efi_native_runtime_setup(void);
* where the UEFI SPEC breaks the line.
*/
#define NULL_GUID EFI_GUID(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
-#define MPS_TABLE_GUID EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define ACPI_TABLE_GUID EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94)
-#define SAL_SYSTEM_TABLE_GUID EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
-#define HCDP_TABLE_GUID EFI_GUID(0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98)
-#define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2)
#define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
#define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
#define LINUX_EFI_CRASH_GUID EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
#define LOADED_IMAGE_PROTOCOL_GUID EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define LOADED_IMAGE_DEVICE_PATH_PROTOCOL_GUID EFI_GUID(0xbc62157e, 0x3e33, 0x4fec, 0x99, 0x20, 0x2d, 0x3b, 0x36, 0xd7, 0x50, 0xdf)
+#define EFI_DEVICE_PATH_PROTOCOL_GUID EFI_GUID(0x09576e91, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define EFI_DEVICE_PATH_TO_TEXT_PROTOCOL_GUID EFI_GUID(0x8b843e20, 0x8132, 0x4852, 0x90, 0xcc, 0x55, 0x1a, 0x4e, 0x4a, 0x7f, 0x1c)
+#define EFI_DEVICE_PATH_FROM_TEXT_PROTOCOL_GUID EFI_GUID(0x05c99a21, 0xc70f, 0x4ad2, 0x8a, 0x5f, 0x35, 0xdf, 0x33, 0x43, 0xf5, 0x1e)
#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
-#define EFI_UGA_PROTOCOL_GUID EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
+#define EFI_EDID_DISCOVERED_PROTOCOL_GUID EFI_GUID(0x1c0c34f6, 0xd380, 0x41fa, 0xa0, 0x49, 0x8a, 0xd0, 0x6c, 0x1a, 0x66, 0xaa)
+#define EFI_EDID_ACTIVE_PROTOCOL_GUID EFI_GUID(0xbd8c1056, 0x9f36, 0x44ec, 0x92, 0xa8, 0xa6, 0x33, 0x7f, 0x81, 0x79, 0x86)
#define EFI_PCI_IO_PROTOCOL_GUID EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
#define EFI_FILE_INFO_ID EFI_GUID(0x09576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
#define EFI_SYSTEM_RESOURCE_TABLE_GUID EFI_GUID(0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80)
#define EFI_FILE_SYSTEM_GUID EFI_GUID(0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
#define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
-#define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
#define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
#define EFI_RNG_ALGORITHM_RAW EFI_GUID(0xe43176d7, 0xb6e8, 0x4827, 0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61)
#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
+#define APPLE_SET_OS_PROTOCOL_GUID EFI_GUID(0xc5c5da95, 0x7d5c, 0x45e6, 0xb2, 0xf1, 0x3f, 0xd5, 0x2b, 0xb1, 0x00, 0x77)
#define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f)
+#define EFI_TCG2_FINAL_EVENTS_TABLE_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25)
#define EFI_LOAD_FILE_PROTOCOL_GUID EFI_GUID(0x56ec3091, 0x954c, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
#define EFI_LOAD_FILE2_PROTOCOL_GUID EFI_GUID(0x4006c0c1, 0xfcb3, 0x403e, 0x99, 0x6d, 0x4a, 0x6c, 0x87, 0x24, 0xe0, 0x6d)
#define EFI_RT_PROPERTIES_TABLE_GUID EFI_GUID(0xeb66918a, 0x7eef, 0x402a, 0x84, 0x2e, 0x93, 0x1d, 0x21, 0xc3, 0x8a, 0xe9)
+#define EFI_DXE_SERVICES_TABLE_GUID EFI_GUID(0x05ad34ba, 0x6f02, 0x4214, 0x95, 0x2e, 0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9)
+#define EFI_SMBIOS_PROTOCOL_GUID EFI_GUID(0x03583ff6, 0xcb36, 0x4940, 0x94, 0x7e, 0xb9, 0xb3, 0x9f, 0x4a, 0xfa, 0xf7)
+#define EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID EFI_GUID(0xf4560cf6, 0x40ec, 0x4b4a, 0xa1, 0x92, 0xbf, 0x1d, 0x57, 0xd0, 0xb1, 0x89)
#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
@@ -344,24 +401,47 @@ void efi_native_runtime_setup(void);
#define EFI_CERT_SHA256_GUID EFI_GUID(0xc1c41626, 0x504c, 0x4092, 0xac, 0xa9, 0x41, 0xf9, 0x36, 0x93, 0x43, 0x28)
#define EFI_CERT_X509_GUID EFI_GUID(0xa5c059a1, 0x94e4, 0x4aa7, 0x87, 0xb5, 0xab, 0x15, 0x5c, 0x2b, 0xf0, 0x72)
#define EFI_CERT_X509_SHA256_GUID EFI_GUID(0x3bd2a492, 0x96c0, 0x4079, 0xb4, 0x20, 0xfc, 0xf9, 0x8e, 0xf1, 0x03, 0xed)
+#define EFI_CC_BLOB_GUID EFI_GUID(0x067b1f5f, 0xcf26, 0x44c5, 0x85, 0x54, 0x93, 0xd7, 0x77, 0x91, 0x2d, 0x42)
+#define EFI_CC_MEASUREMENT_PROTOCOL_GUID EFI_GUID(0x96751a3d, 0x72f4, 0x41a6, 0xa7, 0x94, 0xed, 0x5d, 0x0e, 0x67, 0xae, 0x6b)
+#define EFI_CC_FINAL_EVENTS_TABLE_GUID EFI_GUID(0xdd4a4648, 0x2de7, 0x4665, 0x96, 0x4d, 0x21, 0xd9, 0xef, 0x5f, 0xb4, 0x46)
/*
* This GUID is used to pass to the kernel proper the struct screen_info
* structure that was populated by the stub based on the GOP protocol instance
* associated with ConOut
*/
-#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+#define LINUX_EFI_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
#define LINUX_EFI_ARM_CPU_STATE_TABLE_GUID EFI_GUID(0xef79e4aa, 0x3c3d, 0x4989, 0xb9, 0x02, 0x07, 0xa9, 0x43, 0xe5, 0x50, 0xd2)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
-#define LINUX_EFI_TPM_FINAL_LOG_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25)
#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2)
#define LINUX_EFI_INITRD_MEDIA_GUID EFI_GUID(0x5568e427, 0x68fc, 0x4f3d, 0xac, 0x74, 0xca, 0x55, 0x52, 0x31, 0xcc, 0x68)
#define LINUX_EFI_MOK_VARIABLE_TABLE_GUID EFI_GUID(0xc451ed2b, 0x9694, 0x45d3, 0xba, 0xba, 0xed, 0x9f, 0x89, 0x88, 0xa3, 0x89)
+#define LINUX_EFI_COCO_SECRET_AREA_GUID EFI_GUID(0xadf956ad, 0xe98c, 0x484c, 0xae, 0x11, 0xb5, 0x1c, 0x7d, 0x33, 0x64, 0x47)
+#define LINUX_EFI_BOOT_MEMMAP_GUID EFI_GUID(0x800f683f, 0xd08b, 0x423a, 0xa2, 0x93, 0x96, 0x5c, 0x3c, 0x6f, 0xe2, 0xb4)
+#define LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID EFI_GUID(0xd5d1de3c, 0x105c, 0x44f9, 0x9e, 0xa9, 0xbc, 0xef, 0x98, 0x12, 0x00, 0x31)
+
+#define RISCV_EFI_BOOT_PROTOCOL_GUID EFI_GUID(0xccd15fec, 0x6f73, 0x4eec, 0x83, 0x95, 0x3e, 0x69, 0xe4, 0xb9, 0x40, 0xbf)
+
+/*
+ * This GUID may be installed onto the kernel image's handle as a NULL protocol
+ * to signal to the stub that the placement of the image should be respected,
+ * and moving the image in physical memory is undesirable. To ensure
+ * compatibility with 64k pages kernels with virtually mapped stacks, and to
+ * avoid defeating physical randomization, this protocol should only be
+ * installed if the image was placed at a randomized 128k aligned address in
+ * memory.
+ */
+#define LINUX_EFI_LOADED_IMAGE_FIXED_GUID EFI_GUID(0xf5a37b6d, 0x3344, 0x42a5, 0xb6, 0xbb, 0x97, 0x86, 0x48, 0xc1, 0x89, 0x0a)
/* OEM GUIDs */
#define DELLEMC_EFI_RCI2_TABLE_GUID EFI_GUID(0x2d9f28a2, 0xa886, 0x456a, 0x97, 0xa8, 0xf1, 0x1e, 0xf2, 0x4f, 0xf4, 0x55)
+#define AMD_SEV_MEM_ENCRYPT_GUID EFI_GUID(0x0cf29b71, 0x9e51, 0x433a, 0xa3, 0xb7, 0x81, 0xf3, 0xab, 0x16, 0xb8, 0x75)
+
+/* OVMF protocol GUIDs */
+#define OVMF_SEV_MEMORY_ACCEPTANCE_PROTOCOL_GUID EFI_GUID(0xc5a010fe, 0x38a7, 0x4531, 0x8a, 0x4a, 0x05, 0x00, 0xd2, 0xfd, 0x16, 0x49)
+#define OVMF_MEMORY_LOG_TABLE_GUID EFI_GUID(0x95305139, 0xb20f, 0x4723, 0x84, 0x25, 0x62, 0x7c, 0x88, 0x8f, 0xf1, 0x21)
typedef struct {
efi_guid_t guid;
@@ -388,6 +468,7 @@ typedef struct {
} efi_config_table_type_t;
#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL)
+#define EFI_DXE_SERVICES_TABLE_SIGNATURE ((u64)0x565245535f455844ULL)
#define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30))
#define EFI_2_20_SYSTEM_TABLE_REVISION ((2 << 16) | (20))
@@ -452,6 +533,23 @@ typedef union {
efi_system_table_32_t mixed_mode;
} efi_system_table_t;
+struct efi_boot_memmap {
+ unsigned long map_size;
+ unsigned long desc_size;
+ u32 desc_ver;
+ unsigned long map_key;
+ unsigned long buff_size;
+ efi_memory_desc_t map[];
+};
+
+struct efi_unaccepted_memory {
+ u32 version;
+ u32 unit_size;
+ u64 phys_base;
+ u64 size;
+ unsigned long bitmap[];
+};
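
A hedged sketch of how this table is meant to be consumed, assuming the usual encoding of one bitmap bit per unit_size bytes starting at phys_base (the helper name is an assumption):

static bool example_is_unaccepted(struct efi_unaccepted_memory *um, u64 pa)
{
	u64 idx = (pa - um->phys_base) / um->unit_size;

	/* a set bit means the range has not been accepted yet */
	return test_bit(idx, um->bitmap);
}
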
+
/*
* Architecture independent structure for describing a memory map for the
* benefit of efi_memmap_init_early(), and for passing context between
@@ -484,15 +582,6 @@ struct efi_mem_range {
};
typedef struct {
- u32 version;
- u32 length;
- u64 memory_protection_attribute;
-} efi_properties_table_t;
-
-#define EFI_PROPERTIES_TABLE_VERSION 0x00010000
-#define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1
-
-typedef struct {
u16 version;
u16 length;
u32 runtime_services_supported;
@@ -502,12 +591,20 @@ typedef struct {
#define EFI_INVALID_TABLE_ADDR (~0UL)
+// BIT0 implies that Runtime code includes the forward control flow guard
+// instruction, such as X86 CET-IBT or ARM BTI.
+#define EFI_MEMORY_ATTRIBUTES_FLAGS_RT_FORWARD_CONTROL_FLOW_GUARD 0x1
+
typedef struct {
u32 version;
u32 num_entries;
u32 desc_size;
- u32 reserved;
- efi_memory_desc_t entry[0];
+ u32 flags;
+ /*
+ * There are @num_entries following, each of size @desc_size bytes,
+ * including an efi_memory_desc_t header. See efi_memdesc_ptr().
+ */
+ efi_memory_desc_t entry[];
} efi_memory_attributes_table_t;
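
A hedged sketch of walking this table with efi_memdesc_ptr() (renamed later in this diff), stepping by @desc_size rather than sizeof(efi_memory_desc_t):

static void example_walk(efi_memory_attributes_table_t *tbl)
{
	u32 i;

	for (i = 0; i < tbl->num_entries; i++) {
		efi_memory_desc_t *md =
			efi_memdesc_ptr(tbl->entry, tbl->desc_size, i);

		/* inspect md->phys_addr, md->num_pages, md->attribute */
	}
}
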
typedef struct {
@@ -548,7 +645,10 @@ extern struct efi {
unsigned long esrt; /* ESRT table */
unsigned long tpm_log; /* TPM2 Event Log table */
unsigned long tpm_final_log; /* TPM2 Final Events Log table */
+ unsigned long ovmf_debug_log;
unsigned long mokvar_table; /* MOK variable config table */
+ unsigned long coco_secret; /* Confidential computing secret table */
+ unsigned long unaccepted; /* Unaccepted memory table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
@@ -569,8 +669,8 @@ extern struct efi {
unsigned long flags;
} efi;
-#define EFI_RT_SUPPORTED_GET_TIME 0x0001
-#define EFI_RT_SUPPORTED_SET_TIME 0x0002
+#define EFI_RT_SUPPORTED_GET_TIME 0x0001
+#define EFI_RT_SUPPORTED_SET_TIME 0x0002
#define EFI_RT_SUPPORTED_GET_WAKEUP_TIME 0x0004
#define EFI_RT_SUPPORTED_SET_WAKEUP_TIME 0x0008
#define EFI_RT_SUPPORTED_GET_VARIABLE 0x0010
@@ -586,11 +686,17 @@ extern struct efi {
#define EFI_RT_SUPPORTED_ALL 0x3fff
-#define EFI_RT_SUPPORTED_TIME_SERVICES 0x000f
+#define EFI_RT_SUPPORTED_TIME_SERVICES 0x0003
+#define EFI_RT_SUPPORTED_WAKEUP_SERVICES 0x000c
#define EFI_RT_SUPPORTED_VARIABLE_SERVICES 0x0070
extern struct mm_struct efi_mm;
+static inline bool mm_is_efi(struct mm_struct *mm)
+{
+ return IS_ENABLED(CONFIG_EFI) && mm == &efi_mm;
+}
+
static inline int
efi_guidcmp (efi_guid_t left, efi_guid_t right)
{
@@ -605,6 +711,7 @@ efi_guid_to_str(efi_guid_t *guid, char *out)
}
extern void efi_init (void);
+extern void efi_earlycon_reprobe(void);
#ifdef CONFIG_EFI
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
#else
@@ -623,20 +730,11 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
return EFI_SUCCESS;
}
#endif
-extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
-extern int __init efi_memmap_alloc(unsigned int num_entries,
- struct efi_memory_map_data *data);
-extern void __efi_memmap_free(u64 phys, unsigned long size,
- unsigned long flags);
+extern int __init __efi_memmap_init(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
extern void __init efi_memmap_unmap(void);
-extern int __init efi_memmap_install(struct efi_memory_map_data *data);
-extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
- struct range *range);
-extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
- void *buf, struct efi_mem_range *mem);
#ifdef CONFIG_EFI_ESRT
extern void __init efi_esrt_init(void);
@@ -646,8 +744,7 @@ static inline void efi_esrt_init(void) { }
extern int efi_config_parse_tables(const efi_config_table_t *config_tables,
int count,
const efi_config_table_type_t *arch_tables);
-extern int efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
- int min_major_version);
+extern int efi_systab_check_header(const efi_table_hdr_t *systab_hdr);
extern void efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
unsigned long fw_vendor);
extern u64 efi_get_iobase (void);
@@ -657,22 +754,15 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int __init efi_uart_console_only (void);
extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
+extern int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
-extern void efi_initialize_iomem_resources(struct resource *code_resource,
- struct resource *data_resource, struct resource *bss_resource);
extern u64 efi_get_fdt_params(struct efi_memory_map_data *data);
extern struct kobject *efi_kobj;
extern int efi_reboot_quirk_mode;
extern bool efi_poweroff_required(void);
-#ifdef CONFIG_EFI_FAKE_MEMMAP
-extern void __init efi_fake_memmap(void);
-#else
-static inline void efi_fake_memmap(void) { }
-#endif
-
extern unsigned long efi_mem_attr_table;
/*
@@ -682,14 +772,14 @@ extern unsigned long efi_mem_attr_table;
* argument in the page tables referred to by the
* first argument.
*/
-typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *);
+typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *, bool);
-extern int efi_memattr_init(void);
+extern void efi_memattr_init(void);
extern int efi_memattr_apply_permissions(struct mm_struct *mm,
efi_memattr_perm_setter fn);
/*
- * efi_early_memdesc_ptr - get the n-th EFI memmap descriptor
+ * efi_memdesc_ptr - get the n-th EFI memmap descriptor
* @map: the start of efi memmap
* @desc_size: the size of space for each EFI memmap descriptor
* @n: the index of efi memmap descriptor
@@ -707,7 +797,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
* during bootup since for_each_efi_memory_desc_xxx() is available after the
* kernel initializes the EFI subsystem to set up struct efi_memory_map.
*/
-#define efi_early_memdesc_ptr(map, desc_size, n) \
+#define efi_memdesc_ptr(map, desc_size, n) \
(efi_memory_desc_t *)((void *)(map) + ((n) * (desc_size)))
/* Iterate through an efi_memory_map */
@@ -762,10 +852,6 @@ static inline int efi_range_is_wc(unsigned long start, unsigned long len)
return 1;
}
-#ifdef CONFIG_EFI_PCDP
-extern int __init efi_setup_pcdp_console(char *);
-#endif
-
/*
* We play games with efi_enabled so that the compiler will, if
* possible, remove EFI-related code altogether.
@@ -778,10 +864,9 @@ extern int __init efi_setup_pcdp_console(char *);
#define EFI_PARAVIRT 6 /* Access is via a paravirt interface */
#define EFI_ARCH_1 7 /* First arch-specific bit */
#define EFI_DBG 8 /* Print additional debug info at runtime */
-#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */
-#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
-#define EFI_MEM_NO_SOFT_RESERVE 11 /* Is the kernel configured to ignore soft reservations? */
-#define EFI_PRESERVE_BS_REGIONS 12 /* Are EFI boot-services memory segments available? */
+#define EFI_MEM_ATTR 9 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
+#define EFI_MEM_NO_SOFT_RESERVE 10 /* Is the kernel configured to ignore soft reservations? */
+#define EFI_PRESERVE_BS_REGIONS 11 /* Are EFI boot-services memory segments available? */
#ifdef CONFIG_EFI
/*
@@ -805,6 +890,7 @@ static inline bool efi_rt_services_supported(unsigned int mask)
{
return (efi.runtime_supported_mask & mask) == mask;
}
+extern void efi_find_mirror(void);
#else
static inline bool efi_enabled(int feature)
{
@@ -822,6 +908,8 @@ static inline bool efi_rt_services_supported(unsigned int mask)
{
return false;
}
+
+static inline void efi_find_mirror(void) {}
#endif
extern int efi_status_to_err(efi_status_t status);
@@ -837,7 +925,7 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x0000000000000020
#define EFI_VARIABLE_APPEND_WRITE 0x0000000000000040
-#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \
+#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \
EFI_VARIABLE_BOOTSERVICE_ACCESS | \
EFI_VARIABLE_RUNTIME_ACCESS | \
EFI_VARIABLE_HARDWARE_ERROR_RECORD | \
@@ -882,6 +970,7 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_DEV_MEDIA_VENDOR 3
#define EFI_DEV_MEDIA_FILE 4
#define EFI_DEV_MEDIA_PROTOCOL 5
+#define EFI_DEV_MEDIA_REL_OFFSET 8
#define EFI_DEV_BIOS_BOOT 0x05
#define EFI_DEV_END_PATH 0x7F
#define EFI_DEV_END_PATH2 0xFF
@@ -912,12 +1001,32 @@ struct efi_vendor_dev_path {
u8 vendordata[];
} __packed;
+struct efi_rel_offset_dev_path {
+ struct efi_generic_dev_path header;
+ u32 reserved;
+ u64 starting_offset;
+ u64 ending_offset;
+} __packed;
+
+struct efi_mem_mapped_dev_path {
+ struct efi_generic_dev_path header;
+ u32 memory_type;
+ u64 starting_addr;
+ u64 ending_addr;
+} __packed;
+
+struct efi_file_path_dev_path {
+ struct efi_generic_dev_path header;
+ efi_char16_t filename[];
+} __packed;
+
struct efi_dev_path {
union {
struct efi_generic_dev_path header;
struct efi_acpi_dev_path acpi;
struct efi_pci_dev_path pci;
struct efi_vendor_dev_path vendor;
+ struct efi_rel_offset_dev_path rel_offset;
};
} __packed;
@@ -943,90 +1052,60 @@ struct efivar_operations {
efi_set_variable_t *set_variable;
efi_set_variable_t *set_variable_nonblocking;
efi_query_variable_store_t *query_variable_store;
+ efi_query_variable_info_t *query_variable_info;
};
struct efivars {
struct kset *kset;
- struct kobject *kobject;
const struct efivar_operations *ops;
};
+#ifdef CONFIG_X86
+u64 __attribute_const__ efivar_reserved_space(void);
+#else
+static inline u64 efivar_reserved_space(void) { return 0; }
+#endif
+
/*
- * The maximum size of VariableName + Data = 1024
- * Therefore, it's reasonable to save that much
- * space in each part of the structure,
- * and we use a page for reading/writing.
+ * There is no actual upper limit specified for the variable name size.
+ *
+ * This limit exists only for practical purposes, since name conversions
+ * are bounds-checked and name data is occasionally stored in-line.
*/
-
#define EFI_VAR_NAME_LEN 1024
-struct efi_variable {
- efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
- efi_guid_t VendorGuid;
- unsigned long DataSize;
- __u8 Data[1024];
- efi_status_t Status;
- __u32 Attributes;
-} __attribute__((packed));
-
-struct efivar_entry {
- struct efi_variable var;
- struct list_head list;
- struct kobject kobj;
- bool scanning;
- bool deleting;
-};
-
-static inline void
-efivar_unregister(struct efivar_entry *var)
-{
- kobject_put(&var->kobj);
-}
-
int efivars_register(struct efivars *efivars,
- const struct efivar_operations *ops,
- struct kobject *kobject);
+ const struct efivar_operations *ops);
int efivars_unregister(struct efivars *efivars);
-struct kobject *efivars_kobject(void);
-
-int efivar_supports_writes(void);
-int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
- void *data, bool duplicates, struct list_head *head);
-
-int efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
-int efivar_entry_remove(struct efivar_entry *entry);
-
-int __efivar_entry_delete(struct efivar_entry *entry);
-int efivar_entry_delete(struct efivar_entry *entry);
-
-int efivar_entry_size(struct efivar_entry *entry, unsigned long *size);
-int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
- unsigned long *size, void *data);
-int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
- unsigned long *size, void *data);
-int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
- unsigned long size, void *data, struct list_head *head);
-int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
- unsigned long *size, void *data, bool *set);
-int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
- bool block, unsigned long size, void *data);
-
-int efivar_entry_iter_begin(void);
-void efivar_entry_iter_end(void);
-
-int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data,
- struct efivar_entry **prev);
-int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
- struct list_head *head, void *data);
-
-struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
- struct list_head *head, bool remove);
-
-bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
- unsigned long data_size);
-bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
- size_t len);
+
+#ifdef CONFIG_EFI
+bool efivar_is_available(void);
+#else
+static inline bool efivar_is_available(void) { return false; }
+#endif
+
+bool efivar_supports_writes(void);
+
+int efivar_lock(void);
+int efivar_trylock(void);
+void efivar_unlock(void);
+
+efi_status_t efivar_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 *attr, unsigned long *size, void *data);
+
+efi_status_t efivar_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name, efi_guid_t *vendor);
+
+efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data, bool nonblocking);
+
+efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data);
+
+efi_status_t efivar_query_variable_info(u32 attr, u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size);
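
A hedged sketch of the locked accessor pattern introduced above: take efivar_lock(), read, unlock. The example_get() wrapper and the EFI_ABORTED fallback are assumptions:

static efi_status_t example_get(efi_char16_t *name, efi_guid_t *vendor,
				unsigned long *size, void *data)
{
	efi_status_t status;
	u32 attr;

	if (efivar_lock())
		return EFI_ABORTED;	/* lock not available */

	status = efivar_get_variable(name, vendor, &attr, size, data);
	efivar_unlock();

	return status;
}
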
#if IS_ENABLED(CONFIG_EFI_CAPSULE_LOADER)
extern bool efi_capsule_pending(int *reset_type);
@@ -1040,43 +1119,17 @@ extern int efi_capsule_update(efi_capsule_header_t *capsule,
static inline bool efi_capsule_pending(int *reset_type) { return false; }
#endif
-#ifdef CONFIG_EFI_RUNTIME_MAP
-int efi_runtime_map_init(struct kobject *);
-int efi_get_runtime_map_size(void);
-int efi_get_runtime_map_desc_size(void);
-int efi_runtime_map_copy(void *buf, size_t bufsz);
-#else
-static inline int efi_runtime_map_init(struct kobject *kobj)
-{
- return 0;
-}
-
-static inline int efi_get_runtime_map_size(void)
-{
- return 0;
-}
-
-static inline int efi_get_runtime_map_desc_size(void)
-{
- return 0;
-}
-
-static inline int efi_runtime_map_copy(void *buf, size_t bufsz)
-{
- return 0;
-}
-
-#endif
-
#ifdef CONFIG_EFI
extern bool efi_runtime_disabled(void);
#else
static inline bool efi_runtime_disabled(void) { return true; }
#endif
-extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
+extern void efi_call_virt_check_flags(unsigned long flags, const void *caller);
extern unsigned long efi_call_virt_save_flags(void);
+void efi_runtime_assert_lock_held(void);
+
enum efi_secureboot_mode {
efi_secureboot_mode_unset,
efi_secureboot_mode_unknown,
@@ -1112,11 +1165,10 @@ void efi_check_for_embedded_firmwares(void);
static inline void efi_check_for_embedded_firmwares(void) { }
#endif
-efi_status_t efi_random_get_seed(void);
+#define arch_efi_call_virt(p, f, args...) ((p)->f(args))
/*
- * Arch code can implement the following three template macros, avoiding
- * reptition for the void/non-void return cases of {__,}efi_call_virt():
+ * Arch code must implement the following three routines:
*
* * arch_efi_call_virt_setup()
*
@@ -1125,9 +1177,8 @@ efi_status_t efi_random_get_seed(void);
*
* * arch_efi_call_virt()
*
- * Performs the call. The last expression in the macro must be the call
- * itself, allowing the logic to be shared by the void and non-void
- * cases.
+ * Performs the call. This routine takes a variable number of arguments so
+ * it must be implemented as a variadic preprocessor macro.
*
* * arch_efi_call_virt_teardown()
*
@@ -1136,34 +1187,21 @@ efi_status_t efi_random_get_seed(void);
#define efi_call_virt_pointer(p, f, args...) \
({ \
- efi_status_t __s; \
+ typeof((p)->f(args)) __s; \
unsigned long __flags; \
\
arch_efi_call_virt_setup(); \
\
__flags = efi_call_virt_save_flags(); \
__s = arch_efi_call_virt(p, f, args); \
- efi_call_virt_check_flags(__flags, __stringify(f)); \
+ efi_call_virt_check_flags(__flags, NULL); \
\
arch_efi_call_virt_teardown(); \
\
__s; \
})
-#define __efi_call_virt_pointer(p, f, args...) \
-({ \
- unsigned long __flags; \
- \
- arch_efi_call_virt_setup(); \
- \
- __flags = efi_call_virt_save_flags(); \
- arch_efi_call_virt(p, f, args); \
- efi_call_virt_check_flags(__flags, __stringify(f)); \
- \
- arch_efi_call_virt_teardown(); \
-})
-
-#define EFI_RANDOM_SEED_SIZE 64U
+#define EFI_RANDOM_SEED_SIZE 32U // BLAKE2S_HASH_SIZE
struct linux_efi_random_seed {
u32 size;
@@ -1188,6 +1226,10 @@ extern int efi_tpm_final_log_size;
extern unsigned long rci2_table_phys;
+efi_status_t
+efi_call_acpi_prm_handler(efi_status_t (__efiapi *handler_addr)(u64, void *),
+ u64 param_buffer_addr, void *context);
+
/*
* efi_runtime_service() function identifiers.
* "NONE" is used by efi_recover_from_page_fault() to check if the page
@@ -1207,25 +1249,26 @@ enum efi_rts_ids {
EFI_RESET_SYSTEM,
EFI_UPDATE_CAPSULE,
EFI_QUERY_CAPSULE_CAPS,
+ EFI_ACPI_PRM_HANDLER,
};
+union efi_rts_args;
+
/*
* efi_runtime_work: Details of EFI Runtime Service work
- * @arg<1-5>: EFI Runtime Service function arguments
+ * @args: Pointer to union describing the arguments
* @status: Status of executing EFI Runtime Service
* @efi_rts_id: EFI Runtime Service function identifier
* @efi_rts_comp: Struct used for handling completions
+ * @caller: The caller of the runtime service
*/
struct efi_runtime_work {
- void *arg1;
- void *arg2;
- void *arg3;
- void *arg4;
- void *arg5;
- efi_status_t status;
- struct work_struct work;
- enum efi_rts_ids efi_rts_id;
- struct completion efi_rts_comp;
+ union efi_rts_args *args;
+ efi_status_t status;
+ struct work_struct work;
+ enum efi_rts_ids efi_rts_id;
+ struct completion efi_rts_comp;
+ const void *caller;
};
extern struct efi_runtime_work efi_rts_work;
@@ -1248,8 +1291,6 @@ struct linux_efi_memreserve {
void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size);
-char *efi_systab_show_arch(char *str);
-
/*
* The LINUX_EFI_MOK_VARIABLE_TABLE_GUID config table can be provided
* to the kernel by an EFI boot loader. The table contains a packed
@@ -1282,4 +1323,44 @@ static inline struct efi_mokvar_table_entry *efi_mokvar_entry_find(
}
#endif
+extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+
+struct linux_efi_coco_secret_area {
+ u64 base_pa;
+ u64 size;
+};
+
+struct linux_efi_initrd {
+ unsigned long base;
+ unsigned long size;
+};
+
+/* Header of a populated EFI secret area */
+#define EFI_SECRET_TABLE_HEADER_GUID EFI_GUID(0x1e74f542, 0x71dd, 0x4d66, 0x96, 0x3e, 0xef, 0x42, 0x87, 0xff, 0x17, 0x3b)
+
+bool xen_efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table);
+
+static __always_inline
+bool efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table)
+{
+ if (!IS_ENABLED(CONFIG_XEN_EFI))
+ return true;
+ return xen_efi_config_table_is_usable(guid, table);
+}
+
+umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n);
+
+int ovmf_log_probe(unsigned long ovmf_debug_log_table);
+
+/*
+ * efivar ops event type
+ */
+#define EFIVAR_OPS_RDONLY 0
+#define EFIVAR_OPS_RDWR 1
+
+extern struct blocking_notifier_head efivar_ops_nh;
+
+void efivars_generic_ops_register(void);
+void efivars_generic_ops_unregister(void);
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/ehl_pse_io_aux.h b/include/linux/ehl_pse_io_aux.h
new file mode 100644
index 000000000000..afb8587ee5fb
--- /dev/null
+++ b/include/linux/ehl_pse_io_aux.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel Elkhart Lake PSE I/O Auxiliary Device
+ *
+ * Copyright (c) 2025 Intel Corporation.
+ *
+ * Author: Raag Jadav <raag.jadav@intel.com>
+ */
+
+#ifndef _EHL_PSE_IO_AUX_H_
+#define _EHL_PSE_IO_AUX_H_
+
+#include <linux/ioport.h>
+
+#define EHL_PSE_IO_NAME "ehl_pse_io"
+#define EHL_PSE_GPIO_NAME "gpio"
+#define EHL_PSE_TIO_NAME "pps_tio"
+
+struct ehl_pse_io_data {
+ struct resource mem;
+ int irq;
+};
+
+#endif /* _EHL_PSE_IO_AUX_H_ */
diff --git a/include/linux/eisa.h b/include/linux/eisa.h
index b012e30afebd..cf55630b595b 100644
--- a/include/linux/eisa.h
+++ b/include/linux/eisa.h
@@ -28,6 +28,9 @@
#define EISA_CONFIG_ENABLED 1
#define EISA_CONFIG_FORCED 2
+/* Chosen to hold the longest string in eisa.ids. */
+#define EISA_DEVICE_INFO_NAME_SIZE 74
+
/* There is not much we can say about an EISA device, apart from
* signature, slot number, and base address. dma_mask is set by
 * default to the parent device mask. */
@@ -41,7 +44,7 @@ struct eisa_device {
u64 dma_mask;
struct device dev; /* generic device */
#ifdef CONFIG_EISA_NAMES
- char pretty_name[50];
+ char pretty_name[EISA_DEVICE_INFO_NAME_SIZE];
#endif
};
@@ -60,12 +63,12 @@ struct eisa_driver {
struct device_driver driver;
};
-#define to_eisa_driver(drv) container_of(drv,struct eisa_driver, driver)
+#define to_eisa_driver(drv) container_of_const(drv, struct eisa_driver, driver)
/* These external functions are only available when EISA support is enabled. */
#ifdef CONFIG_EISA
-extern struct bus_type eisa_bus_type;
+extern const struct bus_type eisa_bus_type;
int eisa_driver_register (struct eisa_driver *edrv);
void eisa_driver_unregister (struct eisa_driver *edrv);
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
deleted file mode 100644
index dcb2f9022c1d..000000000000
--- a/include/linux/elevator.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_ELEVATOR_H
-#define _LINUX_ELEVATOR_H
-
-#include <linux/percpu.h>
-#include <linux/hashtable.h>
-
-#ifdef CONFIG_BLOCK
-
-struct io_cq;
-struct elevator_type;
-#ifdef CONFIG_BLK_DEBUG_FS
-struct blk_mq_debugfs_attr;
-#endif
-
-/*
- * Return values from elevator merger
- */
-enum elv_merge {
- ELEVATOR_NO_MERGE = 0,
- ELEVATOR_FRONT_MERGE = 1,
- ELEVATOR_BACK_MERGE = 2,
- ELEVATOR_DISCARD_MERGE = 3,
-};
-
-struct blk_mq_alloc_data;
-struct blk_mq_hw_ctx;
-
-struct elevator_mq_ops {
- int (*init_sched)(struct request_queue *, struct elevator_type *);
- void (*exit_sched)(struct elevator_queue *);
- int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
- void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
- void (*depth_updated)(struct blk_mq_hw_ctx *);
-
- bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
- bool (*bio_merge)(struct request_queue *, struct bio *, unsigned int);
- int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
- void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
- void (*requests_merged)(struct request_queue *, struct request *, struct request *);
- void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
- void (*prepare_request)(struct request *);
- void (*finish_request)(struct request *);
- void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
- struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
- bool (*has_work)(struct blk_mq_hw_ctx *);
- void (*completed_request)(struct request *, u64);
- void (*requeue_request)(struct request *);
- struct request *(*former_request)(struct request_queue *, struct request *);
- struct request *(*next_request)(struct request_queue *, struct request *);
- void (*init_icq)(struct io_cq *);
- void (*exit_icq)(struct io_cq *);
-};
-
-#define ELV_NAME_MAX (16)
-
-struct elv_fs_entry {
- struct attribute attr;
- ssize_t (*show)(struct elevator_queue *, char *);
- ssize_t (*store)(struct elevator_queue *, const char *, size_t);
-};
-
-/*
- * identifies an elevator type, such as AS or deadline
- */
-struct elevator_type
-{
- /* managed by elevator core */
- struct kmem_cache *icq_cache;
-
- /* fields provided by elevator implementation */
- struct elevator_mq_ops ops;
-
- size_t icq_size; /* see iocontext.h */
- size_t icq_align; /* ditto */
- struct elv_fs_entry *elevator_attrs;
- const char *elevator_name;
- const char *elevator_alias;
- const unsigned int elevator_features;
- struct module *elevator_owner;
-#ifdef CONFIG_BLK_DEBUG_FS
- const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
- const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
-#endif
-
- /* managed by elevator core */
- char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */
- struct list_head list;
-};
-
-#define ELV_HASH_BITS 6
-
-void elv_rqhash_del(struct request_queue *q, struct request *rq);
-void elv_rqhash_add(struct request_queue *q, struct request *rq);
-void elv_rqhash_reposition(struct request_queue *q, struct request *rq);
-struct request *elv_rqhash_find(struct request_queue *q, sector_t offset);
-
-/*
- * each queue has an elevator_queue associated with it
- */
-struct elevator_queue
-{
- struct elevator_type *type;
- void *elevator_data;
- struct kobject kobj;
- struct mutex sysfs_lock;
- unsigned int registered:1;
- DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
-};
-
-/*
- * block elevator interface
- */
-extern enum elv_merge elv_merge(struct request_queue *, struct request **,
- struct bio *);
-extern void elv_merge_requests(struct request_queue *, struct request *,
- struct request *);
-extern void elv_merged_request(struct request_queue *, struct request *,
- enum elv_merge);
-extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
-extern struct request *elv_former_request(struct request_queue *, struct request *);
-extern struct request *elv_latter_request(struct request_queue *, struct request *);
-
-/*
- * io scheduler registration
- */
-extern int elv_register(struct elevator_type *);
-extern void elv_unregister(struct elevator_type *);
-
-/*
- * io scheduler sysfs switching
- */
-extern ssize_t elv_iosched_show(struct request_queue *, char *);
-extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
-
-extern bool elv_bio_merge_ok(struct request *, struct bio *);
-extern struct elevator_queue *elevator_alloc(struct request_queue *,
- struct elevator_type *);
-
-/*
- * Helper functions.
- */
-extern struct request *elv_rb_former_request(struct request_queue *, struct request *);
-extern struct request *elv_rb_latter_request(struct request_queue *, struct request *);
-
-/*
- * rb support functions.
- */
-extern void elv_rb_add(struct rb_root *, struct request *);
-extern void elv_rb_del(struct rb_root *, struct request *);
-extern struct request *elv_rb_find(struct rb_root *, sector_t);
-
-/*
- * Insertion selection
- */
-#define ELEVATOR_INSERT_FRONT 1
-#define ELEVATOR_INSERT_BACK 2
-#define ELEVATOR_INSERT_SORT 3
-#define ELEVATOR_INSERT_REQUEUE 4
-#define ELEVATOR_INSERT_FLUSH 5
-#define ELEVATOR_INSERT_SORT_MERGE 6
-
-#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
-#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
-
-#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
-#define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist)
-
-/*
- * Elevator features.
- */
-
-/* Supports zoned block devices sequential write constraint */
-#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0)
-/* Supports scheduling on multiple hardware queues */
-#define ELEVATOR_F_MQ_AWARE (1U << 1)
-
-#endif /* CONFIG_BLOCK */
-#endif
diff --git a/include/linux/elf-fdpic.h b/include/linux/elf-fdpic.h
index 3bea95a1af53..e533f4513194 100644
--- a/include/linux/elf-fdpic.h
+++ b/include/linux/elf-fdpic.h
@@ -10,13 +10,25 @@
#include <uapi/linux/elf-fdpic.h>
+#if ELF_CLASS == ELFCLASS32
+#define Elf_Sword Elf32_Sword
+#define elf_fdpic_loadseg elf32_fdpic_loadseg
+#define elf_fdpic_loadmap elf32_fdpic_loadmap
+#define ELF_FDPIC_LOADMAP_VERSION ELF32_FDPIC_LOADMAP_VERSION
+#else
+#define Elf_Sword Elf64_Sxword
+#define elf_fdpic_loadmap elf64_fdpic_loadmap
+#define elf_fdpic_loadseg elf64_fdpic_loadseg
+#define ELF_FDPIC_LOADMAP_VERSION ELF64_FDPIC_LOADMAP_VERSION
+#endif
+
/*
* binfmt binary parameters structure
*/
struct elf_fdpic_params {
struct elfhdr hdr; /* ref copy of ELF header */
struct elf_phdr *phdrs; /* ref copy of PT_PHDR table */
- struct elf32_fdpic_loadmap *loadmap; /* loadmap to be passed to userspace */
+ struct elf_fdpic_loadmap *loadmap; /* loadmap to be passed to userspace */
unsigned long elfhdr_addr; /* mapped ELF header user address */
unsigned long ph_addr; /* mapped PT_PHDR user address */
unsigned long map_addr; /* mapped loadmap user address */
diff --git a/include/linux/elf.h b/include/linux/elf.h
index c9a46c4e183b..5c402788da19 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -65,7 +65,7 @@ extern Elf64_Dyn _DYNAMIC [];
struct file;
struct coredump_params;
-#ifndef ARCH_HAVE_EXTRA_ELF_NOTES
+#ifndef CONFIG_ARCH_HAVE_EXTRA_ELF_NOTES
static inline int elf_coredump_extra_notes_size(void) { return 0; }
static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) { return 0; }
#else
diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h
index e272c3d452ce..54feb64e9b5d 100644
--- a/include/linux/elfcore-compat.h
+++ b/include/linux/elfcore-compat.h
@@ -43,6 +43,11 @@ struct compat_elf_prpsinfo
__compat_uid_t pr_uid;
__compat_gid_t pr_gid;
compat_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+	/*
+	 * The hard-coded 16 is derived from TASK_COMM_LEN, but it cannot be
+	 * changed because the layout is exposed to userspace, so it must
+	 * stay hard-coded here.
+	 */
char pr_fname[16];
char pr_psargs[ELF_PRARGSZ];
};
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 2aaa15779d50..bd5560542c79 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -65,6 +65,11 @@ struct elf_prpsinfo
__kernel_gid_t pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
+	/*
+	 * The hard-coded 16 is derived from TASK_COMM_LEN, but it cannot be
+	 * changed because the layout is exposed to userspace, so it must
+	 * stay hard-coded here.
+	 */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
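A compile-time check makes the constraint in the new comment concrete; this is a hedged sketch outside the kernel tree, assuming a C11 compiler for _Static_assert and the usual UAPI value of ELF_PRARGSZ.

	#define ELF_PRARGSZ 80		/* value assumed from the UAPI header */

	struct elf_prpsinfo_demo {	/* trimmed stand-in for struct elf_prpsinfo */
		char pr_fname[16];
		char pr_psargs[ELF_PRARGSZ];
	};

	/* The ABI freezes pr_fname at 16 bytes even if TASK_COMM_LEN changes. */
	_Static_assert(sizeof(((struct elf_prpsinfo_demo *)0)->pr_fname) == 16,
		       "pr_fname is core-dump ABI and must stay 16 bytes");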
@@ -79,37 +84,19 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
#endif
}
-static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
-{
-#ifdef ELF_CORE_COPY_KERNEL_REGS
- ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs);
-#else
- elf_core_copy_regs(elfregs, regs);
-#endif
-}
-
static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
{
#if defined (ELF_CORE_COPY_TASK_REGS)
return ELF_CORE_COPY_TASK_REGS(t, elfregs);
-#elif defined (task_pt_regs)
+#else
elf_core_copy_regs(elfregs, task_pt_regs(t));
#endif
return 0;
}
-extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
+int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
-static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_regs *regs, elf_fpregset_t *fpu)
-{
-#ifdef ELF_CORE_COPY_FPREGS
- return ELF_CORE_COPY_FPREGS(t, fpu);
-#else
- return dump_fpu(regs, fpu);
-#endif
-}
-
-#if defined(CONFIG_UM) || defined(CONFIG_IA64)
+#ifdef CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS
/*
* These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
* extra segments containing the gate DSO contents. Dumping its
@@ -118,14 +105,14 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg
* Dumping its extra ELF program headers includes all the other information
* a debugger needs to easily find how the gate DSO was being used.
*/
-extern Elf_Half elf_core_extra_phdrs(void);
+extern Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm);
extern int
elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
extern int
elf_core_write_extra_data(struct coredump_params *cprm);
-extern size_t elf_core_extra_data_size(void);
+extern size_t elf_core_extra_data_size(struct coredump_params *cprm);
#else
-static inline Elf_Half elf_core_extra_phdrs(void)
+static inline Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
{
return 0;
}
@@ -140,10 +127,10 @@ static inline int elf_core_write_extra_data(struct coredump_params *cprm)
return 1;
}
-static inline size_t elf_core_extra_data_size(void)
+static inline size_t elf_core_extra_data_size(struct coredump_params *cprm)
{
return 0;
}
-#endif
+#endif /* CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS */
#endif /* _LINUX_ELFCORE_H */
diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h
index 69b136e4dd2b..bb3dcded055f 100644
--- a/include/linux/elfnote.h
+++ b/include/linux/elfnote.h
@@ -60,23 +60,21 @@
#else /* !__ASSEMBLER__ */
#include <uapi/linux/elf.h>
+#include <linux/compiler.h>
/*
* Use an anonymous structure which matches the shape of
* Elf{32,64}_Nhdr, but includes the name and desc data. The size and
* type of name and desc depend on the macro arguments. "name" must
- * be a literal string, and "desc" must be passed by value. You may
- * only define one note per line, since __LINE__ is used to generate
- * unique symbols.
+ * be a literal string, and "desc" must be passed by value.
*/
-#define _ELFNOTE_PASTE(a,b) a##b
-#define _ELFNOTE(size, name, unique, type, desc) \
+#define ELFNOTE(size, name, type, desc) \
static const struct { \
struct elf##size##_note _nhdr; \
unsigned char _name[sizeof(name)] \
__attribute__((aligned(sizeof(Elf##size##_Word)))); \
typeof(desc) _desc \
__attribute__((aligned(sizeof(Elf##size##_Word)))); \
- } _ELFNOTE_PASTE(_note_, unique) \
+ } __UNIQUE_ID(note) \
__used \
__attribute__((section(".note." name), \
aligned(sizeof(Elf##size##_Word)), \
@@ -89,11 +87,10 @@
name, \
desc \
}
-#define ELFNOTE(size, name, type, desc) \
- _ELFNOTE(size, name, __LINE__, type, desc)
#define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc)
#define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc)
+
#endif /* __ASSEMBLER__ */
#endif /* _LINUX_ELFNOTE_H */
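A hedged usage sketch of the simplified macro; the note name, type and payload below are invented for the demo.

	#include <linux/elfnote.h>

	/* Emits an SHT_NOTE entry in section ".note.Demo". With __UNIQUE_ID()
	 * the symbol name no longer depends on __LINE__, so two notes may
	 * even share a source line. */
	ELFNOTE32("Demo", 0x1, 0x2a);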
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index 757fc60658fa..43aa6153dc57 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -5,31 +5,61 @@
#include <linux/device.h>
#include <linux/jump_label.h>
#include <linux/kobject.h>
+#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/topology.h>
#include <linux/types.h>
/**
- * em_perf_state - Performance state of a performance domain
+ * struct em_perf_state - Performance state of a performance domain
+ * @performance: CPU performance (capacity) at a given frequency
* @frequency: The frequency in KHz, for consistency with CPUFreq
* @power: The power consumed at this level (by 1 CPU or by a registered
* device). It can be a total power: static and dynamic.
* @cost: The cost coefficient associated with this level, used during
* energy calculation. Equal to: power * max_frequency / frequency
+ * @flags: see "em_perf_state flags" description below.
*/
struct em_perf_state {
+ unsigned long performance;
unsigned long frequency;
unsigned long power;
unsigned long cost;
+ unsigned long flags;
};
+/*
+ * em_perf_state flags:
+ *
+ * EM_PERF_STATE_INEFFICIENT: The performance state is inefficient: this
+ * em_perf_domain contains another performance state with a higher frequency
+ * but a lower or equal power cost. Such inefficient states are ignored when
+ * using em_pd_get_efficient_*() functions.
+ */
+#define EM_PERF_STATE_INEFFICIENT BIT(0)
+
/**
- * em_perf_domain - Performance domain
- * @table: List of performance states, in ascending order
+ * struct em_perf_table - Performance states table
+ * @rcu: RCU used for safe access and destruction
+ * @kref: Reference counter to track the users
+ * @state: List of performance states, in ascending order
+ */
+struct em_perf_table {
+ struct rcu_head rcu;
+ struct kref kref;
+ struct em_perf_state state[];
+};
+
+/**
+ * struct em_perf_domain - Performance domain
+ * @em_table: Pointer to the runtime modifiable em_perf_table
+ * @node: node in em_pd_list (in energy_model.c)
+ * @id: A unique ID number for each performance domain
* @nr_perf_states: Number of performance states
- * @milliwatts: Flag indicating the power values are in milli-Watts
- * or some other scale.
+ * @min_perf_state: Minimum allowed Performance State index
+ * @max_perf_state: Maximum allowed Performance State index
+ * @flags: See "em_perf_domain flags"
* @cpus: Cpumask covering the CPUs of the domain. It's here
* for performance reasons to avoid potential cache
* misses during energy calculations in the scheduler
@@ -42,55 +72,166 @@ struct em_perf_state {
* field is unused.
*/
struct em_perf_domain {
- struct em_perf_state *table;
+ struct em_perf_table __rcu *em_table;
+ struct list_head node;
+ int id;
int nr_perf_states;
- int milliwatts;
+ int min_perf_state;
+ int max_perf_state;
+ unsigned long flags;
unsigned long cpus[];
};
+/*
+ * em_perf_domain flags:
+ *
+ * EM_PERF_DOMAIN_MICROWATTS: The power values are in micro-Watts or some
+ * other scale.
+ *
+ * EM_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip inefficient states when estimating
+ * energy consumption.
+ *
+ * EM_PERF_DOMAIN_ARTIFICIAL: The power values are artificial and might have
+ * been created by a platform that lacks real power information.
+ */
+#define EM_PERF_DOMAIN_MICROWATTS BIT(0)
+#define EM_PERF_DOMAIN_SKIP_INEFFICIENCIES BIT(1)
+#define EM_PERF_DOMAIN_ARTIFICIAL BIT(2)
+
#define em_span_cpus(em) (to_cpumask((em)->cpus))
+#define em_is_artificial(em) ((em)->flags & EM_PERF_DOMAIN_ARTIFICIAL)
#ifdef CONFIG_ENERGY_MODEL
-#define EM_MAX_POWER 0xFFFF
+/*
+ * The max power value in micro-Watts. The limit of 64 Watts is set as
+ * a safety net so that multiplications cannot overflow on 32bit
+ * platforms. Keeping the total Perf Domain power within a 32bit value
+ * limits the maximum number of CPUs in such a domain to 64.
+ */
+#define EM_MAX_POWER (64000000) /* 64 Watts */
+
+/*
+ * To avoid possible energy estimation overflow on 32bit machines, limit
+ * the number of CPUs in the Perf. Domain. 64bit machines are safe, so
+ * a large number can be used there.
+ */
+#ifdef CONFIG_64BIT
+#define EM_MAX_NUM_CPUS 4096
+#else
+#define EM_MAX_NUM_CPUS 16
+#endif
struct em_data_callback {
/**
* active_power() - Provide power at the next performance state of
* a device
+ * @dev : Device for which we do this operation (can be a CPU)
* @power : Active power at the performance state
* (modified)
* @freq : Frequency at the performance state in kHz
* (modified)
- * @dev : Device for which we do this operation (can be a CPU)
*
* active_power() must find the lowest performance state of 'dev' above
* 'freq' and update 'power' and 'freq' to the matching active power
* and frequency.
*
* In case of CPUs, the power is the one of a single CPU in the domain,
- * expressed in milli-Watts or an abstract scale. It is expected to
+ * expressed in micro-Watts or an abstract scale. It is expected to
* fit in the [0, EM_MAX_POWER] range.
*
* Return 0 on success.
*/
- int (*active_power)(unsigned long *power, unsigned long *freq,
- struct device *dev);
+ int (*active_power)(struct device *dev, unsigned long *power,
+ unsigned long *freq);
+
+ /**
+ * get_cost() - Provide the cost at the given performance state of
+ * a device
+ * @dev : Device for which we do this operation (can be a CPU)
+ * @freq : Frequency at the performance state in kHz
+ * @cost : The cost value for the performance state
+ * (modified)
+ *
+ * In case of CPUs, the cost is the one of a single CPU in the domain.
+ * It is expected to fit in the [0, EM_MAX_POWER] range due to internal
+ * usage in EAS calculation.
+ *
+ * Return 0 on success, or appropriate error value in case of failure.
+ */
+ int (*get_cost)(struct device *dev, unsigned long freq,
+ unsigned long *cost);
};
-#define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb }
+#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) ((em_cb).active_power = cb)
+#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) \
+ { .active_power = _active_power_cb, \
+ .get_cost = _cost_cb }
+#define EM_DATA_CB(_active_power_cb) \
+ EM_ADV_DATA_CB(_active_power_cb, NULL)
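A hedged sketch of a driver using the reordered callback signature; the frequency/power table and the registration values below are invented for illustration.

	static int demo_active_power(struct device *dev, unsigned long *power,
				     unsigned long *freq)
	{
		/* Hypothetical OPP-style table: freq in kHz, power in uW. */
		static const unsigned long khz[] = {  500000, 1000000, 1500000 };
		static const unsigned long uw[]  = {  100000,  300000,  700000 };
		int i;

		/* Find the lowest state at or above the requested frequency. */
		for (i = 0; i < ARRAY_SIZE(khz); i++) {
			if (khz[i] >= *freq) {
				*freq = khz[i];
				*power = uw[i];
				return 0;
			}
		}
		return -EINVAL;
	}

	static const struct em_data_callback demo_cb = EM_DATA_CB(demo_active_power);

	/* Registration would then pass microwatts = true for the new scale:
	 *	em_dev_register_perf_domain(dev, 3, &demo_cb, cpus, true);
	 */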
struct em_perf_domain *em_cpu_get(int cpu);
struct em_perf_domain *em_pd_get(struct device *dev);
+int em_dev_update_perf_domain(struct device *dev,
+ struct em_perf_table *new_table);
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
- struct em_data_callback *cb, cpumask_t *span,
- bool milliwatts);
+ const struct em_data_callback *cb,
+ const cpumask_t *cpus, bool microwatts);
+int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
+ const struct em_data_callback *cb,
+ const cpumask_t *cpus, bool microwatts);
void em_dev_unregister_perf_domain(struct device *dev);
+struct em_perf_table *em_table_alloc(struct em_perf_domain *pd);
+void em_table_free(struct em_perf_table *table);
+int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
+ int nr_states);
+int em_dev_update_chip_binning(struct device *dev);
+int em_update_performance_limits(struct em_perf_domain *pd,
+ unsigned long freq_min_khz, unsigned long freq_max_khz);
+void em_adjust_cpu_capacity(unsigned int cpu);
+void em_rebuild_sched_domains(void);
+
+/**
+ * em_pd_get_efficient_state() - Get an efficient performance state from the EM
+ * @table: List of performance states, in ascending order
+ * @pd: performance domain for which this must be done
+ * @max_util: Max utilization to map with the EM
+ *
+ * It is called from the scheduler code quite frequently and as a consequence
+ * doesn't implement any check.
+ *
+ * Return: An efficient performance state id, high enough to meet @max_util
+ * requirement.
+ */
+static inline int
+em_pd_get_efficient_state(struct em_perf_state *table,
+ struct em_perf_domain *pd, unsigned long max_util)
+{
+ unsigned long pd_flags = pd->flags;
+ int min_ps = pd->min_perf_state;
+ int max_ps = pd->max_perf_state;
+ struct em_perf_state *ps;
+ int i;
+
+ for (i = min_ps; i <= max_ps; i++) {
+ ps = &table[i];
+ if (ps->performance >= max_util) {
+ if (pd_flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES &&
+ ps->flags & EM_PERF_STATE_INEFFICIENT)
+ continue;
+ return i;
+ }
+ }
+
+ return max_ps;
+}
/**
* em_cpu_energy() - Estimates the energy consumed by the CPUs of a
- performance domain
+ * performance domain
* @pd : performance domain for which energy has to be estimated
* @max_util : highest utilization among CPUs of the domain
* @sum_util : sum of the utilization of all CPUs in the domain
+ * @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which
+ * might reflect reduced frequency (due to thermal)
*
* This function must be used only for CPU devices. There is no validation,
* i.e. if the EM is a CPU type and has cpumask allocated. It is called from
@@ -100,11 +241,14 @@ void em_dev_unregister_perf_domain(struct device *dev);
* a capacity state satisfying the max utilization of the domain.
*/
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
- unsigned long max_util, unsigned long sum_util)
+ unsigned long max_util, unsigned long sum_util,
+ unsigned long allowed_cpu_cap)
{
- unsigned long freq, scale_cpu;
+ struct em_perf_table *em_table;
struct em_perf_state *ps;
- int i, cpu;
+ int i;
+
+ WARN_ONCE(!rcu_read_lock_held(), "EM: rcu read lock needed\n");
if (!sum_util)
return 0;
@@ -112,30 +256,28 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
/*
* In order to predict the performance state, map the utilization of
* the most utilized CPU of the performance domain to a requested
- * frequency, like schedutil.
+ * performance, like schedutil. Also take into account that the real
+ * performance might be set lower (due to thermal capping). Thus, clamp
+ * max utilization to the allowed CPU capacity before calculating
+ * effective performance.
*/
- cpu = cpumask_first(to_cpumask(pd->cpus));
- scale_cpu = arch_scale_cpu_capacity(cpu);
- ps = &pd->table[pd->nr_perf_states - 1];
- freq = map_util_freq(max_util, ps->frequency, scale_cpu);
+ max_util = min(max_util, allowed_cpu_cap);
/*
* Find the lowest performance state of the Energy Model above the
- * requested frequency.
+ * requested performance.
*/
- for (i = 0; i < pd->nr_perf_states; i++) {
- ps = &pd->table[i];
- if (ps->frequency >= freq)
- break;
- }
+ em_table = rcu_dereference(pd->em_table);
+ i = em_pd_get_efficient_state(em_table->state, pd, max_util);
+ ps = &em_table->state[i];
/*
- * The capacity of a CPU in the domain at the performance state (ps)
- * can be computed as:
+ * The performance (capacity) of a CPU in the domain at the performance
+ * state (ps) can be computed as:
*
- * ps->freq * scale_cpu
- * ps->cap = -------------------- (1)
- * cpu_max_freq
+ * ps->freq * scale_cpu
+ * ps->performance = -------------------- (1)
+ * cpu_max_freq
*
* So, ignoring the costs of idle states (which are not available in
* the EM), the energy consumed by this CPU at that performance state
@@ -143,9 +285,10 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
*
* ps->power * cpu_util
* cpu_nrg = -------------------- (2)
- * ps->cap
+ * ps->performance
*
- * since 'cpu_util / ps->cap' represents its percentage of busy time.
+ * since 'cpu_util / ps->performance' represents its percentage of busy
+ * time.
*
* NOTE: Although the result of this computation actually is in
* units of power, it can be manipulated as an energy value
@@ -155,9 +298,9 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
* By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
* of two terms:
*
- * ps->power * cpu_max_freq cpu_util
- * cpu_nrg = ------------------------ * --------- (3)
- * ps->freq scale_cpu
+ * ps->power * cpu_max_freq
+ * cpu_nrg = ------------------------ * cpu_util (3)
+ * ps->freq * scale_cpu
*
* The first term is static, and is stored in the em_perf_state struct
* as 'ps->cost'.
@@ -167,11 +310,9 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
* total energy of the domain (which is the simple sum of the energy of
* all of its CPUs) can be factorized as:
*
- * ps->cost * \Sum cpu_util
- * pd_nrg = ------------------------ (4)
- * scale_cpu
+ * pd_nrg = ps->cost * \Sum cpu_util (4)
*/
- return ps->cost * sum_util / scale_cpu;
+ return ps->cost * sum_util;
}
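As a worked example with invented numbers: take cpu_max_freq = 2000000 kHz, scale_cpu = 1024, and a state with ps->freq = 1000000 kHz and ps->power = 300000 uW. Folding (1) into (3) gives

	ps->cost = 300000 * 2000000 / (1000000 * 1024) ~= 585

and with sum_util = 600, formula (4) yields pd_nrg = 585 * 600 = 351000 in the same abstract energy units; no per-call division by scale_cpu remains.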
/**
@@ -186,14 +327,40 @@ static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
return pd->nr_perf_states;
}
+/**
+ * em_perf_state_from_pd() - Get the performance states table of perf.
+ * domain
+ * @pd : performance domain for which this must be done
+ *
+ * To use this function, rcu_read_lock() must be held. After the usage of
+ * the performance states table is finished, rcu_read_unlock() must be
+ * called.
+ *
+ * Return: the pointer to performance states table of the performance domain
+ */
+static inline
+struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
+{
+ return rcu_dereference(pd->em_table)->state;
+}
+
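A hedged sketch of the required RCU discipline around the accessor; pd and the index are assumed valid by the caller.

	static unsigned long demo_state_freq(struct em_perf_domain *pd, int i)
	{
		struct em_perf_state *table;
		unsigned long freq;

		rcu_read_lock();
		table = em_perf_state_from_pd(pd);
		freq = table[i].frequency;	/* only valid inside the read section */
		rcu_read_unlock();

		return freq;
	}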
#else
struct em_data_callback {};
+#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) { }
#define EM_DATA_CB(_active_power_cb) { }
+#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) do { } while (0)
static inline
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
- struct em_data_callback *cb, cpumask_t *span,
- bool milliwatts)
+ const struct em_data_callback *cb,
+ const cpumask_t *cpus, bool microwatts)
+{
+ return -EINVAL;
+}
+static inline
+int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
+ const struct em_data_callback *cb,
+ const cpumask_t *cpus, bool microwatts)
{
return -EINVAL;
}
@@ -209,7 +376,8 @@ static inline struct em_perf_domain *em_pd_get(struct device *dev)
return NULL;
}
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
- unsigned long max_util, unsigned long sum_util)
+ unsigned long max_util, unsigned long sum_util,
+ unsigned long allowed_cpu_cap)
{
return 0;
}
@@ -217,6 +385,41 @@ static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
return 0;
}
+static inline
+struct em_perf_table *em_table_alloc(struct em_perf_domain *pd)
+{
+ return NULL;
+}
+static inline void em_table_free(struct em_perf_table *table) {}
+static inline
+int em_dev_update_perf_domain(struct device *dev,
+ struct em_perf_table *new_table)
+{
+ return -EINVAL;
+}
+static inline
+struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
+{
+ return NULL;
+}
+static inline
+int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
+ int nr_states)
+{
+ return -EINVAL;
+}
+static inline int em_dev_update_chip_binning(struct device *dev)
+{
+ return -EINVAL;
+}
+static inline
+int em_update_performance_limits(struct em_perf_domain *pd,
+ unsigned long freq_min_khz, unsigned long freq_max_khz)
+{
+ return -EINVAL;
+}
+static inline void em_adjust_cpu_capacity(unsigned int cpu) {}
+static inline void em_rebuild_sched_domains(void) {}
#endif
#endif
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index 2e2b8d6140ed..87efb38b7081 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -2,21 +2,15 @@
#ifndef __LINUX_ENTRYCOMMON_H
#define __LINUX_ENTRYCOMMON_H
-#include <linux/static_call_types.h>
-#include <linux/tracehook.h>
-#include <linux/syscalls.h>
+#include <linux/irq-entry-common.h>
+#include <linux/livepatch.h>
+#include <linux/ptrace.h>
+#include <linux/resume_user_mode.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
#include <asm/entry-common.h>
-
-/*
- * Define dummy _TIF work flags if not defined by the architecture or for
- * disabled functionality.
- */
-#ifndef _TIF_PATCH_PENDING
-# define _TIF_PATCH_PENDING (0)
-#endif
+#include <asm/syscall.h>
#ifndef _TIF_UPROBE
# define _TIF_UPROBE (0)
@@ -43,6 +37,7 @@
SYSCALL_WORK_SYSCALL_AUDIT | \
SYSCALL_WORK_SYSCALL_USER_DISPATCH | \
ARCH_SYSCALL_WORK_ENTER)
+
#define SYSCALL_WORK_EXIT (SYSCALL_WORK_SYSCALL_TRACEPOINT | \
SYSCALL_WORK_SYSCALL_TRACE | \
SYSCALL_WORK_SYSCALL_AUDIT | \
@@ -50,92 +45,7 @@
SYSCALL_WORK_SYSCALL_EXIT_TRAP | \
ARCH_SYSCALL_WORK_EXIT)
-/*
- * TIF flags handled in exit_to_user_mode_loop()
- */
-#ifndef ARCH_EXIT_TO_USER_MODE_WORK
-# define ARCH_EXIT_TO_USER_MODE_WORK (0)
-#endif
-
-#define EXIT_TO_USER_MODE_WORK \
- (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
- _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
- ARCH_EXIT_TO_USER_MODE_WORK)
-
-/**
- * arch_check_user_regs - Architecture specific sanity check for user mode regs
- * @regs: Pointer to currents pt_regs
- *
- * Defaults to an empty implementation. Can be replaced by architecture
- * specific code.
- *
- * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
- * section. Use __always_inline so the compiler cannot push it out of line
- * and make it instrumentable.
- */
-static __always_inline void arch_check_user_regs(struct pt_regs *regs);
-
-#ifndef arch_check_user_regs
-static __always_inline void arch_check_user_regs(struct pt_regs *regs) {}
-#endif
-
-/**
- * arch_syscall_enter_tracehook - Wrapper around tracehook_report_syscall_entry()
- * @regs: Pointer to currents pt_regs
- *
- * Returns: 0 on success or an error code to skip the syscall.
- *
- * Defaults to tracehook_report_syscall_entry(). Can be replaced by
- * architecture specific code.
- *
- * Invoked from syscall_enter_from_user_mode()
- */
-static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs);
-
-#ifndef arch_syscall_enter_tracehook
-static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs)
-{
- return tracehook_report_syscall_entry(regs);
-}
-#endif
-
-/**
- * enter_from_user_mode - Establish state when coming from user mode
- *
- * Syscall/interrupt entry disables interrupts, but user mode is traced as
- * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
- *
- * 1) Tell lockdep that interrupts are disabled
- * 2) Invoke context tracking if enabled to reactivate RCU
- * 3) Trace interrupts off state
- *
- * Invoked from architecture specific syscall entry code with interrupts
- * disabled. The calling code has to be non-instrumentable. When the
- * function returns all state is correct and interrupts are still
- * disabled. The subsequent functions can be instrumented.
- *
- * This is invoked when there is architecture specific functionality to be
- * done between establishing state and enabling interrupts. The caller must
- * enable interrupts before invoking syscall_enter_from_user_mode_work().
- */
-void enter_from_user_mode(struct pt_regs *regs);
-
-/**
- * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
- * @regs: Pointer to currents pt_regs
- *
- * Invoked from architecture specific syscall entry code with interrupts
- * disabled. The calling code has to be non-instrumentable. When the
- * function returns all state is correct, interrupts are enabled and the
- * subsequent functions can be instrumented.
- *
- * This handles lockdep, RCU (context tracking) and tracing state, i.e.
- * the functionality provided by enter_from_user_mode().
- *
- * This is invoked when there is extra architecture specific functionality
- * to be done between establishing state and handling user mode entry work.
- */
-void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
+long syscall_trace_enter(struct pt_regs *regs, long syscall, unsigned long work);
/**
* syscall_enter_from_user_mode_work - Check and handle work before invoking
@@ -144,8 +54,8 @@ void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
* @syscall: The syscall number
*
* Invoked from architecture specific syscall entry code with interrupts
- * enabled after invoking syscall_enter_from_user_mode_prepare() and extra
- * architecture specific work.
+ * enabled after invoking enter_from_user_mode(), enabling interrupts and
+ * extra architecture specific work.
*
* Returns: The original or a modified syscall number
*
@@ -157,10 +67,18 @@ void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
* It handles the following work items:
*
* 1) syscall_work flag dependent invocations of
- * arch_syscall_enter_tracehook(), __secure_computing(), trace_sys_enter()
+ * ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter()
* 2) Invocation of audit_syscall_entry()
*/
-long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
+static __always_inline long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
+{
+ unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
+
+ if (work & SYSCALL_WORK_ENTER)
+ syscall = syscall_trace_enter(regs, syscall, work);
+
+ return syscall;
+}
/**
* syscall_enter_from_user_mode - Establish state and check and handle work
@@ -173,150 +91,35 @@ long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
* function returns all state is correct, interrupts are enabled and the
* subsequent functions can be instrumented.
*
- * This is combination of syscall_enter_from_user_mode_prepare() and
- * syscall_enter_from_user_mode_work().
+ * This is the combination of enter_from_user_mode() and
+ * syscall_enter_from_user_mode_work() to be used when there is no
+ * architecture specific work to be done between the two.
*
* Returns: The original or a modified syscall number. See
* syscall_enter_from_user_mode_work() for further explanation.
*/
-long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
-
-/**
- * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
- * @ti_work: Cached TIF flags gathered with interrupts disabled
- *
- * Defaults to local_irq_enable(). Can be supplied by architecture specific
- * code.
- */
-static inline void local_irq_enable_exit_to_user(unsigned long ti_work);
-
-#ifndef local_irq_enable_exit_to_user
-static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
+static __always_inline long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
- local_irq_enable();
-}
-#endif
+ long ret;
-/**
- * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
- *
- * Defaults to local_irq_disable(). Can be supplied by architecture specific
- * code.
- */
-static inline void local_irq_disable_exit_to_user(void);
-
-#ifndef local_irq_disable_exit_to_user
-static inline void local_irq_disable_exit_to_user(void)
-{
- local_irq_disable();
-}
-#endif
-
-/**
- * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
- * to user mode.
- * @regs: Pointer to currents pt_regs
- * @ti_work: Cached TIF flags gathered with interrupts disabled
- *
- * Invoked from exit_to_user_mode_loop() with interrupt enabled
- *
- * Defaults to NOOP. Can be supplied by architecture specific code.
- */
-static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
- unsigned long ti_work);
-
-#ifndef arch_exit_to_user_mode_work
-static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
- unsigned long ti_work)
-{
-}
-#endif
-
-/**
- * arch_exit_to_user_mode_prepare - Architecture specific preparation for
- * exit to user mode.
- * @regs: Pointer to currents pt_regs
- * @ti_work: Cached TIF flags gathered with interrupts disabled
- *
- * Invoked from exit_to_user_mode_prepare() with interrupt disabled as the last
- * function before return. Defaults to NOOP.
- */
-static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
- unsigned long ti_work);
+ enter_from_user_mode(regs);
-#ifndef arch_exit_to_user_mode_prepare
-static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
- unsigned long ti_work)
-{
-}
-#endif
-
-/**
- * arch_exit_to_user_mode - Architecture specific final work before
- * exit to user mode.
- *
- * Invoked from exit_to_user_mode() with interrupt disabled as the last
- * function before return. Defaults to NOOP.
- *
- * This needs to be __always_inline because it is non-instrumentable code
- * invoked after context tracking switched to user mode.
- *
- * An architecture implementation must not do anything complex, no locking
- * etc. The main purpose is for speculation mitigations.
- */
-static __always_inline void arch_exit_to_user_mode(void);
-
-#ifndef arch_exit_to_user_mode
-static __always_inline void arch_exit_to_user_mode(void) { }
-#endif
-
-/**
- * arch_do_signal_or_restart - Architecture specific signal delivery function
- * @regs: Pointer to currents pt_regs
- * @has_signal: actual signal to handle
- *
- * Invoked from exit_to_user_mode_loop().
- */
-void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal);
-
-/**
- * arch_syscall_exit_tracehook - Wrapper around tracehook_report_syscall_exit()
- * @regs: Pointer to currents pt_regs
- * @step: Indicator for single step
- *
- * Defaults to tracehook_report_syscall_exit(). Can be replaced by
- * architecture specific code.
- *
- * Invoked from syscall_exit_to_user_mode()
- */
-static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step);
+ instrumentation_begin();
+ local_irq_enable();
+ ret = syscall_enter_from_user_mode_work(regs, syscall);
+ instrumentation_end();
-#ifndef arch_syscall_exit_tracehook
-static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step)
-{
- tracehook_report_syscall_exit(regs, step);
+ return ret;
}
-#endif
/**
- * exit_to_user_mode - Fixup state when exiting to user mode
- *
- * Syscall/interrupt exit enables interrupts, but the kernel state is
- * interrupts disabled when this is invoked. Also tell RCU about it.
+ * syscall_exit_work - Handle work before returning to user mode
+ * @regs: Pointer to current pt_regs
+ * @work: Current thread syscall work
*
- * 1) Trace interrupts on state
- * 2) Invoke context tracking if enabled to adjust RCU state
- * 3) Invoke architecture specific last minute exit code, e.g. speculation
- * mitigations, etc.: arch_exit_to_user_mode()
- * 4) Tell lockdep that interrupts are enabled
- *
- * Invoked from architecture specific code when syscall_exit_to_user_mode()
- * is not suitable as the last step before returning to userspace. Must be
- * invoked with interrupts disabled and the caller must be
- * non-instrumentable.
- * The caller has to invoke syscall_exit_to_user_mode_work() before this.
+ * Do one-time syscall specific work.
*/
-void exit_to_user_mode(void);
+void syscall_exit_work(struct pt_regs *regs, unsigned long work);
/**
* syscall_exit_to_user_mode_work - Handle work before returning to user mode
@@ -331,7 +134,30 @@ void exit_to_user_mode(void);
* make the final state transitions. Interrupts must stay disabled between
* return from this function and the invocation of exit_to_user_mode().
*/
-void syscall_exit_to_user_mode_work(struct pt_regs *regs);
+static __always_inline void syscall_exit_to_user_mode_work(struct pt_regs *regs)
+{
+ unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
+ unsigned long nr = syscall_get_nr(current, regs);
+
+ CT_WARN_ON(ct_state() != CT_STATE_KERNEL);
+
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+ if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
+ local_irq_enable();
+ }
+
+ rseq_debug_syscall_return(regs);
+
+ /*
+ * Do one-time syscall specific work. If these work items are
+ * enabled, we want to run them exactly once per syscall exit with
+ * interrupts enabled.
+ */
+ if (unlikely(work & SYSCALL_WORK_EXIT))
+ syscall_exit_work(regs, work);
+ local_irq_disable_exit_to_user();
+ syscall_exit_to_user_mode_prepare(regs);
+}
/**
* syscall_exit_to_user_mode - Handle work before returning to user mode
@@ -347,7 +173,7 @@ void syscall_exit_to_user_mode_work(struct pt_regs *regs);
* - rseq syscall exit
* - audit
* - syscall tracing
- * - tracehook (single stepping)
+ * - ptrace (single stepping)
*
* 2) Preparatory work
* - Exit to user mode loop (common TIF handling). Invokes
@@ -362,136 +188,12 @@ void syscall_exit_to_user_mode_work(struct pt_regs *regs);
* exit_to_user_mode(). This function is preferred unless there is a
* compelling architectural reason to use the separate functions.
*/
-void syscall_exit_to_user_mode(struct pt_regs *regs);
-
-/**
- * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
- * @regs: Pointer to currents pt_regs
- *
- * Invoked from architecture specific entry code with interrupts disabled.
- * Can only be called when the interrupt entry came from user mode. The
- * calling code must be non-instrumentable. When the function returns all
- * state is correct and the subsequent functions can be instrumented.
- *
- * The function establishes state (lockdep, RCU (context tracking), tracing)
- */
-void irqentry_enter_from_user_mode(struct pt_regs *regs);
-
-/**
- * irqentry_exit_to_user_mode - Interrupt exit work
- * @regs: Pointer to current's pt_regs
- *
- * Invoked with interrupts disabled and fully valid regs. Returns with all
- * work handled, interrupts disabled such that the caller can immediately
- * switch to user mode. Called from architecture specific interrupt
- * handling code.
- *
- * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
- * Interrupt exit is not invoking #1 which is the syscall specific one time
- * work.
- */
-void irqentry_exit_to_user_mode(struct pt_regs *regs);
-
-#ifndef irqentry_state
-/**
- * struct irqentry_state - Opaque object for exception state storage
- * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
- * exit path has to invoke rcu_irq_exit().
- * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
- * lockdep state is restored correctly on exit from nmi.
- *
- * This opaque object is filled in by the irqentry_*_enter() functions and
- * must be passed back into the corresponding irqentry_*_exit() functions
- * when the exception is complete.
- *
- * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
- * and all members private. Descriptions of the members are provided to aid in
- * the maintenance of the irqentry_*() functions.
- */
-typedef struct irqentry_state {
- union {
- bool exit_rcu;
- bool lockdep;
- };
-} irqentry_state_t;
-#endif
-
-/**
- * irqentry_enter - Handle state tracking on ordinary interrupt entries
- * @regs: Pointer to pt_regs of interrupted context
- *
- * Invokes:
- * - lockdep irqflag state tracking as low level ASM entry disabled
- * interrupts.
- *
- * - Context tracking if the exception hit user mode.
- *
- * - The hardirq tracer to keep the state consistent as low level ASM
- * entry disabled interrupts.
- *
- * As a precondition, this requires that the entry came from user mode,
- * idle, or a kernel context in which RCU is watching.
- *
- * For kernel mode entries RCU handling is done conditional. If RCU is
- * watching then the only RCU requirement is to check whether the tick has
- * to be restarted. If RCU is not watching then rcu_irq_enter() has to be
- * invoked on entry and rcu_irq_exit() on exit.
- *
- * Avoiding the rcu_irq_enter/exit() calls is an optimization but also
- * solves the problem of kernel mode pagefaults which can schedule, which
- * is not possible after invoking rcu_irq_enter() without undoing it.
- *
- * For user mode entries irqentry_enter_from_user_mode() is invoked to
- * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
- * would not be possible.
- *
- * Returns: An opaque object that must be passed to idtentry_exit()
- */
-irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
-
-/**
- * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
- *
- * Conditional reschedule with additional sanity checks.
- */
-void irqentry_exit_cond_resched(void);
-#ifdef CONFIG_PREEMPT_DYNAMIC
-DECLARE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
-#endif
-
-/**
- * irqentry_exit - Handle return from exception that used irqentry_enter()
- * @regs: Pointer to pt_regs (exception entry regs)
- * @state: Return value from matching call to irqentry_enter()
- *
- * Depending on the return target (kernel/user) this runs the necessary
- * preemption and work checks if possible and required and returns to
- * the caller with interrupts disabled and no further work pending.
- *
- * This is the last action before returning to the low level ASM code which
- * just needs to return to the appropriate context.
- *
- * Counterpart to irqentry_enter().
- */
-void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
-
-/**
- * irqentry_nmi_enter - Handle NMI entry
- * @regs: Pointer to currents pt_regs
- *
- * Similar to irqentry_enter() but taking care of the NMI constraints.
- */
-irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);
-
-/**
- * irqentry_nmi_exit - Handle return from NMI handling
- * @regs: Pointer to pt_regs (NMI entry regs)
- * @irq_state: Return value from matching call to irqentry_nmi_enter()
- *
- * Last action before returning to the low level assembly code.
- *
- * Counterpart to irqentry_nmi_enter().
- */
-void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);
+static __always_inline void syscall_exit_to_user_mode(struct pt_regs *regs)
+{
+ instrumentation_begin();
+ syscall_exit_to_user_mode_work(regs);
+ instrumentation_end();
+ exit_to_user_mode();
+}
#endif
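A hedged sketch of how an architecture's C syscall path might chain the inline helpers above; do_syscall() and arch_dispatch_syscall() are hypothetical names, not part of the header.

	static void do_syscall(struct pt_regs *regs, long nr)
	{
		/* Entry work may rewrite or reject the syscall number. */
		nr = syscall_enter_from_user_mode(regs, nr);
		if (nr >= 0)
			arch_dispatch_syscall(regs, nr);	/* hypothetical table call */
		/* One-time exit work, TIF loop prep, then state fixup. */
		syscall_exit_to_user_mode(regs);
	}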
diff --git a/include/linux/entry-kvm.h b/include/linux/entry-virt.h
index 8b2b1d68b954..bfa767702d9a 100644
--- a/include/linux/entry-kvm.h
+++ b/include/linux/entry-virt.h
@@ -1,21 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_ENTRYKVM_H
-#define __LINUX_ENTRYKVM_H
+#ifndef __LINUX_ENTRYVIRT_H
+#define __LINUX_ENTRYVIRT_H
-#include <linux/entry-common.h>
+#include <linux/static_call_types.h>
+#include <linux/resume_user_mode.h>
+#include <linux/syscalls.h>
+#include <linux/seccomp.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
/* Transfer to guest mode work */
-#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
+#ifdef CONFIG_VIRT_XFER_TO_GUEST_WORK
#ifndef ARCH_XFER_TO_GUEST_MODE_WORK
# define ARCH_XFER_TO_GUEST_MODE_WORK (0)
#endif
#define XFER_TO_GUEST_MODE_WORK \
- (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \
- _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
-
-struct kvm_vcpu;
+ (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_SIGNAL | _TIF_NOTIFY_RESUME | \
+ ARCH_XFER_TO_GUEST_MODE_WORK)
/**
* arch_xfer_to_guest_mode_handle_work - Architecture specific xfer to guest
@@ -26,12 +30,10 @@ struct kvm_vcpu;
* Invoked from xfer_to_guest_mode_handle_work(). Defaults to NOOP. Can be
* replaced by architecture specific code.
*/
-static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
- unsigned long ti_work);
+static inline int arch_xfer_to_guest_mode_handle_work(unsigned long ti_work);
-#ifndef arch_xfer_to_guest_mode_work
-static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
- unsigned long ti_work)
+#ifndef arch_xfer_to_guest_mode_handle_work
+static inline int arch_xfer_to_guest_mode_handle_work(unsigned long ti_work)
{
return 0;
}
@@ -40,11 +42,10 @@ static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
/**
* xfer_to_guest_mode_handle_work - Check and handle pending work which needs
* to be handled before going to guest mode
- * @vcpu: Pointer to current's VCPU data
*
* Returns: 0 or an error code
*/
-int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
+int xfer_to_guest_mode_handle_work(void);
/**
* xfer_to_guest_mode_prepare - Perform last minute preparation work that
@@ -57,7 +58,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
static inline void xfer_to_guest_mode_prepare(void)
{
lockdep_assert_irqs_disabled();
- rcu_nocb_flush_deferred_wakeup();
+ tick_nohz_user_enter_prepare();
}
/**
@@ -70,7 +71,7 @@ static inline void xfer_to_guest_mode_prepare(void)
*/
static inline bool __xfer_to_guest_mode_work_pending(void)
{
- unsigned long ti_work = READ_ONCE(current_thread_info()->flags);
+ unsigned long ti_work = read_thread_flags();
return !!(ti_work & XFER_TO_GUEST_MODE_WORK);
}
@@ -89,6 +90,6 @@ static inline bool xfer_to_guest_mode_work_pending(void)
lockdep_assert_irqs_disabled();
return __xfer_to_guest_mode_work_pending();
}
-#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
+#endif /* CONFIG_VIRT_XFER_TO_GUEST_WORK */
#endif
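A hedged sketch of a vCPU run loop using the now kvm_vcpu-free interface; everything outside the three entry-virt calls is invented.

	static int demo_vcpu_run(void)
	{
		int ret;

		for (;;) {
			local_irq_disable();
			if (xfer_to_guest_mode_work_pending()) {
				local_irq_enable();
				ret = xfer_to_guest_mode_handle_work();
				if (ret)
					return ret;	/* e.g. -EINTR, back to userspace */
				continue;
			}
			xfer_to_guest_mode_prepare();
			/* ... architecture specific guest entry, VM exit ... */
			local_irq_enable();
		}
	}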
diff --git a/include/linux/err.h b/include/linux/err.h
index a139c64aef2a..8c37be0620ab 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -19,23 +19,74 @@
#ifndef __ASSEMBLY__
+/**
+ * IS_ERR_VALUE - Detect an error pointer.
+ * @x: The pointer to check.
+ *
+ * Like IS_ERR(), but does not generate a compiler warning if result is unused.
+ */
#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
+/**
+ * ERR_PTR - Create an error pointer.
+ * @error: A negative error code.
+ *
+ * Encodes @error into a pointer value. Users should consider the result
+ * opaque and not assume anything about how the error is encoded.
+ *
+ * Return: A pointer with @error encoded within its value.
+ */
static inline void * __must_check ERR_PTR(long error)
{
return (void *) error;
}
+/**
+ * INIT_ERR_PTR - Init a const error pointer.
+ * @error: A negative error code.
+ *
+ * Like ERR_PTR(), but usable to initialize static variables.
+ */
+#define INIT_ERR_PTR(error) ((void *)(error))
+
+/* Return the pointer in the percpu address space. */
+#define ERR_PTR_PCPU(error) ((void __percpu *)(unsigned long)ERR_PTR(error))
+
+/* Cast an error pointer to __iomem. */
+#define IOMEM_ERR_PTR(error) (__force void __iomem *)ERR_PTR(error)
+
+/**
+ * PTR_ERR - Extract the error code from an error pointer.
+ * @ptr: An error pointer.
+ * Return: The error code within @ptr.
+ */
static inline long __must_check PTR_ERR(__force const void *ptr)
{
return (long) ptr;
}
+/* Read an error pointer from the percpu address space. */
+#define PTR_ERR_PCPU(ptr) (PTR_ERR((const void *)(__force const unsigned long)(ptr)))
+
+/**
+ * IS_ERR - Detect an error pointer.
+ * @ptr: The pointer to check.
+ * Return: true if @ptr is an error pointer, false otherwise.
+ */
static inline bool __must_check IS_ERR(__force const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
+/* Read an error pointer from the percpu address space. */
+#define IS_ERR_PCPU(ptr) (IS_ERR((const void *)(__force const unsigned long)(ptr)))
+
+/**
+ * IS_ERR_OR_NULL - Detect an error pointer or a null pointer.
+ * @ptr: The pointer to check.
+ *
+ * Like IS_ERR(), but also returns true for a null pointer.
+ */
static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
{
return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr);
@@ -54,6 +105,23 @@ static inline void * __must_check ERR_CAST(__force const void *ptr)
return (void *) ptr;
}
+/**
+ * PTR_ERR_OR_ZERO - Extract the error code from a pointer if it has one.
+ * @ptr: A potential error pointer.
+ *
+ * Convenience function that can be used inside a function that returns
+ * an error code to propagate errors received as error pointers.
+ * For example, ``return PTR_ERR_OR_ZERO(ptr);`` replaces:
+ *
+ * .. code-block:: c
+ *
+ * if (IS_ERR(ptr))
+ * return PTR_ERR(ptr);
+ * else
+ * return 0;
+ *
+ * Return: The error code within @ptr if it is an error pointer; 0 otherwise.
+ */
static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
{
if (IS_ERR(ptr))
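The classic usage pattern these kerneldoc additions describe, as a hedged sketch with an invented widget allocator.

	struct widget { int id; };	/* hypothetical object */

	static struct widget *widget_create(void)
	{
		struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

		if (!w)
			return ERR_PTR(-ENOMEM);	/* error travels in the pointer */
		return w;
	}

	static int widget_init(void)
	{
		struct widget *w = widget_create();

		if (IS_ERR(w))
			return PTR_ERR(w);	/* decode it back to an errno */
		/* ... */
		return 0;
	}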
diff --git a/include/linux/errno.h b/include/linux/errno.h
index d73f597a2484..8b0c754bab02 100644
--- a/include/linux/errno.h
+++ b/include/linux/errno.h
@@ -31,5 +31,6 @@
#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
#define ERECALLCONFLICT 530 /* conflict with recalled state */
+#define ENOGRACE 531 /* NFS file lock reclaim refused */
#endif
diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h
index 635a95caf29f..20e738f4eae8 100644
--- a/include/linux/error-injection.h
+++ b/include/linux/error-injection.h
@@ -3,6 +3,7 @@
#define _LINUX_ERROR_INJECTION_H
#include <linux/compiler.h>
+#include <linux/errno.h>
#include <asm-generic/error-injection.h>
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
@@ -19,7 +20,7 @@ static inline bool within_error_injection_list(unsigned long addr)
static inline int get_injectable_error_type(unsigned long addr)
{
- return EI_ETYPE_NONE;
+ return -EOPNOTSUPP;
}
#endif
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 330345b1be54..9a1eacf35d37 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -21,14 +21,21 @@
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/crc32.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <asm/bitsperlong.h>
#ifdef __KERNEL__
struct device;
+struct fwnode_handle;
+
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
+int platform_get_ethdev_address(struct device *dev, struct net_device *netdev);
unsigned char *arch_get_platform_mac_address(void);
int nvmem_get_mac_address(struct device *dev, void *addrbuf);
+int device_get_mac_address(struct device *dev, char *addr);
+int device_get_ethdev_address(struct device *dev, struct net_device *netdev);
+int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr);
+
u32 eth_get_headlen(const struct net_device *dev, const void *data, u32 len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
@@ -64,11 +71,17 @@ static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
#define eth_stp_addr eth_reserved_addr_base
+static const u8 eth_ipv4_mcast_addr_base[ETH_ALEN] __aligned(2) =
+{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
+
+static const u8 eth_ipv6_mcast_addr_base[ETH_ALEN] __aligned(2) =
+{ 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
+
/**
* is_link_local_ether_addr - Determine if given Ethernet address is link-local
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per
+ * Return: true if address is link local reserved addr (01:80:c2:00:00:0X) per
* IEEE 802.1Q 8.6.3 Frame filtering.
*
* Please note: addr must be aligned to u16.
@@ -91,7 +104,7 @@ static inline bool is_link_local_ether_addr(const u8 *addr)
 * is_zero_ether_addr - Determine if given Ethernet address is all zeros.
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is all zeroes.
+ * Return: true if the address is all zeroes.
*
* Please note: addr must be aligned to u16.
*/
@@ -110,7 +123,7 @@ static inline bool is_zero_ether_addr(const u8 *addr)
* is_multicast_ether_addr - Determine if the Ethernet address is a multicast.
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is a multicast address.
+ * Return: true if the address is a multicast address.
* By definition the broadcast address is also a multicast address.
*/
static inline bool is_multicast_ether_addr(const u8 *addr)
@@ -127,7 +140,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr)
#endif
}
-static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
+static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#ifdef __BIG_ENDIAN
@@ -144,7 +157,7 @@ static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
* is_local_ether_addr - Determine if the Ethernet address is locally-assigned one (IEEE 802).
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is a local address.
+ * Return: true if the address is a local address.
*/
static inline bool is_local_ether_addr(const u8 *addr)
{
@@ -155,7 +168,7 @@ static inline bool is_local_ether_addr(const u8 *addr)
* is_broadcast_ether_addr - Determine if the Ethernet address is broadcast
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is the broadcast address.
+ * Return: true if the address is the broadcast address.
*
* Please note: addr must be aligned to u16.
*/
@@ -170,7 +183,7 @@ static inline bool is_broadcast_ether_addr(const u8 *addr)
* is_unicast_ether_addr - Determine if the Ethernet address is unicast
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return true if the address is a unicast address.
+ * Return: true if the address is a unicast address.
*/
static inline bool is_unicast_ether_addr(const u8 *addr)
{
@@ -184,7 +197,7 @@ static inline bool is_unicast_ether_addr(const u8 *addr)
* Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not
* a multicast address, and is not FF:FF:FF:FF:FF:FF.
*
- * Return true if the address is valid.
+ * Return: true if the address is valid.
*
* Please note: addr must be aligned to u16.
*/
@@ -201,7 +214,7 @@ static inline bool is_valid_ether_addr(const u8 *addr)
*
* Check that the value from the Ethertype/length field is a valid Ethertype.
*
- * Return true if the valid is an 802.3 supported Ethertype.
+ * Return: true if the value is an 802.3 supported Ethertype.
*/
static inline bool eth_proto_is_802_3(__be16 proto)
{
@@ -227,8 +240,6 @@ static inline void eth_random_addr(u8 *addr)
addr[0] |= 0x02; /* set local assignment bit (IEEE802) */
}
-#define random_ether_addr(addr) eth_random_addr(addr)
-
/**
* eth_broadcast_addr - Assign broadcast address
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -262,8 +273,11 @@ static inline void eth_zero_addr(u8 *addr)
*/
static inline void eth_hw_addr_random(struct net_device *dev)
{
+ u8 addr[ETH_ALEN];
+
+ eth_random_addr(addr);
+ __dev_addr_set(dev, addr, ETH_ALEN);
dev->addr_assign_type = NET_ADDR_RANDOM;
- eth_random_addr(dev->dev_addr);
}
/**
@@ -300,6 +314,18 @@ static inline void ether_addr_copy(u8 *dst, const u8 *src)
}
/**
+ * eth_hw_addr_set - Assign Ethernet address to a net_device
+ * @dev: pointer to net_device structure
+ * @addr: address to assign
+ *
+ * Assign given address to the net_device, addr_assign_type is not changed.
+ */
+static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr)
+{
+ __dev_addr_set(dev, addr, ETH_ALEN);
+}
+
+/**
* eth_hw_addr_inherit - Copy dev_addr from another net_device
* @dst: pointer to net_device to copy dev_addr to
* @src: pointer to net_device to copy dev_addr from
@@ -311,7 +337,7 @@ static inline void eth_hw_addr_inherit(struct net_device *dst,
struct net_device *src)
{
dst->addr_assign_type = src->addr_assign_type;
- ether_addr_copy(dst->dev_addr, src->dev_addr);
+ eth_hw_addr_set(dst, src->dev_addr);
}
/**
@@ -352,8 +378,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
 * Please note that alignment of addr1 & addr2 is only guaranteed to be 16 bits.
*/
-static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
- const u8 addr2[6+2])
+static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
@@ -409,11 +434,31 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
return true;
}
+static inline bool ether_addr_is_ipv4_mcast(const u8 *addr)
+{
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
+
+ return ether_addr_equal_masked(addr, eth_ipv4_mcast_addr_base, mask);
+}
+
+static inline bool ether_addr_is_ipv6_mcast(const u8 *addr)
+{
+ u8 mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+ return ether_addr_equal_masked(addr, eth_ipv6_mcast_addr_base, mask);
+}
+
+static inline bool ether_addr_is_ip_mcast(const u8 *addr)
+{
+ return ether_addr_is_ipv4_mcast(addr) ||
+ ether_addr_is_ipv6_mcast(addr);
+}
+
/**
* ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
* @addr: Pointer to a six-byte array containing the Ethernet address
*
- * Return a u64 value of the address
+ * Return: a u64 value of the address
*/
static inline u64 ether_addr_to_u64(const u8 *addr)
{
@@ -467,6 +512,20 @@ static inline void eth_addr_inc(u8 *addr)
}
/**
+ * eth_addr_add() - Add (or subtract) an offset to/from the given MAC address.
+ *
+ * @offset: Offset to add.
+ * @addr: Pointer to a six-byte array containing Ethernet address to increment.
+ */
+static inline void eth_addr_add(u8 *addr, long offset)
+{
+ u64 u = ether_addr_to_u64(addr);
+
+ u += offset;
+ u64_to_ether_addr(u, addr);
+}
+
+/**
* is_etherdev_addr - Tell if given Ethernet address belongs to the device.
* @dev: Pointer to a device structure
* @addr: Pointer to a six-byte array containing the Ethernet address
@@ -532,7 +591,61 @@ static inline unsigned long compare_ether_header(const void *a, const void *b)
}
/**
- * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame
+ * eth_hw_addr_gen - Generate and assign Ethernet address to a port
+ * @dev: pointer to port's net_device structure
+ * @base_addr: base Ethernet address
+ * @id: offset to add to the base address
+ *
+ * Generate a MAC address using a base address and an offset and assign it
+ * to a net_device. Commonly used by switch drivers which need to compute
+ * addresses for all their ports. addr_assign_type is not changed.
+ */
+static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
+ unsigned int id)
+{
+ u64 u = ether_addr_to_u64(base_addr);
+ u8 addr[ETH_ALEN];
+
+ u += id;
+ u64_to_ether_addr(u, addr);
+ eth_hw_addr_set(dev, addr);
+}
+
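A hedged sketch of the switch-driver pattern the helper targets; the port array and base MAC are invented names.

	static void demo_assign_port_macs(struct net_device **port_netdev,
					  int num_ports, const u8 *base_mac)
	{
		int port;

		/* Each front-panel port gets base_mac + 1 + port. */
		for (port = 0; port < num_ports; port++)
			eth_hw_addr_gen(port_netdev[port], base_mac, port + 1);
	}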
+/**
+ * eth_skb_pkt_type - Assign packet type if destination address does not match
+ * @skb: Buffer whose pkt_type is set when its address does not match @dev
+ * @dev: Network device used to compare packet address against
+ *
+ * If the destination MAC address of the packet does not match the network
+ * device address, assign an appropriate packet type.
+ */
+static inline void eth_skb_pkt_type(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ const struct ethhdr *eth = eth_hdr(skb);
+
+ if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
+ if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
+ if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+ } else {
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+ }
+}
+
+static inline struct ethhdr *eth_skb_pull_mac(struct sk_buff *skb)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+
+ skb_pull_inline(skb, ETH_HLEN);
+ return eth;
+}
+
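Together the two helpers cover the address-handling half of eth_type_trans(); a hedged sketch of an RX fast path using them, with the protocol handling simplified for the demo.

	static void demo_rx(struct sk_buff *skb, struct net_device *dev)
	{
		struct ethhdr *eth = eth_skb_pull_mac(skb);	/* strip 14-byte header */

		eth_skb_pkt_type(skb, dev);	/* host/broadcast/multicast/otherhost */
		skb->protocol = eth->h_proto;	/* assumes proto >= ETH_P_802_3_MIN */
		/* ... hand off to the stack ... */
	}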
+/**
+ * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
* @skb: Buffer to pad
*
* An Ethernet frame should have a minimum size of 60 bytes. This function
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index e030f7510cd3..5c9162193d26 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -15,9 +15,15 @@
#include <linux/bitmap.h>
#include <linux/compat.h>
+#include <linux/if_ether.h>
+#include <linux/netlink.h>
+#include <linux/timer_types.h>
#include <uapi/linux/ethtool.h>
+#include <uapi/linux/ethtool_netlink_generated.h>
+#include <uapi/linux/net_tstamp.h>
-#ifdef CONFIG_COMPAT
+#define ETHTOOL_MM_MAX_VERIFY_TIME_MS 128
+#define ETHTOOL_MM_MAX_VERIFY_RETRIES 3
struct compat_ethtool_rx_flow_spec {
u32 flow_type;
@@ -38,8 +44,6 @@ struct compat_ethtool_rxnfc {
u32 rule_locs[];
};
-#endif /* CONFIG_COMPAT */
-
#include <linux/rculist.h>
/**
@@ -70,6 +74,51 @@ enum {
ETH_RSS_HASH_FUNCS_COUNT
};
+/**
+ * struct kernel_ethtool_ringparam - RX/TX ring configuration
+ * @rx_buf_len: Current length of buffers on the rx ring.
+ * @tcp_data_split: Scatter packet headers and data to separate buffers
+ * @tx_push: The flag of tx push mode
+ * @rx_push: The flag of rx push mode
+ * @cqe_size: Size of TX/RX completion queue event
+ * @tx_push_buf_len: Size of TX push buffer
+ * @tx_push_buf_max_len: Maximum allowed size of TX push buffer
+ * @hds_thresh: Packet size threshold for header data split (HDS)
+ * @hds_thresh_max: Maximum supported setting for @hds_thresh
+ */
+struct kernel_ethtool_ringparam {
+ u32 rx_buf_len;
+ u8 tcp_data_split;
+ u8 tx_push;
+ u8 rx_push;
+ u32 cqe_size;
+ u32 tx_push_buf_len;
+ u32 tx_push_buf_max_len;
+ u32 hds_thresh;
+ u32 hds_thresh_max;
+};
+
+/**
+ * enum ethtool_supported_ring_param - indicator caps for setting ring params
+ * @ETHTOOL_RING_USE_RX_BUF_LEN: capability bit for setting rx_buf_len
+ * @ETHTOOL_RING_USE_CQE_SIZE: capability bit for setting cqe_size
+ * @ETHTOOL_RING_USE_TX_PUSH: capability bit for setting tx_push
+ * @ETHTOOL_RING_USE_RX_PUSH: capability bit for setting rx_push
+ * @ETHTOOL_RING_USE_TX_PUSH_BUF_LEN: capability bit for setting tx_push_buf_len
+ * @ETHTOOL_RING_USE_TCP_DATA_SPLIT: capability bit for setting tcp_data_split
+ * @ETHTOOL_RING_USE_HDS_THRS: capability bit for setting hds_thresh
+ */
+enum ethtool_supported_ring_param {
+ ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
+ ETHTOOL_RING_USE_CQE_SIZE = BIT(1),
+ ETHTOOL_RING_USE_TX_PUSH = BIT(2),
+ ETHTOOL_RING_USE_RX_PUSH = BIT(3),
+ ETHTOOL_RING_USE_TX_PUSH_BUF_LEN = BIT(4),
+ ETHTOOL_RING_USE_TCP_DATA_SPLIT = BIT(5),
+ ETHTOOL_RING_USE_HDS_THRS = BIT(6),
+};
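
As a sketch (driver name invented), a driver advertises exactly the extended ring parameters it honors; the ethtool core then rejects netlink requests that touch any other field:

	#include <linux/ethtool.h>

	static const struct ethtool_ops mydrv_ring_ethtool_ops = {
		.supported_ring_params	= ETHTOOL_RING_USE_RX_BUF_LEN |
					  ETHTOOL_RING_USE_CQE_SIZE,
		/* .get_ringparam / .set_ringparam callbacks omitted */
	};
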
+
#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
#define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT)
@@ -83,11 +132,6 @@ enum {
struct net_device;
struct netlink_ext_ack;
-/* Some generic methods drivers may use in their ethtool_ops */
-u32 ethtool_op_get_link(struct net_device *dev);
-int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
-
-
/* Link extended state and substate. */
struct ethtool_link_ext_state_info {
enum ethtool_link_ext_state link_ext_state;
@@ -97,10 +141,25 @@ struct ethtool_link_ext_state_info {
enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch;
enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity;
enum ethtool_link_ext_substate_cable_issue cable_issue;
- u8 __link_ext_substate;
+ enum ethtool_link_ext_substate_module module;
+ u32 __link_ext_substate;
};
};
+struct ethtool_link_ext_stats {
+ /* Custom Linux statistic for PHY level link down events.
+	 * In a simpler world it would be equal to netdev->carrier_down_count;
+	 * unfortunately, netdev also counts local reconfigurations which don't
+ * actually take the physical link down, not to mention NC-SI which,
+ * if present, keeps the link up regardless of host state.
+ * This statistic counts when PHY _actually_ went down, or lost link.
+ *
+ * Note that we need u64 for ethtool_stats_init() and comparisons
+ * to ETHTOOL_STAT_NOT_SET, but only u32 is exposed to the user.
+ */
+ u64 link_down_events;
+};
+
/**
* ethtool_rxfh_indir_default - get default value for RX flow hash indirection
* @index: Index in RX flow hash indirection table
@@ -113,6 +172,57 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
return index % n_rx_rings;
}
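
For example, a driver with no saved user configuration might seed its whole table with this helper (a sketch; names invented):

	static void mydrv_fill_default_indir(u32 *indir, u32 indir_size,
					     u32 n_rx_rings)
	{
		u32 i;

		/* spreads flows round-robin: 0, 1, ..., n_rx_rings - 1, 0, ... */
		for (i = 0; i < indir_size; i++)
			indir[i] = ethtool_rxfh_indir_default(i, n_rx_rings);
	}
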
+/**
+ * struct ethtool_rxfh_context - a custom RSS context configuration
+ * @indir_size: Number of u32 entries in indirection table
+ * @key_size: Size of hash key, in bytes
+ * @priv_size: Size of driver private data, in bytes
+ * @hfunc: RSS hash function identifier. One of the %ETH_RSS_HASH_*
+ * @input_xfrm: Defines how the input data is transformed. Valid values are one
+ * of %RXH_XFRM_*.
+ * @indir_configured: indir has been specified (at create time or subsequently)
+ * @key_configured: hkey has been specified (at create time or subsequently)
+ */
+struct ethtool_rxfh_context {
+ u32 indir_size;
+ u32 key_size;
+ u16 priv_size;
+ u8 hfunc;
+ u8 input_xfrm;
+ u8 indir_configured:1;
+ u8 key_configured:1;
+ /* private: driver private data, indirection table, and hash key are
+ * stored sequentially in @data area. Use below helpers to access.
+ */
+ u32 key_off;
+ u8 data[] __aligned(sizeof(void *));
+};
+
+static inline void *ethtool_rxfh_context_priv(struct ethtool_rxfh_context *ctx)
+{
+ return ctx->data;
+}
+
+static inline u32 *ethtool_rxfh_context_indir(struct ethtool_rxfh_context *ctx)
+{
+ return (u32 *)(ctx->data + ALIGN(ctx->priv_size, sizeof(u32)));
+}
+
+static inline u8 *ethtool_rxfh_context_key(struct ethtool_rxfh_context *ctx)
+{
+ return &ctx->data[ctx->key_off];
+}
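
The @data area thus holds the driver-private block first, then the u32-aligned indirection table, then the key at @key_off. A sketch of a hypothetical @create_rxfh_context op consuming that layout; mydrv_priv and mydrv_program_rss() are made up, and key/hfunc defaulting is elided:

	struct mydrv_priv { u32 rss_slot; };	/* invented driver-private data */
	int mydrv_program_rss(struct mydrv_priv *p, const u32 *indir,
			      const u8 *key, u32 key_size);	/* assumed HW hook */

	static int mydrv_create_rxfh_context(struct net_device *dev,
					     struct ethtool_rxfh_context *ctx,
					     const struct ethtool_rxfh_param *rxfh,
					     struct netlink_ext_ack *extack)
	{
		struct mydrv_priv *priv = ethtool_rxfh_context_priv(ctx);
		u32 *indir = ethtool_rxfh_context_indir(ctx);
		u8 *key = ethtool_rxfh_context_key(ctx);
		const u32 *tbl = rxfh->indir;
		u32 i;

		if (!tbl) {
			/* no table given: record the default actually used,
			 * as the @create_rxfh_context rules require */
			for (i = 0; i < ctx->indir_size; i++)
				indir[i] = ethtool_rxfh_indir_default(i,
						dev->real_num_rx_queues);
			tbl = indir;
		}

		/* key/hfunc defaulting elided for brevity */
		return mydrv_program_rss(priv, tbl, key, ctx->key_size);
	}
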
+
+void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id);
+
+struct link_mode_info {
+ int speed;
+ u8 lanes;
+ u8 duplex;
+};
+
+extern const struct link_mode_info link_mode_params[];
+
/* declare a link mode bitmap */
#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \
DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS)
@@ -167,7 +277,7 @@ struct ethtool_link_ksettings {
* @mode : one of the ETHTOOL_LINK_MODE_*_BIT
* (not atomic, no bound checking)
*
- * Returns true/false.
+ * Returns: true/false.
*/
#define ethtool_link_ksettings_test_link_mode(ptr, name, mode) \
test_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
@@ -176,6 +286,24 @@ extern int
__ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *link_ksettings);
+struct ethtool_keee {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertised);
+ u32 tx_lpi_timer;
+ bool tx_lpi_enabled;
+ bool eee_active;
+ bool eee_enabled;
+};
+
+struct kernel_ethtool_coalesce {
+ u8 use_cqe_mode_tx;
+ u8 use_cqe_mode_rx;
+ u32 tx_aggr_max_bytes;
+ u32 tx_aggr_max_frames;
+ u32 tx_aggr_time_usecs;
+};
+
/**
* ethtool_intersect_link_masks - Given two link masks, AND them together
* @dst: first mask and where result is stored
@@ -215,7 +343,14 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
#define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19)
#define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20)
#define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21)
-#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(21, 0)
+#define ETHTOOL_COALESCE_USE_CQE_RX BIT(22)
+#define ETHTOOL_COALESCE_USE_CQE_TX BIT(23)
+#define ETHTOOL_COALESCE_TX_AGGR_MAX_BYTES BIT(24)
+#define ETHTOOL_COALESCE_TX_AGGR_MAX_FRAMES BIT(25)
+#define ETHTOOL_COALESCE_TX_AGGR_TIME_USECS BIT(26)
+#define ETHTOOL_COALESCE_RX_PROFILE BIT(27)
+#define ETHTOOL_COALESCE_TX_PROFILE BIT(28)
+#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(28, 0)
#define ETHTOOL_COALESCE_USECS \
(ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS)
@@ -241,6 +376,12 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \
ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \
ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL)
+#define ETHTOOL_COALESCE_USE_CQE \
+ (ETHTOOL_COALESCE_USE_CQE_RX | ETHTOOL_COALESCE_USE_CQE_TX)
+#define ETHTOOL_COALESCE_TX_AGGR \
+ (ETHTOOL_COALESCE_TX_AGGR_MAX_BYTES | \
+ ETHTOOL_COALESCE_TX_AGGR_MAX_FRAMES | \
+ ETHTOOL_COALESCE_TX_AGGR_TIME_USECS)
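
A driver opts into exactly the coalescing knobs it implements by OR-ing these bits into .supported_coalesce_params (sketch; ETHTOOL_COALESCE_MAX_FRAMES is defined earlier in this header):

	static const struct ethtool_ops mydrv_coal_ethtool_ops = {
		.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
					     ETHTOOL_COALESCE_MAX_FRAMES |
					     ETHTOOL_COALESCE_USE_CQE,
		/* .get_coalesce / .set_coalesce callbacks omitted */
	};
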
#define ETHTOOL_STAT_NOT_SET (~0ULL)
@@ -254,48 +395,82 @@ static inline void ethtool_stats_init(u64 *stats, unsigned int n)
* via a more targeted API.
*/
struct ethtool_eth_mac_stats {
- u64 FramesTransmittedOK;
- u64 SingleCollisionFrames;
- u64 MultipleCollisionFrames;
- u64 FramesReceivedOK;
- u64 FrameCheckSequenceErrors;
- u64 AlignmentErrors;
- u64 OctetsTransmittedOK;
- u64 FramesWithDeferredXmissions;
- u64 LateCollisions;
- u64 FramesAbortedDueToXSColls;
- u64 FramesLostDueToIntMACXmitError;
- u64 CarrierSenseErrors;
- u64 OctetsReceivedOK;
- u64 FramesLostDueToIntMACRcvError;
- u64 MulticastFramesXmittedOK;
- u64 BroadcastFramesXmittedOK;
- u64 FramesWithExcessiveDeferral;
- u64 MulticastFramesReceivedOK;
- u64 BroadcastFramesReceivedOK;
- u64 InRangeLengthErrors;
- u64 OutOfRangeLengthField;
- u64 FrameTooLongErrors;
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 FramesTransmittedOK;
+ u64 SingleCollisionFrames;
+ u64 MultipleCollisionFrames;
+ u64 FramesReceivedOK;
+ u64 FrameCheckSequenceErrors;
+ u64 AlignmentErrors;
+ u64 OctetsTransmittedOK;
+ u64 FramesWithDeferredXmissions;
+ u64 LateCollisions;
+ u64 FramesAbortedDueToXSColls;
+ u64 FramesLostDueToIntMACXmitError;
+ u64 CarrierSenseErrors;
+ u64 OctetsReceivedOK;
+ u64 FramesLostDueToIntMACRcvError;
+ u64 MulticastFramesXmittedOK;
+ u64 BroadcastFramesXmittedOK;
+ u64 FramesWithExcessiveDeferral;
+ u64 MulticastFramesReceivedOK;
+ u64 BroadcastFramesReceivedOK;
+ u64 InRangeLengthErrors;
+ u64 OutOfRangeLengthField;
+ u64 FrameTooLongErrors;
+ );
};
/* Basic IEEE 802.3 PHY statistics (30.3.2.1.*), not otherwise exposed
* via a more targeted API.
*/
struct ethtool_eth_phy_stats {
- u64 SymbolErrorDuringCarrier;
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 SymbolErrorDuringCarrier;
+ );
+};
+
+/**
+ * struct ethtool_phy_stats - PHY-level statistics counters
+ * @rx_packets: Total successfully received frames
+ * @rx_bytes: Total successfully received bytes
+ * @rx_errors: Total received frames with errors (e.g., CRC errors)
+ * @tx_packets: Total successfully transmitted frames
+ * @tx_bytes: Total successfully transmitted bytes
+ * @tx_errors: Total transmitted frames with errors
+ *
+ * This structure provides a standardized interface for reporting
+ * PHY-level statistics counters. It is designed to expose statistics
+ * commonly provided by PHYs but not explicitly defined in the IEEE
+ * 802.3 standard.
+ */
+struct ethtool_phy_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_errors;
};
/* Basic IEEE 802.3 MAC Ctrl statistics (30.3.3.*), not otherwise exposed
* via a more targeted API.
*/
struct ethtool_eth_ctrl_stats {
- u64 MACControlFramesTransmitted;
- u64 MACControlFramesReceived;
- u64 UnsupportedOpcodesReceived;
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 MACControlFramesTransmitted;
+ u64 MACControlFramesReceived;
+ u64 UnsupportedOpcodesReceived;
+ );
};
/**
* struct ethtool_pause_stats - statistics for IEEE 802.3x pause frames
+ * @src: input field denoting whether stats should be queried from the eMAC or
+ * pMAC (if the MM layer is supported). To be ignored otherwise.
* @tx_pause_frames: transmitted pause frame count. Reported to user space
* as %ETHTOOL_A_PAUSE_STAT_TX_FRAMES.
*
@@ -309,12 +484,37 @@ struct ethtool_eth_ctrl_stats {
* from the standard.
*/
struct ethtool_pause_stats {
- u64 tx_pause_frames;
- u64 rx_pause_frames;
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 tx_pause_frames;
+ u64 rx_pause_frames;
+ );
};
#define ETHTOOL_MAX_LANES 8
+/*
+ * IEEE 802.3ck/df defines 16 bins for FEC histogram plus one more for
+ * the end-of-list marker, total 17 items
+ */
+#define ETHTOOL_FEC_HIST_MAX 17
+/**
+ * struct ethtool_fec_hist_range - error bits range for FEC histogram
+ * statistics
+ * @low: low bound of the bin (inclusive)
+ * @high: high bound of the bin (inclusive)
+ */
+struct ethtool_fec_hist_range {
+ u16 low;
+ u16 high;
+};
+struct ethtool_fec_hist {
+ struct ethtool_fec_hist_value {
+ u64 sum;
+ u64 per_lane[ETHTOOL_MAX_LANES];
+ } values[ETHTOOL_FEC_HIST_MAX];
+ const struct ethtool_fec_hist_range *ranges;
+};
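
A sketch of the shape @get_fec_stats (extended below with the @hist argument) is expected to fill; the bin boundaries and counter values here are invented:

	static const struct ethtool_fec_hist_range mydrv_fec_ranges[] = {
		{ .low = 0, .high = 0 },
		{ .low = 1, .high = 1 },
		{ .low = 2, .high = 3 },
		{ /* zero entry terminates the list */ }
	};

	static void mydrv_get_fec_stats(struct net_device *dev,
					struct ethtool_fec_stats *fec_stats,
					struct ethtool_fec_hist *hist)
	{
		hist->ranges = mydrv_fec_ranges;
		hist->values[0].sum = 12345;	/* codewords with 0 symbol errors */
		hist->values[1].sum = 67;
		hist->values[2].sum = 8;
	}
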
/**
* struct ethtool_fec_stats - statistics for IEEE 802.3 FEC
* @corrected_blocks: number of received blocks corrected by FEC
@@ -332,8 +532,10 @@ struct ethtool_pause_stats {
* not entire FEC data blocks. This is a non-standard statistic.
* Reported to user space as %ETHTOOL_A_FEC_STAT_CORR_BITS.
*
- * @lane: per-lane/PCS-instance counts as defined by the standard
- * @total: error counts for the entire port, for drivers incapable of reporting
+ * For each of the above fields, the two substructure members are:
+ *
+ * - @lanes: per-lane/PCS-instance counts as defined by the standard
+ * - @total: error counts for the entire port, for drivers incapable of reporting
* per-lane stats
*
* Drivers should fill in either only total or per-lane statistics, core
@@ -356,10 +558,12 @@ struct ethtool_rmon_hist_range {
u16 high;
};
-#define ETHTOOL_RMON_HIST_MAX 10
+#define ETHTOOL_RMON_HIST_MAX 11
/**
* struct ethtool_rmon_stats - selected RMON (RFC 2819) statistics
+ * @src: input field denoting whether stats should be queried from the eMAC or
+ * pMAC (if the MM layer is supported). To be ignored otherwise.
* @undersize_pkts: Equivalent to `etherStatsUndersizePkts` from the RFC.
* @oversize_pkts: Equivalent to `etherStatsOversizePkts` from the RFC.
* @fragments: Equivalent to `etherStatsFragments` from the RFC.
@@ -375,49 +579,339 @@ struct ethtool_rmon_hist_range {
* ranges is left to the driver.
*/
struct ethtool_rmon_stats {
- u64 undersize_pkts;
- u64 oversize_pkts;
- u64 fragments;
- u64 jabbers;
+ enum ethtool_mac_stats_src src;
+ struct_group(stats,
+ u64 undersize_pkts;
+ u64 oversize_pkts;
+ u64 fragments;
+ u64 jabbers;
+
+ u64 hist[ETHTOOL_RMON_HIST_MAX];
+ u64 hist_tx[ETHTOOL_RMON_HIST_MAX];
+ );
+};
- u64 hist[ETHTOOL_RMON_HIST_MAX];
- u64 hist_tx[ETHTOOL_RMON_HIST_MAX];
+/**
+ * struct ethtool_ts_stats - HW timestamping statistics
+ * @pkts: Number of packets successfully timestamped by the hardware.
+ * @onestep_pkts_unconfirmed: Number of PTP packets with one-step TX
+ * timestamping that were sent, but for which the
+ * device offers no confirmation whether they made
+ * it onto the wire and the timestamp was inserted
+ * in the originTimestamp or correctionField, or
+ * not.
+ * @lost: Number of hardware timestamping requests where the timestamping
+ * information from the hardware never arrived for submission with
+ * the skb.
+ * @err: Number of arbitrary timestamp generation error events that the
+ * hardware encountered, exclusive of @lost statistics. Cases such
+ * as resource exhaustion, unavailability, firmware errors, and
+ * detected illogical timestamp values not submitted with the skb
+ * are inclusive to this counter.
+ */
+struct ethtool_ts_stats {
+ struct_group(tx_stats,
+ u64 pkts;
+ u64 onestep_pkts_unconfirmed;
+ u64 lost;
+ u64 err;
+ );
};
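
Since the core pre-fills this structure with ETHTOOL_STAT_NOT_SET, a driver only writes the counters it actually tracks, letting userspace distinguish zero from unreported (sketch, invented values):

	static void mydrv_get_ts_stats(struct net_device *dev,
				       struct ethtool_ts_stats *ts_stats)
	{
		ts_stats->pkts = 100;	/* timestamped packets, from HW counters */
		ts_stats->lost = 2;
		/* onestep_pkts_unconfirmed and err left as ETHTOOL_STAT_NOT_SET */
	}
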
#define ETH_MODULE_EEPROM_PAGE_LEN 128
#define ETH_MODULE_MAX_I2C_ADDRESS 0x7f
/**
- * struct ethtool_module_eeprom - EEPROM dump from specified page
- * @offset: Offset within the specified EEPROM page to begin read, in bytes.
- * @length: Number of bytes to read.
- * @page: Page number to read from.
- * @bank: Page bank number to read from, if applicable by EEPROM spec.
+ * struct ethtool_module_eeprom - plug-in module EEPROM read / write parameters
+ * @offset: When @offset is 0-127, it is used as an offset into the Lower
+ *	Memory (@page must be 0). Otherwise, it is used as an offset into the
+ *	Upper Memory.
+ * @length: Number of bytes to read / write.
+ * @page: Page number.
+ * @bank: Bank number, if supported by EEPROM spec.
* @i2c_address: I2C address of a page. Value less than 0x7f expected. Most
* EEPROMs use 0x50 or 0x51.
* @data: Pointer to buffer with EEPROM data of @length size.
- *
- * This can be used to manage pages during EEPROM dump in ethtool and pass
- * required information to the driver.
*/
struct ethtool_module_eeprom {
- __u32 offset;
- __u32 length;
- __u8 page;
- __u8 bank;
- __u8 i2c_address;
- __u8 *data;
+ u32 offset;
+ u32 length;
+ u8 page;
+ u8 bank;
+ u8 i2c_address;
+ u8 *data;
+};
+
+/**
+ * struct ethtool_module_power_mode_params - module power mode parameters
+ * @policy: The power mode policy enforced by the host for the plug-in module.
+ * @mode: The operational power mode of the plug-in module. Should be filled by
+ * device drivers on get operations.
+ */
+struct ethtool_module_power_mode_params {
+ enum ethtool_module_power_mode_policy policy;
+ enum ethtool_module_power_mode mode;
+};
+
+/**
+ * struct ethtool_mm_state - 802.3 MAC merge layer state
+ * @verify_time:
+ * wait time between verification attempts in ms (according to clause
+ * 30.14.1.6 aMACMergeVerifyTime)
+ * @max_verify_time:
+ * maximum accepted value for the @verify_time variable in set requests
+ * @verify_status:
+ * state of the verification state machine of the MM layer (according to
+ * clause 30.14.1.2 aMACMergeStatusVerify)
+ * @tx_enabled:
+ * set if the MM layer is administratively enabled in the TX direction
+ * (according to clause 30.14.1.3 aMACMergeEnableTx)
+ * @tx_active:
+ * set if the MM layer is enabled in the TX direction, which makes FP
+ * possible (according to 30.14.1.5 aMACMergeStatusTx). This should be
+ * true if MM is enabled, and the verification status is either verified,
+ * or disabled.
+ * @pmac_enabled:
+ * set if the preemptible MAC is powered on and is able to receive
+ * preemptible packets and respond to verification frames.
+ * @verify_enabled:
+ * set if the Verify function of the MM layer (which sends SMD-V
+ * verification requests) is administratively enabled (regardless of
+ * whether it is currently in the ETHTOOL_MM_VERIFY_STATUS_DISABLED state
+ * or not), according to clause 30.14.1.4 aMACMergeVerifyDisableTx (but
+ * using positive rather than negative logic). The device should always
+ * respond to received SMD-V requests as long as @pmac_enabled is set.
+ * @tx_min_frag_size:
+ * the minimum size of non-final mPacket fragments that the link partner
+ * supports receiving, expressed in octets. Compared to the definition
+ * from clause 30.14.1.7 aMACMergeAddFragSize which is expressed in the
+ * range 0 to 3 (requiring a translation to the size in octets according
+ * to the formula 64 * (1 + addFragSize) - 4), a value in a continuous and
+ * unbounded range can be specified here.
+ * @rx_min_frag_size:
+ * the minimum size of non-final mPacket fragments that this device
+ * supports receiving, expressed in octets.
+ */
+struct ethtool_mm_state {
+ u32 verify_time;
+ u32 max_verify_time;
+ enum ethtool_mm_verify_status verify_status;
+ bool tx_enabled;
+ bool tx_active;
+ bool pmac_enabled;
+ bool verify_enabled;
+ u32 tx_min_frag_size;
+ u32 rx_min_frag_size;
+};
+
+/**
+ * struct ethtool_mm_cfg - 802.3 MAC merge layer configuration
+ * @verify_time: see struct ethtool_mm_state
+ * @verify_enabled: see struct ethtool_mm_state
+ * @tx_enabled: see struct ethtool_mm_state
+ * @pmac_enabled: see struct ethtool_mm_state
+ * @tx_min_frag_size: see struct ethtool_mm_state
+ */
+struct ethtool_mm_cfg {
+ u32 verify_time;
+ bool verify_enabled;
+ bool tx_enabled;
+ bool pmac_enabled;
+ u32 tx_min_frag_size;
+};
+
+/**
+ * struct ethtool_mm_stats - 802.3 MAC merge layer statistics
+ * @MACMergeFrameAssErrorCount:
+ * received MAC frames with reassembly errors
+ * @MACMergeFrameSmdErrorCount:
+ * received MAC frames/fragments rejected due to unknown or incorrect SMD
+ * @MACMergeFrameAssOkCount:
+ * received MAC frames that were successfully reassembled and passed up
+ * @MACMergeFragCountRx:
+ * number of additional correct SMD-C mPackets received due to preemption
+ * @MACMergeFragCountTx:
+ * number of additional mPackets sent due to preemption
+ * @MACMergeHoldCount:
+ * number of times the MM layer entered the HOLD state, which blocks
+ * transmission of preemptible traffic
+ */
+struct ethtool_mm_stats {
+ u64 MACMergeFrameAssErrorCount;
+ u64 MACMergeFrameSmdErrorCount;
+ u64 MACMergeFrameAssOkCount;
+ u64 MACMergeFragCountRx;
+ u64 MACMergeFragCountTx;
+ u64 MACMergeHoldCount;
+};
+
+enum ethtool_mmsv_event {
+ ETHTOOL_MMSV_LP_SENT_VERIFY_MPACKET,
+ ETHTOOL_MMSV_LD_SENT_VERIFY_MPACKET,
+ ETHTOOL_MMSV_LP_SENT_RESPONSE_MPACKET,
+};
+
+/* MAC Merge verification mPacket type */
+enum ethtool_mpacket {
+ ETHTOOL_MPACKET_VERIFY,
+ ETHTOOL_MPACKET_RESPONSE,
+};
+
+struct ethtool_mmsv;
+
+/**
+ * struct ethtool_mmsv_ops - Operations for MAC Merge Software Verification
+ * @configure_tx: Driver callback for the event where the preemptible TX
+ * becomes active or inactive. Preemptible traffic
+ * classes must be committed to hardware only while
+ * preemptible TX is active.
+ * @configure_pmac: Driver callback for the event where the pMAC state
+ * changes as result of an administrative setting
+ * (ethtool) or a call to ethtool_mmsv_link_state_handle().
+ * @send_mpacket: Driver-provided method for sending a Verify or a Response
+ * mPacket.
+ */
+struct ethtool_mmsv_ops {
+ void (*configure_tx)(struct ethtool_mmsv *mmsv, bool tx_active);
+ void (*configure_pmac)(struct ethtool_mmsv *mmsv, bool pmac_enabled);
+ void (*send_mpacket)(struct ethtool_mmsv *mmsv, enum ethtool_mpacket mpacket);
+};
+
+/**
+ * struct ethtool_mmsv - MAC Merge Software Verification
+ * @ops: operations for MAC Merge Software Verification
+ * @dev: pointer to net_device structure
+ * @lock: serialize access to MAC Merge state between
+ * ethtool requests and link state updates.
+ * @status: current verification FSM state
+ * @verify_timer: timer for verification in local TX direction
+ * @verify_enabled: indicates if verification is enabled
+ * @verify_retries: number of retries for verification
+ * @pmac_enabled: indicates if the preemptible MAC is enabled
+ * @verify_time: time for verification in milliseconds
+ * @tx_enabled: indicates if transmission is enabled
+ */
+struct ethtool_mmsv {
+ const struct ethtool_mmsv_ops *ops;
+ struct net_device *dev;
+ spinlock_t lock;
+ enum ethtool_mm_verify_status status;
+ struct timer_list verify_timer;
+ bool verify_enabled;
+ int verify_retries;
+ bool pmac_enabled;
+ u32 verify_time;
+ bool tx_enabled;
+};
+
+void ethtool_mmsv_stop(struct ethtool_mmsv *mmsv);
+void ethtool_mmsv_link_state_handle(struct ethtool_mmsv *mmsv, bool up);
+void ethtool_mmsv_event_handle(struct ethtool_mmsv *mmsv,
+ enum ethtool_mmsv_event event);
+void ethtool_mmsv_get_mm(struct ethtool_mmsv *mmsv,
+ struct ethtool_mm_state *state);
+void ethtool_mmsv_set_mm(struct ethtool_mmsv *mmsv, struct ethtool_mm_cfg *cfg);
+void ethtool_mmsv_init(struct ethtool_mmsv *mmsv, struct net_device *dev,
+ const struct ethtool_mmsv_ops *ops);
+
+/**
+ * struct ethtool_rxfh_param - RXFH (RSS) parameters
+ * @hfunc: Defines the current RSS hash function used by HW (or to be set to).
+ * Valid values are one of the %ETH_RSS_HASH_*.
+ * @indir_size: On SET, the array size of the user buffer for the
+ * indirection table, which may be zero, or
+ * %ETH_RXFH_INDIR_NO_CHANGE. On GET (read from the driver),
+ * the array size of the hardware indirection table.
+ * @indir: The indirection table of size @indir_size entries.
+ * @key_size: On SET, the array size of the user buffer for the hash key,
+ * which may be zero. On GET (read from the driver), the size of the
+ * hardware hash key.
+ * @key: The hash key of size @key_size bytes.
+ * @rss_context: RSS context identifier. Context 0 is the default for normal
+ * traffic; other contexts can be referenced as the destination for RX flow
+ * classification rules. On SET, %ETH_RXFH_CONTEXT_ALLOC is used
+ * to allocate a new RSS context; on return this field will
+ * contain the ID of the newly allocated context.
+ * @rss_delete: Set to non-ZERO to remove the @rss_context context.
+ * @input_xfrm: Defines how the input data is transformed. Valid values are one
+ * of %RXH_XFRM_*.
+ */
+struct ethtool_rxfh_param {
+ u8 hfunc;
+ u32 indir_size;
+ u32 *indir;
+ u32 key_size;
+ u8 *key;
+ u32 rss_context;
+ u8 rss_delete;
+ u8 input_xfrm;
+};
+
+/**
+ * struct ethtool_rxfh_fields - Rx Flow Hashing (RXFH) header field config
+ * @data: which header fields are used for hashing, bitmask of RXH_* defines
+ * @flow_type: L2-L4 network traffic flow type
+ * @rss_context: RSS context, will only be used if rxfh_per_ctx_fields is
+ * set in struct ethtool_ops
+ */
+struct ethtool_rxfh_fields {
+ u32 data;
+ u32 flow_type;
+ u32 rss_context;
+};
+
+/**
+ * struct kernel_ethtool_ts_info - kernel copy of struct ethtool_ts_info
+ * @cmd: command number = %ETHTOOL_GET_TS_INFO
+ * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
+ * @phc_index: device index of the associated PHC, or -1 if there is none
+ * @phc_qualifier: qualifier of the associated PHC
+ * @phc_source: source device of the associated PHC
+ * @phc_phyindex: index of PHY device source of the associated PHC
+ * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
+ * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
+ */
+struct kernel_ethtool_ts_info {
+ u32 cmd;
+ u32 so_timestamping;
+ int phc_index;
+ enum hwtstamp_provider_qualifier phc_qualifier;
+ enum hwtstamp_source phc_source;
+ int phc_phyindex;
+ u32 tx_types;
+ u32 rx_filters;
};
/**
* struct ethtool_ops - optional netdev operations
+ * @supported_input_xfrm: supported types of input xfrm from %RXH_XFRM_*.
* @cap_link_lanes_supported: indicates if the driver supports lanes
* parameter.
+ * @rxfh_per_ctx_fields: device supports selecting different header fields
+ * for Rx hash calculation and RSS for each additional context.
+ * @rxfh_per_ctx_key: device supports setting different RSS key for each
+ * additional context. Netlink API should report hfunc, key, and input_xfrm
+ * for every context, not just context 0.
+ * @cap_rss_rxnfc_adds: device supports nonzero ring_cookie in filters with
+ * %FLOW_RSS flag; the queue ID from the filter is added to the value from
+ * the indirection table to determine the delivery queue.
+ * @rxfh_indir_space: max size of RSS indirection tables, if indirection table
+ * size as returned by @get_rxfh_indir_size may change during lifetime
+ * of the device. Leave as 0 if the table size is constant.
+ * @rxfh_key_space: same as @rxfh_indir_space, but for the key.
+ * @rxfh_priv_size: size of the driver private data area the core should
+ * allocate for an RSS context (in &struct ethtool_rxfh_context).
+ * @rxfh_max_num_contexts: maximum (exclusive) supported RSS context ID.
+ * If this is zero then the core may choose any (nonzero) ID, otherwise
+ * the core will only use IDs strictly less than this value, as the
+ * @rss_context argument to @create_rxfh_context and friends.
* @supported_coalesce_params: supported types of interrupt coalescing.
- * @get_drvinfo: Report driver/device information. Should only set the
- * @driver, @version, @fw_version and @bus_info fields. If not
- * implemented, the @driver and @bus_info fields will be filled in
- * according to the netdev's parent device.
+ * @supported_ring_params: supported ring params.
+ * @supported_hwtstamp_qualifiers: bitfield of supported hwtstamp qualifiers.
+ * @get_drvinfo: Report driver/device information. Modern drivers no
+ * longer have to implement this callback. Most fields are
+ * correctly filled in by the core using system information, or
+ * populated using other driver operations.
* @get_regs_len: Get buffer length required for @get_regs
* @get_regs: Get device registers
* @get_wol: Report whether Wake-on-Lan is enabled
@@ -436,6 +930,7 @@ struct ethtool_module_eeprom {
* do not attach ext_substate attribute to netlink message). If link_ext_state
* and link_ext_substate are unknown, return -ENODATA. If not implemented,
* link_ext_state and link_ext_substate will not be sent to userspace.
+ * @get_link_ext_stats: Read extra link-related counters.
* @get_eeprom_len: Read range of EEPROM addresses for validation of
* @get_eeprom and @set_eeprom requests.
* Returns 0 if device does not support EEPROM access.
@@ -495,6 +990,7 @@ struct ethtool_module_eeprom {
* @reset: Reset (part of) the device, as specified by a bitmask of
* flags from &enum ethtool_reset_flags. Returns a negative
* error code or zero.
+ * @get_rx_ring_count: Return the number of RX rings
* @get_rxfh_key_size: Get the size of the RX flow hash key.
* Returns zero if not supported for this specific device.
* @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
@@ -507,15 +1003,34 @@ struct ethtool_module_eeprom {
* will remain unchanged.
* Returns a negative error code or zero. An error code must be returned
* if at least one unsupported change was requested.
- * @get_rxfh_context: Get the contents of the RX flow hash indirection table,
- * hash key, and/or hash function assiciated to the given rss context.
+ * @get_rxfh_fields: Get header fields used for flow hashing.
+ * @set_rxfh_fields: Set header fields used for flow hashing.
+ * @create_rxfh_context: Create a new RSS context with the specified RX flow
+ * hash indirection table, hash key, and hash function.
+ * The &struct ethtool_rxfh_context for this context is passed in @ctx;
+ * note that the indir table, hkey and hfunc are not yet populated as
+ * of this call. The driver does not need to update these; the core
+ * will do so if this op succeeds.
+ * However, if @rxfh.indir is set to %NULL, the driver must update the
+ * indir table in @ctx with the (default or inherited) table actually in
+ * use; similarly, if @rxfh.key is %NULL, @rxfh.hfunc is
+ * %ETH_RSS_HASH_NO_CHANGE, or @rxfh.input_xfrm is %RXH_XFRM_NO_CHANGE,
+ * the driver should update the corresponding information in @ctx.
+ * If the driver provides this method, it must also provide
+ * @modify_rxfh_context and @remove_rxfh_context.
* Returns a negative error code or zero.
- * @set_rxfh_context: Create, remove and configure RSS contexts. Allows setting
+ * @modify_rxfh_context: Reconfigure the specified RSS context. Allows setting
* the contents of the RX flow hash indirection table, hash key, and/or
- * hash function associated to the given context. Arguments which are set
- * to %NULL or zero will remain unchanged.
+ * hash function associated with the given context.
+ * Parameters which are set to %NULL or zero will remain unchanged.
+ * The &struct ethtool_rxfh_context for this context is passed in @ctx;
+ * note that it will still contain the *old* settings. The driver does
+ * not need to update these; the core will do so if this op succeeds.
* Returns a negative error code or zero. An error code must be returned
* if at least one unsupported change was requested.
+ * @remove_rxfh_context: Remove the specified RSS context.
+ * The &struct ethtool_rxfh_context for this context is passed in @ctx.
+ * Returns a negative error code or zero.
* @get_channels: Get number of channels.
* @set_channels: Set number of channels. Returns a negative error code or
* zero.
@@ -524,8 +1039,13 @@ struct ethtool_module_eeprom {
* @get_dump_data: Get dump data.
* @set_dump: Set dump specific flags to the device.
* @get_ts_info: Get the time stamping and PTP hardware clock capabilities.
+ *	It may be called under RCU, under the rtnl lock, or with a reference
+ *	held on the device.
* Drivers supporting transmit time stamps in software should set this to
* ethtool_op_get_ts_info().
+ * @get_ts_stats: Query the device hardware timestamping statistics. Drivers
+ * must not zero statistics which they don't report. The stats structure
+ *	is initialized to ETHTOOL_STAT_NOT_SET, indicating that the driver
+ *	does not report statistics.
* @get_module_info: Get the size and type of the eeprom contained within
* a plug-in module.
* @get_module_eeprom: Get the eeprom information from the plug-in module
@@ -569,11 +1089,21 @@ struct ethtool_module_eeprom {
* @get_module_eeprom_by_page: Get a region of plug-in module EEPROM data from
* specified page. Returns a negative error code or the amount of bytes
* read.
+ * @set_module_eeprom_by_page: Write to a region of plug-in module EEPROM,
+ * from kernel space only. Returns a negative error code or zero.
* @get_eth_phy_stats: Query some of the IEEE 802.3 PHY statistics.
* @get_eth_mac_stats: Query some of the IEEE 802.3 MAC statistics.
* @get_eth_ctrl_stats: Query some of the IEEE 802.3 MAC Ctrl statistics.
* @get_rmon_stats: Query some of the RMON (RFC 2819) statistics.
* Set %ranges to a pointer to zero-terminated array of byte ranges.
+ * @get_module_power_mode: Get the power mode policy for the plug-in module
+ * used by the network device and its operational power mode, if
+ * plugged-in.
+ * @set_module_power_mode: Set the power mode policy for the plug-in module
+ * used by the network device.
+ * @get_mm: Query the 802.3 MAC Merge layer state.
+ * @set_mm: Set the 802.3 MAC Merge layer parameters.
+ * @get_mm_stats: Query the 802.3 MAC Merge layer statistics.
*
* All operations are optional (i.e. the function pointer may be set
* to %NULL) and callers must take this into account. Callers must
@@ -588,8 +1118,18 @@ struct ethtool_module_eeprom {
* of the generic netdev features interface.
*/
struct ethtool_ops {
+ u32 supported_input_xfrm:8;
u32 cap_link_lanes_supported:1;
+ u32 rxfh_per_ctx_fields:1;
+ u32 rxfh_per_ctx_key:1;
+ u32 cap_rss_rxnfc_adds:1;
+ u32 rxfh_indir_space;
+ u16 rxfh_key_space;
+ u16 rxfh_priv_size;
+ u32 rxfh_max_num_contexts;
u32 supported_coalesce_params;
+ u32 supported_ring_params;
+ u32 supported_hwtstamp_qualifiers;
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
int (*get_regs_len)(struct net_device *);
void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
@@ -601,17 +1141,29 @@ struct ethtool_ops {
u32 (*get_link)(struct net_device *);
int (*get_link_ext_state)(struct net_device *,
struct ethtool_link_ext_state_info *);
+ void (*get_link_ext_stats)(struct net_device *dev,
+ struct ethtool_link_ext_stats *stats);
int (*get_eeprom_len)(struct net_device *);
int (*get_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
int (*set_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
- int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
- int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ int (*get_coalesce)(struct net_device *,
+ struct ethtool_coalesce *,
+ struct kernel_ethtool_coalesce *,
+ struct netlink_ext_ack *);
+ int (*set_coalesce)(struct net_device *,
+ struct ethtool_coalesce *,
+ struct kernel_ethtool_coalesce *,
+ struct netlink_ext_ack *);
void (*get_ringparam)(struct net_device *,
- struct ethtool_ringparam *);
+ struct ethtool_ringparam *,
+ struct kernel_ethtool_ringparam *,
+ struct netlink_ext_ack *);
int (*set_ringparam)(struct net_device *,
- struct ethtool_ringparam *);
+ struct ethtool_ringparam *,
+ struct kernel_ethtool_ringparam *,
+ struct netlink_ext_ack *);
void (*get_pause_stats)(struct net_device *dev,
struct ethtool_pause_stats *pause_stats);
void (*get_pauseparam)(struct net_device *,
@@ -633,30 +1185,44 @@ struct ethtool_ops {
int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
int (*flash_device)(struct net_device *, struct ethtool_flash *);
int (*reset)(struct net_device *, u32 *);
+ u32 (*get_rx_ring_count)(struct net_device *dev);
u32 (*get_rxfh_key_size)(struct net_device *);
u32 (*get_rxfh_indir_size)(struct net_device *);
- int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key,
- u8 *hfunc);
- int (*set_rxfh)(struct net_device *, const u32 *indir,
- const u8 *key, const u8 hfunc);
- int (*get_rxfh_context)(struct net_device *, u32 *indir, u8 *key,
- u8 *hfunc, u32 rss_context);
- int (*set_rxfh_context)(struct net_device *, const u32 *indir,
- const u8 *key, const u8 hfunc,
- u32 *rss_context, bool delete);
+ int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *);
+ int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *,
+ struct netlink_ext_ack *extack);
+ int (*get_rxfh_fields)(struct net_device *,
+ struct ethtool_rxfh_fields *);
+ int (*set_rxfh_fields)(struct net_device *,
+ const struct ethtool_rxfh_fields *,
+ struct netlink_ext_ack *extack);
+ int (*create_rxfh_context)(struct net_device *,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+ int (*modify_rxfh_context)(struct net_device *,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+ int (*remove_rxfh_context)(struct net_device *,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack);
void (*get_channels)(struct net_device *, struct ethtool_channels *);
int (*set_channels)(struct net_device *, struct ethtool_channels *);
int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
int (*get_dump_data)(struct net_device *,
struct ethtool_dump *, void *);
int (*set_dump)(struct net_device *, struct ethtool_dump *);
- int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *);
+ int (*get_ts_info)(struct net_device *, struct kernel_ethtool_ts_info *);
+ void (*get_ts_stats)(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats);
int (*get_module_info)(struct net_device *,
struct ethtool_modinfo *);
int (*get_module_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
- int (*get_eee)(struct net_device *, struct ethtool_eee *);
- int (*set_eee)(struct net_device *, struct ethtool_eee *);
+ int (*get_eee)(struct net_device *dev, struct ethtool_keee *eee);
+ int (*set_eee)(struct net_device *dev, struct ethtool_keee *eee);
int (*get_tunable)(struct net_device *,
const struct ethtool_tunable *, void *);
int (*set_tunable)(struct net_device *,
@@ -670,7 +1236,8 @@ struct ethtool_ops {
int (*set_link_ksettings)(struct net_device *,
const struct ethtool_link_ksettings *);
void (*get_fec_stats)(struct net_device *dev,
- struct ethtool_fec_stats *fec_stats);
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist);
int (*get_fecparam)(struct net_device *,
struct ethtool_fecparam *);
int (*set_fecparam)(struct net_device *,
@@ -684,6 +1251,9 @@ struct ethtool_ops {
int (*get_module_eeprom_by_page)(struct net_device *dev,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack);
+ int (*set_module_eeprom_by_page)(struct net_device *dev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
void (*get_eth_phy_stats)(struct net_device *dev,
struct ethtool_eth_phy_stats *phy_stats);
void (*get_eth_mac_stats)(struct net_device *dev,
@@ -693,6 +1263,16 @@ struct ethtool_ops {
void (*get_rmon_stats)(struct net_device *dev,
struct ethtool_rmon_stats *rmon_stats,
const struct ethtool_rmon_hist_range **ranges);
+ int (*get_module_power_mode)(struct net_device *dev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
+ int (*set_module_power_mode)(struct net_device *dev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
+ int (*get_mm)(struct net_device *dev, struct ethtool_mm_state *state);
+ int (*set_mm)(struct net_device *dev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack);
+ void (*get_mm_stats)(struct net_device *dev, struct ethtool_mm_stats *stats);
};
int ethtool_check_ops(const struct ethtool_ops *ops);
@@ -716,14 +1296,34 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd,
u32 *dev_speed, u8 *dev_duplex);
+/**
+ * struct ethtool_netdev_state - per-netdevice state for ethtool features
+ * @rss_ctx: XArray of custom RSS contexts
+ * @rss_lock: Protects entries in @rss_ctx. May be taken from
+ * within RTNL.
+ * @wol_enabled: Wake-on-LAN is enabled
+ * @module_fw_flash_in_progress: Module firmware flashing is in progress.
+ */
+struct ethtool_netdev_state {
+ struct xarray rss_ctx;
+ struct mutex rss_lock;
+ unsigned wol_enabled:1;
+ unsigned module_fw_flash_in_progress:1;
+};
+
struct phy_device;
struct phy_tdr_config;
+struct phy_plca_cfg;
+struct phy_plca_status;
/**
* struct ethtool_phy_ops - Optional PHY device options
* @get_sset_count: Get number of strings that @get_strings will write.
* @get_strings: Return a set of strings that describe the requested objects
* @get_stats: Return extended statistics about the PHY device.
+ * @get_plca_cfg: Return PLCA configuration.
+ * @set_plca_cfg: Set PLCA configuration.
+ * @get_plca_status: Get PLCA status.
* @start_cable_test: Start a cable test
* @start_cable_test_tdr: Start a Time Domain Reflectometry cable test
*
@@ -735,6 +1335,13 @@ struct ethtool_phy_ops {
int (*get_strings)(struct phy_device *dev, u8 *data);
int (*get_stats)(struct phy_device *dev,
struct ethtool_stats *stats, u64 *data);
+ int (*get_plca_cfg)(struct phy_device *dev,
+ struct phy_plca_cfg *plca_cfg);
+ int (*set_plca_cfg)(struct phy_device *dev,
+ const struct phy_plca_cfg *plca_cfg,
+ struct netlink_ext_ack *extack);
+ int (*get_plca_status)(struct phy_device *dev,
+ struct phy_plca_status *plca_st);
int (*start_cable_test)(struct phy_device *phydev,
struct netlink_ext_ack *extack);
int (*start_cable_test_tdr)(struct phy_device *phydev,
@@ -758,12 +1365,120 @@ ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
enum ethtool_link_mode_bit_indices link_mode);
/**
+ * ethtool_get_phc_vclocks - Derive PHC vclocks information; the caller
+ *	is responsible for freeing the memory of @vclock_index
+ * @dev: pointer to net_device structure
+ * @vclock_index: pointer to pointer of vclock index
+ *
+ * Return: number of phc vclocks
+ */
+int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index);
+
+/* Some generic methods drivers may use in their ethtool_ops */
+u32 ethtool_op_get_link(struct net_device *dev);
+int ethtool_op_get_ts_info(struct net_device *dev,
+ struct kernel_ethtool_ts_info *eti);
+
+/**
+ * ethtool_mm_frag_size_add_to_min - Translate (standard) additional fragment
+ * size expressed as multiplier into (absolute) minimum fragment size
+ * value expressed in octets
+ * @val_add: Value of addFragSize multiplier
+ */
+static inline u32 ethtool_mm_frag_size_add_to_min(u32 val_add)
+{
+ return (ETH_ZLEN + ETH_FCS_LEN) * (1 + val_add) - ETH_FCS_LEN;
+}
+
+/**
+ * ethtool_mm_frag_size_min_to_add - Translate (absolute) minimum fragment size
+ * expressed in octets into (standard) additional fragment size expressed
+ * as multiplier
+ * @val_min: Minimum fragment size expressed in octets
+ * @val_add: Pointer where the standard addFragSize value is to be returned
+ * @extack: Netlink extended ack
+ *
+ * Translate a value in octets to one of 0, 1, 2, 3 according to the reverse
+ * application of the 802.3 formula 64 * (1 + addFragSize) - 4. To be called
+ * by drivers which do not support programming the minimum fragment size to a
+ * continuous range. Returns error on other fragment length values.
+ */
+static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
+ struct netlink_ext_ack *extack)
+{
+ u32 add_frag_size;
+
+ for (add_frag_size = 0; add_frag_size < 4; add_frag_size++) {
+ if (ethtool_mm_frag_size_add_to_min(add_frag_size) == val_min) {
+ *val_add = add_frag_size;
+ return 0;
+ }
+ }
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "minFragSize required to be one of 60, 124, 188 or 252");
+ return -EINVAL;
+}
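
Working the formula through: (ETH_ZLEN + ETH_FCS_LEN) * (1 + addFragSize) - ETH_FCS_LEN is 64 * (1 + addFragSize) - 4, so addFragSize values 0, 1, 2 and 3 map to minimum fragment sizes of 60, 124, 188 and 252 octets respectively — exactly the four values the error message above accepts.
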
+
+/**
+ * ethtool_get_ts_info_by_layer - Obtains time stamping capabilities from the MAC or PHY layer.
+ * @dev: pointer to net_device structure
+ * @info: buffer to hold the result
+ * Returns: zero on success, non-zero otherwise.
+ */
+int ethtool_get_ts_info_by_layer(struct net_device *dev,
+ struct kernel_ethtool_ts_info *info);
+
+/**
* ethtool_sprintf - Write formatted string to ethtool string data
- * @data: Pointer to start of string to update
+ * @data: Pointer to a pointer to the start of string to update
* @fmt: Format of string to write
*
- * Write formatted string to data. Update data to point at start of
+ * Write formatted string to *data. Update *data to point at start of
* next string.
*/
extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
+
+/**
+ * ethtool_puts - Write string to ethtool string data
+ * @data: Pointer to a pointer to the start of string to update
+ * @str: String to write
+ *
+ * Write string to *data without a trailing newline. Update *data
+ * to point at start of next string.
+ *
+ * Prefer this function to ethtool_sprintf() when given only
+ * two arguments or if @fmt is just "%s".
+ */
+extern void ethtool_puts(u8 **data, const char *str);
+
+/**
+ * ethtool_cpy - Write possibly-not-NUL-terminated string to ethtool string data
+ * @data: Pointer to a pointer to the start of string to write into
+ * @str: NUL-byte padded char array of size ETH_GSTRING_LEN to copy from
+ */
+#define ethtool_cpy(data, str) do { \
+ BUILD_BUG_ON(sizeof(str) != ETH_GSTRING_LEN); \
+ memcpy(*(data), str, ETH_GSTRING_LEN); \
+ *(data) += ETH_GSTRING_LEN; \
+} while (0)
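
A sketch of a hypothetical @get_strings op combining the three helpers; all names and counts are illustrative:

	static const char mydrv_fixed_names[][ETH_GSTRING_LEN] = {
		"rx_ok", "tx_ok",
	};

	/* assumes sset == ETH_SS_STATS; real drivers check it */
	static void mydrv_get_strings(struct net_device *dev, u32 sset, u8 *data)
	{
		int i;

		ethtool_puts(&data, "link_flaps");		  /* plain literal */
		for (i = 0; i < 4; i++)
			ethtool_sprintf(&data, "q%d_drops", i);	  /* formatted */
		for (i = 0; i < ARRAY_SIZE(mydrv_fixed_names); i++)
			ethtool_cpy(&data, mydrv_fixed_names[i]); /* fixed-size copy */
	}
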
+
+/* Link mode to forced speed capabilities maps */
+struct ethtool_forced_speed_map {
+ u32 speed;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(caps);
+
+ const u32 *cap_arr;
+ u32 arr_size;
+};
+
+#define ETHTOOL_FORCED_SPEED_MAP(prefix, value) \
+{ \
+ .speed = SPEED_##value, \
+ .cap_arr = prefix##_##value, \
+ .arr_size = ARRAY_SIZE(prefix##_##value), \
+}
+
+void
+ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size);
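
Putting the pieces together, a driver might build its table like this (a sketch; the link-mode index arrays are invented examples of ETHTOOL_LINK_MODE_*_BIT values):

	static const u32 mydrv_forced_speed_1000[] = {
		ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
		ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
	};

	static struct ethtool_forced_speed_map mydrv_forced_speed_maps[] = {
		ETHTOOL_FORCED_SPEED_MAP(mydrv_forced_speed, 1000),
	};

	static void mydrv_init_speed_maps(void)
	{
		/* converts each .cap_arr index array into the .caps bitmap once */
		ethtool_forced_speed_maps_init(mydrv_forced_speed_maps,
					       ARRAY_SIZE(mydrv_forced_speed_maps));
	}
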
#endif /* _LINUX_ETHTOOL_H */
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
index 1e7bf78cb382..39254b2726c0 100644
--- a/include/linux/ethtool_netlink.h
+++ b/include/linux/ethtool_netlink.h
@@ -10,6 +10,9 @@
#define __ETHTOOL_LINK_MODE_MASK_NWORDS \
DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32)
+#define ETHTOOL_PAUSE_STAT_CNT (__ETHTOOL_A_PAUSE_STAT_CNT - \
+ ETHTOOL_A_PAUSE_STAT_TX_FRAMES)
+
enum ethtool_multicast_groups {
ETHNL_MCGRP_MONITOR,
};
@@ -20,12 +23,28 @@ struct phy_device;
int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd);
void ethnl_cable_test_free(struct phy_device *phydev);
void ethnl_cable_test_finished(struct phy_device *phydev);
-int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, u8 result);
-int ethnl_cable_test_fault_length(struct phy_device *phydev, u8 pair, u32 cm);
+int ethnl_cable_test_result_with_src(struct phy_device *phydev, u8 pair,
+ u8 result, u32 src);
+int ethnl_cable_test_fault_length_with_src(struct phy_device *phydev, u8 pair,
+ u32 cm, u32 src);
int ethnl_cable_test_amplitude(struct phy_device *phydev, u8 pair, s16 mV);
int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV);
int ethnl_cable_test_step(struct phy_device *phydev, u32 first, u32 last,
u32 step);
+void ethtool_aggregate_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats);
+void ethtool_aggregate_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats);
+void ethtool_aggregate_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats);
+void ethtool_aggregate_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats);
+void ethtool_aggregate_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats);
+bool ethtool_dev_mm_supported(struct net_device *dev);
+
+void ethnl_pse_send_ntf(struct net_device *netdev, unsigned long notif);
+
#else
static inline int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd)
{
@@ -39,14 +58,14 @@ static inline void ethnl_cable_test_free(struct phy_device *phydev)
static inline void ethnl_cable_test_finished(struct phy_device *phydev)
{
}
-static inline int ethnl_cable_test_result(struct phy_device *phydev, u8 pair,
- u8 result)
+static inline int ethnl_cable_test_result_with_src(struct phy_device *phydev,
+ u8 pair, u8 result, u32 src)
{
return -EOPNOTSUPP;
}
-static inline int ethnl_cable_test_fault_length(struct phy_device *phydev,
- u8 pair, u32 cm)
+static inline int ethnl_cable_test_fault_length_with_src(struct phy_device *phydev,
+ u8 pair, u32 cm, u32 src)
{
return -EOPNOTSUPP;
}
@@ -67,5 +86,61 @@ static inline int ethnl_cable_test_step(struct phy_device *phydev, u32 first,
{
return -EOPNOTSUPP;
}
+
+static inline void
+ethtool_aggregate_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_phy_stats(struct net_device *dev,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_ctrl_stats(struct net_device *dev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+}
+
+static inline void
+ethtool_aggregate_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *rmon_stats)
+{
+}
+
+static inline bool ethtool_dev_mm_supported(struct net_device *dev)
+{
+ return false;
+}
+
+static inline void ethnl_pse_send_ntf(struct net_device *netdev,
+ unsigned long notif)
+{
+}
+
#endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */
+
+static inline int ethnl_cable_test_result(struct phy_device *phydev, u8 pair,
+ u8 result)
+{
+ return ethnl_cable_test_result_with_src(phydev, pair, result,
+ ETHTOOL_A_CABLE_INF_SRC_TDR);
+}
+
+static inline int ethnl_cable_test_fault_length(struct phy_device *phydev,
+ u8 pair, u32 cm)
+{
+ return ethnl_cable_test_fault_length_with_src(phydev, pair, cm,
+ ETHTOOL_A_CABLE_INF_SRC_TDR);
+}
+
#endif /* _LINUX_ETHTOOL_NETLINK_H_ */
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index fa0a524baed0..e32bee4345fb 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -9,11 +9,12 @@
#ifndef _LINUX_EVENTFD_H
#define _LINUX_EVENTFD_H
-#include <linux/fcntl.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <uapi/linux/eventfd.h>
/*
* CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -22,10 +23,6 @@
* from eventfd, in order to leave a free define-space for
* shared O_* flags.
*/
-#define EFD_SEMAPHORE (1 << 0)
-#define EFD_CLOEXEC O_CLOEXEC
-#define EFD_NONBLOCK O_NONBLOCK
-
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
@@ -38,16 +35,14 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
+void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
-DECLARE_PER_CPU(int, eventfd_wake_count);
-
-static inline bool eventfd_signal_count(void)
+static inline bool eventfd_signal_allowed(void)
{
- return this_cpu_read(eventfd_wake_count);
+ return !current->in_eventfd;
}
#else /* CONFIG_EVENTFD */
@@ -62,9 +57,8 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
return ERR_PTR(-ENOSYS);
}
-static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
+static inline void eventfd_signal_mask(struct eventfd_ctx *ctx, __poll_t mask)
{
- return -ENOSYS;
}
static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
@@ -78,9 +72,9 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
return -ENOSYS;
}
-static inline bool eventfd_signal_count(void)
+static inline bool eventfd_signal_allowed(void)
{
- return false;
+ return true;
}
static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
@@ -90,5 +84,10 @@ static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
#endif
+static inline void eventfd_signal(struct eventfd_ctx *ctx)
+{
+ eventfd_signal_mask(ctx, 0);
+}
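
With the counter-less wrapper above, the typical in-kernel producer pattern looks like this sketch (names invented, error handling trimmed):

	#include <linux/eventfd.h>

	static struct eventfd_ctx *mydrv_irq_ctx;

	static int mydrv_set_irqfd(int fd)
	{
		struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

		if (IS_ERR(ctx))
			return PTR_ERR(ctx);
		mydrv_irq_ctx = ctx;	/* released later via eventfd_ctx_put() */
		return 0;
	}

	static void mydrv_complete(void)
	{
		if (mydrv_irq_ctx)
			eventfd_signal(mydrv_irq_ctx);	/* wakes any poll/read waiter */
	}
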
+
#endif /* _LINUX_EVENTFD_H */
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 593322c946e6..ccb478eb174b 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -25,6 +25,10 @@ struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd, unsigned long t
/* Used to release the epoll bits inside the "struct file" */
void eventpoll_release_file(struct file *file);
+/* Copy ready events to userspace */
+int epoll_sendevents(struct file *file, struct epoll_event __user *events,
+ int maxevents);
+
/*
* This is called from inside fs/file_table.c:__fput() to unlink files
* from the eventpoll interface. We need to have this facility to cleanup
@@ -42,7 +46,7 @@ static inline void eventpoll_release(struct file *file)
 * because the file is on the way to be removed and nobody (but
 * eventpoll) still has a reference to this file.
*/
- if (likely(!file->f_ep))
+ if (likely(!READ_ONCE(file->f_ep)))
return;
/*
@@ -68,4 +72,22 @@ static inline void eventpoll_release(struct file *file) {}
#endif
+#if defined(CONFIG_ARM) && defined(CONFIG_OABI_COMPAT)
+/* ARM OABI has an incompatible struct layout and needs a special handler */
+extern struct epoll_event __user *
+epoll_put_uevent(__poll_t revents, __u64 data,
+ struct epoll_event __user *uevent);
+#else
+static inline struct epoll_event __user *
+epoll_put_uevent(__poll_t revents, __u64 data,
+ struct epoll_event __user *uevent)
+{
+ if (__put_user(revents, &uevent->events) ||
+ __put_user(data, &uevent->data))
+ return NULL;
+
+ return uevent+1;
+}
+#endif
+
#endif /* #ifndef _LINUX_EVENTPOLL_H */
diff --git a/include/linux/evm.h b/include/linux/evm.h
index 8302bc29bb35..ddece4a6b25d 100644
--- a/include/linux/evm.h
+++ b/include/linux/evm.h
@@ -12,29 +12,22 @@
#include <linux/integrity.h>
#include <linux/xattr.h>
-struct integrity_iint_cache;
-
#ifdef CONFIG_EVM
extern int evm_set_key(void *key, size_t keylen);
extern enum integrity_status evm_verifyxattr(struct dentry *dentry,
const char *xattr_name,
void *xattr_value,
- size_t xattr_value_len,
- struct integrity_iint_cache *iint);
-extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr);
-extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid);
-extern int evm_inode_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size);
-extern void evm_inode_post_setxattr(struct dentry *dentry,
- const char *xattr_name,
- const void *xattr_value,
- size_t xattr_value_len);
-extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name);
-extern void evm_inode_post_removexattr(struct dentry *dentry,
- const char *xattr_name);
-extern int evm_inode_init_security(struct inode *inode,
- const struct xattr *xattr_array,
- struct xattr *evm);
+ size_t xattr_value_len);
+int evm_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, struct xattr *xattrs,
+ int *xattr_count);
+extern bool evm_revalidate_status(const char *xattr_name);
+extern int evm_protected_xattr_if_enabled(const char *req_xattr_name);
+extern int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
+ int buffer_size, char type,
+ bool canonical_fmt);
+extern bool evm_metadata_changed(struct inode *inode,
+ struct inode *metadata_inode);
#ifdef CONFIG_FS_POSIX_ACL
extern int posix_xattr_acl(const char *xattrname);
#else
@@ -54,54 +47,41 @@ static inline int evm_set_key(void *key, size_t keylen)
static inline enum integrity_status evm_verifyxattr(struct dentry *dentry,
const char *xattr_name,
void *xattr_value,
- size_t xattr_value_len,
- struct integrity_iint_cache *iint)
+ size_t xattr_value_len)
{
return INTEGRITY_UNKNOWN;
}
#endif
-static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+static inline int evm_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr,
+ struct xattr *xattrs,
+ int *xattr_count)
{
return 0;
}
-static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
-{
- return;
-}
-
-static inline int evm_inode_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size)
+static inline bool evm_revalidate_status(const char *xattr_name)
{
- return 0;
+ return false;
}
-static inline void evm_inode_post_setxattr(struct dentry *dentry,
- const char *xattr_name,
- const void *xattr_value,
- size_t xattr_value_len)
+static inline int evm_protected_xattr_if_enabled(const char *req_xattr_name)
{
- return;
+ return false;
}
-static inline int evm_inode_removexattr(struct dentry *dentry,
- const char *xattr_name)
+static inline int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
+ int buffer_size, char type,
+ bool canonical_fmt)
{
- return 0;
-}
-
-static inline void evm_inode_post_removexattr(struct dentry *dentry,
- const char *xattr_name)
-{
- return;
+ return -EOPNOTSUPP;
}
-static inline int evm_inode_init_security(struct inode *inode,
- const struct xattr *xattr_array,
- struct xattr *evm)
+static inline bool evm_metadata_changed(struct inode *inode,
+ struct inode *metadata_inode)
{
- return 0;
+ return false;
}
#endif /* CONFIG_EVM */
diff --git a/include/linux/execmem.h b/include/linux/execmem.h
new file mode 100644
index 000000000000..7de229134e30
--- /dev/null
+++ b/include/linux/execmem.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_EXECMEM_ALLOC_H
+#define _LINUX_EXECMEM_ALLOC_H
+
+#include <linux/types.h>
+#include <linux/moduleloader.h>
+#include <linux/cleanup.h>
+
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+ !defined(CONFIG_KASAN_VMALLOC)
+#include <linux/kasan.h>
+#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
+#define MODULE_ALIGN PAGE_SIZE
+#endif
+
+/**
+ * enum execmem_type - types of executable memory ranges
+ *
+ * There are several subsystems that allocate executable memory.
+ * Architectures define different restrictions on placement,
+ * permissions, alignment and other parameters for memory that can be used
+ * by these subsystems.
+ * Types in this enum identify subsystems that allocate executable memory
+ * and let architectures define parameters for ranges suitable for
+ * allocations by each subsystem.
+ *
+ * @EXECMEM_DEFAULT: default parameters that would be used for types that
+ * are not explicitly defined.
+ * @EXECMEM_MODULE_TEXT: parameters for module text sections
+ * @EXECMEM_KPROBES: parameters for kprobes
+ * @EXECMEM_FTRACE: parameters for ftrace
+ * @EXECMEM_BPF: parameters for BPF
+ * @EXECMEM_MODULE_DATA: parameters for module data sections
+ * @EXECMEM_TYPE_MAX: number of execmem types, used to size per-type arrays
+ */
+enum execmem_type {
+ EXECMEM_DEFAULT,
+ EXECMEM_MODULE_TEXT = EXECMEM_DEFAULT,
+ EXECMEM_KPROBES,
+ EXECMEM_FTRACE,
+ EXECMEM_BPF,
+ EXECMEM_MODULE_DATA,
+ EXECMEM_TYPE_MAX,
+};
+
+/**
+ * enum execmem_range_flags - options for executable memory allocations
+ * @EXECMEM_KASAN_SHADOW: allocate kasan shadow
+ * @EXECMEM_ROX_CACHE: allocations should use ROX cache of huge pages
+ */
+enum execmem_range_flags {
+ EXECMEM_KASAN_SHADOW = (1 << 0),
+ EXECMEM_ROX_CACHE = (1 << 1),
+};
+
+#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX
+/**
+ * execmem_fill_trapping_insns - set memory to contain instructions that
+ * will trap
+ * @ptr: pointer to memory to fill
+ * @size: size of the range to fill
+ *
+ * A hook for architectures to fill execmem ranges with invalid instructions.
+ * Architectures that use EXECMEM_ROX_CACHE must implement this.
+ */
+void execmem_fill_trapping_insns(void *ptr, size_t size);
+
+/**
+ * execmem_restore_rox - restore read-only-execute permissions
+ * @ptr: address of the region to remap
+ * @size: size of the region to remap
+ *
+ * Restores read-only-execute permissions on a range [@ptr, @ptr + @size)
+ * after it was temporarily remapped as writable. Relies on architecture
+ * implementation of set_memory_rox() to restore mapping using large pages.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int execmem_restore_rox(void *ptr, size_t size);
+#else
+static inline int execmem_restore_rox(void *ptr, size_t size) { return 0; }
+#endif
+
+/**
+ * struct execmem_range - definition of an address space suitable for code and
+ * related data allocations
+ * @start: address space start
+ * @end: address space end (inclusive)
+ * @fallback_start: start of the secondary address space range for fallback
+ * allocations on architectures that require it
+ * @fallback_end: end of the secondary address space range (inclusive)
+ * @pgprot: permissions for memory in this address space
+ * @alignment: alignment required for text allocations
+ * @flags: options for memory allocations for this range
+ */
+struct execmem_range {
+ unsigned long start;
+ unsigned long end;
+ unsigned long fallback_start;
+ unsigned long fallback_end;
+ pgprot_t pgprot;
+ unsigned int alignment;
+ enum execmem_range_flags flags;
+};
+
+/**
+ * struct execmem_info - architecture parameters for code allocations
+ * @ranges: array of parameter sets defining architecture specific
+ * parameters for executable memory allocations. The ranges that are not
+ * explicitly initialized by an architecture use parameters defined for
+ * @EXECMEM_DEFAULT.
+ */
+struct execmem_info {
+ struct execmem_range ranges[EXECMEM_TYPE_MAX];
+};
+
+/**
+ * execmem_arch_setup - define parameters for allocations of executable memory
+ *
+ * A hook for architectures to define parameters for allocations of
+ * executable memory. These parameters should be filled into the
+ * @execmem_info structure.
+ *
+ * For architectures that do not implement this method, a default set of
+ * parameters will be used.
+ *
+ * Return: a structure defining architecture parameters and restrictions
+ * for allocations of executable memory
+ */
+struct execmem_info *execmem_arch_setup(void);
+
+/**
+ * execmem_alloc - allocate executable memory
+ * @type: type of the allocation
+ * @size: how many bytes of memory are required
+ *
+ * Allocates memory that will contain executable code, either generated or
+ * loaded from kernel modules, or data coupled with executable code, like
+ * data sections in kernel modules.
+ *
+ * The memory will have protections defined by architecture for executable
+ * region of the @type.
+ *
+ * Return: a pointer to the allocated memory or %NULL
+ */
+void *execmem_alloc(enum execmem_type type, size_t size);
+
+/**
+ * execmem_alloc_rw - allocate writable executable memory
+ * @type: type of the allocation
+ * @size: how many bytes of memory are required
+ *
+ * Allocates memory that will contain executable code, either generated or
+ * loaded from kernel modules, or data coupled with executable code, like
+ * data sections in kernel modules.
+ *
+ * Forces writable permissions on the allocated memory; the caller is
+ * responsible for managing the permissions afterwards.
+ *
+ * For architectures that use the ROX cache, the permissions will be set to
+ * R+W. For architectures that don't use the ROX cache, the default
+ * permissions for @type are used, as they must be writable.
+ *
+ * Return: a pointer to the allocated memory or %NULL
+ */
+void *execmem_alloc_rw(enum execmem_type type, size_t size);
+
+/**
+ * execmem_free - free executable memory
+ * @ptr: pointer to the memory that should be freed
+ */
+void execmem_free(void *ptr);
+
+DEFINE_FREE(execmem, void *, if (_T) execmem_free(_T));
+
+#ifdef CONFIG_MMU
+/**
+ * execmem_vmap - create virtual mapping for EXECMEM_MODULE_DATA memory
+ * @size: size of the virtual mapping in bytes
+ *
+ * Maps a virtually contiguous area in the range suitable for EXECMEM_MODULE_DATA.
+ *
+ * Return: the area descriptor on success or %NULL on failure.
+ */
+struct vm_struct *execmem_vmap(size_t size);
+#endif
+
+/**
+ * execmem_is_rox - check if execmem is read-only
+ * @type: the execmem type to check
+ *
+ * Return: %true if the @type is read-only, %false if it's writable
+ */
+bool execmem_is_rox(enum execmem_type type);
+
+#if defined(CONFIG_EXECMEM) && !defined(CONFIG_ARCH_WANTS_EXECMEM_LATE)
+void execmem_init(void);
+#else
+static inline void execmem_init(void) {}
+#endif
+
+#endif /* _LINUX_EXECMEM_ALLOC_H */
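For illustration, a minimal caller of this API might look as follows (the
function name and the EXECMEM_BPF choice are arbitrary; this is a sketch,
not code from the patch):

	#include <linux/execmem.h>
	#include <linux/string.h>

	/* Allocate a writable region for generated code and copy it in.
	 * execmem_alloc_rw() guarantees the region is writable; restoring
	 * read-only-execute permissions (execmem_restore_rox()) is the
	 * caller's job on architectures that use the ROX cache.
	 */
	static void *emit_code(const void *insns, size_t size)
	{
		void *buf = execmem_alloc_rw(EXECMEM_BPF, size);

		if (!buf)
			return NULL;
		memcpy(buf, insns, size);
		return buf;
	}

The DEFINE_FREE() declaration above also enables scope-based cleanup: a local
declared as "void *p __free(execmem) = execmem_alloc(...);" is freed
automatically when it goes out of scope.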
diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
new file mode 100644
index 000000000000..d445705ac13c
--- /dev/null
+++ b/include/linux/export-internal.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Please do not include this explicitly.
+ * This is used by C files generated by modpost.
+ */
+
+#ifndef __LINUX_EXPORT_INTERNAL_H__
+#define __LINUX_EXPORT_INTERNAL_H__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#if defined(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)
+/*
+ * relative reference: this reduces the size by half on 64-bit architectures,
+ * and eliminates the need for absolute relocations that require runtime
+ * processing on relocatable kernels.
+ */
+#define __KSYM_ALIGN ".balign 4"
+#define __KSYM_REF(sym) ".long " #sym "- ."
+#elif defined(CONFIG_64BIT)
+#define __KSYM_ALIGN ".balign 8"
+#define __KSYM_REF(sym) ".quad " #sym
+#else
+#define __KSYM_ALIGN ".balign 4"
+#define __KSYM_REF(sym) ".long " #sym
+#endif
+
+/*
+ * For every exported symbol, do the following:
+ *
+ * - Put the name of the symbol and namespace (empty string "" for none) in
+ * __ksymtab_strings.
+ * - Place a struct kernel_symbol entry in the __ksymtab section.
+ *
+ * Note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
+ * section flag requires it. Use '%progbits' instead of '@progbits' since the
+ * former apparently works on all arches according to the binutils source.
+ */
+#define __KSYMTAB(name, sym, sec, ns) \
+ asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1" "\n" \
+ "__kstrtab_" #name ":" "\n" \
+ " .asciz \"" #name "\"" "\n" \
+ "__kstrtabns_" #name ":" "\n" \
+ " .asciz \"" ns "\"" "\n" \
+ " .previous" "\n" \
+ " .section \"___ksymtab" sec "+" #name "\", \"a\"" "\n" \
+ __KSYM_ALIGN "\n" \
+ "__ksymtab_" #name ":" "\n" \
+ __KSYM_REF(sym) "\n" \
+ __KSYM_REF(__kstrtab_ ##name) "\n" \
+ __KSYM_REF(__kstrtabns_ ##name) "\n" \
+ " .previous" "\n" \
+ )
+
+#if defined(CONFIG_PARISC) && defined(CONFIG_64BIT)
+#define KSYM_FUNC(name) P%name
+#else
+#define KSYM_FUNC(name) name
+#endif
+
+#define KSYMTAB_FUNC(name, sec, ns) __KSYMTAB(name, KSYM_FUNC(name), sec, ns)
+#define KSYMTAB_DATA(name, sec, ns) __KSYMTAB(name, name, sec, ns)
+
+#define SYMBOL_CRC(sym, crc, sec) \
+ asm(".section \"___kcrctab" sec "+" #sym "\",\"a\"" "\n" \
+ ".balign 4" "\n" \
+ "__crc_" #sym ":" "\n" \
+ ".long " #crc "\n" \
+ ".previous" "\n")
+
+#endif /* __LINUX_EXPORT_INTERNAL_H__ */
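For illustration, the C files generated by modpost invoke these macros
roughly as follows (the symbol name and CRC value here are made up):

	#include <linux/export-internal.h>

	/* Emit the __ksymtab entry for a GPL-only export ("" = no namespace). */
	KSYMTAB_FUNC(my_driver_init, "_gpl", "");

	/* With CONFIG_MODVERSIONS, modpost records the symbol CRC as well. */
	SYMBOL_CRC(my_driver_init, 0x1badcafe, "_gpl");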
diff --git a/include/linux/export.h b/include/linux/export.h
index 6271a5d9c988..a686fd0ba406 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -2,164 +2,95 @@
#ifndef _LINUX_EXPORT_H
#define _LINUX_EXPORT_H
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/stringify.h>
+
/*
- * Export symbols from the kernel to modules. Forked from module.h
- * to reduce the amount of pointless cruft we feed to gcc when only
- * exporting a simple symbol or two.
+ * This comment block is used by fixdep. Please do not remove.
*
- * Try not to add #includes here. It slows compilation and makes kernel
- * hackers place grumpy comments in header files.
+ * When CONFIG_MODVERSIONS is changed from n to y, all source files having
+ * EXPORT_SYMBOL variants must be re-compiled because genksyms is run as a
+ * side effect of the *.o build rule.
*/
-#ifndef __ASSEMBLY__
-#ifdef MODULE
-extern struct module __this_module;
-#define THIS_MODULE (&__this_module)
-#else
-#define THIS_MODULE ((struct module *)0)
-#endif
-
-#ifdef CONFIG_MODVERSIONS
-/* Mark the CRC weak since genksyms apparently decides not to
- * generate a checksums for some symbols */
-#if defined(CONFIG_MODULE_REL_CRCS)
-#define __CRC_SYMBOL(sym, sec) \
- asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
- " .weak __crc_" #sym " \n" \
- " .long __crc_" #sym " - . \n" \
- " .previous \n")
-#else
-#define __CRC_SYMBOL(sym, sec) \
- asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
- " .weak __crc_" #sym " \n" \
- " .long __crc_" #sym " \n" \
- " .previous \n")
-#endif
+#ifdef CONFIG_64BIT
+#define __EXPORT_SYMBOL_REF(sym) \
+ .balign 8 ASM_NL \
+ .quad sym
#else
-#define __CRC_SYMBOL(sym, sec)
+#define __EXPORT_SYMBOL_REF(sym) \
+ .balign 4 ASM_NL \
+ .long sym
#endif
-#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
-#include <linux/compiler.h>
/*
- * Emit the ksymtab entry as a pair of relative references: this reduces
- * the size by half on 64-bit architectures, and eliminates the need for
- * absolute relocations that require runtime processing on relocatable
- * kernels.
- */
-#define __KSYMTAB_ENTRY(sym, sec) \
- __ADDRESSABLE(sym) \
- asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
- " .balign 4 \n" \
- "__ksymtab_" #sym ": \n" \
- " .long " #sym "- . \n" \
- " .long __kstrtab_" #sym "- . \n" \
- " .long __kstrtabns_" #sym "- . \n" \
- " .previous \n")
-
-struct kernel_symbol {
- int value_offset;
- int name_offset;
- int namespace_offset;
-};
-#else
-#define __KSYMTAB_ENTRY(sym, sec) \
- static const struct kernel_symbol __ksymtab_##sym \
- __attribute__((section("___ksymtab" sec "+" #sym), used)) \
- __aligned(sizeof(void *)) \
- = { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym }
-
-struct kernel_symbol {
- unsigned long value;
- const char *name;
- const char *namespace;
-};
-#endif
-
-#ifdef __GENKSYMS__
-
-#define ___EXPORT_SYMBOL(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sym)
-
-#else
-
-/*
- * For every exported symbol, do the following:
- *
- * - If applicable, place a CRC entry in the __kcrctab section.
- * - Put the name of the symbol and namespace (empty string "" for none) in
- * __ksymtab_strings.
- * - Place a struct kernel_symbol entry in the __ksymtab section.
+ * The LLVM integrated assembler can merge adjacent string literals (like
+ * C and GNU-as) passed to '.ascii', but not to '.asciz', and chokes on:
*
- * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
- * section flag requires it. Use '%progbits' instead of '@progbits' since the
- * former apparently works on all arches according to the binutils source.
+ * .asciz "MODULE_" "kvm" ;
*/
-#define ___EXPORT_SYMBOL(sym, sec, ns) \
- extern typeof(sym) sym; \
- extern const char __kstrtab_##sym[]; \
- extern const char __kstrtabns_##sym[]; \
- __CRC_SYMBOL(sym, sec); \
- asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1 \n" \
- "__kstrtab_" #sym ": \n" \
- " .asciz \"" #sym "\" \n" \
- "__kstrtabns_" #sym ": \n" \
- " .asciz \"" ns "\" \n" \
- " .previous \n"); \
- __KSYMTAB_ENTRY(sym, sec)
+#define ___EXPORT_SYMBOL(sym, license, ns...) \
+ .section ".export_symbol","a" ASM_NL \
+ __export_symbol_##sym: ASM_NL \
+ .asciz license ASM_NL \
+ .ascii ns "\0" ASM_NL \
+ __EXPORT_SYMBOL_REF(sym) ASM_NL \
+ .previous
-#endif
-
-#if !defined(CONFIG_MODULES) || defined(__DISABLE_EXPORTS)
+#if defined(__DISABLE_EXPORTS)
/*
* Allow symbol exports to be disabled completely so that C code may
* be reused in other execution contexts such as the UEFI stub or the
* decompressor.
*/
-#define __EXPORT_SYMBOL(sym, sec, ns)
+#define __EXPORT_SYMBOL(sym, license, ns)
+
+#elif defined(__GENKSYMS__)
-#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+#define __EXPORT_SYMBOL(sym, license, ns) __GENKSYMS_EXPORT_SYMBOL(sym)
-#include <generated/autoksyms.h>
+#elif defined(__ASSEMBLY__)
+#define __EXPORT_SYMBOL(sym, license, ns) \
+ ___EXPORT_SYMBOL(sym, license, ns)
+
+#else
+
+#ifdef CONFIG_GENDWARFKSYMS
/*
- * For fine grained build dependencies, we want to tell the build system
- * about each possible exported symbol even if they're not actually exported.
- * We use a symbol pattern __ksym_marker_<symbol> that the build system filters
- * from the $(NM) output (see scripts/gen_ksymdeps.sh). These symbols are
- * discarded in the final link stage.
+ * With CONFIG_GENDWARFKSYMS, ensure the compiler emits debugging
+ * information for all exported symbols, including those defined in
+ * different TUs, by adding a __gendwarfksyms_ptr_<symbol> pointer
+ * that's discarded during the final link.
*/
-#define __ksym_marker(sym) \
- static int __ksym_marker_##sym[0] __section(".discard.ksym") __used
-
-#define __EXPORT_SYMBOL(sym, sec, ns) \
- __ksym_marker(sym); \
- __cond_export_sym(sym, sec, ns, __is_defined(__KSYM_##sym))
-#define __cond_export_sym(sym, sec, ns, conf) \
- ___cond_export_sym(sym, sec, ns, conf)
-#define ___cond_export_sym(sym, sec, ns, enabled) \
- __cond_export_sym_##enabled(sym, sec, ns)
-#define __cond_export_sym_1(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns)
-#define __cond_export_sym_0(sym, sec, ns) /* nothing */
-
+#define __GENDWARFKSYMS_EXPORT(sym) \
+ static typeof(sym) *__gendwarfksyms_ptr_##sym __used \
+ __section(".discard.gendwarfksyms") = &sym;
#else
+#define __GENDWARFKSYMS_EXPORT(sym)
+#endif
-#define __EXPORT_SYMBOL(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns)
+#define __EXPORT_SYMBOL(sym, license, ns) \
+ extern typeof(sym) sym; \
+ __ADDRESSABLE(sym) \
+ __GENDWARFKSYMS_EXPORT(sym) \
+ asm(__stringify(___EXPORT_SYMBOL(sym, license, ns)))
-#endif /* CONFIG_MODULES */
+#endif
#ifdef DEFAULT_SYMBOL_NAMESPACE
-#include <linux/stringify.h>
-#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, __stringify(DEFAULT_SYMBOL_NAMESPACE))
+#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, DEFAULT_SYMBOL_NAMESPACE)
#else
-#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, "")
+#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, "")
#endif
#define EXPORT_SYMBOL(sym) _EXPORT_SYMBOL(sym, "")
-#define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "_gpl")
-#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", #ns)
-#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "_gpl", #ns)
+#define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "GPL")
+#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", ns)
+#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", ns)
-#endif /* !__ASSEMBLY__ */
+#define EXPORT_SYMBOL_FOR_MODULES(sym, mods) __EXPORT_SYMBOL(sym, "GPL", "module:" mods)
#endif /* _LINUX_EXPORT_H */
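Note that the namespace argument is now a plain string literal rather than a
pasted token. A hypothetical export under the new scheme:

	#include <linux/export.h>

	int my_lib_call(void)
	{
		return 0;
	}
	/* The namespace is passed as a string literal, not a bare identifier. */
	EXPORT_SYMBOL_NS_GPL(my_lib_call, "MY_NAMESPACE");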
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index fe848901fcc3..f0cf2714ec52 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -3,6 +3,7 @@
#define LINUX_EXPORTFS_H 1
#include <linux/types.h>
+#include <linux/path.h>
struct dentry;
struct iattr;
@@ -99,12 +100,35 @@ enum fid_type {
FILEID_FAT_WITH_PARENT = 0x72,
/*
+ * 64 bit inode number, 32 bit generation number.
+ */
+ FILEID_INO64_GEN = 0x81,
+
+ /*
+ * 64 bit inode number, 32 bit generation number,
+ * 64 bit parent inode number, 32 bit parent generation.
+ */
+ FILEID_INO64_GEN_PARENT = 0x82,
+
+ /*
* 128 bit child FID (struct lu_fid)
* 128 bit parent FID (struct lu_fid)
*/
FILEID_LUSTRE = 0x97,
/*
+ * 64 bit inode number, 32 bit subvolume, 32 bit generation number:
+ */
+ FILEID_BCACHEFS_WITHOUT_PARENT = 0xb1,
+ FILEID_BCACHEFS_WITH_PARENT = 0xb2,
+
+ /*
+ * 64 bit namespace identifier, 32 bit namespace type, 32 bit inode number.
+ */
+ FILEID_NSFS = 0xf1,
+
+ /*
* 64 bit unique kernfs id
*/
FILEID_KERNFS = 0xfe,
@@ -123,7 +147,11 @@ struct fid {
u32 parent_ino;
u32 parent_gen;
} i32;
- struct {
+ struct {
+ u64 ino;
+ u32 gen;
+ } __packed i64;
+ struct {
u32 block;
u16 partref;
u16 parent_partref;
@@ -131,10 +159,38 @@ struct fid {
u32 parent_block;
u32 parent_generation;
} udf;
- __u32 raw[0];
+ DECLARE_FLEX_ARRAY(__u32, raw);
};
};
+enum handle_to_path_flags {
+ HANDLE_CHECK_PERMS = (1 << 0),
+ HANDLE_CHECK_SUBTREE = (1 << 1),
+};
+
+struct handle_to_path_ctx {
+ struct path root;
+ enum handle_to_path_flags flags;
+ unsigned int fh_flags;
+};
+
+#define EXPORT_FH_CONNECTABLE 0x1 /* Encode file handle with parent */
+#define EXPORT_FH_FID 0x2 /* File handle may be non-decodeable */
+#define EXPORT_FH_DIR_ONLY 0x4 /* Only decode file handle for a directory */
+
+/*
+ * Filesystems use only lower 8 bits of file_handle type for fid_type.
+ * name_to_handle_at() uses upper 16 bits of type as user flags to be
+ * interpreted by open_by_handle_at().
+ */
+#define FILEID_USER_FLAGS_MASK 0xffff0000
+#define FILEID_USER_FLAGS(type) ((type) & FILEID_USER_FLAGS_MASK)
+
+/* Flags supported in encoded handle_type that is exported to user */
+#define FILEID_IS_CONNECTABLE 0x10000
+#define FILEID_IS_DIR 0x20000
+#define FILEID_VALID_USER_FLAGS (FILEID_IS_CONNECTABLE | FILEID_IS_DIR)
+
/**
* struct export_operations - for nfsd to communicate with file systems
* @encode_fh: encode a file handle fragment from a dentry
@@ -150,7 +206,7 @@ struct fid {
* encode_fh:
* @encode_fh should store in the file handle fragment @fh (using at most
* @max_len bytes) information that can be used by @decode_fh to recover the
- * file referred to by the &struct dentry @de. If the @connectable flag is
+ * file referred to by the &struct dentry @de. If the EXPORT_FH_CONNECTABLE bit is
* set, the encode_fh() should store sufficient information so that a good
attempt can be made to find not only the file but also its place in the
* filesystem. This typically means storing a reference to de->d_parent in
@@ -180,18 +236,24 @@ struct fid {
* directory. The name should be stored in the @name (with the
* understanding that it is already pointing to a %NAME_MAX+1 sized
* buffer. get_name() should return %0 on success, a negative error code
- * or error. @get_name will be called without @parent->i_mutex held.
+ * on failure. @get_name will be called without @parent->i_rwsem held.
*
* get_parent:
* @get_parent should find the parent directory for the given @child which
* is also a directory. In the event that it cannot be found, or storage
* space cannot be allocated, a %ERR_PTR should be returned.
*
+ * permission:
+ * Allow filesystems to specify a custom permission function.
+ *
+ * open:
+ * Allow filesystems to specify a custom open function.
+ *
* commit_metadata:
* @commit_metadata should commit metadata changes to stable storage.
*
* Locking rules:
- * get_parent is called with child->d_inode->i_mutex down
+ * get_parent is called with child->d_inode->i_rwsem down
* get_name is not (which is possibly inconsistent)
*/
@@ -213,7 +275,8 @@ struct export_operations {
bool write, u32 *device_generation);
int (*commit_blocks)(struct inode *inode, struct iomap *iomaps,
int nr_iomaps, struct iattr *iattr);
- u64 (*fetch_iversion)(struct inode *);
+ int (*permission)(struct handle_to_path_ctx *ctx, unsigned int oflags);
+ struct file * (*open)(const struct path *path, unsigned int oflags);
#define EXPORT_OP_NOWCC (0x1) /* don't collect v3 wcc data */
#define EXPORT_OP_NOSUBTREECHK (0x2) /* no subtree checking */
#define EXPORT_OP_CLOSE_BEFORE_UNLINK (0x4) /* close files before unlink */
@@ -221,16 +284,78 @@ struct export_operations {
#define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply
atomic attribute updates
*/
+#define EXPORT_OP_FLUSH_ON_CLOSE (0x20) /* fs flushes file data on close */
+#define EXPORT_OP_NOLOCKS (0x40) /* no file locking support */
unsigned long flags;
};
+/**
+ * exportfs_cannot_lock() - check if export implements file locking
+ * @export_ops: the nfs export operations to check
+ *
+ * Returns true if the export does not support file locking.
+ */
+static inline bool
+exportfs_cannot_lock(const struct export_operations *export_ops)
+{
+ return export_ops->flags & EXPORT_OP_NOLOCKS;
+}
+
extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
- int *max_len, struct inode *parent);
+ int *max_len, struct inode *parent,
+ int flags);
extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
- int *max_len, int connectable);
+ int *max_len, int flags);
+
+static inline bool exportfs_can_encode_fid(const struct export_operations *nop)
+{
+ return !nop || nop->encode_fh;
+}
+
+static inline bool exportfs_can_decode_fh(const struct export_operations *nop)
+{
+ return nop && nop->fh_to_dentry;
+}
+
+static inline bool exportfs_can_encode_fh(const struct export_operations *nop,
+ int fh_flags)
+{
+ /*
+ * If a non-decodeable file handle was requested, we only need to make
+ * sure that the filesystem did not opt out of encoding a fid.
+ */
+ if (fh_flags & EXPORT_FH_FID)
+ return exportfs_can_encode_fid(nop);
+
+ /* Normal file handles cannot be created without export ops */
+ if (!nop)
+ return false;
+
+ /*
+ * If a connectable file handle was requested, we need to make sure that
+ * the filesystem can also decode connected file handles.
+ */
+ if ((fh_flags & EXPORT_FH_CONNECTABLE) && !nop->fh_to_parent)
+ return false;
+
+ /*
+ * If a decodeable file handle was requested, we need to make sure that
+ * the filesystem can also decode file handles.
+ */
+ return exportfs_can_decode_fh(nop);
+}
+
+static inline int exportfs_encode_fid(struct inode *inode, struct fid *fid,
+ int *max_len)
+{
+ return exportfs_encode_inode_fh(inode, fid, max_len, NULL,
+ EXPORT_FH_FID);
+}
+
extern struct dentry *exportfs_decode_fh_raw(struct vfsmount *mnt,
struct fid *fid, int fh_len,
int fileid_type,
+ unsigned int flags,
int (*acceptable)(void *, struct dentry *),
void *context);
extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
@@ -240,10 +365,12 @@ extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
/*
* Generic helpers for filesystems.
*/
-extern struct dentry *generic_fh_to_dentry(struct super_block *sb,
+int generic_encode_ino32_fh(struct inode *inode, __u32 *fh, int *max_len,
+ struct inode *parent);
+struct dentry *generic_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type,
struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
-extern struct dentry *generic_fh_to_parent(struct super_block *sb,
+struct dentry *generic_fh_to_parent(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type,
struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
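A sketch of how a caller might combine the new helpers (the wrapper name is
hypothetical): verify that the filesystem can encode the requested handle
flavor before asking for a non-decodeable fid.

	#include <linux/exportfs.h>

	static int encode_fid_checked(struct inode *inode, struct fid *fid,
				      int *max_len)
	{
		const struct export_operations *nop = inode->i_sb->s_export_op;

		if (!exportfs_can_encode_fh(nop, EXPORT_FH_FID))
			return -EOPNOTSUPP;
		return exportfs_encode_fid(inode, fid, max_len);
	}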
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 0c19010da77f..e596a0abcb27 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -76,6 +76,8 @@
#define EXTCON_DISP_VGA 43 /* Video Graphics Array */
#define EXTCON_DISP_DP 44 /* Display Port */
#define EXTCON_DISP_HMD 45 /* Head-Mounted Display */
+#define EXTCON_DISP_CVBS 46 /* Composite Video Broadcast Signal */
+#define EXTCON_DISP_EDP 47 /* Embedded Display Port */
/* Miscellaneous external connector */
#define EXTCON_DOCK 60
@@ -296,7 +298,7 @@ static inline void devm_extcon_unregister_notifier_all(struct device *dev,
static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
{
- return ERR_PTR(-ENODEV);
+ return NULL;
}
static inline struct extcon_dev *extcon_find_edev_by_node(struct device_node *node)
@@ -326,16 +328,4 @@ struct extcon_specific_cable_nb {
struct extcon_dev *edev;
unsigned long previous_value;
};
-
-static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
- const char *extcon_name, const char *cable_name,
- struct notifier_block *nb)
-{
- return -EINVAL;
-}
-
-static inline int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
-{
- return -EINVAL;
-}
#endif /* __LINUX_EXTCON_H__ */
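With the stub now returning NULL instead of ERR_PTR(-ENODEV), callers are
expected to test for a missing device rather than an error pointer. A typical
probe-time pattern (the connector name here is hypothetical):

	struct extcon_dev *edev = extcon_get_extcon_dev("extcon-usb.0");

	if (!edev)
		return -EPROBE_DEFER;	/* extcon device not registered yet */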
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 5487a80617a3..a7880787cad3 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -13,20 +13,23 @@
#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */
#define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */
-#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */
-#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */
-#define F2FS_BLKSIZE 4096 /* support only 4KB block */
-#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
+#define F2FS_MAX_LOG_SECTOR_SIZE PAGE_SHIFT /* Max is Block Size */
+#define F2FS_LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) /* log number for sector/blk */
+#define F2FS_BLKSIZE PAGE_SIZE /* support only block == page */
+#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */
+#define F2FS_SUM_BLKSIZE 4096 /* only support 4096 byte sum block */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_EXTENSION_LEN 8 /* max size of extension */
-#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
#define COMPRESS_ADDR ((block_t)-2) /* used as compressed data flag */
-#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
-#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
+#define F2FS_BLKSIZE_MASK (F2FS_BLKSIZE - 1)
+#define F2FS_BYTES_TO_BLK(bytes) ((unsigned long long)(bytes) >> F2FS_BLKSIZE_BITS)
+#define F2FS_BLK_TO_BYTES(blk) ((unsigned long long)(blk) << F2FS_BLKSIZE_BITS)
+#define F2FS_BLK_END_BYTES(blk) (F2FS_BLK_TO_BYTES(blk + 1) - 1)
+#define F2FS_BLK_ALIGN(x) (F2FS_BYTES_TO_BLK((x) + F2FS_BLKSIZE - 1))
/* 0, 1(node nid), 2(meta nid) are reserved node id */
#define F2FS_RESERVED_NODE_NUM 3
@@ -34,18 +37,12 @@
#define F2FS_ROOT_INO(sbi) ((sbi)->root_ino_num)
#define F2FS_NODE_INO(sbi) ((sbi)->node_ino_num)
#define F2FS_META_INO(sbi) ((sbi)->meta_ino_num)
+#define F2FS_COMPRESS_INO(sbi) (NM_I(sbi)->max_nid)
#define F2FS_MAX_QUOTAS 3
#define F2FS_ENC_UTF8_12_1 1
-#define F2FS_IO_SIZE(sbi) (1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */
-#define F2FS_IO_SIZE_KB(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */
-#define F2FS_IO_SIZE_BYTES(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */
-#define F2FS_IO_SIZE_BITS(sbi) (F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */
-#define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1)
-#define F2FS_IO_ALIGNED(sbi) (F2FS_IO_SIZE(sbi) > 1)
-
/* This flag is used by node and meta inodes, and by recovery */
#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO)
@@ -72,6 +69,47 @@ struct f2fs_device {
__le32 total_segments;
} __packed;
+/* reason of stop_checkpoint */
+enum stop_cp_reason {
+ STOP_CP_REASON_SHUTDOWN,
+ STOP_CP_REASON_FAULT_INJECT,
+ STOP_CP_REASON_META_PAGE,
+ STOP_CP_REASON_WRITE_FAIL,
+ STOP_CP_REASON_CORRUPTED_SUMMARY,
+ STOP_CP_REASON_UPDATE_INODE,
+ STOP_CP_REASON_FLUSH_FAIL,
+ STOP_CP_REASON_NO_SEGMENT,
+ STOP_CP_REASON_CORRUPTED_FREE_BITMAP,
+ STOP_CP_REASON_CORRUPTED_NID,
+ STOP_CP_REASON_MAX,
+};
+
+#define MAX_STOP_REASON 32
+
+/* detail reason for EFSCORRUPTED */
+enum f2fs_error {
+ ERROR_CORRUPTED_CLUSTER,
+ ERROR_FAIL_DECOMPRESSION,
+ ERROR_INVALID_BLKADDR,
+ ERROR_CORRUPTED_DIRENT,
+ ERROR_CORRUPTED_INODE,
+ ERROR_INCONSISTENT_SUMMARY,
+ ERROR_INCONSISTENT_FOOTER,
+ ERROR_INCONSISTENT_SUM_TYPE,
+ ERROR_CORRUPTED_JOURNAL,
+ ERROR_INCONSISTENT_NODE_COUNT,
+ ERROR_INCONSISTENT_BLOCK_COUNT,
+ ERROR_INVALID_CURSEG,
+ ERROR_INCONSISTENT_SIT,
+ ERROR_CORRUPTED_VERITY_XATTR,
+ ERROR_CORRUPTED_XATTR,
+ ERROR_INVALID_NODE_REFERENCE,
+ ERROR_INCONSISTENT_NAT,
+ ERROR_MAX,
+};
+
+#define MAX_F2FS_ERRORS 16
+
struct f2fs_super_block {
__le32 magic; /* Magic Number */
__le16 major_ver; /* Major Version */
@@ -115,7 +153,9 @@ struct f2fs_super_block {
__u8 hot_ext_count; /* # of hot file extension */
__le16 s_encoding; /* Filename charset encoding */
__le16 s_encoding_flags; /* Filename charset encoding flags */
- __u8 reserved[306]; /* valid reserved region */
+ __u8 s_stop_reason[MAX_STOP_REASON]; /* stop checkpoint reason */
+ __u8 s_errors[MAX_F2FS_ERRORS]; /* reason of image corrupts */
+ __u8 reserved[258]; /* valid reserved region */
__le32 crc; /* checksum of superblock */
} __packed;
@@ -171,14 +211,14 @@ struct f2fs_checkpoint {
unsigned char sit_nat_version_bitmap[];
} __packed;
-#define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */
+#define CP_CHKSUM_OFFSET (F2FS_BLKSIZE - sizeof(__le32)) /* default chksum offset in checkpoint */
#define CP_MIN_CHKSUM_OFFSET \
(offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap))
/*
* For orphan inode management
*/
-#define F2FS_ORPHANS_PER_BLOCK 1020
+#define F2FS_ORPHANS_PER_BLOCK ((F2FS_BLKSIZE - 4 * sizeof(__le32)) / sizeof(__le32))
#define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \
F2FS_ORPHANS_PER_BLOCK)
@@ -204,17 +244,33 @@ struct f2fs_extent {
#define F2FS_NAME_LEN 255
/* 200 bytes for inline xattrs by default */
#define DEFAULT_INLINE_XATTR_ADDRS 50
-#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
+
+#define OFFSET_OF_END_OF_I_EXT 360
+#define SIZE_OF_I_NID 20
+
+struct node_footer {
+ __le32 nid; /* node id */
+ __le32 ino; /* inode number */
+ __le32 flag; /* include cold/fsync/dentry marks and offset */
+ __le64 cp_ver; /* checkpoint version */
+ __le32 next_blkaddr; /* next node page block address */
+} __packed;
+
+/* Address Pointers in an Inode */
+#define DEF_ADDRS_PER_INODE ((F2FS_BLKSIZE - OFFSET_OF_END_OF_I_EXT \
+ - SIZE_OF_I_NID \
+ - sizeof(struct node_footer)) / sizeof(__le32))
#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \
get_extra_isize(inode))
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
-#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
-#define DEF_ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
-#define ADDRS_PER_BLOCK(inode) addrs_per_block(inode)
-#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
+#define ADDRS_PER_INODE(inode) addrs_per_page(inode, true)
+/* Address Pointers in a Direct Block */
+#define DEF_ADDRS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
+#define ADDRS_PER_BLOCK(inode) addrs_per_page(inode, false)
+/* Node IDs in an Indirect Block */
+#define NIDS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
-#define ADDRS_PER_PAGE(page, inode) \
- (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode))
+#define ADDRS_PER_PAGE(folio, inode) (addrs_per_page(inode, IS_INODE(folio)))
#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2)
@@ -226,9 +282,10 @@ struct f2fs_extent {
#define F2FS_INLINE_DATA 0x02 /* file inline data flag */
#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
-#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
+#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries (obsolete) */
#define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */
#define F2FS_PIN_FILE 0x40 /* file should not be gced */
+#define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */
struct f2fs_inode {
__le16 i_mode; /* file mode */
@@ -275,7 +332,7 @@ struct f2fs_inode {
__u8 i_log_cluster_size; /* log of cluster size */
__le16 i_compress_flag; /* compress flag */
/* 0 bit: chksum flag
- * [10,15] bits: compress level
+ * [8,15] bits: compress level
*/
__le32 i_extra_end[0]; /* for attribute size calculation */
} __packed;
@@ -300,15 +357,7 @@ enum {
OFFSET_BIT_SHIFT
};
-#define OFFSET_BIT_MASK (0x07) /* (0x01 << OFFSET_BIT_SHIFT) - 1 */
-
-struct node_footer {
- __le32 nid; /* node id */
- __le32 ino; /* inode number */
- __le32 flag; /* include cold/fsync/dentry marks and offset */
- __le64 cp_ver; /* checkpoint version */
- __le32 next_blkaddr; /* next node page block address */
-} __packed;
+#define OFFSET_BIT_MASK GENMASK(OFFSET_BIT_SHIFT - 1, 0)
struct f2fs_node {
/* can be one of three types: inode, direct, and indirect types */
@@ -323,7 +372,7 @@ struct f2fs_node {
/*
* For NAT entries
*/
-#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_nat_entry))
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
@@ -338,16 +387,18 @@ struct f2fs_nat_block {
/*
* For SIT entries
*
- * Each segment is 2MB in size by default so that a bitmap for validity of
- * there-in blocks should occupy 64 bytes, 512 bits.
+ * A validity bitmap of 64 bytes covers 512 blocks. For a 4K page size,
+ * this results in a segment size of 2MB. For 16K pages, the default
+ * segment size is 8MB.
* Changing this is not allowed.
*/
#define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_sit_entry))
/*
* F2FS uses 4 bytes to represent block address. As a result, supported size of
- * disk is 16 TB and it equals to 16 * 1024 * 1024 / 2 segments.
+ * disk is 16 TB for a 4K page size and 64 TB for a 16K page size, which
+ * equals 16 * 1024 * 1024 / 2 segments.
*/
#define F2FS_MAX_SEGMENT ((16 * 1024 * 1024) / 2)
@@ -377,8 +428,10 @@ struct f2fs_sit_block {
/*
* For segment summary
*
- * One summary block contains exactly 512 summary entries, which represents
- * exactly 2MB segment by default. Not allow to change the basic units.
+ * One 4KB summary block contains exactly 512 summary entries, which
+ * represent exactly one 2MB segment.
+ * Similarly, a 16KB summary block represents one 8MB segment.
+ * The basic units are not allowed to change.
*
* NOTE: For initializing fields, you must use set_summary
*
@@ -389,12 +442,12 @@ struct f2fs_sit_block {
* from node's page's beginning to get a data block address.
* ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
*/
-#define ENTRIES_IN_SUM 512
-#define SUMMARY_SIZE (7) /* sizeof(struct summary) */
+#define ENTRIES_IN_SUM (F2FS_SUM_BLKSIZE / 8)
+#define SUMMARY_SIZE (7) /* sizeof(struct f2fs_summary) */
#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */
#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM)
-/* a summary entry for a 4KB-sized block in a segment */
+/* a summary entry for a block in a segment */
struct f2fs_summary {
__le32 nid; /* parent node id */
union {
@@ -415,7 +468,7 @@ struct summary_footer {
__le32 check_sum; /* summary checksum */
} __packed;
-#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
+#define SUM_JOURNAL_SIZE (F2FS_SUM_BLKSIZE - SUM_FOOTER_SIZE -\
SUM_ENTRY_SIZE)
#define NAT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\
sizeof(struct nat_journal_entry))
@@ -478,7 +531,7 @@ struct f2fs_journal {
};
} __packed;
-/* 4KB-sized summary block structure */
+/* Block-sized summary block structure */
struct f2fs_summary_block {
struct f2fs_summary entries[ENTRIES_IN_SUM];
struct f2fs_journal journal;
@@ -505,10 +558,11 @@ typedef __le32 f2fs_hash_t;
#define MAX_DIR_HASH_DEPTH 63
/* MAX buckets in one level of dir */
-#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
+#define MAX_DIR_BUCKETS BIT((MAX_DIR_HASH_DEPTH / 2) - 1)
/*
* space utilization of regular dentry and inline dentry (w/o extra reservation)
+ * when block size is 4KB.
* regular dentry inline dentry (def) inline dentry (min)
* bitmap 1 * 27 = 27 1 * 23 = 23 1 * 1 = 1
* reserved 1 * 3 = 3 1 * 7 = 7 1 * 1 = 1
@@ -519,11 +573,14 @@ typedef __le32 f2fs_hash_t;
* Note: there are more reserved space in inline dentry than in regular
* dentry, when converting inline dentry we should handle this carefully.
*/
-#define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */
+
+/* the number of dentry in a block */
+#define NR_DENTRY_IN_BLOCK ((BITS_PER_BYTE * F2FS_BLKSIZE) / \
+ ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * BITS_PER_BYTE + 1))
#define SIZE_OF_DIR_ENTRY 11 /* by byte */
#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
BITS_PER_BYTE)
-#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
+#define SIZE_OF_RESERVED (F2FS_BLKSIZE - ((SIZE_OF_DIR_ENTRY + \
F2FS_SLOT_LEN) * \
NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
#define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' entries */
@@ -536,7 +593,7 @@ struct f2fs_dir_entry {
__u8 file_type; /* file type */
} __packed;
-/* 4KB-sized directory entry block */
+/* Block-sized directory entry block */
struct f2fs_dentry_block {
/* validity bitmap for directory entries in each block */
__u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP];
@@ -545,21 +602,6 @@ struct f2fs_dentry_block {
__u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN];
} __packed;
-/* file types used in inode_info->flags */
-enum {
- F2FS_FT_UNKNOWN,
- F2FS_FT_REG_FILE,
- F2FS_FT_DIR,
- F2FS_FT_CHRDEV,
- F2FS_FT_BLKDEV,
- F2FS_FT_FIFO,
- F2FS_FT_SOCK,
- F2FS_FT_SYMLINK,
- F2FS_FT_MAX
-};
-
-#define S_SHIFT 12
-
#define F2FS_DEF_PROJID 0 /* default project ID */
#endif /* _LINUX_F2FS_FS_H */
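As a sanity check, with a 4KB block size the new block-size-relative
expressions reduce to the hard-coded constants they replace:

	sizeof(struct node_footer) = 4 + 4 + 4 + 8 + 4 = 24 bytes
	DEF_ADDRS_PER_BLOCK = (4096 - 24) / 4 = 1018 (old constant)
	DEF_ADDRS_PER_INODE = (4096 - 360 - 20 - 24) / 4 = 923 (old constant)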
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index f3f0b97b1675..7c38c6b76b60 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -25,12 +25,19 @@ struct space_resv {
#define FS_IOC_UNRESVSP64 _IOW('X', 43, struct space_resv)
#define FS_IOC_ZERO_RANGE _IOW('X', 57, struct space_resv)
-#define FALLOC_FL_SUPPORTED_MASK (FALLOC_FL_KEEP_SIZE | \
- FALLOC_FL_PUNCH_HOLE | \
- FALLOC_FL_COLLAPSE_RANGE | \
- FALLOC_FL_ZERO_RANGE | \
- FALLOC_FL_INSERT_RANGE | \
- FALLOC_FL_UNSHARE_RANGE)
+/*
+ * Mask of all supported fallocate modes. Only one can be set at a time.
+ *
+ * In addition to the mode bit, the mode argument can also encode flags.
+ * FALLOC_FL_KEEP_SIZE is the only supported flag so far.
+ */
+#define FALLOC_FL_MODE_MASK (FALLOC_FL_ALLOCATE_RANGE | \
+ FALLOC_FL_PUNCH_HOLE | \
+ FALLOC_FL_COLLAPSE_RANGE | \
+ FALLOC_FL_ZERO_RANGE | \
+ FALLOC_FL_INSERT_RANGE | \
+ FALLOC_FL_UNSHARE_RANGE | \
+ FALLOC_FL_WRITE_ZEROES)
/* on ia32 l_start is on a 32-bit boundary */
#if defined(CONFIG_X86_64)
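A sketch of the validation the comment above describes (this mirrors the
spirit of the VFS check, not its literal code; it relies on
FALLOC_FL_ALLOCATE_RANGE being 0):

	#include <linux/falloc.h>
	#include <linux/bitops.h>
	#include <linux/errno.h>

	static int check_falloc_mode(int mode)
	{
		if (mode & ~(FALLOC_FL_MODE_MASK | FALLOC_FL_KEEP_SIZE))
			return -EOPNOTSUPP;
		/* At most one mode bit may be set at a time. */
		if (hweight32(mode & FALLOC_FL_MODE_MASK) > 1)
			return -EINVAL;
		return 0;
	}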
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index bad41bcb25df..879cff5eccd4 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -5,8 +5,6 @@
#include <linux/sysctl.h>
#include <uapi/linux/fanotify.h>
-extern struct ctl_table fanotify_table[]; /* for sysctl */
-
#define FAN_GROUP_FLAG(group, flag) \
((group)->fanotify_data.flags & (flag))
@@ -25,7 +23,9 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
#define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FANOTIFY_PERM_CLASSES)
-#define FANOTIFY_FID_BITS (FAN_REPORT_FID | FAN_REPORT_DFID_NAME)
+#define FANOTIFY_FID_BITS (FAN_REPORT_DFID_NAME_TARGET)
+
+#define FANOTIFY_INFO_MODES (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD | FAN_REPORT_MNT)
/*
* fanotify_init() flags that require CAP_SYS_ADMIN.
@@ -35,6 +35,8 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
*/
#define FANOTIFY_ADMIN_INIT_FLAGS (FANOTIFY_PERM_CLASSES | \
FAN_REPORT_TID | \
+ FAN_REPORT_PIDFD | \
+ FAN_REPORT_FD_ERROR | \
FAN_UNLIMITED_QUEUE | \
FAN_UNLIMITED_MARKS)
@@ -45,23 +47,32 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
* so one of the flags for reporting file handles is required.
*/
#define FANOTIFY_USER_INIT_FLAGS (FAN_CLASS_NOTIF | \
- FANOTIFY_FID_BITS | \
+ FANOTIFY_FID_BITS | FAN_REPORT_MNT | \
FAN_CLOEXEC | FAN_NONBLOCK)
#define FANOTIFY_INIT_FLAGS (FANOTIFY_ADMIN_INIT_FLAGS | \
FANOTIFY_USER_INIT_FLAGS)
+/* Internal group flags */
+#define FANOTIFY_UNPRIV 0x80000000
+#define FANOTIFY_INTERNAL_GROUP_FLAGS (FANOTIFY_UNPRIV)
+
#define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \
- FAN_MARK_FILESYSTEM)
+ FAN_MARK_FILESYSTEM | FAN_MARK_MNTNS)
+
+#define FANOTIFY_MARK_CMD_BITS (FAN_MARK_ADD | FAN_MARK_REMOVE | \
+ FAN_MARK_FLUSH)
+
+#define FANOTIFY_MARK_IGNORE_BITS (FAN_MARK_IGNORED_MASK | \
+ FAN_MARK_IGNORE)
#define FANOTIFY_MARK_FLAGS (FANOTIFY_MARK_TYPE_BITS | \
- FAN_MARK_ADD | \
- FAN_MARK_REMOVE | \
+ FANOTIFY_MARK_CMD_BITS | \
+ FANOTIFY_MARK_IGNORE_BITS | \
FAN_MARK_DONT_FOLLOW | \
FAN_MARK_ONLYDIR | \
- FAN_MARK_IGNORED_MASK | \
FAN_MARK_IGNORED_SURV_MODIFY | \
- FAN_MARK_FLUSH)
+ FAN_MARK_EVICTABLE)
/*
* Events that can be reported with data type FSNOTIFY_EVENT_PATH.
@@ -75,19 +86,36 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
* Directory entry modification events - reported only to directory
* where entry is modified and not to a watching parent.
*/
-#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE)
+#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE | \
+ FAN_RENAME)
+
+/* Content events can be used to inspect file content */
+#define FANOTIFY_CONTENT_PERM_EVENTS (FAN_OPEN_PERM | FAN_OPEN_EXEC_PERM | \
+ FAN_ACCESS_PERM)
+/* Pre-content events can be used to fill file content */
+#define FANOTIFY_PRE_CONTENT_EVENTS (FAN_PRE_ACCESS)
+
+/* Events that require a permission response from user */
+#define FANOTIFY_PERM_EVENTS (FANOTIFY_CONTENT_PERM_EVENTS | \
+ FANOTIFY_PRE_CONTENT_EVENTS)
+
+/* Events that can be reported with event->fd */
+#define FANOTIFY_FD_EVENTS (FANOTIFY_PATH_EVENTS | FANOTIFY_PERM_EVENTS)
/* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */
#define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \
FAN_ATTRIB | FAN_MOVE_SELF | FAN_DELETE_SELF)
+/* Events that can only be reported with data type FSNOTIFY_EVENT_ERROR */
+#define FANOTIFY_ERROR_EVENTS (FAN_FS_ERROR)
+
+#define FANOTIFY_MOUNT_EVENTS (FAN_MNT_ATTACH | FAN_MNT_DETACH)
+
/* Events that user can request to be notified on */
#define FANOTIFY_EVENTS (FANOTIFY_PATH_EVENTS | \
- FANOTIFY_INODE_EVENTS)
-
-/* Events that require a permission response from user */
-#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \
- FAN_OPEN_EXEC_PERM)
+ FANOTIFY_INODE_EVENTS | \
+ FANOTIFY_ERROR_EVENTS | \
+ FANOTIFY_MOUNT_EVENTS)
/* Extra flags that may be reported with event or control handling of events */
#define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR)
@@ -97,9 +125,20 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
FANOTIFY_PERM_EVENTS | \
FAN_Q_OVERFLOW | FAN_ONDIR)
+/* Events and flags relevant only for directories */
+#define FANOTIFY_DIRONLY_EVENT_BITS (FANOTIFY_DIRENT_EVENTS | \
+ FAN_EVENT_ON_CHILD | FAN_ONDIR)
+
#define ALL_FANOTIFY_EVENT_BITS (FANOTIFY_OUTGOING_EVENTS | \
FANOTIFY_EVENT_FLAGS)
+/* These masks check for invalid bits in permission responses. */
+#define FANOTIFY_RESPONSE_ACCESS (FAN_ALLOW | FAN_DENY)
+#define FANOTIFY_RESPONSE_FLAGS (FAN_AUDIT | FAN_INFO)
+#define FANOTIFY_RESPONSE_VALID_MASK \
+ (FANOTIFY_RESPONSE_ACCESS | FANOTIFY_RESPONSE_FLAGS | \
+ (FAN_ERRNO_MASK << FAN_ERRNO_SHIFT))
+
/* Do not use these old uapi constants internally */
#undef FAN_ALL_CLASS_BITS
#undef FAN_ALL_INIT_FLAGS
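A sketch of how the response masks above are meant to be used when
validating a userspace permission response (hypothetical helper; the real
check lives in fs/notify/fanotify/):

	static bool response_is_valid(u32 response)
	{
		if (response & ~FANOTIFY_RESPONSE_VALID_MASK)
			return false;
		/* Exactly one access decision must be present. */
		switch (response & FANOTIFY_RESPONSE_ACCESS) {
		case FAN_ALLOW:
		case FAN_DENY:
			return true;
		default:
			return false;
		}
	}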
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index e525f6957c49..58fd14c82270 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -2,12 +2,21 @@
#ifndef _LINUX_FAULT_INJECT_H
#define _LINUX_FAULT_INJECT_H
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct dentry;
+struct kmem_cache;
+
+enum fault_flags {
+ FAULT_NOWARN = 1 << 0,
+};
+
#ifdef CONFIG_FAULT_INJECTION
-#include <linux/types.h>
-#include <linux/debugfs.h>
-#include <linux/ratelimit.h>
#include <linux/atomic.h>
+#include <linux/configfs.h>
+#include <linux/ratelimit.h>
/*
* For explanation of the elements of this struct, see
@@ -43,8 +52,31 @@ struct fault_attr {
#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
int setup_fault_attr(struct fault_attr *attr, char *str);
+bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags);
bool should_fail(struct fault_attr *attr, ssize_t size);
+#else /* CONFIG_FAULT_INJECTION */
+
+struct fault_attr {
+};
+
+#define DECLARE_FAULT_ATTR(name) struct fault_attr name = {}
+
+static inline int setup_fault_attr(struct fault_attr *attr, char *str)
+{
+ return 0; /* Note: 0 means error for __setup() handlers! */
+}
+static inline bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
+{
+ return false;
+}
+static inline bool should_fail(struct fault_attr *attr, ssize_t size)
+{
+ return false;
+}
+
+#endif /* CONFIG_FAULT_INJECTION */
+
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
struct dentry *fault_create_debugfs_attr(const char *name,
@@ -60,15 +92,40 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name,
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
-#endif /* CONFIG_FAULT_INJECTION */
+#ifdef CONFIG_FAULT_INJECTION_CONFIGFS
-struct kmem_cache;
+struct fault_config {
+ struct fault_attr attr;
+ struct config_group group;
+};
+
+void fault_config_init(struct fault_config *config, const char *name);
+
+#else /* CONFIG_FAULT_INJECTION_CONFIGFS */
+
+struct fault_config {
+};
+
+static inline void fault_config_init(struct fault_config *config,
+ const char *name)
+{
+}
+
+#endif /* CONFIG_FAULT_INJECTION_CONFIGFS */
+
+#ifdef CONFIG_FAIL_PAGE_ALLOC
+bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
+#else
+static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+{
+ return false;
+}
+#endif /* CONFIG_FAIL_PAGE_ALLOC */
-int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#ifdef CONFIG_FAILSLAB
-extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags);
+int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
#else
-static inline bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
+static inline int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
return false;
}
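Typical usage of the declarations above (the attr name is hypothetical; the
attr is normally configured via debugfs or a boot parameter parsed by
setup_fault_attr(), e.g. "fail_my_io=<interval>,<probability>,<space>,<times>"):

	#include <linux/fault-inject.h>

	static DECLARE_FAULT_ATTR(fail_my_io);

	static int submit_my_io(void)
	{
		/* Optionally inject an artificial failure. */
		if (should_fail(&fail_my_io, 1))
			return -EIO;
		return 0;
	}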
diff --git a/include/linux/fb.h b/include/linux/fb.h
index a8dccd23c249..05cc251035da 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -2,26 +2,31 @@
#ifndef _LINUX_FB_H
#define _LINUX_FB_H
-#include <linux/kgdb.h>
#include <uapi/linux/fb.h>
#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor_user)
-#include <linux/fs.h>
-#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/printk.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
#include <linux/workqueue.h>
-#include <linux/notifier.h>
-#include <linux/list.h>
-#include <linux/backlight.h>
-#include <linux/slab.h>
-#include <asm/io.h>
-struct vm_area_struct;
-struct fb_info;
+#include <asm/video.h>
+
+struct backlight_device;
struct device;
+struct device_node;
+struct fb_info;
struct file;
+struct i2c_adapter;
+struct inode;
+struct lcd_device;
+struct module;
+struct notifier_block;
+struct page;
struct videomode;
-struct device_node;
+struct vm_area_struct;
/* Definitions below are used in the parsed monitor specs */
#define FB_DPMS_ACTIVE_OFF 1
@@ -124,26 +129,24 @@ struct fb_cursor_user {
* Register/unregister for framebuffer events
*/
-/* The resolution of the passed in fb_info about to change */
-#define FB_EVENT_MODE_CHANGE 0x01
-
#ifdef CONFIG_GUMSTIX_AM200EPD
/* only used by mach-pxa/am200epd.c */
#define FB_EVENT_FB_REGISTERED 0x05
#define FB_EVENT_FB_UNREGISTERED 0x06
#endif
-/* A display blank is requested */
-#define FB_EVENT_BLANK 0x09
-
struct fb_event {
struct fb_info *info;
void *data;
};
+/* Enough for the VT console needs, see its max_font_width/height */
+#define FB_MAX_BLIT_WIDTH 64
+#define FB_MAX_BLIT_HEIGHT 128
+
struct fb_blit_caps {
- u32 x;
- u32 y;
+ DECLARE_BITMAP(x, FB_MAX_BLIT_WIDTH);
+ DECLARE_BITMAP(y, FB_MAX_BLIT_HEIGHT);
u32 len;
u32 flags;
};
@@ -190,23 +193,35 @@ struct fb_pixmap {
u32 scan_align; /* alignment per scanline */
u32 access_align; /* alignment per read/write (bits) */
u32 flags; /* see FB_PIXMAP_* */
- u32 blit_x; /* supported bit block dimensions (1-32)*/
- u32 blit_y; /* Format: blit_x = 1 << (width - 1) */
- /* blit_y = 1 << (height - 1) */
- /* if 0, will be set to 0xffffffff (all)*/
+ /* supported bit block dimensions */
+ /* Format: test_bit(width - 1, blit_x) */
+ /* test_bit(height - 1, blit_y) */
+ /* if zero, will be set to full (all) */
+ DECLARE_BITMAP(blit_x, FB_MAX_BLIT_WIDTH);
+ DECLARE_BITMAP(blit_y, FB_MAX_BLIT_HEIGHT);
/* access methods */
void (*writeio)(struct fb_info *info, void __iomem *dst, void *src, unsigned int size);
void (*readio) (struct fb_info *info, void *dst, void __iomem *src, unsigned int size);
};
#ifdef CONFIG_FB_DEFERRED_IO
+struct fb_deferred_io_pageref {
+ struct page *page;
+ unsigned long offset;
+ /* private */
+ struct list_head list;
+};
+
struct fb_deferred_io {
/* delay between mkwrite and deferred handler */
unsigned long delay;
- struct mutex lock; /* mutex that protects the page list */
- struct list_head pagelist; /* list of touched pages */
+ bool sort_pagereflist; /* sort pagelist by offset */
+ int open_count; /* number of opened files; protected by fb_info lock */
+ struct mutex lock; /* mutex that protects the pageref list */
+ struct list_head pagereflist; /* list of pagerefs for touched pages */
+ struct address_space *mapping; /* page cache object for fb device */
/* callback */
- void (*first_io)(struct fb_info *info);
+ struct page *(*get_page)(struct fb_info *info, unsigned long offset);
void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
};
#endif
@@ -373,7 +388,6 @@ struct fb_tile_ops {
#endif /* CONFIG_FB_TILEBLITTING */
/* FBINFO_* = fb_info.flags bit flags */
-#define FBINFO_DEFAULT 0
#define FBINFO_HWACCEL_DISABLED 0x0002
/* When FBINFO_HWACCEL_DISABLED is set:
* Hardware acceleration is turned off. Software implementations
@@ -414,8 +428,6 @@ struct fb_tile_ops {
*/
#define FBINFO_MISC_ALWAYS_SETPAR 0x40000
-/* where the fb is a firmware driver, and can be replaced with a proper one */
-#define FBINFO_MISC_FIRMWARE 0x80000
/*
* Host and GPU endianness differ.
*/
@@ -435,7 +447,7 @@ struct fb_tile_ops {
struct fb_info {
- atomic_t count;
+ refcount_t count;
int node;
int flags;
/*
@@ -448,13 +460,14 @@ struct fb_info {
struct fb_var_screeninfo var; /* Current var */
struct fb_fix_screeninfo fix; /* Current fix */
struct fb_monspecs monspecs; /* Current Monitor specs */
- struct work_struct queue; /* Framebuffer event queue */
struct fb_pixmap pixmap; /* Image hardware mapper */
struct fb_pixmap sprite; /* Cursor hardware mapper */
struct fb_cmap cmap; /* Current cmap */
struct list_head modelist; /* mode list */
struct fb_videomode *mode; /* current mode */
+ int blank; /* current blanking; see FB_BLANK_ constants */
+
#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
/* assigned backlight device */
/* set before framebuffer registration,
@@ -465,14 +478,25 @@ struct fb_info {
struct mutex bl_curve_mutex;
u8 bl_curve[FB_BACKLIGHT_LEVELS];
#endif
+
+ /*
+ * Assigned LCD device; set before framebuffer
+ * registration, remove after unregister
+ */
+ struct lcd_device *lcd_dev;
+
#ifdef CONFIG_FB_DEFERRED_IO
struct delayed_work deferred_work;
+ unsigned long npagerefs;
+ struct fb_deferred_io_pageref *pagerefs;
struct fb_deferred_io *fbdefio;
#endif
const struct fb_ops *fbops;
struct device *device; /* This is the parent */
+#if defined(CONFIG_FB_DEVICE)
struct device *dev; /* This is this fb device */
+#endif
int class_flag; /* private sysfs flags */
#ifdef CONFIG_FB_TILEBLITTING
struct fb_tile_ops *tileops; /* Tile Blitting */
@@ -489,32 +513,11 @@ struct fb_info {
void *fbcon_par; /* fbcon use-only private area */
/* From here on everything is device dependent */
void *par;
- /* we need the PCI or similar aperture base/size not
- smem_start/size as smem_start may just be an object
- allocated inside the aperture so may not actually overlap */
- struct apertures_struct {
- unsigned int count;
- struct aperture {
- resource_size_t base;
- resource_size_t size;
- } ranges[0];
- } *apertures;
bool skip_vt_switch; /* no VT switch on suspend/resume required */
+ bool skip_panic; /* Do not write to the fb after a panic */
};
-static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
- struct apertures_struct *a;
-
- a = kzalloc(struct_size(a, ranges, max_num), GFP_KERNEL);
- if (!a)
- return NULL;
- a->count = max_num;
- return a;
-}
-
-#define FBINFO_FLAG_DEFAULT FBINFO_DEFAULT
-
/* This will go away
* fbset currently hacks in FB_ACCELF_TEXT into var.accel_flags
* when it wants to turn the acceleration engine on. This is
@@ -523,58 +526,6 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
*/
#define STUPID_ACCELF_TEXT_SHIT
-// This will go away
-#if defined(__sparc__)
-
-/* We map all of our framebuffers such that big-endian accesses
- * are what we want, so the following is sufficient.
- */
-
-// This will go away
-#define fb_readb sbus_readb
-#define fb_readw sbus_readw
-#define fb_readl sbus_readl
-#define fb_readq sbus_readq
-#define fb_writeb sbus_writeb
-#define fb_writew sbus_writew
-#define fb_writel sbus_writel
-#define fb_writeq sbus_writeq
-#define fb_memset sbus_memset_io
-#define fb_memcpy_fromfb sbus_memcpy_fromio
-#define fb_memcpy_tofb sbus_memcpy_toio
-
-#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || \
- defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || \
- defined(__arm__) || defined(__aarch64__)
-
-#define fb_readb __raw_readb
-#define fb_readw __raw_readw
-#define fb_readl __raw_readl
-#define fb_readq __raw_readq
-#define fb_writeb __raw_writeb
-#define fb_writew __raw_writew
-#define fb_writel __raw_writel
-#define fb_writeq __raw_writeq
-#define fb_memset memset_io
-#define fb_memcpy_fromfb memcpy_fromio
-#define fb_memcpy_tofb memcpy_toio
-
-#else
-
-#define fb_readb(addr) (*(volatile u8 *) (addr))
-#define fb_readw(addr) (*(volatile u16 *) (addr))
-#define fb_readl(addr) (*(volatile u32 *) (addr))
-#define fb_readq(addr) (*(volatile u64 *) (addr))
-#define fb_writeb(b,addr) (*(volatile u8 *) (addr) = (b))
-#define fb_writew(b,addr) (*(volatile u16 *) (addr) = (b))
-#define fb_writel(b,addr) (*(volatile u32 *) (addr) = (b))
-#define fb_writeq(b,addr) (*(volatile u64 *) (addr) = (b))
-#define fb_memset memset
-#define fb_memcpy_fromfb memcpy
-#define fb_memcpy_tofb memcpy
-
-#endif
-
#define FB_LEFT_POS(p, bpp) (fb_be_math(p) ? (32 - (bpp)) : 0)
#define FB_SHIFT_HIGH(p, val, bits) (fb_be_math(p) ? (val) >> (bits) : \
(val) << (bits))
@@ -588,12 +539,41 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var);
extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var);
extern int fb_blank(struct fb_info *info, int blank);
+
+/*
+ * Helpers for framebuffers in I/O memory
+ */
+
extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image);
+extern ssize_t fb_io_read(struct fb_info *info, char __user *buf,
+ size_t count, loff_t *ppos);
+extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos);
+int fb_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
+
+#define __FB_DEFAULT_IOMEM_OPS_RDWR \
+ .fb_read = fb_io_read, \
+ .fb_write = fb_io_write
+
+#define __FB_DEFAULT_IOMEM_OPS_DRAW \
+ .fb_fillrect = cfb_fillrect, \
+ .fb_copyarea = cfb_copyarea, \
+ .fb_imageblit = cfb_imageblit
+
+#define __FB_DEFAULT_IOMEM_OPS_MMAP \
+ .fb_mmap = fb_io_mmap
+
+#define FB_DEFAULT_IOMEM_OPS \
+ __FB_DEFAULT_IOMEM_OPS_RDWR, \
+ __FB_DEFAULT_IOMEM_OPS_DRAW, \
+ __FB_DEFAULT_IOMEM_OPS_MMAP
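+
+/*
+ * Usage sketch (hypothetical driver, not part of this header): a
+ * driver whose framebuffer resides in I/O memory can pull in all of
+ * the default callbacks at once:
+ *
+ *	static const struct fb_ops example_fb_ops = {
+ *		.owner = THIS_MODULE,
+ *		FB_DEFAULT_IOMEM_OPS,
+ *	};
+ */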
+
/*
- * Drawing operations where framebuffer is in system RAM
+ * Helpers for framebuffers in system memory
*/
+
extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
extern void sys_copyarea(struct fb_info *info, const struct fb_copyarea *area);
extern void sys_imageblit(struct fb_info *info, const struct fb_image *image);
@@ -602,15 +582,32 @@ extern ssize_t fb_sys_read(struct fb_info *info, char __user *buf,
extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos);
-/* drivers/video/fbmem.c */
+#define __FB_DEFAULT_SYSMEM_OPS_RDWR \
+ .fb_read = fb_sys_read, \
+ .fb_write = fb_sys_write
+
+#define __FB_DEFAULT_SYSMEM_OPS_DRAW \
+ .fb_fillrect = sys_fillrect, \
+ .fb_copyarea = sys_copyarea, \
+ .fb_imageblit = sys_imageblit
+
+/*
+ * Helpers for framebuffers in DMA-able memory
+ */
+
+#define __FB_DEFAULT_DMAMEM_OPS_RDWR \
+ .fb_read = fb_sys_read, \
+ .fb_write = fb_sys_write
+
+#define __FB_DEFAULT_DMAMEM_OPS_DRAW \
+ .fb_fillrect = sys_fillrect, \
+ .fb_copyarea = sys_copyarea, \
+ .fb_imageblit = sys_imageblit
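+
+/*
+ * Sketch (hypothetical driver): no combined SYSMEM/DMAMEM macro is
+ * defined here, so drivers for framebuffers in system or DMA-able
+ * memory compose the partial macros themselves:
+ *
+ *	static const struct fb_ops example_sys_fb_ops = {
+ *		.owner = THIS_MODULE,
+ *		__FB_DEFAULT_SYSMEM_OPS_RDWR,
+ *		__FB_DEFAULT_SYSMEM_OPS_DRAW,
+ *	};
+ */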
+
+/* fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
-extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
- const char *name);
-extern int remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary);
-extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
-extern int fb_show_logo(struct fb_info *fb_info, int rotate);
+extern int devm_register_framebuffer(struct device *dev, struct fb_info *fb_info);
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx,
u32 height, u32 shift_high, u32 shift_low, u32 mod);
@@ -621,16 +618,6 @@ extern int fb_get_color_depth(struct fb_var_screeninfo *var,
extern int fb_get_options(const char *name, char **option);
extern int fb_new_modelist(struct fb_info *info);
-extern struct fb_info *registered_fb[FB_MAX];
-extern int num_registered_fb;
-extern bool fb_center_logo;
-extern int fb_logo_count;
-extern struct class *fb_class;
-
-#define for_each_registered_fb(i) \
- for (i = 0; i < FB_MAX; i++) \
- if (!registered_fb[i]) {} else
-
static inline void lock_fb_info(struct fb_info *info)
{
mutex_lock(&info->lock);
@@ -656,13 +643,90 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
}
}
-/* drivers/video/fb_defio.c */
+/* fb_defio.c */
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
-extern void fb_deferred_io_init(struct fb_info *info);
+extern int fb_deferred_io_init(struct fb_info *info);
+extern void fb_deferred_io_open(struct fb_info *info,
+ struct inode *inode,
+ struct file *file);
+extern void fb_deferred_io_release(struct fb_info *info);
extern void fb_deferred_io_cleanup(struct fb_info *info);
extern int fb_deferred_io_fsync(struct file *file, loff_t start,
loff_t end, int datasync);
+/*
+ * Generate callbacks for deferred I/O
+ */
+
+#define __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, __mode) \
+ static ssize_t __prefix ## _defio_read(struct fb_info *info, char __user *buf, \
+ size_t count, loff_t *ppos) \
+ { \
+ return fb_ ## __mode ## _read(info, buf, count, ppos); \
+ } \
+ static ssize_t __prefix ## _defio_write(struct fb_info *info, const char __user *buf, \
+ size_t count, loff_t *ppos) \
+ { \
+ unsigned long offset = *ppos; \
+ ssize_t ret = fb_ ## __mode ## _write(info, buf, count, ppos); \
+ if (ret > 0) \
+ __damage_range(info, offset, ret); \
+ return ret; \
+ }
+
+#define __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, __mode) \
+ static void __prefix ## _defio_fillrect(struct fb_info *info, \
+ const struct fb_fillrect *rect) \
+ { \
+ __mode ## _fillrect(info, rect); \
+ __damage_area(info, rect->dx, rect->dy, rect->width, rect->height); \
+ } \
+ static void __prefix ## _defio_copyarea(struct fb_info *info, \
+ const struct fb_copyarea *area) \
+ { \
+ __mode ## _copyarea(info, area); \
+ __damage_area(info, area->dx, area->dy, area->width, area->height); \
+ } \
+ static void __prefix ## _defio_imageblit(struct fb_info *info, \
+ const struct fb_image *image) \
+ { \
+ __mode ## _imageblit(info, image); \
+ __damage_area(info, image->dx, image->dy, image->width, image->height); \
+ }
+
+#define FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(__prefix, __damage_range, __damage_area) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, io) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, cfb)
+
+#define FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(__prefix, __damage_range, __damage_area) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
+
+#define FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(__prefix, __damage_range, __damage_area) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
+
+/*
+ * Initializes struct fb_ops for deferred I/O.
+ */
+
+#define __FB_DEFAULT_DEFERRED_OPS_RDWR(__prefix) \
+ .fb_read = __prefix ## _defio_read, \
+ .fb_write = __prefix ## _defio_write
+
+#define __FB_DEFAULT_DEFERRED_OPS_DRAW(__prefix) \
+ .fb_fillrect = __prefix ## _defio_fillrect, \
+ .fb_copyarea = __prefix ## _defio_copyarea, \
+ .fb_imageblit = __prefix ## _defio_imageblit
+
+#define __FB_DEFAULT_DEFERRED_OPS_MMAP(__prefix) \
+ .fb_mmap = fb_deferred_io_mmap
+
+#define FB_DEFAULT_DEFERRED_OPS(__prefix) \
+ __FB_DEFAULT_DEFERRED_OPS_RDWR(__prefix), \
+ __FB_DEFAULT_DEFERRED_OPS_DRAW(__prefix), \
+ __FB_DEFAULT_DEFERRED_OPS_MMAP(__prefix)
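+
+/*
+ * Sketch of the intended pairing (driver name and damage helpers are
+ * hypothetical): generate the callbacks once, then reference them by
+ * prefix from the ops struct:
+ *
+ *	FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(example,
+ *					  example_damage_range,
+ *					  example_damage_area)
+ *
+ *	static const struct fb_ops example_fb_ops = {
+ *		.owner = THIS_MODULE,
+ *		FB_DEFAULT_DEFERRED_OPS(example),
+ *	};
+ */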
+
static inline bool fb_be_math(struct fb_info *info)
{
#ifdef CONFIG_FB_FOREIGN_ENDIAN
@@ -682,14 +746,29 @@ static inline bool fb_be_math(struct fb_info *info)
#endif /* CONFIG_FB_FOREIGN_ENDIAN */
}
-/* drivers/video/fbsysfs.c */
extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
extern void framebuffer_release(struct fb_info *info);
-extern int fb_init_device(struct fb_info *fb_info);
-extern void fb_cleanup_device(struct fb_info *head);
extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max);
-/* drivers/video/fbmon.c */
+#if IS_ENABLED(CONFIG_FB_BACKLIGHT)
+struct backlight_device *fb_bl_device(struct fb_info *info);
+void fb_bl_notify_blank(struct fb_info *info, int old_blank);
+#else
+static inline struct backlight_device *fb_bl_device(struct fb_info *info)
+{
+ return NULL;
+}
+
+static inline void fb_bl_notify_blank(struct fb_info *info, int old_blank)
+{ }
+#endif
+
+static inline struct lcd_device *fb_lcd_device(struct fb_info *info)
+{
+ return info->lcd_dev;
+}
+
+/* fbmon.c */
#define FB_MAXTIMINGS 0
#define FB_VSYNCTIMINGS 1
#define FB_HSYNCTIMINGS 2
@@ -723,7 +802,7 @@ extern int of_get_fb_videomode(struct device_node *np,
extern int fb_videomode_from_videomode(const struct videomode *vm,
struct fb_videomode *fbmode);
-/* drivers/video/modedb.c */
+/* modedb.c */
#define VESA_MODEDB_SIZE 43
#define DMT_SIZE 0x50
@@ -749,7 +828,7 @@ extern void fb_videomode_to_modelist(const struct fb_videomode *modedb, int num,
extern const struct fb_videomode *fb_find_best_display(const struct fb_monspecs *specs,
struct list_head *head);
-/* drivers/video/fbcmap.c */
+/* fbcmap.c */
extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp);
extern int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags);
extern void fb_dealloc_cmap(struct fb_cmap *cmap);
@@ -784,7 +863,6 @@ struct dmt_videomode {
const struct fb_videomode *mode;
};
-extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
extern const struct dmt_videomode dmt_modes[];
@@ -800,7 +878,12 @@ extern int fb_find_mode(struct fb_var_screeninfo *var,
const struct fb_videomode *default_mode,
unsigned int default_bpp);
-/* Convenience logging macros */
+bool fb_modesetting_disabled(const char *drvname);
+
+/*
+ * Convenience logging macros
+ */
+
#define fb_err(fb_info, fmt, ...) \
pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_notice(info, fmt, ...) \
@@ -812,4 +895,12 @@ extern int fb_find_mode(struct fb_var_screeninfo *var,
#define fb_dbg(fb_info, fmt, ...) \
pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_warn_once(fb_info, fmt, ...) \
+ pr_warn_once("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+
+#define fb_WARN_ONCE(fb_info, condition, fmt, ...) \
+ WARN_ONCE(condition, "fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_WARN_ON_ONCE(fb_info, x) \
+ fb_WARN_ONCE(fb_info, (x), "%s", "fb_WARN_ON_ONCE(" __stringify(x) ")")
+
#endif /* _LINUX_FB_H */
diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h
index ff5596dd30f8..f206370060e1 100644
--- a/include/linux/fbcon.h
+++ b/include/linux/fbcon.h
@@ -1,6 +1,13 @@
#ifndef _LINUX_FBCON_H
#define _LINUX_FBCON_H
+#include <linux/compiler_types.h>
+
+struct fb_blit_caps;
+struct fb_info;
+struct fb_var_screeninfo;
+struct fb_videomode;
+
#ifdef CONFIG_FRAMEBUFFER_CONSOLE
void __init fb_console_init(void);
void __exit fb_console_exit(void);
@@ -11,10 +18,13 @@ void fbcon_suspended(struct fb_info *info);
void fbcon_resumed(struct fb_info *info);
int fbcon_mode_deleted(struct fb_info *info,
struct fb_videomode *mode);
+void fbcon_delete_modelist(struct list_head *head);
void fbcon_new_modelist(struct fb_info *info);
void fbcon_get_requirement(struct fb_info *info,
struct fb_blit_caps *caps);
void fbcon_fb_blanked(struct fb_info *info, int blank);
+int fbcon_modechange_possible(struct fb_info *info,
+ struct fb_var_screeninfo *var);
void fbcon_update_vcs(struct fb_info *info, bool all);
void fbcon_remap_all(struct fb_info *info);
int fbcon_set_con2fb_map_ioctl(void __user *argp);
@@ -29,10 +39,13 @@ static inline void fbcon_suspended(struct fb_info *info) {}
static inline void fbcon_resumed(struct fb_info *info) {}
static inline int fbcon_mode_deleted(struct fb_info *info,
struct fb_videomode *mode) { return 0; }
+static inline void fbcon_delete_modelist(struct list_head *head) {}
static inline void fbcon_new_modelist(struct fb_info *info) {}
static inline void fbcon_get_requirement(struct fb_info *info,
struct fb_blit_caps *caps) {}
static inline void fbcon_fb_blanked(struct fb_info *info, int blank) {}
+static inline int fbcon_modechange_possible(struct fb_info *info,
+ struct fb_var_screeninfo *var) { return 0; }
static inline void fbcon_update_vcs(struct fb_info *info, bool all) {}
static inline void fbcon_remap_all(struct fb_info *info) {}
static inline int fbcon_set_con2fb_map_ioctl(void __user *argp) { return 0; }
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 766fcd973beb..a332e79b3207 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -12,10 +12,6 @@
FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
-/* List of all valid flags for the how->upgrade_mask argument: */
-#define VALID_UPGRADE_FLAGS \
- (UPGRADE_NOWRITE | UPGRADE_NOREAD)
-
/* List of all valid flags for the how->resolve argument: */
#define VALID_RESOLVE_FLAGS \
(RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index d0e78174874a..c45306a9f007 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -22,7 +22,6 @@
* as this is the granularity returned by copy_fdset().
*/
#define NR_OPEN_DEFAULT BITS_PER_LONG
-#define NR_OPEN_MAX ~0U
struct fdtable {
unsigned int max_fds;
@@ -33,16 +32,6 @@ struct fdtable {
struct rcu_head rcu;
};
-static inline bool close_on_exec(unsigned int fd, const struct fdtable *fdt)
-{
- return test_bit(fd, fdt->close_on_exec);
-}
-
-static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt)
-{
- return test_bit(fd, fdt->open_fds);
-}
-
/*
* Open file table structure
*/
@@ -83,12 +72,17 @@ struct dentry;
static inline struct file *files_lookup_fd_raw(struct files_struct *files, unsigned int fd)
{
struct fdtable *fdt = rcu_dereference_raw(files->fdt);
-
- if (fd < fdt->max_fds) {
- fd = array_index_nospec(fd, fdt->max_fds);
- return rcu_dereference_raw(fdt->fd[fd]);
- }
- return NULL;
+ unsigned long mask = array_index_mask_nospec(fd, fdt->max_fds);
+ struct file *needs_masking;
+
+ /*
+ * 'mask' is zero for an out-of-bounds fd, all ones for ok.
+ * 'fd&mask' is 'fd' for ok, or 0 for out of bounds.
+ *
+ * Accessing fdt->fd[0] is ok, but needs masking of the result.
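+	 *
+	 * Worked example, assuming max_fds == 64: fd == 3 gives
+	 * mask == ~0UL, so fdt->fd[3] is returned as-is, while fd == 80
+	 * gives mask == 0, so fdt->fd[0] is read but the result is
+	 * forced to NULL.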
+ */
+ needs_masking = rcu_dereference_raw(fdt->fd[fd&mask]);
+ return (struct file *)(mask & (unsigned long)needs_masking);
}
static inline struct file *files_lookup_fd_locked(struct files_struct *files, unsigned int fd)
@@ -98,36 +92,26 @@ static inline struct file *files_lookup_fd_locked(struct files_struct *files, un
return files_lookup_fd_raw(files, fd);
}
-static inline struct file *files_lookup_fd_rcu(struct files_struct *files, unsigned int fd)
+static inline bool close_on_exec(unsigned int fd, const struct files_struct *files)
{
- RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
- "suspicious rcu_dereference_check() usage");
- return files_lookup_fd_raw(files, fd);
+ return test_bit(fd, files_fdtable(files)->close_on_exec);
}
-static inline struct file *lookup_fd_rcu(unsigned int fd)
-{
- return files_lookup_fd_rcu(current->files, fd);
-}
-
-struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd);
-struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *fd);
-
struct task_struct;
void put_files_struct(struct files_struct *fs);
int unshare_files(void);
-struct files_struct *dup_fd(struct files_struct *, unsigned, int *) __latent_entropy;
+struct fd_range {
+ unsigned int from, to;
+};
+struct files_struct *dup_fd(struct files_struct *, struct fd_range *) __latent_entropy;
void do_close_on_exec(struct files_struct *);
int iterate_fd(struct files_struct *, unsigned,
int (*)(const void *, struct file *, unsigned),
const void *);
extern int close_fd(unsigned int fd);
-extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags);
-extern int close_fd_get_file(unsigned int fd, struct file **res);
-extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
- struct files_struct **new_fdp);
+extern struct file *file_close_fd(unsigned int fd);
extern struct kmem_cache *files_cachep;
diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h
index 4e624c466583..966092ffa89a 100644
--- a/include/linux/fiemap.h
+++ b/include/linux/fiemap.h
@@ -5,12 +5,18 @@
#include <uapi/linux/fiemap.h>
#include <linux/fs.h>
+/**
+ * struct fiemap_extent_info - fiemap request to a filesystem
+ * @fi_flags: Flags as passed from user
+ * @fi_extents_mapped: Number of mapped extents
+ * @fi_extents_max: Size of fiemap_extent array
+ * @fi_extents_start: Start of fiemap_extent array
+ */
struct fiemap_extent_info {
- unsigned int fi_flags; /* Flags as passed from user */
- unsigned int fi_extents_mapped; /* Number of mapped extents */
- unsigned int fi_extents_max; /* Size of fiemap_extent array */
- struct fiemap_extent __user *fi_extents_start; /* Start of
- fiemap_extent array */
+ unsigned int fi_flags;
+ unsigned int fi_extents_mapped;
+ unsigned int fi_extents_max;
+ struct fiemap_extent __user *fi_extents_start;
};
int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
@@ -18,8 +24,4 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
u64 phys, u64 len, u32 flags);
-int generic_block_fiemap(struct inode *inode,
- struct fiemap_extent_info *fieinfo, u64 start, u64 len,
- get_block_t *get_block);
-
#endif /* _LINUX_FIEMAP_H 1 */
diff --git a/include/linux/file.h b/include/linux/file.h
index 2de2e4613d7b..cf389fde9bc2 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -10,11 +10,12 @@
#include <linux/types.h>
#include <linux/posix_types.h>
#include <linux/errno.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
struct file;
extern void fput(struct file *);
-extern void fput_many(struct file *, unsigned int);
struct file_operations;
struct task_struct;
@@ -24,64 +25,65 @@ struct inode;
struct path;
extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *,
const char *, int flags, const struct file_operations *);
+extern struct file *alloc_file_pseudo_noaccount(struct inode *, struct vfsmount *,
+ const char *, int flags, const struct file_operations *);
extern struct file *alloc_file_clone(struct file *, int flags,
const struct file_operations *);
-static inline void fput_light(struct file *file, int fput_needed)
-{
- if (fput_needed)
- fput(file);
-}
-
+/* Either a reference to a struct file plus flags
+ * (cloned vs. borrowed, pos locked), with the
+ * flags stored in the low bits of the value,
+ * or empty (represented by 0).
+ */
struct fd {
- struct file *file;
- unsigned int flags;
+ unsigned long word;
};
#define FDPUT_FPUT 1
#define FDPUT_POS_UNLOCK 2
-static inline void fdput(struct fd fd)
+#define fd_file(f) ((struct file *)((f).word & ~(FDPUT_FPUT|FDPUT_POS_UNLOCK)))
+static inline bool fd_empty(struct fd f)
{
- if (fd.flags & FDPUT_FPUT)
- fput(fd.file);
+ return unlikely(!f.word);
}
-extern struct file *fget(unsigned int fd);
-extern struct file *fget_many(unsigned int fd, unsigned int refs);
-extern struct file *fget_raw(unsigned int fd);
-extern struct file *fget_task(struct task_struct *task, unsigned int fd);
-extern unsigned long __fdget(unsigned int fd);
-extern unsigned long __fdget_raw(unsigned int fd);
-extern unsigned long __fdget_pos(unsigned int fd);
-extern void __f_unlock_pos(struct file *);
-
-static inline struct fd __to_fd(unsigned long v)
+#define EMPTY_FD (struct fd){0}
+static inline struct fd BORROWED_FD(struct file *f)
{
- return (struct fd){(struct file *)(v & ~3),v & 3};
+ return (struct fd){(unsigned long)f};
}
-
-static inline struct fd fdget(unsigned int fd)
+static inline struct fd CLONED_FD(struct file *f)
{
- return __to_fd(__fdget(fd));
+ return (struct fd){(unsigned long)f | FDPUT_FPUT};
}
-static inline struct fd fdget_raw(unsigned int fd)
+static inline void fdput(struct fd fd)
{
- return __to_fd(__fdget_raw(fd));
+ if (unlikely(fd.word & FDPUT_FPUT))
+ fput(fd_file(fd));
}
-static inline struct fd fdget_pos(int fd)
-{
- return __to_fd(__fdget_pos(fd));
-}
+extern struct file *fget(unsigned int fd);
+extern struct file *fget_raw(unsigned int fd);
+extern struct file *fget_task(struct task_struct *task, unsigned int fd);
+extern struct file *fget_task_next(struct task_struct *task, unsigned int *fd);
+extern void __f_unlock_pos(struct file *);
+
+struct fd fdget(unsigned int fd);
+struct fd fdget_raw(unsigned int fd);
+struct fd fdget_pos(unsigned int fd);
static inline void fdput_pos(struct fd f)
{
- if (f.flags & FDPUT_POS_UNLOCK)
- __f_unlock_pos(f.file);
+ if (f.word & FDPUT_POS_UNLOCK)
+ __f_unlock_pos(fd_file(f));
fdput(f);
}
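+
+/*
+ * Minimal usage sketch (caller is hypothetical): resolve a descriptor,
+ * act on the file, then release whatever reference was taken:
+ *
+ *	struct fd f = fdget(ufd);
+ *
+ *	if (fd_empty(f))
+ *		return -EBADF;
+ *	inode = file_inode(fd_file(f));
+ *	...
+ *	fdput(f);
+ */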
+DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd)
+DEFINE_CLASS(fd_raw, struct fd, fdput(_T), fdget_raw(fd), int fd)
+DEFINE_CLASS(fd_pos, struct fd, fdput_pos(_T), fdget_pos(fd), int fd)
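+
+/*
+ * With the cleanup classes above the same lookup shrinks to (sketch;
+ * vfs_fsync() is just an example callee):
+ *
+ *	CLASS(fd, f)(ufd);
+ *	if (fd_empty(f))
+ *		return -EBADF;
+ *	return vfs_fsync(fd_file(f), 0);
+ *
+ * fdput() runs automatically once f goes out of scope.
+ */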
+
extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
extern void set_close_on_exec(unsigned int fd, int flag);
@@ -90,21 +92,34 @@ extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile);
extern int get_unused_fd_flags(unsigned flags);
extern void put_unused_fd(unsigned int fd);
+DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
+ get_unused_fd_flags(flags), unsigned flags)
+DEFINE_FREE(fput, struct file *, if (!IS_ERR_OR_NULL(_T)) fput(_T))
+
+/*
+ * take_fd() will take care to set @fd to -EBADF ensuring that
+ * CLASS(get_unused_fd) won't call put_unused_fd(). This makes it
+ * easier to rely on CLASS(get_unused_fd):
+ *
+ * struct file *f;
+ *
+ * CLASS(get_unused_fd, fd)(O_CLOEXEC);
+ * if (fd < 0)
+ * return fd;
+ *
+ * f = dentry_open(&path, O_RDONLY, current_cred());
+ * if (IS_ERR(f))
+ * return PTR_ERR(f);
+ *
+ * fd_install(fd, f);
+ * return take_fd(fd);
+ */
+#define take_fd(fd) __get_and_null(fd, -EBADF)
+
extern void fd_install(unsigned int fd, struct file *file);
-extern int __receive_fd(struct file *file, int __user *ufd,
- unsigned int o_flags);
-static inline int receive_fd_user(struct file *file, int __user *ufd,
- unsigned int o_flags)
-{
- if (ufd == NULL)
- return -EFAULT;
- return __receive_fd(file, ufd, o_flags);
-}
-static inline int receive_fd(struct file *file, unsigned int o_flags)
-{
- return __receive_fd(file, NULL, o_flags);
-}
+int receive_fd(struct file *file, int __user *ufd, unsigned int o_flags);
+
int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags);
extern void flush_delayed_fput(void);
@@ -112,4 +127,130 @@ extern void __fput_sync(struct file *);
extern unsigned int sysctl_nr_open_min, sysctl_nr_open_max;
+/*
+ * fd_prepare: Combined fd + file allocation cleanup class.
+ * @err: Error code to indicate if allocation succeeded.
+ * @__fd: Allocated fd (may not be accessed directly)
+ * @__file: Allocated struct file pointer (may not be accessed directly)
+ *
+ * Allocates an fd and a file together. On error paths, automatically cleans
+ * up whichever resource was successfully allocated. Allows flexible file
+ * allocation with different functions per usage.
+ *
+ * Do not use directly.
+ */
+struct fd_prepare {
+ s32 err;
+ s32 __fd; /* do not access directly */
+ struct file *__file; /* do not access directly */
+};
+
+/* Typedef for fd_prepare cleanup guards. */
+typedef struct fd_prepare class_fd_prepare_t;
+
+/*
+ * Accessors for fd_prepare class members.
+ * _Generic() is used for zero-cost type safety.
+ */
+#define fd_prepare_fd(_fdf) \
+ (_Generic((_fdf), struct fd_prepare: (_fdf).__fd))
+
+#define fd_prepare_file(_fdf) \
+ (_Generic((_fdf), struct fd_prepare: (_fdf).__file))
+
+/* Do not use directly. */
+static inline void class_fd_prepare_destructor(const struct fd_prepare *fdf)
+{
+ if (unlikely(fdf->err)) {
+ if (likely(fdf->__fd >= 0))
+ put_unused_fd(fdf->__fd);
+ if (unlikely(!IS_ERR_OR_NULL(fdf->__file)))
+ fput(fdf->__file);
+ }
+}
+
+/* Do not use directly. */
+static inline int class_fd_prepare_lock_err(const struct fd_prepare *fdf)
+{
+ if (unlikely(fdf->err))
+ return fdf->err;
+ if (unlikely(fdf->__fd < 0))
+ return fdf->__fd;
+ if (unlikely(IS_ERR(fdf->__file)))
+ return PTR_ERR(fdf->__file);
+ if (unlikely(!fdf->__file))
+ return -ENOMEM;
+ return 0;
+}
+
+/*
+ * __FD_PREPARE_INIT - Helper to initialize fd_prepare class.
+ * @_fd_flags: flags for get_unused_fd_flags()
+ * @_file_owned: expression that returns struct file *
+ *
+ * Returns a struct fd_prepare with fd, file, and err set.
+ * If fd allocation fails, fd will be negative and err will be set. If
+ * fd succeeds but file_init_expr fails, file will be ERR_PTR and err
+ * will be set. The err field is the single source of truth for error
+ * checking.
+ */
+#define __FD_PREPARE_INIT(_fd_flags, _file_owned) \
+ ({ \
+ struct fd_prepare fdf = { \
+ .__fd = get_unused_fd_flags((_fd_flags)), \
+ }; \
+ if (likely(fdf.__fd >= 0)) \
+ fdf.__file = (_file_owned); \
+ fdf.err = ACQUIRE_ERR(fd_prepare, &fdf); \
+ fdf; \
+ })
+
+/*
+ * FD_PREPARE - Macro to declare and initialize an fd_prepare variable.
+ *
+ * Declares and initializes an fd_prepare variable with automatic
+ * cleanup. No separate scope required - cleanup happens when variable
+ * goes out of scope.
+ *
+ * @_fdf: name of struct fd_prepare variable to define
+ * @_fd_flags: flags for get_unused_fd_flags()
+ * @_file_owned: struct file to take ownership of (can be expression)
+ */
+#define FD_PREPARE(_fdf, _fd_flags, _file_owned) \
+ CLASS_INIT(fd_prepare, _fdf, __FD_PREPARE_INIT(_fd_flags, _file_owned))
+
+/*
+ * fd_publish - Publish prepared fd and file to the fd table.
+ * @_fdf: struct fd_prepare variable
+ */
+#define fd_publish(_fdf) \
+ ({ \
+ struct fd_prepare *fdp = &(_fdf); \
+ VFS_WARN_ON_ONCE(fdp->err); \
+ VFS_WARN_ON_ONCE(fdp->__fd < 0); \
+ VFS_WARN_ON_ONCE(IS_ERR_OR_NULL(fdp->__file)); \
+ fd_install(fdp->__fd, fdp->__file); \
+ fdp->__fd; \
+ })
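+
+/*
+ * Usage sketch; example_alloc_file() is a hypothetical stand-in for
+ * any expression yielding a struct file *:
+ *
+ *	FD_PREPARE(fdf, O_CLOEXEC, example_alloc_file());
+ *	if (fdf.err)
+ *		return fdf.err;
+ *	...
+ *	return fd_publish(fdf);
+ */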
+
+/* Do not use directly. */
+#define __FD_ADD(_fdf, _fd_flags, _file_owned) \
+ ({ \
+ FD_PREPARE(_fdf, _fd_flags, _file_owned); \
+ s32 ret = _fdf.err; \
+ if (likely(!ret)) \
+ ret = fd_publish(_fdf); \
+ ret; \
+ })
+
+/*
+ * FD_ADD - Allocate and install an fd and file in one step.
+ * @_fd_flags: flags for get_unused_fd_flags()
+ * @_file_owned: struct file to take ownership of
+ *
+ * Returns the allocated fd number, or negative error code on failure.
+ */
+#define FD_ADD(_fd_flags, _file_owned) \
+ __FD_ADD(__UNIQUE_ID(fd_prepare), _fd_flags, _file_owned)
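+
+/*
+ * Sketch ("[example]" and example_fops are hypothetical): allocate a
+ * file and install it in one expression:
+ *
+ *	int fd = FD_ADD(O_CLOEXEC,
+ *			anon_inode_getfile("[example]", &example_fops,
+ *					   NULL, O_RDWR));
+ *
+ * On success fd is the installed descriptor; on failure it is a
+ * negative errno and nothing is left allocated.
+ */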
+
#endif /* __LINUX_FILE_H */
diff --git a/include/linux/file_ref.h b/include/linux/file_ref.h
new file mode 100644
index 000000000000..31551e4cb8f3
--- /dev/null
+++ b/include/linux/file_ref.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_FILE_REF_H
+#define _LINUX_FILE_REF_H
+
+#include <linux/atomic.h>
+#include <linux/preempt.h>
+#include <linux/types.h>
+
+/*
+ * file_ref is a reference count implementation specifically for use by
+ * files. It takes inspiration from rcuref but differs in key aspects
+ * such as support for SLAB_TYPESAFE_BY_RCU type caches.
+ *
+ * FILE_REF_ONEREF FILE_REF_MAXREF
+ * 0x0000000000000000UL 0x7FFFFFFFFFFFFFFFUL
+ * <-------------------valid ------------------->
+ *
+ * FILE_REF_SATURATED
+ * 0x8000000000000000UL 0xA000000000000000UL 0xBFFFFFFFFFFFFFFFUL
+ * <-----------------------saturation zone---------------------->
+ *
+ * FILE_REF_RELEASED FILE_REF_DEAD
+ * 0xC000000000000000UL 0xE000000000000000UL
+ * <-------------------dead zone------------------->
+ *
+ * FILE_REF_NOREF
+ * 0xFFFFFFFFFFFFFFFFUL
+ */
+
+#ifdef CONFIG_64BIT
+#define FILE_REF_ONEREF 0x0000000000000000UL
+#define FILE_REF_MAXREF 0x7FFFFFFFFFFFFFFFUL
+#define FILE_REF_SATURATED 0xA000000000000000UL
+#define FILE_REF_RELEASED 0xC000000000000000UL
+#define FILE_REF_DEAD 0xE000000000000000UL
+#define FILE_REF_NOREF 0xFFFFFFFFFFFFFFFFUL
+#else
+#define FILE_REF_ONEREF 0x00000000U
+#define FILE_REF_MAXREF 0x7FFFFFFFU
+#define FILE_REF_SATURATED 0xA0000000U
+#define FILE_REF_RELEASED 0xC0000000U
+#define FILE_REF_DEAD 0xE0000000U
+#define FILE_REF_NOREF 0xFFFFFFFFU
+#endif
+
+typedef struct {
+#ifdef CONFIG_64BIT
+ atomic64_t refcnt;
+#else
+ atomic_t refcnt;
+#endif
+} file_ref_t;
+
+/**
+ * file_ref_init - Initialize a file reference count
+ * @ref: Pointer to the reference count
+ * @cnt: The initial reference count, typically '1'
+ */
+static inline void file_ref_init(file_ref_t *ref, unsigned long cnt)
+{
+ atomic_long_set(&ref->refcnt, cnt - 1);
+}
+
+bool __file_ref_put(file_ref_t *ref, unsigned long cnt);
+
+/**
+ * file_ref_get - Acquire one reference on a file
+ * @ref: Pointer to the reference count
+ *
+ * Similar to atomic_inc_not_zero() but saturates at FILE_REF_MAXREF.
+ *
+ * Provides full memory ordering.
+ *
+ * Return: False if the attempt to acquire a reference failed. This happens
+ * when the last reference has been put already. True if a reference
+ * was successfully acquired
+ */
+static __always_inline __must_check bool file_ref_get(file_ref_t *ref)
+{
+ /*
+ * Unconditionally increase the reference count with full
+ * ordering. The saturation and dead zones provide enough
+ * tolerance for this.
+ *
+	 * If the count turns out to be negative here, the file in
+	 * question can already have been freed and immediately reused
+	 * due to SLAB_TYPESAFE_BY_RCU. Hence, unconditionally altering
+	 * the file reference count, e.g., to reset it back to the
+	 * middle of the dead zone, risks marking someone else's file
+	 * as dead behind their back.
+ *
+ * It would be possible to do a careful:
+ *
+ * cnt = atomic_long_inc_return();
+ * if (likely(cnt >= 0))
+ * return true;
+ *
+ * and then something like:
+ *
+	 *	if (cnt >= FILE_REF_RELEASED)
+	 *		atomic_long_try_cmpxchg(&ref->refcnt, &cnt, FILE_REF_DEAD);
+ *
+	 * to set the value back to the middle of the dead zone. But it's
+	 * practically impossible to go from FILE_REF_DEAD to
+	 * FILE_REF_ONEREF: it would take 2^61 (2305843009213693952)
+	 * file_ref_get() calls to resurrect such a dead file.
+ */
+ return !atomic_long_add_negative(1, &ref->refcnt);
+}
+
+/**
+ * file_ref_inc - Acquire one reference on a file
+ * @ref: Pointer to the reference count
+ *
+ * Acquire an additional reference on a file. Warns if the caller didn't
+ * already hold a reference.
+ */
+static __always_inline void file_ref_inc(file_ref_t *ref)
+{
+ long prior = atomic_long_fetch_inc_relaxed(&ref->refcnt);
+ WARN_ONCE(prior < 0, "file_ref_inc() on a released file reference");
+}
+
+/**
+ * file_ref_put -- Release a file reference
+ * @ref: Pointer to the reference count
+ *
+ * Provides release memory ordering, such that prior loads and stores
+ * are done before, and provides an acquire ordering on success such
+ * that free() must come after.
+ *
+ * Return: True if this was the last reference with no future references
+ * possible. This signals the caller that it can safely release
+ * the object which is protected by the reference counter.
+ * False if there are still active references or the put() raced
+ * with a concurrent get()/put() pair. Caller is not allowed to
+ * release the protected object.
+ */
+static __always_inline __must_check bool file_ref_put(file_ref_t *ref)
+{
+ long cnt;
+
+ /*
+ * While files are SLAB_TYPESAFE_BY_RCU and thus file_ref_put()
+	 * calls don't risk UAFs when a file is recycled, they are still
+	 * vulnerable to UAFs caused by freeing the whole slab page once
+	 * it becomes unused. Preventing file_ref_put() from being
+ */
+ guard(preempt)();
+ /*
+ * Unconditionally decrease the reference count. The saturation
+ * and dead zones provide enough tolerance for this. If this
+ * fails then we need to handle the last reference drop and
+ * cases inside the saturation and dead zones.
+ */
+ cnt = atomic_long_dec_return(&ref->refcnt);
+ if (cnt >= 0)
+ return false;
+ return __file_ref_put(ref, cnt);
+}
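+
+/*
+ * Caller-side sketch (the object and free_example_file() are
+ * hypothetical): pair file_ref_get() with file_ref_put(), and free
+ * only when the put reports the final reference drop:
+ *
+ *	if (!file_ref_get(&f->f_ref))
+ *		return NULL;
+ *	...
+ *	if (file_ref_put(&f->f_ref))
+ *		free_example_file(f);
+ */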
+
+/**
+ * file_ref_put_close - drop a reference expecting it would transition to FILE_REF_NOREF
+ * @ref: Pointer to the reference count
+ *
+ * Semantically it is equivalent to calling file_ref_put(), but it trades lower
+ * performance in the face of other CPUs also modifying the refcount for higher
+ * performance when this happens to be the last reference.
+ *
+ * For the last reference file_ref_put() issues 2 atomics. One to drop the
+ * reference and another to transition it to FILE_REF_DEAD. This routine does
+ * the work in one step, but in order to do it has to pre-read the variable which
+ * decreases scalability.
+ *
+ * Use with close() et al, stick to file_ref_put() by default.
+ */
+static __always_inline __must_check bool file_ref_put_close(file_ref_t *ref)
+{
+ long old;
+
+ old = atomic_long_read(&ref->refcnt);
+ if (likely(old == FILE_REF_ONEREF)) {
+ if (likely(atomic_long_try_cmpxchg(&ref->refcnt, &old, FILE_REF_DEAD)))
+ return true;
+ }
+ return file_ref_put(ref);
+}
+
+/**
+ * file_ref_read - Read the number of file references
+ * @ref: Pointer to the reference count
+ *
+ * Return: The number of held references (0 ... N)
+ */
+static inline unsigned long file_ref_read(file_ref_t *ref)
+{
+ unsigned long c = atomic_long_read(&ref->refcnt);
+
+ /* Return 0 if within the DEAD zone. */
+ return c >= FILE_REF_RELEASED ? 0 : c + 1;
+}
+
+/*
+ * __file_ref_read_raw - Return the value stored in ref->refcnt
+ * @ref: Pointer to the reference count
+ *
+ * Return: The raw value found in the counter
+ *
+ * A hack for file_needs_f_pos_lock(), you probably want to use
+ * file_ref_read() instead.
+ */
+static inline unsigned long __file_ref_read_raw(file_ref_t *ref)
+{
+ return atomic_long_read(&ref->refcnt);
+}
+
+#endif
diff --git a/include/linux/fileattr.h b/include/linux/fileattr.h
index 9e37e063ac69..f89dcfad3f8f 100644
--- a/include/linux/fileattr.h
+++ b/include/linux/fileattr.h
@@ -14,13 +14,33 @@
FS_XFLAG_NODUMP | FS_XFLAG_NOATIME | FS_XFLAG_DAX | \
FS_XFLAG_PROJINHERIT)
+/* Read-only inode flags */
+#define FS_XFLAG_RDONLY_MASK \
+ (FS_XFLAG_PREALLOC | FS_XFLAG_HASATTR)
+
+/* Flags to indicate valid value of fsx_ fields */
+#define FS_XFLAG_VALUES_MASK \
+ (FS_XFLAG_EXTSIZE | FS_XFLAG_COWEXTSIZE)
+
+/* Flags for directories */
+#define FS_XFLAG_DIRONLY_MASK \
+ (FS_XFLAG_RTINHERIT | FS_XFLAG_NOSYMLINKS | FS_XFLAG_EXTSZINHERIT)
+
+/* Misc settable flags */
+#define FS_XFLAG_MISC_MASK \
+ (FS_XFLAG_REALTIME | FS_XFLAG_NODEFRAG | FS_XFLAG_FILESTREAM)
+
+#define FS_XFLAGS_MASK \
+ (FS_XFLAG_COMMON | FS_XFLAG_RDONLY_MASK | FS_XFLAG_VALUES_MASK | \
+ FS_XFLAG_DIRONLY_MASK | FS_XFLAG_MISC_MASK)
+
/*
* Merged interface for miscellaneous file attributes. 'flags' originates from
* ext* and 'fsx_flags' from xfs. There's some overlap between the two, which
* is handled by the VFS helpers, so filesystems are free to implement just one
* or both of these sub-interfaces.
*/
-struct fileattr {
+struct file_kattr {
u32 flags; /* flags (FS_IOC_GETFLAGS/FS_IOC_SETFLAGS) */
/* struct fsxattr: */
u32 fsx_xflags; /* xflags field value (get/set) */
@@ -33,10 +53,10 @@ struct fileattr {
bool fsx_valid:1;
};
-int copy_fsxattr_to_user(const struct fileattr *fa, struct fsxattr __user *ufa);
+int copy_fsxattr_to_user(const struct file_kattr *fa, struct fsxattr __user *ufa);
-void fileattr_fill_xflags(struct fileattr *fa, u32 xflags);
-void fileattr_fill_flags(struct fileattr *fa, u32 flags);
+void fileattr_fill_xflags(struct file_kattr *fa, u32 xflags);
+void fileattr_fill_flags(struct file_kattr *fa, u32 flags);
/**
* fileattr_has_fsx - check for extended flags/attributes
@@ -45,15 +65,19 @@ void fileattr_fill_flags(struct fileattr *fa, u32 flags);
* Return: true if any attributes are present that are not represented in
* ->flags.
*/
-static inline bool fileattr_has_fsx(const struct fileattr *fa)
+static inline bool fileattr_has_fsx(const struct file_kattr *fa)
{
return fa->fsx_valid &&
((fa->fsx_xflags & ~FS_XFLAG_COMMON) || fa->fsx_extsize != 0 ||
fa->fsx_projid != 0 || fa->fsx_cowextsize != 0);
}
-int vfs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
-int vfs_fileattr_set(struct user_namespace *mnt_userns, struct dentry *dentry,
- struct fileattr *fa);
+int vfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa);
+int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct file_kattr *fa);
+int ioctl_getflags(struct file *file, unsigned int __user *argp);
+int ioctl_setflags(struct file *file, unsigned int __user *argp);
+int ioctl_fsgetxattr(struct file *file, void __user *argp);
+int ioctl_fssetxattr(struct file *file, void __user *argp);
#endif /* _LINUX_FILEATTR_H */
diff --git a/include/linux/filelock.h b/include/linux/filelock.h
new file mode 100644
index 000000000000..54b824c05299
--- /dev/null
+++ b/include/linux/filelock.h
@@ -0,0 +1,584 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FILELOCK_H
+#define _LINUX_FILELOCK_H
+
+#include <linux/fs.h>
+
+#define FL_POSIX 1
+#define FL_FLOCK 2
+#define FL_DELEG 4 /* NFSv4 delegation */
+#define FL_ACCESS 8 /* not trying to lock, just looking */
+#define FL_EXISTS 16 /* when unlocking, test for existence */
+#define FL_LEASE 32 /* lease held on this file */
+#define FL_CLOSE 64 /* unlock on close */
+#define FL_SLEEP 128 /* A blocking lock */
+#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
+#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
+#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
+#define FL_LAYOUT 2048 /* outstanding pNFS layout */
+#define FL_RECLAIM 4096 /* reclaiming from a reboot server */
+
+#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)
+
+/*
+ * Special return value from posix_lock_file() and vfs_lock_file() for
+ * asynchronous locking.
+ */
+#define FILE_LOCK_DEFERRED 1
+
+struct file_lock;
+struct file_lease;
+
+struct file_lock_operations {
+ void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
+ void (*fl_release_private)(struct file_lock *);
+};
+
+struct lock_manager_operations {
+ void *lm_mod_owner;
+ fl_owner_t (*lm_get_owner)(fl_owner_t);
+ void (*lm_put_owner)(fl_owner_t);
+ void (*lm_notify)(struct file_lock *); /* unblock callback */
+ int (*lm_grant)(struct file_lock *, int);
+ bool (*lm_lock_expirable)(struct file_lock *cfl);
+ void (*lm_expire_lock)(void);
+};
+
+struct lease_manager_operations {
+ bool (*lm_break)(struct file_lease *);
+ int (*lm_change)(struct file_lease *, int, struct list_head *);
+ void (*lm_setup)(struct file_lease *, void **);
+ bool (*lm_breaker_owns_lease)(struct file_lease *);
+};
+
+struct lock_manager {
+ struct list_head list;
+ /*
+ * NFSv4 and up also want opens blocked during the grace period;
+ * NLM doesn't care:
+ */
+ bool block_opens;
+};
+
+struct net;
+void locks_start_grace(struct net *, struct lock_manager *);
+void locks_end_grace(struct lock_manager *);
+bool locks_in_grace(struct net *);
+bool opens_in_grace(struct net *);
+
+/*
+ * struct file_lock has a union that some filesystems use to track
+ * their own private info. The NFS side of things is defined here:
+ */
+#include <linux/nfs_fs_i.h>
+
+/*
+ * struct file_lock represents a generic "file lock". It's used to represent
+ * POSIX byte range locks, BSD (flock) locks, and leases. It's important to
+ * note that the same struct is used to represent both a request for a lock and
+ * the lock itself, but the same object is never used for both.
+ *
+ * FIXME: should we create a separate "struct lock_request" to help distinguish
+ * these two uses?
+ *
+ * The various i_flctx lists are ordered by:
+ *
+ * 1) lock owner
+ * 2) lock range start
+ * 3) lock range end
+ *
+ * Obviously, the last two criteria only matter for POSIX locks.
+ */
+
+struct file_lock_core {
+ struct file_lock_core *flc_blocker; /* The lock that is blocking us */
+ struct list_head flc_list; /* link into file_lock_context */
+ struct hlist_node flc_link; /* node in global lists */
+ struct list_head flc_blocked_requests; /* list of requests with
+ * ->fl_blocker pointing here
+ */
+ struct list_head flc_blocked_member; /* node in
+ * ->fl_blocker->fl_blocked_requests
+ */
+ fl_owner_t flc_owner;
+ unsigned int flc_flags;
+ unsigned char flc_type;
+ pid_t flc_pid;
+ int flc_link_cpu; /* what cpu's list is this on? */
+ wait_queue_head_t flc_wait;
+ struct file *flc_file;
+};
+
+struct file_lock {
+ struct file_lock_core c;
+ loff_t fl_start;
+ loff_t fl_end;
+
+ const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
+ const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
+ union {
+ struct nfs_lock_info nfs_fl;
+ struct nfs4_lock_info nfs4_fl;
+ struct {
+ struct list_head link; /* link in AFS vnode's pending_locks list */
+ int state; /* state of grant or error if -ve */
+ unsigned int debug_id;
+ } afs;
+ struct {
+ struct inode *inode;
+ } ceph;
+ } fl_u;
+} __randomize_layout;
+
+struct file_lease {
+ struct file_lock_core c;
+ struct fasync_struct * fl_fasync; /* for lease break notifications */
+ /* for lease breaks: */
+ unsigned long fl_break_time;
+ unsigned long fl_downgrade_time;
+ const struct lease_manager_operations *fl_lmops; /* Callbacks for lease managers */
+} __randomize_layout;
+
+struct file_lock_context {
+ spinlock_t flc_lock;
+ struct list_head flc_flock;
+ struct list_head flc_posix;
+ struct list_head flc_lease;
+};
+
+#ifdef CONFIG_FILE_LOCKING
+int fcntl_getlk(struct file *, unsigned int, struct flock *);
+int fcntl_setlk(unsigned int, struct file *, unsigned int,
+ struct flock *);
+
+#if BITS_PER_LONG == 32
+int fcntl_getlk64(struct file *, unsigned int, struct flock64 *);
+int fcntl_setlk64(unsigned int, struct file *, unsigned int,
+ struct flock64 *);
+#endif
+
+int fcntl_setlease(unsigned int fd, struct file *filp, int arg);
+int fcntl_getlease(struct file *filp);
+int fcntl_setdeleg(unsigned int fd, struct file *filp, struct delegation *deleg);
+int fcntl_getdeleg(struct file *filp, struct delegation *deleg);
+
+static inline bool lock_is_unlock(struct file_lock *fl)
+{
+ return fl->c.flc_type == F_UNLCK;
+}
+
+static inline bool lock_is_read(struct file_lock *fl)
+{
+ return fl->c.flc_type == F_RDLCK;
+}
+
+static inline bool lock_is_write(struct file_lock *fl)
+{
+ return fl->c.flc_type == F_WRLCK;
+}
+
+static inline void locks_wake_up_waiter(struct file_lock_core *flc)
+{
+ wake_up(&flc->flc_wait);
+}
+
+static inline void locks_wake_up(struct file_lock *fl)
+{
+ locks_wake_up_waiter(&fl->c);
+}
+
+static inline bool locks_can_async_lock(const struct file_operations *fops)
+{
+ return !fops->lock || fops->fop_flags & FOP_ASYNC_LOCK;
+}
+
+/* fs/locks.c */
+void locks_free_lock_context(struct inode *inode);
+void locks_free_lock(struct file_lock *fl);
+void locks_init_lock(struct file_lock *);
+struct file_lock *locks_alloc_lock(void);
+void locks_copy_lock(struct file_lock *, struct file_lock *);
+void locks_copy_conflock(struct file_lock *, struct file_lock *);
+void locks_remove_posix(struct file *, fl_owner_t);
+void locks_remove_file(struct file *);
+void locks_release_private(struct file_lock *);
+void posix_test_lock(struct file *, struct file_lock *);
+int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
+int locks_delete_block(struct file_lock *);
+int vfs_test_lock(struct file *, struct file_lock *);
+int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
+int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
+bool vfs_inode_has_locks(struct inode *inode);
+int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
+
+void locks_init_lease(struct file_lease *);
+void locks_free_lease(struct file_lease *fl);
+struct file_lease *locks_alloc_lease(void);
+
+#define LEASE_BREAK_LEASE BIT(0) // break leases and delegations
+#define LEASE_BREAK_DELEG BIT(1) // break delegations only
+#define LEASE_BREAK_LAYOUT BIT(2) // break layouts only
+#define LEASE_BREAK_NONBLOCK BIT(3) // non-blocking break
+#define LEASE_BREAK_OPEN_RDONLY BIT(4) // readonly open event
+
+int __break_lease(struct inode *inode, unsigned int flags);
+void lease_get_mtime(struct inode *, struct timespec64 *time);
+int generic_setlease(struct file *, int, struct file_lease **, void **priv);
+int kernel_setlease(struct file *, int, struct file_lease **, void **);
+int vfs_setlease(struct file *, int, struct file_lease **, void **);
+int lease_modify(struct file_lease *, int, struct list_head *);
+
+struct notifier_block;
+int lease_register_notifier(struct notifier_block *);
+void lease_unregister_notifier(struct notifier_block *);
+
+struct files_struct;
+void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files);
+bool locks_owner_has_blockers(struct file_lock_context *flctx,
+ fl_owner_t owner);
+
+static inline struct file_lock_context *
+locks_inode_context(const struct inode *inode)
+{
+ return smp_load_acquire(&inode->i_flctx);
+}
+
+#else /* !CONFIG_FILE_LOCKING */
+static inline int fcntl_getlk(struct file *file, unsigned int cmd,
+ struct flock __user *user)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_setlk(unsigned int fd, struct file *file,
+ unsigned int cmd, struct flock __user *user)
+{
+ return -EACCES;
+}
+
+#if BITS_PER_LONG == 32
+static inline int fcntl_getlk64(struct file *file, unsigned int cmd,
+ struct flock64 *user)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_setlk64(unsigned int fd, struct file *file,
+ unsigned int cmd, struct flock64 *user)
+{
+ return -EACCES;
+}
+#endif
+static inline int fcntl_setlease(unsigned int fd, struct file *filp, int arg)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_getlease(struct file *filp)
+{
+ return F_UNLCK;
+}
+
+static inline int fcntl_setdeleg(unsigned int fd, struct file *filp, struct delegation *deleg)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_getdeleg(struct file *filp, struct delegation *deleg)
+{
+ return -EINVAL;
+}
+
+static inline bool lock_is_unlock(struct file_lock *fl)
+{
+ return false;
+}
+
+static inline bool lock_is_read(struct file_lock *fl)
+{
+ return false;
+}
+
+static inline bool lock_is_write(struct file_lock *fl)
+{
+ return false;
+}
+
+static inline void locks_wake_up(struct file_lock *fl)
+{
+}
+
+static inline void
+locks_free_lock_context(struct inode *inode)
+{
+}
+
+static inline void locks_init_lock(struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_init_lease(struct file_lease *fl)
+{
+ return;
+}
+
+static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
+{
+ return;
+}
+
+static inline void locks_remove_file(struct file *filp)
+{
+ return;
+}
+
+static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
+{
+ return;
+}
+
+static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
+ struct file_lock *conflock)
+{
+ return -ENOLCK;
+}
+
+static inline int locks_delete_block(struct file_lock *waiter)
+{
+ return -ENOENT;
+}
+
+static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
+{
+ return 0;
+}
+
+static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
+ struct file_lock *fl, struct file_lock *conf)
+{
+ return -ENOLCK;
+}
+
+static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
+{
+ return 0;
+}
+
+static inline bool vfs_inode_has_locks(struct inode *inode)
+{
+ return false;
+}
+
+static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+{
+ return -ENOLCK;
+}
+
+static inline int __break_lease(struct inode *inode, unsigned int flags)
+{
+ return 0;
+}
+
+static inline void lease_get_mtime(struct inode *inode,
+ struct timespec64 *time)
+{
+ return;
+}
+
+static inline int generic_setlease(struct file *filp, int arg,
+ struct file_lease **flp, void **priv)
+{
+ return -EINVAL;
+}
+
+static inline int kernel_setlease(struct file *filp, int arg,
+ struct file_lease **lease, void **priv)
+{
+ return -EINVAL;
+}
+
+static inline int vfs_setlease(struct file *filp, int arg,
+ struct file_lease **lease, void **priv)
+{
+ return -EINVAL;
+}
+
+static inline int lease_modify(struct file_lease *fl, int arg,
+ struct list_head *dispose)
+{
+ return -EINVAL;
+}
+
+struct files_struct;
+static inline void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files) {}
+static inline bool locks_owner_has_blockers(struct file_lock_context *flctx,
+ fl_owner_t owner)
+{
+ return false;
+}
+
+static inline struct file_lock_context *
+locks_inode_context(const struct inode *inode)
+{
+ return NULL;
+}
+
+#endif /* !CONFIG_FILE_LOCKING */
+
+/* for walking lists of file_locks linked by flc_list */
+#define for_each_file_lock(_fl, _head) list_for_each_entry(_fl, _head, c.flc_list)
+
+static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+ return locks_lock_inode_wait(file_inode(filp), fl);
+}
+
+#ifdef CONFIG_FILE_LOCKING
+static inline unsigned int openmode_to_lease_flags(unsigned int mode)
+{
+ unsigned int flags = 0;
+
+ if ((mode & O_ACCMODE) == O_RDONLY)
+ flags |= LEASE_BREAK_OPEN_RDONLY;
+ if (mode & O_NONBLOCK)
+ flags |= LEASE_BREAK_NONBLOCK;
+ return flags;
+}
+
+static inline int break_lease(struct inode *inode, unsigned int mode)
+{
+ struct file_lock_context *flctx;
+
+ /*
+ * Since this check is lockless, we must ensure that any refcounts
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
+ */
+ flctx = READ_ONCE(inode->i_flctx);
+ if (!flctx)
+ return 0;
+ smp_mb();
+ if (!list_empty_careful(&flctx->flc_lease))
+ return __break_lease(inode, LEASE_BREAK_LEASE | openmode_to_lease_flags(mode));
+ return 0;
+}
+
+static inline int break_deleg(struct inode *inode, unsigned int flags)
+{
+ struct file_lock_context *flctx;
+
+ /*
+ * Since this check is lockless, we must ensure that any refcounts
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
+ */
+ flctx = READ_ONCE(inode->i_flctx);
+ if (!flctx)
+ return 0;
+ smp_mb();
+ if (!list_empty_careful(&flctx->flc_lease)) {
+ flags |= LEASE_BREAK_DELEG;
+ return __break_lease(inode, flags);
+ }
+ return 0;
+}
+
+struct delegated_inode {
+ struct inode *di_inode;
+};
+
+static inline bool is_delegated(struct delegated_inode *di)
+{
+ return di->di_inode;
+}
+
+static inline int try_break_deleg(struct inode *inode,
+ struct delegated_inode *di)
+{
+ int ret;
+
+ ret = break_deleg(inode, LEASE_BREAK_NONBLOCK);
+ if (ret == -EWOULDBLOCK && di) {
+ di->di_inode = inode;
+ ihold(inode);
+ }
+ return ret;
+}
+
+static inline int break_deleg_wait(struct delegated_inode *di)
+{
+ int ret;
+
+ ret = break_deleg(di->di_inode, 0);
+ iput(di->di_inode);
+ di->di_inode = NULL;
+ return ret;
+}
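+
+/*
+ * Sketch of the expected retry pattern (do_the_operation() is
+ * hypothetical): record the delegated inode on -EWOULDBLOCK, wait for
+ * the break to finish, then retry:
+ *
+ *	struct delegated_inode di = { };
+ *	int error;
+ *
+ * retry:
+ *	error = try_break_deleg(inode, &di);
+ *	if (!error)
+ *		error = do_the_operation(inode);
+ *	if (is_delegated(&di)) {
+ *		error = break_deleg_wait(&di);
+ *		if (!error)
+ *			goto retry;
+ *	}
+ */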
+
+static inline int break_layout(struct inode *inode, bool wait)
+{
+ smp_mb();
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) {
+ unsigned int flags = LEASE_BREAK_LAYOUT;
+
+ if (!wait)
+ flags |= LEASE_BREAK_NONBLOCK;
+
+ return __break_lease(inode, flags);
+ }
+ return 0;
+}
+
+#else /* !CONFIG_FILE_LOCKING */
+struct delegated_inode { };
+
+static inline bool is_delegated(struct delegated_inode *di)
+{
+ return false;
+}
+
+static inline int break_lease(struct inode *inode, bool wait)
+{
+ return 0;
+}
+
+static inline int break_deleg(struct inode *inode, unsigned int flags)
+{
+ return 0;
+}
+
+static inline int try_break_deleg(struct inode *inode,
+ struct delegated_inode *delegated_inode)
+{
+ return 0;
+}
+
+static inline int break_deleg_wait(struct delegated_inode *delegated_inode)
+{
+ BUG();
+ return 0;
+}
+
+static inline int break_layout(struct inode *inode, bool wait)
+{
+ return 0;
+}
+
+#endif /* CONFIG_FILE_LOCKING */
+
+#endif /* _LINUX_FILELOCK_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 9a09547bc7ba..fd54fed8f95f 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -5,9 +5,8 @@
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__
-#include <stdarg.h>
-
#include <linux/atomic.h>
+#include <linux/bpf.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
@@ -15,6 +14,7 @@
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
+#include <linux/sched/clock.h>
#include <linux/capability.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
@@ -28,7 +28,6 @@
#include <asm/byteorder.h>
#include <uapi/linux/filter.h>
-#include <uapi/linux/bpf.h>
struct sk_buff;
struct sock;
@@ -70,9 +69,26 @@ struct ctl_table_header;
/* unused opcode to mark special load instruction. Same as BPF_ABS */
#define BPF_PROBE_MEM 0x20
+/* unused opcode to mark special ldsx instruction. Same as BPF_IND */
+#define BPF_PROBE_MEMSX 0x40
+
+/* unused opcode to mark special load instruction. Same as BPF_MSH */
+#define BPF_PROBE_MEM32 0xa0
+
+/* unused opcode to mark special atomic instruction */
+#define BPF_PROBE_ATOMIC 0xe0
+
+/* unused opcode to mark special ldsx instruction. Same as BPF_NOSPEC */
+#define BPF_PROBE_MEM32SX 0xc0
+
/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS 0xe0
+/* unused opcode to mark speculation barrier for mitigating
+ * Spectre v1 and v4
+ */
+#define BPF_NOSPEC 0xc0
+
/* As per nm, we expose JITed images as text (code) section for
* kallsyms. That way, tools like perf can find it to match
* addresses.
@@ -86,39 +102,49 @@ struct ctl_table_header;
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
-#define BPF_ALU64_REG(OP, DST, SRC) \
+#define BPF_ALU64_REG_OFF(OP, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
- .off = 0, \
+ .off = OFF, \
.imm = 0 })
-#define BPF_ALU32_REG(OP, DST, SRC) \
+#define BPF_ALU64_REG(OP, DST, SRC) \
+ BPF_ALU64_REG_OFF(OP, DST, SRC, 0)
+
+#define BPF_ALU32_REG_OFF(OP, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
- .off = 0, \
+ .off = OFF, \
.imm = 0 })
+#define BPF_ALU32_REG(OP, DST, SRC) \
+ BPF_ALU32_REG_OFF(OP, DST, SRC, 0)
+
/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
-#define BPF_ALU64_IMM(OP, DST, IMM) \
+#define BPF_ALU64_IMM_OFF(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
- .off = 0, \
+ .off = OFF, \
.imm = IMM })
+#define BPF_ALU64_IMM(OP, DST, IMM) \
+ BPF_ALU64_IMM_OFF(OP, DST, IMM, 0)
-#define BPF_ALU32_IMM(OP, DST, IMM) \
+#define BPF_ALU32_IMM_OFF(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
- .off = 0, \
+ .off = OFF, \
.imm = IMM })
+#define BPF_ALU32_IMM(OP, DST, IMM) \
+ BPF_ALU32_IMM_OFF(OP, DST, IMM, 0)
/* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
@@ -130,6 +156,16 @@ struct ctl_table_header;
.off = 0, \
.imm = LEN })
+/* Byte Swap, bswap16/32/64 */
+
+#define BPF_BSWAP(DST, LEN) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_END | BPF_SRC(BPF_TO_LE), \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = LEN })
+
/* Short form of mov, dst_reg = src_reg */
#define BPF_MOV64_REG(DST, SRC) \
@@ -148,6 +184,25 @@ struct ctl_table_header;
.off = 0, \
.imm = 0 })
+/* Special (internal-only) form of mov, used to resolve per-CPU addrs:
+ * dst_reg = src_reg + <percpu_base_off>
+ * BPF_ADDR_PERCPU is used as a special insn->off value.
+ */
+#define BPF_ADDR_PERCPU (-1)
+
+#define BPF_MOV64_PERCPU_REG(DST, SRC) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = BPF_ADDR_PERCPU, \
+ .imm = 0 })
+
+static inline bool insn_is_mov_percpu_addr(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->off == BPF_ADDR_PERCPU;
+}
+
/* Short form of mov, dst_reg = imm32 */
#define BPF_MOV64_IMM(DST, IMM) \
@@ -166,6 +221,24 @@ struct ctl_table_header;
.off = 0, \
.imm = IMM })
+/* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */
+
+#define BPF_MOVSX64_REG(DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+#define BPF_MOVSX32_REG(DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Special form of mov32, used for doing explicit zero extension on dst. */
#define BPF_ZEXT_REG(DST) \
((struct bpf_insn) { \
@@ -180,6 +253,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
}
+/* addr_space_cast from as(0) to as(1) is for converting bpf arena pointers
+ * to pointers in user vma.
+ */
+static inline bool insn_is_cast_user(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
+ insn->off == BPF_ADDR_SPACE_CAST &&
+ insn->imm == 1U << 16;
+}
+
/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
BPF_LD_IMM64_RAW(DST, 0, IMM)
@@ -250,6 +333,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = OFF, \
.imm = 0 })
+/* Memory load, dst_reg = *(signed size *) (src_reg + off16) */
+
+#define BPF_LDX_MEMSX(SIZE, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEMSX, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
@@ -274,6 +367,8 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
* BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
* BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg)
* BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
+ * BPF_LOAD_ACQ dst_reg = smp_load_acquire(src_reg + off16)
+ * BPF_STORE_REL smp_store_release(dst_reg + off16, src_reg)
*/
#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \
@@ -347,6 +442,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = OFF, \
.imm = 0 })
+/* Unconditional jumps, gotol pc + imm32 */
+
+#define BPF_JMP32_A(IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_JA, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
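Because the jump distance rides in imm rather than the 16-bit off field, gotol can reach targets an ordinary BPF_JMP_A cannot; a sketch:

	struct bpf_insn gotol = BPF_JMP32_A(70000); /* +70000 insns, out of range for a 16-bit off */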
/* Relative call */
#define BPF_CALL_REL(TGT) \
@@ -357,10 +462,9 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = 0, \
.imm = TGT })
-/* Function call */
+/* Convert function address to BPF immediate */
-#define BPF_CAST_CALL(x) \
- ((u64 (*)(u64, u64, u64, u64, u64))(x))
+#define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base)
#define BPF_EMIT_CALL(FUNC) \
((struct bpf_insn) { \
@@ -368,7 +472,17 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
- .imm = ((FUNC) - __bpf_call_base) })
+ .imm = BPF_CALL_IMM(FUNC) })
+
+/* Kfunc call */
+
+#define BPF_CALL_KFUNC(OFF, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP | BPF_CALL, \
+ .dst_reg = 0, \
+ .src_reg = BPF_PSEUDO_KFUNC_CALL, \
+ .off = OFF, \
+ .imm = IMM })
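A hedged sketch of the encoding: imm carries the BTF ID of the target kfunc and off selects the BTF object (0 for vmlinux); kfunc_btf_id below is a placeholder, not a real symbol:

	struct bpf_insn call = BPF_CALL_KFUNC(0, kfunc_btf_id); /* kfunc_btf_id is hypothetical */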
/* Raw code statement block */
@@ -390,6 +504,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = 0, \
.imm = 0 })
+/* Speculation barrier */
+
+#define BPF_ST_NOSPEC() \
+ ((struct bpf_insn) { \
+ .code = BPF_ST | BPF_NOSPEC, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = 0 })
+
/* Internal classic blocks for direct assignment */
#define __BPF_STMT(CODE, K) \
@@ -483,24 +607,27 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
u64, __ur_3, u64, __ur_4, u64, __ur_5)
-#define BPF_CALL_x(x, name, ...) \
+#define BPF_CALL_x(x, attr, name, ...) \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
+ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
+ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
{ \
return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
} \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
-#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__)
-#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__)
-#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__)
-#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__)
-#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__)
-#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__)
+#define __NOATTR
+#define BPF_CALL_0(name, ...) BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_1(name, ...) BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_2(name, ...) BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_3(name, ...) BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_4(name, ...) BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_5(name, ...) BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__)
+
+#define NOTRACE_BPF_CALL_1(name, ...) BPF_CALL_x(1, notrace, name, __VA_ARGS__)
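Helpers are typically defined through these wrappers; a minimal sketch with a hypothetical helper name:

	BPF_CALL_2(bpf_example_helper, struct bpf_map *, map, u64, flags)
	{
		/* the body sees typed arguments; callers pass five u64 registers */
		return map ? 0 : -EINVAL;
	}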
#define bpf_ctx_range(TYPE, MEMBER) \
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
@@ -536,44 +663,20 @@ struct sock_fprog_kern {
#define BPF_IMAGE_ALIGNMENT 8
struct bpf_binary_header {
- u32 pages;
+ u32 size;
u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
};
struct bpf_prog_stats {
- u64 cnt;
- u64 nsecs;
- u64 misses;
+ u64_stats_t cnt;
+ u64_stats_t nsecs;
+ u64_stats_t misses;
struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
-struct bpf_prog {
- u16 pages; /* Number of allocated pages */
- u16 jited:1, /* Is our filter JIT'ed? */
- jit_requested:1,/* archs need to JIT the prog */
- gpl_compatible:1, /* Is filter GPL compatible? */
- cb_access:1, /* Is control block accessed? */
- dst_needed:1, /* Do we need dst entry? */
- blinded:1, /* Was blinded */
- is_func:1, /* program is a bpf function */
- kprobe_override:1, /* Do we override a kprobe? */
- has_callchain_buf:1, /* callchain buffer allocated? */
- enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
- call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */
- enum bpf_prog_type type; /* Type of BPF program */
- enum bpf_attach_type expected_attach_type; /* For some prog types */
- u32 len; /* Number of filter blocks */
- u32 jited_len; /* Size of jited insns in bytes */
- u8 tag[BPF_TAG_SIZE];
- struct bpf_prog_stats __percpu *stats;
- int __percpu *active;
- unsigned int (*bpf_func)(const void *ctx,
- const struct bpf_insn *insn);
- struct bpf_prog_aux *aux; /* Auxiliary fields */
- struct sock_fprog_kern *orig_prog; /* Original BPF program */
- /* Instructions for interpreter */
- struct sock_filter insns[0];
- struct bpf_insn insnsi[];
+struct bpf_timed_may_goto {
+ u64 count;
+ u64 timestamp;
};
struct sk_filter {
@@ -584,25 +687,48 @@ struct sk_filter {
DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
-#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \
- u32 __ret; \
- cant_migrate(); \
- if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
- struct bpf_prog_stats *__stats; \
- u64 __start = sched_clock(); \
- __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
- __stats = this_cpu_ptr(prog->stats); \
- u64_stats_update_begin(&__stats->syncp); \
- __stats->cnt++; \
- __stats->nsecs += sched_clock() - __start; \
- u64_stats_update_end(&__stats->syncp); \
- } else { \
- __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
- } \
- __ret; })
-
-#define BPF_PROG_RUN(prog, ctx) \
- __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func)
+extern struct mutex nf_conn_btf_access_lock;
+extern int (*nfct_btf_struct_access)(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size);
+
+typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
+ const struct bpf_insn *insnsi,
+ unsigned int (*bpf_func)(const void *,
+ const struct bpf_insn *));
+
+static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
+ const void *ctx,
+ bpf_dispatcher_fn dfunc)
+{
+ u32 ret;
+
+ cant_migrate();
+ if (static_branch_unlikely(&bpf_stats_enabled_key)) {
+ struct bpf_prog_stats *stats;
+ u64 duration, start = sched_clock();
+ unsigned long flags;
+
+ ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+
+ duration = sched_clock() - start;
+ if (likely(prog->stats)) {
+ stats = this_cpu_ptr(prog->stats);
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->cnt);
+ u64_stats_add(&stats->nsecs, duration);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
+ }
+ } else {
+ ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+ }
+ return ret;
+}
+
+static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
+{
+ return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
+}
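A usage sketch, assuming the usual convention that the caller holds an RCU read lock for the program's lifetime and disables migration around the run (prog and ctx assumed in scope):

	u32 ret;

	rcu_read_lock();
	migrate_disable();
	ret = bpf_prog_run(prog, ctx);
	migrate_enable();
	rcu_read_unlock();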
/*
* Use in preemptible and therefore migratable context to make sure that
@@ -611,9 +737,6 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
* This uses migrate_disable/enable() explicitly to document that the
* invocation of a BPF program does not require reentrancy protection
* against a BPF program which is invoked from a preempting task.
- *
- * For non RT enabled kernels migrate_disable/enable() maps to
- * preempt_disable/enable(), i.e. it disables also preemption.
*/
static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
const void *ctx)
@@ -621,7 +744,7 @@ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
u32 ret;
migrate_disable();
- ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func);
+ ret = bpf_prog_run(prog, ctx);
migrate_enable();
return ret;
}
@@ -642,20 +765,128 @@ struct bpf_nh_params {
};
};
+/* flags for bpf_redirect_info kern_flags */
+#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
+#define BPF_RI_F_RI_INIT BIT(1)
+#define BPF_RI_F_CPU_MAP_INIT BIT(2)
+#define BPF_RI_F_DEV_MAP_INIT BIT(3)
+#define BPF_RI_F_XSK_MAP_INIT BIT(4)
+
struct bpf_redirect_info {
- u32 flags;
- u32 tgt_index;
+ u64 tgt_index;
void *tgt_value;
+ struct bpf_map *map;
+ u32 flags;
u32 map_id;
enum bpf_map_type map_type;
- u32 kern_flags;
struct bpf_nh_params nh;
+ u32 kern_flags;
};
-DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
+struct bpf_net_context {
+ struct bpf_redirect_info ri;
+ struct list_head cpu_map_flush_list;
+ struct list_head dev_map_flush_list;
+ struct list_head xskmap_map_flush_list;
+};
-/* flags for bpf_redirect_info kern_flags */
-#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
+static inline struct bpf_net_context *bpf_net_ctx_set(struct bpf_net_context *bpf_net_ctx)
+{
+ struct task_struct *tsk = current;
+
+ if (tsk->bpf_net_context != NULL)
+ return NULL;
+ bpf_net_ctx->ri.kern_flags = 0;
+
+ tsk->bpf_net_context = bpf_net_ctx;
+ return bpf_net_ctx;
+}
+
+static inline void bpf_net_ctx_clear(struct bpf_net_context *bpf_net_ctx)
+{
+ if (bpf_net_ctx)
+ current->bpf_net_context = NULL;
+}
+
+static inline struct bpf_net_context *bpf_net_ctx_get(void)
+{
+ return current->bpf_net_context;
+}
+
+static inline struct bpf_redirect_info *bpf_net_ctx_get_ri(void)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_RI_INIT)) {
+ memset(&bpf_net_ctx->ri, 0, offsetof(struct bpf_net_context, ri.nh));
+ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_RI_INIT;
+ }
+
+ return &bpf_net_ctx->ri;
+}
+
+static inline struct list_head *bpf_net_ctx_get_cpu_map_flush_list(void)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_CPU_MAP_INIT)) {
+ INIT_LIST_HEAD(&bpf_net_ctx->cpu_map_flush_list);
+ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_CPU_MAP_INIT;
+ }
+
+ return &bpf_net_ctx->cpu_map_flush_list;
+}
+
+static inline struct list_head *bpf_net_ctx_get_dev_flush_list(void)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_DEV_MAP_INIT)) {
+ INIT_LIST_HEAD(&bpf_net_ctx->dev_map_flush_list);
+ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_DEV_MAP_INIT;
+ }
+
+ return &bpf_net_ctx->dev_map_flush_list;
+}
+
+static inline struct list_head *bpf_net_ctx_get_xskmap_flush_list(void)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+
+ if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_XSK_MAP_INIT)) {
+ INIT_LIST_HEAD(&bpf_net_ctx->xskmap_map_flush_list);
+ bpf_net_ctx->ri.kern_flags |= BPF_RI_F_XSK_MAP_INIT;
+ }
+
+ return &bpf_net_ctx->xskmap_map_flush_list;
+}
+
+static inline void bpf_net_ctx_get_all_used_flush_lists(struct list_head **lh_map,
+ struct list_head **lh_dev,
+ struct list_head **lh_xsk)
+{
+ struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
+ u32 kern_flags = bpf_net_ctx->ri.kern_flags;
+ struct list_head *lh;
+
+ *lh_map = *lh_dev = *lh_xsk = NULL;
+
+ if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
+ return;
+
+ lh = &bpf_net_ctx->dev_map_flush_list;
+ if (kern_flags & BPF_RI_F_DEV_MAP_INIT && !list_empty(lh))
+ *lh_dev = lh;
+
+ lh = &bpf_net_ctx->cpu_map_flush_list;
+ if (kern_flags & BPF_RI_F_CPU_MAP_INIT && !list_empty(lh))
+ *lh_map = lh;
+
+ lh = &bpf_net_ctx->xskmap_map_flush_list;
+ if (IS_ENABLED(CONFIG_XDP_SOCKETS) &&
+ kern_flags & BPF_RI_F_XSK_MAP_INIT && !list_empty(lh))
+ *lh_xsk = lh;
+}
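A minimal lifecycle sketch, modeled on how network-stack entry points scope the context to the current task:

	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;

	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
	/* run XDP/TC programs; redirect state lives in the on-stack context */
	bpf_net_ctx_clear(bpf_net_ctx);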
/* Compute the linear packet data range [data, data_end) which
* will be accessed by various program types (cls_bpf, act_bpf,
@@ -672,6 +903,26 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb)
cb->data_end = skb->data + skb_headlen(skb);
}
+static inline int bpf_prog_run_data_pointers(
+ const struct bpf_prog *prog,
+ struct sk_buff *skb)
+{
+ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
+ void *save_data_meta, *save_data_end;
+ int res;
+
+ save_data_meta = cb->data_meta;
+ save_data_end = cb->data_end;
+
+ bpf_compute_data_pointers(skb);
+ res = bpf_prog_run(prog, skb);
+
+ cb->data_meta = save_data_meta;
+ cb->data_end = save_data_end;
+
+ return res;
+}
+
/* Similar to bpf_compute_data_pointers(), except that it saves the original
* data in cb->data and cb->meta_data for restore.
*/
@@ -684,7 +935,7 @@ static inline void bpf_compute_and_save_data_end(
cb->data_end = skb->data + skb_headlen(skb);
}
-/* Restore data saved by bpf_compute_data_pointers(). */
+/* Restore data saved by bpf_compute_and_save_data_end(). */
static inline void bpf_restore_data_end(
struct sk_buff *skb, void *saved_data_end)
{
@@ -693,7 +944,7 @@ static inline void bpf_restore_data_end(
cb->data_end = saved_data_end;
}
-static inline u8 *bpf_skb_cb(struct sk_buff *skb)
+static inline u8 *bpf_skb_cb(const struct sk_buff *skb)
{
/* eBPF programs may read/write skb->cb[] area to transfer meta
* data between tail calls. Since this also needs to work with
@@ -714,8 +965,9 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
/* Must be invoked with migration disabled */
static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
- struct sk_buff *skb)
+ const void *ctx)
{
+ const struct sk_buff *skb = ctx;
u8 *cb_data = bpf_skb_cb(skb);
u8 cb_saved[BPF_SKB_CB_LEN];
u32 res;
@@ -725,7 +977,7 @@ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
memset(cb_data, 0, sizeof(cb_saved));
}
- res = BPF_PROG_RUN(prog, skb);
+ res = bpf_prog_run(prog, skb);
if (unlikely(prog->cb_access))
memcpy(cb_data, cb_saved, sizeof(cb_saved));
@@ -759,17 +1011,9 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
DECLARE_BPF_DISPATCHER(xdp)
-static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
- struct xdp_buff *xdp)
-{
- /* Caller needs to hold rcu_read_lock() (!), otherwise program
- * can be released while still running, or map elements could be
- * freed early while still having concurrent users. XDP fastpath
- * already takes rcu_read_lock() when fetching the program, so
- * it's not necessary here anymore.
- */
- return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
-}
+DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
+
+u32 xdp_master_redirect(struct xdp_buff *xdp);
void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
@@ -778,12 +1022,6 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
return prog->len * sizeof(struct bpf_insn);
}
-static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
-{
- return round_up(bpf_prog_insn_size(prog) +
- sizeof(__be64) + 1, SHA1_BLOCK_SIZE);
-}
-
static inline unsigned int bpf_prog_size(unsigned int proglen)
{
return max(sizeof(struct bpf_prog),
@@ -836,36 +1074,38 @@ bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
+static inline int __must_check bpf_prog_lock_ro(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
if (!fp->jited) {
set_vm_flush_reset_perms(fp);
- set_memory_ro((unsigned long)fp, fp->pages);
+ return set_memory_ro((unsigned long)fp, fp->pages);
}
#endif
+ return 0;
}
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
+static inline int __must_check
+bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
set_vm_flush_reset_perms(hdr);
- set_memory_ro((unsigned long)hdr, hdr->pages);
- set_memory_x((unsigned long)hdr, hdr->pages);
+ return set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
}
-static inline struct bpf_binary_header *
-bpf_jit_binary_hdr(const struct bpf_prog *fp)
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap,
+ enum skb_drop_reason *reason);
+
+static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
- unsigned long real_start = (unsigned long)fp->bpf_func;
- unsigned long addr = real_start & PAGE_MASK;
+ enum skb_drop_reason ignore_reason;
- return (void *)addr;
+ return sk_filter_trim_cap(sk, skb, 1, &ignore_reason);
}
-int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
-static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
+static inline int sk_filter_reason(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason *reason)
{
- return sk_filter_trim_cap(sk, skb, 1);
+ return sk_filter_trim_cap(sk, skb, 1, reason);
}
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
@@ -873,7 +1113,6 @@ void bpf_prog_free(struct bpf_prog *fp);
bool bpf_opcode_in_insntable(u8 code);
-void bpf_prog_free_linfo(struct bpf_prog *prog);
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
const u32 *insn_to_jit_off);
int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
@@ -904,8 +1143,7 @@ int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
-int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
- unsigned int len);
+int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len);
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
@@ -918,8 +1156,22 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_jit_needs_zext(void);
+bool bpf_jit_inlines_helper_call(s32 imm);
+bool bpf_jit_supports_subprog_tailcalls(void);
+bool bpf_jit_supports_percpu_insn(void);
bool bpf_jit_supports_kfunc_call(void);
-bool bpf_helper_changes_pkt_data(void *func);
+bool bpf_jit_supports_far_kfunc_call(void);
+bool bpf_jit_supports_exceptions(void);
+bool bpf_jit_supports_ptr_xchg(void);
+bool bpf_jit_supports_arena(void);
+bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
+bool bpf_jit_supports_private_stack(void);
+bool bpf_jit_supports_timed_may_goto(void);
+u64 bpf_arch_uaddress_limit(void);
+void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
+u64 arch_bpf_timed_may_goto(void);
+u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *);
+bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id);
static inline bool bpf_dump_raw_ok(const struct cred *cred)
{
@@ -933,25 +1185,23 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
-void bpf_clear_redirect_map(struct bpf_map *map);
-
static inline bool xdp_return_frame_no_direct(void)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
}
static inline void xdp_set_return_frame_no_direct(void)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
}
static inline void xdp_clear_return_frame_no_direct(void)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
}
@@ -978,28 +1228,29 @@ static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
* This does not appear to be a real limitation for existing software.
*/
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
- struct xdp_buff *xdp, struct bpf_prog *prog);
+ struct xdp_buff *xdp, const struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp,
- struct bpf_prog *prog);
+ const struct bpf_prog *prog);
+int xdp_do_redirect_frame(struct net_device *dev,
+ struct xdp_buff *xdp,
+ struct xdp_frame *xdpf,
+ const struct bpf_prog *prog);
void xdp_do_flush(void);
-/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
- * it is no longer only flushing maps. Keep this define for compatibility
- * until all drivers are updated - do not use xdp_do_flush_map() in new code!
- */
-#define xdp_do_flush_map xdp_do_flush
-
-void bpf_warn_invalid_xdp_action(u32 act);
+void bpf_warn_invalid_xdp_action(const struct net_device *dev,
+ const struct bpf_prog *prog, u32 act);
#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
struct bpf_prog *prog, struct sk_buff *skb,
+ struct sock *migrating_sk,
u32 hash);
#else
static inline struct sock *
bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
struct bpf_prog *prog, struct sk_buff *skb,
+ struct sock *migrating_sk,
u32 hash)
{
return NULL;
@@ -1011,9 +1262,12 @@ extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern long bpf_jit_limit;
+extern long bpf_jit_limit_max;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
+void bpf_jit_fill_hole_with_zero(void *area, unsigned int size);
+
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
@@ -1023,6 +1277,28 @@ u64 bpf_jit_alloc_exec_limit(void);
void *bpf_jit_alloc_exec(unsigned long size);
void bpf_jit_free_exec(void *addr);
void bpf_jit_free(struct bpf_prog *fp);
+struct bpf_binary_header *
+bpf_jit_binary_pack_hdr(const struct bpf_prog *fp);
+
+void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns);
+void bpf_prog_pack_free(void *ptr, u32 size);
+
+static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
+{
+ return list_empty(&fp->aux->ksym.lnode) ||
+ fp->aux->ksym.lnode.prev == LIST_POISON2;
+}
+
+struct bpf_binary_header *
+bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image,
+ unsigned int alignment,
+ struct bpf_binary_header **rw_hdr,
+ u8 **rw_image,
+ bpf_jit_fill_hole_t bpf_fill_ill_insns);
+int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
+ struct bpf_binary_header *rw_header);
+void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
+ struct bpf_binary_header *rw_header);
int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
struct bpf_jit_poke_descriptor *poke);
@@ -1031,13 +1307,15 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog,
const struct bpf_insn *insn, bool extra_pass,
u64 *func_addr, bool *func_addr_fixed);
+const char *bpf_jit_get_prog_name(struct bpf_prog *prog);
+
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
u32 pass, void *image)
{
- pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
+ pr_err("flen=%u proglen=%u pass=%u image=%p from=%s pid=%d\n", flen,
proglen, pass, image, current->comm, task_pid_nr(current));
if (image)
@@ -1076,7 +1354,7 @@ static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
return false;
if (!bpf_jit_harden)
return false;
- if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
+ if (bpf_jit_harden == 1 && bpf_token_capable(prog->aux->token, CAP_BPF))
return false;
return true;
@@ -1097,17 +1375,18 @@ static inline bool bpf_jit_kallsyms_enabled(void)
return false;
}
-const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
+int __bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym);
+struct bpf_prog *bpf_prog_ksym_find(unsigned long addr);
-static inline const char *
+static inline int
bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
- const char *ret = __bpf_address_lookup(addr, size, off, sym);
+ int ret = __bpf_address_lookup(addr, size, off, sym);
if (ret && modname)
*modname = NULL;
@@ -1151,11 +1430,11 @@ static inline bool bpf_jit_kallsyms_enabled(void)
return false;
}
-static inline const char *
+static inline int
__bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char *sym)
{
- return NULL;
+ return 0;
}
static inline bool is_bpf_text_address(unsigned long addr)
@@ -1169,11 +1448,16 @@ static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
return -ERANGE;
}
-static inline const char *
+static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
+{
+ return NULL;
+}
+
+static inline int
bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
- return NULL;
+ return 0;
}
static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
@@ -1253,13 +1537,14 @@ static inline int bpf_tell_extensions(void)
struct bpf_sock_addr_kern {
struct sock *sk;
- struct sockaddr *uaddr;
+ struct sockaddr_unsized *uaddr;
/* Temporary "register" to make indirect stores to nested structures
* defined above. We need three registers to make such a store, but
* only two (src and dst) are available at convert_ctx_access time
*/
u64 tmp_reg;
void *t_ctx; /* Attach type specific context. */
+ u32 uaddrlen;
};
struct bpf_sock_ops_kern {
@@ -1274,6 +1559,7 @@ struct bpf_sock_ops_kern {
void *skb_data_end;
u8 op;
u8 is_fullsock;
+ u8 is_locked_tcp_sock;
u8 remaining_opt_len;
u64 temp; /* temp and everything after is not
* initialized to 0 before calling
@@ -1288,7 +1574,7 @@ struct bpf_sock_ops_kern {
struct bpf_sysctl_kern {
struct ctl_table_header *head;
- struct ctl_table *table;
+ const struct ctl_table *table;
void *cur_val;
size_t cur_len;
void *new_val;
@@ -1312,7 +1598,10 @@ struct bpf_sockopt_kern {
s32 level;
s32 optname;
s32 optlen;
- s32 retval;
+ /* for retval in struct bpf_cg_run_ctx */
+ struct task_struct *current_task;
+ /* Temporary "register" for indirect stores to ppos. */
+ u64 tmp_reg;
};
int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
@@ -1331,6 +1620,7 @@ struct bpf_sk_lookup_kern {
const struct in6_addr *daddr;
} v6;
struct sock *selected_sk;
+ u32 ingress_ifindex;
bool no_reuseport;
};
@@ -1390,10 +1680,10 @@ extern struct static_key_false bpf_sk_lookup_enabled;
_all_pass || _selected_sk ? SK_PASS : SK_DROP; \
})
-static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
+static inline bool bpf_sk_lookup_run_v4(const struct net *net, int protocol,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 dport,
- struct sock **psk)
+ const int ifindex, struct sock **psk)
{
struct bpf_prog_array *run_array;
struct sock *selected_sk = NULL;
@@ -1409,10 +1699,11 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
.v4.daddr = daddr,
.sport = sport,
.dport = dport,
+ .ingress_ifindex = ifindex,
};
u32 act;
- act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
+ act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
if (act == SK_PASS) {
selected_sk = ctx.selected_sk;
no_reuseport = ctx.no_reuseport;
@@ -1426,12 +1717,12 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
}
#if IS_ENABLED(CONFIG_IPV6)
-static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
+static inline bool bpf_sk_lookup_run_v6(const struct net *net, int protocol,
const struct in6_addr *saddr,
const __be16 sport,
const struct in6_addr *daddr,
const u16 dport,
- struct sock **psk)
+ const int ifindex, struct sock **psk)
{
struct bpf_prog_array *run_array;
struct sock *selected_sk = NULL;
@@ -1447,10 +1738,11 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
.v6.daddr = daddr,
.sport = sport,
.dport = dport,
+ .ingress_ifindex = ifindex,
};
u32 act;
- act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
+ act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
if (act == SK_PASS) {
selected_sk = ctx.selected_sk;
no_reuseport = ctx.no_reuseport;
@@ -1464,17 +1756,19 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
-static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex, u64 flags,
- void *lookup_elem(struct bpf_map *map, u32 key))
+static __always_inline long __bpf_xdp_redirect_map(struct bpf_map *map, u64 index,
+ u64 flags, const u64 flag_mask,
+ void *lookup_elem(struct bpf_map *map, u32 key))
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
+ const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
/* Lower bits of the flags are used as return code on lookup failure */
- if (unlikely(flags > XDP_TX))
+ if (unlikely(flags & ~(action_mask | flag_mask)))
return XDP_ABORTED;
- ri->tgt_value = lookup_elem(map, ifindex);
- if (unlikely(!ri->tgt_value)) {
+ ri->tgt_value = lookup_elem(map, index);
+ if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) {
/* If the lookup fails we want to clear out the state in the
* redirect_info struct completely, so that if an eBPF program
* performs multiple lookups, the last one always takes
@@ -1482,14 +1776,82 @@ static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifind
*/
ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */
ri->map_type = BPF_MAP_TYPE_UNSPEC;
- return flags;
+ return flags & action_mask;
}
- ri->tgt_index = ifindex;
+ ri->tgt_index = index;
ri->map_id = map->id;
ri->map_type = map->map_type;
+ if (flags & BPF_F_BROADCAST) {
+ WRITE_ONCE(ri->map, map);
+ ri->flags = flags;
+ } else {
+ WRITE_ONCE(ri->map, NULL);
+ ri->flags = 0;
+ }
+
return XDP_REDIRECT;
}
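A hedged sketch of a per-map-type wrapper built on this primitive, modeled on the devmap redirect helper (__dev_map_lookup_elem is assumed to be the matching lookup function):

	static long example_redirect_map(struct bpf_map *map, u64 index, u64 flags)
	{
		return __bpf_xdp_redirect_map(map, index, flags,
					      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
					      __dev_map_lookup_elem);
	}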
+#ifdef CONFIG_NET
+int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len);
+int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from,
+ u32 len, u64 flags);
+int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len);
+int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len);
+void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len);
+void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off,
+ void *buf, unsigned long len, bool flush);
+int __bpf_skb_meta_store_bytes(struct sk_buff *skb, u32 offset,
+ const void *from, u32 len, u64 flags);
+void *bpf_skb_meta_pointer(struct sk_buff *skb, u32 offset);
+#else /* CONFIG_NET */
+static inline int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset,
+ void *to, u32 len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset,
+ const void *from, u32 len, u64 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset,
+ void *buf, u32 len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset,
+ void *buf, u32 len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len)
+{
+ return NULL;
+}
+
+static inline void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *buf,
+ unsigned long len, bool flush)
+{
+}
+
+static inline int __bpf_skb_meta_store_bytes(struct sk_buff *skb, u32 offset,
+ const void *from, u32 len,
+ u64 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void *bpf_skb_meta_pointer(struct sk_buff *skb, u32 offset)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif /* CONFIG_NET */
+
#endif /* __LINUX_FILTER_H__ */
diff --git a/include/linux/find.h b/include/linux/find.h
new file mode 100644
index 000000000000..9d720ad92bc1
--- /dev/null
+++ b/include/linux/find.h
@@ -0,0 +1,697 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_FIND_H_
+#define __LINUX_FIND_H_
+
+#ifndef __LINUX_BITMAP_H
+#error only <linux/bitmap.h> can be included directly
+#endif
+
+#include <linux/bitops.h>
+
+unsigned long _find_next_bit(const unsigned long *addr1, unsigned long nbits,
+ unsigned long start);
+unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_or_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
+ unsigned long start);
+extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
+unsigned long __find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n);
+unsigned long __find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n);
+unsigned long __find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n);
+unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ const unsigned long *addr3, unsigned long size,
+ unsigned long n);
+extern unsigned long _find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size);
+unsigned long _find_first_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size);
+unsigned long _find_first_and_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ const unsigned long *addr3, unsigned long size);
+extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
+extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
+
+#ifdef __BIG_ENDIAN
+unsigned long _find_first_zero_bit_le(const unsigned long *addr, unsigned long size);
+unsigned long _find_next_zero_bit_le(const unsigned long *addr,
+				     unsigned long size, unsigned long offset);
+unsigned long _find_next_bit_le(const unsigned long *addr,
+				unsigned long size, unsigned long offset);
+#endif
+
+unsigned long find_random_bit(const unsigned long *addr, unsigned long size);
+
+#ifndef find_next_bit
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_bit(addr, size, offset);
+}
+#endif
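A usage sketch:

	DECLARE_BITMAP(mask, 64);
	unsigned long bit;

	bitmap_zero(mask, 64);
	__set_bit(3, mask);
	bit = find_next_bit(mask, 64, 0); /* returns 3 */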
+
+#ifndef find_next_and_bit
+/**
+ * find_next_and_bit - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_next_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr1 & *addr2 & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_and_bit(addr1, addr2, size, offset);
+}
+#endif
+
+#ifndef find_next_andnot_bit
+/**
+ * find_next_andnot_bit - find the next set bit in *addr1 excluding all the bits
+ * in *addr2
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_next_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr1 & ~*addr2 & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_andnot_bit(addr1, addr2, size, offset);
+}
+#endif
+
+#ifndef find_next_or_bit
+/**
+ * find_next_or_bit - find the next set bit in either memory region
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_next_or_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = (*addr1 | *addr2) & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_or_bit(addr1, addr2, size, offset);
+}
+#endif
+
+#ifndef find_next_zero_bit
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number of the next zero bit
+ * If no bits are zero, returns @size.
+ */
+static __always_inline
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr | ~GENMASK(size - 1, offset);
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_next_zero_bit(addr, size, offset);
+}
+#endif
+
+#ifndef find_first_bit
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first set bit.
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_bit(addr, size);
+}
+#endif
+
+/**
+ * find_nth_bit - find N'th set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index (counting from 0) of the set bit whose position is needed
+ *
+ * The following are semantically equivalent:
+ * idx = find_nth_bit(addr, size, 0);
+ * idx = find_first_bit(addr, size);
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns >= @size.
+ */
+static __always_inline
+unsigned long find_nth_bit(const unsigned long *addr, unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_bit(addr, size, n);
+}
+
+/**
+ * find_nth_and_bit - find N'th set bit in 2 memory regions
+ * @addr1: The 1st address to start the search at
+ * @addr2: The 2nd address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index (counting from 0) of the set bit whose position is needed
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static __always_inline
+unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_and_bit(addr1, addr2, size, n);
+}
+
+/**
+ * find_nth_and_andnot_bit - find N'th set bit in 2 memory regions,
+ * excluding those set in 3rd region
+ * @addr1: The 1st address to start the search at
+ * @addr2: The 2nd address to start the search at
+ * @addr3: The 3rd address to start the search at
+ * @size: The maximum number of bits to search
+ * @n: The index (counting from 0) of the set bit whose position is needed
+ *
+ * Returns the bit number of the N'th set bit.
+ * If no such bit exists, returns @size.
+ */
+static __always_inline
+unsigned long find_nth_and_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ const unsigned long *addr3,
+ unsigned long size, unsigned long n)
+{
+ if (n >= size)
+ return size;
+
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & (~*addr3) & GENMASK(size - 1, 0);
+
+ return val ? fns(val, n) : size;
+ }
+
+ return __find_nth_and_andnot_bit(addr1, addr2, addr3, size, n);
+}
+
+#ifndef find_first_and_bit
+/**
+ * find_first_and_bit - find the first set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the first set bit
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_and_bit(addr1, addr2, size);
+}
+#endif
+
+/**
+ * find_first_andnot_bit - find the first bit set in 1st memory region and unset in 2nd
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the first set bit
+ * If no bits are set, returns >= @size.
+ */
+static __always_inline
+unsigned long find_first_andnot_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & (~*addr2) & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_andnot_bit(addr1, addr2, size);
+}
+
+/**
+ * find_first_and_and_bit - find the first set bit in 3 memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @addr3: The third address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the first set bit
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_first_and_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ const unsigned long *addr3,
+ unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & *addr3 & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_and_and_bit(addr1, addr2, addr3, size);
+}
+
+#ifndef find_first_zero_bit
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first cleared bit.
+ * If no bits are zero, returns @size.
+ */
+static __always_inline
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr | ~GENMASK(size - 1, 0);
+
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_first_zero_bit(addr, size);
+}
+#endif
+
+#ifndef find_last_bit
+/**
+ * find_last_bit - find the last set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The number of bits to search
+ *
+ * Returns the bit number of the last set bit, or @size if no bits are set.
+ */
+static __always_inline
+unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? __fls(val) : size;
+ }
+
+ return _find_last_bit(addr, size);
+}
+#endif
+
+/**
+ * find_next_and_bit_wrap - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit, or first set bit up to @offset
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_next_and_bit_wrap(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size, unsigned long offset)
+{
+ unsigned long bit = find_next_and_bit(addr1, addr2, size, offset);
+
+ if (bit < size || offset == 0)
+ return bit;
+
+ bit = find_first_and_bit(addr1, addr2, offset);
+ return bit < offset ? bit : size;
+}
+
+/**
+ * find_next_bit_wrap - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit, or first set bit up to @offset
+ * If no bits are set, returns @size.
+ */
+static __always_inline
+unsigned long find_next_bit_wrap(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ unsigned long bit = find_next_bit(addr, size, offset);
+
+ if (bit < size || offset == 0)
+ return bit;
+
+ bit = find_first_bit(addr, offset);
+ return bit < offset ? bit : size;
+}
+
+/*
+ * Helper for for_each_set_bit_wrap(). Make sure you're doing the right thing
+ * before using it alone.
+ */
+static __always_inline
+unsigned long __for_each_wrap(const unsigned long *bitmap, unsigned long size,
+ unsigned long start, unsigned long n)
+{
+ unsigned long bit;
+
+ /* If not wrapped around */
+ if (n > start) {
+ /* and have a bit, just return it. */
+ bit = find_next_bit(bitmap, size, n);
+ if (bit < size)
+ return bit;
+
+ /* Otherwise, wrap around and ... */
+ n = 0;
+ }
+
+ /* Search the other part. */
+ bit = find_next_bit(bitmap, start, n);
+ return bit < start ? bit : size;
+}
+
+/**
+ * find_next_clump8 - find next 8-bit clump with set bits in a memory region
+ * @clump: location to store copy of found clump
+ * @addr: address to base the search on
+ * @size: bitmap size in number of bits
+ * @offset: bit offset at which to start searching
+ *
+ * Returns the bit offset for the next set clump; the found clump value is
+ * copied to the location pointed by @clump. If no bits are set, returns @size.
+ */
+extern unsigned long find_next_clump8(unsigned long *clump,
+ const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+#define find_first_clump8(clump, bits, size) \
+ find_next_clump8((clump), (bits), (size), 0)
+
+#if defined(__LITTLE_ENDIAN)
+
+static __always_inline
+unsigned long find_next_zero_bit_le(const void *addr, unsigned long size, unsigned long offset)
+{
+ return find_next_zero_bit(addr, size, offset);
+}
+
+static __always_inline
+unsigned long find_next_bit_le(const void *addr, unsigned long size, unsigned long offset)
+{
+ return find_next_bit(addr, size, offset);
+}
+
+static __always_inline
+unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
+{
+ return find_first_zero_bit(addr, size);
+}
+
+#elif defined(__BIG_ENDIAN)
+
+#ifndef find_next_zero_bit_le
+static __always_inline
+unsigned long find_next_zero_bit_le(const void *addr, unsigned long size,
+				    unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *(const unsigned long *)addr;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = swab(val) | ~GENMASK(size - 1, offset);
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_next_zero_bit_le(addr, size, offset);
+}
+#endif
+
+#ifndef find_first_zero_bit_le
+static __always_inline
+unsigned long find_first_zero_bit_le(const void *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = swab(*(const unsigned long *)addr) | ~GENMASK(size - 1, 0);
+
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_first_zero_bit_le(addr, size);
+}
+#endif
+
+#ifndef find_next_bit_le
+static __always_inline
+unsigned long find_next_bit_le(const void *addr, unsigned long size,
+			       unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *(const unsigned long *)addr;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = swab(val) & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_bit_le(addr, size, offset);
+}
+#endif
+
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = 0; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
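Typical iteration over a bitmap, as a sketch (bitmap and nbits assumed in scope):

	unsigned long bit;

	for_each_set_bit(bit, bitmap, nbits)
		pr_info("bit %lu is set\n", bit);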
+
+#define for_each_and_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_and_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
+
+#define for_each_andnot_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_andnot_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
+
+#define for_each_or_bit(bit, addr1, addr2, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_or_bit((addr1), (addr2), (size), (bit)), (bit) < (size);\
+ (bit)++)
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+ for (; (bit) = find_next_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
+
+#define for_each_clear_bit(bit, addr, size) \
+ for ((bit) = 0; \
+ (bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); \
+ (bit)++)
+
+/* same as for_each_clear_bit() but use bit as value to start with */
+#define for_each_clear_bit_from(bit, addr, size) \
+ for (; (bit) = find_next_zero_bit((addr), (size), (bit)), (bit) < (size); (bit)++)
+
+/**
+ * for_each_set_bitrange - iterate over all set bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first set bit)
+ * @e: bit offset of end of current bitrange (first unset bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_bitrange(b, e, addr, size) \
+ for ((b) = 0; \
+ (b) = find_next_bit((addr), (size), b), \
+ (e) = find_next_zero_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
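For example, printing every contiguous run of set bits (bitmap and nbits assumed in scope):

	unsigned long b, e;

	for_each_set_bitrange(b, e, bitmap, nbits)
		pr_info("set range [%lu, %lu)\n", b, e);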
+
+/**
+ * for_each_set_bitrange_from - iterate over all set bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first set bit); must be initialized
+ * @e: bit offset of end of current bitrange (first unset bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_bitrange_from(b, e, addr, size) \
+ for (; \
+ (b) = find_next_bit((addr), (size), (b)), \
+ (e) = find_next_zero_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_clear_bitrange - iterate over all unset bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first unset bit)
+ * @e: bit offset of end of current bitrange (first set bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_clear_bitrange(b, e, addr, size) \
+ for ((b) = 0; \
+ (b) = find_next_zero_bit((addr), (size), (b)), \
+ (e) = find_next_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_clear_bitrange_from - iterate over all unset bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first unset bit); must be initialized
+ * @e: bit offset of end of current bitrange (first set bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_clear_bitrange_from(b, e, addr, size) \
+ for (; \
+ (b) = find_next_zero_bit((addr), (size), (b)), \
+ (e) = find_next_bit((addr), (size), (b) + 1), \
+ (b) < (size); \
+ (b) = (e) + 1)
+
+/**
+ * for_each_set_bit_wrap - iterate over all set bits starting from @start, and
+ * wrapping around the end of the bitmap.
+ * @bit: offset for current iteration
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ * @start: Starting bit for bitmap traversing, wrapping around the bitmap end
+ */
+#define for_each_set_bit_wrap(bit, addr, size, start) \
+ for ((bit) = find_next_bit_wrap((addr), (size), (start)); \
+ (bit) < (size); \
+ (bit) = __for_each_wrap((addr), (size), (start), (bit) + 1))
+
+/**
+ * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
+ * @start: bit offset to start search and to store the current iteration offset
+ * @clump: location to store copy of current 8-bit clump
+ * @bits: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_clump8(start, clump, bits, size) \
+ for ((start) = find_first_clump8(&(clump), (bits), (size)); \
+ (start) < (size); \
+ (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
+
+#endif /*__LINUX_FIND_H_ */
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index aec8f30ab200..6143b7d28eac 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -75,7 +75,7 @@ void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
int fw_csr_string(const u32 *directory, int key, char *buf, size_t size);
-extern struct bus_type fw_bus_type;
+extern const struct bus_type fw_bus_type;
struct fw_card_driver;
struct fw_node;
@@ -88,23 +88,30 @@ struct fw_card {
int node_id;
int generation;
- int current_tlabel;
- u64 tlabel_mask;
- struct list_head transaction_list;
u64 reset_jiffies;
- u32 split_timeout_hi;
- u32 split_timeout_lo;
- unsigned int split_timeout_cycles;
- unsigned int split_timeout_jiffies;
+ struct {
+ int current_tlabel;
+ u64 tlabel_mask;
+ struct list_head list;
+ spinlock_t lock;
+ } transactions;
+
+ struct {
+ u32 hi;
+ u32 lo;
+ unsigned int cycles;
+ unsigned int jiffies;
+ spinlock_t lock;
+ } split_timeout;
unsigned long long guid;
unsigned max_receive;
int link_speed;
int config_rom_generation;
- spinlock_t lock; /* Take this lock when handling the lists in
- * this struct. */
+ spinlock_t lock;
+
struct fw_node *local_node;
struct fw_node *root_node;
struct fw_node *irm_node;
@@ -115,8 +122,6 @@ struct fw_card {
int index;
struct list_head link;
- struct list_head phy_receiver_list;
-
struct delayed_work br_work; /* bus reset job */
bool br_short;
@@ -131,9 +136,16 @@ struct fw_card {
bool broadcast_channel_allocated;
u32 broadcast_channel;
- __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
+
+ struct {
+ __be32 buffer[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
+ spinlock_t lock;
+ } topology_map;
__be32 maint_utility_register;
+
+ struct workqueue_struct *isoc_wq;
+ struct workqueue_struct *async_wq;
};
static inline struct fw_card *fw_card_get(struct fw_card *card)
@@ -150,12 +162,28 @@ static inline void fw_card_put(struct fw_card *card)
kref_put(&card->kref, fw_card_release);
}
+int fw_card_read_cycle_time(struct fw_card *card, u32 *cycle_time);
+
struct fw_attribute_group {
struct attribute_group *groups[2];
struct attribute_group group;
struct attribute *attrs[13];
};
+enum fw_device_quirk {
+ // See afa1282a35d3 ("firewire: core: check for 1394a compliant IRM, fix inaccessibility of Sony camcorder").
+ FW_DEVICE_QUIRK_IRM_IS_1394_1995_ONLY = BIT(0),
+
+ // See a509e43ff338 ("firewire: core: fix unstable I/O with Canon camcorder").
+ FW_DEVICE_QUIRK_IRM_IGNORES_BUS_MANAGER = BIT(1),
+
+	// MOTU Audio Express transfers acknowledge packets with 0x10 for the pending state.
+ FW_DEVICE_QUIRK_ACK_PACKET_WITH_INVALID_PENDING_CODE = BIT(2),
+
+	// TASCAM FW-1082/FW-1804/FW-1884 often freeze when receiving S400 packets.
+ FW_DEVICE_QUIRK_UNSTABLE_AT_S400 = BIT(3),
+};
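A hedged sketch of how a quirk bit might be consulted; the speed clamp below is illustrative, not taken from the driver:

	if (device->quirks & FW_DEVICE_QUIRK_UNSTABLE_AT_S400)
		max_speed = min(max_speed, (int)SCODE_200); /* illustrative clamp */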
+
enum fw_device_state {
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING,
@@ -189,6 +217,9 @@ struct fw_device {
struct fw_card *card;
struct device device;
+ // A set of enum fw_device_quirk.
+ int quirks;
+
struct mutex client_list_mutex;
struct list_head client_list;
@@ -206,10 +237,7 @@ struct fw_device {
struct fw_attribute_group attribute_group;
};
-static inline struct fw_device *fw_device(struct device *dev)
-{
- return container_of(dev, struct fw_device, device);
-}
+#define fw_device(dev) container_of_const(dev, struct fw_device, device)
static inline int fw_device_is_shutdown(struct fw_device *device)
{
@@ -227,10 +255,7 @@ struct fw_unit {
struct fw_attribute_group attribute_group;
};
-static inline struct fw_unit *fw_unit(struct device *dev)
-{
- return container_of(dev, struct fw_unit, device);
-}
+#define fw_unit(dev) container_of_const(dev, struct fw_unit, device)
static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
{
@@ -244,10 +269,7 @@ static inline void fw_unit_put(struct fw_unit *unit)
put_device(&unit->device);
}
-static inline struct fw_device *fw_parent_device(struct fw_unit *unit)
-{
- return fw_device(unit->device.parent);
-}
+#define fw_parent_device(unit) fw_device(unit->device.parent)
struct ieee1394_device_id;
@@ -268,6 +290,15 @@ typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
void *data, size_t length,
void *callback_data);
+typedef void (*fw_transaction_callback_with_tstamp_t)(struct fw_card *card, int rcode,
+ u32 request_tstamp, u32 response_tstamp, void *data,
+ size_t length, void *callback_data);
+
+union fw_transaction_callback {
+ fw_transaction_callback_t without_tstamp;
+ fw_transaction_callback_with_tstamp_t with_tstamp;
+};
+
/*
* This callback handles an inbound request subaction. It is called in
* RCU read-side context, therefore must not sleep.
@@ -276,9 +307,8 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
* Otherwise there is a danger of recursion of inbound and outbound
* transactions from and to the local node.
*
- * The callback is responsible that either fw_send_response() or kfree()
- * is called on the @request, except for FCP registers for which the core
- * takes care of that.
+ * The callback is responsible for calling fw_send_response() on the @request, except for FCP
+ * registers, for which the core takes care of that.
*/
typedef void (*fw_address_callback_t)(struct fw_card *card,
struct fw_request *request,
@@ -304,8 +334,7 @@ struct fw_packet {
* For successful transmission, the status code is the ack received
* from the destination. Otherwise it is one of the juju-specific
* rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK.
- * The callback can be called from tasklet context and thus
- * must never block.
+ * The callback can be called from workqueue and thus must never block.
*/
fw_packet_callback_t callback;
int ack;
@@ -320,6 +349,7 @@ struct fw_transaction {
struct fw_card *card;
bool is_split_transaction;
struct timer_list split_timeout_timer;
+ u32 split_timeout_cycle;
struct fw_packet packet;
@@ -327,7 +357,8 @@ struct fw_transaction {
* The data passed to the callback is valid only during the
* callback.
*/
- fw_transaction_callback_t callback;
+ union fw_transaction_callback callback;
+ bool with_tstamp;
void *callback_data;
};
@@ -336,7 +367,11 @@ struct fw_address_handler {
u64 length;
fw_address_callback_t address_callback;
void *callback_data;
+
+ // Only for core functions.
struct list_head link;
+ struct kref kref;
+ struct completion done;
};
struct fw_address_region {
@@ -352,10 +387,80 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler);
void fw_send_response(struct fw_card *card,
struct fw_request *request, int rcode);
int fw_get_request_speed(struct fw_request *request);
-void fw_send_request(struct fw_card *card, struct fw_transaction *t,
- int tcode, int destination_id, int generation, int speed,
- unsigned long long offset, void *payload, size_t length,
- fw_transaction_callback_t callback, void *callback_data);
+u32 fw_request_get_timestamp(const struct fw_request *request);
+
+void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
+ int destination_id, int generation, int speed, unsigned long long offset,
+ void *payload, size_t length, union fw_transaction_callback callback,
+ bool with_tstamp, void *callback_data);
+
+/**
+ * fw_send_request() - submit a request packet for transmission, generating a callback for the
+ * response subaction without a time stamp.
+ * @card: interface to send the request at
+ * @t: transaction instance to which the request belongs
+ * @tcode: transaction code
+ * @destination_id: destination node ID, consisting of bus_ID and phy_ID
+ * @generation: bus generation in which request and response are valid
+ * @speed: transmission speed
+ * @offset: 48bit wide offset into destination's address space
+ * @payload: data payload for the request subaction
+ * @length: length of the payload, in bytes
+ * @callback: function to be called when the transaction is completed
+ * @callback_data: data to be passed to the transaction completion callback
+ *
+ * A variation of __fw_send_request() that generates a callback for the response subaction without
+ * a time stamp.
+ *
+ * The callback is invoked in the workqueue context in most cases. However, if an error is detected
+ * before queueing or the destination address refers to the local node, it is invoked in the
+ * current context instead.
+ */
+static inline void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
+ int destination_id, int generation, int speed,
+ unsigned long long offset, void *payload, size_t length,
+ fw_transaction_callback_t callback, void *callback_data)
+{
+ union fw_transaction_callback cb = {
+ .without_tstamp = callback,
+ };
+ __fw_send_request(card, t, tcode, destination_id, generation, speed, offset, payload,
+ length, cb, false, callback_data);
+}
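For reference, a hedged sketch of issuing an asynchronous quadlet write through this wrapper; the completion handler, driver structure, and register offset are assumptions made for the example:

static void my_write_done(struct fw_card *card, int rcode, void *data,
			  size_t length, void *callback_data)
{
	struct my_driver_data *priv = callback_data;

	if (rcode != RCODE_COMPLETE)
		dev_err(priv->dev, "write failed, rcode %d\n", rcode);
}

static void my_write_quadlet(struct my_driver_data *priv, int node_id,
			     int generation)
{
	__be32 value = cpu_to_be32(0x00000001);

	/* For quadlet writes the 4-byte payload is read during this call,
	 * so a stack buffer suffices in this sketch. */
	fw_send_request(priv->card, &priv->transaction,
			TCODE_WRITE_QUADLET_REQUEST, node_id, generation,
			SCODE_100, MY_REGISTER_OFFSET /* assumed */, &value,
			sizeof(value), my_write_done, priv);
}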
+
+/**
+ * fw_send_request_with_tstamp() - submit a request packet for transmission, generating a callback
+ * for the response subaction with time stamps.
+ * @card: interface to send the request at
+ * @t: transaction instance to which the request belongs
+ * @tcode: transaction code
+ * @destination_id: destination node ID, consisting of bus_ID and phy_ID
+ * @generation: bus generation in which request and response are valid
+ * @speed: transmission speed
+ * @offset: 48bit wide offset into destination's address space
+ * @payload: data payload for the request subaction
+ * @length: length of the payload, in bytes
+ * @callback: function to be called when the transaction is completed
+ * @callback_data: data to be passed to the transaction completion callback
+ *
+ * A variation of __fw_send_request() that generates a callback for the response subaction with time stamps.
+ *
+ * The callback is invoked in the workqueue context in most cases. However, if an error is detected
+ * before queueing or the destination address refers to the local node, it is invoked in the current
+ * context instead.
+ */
+static inline void fw_send_request_with_tstamp(struct fw_card *card, struct fw_transaction *t,
+ int tcode, int destination_id, int generation, int speed, unsigned long long offset,
+ void *payload, size_t length, fw_transaction_callback_with_tstamp_t callback,
+ void *callback_data)
+{
+ union fw_transaction_callback cb = {
+ .with_tstamp = callback,
+ };
+ __fw_send_request(card, t, tcode, destination_id, generation, speed, offset, payload,
+ length, cb, true, callback_data);
+}
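The timestamped variant differs only in the callback signature: the extra u32 arguments carry the hardware time stamps of the request and response subactions. A sketch of a matching handler (names assumed):

static void my_read_done(struct fw_card *card, int rcode, u32 request_tstamp,
			 u32 response_tstamp, void *data, size_t length,
			 void *callback_data)
{
	/* The difference between the two cycle-time values approximates
	 * the on-bus round-trip time of the transaction. */
}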
+
int fw_cancel_transaction(struct fw_card *card,
struct fw_transaction *transaction);
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
@@ -397,8 +502,8 @@ struct fw_iso_packet {
/* rx: Sync bit, wait for matching sy */
u32 tag:2; /* tx: Tag in packet header */
u32 sy:4; /* tx: Sy in packet header */
- u32 header_length:8; /* Length of immediate header */
- u32 header[0]; /* tx: Top of 1394 isoch. data_block */
+ u32 header_length:8; /* Size of immediate header */
+ u32 header[]; /* tx: Top of 1394 isoch. data_block */
};
#define FW_ISO_CONTEXT_TRANSMIT 0
@@ -436,17 +541,21 @@ typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
void *header, void *data);
typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context,
dma_addr_t completed, void *data);
+
+union fw_iso_callback {
+ fw_iso_callback_t sc;
+ fw_iso_mc_callback_t mc;
+};
+
struct fw_iso_context {
struct fw_card *card;
+ struct work_struct work;
int type;
int channel;
int speed;
bool drop_overflow_headers;
size_t header_size;
- union {
- fw_iso_callback_t sc;
- fw_iso_mc_callback_t mc;
- } callback;
+ union fw_iso_callback callback;
void *callback_data;
};
@@ -460,6 +569,25 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
unsigned long payload);
void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
+
+/**
+ * fw_iso_context_schedule_flush_completions() - schedule work item to process isochronous context.
+ * @ctx: the isochronous context
+ *
+ * Schedule a work item on workqueue to process the isochronous context. The registered callback
+ * function is called by the worker when a queued packet buffer with the interrupt flag is
+ * completed, either after transmission in the IT context or after being filled in the IR context.
+ * The callback function is also called when the header buffer in the context becomes full, If it
+ * is required to process the context in the current context, fw_iso_context_flush_completions() is
+ * available instead.
+ *
+ * Context: Any context.
+ */
+static inline void fw_iso_context_schedule_flush_completions(struct fw_iso_context *ctx)
+{
+ queue_work(ctx->card->isoc_wq, &ctx->work);
+}
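For example, a driver running in a context that must not block can defer the completion processing instead of flushing synchronously; a minimal sketch:

	/* Runs the registered callback later on card->isoc_wq rather
	 * than in the current context. */
	fw_iso_context_schedule_flush_completions(ctx);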
+
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags);
int fw_iso_context_stop(struct fw_iso_context *ctx);
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 84e346ae766e..aae1b85ffc10 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -4,10 +4,11 @@
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/cleanup.h>
#include <linux/gfp.h>
-#define FW_ACTION_NOHOTPLUG 0
-#define FW_ACTION_HOTPLUG 1
+#define FW_ACTION_NOUEVENT 0
+#define FW_ACTION_UEVENT 1
struct firmware {
size_t size;
@@ -17,30 +18,90 @@ struct firmware {
void *priv;
};
-struct module;
-struct device;
+/**
+ * enum fw_upload_err - firmware upload error codes
+ * @FW_UPLOAD_ERR_NONE: returned to indicate success
+ * @FW_UPLOAD_ERR_HW_ERROR: error signalled by hardware, see kernel log
+ * @FW_UPLOAD_ERR_TIMEOUT: SW timed out on handshake with HW/firmware
+ * @FW_UPLOAD_ERR_CANCELED: upload was cancelled by the user
+ * @FW_UPLOAD_ERR_BUSY: there is an upload operation already in progress
+ * @FW_UPLOAD_ERR_INVALID_SIZE: invalid firmware image size
+ * @FW_UPLOAD_ERR_RW_ERROR: read or write to HW failed, see kernel log
+ * @FW_UPLOAD_ERR_WEAROUT: FLASH device is approaching wear-out, wait & retry
+ * @FW_UPLOAD_ERR_FW_INVALID: invalid firmware file
+ * @FW_UPLOAD_ERR_MAX: Maximum error code marker
+ */
+enum fw_upload_err {
+ FW_UPLOAD_ERR_NONE,
+ FW_UPLOAD_ERR_HW_ERROR,
+ FW_UPLOAD_ERR_TIMEOUT,
+ FW_UPLOAD_ERR_CANCELED,
+ FW_UPLOAD_ERR_BUSY,
+ FW_UPLOAD_ERR_INVALID_SIZE,
+ FW_UPLOAD_ERR_RW_ERROR,
+ FW_UPLOAD_ERR_WEAROUT,
+ FW_UPLOAD_ERR_FW_INVALID,
+ FW_UPLOAD_ERR_MAX
+};
-struct builtin_fw {
- char *name;
- void *data;
- unsigned long size;
+struct fw_upload {
+ void *dd_handle; /* reference to parent driver */
+ void *priv; /* firmware loader private fields */
};
-/* We have to play tricks here much like stringify() to get the
- __COUNTER__ macro to be expanded as we want it */
-#define __fw_concat1(x, y) x##y
-#define __fw_concat(x, y) __fw_concat1(x, y)
+/**
+ * struct fw_upload_ops - device specific operations to support firmware upload
+ * @prepare: Required: Prepare secure update
+ * @write: Required: The write() op receives the remaining
+ * size to be written and must return the actual
+ * size written or a negative error code. The write()
+ * op will be called repeatedly until all data is
+ * written.
+ * @poll_complete: Required: Check for the completion of the
+ * HW authentication/programming process.
+ * @cancel: Required: Request cancellation of update. This op
+ * is called from the context of a different kernel
+ * thread, so race conditions need to be considered.
+ * @cleanup: Optional: Complements the prepare()
+ * function and is called at the completion
+ * of the update, on success or failure, if the
+ * prepare function succeeded.
+ */
+struct fw_upload_ops {
+ enum fw_upload_err (*prepare)(struct fw_upload *fw_upload,
+ const u8 *data, u32 size);
+ enum fw_upload_err (*write)(struct fw_upload *fw_upload,
+ const u8 *data, u32 offset,
+ u32 size, u32 *written);
+ enum fw_upload_err (*poll_complete)(struct fw_upload *fw_upload);
+ void (*cancel)(struct fw_upload *fw_upload);
+ void (*cleanup)(struct fw_upload *fw_upload);
+};
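Putting the required ops together, a minimal registration sketch; the op bodies, block size, name, and surrounding driver state are placeholders rather than a real driver:

static enum fw_upload_err my_prepare(struct fw_upload *fwl, const u8 *data,
				     u32 size)
{
	return size ? FW_UPLOAD_ERR_NONE : FW_UPLOAD_ERR_INVALID_SIZE;
}

static enum fw_upload_err my_write(struct fw_upload *fwl, const u8 *data,
				   u32 offset, u32 size, u32 *written)
{
	/* Program at most one block per call; the core loops until all
	 * data has been written. */
	*written = min_t(u32, size, MY_BLOCK_SIZE /* assumed */);
	return FW_UPLOAD_ERR_NONE;
}

static enum fw_upload_err my_poll_complete(struct fw_upload *fwl)
{
	return FW_UPLOAD_ERR_NONE;
}

static void my_cancel(struct fw_upload *fwl)
{
	/* runs in another kernel thread; synchronize with my_write() */
}

static const struct fw_upload_ops my_ops = {
	.prepare	= my_prepare,
	.write		= my_write,
	.poll_complete	= my_poll_complete,
	.cancel		= my_cancel,
	/* .cleanup is optional */
};

A driver would then call firmware_upload_register(THIS_MODULE, parent, "my-fpga", &my_ops, priv) at probe time and check the result with IS_ERR().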
-#define DECLARE_BUILTIN_FIRMWARE(name, blob) \
- DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob))
+struct module;
+struct device;
-#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \
- static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \
- __used __section(".builtin_fw") = { name, blob, size }
+/*
+ * Built-in firmware functionality is only available if FW_LOADER=y, but not
+ * FW_LOADER=m
+ */
+#ifdef CONFIG_FW_LOADER
+bool firmware_request_builtin(struct firmware *fw, const char *name);
+#else
+static inline bool firmware_request_builtin(struct firmware *fw,
+ const char *name)
+{
+ return false;
+}
+#endif
-#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
+#if IS_REACHABLE(CONFIG_FW_LOADER)
int request_firmware(const struct firmware **fw, const char *name,
struct device *device);
+int firmware_request_nowait_nowarn(
+ struct module *module, const char *name,
+ struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context));
int firmware_request_nowarn(const struct firmware **fw, const char *name,
struct device *device);
int firmware_request_platform(const struct firmware **fw, const char *name,
@@ -66,6 +127,14 @@ static inline int request_firmware(const struct firmware **fw,
return -EINVAL;
}
+static inline int firmware_request_nowait_nowarn(
+ struct module *module, const char *name,
+ struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context))
+{
+ return -EINVAL;
+}
+
static inline int firmware_request_nowarn(const struct firmware **fw,
const char *name,
struct device *device)
@@ -116,6 +185,32 @@ static inline int request_partial_firmware_into_buf
#endif
+#ifdef CONFIG_FW_UPLOAD
+
+struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+ const char *name, const struct fw_upload_ops *ops,
+ void *dd_handle);
+void firmware_upload_unregister(struct fw_upload *fw_upload);
+
+#else
+
+static inline struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+ const char *name, const struct fw_upload_ops *ops,
+ void *dd_handle)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void firmware_upload_unregister(struct fw_upload *fw_upload)
+{
+}
+
+#endif
+
int firmware_request_cache(struct device *device, const char *name);
+DEFINE_FREE(firmware, struct firmware *, release_firmware(_T))
+
#endif
diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h
new file mode 100644
index 000000000000..0ec1cdc5585d
--- /dev/null
+++ b/include/linux/firmware/cirrus/cs_dsp.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * cs_dsp.h -- Cirrus Logic DSP firmware support
+ *
+ * Based on sound/soc/codecs/wm_adsp.h
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ * Copyright (C) 2015-2021 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+#ifndef __CS_DSP_H
+#define __CS_DSP_H
+
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/list.h>
+#include <linux/regmap.h>
+
+#define CS_ADSP2_REGION_0 BIT(0)
+#define CS_ADSP2_REGION_1 BIT(1)
+#define CS_ADSP2_REGION_2 BIT(2)
+#define CS_ADSP2_REGION_3 BIT(3)
+#define CS_ADSP2_REGION_4 BIT(4)
+#define CS_ADSP2_REGION_5 BIT(5)
+#define CS_ADSP2_REGION_6 BIT(6)
+#define CS_ADSP2_REGION_7 BIT(7)
+#define CS_ADSP2_REGION_8 BIT(8)
+#define CS_ADSP2_REGION_9 BIT(9)
+#define CS_ADSP2_REGION_1_9 (CS_ADSP2_REGION_1 | \
+ CS_ADSP2_REGION_2 | CS_ADSP2_REGION_3 | \
+ CS_ADSP2_REGION_4 | CS_ADSP2_REGION_5 | \
+ CS_ADSP2_REGION_6 | CS_ADSP2_REGION_7 | \
+ CS_ADSP2_REGION_8 | CS_ADSP2_REGION_9)
+#define CS_ADSP2_REGION_ALL (CS_ADSP2_REGION_0 | CS_ADSP2_REGION_1_9)
+
+#define CS_DSP_DATA_WORD_SIZE 3
+#define CS_DSP_DATA_WORD_BITS (3 * BITS_PER_BYTE)
+
+#define CS_DSP_ACKED_CTL_TIMEOUT_MS 100
+#define CS_DSP_ACKED_CTL_N_QUICKPOLLS 10
+#define CS_DSP_ACKED_CTL_MIN_VALUE 0
+#define CS_DSP_ACKED_CTL_MAX_VALUE 0xFFFFFF
+
+/*
+ * Write sequence operation codes
+ */
+#define CS_DSP_WSEQ_FULL 0x00
+#define CS_DSP_WSEQ_ADDR8 0x02
+#define CS_DSP_WSEQ_L16 0x04
+#define CS_DSP_WSEQ_H16 0x05
+#define CS_DSP_WSEQ_UNLOCK 0xFD
+#define CS_DSP_WSEQ_END 0xFF
+
+/**
+ * struct cs_dsp_region - Describes a logical memory region in DSP address space
+ * @type: Memory region type
+ * @base: Address of region
+ */
+struct cs_dsp_region {
+ int type;
+ unsigned int base;
+};
+
+/**
+ * struct cs_dsp_alg_region - Describes a logical algorithm region in DSP address space
+ * @alg: Algorithm id
+ * @ver: Expected algorithm version
+ * @type: Memory region type
+ * @base: Address of region
+ */
+struct cs_dsp_alg_region {
+ unsigned int alg;
+ unsigned int ver;
+ int type;
+ unsigned int base;
+};
+
+/**
+ * struct cs_dsp_coeff_ctl - Describes a coefficient control
+ * @list: List node for internal use
+ * @dsp: DSP instance associated with this control
+ * @cache: Cached value of the control
+ * @fw_name: Name of the firmware
+ * @subname: Name of the control parsed from the WMFW
+ * @subname_len: Length of subname
+ * @offset: Offset of control within alg_region in words
+ * @len: Length of the cached value in bytes
+ * @type: One of the WMFW_CTL_TYPE_ control types defined in wmfw.h
+ * @flags: Bitfield of WMFW_CTL_FLAG_ control flags defined in wmfw.h
+ * @set: Flag indicating the value has been written by the user
+ * @enabled: Flag indicating whether control is enabled
+ * @alg_region: Logical region associated with this control
+ * @priv: For use by the client
+ */
+struct cs_dsp_coeff_ctl {
+ struct list_head list;
+ struct cs_dsp *dsp;
+ void *cache;
+ const char *fw_name;
+ /* Subname is needed to match with firmware */
+ const char *subname;
+ unsigned int subname_len;
+ unsigned int offset;
+ unsigned int len;
+ unsigned int type;
+ unsigned int flags;
+ unsigned int set:1;
+ unsigned int enabled:1;
+ struct cs_dsp_alg_region alg_region;
+
+ void *priv;
+};
+
+struct cs_dsp_ops;
+struct cs_dsp_client_ops;
+
+/**
+ * struct cs_dsp - Configuration and state of a Cirrus Logic DSP
+ * @name: The name of the DSP instance
+ * @rev: Revision of the DSP
+ * @num: DSP instance number
+ * @type: Type of DSP
+ * @dev: Driver model representation of the device
+ * @regmap: Register map of the device
+ * @ops: Function pointers for internal callbacks
+ * @client_ops: Function pointers for client callbacks
+ * @base: Address of the DSP registers
+ * @base_sysinfo: Address of the sysinfo register (Halo only)
+ * @sysclk_reg: Address of the sysclk register (ADSP1 only)
+ * @sysclk_mask: Mask of frequency bits within sysclk register (ADSP1 only)
+ * @sysclk_shift: Shift of frequency bits within sysclk register (ADSP1 only)
+ * @no_core_startstop: Flag indicating the core does not support start/stop
+ * @alg_regions: List of currently loaded algorithm regions
+ * @fw_name: Name of the current firmware
+ * @fw_id: ID of the current firmware, obtained from the wmfw
+ * @fw_id_version: Version of the firmware, obtained from the wmfw
+ * @fw_vendor_id: Vendor of the firmware, obtained from the wmfw
+ * @mem: DSP memory region descriptions
+ * @num_mems: Number of memory regions in this DSP
+ * @wmfw_ver: Version of the wmfw file format
+ * @booted: Flag indicating DSP has been configured
+ * @running: Flag indicating DSP is executing firmware
+ * @ctl_list: Controls defined within the loaded DSP firmware
+ * @lock_regions: Enable MPU traps on specified memory regions
+ * @pwr_lock: Lock used to serialize accesses
+ * @debugfs_root: Debugfs directory for this DSP instance
+ * @wmfw_file_name: Filename of the currently loaded firmware
+ * @bin_file_name: Filename of the currently loaded coefficients
+ */
+struct cs_dsp {
+ const char *name;
+ int rev;
+ int num;
+ int type;
+ struct device *dev;
+ struct regmap *regmap;
+
+ const struct cs_dsp_ops *ops;
+ const struct cs_dsp_client_ops *client_ops;
+
+ unsigned int base;
+ unsigned int base_sysinfo;
+ unsigned int sysclk_reg;
+ unsigned int sysclk_mask;
+ unsigned int sysclk_shift;
+ bool no_core_startstop;
+
+ struct list_head alg_regions;
+
+ const char *fw_name;
+ unsigned int fw_id;
+ unsigned int fw_id_version;
+ unsigned int fw_vendor_id;
+
+ const struct cs_dsp_region *mem;
+ int num_mems;
+
+ int wmfw_ver;
+
+ bool booted;
+ bool running;
+
+ struct list_head ctl_list;
+
+ struct mutex pwr_lock;
+
+ unsigned int lock_regions;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+ const char *wmfw_file_name;
+ const char *bin_file_name;
+#endif
+};
+
+/**
+ * struct cs_dsp_client_ops - client callbacks
+ * @control_add: Called under the pwr_lock when a control is created
+ * @control_remove: Called under the pwr_lock when a control is destroyed
+ * @pre_run: Called under the pwr_lock by cs_dsp_run() before the core is started
+ * @post_run: Called under the pwr_lock by cs_dsp_run() after the core is started
+ * @pre_stop: Called under the pwr_lock by cs_dsp_stop() before the core is stopped
+ * @post_stop: Called under the pwr_lock by cs_dsp_stop() after the core is stopped
+ * @watchdog_expired: Called when a watchdog expiry is detected
+ *
+ * These callbacks give the cs_dsp client an opportunity to respond to events
+ * or to perform actions atomically.
+ */
+struct cs_dsp_client_ops {
+ int (*control_add)(struct cs_dsp_coeff_ctl *ctl);
+ void (*control_remove)(struct cs_dsp_coeff_ctl *ctl);
+ int (*pre_run)(struct cs_dsp *dsp);
+ int (*post_run)(struct cs_dsp *dsp);
+ void (*pre_stop)(struct cs_dsp *dsp);
+ void (*post_stop)(struct cs_dsp *dsp);
+ void (*watchdog_expired)(struct cs_dsp *dsp);
+};
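Tying the client side together, a hedged sketch of wiring up these callbacks and booting a Halo core with the functions declared below; the firmware names and the pre_run body are illustrative:

static int my_pre_run(struct cs_dsp *dsp)
{
	/* e.g. enable supplies or clocks the firmware expects */
	return 0;
}

static const struct cs_dsp_client_ops my_client_ops = {
	.pre_run = my_pre_run,
};

static int my_boot(struct cs_dsp *dsp, const struct firmware *wmfw,
		   const struct firmware *coeff)
{
	int ret;

	dsp->client_ops = &my_client_ops;

	ret = cs_dsp_halo_init(dsp);
	if (ret)
		return ret;

	ret = cs_dsp_power_up(dsp, wmfw, "my.wmfw", coeff, "my.bin", "misc");
	if (ret)
		return ret;

	return cs_dsp_run(dsp);
}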
+
+int cs_dsp_adsp1_init(struct cs_dsp *dsp);
+int cs_dsp_adsp2_init(struct cs_dsp *dsp);
+int cs_dsp_halo_init(struct cs_dsp *dsp);
+
+int cs_dsp_adsp1_power_up(struct cs_dsp *dsp,
+ const struct firmware *wmfw_firmware, const char *wmfw_filename,
+ const struct firmware *coeff_firmware, const char *coeff_filename,
+ const char *fw_name);
+void cs_dsp_adsp1_power_down(struct cs_dsp *dsp);
+int cs_dsp_power_up(struct cs_dsp *dsp,
+ const struct firmware *wmfw_firmware, const char *wmfw_filename,
+ const struct firmware *coeff_firmware, const char *coeff_filename,
+ const char *fw_name);
+void cs_dsp_power_down(struct cs_dsp *dsp);
+int cs_dsp_run(struct cs_dsp *dsp);
+void cs_dsp_stop(struct cs_dsp *dsp);
+
+void cs_dsp_remove(struct cs_dsp *dsp);
+
+int cs_dsp_set_dspclk(struct cs_dsp *dsp, unsigned int freq);
+void cs_dsp_adsp2_bus_error(struct cs_dsp *dsp);
+void cs_dsp_halo_bus_error(struct cs_dsp *dsp);
+void cs_dsp_halo_wdt_expire(struct cs_dsp *dsp);
+
+void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root);
+void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp);
+
+int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int event_id);
+int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ const void *buf, size_t len);
+int cs_dsp_coeff_lock_and_write_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ const void *buf, size_t len);
+int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ void *buf, size_t len);
+int cs_dsp_coeff_lock_and_read_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ void *buf, size_t len);
+struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, int type,
+ unsigned int alg);
+
+int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr,
+ unsigned int num_words, __be32 *data);
+int cs_dsp_read_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 *data);
+int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 data);
+void cs_dsp_remove_padding(u32 *buf, int nwords);
+
+struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
+ int type, unsigned int id);
+
+const char *cs_dsp_mem_region_name(unsigned int type);
+
+/**
+ * struct cs_dsp_wseq - Describes a write sequence
+ * @ctl: Write sequence cs_dsp control
+ * @ops: Operations contained within
+ */
+struct cs_dsp_wseq {
+ struct cs_dsp_coeff_ctl *ctl;
+ struct list_head ops;
+};
+
+int cs_dsp_wseq_init(struct cs_dsp *dsp, struct cs_dsp_wseq *wseqs, unsigned int num_wseqs);
+int cs_dsp_wseq_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq, u32 addr, u32 data,
+ u8 op_code, bool update);
+int cs_dsp_wseq_multi_write(struct cs_dsp *dsp, struct cs_dsp_wseq *wseq,
+ const struct reg_sequence *reg_seq, int num_regs,
+ u8 op_code, bool update);
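For instance, appending a single full-register write to an initialized sequence with one of the op codes defined near the top of this header (address and value invented for the sketch):

	ret = cs_dsp_wseq_write(dsp, &wseqs[0], 0x2c04, 0x00a5,
				CS_DSP_WSEQ_FULL, true);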
+
+/**
+ * struct cs_dsp_chunk - Describes a buffer holding data formatted for the DSP
+ * @data: Pointer to underlying buffer memory
+ * @max: Pointer to end of the buffer memory
+ * @bytes: Number of bytes read/written into the memory chunk
+ * @cache: Temporary holding area for data as it is formatted
+ * @cachebits: Number of bits of data currently in cache
+ */
+struct cs_dsp_chunk {
+ u8 *data;
+ u8 *max;
+ int bytes;
+
+ u32 cache;
+ int cachebits;
+};
+
+/**
+ * cs_dsp_chunk() - Create a DSP memory chunk
+ * @data: Pointer to the buffer that will be used to store data
+ * @size: Size of the buffer in bytes
+ *
+ * Return: A cs_dsp_chunk structure
+ */
+static inline struct cs_dsp_chunk cs_dsp_chunk(void *data, int size)
+{
+ struct cs_dsp_chunk ch = {
+ .data = data,
+ .max = data + size,
+ };
+
+ return ch;
+}
+
+/**
+ * cs_dsp_chunk_end() - Check if a DSP memory chunk is full
+ * @ch: Pointer to the chunk structure
+ *
+ * Return: True if the whole buffer has been read/written
+ */
+static inline bool cs_dsp_chunk_end(struct cs_dsp_chunk *ch)
+{
+ return ch->data == ch->max;
+}
+
+/**
+ * cs_dsp_chunk_bytes() - Number of bytes written/read from a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ *
+ * Return: Number of bytes read/written to the buffer
+ */
+static inline int cs_dsp_chunk_bytes(struct cs_dsp_chunk *ch)
+{
+ return ch->bytes;
+}
+
+/**
+ * cs_dsp_chunk_valid_addr() - Check if an address is in a DSP memory chunk
+ * @ch: Pointer to the chunk structure
+ * @addr: Address to check against the chunk bounds
+ *
+ * Return: True if the given address is within the buffer
+ */
+static inline bool cs_dsp_chunk_valid_addr(struct cs_dsp_chunk *ch, void *addr)
+{
+ return (u8 *)addr >= ch->data && (u8 *)addr < ch->max;
+}
+
+int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val);
+int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch);
+int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits);
+
+#endif
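A small usage sketch of the chunk helpers above, assuming cs_dsp_chunk_flush() pads any cached partial word out to a full 24-bit DSP word:

	u8 buf[2 * CS_DSP_DATA_WORD_SIZE];
	struct cs_dsp_chunk ch = cs_dsp_chunk(buf, sizeof(buf));

	cs_dsp_chunk_write(&ch, 24, 0x123456);	/* one full DSP word */
	cs_dsp_chunk_write(&ch, 12, 0xabc);	/* partial word, held in cache */
	cs_dsp_chunk_flush(&ch);		/* pad the cache to a word */

	/* cs_dsp_chunk_end(&ch) is now true; cs_dsp_chunk_bytes(&ch) == 6 */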
diff --git a/include/linux/firmware/cirrus/cs_dsp_test_utils.h b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
new file mode 100644
index 000000000000..1f97764fdfd7
--- /dev/null
+++ b/include/linux/firmware/cirrus/cs_dsp_test_utils.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Support utilities for cs_dsp testing.
+ *
+ * Copyright (C) 2024 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/regmap.h>
+#include <linux/firmware/cirrus/wmfw.h>
+
+struct kunit;
+struct cs_dsp_test;
+struct cs_dsp_test_local;
+
+/**
+ * struct cs_dsp_test - base class for test utilities
+ *
+ * @test: Pointer to struct kunit instance.
+ * @dsp: Pointer to struct cs_dsp instance.
+ * @local: Private data for each test suite.
+ */
+struct cs_dsp_test {
+ struct kunit *test;
+ struct cs_dsp *dsp;
+
+ struct cs_dsp_test_local *local;
+
+ /* private: Following members are private */
+ bool saw_bus_write;
+};
+
+/**
+ * struct cs_dsp_mock_alg_def - Info for creating a mock algorithm entry.
+ *
+ * @id: Algorithm ID.
+ * @ver: Algorithm version.
+ * @xm_base_words: XM base address in DSP words.
+ * @xm_size_words: XM size in DSP words.
+ * @ym_base_words: YM base address in DSP words.
+ * @ym_size_words: YM size in DSP words.
+ * @zm_base_words: ZM base address in DSP words.
+ * @zm_size_words: ZM size in DSP words.
+ */
+struct cs_dsp_mock_alg_def {
+ unsigned int id;
+ unsigned int ver;
+ unsigned int xm_base_words;
+ unsigned int xm_size_words;
+ unsigned int ym_base_words;
+ unsigned int ym_size_words;
+ unsigned int zm_base_words;
+ unsigned int zm_size_words;
+};
+
+struct cs_dsp_mock_coeff_def {
+ const char *shortname;
+ const char *fullname;
+ const char *description;
+ u16 type;
+ u16 flags;
+ u16 mem_type;
+ unsigned int offset_dsp_words;
+ unsigned int length_bytes;
+};
+
+/**
+ * struct cs_dsp_mock_xm_header - XM header builder
+ *
+ * @test_priv: Pointer to the struct cs_dsp_test.
+ * @blob_data: Pointer to the created blob data.
+ * @blob_size_bytes: Size of the data at blob_data.
+ */
+struct cs_dsp_mock_xm_header {
+ struct cs_dsp_test *test_priv;
+ void *blob_data;
+ size_t blob_size_bytes;
+};
+
+struct cs_dsp_mock_wmfw_builder;
+struct cs_dsp_mock_bin_builder;
+
+extern const unsigned int cs_dsp_mock_adsp2_32bit_sysbase;
+extern const unsigned int cs_dsp_mock_adsp2_16bit_sysbase;
+extern const unsigned int cs_dsp_mock_halo_core_base;
+extern const unsigned int cs_dsp_mock_halo_sysinfo_base;
+
+extern const struct cs_dsp_region cs_dsp_mock_halo_dsp1_regions[];
+extern const unsigned int cs_dsp_mock_halo_dsp1_region_sizes[];
+extern const struct cs_dsp_region cs_dsp_mock_adsp2_32bit_dsp1_regions[];
+extern const unsigned int cs_dsp_mock_adsp2_32bit_dsp1_region_sizes[];
+extern const struct cs_dsp_region cs_dsp_mock_adsp2_16bit_dsp1_regions[];
+extern const unsigned int cs_dsp_mock_adsp2_16bit_dsp1_region_sizes[];
+int cs_dsp_mock_count_regions(const unsigned int *region_sizes);
+unsigned int cs_dsp_mock_size_of_region(const struct cs_dsp *dsp, int mem_type);
+unsigned int cs_dsp_mock_base_addr_for_mem(struct cs_dsp_test *priv, int mem_type);
+unsigned int cs_dsp_mock_reg_addr_inc_per_unpacked_word(struct cs_dsp_test *priv);
+unsigned int cs_dsp_mock_reg_block_length_bytes(struct cs_dsp_test *priv, int mem_type);
+unsigned int cs_dsp_mock_reg_block_length_registers(struct cs_dsp_test *priv, int mem_type);
+unsigned int cs_dsp_mock_reg_block_length_dsp_words(struct cs_dsp_test *priv, int mem_type);
+bool cs_dsp_mock_has_zm(struct cs_dsp_test *priv);
+int cs_dsp_mock_packed_to_unpacked_mem_type(int packed_mem_type);
+unsigned int cs_dsp_mock_num_dsp_words_to_num_packed_regs(unsigned int num_dsp_words);
+unsigned int cs_dsp_mock_xm_header_get_alg_base_in_words(struct cs_dsp_test *priv,
+ unsigned int alg_id,
+ int mem_type);
+unsigned int cs_dsp_mock_xm_header_get_fw_version(struct cs_dsp_mock_xm_header *header);
+void cs_dsp_mock_xm_header_drop_from_regmap_cache(struct cs_dsp_test *priv);
+int cs_dsp_mock_xm_header_write_to_regmap(struct cs_dsp_mock_xm_header *header);
+struct cs_dsp_mock_xm_header *cs_dsp_create_mock_xm_header(struct cs_dsp_test *priv,
+ const struct cs_dsp_mock_alg_def *algs,
+ size_t num_algs);
+
+int cs_dsp_mock_regmap_init(struct cs_dsp_test *priv);
+void cs_dsp_mock_regmap_drop_range(struct cs_dsp_test *priv,
+ unsigned int first_reg, unsigned int last_reg);
+void cs_dsp_mock_regmap_drop_regs(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_regs);
+void cs_dsp_mock_regmap_drop_bytes(struct cs_dsp_test *priv,
+ unsigned int first_reg, size_t num_bytes);
+void cs_dsp_mock_regmap_drop_system_regs(struct cs_dsp_test *priv);
+bool cs_dsp_mock_regmap_is_dirty(struct cs_dsp_test *priv, bool drop_system_regs);
+
+struct cs_dsp_mock_bin_builder *cs_dsp_mock_bin_init(struct cs_dsp_test *priv,
+ int format_version,
+ unsigned int fw_version);
+void cs_dsp_mock_bin_add_raw_block(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int type, unsigned int offset,
+ const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_bin_add_info(struct cs_dsp_mock_bin_builder *builder,
+ const char *info);
+void cs_dsp_mock_bin_add_name(struct cs_dsp_mock_bin_builder *builder,
+ const char *name);
+void cs_dsp_mock_bin_add_patch(struct cs_dsp_mock_bin_builder *builder,
+ unsigned int alg_id, unsigned int alg_ver,
+ int mem_region, unsigned int reg_addr_offset,
+ const void *payload_data, size_t payload_len_bytes);
+struct firmware *cs_dsp_mock_bin_get_firmware(struct cs_dsp_mock_bin_builder *builder);
+
+struct cs_dsp_mock_wmfw_builder *cs_dsp_mock_wmfw_init(struct cs_dsp_test *priv,
+ int format_version);
+void cs_dsp_mock_wmfw_add_raw_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int mem_region, unsigned int mem_offset_dsp_words,
+ const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_wmfw_add_info(struct cs_dsp_mock_wmfw_builder *builder,
+ const char *info);
+void cs_dsp_mock_wmfw_add_data_block(struct cs_dsp_mock_wmfw_builder *builder,
+ int mem_region, unsigned int mem_offset_dsp_words,
+ const void *payload_data, size_t payload_len_bytes);
+void cs_dsp_mock_wmfw_start_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder,
+ unsigned int alg_id,
+ const char *name,
+ const char *description);
+void cs_dsp_mock_wmfw_add_coeff_desc(struct cs_dsp_mock_wmfw_builder *builder,
+ const struct cs_dsp_mock_coeff_def *def);
+void cs_dsp_mock_wmfw_end_alg_info_block(struct cs_dsp_mock_wmfw_builder *builder);
+struct firmware *cs_dsp_mock_wmfw_get_firmware(struct cs_dsp_mock_wmfw_builder *builder);
+int cs_dsp_mock_wmfw_format_version(struct cs_dsp_mock_wmfw_builder *builder);
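As an illustration of the builder flow, a hedged sketch of a test constructing a small wmfw image; the format version, algorithm id, and strings are arbitrary:

	struct cs_dsp_mock_wmfw_builder *builder;
	struct firmware *fw;

	builder = cs_dsp_mock_wmfw_init(priv, 3);
	cs_dsp_mock_wmfw_add_info(builder, "mock firmware for test");
	cs_dsp_mock_wmfw_start_alg_info_block(builder, 0xfafa, "ALG", "desc");
	cs_dsp_mock_wmfw_end_alg_info_block(builder);
	fw = cs_dsp_mock_wmfw_get_firmware(builder);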
diff --git a/include/linux/firmware/cirrus/wmfw.h b/include/linux/firmware/cirrus/wmfw.h
new file mode 100644
index 000000000000..74e5a4f6c13a
--- /dev/null
+++ b/include/linux/firmware/cirrus/wmfw.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * wmfw.h - Wolfson firmware format information
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ */
+
+#ifndef __WMFW_H
+#define __WMFW_H
+
+#include <linux/types.h>
+
+#define WMFW_MAX_ALG_NAME 256
+#define WMFW_MAX_ALG_DESCR_NAME 256
+
+#define WMFW_MAX_COEFF_NAME 256
+#define WMFW_MAX_COEFF_DESCR_NAME 256
+
+#define WMFW_CTL_FLAG_SYS 0x8000
+#define WMFW_CTL_FLAG_VOLATILE 0x0004
+#define WMFW_CTL_FLAG_WRITEABLE 0x0002
+#define WMFW_CTL_FLAG_READABLE 0x0001
+
+#define WMFW_CTL_TYPE_BYTES 0x0004 /* byte control */
+
+/* Non-ALSA coefficient types start at 0x1000 */
+#define WMFW_CTL_TYPE_ACKED 0x1000 /* acked control */
+#define WMFW_CTL_TYPE_HOSTEVENT 0x1001 /* event control */
+#define WMFW_CTL_TYPE_HOST_BUFFER 0x1002 /* host buffer pointer */
+#define WMFW_CTL_TYPE_FWEVENT 0x1004 /* firmware event control */
+
+struct wmfw_header {
+ char magic[4];
+ __le32 len;
+ __le16 rev;
+ u8 core;
+ u8 ver;
+} __packed;
+
+struct wmfw_footer {
+ __le64 timestamp;
+ __le32 checksum;
+} __packed;
+
+struct wmfw_adsp1_sizes {
+ __le32 dm;
+ __le32 pm;
+ __le32 zm;
+} __packed;
+
+struct wmfw_adsp2_sizes {
+ __le32 xm;
+ __le32 ym;
+ __le32 pm;
+ __le32 zm;
+} __packed;
+
+struct wmfw_region {
+ union {
+ __be32 type;
+ __le32 offset;
+ };
+ __le32 len;
+ u8 data[];
+} __packed;
+
+struct wmfw_id_hdr {
+ __be32 core_id;
+ __be32 core_rev;
+ __be32 id;
+ __be32 ver;
+} __packed;
+
+struct wmfw_v3_id_hdr {
+ __be32 core_id;
+ __be32 block_rev;
+ __be32 vendor_id;
+ __be32 id;
+ __be32 ver;
+} __packed;
+
+struct wmfw_adsp1_id_hdr {
+ struct wmfw_id_hdr fw;
+ __be32 zm;
+ __be32 dm;
+ __be32 n_algs;
+} __packed;
+
+struct wmfw_adsp2_id_hdr {
+ struct wmfw_id_hdr fw;
+ __be32 zm;
+ __be32 xm;
+ __be32 ym;
+ __be32 n_algs;
+} __packed;
+
+struct wmfw_halo_id_hdr {
+ struct wmfw_v3_id_hdr fw;
+ __be32 xm_base;
+ __be32 xm_size;
+ __be32 ym_base;
+ __be32 ym_size;
+ __be32 n_algs;
+} __packed;
+
+struct wmfw_alg_hdr {
+ __be32 id;
+ __be32 ver;
+} __packed;
+
+struct wmfw_adsp1_alg_hdr {
+ struct wmfw_alg_hdr alg;
+ __be32 zm;
+ __be32 dm;
+} __packed;
+
+struct wmfw_adsp2_alg_hdr {
+ struct wmfw_alg_hdr alg;
+ __be32 zm;
+ __be32 xm;
+ __be32 ym;
+} __packed;
+
+struct wmfw_halo_alg_hdr {
+ struct wmfw_alg_hdr alg;
+ __be32 xm_base;
+ __be32 xm_size;
+ __be32 ym_base;
+ __be32 ym_size;
+} __packed;
+
+struct wmfw_adsp_alg_data {
+ __le32 id;
+ u8 name[WMFW_MAX_ALG_NAME];
+ u8 descr[WMFW_MAX_ALG_DESCR_NAME];
+ __le32 ncoeff;
+ u8 data[];
+} __packed;
+
+struct wmfw_adsp_coeff_data {
+ struct {
+ __le16 offset;
+ __le16 type;
+ __le32 size;
+ } hdr;
+ u8 name[WMFW_MAX_COEFF_NAME];
+ u8 descr[WMFW_MAX_COEFF_DESCR_NAME];
+ __le16 ctl_type;
+ __le16 flags;
+ __le32 len;
+ u8 data[];
+} __packed;
+
+struct wmfw_coeff_hdr {
+ u8 magic[4];
+ __le32 len;
+ union {
+ __be32 rev;
+ __le32 ver;
+ };
+ union {
+ __be32 core;
+ __le32 core_ver;
+ };
+ u8 data[];
+} __packed;
+
+struct wmfw_coeff_item {
+ __le16 offset;
+ __le16 type;
+ __le32 id;
+ __le32 ver;
+ __le32 sr;
+ __le32 len;
+ u8 data[];
+} __packed;
+
+#define WMFW_ADSP1 1
+#define WMFW_ADSP2 2
+#define WMFW_HALO 4
+
+#define WMFW_ABSOLUTE 0xf0
+#define WMFW_ALGORITHM_DATA 0xf2
+#define WMFW_METADATA 0xfc
+#define WMFW_NAME_TEXT 0xfe
+#define WMFW_INFO_TEXT 0xff
+
+#define WMFW_ADSP1_PM 2
+#define WMFW_ADSP1_DM 3
+#define WMFW_ADSP1_ZM 4
+
+#define WMFW_ADSP2_PM 2
+#define WMFW_ADSP2_ZM 4
+#define WMFW_ADSP2_XM 5
+#define WMFW_ADSP2_YM 6
+
+#define WMFW_HALO_PM_PACKED 0x10
+#define WMFW_HALO_XM_PACKED 0x11
+#define WMFW_HALO_YM_PACKED 0x12
+
+#endif
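To make the layout concrete, a hedged sketch of validating the file header; the "WMFW" magic check reflects the customary on-disk format rather than anything defined in this header:

	const struct wmfw_header *hdr;

	if (fw->size < sizeof(*hdr))
		return -EINVAL;

	hdr = (const struct wmfw_header *)fw->data;
	if (memcmp(hdr->magic, "WMFW", sizeof(hdr->magic)))
		return -EINVAL;

	pr_info("wmfw format v%u for core type %u, %u bytes\n",
		hdr->ver, hdr->core, le32_to_cpu(hdr->len));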
diff --git a/include/linux/firmware/imx/dsp.h b/include/linux/firmware/imx/dsp.h
index 4f7895a3b73c..1f176a2683fe 100644
--- a/include/linux/firmware/imx/dsp.h
+++ b/include/linux/firmware/imx/dsp.h
@@ -37,17 +37,11 @@ struct imx_dsp_ipc {
static inline void imx_dsp_set_data(struct imx_dsp_ipc *ipc, void *data)
{
- if (!ipc)
- return;
-
ipc->private_data = data;
}
static inline void *imx_dsp_get_data(struct imx_dsp_ipc *ipc)
{
- if (!ipc)
- return NULL;
-
return ipc->private_data;
}
diff --git a/include/linux/firmware/imx/s4.h b/include/linux/firmware/imx/s4.h
new file mode 100644
index 000000000000..9e34923ae1d6
--- /dev/null
+++ b/include/linux/firmware/imx/s4.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2021 NXP
+ *
+ * Header file for the IPC implementation.
+ */
+
+#ifndef _S4_IPC_H
+#define _S4_IPC_H
+
+struct imx_s4_ipc;
+
+struct imx_s4_rpc_msg {
+ uint8_t ver;
+ uint8_t size;
+ uint8_t cmd;
+ uint8_t tag;
+} __packed;
+
+#endif /* _S4_IPC_H */
diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h
index 5cc63fe7e84d..df17196df5ff 100644
--- a/include/linux/firmware/imx/sci.h
+++ b/include/linux/firmware/imx/sci.h
@@ -21,31 +21,37 @@ int imx_scu_enable_general_irq_channel(struct device *dev);
int imx_scu_irq_register_notifier(struct notifier_block *nb);
int imx_scu_irq_unregister_notifier(struct notifier_block *nb);
int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable);
+int imx_scu_irq_get_status(u8 group, u32 *irq_status);
int imx_scu_soc_init(struct device *dev);
#else
static inline int imx_scu_soc_init(struct device *dev)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int imx_scu_enable_general_irq_channel(struct device *dev)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int imx_scu_irq_register_notifier(struct notifier_block *nb)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int imx_scu_irq_unregister_notifier(struct notifier_block *nb)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+
+static inline int imx_scu_irq_get_status(u8 group, u32 *irq_status)
+{
+ return -EOPNOTSUPP;
}
#endif
#endif /* _SC_SCI_H */
diff --git a/include/linux/firmware/imx/sm.h b/include/linux/firmware/imx/sm.h
new file mode 100644
index 000000000000..a33b45027356
--- /dev/null
+++ b/include/linux/firmware/imx/sm.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef _SCMI_IMX_H
+#define _SCMI_IMX_H
+
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/scmi_imx_protocol.h>
+#include <linux/types.h>
+
+#define SCMI_IMX95_CTRL_PDM_CLK_SEL 0 /* AON PDM clock sel */
+#define SCMI_IMX95_CTRL_MQS1_SETTINGS 1 /* AON MQS settings */
+#define SCMI_IMX95_CTRL_SAI1_MCLK 2 /* AON SAI1 MCLK */
+#define SCMI_IMX95_CTRL_SAI3_MCLK 3 /* WAKE SAI3 MCLK */
+#define SCMI_IMX95_CTRL_SAI4_MCLK 4 /* WAKE SAI4 MCLK */
+#define SCMI_IMX95_CTRL_SAI5_MCLK 5 /* WAKE SAI5 MCLK */
+
+#define SCMI_IMX94_CTRL_PDM_CLK_SEL 0U /*!< AON PDM clock sel */
+#define SCMI_IMX94_CTRL_MQS1_SETTINGS 1U /*!< AON MQS settings */
+#define SCMI_IMX94_CTRL_MQS2_SETTINGS 2U /*!< WAKE MQS settings */
+#define SCMI_IMX94_CTRL_SAI1_MCLK 3U /*!< AON SAI1 MCLK */
+#define SCMI_IMX94_CTRL_SAI2_MCLK 4U /*!< WAKE SAI2 MCLK */
+#define SCMI_IMX94_CTRL_SAI3_MCLK 5U /*!< WAKE SAI3 MCLK */
+#define SCMI_IMX94_CTRL_SAI4_MCLK 6U /*!< WAKE SAI4 MCLK */
+
+#if IS_ENABLED(CONFIG_IMX_SCMI_MISC_DRV)
+int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val);
+int scmi_imx_misc_ctrl_set(u32 id, u32 val);
+#else
+static inline int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_misc_ctrl_set(u32 id, u32 val)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IMX_SCMI_CPU_DRV)
+int scmi_imx_cpu_start(u32 cpuid, bool start);
+int scmi_imx_cpu_started(u32 cpuid, bool *started);
+int scmi_imx_cpu_reset_vector_set(u32 cpuid, u64 vector, bool start, bool boot,
+ bool resume);
+#else
+static inline int scmi_imx_cpu_start(u32 cpuid, bool start)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_cpu_started(u32 cpuid, bool *started)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_cpu_reset_vector_set(u32 cpuid, u64 vector, bool start,
+ bool boot, bool resume)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+enum scmi_imx_lmm_op {
+ SCMI_IMX_LMM_BOOT,
+ SCMI_IMX_LMM_POWER_ON,
+ SCMI_IMX_LMM_SHUTDOWN,
+};
+
+/* For shutdown operation */
+#define SCMI_IMX_LMM_OP_FORCEFUL 0
+#define SCMI_IMX_LMM_OP_GRACEFUL BIT(0)
+
+#if IS_ENABLED(CONFIG_IMX_SCMI_LMM_DRV)
+int scmi_imx_lmm_operation(u32 lmid, enum scmi_imx_lmm_op op, u32 flags);
+int scmi_imx_lmm_info(u32 lmid, struct scmi_imx_lmm_info *info);
+int scmi_imx_lmm_reset_vector_set(u32 lmid, u32 cpuid, u32 flags, u64 vector);
+#else
+static inline int scmi_imx_lmm_operation(u32 lmid, enum scmi_imx_lmm_op op, u32 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_lmm_info(u32 lmid, struct scmi_imx_lmm_info *info)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_lmm_reset_vector_set(u32 lmid, u32 cpuid, u32 flags, u64 vector)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+#endif
diff --git a/include/linux/firmware/imx/svc/rm.h b/include/linux/firmware/imx/svc/rm.h
index 456b6a59d29b..31456f897aa9 100644
--- a/include/linux/firmware/imx/svc/rm.h
+++ b/include/linux/firmware/imx/svc/rm.h
@@ -59,11 +59,16 @@ enum imx_sc_rm_func {
#if IS_ENABLED(CONFIG_IMX_SCU)
bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource);
+int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt);
#else
static inline bool
imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
{
return true;
}
+static inline int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt)
+{
+ return -EOPNOTSUPP;
+}
#endif
#endif
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
index c3e5ab014caf..935dba3633b5 100644
--- a/include/linux/firmware/intel/stratix10-smc.h
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2017-2018, Intel Corporation
+ * Copyright (C) 2025, Altera Corporation
*/
#ifndef __STRATIX10_SMC_H
@@ -47,6 +48,10 @@
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64, \
ARM_SMCCC_OWNER_SIP, (func_num))
+#define INTEL_SIP_SMC_ASYNC_VAL(func_name) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_SIP, (func_name))
+
/**
* Return values in INTEL_SIP_SMC_* call
*
@@ -321,8 +326,6 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
#define INTEL_SIP_SMC_ECC_DBE \
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECC_DBE)
-#endif
-
/**
* Request INTEL_SIP_SMC_RSU_NOTIFY
*
@@ -404,3 +407,328 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
#define INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY 18
#define INTEL_SIP_SMC_RSU_MAX_RETRY \
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY)
+
+/**
+ * Request INTEL_SIP_SMC_RSU_DCMF_STATUS
+ *
+ * Sync call used by service driver at EL1 to query DCMF status from FW
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_RSU_DCMF_STATUS
+ * a1-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 dcmf3 | dcmf2 | dcmf1 | dcmf0
+ *
+ * Or
+ *
+ * a0 INTEL_SIP_SMC_RSU_ERROR
+ */
+#define INTEL_SIP_SMC_FUNCID_RSU_DCMF_STATUS 20
+#define INTEL_SIP_SMC_RSU_DCMF_STATUS \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_DCMF_STATUS)
+
+/**
+ * Request INTEL_SIP_SMC_SERVICE_COMPLETED
+ * Sync call to check whether the secure world has completed the service
+ * request.
+ *
+ * Call register usage:
+ * a0: INTEL_SIP_SMC_SERVICE_COMPLETED
+ * a1: this register is optional. If used, it is the physical address for
+ * secure firmware to put output data
+ * a2: this register is optional. If used, it is the size of output data
+ * a3-a7: not used
+ *
+ * Return status:
+ * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR,
+ * INTEL_SIP_SMC_REJECTED or INTEL_SIP_SMC_STATUS_BUSY
+ * a1: mailbox error if a0 is INTEL_SIP_SMC_STATUS_ERROR
+ * a2: physical address containing the process info
+ * for FCS certificate -- the data contains the certificate status
+ * for FCS cryption -- the data contains the actual data size FW processes
+ * a3: output data size
+ */
+#define INTEL_SIP_SMC_FUNCID_SERVICE_COMPLETED 30
+#define INTEL_SIP_SMC_SERVICE_COMPLETED \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_SERVICE_COMPLETED)
+
+/**
+ * Request INTEL_SIP_SMC_FIRMWARE_VERSION
+ *
+ * Sync call used to query the version of running firmware
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FIRMWARE_VERSION
+ * a1-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR
+ * a1 running firmware version
+ */
+#define INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION 31
+#define INTEL_SIP_SMC_FIRMWARE_VERSION \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION)
+
+/**
+ * SMC call protocol for Mailbox, starting FUNCID from 60
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_MBOX_SEND_CMD
+ * a1 mailbox command code
+ * a2 physical address that contain mailbox command data (not include header)
+ * a3 mailbox command data size in words
+ * a4 set to 0 for CASUAL, set to 1 for URGENT
+ * a5 physical address for secure firmware to put response data
+ * (not include header)
+ * a6 maximum size in words of physical address to store response data
+ * a7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_REJECTED or
+ * INTEL_SIP_SMC_STATUS_ERROR
+ * a1 mailbox error code
+ * a2 response data length in word
+ * a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_MBOX_SEND_CMD 60
+#define INTEL_SIP_SMC_MBOX_SEND_CMD \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_MBOX_SEND_CMD)
+
+/**
+ * Request INTEL_SIP_SMC_SVC_VERSION
+ *
+ * Sync call used to query the SIP SMC API Version
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_SVC_VERSION
+ * a1-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 Major
+ * a2 Minor
+ */
+#define INTEL_SIP_SMC_SVC_FUNCID_VERSION 512
+#define INTEL_SIP_SMC_SVC_VERSION \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_SVC_FUNCID_VERSION)
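These identifiers are consumed through the standard SMCCC helper; a hedged sketch of querying the SVC version from the kernel:

	struct arm_smccc_res res;

	arm_smccc_smc(INTEL_SIP_SMC_SVC_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 == INTEL_SIP_SMC_STATUS_OK)
		pr_info("SIP SVC API v%lu.%lu\n", res.a1, res.a2);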
+
+/**
+ * SMC call protocol for FPGA Crypto Service (FCS)
+ * FUNCID starts from 90
+ */
+
+/**
+ * Request INTEL_SIP_SMC_FCS_RANDOM_NUMBER
+ *
+ * Sync call used to query the random number generated by the firmware
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_RANDOM_NUMBER
+ * a1 the physical address for firmware to write generated random data
+ * a2-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FCS_ERROR or
+ * INTEL_SIP_SMC_FCS_REJECTED
+ * a1 mailbox error
+ * a2 the physical address of generated random number
+ * a3 size
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_RANDOM_NUMBER 90
+#define INTEL_SIP_SMC_FCS_RANDOM_NUMBER \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_RANDOM_NUMBER)
+
+/**
+ * Request INTEL_SIP_SMC_FCS_CRYPTION
+ * Async call for data encryption and HMAC signature generation, or for
+ * data decryption and HMAC verification.
+ *
+ * Call INTEL_SIP_SMC_SERVICE_COMPLETED to get the output encrypted or
+ * decrypted data
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_CRYPTION
+ * a1 cryption mode (1 for encryption and 0 for decryption)
+ * a2 physical address of the data to be encrypted or decrypted
+ * a3 input data size
+ * a4 physical address which will hold the encrypted or decrypted output data
+ * a5 output data size
+ * a6-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR or
+ * INTEL_SIP_SMC_STATUS_REJECTED
+ * a1-3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_CRYPTION 91
+#define INTEL_SIP_SMC_FCS_CRYPTION \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_CRYPTION)
+
+/**
+ * Request INTEL_SIP_SMC_FCS_SERVICE_REQUEST
+ * Async call for authentication service of HPS software
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_SERVICE_REQUEST
+ * a1 the physical address of data block
+ * a2 size of data block
+ * a3-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_ERROR or
+ * INTEL_SIP_SMC_REJECTED
+ * a1-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_SERVICE_REQUEST 92
+#define INTEL_SIP_SMC_FCS_SERVICE_REQUEST \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_SERVICE_REQUEST)
+
+/**
+ * Request INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE
+ * Sync call to send a signed certificate
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_SEND_CERTIFICATE
+ * a1 the physical address of CERTIFICATE block
+ * a2 size of data block
+ * a3-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FCS_REJECTED
+ * a1-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE 93
+#define INTEL_SIP_SMC_FCS_SEND_CERTIFICATE \
+ INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE)
+
+/**
+ * Request INTEL_SIP_SMC_FCS_GET_PROVISION_DATA
+ * Sync call to dump all the fuses and key hashes
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_FCS_GET_PROVISION_DATA
+ * a1 the physical address for firmware to write structure of fuse and
+ * key hashes
+ * a2-a7 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FCS_ERROR or
+ * INTEL_SIP_SMC_FCS_REJECTED
+ * a1 mailbox error
+ * a2 physical address for the structure of fuse and key hashes
+ * a3 the size of structure
+ *
+ */
+#define INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA 94
+#define INTEL_SIP_SMC_FCS_GET_PROVISION_DATA \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA)
+
+/**
+ * Request INTEL_SIP_SMC_HWMON_READTEMP
+ * Sync call to request temperature
+ *
+ * Call register usage:
+ * a0 Temperature Channel
+ * a1-a7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 Temperature Value
+ * a2-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_HWMON_READTEMP 32
+#define INTEL_SIP_SMC_HWMON_READTEMP \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_HWMON_READTEMP)
+
+/**
+ * Request INTEL_SIP_SMC_HWMON_READVOLT
+ * Sync call to request voltage
+ *
+ * Call register usage:
+ * a0 Voltage Channel
+ * a1-a7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1 Voltage Value
+ * a2-a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_HWMON_READVOLT 33
+#define INTEL_SIP_SMC_HWMON_READVOLT \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_HWMON_READVOLT)
+
+/**
+ * Request INTEL_SIP_SMC_ASYNC_POLL
+ * Async call used by service driver at EL1 to query mailbox response from SDM.
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_ASYNC_POLL
+ * a1 transaction job id
+ * a2-a17 will be used to return the response data
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ * a1-a17 will contain the response values from mailbox for the previous send
+ * transaction
+ * Or
+ * a0 INTEL_SIP_SMC_STATUS_NO_RESPONSE
+ * a1-a17 not used
+ */
+#define INTEL_SIP_SMC_ASYNC_FUNC_ID_POLL (0xC8)
+#define INTEL_SIP_SMC_ASYNC_POLL \
+ INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_POLL)
+
+/**
+ * Request INTEL_SIP_SMC_ASYNC_RSU_GET_SPT
+ * Async call to get RSU SPT from SDM.
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_ASYNC_RSU_GET_SPT
+ * a1 transaction job id
+ * a2-a17 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_REJECTED
+ * or INTEL_SIP_SMC_STATUS_BUSY
+ * a1-a17 not used
+ */
+#define INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_SPT (0xEA)
+#define INTEL_SIP_SMC_ASYNC_RSU_GET_SPT \
+ INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_SPT)
+
+/**
+ * Request INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS
+ * Async call to get RSU error status from SDM.
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS
+ * a1 transaction job id
+ * a2-a17 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_REJECTED
+ * or INTEL_SIP_SMC_STATUS_BUSY
+ * a1-a17 not used
+ */
+#define INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_ERROR_STATUS (0xEB)
+#define INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS \
+ INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_GET_ERROR_STATUS)
+
+/**
+ * Request INTEL_SIP_SMC_ASYNC_RSU_NOTIFY
+ * Async call to send NOTIFY value to SDM.
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_ASYNC_RSU_NOTIFY
+ * a1 transaction job id
+ * a2 notify value
+ * a3-a17 not used
+ *
+ * Return status:
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_REJECTED
+ * or INTEL_SIP_SMC_STATUS_BUSY
+ * a1-a17 not used
+ */
+#define INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_NOTIFY (0xEC)
+#define INTEL_SIP_SMC_ASYNC_RSU_NOTIFY \
+ INTEL_SIP_SMC_ASYNC_VAL(INTEL_SIP_SMC_ASYNC_FUNC_ID_RSU_NOTIFY)
+#endif
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index 19781b0f6429..d290060f4c73 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2017-2018, Intel Corporation
+ * Copyright (C) 2025, Altera Corporation
*/
#ifndef __STRATIX10_SVC_CLIENT_H
@@ -11,9 +12,12 @@
*
* fpga: for FPGA configuration
* rsu: for remote status update
+ * fcs: for FPGA crypto service
+ * hwmon: for hardware monitoring (voltage and temperature)
*/
#define SVC_CLIENT_FPGA "fpga"
#define SVC_CLIENT_RSU "rsu"
+#define SVC_CLIENT_FCS "fcs"
+#define SVC_CLIENT_HWMON "hwmon"
/*
* Status of the sent command, in bit number
@@ -49,6 +53,7 @@
#define SVC_STATUS_BUSY 4
#define SVC_STATUS_ERROR 5
#define SVC_STATUS_NO_SUPPORT 6
+#define SVC_STATUS_INVALID_PARAM 7
/*
* Flag bit for COMMAND_RECONFIG
@@ -66,6 +71,9 @@
#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 300
#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 720
#define SVC_RSU_REQUEST_TIMEOUT_MS 300
+#define SVC_FCS_REQUEST_TIMEOUT_MS 2000
+#define SVC_COMPLETED_TIMEOUT_MS 30000
+#define SVC_HWMON_REQUEST_TIMEOUT_MS 300
struct stratix10_svc_chan;
@@ -104,31 +112,98 @@ struct stratix10_svc_chan;
*
* @COMMAND_RSU_DCMF_VERSION: query firmware for the DCMF version, return status
* is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_POLL_SERVICE_STATUS: poll if the service request is complete,
+ * return status is SVC_STATUS_OK, SVC_STATUS_ERROR or SVC_STATUS_BUSY
+ *
+ * @COMMAND_FIRMWARE_VERSION: query running firmware version, return status
+ * is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_SMC_SVC_VERSION: Non-mailbox SMC SVC API Version,
+ * return status is SVC_STATUS_OK
+ *
+ * @COMMAND_MBOX_SEND_CMD: send generic mailbox command, return status is
+ * SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_DCMF_STATUS: query firmware for the DCMF status
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_RSU_GET_SPT_TABLE: query firmware for SPT table
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_REQUEST_SERVICE: request validation of image from firmware,
+ * return status is SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM
+ *
+ * @COMMAND_FCS_SEND_CERTIFICATE: send a certificate, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_GET_PROVISION_DATA: read the provisioning data, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_DATA_ENCRYPTION: encrypt the data, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_DATA_DECRYPTION: decrypt the data, return status is
+ * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR
+ *
+ * @COMMAND_FCS_RANDOM_NUMBER_GEN: generate a random number, return status
+ * is SVC_STATUS_OK, SVC_STATUS_ERROR
+ *
+ * @COMMAND_HWMON_READTEMP: query the temperature from the hardware monitor,
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
+ * @COMMAND_HWMON_READVOLT: query the voltage from the hardware monitor,
+ * return status is SVC_STATUS_OK or SVC_STATUS_ERROR
*/
enum stratix10_svc_command_code {
+ /* for FPGA */
COMMAND_NOOP = 0,
COMMAND_RECONFIG,
COMMAND_RECONFIG_DATA_SUBMIT,
COMMAND_RECONFIG_DATA_CLAIM,
COMMAND_RECONFIG_STATUS,
- COMMAND_RSU_STATUS,
+ /* for RSU */
+ COMMAND_RSU_STATUS = 10,
COMMAND_RSU_UPDATE,
COMMAND_RSU_NOTIFY,
COMMAND_RSU_RETRY,
COMMAND_RSU_MAX_RETRY,
COMMAND_RSU_DCMF_VERSION,
+ COMMAND_RSU_DCMF_STATUS,
+ COMMAND_FIRMWARE_VERSION,
+ COMMAND_RSU_GET_SPT_TABLE,
+ /* for FCS */
+ COMMAND_FCS_REQUEST_SERVICE = 20,
+ COMMAND_FCS_SEND_CERTIFICATE,
+ COMMAND_FCS_GET_PROVISION_DATA,
+ COMMAND_FCS_DATA_ENCRYPTION,
+ COMMAND_FCS_DATA_DECRYPTION,
+ COMMAND_FCS_RANDOM_NUMBER_GEN,
+ /* for general status poll */
+ COMMAND_POLL_SERVICE_STATUS = 40,
+ /* for generic mailbox send command */
+ COMMAND_MBOX_SEND_CMD = 100,
+ /* Non-mailbox SMC Call */
+ COMMAND_SMC_SVC_VERSION = 200,
+ /* for HWMON */
+ COMMAND_HWMON_READTEMP,
+ COMMAND_HWMON_READVOLT
};
/**
* struct stratix10_svc_client_msg - message sent by client to service
* @payload: starting address of data need be processed
- * @payload_length: data size in bytes
+ * @payload_length: size in bytes of the data to be processed
+ * @payload_output: starting address of processed data
+ * @payload_length_output: processed data size in bytes
* @command: service command
* @arg: args to be passed via registers and not physically mapped buffers
*/
struct stratix10_svc_client_msg {
void *payload;
size_t payload_length;
+ void *payload_output;
+ size_t payload_length_output;
enum stratix10_svc_command_code command;
u64 arg[3];
};
@@ -226,5 +301,92 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg);
* request process.
*/
void stratix10_svc_done(struct stratix10_svc_chan *chan);
+
+/**
+ * typedef async_callback_t - A type definition for an asynchronous callback function.
+ *
+ * This type defines a function pointer for an asynchronous callback.
+ * The callback function takes a single argument, which is a pointer to
+ * user-defined data.
+ *
+ * @cb_arg: Argument to be passed to the callback function.
+ */
+typedef void (*async_callback_t)(void *cb_arg);
+
+/**
+ * stratix10_svc_add_async_client - Add an asynchronous client to a Stratix 10
+ * service channel.
+ * @chan: Pointer to the Stratix 10 service channel structure.
+ * @use_unique_clientid: Boolean flag indicating whether to use a unique client ID.
+ *
+ * This function registers an asynchronous client with the specified Stratix 10
+ * service channel. If the use_unique_clientid flag is set to true, a unique client
+ * ID will be assigned to the client.
+ *
+ * Return: 0 on success, or a negative error code on failure:
+ * -EINVAL if the channel is NULL or the async controller is not initialized.
+ * -EALREADY if the async channel is already allocated.
+ * -ENOMEM if memory allocation fails.
+ * Other negative values if ID allocation fails.
+ */
+int stratix10_svc_add_async_client(struct stratix10_svc_chan *chan, bool use_unique_clientid);
+
+/**
+ * stratix10_svc_remove_async_client - Remove an asynchronous client from the Stratix 10
+ * service channel.
+ * @chan: Pointer to the Stratix 10 service channel structure.
+ *
+ * This function removes an asynchronous client from the specified Stratix 10 service channel.
+ * It is typically used to clean up and release resources associated with the client.
+ *
+ * Return: 0 on success, -EINVAL if the channel or asynchronous channel is invalid.
+ */
+int stratix10_svc_remove_async_client(struct stratix10_svc_chan *chan);
+
+/**
+ * stratix10_svc_async_send - Send an asynchronous message to the SDM mailbox
+ * in EL3 secure firmware.
+ * @chan: Pointer to the service channel structure.
+ * @msg: Pointer to the message to be sent.
+ * @handler: Pointer to the handler object used by the caller to track the transaction.
+ * @cb: Callback function to be called upon completion.
+ * @cb_arg: Argument to be passed to the callback function.
+ *
+ * This function sends a message asynchronously to the SDM mailbox in EL3
+ * secure firmware and registers a callback function to be invoked when the
+ * operation completes.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int stratix10_svc_async_send(struct stratix10_svc_chan *chan, void *msg, void **handler,
+ async_callback_t cb, void *cb_arg);
+
+/**
+ * stratix10_svc_async_poll - Polls the status of an asynchronous service request.
+ * @chan: Pointer to the service channel structure.
+ * @tx_handle: Handle to the transaction being polled.
+ * @data: Pointer to the callback data structure to be filled with the result.
+ *
+ * This function checks the status of an asynchronous service request
+ * and fills the provided callback data structure with the result.
+ *
+ * Return: 0 on success, -EINVAL if any input parameter is invalid or if the
+ * async controller is not initialized, -EAGAIN if the transaction is
+ * still in progress, or other negative error codes on failure.
+ */
+int stratix10_svc_async_poll(struct stratix10_svc_chan *chan, void *tx_handle,
+ struct stratix10_svc_cb_data *data);
+
+/**
+ * stratix10_svc_async_done - Complete an asynchronous transaction
+ * @chan: Pointer to the service channel structure
+ * @tx_handle: Pointer to the transaction handle
+ *
+ * This function completes an asynchronous transaction by removing the
+ * transaction from the hash table and deallocating the associated resources.
+ *
+ * Return: 0 on success, -EINVAL on invalid input or errors.
+ */
+int stratix10_svc_async_done(struct stratix10_svc_chan *chan, void *tx_handle);
+
#endif
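
A minimal usage sketch for the new asynchronous client API (editorial illustration, not part of the patch). It assumes a channel obtained earlier, e.g. via the header's existing stratix10_svc_request_channel_byname() helper, and a caller that simply sleeps until the completion callback fires; COMMAND_RSU_NOTIFY and struct stratix10_svc_cb_data come from this header:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/firmware/intel/stratix10-svc-client.h>

static void demo_async_cb(void *cb_arg)
{
	/* Transaction finished in firmware; wake the waiting caller. */
	complete(cb_arg);
}

static int demo_async_rsu_notify(struct stratix10_svc_chan *chan, u64 value)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct stratix10_svc_client_msg msg = {
		.command = COMMAND_RSU_NOTIFY,
		.arg = { value },
	};
	struct stratix10_svc_cb_data data;
	void *handle;
	int ret;

	ret = stratix10_svc_add_async_client(chan, false);
	if (ret && ret != -EALREADY)
		return ret;

	ret = stratix10_svc_async_send(chan, &msg, &handle, demo_async_cb, &done);
	if (ret)
		return ret;

	wait_for_completion(&done);

	/* Fetch the result, then free the per-transaction resources. */
	ret = stratix10_svc_async_poll(chan, handle, &data);
	stratix10_svc_async_done(chan, handle);
	return ret;
}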
diff --git a/include/linux/firmware/mediatek/mtk-adsp-ipc.h b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
new file mode 100644
index 000000000000..6e86799a7dc4
--- /dev/null
+++ b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef MTK_ADSP_IPC_H
+#define MTK_ADSP_IPC_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox_client.h>
+
+#define MTK_ADSP_IPC_REQ 0
+#define MTK_ADSP_IPC_RSP 1
+#define MTK_ADSP_IPC_OP_REQ 0x1
+#define MTK_ADSP_IPC_OP_RSP 0x2
+
+enum {
+ MTK_ADSP_MBOX_REPLY,
+ MTK_ADSP_MBOX_REQUEST,
+ MTK_ADSP_MBOX_NUM,
+};
+
+struct mtk_adsp_ipc;
+
+struct mtk_adsp_ipc_ops {
+ void (*handle_reply)(struct mtk_adsp_ipc *ipc);
+ void (*handle_request)(struct mtk_adsp_ipc *ipc);
+};
+
+struct mtk_adsp_chan {
+ struct mtk_adsp_ipc *ipc;
+ struct mbox_client cl;
+ struct mbox_chan *ch;
+ char *name;
+ int idx;
+};
+
+struct mtk_adsp_ipc {
+ struct mtk_adsp_chan chans[MTK_ADSP_MBOX_NUM];
+ struct device *dev;
+ const struct mtk_adsp_ipc_ops *ops;
+ void *private_data;
+};
+
+static inline void mtk_adsp_ipc_set_data(struct mtk_adsp_ipc *ipc, void *data)
+{
+ ipc->private_data = data;
+}
+
+static inline void *mtk_adsp_ipc_get_data(struct mtk_adsp_ipc *ipc)
+{
+ return ipc->private_data;
+}
+
+int mtk_adsp_ipc_send(struct mtk_adsp_ipc *ipc, unsigned int idx, uint32_t op);
+
+#endif /* MTK_ADSP_IPC_H */
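
A hedged sketch of wiring up the two MTK ADSP mailbox directions; the probe code that allocates and fills struct mtk_adsp_ipc is assumed to live in the platform driver and is not shown:

#include <linux/firmware/mediatek/mtk-adsp-ipc.h>

static void demo_handle_reply(struct mtk_adsp_ipc *ipc)
{
	/* The DSP acknowledged our previous request. */
	dev_dbg(ipc->dev, "adsp reply received\n");
}

static void demo_handle_request(struct mtk_adsp_ipc *ipc)
{
	/* The DSP sent a request; acknowledge it on the reply channel. */
	mtk_adsp_ipc_send(ipc, MTK_ADSP_MBOX_REPLY, MTK_ADSP_IPC_OP_RSP);
}

static const struct mtk_adsp_ipc_ops demo_ops = {
	.handle_reply	= demo_handle_reply,
	.handle_request	= demo_handle_request,
};

static int demo_kick_request(struct mtk_adsp_ipc *ipc)
{
	/* Signal a new request toward the DSP. */
	return mtk_adsp_ipc_send(ipc, MTK_ADSP_MBOX_REQUEST, MTK_ADSP_IPC_OP_REQ);
}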
diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h
index 95b0da2326a9..8eaf8922ab02 100644
--- a/include/linux/firmware/meson/meson_sm.h
+++ b/include/linux/firmware/meson/meson_sm.h
@@ -19,7 +19,7 @@ enum {
struct meson_sm_firmware;
int meson_sm_call(struct meson_sm_firmware *fw, unsigned int cmd_index,
- u32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+ s32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer,
unsigned int b_size, unsigned int cmd_index, u32 arg0,
u32 arg1, u32 arg2, u32 arg3, u32 arg4);
diff --git a/include/linux/firmware/qcom/qcom_qseecom.h b/include/linux/firmware/qcom/qcom_qseecom.h
new file mode 100644
index 000000000000..3387897bf368
--- /dev/null
+++ b/include/linux/firmware/qcom/qcom_qseecom.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Driver for Qualcomm Secure Execution Environment (SEE) interface (QSEECOM).
+ * Responsible for setting up and managing QSEECOM client devices.
+ *
+ * Copyright (C) 2023 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef __QCOM_QSEECOM_H
+#define __QCOM_QSEECOM_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+
+#include <linux/firmware/qcom/qcom_scm.h>
+
+/**
+ * struct qseecom_client - QSEECOM client device.
+ * @aux_dev: Underlying auxiliary device.
+ * @app_id: ID of the loaded application.
+ */
+struct qseecom_client {
+ struct auxiliary_device aux_dev;
+ u32 app_id;
+};
+
+/**
+ * qcom_qseecom_app_send() - Send to and receive data from a given QSEE app.
+ * @client: The QSEECOM client associated with the target app.
+ * @req: Request buffer sent to the app (must be TZ memory).
+ * @req_size: Size of the request buffer.
+ * @rsp: Response buffer, written to by the app (must be TZ memory).
+ * @rsp_size: Size of the response buffer.
+ *
+ * Sends a request to the QSEE app associated with the given client and reads
+ * back its response. The caller must provide two DMA memory regions, one for
+ * the request and one for the response, and fill out the @req region with the
+ * respective (app-specific) request data. The QSEE app reads this and returns
+ * its response in the @rsp region.
+ *
+ * Note: This is a convenience wrapper around qcom_scm_qseecom_app_send().
+ * Clients should prefer to use this wrapper.
+ *
+ * Return: Zero on success, nonzero on failure.
+ */
+static inline int qcom_qseecom_app_send(struct qseecom_client *client,
+ void *req, size_t req_size,
+ void *rsp, size_t rsp_size)
+{
+ return qcom_scm_qseecom_app_send(client->app_id, req, req_size, rsp, rsp_size);
+}
+
+#endif /* __QCOM_QSEECOM_H */
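
A sketch of a QSEECOM client call honouring the requirement above that both buffers live in TZ-capable memory; here they come from a qcom_tzmem pool (see qcom_tzmem.h below), which the caller is assumed to have created at probe time:

#include <linux/errno.h>
#include <linux/firmware/qcom/qcom_qseecom.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/sizes.h>

static int demo_query_app(struct qseecom_client *client,
			  struct qcom_tzmem_pool *pool)
{
	void *req, *rsp;
	int ret;

	req = qcom_tzmem_alloc(pool, SZ_4K, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	rsp = qcom_tzmem_alloc(pool, SZ_4K, GFP_KERNEL);
	if (!rsp) {
		qcom_tzmem_free(req);
		return -ENOMEM;
	}

	/* ... fill *req with the app-specific command structure ... */

	ret = qcom_qseecom_app_send(client, req, SZ_4K, rsp, SZ_4K);

	qcom_tzmem_free(rsp);
	qcom_tzmem_free(req);
	return ret;
}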
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
new file mode 100644
index 000000000000..a55ca771286b
--- /dev/null
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2010-2015, 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ */
+#ifndef __QCOM_SCM_H
+#define __QCOM_SCM_H
+
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/cpumask.h>
+
+#include <dt-bindings/firmware/qcom,scm.h>
+
+#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
+#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
+#define QCOM_SCM_HDCP_MAX_REQ_CNT 5
+
+struct qcom_scm_hdcp_req {
+ u32 addr;
+ u32 val;
+};
+
+struct qcom_scm_vmperm {
+ int vmid;
+ int perm;
+};
+
+enum qcom_scm_ocmem_client {
+ QCOM_SCM_OCMEM_UNUSED_ID = 0x0,
+ QCOM_SCM_OCMEM_GRAPHICS_ID,
+ QCOM_SCM_OCMEM_VIDEO_ID,
+ QCOM_SCM_OCMEM_LP_AUDIO_ID,
+ QCOM_SCM_OCMEM_SENSORS_ID,
+ QCOM_SCM_OCMEM_OTHER_OS_ID,
+ QCOM_SCM_OCMEM_DEBUG_ID,
+};
+
+enum qcom_scm_sec_dev_id {
+ QCOM_SCM_MDSS_DEV_ID = 1,
+ QCOM_SCM_OCMEM_DEV_ID = 5,
+ QCOM_SCM_PCIE0_DEV_ID = 11,
+ QCOM_SCM_PCIE1_DEV_ID = 12,
+ QCOM_SCM_GFX_DEV_ID = 18,
+ QCOM_SCM_UFS_DEV_ID = 19,
+ QCOM_SCM_ICE_DEV_ID = 20,
+};
+
+enum qcom_scm_ice_cipher {
+ QCOM_SCM_ICE_CIPHER_AES_128_XTS = 0,
+ QCOM_SCM_ICE_CIPHER_AES_128_CBC = 1,
+ QCOM_SCM_ICE_CIPHER_AES_256_XTS = 3,
+ QCOM_SCM_ICE_CIPHER_AES_256_CBC = 4,
+};
+
+#define QCOM_SCM_PERM_READ 0x4
+#define QCOM_SCM_PERM_WRITE 0x2
+#define QCOM_SCM_PERM_EXEC 0x1
+#define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE)
+#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
+
+bool qcom_scm_is_available(void);
+
+int qcom_scm_set_cold_boot_addr(void *entry);
+int qcom_scm_set_warm_boot_addr(void *entry);
+void qcom_scm_cpu_power_down(u32 flags);
+int qcom_scm_set_remote_state(u32 state, u32 id);
+
+struct qcom_scm_pas_metadata {
+ void *ptr;
+ dma_addr_t phys;
+ ssize_t size;
+};
+
+int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
+ struct qcom_scm_pas_metadata *ctx);
+void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx);
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
+int qcom_scm_pas_auth_and_reset(u32 peripheral);
+int qcom_scm_pas_shutdown(u32 peripheral);
+bool qcom_scm_pas_supported(u32 peripheral);
+
+int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
+int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
+
+bool qcom_scm_restore_sec_cfg_available(void);
+int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank);
+bool qcom_scm_set_gpu_smmu_aperture_is_available(void);
+int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
+int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size);
+int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
+ u32 cp_nonpixel_start, u32 cp_nonpixel_size);
+int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, u64 *src,
+ const struct qcom_scm_vmperm *newvm,
+ unsigned int dest_cnt);
+
+bool qcom_scm_ocmem_lock_available(void);
+int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
+ u32 mode);
+int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size);
+
+bool qcom_scm_ice_available(void);
+int qcom_scm_ice_invalidate_key(u32 index);
+int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
+ enum qcom_scm_ice_cipher cipher, u32 data_unit_size);
+bool qcom_scm_has_wrapped_key_support(void);
+int qcom_scm_derive_sw_secret(const u8 *eph_key, size_t eph_key_size,
+ u8 *sw_secret, size_t sw_secret_size);
+int qcom_scm_generate_ice_key(u8 *lt_key, size_t lt_key_size);
+int qcom_scm_prepare_ice_key(const u8 *lt_key, size_t lt_key_size,
+ u8 *eph_key, size_t eph_key_size);
+int qcom_scm_import_ice_key(const u8 *raw_key, size_t raw_key_size,
+ u8 *lt_key, size_t lt_key_size);
+
+bool qcom_scm_hdcp_available(void);
+int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp);
+
+int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt);
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
+
+int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
+ u64 limit_node, u32 node_id, u64 version);
+int qcom_scm_lmh_profile_change(u32 profile_id);
+bool qcom_scm_lmh_dcvsh_available(void);
+
+/*
+ * Request TZ to program set of access controlled registers necessary
+ * irrespective of any features
+ */
+#define QCOM_SCM_GPU_ALWAYS_EN_REQ BIT(0)
+/*
+ * Request TZ to program BCL id to access controlled register when BCL is
+ * enabled
+ */
+#define QCOM_SCM_GPU_BCL_EN_REQ BIT(1)
+/*
+ * Request TZ to program set of access controlled register for CLX feature
+ * when enabled
+ */
+#define QCOM_SCM_GPU_CLX_EN_REQ BIT(2)
+/*
+ * Request TZ to program tsense ids to access controlled registers for reading
+ * gpu temperature sensors
+ */
+#define QCOM_SCM_GPU_TSENSE_EN_REQ BIT(3)
+
+int qcom_scm_gpu_init_regs(u32 gpu_req);
+
+int qcom_scm_shm_bridge_create(u64 pfn_and_ns_perm_flags,
+ u64 ipfn_and_s_perm_flags, u64 size_and_flags,
+ u64 ns_vmids, u64 *handle);
+int qcom_scm_shm_bridge_delete(u64 handle);
+
+#ifdef CONFIG_QCOM_QSEECOM
+
+int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id);
+int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
+ void *rsp, size_t rsp_size);
+
+#else /* CONFIG_QCOM_QSEECOM */
+
+static inline int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
+{
+ return -EINVAL;
+}
+
+static inline int qcom_scm_qseecom_app_send(u32 app_id,
+ void *req, size_t req_size,
+ void *rsp, size_t rsp_size)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_QCOM_QSEECOM */
+
+int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size,
+ phys_addr_t outbuf, size_t outbuf_size,
+ u64 *result, u64 *response_type);
+int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size,
+ u64 *result, u64 *response_type);
+
+#endif
diff --git a/include/linux/firmware/qcom/qcom_tzmem.h b/include/linux/firmware/qcom/qcom_tzmem.h
new file mode 100644
index 000000000000..23173e0c3ddd
--- /dev/null
+++ b/include/linux/firmware/qcom/qcom_tzmem.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023-2024 Linaro Ltd.
+ */
+
+#ifndef __QCOM_TZMEM_H
+#define __QCOM_TZMEM_H
+
+#include <linux/cleanup.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+
+struct device;
+struct qcom_tzmem_pool;
+
+/**
+ * enum qcom_tzmem_policy - Policy for pool growth.
+ */
+enum qcom_tzmem_policy {
+ /**
+ * @QCOM_TZMEM_POLICY_STATIC: Static pool,
+ * never grow above initial size.
+ */
+ QCOM_TZMEM_POLICY_STATIC = 1,
+ /**
+ * @QCOM_TZMEM_POLICY_MULTIPLIER: When out of memory,
+ * add increment * current size of memory.
+ */
+ QCOM_TZMEM_POLICY_MULTIPLIER,
+ /**
+ * @QCOM_TZMEM_POLICY_ON_DEMAND: When out of memory
+ * add as much as is needed until max_size.
+ */
+ QCOM_TZMEM_POLICY_ON_DEMAND,
+};
+
+/**
+ * struct qcom_tzmem_pool_config - TZ memory pool configuration.
+ * @initial_size: Number of bytes to allocate for the pool during its creation.
+ * @policy: Pool size growth policy.
+ * @increment: Used with policies that allow pool growth.
+ * @max_size: Size above which the pool will never grow.
+ */
+struct qcom_tzmem_pool_config {
+ size_t initial_size;
+ enum qcom_tzmem_policy policy;
+ size_t increment;
+ size_t max_size;
+};
+
+struct qcom_tzmem_pool *
+qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config);
+void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool);
+struct qcom_tzmem_pool *
+devm_qcom_tzmem_pool_new(struct device *dev,
+ const struct qcom_tzmem_pool_config *config);
+
+void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp);
+void qcom_tzmem_free(void *ptr);
+
+DEFINE_FREE(qcom_tzmem, void *, if (_T) qcom_tzmem_free(_T))
+
+phys_addr_t qcom_tzmem_to_phys(void *ptr);
+
+#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE)
+int qcom_tzmem_shm_bridge_create(phys_addr_t paddr, size_t size, u64 *handle);
+void qcom_tzmem_shm_bridge_delete(u64 handle);
+#else
+static inline int qcom_tzmem_shm_bridge_create(phys_addr_t paddr,
+ size_t size, u64 *handle)
+{
+ return 0;
+}
+
+static inline void qcom_tzmem_shm_bridge_delete(u64 handle)
+{
+}
+#endif
+
+#endif /* __QCOM_TZMEM_H */
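
A short sketch of pool creation and scope-managed allocation; the sizes are illustrative, and __free(qcom_tzmem) relies on the DEFINE_FREE() class declared above:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/sizes.h>

static int demo_tzmem_use(struct device *dev)
{
	const struct qcom_tzmem_pool_config config = {
		.initial_size	= SZ_64K,
		.policy		= QCOM_TZMEM_POLICY_ON_DEMAND,
		.max_size	= SZ_1M,
	};
	struct qcom_tzmem_pool *pool;
	phys_addr_t phys;

	pool = devm_qcom_tzmem_pool_new(dev, &config);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	/* Buffer is handed back to the pool when it goes out of scope. */
	void *buf __free(qcom_tzmem) = qcom_tzmem_alloc(pool, SZ_4K, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	phys = qcom_tzmem_to_phys(buf);
	dev_dbg(dev, "TZ buffer at %pa\n", &phys);
	return 0;
}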
diff --git a/include/linux/firmware/samsung/exynos-acpm-protocol.h b/include/linux/firmware/samsung/exynos-acpm-protocol.h
new file mode 100644
index 000000000000..2091da965a5a
--- /dev/null
+++ b/include/linux/firmware/samsung/exynos-acpm-protocol.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2024 Linaro Ltd.
+ */
+
+#ifndef __EXYNOS_ACPM_PROTOCOL_H
+#define __EXYNOS_ACPM_PROTOCOL_H
+
+#include <linux/types.h>
+
+struct acpm_handle;
+struct device_node;
+
+struct acpm_dvfs_ops {
+ int (*set_rate)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, unsigned int clk_id,
+ unsigned long rate);
+ unsigned long (*get_rate)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id,
+ unsigned int clk_id);
+};
+
+struct acpm_pmic_ops {
+ int (*read_reg)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 *buf);
+ int (*bulk_read)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, u8 *buf);
+ int (*write_reg)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value);
+ int (*bulk_write)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 count, const u8 *buf);
+ int (*update_reg)(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+ u8 value, u8 mask);
+};
+
+struct acpm_ops {
+ struct acpm_dvfs_ops dvfs_ops;
+ struct acpm_pmic_ops pmic_ops;
+};
+
+/**
+ * struct acpm_handle - Reference to an initialized protocol instance
+ * @ops:
+ */
+struct acpm_handle {
+ struct acpm_ops ops;
+};
+
+struct device;
+
+#if IS_ENABLED(CONFIG_EXYNOS_ACPM_PROTOCOL)
+const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
+ struct device_node *np);
+#else
+
+static inline const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
+ struct device_node *np)
+{
+ return NULL;
+}
+#endif
+
+#endif /* __EXYNOS_ACPM_PROTOCOL_H */
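
A hedged sketch of consuming the ACPM handle for a PMIC register read; the channel, type and register arguments below are placeholders rather than real Exynos values, and the disabled-protocol stub (NULL return) is handled explicitly:

#include <linux/err.h>
#include <linux/firmware/samsung/exynos-acpm-protocol.h>

static int demo_pmic_read(struct device *dev, struct device_node *acpm_np,
			  u8 *val)
{
	const struct acpm_handle *acpm = devm_acpm_get_by_node(dev, acpm_np);

	if (IS_ERR_OR_NULL(acpm))
		return acpm ? PTR_ERR(acpm) : -ENODEV;

	/* acpm_chan_id 0; type/reg/chan values are illustrative only. */
	return acpm->ops.pmic_ops.read_reg(acpm, 0, 0x1, 0x00, 0, val);
}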
diff --git a/include/linux/firmware/thead/thead,th1520-aon.h b/include/linux/firmware/thead/thead,th1520-aon.h
new file mode 100644
index 000000000000..dae132b66873
--- /dev/null
+++ b/include/linux/firmware/thead/thead,th1520-aon.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Alibaba Group Holding Limited.
+ */
+
+#ifndef _THEAD_AON_H
+#define _THEAD_AON_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+#define AON_RPC_MSG_MAGIC (0xef)
+#define TH1520_AON_RPC_VERSION 2
+#define TH1520_AON_RPC_MSG_NUM 7
+
+struct th1520_aon_chan;
+
+enum th1520_aon_rpc_svc {
+ TH1520_AON_RPC_SVC_UNKNOWN = 0,
+ TH1520_AON_RPC_SVC_PM = 1,
+ TH1520_AON_RPC_SVC_MISC = 2,
+ TH1520_AON_RPC_SVC_AVFS = 3,
+ TH1520_AON_RPC_SVC_SYS = 4,
+ TH1520_AON_RPC_SVC_WDG = 5,
+ TH1520_AON_RPC_SVC_LPM = 6,
+ TH1520_AON_RPC_SVC_MAX = 0x3F,
+};
+
+enum th1520_aon_misc_func {
+ TH1520_AON_MISC_FUNC_UNKNOWN = 0,
+ TH1520_AON_MISC_FUNC_SET_CONTROL = 1,
+ TH1520_AON_MISC_FUNC_GET_CONTROL = 2,
+ TH1520_AON_MISC_FUNC_REGDUMP_CFG = 3,
+};
+
+enum th1520_aon_wdg_func {
+ TH1520_AON_WDG_FUNC_UNKNOWN = 0,
+ TH1520_AON_WDG_FUNC_START = 1,
+ TH1520_AON_WDG_FUNC_STOP = 2,
+ TH1520_AON_WDG_FUNC_PING = 3,
+ TH1520_AON_WDG_FUNC_TIMEOUTSET = 4,
+ TH1520_AON_WDG_FUNC_RESTART = 5,
+ TH1520_AON_WDG_FUNC_GET_STATE = 6,
+ TH1520_AON_WDG_FUNC_POWER_OFF = 7,
+ TH1520_AON_WDG_FUNC_AON_WDT_ON = 8,
+ TH1520_AON_WDG_FUNC_AON_WDT_OFF = 9,
+};
+
+enum th1520_aon_sys_func {
+ TH1520_AON_SYS_FUNC_UNKNOWN = 0,
+ TH1520_AON_SYS_FUNC_AON_RESERVE_MEM = 1,
+};
+
+enum th1520_aon_lpm_func {
+ TH1520_AON_LPM_FUNC_UNKNOWN = 0,
+ TH1520_AON_LPM_FUNC_REQUIRE_STR = 1,
+ TH1520_AON_LPM_FUNC_RESUME_STR = 2,
+ TH1520_AON_LPM_FUNC_REQUIRE_STD = 3,
+ TH1520_AON_LPM_FUNC_CPUHP = 4,
+ TH1520_AON_LPM_FUNC_REGDUMP_CFG = 5,
+};
+
+enum th1520_aon_pm_func {
+ TH1520_AON_PM_FUNC_UNKNOWN = 0,
+ TH1520_AON_PM_FUNC_SET_RESOURCE_REGULATOR = 1,
+ TH1520_AON_PM_FUNC_GET_RESOURCE_REGULATOR = 2,
+ TH1520_AON_PM_FUNC_SET_RESOURCE_POWER_MODE = 3,
+ TH1520_AON_PM_FUNC_PWR_SET = 4,
+ TH1520_AON_PM_FUNC_PWR_GET = 5,
+ TH1520_AON_PM_FUNC_CHECK_FAULT = 6,
+ TH1520_AON_PM_FUNC_GET_TEMPERATURE = 7,
+};
+
+struct th1520_aon_rpc_msg_hdr {
+ u8 ver; /* version of msg hdr */
+	u8 size; /* msg size in bytes, including the rpc msg header itself */
+ u8 svc; /* rpc main service id */
+ u8 func; /* rpc sub func id of specific service, sent by caller */
+} __packed __aligned(1);
+
+struct th1520_aon_rpc_ack_common {
+ struct th1520_aon_rpc_msg_hdr hdr;
+ u8 err_code;
+} __packed __aligned(1);
+
+#define RPC_SVC_MSG_TYPE_DATA 0
+#define RPC_SVC_MSG_TYPE_ACK 1
+#define RPC_SVC_MSG_NEED_ACK 0
+#define RPC_SVC_MSG_NO_NEED_ACK 1
+
+#define RPC_GET_VER(MESG) ((MESG)->ver)
+#define RPC_SET_VER(MESG, VER) ((MESG)->ver = (VER))
+#define RPC_GET_SVC_ID(MESG) ((MESG)->svc & 0x3F)
+#define RPC_SET_SVC_ID(MESG, ID) ((MESG)->svc |= 0x3F & (ID))
+#define RPC_GET_SVC_FLAG_MSG_TYPE(MESG) (((MESG)->svc & 0x80) >> 7)
+#define RPC_SET_SVC_FLAG_MSG_TYPE(MESG, TYPE) ((MESG)->svc |= (TYPE) << 7)
+#define RPC_GET_SVC_FLAG_ACK_TYPE(MESG) (((MESG)->svc & 0x40) >> 6)
+#define RPC_SET_SVC_FLAG_ACK_TYPE(MESG, ACK) ((MESG)->svc |= (ACK) << 6)
+
+#define RPC_SET_BE64(MESG, OFFSET, SET_DATA) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ u64 _offset = (OFFSET); \
+ u64 _set_data = (SET_DATA); \
+ data[_offset + 7] = _set_data & 0xFF; \
+ data[_offset + 6] = (_set_data & 0xFF00) >> 8; \
+ data[_offset + 5] = (_set_data & 0xFF0000) >> 16; \
+ data[_offset + 4] = (_set_data & 0xFF000000) >> 24; \
+ data[_offset + 3] = (_set_data & 0xFF00000000) >> 32; \
+ data[_offset + 2] = (_set_data & 0xFF0000000000) >> 40; \
+ data[_offset + 1] = (_set_data & 0xFF000000000000) >> 48; \
+ data[_offset + 0] = (_set_data & 0xFF00000000000000) >> 56; \
+ } while (0)
+
+#define RPC_SET_BE32(MESG, OFFSET, SET_DATA) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ u64 _offset = (OFFSET); \
+ u64 _set_data = (SET_DATA); \
+ data[_offset + 3] = (_set_data) & 0xFF; \
+ data[_offset + 2] = (_set_data & 0xFF00) >> 8; \
+ data[_offset + 1] = (_set_data & 0xFF0000) >> 16; \
+ data[_offset + 0] = (_set_data & 0xFF000000) >> 24; \
+ } while (0)
+
+#define RPC_SET_BE16(MESG, OFFSET, SET_DATA) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ u64 _offset = (OFFSET); \
+ u64 _set_data = (SET_DATA); \
+ data[_offset + 1] = (_set_data) & 0xFF; \
+ data[_offset + 0] = (_set_data & 0xFF00) >> 8; \
+ } while (0)
+
+#define RPC_SET_U8(MESG, OFFSET, SET_DATA) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ data[OFFSET] = (SET_DATA) & 0xFF; \
+ } while (0)
+
+#define RPC_GET_BE64(MESG, OFFSET, PTR)                                 \
+	do {                                                            \
+		u8 *data = (u8 *)(MESG);                                \
+		u64 _offset = (OFFSET);                                 \
+		*(u64 *)(PTR) =                                         \
+			((u64)data[_offset + 7] |                       \
+			 (u64)data[_offset + 6] << 8 |                  \
+			 (u64)data[_offset + 5] << 16 |                 \
+			 (u64)data[_offset + 4] << 24 |                 \
+			 (u64)data[_offset + 3] << 32 |                 \
+			 (u64)data[_offset + 2] << 40 |                 \
+			 (u64)data[_offset + 1] << 48 |                 \
+			 (u64)data[_offset + 0] << 56);                 \
+	} while (0)
+
+#define RPC_GET_BE32(MESG, OFFSET, PTR) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ u64 _offset = (OFFSET); \
+ *(u32 *)(PTR) = \
+ (data[_offset + 3] | data[_offset + 2] << 8 | \
+ data[_offset + 1] << 16 | data[_offset + 0] << 24); \
+ } while (0)
+
+#define RPC_GET_BE16(MESG, OFFSET, PTR) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ u64 _offset = (OFFSET); \
+ *(u16 *)(PTR) = (data[_offset + 1] | data[_offset + 0] << 8); \
+ } while (0)
+
+#define RPC_GET_U8(MESG, OFFSET, PTR) \
+ do { \
+ u8 *data = (u8 *)(MESG); \
+ *(u8 *)(PTR) = (data[OFFSET]); \
+ } while (0)
+
+/*
+ * Defines for SC PM Power Mode
+ */
+#define TH1520_AON_PM_PW_MODE_OFF 0 /* Power off */
+#define TH1520_AON_PM_PW_MODE_STBY 1 /* Power in standby */
+#define TH1520_AON_PM_PW_MODE_LP 2 /* Power in low-power */
+#define TH1520_AON_PM_PW_MODE_ON 3 /* Power on */
+
+/*
+ * Defines for AON power islands
+ */
+#define TH1520_AON_AUDIO_PD 0
+#define TH1520_AON_VDEC_PD 1
+#define TH1520_AON_NPU_PD 2
+#define TH1520_AON_VENC_PD 3
+#define TH1520_AON_GPU_PD 4
+#define TH1520_AON_DSP0_PD 5
+#define TH1520_AON_DSP1_PD 6
+
+struct th1520_aon_chan *th1520_aon_init(struct device *dev);
+void th1520_aon_deinit(struct th1520_aon_chan *aon_chan);
+
+int th1520_aon_call_rpc(struct th1520_aon_chan *aon_chan, void *msg);
+int th1520_aon_power_update(struct th1520_aon_chan *aon_chan, u16 rsrc,
+ bool power_on);
+
+#endif /* _THEAD_AON_H */
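
A sketch of composing a power-set request with the accessor macros above; the payload layout after the header and the use of sizeof() for hdr.size are assumptions about the firmware ABI, not taken from this patch:

#include <linux/firmware/thead/thead,th1520-aon.h>

struct demo_aon_msg {
	struct th1520_aon_rpc_msg_hdr hdr;
	u8 payload[12];			/* illustrative payload area */
} __packed __aligned(1);

static void demo_fill_pwr_set(struct demo_aon_msg *msg, u16 rsrc, bool on)
{
	RPC_SET_VER(&msg->hdr, TH1520_AON_RPC_VERSION);
	RPC_SET_SVC_ID(&msg->hdr, TH1520_AON_RPC_SVC_PM);
	RPC_SET_SVC_FLAG_MSG_TYPE(&msg->hdr, RPC_SVC_MSG_TYPE_DATA);
	RPC_SET_SVC_FLAG_ACK_TYPE(&msg->hdr, RPC_SVC_MSG_NEED_ACK);
	msg->hdr.func = TH1520_AON_PM_FUNC_PWR_SET;
	msg->hdr.size = sizeof(*msg);	/* includes the header itself */

	/* Offsets into the payload are illustrative; check the ABI. */
	RPC_SET_BE16(msg->payload, 0, rsrc);
	RPC_SET_U8(msg->payload, 2, on ? TH1520_AON_PM_PW_MODE_ON :
					 TH1520_AON_PM_PW_MODE_OFF);
}

The filled message would then be handed to th1520_aon_call_rpc(), which blocks until the firmware acknowledges it.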
diff --git a/include/linux/firmware/trusted_foundations.h b/include/linux/firmware/trusted_foundations.h
index be5984bda592..931b6c5c72df 100644
--- a/include/linux/firmware/trusted_foundations.h
+++ b/include/linux/firmware/trusted_foundations.h
@@ -71,12 +71,16 @@ static inline void register_trusted_foundations(
static inline void of_register_trusted_foundations(void)
{
+ struct device_node *np = of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations");
+
+ if (!np)
+ return;
+ of_node_put(np);
/*
* If we find the target should enable TF but does not support it,
* fail as the system won't be able to do much anyway
*/
- if (of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations"))
- register_trusted_foundations(NULL);
+ register_trusted_foundations(NULL);
}
static inline bool trusted_foundations_registered(void)
diff --git a/include/linux/firmware/xlnx-event-manager.h b/include/linux/firmware/xlnx-event-manager.h
new file mode 100644
index 000000000000..645dd34155e6
--- /dev/null
+++ b/include/linux/firmware/xlnx-event-manager.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx Event Management Driver
+ *
+ * Copyright (C) 2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _FIRMWARE_XLNX_EVENT_MANAGER_H_
+#define _FIRMWARE_XLNX_EVENT_MANAGER_H_
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define CB_MAX_PAYLOAD_SIZE (4U) /* maximum number of 32-bit payload words */
+
+#define EVENT_SUBSYSTEM_RESTART (4U)
+
+#define PM_DEV_ACPU_0_0 (0x1810c0afU)
+#define PM_DEV_ACPU_0 (0x1810c003U)
+
+/************************** Exported Function *****************************/
+
+typedef void (*event_cb_func_t)(const u32 *payload, void *data);
+
+#if IS_REACHABLE(CONFIG_XLNX_EVENT_MANAGER)
+int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, const bool wake,
+ event_cb_func_t cb_fun, void *data);
+
+int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, event_cb_func_t cb_fun, void *data);
+#else
+static inline int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, const bool wake,
+ event_cb_func_t cb_fun, void *data)
+{
+ return -ENODEV;
+}
+
+static inline int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, event_cb_func_t cb_fun, void *data)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* _FIRMWARE_XLNX_EVENT_MANAGER_H_ */
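
A sketch of subscribing to a Versal DDRMC correctable-error notification; the node and mask constants come from the xlnx-zynqmp.h additions further down in this patch:

#include <linux/firmware/xlnx-event-manager.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/printk.h>

static void demo_err_cb(const u32 *payload, void *data)
{
	/* Up to CB_MAX_PAYLOAD_SIZE words of event data are delivered. */
	pr_warn("versal error event, payload[0]=0x%x\n", payload[0]);
}

static int demo_register_err_event(void)
{
	return xlnx_register_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1,
				   XPM_EVENT_ERROR_MASK_DDRMC_CR,
				   false /* no wake */, demo_err_cb, NULL);
}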
diff --git a/include/linux/firmware/xlnx-zynqmp-ufs.h b/include/linux/firmware/xlnx-zynqmp-ufs.h
new file mode 100644
index 000000000000..d3538dd5822a
--- /dev/null
+++ b/include/linux/firmware/xlnx-zynqmp-ufs.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Firmware layer for UFS APIs.
+ *
+ * Copyright (c) 2025 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __FIRMWARE_XLNX_ZYNQMP_UFS_H__
+#define __FIRMWARE_XLNX_ZYNQMP_UFS_H__
+
+#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
+int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready);
+int zynqmp_pm_is_sram_init_done(bool *is_done);
+int zynqmp_pm_set_sram_bypass(void);
+int zynqmp_pm_get_ufs_calibration_values(u32 *val);
+#else
+static inline int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_is_sram_init_done(bool *is_done)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_sram_bypass(void)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_ufs_calibration_values(u32 *val)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __FIRMWARE_XLNX_ZYNQMP_UFS_H__ */
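
A sketch of the call order these four UFS helpers suggest (readiness checks, then SRAM bypass, then calibration readout); whether a host driver really sequences them exactly this way is an assumption:

#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp-ufs.h>
#include <linux/types.h>

static int demo_ufs_fw_setup(void)
{
	bool ready = false, done = false;
	u32 calib;
	int ret;

	ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&ready);
	if (ret || !ready)
		return ret ? ret : -EBUSY;

	ret = zynqmp_pm_is_sram_init_done(&done);
	if (ret || !done)
		return ret ? ret : -EBUSY;

	ret = zynqmp_pm_set_sram_bypass();
	if (ret)
		return ret;

	return zynqmp_pm_get_ufs_calibration_values(&calib);
}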
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 9d1a5c175065..15fdbd089bbf 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -2,9 +2,10 @@
/*
* Xilinx Zynq MPSoC Firmware layer
*
- * Copyright (C) 2014-2019 Xilinx
+ * Copyright (C) 2014-2021 Xilinx
+ * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc.
*
- * Michal Simek <michal.simek@xilinx.com>
+ * Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
* Jolly Shah <jollys@xilinx.com>
* Rajan Vaja <rajanv@xilinx.com>
@@ -12,8 +13,10 @@
#ifndef __FIRMWARE_ZYNQMP_H__
#define __FIRMWARE_ZYNQMP_H__
+#include <linux/types.h>
#include <linux/err.h>
+#include <linux/firmware/xlnx-zynqmp-ufs.h>
#define ZYNQMP_PM_VERSION_MAJOR 1
#define ZYNQMP_PM_VERSION_MINOR 0
@@ -29,12 +32,52 @@
/* SMC SIP service Call Function Identifier Prefix */
#define PM_SIP_SVC 0xC2000000
+
+/* SMC function ID to get SiP SVC version */
+#define GET_SIP_SVC_VERSION (0x8200ff03U)
+
+/* SiP Service Calls version numbers */
+#define SIP_SVC_VERSION_MAJOR (0U)
+#define SIP_SVC_VERSION_MINOR (2U)
+
+#define SIP_SVC_PASSTHROUGH_VERSION ((SIP_SVC_VERSION_MAJOR << 16) | \
+ SIP_SVC_VERSION_MINOR)
+
+/* Fixed ID for FW specific APIs */
+#define PASS_THROUGH_FW_CMD_ID GENMASK(11, 0)
+
+/* PM API versions */
+#define PM_API_VERSION_1 1
+#define PM_API_VERSION_2 2
+
+#define PM_PINCTRL_PARAM_SET_VERSION 2
+
+/* Family codes */
+#define PM_ZYNQMP_FAMILY_CODE 0x1 /* ZynqMP family code */
+#define PM_VERSAL_FAMILY_CODE 0x2 /* Versal family code */
+#define PM_VERSAL_NET_FAMILY_CODE 0x3 /* Versal NET family code */
+
+#define API_ID_MASK GENMASK(7, 0)
+#define MODULE_ID_MASK GENMASK(11, 8)
+#define PLM_MODULE_ID_MASK GENMASK(15, 8)
+
+/* Firmware feature check version mask */
+#define FIRMWARE_VERSION_MASK 0xFFFFU
+
+/* ATF only commands */
+#define TF_A_PM_REGISTER_SGI 0xa04
#define PM_GET_TRUSTZONE_VERSION 0xa03
#define PM_SET_SUSPEND_MODE 0xa02
#define GET_CALLBACK_DATA 0xa01
/* Number of 32bits values in payload */
-#define PAYLOAD_ARG_CNT 4U
+#define PAYLOAD_ARG_CNT 7U
+
+/* Number of 64bits arguments for SMC call */
+#define SMC_ARG_CNT_64 8U
+
+/* Number of 32bits arguments for SMC call */
+#define SMC_ARG_CNT_32 13U
/* Number of arguments for a callback */
#define CB_ARG_CNT 4
@@ -52,6 +95,10 @@
#define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U
#define ZYNQMP_PM_CAPABILITY_UNUSABLE 0x8U
+/* Loader commands */
+#define PM_LOAD_PDI 0x701
+#define PDI_SRC_DDR 0xF
+
/*
* Firmware FPGA Manager flags
* XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration
@@ -60,21 +107,77 @@
#define XILINX_ZYNQMP_PM_FPGA_FULL 0x0U
#define XILINX_ZYNQMP_PM_FPGA_PARTIAL BIT(0)
+/* FPGA Status Reg */
+#define XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET 7U
+#define XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG 0U
+
+/*
+ * Node IDs for the Error Events.
+ */
+#define VERSAL_EVENT_ERROR_PMC_ERR1 (0x28100000U)
+#define VERSAL_EVENT_ERROR_PMC_ERR2 (0x28104000U)
+#define VERSAL_EVENT_ERROR_PSM_ERR1 (0x28108000U)
+#define VERSAL_EVENT_ERROR_PSM_ERR2 (0x2810C000U)
+
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR1 (0x28100000U)
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR2 (0x28104000U)
+#define VERSAL_NET_EVENT_ERROR_PMC_ERR3 (0x28108000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR1 (0x2810C000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR2 (0x28110000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR3 (0x28114000U)
+#define VERSAL_NET_EVENT_ERROR_PSM_ERR4 (0x28118000U)
+
+/* ZynqMP SD tap delay tuning */
+#define SD_ITAPDLY 0xFF180314
+#define SD_OTAPDLYSEL 0xFF180318
+
+/**
+ * XPM_EVENT_ERROR_MASK_DDRMC_CR: Error event mask for DDRMC MC Correctable ECC Error.
+ */
+#define XPM_EVENT_ERROR_MASK_DDRMC_CR BIT(18)
+
+/**
+ * XPM_EVENT_ERROR_MASK_DDRMC_NCR: Error event mask for DDRMC MC Non-Correctable ECC Error.
+ */
+#define XPM_EVENT_ERROR_MASK_DDRMC_NCR BIT(19)
+#define XPM_EVENT_ERROR_MASK_NOC_NCR BIT(13)
+#define XPM_EVENT_ERROR_MASK_NOC_CR BIT(12)
+
+enum pm_module_id {
+ PM_MODULE_ID = 0x0,
+ XPM_MODULE_ID = 0x2,
+ XSEM_MODULE_ID = 0x3,
+ TF_A_MODULE_ID = 0xa,
+};
+
+enum pm_api_cb_id {
+ PM_INIT_SUSPEND_CB = 30,
+ PM_ACKNOWLEDGE_CB = 31,
+ PM_NOTIFY_CB = 32,
+};
+
enum pm_api_id {
+ PM_API_FEATURES = 0,
PM_GET_API_VERSION = 1,
+ PM_GET_NODE_STATUS = 3,
+ PM_REGISTER_NOTIFIER = 5,
+ PM_FORCE_POWERDOWN = 8,
+ PM_REQUEST_WAKEUP = 10,
PM_SYSTEM_SHUTDOWN = 12,
PM_REQUEST_NODE = 13,
PM_RELEASE_NODE = 14,
PM_SET_REQUIREMENT = 15,
PM_RESET_ASSERT = 17,
PM_RESET_GET_STATUS = 18,
+ PM_MMIO_WRITE = 19,
+ PM_MMIO_READ = 20,
PM_PM_INIT_FINALIZE = 21,
PM_FPGA_LOAD = 22,
PM_FPGA_GET_STATUS = 23,
PM_GET_CHIPID = 24,
+ PM_SECURE_SHA = 26,
PM_PINCTRL_REQUEST = 28,
PM_PINCTRL_RELEASE = 29,
- PM_PINCTRL_GET_FUNCTION = 30,
PM_PINCTRL_SET_FUNCTION = 31,
PM_PINCTRL_CONFIG_PARAM_GET = 32,
PM_PINCTRL_CONFIG_PARAM_SET = 33,
@@ -85,18 +188,20 @@ enum pm_api_id {
PM_CLOCK_GETSTATE = 38,
PM_CLOCK_SETDIVIDER = 39,
PM_CLOCK_GETDIVIDER = 40,
- PM_CLOCK_SETRATE = 41,
- PM_CLOCK_GETRATE = 42,
PM_CLOCK_SETPARENT = 43,
PM_CLOCK_GETPARENT = 44,
+ PM_FPGA_READ = 46,
PM_SECURE_AES = 47,
+ PM_EFUSE_ACCESS = 53,
PM_FEATURE_CHECK = 63,
};
/* PMU-FW return status codes */
enum pm_ret_status {
XST_PM_SUCCESS = 0,
+ XST_PM_INVALID_VERSION = 4,
XST_PM_NO_FEATURE = 19,
+ XST_PM_INVALID_CRC = 301,
XST_PM_INTERNAL = 2000,
XST_PM_CONFLICT = 2001,
XST_PM_NO_ACCESS = 2002,
@@ -107,6 +212,11 @@ enum pm_ret_status {
};
enum pm_ioctl_id {
+ IOCTL_GET_RPU_OPER_MODE = 0,
+ IOCTL_SET_RPU_OPER_MODE = 1,
+ IOCTL_RPU_BOOT_ADDR_CONFIG = 2,
+ IOCTL_TCM_COMB_CONFIG = 3,
+ IOCTL_SET_TAPDELAY_BYPASS = 4,
IOCTL_SD_DLL_RESET = 6,
IOCTL_SET_SD_TAPDELAY = 7,
IOCTL_SET_PLL_FRAC_MODE = 8,
@@ -119,6 +229,20 @@ enum pm_ioctl_id {
IOCTL_READ_PGGS = 15,
/* Set healthy bit value */
IOCTL_SET_BOOT_HEALTH_STATUS = 17,
+ IOCTL_OSPI_MUX_SELECT = 21,
+ /* Register SGI to ATF */
+ IOCTL_REGISTER_SGI = 25,
+ /* Runtime feature configuration */
+ IOCTL_SET_FEATURE_CONFIG = 26,
+ IOCTL_GET_FEATURE_CONFIG = 27,
+ /* IOCTL for Secure Read/Write Interface */
+ IOCTL_READ_REG = 28,
+ IOCTL_MASK_WRITE_REG = 29,
+ /* Dynamic SD/GEM configuration */
+ IOCTL_SET_SD_CONFIG = 30,
+ IOCTL_SET_GEM_CONFIG = 31,
+ /* IOCTL to get default/current QoS */
+ IOCTL_GET_QOS = 34,
};
enum pm_query_id {
@@ -136,6 +260,22 @@ enum pm_query_id {
PM_QID_PINCTRL_GET_PIN_GROUPS = 11,
PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
PM_QID_CLOCK_GET_MAX_DIVISOR = 13,
+ PM_QID_PINCTRL_GET_ATTRIBUTES = 15,
+};
+
+enum rpu_oper_mode {
+ PM_RPU_MODE_LOCKSTEP = 0,
+ PM_RPU_MODE_SPLIT = 1,
+};
+
+enum rpu_boot_mem {
+ PM_RPU_BOOTMEM_LOVEC = 0,
+ PM_RPU_BOOTMEM_HIVEC = 1,
+};
+
+enum rpu_tcm_comb {
+ PM_RPU_TCM_SPLIT = 0,
+ PM_RPU_TCM_COMB = 1,
};
enum zynqmp_pm_reset_action {
@@ -335,6 +475,11 @@ enum pm_pinctrl_drive_strength {
PM_PINCTRL_DRIVE_STRENGTH_12MA = 3,
};
+enum pm_pinctrl_tri_state {
+ PM_PINCTRL_TRI_STATE_DISABLE = 0,
+ PM_PINCTRL_TRI_STATE_ENABLE = 1,
+};
+
enum zynqmp_pm_shutdown_type {
ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN = 0,
ZYNQMP_PM_SHUTDOWN_TYPE_RESET = 1,
@@ -347,6 +492,55 @@ enum zynqmp_pm_shutdown_subtype {
ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM = 2,
};
+enum tap_delay_signal_type {
+ PM_TAPDELAY_NAND_DQS_IN = 0,
+ PM_TAPDELAY_NAND_DQS_OUT = 1,
+ PM_TAPDELAY_QSPI = 2,
+ PM_TAPDELAY_MAX = 3,
+};
+
+enum tap_delay_bypass_ctrl {
+ PM_TAPDELAY_BYPASS_DISABLE = 0,
+ PM_TAPDELAY_BYPASS_ENABLE = 1,
+};
+
+enum ospi_mux_select_type {
+ PM_OSPI_MUX_SEL_DMA = 0,
+ PM_OSPI_MUX_SEL_LINEAR = 1,
+};
+
+enum pm_feature_config_id {
+ PM_FEATURE_INVALID = 0,
+ PM_FEATURE_OVERTEMP_STATUS = 1,
+ PM_FEATURE_OVERTEMP_VALUE = 2,
+ PM_FEATURE_EXTWDT_STATUS = 3,
+ PM_FEATURE_EXTWDT_VALUE = 4,
+};
+
+/**
+ * enum pm_sd_config_type - PM SD configuration.
+ * @SD_CONFIG_EMMC_SEL: To set SD_EMMC_SEL in CTRL_REG_SD and SD_SLOTTYPE
+ * @SD_CONFIG_BASECLK: To set SD_BASECLK in SD_CONFIG_REG1
+ * @SD_CONFIG_8BIT: To set SD_8BIT in SD_CONFIG_REG2
+ * @SD_CONFIG_FIXED: To set fixed config registers
+ */
+enum pm_sd_config_type {
+ SD_CONFIG_EMMC_SEL = 1,
+ SD_CONFIG_BASECLK = 2,
+ SD_CONFIG_8BIT = 3,
+ SD_CONFIG_FIXED = 4,
+};
+
+/**
+ * enum pm_gem_config_type - PM GEM configuration.
+ * @GEM_CONFIG_SGMII_MODE: To set GEM_SGMII_MODE in GEM_CLK_CTRL register
+ * @GEM_CONFIG_FIXED: To set fixed config registers
+ */
+enum pm_gem_config_type {
+ GEM_CONFIG_SGMII_MODE = 1,
+ GEM_CONFIG_FIXED = 2,
+};
+
/**
* struct zynqmp_pm_query_data - PM query data
* @qid: query ID
@@ -361,20 +555,19 @@ struct zynqmp_pm_query_data {
u32 arg3;
};
-int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1,
- u32 arg2, u32 arg3, u32 *ret_payload);
+int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...);
+int zynqmp_pm_invoke_fw_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...);
#if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE)
int zynqmp_pm_get_api_version(u32 *version);
int zynqmp_pm_get_chipid(u32 *idcode, u32 *version);
+int zynqmp_pm_get_family_info(u32 *family);
int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out);
int zynqmp_pm_clock_enable(u32 clock_id);
int zynqmp_pm_clock_disable(u32 clock_id);
int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state);
int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider);
int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider);
-int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate);
-int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate);
int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id);
int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id);
int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode);
@@ -383,10 +576,12 @@ int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data);
int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data);
int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value);
int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type);
-int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select);
+int zynqmp_pm_reset_assert(const u32 reset,
const enum zynqmp_pm_reset_action assert_flag);
-int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status);
-int zynqmp_pm_init_finalize(void);
+int zynqmp_pm_reset_get_status(const u32 reset, u32 *status);
+unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode);
+int zynqmp_pm_bootmode_write(u32 ps_mode);
int zynqmp_pm_set_suspend_mode(u32 mode);
int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
const u32 qos, const enum zynqmp_pm_request_ack ack);
@@ -395,22 +590,50 @@ int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
const u32 qos,
const enum zynqmp_pm_request_ack ack);
int zynqmp_pm_aes_engine(const u64 address, u32 *out);
+int zynqmp_pm_efuse_access(const u64 address, u32 *out);
+int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags);
int zynqmp_pm_fpga_get_status(u32 *value);
+int zynqmp_pm_fpga_get_config_status(u32 *value);
int zynqmp_pm_write_ggs(u32 index, u32 value);
int zynqmp_pm_read_ggs(u32 index, u32 *value);
int zynqmp_pm_write_pggs(u32 index, u32 value);
int zynqmp_pm_read_pggs(u32 index, u32 *value);
+int zynqmp_pm_set_tapdelay_bypass(u32 index, u32 value);
int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
int zynqmp_pm_set_boot_health_status(u32 value);
int zynqmp_pm_pinctrl_request(const u32 pin);
int zynqmp_pm_pinctrl_release(const u32 pin);
-int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id);
int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id);
int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
u32 *value);
int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
u32 value);
+int zynqmp_pm_load_pdi(const u32 src, const u64 address);
+int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable);
+int zynqmp_pm_feature(const u32 api_id);
+int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id);
+int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value);
+int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload);
+int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value);
+int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset,
+ u32 mask, u32 value);
+int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset);
+int zynqmp_pm_force_pwrdwn(const u32 target,
+ const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_request_wake(const u32 node,
+ const bool set_addr,
+ const u64 address,
+ const enum zynqmp_pm_request_ack ack);
+int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode);
+int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode);
+int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode);
+int zynqmp_pm_get_node_status(const u32 node, u32 *const status,
+ u32 *const requirements, u32 *const usage);
+int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value);
+int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
+ u32 value);
#else
static inline int zynqmp_pm_get_api_version(u32 *version)
{
@@ -422,6 +645,11 @@ static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
return -ENODEV;
}
+static inline int zynqmp_pm_get_family_info(u32 *family)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata,
u32 *out)
{
@@ -453,16 +681,6 @@ static inline int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
return -ENODEV;
}
-static inline int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
-{
- return -ENODEV;
-}
-
-static inline int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
-{
- return -ENODEV;
-}
-
static inline int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
{
return -ENODEV;
@@ -503,19 +721,28 @@ static inline int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
return -ENODEV;
}
-static inline int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
+static inline int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_reset_assert(const u32 reset,
const enum zynqmp_pm_reset_action assert_flag)
{
return -ENODEV;
}
-static inline int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
- u32 *status)
+static inline int zynqmp_pm_reset_get_status(const u32 reset, u32 *status)
{
return -ENODEV;
}
-static inline int zynqmp_pm_init_finalize(void)
+static inline unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_bootmode_write(u32 ps_mode)
{
return -ENODEV;
}
@@ -550,6 +777,17 @@ static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
return -ENODEV;
}
+static inline int zynqmp_pm_efuse_access(const u64 address, u32 *out)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size,
const u32 flags)
{
@@ -561,6 +799,11 @@ static inline int zynqmp_pm_fpga_get_status(u32 *value)
return -ENODEV;
}
+static inline int zynqmp_pm_fpga_get_config_status(u32 *value)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_write_ggs(u32 index, u32 value)
{
return -ENODEV;
@@ -581,6 +824,11 @@ static inline int zynqmp_pm_read_pggs(u32 index, u32 *value)
return -ENODEV;
}
+static inline int zynqmp_pm_set_tapdelay_bypass(u32 index, u32 value)
+{
+ return -ENODEV;
+}
+
static inline int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
{
return -ENODEV;
@@ -601,7 +849,7 @@ static inline int zynqmp_pm_pinctrl_release(const u32 pin)
return -ENODEV;
}
-static inline int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id)
+static inline int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id)
{
return -ENODEV;
}
@@ -622,6 +870,101 @@ static inline int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
{
return -ENODEV;
}
+
+static inline int zynqmp_pm_load_pdi(const u32 src, const u64 address)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_feature(const u32 api_id)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_feature_config(enum pm_feature_config_id id,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
+ u32 *payload)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_force_pwrdwn(const u32 target,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_request_wake(const u32 node,
+ const bool set_addr,
+ const u64 address,
+ const enum zynqmp_pm_request_ack ack)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset,
+ u32 mask, u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_get_node_status(const u32 node, u32 *const status,
+ u32 *const requirements,
+ u32 *const usage)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_sd_config(u32 node,
+ enum pm_sd_config_type config,
+ u32 value)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_set_gem_config(u32 node,
+ enum pm_gem_config_type config,
+ u32 value)
+{
+ return -ENODEV;
+}
+
#endif
#endif /* __FIRMWARE_ZYNQMP_H__ */
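
A sketch of the new variadic calling convention for zynqmp_pm_invoke_fn(): the return-payload pointer comes first, followed by a count of the trailing u32 arguments. Reading the version out of ret_payload[1] mirrors the existing PM API layout and is an assumption here:

#include <linux/firmware/xlnx-zynqmp.h>

static int demo_get_api_version(u32 *version)
{
	u32 ret_payload[PAYLOAD_ARG_CNT];
	int ret;

	/* No u32 arguments follow ret_payload, hence num_args == 0. */
	ret = zynqmp_pm_invoke_fn(PM_GET_API_VERSION, ret_payload, 0);
	if (!ret && version)
		*version = ret_payload[1];

	return ret;
}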
diff --git a/include/linux/fixp-arith.h b/include/linux/fixp-arith.h
index 281cb4f83dbe..e485fb0c1201 100644
--- a/include/linux/fixp-arith.h
+++ b/include/linux/fixp-arith.h
@@ -2,6 +2,7 @@
#ifndef _FIXP_ARITH_H
#define _FIXP_ARITH_H
+#include <linux/bug.h>
#include <linux/math64.h>
/*
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
index c12df59d3f5f..e9a72fd0bfe7 100644
--- a/include/linux/flex_proportions.h
+++ b/include/linux/flex_proportions.h
@@ -39,38 +39,6 @@ void fprop_global_destroy(struct fprop_global *p);
bool fprop_new_period(struct fprop_global *p, int periods);
/*
- * ---- SINGLE ----
- */
-struct fprop_local_single {
- /* the local events counter */
- unsigned long events;
- /* Period in which we last updated events */
- unsigned int period;
- raw_spinlock_t lock; /* Protect period and numerator */
-};
-
-#define INIT_FPROP_LOCAL_SINGLE(name) \
-{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
-}
-
-int fprop_local_init_single(struct fprop_local_single *pl);
-void fprop_local_destroy_single(struct fprop_local_single *pl);
-void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl);
-void fprop_fraction_single(struct fprop_global *p,
- struct fprop_local_single *pl, unsigned long *numerator,
- unsigned long *denominator);
-
-static inline
-void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __fprop_inc_single(p, pl);
- local_irq_restore(flags);
-}
-
-/*
* ---- PERCPU ----
*/
struct fprop_local_percpu {
@@ -83,9 +51,10 @@ struct fprop_local_percpu {
int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
-void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
-void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
- int max_frac);
+void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
+ long nr);
+void __fprop_add_percpu_max(struct fprop_global *p,
+ struct fprop_local_percpu *pl, int max_frac, long nr);
void fprop_fraction_percpu(struct fprop_global *p,
struct fprop_local_percpu *pl, unsigned long *numerator,
unsigned long *denominator);
@@ -96,7 +65,7 @@ void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
unsigned long flags;
local_irq_save(flags);
- __fprop_inc_percpu(p, pl);
+ __fprop_add_percpu(p, pl, 1);
local_irq_restore(flags);
}
diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h
new file mode 100644
index 000000000000..adab609c972e
--- /dev/null
+++ b/include/linux/folio_queue.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Queue of folios definitions
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See:
+ *
+ * Documentation/core-api/folio_queue.rst
+ *
+ * for a description of the API.
+ */
+
+#ifndef _LINUX_FOLIO_QUEUE_H
+#define _LINUX_FOLIO_QUEUE_H
+
+#include <linux/pagevec.h>
+#include <linux/mm.h>
+
+/*
+ * Segment in a queue of running buffers. Each segment can hold a number of
+ * folios and a portion of the queue can be referenced with the ITER_FOLIOQ
+ * iterator. The possibility exists of inserting non-folio elements into the
+ * queue (such as gaps).
+ *
+ * Explicit prev and next pointers are used instead of a list_head to make it
+ * easier to add segments to tail and remove them from the head without the
+ * need for a lock.
+ */
+struct folio_queue {
+ struct folio_batch vec; /* Folios in the queue segment */
+ u8 orders[PAGEVEC_SIZE]; /* Order of each folio */
+ struct folio_queue *next; /* Next queue segment or NULL */
+	struct folio_queue *prev;	/* Previous queue segment or NULL */
+ unsigned long marks; /* 1-bit mark per folio */
+ unsigned long marks2; /* Second 1-bit mark per folio */
+#if PAGEVEC_SIZE > BITS_PER_LONG
+#error marks is not big enough
+#endif
+ unsigned int rreq_id;
+ unsigned int debug_id;
+};
+
+/**
+ * folioq_init - Initialise a folio queue segment
+ * @folioq: The segment to initialise
+ * @rreq_id: The request identifier to use in tracelines.
+ *
+ * Initialise a folio queue segment and set an identifier to be used in traces.
+ *
+ * Note that the folio pointers are left uninitialised.
+ */
+static inline void folioq_init(struct folio_queue *folioq, unsigned int rreq_id)
+{
+ folio_batch_init(&folioq->vec);
+ folioq->next = NULL;
+ folioq->prev = NULL;
+ folioq->marks = 0;
+ folioq->marks2 = 0;
+ folioq->rreq_id = rreq_id;
+ folioq->debug_id = 0;
+}
+
+/**
+ * folioq_nr_slots: Query the capacity of a folio queue segment
+ * @folioq: The segment to query
+ *
+ * Query the number of folios that a particular folio queue segment might hold.
+ * [!] NOTE: This must not be assumed to be the same for every segment!
+ */
+static inline unsigned int folioq_nr_slots(const struct folio_queue *folioq)
+{
+ return PAGEVEC_SIZE;
+}
+
+/**
+ * folioq_count: Query the occupancy of a folio queue segment
+ * @folioq: The segment to query
+ *
+ * Query the number of folios that have been added to a folio queue segment.
+ * Note that this is not decreased as folios are removed from a segment.
+ */
+static inline unsigned int folioq_count(struct folio_queue *folioq)
+{
+ return folio_batch_count(&folioq->vec);
+}
+
+/**
+ * folioq_full: Query if a folio queue segment is full
+ * @folioq: The segment to query
+ *
+ * Query if a folio queue segment is fully occupied. Note that this does not
+ * change if folios are removed from a segment.
+ */
+static inline bool folioq_full(struct folio_queue *folioq)
+{
+ return folioq_count(folioq) >= folioq_nr_slots(folioq);
+}
+
+/**
+ * folioq_is_marked: Check first folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the first mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
+static inline bool folioq_is_marked(const struct folio_queue *folioq, unsigned int slot)
+{
+ return test_bit(slot, &folioq->marks);
+}
+
+/**
+ * folioq_mark: Set the first mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the first mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_mark(struct folio_queue *folioq, unsigned int slot)
+{
+ set_bit(slot, &folioq->marks);
+}
+
+/**
+ * folioq_unmark: Clear the first mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the first mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_unmark(struct folio_queue *folioq, unsigned int slot)
+{
+ clear_bit(slot, &folioq->marks);
+}
+
+/**
+ * folioq_is_marked2: Check second folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the second mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
+static inline bool folioq_is_marked2(const struct folio_queue *folioq, unsigned int slot)
+{
+ return test_bit(slot, &folioq->marks2);
+}
+
+/**
+ * folioq_mark2: Set the second mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the second mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_mark2(struct folio_queue *folioq, unsigned int slot)
+{
+ set_bit(slot, &folioq->marks2);
+}
+
+/**
+ * folioq_unmark2 - Clear the second mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the second mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
+static inline void folioq_unmark2(struct folio_queue *folioq, unsigned int slot)
+{
+ clear_bit(slot, &folioq->marks2);
+}
+
+/**
+ * folioq_append - Add a folio to a folio queue segment
+ * @folioq: The segment to add to
+ * @folio: The folio to add
+ *
+ * Add a folio to the tail of the sequence in a folio queue segment, increasing
+ * the occupancy count and returning the slot number for the folio just added.
+ * The folio size is extracted and stored in the queue and the marks are left
+ * unmodified.
+ *
+ * Note that it's left up to the caller to check that the segment capacity will
+ * not be exceeded and to extend the queue.
+ */
+static inline unsigned int folioq_append(struct folio_queue *folioq, struct folio *folio)
+{
+ unsigned int slot = folioq->vec.nr++;
+
+ folioq->vec.folios[slot] = folio;
+ folioq->orders[slot] = folio_order(folio);
+ return slot;
+}
+
+/**
+ * folioq_append_mark - Add a folio to a folio queue segment and set the first mark
+ * @folioq: The segment to add to
+ * @folio: The folio to add
+ *
+ * Add a folio to the tail of the sequence in a folio queue segment, increasing
+ * the occupancy count and returning the slot number for the folio just added.
+ * The folio size is extracted and stored in the queue, the first mark is set
+ * and the second mark is left unmodified.
+ *
+ * Note that it's left up to the caller to check that the segment capacity will
+ * not be exceeded and to extend the queue.
+ */
+static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct folio *folio)
+{
+ unsigned int slot = folioq->vec.nr++;
+
+ folioq->vec.folios[slot] = folio;
+ folioq->orders[slot] = folio_order(folio);
+ folioq_mark(folioq, slot);
+ return slot;
+}
+
+/**
+ * folioq_folio - Get a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the folio in the specified slot from a folio queue segment. Note
+ * that no bounds check is made and, if the slot hasn't yet been filled, the
+ * pointer will be undefined. If the slot has been cleared, NULL will be
+ * returned.
+ */
+static inline struct folio *folioq_folio(const struct folio_queue *folioq, unsigned int slot)
+{
+ return folioq->vec.folios[slot];
+}
+
+/**
+ * folioq_folio_order - Get the order of a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the order of the folio in the specified slot from a folio queue
+ * segment. Note that no bounds check is made and, if the slot hasn't yet
+ * been filled, the order returned will be 0.
+ */
+static inline unsigned int folioq_folio_order(const struct folio_queue *folioq, unsigned int slot)
+{
+ return folioq->orders[slot];
+}
+
+/**
+ * folioq_folio_size - Get the size of a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the size of the folio in the specified slot from a folio queue
+ * segment. Note that no bounds check is made and, if the slot hasn't yet
+ * been filled, the size returned will be PAGE_SIZE.
+ */
+static inline size_t folioq_folio_size(const struct folio_queue *folioq, unsigned int slot)
+{
+ return PAGE_SIZE << folioq_folio_order(folioq, slot);
+}
+
+/**
+ * folioq_clear - Clear a folio from a folio queue segment
+ * @folioq: The segment to clear
+ * @slot: The folio slot to clear
+ *
+ * Clear a folio from a sequence in a folio queue segment and clear its marks.
+ * The occupancy count is left unchanged.
+ */
+static inline void folioq_clear(struct folio_queue *folioq, unsigned int slot)
+{
+ folioq->vec.folios[slot] = NULL;
+ folioq_unmark(folioq, slot);
+ folioq_unmark2(folioq, slot);
+}
+
+#endif /* _LINUX_FOLIO_QUEUE_H */
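For orientation, a minimal usage sketch of the API above. It is editorial, not part of the patch: the enqueue_folios() helper, its parameters, and the dirty-mark policy are invented for illustration.

/* Editorial sketch, not part of this header. */
static void enqueue_folios(struct folio_queue *folioq,
			   struct folio **folios, unsigned int nr)
{
	unsigned int i, slot;

	folioq_init(folioq, 0);
	for (i = 0; i < nr && !folioq_full(folioq); i++) {
		slot = folioq_append(folioq, folios[i]);
		if (folio_test_dirty(folios[i]))
			folioq_mark(folioq, slot);	/* e.g. "needs writeback" */
	}

	for (slot = 0; slot < folioq_count(folioq); slot++)
		pr_debug("slot %u holds %zu bytes\n",
			 slot, folioq_folio_size(folioq, slot));
}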
diff --git a/include/linux/font.h b/include/linux/font.h
index abf1442ce719..fd8625cd76b2 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -35,6 +35,7 @@ struct font_desc {
#define FONT6x10_IDX 10
#define TER16x32_IDX 11
#define FONT6x8_IDX 12
+#define TER10x18_IDX 13
extern const struct font_desc font_vga_8x8,
font_vga_8x16,
@@ -48,7 +49,8 @@ extern const struct font_desc font_vga_8x8,
font_mini_4x6,
font_6x10,
font_ter_16x32,
- font_6x8;
+ font_6x8,
+ font_ter_10x18;
/* Find a font with a specific name */
@@ -57,7 +59,8 @@ extern const struct font_desc *find_font(const char *name);
/* Get the default font for a specific screen size */
extern const struct font_desc *get_default_font(int xres, int yres,
- u32 font_w, u32 font_h);
+ unsigned long *font_w,
+ unsigned long *font_h);
/* Max. length for the name of a predefined font */
#define MAX_FONT_NAME 32
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index c1be37437e77..b3b53f8c1b28 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -2,123 +2,306 @@
#ifndef _LINUX_FORTIFY_STRING_H_
#define _LINUX_FORTIFY_STRING_H_
+#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/const.h>
+#include <linux/limits.h>
+
+#define __FORTIFY_INLINE extern __always_inline __gnu_inline __overloadable
+#define __RENAME(x) __asm__(#x)
+
+#define FORTIFY_REASON_DIR(r) FIELD_GET(BIT(0), r)
+#define FORTIFY_REASON_FUNC(r) FIELD_GET(GENMASK(7, 1), r)
+#define FORTIFY_REASON(func, write) (FIELD_PREP(BIT(0), write) | \
+ FIELD_PREP(GENMASK(7, 1), func))
+
+/* Overridden by KUnit tests. */
+#ifndef fortify_panic
+# define fortify_panic(func, write, avail, size, retfail) \
+ __fortify_panic(FORTIFY_REASON(func, write), avail, size)
+#endif
+#ifndef fortify_warn_once
+# define fortify_warn_once(x...) WARN_ONCE(x)
+#endif
+
+#define FORTIFY_READ 0
+#define FORTIFY_WRITE 1
+
+#define EACH_FORTIFY_FUNC(macro) \
+ macro(strncpy), \
+ macro(strnlen), \
+ macro(strlen), \
+ macro(strscpy), \
+ macro(strlcat), \
+ macro(strcat), \
+ macro(strncat), \
+ macro(memset), \
+ macro(memcpy), \
+ macro(memmove), \
+ macro(memscan), \
+ macro(memcmp), \
+ macro(memchr), \
+ macro(memchr_inv), \
+ macro(kmemdup), \
+ macro(strcpy), \
+ macro(UNKNOWN),
+
+#define MAKE_FORTIFY_FUNC(func) FORTIFY_FUNC_##func
+
+enum fortify_func {
+ EACH_FORTIFY_FUNC(MAKE_FORTIFY_FUNC)
+};
+
+void __fortify_report(const u8 reason, const size_t avail, const size_t size);
+void __fortify_panic(const u8 reason, const size_t avail, const size_t size) __cold __noreturn;
+void __read_overflow(void) __compiletime_error("detected read beyond size of object (1st parameter)");
+void __read_overflow2(void) __compiletime_error("detected read beyond size of object (2nd parameter)");
+void __read_overflow2_field(size_t avail, size_t wanted) __compiletime_warning("detected read beyond size of field (2nd parameter); maybe use struct_group()?");
+void __write_overflow(void) __compiletime_error("detected write beyond size of object (1st parameter)");
+void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("detected write beyond size of field (1st parameter); maybe use struct_group()?");
+
+#define __compiletime_strlen(p) \
+({ \
+ char *__p = (char *)(p); \
+ size_t __ret = SIZE_MAX; \
+ const size_t __p_size = __member_size(p); \
+ if (__p_size != SIZE_MAX && \
+ __builtin_constant_p(*__p)) { \
+ size_t __p_len = __p_size - 1; \
+ if (__builtin_constant_p(__p[__p_len]) && \
+ __p[__p_len] == '\0') \
+ __ret = __builtin_strlen(__p); \
+ } \
+ __ret; \
+})
+
+#if defined(__SANITIZE_ADDRESS__)
+
+#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
+extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
+extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
+extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
+#elif defined(CONFIG_KASAN_GENERIC)
+extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__asan_memset);
+extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memmove);
+extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memcpy);
+#else /* CONFIG_KASAN_SW_TAGS */
+extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__hwasan_memset);
+extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memmove);
+extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memcpy);
+#endif
-#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
-extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
-extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
-extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
+
+#else
+
+#if defined(__SANITIZE_MEMORY__)
+/*
+ * For KMSAN builds all memcpy/memset/memmove calls should be replaced by the
+ * corresponding __msan_XXX functions.
+ */
+#include <linux/kmsan_string.h>
+#define __underlying_memcpy __msan_memcpy
+#define __underlying_memmove __msan_memmove
+#define __underlying_memset __msan_memset
#else
-#define __underlying_memchr __builtin_memchr
-#define __underlying_memcmp __builtin_memcmp
#define __underlying_memcpy __builtin_memcpy
#define __underlying_memmove __builtin_memmove
#define __underlying_memset __builtin_memset
+#endif
+
+#define __underlying_memchr __builtin_memchr
+#define __underlying_memcmp __builtin_memcmp
#define __underlying_strcat __builtin_strcat
#define __underlying_strcpy __builtin_strcpy
#define __underlying_strlen __builtin_strlen
#define __underlying_strncat __builtin_strncat
#define __underlying_strncpy __builtin_strncpy
+
+#endif
+
+/**
+ * unsafe_memcpy - memcpy implementation with no FORTIFY bounds checking
+ *
+ * @dst: Destination memory address to write to
+ * @src: Source memory address to read from
+ * @bytes: How many bytes to write to @dst from @src
+ * @justification: Free-form text or comment describing why the use is needed
+ *
+ * This should be used for corner cases where the compiler cannot do the
+ * right thing, or during transitions between APIs, etc. It should be used
+ * very rarely, and includes a place for justification detailing where bounds
+ * checking has happened, and why existing solutions cannot be employed.
+ */
+#define unsafe_memcpy(dst, src, bytes, justification) \
+ __underlying_memcpy(dst, src, bytes)
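
/*
 * Editorial sketch (struct and names invented): the justification
 * argument is discarded by the macro above, so callers conventionally
 * pass an inline comment recording where the bounds check happened.
 */
struct example_pkt { u32 hdr[4]; u8 data[]; };

static inline void example_copy_hdr(struct example_pkt *pkt,
				    const void *src, size_t len)
{
	unsafe_memcpy(&pkt->hdr, src, len,
		      /* len validated against sizeof(pkt->hdr) by caller */);
}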
+
+/*
+ * Clang's use of __builtin_*object_size() within inlines needs hinting via
+ * __pass_*object_size(). The preference is to only ever use type 1 (member
+ * size, rather than struct size), but there remain some stragglers using
+ * type 0 that will be converted in the future.
+ */
+#if __has_builtin(__builtin_dynamic_object_size)
+#define POS __pass_dynamic_object_size(1)
+#define POS0 __pass_dynamic_object_size(0)
+#else
+#define POS __pass_object_size(1)
+#define POS0 __pass_object_size(0)
#endif
-__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
+#define __compiletime_lessthan(bounds, length) ( \
+ __builtin_constant_p((bounds) < (length)) && \
+ (bounds) < (length) \
+)
+
+/**
+ * strncpy - Copy a string to memory with non-guaranteed NUL padding
+ *
+ * @p: pointer to destination of copy
+ * @q: pointer to NUL-terminated source string to copy
+ * @size: bytes to write at @p
+ *
+ * If strlen(@q) >= @size, the copy of @q will stop after @size bytes,
+ * and @p will NOT be NUL-terminated
+ *
+ * If strlen(@q) < @size, following the copy of @q, trailing NUL bytes
+ * will be written to @p until @size total bytes have been written.
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * over-reads of @q, it cannot defend against writing unterminated
+ * results to @p. Using strncpy() remains ambiguous and fragile.
+ * Instead, please choose an alternative, so that the expectation
+ * of @p's contents is unambiguous:
+ *
+ * +--------------------+--------------------+------------+
+ * | **p** needs to be: | padded to **size** | not padded |
+ * +====================+====================+============+
+ * | NUL-terminated | strscpy_pad() | strscpy() |
+ * +--------------------+--------------------+------------+
+ * | not NUL-terminated | strtomem_pad() | strtomem() |
+ * +--------------------+--------------------+------------+
+ *
+ * Note strscpy*()'s differing return values for detecting truncation,
+ * and strtomem*()'s expectation that the destination is marked with
+ * __nonstring when it is a character array.
+ *
+ */
+__FORTIFY_INLINE __diagnose_as(__builtin_strncpy, 1, 2, 3)
+char *strncpy(char * const POS p, const char *q, __kernel_size_t size)
{
- size_t p_size = __builtin_object_size(p, 1);
+ const size_t p_size = __member_size(p);
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__write_overflow();
if (p_size < size)
- fortify_panic(__func__);
+ fortify_panic(FORTIFY_FUNC_strncpy, FORTIFY_WRITE, p_size, size, p);
return __underlying_strncpy(p, q, size);
}
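
/*
 * Editorial sketch (names invented) of the preferred replacement from
 * the table above: strscpy() always NUL-terminates and returns -E2BIG
 * on truncation, removing strncpy()'s ambiguity.
 */
static inline void example_copy_name(char dst[16], const char *src)
{
	if (strscpy(dst, src, 16) < 0)
		pr_debug("name truncated\n");
}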
-__FORTIFY_INLINE char *strcat(char *p, const char *q)
+extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
+/**
+ * strnlen - Return bounded count of characters in a NUL-terminated string
+ *
+ * @p: pointer to NUL-terminated string to count.
+ * @maxlen: maximum number of characters to count.
+ *
+ * Returns number of characters in @p (NOT including the final NUL), or
+ * @maxlen if no NUL has been found in the first @maxlen characters.
+ *
+ */
+__FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size_t maxlen)
{
- size_t p_size = __builtin_object_size(p, 1);
+ const size_t p_size = __member_size(p);
+ const size_t p_len = __compiletime_strlen(p);
+ size_t ret;
- if (p_size == (size_t)-1)
- return __underlying_strcat(p, q);
- if (strlcat(p, q, p_size) >= p_size)
- fortify_panic(__func__);
- return p;
+ /* We can take compile-time actions when maxlen is const. */
+ if (__builtin_constant_p(maxlen) && p_len != SIZE_MAX) {
+ /* If p is const, we can use its compile-time-known len. */
+ if (maxlen >= p_size)
+ return p_len;
+ }
+
+ /* Do not check characters beyond the end of p. */
+ ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
+ if (p_size <= ret && maxlen != ret)
+ fortify_panic(FORTIFY_FUNC_strnlen, FORTIFY_READ, p_size, ret + 1, ret);
+ return ret;
}
-__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
+/*
+ * Defined after fortified strnlen to reuse it. However, it must still be
+ * possible for strlen() to be used on compile-time strings for use in
+ * static initializers (i.e. as a constant expression).
+ */
+/**
+ * strlen - Return count of characters in a NUL-terminated string
+ *
+ * @p: pointer to NUL-terminated string to count.
+ *
+ * Do not use this function unless the string length is known at
+ * compile-time. When @p is unterminated, this function may crash
+ * or return unexpected counts that could lead to memory content
+ * exposures. Prefer strnlen().
+ *
+ * Returns number of characters in @p (NOT including the final NUL).
+ *
+ */
+#define strlen(p) \
+ __builtin_choose_expr(__is_constexpr(__builtin_strlen(p)), \
+ __builtin_strlen(p), __fortify_strlen(p))
+__FORTIFY_INLINE __diagnose_as(__builtin_strlen, 1)
+__kernel_size_t __fortify_strlen(const char * const POS p)
{
+ const size_t p_size = __member_size(p);
__kernel_size_t ret;
- size_t p_size = __builtin_object_size(p, 1);
- /* Work around gcc excess stack consumption issue */
- if (p_size == (size_t)-1 ||
- (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
+ /* Give up if we don't know how large p is. */
+ if (p_size == SIZE_MAX)
return __underlying_strlen(p);
ret = strnlen(p, p_size);
if (p_size <= ret)
- fortify_panic(__func__);
+ fortify_panic(FORTIFY_FUNC_strlen, FORTIFY_READ, p_size, ret + 1, ret);
return ret;
}
-extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
-__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
+/* Defined after fortified strnlen() to reuse it. */
+extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(sized_strscpy);
+__FORTIFY_INLINE ssize_t sized_strscpy(char * const POS p, const char * const POS q, size_t size)
{
- size_t p_size = __builtin_object_size(p, 1);
- __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
-
- if (p_size <= ret && maxlen != ret)
- fortify_panic(__func__);
- return ret;
-}
-
-/* defined after fortified strlen to reuse it */
-extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
-__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
-{
- size_t ret;
- size_t p_size = __builtin_object_size(p, 1);
- size_t q_size = __builtin_object_size(q, 1);
-
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __real_strlcpy(p, q, size);
- ret = strlen(q);
- if (size) {
- size_t len = (ret >= size) ? size - 1 : ret;
-
- if (__builtin_constant_p(len) && len >= p_size)
- __write_overflow();
- if (len >= p_size)
- fortify_panic(__func__);
- __underlying_memcpy(p, q, len);
- p[len] = '\0';
- }
- return ret;
-}
-
-/* defined after fortified strnlen to reuse it */
-extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy);
-__FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size)
-{
- size_t len;
/* Use string size rather than possible enclosing struct size. */
- size_t p_size = __builtin_object_size(p, 1);
- size_t q_size = __builtin_object_size(q, 1);
+ const size_t p_size = __member_size(p);
+ const size_t q_size = __member_size(q);
+ size_t len;
/* If we cannot get the sizes of p and q, default to calling strscpy. */
- if (p_size == (size_t) -1 && q_size == (size_t) -1)
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
return __real_strscpy(p, q, size);
/*
* If size can be known at compile time and is greater than
* p_size, generate a compile time write overflow error.
*/
- if (__builtin_constant_p(size) && size > p_size)
+ if (__compiletime_lessthan(p_size, size))
__write_overflow();
+ /* Short-circuit for compile-time known-safe lengths. */
+ if (__compiletime_lessthan(p_size, SIZE_MAX)) {
+ len = __compiletime_strlen(q);
+
+ if (len < SIZE_MAX && __compiletime_lessthan(len, size)) {
+ __underlying_memcpy(p, q, len + 1);
+ return len;
+ }
+ }
+
/*
* This call protects from read overflow, because len will default to q
* length if it is smaller than size.
@@ -135,8 +318,8 @@ __FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size)
* Generate a runtime write overflow error if len is greater than
* p_size.
*/
- if (len > p_size)
- fortify_panic(__func__);
+ if (p_size < len)
+ fortify_panic(FORTIFY_FUNC_strscpy, FORTIFY_WRITE, p_size, len, -E2BIG);
/*
* We can now safely call vanilla strscpy because we are protected from:
@@ -146,157 +329,491 @@ __FORTIFY_INLINE ssize_t strscpy(char *p, const char *q, size_t size)
return __real_strscpy(p, q, len);
}
-/* defined after fortified strlen and strnlen to reuse them */
-__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
+/* Defined after fortified strlen() to reuse it. */
+extern size_t __real_strlcat(char *p, const char *q, size_t avail) __RENAME(strlcat);
+/**
+ * strlcat - Append a string to an existing string
+ *
+ * @p: pointer to %NUL-terminated string to append to
+ * @q: pointer to %NUL-terminated string to append from
+ * @avail: Maximum bytes available in @p
+ *
+ * Appends %NUL-terminated string @q after the %NUL-terminated
+ * string at @p, but will not write beyond @avail bytes total,
+ * potentially truncating the copy from @q. @p will stay
+ * %NUL-terminated only if a %NUL already existed within
+ * the @avail bytes of @p. If so, the resulting number of
+ * bytes copied from @q will be at most "@avail - strlen(@p) - 1".
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * read and write overflows, this is only possible when the sizes
+ * of @p and @q are known to the compiler. Prefer building the
+ * string with formatting, via scnprintf(), seq_buf, or similar.
+ *
+ * Returns total bytes that _would_ have been contained by @p
+ * regardless of truncation, similar to snprintf(). If return
+ * value is >= @avail, the string has been truncated.
+ *
+ */
+__FORTIFY_INLINE
+size_t strlcat(char * const POS p, const char * const POS q, size_t avail)
{
+ const size_t p_size = __member_size(p);
+ const size_t q_size = __member_size(q);
size_t p_len, copy_len;
- size_t p_size = __builtin_object_size(p, 1);
- size_t q_size = __builtin_object_size(q, 1);
+ size_t actual, wanted;
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
- return __underlying_strncat(p, q, count);
- p_len = strlen(p);
- copy_len = strnlen(q, count);
- if (p_size < p_len + copy_len + 1)
- fortify_panic(__func__);
+ /* Give up immediately if both buffer sizes are unknown. */
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __real_strlcat(p, q, avail);
+
+ p_len = strnlen(p, avail);
+ copy_len = strlen(q);
+ wanted = actual = p_len + copy_len;
+
+ /* Cannot append any more: report truncation. */
+ if (avail <= p_len)
+ return wanted;
+
+ /* Give up if string is already overflowed. */
+ if (p_size <= p_len)
+ fortify_panic(FORTIFY_FUNC_strlcat, FORTIFY_READ, p_size, p_len + 1, wanted);
+
+ if (actual >= avail) {
+ copy_len = avail - p_len - 1;
+ actual = p_len + copy_len;
+ }
+
+ /* Give up if copy will overflow. */
+ if (p_size <= actual)
+ fortify_panic(FORTIFY_FUNC_strlcat, FORTIFY_WRITE, p_size, actual + 1, wanted);
__underlying_memcpy(p + p_len, q, copy_len);
- p[p_len + copy_len] = '\0';
- return p;
+ p[actual] = '\0';
+
+ return wanted;
}
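
/*
 * Editorial sketch (names invented): like snprintf(), a strlcat()
 * return value >= the available space means the result was truncated.
 */
static inline void example_append(char buf[32], const char *suffix)
{
	if (strlcat(buf, suffix, 32) >= 32)
		pr_debug("appended string was truncated\n");
}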
-__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
+/* Defined after fortified strlcat() to reuse it. */
+/**
+ * strcat - Append a string to an existing string
+ *
+ * @p: pointer to NUL-terminated string to append to
+ * @q: pointer to NUL-terminated source string to append from
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * read and write overflows, this is only possible when the
+ * destination buffer size is known to the compiler. Prefer
+ * building the string with formatting, via scnprintf() or similar.
+ * At the very least, use strncat().
+ *
+ * Returns @p.
+ *
+ */
+__FORTIFY_INLINE __diagnose_as(__builtin_strcat, 1, 2)
+char *strcat(char * const POS p, const char *q)
{
- size_t p_size = __builtin_object_size(p, 0);
+ const size_t p_size = __member_size(p);
+ const size_t wanted = strlcat(p, q, p_size);
- if (__builtin_constant_p(size) && p_size < size)
- __write_overflow();
- if (p_size < size)
- fortify_panic(__func__);
- return __underlying_memset(p, c, size);
+ if (p_size <= wanted)
+ fortify_panic(FORTIFY_FUNC_strcat, FORTIFY_WRITE, p_size, wanted + 1, p);
+ return p;
}
-__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
+/**
+ * strncat - Append a string to an existing string
+ *
+ * @p: pointer to NUL-terminated string to append to
+ * @q: pointer to source string to append from
+ * @count: Maximum bytes to read from @q
+ *
+ * Appends at most @count bytes from @q (stopping at the first
+ * NUL byte) after the NUL-terminated string at @p. @p will be
+ * NUL-terminated.
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * read and write overflows, this is only possible when the sizes
+ * of @p and @q are known to the compiler. Prefer building the
+ * string with formatting, via scnprintf() or similar.
+ *
+ * Returns @p.
+ *
+ */
+/* Defined after fortified strlen() and strnlen() to reuse them. */
+__FORTIFY_INLINE __diagnose_as(__builtin_strncat, 1, 2, 3)
+char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count)
{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
+ const size_t p_size = __member_size(p);
+ const size_t q_size = __member_size(q);
+ size_t p_len, copy_len, total;
+
+ if (p_size == SIZE_MAX && q_size == SIZE_MAX)
+ return __underlying_strncat(p, q, count);
+ p_len = strlen(p);
+ copy_len = strnlen(q, count);
+ total = p_len + copy_len + 1;
+ if (p_size < total)
+ fortify_panic(FORTIFY_FUNC_strncat, FORTIFY_WRITE, p_size, total, p);
+ __underlying_memcpy(p + p_len, q, copy_len);
+ p[p_len + copy_len] = '\0';
+ return p;
+}
+__FORTIFY_INLINE bool fortify_memset_chk(__kernel_size_t size,
+ const size_t p_size,
+ const size_t p_size_field)
+{
if (__builtin_constant_p(size)) {
- if (p_size < size)
+ /*
+ * Length argument is a constant expression, so we
+ * can perform compile-time bounds checking where
+ * buffer sizes are also known at compile time.
+ */
+
+ /* Error when size is larger than enclosing struct. */
+ if (__compiletime_lessthan(p_size_field, p_size) &&
+ __compiletime_lessthan(p_size, size))
__write_overflow();
- if (q_size < size)
- __read_overflow2();
+
+ /* Warn when write size is larger than dest field. */
+ if (__compiletime_lessthan(p_size_field, size))
+ __write_overflow_field(p_size_field, size);
}
- if (p_size < size || q_size < size)
- fortify_panic(__func__);
- return __underlying_memcpy(p, q, size);
+ /*
+ * At this point, length argument may not be a constant expression,
+ * so run-time bounds checking can be done where buffer sizes are
+ * known. (This is not an "else" because the above checks may only
+ * be compile-time warnings, and we want to still warn for run-time
+ * overflows.)
+ */
+
+ /*
+ * Always stop accesses beyond the struct that contains the
+ * field, when the buffer's remaining size is known.
+ * (The SIZE_MAX test is to optimize away checks where the buffer
+ * lengths are unknown.)
+ */
+ if (p_size != SIZE_MAX && p_size < size)
+ fortify_panic(FORTIFY_FUNC_memset, FORTIFY_WRITE, p_size, size, true);
+ return false;
}
-__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
-{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
+#define __fortify_memset_chk(p, c, size, p_size, p_size_field) ({ \
+ size_t __fortify_size = (size_t)(size); \
+ fortify_memset_chk(__fortify_size, p_size, p_size_field), \
+ __underlying_memset(p, c, __fortify_size); \
+})
+
+/*
+ * __struct_size() vs __member_size() must be captured here to avoid
+ * evaluating argument side-effects further into the macro layers.
+ */
+#ifndef CONFIG_KMSAN
+#define memset(p, c, s) __fortify_memset_chk(p, c, s, \
+ __struct_size(p), __member_size(p))
+#endif
+/*
+ * To make sure the compiler can enforce protection against buffer overflows,
+ * memcpy(), memmove(), and memset() must not be used beyond individual
+ * struct members. If you need to copy across multiple members, please use
+ * struct_group() to create a named mirror of an anonymous struct union.
+ * (e.g. see struct sk_buff.) Read overflow checking is currently only
+ * done when a write overflow is also present, or when building with W=1.
+ *
+ * Mitigation coverage matrix
+ * Bounds checking at:
+ * +-------+-------+-------+-------+
+ * | Compile time | Run time |
+ * memcpy() argument sizes: | write | read | write | read |
+ * dest source length +-------+-------+-------+-------+
+ * memcpy(known, known, constant) | y | y | n/a | n/a |
+ * memcpy(known, unknown, constant) | y | n | n/a | V |
+ * memcpy(known, known, dynamic) | n | n | B | B |
+ * memcpy(known, unknown, dynamic) | n | n | B | V |
+ * memcpy(unknown, known, constant) | n | y | V | n/a |
+ * memcpy(unknown, unknown, constant) | n | n | V | V |
+ * memcpy(unknown, known, dynamic) | n | n | V | B |
+ * memcpy(unknown, unknown, dynamic) | n | n | V | V |
+ * +-------+-------+-------+-------+
+ *
+ * y = perform deterministic compile-time bounds checking
+ * n = cannot perform deterministic compile-time bounds checking
+ * n/a = no run-time bounds checking needed since compile-time deterministic
+ * B = can perform run-time bounds checking (currently unimplemented)
+ * V = vulnerable to run-time overflow (will need refactoring to solve)
+ *
+ */
+__FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
+ const size_t p_size,
+ const size_t q_size,
+ const size_t p_size_field,
+ const size_t q_size_field,
+ const u8 func)
+{
if (__builtin_constant_p(size)) {
- if (p_size < size)
+ /*
+ * Length argument is a constant expression, so we
+ * can perform compile-time bounds checking where
+ * buffer sizes are also known at compile time.
+ */
+
+ /* Error when size is larger than enclosing struct. */
+ if (__compiletime_lessthan(p_size_field, p_size) &&
+ __compiletime_lessthan(p_size, size))
__write_overflow();
- if (q_size < size)
+ if (__compiletime_lessthan(q_size_field, q_size) &&
+ __compiletime_lessthan(q_size, size))
__read_overflow2();
+
+ /* Warn when write size argument larger than dest field. */
+ if (__compiletime_lessthan(p_size_field, size))
+ __write_overflow_field(p_size_field, size);
+ /*
+ * Warn for source field over-read when building with W=1
+ * or when an over-write happened, so both can be fixed at
+ * the same time.
+ */
+ if ((IS_ENABLED(KBUILD_EXTRA_WARN1) ||
+ __compiletime_lessthan(p_size_field, size)) &&
+ __compiletime_lessthan(q_size_field, size))
+ __read_overflow2_field(q_size_field, size);
}
- if (p_size < size || q_size < size)
- fortify_panic(__func__);
- return __underlying_memmove(p, q, size);
+ /*
+ * At this point, length argument may not be a constant expression,
+ * so run-time bounds checking can be done where buffer sizes are
+ * known. (This is not an "else" because the above checks may only
+ * be compile-time warnings, and we want to still warn for run-time
+ * overflows.)
+ */
+
+ /*
+ * Always stop accesses beyond the struct that contains the
+ * field, when the buffer's remaining size is known.
+ * (The SIZE_MAX test is to optimize away checks where the buffer
+ * lengths are unknown.)
+ */
+ if (p_size != SIZE_MAX && p_size < size)
+ fortify_panic(func, FORTIFY_WRITE, p_size, size, true);
+ else if (q_size != SIZE_MAX && q_size < size)
+ fortify_panic(func, FORTIFY_READ, q_size, size, true);
+
+ /*
+ * Warn when writing beyond destination field size.
+ *
+ * Note the implementation of __builtin_*object_size() behaves
+ * like sizeof() when not directly referencing a flexible
+ * array member, which means there will be many bounds checks
+ * that will appear at run-time, without a way for them to be
+ * detected at compile-time (as can be done when the destination
+ * is specifically the flexible array member).
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101832
+ */
+ if (p_size_field != SIZE_MAX &&
+ p_size != p_size_field && p_size_field < size)
+ return true;
+
+ return false;
}
+/*
+ * To work around what seems to be an optimizer bug, the macro arguments
+ * need to have const copies or the values end up changed by the time they
+ * reach fortify_warn_once(). See commit 6f7630b1b5bc ("fortify: Capture
+ * __bos() results in const temp vars") for more details.
+ */
+#define __fortify_memcpy_chk(p, q, size, p_size, q_size, \
+ p_size_field, q_size_field, op) ({ \
+ const size_t __fortify_size = (size_t)(size); \
+ const size_t __p_size = (p_size); \
+ const size_t __q_size = (q_size); \
+ const size_t __p_size_field = (p_size_field); \
+ const size_t __q_size_field = (q_size_field); \
+ /* Keep a mutable version of the size for the final copy. */ \
+ size_t __copy_size = __fortify_size; \
+ fortify_warn_once(fortify_memcpy_chk(__fortify_size, __p_size, \
+ __q_size, __p_size_field, \
+ __q_size_field, FORTIFY_FUNC_ ##op), \
+ #op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \
+ __fortify_size, \
+ "field \"" #p "\" at " FILE_LINE, \
+ __p_size_field); \
+ /* Hide only the run-time size from value range tracking to */ \
+ /* silence compile-time false positive bounds warnings. */ \
+ if (!__builtin_constant_p(__copy_size)) \
+ OPTIMIZER_HIDE_VAR(__copy_size); \
+ __underlying_##op(p, q, __copy_size); \
+})
+
+/*
+ * Notes about compile-time buffer size detection:
+ *
+ * With these types...
+ *
+ * struct middle {
+ * u16 a;
+ * u8 middle_buf[16];
+ * int b;
+ * };
+ * struct end {
+ * u16 a;
+ * u8 end_buf[16];
+ * };
+ * struct flex {
+ * int a;
+ * u8 flex_buf[];
+ * };
+ *
+ * void func(TYPE *ptr) { ... }
+ *
+ * Cases where destination size cannot be currently detected:
+ * - the size of ptr's object (seemingly by design, gcc & clang fail):
+ * __builtin_object_size(ptr, 1) == SIZE_MAX
+ * - the size of flexible arrays in ptr's obj (by design, dynamic size):
+ * __builtin_object_size(ptr->flex_buf, 1) == SIZE_MAX
+ * - the size of ANY array at the end of ptr's obj (gcc and clang bug):
+ * __builtin_object_size(ptr->end_buf, 1) == SIZE_MAX
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101836
+ *
+ * Cases where destination size is currently detected:
+ * - the size of non-array members within ptr's object:
+ * __builtin_object_size(ptr->a, 1) == 2
+ * - the size of non-flexible-array in the middle of ptr's obj:
+ * __builtin_object_size(ptr->middle_buf, 1) == 16
+ *
+ */
+
+/*
+ * __struct_size() vs __member_size() must be captured here to avoid
+ * evaluating argument side-effects further into the macro layers.
+ */
+#define memcpy(p, q, s) __fortify_memcpy_chk(p, q, s, \
+ __struct_size(p), __struct_size(q), \
+ __member_size(p), __member_size(q), \
+ memcpy)
+#define memmove(p, q, s) __fortify_memcpy_chk(p, q, s, \
+ __struct_size(p), __struct_size(q), \
+ __member_size(p), __member_size(q), \
+ memmove)
+
extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
-__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size)
+__FORTIFY_INLINE void *memscan(void * const POS0 p, int c, __kernel_size_t size)
{
- size_t p_size = __builtin_object_size(p, 0);
+ const size_t p_size = __struct_size(p);
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
- fortify_panic(__func__);
+ fortify_panic(FORTIFY_FUNC_memscan, FORTIFY_READ, p_size, size, NULL);
return __real_memscan(p, c, size);
}
-__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
+__FORTIFY_INLINE __diagnose_as(__builtin_memcmp, 1, 2, 3)
+int memcmp(const void * const POS0 p, const void * const POS0 q, __kernel_size_t size)
{
- size_t p_size = __builtin_object_size(p, 0);
- size_t q_size = __builtin_object_size(q, 0);
+ const size_t p_size = __struct_size(p);
+ const size_t q_size = __struct_size(q);
if (__builtin_constant_p(size)) {
- if (p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__read_overflow();
- if (q_size < size)
+ if (__compiletime_lessthan(q_size, size))
__read_overflow2();
}
- if (p_size < size || q_size < size)
- fortify_panic(__func__);
+ if (p_size < size)
+ fortify_panic(FORTIFY_FUNC_memcmp, FORTIFY_READ, p_size, size, INT_MIN);
+ else if (q_size < size)
+ fortify_panic(FORTIFY_FUNC_memcmp, FORTIFY_READ, q_size, size, INT_MIN);
return __underlying_memcmp(p, q, size);
}
-__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
+__FORTIFY_INLINE __diagnose_as(__builtin_memchr, 1, 2, 3)
+void *memchr(const void * const POS0 p, int c, __kernel_size_t size)
{
- size_t p_size = __builtin_object_size(p, 0);
+ const size_t p_size = __struct_size(p);
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
- fortify_panic(__func__);
+ fortify_panic(FORTIFY_FUNC_memchr, FORTIFY_READ, p_size, size, NULL);
return __underlying_memchr(p, c, size);
}
void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
-__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size)
+__FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size)
{
- size_t p_size = __builtin_object_size(p, 0);
+ const size_t p_size = __struct_size(p);
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
- fortify_panic(__func__);
+ fortify_panic(FORTIFY_FUNC_memchr_inv, FORTIFY_READ, p_size, size, NULL);
return __real_memchr_inv(p, c, size);
}
-extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
-__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
+extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup_noprof)
+ __realloc_size(2);
+__FORTIFY_INLINE void *kmemdup_noprof(const void * const POS0 p, size_t size, gfp_t gfp)
{
- size_t p_size = __builtin_object_size(p, 0);
+ const size_t p_size = __struct_size(p);
- if (__builtin_constant_p(size) && p_size < size)
+ if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
- fortify_panic(__func__);
+ fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size,
+ __real_kmemdup(p, 0, gfp));
return __real_kmemdup(p, size, gfp);
}
-
-/* defined after fortified strlen and memcpy to reuse them */
-__FORTIFY_INLINE char *strcpy(char *p, const char *q)
+#define kmemdup(...) alloc_hooks(kmemdup_noprof(__VA_ARGS__))
+
+/**
+ * strcpy - Copy a string into another string buffer
+ *
+ * @p: pointer to destination of copy
+ * @q: pointer to NUL-terminated source string to copy
+ *
+ * Do not use this function. While FORTIFY_SOURCE tries to avoid
+ * overflows, this is only possible when the sizes of @q and @p are
+ * known to the compiler. Prefer strscpy(), though note its different
+ * return values for detecting truncation.
+ *
+ * Returns @p.
+ *
+ */
+/* Defined after fortified strlen to reuse it. */
+__FORTIFY_INLINE __diagnose_as(__builtin_strcpy, 1, 2)
+char *strcpy(char * const POS p, const char * const POS q)
{
- size_t p_size = __builtin_object_size(p, 1);
- size_t q_size = __builtin_object_size(q, 1);
+ const size_t p_size = __member_size(p);
+ const size_t q_size = __member_size(q);
size_t size;
- if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ /* If neither buffer size is known, immediately give up. */
+ if (__builtin_constant_p(p_size) &&
+ __builtin_constant_p(q_size) &&
+ p_size == SIZE_MAX && q_size == SIZE_MAX)
return __underlying_strcpy(p, q);
size = strlen(q) + 1;
- /* test here to use the more stringent object size */
+ /* Compile-time check for const size overflow. */
+ if (__compiletime_lessthan(p_size, size))
+ __write_overflow();
+ /* Run-time check for dynamic size overflow. */
if (p_size < size)
- fortify_panic(__func__);
- memcpy(p, q, size);
+ fortify_panic(FORTIFY_FUNC_strcpy, FORTIFY_WRITE, p_size, size, p);
+ __underlying_memcpy(p, q, size);
return p;
}
/* Don't use these outside the FORTIFY_SOURCE implementation */
#undef __underlying_memchr
#undef __underlying_memcmp
-#undef __underlying_memcpy
-#undef __underlying_memmove
-#undef __underlying_memset
#undef __underlying_strcat
#undef __underlying_strcpy
#undef __underlying_strlen
#undef __underlying_strncat
#undef __underlying_strncpy
+#undef POS
+#undef POS0
+
#endif /* _LINUX_FORTIFY_STRING_H_ */
diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h
deleted file mode 100644
index 141ac3f251e6..000000000000
--- a/include/linux/fpga/adi-axi-common.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Analog Devices AXI common registers & definitions
- *
- * Copyright 2019 Analog Devices Inc.
- *
- * https://wiki.analog.com/resources/fpga/docs/axi_ip
- * https://wiki.analog.com/resources/fpga/docs/hdl/regmap
- */
-
-#ifndef ADI_AXI_COMMON_H_
-#define ADI_AXI_COMMON_H_
-
-#define ADI_AXI_REG_VERSION 0x0000
-
-#define ADI_AXI_PCORE_VER(major, minor, patch) \
- (((major) << 16) | ((minor) << 8) | (patch))
-
-#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff)
-#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff)
-#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff)
-
-#endif /* ADI_AXI_COMMON_H_ */
diff --git a/include/linux/fpga/altera-pr-ip-core.h b/include/linux/fpga/altera-pr-ip-core.h
index 0b08ac20ab16..a6b4c07858cc 100644
--- a/include/linux/fpga/altera-pr-ip-core.h
+++ b/include/linux/fpga/altera-pr-ip-core.h
@@ -13,6 +13,5 @@
#include <linux/io.h>
int alt_pr_register(struct device *dev, void __iomem *reg_base);
-void alt_pr_unregister(struct device *dev);
#endif /* _ALT_PR_IP_CORE_H */
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
index 817600a32c93..94c4edd047e5 100644
--- a/include/linux/fpga/fpga-bridge.h
+++ b/include/linux/fpga/fpga-bridge.h
@@ -11,7 +11,7 @@ struct fpga_bridge;
/**
* struct fpga_bridge_ops - ops for low level FPGA bridge drivers
* @enable_show: returns the FPGA bridge's status
- * @enable_set: set a FPGA bridge as enabled or disabled
+ * @enable_set: set an FPGA bridge as enabled or disabled
* @fpga_bridge_remove: set FPGA into a specific state during driver remove
* @groups: optional attribute groups.
*/
@@ -23,11 +23,29 @@ struct fpga_bridge_ops {
};
/**
+ * struct fpga_bridge_info - collection of parameters for an FPGA Bridge
+ * @name: fpga bridge name
+ * @br_ops: pointer to structure of fpga bridge ops
+ * @priv: fpga bridge private data
+ *
+ * fpga_bridge_info contains parameters for the register function. These
+ * are separated into an info structure because some are optional and
+ * others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_bridge_info {
+ const char *name;
+ const struct fpga_bridge_ops *br_ops;
+ void *priv;
+};
+
+/**
* struct fpga_bridge - FPGA bridge structure
* @name: name of low level FPGA bridge
* @dev: FPGA bridge device
* @mutex: enforces exclusive reference to bridge
* @br_ops: pointer to struct of FPGA bridge ops
+ * @br_ops_owner: module containing the br_ops
* @info: fpga image specific information
* @node: FPGA bridge list node
* @priv: low level driver private data
@@ -37,6 +55,7 @@ struct fpga_bridge {
struct device dev;
struct mutex mutex; /* for exclusive reference to bridge */
const struct fpga_bridge_ops *br_ops;
+ struct module *br_ops_owner;
struct fpga_image_info *info;
struct list_head node;
void *priv;
@@ -62,15 +81,12 @@ int of_fpga_bridge_get_to_list(struct device_node *np,
struct fpga_image_info *info,
struct list_head *bridge_list);
-struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name,
- const struct fpga_bridge_ops *br_ops,
- void *priv);
-void fpga_bridge_free(struct fpga_bridge *br);
-int fpga_bridge_register(struct fpga_bridge *br);
+#define fpga_bridge_register(parent, name, br_ops, priv) \
+ __fpga_bridge_register(parent, name, br_ops, priv, THIS_MODULE)
+struct fpga_bridge *
+__fpga_bridge_register(struct device *parent, const char *name,
+ const struct fpga_bridge_ops *br_ops, void *priv,
+ struct module *owner);
void fpga_bridge_unregister(struct fpga_bridge *br);
-struct fpga_bridge
-*devm_fpga_bridge_create(struct device *dev, const char *name,
- const struct fpga_bridge_ops *br_ops, void *priv);
-
#endif /* _LINUX_FPGA_BRIDGE_H */
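As a hedged illustration of the reworked interface (the driver names and the enable_set body are invented), a low-level driver now registers a bridge directly and the macro captures THIS_MODULE for it; fpga_bridge_unregister() remains the teardown call:

static int example_br_enable_set(struct fpga_bridge *bridge, bool enable)
{
	/* gate or ungate bus traffic through the soft logic */
	return 0;
}

static const struct fpga_bridge_ops example_br_ops = {
	.enable_set = example_br_enable_set,
};

static int example_probe(struct platform_device *pdev)
{
	struct fpga_bridge *br;

	br = fpga_bridge_register(&pdev->dev, "example-bridge",
				  &example_br_ops, NULL);
	return PTR_ERR_OR_ZERO(br);
}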
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 2bc3030a69e5..0d4fe068f3d8 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -22,6 +22,8 @@ struct sg_table;
* @FPGA_MGR_STATE_RESET: FPGA in reset state
* @FPGA_MGR_STATE_FIRMWARE_REQ: firmware request in progress
* @FPGA_MGR_STATE_FIRMWARE_REQ_ERR: firmware request failed
+ * @FPGA_MGR_STATE_PARSE_HEADER: parse FPGA image header
+ * @FPGA_MGR_STATE_PARSE_HEADER_ERR: Error during PARSE_HEADER stage
* @FPGA_MGR_STATE_WRITE_INIT: preparing FPGA for programming
* @FPGA_MGR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage
* @FPGA_MGR_STATE_WRITE: writing image to FPGA
@@ -41,7 +43,9 @@ enum fpga_mgr_states {
FPGA_MGR_STATE_FIRMWARE_REQ,
FPGA_MGR_STATE_FIRMWARE_REQ_ERR,
- /* write sequence: init, write, complete */
+ /* write sequence: parse header, init, write, complete */
+ FPGA_MGR_STATE_PARSE_HEADER,
+ FPGA_MGR_STATE_PARSE_HEADER_ERR,
FPGA_MGR_STATE_WRITE_INIT,
FPGA_MGR_STATE_WRITE_INIT_ERR,
FPGA_MGR_STATE_WRITE,
@@ -75,7 +79,7 @@ enum fpga_mgr_states {
#define FPGA_MGR_COMPRESSED_BITSTREAM BIT(4)
/**
- * struct fpga_image_info - information specific to a FPGA image
+ * struct fpga_image_info - information specific to an FPGA image
* @flags: boolean flags as defined above
* @enable_timeout_us: maximum time to enable traffic through bridge (uSec)
* @disable_timeout_us: maximum time to disable traffic through bridge (uSec)
@@ -85,6 +89,9 @@ enum fpga_mgr_states {
* @sgt: scatter/gather table containing FPGA image
* @buf: contiguous buffer containing FPGA image
* @count: size of buf
+ * @header_size: size of image header.
+ * @data_size: size of image data to be sent to the device. If not specified,
+ * the whole image will be used. The header may be skipped in either case.
* @region_id: id of target region
* @dev: device that owns this
* @overlay: Device Tree overlay
@@ -98,6 +105,8 @@ struct fpga_image_info {
struct sg_table *sgt;
const char *buf;
size_t count;
+ size_t header_size;
+ size_t data_size;
int region_id;
struct device *dev;
#ifdef CONFIG_OF
@@ -106,11 +115,48 @@ struct fpga_image_info {
};
/**
+ * struct fpga_compat_id - id for compatibility check
+ *
+ * @id_h: high 64bit of the compat_id
+ * @id_l: low 64bit of the compat_id
+ */
+struct fpga_compat_id {
+ u64 id_h;
+ u64 id_l;
+};
+
+/**
+ * struct fpga_manager_info - collection of parameters for an FPGA Manager
+ * @name: fpga manager name
+ * @compat_id: FPGA manager id for compatibility check.
+ * @mops: pointer to structure of fpga manager ops
+ * @priv: fpga manager private data
+ *
+ * fpga_manager_info contains parameters for the register_full function.
+ * These are separated into an info structure because some are optional and
+ * others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_manager_info {
+ const char *name;
+ struct fpga_compat_id *compat_id;
+ const struct fpga_manager_ops *mops;
+ void *priv;
+};
+
+/**
* struct fpga_manager_ops - ops for low level fpga manager drivers
- * @initial_header_size: Maximum number of bytes that should be passed into write_init
+ * @initial_header_size: minimum number of bytes that should be passed into
+ * parse_header and write_init.
+ * @skip_header: bool flag to tell fpga-mgr core whether it should skip
+ * info->header_size part at the beginning of the image when invoking
+ * write callback.
* @state: returns an enum value of the FPGA's state
* @status: returns status of the FPGA, including reconfiguration error code
- * @write_init: prepare the FPGA to receive confuration data
+ * @parse_header: parse FPGA image header to set info->header_size and
+ * info->data_size. In case the input buffer is not large enough, set
+ * required size to info->header_size and return -EAGAIN.
+ * @write_init: prepare the FPGA to receive configuration data
* @write: write count bytes of configuration data to the FPGA
* @write_sg: write the scatter list of configuration data to the FPGA
* @write_complete: set FPGA to operating state after writing is done
@@ -123,8 +169,12 @@ struct fpga_image_info {
*/
struct fpga_manager_ops {
size_t initial_header_size;
+ bool skip_header;
enum fpga_mgr_states (*state)(struct fpga_manager *mgr);
u64 (*status)(struct fpga_manager *mgr);
+ int (*parse_header)(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t count);
int (*write_init)(struct fpga_manager *mgr,
struct fpga_image_info *info,
const char *buf, size_t count);
@@ -144,17 +194,6 @@ struct fpga_manager_ops {
#define FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR BIT(4)
/**
- * struct fpga_compat_id - id for compatibility check
- *
- * @id_h: high 64bit of the compat_id
- * @id_l: low 64bit of the compat_id
- */
-struct fpga_compat_id {
- u64 id_h;
- u64 id_l;
-};
-
-/**
* struct fpga_manager - fpga manager structure
* @name: name of low level fpga manager
* @dev: fpga manager device
@@ -162,6 +201,7 @@ struct fpga_compat_id {
* @state: state of fpga manager
* @compat_id: FPGA manager id for compatibility check.
* @mops: pointer to struct of fpga manager ops
+ * @mops_owner: module containing the mops
* @priv: low level driver private data
*/
struct fpga_manager {
@@ -171,6 +211,7 @@ struct fpga_manager {
enum fpga_mgr_states state;
struct fpga_compat_id *compat_id;
const struct fpga_manager_ops *mops;
+ struct module *mops_owner;
void *priv;
};
@@ -191,17 +232,30 @@ struct fpga_manager *fpga_mgr_get(struct device *dev);
void fpga_mgr_put(struct fpga_manager *mgr);
-struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name,
- const struct fpga_manager_ops *mops,
- void *priv);
-void fpga_mgr_free(struct fpga_manager *mgr);
-int fpga_mgr_register(struct fpga_manager *mgr);
-void fpga_mgr_unregister(struct fpga_manager *mgr);
+#define fpga_mgr_register_full(parent, info) \
+ __fpga_mgr_register_full(parent, info, THIS_MODULE)
+struct fpga_manager *
+__fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
+ struct module *owner);
-int devm_fpga_mgr_register(struct device *dev, struct fpga_manager *mgr);
+#define fpga_mgr_register(parent, name, mops, priv) \
+ __fpga_mgr_register(parent, name, mops, priv, THIS_MODULE)
+struct fpga_manager *
+__fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv, struct module *owner);
+
+void fpga_mgr_unregister(struct fpga_manager *mgr);
-struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
- const struct fpga_manager_ops *mops,
- void *priv);
+#define devm_fpga_mgr_register_full(parent, info) \
+ __devm_fpga_mgr_register_full(parent, info, THIS_MODULE)
+struct fpga_manager *
+__devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
+ struct module *owner);
+#define devm_fpga_mgr_register(parent, name, mops, priv) \
+ __devm_fpga_mgr_register(parent, name, mops, priv, THIS_MODULE)
+struct fpga_manager *
+__devm_fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv,
+ struct module *owner);
#endif /*_LINUX_FPGA_MGR_H */
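A hedged sketch of the register_full flow (names invented; example_mops is assumed to be a driver-defined struct fpga_manager_ops). Passing a struct fpga_manager_info lets optional fields such as compat_id be supplied later without widening every registration call:

static int example_probe(struct platform_device *pdev)
{
	struct fpga_manager_info info = {
		.name = "example-fpga-manager",
		.mops = &example_mops,	/* driver-defined ops, assumed */
	};
	struct fpga_manager *mgr;

	mgr = devm_fpga_mgr_register_full(&pdev->dev, &info);
	return PTR_ERR_OR_ZERO(mgr);
}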
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
index 27cb706275db..5fbc05fe70a6 100644
--- a/include/linux/fpga/fpga-region.h
+++ b/include/linux/fpga/fpga-region.h
@@ -7,6 +7,27 @@
#include <linux/fpga/fpga-mgr.h>
#include <linux/fpga/fpga-bridge.h>
+struct fpga_region;
+
+/**
+ * struct fpga_region_info - collection of parameters for an FPGA Region
+ * @mgr: fpga region manager
+ * @compat_id: FPGA region id for compatibility check.
+ * @priv: fpga region private data
+ * @get_bridges: optional function to get bridges to a list
+ *
+ * fpga_region_info contains parameters for the register_full function.
+ * These are separated into an info structure because some are optional and
+ * others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_region_info {
+ struct fpga_manager *mgr;
+ struct fpga_compat_id *compat_id;
+ void *priv;
+ int (*get_bridges)(struct fpga_region *region);
+};
+
/**
* struct fpga_region - FPGA Region structure
* @dev: FPGA Region device
@@ -15,6 +36,7 @@
* @mgr: FPGA manager
* @info: FPGA image info
* @compat_id: FPGA region id for compatibility check.
+ * @ops_owner: module containing the get_bridges function
* @priv: private data
* @get_bridges: optional function to get bridges to a list
*/
@@ -25,27 +47,30 @@ struct fpga_region {
struct fpga_manager *mgr;
struct fpga_image_info *info;
struct fpga_compat_id *compat_id;
+ struct module *ops_owner;
void *priv;
int (*get_bridges)(struct fpga_region *region);
};
#define to_fpga_region(d) container_of(d, struct fpga_region, dev)
-struct fpga_region *fpga_region_class_find(
- struct device *start, const void *data,
- int (*match)(struct device *, const void *));
+struct fpga_region *
+fpga_region_class_find(struct device *start, const void *data,
+ int (*match)(struct device *, const void *));
int fpga_region_program_fpga(struct fpga_region *region);
-struct fpga_region
-*fpga_region_create(struct device *dev, struct fpga_manager *mgr,
- int (*get_bridges)(struct fpga_region *));
-void fpga_region_free(struct fpga_region *region);
-int fpga_region_register(struct fpga_region *region);
-void fpga_region_unregister(struct fpga_region *region);
+#define fpga_region_register_full(parent, info) \
+ __fpga_region_register_full(parent, info, THIS_MODULE)
+struct fpga_region *
+__fpga_region_register_full(struct device *parent, const struct fpga_region_info *info,
+ struct module *owner);
-struct fpga_region
-*devm_fpga_region_create(struct device *dev, struct fpga_manager *mgr,
- int (*get_bridges)(struct fpga_region *));
+#define fpga_region_register(parent, mgr, get_bridges) \
+ __fpga_region_register(parent, mgr, get_bridges, THIS_MODULE)
+struct fpga_region *
+__fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *), struct module *owner);
+void fpga_region_unregister(struct fpga_region *region);
#endif /* _FPGA_REGION_H */
diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h
new file mode 100644
index 000000000000..0a3bcd1718f3
--- /dev/null
+++ b/include/linux/fprobe.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Simple ftrace probe wrapper */
+#ifndef _LINUX_FPROBE_H
+#define _LINUX_FPROBE_H
+
+#include <linux/compiler.h>
+#include <linux/ftrace.h>
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
+#include <linux/rhashtable.h>
+#include <linux/slab.h>
+
+struct fprobe;
+typedef int (*fprobe_entry_cb)(struct fprobe *fp, unsigned long entry_ip,
+ unsigned long ret_ip, struct ftrace_regs *regs,
+ void *entry_data);
+
+typedef void (*fprobe_exit_cb)(struct fprobe *fp, unsigned long entry_ip,
+ unsigned long ret_ip, struct ftrace_regs *regs,
+ void *entry_data);
+
+/**
+ * struct fprobe_hlist_node - address based hash list node for fprobe.
+ *
+ * @hlist: The hlist node for address search hash table.
+ * @addr: One of the probe addresses of @fp.
+ * @fp: The fprobe which owns this.
+ */
+struct fprobe_hlist_node {
+ struct rhlist_head hlist;
+ unsigned long addr;
+ struct fprobe *fp;
+};
+
+/**
+ * struct fprobe_hlist - hash list nodes for fprobe.
+ *
+ * @hlist: The hlist node for existence checking hash table.
+ * @rcu: rcu_head for RCU deferred release.
+ * @fp: The fprobe which owns this fprobe_hlist.
+ * @size: The size of @array.
+ * @array: The fprobe_hlist_node for each address to probe.
+ */
+struct fprobe_hlist {
+ struct hlist_node hlist;
+ struct rcu_head rcu;
+ struct fprobe *fp;
+ int size;
+ struct fprobe_hlist_node array[] __counted_by(size);
+};
+
+/**
+ * struct fprobe - ftrace based probe.
+ *
+ * @nmissed: The counter for missing events.
+ * @flags: The status flag.
+ * @entry_data_size: The private data storage size.
+ * @entry_handler: The callback function for function entry.
+ * @exit_handler: The callback function for function exit.
+ * @hlist_array: The fprobe_hlist for fprobe search from IP hash table.
+ */
+struct fprobe {
+ unsigned long nmissed;
+ unsigned int flags;
+ size_t entry_data_size;
+
+ fprobe_entry_cb entry_handler;
+ fprobe_exit_cb exit_handler;
+
+ struct fprobe_hlist *hlist_array;
+};
+
+/* This fprobe is soft-disabled. */
+#define FPROBE_FL_DISABLED 1
+
+/*
+ * This fprobe handler will be shared with kprobes.
+ * This flag must be set before registering.
+ */
+#define FPROBE_FL_KPROBE_SHARED 2
+
+static inline bool fprobe_disabled(struct fprobe *fp)
+{
+ return (fp) ? fp->flags & FPROBE_FL_DISABLED : false;
+}
+
+static inline bool fprobe_shared_with_kprobes(struct fprobe *fp)
+{
+ return (fp) ? fp->flags & FPROBE_FL_KPROBE_SHARED : false;
+}
+
+#ifdef CONFIG_FPROBE
+int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter);
+int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num);
+int register_fprobe_syms(struct fprobe *fp, const char **syms, int num);
+int unregister_fprobe(struct fprobe *fp);
+bool fprobe_is_registered(struct fprobe *fp);
+int fprobe_count_ips_from_filter(const char *filter, const char *notfilter);
+#else
+static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
+{
+ return -EOPNOTSUPP;
+}
+static inline int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num)
+{
+ return -EOPNOTSUPP;
+}
+static inline int register_fprobe_syms(struct fprobe *fp, const char **syms, int num)
+{
+ return -EOPNOTSUPP;
+}
+static inline int unregister_fprobe(struct fprobe *fp)
+{
+ return -EOPNOTSUPP;
+}
+static inline bool fprobe_is_registered(struct fprobe *fp)
+{
+ return false;
+}
+static inline int fprobe_count_ips_from_filter(const char *filter, const char *notfilter)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+/**
+ * disable_fprobe() - Disable fprobe
+ * @fp: The fprobe to be disabled.
+ *
+ * This will soft-disable @fp. Note that this doesn't remove the ftrace
+ * hooks from the function entry.
+ */
+static inline void disable_fprobe(struct fprobe *fp)
+{
+ if (fp)
+ fp->flags |= FPROBE_FL_DISABLED;
+}
+
+/**
+ * enable_fprobe() - Enable fprobe
+ * @fp: The fprobe to be enabled.
+ *
+ * This will soft-enable @fp.
+ */
+static inline void enable_fprobe(struct fprobe *fp)
+{
+ if (fp)
+ fp->flags &= ~FPROBE_FL_DISABLED;
+}
+
+/* The entry data size is encoded in 4 bits, i.e. at most (1 << 4) - 1 = 15 words */
+#define FPROBE_DATA_SIZE_BITS 4
+#define MAX_FPROBE_DATA_SIZE_WORD ((1L << FPROBE_DATA_SIZE_BITS) - 1)
+#define MAX_FPROBE_DATA_SIZE (MAX_FPROBE_DATA_SIZE_WORD * sizeof(long))
+
+#endif
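To illustrate the API added above, a minimal sketch of a module that attaches an entry handler to a kernel function; the traced symbol and the handler body are illustrative only:

#include <linux/fprobe.h>
#include <linux/module.h>

static int sketch_entry(struct fprobe *fp, unsigned long entry_ip,
                        unsigned long ret_ip, struct ftrace_regs *regs,
                        void *entry_data)
{
        pr_info("entered %pS\n", (void *)entry_ip);
        return 0;
}

static struct fprobe sketch_fprobe = {
        .entry_handler = sketch_entry,
};

static int __init sketch_init(void)
{
        /* Glob filter selects the probed symbols; NULL excludes nothing. */
        return register_fprobe(&sketch_fprobe, "vfs_read", NULL);
}

static void __exit sketch_exit(void)
{
        unregister_fprobe(&sketch_fprobe);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");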
diff --git a/include/linux/fpu.h b/include/linux/fpu.h
new file mode 100644
index 000000000000..2fb63e22913b
--- /dev/null
+++ b/include/linux/fpu.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_FPU_H
+#define _LINUX_FPU_H
+
+#ifdef _LINUX_FPU_COMPILATION_UNIT
+#error FP code must be compiled separately. See Documentation/core-api/floating-point.rst.
+#endif
+
+#include <asm/fpu.h>
+
+#endif
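Given the guard above, floating-point code is split across two translation units: the FP unit itself (built with CC_FLAGS_FPU, which is assumed to define _LINUX_FPU_COMPILATION_UNIT per the referenced documentation) must not include <linux/fpu.h>, while an ordinary unit brackets the call. A hedged sketch with illustrative names, assuming kernel_fpu_begin()/kernel_fpu_end() come in via <asm/fpu.h>:

/* foo_fpu.c -- built with CC_FLAGS_FPU; must NOT include <linux/fpu.h> */
void foo_scale(const float *in, float *out, int n)
{
        int i;

        for (i = 0; i < n; i++)
                out[i] = in[i] * 0.5f;
}

/* foo.c -- ordinary unit; owns the begin/end bracketing */
#include <linux/fpu.h>

void foo_scale(const float *in, float *out, int n);

void foo_scale_safe(const float *in, float *out, int n)
{
        kernel_fpu_begin();     /* assumed from <asm/fpu.h> */
        foo_scale(in, out, n);
        kernel_fpu_end();
}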
diff --git a/include/linux/framer/framer-provider.h b/include/linux/framer/framer-provider.h
new file mode 100644
index 000000000000..9724d4b44b9c
--- /dev/null
+++ b/include/linux/framer/framer-provider.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Generic framer provider header file
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#ifndef __DRIVERS_PROVIDER_FRAMER_H
+#define __DRIVERS_PROVIDER_FRAMER_H
+
+#include <linux/export.h>
+#include <linux/framer/framer.h>
+#include <linux/types.h>
+
+#define FRAMER_FLAG_POLL_STATUS BIT(0)
+
+/**
+ * struct framer_ops - set of function pointers for performing framer operations
+ * @init: operation to be performed for initializing the framer
+ * @exit: operation to be performed while exiting
+ * @power_on: powering on the framer
+ * @power_off: powering off the framer
+ * @flags: OR-ed flags (FRAMER_FLAG_*) to ask for core functionality
+ * - @FRAMER_FLAG_POLL_STATUS:
+ * Ask the core to perform a polling to get the framer status and
+ * notify consumers on change.
+ * The framer should call @framer_notify_status_change() when it
+ * detects a status change. This is usually done using interrupts.
+ * If the framer cannot detect this change, it can ask the core for
+ * a status polling. The core will then call @get_status()
+ * periodically and, on change detected, it will notify the
+ * consumers.
+ * @owner: the module owner containing the ops
+ */
+struct framer_ops {
+ int (*init)(struct framer *framer);
+ void (*exit)(struct framer *framer);
+ int (*power_on)(struct framer *framer);
+ int (*power_off)(struct framer *framer);
+
+ /**
+ * @get_status:
+ *
+ * Optional.
+ *
+ * Used to get the framer status. framer_init() must have
+ * been called on the framer.
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*get_status)(struct framer *framer, struct framer_status *status);
+
+ /**
+ * @set_config:
+ *
+ * Optional.
+ *
+ * Used to set the framer configuration. framer_init() must have
+ * been called on the framer.
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*set_config)(struct framer *framer, const struct framer_config *config);
+
+ /**
+ * @get_config:
+ *
+ * Optional.
+ *
+ * Used to get the framer configuration. framer_init() must have
+ * been called on the framer.
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ */
+ int (*get_config)(struct framer *framer, struct framer_config *config);
+
+ u32 flags;
+ struct module *owner;
+};
+
+/**
+ * struct framer_provider - represents the framer provider
+ * @dev: framer provider device
+ * @owner: the module owner having of_xlate
+ * @list: to maintain a linked list of framer providers
+ * @of_xlate: function pointer to obtain framer instance from the framer provider
+ */
+struct framer_provider {
+ struct device *dev;
+ struct module *owner;
+ struct list_head list;
+ struct framer * (*of_xlate)(struct device *dev,
+ const struct of_phandle_args *args);
+};
+
+static inline void framer_set_drvdata(struct framer *framer, void *data)
+{
+ dev_set_drvdata(&framer->dev, data);
+}
+
+static inline void *framer_get_drvdata(struct framer *framer)
+{
+ return dev_get_drvdata(&framer->dev);
+}
+
+#if IS_ENABLED(CONFIG_GENERIC_FRAMER)
+
+/* Create and destroy a framer */
+struct framer *framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops);
+void framer_destroy(struct framer *framer);
+
+/* devm version */
+struct framer *devm_framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops);
+
+struct framer *framer_provider_simple_of_xlate(struct device *dev,
+ const struct of_phandle_args *args);
+
+struct framer_provider *
+__framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ const struct of_phandle_args *args));
+
+void framer_provider_of_unregister(struct framer_provider *framer_provider);
+
+struct framer_provider *
+__devm_framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ const struct of_phandle_args *args));
+
+void framer_notify_status_change(struct framer *framer);
+
+#else /* IS_ENABLED(CONFIG_GENERIC_FRAMER) */
+
+static inline struct framer *framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_destroy(struct framer *framer)
+{
+}
+
+/* devm version */
+static inline struct framer *devm_framer_create(struct device *dev, struct device_node *node,
+ const struct framer_ops *ops)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct framer *framer_provider_simple_of_xlate(struct device *dev,
+ const struct of_phandle_args *args)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct framer_provider *
+__framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ const struct of_phandle_args *args))
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_provider_of_unregister(struct framer_provider *framer_provider)
+{
+}
+
+static inline struct framer_provider *
+__devm_framer_provider_of_register(struct device *dev, struct module *owner,
+ struct framer *(*of_xlate)(struct device *dev,
+ const struct of_phandle_args *args))
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_notify_status_change(struct framer *framer)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_GENERIC_FRAMER) */
+
+#define framer_provider_of_register(dev, xlate) \
+ __framer_provider_of_register((dev), THIS_MODULE, (xlate))
+
+#define devm_framer_provider_of_register(dev, xlate) \
+ __devm_framer_provider_of_register((dev), THIS_MODULE, (xlate))
+
+#endif /* __DRIVERS_PROVIDER_FRAMER_H */
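A hedged sketch of the provider side: a platform driver creates one framer and registers itself as its provider. The names, the NULL device_node argument, and the trivial get_status() are illustrative assumptions:

#include <linux/framer/framer-provider.h>
#include <linux/platform_device.h>

static int sketch_get_status(struct framer *framer,
                             struct framer_status *status)
{
        status->link_is_on = true;      /* read the real line state here */
        return 0;
}

static const struct framer_ops sketch_ops = {
        .get_status = sketch_get_status,
        .flags = FRAMER_FLAG_POLL_STATUS,       /* core polls get_status() */
        .owner = THIS_MODULE,
};

static int sketch_probe(struct platform_device *pdev)
{
        struct framer_provider *provider;
        struct framer *framer;

        /* NULL node: assume the core falls back to dev->of_node. */
        framer = devm_framer_create(&pdev->dev, NULL, &sketch_ops);
        if (IS_ERR(framer))
                return PTR_ERR(framer);

        provider = devm_framer_provider_of_register(&pdev->dev,
                                        framer_provider_simple_of_xlate);
        return PTR_ERR_OR_ZERO(provider);
}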
diff --git a/include/linux/framer/framer.h b/include/linux/framer/framer.h
new file mode 100644
index 000000000000..2b85fe9e7f9a
--- /dev/null
+++ b/include/linux/framer/framer.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Generic framer header file
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#ifndef __DRIVERS_FRAMER_H
+#define __DRIVERS_FRAMER_H
+
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+
+/**
+ * enum framer_iface - Framer interface
+ * @FRAMER_IFACE_E1: E1 interface
+ * @FRAMER_IFACE_T1: T1 interface
+ */
+enum framer_iface {
+ FRAMER_IFACE_E1,
+ FRAMER_IFACE_T1,
+};
+
+/**
+ * enum framer_clock_type - Framer clock type
+ * @FRAMER_CLOCK_EXT: External clock
+ * @FRAMER_CLOCK_INT: Internal clock
+ */
+enum framer_clock_type {
+ FRAMER_CLOCK_EXT,
+ FRAMER_CLOCK_INT,
+};
+
+/**
+ * struct framer_config - Framer configuration
+ * @iface: Framer line interface
+ * @clock_type: Framer clock type
+ * @line_clock_rate: Framer line clock rate
+ */
+struct framer_config {
+ enum framer_iface iface;
+ enum framer_clock_type clock_type;
+ unsigned long line_clock_rate;
+};
+
+/**
+ * struct framer_status - Framer status
+ * @link_is_on: Framer link state: true if the link is on, false if it is off.
+ */
+struct framer_status {
+ bool link_is_on;
+};
+
+/**
+ * enum framer_event - Event available for notification
+ * @FRAMER_EVENT_STATUS: Event notified on framer_status changes
+ */
+enum framer_event {
+ FRAMER_EVENT_STATUS,
+};
+
+/**
+ * struct framer - represents the framer device
+ * @dev: framer device
+ * @id: id of the framer device
+ * @ops: function pointers for performing framer operations
+ * @mutex: mutex to protect framer_ops
+ * @init_count: used to protect when the framer is used by multiple consumers
+ * @power_count: used to protect when the framer is used by multiple consumers
+ * @pwr: power regulator associated with the framer
+ * @notify_status_work: work structure used for status notifications
+ * @notifier_list: notifier list used for notifications
+ * @polling_work: delayed work structure used for the polling task
+ * @prev_status: previous read status used by the polling task to detect changes
+ */
+struct framer {
+ struct device dev;
+ int id;
+ const struct framer_ops *ops;
+ struct mutex mutex; /* Protect framer */
+ int init_count;
+ int power_count;
+ struct regulator *pwr;
+ struct work_struct notify_status_work;
+ struct blocking_notifier_head notifier_list;
+ struct delayed_work polling_work;
+ struct framer_status prev_status;
+};
+
+#if IS_ENABLED(CONFIG_GENERIC_FRAMER)
+int framer_pm_runtime_get(struct framer *framer);
+int framer_pm_runtime_get_sync(struct framer *framer);
+int framer_pm_runtime_put(struct framer *framer);
+int framer_pm_runtime_put_sync(struct framer *framer);
+int framer_init(struct framer *framer);
+int framer_exit(struct framer *framer);
+int framer_power_on(struct framer *framer);
+int framer_power_off(struct framer *framer);
+int framer_get_status(struct framer *framer, struct framer_status *status);
+int framer_get_config(struct framer *framer, struct framer_config *config);
+int framer_set_config(struct framer *framer, const struct framer_config *config);
+int framer_notifier_register(struct framer *framer, struct notifier_block *nb);
+int framer_notifier_unregister(struct framer *framer, struct notifier_block *nb);
+
+struct framer *framer_get(struct device *dev, const char *con_id);
+void framer_put(struct device *dev, struct framer *framer);
+
+struct framer *devm_framer_get(struct device *dev, const char *con_id);
+struct framer *devm_framer_optional_get(struct device *dev, const char *con_id);
+#else
+static inline int framer_pm_runtime_get(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_pm_runtime_get_sync(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_pm_runtime_put(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_pm_runtime_put_sync(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_init(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_exit(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_power_on(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_power_off(struct framer *framer)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_get_status(struct framer *framer, struct framer_status *status)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_get_config(struct framer *framer, struct framer_config *config)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_set_config(struct framer *framer, const struct framer_config *config)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_notifier_register(struct framer *framer,
+ struct notifier_block *nb)
+{
+ return -ENOSYS;
+}
+
+static inline int framer_notifier_unregister(struct framer *framer,
+ struct notifier_block *nb)
+{
+ return -ENOSYS;
+}
+
+static inline struct framer *framer_get(struct device *dev, const char *con_id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void framer_put(struct device *dev, struct framer *framer)
+{
+}
+
+static inline struct framer *devm_framer_get(struct device *dev, const char *con_id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct framer *devm_framer_optional_get(struct device *dev, const char *con_id)
+{
+ return NULL;
+}
+
+#endif
+
+#endif /* __DRIVERS_FRAMER_H */
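And the consumer side, sketched under the same caveats (the NULL con_id is an assumption about how lookups without a connection name behave):

#include <linux/framer/framer.h>

static int sketch_attach_framer(struct device *dev)
{
        struct framer *framer;
        int ret;

        framer = devm_framer_optional_get(dev, NULL);
        if (IS_ERR(framer))
                return PTR_ERR(framer);
        if (!framer)
                return 0;       /* optional: no framer described for dev */

        ret = framer_init(framer);
        if (ret)
                return ret;

        ret = framer_power_on(framer);
        if (ret)
                framer_exit(framer);
        return ret;
}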
diff --git a/include/linux/framer/pef2256.h b/include/linux/framer/pef2256.h
new file mode 100644
index 000000000000..71d80af58c40
--- /dev/null
+++ b/include/linux/framer/pef2256.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PEF2256 consumer API
+ *
+ * Copyright 2023 CS GROUP France
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+#ifndef __PEF2256_H__
+#define __PEF2256_H__
+
+#include <linux/types.h>
+
+struct pef2256;
+struct regmap;
+
+/* Retrieve the PEF2256 regmap */
+struct regmap *pef2256_get_regmap(struct pef2256 *pef2256);
+
+/* PEF2256 hardware versions */
+enum pef2256_version {
+ PEF2256_VERSION_UNKNOWN,
+ PEF2256_VERSION_1_2,
+ PEF2256_VERSION_2_1,
+ PEF2256_VERSION_2_2,
+};
+
+/* Get the PEF2256 hardware version */
+enum pef2256_version pef2256_get_version(struct pef2256 *pef2256);
+
+#endif /* __PEF2256_H__ */
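A small hedged sketch of a PEF2256 sub-device consumer; the register offset is purely illustrative:

#include <linux/errno.h>
#include <linux/framer/pef2256.h>
#include <linux/regmap.h>

static int sketch_peek(struct pef2256 *pef2256)
{
        struct regmap *regmap = pef2256_get_regmap(pef2256);
        unsigned int val;

        if (pef2256_get_version(pef2256) == PEF2256_VERSION_UNKNOWN)
                return -ENODEV;

        return regmap_read(regmap, 0x00 /* illustrative offset */, &val);
}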
diff --git a/include/linux/freelist.h b/include/linux/freelist.h
deleted file mode 100644
index fc1842b96469..000000000000
--- a/include/linux/freelist.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
-#ifndef FREELIST_H
-#define FREELIST_H
-
-#include <linux/atomic.h>
-
-/*
- * Copyright: cameron@moodycamel.com
- *
- * A simple CAS-based lock-free free list. Not the fastest thing in the world
- * under heavy contention, but simple and correct (assuming nodes are never
- * freed until after the free list is destroyed), and fairly speedy under low
- * contention.
- *
- * Adapted from: https://moodycamel.com/blog/2014/solving-the-aba-problem-for-lock-free-free-lists
- */
-
-struct freelist_node {
- atomic_t refs;
- struct freelist_node *next;
-};
-
-struct freelist_head {
- struct freelist_node *head;
-};
-
-#define REFS_ON_FREELIST 0x80000000
-#define REFS_MASK 0x7FFFFFFF
-
-static inline void __freelist_add(struct freelist_node *node, struct freelist_head *list)
-{
- /*
- * Since the refcount is zero, and nobody can increase it once it's
- * zero (except us, and we run only one copy of this method per node at
- * a time, i.e. the single thread case), then we know we can safely
- * change the next pointer of the node; however, once the refcount is
- * back above zero, then other threads could increase it (happens under
- * heavy contention, when the refcount goes to zero in between a load
- * and a refcount increment of a node in try_get, then back up to
- * something non-zero, then the refcount increment is done by the other
- * thread) -- so if the CAS to add the node to the actual list fails,
- * decrease the refcount and leave the add operation to the next thread
- * who puts the refcount back to zero (which could be us, hence the
- * loop).
- */
- struct freelist_node *head = READ_ONCE(list->head);
-
- for (;;) {
- WRITE_ONCE(node->next, head);
- atomic_set_release(&node->refs, 1);
-
- if (!try_cmpxchg_release(&list->head, &head, node)) {
- /*
- * Hmm, the add failed, but we can only try again when
- * the refcount goes back to zero.
- */
- if (atomic_fetch_add_release(REFS_ON_FREELIST - 1, &node->refs) == 1)
- continue;
- }
- return;
- }
-}
-
-static inline void freelist_add(struct freelist_node *node, struct freelist_head *list)
-{
- /*
- * We know that the should-be-on-freelist bit is 0 at this point, so
- * it's safe to set it using a fetch_add.
- */
- if (!atomic_fetch_add_release(REFS_ON_FREELIST, &node->refs)) {
- /*
- * Oh look! We were the last ones referencing this node, and we
- * know we want to add it to the free list, so let's do it!
- */
- __freelist_add(node, list);
- }
-}
-
-static inline struct freelist_node *freelist_try_get(struct freelist_head *list)
-{
- struct freelist_node *prev, *next, *head = smp_load_acquire(&list->head);
- unsigned int refs;
-
- while (head) {
- prev = head;
- refs = atomic_read(&head->refs);
- if ((refs & REFS_MASK) == 0 ||
- !atomic_try_cmpxchg_acquire(&head->refs, &refs, refs+1)) {
- head = smp_load_acquire(&list->head);
- continue;
- }
-
- /*
- * Good, reference count has been incremented (it wasn't at
- * zero), which means we can read the next and not worry about
- * it changing between now and the time we do the CAS.
- */
- next = READ_ONCE(head->next);
- if (try_cmpxchg_acquire(&list->head, &head, next)) {
- /*
- * Yay, got the node. This means it was on the list,
- * which means should-be-on-freelist must be false no
- * matter the refcount (because nobody else knows it's
- * been taken off yet, it can't have been put back on).
- */
- WARN_ON_ONCE(atomic_read(&head->refs) & REFS_ON_FREELIST);
-
- /*
- * Decrease refcount twice, once for our ref, and once
- * for the list's ref.
- */
- atomic_fetch_add(-2, &head->refs);
-
- return head;
- }
-
- /*
- * OK, the head must have changed on us, but we still need to decrement
- * the refcount we increased.
- */
- refs = atomic_fetch_add(-1, &prev->refs);
- if (refs == REFS_ON_FREELIST + 1)
- __freelist_add(prev, list);
- }
-
- return NULL;
-}
-
-#endif /* FREELIST_H */
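For context on what this removal takes away, a sketch of the usage pattern the header supported (hypothetical structure and function names); the key property was that nodes could be re-added and re-taken concurrently without ABA hazards, as long as their memory outlived the list:

#include <linux/container_of.h>
#include <linux/freelist.h>     /* the header removed above */

struct sketch_instance {
        struct freelist_node fnode;     /* must outlive the list itself */
        int payload;
};

static struct freelist_head sketch_pool;

static void sketch_put(struct sketch_instance *inst)
{
        freelist_add(&inst->fnode, &sketch_pool);
}

static struct sketch_instance *sketch_get(void)
{
        struct freelist_node *n = freelist_try_get(&sketch_pool);

        return n ? container_of(n, struct sketch_instance, fnode) : NULL;
}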
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 0621c5f86c39..0a8c6c4d1a82 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -8,9 +8,11 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
+#include <linux/jump_label.h>
#ifdef CONFIG_FREEZER
-extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */
+DECLARE_STATIC_KEY_FALSE(freezer_active);
+
extern bool pm_freezing; /* PM freezing in effect */
extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
@@ -20,23 +22,25 @@ extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
extern unsigned int freeze_timeout_msecs;
/*
- * Check if a process has been frozen
+ * Check if a process has been frozen by the PM or cgroup1 freezer. Note that
+ * cgroup2 freezer uses the job control mechanism and does not interact with
+ * the PM freezer.
*/
-static inline bool frozen(struct task_struct *p)
-{
- return p->flags & PF_FROZEN;
-}
+extern bool frozen(struct task_struct *p);
extern bool freezing_slow_path(struct task_struct *p);
/*
- * Check if there is a request to freeze a process
+ * Check if there is a request to freeze a task from the PM or cgroup1 freezer.
+ * Note that cgroup2 freezer uses the job control mechanism and does not
+ * interact with the PM freezer.
*/
static inline bool freezing(struct task_struct *p)
{
- if (likely(!atomic_read(&system_freezing_cnt)))
- return false;
- return freezing_slow_path(p);
+ if (static_branch_unlikely(&freezer_active))
+ return freezing_slow_path(p);
+
+ return false;
}
/* Takes and releases task alloc lock using task_lock() */
@@ -47,227 +51,30 @@ extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);
+extern void thaw_process(struct task_struct *p);
-/*
- * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
- * If try_to_freeze causes a lockdep warning it means the caller may deadlock
- */
-static inline bool try_to_freeze_unsafe(void)
+static inline bool try_to_freeze(void)
{
might_sleep();
if (likely(!freezing(current)))
return false;
- return __refrigerator(false);
-}
-
-static inline bool try_to_freeze(void)
-{
if (!(current->flags & PF_NOFREEZE))
debug_check_no_locks_held();
- return try_to_freeze_unsafe();
+ return __refrigerator(false);
}
extern bool freeze_task(struct task_struct *p);
extern bool set_freezable(void);
#ifdef CONFIG_CGROUP_FREEZER
-extern bool cgroup_freezing(struct task_struct *task);
+extern bool cgroup1_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
-static inline bool cgroup_freezing(struct task_struct *task)
+static inline bool cgroup1_freezing(struct task_struct *task)
{
return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */
-/*
- * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
- * calls wait_for_completion(&vfork) and reset right after it returns from this
- * function. Next, the parent should call try_to_freeze() to freeze itself
- * appropriately in case the child has exited before the freezing of tasks is
- * complete. However, we don't want kernel threads to be frozen in unexpected
- * places, so we allow them to block freeze_processes() instead or to set
- * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
- * parent won't really block freeze_processes(), since ____call_usermodehelper()
- * (the child) does a little before exec/exit and it can't be frozen before
- * waking up the parent.
- */
-
-
-/**
- * freezer_do_not_count - tell freezer to ignore %current
- *
- * Tell freezers to ignore the current task when determining whether the
- * target frozen state is reached. IOW, the current task will be
- * considered frozen enough by freezers.
- *
- * The caller shouldn't do anything which isn't allowed for a frozen task
- * until freezer_cont() is called. Usually, freezer[_do_not]_count() pair
- * wrap a scheduling operation and nothing much else.
- */
-static inline void freezer_do_not_count(void)
-{
- current->flags |= PF_FREEZER_SKIP;
-}
-
-/**
- * freezer_count - tell freezer to stop ignoring %current
- *
- * Undo freezer_do_not_count(). It tells freezers that %current should be
- * considered again and tries to freeze if freezing condition is already in
- * effect.
- */
-static inline void freezer_count(void)
-{
- current->flags &= ~PF_FREEZER_SKIP;
- /*
- * If freezing is in progress, the following paired with smp_mb()
- * in freezer_should_skip() ensures that either we see %true
- * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
- */
- smp_mb();
- try_to_freeze();
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline void freezer_count_unsafe(void)
-{
- current->flags &= ~PF_FREEZER_SKIP;
- smp_mb();
- try_to_freeze_unsafe();
-}
-
-/**
- * freezer_should_skip - whether to skip a task when determining frozen
- * state is reached
- * @p: task in question
- *
- * This function is used by freezers after establishing %true freezing() to
- * test whether a task should be skipped when determining the target frozen
- * state is reached. IOW, if this function returns %true, @p is considered
- * frozen enough.
- */
-static inline bool freezer_should_skip(struct task_struct *p)
-{
- /*
- * The following smp_mb() paired with the one in freezer_count()
- * ensures that either freezer_count() sees %true freezing() or we
- * see cleared %PF_FREEZER_SKIP and return %false. This makes it
- * impossible for a task to slip frozen state testing after
- * clearing %PF_FREEZER_SKIP.
- */
- smp_mb();
- return p->flags & PF_FREEZER_SKIP;
-}
-
-/*
- * These functions are intended to be used whenever you want allow a sleeping
- * task to be frozen. Note that neither return any clear indication of
- * whether a freeze event happened while in this function.
- */
-
-/* Like schedule(), but should not block the freezer. */
-static inline void freezable_schedule(void)
-{
- freezer_do_not_count();
- schedule();
- freezer_count();
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline void freezable_schedule_unsafe(void)
-{
- freezer_do_not_count();
- schedule();
- freezer_count_unsafe();
-}
-
-/*
- * Like schedule_timeout(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline long freezable_schedule_timeout(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout(timeout);
- freezer_count();
- return __retval;
-}
-
-/*
- * Like schedule_timeout_interruptible(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline long freezable_schedule_timeout_interruptible(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_interruptible(timeout);
- freezer_count();
- return __retval;
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout)
-{
- long __retval;
-
- freezer_do_not_count();
- __retval = schedule_timeout_interruptible(timeout);
- freezer_count_unsafe();
- return __retval;
-}
-
-/* Like schedule_timeout_killable(), but should not block the freezer. */
-static inline long freezable_schedule_timeout_killable(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_killable(timeout);
- freezer_count();
- return __retval;
-}
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
-{
- long __retval;
- freezer_do_not_count();
- __retval = schedule_timeout_killable(timeout);
- freezer_count_unsafe();
- return __retval;
-}
-
-/*
- * Like schedule_hrtimeout_range(), but should not block the freezer. Do not
- * call this with locks held.
- */
-static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
- u64 delta, const enum hrtimer_mode mode)
-{
- int __retval;
- freezer_do_not_count();
- __retval = schedule_hrtimeout_range(expires, delta, mode);
- freezer_count();
- return __retval;
-}
-
-/*
- * Freezer-friendly wrappers around wait_event_interruptible(),
- * wait_event_killable() and wait_event_interruptible_timeout(), originally
- * defined in <linux/wait.h>
- */
-
-/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
-#define wait_event_freezekillable_unsafe(wq, condition) \
-({ \
- int __retval; \
- freezer_do_not_count(); \
- __retval = wait_event_killable(wq, (condition)); \
- freezer_count_unsafe(); \
- __retval; \
-})
-
#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
@@ -278,38 +85,12 @@ static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}
+static inline void thaw_process(struct task_struct *p) {}
static inline bool try_to_freeze(void) { return false; }
-static inline void freezer_do_not_count(void) {}
-static inline void freezer_count(void) {}
-static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}
-#define freezable_schedule() schedule()
-
-#define freezable_schedule_unsafe() schedule()
-
-#define freezable_schedule_timeout(timeout) schedule_timeout(timeout)
-
-#define freezable_schedule_timeout_interruptible(timeout) \
- schedule_timeout_interruptible(timeout)
-
-#define freezable_schedule_timeout_interruptible_unsafe(timeout) \
- schedule_timeout_interruptible(timeout)
-
-#define freezable_schedule_timeout_killable(timeout) \
- schedule_timeout_killable(timeout)
-
-#define freezable_schedule_timeout_killable_unsafe(timeout) \
- schedule_timeout_killable(timeout)
-
-#define freezable_schedule_hrtimeout_range(expires, delta, mode) \
- schedule_hrtimeout_range(expires, delta, mode)
-
-#define wait_event_freezekillable_unsafe(wq, condition) \
- wait_event_killable(wq, condition)
-
#endif /* !CONFIG_FREEZER */
#endif /* FREEZER_H_INCLUDED */
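With the *_unsafe and freezable_schedule*() helpers gone, a freezable kernel thread reduces to the two remaining calls. A minimal sketch with illustrative names:

#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

static int sketch_thread(void *data)
{
        set_freezable();        /* kthreads are PF_NOFREEZE by default */

        while (!kthread_should_stop()) {
                try_to_freeze();        /* parks here during suspend */
                /* ... periodic work ... */
                msleep_interruptible(1000);
        }
        return 0;
}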
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
deleted file mode 100644
index b07d88c92bb2..000000000000
--- a/include/linux/frontswap.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_FRONTSWAP_H
-#define _LINUX_FRONTSWAP_H
-
-#include <linux/swap.h>
-#include <linux/mm.h>
-#include <linux/bitops.h>
-#include <linux/jump_label.h>
-
-/*
- * Return code to denote that requested number of
- * frontswap pages are unused(moved to page cache).
- * Used in shmem_unuse and try_to_unuse.
- */
-#define FRONTSWAP_PAGES_UNUSED 2
-
-struct frontswap_ops {
- void (*init)(unsigned); /* this swap type was just swapon'ed */
- int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
- int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
- void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */
- void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */
- struct frontswap_ops *next; /* private pointer to next ops */
-};
-
-extern void frontswap_register_ops(struct frontswap_ops *ops);
-extern void frontswap_shrink(unsigned long);
-extern unsigned long frontswap_curr_pages(void);
-extern void frontswap_writethrough(bool);
-#define FRONTSWAP_HAS_EXCLUSIVE_GETS
-extern void frontswap_tmem_exclusive_gets(bool);
-
-extern bool __frontswap_test(struct swap_info_struct *, pgoff_t);
-extern void __frontswap_init(unsigned type, unsigned long *map);
-extern int __frontswap_store(struct page *page);
-extern int __frontswap_load(struct page *page);
-extern void __frontswap_invalidate_page(unsigned, pgoff_t);
-extern void __frontswap_invalidate_area(unsigned);
-
-#ifdef CONFIG_FRONTSWAP
-extern struct static_key_false frontswap_enabled_key;
-
-static inline bool frontswap_enabled(void)
-{
- return static_branch_unlikely(&frontswap_enabled_key);
-}
-
-static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
-{
- return __frontswap_test(sis, offset);
-}
-
-static inline void frontswap_map_set(struct swap_info_struct *p,
- unsigned long *map)
-{
- p->frontswap_map = map;
-}
-
-static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
-{
- return p->frontswap_map;
-}
-#else
-/* all inline routines become no-ops and all externs are ignored */
-
-static inline bool frontswap_enabled(void)
-{
- return false;
-}
-
-static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
-{
- return false;
-}
-
-static inline void frontswap_map_set(struct swap_info_struct *p,
- unsigned long *map)
-{
-}
-
-static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
-{
- return NULL;
-}
-#endif
-
-static inline int frontswap_store(struct page *page)
-{
- if (frontswap_enabled())
- return __frontswap_store(page);
-
- return -1;
-}
-
-static inline int frontswap_load(struct page *page)
-{
- if (frontswap_enabled())
- return __frontswap_load(page);
-
- return -1;
-}
-
-static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset)
-{
- if (frontswap_enabled())
- __frontswap_invalidate_page(type, offset);
-}
-
-static inline void frontswap_invalidate_area(unsigned type)
-{
- if (frontswap_enabled())
- __frontswap_invalidate_area(type);
-}
-
-static inline void frontswap_init(unsigned type, unsigned long *map)
-{
-#ifdef CONFIG_FRONTSWAP
- __frontswap_init(type, map);
-#endif
-}
-
-#endif /* _LINUX_FRONTSWAP_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c3c88fdb9b2a..04ceeca12a0d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2,6 +2,8 @@
#ifndef _LINUX_FS_H
#define _LINUX_FS_H
+#include <linux/fs/super.h>
+#include <linux/vfsdebug.h>
#include <linux/linkage.h>
#include <linux/wait_bit.h>
#include <linux/kdev_t.h>
@@ -10,7 +12,6 @@
#include <linux/stat.h>
#include <linux/cache.h>
#include <linux/list.h>
-#include <linux/list_lru.h>
#include <linux/llist.h>
#include <linux/radix-tree.h>
#include <linux/xarray.h>
@@ -36,19 +37,23 @@
#include <linux/uuid.h>
#include <linux/errseq.h>
#include <linux/ioprio.h>
-#include <linux/fs_types.h>
#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/mount.h>
#include <linux/cred.h>
+#include <linux/mnt_idmapping.h>
+#include <linux/slab.h>
+#include <linux/maple_tree.h>
+#include <linux/rw_hint.h>
+#include <linux/file_ref.h>
+#include <linux/unicode.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
-struct backing_dev_info;
struct bdi_writeback;
struct bio;
-struct export_operations;
+struct io_comp_batch;
struct fiemap_extent_info;
struct hd_geometry;
struct iovec;
@@ -62,30 +67,21 @@ struct vfsmount;
struct cred;
struct swap_info_struct;
struct seq_file;
-struct workqueue_struct;
struct iov_iter;
-struct fscrypt_info;
-struct fscrypt_operations;
-struct fsverity_info;
-struct fsverity_operations;
+struct fsnotify_mark_connector;
struct fs_context;
struct fs_parameter_spec;
-struct fileattr;
+struct file_kattr;
+struct iomap_ops;
+struct delegated_inode;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
extern void __init files_init(void);
extern void __init files_maxfiles_init(void);
-extern struct files_stat_struct files_stat;
extern unsigned long get_max_files(void);
extern unsigned int sysctl_nr_open;
-extern struct inodes_stat_t inodes_stat;
-extern int leases_enable, lease_break_time;
-extern int sysctl_protected_symlinks;
-extern int sysctl_protected_hardlinks;
-extern int sysctl_protected_fifos;
-extern int sysctl_protected_regular;
typedef __kernel_rwf_t rwf_t;
@@ -111,28 +107,28 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
*/
/* file is open for reading */
-#define FMODE_READ ((__force fmode_t)0x1)
+#define FMODE_READ ((__force fmode_t)(1 << 0))
/* file is open for writing */
-#define FMODE_WRITE ((__force fmode_t)0x2)
+#define FMODE_WRITE ((__force fmode_t)(1 << 1))
/* file is seekable */
-#define FMODE_LSEEK ((__force fmode_t)0x4)
+#define FMODE_LSEEK ((__force fmode_t)(1 << 2))
/* file can be accessed using pread */
-#define FMODE_PREAD ((__force fmode_t)0x8)
+#define FMODE_PREAD ((__force fmode_t)(1 << 3))
/* file can be accessed using pwrite */
-#define FMODE_PWRITE ((__force fmode_t)0x10)
+#define FMODE_PWRITE ((__force fmode_t)(1 << 4))
/* File is opened for execution with sys_execve / sys_uselib */
-#define FMODE_EXEC ((__force fmode_t)0x20)
-/* File is opened with O_NDELAY (only set for block devices) */
-#define FMODE_NDELAY ((__force fmode_t)0x40)
-/* File is opened with O_EXCL (only set for block devices) */
-#define FMODE_EXCL ((__force fmode_t)0x80)
-/* File is opened using open(.., 3, ..) and is writeable only for ioctls
- (specialy hack for floppy.c) */
-#define FMODE_WRITE_IOCTL ((__force fmode_t)0x100)
+#define FMODE_EXEC ((__force fmode_t)(1 << 5))
+/* File writes are restricted (block device specific) */
+#define FMODE_WRITE_RESTRICTED ((__force fmode_t)(1 << 6))
+/* File supports atomic writes */
+#define FMODE_CAN_ATOMIC_WRITE ((__force fmode_t)(1 << 7))
+
+/* FMODE_* bit 8 */
+
/* 32bit hashes as llseek() offset (for directories) */
-#define FMODE_32BITHASH ((__force fmode_t)0x200)
+#define FMODE_32BITHASH ((__force fmode_t)(1 << 9))
/* 64bit hashes as llseek() offset (for directories) */
-#define FMODE_64BITHASH ((__force fmode_t)0x400)
+#define FMODE_64BITHASH ((__force fmode_t)(1 << 10))
/*
* Don't update ctime and mtime.
@@ -140,46 +136,85 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
* Currently a special hack for the XFS open_by_handle ioctl, but we'll
* hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon.
*/
-#define FMODE_NOCMTIME ((__force fmode_t)0x800)
+#define FMODE_NOCMTIME ((__force fmode_t)(1 << 11))
/* Expect random access pattern */
-#define FMODE_RANDOM ((__force fmode_t)0x1000)
+#define FMODE_RANDOM ((__force fmode_t)(1 << 12))
-/* File is huge (eg. /dev/mem): treat loff_t as unsigned */
-#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
+/* Supports IOCB_HAS_METADATA */
+#define FMODE_HAS_METADATA ((__force fmode_t)(1 << 13))
/* File is opened with O_PATH; almost nothing can be done with it */
-#define FMODE_PATH ((__force fmode_t)0x4000)
+#define FMODE_PATH ((__force fmode_t)(1 << 14))
/* File needs atomic accesses to f_pos */
-#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000)
+#define FMODE_ATOMIC_POS ((__force fmode_t)(1 << 15))
/* Write access to underlying fs */
-#define FMODE_WRITER ((__force fmode_t)0x10000)
+#define FMODE_WRITER ((__force fmode_t)(1 << 16))
/* Has read method(s) */
-#define FMODE_CAN_READ ((__force fmode_t)0x20000)
+#define FMODE_CAN_READ ((__force fmode_t)(1 << 17))
/* Has write method(s) */
-#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
+#define FMODE_CAN_WRITE ((__force fmode_t)(1 << 18))
-#define FMODE_OPENED ((__force fmode_t)0x80000)
-#define FMODE_CREATED ((__force fmode_t)0x100000)
+#define FMODE_OPENED ((__force fmode_t)(1 << 19))
+#define FMODE_CREATED ((__force fmode_t)(1 << 20))
/* File is stream-like */
-#define FMODE_STREAM ((__force fmode_t)0x200000)
+#define FMODE_STREAM ((__force fmode_t)(1 << 21))
+
+/* File supports DIRECT IO */
+#define FMODE_CAN_ODIRECT ((__force fmode_t)(1 << 22))
-/* File was opened by fanotify and shouldn't generate fanotify events */
-#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
+#define FMODE_NOREUSE ((__force fmode_t)(1 << 23))
+
+/* File is embedded in backing_file object */
+#define FMODE_BACKING ((__force fmode_t)(1 << 24))
+
+/*
+ * Together with FMODE_NONOTIFY_PERM defines which fsnotify events shouldn't be
+ * generated (see below)
+ */
+#define FMODE_NONOTIFY ((__force fmode_t)(1 << 25))
+
+/*
+ * Together with FMODE_NONOTIFY defines which fsnotify events shouldn't be
+ * generated (see below)
+ */
+#define FMODE_NONOTIFY_PERM ((__force fmode_t)(1 << 26))
/* File is capable of returning -EAGAIN if I/O will block */
-#define FMODE_NOWAIT ((__force fmode_t)0x8000000)
+#define FMODE_NOWAIT ((__force fmode_t)(1 << 27))
/* File represents mount that needs unmounting */
-#define FMODE_NEED_UNMOUNT ((__force fmode_t)0x10000000)
+#define FMODE_NEED_UNMOUNT ((__force fmode_t)(1 << 28))
/* File does not contribute to nr_files count */
-#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000)
+#define FMODE_NOACCOUNT ((__force fmode_t)(1 << 29))
-/* File supports async buffered reads */
-#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000)
+/*
+ * The two FMODE_NONOTIFY* define which fsnotify events should not be generated
+ * for an open file. These are the possible values of
+ * (f->f_mode & FMODE_FSNOTIFY_MASK) and their meaning:
+ *
+ * FMODE_NONOTIFY - suppress all (incl. non-permission) events.
+ * FMODE_NONOTIFY_PERM - suppress permission (incl. pre-content) events.
+ * FMODE_NONOTIFY | FMODE_NONOTIFY_PERM - suppress only FAN_ACCESS_PERM.
+ */
+#define FMODE_FSNOTIFY_MASK \
+ (FMODE_NONOTIFY | FMODE_NONOTIFY_PERM)
+
+#define FMODE_FSNOTIFY_NONE(mode) \
+ ((mode & FMODE_FSNOTIFY_MASK) == FMODE_NONOTIFY)
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+#define FMODE_FSNOTIFY_HSM(mode) \
+ ((mode & FMODE_FSNOTIFY_MASK) == 0 || \
+ (mode & FMODE_FSNOTIFY_MASK) == (FMODE_NONOTIFY | FMODE_NONOTIFY_PERM))
+#define FMODE_FSNOTIFY_ACCESS_PERM(mode) \
+ ((mode & FMODE_FSNOTIFY_MASK) == 0)
+#else
+#define FMODE_FSNOTIFY_ACCESS_PERM(mode) 0
+#define FMODE_FSNOTIFY_HSM(mode) 0
+#endif
/*
* Attribute flags. These should be or-ed together to figure out what
@@ -195,6 +230,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define ATTR_ATIME_SET (1 << 7)
#define ATTR_MTIME_SET (1 << 8)
#define ATTR_FORCE (1 << 9) /* Not a change, but a change it */
+#define ATTR_CTIME_SET (1 << 10)
#define ATTR_KILL_SUID (1 << 11)
#define ATTR_KILL_SGID (1 << 12)
#define ATTR_FILE (1 << 13)
@@ -202,6 +238,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
#define ATTR_TIMES_SET (1 << 16)
#define ATTR_TOUCH (1 << 17)
+#define ATTR_DELEG (1 << 18) /* Delegated attrs. Don't break write delegations */
/*
* Whiteout is represented by a char device. The following constants define the
@@ -222,8 +259,26 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
struct iattr {
unsigned int ia_valid;
umode_t ia_mode;
- kuid_t ia_uid;
- kgid_t ia_gid;
+ /*
+ * The two anonymous unions wrap structures with the same member.
+ *
+ * Filesystems raising FS_ALLOW_IDMAP need to use ia_vfs{g,u}id which
+ * are a dedicated type requiring the filesystem to use the dedicated
+ * helpers. Other filesystem can continue to use ia_{g,u}id until they
+ * have been ported.
+ *
+ * They always contain the same value. In other words FS_ALLOW_IDMAP
+ * pass down the same value on idmapped mounts as they would on regular
+ * mounts.
+ */
+ union {
+ kuid_t ia_uid;
+ vfsuid_t ia_vfsuid;
+ };
+ union {
+ kgid_t ia_gid;
+ vfsgid_t ia_vfsgid;
+ };
loff_t ia_size;
struct timespec64 ia_atime;
struct timespec64 ia_mtime;
@@ -238,11 +293,6 @@ struct iattr {
};
/*
- * Includes for diskquotas.
- */
-#include <linux/quota.h>
-
-/*
* Maximum number of layers of fs stack. Needs to be limited to
* prevent kernel stack overflow
*/
@@ -266,7 +316,7 @@ struct iattr {
* trying again. The aop will be taking reasonable
* precautions not to livelock. If the caller held a page
* reference, it should drop it before retrying. Returned
- * by readpage().
+ * by read_folio().
*
* address_space_operation functions return these large constants to indicate
* special semantics to the caller. These are much larger than the bytes in a
@@ -279,11 +329,6 @@ enum positive_aop_returns {
AOP_TRUNCATED_PAGE = 0x80001,
};
-#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */
-#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct
- * helper code (eg buffer layer)
- * to clear GFP_FS from alloc */
-
/*
* oh the beauties of C type declarations.
*/
@@ -292,25 +337,15 @@ struct address_space;
struct writeback_control;
struct readahead_control;
-/*
- * Write life time hint values.
- * Stored in struct inode as u8.
- */
-enum rw_hint {
- WRITE_LIFE_NOT_SET = 0,
- WRITE_LIFE_NONE = RWH_WRITE_LIFE_NONE,
- WRITE_LIFE_SHORT = RWH_WRITE_LIFE_SHORT,
- WRITE_LIFE_MEDIUM = RWH_WRITE_LIFE_MEDIUM,
- WRITE_LIFE_LONG = RWH_WRITE_LIFE_LONG,
- WRITE_LIFE_EXTREME = RWH_WRITE_LIFE_EXTREME,
-};
-
/* Match RWF_* bits to IOCB bits */
#define IOCB_HIPRI (__force int) RWF_HIPRI
#define IOCB_DSYNC (__force int) RWF_DSYNC
#define IOCB_SYNC (__force int) RWF_SYNC
#define IOCB_NOWAIT (__force int) RWF_NOWAIT
#define IOCB_APPEND (__force int) RWF_APPEND
+#define IOCB_ATOMIC (__force int) RWF_ATOMIC
+#define IOCB_DONTCACHE (__force int) RWF_DONTCACHE
+#define IOCB_NOSIGNAL (__force int) RWF_NOSIGNAL
/* non-RWF related bits - start at 16 */
#define IOCB_EVENTFD (1 << 16)
@@ -319,25 +354,45 @@ enum rw_hint {
/* iocb->ki_waitq is valid */
#define IOCB_WAITQ (1 << 19)
#define IOCB_NOIO (1 << 20)
+/* can use bio alloc cache */
+#define IOCB_ALLOC_CACHE (1 << 21)
+/* kiocb is a read or write operation submitted by fs/aio.c. */
+#define IOCB_AIO_RW (1 << 22)
+#define IOCB_HAS_METADATA (1 << 23)
+
+/* for use in trace events */
+#define TRACE_IOCB_STRINGS \
+ { IOCB_HIPRI, "HIPRI" }, \
+ { IOCB_DSYNC, "DSYNC" }, \
+ { IOCB_SYNC, "SYNC" }, \
+ { IOCB_NOWAIT, "NOWAIT" }, \
+ { IOCB_APPEND, "APPEND" }, \
+ { IOCB_ATOMIC, "ATOMIC" }, \
+ { IOCB_DONTCACHE, "DONTCACHE" }, \
+ { IOCB_EVENTFD, "EVENTFD"}, \
+ { IOCB_DIRECT, "DIRECT" }, \
+ { IOCB_WRITE, "WRITE" }, \
+ { IOCB_WAITQ, "WAITQ" }, \
+ { IOCB_NOIO, "NOIO" }, \
+ { IOCB_ALLOC_CACHE, "ALLOC_CACHE" }, \
+ { IOCB_AIO_RW, "AIO_RW" }, \
+ { IOCB_HAS_METADATA, "AIO_HAS_METADATA" }
struct kiocb {
struct file *ki_filp;
-
- /* The 'ki_filp' pointer is shared in a union for aio */
- randomized_struct_fields_start
-
loff_t ki_pos;
- void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
+ void (*ki_complete)(struct kiocb *iocb, long ret);
void *private;
int ki_flags;
- u16 ki_hint;
u16 ki_ioprio; /* See linux/ioprio.h */
- union {
- unsigned int ki_cookie; /* for ->iopoll */
- struct wait_page_queue *ki_waitq; /* for async buffered IO */
- };
+ u8 ki_write_stream;
- randomized_struct_fields_end
+ /*
+ * Only used for async buffered reads, where it denotes the page
+ * waitqueue associated with completing the read.
+ * Valid IFF IOCB_WAITQ is set.
+ */
+ struct wait_page_queue *ki_waitq;
};
static inline bool is_sync_kiocb(struct kiocb *kiocb)
@@ -345,99 +400,61 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
return kiocb->ki_complete == NULL;
}
-/*
- * "descriptor" for what we're up to with a read.
- * This allows us to use the same read code yet
- * have multiple different users of the data that
- * we read from a file.
- *
- * The simplest case just copies the data to user
- * mode.
- */
-typedef struct {
- size_t written;
- size_t count;
- union {
- char __user *buf;
- void *data;
- } arg;
- int error;
-} read_descriptor_t;
-
-typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
- unsigned long, unsigned long);
-
struct address_space_operations {
- int (*writepage)(struct page *page, struct writeback_control *wbc);
- int (*readpage)(struct file *, struct page *);
+ int (*read_folio)(struct file *, struct folio *);
/* Write back some dirty pages from this mapping. */
int (*writepages)(struct address_space *, struct writeback_control *);
- /* Set a page dirty. Return true if this dirtied it */
- int (*set_page_dirty)(struct page *page);
+ /* Mark a folio dirty. Return true if this dirtied it */
+ bool (*dirty_folio)(struct address_space *, struct folio *);
- /*
- * Reads in the requested pages. Unlike ->readpage(), this is
- * PURELY used for read-ahead!.
- */
- int (*readpages)(struct file *filp, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages);
void (*readahead)(struct readahead_control *);
- int (*write_begin)(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
- int (*write_end)(struct file *, struct address_space *mapping,
+ int (*write_begin)(const struct kiocb *, struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata);
+ int (*write_end)(const struct kiocb *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
+ struct folio *folio, void *fsdata);
/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
sector_t (*bmap)(struct address_space *, sector_t);
- void (*invalidatepage) (struct page *, unsigned int, unsigned int);
- int (*releasepage) (struct page *, gfp_t);
- void (*freepage)(struct page *);
+ void (*invalidate_folio) (struct folio *, size_t offset, size_t len);
+ bool (*release_folio)(struct folio *, gfp_t);
+ void (*free_folio)(struct folio *folio);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
/*
- * migrate the contents of a page to the specified target. If
+ * migrate the contents of a folio to the specified target. If
* migrate_mode is MIGRATE_ASYNC, it must not block.
*/
- int (*migratepage) (struct address_space *,
- struct page *, struct page *, enum migrate_mode);
- bool (*isolate_page)(struct page *, isolate_mode_t);
- void (*putback_page)(struct page *);
- int (*launder_page) (struct page *);
- int (*is_partially_uptodate) (struct page *, unsigned long,
- unsigned long);
- void (*is_dirty_writeback) (struct page *, bool *, bool *);
- int (*error_remove_page)(struct address_space *, struct page *);
+ int (*migrate_folio)(struct address_space *, struct folio *dst,
+ struct folio *src, enum migrate_mode);
+ int (*launder_folio)(struct folio *);
+ bool (*is_partially_uptodate) (struct folio *, size_t from,
+ size_t count);
+ void (*is_dirty_writeback) (struct folio *, bool *dirty, bool *wb);
+ int (*error_remove_folio)(struct address_space *, struct folio *);
/* swapfile support */
int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
sector_t *span);
void (*swap_deactivate)(struct file *file);
+ int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter);
};
extern const struct address_space_operations empty_aops;
-/*
- * pagecache_write_begin/pagecache_write_end must be used by general code
- * to write into the pagecache.
- */
-int pagecache_write_begin(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
-
-int pagecache_write_end(struct file *, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
-
/**
* struct address_space - Contents of a cacheable, mappable object.
* @host: Owner, either the inode or the block_device.
* @i_pages: Cached pages.
+ * @invalidate_lock: Guards coherency between page cache contents and
+ * file offset->disk block mappings in the filesystem during invalidates.
+ * It is also used to block modification of page cache contents through
+ * memory mappings.
* @gfp_mask: Memory allocation flags to use for allocating pages.
- * @i_mmap_writable: Number of VM_SHARED mappings.
+ * @i_mmap_writable: Number of VM_SHARED, VM_MAYWRITE mappings.
* @nr_thps: Number of THPs in the pagecache (non-shmem only).
* @i_mmap: Tree of private and shared mappings.
* @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
@@ -446,13 +463,14 @@ int pagecache_write_end(struct file *, struct address_space *mapping,
* @a_ops: Methods.
* @flags: Error bits and flags (AS_*).
* @wb_err: The most recent error which has occurred.
- * @private_lock: For use by the owner of the address_space.
- * @private_list: For use by the owner of the address_space.
- * @private_data: For use by the owner of the address_space.
+ * @i_private_lock: For use by the owner of the address_space.
+ * @i_private_list: For use by the owner of the address_space.
+ * @i_private_data: For use by the owner of the address_space.
*/
struct address_space {
struct inode *host;
struct xarray i_pages;
+ struct rw_semaphore invalidate_lock;
gfp_t gfp_mask;
atomic_t i_mmap_writable;
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
@@ -460,20 +478,20 @@ struct address_space {
atomic_t nr_thps;
#endif
struct rb_root_cached i_mmap;
- struct rw_semaphore i_mmap_rwsem;
unsigned long nrpages;
pgoff_t writeback_index;
const struct address_space_operations *a_ops;
unsigned long flags;
errseq_t wb_err;
- spinlock_t private_lock;
- struct list_head private_list;
- void *private_data;
+ spinlock_t i_private_lock;
+ struct list_head i_private_list;
+ struct rw_semaphore i_mmap_rwsem;
+ void *i_private_data;
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
* must be enforced here for CRIS, to let the least significant bit
- * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
+ * of struct folio's "mapping" pointer be used for FOLIO_MAPPING_ANON.
*/
/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
@@ -484,7 +502,7 @@ struct address_space {
/*
* Returns true if any of the pages in the mapping are marked with the tag.
*/
-static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
+static inline bool mapping_tagged(const struct address_space *mapping, xa_mark_t tag)
{
return xa_marked(&mapping->i_pages, tag);
}
@@ -504,6 +522,11 @@ static inline void i_mmap_unlock_write(struct address_space *mapping)
up_write(&mapping->i_mmap_rwsem);
}
+static inline int i_mmap_trylock_read(struct address_space *mapping)
+{
+ return down_read_trylock(&mapping->i_mmap_rwsem);
+}
+
static inline void i_mmap_lock_read(struct address_space *mapping)
{
down_read(&mapping->i_mmap_rwsem);
@@ -527,21 +550,21 @@ static inline void i_mmap_assert_write_locked(struct address_space *mapping)
/*
* Might pages of this file be mapped into userspace?
*/
-static inline int mapping_mapped(struct address_space *mapping)
+static inline int mapping_mapped(const struct address_space *mapping)
{
return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root);
}
/*
* Might pages of this file have been modified in userspace?
- * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap
+ * Note that i_mmap_writable counts all VM_SHARED, VM_MAYWRITE vmas: do_mmap
* marks vma as VM_SHARED if it is shared, and the file was opened for
* writing i.e. vma may be mprotected writable even if now readonly.
*
* If i_mmap_writable is negative, no new writable mappings are allowed. You
* can only deny writable mappings, if none exists right now.
*/
-static inline int mapping_writably_mapped(struct address_space *mapping)
+static inline int mapping_writably_mapped(const struct address_space *mapping)
{
return atomic_read(&mapping->i_mmap_writable) > 0;
}
@@ -581,6 +604,11 @@ static inline void mapping_allow_writable(struct address_space *mapping)
struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))
+/*
+ * ACL_DONT_CACHE is for stacked filesystems, that rely on underlying fs to
+ * cache the ACL. This also means that ->get_inode_acl() can be called in RCU
+ * mode with the LOOKUP_RCU flag.
+ */
#define ACL_DONT_CACHE ((void *)(-3))
static inline struct posix_acl *
@@ -595,13 +623,139 @@ is_uncached_acl(struct posix_acl *acl)
return (long)acl & 1;
}
-#define IOP_FASTPERM 0x0001
-#define IOP_LOOKUP 0x0002
-#define IOP_NOFOLLOW 0x0004
-#define IOP_XATTR 0x0008
+#define IOP_FASTPERM 0x0001
+#define IOP_LOOKUP 0x0002
+#define IOP_NOFOLLOW 0x0004
+#define IOP_XATTR 0x0008
#define IOP_DEFAULT_READLINK 0x0010
+#define IOP_MGTIME 0x0020
+#define IOP_CACHED_LINK 0x0040
+#define IOP_FASTPERM_MAY_EXEC 0x0080
-struct fsnotify_mark_connector;
+/*
+ * Inode state bits. Protected by inode->i_lock
+ *
+ * Four bits determine the dirty state of the inode: I_DIRTY_SYNC,
+ * I_DIRTY_DATASYNC, I_DIRTY_PAGES, and I_DIRTY_TIME.
+ *
+ * Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
+ * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
+ * various stages of removing an inode.
+ *
+ * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
+ *
+ * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
+ * fdatasync() (unless I_DIRTY_DATASYNC is also set).
+ * Timestamp updates are the usual cause.
+ * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
+ * these changes separately from I_DIRTY_SYNC so that we
+ * don't have to write inode on fdatasync() when only
+ * e.g. the timestamps have changed.
+ * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
+ * I_DIRTY_TIME The inode itself has dirty timestamps, and the
+ * lazytime mount option is enabled. We keep track of this
+ * separately from I_DIRTY_SYNC in order to implement
+ * lazytime. This gets cleared if I_DIRTY_INODE
+ * (I_DIRTY_SYNC and/or I_DIRTY_DATASYNC) gets set. But
+ * I_DIRTY_TIME can still be set if I_DIRTY_SYNC is already
+ * in place because writeback might already be in progress
+ * and we don't want to lose the time update
+ * I_NEW Serves as both a mutex and completion notification.
+ * New inodes set I_NEW. If two processes both create
+ * the same inode, one of them will release its inode and
+ * wait for I_NEW to be released before returning.
+ * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
+ * also cause waiting on I_NEW, without I_NEW actually
+ * being set. find_inode() uses this to prevent returning
+ * nearly-dead inodes.
+ * I_WILL_FREE Must be set when calling write_inode_now() if i_count
+ * is zero. I_FREEING must be set when I_WILL_FREE is
+ * cleared.
+ * I_FREEING Set when inode is about to be freed but still has dirty
+ * pages or buffers attached or the inode itself is still
+ * dirty.
+ * I_CLEAR Added by clear_inode(). In this state the inode is
+ * clean and can be destroyed. Inode keeps I_FREEING.
+ *
+ * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
+ * prohibited for many purposes. iget() must wait for
+ * the inode to be completely released, then create it
+ * anew. Other functions will just ignore such inodes,
+ * if appropriate. I_NEW is used for waiting.
+ *
+ * I_SYNC Writeback of inode is running. The bit is set during
+ * data writeback, and cleared with a wakeup on the bit
+ * address once it is done. The bit is also used to pin
+ * the inode in memory for flusher thread.
+ *
+ * I_REFERENCED Marks the inode as recently referenced on the LRU list.
+ *
+ * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to
+ * synchronize competing switching instances and to tell
+ * wb stat updates to grab the i_pages lock. See
+ * inode_switch_wbs_work_fn() for details.
+ *
+ * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper
+ * and work dirs among overlayfs mounts.
+ *
+ * I_CREATING New object's inode in the middle of setting up.
+ *
+ * I_DONTCACHE Evict inode as soon as it is not used anymore.
+ *
+ * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists.
+ * Used to detect that mark_inode_dirty() should not move
+ * inode between dirty lists.
+ *
+ * I_PINNING_FSCACHE_WB Inode is pinning an fscache object for writeback.
+ *
+ * I_LRU_ISOLATING Inode is pinned being isolated from LRU without holding
+ * i_count.
+ *
+ * Q: What is the difference between I_WILL_FREE and I_FREEING?
+ *
+ * __I_{SYNC,NEW,LRU_ISOLATING} are used to derive unique addresses to wait
+ * upon. There's one free address left.
+ */
+
+enum inode_state_bits {
+ __I_NEW = 0U,
+ __I_SYNC = 1U,
+ __I_LRU_ISOLATING = 2U
+ /* reserved wait address bit 3 */
+};
+
+enum inode_state_flags_enum {
+ I_NEW = (1U << __I_NEW),
+ I_SYNC = (1U << __I_SYNC),
+ I_LRU_ISOLATING = (1U << __I_LRU_ISOLATING),
+ /* reserved flag bit 3 */
+ I_DIRTY_SYNC = (1U << 4),
+ I_DIRTY_DATASYNC = (1U << 5),
+ I_DIRTY_PAGES = (1U << 6),
+ I_WILL_FREE = (1U << 7),
+ I_FREEING = (1U << 8),
+ I_CLEAR = (1U << 9),
+ I_REFERENCED = (1U << 10),
+ I_LINKABLE = (1U << 11),
+ I_DIRTY_TIME = (1U << 12),
+ I_WB_SWITCH = (1U << 13),
+ I_OVL_INUSE = (1U << 14),
+ I_CREATING = (1U << 15),
+ I_DONTCACHE = (1U << 16),
+ I_SYNC_QUEUED = (1U << 17),
+ I_PINNING_NETFS_WB = (1U << 18)
+};
+
+#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
+#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
+#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
+
+/*
+ * Use inode_state_read() & friends to access.
+ */
+struct inode_state_flags {
+ enum inode_state_flags_enum __state;
+};
/*
* Keep mostly read-only and often accessed (especially for
@@ -611,14 +765,13 @@ struct fsnotify_mark_connector;
struct inode {
umode_t i_mode;
unsigned short i_opflags;
- kuid_t i_uid;
- kgid_t i_gid;
unsigned int i_flags;
-
#ifdef CONFIG_FS_POSIX_ACL
struct posix_acl *i_acl;
struct posix_acl *i_default_acl;
#endif
+ kuid_t i_uid;
+ kgid_t i_gid;
const struct inode_operations *i_op;
struct super_block *i_sb;
@@ -643,13 +796,17 @@ struct inode {
};
dev_t i_rdev;
loff_t i_size;
- struct timespec64 i_atime;
- struct timespec64 i_mtime;
- struct timespec64 i_ctime;
+ time64_t i_atime_sec;
+ time64_t i_mtime_sec;
+ time64_t i_ctime_sec;
+ u32 i_atime_nsec;
+ u32 i_mtime_nsec;
+ u32 i_ctime_nsec;
+ u32 i_generation;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
u8 i_blkbits;
- u8 i_write_hint;
+ enum rw_hint i_write_hint;
blkcnt_t i_blocks;
#ifdef __NEED_I_SIZE_ORDERED
@@ -657,7 +814,8 @@ struct inode {
#endif
/* Misc */
- unsigned long i_state;
+ struct inode_state_flags i_state;
+ /* 32-bit hole */
struct rw_semaphore i_rwsem;
unsigned long dirtied_when; /* jiffies of first dirtying */
@@ -694,7 +852,10 @@ struct inode {
};
struct file_lock_context *i_flctx;
struct address_space i_data;
- struct list_head i_devices;
+ union {
+ struct list_head i_devices;
+ int i_linklen;
+ };
union {
struct pipe_inode_info *i_pipe;
struct cdev *i_cdev;
@@ -702,24 +863,114 @@ struct inode {
unsigned i_dir_seq;
};
- __u32 i_generation;
#ifdef CONFIG_FSNOTIFY
__u32 i_fsnotify_mask; /* all events this inode cares about */
+ /* 32-bit hole reserved for expanding i_fsnotify_mask */
struct fsnotify_mark_connector __rcu *i_fsnotify_marks;
#endif
-#ifdef CONFIG_FS_ENCRYPTION
- struct fscrypt_info *i_crypt_info;
-#endif
-
-#ifdef CONFIG_FS_VERITY
- struct fsverity_info *i_verity_info;
-#endif
-
void *i_private; /* fs or device private pointer */
} __randomize_layout;
+/*
+ * i_state handling
+ *
+ * We hide all of it behind helpers so that we can validate consumers.
+ */
+static inline enum inode_state_flags_enum inode_state_read_once(struct inode *inode)
+{
+ return READ_ONCE(inode->i_state.__state);
+}
+
+static inline enum inode_state_flags_enum inode_state_read(struct inode *inode)
+{
+ lockdep_assert_held(&inode->i_lock);
+ return inode->i_state.__state;
+}
+
+static inline void inode_state_set_raw(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ WRITE_ONCE(inode->i_state.__state, inode->i_state.__state | flags);
+}
+
+static inline void inode_state_set(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ lockdep_assert_held(&inode->i_lock);
+ inode_state_set_raw(inode, flags);
+}
+
+static inline void inode_state_clear_raw(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ WRITE_ONCE(inode->i_state.__state, inode->i_state.__state & ~flags);
+}
+
+static inline void inode_state_clear(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ lockdep_assert_held(&inode->i_lock);
+ inode_state_clear_raw(inode, flags);
+}
+
+static inline void inode_state_assign_raw(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ WRITE_ONCE(inode->i_state.__state, flags);
+}
+
+static inline void inode_state_assign(struct inode *inode,
+ enum inode_state_flags_enum flags)
+{
+ lockdep_assert_held(&inode->i_lock);
+ inode_state_assign_raw(inode, flags);
+}
+
+static inline void inode_state_replace_raw(struct inode *inode,
+ enum inode_state_flags_enum clearflags,
+ enum inode_state_flags_enum setflags)
+{
+ enum inode_state_flags_enum flags;
+ flags = inode->i_state.__state;
+ flags &= ~clearflags;
+ flags |= setflags;
+ inode_state_assign_raw(inode, flags);
+}
+
+static inline void inode_state_replace(struct inode *inode,
+ enum inode_state_flags_enum clearflags,
+ enum inode_state_flags_enum setflags)
+{
+ lockdep_assert_held(&inode->i_lock);
+ inode_state_replace_raw(inode, clearflags, setflags);
+}
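/*
 * Illustrative sketch (not part of this diff): a typical consumer
 * manipulates i_state through the helpers above instead of open-coding
 * bitops, so the lockdep assertions can catch unlocked access. The
 * function name is hypothetical.
 */
static inline bool example_mark_dirty_sync(struct inode *inode)
{
	bool was_clean;

	spin_lock(&inode->i_lock);
	was_clean = !(inode_state_read(inode) & I_DIRTY);
	inode_state_set(inode, I_DIRTY_SYNC);
	spin_unlock(&inode->i_lock);

	return was_clean;
}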
+
+static inline void inode_set_cached_link(struct inode *inode, char *link, int linklen)
+{
+ VFS_WARN_ON_INODE(strlen(link) != linklen, inode);
+ VFS_WARN_ON_INODE(inode->i_opflags & IOP_CACHED_LINK, inode);
+ inode->i_link = link;
+ inode->i_linklen = linklen;
+ inode->i_opflags |= IOP_CACHED_LINK;
+}
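/*
 * Illustrative sketch (not part of this diff): a small filesystem could
 * publish its symlink target at inode-setup time; everything here other
 * than inode_set_cached_link() itself is an assumption.
 */
static void example_init_symlink(struct inode *inode, char *target)
{
	/* simple_symlink_inode_operations is provided by fs/libfs.c */
	inode->i_op = &simple_symlink_inode_operations;
	inode_set_cached_link(inode, target, strlen(target));
}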
+
+/*
+ * Get bit address from inode->i_state to use with wait_var_event()
+ * infrastructure.
+ */
+#define inode_state_wait_address(inode, bit) ((char *)&(inode)->i_state + (bit))
+
+struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe,
+ struct inode *inode, u32 bit);
+
+static inline void inode_wake_up_bit(struct inode *inode, u32 bit)
+{
+ /* Caller is responsible for correct memory barriers. */
+ wake_up_var(inode_state_wait_address(inode, bit));
+}
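/*
 * Illustrative sketch (not part of this diff): waiting for I_NEW to
 * clear via the derived wait address, in the spirit of
 * wait_on_new_inode() declared further down; the helper name here is
 * hypothetical.
 */
static inline void example_wait_for_new(struct inode *inode)
{
	/* Pairs with the wakeup issued when I_NEW is cleared. */
	wait_var_event(inode_state_wait_address(inode, __I_NEW),
		       !(inode_state_read_once(inode) & I_NEW));
}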
+
struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode);
static inline unsigned int i_blocksize(const struct inode *node)
@@ -743,8 +994,10 @@ static inline void inode_fake_hash(struct inode *inode)
hlist_add_fake(&inode->i_hash);
}
+void wait_on_new_inode(struct inode *inode);
+
/*
- * inode->i_mutex nesting subclasses for the lock validator:
+ * inode->i_rwsem nesting subclasses for the lock validator:
*
* 0: the object of the current VFS operation
* 1: parent
@@ -774,6 +1027,11 @@ static inline void inode_lock(struct inode *inode)
down_write(&inode->i_rwsem);
}
+static inline __must_check int inode_lock_killable(struct inode *inode)
+{
+ return down_write_killable(&inode->i_rwsem);
+}
+
static inline void inode_unlock(struct inode *inode)
{
up_write(&inode->i_rwsem);
@@ -784,6 +1042,11 @@ static inline void inode_lock_shared(struct inode *inode)
down_read(&inode->i_rwsem);
}
+static inline __must_check int inode_lock_shared_killable(struct inode *inode)
+{
+ return down_read_killable(&inode->i_rwsem);
+}
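/*
 * Illustrative caller pattern (not part of this diff): the killable
 * variants let a fatal signal abort the wait, so callers must handle
 * the error instead of assuming the lock was taken.
 */
static int example_locked_op(struct inode *inode)
{
	int err = inode_lock_killable(inode);

	if (err)
		return err;	/* -EINTR: lock was never acquired */
	/* ... operate on the inode ... */
	inode_unlock(inode);
	return 0;
}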
+
static inline void inode_unlock_shared(struct inode *inode)
{
up_read(&inode->i_rwsem);
@@ -814,9 +1077,42 @@ static inline void inode_lock_shared_nested(struct inode *inode, unsigned subcla
down_read_nested(&inode->i_rwsem, subclass);
}
+static inline void filemap_invalidate_lock(struct address_space *mapping)
+{
+ down_write(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_unlock(struct address_space *mapping)
+{
+ up_write(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_lock_shared(struct address_space *mapping)
+{
+ down_read(&mapping->invalidate_lock);
+}
+
+static inline int filemap_invalidate_trylock_shared(
+ struct address_space *mapping)
+{
+ return down_read_trylock(&mapping->invalidate_lock);
+}
+
+static inline void filemap_invalidate_unlock_shared(
+ struct address_space *mapping)
+{
+ up_read(&mapping->invalidate_lock);
+}
+
void lock_two_nondirectories(struct inode *, struct inode*);
void unlock_two_nondirectories(struct inode *, struct inode*);
+void filemap_invalidate_lock_two(struct address_space *mapping1,
+ struct address_space *mapping2);
+void filemap_invalidate_unlock_two(struct address_space *mapping1,
+ struct address_space *mapping2);
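/*
 * Illustrative sketch (not part of this diff): a hole-punch style
 * operation takes invalidate_lock exclusively so concurrent page faults
 * cannot reinstate pages in the punched range.
 */
static void example_punch_range(struct address_space *mapping,
				loff_t start, loff_t end)
{
	filemap_invalidate_lock(mapping);
	truncate_inode_pages_range(mapping, start, end);
	filemap_invalidate_unlock(mapping);
}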
+
+
/*
* NOTE: in a 32bit arch with a preemptable kernel and
* an UP compile the i_size_read/write must be atomic
@@ -846,13 +1142,14 @@ static inline loff_t i_size_read(const struct inode *inode)
preempt_enable();
return i_size;
#else
- return inode->i_size;
+ /* Pairs with smp_store_release() in i_size_write() */
+ return smp_load_acquire(&inode->i_size);
#endif
}
/*
* NOTE: unlike i_size_read(), i_size_write() does need locking around it
- * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount
+ * (normally i_rwsem), otherwise on 32bit/SMP an update of i_size_seqcount
* can be lost, resulting in subsequent i_size_read() calls spinning forever.
*/
static inline void i_size_write(struct inode *inode, loff_t i_size)
@@ -868,7 +1165,12 @@ static inline void i_size_write(struct inode *inode, loff_t i_size)
inode->i_size = i_size;
preempt_enable();
#else
- inode->i_size = i_size;
+ /*
+ * Pairs with smp_load_acquire() in i_size_read() to ensure
+ * changes related to inode size (such as page contents) are
+ * visible before we see the changed inode size.
+ */
+ smp_store_release(&inode->i_size, i_size);
#endif
}
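/*
 * Illustrative sketch (not part of this diff): a writer extending a
 * file publishes the new size last, so a lockless i_size_read() in the
 * 64-bit case never observes the size before the data it covers.
 */
static void example_extend_file(struct inode *inode, loff_t new_size)
{
	inode_lock(inode);		/* i_size_write() needs serialization */
	/* ... write and publish page contents ... */
	i_size_write(inode, new_size);	/* release pairs with acquire in readers */
	inode_unlock(inode);
}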
@@ -883,6 +1185,7 @@ static inline unsigned imajor(const struct inode *inode)
}
struct fown_struct {
+ struct file *file; /* backpointer for security modules */
rwlock_t lock; /* protects pid, uid, euid fields */
struct pid *pid; /* pid or -pgrp where SIGIO should be sent */
enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */
@@ -894,17 +1197,24 @@ struct fown_struct {
* struct file_ra_state - Track a file's readahead state.
* @start: Where the most recent readahead started.
* @size: Number of pages read in the most recent readahead.
- * @async_size: Start next readahead when this many pages are left.
- * @ra_pages: Maximum size of a readahead request.
+ * @async_size: Number of pages that were/are not needed immediately
+ * and so were/are genuinely "ahead". Start next readahead when
+ * the first of these pages is accessed.
+ * @ra_pages: Maximum size of a readahead request, copied from the bdi.
+ * @order: Preferred folio order used for most recent readahead.
* @mmap_miss: How many mmap accesses missed in the page cache.
* @prev_pos: The last byte in the most recent read request.
+ *
+ * When this structure is passed to ->readahead(), the "most recent"
+ * readahead means the current readahead.
*/
struct file_ra_state {
pgoff_t start;
unsigned int size;
unsigned int async_size;
unsigned int ra_pages;
- unsigned int mmap_miss;
+ unsigned short order;
+ unsigned short mmap_miss;
loff_t prev_pos;
};
@@ -917,44 +1227,74 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
index < ra->start + ra->size);
}
+/**
+ * struct file - Represents a file
+ * @f_lock: Protects f_ep, f_flags. Must not be taken from IRQ context.
+ * @f_mode: FMODE_* flags often used in hotpaths
+ * @f_op: file operations
+ * @f_mapping: Contents of a cacheable, mappable object.
+ * @private_data: filesystem or driver specific data
+ * @f_inode: cached inode
+ * @f_flags: file flags
+ * @f_iocb_flags: iocb flags
+ * @f_cred: stashed credentials of creator/opener
+ * @f_owner: file owner
+ * @f_path: path of the file
+ * @__f_path: writable alias for @f_path; *ONLY* for core VFS and only before
+ * the file gets opened
+ * @f_pos_lock: lock protecting file position
+ * @f_pipe: specific to pipes
+ * @f_pos: file position
+ * @f_security: LSM security context of this file
+ * @f_wb_err: writeback error
+ * @f_sb_err: per sb writeback errors
+ * @f_ep: link of all epoll hooks for this file
+ * @f_task_work: task work entry point
+ * @f_llist: work queue entrypoint
+ * @f_ra: file's readahead state
+ * @f_freeptr: Pointer used by SLAB_TYPESAFE_BY_RCU file cache (don't touch.)
+ * @f_ref: reference count
+ */
struct file {
- union {
- struct llist_node fu_llist;
- struct rcu_head fu_rcuhead;
- } f_u;
- struct path f_path;
- struct inode *f_inode; /* cached value */
+ spinlock_t f_lock;
+ fmode_t f_mode;
const struct file_operations *f_op;
-
- /*
- * Protects f_ep, f_flags.
- * Must not be taken from IRQ context.
- */
- spinlock_t f_lock;
- enum rw_hint f_write_hint;
- atomic_long_t f_count;
- unsigned int f_flags;
- fmode_t f_mode;
- struct mutex f_pos_lock;
- loff_t f_pos;
- struct fown_struct f_owner;
- const struct cred *f_cred;
- struct file_ra_state f_ra;
-
- u64 f_version;
+ struct address_space *f_mapping;
+ void *private_data;
+ struct inode *f_inode;
+ unsigned int f_flags;
+ unsigned int f_iocb_flags;
+ const struct cred *f_cred;
+ struct fown_struct *f_owner;
+ /* --- cacheline 1 boundary (64 bytes) --- */
+ union {
+ const struct path f_path;
+ struct path __f_path;
+ };
+ union {
+ /* regular files (with FMODE_ATOMIC_POS) and directories */
+ struct mutex f_pos_lock;
+ /* pipes */
+ u64 f_pipe;
+ };
+ loff_t f_pos;
#ifdef CONFIG_SECURITY
- void *f_security;
+ void *f_security;
#endif
- /* needed for tty driver, and maybe others */
- void *private_data;
-
+ /* --- cacheline 2 boundary (128 bytes) --- */
+ errseq_t f_wb_err;
+ errseq_t f_sb_err;
#ifdef CONFIG_EPOLL
- /* Used by fs/eventpoll.c to link all the hooks to this file */
- struct hlist_head *f_ep;
-#endif /* #ifdef CONFIG_EPOLL */
- struct address_space *f_mapping;
- errseq_t f_wb_err;
- errseq_t f_sb_err; /* for syncfs */
+ struct hlist_head *f_ep;
+#endif
+ union {
+ struct callback_head f_task_work;
+ struct llist_node f_llist;
+ struct file_ra_state f_ra;
+ freeptr_t f_freeptr;
+ };
+ file_ref_t f_ref;
+ /* --- cacheline 3 boundary (192 bytes) --- */
} __randomize_layout
__attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
@@ -962,18 +1302,19 @@ struct file_handle {
__u32 handle_bytes;
int handle_type;
/* file identifier */
- unsigned char f_handle[];
+ unsigned char f_handle[] __counted_by(handle_bytes);
};
static inline struct file *get_file(struct file *f)
{
- atomic_long_inc(&f->f_count);
+ file_ref_inc(&f->f_ref);
return f;
}
-#define get_file_rcu_many(x, cnt) \
- atomic_long_add_unless(&(x)->f_count, (cnt), 0)
-#define get_file_rcu(x) get_file_rcu_many((x), 1)
-#define file_count(x) atomic_long_read(&(x)->f_count)
+
+struct file *get_file_rcu(struct file __rcu **f);
+struct file *get_file_active(struct file **f);
+
+#define file_count(f) file_ref_read(&(f)->f_ref)
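/*
 * Illustrative sketch (not part of this diff): looking up a file
 * published through an RCU-protected slot with the new get_file_rcu();
 * the surrounding function is hypothetical.
 */
static struct file *example_file_lookup(struct file __rcu **slot)
{
	struct file *f;

	rcu_read_lock();
	/* NULL if the slot was cleared or the reference was already dead. */
	f = get_file_rcu(slot);
	rcu_read_unlock();

	return f;	/* caller does fput() on a non-NULL result */
}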
#define MAX_NON_LFS ((1UL<<31) - 1)
@@ -985,333 +1326,45 @@ static inline struct file *get_file(struct file *f)
#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
#endif
-#define FL_POSIX 1
-#define FL_FLOCK 2
-#define FL_DELEG 4 /* NFSv4 delegation */
-#define FL_ACCESS 8 /* not trying to lock, just looking */
-#define FL_EXISTS 16 /* when unlocking, test for existence */
-#define FL_LEASE 32 /* lease held on this file */
-#define FL_CLOSE 64 /* unlock on close */
-#define FL_SLEEP 128 /* A blocking lock */
-#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
-#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
-#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
-#define FL_LAYOUT 2048 /* outstanding pNFS layout */
-
-#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)
-
-/*
- * Special return value from posix_lock_file() and vfs_lock_file() for
- * asynchronous locking.
- */
-#define FILE_LOCK_DEFERRED 1
-
/* legacy typedef, should eventually be removed */
typedef void *fl_owner_t;
struct file_lock;
-
-struct file_lock_operations {
- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
- void (*fl_release_private)(struct file_lock *);
-};
-
-struct lock_manager_operations {
- fl_owner_t (*lm_get_owner)(fl_owner_t);
- void (*lm_put_owner)(fl_owner_t);
- void (*lm_notify)(struct file_lock *); /* unblock callback */
- int (*lm_grant)(struct file_lock *, int);
- bool (*lm_break)(struct file_lock *);
- int (*lm_change)(struct file_lock *, int, struct list_head *);
- void (*lm_setup)(struct file_lock *, void **);
- bool (*lm_breaker_owns_lease)(struct file_lock *);
-};
-
-struct lock_manager {
- struct list_head list;
- /*
- * NFSv4 and up also want opens blocked during the grace period;
- * NLM doesn't care:
- */
- bool block_opens;
-};
-
-struct net;
-void locks_start_grace(struct net *, struct lock_manager *);
-void locks_end_grace(struct lock_manager *);
-bool locks_in_grace(struct net *);
-bool opens_in_grace(struct net *);
-
-/* that will die - we need it for nfs_lock_info */
-#include <linux/nfs_fs_i.h>
-
-/*
- * struct file_lock represents a generic "file lock". It's used to represent
- * POSIX byte range locks, BSD (flock) locks, and leases. It's important to
- * note that the same struct is used to represent both a request for a lock and
- * the lock itself, but the same object is never used for both.
- *
- * FIXME: should we create a separate "struct lock_request" to help distinguish
- * these two uses?
- *
- * The varous i_flctx lists are ordered by:
- *
- * 1) lock owner
- * 2) lock range start
- * 3) lock range end
- *
- * Obviously, the last two criteria only matter for POSIX locks.
- */
-struct file_lock {
- struct file_lock *fl_blocker; /* The lock, that is blocking us */
- struct list_head fl_list; /* link into file_lock_context */
- struct hlist_node fl_link; /* node in global lists */
- struct list_head fl_blocked_requests; /* list of requests with
- * ->fl_blocker pointing here
- */
- struct list_head fl_blocked_member; /* node in
- * ->fl_blocker->fl_blocked_requests
- */
- fl_owner_t fl_owner;
- unsigned int fl_flags;
- unsigned char fl_type;
- unsigned int fl_pid;
- int fl_link_cpu; /* what cpu's list is this on? */
- wait_queue_head_t fl_wait;
- struct file *fl_file;
- loff_t fl_start;
- loff_t fl_end;
-
- struct fasync_struct * fl_fasync; /* for lease break notifications */
- /* for lease breaks: */
- unsigned long fl_break_time;
- unsigned long fl_downgrade_time;
-
- const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
- const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
- union {
- struct nfs_lock_info nfs_fl;
- struct nfs4_lock_info nfs4_fl;
- struct {
- struct list_head link; /* link in AFS vnode's pending_locks list */
- int state; /* state of grant or error if -ve */
- unsigned int debug_id;
- } afs;
- } fl_u;
-} __randomize_layout;
-
-struct file_lock_context {
- spinlock_t flc_lock;
- struct list_head flc_flock;
- struct list_head flc_posix;
- struct list_head flc_lease;
-};
+struct file_lease;
/* The following constant reflects the upper bound of the file/locking space */
#ifndef OFFSET_MAX
-#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
-#define OFFSET_MAX INT_LIMIT(loff_t)
-#define OFFT_OFFSET_MAX INT_LIMIT(off_t)
-#endif
-
-extern void send_sigio(struct fown_struct *fown, int fd, int band);
-
-#define locks_inode(f) file_inode(f)
-
-#ifdef CONFIG_FILE_LOCKING
-extern int fcntl_getlk(struct file *, unsigned int, struct flock *);
-extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
- struct flock *);
-
-#if BITS_PER_LONG == 32
-extern int fcntl_getlk64(struct file *, unsigned int, struct flock64 *);
-extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
- struct flock64 *);
-#endif
-
-extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
-extern int fcntl_getlease(struct file *filp);
-
-/* fs/locks.c */
-void locks_free_lock_context(struct inode *inode);
-void locks_free_lock(struct file_lock *fl);
-extern void locks_init_lock(struct file_lock *);
-extern struct file_lock * locks_alloc_lock(void);
-extern void locks_copy_lock(struct file_lock *, struct file_lock *);
-extern void locks_copy_conflock(struct file_lock *, struct file_lock *);
-extern void locks_remove_posix(struct file *, fl_owner_t);
-extern void locks_remove_file(struct file *);
-extern void locks_release_private(struct file_lock *);
-extern void posix_test_lock(struct file *, struct file_lock *);
-extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
-extern int locks_delete_block(struct file_lock *);
-extern int vfs_test_lock(struct file *, struct file_lock *);
-extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
-extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
-extern int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl);
-extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
-extern void lease_get_mtime(struct inode *, struct timespec64 *time);
-extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
-extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
-extern int lease_modify(struct file_lock *, int, struct list_head *);
-
-struct notifier_block;
-extern int lease_register_notifier(struct notifier_block *);
-extern void lease_unregister_notifier(struct notifier_block *);
-
-struct files_struct;
-extern void show_fd_locks(struct seq_file *f,
- struct file *filp, struct files_struct *files);
-#else /* !CONFIG_FILE_LOCKING */
-static inline int fcntl_getlk(struct file *file, unsigned int cmd,
- struct flock __user *user)
-{
- return -EINVAL;
-}
-
-static inline int fcntl_setlk(unsigned int fd, struct file *file,
- unsigned int cmd, struct flock __user *user)
-{
- return -EACCES;
-}
-
-#if BITS_PER_LONG == 32
-static inline int fcntl_getlk64(struct file *file, unsigned int cmd,
- struct flock64 __user *user)
-{
- return -EINVAL;
-}
-
-static inline int fcntl_setlk64(unsigned int fd, struct file *file,
- unsigned int cmd, struct flock64 __user *user)
-{
- return -EACCES;
-}
+#define OFFSET_MAX type_max(loff_t)
+#define OFFT_OFFSET_MAX type_max(off_t)
#endif
-static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
-{
- return -EINVAL;
-}
-
-static inline int fcntl_getlease(struct file *filp)
-{
- return F_UNLCK;
-}
-
-static inline void
-locks_free_lock_context(struct inode *inode)
-{
-}
-
-static inline void locks_init_lock(struct file_lock *fl)
-{
- return;
-}
-
-static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
-{
- return;
-}
-
-static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
-{
- return;
-}
-
-static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
-{
- return;
-}
-
-static inline void locks_remove_file(struct file *filp)
-{
- return;
-}
-
-static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
-{
- return;
-}
-
-static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
- struct file_lock *conflock)
-{
- return -ENOLCK;
-}
-
-static inline int locks_delete_block(struct file_lock *waiter)
-{
- return -ENOENT;
-}
-
-static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
-{
- return 0;
-}
-
-static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
- struct file_lock *fl, struct file_lock *conf)
-{
- return -ENOLCK;
-}
-
-static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
-{
- return 0;
-}
-
-static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl)
-{
- return -ENOLCK;
-}
-
-static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
-{
- return 0;
-}
-
-static inline void lease_get_mtime(struct inode *inode,
- struct timespec64 *time)
-{
- return;
-}
-
-static inline int generic_setlease(struct file *filp, long arg,
- struct file_lock **flp, void **priv)
-{
- return -EINVAL;
-}
-static inline int vfs_setlease(struct file *filp, long arg,
- struct file_lock **lease, void **priv)
+int file_f_owner_allocate(struct file *file);
+static inline struct fown_struct *file_f_owner(const struct file *file)
{
- return -EINVAL;
+ return READ_ONCE(file->f_owner);
}
-static inline int lease_modify(struct file_lock *fl, int arg,
- struct list_head *dispose)
-{
- return -EINVAL;
-}
-
-struct files_struct;
-static inline void show_fd_locks(struct seq_file *f,
- struct file *filp, struct files_struct *files) {}
-#endif /* !CONFIG_FILE_LOCKING */
+extern void send_sigio(struct fown_struct *fown, int fd, int band);
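/*
 * Illustrative sketch (not part of this diff): with f_owner now
 * allocated on demand, consumers check for it before signalling.
 */
static void example_notify_owner(struct file *file, int fd, int band)
{
	struct fown_struct *fown = file_f_owner(file);

	if (fown)	/* NULL until file_f_owner_allocate() has run */
		send_sigio(fown, fd, band);
}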
static inline struct inode *file_inode(const struct file *f)
{
return f->f_inode;
}
+/*
+ * file_dentry() is a relic from the days that overlayfs was using files with a
+ * "fake" path, meaning, f_path on overlayfs and f_inode on underlying fs.
+ * In those days, file_dentry() was needed to get the underlying fs dentry that
+ * matches f_inode.
+ * Files with "fake" path should not exist nowadays, so use an assertion to make
+ * sure that file_dentry() was not papering over filesystem bugs.
+ */
static inline struct dentry *file_dentry(const struct file *file)
{
- return d_real(file->f_path.dentry, file_inode(file));
-}
+ struct dentry *dentry = file->f_path.dentry;
-static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
-{
- return locks_lock_inode_wait(locks_inode(filp), fl);
+ WARN_ON_ONCE(d_inode(dentry) != file_inode(file));
+ return dentry;
}
struct fasync_struct {
@@ -1336,44 +1389,10 @@ extern void fasync_free(struct fasync_struct *);
extern void kill_fasync(struct fasync_struct **, int, int);
extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
-extern int f_setown(struct file *filp, unsigned long arg, int force);
+extern int f_setown(struct file *filp, int who, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
-extern int send_sigurg(struct fown_struct *fown);
-
-/*
- * sb->s_flags. Note that these mirror the equivalent MS_* flags where
- * represented in both.
- */
-#define SB_RDONLY 1 /* Mount read-only */
-#define SB_NOSUID 2 /* Ignore suid and sgid bits */
-#define SB_NODEV 4 /* Disallow access to device special files */
-#define SB_NOEXEC 8 /* Disallow program execution */
-#define SB_SYNCHRONOUS 16 /* Writes are synced at once */
-#define SB_MANDLOCK 64 /* Allow mandatory locks on an FS */
-#define SB_DIRSYNC 128 /* Directory modifications are synchronous */
-#define SB_NOATIME 1024 /* Do not update access times. */
-#define SB_NODIRATIME 2048 /* Do not update directory access times */
-#define SB_SILENT 32768
-#define SB_POSIXACL (1<<16) /* VFS does not apply the umask */
-#define SB_INLINECRYPT (1<<17) /* Use blk-crypto for encrypted files */
-#define SB_KERNMOUNT (1<<22) /* this is a kern_mount call */
-#define SB_I_VERSION (1<<23) /* Update inode I_version field */
-#define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
-
-/* These sb flags are internal to the kernel */
-#define SB_SUBMOUNT (1<<26)
-#define SB_FORCE (1<<27)
-#define SB_NOSEC (1<<28)
-#define SB_BORN (1<<29)
-#define SB_ACTIVE (1<<30)
-#define SB_NOUSER (1<<31)
-
-/* These flags relate to encoding and casefolding */
-#define SB_ENC_STRICT_MODE_FL (1 << 0)
-
-#define sb_has_strict_encoding(sb) \
- (sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL)
+extern int send_sigurg(struct file *file);
/*
* Umount options
@@ -1385,172 +1404,10 @@ extern int send_sigurg(struct fown_struct *fown);
#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
-/* sb->s_iflags */
-#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
-#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
-#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
-#define SB_I_STABLE_WRITES 0x00000008 /* don't modify blks until WB is done */
-
-/* sb->s_iflags to limit user namespace mounts */
-#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
-#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
-#define SB_I_UNTRUSTED_MOUNTER 0x00000040
-
-#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
-
-/* Possible states of 'frozen' field */
-enum {
- SB_UNFROZEN = 0, /* FS is unfrozen */
- SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */
- SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */
- SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop
- * internal threads if needed) */
- SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */
-};
-
-#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
-
-struct sb_writers {
- int frozen; /* Is sb frozen? */
- wait_queue_head_t wait_unfrozen; /* wait for thaw */
- struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
-};
-
-struct super_block {
- struct list_head s_list; /* Keep this first */
- dev_t s_dev; /* search index; _not_ kdev_t */
- unsigned char s_blocksize_bits;
- unsigned long s_blocksize;
- loff_t s_maxbytes; /* Max file size */
- struct file_system_type *s_type;
- const struct super_operations *s_op;
- const struct dquot_operations *dq_op;
- const struct quotactl_ops *s_qcop;
- const struct export_operations *s_export_op;
- unsigned long s_flags;
- unsigned long s_iflags; /* internal SB_I_* flags */
- unsigned long s_magic;
- struct dentry *s_root;
- struct rw_semaphore s_umount;
- int s_count;
- atomic_t s_active;
-#ifdef CONFIG_SECURITY
- void *s_security;
-#endif
- const struct xattr_handler **s_xattr;
-#ifdef CONFIG_FS_ENCRYPTION
- const struct fscrypt_operations *s_cop;
- struct key *s_master_keys; /* master crypto keys in use */
-#endif
-#ifdef CONFIG_FS_VERITY
- const struct fsverity_operations *s_vop;
-#endif
-#ifdef CONFIG_UNICODE
- struct unicode_map *s_encoding;
- __u16 s_encoding_flags;
-#endif
- struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
- struct list_head s_mounts; /* list of mounts; _not_ for fs use */
- struct block_device *s_bdev;
- struct backing_dev_info *s_bdi;
- struct mtd_info *s_mtd;
- struct hlist_node s_instances;
- unsigned int s_quota_types; /* Bitmask of supported quota types */
- struct quota_info s_dquot; /* Diskquota specific options */
-
- struct sb_writers s_writers;
-
- /*
- * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and
- * s_fsnotify_marks together for cache efficiency. They are frequently
- * accessed and rarely modified.
- */
- void *s_fs_info; /* Filesystem private info */
-
- /* Granularity of c/m/atime in ns (cannot be worse than a second) */
- u32 s_time_gran;
- /* Time limits for c/m/atime in seconds */
- time64_t s_time_min;
- time64_t s_time_max;
-#ifdef CONFIG_FSNOTIFY
- __u32 s_fsnotify_mask;
- struct fsnotify_mark_connector __rcu *s_fsnotify_marks;
-#endif
-
- char s_id[32]; /* Informational name */
- uuid_t s_uuid; /* UUID */
-
- unsigned int s_max_links;
- fmode_t s_mode;
-
- /*
- * The next field is for VFS *only*. No filesystems have any business
- * even looking at it. You had been warned.
- */
- struct mutex s_vfs_rename_mutex; /* Kludge */
-
- /*
- * Filesystem subtype. If non-empty the filesystem type field
- * in /proc/mounts will be "type.subtype"
- */
- const char *s_subtype;
-
- const struct dentry_operations *s_d_op; /* default d_op for dentries */
-
- /*
- * Saved pool identifier for cleancache (-1 means none)
- */
- int cleancache_poolid;
-
- struct shrinker s_shrink; /* per-sb shrinker handle */
-
- /* Number of inodes with nlink == 0 but still referenced */
- atomic_long_t s_remove_count;
-
- /* Pending fsnotify inode refs */
- atomic_long_t s_fsnotify_inode_refs;
-
- /* Being remounted read-only */
- int s_readonly_remount;
-
- /* per-sb errseq_t for reporting writeback errors via syncfs */
- errseq_t s_wb_err;
-
- /* AIO completions deferred from interrupt context */
- struct workqueue_struct *s_dio_done_wq;
- struct hlist_head s_pins;
-
- /*
- * Owning user namespace and default context in which to
- * interpret filesystem uids, gids, quotas, device nodes,
- * xattrs and security labels.
- */
- struct user_namespace *s_user_ns;
-
- /*
- * The list_lru structure is essentially just a pointer to a table
- * of per-node lru lists, each of which has its own spinlock.
- * There is no need to put them into separate cachelines.
- */
- struct list_lru s_dentry_lru;
- struct list_lru s_inode_lru;
- struct rcu_head rcu;
- struct work_struct destroy_work;
-
- struct mutex s_sync_lock; /* sync serialisation lock */
-
- /*
- * Indicates how deep in a filesystem stack this SB is
- */
- int s_stack_depth;
-
- /* s_inode_list_lock protects s_inodes */
- spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp;
- struct list_head s_inodes; /* all inodes */
-
- spinlock_t s_inode_wblist_lock;
- struct list_head s_inodes_wb; /* writeback inodes */
-} __randomize_layout;
+static inline struct user_namespace *i_user_ns(const struct inode *inode)
+{
+ return inode->i_sb->s_user_ns;
+}
/* Helper functions so that in most cases filesystems will
* not need to deal directly with kuid_t and kgid_t and can
@@ -1559,385 +1416,397 @@ struct super_block {
*/
static inline uid_t i_uid_read(const struct inode *inode)
{
- return from_kuid(inode->i_sb->s_user_ns, inode->i_uid);
+ return from_kuid(i_user_ns(inode), inode->i_uid);
}
static inline gid_t i_gid_read(const struct inode *inode)
{
- return from_kgid(inode->i_sb->s_user_ns, inode->i_gid);
+ return from_kgid(i_user_ns(inode), inode->i_gid);
}
static inline void i_uid_write(struct inode *inode, uid_t uid)
{
- inode->i_uid = make_kuid(inode->i_sb->s_user_ns, uid);
+ inode->i_uid = make_kuid(i_user_ns(inode), uid);
}
static inline void i_gid_write(struct inode *inode, gid_t gid)
{
- inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid);
-}
-
-/**
- * kuid_into_mnt - map a kuid down into a mnt_userns
- * @mnt_userns: user namespace of the relevant mount
- * @kuid: kuid to be mapped
- *
- * Return: @kuid mapped according to @mnt_userns.
- * If @kuid has no mapping INVALID_UID is returned.
- */
-static inline kuid_t kuid_into_mnt(struct user_namespace *mnt_userns,
- kuid_t kuid)
-{
- return make_kuid(mnt_userns, __kuid_val(kuid));
+ inode->i_gid = make_kgid(i_user_ns(inode), gid);
}
/**
- * kgid_into_mnt - map a kgid down into a mnt_userns
- * @mnt_userns: user namespace of the relevant mount
- * @kgid: kgid to be mapped
- *
- * Return: @kgid mapped according to @mnt_userns.
- * If @kgid has no mapping INVALID_GID is returned.
- */
-static inline kgid_t kgid_into_mnt(struct user_namespace *mnt_userns,
- kgid_t kgid)
-{
- return make_kgid(mnt_userns, __kgid_val(kgid));
-}
-
-/**
- * i_uid_into_mnt - map an inode's i_uid down into a mnt_userns
- * @mnt_userns: user namespace of the mount the inode was found from
+ * i_uid_into_vfsuid - map an inode's i_uid down according to an idmapping
+ * @idmap: idmap of the mount the inode was found from
* @inode: inode to map
*
- * Return: the inode's i_uid mapped down according to @mnt_userns.
- * If the inode's i_uid has no mapping INVALID_UID is returned.
+ * Return: the inode's i_uid mapped down according to @idmap.
+ * If the inode's i_uid has no mapping INVALID_VFSUID is returned.
*/
-static inline kuid_t i_uid_into_mnt(struct user_namespace *mnt_userns,
- const struct inode *inode)
+static inline vfsuid_t i_uid_into_vfsuid(struct mnt_idmap *idmap,
+ const struct inode *inode)
{
- return kuid_into_mnt(mnt_userns, inode->i_uid);
+ return make_vfsuid(idmap, i_user_ns(inode), inode->i_uid);
}
/**
- * i_gid_into_mnt - map an inode's i_gid down into a mnt_userns
- * @mnt_userns: user namespace of the mount the inode was found from
- * @inode: inode to map
+ * i_uid_needs_update - check whether inode's i_uid needs to be updated
+ * @idmap: idmap of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
*
- * Return: the inode's i_gid mapped down according to @mnt_userns.
- * If the inode's i_gid has no mapping INVALID_GID is returned.
+ * Check whether the @inode's i_uid field needs to be updated taking idmapped
+ * mounts into account if the filesystem supports it.
+ *
+ * Return: true if @inode's i_uid field needs to be updated, false if not.
*/
-static inline kgid_t i_gid_into_mnt(struct user_namespace *mnt_userns,
- const struct inode *inode)
+static inline bool i_uid_needs_update(struct mnt_idmap *idmap,
+ const struct iattr *attr,
+ const struct inode *inode)
{
- return kgid_into_mnt(mnt_userns, inode->i_gid);
+ return ((attr->ia_valid & ATTR_UID) &&
+ !vfsuid_eq(attr->ia_vfsuid,
+ i_uid_into_vfsuid(idmap, inode)));
}
/**
- * kuid_from_mnt - map a kuid up into a mnt_userns
- * @mnt_userns: user namespace of the relevant mount
- * @kuid: kuid to be mapped
+ * i_uid_update - update @inode's i_uid field
+ * @idmap: idmap of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
*
- * Return: @kuid mapped up according to @mnt_userns.
- * If @kuid has no mapping INVALID_UID is returned.
+ * Safely update @inode's i_uid field translating the vfsuid of any idmapped
+ * mount into the filesystem kuid.
*/
-static inline kuid_t kuid_from_mnt(struct user_namespace *mnt_userns,
- kuid_t kuid)
+static inline void i_uid_update(struct mnt_idmap *idmap,
+ const struct iattr *attr,
+ struct inode *inode)
{
- return KUIDT_INIT(from_kuid(mnt_userns, kuid));
+ if (attr->ia_valid & ATTR_UID)
+ inode->i_uid = from_vfsuid(idmap, i_user_ns(inode),
+ attr->ia_vfsuid);
}
/**
- * kgid_from_mnt - map a kgid up into a mnt_userns
- * @mnt_userns: user namespace of the relevant mount
- * @kgid: kgid to be mapped
+ * i_gid_into_vfsgid - map an inode's i_gid down according to an idmapping
+ * @idmap: idmap of the mount the inode was found from
+ * @inode: inode to map
*
- * Return: @kgid mapped up according to @mnt_userns.
- * If @kgid has no mapping INVALID_GID is returned.
+ * Return: the inode's i_gid mapped down according to @idmap.
+ * If the inode's i_gid has no mapping INVALID_VFSGID is returned.
*/
-static inline kgid_t kgid_from_mnt(struct user_namespace *mnt_userns,
- kgid_t kgid)
+static inline vfsgid_t i_gid_into_vfsgid(struct mnt_idmap *idmap,
+ const struct inode *inode)
{
- return KGIDT_INIT(from_kgid(mnt_userns, kgid));
+ return make_vfsgid(idmap, i_user_ns(inode), inode->i_gid);
}
/**
- * mapped_fsuid - return caller's fsuid mapped up into a mnt_userns
- * @mnt_userns: user namespace of the relevant mount
+ * i_gid_needs_update - check whether inode's i_gid needs to be updated
+ * @idmap: idmap of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
*
- * Use this helper to initialize a new vfs or filesystem object based on
- * the caller's fsuid. A common example is initializing the i_uid field of
- * a newly allocated inode triggered by a creation event such as mkdir or
- * O_CREAT. Other examples include the allocation of quotas for a specific
- * user.
+ * Check whether the @inode's i_gid field needs to be updated taking idmapped
+ * mounts into account if the filesystem supports it.
*
- * Return: the caller's current fsuid mapped up according to @mnt_userns.
+ * Return: true if @inode's i_gid field needs to be updated, false if not.
*/
-static inline kuid_t mapped_fsuid(struct user_namespace *mnt_userns)
+static inline bool i_gid_needs_update(struct mnt_idmap *idmap,
+ const struct iattr *attr,
+ const struct inode *inode)
{
- return kuid_from_mnt(mnt_userns, current_fsuid());
+ return ((attr->ia_valid & ATTR_GID) &&
+ !vfsgid_eq(attr->ia_vfsgid,
+ i_gid_into_vfsgid(idmap, inode)));
}
/**
- * mapped_fsgid - return caller's fsgid mapped up into a mnt_userns
- * @mnt_userns: user namespace of the relevant mount
+ * i_gid_update - update @inode's i_gid field
+ * @idmap: idmap of the mount the inode was found from
+ * @attr: the new attributes of @inode
+ * @inode: the inode to update
*
- * Use this helper to initialize a new vfs or filesystem object based on
- * the caller's fsgid. A common example is initializing the i_gid field of
- * a newly allocated inode triggered by a creation event such as mkdir or
- * O_CREAT. Other examples include the allocation of quotas for a specific
- * user.
- *
- * Return: the caller's current fsgid mapped up according to @mnt_userns.
+ * Safely update @inode's i_gid field translating the vfsgid of any idmapped
+ * mount into the filesystem kgid.
*/
-static inline kgid_t mapped_fsgid(struct user_namespace *mnt_userns)
+static inline void i_gid_update(struct mnt_idmap *idmap,
+ const struct iattr *attr,
+ struct inode *inode)
{
- return kgid_from_mnt(mnt_userns, current_fsgid());
+ if (attr->ia_valid & ATTR_GID)
+ inode->i_gid = from_vfsgid(idmap, i_user_ns(inode),
+ attr->ia_vfsgid);
}
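/*
 * Illustrative sketch (not part of this diff): a setattr-style helper
 * translating vfsuid/vfsgid into filesystem ids; the function name is
 * an assumption.
 */
static void example_update_ids(struct mnt_idmap *idmap,
			       const struct iattr *attr,
			       struct inode *inode)
{
	i_uid_update(idmap, attr, inode);	/* no-op without ATTR_UID */
	i_gid_update(idmap, attr, inode);	/* no-op without ATTR_GID */
}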
/**
* inode_fsuid_set - initialize inode's i_uid field with callers fsuid
* @inode: inode to initialize
- * @mnt_userns: user namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
*
* Initialize the i_uid field of @inode. If the inode was found/created via
- * an idmapped mount map the caller's fsuid according to @mnt_users.
+ * an idmapped mount map the caller's fsuid according to @idmap.
*/
static inline void inode_fsuid_set(struct inode *inode,
- struct user_namespace *mnt_userns)
+ struct mnt_idmap *idmap)
{
- inode->i_uid = mapped_fsuid(mnt_userns);
+ inode->i_uid = mapped_fsuid(idmap, i_user_ns(inode));
}
/**
* inode_fsgid_set - initialize inode's i_gid field with callers fsgid
* @inode: inode to initialize
- * @mnt_userns: user namespace of the mount the inode was found from
+ * @idmap: idmap of the mount the inode was found from
*
* Initialize the i_gid field of @inode. If the inode was found/created via
- * an idmapped mount map the caller's fsgid according to @mnt_users.
+ * an idmapped mount map the caller's fsgid according to @idmap.
*/
static inline void inode_fsgid_set(struct inode *inode,
- struct user_namespace *mnt_userns)
+ struct mnt_idmap *idmap)
{
- inode->i_gid = mapped_fsgid(mnt_userns);
+ inode->i_gid = mapped_fsgid(idmap, i_user_ns(inode));
}
/**
* fsuidgid_has_mapping() - check whether caller's fsuid/fsgid is mapped
* @sb: the superblock we want a mapping in
- * @mnt_userns: user namespace of the relevant mount
+ * @idmap: idmap of the relevant mount
*
* Check whether the caller's fsuid and fsgid have a valid mapping in the
* s_user_ns of the superblock @sb. If the caller is on an idmapped mount map
- * the caller's fsuid and fsgid according to the @mnt_userns first.
+ * the caller's fsuid and fsgid according to the @idmap first.
*
* Return: true if fsuid and fsgid is mapped, false if not.
*/
static inline bool fsuidgid_has_mapping(struct super_block *sb,
- struct user_namespace *mnt_userns)
+ struct mnt_idmap *idmap)
{
- struct user_namespace *s_user_ns = sb->s_user_ns;
+ struct user_namespace *fs_userns = sb->s_user_ns;
+ kuid_t kuid;
+ kgid_t kgid;
- return kuid_has_mapping(s_user_ns, mapped_fsuid(mnt_userns)) &&
- kgid_has_mapping(s_user_ns, mapped_fsgid(mnt_userns));
+ kuid = mapped_fsuid(idmap, fs_userns);
+ if (!uid_valid(kuid))
+ return false;
+ kgid = mapped_fsgid(idmap, fs_userns);
+ if (!gid_valid(kgid))
+ return false;
+ return kuid_has_mapping(fs_userns, kuid) &&
+ kgid_has_mapping(fs_userns, kgid);
}
-extern struct timespec64 current_time(struct inode *inode);
+struct timespec64 current_time(struct inode *inode);
+struct timespec64 inode_set_ctime_current(struct inode *inode);
+struct timespec64 inode_set_ctime_deleg(struct inode *inode,
+ struct timespec64 update);
-/*
- * Snapshotting support.
- */
+static inline time64_t inode_get_atime_sec(const struct inode *inode)
+{
+ return inode->i_atime_sec;
+}
-/*
- * These are internal functions, please use sb_start_{write,pagefault,intwrite}
- * instead.
- */
-static inline void __sb_end_write(struct super_block *sb, int level)
+static inline long inode_get_atime_nsec(const struct inode *inode)
{
- percpu_up_read(sb->s_writers.rw_sem + level-1);
+ return inode->i_atime_nsec;
}
-static inline void __sb_start_write(struct super_block *sb, int level)
+static inline struct timespec64 inode_get_atime(const struct inode *inode)
{
- percpu_down_read(sb->s_writers.rw_sem + level - 1);
+ struct timespec64 ts = { .tv_sec = inode_get_atime_sec(inode),
+ .tv_nsec = inode_get_atime_nsec(inode) };
+
+ return ts;
}
-static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
+static inline struct timespec64 inode_set_atime_to_ts(struct inode *inode,
+ struct timespec64 ts)
{
- return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1);
+ inode->i_atime_sec = ts.tv_sec;
+ inode->i_atime_nsec = ts.tv_nsec;
+ return ts;
}
-#define __sb_writers_acquired(sb, lev) \
- percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
-#define __sb_writers_release(sb, lev) \
- percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
+static inline struct timespec64 inode_set_atime(struct inode *inode,
+ time64_t sec, long nsec)
+{
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+
+ return inode_set_atime_to_ts(inode, ts);
+}
-/**
- * sb_end_write - drop write access to a superblock
- * @sb: the super we wrote to
- *
- * Decrement number of writers to the filesystem. Wake up possible waiters
- * wanting to freeze the filesystem.
- */
-static inline void sb_end_write(struct super_block *sb)
+static inline time64_t inode_get_mtime_sec(const struct inode *inode)
{
- __sb_end_write(sb, SB_FREEZE_WRITE);
+ return inode->i_mtime_sec;
}
-/**
- * sb_end_pagefault - drop write access to a superblock from a page fault
- * @sb: the super we wrote to
- *
- * Decrement number of processes handling write page fault to the filesystem.
- * Wake up possible waiters wanting to freeze the filesystem.
- */
-static inline void sb_end_pagefault(struct super_block *sb)
+static inline long inode_get_mtime_nsec(const struct inode *inode)
{
- __sb_end_write(sb, SB_FREEZE_PAGEFAULT);
+ return inode->i_mtime_nsec;
}
-/**
- * sb_end_intwrite - drop write access to a superblock for internal fs purposes
- * @sb: the super we wrote to
- *
- * Decrement fs-internal number of writers to the filesystem. Wake up possible
- * waiters wanting to freeze the filesystem.
- */
-static inline void sb_end_intwrite(struct super_block *sb)
+static inline struct timespec64 inode_get_mtime(const struct inode *inode)
{
- __sb_end_write(sb, SB_FREEZE_FS);
+ struct timespec64 ts = { .tv_sec = inode_get_mtime_sec(inode),
+ .tv_nsec = inode_get_mtime_nsec(inode) };
+ return ts;
}
-/**
- * sb_start_write - get write access to a superblock
- * @sb: the super we write to
- *
- * When a process wants to write data or metadata to a file system (i.e. dirty
- * a page or an inode), it should embed the operation in a sb_start_write() -
- * sb_end_write() pair to get exclusion against file system freezing. This
- * function increments number of writers preventing freezing. If the file
- * system is already frozen, the function waits until the file system is
- * thawed.
- *
- * Since freeze protection behaves as a lock, users have to preserve
- * ordering of freeze protection and other filesystem locks. Generally,
- * freeze protection should be the outermost lock. In particular, we have:
+static inline struct timespec64 inode_set_mtime_to_ts(struct inode *inode,
+ struct timespec64 ts)
+{
+ inode->i_mtime_sec = ts.tv_sec;
+ inode->i_mtime_nsec = ts.tv_nsec;
+ return ts;
+}
+
+static inline struct timespec64 inode_set_mtime(struct inode *inode,
+ time64_t sec, long nsec)
+{
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+ return inode_set_mtime_to_ts(inode, ts);
+}
+
+/*
+ * Multigrain timestamps
*
- * sb_start_write
- * -> i_mutex (write path, truncate, directory ops, ...)
- * -> s_umount (freeze_super, thaw_super)
+ * Conditionally use fine-grained ctime and mtime timestamps when there
+ * are users actively observing them via getattr. The primary use-case
+ * for this is NFS clients that use the ctime to distinguish between
+ * different states of the file, and that are often fooled by multiple
+ * operations that occur in the same coarse-grained timer tick.
*/
-static inline void sb_start_write(struct super_block *sb)
+#define I_CTIME_QUERIED ((u32)BIT(31))
+
+static inline time64_t inode_get_ctime_sec(const struct inode *inode)
{
- __sb_start_write(sb, SB_FREEZE_WRITE);
+ return inode->i_ctime_sec;
}
-static inline bool sb_start_write_trylock(struct super_block *sb)
+static inline long inode_get_ctime_nsec(const struct inode *inode)
{
- return __sb_start_write_trylock(sb, SB_FREEZE_WRITE);
+ return inode->i_ctime_nsec & ~I_CTIME_QUERIED;
}
+static inline struct timespec64 inode_get_ctime(const struct inode *inode)
+{
+ struct timespec64 ts = { .tv_sec = inode_get_ctime_sec(inode),
+ .tv_nsec = inode_get_ctime_nsec(inode) };
+
+ return ts;
+}
+
+struct timespec64 inode_set_ctime_to_ts(struct inode *inode, struct timespec64 ts);
+
/**
- * sb_start_pagefault - get write access to a superblock from a page fault
- * @sb: the super we write to
- *
- * When a process starts handling write page fault, it should embed the
- * operation into sb_start_pagefault() - sb_end_pagefault() pair to get
- * exclusion against file system freezing. This is needed since the page fault
- * is going to dirty a page. This function increments number of running page
- * faults preventing freezing. If the file system is already frozen, the
- * function waits until the file system is thawed.
- *
- * Since page fault freeze protection behaves as a lock, users have to preserve
- * ordering of freeze protection and other filesystem locks. It is advised to
- * put sb_start_pagefault() close to mmap_lock in lock ordering. Page fault
- * handling code implies lock dependency:
+ * inode_set_ctime - set the ctime in the inode
+ * @inode: inode in which to set the ctime
+ * @sec: tv_sec value to set
+ * @nsec: tv_nsec value to set
*
- * mmap_lock
- * -> sb_start_pagefault
+ * Set the ctime in @inode to { @sec, @nsec }
*/
-static inline void sb_start_pagefault(struct super_block *sb)
+static inline struct timespec64 inode_set_ctime(struct inode *inode,
+ time64_t sec, long nsec)
{
- __sb_start_write(sb, SB_FREEZE_PAGEFAULT);
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+
+ return inode_set_ctime_to_ts(inode, ts);
}
+struct timespec64 simple_inode_init_ts(struct inode *inode);
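/*
 * Illustrative sketch (not part of this diff): with timestamps split
 * into _sec/_nsec fields, all access goes through the accessors, e.g.
 * stamping ctime from mtime after a metadata-only change.
 */
static void example_sync_ctime_to_mtime(struct inode *inode)
{
	inode_set_ctime_to_ts(inode, inode_get_mtime(inode));
}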
+
+/*
+ * Snapshotting support.
+ */
+
/**
- * sb_start_intwrite - get write access to a superblock for internal fs purposes
- * @sb: the super we write to
- *
- * This is the third level of protection against filesystem freezing. It is
- * free for use by a filesystem. The only requirement is that it must rank
- * below sb_start_pagefault.
+ * file_write_started - check if SB_FREEZE_WRITE is held
+ * @file: the file we write to
*
- * For example filesystem can call sb_start_intwrite() when starting a
- * transaction which somewhat eases handling of freezing for internal sources
- * of filesystem changes (internal fs threads, discarding preallocation on file
- * close, etc.).
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ * May be false positive with !S_ISREG, because file_start_write() has
+ * no effect on !S_ISREG.
*/
-static inline void sb_start_intwrite(struct super_block *sb)
+static inline bool file_write_started(const struct file *file)
{
- __sb_start_write(sb, SB_FREEZE_FS);
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return true;
+ return sb_write_started(file_inode(file)->i_sb);
}
-static inline bool sb_start_intwrite_trylock(struct super_block *sb)
+/**
+ * file_write_not_started - check if SB_FREEZE_WRITE is not held
+ * @file: the file we write to
+ *
+ * May be false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ * May be false positive with !S_ISREG, because file_start_write() has
+ * no effect on !S_ISREG.
+ */
+static inline bool file_write_not_started(const struct file *file)
{
- return __sb_start_write_trylock(sb, SB_FREEZE_FS);
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return true;
+ return sb_write_not_started(file_inode(file)->i_sb);
}
-bool inode_owner_or_capable(struct user_namespace *mnt_userns,
+bool inode_owner_or_capable(struct mnt_idmap *idmap,
const struct inode *inode);
/*
* VFS helper functions..
*/
-int vfs_create(struct user_namespace *, struct inode *,
- struct dentry *, umode_t, bool);
-int vfs_mkdir(struct user_namespace *, struct inode *,
- struct dentry *, umode_t);
-int vfs_mknod(struct user_namespace *, struct inode *, struct dentry *,
- umode_t, dev_t);
-int vfs_symlink(struct user_namespace *, struct inode *,
- struct dentry *, const char *);
-int vfs_link(struct dentry *, struct user_namespace *, struct inode *,
- struct dentry *, struct inode **);
-int vfs_rmdir(struct user_namespace *, struct inode *, struct dentry *);
-int vfs_unlink(struct user_namespace *, struct inode *, struct dentry *,
- struct inode **);
+int vfs_create(struct mnt_idmap *, struct dentry *, umode_t,
+ struct delegated_inode *);
+struct dentry *vfs_mkdir(struct mnt_idmap *, struct inode *,
+ struct dentry *, umode_t, struct delegated_inode *);
+int vfs_mknod(struct mnt_idmap *, struct inode *, struct dentry *,
+ umode_t, dev_t, struct delegated_inode *);
+int vfs_symlink(struct mnt_idmap *, struct inode *,
+ struct dentry *, const char *, struct delegated_inode *);
+int vfs_link(struct dentry *, struct mnt_idmap *, struct inode *,
+ struct dentry *, struct delegated_inode *);
+int vfs_rmdir(struct mnt_idmap *, struct inode *, struct dentry *,
+ struct delegated_inode *);
+int vfs_unlink(struct mnt_idmap *, struct inode *, struct dentry *,
+ struct delegated_inode *);
/**
* struct renamedata - contains all information required for renaming
- * @old_mnt_userns: old user namespace of the mount the inode was found from
- * @old_dir: parent of source
+ * @mnt_idmap: idmap of the mount in which the rename is happening.
+ * @old_parent: parent of source
* @old_dentry: source
- * @new_mnt_userns: new user namespace of the mount the inode was found from
- * @new_dir: parent of destination
+ * @new_parent: parent of destination
* @new_dentry: destination
* @delegated_inode: returns an inode needing a delegation break
* @flags: rename flags
*/
struct renamedata {
- struct user_namespace *old_mnt_userns;
- struct inode *old_dir;
+ struct mnt_idmap *mnt_idmap;
+ struct dentry *old_parent;
struct dentry *old_dentry;
- struct user_namespace *new_mnt_userns;
- struct inode *new_dir;
+ struct dentry *new_parent;
struct dentry *new_dentry;
- struct inode **delegated_inode;
+ struct delegated_inode *delegated_inode;
unsigned int flags;
} __randomize_layout;
int vfs_rename(struct renamedata *);
-static inline int vfs_whiteout(struct user_namespace *mnt_userns,
+static inline int vfs_whiteout(struct mnt_idmap *idmap,
struct inode *dir, struct dentry *dentry)
{
- return vfs_mknod(mnt_userns, dir, dentry, S_IFCHR | WHITEOUT_MODE,
- WHITEOUT_DEV);
+ return vfs_mknod(idmap, dir, dentry, S_IFCHR | WHITEOUT_MODE,
+ WHITEOUT_DEV, NULL);
}
-struct dentry *vfs_tmpfile(struct user_namespace *mnt_userns,
- struct dentry *dentry, umode_t mode, int open_flag);
+struct file *kernel_tmpfile_open(struct mnt_idmap *idmap,
+ const struct path *parentpath,
+ umode_t mode, int open_flag,
+ const struct cred *cred);
+struct file *kernel_file_open(const struct path *path, int flags,
+ const struct cred *cred);
int vfs_mkobj(struct dentry *, umode_t,
int (*f)(struct dentry *, umode_t, void *),
@@ -1947,8 +1816,6 @@ int vfs_fchown(struct file *file, uid_t user, gid_t group);
int vfs_fchmod(struct file *file, umode_t mode);
int vfs_utimes(const struct path *path, struct timespec64 *times);
-extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-
#ifdef CONFIG_COMPAT
extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
@@ -1959,25 +1826,40 @@ extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
/*
* VFS file helper functions.
*/
-void inode_init_owner(struct user_namespace *mnt_userns, struct inode *inode,
+void inode_init_owner(struct mnt_idmap *idmap, struct inode *inode,
const struct inode *dir, umode_t mode);
extern bool may_open_dev(const struct path *path);
+umode_t mode_strip_sgid(struct mnt_idmap *idmap,
+ const struct inode *dir, umode_t mode);
+bool in_group_or_capable(struct mnt_idmap *idmap,
+ const struct inode *inode, vfsgid_t vfsgid);
/*
* This is the "filldir" function type, used by readdir() to let
* the kernel specify what kind of dirent layout it wants to have.
* This allows the kernel to read directories into kernel space or
* to have different dirent layouts depending on the binary type.
+ * Return 'true' to keep going and 'false' if there are no more entries.
*/
struct dir_context;
-typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
+typedef bool (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
unsigned);
struct dir_context {
filldir_t actor;
loff_t pos;
+ /*
+ * Filesystems MUST NOT MODIFY count, but may use it as a hint:
+ * 0 unknown
+ * > 0 space in buffer (assume at least one entry)
+ * INT_MAX unlimited
+ */
+ int count;
};
+/* If OR-ed with d_type, pending signals are not checked */
+#define FILLDIR_FLAG_NOINTR 0x1000
+
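
[Editor's sketch] A minimal actor under the new bool-returning filldir_t convention. The embedding context struct, its fields, and the stop condition are assumptions for illustration only.

	struct count_ctx {
		struct dir_context ctx;
		unsigned int seen, limit;
	};

	static bool count_actor(struct dir_context *ctx, const char *name,
				int namelen, loff_t offset, u64 ino,
				unsigned int d_type)
	{
		struct count_ctx *cc = container_of(ctx, struct count_ctx, ctx);

		cc->seen++;
		/* Return false to stop iteration, true to keep going. */
		return cc->seen < cc->limit;
	}
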
/*
* These flags let !MMU mmap() govern direct device mapping vs immediate
* copying more easily for MAP_PRIVATE, especially for ROM filesystems.
@@ -2017,36 +1899,48 @@ struct dir_context {
*/
#define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN)
+/*
+ * These flags control the behavior of vfs_copy_file_range().
+ * They are not available to the user via syscall.
+ *
+ * COPY_FILE_SPLICE: call splice direct instead of fs clone/copy ops
+ */
+#define COPY_FILE_SPLICE (1 << 0)
+
struct iov_iter;
+struct io_uring_cmd;
+struct offset_ctx;
+
+typedef unsigned int __bitwise fop_flags_t;
struct file_operations {
struct module *owner;
+ fop_flags_t fop_flags;
loff_t (*llseek) (struct file *, loff_t, int);
ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
- int (*iopoll)(struct kiocb *kiocb, bool spin);
- int (*iterate) (struct file *, struct dir_context *);
+ int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *,
+ unsigned int flags);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
- unsigned long mmap_supported_flags;
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, loff_t, loff_t, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
- ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
int (*check_flags)(int);
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
- int (*setlease)(struct file *, long, struct file_lock **, void **);
+ void (*splice_eof)(struct file *file);
+ int (*setlease)(struct file *, int, struct file_lease **, void **);
long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len);
void (*show_fdinfo)(struct seq_file *m, struct file *f);
@@ -2059,80 +1953,123 @@ struct file_operations {
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
int (*fadvise)(struct file *, loff_t, loff_t, int);
+ int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags);
+ int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *,
+ unsigned int poll_flags);
+ int (*mmap_prepare)(struct vm_area_desc *);
} __randomize_layout;
+/* Supports async buffered reads */
+#define FOP_BUFFER_RASYNC ((__force fop_flags_t)(1 << 0))
+/* Supports async buffered writes */
+#define FOP_BUFFER_WASYNC ((__force fop_flags_t)(1 << 1))
+/* Supports synchronous page faults for mappings */
+#define FOP_MMAP_SYNC ((__force fop_flags_t)(1 << 2))
+/* Supports non-exclusive O_DIRECT writes from multiple threads */
+#define FOP_DIO_PARALLEL_WRITE ((__force fop_flags_t)(1 << 3))
+/* Contains huge pages */
+#define FOP_HUGE_PAGES ((__force fop_flags_t)(1 << 4))
+/* Treat loff_t as unsigned (e.g., /dev/mem) */
+#define FOP_UNSIGNED_OFFSET ((__force fop_flags_t)(1 << 5))
+/* Supports asynchronous lock callbacks */
+#define FOP_ASYNC_LOCK ((__force fop_flags_t)(1 << 6))
+/* File system supports uncached read/write buffered IO */
+#define FOP_DONTCACHE ((__force fop_flags_t)(1 << 7))
+
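
[Editor's sketch] How a filesystem might advertise these capabilities; the generic_* helpers are the ones declared later in this header, everything else is illustrative.

	static const struct file_operations example_fops = {
		.owner		= THIS_MODULE,
		.fop_flags	= FOP_BUFFER_RASYNC | FOP_BUFFER_WASYNC,
		.llseek		= generic_file_llseek,
		.read_iter	= generic_file_read_iter,
		.write_iter	= generic_file_write_iter,
		.mmap		= generic_file_mmap,
	};
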
+/* Wrap a directory iterator that needs exclusive inode access */
+int wrap_directory_iterator(struct file *, struct dir_context *,
+ int (*) (struct file *, struct dir_context *));
+#define WRAP_DIR_ITER(x) \
+ static int shared_##x(struct file *file, struct dir_context *ctx) \
+ { return wrap_directory_iterator(file, ctx, x); }
+
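
[Editor's sketch] Assuming a legacy iterator that still needs exclusive inode access, usage of WRAP_DIR_ITER() might look like this; example_iterate and the fops table are hypothetical.

	static int example_iterate(struct file *file, struct dir_context *ctx)
	{
		/* ... walk private directory state, calling dir_emit() ... */
		return 0;
	}

	WRAP_DIR_ITER(example_iterate)	/* defines shared_example_iterate() */

	static const struct file_operations example_dir_fops = {
		.owner		= THIS_MODULE,
		.iterate_shared	= shared_example_iterate,
	};
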
struct inode_operations {
struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *);
- int (*permission) (struct user_namespace *, struct inode *, int);
- struct posix_acl * (*get_acl)(struct inode *, int);
+ int (*permission) (struct mnt_idmap *, struct inode *, int);
+ struct posix_acl * (*get_inode_acl)(struct inode *, int, bool);
int (*readlink) (struct dentry *, char __user *,int);
- int (*create) (struct user_namespace *, struct inode *,struct dentry *,
+ int (*create) (struct mnt_idmap *, struct inode *,struct dentry *,
umode_t, bool);
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
- int (*symlink) (struct user_namespace *, struct inode *,struct dentry *,
+ int (*symlink) (struct mnt_idmap *, struct inode *,struct dentry *,
const char *);
- int (*mkdir) (struct user_namespace *, struct inode *,struct dentry *,
- umode_t);
+ struct dentry *(*mkdir) (struct mnt_idmap *, struct inode *,
+ struct dentry *, umode_t);
int (*rmdir) (struct inode *,struct dentry *);
- int (*mknod) (struct user_namespace *, struct inode *,struct dentry *,
+ int (*mknod) (struct mnt_idmap *, struct inode *,struct dentry *,
umode_t,dev_t);
- int (*rename) (struct user_namespace *, struct inode *, struct dentry *,
+ int (*rename) (struct mnt_idmap *, struct inode *, struct dentry *,
struct inode *, struct dentry *, unsigned int);
- int (*setattr) (struct user_namespace *, struct dentry *,
- struct iattr *);
- int (*getattr) (struct user_namespace *, const struct path *,
+ int (*setattr) (struct mnt_idmap *, struct dentry *, struct iattr *);
+ int (*getattr) (struct mnt_idmap *, const struct path *,
struct kstat *, u32, unsigned int);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
- int (*update_time)(struct inode *, struct timespec64 *, int);
+ int (*update_time)(struct inode *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
umode_t create_mode);
- int (*tmpfile) (struct user_namespace *, struct inode *,
- struct dentry *, umode_t);
- int (*set_acl)(struct user_namespace *, struct inode *,
+ int (*tmpfile) (struct mnt_idmap *, struct inode *,
+ struct file *, umode_t);
+ struct posix_acl *(*get_acl)(struct mnt_idmap *, struct dentry *,
+ int);
+ int (*set_acl)(struct mnt_idmap *, struct dentry *,
struct posix_acl *, int);
- int (*fileattr_set)(struct user_namespace *mnt_userns,
- struct dentry *dentry, struct fileattr *fa);
- int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa);
+ int (*fileattr_set)(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct file_kattr *fa);
+ int (*fileattr_get)(struct dentry *dentry, struct file_kattr *fa);
+ struct offset_ctx *(*get_offset_ctx)(struct inode *inode);
} ____cacheline_aligned;
-static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio,
- struct iov_iter *iter)
+/* Did the driver provide valid mmap hook configuration? */
+static inline bool can_mmap_file(struct file *file)
{
- return file->f_op->read_iter(kio, iter);
+ bool has_mmap = file->f_op->mmap;
+ bool has_mmap_prepare = file->f_op->mmap_prepare;
+
+ /* Hooks are mutually exclusive. */
+ if (WARN_ON_ONCE(has_mmap && has_mmap_prepare))
+ return false;
+ if (!has_mmap && !has_mmap_prepare)
+ return false;
+
+ return true;
}
-static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio,
- struct iov_iter *iter)
+int __compat_vma_mmap(const struct file_operations *f_op,
+ struct file *file, struct vm_area_struct *vma);
+int compat_vma_mmap(struct file *file, struct vm_area_struct *vma);
+
+static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
{
- return file->f_op->write_iter(kio, iter);
+ if (file->f_op->mmap_prepare)
+ return compat_vma_mmap(file, vma);
+
+ return file->f_op->mmap(file, vma);
}
-static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
+static inline int vfs_mmap_prepare(struct file *file, struct vm_area_desc *desc)
{
- return file->f_op->mmap(file, vma);
+ return file->f_op->mmap_prepare(desc);
}
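
[Editor's sketch] A small example of a caller using the two helpers together: validate the hook configuration first, then let vfs_mmap() dispatch to whichever hook is set. The -ENODEV choice is an assumption, not mandated by this header.

	static int example_do_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* Reject files with missing or conflicting hooks up front. */
		if (!can_mmap_file(file))
			return -ENODEV;	/* assumed error code */

		return vfs_mmap(file, vma);
	}
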
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
loff_t, size_t, unsigned int);
-extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- size_t len, unsigned int flags);
-extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t *count,
- unsigned int remap_flags);
-extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
+int remap_verify_area(struct file *file, loff_t pos, loff_t len, bool write);
+int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags,
+ const struct iomap_ops *dax_read_ops);
+int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
- loff_t len, unsigned int remap_flags);
+ loff_t *count, unsigned int remap_flags);
extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
@@ -2142,42 +2079,6 @@ extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
struct file *dst_file, loff_t dst_pos,
loff_t len, unsigned int remap_flags);
-
-struct super_operations {
- struct inode *(*alloc_inode)(struct super_block *sb);
- void (*destroy_inode)(struct inode *);
- void (*free_inode)(struct inode *);
-
- void (*dirty_inode) (struct inode *, int flags);
- int (*write_inode) (struct inode *, struct writeback_control *wbc);
- int (*drop_inode) (struct inode *);
- void (*evict_inode) (struct inode *);
- void (*put_super) (struct super_block *);
- int (*sync_fs)(struct super_block *sb, int wait);
- int (*freeze_super) (struct super_block *);
- int (*freeze_fs) (struct super_block *);
- int (*thaw_super) (struct super_block *);
- int (*unfreeze_fs) (struct super_block *);
- int (*statfs) (struct dentry *, struct kstatfs *);
- int (*remount_fs) (struct super_block *, int *, char *);
- void (*umount_begin) (struct super_block *);
-
- int (*show_options)(struct seq_file *, struct dentry *);
- int (*show_devname)(struct seq_file *, struct dentry *);
- int (*show_path)(struct seq_file *, struct dentry *);
- int (*show_stats)(struct seq_file *, struct dentry *);
-#ifdef CONFIG_QUOTA
- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
- struct dquot **(*get_dquots)(struct inode *);
-#endif
- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
- long (*nr_cached_objects)(struct super_block *,
- struct shrink_control *);
- long (*free_cached_objects)(struct super_block *,
- struct shrink_control *);
-};
-
/*
* Inode flags - they have no relation to superblock flags now
*/
@@ -2202,6 +2103,8 @@ struct super_operations {
#define S_ENCRYPTED (1 << 14) /* Encrypted file (using fs/crypto/) */
#define S_CASEFOLD (1 << 15) /* Casefolded file */
#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */
+#define S_KERNEL_FILE (1 << 17) /* File is in use by the kernel (e.g. fs/cachefiles) */
+#define S_ANON_INODE (1 << 19) /* Inode is an anonymous inode */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -2218,7 +2121,6 @@ struct super_operations {
*/
#define __IS_FLG(inode, flg) ((inode)->i_sb->s_flags & (flg))
-static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & SB_RDONLY; }
#define IS_RDONLY(inode) sb_rdonly((inode)->i_sb)
#define IS_SYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS) || \
((inode)->i_flags & S_SYNC))
@@ -2231,11 +2133,22 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
+
+#ifdef CONFIG_FS_POSIX_ACL
#define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL)
+#else
+#define IS_POSIXACL(inode) 0
+#endif
#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
+
+#ifdef CONFIG_SWAP
#define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE)
+#else
+#define IS_SWAPFILE(inode) ((void)(inode), 0U)
+#endif
+
#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
#define IS_IMA(inode) ((inode)->i_flags & S_IMA)
#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
@@ -2247,39 +2160,20 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
+#define IS_ANON_FILE(inode) ((inode)->i_flags & S_ANON_INODE)
-static inline bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns,
+static inline bool HAS_UNMAPPED_ID(struct mnt_idmap *idmap,
struct inode *inode)
{
- return !uid_valid(i_uid_into_mnt(mnt_userns, inode)) ||
- !gid_valid(i_gid_into_mnt(mnt_userns, inode));
-}
-
-static inline enum rw_hint file_write_hint(struct file *file)
-{
- if (file->f_write_hint != WRITE_LIFE_NOT_SET)
- return file->f_write_hint;
-
- return file_inode(file)->i_write_hint;
-}
-
-static inline int iocb_flags(struct file *file);
-
-static inline u16 ki_hint_validate(enum rw_hint hint)
-{
- typeof(((struct kiocb *)0)->ki_hint) max_hint = -1;
-
- if (hint <= max_hint)
- return hint;
- return 0;
+ return !vfsuid_valid(i_uid_into_vfsuid(idmap, inode)) ||
+ !vfsgid_valid(i_gid_into_vfsgid(idmap, inode));
}
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
*kiocb = (struct kiocb) {
.ki_filp = filp,
- .ki_flags = iocb_flags(filp),
- .ki_hint = ki_hint_validate(file_write_hint(filp)),
+ .ki_flags = filp->f_iocb_flags,
.ki_ioprio = get_current_ioprio(),
};
}
@@ -2290,114 +2184,11 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
*kiocb = (struct kiocb) {
.ki_filp = filp,
.ki_flags = kiocb_src->ki_flags,
- .ki_hint = kiocb_src->ki_hint,
.ki_ioprio = kiocb_src->ki_ioprio,
.ki_pos = kiocb_src->ki_pos,
};
}
-/*
- * Inode state bits. Protected by inode->i_lock
- *
- * Four bits determine the dirty state of the inode: I_DIRTY_SYNC,
- * I_DIRTY_DATASYNC, I_DIRTY_PAGES, and I_DIRTY_TIME.
- *
- * Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
- * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
- * various stages of removing an inode.
- *
- * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
- *
- * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
- * fdatasync() (unless I_DIRTY_DATASYNC is also set).
- * Timestamp updates are the usual cause.
- * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
- * these changes separately from I_DIRTY_SYNC so that we
- * don't have to write inode on fdatasync() when only
- * e.g. the timestamps have changed.
- * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
- * I_DIRTY_TIME The inode itself only has dirty timestamps, and the
- * lazytime mount option is enabled. We keep track of this
- * separately from I_DIRTY_SYNC in order to implement
- * lazytime. This gets cleared if I_DIRTY_INODE
- * (I_DIRTY_SYNC and/or I_DIRTY_DATASYNC) gets set. I.e.
- * either I_DIRTY_TIME *or* I_DIRTY_INODE can be set in
- * i_state, but not both. I_DIRTY_PAGES may still be set.
- * I_NEW Serves as both a mutex and completion notification.
- * New inodes set I_NEW. If two processes both create
- * the same inode, one of them will release its inode and
- * wait for I_NEW to be released before returning.
- * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
- * also cause waiting on I_NEW, without I_NEW actually
- * being set. find_inode() uses this to prevent returning
- * nearly-dead inodes.
- * I_WILL_FREE Must be set when calling write_inode_now() if i_count
- * is zero. I_FREEING must be set when I_WILL_FREE is
- * cleared.
- * I_FREEING Set when inode is about to be freed but still has dirty
- * pages or buffers attached or the inode itself is still
- * dirty.
- * I_CLEAR Added by clear_inode(). In this state the inode is
- * clean and can be destroyed. Inode keeps I_FREEING.
- *
- * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
- * prohibited for many purposes. iget() must wait for
- * the inode to be completely released, then create it
- * anew. Other functions will just ignore such inodes,
- * if appropriate. I_NEW is used for waiting.
- *
- * I_SYNC Writeback of inode is running. The bit is set during
- * data writeback, and cleared with a wakeup on the bit
- * address once it is done. The bit is also used to pin
- * the inode in memory for flusher thread.
- *
- * I_REFERENCED Marks the inode as recently referenced on the LRU list.
- *
- * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit().
- *
- * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to
- * synchronize competing switching instances and to tell
- * wb stat updates to grab the i_pages lock. See
- * inode_switch_wbs_work_fn() for details.
- *
- * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper
- * and work dirs among overlayfs mounts.
- *
- * I_CREATING New object's inode in the middle of setting up.
- *
- * I_DONTCACHE Evict inode as soon as it is not used anymore.
- *
- * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists.
- * Used to detect that mark_inode_dirty() should not move
- * inode between dirty lists.
- *
- * Q: What is the difference between I_WILL_FREE and I_FREEING?
- */
-#define I_DIRTY_SYNC (1 << 0)
-#define I_DIRTY_DATASYNC (1 << 1)
-#define I_DIRTY_PAGES (1 << 2)
-#define __I_NEW 3
-#define I_NEW (1 << __I_NEW)
-#define I_WILL_FREE (1 << 4)
-#define I_FREEING (1 << 5)
-#define I_CLEAR (1 << 6)
-#define __I_SYNC 7
-#define I_SYNC (1 << __I_SYNC)
-#define I_REFERENCED (1 << 8)
-#define __I_DIO_WAKEUP 9
-#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
-#define I_LINKABLE (1 << 10)
-#define I_DIRTY_TIME (1 << 11)
-#define I_WB_SWITCH (1 << 13)
-#define I_OVL_INUSE (1 << 14)
-#define I_CREATING (1 << 15)
-#define I_DONTCACHE (1 << 16)
-#define I_SYNC_QUEUED (1 << 17)
-
-#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
-#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
-#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
-
extern void __mark_inode_dirty(struct inode *, int);
static inline void mark_inode_dirty(struct inode *inode)
{
@@ -2409,6 +2200,11 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
__mark_inode_dirty(inode, I_DIRTY_SYNC);
}
+static inline int icount_read(const struct inode *inode)
+{
+ return atomic_read(&inode->i_count);
+}
+
/*
* Returns true if the given inode itself only has dirty timestamps (its pages
* may still be dirty) and isn't currently being allocated or freed.
@@ -2420,8 +2216,8 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
*/
static inline bool inode_is_dirtytime_only(struct inode *inode)
{
- return (inode->i_state & (I_DIRTY_TIME | I_NEW |
- I_FREEING | I_WILL_FREE)) == I_DIRTY_TIME;
+ return (inode_state_read_once(inode) &
+ (I_DIRTY_TIME | I_NEW | I_FREEING | I_WILL_FREE)) == I_DIRTY_TIME;
}
extern void inc_nlink(struct inode *inode);
@@ -2450,6 +2246,8 @@ enum file_time_flags {
extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
+int inode_update_time(struct inode *inode, int flags);
+
static inline void file_accessed(struct file *file)
{
if (!(file->f_flags & O_NOATIME))
@@ -2457,8 +2255,8 @@ static inline void file_accessed(struct file *file)
}
extern int file_modified(struct file *file);
+int kiocb_modified(struct kiocb *iocb);
-int sync_inode(struct inode *inode, struct writeback_control *wbc);
int sync_inode_metadata(struct inode *inode, int wait);
struct file_system_type {
@@ -2470,7 +2268,9 @@ struct file_system_type {
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
-#define FS_THP_SUPPORT 8192 /* Remove once all fs converted */
+#define FS_MGTIME 64 /* FS uses multigrain timestamps */
+#define FS_LBS 128 /* FS supports LBS */
+#define FS_POWER_FREEZE 256 /* Always freeze on suspend/hibernate */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
@@ -2488,25 +2288,28 @@ struct file_system_type {
struct lock_class_key i_lock_key;
struct lock_class_key i_mutex_key;
+ struct lock_class_key invalidate_lock_key;
struct lock_class_key i_mutex_dir_key;
};
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
-extern struct dentry *mount_bdev(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data,
- int (*fill_super)(struct super_block *, void *, int));
-extern struct dentry *mount_single(struct file_system_type *fs_type,
- int flags, void *data,
- int (*fill_super)(struct super_block *, void *, int));
-extern struct dentry *mount_nodev(struct file_system_type *fs_type,
- int flags, void *data,
- int (*fill_super)(struct super_block *, void *, int));
+/**
+ * is_mgtime: is this inode using multigrain timestamps
+ * @inode: inode to test for multigrain timestamps
+ *
+ * Return true if the inode uses multigrain timestamps, false otherwise.
+ */
+static inline bool is_mgtime(const struct inode *inode)
+{
+ return inode->i_opflags & IOP_MGTIME;
+}
+
extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
+void retire_super(struct super_block *sb);
void generic_shutdown_super(struct super_block *sb);
void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
-void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
void deactivate_locked_super(struct super_block *sb);
int set_anon_super(struct super_block *s, void *data);
@@ -2520,12 +2323,20 @@ struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
int flags, void *data);
+struct super_block *sget_dev(struct fs_context *fc, dev_t dev);
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
-#define fops_get(fops) \
- (((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
-#define fops_put(fops) \
- do { if (fops) module_put((fops)->owner); } while(0)
+#define fops_get(fops) ({ \
+ const struct file_operations *_fops = (fops); \
+ (((_fops) && try_module_get((_fops)->owner) ? (_fops) : NULL)); \
+})
+
+#define fops_put(fops) ({ \
+ const struct file_operations *_fops = (fops); \
+ if (_fops) \
+ module_put((_fops)->owner); \
+})
+
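
[Editor's sketch] The intended pairing of the two macros above, assuming the caller received a candidate fops pointer from elsewhere: fops_get() pins the owning module, fops_put() releases it.

	static int example_use_fops(const struct file_operations *candidate)
	{
		const struct file_operations *fops = fops_get(candidate);

		if (!fops)
			return -ENODEV;	/* owning module is going away */

		/* ... safe to call through fops; the module cannot unload ... */

		fops_put(fops);
		return 0;
	}
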
/*
* This one is to be used *ONLY* from ->open() instances.
* fops must be non-NULL, pinned down *and* module dependencies
@@ -2540,255 +2351,185 @@ struct super_block *sget(struct file_system_type *type,
extern int register_filesystem(struct file_system_type *);
extern int unregister_filesystem(struct file_system_type *);
-extern struct vfsmount *kern_mount(struct file_system_type *);
-extern void kern_unmount(struct vfsmount *mnt);
-extern int may_umount_tree(struct vfsmount *);
-extern int may_umount(struct vfsmount *);
-extern long do_mount(const char *, const char __user *,
- const char *, unsigned long, void *);
-extern struct vfsmount *collect_mounts(const struct path *);
-extern void drop_collected_mounts(struct vfsmount *);
-extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
- struct vfsmount *);
extern int vfs_statfs(const struct path *, struct kstatfs *);
extern int user_statfs(const char __user *, struct kstatfs *);
extern int fd_statfs(int, struct kstatfs *);
-extern int freeze_super(struct super_block *super);
-extern int thaw_super(struct super_block *super);
-extern bool our_mnt(struct vfsmount *mnt);
extern __printf(2, 3)
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
extern int super_setup_bdi(struct super_block *sb);
-extern int current_umask(void);
-
-extern void ihold(struct inode * inode);
-extern void iput(struct inode *);
-extern int generic_update_time(struct inode *, struct timespec64 *, int);
-
-/* /sys/fs */
-extern struct kobject *fs_kobj;
-
-#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
-
-#ifdef CONFIG_MANDATORY_FILE_LOCKING
-extern int locks_mandatory_locked(struct file *);
-extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char);
-
-/*
- * Candidates for mandatory locking have the setgid bit set
- * but no group execute bit - an otherwise meaningless combination.
- */
-
-static inline int __mandatory_lock(struct inode *ino)
-{
- return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
-}
-
-/*
- * ... and these candidates should be on SB_MANDLOCK mounted fs,
- * otherwise these will be advisory locks
- */
-
-static inline int mandatory_lock(struct inode *ino)
-{
- return IS_MANDLOCK(ino) && __mandatory_lock(ino);
-}
-
-static inline int locks_verify_locked(struct file *file)
-{
- if (mandatory_lock(locks_inode(file)))
- return locks_mandatory_locked(file);
- return 0;
-}
-
-static inline int locks_verify_truncate(struct inode *inode,
- struct file *f,
- loff_t size)
-{
- if (!inode->i_flctx || !mandatory_lock(inode))
- return 0;
-
- if (size < inode->i_size) {
- return locks_mandatory_area(inode, f, size, inode->i_size - 1,
- F_WRLCK);
- } else {
- return locks_mandatory_area(inode, f, inode->i_size, size - 1,
- F_WRLCK);
- }
-}
-
-#else /* !CONFIG_MANDATORY_FILE_LOCKING */
-
-static inline int locks_mandatory_locked(struct file *file)
-{
- return 0;
-}
-
-static inline int locks_mandatory_area(struct inode *inode, struct file *filp,
- loff_t start, loff_t end, unsigned char type)
-{
- return 0;
-}
-
-static inline int __mandatory_lock(struct inode *inode)
-{
- return 0;
-}
-
-static inline int mandatory_lock(struct inode *inode)
-{
- return 0;
-}
-
-static inline int locks_verify_locked(struct file *file)
+static inline void super_set_uuid(struct super_block *sb, const u8 *uuid, unsigned len)
{
- return 0;
-}
-
-static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
- size_t size)
-{
- return 0;
+ if (WARN_ON(len > sizeof(sb->s_uuid)))
+ len = sizeof(sb->s_uuid);
+ sb->s_uuid_len = len;
+ memcpy(&sb->s_uuid, uuid, len);
}
-#endif /* CONFIG_MANDATORY_FILE_LOCKING */
-
-
-#ifdef CONFIG_FILE_LOCKING
-static inline int break_lease(struct inode *inode, unsigned int mode)
+/* set sb sysfs name based on sb->s_bdev */
+static inline void super_set_sysfs_name_bdev(struct super_block *sb)
{
- /*
- * Since this check is lockless, we must ensure that any refcounts
- * taken are done before checking i_flctx->flc_lease. Otherwise, we
- * could end up racing with tasks trying to set a new lease on this
- * file.
- */
- smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
- return __break_lease(inode, mode, FL_LEASE);
- return 0;
+ snprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), "%pg", sb->s_bdev);
}
-static inline int break_deleg(struct inode *inode, unsigned int mode)
+/* set sb sysfs name based on sb->s_uuid */
+static inline void super_set_sysfs_name_uuid(struct super_block *sb)
{
- /*
- * Since this check is lockless, we must ensure that any refcounts
- * taken are done before checking i_flctx->flc_lease. Otherwise, we
- * could end up racing with tasks trying to set a new lease on this
- * file.
- */
- smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
- return __break_lease(inode, mode, FL_DELEG);
- return 0;
+ WARN_ON(sb->s_uuid_len != sizeof(sb->s_uuid));
+ snprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), "%pU", sb->s_uuid.b);
}
-static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
+/* set sb sysfs name based on sb->s_id */
+static inline void super_set_sysfs_name_id(struct super_block *sb)
{
- int ret;
-
- ret = break_deleg(inode, O_WRONLY|O_NONBLOCK);
- if (ret == -EWOULDBLOCK && delegated_inode) {
- *delegated_inode = inode;
- ihold(inode);
- }
- return ret;
+ strscpy(sb->s_sysfs_name, sb->s_id, sizeof(sb->s_sysfs_name));
}
-static inline int break_deleg_wait(struct inode **delegated_inode)
+/* try to use something standard before you use this */
+__printf(2, 3)
+static inline void super_set_sysfs_name_generic(struct super_block *sb, const char *fmt, ...)
{
- int ret;
+ va_list args;
- ret = break_deleg(*delegated_inode, O_WRONLY);
- iput(*delegated_inode);
- *delegated_inode = NULL;
- return ret;
-}
-
-static inline int break_layout(struct inode *inode, bool wait)
-{
- smp_mb();
- if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
- return __break_lease(inode,
- wait ? O_WRONLY : O_WRONLY | O_NONBLOCK,
- FL_LAYOUT);
- return 0;
+ va_start(args, fmt);
+ vsnprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), fmt, args);
+ va_end(args);
}
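
[Editor's sketch] The expected call order during an assumed fill_super implementation: record the UUID first, then derive the sysfs name from it, since super_set_sysfs_name_uuid() warns unless the full-length UUID was set.

	static int example_fill_super(struct super_block *sb, struct fs_context *fc)
	{
		u8 uuid[16] = { 0 };	/* would be read from the on-disk superblock */

		super_set_uuid(sb, uuid, sizeof(uuid));
		super_set_sysfs_name_uuid(sb);	/* needs the full-length UUID above */

		/* ... the rest of superblock setup ... */
		return 0;
	}
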
-#else /* !CONFIG_FILE_LOCKING */
-static inline int break_lease(struct inode *inode, unsigned int mode)
-{
- return 0;
-}
-
-static inline int break_deleg(struct inode *inode, unsigned int mode)
-{
- return 0;
-}
-
-static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
-{
- return 0;
-}
-
-static inline int break_deleg_wait(struct inode **delegated_inode)
-{
- BUG();
- return 0;
-}
+extern void ihold(struct inode * inode);
+extern void iput(struct inode *);
+void iput_not_last(struct inode *);
+int inode_update_timestamps(struct inode *inode, int flags);
+int generic_update_time(struct inode *, int);
-static inline int break_layout(struct inode *inode, bool wait)
-{
- return 0;
-}
+/* /sys/fs */
+extern struct kobject *fs_kobj;
-#endif /* CONFIG_FILE_LOCKING */
+#define MAX_RW_COUNT (INT_MAX & PAGE_MASK)
/* fs/open.c */
struct audit_names;
struct filename {
const char *name; /* pointer to actual string */
const __user char *uptr; /* original userland pointer */
- int refcnt;
+ atomic_t refcnt;
struct audit_names *aname;
const char iname[];
};
static_assert(offsetof(struct filename, iname) % sizeof(long) == 0);
-static inline struct user_namespace *file_mnt_user_ns(struct file *file)
+static inline struct mnt_idmap *file_mnt_idmap(const struct file *file)
{
- return mnt_user_ns(file->f_path.mnt);
+ return mnt_idmap(file->f_path.mnt);
}
-extern long vfs_truncate(const struct path *, loff_t);
-int do_truncate(struct user_namespace *, struct dentry *, loff_t start,
+
+/**
+ * is_idmapped_mnt - check whether a mount is mapped
+ * @mnt: the mount to check
+ *
+ * If @mnt has a non-@nop_mnt_idmap attached to it then @mnt is mapped.
+ *
+ * Return: true if mount is mapped, false if not.
+ */
+static inline bool is_idmapped_mnt(const struct vfsmount *mnt)
+{
+ return mnt_idmap(mnt) != &nop_mnt_idmap;
+}
+
+int vfs_truncate(const struct path *, loff_t);
+int do_truncate(struct mnt_idmap *, struct dentry *, loff_t start,
unsigned int time_attrs, struct file *filp);
extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
-extern long do_sys_open(int dfd, const char __user *filename, int flags,
- umode_t mode);
+int do_sys_open(int dfd, const char __user *filename, int flags,
+ umode_t mode);
extern struct file *file_open_name(struct filename *, int, umode_t);
extern struct file *filp_open(const char *, int, umode_t);
-extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+extern struct file *file_open_root(const struct path *,
const char *, int, umode_t);
-extern struct file * dentry_open(const struct path *, int, const struct cred *);
-extern struct file * open_with_fake_path(const struct path *, int,
- struct inode*, const struct cred *);
+static inline struct file *file_open_root_mnt(struct vfsmount *mnt,
+ const char *name, int flags, umode_t mode)
+{
+ return file_open_root(&(struct path){.mnt = mnt, .dentry = mnt->mnt_root},
+ name, flags, mode);
+}
+struct file *dentry_open(const struct path *path, int flags,
+ const struct cred *creds);
+struct file *dentry_open_nonotify(const struct path *path, int flags,
+ const struct cred *cred);
+struct file *dentry_create(const struct path *path, int flags, umode_t mode,
+ const struct cred *cred);
+const struct path *backing_file_user_path(const struct file *f);
+
+/*
+ * When mmapping a file on a stackable filesystem (e.g., overlayfs), the file
+ * stored in ->vm_file is a backing file whose f_inode is on the underlying
+ * filesystem. When the mapped file path and inode number are displayed to
+ * the user (e.g. via /proc/<pid>/maps), these helpers should be used to get the
+ * path and inode number to display to the user, which is the path of the fd
+ * that the user has requested to map and the inode number that would be returned
+ * by fstat() on that same fd.
+ */
+/* Get the path to display in /proc/<pid>/maps */
+static inline const struct path *file_user_path(const struct file *f)
+{
+ if (unlikely(f->f_mode & FMODE_BACKING))
+ return backing_file_user_path(f);
+ return &f->f_path;
+}
+/* Get the inode whose inode number to display in /proc/<pid>/maps */
+static inline const struct inode *file_user_inode(const struct file *f)
+{
+ if (unlikely(f->f_mode & FMODE_BACKING))
+ return d_inode(backing_file_user_path(f)->dentry);
+ return file_inode(f);
+}
+
static inline struct file *file_clone_open(struct file *file)
{
return dentry_open(&file->f_path, file->f_flags, file->f_cred);
}
extern int filp_close(struct file *, fl_owner_t id);
-extern struct filename *getname_flags(const char __user *, int, int *);
-extern struct filename *getname(const char __user *);
+extern struct filename *getname_flags(const char __user *, int);
+extern struct filename *getname_uflags(const char __user *, int);
+static inline struct filename *getname(const char __user *name)
+{
+ return getname_flags(name, 0);
+}
extern struct filename *getname_kernel(const char *);
+extern struct filename *__getname_maybe_null(const char __user *);
+static inline struct filename *getname_maybe_null(const char __user *name, int flags)
+{
+ if (!(flags & AT_EMPTY_PATH))
+ return getname(name);
+
+ if (!name)
+ return NULL;
+ return __getname_maybe_null(name);
+}
extern void putname(struct filename *name);
+DEFINE_FREE(putname, struct filename *, if (!IS_ERR_OR_NULL(_T)) putname(_T))
+
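
[Editor's sketch] With the cleanup class above, scope-based management of a filename becomes possible; a sketch in which the function name and body are illustrative.

	static int example_print_name(const char __user *uname)
	{
		struct filename *name __free(putname) = getname(uname);

		if (IS_ERR(name))
			return PTR_ERR(name);

		pr_info("resolved: %s\n", name->name);
		return 0;	/* putname() runs automatically on every return */
	}
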
+static inline struct filename *refname(struct filename *name)
+{
+ atomic_inc(&name->refcnt);
+ return name;
+}
extern int finish_open(struct file *file, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
extern int finish_no_open(struct file *file, struct dentry *dentry);
+/* Helper for the simple case when original dentry is used */
+static inline int finish_open_simple(struct file *file, int error)
+{
+ if (error)
+ return error;
+
+ return finish_open(file, file->f_path.dentry, NULL);
+}
+
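
[Editor's sketch] The intended use of finish_open_simple() from an assumed ->tmpfile implementation, where any setup error is passed straight through; the helper that creates the unnamed inode is hypothetical.

	static int example_create_unnamed_inode(struct inode *dir,
						struct file *file, umode_t mode);

	static int example_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
				   struct file *file, umode_t mode)
	{
		int err = example_create_unnamed_inode(dir, file, mode);

		/* On error, finish_open_simple() returns it without opening. */
		return finish_open_simple(file, err);
	}
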
/* fs/dcache.c */
extern void __init vfs_caches_init_early(void);
extern void __init vfs_caches_init(void);
@@ -2798,12 +2539,6 @@ extern struct kmem_cache *names_cachep;
#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
-extern struct super_block *blockdev_superblock;
-static inline bool sb_is_blkdev_sb(struct super_block *sb)
-{
- return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock;
-}
-
void emergency_thaw_all(void);
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
@@ -2844,126 +2579,19 @@ extern void init_special_inode(struct inode *, umode_t, dev_t);
extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);
-unsigned long invalidate_mapping_pages(struct address_space *mapping,
- pgoff_t start, pgoff_t end);
-
-void invalidate_mapping_pagevec(struct address_space *mapping,
- pgoff_t start, pgoff_t end,
- unsigned long *nr_pagevec);
-
-static inline void invalidate_remote_inode(struct inode *inode)
-{
- if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode))
- invalidate_mapping_pages(inode->i_mapping, 0, -1);
-}
-extern int invalidate_inode_pages2(struct address_space *mapping);
-extern int invalidate_inode_pages2_range(struct address_space *mapping,
- pgoff_t start, pgoff_t end);
-extern int write_inode_now(struct inode *, int);
-extern int filemap_fdatawrite(struct address_space *);
-extern int filemap_flush(struct address_space *);
-extern int filemap_fdatawait_keep_errors(struct address_space *mapping);
-extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
- loff_t lend);
-extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
- loff_t start_byte, loff_t end_byte);
-
-static inline int filemap_fdatawait(struct address_space *mapping)
-{
- return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
-}
-
-extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
- loff_t lend);
-extern bool filemap_range_needs_writeback(struct address_space *,
- loff_t lstart, loff_t lend);
-extern int filemap_write_and_wait_range(struct address_space *mapping,
- loff_t lstart, loff_t lend);
-extern int __filemap_fdatawrite_range(struct address_space *mapping,
- loff_t start, loff_t end, int sync_mode);
-extern int filemap_fdatawrite_range(struct address_space *mapping,
- loff_t start, loff_t end);
-extern int filemap_check_errors(struct address_space *mapping);
-extern void __filemap_set_wb_err(struct address_space *mapping, int err);
-
-static inline int filemap_write_and_wait(struct address_space *mapping)
-{
- return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
-}
-
extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
loff_t lend);
extern int __must_check file_check_and_advance_wb_err(struct file *file);
extern int __must_check file_write_and_wait_range(struct file *file,
loff_t start, loff_t end);
+int filemap_flush_range(struct address_space *mapping, loff_t start,
+ loff_t end);
static inline int file_write_and_wait(struct file *file)
{
return file_write_and_wait_range(file, 0, LLONG_MAX);
}
-/**
- * filemap_set_wb_err - set a writeback error on an address_space
- * @mapping: mapping in which to set writeback error
- * @err: error to be set in mapping
- *
- * When writeback fails in some way, we must record that error so that
- * userspace can be informed when fsync and the like are called. We endeavor
- * to report errors on any file that was open at the time of the error. Some
- * internal callers also need to know when writeback errors have occurred.
- *
- * When a writeback error occurs, most filesystems will want to call
- * filemap_set_wb_err to record the error in the mapping so that it will be
- * automatically reported whenever fsync is called on the file.
- */
-static inline void filemap_set_wb_err(struct address_space *mapping, int err)
-{
- /* Fastpath for common case of no error */
- if (unlikely(err))
- __filemap_set_wb_err(mapping, err);
-}
-
-/**
- * filemap_check_wb_err - has an error occurred since the mark was sampled?
- * @mapping: mapping to check for writeback errors
- * @since: previously-sampled errseq_t
- *
- * Grab the errseq_t value from the mapping, and see if it has changed "since"
- * the given value was sampled.
- *
- * If it has then report the latest error set, otherwise return 0.
- */
-static inline int filemap_check_wb_err(struct address_space *mapping,
- errseq_t since)
-{
- return errseq_check(&mapping->wb_err, since);
-}
-
-/**
- * filemap_sample_wb_err - sample the current errseq_t to test for later errors
- * @mapping: mapping to be sampled
- *
- * Writeback errors are always reported relative to a particular sample point
- * in the past. This function provides those sample points.
- */
-static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
-{
- return errseq_sample(&mapping->wb_err);
-}
-
-/**
- * file_sample_sb_err - sample the current errseq_t to test for later errors
- * @file: file pointer to be sampled
- *
- * Grab the most current superblock-level errseq_t value for the given
- * struct file.
- */
-static inline errseq_t file_sample_sb_err(struct file *file)
-{
- return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
-}
-
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
@@ -2971,6 +2599,12 @@ extern int vfs_fsync(struct file *file, int datasync);
extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
unsigned int flags);
+static inline bool iocb_is_dsync(const struct kiocb *iocb)
+{
+ return (iocb->ki_flags & IOCB_DSYNC) ||
+ IS_SYNC(iocb->ki_filp->f_mapping->host);
+}
+
/*
* Sync the bytes written if this was a synchronous write. Expect ki_pos
* to already be updated for the write, and will return either the amount
@@ -2978,12 +2612,17 @@ extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
*/
static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
{
- if (iocb->ki_flags & IOCB_DSYNC) {
+ if (iocb_is_dsync(iocb)) {
int ret = vfs_fsync_range(iocb->ki_filp,
iocb->ki_pos - count, iocb->ki_pos - 1,
(iocb->ki_flags & IOCB_SYNC) ? 0 : 1);
if (ret)
return ret;
+ } else if (iocb->ki_flags & IOCB_DONTCACHE) {
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+
+ filemap_flush_range(mapping, iocb->ki_pos - count,
+ iocb->ki_pos - 1);
}
return count;
@@ -3001,21 +2640,21 @@ static inline int bmap(struct inode *inode, sector_t *block)
}
#endif
-int notify_change(struct user_namespace *, struct dentry *,
- struct iattr *, struct inode **);
-int inode_permission(struct user_namespace *, struct inode *, int);
-int generic_permission(struct user_namespace *, struct inode *, int);
+int notify_change(struct mnt_idmap *, struct dentry *,
+ struct iattr *, struct delegated_inode *);
+int inode_permission(struct mnt_idmap *, struct inode *, int);
+int generic_permission(struct mnt_idmap *, struct inode *, int);
static inline int file_permission(struct file *file, int mask)
{
- return inode_permission(file_mnt_user_ns(file),
+ return inode_permission(file_mnt_idmap(file),
file_inode(file), mask);
}
static inline int path_permission(const struct path *path, int mask)
{
- return inode_permission(mnt_user_ns(path->mnt),
+ return inode_permission(mnt_idmap(path->mnt),
d_inode(path->dentry), mask);
}
-int __check_sticky(struct user_namespace *mnt_userns, struct inode *dir,
+int __check_sticky(struct mnt_idmap *idmap, struct inode *dir,
struct inode *inode);
static inline bool execute_ok(struct inode *inode)
@@ -3028,6 +2667,13 @@ static inline bool inode_wrong_type(const struct inode *inode, umode_t mode)
return (inode->i_mode ^ mode) & S_IFMT;
}
+/**
+ * file_start_write - get write access to a superblock for regular file io
+ * @file: the file we want to write to
+ *
+ * This is a variant of sb_start_write() which is a no-op on non-regular files.
+ * Should be matched with a call to file_end_write().
+ */
static inline void file_start_write(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
@@ -3042,23 +2688,70 @@ static inline bool file_start_write_trylock(struct file *file)
return sb_start_write_trylock(file_inode(file)->i_sb);
}
+/**
+ * file_end_write - drop write access to a superblock of a regular file
+ * @file: the file we wrote to
+ *
+ * Should be matched with a call to file_start_write().
+ */
static inline void file_end_write(struct file *file)
{
if (!S_ISREG(file_inode(file)->i_mode))
return;
- __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+ sb_end_write(file_inode(file)->i_sb);
+}
+
+/**
+ * kiocb_start_write - get write access to a superblock for async file io
+ * @iocb: the io context we want to submit the write with
+ *
+ * This is a variant of sb_start_write() for async io submission.
+ * Should be matched with a call to kiocb_end_write().
+ */
+static inline void kiocb_start_write(struct kiocb *iocb)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ sb_start_write(inode->i_sb);
+ /*
+ * Fool lockdep by telling it the lock got released so that it
+ * doesn't complain about the held lock when we return to userspace.
+ */
+ __sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * kiocb_end_write - drop write access to a superblock after async file io
+ * @iocb: the io context we submitted the write with
+ *
+ * Should be matched with a call to kiocb_start_write().
+ */
+static inline void kiocb_end_write(struct kiocb *iocb)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ /*
+ * Tell lockdep we inherited freeze protection from submission thread.
+ */
+ __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
+ sb_end_write(inode->i_sb);
}
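
[Editor's sketch] The split across submission and completion that these helpers are designed for: freeze protection is taken where the write is queued and dropped where it completes, possibly on another thread. Both functions and the direct ki_complete invocation are illustrative.

	static void example_submit_async_write(struct kiocb *iocb)
	{
		kiocb_start_write(iocb);
		/* ... queue the write to the device; completion fires later ... */
	}

	static void example_write_done(struct kiocb *iocb, ssize_t ret)
	{
		kiocb_end_write(iocb);
		iocb->ki_complete(iocb, ret);
	}
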
/*
+ * This is used for regular files where some users -- especially the
+ * currently executed binary in a process, previously handled via
+ * VM_DENYWRITE -- cannot handle concurrent write (and maybe mmap
+ * read-write shared) accesses.
+ *
* get_write_access() gets write permission for a file.
* put_write_access() releases this write permission.
- * This is used for regular files.
- * We cannot support write (and maybe mmap read-write shared) accesses and
- * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode
- * can have the following values:
- * 0: no writers, no VM_DENYWRITE mappings
- * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist
- * > 0: (i_writecount) users are writing to the file.
+ * deny_write_access() denies write access to a file.
+ * allow_write_access() re-enables write access to a file.
+ *
+ * The i_writecount field of an inode can have the following values:
+ * 0: no write access, no denied write access
+ * < 0: (-i_writecount) users that denied write access to the file.
+ * > 0: (i_writecount) users that have write access to the file.
*
* Normally we operate on that counter with atomic_{inc,dec} and it's safe
* except for the cases where we don't hold i_writecount yet. Then we need to
@@ -3083,6 +2776,34 @@ static inline void allow_write_access(struct file *file)
if (file)
atomic_inc(&file_inode(file)->i_writecount);
}
+
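
[Editor's sketch] The deny/allow pairing described in the comment above; deny_write_access() (declared nearby in this header) fails with -ETXTBSY while writers exist, driving i_writecount negative on success.

	static int example_exclude_writers(struct file *file)
	{
		int err = deny_write_access(file);	/* -ETXTBSY while writers exist */

		if (err)
			return err;

		/* ... the file cannot gain new writers in this window ... */

		allow_write_access(file);
		return 0;
	}
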
+/*
+ * Do not prevent write to executable file when watched by pre-content events.
+ *
+ * Note that FMODE_FSNOTIFY_HSM mode is set depending on pre-content watches at
+ * the time of file open and remains constant for entire lifetime of the file,
+ * so if pre-content watches are added post execution or removed before the end
+ * of the execution, it will not cause an i_writecount reference leak.
+ */
+static inline int exe_file_deny_write_access(struct file *exe_file)
+{
+ if (unlikely(FMODE_FSNOTIFY_HSM(exe_file->f_mode)))
+ return 0;
+ return deny_write_access(exe_file);
+}
+static inline void exe_file_allow_write_access(struct file *exe_file)
+{
+ if (unlikely(!exe_file || FMODE_FSNOTIFY_HSM(exe_file->f_mode)))
+ return;
+ allow_write_access(exe_file);
+}
+
+static inline void file_set_fsnotify_mode(struct file *file, fmode_t mode)
+{
+ file->f_mode &= ~FMODE_FSNOTIFY_MASK;
+ file->f_mode |= mode;
+}
+
static inline bool inode_is_open_for_write(const struct inode *inode)
{
return atomic_read(&inode->i_writecount) > 0;
@@ -3091,8 +2812,7 @@ static inline bool inode_is_open_for_write(const struct inode *inode)
#if defined(CONFIG_IMA) || defined(CONFIG_FILE_LOCKING)
static inline void i_readcount_dec(struct inode *inode)
{
- BUG_ON(!atomic_read(&inode->i_readcount));
- atomic_dec(&inode->i_readcount);
+ BUG_ON(atomic_dec_return(&inode->i_readcount) < 0);
}
static inline void i_readcount_inc(struct inode *inode)
{
@@ -3119,9 +2839,37 @@ extern struct file * open_exec(const char *);
/* fs/dcache.c -- generic fs support functions */
extern bool is_subdir(struct dentry *, struct dentry *);
extern bool path_is_under(const struct path *, const struct path *);
+u64 vfsmount_to_propagation_flags(struct vfsmount *mnt);
extern char *file_path(struct file *, char *, int);
+/**
+ * is_dot_dotdot - returns true only if @name is "." or ".."
+ * @name: file name to check
+ * @len: length of file name, in bytes
+ */
+static inline bool is_dot_dotdot(const char *name, size_t len)
+{
+ return len && unlikely(name[0] == '.') &&
+ (len == 1 || (len == 2 && name[1] == '.'));
+}
+
+/**
+ * name_contains_dotdot - check if a file name contains ".." path components
+ * @name: File path string to check
+ * Search for ".." surrounded by either '/' or start/end of string.
+ */
+static inline bool name_contains_dotdot(const char *name)
+{
+ size_t name_len;
+
+ name_len = strlen(name);
+ return strcmp(name, "..") == 0 ||
+ strncmp(name, "../", 3) == 0 ||
+ strstr(name, "/../") != NULL ||
+ (name_len >= 3 && strcmp(name + name_len - 3, "/..") == 0);
+}
+
#include <linux/err.h>
/* needed for stackable file system support */
@@ -3129,14 +2877,19 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int whence);
extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence);
-extern int inode_init_always(struct super_block *, struct inode *);
+extern int inode_init_always_gfp(struct super_block *, struct inode *, gfp_t);
+static inline int inode_init_always(struct super_block *sb, struct inode *inode)
+{
+ return inode_init_always_gfp(sb, inode, GFP_NOFS);
+}
+
extern void inode_init_once(struct inode *);
extern void address_space_init_once(struct address_space *mapping);
extern struct inode * igrab(struct inode *);
extern ino_t iunique(struct super_block *, ino_t);
extern int inode_needs_sync(struct inode *inode);
-extern int generic_delete_inode(struct inode *inode);
-static inline int generic_drop_inode(struct inode *inode)
+extern int inode_just_drop(struct inode *inode);
+static inline int inode_generic_drop(struct inode *inode)
{
return !inode->i_nlink || inode_unhashed(inode);
}
@@ -3144,7 +2897,7 @@ extern void d_mark_dontcache(struct inode *inode);
extern struct inode *ilookup5_nowait(struct super_block *sb,
unsigned long hashval, int (*test)(struct inode *, void *),
- void *data);
+ void *data, bool *isnew);
extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data);
extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
@@ -3153,7 +2906,12 @@ extern struct inode *inode_insert5(struct inode *inode, unsigned long hashval,
int (*test)(struct inode *, void *),
int (*set)(struct inode *, void *),
void *data);
-extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
+struct inode *iget5_locked(struct super_block *, unsigned long,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *);
+struct inode *iget5_locked_rcu(struct super_block *, unsigned long,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *);
extern struct inode * iget_locked(struct super_block *, unsigned long);
extern struct inode *find_inode_nowait(struct super_block *,
unsigned long,
@@ -3174,9 +2932,10 @@ extern void unlock_new_inode(struct inode *);
extern void discard_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
extern void evict_inodes(struct super_block *sb);
+void dump_mapping(const struct address_space *);
/*
- * Userspace may rely on the the inode number being non-zero. For example, glibc
+ * Userspace may rely on the inode number being non-zero. For example, glibc
* simply ignores files with zero i_ino in unlink() and other places.
*
* As an additional complication, if userspace was compiled with
@@ -3190,15 +2949,32 @@ static inline bool is_zero_ino(ino_t ino)
return (u32)ino == 0;
}
-extern void __iget(struct inode * inode);
+static inline void __iget(struct inode *inode)
+{
+ lockdep_assert_held(&inode->i_lock);
+ atomic_inc(&inode->i_count);
+}
+
extern void iget_failed(struct inode *);
extern void clear_inode(struct inode *);
extern void __destroy_inode(struct inode *);
-extern struct inode *new_inode_pseudo(struct super_block *sb);
+struct inode *alloc_inode(struct super_block *sb);
+static inline struct inode *new_inode_pseudo(struct super_block *sb)
+{
+ return alloc_inode(sb);
+}
extern struct inode *new_inode(struct super_block *sb);
extern void free_inode_nonrcu(struct inode *inode);
-extern int should_remove_suid(struct dentry *);
+extern int setattr_should_drop_suidgid(struct mnt_idmap *, struct inode *);
extern int file_remove_privs(struct file *);
+int setattr_should_drop_sgid(struct mnt_idmap *idmap,
+ const struct inode *inode);
+
+/*
+ * This must be used for allocating filesystems specific inodes to set
+ * up the inode reclaim context correctly.
+ */
+#define alloc_inode_sb(_sb, _cache, _gfp) kmem_cache_alloc_lru(_cache, &_sb->s_inode_lru, _gfp)
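
[Editor's sketch] The expected shape of a filesystem's ->alloc_inode under the rule above; the per-fs inode type and its kmem_cache are illustrative.

	struct example_inode_info {
		/* filesystem-private fields would live here */
		struct inode vfs_inode;
	};

	/* Assumed to be created with kmem_cache_create() at module init. */
	static struct kmem_cache *example_inode_cachep;

	static struct inode *example_alloc_inode(struct super_block *sb)
	{
		struct example_inode_info *ei =
			alloc_inode_sb(sb, example_inode_cachep, GFP_KERNEL);

		return ei ? &ei->vfs_inode : NULL;
	}
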
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
static inline void insert_inode_hash(struct inode *inode)
@@ -3214,13 +2990,14 @@ static inline void remove_inode_hash(struct inode *inode)
}
extern void inode_sb_list_add(struct inode *inode);
+extern void inode_lru_list_add(struct inode *inode);
-extern int sb_set_blocksize(struct super_block *, int);
-extern int sb_min_blocksize(struct super_block *, int);
-
-extern int generic_file_mmap(struct file *, struct vm_area_struct *);
-extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
+int generic_file_mmap(struct file *, struct vm_area_struct *);
+int generic_file_mmap_prepare(struct vm_area_desc *desc);
+int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
+int generic_file_readonly_mmap_prepare(struct vm_area_desc *desc);
extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
+int generic_write_checks_count(struct kiocb *iocb, loff_t *count);
extern int generic_write_check_limits(struct file *file, loff_t pos,
loff_t *count);
extern int generic_file_rw_checks(struct file *file_in, struct file *file_out);
@@ -3230,7 +3007,9 @@ extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *);
-extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
+ssize_t generic_perform_write(struct kiocb *, struct iov_iter *);
+ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter,
+ ssize_t direct_written, ssize_t buffered_written);
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
rwf_t flags);
@@ -3241,36 +3020,31 @@ ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
struct iov_iter *iter);
-/* fs/block_dev.c */
-extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
-extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
-extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
- int datasync);
-extern void block_sync_page(struct page *page);
-
/* fs/splice.c */
-extern ssize_t generic_file_splice_read(struct file *, loff_t *,
- struct pipe_inode_info *, size_t, unsigned int);
+ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
+ssize_t copy_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe,
+ size_t len, unsigned int flags);
extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
-extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
- struct file *out, loff_t *, size_t len, unsigned int flags);
-extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
- loff_t *opos, size_t len, unsigned int flags);
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
-extern loff_t no_llseek(struct file *file, loff_t offset, int whence);
extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
int whence, loff_t maxsize, loff_t eof);
+loff_t generic_llseek_cookie(struct file *file, loff_t offset, int whence,
+ u64 *cookie);
extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
int whence, loff_t size);
extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
+int rw_verify_area(int, struct file *, const loff_t *, size_t);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
extern int stream_open(struct inode * inode, struct file * filp);
@@ -3290,7 +3064,7 @@ enum {
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
get_block_t get_block,
- dio_iodone_t end_io, dio_submit_t submit_io,
+ dio_iodone_t end_io,
int flags);
static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
@@ -3299,11 +3073,13 @@ static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
get_block_t get_block)
{
return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
- get_block, NULL, NULL, DIO_LOCKING | DIO_SKIP_HOLES);
+ get_block, NULL, DIO_LOCKING | DIO_SKIP_HOLES);
}
#endif
+bool inode_dio_finished(const struct inode *inode);
void inode_dio_wait(struct inode *inode);
+void inode_dio_wait_interruptible(struct inode *inode);
/**
* inode_dio_begin - signal start of a direct I/O requests
@@ -3327,14 +3103,9 @@ static inline void inode_dio_begin(struct inode *inode)
static inline void inode_dio_end(struct inode *inode)
{
if (atomic_dec_and_test(&inode->i_dio_count))
- wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
+ wake_up_var(&inode->i_dio_count);
}
-/*
- * Warn about a page cache invalidation failure during a direct I/O write.
- */
-void dio_warn_stale_pagecache(struct file *filp);
-
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);
@@ -3342,17 +3113,23 @@ extern const struct file_operations generic_ro_fops;
#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
-extern int readlink_copy(char __user *, int, const char *);
+extern int readlink_copy(char __user *, int, const char *, int);
extern int page_readlink(struct dentry *, char __user *, int);
+extern const char *page_get_link_raw(struct dentry *, struct inode *,
+ struct delayed_call *);
extern const char *page_get_link(struct dentry *, struct inode *,
struct delayed_call *);
extern void page_put_link(void *);
-extern int __page_symlink(struct inode *inode, const char *symname, int len,
- int nofs);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
-void generic_fillattr(struct user_namespace *, struct inode *, struct kstat *);
+void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode);
+void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *);
+void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
+void generic_fill_statx_atomic_writes(struct kstat *stat,
+ unsigned int unit_min,
+ unsigned int unit_max,
+ unsigned int unit_max_opt);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
@@ -3390,49 +3167,57 @@ extern int vfs_readlink(struct dentry *, char __user *, int);
extern struct file_system_type *get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
-extern struct super_block *get_super(struct block_device *);
-extern struct super_block *get_active_super(struct block_device *bdev);
extern void drop_super(struct super_block *sb);
extern void drop_super_exclusive(struct super_block *sb);
-extern void iterate_supers(void (*)(struct super_block *, void *), void *);
+extern void iterate_supers(void (*f)(struct super_block *, void *), void *arg);
extern void iterate_supers_type(struct file_system_type *,
void (*)(struct super_block *, void *), void *);
+void filesystems_freeze(bool freeze_all);
+void filesystems_thaw(void);
+
+void end_dirop(struct dentry *de);
extern int dcache_dir_open(struct inode *, struct file *);
extern int dcache_dir_close(struct inode *, struct file *);
extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
extern int dcache_readdir(struct file *, struct dir_context *);
-extern int simple_setattr(struct user_namespace *, struct dentry *,
+extern int simple_setattr(struct mnt_idmap *, struct dentry *,
struct iattr *);
-extern int simple_getattr(struct user_namespace *, const struct path *,
+extern int simple_getattr(struct mnt_idmap *, const struct path *,
struct kstat *, u32, unsigned int);
extern int simple_statfs(struct dentry *, struct kstatfs *);
extern int simple_open(struct inode *inode, struct file *file);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
-extern int simple_rename(struct user_namespace *, struct inode *,
+extern void __simple_unlink(struct inode *, struct dentry *);
+extern void __simple_rmdir(struct inode *, struct dentry *);
+void simple_rename_timestamp(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+extern int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+extern int simple_rename(struct mnt_idmap *, struct inode *,
struct dentry *, struct inode *, struct dentry *,
unsigned int);
extern void simple_recursive_removal(struct dentry *,
void (*callback)(struct dentry *));
+extern void simple_remove_by_name(struct dentry *, const char *,
+ void (*callback)(struct dentry *));
+extern void locked_recursive_removal(struct dentry *,
+ void (*callback)(struct dentry *));
extern int noop_fsync(struct file *, loff_t, loff_t, int);
-extern int noop_set_page_dirty(struct page *page);
-extern void noop_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
extern int simple_empty(struct dentry *);
-extern int simple_readpage(struct file *file, struct page *page);
-extern int simple_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata);
-extern int simple_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata);
+extern int simple_write_begin(const struct kiocb *iocb,
+ struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct folio **foliop, void **fsdata);
+extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
-extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
-extern const struct dentry_operations simple_dentry_operations;
+struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
+ const struct inode *context_inode);
+extern int simple_nosetlease(struct file *, int, struct file_lease **, void **);
extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
@@ -3446,41 +3231,110 @@ extern int simple_fill_super(struct super_block *, unsigned long,
const struct tree_descr *);
extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
extern void simple_release_fs(struct vfsmount **mount, int *count);
+struct dentry *simple_start_creating(struct dentry *, const char *);
+void simple_done_creating(struct dentry *);
extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
loff_t *ppos, const void *from, size_t available);
extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
const void __user *from, size_t count);
+struct offset_ctx {
+ struct maple_tree mt;
+ unsigned long next_offset;
+};
+
+void simple_offset_init(struct offset_ctx *octx);
+int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry);
+void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry);
+int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+int simple_offset_rename_exchange(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry);
+void simple_offset_destroy(struct offset_ctx *octx);
+
+extern const struct file_operations simple_offset_dir_operations;
+
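
A sketch of how a tmpfs-like filesystem might use this API to give directory entries stable readdir offsets; myfs_get_offset_ctx() and the other myfs_* names are hypothetical:

    static int myfs_init_dir(struct inode *dir)
    {
            simple_offset_init(myfs_get_offset_ctx(dir));
            dir->i_fop = &simple_offset_dir_operations;
            return 0;
    }

    static int myfs_add_entry(struct inode *dir, struct dentry *dentry)
    {
            /* assign a stable offset to the new entry */
            return simple_offset_add(myfs_get_offset_ctx(dir), dentry);
    }

    static void myfs_remove_entry(struct inode *dir, struct dentry *dentry)
    {
            simple_offset_remove(myfs_get_offset_ctx(dir), dentry);
    }
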
extern int __generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_file_fsync(struct file *, loff_t, loff_t, int);
extern int generic_check_addressable(unsigned, u64);
-extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry);
+extern void generic_set_sb_d_ops(struct super_block *sb);
+extern int generic_ci_match(const struct inode *parent,
+ const struct qstr *name,
+ const struct qstr *folded_name,
+ const u8 *de_name, u32 de_name_len);
+
+#if IS_ENABLED(CONFIG_UNICODE)
+int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str);
+int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *name);
+
+/**
+ * generic_ci_validate_strict_name - Check if a given name is suitable
+ * for a directory
+ *
+ * This function checks whether the proposed filename is valid for the
+ * parent directory: only valid UTF-8 filenames are accepted for
+ * casefold directories on filesystems created with the strict encoding
+ * flag. Any name is accepted for directories that don't have casefold
+ * enabled or aren't strict about the encoding.
+ *
+ * @dir: inode of the directory where the new file will be created
+ * @name: name of the new file
+ *
+ * Return:
+ * * True if the filename is suitable for this directory, including
+ * when the name would fail a strict-encoding check but this
+ * directory isn't strict
+ * * False if the filename isn't suitable for this directory. This only
+ * happens when the directory is casefolded and the filesystem is
+ * strict about its encoding.
+ */
+static inline bool generic_ci_validate_strict_name(struct inode *dir,
+ const struct qstr *name)
+{
+ if (!IS_CASEFOLDED(dir) || !sb_has_strict_encoding(dir->i_sb))
+ return true;
+
+ /*
+ * A casefold dir must have an encoding set, unless the filesystem
+ * is corrupted.
+ */
+ if (WARN_ON_ONCE(!dir->i_sb->s_encoding))
+ return true;
-#ifdef CONFIG_MIGRATION
-extern int buffer_migrate_page(struct address_space *,
- struct page *, struct page *,
- enum migrate_mode);
-extern int buffer_migrate_page_norefs(struct address_space *,
- struct page *, struct page *,
- enum migrate_mode);
+ return !utf8_validate(dir->i_sb->s_encoding, name);
+}
#else
-#define buffer_migrate_page NULL
-#define buffer_migrate_page_norefs NULL
+static inline bool generic_ci_validate_strict_name(struct inode *dir,
+ const struct qstr *name)
+{
+ return true;
+}
#endif
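
For illustration, a casefold-aware filesystem could reject invalid names early in its ->create method. A minimal sketch, with myfs_create() hypothetical and the error code chosen to match comparable in-tree callers:

    static int myfs_create(struct mnt_idmap *idmap, struct inode *dir,
                           struct dentry *dentry, umode_t mode, bool excl)
    {
            if (!generic_ci_validate_strict_name(dir, &dentry->d_name))
                    return -EINVAL;
            /* ... proceed with normal inode creation ... */
            return 0;
    }
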
-int setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *);
+int may_setattr(struct mnt_idmap *idmap, struct inode *inode,
+ unsigned int ia_valid);
+int setattr_prepare(struct mnt_idmap *, struct dentry *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
-void setattr_copy(struct user_namespace *, struct inode *inode,
+void setattr_copy(struct mnt_idmap *, struct inode *inode,
const struct iattr *attr);
extern int file_update_time(struct file *file);
+static inline bool file_is_dax(const struct file *file)
+{
+ return file && IS_DAX(file->f_mapping->host);
+}
+
static inline bool vma_is_dax(const struct vm_area_struct *vma)
{
- return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
+ return file_is_dax(vma->vm_file);
}
static inline bool vma_is_fsdax(struct vm_area_struct *vma)
@@ -3504,14 +3358,15 @@ static inline int iocb_flags(struct file *file)
res |= IOCB_APPEND;
if (file->f_flags & O_DIRECT)
res |= IOCB_DIRECT;
- if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
+ if (file->f_flags & O_DSYNC)
res |= IOCB_DSYNC;
if (file->f_flags & __O_SYNC)
res |= IOCB_SYNC;
return res;
}
-static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
+static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags,
+ int rw_type)
{
int kiocb_flags = 0;
@@ -3522,34 +3377,41 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags)
return 0;
if (unlikely(flags & ~RWF_SUPPORTED))
return -EOPNOTSUPP;
+ if (unlikely((flags & RWF_APPEND) && (flags & RWF_NOAPPEND)))
+ return -EINVAL;
if (flags & RWF_NOWAIT) {
if (!(ki->ki_filp->f_mode & FMODE_NOWAIT))
return -EOPNOTSUPP;
- kiocb_flags |= IOCB_NOIO;
+ }
+ if (flags & RWF_ATOMIC) {
+ if (rw_type != WRITE)
+ return -EOPNOTSUPP;
+ if (!(ki->ki_filp->f_mode & FMODE_CAN_ATOMIC_WRITE))
+ return -EOPNOTSUPP;
+ }
+ if (flags & RWF_DONTCACHE) {
+ /* file system must support it */
+ if (!(ki->ki_filp->f_op->fop_flags & FOP_DONTCACHE))
+ return -EOPNOTSUPP;
+ /* DAX mappings not supported */
+ if (IS_DAX(ki->ki_filp->f_mapping->host))
+ return -EOPNOTSUPP;
}
kiocb_flags |= (__force int) (flags & RWF_SUPPORTED);
if (flags & RWF_SYNC)
kiocb_flags |= IOCB_DSYNC;
+ if ((flags & RWF_NOAPPEND) && (ki->ki_flags & IOCB_APPEND)) {
+ if (IS_APPEND(file_inode(ki->ki_filp)))
+ return -EPERM;
+ ki->ki_flags &= ~IOCB_APPEND;
+ }
+
ki->ki_flags |= kiocb_flags;
return 0;
}
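
A sketch of a write-submission path applying user-supplied RWF_* flags with the new rw_type argument (myfs_prep_write() is hypothetical):

    static int myfs_prep_write(struct kiocb *ki, rwf_t flags)
    {
            /* rejects RWF_ATOMIC on non-writes, RWF_NOWAIT without
             * FMODE_NOWAIT, RWF_APPEND combined with RWF_NOAPPEND, ... */
            return kiocb_set_rw_flags(ki, flags, WRITE);
    }
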
-static inline ino_t parent_ino(struct dentry *dentry)
-{
- ino_t res;
-
- /*
- * Don't strictly need d_lock here? If the parent ino could change
- * then surely we'd have a deeper race in the caller?
- */
- spin_lock(&dentry->d_lock);
- res = dentry->d_parent->d_inode->i_ino;
- spin_unlock(&dentry->d_lock);
- return res;
-}
-
/* Transaction based IO helpers */
/*
@@ -3587,7 +3449,7 @@ void simple_transaction_set(struct file *file, size_t n);
* All attributes contain a text representation of a numeric value
* that are accessed with the get() and set() functions.
*/
-#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
+#define DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, __is_signed) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
__simple_attr_check_format(__fmt, 0ull); \
@@ -3598,10 +3460,16 @@ static const struct file_operations __fops = { \
.open = __fops ## _open, \
.release = simple_attr_release, \
.read = simple_attr_read, \
- .write = simple_attr_write, \
+ .write = (__is_signed) ? simple_attr_write_signed : simple_attr_write, \
.llseek = generic_file_llseek, \
}
+#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
+ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, false)
+
+#define DEFINE_SIMPLE_ATTRIBUTE_SIGNED(__fops, __get, __set, __fmt) \
+ DEFINE_SIMPLE_ATTRIBUTE_XSIGNED(__fops, __get, __set, __fmt, true)
+
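
The signed variant matters when an attribute can legitimately hold negative values, since the plain simple_attr_write() rejects negative input. A debugfs-style sketch (myfs_* names hypothetical):

    static s64 myfs_threshold = -1;

    static int myfs_threshold_get(void *data, u64 *val)
    {
            *val = *(s64 *)data;
            return 0;
    }

    static int myfs_threshold_set(void *data, u64 val)
    {
            *(s64 *)data = (s64)val;
            return 0;
    }

    DEFINE_SIMPLE_ATTRIBUTE_SIGNED(myfs_threshold_fops, myfs_threshold_get,
                                   myfs_threshold_set, "%lld\n");
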
static inline __printf(1, 2)
void __simple_attr_check_format(const char *fmt, ...)
{
@@ -3616,35 +3484,29 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos);
ssize_t simple_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
+ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
struct ctl_table;
-int proc_nr_files(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int proc_nr_dentry(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int proc_nr_inodes(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int __init get_filesystem_list(char *buf);
+int __init list_bdev_fs_names(char *buf, size_t size);
#define __FMODE_EXEC ((__force int) FMODE_EXEC)
-#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)
#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
-#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
- (flag & __FMODE_NONOTIFY)))
+#define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE))
static inline bool is_sxid(umode_t mode)
{
- return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
+ return mode & (S_ISUID | S_ISGID);
}
-static inline int check_sticky(struct user_namespace *mnt_userns,
+static inline int check_sticky(struct mnt_idmap *idmap,
struct inode *dir, struct inode *inode)
{
if (!(dir->i_mode & S_ISVTX))
return 0;
- return __check_sticky(mnt_userns, dir, inode);
+ return __check_sticky(idmap, dir, inode);
}
static inline void inode_has_no_xattr(struct inode *inode)
@@ -3662,17 +3524,17 @@ static inline bool dir_emit(struct dir_context *ctx,
const char *name, int namelen,
u64 ino, unsigned type)
{
- return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0;
+ return ctx->actor(ctx, name, namelen, ctx->pos, ino, type);
}
static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, ".", 1, ctx->pos,
- file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0;
+ file->f_path.dentry->d_inode->i_ino, DT_DIR);
}
static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
{
return ctx->actor(ctx, "..", 2, ctx->pos,
- parent_ino(file->f_path.dentry), DT_DIR) == 0;
+ d_parent_ino(file->f_path.dentry), DT_DIR);
}
static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
{
@@ -3711,15 +3573,37 @@ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
int advice);
-/*
- * Flush file data before changing attributes. Caller must hold any locks
- * required to prevent further writes to this file until we're done setting
- * flags.
- */
-static inline int inode_drain_writes(struct inode *inode)
+static inline bool vfs_empty_path(int dfd, const char __user *path)
+{
+ char c;
+
+ if (dfd < 0)
+ return false;
+
+ /* We now allow NULL to be used for an empty path. */
+ if (!path)
+ return true;
+
+ if (unlikely(get_user(c, path)))
+ return false;
+
+ return !c;
+}
+
+int generic_atomic_write_valid(struct kiocb *iocb, struct iov_iter *iter);
+
+static inline bool extensible_ioctl_valid(unsigned int cmd_a,
+ unsigned int cmd_b, size_t min_size)
{
- inode_dio_wait(inode);
- return filemap_write_and_wait(inode->i_mapping);
+ if (_IOC_DIR(cmd_a) != _IOC_DIR(cmd_b))
+ return false;
+ if (_IOC_TYPE(cmd_a) != _IOC_TYPE(cmd_b))
+ return false;
+ if (_IOC_NR(cmd_a) != _IOC_NR(cmd_b))
+ return false;
+ if (_IOC_SIZE(cmd_a) < min_size)
+ return false;
+ return true;
}
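
A hypothetical handler using the helper to accept newer, larger revisions of an extensible ioctl structure; MYFS_IOC_GETSTATE, struct myfs_state_v1 and myfs_get_state() are illustrative:

    static long myfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
    {
            /* match direction/type/nr and require at least the v1 size */
            if (extensible_ioctl_valid(cmd, MYFS_IOC_GETSTATE,
                                       sizeof(struct myfs_state_v1)))
                    return myfs_get_state(file, (void __user *)arg,
                                          _IOC_SIZE(cmd));
            return -ENOTTY;
    }
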
#endif /* _LINUX_FS_H */
diff --git a/include/linux/fs/super.h b/include/linux/fs/super.h
new file mode 100644
index 000000000000..f21ffbb6dea5
--- /dev/null
+++ b/include/linux/fs/super.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FS_SUPER_H
+#define _LINUX_FS_SUPER_H
+
+#include <linux/fs/super_types.h>
+#include <linux/unicode.h>
+
+/*
+ * These are internal functions, please use sb_start_{write,pagefault,intwrite}
+ * instead.
+ */
+static inline void __sb_end_write(struct super_block *sb, int level)
+{
+ percpu_up_read(sb->s_writers.rw_sem + level - 1);
+}
+
+static inline void __sb_start_write(struct super_block *sb, int level)
+{
+ percpu_down_read_freezable(sb->s_writers.rw_sem + level - 1, true);
+}
+
+static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
+{
+ return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1);
+}
+
+#define __sb_writers_acquired(sb, lev) \
+ percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev) - 1], 1, _THIS_IP_)
+#define __sb_writers_release(sb, lev) \
+ percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev) - 1], _THIS_IP_)
+
+/**
+ * __sb_write_started - check if sb freeze level is held
+ * @sb: the super we write to
+ * @level: the freeze level
+ *
+ * * > 0 - sb freeze level is held
+ * * 0 - sb freeze level is not held
+ * * < 0 - !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN
+ */
+static inline int __sb_write_started(const struct super_block *sb, int level)
+{
+ return lockdep_is_held_type(sb->s_writers.rw_sem + level - 1, 1);
+}
+
+/**
+ * sb_write_started - check if SB_FREEZE_WRITE is held
+ * @sb: the super we write to
+ *
+ * May report a false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ */
+static inline bool sb_write_started(const struct super_block *sb)
+{
+ return __sb_write_started(sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * sb_write_not_started - check if SB_FREEZE_WRITE is not held
+ * @sb: the super we write to
+ *
+ * May report a false positive with !CONFIG_LOCKDEP/LOCK_STATE_UNKNOWN.
+ */
+static inline bool sb_write_not_started(const struct super_block *sb)
+{
+ return __sb_write_started(sb, SB_FREEZE_WRITE) <= 0;
+}
+
+/**
+ * sb_end_write - drop write access to a superblock
+ * @sb: the super we wrote to
+ *
+ * Decrement number of writers to the filesystem. Wake up possible waiters
+ * wanting to freeze the filesystem.
+ */
+static inline void sb_end_write(struct super_block *sb)
+{
+ __sb_end_write(sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * sb_end_pagefault - drop write access to a superblock from a page fault
+ * @sb: the super we wrote to
+ *
+ * Decrement number of processes handling write page fault to the filesystem.
+ * Wake up possible waiters wanting to freeze the filesystem.
+ */
+static inline void sb_end_pagefault(struct super_block *sb)
+{
+ __sb_end_write(sb, SB_FREEZE_PAGEFAULT);
+}
+
+/**
+ * sb_end_intwrite - drop write access to a superblock for internal fs purposes
+ * @sb: the super we wrote to
+ *
+ * Decrement fs-internal number of writers to the filesystem. Wake up possible
+ * waiters wanting to freeze the filesystem.
+ */
+static inline void sb_end_intwrite(struct super_block *sb)
+{
+ __sb_end_write(sb, SB_FREEZE_FS);
+}
+
+/**
+ * sb_start_write - get write access to a superblock
+ * @sb: the super we write to
+ *
+ * When a process wants to write data or metadata to a file system (i.e. dirty
+ * a page or an inode), it should embed the operation in a sb_start_write() -
+ * sb_end_write() pair to get exclusion against file system freezing. This
+ * function increments number of writers preventing freezing. If the file
+ * system is already frozen, the function waits until the file system is
+ * thawed.
+ *
+ * Since freeze protection behaves as a lock, users have to preserve
+ * ordering of freeze protection and other filesystem locks. Generally,
+ * freeze protection should be the outermost lock. In particular, we have:
+ *
+ * sb_start_write
+ * -> i_rwsem (write path, truncate, directory ops, ...)
+ * -> s_umount (freeze_super, thaw_super)
+ */
+static inline void sb_start_write(struct super_block *sb)
+{
+ __sb_start_write(sb, SB_FREEZE_WRITE);
+}
+
+DEFINE_GUARD(super_write,
+ struct super_block *,
+ sb_start_write(_T),
+ sb_end_write(_T))
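
With the guard defined above, freeze protection can be scoped to a block; a minimal sketch assuming the <linux/cleanup.h> guard machinery (myfs_touch() is hypothetical):

    static void myfs_touch(struct super_block *sb, struct inode *inode)
    {
            /* sb_start_write() now, sb_end_write() on scope exit */
            guard(super_write)(sb);
            inode_set_ctime_current(inode);
            mark_inode_dirty(inode);
    }
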
+
+static inline bool sb_start_write_trylock(struct super_block *sb)
+{
+ return __sb_start_write_trylock(sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * sb_start_pagefault - get write access to a superblock from a page fault
+ * @sb: the super we write to
+ *
+ * When a process starts handling write page fault, it should embed the
+ * operation into sb_start_pagefault() - sb_end_pagefault() pair to get
+ * exclusion against file system freezing. This is needed since the page fault
+ * is going to dirty a page. This function increments number of running page
+ * faults preventing freezing. If the file system is already frozen, the
+ * function waits until the file system is thawed.
+ *
+ * Since page fault freeze protection behaves as a lock, users have to preserve
+ * ordering of freeze protection and other filesystem locks. It is advised to
+ * put sb_start_pagefault() close to mmap_lock in lock ordering. Page fault
+ * handling code implies lock dependency:
+ *
+ * mmap_lock
+ * -> sb_start_pagefault
+ */
+static inline void sb_start_pagefault(struct super_block *sb)
+{
+ __sb_start_write(sb, SB_FREEZE_PAGEFAULT);
+}
+
+/**
+ * sb_start_intwrite - get write access to a superblock for internal fs purposes
+ * @sb: the super we write to
+ *
+ * This is the third level of protection against filesystem freezing. It is
+ * free for use by a filesystem. The only requirement is that it must rank
+ * below sb_start_pagefault.
+ *
+ * For example, a filesystem can call sb_start_intwrite() when starting a
+ * transaction, which somewhat eases handling of freezing for internal sources
+ * of filesystem changes (internal fs threads, discarding preallocation on file
+ * close, etc.).
+ */
+static inline void sb_start_intwrite(struct super_block *sb)
+{
+ __sb_start_write(sb, SB_FREEZE_FS);
+}
+
+static inline bool sb_start_intwrite_trylock(struct super_block *sb)
+{
+ return __sb_start_write_trylock(sb, SB_FREEZE_FS);
+}
+
+static inline bool sb_rdonly(const struct super_block *sb)
+{
+ return sb->s_flags & SB_RDONLY;
+}
+
+static inline bool sb_is_blkdev_sb(struct super_block *sb)
+{
+ return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock;
+}
+
+#if IS_ENABLED(CONFIG_UNICODE)
+static inline struct unicode_map *sb_encoding(const struct super_block *sb)
+{
+ return sb->s_encoding;
+}
+
+/* Check whether two super blocks have the same encoding and encoding flags */
+static inline bool sb_same_encoding(const struct super_block *sb1,
+ const struct super_block *sb2)
+{
+ if (sb1->s_encoding == sb2->s_encoding)
+ return true;
+
+ return (sb1->s_encoding && sb2->s_encoding &&
+ (sb1->s_encoding->version == sb2->s_encoding->version) &&
+ (sb1->s_encoding_flags == sb2->s_encoding_flags));
+}
+#else
+static inline struct unicode_map *sb_encoding(const struct super_block *sb)
+{
+ return NULL;
+}
+
+static inline bool sb_same_encoding(const struct super_block *sb1,
+ const struct super_block *sb2)
+{
+ return true;
+}
+#endif
+
+static inline bool sb_has_encoding(const struct super_block *sb)
+{
+ return !!sb_encoding(sb);
+}
+
+int sb_set_blocksize(struct super_block *sb, int size);
+int __must_check sb_min_blocksize(struct super_block *sb, int size);
+
+int freeze_super(struct super_block *super, enum freeze_holder who,
+ const void *freeze_owner);
+int thaw_super(struct super_block *super, enum freeze_holder who,
+ const void *freeze_owner);
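
A sketch of a kernel-initiated freeze/thaw pair that tolerates nesting with a concurrent userspace freeze (myfs_quiesce() is hypothetical):

    static int myfs_quiesce(struct super_block *sb)
    {
            int err;

            err = freeze_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
            if (err)
                    return err;
            /* ... perform maintenance while writes are blocked ... */
            return thaw_super(sb, FREEZE_HOLDER_KERNEL | FREEZE_MAY_NEST, NULL);
    }
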
+
+#endif /* _LINUX_FS_SUPER_H */
diff --git a/include/linux/fs/super_types.h b/include/linux/fs/super_types.h
new file mode 100644
index 000000000000..6bd3009e09b3
--- /dev/null
+++ b/include/linux/fs/super_types.h
@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FS_SUPER_TYPES_H
+#define _LINUX_FS_SUPER_TYPES_H
+
+#include <linux/fs_dirent.h>
+#include <linux/errseq.h>
+#include <linux/list_lru.h>
+#include <linux/list.h>
+#include <linux/list_bl.h>
+#include <linux/llist.h>
+#include <linux/uidgid.h>
+#include <linux/uuid.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/workqueue_types.h>
+#include <linux/quota.h>
+
+struct backing_dev_info;
+struct block_device;
+struct dentry;
+struct dentry_operations;
+struct dquot_operations;
+struct export_operations;
+struct file;
+struct file_system_type;
+struct fscrypt_operations;
+struct fsnotify_sb_info;
+struct fsverity_operations;
+struct kstatfs;
+struct mount;
+struct mtd_info;
+struct quotactl_ops;
+struct shrinker;
+struct unicode_map;
+struct user_namespace;
+struct workqueue_struct;
+struct writeback_control;
+struct xattr_handler;
+
+extern struct super_block *blockdev_superblock;
+
+/* Possible states of 'frozen' field */
+enum {
+ SB_UNFROZEN = 0, /* FS is unfrozen */
+ SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */
+ SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */
+ SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop internal threads if needed) */
+ SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */
+};
+
+#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
+
+struct sb_writers {
+ unsigned short frozen; /* Is sb frozen? */
+ int freeze_kcount; /* How many kernel freeze requests? */
+ int freeze_ucount; /* How many userspace freeze requests? */
+ const void *freeze_owner; /* Owner of the freeze */
+ struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
+};
+
+/**
+ * enum freeze_holder - holder of the freeze
+ * @FREEZE_HOLDER_KERNEL: kernel wants to freeze or thaw filesystem
+ * @FREEZE_HOLDER_USERSPACE: userspace wants to freeze or thaw filesystem
+ * @FREEZE_MAY_NEST: whether nesting freeze and thaw requests is allowed
+ * @FREEZE_EXCL: a freeze that can only be undone by the owner
+ *
+ * Indicate who the owner of the freeze or thaw request is and whether
+ * the freeze needs to be exclusive or can nest.
+ * Without @FREEZE_MAY_NEST, multiple freeze and thaw requests from the
+ * same holder aren't allowed. It is however allowed to hold a single
+ * @FREEZE_HOLDER_USERSPACE and a single @FREEZE_HOLDER_KERNEL freeze at
+ * the same time. This is relied upon by some filesystems during online
+ * repair or similar.
+ */
+enum freeze_holder {
+ FREEZE_HOLDER_KERNEL = (1U << 0),
+ FREEZE_HOLDER_USERSPACE = (1U << 1),
+ FREEZE_MAY_NEST = (1U << 2),
+ FREEZE_EXCL = (1U << 3),
+};
+
+struct super_operations {
+ struct inode *(*alloc_inode)(struct super_block *sb);
+ void (*destroy_inode)(struct inode *inode);
+ void (*free_inode)(struct inode *inode);
+ void (*dirty_inode)(struct inode *inode, int flags);
+ int (*write_inode)(struct inode *inode, struct writeback_control *wbc);
+ int (*drop_inode)(struct inode *inode);
+ void (*evict_inode)(struct inode *inode);
+ void (*put_super)(struct super_block *sb);
+ int (*sync_fs)(struct super_block *sb, int wait);
+ int (*freeze_super)(struct super_block *sb, enum freeze_holder who,
+ const void *owner);
+ int (*freeze_fs)(struct super_block *sb);
+ int (*thaw_super)(struct super_block *sb, enum freeze_holder who,
+ const void *owner);
+ int (*unfreeze_fs)(struct super_block *sb);
+ int (*statfs)(struct dentry *dentry, struct kstatfs *kstatfs);
+ int (*remount_fs) (struct super_block *, int *, char *);
+ void (*umount_begin)(struct super_block *sb);
+
+ int (*show_options)(struct seq_file *seq, struct dentry *dentry);
+ int (*show_devname)(struct seq_file *seq, struct dentry *dentry);
+ int (*show_path)(struct seq_file *seq, struct dentry *dentry);
+ int (*show_stats)(struct seq_file *seq, struct dentry *dentry);
+#ifdef CONFIG_QUOTA
+ ssize_t (*quota_read)(struct super_block *sb, int type, char *data,
+ size_t len, loff_t off);
+ ssize_t (*quota_write)(struct super_block *sb, int type,
+ const char *data, size_t len, loff_t off);
+ struct dquot __rcu **(*get_dquots)(struct inode *inode);
+#endif
+ long (*nr_cached_objects)(struct super_block *sb,
+ struct shrink_control *sc);
+ long (*free_cached_objects)(struct super_block *sb,
+ struct shrink_control *sc);
+ /*
+ * If a filesystem can support graceful removal of a device and
+ * continue read-write operations, implement this callback.
+ *
+ * Return 0 if the filesystem can continue read-write.
+ * A non-zero return value, or the absence of this callback, means the fs
+ * will be shut down as usual.
+ */
+ int (*remove_bdev)(struct super_block *sb, struct block_device *bdev);
+ void (*shutdown)(struct super_block *sb);
+};
+
+struct super_block {
+ struct list_head s_list; /* Keep this first */
+ dev_t s_dev; /* search index; _not_ kdev_t */
+ unsigned char s_blocksize_bits;
+ unsigned long s_blocksize;
+ loff_t s_maxbytes; /* Max file size */
+ struct file_system_type *s_type;
+ const struct super_operations *s_op;
+ const struct dquot_operations *dq_op;
+ const struct quotactl_ops *s_qcop;
+ const struct export_operations *s_export_op;
+ unsigned long s_flags;
+ unsigned long s_iflags; /* internal SB_I_* flags */
+ unsigned long s_magic;
+ struct dentry *s_root;
+ struct rw_semaphore s_umount;
+ int s_count;
+ atomic_t s_active;
+#ifdef CONFIG_SECURITY
+ void *s_security;
+#endif
+ const struct xattr_handler *const *s_xattr;
+#ifdef CONFIG_FS_ENCRYPTION
+ const struct fscrypt_operations *s_cop;
+ struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */
+#endif
+#ifdef CONFIG_FS_VERITY
+ const struct fsverity_operations *s_vop;
+#endif
+#if IS_ENABLED(CONFIG_UNICODE)
+ struct unicode_map *s_encoding;
+ __u16 s_encoding_flags;
+#endif
+ struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
+ struct mount *s_mounts; /* list of mounts; _not_ for fs use */
+ struct block_device *s_bdev; /* can go away once we use an accessor for @s_bdev_file */
+ struct file *s_bdev_file;
+ struct backing_dev_info *s_bdi;
+ struct mtd_info *s_mtd;
+ struct hlist_node s_instances;
+ unsigned int s_quota_types; /* Bitmask of supported quota types */
+ struct quota_info s_dquot; /* Diskquota specific options */
+
+ struct sb_writers s_writers;
+
+ /*
+ * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and
+ * s_fsnotify_info together for cache efficiency. They are frequently
+ * accessed and rarely modified.
+ */
+ void *s_fs_info; /* Filesystem private info */
+
+ /* Granularity of c/m/atime in ns (cannot be worse than a second) */
+ u32 s_time_gran;
+ /* Time limits for c/m/atime in seconds */
+ time64_t s_time_min;
+ time64_t s_time_max;
+#ifdef CONFIG_FSNOTIFY
+ u32 s_fsnotify_mask;
+ struct fsnotify_sb_info *s_fsnotify_info;
+#endif
+
+ /*
+ * q: why are s_id and s_sysfs_name not the same? both are human
+ * readable strings that identify the filesystem
+ * a: s_id is allowed to change at runtime; it's used in log messages,
+ * and we want to change it when a device starts out as a single device
+ * (s_id is the dev name) but another device is then hot-added and we
+ * have to switch to identifying it by UUID
+ * but s_sysfs_name is a handle for programmatic access, and can't
+ * change at runtime
+ */
+ char s_id[32]; /* Informational name */
+ uuid_t s_uuid; /* UUID */
+ u8 s_uuid_len; /* Default 16, possibly smaller for weird filesystems */
+
+ /* if set, fs shows up under sysfs at /sys/fs/$FSTYP/s_sysfs_name */
+ char s_sysfs_name[UUID_STRING_LEN + 1];
+
+ unsigned int s_max_links;
+ unsigned int s_d_flags; /* default d_flags for dentries */
+
+ /*
+ * The next field is for VFS *only*. No filesystems have any business
+ * even looking at it. You had been warned.
+ */
+ struct mutex s_vfs_rename_mutex; /* Kludge */
+
+ /*
+ * Filesystem subtype. If non-empty the filesystem type field
+ * in /proc/mounts will be "type.subtype"
+ */
+ const char *s_subtype;
+
+ const struct dentry_operations *__s_d_op; /* default d_op for dentries */
+
+ struct shrinker *s_shrink; /* per-sb shrinker handle */
+
+ /* Number of inodes with nlink == 0 but still referenced */
+ atomic_long_t s_remove_count;
+
+ /* Read-only state of the superblock is being changed */
+ int s_readonly_remount;
+
+ /* per-sb errseq_t for reporting writeback errors via syncfs */
+ errseq_t s_wb_err;
+
+ /* AIO completions deferred from interrupt context */
+ struct workqueue_struct *s_dio_done_wq;
+ struct hlist_head s_pins;
+
+ /*
+ * Owning user namespace and default context in which to
+ * interpret filesystem uids, gids, quotas, device nodes,
+ * xattrs and security labels.
+ */
+ struct user_namespace *s_user_ns;
+
+ /*
+ * The list_lru structure is essentially just a pointer to a table
+ * of per-node lru lists, each of which has its own spinlock.
+ * There is no need to put them into separate cachelines.
+ */
+ struct list_lru s_dentry_lru;
+ struct list_lru s_inode_lru;
+ struct rcu_head rcu;
+ struct work_struct destroy_work;
+
+ struct mutex s_sync_lock; /* sync serialisation lock */
+
+ /*
+ * Indicates how deep in a filesystem stack this SB is
+ */
+ int s_stack_depth;
+
+ /* s_inode_list_lock protects s_inodes */
+ spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp;
+ struct list_head s_inodes; /* all inodes */
+
+ spinlock_t s_inode_wblist_lock;
+ struct list_head s_inodes_wb; /* writeback inodes */
+ long s_min_writeback_pages;
+} __randomize_layout;
+
+/*
+ * sb->s_flags. Note that these mirror the equivalent MS_* flags where
+ * represented in both.
+ */
+#define SB_RDONLY BIT(0) /* Mount read-only */
+#define SB_NOSUID BIT(1) /* Ignore suid and sgid bits */
+#define SB_NODEV BIT(2) /* Disallow access to device special files */
+#define SB_NOEXEC BIT(3) /* Disallow program execution */
+#define SB_SYNCHRONOUS BIT(4) /* Writes are synced at once */
+#define SB_MANDLOCK BIT(6) /* Allow mandatory locks on an FS */
+#define SB_DIRSYNC BIT(7) /* Directory modifications are synchronous */
+#define SB_NOATIME BIT(10) /* Do not update access times. */
+#define SB_NODIRATIME BIT(11) /* Do not update directory access times */
+#define SB_SILENT BIT(15)
+#define SB_POSIXACL BIT(16) /* Supports POSIX ACLs */
+#define SB_INLINECRYPT BIT(17) /* Use blk-crypto for encrypted files */
+#define SB_KERNMOUNT BIT(22) /* this is a kern_mount call */
+#define SB_I_VERSION BIT(23) /* Update inode I_version field */
+#define SB_LAZYTIME BIT(25) /* Update the on-disk [acm]times lazily */
+
+/* These sb flags are internal to the kernel */
+#define SB_DEAD BIT(21)
+#define SB_DYING BIT(24)
+#define SB_FORCE BIT(27)
+#define SB_NOSEC BIT(28)
+#define SB_BORN BIT(29)
+#define SB_ACTIVE BIT(30)
+#define SB_NOUSER BIT(31)
+
+/* These flags relate to encoding and casefolding */
+#define SB_ENC_STRICT_MODE_FL (1 << 0)
+#define SB_ENC_NO_COMPAT_FALLBACK_FL (1 << 1)
+
+#define sb_has_strict_encoding(sb) \
+ (sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL)
+
+#if IS_ENABLED(CONFIG_UNICODE)
+#define sb_no_casefold_compat_fallback(sb) \
+ (sb->s_encoding_flags & SB_ENC_NO_COMPAT_FALLBACK_FL)
+#else
+#define sb_no_casefold_compat_fallback(sb) (1)
+#endif
+
+/* sb->s_iflags */
+#define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */
+#define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */
+#define SB_I_NODEV 0x00000004 /* Ignore devices on this fs */
+#define SB_I_STABLE_WRITES 0x00000008 /* don't modify blks until WB is done */
+
+/* sb->s_iflags to limit user namespace mounts */
+#define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */
+#define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020
+#define SB_I_UNTRUSTED_MOUNTER 0x00000040
+#define SB_I_EVM_HMAC_UNSUPPORTED 0x00000080
+
+#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */
+#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
+#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
+#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
+#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */
+#define SB_I_NOIDMAP 0x00002000 /* No idmapped mounts on this superblock */
+#define SB_I_ALLOW_HSM 0x00004000 /* Allow HSM events on this superblock */
+
+#endif /* _LINUX_FS_SUPER_TYPES_H */
diff --git a/include/linux/fs_api.h b/include/linux/fs_api.h
new file mode 100644
index 000000000000..83be38d6d413
--- /dev/null
+++ b/include/linux/fs_api.h
@@ -0,0 +1 @@
+#include <linux/fs.h>
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index 37e1e8f7f08d..0d6c8a6d7be2 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -99,17 +99,17 @@ struct fs_context {
const struct cred *cred; /* The mounter's credentials */
struct p_log log; /* Logging buffer */
const char *source; /* The source name (eg. dev path) */
- void *security; /* Linux S&M options */
+ void *security; /* LSM options */
void *s_fs_info; /* Proposed s_fs_info */
unsigned int sb_flags; /* Proposed superblock flags (SB_*) */
unsigned int sb_flags_mask; /* Superblock flags that were changed */
unsigned int s_iflags; /* OR'd with sb->s_iflags */
- unsigned int lsm_flags; /* Information flags from the fs to the LSM */
enum fs_context_purpose purpose:8;
enum fs_context_phase phase:8; /* The phase the context is in */
bool need_free:1; /* Need to call ops->free() */
bool global:1; /* Goes into &init_user_ns */
bool oldapi:1; /* Coming from mount(2) */
+ bool exclusive:1; /* create new superblock, reject existing one */
};
struct fs_context_operations {
@@ -134,25 +134,21 @@ extern struct fs_context *fs_context_for_submount(struct file_system_type *fs_ty
extern struct fs_context *vfs_dup_fs_context(struct fs_context *fc);
extern int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param);
-extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
- const char *value, size_t v_size);
+extern int vfs_parse_fs_qstr(struct fs_context *fc, const char *key,
+ const struct qstr *value);
+static inline int vfs_parse_fs_string(struct fs_context *fc, const char *key,
+ const char *value)
+{
+ return vfs_parse_fs_qstr(fc, key, value ? &QSTR(value) : NULL);
+}
+int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
+ char *(*sep)(char **));
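
For filesystems whose legacy mount data is not comma-separated, a custom separator can be passed in; a hedged sketch using a hypothetical ';'-separated option format:

    static char *myfs_next_opt(char **s)
    {
            return strsep(s, ";");
    }

    static int myfs_parse_monolithic(struct fs_context *fc, void *data)
    {
            return vfs_parse_monolithic_sep(fc, data, myfs_next_opt);
    }
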
extern int generic_parse_monolithic(struct fs_context *fc, void *data);
extern int vfs_get_tree(struct fs_context *fc);
extern void put_fs_context(struct fs_context *fc);
-
-/*
- * sget() wrappers to be called from the ->get_tree() op.
- */
-enum vfs_get_super_keying {
- vfs_get_single_super, /* Only one such superblock may exist */
- vfs_get_single_reconf_super, /* As above, but reconfigure if it exists */
- vfs_get_keyed_super, /* Superblocks with different s_fs_info keys may exist */
- vfs_get_independent_super, /* Multiple independent superblocks may exist */
-};
-extern int vfs_get_super(struct fs_context *fc,
- enum vfs_get_super_keying keying,
- int (*fill_super)(struct super_block *sb,
- struct fs_context *fc));
+extern int vfs_parse_fs_param_source(struct fs_context *fc,
+ struct fs_parameter *param);
+extern void fc_drop_locked(struct fs_context *fc);
extern int get_tree_nodev(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
@@ -160,14 +156,19 @@ extern int get_tree_nodev(struct fs_context *fc,
extern int get_tree_single(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));
-extern int get_tree_single_reconf(struct fs_context *fc,
- int (*fill_super)(struct super_block *sb,
- struct fs_context *fc));
extern int get_tree_keyed(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc),
void *key);
+int setup_bdev_super(struct super_block *sb, int sb_flags,
+ struct fs_context *fc);
+
+#define GET_TREE_BDEV_QUIET_LOOKUP 0x0001
+int get_tree_bdev_flags(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc), unsigned int flags);
+
extern int get_tree_bdev(struct fs_context *fc,
int (*fill_super)(struct super_block *sb,
struct fs_context *fc));
@@ -190,10 +191,12 @@ struct fc_log {
extern __attribute__((format(printf, 4, 5)))
void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt, ...);
-#define __logfc(fc, l, fmt, ...) logfc((fc)->log.log, NULL, \
- l, fmt, ## __VA_ARGS__)
-#define __plog(p, l, fmt, ...) logfc((p)->log, (p)->prefix, \
- l, fmt, ## __VA_ARGS__)
+#define __logfc(fc, l, fmt, ...) \
+ logfc((fc)->log.log, NULL, (l), (fmt), ## __VA_ARGS__)
+#define __plogp(p, prefix, l, fmt, ...) \
+ logfc((p)->log, (prefix), (l), (fmt), ## __VA_ARGS__)
+#define __plog(p, l, fmt, ...) __plogp(p, (p)->prefix, l, fmt, ## __VA_ARGS__)
+
/**
* infof - Store supplementary informational message
* @fc: The context in which to log the informational message
@@ -204,7 +207,9 @@ void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt,
*/
#define infof(fc, fmt, ...) __logfc(fc, 'i', fmt, ## __VA_ARGS__)
#define info_plog(p, fmt, ...) __plog(p, 'i', fmt, ## __VA_ARGS__)
-#define infofc(p, fmt, ...) __plog((&(fc)->log), 'i', fmt, ## __VA_ARGS__)
+#define infofc(fc, fmt, ...) __plog((&(fc)->log), 'i', fmt, ## __VA_ARGS__)
+#define infofcp(fc, prefix, fmt, ...) \
+ __plogp((&(fc)->log), prefix, 'i', fmt, ## __VA_ARGS__)
/**
* warnf - Store supplementary warning message
@@ -217,6 +222,8 @@ void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt,
#define warnf(fc, fmt, ...) __logfc(fc, 'w', fmt, ## __VA_ARGS__)
#define warn_plog(p, fmt, ...) __plog(p, 'w', fmt, ## __VA_ARGS__)
#define warnfc(fc, fmt, ...) __plog((&(fc)->log), 'w', fmt, ## __VA_ARGS__)
+#define warnfcp(fc, prefix, fmt, ...) \
+ __plogp((&(fc)->log), prefix, 'w', fmt, ## __VA_ARGS__)
/**
* errorf - Store supplementary error message
@@ -229,6 +236,8 @@ void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt,
#define errorf(fc, fmt, ...) __logfc(fc, 'e', fmt, ## __VA_ARGS__)
#define error_plog(p, fmt, ...) __plog(p, 'e', fmt, ## __VA_ARGS__)
#define errorfc(fc, fmt, ...) __plog((&(fc)->log), 'e', fmt, ## __VA_ARGS__)
+#define errorfcp(fc, prefix, fmt, ...) \
+ __plogp((&(fc)->log), prefix, 'e', fmt, ## __VA_ARGS__)
/**
* invalf - Store supplementary invalid argument error message
@@ -241,5 +250,7 @@ void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt,
#define invalf(fc, fmt, ...) (errorf(fc, fmt, ## __VA_ARGS__), -EINVAL)
#define inval_plog(p, fmt, ...) (error_plog(p, fmt, ## __VA_ARGS__), -EINVAL)
#define invalfc(fc, fmt, ...) (errorfc(fc, fmt, ## __VA_ARGS__), -EINVAL)
+#define invalfcp(fc, prefix, fmt, ...) \
+ (errorfcp(fc, prefix, fmt, ## __VA_ARGS__), -EINVAL)
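
The *fcp variants substitute an explicit prefix for p->prefix; a one-line sketch, with the "myfs" prefix illustrative:

    return invalfcp(fc, "myfs", "unsupported remount flags 0x%x", flags);
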
#endif /* _LINUX_FS_CONTEXT_H */
diff --git a/include/linux/fs_types.h b/include/linux/fs_dirent.h
index 54816791196f..92f75c5bac19 100644
--- a/include/linux/fs_types.h
+++ b/include/linux/fs_dirent.h
@@ -1,6 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_FS_TYPES_H
-#define _LINUX_FS_TYPES_H
+#ifndef _LINUX_FS_DIRENT_H
+#define _LINUX_FS_DIRENT_H
+
+#include <linux/stat.h>
+#include <linux/types.h>
/*
* This is a header for the common implementation of dirent
@@ -66,10 +69,10 @@
/*
* declarations for helper functions, accompanying implementation
- * is in fs/fs_types.c
+ * is in fs/fs_dirent.c
*/
extern unsigned char fs_ftype_to_dtype(unsigned int filetype);
extern unsigned char fs_umode_to_ftype(umode_t mode);
extern unsigned char fs_umode_to_dtype(umode_t mode);
-#endif
+#endif /* _LINUX_FS_DIRENT_H */
diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h
deleted file mode 100644
index 77d783f71527..000000000000
--- a/include/linux/fs_enet_pd.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Platform information definitions for the
- * universal Freescale Ethernet driver.
- *
- * Copyright (c) 2003 Intracom S.A.
- * by Pantelis Antoniou <panto@intracom.gr>
- *
- * 2005 (c) MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef FS_ENET_PD_H
-#define FS_ENET_PD_H
-
-#include <linux/clk.h>
-#include <linux/string.h>
-#include <linux/of_mdio.h>
-#include <linux/if_ether.h>
-#include <asm/types.h>
-
-#define FS_ENET_NAME "fs_enet"
-
-enum fs_id {
- fsid_fec1,
- fsid_fec2,
- fsid_fcc1,
- fsid_fcc2,
- fsid_fcc3,
- fsid_scc1,
- fsid_scc2,
- fsid_scc3,
- fsid_scc4,
-};
-
-#define FS_MAX_INDEX 9
-
-static inline int fs_get_fec_index(enum fs_id id)
-{
- if (id >= fsid_fec1 && id <= fsid_fec2)
- return id - fsid_fec1;
- return -1;
-}
-
-static inline int fs_get_fcc_index(enum fs_id id)
-{
- if (id >= fsid_fcc1 && id <= fsid_fcc3)
- return id - fsid_fcc1;
- return -1;
-}
-
-static inline int fs_get_scc_index(enum fs_id id)
-{
- if (id >= fsid_scc1 && id <= fsid_scc4)
- return id - fsid_scc1;
- return -1;
-}
-
-static inline int fs_fec_index2id(int index)
-{
- int id = fsid_fec1 + index - 1;
- if (id >= fsid_fec1 && id <= fsid_fec2)
- return id;
- return FS_MAX_INDEX;
- }
-
-static inline int fs_fcc_index2id(int index)
-{
- int id = fsid_fcc1 + index - 1;
- if (id >= fsid_fcc1 && id <= fsid_fcc3)
- return id;
- return FS_MAX_INDEX;
-}
-
-static inline int fs_scc_index2id(int index)
-{
- int id = fsid_scc1 + index - 1;
- if (id >= fsid_scc1 && id <= fsid_scc4)
- return id;
- return FS_MAX_INDEX;
-}
-
-enum fs_mii_method {
- fsmii_fixed,
- fsmii_fec,
- fsmii_bitbang,
-};
-
-enum fs_ioport {
- fsiop_porta,
- fsiop_portb,
- fsiop_portc,
- fsiop_portd,
- fsiop_porte,
-};
-
-struct fs_mii_bit {
- u32 offset;
- u8 bit;
- u8 polarity;
-};
-struct fs_mii_bb_platform_info {
- struct fs_mii_bit mdio_dir;
- struct fs_mii_bit mdio_dat;
- struct fs_mii_bit mdc_dat;
- int delay; /* delay in us */
- int irq[32]; /* irqs per phy's */
-};
-
-struct fs_platform_info {
-
- void(*init_ioports)(struct fs_platform_info *);
- /* device specific information */
- int fs_no; /* controller index */
- char fs_type[4]; /* controller type */
-
- u32 cp_page; /* CPM page */
- u32 cp_block; /* CPM sblock */
- u32 cp_command; /* CPM page/sblock/mcn */
-
- u32 clk_trx; /* some stuff for pins & mux configuration*/
- u32 clk_rx;
- u32 clk_tx;
- u32 clk_route;
- u32 clk_mask;
-
- u32 mem_offset;
- u32 dpram_offset;
- u32 fcc_regs_c;
-
- u32 device_flags;
-
- struct device_node *phy_node;
- const struct fs_mii_bus_info *bus_info;
-
- int rx_ring, tx_ring; /* number of buffers on rx */
- __u8 macaddr[ETH_ALEN]; /* mac address */
- int rx_copybreak; /* limit we copy small frames */
- int napi_weight; /* NAPI weight */
-
- int use_rmii; /* use RMII mode */
- int has_phy; /* if the network is phy container as well...*/
-
- struct clk *clk_per; /* 'per' clock for register access */
-};
-struct fs_mii_fec_platform_info {
- u32 irq[32];
- u32 mii_speed;
-};
-
-static inline int fs_get_id(struct fs_platform_info *fpi)
-{
- if(strstr(fpi->fs_type, "SCC"))
- return fs_scc_index2id(fpi->fs_no);
- if(strstr(fpi->fs_type, "FCC"))
- return fs_fcc_index2id(fpi->fs_no);
- if(strstr(fpi->fs_type, "FEC"))
- return fs_fec_index2id(fpi->fs_no);
- return fpi->fs_no;
-}
-
-#endif
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
index aab0ffc6bac6..5e8a3b546033 100644
--- a/include/linux/fs_parser.h
+++ b/include/linux/fs_parser.h
@@ -28,7 +28,8 @@ typedef int fs_param_type(struct p_log *,
*/
fs_param_type fs_param_is_bool, fs_param_is_u32, fs_param_is_s32, fs_param_is_u64,
fs_param_is_enum, fs_param_is_string, fs_param_is_blob, fs_param_is_blockdev,
- fs_param_is_path, fs_param_is_fd;
+ fs_param_is_path, fs_param_is_fd, fs_param_is_uid, fs_param_is_gid,
+ fs_param_is_file_or_string;
/*
* Specification of the type of value a parameter wants.
@@ -42,7 +43,7 @@ struct fs_parameter_spec {
u8 opt; /* Option number (returned by fs_parse()) */
unsigned short flags;
#define fs_param_neg_with_no 0x0002 /* "noxxx" is negative param */
-#define fs_param_neg_with_empty 0x0004 /* "xxx=" is negative param */
+#define fs_param_can_be_empty 0x0004 /* "xxx=" is allowed */
#define fs_param_deprecated 0x0008 /* The param is deprecated */
const void *data;
};
@@ -57,6 +58,8 @@ struct fs_parse_result {
int int_32; /* For spec_s32/spec_enum */
unsigned int uint_32; /* For spec_u32{,_octal,_hex}/spec_enum */
u64 uint_64; /* For spec_u64 */
+ kuid_t uid;
+ kgid_t gid;
};
};
@@ -76,19 +79,17 @@ static inline int fs_parse(struct fs_context *fc,
extern int fs_lookup_param(struct fs_context *fc,
struct fs_parameter *param,
bool want_bdev,
+ unsigned int flags,
struct path *_path);
extern int lookup_constant(const struct constant_table tbl[], const char *name, int not_found);
+extern const struct constant_table bool_names[];
+
#ifdef CONFIG_VALIDATE_FS_PARSER
-extern bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
- int low, int high, int special);
extern bool fs_validate_description(const char *name,
const struct fs_parameter_spec *desc);
#else
-static inline bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
- int low, int high, int special)
-{ return true; }
static inline bool fs_validate_description(const char *name,
const struct fs_parameter_spec *desc)
{ return true; }
@@ -120,7 +121,7 @@ static inline bool fs_validate_description(const char *name,
#define fsparam_u32oct(NAME, OPT) \
__fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)8)
#define fsparam_u32hex(NAME, OPT) \
- __fsparam(fs_param_is_u32_hex, NAME, OPT, 0, (void *)16)
+ __fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)16)
#define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0, NULL)
#define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0, NULL)
#define fsparam_enum(NAME, OPT, array) __fsparam(fs_param_is_enum, NAME, OPT, 0, array)
@@ -130,5 +131,13 @@ static inline bool fs_validate_description(const char *name,
#define fsparam_bdev(NAME, OPT) __fsparam(fs_param_is_blockdev, NAME, OPT, 0, NULL)
#define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0, NULL)
#define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0, NULL)
+#define fsparam_file_or_string(NAME, OPT) \
+ __fsparam(fs_param_is_file_or_string, NAME, OPT, 0, NULL)
+#define fsparam_uid(NAME, OPT) __fsparam(fs_param_is_uid, NAME, OPT, 0, NULL)
+#define fsparam_gid(NAME, OPT) __fsparam(fs_param_is_gid, NAME, OPT, 0, NULL)
+
+/* String parameter that allows empty argument */
+#define fsparam_string_empty(NAME, OPT) \
+ __fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL)
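
Taken together, a parameter table using the new specs might look like this sketch (the myfs_* names and option set are hypothetical):

    enum { Opt_uid, Opt_gid, Opt_label };

    static const struct fs_parameter_spec myfs_param_specs[] = {
            fsparam_uid("uid", Opt_uid),              /* fills result->uid */
            fsparam_gid("gid", Opt_gid),              /* fills result->gid */
            fsparam_string_empty("label", Opt_label), /* "label=" accepted */
            {}
    };
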
#endif /* _LINUX_FS_PARSER_H */
diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h
index 54210a42c30d..0cc2fa283305 100644
--- a/include/linux/fs_stack.h
+++ b/include/linux/fs_stack.h
@@ -3,7 +3,7 @@
#define _LINUX_FS_STACK_H
/* This file defines generic functions used primarily by stackable
- * filesystems; none of these functions require i_mutex to be held.
+ * filesystems; none of these functions require i_rwsem to be held.
*/
#include <linux/fs.h>
@@ -16,15 +16,15 @@ extern void fsstack_copy_inode_size(struct inode *dst, struct inode *src);
static inline void fsstack_copy_attr_atime(struct inode *dest,
const struct inode *src)
{
- dest->i_atime = src->i_atime;
+ inode_set_atime_to_ts(dest, inode_get_atime(src));
}
static inline void fsstack_copy_attr_times(struct inode *dest,
const struct inode *src)
{
- dest->i_atime = src->i_atime;
- dest->i_mtime = src->i_mtime;
- dest->i_ctime = src->i_ctime;
+ inode_set_atime_to_ts(dest, inode_get_atime(src));
+ inode_set_mtime_to_ts(dest, inode_get_mtime(src));
+ inode_set_ctime_to_ts(dest, inode_get_ctime(src));
}
#endif /* _LINUX_FS_STACK_H */
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index 783b48dedb72..0070764b790a 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -2,14 +2,14 @@
#ifndef _LINUX_FS_STRUCT_H
#define _LINUX_FS_STRUCT_H
+#include <linux/sched.h>
#include <linux/path.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>
struct fs_struct {
int users;
- spinlock_t lock;
- seqcount_spinlock_t seq;
+ seqlock_t seq;
int umask;
int in_exec;
struct path root, pwd;
@@ -26,20 +26,25 @@ extern int unshare_fs_struct(void);
static inline void get_fs_root(struct fs_struct *fs, struct path *root)
{
- spin_lock(&fs->lock);
+ read_seqlock_excl(&fs->seq);
*root = fs->root;
path_get(root);
- spin_unlock(&fs->lock);
+ read_sequnlock_excl(&fs->seq);
}
static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
{
- spin_lock(&fs->lock);
+ read_seqlock_excl(&fs->seq);
*pwd = fs->pwd;
path_get(pwd);
- spin_unlock(&fs->lock);
+ read_sequnlock_excl(&fs->seq);
}
extern bool current_chrooted(void);
+static inline int current_umask(void)
+{
+ return current->fs->umask;
+}
+
#endif /* _LINUX_FS_STRUCT_H */
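/*
 * Usage sketch (illustrative only, not part of the patch above): taking a
 * counted reference to the caller's working directory and applying its
 * umask, using the helpers above.  The function name is hypothetical; the
 * caller must path_put() the path when done with it.
 */
#include <linux/fs_struct.h>
#include <linux/path.h>

static umode_t examplefs_mode_for_create(struct path *pwd, umode_t mode)
{
	get_fs_pwd(current->fs, pwd);	/* takes a reference under fs->seq */
	return mode & ~current_umask();
}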
diff --git a/include/linux/fs_uart_pd.h b/include/linux/fs_uart_pd.h
deleted file mode 100644
index 36b61ff39277..000000000000
--- a/include/linux/fs_uart_pd.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Platform information definitions for the CPM Uart driver.
- *
- * 2006 (c) MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef FS_UART_PD_H
-#define FS_UART_PD_H
-
-#include <asm/types.h>
-
-enum fs_uart_id {
- fsid_smc1_uart,
- fsid_smc2_uart,
- fsid_scc1_uart,
- fsid_scc2_uart,
- fsid_scc3_uart,
- fsid_scc4_uart,
- fs_uart_nr,
-};
-
-static inline int fs_uart_id_scc2fsid(int id)
-{
- return fsid_scc1_uart + id - 1;
-}
-
-static inline int fs_uart_id_fsid2scc(int id)
-{
- return id - fsid_scc1_uart + 1;
-}
-
-static inline int fs_uart_id_smc2fsid(int id)
-{
- return fsid_smc1_uart + id - 1;
-}
-
-static inline int fs_uart_id_fsid2smc(int id)
-{
- return id - fsid_smc1_uart + 1;
-}
-
-struct fs_uart_platform_info {
- void(*init_ioports)(struct fs_uart_platform_info *);
- /* device specific information */
- int fs_no; /* controller index */
- char fs_type[4]; /* controller type */
- u32 uart_clk;
- u8 tx_num_fifo;
- u8 tx_buf_size;
- u8 rx_num_fifo;
- u8 rx_buf_size;
- u8 brg;
- u8 clk_rx;
- u8 clk_tx;
-};
-
-static inline int fs_uart_get_id(struct fs_uart_platform_info *fpi)
-{
- if(strstr(fpi->fs_type, "SMC"))
- return fs_uart_id_smc2fsid(fpi->fs_no);
- if(strstr(fpi->fs_type, "SCC"))
- return fs_uart_id_scc2fsid(fpi->fs_no);
- return fpi->fs_no;
-}
-
-#endif
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 3235ddbdcc09..4c91a019972b 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* General filesystem caching backing cache interface
*
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* NOTE!!! See:
@@ -15,208 +15,35 @@
#define _LINUX_FSCACHE_CACHE_H
#include <linux/fscache.h>
-#include <linux/sched.h>
-#include <linux/workqueue.h>
-#define NR_MAXCACHES BITS_PER_LONG
-
-struct fscache_cache;
-struct fscache_cache_ops;
-struct fscache_object;
-struct fscache_operation;
-
-enum fscache_obj_ref_trace {
- fscache_obj_get_add_to_deps,
- fscache_obj_get_queue,
- fscache_obj_put_alloc_fail,
- fscache_obj_put_attach_fail,
- fscache_obj_put_drop_obj,
- fscache_obj_put_enq_dep,
- fscache_obj_put_queue,
- fscache_obj_put_work,
- fscache_obj_ref__nr_traces
+enum fscache_cache_trace;
+enum fscache_cookie_trace;
+enum fscache_access_trace;
+enum fscache_volume_trace;
+
+enum fscache_cache_state {
+ FSCACHE_CACHE_IS_NOT_PRESENT, /* No cache is present for this name */
+ FSCACHE_CACHE_IS_PREPARING, /* A cache is preparing to come live */
+ FSCACHE_CACHE_IS_ACTIVE, /* Attached cache is active and can be used */
+ FSCACHE_CACHE_GOT_IOERROR, /* Attached cache stopped on I/O error */
+ FSCACHE_CACHE_IS_WITHDRAWN, /* Attached cache is being withdrawn */
+#define NR__FSCACHE_CACHE_STATE (FSCACHE_CACHE_IS_WITHDRAWN + 1)
};
/*
- * cache tag definition
- */
-struct fscache_cache_tag {
- struct list_head link;
- struct fscache_cache *cache; /* cache referred to by this tag */
- unsigned long flags;
-#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */
- atomic_t usage;
- char name[]; /* tag name */
-};
-
-/*
- * cache definition
+ * Cache cookie.
*/
struct fscache_cache {
const struct fscache_cache_ops *ops;
- struct fscache_cache_tag *tag; /* tag representing this cache */
- struct kobject *kobj; /* system representation of this cache */
- struct list_head link; /* link in list of caches */
- size_t max_index_size; /* maximum size of index data */
- char identifier[36]; /* cache label */
-
- /* node management */
- struct work_struct op_gc; /* operation garbage collector */
- struct list_head object_list; /* list of data/index objects */
- struct list_head op_gc_list; /* list of ops to be deleted */
- spinlock_t object_list_lock;
- spinlock_t op_gc_list_lock;
+ struct list_head cache_link; /* Link in cache list */
+ void *cache_priv; /* Private cache data (or NULL) */
+ refcount_t ref;
+ atomic_t n_volumes; /* Number of active volumes */
+ atomic_t n_accesses; /* Number of in-progress accesses on the cache */
atomic_t object_count; /* no. of live objects in this cache */
- struct fscache_object *fsdef; /* object for the fsdef index */
- unsigned long flags;
-#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */
-#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */
-};
-
-extern wait_queue_head_t fscache_cache_cleared_wq;
-
-/*
- * operation to be applied to a cache object
- * - retrieval initiation operations are done in the context of the process
- * that issued them, and not in an async thread pool
- */
-typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
-typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
-typedef void (*fscache_operation_cancel_t)(struct fscache_operation *op);
-
-enum fscache_operation_state {
- FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */
- FSCACHE_OP_ST_INITIALISED, /* Op is initialised */
- FSCACHE_OP_ST_PENDING, /* Op is blocked from running */
- FSCACHE_OP_ST_IN_PROGRESS, /* Op is in progress */
- FSCACHE_OP_ST_COMPLETE, /* Op is complete */
- FSCACHE_OP_ST_CANCELLED, /* Op has been cancelled */
- FSCACHE_OP_ST_DEAD /* Op is now dead */
-};
-
-struct fscache_operation {
- struct work_struct work; /* record for async ops */
- struct list_head pend_link; /* link in object->pending_ops */
- struct fscache_object *object; /* object to be operated upon */
-
- unsigned long flags;
-#define FSCACHE_OP_TYPE 0x000f /* operation type */
-#define FSCACHE_OP_ASYNC 0x0001 /* - async op, processor may sleep for disk */
-#define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done be issuing thread, not pool */
-#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
-#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
-#define FSCACHE_OP_DEC_READ_CNT 6 /* decrement object->n_reads on destruction */
-#define FSCACHE_OP_UNUSE_COOKIE 7 /* call fscache_unuse_cookie() on completion */
-#define FSCACHE_OP_KEEP_FLAGS 0x00f0 /* flags to keep when repurposing an op */
-
- enum fscache_operation_state state;
- atomic_t usage;
- unsigned debug_id; /* debugging ID */
-
- /* operation processor callback
- * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform
- * the op in a non-pool thread */
- fscache_operation_processor_t processor;
-
- /* Operation cancellation cleanup (optional) */
- fscache_operation_cancel_t cancel;
-
- /* operation releaser */
- fscache_operation_release_t release;
-};
-
-extern atomic_t fscache_op_debug_id;
-extern void fscache_op_work_func(struct work_struct *work);
-
-extern void fscache_enqueue_operation(struct fscache_operation *);
-extern void fscache_op_complete(struct fscache_operation *, bool);
-extern void fscache_put_operation(struct fscache_operation *);
-extern void fscache_operation_init(struct fscache_cookie *,
- struct fscache_operation *,
- fscache_operation_processor_t,
- fscache_operation_cancel_t,
- fscache_operation_release_t);
-
-/*
- * data read operation
- */
-struct fscache_retrieval {
- struct fscache_operation op;
- struct fscache_cookie *cookie; /* The netfs cookie */
- struct address_space *mapping; /* netfs pages */
- fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
- void *context; /* netfs read context (pinned) */
- struct list_head to_do; /* list of things to be done by the backend */
- unsigned long start_time; /* time at which retrieval started */
- atomic_t n_pages; /* number of pages to be retrieved */
-};
-
-typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
- struct page *page,
- gfp_t gfp);
-
-typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op,
- struct list_head *pages,
- unsigned *nr_pages,
- gfp_t gfp);
-
-/**
- * fscache_get_retrieval - Get an extra reference on a retrieval operation
- * @op: The retrieval operation to get a reference on
- *
- * Get an extra reference on a retrieval operation.
- */
-static inline
-struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op)
-{
- atomic_inc(&op->op.usage);
- return op;
-}
-
-/**
- * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing
- * @op: The retrieval operation affected
- *
- * Enqueue a retrieval operation for processing by the FS-Cache thread pool.
- */
-static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
-{
- fscache_enqueue_operation(&op->op);
-}
-
-/**
- * fscache_retrieval_complete - Record (partial) completion of a retrieval
- * @op: The retrieval operation affected
- * @n_pages: The number of pages to account for
- */
-static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
- int n_pages)
-{
- if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0)
- fscache_op_complete(&op->op, false);
-}
-
-/**
- * fscache_put_retrieval - Drop a reference to a retrieval operation
- * @op: The retrieval operation affected
- *
- * Drop a reference to a retrieval operation.
- */
-static inline void fscache_put_retrieval(struct fscache_retrieval *op)
-{
- fscache_put_operation(&op->op);
-}
-
-/*
- * cached page storage work item
- * - used to do three things:
- * - batch writes to the cache
- * - do cache writes asynchronously
- * - defer writes until cache object lookup completion
- */
-struct fscache_storage {
- struct fscache_operation op;
- pgoff_t store_limit; /* don't write more than this */
+ unsigned int debug_id;
+ enum fscache_cache_state state;
+ char *name;
};
/*
@@ -226,344 +53,162 @@ struct fscache_cache_ops {
/* name of cache provider */
const char *name;
- /* allocate an object record for a cookie */
- struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
- struct fscache_cookie *cookie);
-
- /* look up the object for a cookie
- * - return -ETIMEDOUT to be requeued
- */
- int (*lookup_object)(struct fscache_object *object);
-
- /* finished looking up */
- void (*lookup_complete)(struct fscache_object *object);
-
- /* increment the usage count on this object (may fail if unmounting) */
- struct fscache_object *(*grab_object)(struct fscache_object *object,
- enum fscache_obj_ref_trace why);
+ /* Acquire a volume */
+ void (*acquire_volume)(struct fscache_volume *volume);
- /* pin an object in the cache */
- int (*pin_object)(struct fscache_object *object);
+ /* Free the cache's data attached to a volume */
+ void (*free_volume)(struct fscache_volume *volume);
- /* unpin an object in the cache */
- void (*unpin_object)(struct fscache_object *object);
+ /* Look up a cookie in the cache */
+ bool (*lookup_cookie)(struct fscache_cookie *cookie);
- /* check the consistency between the backing cache and the FS-Cache
- * cookie */
- int (*check_consistency)(struct fscache_operation *op);
+ /* Withdraw an object without any cookie access counts held */
+ void (*withdraw_cookie)(struct fscache_cookie *cookie);
- /* store the updated auxiliary data on an object */
- void (*update_object)(struct fscache_object *object);
+ /* Change the size of a data object */
+ void (*resize_cookie)(struct netfs_cache_resources *cres,
+ loff_t new_size);
/* Invalidate an object */
- void (*invalidate_object)(struct fscache_operation *op);
-
- /* discard the resources pinned by an object and effect retirement if
- * necessary */
- void (*drop_object)(struct fscache_object *object);
-
- /* dispose of a reference to an object */
- void (*put_object)(struct fscache_object *object,
- enum fscache_obj_ref_trace why);
-
- /* sync a cache */
- void (*sync_cache)(struct fscache_cache *cache);
-
- /* notification that the attributes of a non-index object (such as
- * i_size) have changed */
- int (*attr_changed)(struct fscache_object *object);
-
- /* reserve space for an object's data and associated metadata */
- int (*reserve_space)(struct fscache_object *object, loff_t i_size);
-
- /* request a backing block for a page be read or allocated in the
- * cache */
- fscache_page_retrieval_func_t read_or_alloc_page;
-
- /* request backing blocks for a list of pages be read or allocated in
- * the cache */
- fscache_pages_retrieval_func_t read_or_alloc_pages;
-
- /* request a backing block for a page be allocated in the cache so that
- * it can be written directly */
- fscache_page_retrieval_func_t allocate_page;
-
- /* request backing blocks for pages be allocated in the cache so that
- * they can be written directly */
- fscache_pages_retrieval_func_t allocate_pages;
-
- /* write a page to its backing block in the cache */
- int (*write_page)(struct fscache_storage *op, struct page *page);
-
- /* detach backing block from a page (optional)
- * - must release the cookie lock before returning
- * - may sleep
- */
- void (*uncache_page)(struct fscache_object *object,
- struct page *page);
-
- /* dissociate a cache from all the pages it was backing */
- void (*dissociate_pages)(struct fscache_cache *cache);
-
- /* Begin a read operation for the netfs lib */
- int (*begin_read_operation)(struct netfs_read_request *rreq,
- struct fscache_retrieval *op);
-};
+ bool (*invalidate_cookie)(struct fscache_cookie *cookie);
-extern struct fscache_cookie fscache_fsdef_index;
+ /* Begin an operation for the netfs lib */
+ bool (*begin_operation)(struct netfs_cache_resources *cres,
+ enum fscache_want_state want_state);
-/*
- * Event list for fscache_object::{event_mask,events}
- */
-enum {
- FSCACHE_OBJECT_EV_NEW_CHILD, /* T if object has a new child */
- FSCACHE_OBJECT_EV_PARENT_READY, /* T if object's parent is ready */
- FSCACHE_OBJECT_EV_UPDATE, /* T if object should be updated */
- FSCACHE_OBJECT_EV_INVALIDATE, /* T if cache requested object invalidation */
- FSCACHE_OBJECT_EV_CLEARED, /* T if accessors all gone */
- FSCACHE_OBJECT_EV_ERROR, /* T if fatal error occurred during processing */
- FSCACHE_OBJECT_EV_KILL, /* T if netfs relinquished or cache withdrew object */
- NR_FSCACHE_OBJECT_EVENTS
+ /* Prepare to write to a live cache object */
+ void (*prepare_to_write)(struct fscache_cookie *cookie);
};
-#define FSCACHE_OBJECT_EVENTS_MASK ((1UL << NR_FSCACHE_OBJECT_EVENTS) - 1)
-
-/*
- * States for object state machine.
- */
-struct fscache_transition {
- unsigned long events;
- const struct fscache_state *transit_to;
-};
-
-struct fscache_state {
- char name[24];
- char short_name[8];
- const struct fscache_state *(*work)(struct fscache_object *object,
- int event);
- const struct fscache_transition transitions[];
-};
+extern struct workqueue_struct *fscache_wq;
+extern wait_queue_head_t fscache_clearance_waiters;
/*
- * on-disk cache file or index handle
+ * out-of-line cache backend functions
*/
-struct fscache_object {
- const struct fscache_state *state; /* Object state machine state */
- const struct fscache_transition *oob_table; /* OOB state transition table */
- int debug_id; /* debugging ID */
- int n_children; /* number of child objects */
- int n_ops; /* number of extant ops on object */
- int n_obj_ops; /* number of object ops outstanding on object */
- int n_in_progress; /* number of ops in progress */
- int n_exclusive; /* number of exclusive ops queued or in progress */
- atomic_t n_reads; /* number of read ops in progress */
- spinlock_t lock; /* state and operations lock */
-
- unsigned long lookup_jif; /* time at which lookup started */
- unsigned long oob_event_mask; /* OOB events this object is interested in */
- unsigned long event_mask; /* events this object is interested in */
- unsigned long events; /* events to be processed by this object
- * (order is important - using fls) */
-
- unsigned long flags;
-#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
-#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */
-#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */
-#define FSCACHE_OBJECT_IS_LIVE 3 /* T if object is not withdrawn or relinquished */
-#define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */
-#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
-#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
-#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
-#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */
-
- struct list_head cache_link; /* link in cache->object_list */
- struct hlist_node cookie_link; /* link in cookie->backing_objects */
- struct fscache_cache *cache; /* cache that supplied this object */
- struct fscache_cookie *cookie; /* netfs's file/index object */
- struct fscache_object *parent; /* parent object */
- struct work_struct work; /* attention scheduling record */
- struct list_head dependents; /* FIFO of dependent objects */
- struct list_head dep_link; /* link in parent's dependents list */
- struct list_head pending_ops; /* unstarted operations on this object */
-#ifdef CONFIG_FSCACHE_OBJECT_LIST
- struct rb_node objlist_link; /* link in global object list */
-#endif
- pgoff_t store_limit; /* current storage limit */
- loff_t store_limit_l; /* current storage limit */
-};
-
-extern void fscache_object_init(struct fscache_object *, struct fscache_cookie *,
- struct fscache_cache *);
-extern void fscache_object_destroy(struct fscache_object *);
-
-extern void fscache_object_lookup_negative(struct fscache_object *object);
-extern void fscache_obtained_object(struct fscache_object *object);
-
-static inline bool fscache_object_is_live(struct fscache_object *object)
-{
- return test_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
-}
-
-static inline bool fscache_object_is_dying(struct fscache_object *object)
-{
- return !fscache_object_is_live(object);
-}
-
-static inline bool fscache_object_is_available(struct fscache_object *object)
-{
- return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
-}
+extern struct rw_semaphore fscache_addremove_sem;
+extern struct fscache_cache *fscache_acquire_cache(const char *name);
+extern void fscache_relinquish_cache(struct fscache_cache *cache);
+extern int fscache_add_cache(struct fscache_cache *cache,
+ const struct fscache_cache_ops *ops,
+ void *cache_priv);
+extern void fscache_withdraw_cache(struct fscache_cache *cache);
+extern void fscache_withdraw_volume(struct fscache_volume *volume);
+extern void fscache_withdraw_cookie(struct fscache_cookie *cookie);
-static inline bool fscache_cache_is_broken(struct fscache_object *object)
-{
- return test_bit(FSCACHE_IOERROR, &object->cache->flags);
-}
+extern void fscache_io_error(struct fscache_cache *cache);
-static inline bool fscache_object_is_active(struct fscache_object *object)
-{
- return fscache_object_is_available(object) &&
- fscache_object_is_live(object) &&
- !fscache_cache_is_broken(object);
-}
+extern struct fscache_volume *
+fscache_try_get_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where);
+extern void fscache_put_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where);
+extern void fscache_end_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+
+extern struct fscache_cookie *fscache_get_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where);
+extern void fscache_put_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where);
+extern void fscache_end_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+extern void fscache_cookie_lookup_negative(struct fscache_cookie *cookie);
+extern void fscache_resume_after_invalidation(struct fscache_cookie *cookie);
+extern void fscache_caching_failed(struct fscache_cookie *cookie);
+extern bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
+ enum fscache_want_state state);
/**
- * fscache_object_destroyed - Note destruction of an object in a cache
- * @cache: The cache from which the object came
+ * fscache_cookie_state - Read the state of a cookie
+ * @cookie: The cookie to query
*
- * Note the destruction and deallocation of an object record in a cache.
+ * Get the state of a cookie, imposing an ordering between the cookie contents
+ * and the state value. Paired with fscache_set_cookie_state().
*/
-static inline void fscache_object_destroyed(struct fscache_cache *cache)
+static inline
+enum fscache_cookie_state fscache_cookie_state(struct fscache_cookie *cookie)
{
- if (atomic_dec_and_test(&cache->object_count))
- wake_up_all(&fscache_cache_cleared_wq);
+ return smp_load_acquire(&cookie->state);
}
/**
- * fscache_object_lookup_error - Note an object encountered an error
- * @object: The object on which the error was encountered
+ * fscache_get_key - Get a pointer to the cookie key
+ * @cookie: The cookie to query
*
- * Note that an object encountered a fatal error (usually an I/O error) and
- * that it should be withdrawn as soon as possible.
+ * Return a pointer to where a cookie's key is stored.
*/
-static inline void fscache_object_lookup_error(struct fscache_object *object)
+static inline void *fscache_get_key(struct fscache_cookie *cookie)
{
- set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events);
+ if (cookie->key_len <= sizeof(cookie->inline_key))
+ return cookie->inline_key;
+ else
+ return cookie->key;
}
-/**
- * fscache_set_store_limit - Set the maximum size to be stored in an object
- * @object: The object to set the maximum on
- * @i_size: The limit to set in bytes
- *
- * Set the maximum size an object is permitted to reach, implying the highest
- * byte that may be written. Intended to be called by the attr_changed() op.
- *
- * See Documentation/filesystems/caching/backend-api.rst for a complete
- * description.
- */
-static inline
-void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
+static inline struct fscache_cookie *fscache_cres_cookie(struct netfs_cache_resources *cres)
{
- object->store_limit_l = i_size;
- object->store_limit = i_size >> PAGE_SHIFT;
- if (i_size & ~PAGE_MASK)
- object->store_limit++;
+ return cres->cache_priv;
}
/**
- * fscache_end_io - End a retrieval operation on a page
- * @op: The FS-Cache operation covering the retrieval
- * @page: The page that was to be fetched
- * @error: The error code (0 if successful)
+ * fscache_count_object - Tell fscache that an object has been added
+ * @cache: The cache to account to
*
- * Note the end of an operation to retrieve a page, as covered by a particular
- * operation record.
+ * Tell fscache that an object has been added to the cache. This prevents the
+ * cache from tearing down the cache structure until the object is uncounted.
*/
-static inline void fscache_end_io(struct fscache_retrieval *op,
- struct page *page, int error)
+static inline void fscache_count_object(struct fscache_cache *cache)
{
- op->end_io_func(page, op->context, error);
-}
-
-static inline void __fscache_use_cookie(struct fscache_cookie *cookie)
-{
- atomic_inc(&cookie->n_active);
+ atomic_inc(&cache->object_count);
}
/**
- * fscache_use_cookie - Request usage of cookie attached to an object
- * @object: Object description
- *
- * Request usage of the cookie attached to an object. NULL is returned if the
- * relinquishment had reduced the cookie usage count to 0.
+ * fscache_uncount_object - Tell fscache that an object has been removed
+ * @cache: The cache to account to
+ *
+ * Tell fscache that an object has been removed from the cache and will no
+ * longer be accessed. After this point, the cache cookie may be destroyed.
*/
-static inline bool fscache_use_cookie(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
- return atomic_inc_not_zero(&cookie->n_active) != 0;
-}
-
-static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie)
-{
- return atomic_dec_and_test(&cookie->n_active);
-}
-
-static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie)
+static inline void fscache_uncount_object(struct fscache_cache *cache)
{
- wake_up_var(&cookie->n_active);
+ if (atomic_dec_and_test(&cache->object_count))
+ wake_up_all(&fscache_clearance_waiters);
}
/**
- * fscache_unuse_cookie - Cease usage of cookie attached to an object
- * @object: Object description
- *
- * Cease usage of the cookie attached to an object. When the users count
- * reaches zero then the cookie relinquishment will be permitted to proceed.
- */
-static inline void fscache_unuse_cookie(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
- if (__fscache_unuse_cookie(cookie))
- __fscache_wake_unused_cookie(cookie);
-}
-
-/*
- * out-of-line cache backend functions
- */
-extern __printf(3, 4)
-void fscache_init_cache(struct fscache_cache *cache,
- const struct fscache_cache_ops *ops,
- const char *idfmt, ...);
-
-extern int fscache_add_cache(struct fscache_cache *cache,
- struct fscache_object *fsdef,
- const char *tagname);
-extern void fscache_withdraw_cache(struct fscache_cache *cache);
-
-extern void fscache_io_error(struct fscache_cache *cache);
-
-extern void fscache_mark_page_cached(struct fscache_retrieval *op,
- struct page *page);
-
-extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
- struct pagevec *pagevec);
-
-extern bool fscache_object_sleep_till_congested(signed long *timeoutp);
-
-extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
- const void *data,
- uint16_t datalen,
- loff_t object_size);
-
-extern void fscache_object_retrying_stale(struct fscache_object *object);
-
-enum fscache_why_object_killed {
- FSCACHE_OBJECT_IS_STALE,
- FSCACHE_OBJECT_NO_SPACE,
- FSCACHE_OBJECT_WAS_RETIRED,
- FSCACHE_OBJECT_WAS_CULLED,
-};
-extern void fscache_object_mark_killed(struct fscache_object *object,
- enum fscache_why_object_killed why);
+ * fscache_wait_for_objects - Wait for all objects to be withdrawn
+ * @cache: The cache to query
+ *
+ * Wait for all extant objects in a cache to finish being withdrawn
+ * and go away.
+ */
+static inline void fscache_wait_for_objects(struct fscache_cache *cache)
+{
+ wait_event(fscache_clearance_waiters,
+ atomic_read(&cache->object_count) == 0);
+}
+
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_read;
+extern atomic_t fscache_n_write;
+extern atomic_t fscache_n_no_write_space;
+extern atomic_t fscache_n_no_create_space;
+extern atomic_t fscache_n_culled;
+extern atomic_t fscache_n_dio_misfit;
+#define fscache_count_read() atomic_inc(&fscache_n_read)
+#define fscache_count_write() atomic_inc(&fscache_n_write)
+#define fscache_count_no_write_space() atomic_inc(&fscache_n_no_write_space)
+#define fscache_count_no_create_space() atomic_inc(&fscache_n_no_create_space)
+#define fscache_count_culled() atomic_inc(&fscache_n_culled)
+#define fscache_count_dio_misfit() atomic_inc(&fscache_n_dio_misfit)
+#else
+#define fscache_count_read() do {} while(0)
+#define fscache_count_write() do {} while(0)
+#define fscache_count_no_write_space() do {} while(0)
+#define fscache_count_no_create_space() do {} while(0)
+#define fscache_count_culled() do {} while(0)
+#define fscache_count_dio_misfit() do {} while(0)
+#endif
#endif /* _LINUX_FSCACHE_CACHE_H */
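/*
 * Usage sketch (illustrative only, not part of the patch above): the shape
 * of a cache backend built on the reworked API.  "examplecache" and its
 * callbacks are hypothetical and assumed to be implemented elsewhere; a
 * real backend such as cachefiles implements the full fscache_cache_ops.
 */
#include <linux/fscache-cache.h>

static void examplecache_acquire_volume(struct fscache_volume *volume);
static void examplecache_free_volume(struct fscache_volume *volume);
static bool examplecache_lookup_cookie(struct fscache_cookie *cookie);
static void examplecache_withdraw_cookie(struct fscache_cookie *cookie);

static const struct fscache_cache_ops examplecache_ops = {
	.name			= "examplecache",
	.acquire_volume		= examplecache_acquire_volume,
	.free_volume		= examplecache_free_volume,
	.lookup_cookie		= examplecache_lookup_cookie,
	.withdraw_cookie	= examplecache_withdraw_cookie,
};

static int examplecache_bind(void)
{
	struct fscache_cache *cache;

	/* Look up or create the cache cookie, then bring it live. */
	cache = fscache_acquire_cache("examplecache");
	if (IS_ERR(cache))
		return PTR_ERR(cache);
	return fscache_add_cache(cache, &examplecache_ops, NULL);
}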
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index abc1c4737fb8..58fdb9605425 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* General filesystem caching interface
*
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* NOTE!!! See:
@@ -15,144 +15,128 @@
#define _LINUX_FSCACHE_H
#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/pagemap.h>
-#include <linux/pagevec.h>
-#include <linux/list_bl.h>
#include <linux/netfs.h>
+#include <linux/writeback.h>
#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
+#define __fscache_available (1)
#define fscache_available() (1)
+#define fscache_volume_valid(volume) (volume)
#define fscache_cookie_valid(cookie) (cookie)
+#define fscache_resources_valid(cres) ((cres)->cache_priv)
+#define fscache_cookie_enabled(cookie) (cookie && !test_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags))
#else
+#define __fscache_available (0)
#define fscache_available() (0)
+#define fscache_volume_valid(volume) (0)
#define fscache_cookie_valid(cookie) (0)
+#define fscache_resources_valid(cres) (false)
+#define fscache_cookie_enabled(cookie) (0)
#endif
-
-/* pattern used to fill dead space in an index entry */
-#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79
-
-struct pagevec;
-struct fscache_cache_tag;
struct fscache_cookie;
-struct fscache_netfs;
-struct netfs_read_request;
-
-typedef void (*fscache_rw_complete_t)(struct page *page,
- void *context,
- int error);
-
-/* result of index entry consultation */
-enum fscache_checkaux {
- FSCACHE_CHECKAUX_OKAY, /* entry okay as is */
- FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */
- FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */
-};
-/*
- * fscache cookie definition
- */
-struct fscache_cookie_def {
- /* name of cookie type */
- char name[16];
-
- /* cookie type */
- uint8_t type;
-#define FSCACHE_COOKIE_TYPE_INDEX 0
-#define FSCACHE_COOKIE_TYPE_DATAFILE 1
-
- /* select the cache into which to insert an entry in this index
- * - optional
- * - should return a cache identifier or NULL to cause the cache to be
- * inherited from the parent if possible or the first cache picked
- * for a non-index file if not
- */
- struct fscache_cache_tag *(*select_cache)(
- const void *parent_netfs_data,
- const void *cookie_netfs_data);
-
- /* consult the netfs about the state of an object
- * - this function can be absent if the index carries no state data
- * - the netfs data from the cookie being used as the target is
- * presented, as is the auxiliary data and the object size
- */
- enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
- const void *data,
- uint16_t datalen,
- loff_t object_size);
-
- /* get an extra reference on a read context
- * - this function can be absent if the completion function doesn't
- * require a context
- */
- void (*get_context)(void *cookie_netfs_data, void *context);
+#define FSCACHE_ADV_SINGLE_CHUNK 0x01 /* The object is a single chunk of data */
+#define FSCACHE_ADV_WRITE_CACHE 0x00 /* Do cache if written to locally */
+#define FSCACHE_ADV_WRITE_NOCACHE 0x02 /* Don't cache if written to locally */
+#define FSCACHE_ADV_WANT_CACHE_SIZE 0x04 /* Retrieve cache size at runtime */
- /* release an extra reference on a read context
- * - this function can be absent if the completion function doesn't
- * require a context
- */
- void (*put_context)(void *cookie_netfs_data, void *context);
+#define FSCACHE_INVAL_DIO_WRITE 0x01 /* Invalidate due to DIO write */
- /* indicate page that now have cache metadata retained
- * - this function should mark the specified page as now being cached
- * - the page will have been marked with PG_fscache before this is
- * called, so this is optional
- */
- void (*mark_page_cached)(void *cookie_netfs_data,
- struct address_space *mapping,
- struct page *page);
+enum fscache_want_state {
+ FSCACHE_WANT_PARAMS,
+ FSCACHE_WANT_WRITE,
+ FSCACHE_WANT_READ,
};
/*
- * fscache cached network filesystem type
- * - name, version and ops must be filled in before registration
- * - all other fields will be set during registration
- */
-struct fscache_netfs {
- uint32_t version; /* indexing version */
- const char *name; /* filesystem name */
- struct fscache_cookie *primary_index;
+ * Data object state.
+ */
+enum fscache_cookie_state {
+ FSCACHE_COOKIE_STATE_QUIESCENT, /* The cookie is uncached */
+ FSCACHE_COOKIE_STATE_LOOKING_UP, /* The cache object is being looked up */
+ FSCACHE_COOKIE_STATE_CREATING, /* The cache object is being created */
+ FSCACHE_COOKIE_STATE_ACTIVE, /* The cache is active, readable and writable */
+ FSCACHE_COOKIE_STATE_INVALIDATING, /* The cache is being invalidated */
+ FSCACHE_COOKIE_STATE_FAILED, /* The cache failed, withdraw to clear */
+ FSCACHE_COOKIE_STATE_LRU_DISCARDING, /* The cookie is being discarded by the LRU */
+ FSCACHE_COOKIE_STATE_WITHDRAWING, /* The cookie is being withdrawn */
+ FSCACHE_COOKIE_STATE_RELINQUISHING, /* The cookie is being relinquished */
+ FSCACHE_COOKIE_STATE_DROPPED, /* The cookie has been dropped */
+#define FSCACHE_COOKIE_STATE__NR (FSCACHE_COOKIE_STATE_DROPPED + 1)
+} __attribute__((mode(byte)));
+
+/*
+ * Volume representation cookie.
+ */
+struct fscache_volume {
+ refcount_t ref;
+ atomic_t n_cookies; /* Number of data cookies in volume */
+ atomic_t n_accesses; /* Number of cache accesses in progress */
+ unsigned int debug_id;
+ unsigned int key_hash; /* Hash of key string */
+ u8 *key; /* Volume ID, eg. "afs@example.com@1234" */
+ struct list_head proc_link; /* Link in /proc/fs/fscache/volumes */
+ struct hlist_bl_node hash_link; /* Link in hash table */
+ struct work_struct work;
+ struct fscache_cache *cache; /* The cache in which this resides */
+ void *cache_priv; /* Cache private data */
+ spinlock_t lock;
+ unsigned long flags;
+#define FSCACHE_VOLUME_RELINQUISHED 0 /* Volume is being cleaned up */
+#define FSCACHE_VOLUME_INVALIDATE 1 /* Volume was invalidated */
+#define FSCACHE_VOLUME_COLLIDED_WITH 2 /* Volume was collided with */
+#define FSCACHE_VOLUME_ACQUIRE_PENDING 3 /* Volume is waiting to complete acquisition */
+#define FSCACHE_VOLUME_CREATING 4 /* Volume is being created on disk */
+ u8 coherency_len; /* Length of the coherency data */
+ u8 coherency[]; /* Coherency data */
};
/*
- * data file or index object cookie
+ * Data file representation cookie.
* - a file will only appear in one cache
* - a request to cache a file may or may not be honoured, subject to
* constraints such as disk space
* - indices are created on disk just-in-time
*/
struct fscache_cookie {
- atomic_t usage; /* number of users of this cookie */
- atomic_t n_children; /* number of children of this cookie */
- atomic_t n_active; /* number of active users of netfs ptrs */
+ refcount_t ref;
+ atomic_t n_active; /* number of active users of cookie */
+ atomic_t n_accesses; /* Number of cache accesses in progress */
+ unsigned int debug_id;
+ unsigned int inval_counter; /* Number of invalidations made */
spinlock_t lock;
- spinlock_t stores_lock; /* lock on page store tree */
- struct hlist_head backing_objects; /* object(s) backing this file/index */
- const struct fscache_cookie_def *def; /* definition */
- struct fscache_cookie *parent; /* parent of this entry */
+ struct fscache_volume *volume; /* Parent volume of this file. */
+ void *cache_priv; /* Cache-side representation */
struct hlist_bl_node hash_link; /* Link in hash table */
- void *netfs_data; /* back pointer to netfs */
- struct radix_tree_root stores; /* pages to be stored on this cookie */
-#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
-#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
-
+ struct list_head proc_link; /* Link in proc list */
+ struct list_head commit_link; /* Link in commit queue */
+ struct work_struct work; /* Commit/relinq/withdraw work */
+ loff_t object_size; /* Size of the netfs object */
+ unsigned long unused_at; /* Time at which unused (jiffies) */
unsigned long flags;
-#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
-#define FSCACHE_COOKIE_NO_DATA_YET 1 /* T if new object with no cached data yet */
-#define FSCACHE_COOKIE_UNAVAILABLE 2 /* T if cookie is unavailable (error, etc) */
-#define FSCACHE_COOKIE_INVALIDATING 3 /* T if cookie is being invalidated */
-#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */
-#define FSCACHE_COOKIE_ENABLED 5 /* T if cookie is enabled */
-#define FSCACHE_COOKIE_ENABLEMENT_LOCK 6 /* T if cookie is being en/disabled */
-#define FSCACHE_COOKIE_AUX_UPDATED 8 /* T if the auxiliary data was updated */
-#define FSCACHE_COOKIE_ACQUIRED 9 /* T if cookie is in use */
-#define FSCACHE_COOKIE_RELINQUISHING 10 /* T if cookie is being relinquished */
-
- u8 type; /* Type of object */
+#define FSCACHE_COOKIE_RELINQUISHED 0 /* T if cookie has been relinquished */
+#define FSCACHE_COOKIE_RETIRED 1 /* T if this cookie has retired on relinq */
+#define FSCACHE_COOKIE_IS_CACHING 2 /* T if this cookie is cached */
+#define FSCACHE_COOKIE_NO_DATA_TO_READ 3 /* T if this cookie has nothing to read */
+#define FSCACHE_COOKIE_NEEDS_UPDATE 4 /* T if attrs have been updated */
+#define FSCACHE_COOKIE_HAS_BEEN_CACHED 5 /* T if cookie needs withdraw-on-relinq */
+#define FSCACHE_COOKIE_DISABLED 6 /* T if cookie has been disabled */
+#define FSCACHE_COOKIE_LOCAL_WRITE 7 /* T if cookie has been modified locally */
+#define FSCACHE_COOKIE_NO_ACCESS_WAKE 8 /* T if no wake when n_accesses goes 0 */
+#define FSCACHE_COOKIE_DO_RELINQUISH 9 /* T if this cookie needs relinquishment */
+#define FSCACHE_COOKIE_DO_WITHDRAW 10 /* T if this cookie needs withdrawing */
+#define FSCACHE_COOKIE_DO_LRU_DISCARD 11 /* T if this cookie needs LRU discard */
+#define FSCACHE_COOKIE_DO_PREP_TO_WRITE 12 /* T if cookie needs write preparation */
+#define FSCACHE_COOKIE_HAVE_DATA 13 /* T if this cookie has data stored */
+#define FSCACHE_COOKIE_IS_HASHED 14 /* T if this cookie is hashed */
+#define FSCACHE_COOKIE_DO_INVALIDATE 15 /* T if cookie needs invalidation */
+
+ enum fscache_cookie_state state;
+ u8 advice; /* FSCACHE_ADV_* */
u8 key_len; /* Length of index key */
u8 aux_len; /* Length of auxiliary data */
- u32 key_hash; /* Hash of parent, type, key, len */
+ u32 key_hash; /* Hash of volume, key, len */
union {
void *key; /* Index key */
u8 inline_key[16]; /* - If the key is short enough */
@@ -163,11 +147,6 @@ struct fscache_cookie {
};
};
-static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
-{
- return test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
-}
-
/*
* slow-path functions for when there is actually caching available, and the
* netfs does actually have a valid token
@@ -175,349 +154,295 @@ static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
* - these are undefined symbols when FS-Cache is not configured and the
* optimiser takes care of not using them
*/
-extern int __fscache_register_netfs(struct fscache_netfs *);
-extern void __fscache_unregister_netfs(struct fscache_netfs *);
-extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *);
-extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
+extern struct fscache_volume *__fscache_acquire_volume(const char *, const char *,
+ const void *, size_t);
+extern void __fscache_relinquish_volume(struct fscache_volume *, const void *, bool);
extern struct fscache_cookie *__fscache_acquire_cookie(
- struct fscache_cookie *,
- const struct fscache_cookie_def *,
+ struct fscache_volume *,
+ u8,
const void *, size_t,
const void *, size_t,
- void *, loff_t, bool);
-extern void __fscache_relinquish_cookie(struct fscache_cookie *, const void *, bool);
-extern int __fscache_check_consistency(struct fscache_cookie *, const void *);
-extern void __fscache_update_cookie(struct fscache_cookie *, const void *);
-extern int __fscache_attr_changed(struct fscache_cookie *);
-extern void __fscache_invalidate(struct fscache_cookie *);
-extern void __fscache_wait_on_invalidate(struct fscache_cookie *);
-
-#ifdef FSCACHE_USE_NEW_IO_API
-extern int __fscache_begin_read_operation(struct netfs_read_request *, struct fscache_cookie *);
-#else
-extern int __fscache_read_or_alloc_page(struct fscache_cookie *,
- struct page *,
- fscache_rw_complete_t,
- void *,
- gfp_t);
-extern int __fscache_read_or_alloc_pages(struct fscache_cookie *,
- struct address_space *,
- struct list_head *,
- unsigned *,
- fscache_rw_complete_t,
- void *,
- gfp_t);
-extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
-extern int __fscache_write_page(struct fscache_cookie *, struct page *, loff_t, gfp_t);
-extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
-extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
-extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
-extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
- gfp_t);
-extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
- struct inode *);
-extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
- struct list_head *pages);
-#endif /* FSCACHE_USE_NEW_IO_API */
-
-extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool);
-extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t,
- bool (*)(void *), void *);
+ loff_t);
+extern void __fscache_use_cookie(struct fscache_cookie *, bool);
+extern void __fscache_unuse_cookie(struct fscache_cookie *, const void *, const loff_t *);
+extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool);
+extern void __fscache_resize_cookie(struct fscache_cookie *, loff_t);
+extern void __fscache_invalidate(struct fscache_cookie *, const void *, loff_t, unsigned int);
+extern int __fscache_begin_read_operation(struct netfs_cache_resources *, struct fscache_cookie *);
+extern int __fscache_begin_write_operation(struct netfs_cache_resources *, struct fscache_cookie *);
+
+void __fscache_write_to_cache(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len, loff_t i_size,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv,
+ bool using_pgpriv2, bool cond);
+extern void __fscache_clear_page_bits(struct address_space *, loff_t, size_t);
/**
- * fscache_register_netfs - Register a filesystem as desiring caching services
- * @netfs: The description of the filesystem
+ * fscache_acquire_volume - Register a volume as desiring caching services
+ * @volume_key: An identification string for the volume
+ * @cache_name: The name of the cache to use (or NULL for the default)
+ * @coherency_data: Piece of arbitrary coherency data to check (or NULL)
+ * @coherency_len: The size of the coherency data
*
- * Register a filesystem as desiring caching services if they're available.
+ * Register a volume as desiring caching services if they're available. The
+ * caller must provide an identifier for the volume and may also indicate which
+ * cache it should be in. If a preexisting volume entry is found in the cache,
+ * the coherency data must match; otherwise the entry will be invalidated.
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Returns a cookie pointer on success, -ENOMEM if out of memory or -EBUSY if a
+ * cache volume of that name is already acquired. Note that "NULL" is a valid
+ * cookie pointer and can be returned if caching is refused.
*/
static inline
-int fscache_register_netfs(struct fscache_netfs *netfs)
+struct fscache_volume *fscache_acquire_volume(const char *volume_key,
+ const char *cache_name,
+ const void *coherency_data,
+ size_t coherency_len)
{
- if (fscache_available())
- return __fscache_register_netfs(netfs);
- else
- return 0;
+ if (!fscache_available())
+ return NULL;
+ return __fscache_acquire_volume(volume_key, cache_name,
+ coherency_data, coherency_len);
}
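/*
 * Usage sketch (illustrative only, not part of the patch): a netfs
 * acquiring its volume cookie at mount time.  The key string format and
 * the examplefs_sb type are hypothetical.
 */
struct examplefs_sb {
	struct fscache_volume *volume;
};

static int examplefs_acquire_volume(struct examplefs_sb *sbi)
{
	struct fscache_volume *volume;

	volume = fscache_acquire_volume("examplefs,server1,share1",
					NULL /* default cache */,
					NULL, 0 /* no coherency data */);
	if (IS_ERR(volume))
		return PTR_ERR(volume);	/* e.g. -EBUSY on a name collision */
	sbi->volume = volume;		/* may be NULL: caching refused */
	return 0;
}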
/**
- * fscache_unregister_netfs - Indicate that a filesystem no longer desires
- * caching services
- * @netfs: The description of the filesystem
- *
- * Indicate that a filesystem no longer desires caching services for the
- * moment.
+ * fscache_relinquish_volume - Cease caching a volume
+ * @volume: The volume cookie
+ * @coherency_data: Piece of arbitrary coherency data to set (or NULL)
+ * @invalidate: True if the volume should be invalidated
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Indicate that a filesystem no longer desires caching services for a volume.
+ * The caller must have relinquished all file cookies prior to calling this.
+ * The stored coherency data is updated.
*/
static inline
-void fscache_unregister_netfs(struct fscache_netfs *netfs)
+void fscache_relinquish_volume(struct fscache_volume *volume,
+ const void *coherency_data,
+ bool invalidate)
{
- if (fscache_available())
- __fscache_unregister_netfs(netfs);
+ if (fscache_volume_valid(volume))
+ __fscache_relinquish_volume(volume, coherency_data, invalidate);
}
/**
- * fscache_lookup_cache_tag - Look up a cache tag
- * @name: The name of the tag to search for
+ * fscache_acquire_cookie - Acquire a cookie to represent a cache object
+ * @volume: The volume in which to locate/create this cookie
+ * @advice: Advice flags (FSCACHE_COOKIE_ADV_*)
+ * @index_key: The index key for this cookie
+ * @index_key_len: Size of the index key
+ * @aux_data: The auxiliary data for the cookie (may be NULL)
+ * @aux_data_len: Size of the auxiliary data buffer
+ * @object_size: The initial size of object
*
- * Acquire a specific cache referral tag that can be used to select a specific
- * cache in which to cache an index.
+ * Acquire a cookie to represent a data file within the given cache volume.
*
* See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name)
+struct fscache_cookie *fscache_acquire_cookie(struct fscache_volume *volume,
+ u8 advice,
+ const void *index_key,
+ size_t index_key_len,
+ const void *aux_data,
+ size_t aux_data_len,
+ loff_t object_size)
{
- if (fscache_available())
- return __fscache_lookup_cache_tag(name);
- else
+ if (!fscache_volume_valid(volume))
return NULL;
+ return __fscache_acquire_cookie(volume, advice,
+ index_key, index_key_len,
+ aux_data, aux_data_len,
+ object_size);
}
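/*
 * Usage sketch (illustrative only, not part of the patch): binding a cache
 * object to an inode, using a hypothetical 64-bit file ID as the index key
 * and a change counter as the auxiliary (coherency) data.  The
 * examplefs_inode type is hypothetical.
 */
struct examplefs_inode {
	struct inode vfs_inode;
	u64 fid;		/* server-assigned file ID */
	u32 change_counter;	/* server data version, used for coherency */
	struct fscache_cookie *cookie;
};

static void examplefs_init_cookie(struct examplefs_inode *ei,
				  struct fscache_volume *volume)
{
	__be64 key = cpu_to_be64(ei->fid);
	__be32 aux = cpu_to_be32(ei->change_counter);

	/* May return NULL if caching is unavailable; the wrappers above
	 * handle that via fscache_cookie_valid(). */
	ei->cookie = fscache_acquire_cookie(volume, 0,
					    &key, sizeof(key),
					    &aux, sizeof(aux),
					    i_size_read(&ei->vfs_inode));
}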
/**
- * fscache_release_cache_tag - Release a cache tag
- * @tag: The tag to release
- *
- * Release a reference to a cache referral tag previously looked up.
+ * fscache_use_cookie - Request usage of cookie attached to an object
+ * @cookie: The cookie representing the cache object
+ * @will_modify: If cache is expected to be modified locally
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Request usage of the cookie attached to an object. The caller should tell
+ * the cache if the object's contents are about to be modified locally and then
+ * the cache can apply the policy that has been set to handle this case.
*/
-static inline
-void fscache_release_cache_tag(struct fscache_cache_tag *tag)
+static inline void fscache_use_cookie(struct fscache_cookie *cookie,
+ bool will_modify)
{
- if (fscache_available())
- __fscache_release_cache_tag(tag);
+ if (fscache_cookie_valid(cookie))
+ __fscache_use_cookie(cookie, will_modify);
}
/**
- * fscache_acquire_cookie - Acquire a cookie to represent a cache object
- * @parent: The cookie that's to be the parent of this one
- * @def: A description of the cache object, including callback operations
- * @index_key: The index key for this cookie
- * @index_key_len: Size of the index key
- * @aux_data: The auxiliary data for the cookie (may be NULL)
- * @aux_data_len: Size of the auxiliary data buffer
- * @netfs_data: An arbitrary piece of data to be kept in the cookie to
- * represent the cache object to the netfs
- * @object_size: The initial size of object
- * @enable: Whether or not to enable a data cookie immediately
- *
- * This function is used to inform FS-Cache about part of an index hierarchy
- * that can be used to locate files. This is done by requesting a cookie for
- * each index in the path to the file.
+ * fscache_unuse_cookie - Cease usage of cookie attached to an object
+ * @cookie: The cookie representing the cache object
+ * @aux_data: Updated auxiliary data (or NULL)
+ * @object_size: Revised size of the object (or NULL)
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Cease usage of the cookie attached to an object. When the user count
+ * reaches zero, the cookie relinquishment will be permitted to proceed.
*/
-static inline
-struct fscache_cookie *fscache_acquire_cookie(
- struct fscache_cookie *parent,
- const struct fscache_cookie_def *def,
- const void *index_key,
- size_t index_key_len,
- const void *aux_data,
- size_t aux_data_len,
- void *netfs_data,
- loff_t object_size,
- bool enable)
+static inline void fscache_unuse_cookie(struct fscache_cookie *cookie,
+ const void *aux_data,
+ const loff_t *object_size)
{
- if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent))
- return __fscache_acquire_cookie(parent, def,
- index_key, index_key_len,
- aux_data, aux_data_len,
- netfs_data, object_size, enable);
- else
- return NULL;
+ if (fscache_cookie_valid(cookie))
+ __fscache_unuse_cookie(cookie, aux_data, object_size);
}
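/*
 * Usage sketch (illustrative only, not part of the patch): pinning the
 * cookie for the lifetime of an open file and dropping it at release,
 * passing the final size back so the cache can trim the object.  The
 * examplefs_cookie() accessor is hypothetical.
 */
static struct fscache_cookie *examplefs_cookie(struct inode *inode);

static int examplefs_file_open(struct inode *inode, struct file *file)
{
	/* Tell the cache whether we may modify the object locally. */
	fscache_use_cookie(examplefs_cookie(inode),
			   file->f_mode & FMODE_WRITE);
	return 0;
}

static int examplefs_file_release(struct inode *inode, struct file *file)
{
	loff_t i_size = i_size_read(inode);

	fscache_unuse_cookie(examplefs_cookie(inode), NULL, &i_size);
	return 0;
}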
/**
* fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding
* it
* @cookie: The cookie being returned
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
* @retire: True if the cache object the cookie represents is to be discarded
*
* This function returns a cookie to the cache, forcibly discarding the
- * associated cache object if retire is set to true. The opportunity is
- * provided to update the auxiliary data in the cache before the object is
- * disconnected.
+ * associated cache object if retire is set to true.
*
* See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-void fscache_relinquish_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- bool retire)
+void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
{
if (fscache_cookie_valid(cookie))
- __fscache_relinquish_cookie(cookie, aux_data, retire);
+ __fscache_relinquish_cookie(cookie, retire);
}
-/**
- * fscache_check_consistency - Request validation of a cache's auxiliary data
- * @cookie: The cookie representing the cache object
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
- *
- * Request an consistency check from fscache, which passes the request to the
- * backing cache. The auxiliary data on the cookie will be updated first if
- * @aux_data is set.
- *
- * Returns 0 if consistent and -ESTALE if inconsistent. May also
- * return -ENOMEM and -ERESTARTSYS.
+/*
+ * Find the auxiliary data on a cookie.
*/
-static inline
-int fscache_check_consistency(struct fscache_cookie *cookie,
- const void *aux_data)
+static inline void *fscache_get_aux(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_check_consistency(cookie, aux_data);
+ if (cookie->aux_len <= sizeof(cookie->inline_aux))
+ return cookie->inline_aux;
else
- return 0;
+ return cookie->aux;
}
-/**
- * fscache_update_cookie - Request that a cache object be updated
- * @cookie: The cookie representing the cache object
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
- *
- * Request an update of the index data for the cache object associated with the
- * cookie. The auxiliary data on the cookie will be updated first if @aux_data
- * is set.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+/*
+ * Update the auxiliary data on a cookie.
*/
static inline
-void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data)
+void fscache_update_aux(struct fscache_cookie *cookie,
+ const void *aux_data, const loff_t *object_size)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_update_cookie(cookie, aux_data);
+ void *p = fscache_get_aux(cookie);
+
+ if (aux_data && p)
+ memcpy(p, aux_data, cookie->aux_len);
+ if (object_size)
+ cookie->object_size = *object_size;
}
-/**
- * fscache_pin_cookie - Pin a data-storage cache object in its cache
- * @cookie: The cookie representing the cache object
- *
- * Permit data-storage cache objects to be pinned in the cache.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_updates;
+#endif
+
static inline
-int fscache_pin_cookie(struct fscache_cookie *cookie)
+void __fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data,
+ const loff_t *object_size)
{
- return -ENOBUFS;
+#ifdef CONFIG_FSCACHE_STATS
+ atomic_inc(&fscache_n_updates);
+#endif
+ fscache_update_aux(cookie, aux_data, object_size);
+ smp_wmb();
+ set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
}
/**
- * fscache_pin_cookie - Unpin a data-storage cache object in its cache
+ * fscache_update_cookie - Request that a cache object be updated
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
+ * @object_size: The current size of the object (may be NULL)
*
- * Permit data-storage cache objects to be unpinned from the cache.
+ * Request an update of the index data for the cache object associated with the
+ * cookie. The auxiliary data on the cookie will be updated first if @aux_data
+ * is set, and the object size will be updated and the object possibly trimmed
+ * if @object_size is set.
*
* See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-void fscache_unpin_cookie(struct fscache_cookie *cookie)
+void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data,
+ const loff_t *object_size)
{
+ if (fscache_cookie_enabled(cookie))
+ __fscache_update_cookie(cookie, aux_data, object_size);
}
/**
- * fscache_attr_changed - Notify cache that an object's attributes changed
+ * fscache_resize_cookie - Request that a cache object be resized
* @cookie: The cookie representing the cache object
+ * @new_size: The new size of the object
*
- * Send a notification to the cache indicating that an object's attributes have
- * changed. This includes the data size. These attributes will be obtained
- * through the get_attr() cookie definition op.
+ * Request that the size of an object be changed.
*
* See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-int fscache_attr_changed(struct fscache_cookie *cookie)
+void fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_attr_changed(cookie);
- else
- return -ENOBUFS;
+ if (fscache_cookie_enabled(cookie))
+ __fscache_resize_cookie(cookie, new_size);
}
/**
* fscache_invalidate - Notify cache that an object needs invalidation
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
+ * @size: The revised size of the object.
+ * @flags: Invalidation flags (FSCACHE_INVAL_*)
*
* Notify the cache that an object needs to be invalidated and that it
- * should abort any retrievals or stores it is doing on the cache. The object
- * is then marked non-caching until such time as the invalidation is complete.
- *
- * This can be called with spinlocks held.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
-static inline
-void fscache_invalidate(struct fscache_cookie *cookie)
-{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_invalidate(cookie);
-}
-
-/**
- * fscache_wait_on_invalidate - Wait for invalidation to complete
- * @cookie: The cookie representing the cache object
+ * should abort any retrievals or stores it is doing on the cache. This
+ * increments inval_counter on the cookie, which can be used by the caller to
+ * reconsider I/O requests as they complete.
*
- * Wait for the invalidation of an object to complete.
+ * If @flags has FSCACHE_INVAL_DIO_WRITE set, this indicates that this is due
+ * to a direct I/O write and will cause caching to be disabled on this cookie
+ * until it is completely unused.
*
* See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-void fscache_wait_on_invalidate(struct fscache_cookie *cookie)
+void fscache_invalidate(struct fscache_cookie *cookie,
+ const void *aux_data, loff_t size, unsigned int flags)
{
- if (fscache_cookie_valid(cookie))
- __fscache_wait_on_invalidate(cookie);
+ if (fscache_cookie_enabled(cookie))
+ __fscache_invalidate(cookie, aux_data, size, flags);
}
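/*
 * Usage sketch (illustrative only, not part of the patch): knocking out
 * the cached contents ahead of a direct-I/O write, which is what the
 * FSCACHE_INVAL_DIO_WRITE flag is intended for.  examplefs_cookie() is the
 * hypothetical accessor from the earlier sketch.
 */
static void examplefs_begin_dio_write(struct inode *inode)
{
	fscache_invalidate(examplefs_cookie(inode), NULL,
			   i_size_read(inode), FSCACHE_INVAL_DIO_WRITE);
}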
/**
- * fscache_reserve_space - Reserve data space for a cached object
- * @cookie: The cookie representing the cache object
- * @i_size: The amount of space to be reserved
+ * fscache_operation_valid - Return the operations table if the resources are usable
+ * @cres: The resources to check.
*
- * Reserve an amount of space in the cache for the cache object attached to a
- * cookie so that a write to that object within the space can always be
- * honoured.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Returns a pointer to the operations table if usable or NULL if not.
*/
static inline
-int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size)
+const struct netfs_cache_ops *fscache_operation_valid(const struct netfs_cache_resources *cres)
{
- return -ENOBUFS;
+ return fscache_resources_valid(cres) ? cres->ops : NULL;
}
-#ifdef FSCACHE_USE_NEW_IO_API
-
/**
* fscache_begin_read_operation - Begin a read operation for the netfs lib
- * @rreq: The read request being undertaken
+ * @cres: The cache resources for the read being performed
* @cookie: The cookie representing the cache object
*
- * Begin a read operation on behalf of the netfs helper library. @rreq
- * indicates the read request to which the operation state should be attached;
- * @cookie indicates the cache object that will be accessed.
+ * Begin a read operation on behalf of the netfs helper library. @cres
+ * indicates the cache resources to which the operation state should be
+ * attached; @cookie indicates the cache object that will be accessed.
*
- * This is intended to be called from the ->begin_cache_operation() netfs lib
- * operation as implemented by the network filesystem.
+ * @cres->inval_counter is set from @cookie->inval_counter for comparison at
+ * the end of the operation. This allows invalidation during the operation to
+ * be detected by the caller.
*
* Returns:
* * 0 - Success
@@ -525,349 +450,204 @@ int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size)
* * Other error code from the cache, such as -ENOMEM.
*/
static inline
-int fscache_begin_read_operation(struct netfs_read_request *rreq,
+int fscache_begin_read_operation(struct netfs_cache_resources *cres,
struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_begin_read_operation(rreq, cookie);
+ if (fscache_cookie_enabled(cookie))
+ return __fscache_begin_read_operation(cres, cookie);
return -ENOBUFS;
}
-#else /* FSCACHE_USE_NEW_IO_API */
-
/**
- * fscache_read_or_alloc_page - Read a page from the cache or allocate a block
- * in which to store it
- * @cookie: The cookie representing the cache object
- * @page: The netfs page to fill if possible
- * @end_io_func: The callback to invoke when and if the page is filled
- * @context: An arbitrary piece of data to pass on to end_io_func()
- * @gfp: The conditions under which memory allocation should be made
- *
- * Read a page from the cache, or if that's not possible make a potential
- * one-block reservation in the cache into which the page may be stored once
- * fetched from the server.
- *
- * If the page is not backed by the cache object, or if it there's some reason
- * it can't be, -ENOBUFS will be returned and nothing more will be done for
- * that page.
+ * fscache_end_operation - End the read operation for the netfs lib
+ * @cres: The cache resources for the read operation
*
- * Else, if that page is backed by the cache, a read will be initiated directly
- * to the netfs's page and 0 will be returned by this function. The
- * end_io_func() callback will be invoked when the operation terminates on a
- * completion or failure. Note that the callback may be invoked before the
- * return.
- *
- * Else, if the page is unbacked, -ENODATA is returned and a block may have
- * been allocated in the cache.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Clean up the resources at the end of the read request.
*/
-static inline
-int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp)
+static inline void fscache_end_operation(struct netfs_cache_resources *cres)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_read_or_alloc_page(cookie, page, end_io_func,
- context, gfp);
- else
- return -ENOBUFS;
-}
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
-/**
- * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate
- * blocks in which to store them
- * @cookie: The cookie representing the cache object
- * @mapping: The netfs inode mapping to which the pages will be attached
- * @pages: A list of potential netfs pages to be filled
- * @nr_pages: Number of pages to be read and/or allocated
- * @end_io_func: The callback to invoke when and if each page is filled
- * @context: An arbitrary piece of data to pass on to end_io_func()
- * @gfp: The conditions under which memory allocation should be made
- *
- * Read a set of pages from the cache, or if that's not possible, attempt to
- * make a potential one-block reservation for each page in the cache into which
- * that page may be stored once fetched from the server.
- *
- * If some pages are not backed by the cache object, or if it there's some
- * reason they can't be, -ENOBUFS will be returned and nothing more will be
- * done for that pages.
- *
- * Else, if some of the pages are backed by the cache, a read will be initiated
- * directly to the netfs's page and 0 will be returned by this function. The
- * end_io_func() callback will be invoked when the operation terminates on a
- * completion or failure. Note that the callback may be invoked before the
- * return.
- *
- * Else, if a page is unbacked, -ENODATA is returned and a block may have
- * been allocated in the cache.
- *
- * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in
- * regard to different pages, the return values are prioritised in that order.
- * Any pages submitted for reading are removed from the pages list.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
-static inline
-int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_read_or_alloc_pages(cookie, mapping, pages,
- nr_pages, end_io_func,
- context, gfp);
- else
- return -ENOBUFS;
+ if (ops)
+ ops->end_operation(cres);
}
/**
- * fscache_alloc_page - Allocate a block in which to store a page
- * @cookie: The cookie representing the cache object
- * @page: The netfs page to allocate a page for
- * @gfp: The conditions under which memory allocation should be made
+ * fscache_read - Start a read from the cache.
+ * @cres: The cache resources to use
+ * @start_pos: The beginning file offset in the cache file
+ * @iter: The buffer to fill - and also the length
+ * @read_hole: How to handle a hole in the data.
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
*
- * Request Allocation a block in the cache in which to store a netfs page
- * without retrieving any contents from the cache.
+ * Start a read from the cache. @cres indicates the cache object to read from
+ * and must be obtained by a call to fscache_begin_read_operation() beforehand.
*
- * If the page is not backed by a file then -ENOBUFS will be returned and
- * nothing more will be done, and no reservation will be made.
+ * The data is read into the iterator, @iter, and that also indicates the size
+ * of the operation. @start_pos is the start position in the file, though if
+ * @read_hole is set appropriately, the cache can use SEEK_DATA to find the
+ * next piece of data, writing zeros for the hole into the iterator.
*
- * Else, a block will be allocated if one wasn't already, and 0 will be
- * returned
+ * Upon termination of the operation, @term_func will be called and supplied
+ * with @term_func_priv plus the amount of data written, if successful, or the
+ * error code otherwise.
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
-static inline
-int fscache_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_alloc_page(cookie, page, gfp);
- else
- return -ENOBUFS;
-}
-
-/**
- * fscache_readpages_cancel - Cancel read/alloc on pages
- * @cookie: The cookie representing the inode's cache object.
- * @pages: The netfs pages that we canceled write on in readpages()
+ * @read_hole indicates how a partially populated region in the cache should be
+ * handled. It can be one of a number of settings:
*
- * Uncache/unreserve the pages reserved earlier in readpages() via
- * fscache_readpages_or_alloc() and similar. In most successful caches in
- * readpages() this doesn't do anything. In cases when the underlying netfs's
- * readahead failed we need to clean up the pagelist (unmark and uncache).
+ * NETFS_READ_HOLE_IGNORE - Just try to read (may return a short read).
*
- * This function may sleep as it may have to clean up disk state.
+ * NETFS_READ_HOLE_FAIL - Give ENODATA if we encounter a hole.
*/
static inline
-void fscache_readpages_cancel(struct fscache_cookie *cookie,
- struct list_head *pages)
+int fscache_read(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ enum netfs_read_from_hole read_hole,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv)
{
- if (fscache_cookie_valid(cookie))
- __fscache_readpages_cancel(cookie, pages);
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+ return ops->read(cres, start_pos, iter, read_hole,
+ term_func, term_func_priv);
}
/**
- * fscache_write_page - Request storage of a page in the cache
+ * fscache_begin_write_operation - Begin a write operation for the netfs lib
+ * @cres: The cache resources for the write being performed
* @cookie: The cookie representing the cache object
- * @page: The netfs page to store
- * @object_size: Updated size of object
- * @gfp: The conditions under which memory allocation should be made
*
- * Request the contents of the netfs page be written into the cache. This
- * request may be ignored if no cache block is currently allocated, in which
- * case it will return -ENOBUFS.
+ * Begin a write operation on behalf of the netfs helper library. @cres
+ * indicates the cache resources to which the operation state should be
+ * attached; @cookie indicates the cache object that will be accessed.
*
- * If a cache block was already allocated, a write will be initiated and 0 will
- * be returned. The PG_fscache_write page bit is set immediately and will then
- * be cleared at the completion of the write to indicate the success or failure
- * of the operation. Note that the completion may happen before the return.
+ * @cres->inval_counter is set from @cookie->inval_counter for comparison at
+ * the end of the operation. This allows invalidation during the operation to
+ * be detected by the caller.
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Returns:
+ * * 0 - Success
+ * * -ENOBUFS - No caching available
+ * * Other error code from the cache, such as -ENOMEM.
*/
static inline
-int fscache_write_page(struct fscache_cookie *cookie,
- struct page *page,
- loff_t object_size,
- gfp_t gfp)
+int fscache_begin_write_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_write_page(cookie, page, object_size, gfp);
- else
- return -ENOBUFS;
+ if (fscache_cookie_enabled(cookie))
+ return __fscache_begin_write_operation(cres, cookie);
+ return -ENOBUFS;
}
/**
- * fscache_uncache_page - Indicate that caching is no longer required on a page
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that was being cached.
+ * fscache_write - Start a write to the cache.
+ * @cres: The cache resources to use
+ * @start_pos: The beginning file offset in the cache file
+ * @iter: The data to write - and also the length
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
*
- * Tell the cache that we no longer want a page to be cached and that it should
- * remove any knowledge of the netfs page it may have.
+ * Start a write to the cache. @cres indicates the cache object to write to and
+ * must be obtained by a call to fscache_begin_write_operation() beforehand.
*
- * Note that this cannot cancel any outstanding I/O operations between this
- * page and the cache.
+ * The data to be written is obtained from the iterator, @iter, and that also
+ * indicates the size of the operation. @start_pos is the start position in
+ * the file.
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Upon termination of the operation, @term_func will be called and supplied
+ * with @term_func_priv plus the amount of data written, if successful, or the
+ * error code otherwise.
*/
static inline
-void fscache_uncache_page(struct fscache_cookie *cookie,
- struct page *page)
+int fscache_write(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv)
{
- if (fscache_cookie_valid(cookie))
- __fscache_uncache_page(cookie, page);
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+ return ops->write(cres, start_pos, iter, term_func, term_func_priv);
}
/**
- * fscache_check_page_write - Ask if a page is being writing to the cache
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
- *
- * Ask the cache if a page is being written to the cache.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
-static inline
-bool fscache_check_page_write(struct fscache_cookie *cookie,
- struct page *page)
+ * fscache_clear_page_bits - Clear the PG_fscache bits from a set of pages
+ * @mapping: The pagecache of the netfs inode to use as the source
+ * @start: The start position in @mapping
+ * @len: The amount of data to unlock
+ * @caching: If PG_fscache has been set
+ *
+ * Clear the PG_fscache flag from a sequence of pages and wake up anyone who's
+ * waiting.
+ */
+static inline void fscache_clear_page_bits(struct address_space *mapping,
+ loff_t start, size_t len,
+ bool caching)
{
- if (fscache_cookie_valid(cookie))
- return __fscache_check_page_write(cookie, page);
- return false;
+ if (caching)
+ __fscache_clear_page_bits(mapping, start, len);
}
/**
- * fscache_wait_on_page_write - Wait for a page to complete writing to the cache
+ * fscache_write_to_cache - Save a write to the cache and clear PG_fscache
* @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
- *
- * Ask the cache to wake us up when a page is no longer being written to the
- * cache.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
-static inline
-void fscache_wait_on_page_write(struct fscache_cookie *cookie,
- struct page *page)
-{
- if (fscache_cookie_valid(cookie))
- __fscache_wait_on_page_write(cookie, page);
-}
-
-/**
- * fscache_maybe_release_page - Consider releasing a page, cancelling a store
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
- * @gfp: The gfp flags passed to releasepage()
- *
- * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's
- * releasepage() call. A storage request on the page may cancelled if it is
- * not currently being processed.
- *
- * The function returns true if the page no longer has a storage request on it,
- * and false if a storage request is left in place. If true is returned, the
- * page will have been passed to fscache_uncache_page(). If false is returned
- * the page cannot be freed yet.
- */
-static inline
-bool fscache_maybe_release_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
+ * @mapping: The pagecache of the netfs inode to use as the source
+ * @start: The start position in @mapping
+ * @len: The amount of data to write back
+ * @i_size: The new size of the inode
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
+ * @using_pgpriv2: If we're using PG_private_2 to mark in-progress write
+ * @caching: If we actually want to do the caching
+ *
+ * Helper function for a netfs to write dirty data from an inode into the cache
+ * object that's backing it.
+ *
+ * @start and @len describe the range of the data. This does not need to be
+ * page-aligned, but to satisfy DIO requirements, the cache may expand it up to
+ * the page boundaries on either end. All the pages covering the range must be
+ * marked with PG_fscache.
+ *
+ * If given, @term_func will be called upon completion and supplied with
+ * @term_func_priv. Note that if @using_pgpriv2 is set, the PG_private_2 flags
+ * will have been cleared by this point, so the netfs must retain its own pin
+ * on the mapping.
+ */
+static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len, loff_t i_size,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv,
+ bool using_pgpriv2, bool caching)
{
- if (fscache_cookie_valid(cookie) && PageFsCache(page))
- return __fscache_maybe_release_page(cookie, page, gfp);
- return true;
-}
+ if (caching)
+ __fscache_write_to_cache(cookie, mapping, start, len, i_size,
+ term_func, term_func_priv,
+ using_pgpriv2, caching);
+ else if (term_func)
+ term_func(term_func_priv, -ENOBUFS);
-/**
- * fscache_uncache_all_inode_pages - Uncache all an inode's pages
- * @cookie: The cookie representing the inode's cache object.
- * @inode: The inode to uncache pages from.
- *
- * Uncache all the pages in an inode that are marked PG_fscache, assuming them
- * to be associated with the given cookie.
- *
- * This function may sleep. It will wait for pages that are being written out
- * and will wait whilst the PG_fscache mark is removed by the cache.
- */
-static inline
-void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
- struct inode *inode)
-{
- if (fscache_cookie_valid(cookie))
- __fscache_uncache_all_inode_pages(cookie, inode);
}
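A hedged sketch of a writeback tail calling this helper once the pages covering the range are marked; mynetfs_copy_done and mynetfs_start_cache_copy are hypothetical:

    #include <linux/fs.h>
    #include <linux/fscache.h>

    static void mynetfs_copy_done(void *priv, ssize_t transferred_or_error,
    			      bool was_async)
    {
    	/* e.g. drop the pin on the mapping taken before the copy */
    }

    static void mynetfs_start_cache_copy(struct inode *inode,
    				     struct fscache_cookie *cookie,
    				     loff_t start, size_t len)
    {
    	fscache_write_to_cache(cookie, inode->i_mapping, start, len,
    			       i_size_read(inode),
    			       mynetfs_copy_done, NULL,
    			       true,	/* using_pgpriv2 */
    			       fscache_cookie_enabled(cookie));
    }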
-#endif /* FSCACHE_USE_NEW_IO_API */
-
/**
- * fscache_disable_cookie - Disable a cookie
- * @cookie: The cookie representing the cache object
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
- * @invalidate: Invalidate the backing object
- *
- * Disable a cookie from accepting further alloc, read, write, invalidate,
- * update or acquire operations. Outstanding operations can still be waited
- * upon and pages can still be uncached and the cookie relinquished.
- *
- * This will not return until all outstanding operations have completed.
+ * fscache_note_page_release - Note that a netfs page got released
+ * @cookie: The cookie corresponding to the file
*
- * If @invalidate is set, then the backing object will be invalidated and
- * detached, otherwise it will just be detached.
- *
- * If @aux_data is set, then auxiliary data will be updated from that.
- */
-static inline
-void fscache_disable_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- bool invalidate)
-{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_disable_cookie(cookie, aux_data, invalidate);
-}
-
-/**
- * fscache_enable_cookie - Reenable a cookie
- * @cookie: The cookie representing the cache object
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
- * @object_size: Current size of object
- * @can_enable: A function to permit enablement once lock is held
- * @data: Data for can_enable()
- *
- * Reenable a previously disabled cookie, allowing it to accept further alloc,
- * read, write, invalidate, update or acquire operations. An attempt will be
- * made to immediately reattach the cookie to a backing object. If @aux_data
- * is set, the auxiliary data attached to the cookie will be updated.
- *
- * The can_enable() function is called (if not NULL) once the enablement lock
- * is held to rule on whether enablement is still permitted to go ahead.
+ * Note that a page that has been copied to the cache has been released. This
+ * means that future reads will need to look in the cache to see if it's there.
*/
static inline
-void fscache_enable_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- loff_t object_size,
- bool (*can_enable)(void *data),
- void *data)
+void fscache_note_page_release(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie))
- __fscache_enable_cookie(cookie, aux_data, object_size,
- can_enable, data);
+ /* If we've written data to the cache (HAVE_DATA) and there wasn't any
+ * data in the cache when we started (NO_DATA_TO_READ), it may no
+ * longer be true that we can skip reading from the cache - so clear
+ * the flag that causes reads to be skipped.
+ */
+ if (cookie &&
+ test_bit(FSCACHE_COOKIE_HAVE_DATA, &cookie->flags) &&
+ test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags))
+ clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
}
#endif /* _LINUX_FSCACHE_H */
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 2ea1387bb497..516aba5b858b 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -18,10 +18,21 @@
#include <linux/slab.h>
#include <uapi/linux/fscrypt.h>
-#define FS_CRYPTO_BLOCK_SIZE 16
+/*
+ * The lengths of all file contents blocks must be divisible by this value.
+ * This is needed to ensure that all contents encryption modes will work, as
+ * some of the supported modes don't support arbitrarily byte-aligned messages.
+ *
+ * Since the needed alignment is 16 bytes, most filesystems will meet this
+ * requirement naturally, as typical block sizes are powers of 2. However, if a
+ * filesystem can generate arbitrarily byte-aligned block lengths (e.g., via
+ * compression), then it will need to pad to this alignment before encryption.
+ */
+#define FSCRYPT_CONTENTS_ALIGNMENT 16
union fscrypt_policy;
-struct fscrypt_info;
+struct fscrypt_inode_info;
+struct fs_parameter;
struct seq_file;
struct fscrypt_str {
@@ -47,40 +58,187 @@ struct fscrypt_name {
#define FSCRYPT_SET_CONTEXT_MAX_SIZE 40
#ifdef CONFIG_FS_ENCRYPTION
-/*
- * fscrypt superblock flags
- */
-#define FS_CFLG_OWN_PAGES (1U << 1)
-/*
- * crypto operations for filesystems
- */
+/* Crypto operations for filesystems */
struct fscrypt_operations {
- unsigned int flags;
- const char *key_prefix;
+ /*
+ * The offset of the pointer to struct fscrypt_inode_info in the
+ * filesystem-specific part of the inode, relative to the beginning of
+ * the common part of the inode (the 'struct inode').
+ */
+ ptrdiff_t inode_info_offs;
+
+ /*
+ * If set, then fs/crypto/ will allocate a global bounce page pool the
+ * first time an encryption key is set up for a file. The bounce page
+ * pool is required by the following functions:
+ *
+ * - fscrypt_encrypt_pagecache_blocks()
+ * - fscrypt_zeroout_range() for files not using inline crypto
+ *
+ * If the filesystem doesn't use those, it doesn't need to set this.
+ */
+ unsigned int needs_bounce_pages : 1;
+
+ /*
+ * If set, then fs/crypto/ will allow the use of encryption settings
+ * that assume inode numbers fit in 32 bits (i.e.
+ * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64}), provided that the other
+ * prerequisites for these settings are also met. This is only useful
+ * if the filesystem wants to support inline encryption hardware that is
+ * limited to 32-bit or 64-bit data unit numbers and where programming
+ * keyslots is very slow.
+ */
+ unsigned int has_32bit_inodes : 1;
+
+ /*
+ * If set, then fs/crypto/ will allow users to select a crypto data unit
+ * size that is less than the filesystem block size. This is done via
+ * the log2_data_unit_size field of the fscrypt policy. This flag is
+ * not compatible with filesystems that encrypt variable-length blocks
+	 * (i.e. blocks that aren't all equal to the filesystem's block size), for
+ * example as a result of compression. It's also not compatible with
+ * the fscrypt_encrypt_block_inplace() and
+ * fscrypt_decrypt_block_inplace() functions.
+ */
+ unsigned int supports_subblock_data_units : 1;
+
+ /*
+ * This field exists only for backwards compatibility reasons and should
+ * only be set by the filesystems that are setting it already. It
+ * contains the filesystem-specific key description prefix that is
+ * accepted for "logon" keys for v1 fscrypt policies. This
+ * functionality is deprecated in favor of the generic prefix
+ * "fscrypt:", which itself is deprecated in favor of the filesystem
+ * keyring ioctls such as FS_IOC_ADD_ENCRYPTION_KEY. Filesystems that
+ * are newly adding fscrypt support should not set this field.
+ */
+ const char *legacy_key_prefix;
+
+ /*
+ * Get the fscrypt context of the given inode.
+ *
+ * @inode: the inode whose context to get
+ * @ctx: the buffer into which to get the context
+ * @len: length of the @ctx buffer in bytes
+ *
+ * Return: On success, returns the length of the context in bytes; this
+ * may be less than @len. On failure, returns -ENODATA if the
+ * inode doesn't have a context, -ERANGE if the context is
+ * longer than @len, or another -errno code.
+ */
int (*get_context)(struct inode *inode, void *ctx, size_t len);
+
+ /*
+ * Set an fscrypt context on the given inode.
+ *
+ * @inode: the inode whose context to set. The inode won't already have
+ * an fscrypt context.
+ * @ctx: the context to set
+ * @len: length of @ctx in bytes (at most FSCRYPT_SET_CONTEXT_MAX_SIZE)
+ * @fs_data: If called from fscrypt_set_context(), this will be the
+ * value the filesystem passed to fscrypt_set_context().
+ * Otherwise (i.e. when called from
+ * FS_IOC_SET_ENCRYPTION_POLICY) this will be NULL.
+ *
+ * i_rwsem will be held for write.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
int (*set_context)(struct inode *inode, const void *ctx, size_t len,
void *fs_data);
+
+ /*
+ * Get the dummy fscrypt policy in use on the filesystem (if any).
+ *
+ * Filesystems only need to implement this function if they support the
+ * test_dummy_encryption mount option.
+ *
+ * Return: A pointer to the dummy fscrypt policy, if the filesystem is
+ * mounted with test_dummy_encryption; otherwise NULL.
+ */
const union fscrypt_policy *(*get_dummy_policy)(struct super_block *sb);
+
+ /*
+ * Check whether a directory is empty. i_rwsem will be held for write.
+ */
bool (*empty_dir)(struct inode *inode);
- unsigned int max_namelen;
+
+ /*
+ * Check whether the filesystem's inode numbers and UUID are stable,
+ * meaning that they will never be changed even by offline operations
+ * such as filesystem shrinking and therefore can be used in the
+ * encryption without the possibility of files becoming unreadable.
+ *
+ * Filesystems only need to implement this function if they want to
+ * support the FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags. These
+ * flags are designed to work around the limitations of UFS and eMMC
+ * inline crypto hardware, and they shouldn't be used in scenarios where
+ * such hardware isn't being used.
+ *
+ * Leaving this NULL is equivalent to always returning false.
+ */
bool (*has_stable_inodes)(struct super_block *sb);
- void (*get_ino_and_lblk_bits)(struct super_block *sb,
- int *ino_bits_ret, int *lblk_bits_ret);
- int (*get_num_devices)(struct super_block *sb);
- void (*get_devices)(struct super_block *sb,
- struct request_queue **devs);
+
+ /*
+ * Return an array of pointers to the block devices to which the
+ * filesystem may write encrypted file contents, NULL if the filesystem
+ * only has a single such block device, or an ERR_PTR() on error.
+ *
+ * On successful non-NULL return, *num_devs is set to the number of
+ * devices in the returned array. The caller must free the returned
+ * array using kfree().
+ *
+ * If the filesystem can use multiple block devices (other than block
+ * devices that aren't used for encrypted file contents, such as
+ * external journal devices), and wants to support inline encryption,
+ * then it must implement this function. Otherwise it's not needed.
+ */
+ struct block_device **(*get_devices)(struct super_block *sb,
+ unsigned int *num_devs);
};
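A hedged sketch of a filesystem filling in this table, showing in particular how inode_info_offs is computed from a container struct. Everything prefixed myfs_ is hypothetical, and the two callbacks are stubs standing in for real on-disk xattr handling:

    #include <linux/fs.h>
    #include <linux/fscrypt.h>

    struct myfs_inode_info {		/* hypothetical per-inode struct */
    	struct fscrypt_inode_info *i_crypt_info;
    	struct inode vfs_inode;
    };

    static int myfs_get_context(struct inode *inode, void *ctx, size_t len)
    {
    	return -ENODATA;		/* read the on-disk xattr here */
    }

    static int myfs_set_context(struct inode *inode, const void *ctx, size_t len,
    			    void *fs_data)
    {
    	return 0;			/* persist @ctx to disk here */
    }

    static const struct fscrypt_operations myfs_fscrypt_ops = {
    	.inode_info_offs	=
    		offsetof(struct myfs_inode_info, i_crypt_info) -
    		offsetof(struct myfs_inode_info, vfs_inode),
    	.needs_bounce_pages	= 1,
    	.get_context		= myfs_get_context,
    	.set_context		= myfs_set_context,
    };

The table would then be installed at mount time with fscrypt_set_ops(sb, &myfs_fscrypt_ops).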
-static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
+int fscrypt_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags);
+
+/*
+ * Returns the address of the fscrypt info pointer within the
+ * filesystem-specific part of the inode. (To save memory on filesystems that
+ * don't support fscrypt, a field in 'struct inode' itself is no longer used.)
+ */
+static inline struct fscrypt_inode_info **
+fscrypt_inode_info_addr(const struct inode *inode)
+{
+ VFS_WARN_ON_ONCE(inode->i_sb->s_cop->inode_info_offs == 0);
+ return (void *)inode + inode->i_sb->s_cop->inode_info_offs;
+}
+
+/*
+ * Load the inode's fscrypt info pointer, using a raw dereference. Since this
+ * uses a raw dereference with no memory barrier, it is appropriate to use only
+ * when the caller knows the inode's key setup already happened, resulting in
+ * non-NULL fscrypt info. E.g., the file contents en/decryption functions use
+ * this, since fscrypt_file_open() set up the key.
+ */
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info_raw(const struct inode *inode)
+{
+ struct fscrypt_inode_info *ci = *fscrypt_inode_info_addr(inode);
+
+ VFS_WARN_ON_ONCE(ci == NULL);
+ return ci;
+}
+
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info(const struct inode *inode)
{
/*
* Pairs with the cmpxchg_release() in fscrypt_setup_encryption_info().
- * I.e., another task may publish ->i_crypt_info concurrently, executing
- * a RELEASE barrier. We need to use smp_load_acquire() here to safely
+ * I.e., another task may publish the fscrypt info concurrently,
+ * executing a RELEASE barrier. Use smp_load_acquire() here to safely
* ACQUIRE the memory the other task published.
*/
- return smp_load_acquire(&inode->i_crypt_info);
+ return smp_load_acquire(fscrypt_inode_info_addr(inode));
}
/**
@@ -100,15 +258,29 @@ static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
}
/*
- * When d_splice_alias() moves a directory's no-key alias to its plaintext alias
- * as a result of the encryption key being added, DCACHE_NOKEY_NAME must be
- * cleared. Note that we don't have to support arbitrary moves of this flag
- * because fscrypt doesn't allow no-key names to be the source or target of a
- * rename().
+ * When d_splice_alias() moves a directory's no-key alias to its
+ * plaintext alias as a result of the encryption key being added,
+ * DCACHE_NOKEY_NAME must be cleared and there might be an opportunity
+ * to disable d_revalidate. Note that we don't have to support the
+ * inverse operation because fscrypt doesn't allow no-key names to be
+ * the source or target of a rename().
*/
static inline void fscrypt_handle_d_move(struct dentry *dentry)
{
- dentry->d_flags &= ~DCACHE_NOKEY_NAME;
+ /*
+ * VFS calls fscrypt_handle_d_move even for non-fscrypt
+ * filesystems.
+ */
+ if (dentry->d_flags & DCACHE_NOKEY_NAME) {
+ dentry->d_flags &= ~DCACHE_NOKEY_NAME;
+
+ /*
+ * Other filesystem features might be handling dentry
+ * revalidation, in which case it cannot be disabled.
+ */
+ if (dentry->d_op->d_revalidate == fscrypt_d_revalidate)
+ dentry->d_flags &= ~DCACHE_OP_REVALIDATE;
+ }
}
/**
@@ -140,19 +312,46 @@ static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
return dentry->d_flags & DCACHE_NOKEY_NAME;
}
+static inline void fscrypt_prepare_dentry(struct dentry *dentry,
+ bool is_nokey_name)
+{
+ /*
+	 * This code tries to take ->d_lock only when it is necessary to
+	 * write to ->d_flags. Strictly speaking, we shouldn't peek at
+	 * d_flags for DCACHE_OP_REVALIDATE without the lock, but in the
+	 * unlikely case of a race the worst that can happen is that we
+	 * fail to unset DCACHE_OP_REVALIDATE and pay the cost of an
+	 * extra d_revalidate.
+ */
+ if (is_nokey_name) {
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_NOKEY_NAME;
+ spin_unlock(&dentry->d_lock);
+ } else if (dentry->d_flags & DCACHE_OP_REVALIDATE &&
+ dentry->d_op->d_revalidate == fscrypt_d_revalidate) {
+ /*
+		 * Unencrypted dentries and encrypted dentries where the
+		 * key is available are always valid from the fscrypt
+		 * perspective. Avoid the cost of calling
+		 * fscrypt_d_revalidate unnecessarily.
+ */
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags &= ~DCACHE_OP_REVALIDATE;
+ spin_unlock(&dentry->d_lock);
+ }
+}
+
/* crypto.c */
void fscrypt_enqueue_decrypt_work(struct work_struct *);
-struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
- unsigned int len,
- unsigned int offs,
- gfp_t gfp_flags);
+struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+ size_t len, size_t offs, gfp_t gfp_flags);
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
- u64 lblk_num, gfp_t gfp_flags);
+ u64 lblk_num);
-int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
- unsigned int offs);
+int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
+ size_t offs);
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num);
@@ -167,6 +366,17 @@ static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
return (struct page *)page_private(bounce_page);
}
+static inline bool fscrypt_is_bounce_folio(const struct folio *folio)
+{
+ return folio->mapping == NULL;
+}
+
+static inline
+struct folio *fscrypt_pagecache_folio(const struct folio *bounce_folio)
+{
+ return bounce_folio->private;
+}
+
void fscrypt_free_bounce_page(struct page *bounce_page);
/* policy.c */
@@ -175,16 +385,24 @@ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg);
int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *arg);
int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg);
int fscrypt_has_permitted_context(struct inode *parent, struct inode *child);
+int fscrypt_context_for_new_inode(void *ctx, struct inode *inode);
int fscrypt_set_context(struct inode *inode, void *fs_data);
struct fscrypt_dummy_policy {
const union fscrypt_policy *policy;
};
-int fscrypt_set_test_dummy_encryption(struct super_block *sb, const char *arg,
- struct fscrypt_dummy_policy *dummy_policy);
+int fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param,
+ struct fscrypt_dummy_policy *dummy_policy);
+bool fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1,
+ const struct fscrypt_dummy_policy *p2);
void fscrypt_show_test_dummy_encryption(struct seq_file *seq, char sep,
struct super_block *sb);
+static inline bool
+fscrypt_is_dummy_policy_set(const struct fscrypt_dummy_policy *dummy_policy)
+{
+ return dummy_policy->policy != NULL;
+}
static inline void
fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
{
@@ -193,7 +411,7 @@ fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
}
/* keyring.c */
-void fscrypt_sb_free(struct super_block *sb);
+void fscrypt_destroy_keyring(struct super_block *sb);
int fscrypt_ioctl_add_key(struct file *filp, void __user *arg);
int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg);
int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg);
@@ -207,6 +425,10 @@ void fscrypt_free_inode(struct inode *inode);
int fscrypt_drop_inode(struct inode *inode);
/* fname.c */
+int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
+ u8 *out, unsigned int olen);
+bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len,
+ u32 max_len, u32 *encrypted_len_ret);
int fscrypt_setup_filename(struct inode *inode, const struct qstr *iname,
int lookup, struct fscrypt_name *fname);
@@ -225,10 +447,9 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode,
bool fscrypt_match_name(const struct fscrypt_name *fname,
const u8 *de_name, u32 de_name_len);
u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name);
-int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags);
/* bio.c */
-void fscrypt_decrypt_bio(struct bio *bio);
+bool fscrypt_decrypt_bio(struct bio *bio);
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len);
@@ -241,6 +462,7 @@ int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry,
unsigned int flags);
int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
struct fscrypt_name *fname);
+int fscrypt_prepare_lookup_partial(struct inode *dir, struct dentry *dentry);
int __fscrypt_prepare_readdir(struct inode *dir);
int __fscrypt_prepare_setattr(struct dentry *dentry, struct iattr *attr);
int fscrypt_prepare_setflags(struct inode *inode,
@@ -253,6 +475,7 @@ int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
unsigned int max_size,
struct delayed_call *done);
+int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat);
static inline void fscrypt_set_ops(struct super_block *sb,
const struct fscrypt_operations *s_cop)
{
@@ -260,7 +483,8 @@ static inline void fscrypt_set_ops(struct super_block *sb,
}
#else /* !CONFIG_FS_ENCRYPTION */
-static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info(const struct inode *inode)
{
return NULL;
}
@@ -279,15 +503,18 @@ static inline bool fscrypt_is_nokey_name(const struct dentry *dentry)
return false;
}
+static inline void fscrypt_prepare_dentry(struct dentry *dentry,
+ bool is_nokey_name)
+{
+}
+
/* crypto.c */
static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
}
-static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
- unsigned int len,
- unsigned int offs,
- gfp_t gfp_flags)
+static inline struct page *fscrypt_encrypt_pagecache_blocks(struct folio *folio,
+ size_t len, size_t offs, gfp_t gfp_flags)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -295,15 +522,13 @@ static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
static inline int fscrypt_encrypt_block_inplace(const struct inode *inode,
struct page *page,
unsigned int len,
- unsigned int offs, u64 lblk_num,
- gfp_t gfp_flags)
+ unsigned int offs, u64 lblk_num)
{
return -EOPNOTSUPP;
}
-static inline int fscrypt_decrypt_pagecache_blocks(struct page *page,
- unsigned int len,
- unsigned int offs)
+static inline int fscrypt_decrypt_pagecache_blocks(struct folio *folio,
+ size_t len, size_t offs)
{
return -EOPNOTSUPP;
}
@@ -327,6 +552,18 @@ static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
return ERR_PTR(-EINVAL);
}
+static inline bool fscrypt_is_bounce_folio(const struct folio *folio)
+{
+ return false;
+}
+
+static inline
+struct folio *fscrypt_pagecache_folio(const struct folio *bounce_folio)
+{
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+}
+
static inline void fscrypt_free_bounce_page(struct page *bounce_page)
{
}
@@ -368,19 +605,39 @@ static inline int fscrypt_set_context(struct inode *inode, void *fs_data)
struct fscrypt_dummy_policy {
};
+static inline int
+fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param,
+ struct fscrypt_dummy_policy *dummy_policy)
+{
+ return -EINVAL;
+}
+
+static inline bool
+fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1,
+ const struct fscrypt_dummy_policy *p2)
+{
+ return true;
+}
+
static inline void fscrypt_show_test_dummy_encryption(struct seq_file *seq,
char sep,
struct super_block *sb)
{
}
+static inline bool
+fscrypt_is_dummy_policy_set(const struct fscrypt_dummy_policy *dummy_policy)
+{
+ return false;
+}
+
static inline void
fscrypt_free_dummy_policy(struct fscrypt_dummy_policy *dummy_policy)
{
}
/* keyring.c */
-static inline void fscrypt_sb_free(struct super_block *sb)
+static inline void fscrypt_destroy_keyring(struct super_block *sb)
{
}
@@ -486,15 +743,16 @@ static inline u64 fscrypt_fname_siphash(const struct inode *dir,
return 0;
}
-static inline int fscrypt_d_revalidate(struct dentry *dentry,
- unsigned int flags)
+static inline int fscrypt_d_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags)
{
return 1;
}
/* bio.c */
-static inline void fscrypt_decrypt_bio(struct bio *bio)
+static inline bool fscrypt_decrypt_bio(struct bio *bio)
{
+ return true;
}
static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
@@ -534,6 +792,12 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir,
return -EOPNOTSUPP;
}
+static inline int fscrypt_prepare_lookup_partial(struct inode *dir,
+ struct dentry *dentry)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int __fscrypt_prepare_readdir(struct inode *dir)
{
return -EOPNOTSUPP;
@@ -583,6 +847,12 @@ static inline const char *fscrypt_get_symlink(struct inode *inode,
return ERR_PTR(-EOPNOTSUPP);
}
+static inline int fscrypt_symlink_getattr(const struct path *path,
+ struct kstat *stat)
+{
+ return -EOPNOTSUPP;
+}
+
static inline void fscrypt_set_ops(struct super_block *sb,
const struct fscrypt_operations *s_cop)
{
@@ -609,6 +879,10 @@ bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
bool fscrypt_mergeable_bio_bh(struct bio *bio,
const struct buffer_head *next_bh);
+bool fscrypt_dio_supported(struct inode *inode);
+
+u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks);
+
#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
@@ -637,6 +911,17 @@ static inline bool fscrypt_mergeable_bio_bh(struct bio *bio,
{
return true;
}
+
+static inline bool fscrypt_dio_supported(struct inode *inode)
+{
+ return !fscrypt_needs_contents_encryption(inode);
+}
+
+static inline u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk,
+ u64 nr_blocks)
+{
+ return nr_blocks;
+}
#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
/**
@@ -680,7 +965,7 @@ static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
*/
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
{
- return fscrypt_get_info(inode) != NULL;
+ return fscrypt_get_inode_info(inode) != NULL;
}
/**
@@ -778,6 +1063,9 @@ static inline int fscrypt_prepare_lookup(struct inode *dir,
fname->usr_fname = &dentry->d_name;
fname->disk_name.name = (unsigned char *)dentry->d_name.name;
fname->disk_name.len = dentry->d_name.len;
+
+ fscrypt_prepare_dentry(dentry, false);
+
return 0;
}
diff --git a/include/linux/fsi-occ.h b/include/linux/fsi-occ.h
index d4cdc2aa6e33..7ee3dbd7f4b3 100644
--- a/include/linux/fsi-occ.h
+++ b/include/linux/fsi-occ.h
@@ -19,6 +19,8 @@ struct device;
#define OCC_RESP_CRIT_OCB 0xE3
#define OCC_RESP_CRIT_HW 0xE4
+#define OCC_MAX_RESP_WORDS 2048
+
int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
void *response, size_t *resp_len);
diff --git a/include/linux/fsi.h b/include/linux/fsi.h
index 3df8c54868df..adea1b432f2d 100644
--- a/include/linux/fsi.h
+++ b/include/linux/fsi.h
@@ -44,7 +44,7 @@ struct fsi_driver {
};
#define to_fsi_dev(devp) container_of(devp, struct fsi_device, dev)
-#define to_fsi_drv(drvp) container_of(drvp, struct fsi_driver, drv)
+#define to_fsi_drv(drvp) container_of_const(drvp, struct fsi_driver, drv)
extern int fsi_driver_register(struct fsi_driver *fsi_drv);
extern void fsi_driver_unregister(struct fsi_driver *fsi_drv);
@@ -68,7 +68,7 @@ extern int fsi_slave_read(struct fsi_slave *slave, uint32_t addr,
extern int fsi_slave_write(struct fsi_slave *slave, uint32_t addr,
const void *val, size_t size);
-extern struct bus_type fsi_bus_type;
+extern const struct bus_type fsi_bus_type;
extern const struct device_type fsi_cdev_type;
enum fsi_dev_type {
diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h
index 2d9203314865..623ccfcbf39c 100644
--- a/include/linux/fsl/enetc_mdio.h
+++ b/include/linux/fsl/enetc_mdio.h
@@ -37,18 +37,30 @@ struct enetc_mdio_priv {
#if IS_REACHABLE(CONFIG_FSL_ENETC_MDIO)
-int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
-int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value);
+int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum);
+int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value);
+int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad, int regnum);
+int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad, int regnum,
+ u16 value);
struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs);
#else
-static inline int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+static inline int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id,
+ int regnum)
{ return -EINVAL; }
-static inline int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
- u16 value)
+static inline int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id,
+ int regnum, u16 value)
{ return -EINVAL; }
-struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
+static inline int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id,
+ int devad, int regnum)
+{ return -EINVAL; }
+static inline int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id,
+ int devad, int regnum, u16 value)
+{ return -EINVAL; }
+static inline struct enetc_hw *enetc_hw_alloc(struct device *dev,
+ void __iomem *port_regs)
{ return ERR_PTR(-EINVAL); }
#endif
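A hedged sketch of wiring the split C22/C45 accessors into an MDIO bus, assuming a kernel where struct mii_bus has the dedicated read_c45/write_c45 hooks; the myboard_* naming is hypothetical:

    #include <linux/fsl/enetc_mdio.h>
    #include <linux/mdio.h>
    #include <linux/phy.h>

    static int myboard_register_mdio(struct device *dev,
    				 struct enetc_mdio_priv *priv)
    {
    	struct mii_bus *bus;

    	bus = devm_mdiobus_alloc(dev);
    	if (!bus)
    		return -ENOMEM;

    	bus->name = "myboard ENETC MDIO";
    	bus->read = enetc_mdio_read_c22;
    	bus->write = enetc_mdio_write_c22;
    	bus->read_c45 = enetc_mdio_read_c45;
    	bus->write_c45 = enetc_mdio_write_c45;
    	bus->parent = dev;
    	bus->priv = priv;
    	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));

    	return devm_mdiobus_register(dev, bus);
    }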
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 63b56aba925a..897d6211c163 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -32,6 +32,13 @@ struct fsl_mc_io;
* @shutdown: Function called at shutdown time to quiesce the device
* @suspend: Function called when a device is stopped
* @resume: Function called when a device is resumed
+ * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
+ *		For most device drivers there is no need to care about this
+ *		flag as long as all DMA is handled through the kernel DMA API.
+ *		Some special drivers, for example VFIO drivers, know how to
+ *		manage the DMA themselves and set this flag so that the IOMMU
+ *		layer will allow them to set up and manage their own I/O
+ *		address space.
*
* Generic DPAA device driver object for device drivers that are registered
* with a DPRC bus. This structure is to be embedded in each device-specific
@@ -41,14 +48,15 @@ struct fsl_mc_driver {
struct device_driver driver;
const struct fsl_mc_device_id *match_id_table;
int (*probe)(struct fsl_mc_device *dev);
- int (*remove)(struct fsl_mc_device *dev);
+ void (*remove)(struct fsl_mc_device *dev);
void (*shutdown)(struct fsl_mc_device *dev);
int (*suspend)(struct fsl_mc_device *dev, pm_message_t state);
int (*resume)(struct fsl_mc_device *dev);
+ bool driver_managed_dma;
};
#define to_fsl_mc_driver(_drv) \
- container_of(_drv, struct fsl_mc_driver, driver)
+ container_of_const(_drv, struct fsl_mc_driver, driver)
/**
* enum fsl_mc_pool_type - Types of allocatable MC bus resources
@@ -91,13 +99,13 @@ struct fsl_mc_resource {
/**
* struct fsl_mc_device_irq - MC object device message-based interrupt
- * @msi_desc: pointer to MSI descriptor allocated by fsl_mc_msi_alloc_descs()
+ * @virq: Linux virtual interrupt number
* @mc_dev: MC object device that owns this interrupt
* @dev_irq_index: device-relative IRQ index
* @resource: MC generic resource associated with the interrupt
*/
struct fsl_mc_device_irq {
- struct msi_desc *msi_desc;
+ unsigned int virq;
struct fsl_mc_device *mc_dev;
u8 dev_irq_index;
struct fsl_mc_resource resource;
@@ -170,7 +178,9 @@ struct fsl_mc_obj_desc {
* @regions: pointer to array of MMIO region entries
* @irqs: pointer to array of pointers to interrupts allocated to this device
* @resource: generic resource associated with this MC object device, if any.
- * @driver_override: driver name to force a match
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
*
* Generic device object for MC object devices that are "attached" to a
* MC bus.
@@ -204,7 +214,7 @@ struct fsl_mc_device {
struct fsl_mc_device_irq **irqs;
struct fsl_mc_resource *resource;
struct device_link *consumer_link;
- char *driver_override;
+ const char *driver_override;
};
#define to_fsl_mc_device(_dev) \
@@ -407,8 +417,6 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
void fsl_mc_portal_free(struct fsl_mc_io *mc_io);
-int fsl_mc_portal_reset(struct fsl_mc_io *mc_io);
-
int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
enum fsl_mc_pool_type pool_type,
struct fsl_mc_device **new_mc_adev);
@@ -423,25 +431,26 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
-struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev);
-
-extern struct bus_type fsl_mc_bus_type;
-
-extern struct device_type fsl_mc_bus_dprc_type;
-extern struct device_type fsl_mc_bus_dpni_type;
-extern struct device_type fsl_mc_bus_dpio_type;
-extern struct device_type fsl_mc_bus_dpsw_type;
-extern struct device_type fsl_mc_bus_dpbp_type;
-extern struct device_type fsl_mc_bus_dpcon_type;
-extern struct device_type fsl_mc_bus_dpmcp_type;
-extern struct device_type fsl_mc_bus_dpmac_type;
-extern struct device_type fsl_mc_bus_dprtc_type;
-extern struct device_type fsl_mc_bus_dpseci_type;
-extern struct device_type fsl_mc_bus_dpdmux_type;
-extern struct device_type fsl_mc_bus_dpdcei_type;
-extern struct device_type fsl_mc_bus_dpaiop_type;
-extern struct device_type fsl_mc_bus_dpci_type;
-extern struct device_type fsl_mc_bus_dpdmai_type;
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
+ u16 if_id);
+
+extern const struct bus_type fsl_mc_bus_type;
+
+extern const struct device_type fsl_mc_bus_dprc_type;
+extern const struct device_type fsl_mc_bus_dpni_type;
+extern const struct device_type fsl_mc_bus_dpio_type;
+extern const struct device_type fsl_mc_bus_dpsw_type;
+extern const struct device_type fsl_mc_bus_dpbp_type;
+extern const struct device_type fsl_mc_bus_dpcon_type;
+extern const struct device_type fsl_mc_bus_dpmcp_type;
+extern const struct device_type fsl_mc_bus_dpmac_type;
+extern const struct device_type fsl_mc_bus_dprtc_type;
+extern const struct device_type fsl_mc_bus_dpseci_type;
+extern const struct device_type fsl_mc_bus_dpdmux_type;
+extern const struct device_type fsl_mc_bus_dpdcei_type;
+extern const struct device_type fsl_mc_bus_dpaiop_type;
+extern const struct device_type fsl_mc_bus_dpci_type;
+extern const struct device_type fsl_mc_bus_dpdmai_type;
static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
{
@@ -619,6 +628,20 @@ int dpcon_reset(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
+int fsl_mc_obj_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int obj_id,
+ char *obj_type,
+ u16 *token);
+
+int fsl_mc_obj_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int fsl_mc_obj_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
/**
* struct dpcon_attr - Structure representing DPCON attributes
* @id: DPCON object ID
diff --git a/include/linux/fsl/netc_global.h b/include/linux/fsl/netc_global.h
new file mode 100644
index 000000000000..fdecca8c90f0
--- /dev/null
+++ b/include/linux/fsl/netc_global.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2024 NXP
+ */
+#ifndef __NETC_GLOBAL_H
+#define __NETC_GLOBAL_H
+
+#include <linux/io.h>
+
+static inline u32 netc_read(void __iomem *reg)
+{
+ return ioread32(reg);
+}
+
+static inline void netc_write(void __iomem *reg, u32 val)
+{
+ iowrite32(val, reg);
+}
+
+#endif
diff --git a/include/linux/fsl/ntmp.h b/include/linux/fsl/ntmp.h
new file mode 100644
index 000000000000..916dc4fe7de3
--- /dev/null
+++ b/include/linux/fsl/ntmp.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2025 NXP */
+#ifndef __NETC_NTMP_H
+#define __NETC_NTMP_H
+
+#include <linux/bitops.h>
+#include <linux/if_ether.h>
+
+struct maft_keye_data {
+ u8 mac_addr[ETH_ALEN];
+ __le16 resv;
+};
+
+struct maft_cfge_data {
+ __le16 si_bitmap;
+ __le16 resv;
+};
+
+struct netc_cbdr_regs {
+ void __iomem *pir;
+ void __iomem *cir;
+ void __iomem *mr;
+
+ void __iomem *bar0;
+ void __iomem *bar1;
+ void __iomem *lenr;
+};
+
+struct netc_tbl_vers {
+ u8 maft_ver;
+ u8 rsst_ver;
+};
+
+struct netc_cbdr {
+ struct device *dev;
+ struct netc_cbdr_regs regs;
+
+ int bd_num;
+ int next_to_use;
+ int next_to_clean;
+
+ int dma_size;
+ void *addr_base;
+ void *addr_base_align;
+ dma_addr_t dma_base;
+ dma_addr_t dma_base_align;
+
+ /* Serialize the order of command BD ring */
+ spinlock_t ring_lock;
+};
+
+struct ntmp_user {
+ int cbdr_num; /* number of control BD ring */
+ struct device *dev;
+ struct netc_cbdr *ring;
+ struct netc_tbl_vers tbl;
+};
+
+struct maft_entry_data {
+ struct maft_keye_data keye;
+ struct maft_cfge_data cfge;
+};
+
+#if IS_ENABLED(CONFIG_NXP_NETC_LIB)
+int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
+ const struct netc_cbdr_regs *regs);
+void ntmp_free_cbdr(struct netc_cbdr *cbdr);
+
+/* NTMP APIs */
+int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft);
+int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft);
+int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id);
+int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
+ int count);
+int ntmp_rsst_query_entry(struct ntmp_user *user,
+ u32 *table, int count);
+#else
+static inline int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
+ const struct netc_cbdr_regs *regs)
+{
+ return 0;
+}
+
+static inline void ntmp_free_cbdr(struct netc_cbdr *cbdr)
+{
+}
+
+static inline int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ return 0;
+}
+
+static inline int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
+ struct maft_entry_data *maft)
+{
+ return 0;
+}
+
+static inline int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id)
+{
+ return 0;
+}
+
+static inline int ntmp_rsst_update_entry(struct ntmp_user *user,
+ const u32 *table, int count)
+{
+ return 0;
+}
+
+static inline int ntmp_rsst_query_entry(struct ntmp_user *user,
+ u32 *table, int count)
+{
+ return 0;
+}
+
+#endif
+
+#endif
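A minimal usage sketch for the MAC address filter API above; mynic_add_mac_filter is a hypothetical driver helper:

    #include <linux/etherdevice.h>
    #include <linux/fsl/ntmp.h>

    static int mynic_add_mac_filter(struct ntmp_user *user, u32 entry_id,
    				const u8 *addr, u16 si_bitmap)
    {
    	struct maft_entry_data maft = {};

    	ether_addr_copy(maft.keye.mac_addr, addr);
    	maft.cfge.si_bitmap = cpu_to_le16(si_bitmap);

    	return ntmp_maft_add_entry(user, entry_id, &maft);
    }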
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index 01acebe37fab..3601e25779ba 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -145,10 +145,10 @@ struct ptp_qoriq {
struct ptp_clock *clock;
struct ptp_clock_info caps;
struct resource *rsrc;
- struct dentry *debugfs_root;
struct device *dev;
bool extts_fifo_support;
bool fiper3_support;
+ bool etsec;
int irq;
int phc_index;
u32 tclk_period; /* nanoseconds */
@@ -194,14 +194,5 @@ int ptp_qoriq_settime(struct ptp_clock_info *ptp,
int ptp_qoriq_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on);
int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event);
-#ifdef CONFIG_DEBUG_FS
-void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq);
-void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq);
-#else
-static inline void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq)
-{ }
-static inline void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq)
-{ }
-#endif
#endif
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 5d231ce8709b..49f20c2f99bf 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -118,7 +118,6 @@ struct fsl_usb2_platform_data {
#define FSL_USB2_PORT0_ENABLED 0x00000001
#define FSL_USB2_PORT1_ENABLED 0x00000002
-#define FLS_USB2_WORKAROUND_ENGCM09152 (1 << 0)
struct spi_device;
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index f8acddcf54fb..28a9cb13fbfa 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -17,6 +17,25 @@
#include <linux/slab.h>
#include <linux/bug.h>
+/* Are there any inode/mount/sb objects watched with priority prio or above? */
+static inline bool fsnotify_sb_has_priority_watchers(struct super_block *sb,
+ int prio)
+{
+ struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb);
+
+ /* Were any marks ever added to any object on this sb? */
+ if (!sbinfo)
+ return false;
+
+ return atomic_long_read(&sbinfo->watched_objects[prio]);
+}
+
+/* Are there any inode/mount/sb objects that are being watched at all? */
+static inline bool fsnotify_sb_has_watchers(struct super_block *sb)
+{
+ return fsnotify_sb_has_priority_watchers(sb, 0);
+}
+
/*
* Notify this @dir inode about a change in a child directory entry.
* The directory entry may have turned positive or negative or its inode may
@@ -26,21 +45,27 @@
* FS_EVENT_ON_CHILD mask on the parent inode and will not be reported if only
* the child is interested and not the parent.
*/
-static inline void fsnotify_name(struct inode *dir, __u32 mask,
- struct inode *child,
- const struct qstr *name, u32 cookie)
+static inline int fsnotify_name(__u32 mask, const void *data, int data_type,
+ struct inode *dir, const struct qstr *name,
+ u32 cookie)
{
- fsnotify(mask, child, FSNOTIFY_EVENT_INODE, dir, name, NULL, cookie);
+ if (!fsnotify_sb_has_watchers(dir->i_sb))
+ return 0;
+
+ return fsnotify(mask, data, data_type, dir, name, NULL, cookie);
}
static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry,
__u32 mask)
{
- fsnotify_name(dir, mask, d_inode(dentry), &dentry->d_name, 0);
+ fsnotify_name(mask, dentry, FSNOTIFY_EVENT_DENTRY, dir, &dentry->d_name, 0);
}
static inline void fsnotify_inode(struct inode *inode, __u32 mask)
{
+ if (!fsnotify_sb_has_watchers(inode->i_sb))
+ return;
+
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
@@ -53,6 +78,9 @@ static inline int fsnotify_parent(struct dentry *dentry, __u32 mask,
{
struct inode *inode = d_inode(dentry);
+ if (!fsnotify_sb_has_watchers(inode->i_sb))
+ return 0;
+
if (S_ISDIR(inode->i_mode)) {
mask |= FS_ISDIR;
@@ -77,44 +105,136 @@ notify_child:
*/
static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask)
{
- fsnotify_parent(dentry, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE);
+ fsnotify_parent(dentry, mask, dentry, FSNOTIFY_EVENT_DENTRY);
+}
+
+static inline int fsnotify_path(const struct path *path, __u32 mask)
+{
+ return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
}
static inline int fsnotify_file(struct file *file, __u32 mask)
{
- const struct path *path = &file->f_path;
+ /*
+	 * FMODE_NONOTIFY fds are generated by fanotify itself and should not
+	 * generate new events. We also don't want to generate events for
+	 * FMODE_PATH fds (only open & close events are involved), as they are
+	 * just handle creation / destruction events and not "real" file events.
+ */
+ if (FMODE_FSNOTIFY_NONE(file->f_mode))
+ return 0;
+
+ return fsnotify_path(&file->f_path, mask);
+}
+
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+
+int fsnotify_open_perm_and_set_mode(struct file *file);
+
+/*
+ * fsnotify_file_area_perm - permission hook before access to file range
+ */
+static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
+ const loff_t *ppos, size_t count)
+{
+ /*
+ * filesystem may be modified in the context of permission events
+ * (e.g. by HSM filling a file on access), so sb freeze protection
+ * must not be held.
+ */
+ lockdep_assert_once(file_write_not_started(file));
+
+ if (!(perm_mask & (MAY_READ | MAY_WRITE | MAY_ACCESS)))
+ return 0;
- if (file->f_mode & FMODE_NONOTIFY)
+ /*
+ * read()/write() and other types of access generate pre-content events.
+ */
+ if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode))) {
+ int ret = fsnotify_pre_content(&file->f_path, ppos, count);
+
+ if (ret)
+ return ret;
+ }
+
+ if (!(perm_mask & MAY_READ) ||
+ likely(!FMODE_FSNOTIFY_ACCESS_PERM(file->f_mode)))
return 0;
- return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
+ /*
+ * read() also generates the legacy FS_ACCESS_PERM event, so content
+ * scanners can inspect the content filled by pre-content event.
+ */
+ return fsnotify_path(&file->f_path, FS_ACCESS_PERM);
}
-/* Simple call site for access decisions */
-static inline int fsnotify_perm(struct file *file, int mask)
+/*
+ * fsnotify_mmap_perm - permission hook before mmap of file range
+ */
+static inline int fsnotify_mmap_perm(struct file *file, int prot,
+ const loff_t off, size_t len)
{
- int ret;
- __u32 fsnotify_mask = 0;
+ /*
+ * mmap() generates only pre-content events.
+ */
+ if (!file || likely(!FMODE_FSNOTIFY_HSM(file->f_mode)))
+ return 0;
+
+ return fsnotify_pre_content(&file->f_path, &off, len);
+}
- if (!(mask & (MAY_READ | MAY_OPEN)))
+/*
+ * fsnotify_truncate_perm - permission hook before file truncate
+ */
+static inline int fsnotify_truncate_perm(const struct path *path, loff_t length)
+{
+ struct inode *inode = d_inode(path->dentry);
+
+ if (!(inode->i_sb->s_iflags & SB_I_ALLOW_HSM) ||
+ !fsnotify_sb_has_priority_watchers(inode->i_sb,
+ FSNOTIFY_PRIO_PRE_CONTENT))
return 0;
- if (mask & MAY_OPEN) {
- fsnotify_mask = FS_OPEN_PERM;
+ return fsnotify_pre_content(path, &length, 0);
+}
- if (file->f_flags & __FMODE_EXEC) {
- ret = fsnotify_file(file, FS_OPEN_EXEC_PERM);
+/*
+ * fsnotify_file_perm - permission hook before file access (unknown range)
+ */
+static inline int fsnotify_file_perm(struct file *file, int perm_mask)
+{
+ return fsnotify_file_area_perm(file, perm_mask, NULL, 0);
+}
- if (ret)
- return ret;
- }
- } else if (mask & MAY_READ) {
- fsnotify_mask = FS_ACCESS_PERM;
- }
+#else
+static inline int fsnotify_open_perm_and_set_mode(struct file *file)
+{
+ return 0;
+}
- return fsnotify_file(file, fsnotify_mask);
+static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
+ const loff_t *ppos, size_t count)
+{
+ return 0;
}
+static inline int fsnotify_mmap_perm(struct file *file, int prot,
+ const loff_t off, size_t len)
+{
+ return 0;
+}
+
+static inline int fsnotify_truncate_perm(const struct path *path, loff_t length)
+{
+ return 0;
+}
+
+static inline int fsnotify_file_perm(struct file *file, int perm_mask)
+{
+ return 0;
+}
+#endif
+
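
As a hedged sketch of how the new area permission hook is meant to be called
(the caller below is hypothetical; in the kernel the calls live in the VFS
read/write and mmap paths, and my_do_read is not a real helper):

	static ssize_t my_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
	{
		int ret;

		/* Pre-content (HSM) hook first, then legacy FS_ACCESS_PERM. */
		ret = fsnotify_file_area_perm(file, MAY_READ, ppos, count);
		if (ret)
			return ret;

		return my_do_read(file, buf, count, ppos); /* hypothetical */
	}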
/*
* fsnotify_link_count - inode's link count changed
*/
@@ -135,18 +255,23 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
u32 fs_cookie = fsnotify_get_cookie();
__u32 old_dir_mask = FS_MOVED_FROM;
__u32 new_dir_mask = FS_MOVED_TO;
+ __u32 rename_mask = FS_RENAME;
const struct qstr *new_name = &moved->d_name;
- if (old_dir == new_dir)
- old_dir_mask |= FS_DN_RENAME;
-
if (isdir) {
old_dir_mask |= FS_ISDIR;
new_dir_mask |= FS_ISDIR;
+ rename_mask |= FS_ISDIR;
}
- fsnotify_name(old_dir, old_dir_mask, source, old_name, fs_cookie);
- fsnotify_name(new_dir, new_dir_mask, source, new_name, fs_cookie);
+ /* Event with information about both old and new parent+name */
+ fsnotify_name(rename_mask, moved, FSNOTIFY_EVENT_DENTRY,
+ old_dir, old_name, 0);
+
+ fsnotify_name(old_dir_mask, source, FSNOTIFY_EVENT_INODE,
+ old_dir, old_name, fs_cookie);
+ fsnotify_name(new_dir_mask, source, FSNOTIFY_EVENT_INODE,
+ new_dir, new_name, fs_cookie);
if (target)
fsnotify_link_count(target);
@@ -170,6 +295,11 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt)
__fsnotify_vfsmount_delete(mnt);
}
+static inline void fsnotify_mntns_delete(struct mnt_namespace *mntns)
+{
+ __fsnotify_mntns_delete(mntns);
+}
+
/*
* fsnotify_inoderemove - an inode is going away
*/
@@ -181,16 +311,22 @@ static inline void fsnotify_inoderemove(struct inode *inode)
/*
* fsnotify_create - 'name' was linked in
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ * Note: some filesystems (e.g. kernfs) leave @dentry negative and instantiate
+ * ->d_inode later
*/
-static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
+static inline void fsnotify_create(struct inode *dir, struct dentry *dentry)
{
- audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE);
+ audit_inode_child(dir, dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify_dirent(inode, dentry, FS_CREATE);
+ fsnotify_dirent(dir, dentry, FS_CREATE);
}
/*
* fsnotify_link - new hardlink in 'inode' directory
+ *
+ * Caller must make sure that new_dentry->d_name is stable.
* Note: We have to pass also the linked inode ptr as some filesystems leave
* new_dentry->d_inode NULL and instantiate inode pointer later
*/
@@ -200,7 +336,45 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode,
fsnotify_link_count(inode);
audit_inode_child(dir, new_dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify_name(dir, FS_CREATE, inode, &new_dentry->d_name, 0);
+ fsnotify_name(FS_CREATE, inode, FSNOTIFY_EVENT_INODE,
+ dir, &new_dentry->d_name, 0);
+}
+
+/*
+ * fsnotify_delete - @dentry was unlinked and unhashed
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ *
+ * Note: unlike fsnotify_unlink(), we also have to pass the unlinked inode
+ * as this may be called after d_delete() and @dentry may be negative.
+ */
+static inline void fsnotify_delete(struct inode *dir, struct inode *inode,
+ struct dentry *dentry)
+{
+ __u32 mask = FS_DELETE;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ fsnotify_name(mask, inode, FSNOTIFY_EVENT_INODE, dir, &dentry->d_name,
+ 0);
+}
+
+/**
+ * d_delete_notify - delete a dentry and call fsnotify_delete()
+ * @dir: inode of the parent directory
+ * @dentry: The dentry to delete
+ *
+ * This helper is used to guarantee that the unlinked inode cannot be found
+ * by a lookup of this name after the fsnotify_delete() event has been
+ * delivered.
+ */
+static inline void d_delete_notify(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = d_inode(dentry);
+
+ ihold(inode);
+ d_delete(dentry);
+ fsnotify_delete(dir, inode, dentry);
+ iput(inode);
}
/*
@@ -210,20 +384,24 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode,
*/
static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry)
{
- /* Expected to be called before d_delete() */
- WARN_ON_ONCE(d_is_negative(dentry));
+ if (WARN_ON_ONCE(d_is_negative(dentry)))
+ return;
- fsnotify_dirent(dir, dentry, FS_DELETE);
+ fsnotify_delete(dir, d_inode(dentry), dentry);
}
/*
* fsnotify_mkdir - directory 'name' was created
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ * Note: some filesystems (e.g. kernfs) leave @dentry negative and instantiate
+ * ->d_inode later
*/
-static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
+static inline void fsnotify_mkdir(struct inode *dir, struct dentry *dentry)
{
- audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE);
+ audit_inode_child(dir, dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify_dirent(inode, dentry, FS_CREATE | FS_ISDIR);
+ fsnotify_dirent(dir, dentry, FS_CREATE | FS_ISDIR);
}
/*
@@ -233,10 +411,10 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
*/
static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry)
{
- /* Expected to be called before d_delete() */
- WARN_ON_ONCE(d_is_negative(dentry));
+ if (WARN_ON_ONCE(d_is_negative(dentry)))
+ return;
- fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR);
+ fsnotify_delete(dir, d_inode(dentry), dentry);
}
/*
@@ -317,4 +495,32 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
fsnotify_dentry(dentry, mask);
}
+static inline int fsnotify_sb_error(struct super_block *sb, struct inode *inode,
+ int error)
+{
+ struct fs_error_report report = {
+ .error = error,
+ .inode = inode,
+ .sb = sb,
+ };
+
+ return fsnotify(FS_ERROR, &report, FSNOTIFY_EVENT_ERROR,
+ NULL, NULL, NULL, 0);
+}
+
+static inline void fsnotify_mnt_attach(struct mnt_namespace *ns, struct vfsmount *mnt)
+{
+ fsnotify_mnt(FS_MNT_ATTACH, ns, mnt);
+}
+
+static inline void fsnotify_mnt_detach(struct mnt_namespace *ns, struct vfsmount *mnt)
+{
+ fsnotify_mnt(FS_MNT_DETACH, ns, mnt);
+}
+
+static inline void fsnotify_mnt_move(struct mnt_namespace *ns, struct vfsmount *mnt)
+{
+ fsnotify_mnt(FS_MNT_MOVE, ns, mnt);
+}
+
#endif /* _LINUX_FS_NOTIFY_H */
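
A sketch of the intended d_delete_notify() call site, under the assumption of
a filesystem that removes the name and unhashes the dentry itself
(my_remove_name is hypothetical):

	static int my_unlink(struct inode *dir, struct dentry *dentry)
	{
		int err = my_remove_name(dir, dentry);	/* hypothetical */

		if (err)
			return err;

		/*
		 * Unhash the dentry and deliver FS_DELETE while the inode is
		 * still pinned, so the name cannot resolve to it afterwards.
		 */
		d_delete_notify(dir, dentry);
		return 0;
	}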
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 1ce66748a2d2..0d954ea7b179 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -19,6 +19,8 @@
#include <linux/atomic.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
+#include <linux/mempool.h>
+#include <linux/sched/mm.h>
/*
* IN_* from inotfy.h lines up EXACTLY with FS_*, this is so we can easily
@@ -29,8 +31,8 @@
#define FS_ACCESS 0x00000001 /* File was accessed */
#define FS_MODIFY 0x00000002 /* File was modified */
#define FS_ATTRIB 0x00000004 /* Metadata changed */
-#define FS_CLOSE_WRITE 0x00000008 /* Writtable file was closed */
-#define FS_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */
+#define FS_CLOSE_WRITE 0x00000008 /* Writable file was closed */
+#define FS_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */
#define FS_OPEN 0x00000020 /* File was opened */
#define FS_MOVED_FROM 0x00000040 /* File was moved from X */
#define FS_MOVED_TO 0x00000080 /* File was moved to Y */
@@ -42,13 +44,25 @@
#define FS_UNMOUNT 0x00002000 /* inode on umount fs */
#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
+#define FS_ERROR 0x00008000 /* Filesystem Error (fanotify) */
+
+/*
+ * FS_IN_IGNORED overloads FS_ERROR. It is only used internally by inotify
+ * which does not support FS_ERROR.
+ */
#define FS_IN_IGNORED 0x00008000 /* last inotify event here */
#define FS_OPEN_PERM 0x00010000 /* open event in a permission hook */
#define FS_ACCESS_PERM 0x00020000 /* access event in a permission hook */
#define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */
+/* #define FS_DIR_MODIFY 0x00080000 */ /* Deprecated (reserved) */
+
+#define FS_PRE_ACCESS 0x00100000 /* Pre-content access hook */
+
+#define FS_MNT_ATTACH 0x01000000 /* Mount was attached */
+#define FS_MNT_DETACH 0x02000000 /* Mount was detached */
+#define FS_MNT_MOVE (FS_MNT_ATTACH | FS_MNT_DETACH)
-#define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */
/*
* Set on inode mark that cares about things that happen to its children.
* Always set for dnotify and inotify.
@@ -56,10 +70,9 @@
*/
#define FS_EVENT_ON_CHILD 0x08000000
-#define FS_DN_RENAME 0x10000000 /* file renamed */
+#define FS_RENAME 0x10000000 /* File was renamed */
#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */
#define FS_ISDIR 0x40000000 /* event occurred against dir */
-#define FS_IN_ONESHOT 0x80000000 /* only send event once */
#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
@@ -69,10 +82,19 @@
* The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event
* when a directory entry inside a child subdir changes.
*/
-#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE)
+#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | FS_RENAME)
+
+/* Mount namespace events */
+#define FSNOTIFY_MNT_EVENTS (FS_MNT_ATTACH | FS_MNT_DETACH)
-#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \
- FS_OPEN_EXEC_PERM)
+/* Content events can be used to inspect file content */
+#define FSNOTIFY_CONTENT_PERM_EVENTS (FS_OPEN_PERM | FS_OPEN_EXEC_PERM | \
+ FS_ACCESS_PERM)
+/* Pre-content events can be used to fill file content */
+#define FSNOTIFY_PRE_CONTENT_EVENTS (FS_PRE_ACCESS)
+
+#define ALL_FSNOTIFY_PERM_EVENTS (FSNOTIFY_CONTENT_PERM_EVENTS | \
+ FSNOTIFY_PRE_CONTENT_EVENTS)
/*
* This is a list of all events that may get sent to a parent that is watching
@@ -93,13 +115,14 @@
/* Events that can be reported to backends */
#define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \
+ FSNOTIFY_MNT_EVENTS | \
FS_EVENTS_POSS_ON_CHILD | \
- FS_DELETE_SELF | FS_MOVE_SELF | FS_DN_RENAME | \
- FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED)
+ FS_DELETE_SELF | FS_MOVE_SELF | \
+ FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
+ FS_ERROR)
/* Extra flags that may be reported with event or control handling of events */
-#define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \
- FS_DN_MULTISHOT | FS_EVENT_ON_CHILD)
+#define ALL_FSNOTIFY_FLAGS (FS_ISDIR | FS_EVENT_ON_CHILD | FS_DN_MULTISHOT)
#define ALL_FSNOTIFY_BITS (ALL_FSNOTIFY_EVENTS | ALL_FSNOTIFY_FLAGS)
@@ -136,6 +159,7 @@ struct mem_cgroup;
* @dir: optional directory associated with event -
* if @file_name is not NULL, this is the directory that
* @file_name is relative to.
+ * Either @inode or @dir must be non-NULL.
* @file_name: optional file name associated with event
* @cookie: inotify rename cookie
*
@@ -155,7 +179,7 @@ struct fsnotify_ops {
const struct qstr *file_name, u32 cookie);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
- void (*free_event)(struct fsnotify_event *event);
+ void (*free_event)(struct fsnotify_group *group, struct fsnotify_event *event);
/* called on final put+free to free memory */
void (*free_mark)(struct fsnotify_mark *mark);
};
@@ -170,6 +194,17 @@ struct fsnotify_event {
};
/*
+ * fsnotify group priorities.
+ * Events are sent in order from highest priority to lowest priority.
+ */
+enum fsnotify_group_prio {
+ FSNOTIFY_PRIO_NORMAL = 0, /* normal notifiers, no permissions */
+ FSNOTIFY_PRIO_CONTENT, /* fanotify permission events */
+ FSNOTIFY_PRIO_PRE_CONTENT, /* fanotify pre-content events */
+ __FSNOTIFY_PRIO_NUM
+};
+
+/*
* A group is a "thing" that wants to receive notification about filesystem
* events. The mask holds the subset of event types this group cares about.
* refcnt on a group is up to the implementor and at any moment if it goes 0
@@ -194,16 +229,14 @@ struct fsnotify_group {
wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */
unsigned int q_len; /* events on the queue */
unsigned int max_events; /* maximum events allowed on the list */
- /*
- * Valid fsnotify group priorities. Events are send in order from highest
- * priority to lowest priority. We default to the lowest priority.
- */
- #define FS_PRIO_0 0 /* normal notifiers, no permissions */
- #define FS_PRIO_1 1 /* fanotify content based access control */
- #define FS_PRIO_2 2 /* fanotify pre-content access */
- unsigned int priority;
+ enum fsnotify_group_prio priority; /* priority for sending events */
bool shutdown; /* group is being shut down, don't queue more events */
+#define FSNOTIFY_GROUP_USER 0x01 /* user allocated group */
+#define FSNOTIFY_GROUP_DUPS 0x02 /* allow multiple marks per object */
+ int flags;
+ unsigned int owner_flags; /* stored flags of mark_mutex owner */
+
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
struct mutex mark_mutex; /* protect marks_list */
atomic_t user_waits; /* Number of tasks waiting for user
@@ -217,6 +250,7 @@ struct fsnotify_group {
* full */
struct mem_cgroup *memcg; /* memcg to charge allocations */
+ struct user_namespace *user_ns; /* user ns where group was created */
/* groups can define private fields here or use the void *private */
union {
@@ -238,16 +272,67 @@ struct fsnotify_group {
int flags; /* flags from fanotify_init() */
int f_flags; /* event_f_flags from fanotify_init() */
struct ucounts *ucounts;
+ mempool_t error_events_pool;
+ /* chained on perm_group_list */
+ struct list_head perm_grp_list;
} fanotify_data;
#endif /* CONFIG_FANOTIFY */
};
};
+/*
+ * These helpers are used to prevent deadlock when reclaiming inodes with
+ * evictable marks of the same group that is allocating a new mark.
+ */
+static inline void fsnotify_group_lock(struct fsnotify_group *group)
+{
+ mutex_lock(&group->mark_mutex);
+ group->owner_flags = memalloc_nofs_save();
+}
+
+static inline void fsnotify_group_unlock(struct fsnotify_group *group)
+{
+ memalloc_nofs_restore(group->owner_flags);
+ mutex_unlock(&group->mark_mutex);
+}
+
+static inline void fsnotify_group_assert_locked(struct fsnotify_group *group)
+{
+ WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
+ WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
+}
+
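
A short sketch of the lock pair in use, assuming a backend attaching an inode
mark (the helper name is illustrative; the add function is declared later in
this header):

	static int my_attach_inode_mark(struct fsnotify_group *group,
					struct fsnotify_mark *mark,
					struct inode *inode)
	{
		int ret;

		fsnotify_group_lock(group);	/* mark_mutex + NOFS scope */
		ret = fsnotify_add_inode_mark_locked(mark, inode, 0);
		fsnotify_group_unlock(group);	/* restores saved NOFS state */

		return ret;
	}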
/* When calling fsnotify tell it if the data is a path or inode */
enum fsnotify_data_type {
FSNOTIFY_EVENT_NONE,
+ FSNOTIFY_EVENT_FILE_RANGE,
FSNOTIFY_EVENT_PATH,
FSNOTIFY_EVENT_INODE,
+ FSNOTIFY_EVENT_DENTRY,
+ FSNOTIFY_EVENT_MNT,
+ FSNOTIFY_EVENT_ERROR,
+};
+
+struct fs_error_report {
+ int error;
+ struct inode *inode;
+ struct super_block *sb;
+};
+
+struct file_range {
+ const struct path *path;
+ loff_t pos;
+ size_t count;
+};
+
+static inline const struct path *file_range_path(const struct file_range *range)
+{
+ return range->path;
+}
+
+struct fsnotify_mnt {
+ const struct mnt_namespace *ns;
+ u64 mnt_id;
};
static inline struct inode *fsnotify_data_inode(const void *data, int data_type)
@@ -255,8 +340,29 @@ static inline struct inode *fsnotify_data_inode(const void *data, int data_type)
switch (data_type) {
case FSNOTIFY_EVENT_INODE:
return (struct inode *)data;
+ case FSNOTIFY_EVENT_DENTRY:
+ return d_inode(data);
case FSNOTIFY_EVENT_PATH:
return d_inode(((const struct path *)data)->dentry);
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return d_inode(file_range_path(data)->dentry);
+ case FSNOTIFY_EVENT_ERROR:
+ return ((struct fs_error_report *)data)->inode;
+ default:
+ return NULL;
+ }
+}
+
+static inline struct dentry *fsnotify_data_dentry(const void *data, int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_DENTRY:
+ /* Non const is needed for dget() */
+ return (struct dentry *)data;
+ case FSNOTIFY_EVENT_PATH:
+ return ((const struct path *)data)->dentry;
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return file_range_path(data)->dentry;
default:
return NULL;
}
@@ -268,63 +374,151 @@ static inline const struct path *fsnotify_data_path(const void *data,
switch (data_type) {
case FSNOTIFY_EVENT_PATH:
return data;
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return file_range_path(data);
default:
return NULL;
}
}
+static inline struct super_block *fsnotify_data_sb(const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_INODE:
+ return ((struct inode *)data)->i_sb;
+ case FSNOTIFY_EVENT_DENTRY:
+ return ((struct dentry *)data)->d_sb;
+ case FSNOTIFY_EVENT_PATH:
+ return ((const struct path *)data)->dentry->d_sb;
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return file_range_path(data)->dentry->d_sb;
+ case FSNOTIFY_EVENT_ERROR:
+ return ((struct fs_error_report *) data)->sb;
+ default:
+ return NULL;
+ }
+}
+
+static inline const struct fsnotify_mnt *fsnotify_data_mnt(const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_MNT:
+ return data;
+ default:
+ return NULL;
+ }
+}
+
+static inline u64 fsnotify_data_mnt_id(const void *data, int data_type)
+{
+ const struct fsnotify_mnt *mnt_data = fsnotify_data_mnt(data, data_type);
+
+ return mnt_data ? mnt_data->mnt_id : 0;
+}
+
+static inline struct fs_error_report *fsnotify_data_error_report(
+ const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_ERROR:
+ return (struct fs_error_report *) data;
+ default:
+ return NULL;
+ }
+}
+
+static inline const struct file_range *fsnotify_data_file_range(
+ const void *data,
+ int data_type)
+{
+ switch (data_type) {
+ case FSNOTIFY_EVENT_FILE_RANGE:
+ return (struct file_range *)data;
+ default:
+ return NULL;
+ }
+}
+
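
A sketch of how a backend's handle_event() might use the accessors defined
below; the handler itself is hypothetical and elides event queueing:

	static int my_handle_event(struct fsnotify_group *group, u32 mask,
				   const void *data, int data_type,
				   struct inode *dir, const struct qstr *name,
				   u32 cookie, struct fsnotify_iter_info *iter)
	{
		struct inode *inode = fsnotify_data_inode(data, data_type);
		const struct path *path = fsnotify_data_path(data, data_type);
		struct fs_error_report *report =
			fsnotify_data_error_report(data, data_type);

		if (report)	/* only non-NULL for FSNOTIFY_EVENT_ERROR */
			pr_debug("fs error %d\n", report->error);
		else if (path)	/* path-based (fanotify-style) event */
			pr_debug("event 0x%x with path\n", mask);
		else if (inode)	/* inode-based (inotify-style) event */
			pr_debug("event 0x%x on inode\n", mask);

		return 0;
	}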
+/*
+ * Index to merged marks iterator array that correlates to a type of watch.
+ * The type of watched object can be deduced from the iterator type, but not
+ * the other way around, because an event can match different watched objects
+ * of the same object type.
+ * For example, both parent and child are watching an object of type inode.
+ */
+enum fsnotify_iter_type {
+ FSNOTIFY_ITER_TYPE_INODE,
+ FSNOTIFY_ITER_TYPE_VFSMOUNT,
+ FSNOTIFY_ITER_TYPE_SB,
+ FSNOTIFY_ITER_TYPE_PARENT,
+ FSNOTIFY_ITER_TYPE_INODE2,
+ FSNOTIFY_ITER_TYPE_MNTNS,
+ FSNOTIFY_ITER_TYPE_COUNT
+};
+
+/* The type of object that a mark is attached to */
enum fsnotify_obj_type {
+ FSNOTIFY_OBJ_TYPE_ANY = -1,
FSNOTIFY_OBJ_TYPE_INODE,
- FSNOTIFY_OBJ_TYPE_PARENT,
FSNOTIFY_OBJ_TYPE_VFSMOUNT,
FSNOTIFY_OBJ_TYPE_SB,
+ FSNOTIFY_OBJ_TYPE_MNTNS,
FSNOTIFY_OBJ_TYPE_COUNT,
FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT
};
-#define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE)
-#define FSNOTIFY_OBJ_TYPE_PARENT_FL (1U << FSNOTIFY_OBJ_TYPE_PARENT)
-#define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT)
-#define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB)
-#define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1)
-
-static inline bool fsnotify_valid_obj_type(unsigned int type)
+static inline bool fsnotify_valid_obj_type(unsigned int obj_type)
{
- return (type < FSNOTIFY_OBJ_TYPE_COUNT);
+ return (obj_type < FSNOTIFY_OBJ_TYPE_COUNT);
}
struct fsnotify_iter_info {
- struct fsnotify_mark *marks[FSNOTIFY_OBJ_TYPE_COUNT];
+ struct fsnotify_mark *marks[FSNOTIFY_ITER_TYPE_COUNT];
+ struct fsnotify_group *current_group;
unsigned int report_mask;
int srcu_idx;
};
static inline bool fsnotify_iter_should_report_type(
- struct fsnotify_iter_info *iter_info, int type)
+ struct fsnotify_iter_info *iter_info, int iter_type)
{
- return (iter_info->report_mask & (1U << type));
+ return (iter_info->report_mask & (1U << iter_type));
}
static inline void fsnotify_iter_set_report_type(
- struct fsnotify_iter_info *iter_info, int type)
+ struct fsnotify_iter_info *iter_info, int iter_type)
+{
+ iter_info->report_mask |= (1U << iter_type);
+}
+
+static inline struct fsnotify_mark *fsnotify_iter_mark(
+ struct fsnotify_iter_info *iter_info, int iter_type)
{
- iter_info->report_mask |= (1U << type);
+ if (fsnotify_iter_should_report_type(iter_info, iter_type))
+ return iter_info->marks[iter_type];
+ return NULL;
}
-static inline void fsnotify_iter_set_report_type_mark(
- struct fsnotify_iter_info *iter_info, int type,
- struct fsnotify_mark *mark)
+static inline int fsnotify_iter_step(struct fsnotify_iter_info *iter, int type,
+ struct fsnotify_mark **markp)
{
- iter_info->marks[type] = mark;
- iter_info->report_mask |= (1U << type);
+ while (type < FSNOTIFY_ITER_TYPE_COUNT) {
+ *markp = fsnotify_iter_mark(iter, type);
+ if (*markp)
+ break;
+ type++;
+ }
+ return type;
}
#define FSNOTIFY_ITER_FUNCS(name, NAME) \
static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \
struct fsnotify_iter_info *iter_info) \
{ \
- return (iter_info->report_mask & FSNOTIFY_OBJ_TYPE_##NAME##_FL) ? \
- iter_info->marks[FSNOTIFY_OBJ_TYPE_##NAME] : NULL; \
+ return fsnotify_iter_mark(iter_info, FSNOTIFY_ITER_TYPE_##NAME); \
}
FSNOTIFY_ITER_FUNCS(inode, INODE)
@@ -332,15 +526,13 @@ FSNOTIFY_ITER_FUNCS(parent, PARENT)
FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)
FSNOTIFY_ITER_FUNCS(sb, SB)
-#define fsnotify_foreach_obj_type(type) \
- for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++)
-
-/*
- * fsnotify_connp_t is what we embed in objects which connector can be attached
- * to. fsnotify_connp_t * is how we refer from connector back to object.
- */
-struct fsnotify_mark_connector;
-typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t;
+#define fsnotify_foreach_iter_type(type) \
+ for (type = 0; type < FSNOTIFY_ITER_TYPE_COUNT; type++)
+#define fsnotify_foreach_iter_mark_type(iter, mark, type) \
+ for (type = 0; \
+ type = fsnotify_iter_step(iter, type, &mark), \
+ type < FSNOTIFY_ITER_TYPE_COUNT; \
+ type++)
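
The new iteration macro replaces the old per-type flag checks; a sketch of the
pattern (the surrounding function is illustrative):

	static void my_walk_matched_marks(struct fsnotify_iter_info *iter)
	{
		struct fsnotify_mark *mark;
		int type;

		/* Visits only slots whose report bit is set; mark is non-NULL
		 * inside the loop body. */
		fsnotify_foreach_iter_mark_type(iter, mark, type) {
			pr_debug("iter type %d: mark mask 0x%x\n",
				 type, mark->mask);
		}
	}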
/*
* Inode/vfsmount/sb point to this structure which tracks all marks attached to
@@ -350,13 +542,14 @@ typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t;
*/
struct fsnotify_mark_connector {
spinlock_t lock;
- unsigned short type; /* Type of object [lock] */
-#define FSNOTIFY_CONN_FLAG_HAS_FSID 0x01
+ unsigned char type; /* Type of object [lock] */
+ unsigned char prio; /* Highest priority group */
+#define FSNOTIFY_CONN_FLAG_IS_WATCHED 0x01
+#define FSNOTIFY_CONN_FLAG_HAS_IREF 0x02
unsigned short flags; /* flags [lock] */
- __kernel_fsid_t fsid; /* fsid of filesystem containing object */
union {
/* Object pointer [lock] */
- fsnotify_connp_t *obj;
+ void *obj;
/* Used listing heads to free after srcu period expires */
struct fsnotify_mark_connector *destroy_next;
};
@@ -364,6 +557,37 @@ struct fsnotify_mark_connector {
};
/*
+ * Container for per-sb fsnotify state (sb marks and more).
+ * Attached lazily on first marked object on the sb and freed when killing sb.
+ */
+struct fsnotify_sb_info {
+ struct fsnotify_mark_connector __rcu *sb_marks;
+ /*
+ * Number of inode/mount/sb objects that are being watched in this sb.
+	 * Note that inode objects are currently double-accounted.
+ *
+ * The value in watched_objects[prio] is the number of objects that are
+ * watched by groups of priority >= prio, so watched_objects[0] is the
+ * total number of watched objects in this sb.
+ */
+ atomic_long_t watched_objects[__FSNOTIFY_PRIO_NUM];
+};
+
+static inline struct fsnotify_sb_info *fsnotify_sb_info(struct super_block *sb)
+{
+#ifdef CONFIG_FSNOTIFY
+ return READ_ONCE(sb->s_fsnotify_info);
+#else
+ return NULL;
+#endif
+}
+
+static inline atomic_long_t *fsnotify_sb_watched_objects(struct super_block *sb)
+{
+ return &fsnotify_sb_info(sb)->watched_objects[0];
+}
+
+/*
* A mark is simply an object attached to an in core inode which allows an
* fsnotify listener to indicate they are either no longer interested in events
* of a type matching mask or only interested in those events.
@@ -396,11 +620,20 @@ struct fsnotify_mark {
struct hlist_node obj_list;
/* Head of list of marks for an object [mark ref] */
struct fsnotify_mark_connector *connector;
- /* Events types to ignore [mark->lock, group->mark_mutex] */
- __u32 ignored_mask;
-#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x01
-#define FSNOTIFY_MARK_FLAG_ALIVE 0x02
-#define FSNOTIFY_MARK_FLAG_ATTACHED 0x04
+ /* Events types and flags to ignore [mark->lock, group->mark_mutex] */
+ __u32 ignore_mask;
+ /* General fsnotify mark flags */
+#define FSNOTIFY_MARK_FLAG_ALIVE 0x0001
+#define FSNOTIFY_MARK_FLAG_ATTACHED 0x0002
+ /* inotify mark flags */
+#define FSNOTIFY_MARK_FLAG_EXCL_UNLINK 0x0010
+#define FSNOTIFY_MARK_FLAG_IN_ONESHOT 0x0020
+ /* fanotify mark flags */
+#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x0100
+#define FSNOTIFY_MARK_FLAG_NO_IREF 0x0200
+#define FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS 0x0400
+#define FSNOTIFY_MARK_FLAG_HAS_FSID 0x0800
+#define FSNOTIFY_MARK_FLAG_WEAK_FSID 0x1000
unsigned int flags; /* flags [mark->lock] */
};
@@ -417,7 +650,10 @@ extern int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern void fsnotify_sb_delete(struct super_block *sb);
+extern void __fsnotify_mntns_delete(struct mnt_namespace *mntns);
+extern void fsnotify_sb_free(struct super_block *sb);
extern u32 fsnotify_get_cookie(void);
+extern void fsnotify_mnt(__u32 mask, struct mnt_namespace *ns, struct vfsmount *mnt);
static inline __u32 fsnotify_parent_needed_mask(__u32 mask)
{
@@ -434,12 +670,14 @@ static inline __u32 fsnotify_parent_needed_mask(__u32 mask)
static inline int fsnotify_inode_watches_children(struct inode *inode)
{
+ __u32 parent_mask = READ_ONCE(inode->i_fsnotify_mask);
+
/* FS_EVENT_ON_CHILD is set if the inode may care */
- if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD))
+ if (!(parent_mask & FS_EVENT_ON_CHILD))
return 0;
/* this inode might care about child events, does it care about the
* specific set of events that can happen on a child? */
- return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD;
+ return parent_mask & FS_EVENTS_POSS_ON_CHILD;
}
/*
@@ -453,7 +691,7 @@ static inline void fsnotify_update_flags(struct dentry *dentry)
/*
* Serialisation of setting PARENT_WATCHED on the dentries is provided
* by d_lock. If inotify_inode_watched changes after we have taken
- * d_lock, the following __fsnotify_update_child_dentry_flags call will
+ * d_lock, the following fsnotify_set_children_dentry_flags call will
* find our entry, so it will spin until we complete here, and update
* us with the new state.
*/
@@ -466,8 +704,9 @@ static inline void fsnotify_update_flags(struct dentry *dentry)
/* called from fsnotify listeners, such as fanotify or dnotify */
/* create a new group */
-extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops);
-extern struct fsnotify_group *fsnotify_alloc_user_group(const struct fsnotify_ops *ops);
+extern struct fsnotify_group *fsnotify_alloc_group(
+ const struct fsnotify_ops *ops,
+ int flags);
/* get reference to a group */
extern void fsnotify_get_group(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_alloc_group */
@@ -482,16 +721,30 @@ extern int fsnotify_fasync(int fd, struct file *file, int on);
extern void fsnotify_destroy_event(struct fsnotify_group *group,
struct fsnotify_event *event);
/* attach the event to the group notification queue */
-extern int fsnotify_add_event(struct fsnotify_group *group,
- struct fsnotify_event *event,
- int (*merge)(struct fsnotify_group *,
- struct fsnotify_event *),
- void (*insert)(struct fsnotify_group *,
- struct fsnotify_event *));
+extern int fsnotify_insert_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ int (*merge)(struct fsnotify_group *,
+ struct fsnotify_event *),
+ void (*insert)(struct fsnotify_group *,
+ struct fsnotify_event *));
+
+static inline int fsnotify_add_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ int (*merge)(struct fsnotify_group *,
+ struct fsnotify_event *))
+{
+ return fsnotify_insert_event(group, event, merge, NULL);
+}
+
/* Queue overflow event to a notification group */
static inline void fsnotify_queue_overflow(struct fsnotify_group *group)
{
- fsnotify_add_event(group, group->overflow_event, NULL, NULL);
+ fsnotify_add_event(group, group->overflow_event, NULL);
+}
+
+static inline bool fsnotify_is_overflow_event(u32 mask)
+{
+ return mask & FS_Q_OVERFLOW;
}
static inline bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
@@ -512,6 +765,101 @@ extern void fsnotify_remove_queued_event(struct fsnotify_group *group,
/* functions used to manipulate the marks attached to inodes */
+/*
+ * Canonical "ignore mask" including event flags.
+ *
+ * Note the subtle semantic difference from the legacy ->ignored_mask.
+ * ->ignored_mask traditionally only meant which events should be ignored,
+ * while ->ignore_mask also includes flags regarding the type of objects on
+ * which events should be ignored.
+ */
+static inline __u32 fsnotify_ignore_mask(struct fsnotify_mark *mark)
+{
+ __u32 ignore_mask = mark->ignore_mask;
+
+ /* The event flags in ignore mask take effect */
+ if (mark->flags & FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS)
+ return ignore_mask;
+
+ /*
+ * Legacy behavior:
+ * - Always ignore events on dir
+ * - Ignore events on child if parent is watching children
+ */
+ ignore_mask |= FS_ISDIR;
+ ignore_mask &= ~FS_EVENT_ON_CHILD;
+ ignore_mask |= mark->mask & FS_EVENT_ON_CHILD;
+
+ return ignore_mask;
+}
+
+/* Legacy ignored_mask - only event types to ignore */
+static inline __u32 fsnotify_ignored_events(struct fsnotify_mark *mark)
+{
+ return mark->ignore_mask & ALL_FSNOTIFY_EVENTS;
+}
+
+/*
+ * Check if mask (or ignore mask) should be applied depending if victim is a
+ * directory and whether it is reported to a watching parent.
+ */
+static inline bool fsnotify_mask_applicable(__u32 mask, bool is_dir,
+ int iter_type)
+{
+ /* Should mask be applied to a directory? */
+ if (is_dir && !(mask & FS_ISDIR))
+ return false;
+
+ /* Should mask be applied to a child? */
+ if (iter_type == FSNOTIFY_ITER_TYPE_PARENT &&
+ !(mask & FS_EVENT_ON_CHILD))
+ return false;
+
+ return true;
+}
+
+/*
+ * Effective ignore mask taking into account if event victim is a
+ * directory and whether it is reported to a watching parent.
+ */
+static inline __u32 fsnotify_effective_ignore_mask(struct fsnotify_mark *mark,
+ bool is_dir, int iter_type)
+{
+ __u32 ignore_mask = fsnotify_ignored_events(mark);
+
+ if (!ignore_mask)
+ return 0;
+
+ /* For non-dir and non-child, no need to consult the event flags */
+ if (!is_dir && iter_type != FSNOTIFY_ITER_TYPE_PARENT)
+ return ignore_mask;
+
+ ignore_mask = fsnotify_ignore_mask(mark);
+ if (!fsnotify_mask_applicable(ignore_mask, is_dir, iter_type))
+ return 0;
+
+ return ignore_mask & ALL_FSNOTIFY_EVENTS;
+}
+
+/* Get mask for calculating object interest taking ignore mask into account */
+static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark)
+{
+ __u32 mask = mark->mask;
+
+ if (!fsnotify_ignored_events(mark))
+ return mask;
+
+ /* Interest in FS_MODIFY may be needed for clearing ignore mask */
+ if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
+ mask |= FS_MODIFY;
+
+ /*
+ * If mark is interested in ignoring events on children, the object must
+ * show interest in those events for fsnotify_parent() to notice it.
+ */
+ return mask | mark->ignore_mask;
+}
+
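
As an illustration of the helpers above (hypothetical caller; this mirrors the
check the event delivery path performs):

	static bool my_event_suppressed(struct fsnotify_mark *mark, __u32 mask,
					bool is_dir, int iter_type)
	{
		/*
		 * For a legacy mark (no HAS_IGNORE_FLAGS), a directory event
		 * is suppressed even without FS_ISDIR in mark->ignore_mask,
		 * because fsnotify_ignore_mask() adds FS_ISDIR implicitly.
		 */
		return mask & fsnotify_effective_ignore_mask(mark, is_dir,
							     iter_type);
	}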
/* Get mask of events for a list of marks */
extern __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn);
/* Calculate mask of events for a list of marks */
@@ -519,35 +867,35 @@ extern void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn);
extern void fsnotify_init_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group);
/* Find mark belonging to given group in the list of marks */
-extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp,
- struct fsnotify_group *group);
-/* Get cached fsid of filesystem containing object */
-extern int fsnotify_get_conn_fsid(const struct fsnotify_mark_connector *conn,
- __kernel_fsid_t *fsid);
+struct fsnotify_mark *fsnotify_find_mark(void *obj, unsigned int obj_type,
+ struct fsnotify_group *group);
/* attach the mark to the object */
-extern int fsnotify_add_mark(struct fsnotify_mark *mark,
- fsnotify_connp_t *connp, unsigned int type,
- int allow_dups, __kernel_fsid_t *fsid);
-extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
- fsnotify_connp_t *connp,
- unsigned int type, int allow_dups,
- __kernel_fsid_t *fsid);
+int fsnotify_add_mark(struct fsnotify_mark *mark, void *obj,
+ unsigned int obj_type, int add_flags);
+int fsnotify_add_mark_locked(struct fsnotify_mark *mark, void *obj,
+ unsigned int obj_type, int add_flags);
/* attach the mark to the inode */
static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
struct inode *inode,
- int allow_dups)
+ int add_flags)
{
- return fsnotify_add_mark(mark, &inode->i_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_INODE, allow_dups, NULL);
+ return fsnotify_add_mark(mark, inode, FSNOTIFY_OBJ_TYPE_INODE,
+ add_flags);
}
static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark,
struct inode *inode,
- int allow_dups)
+ int add_flags)
+{
+ return fsnotify_add_mark_locked(mark, inode, FSNOTIFY_OBJ_TYPE_INODE,
+ add_flags);
+}
+
+static inline struct fsnotify_mark *fsnotify_find_inode_mark(
+ struct inode *inode,
+ struct fsnotify_group *group)
{
- return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks,
- FSNOTIFY_OBJ_TYPE_INODE, allow_dups,
- NULL);
+ return fsnotify_find_mark(inode, FSNOTIFY_OBJ_TYPE_INODE, group);
}
/* given a group and a mark, flag mark to be freed when all references are dropped */
@@ -559,23 +907,9 @@ extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
extern void fsnotify_free_mark(struct fsnotify_mark *mark);
/* Wait until all marks queued for destruction are destroyed */
extern void fsnotify_wait_marks_destroyed(void);
-/* run all the marks in a group, and clear all of the marks attached to given object type */
-extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int type);
-/* run all the marks in a group, and clear all of the vfsmount marks */
-static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
-{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL);
-}
-/* run all the marks in a group, and clear all of the inode marks */
-static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
-{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL);
-}
-/* run all the marks in a group, and clear all of the sn marks */
-static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group)
-{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB_FL);
-}
+/* Clear all of the marks of a group attached to a given object type */
+extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
+ unsigned int obj_type);
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
@@ -585,9 +919,17 @@ static inline void fsnotify_init_event(struct fsnotify_event *event)
{
INIT_LIST_HEAD(&event->list);
}
+int fsnotify_pre_content(const struct path *path, const loff_t *ppos,
+ size_t count);
#else
+static inline int fsnotify_pre_content(const struct path *path,
+ const loff_t *ppos, size_t count)
+{
+ return 0;
+}
+
static inline int fsnotify(__u32 mask, const void *data, int data_type,
struct inode *dir, const struct qstr *name,
struct inode *inode, u32 cookie)
@@ -610,6 +952,12 @@ static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
static inline void fsnotify_sb_delete(struct super_block *sb)
{}
+static inline void __fsnotify_mntns_delete(struct mnt_namespace *mntns)
+{}
+
+static inline void fsnotify_sb_free(struct super_block *sb)
+{}
+
static inline void fsnotify_update_flags(struct dentry *dentry)
{}
@@ -621,6 +969,9 @@ static inline u32 fsnotify_get_cookie(void)
static inline void fsnotify_unmount_inodes(struct super_block *sb)
{}
+static inline void fsnotify_mnt(__u32 mask, struct mnt_namespace *ns, struct vfsmount *mnt)
+{}
+
#endif /* CONFIG_FSNOTIFY */
#endif /* __KERNEL__ */
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index b568b3c7d095..5bc7280425a7 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -12,10 +12,30 @@
#define _LINUX_FSVERITY_H
#include <linux/fs.h>
+#include <linux/mm.h>
+#include <crypto/hash_info.h>
+#include <crypto/sha2.h>
#include <uapi/linux/fsverity.h>
+/*
+ * Largest digest size among all hash algorithms supported by fs-verity.
+ * Currently assumed to be <= size of fsverity_descriptor::root_hash.
+ */
+#define FS_VERITY_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
+
+/* Arbitrary limit to bound the kmalloc() size. Can be changed. */
+#define FS_VERITY_MAX_DESCRIPTOR_SIZE 16384
+
+struct fsverity_info;
+
/* Verity operations for filesystems */
struct fsverity_operations {
+ /**
+ * The offset of the pointer to struct fsverity_info in the
+ * filesystem-specific part of the inode, relative to the beginning of
+ * the common part of the inode (the 'struct inode').
+ */
+ ptrdiff_t inode_info_offs;
/**
* Begin enabling verity on the given file.
@@ -82,8 +102,7 @@ struct fsverity_operations {
* isn't already cached. Implementations may ignore this
* argument; it's only a performance optimization.
*
- * This can be called at any time on an open verity file, as well as
- * between ->begin_enable_verity() and ->end_enable_verity(). It may be
+ * This can be called at any time on an open verity file. It may be
* called by multiple processes concurrently, even with the same page.
*
* Note that this must retrieve a *page*, not necessarily a *block*.
@@ -98,9 +117,9 @@ struct fsverity_operations {
* Write a Merkle tree block to the given inode.
*
* @inode: the inode for which the Merkle tree is being built
- * @buf: block to write
- * @index: 0-based index of the block within the Merkle tree
- * @log_blocksize: log base 2 of the Merkle tree block size
+ * @buf: the Merkle tree block to write
+ * @pos: the position of the block in the Merkle tree (in bytes)
+ * @size: the Merkle tree block size (in bytes)
*
* This is only called between ->begin_enable_verity() and
* ->end_enable_verity().
@@ -108,20 +127,42 @@ struct fsverity_operations {
* Return: 0 on success, -errno on failure
*/
int (*write_merkle_tree_block)(struct inode *inode, const void *buf,
- u64 index, int log_blocksize);
+ u64 pos, unsigned int size);
};
#ifdef CONFIG_FS_VERITY
+/*
+ * Returns the address of the verity info pointer within the filesystem-specific
+ * part of the inode. (To save memory on filesystems that don't support
+ * fsverity, a field in 'struct inode' itself is no longer used.)
+ */
+static inline struct fsverity_info **
+fsverity_info_addr(const struct inode *inode)
+{
+ VFS_WARN_ON_ONCE(inode->i_sb->s_vop->inode_info_offs == 0);
+ return (void *)inode + inode->i_sb->s_vop->inode_info_offs;
+}
+
static inline struct fsverity_info *fsverity_get_info(const struct inode *inode)
{
/*
- * Pairs with the cmpxchg_release() in fsverity_set_info().
- * I.e., another task may publish ->i_verity_info concurrently,
- * executing a RELEASE barrier. We need to use smp_load_acquire() here
- * to safely ACQUIRE the memory the other task published.
+ * Since this function can be called on inodes belonging to filesystems
+ * that don't support fsverity at all, and fsverity_info_addr() doesn't
+ * work on such filesystems, we have to start with an IS_VERITY() check.
+ * Checking IS_VERITY() here is also useful to minimize the overhead of
+ * fsverity_active() on non-verity files.
+ */
+ if (!IS_VERITY(inode))
+ return NULL;
+
+ /*
+ * Pairs with the cmpxchg_release() in fsverity_set_info(). I.e.,
+ * another task may publish the inode's verity info concurrently,
+ * executing a RELEASE barrier. Use smp_load_acquire() here to safely
+ * ACQUIRE the memory the other task published.
*/
- return smp_load_acquire(&inode->i_verity_info);
+ return smp_load_acquire(fsverity_info_addr(inode));
}
/* enable.c */
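
A sketch of how a filesystem is expected to report inode_info_offs, assuming
the common embedded-inode layout (all names below are hypothetical):

	struct myfs_inode_info {
		struct fsverity_info *i_verity_info;
		/* ... other filesystem-private fields ... */
		struct inode vfs_inode;		/* the common part */
	};

	static const struct fsverity_operations myfs_verityops = {
		.inode_info_offs =
			(int)offsetof(struct myfs_inode_info, i_verity_info) -
			(int)offsetof(struct myfs_inode_info, vfs_inode),
		/* .begin_enable_verity, .read_merkle_tree_page, ... */
	};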
@@ -131,12 +172,34 @@ int fsverity_ioctl_enable(struct file *filp, const void __user *arg);
/* measure.c */
int fsverity_ioctl_measure(struct file *filp, void __user *arg);
+int fsverity_get_digest(struct inode *inode,
+ u8 raw_digest[FS_VERITY_MAX_DIGEST_SIZE],
+ u8 *alg, enum hash_algo *halg);
/* open.c */
-int fsverity_file_open(struct inode *inode, struct file *filp);
-int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr);
-void fsverity_cleanup_inode(struct inode *inode);
+int __fsverity_file_open(struct inode *inode, struct file *filp);
+int __fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr);
+void __fsverity_cleanup_inode(struct inode *inode);
+
+/**
+ * fsverity_cleanup_inode() - free the inode's verity info, if present
+ * @inode: an inode being evicted
+ *
+ * Filesystems must call this on inode eviction to free the inode's verity info.
+ */
+static inline void fsverity_cleanup_inode(struct inode *inode)
+{
+ /*
+ * Only IS_VERITY() inodes can have verity info, so start by checking
+ * for IS_VERITY() (which is faster than retrieving the pointer to the
+ * verity info). This minimizes overhead for non-verity inodes.
+ */
+ if (IS_VERITY(inode))
+ __fsverity_cleanup_inode(inode);
+ else
+ VFS_WARN_ON_ONCE(*fsverity_info_addr(inode) != NULL);
+}
/* read_metadata.c */
@@ -144,7 +207,7 @@ int fsverity_ioctl_read_metadata(struct file *filp, const void __user *uarg);
/* verify.c */
-bool fsverity_verify_page(struct page *page);
+bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset);
void fsverity_verify_bio(struct bio *bio);
void fsverity_enqueue_verify_work(struct work_struct *work);
@@ -170,17 +233,28 @@ static inline int fsverity_ioctl_measure(struct file *filp, void __user *arg)
return -EOPNOTSUPP;
}
+static inline int fsverity_get_digest(struct inode *inode,
+ u8 raw_digest[FS_VERITY_MAX_DIGEST_SIZE],
+ u8 *alg, enum hash_algo *halg)
+{
+ /*
+ * fsverity is not enabled in the kernel configuration, so always report
+ * that the file doesn't have fsverity enabled (digest size 0).
+ */
+ return 0;
+}
+
/* open.c */
-static inline int fsverity_file_open(struct inode *inode, struct file *filp)
+static inline int __fsverity_file_open(struct inode *inode, struct file *filp)
{
- return IS_VERITY(inode) ? -EOPNOTSUPP : 0;
+ return -EOPNOTSUPP;
}
-static inline int fsverity_prepare_setattr(struct dentry *dentry,
- struct iattr *attr)
+static inline int __fsverity_prepare_setattr(struct dentry *dentry,
+ struct iattr *attr)
{
- return IS_VERITY(d_inode(dentry)) ? -EOPNOTSUPP : 0;
+ return -EOPNOTSUPP;
}
static inline void fsverity_cleanup_inode(struct inode *inode)
@@ -197,34 +271,45 @@ static inline int fsverity_ioctl_read_metadata(struct file *filp,
/* verify.c */
-static inline bool fsverity_verify_page(struct page *page)
+static inline bool fsverity_verify_blocks(struct folio *folio, size_t len,
+ size_t offset)
{
- WARN_ON(1);
+ WARN_ON_ONCE(1);
return false;
}
static inline void fsverity_verify_bio(struct bio *bio)
{
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
static inline void fsverity_enqueue_verify_work(struct work_struct *work)
{
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
#endif /* !CONFIG_FS_VERITY */
+static inline bool fsverity_verify_folio(struct folio *folio)
+{
+ return fsverity_verify_blocks(folio, folio_size(folio), 0);
+}
+
+static inline bool fsverity_verify_page(struct page *page)
+{
+ return fsverity_verify_blocks(page_folio(page), PAGE_SIZE, 0);
+}
+
/**
* fsverity_active() - do reads from the inode need to go through fs-verity?
* @inode: inode to check
*
- * This checks whether ->i_verity_info has been set.
+ * This checks whether the inode's verity info has been set.
*
- * Filesystems call this from ->readpages() to check whether the pages need to
+ * Filesystems call this from ->readahead() to check whether the pages need to
* be verified or not. Don't use IS_VERITY() for this purpose; it's subject to
* a race condition where the file is being read concurrently with
- * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.)
+ * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before the verity info.)
*
* Return: true if reads need to go through fs-verity, otherwise false
*/
@@ -233,4 +318,42 @@ static inline bool fsverity_active(const struct inode *inode)
return fsverity_get_info(inode) != NULL;
}
+/**
+ * fsverity_file_open() - prepare to open a verity file
+ * @inode: the inode being opened
+ * @filp: the struct file being set up
+ *
+ * When opening a verity file, deny the open if it is for writing. Otherwise,
+ * set up the inode's verity info if not already done.
+ *
+ * When combined with fscrypt, this must be called after fscrypt_file_open().
+ * Otherwise, we won't have the key set up to decrypt the verity metadata.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static inline int fsverity_file_open(struct inode *inode, struct file *filp)
+{
+ if (IS_VERITY(inode))
+ return __fsverity_file_open(inode, filp);
+ return 0;
+}
+
+/**
+ * fsverity_prepare_setattr() - prepare to change a verity inode's attributes
+ * @dentry: dentry through which the inode is being changed
+ * @attr: attributes to change
+ *
+ * Verity files are immutable, so deny truncates. This isn't covered by the
+ * open-time check because sys_truncate() takes a path, not a file descriptor.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static inline int fsverity_prepare_setattr(struct dentry *dentry,
+ struct iattr *attr)
+{
+ if (IS_VERITY(d_inode(dentry)))
+ return __fsverity_prepare_setattr(dentry, attr);
+ return 0;
+}
+
#endif /* _LINUX_FSVERITY_H */
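
A sketch of the open-path wrapper in use (myfs_open is illustrative;
generic_file_open is the stock VFS helper):

	static int myfs_open(struct inode *inode, struct file *filp)
	{
		int err;

		/* Cheap no-op unless IS_VERITY(inode); may deny write opens. */
		err = fsverity_file_open(inode, filp);
		if (err)
			return err;

		return generic_file_open(inode, filp);
	}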
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a69f363b61bf..770f0dc993cc 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -9,6 +9,7 @@
#include <linux/trace_recursion.h>
#include <linux/trace_clock.h>
+#include <linux/jump_label.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
@@ -30,16 +31,45 @@
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif
+#ifdef CONFIG_TRACING
+extern void ftrace_boot_snapshot(void);
+#else
+static inline void ftrace_boot_snapshot(void) { }
+#endif
+
+struct ftrace_ops;
+struct ftrace_regs;
+struct dyn_ftrace;
+
+char *arch_ftrace_match_adjust(char *str, const char *search);
+
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
+unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs);
+#else
+unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
+#endif
+
+#ifdef CONFIG_FUNCTION_TRACER
/*
* If the arch's mcount caller does not support all of ftrace's
* features, then it must call an indirect function that
* does. Or at least does enough to prevent any unwelcome side effects.
+ *
+ * Also define the function prototype that these architectures use
+ * to call the ftrace_ops_list_func().
*/
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
+void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
#else
# define FTRACE_FORCE_LIST_FUNC 0
+void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif
+extern const struct ftrace_ops ftrace_nop_ops;
+extern const struct ftrace_ops ftrace_list_ops;
+struct ftrace_ops *ftrace_find_unique_ops(struct dyn_ftrace *rec);
+#endif /* CONFIG_FUNCTION_TRACER */
/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
@@ -52,19 +82,18 @@ static inline void early_trace_init(void) { }
struct module;
struct ftrace_hash;
-struct ftrace_direct_func;
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
defined(CONFIG_DYNAMIC_FTRACE)
-const char *
+int
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym);
#else
-static inline const char *
+static inline int
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
{
- return NULL;
+ return 0;
}
#endif
@@ -83,29 +112,76 @@ static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *val
#ifdef CONFIG_FUNCTION_TRACER
-extern int ftrace_enabled;
-extern int
-ftrace_enable_sysctl(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
+#include <linux/ftrace_regs.h>
-struct ftrace_ops;
-
-#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+extern int ftrace_enabled;
+/**
+ * ftrace_regs - ftrace partial/optimal register set
+ *
+ * ftrace_regs represents a group of registers which is used at the
+ * function entry and exit. There are three types of registers.
+ *
+ * - Registers for passing the parameters to callee, including the stack
+ * pointer. (e.g. rcx, rdx, rdi, rsi, r8, r9 and rsp on x86_64)
+ * - Registers for passing the return values to caller.
+ * (e.g. rax and rdx on x86_64)
+ * - Registers for hooking the function call and return including the
+ * frame pointer (the frame pointer is architecture/config dependent)
+ * (e.g. rip, rbp and rsp for x86_64)
+ *
+ * Also, architecture dependent fields can be used for internal processing.
+ * (e.g. orig_ax on x86_64)
+ *
+ * Basically, ftrace_regs stores the registers related to the context.
+ * On function entry, registers for function parameters and hooking the
+ * function call are stored, and on function exit, registers for function
+ * return value and frame pointers are stored.
+ *
+ * Which registers are restored from the ftrace_regs also depends on the
+ * context.
+ * On function entry, those registers will be restored except for
+ * the stack pointer, so that the user can change the function parameters
+ * and the instruction pointer (e.g. for live patching.)
+ * On function exit, only the registers used for return values
+ * are restored.
+ *
+ * NOTE: user *must not* access regs directly, only do it via APIs, because
+ * the member can be changed according to the architecture.
+ * This is why the structure is empty here, so that nothing accesses
+ * the ftrace_regs directly.
+ */
struct ftrace_regs {
- struct pt_regs regs;
+ /* Nothing to see here, use the accessor functions! */
};
-#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)
+
+#define ftrace_regs_size() sizeof(struct __arch_ftrace_regs)
+
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+/*
+ * Architectures that define HAVE_DYNAMIC_FTRACE_WITH_ARGS must define their own
+ * arch_ftrace_get_regs() where it only returns pt_regs *if* it is fully
+ * populated. It should return NULL otherwise.
+ */
+static inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
+{
+ return &arch_ftrace_regs(fregs)->regs;
+}
/*
- * ftrace_instruction_pointer_set() is to be defined by the architecture
- * if to allow setting of the instruction pointer from the ftrace_regs
- * when HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports
- * live kernel patching.
+ * ftrace_regs_set_instruction_pointer() is to be defined by the architecture
+ * if it allows setting of the instruction pointer from the ftrace_regs when
+ * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel patching.
*/
-#define ftrace_instruction_pointer_set(fregs, ip) do { } while (0)
+#define ftrace_regs_set_instruction_pointer(fregs, ip) do { } while (0)
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
+#ifdef CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS
+
+static_assert(sizeof(struct pt_regs) == ftrace_regs_size());
+
+#endif /* CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */
+
static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
{
if (!fregs)
@@ -114,6 +190,91 @@ static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs
return arch_ftrace_get_regs(fregs);
}
+#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) || \
+ defined(CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS)
+
+#ifndef arch_ftrace_partial_regs
+#define arch_ftrace_partial_regs(regs) do {} while (0)
+#endif
+
+static __always_inline struct pt_regs *
+ftrace_partial_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
+{
+ /*
+ * If CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS=y, the ftrace_regs memory
+ * layout includes pt_regs, so always return that address.
+ * arch_ftrace_get_regs() cannot be used here, since it checks some
+ * members and may return NULL.
+ */
+ regs = &arch_ftrace_regs(fregs)->regs;
+
+ /* Allow arch specific updates to regs. */
+ arch_ftrace_partial_regs(regs);
+ return regs;
+}
+
+#endif /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS || CONFIG_HAVE_FTRACE_REGS_HAVING_PT_REGS */
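A hedged sketch of the intended call pattern on configurations where ftrace_partial_regs() is available: prefer the fully populated pt_regs from ftrace_get_regs(), and fall back to ftrace_partial_regs() (which may copy into the caller-supplied buffer on some architectures) when it returns NULL. The function name is hypothetical:

/* Sketch: full pt_regs when available, partial copy otherwise. */
static void report_ip(struct ftrace_regs *fregs)
{
	struct pt_regs buf;
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (!regs)				/* not fully populated */
		regs = ftrace_partial_regs(fregs, &buf);

	pr_debug("ip=%lx\n", instruction_pointer(regs));
}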
+
+#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+
+/*
+ * Please define an arch dependent pt_regs which is compatible with
+ * perf_arch_fetch_caller_regs() but based on ftrace_regs.
+ * This requires
+ *   - user_mode(_regs) returns false (always kernel mode).
+ *   - the _regs can be used for a stack trace.
+ */
+#ifndef arch_ftrace_fill_perf_regs
+/* The same as perf_arch_fetch_caller_regs(): do nothing by default */
+#define arch_ftrace_fill_perf_regs(fregs, _regs) do {} while (0)
+#endif
+
+static __always_inline struct pt_regs *
+ftrace_fill_perf_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
+{
+ arch_ftrace_fill_perf_regs(fregs, regs);
+ return regs;
+}
+
+#else /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
+
+static __always_inline struct pt_regs *
+ftrace_fill_perf_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
+{
+ return &arch_ftrace_regs(fregs)->regs;
+}
+
+#endif
+
+/*
+ * When true, the ftrace_regs_{get,set}_*() functions may be used on fregs.
+ * Note: this can be true even when ftrace_get_regs() cannot provide a pt_regs.
+ */
+static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs)
+{
+ if (IS_ENABLED(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS))
+ return true;
+
+ return ftrace_get_regs(fregs) != NULL;
+}
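In other words, the argument accessors are always usable when HAVE_DYNAMIC_FTRACE_WITH_ARGS is enabled, and otherwise only when a full pt_regs is present. A short hypothetical guard:

/* Sketch: only read arguments when fregs actually carries them. */
static void log_first_arg(struct ftrace_regs *fregs)
{
	if (!ftrace_regs_has_args(fregs))
		return;

	pr_debug("arg0=%lx\n", ftrace_regs_get_argument(fregs, 0));
}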
+
+#ifdef CONFIG_HAVE_REGS_AND_STACK_ACCESS_API
+static __always_inline unsigned long
+ftrace_regs_get_kernel_stack_nth(struct ftrace_regs *fregs, unsigned int nth)
+{
+ unsigned long *stackp;
+
+ stackp = (unsigned long *)ftrace_regs_get_stack_pointer(fregs);
+ if (((unsigned long)(stackp + nth) & ~(THREAD_SIZE - 1)) ==
+ ((unsigned long)stackp & ~(THREAD_SIZE - 1)))
+ return *(stackp + nth);
+
+ return 0;
+}
+#else /* !CONFIG_HAVE_REGS_AND_STACK_ACCESS_API */
+#define ftrace_regs_get_kernel_stack_nth(fregs, nth) (0L)
+#endif /* CONFIG_HAVE_REGS_AND_STACK_ACCESS_API */
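The bounds check above relies on kernel stacks being THREAD_SIZE bytes and THREAD_SIZE-aligned: stackp + nth is dereferenced only when its THREAD_SIZE-aligned base matches that of stackp, i.e. when it still lies on the current stack. A hypothetical use, e.g. for arguments passed on the stack rather than in registers:

/* Sketch: n-th word above the entry stack pointer, 0 if off-stack. */
static unsigned long nth_stack_word(struct ftrace_regs *fregs, unsigned int n)
{
	return ftrace_regs_get_kernel_stack_nth(fregs, n);
}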
+
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs);
@@ -174,6 +335,8 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* ftrace_enabled.
* DIRECT - Used by the direct ftrace_ops helper for direct functions
* (internal ftrace only, should not be used by others)
+ * SUBOP - Is controlled by another op in field managed.
+ * GRAPH - Is a component of the fgraph_ops structure
*/
enum {
FTRACE_OPS_FL_ENABLED = BIT(0),
@@ -194,8 +357,54 @@ enum {
FTRACE_OPS_FL_TRACE_ARRAY = BIT(15),
FTRACE_OPS_FL_PERMANENT = BIT(16),
FTRACE_OPS_FL_DIRECT = BIT(17),
+ FTRACE_OPS_FL_SUBOP = BIT(18),
+ FTRACE_OPS_FL_GRAPH = BIT(19),
+ FTRACE_OPS_FL_JMP = BIT(20),
+};
+
+#ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
+#define FTRACE_OPS_FL_SAVE_ARGS FTRACE_OPS_FL_SAVE_REGS
+#else
+#define FTRACE_OPS_FL_SAVE_ARGS 0
+#endif
+
+/*
+ * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes
+ * to a ftrace_ops. Note, the requests may fail.
+ *
+ * ENABLE_SHARE_IPMODIFY_SELF - enable a DIRECT ops to work on the same
+ * function as an ops with IPMODIFY. Called
+ * when the DIRECT ops is being registered.
+ * This is called with both direct_mutex and
+ * ftrace_lock held.
+ *
+ * ENABLE_SHARE_IPMODIFY_PEER - enable a DIRECT ops to work on the same
+ * function as an ops with IPMODIFY. Called
+ * when the other ops (the one with IPMODIFY)
+ * is being registered.
+ * This is called with direct_mutex locked.
+ *
+ * DISABLE_SHARE_IPMODIFY_PEER - disable a DIRECT ops to work on the same
+ * function as an ops with IPMODIFY. Called
+ * when the other ops (the one with IPMODIFY)
+ * is being unregistered.
+ * This is called with direct_mutex locked.
+ */
+enum ftrace_ops_cmd {
+ FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF,
+ FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER,
+ FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER,
};
+/*
+ * For most ftrace_ops_cmd values,
+ * Returns:
+ * 0 - Success.
+ * Negative on failure; the exact value depends on the callback.
+ */
+typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd);
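A hedged sketch of an ops_func implementation for a DIRECT ops that cannot share a function with IPMODIFY users (the function name is hypothetical; the command handling mirrors the semantics described above):

/* Sketch: refuse to share a function with an IPMODIFY ops. */
static int my_ops_func(struct ftrace_ops *op, enum ftrace_ops_cmd cmd)
{
	switch (cmd) {
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF:
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
		return -EBUSY;	/* cannot coexist with IPMODIFY */
	case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
		return 0;
	}
	return -EINVAL;
}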
+
#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
@@ -207,7 +416,10 @@ struct ftrace_ops_hash {
void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
-static inline void ftrace_free_init_mem(void) { }
+static inline void ftrace_free_init_mem(void)
+{
+ ftrace_boot_snapshot();
+}
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif
@@ -235,6 +447,12 @@ struct ftrace_ops {
unsigned long trampoline;
unsigned long trampoline_size;
struct list_head list;
+ struct list_head subop_list;
+ ftrace_ops_func_t ops_func;
+ struct ftrace_ops *managed;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ unsigned long direct_call;
+#endif
#endif
};
@@ -285,6 +503,8 @@ int unregister_ftrace_function(struct ftrace_ops *ops);
extern void ftrace_stub(unsigned long a0, unsigned long a1,
struct ftrace_ops *op, struct ftrace_regs *fregs);
+
+int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs);
#else /* !CONFIG_FUNCTION_TRACER */
/*
* (un)register_ftrace_function must be a macro since the ops parameter
@@ -295,6 +515,10 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1,
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
+static inline int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *addrs)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_FUNCTION_TRACER */
struct ftrace_func_entry {
@@ -303,52 +527,40 @@ struct ftrace_func_entry {
unsigned long direct; /* for direct lookup only */
};
-struct dyn_ftrace;
-
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
-extern int ftrace_direct_func_count;
-int register_ftrace_direct(unsigned long ip, unsigned long addr);
-int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
-int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
-struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
-int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
- struct dyn_ftrace *rec,
- unsigned long old_addr,
- unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
+int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
+int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
+ bool free_filters);
+int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
+int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr);
+
+void ftrace_stub_direct_tramp(void);
+
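With the ops-based signature, registering a direct trampoline is a two-step pattern: set the filter, then register. A minimal sketch along the lines of samples/ftrace (names hypothetical; the trampoline itself is arch-specific assembly):

/* Sketch: attach a custom trampoline directly to one function. */
static struct ftrace_ops direct_ops;

static int attach_direct(unsigned long ip, unsigned long tramp)
{
	int ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);

	return ret ? ret : register_ftrace_direct(&direct_ops, tramp);
}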
#else
-# define ftrace_direct_func_count 0
-static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
-{
- return -ENOTSUPP;
-}
-static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
+struct ftrace_ops;
+static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
- return -ENOTSUPP;
+ return 0;
}
-static inline int modify_ftrace_direct(unsigned long ip,
- unsigned long old_addr, unsigned long new_addr)
+static inline int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
- return -ENOTSUPP;
+ return -ENODEV;
}
-static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
+static inline int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
+ bool free_filters)
{
- return NULL;
+ return -ENODEV;
}
-static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
- struct dyn_ftrace *rec,
- unsigned long old_addr,
- unsigned long new_addr)
+static inline int modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
{
return -ENODEV;
}
-static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
+static inline int modify_ftrace_direct_nolock(struct ftrace_ops *ops, unsigned long addr)
{
- return 0;
+ return -ENODEV;
}
-#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
-#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
* This must be implemented by the architecture.
* It is the way the ftrace direct_ops helper, when called
@@ -362,15 +574,45 @@ static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
* the return from the trampoline jump to the direct caller
* instead of going back to the function it just traced.
*/
-static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
+static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs,
unsigned long addr) { }
-#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
-#ifdef CONFIG_STACK_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_JMP
+static inline bool ftrace_is_jmp(unsigned long addr)
+{
+ return addr & 1;
+}
+
+static inline unsigned long ftrace_jmp_set(unsigned long addr)
+{
+ return addr | 1UL;
+}
+
+static inline unsigned long ftrace_jmp_get(unsigned long addr)
+{
+ return addr & ~1UL;
+}
+#else
+static inline bool ftrace_is_jmp(unsigned long addr)
+{
+ return false;
+}
+
+static inline unsigned long ftrace_jmp_set(unsigned long addr)
+{
+ return addr;
+}
-extern int stack_tracer_enabled;
+static inline unsigned long ftrace_jmp_get(unsigned long addr)
+{
+ return addr;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_JMP */
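Bit 0 is presumably free for the tag because trampoline addresses are at least 2-byte aligned; consumers must strip it before using the address. A hypothetical round-trip:

/* Sketch: tag an address as "jump to", then recover the real address. */
static unsigned long demo_jmp_tag(unsigned long tramp)
{
	unsigned long tagged = ftrace_jmp_set(tramp);	/* sets bit 0 */

	return ftrace_is_jmp(tagged) ? ftrace_jmp_get(tagged) : tagged;
}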
-int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
+#ifdef CONFIG_STACK_TRACER
+
+int stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos);
/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
@@ -412,10 +654,34 @@ static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif
+enum {
+ FTRACE_UPDATE_CALLS = (1 << 0),
+ FTRACE_DISABLE_CALLS = (1 << 1),
+ FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
+ FTRACE_START_FUNC_RET = (1 << 3),
+ FTRACE_STOP_FUNC_RET = (1 << 4),
+ FTRACE_MAY_SLEEP = (1 << 5),
+};
+
+/* Arches can override ftrace_get_symaddr() to convert fentry_ip to symaddr. */
+#ifndef ftrace_get_symaddr
+/**
+ * ftrace_get_symaddr - return the symbol address from fentry_ip
+ * @fentry_ip: the address of ftrace location
+ *
+ * Get the symbol address from @fentry_ip (fast path). If there is no fast
+ * search path, this returns 0.
+ * Users may need to use the kallsyms API to find the symbol address.
+ */
+#define ftrace_get_symaddr(fentry_ip) (0)
+#endif
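A hedged sketch of the documented fallback: try the arch fast path first, then resolve through kallsyms (the helper name is hypothetical):

/* Sketch: resolve the symbol start address for an fentry address. */
static unsigned long resolve_symaddr(unsigned long fentry_ip)
{
	unsigned long size, offset;
	unsigned long addr = ftrace_get_symaddr(fentry_ip);

	if (!addr && kallsyms_lookup_size_offset(fentry_ip, &size, &offset))
		addr = fentry_ip - offset;
	return addr;
}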
+
+void ftrace_sync_ipi(void *data);
+
#ifdef CONFIG_DYNAMIC_FTRACE
-int ftrace_arch_code_modify_prepare(void);
-int ftrace_arch_code_modify_post_process(void);
+void ftrace_arch_code_modify_prepare(void);
+void ftrace_arch_code_modify_post_process(void);
enum ftrace_bug_type {
FTRACE_BUG_UNKNOWN,
@@ -455,6 +721,10 @@ bool is_ftrace_trampoline(unsigned long addr);
* IPMODIFY - the record allows for the IP address to be changed.
* DISABLED - the record is not ready to be touched yet
* DIRECT - there is a direct function to call
+ * CALL_OPS - the record can use callsite-specific ops
+ * CALL_OPS_EN - the function is set up to use callsite-specific ops
+ * TOUCHED - A callback was added since boot up
+ * MODIFIED - The function had IPMODIFY or DIRECT attached to it
*
* When a new ftrace_ops is registered and wants a function to save
* pt_regs, the rec->flags REGS is set. When the function has been
@@ -472,9 +742,13 @@ enum {
FTRACE_FL_DISABLED = (1UL << 25),
FTRACE_FL_DIRECT = (1UL << 24),
FTRACE_FL_DIRECT_EN = (1UL << 23),
+ FTRACE_FL_CALL_OPS = (1UL << 22),
+ FTRACE_FL_CALL_OPS_EN = (1UL << 21),
+ FTRACE_FL_TOUCHED = (1UL << 20),
+ FTRACE_FL_MODIFIED = (1UL << 19),
};
-#define FTRACE_REF_MAX_SHIFT 23
+#define FTRACE_REF_MAX_SHIFT 19
#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
#define ftrace_rec_count(rec) ((rec)->flags & FTRACE_REF_MAX)
@@ -487,6 +761,8 @@ struct dyn_ftrace {
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
int remove, int reset);
+int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips,
+ unsigned int cnt, int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
@@ -496,15 +772,6 @@ void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
-enum {
- FTRACE_UPDATE_CALLS = (1 << 0),
- FTRACE_DISABLE_CALLS = (1 << 1),
- FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
- FTRACE_START_FUNC_RET = (1 << 3),
- FTRACE_STOP_FUNC_RET = (1 << 4),
- FTRACE_MAY_SLEEP = (1 << 5),
-};
-
/*
* The FTRACE_UPDATE_* enum is used to pass information back
* from the ftrace_update_record() and ftrace_test_record()
@@ -531,6 +798,8 @@ enum {
FTRACE_ITER_PROBE = (1 << 4),
FTRACE_ITER_MOD = (1 << 5),
FTRACE_ITER_ENABLED = (1 << 6),
+ FTRACE_ITER_TOUCHED = (1 << 7),
+ FTRACE_ITER_ADDRS = (1 << 8),
};
void arch_ftrace_update_code(int command);
@@ -572,7 +841,6 @@ void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
/* defined in arch */
-extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
@@ -643,6 +911,22 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
extern int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr);
+/**
+ * ftrace_need_init_nop - return whether nop call sites should be initialized
+ *
+ * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
+ * need to call ftrace_init_nop() if the code is built with that flag.
+ * Architectures where this is not always the case may define their own
+ * condition.
+ *
+ * Return must be:
+ * 0 if ftrace_init_nop() should be called
+ * Nonzero if ftrace_init_nop() should not be called
+ */
+
+#ifndef ftrace_need_init_nop
+#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
+#endif
/**
* ftrace_init_nop - initialize a nop call site
@@ -694,7 +978,9 @@ static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
*/
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || \
+ defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) || \
+ defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS)
/**
* ftrace_modify_call - convert from one addr to another (no nop)
* @rec: the call site record (e.g. mcount/fentry)
@@ -707,6 +993,9 @@ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
* what we expect it to be, and then on success of the compare,
* it should write to the location.
*
+ * When using call ops, this is called when the associated ops change, even
+ * when (addr == old_addr).
+ *
* The code segment at @rec->ip should be a caller to @old_addr
*
* Return must be:
@@ -727,20 +1016,12 @@ static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_a
}
#endif
-/* May be defined in arch */
-extern int ftrace_arch_read_dyn_info(char *buf, int size);
-
extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);
-
-extern void ftrace_disable_daemon(void);
-extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
-static inline void ftrace_disable_daemon(void) { }
-static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
@@ -761,6 +1042,7 @@ static inline unsigned long ftrace_location(unsigned long ip)
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
+#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
@@ -779,6 +1061,15 @@ static inline bool is_ftrace_trampoline(unsigned long addr)
}
#endif /* CONFIG_DYNAMIC_FTRACE */
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifndef ftrace_graph_func
+#define ftrace_graph_func ftrace_stub
+#define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
+#else
+#define FTRACE_OPS_GRAPH_STUB 0
+#endif
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);
@@ -834,7 +1125,7 @@ static inline void __ftrace_enabled_restore(int enabled)
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
-static inline unsigned long get_lock_parent_ip(void)
+static __always_inline unsigned long get_lock_parent_ip(void)
{
unsigned long addr = CALLER_ADDR0;
@@ -858,7 +1149,7 @@ static inline unsigned long get_lock_parent_ip(void)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
@@ -876,7 +1167,15 @@ static inline void ftrace_init(void) { }
*/
struct ftrace_graph_ent {
unsigned long func; /* Current function */
- int depth;
+ unsigned long depth;
+} __packed;
+
+/*
+ * Structure that defines an entry function trace with retaddr.
+ */
+struct fgraph_retaddr_ent {
+ struct ftrace_graph_ent ent;
+ unsigned long retaddr; /* Return address */
} __packed;
/*
@@ -886,26 +1185,44 @@ struct ftrace_graph_ent {
*/
struct ftrace_graph_ret {
unsigned long func; /* Current function */
+#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
+ unsigned long retval;
+#endif
int depth;
/* Number of functions that overran the depth limit for current task */
unsigned int overrun;
- unsigned long long calltime;
- unsigned long long rettime;
} __packed;
-/* Type of the callback handlers for tracing function graph*/
-typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
-typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+struct fgraph_ops;
-extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
+/* Type of the callback handlers for tracing function graph*/
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *,
+ struct fgraph_ops *,
+ struct ftrace_regs *); /* return */
+typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *,
+ struct fgraph_ops *,
+ struct ftrace_regs *); /* entry */
+
+extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
+ struct fgraph_ops *gops,
+ struct ftrace_regs *fregs);
+bool ftrace_pids_enabled(struct ftrace_ops *ops);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
struct fgraph_ops {
trace_func_graph_ent_t entryfunc;
trace_func_graph_ret_t retfunc;
+ struct ftrace_ops ops; /* for the hash lists */
+ void *private;
+ trace_func_graph_ent_t saved_func;
+ int idx;
};
+void *fgraph_reserve_data(int idx, int size_bytes);
+void *fgraph_retrieve_data(int idx, int *size_bytes);
+void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth);
+
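fgraph_reserve_data()/fgraph_retrieve_data() give each fgraph_ops per-call-frame storage keyed by gops->idx. A hedged sketch of the pairing (handler names are hypothetical; trace_clock_local() comes from <linux/trace_clock.h>):

/* Sketch: stash a timestamp at entry, read it back on return. */
static int my_entry(struct ftrace_graph_ent *trace,
		    struct fgraph_ops *gops, struct ftrace_regs *fregs)
{
	u64 *ts = fgraph_reserve_data(gops->idx, sizeof(*ts));

	if (ts)
		*ts = trace_clock_local();
	return 1;	/* trace this function */
}

static void my_return(struct ftrace_graph_ret *trace,
		      struct fgraph_ops *gops, struct ftrace_regs *fregs)
{
	int size;
	u64 *ts = fgraph_retrieve_data(gops->idx, &size);

	if (ts)
		pr_debug("%ps took %llu ns\n", (void *)trace->func,
			 trace_clock_local() - *ts);
}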
/*
* Stack of return addresses for functions
* of a thread.
@@ -914,16 +1231,10 @@ struct fgraph_ops {
struct ftrace_ret_stack {
unsigned long ret;
unsigned long func;
- unsigned long long calltime;
-#ifdef CONFIG_FUNCTION_PROFILER
- unsigned long long subtime;
-#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
unsigned long fp;
#endif
-#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long *retp;
-#endif
};
/*
@@ -934,14 +1245,23 @@ struct ftrace_ret_stack {
extern void return_to_handler(void);
extern int
-function_graph_enter(unsigned long ret, unsigned long func,
- unsigned long frame_pointer, unsigned long *retp);
+function_graph_enter_regs(unsigned long ret, unsigned long func,
+ unsigned long frame_pointer, unsigned long *retp,
+ struct ftrace_regs *fregs);
+
+static inline int function_graph_enter(unsigned long ret, unsigned long func,
+ unsigned long fp, unsigned long *retp)
+{
+ return function_graph_enter_regs(ret, func, fp, retp, NULL);
+}
struct ftrace_ret_stack *
-ftrace_graph_get_ret_stack(struct task_struct *task, int idx);
+ftrace_graph_get_ret_stack(struct task_struct *task, int skip);
+unsigned long ftrace_graph_top_ret_addr(struct task_struct *task);
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
unsigned long ret, unsigned long *retp);
+unsigned long *fgraph_get_task_var(struct fgraph_ops *gops);
/*
* Sometimes we don't want to trace a function with the function
@@ -956,7 +1276,20 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);
-extern bool ftrace_graph_is_dead(void);
+/**
+ * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
+ *
+ * ftrace_graph_stop() is called when a severe error is detected in
+ * the function graph tracing. This function is called by the critical
+ * paths of function graph to keep those paths from doing any more harm.
+ */
+DECLARE_STATIC_KEY_FALSE(kill_ftrace_graph);
+
+static inline bool ftrace_graph_is_dead(void)
+{
+ return static_branch_unlikely(&kill_ftrace_graph);
+}
+
extern void ftrace_graph_stop(void);
/* The current handlers in use */
@@ -967,6 +1300,9 @@ extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
+/* Used by assembly, but to quiet sparse warnings */
+extern struct ftrace_ops *function_trace_op;
+
static inline void pause_graph_tracing(void)
{
atomic_inc(&current->tracing_graph_pause);
@@ -1000,57 +1336,11 @@ static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_TRACING
-
-/* flags for current->trace */
-enum {
- TSK_TRACE_FL_TRACE_BIT = 0,
- TSK_TRACE_FL_GRAPH_BIT = 1,
-};
-enum {
- TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
- TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
-};
-
-static inline void set_tsk_trace_trace(struct task_struct *tsk)
-{
- set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
-}
-
-static inline void clear_tsk_trace_trace(struct task_struct *tsk)
-{
- clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
-}
-
-static inline int test_tsk_trace_trace(struct task_struct *tsk)
-{
- return tsk->trace & TSK_TRACE_FL_TRACE;
-}
-
-static inline void set_tsk_trace_graph(struct task_struct *tsk)
-{
- set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
-}
-
-static inline void clear_tsk_trace_graph(struct task_struct *tsk)
-{
- clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
-}
-
-static inline int test_tsk_trace_graph(struct task_struct *tsk)
-{
- return tsk->trace & TSK_TRACE_FL_GRAPH;
-}
-
enum ftrace_dump_mode;
-extern enum ftrace_dump_mode ftrace_dump_on_oops;
-extern int tracepoint_printk;
+extern int ftrace_dump_on_oops_enabled(void);
extern void disable_trace_on_warning(void);
-extern int __disable_trace_on_warning;
-
-int tracepoint_printk_sysctl(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 0abd9a1d2852..f6faa31289ba 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -7,12 +7,21 @@ extern bool trace_hwlat_callback_enabled;
extern void trace_hwlat_callback(bool enter);
#endif
+#ifdef CONFIG_OSNOISE_TRACER
+extern bool trace_osnoise_callback_enabled;
+extern void trace_osnoise_callback(bool enter);
+#endif
+
static inline void ftrace_nmi_enter(void)
{
#ifdef CONFIG_HWLAT_TRACER
if (trace_hwlat_callback_enabled)
trace_hwlat_callback(true);
#endif
+#ifdef CONFIG_OSNOISE_TRACER
+ if (trace_osnoise_callback_enabled)
+ trace_osnoise_callback(true);
+#endif
}
static inline void ftrace_nmi_exit(void)
@@ -21,6 +30,10 @@ static inline void ftrace_nmi_exit(void)
if (trace_hwlat_callback_enabled)
trace_hwlat_callback(false);
#endif
+#ifdef CONFIG_OSNOISE_TRACER
+ if (trace_osnoise_callback_enabled)
+ trace_osnoise_callback(false);
+#endif
}
#endif /* _LINUX_FTRACE_IRQ_H */
diff --git a/include/linux/ftrace_regs.h b/include/linux/ftrace_regs.h
new file mode 100644
index 000000000000..15627ceea9bc
--- /dev/null
+++ b/include/linux/ftrace_regs.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_FTRACE_REGS_H
+#define _LINUX_FTRACE_REGS_H
+
+/*
+ * For archs that just copy pt_regs into ftrace_regs, this default can be
+ * used. If an architecture does not use pt_regs, it must define all of the
+ * below accessor functions.
+ */
+#ifndef HAVE_ARCH_FTRACE_REGS
+struct __arch_ftrace_regs {
+ struct pt_regs regs;
+};
+
+#define arch_ftrace_regs(fregs) ((struct __arch_ftrace_regs *)(fregs))
+
+struct ftrace_regs;
+
+#define ftrace_regs_get_instruction_pointer(fregs) \
+ instruction_pointer(&arch_ftrace_regs(fregs)->regs)
+#define ftrace_regs_get_argument(fregs, n) \
+ regs_get_kernel_argument(&arch_ftrace_regs(fregs)->regs, n)
+#define ftrace_regs_get_stack_pointer(fregs) \
+ kernel_stack_pointer(&arch_ftrace_regs(fregs)->regs)
+#define ftrace_regs_get_return_value(fregs) \
+ regs_return_value(&arch_ftrace_regs(fregs)->regs)
+#define ftrace_regs_set_return_value(fregs, ret) \
+ regs_set_return_value(&arch_ftrace_regs(fregs)->regs, ret)
+#define ftrace_override_function_with_return(fregs) \
+ override_function_with_return(&arch_ftrace_regs(fregs)->regs)
+#define ftrace_regs_query_register_offset(name) \
+ regs_query_register_offset(name)
+#define ftrace_regs_get_frame_pointer(fregs) \
+ frame_pointer(&arch_ftrace_regs(fregs)->regs)
+
+#endif /* HAVE_ARCH_FTRACE_REGS */
+
+/* This can be overridden by the architectures */
+#ifndef FTRACE_REGS_MAX_ARGS
+# define FTRACE_REGS_MAX_ARGS 6
+#endif
+
+#endif /* _LINUX_FTRACE_REGS_H */
diff --git a/include/linux/futex.h b/include/linux/futex.h
index b70df27d7e85..9e9750f04980 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -4,11 +4,11 @@
#include <linux/sched.h>
#include <linux/ktime.h>
+#include <linux/mm_types.h>
#include <uapi/linux/futex.h>
struct inode;
-struct mm_struct;
struct task_struct;
/*
@@ -34,6 +34,7 @@ union futex_key {
u64 i_seq;
unsigned long pgoff;
unsigned int offset;
+ /* unsigned int node; */
} shared;
struct {
union {
@@ -42,11 +43,13 @@ union futex_key {
};
unsigned long address;
unsigned int offset;
+ /* unsigned int node; */
} private;
struct {
u64 ptr;
unsigned long word;
unsigned int offset;
+ unsigned int node; /* NOT hashed! */
} both;
};
@@ -77,7 +80,20 @@ void futex_exec_release(struct task_struct *tsk);
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3);
-#else
+int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4);
+
+#ifdef CONFIG_FUTEX_PRIVATE_HASH
+int futex_hash_allocate_default(void);
+void futex_hash_free(struct mm_struct *mm);
+int futex_mm_init(struct mm_struct *mm);
+
+#else /* !CONFIG_FUTEX_PRIVATE_HASH */
+static inline int futex_hash_allocate_default(void) { return 0; }
+static inline void futex_hash_free(struct mm_struct *mm) { }
+static inline int futex_mm_init(struct mm_struct *mm) { return 0; }
+#endif /* CONFIG_FUTEX_PRIVATE_HASH */
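futex_hash_prctl() backs the PR_FUTEX_HASH prctl. A hedged userspace sketch of resizing the private hash (constants from <linux/prctl.h>; the trailing argument is passed as 0 on the assumption that no flags are used):

/* Userspace sketch: request a private futex hash with N slots. */
#include <sys/prctl.h>
#include <linux/prctl.h>

static int set_futex_hash_slots(unsigned long slots)
{
	return prctl(PR_FUTEX_HASH, PR_FUTEX_HASH_SET_SLOTS, slots, 0);
}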
+
+#else /* !CONFIG_FUTEX */
static inline void futex_init_task(struct task_struct *tsk) { }
static inline void futex_exit_recursive(struct task_struct *tsk) { }
static inline void futex_exit_release(struct task_struct *tsk) { }
@@ -88,6 +104,17 @@ static inline long do_futex(u32 __user *uaddr, int op, u32 val,
{
return -EINVAL;
}
+static inline int futex_hash_prctl(unsigned long arg2, unsigned long arg3, unsigned long arg4)
+{
+ return -EINVAL;
+}
+static inline int futex_hash_allocate_default(void)
+{
+ return 0;
+}
+static inline void futex_hash_free(struct mm_struct *mm) { }
+static inline int futex_mm_init(struct mm_struct *mm) { return 0; }
+
#endif
#endif
diff --git a/include/linux/fw_table.h b/include/linux/fw_table.h
new file mode 100644
index 000000000000..9bd605b87c4c
--- /dev/null
+++ b/include/linux/fw_table.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * fw_table.h - Parsing support for ACPI and ACPI-like tables provided by
+ * platform or device firmware
+ *
+ * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2023 Intel Corp.
+ */
+#ifndef _FW_TABLE_H_
+#define _FW_TABLE_H_
+
+union acpi_subtable_headers;
+
+typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
+ const unsigned long end);
+
+typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header,
+ void *arg, const unsigned long end);
+
+struct acpi_subtable_proc {
+ int id;
+ acpi_tbl_entry_handler handler;
+ acpi_tbl_entry_handler_arg handler_arg;
+ void *arg;
+ int count;
+};
+
+union fw_table_header {
+ struct acpi_table_header acpi;
+ struct acpi_table_cdat cdat;
+};
+
+union acpi_subtable_headers {
+ struct acpi_subtable_header common;
+ struct acpi_hmat_structure hmat;
+ struct acpi_prmt_module_header prmt;
+ struct acpi_cedt_header cedt;
+ struct acpi_cdat_header cdat;
+};
+
+int acpi_parse_entries_array(char *id, unsigned long table_size,
+ union fw_table_header *table_header,
+ unsigned long max_length,
+ struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries);
+
+int cdat_table_parse(enum acpi_cdat_type type,
+ acpi_tbl_entry_handler_arg handler_arg, void *arg,
+ struct acpi_table_cdat *table_header,
+ unsigned long length);
+
+/* CXL is the only non-ACPI consumer of the FIRMWARE_TABLE library */
+#if IS_ENABLED(CONFIG_ACPI) && !IS_ENABLED(CONFIG_CXL_BUS)
+#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_fwtbl_lib __init_or_acpilib
+#else
+#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_NS_GPL(x, "CXL")
+#define __init_or_fwtbl_lib
+#endif
+
+#endif
diff --git a/include/linux/fwctl.h b/include/linux/fwctl.h
new file mode 100644
index 000000000000..5d61fc8a6871
--- /dev/null
+++ b/include/linux/fwctl.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __LINUX_FWCTL_H
+#define __LINUX_FWCTL_H
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/cleanup.h>
+#include <uapi/fwctl/fwctl.h>
+
+struct fwctl_device;
+struct fwctl_uctx;
+
+/**
+ * struct fwctl_ops - Driver provided operations
+ *
+ * fwctl_unregister() will wait until all executing ops are completed before it
+ * returns. Drivers should be mindful to not let their ops run for too long as
+ * it will block device hot unplug and module unloading.
+ */
+struct fwctl_ops {
+ /**
+ * @device_type: The drivers assigned device_type number. This is uABI.
+ */
+ enum fwctl_device_type device_type;
+ /**
+ * @uctx_size: The size of the fwctl_uctx struct to allocate. The first
+ * bytes of this memory will be a fwctl_uctx. The driver can use the
+ * remaining bytes as its private memory.
+ */
+ size_t uctx_size;
+ /**
+ * @open_uctx: Called when a file descriptor is opened before the uctx
+ * is ever used.
+ */
+ int (*open_uctx)(struct fwctl_uctx *uctx);
+ /**
+ * @close_uctx: Called when the uctx is destroyed, usually when the FD
+ * is closed.
+ */
+ void (*close_uctx)(struct fwctl_uctx *uctx);
+ /**
+ * @info: Implement FWCTL_INFO. Return kmalloc()'d memory that is copied
+ * to out_device_data. On input, length indicates the size of the user
+ * buffer; on output, it indicates the size of the memory. The driver can
+ * ignore length on input; the core code will handle everything.
+ */
+ void *(*info)(struct fwctl_uctx *uctx, size_t *length);
+ /**
+ * @fw_rpc: Implement FWCTL_RPC. Deliver rpc_in/in_len to the FW and
+ * return the response and set out_len. rpc_in can be returned as the
+ * response pointer. Otherwise the returned pointer is freed with
+ * kvfree().
+ */
+ void *(*fw_rpc)(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
+ void *rpc_in, size_t in_len, size_t *out_len);
+};
+
+/**
+ * struct fwctl_device - Per-driver registration struct
+ * @dev: The sysfs (class/fwctl/fwctlXX) device
+ *
+ * Each driver instance will have one of these structs with the driver private
+ * data following immediately after. This struct is refcounted, it is freed by
+ * calling fwctl_put().
+ */
+struct fwctl_device {
+ struct device dev;
+ /* private: */
+ struct cdev cdev;
+
+ /* Protect uctx_list */
+ struct mutex uctx_list_lock;
+ struct list_head uctx_list;
+ /*
+ * Protect ops, held for write when ops becomes NULL during unregister,
+ * held for read whenever ops is loaded or an ops function is running.
+ */
+ struct rw_semaphore registration_lock;
+ const struct fwctl_ops *ops;
+};
+
+struct fwctl_device *_fwctl_alloc_device(struct device *parent,
+ const struct fwctl_ops *ops,
+ size_t size);
+/**
+ * fwctl_alloc_device - Allocate a fwctl
+ * @parent: Physical device that provides the FW interface
+ * @ops: Driver ops to register
+ * @drv_struct: 'struct driver_fwctl' that holds the struct fwctl_device
+ * @member: Name of the struct fwctl_device in @drv_struct
+ *
+ * This allocates and initializes the fwctl_device embedded in the drv_struct.
+ * Upon success the pointer must be freed via fwctl_put(). Returns a 'drv_struct
+ * \*' on success, NULL on error.
+ */
+#define fwctl_alloc_device(parent, ops, drv_struct, member) \
+ ({ \
+ static_assert(__same_type(struct fwctl_device, \
+ ((drv_struct *)NULL)->member)); \
+ static_assert(offsetof(drv_struct, member) == 0); \
+ (drv_struct *)_fwctl_alloc_device(parent, ops, \
+ sizeof(drv_struct)); \
+ })
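A hedged sketch of the expected embedding (driver names hypothetical): the fwctl_device must be the first member so the cast in fwctl_alloc_device() is valid:

/* Sketch: a driver wrapping fwctl_device with private state. */
struct mydrv_fwctl {
	struct fwctl_device fwctl;	/* must be first */
	void *hw;
};

static int mydrv_create(struct device *parent, const struct fwctl_ops *ops)
{
	struct mydrv_fwctl *mc =
		fwctl_alloc_device(parent, ops, struct mydrv_fwctl, fwctl);
	int ret;

	if (!mc)
		return -ENOMEM;
	ret = fwctl_register(&mc->fwctl);
	if (ret)
		fwctl_put(&mc->fwctl);
	return ret;
}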
+
+static inline struct fwctl_device *fwctl_get(struct fwctl_device *fwctl)
+{
+ get_device(&fwctl->dev);
+ return fwctl;
+}
+static inline void fwctl_put(struct fwctl_device *fwctl)
+{
+ put_device(&fwctl->dev);
+}
+DEFINE_FREE(fwctl, struct fwctl_device *, if (_T) fwctl_put(_T));
+
+int fwctl_register(struct fwctl_device *fwctl);
+void fwctl_unregister(struct fwctl_device *fwctl);
+
+/**
+ * struct fwctl_uctx - Per user FD context
+ * @fwctl: fwctl instance that owns the context
+ *
+ * Every FD opened by userspace will get a unique context allocation. Any driver
+ * private data will follow immediately after.
+ */
+struct fwctl_uctx {
+ struct fwctl_device *fwctl;
+ /* private: */
+ /* Head at fwctl_device::uctx_list */
+ struct list_head uctx_list_entry;
+};
+
+#endif
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 59828516ebaf..097be89487bf 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -2,6 +2,11 @@
/*
* fwnode.h - Firmware device node object handle type definition.
*
+ * This header file provides low-level data types and definitions for firmware
+ * and device property providers. The respective API header files supplied by
+ * them should contain all of the requisite data types and definitions for end
+ * users, so including it directly should not be necessary.
+ *
* Copyright (C) 2015, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*/
@@ -9,38 +14,67 @@
#ifndef _LINUX_FWNODE_H_
#define _LINUX_FWNODE_H_
-#include <linux/types.h>
-#include <linux/list.h>
+#include <linux/bits.h>
#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+enum dev_dma_attr {
+ DEV_DMA_NOT_SUPPORTED,
+ DEV_DMA_NON_COHERENT,
+ DEV_DMA_COHERENT,
+};
struct fwnode_operations;
struct device;
/*
- * fwnode link flags
+ * fwnode flags
*
 * LINKS_ADDED: The fwnode has already been parsed to add fwnode links.
* NOT_DEVICE: The fwnode will never be populated as a struct device.
* INITIALIZED: The hardware corresponding to fwnode has been initialized.
+ * NEEDS_CHILD_BOUND_ON_ADD: For this fwnode/device to probe successfully, its
+ * driver needs its child devices to be bound with
+ * their respective drivers as soon as they are
+ * added.
+ * BEST_EFFORT: The fwnode/device needs to probe early and might be missing some
+ * suppliers. Only enforce ordering with suppliers that have
+ * drivers.
*/
-#define FWNODE_FLAG_LINKS_ADDED BIT(0)
-#define FWNODE_FLAG_NOT_DEVICE BIT(1)
-#define FWNODE_FLAG_INITIALIZED BIT(2)
+#define FWNODE_FLAG_LINKS_ADDED BIT(0)
+#define FWNODE_FLAG_NOT_DEVICE BIT(1)
+#define FWNODE_FLAG_INITIALIZED BIT(2)
+#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3)
+#define FWNODE_FLAG_BEST_EFFORT BIT(4)
+#define FWNODE_FLAG_VISITED BIT(5)
struct fwnode_handle {
struct fwnode_handle *secondary;
const struct fwnode_operations *ops;
+
+ /* The below is used solely by device links, don't use otherwise */
struct device *dev;
struct list_head suppliers;
struct list_head consumers;
u8 flags;
};
+/*
+ * fwnode link flags
+ *
+ * CYCLE: The fwnode link is part of a cycle. Don't defer probe.
+ * IGNORE: Completely ignore this link, even during cycle detection.
+ */
+#define FWLINK_FLAG_CYCLE BIT(0)
+#define FWLINK_FLAG_IGNORE BIT(1)
+
struct fwnode_link {
struct fwnode_handle *supplier;
struct list_head s_hook;
struct fwnode_handle *consumer;
struct list_head c_hook;
+ u8 flags;
};
/**
@@ -62,7 +96,7 @@ struct fwnode_endpoint {
#define SWNODE_GRAPH_PORT_NAME_FMT "port@%u"
#define SWNODE_GRAPH_ENDPOINT_NAME_FMT "endpoint@%u"
-#define NR_FWNODE_REFERENCE_ARGS 8
+#define NR_FWNODE_REFERENCE_ARGS 16
/**
* struct fwnode_reference_args - Fwnode reference with additional arguments
@@ -83,6 +117,7 @@ struct fwnode_reference_args {
* @device_is_available: Return true if the device is available.
* @device_get_match_data: Return the device driver match data.
* @property_present: Return true if a property is present.
+ * @property_read_bool: Return a boolean property value.
* @property_read_int_array: Read an array of integer properties. Return zero on
* success, a negative error code otherwise.
* @property_read_string_array: Read an array of string properties. Return zero
@@ -107,8 +142,13 @@ struct fwnode_operations {
bool (*device_is_available)(const struct fwnode_handle *fwnode);
const void *(*device_get_match_data)(const struct fwnode_handle *fwnode,
const struct device *dev);
+ bool (*device_dma_supported)(const struct fwnode_handle *fwnode);
+ enum dev_dma_attr
+ (*device_get_dma_attr)(const struct fwnode_handle *fwnode);
bool (*property_present)(const struct fwnode_handle *fwnode,
const char *propname);
+ bool (*property_read_bool)(const struct fwnode_handle *fwnode,
+ const char *propname);
int (*property_read_int_array)(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
@@ -139,15 +179,17 @@ struct fwnode_operations {
(*graph_get_port_parent)(struct fwnode_handle *fwnode);
int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
+ void __iomem *(*iomap)(struct fwnode_handle *fwnode, int index);
+ int (*irq_get)(const struct fwnode_handle *fwnode, unsigned int index);
int (*add_links)(struct fwnode_handle *fwnode);
};
-#define fwnode_has_op(fwnode, op) \
- ((fwnode) && (fwnode)->ops && (fwnode)->ops->op)
+#define fwnode_has_op(fwnode, op) \
+ (!IS_ERR_OR_NULL(fwnode) && (fwnode)->ops && (fwnode)->ops->op)
+
#define fwnode_call_int_op(fwnode, op, ...) \
- (fwnode ? (fwnode_has_op(fwnode, op) ? \
- (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : -ENXIO) : \
- -EINVAL)
+ (fwnode_has_op(fwnode, op) ? \
+ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : (IS_ERR_OR_NULL(fwnode) ? -EINVAL : -ENXIO))
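A hedged sketch of how a wrapper dispatches through the ops table with these macros (the wrapper here is illustrative; the in-tree wrappers live in drivers/base/property.c):

/* Sketch: an int-returning op dispatched via fwnode_call_int_op(). */
static inline int demo_fwnode_irq_get(const struct fwnode_handle *fwnode,
				      unsigned int index)
{
	return fwnode_call_int_op(fwnode, irq_get, index);
}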
#define fwnode_call_bool_op(fwnode, op, ...) \
(fwnode_has_op(fwnode, op) ? \
@@ -161,7 +203,6 @@ struct fwnode_operations {
if (fwnode_has_op(fwnode, op)) \
(fwnode)->ops->op(fwnode, ## __VA_ARGS__); \
} while (false)
-#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
static inline void fwnode_init(struct fwnode_handle *fwnode,
const struct fwnode_operations *ops)
@@ -183,10 +224,10 @@ static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode,
fwnode->flags &= ~FWNODE_FLAG_INITIALIZED;
}
-extern u32 fw_devlink_get_flags(void);
-extern bool fw_devlink_is_strict(void);
-int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup);
+int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup,
+ u8 flags);
void fwnode_links_purge(struct fwnode_handle *fwnode);
void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode);
+bool fw_devlink_is_strict(void);
#endif
diff --git a/include/linux/fwnode_mdio.h b/include/linux/fwnode_mdio.h
new file mode 100644
index 000000000000..faf603c48c86
--- /dev/null
+++ b/include/linux/fwnode_mdio.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * FWNODE helper for the MDIO (Ethernet PHY) API
+ */
+
+#ifndef __LINUX_FWNODE_MDIO_H
+#define __LINUX_FWNODE_MDIO_H
+
+#include <linux/phy.h>
+
+#if IS_ENABLED(CONFIG_FWNODE_MDIO)
+int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+ struct phy_device *phy,
+ struct fwnode_handle *child, u32 addr);
+
+int fwnode_mdiobus_register_phy(struct mii_bus *bus,
+ struct fwnode_handle *child, u32 addr);
+
+#else /* CONFIG_FWNODE_MDIO */
+static inline int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+						     struct phy_device *phy,
+						     struct fwnode_handle *child, u32 addr)
+{
+ return -EINVAL;
+}
+
+static inline int fwnode_mdiobus_register_phy(struct mii_bus *bus,
+ struct fwnode_handle *child,
+ u32 addr)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __LINUX_FWNODE_MDIO_H */
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 69081d899492..86d62fdafd7a 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -5,7 +5,6 @@
#ifndef _GAMEPORT_H
#define _GAMEPORT_H
-#include <asm/io.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -59,12 +58,12 @@ struct gameport_driver {
bool ignore;
};
-#define to_gameport_driver(d) container_of(d, struct gameport_driver, driver)
+#define to_gameport_driver(d) container_of_const(d, struct gameport_driver, driver)
int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode);
void gameport_close(struct gameport *gameport);
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
void __gameport_register_port(struct gameport *gameport, struct module *owner);
/* use a define to avoid include chaining to get THIS_MODULE */
@@ -110,7 +109,7 @@ static inline void gameport_free_port(struct gameport *gameport)
static inline void gameport_set_name(struct gameport *gameport, const char *name)
{
- strlcpy(gameport->name, name, sizeof(gameport->name));
+ strscpy(gameport->name, name, sizeof(gameport->name));
}
/*
@@ -165,18 +164,12 @@ void gameport_unregister_driver(struct gameport_driver *drv);
static inline void gameport_trigger(struct gameport *gameport)
{
- if (gameport->trigger)
- gameport->trigger(gameport);
- else
- outb(0xff, gameport->io);
+ gameport->trigger(gameport);
}
static inline unsigned char gameport_read(struct gameport *gameport)
{
- if (gameport->read)
- return gameport->read(gameport);
- else
- return inb(gameport->io);
+ return gameport->read(gameport);
}
static inline int gameport_cooked_read(struct gameport *gameport, int *axes, int *buttons)
diff --git a/include/linux/gcd.h b/include/linux/gcd.h
index cb572677fd7f..616e81a7f7e3 100644
--- a/include/linux/gcd.h
+++ b/include/linux/gcd.h
@@ -3,6 +3,9 @@
#define _GCD_H
#include <linux/compiler.h>
+#include <linux/jump_label.h>
+
+DECLARE_STATIC_KEY_TRUE(efficient_ffs_key);
unsigned long gcd(unsigned long a, unsigned long b) __attribute_const__;
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
index bfd00320c7f3..5b51c3d582d6 100644
--- a/include/linux/generic-radix-tree.h
+++ b/include/linux/generic-radix-tree.h
@@ -5,7 +5,7 @@
* DOC: Generic radix trees/sparse arrays
*
* Very simple and minimalistic, supporting arbitrary size entries up to
- * PAGE_SIZE.
+ * GENRADIX_NODE_SIZE.
*
* A genradix is defined with the type it will store, like so:
*
@@ -38,17 +38,76 @@
#include <asm/page.h>
#include <linux/bug.h>
-#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/log2.h>
+#include <linux/math.h>
+#include <linux/slab.h>
+#include <linux/types.h>
struct genradix_root;
+#define GENRADIX_NODE_SHIFT 9
+#define GENRADIX_NODE_SIZE (1U << GENRADIX_NODE_SHIFT)
+
+#define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *))
+#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
+
+/* depth that's needed for a genradix that can address up to ULONG_MAX: */
+#define GENRADIX_MAX_DEPTH \
+ DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT)
+
+#define GENRADIX_DEPTH_MASK \
+ ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
+
+static inline int genradix_depth_shift(unsigned depth)
+{
+ return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth;
+}
+
+/*
+ * Returns size (of data, in bytes) that a tree of a given depth holds:
+ */
+static inline size_t genradix_depth_size(unsigned depth)
+{
+ return 1UL << genradix_depth_shift(depth);
+}
+
+static inline unsigned genradix_root_to_depth(struct genradix_root *r)
+{
+ return (unsigned long) r & GENRADIX_DEPTH_MASK;
+}
+
+static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
+{
+ return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
+}
+
struct __genradix {
struct genradix_root *root;
};
+struct genradix_node {
+ union {
+ /* Interior node: */
+ struct genradix_node *children[GENRADIX_ARY];
+
+ /* Leaf: */
+ u8 data[GENRADIX_NODE_SIZE];
+ };
+};
+
+static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
+{
+ return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
+}
+
+static inline void genradix_free_node(struct genradix_node *node)
+{
+ kfree(node);
+}
+
/*
- * NOTE: currently, sizeof(_type) must not be larger than PAGE_SIZE:
+ * NOTE: currently, sizeof(_type) must not be larger than GENRADIX_NODE_SIZE:
*/
#define __GENRADIX_INITIALIZER \
@@ -99,14 +158,14 @@ void __genradix_free(struct __genradix *);
static inline size_t __idx_to_offset(size_t idx, size_t obj_size)
{
if (__builtin_constant_p(obj_size))
- BUILD_BUG_ON(obj_size > PAGE_SIZE);
+ BUILD_BUG_ON(obj_size > GENRADIX_NODE_SIZE);
else
- BUG_ON(obj_size > PAGE_SIZE);
+ BUG_ON(obj_size > GENRADIX_NODE_SIZE);
if (!is_power_of_2(obj_size)) {
- size_t objs_per_page = PAGE_SIZE / obj_size;
+ size_t objs_per_page = GENRADIX_NODE_SIZE / obj_size;
- return (idx / objs_per_page) * PAGE_SIZE +
+ return (idx / objs_per_page) * GENRADIX_NODE_SIZE +
(idx % objs_per_page) * obj_size;
} else {
return idx * obj_size;
@@ -115,9 +174,38 @@ static inline size_t __idx_to_offset(size_t idx, size_t obj_size)
#define __genradix_cast(_radix) (typeof((_radix)->type[0]) *)
#define __genradix_obj_size(_radix) sizeof((_radix)->type[0])
+#define __genradix_objs_per_page(_radix) \
+ (GENRADIX_NODE_SIZE / sizeof((_radix)->type[0]))
+#define __genradix_page_remainder(_radix) \
+ (GENRADIX_NODE_SIZE % sizeof((_radix)->type[0]))
+
#define __genradix_idx_to_offset(_radix, _idx) \
__idx_to_offset(_idx, __genradix_obj_size(_radix))
+static inline void *__genradix_ptr_inlined(struct __genradix *radix, size_t offset)
+{
+ struct genradix_root *r = READ_ONCE(radix->root);
+ struct genradix_node *n = genradix_root_to_node(r);
+ unsigned level = genradix_root_to_depth(r);
+ unsigned shift = genradix_depth_shift(level);
+
+ if (unlikely(ilog2(offset) >= genradix_depth_shift(level)))
+ return NULL;
+
+ while (n && shift > GENRADIX_NODE_SHIFT) {
+ shift -= GENRADIX_ARY_SHIFT;
+ n = n->children[offset >> shift];
+ offset &= (1UL << shift) - 1;
+ }
+
+ return n ? &n->data[offset] : NULL;
+}
+
+#define genradix_ptr_inlined(_radix, _idx) \
+ (__genradix_cast(_radix) \
+ __genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)))
+
void *__genradix_ptr(struct __genradix *, size_t);
/**
@@ -132,7 +220,24 @@ void *__genradix_ptr(struct __genradix *, size_t);
__genradix_ptr(&(_radix)->tree, \
__genradix_idx_to_offset(_radix, _idx)))
-void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t);
+void *__genradix_ptr_alloc(struct __genradix *, size_t,
+ struct genradix_node **, gfp_t);
+
+#define genradix_ptr_alloc_inlined(_radix, _idx, _gfp) \
+ (__genradix_cast(_radix) \
+ (__genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)) ?: \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ NULL, _gfp)))
+
+#define genradix_ptr_alloc_preallocated_inlined(_radix, _idx, _new_node, _gfp)\
+ (__genradix_cast(_radix) \
+ (__genradix_ptr_inlined(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx)) ?: \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ _new_node, _gfp)))
/**
* genradix_ptr_alloc - get a pointer to a genradix entry, allocating it
@@ -147,7 +252,13 @@ void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t);
(__genradix_cast(_radix) \
__genradix_ptr_alloc(&(_radix)->tree, \
__genradix_idx_to_offset(_radix, _idx), \
- _gfp))
+ NULL, _gfp))
+
+#define genradix_ptr_alloc_preallocated(_radix, _idx, _new_node, _gfp)\
+ (__genradix_cast(_radix) \
+ __genradix_ptr_alloc(&(_radix)->tree, \
+ __genradix_idx_to_offset(_radix, _idx), \
+ _new_node, _gfp))
struct genradix_iter {
size_t offset;
@@ -178,16 +289,40 @@ void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t);
#define genradix_iter_peek(_iter, _radix) \
(__genradix_cast(_radix) \
__genradix_iter_peek(_iter, &(_radix)->tree, \
- PAGE_SIZE / __genradix_obj_size(_radix)))
+ __genradix_objs_per_page(_radix)))
+
+void *__genradix_iter_peek_prev(struct genradix_iter *, struct __genradix *,
+ size_t, size_t);
+
+/**
+ * genradix_iter_peek_prev - get first entry at or below iterator's current
+ * position
+ * @_iter: a genradix_iter
+ * @_radix: genradix being iterated over
+ *
+ * If no more entries exist at or below @_iter's current position, returns NULL
+ */
+#define genradix_iter_peek_prev(_iter, _radix) \
+ (__genradix_cast(_radix) \
+ __genradix_iter_peek_prev(_iter, &(_radix)->tree, \
+ __genradix_objs_per_page(_radix), \
+ __genradix_obj_size(_radix) + \
+ __genradix_page_remainder(_radix)))
static inline void __genradix_iter_advance(struct genradix_iter *iter,
size_t obj_size)
{
+ if (iter->offset + obj_size < iter->offset) {
+ iter->offset = SIZE_MAX;
+ iter->pos = SIZE_MAX;
+ return;
+ }
+
iter->offset += obj_size;
if (!is_power_of_2(obj_size) &&
- (iter->offset & (PAGE_SIZE - 1)) + obj_size > PAGE_SIZE)
- iter->offset = round_up(iter->offset, PAGE_SIZE);
+ (iter->offset & (GENRADIX_NODE_SIZE - 1)) + obj_size > GENRADIX_NODE_SIZE)
+ iter->offset = round_up(iter->offset, GENRADIX_NODE_SIZE);
iter->pos++;
}
@@ -195,6 +330,25 @@ static inline void __genradix_iter_advance(struct genradix_iter *iter,
#define genradix_iter_advance(_iter, _radix) \
__genradix_iter_advance(_iter, __genradix_obj_size(_radix))
+static inline void __genradix_iter_rewind(struct genradix_iter *iter,
+ size_t obj_size)
+{
+ if (iter->offset == 0 ||
+ iter->offset == SIZE_MAX) {
+ iter->offset = SIZE_MAX;
+ return;
+ }
+
+ if ((iter->offset & (GENRADIX_NODE_SIZE - 1)) == 0)
+ iter->offset -= GENRADIX_NODE_SIZE % obj_size;
+
+ iter->offset -= obj_size;
+ iter->pos--;
+}
+
+#define genradix_iter_rewind(_iter, _radix) \
+ __genradix_iter_rewind(_iter, __genradix_obj_size(_radix))
+
#define genradix_for_each_from(_radix, _iter, _p, _start) \
for (_iter = genradix_iter_init(_radix, _start); \
(_p = genradix_iter_peek(&_iter, _radix)) != NULL; \
@@ -212,6 +366,23 @@ static inline void __genradix_iter_advance(struct genradix_iter *iter,
#define genradix_for_each(_radix, _iter, _p) \
genradix_for_each_from(_radix, _iter, _p, 0)
+#define genradix_last_pos(_radix) \
+ (SIZE_MAX / GENRADIX_NODE_SIZE * __genradix_objs_per_page(_radix) - 1)
+
+/**
+ * genradix_for_each_reverse - iterate over entry in a genradix, reverse order
+ * @_radix: genradix to iterate over
+ * @_iter: a genradix_iter to track current position
+ * @_p: pointer to genradix entry type
+ *
+ * On every iteration, @_p will point to the current entry, and @_iter.pos
+ * will be the current entry's index.
+ */
+#define genradix_for_each_reverse(_radix, _iter, _p) \
+ for (_iter = genradix_iter_init(_radix, genradix_last_pos(_radix));\
+ (_p = genradix_iter_peek_prev(&_iter, _radix)) != NULL;\
+ genradix_iter_rewind(&_iter, _radix))
+
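A hedged end-to-end sketch of the new reverse iteration (names hypothetical):

/* Sketch: a sparse array of u64 walked from the highest index down. */
static GENRADIX(u64) counters;

static void demo_genradix(void)
{
	struct genradix_iter iter;
	u64 *v;

	genradix_init(&counters);
	v = genradix_ptr_alloc(&counters, 123, GFP_KERNEL);
	if (v)
		*v = 42;

	genradix_for_each_reverse(&counters, iter, v)
		pr_info("pos %zu = %llu\n", iter.pos, *v);

	genradix_free(&counters);
}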
int __genradix_prealloc(struct __genradix *, size_t, gfp_t);
/**
diff --git a/include/linux/generic_pt/common.h b/include/linux/generic_pt/common.h
new file mode 100644
index 000000000000..6a9a1acb5aad
--- /dev/null
+++ b/include/linux/generic_pt/common.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __GENERIC_PT_COMMON_H
+#define __GENERIC_PT_COMMON_H
+
+#include <linux/types.h>
+#include <linux/build_bug.h>
+#include <linux/bits.h>
+
+/**
+ * DOC: Generic Radix Page Table
+ *
+ * Generic Radix Page Table is a set of functions and helpers to efficiently
+ * parse radix style page tables typically seen in HW implementations. The
+ * interface is built to deliver similar code generation as the mm's pte/pmd/etc
+ * system by fully inlining the exact code required to handle each table level.
+ *
+ * Like the mm subsystem each format contributes its parsing implementation
+ * under common names and the common code implements the required algorithms.
+ *
+ * The system is divided into three logical levels:
+ *
+ * - The page table format and its manipulation functions
+ * - Generic helpers to give a consistent API regardless of underlying format
+ * - An algorithm implementation (e.g. IOMMU/DRM/KVM/MM)
+ *
+ * Multiple implementations are supported. The intention is to have the generic
+ * format code be re-usable for whatever specialized implementation is required.
+ * The generic code is solely about the format of the radix tree; it does not
+ * include memory allocation or higher level decisions, which are left to the
+ * implementation.
+ *
+ * The generic framework supports a superset of functions across many HW
+ * implementations:
+ *
+ * - Entries comprised of contiguous blocks of IO PTEs for larger page sizes
+ * - Multi-level tables, up to 6 levels. Runtime selected top level
+ * - Runtime variable table level size (ARM's concatenated tables)
+ * - Expandable top level allowing dynamic sizing of table levels
+ * - Optional leaf entries at any level
+ * - 32-bit/64-bit virtual and output addresses, using every address bit
+ * - Dirty tracking
+ * - Sign extended addressing
+ */
+
+/**
+ * struct pt_common - struct for all page table implementations
+ */
+struct pt_common {
+ /**
+ * @top_of_table: Encodes the table top pointer and the top level in a
+ * single value. Must use READ_ONCE/WRITE_ONCE to access it. The lower
+ * bits of the aligned table pointer are used for the level.
+ */
+ uintptr_t top_of_table;
+ /**
+ * @max_oasz_lg2: Maximum number of bits the OA can contain. Upper bits
+ * must be zero. This may be less than what the page table format
+ * supports, but must not be more.
+ */
+ u8 max_oasz_lg2;
+ /**
+ * @max_vasz_lg2: Maximum number of bits the VA can contain. Upper bits
+ * are 0 or 1 depending on pt_full_va_prefix(). This may be less than
+ * what the page table format supports, but must not be more. When
+ * PT_FEAT_DYNAMIC_TOP is set this reflects the maximum VA capability.
+ */
+ u8 max_vasz_lg2;
+ /**
+ * @features: Bitmap of `enum pt_features`
+ */
+ unsigned int features;
+};
+
+/* Encoding parameters for top_of_table */
+enum {
+ PT_TOP_LEVEL_BITS = 3,
+ PT_TOP_LEVEL_MASK = GENMASK(PT_TOP_LEVEL_BITS - 1, 0),
+};
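+
+/*
+ * Illustrative helpers, not part of this patch, showing how the encoding
+ * described for @top_of_table unpacks: the low PT_TOP_LEVEL_BITS hold the
+ * level and the remaining bits hold the aligned table pointer. The real
+ * accessors live in the generic_pt internals; READ_ONCE() is assumed to be
+ * available at the use site.
+ */
+static inline unsigned int example_pt_top_level(struct pt_common *common)
+{
+	return READ_ONCE(common->top_of_table) & PT_TOP_LEVEL_MASK;
+}
+
+static inline void *example_pt_top_table(struct pt_common *common)
+{
+	return (void *)(READ_ONCE(common->top_of_table) &
+			~(uintptr_t)PT_TOP_LEVEL_MASK);
+}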
+
+/**
+ * enum pt_features - Features turned on in the table. Each symbol is a bit
+ * position.
+ */
+enum pt_features {
+ /**
+ * @PT_FEAT_DMA_INCOHERENT: Cache flush page table memory before
+ * assuming the HW can read it. Otherwise an SMP release is sufficient
+ * for HW to read it.
+ */
+ PT_FEAT_DMA_INCOHERENT,
+ /**
+ * @PT_FEAT_FULL_VA: The table can span the full VA range from 0 to
+ * PT_VADDR_MAX.
+ */
+ PT_FEAT_FULL_VA,
+ /**
+ * @PT_FEAT_DYNAMIC_TOP: The table's top level can be increased
+ * dynamically during map. This requires HW support for atomically
+ * setting both the table top pointer and the starting table level.
+ */
+ PT_FEAT_DYNAMIC_TOP,
+ /**
+ * @PT_FEAT_SIGN_EXTEND: The top most bit of the valid VA range sign
+ * extends up to the full pt_vaddr_t. This divides the page table into
+ * three VA ranges::
+ *
+ * 0 -> 2^N - 1 Lower
+ * 2^N -> (MAX - 2^N - 1) Non-Canonical
+ * MAX - 2^N -> MAX Upper
+ *
+ * In this mode pt_common::max_vasz_lg2 includes the sign bit and the
+ * upper bits that don't fall within the translation are just validated.
+ *
+ * If not set there is no sign extension and valid VA goes from 0 to 2^N
+ * - 1.
+ */
+ PT_FEAT_SIGN_EXTEND,
+ /**
+ * @PT_FEAT_FLUSH_RANGE: IOTLB maintenance is done by flushing IOVA
+ * ranges which will clean out any walk cache or any IOPTE fully
+ * contained by the range. The optimization objective is to minimize the
+ * number of flushes even if ranges include IOVA gaps that do not need
+ * to be flushed.
+ */
+ PT_FEAT_FLUSH_RANGE,
+ /**
+ * @PT_FEAT_FLUSH_RANGE_NO_GAPS: Like PT_FEAT_FLUSH_RANGE except that
+ * the optimization objective is to only flush IOVA that has been
+ * changed. This mode is suitable for cases like hypervisor shadowing
+ * where flushing unchanged ranges may cause the hypervisor to reparse
+ * a significant amount of the page table.
+ */
+ PT_FEAT_FLUSH_RANGE_NO_GAPS,
+ /* private: */
+ PT_FEAT_FMT_START,
+};
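+
+/*
+ * Feature symbols are bit positions in pt_common::features, so a check is a
+ * plain bitmap test; "example_pt_has_feature" is an illustrative name only.
+ */
+static inline bool example_pt_has_feature(const struct pt_common *common,
+					  enum pt_features feat)
+{
+	return common->features & BIT(feat);
+}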
+
+struct pt_amdv1 {
+ struct pt_common common;
+};
+
+enum {
+ /*
+ * The memory backing the tables is encrypted. Use __sme_set() to adjust
+ * the page table pointers in the tree. This only works with
+ * CONFIG_AMD_MEM_ENCRYPT.
+ */
+ PT_FEAT_AMDV1_ENCRYPT_TABLES = PT_FEAT_FMT_START,
+ /*
+ * The PTEs are set to prevent cache incoherent traffic, such as PCI no
+ * snoop. This is set either at creation time or before the first map
+ * operation.
+ */
+ PT_FEAT_AMDV1_FORCE_COHERENCE,
+};
+
+struct pt_vtdss {
+ struct pt_common common;
+};
+
+enum {
+ /*
+ * The PTEs are set to prevent cache incoherent traffic, such as PCI no
+ * snoop. This is set either at creation time or before the first map
+ * operation.
+ */
+ PT_FEAT_VTDSS_FORCE_COHERENCE = PT_FEAT_FMT_START,
+ /*
+ * Prevent creating read-only PTEs. Used to work around HW errata
+ * ERRATA_772415_SPR17.
+ */
+ PT_FEAT_VTDSS_FORCE_WRITEABLE,
+};
+
+struct pt_x86_64 {
+ struct pt_common common;
+};
+
+enum {
+ /*
+ * The memory backing the tables is encrypted. Use __sme_set() to adjust
+ * the page table pointers in the tree. This only works with
+ * CONFIG_AMD_MEM_ENCRYPT.
+ */
+ PT_FEAT_X86_64_AMD_ENCRYPT_TABLES = PT_FEAT_FMT_START,
+};
+
+#endif
diff --git a/include/linux/generic_pt/iommu.h b/include/linux/generic_pt/iommu.h
new file mode 100644
index 000000000000..9eefbb74efd0
--- /dev/null
+++ b/include/linux/generic_pt/iommu.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __GENERIC_PT_IOMMU_H
+#define __GENERIC_PT_IOMMU_H
+
+#include <linux/generic_pt/common.h>
+#include <linux/iommu.h>
+#include <linux/mm_types.h>
+
+struct iommu_iotlb_gather;
+struct pt_iommu_ops;
+struct pt_iommu_driver_ops;
+struct iommu_dirty_bitmap;
+
+/**
+ * DOC: IOMMU Radix Page Table
+ *
+ * The IOMMU implementation of the Generic Page Table provides an ops struct
+ * that is useful to go with an iommu_domain to serve the DMA API, IOMMUFD and
+ * the generic map/unmap interface.
+ *
+ * This interface uses a caller-provided locking approach. The caller must have
+ * a VA range lock concept that prevents concurrent threads from calling ops on
+ * the same VA. Generally the range lock must be at least as large as a single
+ * map call.
+ */
+
+/**
+ * struct pt_iommu - Base structure for IOMMU page tables
+ *
+ * The format-specific struct will include this as the first member.
+ */
+struct pt_iommu {
+ /**
+ * @domain: The core IOMMU domain. The driver should use a union to
+ * overlay this memory with its previously existing domain struct to
+ * create an alias.
+ */
+ struct iommu_domain domain;
+
+ /**
+ * @ops: Function pointers to access the API
+ */
+ const struct pt_iommu_ops *ops;
+
+ /**
+ * @driver_ops: Function pointers provided by the HW driver to help
+ * manage HW details like caches.
+ */
+ const struct pt_iommu_driver_ops *driver_ops;
+
+ /**
+ * @nid: Node ID to use for table memory allocations. The IOMMU driver
+ * may want to set the NID to the device's NID, if there are multiple
+ * table walkers.
+ */
+ int nid;
+
+ /**
+	 * @iommu_device: Device pointer used for any DMA cache flushing when
+	 * PT_FEAT_DMA_INCOHERENT is set. This is the iommu device that created
+	 * the page table, which must have dma ops that perform cache flushing.
+ */
+ struct device *iommu_device;
+};
+
+/**
+ * struct pt_iommu_info - Details about the IOMMU page table
+ *
+ * Returned from pt_iommu_ops->get_info()
+ */
+struct pt_iommu_info {
+ /**
+ * @pgsize_bitmap: A bitmask where each set bit indicates
+ * a page size that can be natively stored in the page table.
+ */
+ u64 pgsize_bitmap;
+};
+
+struct pt_iommu_ops {
+ /**
+ * @set_dirty: Make the iova write dirty
+ * @iommu_table: Table to manipulate
+ * @iova: IO virtual address to start
+ *
+ * This is only used by iommufd testing. It makes the iova dirty so that
+ * read_and_clear_dirty() will see it as dirty. Unlike all the other ops
+	 * this one is safe to call without holding any locks. It may return
+ * -EAGAIN if there is a race.
+ */
+ int (*set_dirty)(struct pt_iommu *iommu_table, dma_addr_t iova);
+
+ /**
+ * @get_info: Return the pt_iommu_info structure
+ * @iommu_table: Table to query
+ *
+ * Return some basic static information about the page table.
+ */
+ void (*get_info)(struct pt_iommu *iommu_table,
+ struct pt_iommu_info *info);
+
+ /**
+ * @deinit: Undo a format specific init operation
+ * @iommu_table: Table to destroy
+ *
+ * Release all of the memory. The caller must have already removed the
+ * table from all HW access and all caches.
+ */
+ void (*deinit)(struct pt_iommu *iommu_table);
+};
+
+/**
+ * struct pt_iommu_driver_ops - HW IOTLB cache flushing operations
+ *
+ * The IOMMU driver should implement these using container_of(iommu_table) to
+ * get to its iommu_domain-derived structure. All ops can be called in atomic
+ * contexts as they are buried under DMA API calls.
+ */
+struct pt_iommu_driver_ops {
+ /**
+ * @change_top: Update the top of table pointer
+ * @iommu_table: Table to operate on
+ * @top_paddr: New CPU physical address of the top pointer
+ * @top_level: IOMMU PT level of the new top
+ *
+ * Called under the get_top_lock() spinlock. The driver must update all
+ * HW references to this domain with a new top address and
+	 * configuration. On return, mappings placed in the new top must be
+ * reachable by the HW.
+ *
+	 * top_level encodes the level in IOMMU PT format: level 0 is the
+	 * smallest page size, increasing from there. This has to be translated
+ * to any HW specific format. During this call the new top will not be
+ * visible to any other API.
+ *
+ * This op is only used by PT_FEAT_DYNAMIC_TOP, and is required if
+ * enabled.
+ */
+ void (*change_top)(struct pt_iommu *iommu_table, phys_addr_t top_paddr,
+ unsigned int top_level);
+
+ /**
+ * @get_top_lock: lock to hold when changing the table top
+ * @iommu_table: Table to operate on
+ *
+	 * Return a lock to hold while the top of the page table stored in HW
+	 * is being changed. The lock will be held prior to calling change_top()
+	 * and released once the new top is fully visible.
+ *
+ * Typically this would be a lock that protects the iommu_domain's
+ * attachment list.
+ *
+ * This op is only used by PT_FEAT_DYNAMIC_TOP, and is required if
+ * enabled.
+ */
+ spinlock_t *(*get_top_lock)(struct pt_iommu *iommu_table);
+};
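+
+/*
+ * A hedged sketch of how a PT_FEAT_DYNAMIC_TOP capable driver might supply
+ * these ops; "mock_iommu" and its attach_lock are hypothetical names, not
+ * part of this interface.
+ */
+struct mock_iommu {
+	struct pt_iommu pt;
+	spinlock_t attach_lock;	/* protects the domain's attachment list */
+};
+
+static void mock_change_top(struct pt_iommu *iommu_table,
+			    phys_addr_t top_paddr, unsigned int top_level)
+{
+	/* Reprogram every attached device with the new root and level here. */
+}
+
+static spinlock_t *mock_get_top_lock(struct pt_iommu *iommu_table)
+{
+	return &container_of(iommu_table, struct mock_iommu, pt)->attach_lock;
+}
+
+static const struct pt_iommu_driver_ops mock_driver_ops = {
+	.change_top = mock_change_top,
+	.get_top_lock = mock_get_top_lock,
+};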
+
+static inline void pt_iommu_deinit(struct pt_iommu *iommu_table)
+{
+ /*
+ * It is safe to call pt_iommu_deinit() before an init, or if init
+ * fails. The ops pointer will only become non-NULL if deinit needs to be
+ * run.
+ */
+ if (iommu_table->ops)
+ iommu_table->ops->deinit(iommu_table);
+}
+
+/**
+ * struct pt_iommu_cfg - Common configuration values for all formats
+ */
+struct pt_iommu_cfg {
+ /**
+ * @features: Features required. Only these features will be turned on.
+ * The feature list should reflect what the IOMMU HW is capable of.
+ */
+ unsigned int features;
+ /**
+ * @hw_max_vasz_lg2: Maximum VA the IOMMU HW can support. This will
+ * imply the top level of the table.
+ */
+ u8 hw_max_vasz_lg2;
+ /**
+ * @hw_max_oasz_lg2: Maximum OA the IOMMU HW can support. The format
+ * might select a lower maximum OA.
+ */
+ u8 hw_max_oasz_lg2;
+};
+
+/* Generate the exported function signatures from iommu_pt.h */
+#define IOMMU_PROTOTYPES(fmt) \
+ phys_addr_t pt_iommu_##fmt##_iova_to_phys(struct iommu_domain *domain, \
+ dma_addr_t iova); \
+ int pt_iommu_##fmt##_map_pages(struct iommu_domain *domain, \
+ unsigned long iova, phys_addr_t paddr, \
+ size_t pgsize, size_t pgcount, \
+ int prot, gfp_t gfp, size_t *mapped); \
+ size_t pt_iommu_##fmt##_unmap_pages( \
+ struct iommu_domain *domain, unsigned long iova, \
+ size_t pgsize, size_t pgcount, \
+ struct iommu_iotlb_gather *iotlb_gather); \
+ int pt_iommu_##fmt##_read_and_clear_dirty( \
+ struct iommu_domain *domain, unsigned long iova, size_t size, \
+ unsigned long flags, struct iommu_dirty_bitmap *dirty); \
+ int pt_iommu_##fmt##_init(struct pt_iommu_##fmt *table, \
+ const struct pt_iommu_##fmt##_cfg *cfg, \
+ gfp_t gfp); \
+ void pt_iommu_##fmt##_hw_info(struct pt_iommu_##fmt *table, \
+ struct pt_iommu_##fmt##_hw_info *info)
+#define IOMMU_FORMAT(fmt, member) \
+ struct pt_iommu_##fmt { \
+ struct pt_iommu iommu; \
+ struct pt_##fmt member; \
+ }; \
+ IOMMU_PROTOTYPES(fmt)
+
+/*
+ * A driver uses IOMMU_PT_DOMAIN_OPS to populate the iommu_domain_ops for the
+ * iommu_pt.
+ */
+#define IOMMU_PT_DOMAIN_OPS(fmt) \
+ .iova_to_phys = &pt_iommu_##fmt##_iova_to_phys, \
+ .map_pages = &pt_iommu_##fmt##_map_pages, \
+ .unmap_pages = &pt_iommu_##fmt##_unmap_pages
+#define IOMMU_PT_DIRTY_OPS(fmt) \
+ .read_and_clear_dirty = &pt_iommu_##fmt##_read_and_clear_dirty
+
+/*
+ * The driver should setup its domain struct like
+ * union {
+ * struct iommu_domain domain;
+ * struct pt_iommu_xxx xx;
+ * };
+ * PT_IOMMU_CHECK_DOMAIN(struct mock_iommu_domain, xx.iommu, domain);
+ *
+ * This creates an alias between driver_domain.domain and
+ * driver_domain.xx.iommu.domain. This is to avoid a mass rename of existing
+ * driver_domain.domain users.
+ */
+#define PT_IOMMU_CHECK_DOMAIN(s, pt_iommu_memb, domain_memb) \
+ static_assert(offsetof(s, pt_iommu_memb.domain) == \
+ offsetof(s, domain_memb))
+
+struct pt_iommu_amdv1_cfg {
+ struct pt_iommu_cfg common;
+ unsigned int starting_level;
+};
+
+struct pt_iommu_amdv1_hw_info {
+ u64 host_pt_root;
+ u8 mode;
+};
+
+IOMMU_FORMAT(amdv1, amdpt);
+
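+/*
+ * The union pattern described above PT_IOMMU_CHECK_DOMAIN, expanded into a
+ * hedged, self-contained sketch; "mock_iommu_domain" and "mock_domain_ops"
+ * are illustrative names, not part of this interface.
+ */
+struct mock_iommu_domain {
+	union {
+		struct iommu_domain domain;
+		struct pt_iommu_amdv1 amdpt;
+	};
+};
+PT_IOMMU_CHECK_DOMAIN(struct mock_iommu_domain, amdpt.iommu, domain);
+
+static const struct iommu_domain_ops mock_domain_ops = {
+	IOMMU_PT_DOMAIN_OPS(amdv1),
+};
+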
+/* amdv1_mock is used by the iommufd selftest */
+#define pt_iommu_amdv1_mock pt_iommu_amdv1
+#define pt_iommu_amdv1_mock_cfg pt_iommu_amdv1_cfg
+struct pt_iommu_amdv1_mock_hw_info;
+IOMMU_PROTOTYPES(amdv1_mock);
+
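+/*
+ * Hypothetical error-unwind illustrating the pt_iommu_deinit() contract from
+ * above: calling it after a failed init is a safe no-op, because ops only
+ * becomes non-NULL once init has gotten far enough to need unwinding.
+ */
+static inline int example_amdv1_create(struct pt_iommu_amdv1 *tbl,
+				       const struct pt_iommu_amdv1_cfg *cfg)
+{
+	int ret = pt_iommu_amdv1_init(tbl, cfg, GFP_KERNEL);
+
+	if (ret)
+		pt_iommu_deinit(&tbl->iommu);
+	return ret;
+}
+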
+struct pt_iommu_vtdss_cfg {
+ struct pt_iommu_cfg common;
+	/* top_level == 4 selects a 57-bit, 5-level table */
+ unsigned int top_level;
+};
+
+struct pt_iommu_vtdss_hw_info {
+ u64 ssptptr;
+ u8 aw;
+};
+
+IOMMU_FORMAT(vtdss, vtdss_pt);
+
+struct pt_iommu_x86_64_cfg {
+ struct pt_iommu_cfg common;
+	/* top_level == 4 selects a 57-bit, 5-level table */
+ unsigned int top_level;
+};
+
+struct pt_iommu_x86_64_hw_info {
+ u64 gcr3_pt;
+ u8 levels;
+};
+
+IOMMU_FORMAT(x86_64, x86_64_pt);
+
+#undef IOMMU_PROTOTYPES
+#undef IOMMU_FORMAT
+#endif
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
deleted file mode 100644
index bc738504ab4a..000000000000
--- a/include/linux/genetlink.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_GENERIC_NETLINK_H
-#define __LINUX_GENERIC_NETLINK_H
-
-#include <uapi/linux/genetlink.h>
-
-
-/* All generic netlink requests are serialized by a global lock. */
-extern void genl_lock(void);
-extern void genl_unlock(void);
-#ifdef CONFIG_LOCKDEP
-extern bool lockdep_genl_is_held(void);
-#endif
-
-/* for synchronisation between af_netlink and genetlink */
-extern atomic_t genl_sk_destructing_cnt;
-extern wait_queue_head_t genl_sk_destructing_waitq;
-
-/**
- * rcu_dereference_genl - rcu_dereference with debug checking
- * @p: The pointer to read, prior to dereferencing
- *
- * Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
- * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference()
- */
-#define rcu_dereference_genl(p) \
- rcu_dereference_check(p, lockdep_genl_is_held())
-
-/**
- * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex
- * @p: The pointer to read, prior to dereferencing
- *
- * Return the value of the specified RCU-protected pointer, but omit
- * the READ_ONCE(), because caller holds genl mutex.
- */
-#define genl_dereference(p) \
- rcu_dereference_protected(p, lockdep_genl_is_held())
-
-#define MODULE_ALIAS_GENL_FAMILY(family)\
- MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)
-
-#endif /* __LINUX_GENERIC_NETLINK_H */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
deleted file mode 100644
index 7e9660ea967d..000000000000
--- a/include/linux/genhd.h
+++ /dev/null
@@ -1,321 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_GENHD_H
-#define _LINUX_GENHD_H
-
-/*
- * genhd.h Copyright (C) 1992 Drew Eckhardt
- * Generic hard disk header file by
- * Drew Eckhardt
- *
- * <drew@colorado.edu>
- */
-
-#include <linux/types.h>
-#include <linux/kdev_t.h>
-#include <linux/rcupdate.h>
-#include <linux/slab.h>
-#include <linux/percpu-refcount.h>
-#include <linux/uuid.h>
-#include <linux/blk_types.h>
-#include <asm/local.h>
-
-extern const struct device_type disk_type;
-extern struct device_type part_type;
-extern struct class block_class;
-
-#define DISK_MAX_PARTS 256
-#define DISK_NAME_LEN 32
-
-#include <linux/major.h>
-#include <linux/device.h>
-#include <linux/smp.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/workqueue.h>
-#include <linux/xarray.h>
-
-#define PARTITION_META_INFO_VOLNAMELTH 64
-/*
- * Enough for the string representation of any kind of UUID plus NULL.
- * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
- */
-#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1)
-
-struct partition_meta_info {
- char uuid[PARTITION_META_INFO_UUIDLTH];
- u8 volname[PARTITION_META_INFO_VOLNAMELTH];
-};
-
-/**
- * DOC: genhd capability flags
- *
- * ``GENHD_FL_REMOVABLE`` (0x0001): indicates that the block device
- * gives access to removable media.
- * When set, the device remains present even when media is not
- * inserted.
- * Must not be set for devices which are removed entirely when the
- * media is removed.
- *
- * ``GENHD_FL_CD`` (0x0008): the block device is a CD-ROM-style
- * device.
- * Affects responses to the ``CDROM_GET_CAPABILITY`` ioctl.
- *
- * ``GENHD_FL_UP`` (0x0010): indicates that the block device is "up",
- * with a similar meaning to network interfaces.
- *
- * ``GENHD_FL_SUPPRESS_PARTITION_INFO`` (0x0020): don't include
- * partition information in ``/proc/partitions`` or in the output of
- * printk_all_partitions().
- * Used for the null block device and some MMC devices.
- *
- * ``GENHD_FL_EXT_DEVT`` (0x0040): the driver supports extended
- * dynamic ``dev_t``, i.e. it wants extended device numbers
- * (``BLOCK_EXT_MAJOR``).
- * This affects the maximum number of partitions.
- *
- * ``GENHD_FL_NATIVE_CAPACITY`` (0x0080): based on information in the
- * partition table, the device's capacity has been extended to its
- * native capacity; i.e. the device has hidden capacity used by one
- * of the partitions (this is a flag used so that native capacity is
- * only ever unlocked once).
- *
- * ``GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE`` (0x0100): event polling is
- * blocked whenever a writer holds an exclusive lock.
- *
- * ``GENHD_FL_NO_PART_SCAN`` (0x0200): partition scanning is disabled.
- * Used for loop devices in their default settings and some MMC
- * devices.
- *
- * ``GENHD_FL_HIDDEN`` (0x0400): the block device is hidden; it
- * doesn't produce events, doesn't appear in sysfs, and doesn't have
- * an associated ``bdev``.
- * Implies ``GENHD_FL_SUPPRESS_PARTITION_INFO`` and
- * ``GENHD_FL_NO_PART_SCAN``.
- * Used for multipath devices.
- */
-#define GENHD_FL_REMOVABLE 0x0001
-/* 2 is unused (used to be GENHD_FL_DRIVERFS) */
-/* 4 is unused (used to be GENHD_FL_MEDIA_CHANGE_NOTIFY) */
-#define GENHD_FL_CD 0x0008
-#define GENHD_FL_UP 0x0010
-#define GENHD_FL_SUPPRESS_PARTITION_INFO 0x0020
-#define GENHD_FL_EXT_DEVT 0x0040
-#define GENHD_FL_NATIVE_CAPACITY 0x0080
-#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 0x0100
-#define GENHD_FL_NO_PART_SCAN 0x0200
-#define GENHD_FL_HIDDEN 0x0400
-
-enum {
- DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
- DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
-};
-
-enum {
- /* Poll even if events_poll_msecs is unset */
- DISK_EVENT_FLAG_POLL = 1 << 0,
- /* Forward events to udev */
- DISK_EVENT_FLAG_UEVENT = 1 << 1,
-};
-
-struct disk_events;
-struct badblocks;
-
-struct blk_integrity {
- const struct blk_integrity_profile *profile;
- unsigned char flags;
- unsigned char tuple_size;
- unsigned char interval_exp;
- unsigned char tag_size;
-};
-
-struct gendisk {
- /* major, first_minor and minors are input parameters only,
- * don't use directly. Use disk_devt() and disk_max_parts().
- */
- int major; /* major number of driver */
- int first_minor;
- int minors; /* maximum number of minors, =1 for
- * disks that can't be partitioned. */
-
- char disk_name[DISK_NAME_LEN]; /* name of major driver */
-
- unsigned short events; /* supported events */
- unsigned short event_flags; /* flags related to event processing */
-
- struct xarray part_tbl;
- struct block_device *part0;
-
- const struct block_device_operations *fops;
- struct request_queue *queue;
- void *private_data;
-
- int flags;
- unsigned long state;
-#define GD_NEED_PART_SCAN 0
-#define GD_READ_ONLY 1
- struct kobject *slave_dir;
-
- struct timer_rand_state *random;
- atomic_t sync_io; /* RAID */
- struct disk_events *ev;
-#ifdef CONFIG_BLK_DEV_INTEGRITY
- struct kobject integrity_kobj;
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-#if IS_ENABLED(CONFIG_CDROM)
- struct cdrom_device_info *cdi;
-#endif
- int node_id;
- struct badblocks *bb;
- struct lockdep_map lockdep_map;
-};
-
-/*
- * The gendisk is refcounted by the part0 block_device, and the bd_device
- * therein is also used for device model presentation in sysfs.
- */
-#define dev_to_disk(device) \
- (dev_to_bdev(device)->bd_disk)
-#define disk_to_dev(disk) \
- (&((disk)->part0->bd_device))
-
-#if IS_REACHABLE(CONFIG_CDROM)
-#define disk_to_cdi(disk) ((disk)->cdi)
-#else
-#define disk_to_cdi(disk) NULL
-#endif
-
-static inline int disk_max_parts(struct gendisk *disk)
-{
- if (disk->flags & GENHD_FL_EXT_DEVT)
- return DISK_MAX_PARTS;
- return disk->minors;
-}
-
-static inline bool disk_part_scan_enabled(struct gendisk *disk)
-{
- return disk_max_parts(disk) > 1 &&
- !(disk->flags & GENHD_FL_NO_PART_SCAN);
-}
-
-static inline dev_t disk_devt(struct gendisk *disk)
-{
- return MKDEV(disk->major, disk->first_minor);
-}
-
-void disk_uevent(struct gendisk *disk, enum kobject_action action);
-
-/* block/genhd.c */
-extern void device_add_disk(struct device *parent, struct gendisk *disk,
- const struct attribute_group **groups);
-static inline void add_disk(struct gendisk *disk)
-{
- device_add_disk(NULL, disk, NULL);
-}
-extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
-static inline void add_disk_no_queue_reg(struct gendisk *disk)
-{
- device_add_disk_no_queue_reg(NULL, disk);
-}
-
-extern void del_gendisk(struct gendisk *gp);
-extern struct block_device *bdget_disk(struct gendisk *disk, int partno);
-
-void set_disk_ro(struct gendisk *disk, bool read_only);
-
-static inline int get_disk_ro(struct gendisk *disk)
-{
- return disk->part0->bd_read_only ||
- test_bit(GD_READ_ONLY, &disk->state);
-}
-
-extern void disk_block_events(struct gendisk *disk);
-extern void disk_unblock_events(struct gendisk *disk);
-extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
-bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
-
-/* drivers/char/random.c */
-extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
-extern void rand_initialize_disk(struct gendisk *disk);
-
-static inline sector_t get_start_sect(struct block_device *bdev)
-{
- return bdev->bd_start_sect;
-}
-
-static inline sector_t bdev_nr_sectors(struct block_device *bdev)
-{
- return i_size_read(bdev->bd_inode) >> 9;
-}
-
-static inline sector_t get_capacity(struct gendisk *disk)
-{
- return bdev_nr_sectors(disk->part0);
-}
-
-int bdev_disk_changed(struct block_device *bdev, bool invalidate);
-int blk_add_partitions(struct gendisk *disk, struct block_device *bdev);
-void blk_drop_partitions(struct gendisk *disk);
-
-extern struct gendisk *__alloc_disk_node(int minors, int node_id);
-extern void put_disk(struct gendisk *disk);
-
-#define alloc_disk_node(minors, node_id) \
-({ \
- static struct lock_class_key __key; \
- const char *__name; \
- struct gendisk *__disk; \
- \
- __name = "(gendisk_completion)"#minors"("#node_id")"; \
- \
- __disk = __alloc_disk_node(minors, node_id); \
- \
- if (__disk) \
- lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \
- \
- __disk; \
-})
-
-#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE)
-
-int __register_blkdev(unsigned int major, const char *name,
- void (*probe)(dev_t devt));
-#define register_blkdev(major, name) \
- __register_blkdev(major, name, NULL)
-void unregister_blkdev(unsigned int major, const char *name);
-
-bool bdev_check_media_change(struct block_device *bdev);
-int __invalidate_device(struct block_device *bdev, bool kill_dirty);
-void set_capacity(struct gendisk *disk, sector_t size);
-
-/* for drivers/char/raw.c: */
-int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
-long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
-
-#ifdef CONFIG_SYSFS
-int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
-void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
-#else
-static inline int bd_link_disk_holder(struct block_device *bdev,
- struct gendisk *disk)
-{
- return 0;
-}
-static inline void bd_unlink_disk_holder(struct block_device *bdev,
- struct gendisk *disk)
-{
-}
-#endif /* CONFIG_SYSFS */
-
-extern struct rw_semaphore bdev_lookup_sem;
-
-dev_t blk_lookup_devt(const char *name, int partno);
-void blk_request_module(dev_t devt);
-#ifdef CONFIG_BLOCK
-void printk_all_partitions(void);
-#else /* CONFIG_BLOCK */
-static inline void printk_all_partitions(void)
-{
-}
-#endif /* CONFIG_BLOCK */
-
-#endif /* _LINUX_GENHD_H */
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index 939b1a8f571b..d4da060b7532 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -2,6 +2,7 @@
#ifndef GENL_MAGIC_FUNC_H
#define GENL_MAGIC_FUNC_H
+#include <linux/args.h>
#include <linux/build_bug.h>
#include <linux/genl_magic_struct.h>
@@ -23,7 +24,7 @@
#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
[tag_name] = { .type = NLA_NESTED },
-static struct nla_policy CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy)[] = {
+static struct nla_policy CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy)[] = {
#include GENL_MAGIC_INCLUDE_FILE
};
@@ -209,7 +210,7 @@ static int s_name ## _from_attrs_for_change(struct s_name *s, \
* Magic: define op number to op name mapping {{{1
* {{{2
*/
-const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
+static const char *CONCATENATE(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
{
switch (cmd) {
#undef GENL_op
@@ -235,7 +236,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
.cmd = op_name, \
},
-#define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
+#define ZZZ_genl_ops CONCATENATE(GENL_MAGIC_FAMILY, _genl_ops)
static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
#include GENL_MAGIC_INCLUDE_FILE
};
@@ -248,32 +249,32 @@ static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
* and provide register/unregister functions.
* {{{2
*/
-#define ZZZ_genl_family CONCAT_(GENL_MAGIC_FAMILY, _genl_family)
+#define ZZZ_genl_family CONCATENATE(GENL_MAGIC_FAMILY, _genl_family)
static struct genl_family ZZZ_genl_family;
/*
* Magic: define multicast groups
* Magic: define multicast group registration helper
*/
-#define ZZZ_genl_mcgrps CONCAT_(GENL_MAGIC_FAMILY, _genl_mcgrps)
+#define ZZZ_genl_mcgrps CONCATENATE(GENL_MAGIC_FAMILY, _genl_mcgrps)
static const struct genl_multicast_group ZZZ_genl_mcgrps[] = {
#undef GENL_mc_group
#define GENL_mc_group(group) { .name = #group, },
#include GENL_MAGIC_INCLUDE_FILE
};
-enum CONCAT_(GENL_MAGIC_FAMILY, group_ids) {
+enum CONCATENATE(GENL_MAGIC_FAMILY, group_ids) {
#undef GENL_mc_group
-#define GENL_mc_group(group) CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group),
+#define GENL_mc_group(group) CONCATENATE(GENL_MAGIC_FAMILY, _group_ ## group),
#include GENL_MAGIC_INCLUDE_FILE
};
#undef GENL_mc_group
#define GENL_mc_group(group) \
-static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
+static int CONCATENATE(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
struct sk_buff *skb, gfp_t flags) \
{ \
unsigned int group_id = \
- CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group); \
+ CONCATENATE(GENL_MAGIC_FAMILY, _group_ ## group); \
return genlmsg_multicast(&ZZZ_genl_family, skb, 0, \
group_id, flags); \
}
@@ -289,21 +290,22 @@ static struct genl_family ZZZ_genl_family __ro_after_init = {
#ifdef GENL_MAGIC_FAMILY_HDRSZ
.hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
#endif
- .maxattr = ARRAY_SIZE(CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy))-1,
- .policy = CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy),
+ .maxattr = ARRAY_SIZE(CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy))-1,
+ .policy = CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy),
.ops = ZZZ_genl_ops,
.n_ops = ARRAY_SIZE(ZZZ_genl_ops),
.mcgrps = ZZZ_genl_mcgrps,
+ .resv_start_op = 42, /* drbd is currently the only user */
.n_mcgrps = ARRAY_SIZE(ZZZ_genl_mcgrps),
.module = THIS_MODULE,
};
-int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
+int CONCATENATE(GENL_MAGIC_FAMILY, _genl_register)(void)
{
return genl_register_family(&ZZZ_genl_family);
}
-void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void)
+void CONCATENATE(GENL_MAGIC_FAMILY, _genl_unregister)(void)
{
genl_unregister_family(&ZZZ_genl_family);
}
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index f81d48987528..621b87a87d74 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -14,14 +14,12 @@
# error "you need to define GENL_MAGIC_INCLUDE_FILE before inclusion"
#endif
-#include <linux/genetlink.h>
+#include <linux/args.h>
#include <linux/types.h>
+#include <net/genetlink.h>
-#define CONCAT__(a,b) a ## b
-#define CONCAT_(a,b) CONCAT__(a,b)
-
-extern int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void);
-extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void);
+extern int CONCATENATE(GENL_MAGIC_FAMILY, _genl_register)(void);
+extern void CONCATENATE(GENL_MAGIC_FAMILY, _genl_unregister)(void);
/*
* Extension of genl attribute validation policies {{{2
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 11da8af06704..b155929af5b1 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -2,327 +2,16 @@
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H
-#include <linux/mmdebug.h>
+#include <linux/gfp_types.h>
+
#include <linux/mmzone.h>
-#include <linux/stddef.h>
-#include <linux/linkage.h>
#include <linux/topology.h>
-
-/* The typedef is in types.h but we want the documentation here */
-#if 0
-/**
- * typedef gfp_t - Memory allocation flags.
- *
- * GFP flags are commonly used throughout Linux to indicate how memory
- * should be allocated. The GFP acronym stands for get_free_pages(),
- * the underlying memory allocation function. Not every GFP flag is
- * supported by every function which may allocate memory. Most users
- * will want to use a plain ``GFP_KERNEL``.
- */
-typedef unsigned int __bitwise gfp_t;
-#endif
+#include <linux/alloc_tag.h>
+#include <linux/cleanup.h>
+#include <linux/sched.h>
struct vm_area_struct;
-
-/*
- * In case of changes, please don't forget to update
- * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
- */
-
-/* Plain integer GFP bitmasks. Do not use this directly. */
-#define ___GFP_DMA 0x01u
-#define ___GFP_HIGHMEM 0x02u
-#define ___GFP_DMA32 0x04u
-#define ___GFP_MOVABLE 0x08u
-#define ___GFP_RECLAIMABLE 0x10u
-#define ___GFP_HIGH 0x20u
-#define ___GFP_IO 0x40u
-#define ___GFP_FS 0x80u
-#define ___GFP_ZERO 0x100u
-#define ___GFP_ATOMIC 0x200u
-#define ___GFP_DIRECT_RECLAIM 0x400u
-#define ___GFP_KSWAPD_RECLAIM 0x800u
-#define ___GFP_WRITE 0x1000u
-#define ___GFP_NOWARN 0x2000u
-#define ___GFP_RETRY_MAYFAIL 0x4000u
-#define ___GFP_NOFAIL 0x8000u
-#define ___GFP_NORETRY 0x10000u
-#define ___GFP_MEMALLOC 0x20000u
-#define ___GFP_COMP 0x40000u
-#define ___GFP_NOMEMALLOC 0x80000u
-#define ___GFP_HARDWALL 0x100000u
-#define ___GFP_THISNODE 0x200000u
-#define ___GFP_ACCOUNT 0x400000u
-#ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP 0x800000u
-#else
-#define ___GFP_NOLOCKDEP 0
-#endif
-/* If the above are modified, __GFP_BITS_SHIFT may need updating */
-
-/*
- * Physical address zone modifiers (see linux/mmzone.h - low four bits)
- *
- * Do not put any conditional on these. If necessary modify the definitions
- * without the underscores and use them consistently. The definitions here may
- * be used in bit comparisons.
- */
-#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
-#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
-#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
-#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
-#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
-
-/**
- * DOC: Page mobility and placement hints
- *
- * Page mobility and placement hints
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * These flags provide hints about how mobile the page is. Pages with similar
- * mobility are placed within the same pageblocks to minimise problems due
- * to external fragmentation.
- *
- * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
- * moved by page migration during memory compaction or can be reclaimed.
- *
- * %__GFP_RECLAIMABLE is used for slab allocations that specify
- * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
- *
- * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
- * these pages will be spread between local zones to avoid all the dirty
- * pages being in one zone (fair zone allocation policy).
- *
- * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
- *
- * %__GFP_THISNODE forces the allocation to be satisfied from the requested
- * node with no fallbacks or placement policy enforcements.
- *
- * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
- */
-#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
-#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
-#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
-#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
-#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
-
-/**
- * DOC: Watermark modifiers
- *
- * Watermark modifiers -- controls access to emergency reserves
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * %__GFP_HIGH indicates that the caller is high-priority and that granting
- * the request is necessary before the system can make forward progress.
- * For example, creating an IO context to clean pages.
- *
- * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
- * high priority. Users are typically interrupt handlers. This may be
- * used in conjunction with %__GFP_HIGH
- *
- * %__GFP_MEMALLOC allows access to all memory. This should only be used when
- * the caller guarantees the allocation will allow more memory to be freed
- * very shortly e.g. process exiting or swapping. Users either should
- * be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
- * Users of this flag have to be extremely careful to not deplete the reserve
- * completely and implement a throttling mechanism which controls the
- * consumption of the reserve based on the amount of freed memory.
- * Usage of a pre-allocated pool (e.g. mempool) should be always considered
- * before using this flag.
- *
- * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
- * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
- */
-#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
-#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
-#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
-#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
-
-/**
- * DOC: Reclaim modifiers
- *
- * Reclaim modifiers
- * ~~~~~~~~~~~~~~~~~
- * Please note that all the following flags are only applicable to sleepable
- * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
- *
- * %__GFP_IO can start physical IO.
- *
- * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
- * allocator recursing into the filesystem which might already be holding
- * locks.
- *
- * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
- * This flag can be cleared to avoid unnecessary delays when a fallback
- * option is available.
- *
- * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
- * the low watermark is reached and have it reclaim pages until the high
- * watermark is reached. A caller may wish to clear this flag when fallback
- * options are available and the reclaim is likely to disrupt the system. The
- * canonical example is THP allocation where a fallback is cheap but
- * reclaim/compaction may cause indirect stalls.
- *
- * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
- *
- * The default allocator behavior depends on the request size. We have a concept
- * of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
- * !costly allocations are too essential to fail so they are implicitly
- * non-failing by default (with some exceptions like OOM victims might fail so
- * the caller still has to check for failures) while costly requests try to be
- * not disruptive and back off even without invoking the OOM killer.
- * The following three modifiers might be used to override some of these
- * implicit rules
- *
- * %__GFP_NORETRY: The VM implementation will try only very lightweight
- * memory direct reclaim to get some memory under memory pressure (thus
- * it can sleep). It will avoid disruptive actions like OOM killer. The
- * caller must handle the failure which is quite likely to happen under
- * heavy memory pressure. The flag is suitable when failure can easily be
- * handled at small cost, such as reduced throughput
- *
- * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
- * procedures that have previously failed if there is some indication
- * that progress has been made else where. It can wait for other
- * tasks to attempt high level approaches to freeing memory such as
- * compaction (which removes fragmentation) and page-out.
- * There is still a definite limit to the number of retries, but it is
- * a larger limit than with %__GFP_NORETRY.
- * Allocations with this flag may fail, but only when there is
- * genuinely little unused memory. While these allocations do not
- * directly trigger the OOM killer, their failure indicates that
- * the system is likely to need to use the OOM killer soon. The
- * caller must handle failure, but can reasonably do so by failing
- * a higher-level request, or completing it only in a much less
- * efficient manner.
- * If the allocation does fail, and the caller is in a position to
- * free some non-essential memory, doing so could benefit the system
- * as a whole.
- *
- * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures. The allocation could block
- * indefinitely but will never return with failure. Testing for
- * failure is pointless.
- * New users should be evaluated carefully (and the flag should be
- * used only when there is no reasonable failure policy) but it is
- * definitely preferable to use the flag rather than opencode endless
- * loop around allocator.
- * Using this flag for costly allocations is _highly_ discouraged.
- */
-#define __GFP_IO ((__force gfp_t)___GFP_IO)
-#define __GFP_FS ((__force gfp_t)___GFP_FS)
-#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
-#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
-#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
-#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
-#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
-#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
-
-/**
- * DOC: Action modifiers
- *
- * Action modifiers
- * ~~~~~~~~~~~~~~~~
- *
- * %__GFP_NOWARN suppresses allocation failure reports.
- *
- * %__GFP_COMP address compound page metadata.
- *
- * %__GFP_ZERO returns a zeroed page on success.
- */
-#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
-#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
-#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
-
-/* Disable lockdep for GFP context tracking */
-#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
-
-/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
-#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-
-/**
- * DOC: Useful GFP flag combinations
- *
- * Useful GFP flag combinations
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * Useful GFP flag combinations that are commonly used. It is recommended
- * that subsystems start with one of these combinations and then set/clear
- * %__GFP_FOO flags as necessary.
- *
- * %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
- * watermark is applied to allow access to "atomic reserves".
- * The current implementation doesn't support NMI and few other strict
- * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
- *
- * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
- * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
- *
- * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
- * accounted to kmemcg.
- *
- * %GFP_NOWAIT is for kernel allocations that should not stall for direct
- * reclaim, start physical IO or use any filesystem callback.
- *
- * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
- * that do not require the starting of any physical IO.
- * Please try to avoid using this flag directly and instead use
- * memalloc_noio_{save,restore} to mark the whole scope which cannot
- * perform any IO with a short explanation why. All allocation requests
- * will inherit GFP_NOIO implicitly.
- *
- * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
- * Please try to avoid using this flag directly and instead use
- * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
- * recurse into the FS layer with a short explanation why. All allocation
- * requests will inherit GFP_NOFS implicitly.
- *
- * %GFP_USER is for userspace allocations that also need to be directly
- * accessibly by the kernel or hardware. It is typically used by hardware
- * for buffers that are mapped to userspace (e.g. graphics) that hardware
- * still must DMA to. cpuset limits are enforced for these allocations.
- *
- * %GFP_DMA exists for historical reasons and should be avoided where possible.
- * The flags indicates that the caller requires that the lowest zone be
- * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
- * it would require careful auditing as some users really require it and
- * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
- * lowest zone as a type of emergency reserve.
- *
- * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
- * address.
- *
- * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
- * do not need to be directly accessible by the kernel but that cannot
- * move once in use. An example may be a hardware allocation that maps
- * data directly into userspace but has no addressing limitations.
- *
- * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
- * need direct access to but can use kmap() when access is required. They
- * are expected to be movable via page reclaim or page migration. Typically,
- * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
- *
- * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
- * are compound allocations that will generally fail quickly if memory is not
- * available and will not wake kswapd/kcompactd on failure. The _LIGHT
- * version does not attempt reclaim/compaction at all and is by default used
- * in page fault path, while the non-light is used by khugepaged.
- */
-#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
-#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
-#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
-#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
-#define GFP_NOIO (__GFP_RECLAIM)
-#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
-#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
-#define GFP_DMA __GFP_DMA
-#define GFP_DMA32 __GFP_DMA32
-#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
-#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
-#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
- __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
-#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
+struct mempolicy;
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
@@ -333,12 +22,15 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
+ BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
+ BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
+ GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);
if (unlikely(page_group_by_mobility_disabled))
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
- return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+ return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
@@ -348,27 +40,23 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
-/**
- * gfpflags_normal_context - is gfp_flags a normal sleepable context?
- * @gfp_flags: gfp_flags to test
- *
- * Test whether @gfp_flags indicates that the allocation is from the
- * %current context and allowed to sleep.
- *
- * An allocation being allowed to block doesn't mean it owns the %current
- * context. When direct reclaim path tries to allocate memory, the
- * allocation context is nested inside whatever %current was doing at the
- * time of the original allocation. The nested allocation may be allowed
- * to block but modifying anything %current owns can corrupt the outer
- * context's expectations.
- *
- * %true result from this function indicates that the allocation context
- * can sleep and use anything that's associated with %current.
- */
-static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
+static inline bool gfpflags_allow_spinning(const gfp_t gfp_flags)
{
- return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
- __GFP_DIRECT_RECLAIM;
+ /*
+	 * !__GFP_DIRECT_RECLAIM -> direct reclaim is not allowed.
+	 * !__GFP_KSWAPD_RECLAIM -> it's not safe to wake up kswapd.
+	 * All GFP_* flags including GFP_NOWAIT use one or both of these flags.
+ * alloc_pages_nolock() is the only API that doesn't specify either flag.
+ *
+ * This is stronger than GFP_NOWAIT or GFP_ATOMIC because
+ * those are guaranteed to never block on a sleeping lock.
+ * Here we are enforcing that the allocation doesn't ever spin
+ * on any locks (i.e. only trylocks). There is no high level
+ * GFP_$FOO flag for this use in alloc_pages_nolock() as the
+ * regular page allocator doesn't fully support this
+ * allocation mode.
+ */
+ return !!(gfp_flags & __GFP_RECLAIM);
}
#ifdef CONFIG_HIGHMEM
@@ -489,13 +177,38 @@ static inline int gfp_zonelist(gfp_t flags)
}
/*
+ * gfp flag masking for nested internal allocations.
+ *
+ * For code that needs to do allocations inside the public allocation API
+ * (e.g. memory allocation tracking code), the allocations need to obey the
+ * caller's allocation context constraints to prevent allocation context
+ * mismatches (e.g. GFP_KERNEL allocations in GFP_NOFS contexts) that could
+ * lead to deadlocks.
+ *
+ * It is also assumed that these nested allocations are for internal kernel
+ * object storage purposes only and are not going to be used for DMA, etc. Hence
+ * we strip out all the zone information and leave just the context information
+ * intact.
+ *
+ * Further, internal allocations must fail before the higher level allocation
+ * can fail, so we must make them fail faster and fail silently. We also don't
+ * want them to deplete emergency reserves. Hence callers must be prepared
+ * for these nested allocations to fail.
+ */
+static inline gfp_t gfp_nested_mask(gfp_t flags)
+{
+ return ((flags & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP)) |
+ (__GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN));
+}
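+
+/*
+ * Illustrative use, assuming <linux/slab.h> at the real call site (it cannot
+ * be included here): a tracking subsystem allocating from inside the
+ * allocator derives its internal gfp from the caller's flags so the caller's
+ * context constraints are inherited:
+ *
+ *	meta = kmalloc(sizeof(*meta), gfp_nested_mask(caller_gfp));
+ */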
+
+/*
* We get the zone list from the current node and the gfp_mask.
* This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
* There are two zonelists per node, one for all zones with memory and
* one containing just zones from the node the zonelist belongs to.
*
- * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
- * optimized to &contig_page_data at compile-time.
+ * For the case of non-NUMA systems the NODE_DATA() gets optimized to
+ * &contig_page_data at compile-time.
*/
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
@@ -508,32 +221,55 @@ static inline void arch_free_page(struct page *page, int order) { }
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif
-#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
-static inline int arch_make_page_accessible(struct page *page)
-{
- return 0;
-}
-#endif
-struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
+struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
+ nodemask_t *nodemask);
+#define __alloc_pages(...) alloc_hooks(__alloc_pages_noprof(__VA_ARGS__))
+
+struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
nodemask_t *nodemask);
+#define __folio_alloc(...) alloc_hooks(__folio_alloc_noprof(__VA_ARGS__))
-unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
+unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
nodemask_t *nodemask, int nr_pages,
- struct list_head *page_list,
struct page **page_array);
+#define __alloc_pages_bulk(...) alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))
+
+unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
+ unsigned long nr_pages,
+ struct page **page_array);
+#define alloc_pages_bulk_mempolicy(...) \
+ alloc_hooks(alloc_pages_bulk_mempolicy_noprof(__VA_ARGS__))
/* Bulk allocate order-0 pages */
+#define alloc_pages_bulk(_gfp, _nr_pages, _page_array) \
+ __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array)
+
static inline unsigned long
-alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list)
+alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
+ struct page **page_array)
{
- return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL);
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
+
+ return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
}
-static inline unsigned long
-alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array)
+#define alloc_pages_bulk_node(...) \
+ alloc_hooks(alloc_pages_bulk_node_noprof(__VA_ARGS__))
+
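+/*
+ * Sketch of the array-based bulk API after this change: the caller always
+ * supplies a page array sized for at least @nr entries; the old list-based
+ * variant is gone. "example_bulk_fill" is an illustrative name only.
+ */
+static inline unsigned long example_bulk_fill(struct page **pages,
+					      unsigned long nr)
+{
+	return alloc_pages_bulk(GFP_KERNEL, nr, pages);
+}
+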
+static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
{
- return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array);
+ gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
+
+ if (warn_gfp != (__GFP_THISNODE|__GFP_NOWARN))
+ return;
+
+ if (node_online(this_node))
+ return;
+
+ pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
+ dump_stack();
}
/*
@@ -541,88 +277,123 @@ alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_arr
* online. For more general interface, see alloc_pages_node().
*/
static inline struct page *
-__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
+__alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
- VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
+ warn_if_node_offline(nid, gfp_mask);
- return __alloc_pages(gfp_mask, order, nid, NULL);
+ return __alloc_pages_noprof(gfp_mask, order, nid, NULL);
}
+#define __alloc_pages_node(...) alloc_hooks(__alloc_pages_node_noprof(__VA_ARGS__))
+
+static inline
+struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid)
+{
+ VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
+ warn_if_node_offline(nid, gfp);
+
+ return __folio_alloc_noprof(gfp, order, nid, NULL);
+}
+
+#define __folio_alloc_node(...) alloc_hooks(__folio_alloc_node_noprof(__VA_ARGS__))
+
/*
* Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
* prefer the current CPU's closest node. Otherwise node must be valid and
* online.
*/
-static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
- unsigned int order)
+static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
+ unsigned int order)
{
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
- return __alloc_pages_node(nid, gfp_mask, order);
+ return __alloc_pages_node_noprof(nid, gfp_mask, order);
}
+#define alloc_pages_node(...) alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__))
+
#ifdef CONFIG_NUMA
-struct page *alloc_pages(gfp_t gfp, unsigned int order);
-extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
- struct vm_area_struct *vma, unsigned long addr,
- int node, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
- alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
+struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
+struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
+struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
+ struct mempolicy *mpol, pgoff_t ilx, int nid);
+struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
+ unsigned long addr);
#else
-static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
+static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
+{
+ return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order);
+}
+static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
+{
+ return __folio_alloc_node_noprof(gfp, order, numa_node_id());
+}
+static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
+ struct mempolicy *mpol, pgoff_t ilx, int nid)
{
- return alloc_pages_node(numa_node_id(), gfp_mask, order);
+ return folio_alloc_noprof(gfp, order);
}
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
- alloc_pages(gfp_mask, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
- alloc_pages(gfp_mask, order)
+#define vma_alloc_folio_noprof(gfp, order, vma, addr) \
+ folio_alloc_noprof(gfp, order)
#endif
+
+#define alloc_pages(...) alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
+#define folio_alloc(...) alloc_hooks(folio_alloc_noprof(__VA_ARGS__))
+#define folio_alloc_mpol(...) alloc_hooks(folio_alloc_mpol_noprof(__VA_ARGS__))
+#define vma_alloc_folio(...) alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__))
+
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-#define alloc_page_vma(gfp_mask, vma, addr) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
-extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
-extern unsigned long get_zeroed_page(gfp_t gfp_mask);
+static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr);
+
+ return &folio->page;
+}
+#define alloc_page_vma(...) alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))
-void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
-void free_pages_exact(void *virt, size_t size);
-void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
+struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
+#define alloc_pages_nolock(...) alloc_hooks(alloc_pages_nolock_noprof(__VA_ARGS__))
-#define __get_free_page(gfp_mask) \
- __get_free_pages((gfp_mask), 0)
+extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
+#define __get_free_pages(...) alloc_hooks(get_free_pages_noprof(__VA_ARGS__))
-#define __get_dma_pages(gfp_mask, order) \
- __get_free_pages((gfp_mask) | GFP_DMA, (order))
+extern unsigned long get_zeroed_page_noprof(gfp_t gfp_mask);
+#define get_zeroed_page(...) alloc_hooks(get_zeroed_page_noprof(__VA_ARGS__))
-extern void __free_pages(struct page *page, unsigned int order);
-extern void free_pages(unsigned long addr, unsigned int order);
+void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) __alloc_size(1);
+#define alloc_pages_exact(...) alloc_hooks(alloc_pages_exact_noprof(__VA_ARGS__))
-struct page_frag_cache;
-extern void __page_frag_cache_drain(struct page *page, unsigned int count);
-extern void *page_frag_alloc_align(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask,
- unsigned int align_mask);
+void free_pages_exact(void *virt, size_t size);
-static inline void *page_frag_alloc(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask)
-{
- return page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
-}
+__meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
+#define alloc_pages_exact_nid(...) \
+ alloc_hooks(alloc_pages_exact_nid_noprof(__VA_ARGS__))
+
+#define __get_free_page(gfp_mask) \
+ __get_free_pages((gfp_mask), 0)
-extern void page_frag_free(void *addr);
+#define __get_dma_pages(gfp_mask, order) \
+ __get_free_pages((gfp_mask) | GFP_DMA, (order))
+
+extern void __free_pages(struct page *page, unsigned int order);
+extern void free_pages_nolock(struct page *page, unsigned int order);
+extern void free_pages(unsigned long addr, unsigned int order);
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
-void page_alloc_init(void);
+void page_alloc_init_cpuhp(void);
+bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);
void page_alloc_init_late(void);
+void setup_pcp_cacheinfo(unsigned int cpu);
/*
* gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
@@ -636,32 +407,63 @@ extern gfp_t gfp_allowed_mask;
/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
-extern void pm_restrict_gfp_mask(void);
-extern void pm_restore_gfp_mask(void);
-
-extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
+static inline bool gfp_has_io_fs(gfp_t gfp)
+{
+ return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
+}
-#ifdef CONFIG_PM_SLEEP
-extern bool pm_suspended_storage(void);
-#else
-static inline bool pm_suspended_storage(void)
+/*
+ * Check if the gfp flags allow compaction - GFP_NOIO is a really
+ * tricky context because the migration might require IO.
+ */
+static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
{
- return false;
+ return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
}
-#endif /* CONFIG_PM_SLEEP */
+
+extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
#ifdef CONFIG_CONTIG_ALLOC
+
+typedef unsigned int __bitwise acr_flags_t;
+#define ACR_FLAGS_NONE ((__force acr_flags_t)0) // ordinary allocation request
+#define ACR_FLAGS_CMA ((__force acr_flags_t)BIT(0)) // allocate for CMA
+
/* The below functions must be run on a range from a single zone. */
-extern int alloc_contig_range(unsigned long start, unsigned long end,
- unsigned migratetype, gfp_t gfp_mask);
-extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
- int nid, nodemask_t *nodemask);
+extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
+ acr_flags_t alloc_flags, gfp_t gfp_mask);
+#define alloc_contig_range(...) alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
+
+extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
+ int nid, nodemask_t *nodemask);
+#define alloc_contig_pages(...) alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))
+
#endif
void free_contig_range(unsigned long pfn, unsigned long nr_pages);
-#ifdef CONFIG_CMA
-/* CMA stuff */
-extern void init_cma_reserved_pageblock(struct page *page);
+#ifdef CONFIG_CONTIG_ALLOC
+static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
+ int nid, nodemask_t *node)
+{
+ struct page *page;
+
+ if (WARN_ON(!order || !(gfp & __GFP_COMP)))
+ return NULL;
+
+ page = alloc_contig_pages_noprof(1 << order, gfp, nid, node);
+
+ return page ? page_folio(page) : NULL;
+}
+#else
+static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp,
+ int nid, nodemask_t *node)
+{
+ return NULL;
+}
#endif
+/* This should be paired with folio_put() rather than free_contig_range(). */
+#define folio_alloc_gigantic(...) alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__))
+
+DEFINE_FREE(free_page, void *, free_page((unsigned long)_T))
#endif /* __LINUX_GFP_H */
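The hunks above move every user-facing page allocator entry point to the _noprof + alloc_hooks() pattern used by memory allocation profiling: the implementation keeps the *_noprof() name, and the familiar name becomes a macro that wraps the call in hook bookkeeping so the allocation is charged to its call site. A minimal caller-side sketch, assuming a sleepable GFP_KERNEL context and the usual <linux/gfp.h>/<linux/mm.h> includes (fill_buffer_example is hypothetical, not part of this diff):

static int fill_buffer_example(void)
{
	/*
	 * Expands to alloc_hooks(alloc_pages_noprof(GFP_KERNEL, 2));
	 * with CONFIG_MEM_ALLOC_PROFILING enabled the allocation is
	 * accounted to this call site.
	 */
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return -ENOMEM;

	memset(page_address(page), 0, 4 * PAGE_SIZE);	/* order 2 = 4 pages */
	__free_pages(page, 2);
	return 0;
}

The new DEFINE_FREE(free_page, ...) entry similarly permits scoped cleanup, e.g. "void *buf __free(free_page) = (void *)get_zeroed_page(GFP_KERNEL);", tying the page's lifetime to the enclosing block.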
diff --git a/include/linux/gfp_api.h b/include/linux/gfp_api.h
new file mode 100644
index 000000000000..5a05a2764a86
--- /dev/null
+++ b/include/linux/gfp_api.h
@@ -0,0 +1 @@
+#include <linux/gfp.h>
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
new file mode 100644
index 000000000000..3de43b12209e
--- /dev/null
+++ b/include/linux/gfp_types.h
@@ -0,0 +1,386 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GFP_TYPES_H
+#define __LINUX_GFP_TYPES_H
+
+#include <linux/bits.h>
+
+/* The typedef is in types.h but we want the documentation here */
+#if 0
+/**
+ * typedef gfp_t - Memory allocation flags.
+ *
+ * GFP flags are commonly used throughout Linux to indicate how memory
+ * should be allocated. The GFP acronym stands for get_free_pages(),
+ * the underlying memory allocation function. Not every GFP flag is
+ * supported by every function which may allocate memory. Most users
+ * will want to use a plain ``GFP_KERNEL``.
+ */
+typedef unsigned int __bitwise gfp_t;
+#endif
+
+/*
+ * In case of changes, please don't forget to update
+ * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
+ */
+
+enum {
+ ___GFP_DMA_BIT,
+ ___GFP_HIGHMEM_BIT,
+ ___GFP_DMA32_BIT,
+ ___GFP_MOVABLE_BIT,
+ ___GFP_RECLAIMABLE_BIT,
+ ___GFP_HIGH_BIT,
+ ___GFP_IO_BIT,
+ ___GFP_FS_BIT,
+ ___GFP_ZERO_BIT,
+ ___GFP_UNUSED_BIT, /* 0x200u unused */
+ ___GFP_DIRECT_RECLAIM_BIT,
+ ___GFP_KSWAPD_RECLAIM_BIT,
+ ___GFP_WRITE_BIT,
+ ___GFP_NOWARN_BIT,
+ ___GFP_RETRY_MAYFAIL_BIT,
+ ___GFP_NOFAIL_BIT,
+ ___GFP_NORETRY_BIT,
+ ___GFP_MEMALLOC_BIT,
+ ___GFP_COMP_BIT,
+ ___GFP_NOMEMALLOC_BIT,
+ ___GFP_HARDWALL_BIT,
+ ___GFP_THISNODE_BIT,
+ ___GFP_ACCOUNT_BIT,
+ ___GFP_ZEROTAGS_BIT,
+#ifdef CONFIG_KASAN_HW_TAGS
+ ___GFP_SKIP_ZERO_BIT,
+ ___GFP_SKIP_KASAN_BIT,
+#endif
+#ifdef CONFIG_LOCKDEP
+ ___GFP_NOLOCKDEP_BIT,
+#endif
+ ___GFP_NO_OBJ_EXT_BIT,
+ ___GFP_LAST_BIT
+};
+
+/* Plain integer GFP bitmasks. Do not use this directly. */
+#define ___GFP_DMA BIT(___GFP_DMA_BIT)
+#define ___GFP_HIGHMEM BIT(___GFP_HIGHMEM_BIT)
+#define ___GFP_DMA32 BIT(___GFP_DMA32_BIT)
+#define ___GFP_MOVABLE BIT(___GFP_MOVABLE_BIT)
+#define ___GFP_RECLAIMABLE BIT(___GFP_RECLAIMABLE_BIT)
+#define ___GFP_HIGH BIT(___GFP_HIGH_BIT)
+#define ___GFP_IO BIT(___GFP_IO_BIT)
+#define ___GFP_FS BIT(___GFP_FS_BIT)
+#define ___GFP_ZERO BIT(___GFP_ZERO_BIT)
+/* 0x200u unused */
+#define ___GFP_DIRECT_RECLAIM BIT(___GFP_DIRECT_RECLAIM_BIT)
+#define ___GFP_KSWAPD_RECLAIM BIT(___GFP_KSWAPD_RECLAIM_BIT)
+#define ___GFP_WRITE BIT(___GFP_WRITE_BIT)
+#define ___GFP_NOWARN BIT(___GFP_NOWARN_BIT)
+#define ___GFP_RETRY_MAYFAIL BIT(___GFP_RETRY_MAYFAIL_BIT)
+#define ___GFP_NOFAIL BIT(___GFP_NOFAIL_BIT)
+#define ___GFP_NORETRY BIT(___GFP_NORETRY_BIT)
+#define ___GFP_MEMALLOC BIT(___GFP_MEMALLOC_BIT)
+#define ___GFP_COMP BIT(___GFP_COMP_BIT)
+#define ___GFP_NOMEMALLOC BIT(___GFP_NOMEMALLOC_BIT)
+#define ___GFP_HARDWALL BIT(___GFP_HARDWALL_BIT)
+#define ___GFP_THISNODE BIT(___GFP_THISNODE_BIT)
+#define ___GFP_ACCOUNT BIT(___GFP_ACCOUNT_BIT)
+#define ___GFP_ZEROTAGS BIT(___GFP_ZEROTAGS_BIT)
+#ifdef CONFIG_KASAN_HW_TAGS
+#define ___GFP_SKIP_ZERO BIT(___GFP_SKIP_ZERO_BIT)
+#define ___GFP_SKIP_KASAN BIT(___GFP_SKIP_KASAN_BIT)
+#else
+#define ___GFP_SKIP_ZERO 0
+#define ___GFP_SKIP_KASAN 0
+#endif
+#ifdef CONFIG_LOCKDEP
+#define ___GFP_NOLOCKDEP BIT(___GFP_NOLOCKDEP_BIT)
+#else
+#define ___GFP_NOLOCKDEP 0
+#endif
+#define ___GFP_NO_OBJ_EXT BIT(___GFP_NO_OBJ_EXT_BIT)
+
+/*
+ * Physical address zone modifiers (see linux/mmzone.h - low four bits)
+ *
+ * Do not put any conditional on these. If necessary modify the definitions
+ * without the underscores and use them consistently. The definitions here may
+ * be used in bit comparisons.
+ */
+#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
+#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
+#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
+#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
+#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
+
+/**
+ * DOC: Page mobility and placement hints
+ *
+ * Page mobility and placement hints
+ * ---------------------------------
+ *
+ * These flags provide hints about how mobile the page is. Pages with similar
+ * mobility are placed within the same pageblocks to minimise problems due
+ * to external fragmentation.
+ *
+ * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
+ * moved by page migration during memory compaction or can be reclaimed.
+ *
+ * %__GFP_RECLAIMABLE is used for slab allocations that specify
+ * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
+ *
+ * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
+ * these pages will be spread between local zones to avoid all the dirty
+ * pages being in one zone (fair zone allocation policy).
+ *
+ * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
+ *
+ * %__GFP_THISNODE forces the allocation to be satisfied from the requested
+ * node with no fallbacks or placement policy enforcements.
+ *
+ * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
+ *
+ * %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension.
+ */
+#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
+#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
+#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
+#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
+#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
+#define __GFP_NO_OBJ_EXT ((__force gfp_t)___GFP_NO_OBJ_EXT)
+
+/**
+ * DOC: Watermark modifiers
+ *
+ * Watermark modifiers -- controls access to emergency reserves
+ * ------------------------------------------------------------
+ *
+ * %__GFP_HIGH indicates that the caller is high-priority and that granting
+ * the request is necessary before the system can make forward progress.
+ * For example creating an IO context to clean pages and requests
+ * from atomic context.
+ *
+ * %__GFP_MEMALLOC allows access to all memory. This should only be used when
+ * the caller guarantees the allocation will allow more memory to be freed
+ * very shortly, e.g. the process exiting or swapping. Users should either
+ * be the MM or co-ordinate closely with the VM (e.g. swap over NFS).
+ * Users of this flag have to be extremely careful to not deplete the reserve
+ * completely and implement a throttling mechanism which controls the
+ * consumption of the reserve based on the amount of freed memory.
+ * Usage of a pre-allocated pool (e.g. mempool) should always be considered
+ * before using this flag.
+ *
+ * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
+ * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
+ */
+#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
+#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
+
+/**
+ * DOC: Reclaim modifiers
+ *
+ * Reclaim modifiers
+ * -----------------
+ * Please note that all the following flags are only applicable to sleepable
+ * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
+ *
+ * %__GFP_IO can start physical IO.
+ *
+ * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
+ * allocator recursing into the filesystem which might already be holding
+ * locks.
+ *
+ * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
+ * This flag can be cleared to avoid unnecessary delays when a fallback
+ * option is available.
+ *
+ * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
+ * the low watermark is reached and have it reclaim pages until the high
+ * watermark is reached. A caller may wish to clear this flag when fallback
+ * options are available and the reclaim is likely to disrupt the system. The
+ * canonical example is THP allocation where a fallback is cheap but
+ * reclaim/compaction may cause indirect stalls.
+ *
+ * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
+ *
+ * The default allocator behavior depends on the request size. We have a concept
+ * of so-called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
+ * !costly allocations are too essential to fail so they are implicitly
+ * non-failing by default (with some exceptions: e.g. OOM victims may still
+ * fail, so the caller has to check for failures) while costly requests try
+ * not to be disruptive and back off even without invoking the OOM killer.
+ * The following three modifiers might be used to override some of these
+ * implicit rules. Please note that all of them must be used along with
+ * %__GFP_DIRECT_RECLAIM flag.
+ *
+ * %__GFP_NORETRY: The VM implementation will try only very lightweight
+ * memory direct reclaim to get some memory under memory pressure (thus
+ * it can sleep). It will avoid disruptive actions like OOM killer. The
+ * caller must handle the failure which is quite likely to happen under
+ * heavy memory pressure. The flag is suitable when failure can easily be
+ * handled at small cost, such as reduced throughput.
+ *
+ * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
+ * procedures that have previously failed if there is some indication
+ * that progress has been made elsewhere. It can wait for other
+ * tasks to attempt high-level approaches to freeing memory such as
+ * compaction (which removes fragmentation) and page-out.
+ * There is still a definite limit to the number of retries, but it is
+ * a larger limit than with %__GFP_NORETRY.
+ * Allocations with this flag may fail, but only when there is
+ * genuinely little unused memory. While these allocations do not
+ * directly trigger the OOM killer, their failure indicates that
+ * the system is likely to need to use the OOM killer soon. The
+ * caller must handle failure, but can reasonably do so by failing
+ * a higher-level request, or completing it only in a much less
+ * efficient manner.
+ * If the allocation does fail, and the caller is in a position to
+ * free some non-essential memory, doing so could benefit the system
+ * as a whole.
+ *
+ * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
+ * cannot handle allocation failures. The allocation could block
+ * indefinitely but will never return with failure. Testing for
+ * failure is pointless.
+ * It _must_ be blockable and used together with __GFP_DIRECT_RECLAIM.
+ * It should _never_ be used in non-sleepable contexts.
+ * New users should be evaluated carefully (and the flag should be
+ * used only when there is no reasonable failure policy) but it is
+ * definitely preferable to use the flag rather than open-code an endless
+ * loop around the allocator.
+ * Allocating pages from the buddy with __GFP_NOFAIL and order > 1 is
+ * not supported. Please consider using kvmalloc() instead.
+ */
+#define __GFP_IO ((__force gfp_t)___GFP_IO)
+#define __GFP_FS ((__force gfp_t)___GFP_FS)
+#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
+#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
+#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
+#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)
+#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
+#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
+
+/**
+ * DOC: Action modifiers
+ *
+ * Action modifiers
+ * ----------------
+ *
+ * %__GFP_NOWARN suppresses allocation failure reports.
+ *
+ * %__GFP_COMP requests compound page metadata.
+ *
+ * %__GFP_ZERO returns a zeroed page on success.
+ *
+ * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself
+ * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
+ * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
+ * memory tags at the same time as zeroing memory has minimal additional
+ * performance impact.
+ *
+ * %__GFP_SKIP_KASAN makes KASAN skip unpoisoning on page allocation.
+ * Used for userspace and vmalloc pages; the latter are unpoisoned by
+ * kasan_unpoison_vmalloc instead. For userspace pages, results in
+ * poisoning being skipped as well, see should_skip_kasan_poison for
+ * details. Only effective in HW_TAGS mode.
+ */
+#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
+#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
+#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
+#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS)
+#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
+#define __GFP_SKIP_KASAN ((__force gfp_t)___GFP_SKIP_KASAN)
+
+/* Disable lockdep for GFP context tracking */
+#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
+
+/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT ___GFP_LAST_BIT
+#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+
+/**
+ * DOC: Useful GFP flag combinations
+ *
+ * Useful GFP flag combinations
+ * ----------------------------
+ *
+ * Useful GFP flag combinations that are commonly used. It is recommended
+ * that subsystems start with one of these combinations and then set/clear
+ * %__GFP_FOO flags as necessary.
+ *
+ * %GFP_ATOMIC users cannot sleep and need the allocation to succeed. A lower
+ * watermark is applied to allow access to "atomic reserves".
+ * The current implementation doesn't support NMI and a few other strict
+ * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
+ *
+ * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
+ * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
+ *
+ * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
+ * accounted to kmemcg.
+ *
+ * %GFP_NOWAIT is for kernel allocations that should not stall for direct
+ * reclaim, start physical IO or use any filesystem callback. It is very
+ * likely to fail to allocate memory, even for very small allocations.
+ *
+ * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
+ * that do not require the starting of any physical IO.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_noio_{save,restore} to mark the whole scope which cannot
+ * perform any IO with a short explanation why. All allocation requests
+ * will inherit GFP_NOIO implicitly.
+ *
+ * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
+ * recurse into the FS layer with a short explanation why. All allocation
+ * requests will inherit GFP_NOFS implicitly.
+ *
+ * %GFP_USER is for userspace allocations that also need to be directly
+ * accessible by the kernel or hardware. It is typically used by hardware
+ * for buffers that are mapped to userspace (e.g. graphics) that hardware
+ * still must DMA to. cpuset limits are enforced for these allocations.
+ *
+ * %GFP_DMA exists for historical reasons and should be avoided where possible.
+ * The flag indicates that the caller requires that the lowest zone be
+ * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
+ * it would require careful auditing as some users really require it and
+ * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
+ * lowest zone as a type of emergency reserve.
+ *
+ * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
+ * address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory
+ * because the DMA32 kmalloc cache array is not implemented.
+ * (Reason: there is no such user in the kernel.)
+ *
+ * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
+ * do not need to be directly accessible by the kernel but that cannot
+ * move once in use. An example may be a hardware allocation that maps
+ * data directly into userspace but has no addressing limitations.
+ *
+ * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
+ * need direct access to but can use kmap() when access is required. They
+ * are expected to be movable via page reclaim or page migration. Typically,
+ * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
+ *
+ * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
+ * are compound allocations that will generally fail quickly if memory is not
+ * available and will not wake kswapd/kcompactd on failure. The _LIGHT
+ * version does not attempt reclaim/compaction at all and is used by default
+ * in the page fault path, while the non-light version is used by khugepaged.
+ */
+#define GFP_ATOMIC (__GFP_HIGH|__GFP_KSWAPD_RECLAIM)
+#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
+#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
+#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM | __GFP_NOWARN)
+#define GFP_NOIO (__GFP_RECLAIM)
+#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
+#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_DMA __GFP_DMA
+#define GFP_DMA32 __GFP_DMA32
+#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
+#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | __GFP_SKIP_KASAN)
+#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
+ __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
+#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
+
+#endif /* __LINUX_GFP_TYPES_H */
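The DOC blocks above recommend starting from one of the combined flags and then setting or clearing individual %__GFP_FOO bits. A short sketch of that advice, assuming <linux/slab.h> for kmalloc() (both helpers are hypothetical):

/* Pick the base combination from the calling context. */
static void *grab_buffer(size_t len, bool atomic_ctx)
{
	/*
	 * GFP_KERNEL may sleep and enter direct reclaim; GFP_ATOMIC
	 * never sleeps and may dip into the atomic reserves instead.
	 */
	return kmalloc(len, atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
}

static void *grab_meta_nofs(void)
{
	/*
	 * GFP_KERNEL with __GFP_FS cleared is exactly GFP_NOFS; the
	 * documentation above prefers memalloc_nofs_save()/restore()
	 * scopes over using the flag directly.
	 */
	return kmalloc(64, GFP_KERNEL & ~__GFP_FS);
}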
diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h
index 12be1601fd84..bcc17f95b906 100644
--- a/include/linux/goldfish.h
+++ b/include/linux/goldfish.h
@@ -8,14 +8,21 @@
/* Helpers for Goldfish virtual platform */
+#ifndef gf_ioread32
+#define gf_ioread32 ioread32
+#endif
+#ifndef gf_iowrite32
+#define gf_iowrite32 iowrite32
+#endif
+
static inline void gf_write_ptr(const void *ptr, void __iomem *portl,
void __iomem *porth)
{
const unsigned long addr = (unsigned long)ptr;
- __raw_writel(lower_32_bits(addr), portl);
+ gf_iowrite32(lower_32_bits(addr), portl);
#ifdef CONFIG_64BIT
- __raw_writel(upper_32_bits(addr), porth);
+ gf_iowrite32(upper_32_bits(addr), porth);
#endif
}
@@ -23,9 +30,9 @@ static inline void gf_write_dma_addr(const dma_addr_t addr,
void __iomem *portl,
void __iomem *porth)
{
- __raw_writel(lower_32_bits(addr), portl);
+ gf_iowrite32(lower_32_bits(addr), portl);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- __raw_writel(upper_32_bits(addr), porth);
+ gf_iowrite32(upper_32_bits(addr), porth);
#endif
}
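The new #ifndef guards let an architecture supply its own accessors before this header is pulled in; by default the Goldfish helpers now go through iowrite32() rather than __raw_writel(). A minimal sketch of such an override, assuming a hypothetical arch that needs native-endian MMIO (file name and macro choice are illustrative only):

/* arch/foo/include/asm/goldfish.h (hypothetical) */
#ifndef _ASM_FOO_GOLDFISH_H
#define _ASM_FOO_GOLDFISH_H

#include <linux/io.h>

/* This platform's Goldfish devices expect native-endian accesses. */
#define gf_ioread32	__raw_readl
#define gf_iowrite32	__raw_writel

#endif /* _ASM_FOO_GOLDFISH_H */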
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 008ad3ee56b7..8f85ddb26429 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * <linux/gpio.h>
+ * NOTE: This header *must not* be included in new code.
*
* This is the LEGACY GPIO bulk include file, including legacy APIs. It is
* used for GPIO drivers still referencing the global GPIO numberspace,
@@ -12,99 +12,90 @@
#ifndef __LINUX_GPIO_H
#define __LINUX_GPIO_H
-#include <linux/errno.h>
-
-/* see Documentation/driver-api/gpio/legacy.rst */
-
-/* make these flag values available regardless of GPIO kconfig options */
-#define GPIOF_DIR_OUT (0 << 0)
-#define GPIOF_DIR_IN (1 << 0)
-
-#define GPIOF_INIT_LOW (0 << 1)
-#define GPIOF_INIT_HIGH (1 << 1)
-
-#define GPIOF_IN (GPIOF_DIR_IN)
-#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
-#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
-
-/* Gpio pin is active-low */
-#define GPIOF_ACTIVE_LOW (1 << 2)
-
-/* Gpio pin is open drain */
-#define GPIOF_OPEN_DRAIN (1 << 3)
+#include <linux/types.h>
+#ifdef CONFIG_GPIOLIB
+#include <linux/gpio/consumer.h>
+#endif
-/* Gpio pin is open source */
-#define GPIOF_OPEN_SOURCE (1 << 4)
+#ifdef CONFIG_GPIOLIB_LEGACY
-#define GPIOF_EXPORT (1 << 5)
-#define GPIOF_EXPORT_CHANGEABLE (1 << 6)
-#define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT)
-#define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE)
+struct device;
-/**
- * struct gpio - a structure describing a GPIO with configuration
- * @gpio: the GPIO number
- * @flags: GPIO configuration as specified by GPIOF_*
- * @label: a literal description string of this GPIO
- */
-struct gpio {
- unsigned gpio;
- unsigned long flags;
- const char *label;
-};
+/* make these flag values available regardless of GPIO kconfig options */
+#define GPIOF_IN ((1 << 0))
+#define GPIOF_OUT_INIT_LOW ((0 << 0) | (0 << 1))
+#define GPIOF_OUT_INIT_HIGH ((0 << 0) | (1 << 1))
#ifdef CONFIG_GPIOLIB
+/*
+ * "valid" GPIO numbers are nonnegative and may be passed to
+ * setup routines like gpio_request(). Only some valid numbers
+ * can successfully be requested and used.
+ *
+ * Invalid GPIO numbers are useful for indicating no-such-GPIO in
+ * platform data and other tables.
+ */
+static inline bool gpio_is_valid(int number)
+{
+ /* only non-negative numbers are valid */
+ return number >= 0;
+}
-#ifdef CONFIG_ARCH_HAVE_CUSTOM_GPIO_H
-#include <asm/gpio.h>
-#else
+/*
+ * Platforms may implement their GPIO interface with library code,
+ * at a small performance cost for non-inlined operations and some
+ * extra memory (for code and for per-GPIO table entries).
+ */
-#include <asm-generic/gpio.h>
+/* Always use the library code for GPIO management calls,
+ * or when sleeping may be involved.
+ */
+int gpio_request(unsigned gpio, const char *label);
+void gpio_free(unsigned gpio);
-static inline int gpio_get_value(unsigned int gpio)
+static inline int gpio_direction_input(unsigned gpio)
{
- return __gpio_get_value(gpio);
+ return gpiod_direction_input(gpio_to_desc(gpio));
}
-
-static inline void gpio_set_value(unsigned int gpio, int value)
+static inline int gpio_direction_output(unsigned gpio, int value)
{
- __gpio_set_value(gpio, value);
+ return gpiod_direction_output_raw(gpio_to_desc(gpio), value);
}
-static inline int gpio_cansleep(unsigned int gpio)
+static inline int gpio_get_value_cansleep(unsigned gpio)
{
- return __gpio_cansleep(gpio);
+ return gpiod_get_raw_value_cansleep(gpio_to_desc(gpio));
}
-
-static inline int gpio_to_irq(unsigned int gpio)
+static inline void gpio_set_value_cansleep(unsigned gpio, int value)
{
- return __gpio_to_irq(gpio);
+ gpiod_set_raw_value_cansleep(gpio_to_desc(gpio), value);
}
-static inline int irq_to_gpio(unsigned int irq)
+static inline int gpio_get_value(unsigned gpio)
{
- return -EINVAL;
+ return gpiod_get_raw_value(gpio_to_desc(gpio));
+}
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+ gpiod_set_raw_value(gpio_to_desc(gpio), value);
}
-#endif /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */
-
-/* CONFIG_GPIOLIB: bindings for managed devices that want to request gpios */
+static inline int gpio_to_irq(unsigned gpio)
+{
+ return gpiod_to_irq(gpio_to_desc(gpio));
+}
-struct device;
+int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
-int devm_gpio_request(struct device *dev, unsigned gpio, const char *label);
int devm_gpio_request_one(struct device *dev, unsigned gpio,
unsigned long flags, const char *label);
-void devm_gpio_free(struct device *dev, unsigned int gpio);
#else /* ! CONFIG_GPIOLIB */
#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/bug.h>
-struct device;
-struct gpio_chip;
+#include <asm/bug.h>
+#include <asm/errno.h>
static inline bool gpio_is_valid(int number)
{
@@ -122,11 +113,6 @@ static inline int gpio_request_one(unsigned gpio,
return -ENOSYS;
}
-static inline int gpio_request_array(const struct gpio *array, size_t num)
-{
- return -ENOSYS;
-}
-
static inline void gpio_free(unsigned gpio)
{
might_sleep();
@@ -135,14 +121,6 @@ static inline void gpio_free(unsigned gpio)
WARN_ON(1);
}
-static inline void gpio_free_array(const struct gpio *array, size_t num)
-{
- might_sleep();
-
- /* GPIO can never have been requested */
- WARN_ON(1);
-}
-
static inline int gpio_direction_input(unsigned gpio)
{
return -ENOSYS;
@@ -153,11 +131,6 @@ static inline int gpio_direction_output(unsigned gpio, int value)
return -ENOSYS;
}
-static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
-{
- return -ENOSYS;
-}
-
static inline int gpio_get_value(unsigned gpio)
{
/* GPIO can never have been requested or set as {in,out}put */
@@ -171,13 +144,6 @@ static inline void gpio_set_value(unsigned gpio, int value)
WARN_ON(1);
}
-static inline int gpio_cansleep(unsigned gpio)
-{
- /* GPIO can never have been requested or set as {in,out}put */
- WARN_ON(1);
- return 0;
-}
-
static inline int gpio_get_value_cansleep(unsigned gpio)
{
/* GPIO can never have been requested or set as {in,out}put */
@@ -191,27 +157,6 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value)
WARN_ON(1);
}
-static inline int gpio_export(unsigned gpio, bool direction_may_change)
-{
- /* GPIO can never have been requested or set as {in,out}put */
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline int gpio_export_link(struct device *dev, const char *name,
- unsigned gpio)
-{
- /* GPIO can never have been exported */
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline void gpio_unexport(unsigned gpio)
-{
- /* GPIO can never have been exported */
- WARN_ON(1);
-}
-
static inline int gpio_to_irq(unsigned gpio)
{
/* GPIO can never have been requested or set as input */
@@ -219,20 +164,6 @@ static inline int gpio_to_irq(unsigned gpio)
return -EINVAL;
}
-static inline int irq_to_gpio(unsigned irq)
-{
- /* irq can never have been returned from gpio_to_irq() */
- WARN_ON(1);
- return -EINVAL;
-}
-
-static inline int devm_gpio_request(struct device *dev, unsigned gpio,
- const char *label)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
static inline int devm_gpio_request_one(struct device *dev, unsigned gpio,
unsigned long flags, const char *label)
{
@@ -240,11 +171,6 @@ static inline int devm_gpio_request_one(struct device *dev, unsigned gpio,
return -EINVAL;
}
-static inline void devm_gpio_free(struct device *dev, unsigned int gpio)
-{
- WARN_ON(1);
-}
-
#endif /* ! CONFIG_GPIOLIB */
-
+#endif /* CONFIG_GPIOLIB_LEGACY */
#endif /* __LINUX_GPIO_H */
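With the legacy wrappers now implemented on top of gpio_to_desc() and the gpiod_* API, converting an old consumer is mostly mechanical. A hedged before/after sketch (probe_led, the number 42 and the "led" function name are invented; gpiod_put() on teardown is omitted):

static int probe_led(struct device *dev)
{
	struct gpio_desc *led;

	/*
	 * Legacy style, still served by the wrappers above:
	 *	gpio_request_one(42, GPIOF_OUT_INIT_LOW, "led");
	 *	gpio_set_value(42, 1);		// raw line value
	 */

	/* Descriptor style: look the line up by function, no global numbers. */
	led = gpiod_get(dev, "led", GPIOD_OUT_LOW);
	if (IS_ERR(led))
		return PTR_ERR(led);

	/* Logical value: gpiolib applies any active-low inversion. */
	return gpiod_set_value_cansleep(led, 1);
}

Note the semantic difference: the legacy calls map to the gpiod_*raw* variants, while plain gpiod_set_value() honours the line's active-low flag.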
diff --git a/include/linux/gpio/aspeed.h b/include/linux/gpio/aspeed.h
index 1bfb3cdc86d0..9a547e66c8c4 100644
--- a/include/linux/gpio/aspeed.h
+++ b/include/linux/gpio/aspeed.h
@@ -1,6 +1,10 @@
#ifndef __GPIO_ASPEED_H
#define __GPIO_ASPEED_H
+#include <linux/types.h>
+
+struct gpio_desc;
+
struct aspeed_gpio_copro_ops {
int (*request_access)(void *data);
int (*release_access)(void *data);
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index c73b25bc9213..cafeb7a40ad1 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -3,32 +3,23 @@
#define __LINUX_GPIO_CONSUMER_H
#include <linux/bits.h>
-#include <linux/bug.h>
-#include <linux/compiler_types.h>
#include <linux/err.h>
+#include <linux/types.h>
+struct acpi_device;
struct device;
+struct fwnode_handle;
-/**
- * Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are
- * preferable to the old integer-based handles.
- *
- * Contrary to integers, a pointer to a gpio_desc is guaranteed to be valid
- * until the GPIO is released.
- */
-struct gpio_desc;
-
-/**
- * Opaque descriptor for a structure of GPIO array attributes. This structure
- * is attached to struct gpiod_descs obtained from gpiod_get_array() and can be
- * passed back to get/set array functions in order to activate fast processing
- * path if applicable.
- */
struct gpio_array;
+struct gpio_desc;
/**
- * Struct containing an array of descriptors that can be obtained using
- * gpiod_get_array().
+ * struct gpio_descs - Struct containing an array of descriptors that can be
+ * obtained using gpiod_get_array()
+ *
+ * @info: Pointer to the opaque gpio_array structure
+ * @ndescs: Number of held descriptors
+ * @desc: Array of pointers to GPIO descriptors
*/
struct gpio_descs {
struct gpio_array *info;
@@ -40,11 +31,20 @@ struct gpio_descs {
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
#define GPIOD_FLAGS_BIT_OPEN_DRAIN BIT(3)
+/* GPIOD_FLAGS_BIT_NONEXCLUSIVE is DEPRECATED, don't use in new code. */
#define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4)
/**
- * Optional flags that can be passed to one of gpiod_* to configure direction
- * and output value. These values cannot be OR'd.
+ * enum gpiod_flags - Optional flags that can be passed to one of gpiod_* to
+ * configure direction and output value. These values
+ * cannot be OR'd.
+ *
+ * @GPIOD_ASIS: Don't change anything
+ * @GPIOD_IN: Set lines to input mode
+ * @GPIOD_OUT_LOW: Set lines to output and drive them low
+ * @GPIOD_OUT_HIGH: Set lines to output and drive them high
+ * @GPIOD_OUT_LOW_OPEN_DRAIN: Set lines to open-drain output and drive them low
+ * @GPIOD_OUT_HIGH_OPEN_DRAIN: Set lines to open-drain output and drive them high
*/
enum gpiod_flags {
GPIOD_ASIS = 0,
@@ -119,7 +119,7 @@ int gpiod_get_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
unsigned long *value_bitmap);
-void gpiod_set_value(struct gpio_desc *desc, int value);
+int gpiod_set_value(struct gpio_desc *desc, int value);
int gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
@@ -129,7 +129,7 @@ int gpiod_get_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
unsigned long *value_bitmap);
-void gpiod_set_raw_value(struct gpio_desc *desc, int value);
+int gpiod_set_raw_value(struct gpio_desc *desc, int value);
int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
@@ -141,7 +141,7 @@ int gpiod_get_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
unsigned long *value_bitmap);
-void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
+int gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
int gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
@@ -151,7 +151,7 @@ int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
unsigned long *value_bitmap);
-void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
+int gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
struct gpio_array *array_info,
@@ -159,7 +159,6 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
int gpiod_set_config(struct gpio_desc *desc, unsigned long config);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce);
-int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
void gpiod_toggle_active_low(struct gpio_desc *desc);
int gpiod_is_active_low(const struct gpio_desc *desc);
@@ -168,17 +167,14 @@ int gpiod_cansleep(const struct gpio_desc *desc);
int gpiod_to_irq(const struct gpio_desc *desc);
int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
+bool gpiod_is_shared(const struct gpio_desc *desc);
+
/* Convert between the old gpio_ and new gpiod_ interfaces */
struct gpio_desc *gpio_to_desc(unsigned gpio);
int desc_to_gpio(const struct gpio_desc *desc);
-/* Child properties interface */
-struct fwnode_handle;
+int gpiod_hwgpio(const struct gpio_desc *desc);
-struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label);
struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
const char *con_id, int index,
enum gpiod_flags flags,
@@ -189,8 +185,12 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
enum gpiod_flags flags,
const char *label);
+bool gpiod_is_equal(const struct gpio_desc *desc,
+ const struct gpio_desc *other);
+
#else /* CONFIG_GPIOLIB */
+#include <linux/bug.h>
#include <linux/kernel.h>
static inline int gpiod_count(struct device *dev, const char *con_id)
@@ -353,8 +353,6 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
WARN_ON(desc);
return -ENOSYS;
}
-
-
static inline int gpiod_get_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -370,10 +368,11 @@ static inline int gpiod_get_array_value(unsigned int array_size,
WARN_ON(desc_array);
return 0;
}
-static inline void gpiod_set_value(struct gpio_desc *desc, int value)
+static inline int gpiod_set_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(desc);
+ return 0;
}
static inline int gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -399,10 +398,11 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size,
WARN_ON(desc_array);
return 0;
}
-static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
+static inline int gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(desc);
+ return 0;
}
static inline int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -429,10 +429,11 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
WARN_ON(desc_array);
return 0;
}
-static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
+static inline int gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(desc);
+ return 0;
}
static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -458,11 +459,12 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
WARN_ON(desc_array);
return 0;
}
-static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
- int value)
+static inline int gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
+ int value)
{
/* GPIO can never have been requested */
WARN_ON(desc);
+ return 0;
}
static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -488,13 +490,6 @@ static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned int deboun
return -ENOSYS;
}
-static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
-{
- /* GPIO can never have been requested */
- WARN_ON(desc);
- return -ENOSYS;
-}
-
static inline void gpiod_toggle_active_low(struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -529,6 +524,13 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
return -EINVAL;
}
+static inline bool gpiod_is_shared(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
+ return false;
+}
+
static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
{
return NULL;
@@ -541,18 +543,6 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
return -EINVAL;
}
-/* Child properties interface */
-struct fwnode_handle;
-
-static inline
-struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
-{
- return ERR_PTR(-ENOSYS);
-}
-
static inline
struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
const char *con_id, int index,
@@ -572,91 +562,54 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
return ERR_PTR(-ENOSYS);
}
-#endif /* CONFIG_GPIOLIB */
-
-static inline
-struct gpio_desc *devm_fwnode_gpiod_get(struct device *dev,
- struct fwnode_handle *fwnode,
- const char *con_id,
- enum gpiod_flags flags,
- const char *label)
+static inline bool
+gpiod_is_equal(const struct gpio_desc *desc, const struct gpio_desc *other)
{
- return devm_fwnode_gpiod_get_index(dev, fwnode, con_id, 0,
- flags, label);
+ WARN_ON(desc || other);
+ return false;
}
-static inline
-struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
- const char *con_id, int index,
- struct fwnode_handle *child,
- enum gpiod_flags flags,
- const char *label)
-{
- return devm_fwnode_gpiod_get_index(dev, child, con_id, index,
- flags, label);
-}
-
-static inline
-struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev,
- const char *con_id,
- struct fwnode_handle *child,
- enum gpiod_flags flags,
- const char *label)
-{
- return devm_fwnode_gpiod_get_index(dev, child, con_id, 0, flags, label);
-}
-
-#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_OF_GPIO)
-struct device_node;
-
-struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label);
+#endif /* CONFIG_GPIOLIB */
-#else /* CONFIG_GPIOLIB && CONFIG_OF_GPIO */
+#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_HTE)
+int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
+int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags);
+#else
-struct device_node;
+#include <linux/bug.h>
-static inline
-struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
+static inline int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc,
+ unsigned long flags)
{
- return ERR_PTR(-ENOSYS);
-}
-
-#endif /* CONFIG_GPIOLIB && CONFIG_OF_GPIO */
-
-#ifdef CONFIG_GPIOLIB
-struct device_node;
-
-struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
- struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label);
+ if (!IS_ENABLED(CONFIG_GPIOLIB))
+ WARN_ON(desc);
-#else /* CONFIG_GPIOLIB */
+ return -ENOSYS;
+}
+static inline int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc,
+ unsigned long flags)
+{
+ if (!IS_ENABLED(CONFIG_GPIOLIB))
+ WARN_ON(desc);
-struct device_node;
+ return -ENOSYS;
+}
+#endif /* CONFIG_GPIOLIB && CONFIG_HTE */
static inline
-struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
- struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
+struct gpio_desc *devm_fwnode_gpiod_get(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *con_id,
+ enum gpiod_flags flags,
+ const char *label)
{
- return ERR_PTR(-ENOSYS);
+ return devm_fwnode_gpiod_get_index(dev, fwnode, con_id, 0,
+ flags, label);
}
-#endif /* CONFIG_GPIOLIB */
-
struct acpi_gpio_params {
unsigned int crs_entry_index;
- unsigned int line_index;
+ unsigned short line_index;
bool active_low;
};
@@ -682,20 +635,15 @@ struct acpi_gpio_mapping {
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI)
-struct acpi_device;
-
int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios);
void acpi_dev_remove_driver_gpios(struct acpi_device *adev);
int devm_acpi_dev_add_driver_gpios(struct device *dev,
const struct acpi_gpio_mapping *gpios);
-void devm_acpi_dev_remove_driver_gpios(struct device *dev);
#else /* CONFIG_GPIOLIB && CONFIG_ACPI */
-struct acpi_device;
-
static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
const struct acpi_gpio_mapping *gpios)
{
@@ -708,7 +656,6 @@ static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
{
return -ENXIO;
}
-static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {}
#endif /* CONFIG_GPIOLIB && CONFIG_ACPI */
@@ -740,4 +687,14 @@ static inline void gpiod_unexport(struct gpio_desc *desc)
#endif /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
+static inline int gpiod_multi_set_value_cansleep(struct gpio_descs *descs,
+ unsigned long *value_bitmap)
+{
+ if (IS_ERR_OR_NULL(descs))
+ return PTR_ERR_OR_ZERO(descs);
+
+ return gpiod_set_array_value_cansleep(descs->ndescs, descs->desc,
+ descs->info, value_bitmap);
+}
+
#endif
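Two consumer-visible changes land in this header: the gpiod_set_value*() family now returns an int so hardware write failures can propagate, and gpiod_multi_set_value_cansleep() drives a whole descriptor bundle from one bitmap. A minimal sketch, assuming <linux/bitmap.h> and a hypothetical four-line mux obtained earlier via gpiod_get_array():

static int set_mux_position(struct gpio_descs *mux)
{
	DECLARE_BITMAP(values, 4);

	bitmap_zero(values, 4);
	__set_bit(1, values);	/* line 1 high, lines 0/2/3 low */

	/* NULL or ERR_PTR bundles short-circuit inside the helper. */
	return gpiod_multi_set_value_cansleep(mux, values);
}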
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 3a268781fcec..fabe2baf7b50 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -2,25 +2,45 @@
#ifndef __LINUX_GPIO_DRIVER_H
#define __LINUX_GPIO_DRIVER_H
-#include <linux/device.h>
-#include <linux/types.h>
-#include <linux/irq.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqhandler.h>
#include <linux/lockdep.h>
-#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/property.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/util_macros.h>
-struct gpio_desc;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+#include <asm/msi.h>
+#endif
+
+struct device;
+struct irq_chip;
+struct irq_data;
+struct module;
struct of_phandle_args;
-struct device_node;
+struct pinctrl_dev;
struct seq_file;
+
+struct gpio_chip;
+struct gpio_desc;
struct gpio_device;
-struct module;
-enum gpiod_flags;
+
enum gpio_lookup_flags;
+enum gpiod_flags;
-struct gpio_chip;
+union gpio_irq_fwspec {
+ struct irq_fwspec fwspec;
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ msi_alloc_info_t msiinfo;
+#endif
+};
#define GPIO_LINE_DIRECTION_IN 1
#define GPIO_LINE_DIRECTION_OUT 0
@@ -44,13 +64,6 @@ struct gpio_irq_chip {
*/
struct irq_domain *domain;
- /**
- * @domain_ops:
- *
- * Table of interrupt domain operations for this IRQ chip.
- */
- const struct irq_domain_ops *domain_ops;
-
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
* @fwnode:
@@ -102,9 +115,10 @@ struct gpio_irq_chip {
* variant named &gpiochip_populate_parent_fwspec_fourcell is also
* available.
*/
- void *(*populate_parent_alloc_arg)(struct gpio_chip *gc,
- unsigned int parent_hwirq,
- unsigned int parent_type);
+ int (*populate_parent_alloc_arg)(struct gpio_chip *gc,
+ union gpio_irq_fwspec *fwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type);
/**
* @child_offset_to_irq:
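populate_parent_alloc_arg() now fills a caller-provided union gpio_irq_fwspec and returns an error code instead of handing back an allocated pointer. For the common bindings the stock helpers referenced in the kerneldoc above can be plugged in directly; a hand-rolled equivalent for a two-cell parent would look roughly like this sketch, assuming a hierarchical setup where irq.parent_domain is populated (driver name hypothetical):

static int foo_populate_parent_fwspec(struct gpio_chip *gc,
				      union gpio_irq_fwspec *fwspec,
				      unsigned int parent_hwirq,
				      unsigned int parent_type)
{
	struct irq_fwspec *spec = &fwspec->fwspec;

	spec->fwnode = gc->irq.parent_domain->fwnode;
	spec->param_count = 2;
	spec->param[0] = parent_hwirq;
	spec->param[1] = parent_type;

	return 0;
}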
@@ -166,13 +180,26 @@ struct gpio_irq_chip {
*/
irq_flow_handler_t parent_handler;
- /**
- * @parent_handler_data:
- *
- * Data associated, and passed to, the handler for the parent
- * interrupt.
- */
- void *parent_handler_data;
+ union {
+ /**
+ * @parent_handler_data:
+ *
+ * If @per_parent_data is false, @parent_handler_data is a
+ * single pointer used as the data associated with every
+ * parent interrupt.
+ */
+ void *parent_handler_data;
+
+ /**
+ * @parent_handler_data_array:
+ *
+ * If @per_parent_data is true, @parent_handler_data_array is
+ * an array of @num_parents pointers, and is used to associate
+ * different data for each parent. This cannot be NULL if
+ * @per_parent_data is true.
+ */
+ void **parent_handler_data_array;
+ };
/**
* @num_parents:
@@ -204,6 +231,31 @@ struct gpio_irq_chip {
bool threaded;
/**
+ * @per_parent_data:
+ *
+ * True if parent_handler_data_array describes a @num_parents
+ * sized array to be used as parent data.
+ */
+ bool per_parent_data;
+
+ /**
+ * @initialized:
+ *
+ * Flag to track GPIO chip irq member's initialization.
+ * This flag will make sure GPIO chip irq members are not used
+ * before they are initialized.
+ */
+ bool initialized;
+
+ /**
+ * @domain_is_allocated_externally:
+ *
+ * True if the irq_domain was allocated outside of gpiolib, in which
+ * case gpiolib won't free the irq_domain itself.
+ */
+ bool domain_is_allocated_externally;
+
+ /**
* @init_hw: optional routine to initialize hardware before
* an IRQ chip will be added. This is quite useful when
* a particular driver wants to clear IRQ related registers
@@ -235,8 +287,9 @@ struct gpio_irq_chip {
/**
* @first:
*
- * Required for static IRQ allocation. If set, irq_domain_add_simple()
- * will allocate and map all IRQs during initialization.
+ * Required for static IRQ allocation. If set,
+ * irq_domain_create_simple() will allocate and map all IRQs
+ * during initialization.
*/
unsigned int first;
@@ -274,27 +327,34 @@ struct gpio_irq_chip {
* number or the name of the SoC IP-block implementing it.
* @gpiodev: the internal state holder, opaque struct
* @parent: optional parent device providing the GPIOs
+ * @fwnode: optional fwnode providing this controller's properties
* @owner: helps prevent removal of modules exporting active GPIOs
* @request: optional hook for chip-specific activation, such as
- * enabling module power and clock; may sleep
+ * enabling module power and clock; may sleep; must return 0 on success
+ * or negative error number on failure
* @free: optional hook for chip-specific deactivation, such as
* disabling module power and clock; may sleep
* @get_direction: returns direction for signal "offset", 0=out, 1=in,
* (same as GPIO_LINE_DIRECTION_OUT / GPIO_LINE_DIRECTION_IN),
* or negative error. It is recommended to always implement this
* function, even on input-only or output-only gpio chips.
- * @direction_input: configures signal "offset" as input, or returns error
- * This can be omitted on input-only or output-only gpio chips.
- * @direction_output: configures signal "offset" as output, or returns error
- * This can be omitted on input-only or output-only gpio chips.
+ * @direction_input: configures signal "offset" as input, returns 0 on success
+ * or a negative error number. This can be omitted on input-only or
+ * output-only gpio chips.
+ * @direction_output: configures signal "offset" as output, returns 0 on
+ * success or a negative error number. This can be omitted on input-only
+ * or output-only gpio chips.
* @get: returns value for signal "offset", 0=low, 1=high, or negative error
* @get_multiple: reads values for multiple signals defined by "mask" and
* stores them in "bits", returns 0 on success or negative error
- * @set: assigns output value for signal "offset"
- * @set_multiple: assigns output values for multiple signals defined by "mask"
+ * @set: assigns output value for signal "offset", returns 0 on success or
+ * negative error value
+ * @set_multiple: assigns output values for multiple signals defined by
+ * "mask", returns 0 on success or negative error value
* @set_config: optional hook for all kinds of settings. Uses the same
- * packed config format as generic pinconf.
- * @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
+ * packed config format as generic pinconf. Must return 0 on success and
+ * a negative error number on failure.
+ * @to_irq: optional hook supporting non-static gpiod_to_irq() mappings;
* implementation may not sleep
* @dbg_show: optional routine to show contents in debugfs; default code
* will be used when this is omitted, but custom code can show extra
@@ -304,6 +364,10 @@ struct gpio_irq_chip {
* @add_pin_ranges: optional routine to initialize pin ranges, to be used when
 * the platform requires special mapping of the pins that provide GPIO
 * functionality.
* It is called after adding GPIO chip and before adding IRQ chip.
+ * @en_hw_timestamp: Dependent on GPIO chip, an optional routine to
+ * enable hardware timestamp.
+ * @dis_hw_timestamp: Dependent on GPIO chip, an optional routine to
+ * disable hardware timestamp.
* @base: identifies the first GPIO number handled by this chip;
* or, if negative during registration, requests dynamic ID allocation.
* DEPRECATION: providing anything non-negative and nailing the base
@@ -312,38 +376,18 @@ struct gpio_irq_chip {
* get rid of the static GPIO number space in the long run.
* @ngpio: the number of GPIOs handled by this controller; the last GPIO
* handled is (base + ngpio - 1).
+ * @offset: when multiple gpio chips belong to the same device this
+ * can be used as offset within the device so friendly names can
+ * be properly assigned.
* @names: if set, must be an array of strings to use as alternative
* names for the GPIOs in this chip. Any entry in the array
* may be NULL if there is no alias for the GPIO, however the
- * array must be @ngpio entries long. A name can include a single printk
- * format specifier for an unsigned int. It is substituted by the actual
- * number of the gpio.
+ * array must be @ngpio entries long.
* @can_sleep: flag must be set iff get()/set() methods sleep, as they
* must while accessing GPIO expander chips over I2C or SPI. This
* implies that if the chip supports IRQs, these IRQs need to be threaded
* as the chip access may sleep when e.g. reading out the IRQ status
* registers.
- * @read_reg: reader function for generic GPIO
- * @write_reg: writer function for generic GPIO
- * @be_bits: if the generic GPIO has big endian bit order (bit 31 is representing
- * line 0, bit 30 is line 1 ... bit 0 is line 31) this is set to true by the
- * generic GPIO core. It is for internal housekeeping only.
- * @reg_dat: data (in) register for generic GPIO
- * @reg_set: output set register (out=high) for generic GPIO
- * @reg_clr: output clear register (out=low) for generic GPIO
- * @reg_dir_out: direction out setting register for generic GPIO
- * @reg_dir_in: direction in setting register for generic GPIO
- * @bgpio_dir_unreadable: indicates that the direction register(s) cannot
- * be read and we need to rely on out internal state tracking.
- * @bgpio_bits: number of register bits used for a generic GPIO i.e.
- * <register width> * 8
- * @bgpio_lock: used to lock chip->bgpio_data. Also, this is needed to keep
- * shadowed and real data registers writes together.
- * @bgpio_data: shadowed data register for generic GPIO to clear/set bits
- * safely.
- * @bgpio_dir: shadowed direction register for generic GPIO to clear/set
- * direction safely. A "1" in this word means the line is set as
- * output.
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
* they can all be accessed through a common programming interface.
@@ -359,6 +403,7 @@ struct gpio_chip {
const char *label;
struct gpio_device *gpiodev;
struct device *parent;
+ struct fwnode_handle *fwnode;
struct module *owner;
int (*request)(struct gpio_chip *gc,
@@ -376,9 +421,9 @@ struct gpio_chip {
int (*get_multiple)(struct gpio_chip *gc,
unsigned long *mask,
unsigned long *bits);
- void (*set)(struct gpio_chip *gc,
- unsigned int offset, int value);
- void (*set_multiple)(struct gpio_chip *gc,
+ int (*set)(struct gpio_chip *gc,
+ unsigned int offset, int value);
+ int (*set_multiple)(struct gpio_chip *gc,
unsigned long *mask,
unsigned long *bits);
int (*set_config)(struct gpio_chip *gc,
@@ -396,27 +441,18 @@ struct gpio_chip {
int (*add_pin_ranges)(struct gpio_chip *gc);
+ int (*en_hw_timestamp)(struct gpio_chip *gc,
+ u32 offset,
+ unsigned long flags);
+ int (*dis_hw_timestamp)(struct gpio_chip *gc,
+ u32 offset,
+ unsigned long flags);
int base;
u16 ngpio;
+ u16 offset;
const char *const *names;
bool can_sleep;
-#if IS_ENABLED(CONFIG_GPIO_GENERIC)
- unsigned long (*read_reg)(void __iomem *reg);
- void (*write_reg)(void __iomem *reg, unsigned long data);
- bool be_bits;
- void __iomem *reg_dat;
- void __iomem *reg_set;
- void __iomem *reg_clr;
- void __iomem *reg_dir_out;
- void __iomem *reg_dir_in;
- bool bgpio_dir_unreadable;
- int bgpio_bits;
- spinlock_t bgpio_lock;
- unsigned long bgpio_data;
- unsigned long bgpio_dir;
-#endif /* CONFIG_GPIO_GENERIC */
-
#ifdef CONFIG_GPIOLIB_IRQCHIP
/*
* With CONFIG_GPIOLIB_IRQCHIP we get an irqchip inside the gpiolib
@@ -432,14 +468,6 @@ struct gpio_chip {
struct gpio_irq_chip irq;
#endif /* CONFIG_GPIOLIB_IRQCHIP */
- /**
- * @valid_mask:
- *
- * If not %NULL, holds bitmask of GPIOs which are valid to be used
- * from the chip.
- */
- unsigned long *valid_mask;
-
#if defined(CONFIG_OF_GPIO)
/*
* If CONFIG_OF_GPIO is enabled, then all GPIO controllers described in
@@ -447,18 +475,33 @@ struct gpio_chip {
*/
/**
- * @of_node:
+ * @of_gpio_n_cells:
+ *
+ * Number of cells used to form the GPIO specifier. The standard is 2
+ * cells:
+ *
+ * gpios = <&gpio offset flags>;
+ *
+ * some complex GPIO controllers instantiate more than one chip per
+ * device tree node and have 3 cells:
*
- * Pointer to a device tree node representing this GPIO controller.
+ * gpios = <&gpio instance offset flags>;
+ *
+ * Legacy GPIO controllers may even have 1 cell:
+ *
+ * gpios = <&gpio offset>;
*/
- struct device_node *of_node;
+ unsigned int of_gpio_n_cells;
/**
- * @of_gpio_n_cells:
+ * @of_node_instance_match:
*
- * Number of cells used to form the GPIO specifier.
+ * Determine if a chip is the right instance. Must be implemented by
+ * any driver using more than one gpio_chip per device tree node.
+ * Returns true if gc is the instance indicated by i (which is the
+ * first cell in the phandles for GPIO lines and gpio-ranges).
*/
- unsigned int of_gpio_n_cells;
+ bool (*of_node_instance_match)(struct gpio_chip *gc, unsigned int i);
/**
* @of_xlate:
@@ -471,29 +514,69 @@ struct gpio_chip {
#endif /* CONFIG_OF_GPIO */
};
-extern const char *gpiochip_is_requested(struct gpio_chip *gc,
- unsigned int offset);
+char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset);
+
+struct _gpiochip_for_each_data {
+ const char **label;
+ unsigned int *i;
+};
+
+DEFINE_CLASS(_gpiochip_for_each_data,
+ struct _gpiochip_for_each_data,
+ if (*_T.label) kfree(*_T.label),
+ ({
+ struct _gpiochip_for_each_data _data = { label, i };
+ *_data.i = 0;
+ _data;
+ }),
+ const char **label, int *i)
+
+/**
+ * for_each_hwgpio_in_range - Iterates over all GPIOs in a given range
+ * @_chip: Chip to iterate over.
+ * @_i: Loop counter.
+ * @_base: First GPIO in the range.
+ * @_size: Amount of GPIOs to check starting from @base.
+ * @_label: Place to store the address of the label if the GPIO is requested.
+ * Set to NULL for unused GPIOs.
+ */
+#define for_each_hwgpio_in_range(_chip, _i, _base, _size, _label) \
+ for (CLASS(_gpiochip_for_each_data, _data)(&_label, &_i); \
+ _i < _size; \
+ _i++, kfree(_label), _label = NULL) \
+ for_each_if(!IS_ERR(_label = gpiochip_dup_line_label(_chip, _base + _i)))
+
+/**
+ * for_each_hwgpio - Iterates over all GPIOs of a given chip.
+ * @_chip: Chip to iterate over.
+ * @_i: Loop counter.
+ * @_label: Place to store the address of the label if the GPIO is requested.
+ * Set to NULL for unused GPIOs.
+ */
+#define for_each_hwgpio(_chip, _i, _label) \
+ for_each_hwgpio_in_range(_chip, _i, 0, _chip->ngpio, _label)
/**
* for_each_requested_gpio_in_range - iterates over requested GPIOs in a given range
- * @chip: the chip to query
- * @i: loop variable
- * @base: first GPIO in the range
- * @size: amount of GPIOs to check starting from @base
- * @label: label of current GPIO
+ * @_chip: the chip to query
+ * @_i: loop variable
+ * @_base: first GPIO in the range
+ * @_size: amount of GPIOs to check starting from @_base
+ * @_label: label of current GPIO
*/
-#define for_each_requested_gpio_in_range(chip, i, base, size, label) \
- for (i = 0; i < size; i++) \
- if ((label = gpiochip_is_requested(chip, base + i)) == NULL) {} else
+#define for_each_requested_gpio_in_range(_chip, _i, _base, _size, _label) \
+ for_each_hwgpio_in_range(_chip, _i, _base, _size, _label) \
+ for_each_if(_label)
/* Iterates over all requested GPIO of the given @chip */
#define for_each_requested_gpio(chip, i, label) \
for_each_requested_gpio_in_range(chip, i, 0, chip->ngpio, label)
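/*
 * Usage sketch (hypothetical driver code, assuming a registered chip; the
 * function name is illustrative): print the label of every requested line.
 */
static void dump_requested_lines(struct gpio_chip *gc)
{
	const char *label;
	unsigned int i;

	for_each_requested_gpio(gc, i, label)
		pr_info("gpio %u in use as \"%s\"\n", i, label);
}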
/* add/remove chips */
-extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
- struct lock_class_key *lock_key,
- struct lock_class_key *request_key);
+int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
/**
* gpiochip_add_data() - register a gpio_chip
@@ -537,17 +620,22 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
devm_gpiochip_add_data_with_key(dev, gc, data, NULL, NULL)
#endif /* CONFIG_LOCKDEP */
-static inline int gpiochip_add(struct gpio_chip *gc)
-{
- return gpiochip_add_data(gc, NULL);
-}
-extern void gpiochip_remove(struct gpio_chip *gc);
-extern int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, void *data,
- struct lock_class_key *lock_key,
- struct lock_class_key *request_key);
+void gpiochip_remove(struct gpio_chip *gc);
+int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc,
+ void *data, struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
+
+struct gpio_device *gpio_device_find(const void *data,
+ int (*match)(struct gpio_chip *gc,
+ const void *data));
+
+struct gpio_device *gpio_device_get(struct gpio_device *gdev);
+void gpio_device_put(struct gpio_device *gdev);
+
+DEFINE_FREE(gpio_device_put, struct gpio_device *,
+ if (!IS_ERR_OR_NULL(_T)) gpio_device_put(_T))
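/*
 * Usage sketch (hypothetical caller; the match logic and names are
 * illustrative): find a device via a custom match callback and let the
 * DEFINE_FREE() helper above drop the reference when the pointer goes
 * out of scope.
 */
static int my_match_label(struct gpio_chip *gc, const void *data)
{
	return !strcmp(gc->label, data);
}

static int my_get_base(const char *label)
{
	struct gpio_device *gdev __free(gpio_device_put) =
			gpio_device_find(label, my_match_label);

	if (!gdev)
		return -ENODEV;

	return gpio_device_get_base(gdev);
}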
-extern struct gpio_chip *gpiochip_find(void *data,
- int (*match)(struct gpio_chip *gc, void *data));
+struct device *gpio_device_to_device(struct gpio_device *gdev);
bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset);
int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset);
@@ -555,6 +643,22 @@ void gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset);
void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset);
void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset);
+/* irq_data versions of the above */
+int gpiochip_irq_reqres(struct irq_data *data);
+void gpiochip_irq_relres(struct irq_data *data);
+
+/* Paste this in your irq_chip structure */
+#define GPIOCHIP_IRQ_RESOURCE_HELPERS \
+ .irq_request_resources = gpiochip_irq_reqres, \
+ .irq_release_resources = gpiochip_irq_relres
+
+static inline void gpio_irq_chip_set_chip(struct gpio_irq_chip *girq,
+ const struct irq_chip *chip)
+{
+ /* Yes, dropping const is ugly, but it isn't like we have a choice */
+ girq->chip = (struct irq_chip *)chip;
+}
+
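/*
 * Usage sketch (hypothetical immutable irq_chip; the mask/unmask helpers
 * are assumed to exist in the driver): paste the resource helpers as the
 * comment above suggests.
 */
static const struct irq_chip my_gpio_irq_chip = {
	.name			= "my-gpio",
	.irq_mask		= my_gpio_irq_mask,
	.irq_unmask		= my_gpio_irq_unmask,
	.flags			= IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};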
/* Line status inquiry for drivers */
bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset);
bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset);
@@ -562,72 +666,31 @@ bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset);
/* Sleep persistence inquiry for drivers */
bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset);
bool gpiochip_line_is_valid(const struct gpio_chip *gc, unsigned int offset);
+const unsigned long *gpiochip_query_valid_mask(const struct gpio_chip *gc);
/* get driver data */
void *gpiochip_get_data(struct gpio_chip *gc);
-struct bgpio_pdata {
- const char *label;
- int base;
- int ngpio;
-};
-
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
-void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc,
+int gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc,
+ union gpio_irq_fwspec *gfwspec,
+ unsigned int parent_hwirq,
+ unsigned int parent_type);
+int gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
+ union gpio_irq_fwspec *gfwspec,
unsigned int parent_hwirq,
unsigned int parent_type);
-void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
- unsigned int parent_hwirq,
- unsigned int parent_type);
-
-#else
-
-static inline void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc,
- unsigned int parent_hwirq,
- unsigned int parent_type)
-{
- return NULL;
-}
-
-static inline void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc,
- unsigned int parent_hwirq,
- unsigned int parent_type)
-{
- return NULL;
-}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
-int bgpio_init(struct gpio_chip *gc, struct device *dev,
- unsigned long sz, void __iomem *dat, void __iomem *set,
- void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
- unsigned long flags);
-
-#define BGPIOF_BIG_ENDIAN BIT(0)
-#define BGPIOF_UNREADABLE_REG_SET BIT(1) /* reg_set is unreadable */
-#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
-#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3)
-#define BGPIOF_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
-#define BGPIOF_NO_OUTPUT BIT(5) /* only input */
-#define BGPIOF_NO_SET_ON_INPUT BIT(6)
-
-int gpiochip_irq_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq);
-void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq);
-
-int gpiochip_irq_domain_activate(struct irq_domain *domain,
- struct irq_data *data, bool reserve);
-void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
- struct irq_data *data);
-
-bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
- unsigned int offset);
-
#ifdef CONFIG_GPIOLIB_IRQCHIP
int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
struct irq_domain *domain);
#else
+
+#include <asm/bug.h>
+
static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
struct irq_domain *domain)
{
@@ -655,23 +718,68 @@ struct gpio_pin_range {
#ifdef CONFIG_PINCTRL
-int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins);
+int gpiochip_add_pin_range_with_pins(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+ unsigned int pin_offset,
+ unsigned int const *pins,
+ unsigned int npins);
int gpiochip_add_pingroup_range(struct gpio_chip *gc,
struct pinctrl_dev *pctldev,
unsigned int gpio_offset, const char *pin_group);
void gpiochip_remove_pin_ranges(struct gpio_chip *gc);
+static inline int
+gpiochip_add_pin_range(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+ unsigned int pin_offset,
+ unsigned int npins)
+{
+ return gpiochip_add_pin_range_with_pins(gc, pinctl_name, gpio_offset,
+ pin_offset, NULL, npins);
+}
+
+static inline int
+gpiochip_add_sparse_pin_range(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+ unsigned int const *pins,
+ unsigned int npins)
+{
+ return gpiochip_add_pin_range_with_pins(gc, pinctl_name, gpio_offset, 0,
+ pins, npins);
+}
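/*
 * Usage sketch (hypothetical driver, pin numbers illustrative): map four
 * non-contiguous pins to GPIO offsets 0..3 with the sparse helper above.
 */
static const unsigned int my_pins[] = { 2, 5, 11, 17 };

static int my_add_ranges(struct gpio_chip *gc)
{
	return gpiochip_add_sparse_pin_range(gc, "my-pinctrl", 0, my_pins,
					     ARRAY_SIZE(my_pins));
}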
#else /* ! CONFIG_PINCTRL */
static inline int
+gpiochip_add_pin_range_with_pins(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+				 unsigned int pin_offset,
+				 unsigned int const *pins,
+				 unsigned int npins)
+{
+ return 0;
+}
+
+static inline int
gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
unsigned int gpio_offset, unsigned int pin_offset,
unsigned int npins)
{
return 0;
}
+
+static inline int
+gpiochip_add_sparse_pin_range(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+ unsigned int const *pins,
+ unsigned int npins)
+{
+ return 0;
+}
+
static inline int
gpiochip_add_pingroup_range(struct gpio_chip *gc,
struct pinctrl_dev *pctldev,
@@ -694,17 +802,31 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
enum gpiod_flags dflags);
void gpiochip_free_own_desc(struct gpio_desc *desc);
+struct gpio_desc *
+gpio_device_get_desc(struct gpio_device *gdev, unsigned int hwnum);
+
+struct gpio_chip *gpio_device_get_chip(struct gpio_device *gdev);
+
#ifdef CONFIG_GPIOLIB
/* lock/unlock as IRQ */
int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset);
void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset);
-
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
+struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc);
+
+/* struct gpio_device getters */
+int gpio_device_get_base(struct gpio_device *gdev);
+const char *gpio_device_get_label(struct gpio_device *gdev);
+
+struct gpio_device *gpio_device_find_by_label(const char *label);
+struct gpio_device *gpio_device_find_by_fwnode(const struct fwnode_handle *fwnode);
#else /* CONFIG_GPIOLIB */
+#include <asm/bug.h>
+
static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -712,6 +834,36 @@ static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
return ERR_PTR(-ENODEV);
}
+static inline struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc)
+{
+ WARN_ON(1);
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int gpio_device_get_base(struct gpio_device *gdev)
+{
+ WARN_ON(1);
+ return -ENODEV;
+}
+
+static inline const char *gpio_device_get_label(struct gpio_device *gdev)
+{
+ WARN_ON(1);
+ return NULL;
+}
+
+static inline struct gpio_device *gpio_device_find_by_label(const char *label)
+{
+ WARN_ON(1);
+ return NULL;
+}
+
+static inline struct gpio_device *gpio_device_find_by_fwnode(const struct fwnode_handle *fwnode)
+{
+ WARN_ON(1);
+ return NULL;
+}
+
static inline int gpiochip_lock_as_irq(struct gpio_chip *gc,
unsigned int offset)
{
@@ -726,4 +878,29 @@ static inline void gpiochip_unlock_as_irq(struct gpio_chip *gc,
}
#endif /* CONFIG_GPIOLIB */
+#define for_each_gpiochip_node(dev, child) \
+ device_for_each_child_node(dev, child) \
+ for_each_if(fwnode_property_present(child, "gpio-controller"))
+
+static inline unsigned int gpiochip_node_count(struct device *dev)
+{
+ struct fwnode_handle *child;
+ unsigned int count = 0;
+
+ for_each_gpiochip_node(dev, child)
+ count++;
+
+ return count;
+}
+
+static inline struct fwnode_handle *gpiochip_node_get_first(struct device *dev)
+{
+ struct fwnode_handle *fwnode;
+
+ for_each_gpiochip_node(dev, fwnode)
+ return fwnode;
+
+ return NULL;
+}
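/*
 * Usage sketch (hypothetical probe code): size per-bank data from the
 * number of "gpio-controller" child nodes counted by the helper above.
 */
static int my_count_banks(struct device *dev)
{
	unsigned int nbanks = gpiochip_node_count(dev);

	if (!nbanks)
		return -ENODEV;

	dev_info(dev, "found %u GPIO banks\n", nbanks);
	return nbanks;
}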
+
#endif /* __LINUX_GPIO_DRIVER_H */
diff --git a/include/linux/gpio/forwarder.h b/include/linux/gpio/forwarder.h
new file mode 100644
index 000000000000..ee5d8355f735
--- /dev/null
+++ b/include/linux/gpio/forwarder.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GPIO_FORWARDER_H
+#define __LINUX_GPIO_FORWARDER_H
+
+struct gpio_desc;
+struct gpio_chip;
+struct gpiochip_fwd;
+
+struct gpiochip_fwd *devm_gpiochip_fwd_alloc(struct device *dev,
+ unsigned int ngpios);
+int gpiochip_fwd_desc_add(struct gpiochip_fwd *fwd,
+ struct gpio_desc *desc, unsigned int offset);
+void gpiochip_fwd_desc_free(struct gpiochip_fwd *fwd, unsigned int offset);
+int gpiochip_fwd_register(struct gpiochip_fwd *fwd, void *data);
+
+struct gpio_chip *gpiochip_fwd_get_gpiochip(struct gpiochip_fwd *fwd);
+
+void *gpiochip_fwd_get_data(struct gpiochip_fwd *fwd);
+
+int gpiochip_fwd_gpio_request(struct gpiochip_fwd *fwd, unsigned int offset);
+int gpiochip_fwd_gpio_get_direction(struct gpiochip_fwd *fwd,
+ unsigned int offset);
+int gpiochip_fwd_gpio_direction_input(struct gpiochip_fwd *fwd,
+ unsigned int offset);
+int gpiochip_fwd_gpio_direction_output(struct gpiochip_fwd *fwd,
+ unsigned int offset,
+ int value);
+int gpiochip_fwd_gpio_get(struct gpiochip_fwd *fwd, unsigned int offset);
+int gpiochip_fwd_gpio_get_multiple(struct gpiochip_fwd *fwd,
+ unsigned long *mask,
+ unsigned long *bits);
+int gpiochip_fwd_gpio_set(struct gpiochip_fwd *fwd, unsigned int offset,
+ int value);
+int gpiochip_fwd_gpio_set_multiple(struct gpiochip_fwd *fwd,
+ unsigned long *mask,
+ unsigned long *bits);
+int gpiochip_fwd_gpio_set_config(struct gpiochip_fwd *fwd, unsigned int offset,
+ unsigned long config);
+int gpiochip_fwd_gpio_to_irq(struct gpiochip_fwd *fwd, unsigned int offset);
+
+#endif
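/*
 * Usage sketch (hypothetical consumer; the ERR_PTR convention for the
 * allocator is an assumption, and 'descs' is assumed to come from the
 * caller): allocate a forwarder, attach descriptors, then register it.
 */
static int my_fwd_setup(struct device *dev, struct gpio_desc **descs,
			unsigned int ndescs)
{
	struct gpiochip_fwd *fwd;
	unsigned int i;
	int ret;

	fwd = devm_gpiochip_fwd_alloc(dev, ndescs);
	if (IS_ERR(fwd))
		return PTR_ERR(fwd);

	for (i = 0; i < ndescs; i++) {
		ret = gpiochip_fwd_desc_add(fwd, descs[i], i);
		if (ret)
			return ret;
	}

	return gpiochip_fwd_register(fwd, NULL);
}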
diff --git a/include/linux/gpio/generic.h b/include/linux/gpio/generic.h
new file mode 100644
index 000000000000..ff566dc9c3cb
--- /dev/null
+++ b/include/linux/gpio/generic.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_GPIO_GENERIC_H
+#define __LINUX_GPIO_GENERIC_H
+
+#include <linux/cleanup.h>
+#include <linux/gpio/driver.h>
+#include <linux/spinlock.h>
+
+struct device;
+
+#define GPIO_GENERIC_BIG_ENDIAN BIT(0)
+#define GPIO_GENERIC_UNREADABLE_REG_SET BIT(1) /* reg_set is unreadable */
+#define GPIO_GENERIC_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
+#define GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER BIT(3)
+#define GPIO_GENERIC_READ_OUTPUT_REG_SET BIT(4) /* reg_set stores output value */
+#define GPIO_GENERIC_NO_OUTPUT BIT(5) /* only input */
+#define GPIO_GENERIC_NO_SET_ON_INPUT BIT(6)
+#define GPIO_GENERIC_PINCTRL_BACKEND BIT(7) /* Call pinctrl direction setters */
+#define GPIO_GENERIC_NO_INPUT BIT(8) /* only output */
+
+/**
+ * struct gpio_generic_chip_config - Generic GPIO chip configuration data
+ * @dev: Parent device of the new GPIO chip (compulsory).
+ * @sz: Size (width) of the MMIO registers in bytes, typically 1, 2 or 4.
+ * @dat: MMIO address for the register to READ the value of the GPIO lines, it
+ * is expected that a 1 in the corresponding bit in this register means
+ * the line is asserted.
+ * @set: MMIO address for the register to SET the value of the GPIO lines, it
+ * is expected that we write the line with 1 in this register to drive
+ * the GPIO line high.
+ * @clr: MMIO address for the register to CLEAR the value of the GPIO lines,
+ * it is expected that we write the line with 1 in this register to
+ * drive the GPIO line low. It is allowed to leave this address as NULL,
+ * in that case the SET register will be assumed to also clear the GPIO
+ * lines, by actively writing the line with 0.
+ * @dirout: MMIO address for the register to set the line as OUTPUT. It is
+ * assumed that setting a line to 1 in this register will turn that
+ * line into an output line. Conversely, setting the line to 0 will
+ * turn that line into an input.
+ * @dirin: MMIO address for the register to set this line as INPUT. It is
+ * assumed that setting a line to 1 in this register will turn that
+ * line into an input line. Conversely, setting the line to 0 will
+ * turn that line into an output.
+ * @flags: Different flags that will affect the behaviour of the device, such
+ * as endianness etc.
+ */
+struct gpio_generic_chip_config {
+ struct device *dev;
+ unsigned long sz;
+ void __iomem *dat;
+ void __iomem *set;
+ void __iomem *clr;
+ void __iomem *dirout;
+ void __iomem *dirin;
+ unsigned long flags;
+};
+
+/**
+ * struct gpio_generic_chip - Generic GPIO chip implementation.
+ * @gc: The underlying struct gpio_chip object, implementing low-level GPIO
+ * chip routines.
+ * @read_reg: reader function for generic GPIO
+ * @write_reg: writer function for generic GPIO
+ * @be_bits: if the generic GPIO has big endian bit order (bit 31
+ *           represents line 0, bit 30 represents line 1 ... bit 0
+ *           represents line 31), this is set to true by the generic GPIO
+ *           core. It is for internal housekeeping only.
+ * @reg_dat: data (in) register for generic GPIO
+ * @reg_set: output set register (out=high) for generic GPIO
+ * @reg_clr: output clear register (out=low) for generic GPIO
+ * @reg_dir_out: direction out setting register for generic GPIO
+ * @reg_dir_in: direction in setting register for generic GPIO
+ * @dir_unreadable: indicates that the direction register(s) cannot be read and
+ *                 we need to rely on our internal state tracking.
+ * @pinctrl: the generic GPIO uses a pin control backend.
+ * @bits: number of register bits used for a generic GPIO
+ * i.e. <register width> * 8
+ * @lock: used to lock chip->sdata. Also, this is needed to keep
+ *        shadowed and real data register writes together.
+ * @sdata: shadowed data register for generic GPIO to clear/set bits safely.
+ * @sdir: shadowed direction register for generic GPIO to clear/set direction
+ * safely. A "1" in this word means the line is set as output.
+ */
+struct gpio_generic_chip {
+ struct gpio_chip gc;
+ unsigned long (*read_reg)(void __iomem *reg);
+ void (*write_reg)(void __iomem *reg, unsigned long data);
+ bool be_bits;
+ void __iomem *reg_dat;
+ void __iomem *reg_set;
+ void __iomem *reg_clr;
+ void __iomem *reg_dir_out;
+ void __iomem *reg_dir_in;
+ bool dir_unreadable;
+ bool pinctrl;
+ int bits;
+ raw_spinlock_t lock;
+ unsigned long sdata;
+ unsigned long sdir;
+};
+
+static inline struct gpio_generic_chip *
+to_gpio_generic_chip(struct gpio_chip *gc)
+{
+ return container_of(gc, struct gpio_generic_chip, gc);
+}
+
+int gpio_generic_chip_init(struct gpio_generic_chip *chip,
+ const struct gpio_generic_chip_config *cfg);
+
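/*
 * Configuration sketch (hypothetical MMIO bank; register offsets and names
 * are illustrative): one data register plus separate set/clear registers.
 */
static int my_generic_init(struct device *dev, void __iomem *base,
			   struct gpio_generic_chip *chip)
{
	struct gpio_generic_chip_config cfg = {
		.dev	= dev,
		.sz	= 4,
		.dat	= base + 0x0,
		.set	= base + 0x4,
		.clr	= base + 0x8,
		.dirout	= base + 0xc,
	};

	return gpio_generic_chip_init(chip, &cfg);
}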
+/**
+ * gpio_generic_chip_set() - Set the GPIO line value of the generic GPIO chip.
+ * @chip: Generic GPIO chip to use.
+ * @offset: Hardware offset of the line to set.
+ * @value: New GPIO line value.
+ *
+ * Some modules using the generic GPIO chip need to set line values in their
+ * direction setters but don't have access to the gpio-mmio symbols, so they
+ * use the function pointer in struct gpio_chip directly. This is not
+ * optimal and can lead to crashes at run-time in some instances. This wrapper
+ * provides a safe interface for users.
+ *
+ * Returns: 0 on success, negative error number on failure.
+ */
+static inline int
+gpio_generic_chip_set(struct gpio_generic_chip *chip, unsigned int offset,
+ int value)
+{
+ if (WARN_ON(!chip->gc.set))
+ return -EOPNOTSUPP;
+
+ return chip->gc.set(&chip->gc, offset, value);
+}
+
+/**
+ * gpio_generic_read_reg() - Read a register using the underlying callback.
+ * @chip: Generic GPIO chip to use.
+ * @reg: Register to read.
+ *
+ * Returns: value read from register.
+ */
+static inline unsigned long
+gpio_generic_read_reg(struct gpio_generic_chip *chip, void __iomem *reg)
+{
+ if (WARN_ON(!chip->read_reg))
+ return 0;
+
+ return chip->read_reg(reg);
+}
+
+/**
+ * gpio_generic_write_reg() - Write a register using the underlying callback.
+ * @chip: Generic GPIO chip to use.
+ * @reg: Register to write to.
+ * @val: New value to write.
+ */
+static inline void gpio_generic_write_reg(struct gpio_generic_chip *chip,
+ void __iomem *reg, unsigned long val)
+{
+ if (WARN_ON(!chip->write_reg))
+ return;
+
+ chip->write_reg(reg, val);
+}
+
+#define gpio_generic_chip_lock(gen_gc) \
+ raw_spin_lock(&(gen_gc)->lock)
+
+#define gpio_generic_chip_unlock(gen_gc) \
+ raw_spin_unlock(&(gen_gc)->lock)
+
+#define gpio_generic_chip_lock_irqsave(gen_gc, flags) \
+ raw_spin_lock_irqsave(&(gen_gc)->lock, flags)
+
+#define gpio_generic_chip_unlock_irqrestore(gen_gc, flags) \
+ raw_spin_unlock_irqrestore(&(gen_gc)->lock, flags)
+
+DEFINE_LOCK_GUARD_1(gpio_generic_lock,
+ struct gpio_generic_chip,
+ gpio_generic_chip_lock(_T->lock),
+ gpio_generic_chip_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(gpio_generic_lock_irqsave,
+ struct gpio_generic_chip,
+ gpio_generic_chip_lock_irqsave(_T->lock, _T->flags),
+ gpio_generic_chip_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
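/*
 * Usage sketch (hypothetical helper): update the shadow data register
 * under the scoped IRQ-safe guard defined above.
 */
static void my_set_shadow_bit(struct gpio_generic_chip *chip,
			      unsigned int bit)
{
	guard(gpio_generic_lock_irqsave)(chip);

	chip->sdata |= BIT(bit);
	gpio_generic_write_reg(chip, chip->reg_set, chip->sdata);
}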
+#endif /* __LINUX_GPIO_GENERIC_H */
diff --git a/include/linux/gpio/gpio-nomadik.h b/include/linux/gpio/gpio-nomadik.h
new file mode 100644
index 000000000000..592a774a53cd
--- /dev/null
+++ b/include/linux/gpio/gpio-nomadik.h
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_GPIO_NOMADIK_H
+#define __LINUX_GPIO_NOMADIK_H
+
+struct fwnode_handle;
+
+/* Package definitions */
+#define PINCTRL_NMK_STN8815 0
+#define PINCTRL_NMK_DB8500 1
+
+#define GPIO_BLOCK_SHIFT 5
+#define NMK_GPIO_PER_CHIP BIT(GPIO_BLOCK_SHIFT)
+#define NMK_MAX_BANKS DIV_ROUND_UP(512, NMK_GPIO_PER_CHIP)
+
+/* Register in the logic block */
+#define NMK_GPIO_DAT 0x00
+#define NMK_GPIO_DATS 0x04
+#define NMK_GPIO_DATC 0x08
+#define NMK_GPIO_PDIS 0x0c
+#define NMK_GPIO_DIR 0x10
+#define NMK_GPIO_DIRS 0x14
+#define NMK_GPIO_DIRC 0x18
+#define NMK_GPIO_SLPC 0x1c
+#define NMK_GPIO_AFSLA 0x20
+#define NMK_GPIO_AFSLB 0x24
+#define NMK_GPIO_LOWEMI 0x28
+
+#define NMK_GPIO_RIMSC 0x40
+#define NMK_GPIO_FIMSC 0x44
+#define NMK_GPIO_IS 0x48
+#define NMK_GPIO_IC 0x4c
+#define NMK_GPIO_RWIMSC 0x50
+#define NMK_GPIO_FWIMSC 0x54
+#define NMK_GPIO_WKS 0x58
+/* These appear in DB8540 and later ASICs */
+#define NMK_GPIO_EDGELEVEL 0x5C
+#define NMK_GPIO_LEVEL 0x60
+
+/* Pull up/down values */
+enum nmk_gpio_pull {
+ NMK_GPIO_PULL_NONE,
+ NMK_GPIO_PULL_UP,
+ NMK_GPIO_PULL_DOWN,
+};
+
+/* Sleep mode */
+enum nmk_gpio_slpm {
+ NMK_GPIO_SLPM_INPUT,
+ NMK_GPIO_SLPM_WAKEUP_ENABLE = NMK_GPIO_SLPM_INPUT,
+ NMK_GPIO_SLPM_NOCHANGE,
+ NMK_GPIO_SLPM_WAKEUP_DISABLE = NMK_GPIO_SLPM_NOCHANGE,
+};
+
+struct nmk_gpio_chip {
+ struct gpio_chip chip;
+ void __iomem *addr;
+ struct clk *clk;
+ unsigned int bank;
+ void (*set_ioforce)(bool enable);
+ spinlock_t lock;
+ bool sleepmode;
+ bool is_mobileye_soc;
+ /* Keep track of configured edges */
+ u32 edge_rising;
+ u32 edge_falling;
+ u32 real_wake;
+ u32 rwimsc;
+ u32 fwimsc;
+ u32 rimsc;
+ u32 fimsc;
+ u32 pull_up;
+ u32 lowemi;
+};
+
+/* Alternate functions: function C is set in hw by setting both A and B */
+#define NMK_GPIO_ALT_GPIO 0
+#define NMK_GPIO_ALT_A 1
+#define NMK_GPIO_ALT_B 2
+#define NMK_GPIO_ALT_C (NMK_GPIO_ALT_A | NMK_GPIO_ALT_B)
+
+#define NMK_GPIO_ALT_CX_SHIFT 2
+#define NMK_GPIO_ALT_C1 ((1<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+#define NMK_GPIO_ALT_C2 ((2<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+#define NMK_GPIO_ALT_C3 ((3<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+#define NMK_GPIO_ALT_C4 ((4<<NMK_GPIO_ALT_CX_SHIFT) | NMK_GPIO_ALT_C)
+
+#define PRCM_GPIOCR_ALTCX(pin_num,\
+ altc1_used, altc1_ri, altc1_cb,\
+ altc2_used, altc2_ri, altc2_cb,\
+ altc3_used, altc3_ri, altc3_cb,\
+ altc4_used, altc4_ri, altc4_cb)\
+{\
+ .pin = pin_num,\
+ .altcx[PRCM_IDX_GPIOCR_ALTC1] = {\
+ .used = altc1_used,\
+ .reg_index = altc1_ri,\
+ .control_bit = altc1_cb\
+ },\
+ .altcx[PRCM_IDX_GPIOCR_ALTC2] = {\
+ .used = altc2_used,\
+ .reg_index = altc2_ri,\
+ .control_bit = altc2_cb\
+ },\
+ .altcx[PRCM_IDX_GPIOCR_ALTC3] = {\
+ .used = altc3_used,\
+ .reg_index = altc3_ri,\
+ .control_bit = altc3_cb\
+ },\
+ .altcx[PRCM_IDX_GPIOCR_ALTC4] = {\
+ .used = altc4_used,\
+ .reg_index = altc4_ri,\
+ .control_bit = altc4_cb\
+ },\
+}
+
+/**
+ * enum prcm_gpiocr_reg_index
+ * Used to reference a PRCM GPIOCR register address.
+ */
+enum prcm_gpiocr_reg_index {
+ PRCM_IDX_GPIOCR1,
+ PRCM_IDX_GPIOCR2,
+ PRCM_IDX_GPIOCR3
+};
+/**
+ * enum prcm_gpiocr_altcx_index
+ * Used to reference an Other alternate-C function.
+ */
+enum prcm_gpiocr_altcx_index {
+ PRCM_IDX_GPIOCR_ALTC1,
+ PRCM_IDX_GPIOCR_ALTC2,
+ PRCM_IDX_GPIOCR_ALTC3,
+ PRCM_IDX_GPIOCR_ALTC4,
+ PRCM_IDX_GPIOCR_ALTC_MAX,
+};
+
+/**
+ * struct prcm_gpiocr_altcx - Other alternate-C function
+ * @used: other alternate-C function availability
+ * @reg_index: PRCM GPIOCR register index used to control the function
+ * @control_bit: PRCM GPIOCR bit used to control the function
+ */
+struct prcm_gpiocr_altcx {
+ bool used:1;
+ u8 reg_index:2;
+ u8 control_bit:5;
+} __packed;
+
+/**
+ * struct prcm_gpiocr_altcx_pin_desc - Other alternate-C pin
+ * @pin: The pin number
+ * @altcx: array of other alternate-C[1-4] functions
+ */
+struct prcm_gpiocr_altcx_pin_desc {
+ unsigned short pin;
+ struct prcm_gpiocr_altcx altcx[PRCM_IDX_GPIOCR_ALTC_MAX];
+};
+
+/**
+ * struct nmk_function - Nomadik pinctrl mux function
+ * @name: The name of the function, exported to pinctrl core.
+ * @groups: An array of pin groups that may select this function.
+ * @ngroups: The number of entries in @groups.
+ */
+struct nmk_function {
+ const char *name;
+ const char * const *groups;
+ unsigned int ngroups;
+};
+
+/**
+ * struct nmk_pingroup - describes a Nomadik pin group
+ * @grp: Generic data of the pin group (name and pins)
+ * @altsetting: the altsetting to apply to all pins in this group to
+ * configure them to be used by a function
+ */
+struct nmk_pingroup {
+ struct pingroup grp;
+ int altsetting;
+};
+
+#define NMK_PIN_GROUP(a, b) \
+ { \
+ .grp = PINCTRL_PINGROUP(#a, a##_pins, ARRAY_SIZE(a##_pins)), \
+ .altsetting = b, \
+ }
+
+/**
+ * struct nmk_pinctrl_soc_data - Nomadik pin controller per-SoC configuration
+ * @pins: An array describing all pins the pin controller affects.
+ * All pins which are also GPIOs must be listed first within the
+ * array, and be numbered identically to the GPIO controller's
+ * numbering.
+ * @npins: The number of entries in @pins.
+ * @functions: The functions supported on this SoC.
+ * @nfunctions: The number of entries in @functions.
+ * @groups: An array describing all pin groups this SoC supports.
+ * @ngroups: The number of entries in @groups.
+ * @altcx_pins: The pins that support Other alternate-C function on this SoC
+ * @npins_altcx: The number of Other alternate-C pins
+ * @prcm_gpiocr_registers: The array of PRCM GPIOCR registers on this SoC
+ */
+struct nmk_pinctrl_soc_data {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ const struct nmk_function *functions;
+ unsigned int nfunctions;
+ const struct nmk_pingroup *groups;
+ unsigned int ngroups;
+ const struct prcm_gpiocr_altcx_pin_desc *altcx_pins;
+ unsigned int npins_altcx;
+ const u16 *prcm_gpiocr_registers;
+};
+
+#ifdef CONFIG_PINCTRL_STN8815
+
+void nmk_pinctrl_stn8815_init(const struct nmk_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+nmk_pinctrl_stn8815_init(const struct nmk_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+#ifdef CONFIG_PINCTRL_DB8500
+
+void nmk_pinctrl_db8500_init(const struct nmk_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+nmk_pinctrl_db8500_init(const struct nmk_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+#ifdef CONFIG_PINCTRL_DB8540
+
+void nmk_pinctrl_db8540_init(const struct nmk_pinctrl_soc_data **soc);
+
+#else
+
+static inline void
+nmk_pinctrl_db8540_init(const struct nmk_pinctrl_soc_data **soc)
+{
+}
+
+#endif
+
+struct platform_device;
+
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * Symbols declared in gpio-nomadik used by pinctrl-nomadik. If pinctrl-nomadik
+ * is enabled, then gpio-nomadik is enabled as well; the reverse is not always
+ * true.
+ */
+void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
+ struct gpio_chip *chip, unsigned int offset);
+
+#else
+
+static inline void nmk_gpio_dbg_show_one(struct seq_file *s,
+ struct pinctrl_dev *pctldev,
+ struct gpio_chip *chip,
+ unsigned int offset)
+{
+}
+
+#endif
+
+void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip,
+ unsigned int offset, int val);
+void __nmk_gpio_set_slpm(struct nmk_gpio_chip *nmk_chip, unsigned int offset,
+ enum nmk_gpio_slpm mode);
+struct nmk_gpio_chip *nmk_gpio_populate_chip(struct fwnode_handle *fwnode,
+ struct platform_device *pdev);
+
+/* Symbols declared in pinctrl-nomadik used by gpio-nomadik. */
+#ifdef CONFIG_PINCTRL_NOMADIK
+extern struct nmk_gpio_chip *nmk_gpio_chips[NMK_MAX_BANKS];
+extern spinlock_t nmk_gpio_slpm_lock;
+int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev,
+ int gpio);
+#endif
+
+#endif /* __LINUX_GPIO_NOMADIK_H */
diff --git a/include/linux/gpio/gpio-reg.h b/include/linux/gpio/gpio-reg.h
index 39b888c40b39..3913b6660ed1 100644
--- a/include/linux/gpio/gpio-reg.h
+++ b/include/linux/gpio/gpio-reg.h
@@ -2,9 +2,13 @@
#ifndef GPIO_REG_H
#define GPIO_REG_H
+#include <linux/types.h>
+
struct device;
struct irq_domain;
+struct gpio_chip;
+
struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg,
int base, int num, const char *label, u32 direction, u32 def_out,
const char *const *names, struct irq_domain *irqdom, const int *irqs);
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index d755e529c1e3..44e5f162973e 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -3,7 +3,6 @@
#define __LINUX_GPIO_MACHINE_H
#include <linux/types.h>
-#include <linux/list.h>
enum gpio_lookup_flags {
GPIO_ACTIVE_HIGH = (0 << 0),
@@ -14,6 +13,7 @@ enum gpio_lookup_flags {
GPIO_TRANSITORY = (1 << 3),
GPIO_PULL_UP = (1 << 4),
GPIO_PULL_DOWN = (1 << 5),
+ GPIO_PULL_DISABLE = (1 << 6),
GPIO_LOOKUP_FLAGS_DEFAULT = GPIO_ACTIVE_HIGH | GPIO_PERSISTENT,
};
@@ -64,6 +64,18 @@ struct gpiod_hog {
};
/*
+ * Helper for lookup tables with just a single lookup for a device.
+ */
+#define GPIO_LOOKUP_SINGLE(_name, _dev_id, _key, _chip_hwnum, _con_id, _flags) \
+static struct gpiod_lookup_table _name = { \
+ .dev_id = _dev_id, \
+ .table = { \
+ GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags), \
+ {}, \
+ }, \
+}
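/*
 * Usage sketch (hypothetical board code; device, chip and consumer names
 * are illustrative): declare a single-entry table and register it.
 */
GPIO_LOOKUP_SINGLE(my_led_lookup, "leds-gpio", "gpio-bank0", 5,
		   "led", GPIO_ACTIVE_HIGH);

static void my_board_init(void)
{
	gpiod_add_lookup_table(&my_led_lookup);
}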
+
+/*
* Simple definition of a single GPIO under a con_id
*/
#define GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags) \
@@ -100,6 +112,7 @@ void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n);
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table);
void gpiod_add_hogs(struct gpiod_hog *hogs);
+void gpiod_remove_hogs(struct gpiod_hog *hogs);
#else /* ! CONFIG_GPIOLIB */
static inline
void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {}
@@ -108,6 +121,7 @@ void gpiod_add_lookup_tables(struct gpiod_lookup_table **tables, size_t n) {}
static inline
void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {}
static inline void gpiod_add_hogs(struct gpiod_hog *hogs) {}
+static inline void gpiod_remove_hogs(struct gpiod_hog *hogs) {}
#endif /* CONFIG_GPIOLIB */
#endif /* __LINUX_GPIO_MACHINE_H */
diff --git a/include/linux/gpio/property.h b/include/linux/gpio/property.h
new file mode 100644
index 000000000000..0d2209308002
--- /dev/null
+++ b/include/linux/gpio/property.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+#ifndef __LINUX_GPIO_PROPERTY_H
+#define __LINUX_GPIO_PROPERTY_H
+
+#include <linux/property.h>
+
+struct software_node;
+
+#define PROPERTY_ENTRY_GPIO(_name_, _chip_node_, _idx_, _flags_) \
+ PROPERTY_ENTRY_REF(_name_, _chip_node_, _idx_, _flags_)
+
+extern const struct software_node swnode_gpio_undefined;
+
+#endif /* __LINUX_GPIO_PROPERTY_H */
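/*
 * Usage sketch (hypothetical software node; the node, property name and
 * offset are illustrative): describe a reset line for a swnode-described
 * device.
 */
static const struct software_node my_gpio_node = { .name = "my-gpio" };

static const struct property_entry my_props[] = {
	PROPERTY_ENTRY_GPIO("reset-gpios", &my_gpio_node, 17,
			    GPIO_ACTIVE_LOW),
	{ }
};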
diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h
index 334dd928042b..12d154732ca9 100644
--- a/include/linux/gpio/regmap.h
+++ b/include/linux/gpio/regmap.h
@@ -6,6 +6,7 @@
struct device;
struct fwnode_handle;
struct gpio_regmap;
+struct gpio_chip;
struct irq_domain;
struct regmap;
@@ -21,7 +22,7 @@ struct regmap;
* If not given, the fwnode of the parent is used.
* @label: (Optional) Descriptive name for GPIO controller.
* If not given, the name of the device is used.
- * @ngpio: Number of GPIOs
+ * @ngpio: (Optional) Number of GPIOs
* @names: (Optional) Array of names for gpios
* @reg_dat_base: (Optional) (in) register base address
* @reg_set_base: (Optional) set register base address
@@ -30,13 +31,27 @@ struct regmap;
* @reg_dir_out_base: (Optional) out setting register base address
* @reg_stride: (Optional) May be set if the registers (of the
* same type, dat, set, etc) are not consecutive.
- * @ngpio_per_reg: Number of GPIOs per register
+ * @ngpio_per_reg: (Optional) Number of GPIOs per register
* @irq_domain: (Optional) IRQ domain if the controller is
* interrupt-capable
* @reg_mask_xlate: (Optional) Translates base address and GPIO
* offset to a register/bitmask pair. If not
* given the default gpio_regmap_simple_xlate()
* is used.
+ * @fixed_direction_output:
+ * (Optional) Bitmap representing the fixed direction of
+ * the GPIO lines. Useful when there are GPIO lines with a
+ * fixed direction mixed together in the same register.
+ * @drvdata: (Optional) Pointer to driver specific data which is
+ * not used by gpio-remap but is provided "as is" to the
+ * driver callback(s).
+ * @init_valid_mask: (Optional) Routine to initialize @valid_mask, to be used
+ * if not all GPIOs are valid.
+ * @regmap_irq_chip:	(Optional) Pointer to a regmap_irq_chip structure. If
+ * set, a regmap-irq device will be created and the IRQ
+ * domain will be set accordingly.
+ * @regmap_irq_line: (Optional) The IRQ the device uses to signal interrupts.
+ * @regmap_irq_flags: (Optional) The IRQF_ flags to use for the interrupt.
*
* The ->reg_mask_xlate translates a given base address and GPIO offset to
* register and mask pair. The base address is one of the given register
@@ -74,17 +89,29 @@ struct gpio_regmap_config {
int reg_stride;
int ngpio_per_reg;
struct irq_domain *irq_domain;
+ unsigned long *fixed_direction_output;
+
+#ifdef CONFIG_REGMAP_IRQ
+ struct regmap_irq_chip *regmap_irq_chip;
+ int regmap_irq_line;
+ unsigned long regmap_irq_flags;
+#endif
int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base,
unsigned int offset, unsigned int *reg,
unsigned int *mask);
+
+ int (*init_valid_mask)(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios);
+
+ void *drvdata;
};
struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config);
void gpio_regmap_unregister(struct gpio_regmap *gpio);
struct gpio_regmap *devm_gpio_regmap_register(struct device *dev,
const struct gpio_regmap_config *config);
-void gpio_regmap_set_drvdata(struct gpio_regmap *gpio, void *data);
void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio);
#endif /* _LINUX_GPIO_REGMAP_H */
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index 3f84aeb81e48..80fa930b04c6 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -21,6 +21,7 @@ struct device;
* disable button via sysfs
* @value: axis value for %EV_ABS
* @irq: Irq number in case of interrupt keys
+ * @wakeirq: Optional dedicated wake-up interrupt
*/
struct gpio_keys_button {
unsigned int code;
@@ -34,6 +35,7 @@ struct gpio_keys_button {
bool can_disable;
int value;
unsigned int irq;
+ unsigned int wakeirq;
};
/**
diff --git a/include/linux/greybus.h b/include/linux/greybus.h
index 18c0fb958b74..4d58e27ceaf6 100644
--- a/include/linux/greybus.h
+++ b/include/linux/greybus.h
@@ -64,7 +64,7 @@ struct greybus_driver {
struct device_driver driver;
};
-#define to_greybus_driver(d) container_of(d, struct greybus_driver, driver)
+#define to_greybus_driver(d) container_of_const(d, struct greybus_driver, driver)
static inline void greybus_set_drvdata(struct gb_bundle *bundle, void *data)
{
@@ -104,44 +104,14 @@ void gb_debugfs_init(void);
void gb_debugfs_cleanup(void);
struct dentry *gb_debugfs_get(void);
-extern struct bus_type greybus_bus_type;
+extern const struct bus_type greybus_bus_type;
-extern struct device_type greybus_hd_type;
-extern struct device_type greybus_module_type;
-extern struct device_type greybus_interface_type;
-extern struct device_type greybus_control_type;
-extern struct device_type greybus_bundle_type;
-extern struct device_type greybus_svc_type;
-
-static inline int is_gb_host_device(const struct device *dev)
-{
- return dev->type == &greybus_hd_type;
-}
-
-static inline int is_gb_module(const struct device *dev)
-{
- return dev->type == &greybus_module_type;
-}
-
-static inline int is_gb_interface(const struct device *dev)
-{
- return dev->type == &greybus_interface_type;
-}
-
-static inline int is_gb_control(const struct device *dev)
-{
- return dev->type == &greybus_control_type;
-}
-
-static inline int is_gb_bundle(const struct device *dev)
-{
- return dev->type == &greybus_bundle_type;
-}
-
-static inline int is_gb_svc(const struct device *dev)
-{
- return dev->type == &greybus_svc_type;
-}
+extern const struct device_type greybus_hd_type;
+extern const struct device_type greybus_module_type;
+extern const struct device_type greybus_interface_type;
+extern const struct device_type greybus_control_type;
+extern const struct device_type greybus_bundle_type;
+extern const struct device_type greybus_svc_type;
static inline bool cport_id_valid(struct gb_host_device *hd, u16 cport_id)
{
diff --git a/include/linux/greybus/greybus_manifest.h b/include/linux/greybus/greybus_manifest.h
index 6e62fe478712..bef9eb2093e9 100644
--- a/include/linux/greybus/greybus_manifest.h
+++ b/include/linux/greybus/greybus_manifest.h
@@ -100,7 +100,7 @@ enum {
struct greybus_descriptor_string {
__u8 length;
__u8 id;
- __u8 string[0];
+ __u8 string[];
} __packed;
/*
@@ -175,7 +175,7 @@ struct greybus_manifest_header {
struct greybus_manifest {
struct greybus_manifest_header header;
- struct greybus_descriptor descriptors[0];
+ struct greybus_descriptor descriptors[];
} __packed;
#endif /* __GREYBUS_MANIFEST_H */
diff --git a/include/linux/greybus/greybus_protocols.h b/include/linux/greybus/greybus_protocols.h
index aeb8f9243545..820134b0105c 100644
--- a/include/linux/greybus/greybus_protocols.h
+++ b/include/linux/greybus/greybus_protocols.h
@@ -232,9 +232,7 @@ struct gb_fw_download_fetch_firmware_request {
__le32 size;
} __packed;
-struct gb_fw_download_fetch_firmware_response {
- __u8 data[0];
-} __packed;
+/* gb_fw_download_fetch_firmware_response contains no other data */
/* firmware download release firmware request */
struct gb_fw_download_release_firmware_request {
@@ -414,9 +412,7 @@ struct gb_bootrom_get_firmware_request {
__le32 size;
} __packed;
-struct gb_bootrom_get_firmware_response {
- __u8 data[0];
-} __packed;
+/* gb_bootrom_get_firmware_response contains no other data */
/* Bootrom protocol Ready to boot request */
struct gb_bootrom_ready_to_boot_request {
diff --git a/include/linux/greybus/hd.h b/include/linux/greybus/hd.h
index d3faf0c1a569..718e2857054e 100644
--- a/include/linux/greybus/hd.h
+++ b/include/linux/greybus/hd.h
@@ -58,7 +58,7 @@ struct gb_host_device {
struct gb_svc *svc;
/* Private data for the host driver */
- unsigned long hd_priv[0] __aligned(sizeof(s64));
+ unsigned long hd_priv[] __aligned(sizeof(s64));
};
#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev)
diff --git a/include/linux/greybus/module.h b/include/linux/greybus/module.h
index 47b839af145d..3efe2133acfd 100644
--- a/include/linux/greybus/module.h
+++ b/include/linux/greybus/module.h
@@ -23,7 +23,7 @@ struct gb_module {
bool disconnected;
- struct gb_interface *interfaces[0];
+ struct gb_interface *interfaces[];
};
#define to_gb_module(d) container_of(d, struct gb_module, dev)
diff --git a/include/linux/greybus/svc.h b/include/linux/greybus/svc.h
index 5afaf5f06856..da547fb9071b 100644
--- a/include/linux/greybus/svc.h
+++ b/include/linux/greybus/svc.h
@@ -100,7 +100,4 @@ bool gb_svc_watchdog_enabled(struct gb_svc *svc);
int gb_svc_watchdog_enable(struct gb_svc *svc);
int gb_svc_watchdog_disable(struct gb_svc *svc);
-int gb_svc_protocol_init(void);
-void gb_svc_protocol_exit(void);
-
#endif /* __SVC_H */
diff --git a/include/linux/group_cpus.h b/include/linux/group_cpus.h
new file mode 100644
index 000000000000..9d4e5ab6c314
--- /dev/null
+++ b/include/linux/group_cpus.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 Thomas Gleixner.
+ * Copyright (C) 2016-2017 Christoph Hellwig.
+ */
+
+#ifndef __LINUX_GROUP_CPUS_H
+#define __LINUX_GROUP_CPUS_H
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+
+struct cpumask *group_cpus_evenly(unsigned int numgrps, unsigned int *nummasks);
+
+#endif
diff --git a/include/linux/habanalabs/cpucp_if.h b/include/linux/habanalabs/cpucp_if.h
new file mode 100644
index 000000000000..45f181bcf890
--- /dev/null
+++ b/include/linux/habanalabs/cpucp_if.h
@@ -0,0 +1,1437 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2020-2023 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef CPUCP_IF_H
+#define CPUCP_IF_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#include "hl_boot_if.h"
+
+#define NUM_HBM_PSEUDO_CH 2
+#define NUM_HBM_CH_PER_DEV 8
+#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_SHIFT 0
+#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_MASK 0x00000001
+#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_SHIFT 1
+#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_MASK 0x00000002
+#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_SHIFT 2
+#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_MASK 0x00000004
+#define CPUCP_PKT_HBM_ECC_INFO_DERR_SHIFT 3
+#define CPUCP_PKT_HBM_ECC_INFO_DERR_MASK 0x00000008
+#define CPUCP_PKT_HBM_ECC_INFO_SERR_SHIFT 4
+#define CPUCP_PKT_HBM_ECC_INFO_SERR_MASK 0x00000010
+#define CPUCP_PKT_HBM_ECC_INFO_TYPE_SHIFT 5
+#define CPUCP_PKT_HBM_ECC_INFO_TYPE_MASK 0x00000020
+#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT 6
+#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK 0x000007C0
+
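/*
 * Decode sketch (hypothetical helper, not part of the interface): pull the
 * HBM channel out of the packed hbm_ecc_info word using the shift/mask
 * pairs above.
 */
static inline u32 my_hbm_ecc_channel(__le32 hbm_ecc_info)
{
	u32 info = le32_to_cpu(hbm_ecc_info);

	return (info & CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK) >>
	       CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT;
}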
+#define PLL_MAP_MAX_BITS 128
+#define PLL_MAP_LEN (PLL_MAP_MAX_BITS / 8)
+
+enum eq_event_id {
+ EQ_EVENT_NIC_STS_REQUEST = 0,
+ EQ_EVENT_PWR_MODE_0,
+ EQ_EVENT_PWR_MODE_1,
+ EQ_EVENT_PWR_MODE_2,
+ EQ_EVENT_PWR_MODE_3,
+ EQ_EVENT_PWR_BRK_ENTRY,
+ EQ_EVENT_PWR_BRK_EXIT,
+ EQ_EVENT_HEARTBEAT,
+ EQ_EVENT_CPLD_RESET_REASON,
+ EQ_EVENT_CPLD_SHUTDOWN,
+ EQ_EVENT_POWER_EVT_START,
+ EQ_EVENT_POWER_EVT_END,
+ EQ_EVENT_THERMAL_EVT_START,
+ EQ_EVENT_THERMAL_EVT_END,
+};
+
+/*
+ * info of the pkt queue pointers in the first async occurrence
+ */
+struct cpucp_pkt_sync_err {
+ __le32 pi;
+ __le32 ci;
+};
+
+struct hl_eq_hbm_ecc_data {
+ /* SERR counter */
+ __le32 sec_cnt;
+ /* DERR counter */
+ __le32 dec_cnt;
+ /* Supplemental Information according to the mask bits */
+ __le32 hbm_ecc_info;
+ /* Address in hbm where the ecc happened */
+ __le32 first_addr;
+ /* SERR continuous address counter */
+ __le32 sec_cont_cnt;
+ __le32 pad;
+};
+
+/*
+ * EVENT QUEUE
+ */
+
+struct hl_eq_header {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+struct hl_eq_ecc_data {
+ __le64 ecc_address;
+ __le64 ecc_syndrom;
+ __u8 memory_wrapper_idx;
+ __u8 is_critical;
+ __le16 block_id;
+ __u8 pad[4];
+};
+
+enum hl_sm_sei_cause {
+ SM_SEI_SO_OVERFLOW,
+ SM_SEI_LBW_4B_UNALIGNED,
+ SM_SEI_AXI_RESPONSE_ERR
+};
+
+struct hl_eq_sm_sei_data {
+ __le32 sei_log;
+ /* enum hl_sm_sei_cause */
+ __u8 sei_cause;
+ __u8 pad[3];
+};
+
+enum hl_fw_alive_severity {
+ FW_ALIVE_SEVERITY_MINOR,
+ FW_ALIVE_SEVERITY_CRITICAL
+};
+
+struct hl_eq_fw_alive {
+ __le64 uptime_seconds;
+ __le32 process_id;
+ __le32 thread_id;
+ /* enum hl_fw_alive_severity */
+ __u8 severity;
+ __u8 pad[7];
+};
+
+struct hl_eq_intr_cause {
+ __le64 intr_cause_data;
+};
+
+struct hl_eq_pcie_drain_ind_data {
+ struct hl_eq_intr_cause intr_cause;
+ __le64 drain_wr_addr_lbw;
+ __le64 drain_rd_addr_lbw;
+ __le64 drain_wr_addr_hbw;
+ __le64 drain_rd_addr_hbw;
+};
+
+struct hl_eq_razwi_lbw_info_regs {
+ __le32 rr_aw_razwi_reg;
+ __le32 rr_aw_razwi_id_reg;
+ __le32 rr_ar_razwi_reg;
+ __le32 rr_ar_razwi_id_reg;
+};
+
+struct hl_eq_razwi_hbw_info_regs {
+ __le32 rr_aw_razwi_hi_reg;
+ __le32 rr_aw_razwi_lo_reg;
+ __le32 rr_aw_razwi_id_reg;
+ __le32 rr_ar_razwi_hi_reg;
+ __le32 rr_ar_razwi_lo_reg;
+ __le32 rr_ar_razwi_id_reg;
+};
+
+/* razwi_happened masks */
+#define RAZWI_HAPPENED_HBW 0x1
+#define RAZWI_HAPPENED_LBW 0x2
+#define RAZWI_HAPPENED_AW 0x4
+#define RAZWI_HAPPENED_AR 0x8
+
+struct hl_eq_razwi_info {
+ __le32 razwi_happened_mask;
+ union {
+ struct hl_eq_razwi_lbw_info_regs lbw;
+ struct hl_eq_razwi_hbw_info_regs hbw;
+ };
+ __le32 pad;
+};
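/*
 * Decode sketch (hypothetical helper): use the happened mask to tell a
 * write-path RAZWI from a read-path one before touching the union.
 */
static inline bool my_razwi_is_write(const struct hl_eq_razwi_info *info)
{
	return le32_to_cpu(info->razwi_happened_mask) & RAZWI_HAPPENED_AW;
}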
+
+struct hl_eq_razwi_with_intr_cause {
+ struct hl_eq_razwi_info razwi_info;
+ struct hl_eq_intr_cause intr_cause;
+};
+
+#define HBM_CA_ERR_CMD_LIFO_LEN 8
+#define HBM_RD_ERR_DATA_LIFO_LEN 8
+#define HBM_WR_PAR_CMD_LIFO_LEN 11
+
+enum hl_hbm_sei_cause {
+ /* Command/address parity error event is split into 2 events due to
+ * size limitation: ODD suffix for odd HBM CK_t cycles and EVEN suffix
+ * for even HBM CK_t cycles
+ */
+ HBM_SEI_CMD_PARITY_EVEN,
+ HBM_SEI_CMD_PARITY_ODD,
+ /* Read errors can be reflected as a combination of SERR/DERR/parity
+ * errors. Therefore, we define one event for all read error types.
+	 * LKD will perform further processing.
+ */
+ HBM_SEI_READ_ERR,
+ HBM_SEI_WRITE_DATA_PARITY_ERR,
+ HBM_SEI_CATTRIP,
+ HBM_SEI_MEM_BIST_FAIL,
+ HBM_SEI_DFI,
+ HBM_SEI_INV_TEMP_READ_OUT,
+ HBM_SEI_BIST_FAIL,
+};
+
+/* Masks for parsing hl_hbm_sei_headr fields */
+#define HBM_ECC_SERR_CNTR_MASK 0xFF
+#define HBM_ECC_DERR_CNTR_MASK 0xFF00
+#define HBM_RD_PARITY_CNTR_MASK 0xFF0000
+
+/* HBM index and MC index are known by the event_id */
+struct hl_hbm_sei_header {
+ union {
+ /* relevant only in case of HBM read error */
+ struct {
+ __u8 ecc_serr_cnt;
+ __u8 ecc_derr_cnt;
+ __u8 read_par_cnt;
+ __u8 reserved;
+ };
+ /* All other cases */
+ __le32 cnt;
+ };
+ __u8 sei_cause; /* enum hl_hbm_sei_cause */
+ __u8 mc_channel; /* range: 0-3 */
+ __u8 mc_pseudo_channel; /* range: 0-7 */
+ __u8 is_critical;
+};
+
+#define HBM_RD_ADDR_SID_SHIFT 0
+#define HBM_RD_ADDR_SID_MASK 0x1
+#define HBM_RD_ADDR_BG_SHIFT 1
+#define HBM_RD_ADDR_BG_MASK 0x6
+#define HBM_RD_ADDR_BA_SHIFT 3
+#define HBM_RD_ADDR_BA_MASK 0x18
+#define HBM_RD_ADDR_COL_SHIFT 5
+#define HBM_RD_ADDR_COL_MASK 0x7E0
+#define HBM_RD_ADDR_ROW_SHIFT 11
+#define HBM_RD_ADDR_ROW_MASK 0x3FFF800
+
+struct hbm_rd_addr {
+ union {
+ /* bit fields are only for FW use */
+ struct {
+ u32 dbg_rd_err_addr_sid:1;
+ u32 dbg_rd_err_addr_bg:2;
+ u32 dbg_rd_err_addr_ba:2;
+ u32 dbg_rd_err_addr_col:6;
+ u32 dbg_rd_err_addr_row:15;
+ u32 reserved:6;
+ };
+ __le32 rd_addr_val;
+ };
+};
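/*
 * Decode sketch (hypothetical helper): the bit fields above are reserved
 * for FW use, so host code parses the raw word with the shift/mask pairs.
 */
static inline u32 my_hbm_rd_row(const struct hbm_rd_addr *addr)
{
	u32 val = le32_to_cpu(addr->rd_addr_val);

	return (val & HBM_RD_ADDR_ROW_MASK) >> HBM_RD_ADDR_ROW_SHIFT;
}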
+
+#define HBM_RD_ERR_BEAT_SHIFT 2
+/* dbg_rd_err_misc fields: */
+/* Read parity is calculated per DW on every beat */
+#define HBM_RD_ERR_PAR_ERR_BEAT0_SHIFT 0
+#define HBM_RD_ERR_PAR_ERR_BEAT0_MASK 0x3
+#define HBM_RD_ERR_PAR_DATA_BEAT0_SHIFT 8
+#define HBM_RD_ERR_PAR_DATA_BEAT0_MASK 0x300
+/* ECC is calculated per PC on every beat */
+#define HBM_RD_ERR_SERR_BEAT0_SHIFT 16
+#define HBM_RD_ERR_SERR_BEAT0_MASK 0x10000
+#define HBM_RD_ERR_DERR_BEAT0_SHIFT 24
+#define HBM_RD_ERR_DERR_BEAT0_MASK 0x100000
+
+struct hl_eq_hbm_sei_read_err_intr_info {
+ /* DFI_RD_ERR_REP_ADDR */
+ struct hbm_rd_addr dbg_rd_err_addr;
+ /* DFI_RD_ERR_REP_ERR */
+ union {
+ struct {
+ /* bit fields are only for FW use */
+ u32 dbg_rd_err_par:8;
+ u32 dbg_rd_err_par_data:8;
+ u32 dbg_rd_err_serr:4;
+ u32 dbg_rd_err_derr:4;
+ u32 reserved:8;
+ };
+ __le32 dbg_rd_err_misc;
+ };
+ /* DFI_RD_ERR_REP_DM */
+ __le32 dbg_rd_err_dm;
+ /* DFI_RD_ERR_REP_SYNDROME */
+ __le32 dbg_rd_err_syndrome;
+ /* DFI_RD_ERR_REP_DATA */
+ __le32 dbg_rd_err_data[HBM_RD_ERR_DATA_LIFO_LEN];
+};
+
+struct hl_eq_hbm_sei_ca_par_intr_info {
+ /* 14 LSBs */
+ __le16 dbg_row[HBM_CA_ERR_CMD_LIFO_LEN];
+ /* 18 LSBs */
+ __le32 dbg_col[HBM_CA_ERR_CMD_LIFO_LEN];
+};
+
+#define WR_PAR_LAST_CMD_COL_SHIFT 0
+#define WR_PAR_LAST_CMD_COL_MASK 0x3F
+#define WR_PAR_LAST_CMD_BG_SHIFT 6
+#define WR_PAR_LAST_CMD_BG_MASK 0xC0
+#define WR_PAR_LAST_CMD_BA_SHIFT 8
+#define WR_PAR_LAST_CMD_BA_MASK 0x300
+#define WR_PAR_LAST_CMD_SID_SHIFT 10
+#define WR_PAR_LAST_CMD_SID_MASK 0x400
+
+/* Row address isn't latched */
+struct hbm_sei_wr_cmd_address {
+ /* DFI_DERR_LAST_CMD */
+ union {
+ struct {
+ /* bit fields are only for FW use */
+ u32 col:6;
+ u32 bg:2;
+ u32 ba:2;
+ u32 sid:1;
+ u32 reserved:21;
+ };
+ __le32 dbg_wr_cmd_addr;
+ };
+};
+
+struct hl_eq_hbm_sei_wr_par_intr_info {
+ /* entry 0: WR command address from the 1st cycle prior to the error
+ * entry 1: WR command address from the 2nd cycle prior to the error
+ * and so on...
+ */
+ struct hbm_sei_wr_cmd_address dbg_last_wr_cmds[HBM_WR_PAR_CMD_LIFO_LEN];
+ /* derr[0:1] - 1st HBM cycle DERR output
+ * derr[2:3] - 2nd HBM cycle DERR output
+ */
+ __u8 dbg_derr;
+ /* extend to reach 8B */
+ __u8 pad[3];
+};
+
+/*
+ * this struct represents the following sei causes:
+ * command parity, ECC double error, ECC single error, dfi error, cattrip,
+ * temperature read-out, read parity error and write parity error.
+ * some only use the header while some have extra data.
+ */
+struct hl_eq_hbm_sei_data {
+ struct hl_hbm_sei_header hdr;
+ union {
+ struct hl_eq_hbm_sei_ca_par_intr_info ca_parity_even_info;
+ struct hl_eq_hbm_sei_ca_par_intr_info ca_parity_odd_info;
+ struct hl_eq_hbm_sei_read_err_intr_info read_err_info;
+ struct hl_eq_hbm_sei_wr_par_intr_info wr_parity_info;
+ };
+};
+
+/* Engine/farm arc interrupt type */
+enum hl_engine_arc_interrupt_type {
+ /* Qman/farm ARC DCCM QUEUE FULL interrupt type */
+ ENGINE_ARC_DCCM_QUEUE_FULL_IRQ = 1
+};
+
+/* Data structure specifies details of payload of DCCM QUEUE FULL interrupt */
+struct hl_engine_arc_dccm_queue_full_irq {
+ /* Queue index value which caused DCCM QUEUE FULL */
+ __le32 queue_index;
+ __le32 pad;
+};
+
+/* Data structure specifies details of QM/FARM ARC interrupt */
+struct hl_eq_engine_arc_intr_data {
+ /* ARC engine id e.g. DCORE0_TPC0_QM_ARC, DCORE0_TCP1_QM_ARC */
+ __le32 engine_id;
+ __le32 intr_type; /* enum hl_engine_arc_interrupt_type */
+	/* More info related to the interrupt, e.g. the queue index
+	 * in case of a DCCM_QUEUE_FULL interrupt.
+ */
+ __le64 payload;
+ __le64 pad[5];
+};
+
+#define ADDR_DEC_ADDRESS_COUNT_MAX 4
+
+/* Data structure specifies details of ADDR_DEC interrupt */
+struct hl_eq_addr_dec_intr_data {
+ struct hl_eq_intr_cause intr_cause;
+ __le64 addr[ADDR_DEC_ADDRESS_COUNT_MAX];
+ __u8 addr_cnt;
+ __u8 pad[7];
+};
+
+struct hl_eq_entry {
+ struct hl_eq_header hdr;
+ union {
+ __le64 data_placeholder;
+ struct hl_eq_ecc_data ecc_data;
+ struct hl_eq_hbm_ecc_data hbm_ecc_data; /* Obsolete */
+ struct hl_eq_sm_sei_data sm_sei_data;
+ struct cpucp_pkt_sync_err pkt_sync_err;
+ struct hl_eq_fw_alive fw_alive;
+ struct hl_eq_intr_cause intr_cause;
+ struct hl_eq_pcie_drain_ind_data pcie_drain_ind_data;
+ struct hl_eq_razwi_info razwi_info;
+ struct hl_eq_razwi_with_intr_cause razwi_with_intr_cause;
+ struct hl_eq_hbm_sei_data sei_data; /* Gaudi2 HBM */
+ struct hl_eq_engine_arc_intr_data arc_data;
+ struct hl_eq_addr_dec_intr_data addr_dec;
+ __le64 data[7];
+ };
+};
+
+#define HL_EQ_ENTRY_SIZE sizeof(struct hl_eq_entry)
+
+#define EQ_CTL_READY_SHIFT 31
+#define EQ_CTL_READY_MASK 0x80000000
+
+#define EQ_CTL_EVENT_MODE_SHIFT 28
+#define EQ_CTL_EVENT_MODE_MASK 0x70000000
+
+#define EQ_CTL_EVENT_TYPE_SHIFT 16
+#define EQ_CTL_EVENT_TYPE_MASK 0x0FFF0000
+
+#define EQ_CTL_INDEX_SHIFT 0
+#define EQ_CTL_INDEX_MASK 0x0000FFFF
+
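/*
 * Decode sketch (hypothetical helper): extract the event type field from
 * an EQ entry control word using the masks above.
 */
static inline u32 my_eq_event_type(const struct hl_eq_entry *entry)
{
	u32 ctl = le32_to_cpu(entry->hdr.ctl);

	return (ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT;
}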
+enum pq_init_status {
+ PQ_INIT_STATUS_NA = 0,
+ PQ_INIT_STATUS_READY_FOR_CP,
+ PQ_INIT_STATUS_READY_FOR_HOST,
+ PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI,
+ PQ_INIT_STATUS_LEN_NOT_POWER_OF_TWO_ERR,
+ PQ_INIT_STATUS_ILLEGAL_Q_ADDR_ERR
+};
+
+/*
+ * CpuCP Primary Queue Packets
+ *
+ * During normal operation, the host's kernel driver needs to send various
+ * messages to CpuCP, usually either to SET some value into a H/W peripheral or
+ * to GET the current value of some H/W peripheral. For example, SET the
+ * frequency of MME/TPC and GET the value of the thermal sensor.
+ *
+ * These messages can be initiated either by the User application or by the
+ * host's driver itself, e.g. power management code. In either case, the
+ * communication from the host's driver to CpuCP will *always* be in
+ * synchronous mode, meaning that the host will send a single message and poll
+ * until the message was acknowledged and the results are ready (if results are
+ * needed).
+ *
+ * This means that only a single message can be sent at a time and the host's
+ * driver must wait for its result before sending the next message. Having said
+ * that, because these are control messages which are sent in a relatively low
+ * frequency, this limitation seems acceptable. It's important to note that
+ * in case of multiple devices, messages to different devices *can* be sent
+ * at the same time.
+ *
+ * The message, inputs/outputs (if relevant) and fence object will be located
+ * on the device DDR at an address that will be determined by the host's driver.
+ * During device initialization phase, the host will pass to CpuCP that address.
+ * Most of the message types will contain inputs/outputs inside the message
+ * itself. The common part of each message will contain the opcode of the
+ * message (its type) and a field representing a fence object.
+ *
+ * When the host's driver wishes to send a message to CpuCP, it will write the
+ * message contents to the device DDR, clear the fence object and then write to
+ * the PSOC_ARC1_AUX_SW_INTR, to issue interrupt 121 to ARC Management CPU.
+ *
+ * Upon receiving the interrupt (#121), CpuCP will read the message from the
+ * DDR. In case the message is a SET operation, CpuCP will first perform the
+ * operation and then write to the fence object on the device DDR. In case the
+ * message is a GET operation, CpuCP will first fill the results section on the
+ * device DDR and then write to the fence object. If an error occurred, CpuCP
+ * will fill the rc field with the right error code.
+ *
+ * In the meantime, the host's driver will poll on the fence object. Once the
+ * host sees that the fence object is signaled, it will read the results from
+ * the device DDR (if relevant) and resume the code execution in the host's
+ * driver.
+ *
+ * To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8
+ * so the value being put by the host's driver matches the value read by CpuCP
+ *
+ * Non-QMAN packets should be limited to values 1 through (2^8 - 1)
+ *
+ * Detailed description:
+ *
+ * CPUCP_PACKET_DISABLE_PCI_ACCESS -
+ * After receiving this packet the embedded CPU must NOT issue PCI
+ *       transactions (read/write) towards the Host CPU. This also includes
+ * sending MSI-X interrupts.
+ * This packet is usually sent before the device is moved to D3Hot state.
+ *
+ * CPUCP_PACKET_ENABLE_PCI_ACCESS -
+ * After receiving this packet the embedded CPU is allowed to issue PCI
+ * transactions towards the Host CPU, including sending MSI-X interrupts.
+ *       This packet is usually sent after the device is moved to D0 state.
+ *
+ * CPUCP_PACKET_TEMPERATURE_GET -
+ * Fetch the current temperature / Max / Max Hyst / Critical /
+ * Critical Hyst of a specified thermal sensor. The packet's
+ * arguments specify the desired sensor and the field to get.
+ *
+ * CPUCP_PACKET_VOLTAGE_GET -
+ * Fetch the voltage / Max / Min of a specified sensor. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_CURRENT_GET -
+ * Fetch the current / Max / Min of a specified sensor. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_FAN_SPEED_GET -
+ * Fetch the speed / Max / Min of a specified fan. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_PWM_GET -
+ * Fetch the pwm value / mode of a specified pwm. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_PWM_SET -
+ * Set the pwm value / mode of a specified pwm. The packet's
+ * arguments specify the sensor, type and value.
+ *
+ * CPUCP_PACKET_FREQUENCY_SET -
+ * Set the frequency of a specified PLL. The packet's arguments specify
+ * the PLL and the desired frequency. The actual frequency in the device
+ * might differ from the requested frequency.
+ *
+ * CPUCP_PACKET_FREQUENCY_GET -
+ * Fetch the frequency of a specified PLL. The packet's arguments specify
+ * the PLL.
+ *
+ * CPUCP_PACKET_LED_SET -
+ * Set the state of a specified led. The packet's arguments
+ * specify the led and the desired state.
+ *
+ * CPUCP_PACKET_I2C_WR -
+ * Write 32-bit value to I2C device. The packet's arguments specify the
+ * I2C bus, address and value.
+ *
+ * CPUCP_PACKET_I2C_RD -
+ * Read 32-bit value from I2C device. The packet's arguments specify the
+ * I2C bus and address.
+ *
+ * CPUCP_PACKET_INFO_GET -
+ * Fetch information from the device as specified in the packet's
+ * structure. The host's driver passes the max size it allows the CpuCP to
+ * write to the structure, to prevent data corruption in case of
+ * mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed
+ *
+ * CPUCP_PACKET_UNMASK_RAZWI_IRQ -
+ * Unmask the given IRQ. The IRQ number is specified in the value field.
+ * The packet is sent after receiving an interrupt and printing its
+ * relevant information.
+ *
+ * CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY -
+ * Unmask the given IRQs. The IRQs numbers are specified in an array right
+ * after the cpucp_packet structure, where its first element is the array
+ * length. The packet is sent after a soft reset was done in order to
+ * handle any interrupts that were sent during the reset process.
+ *
+ * CPUCP_PACKET_TEST -
+ * Test packet for CpuCP connectivity. The CPU will put the fence value
+ * in the result field.
+ *
+ * CPUCP_PACKET_FREQUENCY_CURR_GET -
+ * Fetch the current frequency of a specified PLL. The packet's arguments
+ * specify the PLL.
+ *
+ * CPUCP_PACKET_MAX_POWER_GET -
+ * Fetch the maximal power of the device.
+ *
+ * CPUCP_PACKET_MAX_POWER_SET -
+ * Set the maximal power of the device. The packet's arguments specify
+ * the power.
+ *
+ * CPUCP_PACKET_EEPROM_DATA_GET -
+ * Get EEPROM data from the CpuCP kernel. The buffer is specified in the
+ * addr field. The CPU will put the returned data size in the result
+ * field. In addition, the host's driver passes the max size it allows the
+ * CpuCP to write to the structure, to prevent data corruption in case of
+ * mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_NIC_INFO_GET -
+ *       Fetch information from the device regarding the NIC. The host's driver
+ * passes the max size it allows the CpuCP to write to the structure, to
+ * prevent data corruption in case of mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_TEMPERATURE_SET -
+ * Set the value of the offset property of a specified thermal sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_VOLTAGE_SET -
+ * Trigger the reset_history property of a specified voltage sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_CURRENT_SET -
+ * Trigger the reset_history property of a specified current sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_PCIE_THROUGHPUT_GET -
+ * Get throughput of PCIe.
+ * The packet's arguments specify the transaction direction (TX/RX).
+ * The window measurement is 10[msec], and the return value is in KB/sec.
+ *
+ * CPUCP_PACKET_PCIE_REPLAY_CNT_GET -
+ *       Fetch the replay count, which measures the number of "replay"
+ *       events, i.e. the number of retries performed by PCIe.
+ *
+ * CPUCP_PACKET_TOTAL_ENERGY_GET -
+ *       Total energy is a measurement of the energy consumed from the
+ *       time the FW Linux is loaded. It is calculated by multiplying the
+ *       average power by the time passed since armcp start. The units are
+ *       millijoules.
+ *
+ * CPUCP_PACKET_PLL_INFO_GET -
+ *       Fetch the frequencies of a PLL from the required PLL IP.
+ *       The packet's arguments specify the device PLL type, taken from
+ *       the device's pll_index enum.
+ *       The result is composed of 4 outputs, each a 16-bit frequency
+ *       in MHz.
+ *
+ * CPUCP_PACKET_POWER_GET -
+ * Fetch the present power consumption of the device (Current * Voltage).
+ *
+ * CPUCP_PACKET_NIC_PFC_SET -
+ * Enable/Disable the NIC PFC feature. The packet's arguments specify the
+ * NIC port, relevant lanes to configure and one bit indication for
+ * enable/disable.
+ *
+ * CPUCP_PACKET_NIC_FAULT_GET -
+ * Fetch the current indication for local/remote faults from the NIC MAC.
+ * The result is 32-bit value of the relevant register.
+ *
+ * CPUCP_PACKET_NIC_LPBK_SET -
+ * Enable/Disable the MAC loopback feature. The packet's arguments specify
+ * the NIC port, relevant lanes to configure and one bit indication for
+ * enable/disable.
+ *
+ * CPUCP_PACKET_NIC_MAC_INIT -
+ * Configure the NIC MAC channels. The packet's arguments specify the
+ * NIC port and the speed.
+ *
+ * CPUCP_PACKET_MSI_INFO_SET -
+ *       Set the index number for each supported MSI type going from
+ *       host to device.
+ *
+ * CPUCP_PACKET_NIC_XPCS91_REGS_GET -
+ *       Fetch the correctable/uncorrectable counter values from the NIC MAC.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_GET -
+ * Fetch various NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_CLR -
+ * Clear the various NIC MAC counters in the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_ALL_GET -
+ * Fetch all NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_IS_IDLE_CHECK -
+ * Check if the device is IDLE in regard to the DMA/compute engines
+ * and QMANs. The f/w will return a bitmask where each bit represents
+ * a different engine or QMAN according to enum cpucp_idle_mask.
+ * The bit will be 1 if the engine is NOT idle.
+ *
+ * CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET -
+ *       Fetch data on all HBM replaced rows and rows pending replacement.
+ *
+ * CPUCP_PACKET_HBM_PENDING_ROWS_STATUS -
+ *       Fetch the status of HBM rows that are pending replacement and
+ *       require a reboot to be replaced.
+ *
+ * CPUCP_PACKET_POWER_SET -
+ *       Reset the power history of the device to 0.
+ *
+ * CPUCP_PACKET_ENGINE_CORE_ASID_SET -
+ *       Packet to perform engine core ASID configuration.
+ *
+ * CPUCP_PACKET_SEC_ATTEST_GET -
+ *       Get the attestation data that is collected during various stages of
+ *       the boot sequence. The attestation data is also hashed with a unique
+ *       number (nonce) provided by the host to prevent replay attacks.
+ *       A public key and certificate are also provided as part of the FW
+ *       response.
+ *
+ * CPUCP_PACKET_INFO_SIGNED_GET -
+ *       Get the device information signed by the Trusted Platform device.
+ *       The device info data is also hashed with a unique number (nonce)
+ *       provided by the host to prevent replay attacks. A public key and
+ *       certificate are also provided as part of the FW response.
+ *
+ * CPUCP_PACKET_MONITOR_DUMP_GET -
+ *       Get a dump of the monitor registers from the CpuCP kernel.
+ *       The CPU will put the register dump in a buffer allocated by the
+ *       driver, whose address is passed via the CpuCP packet. In addition,
+ *       the host's driver passes the max size it allows the CpuCP to write
+ *       to the structure, to prevent data corruption in case of mismatched
+ *       driver/FW versions.
+ *       Obsolete.
+ *
+ * CPUCP_PACKET_GENERIC_PASSTHROUGH -
+ *       Generic opcode for all firmware info that is only passed to the host
+ *       through the LKD, without being parsed there.
+ *
+ * CPUCP_PACKET_ACTIVE_STATUS_SET -
+ *       LKD sends the FW an indication of whether the device is free or in
+ *       use; this indication is also reported to the BMC.
+ *
+ * CPUCP_PACKET_SOFT_RESET -
+ * Packet to perform soft-reset.
+ *
+ * CPUCP_PACKET_INTS_REGISTER -
+ * Packet to inform FW that queues have been established and LKD is ready to receive
+ * EQ events.
+ */
+
+enum cpucp_packet_id {
+ CPUCP_PACKET_DISABLE_PCI_ACCESS = 1, /* internal */
+ CPUCP_PACKET_ENABLE_PCI_ACCESS, /* internal */
+ CPUCP_PACKET_TEMPERATURE_GET, /* sysfs */
+ CPUCP_PACKET_VOLTAGE_GET, /* sysfs */
+ CPUCP_PACKET_CURRENT_GET, /* sysfs */
+ CPUCP_PACKET_FAN_SPEED_GET, /* sysfs */
+ CPUCP_PACKET_PWM_GET, /* sysfs */
+ CPUCP_PACKET_PWM_SET, /* sysfs */
+ CPUCP_PACKET_FREQUENCY_SET, /* sysfs */
+ CPUCP_PACKET_FREQUENCY_GET, /* sysfs */
+ CPUCP_PACKET_LED_SET, /* debugfs */
+ CPUCP_PACKET_I2C_WR, /* debugfs */
+ CPUCP_PACKET_I2C_RD, /* debugfs */
+ CPUCP_PACKET_INFO_GET, /* IOCTL */
+ CPUCP_PACKET_FLASH_PROGRAM_REMOVED,
+ CPUCP_PACKET_UNMASK_RAZWI_IRQ, /* internal */
+ CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY, /* internal */
+ CPUCP_PACKET_TEST, /* internal */
+ CPUCP_PACKET_FREQUENCY_CURR_GET, /* sysfs */
+ CPUCP_PACKET_MAX_POWER_GET, /* sysfs */
+ CPUCP_PACKET_MAX_POWER_SET, /* sysfs */
+ CPUCP_PACKET_EEPROM_DATA_GET, /* sysfs */
+ CPUCP_PACKET_NIC_INFO_GET, /* internal */
+ CPUCP_PACKET_TEMPERATURE_SET, /* sysfs */
+ CPUCP_PACKET_VOLTAGE_SET, /* sysfs */
+ CPUCP_PACKET_CURRENT_SET, /* sysfs */
+ CPUCP_PACKET_PCIE_THROUGHPUT_GET, /* internal */
+ CPUCP_PACKET_PCIE_REPLAY_CNT_GET, /* internal */
+ CPUCP_PACKET_TOTAL_ENERGY_GET, /* internal */
+ CPUCP_PACKET_PLL_INFO_GET, /* internal */
+ CPUCP_PACKET_NIC_STATUS, /* internal */
+ CPUCP_PACKET_POWER_GET, /* internal */
+ CPUCP_PACKET_NIC_PFC_SET, /* internal */
+ CPUCP_PACKET_NIC_FAULT_GET, /* internal */
+ CPUCP_PACKET_NIC_LPBK_SET, /* internal */
+ CPUCP_PACKET_NIC_MAC_CFG, /* internal */
+ CPUCP_PACKET_MSI_INFO_SET, /* internal */
+ CPUCP_PACKET_NIC_XPCS91_REGS_GET, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_GET, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_CLR, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_ALL_GET, /* internal */
+ CPUCP_PACKET_IS_IDLE_CHECK, /* internal */
+ CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET,/* internal */
+ CPUCP_PACKET_HBM_PENDING_ROWS_STATUS, /* internal */
+ CPUCP_PACKET_POWER_SET, /* internal */
+ CPUCP_PACKET_RESERVED, /* not used */
+ CPUCP_PACKET_ENGINE_CORE_ASID_SET, /* internal */
+ CPUCP_PACKET_RESERVED2, /* not used */
+ CPUCP_PACKET_SEC_ATTEST_GET, /* internal */
+ CPUCP_PACKET_INFO_SIGNED_GET, /* internal */
+ CPUCP_PACKET_RESERVED4, /* not used */
+ CPUCP_PACKET_MONITOR_DUMP_GET, /* debugfs */
+ CPUCP_PACKET_RESERVED5, /* not used */
+ CPUCP_PACKET_RESERVED6, /* not used */
+ CPUCP_PACKET_RESERVED7, /* not used */
+ CPUCP_PACKET_GENERIC_PASSTHROUGH, /* IOCTL */
+ CPUCP_PACKET_RESERVED8, /* not used */
+ CPUCP_PACKET_ACTIVE_STATUS_SET, /* internal */
+ CPUCP_PACKET_RESERVED9, /* not used */
+ CPUCP_PACKET_RESERVED10, /* not used */
+ CPUCP_PACKET_RESERVED11, /* not used */
+ CPUCP_PACKET_RESERVED12, /* internal */
+ CPUCP_PACKET_RESERVED13, /* internal */
+ CPUCP_PACKET_SOFT_RESET, /* internal */
+ CPUCP_PACKET_INTS_REGISTER, /* internal */
+ CPUCP_PACKET_ID_MAX /* must be last */
+};
+
+#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
+
+#define CPUCP_PKT_CTL_RC_SHIFT 12
+#define CPUCP_PKT_CTL_RC_MASK 0x0000F000
+
+#define CPUCP_PKT_CTL_OPCODE_SHIFT 16
+#define CPUCP_PKT_CTL_OPCODE_MASK 0x1FFF0000
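+
+/*
+ * Illustrative helpers (a sketch, not part of the original interface): pack
+ * an opcode into the ctl word and extract the return code from it, using the
+ * shift/mask pairs above. Endianness handling is assumed to follow the
+ * __le32 ctl field of struct cpucp_packet below.
+ */
+static inline __le32 cpucp_pkt_ctl_pack_opcode(u32 opcode)
+{
+	return cpu_to_le32((opcode << CPUCP_PKT_CTL_OPCODE_SHIFT) &
+				CPUCP_PKT_CTL_OPCODE_MASK);
+}
+
+static inline u32 cpucp_pkt_ctl_get_rc(__le32 ctl)
+{
+	return (le32_to_cpu(ctl) & CPUCP_PKT_CTL_RC_MASK) >>
+			CPUCP_PKT_CTL_RC_SHIFT;
+}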
+
+#define CPUCP_PKT_RES_PLL_OUT0_SHIFT 0
+#define CPUCP_PKT_RES_PLL_OUT0_MASK 0x000000000000FFFFull
+#define CPUCP_PKT_RES_PLL_OUT1_SHIFT 16
+#define CPUCP_PKT_RES_PLL_OUT1_MASK 0x00000000FFFF0000ull
+#define CPUCP_PKT_RES_PLL_OUT2_SHIFT 32
+#define CPUCP_PKT_RES_PLL_OUT2_MASK 0x0000FFFF00000000ull
+#define CPUCP_PKT_RES_PLL_OUT3_SHIFT 48
+#define CPUCP_PKT_RES_PLL_OUT3_MASK 0xFFFF000000000000ull
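+
+/*
+ * Illustrative sketch (assumed helper): unpack one of the four 16-bit PLL
+ * output frequencies (in MHz) from the 64-bit result returned by
+ * CPUCP_PACKET_PLL_INFO_GET, per the shift/mask layout above.
+ */
+static inline u16 cpucp_pll_out_freq_mhz(u64 result, unsigned int out)
+{
+	/* out is 0..3; each output occupies 16 bits of the result */
+	return (result >> (out * 16)) & 0xFFFF;
+}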
+
+#define CPUCP_PKT_RES_EEPROM_OUT0_SHIFT 0
+#define CPUCP_PKT_RES_EEPROM_OUT0_MASK 0x000000000000FFFFull
+#define CPUCP_PKT_RES_EEPROM_OUT1_SHIFT 16
+#define CPUCP_PKT_RES_EEPROM_OUT1_MASK 0x0000000000FF0000ull
+
+#define CPUCP_PKT_VAL_PFC_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_PFC_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_PFC_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_PFC_IN2_MASK 0x000000000000001Eull
+
+#define CPUCP_PKT_VAL_LPBK_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_LPBK_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_LPBK_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_LPBK_IN2_MASK 0x000000000000001Eull
+
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_MASK 0x00000000FFFFFFFEull
+
+/* heartbeat status bits */
+#define CPUCP_PKT_HB_STATUS_EQ_FAULT_SHIFT 0
+#define CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK 0x00000001
+
+struct cpucp_packet {
+ union {
+ __le64 value; /* For SET packets */
+ __le64 result; /* For GET packets */
+ __le64 addr; /* For PQ */
+ };
+
+ __le32 ctl;
+
+ __le32 fence; /* Signal to host that message is completed */
+
+ union {
+ struct {/* For temperature/current/voltage/fan/pwm get/set */
+ __le16 sensor_index;
+ __le16 type;
+ };
+
+ struct { /* For I2C read/write */
+ __u8 i2c_bus;
+ __u8 i2c_addr;
+ __u8 i2c_reg;
+ /*
+			 * In legacy implementations, i2c_len was not present;
+			 * it was unused and just added as padding.
+			 * So if i2c_len is 0, the access is treated as legacy
+			 * and reads/writes 1 byte; if i2c_len is specified,
+			 * it is treated as the new multi-byte r/w support.
+ */
+ __u8 i2c_len;
+ };
+
+ struct {/* For PLL info fetch */
+ __le16 pll_type;
+ /* TODO pll_reg is kept temporary before removal */
+ __le16 pll_reg;
+ };
+
+ /* For any general request */
+ __le32 index;
+
+ /* For frequency get/set */
+ __le32 pll_index;
+
+ /* For led set */
+ __le32 led_index;
+
+ /* For get CpuCP info/EEPROM data/NIC info */
+ __le32 data_max_size;
+
+ /*
+ * For any general status bitmask. Shall be used whenever the
+ * result cannot be used to hold general purpose data.
+ */
+ __le32 status_mask;
+ };
+
+ union {
+ /* For NIC requests */
+ __le32 port_index;
+
+ /* For Generic packet sub index */
+ __le32 pkt_subidx;
+
+ /* random, used once number, for security packets */
+ __le32 nonce;
+ };
+};
+
+struct cpucp_unmask_irq_arr_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 irqs[];
+};
+
+struct cpucp_nic_status_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 data[];
+};
+
+struct cpucp_array_data_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 data[];
+};
+
+enum cpucp_led_index {
+ CPUCP_LED0_INDEX = 0,
+ CPUCP_LED1_INDEX,
+ CPUCP_LED2_INDEX,
+ CPUCP_LED_MAX_INDEX = CPUCP_LED2_INDEX
+};
+
+/*
+ * enum cpucp_packet_rc - Error return code
+ * @cpucp_packet_success -> returned on success.
+ * @cpucp_packet_invalid -> kept to support first-generation platforms.
+ * @cpucp_packet_fault -> returned on a processing error, e.g. failing to
+ *                        get a device binding or semaphore.
+ * @cpucp_packet_invalid_pkt -> returned when the cpucp packet is unsupported.
+ * @cpucp_packet_invalid_params -> returned when a parameter check fails, e.g.
+ *                                 buffer length or attribute value.
+ * @cpucp_packet_rc_max -> indicates the size of the enum, so it must be last.
+ */
+enum cpucp_packet_rc {
+ cpucp_packet_success,
+ cpucp_packet_invalid,
+ cpucp_packet_fault,
+ cpucp_packet_invalid_pkt,
+ cpucp_packet_invalid_params,
+ cpucp_packet_rc_max
+};
+
+/*
+ * cpucp_temp_type should adhere to hwmon_temp_attributes
+ * defined in the Linux kernel's hwmon.h file.
+ */
+enum cpucp_temp_type {
+ cpucp_temp_input,
+ cpucp_temp_min = 4,
+ cpucp_temp_min_hyst,
+ cpucp_temp_max = 6,
+ cpucp_temp_max_hyst,
+ cpucp_temp_crit,
+ cpucp_temp_crit_hyst,
+ cpucp_temp_offset = 19,
+ cpucp_temp_lowest = 21,
+ cpucp_temp_highest = 22,
+ cpucp_temp_reset_history = 23,
+ cpucp_temp_warn = 24,
+ cpucp_temp_max_crit = 25,
+ cpucp_temp_max_warn = 26,
+};
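+
+/*
+ * Example (an illustrative sketch, not part of the interface): fill a packet
+ * that fetches the current temperature of sensor 0. Fence handling and queue
+ * submission are driver-specific and omitted here.
+ */
+static inline void cpucp_fill_temp_input_get(struct cpucp_packet *pkt)
+{
+	*pkt = (struct cpucp_packet){ 0 };
+	pkt->ctl = cpu_to_le32(CPUCP_PACKET_TEMPERATURE_GET <<
+				CPUCP_PKT_CTL_OPCODE_SHIFT);
+	pkt->sensor_index = cpu_to_le16(0);
+	pkt->type = cpu_to_le16(cpucp_temp_input);
+}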
+
+enum cpucp_in_attributes {
+ cpucp_in_input,
+ cpucp_in_min,
+ cpucp_in_max,
+ cpucp_in_lowest = 6,
+ cpucp_in_highest = 7,
+ cpucp_in_reset_history,
+ cpucp_in_intr_alarm_a,
+ cpucp_in_intr_alarm_b,
+};
+
+enum cpucp_curr_attributes {
+ cpucp_curr_input,
+ cpucp_curr_min,
+ cpucp_curr_max,
+ cpucp_curr_lowest = 6,
+ cpucp_curr_highest = 7,
+ cpucp_curr_reset_history
+};
+
+enum cpucp_fan_attributes {
+ cpucp_fan_input,
+ cpucp_fan_min = 2,
+ cpucp_fan_max
+};
+
+enum cpucp_pwm_attributes {
+ cpucp_pwm_input,
+ cpucp_pwm_enable
+};
+
+enum cpucp_pcie_throughput_attributes {
+ cpucp_pcie_throughput_tx,
+ cpucp_pcie_throughput_rx
+};
+
+/* TODO temporary kept before removal */
+enum cpucp_pll_reg_attributes {
+ cpucp_pll_nr_reg,
+ cpucp_pll_nf_reg,
+ cpucp_pll_od_reg,
+ cpucp_pll_div_factor_reg,
+ cpucp_pll_div_sel_reg
+};
+
+/* TODO temporary kept before removal */
+enum cpucp_pll_type_attributes {
+ cpucp_pll_cpu,
+ cpucp_pll_pci,
+};
+
+/*
+ * cpucp_power_type aligns with hwmon_power_attributes
+ * defined in the Linux kernel's hwmon.h file.
+ */
+enum cpucp_power_type {
+ CPUCP_POWER_INPUT = 8,
+ CPUCP_POWER_INPUT_HIGHEST = 9,
+ CPUCP_POWER_RESET_INPUT_HISTORY = 11
+};
+
+/*
+ * MSI type enumeration table for all ASICs and future SW versions.
+ * For future ASIC-LKD compatibility, we can only add new enumerations
+ * at the end of the table (before CPUCP_NUM_OF_MSI_TYPES).
+ * Changing the order of entries or removing entries is not allowed.
+ */
+enum cpucp_msi_type {
+ CPUCP_EVENT_QUEUE_MSI_TYPE,
+ CPUCP_NIC_PORT1_MSI_TYPE,
+ CPUCP_NIC_PORT3_MSI_TYPE,
+ CPUCP_NIC_PORT5_MSI_TYPE,
+ CPUCP_NIC_PORT7_MSI_TYPE,
+ CPUCP_NIC_PORT9_MSI_TYPE,
+ CPUCP_EVENT_QUEUE_ERR_MSI_TYPE,
+ CPUCP_NUM_OF_MSI_TYPES
+};
+
+/*
+ * PLL enumeration table used for all ASICs and future SW versions.
+ * For future ASIC-LKD compatibility, we can only add new enumerations
+ * at the end of the table.
+ * Changing the order of entries or removing entries is not allowed.
+ */
+enum pll_index {
+ CPU_PLL = 0,
+ PCI_PLL = 1,
+ NIC_PLL = 2,
+ DMA_PLL = 3,
+ MESH_PLL = 4,
+ MME_PLL = 5,
+ TPC_PLL = 6,
+ IF_PLL = 7,
+ SRAM_PLL = 8,
+ NS_PLL = 9,
+ HBM_PLL = 10,
+ MSS_PLL = 11,
+ DDR_PLL = 12,
+ VID_PLL = 13,
+ BANK_PLL = 14,
+ MMU_PLL = 15,
+ IC_PLL = 16,
+ MC_PLL = 17,
+ EMMC_PLL = 18,
+ D2D_PLL = 19,
+ CS_PLL = 20,
+ C2C_PLL = 21,
+ NCH_PLL = 22,
+ C2M_PLL = 23,
+ PLL_MAX
+};
+
+enum rl_index {
+ TPC_RL = 0,
+ MME_RL,
+ EDMA_RL,
+};
+
+enum pvt_index {
+ PVT_SW,
+ PVT_SE,
+ PVT_NW,
+ PVT_NE
+};
+
+/* Event Queue Packets */
+
+struct eq_generic_event {
+ __le64 data[7];
+};
+
+/*
+ * CpuCP info
+ */
+
+#define CARD_NAME_MAX_LEN 16
+#define CPUCP_MAX_SENSORS 128
+#define CPUCP_MAX_NICS 128
+#define CPUCP_LANES_PER_NIC 4
+#define CPUCP_NIC_QSFP_EEPROM_MAX_LEN 1024
+#define CPUCP_MAX_NIC_LANES (CPUCP_MAX_NICS * CPUCP_LANES_PER_NIC)
+#define CPUCP_NIC_MASK_ARR_LEN ((CPUCP_MAX_NICS + 63) / 64)
+#define CPUCP_NIC_POLARITY_ARR_LEN ((CPUCP_MAX_NIC_LANES + 63) / 64)
+#define CPUCP_HBM_ROW_REPLACE_MAX 32
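+
+/*
+ * Illustrative sketch (assumed helper, not part of the ABI): test whether a
+ * given NIC port has its bit set in one of the 64-bit mask arrays sized by
+ * CPUCP_NIC_MASK_ARR_LEN, e.g. link_mask in struct cpucp_nic_info below.
+ */
+static inline bool cpucp_nic_mask_test_bit(const __le64 *mask, u32 port)
+{
+	return le64_to_cpu(mask[port / 64]) & (1ull << (port % 64));
+}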
+
+struct cpucp_sensor {
+ __le32 type;
+ __le32 flags;
+};
+
+/**
+ * enum cpucp_card_types - ASIC card type.
+ * @cpucp_card_type_pci: PCI card.
+ * @cpucp_card_type_pmc: PCI Mezzanine Card.
+ */
+enum cpucp_card_types {
+ cpucp_card_type_pci,
+ cpucp_card_type_pmc
+};
+
+#define CPUCP_SEC_CONF_ENABLED_SHIFT 0
+#define CPUCP_SEC_CONF_ENABLED_MASK 0x00000001
+
+#define CPUCP_SEC_CONF_FLASH_WP_SHIFT 1
+#define CPUCP_SEC_CONF_FLASH_WP_MASK 0x00000002
+
+#define CPUCP_SEC_CONF_EEPROM_WP_SHIFT 2
+#define CPUCP_SEC_CONF_EEPROM_WP_MASK 0x00000004
+
+/**
+ * struct cpucp_security_info - Security information.
+ * @config: configuration bit field
+ * @keys_num: number of stored keys
+ * @revoked_keys: revoked keys bit field
+ * @min_svn: minimal security version
+ */
+struct cpucp_security_info {
+ __u8 config;
+ __u8 keys_num;
+ __u8 revoked_keys;
+ __u8 min_svn;
+};
+
+/**
+ * struct cpucp_info - Info from CpuCP that is necessary to the host's driver
+ * @sensors: available sensors description.
+ * @kernel_version: CpuCP linux kernel version.
+ * @reserved: reserved field.
+ * @card_type: card configuration type.
+ * @card_location: in a server, each card has a different connection topology
+ *                 depending on its location (relevant for the PMC card type)
+ * @cpld_version: CPLD programmed F/W version.
+ * @infineon_version: Infineon main DC-DC version.
+ * @fuse_version: silicon production FUSE information.
+ * @thermal_version: thermald S/W version.
+ * @cpucp_version: CpuCP S/W version.
+ * @infineon_second_stage_version: Infineon 2nd stage DC-DC version.
+ * @dram_size: available DRAM size.
+ * @card_name: card name that will be displayed in HWMON subsystem on the host
+ * @tpc_binning_mask: TPC binning mask, 1 bit per TPC instance
+ * (0 = functional, 1 = binned)
+ * @decoder_binning_mask: Decoder binning mask, 1 bit per decoder instance
+ * (0 = functional, 1 = binned), maximum 1 per dcore
+ * @sram_binning: Categorize SRAM functionality
+ * (0 = fully functional, 1 = lower-half is not functional,
+ * 2 = upper-half is not functional)
+ * @sec_info: security information
+ * @cpld_timestamp: CPLD programmed F/W timestamp.
+ * @pll_map: Bit map of supported PLLs for current ASIC version.
+ * @mme_binning_mask: MME binning mask,
+ * bits [0:6] <==> dcore0 mme fma
+ * bits [7:13] <==> dcore1 mme fma
+ * bits [14:20] <==> dcore0 mme ima
+ * bits [21:27] <==> dcore1 mme ima
+ * For each group, if the 6th bit is set then first 5 bits
+ * represent the col's idx [0-31], otherwise these bits are
+ * ignored, and col idx 32 is binned. 7th bit is don't care.
+ * @dram_binning_mask: DRAM binning mask, 1 bit per dram instance
+ * (0 = functional 1 = binned)
+ * @memory_repair_flag: eFuse flag indicating memory repair
+ * @edma_binning_mask: EDMA binning mask, 1 bit per EDMA instance
+ * (0 = functional 1 = binned)
+ * @xbar_binning_mask: Xbar binning mask, 1 bit per Xbar instance
+ * (0 = functional 1 = binned)
+ * @interposer_version: Interposer version programmed in eFuse
+ * @substrate_version: Substrate version programmed in eFuse
+ * @eq_health_check_supported: eq health check feature supported in FW.
+ * @fw_hbm_region_size: Size in bytes of FW reserved region in HBM.
+ * @fw_os_version: Firmware OS Version
+ */
+struct cpucp_info {
+ struct cpucp_sensor sensors[CPUCP_MAX_SENSORS];
+ __u8 kernel_version[VERSION_MAX_LEN];
+ __le32 reserved1;
+ __le32 card_type;
+ __le32 card_location;
+ __le32 cpld_version;
+ __le32 infineon_version;
+ __u8 fuse_version[VERSION_MAX_LEN];
+ __u8 thermal_version[VERSION_MAX_LEN];
+ __u8 cpucp_version[VERSION_MAX_LEN];
+ __le32 infineon_second_stage_version;
+ __le64 dram_size;
+ char card_name[CARD_NAME_MAX_LEN];
+ __le64 tpc_binning_mask;
+ __le64 decoder_binning_mask;
+ __u8 sram_binning;
+ __u8 dram_binning_mask;
+ __u8 memory_repair_flag;
+ __u8 edma_binning_mask;
+ __u8 xbar_binning_mask;
+ __u8 interposer_version;
+ __u8 substrate_version;
+ __u8 eq_health_check_supported;
+ struct cpucp_security_info sec_info;
+ __le32 cpld_timestamp;
+ __u8 pll_map[PLL_MAP_LEN];
+ __le64 mme_binning_mask;
+ __u8 fw_os_version[VERSION_MAX_LEN];
+};
+
+struct cpucp_mac_addr {
+ __u8 mac_addr[ETH_ALEN];
+};
+
+enum cpucp_serdes_type {
+ TYPE_1_SERDES_TYPE,
+ TYPE_2_SERDES_TYPE,
+ HLS1_SERDES_TYPE,
+ HLS1H_SERDES_TYPE,
+ HLS2_SERDES_TYPE,
+ HLS2_TYPE_1_SERDES_TYPE,
+ MAX_NUM_SERDES_TYPE, /* number of types */
+ UNKNOWN_SERDES_TYPE = 0xFFFF /* serdes_type is u16 */
+};
+
+struct cpucp_nic_info {
+ struct cpucp_mac_addr mac_addrs[CPUCP_MAX_NICS];
+ __le64 link_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __le64 pol_tx_mask[CPUCP_NIC_POLARITY_ARR_LEN];
+ __le64 pol_rx_mask[CPUCP_NIC_POLARITY_ARR_LEN];
+ __le64 link_ext_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN];
+ __le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __le16 serdes_type; /* enum cpucp_serdes_type */
+ __le16 tx_swap_map[CPUCP_MAX_NICS];
+ __u8 reserved[6];
+};
+
+#define PAGE_DISCARD_MAX 64
+
+struct page_discard_info {
+ __u8 num_entries;
+ __u8 reserved[7];
+ __le32 mmu_page_idx[PAGE_DISCARD_MAX];
+};
+
+/*
+ * struct frac_val - fractional value represented by "integer.frac".
+ * @integer: the integer part of the fractional value;
+ * @frac: the fractional part of the fractional value.
+ */
+struct frac_val {
+ union {
+ struct {
+ __le16 integer;
+ __le16 frac;
+ };
+ __le32 val;
+ };
+};
+
+/*
+ * struct ser_val - the SER (symbol error rate) value is represented by "integer * 10 ^ -exp".
+ * @integer: the integer part of the SER value;
+ * @exp: the exponent part of the SER value.
+ */
+struct ser_val {
+ __le16 integer;
+ __le16 exp;
+};
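+
+/*
+ * Example (a minimal sketch, not from the driver): format a ser_val for
+ * logging per the "integer * 10 ^ -exp" convention documented above.
+ */
+static inline int ser_val_snprint(char *buf, size_t len, struct ser_val s)
+{
+	return snprintf(buf, len, "%ue-%u",
+			le16_to_cpu(s.integer), le16_to_cpu(s.exp));
+}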
+
+/*
+ * struct cpucp_nic_status - describes the status of a NIC port.
+ * @port: NIC port index.
+ * @bad_format_cnt: e.g. CRC.
+ * @responder_out_of_sequence_psn_cnt: e.g NAK.
+ * @high_ber_reinit: link reinit count due to high BER.
+ * @correctable_err_cnt: e.g. bit-flip.
+ * @uncorrectable_err_cnt: e.g. MAC errors.
+ * @retraining_cnt: re-training counter.
+ * @up: is port up.
+ * @pcs_link: has PCS link.
+ * @phy_ready: is PHY ready.
+ * @auto_neg: is Autoneg enabled.
+ * @timeout_retransmission_cnt: timeout retransmission events.
+ * @high_ber_cnt: high ber events.
+ * @pre_fec_ser: pre FEC SER value.
+ * @post_fec_ser: post FEC SER value.
+ * @bandwidth: measured bandwidth.
+ * @lat: measured latency.
+ */
+struct cpucp_nic_status {
+ __le32 port;
+ __le32 bad_format_cnt;
+ __le32 responder_out_of_sequence_psn_cnt;
+ __le32 high_ber_reinit;
+ __le32 correctable_err_cnt;
+ __le32 uncorrectable_err_cnt;
+ __le32 retraining_cnt;
+ __u8 up;
+ __u8 pcs_link;
+ __u8 phy_ready;
+ __u8 auto_neg;
+ __le32 timeout_retransmission_cnt;
+ __le32 high_ber_cnt;
+ struct ser_val pre_fec_ser;
+ struct ser_val post_fec_ser;
+ struct frac_val bandwidth;
+ struct frac_val lat;
+};
+
+enum cpucp_hbm_row_replace_cause {
+ REPLACE_CAUSE_DOUBLE_ECC_ERR,
+ REPLACE_CAUSE_MULTI_SINGLE_ECC_ERR,
+};
+
+struct cpucp_hbm_row_info {
+ __u8 hbm_idx;
+ __u8 pc;
+ __u8 sid;
+ __u8 bank_idx;
+ __le16 row_addr;
+ __u8 replaced_row_cause; /* enum cpucp_hbm_row_replace_cause */
+ __u8 pad;
+};
+
+struct cpucp_hbm_row_replaced_rows_info {
+ __le16 num_replaced_rows;
+ __u8 pad[6];
+ struct cpucp_hbm_row_info replaced_rows[CPUCP_HBM_ROW_REPLACE_MAX];
+};
+
+enum cpu_reset_status {
+ CPU_RST_STATUS_NA = 0,
+ CPU_RST_STATUS_SOFT_RST_DONE = 1,
+};
+
+#define SEC_PCR_DATA_BUF_SZ 256
+#define SEC_PCR_QUOTE_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */
+#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */
+
+/*
+ * struct cpucp_sec_attest_info - attestation report of the boot
+ * @pcr_data: raw values of the PCR registers
+ * @pcr_num_reg: number of PCR registers in the pcr_data array
+ * @pcr_reg_len: length of each PCR register in the pcr_data array (bytes)
+ * @nonce: number used only once; a random number provided by the host. It is
+ *         also passed to the quote command as qualifying data.
+ * @pcr_quote_len: length of the attestation quote data (bytes)
+ * @pcr_quote: attestation report data structure
+ * @quote_sig_len: length of the attestation report signature (bytes)
+ * @quote_sig: signature structure of the attestation report
+ * @pub_data_len: length of the public data (bytes)
+ * @public_data: public key for the signed attestation
+ * (outPublic + name + qualifiedName)
+ * @certificate_len: length of the certificate (bytes)
+ * @certificate: certificate for the attestation signing key
+ */
+struct cpucp_sec_attest_info {
+ __u8 pcr_data[SEC_PCR_DATA_BUF_SZ];
+ __u8 pcr_num_reg;
+ __u8 pcr_reg_len;
+ __le16 pad0;
+ __le32 nonce;
+ __le16 pcr_quote_len;
+ __u8 pcr_quote[SEC_PCR_QUOTE_BUF_SZ];
+ __u8 quote_sig_len;
+ __u8 quote_sig[SEC_SIGNATURE_BUF_SZ];
+ __le16 pub_data_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __le16 certificate_len;
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+};
+
+/*
+ * struct cpucp_dev_info_signed - device information signed by a secured device
+ * @info: device information structure as defined above
+ * @nonce: number used only once; a random number provided by the host. It is
+ *         hashed and signed along with the device information.
+ * @info_sig_len: length of the attestation signature (bytes)
+ * @info_sig: signature of the info + nonce data.
+ * @pub_data_len: length of the public data (bytes)
+ * @public_data: public key for the signed info data
+ * (outPublic + name + qualifiedName)
+ * @certificate_len: length of the certificate (bytes)
+ * @certificate: certificate for the signing key
+ */
+struct cpucp_dev_info_signed {
+ struct cpucp_info info; /* assumed to be 64bit aligned */
+ __le32 nonce;
+ __le32 pad0;
+ __u8 info_sig_len;
+ __u8 info_sig[SEC_SIGNATURE_BUF_SZ];
+ __le16 pub_data_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __le16 certificate_len;
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+};
+
+#define DCORE_MON_REGS_SZ 512
+/*
+ * struct dcore_monitor_regs_data - DCORE monitor regs data.
+ * the structure follows sync manager block layout. Obsolete.
+ * @mon_pay_addrl: array of payload address low bits.
+ * @mon_pay_addrh: array of payload address high bits.
+ * @mon_pay_data: array of payload data.
+ * @mon_arm: array of monitor arm.
+ * @mon_status: array of monitor status.
+ */
+struct dcore_monitor_regs_data {
+ __le32 mon_pay_addrl[DCORE_MON_REGS_SZ];
+ __le32 mon_pay_addrh[DCORE_MON_REGS_SZ];
+ __le32 mon_pay_data[DCORE_MON_REGS_SZ];
+ __le32 mon_arm[DCORE_MON_REGS_SZ];
+ __le32 mon_status[DCORE_MON_REGS_SZ];
+};
+
+/* contains SM data for each SYNC_MNGR (Obsolete) */
+struct cpucp_monitor_dump {
+ struct dcore_monitor_regs_data sync_mngr_w_s;
+ struct dcore_monitor_regs_data sync_mngr_e_s;
+ struct dcore_monitor_regs_data sync_mngr_w_n;
+ struct dcore_monitor_regs_data sync_mngr_e_n;
+};
+
+/*
+ * The type of the generic request (and other input arguments) is fetched from
+ * the user by reading the "pkt_subidx" field in struct cpucp_packet.
+ *
+ * HL_PASSTHROUGH_VERSIONS - Fetch all firmware versions.
+ * HL_GET_ERR_COUNTERS_CMD - Command to get error counters.
+ * HL_GET_P_STATE - Get the performance state.
+ */
+enum hl_passthrough_type {
+ HL_PASSTHROUGH_VERSIONS,
+ HL_GET_ERR_COUNTERS_CMD,
+ HL_GET_P_STATE,
+};
+
+#endif /* CPUCP_IF_H */
diff --git a/include/linux/habanalabs/hl_boot_if.h b/include/linux/habanalabs/hl_boot_if.h
new file mode 100644
index 000000000000..af5fb4ad77eb
--- /dev/null
+++ b/include/linux/habanalabs/hl_boot_if.h
@@ -0,0 +1,807 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018-2023 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef HL_BOOT_IF_H
+#define HL_BOOT_IF_H
+
+#define LKD_HARD_RESET_MAGIC 0xED7BD694 /* deprecated - do not use */
+#define HL_POWER9_HOST_MAGIC 0x1DA30009
+
+#define BOOT_FIT_SRAM_OFFSET 0x200000
+
+#define VERSION_MAX_LEN 128
+
+enum cpu_boot_err {
+ CPU_BOOT_ERR_DRAM_INIT_FAIL = 0,
+ CPU_BOOT_ERR_FIT_CORRUPTED = 1,
+ CPU_BOOT_ERR_TS_INIT_FAIL = 2,
+ CPU_BOOT_ERR_DRAM_SKIPPED = 3,
+ CPU_BOOT_ERR_BMC_WAIT_SKIPPED = 4,
+ CPU_BOOT_ERR_NIC_DATA_NOT_RDY = 5,
+ CPU_BOOT_ERR_NIC_FW_FAIL = 6,
+ CPU_BOOT_ERR_SECURITY_NOT_RDY = 7,
+ CPU_BOOT_ERR_SECURITY_FAIL = 8,
+ CPU_BOOT_ERR_EFUSE_FAIL = 9,
+ CPU_BOOT_ERR_PRI_IMG_VER_FAIL = 10,
+ CPU_BOOT_ERR_SEC_IMG_VER_FAIL = 11,
+ CPU_BOOT_ERR_PLL_FAIL = 12,
+ CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL = 13,
+ CPU_BOOT_ERR_BOOT_FW_CRIT_ERR = 18,
+ CPU_BOOT_ERR_BINNING_FAIL = 19,
+ CPU_BOOT_ERR_TPM_FAIL = 20,
+ CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL = 21,
+ CPU_BOOT_ERR_EEPROM_FAIL = 22,
+ CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL = 23,
+ CPU_BOOT_ERR_ENABLED = 31,
+ CPU_BOOT_ERR_SCND_EN = 63,
+ CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */
+};
+
+/*
+ * Mask for fatal failures
+ * This mask contains all possible fatal failures, and a dynamic code
+ * will clear the non-relevant ones.
+ */
+#define CPU_BOOT_ERR_FATAL_MASK \
+ ((1 << CPU_BOOT_ERR_DRAM_INIT_FAIL) | \
+ (1 << CPU_BOOT_ERR_PLL_FAIL) | \
+ (1 << CPU_BOOT_ERR_BINNING_FAIL) | \
+ (1 << CPU_BOOT_ERR_DRAM_SKIPPED) | \
+ (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL) | \
+ (1 << CPU_BOOT_ERR_EEPROM_FAIL))
+
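+/*
+ * Illustrative sketch (assumed usage, not taken from the driver): check a
+ * boot error register value for fatal failures. The error bits are only
+ * meaningful once the FW has set the ENABLED bit, as described in the
+ * comment below.
+ */
+static inline bool cpu_boot_err_is_fatal(u32 err0)
+{
+	if (!(err0 & (1u << CPU_BOOT_ERR_ENABLED)))
+		return false;
+
+	return !!(err0 & CPU_BOOT_ERR_FATAL_MASK);
+}
+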
+/*
+ * CPU error bits in BOOT_ERROR registers
+ *
+ * CPU_BOOT_ERR0_DRAM_INIT_FAIL DRAM initialization failed.
+ * DRAM is not reliable to use.
+ *
+ * CPU_BOOT_ERR0_FIT_CORRUPTED FIT data integrity verification of the
+ * image provided by the host has failed.
+ *
+ * CPU_BOOT_ERR0_TS_INIT_FAIL Thermal Sensor initialization failed.
+ * Boot continues as usual, but keep in
+ * mind this is a warning.
+ *
+ * CPU_BOOT_ERR0_DRAM_SKIPPED DRAM initialization has been skipped.
+ * Skipping DRAM initialization has been
+ * requested (e.g. strap, command, etc.)
+ * and FW skipped the DRAM initialization.
+ * Host can initialize the DRAM.
+ *
+ * CPU_BOOT_ERR0_BMC_WAIT_SKIPPED Waiting for BMC data will be skipped.
+ * Meaning the BMC data might not be
+ * available until reset.
+ *
+ * CPU_BOOT_ERR0_NIC_DATA_NOT_RDY NIC data from BMC is not ready.
+ * BMC has not provided the NIC data yet.
+ * Once provided this bit will be cleared.
+ *
+ * CPU_BOOT_ERR0_NIC_FW_FAIL NIC FW loading failed.
+ * The NIC FW loading and initialization
+ * failed. This means NICs are not usable.
+ *
+ * CPU_BOOT_ERR0_SECURITY_NOT_RDY Chip security initialization has been
+ * started, but is not ready yet - chip
+ * cannot be accessed.
+ *
+ * CPU_BOOT_ERR0_SECURITY_FAIL Security related tasks have failed.
+ * The tasks are security init (root of
+ * trust), boot authentication (chain of
+ * trust), data packets authentication.
+ *
+ * CPU_BOOT_ERR0_EFUSE_FAIL Reading from eFuse failed.
+ * The PCI device ID might be wrong.
+ *
+ * CPU_BOOT_ERR0_PRI_IMG_VER_FAIL Verification of primary image failed.
+ *					It means that the ppboot checksum
+ *					verification for the preboot primary
+ *					image failed to match the expected
+ *					checksum. Programming the image again
+ *					might solve this.
+ *
+ * CPU_BOOT_ERR0_SEC_IMG_VER_FAIL Verification of secondary image failed.
+ *					It means that the ppboot checksum
+ *					verification for the preboot secondary
+ *					image failed to match the expected
+ *					checksum. Programming the image again
+ *					might solve this.
+ *
+ * CPU_BOOT_ERR0_PLL_FAIL PLL settings failed, meaning that one
+ * of the PLLs remains in REF_CLK
+ *
+ * CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL Device is unusable and customer support
+ * should be contacted.
+ *
+ * CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR Critical error was detected during
+ *					the execution of ppboot or preboot,
+ *					for example a stack overflow.
+ *
+ * CPU_BOOT_ERR0_BINNING_FAIL Binning settings failed, meaning
+ * malfunctioning components might still be
+ * in use.
+ *
+ * CPU_BOOT_ERR0_TPM_FAIL TPM verification flow failed.
+ *
+ *					Failed to set the threshold for the
+ *					temperature sensor.
+ *
+ * CPU_BOOT_ERR_EEPROM_FAIL Failed reading EEPROM data. Defaults
+ * are used.
+ *
+ * CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL Failed scrubbing the Engines/ARCFarm
+ * memories. Boot disabled until reset.
+ *
+ * CPU_BOOT_ERR0_ENABLED Error registers enabled.
+ * This is a main indication that the
+ * running FW populates the error
+ * registers. Meaning the error bits are
+ * not garbage, but actual error statuses.
+ */
+#define CPU_BOOT_ERR0_DRAM_INIT_FAIL (1 << CPU_BOOT_ERR_DRAM_INIT_FAIL)
+#define CPU_BOOT_ERR0_FIT_CORRUPTED (1 << CPU_BOOT_ERR_FIT_CORRUPTED)
+#define CPU_BOOT_ERR0_TS_INIT_FAIL (1 << CPU_BOOT_ERR_TS_INIT_FAIL)
+#define CPU_BOOT_ERR0_DRAM_SKIPPED (1 << CPU_BOOT_ERR_DRAM_SKIPPED)
+#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << CPU_BOOT_ERR_BMC_WAIT_SKIPPED)
+#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << CPU_BOOT_ERR_NIC_DATA_NOT_RDY)
+#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << CPU_BOOT_ERR_NIC_FW_FAIL)
+#define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << CPU_BOOT_ERR_SECURITY_NOT_RDY)
+#define CPU_BOOT_ERR0_SECURITY_FAIL (1 << CPU_BOOT_ERR_SECURITY_FAIL)
+#define CPU_BOOT_ERR0_EFUSE_FAIL (1 << CPU_BOOT_ERR_EFUSE_FAIL)
+#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL (1 << CPU_BOOT_ERR_PRI_IMG_VER_FAIL)
+#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << CPU_BOOT_ERR_SEC_IMG_VER_FAIL)
+#define CPU_BOOT_ERR0_PLL_FAIL (1 << CPU_BOOT_ERR_PLL_FAIL)
+#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL)
+#define CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR (1 << CPU_BOOT_ERR_BOOT_FW_CRIT_ERR)
+#define CPU_BOOT_ERR0_BINNING_FAIL (1 << CPU_BOOT_ERR_BINNING_FAIL)
+#define CPU_BOOT_ERR0_TPM_FAIL (1 << CPU_BOOT_ERR_TPM_FAIL)
+#define CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL (1 << CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL)
+#define CPU_BOOT_ERR0_EEPROM_FAIL (1 << CPU_BOOT_ERR_EEPROM_FAIL)
+#define CPU_BOOT_ERR0_ENG_ARC_MEM_SCRUB_FAIL (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL)
+#define CPU_BOOT_ERR0_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
+#define CPU_BOOT_ERR1_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
+
+enum cpu_boot_dev_sts {
+ CPU_BOOT_DEV_STS_SECURITY_EN = 0,
+ CPU_BOOT_DEV_STS_DEBUG_EN = 1,
+ CPU_BOOT_DEV_STS_WATCHDOG_EN = 2,
+ CPU_BOOT_DEV_STS_DRAM_INIT_EN = 3,
+ CPU_BOOT_DEV_STS_BMC_WAIT_EN = 4,
+ CPU_BOOT_DEV_STS_E2E_CRED_EN = 5,
+ CPU_BOOT_DEV_STS_HBM_CRED_EN = 6,
+ CPU_BOOT_DEV_STS_RL_EN = 7,
+ CPU_BOOT_DEV_STS_SRAM_SCR_EN = 8,
+ CPU_BOOT_DEV_STS_DRAM_SCR_EN = 9,
+ CPU_BOOT_DEV_STS_FW_HARD_RST_EN = 10,
+ CPU_BOOT_DEV_STS_PLL_INFO_EN = 11,
+ CPU_BOOT_DEV_STS_SP_SRAM_EN = 12,
+ CPU_BOOT_DEV_STS_CLK_GATE_EN = 13,
+ CPU_BOOT_DEV_STS_HBM_ECC_EN = 14,
+ CPU_BOOT_DEV_STS_PKT_PI_ACK_EN = 15,
+ CPU_BOOT_DEV_STS_FW_LD_COM_EN = 16,
+ CPU_BOOT_DEV_STS_FW_IATU_CONF_EN = 17,
+ CPU_BOOT_DEV_STS_FW_NIC_MAC_EN = 18,
+ CPU_BOOT_DEV_STS_DYN_PLL_EN = 19,
+ CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN = 20,
+ CPU_BOOT_DEV_STS_EQ_INDEX_EN = 21,
+ CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN = 22,
+ CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN = 23,
+ CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN = 24,
+ CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN = 25,
+ CPU_BOOT_DEV_STS_MAP_HWMON_EN = 26,
+ CPU_BOOT_DEV_STS_NIC_MEM_CLEAR_EN = 27,
+ CPU_BOOT_DEV_STS_MMU_PGTBL_DRAM_EN = 28,
+ CPU_BOOT_DEV_STS_ENABLED = 31,
+ CPU_BOOT_DEV_STS_SCND_EN = 63,
+ CPU_BOOT_DEV_STS_LAST = 64 /* we have 2 registers of 32 bits */
+};
+
+/*
+ * BOOT DEVICE STATUS bits in BOOT_DEVICE_STS registers
+ *
+ * CPU_BOOT_DEV_STS0_SECURITY_EN Security is Enabled.
+ * This is an indication for security
+ * enabled in FW, which means that
+ * all conditions for security are met:
+ * device is indicated as security enabled,
+ * registers are protected, and device
+ * uses keys for image verification.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_DEBUG_EN Debug is enabled.
+ * Enabled when JTAG or DEBUG is enabled
+ * in FW.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_WATCHDOG_EN Watchdog is enabled.
+ * Watchdog is enabled in FW.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_DRAM_INIT_EN DRAM initialization is enabled.
+ * DRAM initialization has been done in FW.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_BMC_WAIT_EN Waiting for BMC data enabled.
+ * If set, it means that during boot,
+ * FW waited for BMC data.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_E2E_CRED_EN E2E credits initialized.
+ * FW initialized E2E credits.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_HBM_CRED_EN HBM credits initialized.
+ * FW initialized HBM credits.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_RL_EN Rate limiter initialized.
+ * FW initialized rate limiter.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_SRAM_SCR_EN SRAM scrambler enabled.
+ * FW initialized SRAM scrambler.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_DRAM_SCR_EN DRAM scrambler enabled.
+ * FW initialized DRAM scrambler.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_FW_HARD_RST_EN FW hard reset procedure is enabled.
+ * FW has the hard reset procedure
+ * implemented. This means that FW will
+ * perform hard reset procedure on
+ * receiving the halt-machine event.
+ * Initialized in: preboot, u-boot, linux
+ *
+ * CPU_BOOT_DEV_STS0_PLL_INFO_EN FW retrieval of PLL info is enabled.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_SP_SRAM_EN SP SRAM is initialized and available
+ * for use.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_CLK_GATE_EN Clock Gating enabled.
+ * FW initialized Clock Gating.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_HBM_ECC_EN HBM ECC handling Enabled.
+ * FW handles HBM ECC indications.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN Packets ack value used in the armcpd
+ * is set to the PI counter.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_LD_COM_EN Flexible FW loading communication
+ * protocol is enabled.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN FW iATU configuration is enabled.
+ *					If this bit is set, the iATU has been
+ *					configured and is ready for use.
+ * Initialized in: ppboot
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN NIC MAC channels init is done by FW and
+ * any access to them is done via the FW.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_DYN_PLL_EN Dynamic PLL configuration is enabled.
+ * FW sends to host a bitmap of supported
+ * PLLs.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN GIC access permission only from
+ * privileged entity. FW sets this status
+ *					bit for the host. If this bit is set,
+ *					the GIC cannot be accessed from the
+ *					host.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_EQ_INDEX_EN Event Queue (EQ) index is a running
+ * index for each new event sent to host.
+ *					This is used by the host to identify
+ *					that the event waiting in the queue is
+ *					actually a new event which was not
+ *					served before.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN Use multiple scratchpad interfaces to
+ * prevent IRQs overriding each other.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN
+ * NIC STAT and XPCS91 access is restricted
+ * and is done via FW only.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN
+ * NIC STAT get all is supported.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN
+ * F/W checks if the device is idle by reading defined set
+ * of registers. It returns a bitmask of all the engines,
+ * where a bit is set if the engine is not idle.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_MAP_HWMON_EN
+ * If set, means f/w supports proprietary
+ * HWMON enum mapping to cpucp enums.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_NIC_MEM_CLEAR_EN
+ *					If set, means the f/w supports NIC HBM
+ *					memory clear and TMR/TXS HBM memory
+ *					init.
+ * Initialized in: zephyr-mgmt
+ *
+ * CPU_BOOT_DEV_STS_MMU_PGTBL_DRAM_EN
+ * MMU page tables are located in DRAM.
+ * F/W initializes security settings for MMU
+ * page tables to reside in DRAM.
+ * Initialized in: zephyr-mgmt
+ *
+ * CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
+ * This is a main indication that the
+ * running FW populates the device status
+ * register. Meaning the device status
+ * bits are not garbage, but actual
+ * statuses.
+ * Initialized in: preboot
+ *
+ */
+#define CPU_BOOT_DEV_STS0_SECURITY_EN (1 << CPU_BOOT_DEV_STS_SECURITY_EN)
+#define CPU_BOOT_DEV_STS0_DEBUG_EN (1 << CPU_BOOT_DEV_STS_DEBUG_EN)
+#define CPU_BOOT_DEV_STS0_WATCHDOG_EN (1 << CPU_BOOT_DEV_STS_WATCHDOG_EN)
+#define CPU_BOOT_DEV_STS0_DRAM_INIT_EN (1 << CPU_BOOT_DEV_STS_DRAM_INIT_EN)
+#define CPU_BOOT_DEV_STS0_BMC_WAIT_EN (1 << CPU_BOOT_DEV_STS_BMC_WAIT_EN)
+#define CPU_BOOT_DEV_STS0_E2E_CRED_EN (1 << CPU_BOOT_DEV_STS_E2E_CRED_EN)
+#define CPU_BOOT_DEV_STS0_HBM_CRED_EN (1 << CPU_BOOT_DEV_STS_HBM_CRED_EN)
+#define CPU_BOOT_DEV_STS0_RL_EN (1 << CPU_BOOT_DEV_STS_RL_EN)
+#define CPU_BOOT_DEV_STS0_SRAM_SCR_EN (1 << CPU_BOOT_DEV_STS_SRAM_SCR_EN)
+#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << CPU_BOOT_DEV_STS_DRAM_SCR_EN)
+#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << CPU_BOOT_DEV_STS_FW_HARD_RST_EN)
+#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << CPU_BOOT_DEV_STS_PLL_INFO_EN)
+#define CPU_BOOT_DEV_STS0_SP_SRAM_EN (1 << CPU_BOOT_DEV_STS_SP_SRAM_EN)
+#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << CPU_BOOT_DEV_STS_CLK_GATE_EN)
+#define CPU_BOOT_DEV_STS0_HBM_ECC_EN (1 << CPU_BOOT_DEV_STS_HBM_ECC_EN)
+#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << CPU_BOOT_DEV_STS_PKT_PI_ACK_EN)
+#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN (1 << CPU_BOOT_DEV_STS_FW_LD_COM_EN)
+#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN (1 << CPU_BOOT_DEV_STS_FW_IATU_CONF_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_MAC_EN)
+#define CPU_BOOT_DEV_STS0_DYN_PLL_EN (1 << CPU_BOOT_DEV_STS_DYN_PLL_EN)
+#define CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN (1 << CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN)
+#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN (1 << CPU_BOOT_DEV_STS_EQ_INDEX_EN)
+#define CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN (1 << CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN)
+#define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN (1 << CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN)
+#define CPU_BOOT_DEV_STS0_MAP_HWMON_EN (1 << CPU_BOOT_DEV_STS_MAP_HWMON_EN)
+#define CPU_BOOT_DEV_STS0_NIC_MEM_CLEAR_EN (1 << CPU_BOOT_DEV_STS_NIC_MEM_CLEAR_EN)
+#define CPU_BOOT_DEV_STS0_MMU_PGTBL_DRAM_EN (1 << CPU_BOOT_DEV_STS_MMU_PGTBL_DRAM_EN)
+#define CPU_BOOT_DEV_STS0_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED)
+#define CPU_BOOT_DEV_STS1_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED)
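+
+/*
+ * Illustrative sketch (assumed usage, not taken from the driver): a status
+ * bit is only meaningful once the running FW has set the ENABLED bit, as
+ * explained in the comment above.
+ */
+static inline bool cpu_boot_dev_sts0_test(u32 sts0, u32 feature_mask)
+{
+	return (sts0 & CPU_BOOT_DEV_STS0_ENABLED) && (sts0 & feature_mask);
+}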
+
+enum cpu_boot_status {
+ CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */
+ CPU_BOOT_STATUS_IN_WFE = 1,
+ CPU_BOOT_STATUS_DRAM_RDY = 2,
+ CPU_BOOT_STATUS_SRAM_AVAIL = 3,
+ CPU_BOOT_STATUS_IN_BTL = 4, /* BTL is H/W FSM */
+ CPU_BOOT_STATUS_IN_PREBOOT = 5,
+ CPU_BOOT_STATUS_IN_SPL, /* deprecated - not reported */
+ CPU_BOOT_STATUS_IN_UBOOT = 7,
+ CPU_BOOT_STATUS_DRAM_INIT_FAIL, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_FIT_CORRUPTED, /* deprecated - will be removed */
+ /* U-Boot console prompt activated, commands are not processed */
+ CPU_BOOT_STATUS_UBOOT_NOT_READY = 10,
+ /* Finished NICs init, reported after DRAM and NICs */
+ CPU_BOOT_STATUS_NIC_FW_RDY = 11,
+ CPU_BOOT_STATUS_TS_INIT_FAIL, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_DRAM_SKIPPED, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_BMC_WAITING_SKIPPED, /* deprecated - will be removed */
+ /* Last boot loader progress status, ready to receive commands */
+ CPU_BOOT_STATUS_READY_TO_BOOT = 15,
+ /* Internal Boot finished, ready for boot-fit */
+ CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT = 16,
+ /* Internal Security has been initialized, device can be accessed */
+ CPU_BOOT_STATUS_SECURITY_READY = 17,
+ /* FW component is preparing to shutdown and communication with host is not available */
+ CPU_BOOT_STATUS_FW_SHUTDOWN_PREP = 18,
+};
+
+enum kmd_msg {
+ KMD_MSG_NA = 0,
+ KMD_MSG_GOTO_WFE,
+ KMD_MSG_FIT_RDY,
+ KMD_MSG_SKIP_BMC,
+ RESERVED,
+ KMD_MSG_RST_DEV,
+ KMD_MSG_LAST
+};
+
+enum cpu_msg_status {
+ CPU_MSG_CLR = 0,
+ CPU_MSG_OK,
+ CPU_MSG_ERR,
+};
+
+/* communication registers mapping - consider ABI when changing */
+struct cpu_dyn_regs {
+ __le32 cpu_pq_base_addr_low;
+ __le32 cpu_pq_base_addr_high;
+ __le32 cpu_pq_length;
+ __le32 cpu_pq_init_status;
+ __le32 cpu_eq_base_addr_low;
+ __le32 cpu_eq_base_addr_high;
+ __le32 cpu_eq_length;
+ __le32 cpu_eq_ci;
+ __le32 cpu_cq_base_addr_low;
+ __le32 cpu_cq_base_addr_high;
+ __le32 cpu_cq_length;
+ __le32 cpu_pf_pq_pi;
+ __le32 cpu_boot_dev_sts0;
+ __le32 cpu_boot_dev_sts1;
+ __le32 cpu_boot_err0;
+ __le32 cpu_boot_err1;
+ __le32 cpu_boot_status;
+ __le32 fw_upd_sts;
+ __le32 fw_upd_cmd;
+ __le32 fw_upd_pending_sts;
+ __le32 fuse_ver_offset;
+ __le32 preboot_ver_offset;
+ __le32 uboot_ver_offset;
+ __le32 hw_state;
+ __le32 kmd_msg_to_cpu;
+ __le32 cpu_cmd_status_to_host;
+ __le32 gic_host_pi_upd_irq;
+ __le32 gic_tpc_qm_irq_ctrl;
+ __le32 gic_mme_qm_irq_ctrl;
+ __le32 gic_dma_qm_irq_ctrl;
+ __le32 gic_nic_qm_irq_ctrl;
+ __le32 gic_dma_core_irq_ctrl;
+ __le32 gic_host_halt_irq;
+ __le32 gic_host_ints_irq;
+ __le32 reserved0;
+ __le32 gic_rot_qm_irq_ctrl;
+ __le32 reserved1;
+ __le32 eng_arc_irq_ctrl;
+ __le32 reserved2[20]; /* reserve for future use */
+};
+
+/* TODO: remove the desc magic after the code is updated to use message */
+/* HCDM - Habana Communications Descriptor Magic */
+#define HL_COMMS_DESC_MAGIC 0x4843444D
+#define HL_COMMS_DESC_VER 3
+
+/* HCMv - Habana Communications Message + header version */
+#define HL_COMMS_MSG_MAGIC_VALUE 0x48434D00
+#define HL_COMMS_MSG_MAGIC_MASK 0xFFFFFF00
+#define HL_COMMS_MSG_MAGIC_VER_MASK 0xFF
+
+#define HL_COMMS_MSG_MAGIC_VER(ver) (HL_COMMS_MSG_MAGIC_VALUE | \
+ ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK))
+#define HL_COMMS_MSG_MAGIC_V0 HL_COMMS_DESC_MAGIC
+#define HL_COMMS_MSG_MAGIC_V1 HL_COMMS_MSG_MAGIC_VER(1)
+#define HL_COMMS_MSG_MAGIC_V2 HL_COMMS_MSG_MAGIC_VER(2)
+#define HL_COMMS_MSG_MAGIC_V3 HL_COMMS_MSG_MAGIC_VER(3)
+
+#define HL_COMMS_MSG_MAGIC HL_COMMS_MSG_MAGIC_V3
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC(magic) \
+ (((magic) & HL_COMMS_MSG_MAGIC_MASK) == \
+ HL_COMMS_MSG_MAGIC_VALUE)
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE_VERSION(magic, ver) \
+ (((magic) & HL_COMMS_MSG_MAGIC_VER_MASK) >= \
+ ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK))
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE(magic, ver) \
+ (HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC((magic)) && \
+ HL_COMMS_MSG_MAGIC_VALIDATE_VERSION((magic), (ver)))
+
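+/*
+ * Usage example (a sketch; requiring version 1 is an arbitrary choice): a
+ * consumer accepts a message whose magic matches and whose embedded version
+ * is at least the one it needs.
+ */
+static inline bool hl_comms_msg_magic_ok(u32 magic)
+{
+	return HL_COMMS_MSG_MAGIC_VALIDATE(magic, 1);
+}
+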
+enum comms_msg_type {
+ HL_COMMS_DESC_TYPE = 0,
+ HL_COMMS_RESET_CAUSE_TYPE = 1,
+ HL_COMMS_FW_CFG_SKIP_TYPE = 2,
+ HL_COMMS_BINNING_CONF_TYPE = 3,
+};
+
+/*
+ * Binning information shared between LKD and FW
+ * @tpc_mask_l - TPC binning information lower 64 bit
+ * @dec_mask - Decoder binning information
+ * @dram_mask - DRAM binning information
+ * @edma_mask - EDMA binning information
+ * @mme_mask_l - MME binning information lower 32
+ * @mme_mask_h - MME binning information upper 32
+ * @rot_mask - Rotator binning information
+ * @xbar_mask - xBAR binning information
+ * @reserved - reserved field for future binning info w/o ABI change
+ * @tpc_mask_h - TPC binning information upper 64 bit
+ * @nic_mask - NIC binning information
+ */
+struct lkd_fw_binning_info {
+ __le64 tpc_mask_l;
+ __le32 dec_mask;
+ __le32 dram_mask;
+ __le32 edma_mask;
+ __le32 mme_mask_l;
+ __le32 mme_mask_h;
+ __le32 rot_mask;
+ __le32 xbar_mask;
+ __le32 reserved0;
+ __le64 tpc_mask_h;
+ __le64 nic_mask;
+ __le32 reserved1[8];
+};
+
+/* TODO: remove this struct after the code is updated to use message */
+/* this is the comms descriptor header - meta data */
+struct comms_desc_header {
+ __le32 magic; /* magic for validation */
+ __le32 crc32; /* CRC32 of the descriptor w/o header */
+ __le16 size; /* size of the descriptor w/o header */
+ __u8 version; /* descriptor version */
+ __u8 reserved[5]; /* pad to 64 bit */
+};
+
+/* this is the comms message header - meta data */
+struct comms_msg_header {
+ __le32 magic; /* magic for validation */
+ __le32 crc32; /* CRC32 of the message w/o header */
+ __le16 size; /* size of the message w/o header */
+ __u8 version; /* message payload version */
+ __u8 type; /* message type */
+ __u8 reserved[4]; /* pad to 64 bit */
+};
+
+enum lkd_fw_ascii_msg_lvls {
+ LKD_FW_ASCII_MSG_ERR = 0,
+ LKD_FW_ASCII_MSG_WRN = 1,
+ LKD_FW_ASCII_MSG_INF = 2,
+ LKD_FW_ASCII_MSG_DBG = 3,
+};
+
+#define LKD_FW_ASCII_MSG_MAX_LEN 128
+#define LKD_FW_ASCII_MSG_MAX 4 /* consider ABI when changing */
+#define LKD_FW_ASCII_MSG_MIN_DESC_VERSION 3
+
+struct lkd_fw_ascii_msg {
+ __u8 valid;
+ __u8 msg_lvl;
+ __u8 reserved[6];
+ char msg[LKD_FW_ASCII_MSG_MAX_LEN];
+};
+
+/* this is the main FW descriptor - consider ABI when changing */
+struct lkd_fw_comms_desc {
+ struct comms_desc_header header;
+ struct cpu_dyn_regs cpu_dyn_regs;
+ char fuse_ver[VERSION_MAX_LEN];
+ char cur_fw_ver[VERSION_MAX_LEN];
+ /* can be used for 1 more version w/o ABI change */
+ char reserved0[VERSION_MAX_LEN];
+ __le64 img_addr; /* address for next FW component load */
+ struct lkd_fw_binning_info binning_info;
+ struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX];
+ __le32 rsvd_mem_size_mb; /* reserved memory size [MB] for FW/SVE */
+ char reserved1[4];
+};
+
+enum comms_reset_cause {
+ HL_RESET_CAUSE_UNKNOWN = 0,
+ HL_RESET_CAUSE_HEARTBEAT = 1,
+ HL_RESET_CAUSE_TDR = 2,
+};
+
+/* TODO: remove define after struct name is aligned on all projects */
+#define lkd_msg_comms lkd_fw_comms_msg
+
+/* this is the comms message descriptor */
+struct lkd_fw_comms_msg {
+ struct comms_msg_header header;
+	/* union for future expansion with new messages */
+ union {
+ struct {
+ struct cpu_dyn_regs cpu_dyn_regs;
+ char fuse_ver[VERSION_MAX_LEN];
+ char cur_fw_ver[VERSION_MAX_LEN];
+ /* can be used for 1 more version w/o ABI change */
+ char reserved0[VERSION_MAX_LEN];
+ /* address for next FW component load */
+ __le64 img_addr;
+ struct lkd_fw_binning_info binning_info;
+ struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX];
+ /* reserved memory size [MB] for FW/SVE */
+ __le32 rsvd_mem_size_mb;
+ char reserved1[4];
+ };
+ struct {
+ __u8 reset_cause;
+ };
+ struct {
+ __u8 fw_cfg_skip; /* 1 - skip, 0 - don't skip */
+ };
+ struct lkd_fw_binning_info binning_conf;
+ };
+};
+
+/*
+ * LKD commands:
+ *
+ * COMMS_NOOP			Used to clear the command register; no actual
+ *				command is sent.
+ *
+ * COMMS_CLR_STS Clear status command - FW should clear the
+ * status register. Used for synchronization
+ * between the commands as part of the race free
+ * protocol.
+ *
+ * COMMS_RST_STATE Reset the current communication state which is
+ * kept by FW for proper responses.
+ * Should be used in the beginning of the
+ * communication cycle to clean any leftovers from
+ * previous communication attempts.
+ *
+ * COMMS_PREP_DESC Prepare descriptor for setting up the
+ * communication and other dynamic data:
+ * struct lkd_fw_comms_desc.
+ * This command has a parameter stating the next FW
+ * component size, so the FW can actually prepare a
+ * space for it and in the status response provide
+ *				the descriptor offset. The offset of the next FW
+ *				data component is part of the descriptor
+ *				structure.
+ *
+ * COMMS_DATA_RDY The FW data has been uploaded and is ready for
+ * validation.
+ *
+ * COMMS_EXEC Execute the next FW component.
+ *
+ * COMMS_RST_DEV Reset the device.
+ *
+ * COMMS_GOTO_WFE Execute WFE command. Allowed only on non-secure
+ * devices.
+ *
+ * COMMS_SKIP_BMC Perform actions required for BMC-less servers.
+ * Do not wait for BMC response.
+ *
+ * COMMS_PREP_DESC_ELBI		Same as COMMS_PREP_DESC, except that the memory
+ *				space is allocated in an ELBI-access-only
+ *				address range.
+ *
+ */
+enum comms_cmd {
+ COMMS_NOOP = 0,
+ COMMS_CLR_STS = 1,
+ COMMS_RST_STATE = 2,
+ COMMS_PREP_DESC = 3,
+ COMMS_DATA_RDY = 4,
+ COMMS_EXEC = 5,
+ COMMS_RST_DEV = 6,
+ COMMS_GOTO_WFE = 7,
+ COMMS_SKIP_BMC = 8,
+ COMMS_PREP_DESC_ELBI = 10,
+ COMMS_INVLD_LAST
+};
+
+#define COMMS_COMMAND_SIZE_SHIFT 0
+#define COMMS_COMMAND_SIZE_MASK 0x1FFFFFF
+#define COMMS_COMMAND_CMD_SHIFT 27
+#define COMMS_COMMAND_CMD_MASK 0xF8000000
+
+/*
+ * LKD command to FW register structure
+ * @size - FW component size
+ * @cmd - command from enum comms_cmd
+ */
+struct comms_command {
+ union { /* bit fields are only for FW use */
+ struct {
+ u32 size :25; /* 32MB max. */
+ u32 reserved :2;
+ enum comms_cmd cmd :5; /* 32 commands */
+ };
+ __le32 val;
+ };
+};
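+
+/*
+ * Illustrative sketch (assumed host-side helper): compose the command
+ * register value with the shift/mask macros above, since the bit fields in
+ * struct comms_command are only for FW use.
+ */
+static inline u32 comms_command_compose(enum comms_cmd cmd, u32 size)
+{
+	return (((u32)cmd << COMMS_COMMAND_CMD_SHIFT) & COMMS_COMMAND_CMD_MASK) |
+		((size << COMMS_COMMAND_SIZE_SHIFT) & COMMS_COMMAND_SIZE_MASK);
+}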
+
+/*
+ * FW status
+ *
+ * COMMS_STS_NOOP		Used to clear the status register; no actual
+ *				status is provided.
+ *
+ * COMMS_STS_ACK Command has been received and recognized.
+ *
+ * COMMS_STS_OK Command execution has finished successfully.
+ *
+ * COMMS_STS_ERR Command execution was unsuccessful and resulted
+ * in error.
+ *
+ * COMMS_STS_VALID_ERR FW validation has failed.
+ *
+ * COMMS_STS_TIMEOUT_ERR Command execution has timed out.
+ */
+enum comms_sts {
+ COMMS_STS_NOOP = 0,
+ COMMS_STS_ACK = 1,
+ COMMS_STS_OK = 2,
+ COMMS_STS_ERR = 3,
+ COMMS_STS_VALID_ERR = 4,
+ COMMS_STS_TIMEOUT_ERR = 5,
+ COMMS_STS_INVLD_LAST
+};
+
+/* RAM types for FW components loading - defines the base address */
+enum comms_ram_types {
+ COMMS_SRAM = 0,
+ COMMS_DRAM = 1,
+};
+
+#define COMMS_STATUS_OFFSET_SHIFT 0
+#define COMMS_STATUS_OFFSET_MASK 0x03FFFFFF
+#define COMMS_STATUS_OFFSET_ALIGN_SHIFT 2
+#define COMMS_STATUS_RAM_TYPE_SHIFT 26
+#define COMMS_STATUS_RAM_TYPE_MASK 0x0C000000
+#define COMMS_STATUS_STATUS_SHIFT 28
+#define COMMS_STATUS_STATUS_MASK 0xF0000000
+
+/*
+ * FW status to LKD register structure
+ * @offset - an offset from the base of the ram_type shifted right by
+ * 2 bits (always aligned to 32 bits).
+ * Allows a maximum addressable offset of 256MB from RAM base.
+ * Example: for real offset in RAM of 0x800000 (8MB), the value
+ * in offset field is (0x800000 >> 2) = 0x200000.
+ * @ram_type - the RAM type that should be used for offset from
+ * enum comms_ram_types
+ * @status - status from enum comms_sts
+ */
+struct comms_status {
+ union { /* bit fields are only for FW use */
+ struct {
+ u32 offset :26;
+ enum comms_ram_types ram_type :2;
+ enum comms_sts status :4; /* 16 statuses */
+ };
+ __le32 val;
+ };
+};
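+
+/*
+ * Illustrative sketch (assumed host-side helper): recover the real RAM
+ * offset from a status register value. Per the example above, a stored
+ * offset of 0x200000 decodes to 0x800000.
+ */
+static inline u32 comms_status_real_offset(u32 val)
+{
+	return ((val & COMMS_STATUS_OFFSET_MASK) >> COMMS_STATUS_OFFSET_SHIFT) <<
+			COMMS_STATUS_OFFSET_ALIGN_SHIFT;
+}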
+
+#define NAME_MAX_LEN 32 /* bytes */
+struct hl_module_data {
+ __u8 name[NAME_MAX_LEN];
+ __u8 version[VERSION_MAX_LEN];
+};
+
+/**
+ * struct hl_component_versions - versions associated with hl component.
+ * @struct_size: size of all the struct (including dynamic size of modules).
+ * @modules_offset: offset of the modules field in this struct.
+ * @component: version of the component itself.
+ * @fw_os: Firmware OS Version.
+ * @comp_name: Name of the component.
+ * @modules_counter: number of set bits in modules_mask.
+ * @reserved: reserved for future use.
+ * @modules: versions of the component's modules. See struct cpucp_versions
+ *           for an elaborated explanation.
+ */
+struct hl_component_versions {
+ __le16 struct_size;
+ __le16 modules_offset;
+ __u8 component[VERSION_MAX_LEN];
+ __u8 fw_os[VERSION_MAX_LEN];
+ __u8 comp_name[NAME_MAX_LEN];
+ __u8 modules_counter;
+ __u8 reserved[3];
+ struct hl_module_data modules[];
+};
+
+/* Max size of the FW versions FIT */
+#define HL_FW_VERSIONS_FIT_SIZE 4096
+
+#endif /* HL_BOOT_IF_H */
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 69bc86ea382c..d57cab4d4c06 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -92,14 +92,6 @@ void irq_exit_rcu(void);
#define arch_nmi_exit() do { } while (0)
#endif
-#ifdef CONFIG_TINY_RCU
-static inline void rcu_nmi_enter(void) { }
-static inline void rcu_nmi_exit(void) { }
-#else
-extern void rcu_nmi_enter(void);
-extern void rcu_nmi_exit(void);
-#endif
-
/*
* NMI vs Tracing
* --------------
@@ -116,7 +108,6 @@ extern void rcu_nmi_exit(void);
do { \
lockdep_off(); \
arch_nmi_enter(); \
- printk_nmi_enter(); \
BUG_ON(in_nmi() == NMI_MASK); \
__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
} while (0)
@@ -125,7 +116,7 @@ extern void rcu_nmi_exit(void);
do { \
__nmi_enter(); \
lockdep_hardirq_enter(); \
- rcu_nmi_enter(); \
+ ct_nmi_enter(); \
instrumentation_begin(); \
ftrace_nmi_enter(); \
instrumentation_end(); \
@@ -135,7 +126,6 @@ extern void rcu_nmi_exit(void);
do { \
BUG_ON(!in_nmi()); \
__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
- printk_nmi_exit(); \
arch_nmi_exit(); \
lockdep_on(); \
} while (0)
@@ -145,7 +135,7 @@ extern void rcu_nmi_exit(void);
instrumentation_begin(); \
ftrace_nmi_exit(); \
instrumentation_end(); \
- rcu_nmi_exit(); \
+ ct_nmi_exit(); \
lockdep_hardirq_exit(); \
__nmi_exit(); \
} while (0)
diff --git a/include/linux/hash.h b/include/linux/hash.h
index ad6fa21d977b..38edaa08f862 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -62,10 +62,7 @@ static inline u32 __hash_32_generic(u32 val)
return val * GOLDEN_RATIO_32;
}
-#ifndef HAVE_ARCH_HASH_32
-#define hash_32 hash_32_generic
-#endif
-static inline u32 hash_32_generic(u32 val, unsigned int bits)
+static inline u32 hash_32(u32 val, unsigned int bits)
{
/* High bits are more random, so use them. */
return __hash_32(val) >> (32 - bits);
diff --git a/include/linux/hashtable_api.h b/include/linux/hashtable_api.h
new file mode 100644
index 000000000000..c268ac2c5c0e
--- /dev/null
+++ b/include/linux/hashtable_api.h
@@ -0,0 +1 @@
+#include <linux/hashtable.h>
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index cacc4dd27794..630a388035f1 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -22,7 +22,7 @@ struct hdlc_proto {
void (*start)(struct net_device *dev); /* if open & DCD */
void (*stop)(struct net_device *dev); /* if open & !DCD */
void (*detach)(struct net_device *dev);
- int (*ioctl)(struct net_device *dev, struct ifreq *ifr);
+ int (*ioctl)(struct net_device *dev, struct if_settings *ifs);
__be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev);
int (*netif_rx)(struct sk_buff *skb);
netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
@@ -54,7 +54,7 @@ typedef struct hdlc_device {
/* Exported from hdlc module */
/* Called by hardware driver when a user requests HDLC service */
-int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+int hdlc_ioctl(struct net_device *dev, struct if_settings *ifs);
/* Must be used by hardware driver on module startup/exit */
#define register_hdlc_device(dev) register_netdev(dev)
diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h
index d4d633a49d36..5d70c3f98f5b 100644
--- a/include/linux/hdlcdrv.h
+++ b/include/linux/hdlcdrv.h
@@ -79,7 +79,7 @@ struct hdlcdrv_ops {
*/
int (*open)(struct net_device *);
int (*close)(struct net_device *);
- int (*ioctl)(struct net_device *, struct ifreq *,
+ int (*ioctl)(struct net_device *, void __user *,
struct hdlcdrv_ioctl *, int);
};
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index c8ec982ff498..96bda41d9148 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -59,6 +59,15 @@ enum hdmi_infoframe_type {
#define HDMI_DRM_INFOFRAME_SIZE 26
#define HDMI_VENDOR_INFOFRAME_SIZE 4
+/*
+ * HDMI 1.3a table 5-14 states that the largest InfoFrame_length is 27,
+ * not including the packet header or checksum byte. We include the
+ * checksum byte in HDMI_INFOFRAME_HEADER_SIZE, so this should allow
+ * HDMI_INFOFRAME_SIZE(MAX) to be the largest buffer we could ever need
+ * for any HDMI infoframe.
+ */
+#define HDMI_MAX_INFOFRAME_SIZE 27
+
#define HDMI_INFOFRAME_SIZE(type) \
(HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
@@ -170,19 +179,19 @@ struct hdmi_avi_infoframe {
enum hdmi_infoframe_type type;
unsigned char version;
unsigned char length;
+ bool itc;
+ unsigned char pixel_repeat;
enum hdmi_colorspace colorspace;
enum hdmi_scan_mode scan_mode;
enum hdmi_colorimetry colorimetry;
enum hdmi_picture_aspect picture_aspect;
enum hdmi_active_aspect active_aspect;
- bool itc;
enum hdmi_extended_colorimetry extended_colorimetry;
enum hdmi_quantization_range quantization_range;
enum hdmi_nups nups;
unsigned char video_code;
enum hdmi_ycc_quantization_range ycc_quantization_range;
enum hdmi_content_type content_type;
- unsigned char pixel_repeat;
unsigned short top_bar;
unsigned short bottom_bar;
unsigned short left_bar;
@@ -336,7 +345,12 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
-int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame);
+int hdmi_audio_infoframe_check(const struct hdmi_audio_infoframe *frame);
+
+struct dp_sdp;
+ssize_t
+hdmi_audio_infoframe_pack_for_dp(const struct hdmi_audio_infoframe *frame,
+ struct dp_sdp *sdp, u8 dp_version);
enum hdmi_3d_structure {
HDMI_3D_STRUCTURE_INVALID = -1,
@@ -431,7 +445,6 @@ ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer,
size_t size);
ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame,
void *buffer, size_t size);
-int hdmi_infoframe_check(union hdmi_infoframe *frame);
int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
const void *buffer, size_t size);
void hdmi_infoframe_log(const char *level, struct device *dev,
diff --git a/include/linux/hex.h b/include/linux/hex.h
new file mode 100644
index 000000000000..2618382e5b0c
--- /dev/null
+++ b/include/linux/hex.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HEX_H
+#define _LINUX_HEX_H
+
+#include <linux/types.h>
+
+extern const char hex_asc[];
+#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
+#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
+
+static inline char *hex_byte_pack(char *buf, u8 byte)
+{
+ *buf++ = hex_asc_hi(byte);
+ *buf++ = hex_asc_lo(byte);
+ return buf;
+}
+
+extern const char hex_asc_upper[];
+#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
+#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
+
+static inline char *hex_byte_pack_upper(char *buf, u8 byte)
+{
+ *buf++ = hex_asc_upper_hi(byte);
+ *buf++ = hex_asc_upper_lo(byte);
+ return buf;
+}
+
+extern int hex_to_bin(unsigned char ch);
+extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
+extern char *bin2hex(char *dst, const void *src, size_t count);
+
+bool mac_pton(const char *s, u8 *mac);
+
+#endif
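A hypothetical usage sketch for the helpers declared above (ETH_ALEN comes from <linux/if_ether.h>), rendering a MAC address as lowercase hex:

char buf[2 * ETH_ALEN + 1];
u8 mac[ETH_ALEN] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };
char *p = buf;
int i;

for (i = 0; i < ETH_ALEN; i++)
	p = hex_byte_pack(p, mac[i]);
*p = '\0';	/* buf now holds "deadbeef0001" */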
diff --git a/include/linux/hfs_common.h b/include/linux/hfs_common.h
new file mode 100644
index 000000000000..dadb5e0aa8a3
--- /dev/null
+++ b/include/linux/hfs_common.h
@@ -0,0 +1,653 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * HFS/HFS+ common definitions, inline functions,
+ * and shared functionality.
+ */
+
+#ifndef _HFS_COMMON_H_
+#define _HFS_COMMON_H_
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define hfs_dbg(fmt, ...) \
+ pr_debug("pid %d:%s:%d %s(): " fmt, \
+ current->pid, __FILE__, __LINE__, __func__, ##__VA_ARGS__)
+
+/*
+ * Format of structures on disk
+ * Information taken from Apple Technote #1150 (HFS Plus Volume Format)
+ */
+
+/* offsets to various blocks */
+#define HFS_DD_BLK 0 /* Driver Descriptor block */
+#define HFS_PMAP_BLK 1 /* First block of partition map */
+#define HFS_MDB_BLK 2 /* Block (w/i partition) of MDB */
+
+/* magic numbers for various disk blocks */
+#define HFS_DRVR_DESC_MAGIC 0x4552 /* "ER": driver descriptor map */
+#define HFS_OLD_PMAP_MAGIC 0x5453 /* "TS": old-type partition map */
+#define HFS_NEW_PMAP_MAGIC 0x504D /* "PM": new-type partition map */
+#define HFS_SUPER_MAGIC 0x4244 /* "BD": HFS MDB (super block) */
+#define HFS_MFS_SUPER_MAGIC 0xD2D7 /* MFS MDB (super block) */
+
+#define HFSPLUS_VOLHEAD_SIG 0x482b
+#define HFSPLUS_VOLHEAD_SIGX 0x4858
+#define HFSPLUS_SUPER_MAGIC 0x482b
+
+#define HFSP_WRAP_MAGIC 0x4244
+#define HFSP_WRAP_ATTRIB_SLOCK 0x8000
+#define HFSP_WRAP_ATTRIB_SPARED 0x0200
+
+#define HFSP_WRAPOFF_SIG 0x00
+#define HFSP_WRAPOFF_ATTRIB 0x0A
+#define HFSP_WRAPOFF_ABLKSIZE 0x14
+#define HFSP_WRAPOFF_ABLKSTART 0x1C
+#define HFSP_WRAPOFF_EMBEDSIG 0x7C
+#define HFSP_WRAPOFF_EMBEDEXT 0x7E
+
+#define HFSP_HARDLINK_TYPE 0x686c6e6b /* 'hlnk' */
+#define HFSP_HFSPLUS_CREATOR 0x6866732b /* 'hfs+' */
+
+#define HFSP_SYMLINK_TYPE 0x736c6e6b /* 'slnk' */
+#define HFSP_SYMLINK_CREATOR 0x72686170 /* 'rhap' */
+
+#define HFSP_MOUNT_VERSION 0x482b4c78 /* 'H+Lx' */
+
+#define HFSP_HIDDENDIR_NAME \
+ "\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80\xe2\x90\x80HFS+ Private Data"
+
+/* various FIXED size parameters */
+#define HFS_SECTOR_SIZE 512 /* size of an HFS sector */
+#define HFS_SECTOR_SIZE_BITS 9 /* log_2(HFS_SECTOR_SIZE) */
+#define HFS_MAX_VALENCE 32767U
+
+#define HFSPLUS_SECTOR_SIZE HFS_SECTOR_SIZE
+#define HFSPLUS_SECTOR_SHIFT HFS_SECTOR_SIZE_BITS
+#define HFSPLUS_VOLHEAD_SECTOR 2
+#define HFSPLUS_MIN_VERSION 4
+#define HFSPLUS_CURRENT_VERSION 5
+
+#define HFS_NAMELEN 31 /* maximum length of an HFS filename */
+#define HFS_MAX_NAMELEN 128
+
+#define HFSPLUS_MAX_STRLEN 255
+#define HFSPLUS_ATTR_MAX_STRLEN 127
+
+/* Meanings of the drAtrb field of the MDB,
+ * Reference: _Inside Macintosh: Files_ p. 2-61
+ */
+#define HFS_SB_ATTRIB_HLOCK (1 << 7)
+#define HFS_SB_ATTRIB_UNMNT (1 << 8)
+#define HFS_SB_ATTRIB_SPARED (1 << 9)
+#define HFS_SB_ATTRIB_INCNSTNT (1 << 11)
+#define HFS_SB_ATTRIB_SLOCK (1 << 15)
+
+/* values for hfs_cat_rec.cdrType */
+#define HFS_CDR_DIR 0x01 /* folder (directory) */
+#define HFS_CDR_FIL 0x02 /* file */
+#define HFS_CDR_THD 0x03 /* folder (directory) thread */
+#define HFS_CDR_FTH 0x04 /* file thread */
+
+/* legal values for hfs_ext_key.FkType and hfs_file.fork */
+#define HFS_FK_DATA 0x00
+#define HFS_FK_RSRC 0xFF
+
+/* bits in hfs_fil_entry.Flags */
+#define HFS_FIL_LOCK 0x01 /* locked */
+#define HFS_FIL_THD 0x02 /* file thread */
+#define HFS_FIL_DOPEN 0x04 /* data fork open */
+#define HFS_FIL_ROPEN 0x08 /* resource fork open */
+#define HFS_FIL_DIR 0x10 /* directory (always clear) */
+#define HFS_FIL_NOCOPY 0x40 /* copy-protected file */
+#define HFS_FIL_USED 0x80 /* open */
+
+/* bits in hfs_dir_entry.Flags. dirflags is 16 bits. */
+#define HFS_DIR_LOCK 0x01 /* locked */
+#define HFS_DIR_THD 0x02 /* directory thread */
+#define HFS_DIR_INEXPFOLDER 0x04 /* in a shared area */
+#define HFS_DIR_MOUNTED 0x08 /* mounted */
+#define HFS_DIR_DIR 0x10 /* directory (always set) */
+#define HFS_DIR_EXPFOLDER 0x20 /* share point */
+
+/* bits hfs_finfo.fdFlags */
+#define HFS_FLG_INITED 0x0100
+#define HFS_FLG_LOCKED 0x1000
+#define HFS_FLG_INVISIBLE 0x4000
+
+/* Some special File ID numbers */
+#define HFS_POR_CNID 1 /* Parent Of the Root */
+#define HFSPLUS_POR_CNID HFS_POR_CNID
+#define HFS_ROOT_CNID 2 /* ROOT directory */
+#define HFSPLUS_ROOT_CNID HFS_ROOT_CNID
+#define HFS_EXT_CNID 3 /* EXTents B-tree */
+#define HFSPLUS_EXT_CNID HFS_EXT_CNID
+#define HFS_CAT_CNID 4 /* CATalog B-tree */
+#define HFSPLUS_CAT_CNID HFS_CAT_CNID
+#define HFS_BAD_CNID 5 /* BAD blocks file */
+#define HFSPLUS_BAD_CNID HFS_BAD_CNID
+#define HFS_ALLOC_CNID 6 /* ALLOCation file (HFS+) */
+#define HFSPLUS_ALLOC_CNID HFS_ALLOC_CNID
+#define HFS_START_CNID 7 /* STARTup file (HFS+) */
+#define HFSPLUS_START_CNID HFS_START_CNID
+#define HFS_ATTR_CNID 8 /* ATTRibutes file (HFS+) */
+#define HFSPLUS_ATTR_CNID HFS_ATTR_CNID
+#define HFS_EXCH_CNID 15 /* ExchangeFiles temp id */
+#define HFSPLUS_EXCH_CNID HFS_EXCH_CNID
+#define HFS_FIRSTUSER_CNID 16 /* first available user id */
+#define HFSPLUS_FIRSTUSER_CNID HFS_FIRSTUSER_CNID
+
+/*======== HFS/HFS+ structures as they appear on the disk ========*/
+
+typedef __be32 hfsplus_cnid;
+typedef __be16 hfsplus_unichr;
+
+/* Pascal-style string of up to 31 characters */
+struct hfs_name {
+ u8 len;
+ u8 name[HFS_NAMELEN];
+} __packed;
+
+/* A "string" as used in filenames, etc. */
+struct hfsplus_unistr {
+ __be16 length;
+ hfsplus_unichr unicode[HFSPLUS_MAX_STRLEN];
+} __packed;
+
+/*
+ * A "string" is used in attributes file
+ * for name of extended attribute
+ */
+struct hfsplus_attr_unistr {
+ __be16 length;
+ hfsplus_unichr unicode[HFSPLUS_ATTR_MAX_STRLEN];
+} __packed;
+
+struct hfs_extent {
+ __be16 block;
+ __be16 count;
+};
+typedef struct hfs_extent hfs_extent_rec[3];
+
+/* A single contiguous area of a file */
+struct hfsplus_extent {
+ __be32 start_block;
+ __be32 block_count;
+} __packed;
+typedef struct hfsplus_extent hfsplus_extent_rec[8];
+
+/* Information for a "Fork" in a file */
+struct hfsplus_fork_raw {
+ __be64 total_size;
+ __be32 clump_size;
+ __be32 total_blocks;
+ hfsplus_extent_rec extents;
+} __packed;
+
+struct hfs_mdb {
+ __be16 drSigWord; /* Signature word indicating fs type */
+ __be32 drCrDate; /* fs creation date/time */
+ __be32 drLsMod; /* fs modification date/time */
+ __be16 drAtrb; /* fs attributes */
+ __be16 drNmFls; /* number of files in root directory */
+ __be16 drVBMSt; /* location (in 512-byte blocks)
+ of the volume bitmap */
+ __be16 drAllocPtr; /* location (in allocation blocks)
+ to begin next allocation search */
+ __be16 drNmAlBlks; /* number of allocation blocks */
+ __be32 drAlBlkSiz; /* bytes in an allocation block */
+ __be32 drClpSiz; /* clumpsize, the number of bytes to
+ allocate when extending a file */
+ __be16 drAlBlSt; /* location (in 512-byte blocks)
+ of the first allocation block */
+ __be32 drNxtCNID; /* CNID to assign to the next
+ file or directory created */
+ __be16 drFreeBks; /* number of free allocation blocks */
+ u8 drVN[28]; /* the volume label */
+ __be32 drVolBkUp; /* fs backup date/time */
+ __be16 drVSeqNum; /* backup sequence number */
+ __be32 drWrCnt; /* fs write count */
+ __be32 drXTClpSiz; /* clumpsize for the extents B-tree */
+ __be32 drCTClpSiz; /* clumpsize for the catalog B-tree */
+ __be16 drNmRtDirs; /* number of directories in
+ the root directory */
+ __be32 drFilCnt; /* number of files in the fs */
+ __be32 drDirCnt; /* number of directories in the fs */
+ u8 drFndrInfo[32]; /* data used by the Finder */
+ __be16 drEmbedSigWord; /* embedded volume signature */
+ __be32 drEmbedExtent; /* starting block number (xdrStABN)
+ and number of allocation blocks
+ (xdrNumABlks) occupied by embedded
+ volume */
+ __be32 drXTFlSize; /* bytes in the extents B-tree */
+ hfs_extent_rec drXTExtRec; /* extents B-tree's first 3 extents */
+ __be32 drCTFlSize; /* bytes in the catalog B-tree */
+ hfs_extent_rec drCTExtRec; /* catalog B-tree's first 3 extents */
+} __packed;
+
+/* HFS+ Volume Header */
+struct hfsplus_vh {
+ __be16 signature;
+ __be16 version;
+ __be32 attributes;
+ __be32 last_mount_vers;
+ u32 reserved;
+
+ __be32 create_date;
+ __be32 modify_date;
+ __be32 backup_date;
+ __be32 checked_date;
+
+ __be32 file_count;
+ __be32 folder_count;
+
+ __be32 blocksize;
+ __be32 total_blocks;
+ __be32 free_blocks;
+
+ __be32 next_alloc;
+ __be32 rsrc_clump_sz;
+ __be32 data_clump_sz;
+ hfsplus_cnid next_cnid;
+
+ __be32 write_count;
+ __be64 encodings_bmp;
+
+ u32 finder_info[8];
+
+ struct hfsplus_fork_raw alloc_file;
+ struct hfsplus_fork_raw ext_file;
+ struct hfsplus_fork_raw cat_file;
+ struct hfsplus_fork_raw attr_file;
+ struct hfsplus_fork_raw start_file;
+} __packed;
+
+/* HFS+ volume attributes */
+#define HFSPLUS_VOL_UNMNT (1 << 8)
+#define HFSPLUS_VOL_SPARE_BLK (1 << 9)
+#define HFSPLUS_VOL_NOCACHE (1 << 10)
+#define HFSPLUS_VOL_INCNSTNT (1 << 11)
+#define HFSPLUS_VOL_NODEID_REUSED (1 << 12)
+#define HFSPLUS_VOL_JOURNALED (1 << 13)
+#define HFSPLUS_VOL_SOFTLOCK (1 << 15)
+#define HFSPLUS_VOL_UNUSED_NODE_FIX (1 << 31)
+
+struct hfs_point {
+ __be16 v;
+ __be16 h;
+} __packed;
+
+typedef struct hfs_point hfsp_point;
+
+struct hfs_rect {
+ __be16 top;
+ __be16 left;
+ __be16 bottom;
+ __be16 right;
+} __packed;
+
+typedef struct hfs_rect hfsp_rect;
+
+struct hfs_finfo {
+ __be32 fdType;
+ __be32 fdCreator;
+ __be16 fdFlags;
+ struct hfs_point fdLocation;
+ __be16 fdFldr;
+} __packed;
+
+typedef struct hfs_finfo FInfo;
+
+struct hfs_fxinfo {
+ __be16 fdIconID;
+ u8 fdUnused[8];
+ __be16 fdComment;
+ __be32 fdPutAway;
+} __packed;
+
+typedef struct hfs_fxinfo FXInfo;
+
+struct hfs_dinfo {
+ struct hfs_rect frRect;
+ __be16 frFlags;
+ struct hfs_point frLocation;
+ __be16 frView;
+} __packed;
+
+typedef struct hfs_dinfo DInfo;
+
+struct hfs_dxinfo {
+ struct hfs_point frScroll;
+ __be32 frOpenChain;
+ __be16 frUnused;
+ __be16 frComment;
+ __be32 frPutAway;
+} __packed;
+
+typedef struct hfs_dxinfo DXInfo;
+
+union hfs_finder_info {
+ struct {
+ struct hfs_finfo finfo;
+ struct hfs_fxinfo fxinfo;
+ } file;
+ struct {
+ struct hfs_dinfo dinfo;
+ struct hfs_dxinfo dxinfo;
+ } dir;
+} __packed;
+
+/* The key used in the catalog b-tree: */
+struct hfs_cat_key {
+ u8 key_len; /* number of bytes in the key */
+ u8 reserved; /* padding */
+ __be32 ParID; /* CNID of the parent dir */
+ struct hfs_name CName; /* The filename of the entry */
+} __packed;
+
+/* HFS+ catalog entry key */
+struct hfsplus_cat_key {
+ __be16 key_len;
+ hfsplus_cnid parent;
+ struct hfsplus_unistr name;
+} __packed;
+
+#define HFSPLUS_CAT_KEYLEN (sizeof(struct hfsplus_cat_key))
+
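Catalog keys are variable-length on disk: following the HFS+ big-keys convention, key_len counts every byte after the key_len field itself, and only the used portion of the unicode array is written. A sketch of the arithmetic (inferred from the layout above, not code from this patch):

static u16 hfsplus_cat_key_len(u16 name_len)
{
	/* parent cnid (4) + unistr length field (2) + UTF-16 name */
	return 4 + 2 + 2 * name_len;
}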
+/* The key used in the extents b-tree: */
+struct hfs_ext_key {
+ u8 key_len; /* number of bytes in the key */
+ u8 FkType; /* HFS_FK_{DATA,RSRC} */
+ __be32 FNum; /* The File ID of the file */
+ __be16 FABN; /* allocation block number */
+} __packed;
+
+/* HFS+ extents tree key */
+struct hfsplus_ext_key {
+ __be16 key_len;
+ u8 fork_type;
+ u8 pad;
+ hfsplus_cnid cnid;
+ __be32 start_block;
+} __packed;
+
+#define HFSPLUS_EXT_KEYLEN sizeof(struct hfsplus_ext_key)
+
+typedef union hfs_btree_key {
+ u8 key_len; /* number of bytes in the key */
+ struct hfs_cat_key cat;
+ struct hfs_ext_key ext;
+} hfs_btree_key;
+
+#define HFS_MAX_CAT_KEYLEN (sizeof(struct hfs_cat_key) - sizeof(u8))
+#define HFS_MAX_EXT_KEYLEN (sizeof(struct hfs_ext_key) - sizeof(u8))
+
+typedef union hfs_btree_key btree_key;
+
+/* The catalog record for a file */
+struct hfs_cat_file {
+ s8 type; /* The type of entry */
+ u8 reserved;
+ u8 Flags; /* Flags such as read-only */
+ s8 Typ; /* file version number = 0 */
+ struct hfs_finfo UsrWds; /* data used by the Finder */
+ __be32 FlNum; /* The CNID */
+ __be16 StBlk; /* obsolete */
+ __be32 LgLen; /* The logical EOF of the data fork*/
+ __be32 PyLen; /* The physical EOF of the data fork */
+ __be16 RStBlk; /* obsolete */
+ __be32 RLgLen; /* The logical EOF of the rsrc fork */
+ __be32 RPyLen; /* The physical EOF of the rsrc fork */
+ __be32 CrDat; /* The creation date */
+ __be32 MdDat; /* The modified date */
+ __be32 BkDat; /* The last backup date */
+ struct hfs_fxinfo FndrInfo; /* more data for the Finder */
+ __be16 ClpSize; /* number of bytes to allocate
+ when extending files */
+ hfs_extent_rec ExtRec; /* first extent record
+ for the data fork */
+ hfs_extent_rec RExtRec; /* first extent record
+ for the resource fork */
+ u32 Resrv; /* reserved by Apple */
+} __packed;
+
+/* the catalog record for a directory */
+struct hfs_cat_dir {
+ s8 type; /* The type of entry */
+ u8 reserved;
+ __be16 Flags; /* flags */
+ __be16 Val; /* Valence: number of files and
+ dirs in the directory */
+ __be32 DirID; /* The CNID */
+ __be32 CrDat; /* The creation date */
+ __be32 MdDat; /* The modification date */
+ __be32 BkDat; /* The last backup date */
+ struct hfs_dinfo UsrInfo; /* data used by the Finder */
+ struct hfs_dxinfo FndrInfo; /* more data used by Finder */
+ u8 Resrv[16]; /* reserved by Apple */
+} __packed;
+
+/* the catalog record for a thread */
+struct hfs_cat_thread {
+ s8 type; /* The type of entry */
+ u8 reserved[9]; /* reserved by Apple */
+ __be32 ParID; /* CNID of parent directory */
+ struct hfs_name CName; /* The name of this entry */
+} __packed;
+
+/* A catalog tree record */
+typedef union hfs_cat_rec {
+ s8 type; /* The type of entry */
+ struct hfs_cat_file file;
+ struct hfs_cat_dir dir;
+ struct hfs_cat_thread thread;
+} hfs_cat_rec;
+
+/* POSIX permissions */
+struct hfsplus_perm {
+ __be32 owner;
+ __be32 group;
+ u8 rootflags;
+ u8 userflags;
+ __be16 mode;
+ __be32 dev;
+} __packed;
+
+#define HFSPLUS_FLG_NODUMP 0x01
+#define HFSPLUS_FLG_IMMUTABLE 0x02
+#define HFSPLUS_FLG_APPEND 0x04
+
+/* HFS/HFS+ BTree node descriptor */
+struct hfs_bnode_desc {
+ __be32 next; /* (V) Number of the next node at this level */
+ __be32 prev; /* (V) Number of the prev node at this level */
+ u8 type; /* (F) The type of node */
+ u8 height; /* (F) The level of this node (leaves=1) */
+ __be16 num_recs; /* (V) The number of records in this node */
+ u16 reserved;
+} __packed;
+
+/* HFS/HFS+ BTree node types */
+#define HFS_NODE_INDEX 0x00 /* An internal (index) node */
+#define HFS_NODE_HEADER 0x01 /* The tree header node (node 0) */
+#define HFS_NODE_MAP 0x02 /* Holds part of the bitmap of used nodes */
+#define HFS_NODE_LEAF 0xFF /* A leaf (ndNHeight==1) node */
+
+/* HFS/HFS+ BTree header */
+struct hfs_btree_header_rec {
+ __be16 depth; /* (V) The number of levels in this B-tree */
+ __be32 root; /* (V) The node number of the root node */
+ __be32 leaf_count; /* (V) The number of leaf records */
+ __be32 leaf_head; /* (V) The number of the first leaf node */
+ __be32 leaf_tail; /* (V) The number of the last leaf node */
+ __be16 node_size; /* (F) The number of bytes in a node (=512) */
+ __be16 max_key_len; /* (F) The length of a key in an index node */
+ __be32 node_count; /* (V) The total number of nodes */
+ __be32 free_nodes; /* (V) The number of unused nodes */
+ u16 reserved1;
+ __be32 clump_size; /* (F) clump size. not usually used. */
+ u8 btree_type; /* (F) BTree type */
+ u8 key_type;
+ __be32 attributes; /* (F) attributes */
+ u32 reserved3[16];
+} __packed;
+
+/* BTree attributes */
+#define BTREE_ATTR_BADCLOSE 0x00000001 /* b-tree not closed properly. not
+ used by hfsplus. */
+#define HFS_TREE_BIGKEYS 0x00000002 /* key length is u16 instead of u8.
+ used by hfsplus. */
+#define HFS_TREE_VARIDXKEYS 0x00000004 /* variable key length instead of
+ max key length. used in catalog
+ b-tree but not in extents
+ b-tree (hfsplus). */
+
+/* HFS+ BTree misc info */
+#define HFSPLUS_TREE_HEAD 0
+#define HFSPLUS_NODE_MXSZ 32768
+#define HFSPLUS_ATTR_TREE_NODE_SIZE 8192
+#define HFSPLUS_BTREE_HDR_NODE_RECS_COUNT 3
+#define HFSPLUS_BTREE_HDR_USER_BYTES 128
+
+/* btree key type */
+#define HFSPLUS_KEY_CASEFOLDING 0xCF /* case-insensitive */
+#define HFSPLUS_KEY_BINARY 0xBC /* case-sensitive */
+
+/* HFS+ folder data (part of an hfsplus_cat_entry) */
+struct hfsplus_cat_folder {
+ __be16 type;
+ __be16 flags;
+ __be32 valence;
+ hfsplus_cnid id;
+ __be32 create_date;
+ __be32 content_mod_date;
+ __be32 attribute_mod_date;
+ __be32 access_date;
+ __be32 backup_date;
+ struct hfsplus_perm permissions;
+ struct_group_attr(info, __packed,
+ DInfo user_info;
+ DXInfo finder_info;
+ );
+ __be32 text_encoding;
+ __be32 subfolders; /* Subfolder count in HFSX. Reserved in HFS+. */
+} __packed;
+
+/* HFS+ file data (part of a cat_entry) */
+struct hfsplus_cat_file {
+ __be16 type;
+ __be16 flags;
+ u32 reserved1;
+ hfsplus_cnid id;
+ __be32 create_date;
+ __be32 content_mod_date;
+ __be32 attribute_mod_date;
+ __be32 access_date;
+ __be32 backup_date;
+ struct hfsplus_perm permissions;
+ struct_group_attr(info, __packed,
+ FInfo user_info;
+ FXInfo finder_info;
+ );
+ __be32 text_encoding;
+ u32 reserved2;
+
+ struct hfsplus_fork_raw data_fork;
+ struct hfsplus_fork_raw rsrc_fork;
+} __packed;
+
+/* File and folder flag bits */
+#define HFSPLUS_FILE_LOCKED 0x0001
+#define HFSPLUS_FILE_THREAD_EXISTS 0x0002
+#define HFSPLUS_XATTR_EXISTS 0x0004
+#define HFSPLUS_ACL_EXISTS 0x0008
+#define HFSPLUS_HAS_FOLDER_COUNT 0x0010 /* Folder has subfolder count
+ * (HFSX only) */
+
+/* HFS+ catalog thread (part of a cat_entry) */
+struct hfsplus_cat_thread {
+ __be16 type;
+ s16 reserved;
+ hfsplus_cnid parentID;
+ struct hfsplus_unistr nodeName;
+} __packed;
+
+#define HFSPLUS_MIN_THREAD_SZ 10
+
+/* A data record in the catalog tree */
+typedef union {
+ __be16 type;
+ struct hfsplus_cat_folder folder;
+ struct hfsplus_cat_file file;
+ struct hfsplus_cat_thread thread;
+} __packed hfsplus_cat_entry;
+
+/* HFS+ catalog entry type */
+#define HFSPLUS_FOLDER 0x0001
+#define HFSPLUS_FILE 0x0002
+#define HFSPLUS_FOLDER_THREAD 0x0003
+#define HFSPLUS_FILE_THREAD 0x0004
+
+#define HFSPLUS_XATTR_FINDER_INFO_NAME "com.apple.FinderInfo"
+#define HFSPLUS_XATTR_ACL_NAME "com.apple.system.Security"
+
+#define HFSPLUS_ATTR_INLINE_DATA 0x10
+#define HFSPLUS_ATTR_FORK_DATA 0x20
+#define HFSPLUS_ATTR_EXTENTS 0x30
+
+/* HFS+ attributes tree key */
+struct hfsplus_attr_key {
+ __be16 key_len;
+ __be16 pad;
+ hfsplus_cnid cnid;
+ __be32 start_block;
+ struct hfsplus_attr_unistr key_name;
+} __packed;
+
+#define HFSPLUS_ATTR_KEYLEN sizeof(struct hfsplus_attr_key)
+
+/* HFS+ fork data attribute */
+struct hfsplus_attr_fork_data {
+ __be32 record_type;
+ __be32 reserved;
+ struct hfsplus_fork_raw the_fork;
+} __packed;
+
+/* HFS+ extension attribute */
+struct hfsplus_attr_extents {
+ __be32 record_type;
+ __be32 reserved;
+ struct hfsplus_extent extents;
+} __packed;
+
+#define HFSPLUS_MAX_INLINE_DATA_SIZE 3802
+
+/* HFS+ attribute inline data */
+struct hfsplus_attr_inline_data {
+ __be32 record_type;
+ __be32 reserved1;
+ u8 reserved2[6];
+ __be16 length;
+ u8 raw_bytes[HFSPLUS_MAX_INLINE_DATA_SIZE];
+} __packed;
+
+/* A data record in the attributes tree */
+typedef union {
+ __be32 record_type;
+ struct hfsplus_attr_fork_data fork_data;
+ struct hfsplus_attr_extents extents;
+ struct hfsplus_attr_inline_data inline_data;
+} __packed hfsplus_attr_entry;
+
+/* HFS+ generic BTree key */
+typedef union {
+ __be16 key_len;
+ struct hfsplus_cat_key cat;
+ struct hfsplus_ext_key ext;
+ struct hfsplus_attr_key attr;
+} __packed hfsplus_btree_key;
+
+#endif /* _HFS_COMMON_H_ */
diff --git a/include/linux/hid-over-i2c.h b/include/linux/hid-over-i2c.h
new file mode 100644
index 000000000000..3b1a0208a6b8
--- /dev/null
+++ b/include/linux/hid-over-i2c.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2024 Intel Corporation */
+
+#include <linux/bits.h>
+
+#ifndef _HID_OVER_I2C_H_
+#define _HID_OVER_I2C_H_
+
+#define HIDI2C_REG_LEN sizeof(__le16)
+
+/* Input report type definition in HIDI2C protocol */
+enum hidi2c_report_type {
+ HIDI2C_RESERVED = 0,
+ HIDI2C_INPUT,
+ HIDI2C_OUTPUT,
+ HIDI2C_FEATURE,
+};
+
+/* Power state type definition in HIDI2C protocol */
+enum hidi2c_power_state {
+ HIDI2C_ON,
+ HIDI2C_SLEEP,
+};
+
+/* Opcode type definition in HIDI2C protocol */
+enum hidi2c_opcode {
+ HIDI2C_RESET = 1,
+ HIDI2C_GET_REPORT,
+ HIDI2C_SET_REPORT,
+ HIDI2C_GET_IDLE,
+ HIDI2C_SET_IDLE,
+ HIDI2C_GET_PROTOCOL,
+ HIDI2C_SET_PROTOCOL,
+ HIDI2C_SET_POWER,
+};
+
+/**
+ * struct hidi2c_report_packet - Report packet definition in HIDI2C protocol
+ * @len: data field length
+ * @data: HIDI2C report packet data
+ */
+struct hidi2c_report_packet {
+ __le16 len;
+ u8 data[];
+} __packed;
+
+#define HIDI2C_LENGTH_LEN sizeof(__le16)
+
+#define HIDI2C_PACKET_LEN(data_len) ((data_len) + HIDI2C_LENGTH_LEN)
+#define HIDI2C_DATA_LEN(pkt_len) ((pkt_len) - HIDI2C_LENGTH_LEN)
+
+#define HIDI2C_CMD_MAX_RI 0x0F
+
+/**
+ * HIDI2C command data packet - Command packet definition in HIDI2C protocol
+ * @report_id: [0:3] report id (<15) for features or output reports
+ * @report_type: [4:5] indicate report type, reference to hidi2c_report_type
+ * @reserved0: [6:7] reserved bits
+ * @opcode: [8:11] command operation code, reference to hidi2c_opcode
+ * @reserved1: [12:15] reserved bits
+ * @report_id_optional: [16:23] optional appended 3rd byte.
+ * If the report_id in the low byte is set to the
+ * sentinel value (HIDI2C_CMD_MAX_RI), then this
+ * optional third byte holds the real report id (>= 15).
+ * Otherwise, this third byte is omitted.
+ */
+
+#define HIDI2C_CMD_LEN sizeof(__le16)
+#define HIDI2C_CMD_LEN_OPT (sizeof(__le16) + 1)
+#define HIDI2C_CMD_REPORT_ID GENMASK(3, 0)
+#define HIDI2C_CMD_REPORT_TYPE GENMASK(5, 4)
+#define HIDI2C_CMD_OPCODE GENMASK(11, 8)
+#define HIDI2C_CMD_3RD_BYTE GENMASK(23, 16)
+
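As an illustration of the command bitfields (assumed, not part of the patch), building a 16-bit GET_REPORT command for feature report id 2; FIELD_PREP comes from <linux/bitfield.h>:

u16 cmd = FIELD_PREP(HIDI2C_CMD_OPCODE, HIDI2C_GET_REPORT) |
	  FIELD_PREP(HIDI2C_CMD_REPORT_TYPE, HIDI2C_FEATURE) |
	  FIELD_PREP(HIDI2C_CMD_REPORT_ID, 2);
__le16 wire = cpu_to_le16(cmd);	/* written to the device's cmd_reg */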
+#define HIDI2C_HID_DESC_BCDVERSION 0x100
+
+/**
+ * struct hidi2c_dev_descriptor - HIDI2C device descriptor definition
+ * @dev_desc_len: The length of the complete device descriptor, fixed to 0x1E (30).
+ * @bcd_ver: The version number of the HIDI2C protocol supported.
+ * In binary coded decimal (BCD) format.
+ * @report_desc_len: The length of the report descriptor
+ * @report_desc_reg: The register address to retrieve report descriptor
+ * @input_reg: The register address to retrieve input report
+ * @max_input_len: The length of the largest possible HID input (or feature) report
+ * @output_reg: The register address to send output report
+ * @max_output_len: The length of the largest output (or feature) report
+ * @cmd_reg: The register address to send command
+ * @data_reg: The register address to send command data
+ * @vendor_id: Device manufacturer's vendor ID
+ * @product_id: Device unique model/product ID
+ * @version_id: Device's unique version
+ * @reserved0: Reserved and should be 0
+ * @reserved1: Reserved and should be 0
+ */
+struct hidi2c_dev_descriptor {
+ __le16 dev_desc_len;
+ __le16 bcd_ver;
+ __le16 report_desc_len;
+ __le16 report_desc_reg;
+ __le16 input_reg;
+ __le16 max_input_len;
+ __le16 output_reg;
+ __le16 max_output_len;
+ __le16 cmd_reg;
+ __le16 data_reg;
+ __le16 vendor_id;
+ __le16 product_id;
+ __le16 version_id;
+ __le16 reserved0;
+ __le16 reserved1;
+} __packed;
+
+#define HIDI2C_DEV_DESC_LEN sizeof(struct hidi2c_dev_descriptor)
+
+#endif /* _HID_OVER_I2C_H_ */
diff --git a/include/linux/hid-over-spi.h b/include/linux/hid-over-spi.h
new file mode 100644
index 000000000000..da5a14b5e89b
--- /dev/null
+++ b/include/linux/hid-over-spi.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2024 Intel Corporation */
+
+#ifndef _HID_OVER_SPI_H_
+#define _HID_OVER_SPI_H_
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/* Input report type definition in HIDSPI protocol */
+enum input_report_type {
+ INVALID_INPUT_REPORT_TYPE_0 = 0,
+ DATA = 1,
+ INVALID_TYPE_2 = 2,
+ RESET_RESPONSE = 3,
+ COMMAND_RESPONSE = 4,
+ GET_FEATURE_RESPONSE = 5,
+ INVALID_TYPE_6 = 6,
+ DEVICE_DESCRIPTOR_RESPONSE = 7,
+ REPORT_DESCRIPTOR_RESPONSE = 8,
+ SET_FEATURE_RESPONSE = 9,
+ OUTPUT_REPORT_RESPONSE = 10,
+ GET_INPUT_REPORT_RESPONSE = 11,
+ INVALID_INPUT_REPORT_TYPE = 0xF,
+};
+
+/* Output report type definition in HIDSPI protocol */
+enum output_report_type {
+ INVALID_OUTPUT_REPORT_TYPE_0 = 0,
+ DEVICE_DESCRIPTOR = 1,
+ REPORT_DESCRIPTOR = 2,
+ SET_FEATURE = 3,
+ GET_FEATURE = 4,
+ OUTPUT_REPORT = 5,
+ GET_INPUT_REPORT = 6,
+ COMMAND_CONTENT = 7,
+};
+
+/* Set power command ID for output report */
+#define HIDSPI_SET_POWER_CMD_ID 1
+
+/* Power state definition in HIDSPI protocol */
+enum hidspi_power_state {
+ HIDSPI_ON = 1,
+ HIDSPI_SLEEP = 2,
+ HIDSPI_OFF = 3,
+};
+
+/**
+ * Input report header definition in HIDSPI protocol
+ * Report header size is 32 bits; it includes:
+ * protocol_ver: [0:3] Current supported HIDSPI protocol version, must be 0x3
+ * reserved0: [4:7] Reserved bits
+ * input_report_len: [8:21] Input report length in number of bytes divided by 4
+ * last_frag_flag: [22] Indicates whether this packet is the last fragment.
+ * 1 - indicates last fragment
+ * 0 - indicates additional fragments
+ * reserved1: [23] Reserved bit
+ * sync_const: [24:31] Used to validate input report header, must be 0x5A
+ */
+#define HIDSPI_INPUT_HEADER_SIZE sizeof(u32)
+#define HIDSPI_INPUT_HEADER_VER GENMASK(3, 0)
+#define HIDSPI_INPUT_HEADER_REPORT_LEN GENMASK(21, 8)
+#define HIDSPI_INPUT_HEADER_LAST_FLAG BIT(22)
+#define HIDSPI_INPUT_HEADER_SYNC GENMASK(31, 24)
+
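A decoding sketch for the header fields above (assumed, not from the patch); raw_header stands in for a __le32 value read from the device, and FIELD_GET comes from <linux/bitfield.h>:

u32 hdr = le32_to_cpu(raw_header);
size_t body_len;

if (FIELD_GET(HIDSPI_INPUT_HEADER_SYNC, hdr) != 0x5A ||
    FIELD_GET(HIDSPI_INPUT_HEADER_VER, hdr) != 0x3)
	return -EINVAL;	/* not a valid input report header */

/* the length field counts 32-bit words */
body_len = FIELD_GET(HIDSPI_INPUT_HEADER_REPORT_LEN, hdr) * 4;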
+/**
+ * struct input_report_body_header - Input report body header definition in HIDSPI protocol
+ * @input_report_type: indicate input report type, reference to enum input_report_type
+ * @content_len: this input report body packet length
+ * @content_id: indicate this input report's report id
+ */
+struct input_report_body_header {
+ u8 input_report_type;
+ __le16 content_len;
+ u8 content_id;
+} __packed;
+
+#define HIDSPI_INPUT_BODY_HEADER_SIZE sizeof(struct input_report_body_header)
+
+/**
+ * struct input_report_body - Input report body definition in HIDSPI protocol
+ * @body_hdr: input report body header
+ * @content: input report body content
+ */
+struct input_report_body {
+ struct input_report_body_header body_hdr;
+ u8 content[];
+} __packed;
+
+#define HIDSPI_INPUT_BODY_SIZE(content_len) ((content_len) + HIDSPI_INPUT_BODY_HEADER_SIZE)
+
+/**
+ * struct output_report_header - Output report header definition in HIDSPI protocol
+ * @report_type: output report type, reference to enum output_report_type
+ * @content_len: length of content
+ * @content_id: 0x00 - descriptors
+ * report id - for Set/Get Feature or Input/Output reports
+ * command opcode - for commands
+ */
+struct output_report_header {
+ u8 report_type;
+ __le16 content_len;
+ u8 content_id;
+} __packed;
+
+#define HIDSPI_OUTPUT_REPORT_HEADER_SIZE sizeof(struct output_report_header)
+
+/**
+ * struct output_report - Output report definition in HIDSPI protocol
+ * @output_hdr: output report header
+ * @content: output report content
+ */
+struct output_report {
+ struct output_report_header output_hdr;
+ u8 content[];
+} __packed;
+
+#define HIDSPI_OUTPUT_REPORT_SIZE(content_len) ((content_len) + HIDSPI_OUTPUT_REPORT_HEADER_SIZE)
+
+/**
+ * struct hidspi_dev_descriptor - HIDSPI device descriptor definition
+ * @dev_desc_len: The length of the complete device descriptor, fixed to 0x18 (24).
+ * @bcd_ver: The version number of the HIDSPI protocol supported.
+ * In binary coded decimal (BCD) format. Must be fixed to 0x0300.
+ * @rep_desc_len: The length of the report descriptor
+ * @max_input_len: The length of the largest possible HID input (or feature) report
+ * @max_output_len: The length of the largest output (or feature) report
+ * @max_frag_len: The length of the largest fragment, where a fragment represents
+ * the body of an input report.
+ * @vendor_id: Device manufacturer's vendor ID
+ * @product_id: Device unique model/product ID
+ * @version_id: Device's unique version
+ * @flags: Specify flags for the device's operation
+ * @reserved: Reserved and should be 0
+ */
+struct hidspi_dev_descriptor {
+ __le16 dev_desc_len;
+ __le16 bcd_ver;
+ __le16 rep_desc_len;
+ __le16 max_input_len;
+ __le16 max_output_len;
+ __le16 max_frag_len;
+ __le16 vendor_id;
+ __le16 product_id;
+ __le16 version_id;
+ __le16 flags;
+ __le32 reserved;
+};
+
+#define HIDSPI_DEVICE_DESCRIPTOR_SIZE sizeof(struct hidspi_dev_descriptor)
+#define HIDSPI_INPUT_DEVICE_DESCRIPTOR_SIZE \
+ (HIDSPI_INPUT_BODY_HEADER_SIZE + HIDSPI_DEVICE_DESCRIPTOR_SIZE)
+
+#endif /* _HID_OVER_SPI_H_ */
diff --git a/include/linux/hid-roccat.h b/include/linux/hid-roccat.h
index 3214fb0815fc..753654fff07f 100644
--- a/include/linux/hid-roccat.h
+++ b/include/linux/hid-roccat.h
@@ -16,7 +16,7 @@
#ifdef __KERNEL__
-int roccat_connect(struct class *klass, struct hid_device *hid,
+int roccat_connect(const struct class *klass, struct hid_device *hid,
int report_size);
void roccat_disconnect(int minor);
int roccat_report_event(int minor, u8 const *data);
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index c27329e2a5ad..e71056553108 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -17,7 +17,7 @@
* @attrib_id: Attribute id for this attribute.
* @report_id: Report id in which this information resides.
* @index: Field index in the report.
- * @units: Measurment unit for this attribute.
+ * @units: Measurement unit for this attribute.
* @unit_expo: Exponent used in the data.
* @size: Size in bytes for data size.
* @logical_minimum: Logical minimum value for this attribute.
@@ -39,8 +39,8 @@ struct hid_sensor_hub_attribute_info {
* struct sensor_hub_pending - Synchronous read pending information
* @status: Pending status true/false.
* @ready: Completion synchronization data.
- * @usage_id: Usage id for physical device, E.g. Gyro usage id.
- * @attr_usage_id: Usage Id of a field, E.g. X-AXIS for a gyro.
+ * @usage_id: Usage id for physical device, e.g. gyro usage id.
+ * @attr_usage_id: Usage Id of a field, e.g. X-axis for a gyro.
* @raw_size: Response size for a read request.
* @raw_data: Place holder for received response.
*/
@@ -104,10 +104,10 @@ struct hid_sensor_hub_callbacks {
int sensor_hub_device_open(struct hid_sensor_hub_device *hsdev);
/**
-* sensor_hub_device_clode() - Close hub device
+* sensor_hub_device_close() - Close hub device
* @hsdev: Hub device instance.
*
-* Used to clode hid device for sensor hub.
+* Used to close hid device for sensor hub.
*/
void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev);
@@ -128,12 +128,13 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
struct hid_sensor_hub_callbacks *usage_callback);
/**
-* sensor_hub_remove_callback() - Remove client callbacks
+* sensor_hub_remove_callback() - Remove client callback
* @hsdev: Hub device instance.
-* @usage_id: Usage id of the client (E.g. 0x200076 for Gyro).
+* @usage_id: Usage id of the client (e.g. 0x200076 for gyro).
*
-* If there is a callback registred, this call will remove that
-* callbacks, so that it will stop data and event notifications.
+* Removes a previously registered callback for the given usage_id
+* and hsdev. Once removed, the client will no longer receive data or
+* event notifications.
*/
int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
u32 usage_id);
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index ac631159403a..8a03d9696b1c 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -21,11 +21,17 @@
#define HID_USAGE_SENSOR_ALS 0x200041
#define HID_USAGE_SENSOR_DATA_LIGHT 0x2004d0
#define HID_USAGE_SENSOR_LIGHT_ILLUM 0x2004d1
+#define HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE 0x2004d2
+#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY 0x2004d3
+#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X 0x2004d4
+#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y 0x2004d5
/* PROX (200011) */
#define HID_USAGE_SENSOR_PROX 0x200011
#define HID_USAGE_SENSOR_DATA_PRESENCE 0x2004b0
#define HID_USAGE_SENSOR_HUMAN_PRESENCE 0x2004b1
+#define HID_USAGE_SENSOR_HUMAN_PROXIMITY 0x2004b2
+#define HID_USAGE_SENSOR_HUMAN_ATTENTION 0x2004bd
/* Pressure (200031) */
#define HID_USAGE_SENSOR_PRESSURE 0x200031
@@ -132,6 +138,7 @@
#define HID_USAGE_SENSOR_PROP_FRIENDLY_NAME 0x200301
#define HID_USAGE_SENSOR_PROP_SERIAL_NUM 0x200307
#define HID_USAGE_SENSOR_PROP_MANUFACTURER 0x200305
+#define HID_USAGE_SENSOR_PROP_MODEL 0x200306
#define HID_USAGE_SENSOR_PROP_REPORT_INTERVAL 0x20030E
#define HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS 0x20030F
#define HID_USAGE_SENSOR_PROP_SENSITIVITY_RANGE_PCT 0x200310
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 271021e20a3f..dce862cafbbd 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -26,6 +26,7 @@
#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <uapi/linux/hid.h>
+#include <linux/hid_bpf.h>
/*
* We parse each description item into this structure. Short items data
@@ -45,7 +46,7 @@ struct hid_item {
__s16 s16;
__u32 u32;
__s32 s32;
- __u8 *longdata;
+ const __u8 *longdata;
} data;
};
@@ -80,6 +81,8 @@ struct hid_item {
#define HID_MAIN_ITEM_TAG_FEATURE 11
#define HID_MAIN_ITEM_TAG_BEGIN_COLLECTION 10
#define HID_MAIN_ITEM_TAG_END_COLLECTION 12
+#define HID_MAIN_ITEM_TAG_RESERVED_MIN 13
+#define HID_MAIN_ITEM_TAG_RESERVED_MAX 15
/*
* HID report descriptor main item contents
@@ -102,6 +105,7 @@ struct hid_item {
#define HID_COLLECTION_PHYSICAL 0
#define HID_COLLECTION_APPLICATION 1
#define HID_COLLECTION_LOGICAL 2
+#define HID_COLLECTION_NAMED_ARRAY 4
/*
* HID report descriptor global item tags
@@ -152,8 +156,10 @@ struct hid_item {
#define HID_UP_TELEPHONY 0x000b0000
#define HID_UP_CONSUMER 0x000c0000
#define HID_UP_DIGITIZER 0x000d0000
+#define HID_UP_HAPTIC 0x000e0000
#define HID_UP_PID 0x000f0000
#define HID_UP_BATTERY 0x00850000
+#define HID_UP_CAMERA 0x00900000
#define HID_UP_HPVENDOR 0xff7f0000
#define HID_UP_HPVENDOR2 0xff010000
#define HID_UP_MSVENDOR 0xff000000
@@ -215,6 +221,7 @@ struct hid_item {
#define HID_GD_DOWN 0x00010091
#define HID_GD_RIGHT 0x00010092
#define HID_GD_LEFT 0x00010093
+#define HID_GD_DO_NOT_DISTURB 0x0001009b
/* Microsoft Win8 Wireless Radio Controls CA usage codes */
#define HID_GD_RFKILL_BTN 0x000100c6
#define HID_GD_RFKILL_LED 0x000100c7
@@ -240,6 +247,7 @@ struct hid_item {
#define HID_DG_TOUCH 0x000d0033
#define HID_DG_UNTOUCH 0x000d0034
#define HID_DG_TAP 0x000d0035
+#define HID_DG_TRANSDUCER_INDEX 0x000d0038
#define HID_DG_TABLETFUNCTIONKEY 0x000d0039
#define HID_DG_PROGRAMCHANGEKEY 0x000d003a
#define HID_DG_BATTERYSTRENGTH 0x000d003b
@@ -252,6 +260,15 @@ struct hid_item {
#define HID_DG_BARRELSWITCH 0x000d0044
#define HID_DG_ERASER 0x000d0045
#define HID_DG_TABLETPICK 0x000d0046
+#define HID_DG_PEN_COLOR 0x000d005c
+#define HID_DG_PEN_LINE_WIDTH 0x000d005e
+#define HID_DG_PEN_LINE_STYLE 0x000d0070
+#define HID_DG_PEN_LINE_STYLE_INK 0x000d0072
+#define HID_DG_PEN_LINE_STYLE_PENCIL 0x000d0073
+#define HID_DG_PEN_LINE_STYLE_HIGHLIGHTER 0x000d0074
+#define HID_DG_PEN_LINE_STYLE_CHISEL_MARKER 0x000d0075
+#define HID_DG_PEN_LINE_STYLE_BRUSH 0x000d0076
+#define HID_DG_PEN_LINE_STYLE_NO_PREFERENCE 0x000d0077
#define HID_CP_CONSUMERCONTROL 0x000c0001
#define HID_CP_NUMERICKEYPAD 0x000c0002
@@ -300,18 +317,32 @@ struct hid_item {
#define HID_DG_TOOLSERIALNUMBER 0x000d005b
#define HID_DG_LATENCYMODE 0x000d0060
+#define HID_HP_SIMPLECONTROLLER 0x000e0001
+#define HID_HP_WAVEFORMLIST 0x000e0010
+#define HID_HP_DURATIONLIST 0x000e0011
+#define HID_HP_AUTOTRIGGER 0x000e0020
+#define HID_HP_MANUALTRIGGER 0x000e0021
+#define HID_HP_AUTOTRIGGERASSOCIATEDCONTROL 0x000e0022
+#define HID_HP_INTENSITY 0x000e0023
+#define HID_HP_REPEATCOUNT 0x000e0024
+#define HID_HP_RETRIGGERPERIOD 0x000e0025
+#define HID_HP_WAVEFORMVENDORPAGE 0x000e0026
+#define HID_HP_WAVEFORMVENDORID 0x000e0027
+#define HID_HP_WAVEFORMCUTOFFTIME 0x000e0028
+#define HID_HP_WAVEFORMNONE 0x000e1001
+#define HID_HP_WAVEFORMSTOP 0x000e1002
+#define HID_HP_WAVEFORMCLICK 0x000e1003
+#define HID_HP_WAVEFORMBUZZCONTINUOUS 0x000e1004
+#define HID_HP_WAVEFORMRUMBLECONTINUOUS 0x000e1005
+#define HID_HP_WAVEFORMPRESS 0x000e1006
+#define HID_HP_WAVEFORMRELEASE 0x000e1007
+#define HID_HP_VENDORWAVEFORMMIN 0x000e2001
+#define HID_HP_VENDORWAVEFORMMAX 0x000e2fff
+
#define HID_BAT_ABSOLUTESTATEOFCHARGE 0x00850065
+#define HID_BAT_CHARGING 0x00850044
#define HID_VD_ASUS_CUSTOM_MEDIA_KEYS 0xff310076
-/*
- * HID report types --- Ouch! HID spec says 1 2 3!
- */
-
-#define HID_INPUT_REPORT 0
-#define HID_OUTPUT_REPORT 1
-#define HID_FEATURE_REPORT 2
-
-#define HID_REPORT_TYPES 3
/*
* HID connect requests
@@ -331,12 +362,38 @@ struct hid_item {
* HID device quirks.
*/
-/*
+/*
* Increase this if you need to configure more HID quirks at module load time
*/
#define MAX_USBHID_BOOT_QUIRKS 4
-#define HID_QUIRK_INVERT BIT(0)
+/**
+ * DOC: HID quirks
+ * | @HID_QUIRK_NOTOUCH:
+ * | @HID_QUIRK_IGNORE: ignore this device
+ * | @HID_QUIRK_NOGET:
+ * | @HID_QUIRK_HIDDEV_FORCE:
+ * | @HID_QUIRK_BADPAD:
+ * | @HID_QUIRK_MULTI_INPUT:
+ * | @HID_QUIRK_HIDINPUT_FORCE:
+ * | @HID_QUIRK_ALWAYS_POLL:
+ * | @HID_QUIRK_INPUT_PER_APP:
+ * | @HID_QUIRK_X_INVERT:
+ * | @HID_QUIRK_Y_INVERT:
+ * | @HID_QUIRK_IGNORE_MOUSE:
+ * | @HID_QUIRK_SKIP_OUTPUT_REPORTS:
+ * | @HID_QUIRK_SKIP_OUTPUT_REPORT_ID:
+ * | @HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP:
+ * | @HID_QUIRK_HAVE_SPECIAL_DRIVER:
+ * | @HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE:
+ * | @HID_QUIRK_IGNORE_SPECIAL_DRIVER:
+ * | @HID_QUIRK_POWER_ON_AFTER_BACKLIGHT:
+ * | @HID_QUIRK_FULLSPEED_INTERVAL:
+ * | @HID_QUIRK_NO_INIT_REPORTS:
+ * | @HID_QUIRK_NO_IGNORE:
+ * | @HID_QUIRK_NO_INPUT_SYNC:
+ */
+/* BIT(0) reserved for backward compatibility, was HID_QUIRK_INVERT */
#define HID_QUIRK_NOTOUCH BIT(1)
#define HID_QUIRK_IGNORE BIT(2)
#define HID_QUIRK_NOGET BIT(3)
@@ -348,11 +405,17 @@ struct hid_item {
/* BIT(9) reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */
#define HID_QUIRK_ALWAYS_POLL BIT(10)
#define HID_QUIRK_INPUT_PER_APP BIT(11)
+#define HID_QUIRK_X_INVERT BIT(12)
+#define HID_QUIRK_Y_INVERT BIT(13)
+#define HID_QUIRK_IGNORE_MOUSE BIT(14)
#define HID_QUIRK_SKIP_OUTPUT_REPORTS BIT(16)
#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID BIT(17)
#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18)
#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19)
#define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20)
+#define HID_QUIRK_NOINVERT BIT(21)
+#define HID_QUIRK_IGNORE_SPECIAL_DRIVER BIT(22)
+#define HID_QUIRK_POWER_ON_AFTER_BACKLIGHT BIT(23)
#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28)
#define HID_QUIRK_NO_INIT_REPORTS BIT(29)
#define HID_QUIRK_NO_IGNORE BIT(30)
@@ -386,6 +449,12 @@ struct hid_item {
#define HID_BOOT_PROTOCOL 0
/*
+ * HID units
+ */
+#define HID_UNIT_GRAM 0x0101
+#define HID_UNIT_NEWTON 0xe111
+
+/*
* This is the global environment of the parser. This information is
* persistent for main-items. The global environment can be saved and
* restored with PUSH/POP statements.
@@ -443,9 +512,9 @@ struct hid_usage {
__s8 wheel_factor; /* 120/resolution_multiplier */
__u16 code; /* input driver code */
__u8 type; /* input driver type */
- __s8 hat_min; /* hat switch fun */
- __s8 hat_max; /* ditto */
- __s8 hat_dir; /* ditto */
+ __s16 hat_min; /* hat switch fun */
+ __s16 hat_max; /* ditto */
+ __s16 hat_dir; /* ditto */
__s16 wheel_accumulated; /* hi-res wheel */
};
@@ -463,31 +532,50 @@ struct hid_field {
unsigned report_count; /* number of this field in the report */
unsigned report_type; /* (input,output,feature) */
__s32 *value; /* last known value(s) */
+ __s32 *new_value; /* newly read value(s) */
+ __s32 *usages_priorities; /* priority of each usage when reading the report
+ * bits 8-16 are reserved for hid-input usage
+ */
__s32 logical_minimum;
__s32 logical_maximum;
__s32 physical_minimum;
__s32 physical_maximum;
__s32 unit_exponent;
unsigned unit;
+ bool ignored; /* this field is ignored in this event */
struct hid_report *report; /* associated report */
unsigned index; /* index into report->field[] */
/* hidinput data */
struct hid_input *hidinput; /* associated input structure */
__u16 dpad; /* dpad input code */
+ unsigned int slot_idx; /* slot index in a report */
};
#define HID_MAX_FIELDS 256
+struct hid_field_entry {
+ struct list_head list;
+ struct hid_field *field;
+ unsigned int index;
+ __s32 priority;
+};
+
struct hid_report {
struct list_head list;
struct list_head hidinput_list;
+ struct list_head field_entry_list; /* ordered list of input fields */
unsigned int id; /* id of this report */
- unsigned int type; /* report type */
+ enum hid_report_type type; /* report type */
unsigned int application; /* application usage for this report */
struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */
+ struct hid_field_entry *field_entries; /* allocated memory of input field_entry */
unsigned maxfield; /* maximum valid field index */
unsigned size; /* size of the report (bits) */
struct hid_device *device; /* associated device */
+
+ /* tool related state */
+ bool tool_active; /* whether the current tool is active */
+ unsigned int tool; /* BTN_TOOL_* */
};
#define HID_MAX_IDS 256
@@ -529,9 +617,9 @@ struct hid_input {
struct hid_report *report;
struct input_dev *input;
const char *name;
- bool registered;
struct list_head reports; /* the list of reports */
unsigned int application; /* application usage for this input */
+ bool registered;
};
enum hid_type {
@@ -549,15 +637,17 @@ enum hid_battery_status {
struct hid_driver;
struct hid_ll_driver;
-struct hid_device { /* device report descriptor */
- __u8 *dev_rdesc;
- unsigned dev_rsize;
- __u8 *rdesc;
- unsigned rsize;
+struct hid_device {
+ const __u8 *dev_rdesc; /* device report descriptor */
+ const __u8 *bpf_rdesc; /* bpf modified report descriptor, if any */
+ const __u8 *rdesc; /* currently used report descriptor */
+ unsigned int dev_rsize;
+ unsigned int bpf_rsize;
+ unsigned int rsize;
+ unsigned int collection_size; /* Number of allocated hid_collections */
struct hid_collection *collection; /* List of HID collections */
- unsigned collection_size; /* Number of allocated hid_collections */
- unsigned maxcollection; /* Number of parsed collections */
- unsigned maxapplication; /* Number of applications */
+ unsigned int maxcollection; /* Number of parsed collections */
+ unsigned int maxapplication; /* Number of applications */
__u16 bus; /* BUS ID */
__u16 group; /* Report group */
__u32 vendor; /* Vendor ID */
@@ -571,8 +661,9 @@ struct hid_device { /* device report descriptor */
struct semaphore driver_input_lock; /* protects the current driver */
struct device dev; /* device */
struct hid_driver *driver;
+ void *devres_group_id; /* ID of probe devres group */
- struct hid_ll_driver *ll_driver;
+ const struct hid_ll_driver *ll_driver;
struct mutex ll_open_lock;
unsigned int ll_open_count;
@@ -588,6 +679,7 @@ struct hid_device { /* device report descriptor */
__s32 battery_max;
__s32 battery_report_type;
__s32 battery_report_id;
+ __s32 battery_charge_status;
enum hid_battery_status battery_status;
bool battery_avoid_query;
ktime_t battery_ratelimit_time;
@@ -596,6 +688,7 @@ struct hid_device { /* device report descriptor */
unsigned long status; /* see STAT flags above */
unsigned claimed; /* Claimed by hidinput, hiddev? */
unsigned quirks; /* Various quirks the device can pull on us */
+ unsigned initial_quirks; /* Initial set of quirks supplied when creating device */
bool io_started; /* If IO has started */
struct list_head inputs; /* The list of inputs */
@@ -626,8 +719,17 @@ struct hid_device { /* device report descriptor */
struct list_head debug_list;
spinlock_t debug_list_lock;
wait_queue_head_t debug_wait;
+ struct kref ref;
+
+ unsigned int id; /* system unique id */
+
+#ifdef CONFIG_HID_BPF
+ struct hid_bpf bpf; /* hid-bpf data */
+#endif /* CONFIG_HID_BPF */
};
+void hiddev_free(struct kref *ref);
+
#define to_hid_device(pdev) \
container_of(pdev, struct hid_device, dev)
@@ -671,8 +773,9 @@ struct hid_descriptor {
__le16 bcdHID;
__u8 bCountryCode;
__u8 bNumDescriptors;
+ struct hid_class_descriptor rpt_desc;
- struct hid_class_descriptor desc[1];
+ struct hid_class_descriptor opt_descs[];
} __attribute__ ((packed));
#define HID_DEVICE(b, g, ven, prod) \
@@ -723,6 +826,8 @@ struct hid_usage_id {
* @suspend: invoked on suspend (NULL means nop)
* @resume: invoked on resume if device was not reset (NULL means nop)
* @reset_resume: invoked on resume if device was reset (NULL means nop)
+ * @on_hid_hw_open: invoked when hid core opens first instance (NULL means nop)
+ * @on_hid_hw_close: invoked when hid core closes last instance (NULL means nop)
*
* probe should return -errno on error, or 0 on success. During probe,
* input will not be passed to raw_event unless hid_device_io_start is
@@ -742,7 +847,7 @@ struct hid_usage_id {
* zero from them.
*/
struct hid_driver {
- char *name;
+ const char *name;
const struct hid_device_id *id_table;
struct list_head dyn_list;
@@ -760,7 +865,7 @@ struct hid_driver {
struct hid_usage *usage, __s32 value);
void (*report)(struct hid_device *hdev, struct hid_report *report);
- __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf,
+ const __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf,
unsigned int *size);
int (*input_mapping)(struct hid_device *hdev,
@@ -774,11 +879,13 @@ struct hid_driver {
void (*feature_mapping)(struct hid_device *hdev,
struct hid_field *field,
struct hid_usage *usage);
-#ifdef CONFIG_PM
+
int (*suspend)(struct hid_device *hdev, pm_message_t message);
int (*resume)(struct hid_device *hdev);
int (*reset_resume)(struct hid_device *hdev);
-#endif
+ void (*on_hid_hw_open)(struct hid_device *hdev);
+ void (*on_hid_hw_close)(struct hid_device *hdev);
+
/* private: */
struct device_driver driver;
};
@@ -787,7 +894,7 @@ struct hid_driver {
container_of(pdrv, struct hid_driver, driver)
/**
- * hid_ll_driver - low level driver callbacks
+ * struct hid_ll_driver - low level driver callbacks
* @start: called on probe to start the device
* @stop: called on remove
* @open: called by input layer on open
@@ -800,6 +907,8 @@ struct hid_driver {
* @raw_request: send raw report request to device (e.g. feature report)
* @output_report: send output report to device
* @idle: send idle request to device
+ * @may_wakeup: return if device may act as a wakeup source during system-suspend
+ * @max_buffer_size: over-ride maximum data buffer size (default: HID_MAX_BUFFER_SIZE)
*/
struct hid_ll_driver {
int (*start)(struct hid_device *hdev);
@@ -824,18 +933,12 @@ struct hid_ll_driver {
int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len);
int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
-};
+ bool (*may_wakeup)(struct hid_device *hdev);
-extern struct hid_ll_driver i2c_hid_ll_driver;
-extern struct hid_ll_driver hidp_hid_driver;
-extern struct hid_ll_driver uhid_hid_driver;
-extern struct hid_ll_driver usb_hid_driver;
+ unsigned int max_buffer_size;
+};
-static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
- struct hid_ll_driver *driver)
-{
- return hdev->ll_driver == driver;
-}
+extern bool hid_is_usb(const struct hid_device *hdev);
#define PM_HINT_FULLON 1<<5
#define PM_HINT_NORMAL 1<<1
@@ -844,19 +947,17 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
/* We ignore a few input applications that are not widely used */
#define IS_INPUT_APPLICATION(a) \
(((a >= HID_UP_GENDESK) && (a <= HID_GD_MULTIAXIS)) \
- || ((a >= HID_DG_PEN) && (a <= HID_DG_WHITEBOARD)) \
+ || ((a >= HID_DG_DIGITIZER) && (a <= HID_DG_WHITEBOARD)) \
|| (a == HID_GD_SYSTEM_CONTROL) || (a == HID_CP_CONSUMER_CONTROL) \
|| (a == HID_GD_WIRELESS_RADIO_CTLS))
/* HID core API */
-extern int hid_debug;
-
extern bool hid_ignore(struct hid_device *);
extern int hid_add_device(struct hid_device *);
extern void hid_destroy_device(struct hid_device *);
-extern struct bus_type hid_bus_type;
+extern const struct bus_type hid_bus_type;
extern int __must_check __hid_register_driver(struct hid_driver *,
struct module *, const char *mod_name);
@@ -883,23 +984,26 @@ extern void hidinput_hid_event(struct hid_device *, struct hid_field *, struct h
extern void hidinput_report_event(struct hid_device *hid, struct hid_report *report);
extern int hidinput_connect(struct hid_device *hid, unsigned int force);
extern void hidinput_disconnect(struct hid_device *);
+void hidinput_reset_resume(struct hid_device *hid);
+struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
+ unsigned int application, unsigned int usage);
int hid_set_field(struct hid_field *, unsigned, __s32);
-int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
-int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
+int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
+ int interrupt);
struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
__s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code);
void hid_output_report(struct hid_report *report, __u8 *data);
-int __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
+int __hid_request(struct hid_device *hid, struct hid_report *rep, enum hid_class_request reqtype);
u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device,
- unsigned int type, unsigned int id,
+ enum hid_report_type type, unsigned int id,
unsigned int application);
-int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
+int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size);
struct hid_report *hid_validate_values(struct hid_device *hid,
- unsigned int type, unsigned int id,
+ enum hid_report_type type, unsigned int id,
unsigned int field_index,
unsigned int report_counts);
@@ -916,10 +1020,19 @@ const struct hid_device_id *hid_match_device(struct hid_device *hdev,
struct hid_driver *hdrv);
bool hid_compare_device_paths(struct hid_device *hdev_a,
struct hid_device *hdev_b, char separator);
-s32 hid_snto32(__u32 value, unsigned n);
__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
unsigned offset, unsigned n);
+#ifdef CONFIG_PM
+int hid_driver_suspend(struct hid_device *hdev, pm_message_t state);
+int hid_driver_reset_resume(struct hid_device *hdev);
+int hid_driver_resume(struct hid_device *hdev);
+#else
+static inline int hid_driver_suspend(struct hid_device *hdev, pm_message_t state) { return 0; }
+static inline int hid_driver_reset_resume(struct hid_device *hdev) { return 0; }
+static inline int hid_driver_resume(struct hid_device *hdev) { return 0; }
+#endif
+
/**
* hid_device_io_start - enable HID input during probe/remove
*
@@ -997,6 +1110,10 @@ static inline void hid_map_usage(struct hid_input *hidinput,
bmap = input->ledbit;
limit = LED_MAX;
break;
+ case EV_MSC:
+ bmap = input->mscbit;
+ limit = MSC_MAX;
+ break;
}
if (unlikely(c > limit || !bmap)) {
@@ -1053,6 +1170,20 @@ int __must_check hid_hw_start(struct hid_device *hdev,
void hid_hw_stop(struct hid_device *hdev);
int __must_check hid_hw_open(struct hid_device *hdev);
void hid_hw_close(struct hid_device *hdev);
+void hid_hw_request(struct hid_device *hdev,
+ struct hid_report *report, enum hid_class_request reqtype);
+int __hid_hw_raw_request(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, enum hid_report_type rtype,
+ enum hid_class_request reqtype,
+ __u64 source, bool from_bpf);
+int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, __u64 source,
+ bool from_bpf);
+int hid_hw_raw_request(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, enum hid_report_type rtype,
+ enum hid_class_request reqtype);
+int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len);
/**
* hid_hw_power - requests underlying HW to go into given power mode
@@ -1071,82 +1202,36 @@ static inline int hid_hw_power(struct hid_device *hdev, int level)
/**
- * hid_hw_request - send report request to device
+ * hid_hw_idle - send idle request to device
*
* @hdev: hid device
- * @report: report to send
+ * @report: report to control
+ * @idle: idle state
* @reqtype: hid request type
*/
-static inline void hid_hw_request(struct hid_device *hdev,
- struct hid_report *report, int reqtype)
-{
- if (hdev->ll_driver->request)
- return hdev->ll_driver->request(hdev, report, reqtype);
-
- __hid_request(hdev, report, reqtype);
-}
-
-/**
- * hid_hw_raw_request - send report request to device
- *
- * @hdev: hid device
- * @reportnum: report ID
- * @buf: in/out data to transfer
- * @len: length of buf
- * @rtype: HID report type
- * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
- *
- * Return: count of data transferred, negative if error
- *
- * Same behavior as hid_hw_request, but with raw buffers instead.
- */
-static inline int hid_hw_raw_request(struct hid_device *hdev,
- unsigned char reportnum, __u8 *buf,
- size_t len, unsigned char rtype, int reqtype)
+static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle,
+ enum hid_class_request reqtype)
{
- if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
- return -EINVAL;
+ if (hdev->ll_driver->idle)
+ return hdev->ll_driver->idle(hdev, report, idle, reqtype);
- return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
- rtype, reqtype);
+ return 0;
}
/**
- * hid_hw_output_report - send output report to device
+ * hid_hw_may_wakeup - return whether the hid device may act as a wakeup source during system suspend
*
* @hdev: hid device
- * @buf: raw data to transfer
- * @len: length of buf
- *
- * Return: count of data transferred, negative if error
*/
-static inline int hid_hw_output_report(struct hid_device *hdev, __u8 *buf,
- size_t len)
+static inline bool hid_hw_may_wakeup(struct hid_device *hdev)
{
- if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
- return -EINVAL;
+ if (hdev->ll_driver->may_wakeup)
+ return hdev->ll_driver->may_wakeup(hdev);
- if (hdev->ll_driver->output_report)
- return hdev->ll_driver->output_report(hdev, buf, len);
+ if (hdev->dev.parent)
+ return device_may_wakeup(hdev->dev.parent);
- return -ENOSYS;
-}
-
-/**
- * hid_hw_idle - send idle request to device
- *
- * @hdev: hid device
- * @report: report to control
- * @idle: idle state
- * @reqtype: hid request type
- */
-static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle,
- int reqtype)
-{
- if (hdev->ll_driver->idle)
- return hdev->ll_driver->idle(hdev, report, idle, reqtype);
-
- return 0;
+ return false;
}
/**
@@ -1163,33 +1248,26 @@ static inline void hid_hw_wait(struct hid_device *hdev)
/**
* hid_report_len - calculate the report length
*
- * @report: the report we want to know the length
+ * @report: the report whose length we want to know
+ *
+ * The length counts the report ID byte, but only if the ID is nonzero
+ * and therefore is included in the report. Reports whose ID is zero
+ * never include an ID byte.
*/
static inline u32 hid_report_len(struct hid_report *report)
{
- /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
- return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ return DIV_ROUND_UP(report->size, 8) + (report->id > 0);
}
-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
- int interrupt);
+int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
+ int interrupt);
/* HID quirks API */
unsigned long hid_lookup_quirk(const struct hid_device *hdev);
int hid_quirks_init(char **quirks_param, __u16 bus, int count);
void hid_quirks_exit(__u16 bus);
-#ifdef CONFIG_HID_PID
-int hid_pidff_init(struct hid_device *hid);
-#else
-#define hid_pidff_init NULL
-#endif
-
-#define dbg_hid(fmt, ...) \
-do { \
- if (hid_debug) \
- printk(KERN_DEBUG "%s: " fmt, __FILE__, ##__VA_ARGS__); \
-} while (0)
+#define dbg_hid(fmt, ...) pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__)
#define hid_err(hid, fmt, ...) \
dev_err(&(hid)->dev, fmt, ##__VA_ARGS__)
@@ -1197,6 +1275,8 @@ do { \
dev_notice(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_warn(hid, fmt, ...) \
dev_warn(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_warn_ratelimited(hid, fmt, ...) \
+ dev_warn_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_info(hid, fmt, ...) \
dev_info(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_dbg(hid, fmt, ...) \
@@ -1213,4 +1293,15 @@ do { \
#define hid_dbg_once(hid, fmt, ...) \
dev_dbg_once(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_err_ratelimited(hid, fmt, ...) \
+ dev_err_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_notice_ratelimited(hid, fmt, ...) \
+ dev_notice_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_info_ratelimited(hid, fmt, ...) \
+ dev_info_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_dbg_ratelimited(hid, fmt, ...) \
+ dev_dbg_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
+
#endif
diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h
new file mode 100644
index 000000000000..a2e47dbcf82c
--- /dev/null
+++ b/include/linux/hid_bpf.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __HID_BPF_H
+#define __HID_BPF_H
+
+#include <linux/bpf.h>
+#include <linux/mutex.h>
+#include <linux/srcu.h>
+#include <uapi/linux/hid.h>
+
+struct hid_device;
+
+/*
+ * The following is the user facing HID BPF API.
+ *
+ * Extra care should be taken when editing this part, as
+ * it might break existing out-of-tree BPF programs.
+ */
+
+/**
+ * struct hid_bpf_ctx - User-accessible data for all HID programs
+ *
+ * ``data`` is not directly accessible from the context. We need to issue
+ * a call to hid_bpf_get_data() in order to get a pointer to that field.
+ *
+ * @hid: the &struct hid_device representing the device itself
+ * @allocated_size: Allocated size of data.
+ *
+ * This is how much memory is available and can be requested
+ * by the HID program.
+ * Note that for ``HID_BPF_RDESC_FIXUP``, that memory is set to
+ * ``4096`` (4 KB).
+ * @size: Valid data in the data field.
+ *
+ * Programs can get the available valid size in data by fetching this field.
+ * Programs can also change this value by returning a positive number in the
+ * program.
+ * To discard the event, return a negative error code.
+ *
+ * ``size`` must always be less than or equal to ``allocated_size`` (it is enforced
+ * once all BPF programs have been run).
+ * @retval: Return value of the previous program.
+ *
+ * ``hid`` and ``allocated_size`` are read-only, ``size`` and ``retval`` are read-write.
+ */
+struct hid_bpf_ctx {
+ struct hid_device *hid;
+ __u32 allocated_size;
+ union {
+ __s32 retval;
+ __s32 size;
+ };
+};
+
+/*
+ * Below is HID internal
+ */
+
+#define HID_BPF_MAX_PROGS_PER_DEV 64
+#define HID_BPF_FLAG_MASK (((HID_BPF_FLAG_MAX - 1) << 1) - 1)
+
+struct hid_report_enum;
+
+struct hid_ops {
+ struct hid_report *(*hid_get_report)(struct hid_report_enum *report_enum, const u8 *data);
+ int (*hid_hw_raw_request)(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, enum hid_report_type rtype,
+ enum hid_class_request reqtype,
+ u64 source, bool from_bpf);
+ int (*hid_hw_output_report)(struct hid_device *hdev, __u8 *buf, size_t len,
+ u64 source, bool from_bpf);
+ int (*hid_input_report)(struct hid_device *hid, enum hid_report_type type,
+ u8 *data, u32 size, int interrupt, u64 source, bool from_bpf,
+ bool lock_already_taken);
+ struct module *owner;
+ const struct bus_type *bus_type;
+};
+
+extern const struct hid_ops *hid_ops;
+
+/**
+ * struct hid_bpf_ops - A BPF struct_ops of callbacks allowing to attach HID-BPF
+ * programs to a HID device
+ * @hid_id: the HID unique ID to attach to. This is writeable before ``load()``, and
+ *          cannot be changed afterwards
+ * @flags: flags used while attaching the struct_ops to the device. Currently
+ *         the only available values are %0 and ``BPF_F_BEFORE``.
+ * Writeable only before ``load()``
+ */
+struct hid_bpf_ops {
+ /* hid_id needs to stay first so we can easily change it
+ * from userspace.
+ */
+ int hid_id;
+ u32 flags;
+
+ /* private: do not show up in the docs */
+ struct list_head list;
+
+ /* public: rest should show up in the docs */
+
+ /**
+ * @hid_device_event: called whenever an event is coming in from the device
+ *
+ * It has the following arguments:
+ *
+ * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+ *
+ * Return: %0 on success and keep processing; a positive
+	 * value to change the size of the incoming data buffer; a negative
+ * error code to interrupt the processing of this event
+ *
+ * Context: Interrupt context.
+ */
+ int (*hid_device_event)(struct hid_bpf_ctx *ctx, enum hid_report_type report_type,
+ u64 source);
+
+ /**
+ * @hid_rdesc_fixup: called when the probe function parses the report descriptor
+ * of the HID device
+ *
+ * It has the following arguments:
+ *
+ * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+ *
+ * Return: %0 on success and keep processing; a positive
+	 * value to change the size of the report descriptor; a negative
+ * error code to interrupt the processing of this device
+ */
+ int (*hid_rdesc_fixup)(struct hid_bpf_ctx *ctx);
+
+ /**
+ * @hid_hw_request: called whenever a hid_hw_raw_request() call is emitted
+ * on the HID device
+ *
+ * It has the following arguments:
+ *
+ * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+ *
+ * ``reportnum``: the report number, as in hid_hw_raw_request()
+ *
+ * ``rtype``: the report type (``HID_INPUT_REPORT``, ``HID_FEATURE_REPORT``,
+ * ``HID_OUTPUT_REPORT``)
+ *
+ * ``reqtype``: the request
+ *
+	 * ``source``: a u64 referring to a unique but identifiable source. If %0, the
+ * kernel itself emitted that call. For hidraw, ``source`` is set
+ * to the associated ``struct file *``.
+ *
+	 * Return: %0 to let hid-core keep processing the request; any other value
+	 * stops hid-core from processing that request. A positive value should be
+ * returned with the number of bytes returned in the incoming buffer; a
+ * negative error code interrupts the processing of this call.
+ */
+ int (*hid_hw_request)(struct hid_bpf_ctx *ctx, unsigned char reportnum,
+ enum hid_report_type rtype, enum hid_class_request reqtype,
+ u64 source);
+
+ /**
+ * @hid_hw_output_report: called whenever a hid_hw_output_report() call is emitted
+ * on the HID device
+ *
+ * It has the following arguments:
+ *
+ * ``ctx``: The HID-BPF context as &struct hid_bpf_ctx
+ *
+	 * ``source``: a u64 referring to a unique but identifiable source. If %0, the
+ * kernel itself emitted that call. For hidraw, ``source`` is set
+ * to the associated ``struct file *``.
+ *
+	 * Return: %0 to let hid-core keep processing the request; any other value
+	 * stops hid-core from processing that request. A positive value should be
+ * returned with the number of bytes written to the device; a negative error
+ * code interrupts the processing of this call.
+ */
+ int (*hid_hw_output_report)(struct hid_bpf_ctx *ctx, u64 source);
+
+ /* private: do not show up in the docs */
+ struct hid_device *hdev;
+};
+
+/* stored in each device */
+struct hid_bpf {
+ u8 *device_data; /* allocated when a bpf program of type
+ * SEC(f.../hid_bpf_device_event) has been attached
+ * to this HID device
+ */
+ u32 allocated_data;
+ bool destroyed; /* prevents the assignment of any progs */
+
+ struct hid_bpf_ops *rdesc_ops;
+ struct list_head prog_list;
+ struct mutex prog_list_lock; /* protects prog_list update */
+ struct srcu_struct srcu; /* protects prog_list read-only access */
+};
+
+#ifdef CONFIG_HID_BPF
+u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type, u8 *data,
+ u32 *size, int interrupt, u64 source, bool from_bpf);
+int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ u32 size, enum hid_report_type rtype,
+ enum hid_class_request reqtype,
+ u64 source, bool from_bpf);
+int dispatch_hid_bpf_output_report(struct hid_device *hdev, __u8 *buf, u32 size,
+ u64 source, bool from_bpf);
+int hid_bpf_connect_device(struct hid_device *hdev);
+void hid_bpf_disconnect_device(struct hid_device *hdev);
+void hid_bpf_destroy_device(struct hid_device *hid);
+int hid_bpf_device_init(struct hid_device *hid);
+const u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size);
+#else /* CONFIG_HID_BPF */
+static inline u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type,
+ u8 *data, u32 *size, int interrupt,
+ u64 source, bool from_bpf) { return data; }
+static inline int dispatch_hid_bpf_raw_requests(struct hid_device *hdev,
+ unsigned char reportnum, u8 *buf,
+ u32 size, enum hid_report_type rtype,
+ enum hid_class_request reqtype,
+ u64 source, bool from_bpf) { return 0; }
+static inline int dispatch_hid_bpf_output_report(struct hid_device *hdev, __u8 *buf, u32 size,
+ u64 source, bool from_bpf) { return 0; }
+static inline int hid_bpf_connect_device(struct hid_device *hdev) { return 0; }
+static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {}
+static inline void hid_bpf_destroy_device(struct hid_device *hid) {}
+static inline int hid_bpf_device_init(struct hid_device *hid) { return 0; }
+static inline const u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc,
+ unsigned int *size) { return rdesc; }
+
+#endif /* CONFIG_HID_BPF */
+
+#endif /* __HID_BPF_H */
diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h
index cd67f4ca5599..18fd30a288de 100644
--- a/include/linux/hidraw.h
+++ b/include/linux/hidraw.h
@@ -32,6 +32,7 @@ struct hidraw_list {
struct hidraw *hidraw;
struct list_head node;
struct mutex read_mutex;
+ bool revoked;
};
#ifdef CONFIG_HIDRAW
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index 7902c7d8b55f..0574c21ca45d 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -7,8 +7,8 @@
*/
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
-void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
-void kunmap_local_indexed(void *vaddr);
+void *__kmap_local_page_prot(const struct page *page, pgprot_t prot);
+void kunmap_local_indexed(const void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
@@ -33,7 +33,7 @@ static inline void kmap_flush_tlb(unsigned long addr) { }
#endif
void *kmap_high(struct page *page);
-void kunmap_high(struct page *page);
+void kunmap_high(const struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);
@@ -50,7 +50,7 @@ static inline void *kmap(struct page *page)
return addr;
}
-static inline void kunmap(struct page *page)
+static inline void kunmap(const struct page *page)
{
might_sleep();
if (!PageHighMem(page))
@@ -68,12 +68,26 @@ static inline void kmap_flush_unused(void)
__kmap_flush_unused();
}
-static inline void *kmap_local_page(struct page *page)
+static inline void *kmap_local_page(const struct page *page)
{
return __kmap_local_page_prot(page, kmap_prot);
}
-static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_local_page_try_from_panic(const struct page *page)
+{
+ if (!PageHighMem(page))
+ return page_address(page);
+	/* If the page is in HighMem, it's not safe to kmap it. */
+ return NULL;
+}
+
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
+{
+ const struct page *page = folio_page(folio, offset / PAGE_SIZE);
+ return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
+}
+
+static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
{
return __kmap_local_page_prot(page, prot);
}
@@ -83,55 +97,69 @@ static inline void *kmap_local_pfn(unsigned long pfn)
return __kmap_local_pfn_prot(pfn, kmap_prot);
}
-static inline void __kunmap_local(void *vaddr)
+static inline void __kunmap_local(const void *vaddr)
{
kunmap_local_indexed(vaddr);
}
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
{
- preempt_disable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
+
pagefault_disable();
return __kmap_local_page_prot(page, prot);
}
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic(const struct page *page)
{
return kmap_atomic_prot(page, kmap_prot);
}
static inline void *kmap_atomic_pfn(unsigned long pfn)
{
- preempt_disable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
+
pagefault_disable();
return __kmap_local_pfn_prot(pfn, kmap_prot);
}
-static inline void __kunmap_atomic(void *addr)
+static inline void __kunmap_atomic(const void *addr)
{
kunmap_local_indexed(addr);
pagefault_enable();
- preempt_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_enable();
+ else
+ preempt_enable();
}
-unsigned int __nr_free_highpages(void);
-extern atomic_long_t _totalhigh_pages;
+unsigned long __nr_free_highpages(void);
+unsigned long __totalhigh_pages(void);
-static inline unsigned int nr_free_highpages(void)
+static inline unsigned long nr_free_highpages(void)
{
return __nr_free_highpages();
}
static inline unsigned long totalhigh_pages(void)
{
- return (unsigned long)atomic_long_read(&_totalhigh_pages);
+ return __totalhigh_pages();
}
-static inline void totalhigh_pages_add(long count)
+static inline bool is_kmap_addr(const void *x)
{
- atomic_long_add(count, &_totalhigh_pages);
-}
+ unsigned long addr = (unsigned long)x;
+ return (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) ||
+ (addr >= __fix_to_virt(FIX_KMAP_END) &&
+ addr < __fix_to_virt(FIX_KMAP_BEGIN));
+}
#else /* CONFIG_HIGHMEM */
static inline struct page *kmap_to_page(void *addr)
@@ -145,22 +173,32 @@ static inline void *kmap(struct page *page)
return page_address(page);
}
-static inline void kunmap_high(struct page *page) { }
+static inline void kunmap_high(const struct page *page) { }
static inline void kmap_flush_unused(void) { }
-static inline void kunmap(struct page *page)
+static inline void kunmap(const struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
kunmap_flush_on_unmap(page_address(page));
#endif
}
-static inline void *kmap_local_page(struct page *page)
+static inline void *kmap_local_page(const struct page *page)
{
return page_address(page);
}
-static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_local_page_try_from_panic(const struct page *page)
+{
+ return page_address(page);
+}
+
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
+{
+ return folio_address(folio) + offset;
+}
+
+static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
{
return kmap_local_page(page);
}
@@ -170,21 +208,24 @@ static inline void *kmap_local_pfn(unsigned long pfn)
return kmap_local_page(pfn_to_page(pfn));
}
-static inline void __kunmap_local(void *addr)
+static inline void __kunmap_local(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
- kunmap_flush_on_unmap(addr);
+ kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
}
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic(const struct page *page)
{
- preempt_disable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_disable();
+ else
+ preempt_disable();
pagefault_disable();
return page_address(page);
}
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
{
return kmap_atomic(page);
}
@@ -194,23 +235,43 @@ static inline void *kmap_atomic_pfn(unsigned long pfn)
return kmap_atomic(pfn_to_page(pfn));
}
-static inline void __kunmap_atomic(void *addr)
+static inline void __kunmap_atomic(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
- kunmap_flush_on_unmap(addr);
+ kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
pagefault_enable();
- preempt_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ migrate_enable();
+ else
+ preempt_enable();
}
-static inline unsigned int nr_free_highpages(void) { return 0; }
-static inline unsigned long totalhigh_pages(void) { return 0UL; }
+static inline unsigned long nr_free_highpages(void) { return 0; }
+static inline unsigned long totalhigh_pages(void) { return 0; }
+
+static inline bool is_kmap_addr(const void *x)
+{
+ return false;
+}
#endif /* CONFIG_HIGHMEM */
-/*
- * Prevent people trying to call kunmap_atomic() as if it were kunmap()
- * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+/**
+ * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
+ * @__addr: Virtual address to be unmapped
+ *
+ * Unmaps an address previously mapped by kmap_atomic() and re-enables
+ * pagefaults. Depending on the PREEMPT_RT configuration, it also re-enables
+ * migration and preemption. Users should not count on these side effects.
+ *
+ * Mappings should be unmapped in the reverse order that they were mapped.
+ * See kmap_local_page() for details on nesting.
+ *
+ * @__addr can be any address within the mapped page, so there is no need
+ * to subtract any offset that has been added. In contrast to kunmap(),
+ * this function takes the address returned from kmap_atomic(), not the
+ * page passed to it. The compiler will warn you if you pass the page.
*/
#define kunmap_atomic(__addr) \
do { \
@@ -218,6 +279,16 @@ do { \
__kunmap_atomic(__addr); \
} while (0)
+/**
+ * kunmap_local - Unmap a page mapped via kmap_local_page().
+ * @__addr: An address within the page mapped
+ *
+ * @__addr can be any address within the mapped page. Commonly it is the
+ * address returned from kmap_local_page(), but it can also include offsets.
+ *
+ * Unmapping should be done in the reverse order of the mapping. See
+ * kmap_local_page() for details.
+ */
#define kunmap_local(__addr) \
do { \
BUILD_BUG_ON(__same_type((__addr), struct page *)); \
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 832b49b50c7b..abc20f9810fd 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -5,12 +5,12 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
+#include <linux/cacheflush.h>
+#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
-#include <asm/cacheflush.h>
-
#include "highmem-internal.h"
/**
@@ -38,12 +38,12 @@ static inline void *kmap(struct page *page);
/**
* kunmap - Unmap the virtual address mapped by kmap()
- * @addr: Virtual address to be unmapped
+ * @page: Pointer to the page which was mapped by kmap()
*
* Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
* pages in the low memory area.
*/
-static inline void kunmap(struct page *page);
+static inline void kunmap(const struct page *page);
/**
* kmap_to_page - Get the page for a kmap'ed address
@@ -61,11 +61,11 @@ static inline void kmap_flush_unused(void);
/**
* kmap_local_page - Map a page for temporary usage
- * @page: Pointer to the page to be mapped
+ * @page: Pointer to the page to be mapped
*
* Returns: The virtual address of the mapping
*
- * Can be invoked from any context.
+ * Can be invoked from any context, including interrupts.
*
* Requires careful handling when nesting multiple mappings because the map
* management is stack based. The unmap has to be in the reverse order of
@@ -86,15 +86,50 @@ static inline void kmap_flush_unused(void);
* virtual address of the direct mapping. Only real highmem pages are
* temporarily mapped.
*
- * While it is significantly faster than kmap() for the higmem case it
- * comes with restrictions about the pointer validity. Only use when really
- * necessary.
+ * While kmap_local_page() is significantly faster than kmap() for the highmem
+ * case it comes with restrictions about the pointer validity.
*
* On HIGHMEM enabled systems mapping a highmem page has the side effect of
* disabling migration in order to keep the virtual address stable across
* preemption. No caller of kmap_local_page() can rely on this side effect.
*/
-static inline void *kmap_local_page(struct page *page);
+static inline void *kmap_local_page(const struct page *page);
+
+/**
+ * kmap_local_folio - Map a page in this folio for temporary usage
+ * @folio: The folio containing the page.
+ * @offset: The byte offset within the folio which identifies the page.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation::
+ *
+ * addr1 = kmap_local_folio(folio1, offset1);
+ * addr2 = kmap_local_folio(folio2, offset2);
+ * ...
+ * kunmap_local(addr2);
+ * kunmap_local(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
+ *
+ * Contrary to kmap() mappings the mapping is only valid in the context of
+ * the caller and cannot be handed to other contexts.
+ *
+ * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+ * While it is significantly faster than kmap() for the highmem case it
+ * comes with restrictions about the pointer validity.
+ *
+ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
+ * disabling migration in order to keep the virtual address stable across
+ * preemption. No caller of kmap_local_folio() can rely on this side effect.
+ *
+ * Context: Can be invoked from any context.
+ * Return: The virtual address of @offset.
+ */
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset);
/**
* kmap_atomic - Atomically map a page for temporary usage - Deprecated!
@@ -102,26 +137,49 @@ static inline void *kmap_local_page(struct page *page);
*
* Returns: The virtual address of the mapping
*
- * Effectively a wrapper around kmap_local_page() which disables pagefaults
- * and preemption.
+ * In fact a wrapper around kmap_local_page() which also disables pagefaults
+ * and, depending on the PREEMPT_RT configuration, CPU migration and
+ * preemption. Therefore users should not count on the latter two side effects.
+ *
+ * Mappings should always be released by kunmap_atomic().
*
* Do not use in new code. Use kmap_local_page() instead.
- */
-static inline void *kmap_atomic(struct page *page);
-
-/**
- * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
- * @addr: Virtual address to be unmapped
*
- * Counterpart to kmap_atomic().
+ * It is used in atomic context when code wants to access the contents of a
+ * page that might be allocated from high memory (see __GFP_HIGHMEM), for
+ * example a page in the pagecache. The API has two functions, and they
+ * can be used in a manner similar to the following::
+ *
+ * // Find the page of interest.
+ * struct page *page = find_get_page(mapping, offset);
+ *
+ * // Gain access to the contents of that page.
+ * void *vaddr = kmap_atomic(page);
+ *
+ * // Do something to the contents of that page.
+ * memset(vaddr, 0, PAGE_SIZE);
*
- * Effectively a wrapper around kunmap_local() which additionally undoes
- * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
- * preemption.
+ * // Unmap that page.
+ * kunmap_atomic(vaddr);
+ *
+ * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
+ * call, not the argument.
+ *
+ * If you need to map two pages because you want to copy from one page to
+ * another, you need to keep the kmap_atomic calls strictly nested, like::
+ *
+ * vaddr1 = kmap_atomic(page1);
+ * vaddr2 = kmap_atomic(page2);
+ *
+ * memcpy(vaddr1, vaddr2, PAGE_SIZE);
+ *
+ * kunmap_atomic(vaddr2);
+ * kunmap_atomic(vaddr1);
*/
+static inline void *kmap_atomic(const struct page *page);
/* Highmem related interfaces for management code */
-static inline unsigned int nr_free_highpages(void);
+static inline unsigned long nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
@@ -130,10 +188,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page
}
#endif
-#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
@@ -146,77 +201,77 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
- void *addr = kmap_atomic(page);
+ void *addr = kmap_local_page(page);
clear_user_page(addr, vaddr, page);
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
#endif
-#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#ifndef vma_alloc_zeroed_movable_folio
/**
- * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
- * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * This function will allocate a page for a VMA but the caller is expected
- * to specify via movableflags whether the page will be movable in the
- * future or not
- *
- * An architecture may override this function by defining
- * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
- * implementation.
+ * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
+ * @vma: The VMA the page is to be allocated for.
+ * @vaddr: The virtual address the page will be inserted into.
+ *
+ * This function will allocate a page suitable for inserting into this
+ * VMA at this virtual address. It may be allocated from highmem or
+ * the movable zone. An architecture may provide its own implementation.
+ *
+ * Return: A folio containing one allocated and zeroed page or NULL if
+ * we are out of memory.
*/
-static inline struct page *
-__alloc_zeroed_user_highpage(gfp_t movableflags,
- struct vm_area_struct *vma,
- unsigned long vaddr)
+static inline
+struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
+ unsigned long vaddr)
{
- struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
- vma, vaddr);
+ struct folio *folio;
- if (page)
- clear_user_highpage(page, vaddr);
+ folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
+ if (folio && user_alloc_needs_zeroing())
+ clear_user_highpage(&folio->page, vaddr);
- return page;
+ return folio;
}
#endif
-/**
- * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
- * @vma: The VMA the page is to be allocated for
- * @vaddr: The virtual address the page will be inserted into
- *
- * This function will allocate a page for a VMA that the caller knows will
- * be able to migrate in the future using move_pages() or reclaimed
- */
-static inline struct page *
-alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
- unsigned long vaddr)
+static inline void clear_highpage(struct page *page)
{
- return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+ void *kaddr = kmap_local_page(page);
+ clear_page(kaddr);
+ kunmap_local(kaddr);
}
-static inline void clear_highpage(struct page *page)
+static inline void clear_highpage_kasan_tagged(struct page *page)
{
- void *kaddr = kmap_atomic(page);
- clear_page(kaddr);
- kunmap_atomic(kaddr);
+ void *kaddr = kmap_local_page(page);
+
+ clear_page(kasan_reset_tag(kaddr));
+ kunmap_local(kaddr);
+}
+
+#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGES
+
+/* Return false to let people know we did not initialize the pages */
+static inline bool tag_clear_highpages(struct page *page, int numpages)
+{
+ return false;
}
+#endif
+
/*
* If we pass in a base or tail page, we can zero up to PAGE_SIZE.
* If we pass in a head page, we can zero up to the size of the compound page.
*/
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
unsigned start2, unsigned end2);
-#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
+#else
static inline void zero_user_segments(struct page *page,
unsigned start1, unsigned end1,
unsigned start2, unsigned end2)
{
- void *kaddr = kmap_atomic(page);
+ void *kaddr = kmap_local_page(page);
unsigned int i;
BUG_ON(end1 > page_size(page) || end2 > page_size(page));
@@ -227,11 +282,11 @@ static inline void zero_user_segments(struct page *page,
if (end2 > start2)
memset(kaddr + start2, 0, end2 - start2);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
for (i = 0; i < compound_nr(page); i++)
flush_dcache_page(page + i);
}
-#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
+#endif
static inline void zero_user_segment(struct page *page,
unsigned start, unsigned end)
@@ -239,12 +294,6 @@ static inline void zero_user_segment(struct page *page,
zero_user_segments(page, start, end, 0, 0);
}
-static inline void zero_user(struct page *page,
- unsigned start, unsigned size)
-{
- zero_user_segments(page, start, start + size, 0, 0);
-}
-
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
static inline void copy_user_highpage(struct page *to, struct page *from,
@@ -252,11 +301,12 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
{
char *vfrom, *vto;
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
copy_user_page(vto, vfrom, vaddr, to);
- kunmap_atomic(vto);
- kunmap_atomic(vfrom);
+ kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
}
#endif
@@ -267,13 +317,74 @@ static inline void copy_highpage(struct page *to, struct page *from)
{
char *vfrom, *vto;
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
copy_page(vto, vfrom);
- kunmap_atomic(vto);
- kunmap_atomic(vfrom);
+ kmsan_copy_page_meta(to, from);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
+}
+
+#endif
+
+#ifdef copy_mc_to_kernel
+/*
+ * If architecture supports machine check exception handling, define the
+ * #MC versions of copy_user_highpage and copy_highpage. They copy a memory
+ * page with #MC in source page (@from) handled, and return the number
+ * of bytes not copied if there was a #MC, otherwise 0 for success.
+ */
+static inline int copy_mc_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ unsigned long ret;
+ char *vfrom, *vto;
+
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
+ if (!ret)
+ kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
+
+ if (ret)
+ memory_failure_queue(page_to_pfn(from), 0);
+
+ return ret;
+}
+
+static inline int copy_mc_highpage(struct page *to, struct page *from)
+{
+ unsigned long ret;
+ char *vfrom, *vto;
+
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
+ if (!ret)
+ kmsan_copy_page_meta(to, from);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
+
+ if (ret)
+ memory_failure_queue(page_to_pfn(from), 0);
+
+ return ret;
+}
+#else
+static inline int copy_mc_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ copy_user_highpage(to, from, vaddr, vma);
+ return 0;
}
+static inline int copy_mc_highpage(struct page *to, struct page *from)
+{
+ copy_highpage(to, from);
+ return 0;
+}
#endif
static inline void memcpy_page(struct page *dst_page, size_t dst_off,
@@ -289,17 +400,31 @@ static inline void memcpy_page(struct page *dst_page, size_t dst_off,
kunmap_local(dst);
}
-static inline void memmove_page(struct page *dst_page, size_t dst_off,
- struct page *src_page, size_t src_off,
- size_t len)
+static inline void memcpy_folio(struct folio *dst_folio, size_t dst_off,
+ struct folio *src_folio, size_t src_off, size_t len)
{
- char *dst = kmap_local_page(dst_page);
- char *src = kmap_local_page(src_page);
-
- VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
- memmove(dst + dst_off, src + src_off, len);
- kunmap_local(src);
- kunmap_local(dst);
+ VM_BUG_ON(dst_off + len > folio_size(dst_folio));
+ VM_BUG_ON(src_off + len > folio_size(src_folio));
+
+ do {
+ char *dst = kmap_local_folio(dst_folio, dst_off);
+ const char *src = kmap_local_folio(src_folio, src_off);
+ size_t chunk = len;
+
+ if (folio_test_highmem(dst_folio) &&
+ chunk > PAGE_SIZE - offset_in_page(dst_off))
+ chunk = PAGE_SIZE - offset_in_page(dst_off);
+ if (folio_test_highmem(src_folio) &&
+ chunk > PAGE_SIZE - offset_in_page(src_off))
+ chunk = PAGE_SIZE - offset_in_page(src_off);
+ memcpy(dst, src, chunk);
+ kunmap_local(src);
+ kunmap_local(dst);
+
+ dst_off += chunk;
+ src_off += chunk;
+ len -= chunk;
+ } while (len > 0);
}
static inline void memset_page(struct page *page, size_t offset, int val,
@@ -329,14 +454,234 @@ static inline void memcpy_to_page(struct page *page, size_t offset,
VM_BUG_ON(offset + len > PAGE_SIZE);
memcpy(to + offset, from, len);
+ flush_dcache_page(page);
kunmap_local(to);
}
static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
- char *addr = kmap_atomic(page);
+ char *addr = kmap_local_page(page);
+
+ VM_BUG_ON(offset + len > PAGE_SIZE);
memset(addr + offset, 0, len);
- kunmap_atomic(addr);
+ flush_dcache_page(page);
+ kunmap_local(addr);
+}
+
+/**
+ * memcpy_from_folio - Copy a range of bytes from a folio.
+ * @to: The memory to copy to.
+ * @folio: The folio to read from.
+ * @offset: The first byte in the folio to read.
+ * @len: The number of bytes to copy.
+ */
+static inline void memcpy_from_folio(char *to, struct folio *folio,
+ size_t offset, size_t len)
+{
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ do {
+ const char *from = kmap_local_folio(folio, offset);
+ size_t chunk = len;
+
+ if (folio_test_partial_kmap(folio) &&
+ chunk > PAGE_SIZE - offset_in_page(offset))
+ chunk = PAGE_SIZE - offset_in_page(offset);
+ memcpy(to, from, chunk);
+ kunmap_local(from);
+
+ to += chunk;
+ offset += chunk;
+ len -= chunk;
+ } while (len > 0);
+}
+
+/**
+ * memcpy_to_folio - Copy a range of bytes to a folio.
+ * @folio: The folio to write to.
+ * @offset: The first byte in the folio to store to.
+ * @from: The memory to copy from.
+ * @len: The number of bytes to copy.
+ */
+static inline void memcpy_to_folio(struct folio *folio, size_t offset,
+ const char *from, size_t len)
+{
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ do {
+ char *to = kmap_local_folio(folio, offset);
+ size_t chunk = len;
+
+ if (folio_test_partial_kmap(folio) &&
+ chunk > PAGE_SIZE - offset_in_page(offset))
+ chunk = PAGE_SIZE - offset_in_page(offset);
+ memcpy(to, from, chunk);
+ kunmap_local(to);
+
+ from += chunk;
+ offset += chunk;
+ len -= chunk;
+ } while (len > 0);
+
+ flush_dcache_folio(folio);
+}
+
+/**
+ * folio_zero_tail - Zero the tail of a folio.
+ * @folio: The folio to zero.
+ * @offset: The byte offset in the folio to start zeroing at.
+ * @kaddr: The address the folio is currently mapped to.
+ *
+ * If you have already used kmap_local_folio() to map a folio, written
+ * some data to it and now need to zero the end of the folio (and flush
+ * the dcache), you can use this function. If you do not have the
+ * folio kmapped (eg the folio has been partially populated by DMA),
+ * use folio_zero_range() or folio_zero_segment() instead.
+ *
+ * Return: An address which can be passed to kunmap_local().
+ */
+static inline __must_check void *folio_zero_tail(struct folio *folio,
+ size_t offset, void *kaddr)
+{
+ size_t len = folio_size(folio) - offset;
+
+ if (folio_test_partial_kmap(folio)) {
+ size_t max = PAGE_SIZE - offset_in_page(offset);
+
+ while (len > max) {
+ memset(kaddr, 0, max);
+ kunmap_local(kaddr);
+ len -= max;
+ offset += max;
+ max = PAGE_SIZE;
+ kaddr = kmap_local_folio(folio, offset);
+ }
+ }
+
+ memset(kaddr, 0, len);
+ flush_dcache_folio(folio);
+
+ return kaddr;
}
+/**
+ * folio_fill_tail - Copy some data to a folio and pad with zeroes.
+ * @folio: The destination folio.
+ * @offset: The offset into @folio at which to start copying.
+ * @from: The data to copy.
+ * @len: How many bytes of data to copy.
+ *
+ * This function is most useful for filesystems which support inline data.
+ * When they want to copy data from the inode into the page cache, this
+ * function does everything for them. It supports large folios even on
+ * HIGHMEM configurations.
+ */
+static inline void folio_fill_tail(struct folio *folio, size_t offset,
+ const char *from, size_t len)
+{
+ char *to = kmap_local_folio(folio, offset);
+
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ if (folio_test_partial_kmap(folio)) {
+ size_t max = PAGE_SIZE - offset_in_page(offset);
+
+ while (len > max) {
+ memcpy(to, from, max);
+ kunmap_local(to);
+ len -= max;
+ from += max;
+ offset += max;
+ max = PAGE_SIZE;
+ to = kmap_local_folio(folio, offset);
+ }
+ }
+
+ memcpy(to, from, len);
+ to = folio_zero_tail(folio, offset + len, to + len);
+ kunmap_local(to);
+}
+
+/**
+ * memcpy_from_file_folio - Copy some bytes from a file folio.
+ * @to: The destination buffer.
+ * @folio: The folio to copy from.
+ * @pos: The position in the file.
+ * @len: The maximum number of bytes to copy.
+ *
+ * Copy up to @len bytes from this folio. This may be limited by PAGE_SIZE
+ * if the folio comes from HIGHMEM, and by the size of the folio.
+ *
+ * Return: The number of bytes copied from the folio.
+ */
+static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
+ loff_t pos, size_t len)
+{
+ size_t offset = offset_in_folio(folio, pos);
+ char *from = kmap_local_folio(folio, offset);
+
+ if (folio_test_partial_kmap(folio)) {
+ offset = offset_in_page(offset);
+ len = min_t(size_t, len, PAGE_SIZE - offset);
+ } else
+ len = min(len, folio_size(folio) - offset);
+
+ memcpy(to, from, len);
+ kunmap_local(from);
+
+ return len;
+}
+
+/**
+ * folio_zero_segments() - Zero two byte ranges in a folio.
+ * @folio: The folio to write to.
+ * @start1: The first byte to zero.
+ * @xend1: One more than the last byte in the first range.
+ * @start2: The first byte to zero in the second range.
+ * @xend2: One more than the last byte in the second range.
+ */
+static inline void folio_zero_segments(struct folio *folio,
+ size_t start1, size_t xend1, size_t start2, size_t xend2)
+{
+ zero_user_segments(&folio->page, start1, xend1, start2, xend2);
+}
+
+/**
+ * folio_zero_segment() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @xend: One more than the last byte to zero.
+ */
+static inline void folio_zero_segment(struct folio *folio,
+ size_t start, size_t xend)
+{
+ zero_user_segments(&folio->page, start, xend, 0, 0);
+}
+
+/**
+ * folio_zero_range() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @length: The number of bytes to zero.
+ */
+static inline void folio_zero_range(struct folio *folio,
+ size_t start, size_t length)
+{
+ zero_user_segments(&folio->page, start, start + length, 0, 0);
+}
+
+/**
+ * folio_release_kmap - Unmap a folio and drop a refcount.
+ * @folio: The folio to release.
+ * @addr: The address previously returned by a call to kmap_local_folio().
+ *
+ * It is common, e.g. in directory handling, to kmap a folio. This function
+ * unmaps the folio and drops the refcount that was being held to keep the
+ * folio alive while we accessed it.
+ */
+static inline void folio_release_kmap(struct folio *folio, void *addr)
+{
+ kunmap_local(addr);
+ folio_put(folio);
+}
#endif /* _LINUX_HIGHMEM_H */
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
index 9dc01f7ab5b4..07414c241e65 100644
--- a/include/linux/hippidevice.h
+++ b/include/linux/hippidevice.h
@@ -23,6 +23,10 @@
#ifdef __KERNEL__
+struct neigh_parms;
+struct net_device;
+struct sk_buff;
+
struct hippi_cb {
__u32 ifield;
};
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
new file mode 100644
index 000000000000..ca1ec437a3ca
--- /dev/null
+++ b/include/linux/hisi_acc_qm.h
@@ -0,0 +1,604 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 HiSilicon Limited. */
+#ifndef HISI_ACC_QM_H
+#define HISI_ACC_QM_H
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#define QM_QNUM_V1 4096
+#define QM_QNUM_V2 1024
+#define QM_MAX_VFS_NUM_V2 63
+
+/* qm user domain */
+#define QM_ARUSER_M_CFG_1 0x100088
+#define AXUSER_SNOOP_ENABLE BIT(30)
+#define AXUSER_CMD_TYPE GENMASK(14, 12)
+#define AXUSER_CMD_SMMU_NORMAL 1
+#define AXUSER_NS BIT(6)
+#define AXUSER_NO BIT(5)
+#define AXUSER_FP BIT(4)
+#define AXUSER_SSV BIT(0)
+#define AXUSER_BASE (AXUSER_SNOOP_ENABLE | \
+ FIELD_PREP(AXUSER_CMD_TYPE, \
+ AXUSER_CMD_SMMU_NORMAL) | \
+ AXUSER_NS | AXUSER_NO | AXUSER_FP)
+#define QM_ARUSER_M_CFG_ENABLE 0x100090
+#define ARUSER_M_CFG_ENABLE 0xfffffffe
+#define QM_AWUSER_M_CFG_1 0x100098
+#define QM_AWUSER_M_CFG_ENABLE 0x1000a0
+#define AWUSER_M_CFG_ENABLE 0xfffffffe
+#define QM_WUSER_M_CFG_ENABLE 0x1000a8
+#define WUSER_M_CFG_ENABLE 0xffffffff
+
+/* mailbox */
+#define QM_MB_CMD_SQC 0x0
+#define QM_MB_CMD_CQC 0x1
+#define QM_MB_CMD_EQC 0x2
+#define QM_MB_CMD_AEQC 0x3
+#define QM_MB_CMD_SQC_BT 0x4
+#define QM_MB_CMD_CQC_BT 0x5
+#define QM_MB_CMD_SQC_VFT_V2 0x6
+#define QM_MB_CMD_STOP_QP 0x8
+#define QM_MB_CMD_FLUSH_QM 0x9
+#define QM_MB_CMD_SRC 0xc
+#define QM_MB_CMD_DST 0xd
+
+#define QM_MB_CMD_SEND_BASE 0x300
+#define QM_MB_EVENT_SHIFT 8
+#define QM_MB_BUSY_SHIFT 13
+#define QM_MB_OP_SHIFT 14
+#define QM_MB_CMD_DATA_ADDR_L 0x304
+#define QM_MB_CMD_DATA_ADDR_H 0x308
+#define QM_MB_MAX_WAIT_CNT 6000
+
+/* doorbell */
+#define QM_DOORBELL_CMD_SQ 0
+#define QM_DOORBELL_CMD_CQ 1
+#define QM_DOORBELL_CMD_EQ 2
+#define QM_DOORBELL_CMD_AEQ 3
+
+#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000
+#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000
+#define QM_QP_MAX_NUM_SHIFT 11
+#define QM_DB_CMD_SHIFT_V2 12
+#define QM_DB_RAND_SHIFT_V2 16
+#define QM_DB_INDEX_SHIFT_V2 32
+#define QM_DB_PRIORITY_SHIFT_V2 48
+#define QM_VF_STATE 0x60
+
+/* qm cache */
+#define QM_CACHE_CTL 0x100050
+#define SQC_CACHE_ENABLE BIT(0)
+#define CQC_CACHE_ENABLE BIT(1)
+#define SQC_CACHE_WB_ENABLE BIT(4)
+#define SQC_CACHE_WB_THRD GENMASK(10, 5)
+#define CQC_CACHE_WB_ENABLE BIT(11)
+#define CQC_CACHE_WB_THRD GENMASK(17, 12)
+#define QM_AXI_M_CFG 0x1000ac
+#define AXI_M_CFG 0xffff
+#define QM_AXI_M_CFG_ENABLE 0x1000b0
+#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014
+#define AXI_M_CFG_ENABLE 0xffffffff
+#define QM_PEH_AXUSER_CFG 0x1000cc
+#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0
+#define PEH_AXUSER_CFG 0x401001
+#define PEH_AXUSER_CFG_ENABLE 0xffffffff
+
+#define QM_MIN_QNUM 2
+#define HISI_ACC_SGL_SGE_NR_MAX 255
+#define QM_SHAPER_CFG 0x100164
+#define QM_SHAPER_ENABLE BIT(30)
+#define QM_SHAPER_TYPE1_OFFSET 10
+
+/* page number for queue file region */
+#define QM_DOORBELL_PAGE_NR 1
+
+#define QM_DEV_ALG_MAX_LEN 256
+
+#define QM_MIG_REGION_SEL 0x100198
+#define QM_MIG_REGION_EN BIT(0)
+
+/* uacce mode of the driver */
+#define UACCE_MODE_NOUACCE 0 /* don't use uacce */
+#define UACCE_MODE_SVA 1 /* use uacce sva mode */
+#define UACCE_MODE_DESC "0 (default) means register to crypto only, 1 means register to both crypto and uacce"
+
+#define QM_ECC_MBIT BIT(2)
+
+enum qm_stop_reason {
+ QM_NORMAL,
+ QM_SOFT_RESET,
+ QM_DOWN,
+};
+
+enum qm_state {
+ QM_WORK = 0,
+ QM_STOP,
+};
+
+enum qp_state {
+ QP_START = 1,
+ QP_STOP,
+};
+
+enum qm_hw_ver {
+ QM_HW_V1 = 0x20,
+ QM_HW_V2 = 0x21,
+ QM_HW_V3 = 0x30,
+ QM_HW_V4 = 0x50,
+ QM_HW_V5 = 0x51,
+};
+
+enum qm_fun_type {
+ QM_HW_PF,
+ QM_HW_VF,
+};
+
+enum qm_debug_file {
+ CURRENT_QM,
+ CURRENT_Q,
+ CLEAR_ENABLE,
+ DEBUG_FILE_NUM,
+};
+
+enum qm_vf_state {
+ QM_READY = 0,
+ QM_NOT_READY,
+};
+
+enum qm_misc_ctl_bits {
+ QM_DRIVER_REMOVING = 0x0,
+ QM_RST_SCHED,
+ QM_RESETTING,
+ QM_MODULE_PARAM,
+};
+
+enum qm_cap_bits {
+ QM_SUPPORT_DB_ISOLATION = 0x0,
+ QM_SUPPORT_FUNC_QOS,
+ QM_SUPPORT_STOP_QP,
+ QM_SUPPORT_STOP_FUNC,
+ QM_SUPPORT_MB_COMMAND,
+ QM_SUPPORT_SVA_PREFETCH,
+ QM_SUPPORT_RPM,
+ QM_SUPPORT_DAE,
+};
+
+struct qm_dev_alg {
+ u64 alg_msk;
+ const char *alg;
+};
+
+struct qm_dev_dfx {
+ u32 dev_state;
+ u32 dev_timeout;
+};
+
+struct dfx_diff_registers {
+ u32 *regs;
+ u32 reg_offset;
+ u32 reg_len;
+};
+
+struct qm_dfx {
+ atomic64_t err_irq_cnt;
+ atomic64_t aeq_irq_cnt;
+ atomic64_t abnormal_irq_cnt;
+ atomic64_t create_qp_err_cnt;
+ atomic64_t mb_err_cnt;
+};
+
+struct debugfs_file {
+ enum qm_debug_file index;
+ struct mutex lock;
+ struct qm_debug *debug;
+};
+
+struct qm_debug {
+ u32 curr_qm_qp_num;
+ u32 sqe_mask_offset;
+ u32 sqe_mask_len;
+ struct qm_dfx dfx;
+ struct dentry *debug_root;
+ struct dentry *qm_d;
+ struct debugfs_file files[DEBUG_FILE_NUM];
+ struct qm_dev_dfx dev_dfx;
+ unsigned int *qm_last_words;
+	/* ACC engines recording last regs */
+ unsigned int *last_words;
+ struct dfx_diff_registers *qm_diff_regs;
+ struct dfx_diff_registers *acc_diff_regs;
+};
+
+struct qm_shaper_factor {
+ u32 func_qos;
+ u64 cir_b;
+ u64 cir_u;
+ u64 cir_s;
+ u64 cbs_s;
+};
+
+struct qm_dma {
+ void *va;
+ dma_addr_t dma;
+ size_t size;
+};
+
+struct hisi_qm_status {
+ u32 eq_head;
+ bool eqc_phase;
+ u32 aeq_head;
+ bool aeqc_phase;
+ atomic_t flags;
+ int stop_reason;
+};
+
+struct hisi_qm;
+
+enum acc_err_result {
+ ACC_ERR_NONE,
+ ACC_ERR_NEED_RESET,
+ ACC_ERR_RECOVERED,
+};
+
+struct hisi_qm_err_mask {
+ u32 ecc_2bits_mask;
+ u32 shutdown_mask;
+ u32 reset_mask;
+ u32 ce;
+ u32 nfe;
+ u32 fe;
+};
+
+struct hisi_qm_err_info {
+ char *acpi_rst;
+ u32 msi_wr_port;
+ struct hisi_qm_err_mask qm_err;
+ struct hisi_qm_err_mask dev_err;
+};
+
+struct hisi_qm_err_status {
+ u32 is_qm_ecc_mbit;
+ u32 is_dev_ecc_mbit;
+};
+
+struct hisi_qm_err_ini {
+ int (*hw_init)(struct hisi_qm *qm);
+ void (*hw_err_enable)(struct hisi_qm *qm);
+ void (*hw_err_disable)(struct hisi_qm *qm);
+ u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
+ void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
+ void (*open_axi_master_ooo)(struct hisi_qm *qm);
+ void (*close_axi_master_ooo)(struct hisi_qm *qm);
+ void (*open_sva_prefetch)(struct hisi_qm *qm);
+ void (*close_sva_prefetch)(struct hisi_qm *qm);
+ void (*show_last_dfx_regs)(struct hisi_qm *qm);
+ void (*err_info_init)(struct hisi_qm *qm);
+ enum acc_err_result (*get_err_result)(struct hisi_qm *qm);
+ bool (*dev_is_abnormal)(struct hisi_qm *qm);
+ int (*set_priv_status)(struct hisi_qm *qm);
+ void (*disable_axi_error)(struct hisi_qm *qm);
+ void (*enable_axi_error)(struct hisi_qm *qm);
+};
+
+struct hisi_qm_cap_info {
+ u32 type;
+ /* Register offset */
+ u32 offset;
+ /* Bit offset in register */
+ u32 shift;
+ u32 mask;
+ u32 v1_val;
+ u32 v2_val;
+ u32 v3_val;
+};
+
+struct hisi_qm_cap_query_info {
+ u32 type;
+ const char *name;
+ u32 offset;
+ u32 v1_val;
+ u32 v2_val;
+ u32 v3_val;
+};
+
+struct hisi_qm_cap_record {
+ u32 type;
+ const char *name;
+ u32 cap_val;
+};
+
+struct hisi_qm_cap_tables {
+ u32 qm_cap_size;
+ struct hisi_qm_cap_record *qm_cap_table;
+ u32 dev_cap_size;
+ struct hisi_qm_cap_record *dev_cap_table;
+};
+
+struct hisi_qm_list {
+ struct mutex lock;
+ struct list_head list;
+ int (*register_to_crypto)(struct hisi_qm *qm);
+ void (*unregister_from_crypto)(struct hisi_qm *qm);
+};
+
+struct hisi_qm_poll_data {
+ struct hisi_qm *qm;
+ struct work_struct work;
+ u16 *qp_finish_id;
+ u16 eqe_num;
+};
+
+/**
+ * struct qm_err_isolate
+ * @isolate_lock: protects device error log
+ * @err_threshold: user-configured error threshold which triggers isolation
+ * @is_isolate: device isolation state
+ * @qm_hw_errs: list of device hardware errors
+ */
+struct qm_err_isolate {
+ struct mutex isolate_lock;
+ u32 err_threshold;
+ bool is_isolate;
+ struct list_head qm_hw_errs;
+};
+
+struct qm_rsv_buf {
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
+ struct qm_eqc *eqc;
+ struct qm_aeqc *aeqc;
+ dma_addr_t sqc_dma;
+ dma_addr_t cqc_dma;
+ dma_addr_t eqc_dma;
+ dma_addr_t aeqc_dma;
+ struct qm_dma qcdma;
+};
+
+struct hisi_qm {
+ enum qm_hw_ver ver;
+ enum qm_fun_type fun_type;
+ const char *dev_name;
+ struct pci_dev *pdev;
+ void __iomem *io_base;
+ void __iomem *db_io_base;
+
+	/* Capability version, 0: not supported */
+ u32 cap_ver;
+ u32 sqe_size;
+ u32 qp_base;
+ u32 qp_num;
+ u32 qp_in_used;
+ u32 ctrl_qp_num;
+ u32 max_qp_num;
+ u32 vfs_num;
+ u32 db_interval;
+ u16 eq_depth;
+ u16 aeq_depth;
+ struct list_head list;
+ struct hisi_qm_list *qm_list;
+
+ struct qm_dma qdma;
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
+ struct qm_eqe *eqe;
+ struct qm_aeqe *aeqe;
+ dma_addr_t sqc_dma;
+ dma_addr_t cqc_dma;
+ dma_addr_t eqe_dma;
+ dma_addr_t aeqe_dma;
+ struct qm_rsv_buf xqc_buf;
+
+ struct hisi_qm_status status;
+ const struct hisi_qm_err_ini *err_ini;
+ struct hisi_qm_err_info err_info;
+ struct hisi_qm_err_status err_status;
+ /* driver removing and reset sched */
+ unsigned long misc_ctl;
+ /* Device capability bit */
+ unsigned long caps;
+
+ struct rw_semaphore qps_lock;
+ struct idr qp_idr;
+ struct hisi_qp *qp_array;
+ struct hisi_qm_poll_data *poll_data;
+
+ struct mutex mailbox_lock;
+
+ struct mutex ifc_lock;
+
+ const struct hisi_qm_hw_ops *ops;
+
+ struct qm_debug debug;
+
+ u32 error_mask;
+
+ struct workqueue_struct *wq;
+ struct work_struct rst_work;
+ struct work_struct cmd_process;
+
+ bool use_sva;
+
+ resource_size_t phys_base;
+ resource_size_t db_phys_base;
+ struct uacce_device *uacce;
+ int mode;
+ struct qm_shaper_factor *factor;
+ u32 mb_qos;
+ u32 type_rate;
+ struct qm_err_isolate isolate_data;
+
+ struct hisi_qm_cap_tables cap_tables;
+};
+
+struct hisi_qp_status {
+ atomic_t used;
+ u16 sq_tail;
+ u16 cq_head;
+ bool cqc_phase;
+ atomic_t flags;
+};
+
+struct hisi_qp_ops {
+ int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
+};
+
+struct hisi_qp {
+ u32 qp_id;
+ u16 sq_depth;
+ u16 cq_depth;
+ u8 alg_type;
+ u8 req_type;
+
+ struct qm_dma qdma;
+ void *sqe;
+ struct qm_cqe *cqe;
+ dma_addr_t sqe_dma;
+ dma_addr_t cqe_dma;
+
+ struct hisi_qp_status qp_status;
+ struct hisi_qp_ops *hw_ops;
+ void *qp_ctx;
+ void (*req_cb)(struct hisi_qp *qp, void *data);
+ void (*event_cb)(struct hisi_qp *qp);
+
+ struct hisi_qm *qm;
+ bool is_resetting;
+ bool is_in_kernel;
+ u16 pasid;
+ struct uacce_queue *uacce_q;
+};
+
+static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret < 0)
+ return ret;
+
+ if (n > QM_MAX_VFS_NUM_V2)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int mode_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret != 0 || (n != UACCE_MODE_SVA &&
+ n != UACCE_MODE_NOUACCE))
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
+{
+ return mode_set(val, kp);
+}
+
+static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
+{
+ INIT_LIST_HEAD(&qm_list->list);
+ mutex_init(&qm_list->lock);
+}
+
+static inline void hisi_qm_add_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_add_tail(&qm->list, &qm_list->list);
+ mutex_unlock(&qm_list->lock);
+}
+
+static inline void hisi_qm_del_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_del(&qm->list);
+ mutex_unlock(&qm_list->lock);
+}
+
+int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device);
+int hisi_qm_init(struct hisi_qm *qm);
+void hisi_qm_uninit(struct hisi_qm *qm);
+int hisi_qm_start(struct hisi_qm *qm);
+int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
+int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
+void hisi_qm_stop_qp(struct hisi_qp *qp);
+int hisi_qp_send(struct hisi_qp *qp, const void *msg);
+void hisi_qm_debug_init(struct hisi_qm *qm);
+void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
+int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
+int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void hisi_qm_dev_err_init(struct hisi_qm *qm);
+void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
+int hisi_qm_regs_debugfs_init(struct hisi_qm *qm,
+ struct dfx_diff_registers *dregs, u32 reg_len);
+void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len);
+void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
+ struct dfx_diff_registers *dregs, u32 regs_len);
+
+pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
+void hisi_qm_reset_prepare(struct pci_dev *pdev);
+void hisi_qm_reset_done(struct pci_dev *pdev);
+
+int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
+int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
+ bool op);
+
+struct hisi_acc_sgl_pool;
+struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
+ struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
+ u32 index, dma_addr_t *hw_sgl_dma, enum dma_data_direction dir);
+void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
+ struct hisi_acc_hw_sgl *hw_sgl, enum dma_data_direction dir);
+struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
+ u32 count, u32 sge_nr);
+void hisi_acc_free_sgl_pool(struct device *dev,
+ struct hisi_acc_sgl_pool *pool);
+int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
+ u8 alg_type, int node, struct hisi_qp **qps);
+void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
+void hisi_qm_dev_shutdown(struct pci_dev *pdev);
+void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard);
+void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard);
+int hisi_qm_resume(struct device *dev);
+int hisi_qm_suspend(struct device *dev);
+void hisi_qm_pm_uninit(struct hisi_qm *qm);
+void hisi_qm_pm_init(struct hisi_qm *qm);
+int hisi_qm_get_dfx_access(struct hisi_qm *qm);
+void hisi_qm_put_dfx_access(struct hisi_qm *qm);
+void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
+u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
+ const struct hisi_qm_cap_info *info_table,
+ u32 index, bool is_read);
+u32 hisi_qm_get_cap_value(struct hisi_qm *qm,
+ const struct hisi_qm_cap_query_info *info_table,
+ u32 index, bool is_read);
+int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
+ u32 dev_algs_size);
+
+/* Used by VFIO ACC live migration driver */
+struct pci_driver *hisi_sec_get_pf_driver(void);
+struct pci_driver *hisi_hpre_get_pf_driver(void);
+struct pci_driver *hisi_zip_get_pf_driver(void);
+#endif
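
For orientation, a minimal consumer sketch against the prototypes above; the algorithm type (0), the completion callback body and the my_* names are illustrative assumptions, and a real driver would wait for the completion before stopping the queue pair:

static void my_req_cb(struct hisi_qp *qp, void *cqe)
{
	/* Runs per completion entry; qp->qp_ctx carries consumer context. */
}

static int my_send_one(struct hisi_qm_list *qm_list, const void *sqe)
{
	struct hisi_qp *qp;
	int ret;

	/* One QP of (hypothetical) algorithm type 0 on the local NUMA node. */
	ret = hisi_qm_alloc_qps_node(qm_list, 1, 0, numa_node_id(), &qp);
	if (ret)
		return ret;

	qp->req_cb = my_req_cb;
	ret = hisi_qm_start_qp(qp, 0);
	if (ret)
		goto out_free;

	ret = hisi_qp_send(qp, sqe);
	/* A real driver waits for my_req_cb() here before stopping. */
	hisi_qm_stop_qp(qp);
out_free:
	hisi_qm_free_qps(&qp, 1);
	return ret;
}

The vfs_num_set() and mode_set() helpers earlier in the header are written as kernel_param_ops .set hooks; a typical (assumed) hookup pairs them with param_get_int via module_param_cb().
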
diff --git a/include/linux/hmm-dma.h b/include/linux/hmm-dma.h
new file mode 100644
index 000000000000..f58b9fc71999
--- /dev/null
+++ b/include/linux/hmm-dma.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (c) 2024 NVIDIA Corporation & Affiliates */
+#ifndef LINUX_HMM_DMA_H
+#define LINUX_HMM_DMA_H
+
+#include <linux/dma-mapping.h>
+
+struct dma_iova_state;
+struct pci_p2pdma_map_state;
+
+/*
+ * struct hmm_dma_map - array of PFNs and DMA addresses
+ *
+ * @state: DMA IOVA state
+ * @pfn_list: array of PFNs
+ * @dma_list: array of DMA addresses
+ * @dma_entry_size: size of each DMA entry in the array
+ */
+struct hmm_dma_map {
+ struct dma_iova_state state;
+ unsigned long *pfn_list;
+ dma_addr_t *dma_list;
+ size_t dma_entry_size;
+};
+
+int hmm_dma_map_alloc(struct device *dev, struct hmm_dma_map *map,
+ size_t nr_entries, size_t dma_entry_size);
+void hmm_dma_map_free(struct device *dev, struct hmm_dma_map *map);
+dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
+ size_t idx,
+ struct pci_p2pdma_map_state *p2pdma_state);
+bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx);
+#endif /* LINUX_HMM_DMA_H */
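
A hedged usage sketch for the helpers just declared: the PAGE_SIZE entry granularity, the -EIO error path and the assumption that failures surface as DMA_MAPPING_ERROR (checked via dma_mapping_error()) are illustrative, and hmm_range_fault() from hmm.h below is what actually fills map->pfn_list:

static int my_dma_map_all(struct device *dev, struct hmm_dma_map *map,
			  size_t nr_entries)
{
	/* Complete type needs <linux/pci-p2pdma.h>. */
	struct pci_p2pdma_map_state p2p = {};
	size_t i;
	int ret;

	ret = hmm_dma_map_alloc(dev, map, nr_entries, PAGE_SIZE);
	if (ret)
		return ret;

	/* ... hmm_range_fault() fills map->pfn_list here ... */

	for (i = 0; i < nr_entries; i++) {
		dma_addr_t addr = hmm_dma_map_pfn(dev, map, i, &p2p);

		if (dma_mapping_error(dev, addr)) {
			ret = -EIO;
			goto err_unmap;
		}
		/* map->dma_list[i] now holds the device address. */
	}
	return 0;

err_unmap:
	while (i--)
		hmm_dma_unmap_pfn(dev, map, i);
	hmm_dma_map_free(dev, map);
	return ret;
}
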
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 866a0fa104c4..db75ffc949a7 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -4,19 +4,14 @@
*
* Authors: Jérôme Glisse <jglisse@redhat.com>
*
- * See Documentation/vm/hmm.rst for reasons and overview of what HMM is.
+ * See Documentation/mm/hmm.rst for reasons and overview of what HMM is.
*/
#ifndef LINUX_HMM_H
#define LINUX_HMM_H
-#include <linux/kconfig.h>
-#include <linux/pgtable.h>
+#include <linux/mm.h>
-#include <linux/device.h>
-#include <linux/migrate.h>
-#include <linux/memremap.h>
-#include <linux/completion.h>
-#include <linux/mmu_notifier.h>
+struct mmu_interval_notifier;
/*
* On output:
@@ -28,6 +23,10 @@
* HMM_PFN_WRITE - if the page memory can be written to (requires HMM_PFN_VALID)
* HMM_PFN_ERROR - accessing the pfn is impossible and the device should
* fail. ie poisoned memory, special pages, no vma, etc
+ * HMM_PFN_P2PDMA - P2P page
+ * HMM_PFN_P2PDMA_BUS - Bus mapped P2P transfer
+ * HMM_PFN_DMA_MAPPED - Flag preserved on input-to-output transformation
+ * to mark that the page is already DMA mapped
*
* On input:
* 0 - Return the current state of the page, do not fault it.
@@ -41,13 +40,21 @@ enum hmm_pfn_flags {
HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
- HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 8),
+ /*
+ * Sticky flags, carried from input to output,
+ * don't forget to update HMM_PFN_INOUT_FLAGS
+ */
+ HMM_PFN_DMA_MAPPED = 1UL << (BITS_PER_LONG - 4),
+ HMM_PFN_P2PDMA = 1UL << (BITS_PER_LONG - 5),
+ HMM_PFN_P2PDMA_BUS = 1UL << (BITS_PER_LONG - 6),
+
+ HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 11),
/* Input flags */
HMM_PFN_REQ_FAULT = HMM_PFN_VALID,
HMM_PFN_REQ_WRITE = HMM_PFN_WRITE,
- HMM_PFN_FLAGS = 0xFFUL << HMM_PFN_ORDER_SHIFT,
+ HMM_PFN_FLAGS = ~((1UL << HMM_PFN_ORDER_SHIFT) - 1),
};
/*
@@ -63,6 +70,14 @@ static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
}
/*
+ * hmm_pfn_to_phys() - return physical address pointed to by a device entry
+ */
+static inline phys_addr_t hmm_pfn_to_phys(unsigned long hmm_pfn)
+{
+ return __pfn_to_phys(hmm_pfn & ~HMM_PFN_FLAGS);
+}
+
+/*
* hmm_pfn_to_map_order() - return the CPU mapping size order
*
* This is optionally useful to optimize processing of the pfn result
@@ -105,7 +120,7 @@ struct hmm_range {
};
/*
- * Please see Documentation/vm/hmm.rst for how to use the range API.
+ * Please see Documentation/mm/hmm.rst for how to use the range API.
*/
int hmm_range_fault(struct hmm_range *range);
@@ -113,7 +128,7 @@ int hmm_range_fault(struct hmm_range *range);
* HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
*
* When waiting for mmu notifiers we need some kind of time out otherwise we
- * could potentialy wait for ever, 1000ms ie 1s sounds like a long time to
+ * could potentially wait forever; 1000ms, i.e. 1s, sounds like a long time to
* wait already.
*/
#define HMM_RANGE_DEFAULT_TIMEOUT 1000
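
To make the request/result flag split concrete, a minimal single-page hmm_range_fault() sketch; a registered mmu_interval_notifier is assumed, and the -EBUSY retry loop that restarts from mmu_interval_read_begin() is elided:

static int my_fault_one(struct mmu_interval_notifier *notifier,
			unsigned long addr, unsigned long *hmm_pfn)
{
	struct hmm_range range = {
		.notifier	= notifier,
		.start		= addr,
		.end		= addr + PAGE_SIZE,
		.hmm_pfns	= hmm_pfn,
		.default_flags	= HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
	};
	int ret;

	*hmm_pfn = 0;
	range.notifier_seq = mmu_interval_read_begin(notifier);
	mmap_read_lock(notifier->mm);
	ret = hmm_range_fault(&range);	/* -EBUSY means: retry from read_begin */
	mmap_read_unlock(notifier->mm);
	if (ret)
		return ret;

	/* On success HMM_PFN_VALID/WRITE are set; the sticky DMA flags
	 * (HMM_PFN_DMA_MAPPED etc.) survive from a previous pass. */
	return (*hmm_pfn & HMM_PFN_VALID) ? 0 : -EFAULT;
}
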
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 232e1bd507a7..9fa9c30a34e6 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -7,14 +7,24 @@
#define __LINUX_HOST1X_H
#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-fence.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
enum host1x_class {
HOST1X_CLASS_HOST1X = 0x1,
+ HOST1X_CLASS_NVJPG1 = 0x7,
+ HOST1X_CLASS_NVENC = 0x21,
+ HOST1X_CLASS_NVENC1 = 0x22,
HOST1X_CLASS_GR2D = 0x51,
HOST1X_CLASS_GR2D_SB = 0x52,
HOST1X_CLASS_VIC = 0x5D,
HOST1X_CLASS_GR3D = 0x60,
+ HOST1X_CLASS_NVJPG = 0xC0,
+ HOST1X_CLASS_NVDEC = 0xF0,
+ HOST1X_CLASS_NVDEC1 = 0xF5,
+ HOST1X_CLASS_OFA = 0xF8,
};
struct host1x;
@@ -24,6 +34,33 @@ struct iommu_group;
u64 host1x_get_dma_mask(struct host1x *host1x);
/**
+ * struct host1x_bo_cache - host1x buffer object cache
+ * @mappings: list of mappings
+ * @lock: synchronizes accesses to the list of mappings
+ *
+ * Note that entries are not periodically evicted from this cache and instead need to be
+ * explicitly released. This is used primarily for DRM/KMS where the cache's reference is
+ * released when the last reference to a buffer object represented by a mapping in this
+ * cache is dropped.
+ */
+struct host1x_bo_cache {
+ struct list_head mappings;
+ struct mutex lock;
+};
+
+static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
+{
+ INIT_LIST_HEAD(&cache->mappings);
+ mutex_init(&cache->lock);
+}
+
+static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
+{
+ /* XXX warn if not empty? */
+ mutex_destroy(&cache->lock);
+}
+
+/**
* struct host1x_client_ops - host1x client operations
* @early_init: host1x client early initialization code
* @init: host1x client initialization code
@@ -55,6 +92,7 @@ struct host1x_client_ops {
* @parent: pointer to parent structure
* @usecount: reference count for this structure
* @lock: mutex for mutually exclusive concurrency
+ * @cache: host1x buffer object cache
*/
struct host1x_client {
struct list_head list;
@@ -73,6 +111,8 @@ struct host1x_client {
struct host1x_client *parent;
unsigned int usecount;
struct mutex lock;
+
+ struct host1x_bo_cache cache;
};
/*
@@ -82,23 +122,48 @@ struct host1x_client {
struct host1x_bo;
struct sg_table;
+struct host1x_bo_mapping {
+ struct kref ref;
+ struct dma_buf_attachment *attach;
+ enum dma_data_direction direction;
+ struct list_head list;
+ struct host1x_bo *bo;
+ struct sg_table *sgt;
+ unsigned int chunks;
+ struct device *dev;
+ dma_addr_t phys;
+ size_t size;
+
+ struct host1x_bo_cache *cache;
+ struct list_head entry;
+};
+
+static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
+{
+ return container_of(ref, struct host1x_bo_mapping, ref);
+}
+
struct host1x_bo_ops {
struct host1x_bo *(*get)(struct host1x_bo *bo);
void (*put)(struct host1x_bo *bo);
- struct sg_table *(*pin)(struct device *dev, struct host1x_bo *bo,
- dma_addr_t *phys);
- void (*unpin)(struct device *dev, struct sg_table *sgt);
+ struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction dir);
+ void (*unpin)(struct host1x_bo_mapping *map);
void *(*mmap)(struct host1x_bo *bo);
void (*munmap)(struct host1x_bo *bo, void *addr);
};
struct host1x_bo {
const struct host1x_bo_ops *ops;
+ struct list_head mappings;
+ spinlock_t lock;
};
static inline void host1x_bo_init(struct host1x_bo *bo,
const struct host1x_bo_ops *ops)
{
+ INIT_LIST_HEAD(&bo->mappings);
+ spin_lock_init(&bo->lock);
bo->ops = ops;
}
@@ -112,18 +177,10 @@ static inline void host1x_bo_put(struct host1x_bo *bo)
bo->ops->put(bo);
}
-static inline struct sg_table *host1x_bo_pin(struct device *dev,
- struct host1x_bo *bo,
- dma_addr_t *phys)
-{
- return bo->ops->pin(dev, bo, phys);
-}
-
-static inline void host1x_bo_unpin(struct device *dev, struct host1x_bo *bo,
- struct sg_table *sgt)
-{
- bo->ops->unpin(dev, sgt);
-}
+struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
+ enum dma_data_direction dir,
+ struct host1x_bo_cache *cache);
+void host1x_bo_unpin(struct host1x_bo_mapping *map);
static inline void *host1x_bo_mmap(struct host1x_bo *bo)
{
@@ -170,6 +227,10 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
u32 syncpt_id);
+struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
+ bool timeout);
+void host1x_fence_cancel(struct dma_fence *fence);
+
/*
* host1x channel
*/
@@ -179,6 +240,7 @@ struct host1x_job;
struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
+void host1x_channel_stop(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
@@ -216,8 +278,8 @@ struct host1x_job {
struct host1x_client *client;
/* Gathers and their memory */
- struct host1x_job_gather *gathers;
- unsigned int num_gathers;
+ struct host1x_job_cmd *cmds;
+ unsigned int num_cmds;
/* Array of handles to be pinned & unpinned */
struct host1x_reloc *relocs;
@@ -234,9 +296,16 @@ struct host1x_job {
u32 syncpt_incrs;
u32 syncpt_end;
+ /* Completion fence for job tracking */
+ struct dma_fence *fence;
+ struct dma_fence_cb fence_cb;
+
/* Maximum time to wait for this job */
unsigned int timeout;
+ /* Job has timed out and should be released */
+ bool cancelled;
+
/* Index and number of slots used in the push buffer */
unsigned int first_get;
unsigned int num_slots;
@@ -257,12 +326,33 @@ struct host1x_job {
/* Add a channel wait for previous ops to complete */
bool serialize;
+
+ /* Fast-forward syncpoint increments on job timeout */
+ bool syncpt_recovery;
+
+ /* Callback called when job is freed */
+ void (*release)(struct host1x_job *job);
+ void *user_data;
+
+ /* Whether the host1x-side firewall should be run for this job or not */
+ bool enable_firewall;
+
+ /* Options for configuring engine data stream ID */
+ /* Context device to use for job */
+ struct host1x_memory_context *memory_context;
+ /* Stream ID to use if context isolation is disabled (!memory_context) */
+ u32 engine_fallback_streamid;
+ /* Engine offset to program stream ID to */
+ u32 engine_streamid_offset;
};
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
- u32 num_cmdbufs, u32 num_relocs);
+ u32 num_cmdbufs, u32 num_relocs,
+ bool skip_firewall);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
unsigned int words, unsigned int offset);
+void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
+ bool relative, u32 next_class);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
@@ -332,15 +422,33 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);
-int __host1x_client_register(struct host1x_client *client,
- struct lock_class_key *key);
-#define host1x_client_register(class) \
- ({ \
- static struct lock_class_key __key; \
- __host1x_client_register(class, &__key); \
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
+void host1x_client_exit(struct host1x_client *client);
+
+#define host1x_client_init(client) \
+ ({ \
+ static struct lock_class_key __key; \
+ __host1x_client_init(client, &__key); \
+ })
+
+int __host1x_client_register(struct host1x_client *client);
+
+/*
+ * Note that this wrapper calls __host1x_client_init() for compatibility
+ * with existing callers. Callers that want to separately initialize and
+ * register a host1x client must first initialize using either
+ * __host1x_client_init() or host1x_client_init() and then use
+ * the low-level __host1x_client_register() function to avoid the client
+ * getting reinitialized.
+ */
+#define host1x_client_register(client) \
+ ({ \
+ static struct lock_class_key __key; \
+ __host1x_client_init(client, &__key); \
+ __host1x_client_register(client); \
})
-int host1x_client_unregister(struct host1x_client *client);
+void host1x_client_unregister(struct host1x_client *client);
int host1x_client_suspend(struct host1x_client *client);
int host1x_client_resume(struct host1x_client *client);
@@ -355,4 +463,41 @@ int tegra_mipi_disable(struct tegra_mipi_device *device);
int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);
+/* host1x memory contexts */
+
+struct host1x_memory_context {
+ struct host1x *host;
+
+ refcount_t ref;
+ struct pid *owner;
+
+ struct device_dma_parameters dma_parms;
+ struct device dev;
+ u64 dma_mask;
+ u32 stream_id;
+};
+
+#ifdef CONFIG_IOMMU_API
+struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
+ struct device *dev,
+ struct pid *pid);
+void host1x_memory_context_get(struct host1x_memory_context *cd);
+void host1x_memory_context_put(struct host1x_memory_context *cd);
+#else
+static inline struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
+ struct device *dev,
+ struct pid *pid)
+{
+ return NULL;
+}
+
+static inline void host1x_memory_context_get(struct host1x_memory_context *cd)
+{
+}
+
+static inline void host1x_memory_context_put(struct host1x_memory_context *cd)
+{
+}
+#endif
+
#endif
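
A short sketch of the reworked pin API: mappings are now refcounted objects carrying the sg_table, so pinning through a client's cache may hand back an existing mapping. The ERR_PTR return convention and the DMA direction here are assumptions:

static int my_pin(struct device *dev, struct host1x_bo *bo,
		  struct host1x_client *client,
		  struct host1x_bo_mapping **mapping)
{
	struct host1x_bo_mapping *map;

	map = host1x_bo_pin(dev, bo, DMA_TO_DEVICE, &client->cache);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* map->phys is the device-visible address, map->sgt the backing
	 * sg_table. Drop the reference with host1x_bo_unpin(map) when done. */
	*mapping = map;
	return 0;
}
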
diff --git a/include/linux/host1x_context_bus.h b/include/linux/host1x_context_bus.h
new file mode 100644
index 000000000000..c928cb432680
--- /dev/null
+++ b/include/linux/host1x_context_bus.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
+ */
+
+#ifndef __LINUX_HOST1X_CONTEXT_BUS_H
+#define __LINUX_HOST1X_CONTEXT_BUS_H
+
+#include <linux/device.h>
+
+#ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
+extern const struct bus_type host1x_context_device_bus_type;
+#endif
+
+#endif
diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h
index 6f1dee7e67e0..9be8704e2d38 100644
--- a/include/linux/hp_sdc.h
+++ b/include/linux/hp_sdc.h
@@ -180,7 +180,7 @@ switch (val) { \
#define HP_SDC_CMD_SET_IM 0x40 /* 010xxxxx == set irq mask */
-/* The documents provided do not explicitly state that all registers betweem
+/* The documents provided do not explicitly state that all registers between
* 0x01 and 0x1f inclusive can be read by sending their register index as a
* command, but this is implied and appears to be the case.
*/
diff --git a/include/linux/hpet.h b/include/linux/hpet.h
index 8604564b985d..21e69eaf7a36 100644
--- a/include/linux/hpet.h
+++ b/include/linux/hpet.h
@@ -30,7 +30,7 @@ struct hpet {
unsigned long _hpet_compare;
} _u1;
u64 hpet_fsb[2]; /* FSB route */
- } hpet_timers[1];
+ } hpet_timers[];
};
#define hpet_mc _u0._hpet_mc
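
The hpet_timers change is the usual one-element-array to flexible-array conversion: indexing is unchanged, but sizeof(struct hpet) now ends before the array, so mapping-size computations must count the timers explicitly. A sketch, assuming nrtimers was read from the capabilities register:

static size_t my_hpet_len(const struct hpet *hpet, unsigned int nrtimers)
{
	/* struct_size() from <linux/overflow.h> adds nrtimers elements
	 * of hpet_timers[] to the base structure size. */
	return struct_size(hpet, hpet_timers, nrtimers);
}
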
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index bb5e7b0a4274..2cf1bf65b225 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -13,16 +13,12 @@
#define _LINUX_HRTIMER_H
#include <linux/hrtimer_defs.h>
-#include <linux/rbtree.h>
+#include <linux/hrtimer_types.h>
#include <linux/init.h>
#include <linux/list.h>
-#include <linux/percpu.h>
-#include <linux/seqlock.h>
+#include <linux/percpu-defs.h>
+#include <linux/rbtree.h>
#include <linux/timer.h>
-#include <linux/timerqueue.h>
-
-struct hrtimer_clock_base;
-struct hrtimer_cpu_base;
/*
* Mode arguments of xxx_hrtimer functions:
@@ -60,14 +56,6 @@ enum hrtimer_mode {
};
/*
- * Return values for the callback function
- */
-enum hrtimer_restart {
- HRTIMER_NORESTART, /* Timer is not restarted */
- HRTIMER_RESTART, /* Timer must be restarted */
-};
-
-/*
* Values to track state of the timer
*
* Possible states:
@@ -95,38 +83,6 @@ enum hrtimer_restart {
#define HRTIMER_STATE_ENQUEUED 0x01
/**
- * struct hrtimer - the basic hrtimer structure
- * @node: timerqueue node, which also manages node.expires,
- * the absolute expiry time in the hrtimers internal
- * representation. The time is related to the clock on
- * which the timer is based. Is setup by adding
- * slack to the _softexpires value. For non range timers
- * identical to _softexpires.
- * @_softexpires: the absolute earliest expiry time of the hrtimer.
- * The time which was given as expiry time when the timer
- * was armed.
- * @function: timer expiry callback function
- * @base: pointer to the timer base (per cpu and per clock)
- * @state: state information (See bit values above)
- * @is_rel: Set if the timer was armed relative
- * @is_soft: Set if hrtimer will be expired in soft interrupt context.
- * @is_hard: Set if hrtimer will be expired in hard interrupt context
- * even on RT.
- *
- * The hrtimer structure must be initialized by hrtimer_init()
- */
-struct hrtimer {
- struct timerqueue_node node;
- ktime_t _softexpires;
- enum hrtimer_restart (*function)(struct hrtimer *);
- struct hrtimer_clock_base *base;
- u8 state;
- u8 is_rel;
- u8 is_soft;
- u8 is_hard;
-};
-
-/**
* struct hrtimer_sleeper - simple sleeper structure
* @timer: embedded timer structure
* @task: task to wake up
@@ -138,105 +94,6 @@ struct hrtimer_sleeper {
struct task_struct *task;
};
-#ifdef CONFIG_64BIT
-# define __hrtimer_clock_base_align ____cacheline_aligned
-#else
-# define __hrtimer_clock_base_align
-#endif
-
-/**
- * struct hrtimer_clock_base - the timer base for a specific clock
- * @cpu_base: per cpu clock base
- * @index: clock type index for per_cpu support when moving a
- * timer to a base on another cpu.
- * @clockid: clock id for per_cpu support
- * @seq: seqcount around __run_hrtimer
- * @running: pointer to the currently running hrtimer
- * @active: red black tree root node for the active timers
- * @get_time: function to retrieve the current time of the clock
- * @offset: offset of this clock to the monotonic base
- */
-struct hrtimer_clock_base {
- struct hrtimer_cpu_base *cpu_base;
- unsigned int index;
- clockid_t clockid;
- seqcount_raw_spinlock_t seq;
- struct hrtimer *running;
- struct timerqueue_head active;
- ktime_t (*get_time)(void);
- ktime_t offset;
-} __hrtimer_clock_base_align;
-
-enum hrtimer_base_type {
- HRTIMER_BASE_MONOTONIC,
- HRTIMER_BASE_REALTIME,
- HRTIMER_BASE_BOOTTIME,
- HRTIMER_BASE_TAI,
- HRTIMER_BASE_MONOTONIC_SOFT,
- HRTIMER_BASE_REALTIME_SOFT,
- HRTIMER_BASE_BOOTTIME_SOFT,
- HRTIMER_BASE_TAI_SOFT,
- HRTIMER_MAX_CLOCK_BASES,
-};
-
-/**
- * struct hrtimer_cpu_base - the per cpu clock bases
- * @lock: lock protecting the base and associated clock bases
- * and timers
- * @cpu: cpu number
- * @active_bases: Bitfield to mark bases with active timers
- * @clock_was_set_seq: Sequence counter of clock was set events
- * @hres_active: State of high resolution mode
- * @in_hrtirq: hrtimer_interrupt() is currently executing
- * @hang_detected: The last hrtimer interrupt detected a hang
- * @softirq_activated: displays, if the softirq is raised - update of softirq
- * related settings is not required then.
- * @nr_events: Total number of hrtimer interrupt events
- * @nr_retries: Total number of hrtimer interrupt retries
- * @nr_hangs: Total number of hrtimer interrupt hangs
- * @max_hang_time: Maximum time spent in hrtimer_interrupt
- * @softirq_expiry_lock: Lock which is taken while softirq based hrtimer are
- * expired
- * @timer_waiters: A hrtimer_cancel() invocation waits for the timer
- * callback to finish.
- * @expires_next: absolute time of the next event, is required for remote
- * hrtimer enqueue; it is the total first expiry time (hard
- * and soft hrtimer are taken into account)
- * @next_timer: Pointer to the first expiring timer
- * @softirq_expires_next: Time to check, if soft queues needs also to be expired
- * @softirq_next_timer: Pointer to the first expiring softirq based timer
- * @clock_base: array of clock bases for this cpu
- *
- * Note: next_timer is just an optimization for __remove_hrtimer().
- * Do not dereference the pointer because it is not reliable on
- * cross cpu removals.
- */
-struct hrtimer_cpu_base {
- raw_spinlock_t lock;
- unsigned int cpu;
- unsigned int active_bases;
- unsigned int clock_was_set_seq;
- unsigned int hres_active : 1,
- in_hrtirq : 1,
- hang_detected : 1,
- softirq_activated : 1;
-#ifdef CONFIG_HIGH_RES_TIMERS
- unsigned int nr_events;
- unsigned short nr_retries;
- unsigned short nr_hangs;
- unsigned int max_hang_time;
-#endif
-#ifdef CONFIG_PREEMPT_RT
- spinlock_t softirq_expiry_lock;
- atomic_t timer_waiters;
-#endif
- ktime_t expires_next;
- struct hrtimer *next_timer;
- ktime_t softirq_expires_next;
- struct hrtimer *softirq_next_timer;
- struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
-} ____cacheline_aligned;
-
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
timer->node.expires = time;
@@ -297,14 +154,11 @@ static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
return ktime_to_ns(timer->node.expires);
}
-static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
-{
- return ktime_sub(timer->node.expires, timer->base->get_time());
-}
+ktime_t hrtimer_cb_get_time(const struct hrtimer *timer);
-static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
+static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
{
- return timer->base->get_time();
+ return ktime_sub(timer->node.expires, hrtimer_cb_get_time(timer));
}
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
@@ -318,16 +172,12 @@ struct clock_event_device;
extern void hrtimer_interrupt(struct clock_event_device *dev);
-extern void clock_was_set_delayed(void);
-
extern unsigned int hrtimer_resolution;
#else
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-static inline void clock_was_set_delayed(void) { }
-
#endif
static inline ktime_t
@@ -347,17 +197,16 @@ __hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
static inline ktime_t
hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
{
- return __hrtimer_expires_remaining_adjusted(timer,
- timer->base->get_time());
+ return __hrtimer_expires_remaining_adjusted(timer, hrtimer_cb_get_time(timer));
}
-extern void clock_was_set(void);
#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
+extern void timerfd_resume(void);
#else
static inline void timerfd_clock_was_set(void) { }
+static inline void timerfd_resume(void) { }
#endif
-extern void hrtimers_resume(void);
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
@@ -370,37 +219,25 @@ static inline void hrtimer_cancel_wait_running(struct hrtimer *timer)
}
#endif
+static inline enum hrtimer_restart hrtimer_dummy_timeout(struct hrtimer *unused)
+{
+ return HRTIMER_NORESTART;
+}
+
/* Exported timer functions: */
/* Initialize timers: */
-extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
- enum hrtimer_mode mode);
-extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
- enum hrtimer_mode mode);
+extern void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *),
+ clockid_t clock_id, enum hrtimer_mode mode);
+extern void hrtimer_setup_on_stack(struct hrtimer *timer,
+ enum hrtimer_restart (*function)(struct hrtimer *),
+ clockid_t clock_id, enum hrtimer_mode mode);
+extern void hrtimer_setup_sleeper_on_stack(struct hrtimer_sleeper *sl, clockid_t clock_id,
+ enum hrtimer_mode mode);
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
-extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock,
- enum hrtimer_mode mode);
-extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
- clockid_t clock_id,
- enum hrtimer_mode mode);
-
extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
#else
-static inline void hrtimer_init_on_stack(struct hrtimer *timer,
- clockid_t which_clock,
- enum hrtimer_mode mode)
-{
- hrtimer_init(timer, which_clock, mode);
-}
-
-static inline void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
- clockid_t clock_id,
- enum hrtimer_mode mode)
-{
- hrtimer_init_sleeper(sl, clock_id, mode);
-}
-
static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif
@@ -484,30 +321,45 @@ static inline int hrtimer_callback_running(struct hrtimer *timer)
return timer->base->running == timer;
}
+/**
+ * hrtimer_update_function - Update the timer's callback function
+ * @timer: Timer to update
+ * @function: New callback function
+ *
+ * Only safe to call if the timer is not enqueued. Can be called in the callback function if the
+ * timer is not enqueued at the same time (see the comments above HRTIMER_STATE_ENQUEUED).
+ */
+static inline void hrtimer_update_function(struct hrtimer *timer,
+ enum hrtimer_restart (*function)(struct hrtimer *))
+{
+#ifdef CONFIG_PROVE_LOCKING
+ guard(raw_spinlock_irqsave)(&timer->base->cpu_base->lock);
+
+ if (WARN_ON_ONCE(hrtimer_is_queued(timer)))
+ return;
+
+ if (WARN_ON_ONCE(!function))
+ return;
+#endif
+ ACCESS_PRIVATE(timer, function) = function;
+}
+
/* Forward a hrtimer so it expires after now: */
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
/**
- * hrtimer_forward_now - forward the timer expiry so it expires after now
+ * hrtimer_forward_now() - forward the timer expiry so it expires after now
* @timer: hrtimer to forward
* @interval: the interval to forward
*
- * Forward the timer expiry so it will expire after the current time
- * of the hrtimer clock base. Returns the number of overruns.
- *
- * Can be safely called from the callback function of @timer. If
- * called from other contexts @timer must neither be enqueued nor
- * running the callback and the caller needs to take care of
- * serialization.
- *
- * Note: This only updates the timer expiry value and does not requeue
- * the timer.
+ * It is a variant of hrtimer_forward(). The timer will expire after the current
+ * time of the hrtimer clock base. See hrtimer_forward() for details.
*/
static inline u64 hrtimer_forward_now(struct hrtimer *timer,
ktime_t interval)
{
- return hrtimer_forward(timer, timer->base->get_time(), interval);
+ return hrtimer_forward(timer, hrtimer_cb_get_time(timer), interval);
}
/* Precise sleep: */
@@ -534,10 +386,11 @@ extern void __init hrtimers_init(void);
extern void sysrq_timer_list_show(void);
int hrtimers_prepare_cpu(unsigned int cpu);
+int hrtimers_cpu_starting(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
-int hrtimers_dead_cpu(unsigned int cpu);
+int hrtimers_cpu_dying(unsigned int cpu);
#else
-#define hrtimers_dead_cpu NULL
+#define hrtimers_cpu_dying NULL
#endif
#endif
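
The net effect of the hrtimer.h hunks above is an API migration: hrtimer_init() plus a manual timer->function assignment becomes a single hrtimer_setup() call (the function pointer becomes __private, see hrtimer_types.h below). A migration sketch with illustrative names:

static enum hrtimer_restart my_timeout(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void my_arm(struct hrtimer *t)
{
	/* Before: hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *         t->function = my_timeout;
	 * After: the callback is part of the setup call.
	 */
	hrtimer_setup(t, my_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(t, ms_to_ktime(100), HRTIMER_MODE_REL);
}

Changing the callback of an initialized but not yet enqueued timer now goes through hrtimer_update_function() instead of a direct store.
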
diff --git a/include/linux/hrtimer_api.h b/include/linux/hrtimer_api.h
new file mode 100644
index 000000000000..8d9700894468
--- /dev/null
+++ b/include/linux/hrtimer_api.h
@@ -0,0 +1 @@
+#include <linux/hrtimer.h>
diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
index 2d3e3c5fb946..aa49ffa130e5 100644
--- a/include/linux/hrtimer_defs.h
+++ b/include/linux/hrtimer_defs.h
@@ -3,6 +3,8 @@
#define _LINUX_HRTIMER_DEFS_H
#include <linux/ktime.h>
+#include <linux/timerqueue.h>
+#include <linux/seqlock.h>
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -24,4 +26,105 @@
#endif
+#ifdef CONFIG_64BIT
+# define __hrtimer_clock_base_align ____cacheline_aligned
+#else
+# define __hrtimer_clock_base_align
+#endif
+
+/**
+ * struct hrtimer_clock_base - the timer base for a specific clock
+ * @cpu_base: per cpu clock base
+ * @index: clock type index for per_cpu support when moving a
+ * timer to a base on another cpu.
+ * @clockid: clock id for per_cpu support
+ * @seq: seqcount around __run_hrtimer
+ * @running: pointer to the currently running hrtimer
+ * @active: red black tree root node for the active timers
+ * @offset: offset of this clock to the monotonic base
+ */
+struct hrtimer_clock_base {
+ struct hrtimer_cpu_base *cpu_base;
+ unsigned int index;
+ clockid_t clockid;
+ seqcount_raw_spinlock_t seq;
+ struct hrtimer *running;
+ struct timerqueue_head active;
+ ktime_t offset;
+} __hrtimer_clock_base_align;
+
+enum hrtimer_base_type {
+ HRTIMER_BASE_MONOTONIC,
+ HRTIMER_BASE_REALTIME,
+ HRTIMER_BASE_BOOTTIME,
+ HRTIMER_BASE_TAI,
+ HRTIMER_BASE_MONOTONIC_SOFT,
+ HRTIMER_BASE_REALTIME_SOFT,
+ HRTIMER_BASE_BOOTTIME_SOFT,
+ HRTIMER_BASE_TAI_SOFT,
+ HRTIMER_MAX_CLOCK_BASES,
+};
+
+/**
+ * struct hrtimer_cpu_base - the per cpu clock bases
+ * @lock: lock protecting the base and associated clock bases
+ * and timers
+ * @cpu: cpu number
+ * @active_bases: Bitfield to mark bases with active timers
+ * @clock_was_set_seq: Sequence counter of clock was set events
+ * @hres_active: State of high resolution mode
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
+ * @hang_detected: The last hrtimer interrupt detected a hang
+ * @softirq_activated: set when the softirq is raised - updating the
+ * softirq related settings is not required then.
+ * @nr_events: Total number of hrtimer interrupt events
+ * @nr_retries: Total number of hrtimer interrupt retries
+ * @nr_hangs: Total number of hrtimer interrupt hangs
+ * @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @softirq_expiry_lock: Lock which is taken while softirq based hrtimers are
+ * expired
+ * @online: CPU is online from an hrtimers point of view
+ * @timer_waiters: A hrtimer_cancel() invocation waits for the timer
+ * callback to finish.
+ * @expires_next: absolute time of the next event, is required for remote
+ * hrtimer enqueue; it is the total first expiry time (hard
+ * and soft hrtimer are taken into account)
+ * @next_timer: Pointer to the first expiring timer
+ * @softirq_expires_next: Time to check whether the soft queues also need to be expired
+ * @softirq_next_timer: Pointer to the first expiring softirq based timer
+ * @clock_base: array of clock bases for this cpu
+ *
+ * Note: next_timer is just an optimization for __remove_hrtimer().
+ * Do not dereference the pointer because it is not reliable on
+ * cross cpu removals.
+ */
+struct hrtimer_cpu_base {
+ raw_spinlock_t lock;
+ unsigned int cpu;
+ unsigned int active_bases;
+ unsigned int clock_was_set_seq;
+ unsigned int hres_active : 1,
+ in_hrtirq : 1,
+ hang_detected : 1,
+ softirq_activated : 1,
+ online : 1;
+#ifdef CONFIG_HIGH_RES_TIMERS
+ unsigned int nr_events;
+ unsigned short nr_retries;
+ unsigned short nr_hangs;
+ unsigned int max_hang_time;
+#endif
+#ifdef CONFIG_PREEMPT_RT
+ spinlock_t softirq_expiry_lock;
+ atomic_t timer_waiters;
+#endif
+ ktime_t expires_next;
+ struct hrtimer *next_timer;
+ ktime_t softirq_expires_next;
+ struct hrtimer *softirq_next_timer;
+ struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
+ call_single_data_t csd;
+} ____cacheline_aligned;
+
#endif
diff --git a/include/linux/hrtimer_types.h b/include/linux/hrtimer_types.h
new file mode 100644
index 000000000000..8fbbb6bdf7a1
--- /dev/null
+++ b/include/linux/hrtimer_types.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_HRTIMER_TYPES_H
+#define _LINUX_HRTIMER_TYPES_H
+
+#include <linux/types.h>
+#include <linux/timerqueue_types.h>
+
+struct hrtimer_clock_base;
+
+/*
+ * Return values for the callback function
+ */
+enum hrtimer_restart {
+ HRTIMER_NORESTART, /* Timer is not restarted */
+ HRTIMER_RESTART, /* Timer must be restarted */
+};
+
+/**
+ * struct hrtimer - the basic hrtimer structure
+ * @node: timerqueue node, which also manages node.expires,
+ * the absolute expiry time in the hrtimers internal
+ * representation. The time is related to the clock on
+ * which the timer is based. It is set up by adding
+ * slack to the _softexpires value. For non-range timers it is
+ * identical to _softexpires.
+ * @_softexpires: the absolute earliest expiry time of the hrtimer.
+ * The time which was given as expiry time when the timer
+ * was armed.
+ * @function: timer expiry callback function
+ * @base: pointer to the timer base (per cpu and per clock)
+ * @state: state information (See bit values above)
+ * @is_rel: Set if the timer was armed relative
+ * @is_soft: Set if hrtimer will be expired in soft interrupt context.
+ * @is_hard: Set if hrtimer will be expired in hard interrupt context
+ * even on RT.
+ *
+ * The hrtimer structure must be initialized by hrtimer_setup()
+ */
+struct hrtimer {
+ struct timerqueue_node node;
+ ktime_t _softexpires;
+ enum hrtimer_restart (*__private function)(struct hrtimer *);
+ struct hrtimer_clock_base *base;
+ u8 state;
+ u8 is_rel;
+ u8 is_soft;
+ u8 is_hard;
+};
+
+#endif /* _LINUX_HRTIMER_TYPES_H */
diff --git a/include/linux/hsi/ssi_protocol.h b/include/linux/hsi/ssi_protocol.h
index 2d6f3cfa7dea..972434daa000 100644
--- a/include/linux/hsi/ssi_protocol.h
+++ b/include/linux/hsi/ssi_protocol.h
@@ -24,6 +24,7 @@ int ssip_slave_stop_tx(struct hsi_client *master);
void ssip_reset_event(struct hsi_client *master);
int ssip_slave_running(struct hsi_client *master);
+void ssi_waketest(struct hsi_client *cl, unsigned int enable);
#endif /* __LINUX_SSIP_SLAVE_H__ */
diff --git a/include/linux/htcpld.h b/include/linux/htcpld.h
deleted file mode 100644
index 842fce69ac06..000000000000
--- a/include/linux/htcpld.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_HTCPLD_H
-#define __LINUX_HTCPLD_H
-
-struct htcpld_chip_platform_data {
- unsigned int addr;
- unsigned int reset;
- unsigned int num_gpios;
- unsigned int gpio_out_base;
- unsigned int gpio_in_base;
- unsigned int irq_base;
- unsigned int num_irqs;
-};
-
-struct htcpld_core_platform_data {
- unsigned int int_reset_gpio_hi;
- unsigned int int_reset_gpio_lo;
- unsigned int i2c_adapter_id;
-
- struct htcpld_chip_platform_data *chip;
- unsigned int num_chip;
-};
-
-#endif /* __LINUX_HTCPLD_H */
-
diff --git a/include/linux/hte.h b/include/linux/hte.h
new file mode 100644
index 000000000000..8289055061ab
--- /dev/null
+++ b/include/linux/hte.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_HTE_H
+#define __LINUX_HTE_H
+
+#include <linux/errno.h>
+
+struct hte_chip;
+struct hte_device;
+struct of_phandle_args;
+
+/**
+ * enum hte_edge - HTE line edge flags.
+ *
+ * @HTE_EDGE_NO_SETUP: No edge setup. In this case the consumer will set up
+ * edges, for example during the request irq call.
+ * @HTE_RISING_EDGE_TS: Rising edge.
+ * @HTE_FALLING_EDGE_TS: Falling edge.
+ *
+ */
+enum hte_edge {
+ HTE_EDGE_NO_SETUP = 1U << 0,
+ HTE_RISING_EDGE_TS = 1U << 1,
+ HTE_FALLING_EDGE_TS = 1U << 2,
+};
+
+/**
+ * enum hte_return - HTE subsystem return values used during callback.
+ *
+ * @HTE_CB_HANDLED: The consumer handled the data.
+ * @HTE_RUN_SECOND_CB: The consumer needs further processing; in that case
+ * the HTE subsystem calls the secondary callback provided by the consumer,
+ * where it is allowed to sleep.
+ */
+enum hte_return {
+ HTE_CB_HANDLED,
+ HTE_RUN_SECOND_CB,
+};
+
+/**
+ * struct hte_ts_data - HTE timestamp data.
+ *
+ * @tsc: Timestamp value.
+ * @seq: Sequence counter of the timestamps.
+ * @raw_level: Level of the line at the timestamp if provider supports it,
+ * -1 otherwise.
+ */
+struct hte_ts_data {
+ u64 tsc;
+ u64 seq;
+ int raw_level;
+};
+
+/**
+ * struct hte_clk_info - Clock source info that HTE provider uses to timestamp.
+ *
+ * @hz: Supported clock rate in Hz, for example a 1 kHz clock = 1000.
+ * @type: Supported clock type.
+ */
+struct hte_clk_info {
+ u64 hz;
+ clockid_t type;
+};
+
+/**
+ * typedef hte_ts_cb_t - HTE timestamp data processing primary callback.
+ *
+ * The callback is used to push timestamp data to the client and it is
+ * not allowed to sleep.
+ *
+ * @ts: HW timestamp data.
+ * @data: Client supplied data.
+ */
+typedef enum hte_return (*hte_ts_cb_t)(struct hte_ts_data *ts, void *data);
+
+/**
+ * typedef hte_ts_sec_cb_t - HTE timestamp data processing secondary callback.
+ *
+ * This is used when the client needs further processing where it is
+ * allowed to sleep.
+ *
+ * @data: Client supplied data.
+ *
+ */
+typedef enum hte_return (*hte_ts_sec_cb_t)(void *data);
+
+/**
+ * struct hte_line_attr - Line attributes.
+ *
+ * @line_id: The logical ID understood by the consumers and providers.
+ * @line_data: Line data related to line_id.
+ * @edge_flags: Edge setup flags.
+ * @name: Descriptive name of the entity that is being monitored for the
+ * hardware timestamping. If null, HTE core will construct the name.
+ *
+ */
+struct hte_line_attr {
+ u32 line_id;
+ void *line_data;
+ unsigned long edge_flags;
+ const char *name;
+};
+
+/**
+ * struct hte_ts_desc - HTE timestamp descriptor.
+ *
+ * This structure is a communication token between consumers and the
+ * subsystem, and between the subsystem and providers.
+ *
+ * @attr: The line attributes.
+ * @hte_data: Subsystem's private data, set by HTE subsystem.
+ */
+struct hte_ts_desc {
+ struct hte_line_attr attr;
+ void *hte_data;
+};
+
+/**
+ * struct hte_ops - HTE operations set by providers.
+ *
+ * @request: Hook for requesting a HTE timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @release: Hook for releasing a HTE timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @enable: Hook to enable the specified timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @disable: Hook to disable specified timestamp. Returns 0 on success,
+ * non-zero for failures.
+ * @get_clk_src_info: Hook to get the clock information the provider uses
+ * to timestamp. Returns 0 on success and a negative error code on failure.
+ * On success the HTE subsystem fills in the provided struct hte_clk_info.
+ *
+ * The xlated_id parameter is used to communicate between the HTE subsystem
+ * and the providers and is translated by the provider.
+ */
+struct hte_ops {
+ int (*request)(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 xlated_id);
+ int (*release)(struct hte_chip *chip, struct hte_ts_desc *desc,
+ u32 xlated_id);
+ int (*enable)(struct hte_chip *chip, u32 xlated_id);
+ int (*disable)(struct hte_chip *chip, u32 xlated_id);
+ int (*get_clk_src_info)(struct hte_chip *chip,
+ struct hte_clk_info *ci);
+};
+
+/**
+ * struct hte_chip - Abstract HTE chip.
+ *
+ * @name: functional name of the HTE IP block.
+ * @dev: device providing the HTE.
+ * @ops: callbacks for this HTE.
+ * @nlines: number of lines/signals supported by this chip.
+ * @xlate_of: Callback which translates consumer supplied logical ids to
+ * physical ids; returns 0 on success and a negative error code on failure.
+ * On success it stores the translated id (between 0 and @nlines) in the
+ * xlated_id parameter.
+ * @xlate_plat: Same as above but for the consumers with no DT node.
+ * @match_from_linedata: Match HTE device using the line_data.
+ * @of_hte_n_cells: Number of cells used to form the HTE specifier.
+ * @gdev: HTE subsystem abstract device, internal to the HTE subsystem.
+ * @data: chip specific private data.
+ */
+struct hte_chip {
+ const char *name;
+ struct device *dev;
+ const struct hte_ops *ops;
+ u32 nlines;
+ int (*xlate_of)(struct hte_chip *gc,
+ const struct of_phandle_args *args,
+ struct hte_ts_desc *desc, u32 *xlated_id);
+ int (*xlate_plat)(struct hte_chip *gc, struct hte_ts_desc *desc,
+ u32 *xlated_id);
+ bool (*match_from_linedata)(const struct hte_chip *chip,
+ const struct hte_ts_desc *hdesc);
+ u8 of_hte_n_cells;
+
+ struct hte_device *gdev;
+ void *data;
+};
+
+#if IS_ENABLED(CONFIG_HTE)
+/* HTE APIs for the providers */
+int devm_hte_register_chip(struct hte_chip *chip);
+int hte_push_ts_ns(const struct hte_chip *chip, u32 xlated_id,
+ struct hte_ts_data *data);
+
+/* HTE APIs for the consumers */
+int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+ unsigned long edge_flags, const char *name,
+ void *data);
+int hte_ts_get(struct device *dev, struct hte_ts_desc *desc, int index);
+int hte_ts_put(struct hte_ts_desc *desc);
+int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data);
+int devm_hte_request_ts_ns(struct device *dev, struct hte_ts_desc *desc,
+ hte_ts_cb_t cb, hte_ts_sec_cb_t tcb, void *data);
+int of_hte_req_count(struct device *dev);
+int hte_enable_ts(struct hte_ts_desc *desc);
+int hte_disable_ts(struct hte_ts_desc *desc);
+int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+ struct hte_clk_info *ci);
+
+#else /* !CONFIG_HTE */
+static inline int devm_hte_register_chip(struct hte_chip *chip)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_push_ts_ns(const struct hte_chip *chip,
+ u32 xlated_id,
+ const struct hte_ts_data *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_init_line_attr(struct hte_ts_desc *desc, u32 line_id,
+ unsigned long edge_flags,
+ const char *name, void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_ts_get(struct device *dev, struct hte_ts_desc *desc,
+ int index)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_ts_put(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_request_ts_ns(struct hte_ts_desc *desc, hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb, void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int devm_hte_request_ts_ns(struct device *dev,
+ struct hte_ts_desc *desc,
+ hte_ts_cb_t cb,
+ hte_ts_sec_cb_t tcb,
+ void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int of_hte_req_count(struct device *dev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_enable_ts(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_disable_ts(struct hte_ts_desc *desc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hte_get_clk_src_info(const struct hte_ts_desc *desc,
+ struct hte_clk_info *ci)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* !CONFIG_HTE */
+
+#endif
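
To illustrate the consumer half of the API above, a hedged probe-time sketch; index 0, the static descriptor lifetime and the absence of a secondary callback are assumptions (the primary callback must not sleep, so it should only return HTE_RUN_SECOND_CB when a sleeping tcb was supplied):

static enum hte_return my_ts_cb(struct hte_ts_data *ts, void *data)
{
	/* Hard path: no sleeping; stash ts->tsc/ts->seq for later. */
	return HTE_CB_HANDLED;
}

/* The descriptor must outlive the request; static here for brevity. */
static struct hte_ts_desc my_desc;

static int my_probe_ts(struct device *dev)
{
	int ret;

	ret = hte_ts_get(dev, &my_desc, 0);
	if (ret)
		return ret;

	ret = hte_request_ts_ns(&my_desc, my_ts_cb, NULL, NULL);
	if (ret)
		hte_ts_put(&my_desc);
	return ret;
}
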
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 9626fda5efce..ae7f21aad0ac 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -2,16 +2,16 @@
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H
-#include <linux/sched/coredump.h>
#include <linux/mm_types.h>
#include <linux/fs.h> /* only for vma_is_dax() */
+#include <linux/kobject.h>
vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
- struct vm_area_struct *vma);
-void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
+ struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
+bool huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
struct vm_area_struct *vma);
@@ -24,10 +24,7 @@ static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
}
#endif
-vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
-struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
- unsigned long addr, pmd_t *pmd,
- unsigned int flags);
+vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
@@ -36,49 +33,21 @@ int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
-int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
- pgprot_t newprot, unsigned long cp_flags);
-vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
- pgprot_t pgprot, bool write);
-
-/**
- * vmf_insert_pfn_pmd - insert a pmd size pfn
- * @vmf: Structure describing the fault
- * @pfn: pfn to insert
- * @pgprot: page protection to use
- * @write: whether it's a write fault
- *
- * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
- *
- * Return: vm_fault_t value.
- */
-static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
- bool write)
-{
- return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
-}
-vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
- pgprot_t pgprot, bool write);
-
-/**
- * vmf_insert_pfn_pud - insert a pud size pfn
- * @vmf: Structure describing the fault
- * @pfn: pfn to insert
- * @pgprot: page protection to use
- * @write: whether it's a write fault
- *
- * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
- *
- * Return: vm_fault_t value.
- */
-static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
- bool write)
-{
- return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
-}
+int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags);
+
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
+ bool write);
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
+ bool write);
+vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
+ bool write);
+vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
+ bool write);
enum transparent_hugepage_flag {
- TRANSPARENT_HUGEPAGE_NEVER_DAX,
+ TRANSPARENT_HUGEPAGE_UNSUPPORTED,
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
@@ -100,324 +69,629 @@ ssize_t single_hugepage_flag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf,
enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
+extern struct kobj_attribute thpsize_shmem_enabled_attr;
-#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
-#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
+/*
+ * Mask of all large folio orders supported for anonymous THP; all orders up to
+ * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
+ * (which is a limitation of the THP implementation).
+ */
+#define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * Mask of all large folio orders supported for file THP. Folios in a DAX
+ * file are never split and the MAX_PAGECACHE_ORDER limit does not apply to
+ * them. The same applies to PFNMAPs, where there is neither a page* nor a
+ * pagecache.
+ */
+#define THP_ORDERS_ALL_SPECIAL \
+ (BIT(PMD_ORDER) | BIT(PUD_ORDER))
+#define THP_ORDERS_ALL_FILE_DEFAULT \
+ ((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))
+
+/*
+ * Mask of all large folio orders supported for THP.
+ */
+#define THP_ORDERS_ALL \
+ (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)
+
+enum tva_type {
+ TVA_SMAPS, /* Exposing "THPeligible:" in smaps. */
+ TVA_PAGEFAULT, /* Serving a page fault. */
+ TVA_KHUGEPAGED, /* Khugepaged collapse. */
+ TVA_FORCED_COLLAPSE, /* Forced collapse (e.g. MADV_COLLAPSE). */
+};
+
+#define thp_vma_allowable_order(vma, vm_flags, type, order) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, type, BIT(order)))
+
+#define split_folio(f) split_folio_to_list(f, NULL)
+
+#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
-#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
+#define HPAGE_PUD_SHIFT PUD_SHIFT
+#else
+#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
+#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
+#endif
+
+#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
+#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))
+#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
-#define HPAGE_PUD_SHIFT PUD_SHIFT
-#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
+#define HPAGE_PUD_ORDER (HPAGE_PUD_SHIFT-PAGE_SHIFT)
+#define HPAGE_PUD_NR (1<<HPAGE_PUD_ORDER)
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
+#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
-extern unsigned long transparent_hugepage_flags;
+enum mthp_stat_item {
+ MTHP_STAT_ANON_FAULT_ALLOC,
+ MTHP_STAT_ANON_FAULT_FALLBACK,
+ MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
+ MTHP_STAT_ZSWPOUT,
+ MTHP_STAT_SWPIN,
+ MTHP_STAT_SWPIN_FALLBACK,
+ MTHP_STAT_SWPIN_FALLBACK_CHARGE,
+ MTHP_STAT_SWPOUT,
+ MTHP_STAT_SWPOUT_FALLBACK,
+ MTHP_STAT_SHMEM_ALLOC,
+ MTHP_STAT_SHMEM_FALLBACK,
+ MTHP_STAT_SHMEM_FALLBACK_CHARGE,
+ MTHP_STAT_SPLIT,
+ MTHP_STAT_SPLIT_FAILED,
+ MTHP_STAT_SPLIT_DEFERRED,
+ MTHP_STAT_NR_ANON,
+ MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
+ __MTHP_STAT_COUNT
+};
-/*
- * to be used on vmas which are known to support THP.
- * Use transparent_hugepage_enabled otherwise
- */
-static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
+struct mthp_stat {
+ unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
+};
+
+DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
+
+static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{
+ if (order <= 0 || order > PMD_ORDER)
+ return;
- /*
- * If the hardware/firmware marked hugepage support disabled.
- */
- if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
- return false;
+ this_cpu_add(mthp_stats.stats[order][item], delta);
+}
- if (vma->vm_flags & VM_NOHUGEPAGE)
- return false;
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+ mod_mthp_stat(order, item, 1);
+}
- if (vma_is_temporary_stack(vma))
- return false;
+#else
+static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
+{
+}
- if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
- return false;
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+}
+#endif
- if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
- return true;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (vma_is_dax(vma))
- return true;
+extern unsigned long transparent_hugepage_flags;
+extern unsigned long huge_anon_orders_always;
+extern unsigned long huge_anon_orders_madvise;
+extern unsigned long huge_anon_orders_inherit;
- if (transparent_hugepage_flags &
- (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
- return !!(vma->vm_flags & VM_HUGEPAGE);
+static inline bool hugepage_global_enabled(void)
+{
+ return transparent_hugepage_flags &
+ ((1<<TRANSPARENT_HUGEPAGE_FLAG) |
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
+}
- return false;
+static inline bool hugepage_global_always(void)
+{
+ return transparent_hugepage_flags &
+ (1<<TRANSPARENT_HUGEPAGE_FLAG);
}
-bool transparent_hugepage_enabled(struct vm_area_struct *vma);
+static inline int highest_order(unsigned long orders)
+{
+ return fls_long(orders) - 1;
+}
-#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)
+static inline int next_order(unsigned long *orders, int prev)
+{
+ *orders &= ~BIT(prev);
+ return highest_order(*orders);
+}
-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long haddr)
+/*
+ * Do the below checks:
+ * - For file vma, check if the linear page offset of vma is
+ * order-aligned within the file. The hugepage is
+ * guaranteed to be order-aligned within the file, but we must
+ * check that the order-aligned addresses in the VMA map to
+ * order-aligned offsets within the file, else the hugepage will
+ * not be mappable.
+ * - For all vmas, check if the haddr is in an aligned hugepage
+ * area.
+ */
+static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
+ unsigned long addr, int order)
{
+ unsigned long hpage_size = PAGE_SIZE << order;
+ unsigned long haddr;
+
/* Don't have to check pgoff for anonymous vma */
if (!vma_is_anonymous(vma)) {
- if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
- (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
+ if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
+ hpage_size >> PAGE_SHIFT))
return false;
}
- if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+ haddr = ALIGN_DOWN(addr, hpage_size);
+
+ if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
return false;
return true;
}
+/*
+ * Filter the bitfield of input orders to the ones suitable for use in the vma.
+ * See thp_vma_suitable_order().
+ * All orders that pass the checks are returned as a bitfield.
+ */
+static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long orders)
+{
+ int order;
+
+ /*
+ * Iterate over orders, highest to lowest, removing orders that don't
+ * meet alignment requirements from the set. Exit loop at first order
+ * that meets requirements, since all lower orders must also meet
+ * requirements.
+ */
+
+ order = highest_order(orders);
+
+ while (orders) {
+ if (thp_vma_suitable_order(vma, addr, order))
+ break;
+ order = next_order(&orders, order);
+ }
+
+ return orders;
+}
+
+unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
+ vm_flags_t vm_flags,
+ enum tva_type type,
+ unsigned long orders);
+
+/**
+ * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
+ * @vma: the vm area to check
+ * @vm_flags: use these vm_flags instead of vma->vm_flags
+ * @type: TVA type
+ * @orders: bitfield of all orders to consider
+ *
+ * Calculates the intersection of the requested hugepage orders and the allowed
+ * hugepage orders for the provided vma. Permitted orders are encoded as a set
+ * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
+ * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
+ *
+ * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
+ * orders are allowed.
+ */
+static inline
+unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
+ vm_flags_t vm_flags,
+ enum tva_type type,
+ unsigned long orders)
+{
+ /*
+ * Optimization to check if required orders are enabled early. Only
+ * forced collapse ignores sysfs configs.
+ */
+ if (type != TVA_FORCED_COLLAPSE && vma_is_anonymous(vma)) {
+ unsigned long mask = READ_ONCE(huge_anon_orders_always);
+
+ if (vm_flags & VM_HUGEPAGE)
+ mask |= READ_ONCE(huge_anon_orders_madvise);
+ if (hugepage_global_always() ||
+ ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
+ mask |= READ_ONCE(huge_anon_orders_inherit);
+
+ orders &= mask;
+ if (!orders)
+ return 0;
+ }
+
+ return __thp_vma_allowable_orders(vma, vm_flags, type, orders);
+}
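/*
 * Sketch of the orders-bitfield convention documented above: request
 * order-9 (PMD) and order-4 THPs, then pick the highest order that
 * survives the filtering, if any. The helper is hypothetical and
 * TVA_PAGEFAULT is assumed to be the fault-path TVA type.
 */
static inline int pick_highest_thp_order(struct vm_area_struct *vma)
{
	unsigned long orders = BIT(HPAGE_PMD_ORDER) | BIT(4);

	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
					  TVA_PAGEFAULT, orders);
	return orders ? highest_order(orders) : 0;
}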
+
+struct thpsize {
+ struct kobject kobj;
+ struct list_head node;
+ int order;
+};
+
+#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
+
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
+/*
+ * Check whether THPs are explicitly disabled for this VMA, for example,
+ * through madvise or prctl.
+ */
+static inline bool vma_thp_disabled(struct vm_area_struct *vma,
+ vm_flags_t vm_flags, bool forced_collapse)
+{
+ /* Are THPs disabled for this VMA? */
+ if (vm_flags & VM_NOHUGEPAGE)
+ return true;
+ /* Are THPs disabled for all VMAs in the whole process? */
+ if (mm_flags_test(MMF_DISABLE_THP_COMPLETELY, vma->vm_mm))
+ return true;
+ /*
+ * Are THPs disabled only for VMAs where we didn't get an explicit
+ * advise to use them?
+ */
+ if (vm_flags & VM_HUGEPAGE)
+ return false;
+ /*
+	 * Forcing a collapse (e.g., MADV_COLLAPSE) is clear advice to
+	 * use THPs.
+ */
+ if (forced_collapse)
+ return false;
+ return mm_flags_test(MMF_DISABLE_THP_EXCEPT_ADVISED, vma->vm_mm);
+}
+
+static inline bool thp_disabled_by_hw(void)
+{
+ /* If the hardware/firmware marked hugepage support disabled. */
+ return transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED);
+}
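/*
 * Hypothetical sketch combining the two predicates above the way a THP
 * decision point typically would: hardware support first, then the
 * per-VMA and per-process policy.
 */
static inline bool thp_usable(struct vm_area_struct *vma, vm_flags_t vm_flags)
{
	return !thp_disabled_by_hw() &&
	       !vma_thp_disabled(vma, vm_flags, /* forced_collapse= */ false);
}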
+
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
+unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags);
-void prep_transhuge_page(struct page *page);
-void free_transhuge_page(struct page *page);
-bool is_transparent_hugepage(struct page *page);
+enum split_type {
+ SPLIT_TYPE_UNIFORM,
+ SPLIT_TYPE_NON_UNIFORM,
+};
+
+bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
+int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+ unsigned int new_order);
+int folio_split_unmapped(struct folio *folio, unsigned int new_order);
+int min_order_for_split(struct folio *folio);
+int split_folio_to_list(struct folio *folio, struct list_head *list);
+bool folio_split_supported(struct folio *folio, unsigned int new_order,
+ enum split_type split_type, bool warns);
+int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
+ struct list_head *list);
+
+static inline int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+ unsigned int new_order)
+{
+ return __split_huge_page_to_list_to_order(page, list, new_order);
+}
+static inline int split_huge_page_to_order(struct page *page, unsigned int new_order)
+{
+ return split_huge_page_to_list_to_order(page, NULL, new_order);
+}
-bool can_split_huge_page(struct page *page, int *pextra_pins);
-int split_huge_page_to_list(struct page *page, struct list_head *list);
+/**
+ * try_folio_split_to_order() - try to split a @folio at @page to @new_order
+ * using a non-uniform split.
+ * @folio: folio to be split
+ * @page: split to @new_order at the given page
+ * @new_order: the target split order
+ *
+ * Try to split @folio at @page to @new_order using a non-uniform split; if
+ * the non-uniform split is not supported, fall back to a uniform split.
+ * After-split folios are put back on the LRU list. Use min_order_for_split()
+ * to get the lower bound of @new_order.
+ *
+ * Return: 0 if the split succeeded, otherwise an error.
+ */
+static inline int try_folio_split_to_order(struct folio *folio,
+ struct page *page, unsigned int new_order)
+{
+ if (!folio_split_supported(folio, new_order, SPLIT_TYPE_NON_UNIFORM, /* warns= */ false))
+ return split_huge_page_to_order(&folio->page, new_order);
+ return folio_split(folio, new_order, page, NULL);
+}
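/*
 * Sketch of a split call site honouring the documented lower bound:
 * clamp the requested order to min_order_for_split() before splitting.
 * Hypothetical helper; the usual folio-split locking rules still apply.
 */
static inline int split_folio_clamped(struct folio *folio, struct page *page,
				      int order)
{
	int min_order = min_order_for_split(folio);

	if (min_order < 0)
		return min_order;
	return try_folio_split_to_order(folio, page, max(order, min_order));
}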
static inline int split_huge_page(struct page *page)
{
- return split_huge_page_to_list(page, NULL);
+ return split_huge_page_to_list_to_order(page, NULL, 0);
}
-void deferred_split_huge_page(struct page *page);
+void deferred_split_folio(struct folio *folio, bool partially_mapped);
+#ifdef CONFIG_MEMCG
+void reparent_deferred_split_queue(struct mem_cgroup *memcg);
+#endif
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct page *page);
+ unsigned long address, bool freeze);
+
+/**
+ * pmd_is_huge() - Is this PMD either a huge PMD entry or a software leaf entry?
+ * @pmd: The PMD to check.
+ *
+ * A huge PMD entry is a non-empty entry which is present and marked huge,
+ * or a software leaf entry. This check can be performed without the
+ * appropriate locks held, in which case the condition should be rechecked
+ * after they are acquired.
+ *
+ * Returns: true if this PMD is huge, false otherwise.
+ */
+static inline bool pmd_is_huge(pmd_t pmd)
+{
+ if (pmd_present(pmd)) {
+ return pmd_trans_huge(pmd);
+ } else if (!pmd_none(pmd)) {
+ /*
+ * Non-present PMDs must be valid huge non-present entries. We
+ * cannot assert that here due to header dependency issues.
+ */
+ return true;
+ }
+
+ return false;
+}
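/*
 * Sketch of the recheck-under-lock pattern described above: a lockless
 * pmd_is_huge() hit is only a hint, and pmd_trans_huge_lock() below
 * re-verifies the entry under the PMD lock. Hypothetical helper;
 * mmap_lock must be held, as for pmd_trans_huge_lock() itself.
 */
static inline bool pmd_is_huge_stable(struct vm_area_struct *vma, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);

	if (!ptl)
		return false;	/* not (or no longer) a huge entry */
	spin_unlock(ptl);
	return true;
}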
#define split_huge_pmd(__vma, __pmd, __address) \
do { \
pmd_t *____pmd = (__pmd); \
- if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd) \
- || pmd_devmap(*____pmd)) \
+ if (pmd_is_huge(*____pmd)) \
__split_huge_pmd(__vma, __pmd, __address, \
- false, NULL); \
+ false); \
} while (0)
-
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
- bool freeze, struct page *page);
+ bool freeze);
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
unsigned long address);
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pudp, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags);
+#else
+static inline int
+change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ pud_t *pudp, unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags) { return 0; }
+#endif
+
#define split_huge_pud(__vma, __pud, __address) \
do { \
pud_t *____pud = (__pud); \
- if (pud_trans_huge(*____pud) \
- || pud_devmap(*____pud)) \
+ if (pud_trans_huge(*____pud)) \
__split_huge_pud(__vma, __pud, __address); \
} while (0)
-int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
+int hugepage_madvise(struct vm_area_struct *vma, vm_flags_t *vm_flags,
int advice);
+int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, bool *lock_dropped);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, long adjust_next);
+ unsigned long end, struct vm_area_struct *next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
-static inline int is_swap_pmd(pmd_t pmd)
-{
- return !pmd_none(pmd) && !pmd_present(pmd);
-}
-
/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
- if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
+ if (pmd_is_huge(*pmd))
return __pmd_trans_huge_lock(pmd, vma);
- else
- return NULL;
+
+ return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
struct vm_area_struct *vma)
{
- if (pud_trans_huge(*pud) || pud_devmap(*pud))
+ if (pud_trans_huge(*pud))
return __pud_trans_huge_lock(pud, vma);
else
return NULL;
}
/**
- * thp_head - Head page of a transparent huge page.
- * @page: Any page (tail, head or regular) found in the page cache.
+ * folio_test_pmd_mappable - Can we map this folio with a PMD?
+ * @folio: The folio to test
+ *
+ * Return: true - @folio can be mapped, false - @folio cannot be mapped.
*/
-static inline struct page *thp_head(struct page *page)
+static inline bool folio_test_pmd_mappable(struct folio *folio)
{
- return compound_head(page);
+ return folio_order(folio) >= HPAGE_PMD_ORDER;
}
-/**
- * thp_order - Order of a transparent huge page.
- * @page: Head page of a transparent huge page.
- */
-static inline unsigned int thp_order(struct page *page)
-{
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
- if (PageHead(page))
- return HPAGE_PMD_ORDER;
- return 0;
-}
+vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
-/**
- * thp_nr_pages - The number of regular pages in this huge page.
- * @page: The head page of a huge page.
- */
-static inline int thp_nr_pages(struct page *page)
-{
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
- if (PageHead(page))
- return HPAGE_PMD_NR;
- return 1;
-}
+vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf);
-struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
-struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
- pud_t *pud, int flags, struct dev_pagemap **pgmap);
+extern struct folio *huge_zero_folio;
+extern unsigned long huge_zero_pfn;
-vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
+static inline bool is_huge_zero_folio(const struct folio *folio)
+{
+ VM_WARN_ON_ONCE(!folio);
-extern struct page *huge_zero_page;
+ return READ_ONCE(huge_zero_folio) == folio;
+}
-static inline bool is_huge_zero_page(struct page *page)
+static inline bool is_huge_zero_pfn(unsigned long pfn)
{
- return READ_ONCE(huge_zero_page) == page;
+ return READ_ONCE(huge_zero_pfn) == (pfn & ~(HPAGE_PMD_NR - 1));
}
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
- return is_huge_zero_page(pmd_page(pmd));
+ return pmd_present(pmd) && is_huge_zero_pfn(pmd_pfn(pmd));
}
-static inline bool is_huge_zero_pud(pud_t pud)
+struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
+void mm_put_huge_zero_folio(struct mm_struct *mm);
+
+static inline struct folio *get_persistent_huge_zero_folio(void)
{
- return false;
-}
+ if (!IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO))
+ return NULL;
-struct page *mm_get_huge_zero_page(struct mm_struct *mm);
-void mm_put_huge_zero_page(struct mm_struct *mm);
+ if (unlikely(!huge_zero_folio))
+ return NULL;
-#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
+ return huge_zero_folio;
+}
static inline bool thp_migration_supported(void)
{
return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}
-static inline struct list_head *page_deferred_list(struct page *page)
-{
- /*
- * Global or memcg deferred list in the second tail pages is
- * occupied by compound_head.
- */
- return &page[2].deferred_list;
-}
+void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmd, bool freeze);
+bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmdp, struct folio *folio);
+void map_anon_folio_pmd_nopf(struct folio *folio, pmd_t *pmd,
+ struct vm_area_struct *vma, unsigned long haddr);
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
-#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
-#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
-
-#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
-#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
-#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
-static inline struct page *thp_head(struct page *page)
-{
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
- return page;
-}
-
-static inline unsigned int thp_order(struct page *page)
-{
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
- return 0;
-}
-
-static inline int thp_nr_pages(struct page *page)
-{
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
- return 1;
-}
-
-static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool folio_test_pmd_mappable(struct folio *folio)
{
return false;
}
-static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
+ unsigned long addr, int order)
{
return false;
}
-static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
- unsigned long haddr)
+static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long orders)
{
- return false;
+ return 0;
}
-static inline void prep_transhuge_page(struct page *page) {}
-
-static inline bool is_transparent_hugepage(struct page *page)
+static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
+ vm_flags_t vm_flags,
+ enum tva_type type,
+ unsigned long orders)
{
- return false;
+ return 0;
}
#define transparent_hugepage_flags 0UL
#define thp_get_unmapped_area NULL
+static inline unsigned long
+thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags)
+{
+ return 0;
+}
+
static inline bool
-can_split_huge_page(struct page *page, int *pextra_pins)
+can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
- BUILD_BUG();
return false;
}
static inline int
-split_huge_page_to_list(struct page *page, struct list_head *list)
+split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
+ unsigned int new_order)
{
- return 0;
+ VM_WARN_ON_ONCE_PAGE(1, page);
+ return -EINVAL;
+}
+static inline int split_huge_page_to_order(struct page *page, unsigned int new_order)
+{
+ VM_WARN_ON_ONCE_PAGE(1, page);
+ return -EINVAL;
}
static inline int split_huge_page(struct page *page)
{
- return 0;
+ VM_WARN_ON_ONCE_PAGE(1, page);
+ return -EINVAL;
+}
+
+static inline int min_order_for_split(struct folio *folio)
+{
+ VM_WARN_ON_ONCE_FOLIO(1, folio);
+ return -EINVAL;
}
-static inline void deferred_split_huge_page(struct page *page) {}
+
+static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+ VM_WARN_ON_ONCE_FOLIO(1, folio);
+ return -EINVAL;
+}
+
+static inline int try_folio_split_to_order(struct folio *folio,
+ struct page *page, unsigned int new_order)
+{
+ VM_WARN_ON_ONCE_FOLIO(1, folio);
+ return -EINVAL;
+}
+
+static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
+static inline void reparent_deferred_split_queue(struct mem_cgroup *memcg) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long address, bool freeze, struct page *page) {}
+ unsigned long address, bool freeze) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
- unsigned long address, bool freeze, struct page *page) {}
+ unsigned long address, bool freeze) {}
+static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmd,
+ bool freeze) {}
+
+static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp,
+ struct folio *folio)
+{
+ return false;
+}
#define split_huge_pud(__vma, __pmd, __address) \
do { } while (0)
static inline int hugepage_madvise(struct vm_area_struct *vma,
- unsigned long *vm_flags, int advice)
+ vm_flags_t *vm_flags, int advice)
{
- BUG();
- return 0;
+ return -EINVAL;
}
+
+static inline int madvise_collapse(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end, bool *lock_dropped)
+{
+ return -EINVAL;
+}
+
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
- long adjust_next)
+ struct vm_area_struct *next)
{
}
-static inline int is_swap_pmd(pmd_t pmd)
-{
- return 0;
-}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
@@ -429,54 +703,106 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
return NULL;
}
-static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
- pmd_t orig_pmd)
+static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
+{
+ return 0;
+}
+
+static inline vm_fault_t do_huge_pmd_device_private(struct vm_fault *vmf)
{
return 0;
}
-static inline bool is_huge_zero_page(struct page *page)
+static inline bool is_huge_zero_folio(const struct folio *folio)
{
return false;
}
-static inline bool is_huge_zero_pud(pud_t pud)
+static inline bool is_huge_zero_pfn(unsigned long pfn)
{
return false;
}
-static inline void mm_put_huge_zero_page(struct mm_struct *mm)
+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+ return false;
+}
+
+static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
{
return;
}
-static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
- unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
+static inline bool thp_migration_supported(void)
+{
+ return false;
+}
+
+static inline int highest_order(unsigned long orders)
+{
+ return 0;
+}
+
+static inline int next_order(unsigned long *orders, int prev)
+{
+ return 0;
+}
+
+static inline void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long address)
{
- return NULL;
}
-static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
- unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
+static inline int change_huge_pud(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, pud_t *pudp,
+ unsigned long addr, pgprot_t newprot,
+ unsigned long cp_flags)
+{
+ return 0;
+}
+
+static inline struct folio *get_persistent_huge_zero_folio(void)
{
return NULL;
}
-static inline bool thp_migration_supported(void)
+static inline bool pmd_is_huge(pmd_t pmd)
{
return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int split_folio_to_list_to_order(struct folio *folio,
+ struct list_head *list, int new_order)
+{
+ return split_huge_page_to_list_to_order(&folio->page, list, new_order);
+}
+
+static inline int split_folio_to_order(struct folio *folio, int new_order)
+{
+ return split_folio_to_list_to_order(folio, NULL, new_order);
+}
+
/**
- * thp_size - Size of a transparent huge page.
- * @page: Head page of a transparent huge page.
+ * largest_zero_folio - Get the largest zero-filled folio available
+ *
+ * This function shall be used when mm_get_huge_zero_folio() cannot be
+ * used because there is no appropriate mm whose lifetime the huge zero
+ * folio could be tied to.
*
- * Return: Number of bytes in this page.
+ * Deduce the size of the returned folio with folio_size() instead of
+ * assuming a fixed folio size.
+ *
+ * Return: pointer to the PMD-sized zero folio if
+ * CONFIG_PERSISTENT_HUGE_ZERO_FOLIO is enabled, otherwise a single
+ * page-sized zero folio.
*/
-static inline unsigned long thp_size(struct page *page)
+static inline struct folio *largest_zero_folio(void)
{
- return PAGE_SIZE << thp_order(page);
-}
+	struct folio *folio = get_persistent_huge_zero_folio();
+
+	if (folio)
+ return folio;
+
+ return page_folio(ZERO_PAGE(0));
+}
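/*
 * Usage sketch for largest_zero_folio(): never assume a PMD-sized
 * folio; derive the usable length from folio_size(). Hypothetical
 * helper.
 */
static inline size_t zero_fill_from_zero_folio(void *dst, size_t len)
{
	struct folio *zero = largest_zero_folio();
	size_t chunk = min(len, folio_size(zero));

	memcpy(dst, folio_address(zero), chunk);
	return chunk;
}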
#endif /* _LINUX_HUGE_MM_H */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b92f25ccef58..019a1c5281e4 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -2,39 +2,46 @@
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H
+#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
+#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>
+#include <linux/nodemask.h>
struct ctl_table;
struct user_struct;
struct mmu_gather;
+struct node;
-#ifndef is_hugepd
-typedef struct { unsigned long pd; } hugepd_t;
-#define is_hugepd(hugepd) (0)
-#define __hugepd(x) ((hugepd_t) { (x) })
-#endif
+void free_huge_folio(struct folio *folio);
#ifdef CONFIG_HUGETLB_PAGE
-#include <linux/mempolicy.h>
+#include <linux/pagemap.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
+/*
+ * For a HugeTLB page, there is more metadata to save in the struct page
+ * than the head struct page can hold, so we have to abuse other tail
+ * struct pages to store the metadata.
+ */
+#define __NR_USED_SUBPAGE 3
+
struct hugepage_subpool {
spinlock_t lock;
long count;
long max_hpages; /* Maximum huge pages or -1 if no maximum. */
long used_hpages; /* Used count against maximum, includes */
- /* both alloced and reserved pages. */
+ /* both allocated and reserved pages. */
struct hstate *hstate;
long min_hpages; /* Minimum huge pages or -1 if no minimum. */
long rsv_hpages; /* Pages reserved against global pool to */
@@ -48,6 +55,7 @@ struct resv_map {
long adds_in_progress;
struct list_head region_cache;
long region_cache_count;
+ struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
/*
* On private mappings, the counter to uncharge reservations is stored
@@ -68,7 +76,7 @@ struct resv_map {
* by a resv_map's lock. The set of regions within the resv_map represent
* reservations for huge pages, or huge pages that have already been
* instantiated within the map. The from and to elements are huge page
- * indicies into the associated mapping. from indicates the starting index
+ * indices into the associated mapping. from indicates the starting index
* of the region. to represents the first index past the end of the region.
*
* For example, a file region structure with from == 0 and to == 4 represents
@@ -94,6 +102,12 @@ struct file_region {
#endif
};
+struct hugetlb_vma_lock {
+ struct kref refs;
+ struct rw_semaphore rw_sema;
+ struct vm_area_struct *vma;
+};
+
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
@@ -106,97 +120,172 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
-void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
-int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
-int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-
-int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
-long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
- struct page **, struct vm_area_struct **,
- unsigned long *, unsigned long *, long, unsigned int,
- int *);
+void hugetlb_dup_vma_private(struct vm_area_struct *vma);
+void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
+int move_hugetlb_page_tables(struct vm_area_struct *vma,
+ struct vm_area_struct *new_vma,
+ unsigned long old_addr, unsigned long new_addr,
+ unsigned long len);
+int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
+ struct vm_area_struct *, struct vm_area_struct *);
void unmap_hugepage_range(struct vm_area_struct *,
- unsigned long, unsigned long, struct page *);
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end,
+ struct folio *, zap_flags_t);
+void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
- struct page *ref_page);
-void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct page *ref_page);
+ struct folio *, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
-void hugetlb_show_meminfo(void);
+void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
-int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
- struct vm_area_struct *dst_vma,
- unsigned long dst_addr,
- unsigned long src_addr,
- enum mcopy_atomic_mode mode,
- struct page **pagep);
+int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr,
+ unsigned long src_addr,
+ uffd_flags_t flags,
+ struct folio **foliop);
#endif /* CONFIG_USERFAULTFD */
-bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
- struct vm_area_struct *vma,
- vm_flags_t vm_flags);
+long hugetlb_reserve_pages(struct inode *inode, long from, long to,
+ struct vm_area_desc *desc, vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
long freed);
-bool isolate_huge_page(struct page *page, struct list_head *list);
-void putback_active_hugepage(struct page *page);
-void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
-void free_huge_page(struct page *page);
+bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list);
+int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
+int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
+ bool *migratable_cleared);
+void folio_putback_hugetlb(struct folio *folio);
+void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pud_t *pud);
+bool hugetlbfs_pagecache_present(struct hstate *h,
+ struct vm_area_struct *vma,
+ unsigned long address);
-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
-extern int sysctl_hugetlb_shm_group;
-extern struct list_head huge_boot_pages;
+extern int sysctl_hugetlb_shm_group __read_mostly;
+extern struct list_head huge_boot_pages[MAX_NUMNODES];
+
+void hugetlb_bootmem_alloc(void);
+bool hugetlb_bootmem_allocated(void);
+extern nodemask_t hugetlb_bootmem_nodes;
+void hugetlb_bootmem_set_nodes(void);
/* arch callbacks */
+#ifndef CONFIG_HIGHPTE
+/*
+ * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
+ * which may go down to the lowest PTE level in their huge_pte_offset() and
+ * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap().
+ */
+static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
+{
+ return pte_offset_kernel(pmd, address);
+}
+static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address)
+{
+ return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
+}
+#endif
+
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz);
+/*
+ * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
+ * Returns the pte_t* if found, or NULL if the address is not mapped.
+ *
+ * IMPORTANT: this function should normally not be called directly; it
+ * is only a common interface for implementing arch-specific walkers.
+ * Please use hugetlb_walk() instead, because that will attempt to
+ * verify the locking for you.
+ *
+ * Since this function walks all the pgtable pages (including not only
+ * high-level pgtable pages, but also PUD entries that can be unshared
+ * concurrently for VM_SHARED), the caller is responsible for its thread
+ * safety.  One can follow these rules:
+ *
+ * (1) For private mappings: pmd unsharing is not possible, so holding the
+ * mmap_lock for either read or write is sufficient. Most callers
+ * already hold the mmap_lock, so normally, no special action is
+ * required.
+ *
+ * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
+ * pgtable page can go away from under us! It can be done by a pmd
+ * unshare with a follow up munmap() on the other process), then we
+ * need either:
+ *
+ * (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
+ * won't happen upon the range (it also makes sure the pte_t we
+ * read is the right and stable one), or,
+ *
+ * (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
+ * sure even if unshare happened the racy unmap() will wait until
+ * i_mmap_rwsem is released.
+ *
+ * Option (2.1) is the safest, as it guarantees pte stability from the
+ * pmd-sharing point of view until the vma lock is released.  Option
+ * (2.2) doesn't protect against a concurrent pmd unshare, but it makes
+ * sure the pgtable page is safe to access.
+ */
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
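/*
 * Sketch of locking rule (2.1) above for a shared mapping: hold the
 * hugetlb VMA lock across the walk and the read, so a concurrent pmd
 * unshare cannot free the page table under us. Hypothetical helper;
 * the three-argument huge_ptep_get() form is assumed.
 */
static inline pte_t hugetlb_read_pte_locked(struct vm_area_struct *vma,
					    unsigned long addr,
					    unsigned long sz)
{
	pte_t *ptep, pte = __pte(0);

	hugetlb_vma_lock_read(vma);
	ptep = huge_pte_offset(vma->vm_mm, addr, sz);
	if (ptep)
		pte = huge_ptep_get(vma->vm_mm, addr, ptep);
	hugetlb_vma_unlock_read(vma);
	return pte;
}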
+unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
- unsigned long *addr, pte_t *ptep);
+ unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
- int write);
-struct page *follow_huge_pd(struct vm_area_struct *vma,
- unsigned long address, hugepd_t hpd,
- int flags, int pdshift);
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int flags);
-struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int flags);
-struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
- pgd_t *pgd, int flags);
-
-int pmd_huge(pmd_t pmd);
-int pud_huge(pud_t pud);
-unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
- unsigned long address, unsigned long end, pgprot_t newprot);
-
-bool is_hugetlb_entry_migration(pte_t pte);
+
+extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *begin, unsigned long *end);
+extern void __hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details);
+
+static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_begin(vma, start, end);
+}
+
+static inline void hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_end(vma, details);
+}
+
+void hugetlb_vma_lock_read(struct vm_area_struct *vma);
+void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
+void hugetlb_vma_lock_write(struct vm_area_struct *vma);
+void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
+int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
+void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
+void hugetlb_vma_lock_release(struct kref *kref);
+long hugetlb_change_protection(struct vm_area_struct *vma,
+ unsigned long address, unsigned long end, pgprot_t newprot,
+ unsigned long cp_flags);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
+void fixup_hugetlb_reservations(struct vm_area_struct *vma);
+void hugetlb_split(struct vm_area_struct *vma, unsigned long addr);
+int hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
#else /* !CONFIG_HUGETLB_PAGE */
-static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
+static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
+{
+}
+
+static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}
@@ -205,15 +294,15 @@ static inline unsigned long hugetlb_total_pages(void)
return 0;
}
-static inline struct address_space *hugetlb_page_mapping_lock_write(
- struct page *hpage)
+static inline struct address_space *hugetlb_folio_mapping_lock_write(
+ struct folio *folio)
{
return NULL;
}
static inline int huge_pmd_unshare(struct mm_struct *mm,
struct vm_area_struct *vma,
- unsigned long *addr, pte_t *ptep)
+ unsigned long addr, pte_t *ptep)
{
return 0;
}
@@ -224,81 +313,73 @@ static inline void adjust_range_if_pmd_sharing_possible(
{
}
-static inline long follow_hugetlb_page(struct mm_struct *mm,
- struct vm_area_struct *vma, struct page **pages,
- struct vm_area_struct **vmas, unsigned long *position,
- unsigned long *nr_pages, long i, unsigned int flags,
- int *nonblocking)
+static inline void hugetlb_zap_begin(
+ struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
{
- BUG();
- return 0;
}
-static inline struct page *follow_huge_addr(struct mm_struct *mm,
- unsigned long address, int write)
+static inline void hugetlb_zap_end(
+ struct vm_area_struct *vma,
+ struct zap_details *details)
{
- return ERR_PTR(-EINVAL);
}
static inline int copy_hugetlb_page_range(struct mm_struct *dst,
- struct mm_struct *src, struct vm_area_struct *vma)
+ struct mm_struct *src,
+ struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma)
{
BUG();
return 0;
}
-static inline void hugetlb_report_meminfo(struct seq_file *m)
+static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
+ struct vm_area_struct *new_vma,
+ unsigned long old_addr,
+ unsigned long new_addr,
+ unsigned long len)
{
+ BUG();
+ return 0;
}
-static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
+static inline void hugetlb_report_meminfo(struct seq_file *m)
{
- return 0;
}
-static inline void hugetlb_show_meminfo(void)
+static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
+ return 0;
}
-static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
- unsigned long address, hugepd_t hpd, int flags,
- int pdshift)
+static inline void hugetlb_show_meminfo_node(int nid)
{
- return NULL;
}
-static inline struct page *follow_huge_pmd(struct mm_struct *mm,
- unsigned long address, pmd_t *pmd, int flags)
+static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
- return NULL;
}
-static inline struct page *follow_huge_pud(struct mm_struct *mm,
- unsigned long address, pud_t *pud, int flags)
+static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
- return NULL;
}
-static inline struct page *follow_huge_pgd(struct mm_struct *mm,
- unsigned long address, pgd_t *pgd, int flags)
+static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
- return NULL;
}
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr, unsigned long len)
+static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
- return -EINVAL;
}
-static inline int pmd_huge(pmd_t pmd)
+static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
- return 0;
+ return 1;
}
-static inline int pud_huge(pud_t pud)
+static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
- return 0;
}
static inline int is_hugepage_only_range(struct mm_struct *mm,
@@ -307,21 +388,13 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
return 0;
}
-static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
- unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
-{
- BUG();
-}
-
#ifdef CONFIG_USERFAULTFD
-static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
- pte_t *dst_pte,
- struct vm_area_struct *dst_vma,
- unsigned long dst_addr,
- unsigned long src_addr,
- enum mcopy_atomic_mode mode,
- struct page **pagep)
+static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr,
+ unsigned long src_addr,
+ uffd_flags_t flags,
+ struct folio **foliop)
{
BUG();
return 0;
@@ -334,37 +407,43 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
return NULL;
}
-static inline bool isolate_huge_page(struct page *page, struct list_head *list)
+static inline bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
{
return false;
}
-static inline void putback_active_hugepage(struct page *page)
+static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
+ return 0;
}
-static inline void move_hugetlb_state(struct page *oldpage,
- struct page *newpage, int reason)
+static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
+ bool *migratable_cleared)
{
+ return 0;
}
-static inline unsigned long hugetlb_change_protection(
- struct vm_area_struct *vma, unsigned long address,
- unsigned long end, pgprot_t newprot)
+static inline void folio_putback_hugetlb(struct folio *folio)
{
- return 0;
}
-static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
- struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page)
+static inline void move_hugetlb_state(struct folio *old_folio,
+ struct folio *new_folio, int reason)
{
- BUG();
+}
+
+static inline long hugetlb_change_protection(
+ struct vm_area_struct *vma, unsigned long address,
+ unsigned long end, pgprot_t newprot,
+ unsigned long cp_flags)
+{
+ return 0;
}
static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page)
+ unsigned long end, struct folio *folio,
+ zap_flags_t zap_flags)
{
BUG();
}
@@ -379,17 +458,18 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
+static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_split(struct vm_area_struct *vma, unsigned long addr) {}
+
+static inline int hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
+{
+ return 0;
+}
+
#endif /* !CONFIG_HUGETLB_PAGE */
-/*
- * hugepages at page global directory. If arch support
- * hugepages at pgd level, they need to define this.
- */
-#ifndef pgd_huge
-#define pgd_huge(x) 0
-#endif
-#ifndef p4d_huge
-#define p4d_huge(x) 0
-#endif
#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
@@ -432,7 +512,6 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
}
struct hugetlbfs_inode_info {
- struct shared_policy policy;
struct inode vfs_inode;
unsigned int seals;
};
@@ -442,18 +521,13 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}
-extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
- struct user_struct **user, int creat_flags,
- int page_size_log);
+ int creat_flags, int page_size_log);
-static inline bool is_file_hugepages(struct file *file)
+static inline bool is_file_hugepages(const struct file *file)
{
- if (file->f_op == &hugetlbfs_file_operations)
- return true;
-
- return is_file_shm_hugepages(file);
+ return file->f_op->fop_flags & FOP_HUGE_PAGES;
}
static inline struct hstate *hstate_inode(struct inode *i)
@@ -465,8 +539,7 @@ static inline struct hstate *hstate_inode(struct inode *i)
#define is_file_hugepages(file) false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
- struct user_struct **user, int creat_flags,
- int page_size_log)
+ int creat_flags, int page_size_log)
{
return ERR_PTR(-ENOSYS);
}
@@ -477,11 +550,10 @@ static inline struct hstate *hstate_inode(struct inode *i)
}
#endif /* !CONFIG_HUGETLBFS */
-#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags);
-#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
+unsigned long
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
/*
 * hugetlb page specific state flags.  These flags are located in page.private
@@ -500,7 +572,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
* Synchronization: Initially set after new page allocation with no
* locking. When examined and modified during migration processing
* (isolate, migrate, putback) the hugetlb_lock is held.
- * HPG_temporary - - Set on a page that is temporarily allocated from the buddy
+ * HPG_temporary - Set on a page that is temporarily allocated from the buddy
* allocator. Typically used for migration target pages when no pages
* are available in the pool. The hugetlb free page path will
* immediately free pages with this flag set to the buddy allocator.
@@ -509,12 +581,18 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
* modifications require hugetlb_lock.
* HPG_freed - Set when page is on the free lists.
* Synchronization: hugetlb_lock held for examination and modification.
+ * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
+ * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
+ *	that is not tracked by the raw_hwp_page list.
*/
enum hugetlb_page_flags {
HPG_restore_reserve = 0,
HPG_migratable,
HPG_temporary,
HPG_freed,
+ HPG_vmemmap_optimized,
+ HPG_raw_hwp_unreliable,
+ HPG_cma,
__NR_HPAGEFLAGS,
};
@@ -524,27 +602,39 @@ enum hugetlb_page_flags {
*/
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname) \
-static inline int HPage##uname(struct page *page) \
- { return test_bit(HPG_##flname, &(page->private)); }
+static __always_inline \
+bool folio_test_hugetlb_##flname(struct folio *folio) \
+ { void *private = &folio->private; \
+ return test_bit(HPG_##flname, private); \
+ }
#define SETHPAGEFLAG(uname, flname) \
-static inline void SetHPage##uname(struct page *page) \
- { set_bit(HPG_##flname, &(page->private)); }
+static __always_inline \
+void folio_set_hugetlb_##flname(struct folio *folio) \
+ { void *private = &folio->private; \
+ set_bit(HPG_##flname, private); \
+ }
#define CLEARHPAGEFLAG(uname, flname) \
-static inline void ClearHPage##uname(struct page *page) \
- { clear_bit(HPG_##flname, &(page->private)); }
+static __always_inline \
+void folio_clear_hugetlb_##flname(struct folio *folio) \
+ { void *private = &folio->private; \
+ clear_bit(HPG_##flname, private); \
+ }
#else
#define TESTHPAGEFLAG(uname, flname) \
-static inline int HPage##uname(struct page *page) \
+static inline bool \
+folio_test_hugetlb_##flname(struct folio *folio) \
{ return 0; }
#define SETHPAGEFLAG(uname, flname) \
-static inline void SetHPage##uname(struct page *page) \
+static inline void \
+folio_set_hugetlb_##flname(struct folio *folio) \
{ }
#define CLEARHPAGEFLAG(uname, flname) \
-static inline void ClearHPage##uname(struct page *page) \
+static inline void \
+folio_clear_hugetlb_##flname(struct folio *folio) \
{ }
#endif
@@ -560,6 +650,9 @@ HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
+HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
+HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
+HPAGEFLAG(Cma, cma)
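/*
 * For illustration, with CONFIG_HUGETLB_PAGE the HPAGEFLAG(Freed, freed)
 * line above expands to three accessors; the test variant has this shape:
 */
static __always_inline bool folio_test_hugetlb_freed(struct folio *folio)
{
	void *private = &folio->private;

	return test_bit(HPG_freed, private);
}
/* ...plus folio_set_hugetlb_freed() and folio_clear_hugetlb_freed(). */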
#ifdef CONFIG_HUGETLB_PAGE
@@ -567,9 +660,11 @@ HPAGEFLAG(Freed, freed)
/* Defines one hugetlb page size */
struct hstate {
struct mutex resize_lock;
+ struct lock_class_key resize_key;
int next_nid_to_alloc;
int next_nid_to_free;
unsigned int order;
+ unsigned int demote_order;
unsigned long mask;
unsigned long max_huge_pages;
unsigned long nr_huge_pages;
@@ -579,35 +674,48 @@ struct hstate {
unsigned long nr_overcommit_huge_pages;
struct list_head hugepage_activelist;
struct list_head hugepage_freelists[MAX_NUMNODES];
+ unsigned int max_huge_pages_node[MAX_NUMNODES];
unsigned int nr_huge_pages_node[MAX_NUMNODES];
unsigned int free_huge_pages_node[MAX_NUMNODES];
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
-#ifdef CONFIG_CGROUP_HUGETLB
- /* cgroup control files */
- struct cftype cgroup_files_dfl[7];
- struct cftype cgroup_files_legacy[9];
-#endif
char name[HSTATE_NAME_LEN];
};
+struct cma;
+
struct huge_bootmem_page {
struct list_head list;
struct hstate *hstate;
+ unsigned long flags;
+ struct cma *cma;
};
-int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
-struct page *alloc_huge_page(struct vm_area_struct *vma,
- unsigned long addr, int avoid_reserve);
-struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
- nodemask_t *nmask, gfp_t gfp_mask);
-struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
- unsigned long address);
-int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+#define HUGE_BOOTMEM_HVO 0x0001
+#define HUGE_BOOTMEM_ZONES_VALID 0x0002
+#define HUGE_BOOTMEM_CMA 0x0004
+
+bool hugetlb_bootmem_page_zones_valid(int nid, struct huge_bootmem_page *m);
+
+int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list);
+int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn);
+void wait_for_freed_hugetlb_folios(void);
+struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
+ unsigned long addr, bool cow_from_owner);
+struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask,
+ bool allow_alloc_fallback);
+struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask);
+
+int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
pgoff_t idx);
+void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
+ unsigned long address, struct folio *folio);
/* arch callback */
-int __init __alloc_bootmem_huge_page(struct hstate *h);
-int __init alloc_bootmem_huge_page(struct hstate *h);
+int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
+int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
+bool __init hugetlb_node_alloc_supported(void);
void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
@@ -622,18 +730,20 @@ extern unsigned int default_hstate_idx;
#define default_hstate (hstates[default_hstate_idx])
-/*
- * hugetlb page subpool pointer located in hpage[1].private
- */
-static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
+static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
- return (struct hugepage_subpool *)(hpage+1)->private;
+ return HUGETLBFS_SB(inode->i_sb)->spool;
}
-static inline void hugetlb_set_page_subpool(struct page *hpage,
+static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
+{
+ return folio->_hugetlb_subpool;
+}
+
+static inline void hugetlb_set_folio_subpool(struct folio *folio,
struct hugepage_subpool *subpool)
{
- set_page_private(hpage+1, (unsigned long)subpool);
+ folio->_hugetlb_subpool = subpool;
}
static inline struct hstate *hstate_file(struct file *f)
@@ -646,7 +756,10 @@ static inline struct hstate *hstate_sizelog(int page_size_log)
if (!page_size_log)
return &default_hstate;
- return size_to_hstate(1UL << page_size_log);
+ if (page_size_log < BITS_PER_LONG)
+ return size_to_hstate(1UL << page_size_log);
+
+ return NULL;
}
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
@@ -654,7 +767,7 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
return hstate_file(vma->vm_file);
}
-static inline unsigned long huge_page_size(struct hstate *h)
+static inline unsigned long huge_page_size(const struct hstate *h)
{
return (unsigned long)PAGE_SIZE << h->order;
}
@@ -678,12 +791,17 @@ static inline unsigned huge_page_shift(struct hstate *h)
return h->order + PAGE_SHIFT;
}
+static inline bool order_is_gigantic(unsigned int order)
+{
+ return order > MAX_PAGE_ORDER;
+}
+
static inline bool hstate_is_gigantic(struct hstate *h)
{
- return huge_page_order(h) >= MAX_ORDER;
+ return order_is_gigantic(huge_page_order(h));
}
-static inline unsigned int pages_per_huge_page(struct hstate *h)
+static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
return 1 << h->order;
}
@@ -693,6 +811,12 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h)
return huge_page_size(h) / 512;
}
+static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
+ struct address_space *mapping, pgoff_t idx)
+{
+ return filemap_lock_folio(mapping, idx << huge_page_order(h));
+}
+
#include <asm/hugetlb.h>
#ifndef is_hugepage_only_range
@@ -704,23 +828,34 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
#define is_hugepage_only_range is_hugepage_only_range
#endif
-#ifndef arch_clear_hugepage_flags
-static inline void arch_clear_hugepage_flags(struct page *page) { }
-#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+#ifndef arch_clear_hugetlb_flags
+static inline void arch_clear_hugetlb_flags(struct folio *folio) { }
+#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
#endif
#ifndef arch_make_huge_pte
-static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
+static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
+ vm_flags_t flags)
+{
+ return pte_mkhuge(entry);
+}
+#endif
+
+#ifndef arch_has_huge_bootmem_alloc
+/*
+ * Some architectures do their own bootmem allocation, so they can't use
+ * early CMA allocation.
+ */
+static inline bool arch_has_huge_bootmem_alloc(void)
{
- return entry;
+ return false;
}
#endif
-static inline struct hstate *page_hstate(struct page *page)
+static inline struct hstate *folio_hstate(struct folio *folio)
{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
- return size_to_hstate(page_size(page));
+ VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ return size_to_hstate(folio_size(folio));
}
static inline unsigned hstate_index_to_shift(unsigned index)
@@ -733,20 +868,17 @@ static inline int hstate_index(struct hstate *h)
return h - hstates;
}
-pgoff_t __basepage_index(struct page *page);
+int dissolve_free_hugetlb_folio(struct folio *folio);
+int dissolve_free_hugetlb_folios(unsigned long start_pfn,
+ unsigned long end_pfn);
-/* Return page->index in PAGE_SIZE units */
-static inline pgoff_t basepage_index(struct page *page)
+#ifdef CONFIG_MEMORY_FAILURE
+extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
+#else
+static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
{
- if (!PageCompound(page))
- return page->index;
-
- return __basepage_index(page);
}
-
-extern int dissolve_free_huge_page(struct page *page);
-extern int dissolve_free_huge_pages(unsigned long start_pfn,
- unsigned long end_pfn);
+#endif
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
@@ -777,7 +909,7 @@ static inline bool hugepage_migration_supported(struct hstate *h)
* It determines whether or not a huge page should be placed on
* movable zone or not. Movability of any huge page should be
* required only if huge page size is supported for migration.
- * There wont be any reason for the huge page to be movable if
+ * There won't be any reason for the huge page to be movable if
* it is not migratable to start with. Also the size of the huge
* page should be large enough to be placed under a movable zone
* and still feasible enough to be migratable. Just the presence
@@ -800,10 +932,11 @@ static inline bool hugepage_movable_supported(struct hstate *h)
/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
- if (hugepage_movable_supported(h))
- return GFP_HIGHUSER_MOVABLE;
- else
- return GFP_HIGHUSER;
+ gfp_t gfp = __GFP_COMP | __GFP_NOWARN;
+
+ gfp |= hugepage_movable_supported(h) ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
+
+ return gfp;
}
static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
@@ -818,13 +951,64 @@ static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
return modified_mask;
}
+static inline bool htlb_allow_alloc_fallback(int reason)
+{
+ bool allowed_fallback = false;
+
+ /*
+	 * Note: memory offline, memory failure and the migration syscalls are
+	 * allowed to fall back to other nodes for lack of a better choice,
+	 * even though that might break the per-node hugetlb pool.  All other
+	 * cases set __GFP_THISNODE to avoid breaking the per-node hugetlb pool.
+ */
+ switch (reason) {
+ case MR_MEMORY_HOTPLUG:
+ case MR_MEMORY_FAILURE:
+ case MR_SYSCALL:
+ case MR_MEMPOLICY_MBIND:
+ allowed_fallback = true;
+ break;
+ default:
+ break;
+ }
+
+ return allowed_fallback;
+}
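/*
 * Usage sketch: an allocation on a migration path keeps __GFP_THISNODE
 * unless the migration reason permits node fallback. Hypothetical
 * helper mirroring how the allocator paths combine these masks.
 */
static inline gfp_t htlb_migration_gfp(struct hstate *h, int reason)
{
	gfp_t gfp_mask = htlb_alloc_mask(h);

	if (!htlb_allow_alloc_fallback(reason))
		gfp_mask |= __GFP_THISNODE;
	return gfp_mask;
}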
+
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{
- if (huge_page_size(h) == PMD_SIZE)
+ const unsigned long size = huge_page_size(h);
+
+ VM_WARN_ON(size == PAGE_SIZE);
+
+ /*
+ * hugetlb must use the exact same PT locks as core-mm page table
+ * walkers would. When modifying a PTE table, hugetlb must take the
+ * PTE PT lock, when modifying a PMD table, hugetlb must take the PMD
+ * PT lock etc.
+ *
+ * The expectation is that any hugetlb folio smaller than a PMD is
+ * always mapped into a single PTE table and that any hugetlb folio
+ * smaller than a PUD (but at least as big as a PMD) is always mapped
+ * into a single PMD table.
+ *
+ * If that does not hold for an architecture, then that architecture
+ * must disable split PT locks such that all *_lockptr() functions
+ * will give us the same result: the per-MM PT lock.
+ *
+ * Note that with e.g., CONFIG_PGTABLE_LEVELS=2 where
+ * PGDIR_SIZE==P4D_SIZE==PUD_SIZE==PMD_SIZE, we'd use pud_lockptr()
+ * and core-mm would use pmd_lockptr(). However, in such configurations
+ * split PMD locks are disabled -- they don't make sense on a single
+ * PGDIR page table -- and the end result is the same.
+ */
+ if (size >= PUD_SIZE)
+ return pud_lockptr(mm, (pud_t *) pte);
+ else if (size >= PMD_SIZE || IS_ENABLED(CONFIG_HIGHPTE))
return pmd_lockptr(mm, (pmd_t *) pte);
- VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
- return &mm->page_table_lock;
+ /* pte_alloc_huge() only applies with !CONFIG_HIGHPTE */
+ return ptep_lockptr(mm, pte);
}
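/*
 * Sketch of the matching lock pattern: resolve the lockptr and take it
 * in one step, pairing with spin_unlock() in the caller. This mirrors
 * the huge_pte_lock() style wrapper.
 */
static inline spinlock_t *huge_pte_lock_sketch(struct hstate *h,
					       struct mm_struct *mm,
					       pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(h, mm, pte);

	spin_lock(ptl);
	return ptl;
}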
#ifndef hugepages_supported
@@ -838,6 +1022,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
+static inline void hugetlb_count_init(struct mm_struct *mm)
+{
+ atomic_long_set(&mm->hugetlb_usage, 0);
+}
+
static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
atomic_long_add(l, &mm->hugetlb_usage);
@@ -848,20 +1037,14 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
atomic_long_sub(l, &mm->hugetlb_usage);
}
-#ifndef set_huge_swap_pte_at
-static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz)
-{
- set_huge_pte_at(mm, addr, ptep, pte);
-}
-#endif
-
#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
- return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
+ return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
}
#endif
@@ -871,36 +1054,80 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t pte)
{
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
#endif
+#ifdef CONFIG_NUMA
+void hugetlb_register_node(struct node *node);
+void hugetlb_unregister_node(struct node *node);
+#endif
+
+/*
+ * Check if a given raw @page in a hugepage is HWPOISON.
+ */
+bool is_raw_hwpoison_page_in_hugepage(struct page *page);
+
+static inline unsigned long huge_page_mask_align(struct file *file)
+{
+ return PAGE_MASK & ~huge_page_mask(hstate_file(file));
+}
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
-static inline int isolate_or_dissolve_huge_page(struct page *page,
+static inline unsigned long huge_page_mask_align(struct file *file)
+{
+ return 0;
+}
+
+static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
+ struct address_space *mapping, pgoff_t idx)
+{
+ return NULL;
+}
+
+static inline int isolate_or_dissolve_huge_folio(struct folio *folio,
struct list_head *list)
{
return -ENOMEM;
}
-static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
+static inline int replace_free_hugepage_folios(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ return 0;
+}
+
+static inline void wait_for_freed_hugetlb_folios(void)
+{
+}
+
+static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
unsigned long addr,
- int avoid_reserve)
+ bool cow_from_owner)
{
return NULL;
}
-static inline struct page *
-alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
- nodemask_t *nmask, gfp_t gfp_mask)
+static inline struct folio *
+alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask)
{
return NULL;
}
-static inline struct page *alloc_huge_page_vma(struct hstate *h,
- struct vm_area_struct *vma,
- unsigned long address)
+static inline struct folio *
+alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
+ nodemask_t *nmask, gfp_t gfp_mask,
+ bool allow_alloc_fallback)
{
return NULL;
}
@@ -925,7 +1152,12 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
return NULL;
}
-static inline struct hstate *page_hstate(struct page *page)
+static inline struct hstate *folio_hstate(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline struct hstate *size_to_hstate(unsigned long size)
{
return NULL;
}
@@ -980,17 +1212,12 @@ static inline int hstate_index(struct hstate *h)
return 0;
}
-static inline pgoff_t basepage_index(struct page *page)
-{
- return page->index;
-}
-
-static inline int dissolve_free_huge_page(struct page *page)
+static inline int dissolve_free_hugetlb_folio(struct folio *folio)
{
return 0;
}
-static inline int dissolve_free_huge_pages(unsigned long start_pfn,
+static inline int dissolve_free_hugetlb_folios(unsigned long start_pfn,
unsigned long end_pfn)
{
return 0;
@@ -1016,12 +1243,21 @@ static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
return 0;
}
+static inline bool htlb_allow_alloc_fallback(int reason)
+{
+ return false;
+}
+
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
struct mm_struct *mm, pte_t *pte)
{
return &mm->page_table_lock;
}
+static inline void hugetlb_count_init(struct mm_struct *mm)
+{
+}
+
static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}
@@ -1030,9 +1266,42 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
-static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, unsigned long sz)
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_MMU
+ return ptep_get(ptep);
+#else
+ return *ptep;
+#endif
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned long sz)
+{
+}
+
+static inline void hugetlb_register_node(struct node *node)
+{
+}
+
+static inline void hugetlb_unregister_node(struct node *node)
+{
+}
+
+static inline bool hugetlbfs_pagecache_present(
+ struct hstate *h, struct vm_area_struct *vma, unsigned long address)
+{
+ return false;
+}
+
+static inline void hugetlb_bootmem_alloc(void)
+{
+}
+
+static inline bool hugetlb_bootmem_allocated(void)
{
+ return false;
}
#endif /* CONFIG_HUGETLB_PAGE */
@@ -1048,13 +1317,21 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h,
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
-extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
-static inline __init void hugetlb_cma_check(void)
+#endif
+
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+static inline bool hugetlb_pmd_shared(pte_t *pte)
+{
+ return page_count(virt_to_page(pte)) > 1;
+}
+#else
+static inline bool hugetlb_pmd_shared(pte_t *pte)
{
+ return false;
}
#endif
@@ -1068,4 +1345,36 @@ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif
+static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
+{
+ return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
+}
+
+bool __vma_private_lock(struct vm_area_struct *vma);
+
+/*
+ * Safe version of huge_pte_offset() to check the locks. See comments
+ * above huge_pte_offset().
+ */
+static inline pte_t *
+hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
+{
+#if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP)
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ /*
+	 * If pmd sharing is possible, locking is needed to safely walk the
+	 * hugetlb page tables. More information can be found in the comment
+	 * above huge_pte_offset() in the same file.
+ *
+ * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
+ */
+ if (__vma_shareable_lock(vma))
+ WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
+ !lockdep_is_held(
+ &vma->vm_file->f_mapping->i_mmap_rwsem));
+#endif
+ return huge_pte_offset(vma->vm_mm, addr, sz);
+}
+
#endif /* _LINUX_HUGETLB_H */
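/*
 * Illustrative usage sketch for the reworked huge_pte_lockptr() above (a
 * minimal restatement, not new API): huge_pte_lock(), defined elsewhere in
 * this header, pairs the lookup with spin_lock():
 *
 *	static inline spinlock_t *huge_pte_lock(struct hstate *h,
 *						struct mm_struct *mm, pte_t *pte)
 *	{
 *		spinlock_t *ptl = huge_pte_lockptr(h, mm, pte);
 *
 *		spin_lock(ptl);
 *		return ptl;
 *	}
 */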
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 0bff345c4bc6..e5d64b8b59c2 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -21,20 +21,17 @@ struct hugetlb_cgroup;
struct resv_map;
struct file_region;
-/*
- * Minimum page order trackable by hugetlb cgroup.
- * At least 4 pages are necessary for all the tracking information.
- * The second tail page (hpage[2]) is the fault usage cgroup.
- * The third tail page (hpage[3]) is the reservation usage cgroup.
- */
-#define HUGETLB_CGROUP_MIN_ORDER 2
-
#ifdef CONFIG_CGROUP_HUGETLB
enum hugetlb_memory_event {
HUGETLB_MAX,
HUGETLB_NR_MEMORY_EVENTS,
};
+struct hugetlb_cgroup_per_node {
+ /* hugetlb usage in pages over all hstates. */
+ unsigned long usage[HUGE_MAX_HSTATE];
+};
+
struct hugetlb_cgroup {
struct cgroup_subsys_state css;
@@ -56,56 +53,51 @@ struct hugetlb_cgroup {
/* Handle for "hugetlb.events.local" */
struct cgroup_file events_local_file[HUGE_MAX_HSTATE];
+
+ struct hugetlb_cgroup_per_node *nodeinfo[];
};
static inline struct hugetlb_cgroup *
-__hugetlb_cgroup_from_page(struct page *page, bool rsvd)
+__hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd)
{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
-
- if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
- return NULL;
+ VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
if (rsvd)
- return (struct hugetlb_cgroup *)page[3].private;
+ return folio->_hugetlb_cgroup_rsvd;
else
- return (struct hugetlb_cgroup *)page[2].private;
+ return folio->_hugetlb_cgroup;
}
-static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
{
- return __hugetlb_cgroup_from_page(page, false);
+ return __hugetlb_cgroup_from_folio(folio, false);
}
static inline struct hugetlb_cgroup *
-hugetlb_cgroup_from_page_rsvd(struct page *page)
+hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
{
- return __hugetlb_cgroup_from_page(page, true);
+ return __hugetlb_cgroup_from_folio(folio, true);
}
-static inline int __set_hugetlb_cgroup(struct page *page,
+static inline void __set_hugetlb_cgroup(struct folio *folio,
struct hugetlb_cgroup *h_cg, bool rsvd)
{
- VM_BUG_ON_PAGE(!PageHuge(page), page);
-
- if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
- return -1;
+ VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
if (rsvd)
- page[3].private = (unsigned long)h_cg;
+ folio->_hugetlb_cgroup_rsvd = h_cg;
else
- page[2].private = (unsigned long)h_cg;
- return 0;
+ folio->_hugetlb_cgroup = h_cg;
}
-static inline int set_hugetlb_cgroup(struct page *page,
+static inline void set_hugetlb_cgroup(struct folio *folio,
struct hugetlb_cgroup *h_cg)
{
- return __set_hugetlb_cgroup(page, h_cg, false);
+ __set_hugetlb_cgroup(folio, h_cg, false);
}
-static inline int set_hugetlb_cgroup_rsvd(struct page *page,
+static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
struct hugetlb_cgroup *h_cg)
{
- return __set_hugetlb_cgroup(page, h_cg, true);
+ __set_hugetlb_cgroup(folio, h_cg, true);
}
static inline bool hugetlb_cgroup_disabled(void)
@@ -118,20 +110,34 @@ static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
css_put(&h_cg->css);
}
+static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+ if (resv_map->css)
+ css_get(resv_map->css);
+}
+
+static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+ if (resv_map->css)
+ css_put(resv_map->css);
+}
+
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr);
extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page);
+ struct folio *folio);
extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page);
-extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
- struct page *page);
-extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
- struct page *page);
+ struct folio *folio);
+extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+ struct folio *folio);
+extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
+ struct folio *folio);
extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg);
@@ -147,8 +153,8 @@ extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
bool region_del);
extern void hugetlb_cgroup_file_init(void) __init;
-extern void hugetlb_cgroup_migrate(struct page *oldhpage,
- struct page *newhpage);
+extern void hugetlb_cgroup_migrate(struct folio *old_folio,
+ struct folio *new_folio);
#else
static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
@@ -158,33 +164,25 @@ static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
{
}
-static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
{
return NULL;
}
static inline struct hugetlb_cgroup *
-hugetlb_cgroup_from_page_resv(struct page *page)
+hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
{
return NULL;
}
-static inline struct hugetlb_cgroup *
-hugetlb_cgroup_from_page_rsvd(struct page *page)
-{
- return NULL;
-}
-
-static inline int set_hugetlb_cgroup(struct page *page,
+static inline void set_hugetlb_cgroup(struct folio *folio,
struct hugetlb_cgroup *h_cg)
{
- return 0;
}
-static inline int set_hugetlb_cgroup_rsvd(struct page *page,
+static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
struct hugetlb_cgroup *h_cg)
{
- return 0;
}
static inline bool hugetlb_cgroup_disabled(void)
@@ -196,6 +194,16 @@ static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
{
}
+static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+}
+
+static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+}
+
static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr)
{
@@ -211,25 +219,25 @@ static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page)
+ struct folio *folio)
{
}
static inline void
hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
- struct page *page)
+ struct folio *folio)
{
}
-static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
- struct page *page)
+static inline void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+ struct folio *folio)
{
}
-static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
+static inline void hugetlb_cgroup_uncharge_folio_rsvd(int idx,
unsigned long nr_pages,
- struct page *page)
+ struct folio *folio)
{
}
static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
@@ -254,8 +262,8 @@ static inline void hugetlb_cgroup_file_init(void)
{
}
-static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
- struct page *newhpage)
+static inline void hugetlb_cgroup_migrate(struct folio *old_folio,
+ struct folio *new_folio)
{
}
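/*
 * Illustrative sketch of the folio-based cgroup accessors above (assumed
 * caller, not part of the interface): bookkeeping now reads directly off the
 * folio, and the setters no longer return an error:
 *
 *	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_folio(folio);
 *
 *	set_hugetlb_cgroup(folio, h_cg);	// returns void now
 */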
diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h
index 0660a03d37d9..a27aa0162918 100644
--- a/include/linux/hugetlb_inline.h
+++ b/include/linux/hugetlb_inline.h
@@ -2,22 +2,27 @@
#ifndef _LINUX_HUGETLB_INLINE_H
#define _LINUX_HUGETLB_INLINE_H
-#ifdef CONFIG_HUGETLB_PAGE
-
#include <linux/mm.h>
-static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
+#ifdef CONFIG_HUGETLB_PAGE
+
+static inline bool is_vm_hugetlb_flags(vm_flags_t vm_flags)
{
- return !!(vma->vm_flags & VM_HUGETLB);
+ return !!(vm_flags & VM_HUGETLB);
}
#else
-static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
+static inline bool is_vm_hugetlb_flags(vm_flags_t vm_flags)
{
return false;
}
#endif
+static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
+{
+ return is_vm_hugetlb_flags(vma->vm_flags);
+}
+
#endif
diff --git a/include/linux/hung_task.h b/include/linux/hung_task.h
new file mode 100644
index 000000000000..c4403eeb7144
--- /dev/null
+++ b/include/linux/hung_task.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Detect Hung Task: detecting tasks stuck in D state
+ *
+ * Copyright (C) 2025 Tongcheng Travel (www.ly.com)
+ * Author: Lance Yang <mingzhe.yang@ly.com>
+ */
+#ifndef __LINUX_HUNG_TASK_H
+#define __LINUX_HUNG_TASK_H
+
+#include <linux/bug.h>
+#include <linux/sched.h>
+#include <linux/compiler.h>
+
+/*
+ * @blocker: Combines lock address and blocking type.
+ *
+ * Lock pointers are at least 4-byte aligned (32-bit) or 8-byte
+ * aligned (64-bit), which leaves the two least significant bits
+ * (LSBs) of the pointer always zero, so we can use these bits to
+ * encode the specific blocking type.
+ *
+ * Note that on architectures where this is not guaranteed, or for any
+ * unaligned lock, this tracking mechanism is silently skipped for that
+ * lock.
+ *
+ * Type encoding:
+ * 00 - Blocked on mutex (BLOCKER_TYPE_MUTEX)
+ * 01 - Blocked on semaphore (BLOCKER_TYPE_SEM)
+ * 10 - Blocked on rw-semaphore as READER (BLOCKER_TYPE_RWSEM_READER)
+ * 11 - Blocked on rw-semaphore as WRITER (BLOCKER_TYPE_RWSEM_WRITER)
+ */
+#define BLOCKER_TYPE_MUTEX 0x00UL
+#define BLOCKER_TYPE_SEM 0x01UL
+#define BLOCKER_TYPE_RWSEM_READER 0x02UL
+#define BLOCKER_TYPE_RWSEM_WRITER 0x03UL
+
+#define BLOCKER_TYPE_MASK 0x03UL
+
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+static inline void hung_task_set_blocker(void *lock, unsigned long type)
+{
+ unsigned long lock_ptr = (unsigned long)lock;
+
+ WARN_ON_ONCE(!lock_ptr);
+ WARN_ON_ONCE(READ_ONCE(current->blocker));
+
+ /*
+	 * If the lock pointer overlaps BLOCKER_TYPE_MASK (i.e. the lock is
+	 * not sufficiently aligned to spare the two LSBs), return without
+	 * writing anything.
+ */
+ if (lock_ptr & BLOCKER_TYPE_MASK)
+ return;
+
+ WRITE_ONCE(current->blocker, lock_ptr | type);
+}
+
+static inline void hung_task_clear_blocker(void)
+{
+ WRITE_ONCE(current->blocker, 0UL);
+}
+
+/*
+ * hung_task_get_blocker_type - Extracts blocker type from encoded blocker
+ * address.
+ *
+ * @blocker: Blocker pointer with encoded type (via LSB bits)
+ *
+ * Returns: BLOCKER_TYPE_MUTEX, BLOCKER_TYPE_SEM, etc.
+ */
+static inline unsigned long hung_task_get_blocker_type(unsigned long blocker)
+{
+ WARN_ON_ONCE(!blocker);
+
+ return blocker & BLOCKER_TYPE_MASK;
+}
+
+static inline void *hung_task_blocker_to_lock(unsigned long blocker)
+{
+ WARN_ON_ONCE(!blocker);
+
+ return (void *)(blocker & ~BLOCKER_TYPE_MASK);
+}
+#else
+static inline void hung_task_set_blocker(void *lock, unsigned long type)
+{
+}
+static inline void hung_task_clear_blocker(void)
+{
+}
+static inline unsigned long hung_task_get_blocker_type(unsigned long blocker)
+{
+ return 0UL;
+}
+static inline void *hung_task_blocker_to_lock(unsigned long blocker)
+{
+ return NULL;
+}
+#endif
+
+#endif /* __LINUX_HUNG_TASK_H */
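/*
 * Illustrative sketch of the blocker encoding above (hypothetical caller):
 * the two LSBs carry the type, so encode/decode round-trips for any
 * sufficiently aligned lock:
 *
 *	hung_task_set_blocker(lock, BLOCKER_TYPE_MUTEX);
 *	...
 *	unsigned long blocker = READ_ONCE(current->blocker);
 *
 *	if (blocker) {
 *		void *l = hung_task_blocker_to_lock(blocker);	// == lock
 *		unsigned long t = hung_task_get_blocker_type(blocker);
 *							// == BLOCKER_TYPE_MUTEX
 *	}
 *	hung_task_clear_blocker();
 */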
diff --git a/include/linux/hw_bitfield.h b/include/linux/hw_bitfield.h
new file mode 100644
index 000000000000..df202e167ce4
--- /dev/null
+++ b/include/linux/hw_bitfield.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2025, Collabora Ltd.
+ */
+
+#ifndef _LINUX_HW_BITFIELD_H
+#define _LINUX_HW_BITFIELD_H
+
+#include <linux/bitfield.h>
+#include <linux/build_bug.h>
+#include <linux/limits.h>
+
+/**
+ * FIELD_PREP_WM16() - prepare a bitfield element with a mask in the upper half
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP_WM16() masks and shifts up the value, as well as bitwise ORs the
+ * result with the mask shifted up by 16.
+ *
+ * This is useful for a common design of hardware registers where the upper
+ * 16-bit half of a 32-bit register is used as a write-enable mask. In such a
+ * register, a bit in the lower half is only updated if the corresponding bit
+ * in the upper half is high.
+ */
+#define FIELD_PREP_WM16(_mask, _val) \
+ ({ \
+ typeof(_val) __val = _val; \
+ typeof(_mask) __mask = _mask; \
+ __BF_FIELD_CHECK(__mask, ((u16)0U), __val, \
+ "HWORD_UPDATE: "); \
+ (((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) | \
+ ((__mask) << 16); \
+ })
+
+/**
+ * FIELD_PREP_WM16_CONST() - prepare a constant bitfield element with a mask in
+ * the upper half
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP_WM16_CONST() masks and shifts up the value, as well as bitwise ORs
+ * the result with the mask shifted up by 16.
+ *
+ * This is useful for a common design of hardware registers where the upper
+ * 16-bit half of a 32-bit register is used as a write-enable mask. In such a
+ * register, a bit in the lower half is only updated if the corresponding bit
+ * in the upper half is high.
+ *
+ * Unlike FIELD_PREP_WM16(), this is a constant expression and can therefore
+ * be used in initializers. Error checking is less comfortable for this
+ * version.
+ */
+#define FIELD_PREP_WM16_CONST(_mask, _val) \
+ ( \
+ FIELD_PREP_CONST(_mask, _val) | \
+ (BUILD_BUG_ON_ZERO(const_true((u64)(_mask) > U16_MAX)) + \
+ ((_mask) << 16)) \
+ )
+
+
+#endif /* _LINUX_HW_BITFIELD_H */
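/*
 * Worked example for FIELD_PREP_WM16() (MYREG and its field are hypothetical):
 * for a 32-bit register whose upper half is the write-enable mask, updating
 * only bits [7:4] to the value 0x5 looks like:
 *
 *	#define MYREG_CLK_DIV	GENMASK(7, 4)
 *
 *	writel(FIELD_PREP_WM16(MYREG_CLK_DIV, 0x5), base + MYREG);
 *
 * which expands to (0x5 << 4) | (GENMASK(7, 4) << 16) == 0x00f00050, so the
 * hardware updates bits 7:4 and leaves the rest of the low half untouched.
 */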
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 78dd7035d1e5..db199d653dd1 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -7,6 +7,16 @@
#ifdef CONFIG_HAVE_HW_BREAKPOINT
+enum bp_type_idx {
+ TYPE_INST = 0,
+#if defined(CONFIG_HAVE_MIXED_BREAKPOINTS_REGS)
+ TYPE_DATA = 0,
+#else
+ TYPE_DATA = 1,
+#endif
+ TYPE_MAX
+};
+
extern int __init init_hw_breakpoint(void);
static inline void hw_breakpoint_init(struct perf_event_attr *attr)
@@ -74,15 +84,12 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
extern int register_perf_hw_breakpoint(struct perf_event *bp);
extern void unregister_hw_breakpoint(struct perf_event *bp);
extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events);
+extern bool hw_breakpoint_is_used(void);
extern int dbg_reserve_bp_slot(struct perf_event *bp);
extern int dbg_release_bp_slot(struct perf_event *bp);
extern int reserve_bp_slot(struct perf_event *bp);
extern void release_bp_slot(struct perf_event *bp);
-int hw_breakpoint_weight(struct perf_event *bp);
-int arch_reserve_bp_slot(struct perf_event *bp);
-void arch_release_bp_slot(struct perf_event *bp);
-void arch_unregister_hw_breakpoint(struct perf_event *bp);
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
@@ -121,6 +128,8 @@ register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
static inline void unregister_hw_breakpoint(struct perf_event *bp) { }
static inline void
unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { }
+static inline bool hw_breakpoint_is_used(void) { return false; }
+
static inline int
reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; }
static inline void release_bp_slot(struct perf_event *bp) { }
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index 8e6dd908da21..b424555753b1 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -13,9 +13,8 @@
#define LINUX_HWRANDOM_H_
#include <linux/completion.h>
-#include <linux/types.h>
-#include <linux/list.h>
#include <linux/kref.h>
+#include <linux/types.h>
/**
* struct hwrng - Hardware Random Number Generator driver
@@ -34,7 +33,7 @@
* @priv: Private data, for use by the RNG driver.
* @quality: Estimation of true entropy in RNG's bitstream
* (in bits of entropy per 1024 bits of input;
- * valid values: 1 to 1024, or 0 for unknown).
+ * valid values: 1 to 1024, or 0 for maximum).
*/
struct hwrng {
const char *name;
@@ -50,6 +49,7 @@ struct hwrng {
struct list_head list;
struct kref ref;
struct completion cleanup_done;
+ struct completion dying;
};
struct device;
@@ -60,7 +60,8 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
/** Unregister a Hardware Random Number Generator driver. */
extern void hwrng_unregister(struct hwrng *rng);
extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
-/** Feed random bits into the pool. */
-extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
+
+extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs);
+extern long hwrng_yield(struct hwrng *rng);
#endif /* LINUX_HWRANDOM_H_ */
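/*
 * Illustrative sketch for hwrng_msleep() (the foo_* helpers are hypothetical):
 * a polling RNG driver sleeps via hwrng_msleep() rather than msleep() so the
 * wait can be cut short when the core signals teardown:
 *
 *	static int foo_rng_read(struct hwrng *rng, void *buf, size_t max,
 *				bool wait)
 *	{
 *		while (!foo_data_ready()) {
 *			if (!wait)
 *				return 0;
 *			hwrng_msleep(rng, 100);
 *		}
 *		return foo_copy_data(buf, max);
 *	}
 */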
diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
index cb26d02f52f3..d896713359cd 100644
--- a/include/linux/hwmon-sysfs.h
+++ b/include/linux/hwmon-sysfs.h
@@ -8,6 +8,7 @@
#define _LINUX_HWMON_SYSFS_H
#include <linux/device.h>
+#include <linux/kstrtox.h>
struct sensor_device_attribute{
struct device_attribute dev_attr;
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 1e8d6ea8992e..301a83afbd66 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -24,6 +24,7 @@ enum hwmon_sensor_types {
hwmon_curr,
hwmon_power,
hwmon_energy,
+ hwmon_energy64,
hwmon_humidity,
hwmon_fan,
hwmon_pwm,
@@ -44,6 +45,8 @@ enum hwmon_chip_attributes {
hwmon_chip_in_samples,
hwmon_chip_power_samples,
hwmon_chip_temp_samples,
+ hwmon_chip_beep_enable,
+ hwmon_chip_pec,
};
#define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history)
@@ -58,6 +61,8 @@ enum hwmon_chip_attributes {
#define HWMON_C_IN_SAMPLES BIT(hwmon_chip_in_samples)
#define HWMON_C_POWER_SAMPLES BIT(hwmon_chip_power_samples)
#define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples)
+#define HWMON_C_BEEP_ENABLE BIT(hwmon_chip_beep_enable)
+#define HWMON_C_PEC BIT(hwmon_chip_pec)
enum hwmon_temp_attributes {
hwmon_temp_enable,
@@ -87,6 +92,7 @@ enum hwmon_temp_attributes {
hwmon_temp_reset_history,
hwmon_temp_rated_min,
hwmon_temp_rated_max,
+ hwmon_temp_beep,
};
#define HWMON_T_ENABLE BIT(hwmon_temp_enable)
@@ -116,6 +122,7 @@ enum hwmon_temp_attributes {
#define HWMON_T_RESET_HISTORY BIT(hwmon_temp_reset_history)
#define HWMON_T_RATED_MIN BIT(hwmon_temp_rated_min)
#define HWMON_T_RATED_MAX BIT(hwmon_temp_rated_max)
+#define HWMON_T_BEEP BIT(hwmon_temp_beep)
enum hwmon_in_attributes {
hwmon_in_enable,
@@ -136,6 +143,8 @@ enum hwmon_in_attributes {
hwmon_in_crit_alarm,
hwmon_in_rated_min,
hwmon_in_rated_max,
+ hwmon_in_beep,
+ hwmon_in_fault,
};
#define HWMON_I_ENABLE BIT(hwmon_in_enable)
@@ -156,6 +165,8 @@ enum hwmon_in_attributes {
#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm)
#define HWMON_I_RATED_MIN BIT(hwmon_in_rated_min)
#define HWMON_I_RATED_MAX BIT(hwmon_in_rated_max)
+#define HWMON_I_BEEP BIT(hwmon_in_beep)
+#define HWMON_I_FAULT BIT(hwmon_in_fault)
enum hwmon_curr_attributes {
hwmon_curr_enable,
@@ -176,6 +187,7 @@ enum hwmon_curr_attributes {
hwmon_curr_crit_alarm,
hwmon_curr_rated_min,
hwmon_curr_rated_max,
+ hwmon_curr_beep,
};
#define HWMON_C_ENABLE BIT(hwmon_curr_enable)
@@ -196,6 +208,7 @@ enum hwmon_curr_attributes {
#define HWMON_C_CRIT_ALARM BIT(hwmon_curr_crit_alarm)
#define HWMON_C_RATED_MIN BIT(hwmon_curr_rated_min)
#define HWMON_C_RATED_MAX BIT(hwmon_curr_rated_max)
+#define HWMON_C_BEEP BIT(hwmon_curr_beep)
enum hwmon_power_attributes {
hwmon_power_enable,
@@ -285,6 +298,8 @@ enum hwmon_humidity_attributes {
hwmon_humidity_fault,
hwmon_humidity_rated_min,
hwmon_humidity_rated_max,
+ hwmon_humidity_min_alarm,
+ hwmon_humidity_max_alarm,
};
#define HWMON_H_ENABLE BIT(hwmon_humidity_enable)
@@ -298,6 +313,8 @@ enum hwmon_humidity_attributes {
#define HWMON_H_FAULT BIT(hwmon_humidity_fault)
#define HWMON_H_RATED_MIN BIT(hwmon_humidity_rated_min)
#define HWMON_H_RATED_MAX BIT(hwmon_humidity_rated_max)
+#define HWMON_H_MIN_ALARM BIT(hwmon_humidity_min_alarm)
+#define HWMON_H_MAX_ALARM BIT(hwmon_humidity_max_alarm)
enum hwmon_fan_attributes {
hwmon_fan_enable,
@@ -312,6 +329,7 @@ enum hwmon_fan_attributes {
hwmon_fan_min_alarm,
hwmon_fan_max_alarm,
hwmon_fan_fault,
+ hwmon_fan_beep,
};
#define HWMON_F_ENABLE BIT(hwmon_fan_enable)
@@ -326,18 +344,21 @@ enum hwmon_fan_attributes {
#define HWMON_F_MIN_ALARM BIT(hwmon_fan_min_alarm)
#define HWMON_F_MAX_ALARM BIT(hwmon_fan_max_alarm)
#define HWMON_F_FAULT BIT(hwmon_fan_fault)
+#define HWMON_F_BEEP BIT(hwmon_fan_beep)
enum hwmon_pwm_attributes {
hwmon_pwm_input,
hwmon_pwm_enable,
hwmon_pwm_mode,
hwmon_pwm_freq,
+ hwmon_pwm_auto_channels_temp,
};
#define HWMON_PWM_INPUT BIT(hwmon_pwm_input)
#define HWMON_PWM_ENABLE BIT(hwmon_pwm_enable)
#define HWMON_PWM_MODE BIT(hwmon_pwm_mode)
#define HWMON_PWM_FREQ BIT(hwmon_pwm_freq)
+#define HWMON_PWM_AUTO_CHANNELS_TEMP BIT(hwmon_pwm_auto_channels_temp)
enum hwmon_intrusion_attributes {
hwmon_intrusion_alarm,
@@ -348,7 +369,9 @@ enum hwmon_intrusion_attributes {
/**
* struct hwmon_ops - hwmon device operations
- * @is_visible: Callback to return attribute visibility. Mandatory.
+ * @visible: Static visibility. If non-zero, 'is_visible' is ignored.
+ * @is_visible: Callback to return attribute visibility. Mandatory unless
+ * 'visible' is non-zero.
* Parameters are:
* @const void *drvdata:
* Pointer to driver-private data structure passed
@@ -392,6 +415,7 @@ enum hwmon_intrusion_attributes {
* The function returns 0 on success or a negative error number.
*/
struct hwmon_ops {
+ umode_t visible;
umode_t (*is_visible)(const void *drvdata, enum hwmon_sensor_types type,
u32 attr, int channel);
int (*read)(struct device *dev, enum hwmon_sensor_types type,
@@ -403,7 +427,7 @@ struct hwmon_ops {
};
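/*
 * Illustrative sketch for the new 'visible' member (foo_read is
 * hypothetical): a chip whose attributes all share one static mode can set
 * .visible instead of implementing an is_visible() callback:
 *
 *	static const struct hwmon_ops foo_hwmon_ops = {
 *		.visible = 0444,
 *		.read = foo_read,
 *	};
 */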
/**
- * Channel information
+ * struct hwmon_channel_info - Channel information
* @type: Channel type.
* @config: Pointer to NULL-terminated list of channel parameters.
* Use for per-channel attributes.
@@ -413,27 +437,31 @@ struct hwmon_channel_info {
const u32 *config;
};
-#define HWMON_CHANNEL_INFO(stype, ...) \
- (&(struct hwmon_channel_info) { \
- .type = hwmon_##stype, \
- .config = (u32 []) { \
- __VA_ARGS__, 0 \
- } \
+#define HWMON_CHANNEL_INFO(stype, ...) \
+ (&(const struct hwmon_channel_info) { \
+ .type = hwmon_##stype, \
+ .config = (const u32 []) { \
+ __VA_ARGS__, 0 \
+ } \
})
/**
- * Chip configuration
+ * struct hwmon_chip_info - Chip configuration
* @ops: Pointer to hwmon operations.
* @info: Null-terminated list of channel information.
*/
struct hwmon_chip_info {
const struct hwmon_ops *ops;
- const struct hwmon_channel_info **info;
+ const struct hwmon_channel_info * const *info;
};
/* hwmon_device_register() is deprecated */
struct device *hwmon_device_register(struct device *dev);
+/*
+ * hwmon_device_register_with_groups() and
+ * devm_hwmon_device_register_with_groups() are deprecated.
+ */
struct device *
hwmon_device_register_with_groups(struct device *dev, const char *name,
void *drvdata,
@@ -448,17 +476,25 @@ hwmon_device_register_with_info(struct device *dev,
const struct hwmon_chip_info *info,
const struct attribute_group **extra_groups);
struct device *
+hwmon_device_register_for_thermal(struct device *dev, const char *name,
+ void *drvdata);
+struct device *
devm_hwmon_device_register_with_info(struct device *dev,
const char *name, void *drvdata,
const struct hwmon_chip_info *info,
const struct attribute_group **extra_groups);
void hwmon_device_unregister(struct device *dev);
-void devm_hwmon_device_unregister(struct device *dev);
int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel);
+char *hwmon_sanitize_name(const char *name);
+char *devm_hwmon_sanitize_name(struct device *dev, const char *name);
+
+void hwmon_lock(struct device *dev);
+void hwmon_unlock(struct device *dev);
+
/**
* hwmon_is_bad_char - Is the char invalid in a hwmon name
* @ch: the char to be considered
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index bfe7c1f1ac6d..f35b42e8c5de 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -58,18 +58,16 @@ struct hwspinlock_pdata {
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
-struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
-int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
+int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
-struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
unsigned int id);
int devm_hwspin_lock_unregister(struct device *dev,
@@ -94,11 +92,6 @@ int devm_hwspin_lock_register(struct device *dev,
* Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
* users. Others, which care, can still check this with IS_ERR.
*/
-static inline struct hwspinlock *hwspin_lock_request(void)
-{
- return ERR_PTR(-ENODEV);
-}
-
static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
return ERR_PTR(-ENODEV);
@@ -127,12 +120,12 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}
-static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
+static inline int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
{
return 0;
}
-static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
+static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
return 0;
}
@@ -149,11 +142,6 @@ int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
return 0;
}
-static inline struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
-{
- return ERR_PTR(-ENODEV);
-}
-
static inline
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
unsigned int id)
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index d1e59dbef1dd..dfc516c1c719 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -24,7 +24,7 @@
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#define MAX_PAGE_BUFFER_COUNT 32
#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
@@ -164,8 +164,28 @@ struct hv_ring_buffer {
u8 buffer[];
} __packed;
+
+/*
+ * If the requested ring buffer size is at least 8 times the size of the
+ * header, steal space from the ring buffer for the header. Otherwise, add
+ * space for the header so that it doesn't take too much of the ring buffer
+ * space.
+ *
+ * The factor of 8 is somewhat arbitrary. The goal is to prevent adding a
+ * relatively small header (4 Kbytes on x86) to a large-ish power-of-2 ring
+ * buffer size (such as 128 Kbytes), which would nearly double the
+ * allocation and waste almost half of it. As a contrasting example,
+ * on ARM64 with 64 Kbyte page size, we don't want to take 64 Kbytes for the
+ * header from a 128 Kbyte allocation, leaving only 64 Kbytes for the ring.
+ * In this latter case, we must add 64 Kbytes for the header and not worry
+ * about what's wasted.
+ */
+#define VMBUS_HEADER_ADJ(payload_sz) \
+ ((payload_sz) >= 8 * sizeof(struct hv_ring_buffer) ? \
+ 0 : sizeof(struct hv_ring_buffer))
+
/* Calculate the proper size of a ringbuffer, it must be page-aligned */
-#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
+#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + \
(payload_sz))
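/*
 * Worked example (assuming the 4 Kbyte x86 header and 4 Kbyte pages noted
 * above):
 *
 *	VMBUS_RING_SIZE(128 * 1024)	// 128K >= 8 * 4K, header is stolen:
 *					// PAGE_ALIGN(0 + 128K) == 128K total
 *	VMBUS_RING_SIZE(16 * 1024)	// 16K < 8 * 4K, header is added:
 *					// PAGE_ALIGN(4K + 16K) == 20K total
 */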
struct hv_ring_buffer_info {
@@ -181,6 +201,10 @@ struct hv_ring_buffer_info {
* being freed while the ring buffer is being accessed.
*/
struct mutex ring_buffer_mutex;
+
+ /* Buffer that holds a copy of an incoming host packet */
+ void *pkt_buffer;
+ u32 pkt_buffer_size;
};
@@ -226,27 +250,33 @@ static inline u32 hv_get_avail_to_write_percent(
* two 16 bit quantities: major_number. minor_number.
*
* 0 . 13 (Windows Server 2008)
- * 1 . 1 (Windows 7)
- * 2 . 4 (Windows 8)
- * 3 . 0 (Windows 8 R2)
+ * 1 . 1 (Windows 7, WS2008 R2)
+ * 2 . 4 (Windows 8, WS2012)
+ * 3 . 0 (Windows 8.1, WS2012 R2)
* 4 . 0 (Windows 10)
* 4 . 1 (Windows 10 RS3)
* 5 . 0 (Newer Windows 10)
* 5 . 1 (Windows 10 RS4)
* 5 . 2 (Windows Server 2019, RS5)
* 5 . 3 (Windows Server 2022)
+ *
+ * The WS2008 and WIN7 versions are listed here for
+ * completeness but are no longer supported in the
+ * Linux kernel.
*/
-#define VERSION_WS2008 ((0 << 16) | (13))
-#define VERSION_WIN7 ((1 << 16) | (1))
-#define VERSION_WIN8 ((2 << 16) | (4))
-#define VERSION_WIN8_1 ((3 << 16) | (0))
-#define VERSION_WIN10 ((4 << 16) | (0))
-#define VERSION_WIN10_V4_1 ((4 << 16) | (1))
-#define VERSION_WIN10_V5 ((5 << 16) | (0))
-#define VERSION_WIN10_V5_1 ((5 << 16) | (1))
-#define VERSION_WIN10_V5_2 ((5 << 16) | (2))
-#define VERSION_WIN10_V5_3 ((5 << 16) | (3))
+#define VMBUS_MAKE_VERSION(MAJ, MIN) ((((u32)MAJ) << 16) | (MIN))
+#define VERSION_WS2008 VMBUS_MAKE_VERSION(0, 13)
+#define VERSION_WIN7 VMBUS_MAKE_VERSION(1, 1)
+#define VERSION_WIN8 VMBUS_MAKE_VERSION(2, 4)
+#define VERSION_WIN8_1 VMBUS_MAKE_VERSION(3, 0)
+#define VERSION_WIN10 VMBUS_MAKE_VERSION(4, 0)
+#define VERSION_WIN10_V4_1 VMBUS_MAKE_VERSION(4, 1)
+#define VERSION_WIN10_V5 VMBUS_MAKE_VERSION(5, 0)
+#define VERSION_WIN10_V5_1 VMBUS_MAKE_VERSION(5, 1)
+#define VERSION_WIN10_V5_2 VMBUS_MAKE_VERSION(5, 2)
+#define VERSION_WIN10_V5_3 VMBUS_MAKE_VERSION(5, 3)
+#define VERSION_WIN10_V6_0 VMBUS_MAKE_VERSION(6, 0)
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -307,14 +337,22 @@ struct vmbus_channel_offer {
} __packed;
/* Server Flags */
-#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1
-#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2
-#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4
-#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10
-#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
-#define VMBUS_CHANNEL_PARENT_OFFER 0x200
-#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
-#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
+#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 0x0001
+/*
+ * This flag indicates that the channel is offered by the paravisor, and must
+ * use encrypted memory for the channel ring buffer.
+ */
+#define VMBUS_CHANNEL_CONFIDENTIAL_RING_BUFFER 0x0002
+/*
+ * This flag indicates that the channel is offered by the paravisor, and must
+ * use encrypted memory for GPA direct packets and additional GPADLs.
+ */
+#define VMBUS_CHANNEL_CONFIDENTIAL_EXTERNAL_MEMORY 0x0004
+#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x0010
+#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x0100
+#define VMBUS_CHANNEL_PARENT_OFFER 0x0200
+#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x0400
+#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
struct vmpacket_descriptor {
u16 type;
@@ -340,20 +378,7 @@ struct vmtransfer_page_packet_header {
u8 sender_owns_set;
u8 reserved;
u32 range_cnt;
- struct vmtransfer_page_range ranges[1];
-} __packed;
-
-struct vmgpadl_packet_header {
- struct vmpacket_descriptor d;
- u32 gpadl;
- u32 reserved;
-} __packed;
-
-struct vmadd_remove_transfer_page_set {
- struct vmpacket_descriptor d;
- u32 gpadl;
- u16 xfer_pageset_id;
- u16 reserved;
+ struct vmtransfer_page_range ranges[];
} __packed;
/*
@@ -367,30 +392,6 @@ struct gpa_range {
};
/*
- * This is the format for an Establish Gpadl packet, which contains a handle by
- * which this GPADL will be known and a set of GPA ranges associated with it.
- * This can be converted to a MDL by the guest OS. If there are multiple GPA
- * ranges, then the resulting MDL will be "chained," representing multiple VA
- * ranges.
- */
-struct vmestablish_gpadl {
- struct vmpacket_descriptor d;
- u32 gpadl;
- u32 range_cnt;
- struct gpa_range range[1];
-} __packed;
-
-/*
- * This is the format for a Teardown Gpadl packet, which indicates that the
- * GPADL handle in the Establish Gpadl packet will never be referenced again.
- */
-struct vmteardown_gpadl {
- struct vmpacket_descriptor d;
- u32 gpadl;
- u32 reserved; /* for alignment to a 8-byte boundary */
-} __packed;
-
-/*
* This is the format for a GPA-Direct packet, which contains a set of GPA
* ranges, in addition to commands and/or data.
*/
@@ -401,25 +402,6 @@ struct vmdata_gpa_direct {
struct gpa_range range[1];
} __packed;
-/* This is the format for a Additional Data Packet. */
-struct vmadditional_data {
- struct vmpacket_descriptor d;
- u64 total_bytes;
- u32 offset;
- u32 byte_cnt;
- unsigned char data[1];
-} __packed;
-
-union vmpacket_largest_possible_header {
- struct vmpacket_descriptor simple_hdr;
- struct vmtransfer_page_packet_header xfer_page_hdr;
- struct vmgpadl_packet_header gpadl_hdr;
- struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
- struct vmestablish_gpadl establish_gpadl_hdr;
- struct vmteardown_gpadl teardown_gpadl_hdr;
- struct vmdata_gpa_direct data_gpa_direct_hdr;
-};
-
#define VMPACKET_DATA_START_ADDRESS(__packet) \
(void *)(((unsigned char *)__packet) + \
((struct vmpacket_descriptor)__packet)->offset8 * 8)
@@ -534,12 +516,6 @@ struct vmbus_channel_rescind_offer {
u32 child_relid;
} __packed;
-static inline u32
-hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
-{
- return rbi->ring_buffer->pending_send_sz;
-}
-
/*
* Request Offer -- no parameters, SynIC message contains the partition ID
* Set Snoop -- no parameters, SynIC message contains the partition ID
@@ -655,6 +631,12 @@ struct vmbus_channel_relid_released {
u32 child_relid;
} __packed;
+/*
+ * Used by the paravisor only, means that the encrypted ring buffers and
+ * the encrypted external memory are supported
+ */
+#define VMBUS_FEATURE_FLAG_CONFIDENTIAL_CHANNELS 0x10
+
struct vmbus_channel_initiate_contact {
struct vmbus_channel_message_header header;
u32 vmbus_version_requested;
@@ -663,8 +645,9 @@ struct vmbus_channel_initiate_contact {
u64 interrupt_page;
struct {
u8 msg_sint;
- u8 padding1[3];
- u32 padding2;
+ u8 msg_vtl;
+ u8 reserved[2];
+ u32 feature_flags; /* VMBus version 6.0 */
};
};
u64 monitor_page1;
@@ -741,20 +724,6 @@ struct vmbus_channel_msginfo {
unsigned char msg[];
};
-struct vmbus_close_msg {
- struct vmbus_channel_msginfo info;
- struct vmbus_channel_close_channel msg;
-};
-
-/* Define connection identifier type. */
-union hv_connection_id {
- u32 asu32;
- struct {
- u32 id:24;
- u32 reserved:8;
- } u;
-};
-
enum vmbus_device_type {
HV_IDE = 0,
HV_SCSI,
@@ -790,15 +759,31 @@ struct vmbus_requestor {
#define VMBUS_NO_RQSTOR U64_MAX
#define VMBUS_RQST_ERROR (U64_MAX - 1)
+#define VMBUS_RQST_ADDR_ANY U64_MAX
+/* NetVSC-specific */
#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
+/* StorVSC-specific */
+#define VMBUS_RQST_INIT (U64_MAX - 2)
+#define VMBUS_RQST_RESET (U64_MAX - 3)
struct vmbus_device {
+ /* preferred ring buffer size in KB, 0 means no preferred size for this device */
+ size_t pref_ring_size;
u16 dev_type;
guid_t guid;
bool perf_device;
bool allowed_in_isolated;
};
+#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096
+
+struct vmbus_gpadl {
+ u32 gpadl_handle;
+ u32 size;
+ void *buffer;
+ bool decrypted;
+};
+
struct vmbus_channel {
struct list_head listentry;
@@ -818,7 +803,7 @@ struct vmbus_channel {
bool rescind_ref; /* got rescind msg, got channel reference */
struct completion rescind_event;
- u32 ringbuffer_gpadlhandle;
+ struct vmbus_gpadl ringbuffer_gpadlhandle;
/* Allocated memory for ring buffer */
struct page *ringbuffer_page;
@@ -827,7 +812,7 @@ struct vmbus_channel {
struct hv_ring_buffer_info outbound; /* send to parent */
struct hv_ring_buffer_info inbound; /* receive from parent */
- struct vmbus_close_msg close_msg;
+ struct vmbus_channel_close_channel close_msg;
/* Statistics */
u64 interrupts; /* Host to Guest interrupts */
@@ -954,7 +939,7 @@ struct vmbus_channel {
* mechanism improves throughput by:
*
* A) Making the host more efficient - each time it wakes up,
- * potentially it will process morev number of packets. The
+	 * potentially it will process more packets. The
* monitor latency allows a batch to build up.
* B) By deferring the hypercall to signal, we will also minimize
* the interrupts.
@@ -1018,18 +1003,69 @@ struct vmbus_channel {
u32 fuzz_testing_interrupt_delay;
u32 fuzz_testing_message_delay;
+ /* callback to generate a request ID from a request address */
+ u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr);
+ /* callback to retrieve a request address from a request ID */
+ u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id);
+
/* request/transaction ids for VMBus */
struct vmbus_requestor requestor;
u32 rqstor_size;
+
+ /* The max size of a packet on this channel */
+ u32 max_pkt_size;
+
+ /* function to mmap ring buffer memory to the channel's sysfs ring attribute */
+ int (*mmap_ring_buffer)(struct vmbus_channel *channel, struct vm_area_struct *vma);
+
+ /* boolean to control visibility of sysfs for ring buffer */
+ bool ring_sysfs_visible;
+ /* The ring buffer is encrypted */
+ bool co_ring_buffer;
+ /* The external memory is encrypted */
+ bool co_external_memory;
};
-u64 vmbus_next_request_id(struct vmbus_requestor *rqstor, u64 rqst_addr);
-u64 vmbus_request_addr(struct vmbus_requestor *rqstor, u64 trans_id);
+#define lock_requestor(channel, flags) \
+do { \
+ struct vmbus_requestor *rqstor = &(channel)->requestor; \
+ \
+ spin_lock_irqsave(&rqstor->req_lock, flags); \
+} while (0)
+
+static __always_inline void unlock_requestor(struct vmbus_channel *channel,
+ unsigned long flags)
+{
+ struct vmbus_requestor *rqstor = &channel->requestor;
+
+ spin_unlock_irqrestore(&rqstor->req_lock, flags);
+}
+
+u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr);
+u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
+ u64 rqst_addr);
+u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
+ u64 rqst_addr);
+u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);
+
+static inline bool is_co_ring_buffer(const struct vmbus_channel_offer_channel *o)
+{
+ return !!(o->offer.chn_flags & VMBUS_CHANNEL_CONFIDENTIAL_RING_BUFFER);
+}
+
+static inline bool is_co_external_memory(const struct vmbus_channel_offer_channel *o)
+{
+ return !!(o->offer.chn_flags & VMBUS_CHANNEL_CONFIDENTIAL_EXTERNAL_MEMORY);
+}
+
+static inline bool is_hvsock_offer(const struct vmbus_channel_offer_channel *o)
+{
+ return !!(o->offer.chn_flags & VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
+}
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
- return !!(c->offermsg.offer.chn_flags &
- VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
+ return is_hvsock_offer(&c->offermsg);
}
static inline bool is_sub_channel(const struct vmbus_channel *c)
@@ -1074,16 +1110,6 @@ static inline void set_channel_pending_send_size(struct vmbus_channel *c,
c->outbound.ring_buffer->pending_send_sz = size;
}
-static inline void set_low_latency_mode(struct vmbus_channel *c)
-{
- c->low_latency = true;
-}
-
-static inline void clear_low_latency_mode(struct vmbus_channel *c)
-{
- c->low_latency = false;
-}
-
void vmbus_onmessage(struct vmbus_channel_message_header *hdr);
int vmbus_request_offers(void);
@@ -1098,19 +1124,6 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
void (*chn_rescind_cb)(struct vmbus_channel *));
-/*
- * Check if sub-channels have already been offerred. This API will be useful
- * when the driver is unloaded after establishing sub-channels. In this case,
- * when the driver is re-loaded, the driver would have to check if the
- * subchannels have already been established before attempting to request
- * the creation of sub-channels.
- * This function returns TRUE to indicate that subchannels have already been
- * created.
- * This function should be invoked after setting the callback function for
- * sub-channel creation.
- */
-bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
-
/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
u16 type;
@@ -1166,6 +1179,13 @@ extern int vmbus_open(struct vmbus_channel *channel,
extern void vmbus_close(struct vmbus_channel *channel);
+extern int vmbus_sendpacket_getid(struct vmbus_channel *channel,
+ void *buffer,
+ u32 bufferLen,
+ u64 requestid,
+ u64 *trans_id,
+ enum vmbus_packet_type type,
+ u32 flags);
extern int vmbus_sendpacket(struct vmbus_channel *channel,
void *buffer,
u32 bufferLen,
@@ -1173,13 +1193,6 @@ extern int vmbus_sendpacket(struct vmbus_channel *channel,
enum vmbus_packet_type type,
u32 flags);
-extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
- struct hv_page_buffer pagebuffers[],
- u32 pagecount,
- void *buffer,
- u32 bufferlen,
- u64 requestid);
-
extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
struct vmbus_packet_mpb_array *mpb,
u32 desc_size,
@@ -1190,10 +1203,10 @@ extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
void *kbuffer,
u32 size,
- u32 *gpadl_handle);
+ struct vmbus_gpadl *gpadl);
extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
- u32 gpadl_handle);
+ struct vmbus_gpadl *gpadl);
void vmbus_reset_channel_cb(struct vmbus_channel *channel);
@@ -1209,9 +1222,6 @@ extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
u32 *buffer_actual_len,
u64 *requestid);
-
-extern void vmbus_ontimer(unsigned long data);
-
/* Base driver object */
struct hv_driver {
const char *name;
@@ -1243,7 +1253,7 @@ struct hv_driver {
} dynids;
int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
- int (*remove)(struct hv_device *);
+ void (*remove)(struct hv_device *dev);
void (*shutdown)(struct hv_device *);
int (*suspend)(struct hv_device *);
@@ -1262,10 +1272,16 @@ struct hv_device {
u16 device_id;
struct device device;
- char *driver_override; /* Driver name to force a match */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
struct vmbus_channel *channel;
struct kset *channels_kset;
+ struct device_dma_parameters dma_parms;
+ u64 dma_mask;
/* place holder to keep track of the dir for hv device in debugfs */
struct dentry *debug_dir;
@@ -1273,15 +1289,8 @@ struct hv_device {
};
-static inline struct hv_device *device_to_hv_device(struct device *d)
-{
- return container_of(d, struct hv_device, device);
-}
-
-static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
-{
- return container_of(d, struct hv_driver, driver);
-}
+#define device_to_hv_device(d) container_of_const(d, struct hv_device, device)
+#define drv_to_hv_drv(d) container_of_const(d, struct hv_driver, driver)
static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
@@ -1293,6 +1302,8 @@ static inline void *hv_get_drvdata(struct hv_device *dev)
return dev_get_drvdata(&dev->device);
}
+struct device *hv_get_vmbus_root_device(void);
+
struct hv_ring_buffer_debug_info {
u32 current_interrupt_mask;
u32 current_read_index;
@@ -1305,6 +1316,8 @@ struct hv_ring_buffer_debug_info {
int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
+bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel);
+
/* Vmbus interface */
#define vmbus_driver_register(driver) \
__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
@@ -1454,12 +1467,14 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
/*
- * Linux doesn't support the 3 devices: the first two are for
- * Automatic Virtual Machine Activation, and the third is for
- * Remote Desktop Virtualization.
+ * Linux doesn't support these 4 devices: the first two are for
+ * Automatic Virtual Machine Activation, the third is for
+ * Remote Desktop Virtualization, and the fourth is for Initial
+ * Machine Configuration (IMC), used only by Windows guests.
* {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
* {3375baf4-9e15-4b30-b765-67acb10d607b}
* {276aacf4-ac15-426c-98dd-7521ad3f01fe}
+ * {c376c1c3-d276-48d2-90a9-c04748072c60}
*/
#define HV_AVMA1_GUID \
@@ -1474,6 +1489,10 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size);
.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
+#define HV_IMC_GUID \
+ .guid = GUID_INIT(0xc376c1c3, 0xd276, 0x48d2, 0x90, 0xa9, \
+ 0xc0, 0x47, 0x48, 0x07, 0x2c, 0x60)
+
/*
* Common header for Hyper-V ICs
*/
@@ -1502,6 +1521,7 @@ struct hv_util_service {
void *channel;
void (*util_cb)(void *);
int (*util_init)(struct hv_util_service *);
+ int (*util_init_transport)(void);
void (*util_deinit)(void);
int (*util_pre_suspend)(void);
int (*util_pre_resume)(void);
@@ -1588,6 +1608,11 @@ struct hyperv_service_callback {
void (*callback)(void *context);
};
+struct hv_dma_range {
+ dma_addr_t dma;
+ u32 mapping_size;
+};
+
#define MAX_SRV_VER 0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
const int *fw_version, int fw_vercnt,
@@ -1607,6 +1632,7 @@ int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
const guid_t *shv_host_servie_id);
int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);
+int vmbus_channel_set_cpu(struct vmbus_channel *channel, u32 target_cpu);
/* Get the start of the ring buffer. */
static inline void *
@@ -1661,6 +1687,11 @@ static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
return (desc->len8 << 3) - (desc->offset8 << 3);
}
+/* Get packet length associated with descriptor */
+static inline u32 hv_pkt_len(const struct vmpacket_descriptor *desc)
+{
+ return desc->len8 << 3;
+}
struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);
@@ -1671,10 +1702,6 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
void hv_pkt_iter_close(struct vmbus_channel *channel);
-/*
- * Get next packet descriptor from iterator
- * If at end of list, return NULL and update host.
- */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
const struct vmpacket_descriptor *pkt)
diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h
index fc08b433c856..be5417303ecf 100644
--- a/include/linux/hypervisor.h
+++ b/include/linux/hypervisor.h
@@ -32,4 +32,15 @@ static inline bool jailhouse_paravirt(void)
#endif /* !CONFIG_X86 */
+static inline bool hypervisor_isolated_pci_functions(void)
+{
+ if (IS_ENABLED(CONFIG_S390))
+ return true;
+
+ if (IS_ENABLED(CONFIG_LOONGARCH))
+ return true;
+
+ return jailhouse_paravirt();
+}
+
#endif /* __LINUX_HYPEVISOR_H */
diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h
index 7c522fdd9ea7..e305bf32e40a 100644
--- a/include/linux/i2c-algo-pca.h
+++ b/include/linux/i2c-algo-pca.h
@@ -71,7 +71,7 @@ struct i2c_algo_pca_data {
void *data; /* private low level data */
void (*write_byte) (void *data, int reg, int val);
int (*read_byte) (void *data, int reg);
- int (*wait_for_completion) (void *data);
+ int (*wait_for_completion_cb) (void *data);
void (*reset_chip) (void *data);
/* For PCA9564, use one of the predefined frequencies:
* 330000, 288000, 217000, 146000, 88000, 59000, 44000, 36000
diff --git a/include/linux/i2c-atr.h b/include/linux/i2c-atr.h
new file mode 100644
index 000000000000..2bb54dc87c8e
--- /dev/null
+++ b/include/linux/i2c-atr.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * I2C Address Translator
+ *
+ * Copyright (c) 2019,2022 Luca Ceresoli <luca@lucaceresoli.net>
+ * Copyright (c) 2022,2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ *
+ * Based on i2c-mux.h
+ */
+
+#ifndef _LINUX_I2C_ATR_H
+#define _LINUX_I2C_ATR_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+struct device;
+struct fwnode_handle;
+struct i2c_atr;
+
+/**
+ * enum i2c_atr_flags - Flags for an I2C ATR driver
+ *
+ * @I2C_ATR_F_STATIC: ATR does not support dynamic mapping, use static mapping.
+ * Mappings will only be added or removed as a result of
+ * devices being added or removed from a child bus.
+ *                   The ATR pool will have to be big enough to accommodate all
+ * devices expected to be added to the child buses.
+ * @I2C_ATR_F_PASSTHROUGH: Allow unmapped incoming addresses to pass through
+ */
+enum i2c_atr_flags {
+ I2C_ATR_F_STATIC = BIT(0),
+ I2C_ATR_F_PASSTHROUGH = BIT(1),
+};
+
+/**
+ * struct i2c_atr_ops - Callbacks from ATR to the device driver.
+ * @attach_addr: Notify the driver of a new device connected on a child
+ * bus, with the alias assigned to it. The driver must
+ * configure the hardware to use the alias.
+ * @detach_addr: Notify the driver of a device getting disconnected. The
+ * driver must configure the hardware to stop using the
+ * alias.
+ *
+ * All these functions return 0 on success, a negative error code otherwise.
+ */
+struct i2c_atr_ops {
+ int (*attach_addr)(struct i2c_atr *atr, u32 chan_id,
+ u16 addr, u16 alias);
+ void (*detach_addr)(struct i2c_atr *atr, u32 chan_id,
+ u16 addr);
+};
+
+/**
+ * struct i2c_atr_adap_desc - An ATR downstream bus descriptor
+ * @chan_id: Index of the new adapter (0 .. max_adapters-1). This value is
+ * passed to the callbacks in `struct i2c_atr_ops`.
+ * @parent: The device used as the parent of the new i2c adapter, or NULL
+ * to use the i2c-atr device as the parent.
+ * @bus_handle: The fwnode handle that points to the adapter's i2c
+ * peripherals, or NULL.
+ * @num_aliases: The number of aliases in this adapter's private alias pool. Set
+ * to zero if this adapter uses the ATR's global alias pool.
+ * @aliases: An optional array of private aliases used by the adapter
+ * instead of the ATR's global pool of aliases. Must contain
+ * exactly num_aliases entries if num_aliases > 0, is ignored
+ * otherwise.
+ */
+struct i2c_atr_adap_desc {
+ u32 chan_id;
+ struct device *parent;
+ struct fwnode_handle *bus_handle;
+ size_t num_aliases;
+ u16 *aliases;
+};
+
+/**
+ * i2c_atr_new() - Allocate and initialize an I2C ATR helper.
+ * @parent: The parent (upstream) adapter
+ * @dev: The device acting as an ATR
+ * @ops: Driver-specific callbacks
+ * @max_adapters: Maximum number of child adapters
+ * @flags: Flags for ATR
+ *
+ * The new ATR helper is connected to the parent adapter but has no child
+ * adapters. Call i2c_atr_add_adapter() to add some.
+ *
+ * Call i2c_atr_delete() to remove.
+ *
+ * Return: pointer to the new ATR helper object, or ERR_PTR
+ */
+struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
+ const struct i2c_atr_ops *ops, int max_adapters,
+ u32 flags);
+
+/**
+ * i2c_atr_delete - Delete an I2C ATR helper.
+ * @atr: I2C ATR helper to be deleted.
+ *
+ * Precondition: all the adapters added with i2c_atr_add_adapter() must be
+ * removed by calling i2c_atr_del_adapter().
+ */
+void i2c_atr_delete(struct i2c_atr *atr);
+
+/**
+ * i2c_atr_add_adapter - Create a child ("downstream") I2C bus.
+ * @atr: The I2C ATR
+ * @desc: An ATR adapter descriptor
+ *
+ * After calling this function a new i2c bus will appear. Adding and removing
+ * devices on the downstream bus will result in calls to the
+ * &i2c_atr_ops->attach_addr and &i2c_atr_ops->detach_addr callbacks for the
+ * driver to assign an alias to the device.
+ *
+ * The adapter's fwnode is set to @desc->bus_handle, or if that is NULL the
+ * function looks for a child node whose 'reg' property matches the chan_id
+ * under the i2c-atr device's 'i2c-atr' node.
+ *
+ * Call i2c_atr_del_adapter() to remove the adapter.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int i2c_atr_add_adapter(struct i2c_atr *atr, struct i2c_atr_adap_desc *desc);
+
+/**
+ * i2c_atr_del_adapter - Remove a child ("downstream") I2C bus added by
+ * i2c_atr_add_adapter(). If no I2C bus has been added
+ * this function is a no-op.
+ * @atr: The I2C ATR
+ * @chan_id: Index of the adapter to be removed (0 .. max_adapters-1)
+ */
+void i2c_atr_del_adapter(struct i2c_atr *atr, u32 chan_id);
+
+/**
+ * i2c_atr_set_driver_data - Attach private driver data to the i2c-atr instance.
+ * @atr: The I2C ATR
+ * @data: Pointer to the data to store
+ */
+void i2c_atr_set_driver_data(struct i2c_atr *atr, void *data);
+
+/**
+ * i2c_atr_get_driver_data - Get the stored driver data.
+ * @atr: The I2C ATR
+ *
+ * Return: Pointer to the stored data
+ */
+void *i2c_atr_get_driver_data(struct i2c_atr *atr);
+
+#endif /* _LINUX_I2C_ATR_H */
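
A quick usage sketch (not part of the patch): a hypothetical driver implementing this API provides the two callbacks, creates the helper, then adds one child bus. my_hw_map(), my_hw_unmap(), struct my_priv and the probe-time variables are illustrative stand-ins.

static int my_atr_attach_addr(struct i2c_atr *atr, u32 chan_id,
			      u16 addr, u16 alias)
{
	struct my_priv *priv = i2c_atr_get_driver_data(atr);

	/* program the translator: 'alias' upstream <-> 'addr' downstream */
	return my_hw_map(priv, chan_id, addr, alias);
}

static void my_atr_detach_addr(struct i2c_atr *atr, u32 chan_id, u16 addr)
{
	struct my_priv *priv = i2c_atr_get_driver_data(atr);

	my_hw_unmap(priv, chan_id, addr);
}

static const struct i2c_atr_ops my_atr_ops = {
	.attach_addr = my_atr_attach_addr,
	.detach_addr = my_atr_detach_addr,
};

	/* in probe: */
	struct i2c_atr_adap_desc desc = { .chan_id = 0 };

	atr = i2c_atr_new(parent_adap, dev, &my_atr_ops, 1, 0);
	if (IS_ERR(atr))
		return PTR_ERR(atr);
	i2c_atr_set_driver_data(atr, priv);
	ret = i2c_atr_add_adapter(atr, &desc);
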
diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h
index 98ef73b7c8fd..1784ac7afb11 100644
--- a/include/linux/i2c-mux.h
+++ b/include/linux/i2c-mux.h
@@ -56,8 +56,7 @@ struct i2c_adapter *i2c_root_adapter(struct device *dev);
* callback functions to perform hardware-specific mux control.
*/
int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
- u32 force_nr, u32 chan_id,
- unsigned int class);
+ u32 force_nr, u32 chan_id);
void i2c_mux_del_adapters(struct i2c_mux_core *muxc);
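
For callers the change amounts to dropping the old class argument; a hypothetical mux driver's probe loop would now look like this (nchans and the error path are illustrative):

	for (i = 0; i < nchans; i++) {
		/* force_nr == 0 requests a dynamically assigned bus number */
		ret = i2c_mux_add_adapter(muxc, 0, i);
		if (ret) {
			i2c_mux_del_adapters(muxc);
			return ret;
		}
	}
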
diff --git a/include/linux/i2c-of-prober.h b/include/linux/i2c-of-prober.h
new file mode 100644
index 000000000000..bb6d47f50ee5
--- /dev/null
+++ b/include/linux/i2c-of-prober.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Definitions for the Linux I2C OF component prober
+ *
+ * Copyright (C) 2024 Google LLC
+ */
+
+#ifndef _LINUX_I2C_OF_PROBER_H
+#define _LINUX_I2C_OF_PROBER_H
+
+#include <linux/kconfig.h>
+#include <linux/types.h>
+
+struct device;
+struct device_node;
+
+/**
+ * struct i2c_of_probe_ops - I2C OF component prober callbacks
+ *
+ * A set of callbacks to be used by i2c_of_probe_component().
+ *
+ * All callbacks are optional. Callbacks are called only once per run, and are
+ * used in the order they are defined in this structure.
+ *
+ * All callbacks that have return values shall return %0 on success,
+ * or a negative error number on failure.
+ *
+ * The @dev parameter passed to the callbacks is the same as @dev passed to
+ * i2c_of_probe_component(). It should only be used for dev_printk() calls
+ * and nothing else, especially not managed device resource (devres) APIs.
+ */
+struct i2c_of_probe_ops {
+ /**
+ * @enable: Retrieve and enable resources so that the components respond to probes.
+ *
+ * It is OK for this callback to return -EPROBE_DEFER since the intended use includes
+	 * retrieving resources and enabling them. Resources should be reverted to their initial
+ * state and released before returning if this fails.
+ */
+ int (*enable)(struct device *dev, struct device_node *bus_node, void *data);
+
+ /**
+ * @cleanup_early: Release exclusive resources prior to calling probe() on a
+ * detected component.
+ *
+ * Only called if a matching component is actually found. If none are found,
+ * resources that would have been released in this callback should be released in
+	 * @cleanup instead.
+ */
+ void (*cleanup_early)(struct device *dev, void *data);
+
+ /**
+ * @cleanup: Opposite of @enable to balance refcounts and free resources after probing.
+ *
+ * Should check if resources were already freed by @cleanup_early.
+ */
+ void (*cleanup)(struct device *dev, void *data);
+};
+
+/**
+ * struct i2c_of_probe_cfg - I2C OF component prober configuration
+ * @ops: Callbacks for the prober to use.
+ * @type: A string to match the device node name prefix to probe for.
+ */
+struct i2c_of_probe_cfg {
+ const struct i2c_of_probe_ops *ops;
+ const char *type;
+};
+
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+
+int i2c_of_probe_component(struct device *dev, const struct i2c_of_probe_cfg *cfg, void *ctx);
+
+/**
+ * DOC: I2C OF component prober simple helpers
+ *
+ * Components such as trackpads are commonly connected to a device's baseboard
+ * with a 6-pin ribbon cable. That gives at most one voltage supply and one
+ * GPIO (commonly an "enable" or "reset" line) besides the I2C bus, interrupt
+ * pin, and common ground. Touchscreens, while integrated into the display
+ * panel's connection, typically have the same set of connections.
+ *
+ * A simple set of helpers is provided here for use with the I2C OF component
+ * prober. This implementation targets such components, allowing for at most
+ * one regulator supply.
+ *
+ * The following helpers are provided:
+ * * i2c_of_probe_simple_enable()
+ * * i2c_of_probe_simple_cleanup_early()
+ * * i2c_of_probe_simple_cleanup()
+ */
+
+/**
+ * struct i2c_of_probe_simple_opts - Options for simple I2C component prober callbacks
+ * @res_node_compatible: Compatible string of device node to retrieve resources from.
+ * @supply_name: Name of regulator supply.
+ * @gpio_name: Name of GPIO. NULL if no GPIO line is used. Empty string ("") if GPIO
+ * line is unnamed.
+ * @post_power_on_delay_ms: Delay after regulators are powered on. Passed to msleep().
+ * @post_gpio_config_delay_ms: Delay after GPIO is configured. Passed to msleep().
+ * @gpio_assert_to_enable: %true if GPIO should be asserted, i.e. set to logical high,
+ * to enable the component.
+ *
+ * This describes power sequences common for the class of components supported by the
+ * simple component prober:
+ * * @gpio_name is configured to the non-active setting according to @gpio_assert_to_enable.
+ * * @supply_name regulator supply is enabled.
+ * * Wait for @post_power_on_delay_ms to pass.
+ * * @gpio_name is configured to the active setting according to @gpio_assert_to_enable.
+ * * Wait for @post_gpio_config_delay_ms to pass.
+ */
+struct i2c_of_probe_simple_opts {
+ const char *res_node_compatible;
+ const char *supply_name;
+ const char *gpio_name;
+ unsigned int post_power_on_delay_ms;
+ unsigned int post_gpio_config_delay_ms;
+ bool gpio_assert_to_enable;
+};
+
+struct gpio_desc;
+struct regulator;
+
+struct i2c_of_probe_simple_ctx {
+ /* public: provided by user before helpers are used. */
+ const struct i2c_of_probe_simple_opts *opts;
+ /* private: internal fields for helpers. */
+ struct regulator *supply;
+ struct gpio_desc *gpiod;
+};
+
+int i2c_of_probe_simple_enable(struct device *dev, struct device_node *bus_node, void *data);
+void i2c_of_probe_simple_cleanup_early(struct device *dev, void *data);
+void i2c_of_probe_simple_cleanup(struct device *dev, void *data);
+
+extern struct i2c_of_probe_ops i2c_of_probe_simple_ops;
+
+#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+
+#endif /* _LINUX_I2C_OF_PROBER_H */
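
A sketch of how a platform driver might drive the prober with the simple helpers; the "trackpad" type, the compatible string and the property names are assumptions for illustration:

static const struct i2c_of_probe_simple_opts my_opts = {
	.res_node_compatible = "vendor,my-trackpad",
	.supply_name = "vcc",
	.gpio_name = "enable",
	.post_power_on_delay_ms = 10,
	.gpio_assert_to_enable = true,
};

static int my_probe(struct platform_device *pdev)
{
	struct i2c_of_probe_simple_ctx ctx = { .opts = &my_opts };
	const struct i2c_of_probe_cfg cfg = {
		.ops = &i2c_of_probe_simple_ops,
		.type = "trackpad",
	};

	return i2c_of_probe_component(&pdev->dev, &cfg, &ctx);
}
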
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
index 1ef421818d3a..dc1bd2ab4c13 100644
--- a/include/linux/i2c-smbus.h
+++ b/include/linux/i2c-smbus.h
@@ -30,14 +30,6 @@ struct i2c_client *i2c_new_smbus_alert_device(struct i2c_adapter *adapter,
struct i2c_smbus_alert_setup *setup);
int i2c_handle_smbus_alert(struct i2c_client *ara);
-#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_OF)
-int of_i2c_setup_smbus_alert(struct i2c_adapter *adap);
-#else
-static inline int of_i2c_setup_smbus_alert(struct i2c_adapter *adap)
-{
- return 0;
-}
-#endif
#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_I2C_SLAVE)
struct i2c_client *i2c_new_slave_host_notify_device(struct i2c_adapter *adapter);
void i2c_free_slave_host_notify_device(struct i2c_client *client);
@@ -52,9 +44,11 @@ static inline void i2c_free_slave_host_notify_device(struct i2c_client *client)
#endif
#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_DMI)
-void i2c_register_spd(struct i2c_adapter *adap);
+void i2c_register_spd_write_disable(struct i2c_adapter *adap);
+void i2c_register_spd_write_enable(struct i2c_adapter *adap);
#else
-static inline void i2c_register_spd(struct i2c_adapter *adap) { }
+static inline void i2c_register_spd_write_disable(struct i2c_adapter *adap) { }
+static inline void i2c_register_spd_write_enable(struct i2c_adapter *adap) { }
#endif
#endif /* _LINUX_I2C_SMBUS_H */
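
With the split interface, a bus driver that wants SPD EEPROMs instantiated read-only would do something along these lines (sketch only):

	ret = i2c_add_adapter(adap);
	if (ret)
		return ret;
	i2c_register_spd_write_disable(adap);
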
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index e8f2ac8c9c3d..20fd41b51d5c 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -11,24 +11,25 @@
#define _LINUX_I2C_H
#include <linux/acpi.h> /* for acpi_handle */
+#include <linux/bits.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h> /* for struct device */
#include <linux/sched.h> /* for completion */
#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
#include <linux/rtmutex.h>
#include <linux/irqdomain.h> /* for Host Notify IRQ */
#include <linux/of.h> /* for struct device_node */
#include <linux/swab.h> /* for swab16 */
#include <uapi/linux/i2c.h>
-extern struct bus_type i2c_bus_type;
-extern struct device_type i2c_adapter_type;
-extern struct device_type i2c_client_type;
+extern const struct bus_type i2c_bus_type;
+extern const struct device_type i2c_adapter_type;
+extern const struct device_type i2c_client_type;
/* --- General options ------------------------------------------------ */
struct i2c_msg;
-struct i2c_algorithm;
struct i2c_adapter;
struct i2c_client;
struct i2c_driver;
@@ -147,6 +148,7 @@ s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
/* Now follow the 'nice' access routines. These also document the calling
conventions of i2c_smbus_xfer. */
+u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count);
s32 i2c_smbus_read_byte(const struct i2c_client *client);
s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command);
@@ -186,6 +188,7 @@ s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
u8 *values);
int i2c_get_device_id(const struct i2c_client *client,
struct i2c_device_identity *id);
+const struct i2c_device_id *i2c_client_get_device_id(const struct i2c_client *client);
#endif /* I2C */
/**
@@ -221,10 +224,18 @@ enum i2c_alert_protocol {
};
/**
+ * enum i2c_driver_flags - Flags for an I2C device driver
+ *
+ * @I2C_DRV_ACPI_WAIVE_D0_PROBE: Don't put the device in D0 state for probe
+ */
+enum i2c_driver_flags {
+ I2C_DRV_ACPI_WAIVE_D0_PROBE = BIT(0),
+};
+
+/**
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
- * @probe: Callback for device binding - soon to be deprecated
- * @probe_new: New callback for device binding
+ * @probe: Callback for device binding
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
* @alert: Alert callback, for example for the SMBus alert protocol
@@ -234,6 +245,7 @@ enum i2c_alert_protocol {
* @detect: Callback for device detection
* @address_list: The I2C addresses to probe (for detect)
* @clients: List of detected clients we created (for i2c-core use only)
+ * @flags: A bitmask of flags defined in &enum i2c_driver_flags
*
* The driver.owner field should be set to the module owner of this driver.
* The driver.name field should be set to the name of this driver.
@@ -259,13 +271,9 @@ struct i2c_driver {
unsigned int class;
/* Standard driver model interfaces */
- int (*probe)(struct i2c_client *client, const struct i2c_device_id *id);
- int (*remove)(struct i2c_client *client);
+ int (*probe)(struct i2c_client *client);
+ void (*remove)(struct i2c_client *client);
- /* New driver model interface to aid the seamless removal of the
- * current probe()'s, more commonly unused than used second parameter.
- */
- int (*probe_new)(struct i2c_client *client);
/* driver model interfaces that don't relate to enumeration */
void (*shutdown)(struct i2c_client *client);
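
A minimal skeleton against the new prototypes (my_setup() and my_teardown() are placeholders); drivers that still need the matched id table entry can fetch it with i2c_client_get_device_id(), added earlier in this patch:

static int my_probe(struct i2c_client *client)
{
	const struct i2c_device_id *id = i2c_client_get_device_id(client);

	return my_setup(client, id ? id->driver_data : 0);
}

static void my_remove(struct i2c_client *client)
{
	my_teardown(client);
}

static struct i2c_driver my_driver = {
	.driver = { .name = "my-chip" },
	.probe = my_probe,
	.remove = my_remove,
};
module_i2c_driver(my_driver);
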
@@ -292,8 +300,10 @@ struct i2c_driver {
int (*detect)(struct i2c_client *client, struct i2c_board_info *info);
const unsigned short *address_list;
struct list_head clients;
+
+ u32 flags;
};
-#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
+#define to_i2c_driver(d) container_of_const(d, struct i2c_driver, driver)
/**
* struct i2c_client - represent an I2C slave device
@@ -311,6 +321,8 @@ struct i2c_driver {
* calls it to pass on slave events to the slave driver.
* @devres_group_id: id of the devres group that will be created for resources
* acquired when probing this device.
+ * @debugfs: pointer to the debugfs subdirectory which the I2C core created
+ * for this client.
*
* An i2c_client identifies a single device (i.e. chip) connected to an
* i2c bus. The behaviour exposed to Linux is defined by the driver
@@ -340,14 +352,16 @@ struct i2c_client {
i2c_slave_cb_t slave_cb; /* callback for slave mode */
#endif
void *devres_group_id; /* ID of probe devres group */
+ struct dentry *debugfs; /* per-client debugfs dir */
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
-struct i2c_client *i2c_verify_client(struct device *dev);
struct i2c_adapter *i2c_verify_adapter(struct device *dev);
const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
const struct i2c_client *client);
+const void *i2c_get_match_data(const struct i2c_client *client);
+
static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
{
struct device * const dev = kobj_to_dev(kobj);
@@ -366,7 +380,6 @@ static inline void i2c_set_clientdata(struct i2c_client *client, void *data)
/* I2C slave support */
-#if IS_ENABLED(CONFIG_I2C_SLAVE)
enum i2c_slave_event {
I2C_SLAVE_READ_REQUESTED,
I2C_SLAVE_WRITE_REQUESTED,
@@ -377,13 +390,10 @@ enum i2c_slave_event {
int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
int i2c_slave_unregister(struct i2c_client *client);
+int i2c_slave_event(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
bool i2c_detect_slave_mode(struct device *dev);
-
-static inline int i2c_slave_event(struct i2c_client *client,
- enum i2c_slave_event event, u8 *val)
-{
- return client->slave_cb(client, event, val);
-}
#else
static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
#endif
@@ -395,7 +405,6 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
* @addr: stored in i2c_client.addr
* @dev_name: Overrides the default <busnr>-<addr> dev_name if set
* @platform_data: stored in i2c_client.dev.platform_data
- * @of_node: pointer to OpenFirmware device node
* @fwnode: device node supplied by the platform firmware
* @swnode: software node for the device
* @resources: resources associated with the device
@@ -419,7 +428,6 @@ struct i2c_board_info {
unsigned short addr;
const char *dev_name;
void *platform_data;
- struct device_node *of_node;
struct fwnode_handle *fwnode;
const struct software_node *swnode;
const struct resource *resources;
@@ -477,6 +485,13 @@ i2c_new_ancillary_device(struct i2c_client *client,
u16 default_addr);
void i2c_unregister_device(struct i2c_client *client);
+
+struct i2c_client *i2c_verify_client(struct device *dev);
+#else
+static inline struct i2c_client *i2c_verify_client(struct device *dev)
+{
+ return NULL;
+}
#endif /* I2C */
/* Mainboard arch_initcall() code should register all its I2C devices.
@@ -497,45 +512,54 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
#endif /* I2C_BOARDINFO */
/**
- * struct i2c_algorithm - represent I2C transfer method
- * @master_xfer: Issue a set of i2c transactions to the given I2C adapter
- * defined by the msgs array, with num messages available to transfer via
- * the adapter specified by adap.
- * @master_xfer_atomic: same as @master_xfer. Yet, only using atomic context
- * so e.g. PMICs can be accessed very late before shutdown. Optional.
- * @smbus_xfer: Issue smbus transactions to the given I2C adapter. If this
+ * struct i2c_algorithm - represent I2C transfer methods
+ * @xfer: Transfer a given number of messages defined by the msgs array via
+ * the specified adapter.
+ * @xfer_atomic: Same as @xfer. Yet, only using atomic context so e.g. PMICs
+ * can be accessed very late before shutdown. Optional.
+ * @smbus_xfer: Issue SMBus transactions to the given I2C adapter. If this
* is not present, then the bus layer will try and convert the SMBus calls
* into I2C transfers instead.
- * @smbus_xfer_atomic: same as @smbus_xfer. Yet, only using atomic context
+ * @smbus_xfer_atomic: Same as @smbus_xfer. Yet, only using atomic context
* so e.g. PMICs can be accessed very late before shutdown. Optional.
* @functionality: Return the flags that this algorithm/adapter pair supports
* from the ``I2C_FUNC_*`` flags.
- * @reg_slave: Register given client to I2C slave mode of this adapter
- * @unreg_slave: Unregister given client from I2C slave mode of this adapter
+ * @reg_target: Register given client to local target mode of this adapter
+ * @unreg_target: Unregister given client from local target mode of this adapter
+ *
+ * @master_xfer: deprecated, use @xfer
+ * @master_xfer_atomic: deprecated, use @xfer_atomic
+ * @reg_slave: deprecated, use @reg_target
+ * @unreg_slave: deprecated, use @unreg_target
*
- * The following structs are for those who like to implement new bus drivers:
* i2c_algorithm is the interface to a class of hardware solutions which can
* be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584
* to name two of the most common.
*
- * The return codes from the ``master_xfer{_atomic}`` fields should indicate the
+ * The return codes from the ``xfer{_atomic}`` fields should indicate the
* type of error code that occurred during the transfer, as documented in the
- * Kernel Documentation file Documentation/i2c/fault-codes.rst.
+ * Kernel Documentation file Documentation/i2c/fault-codes.rst. Otherwise, the
+ * number of messages executed should be returned.
*/
struct i2c_algorithm {
/*
- * If an adapter algorithm can't do I2C-level access, set master_xfer
+ * If an adapter algorithm can't do I2C-level access, set xfer
* to NULL. If an adapter algorithm can do SMBus access, set
* smbus_xfer. If set to NULL, the SMBus protocol is simulated
* using common I2C messages.
- *
- * master_xfer should return the number of messages successfully
- * processed, or a negative value on error
*/
- int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num);
- int (*master_xfer_atomic)(struct i2c_adapter *adap,
+ union {
+ int (*xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num);
+ int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num);
+ };
+ union {
+ int (*xfer_atomic)(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num);
+ int (*master_xfer_atomic)(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num);
+ };
int (*smbus_xfer)(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write,
u8 command, int size, union i2c_smbus_data *data);
@@ -547,8 +571,14 @@ struct i2c_algorithm {
u32 (*functionality)(struct i2c_adapter *adap);
#if IS_ENABLED(CONFIG_I2C_SLAVE)
- int (*reg_slave)(struct i2c_client *client);
- int (*unreg_slave)(struct i2c_client *client);
+ union {
+ int (*reg_target)(struct i2c_client *client);
+ int (*reg_slave)(struct i2c_client *client);
+ };
+ union {
+ int (*unreg_target)(struct i2c_client *client);
+ int (*unreg_slave)(struct i2c_client *client);
+ };
#endif
};
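
Thanks to the unions, existing drivers keep compiling unchanged; a new or converted driver would simply initialize the new names (callback bodies omitted in this sketch):

static const struct i2c_algorithm my_algo = {
	.xfer = my_xfer,		/* was .master_xfer */
	.xfer_atomic = my_xfer_atomic,	/* was .master_xfer_atomic, optional */
	.functionality = my_functionality,
};
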
@@ -729,6 +759,12 @@ struct i2c_adapter {
const struct i2c_adapter_quirks *quirks;
struct irq_domain *host_notify_domain;
+ struct regulator *bus_regulator;
+
+ struct dentry *debugfs;
+
+ /* 7bit address space */
+ DECLARE_BITMAP(addrs_in_instantiation, 1 << 7);
};
#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
@@ -833,8 +869,6 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
/* i2c adapter classes (bitmask) */
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
-#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
-#define I2C_CLASS_SPD (1<<7) /* Memory modules */
/* Warn users that the adapter doesn't support classes anymore */
#define I2C_CLASS_DEPRECATED (1<<8)
@@ -913,7 +947,22 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap)
static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
{
- return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
+ return (msg->addr << 1) | (msg->flags & I2C_M_RD);
+}
+
+/*
+ * 10-bit address
+ * addr_1: 5'b11110 | addr[9:8] | (R/nW)
+ * addr_2: addr[7:0]
+ */
+static inline u8 i2c_10bit_addr_hi_from_msg(const struct i2c_msg *msg)
+{
+ return 0xf0 | ((msg->addr & GENMASK(9, 8)) >> 7) | (msg->flags & I2C_M_RD);
+}
+
+static inline u8 i2c_10bit_addr_lo_from_msg(const struct i2c_msg *msg)
+{
+ return msg->addr & GENMASK(7, 0);
}
u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold);
@@ -943,21 +992,55 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
#define builtin_i2c_driver(__i2c_driver) \
builtin_driver(__i2c_driver, i2c_add_driver)
-#endif /* I2C */
+/* must call put_device() when done with returned i2c_client device */
+struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode);
+
+/* must call put_device() when done with returned i2c_adapter device */
+struct i2c_adapter *i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode);
+
+/* must call i2c_put_adapter() when done with returned i2c_adapter device */
+struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode);
+
+#else /* I2C */
+
+static inline struct i2c_client *
+i2c_find_device_by_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct i2c_adapter *
+i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct i2c_adapter *
+i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+#endif /* !I2C */
#if IS_ENABLED(CONFIG_OF)
/* must call put_device() when done with returned i2c_client device */
-struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
+static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
+{
+ return i2c_find_device_by_fwnode(of_fwnode_handle(node));
+}
/* must call put_device() when done with returned i2c_adapter device */
-struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
+static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+ return i2c_find_adapter_by_fwnode(of_fwnode_handle(node));
+}
/* must call i2c_put_adapter() when done with returned i2c_adapter device */
-struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
-
-const struct of_device_id
-*i2c_of_match_device(const struct of_device_id *matches,
- struct i2c_client *client);
+static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
+{
+ return i2c_get_adapter_by_fwnode(of_fwnode_handle(node));
+}
int of_i2c_get_board_info(struct device *dev, struct device_node *node,
struct i2c_board_info *info);
@@ -979,13 +1062,6 @@ static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node
return NULL;
}
-static inline const struct of_device_id
-*i2c_of_match_device(const struct of_device_id *matches,
- struct i2c_client *client)
-{
- return NULL;
-}
-
static inline int of_i2c_get_board_info(struct device *dev,
struct device_node *node,
struct i2c_board_info *info)
@@ -998,25 +1074,33 @@ static inline int of_i2c_get_board_info(struct device *dev,
struct acpi_resource;
struct acpi_resource_i2c_serialbus;
-#if IS_ENABLED(CONFIG_ACPI)
+#if IS_REACHABLE(CONFIG_ACPI) && IS_REACHABLE(CONFIG_I2C)
bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
struct acpi_resource_i2c_serialbus **i2c);
+int i2c_acpi_client_count(struct acpi_device *adev);
u32 i2c_acpi_find_bus_speed(struct device *dev);
-struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
- struct i2c_board_info *info);
+struct i2c_client *i2c_acpi_new_device_by_fwnode(struct fwnode_handle *fwnode,
+ int index,
+ struct i2c_board_info *info);
struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle);
+bool i2c_acpi_waive_d0_probe(struct device *dev);
#else
static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
struct acpi_resource_i2c_serialbus **i2c)
{
return false;
}
+static inline int i2c_acpi_client_count(struct acpi_device *adev)
+{
+ return 0;
+}
static inline u32 i2c_acpi_find_bus_speed(struct device *dev)
{
return 0;
}
-static inline struct i2c_client *i2c_acpi_new_device(struct device *dev,
- int index, struct i2c_board_info *info)
+static inline struct i2c_client *i2c_acpi_new_device_by_fwnode(
+ struct fwnode_handle *fwnode, int index,
+ struct i2c_board_info *info)
{
return ERR_PTR(-ENODEV);
}
@@ -1024,6 +1108,17 @@ static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle ha
{
return NULL;
}
+static inline bool i2c_acpi_waive_d0_probe(struct device *dev)
+{
+ return false;
+}
#endif /* CONFIG_ACPI */
+static inline struct i2c_client *i2c_acpi_new_device(struct device *dev,
+ int index,
+ struct i2c_board_info *info)
+{
+ return i2c_acpi_new_device_by_fwnode(dev_fwnode(dev), index, info);
+}
+
#endif /* _LINUX_I2C_H */
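
A sketch of the new 10-bit helpers in a controller driver's address phase; my_hw_tx_byte() and priv are stand-ins:

	if (msg->flags & I2C_M_TEN) {
		/* byte 1: 11110 | addr[9:8] | R/nW; byte 2: addr[7:0] */
		my_hw_tx_byte(priv, i2c_10bit_addr_hi_from_msg(msg));
		my_hw_tx_byte(priv, i2c_10bit_addr_lo_from_msg(msg));
	} else {
		my_hw_tx_byte(priv, i2c_8bit_addr_from_msg(msg));
	}
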
diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h
index 73b0982cc519..ad59a4ae60d1 100644
--- a/include/linux/i3c/ccc.h
+++ b/include/linux/i3c/ccc.h
@@ -132,7 +132,7 @@ struct i3c_ccc_dev_desc {
struct i3c_ccc_defslvs {
u8 count;
struct i3c_ccc_dev_desc master;
- struct i3c_ccc_dev_desc slaves[0];
+ struct i3c_ccc_dev_desc slaves[];
} __packed;
/**
@@ -240,7 +240,7 @@ struct i3c_ccc_bridged_slave_desc {
*/
struct i3c_ccc_setbrgtgt {
u8 count;
- struct i3c_ccc_bridged_slave_desc bslaves[0];
+ struct i3c_ccc_bridged_slave_desc bslaves[];
} __packed;
/**
@@ -318,7 +318,7 @@ enum i3c_ccc_setxtime_subcmd {
*/
struct i3c_ccc_setxtime {
u8 subcmd;
- u8 data[0];
+ u8 data[];
} __packed;
#define I3C_CCC_GETXTIME_SYNC_MODE BIT(0)
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index 8242e13e7b0b..9fcb6410a584 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -18,17 +18,18 @@
/**
* enum i3c_error_code - I3C error codes
*
- * These are the standard error codes as defined by the I3C specification.
- * When -EIO is returned by the i3c_device_do_priv_xfers() or
- * i3c_device_send_hdr_cmds() one can check the error code in
- * &struct_i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
- * what went wrong.
- *
* @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C
* related
* @I3C_ERROR_M0: M0 error
* @I3C_ERROR_M1: M1 error
* @I3C_ERROR_M2: M2 error
+ *
+ * These are the standard error codes as defined by the I3C specification.
+ * When -EIO is returned by the i3c_device_do_priv_xfers() or
+ * i3c_device_send_hdr_cmds() one can check the error code in
+ * &struct i3c_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
+ * what went wrong.
+ *
*/
enum i3c_error_code {
I3C_ERROR_UNKNOWN = 0,
@@ -38,29 +39,39 @@ enum i3c_error_code {
};
/**
- * enum i3c_hdr_mode - HDR mode ids
+ * enum i3c_xfer_mode - I3C xfer mode ids
* @I3C_HDR_DDR: DDR mode
* @I3C_HDR_TSP: TSP mode
* @I3C_HDR_TSL: TSL mode
+ * @I3C_SDR: SDR mode (NOT HDR mode)
*/
-enum i3c_hdr_mode {
- I3C_HDR_DDR,
- I3C_HDR_TSP,
- I3C_HDR_TSL,
+enum i3c_xfer_mode {
+	/* The three values below (I3C_HDR*) must match the GETCAP1 byte bit positions */
+ I3C_HDR_DDR = 0,
+ I3C_HDR_TSP = 1,
+ I3C_HDR_TSL = 2,
+	/* Used for the default SDR transfer mode */
+ I3C_SDR = 31,
};
/**
- * struct i3c_priv_xfer - I3C SDR private transfer
+ * struct i3c_xfer - I3C data transfer
* @rnw: encodes the transfer direction. true for a read, false for a write
+ * @cmd: Read/Write command in HDR mode, read: 0x80 - 0xff, write: 0x00 - 0x7f
* @len: transfer length in bytes of the transfer
+ * @actual_len: actual number of bytes transferred by the controller
* @data: input/output buffer
* @data.in: input buffer. Must point to a DMA-able buffer
* @data.out: output buffer. Must point to a DMA-able buffer
* @err: I3C error code
*/
-struct i3c_priv_xfer {
- u8 rnw;
+struct i3c_xfer {
+ union {
+ u8 rnw;
+ u8 cmd;
+ };
u16 len;
+ u16 actual_len;
union {
void *in;
const void *out;
@@ -68,6 +79,9 @@ struct i3c_priv_xfer {
enum i3c_error_code err;
};
+/* kept for backwards compatibility */
+#define i3c_priv_xfer i3c_xfer
+
/**
* enum i3c_dcr - I3C DCR values
* @I3C_DCR_GENERIC_DEVICE: generic I3C device
@@ -95,7 +109,7 @@ enum i3c_dcr {
/**
* struct i3c_device_info - I3C device information
- * @pid: Provisional ID
+ * @pid: Provisioned ID
* @bcr: Bus Characteristic Register
* @dcr: Device Characteristic Register
* @static_addr: static/I2C address
@@ -180,13 +194,17 @@ struct i3c_driver {
const struct i3c_device_id *id_table;
};
-static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv)
-{
- return container_of(drv, struct i3c_driver, driver);
-}
+#define drv_to_i3cdrv(__drv) container_of_const(__drv, struct i3c_driver, driver)
struct device *i3cdev_to_dev(struct i3c_device *i3cdev);
-struct i3c_device *dev_to_i3cdev(struct device *dev);
+
+/**
+ * dev_to_i3cdev() - Returns the I3C device containing @dev
+ * @__dev: device object
+ *
+ * Return: a pointer to an I3C device object.
+ */
+#define dev_to_i3cdev(__dev) container_of_const(__dev, struct i3c_device, dev)
const struct i3c_device_id *
i3c_device_match_id(struct i3c_device *i3cdev,
@@ -238,7 +256,7 @@ void i3c_driver_unregister(struct i3c_driver *drv);
*
* Return: 0 if both registrations succeeds, a negative error code otherwise.
*/
-static inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv,
+static __always_inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv,
struct i2c_driver *i2cdrv)
{
int ret;
@@ -263,7 +281,7 @@ static inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv,
* Note that when CONFIG_I3C is not enabled, this function only unregisters the
* @i2cdrv.
*/
-static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
+static __always_inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
struct i2c_driver *i2cdrv)
{
if (IS_ENABLED(CONFIG_I3C))
@@ -276,7 +294,7 @@ static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
* module_i3c_i2c_driver() - Register a module providing an I3C and an I2C
* driver
* @__i3cdrv: the I3C driver to register
- * @__i2cdrv: the I3C driver to register
+ * @__i2cdrv: the I2C driver to register
*
* Provide generic init/exit functions that simply register/unregister an I3C
* and an I2C driver.
@@ -287,13 +305,22 @@ static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
#define module_i3c_i2c_driver(__i3cdrv, __i2cdrv) \
module_driver(__i3cdrv, \
i3c_i2c_driver_register, \
- i3c_i2c_driver_unregister)
+ i3c_i2c_driver_unregister, \
+ __i2cdrv)
+
+int i3c_device_do_xfers(struct i3c_device *dev, struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode);
+
+static inline int i3c_device_do_priv_xfers(struct i3c_device *dev,
+ struct i3c_xfer *xfers,
+ int nxfers)
+{
+ return i3c_device_do_xfers(dev, xfers, nxfers, I3C_SDR);
+}
-int i3c_device_do_priv_xfers(struct i3c_device *dev,
- struct i3c_priv_xfer *xfers,
- int nxfers);
+int i3c_device_do_setdasa(struct i3c_device *dev);
-void i3c_device_get_info(struct i3c_device *dev, struct i3c_device_info *info);
+void i3c_device_get_info(const struct i3c_device *dev, struct i3c_device_info *info);
struct i3c_ibi_payload {
unsigned int len;
@@ -331,5 +358,6 @@ int i3c_device_request_ibi(struct i3c_device *dev,
void i3c_device_free_ibi(struct i3c_device *dev);
int i3c_device_enable_ibi(struct i3c_device *dev);
int i3c_device_disable_ibi(struct i3c_device *dev);
+u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev);
#endif /* I3C_DEV_H */
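
A sketch of the renamed transfer API from a target driver's point of view: the same xfer array is issued first in SDR through the compatibility wrapper, then as HDR-DDR. The 0x20/0xa0 HDR command codes and the BIT(I3C_HDR_DDR) interpretation of the capability mask are assumptions; cmd[] and rsp[] are caller-provided DMA-able buffers.

	struct i3c_xfer xfers[] = {
		{ .rnw = false, .len = sizeof(cmd), .data.out = cmd },
		{ .rnw = true,  .len = sizeof(rsp), .data.in  = rsp },
	};
	int ret;

	ret = i3c_device_do_priv_xfers(i3cdev, xfers, ARRAY_SIZE(xfers));
	if (ret)
		return ret;

	if (i3c_device_get_supported_xfer_mode(i3cdev) & BIT(I3C_HDR_DDR)) {
		xfers[0].cmd = 0x20;	/* hypothetical HDR write command */
		xfers[1].cmd = 0xa0;	/* hypothetical HDR read command */
		ret = i3c_device_do_xfers(i3cdev, xfers, ARRAY_SIZE(xfers),
					  I3C_HDR_DDR);
	}
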
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 9cb39d901cd5..2fd850f4678b 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -22,10 +22,18 @@
#define I3C_BROADCAST_ADDR 0x7e
#define I3C_MAX_ADDR GENMASK(6, 0)
+struct i2c_client;
+
+/* notifier actions. notifier call data is the struct i3c_bus */
+enum {
+ I3C_NOTIFY_BUS_ADD,
+ I3C_NOTIFY_BUS_REMOVE,
+};
+
struct i3c_master_controller;
struct i3c_bus;
-struct i2c_device;
struct i3c_device;
+extern const struct bus_type i3c_bus_type;
/**
* struct i3c_i2c_dev_desc - Common part of the I3C/I2C device descriptor
@@ -69,7 +77,6 @@ struct i2c_dev_boardinfo {
/**
* struct i2c_dev_desc - I2C device descriptor
* @common: common part of the I2C device descriptor
- * @boardinfo: pointer to the boardinfo attached to this I2C device
* @dev: I2C device object registered to the I2C framework
* @addr: I2C device address
* @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
@@ -85,7 +92,6 @@ struct i2c_dev_boardinfo {
*/
struct i2c_dev_desc {
struct i3c_i2c_dev_desc common;
- const struct i2c_dev_boardinfo *boardinfo;
struct i2c_client *dev;
u16 addr;
u8 lvr;
@@ -129,6 +135,7 @@ struct i3c_ibi_slot {
* rejected by the master
* @num_slots: number of IBI slots reserved for this device
* @enabled: reflect the IBI status
+ * @wq: workqueue used to execute IBI handlers.
* @handler: IBI handler specified at i3c_device_request_ibi() call time. This
* handler will be called from the controller workqueue, and as such
* is allowed to sleep (though it is recommended to process the IBI
@@ -151,6 +158,7 @@ struct i3c_device_ibi_info {
unsigned int max_payload_len;
unsigned int num_slots;
unsigned int enabled;
+ struct workqueue_struct *wq;
void (*handler)(struct i3c_device *dev,
const struct i3c_ibi_payload *payload);
};
@@ -166,7 +174,7 @@ struct i3c_device_ibi_info {
* assigned a dynamic address by the master. Will be used during
* bus initialization to assign it a specific dynamic address
* before starting DAA (Dynamic Address Assignment)
- * @pid: I3C Provisional ID exposed by the device. This is a unique identifier
+ * @pid: I3C Provisioned ID exposed by the device. This is a unique identifier
* that may be used to attach boardinfo to i3c_dev_desc when the device
* does not have a static address
* @of_node: optional DT node in case the device has been described in the DT
@@ -241,10 +249,15 @@ struct i3c_device {
*/
#define I3C_BUS_MAX_DEVS 11
-#define I3C_BUS_MAX_I3C_SCL_RATE 12900000
-#define I3C_BUS_TYP_I3C_SCL_RATE 12500000
-#define I3C_BUS_I2C_FM_PLUS_SCL_RATE 1000000
-#define I3C_BUS_I2C_FM_SCL_RATE 400000
+/* Taken from the I3C Spec V1.1.1, chapter 6.2. "Timing specification" */
+#define I3C_BUS_I2C_FM_PLUS_SCL_MAX_RATE 1000000
+#define I3C_BUS_I2C_FM_SCL_MAX_RATE 400000
+#define I3C_BUS_I3C_SCL_MAX_RATE 12900000
+#define I3C_BUS_I3C_SCL_TYP_RATE 12500000
+#define I3C_BUS_TAVAL_MIN_NS 1000
+#define I3C_BUS_TBUF_MIXED_FM_MIN_NS 1300
+#define I3C_BUS_THIGH_MIXED_MAX_NS 41
+#define I3C_BUS_TIDLE_MIN_NS 200000
#define I3C_BUS_TLOW_OD_MIN_NS 200
/**
@@ -270,13 +283,29 @@ enum i3c_bus_mode {
};
/**
+ * enum i3c_open_drain_speed - I3C open-drain speed
+ * @I3C_OPEN_DRAIN_SLOW_SPEED: Slow open-drain speed for sending the first
+ * broadcast address. The first broadcast address at this speed
+ * will be visible to all devices on the I3C bus. I3C devices
+ * working in I2C mode will turn off their spike filter when
+ * switching into I3C mode.
+ * @I3C_OPEN_DRAIN_NORMAL_SPEED: Normal open-drain speed in I3C bus mode.
+ */
+enum i3c_open_drain_speed {
+ I3C_OPEN_DRAIN_SLOW_SPEED,
+ I3C_OPEN_DRAIN_NORMAL_SPEED,
+};
+
+/**
* enum i3c_addr_slot_status - I3C address slot status
* @I3C_ADDR_SLOT_FREE: address is free
* @I3C_ADDR_SLOT_RSVD: address is reserved
* @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device
* @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device
* @I3C_ADDR_SLOT_STATUS_MASK: address slot mask
- *
+ * @I3C_ADDR_SLOT_EXT_STATUS_MASK: address slot mask with extended information
+ * @I3C_ADDR_SLOT_EXT_DESIRED: marks addresses that are preferred by some device,
+ *                             e.g. via the "assigned-address" device tree property.
* On an I3C bus, addresses are assigned dynamically, and we need to know which
* addresses are free to use and which ones are already assigned.
*
@@ -289,8 +318,12 @@ enum i3c_addr_slot_status {
I3C_ADDR_SLOT_I2C_DEV,
I3C_ADDR_SLOT_I3C_DEV,
I3C_ADDR_SLOT_STATUS_MASK = 3,
+ I3C_ADDR_SLOT_EXT_STATUS_MASK = 7,
+ I3C_ADDR_SLOT_EXT_DESIRED = BIT(2),
};
+#define I3C_ADDR_SLOT_STATUS_BITS 4
+
/**
* struct i3c_bus - I3C bus object
* @cur_master: I3C master currently driving the bus. Since I3C is multi-master
@@ -332,7 +365,7 @@ enum i3c_addr_slot_status {
struct i3c_bus {
struct i3c_dev_desc *cur_master;
int id;
- unsigned long addrslots[((I2C_MAX_ADDR + 1) * 2) / BITS_PER_LONG];
+ unsigned long addrslots[((I2C_MAX_ADDR + 1) * I3C_ADDR_SLOT_STATUS_BITS) / BITS_PER_LONG];
enum i3c_bus_mode mode;
struct {
unsigned long i3c;
@@ -385,7 +418,11 @@ struct i3c_bus {
* @send_ccc_cmd: send a CCC command
* This method is mandatory.
* @priv_xfers: do one or several private I3C SDR transfers
- * This method is mandatory.
+ * This method is mandatory when i3c_xfers is not implemented. It
+ * is deprecated.
+ * @i3c_xfers: do one or several I3C SDR or HDR transfers
+ *             This method is mandatory when priv_xfers is not implemented;
+ *             new drivers should implement it instead of priv_xfers.
* @attach_i2c_dev: called every time an I2C device is attached to the bus.
* This is a good place to attach master controller specific
* data to I2C devices.
@@ -426,6 +463,9 @@ struct i3c_bus {
* for a future IBI
* This method is mandatory only if ->request_ibi is not
* NULL.
+ * @enable_hotjoin: enable hot join event detect.
+ * @disable_hotjoin: disable hot join event detect.
+ * @set_speed: adjust I3C open drain mode timing.
*/
struct i3c_master_controller_ops {
int (*bus_init)(struct i3c_master_controller *master);
@@ -438,13 +478,17 @@ struct i3c_master_controller_ops {
const struct i3c_ccc_cmd *cmd);
int (*send_ccc_cmd)(struct i3c_master_controller *master,
struct i3c_ccc_cmd *cmd);
+ /* Deprecated, please use i3c_xfers() */
int (*priv_xfers)(struct i3c_dev_desc *dev,
struct i3c_priv_xfer *xfers,
int nxfers);
+ int (*i3c_xfers)(struct i3c_dev_desc *dev,
+ struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode);
int (*attach_i2c_dev)(struct i2c_dev_desc *dev);
void (*detach_i2c_dev)(struct i2c_dev_desc *dev);
int (*i2c_xfers)(struct i2c_dev_desc *dev,
- const struct i2c_msg *xfers, int nxfers);
+ struct i2c_msg *xfers, int nxfers);
int (*request_ibi)(struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req);
void (*free_ibi)(struct i3c_dev_desc *dev);
@@ -452,6 +496,9 @@ struct i3c_master_controller_ops {
int (*disable_ibi)(struct i3c_dev_desc *dev);
void (*recycle_ibi_slot)(struct i3c_dev_desc *dev,
struct i3c_ibi_slot *slot);
+ int (*enable_hotjoin)(struct i3c_master_controller *master);
+ int (*disable_hotjoin)(struct i3c_master_controller *master);
+ int (*set_speed)(struct i3c_master_controller *master, enum i3c_open_drain_speed speed);
};
/**
@@ -465,11 +512,12 @@ struct i3c_master_controller_ops {
* @ops: master operations. See &struct i3c_master_controller_ops
* @secondary: true if the master is a secondary master
* @init_done: true when the bus initialization is done
+ * @hotjoin: true if the master supports hotjoin
* @boardinfo.i3c: list of I3C boardinfo objects
* @boardinfo.i2c: list of I2C boardinfo objects
* @boardinfo: board-level information attached to devices connected on the bus
* @bus: I3C bus exposed by this master
- * @wq: workqueue used to execute IBI handlers. Can also be used by master
+ * @wq: workqueue which can be used by master
* drivers if they need to postpone operations that need to take place
* in a thread context. Typical examples are Hot Join processing which
* requires taking the bus lock in maintenance, which in turn, can only
@@ -487,6 +535,7 @@ struct i3c_master_controller {
const struct i3c_master_controller_ops *ops;
unsigned int secondary : 1;
unsigned int init_done : 1;
+ unsigned int hotjoin: 1;
struct {
struct list_head i3c;
struct list_head i2c;
@@ -517,6 +566,26 @@ struct i3c_master_controller {
#define i3c_bus_for_each_i3cdev(bus, dev) \
list_for_each_entry(dev, &(bus)->devs.i3c, common.node)
+/**
+ * struct i3c_dma - DMA transfer and mapping descriptor
+ * @dev: device object of a device doing DMA
+ * @buf: destination/source buffer for DMA
+ * @len: length of transfer
+ * @map_len: length of DMA mapping
+ * @addr: mapped DMA address for a Host Controller Driver
+ * @dir: DMA direction
+ * @bounce_buf: an allocated bounce buffer if transfer needs it or NULL
+ */
+struct i3c_dma {
+ struct device *dev;
+ void *buf;
+ size_t len;
+ size_t map_len;
+ dma_addr_t addr;
+ enum dma_data_direction dir;
+ void *bounce_buf;
+};
+
int i3c_master_do_i2c_xfers(struct i3c_master_controller *master,
const struct i2c_msg *xfers,
int nxfers);
@@ -534,6 +603,12 @@ int i3c_master_get_free_addr(struct i3c_master_controller *master,
int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
u8 addr);
int i3c_master_do_daa(struct i3c_master_controller *master);
+struct i3c_dma *i3c_master_dma_map_single(struct device *dev, void *ptr,
+ size_t len, bool force_bounce,
+ enum dma_data_direction dir);
+void i3c_master_dma_unmap_single(struct i3c_dma *dma_xfer);
+DEFINE_FREE(i3c_master_dma_unmap_single, void *,
+ if (_T) i3c_master_dma_unmap_single(_T))
int i3c_master_set_info(struct i3c_master_controller *master,
const struct i3c_device_info *info);
@@ -542,7 +617,9 @@ int i3c_master_register(struct i3c_master_controller *master,
struct device *parent,
const struct i3c_master_controller_ops *ops,
bool secondary);
-int i3c_master_unregister(struct i3c_master_controller *master);
+void i3c_master_unregister(struct i3c_master_controller *master);
+int i3c_master_enable_hotjoin(struct i3c_master_controller *master);
+int i3c_master_disable_hotjoin(struct i3c_master_controller *master);
/**
* i3c_dev_get_master_data() - get master private data attached to an I3C
@@ -652,4 +729,9 @@ void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot);
struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev);
+void i3c_for_each_bus_locked(int (*fn)(struct i3c_bus *bus, void *data),
+ void *data);
+int i3c_register_notifier(struct notifier_block *nb);
+int i3c_unregister_notifier(struct notifier_block *nb);
+
#endif /* I3C_MASTER_H */
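
A sketch of the new notifier interface; my_scan_bus() and my_forget_bus() are placeholders. As noted above, the notifier call data is the struct i3c_bus.

static int my_bus_notify(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	struct i3c_bus *bus = data;

	if (action == I3C_NOTIFY_BUS_ADD)
		my_scan_bus(bus);
	else if (action == I3C_NOTIFY_BUS_REMOVE)
		my_forget_bus(bus);

	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_bus_notify,
};

	/* at init time: */
	ret = i3c_register_notifier(&my_nb);
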
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index 0261e2fb3636..00037c13abc8 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -3,6 +3,7 @@
#define _LINUX_I8042_H
+#include <linux/errno.h>
#include <linux/types.h>
/*
@@ -53,15 +54,29 @@
struct serio;
+/**
+ * typedef i8042_filter_t - i8042 filter callback
+ * @data: Data received by the i8042 controller
+ * @str: Status register of the i8042 controller
+ * @serio: Serio of the i8042 controller
+ * @context: Context pointer associated with this callback
+ *
+ * This represents an i8042 filter callback which can be used with i8042_install_filter()
+ * and i8042_remove_filter() to filter the i8042 input for platform-specific key codes.
+ *
+ * Context: Interrupt context.
+ * Returns: true if the data should be filtered out, false otherwise.
+ */
+typedef bool (*i8042_filter_t)(unsigned char data, unsigned char str, struct serio *serio,
+ void *context);
+
#if defined(CONFIG_SERIO_I8042) || defined(CONFIG_SERIO_I8042_MODULE)
void i8042_lock_chip(void);
void i8042_unlock_chip(void);
int i8042_command(unsigned char *param, int command);
-int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio));
-int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio));
+int i8042_install_filter(i8042_filter_t filter, void *context);
+int i8042_remove_filter(i8042_filter_t filter);
#else
@@ -78,14 +93,12 @@ static inline int i8042_command(unsigned char *param, int command)
return -ENODEV;
}
-static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio))
+static inline int i8042_install_filter(i8042_filter_t filter, void *context)
{
return -ENODEV;
}
-static inline int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
- struct serio *serio))
+static inline int i8042_remove_filter(i8042_filter_t filter)
{
return -ENODEV;
}
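
A sketch of a platform driver using the new context-carrying filter; MY_HOTKEY_SCANCODE and struct my_priv are made up:

static bool my_filter(unsigned char data, unsigned char str,
		      struct serio *serio, void *context)
{
	struct my_priv *priv = context;

	if (data == MY_HOTKEY_SCANCODE) {
		schedule_work(&priv->work);
		return true;	/* consume the byte */
	}

	return false;		/* pass everything else through */
}

	/* probe: */
	error = i8042_install_filter(my_filter, priv);

	/* remove: */
	i8042_remove_filter(my_filter);
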
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
index 8336b2f6f834..56c280eb2d4f 100644
--- a/include/linux/i8253.h
+++ b/include/linux/i8253.h
@@ -21,9 +21,9 @@
#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
extern raw_spinlock_t i8253_lock;
-extern bool i8253_clear_counter_on_shutdown;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);
+extern void clockevent_i8253_disable(void);
extern void setup_pit_timer(void);
diff --git a/include/linux/i8254.h b/include/linux/i8254.h
new file mode 100644
index 000000000000..a675c309232b
--- /dev/null
+++ b/include/linux/i8254.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) William Breathitt Gray */
+#ifndef _I8254_H_
+#define _I8254_H_
+
+struct device;
+struct regmap;
+
+/**
+ * struct i8254_regmap_config - Configuration for the register map of an i8254
+ * @parent: parent device
+ * @map: regmap for the i8254
+ */
+struct i8254_regmap_config {
+ struct device *parent;
+ struct regmap *map;
+};
+
+int devm_i8254_regmap_register(struct device *dev, const struct i8254_regmap_config *config);
+
+#endif /* _I8254_H_ */
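
A registration sketch under the assumption of an MMIO-mapped i8254; regs and my_regmap_cfg are placeholders set up elsewhere in the driver:

static int my_probe(struct platform_device *pdev)
{
	struct i8254_regmap_config cfg = { .parent = &pdev->dev };

	cfg.map = devm_regmap_init_mmio(&pdev->dev, regs, &my_regmap_cfg);
	if (IS_ERR(cfg.map))
		return PTR_ERR(cfg.map);

	return devm_i8254_regmap_register(&pdev->dev, &cfg);
}
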
diff --git a/include/linux/icmp.h b/include/linux/icmp.h
index 0af4d210ee31..043ec5d9c882 100644
--- a/include/linux/icmp.h
+++ b/include/linux/icmp.h
@@ -40,4 +40,36 @@ void ip_icmp_error_rfc4884(const struct sk_buff *skb,
struct sock_ee_data_rfc4884 *out,
int thlen, int off);
+/* RFC 4884 */
+#define ICMP_EXT_ORIG_DGRAM_MIN_LEN 128
+#define ICMP_EXT_VERSION_2 2
+
+/* ICMP Extension Object Classes */
+#define ICMP_EXT_OBJ_CLASS_IIO 2 /* RFC 5837 */
+
+/* Interface Information Object - RFC 5837 */
+enum {
+ ICMP_EXT_CTYPE_IIO_ROLE_IIF,
+};
+
+#define ICMP_EXT_CTYPE_IIO_ROLE(ROLE) ((ROLE) << 6)
+#define ICMP_EXT_CTYPE_IIO_MTU BIT(0)
+#define ICMP_EXT_CTYPE_IIO_NAME BIT(1)
+#define ICMP_EXT_CTYPE_IIO_IPADDR BIT(2)
+#define ICMP_EXT_CTYPE_IIO_IFINDEX BIT(3)
+
+struct icmp_ext_iio_name_subobj {
+ u8 len;
+ char name[IFNAMSIZ];
+};
+
+enum {
+ /* RFC 5837 - Incoming IP Interface Role */
+ ICMP_ERR_EXT_IIO_IIF,
+ /* Add new constants above. Used by "icmp_errors_extension_mask"
+ * sysctl.
+ */
+ ICMP_ERR_EXT_COUNT,
+};
+
#endif /* _LINUX_ICMP_H */
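
Putting the macros together: per the definitions above, the IIO c-type carries the role in bits 7:6 and the presence flags in the low bits, so a c-type advertising the incoming interface with ifindex and name attached would be built roughly as:

	u8 ctype = ICMP_EXT_CTYPE_IIO_ROLE(ICMP_EXT_CTYPE_IIO_ROLE_IIF) |
		   ICMP_EXT_CTYPE_IIO_IFINDEX | ICMP_EXT_CTYPE_IIO_NAME;
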
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 9055cb380ee2..e3b3b0fa2a8f 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -79,17 +79,22 @@ extern int icmpv6_init(void);
extern int icmpv6_err_convert(u8 type, u8 code,
int *err);
extern void icmpv6_cleanup(void);
-extern void icmpv6_param_prob(struct sk_buff *skb,
- u8 code, int pos);
+extern void icmpv6_param_prob_reason(struct sk_buff *skb,
+ u8 code, int pos,
+ enum skb_drop_reason reason);
struct flowi6;
struct in6_addr;
-extern void icmpv6_flow_init(struct sock *sk,
- struct flowi6 *fl6,
- u8 type,
- const struct in6_addr *saddr,
- const struct in6_addr *daddr,
- int oif);
+
+void icmpv6_flow_init(const struct sock *sk, struct flowi6 *fl6, u8 type,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr, int oif);
+
+static inline void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
+{
+ icmpv6_param_prob_reason(skb, code, pos,
+ SKB_DROP_REASON_NOT_SPECIFIED);
+}
static inline bool icmpv6_is_err(int type)
{
diff --git a/include/linux/ide.h b/include/linux/ide.h
deleted file mode 100644
index 2c300689a51a..000000000000
--- a/include/linux/ide.h
+++ /dev/null
@@ -1,1623 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _IDE_H
-#define _IDE_H
-/*
- * linux/include/linux/ide.h
- *
- * Copyright (C) 1994-2002 Linus Torvalds & authors
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/ata.h>
-#include <linux/blk-mq.h>
-#include <linux/proc_fs.h>
-#include <linux/interrupt.h>
-#include <linux/bitops.h>
-#include <linux/bio.h>
-#include <linux/pci.h>
-#include <linux/completion.h>
-#include <linux/pm.h>
-#include <linux/mutex.h>
-/* for request_sense */
-#include <linux/cdrom.h>
-#include <scsi/scsi_cmnd.h>
-#include <asm/byteorder.h>
-#include <asm/io.h>
-
-/*
- * Probably not wise to fiddle with these
- */
-#define SUPPORT_VLB_SYNC 1
-#define IDE_DEFAULT_MAX_FAILURES 1
-#define ERROR_MAX 8 /* Max read/write errors per sector */
-#define ERROR_RESET 3 /* Reset controller every 4th retry */
-#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
-
-struct device;
-
-/* values for ide_request.type */
-enum ata_priv_type {
- ATA_PRIV_MISC,
- ATA_PRIV_TASKFILE,
- ATA_PRIV_PC,
- ATA_PRIV_SENSE, /* sense request */
- ATA_PRIV_PM_SUSPEND, /* suspend request */
- ATA_PRIV_PM_RESUME, /* resume request */
-};
-
-struct ide_request {
- struct scsi_request sreq;
- u8 sense[SCSI_SENSE_BUFFERSIZE];
- u8 type;
- void *special;
-};
-
-static inline struct ide_request *ide_req(struct request *rq)
-{
- return blk_mq_rq_to_pdu(rq);
-}
-
-static inline bool ata_misc_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC;
-}
-
-static inline bool ata_taskfile_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE;
-}
-
-static inline bool ata_pc_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC;
-}
-
-static inline bool ata_sense_request(struct request *rq)
-{
- return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE;
-}
-
-static inline bool ata_pm_request(struct request *rq)
-{
- return blk_rq_is_private(rq) &&
- (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND ||
- ide_req(rq)->type == ATA_PRIV_PM_RESUME);
-}
-
-/* Error codes returned in result to the higher part of the driver. */
-enum {
- IDE_DRV_ERROR_GENERAL = 101,
- IDE_DRV_ERROR_FILEMARK = 102,
- IDE_DRV_ERROR_EOD = 103,
-};
-
-/*
- * Definitions for accessing IDE controller registers
- */
-#define IDE_NR_PORTS (10)
-
-struct ide_io_ports {
- unsigned long data_addr;
-
- union {
- unsigned long error_addr; /* read: error */
- unsigned long feature_addr; /* write: feature */
- };
-
- unsigned long nsect_addr;
- unsigned long lbal_addr;
- unsigned long lbam_addr;
- unsigned long lbah_addr;
-
- unsigned long device_addr;
-
- union {
- unsigned long status_addr; /*  read: status  */
- unsigned long command_addr; /* write: command */
- };
-
- unsigned long ctl_addr;
-
- unsigned long irq_addr;
-};
-
-#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
-
-#define BAD_R_STAT (ATA_BUSY | ATA_ERR)
-#define BAD_W_STAT (BAD_R_STAT | ATA_DF)
-#define BAD_STAT (BAD_R_STAT | ATA_DRQ)
-#define DRIVE_READY (ATA_DRDY | ATA_DSC)
-
-#define BAD_CRC (ATA_ABORTED | ATA_ICRC)
-
-#define SATA_NR_PORTS (3) /* 16 possible ?? */
-
-#define SATA_STATUS_OFFSET (0)
-#define SATA_ERROR_OFFSET (1)
-#define SATA_CONTROL_OFFSET (2)
-
-/*
- * Our Physical Region Descriptor (PRD) table should be large enough
- * to handle the biggest I/O request we are likely to see. Since requests
- * can have no more than 256 sectors, and since the typical blocksize is
- * two or more sectors, we could get by with a limit of 128 entries here for
- * the usual worst case. Most requests seem to include some contiguous blocks,
- * further reducing the number of table entries required.
- *
- * The driver reverts to PIO mode for individual requests that exceed
- * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling
- * 100% of all crazy scenarios here is not necessary.
- *
- * As it turns out though, we must allocate a full 4KB page for this,
- * so the two PRD tables (ide0 & ide1) will each get half of that,
- * allowing each to have about 256 entries (8 bytes each) from this.
- */
-#define PRD_BYTES 8
-#define PRD_ENTRIES 256
-
-/*
- * Some more useful definitions
- */
-#define PARTN_BITS 6 /* number of minor dev bits for partitions */
-#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
-
-/*
- * Timeouts for various operations:
- */
-enum {
- /* spec allows up to 20ms, but CF cards and SSD drives need more */
- WAIT_DRQ = 1 * HZ, /* 1s */
- /* some laptops are very slow */
- WAIT_READY = 5 * HZ, /* 5s */
- /* should be less than 3ms (?), if all ATAPI CD is closed at boot */
- WAIT_PIDENTIFY = 10 * HZ, /* 10s */
- /* worst case when spinning up */
- WAIT_WORSTCASE = 30 * HZ, /* 30s */
- /* maximum wait for an IRQ to happen */
- WAIT_CMD = 10 * HZ, /* 10s */
- /* Some drives require a longer IRQ timeout. */
- WAIT_FLOPPY_CMD = 50 * HZ, /* 50s */
- /*
- * Some drives (for example, Seagate STT3401A Travan) require a very
- * long timeout, because they don't return an interrupt or clear their
- * BSY bit until after the command completes (even retension commands).
- */
- WAIT_TAPE_CMD = 900 * HZ, /* 900s */
- /* minimum sleep time */
- WAIT_MIN_SLEEP = HZ / 50, /* 20ms */
-};
-
-/*
- * Op codes for special requests to be handled by ide_special_rq().
- * Values should be in the range of 0x20 to 0x3f.
- */
-#define REQ_DRIVE_RESET 0x20
-#define REQ_DEVSET_EXEC 0x21
-#define REQ_PARK_HEADS 0x22
-#define REQ_UNPARK_HEADS 0x23
-
-/*
- * hwif_chipset_t is used to keep track of the specific hardware
- * chipset used by each IDE interface, if known.
- */
-enum { ide_unknown, ide_generic, ide_pci,
- ide_cmd640, ide_dtc2278, ide_ali14xx,
- ide_qd65xx, ide_umc8672, ide_ht6560b,
- ide_4drives, ide_pmac, ide_acorn,
- ide_au1xxx, ide_palm3710
-};
-
-typedef u8 hwif_chipset_t;
-
-/*
- * Structure to hold all information about the location of this port
- */
-struct ide_hw {
- union {
- struct ide_io_ports io_ports;
- unsigned long io_ports_array[IDE_NR_PORTS];
- };
-
- int irq; /* our irq number */
- struct device *dev, *parent;
- unsigned long config;
-};
-
-static inline void ide_std_init_ports(struct ide_hw *hw,
- unsigned long io_addr,
- unsigned long ctl_addr)
-{
- unsigned int i;
-
- for (i = 0; i <= 7; i++)
- hw->io_ports_array[i] = io_addr++;
-
- hw->io_ports.ctl_addr = ctl_addr;
-}
-
-#define MAX_HWIFS 10
-
-/*
- * Now for the data we need to maintain per-drive: ide_drive_t
- */
-
-#define ide_scsi 0x21
-#define ide_disk 0x20
-#define ide_optical 0x7
-#define ide_cdrom 0x5
-#define ide_tape 0x1
-#define ide_floppy 0x0
-
-/*
- * Special Driver Flags
- */
-enum {
- IDE_SFLAG_SET_GEOMETRY = BIT(0),
- IDE_SFLAG_RECALIBRATE = BIT(1),
- IDE_SFLAG_SET_MULTMODE = BIT(2),
-};
-
-/*
- * Status returned from various ide_ functions
- */
-typedef enum {
- ide_stopped, /* no drive operation was started */
- ide_started, /* a drive operation was started, handler was set */
-} ide_startstop_t;
-
-enum {
- IDE_VALID_ERROR = BIT(1),
- IDE_VALID_FEATURE = IDE_VALID_ERROR,
- IDE_VALID_NSECT = BIT(2),
- IDE_VALID_LBAL = BIT(3),
- IDE_VALID_LBAM = BIT(4),
- IDE_VALID_LBAH = BIT(5),
- IDE_VALID_DEVICE = BIT(6),
- IDE_VALID_LBA = IDE_VALID_LBAL |
- IDE_VALID_LBAM |
- IDE_VALID_LBAH,
- IDE_VALID_OUT_TF = IDE_VALID_FEATURE |
- IDE_VALID_NSECT |
- IDE_VALID_LBA,
- IDE_VALID_IN_TF = IDE_VALID_NSECT |
- IDE_VALID_LBA,
- IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF,
- IDE_VALID_IN_HOB = IDE_VALID_ERROR |
- IDE_VALID_NSECT |
- IDE_VALID_LBA,
-};
-
-enum {
- IDE_TFLAG_LBA48 = BIT(0),
- IDE_TFLAG_WRITE = BIT(1),
- IDE_TFLAG_CUSTOM_HANDLER = BIT(2),
- IDE_TFLAG_DMA_PIO_FALLBACK = BIT(3),
- /* force 16-bit I/O operations */
- IDE_TFLAG_IO_16BIT = BIT(4),
- /* struct ide_cmd was allocated using kmalloc() */
- IDE_TFLAG_DYN = BIT(5),
- IDE_TFLAG_FS = BIT(6),
- IDE_TFLAG_MULTI_PIO = BIT(7),
- IDE_TFLAG_SET_XFER = BIT(8),
-};
-
-enum {
- IDE_FTFLAG_FLAGGED = BIT(0),
- IDE_FTFLAG_SET_IN_FLAGS = BIT(1),
- IDE_FTFLAG_OUT_DATA = BIT(2),
- IDE_FTFLAG_IN_DATA = BIT(3),
-};
-
-struct ide_taskfile {
- u8 data; /* 0: data byte (for TASKFILE ioctl) */
- union { /* 1: */
- u8 error; /* read: error */
- u8 feature; /* write: feature */
- };
- u8 nsect; /* 2: number of sectors */
- u8 lbal; /* 3: LBA low */
- u8 lbam; /* 4: LBA mid */
- u8 lbah; /* 5: LBA high */
- u8 device; /* 6: device select */
- union { /* 7: */
- u8 status; /* read: status */
- u8 command; /* write: command */
- };
-};
-
-struct ide_cmd {
- struct ide_taskfile tf;
- struct ide_taskfile hob;
- struct {
- struct {
- u8 tf;
- u8 hob;
- } out, in;
- } valid;
-
- u16 tf_flags;
- u8 ftf_flags; /* for TASKFILE ioctl */
- int protocol;
-
- int sg_nents; /* number of sg entries */
- int orig_sg_nents;
- int sg_dma_direction; /* DMA transfer direction */
-
- unsigned int nbytes;
- unsigned int nleft;
- unsigned int last_xfer_len;
-
- struct scatterlist *cursg;
- unsigned int cursg_ofs;
-
- struct request *rq; /* copy of request */
-};
-
-/* ATAPI packet command flags */
-enum {
- /* set when an error is considered normal - no retry (ide-tape) */
- PC_FLAG_ABORT = BIT(0),
- PC_FLAG_SUPPRESS_ERROR = BIT(1),
- PC_FLAG_WAIT_FOR_DSC = BIT(2),
- PC_FLAG_DMA_OK = BIT(3),
- PC_FLAG_DMA_IN_PROGRESS = BIT(4),
- PC_FLAG_DMA_ERROR = BIT(5),
- PC_FLAG_WRITING = BIT(6),
-};
-
-#define ATAPI_WAIT_PC (60 * HZ)
-
-struct ide_atapi_pc {
- /* actual packet bytes */
- u8 c[12];
- /* incremented on each retry */
- int retries;
- int error;
-
- /* bytes to transfer */
- int req_xfer;
-
- /* the corresponding request */
- struct request *rq;
-
- unsigned long flags;
-
- /*
-	 * these are more or less driver-specific and some of them are subject
- * to change/removal later.
- */
- unsigned long timeout;
-};
-
-struct ide_devset;
-struct ide_driver;
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
-struct ide_acpi_drive_link;
-struct ide_acpi_hwif_link;
-#endif
-
-struct ide_drive_s;
-
-struct ide_disk_ops {
- int (*check)(struct ide_drive_s *, const char *);
- int (*get_capacity)(struct ide_drive_s *);
- void (*unlock_native_capacity)(struct ide_drive_s *);
- void (*setup)(struct ide_drive_s *);
- void (*flush)(struct ide_drive_s *);
- int (*init_media)(struct ide_drive_s *, struct gendisk *);
- int (*set_doorlock)(struct ide_drive_s *, struct gendisk *,
- int);
- ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
- sector_t);
- int (*ioctl)(struct ide_drive_s *, struct block_device *,
- fmode_t, unsigned int, unsigned long);
- int (*compat_ioctl)(struct ide_drive_s *, struct block_device *,
- fmode_t, unsigned int, unsigned long);
-};
-
-/* ATAPI device flags */
-enum {
- IDE_AFLAG_DRQ_INTERRUPT = BIT(0),
-
- /* ide-cd */
- /* Drive cannot eject the disc. */
- IDE_AFLAG_NO_EJECT = BIT(1),
- /* Drive is a pre ATAPI 1.2 drive. */
- IDE_AFLAG_PRE_ATAPI12 = BIT(2),
- /* TOC addresses are in BCD. */
- IDE_AFLAG_TOCADDR_AS_BCD = BIT(3),
- /* TOC track numbers are in BCD. */
- IDE_AFLAG_TOCTRACKS_AS_BCD = BIT(4),
- /* Saved TOC information is current. */
- IDE_AFLAG_TOC_VALID = BIT(6),
- /* We think that the drive door is locked. */
- IDE_AFLAG_DOOR_LOCKED = BIT(7),
- /* SET_CD_SPEED command is unsupported. */
- IDE_AFLAG_NO_SPEED_SELECT = BIT(8),
- IDE_AFLAG_VERTOS_300_SSD = BIT(9),
- IDE_AFLAG_VERTOS_600_ESD = BIT(10),
- IDE_AFLAG_SANYO_3CD = BIT(11),
- IDE_AFLAG_FULL_CAPS_PAGE = BIT(12),
- IDE_AFLAG_PLAY_AUDIO_OK = BIT(13),
- IDE_AFLAG_LE_SPEED_FIELDS = BIT(14),
-
- /* ide-floppy */
- /* Avoid commands not supported in Clik drive */
- IDE_AFLAG_CLIK_DRIVE = BIT(15),
- /* Requires BH algorithm for packets */
- IDE_AFLAG_ZIP_DRIVE = BIT(16),
- /* Supports format progress report */
- IDE_AFLAG_SRFP = BIT(17),
-
- /* ide-tape */
- IDE_AFLAG_IGNORE_DSC = BIT(18),
-	/* 0 when the tape position is unknown */
- IDE_AFLAG_ADDRESS_VALID = BIT(19),
- /* Device already opened */
- IDE_AFLAG_BUSY = BIT(20),
- /* Attempt to auto-detect the current user block size */
- IDE_AFLAG_DETECT_BS = BIT(21),
- /* Currently on a filemark */
- IDE_AFLAG_FILEMARK = BIT(22),
- /* 0 = no tape is loaded, so we don't rewind after ejecting */
- IDE_AFLAG_MEDIUM_PRESENT = BIT(23),
-
- IDE_AFLAG_NO_AUTOCLOSE = BIT(24),
-};
-
-/* device flags */
-enum {
- /* restore settings after device reset */
- IDE_DFLAG_KEEP_SETTINGS = BIT(0),
- /* device is using DMA for read/write */
- IDE_DFLAG_USING_DMA = BIT(1),
- /* okay to unmask other IRQs */
- IDE_DFLAG_UNMASK = BIT(2),
- /* don't attempt flushes */
- IDE_DFLAG_NOFLUSH = BIT(3),
- /* DSC overlap */
- IDE_DFLAG_DSC_OVERLAP = BIT(4),
- /* give potential excess bandwidth */
- IDE_DFLAG_NICE1 = BIT(5),
- /* device is physically present */
- IDE_DFLAG_PRESENT = BIT(6),
- /* disable Host Protected Area */
- IDE_DFLAG_NOHPA = BIT(7),
- /* id read from device (synthetic if not set) */
- IDE_DFLAG_ID_READ = BIT(8),
- IDE_DFLAG_NOPROBE = BIT(9),
- /* need to do check_media_change() */
- IDE_DFLAG_REMOVABLE = BIT(10),
- IDE_DFLAG_FORCED_GEOM = BIT(12),
- /* disallow setting unmask bit */
- IDE_DFLAG_NO_UNMASK = BIT(13),
- /* disallow enabling 32-bit I/O */
- IDE_DFLAG_NO_IO_32BIT = BIT(14),
- /* for removable only: door lock/unlock works */
- IDE_DFLAG_DOORLOCKING = BIT(15),
- /* disallow DMA */
- IDE_DFLAG_NODMA = BIT(16),
-	/* power management told us not to do anything, so sleep nicely */
- IDE_DFLAG_BLOCKED = BIT(17),
- /* sleeping & sleep field valid */
- IDE_DFLAG_SLEEPING = BIT(18),
- IDE_DFLAG_POST_RESET = BIT(19),
- IDE_DFLAG_UDMA33_WARNED = BIT(20),
- IDE_DFLAG_LBA48 = BIT(21),
- /* status of write cache */
- IDE_DFLAG_WCACHE = BIT(22),
- /* used for ignoring ATA_DF */
- IDE_DFLAG_NOWERR = BIT(23),
- /* retrying in PIO */
- IDE_DFLAG_DMA_PIO_RETRY = BIT(24),
- IDE_DFLAG_LBA = BIT(25),
- /* don't unload heads */
- IDE_DFLAG_NO_UNLOAD = BIT(26),
- /* heads unloaded, please don't reset port */
- IDE_DFLAG_PARKED = BIT(27),
- IDE_DFLAG_MEDIA_CHANGED = BIT(28),
- /* write protect */
- IDE_DFLAG_WP = BIT(29),
- IDE_DFLAG_FORMAT_IN_PROGRESS = BIT(30),
- IDE_DFLAG_NIEN_QUIRK = BIT(31),
-};
-
-struct ide_drive_s {
- char name[4]; /* drive name, such as "hda" */
- char driver_req[10]; /* requests specific driver */
-
- struct request_queue *queue; /* request queue */
-
- bool (*prep_rq)(struct ide_drive_s *, struct request *);
-
- struct blk_mq_tag_set tag_set;
-
- struct request *rq; /* current request */
- void *driver_data; /* extra driver data */
- u16 *id; /* identification info */
-#ifdef CONFIG_IDE_PROC_FS
- struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
- const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */
-#endif
- struct hwif_s *hwif; /* actually (ide_hwif_t *) */
-
- const struct ide_disk_ops *disk_ops;
-
- unsigned long dev_flags;
-
- unsigned long sleep; /* sleep until this time */
- unsigned long timeout; /* max time to wait for irq */
-
- u8 special_flags; /* special action flags */
-
- u8 select; /* basic drive/head select reg value */
- u8 retry_pio; /* retrying dma capable host in pio */
- u8 waiting_for_dma; /* dma currently in progress */
- u8 dma; /* atapi dma flag */
-
- u8 init_speed; /* transfer rate set at boot */
- u8 current_speed; /* current transfer rate set */
- u8 desired_speed; /* desired transfer rate set */
- u8 pio_mode; /* for ->set_pio_mode _only_ */
- u8 dma_mode; /* for ->set_dma_mode _only_ */
-	u8	dn;		/* now widespread use */
- u8 acoustic; /* acoustic management */
- u8 media; /* disk, cdrom, tape, floppy, ... */
- u8 ready_stat; /* min status value for drive ready */
- u8 mult_count; /* current multiple sector setting */
- u8 mult_req; /* requested multiple sector setting */
- u8 io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
- u8 bad_wstat; /* used for ignoring ATA_DF */
- u8 head; /* "real" number of heads */
- u8 sect; /* "real" sectors per track */
- u8 bios_head; /* BIOS/fdisk/LILO number of heads */
- u8 bios_sect; /* BIOS/fdisk/LILO sectors per track */
-
- /* delay this long before sending packet command */
- u8 pc_delay;
-
- unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */
- unsigned int cyl; /* "real" number of cyls */
- void *drive_data; /* used by set_pio_mode/dev_select() */
- unsigned int failures; /* current failure count */
- unsigned int max_failures; /* maximum allowed failure count */
- u64 probed_capacity;/* initial/native media capacity */
- u64 capacity64; /* total number of sectors */
-
- int lun; /* logical unit */
- int crc_count; /* crc counter to reduce drive speed */
-
- unsigned long debug_mask; /* debugging levels switch */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
- struct ide_acpi_drive_link *acpidata;
-#endif
- struct list_head list;
- struct device gendev;
- struct completion gendev_rel_comp; /* to deal with device release() */
-
- /* current packet command */
- struct ide_atapi_pc *pc;
-
- /* last failed packet command */
- struct ide_atapi_pc *failed_pc;
-
- /* callback for packet commands */
- int (*pc_callback)(struct ide_drive_s *, int);
-
- ide_startstop_t (*irq_handler)(struct ide_drive_s *);
-
- unsigned long atapi_flags;
-
- struct ide_atapi_pc request_sense_pc;
-
- /* current sense rq and buffer */
- bool sense_rq_armed;
- bool sense_rq_active;
- struct request *sense_rq;
- struct request_sense sense_data;
-
- /* async sense insertion */
- struct work_struct rq_work;
- struct list_head rq_list;
-};
-
-typedef struct ide_drive_s ide_drive_t;
-
-#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev)
-
-#define to_ide_drv(obj, cont_type) \
- container_of(obj, struct cont_type, dev)
-
-#define ide_drv_g(disk, cont_type) \
- container_of((disk)->private_data, struct cont_type, driver)
-
-struct ide_port_info;
-
-struct ide_tp_ops {
- void (*exec_command)(struct hwif_s *, u8);
- u8 (*read_status)(struct hwif_s *);
- u8 (*read_altstatus)(struct hwif_s *);
- void (*write_devctl)(struct hwif_s *, u8);
-
- void (*dev_select)(ide_drive_t *);
- void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8);
- void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8);
-
- void (*input_data)(ide_drive_t *, struct ide_cmd *,
- void *, unsigned int);
- void (*output_data)(ide_drive_t *, struct ide_cmd *,
- void *, unsigned int);
-};
-
-extern const struct ide_tp_ops default_tp_ops;
-
-/**
- * struct ide_port_ops - IDE port operations
- *
- * @init_dev: host specific initialization of a device
- * @set_pio_mode: routine to program host for PIO mode
- * @set_dma_mode: routine to program host for DMA mode
- * @reset_poll: chipset polling based on HBA specifics
- * @pre_reset: chipset-specific changes to default for device-HBA resets
- * @resetproc: routine to reset controller after a disk reset
- * @maskproc: special host masking for drive selection
- * @quirkproc: check host's drive quirk list
- * @clear_irq: clear IRQ
- *
- * @mdma_filter: filter MDMA modes
- * @udma_filter: filter UDMA modes
- *
- * @cable_detect: detect cable type
- */
-struct ide_port_ops {
- void (*init_dev)(ide_drive_t *);
- void (*set_pio_mode)(struct hwif_s *, ide_drive_t *);
- void (*set_dma_mode)(struct hwif_s *, ide_drive_t *);
- blk_status_t (*reset_poll)(ide_drive_t *);
- void (*pre_reset)(ide_drive_t *);
- void (*resetproc)(ide_drive_t *);
- void (*maskproc)(ide_drive_t *, int);
- void (*quirkproc)(ide_drive_t *);
- void (*clear_irq)(ide_drive_t *);
- int (*test_irq)(struct hwif_s *);
-
- u8 (*mdma_filter)(ide_drive_t *);
- u8 (*udma_filter)(ide_drive_t *);
-
- u8 (*cable_detect)(struct hwif_s *);
-};
-
-struct ide_dma_ops {
- void (*dma_host_set)(struct ide_drive_s *, int);
- int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *);
- void (*dma_start)(struct ide_drive_s *);
- int (*dma_end)(struct ide_drive_s *);
- int (*dma_test_irq)(struct ide_drive_s *);
- void (*dma_lost_irq)(struct ide_drive_s *);
- /* below ones are optional */
- int (*dma_check)(struct ide_drive_s *, struct ide_cmd *);
- int (*dma_timer_expiry)(struct ide_drive_s *);
- void (*dma_clear)(struct ide_drive_s *);
- /*
- * The following method is optional and only required to be
- * implemented for the SFF-8038i compatible controllers.
- */
- u8 (*dma_sff_read_status)(struct hwif_s *);
-};
-
-enum {
- IDE_PFLAG_PROBING = BIT(0),
-};
-
-struct ide_host;
-
-typedef struct hwif_s {
- struct hwif_s *mate; /* other hwif from same PCI chip */
- struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
-
- struct ide_host *host;
-
- char name[6]; /* name of interface, eg. "ide0" */
-
- struct ide_io_ports io_ports;
-
- unsigned long sata_scr[SATA_NR_PORTS];
-
- ide_drive_t *devices[MAX_DRIVES + 1];
-
- unsigned long port_flags;
-
- u8 major; /* our major number */
- u8 index; /* 0 for ide0; 1 for ide1; ... */
- u8 channel; /* for dual-port chips: 0=primary, 1=secondary */
-
- u32 host_flags;
-
- u8 pio_mask;
-
- u8 ultra_mask;
- u8 mwdma_mask;
- u8 swdma_mask;
-
- u8 cbl; /* cable type */
-
- hwif_chipset_t chipset; /* sub-module for tuning.. */
-
- struct device *dev;
-
- void (*rw_disk)(ide_drive_t *, struct request *);
-
- const struct ide_tp_ops *tp_ops;
- const struct ide_port_ops *port_ops;
- const struct ide_dma_ops *dma_ops;
-
- /* dma physical region descriptor table (cpu view) */
- unsigned int *dmatable_cpu;
- /* dma physical region descriptor table (dma view) */
- dma_addr_t dmatable_dma;
-
- /* maximum number of PRD table entries */
- int prd_max_nents;
- /* PRD entry size in bytes */
- int prd_ent_size;
-
- /* Scatter-gather list used to build the above */
- struct scatterlist *sg_table;
- int sg_max_nents; /* Maximum number of entries in it */
-
- struct ide_cmd cmd; /* current command */
-
- int rqsize; /* max sectors per request */
- int irq; /* our irq number */
-
- unsigned long dma_base; /* base addr for dma ports */
-
- unsigned long config_data; /* for use by chipset-specific code */
- unsigned long select_data; /* for use by chipset-specific code */
-
- unsigned long extra_base; /* extra addr for dma ports */
- unsigned extra_ports; /* number of extra dma ports */
-
- unsigned present : 1; /* this interface exists */
- unsigned busy : 1; /* serializes devices on a port */
-
- struct device gendev;
- struct device *portdev;
-
- struct completion gendev_rel_comp; /* To deal with device release() */
-
- void *hwif_data; /* extra hwif data */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
- struct ide_acpi_hwif_link *acpidata;
-#endif
-
- /* IRQ handler, if active */
- ide_startstop_t (*handler)(ide_drive_t *);
-
- /* BOOL: polling active & poll_timeout field valid */
- unsigned int polling : 1;
-
- /* current drive */
- ide_drive_t *cur_dev;
-
- /* current request */
- struct request *rq;
-
- /* failsafe timer */
- struct timer_list timer;
- /* timeout value during long polls */
- unsigned long poll_timeout;
- /* queried upon timeouts */
- int (*expiry)(ide_drive_t *);
-
- int req_gen;
- int req_gen_timer;
-
- spinlock_t lock;
-} ____cacheline_internodealigned_in_smp ide_hwif_t;
-
-#define MAX_HOST_PORTS 4
-
-struct ide_host {
- ide_hwif_t *ports[MAX_HOST_PORTS + 1];
- unsigned int n_ports;
- struct device *dev[2];
-
- int (*init_chipset)(struct pci_dev *);
-
- void (*get_lock)(irq_handler_t, void *);
- void (*release_lock)(void);
-
- irq_handler_t irq_handler;
-
- unsigned long host_flags;
-
- int irq_flags;
-
- void *host_priv;
- ide_hwif_t *cur_port; /* for hosts requiring serialization */
-
- /* used for hosts requiring serialization */
- volatile unsigned long host_busy;
-};
-
-#define IDE_HOST_BUSY 0
-
-/*
- * internal ide interrupt handler type
- */
-typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
-typedef int (ide_expiry_t)(ide_drive_t *);
-
-/* used by ide-cd, ide-floppy, etc. */
-typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned);
-
-extern struct mutex ide_setting_mtx;
-
-/*
- * configurable drive settings
- */
-
-#define DS_SYNC BIT(0)
-
-struct ide_devset {
- int (*get)(ide_drive_t *);
- int (*set)(ide_drive_t *, int);
- unsigned int flags;
-};
-
-#define __DEVSET(_flags, _get, _set) { \
- .flags = _flags, \
- .get = _get, \
- .set = _set, \
-}
-
-#define ide_devset_get(name, field) \
-static int get_##name(ide_drive_t *drive) \
-{ \
- return drive->field; \
-}
-
-#define ide_devset_set(name, field) \
-static int set_##name(ide_drive_t *drive, int arg) \
-{ \
- drive->field = arg; \
- return 0; \
-}
-
-#define ide_devset_get_flag(name, flag) \
-static int get_##name(ide_drive_t *drive) \
-{ \
- return !!(drive->dev_flags & flag); \
-}
-
-#define ide_devset_set_flag(name, flag) \
-static int set_##name(ide_drive_t *drive, int arg) \
-{ \
- if (arg) \
- drive->dev_flags |= flag; \
- else \
- drive->dev_flags &= ~flag; \
- return 0; \
-}
-
-#define __IDE_DEVSET(_name, _flags, _get, _set) \
-const struct ide_devset ide_devset_##_name = \
- __DEVSET(_flags, _get, _set)
-
-#define IDE_DEVSET(_name, _flags, _get, _set) \
-static __IDE_DEVSET(_name, _flags, _get, _set)
-
-#define ide_devset_rw(_name, _func) \
-IDE_DEVSET(_name, 0, get_##_func, set_##_func)
-
-#define ide_devset_w(_name, _func) \
-IDE_DEVSET(_name, 0, NULL, set_##_func)
-
-#define ide_ext_devset_rw(_name, _func) \
-__IDE_DEVSET(_name, 0, get_##_func, set_##_func)
-
-#define ide_ext_devset_rw_sync(_name, _func) \
-__IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func)
-
-#define ide_decl_devset(_name) \
-extern const struct ide_devset ide_devset_##_name
-
-ide_decl_devset(io_32bit);
-ide_decl_devset(keepsettings);
-ide_decl_devset(pio_mode);
-ide_decl_devset(unmaskirq);
-ide_decl_devset(using_dma);
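For reference, a sketch of what these helpers expand to for a simple field-backed setting (io_32bit is used purely as an illustration; the real io_32bit setter is more involved than a plain field store):

        /* ide_devset_get(io_32bit, io_32bit) expands to roughly: */
        static int get_io_32bit(ide_drive_t *drive)
        {
                return drive->io_32bit;
        }

        /* ide_devset_rw(io_32bit, io_32bit) then defines the descriptor: */
        static const struct ide_devset ide_devset_io_32bit =
                __DEVSET(0, get_io_32bit, set_io_32bit);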
-
-#ifdef CONFIG_IDE_PROC_FS
-/*
- * /proc/ide interface
- */
-
-#define ide_devset_rw_field(_name, _field) \
-ide_devset_get(_name, _field); \
-ide_devset_set(_name, _field); \
-IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
-
-#define ide_devset_ro_field(_name, _field) \
-ide_devset_get(_name, _field); \
-IDE_DEVSET(_name, 0, get_##_name, NULL)
-
-#define ide_devset_rw_flag(_name, _field) \
-ide_devset_get_flag(_name, _field); \
-ide_devset_set_flag(_name, _field); \
-IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
-
-struct ide_proc_devset {
- const char *name;
- const struct ide_devset *setting;
- int min, max;
- int (*mulf)(ide_drive_t *);
- int (*divf)(ide_drive_t *);
-};
-
-#define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \
- .name = __stringify(_name), \
- .setting = &ide_devset_##_name, \
- .min = _min, \
- .max = _max, \
- .mulf = _mulf, \
- .divf = _divf, \
-}
-
-#define IDE_PROC_DEVSET(_name, _min, _max) \
-__IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)
-
-typedef struct {
- const char *name;
- umode_t mode;
- int (*show)(struct seq_file *, void *);
-} ide_proc_entry_t;
-
-void proc_ide_create(void);
-void proc_ide_destroy(void);
-void ide_proc_register_port(ide_hwif_t *);
-void ide_proc_port_register_devices(ide_hwif_t *);
-void ide_proc_unregister_device(ide_drive_t *);
-void ide_proc_unregister_port(ide_hwif_t *);
-void ide_proc_register_driver(ide_drive_t *, struct ide_driver *);
-void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *);
-
-int ide_capacity_proc_show(struct seq_file *m, void *v);
-int ide_geometry_proc_show(struct seq_file *m, void *v);
-#else
-static inline void proc_ide_create(void) { ; }
-static inline void proc_ide_destroy(void) { ; }
-static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
-static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
-static inline void ide_proc_register_driver(ide_drive_t *drive,
- struct ide_driver *driver) { ; }
-static inline void ide_proc_unregister_driver(ide_drive_t *drive,
- struct ide_driver *driver) { ; }
-#endif
-
-enum {
- /* enter/exit functions */
- IDE_DBG_FUNC = BIT(0),
- /* sense key/asc handling */
- IDE_DBG_SENSE = BIT(1),
- /* packet commands handling */
- IDE_DBG_PC = BIT(2),
- /* request handling */
- IDE_DBG_RQ = BIT(3),
- /* driver probing/setup */
- IDE_DBG_PROBE = BIT(4),
-};
-
-/* DRV_NAME has to be defined in the driver before using the macro below */
-#define __ide_debug_log(lvl, fmt, args...) \
-{ \
- if (unlikely(drive->debug_mask & lvl)) \
- printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \
- __func__, ## args); \
-}
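Usage sketch (the DRV_NAME value and the wrapper name are illustrative; the macro expects a local ide_drive_t *drive in scope):

        #define DRV_NAME "ide-cd"
        #define ide_debug_log(lvl, fmt, args...) __ide_debug_log(lvl, fmt, ## args)

        /* inside a function that has ide_drive_t *drive in scope: */
        ide_debug_log(IDE_DBG_FUNC, "enter");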
-
-/*
- * Power Management state machine (rq->pm->pm_step).
- *
- * For each step, the core calls ide_start_power_step() first.
- * This can return:
- *	- ide_stopped :	In this case, the core calls us back again unless
- *			step has been set to IDE_PM_COMPLETED.
- * - ide_started : In this case, the channel is left busy until an
- * async event (interrupt) occurs.
- * Typically, ide_start_power_step() will issue a taskfile request with
- * do_rw_taskfile().
- *
- * Upon reception of the interrupt, the core will call ide_complete_power_step()
- * with the error code if any. This routine should update the step value
- * and return. It should not start a new request. The core will call
- * ide_start_power_step() for the new step value, unless step has been
- * set to IDE_PM_COMPLETED.
- */
-enum {
- IDE_PM_START_SUSPEND,
- IDE_PM_FLUSH_CACHE = IDE_PM_START_SUSPEND,
- IDE_PM_STANDBY,
-
- IDE_PM_START_RESUME,
- IDE_PM_RESTORE_PIO = IDE_PM_START_RESUME,
- IDE_PM_IDLE,
- IDE_PM_RESTORE_DMA,
-
- IDE_PM_COMPLETED,
-};
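A simplified sketch of the resulting core loop (pseudocode of the contract described above, not the literal ide-pm implementation):

        while (pm->pm_step != IDE_PM_COMPLETED) {
                ide_startstop_t ret = ide_start_power_step(drive, rq);

                if (ret == ide_started)
                        return;         /* IRQ path calls ide_complete_power_step() */
                ide_complete_power_step(drive, rq);     /* advances pm_step */
        }
        ide_complete_pm_rq(drive, rq);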
-
-int generic_ide_suspend(struct device *, pm_message_t);
-int generic_ide_resume(struct device *);
-
-void ide_complete_power_step(ide_drive_t *, struct request *);
-ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
-void ide_complete_pm_rq(ide_drive_t *, struct request *);
-void ide_check_pm_state(ide_drive_t *, struct request *);
-
-/*
- * Subdrivers support.
- *
- * The gen_driver.owner field should be set to the module owner of this driver.
- * The gen_driver.name field should be set to the name of this driver.
- */
-struct ide_driver {
- const char *version;
- ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
- struct device_driver gen_driver;
- int (*probe)(ide_drive_t *);
- void (*remove)(ide_drive_t *);
- void (*resume)(ide_drive_t *);
- void (*shutdown)(ide_drive_t *);
-#ifdef CONFIG_IDE_PROC_FS
- ide_proc_entry_t * (*proc_entries)(ide_drive_t *);
- const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *);
-#endif
-};
-
-#define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver)
-
-int ide_device_get(ide_drive_t *);
-void ide_device_put(ide_drive_t *);
-
-struct ide_ioctl_devset {
- unsigned int get_ioctl;
- unsigned int set_ioctl;
- const struct ide_devset *setting;
-};
-
-int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
- unsigned long, const struct ide_ioctl_devset *);
-
-int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long);
-
-extern int ide_vlb_clk;
-extern int ide_pci_clk;
-
-int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int);
-void ide_kill_rq(ide_drive_t *, struct request *);
-void ide_insert_request_head(ide_drive_t *, struct request *);
-
-void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
-void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
-
-void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *,
- unsigned int);
-
-void ide_pad_transfer(ide_drive_t *, int, int);
-
-ide_startstop_t ide_error(ide_drive_t *, const char *, u8);
-
-void ide_fix_driveid(u16 *);
-
-extern void ide_fixstring(u8 *, const int, const int);
-
-int ide_busy_sleep(ide_drive_t *, unsigned long, int);
-
-int __ide_wait_stat(ide_drive_t *, u8, u8, unsigned long, u8 *);
-int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
-
-ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *);
-ide_startstop_t ide_do_devset(ide_drive_t *, struct request *);
-
-extern ide_startstop_t ide_do_reset(ide_drive_t *);
-
-extern int ide_devset_execute(ide_drive_t *drive,
- const struct ide_devset *setting, int arg);
-
-void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
-int ide_complete_rq(ide_drive_t *, blk_status_t, unsigned int);
-
-void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd);
-void ide_tf_dump(const char *, struct ide_cmd *);
-
-void ide_exec_command(ide_hwif_t *, u8);
-u8 ide_read_status(ide_hwif_t *);
-u8 ide_read_altstatus(ide_hwif_t *);
-void ide_write_devctl(ide_hwif_t *, u8);
-
-void ide_dev_select(ide_drive_t *);
-void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8);
-void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8);
-
-void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
-void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
-
-void SELECT_MASK(ide_drive_t *, int);
-
-u8 ide_read_error(ide_drive_t *);
-void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);
-
-int ide_check_ireason(ide_drive_t *, struct request *, int, int, int);
-
-int ide_check_atapi_device(ide_drive_t *, const char *);
-
-void ide_init_pc(struct ide_atapi_pc *);
-
-/* Disk head parking */
-extern wait_queue_head_t ide_park_wq;
-ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
- char *buf);
-ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len);
-
-/*
- * Special requests for ide-tape block device strategy routine.
- *
- * In order to service a character device command, we add special requests to
- * the tail of our block device request queue and wait for their completion.
- */
-enum {
- REQ_IDETAPE_PC1 = BIT(0), /* packet command (first stage) */
- REQ_IDETAPE_PC2 = BIT(1), /* packet command (second stage) */
- REQ_IDETAPE_READ = BIT(2),
- REQ_IDETAPE_WRITE = BIT(3),
-};
-
-int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *,
- void *, unsigned int);
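A minimal sketch of servicing a character-device command this way (the opcode and buffer size are illustrative):

        struct ide_atapi_pc pc;
        u8 buf[255];

        ide_init_pc(&pc);
        pc.c[0] = 0x1a;                 /* e.g. MODE SENSE(6), for illustration */
        pc.req_xfer = sizeof(buf);

        if (ide_queue_pc_tail(drive, disk, &pc, buf, pc.req_xfer))
                return -EIO;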
-
-int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
-int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
-int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
-void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
-void ide_retry_pc(ide_drive_t *drive);
-
-void ide_prep_sense(ide_drive_t *drive, struct request *rq);
-int ide_queue_sense_rq(ide_drive_t *drive, void *special);
-
-int ide_cd_expiry(ide_drive_t *);
-
-int ide_cd_get_xferlen(struct request *);
-
-ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *);
-
-ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *);
-
-void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int);
-
-void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8);
-
-int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16);
-int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *);
-
-int ide_taskfile_ioctl(ide_drive_t *, unsigned long);
-
-int ide_dev_read_id(ide_drive_t *, u8, u16 *, int);
-
-extern int ide_driveid_update(ide_drive_t *);
-extern int ide_config_drive_speed(ide_drive_t *, u8);
-extern u8 eighty_ninty_three(ide_drive_t *);
-extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *);
-
-extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout);
-
-extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
-
-extern void ide_timer_expiry(struct timer_list *t);
-extern irqreturn_t ide_intr(int irq, void *dev_id);
-extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
-extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
-extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
-
-void ide_init_disk(struct gendisk *, ide_drive_t *);
-
-#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
-extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
-#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
-#else
-#define ide_pci_register_driver(d) pci_register_driver(d)
-#endif
-
-static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
-{
- if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5)
- return 1;
- return 0;
-}
-
-void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *,
- struct ide_hw *, struct ide_hw **);
-void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
-int ide_pci_set_master(struct pci_dev *, const char *);
-unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
-int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
-int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
-#else
-static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
- const struct ide_port_info *d)
-{
- return -EINVAL;
-}
-#endif
-
-struct ide_pci_enablebit {
- u8 reg; /* byte pci reg holding the enable-bit */
- u8 mask; /* mask to isolate the enable-bit */
- u8 val; /* value of masked reg when "enabled" */
-};
-
-enum {
- /* Uses ISA control ports not PCI ones. */
- IDE_HFLAG_ISA_PORTS = BIT(0),
- /* single port device */
- IDE_HFLAG_SINGLE = BIT(1),
- /* don't use legacy PIO blacklist */
- IDE_HFLAG_PIO_NO_BLACKLIST = BIT(2),
- /* set for the second port of QD65xx */
- IDE_HFLAG_QD_2ND_PORT = BIT(3),
- /* use PIO8/9 for prefetch off/on */
- IDE_HFLAG_ABUSE_PREFETCH = BIT(4),
- /* use PIO6/7 for fast-devsel off/on */
- IDE_HFLAG_ABUSE_FAST_DEVSEL = BIT(5),
- /* use 100-102 and 200-202 PIO values to set DMA modes */
- IDE_HFLAG_ABUSE_DMA_MODES = BIT(6),
- /*
- * keep DMA setting when programming PIO mode, may be used only
-	 * for hosts which have separate PIO and DMA timings (i.e. PMAC)
- */
- IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = BIT(7),
- /* program host for the transfer mode after programming device */
- IDE_HFLAG_POST_SET_MODE = BIT(8),
- /* don't program host/device for the transfer mode ("smart" hosts) */
- IDE_HFLAG_NO_SET_MODE = BIT(9),
- /* trust BIOS for programming chipset/device for DMA */
- IDE_HFLAG_TRUST_BIOS_FOR_DMA = BIT(10),
- /* host is CS5510/CS5520 */
- IDE_HFLAG_CS5520 = BIT(11),
- /* ATAPI DMA is unsupported */
- IDE_HFLAG_NO_ATAPI_DMA = BIT(12),
- /* set if host is a "non-bootable" controller */
- IDE_HFLAG_NON_BOOTABLE = BIT(13),
- /* host doesn't support DMA */
- IDE_HFLAG_NO_DMA = BIT(14),
- /* check if host is PCI IDE device before allowing DMA */
- IDE_HFLAG_NO_AUTODMA = BIT(15),
- /* host uses MMIO */
- IDE_HFLAG_MMIO = BIT(16),
- /* no LBA48 */
- IDE_HFLAG_NO_LBA48 = BIT(17),
- /* no LBA48 DMA */
- IDE_HFLAG_NO_LBA48_DMA = BIT(18),
- /* data FIFO is cleared by an error */
- IDE_HFLAG_ERROR_STOPS_FIFO = BIT(19),
- /* serialize ports */
- IDE_HFLAG_SERIALIZE = BIT(20),
- /* host is DTC2278 */
- IDE_HFLAG_DTC2278 = BIT(21),
- /* 4 devices on a single set of I/O ports */
- IDE_HFLAG_4DRIVES = BIT(22),
- /* host is TRM290 */
- IDE_HFLAG_TRM290 = BIT(23),
- /* use 32-bit I/O ops */
- IDE_HFLAG_IO_32BIT = BIT(24),
- /* unmask IRQs */
- IDE_HFLAG_UNMASK_IRQS = BIT(25),
- IDE_HFLAG_BROKEN_ALTSTATUS = BIT(26),
- /* serialize ports if DMA is possible (for sl82c105) */
- IDE_HFLAG_SERIALIZE_DMA = BIT(27),
- /* force host out of "simplex" mode */
- IDE_HFLAG_CLEAR_SIMPLEX = BIT(28),
- /* DSC overlap is unsupported */
- IDE_HFLAG_NO_DSC = BIT(29),
- /* never use 32-bit I/O ops */
- IDE_HFLAG_NO_IO_32BIT = BIT(30),
- /* never unmask IRQs */
- IDE_HFLAG_NO_UNMASK_IRQS = BIT(31),
-};
-
-#ifdef CONFIG_BLK_DEV_OFFBOARD
-# define IDE_HFLAG_OFF_BOARD 0
-#else
-# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE
-#endif
-
-struct ide_port_info {
- char *name;
-
- int (*init_chipset)(struct pci_dev *);
-
- void (*get_lock)(irq_handler_t, void *);
- void (*release_lock)(void);
-
- void (*init_iops)(ide_hwif_t *);
- void (*init_hwif)(ide_hwif_t *);
- int (*init_dma)(ide_hwif_t *,
- const struct ide_port_info *);
-
- const struct ide_tp_ops *tp_ops;
- const struct ide_port_ops *port_ops;
- const struct ide_dma_ops *dma_ops;
-
- struct ide_pci_enablebit enablebits[2];
-
- hwif_chipset_t chipset;
-
-	u16			max_sectors; /* if smaller than the default one */
-
- u32 host_flags;
-
- int irq_flags;
-
- u8 pio_mask;
- u8 swdma_mask;
- u8 mwdma_mask;
- u8 udma_mask;
-};
-
-/*
- * State information carried for REQ_TYPE_ATA_PM_SUSPEND and REQ_TYPE_ATA_PM_RESUME
- * requests.
- */
-struct ide_pm_state {
- /* PM state machine step value, currently driver specific */
- int pm_step;
- /* requested PM state value (S1, S2, S3, S4, ...) */
- u32 pm_state;
-	void *data;			/* for driver use */
-};
-
-
-int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
-int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
- const struct ide_port_info *, void *);
-void ide_pci_remove(struct pci_dev *);
-
-#ifdef CONFIG_PM
-int ide_pci_suspend(struct pci_dev *, pm_message_t);
-int ide_pci_resume(struct pci_dev *);
-#else
-#define ide_pci_suspend NULL
-#define ide_pci_resume NULL
-#endif
-
-void ide_map_sg(ide_drive_t *, struct ide_cmd *);
-void ide_init_sg_cmd(struct ide_cmd *, unsigned int);
-
-#define BAD_DMA_DRIVE 0
-#define GOOD_DMA_DRIVE 1
-
-struct drive_list_entry {
- const char *id_model;
- const char *id_firmware;
-};
-
-int ide_in_drive_list(u16 *, const struct drive_list_entry *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA
-int ide_dma_good_drive(ide_drive_t *);
-int __ide_dma_bad_drive(ide_drive_t *);
-
-u8 ide_find_dma_mode(ide_drive_t *, u8);
-
-static inline u8 ide_max_dma_mode(ide_drive_t *drive)
-{
- return ide_find_dma_mode(drive, XFER_UDMA_6);
-}
-
-void ide_dma_off_quietly(ide_drive_t *);
-void ide_dma_off(ide_drive_t *);
-void ide_dma_on(ide_drive_t *);
-int ide_set_dma(ide_drive_t *);
-void ide_check_dma_crc(ide_drive_t *);
-ide_startstop_t ide_dma_intr(ide_drive_t *);
-
-int ide_allocate_dma_engine(ide_hwif_t *);
-void ide_release_dma_engine(ide_hwif_t *);
-
-int ide_dma_prepare(ide_drive_t *, struct ide_cmd *);
-void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *);
-
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
-int config_drive_for_dma(ide_drive_t *);
-int ide_build_dmatable(ide_drive_t *, struct ide_cmd *);
-void ide_dma_host_set(ide_drive_t *, int);
-int ide_dma_setup(ide_drive_t *, struct ide_cmd *);
-extern void ide_dma_start(ide_drive_t *);
-int ide_dma_end(ide_drive_t *);
-int ide_dma_test_irq(ide_drive_t *);
-int ide_dma_sff_timer_expiry(ide_drive_t *);
-u8 ide_dma_sff_read_status(ide_hwif_t *);
-extern const struct ide_dma_ops sff_dma_ops;
-#else
-static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
-#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
-
-void ide_dma_lost_irq(ide_drive_t *);
-ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
-
-#else
-static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
-static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
-static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
-static inline void ide_dma_off(ide_drive_t *drive) { ; }
-static inline void ide_dma_on(ide_drive_t *drive) { ; }
-static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
-static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
-static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
-static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; }
-static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
-static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
-static inline int ide_dma_prepare(ide_drive_t *drive,
- struct ide_cmd *cmd) { return 1; }
-static inline void ide_dma_unmap_sg(ide_drive_t *drive,
- struct ide_cmd *cmd) { ; }
-#endif /* CONFIG_BLK_DEV_IDEDMA */
-
-#ifdef CONFIG_BLK_DEV_IDEACPI
-int ide_acpi_init(void);
-bool ide_port_acpi(ide_hwif_t *hwif);
-extern int ide_acpi_exec_tfs(ide_drive_t *drive);
-extern void ide_acpi_get_timing(ide_hwif_t *hwif);
-extern void ide_acpi_push_timing(ide_hwif_t *hwif);
-void ide_acpi_init_port(ide_hwif_t *);
-void ide_acpi_port_init_devices(ide_hwif_t *);
-extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
-#else
-static inline int ide_acpi_init(void) { return 0; }
-static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; }
-static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
-static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; }
-static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
-#endif
-
-void ide_check_nien_quirk_list(ide_drive_t *);
-void ide_undecoded_slave(ide_drive_t *);
-
-void ide_port_apply_params(ide_hwif_t *);
-int ide_sysfs_register_port(ide_hwif_t *);
-
-struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **,
- unsigned int);
-void ide_host_free(struct ide_host *);
-int ide_host_register(struct ide_host *, const struct ide_port_info *,
- struct ide_hw **);
-int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int,
- struct ide_host **);
-void ide_host_remove(struct ide_host *);
-int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
-void ide_port_unregister_devices(ide_hwif_t *);
-void ide_port_scan(ide_hwif_t *);
-
-static inline void *ide_get_hwifdata(ide_hwif_t *hwif)
-{
-	return hwif->hwif_data;
-}
-
-static inline void ide_set_hwifdata(ide_hwif_t *hwif, void *data)
-{
-	hwif->hwif_data = data;
-}
-
-u64 ide_get_lba_addr(struct ide_cmd *, int);
-u8 ide_dump_status(ide_drive_t *, const char *, u8);
-
-struct ide_timing {
- u8 mode;
- u8 setup; /* t1 */
- u16 act8b; /* t2 for 8-bit io */
- u16 rec8b; /* t2i for 8-bit io */
- u16 cyc8b; /* t0 for 8-bit io */
- u16 active; /* t2 or tD */
- u16 recover; /* t2i or tK */
- u16 cycle; /* t0 */
- u16 udma; /* t2CYCTYP/2 */
-};
-
-enum {
- IDE_TIMING_SETUP = BIT(0),
- IDE_TIMING_ACT8B = BIT(1),
- IDE_TIMING_REC8B = BIT(2),
- IDE_TIMING_CYC8B = BIT(3),
- IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
- IDE_TIMING_CYC8B,
- IDE_TIMING_ACTIVE = BIT(4),
- IDE_TIMING_RECOVER = BIT(5),
- IDE_TIMING_CYCLE = BIT(6),
- IDE_TIMING_UDMA = BIT(7),
- IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT |
- IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
- IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
-};
-
-struct ide_timing *ide_timing_find_mode(u8);
-u16 ide_pio_cycle_time(ide_drive_t *, u8);
-void ide_timing_merge(struct ide_timing *, struct ide_timing *,
- struct ide_timing *, unsigned int);
-int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
-
-#ifdef CONFIG_IDE_XFER_MODE
-int ide_scan_pio_blacklist(char *);
-const char *ide_xfer_verbose(u8);
-int ide_pio_need_iordy(ide_drive_t *, const u8);
-int ide_set_pio_mode(ide_drive_t *, u8);
-int ide_set_dma_mode(ide_drive_t *, u8);
-void ide_set_pio(ide_drive_t *, u8);
-int ide_set_xfer_rate(ide_drive_t *, u8);
-#else
-static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; }
-static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; }
-#endif
-
-static inline void ide_set_max_pio(ide_drive_t *drive)
-{
- ide_set_pio(drive, 255);
-}
-
-char *ide_media_string(ide_drive_t *);
-
-extern const struct attribute_group *ide_dev_groups[];
-extern struct bus_type ide_bus_type;
-extern struct class *ide_port_class;
-
-static inline void ide_dump_identify(u8 *id)
-{
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0);
-}
-
-static inline int hwif_to_node(ide_hwif_t *hwif)
-{
- return hwif->dev ? dev_to_node(hwif->dev) : -1;
-}
-
-static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
-{
- ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1];
-
- return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL;
-}
-
-static inline void *ide_get_drivedata(ide_drive_t *drive)
-{
- return drive->drive_data;
-}
-
-static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
-{
- drive->drive_data = data;
-}
-
-#define ide_port_for_each_dev(i, dev, port) \
- for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++)
-
-#define ide_port_for_each_present_dev(i, dev, port) \
- for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \
- if ((dev)->dev_flags & IDE_DFLAG_PRESENT)
-
-#define ide_host_for_each_port(i, port, host) \
- for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
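Typical iteration, e.g. in a teardown path (sketch; host ports may be NULL and must be skipped):

        ide_hwif_t *port;
        ide_drive_t *drive;
        int i, j;

        ide_host_for_each_port(i, port, host) {
                if (port == NULL)
                        continue;
                ide_port_for_each_present_dev(j, drive, port)
                        printk(KERN_INFO "%s: %s present\n",
                               port->name, drive->name);
        }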
-
-
-#endif /* _IDE_H */
diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h
index fb88e23a99d3..a85d5dd40f72 100644
--- a/include/linux/idle_inject.h
+++ b/include/linux/idle_inject.h
@@ -13,6 +13,9 @@ struct idle_inject_device;
struct idle_inject_device *idle_inject_register(struct cpumask *cpumask);
+struct idle_inject_device *idle_inject_register_full(struct cpumask *cpumask,
+ bool (*update)(void));
+
void idle_inject_unregister(struct idle_inject_device *ii_dev);
int idle_inject_start(struct idle_inject_device *ii_dev);
diff --git a/include/linux/idr.h b/include/linux/idr.h
index a0dce14090a9..789e23e67444 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -15,6 +15,7 @@
#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
+#include <linux/cleanup.h>
struct idr {
struct radix_tree_root idr_rt;
@@ -124,6 +125,22 @@ void *idr_get_next_ul(struct idr *, unsigned long *nextid);
void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);
+struct __class_idr {
+ struct idr *idr;
+ int id;
+};
+
+#define idr_null ((struct __class_idr){ NULL, -1 })
+#define take_idr_id(id) __get_and_null(id, idr_null)
+
+DEFINE_CLASS(idr_alloc, struct __class_idr,
+ if (_T.id >= 0) idr_remove(_T.idr, _T.id),
+ ((struct __class_idr){
+ .idr = idr,
+ .id = idr_alloc(idr, ptr, start, end, gfp),
+ }),
+ struct idr *idr, void *ptr, int start, int end, gfp_t gfp);
+
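The DEFINE_CLASS() above enables scope-managed ID allocation via cleanup.h; an illustrative sketch (my_idr and obj are hypothetical names):

        CLASS(idr_alloc, id)(&my_idr, obj, 0, 0, GFP_KERNEL);
        if (id.id < 0)
                return id.id;
        /* any early error return from here on removes the ID automatically */
        obj->id = take_idr_id(id).id;   /* success: keep the ID, disarm cleanup */
        return 0;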
/**
* idr_init_base() - Initialise an IDR.
* @idr: IDR handle.
@@ -200,7 +217,7 @@ static inline void idr_preload_end(void)
*/
#define idr_for_each_entry_ul(idr, entry, tmp, id) \
for (tmp = 0, id = 0; \
- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
tmp = id, ++id)
/**
@@ -224,10 +241,12 @@ static inline void idr_preload_end(void)
* @id: Entry ID.
*
* Continue to iterate over entries, continuing after the current position.
+ * After normal termination @entry is left with the value NULL. This
+ * is convenient for a "not found" value.
*/
#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
for (tmp = id; \
- tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+ ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
tmp = id, ++id)
/*
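The guarantee that @entry is NULL after normal termination supports lookup-style loops; a sketch with hypothetical names:

        struct foo *entry;
        unsigned long tmp, id = 0;

        idr_for_each_entry_ul(&my_idr, entry, tmp, id)
                if (entry->key == target)
                        break;
        if (!entry)
                return -ENOENT; /* loop completed without a match */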
@@ -255,6 +274,7 @@ struct ida {
int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
void ida_free(struct ida *, unsigned int id);
void ida_destroy(struct ida *ida);
+int ida_find_first_range(struct ida *ida, unsigned int min, unsigned int max);
/**
* ida_alloc() - Allocate an unused ID.
@@ -314,16 +334,18 @@ static inline void ida_init(struct ida *ida)
xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
}
-/*
- * ida_simple_get() and ida_simple_remove() are deprecated. Use
- * ida_alloc() and ida_free() instead respectively.
- */
-#define ida_simple_get(ida, start, end, gfp) \
- ida_alloc_range(ida, start, (end) - 1, gfp)
-#define ida_simple_remove(ida, id) ida_free(ida, id)
-
static inline bool ida_is_empty(const struct ida *ida)
{
return xa_empty(&ida->xa);
}
+
+static inline bool ida_exists(struct ida *ida, unsigned int id)
+{
+ return ida_find_first_range(ida, id, id) == id;
+}
+
+static inline int ida_find_first(struct ida *ida)
+{
+ return ida_find_first_range(ida, 0, ~0);
+}
#endif /* __IDR_H__ */
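The new ida_exists()/ida_find_first() helpers allow simple occupancy queries; a short sketch (my_ida is hypothetical):

        static DEFINE_IDA(my_ida);

        int id = ida_alloc(&my_ida, GFP_KERNEL);

        if (id >= 0) {
                WARN_ON(!ida_exists(&my_ida, id));      /* just allocated */
                ida_free(&my_ida, id);
        }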
diff --git a/include/linux/ieee80211-eht.h b/include/linux/ieee80211-eht.h
new file mode 100644
index 000000000000..f9782e46c5e5
--- /dev/null
+++ b/include/linux/ieee80211-eht.h
@@ -0,0 +1,1182 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 EHT definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_EHT_H
+#define LINUX_IEEE80211_EHT_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+/* need HE definitions for the inlines here */
+#include <linux/ieee80211-he.h>
+
+#define IEEE80211_TTLM_MAX_CNT 2
+#define IEEE80211_TTLM_CONTROL_DIRECTION 0x03
+#define IEEE80211_TTLM_CONTROL_DEF_LINK_MAP 0x04
+#define IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT 0x08
+#define IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT 0x10
+#define IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE 0x20
+
+#define IEEE80211_TTLM_DIRECTION_DOWN 0
+#define IEEE80211_TTLM_DIRECTION_UP 1
+#define IEEE80211_TTLM_DIRECTION_BOTH 2
+
+/**
+ * struct ieee80211_ttlm_elem - TID-To-Link Mapping element
+ *
+ * Defined in section 9.4.2.314 in P802.11be_D4
+ *
+ * @control: the first part of control field
+ * @optional: the second part of control field
+ */
+struct ieee80211_ttlm_elem {
+ u8 control;
+ u8 optional[];
+} __packed;
+
+#define IEEE80211_EHT_MCS_NSS_RX 0x0f
+#define IEEE80211_EHT_MCS_NSS_TX 0xf0
+
+/**
+ * struct ieee80211_eht_mcs_nss_supp_20mhz_only - EHT 20MHz only station max
+ * supported NSS per MCS.
+ *
+ * For each field below, bits 0 - 3 indicate the maximal number of spatial
+ * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams
+ * for Tx.
+ *
+ * @rx_tx_mcs7_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 0 - 7.
+ * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 8 - 9.
+ * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 10 - 11.
+ * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 12 - 13.
+ * @rx_tx_max_nss: array of the previous fields for easier loop access
+ */
+struct ieee80211_eht_mcs_nss_supp_20mhz_only {
+ union {
+ struct {
+ u8 rx_tx_mcs7_max_nss;
+ u8 rx_tx_mcs9_max_nss;
+ u8 rx_tx_mcs11_max_nss;
+ u8 rx_tx_mcs13_max_nss;
+ };
+ u8 rx_tx_max_nss[4];
+ };
+};
+
+/**
+ * struct ieee80211_eht_mcs_nss_supp_bw - EHT max supported NSS per MCS (except
+ * 20MHz only stations).
+ *
+ * For each field below, bits 0 - 3 indicate the maximal number of spatial
+ * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams
+ * for Tx.
+ *
+ * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 0 - 9.
+ * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 10 - 11.
+ * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams
+ * supported for reception and the maximum number of spatial streams
+ * supported for transmission for MCS 12 - 13.
+ * @rx_tx_max_nss: array of the previous fields for easier loop access
+ */
+struct ieee80211_eht_mcs_nss_supp_bw {
+ union {
+ struct {
+ u8 rx_tx_mcs9_max_nss;
+ u8 rx_tx_mcs11_max_nss;
+ u8 rx_tx_mcs13_max_nss;
+ };
+ u8 rx_tx_max_nss[3];
+ };
+};
+
+/**
+ * struct ieee80211_eht_cap_elem_fixed - EHT capabilities fixed data
+ *
+ * This structure is the "EHT Capabilities element" fixed fields as
+ * described in P802.11be_D2.0 section 9.4.2.313.
+ *
+ * @mac_cap_info: MAC capabilities, see IEEE80211_EHT_MAC_CAP*
+ * @phy_cap_info: PHY capabilities, see IEEE80211_EHT_PHY_CAP*
+ */
+struct ieee80211_eht_cap_elem_fixed {
+ u8 mac_cap_info[2];
+ u8 phy_cap_info[9];
+} __packed;
+
+/**
+ * struct ieee80211_eht_cap_elem - EHT capabilities element
+ * @fixed: fixed parts, see &ieee80211_eht_cap_elem_fixed
+ * @optional: optional parts
+ */
+struct ieee80211_eht_cap_elem {
+ struct ieee80211_eht_cap_elem_fixed fixed;
+
+ /*
+ * Followed by:
+ * Supported EHT-MCS And NSS Set field: 4, 3, 6 or 9 octets.
+ * EHT PPE Thresholds field: variable length.
+ */
+ u8 optional[];
+} __packed;
+
+#define IEEE80211_EHT_OPER_INFO_PRESENT 0x01
+#define IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT 0x02
+#define IEEE80211_EHT_OPER_EHT_DEF_PE_DURATION 0x04
+#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_LIMIT 0x08
+#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_EXP_MASK 0x30
+#define IEEE80211_EHT_OPER_MCS15_DISABLE 0x40
+
+/**
+ * struct ieee80211_eht_operation - eht operation element
+ *
+ * This structure is the "EHT Operation Element" fields as
+ * described in P802.11be_D2.0 section 9.4.2.311
+ *
+ * @params: EHT operation element parameters. See &IEEE80211_EHT_OPER_*
+ * @basic_mcs_nss: indicates the EHT-MCSs for each number of spatial streams in
+ * EHT PPDUs that are supported by all EHT STAs in the BSS in transmit and
+ * receive.
+ * @optional: optional parts
+ */
+struct ieee80211_eht_operation {
+ u8 params;
+ struct ieee80211_eht_mcs_nss_supp_20mhz_only basic_mcs_nss;
+ u8 optional[];
+} __packed;
+
+/**
+ * struct ieee80211_eht_operation_info - eht operation information
+ *
+ * @control: EHT operation information control.
+ * @ccfs0: defines a channel center frequency for a 20, 40, 80, 160, or 320 MHz
+ * EHT BSS.
+ * @ccfs1: defines a channel center frequency for a 160 or 320 MHz EHT BSS.
+ * @optional: optional parts
+ */
+struct ieee80211_eht_operation_info {
+ u8 control;
+ u8 ccfs0;
+ u8 ccfs1;
+ u8 optional[];
+} __packed;
+
+/* EHT MAC capabilities as defined in P802.11be_D2.0 section 9.4.2.313.2 */
+#define IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS 0x01
+#define IEEE80211_EHT_MAC_CAP0_OM_CONTROL 0x02
+#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 0x04
+#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 0x08
+#define IEEE80211_EHT_MAC_CAP0_RESTRICTED_TWT 0x10
+#define IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC 0x20
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK 0xc0
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_3895 0
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991 1
+#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454 2
+
+#define IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK 0x01
+#define IEEE80211_EHT_MAC_CAP1_EHT_TRS 0x02
+#define IEEE80211_EHT_MAC_CAP1_TXOP_RET 0x04
+#define IEEE80211_EHT_MAC_CAP1_TWO_BQRS 0x08
+#define IEEE80211_EHT_MAC_CAP1_EHT_LINK_ADAPT_MASK 0x30
+#define IEEE80211_EHT_MAC_CAP1_UNSOL_EPCS_PRIO_ACCESS 0x40
+
+/* EHT PHY capabilities as defined in P802.11be_D2.0 section 9.4.2.313.3 */
+#define IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ 0x02
+#define IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ 0x04
+#define IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI 0x08
+#define IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO 0x10
+#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER 0x20
+#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE 0x40
+
+/* EHT beamformee number of spatial streams <= 80MHz is split */
+#define IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK 0x80
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK 0x03
+
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK 0x1c
+#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK 0xe0
+
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK 0x07
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK 0x38
+
+/* EHT number of sounding dimensions for 320MHz is split */
+#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK 0xc0
+#define IEEE80211_EHT_PHY_CAP3_SOUNDING_DIM_320MHZ_MASK 0x01
+#define IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK 0x02
+#define IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK 0x04
+#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK 0x08
+#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK 0x10
+#define IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK 0x20
+#define IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK 0x40
+#define IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK 0x80
+
+#define IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO 0x01
+#define IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP 0x02
+#define IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP 0x04
+#define IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI 0x08
+#define IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK 0xf0
+
+#define IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK 0x01
+#define IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP 0x02
+#define IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP 0x04
+#define IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT 0x08
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK 0x30
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US 0
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US 1
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US 2
+#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US 3
+
+/* Maximum number of supported EHT LTF is split */
+#define IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK 0xc0
+#define IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF 0x40
+#define IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK 0x07
+
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_80MHZ 0x08
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_160MHZ 0x30
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_320MHZ 0x40
+#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK 0x78
+#define IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP 0x80
+
+#define IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW 0x01
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ 0x02
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ 0x04
+#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ 0x08
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ 0x10
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ 0x20
+#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ 0x40
+#define IEEE80211_EHT_PHY_CAP7_TB_SOUNDING_FDBK_RATE_LIMIT 0x80
+
+#define IEEE80211_EHT_PHY_CAP8_RX_1024QAM_WIDER_BW_DL_OFDMA 0x01
+#define IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA 0x02
+
+/*
+ * EHT operation channel width as defined in P802.11be_D2.0 section 9.4.2.311
+ */
+#define IEEE80211_EHT_OPER_CHAN_WIDTH 0x7
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_20MHZ 0
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_40MHZ 1
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ 2
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ 3
+#define IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ 4
+
+/* Calculate 802.11be EHT capabilities IE Tx/Rx EHT MCS NSS Support Field size */
+static inline u8
+ieee80211_eht_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap,
+ const struct ieee80211_eht_cap_elem_fixed *eht_cap,
+ bool from_ap)
+{
+ u8 count = 0;
+
+ /* on 2.4 GHz, if it supports 40 MHz, the result is 3 */
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)
+ return 3;
+
+ /* on 2.4 GHz, these three bits are reserved, so should be 0 */
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)
+ count += 3;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ count += 3;
+
+ if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
+ count += 3;
+
+ if (count)
+ return count;
+
+ return from_ap ? 3 : 4;
+}
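Worked example: a 5 GHz STA advertising both the 40/80 MHz and 160 MHz HE channel-width bits, without 320 MHz EHT support, yields count = 3 + 3 = 6 octets; a 2.4 GHz STA without the 40 MHz bit falls through to the 20 MHz-only format of 4 octets (3 octets when parsing capabilities transmitted by an AP).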
+
+/* 802.11be EHT PPE Thresholds */
+#define IEEE80211_EHT_PPE_THRES_NSS_POS 0
+#define IEEE80211_EHT_PPE_THRES_NSS_MASK 0xf
+#define IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK 0x1f0
+#define IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE 3
+#define IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE 9
+
+/*
+ * Calculate 802.11be EHT capabilities IE EHT field size
+ */
+static inline u8
+ieee80211_eht_ppe_size(u16 ppe_thres_hdr, const u8 *phy_cap_info)
+{
+ u32 n;
+
+ if (!(phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT))
+ return 0;
+
+ n = hweight16(ppe_thres_hdr &
+ IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n *= 1 + u16_get_bits(ppe_thres_hdr, IEEE80211_EHT_PPE_THRES_NSS_MASK);
+
+ /*
+ * Each pair is 6 bits, and we need to add the 9 "header" bits to the
+ * total size.
+ */
+ n = n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2 +
+ IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
+ return DIV_ROUND_UP(n, 8);
+}
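Worked example: a PPE thresholds header with an NSS field of 1 and two bits set in the RU index bitmask gives n = 2 * (1 + 1) = 4 threshold pairs, so n = 4 * 3 * 2 + 9 = 33 bits, which DIV_ROUND_UP() pads to 5 octets.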
+
+static inline bool
+ieee80211_eht_capa_size_ok(const u8 *he_capa, const u8 *data, u8 len,
+ bool from_ap)
+{
+ const struct ieee80211_eht_cap_elem_fixed *elem = (const void *)data;
+ u8 needed = sizeof(struct ieee80211_eht_cap_elem_fixed);
+
+ if (len < needed || !he_capa)
+ return false;
+
+ needed += ieee80211_eht_mcs_nss_size((const void *)he_capa,
+ (const void *)data,
+ from_ap);
+ if (len < needed)
+ return false;
+
+ if (elem->phy_cap_info[5] &
+ IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
+ u16 ppe_thres_hdr;
+
+ if (len < needed + sizeof(ppe_thres_hdr))
+ return false;
+
+ ppe_thres_hdr = get_unaligned_le16(data + needed);
+ needed += ieee80211_eht_ppe_size(ppe_thres_hdr,
+ elem->phy_cap_info);
+ }
+
+ return len >= needed;
+}
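+
+/*
+ * Illustrative usage sketch; "he_capa", "data" and "len" are
+ * hypothetical variables holding the HE capabilities payload and the
+ * EHT capabilities element payload/length:
+ *
+ *	if (ieee80211_eht_capa_size_ok(he_capa, data, len, false)) {
+ *		const struct ieee80211_eht_cap_elem_fixed *eht =
+ *			(const void *)data;
+ *
+ *		... parse eht->mac_cap_info / eht->phy_cap_info ...
+ *	}
+ */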
+
+static inline bool
+ieee80211_eht_oper_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_eht_operation *elem = (const void *)data;
+ u8 needed = sizeof(*elem);
+
+ if (len < needed)
+ return false;
+
+ if (elem->params & IEEE80211_EHT_OPER_INFO_PRESENT) {
+ needed += 3;
+
+ if (elem->params &
+ IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)
+ needed += 2;
+ }
+
+ return len >= needed;
+}
+
+/* the caller must validate ieee80211_eht_oper_size_ok() first */
+static inline u16
+ieee80211_eht_oper_dis_subchan_bitmap(const struct ieee80211_eht_operation *eht_oper)
+{
+ const struct ieee80211_eht_operation_info *info =
+ (const void *)eht_oper->optional;
+
+ if (!(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT))
+ return 0;
+
+ if (!(eht_oper->params & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT))
+ return 0;
+
+ return get_unaligned_le16(info->optional);
+}
+
+#define IEEE80211_BW_IND_DIS_SUBCH_PRESENT BIT(1)
+
+struct ieee80211_bandwidth_indication {
+ u8 params;
+ struct ieee80211_eht_operation_info info;
+} __packed;
+
+static inline bool
+ieee80211_bandwidth_indication_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_bandwidth_indication *bwi = (const void *)data;
+
+ if (len < sizeof(*bwi))
+ return false;
+
+ if (bwi->params & IEEE80211_BW_IND_DIS_SUBCH_PRESENT &&
+ len < sizeof(*bwi) + 2)
+ return false;
+
+ return true;
+}
+
+/* Protected EHT action codes */
+enum ieee80211_protected_eht_actioncode {
+ WLAN_PROTECTED_EHT_ACTION_TTLM_REQ = 0,
+ WLAN_PROTECTED_EHT_ACTION_TTLM_RES = 1,
+ WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN = 2,
+ WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_REQ = 3,
+ WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_RESP = 4,
+ WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_TEARDOWN = 5,
+ WLAN_PROTECTED_EHT_ACTION_EML_OP_MODE_NOTIF = 6,
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECOMMEND = 7,
+ WLAN_PROTECTED_EHT_ACTION_ML_OP_UPDATE_REQ = 8,
+ WLAN_PROTECTED_EHT_ACTION_ML_OP_UPDATE_RESP = 9,
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_NOTIF = 10,
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_REQ = 11,
+ WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_RESP = 12,
+};
+
+/* multi-link device */
+#define IEEE80211_MLD_MAX_NUM_LINKS 15
+
+#define IEEE80211_ML_CONTROL_TYPE 0x0007
+#define IEEE80211_ML_CONTROL_TYPE_BASIC 0
+#define IEEE80211_ML_CONTROL_TYPE_PREQ 1
+#define IEEE80211_ML_CONTROL_TYPE_RECONF 2
+#define IEEE80211_ML_CONTROL_TYPE_TDLS 3
+#define IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS 4
+#define IEEE80211_ML_CONTROL_PRESENCE_MASK 0xfff0
+
+struct ieee80211_multi_link_elem {
+ __le16 control;
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_BASIC_PRES_LINK_ID 0x0010
+#define IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT 0x0020
+#define IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY 0x0040
+#define IEEE80211_MLC_BASIC_PRES_EML_CAPA 0x0080
+#define IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP 0x0100
+#define IEEE80211_MLC_BASIC_PRES_MLD_ID 0x0200
+#define IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP 0x0400
+
+#define IEEE80211_MED_SYNC_DELAY_DURATION 0x00ff
+#define IEEE80211_MED_SYNC_DELAY_SYNC_OFDM_ED_THRESH 0x0f00
+#define IEEE80211_MED_SYNC_DELAY_SYNC_MAX_NUM_TXOPS 0xf000
+
+/*
+ * Described in P802.11be_D3.0
+ * dot11MSDTimerDuration should default to 5484 usec (i.e. 171.375 in
+ * units of 32 usec, encoded rounded up as 172)
+ * dot11MSDOFDMEDthreshold defaults to -72 dBm (i.e. 0, the subfield
+ * encodes the offset from -72 dBm)
+ * dot11MSDTXOPMAX defaults to 1
+ */
+#define IEEE80211_MED_SYNC_DELAY_DEFAULT 0x10ac
+
+#define IEEE80211_EML_CAP_EMLSR_SUPP 0x0001
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY 0x000e
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US 1
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_64US 2
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_128US 3
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US 4
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY 0x0070
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_16US 1
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_32US 2
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US 3
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_128US 4
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US 5
+#define IEEE80211_EML_CAP_EMLMR_SUPPORT 0x0080
+#define IEEE80211_EML_CAP_EMLMR_DELAY 0x0700
+#define IEEE80211_EML_CAP_EMLMR_DELAY_0US 0
+#define IEEE80211_EML_CAP_EMLMR_DELAY_32US 1
+#define IEEE80211_EML_CAP_EMLMR_DELAY_64US 2
+#define IEEE80211_EML_CAP_EMLMR_DELAY_128US 3
+#define IEEE80211_EML_CAP_EMLMR_DELAY_256US 4
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT 0x7800
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_0 0
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128US 1
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_256US 2
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_512US 3
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_1TU 4
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_2TU 5
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_4TU 6
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_8TU 7
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_16TU 8
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_32TU 9
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_64TU 10
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU 11
+
+#define IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS 0x000f
+#define IEEE80211_MLD_CAP_OP_SRS_SUPPORT 0x0010
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP 0x0060
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_NO_SUPP 0
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME 1
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_RESERVED 2
+#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_DIFF 3
+#define IEEE80211_MLD_CAP_OP_FREQ_SEP_TYPE_IND 0x0f80
+#define IEEE80211_MLD_CAP_OP_AAR_SUPPORT 0x1000
+#define IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT 0x2000
+#define IEEE80211_MLD_CAP_OP_ALIGNED_TWT_SUPPORT 0x4000
+
+struct ieee80211_mle_basic_common_info {
+ u8 len;
+ u8 mld_mac_addr[ETH_ALEN];
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_PREQ_PRES_MLD_ID 0x0010
+
+struct ieee80211_mle_preq_common_info {
+ u8 len;
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR 0x0010
+#define IEEE80211_MLC_RECONF_PRES_EML_CAPA 0x0020
+#define IEEE80211_MLC_RECONF_PRES_MLD_CAPA_OP 0x0040
+#define IEEE80211_MLC_RECONF_PRES_EXT_MLD_CAPA_OP 0x0080
+
+/* no fixed fields in RECONF */
+
+struct ieee80211_mle_tdls_common_info {
+ u8 len;
+ u8 ap_mld_mac_addr[ETH_ALEN];
+} __packed;
+
+#define IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR 0x0010
+
+/* no fixed fields in PRIO_ACCESS */
+
+/**
+ * ieee80211_mle_common_size - check multi-link element common size
+ * @data: multi-link element, must already be checked for size using
+ * ieee80211_mle_size_ok()
+ * Return: the size of the multi-link element's "common" subfield
+ */
+static inline u8 ieee80211_mle_common_size(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+
+ switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ case IEEE80211_ML_CONTROL_TYPE_PREQ:
+ case IEEE80211_ML_CONTROL_TYPE_TDLS:
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ /*
+		 * The length is the first octet pointed to by mle->variable, so no
+ * need to add anything
+ */
+ break;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return sizeof(*mle) + mle->variable[0];
+}
+
+/**
+ * ieee80211_mle_get_link_id - returns the link ID
+ * @data: the basic multi link element
+ * Return: the link ID, or -1 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline int ieee80211_mle_get_link_id(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common points now at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_LINK_ID))
+ return -1;
+
+ return *common;
+}
+
+/**
+ * ieee80211_mle_get_bss_param_ch_cnt - returns the BSS parameter change count
+ * @data: pointer to the basic multi link element
+ * Return: the BSS Parameter Change Count field value, or -1 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline int
+ieee80211_mle_get_bss_param_ch_cnt(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common points now at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))
+ return -1;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+
+ return *common;
+}
+
+/**
+ * ieee80211_mle_get_eml_med_sync_delay - returns the medium sync delay
+ * @data: pointer to the multi-link element
+ * Return: the medium synchronization delay field value from the multi-link
+ * element, or the default value (%IEEE80211_MED_SYNC_DELAY_DEFAULT)
+ * if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common points now at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY))
+ return IEEE80211_MED_SYNC_DELAY_DEFAULT;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+
+ return get_unaligned_le16(common);
+}
+
+/**
+ * ieee80211_mle_get_eml_cap - returns the EML capability
+ * @data: pointer to the multi-link element
+ * Return: the EML capability field value from the multi-link element,
+ * or 0 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u16 ieee80211_mle_get_eml_cap(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /* common points now at the beginning of ieee80211_mle_basic_common_info */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_EML_CAPA))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+
+ return get_unaligned_le16(common);
+}
+
+/**
+ * ieee80211_mle_get_mld_capa_op - returns the MLD capabilities and operations.
+ * @data: pointer to the multi-link element
+ * Return: the MLD capabilities and operations field value from the multi-link
+ * element, or 0 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /*
+ * common points now at the beginning of
+ * ieee80211_mle_basic_common_info
+ */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+
+ return get_unaligned_le16(common);
+}
+
+/* Defined in Figure 9-1074t in P802.11be_D7.0 */
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_OP_PARAM_UPDATE 0x0001
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_OP_RECO_MAX_LINKS_MASK 0x001e
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_NSTR_UPDATE 0x0020
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_EMLSR_ENA_ON_ONE_LINK 0x0040
+#define IEEE80211_EHT_ML_EXT_MLD_CAPA_BTM_MLD_RECO_MULTI_AP 0x0080
+
+/**
+ * ieee80211_mle_get_ext_mld_capa_op - returns the extended MLD capabilities
+ * and operations.
+ * @data: pointer to the multi-link element
+ * Return: the extended MLD capabilities and operations field value from
+ * the multi-link element, or 0 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u16 ieee80211_mle_get_ext_mld_capa_op(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /*
+ * common points now at the beginning of
+ * ieee80211_mle_basic_common_info
+ */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID)
+ common += 1;
+
+ return get_unaligned_le16(common);
+}
+
+/**
+ * ieee80211_mle_get_mld_id - returns the MLD ID
+ * @data: pointer to the multi-link element
+ * Return: The MLD ID in the given multi-link element, or 0 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u8 ieee80211_mle_get_mld_id(const u8 *data)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control = le16_to_cpu(mle->control);
+ const u8 *common = mle->variable;
+
+ /*
+ * common points now at the beginning of
+ * ieee80211_mle_basic_common_info
+ */
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+
+ if (!(control & IEEE80211_MLC_BASIC_PRES_MLD_ID))
+ return 0;
+
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+ common += 2;
+
+ return *common;
+}
+
+/**
+ * ieee80211_mle_size_ok - validate multi-link element size
+ * @data: pointer to the element data
+ * @len: length of the containing element
+ * Return: whether or not the multi-link element size is OK
+ */
+static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u8 fixed = sizeof(*mle);
+ u8 common = 0;
+ bool check_common_len = false;
+ u16 control;
+
+ if (!data || len < fixed)
+ return false;
+
+ control = le16_to_cpu(mle->control);
+
+ switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) {
+ case IEEE80211_ML_CONTROL_TYPE_BASIC:
+ common += sizeof(struct ieee80211_mle_basic_common_info);
+ check_common_len = true;
+ if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+ common += 2;
+ if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID)
+ common += 1;
+ if (control & IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP)
+ common += 2;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_PREQ:
+ common += sizeof(struct ieee80211_mle_preq_common_info);
+ if (control & IEEE80211_MLC_PREQ_PRES_MLD_ID)
+ common += 1;
+ check_common_len = true;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_RECONF:
+ if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR)
+ common += ETH_ALEN;
+ if (control & IEEE80211_MLC_RECONF_PRES_EML_CAPA)
+ common += 2;
+ if (control & IEEE80211_MLC_RECONF_PRES_MLD_CAPA_OP)
+ common += 2;
+ if (control & IEEE80211_MLC_RECONF_PRES_EXT_MLD_CAPA_OP)
+ common += 2;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_TDLS:
+ common += sizeof(struct ieee80211_mle_tdls_common_info);
+ check_common_len = true;
+ break;
+ case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS:
+ common = ETH_ALEN + 1;
+ break;
+ default:
+ /* we don't know this type */
+ return true;
+ }
+
+ if (len < fixed + common)
+ return false;
+
+ if (!check_common_len)
+ return true;
+
+ /* if present, common length is the first octet there */
+ return mle->variable[0] >= common;
+}
+
+/**
+ * ieee80211_mle_type_ok - validate multi-link element type and size
+ * @data: pointer to the element data
+ * @type: expected type of the element
+ * @len: length of the containing element
+ * Return: whether or not the multi-link element type matches and size is OK
+ */
+static inline bool ieee80211_mle_type_ok(const u8 *data, u8 type, size_t len)
+{
+ const struct ieee80211_multi_link_elem *mle = (const void *)data;
+ u16 control;
+
+ if (!ieee80211_mle_size_ok(data, len))
+ return false;
+
+ control = le16_to_cpu(mle->control);
+
+ if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) == type)
+ return true;
+
+ return false;
+}
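+
+/*
+ * Illustrative usage sketch: validate the element type and size once,
+ * then use the accessors; "data" and "len" are hypothetical variables
+ * holding the multi-link element payload and its length:
+ *
+ *	if (ieee80211_mle_type_ok(data, IEEE80211_ML_CONTROL_TYPE_BASIC,
+ *				  len)) {
+ *		int link_id = ieee80211_mle_get_link_id(data);
+ *		u16 eml_cap = ieee80211_mle_get_eml_cap(data);
+ *
+ *		...
+ *	}
+ */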
+
+enum ieee80211_mle_subelems {
+ IEEE80211_MLE_SUBELEM_PER_STA_PROFILE = 0,
+ IEEE80211_MLE_SUBELEM_FRAGMENT = 254,
+};
+
+#define IEEE80211_MLE_STA_CONTROL_LINK_ID 0x000f
+#define IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE 0x0010
+#define IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT 0x0020
+#define IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT 0x0040
+#define IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT 0x0080
+#define IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT 0x0100
+#define IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT 0x0200
+#define IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE 0x0400
+#define IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT 0x0800
+
+struct ieee80211_mle_per_sta_profile {
+ __le16 control;
+ u8 sta_info_len;
+ u8 variable[];
+} __packed;
+
+/**
+ * ieee80211_mle_basic_sta_prof_size_ok - validate basic multi-link element sta
+ * profile size
+ * @data: pointer to the sub element data
+ * @len: length of the containing sub element
+ * Return: %true if the STA profile is large enough, %false otherwise
+ */
+static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data,
+ size_t len)
+{
+ const struct ieee80211_mle_per_sta_profile *prof = (const void *)data;
+ u16 control;
+ u8 fixed = sizeof(*prof);
+ u8 info_len = 1;
+
+ if (len < fixed)
+ return false;
+
+ control = le16_to_cpu(prof->control);
+
+ if (control & IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT)
+ info_len += 6;
+ if (control & IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT)
+ info_len += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT)
+ info_len += 8;
+ if (control & IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT)
+ info_len += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE &&
+ control & IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT) {
+ if (control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE)
+ info_len += 2;
+ else
+ info_len += 1;
+ }
+ if (control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT)
+ info_len += 1;
+
+ return prof->sta_info_len >= info_len &&
+ fixed + prof->sta_info_len - 1 <= len;
+}
+
+/**
+ * ieee80211_mle_basic_sta_prof_bss_param_ch_cnt - get per-STA profile BSS
+ * parameter change count
+ * @prof: the per-STA profile, having been checked with
+ * ieee80211_mle_basic_sta_prof_size_ok() for the correct length
+ *
+ * Return: The BSS parameter change count value if present, 0 otherwise.
+ */
+static inline u8
+ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(const struct ieee80211_mle_per_sta_profile *prof)
+{
+ u16 control = le16_to_cpu(prof->control);
+ const u8 *pos = prof->variable;
+
+ if (!(control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT))
+ return 0;
+
+ if (control & IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT)
+ pos += 6;
+ if (control & IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT)
+ pos += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT)
+ pos += 8;
+ if (control & IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT)
+ pos += 2;
+ if (control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE &&
+ control & IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT) {
+ if (control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE)
+ pos += 2;
+ else
+ pos += 1;
+ }
+
+ return *pos;
+}
+
+#define IEEE80211_MLE_STA_RECONF_CONTROL_LINK_ID 0x000f
+#define IEEE80211_MLE_STA_RECONF_CONTROL_COMPLETE_PROFILE 0x0010
+#define IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT 0x0020
+#define IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT 0x0040
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE 0x0780
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_AP_REM 0
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_OP_PARAM_UPDATE 1
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_ADD_LINK 2
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_DEL_LINK 3
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_NSTR_STATUS 4
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT 0x0800
+
+/**
+ * ieee80211_mle_reconf_sta_prof_size_ok - validate reconfiguration multi-link
+ * element sta profile size.
+ * @data: pointer to the sub element data
+ * @len: length of the containing sub element
+ * Return: %true if the STA profile is large enough, %false otherwise
+ */
+static inline bool ieee80211_mle_reconf_sta_prof_size_ok(const u8 *data,
+ size_t len)
+{
+ const struct ieee80211_mle_per_sta_profile *prof = (const void *)data;
+ u16 control;
+ u8 fixed = sizeof(*prof);
+ u8 info_len = 1;
+
+ if (len < fixed)
+ return false;
+
+ control = le16_to_cpu(prof->control);
+
+ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT)
+ info_len += ETH_ALEN;
+ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT)
+ info_len += 2;
+ if (control & IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT)
+ info_len += 2;
+
+ return prof->sta_info_len >= info_len &&
+ fixed + prof->sta_info_len - 1 <= len;
+}
+
+#define IEEE80211_MLE_STA_EPCS_CONTROL_LINK_ID 0x000f
+#define IEEE80211_EPCS_ENA_RESP_BODY_LEN 3
+
+static inline bool ieee80211_tid_to_link_map_size_ok(const u8 *data, size_t len)
+{
+ const struct ieee80211_ttlm_elem *t2l = (const void *)data;
+ u8 control, fixed = sizeof(*t2l), elem_len = 0;
+
+ if (len < fixed)
+ return false;
+
+ control = t2l->control;
+
+ if (control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT)
+ elem_len += 2;
+ if (control & IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT)
+ elem_len += 3;
+
+ if (!(control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP)) {
+ u8 bm_size;
+
+ elem_len += 1;
+ if (len < fixed + elem_len)
+ return false;
+
+ if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
+ bm_size = 1;
+ else
+ bm_size = 2;
+
+ elem_len += hweight8(t2l->optional[0]) * bm_size;
+ }
+
+ return len >= fixed + elem_len;
+}
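+
+/*
+ * Illustrative example: with the switch time present, a non-default
+ * link mapping, 1-octet link mapping subfields and a link mapping
+ * presence indicator of 0x05 (two TIDs mapped), the element needs
+ * 2 + 1 + 2 * 1 = 5 octets beyond the fixed fields.
+ */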
+
+/**
+ * ieee80211_emlsr_pad_delay_in_us - Fetch the EMLSR Padding delay
+ * in microseconds
+ * @eml_cap: EML capabilities field value from common info field of
+ * the Multi-link element
+ * Return: the EMLSR Padding delay (in microseconds) encoded in the
+ * EML Capabilities field
+ */
+static inline u32 ieee80211_emlsr_pad_delay_in_us(u16 eml_cap)
+{
+ /* IEEE Std 802.11be-2024 Table 9-417i—Encoding of the EMLSR
+ * Padding Delay subfield.
+ */
+ u32 pad_delay = u16_get_bits(eml_cap,
+ IEEE80211_EML_CAP_EMLSR_PADDING_DELAY);
+
+ if (!pad_delay ||
+ pad_delay > IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US)
+ return 0;
+
+ return 32 * (1 << (pad_delay - 1));
+}
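+
+/*
+ * Illustrative example: a padding delay subfield value of
+ * IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_128US (i.e. 3) yields
+ * 32 * (1 << 2) = 128 microseconds.
+ */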
+
+/**
+ * ieee80211_emlsr_trans_delay_in_us - Fetch the EMLSR Transition
+ * delay in microseconds
+ * @eml_cap: EML capabilities field value from common info field of
+ * the Multi-link element
+ * Return: the EMLSR Transition delay (in microseconds) encoded in the
+ * EML Capabilities field
+ */
+static inline u32 ieee80211_emlsr_trans_delay_in_us(u16 eml_cap)
+{
+ /* IEEE Std 802.11be-2024 Table 9-417j—Encoding of the EMLSR
+ * Transition Delay subfield.
+ */
+ u32 trans_delay =
+ u16_get_bits(eml_cap,
+ IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY);
+
+ /* invalid values also just use 0 */
+ if (!trans_delay ||
+ trans_delay > IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US)
+ return 0;
+
+ return 16 * (1 << (trans_delay - 1));
+}
+
+/**
+ * ieee80211_eml_trans_timeout_in_us - Fetch the EMLSR Transition
+ * timeout value in microseconds
+ * @eml_cap: EML capabilities field value from common info field of
+ * the Multi-link element
+ * Return: the EMLSR Transition timeout (in microseconds) encoded in
+ * the EML Capabilities field
+ */
+static inline u32 ieee80211_eml_trans_timeout_in_us(u16 eml_cap)
+{
+ /* IEEE Std 802.11be-2024 Table 9-417m—Encoding of the
+ * Transition Timeout subfield.
+ */
+ u8 timeout = u16_get_bits(eml_cap,
+ IEEE80211_EML_CAP_TRANSITION_TIMEOUT);
+
+ /* invalid values also just use 0 */
+ if (!timeout || timeout > IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU)
+ return 0;
+
+ return 128 * (1 << (timeout - 1));
+}
+
+#define for_each_mle_subelement(_elem, _data, _len) \
+ if (ieee80211_mle_size_ok(_data, _len)) \
+ for_each_element(_elem, \
+ _data + ieee80211_mle_common_size(_data),\
+ _len - ieee80211_mle_common_size(_data))
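+
+/*
+ * Illustrative usage sketch: iterate the sub-elements following the
+ * common info; "mle_data" and "mle_len" are hypothetical variables
+ * holding a size-validated multi-link element payload and length:
+ *
+ *	const struct element *sub;
+ *
+ *	for_each_mle_subelement(sub, mle_data, mle_len) {
+ *		if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE)
+ *			continue;
+ *		if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data,
+ *							  sub->datalen))
+ *			continue;
+ *		...
+ *	}
+ */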
+
+#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/ieee80211-he.h b/include/linux/ieee80211-he.h
new file mode 100644
index 000000000000..a08c446fbb04
--- /dev/null
+++ b/include/linux/ieee80211-he.h
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 HE definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_HE_H
+#define LINUX_IEEE80211_HE_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define IEEE80211_TWT_CONTROL_NDP BIT(0)
+#define IEEE80211_TWT_CONTROL_RESP_MODE BIT(1)
+#define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST BIT(3)
+#define IEEE80211_TWT_CONTROL_RX_DISABLED BIT(4)
+#define IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT BIT(5)
+
+#define IEEE80211_TWT_REQTYPE_REQUEST BIT(0)
+#define IEEE80211_TWT_REQTYPE_SETUP_CMD GENMASK(3, 1)
+#define IEEE80211_TWT_REQTYPE_TRIGGER BIT(4)
+#define IEEE80211_TWT_REQTYPE_IMPLICIT BIT(5)
+#define IEEE80211_TWT_REQTYPE_FLOWTYPE BIT(6)
+#define IEEE80211_TWT_REQTYPE_FLOWID GENMASK(9, 7)
+#define IEEE80211_TWT_REQTYPE_WAKE_INT_EXP GENMASK(14, 10)
+#define IEEE80211_TWT_REQTYPE_PROTECTION BIT(15)
+
+enum ieee80211_twt_setup_cmd {
+ TWT_SETUP_CMD_REQUEST,
+ TWT_SETUP_CMD_SUGGEST,
+ TWT_SETUP_CMD_DEMAND,
+ TWT_SETUP_CMD_GROUPING,
+ TWT_SETUP_CMD_ACCEPT,
+ TWT_SETUP_CMD_ALTERNATE,
+ TWT_SETUP_CMD_DICTATE,
+ TWT_SETUP_CMD_REJECT,
+};
+
+struct ieee80211_twt_params {
+ __le16 req_type;
+ __le64 twt;
+ u8 min_twt_dur;
+ __le16 mantissa;
+ u8 channel;
+} __packed;
+
+struct ieee80211_twt_setup {
+ u8 dialog_token;
+ u8 element_id;
+ u8 length;
+ u8 control;
+ u8 params[];
+} __packed;
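+
+/*
+ * Illustrative sketch: decode the request type of a TWT element;
+ * "twt" is a hypothetical, size-validated pointer to
+ * struct ieee80211_twt_params:
+ *
+ *	u16 req_type = le16_to_cpu(twt->req_type);
+ *	u8 setup_cmd = u16_get_bits(req_type,
+ *				    IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ *	bool trigger = req_type & IEEE80211_TWT_REQTYPE_TRIGGER;
+ */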
+
+/**
+ * struct ieee80211_he_cap_elem - HE capabilities element
+ * @mac_cap_info: HE MAC Capabilities Information
+ * @phy_cap_info: HE PHY Capabilities Information
+ *
+ * This structure represents the fixed fields of the payload of the
+ * "HE capabilities element" as described in IEEE Std 802.11ax-2021
+ * sections 9.4.2.248.2 and 9.4.2.248.3.
+ */
+struct ieee80211_he_cap_elem {
+ u8 mac_cap_info[6];
+ u8 phy_cap_info[11];
+} __packed;
+
+#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5
+
+/**
+ * enum ieee80211_he_mcs_support - HE MCS support definitions
+ * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
+ * number of streams
+ * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported
+ * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported
+ * @IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported
+ *
+ * These definitions are used in each 2-bit subfield of the rx_mcs_*
+ * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are
+ * both split into 8 subfields by number of streams. These values indicate
+ * which MCSes are supported for the number of streams the value appears
+ * for.
+ */
+enum ieee80211_he_mcs_support {
+ IEEE80211_HE_MCS_SUPPORT_0_7 = 0,
+ IEEE80211_HE_MCS_SUPPORT_0_9 = 1,
+ IEEE80211_HE_MCS_SUPPORT_0_11 = 2,
+ IEEE80211_HE_MCS_NOT_SUPPORTED = 3,
+};
+
+/**
+ * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field
+ *
+ * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field
+ * described in P802.11ax_D2.0 section 9.4.2.237.4
+ *
+ * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel
+ *	widths of 80 MHz or less.
+ * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel
+ *	widths of 80 MHz or less.
+ * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel
+ * width 160MHz.
+ * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel
+ * width 160MHz.
+ * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for
+ * channel width 80p80MHz.
+ * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for
+ * channel width 80p80MHz.
+ */
+struct ieee80211_he_mcs_nss_supp {
+ __le16 rx_mcs_80;
+ __le16 tx_mcs_80;
+ __le16 rx_mcs_160;
+ __le16 tx_mcs_160;
+ __le16 rx_mcs_80p80;
+ __le16 tx_mcs_80p80;
+} __packed;
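+
+/*
+ * Illustrative sketch: read the 2-bit support subfield for a
+ * hypothetical stream count "nss" (1..8) from one of the maps, here
+ * rx_mcs_80 of a hypothetical pointer "supp":
+ *
+ *	u16 map = le16_to_cpu(supp->rx_mcs_80);
+ *	u8 mcs_sup = (map >> (2 * (nss - 1))) & 0x3;
+ *
+ * A result of IEEE80211_HE_MCS_NOT_SUPPORTED means that this number
+ * of streams is not supported at that bandwidth.
+ */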
+
+/**
+ * struct ieee80211_he_operation - HE Operation element
+ * @he_oper_params: HE Operation Parameters + BSS Color Information
+ * @he_mcs_nss_set: Basic HE-MCS And NSS Set
+ * @optional: Optional fields VHT Operation Information, Max Co-Hosted
+ * BSSID Indicator, and 6 GHz Operation Information
+ *
+ * This structure represents the payload of the "HE Operation
+ * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.249.
+ */
+struct ieee80211_he_operation {
+ __le32 he_oper_params;
+ __le16 he_mcs_nss_set;
+ u8 optional[];
+} __packed;
+
+/**
+ * struct ieee80211_he_spr - Spatial Reuse Parameter Set element
+ * @he_sr_control: SR Control
+ * @optional: Optional fields Non-SRG OBSS PD Max Offset, SRG OBSS PD
+ * Min Offset, SRG OBSS PD Max Offset, SRG BSS Color
+ * Bitmap, and SRG Partial BSSID Bitmap
+ *
+ * This structure represents the payload of the "Spatial Reuse
+ * Parameter Set element" as described in IEEE Std 802.11ax-2021
+ * section 9.4.2.252.
+ */
+struct ieee80211_he_spr {
+ u8 he_sr_control;
+ u8 optional[];
+} __packed;
+
+/**
+ * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field
+ * @aifsn: ACI/AIFSN
+ * @ecw_min_max: ECWmin/ECWmax
+ * @mu_edca_timer: MU EDCA Timer
+ *
+ * This structure represents the "MU AC Parameter Record" as described
+ * in IEEE Std 802.11ax-2021 section 9.4.2.251, Figure 9-788p.
+ */
+struct ieee80211_he_mu_edca_param_ac_rec {
+ u8 aifsn;
+ u8 ecw_min_max;
+ u8 mu_edca_timer;
+} __packed;
+
+/**
+ * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element
+ * @mu_qos_info: QoS Info
+ * @ac_be: MU AC_BE Parameter Record
+ * @ac_bk: MU AC_BK Parameter Record
+ * @ac_vi: MU AC_VI Parameter Record
+ * @ac_vo: MU AC_VO Parameter Record
+ *
+ * This structure represents the payload of the "MU EDCA Parameter Set
+ * element" as described in IEEE Std 802.11ax-2021 section 9.4.2.251.
+ */
+struct ieee80211_mu_edca_param_set {
+ u8 mu_qos_info;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_be;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_bk;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_vi;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_vo;
+} __packed;
+
+/* 802.11ax HE MAC capabilities */
+#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
+#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02
+#define IEEE80211_HE_MAC_CAP0_TWT_RES 0x04
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP 0x00
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1 0x08
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2 0x10
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3 0x18
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK 0x18
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1 0x00
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2 0x20
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4 0x40
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8 0x60
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16 0x80
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32 0xa0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64 0xc0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED 0xe0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK 0xe0
+
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED 0x00
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128 0x01
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256 0x02
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512 0x03
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK 0x03
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US 0x00
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1 0x00
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_2 0x10
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_3 0x20
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_4 0x30
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_5 0x40
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_6 0x50
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_7 0x60
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8 0x70
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_MASK 0x70
+
+/* Link adaptation is split between bytes HE_MAC_CAP1 and
+ * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE
+ * is set, in which case the following values apply:
+ * 0 = No feedback.
+ * 1 = reserved.
+ * 2 = Unsolicited feedback.
+ * 3 = Both (solicited and unsolicited feedback).
+ */
+#define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION 0x80
+
+#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01
+#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02
+#define IEEE80211_HE_MAC_CAP2_TRS 0x04
+#define IEEE80211_HE_MAC_CAP2_BSR 0x08
+#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10
+#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20
+#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40
+#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80
+
+#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02
+#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04
+
+/* The maximum length of an A-MPDU is defined by the combination of the Maximum
+ * A-MPDU Length Exponent field in the HT capabilities, VHT capabilities and the
+ * same field in the HE capabilities.
+ */
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_0 0x00
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1 0x08
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2 0x10
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3 0x18
+#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18
+#define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20
+#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40
+#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80
+
+#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01
+#define IEEE80211_HE_MAC_CAP4_QTP 0x02
+#define IEEE80211_HE_MAC_CAP4_BQR 0x04
+#define IEEE80211_HE_MAC_CAP4_PSR_RESP 0x08
+#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10
+#define IEEE80211_HE_MAC_CAP4_OPS 0x20
+#define IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU 0x40
+/* Multi TID agg TX is split between byte #4 and #5
+ * The value is a combination of B39,B40,B41
+ */
+#define IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39 0x80
+
+#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01
+#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02
+#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION 0x04
+#define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08
+#define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10
+#define IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS 0x20
+#define IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING 0x40
+#define IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX 0x80
+
+#define IEEE80211_HE_VHT_MAX_AMPDU_FACTOR 20
+#define IEEE80211_HE_HT_MAX_AMPDU_FACTOR 16
+#define IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR 13
+
+/* 802.11ax HE PHY capabilities */
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL 0x1e
+
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe
+
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ 0x01
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ 0x02
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ 0x04
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ 0x08
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK 0x0f
+#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10
+#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20
+#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40
+/* Midamble RX/TX Max NSTS is split between byte #2 and byte #3 */
+#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS 0x80
+
+#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS 0x01
+#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02
+#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04
+#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08
+#define IEEE80211_HE_PHY_CAP2_DOPPLER_TX 0x10
+#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20
+
+/* Note that the meaning of UL MU below is different between an AP and a non-AP
+ * sta, where in the AP case it indicates support for Rx and in the non-AP sta
+ * case it indicates support for Tx.
+ */
+#define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO 0x40
+#define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO 0x80
+
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK 0x01
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK 0x02
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM 0x03
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK 0x03
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2 0x04
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK 0x08
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK 0x10
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20
+#define IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU 0x40
+#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80
+
+#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01
+#define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER 0x02
+
+/* Minimal allowed value of Max STS under 80MHz is 3 */
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 0x10
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6 0x14
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7 0x18
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8 0x1c
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK 0x1c
+
+/* Minimal allowed value of Max STS above 80MHz is 3 */
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 0x60
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 0x80
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6 0xa0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7 0xc0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 0xe0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK 0xe0
+
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1 0x00
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 0x01
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3 0x02
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4 0x03
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5 0x04
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6 0x05
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7 0x06
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8 0x07
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK 0x07
+
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1 0x00
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 0x08
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3 0x10
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4 0x18
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5 0x20
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6 0x28
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7 0x30
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8 0x38
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK 0x38
+
+#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40
+#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80
+
+#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01
+#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02
+#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB 0x04
+#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB 0x08
+#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10
+#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20
+#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40
+#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80
+
+#define IEEE80211_HE_PHY_CAP7_PSR_BASED_SR 0x01
+#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP 0x02
+#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_3 0x18
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_4 0x20
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_5 0x28
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_6 0x30
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_7 0x38
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK 0x38
+#define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ 0x40
+#define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ 0x80
+
+#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI 0x01
+#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02
+#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04
+#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08
+#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10
+#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242 0x00
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484 0x40
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996 0x80
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996 0xc0
+#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK 0xc0
+
+#define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01
+#define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02
+#define IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU 0x04
+#define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08
+#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10
+#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US 0x0
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US 0x1
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US 0x2
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED 0x3
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_POS 6
+#define IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK 0xc0
+
+#define IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF 0x01
+
+/* 802.11ax HE TX/RX MCS NSS Support */
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS (11)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK 0x07c0
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK 0xf800
+
+/* TX/RX HE MCS Support field Highest MCS subfield encoding */
+enum ieee80211_he_highest_mcs_supported_subfield_enc {
+ HIGHEST_MCS_SUPPORTED_MCS7 = 0,
+ HIGHEST_MCS_SUPPORTED_MCS8,
+ HIGHEST_MCS_SUPPORTED_MCS9,
+ HIGHEST_MCS_SUPPORTED_MCS10,
+ HIGHEST_MCS_SUPPORTED_MCS11,
+};
+
+/* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */
+static inline u8
+ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap)
+{
+ u8 count = 4;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ count += 4;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ count += 4;
+
+ return count;
+}
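+
+/*
+ * Illustrative example: the base size is 4 octets (Rx/Tx maps for
+ * 80 MHz or less); a STA advertising both 160 MHz and 80+80 MHz
+ * support thus has a 4 + 4 + 4 = 12 octet Tx/Rx HE MCS NSS Support
+ * field.
+ */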
+
+/* 802.11ax HE PPE Thresholds */
+#define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS (1)
+#define IEEE80211_PPE_THRES_NSS_POS (0)
+#define IEEE80211_PPE_THRES_NSS_MASK (7)
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU \
+ (BIT(5) | BIT(6))
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3)
+#define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3)
+#define IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE (7)
+
+/*
+ * Calculate 802.11ax HE capabilities IE PPE Thresholds field size
+ * Inputs: the first (header) byte of the PPE Thresholds field, and a
+ * pointer to the PHY capabilities bytes of the HE capabilities IE
+ */
+static inline u8
+ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
+{
+ u8 n;
+
+ if ((phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0)
+ return 0;
+
+ n = hweight8(ppe_thres_hdr &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >>
+ IEEE80211_PPE_THRES_NSS_POS));
+
+ /*
+ * Each pair is 6 bits, and we need to add the 7 "header" bits to the
+ * total size.
+ */
+	n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) +
+	    IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE;
+ n = DIV_ROUND_UP(n, 8);
+
+ return n;
+}
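+
+/*
+ * Illustrative example: ppe_thres_hdr = 0x19 encodes an NSS subfield
+ * of 1 (2 spatial streams) and two set RU index bits, so n = 2 * 2 =
+ * 4 threshold pairs; 4 * 6 + 7 = 31 bits, i.e. 4 octets.
+ */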
+
+static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_he_cap_elem *he_cap_ie_elem = (const void *)data;
+ u8 needed = sizeof(*he_cap_ie_elem);
+
+ if (len < needed)
+ return false;
+
+ needed += ieee80211_he_mcs_nss_size(he_cap_ie_elem);
+ if (len < needed)
+ return false;
+
+ if (he_cap_ie_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ if (len < needed + 1)
+ return false;
+ needed += ieee80211_he_ppe_size(data[needed],
+ he_cap_ie_elem->phy_cap_info);
+ }
+
+ return len >= needed;
+}
+
+/* HE Operation defines */
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007
+#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
+#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000
+#define IEEE80211_HE_OPERATION_CO_HOSTED_BSS 0x00008000
+#define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000
+#define IEEE80211_HE_OPERATION_6GHZ_OP_INFO 0x00020000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24
+#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
+
+#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0
+#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1
+#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP 3
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP_OLD 4
+#define IEEE80211_6GHZ_CTRL_REG_AP_ROLE_NOT_RELEVANT 7
+#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP 8
+
+/**
+ * struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
+ * @primary: primary channel
+ * @control: control flags
+ * @ccfs0: channel center frequency segment 0
+ * @ccfs1: channel center frequency segment 1
+ * @minrate: minimum rate (in 1 Mbps units)
+ */
+struct ieee80211_he_6ghz_oper {
+ u8 primary;
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH 0x3
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ 0
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ 1
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2
+#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3
+#define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4
+#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x78
+ u8 control;
+ u8 ccfs0;
+ u8 ccfs1;
+ u8 minrate;
+} __packed;
+
+/**
+ * enum ieee80211_reg_conn_bits - represents Regulatory connectivity field bits.
+ *
+ * This enumeration defines bit flags used to represent regulatory connectivity
+ * field bits.
+ *
+ * @IEEE80211_REG_CONN_LPI_VALID: Indicates whether the LPI bit is valid.
+ * @IEEE80211_REG_CONN_LPI_VALUE: Represents the value of the LPI bit.
+ * @IEEE80211_REG_CONN_SP_VALID: Indicates whether the SP bit is valid.
+ * @IEEE80211_REG_CONN_SP_VALUE: Represents the value of the SP bit.
+ */
+enum ieee80211_reg_conn_bits {
+ IEEE80211_REG_CONN_LPI_VALID = BIT(0),
+ IEEE80211_REG_CONN_LPI_VALUE = BIT(1),
+ IEEE80211_REG_CONN_SP_VALID = BIT(2),
+ IEEE80211_REG_CONN_SP_VALUE = BIT(3),
+};
+
+/* transmit power interpretation type of transmit power envelope element */
+enum ieee80211_tx_power_intrpt_type {
+ IEEE80211_TPE_LOCAL_EIRP,
+ IEEE80211_TPE_LOCAL_EIRP_PSD,
+ IEEE80211_TPE_REG_CLIENT_EIRP,
+ IEEE80211_TPE_REG_CLIENT_EIRP_PSD,
+};
+
+/* category type of transmit power envelope element */
+enum ieee80211_tx_power_category_6ghz {
+ IEEE80211_TPE_CAT_6GHZ_DEFAULT = 0,
+ IEEE80211_TPE_CAT_6GHZ_SUBORDINATE = 1,
+};
+
+/*
+ * For IEEE80211_TPE_LOCAL_EIRP / IEEE80211_TPE_REG_CLIENT_EIRP,
+ * setting to 63.5 dBm means no constraint.
+ */
+#define IEEE80211_TPE_MAX_TX_PWR_NO_CONSTRAINT 127
+
+/*
+ * For IEEE80211_TPE_LOCAL_EIRP_PSD / IEEE80211_TPE_REG_CLIENT_EIRP_PSD,
+ * setting to 127 indicates no PSD limit for the 20 MHz channel.
+ */
+#define IEEE80211_TPE_PSD_NO_LIMIT 127
+
+/**
+ * struct ieee80211_tx_pwr_env - Transmit Power Envelope
+ * @info: Transmit Power Information field
+ * @variable: Maximum Transmit Power field
+ *
+ * This structure represents the payload of the "Transmit Power
+ * Envelope element" as described in IEEE Std 802.11ax-2021 section
+ * 9.4.2.161
+ */
+struct ieee80211_tx_pwr_env {
+ u8 info;
+ u8 variable[];
+} __packed;
+
+#define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7
+#define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38
+#define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0
+
+#define IEEE80211_TX_PWR_ENV_EXT_COUNT 0xF
+
+static inline bool ieee80211_valid_tpe_element(const u8 *data, u8 len)
+{
+ const struct ieee80211_tx_pwr_env *env = (const void *)data;
+ u8 count, interpret, category;
+ u8 needed = sizeof(*env);
+ u8 N; /* also called N in the spec */
+
+ if (len < needed)
+ return false;
+
+ count = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_COUNT);
+ interpret = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ category = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
+
+ switch (category) {
+ case IEEE80211_TPE_CAT_6GHZ_DEFAULT:
+ case IEEE80211_TPE_CAT_6GHZ_SUBORDINATE:
+ break;
+ default:
+ return false;
+ }
+
+ switch (interpret) {
+ case IEEE80211_TPE_LOCAL_EIRP:
+ case IEEE80211_TPE_REG_CLIENT_EIRP:
+ if (count > 3)
+ return false;
+
+ /* count == 0 encodes 1 value for 20 MHz, etc. */
+ needed += count + 1;
+
+ if (len < needed)
+ return false;
+
+ /* there can be extension fields not accounted for in 'count' */
+
+ return true;
+ case IEEE80211_TPE_LOCAL_EIRP_PSD:
+ case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
+ if (count > 4)
+ return false;
+
+ N = count ? 1 << (count - 1) : 1;
+ needed += N;
+
+ if (len < needed)
+ return false;
+
+ if (len > needed) {
+ u8 K = u8_get_bits(env->variable[N],
+ IEEE80211_TX_PWR_ENV_EXT_COUNT);
+
+ needed += 1 + K;
+ if (len < needed)
+ return false;
+ }
+
+ return true;
+ }
+
+ return false;
+}
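+
+/*
+ * Illustrative examples: a local EIRP TPE with count = 2 needs the
+ * info octet plus count + 1 = 3 maximum transmit power octets; a PSD
+ * interpretation with count = 3 needs N = 1 << 2 = 4 PSD octets
+ * before any extension fields.
+ */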
+
+/*
+ * ieee80211_he_oper_size - calculate 802.11ax HE Operation IE size
+ * @he_oper_ie: byte data of the HE Operation IE, starting from the byte
+ *	after the ext ID byte. It is assumed that he_oper_ie has at least
+ * sizeof(struct ieee80211_he_operation) bytes, the caller must have
+ * validated this.
+ * @return the actual size of the IE data (not including header), or 0 on error
+ */
+static inline u8
+ieee80211_he_oper_size(const u8 *he_oper_ie)
+{
+ const struct ieee80211_he_operation *he_oper = (const void *)he_oper_ie;
+ u8 oper_len = sizeof(struct ieee80211_he_operation);
+ u32 he_oper_params;
+
+ /* Make sure the input is not NULL */
+ if (!he_oper_ie)
+ return 0;
+
+ /* Calc required length */
+ he_oper_params = le32_to_cpu(he_oper->he_oper_params);
+ if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
+ oper_len += 3;
+ if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
+ oper_len++;
+ if (he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO)
+ oper_len += sizeof(struct ieee80211_he_6ghz_oper);
+
+ /* Add the first byte (extension ID) to the total length */
+ oper_len++;
+
+ return oper_len;
+}
+
+/**
+ * ieee80211_he_6ghz_oper - obtain 6 GHz operation field
+ * @he_oper: HE operation element (must be pre-validated for size)
+ * but may be %NULL
+ *
+ * Return: a pointer to the 6 GHz operation field, or %NULL
+ */
+static inline const struct ieee80211_he_6ghz_oper *
+ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
+{
+ const u8 *ret;
+ u32 he_oper_params;
+
+ if (!he_oper)
+ return NULL;
+
+ ret = (const void *)&he_oper->optional;
+
+ he_oper_params = le32_to_cpu(he_oper->he_oper_params);
+
+ if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO))
+ return NULL;
+ if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
+ ret += 3;
+ if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
+ ret++;
+
+ return (const void *)ret;
+}
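+
+/*
+ * Illustrative usage sketch; "he_oper" is a hypothetical pointer to a
+ * size-validated HE operation element:
+ *
+ *	const struct ieee80211_he_6ghz_oper *oper_6ghz =
+ *		ieee80211_he_6ghz_oper(he_oper);
+ *
+ *	if (oper_6ghz) {
+ *		u8 width = u8_get_bits(oper_6ghz->control,
+ *				IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH);
+ *		...
+ *	}
+ */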
+
+/* HE Spatial Reuse defines */
+#define IEEE80211_HE_SPR_PSR_DISALLOWED BIT(0)
+#define IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED BIT(1)
+#define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT BIT(2)
+#define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT BIT(3)
+#define IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED BIT(4)
+
+/*
+ * ieee80211_he_spr_size - calculate 802.11ax HE Spatial Reuse IE size
+ * @he_spr_ie: byte data of the HE Spatial Reuse IE, starting from the byte
+ * after the ext ID byte. It is assumed that he_spr_ie has at least
+ * sizeof(struct ieee80211_he_spr) bytes, the caller must have validated
+ * this
+ * @return the actual size of the IE data (not including header), or 0 on error
+ */
+static inline u8
+ieee80211_he_spr_size(const u8 *he_spr_ie)
+{
+ const struct ieee80211_he_spr *he_spr = (const void *)he_spr_ie;
+ u8 spr_len = sizeof(struct ieee80211_he_spr);
+ u8 he_spr_params;
+
+ /* Make sure the input is not NULL */
+ if (!he_spr_ie)
+ return 0;
+
+ /* Calc required length */
+ he_spr_params = he_spr->he_sr_control;
+ if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
+ spr_len++;
+ if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
+ spr_len += 18;
+
+ /* Add the first byte (extension ID) to the total length */
+ spr_len++;
+
+ return spr_len;
+}
+
+struct ieee80211_he_6ghz_capa {
+ /* uses IEEE80211_HE_6GHZ_CAP_* below */
+ __le16 capa;
+} __packed;
+
+/* HE 6 GHz band capabilities */
+/* uses enum ieee80211_min_mpdu_spacing values */
+#define IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START 0x0007
+/* uses enum ieee80211_vht_max_ampdu_length_exp values */
+#define IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP 0x0038
+/* uses IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_* values */
+#define IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN 0x00c0
+/* WLAN_HT_CAP_SM_PS_* values */
+#define IEEE80211_HE_6GHZ_CAP_SM_PS 0x0600
+#define IEEE80211_HE_6GHZ_CAP_RD_RESPONDER 0x0800
+#define IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS 0x1000
+#define IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS 0x2000
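
These masks are suitable for the bitfield helpers; a minimal sketch of decoding the SM power save subfield from an already-validated element (the function name is illustrative):

#include <linux/bitfield.h>

static u8 example_he_6ghz_sm_ps(const struct ieee80211_he_6ghz_capa *capa)
{
	/* yields one of the WLAN_HT_CAP_SM_PS_* values */
	return FIELD_GET(IEEE80211_HE_6GHZ_CAP_SM_PS, le16_to_cpu(capa->capa));
}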
+
+#endif /* LINUX_IEEE80211_HE_H */
diff --git a/include/linux/ieee80211-ht.h b/include/linux/ieee80211-ht.h
new file mode 100644
index 000000000000..21bbf470540f
--- /dev/null
+++ b/include/linux/ieee80211-ht.h
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 HT definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_HT_H
+#define LINUX_IEEE80211_HT_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/* Maximal size of an A-MSDU that can be transported in a HT BA session */
+#define IEEE80211_MAX_MPDU_LEN_HT_BA 4095
+
+/* Maximal size of an A-MSDU */
+#define IEEE80211_MAX_MPDU_LEN_HT_3839 3839
+#define IEEE80211_MAX_MPDU_LEN_HT_7935 7935
+
+#define IEEE80211_HT_CTL_LEN 4
+
+enum ieee80211_ht_chanwidth_values {
+ IEEE80211_HT_CHANWIDTH_20MHZ = 0,
+ IEEE80211_HT_CHANWIDTH_ANY = 1,
+};
+
+/**
+ * struct ieee80211_bar - Block Ack Request frame format
+ * @frame_control: Frame Control
+ * @duration: Duration
+ * @ra: RA
+ * @ta: TA
+ * @control: BAR Control
+ * @start_seq_num: Starting Sequence Number (see Figure 9-37)
+ *
+ * This structure represents the "BlockAckReq frame format"
+ * as described in IEEE Std 802.11-2020 section 9.3.1.7.
+ */
+struct ieee80211_bar {
+ __le16 frame_control;
+ __le16 duration;
+ __u8 ra[ETH_ALEN];
+ __u8 ta[ETH_ALEN];
+ __le16 control;
+ __le16 start_seq_num;
+} __packed;
+
+/* 802.11 BAR control masks */
+#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
+#define IEEE80211_BAR_CTRL_MULTI_TID 0x0002
+#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
+#define IEEE80211_BAR_CTRL_TID_INFO_MASK 0xf000
+#define IEEE80211_BAR_CTRL_TID_INFO_SHIFT 12
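
The TID info mask/shift pair applies to the host-endian BAR Control field; a minimal sketch, assuming the frame length has already been checked by the caller:

static u8 example_bar_tid(const struct ieee80211_bar *bar)
{
	u16 control = le16_to_cpu(bar->control);

	return (control & IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
	       IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
}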
+
+#define IEEE80211_HT_MCS_MASK_LEN 10
+
+/**
+ * struct ieee80211_mcs_info - Supported MCS Set field
+ * @rx_mask: RX mask
+ * @rx_highest: highest supported RX rate. If non-zero, this is
+ * the highest supported RX data rate in units of 1 Mbps.
+ * If this field is 0, it should not be used to determine
+ * the highest supported RX data rate.
+ * @tx_params: TX parameters
+ * @reserved: Reserved bits
+ *
+ * This structure represents the "Supported MCS Set field" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.55.4.
+ */
+struct ieee80211_mcs_info {
+ u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN];
+ __le16 rx_highest;
+ u8 tx_params;
+ u8 reserved[3];
+} __packed;
+
+/* 802.11n HT capability MCS set */
+#define IEEE80211_HT_MCS_RX_HIGHEST_MASK 0x3ff
+#define IEEE80211_HT_MCS_TX_DEFINED 0x01
+#define IEEE80211_HT_MCS_TX_RX_DIFF 0x02
+/* value 0 == 1 stream etc */
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK 0x0C
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT 2
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4
+#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10
+
+#define IEEE80211_HT_MCS_CHAINS(mcs) ((mcs) == 32 ? 1 : (1 + ((mcs) >> 3)))
+
+/*
+ * 802.11n D5.0 20.3.5 / 20.6 says:
+ * - indices 0 to 7 and 32 are single spatial stream
+ * - 8 to 31 are multiple spatial streams using equal modulation
+ * [8..15 for two streams, 16..23 for three and 24..31 for four]
+ * - remainder are multiple spatial streams using unequal modulation
+ */
+#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START 33
+#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE \
+ (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8)
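
A few spot checks of IEEE80211_HT_MCS_CHAINS() against the index layout described above:

/* MCS 7 is single stream, MCS 12 uses two streams, and MCS 32 (the
 * 40 MHz duplicate mode) is also single stream
 */
u8 chains_mcs7 = IEEE80211_HT_MCS_CHAINS(7);	/* == 1 */
u8 chains_mcs12 = IEEE80211_HT_MCS_CHAINS(12);	/* == 2 */
u8 chains_mcs32 = IEEE80211_HT_MCS_CHAINS(32);	/* == 1 */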
+
+/**
+ * struct ieee80211_ht_cap - HT capabilities element
+ * @cap_info: HT Capability Information
+ * @ampdu_params_info: A-MPDU Parameters
+ * @mcs: Supported MCS Set
+ * @extended_ht_cap_info: HT Extended Capabilities
+ * @tx_BF_cap_info: Transmit Beamforming Capabilities
+ * @antenna_selection_info: ASEL Capability
+ *
+ * This structure represents the payload of the "HT Capabilities
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.55.
+ */
+struct ieee80211_ht_cap {
+ __le16 cap_info;
+ u8 ampdu_params_info;
+
+ /* 16 bytes MCS information */
+ struct ieee80211_mcs_info mcs;
+
+ __le16 extended_ht_cap_info;
+ __le32 tx_BF_cap_info;
+ u8 antenna_selection_info;
+} __packed;
+
+/* 802.11n HT capabilities masks (for cap_info) */
+#define IEEE80211_HT_CAP_LDPC_CODING 0x0001
+#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002
+#define IEEE80211_HT_CAP_SM_PS 0x000C
+#define IEEE80211_HT_CAP_SM_PS_SHIFT 2
+#define IEEE80211_HT_CAP_GRN_FLD 0x0010
+#define IEEE80211_HT_CAP_SGI_20 0x0020
+#define IEEE80211_HT_CAP_SGI_40 0x0040
+#define IEEE80211_HT_CAP_TX_STBC 0x0080
+#define IEEE80211_HT_CAP_RX_STBC 0x0300
+#define IEEE80211_HT_CAP_RX_STBC_SHIFT 8
+#define IEEE80211_HT_CAP_DELAY_BA 0x0400
+#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
+#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
+#define IEEE80211_HT_CAP_RESERVED 0x2000
+#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000
+#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000
+
+/* 802.11n HT extended capabilities masks (for extended_ht_cap_info) */
+#define IEEE80211_HT_EXT_CAP_PCO 0x0001
+#define IEEE80211_HT_EXT_CAP_PCO_TIME 0x0006
+#define IEEE80211_HT_EXT_CAP_PCO_TIME_SHIFT 1
+#define IEEE80211_HT_EXT_CAP_MCS_FB 0x0300
+#define IEEE80211_HT_EXT_CAP_MCS_FB_SHIFT 8
+#define IEEE80211_HT_EXT_CAP_HTC_SUP 0x0400
+#define IEEE80211_HT_EXT_CAP_RD_RESPONDER 0x0800
+
+/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */
+#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03
+#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C
+#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2
+
+/*
+ * Maximum length of AMPDU that the STA can receive in high-throughput (HT).
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
+ */
+enum ieee80211_max_ampdu_length_exp {
+ IEEE80211_HT_MAX_AMPDU_8K = 0,
+ IEEE80211_HT_MAX_AMPDU_16K = 1,
+ IEEE80211_HT_MAX_AMPDU_32K = 2,
+ IEEE80211_HT_MAX_AMPDU_64K = 3
+};
+
+#define IEEE80211_HT_MAX_AMPDU_FACTOR 13
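
Evaluating the formula above, as a small sketch:

static u32 example_ht_max_ampdu_len(enum ieee80211_max_ampdu_length_exp exp)
{
	/* e.g. IEEE80211_HT_MAX_AMPDU_64K -> (1 << 16) - 1 = 65535 octets */
	return (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + exp)) - 1;
}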
+
+/* Minimum MPDU start spacing */
+enum ieee80211_min_mpdu_spacing {
+ IEEE80211_HT_MPDU_DENSITY_NONE = 0, /* No restriction */
+ IEEE80211_HT_MPDU_DENSITY_0_25 = 1, /* 1/4 usec */
+ IEEE80211_HT_MPDU_DENSITY_0_5 = 2, /* 1/2 usec */
+ IEEE80211_HT_MPDU_DENSITY_1 = 3, /* 1 usec */
+ IEEE80211_HT_MPDU_DENSITY_2 = 4, /* 2 usec */
+ IEEE80211_HT_MPDU_DENSITY_4 = 5, /* 4 usec */
+ IEEE80211_HT_MPDU_DENSITY_8 = 6, /* 8 usec */
+ IEEE80211_HT_MPDU_DENSITY_16 = 7 /* 16 usec */
+};
+
+/**
+ * struct ieee80211_ht_operation - HT operation IE
+ * @primary_chan: Primary Channel
+ * @ht_param: HT Operation Information parameters
+ * @operation_mode: HT Operation Information operation mode
+ * @stbc_param: HT Operation Information STBC params
+ * @basic_set: Basic HT-MCS Set
+ *
+ * This structure represents the payload of the "HT Operation
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.56.
+ */
+struct ieee80211_ht_operation {
+ u8 primary_chan;
+ u8 ht_param;
+ __le16 operation_mode;
+ __le16 stbc_param;
+ u8 basic_set[16];
+} __packed;
+
+/* for ht_param */
+#define IEEE80211_HT_PARAM_CHA_SEC_OFFSET 0x03
+#define IEEE80211_HT_PARAM_CHA_SEC_NONE 0x00
+#define IEEE80211_HT_PARAM_CHA_SEC_ABOVE 0x01
+#define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03
+#define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04
+#define IEEE80211_HT_PARAM_RIFS_MODE 0x08
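
A sketch of deriving the secondary channel position from a validated HT Operation element using the masks above (the helper name is illustrative):

static int example_ht_secondary_offset(const struct ieee80211_ht_operation *oper)
{
	switch (oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
	case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
		return 1;	/* secondary 20 MHz channel above the primary */
	case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
		return -1;	/* secondary 20 MHz channel below the primary */
	default:
		return 0;	/* IEEE80211_HT_PARAM_CHA_SEC_NONE */
	}
}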
+
+/* for operation_mode */
+#define IEEE80211_HT_OP_MODE_PROTECTION 0x0003
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONE 0
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER 1
+#define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ 2
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3
+#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004
+#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010
+#define IEEE80211_HT_OP_MODE_CCFS2_SHIFT 5
+#define IEEE80211_HT_OP_MODE_CCFS2_MASK 0x1fe0
+
+/* for stbc_param */
+#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040
+#define IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT 0x0080
+#define IEEE80211_HT_STBC_PARAM_STBC_BEACON 0x0100
+#define IEEE80211_HT_STBC_PARAM_LSIG_TXOP_FULLPROT 0x0200
+#define IEEE80211_HT_STBC_PARAM_PCO_ACTIVE 0x0400
+#define IEEE80211_HT_STBC_PARAM_PCO_PHASE 0x0800
+
+/* block-ack parameters */
+#define IEEE80211_ADDBA_PARAM_AMSDU_MASK 0x0001
+#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
+#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
+#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
+#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
+#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
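
A sketch of splitting a host-endian ADDBA capability field into its subfields with the bitfield helpers; the value would normally come from le16_to_cpu() on the received frame:

#include <linux/bitfield.h>

static void example_addba_params(u16 params, u8 *tid, u16 *buf_size,
				 bool *amsdu)
{
	*tid = FIELD_GET(IEEE80211_ADDBA_PARAM_TID_MASK, params);
	*buf_size = FIELD_GET(IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK, params);
	*amsdu = FIELD_GET(IEEE80211_ADDBA_PARAM_AMSDU_MASK, params);
}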
+
+/*
+ * A-MPDU buffer sizes
+ * In HT, the buffer size varies from 8 to 64 frames.
+ * HE adds the ability to have up to 256 frames.
+ * EHT adds the ability to have up to 1024 frames.
+ */
+#define IEEE80211_MIN_AMPDU_BUF 0x8
+#define IEEE80211_MAX_AMPDU_BUF_HT 0x40
+#define IEEE80211_MAX_AMPDU_BUF_HE 0x100
+#define IEEE80211_MAX_AMPDU_BUF_EHT 0x400
+
+/* Spatial Multiplexing Power Save Modes (for capability) */
+#define WLAN_HT_CAP_SM_PS_STATIC 0
+#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
+#define WLAN_HT_CAP_SM_PS_INVALID 2
+#define WLAN_HT_CAP_SM_PS_DISABLED 3
+
+/* for SM power control field lower two bits */
+#define WLAN_HT_SMPS_CONTROL_DISABLED 0
+#define WLAN_HT_SMPS_CONTROL_STATIC 1
+#define WLAN_HT_SMPS_CONTROL_DYNAMIC 3
+
+/* HT action codes */
+enum ieee80211_ht_actioncode {
+ WLAN_HT_ACTION_NOTIFY_CHANWIDTH = 0,
+ WLAN_HT_ACTION_SMPS = 1,
+ WLAN_HT_ACTION_PSMP = 2,
+ WLAN_HT_ACTION_PCO_PHASE = 3,
+ WLAN_HT_ACTION_CSI = 4,
+ WLAN_HT_ACTION_NONCOMPRESSED_BF = 5,
+ WLAN_HT_ACTION_COMPRESSED_BF = 6,
+ WLAN_HT_ACTION_ASEL_IDX_FEEDBACK = 7,
+};
+
+/* BACK action code */
+enum ieee80211_back_actioncode {
+ WLAN_ACTION_ADDBA_REQ = 0,
+ WLAN_ACTION_ADDBA_RESP = 1,
+ WLAN_ACTION_DELBA = 2,
+};
+
+/* BACK (block-ack) parties */
+enum ieee80211_back_parties {
+ WLAN_BACK_RECIPIENT = 0,
+ WLAN_BACK_INITIATOR = 1,
+};
+
+#endif /* LINUX_IEEE80211_HT_H */
diff --git a/include/linux/ieee80211-mesh.h b/include/linux/ieee80211-mesh.h
new file mode 100644
index 000000000000..4b829bcb38b6
--- /dev/null
+++ b/include/linux/ieee80211-mesh.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 mesh definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_MESH_H
+#define LINUX_IEEE80211_MESH_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define IEEE80211_MAX_MESH_ID_LEN 32
+
+struct ieee80211s_hdr {
+ u8 flags;
+ u8 ttl;
+ __le32 seqnum;
+ u8 eaddr1[ETH_ALEN];
+ u8 eaddr2[ETH_ALEN];
+} __packed __aligned(2);
+
+/* Mesh flags */
+#define MESH_FLAGS_AE_A4 0x1
+#define MESH_FLAGS_AE_A5_A6 0x2
+#define MESH_FLAGS_AE 0x3
+#define MESH_FLAGS_PS_DEEP 0x4
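
The AE subfield selects how many extended addresses follow the fixed 6 bytes, so the on-air mesh header can be shorter than the full structure; a sketch of the resulting length (cf. ieee80211_get_mesh_hdrlen() in cfg80211):

static unsigned int example_mesh_hdrlen(const struct ieee80211s_hdr *meshhdr)
{
	switch (meshhdr->flags & MESH_FLAGS_AE) {
	case MESH_FLAGS_AE_A4:
		return 6 + ETH_ALEN;		/* eaddr1 only */
	case MESH_FLAGS_AE_A5_A6:
		return 6 + 2 * ETH_ALEN;	/* eaddr1 and eaddr2 */
	default:
		return 6;			/* no extended addresses */
	}
}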
+
+/**
+ * enum ieee80211_preq_flags - mesh PREQ element flags
+ *
+ * @IEEE80211_PREQ_PROACTIVE_PREP_FLAG: proactive PREP subfield
+ */
+enum ieee80211_preq_flags {
+ IEEE80211_PREQ_PROACTIVE_PREP_FLAG = 1<<2,
+};
+
+/**
+ * enum ieee80211_preq_target_flags - mesh PREQ element per target flags
+ *
+ * @IEEE80211_PREQ_TO_FLAG: target only subfield
+ * @IEEE80211_PREQ_USN_FLAG: unknown target HWMP sequence number subfield
+ */
+enum ieee80211_preq_target_flags {
+ IEEE80211_PREQ_TO_FLAG = 1<<0,
+ IEEE80211_PREQ_USN_FLAG = 1<<2,
+};
+
+/**
+ * struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE
+ * @mesh_ttl: Time To Live
+ * @mesh_flags: Flags
+ * @mesh_reason: Reason Code
+ * @mesh_pre_value: Precedence Value
+ *
+ * This structure represents the payload of the "Mesh Channel Switch
+ * Parameters element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.102.
+ */
+struct ieee80211_mesh_chansw_params_ie {
+ u8 mesh_ttl;
+ u8 mesh_flags;
+ __le16 mesh_reason;
+ __le16 mesh_pre_value;
+} __packed;
+
+/**
+ * struct ieee80211_meshconf_ie - Mesh Configuration element
+ * @meshconf_psel: Active Path Selection Protocol Identifier
+ * @meshconf_pmetric: Active Path Selection Metric Identifier
+ * @meshconf_congest: Congestion Control Mode Identifier
+ * @meshconf_synch: Synchronization Method Identifier
+ * @meshconf_auth: Authentication Protocol Identifier
+ * @meshconf_form: Mesh Formation Info
+ * @meshconf_cap: Mesh Capability (see &enum mesh_config_capab_flags)
+ *
+ * This structure represents the payload of the "Mesh Configuration
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.97.
+ */
+struct ieee80211_meshconf_ie {
+ u8 meshconf_psel;
+ u8 meshconf_pmetric;
+ u8 meshconf_congest;
+ u8 meshconf_synch;
+ u8 meshconf_auth;
+ u8 meshconf_form;
+ u8 meshconf_cap;
+} __packed;
+
+/**
+ * enum mesh_config_capab_flags - Mesh Configuration IE capability field flags
+ *
+ * @IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish
+ * additional mesh peerings with other mesh STAs
+ * @IEEE80211_MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs
+ * @IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure
+ * is ongoing
+ * @IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL: STA is in deep sleep mode or has
+ * neighbors in deep sleep mode
+ *
+ * Enumerates the "Mesh Capability" as described in IEEE Std
+ * 802.11-2020 section 9.4.2.97.7.
+ */
+enum mesh_config_capab_flags {
+ IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS = 0x01,
+ IEEE80211_MESHCONF_CAPAB_FORWARDING = 0x08,
+ IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING = 0x20,
+ IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL = 0x40,
+};
+
+#define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1
+
+/* Mesh Channel Switch Parameters element's flag indicator */
+#define WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT BIT(0)
+#define WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR BIT(1)
+#define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2)
+
+/**
+ * struct ieee80211_rann_ie - RANN (root announcement) element
+ * @rann_flags: Flags
+ * @rann_hopcount: Hop Count
+ * @rann_ttl: Element TTL
+ * @rann_addr: Root Mesh STA Address
+ * @rann_seq: HWMP Sequence Number
+ * @rann_interval: Interval
+ * @rann_metric: Metric
+ *
+ * This structure represents the payload of the "RANN element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.111.
+ */
+struct ieee80211_rann_ie {
+ u8 rann_flags;
+ u8 rann_hopcount;
+ u8 rann_ttl;
+ u8 rann_addr[ETH_ALEN];
+ __le32 rann_seq;
+ __le32 rann_interval;
+ __le32 rann_metric;
+} __packed;
+
+enum ieee80211_rann_flags {
+ RANN_FLAG_IS_GATE = 1 << 0,
+};
+
+/* Mesh action codes */
+enum ieee80211_mesh_actioncode {
+ WLAN_MESH_ACTION_LINK_METRIC_REPORT,
+ WLAN_MESH_ACTION_HWMP_PATH_SELECTION,
+ WLAN_MESH_ACTION_GATE_ANNOUNCEMENT,
+ WLAN_MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION,
+ WLAN_MESH_ACTION_MCCA_SETUP_REQUEST,
+ WLAN_MESH_ACTION_MCCA_SETUP_REPLY,
+ WLAN_MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST,
+ WLAN_MESH_ACTION_MCCA_ADVERTISEMENT,
+ WLAN_MESH_ACTION_MCCA_TEARDOWN,
+ WLAN_MESH_ACTION_TBTT_ADJUSTMENT_REQUEST,
+ WLAN_MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE,
+};
+
+/**
+ * enum ieee80211_mesh_sync_method - mesh synchronization method identifier
+ *
+ * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method
+ * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method
+ * that will be specified in a vendor specific information element
+ */
+enum ieee80211_mesh_sync_method {
+ IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1,
+ IEEE80211_SYNC_METHOD_VENDOR = 255,
+};
+
+/**
+ * enum ieee80211_mesh_path_protocol - mesh path selection protocol identifier
+ *
+ * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol
+ * @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will
+ * be specified in a vendor specific information element
+ */
+enum ieee80211_mesh_path_protocol {
+ IEEE80211_PATH_PROTOCOL_HWMP = 1,
+ IEEE80211_PATH_PROTOCOL_VENDOR = 255,
+};
+
+/**
+ * enum ieee80211_mesh_path_metric - mesh path selection metric identifier
+ *
+ * @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric
+ * @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be
+ * specified in a vendor specific information element
+ */
+enum ieee80211_mesh_path_metric {
+ IEEE80211_PATH_METRIC_AIRTIME = 1,
+ IEEE80211_PATH_METRIC_VENDOR = 255,
+};
+
+/**
+ * enum ieee80211_root_mode_identifier - root mesh STA mode identifier
+ *
+ * These attributes are used by dot11MeshHWMPRootMode to set the root mesh
+ * STA mode
+ *
+ * @IEEE80211_ROOTMODE_NO_ROOT: the mesh STA is not a root mesh STA (default)
+ * @IEEE80211_ROOTMODE_ROOT: the mesh STA is a root mesh STA (as is any
+ * mode greater than this value)
+ * @IEEE80211_PROACTIVE_PREQ_NO_PREP: the mesh STA is a root mesh STA that
+ * supports the proactive PREQ with the proactive PREP subfield set to 0
+ * @IEEE80211_PROACTIVE_PREQ_WITH_PREP: the mesh STA is a root mesh STA that
+ * supports the proactive PREQ with the proactive PREP subfield set to 1
+ * @IEEE80211_PROACTIVE_RANN: the mesh STA is a root mesh STA that supports
+ * the proactive RANN
+ */
+enum ieee80211_root_mode_identifier {
+ IEEE80211_ROOTMODE_NO_ROOT = 0,
+ IEEE80211_ROOTMODE_ROOT = 1,
+ IEEE80211_PROACTIVE_PREQ_NO_PREP = 2,
+ IEEE80211_PROACTIVE_PREQ_WITH_PREP = 3,
+ IEEE80211_PROACTIVE_RANN = 4,
+};
+
+#endif /* LINUX_IEEE80211_MESH_H */
diff --git a/include/linux/ieee80211-nan.h b/include/linux/ieee80211-nan.h
new file mode 100644
index 000000000000..d07959bf8a90
--- /dev/null
+++ b/include/linux/ieee80211-nan.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * WFA NAN definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_NAN_H
+#define LINUX_IEEE80211_NAN_H
+
+/* NAN operation mode, as defined in Wi-Fi Aware (TM) specification Table 81 */
+#define NAN_OP_MODE_PHY_MODE_VHT 0x01
+#define NAN_OP_MODE_PHY_MODE_HE 0x10
+#define NAN_OP_MODE_PHY_MODE_MASK 0x11
+#define NAN_OP_MODE_80P80MHZ 0x02
+#define NAN_OP_MODE_160MHZ 0x04
+#define NAN_OP_MODE_PNDL_SUPPRTED 0x08
+
+/* NAN Device capabilities, as defined in Wi-Fi Aware (TM) specification
+ * Table 79
+ */
+#define NAN_DEV_CAPA_DFS_OWNER 0x01
+#define NAN_DEV_CAPA_EXT_KEY_ID_SUPPORTED 0x02
+#define NAN_DEV_CAPA_SIM_NDP_RX_SUPPORTED 0x04
+#define NAN_DEV_CAPA_NDPE_SUPPORTED 0x08
+#define NAN_DEV_CAPA_S3_SUPPORTED 0x10
+
+#endif /* LINUX_IEEE80211_NAN_H */
diff --git a/include/linux/ieee80211-p2p.h b/include/linux/ieee80211-p2p.h
new file mode 100644
index 000000000000..180891c11f08
--- /dev/null
+++ b/include/linux/ieee80211-p2p.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * WFA P2P definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_P2P_H
+#define LINUX_IEEE80211_P2P_H
+
+#include <linux/types.h>
+/*
+ * Peer-to-Peer IE attribute related definitions.
+ */
+/*
+ * enum ieee80211_p2p_attr_id - identifies type of peer-to-peer attribute.
+ */
+enum ieee80211_p2p_attr_id {
+ IEEE80211_P2P_ATTR_STATUS = 0,
+ IEEE80211_P2P_ATTR_MINOR_REASON,
+ IEEE80211_P2P_ATTR_CAPABILITY,
+ IEEE80211_P2P_ATTR_DEVICE_ID,
+ IEEE80211_P2P_ATTR_GO_INTENT,
+ IEEE80211_P2P_ATTR_GO_CONFIG_TIMEOUT,
+ IEEE80211_P2P_ATTR_LISTEN_CHANNEL,
+ IEEE80211_P2P_ATTR_GROUP_BSSID,
+ IEEE80211_P2P_ATTR_EXT_LISTEN_TIMING,
+ IEEE80211_P2P_ATTR_INTENDED_IFACE_ADDR,
+ IEEE80211_P2P_ATTR_MANAGABILITY,
+ IEEE80211_P2P_ATTR_CHANNEL_LIST,
+ IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
+ IEEE80211_P2P_ATTR_DEVICE_INFO,
+ IEEE80211_P2P_ATTR_GROUP_INFO,
+ IEEE80211_P2P_ATTR_GROUP_ID,
+ IEEE80211_P2P_ATTR_INTERFACE,
+ IEEE80211_P2P_ATTR_OPER_CHANNEL,
+ IEEE80211_P2P_ATTR_INVITE_FLAGS,
+ /* 19 - 220: Reserved */
+ IEEE80211_P2P_ATTR_VENDOR_SPECIFIC = 221,
+
+ IEEE80211_P2P_ATTR_MAX
+};
+
+/* Notice of Absence attribute - described in P2P spec 4.1.14 */
+/* Typical max value used here */
+#define IEEE80211_P2P_NOA_DESC_MAX 4
+
+struct ieee80211_p2p_noa_desc {
+ u8 count;
+ __le32 duration;
+ __le32 interval;
+ __le32 start_time;
+} __packed;
+
+struct ieee80211_p2p_noa_attr {
+ u8 index;
+ u8 oppps_ctwindow;
+ struct ieee80211_p2p_noa_desc desc[IEEE80211_P2P_NOA_DESC_MAX];
+} __packed;
+
+#define IEEE80211_P2P_OPPPS_ENABLE_BIT BIT(7)
+#define IEEE80211_P2P_OPPPS_CTWINDOW_MASK 0x7F
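
A sketch of decoding the opportunistic power save subfields from a length-validated NoA attribute (the helper name is illustrative):

static void example_p2p_oppps(const struct ieee80211_p2p_noa_attr *noa,
			      bool *oppps, u8 *ctwindow)
{
	*oppps = noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT;
	*ctwindow = noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK;
}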
+
+#endif /* LINUX_IEEE80211_P2P_H */
diff --git a/include/linux/ieee80211-s1g.h b/include/linux/ieee80211-s1g.h
new file mode 100644
index 000000000000..5b9ed2dcc00e
--- /dev/null
+++ b/include/linux/ieee80211-s1g.h
@@ -0,0 +1,575 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 S1G definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_S1G_H
+#define LINUX_IEEE80211_S1G_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/* bits unique to S1G beacon frame control */
+#define IEEE80211_S1G_BCN_NEXT_TBTT 0x100
+#define IEEE80211_S1G_BCN_CSSID 0x200
+#define IEEE80211_S1G_BCN_ANO 0x400
+
+/* see 802.11ah-2016 9.9 NDP CMAC frames */
+#define IEEE80211_S1G_1MHZ_NDP_BITS 25
+#define IEEE80211_S1G_1MHZ_NDP_BYTES 4
+#define IEEE80211_S1G_2MHZ_NDP_BITS 37
+#define IEEE80211_S1G_2MHZ_NDP_BYTES 5
+
+/**
+ * ieee80211_is_s1g_beacon - check if IEEE80211_FTYPE_EXT &&
+ * IEEE80211_STYPE_S1G_BEACON
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an S1G beacon
+ */
+static inline bool ieee80211_is_s1g_beacon(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE |
+ IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON);
+}
+
+/**
+ * ieee80211_s1g_has_next_tbtt - check if IEEE80211_S1G_BCN_NEXT_TBTT
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame contains the variable-length
+ * next TBTT field
+ */
+static inline bool ieee80211_s1g_has_next_tbtt(__le16 fc)
+{
+ return ieee80211_is_s1g_beacon(fc) &&
+ (fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT));
+}
+
+/**
+ * ieee80211_s1g_has_ano - check if IEEE80211_S1G_BCN_ANO
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame contains the variable-length
+ * ANO field
+ */
+static inline bool ieee80211_s1g_has_ano(__le16 fc)
+{
+ return ieee80211_is_s1g_beacon(fc) &&
+ (fc & cpu_to_le16(IEEE80211_S1G_BCN_ANO));
+}
+
+/**
+ * ieee80211_s1g_has_cssid - check if IEEE80211_S1G_BCN_CSSID
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame contains the variable-length
+ * compressed SSID field
+ */
+static inline bool ieee80211_s1g_has_cssid(__le16 fc)
+{
+ return ieee80211_is_s1g_beacon(fc) &&
+ (fc & cpu_to_le16(IEEE80211_S1G_BCN_CSSID));
+}
+
+/**
+ * enum ieee80211_s1g_chanwidth - S1G channel widths
+ * These are defined in IEEE Std 802.11ah-2016, Table 10-20
+ * as BSS Channel Width
+ *
+ * @IEEE80211_S1G_CHANWIDTH_1MHZ: 1MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_2MHZ: 2MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_4MHZ: 4MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_8MHZ: 8MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_16MHZ: 16MHz operating channel
+ */
+enum ieee80211_s1g_chanwidth {
+ IEEE80211_S1G_CHANWIDTH_1MHZ = 0,
+ IEEE80211_S1G_CHANWIDTH_2MHZ = 1,
+ IEEE80211_S1G_CHANWIDTH_4MHZ = 3,
+ IEEE80211_S1G_CHANWIDTH_8MHZ = 7,
+ IEEE80211_S1G_CHANWIDTH_16MHZ = 15,
+};
+
+/**
+ * enum ieee80211_s1g_pri_chanwidth - S1G primary channel widths
+ * described in IEEE80211-2024 Table 10-39.
+ *
+ * @IEEE80211_S1G_PRI_CHANWIDTH_2MHZ: 2MHz primary channel
+ * @IEEE80211_S1G_PRI_CHANWIDTH_1MHZ: 1MHz primary channel
+ */
+enum ieee80211_s1g_pri_chanwidth {
+ IEEE80211_S1G_PRI_CHANWIDTH_2MHZ = 0,
+ IEEE80211_S1G_PRI_CHANWIDTH_1MHZ = 1,
+};
+
+/**
+ * struct ieee80211_s1g_bcn_compat_ie - S1G Beacon Compatibility element
+ * @compat_info: Compatibility Information
+ * @beacon_int: Beacon Interval
+ * @tsf_completion: TSF Completion
+ *
+ * This structure represents the payload of the "S1G Beacon
+ * Compatibility element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.196.
+ */
+struct ieee80211_s1g_bcn_compat_ie {
+ __le16 compat_info;
+ __le16 beacon_int;
+ __le32 tsf_completion;
+} __packed;
+
+/**
+ * struct ieee80211_s1g_oper_ie - S1G Operation element
+ * @ch_width: S1G Operation Information Channel Width
+ * @oper_class: S1G Operation Information Operating Class
+ * @primary_ch: S1G Operation Information Primary Channel Number
+ * @oper_ch: S1G Operation Information Channel Center Frequency
+ * @basic_mcs_nss: Basic S1G-MCS and NSS Set
+ *
+ * This structure represents the payload of the "S1G Operation
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.212.
+ */
+struct ieee80211_s1g_oper_ie {
+ u8 ch_width;
+ u8 oper_class;
+ u8 primary_ch;
+ u8 oper_ch;
+ __le16 basic_mcs_nss;
+} __packed;
+
+/**
+ * struct ieee80211_aid_response_ie - AID Response element
+ * @aid: AID/Group AID
+ * @switch_count: AID Switch Count
+ * @response_int: AID Response Interval
+ *
+ * This structure represents the payload of the "AID Response element"
+ * as described in IEEE Std 802.11-2020 section 9.4.2.194.
+ */
+struct ieee80211_aid_response_ie {
+ __le16 aid;
+ u8 switch_count;
+ __le16 response_int;
+} __packed;
+
+struct ieee80211_s1g_cap {
+ u8 capab_info[10];
+ u8 supp_mcs_nss[5];
+} __packed;
+
+/**
+ * ieee80211_s1g_optional_len - determine length of optional S1G beacon fields
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: total length in bytes of the optional fixed-length fields
+ *
+ * S1G beacons may contain up to three optional fixed-length fields that
+ * precede the variable-length elements. Whether these fields are present
+ * is indicated by flags in the frame control field.
+ *
+ * From IEEE 802.11-2024 section 9.3.4.3:
+ * - Next TBTT field may be 0 or 3 bytes
+ * - Short SSID field may be 0 or 4 bytes
+ * - Access Network Options (ANO) field may be 0 or 1 byte
+ */
+static inline size_t
+ieee80211_s1g_optional_len(__le16 fc)
+{
+ size_t len = 0;
+
+ if (ieee80211_s1g_has_next_tbtt(fc))
+ len += 3;
+
+ if (ieee80211_s1g_has_cssid(fc))
+ len += 4;
+
+ if (ieee80211_s1g_has_ano(fc))
+ len += 1;
+
+ return len;
+}
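
For example, with all three flags set the optional fields occupy 3 + 4 + 1 = 8 bytes before the first element:

__le16 fc = cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON |
			IEEE80211_S1G_BCN_NEXT_TBTT |
			IEEE80211_S1G_BCN_CSSID |
			IEEE80211_S1G_BCN_ANO);
size_t optional = ieee80211_s1g_optional_len(fc);	/* == 8 */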
+
+/* S1G Capabilities Information field */
+#define IEEE80211_S1G_CAPABILITY_LEN 15
+
+#define S1G_CAP0_S1G_LONG BIT(0)
+#define S1G_CAP0_SGI_1MHZ BIT(1)
+#define S1G_CAP0_SGI_2MHZ BIT(2)
+#define S1G_CAP0_SGI_4MHZ BIT(3)
+#define S1G_CAP0_SGI_8MHZ BIT(4)
+#define S1G_CAP0_SGI_16MHZ BIT(5)
+#define S1G_CAP0_SUPP_CH_WIDTH GENMASK(7, 6)
+
+#define S1G_SUPP_CH_WIDTH_2 0
+#define S1G_SUPP_CH_WIDTH_4 1
+#define S1G_SUPP_CH_WIDTH_8 2
+#define S1G_SUPP_CH_WIDTH_16 3
+#define S1G_SUPP_CH_WIDTH_MAX(cap) ((1 << FIELD_GET(S1G_CAP0_SUPP_CH_WIDTH, \
+ cap[0])) << 1)
+
+#define S1G_CAP1_RX_LDPC BIT(0)
+#define S1G_CAP1_TX_STBC BIT(1)
+#define S1G_CAP1_RX_STBC BIT(2)
+#define S1G_CAP1_SU_BFER BIT(3)
+#define S1G_CAP1_SU_BFEE BIT(4)
+#define S1G_CAP1_BFEE_STS GENMASK(7, 5)
+
+#define S1G_CAP2_SOUNDING_DIMENSIONS GENMASK(2, 0)
+#define S1G_CAP2_MU_BFER BIT(3)
+#define S1G_CAP2_MU_BFEE BIT(4)
+#define S1G_CAP2_PLUS_HTC_VHT BIT(5)
+#define S1G_CAP2_TRAVELING_PILOT GENMASK(7, 6)
+
+#define S1G_CAP3_RD_RESPONDER BIT(0)
+#define S1G_CAP3_HT_DELAYED_BA BIT(1)
+#define S1G_CAP3_MAX_MPDU_LEN BIT(2)
+#define S1G_CAP3_MAX_AMPDU_LEN_EXP GENMASK(4, 3)
+#define S1G_CAP3_MIN_MPDU_START GENMASK(7, 5)
+
+#define S1G_CAP4_UPLINK_SYNC BIT(0)
+#define S1G_CAP4_DYNAMIC_AID BIT(1)
+#define S1G_CAP4_BAT BIT(2)
+#define S1G_CAP4_TIME_ADE BIT(3)
+#define S1G_CAP4_NON_TIM BIT(4)
+#define S1G_CAP4_GROUP_AID BIT(5)
+#define S1G_CAP4_STA_TYPE GENMASK(7, 6)
+
+#define S1G_CAP5_CENT_AUTH_CONTROL BIT(0)
+#define S1G_CAP5_DIST_AUTH_CONTROL BIT(1)
+#define S1G_CAP5_AMSDU BIT(2)
+#define S1G_CAP5_AMPDU BIT(3)
+#define S1G_CAP5_ASYMMETRIC_BA BIT(4)
+#define S1G_CAP5_FLOW_CONTROL BIT(5)
+#define S1G_CAP5_SECTORIZED_BEAM GENMASK(7, 6)
+
+#define S1G_CAP6_OBSS_MITIGATION BIT(0)
+#define S1G_CAP6_FRAGMENT_BA BIT(1)
+#define S1G_CAP6_NDP_PS_POLL BIT(2)
+#define S1G_CAP6_RAW_OPERATION BIT(3)
+#define S1G_CAP6_PAGE_SLICING BIT(4)
+#define S1G_CAP6_TXOP_SHARING_IMP_ACK BIT(5)
+#define S1G_CAP6_VHT_LINK_ADAPT GENMASK(7, 6)
+
+#define S1G_CAP7_TACK_AS_PS_POLL BIT(0)
+#define S1G_CAP7_DUP_1MHZ BIT(1)
+#define S1G_CAP7_MCS_NEGOTIATION BIT(2)
+#define S1G_CAP7_1MHZ_CTL_RESPONSE_PREAMBLE BIT(3)
+#define S1G_CAP7_NDP_BFING_REPORT_POLL BIT(4)
+#define S1G_CAP7_UNSOLICITED_DYN_AID BIT(5)
+#define S1G_CAP7_SECTOR_TRAINING_OPERATION BIT(6)
+#define S1G_CAP7_TEMP_PS_MODE_SWITCH BIT(7)
+
+#define S1G_CAP8_TWT_GROUPING BIT(0)
+#define S1G_CAP8_BDT BIT(1)
+#define S1G_CAP8_COLOR GENMASK(4, 2)
+#define S1G_CAP8_TWT_REQUEST BIT(5)
+#define S1G_CAP8_TWT_RESPOND BIT(6)
+#define S1G_CAP8_PV1_FRAME BIT(7)
+
+#define S1G_CAP9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0)
+
+#define S1G_OPER_CH_WIDTH_PRIMARY BIT(0)
+#define S1G_OPER_CH_WIDTH_OPER GENMASK(4, 1)
+#define S1G_OPER_CH_PRIMARY_LOCATION BIT(5)
+
+#define S1G_2M_PRIMARY_LOCATION_LOWER 0
+#define S1G_2M_PRIMARY_LOCATION_UPPER 1
+
+#define LISTEN_INT_USF GENMASK(15, 14)
+#define LISTEN_INT_UI GENMASK(13, 0)
+
+#define IEEE80211_MAX_USF FIELD_MAX(LISTEN_INT_USF)
+#define IEEE80211_MAX_UI FIELD_MAX(LISTEN_INT_UI)
+
+/* S1G encoding types */
+#define IEEE80211_S1G_TIM_ENC_MODE_BLOCK 0
+#define IEEE80211_S1G_TIM_ENC_MODE_SINGLE 1
+#define IEEE80211_S1G_TIM_ENC_MODE_OLB 2
+
+enum ieee80211_s1g_actioncode {
+ WLAN_S1G_AID_SWITCH_REQUEST,
+ WLAN_S1G_AID_SWITCH_RESPONSE,
+ WLAN_S1G_SYNC_CONTROL,
+ WLAN_S1G_STA_INFO_ANNOUNCE,
+ WLAN_S1G_EDCA_PARAM_SET,
+ WLAN_S1G_EL_OPERATION,
+ WLAN_S1G_TWT_SETUP,
+ WLAN_S1G_TWT_TEARDOWN,
+ WLAN_S1G_SECT_GROUP_ID_LIST,
+ WLAN_S1G_SECT_ID_FEEDBACK,
+ WLAN_S1G_TWT_INFORMATION = 11,
+};
+
+/**
+ * ieee80211_is_s1g_short_beacon - check if frame is an S1G short beacon
+ * @fc: frame control bytes in little-endian byteorder
+ * @variable: pointer to the beacon frame elements
+ * @variable_len: length of the frame elements
+ * Return: whether or not the frame is an S1G short beacon. As per
+ * IEEE80211-2024 11.1.3.10.1, the S1G beacon compatibility element shall
+ * always be present as the first element in beacon frames generated at a
+ * TBTT (Target Beacon Transmission Time), so any frame not containing
+ * this element must have been generated at a TSBTT (Target Short Beacon
+ * Transmission Time) that is not a TBTT. Additionally, short beacons are
+ * prohibited from containing the S1G beacon compatibility element as per
+ * IEEE80211-2024 9.3.4.3 Table 9-76, so if we have an S1G beacon with
+ * either no elements or the first element is not the beacon compatibility
+ * element, we have a short beacon.
+ */
+static inline bool ieee80211_is_s1g_short_beacon(__le16 fc, const u8 *variable,
+ size_t variable_len)
+{
+ if (!ieee80211_is_s1g_beacon(fc))
+ return false;
+
+ /*
+ * If the frame does not contain at least 1 element (this is perfectly
+ * valid in a short beacon) and is an S1G beacon, we have a short
+ * beacon.
+ */
+ if (variable_len < 2)
+ return true;
+
+ return variable[0] != WLAN_EID_S1G_BCN_COMPAT;
+}
+
+struct s1g_tim_aid {
+ u16 aid;
+ u8 target_blk; /* Target block index */
+ u8 target_subblk; /* Target subblock index */
+ u8 target_subblk_bit; /* Target subblock bit */
+};
+
+struct s1g_tim_enc_block {
+ u8 enc_mode;
+ bool inverse;
+ const u8 *ptr;
+ u8 len;
+
+ /*
+ * For an OLB encoded block that spans multiple blocks, this
+ * is the offset into the span described by that encoded block.
+ */
+ u8 olb_blk_offset;
+};
+
+/*
+ * Helper routines to quickly extract the length of an encoded block. Validation
+ * is also performed to ensure the length extracted lies within the TIM.
+ */
+
+static inline int ieee80211_s1g_len_bitmap(const u8 *ptr, const u8 *end)
+{
+ u8 blkmap;
+ u8 n_subblks;
+
+ if (ptr >= end)
+ return -EINVAL;
+
+ blkmap = *ptr;
+ n_subblks = hweight8(blkmap);
+
+ if (ptr + 1 + n_subblks > end)
+ return -EINVAL;
+
+ return 1 + n_subblks;
+}
+
+static inline int ieee80211_s1g_len_single(const u8 *ptr, const u8 *end)
+{
+ return (ptr + 1 > end) ? -EINVAL : 1;
+}
+
+static inline int ieee80211_s1g_len_olb(const u8 *ptr, const u8 *end)
+{
+ if (ptr >= end)
+ return -EINVAL;
+
+ return (ptr + 1 + *ptr > end) ? -EINVAL : 1 + *ptr;
+}
+
+/*
+ * Enumerate all encoded blocks until we find the encoded block that describes
+ * our target AID. OLB is a special case as a single encoded block can describe
+ * multiple blocks as a single encoded block.
+ */
+static inline int ieee80211_s1g_find_target_block(struct s1g_tim_enc_block *enc,
+ const struct s1g_tim_aid *aid,
+ const u8 *ptr, const u8 *end)
+{
+ /* need at least block-control octet */
+ while (ptr + 1 <= end) {
+ u8 ctrl = *ptr++;
+ u8 mode = ctrl & 0x03;
+ bool contains = false, inverse = ctrl & BIT(2);
+ u8 span, blk_off = ctrl >> 3;
+ int len;
+
+ switch (mode) {
+ case IEEE80211_S1G_TIM_ENC_MODE_BLOCK:
+ len = ieee80211_s1g_len_bitmap(ptr, end);
+ contains = blk_off == aid->target_blk;
+ break;
+ case IEEE80211_S1G_TIM_ENC_MODE_SINGLE:
+ len = ieee80211_s1g_len_single(ptr, end);
+ contains = blk_off == aid->target_blk;
+ break;
+ case IEEE80211_S1G_TIM_ENC_MODE_OLB:
+ len = ieee80211_s1g_len_olb(ptr, end);
+ /*
+ * An OLB encoded block can describe more than one
+ * block, i.e. a single encoded block can span
+ * multiple blocks.
+ */
+ if (len > 0) {
+ /* Minus one for the length octet */
+ span = DIV_ROUND_UP(len - 1, 8);
+ /*
+ * Check if our target block lies within the
+ * block span described by this encoded block.
+ */
+ contains = (aid->target_blk >= blk_off) &&
+ (aid->target_blk < blk_off + span);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (len < 0)
+ return len;
+
+ if (contains) {
+ enc->enc_mode = mode;
+ enc->inverse = inverse;
+ enc->ptr = ptr;
+ enc->len = (u8)len;
+ enc->olb_blk_offset = blk_off;
+ return 0;
+ }
+
+ ptr += len;
+ }
+
+ return -ENOENT;
+}
+
+static inline bool ieee80211_s1g_parse_bitmap(struct s1g_tim_enc_block *enc,
+ struct s1g_tim_aid *aid)
+{
+ const u8 *ptr = enc->ptr;
+ u8 blkmap = *ptr++;
+
+ /*
+ * If our block bitmap does not contain a set bit that corresponds
+ * to our AID, it could mean a variety of things depending on
+ * whether the encoding mode is inverted.
+ *
+ * 1. If inverted, it means the entire subblock is present and hence
+ * our AID has been set.
+ * 2. If not inverted, it means our subblock is not present and hence
+ * it is all zero meaning our AID is not set.
+ */
+ if (!(blkmap & BIT(aid->target_subblk)))
+ return enc->inverse;
+
+ /*
+ * Increment ptr by the number of set subblocks that appear before our
+ * target subblock. If our target subblock is 0, do nothing as ptr
+ * already points to our target subblock.
+ */
+ if (aid->target_subblk)
+ ptr += hweight8(blkmap & GENMASK(aid->target_subblk - 1, 0));
+
+ return !!(*ptr & BIT(aid->target_subblk_bit)) ^ enc->inverse;
+}
+
+static inline bool ieee80211_s1g_parse_single(struct s1g_tim_enc_block *enc,
+ struct s1g_tim_aid *aid)
+{
+ /*
+ * Single AID mode describes, as the name suggests, a single AID
+ * within the block described by the encoded block. The octet
+ * contains the 6 LSBs of the AID described in the block. The other
+ * 2 bits are reserved. When inverted, every AID described
+ * by the current block has buffered traffic except for the AID
+ * described in the single AID octet.
+ */
+ return ((*enc->ptr & 0x3f) == (aid->aid & 0x3f)) ^ enc->inverse;
+}
+
+static inline bool ieee80211_s1g_parse_olb(struct s1g_tim_enc_block *enc,
+ struct s1g_tim_aid *aid)
+{
+ const u8 *ptr = enc->ptr;
+ u8 blk_len = *ptr++;
+ /*
+ * Given an OLB encoded block that describes multiple blocks,
+ * calculate the offset into the span. Then calculate the
+ * subblock location normally.
+ */
+ u16 span_offset = aid->target_blk - enc->olb_blk_offset;
+ u16 subblk_idx = span_offset * 8 + aid->target_subblk;
+
+ if (subblk_idx >= blk_len)
+ return enc->inverse;
+
+ return !!(ptr[subblk_idx] & BIT(aid->target_subblk_bit)) ^ enc->inverse;
+}
+
+/*
+ * An S1G PVB has 3 non-optional encoding types, each of which can be inverted.
+ * An S1G PVB is constructed with zero or more encoded block subfields. Each
+ * encoded block represents a single "block" of AIDs (64), and each encoded
+ * block can contain one of the 3 encoding types alongside a single bit for
+ * whether the bits should be inverted.
+ *
+ * As the standard makes no guarantee about the ordering of encoded blocks,
+ * we must parse every encoded block in the worst case, e.g. when the
+ * target AID lies within the last block.
+ */
+static inline bool ieee80211_s1g_check_tim(const struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid)
+{
+ int err;
+ struct s1g_tim_aid target_aid;
+ struct s1g_tim_enc_block enc_blk;
+
+ if (tim_len < 3)
+ return false;
+
+ target_aid.aid = aid;
+ target_aid.target_blk = (aid >> 6) & 0x1f;
+ target_aid.target_subblk = (aid >> 3) & 0x7;
+ target_aid.target_subblk_bit = aid & 0x7;
+
+ /*
+ * Find our AID's target encoded block and fill &enc_blk with the
+ * encoded block's information. If no entry is found or an error
+ * occurs, return false.
+ */
+ err = ieee80211_s1g_find_target_block(&enc_blk, &target_aid,
+ tim->virtual_map,
+ (const u8 *)tim + tim_len + 2);
+ if (err)
+ return false;
+
+ switch (enc_blk.enc_mode) {
+ case IEEE80211_S1G_TIM_ENC_MODE_BLOCK:
+ return ieee80211_s1g_parse_bitmap(&enc_blk, &target_aid);
+ case IEEE80211_S1G_TIM_ENC_MODE_SINGLE:
+ return ieee80211_s1g_parse_single(&enc_blk, &target_aid);
+ case IEEE80211_S1G_TIM_ENC_MODE_OLB:
+ return ieee80211_s1g_parse_olb(&enc_blk, &target_aid);
+ default:
+ return false;
+ }
+}
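
The AID decomposition used above may be easier to see with a concrete value; as a sketch, AID 69 (binary 000001000101) lands in block 1, subblock 0, bit 5:

static void example_s1g_aid_target(u16 aid, struct s1g_tim_aid *t)
{
	t->aid = aid;
	t->target_blk = (aid >> 6) & 0x1f;	/* 69 -> 1 */
	t->target_subblk = (aid >> 3) & 0x7;	/* 69 -> 0 */
	t->target_subblk_bit = aid & 0x7;	/* 69 -> 5 */
}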
+
+#endif /* LINUX_IEEE80211_S1G_H */
diff --git a/include/linux/ieee80211-vht.h b/include/linux/ieee80211-vht.h
new file mode 100644
index 000000000000..898dfb561fef
--- /dev/null
+++ b/include/linux/ieee80211-vht.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IEEE 802.11 VHT definitions
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 - 2025 Intel Corporation
+ */
+
+#ifndef LINUX_IEEE80211_VHT_H
+#define LINUX_IEEE80211_VHT_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#define IEEE80211_MAX_MPDU_LEN_VHT_3895 3895
+#define IEEE80211_MAX_MPDU_LEN_VHT_7991 7991
+#define IEEE80211_MAX_MPDU_LEN_VHT_11454 11454
+
+/**
+ * enum ieee80211_vht_opmode_bits - VHT operating mode field bits
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK: channel width mask
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: 20 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: 80 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: 160 MHz or 80+80 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_BW_160_80P80: 160 / 80+80 MHz indicator flag
+ * @IEEE80211_OPMODE_NOTIF_RX_NSS_MASK: number of spatial streams mask
+ * (the NSS value is the value of this field + 1)
+ * @IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT: number of spatial streams shift
+ * @IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF: indicates streams in SU-MIMO PPDU
+ * using a beamforming steering matrix
+ */
+enum ieee80211_vht_opmode_bits {
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 0x03,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ = 0,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ = 1,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ = 2,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ = 3,
+ IEEE80211_OPMODE_NOTIF_BW_160_80P80 = 0x04,
+ IEEE80211_OPMODE_NOTIF_RX_NSS_MASK = 0x70,
+ IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT = 4,
+ IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 0x80,
+};
+
+/*
+ * Maximum length of AMPDU that the STA can receive in VHT.
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
+ */
+enum ieee80211_vht_max_ampdu_length_exp {
+ IEEE80211_VHT_MAX_AMPDU_8K = 0,
+ IEEE80211_VHT_MAX_AMPDU_16K = 1,
+ IEEE80211_VHT_MAX_AMPDU_32K = 2,
+ IEEE80211_VHT_MAX_AMPDU_64K = 3,
+ IEEE80211_VHT_MAX_AMPDU_128K = 4,
+ IEEE80211_VHT_MAX_AMPDU_256K = 5,
+ IEEE80211_VHT_MAX_AMPDU_512K = 6,
+ IEEE80211_VHT_MAX_AMPDU_1024K = 7
+};
+
+/**
+ * struct ieee80211_vht_mcs_info - VHT MCS information
+ * @rx_mcs_map: RX MCS map 2 bits for each stream, total 8 streams
+ * @rx_highest: Indicates highest long GI VHT PPDU data rate
+ * STA can receive. Rate expressed in units of 1 Mbps.
+ * If this field is 0, it should not be used to determine
+ * the highest supported RX data rate.
+ * The top 3 bits of this field indicate the Maximum NSTS Total
+ * (a beamformee capability).
+ * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams
+ * @tx_highest: Indicates highest long GI VHT PPDU data rate
+ * STA can transmit. Rate expressed in units of 1 Mbps.
+ * If this field is 0, it should not be used to determine
+ * the highest supported TX data rate.
+ * The top 2 bits of this field are reserved, the
+ * 3rd bit from the top indicates VHT Extended NSS BW
+ * Capability.
+ */
+struct ieee80211_vht_mcs_info {
+ __le16 rx_mcs_map;
+ __le16 rx_highest;
+ __le16 tx_mcs_map;
+ __le16 tx_highest;
+} __packed;
+
+/* for rx_highest */
+#define IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT 13
+#define IEEE80211_VHT_MAX_NSTS_TOTAL_MASK (7 << IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT)
+
+/* for tx_highest */
+#define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13)
+
+/**
+ * enum ieee80211_vht_mcs_support - VHT MCS support definitions
+ * @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
+ * number of streams
+ * @IEEE80211_VHT_MCS_SUPPORT_0_8: MCSes 0-8 are supported
+ * @IEEE80211_VHT_MCS_SUPPORT_0_9: MCSes 0-9 are supported
+ * @IEEE80211_VHT_MCS_NOT_SUPPORTED: This number of streams isn't supported
+ *
+ * These definitions are used in each 2-bit subfield of the @rx_mcs_map
+ * and @tx_mcs_map fields of &struct ieee80211_vht_mcs_info, which are
+ * both split into 8 subfields by number of streams. These values indicate
+ * which MCSes are supported for the number of streams the value appears
+ * for.
+ */
+enum ieee80211_vht_mcs_support {
+ IEEE80211_VHT_MCS_SUPPORT_0_7 = 0,
+ IEEE80211_VHT_MCS_SUPPORT_0_8 = 1,
+ IEEE80211_VHT_MCS_SUPPORT_0_9 = 2,
+ IEEE80211_VHT_MCS_NOT_SUPPORTED = 3,
+};
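
A sketch of pulling the 2-bit support subfield for a given stream count (1 to 8) out of one of the MCS maps:

static enum ieee80211_vht_mcs_support
example_vht_mcs_for_nss(__le16 mcs_map, int nss)
{
	return (le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 3;
}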
+
+/**
+ * struct ieee80211_vht_cap - VHT capabilities
+ *
+ * This structure is the "VHT capabilities element" as
+ * described in 802.11ac D3.0 8.4.2.160
+ * @vht_cap_info: VHT capability info
+ * @supp_mcs: VHT MCS supported rates
+ */
+struct ieee80211_vht_cap {
+ __le32 vht_cap_info;
+ struct ieee80211_vht_mcs_info supp_mcs;
+} __packed;
+
+/**
+ * enum ieee80211_vht_chanwidth - VHT channel width
+ * @IEEE80211_VHT_CHANWIDTH_USE_HT: use the HT operation IE to
+ * determine the channel width (20 or 40 MHz)
+ * @IEEE80211_VHT_CHANWIDTH_80MHZ: 80 MHz bandwidth
+ * @IEEE80211_VHT_CHANWIDTH_160MHZ: 160 MHz bandwidth
+ * @IEEE80211_VHT_CHANWIDTH_80P80MHZ: 80+80 MHz bandwidth
+ */
+enum ieee80211_vht_chanwidth {
+ IEEE80211_VHT_CHANWIDTH_USE_HT = 0,
+ IEEE80211_VHT_CHANWIDTH_80MHZ = 1,
+ IEEE80211_VHT_CHANWIDTH_160MHZ = 2,
+ IEEE80211_VHT_CHANWIDTH_80P80MHZ = 3,
+};
+
+/**
+ * struct ieee80211_vht_operation - VHT operation IE
+ *
+ * This structure is the "VHT operation element" as
+ * described in 802.11ac D3.0 8.4.2.161
+ * @chan_width: Operating channel width
+ * @center_freq_seg0_idx: center freq segment 0 index
+ * @center_freq_seg1_idx: center freq segment 1 index
+ * @basic_mcs_set: VHT Basic MCS rate set
+ */
+struct ieee80211_vht_operation {
+ u8 chan_width;
+ u8 center_freq_seg0_idx;
+ u8 center_freq_seg1_idx;
+ __le16 basic_mcs_set;
+} __packed;
+
+/* 802.11ac VHT Capabilities */
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002
+#define IEEE80211_VHT_CAP_MAX_MPDU_MASK 0x00000003
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_SHIFT 2
+#define IEEE80211_VHT_CAP_RXLDPC 0x00000010
+#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020
+#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040
+#define IEEE80211_VHT_CAP_TXSTBC 0x00000080
+#define IEEE80211_VHT_CAP_RXSTBC_1 0x00000100
+#define IEEE80211_VHT_CAP_RXSTBC_2 0x00000200
+#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
+#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
+#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
+#define IEEE80211_VHT_CAP_RXSTBC_SHIFT 8
+#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
+#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
+#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13
+#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK \
+ (7 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT)
+#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT 16
+#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK \
+ (7 << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT)
+#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000
+#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000
+#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000
+#define IEEE80211_VHT_CAP_HTC_VHT 0x00400000
+#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT 23
+#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK \
+ (7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT)
+#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB 0x08000000
+#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000
+#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
+#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
+#define IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT 30
+#define IEEE80211_VHT_CAP_EXT_NSS_BW_MASK 0xc0000000
+
+/**
+ * ieee80211_get_vht_max_nss - return max NSS for a given bandwidth/MCS
+ * @cap: VHT capabilities of the peer
+ * @bw: bandwidth to use
+ * @mcs: MCS index to use
+ * @ext_nss_bw_capable: indicates whether or not the local transmitter
+ * (rate scaling algorithm) can deal with the new logic
+ * (dot11VHTExtendedNSSBWCapable)
+ * @max_vht_nss: current maximum NSS as advertised by the STA in
+ * operating mode notification, can be 0 in which case the
+ * capability data will be used to derive this (from MCS support)
+ * Return: The maximum NSS that can be used for the given bandwidth/MCS
+ * combination
+ *
+ * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can
+ * vary for a given BW/MCS. This function derives the maximum NSS from
+ * the capability data for the given bandwidth/MCS combination.
+ *
+ * Note: This function is exported by cfg80211.
+ */
+int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
+ enum ieee80211_vht_chanwidth bw,
+ int mcs, bool ext_nss_bw_capable,
+ unsigned int max_vht_nss);
+
+/* VHT action codes */
+enum ieee80211_vht_actioncode {
+ WLAN_VHT_ACTION_COMPRESSED_BF = 0,
+ WLAN_VHT_ACTION_GROUPID_MGMT = 1,
+ WLAN_VHT_ACTION_OPMODE_NOTIF = 2,
+};
+
+#endif /* LINUX_IEEE80211_VHT_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 2967437f1b11..96439de55f07 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -9,7 +9,7 @@
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (c) 2018 - 2020 Intel Corporation
+ * Copyright (c) 2018 - 2025 Intel Corporation
*/
#ifndef LINUX_IEEE80211_H
@@ -18,8 +18,9 @@
#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
+#include <linux/bitfield.h>
#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
/*
* DS bit usage
@@ -42,6 +43,7 @@
#define IEEE80211_FCTL_VERS 0x0003
#define IEEE80211_FCTL_FTYPE 0x000c
#define IEEE80211_FCTL_STYPE 0x00f0
+#define IEEE80211_FCTL_TYPE (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)
#define IEEE80211_FCTL_TODS 0x0100
#define IEEE80211_FCTL_FROMDS 0x0200
#define IEEE80211_FCTL_MOREFRAGS 0x0400
@@ -75,6 +77,7 @@
#define IEEE80211_STYPE_ACTION 0x00D0
/* control */
+#define IEEE80211_STYPE_TRIGGER 0x0020
#define IEEE80211_STYPE_CTL_EXT 0x0060
#define IEEE80211_STYPE_BACK_REQ 0x0080
#define IEEE80211_STYPE_BACK 0x0090
@@ -107,15 +110,6 @@
#define IEEE80211_STYPE_DMG_BEACON 0x0000
#define IEEE80211_STYPE_S1G_BEACON 0x0010
-/* bits unique to S1G beacon */
-#define IEEE80211_S1G_BCN_NEXT_TBTT 0x100
-
-/* see 802.11ah-2016 9.9 NDP CMAC frames */
-#define IEEE80211_S1G_1MHZ_NDP_BITS 25
-#define IEEE80211_S1G_1MHZ_NDP_BYTES 4
-#define IEEE80211_S1G_2MHZ_NDP_BITS 37
-#define IEEE80211_S1G_2MHZ_NDP_BYTES 5
-
#define IEEE80211_NDP_FTYPE_CTS 0
#define IEEE80211_NDP_FTYPE_CF_END 0
#define IEEE80211_NDP_FTYPE_PS_POLL 1
@@ -151,9 +145,6 @@
#define IEEE80211_ANO_NETTYPE_WILD 15
-/* bits unique to S1G beacon */
-#define IEEE80211_S1G_BCN_NEXT_TBTT 0x100
-
/* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */
#define IEEE80211_CTL_EXT_POLL 0x2000
#define IEEE80211_CTL_EXT_SPR 0x3000
@@ -170,11 +161,11 @@
#define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1)
-/* PV1 Layout 11ah 9.8.3.1 */
+/* PV1 Layout IEEE 802.11-2020 9.8.3.1 */
#define IEEE80211_PV1_FCTL_VERS 0x0003
#define IEEE80211_PV1_FCTL_FTYPE 0x001c
#define IEEE80211_PV1_FCTL_STYPE 0x00e0
-#define IEEE80211_PV1_FCTL_TODS 0x0100
+#define IEEE80211_PV1_FCTL_FROMDS 0x0100
#define IEEE80211_PV1_FCTL_MOREFRAGS 0x0200
#define IEEE80211_PV1_FCTL_PM 0x0400
#define IEEE80211_PV1_FCTL_MOREDATA 0x0800
@@ -189,6 +180,11 @@ static inline bool ieee80211_sn_less(u16 sn1, u16 sn2)
return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1);
}
+static inline bool ieee80211_sn_less_eq(u16 sn1, u16 sn2)
+{
+ return ((sn2 - sn1) & IEEE80211_SN_MASK) <= (IEEE80211_SN_MODULO >> 1);
+}
+
static inline u16 ieee80211_sn_add(u16 sn1, u16 sn2)
{
return (sn1 + sn2) & IEEE80211_SN_MASK;
@@ -214,6 +210,7 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
#define IEEE80211_MAX_AID_S1G 8191
#define IEEE80211_MAX_TIM_LEN 251
#define IEEE80211_MAX_MESH_PEERINGS 63
+
/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
6.2.1.1.2.
@@ -227,21 +224,8 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
/* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */
#define IEEE80211_MAX_FRAME_LEN 2352
-/* Maximal size of an A-MSDU that can be transported in a HT BA session */
-#define IEEE80211_MAX_MPDU_LEN_HT_BA 4095
-
-/* Maximal size of an A-MSDU */
-#define IEEE80211_MAX_MPDU_LEN_HT_3839 3839
-#define IEEE80211_MAX_MPDU_LEN_HT_7935 7935
-
-#define IEEE80211_MAX_MPDU_LEN_VHT_3895 3895
-#define IEEE80211_MAX_MPDU_LEN_VHT_7991 7991
-#define IEEE80211_MAX_MPDU_LEN_VHT_11454 11454
-
#define IEEE80211_MAX_SSID_LEN 32
-#define IEEE80211_MAX_MESH_ID_LEN 32
-
#define IEEE80211_FIRST_TSPEC_TSID 8
#define IEEE80211_NUM_TIDS 16
@@ -292,14 +276,32 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
#define IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK 0x03
#define IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT 5
-#define IEEE80211_HT_CTL_LEN 4
+/* trigger type within common_info of trigger frame */
+#define IEEE80211_TRIGGER_TYPE_MASK 0xf
+#define IEEE80211_TRIGGER_TYPE_BASIC 0x0
+#define IEEE80211_TRIGGER_TYPE_BFRP 0x1
+#define IEEE80211_TRIGGER_TYPE_MU_BAR 0x2
+#define IEEE80211_TRIGGER_TYPE_MU_RTS 0x3
+#define IEEE80211_TRIGGER_TYPE_BSRP 0x4
+#define IEEE80211_TRIGGER_TYPE_GCR_MU_BAR 0x5
+#define IEEE80211_TRIGGER_TYPE_BQRP 0x6
+#define IEEE80211_TRIGGER_TYPE_NFRP 0x7
+
+/* UL-bandwidth within common_info of trigger frame */
+#define IEEE80211_TRIGGER_ULBW_MASK 0xc0000
+#define IEEE80211_TRIGGER_ULBW_20MHZ 0x0
+#define IEEE80211_TRIGGER_ULBW_40MHZ 0x1
+#define IEEE80211_TRIGGER_ULBW_80MHZ 0x2
+#define IEEE80211_TRIGGER_ULBW_160_80P80MHZ 0x3
struct ieee80211_hdr {
__le16 frame_control;
__le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
+ struct_group(addrs,
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ );
__le16 seq_ctrl;
u8 addr4[ETH_ALEN];
} __packed __aligned(2);
@@ -323,9 +325,30 @@ struct ieee80211_qos_hdr {
__le16 qos_ctrl;
} __packed __aligned(2);
+struct ieee80211_qos_hdr_4addr {
+ __le16 frame_control;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+} __packed __aligned(2);
+
+struct ieee80211_trigger {
+ __le16 frame_control;
+ __le16 duration;
+ u8 ra[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+ __le64 common_info;
+ u8 variable[];
+} __packed __aligned(2);
+
/**
* ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame has to-DS set
*/
static inline bool ieee80211_has_tods(__le16 fc)
{
@@ -335,6 +358,7 @@ static inline bool ieee80211_has_tods(__le16 fc)
/**
* ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame has from-DS set
*/
static inline bool ieee80211_has_fromds(__le16 fc)
{
@@ -344,6 +368,7 @@ static inline bool ieee80211_has_fromds(__le16 fc)
/**
* ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not it's a 4-address frame (from-DS and to-DS set)
*/
static inline bool ieee80211_has_a4(__le16 fc)
{
@@ -354,6 +379,7 @@ static inline bool ieee80211_has_a4(__le16 fc)
/**
* ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame has more fragments (more frags bit set)
*/
static inline bool ieee80211_has_morefrags(__le16 fc)
{
@@ -363,6 +389,7 @@ static inline bool ieee80211_has_morefrags(__le16 fc)
/**
* ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the retry flag is set
*/
static inline bool ieee80211_has_retry(__le16 fc)
{
@@ -372,6 +399,7 @@ static inline bool ieee80211_has_retry(__le16 fc)
/**
* ieee80211_has_pm - check if IEEE80211_FCTL_PM is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the power management flag is set
*/
static inline bool ieee80211_has_pm(__le16 fc)
{
@@ -381,6 +409,7 @@ static inline bool ieee80211_has_pm(__le16 fc)
/**
* ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the more data flag is set
*/
static inline bool ieee80211_has_moredata(__le16 fc)
{
@@ -390,6 +419,7 @@ static inline bool ieee80211_has_moredata(__le16 fc)
/**
* ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the protected flag is set
*/
static inline bool ieee80211_has_protected(__le16 fc)
{
@@ -399,6 +429,7 @@ static inline bool ieee80211_has_protected(__le16 fc)
/**
* ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the order flag is set
*/
static inline bool ieee80211_has_order(__le16 fc)
{
@@ -408,6 +439,7 @@ static inline bool ieee80211_has_order(__le16 fc)
/**
* ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame type is management
*/
static inline bool ieee80211_is_mgmt(__le16 fc)
{
@@ -418,6 +450,7 @@ static inline bool ieee80211_is_mgmt(__le16 fc)
/**
* ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame type is control
*/
static inline bool ieee80211_is_ctl(__le16 fc)
{
@@ -428,6 +461,7 @@ static inline bool ieee80211_is_ctl(__le16 fc)
/**
* ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a data frame
*/
static inline bool ieee80211_is_data(__le16 fc)
{
@@ -438,6 +472,7 @@ static inline bool ieee80211_is_data(__le16 fc)
/**
* ieee80211_is_ext - check if type is IEEE80211_FTYPE_EXT
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame type is extended
*/
static inline bool ieee80211_is_ext(__le16 fc)
{
@@ -449,6 +484,7 @@ static inline bool ieee80211_is_ext(__le16 fc)
/**
* ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a QoS data frame
*/
static inline bool ieee80211_is_data_qos(__le16 fc)
{
@@ -463,6 +499,8 @@ static inline bool ieee80211_is_data_qos(__le16 fc)
/**
* ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a QoS data frame that has data
+ * (i.e. is not null data)
*/
static inline bool ieee80211_is_data_present(__le16 fc)
{
@@ -477,6 +515,7 @@ static inline bool ieee80211_is_data_present(__le16 fc)
/**
* ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an association request
*/
static inline bool ieee80211_is_assoc_req(__le16 fc)
{
@@ -487,6 +526,7 @@ static inline bool ieee80211_is_assoc_req(__le16 fc)
/**
* ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an association response
*/
static inline bool ieee80211_is_assoc_resp(__le16 fc)
{
@@ -497,6 +537,7 @@ static inline bool ieee80211_is_assoc_resp(__le16 fc)
/**
* ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a reassociation request
*/
static inline bool ieee80211_is_reassoc_req(__le16 fc)
{
@@ -507,6 +548,7 @@ static inline bool ieee80211_is_reassoc_req(__le16 fc)
/**
* ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a reassociation response
*/
static inline bool ieee80211_is_reassoc_resp(__le16 fc)
{
@@ -517,6 +559,7 @@ static inline bool ieee80211_is_reassoc_resp(__le16 fc)
/**
* ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a probe request
*/
static inline bool ieee80211_is_probe_req(__le16 fc)
{
@@ -527,6 +570,7 @@ static inline bool ieee80211_is_probe_req(__le16 fc)
/**
* ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a probe response
*/
static inline bool ieee80211_is_probe_resp(__le16 fc)
{
@@ -537,6 +581,7 @@ static inline bool ieee80211_is_probe_resp(__le16 fc)
/**
* ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a (regular, not S1G) beacon
*/
static inline bool ieee80211_is_beacon(__le16 fc)
{
@@ -545,42 +590,9 @@ static inline bool ieee80211_is_beacon(__le16 fc)
}
/**
- * ieee80211_is_s1g_beacon - check if IEEE80211_FTYPE_EXT &&
- * IEEE80211_STYPE_S1G_BEACON
- * @fc: frame control bytes in little-endian byteorder
- */
-static inline bool ieee80211_is_s1g_beacon(__le16 fc)
-{
- return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE |
- IEEE80211_FCTL_STYPE)) ==
- cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON);
-}
-
-/**
- * ieee80211_next_tbtt_present - check if IEEE80211_FTYPE_EXT &&
- * IEEE80211_STYPE_S1G_BEACON && IEEE80211_S1G_BCN_NEXT_TBTT
- * @fc: frame control bytes in little-endian byteorder
- */
-static inline bool ieee80211_next_tbtt_present(__le16 fc)
-{
- return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
- cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON) &&
- fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT);
-}
-
-/**
- * ieee80211_is_s1g_short_beacon - check if next tbtt present bit is set. Only
- * true for S1G beacons when they're short.
- * @fc: frame control bytes in little-endian byteorder
- */
-static inline bool ieee80211_is_s1g_short_beacon(__le16 fc)
-{
- return ieee80211_is_s1g_beacon(fc) && ieee80211_next_tbtt_present(fc);
-}
-
-/**
* ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an ATIM frame
*/
static inline bool ieee80211_is_atim(__le16 fc)
{
@@ -591,6 +603,7 @@ static inline bool ieee80211_is_atim(__le16 fc)
/**
* ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a disassociation frame
*/
static inline bool ieee80211_is_disassoc(__le16 fc)
{
@@ -601,6 +614,7 @@ static inline bool ieee80211_is_disassoc(__le16 fc)
/**
* ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an authentication frame
*/
static inline bool ieee80211_is_auth(__le16 fc)
{
@@ -611,6 +625,7 @@ static inline bool ieee80211_is_auth(__le16 fc)
/**
* ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a deauthentication frame
*/
static inline bool ieee80211_is_deauth(__le16 fc)
{
@@ -621,6 +636,7 @@ static inline bool ieee80211_is_deauth(__le16 fc)
/**
* ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an action frame
*/
static inline bool ieee80211_is_action(__le16 fc)
{
@@ -631,6 +647,7 @@ static inline bool ieee80211_is_action(__le16 fc)
/**
* ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a block-ACK request frame
*/
static inline bool ieee80211_is_back_req(__le16 fc)
{
@@ -641,6 +658,7 @@ static inline bool ieee80211_is_back_req(__le16 fc)
/**
* ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a block-ACK frame
*/
static inline bool ieee80211_is_back(__le16 fc)
{
@@ -651,6 +669,7 @@ static inline bool ieee80211_is_back(__le16 fc)
/**
* ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a PS-poll frame
*/
static inline bool ieee80211_is_pspoll(__le16 fc)
{
@@ -661,6 +680,7 @@ static inline bool ieee80211_is_pspoll(__le16 fc)
/**
* ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an RTS frame
*/
static inline bool ieee80211_is_rts(__le16 fc)
{
@@ -671,6 +691,7 @@ static inline bool ieee80211_is_rts(__le16 fc)
/**
* ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a CTS frame
*/
static inline bool ieee80211_is_cts(__le16 fc)
{
@@ -681,6 +702,7 @@ static inline bool ieee80211_is_cts(__le16 fc)
/**
* ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is an ACK frame
*/
static inline bool ieee80211_is_ack(__le16 fc)
{
@@ -691,6 +713,7 @@ static inline bool ieee80211_is_ack(__le16 fc)
/**
* ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a CF-end frame
*/
static inline bool ieee80211_is_cfend(__le16 fc)
{
@@ -701,6 +724,7 @@ static inline bool ieee80211_is_cfend(__le16 fc)
/**
* ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a CF-end-ack frame
*/
static inline bool ieee80211_is_cfendack(__le16 fc)
{
@@ -711,6 +735,7 @@ static inline bool ieee80211_is_cfendack(__le16 fc)
/**
* ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a nullfunc frame
*/
static inline bool ieee80211_is_nullfunc(__le16 fc)
{
@@ -721,6 +746,7 @@ static inline bool ieee80211_is_nullfunc(__le16 fc)
/**
* ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame
* @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a QoS nullfunc frame
*/
static inline bool ieee80211_is_qos_nullfunc(__le16 fc)
{
@@ -729,31 +755,31 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc)
}
/**
- * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame
- * @fc: frame control bytes in little-endian byteorder
+ * ieee80211_is_trigger - check if frame is trigger frame
+ * @fc: frame control field in little-endian byteorder
+ * Return: whether or not the frame is a trigger frame
*/
-static inline bool ieee80211_is_any_nullfunc(__le16 fc)
+static inline bool ieee80211_is_trigger(__le16 fc)
{
- return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc));
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_TRIGGER);
}
/**
- * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
- * @fc: frame control field in little-endian byteorder
+ * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame
+ * @fc: frame control bytes in little-endian byteorder
+ * Return: whether or not the frame is a nullfunc or QoS nullfunc frame
*/
-static inline bool ieee80211_is_bufferable_mmpdu(__le16 fc)
+static inline bool ieee80211_is_any_nullfunc(__le16 fc)
{
- /* IEEE 802.11-2012, definition of "bufferable management frame";
- * note that this ignores the IBSS special case. */
- return ieee80211_is_mgmt(fc) &&
- (ieee80211_is_action(fc) ||
- ieee80211_is_disassoc(fc) ||
- ieee80211_is_deauth(fc));
+ return (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc));
}
/**
* ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set
* @seq_ctrl: frame sequence control bytes in little-endian byteorder
+ * Return: whether or not the frame is the first fragment (also true if
+ * it's not fragmented at all)
*/
static inline bool ieee80211_is_first_frag(__le16 seq_ctrl)
{
@@ -763,6 +789,7 @@ static inline bool ieee80211_is_first_frag(__le16 seq_ctrl)
/**
* ieee80211_is_frag - check if a frame is a fragment
* @hdr: 802.11 header of the frame
+ * Return: whether or not the frame is a fragment
*/
static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr)
{
@@ -770,44 +797,20 @@ static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr)
hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
}
-struct ieee80211s_hdr {
- u8 flags;
- u8 ttl;
- __le32 seqnum;
- u8 eaddr1[ETH_ALEN];
- u8 eaddr2[ETH_ALEN];
-} __packed __aligned(2);
-
-/* Mesh flags */
-#define MESH_FLAGS_AE_A4 0x1
-#define MESH_FLAGS_AE_A5_A6 0x2
-#define MESH_FLAGS_AE 0x3
-#define MESH_FLAGS_PS_DEEP 0x4
-
-/**
- * enum ieee80211_preq_flags - mesh PREQ element flags
- *
- * @IEEE80211_PREQ_PROACTIVE_PREP_FLAG: proactive PREP subfield
- */
-enum ieee80211_preq_flags {
- IEEE80211_PREQ_PROACTIVE_PREP_FLAG = 1<<2,
-};
-
-/**
- * enum ieee80211_preq_target_flags - mesh PREQ element per target flags
- *
- * @IEEE80211_PREQ_TO_FLAG: target only subfield
- * @IEEE80211_PREQ_USN_FLAG: unknown target HWMP sequence number subfield
- */
-enum ieee80211_preq_target_flags {
- IEEE80211_PREQ_TO_FLAG = 1<<0,
- IEEE80211_PREQ_USN_FLAG = 1<<2,
-};
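+/**
+ * ieee80211_get_sn - extract the sequence number from the sequence control
+ * @hdr: the frame header
+ * Return: the 12-bit sequence number (0-4095), with the fragment
+ * number bits masked off and the value shifted down
+ */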
+static inline u16 ieee80211_get_sn(struct ieee80211_hdr *hdr)
+{
+ return le16_get_bits(hdr->seq_ctrl, IEEE80211_SCTL_SEQ);
+}
/**
- * struct ieee80211_quiet_ie
+ * struct ieee80211_quiet_ie - Quiet element
+ * @count: Quiet Count
+ * @period: Quiet Period
+ * @duration: Quiet Duration
+ * @offset: Quiet Offset
*
- * This structure refers to "Quiet information element"
+ * This structure represents the payload of the "Quiet element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.22.
*/
struct ieee80211_quiet_ie {
u8 count;
@@ -817,9 +820,15 @@ struct ieee80211_quiet_ie {
} __packed;
/**
- * struct ieee80211_msrment_ie
+ * struct ieee80211_msrment_ie - Measurement element
+ * @token: Measurement Token
+ * @mode: Measurement Report Mode
+ * @type: Measurement Type
+ * @request: Measurement Request or Measurement Report
*
- * This structure refers to "Measurement Request/Report information element"
+ * This structure represents the payload of both the "Measurement
+ * Request element" and the "Measurement Report element" as described
+ * in IEEE Std 802.11-2020 sections 9.4.2.20 and 9.4.2.21.
*/
struct ieee80211_msrment_ie {
u8 token;
@@ -829,9 +838,14 @@ struct ieee80211_msrment_ie {
} __packed;
/**
- * struct ieee80211_channel_sw_ie
+ * struct ieee80211_channel_sw_ie - Channel Switch Announcement element
+ * @mode: Channel Switch Mode
+ * @new_ch_num: New Channel Number
+ * @count: Channel Switch Count
*
- * This structure refers to "Channel Switch Announcement information element"
+ * This structure represents the payload of the "Channel Switch
+ * Announcement element" as described in IEEE Std 802.11-2020 section
+ * 9.4.2.18.
*/
struct ieee80211_channel_sw_ie {
u8 mode;
@@ -840,9 +854,14 @@ struct ieee80211_channel_sw_ie {
} __packed;
/**
- * struct ieee80211_ext_chansw_ie
+ * struct ieee80211_ext_chansw_ie - Extended Channel Switch Announcement element
+ * @mode: Channel Switch Mode
+ * @new_operating_class: New Operating Class
+ * @new_ch_num: New Channel Number
+ * @count: Channel Switch Count
*
- * This structure represents the "Extended Channel Switch Announcement element"
+ * This structure represents the "Extended Channel Switch Announcement
+ * element" as described in IEEE Std 802.11-2020 section 9.4.2.52.
*/
struct ieee80211_ext_chansw_ie {
u8 mode;
@@ -862,19 +881,14 @@ struct ieee80211_sec_chan_offs_ie {
} __packed;
/**
- * struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE
- *
- * This structure represents the "Mesh Channel Switch Paramters element"
- */
-struct ieee80211_mesh_chansw_params_ie {
- u8 mesh_ttl;
- u8 mesh_flags;
- __le16 mesh_reason;
- __le16 mesh_pre_value;
-} __packed;
-
-/**
* struct ieee80211_wide_bw_chansw_ie - wide bandwidth channel switch IE
+ * @new_channel_width: New Channel Width
+ * @new_center_freq_seg0: New Channel Center Frequency Segment 0
+ * @new_center_freq_seg1: New Channel Center Frequency Segment 1
+ *
+ * This structure represents the payload of the "Wide Bandwidth
+ * Channel Switch element" as described in IEEE Std 802.11-2020
+ * section 9.4.2.160.
*/
struct ieee80211_wide_bw_chansw_ie {
u8 new_channel_width;
@@ -882,138 +896,41 @@ struct ieee80211_wide_bw_chansw_ie {
} __packed;
/**
- * struct ieee80211_tim
+ * struct ieee80211_tim_ie - Traffic Indication Map information element
+ * @dtim_count: DTIM Count
+ * @dtim_period: DTIM Period
+ * @bitmap_ctrl: Bitmap Control
+ * @required_octet: "Syntatic sugar" to force the struct size to the
+ * minimum valid size when carried in a non-S1G PPDU
+ * @virtual_map: Partial Virtual Bitmap
*
- * This structure refers to "Traffic Indication Map information element"
+ * This structure represents the payload of the "TIM element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.5. Note that this
+ * definition is only applicable when the element is carried in a
+ * non-S1G PPDU. When the TIM is carried in an S1G PPDU, the Bitmap
+ * Control and Partial Virtual Bitmap may not be present.
*/
struct ieee80211_tim_ie {
u8 dtim_count;
u8 dtim_period;
u8 bitmap_ctrl;
- /* variable size: 1 - 251 bytes */
- u8 virtual_map[1];
-} __packed;
-
-/**
- * struct ieee80211_meshconf_ie
- *
- * This structure refers to "Mesh Configuration information element"
- */
-struct ieee80211_meshconf_ie {
- u8 meshconf_psel;
- u8 meshconf_pmetric;
- u8 meshconf_congest;
- u8 meshconf_synch;
- u8 meshconf_auth;
- u8 meshconf_form;
- u8 meshconf_cap;
-} __packed;
-
-/**
- * enum mesh_config_capab_flags - Mesh Configuration IE capability field flags
- *
- * @IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish
- * additional mesh peerings with other mesh STAs
- * @IEEE80211_MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs
- * @IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure
- * is ongoing
- * @IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL: STA is in deep sleep mode or has
- * neighbors in deep sleep mode
- */
-enum mesh_config_capab_flags {
- IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS = 0x01,
- IEEE80211_MESHCONF_CAPAB_FORWARDING = 0x08,
- IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING = 0x20,
- IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL = 0x40,
-};
-
-#define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1
-
-/**
- * mesh channel switch parameters element's flag indicator
- *
- */
-#define WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT BIT(0)
-#define WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR BIT(1)
-#define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2)
-
-/**
- * struct ieee80211_rann_ie
- *
- * This structure refers to "Root Announcement information element"
- */
-struct ieee80211_rann_ie {
- u8 rann_flags;
- u8 rann_hopcount;
- u8 rann_ttl;
- u8 rann_addr[ETH_ALEN];
- __le32 rann_seq;
- __le32 rann_interval;
- __le32 rann_metric;
+ union {
+ u8 required_octet;
+ DECLARE_FLEX_ARRAY(u8, virtual_map);
+ };
} __packed;
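The union keeps sizeof(struct ieee80211_tim_ie) at the four-octet minimum of a non-S1G TIM while still exposing a proper flexible array for the bitmap. A sketch of a length check built on that (helper name is illustrative):

static inline bool example_tim_ie_len_valid(u8 elem_len)
{
	/* a non-S1G TIM carries at least one virtual map octet */
	return elem_len >= sizeof(struct ieee80211_tim_ie);
}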
-enum ieee80211_rann_flags {
- RANN_FLAG_IS_GATE = 1 << 0,
-};
-
-enum ieee80211_ht_chanwidth_values {
- IEEE80211_HT_CHANWIDTH_20MHZ = 0,
- IEEE80211_HT_CHANWIDTH_ANY = 1,
-};
-
-/**
- * enum ieee80211_opmode_bits - VHT operating mode field bits
- * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK: channel width mask
- * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: 20 MHz channel width
- * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width
- * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: 80 MHz channel width
- * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: 160 MHz or 80+80 MHz channel width
- * @IEEE80211_OPMODE_NOTIF_BW_160_80P80: 160 / 80+80 MHz indicator flag
- * @IEEE80211_OPMODE_NOTIF_RX_NSS_MASK: number of spatial streams mask
- * (the NSS value is the value of this field + 1)
- * @IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT: number of spatial streams shift
- * @IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF: indicates streams in SU-MIMO PPDU
- * using a beamforming steering matrix
- */
-enum ieee80211_vht_opmode_bits {
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 0x03,
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ = 0,
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ = 1,
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ = 2,
- IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ = 3,
- IEEE80211_OPMODE_NOTIF_BW_160_80P80 = 0x04,
- IEEE80211_OPMODE_NOTIF_RX_NSS_MASK = 0x70,
- IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT = 4,
- IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 0x80,
-};
-
-/**
- * enum ieee80211_s1g_chanwidth
- * These are defined in IEEE802.11-2016ah Table 10-20
- * as BSS Channel Width
- *
- * @IEEE80211_S1G_CHANWIDTH_1MHZ: 1MHz operating channel
- * @IEEE80211_S1G_CHANWIDTH_2MHZ: 2MHz operating channel
- * @IEEE80211_S1G_CHANWIDTH_4MHZ: 4MHz operating channel
- * @IEEE80211_S1G_CHANWIDTH_8MHZ: 8MHz operating channel
- * @IEEE80211_S1G_CHANWIDTH_16MHZ: 16MHz operating channel
- */
-enum ieee80211_s1g_chanwidth {
- IEEE80211_S1G_CHANWIDTH_1MHZ = 0,
- IEEE80211_S1G_CHANWIDTH_2MHZ = 1,
- IEEE80211_S1G_CHANWIDTH_4MHZ = 3,
- IEEE80211_S1G_CHANWIDTH_8MHZ = 7,
- IEEE80211_S1G_CHANWIDTH_16MHZ = 15,
-};
-
#define WLAN_SA_QUERY_TR_ID_LEN 2
#define WLAN_MEMBERSHIP_LEN 8
#define WLAN_USER_POSITION_LEN 16
/**
- * struct ieee80211_tpc_report_ie
+ * struct ieee80211_tpc_report_ie - TPC Report element
+ * @tx_power: Transmit Power
+ * @link_margin: Link Margin
*
- * This structure refers to "TPC Report element"
+ * This structure represents the payload of the "TPC Report element" as
+ * described in IEEE Std 802.11-2020 section 9.4.2.16.
*/
struct ieee80211_tpc_report_ie {
u8 tx_power;
@@ -1023,51 +940,13 @@ struct ieee80211_tpc_report_ie {
#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK GENMASK(2, 1)
#define IEEE80211_ADDBA_EXT_FRAG_LEVEL_SHIFT 1
#define IEEE80211_ADDBA_EXT_NO_FRAG BIT(0)
+#define IEEE80211_ADDBA_EXT_BUF_SIZE_MASK GENMASK(7, 5)
+#define IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT 10
struct ieee80211_addba_ext_ie {
u8 data;
} __packed;
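A sketch of how the two new definitions combine with the 10-bit Buffer Size from the ADDBA capability field (illustrative helper; u8_get_bits() is from <linux/bitfield.h>):

static inline u16 example_addba_ext_buf_size(u16 capab_buf_size,
					     const struct ieee80211_addba_ext_ie *ext)
{
	/* bits 5-7 of the extension extend the buffer size beyond 1023 */
	return capab_buf_size |
	       (u8_get_bits(ext->data, IEEE80211_ADDBA_EXT_BUF_SIZE_MASK) <<
		IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT);
}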
-/**
- * struct ieee80211_s1g_bcn_compat_ie
- *
- * S1G Beacon Compatibility element
- */
-struct ieee80211_s1g_bcn_compat_ie {
- __le16 compat_info;
- __le16 beacon_int;
- __le32 tsf_completion;
-} __packed;
-
-/**
- * struct ieee80211_s1g_oper_ie
- *
- * S1G Operation element
- */
-struct ieee80211_s1g_oper_ie {
- u8 ch_width;
- u8 oper_class;
- u8 primary_ch;
- u8 oper_ch;
- __le16 basic_mcs_nss;
-} __packed;
-
-/**
- * struct ieee80211_aid_response_ie
- *
- * AID Response element
- */
-struct ieee80211_aid_response_ie {
- __le16 aid;
- u8 switch_count;
- __le16 response_int;
-} __packed;
-
-struct ieee80211_s1g_cap {
- u8 capab_info[10];
- u8 supp_mcs_nss[5];
-} __packed;
-
struct ieee80211_ext {
__le16 frame_control;
__le16 duration;
@@ -1076,18 +955,29 @@ struct ieee80211_ext {
u8 sa[ETH_ALEN];
__le32 timestamp;
u8 change_seq;
- u8 variable[0];
+ u8 variable[];
} __packed s1g_beacon;
- struct {
- u8 sa[ETH_ALEN];
- __le32 timestamp;
- u8 change_seq;
- u8 next_tbtt[3];
- u8 variable[0];
- } __packed s1g_short_beacon;
} u;
} __packed __aligned(2);
+/**
+ * struct ieee80211_bss_load_elem - BSS Load element
+ *
+ * Defined in section 9.4.2.26 of IEEE 802.11-REVme D4.1.
+ *
+ * @sta_count: total number of STAs currently associated with the AP.
+ * @channel_util: fraction of time that the access point sensed the channel
+ * was busy, linearly scaled to the range [0, 255] so that 255 means
+ * 100% busy.
+ * @avail_admission_capa: remaining amount of medium time available via
+ * explicit admission control.
+ */
+struct ieee80211_bss_load_elem {
+ __le16 sta_count;
+ u8 channel_util;
+ __le16 avail_admission_capa;
+} __packed;
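For instance (an illustrative conversion, not part of the change), the utilization maps to an integer percentage as:

static inline unsigned int
example_bss_load_util_pct(const struct ieee80211_bss_load_elem *elem)
{
	/* 255 corresponds to 100% busy */
	return elem->channel_util * 100U / 255U;
}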
+
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
@@ -1101,7 +991,7 @@ struct ieee80211_mgmt {
__le16 auth_transaction;
__le16 status_code;
/* possibly followed by Challenge text */
- u8 variable[0];
+ u8 variable[];
} __packed auth;
struct {
__le16 reason_code;
@@ -1110,26 +1000,26 @@ struct ieee80211_mgmt {
__le16 capab_info;
__le16 listen_interval;
/* followed by SSID and Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed assoc_req;
struct {
__le16 capab_info;
__le16 status_code;
__le16 aid;
/* followed by Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed assoc_resp, reassoc_resp;
struct {
__le16 capab_info;
__le16 status_code;
- u8 variable[0];
+ u8 variable[];
} __packed s1g_assoc_resp, s1g_reassoc_resp;
struct {
__le16 capab_info;
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
/* followed by SSID and Supported rates */
- u8 variable[0];
+ u8 variable[];
} __packed reassoc_req;
struct {
__le16 reason_code;
@@ -1140,11 +1030,11 @@ struct ieee80211_mgmt {
__le16 capab_info;
/* followed by some of SSID, Supported rates,
* FH Params, DS Params, CF Params, IBSS Params, TIM */
- u8 variable[0];
+ u8 variable[];
} __packed beacon;
struct {
/* only variable items: SSID, Supported rates */
- u8 variable[0];
+ DECLARE_FLEX_ARRAY(u8, variable);
} __packed probe_req;
struct {
__le64 timestamp;
@@ -1152,7 +1042,7 @@ struct ieee80211_mgmt {
__le16 capab_info;
/* followed by some of SSID, Supported rates,
* FH Params, DS Params, CF Params, IBSS Params */
- u8 variable[0];
+ u8 variable[];
} __packed probe_resp;
struct {
u8 category;
@@ -1161,16 +1051,16 @@ struct ieee80211_mgmt {
u8 action_code;
u8 dialog_token;
u8 status_code;
- u8 variable[0];
+ u8 variable[];
} __packed wme_action;
struct{
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed chan_switch;
struct{
u8 action_code;
struct ieee80211_ext_chansw_ie data;
- u8 variable[0];
+ u8 variable[];
} __packed ext_chan_switch;
struct{
u8 action_code;
@@ -1186,7 +1076,7 @@ struct ieee80211_mgmt {
__le16 timeout;
__le16 start_seq_num;
/* followed by BA Extension */
- u8 variable[0];
+ u8 variable[];
} __packed addba_req;
struct{
u8 action_code;
@@ -1194,6 +1084,8 @@ struct ieee80211_mgmt {
__le16 status;
__le16 capab;
__le16 timeout;
+ /* followed by BA Extension */
+ u8 variable[];
} __packed addba_resp;
struct{
u8 action_code;
@@ -1202,11 +1094,11 @@ struct ieee80211_mgmt {
} __packed delba;
struct {
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed self_prot;
struct{
u8 action_code;
- u8 variable[0];
+ u8 variable[];
} __packed mesh_action;
struct {
u8 action;
@@ -1224,7 +1116,7 @@ struct ieee80211_mgmt {
u8 action_code;
u8 dialog_token;
__le16 capability;
- u8 variable[0];
+ u8 variable[];
} __packed tdls_discover_resp;
struct {
u8 action_code;
@@ -1250,24 +1142,72 @@ struct ieee80211_mgmt {
u8 toa[6];
__le16 tod_error;
__le16 toa_error;
- u8 variable[0];
+ u8 variable[];
} __packed ftm;
+ struct {
+ u8 action_code;
+ u8 variable[];
+ } __packed s1g;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 follow_up;
+ u32 tod;
+ u32 toa;
+ u8 max_tod_error;
+ u8 max_toa_error;
+ } __packed wnm_timing_msr;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 variable[];
+ } __packed ttlm_req;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ __le16 status_code;
+ u8 variable[];
+ } __packed ttlm_res;
+ struct {
+ u8 action_code;
+ } __packed ttlm_tear_down;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 variable[];
+ } __packed ml_reconf_req;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 count;
+ u8 variable[];
+ } __packed ml_reconf_resp;
+ struct {
+ u8 action_code;
+ u8 variable[];
+ } __packed epcs;
} u;
} __packed action;
+ DECLARE_FLEX_ARRAY(u8, body); /* Generic frame body */
} u;
} __packed __aligned(2);
/* Supported rates membership selectors */
#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126
-#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122
+#define BSS_MEMBERSHIP_SELECTOR_GLK 125
+#define BSS_MEMBERSHIP_SELECTOR_EPD 124
#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123
+#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122
+#define BSS_MEMBERSHIP_SELECTOR_EHT_PHY 121
+
+#define BSS_MEMBERSHIP_SELECTOR_MIN BSS_MEMBERSHIP_SELECTOR_EHT_PHY
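With the selectors listed contiguously down to BSS_MEMBERSHIP_SELECTOR_MIN, a Supported Rates octet can be classified with a simple range check. A hedged sketch, assuming the selector is encoded with the basic-rate bit (0x80) set:

static inline bool example_is_membership_selector(u8 rate_octet)
{
	return (rate_octet & 0x80) &&
	       (rate_octet & 0x7f) >= BSS_MEMBERSHIP_SELECTOR_MIN;
}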
/* mgmt header + 1 byte category code */
#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
-/* Management MIC information element (IEEE 802.11w) */
+/* Management MIC information element (IEEE 802.11w) for CMAC */
struct ieee80211_mmie {
u8 element_id;
u8 length;
@@ -1285,6 +1225,15 @@ struct ieee80211_mmie_16 {
u8 mic[16];
} __packed;
+/* Management MIC information element (IEEE 802.11w) for all variants */
+struct ieee80211_mmie_var {
+ u8 element_id;
+ u8 length;
+ __le16 key_id;
+ u8 sequence_number[6];
+ u8 mic[]; /* 8 or 16 bytes */
+} __packed;
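Because every field before the MIC is fixed-size, the MIC length (8 for BIP-CMAC-128, 16 for the 256-bit and GMAC variants) follows from the element length; an illustrative helper:

static inline u8 example_mmie_mic_len(const struct ieee80211_mmie_var *mmie)
{
	/* element length minus Key ID (2) and IPN (6) leaves the MIC */
	return mmie->length - sizeof(mmie->key_id) -
	       sizeof(mmie->sequence_number);
}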
+
struct ieee80211_vendor_ie {
u8 element_id;
u8 len;
@@ -1361,1121 +1310,39 @@ struct ieee80211_tdls_data {
struct {
u8 dialog_token;
__le16 capability;
- u8 variable[0];
+ u8 variable[];
} __packed setup_req;
struct {
__le16 status_code;
u8 dialog_token;
__le16 capability;
- u8 variable[0];
+ u8 variable[];
} __packed setup_resp;
struct {
__le16 status_code;
u8 dialog_token;
- u8 variable[0];
+ u8 variable[];
} __packed setup_cfm;
struct {
__le16 reason_code;
- u8 variable[0];
+ u8 variable[];
} __packed teardown;
struct {
u8 dialog_token;
- u8 variable[0];
+ u8 variable[];
} __packed discover_req;
struct {
u8 target_channel;
u8 oper_class;
- u8 variable[0];
+ u8 variable[];
} __packed chan_switch_req;
struct {
__le16 status_code;
- u8 variable[0];
+ u8 variable[];
} __packed chan_switch_resp;
} u;
} __packed;
-/*
- * Peer-to-Peer IE attribute related definitions.
- */
-/**
- * enum ieee80211_p2p_attr_id - identifies type of peer-to-peer attribute.
- */
-enum ieee80211_p2p_attr_id {
- IEEE80211_P2P_ATTR_STATUS = 0,
- IEEE80211_P2P_ATTR_MINOR_REASON,
- IEEE80211_P2P_ATTR_CAPABILITY,
- IEEE80211_P2P_ATTR_DEVICE_ID,
- IEEE80211_P2P_ATTR_GO_INTENT,
- IEEE80211_P2P_ATTR_GO_CONFIG_TIMEOUT,
- IEEE80211_P2P_ATTR_LISTEN_CHANNEL,
- IEEE80211_P2P_ATTR_GROUP_BSSID,
- IEEE80211_P2P_ATTR_EXT_LISTEN_TIMING,
- IEEE80211_P2P_ATTR_INTENDED_IFACE_ADDR,
- IEEE80211_P2P_ATTR_MANAGABILITY,
- IEEE80211_P2P_ATTR_CHANNEL_LIST,
- IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
- IEEE80211_P2P_ATTR_DEVICE_INFO,
- IEEE80211_P2P_ATTR_GROUP_INFO,
- IEEE80211_P2P_ATTR_GROUP_ID,
- IEEE80211_P2P_ATTR_INTERFACE,
- IEEE80211_P2P_ATTR_OPER_CHANNEL,
- IEEE80211_P2P_ATTR_INVITE_FLAGS,
- /* 19 - 220: Reserved */
- IEEE80211_P2P_ATTR_VENDOR_SPECIFIC = 221,
-
- IEEE80211_P2P_ATTR_MAX
-};
-
-/* Notice of Absence attribute - described in P2P spec 4.1.14 */
-/* Typical max value used here */
-#define IEEE80211_P2P_NOA_DESC_MAX 4
-
-struct ieee80211_p2p_noa_desc {
- u8 count;
- __le32 duration;
- __le32 interval;
- __le32 start_time;
-} __packed;
-
-struct ieee80211_p2p_noa_attr {
- u8 index;
- u8 oppps_ctwindow;
- struct ieee80211_p2p_noa_desc desc[IEEE80211_P2P_NOA_DESC_MAX];
-} __packed;
-
-#define IEEE80211_P2P_OPPPS_ENABLE_BIT BIT(7)
-#define IEEE80211_P2P_OPPPS_CTWINDOW_MASK 0x7F
-
-/**
- * struct ieee80211_bar - HT Block Ack Request
- *
- * This structure refers to "HT BlockAckReq" as
- * described in 802.11n draft section 7.2.1.7.1
- */
-struct ieee80211_bar {
- __le16 frame_control;
- __le16 duration;
- __u8 ra[ETH_ALEN];
- __u8 ta[ETH_ALEN];
- __le16 control;
- __le16 start_seq_num;
-} __packed;
-
-/* 802.11 BAR control masks */
-#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
-#define IEEE80211_BAR_CTRL_MULTI_TID 0x0002
-#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
-#define IEEE80211_BAR_CTRL_TID_INFO_MASK 0xf000
-#define IEEE80211_BAR_CTRL_TID_INFO_SHIFT 12
-
-#define IEEE80211_HT_MCS_MASK_LEN 10
-
-/**
- * struct ieee80211_mcs_info - MCS information
- * @rx_mask: RX mask
- * @rx_highest: highest supported RX rate. If set represents
- * the highest supported RX data rate in units of 1 Mbps.
- * If this field is 0 this value should not be used to
- * consider the highest RX data rate supported.
- * @tx_params: TX parameters
- */
-struct ieee80211_mcs_info {
- u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN];
- __le16 rx_highest;
- u8 tx_params;
- u8 reserved[3];
-} __packed;
-
-/* 802.11n HT capability MSC set */
-#define IEEE80211_HT_MCS_RX_HIGHEST_MASK 0x3ff
-#define IEEE80211_HT_MCS_TX_DEFINED 0x01
-#define IEEE80211_HT_MCS_TX_RX_DIFF 0x02
-/* value 0 == 1 stream etc */
-#define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK 0x0C
-#define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT 2
-#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4
-#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10
-
-/*
- * 802.11n D5.0 20.3.5 / 20.6 says:
- * - indices 0 to 7 and 32 are single spatial stream
- * - 8 to 31 are multiple spatial streams using equal modulation
- * [8..15 for two streams, 16..23 for three and 24..31 for four]
- * - remainder are multiple spatial streams using unequal modulation
- */
-#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START 33
-#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE \
- (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8)
-
-/**
- * struct ieee80211_ht_cap - HT capabilities
- *
- * This structure is the "HT capabilities element" as
- * described in 802.11n D5.0 7.3.2.57
- */
-struct ieee80211_ht_cap {
- __le16 cap_info;
- u8 ampdu_params_info;
-
- /* 16 bytes MCS information */
- struct ieee80211_mcs_info mcs;
-
- __le16 extended_ht_cap_info;
- __le32 tx_BF_cap_info;
- u8 antenna_selection_info;
-} __packed;
-
-/* 802.11n HT capabilities masks (for cap_info) */
-#define IEEE80211_HT_CAP_LDPC_CODING 0x0001
-#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002
-#define IEEE80211_HT_CAP_SM_PS 0x000C
-#define IEEE80211_HT_CAP_SM_PS_SHIFT 2
-#define IEEE80211_HT_CAP_GRN_FLD 0x0010
-#define IEEE80211_HT_CAP_SGI_20 0x0020
-#define IEEE80211_HT_CAP_SGI_40 0x0040
-#define IEEE80211_HT_CAP_TX_STBC 0x0080
-#define IEEE80211_HT_CAP_RX_STBC 0x0300
-#define IEEE80211_HT_CAP_RX_STBC_SHIFT 8
-#define IEEE80211_HT_CAP_DELAY_BA 0x0400
-#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
-#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
-#define IEEE80211_HT_CAP_RESERVED 0x2000
-#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000
-#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000
-
-/* 802.11n HT extended capabilities masks (for extended_ht_cap_info) */
-#define IEEE80211_HT_EXT_CAP_PCO 0x0001
-#define IEEE80211_HT_EXT_CAP_PCO_TIME 0x0006
-#define IEEE80211_HT_EXT_CAP_PCO_TIME_SHIFT 1
-#define IEEE80211_HT_EXT_CAP_MCS_FB 0x0300
-#define IEEE80211_HT_EXT_CAP_MCS_FB_SHIFT 8
-#define IEEE80211_HT_EXT_CAP_HTC_SUP 0x0400
-#define IEEE80211_HT_EXT_CAP_RD_RESPONDER 0x0800
-
-/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */
-#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03
-#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C
-#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2
-
-/*
- * Maximum length of AMPDU that the STA can receive in high-throughput (HT).
- * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
- */
-enum ieee80211_max_ampdu_length_exp {
- IEEE80211_HT_MAX_AMPDU_8K = 0,
- IEEE80211_HT_MAX_AMPDU_16K = 1,
- IEEE80211_HT_MAX_AMPDU_32K = 2,
- IEEE80211_HT_MAX_AMPDU_64K = 3
-};
-
-/*
- * Maximum length of AMPDU that the STA can receive in VHT.
- * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
- */
-enum ieee80211_vht_max_ampdu_length_exp {
- IEEE80211_VHT_MAX_AMPDU_8K = 0,
- IEEE80211_VHT_MAX_AMPDU_16K = 1,
- IEEE80211_VHT_MAX_AMPDU_32K = 2,
- IEEE80211_VHT_MAX_AMPDU_64K = 3,
- IEEE80211_VHT_MAX_AMPDU_128K = 4,
- IEEE80211_VHT_MAX_AMPDU_256K = 5,
- IEEE80211_VHT_MAX_AMPDU_512K = 6,
- IEEE80211_VHT_MAX_AMPDU_1024K = 7
-};
-
-#define IEEE80211_HT_MAX_AMPDU_FACTOR 13
-
-/* Minimum MPDU start spacing */
-enum ieee80211_min_mpdu_spacing {
- IEEE80211_HT_MPDU_DENSITY_NONE = 0, /* No restriction */
- IEEE80211_HT_MPDU_DENSITY_0_25 = 1, /* 1/4 usec */
- IEEE80211_HT_MPDU_DENSITY_0_5 = 2, /* 1/2 usec */
- IEEE80211_HT_MPDU_DENSITY_1 = 3, /* 1 usec */
- IEEE80211_HT_MPDU_DENSITY_2 = 4, /* 2 usec */
- IEEE80211_HT_MPDU_DENSITY_4 = 5, /* 4 usec */
- IEEE80211_HT_MPDU_DENSITY_8 = 6, /* 8 usec */
- IEEE80211_HT_MPDU_DENSITY_16 = 7 /* 16 usec */
-};
-
-/**
- * struct ieee80211_ht_operation - HT operation IE
- *
- * This structure is the "HT operation element" as
- * described in 802.11n-2009 7.3.2.57
- */
-struct ieee80211_ht_operation {
- u8 primary_chan;
- u8 ht_param;
- __le16 operation_mode;
- __le16 stbc_param;
- u8 basic_set[16];
-} __packed;
-
-/* for ht_param */
-#define IEEE80211_HT_PARAM_CHA_SEC_OFFSET 0x03
-#define IEEE80211_HT_PARAM_CHA_SEC_NONE 0x00
-#define IEEE80211_HT_PARAM_CHA_SEC_ABOVE 0x01
-#define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03
-#define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04
-#define IEEE80211_HT_PARAM_RIFS_MODE 0x08
-
-/* for operation_mode */
-#define IEEE80211_HT_OP_MODE_PROTECTION 0x0003
-#define IEEE80211_HT_OP_MODE_PROTECTION_NONE 0
-#define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER 1
-#define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ 2
-#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3
-#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004
-#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010
-#define IEEE80211_HT_OP_MODE_CCFS2_SHIFT 5
-#define IEEE80211_HT_OP_MODE_CCFS2_MASK 0x1fe0
-
-/* for stbc_param */
-#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040
-#define IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT 0x0080
-#define IEEE80211_HT_STBC_PARAM_STBC_BEACON 0x0100
-#define IEEE80211_HT_STBC_PARAM_LSIG_TXOP_FULLPROT 0x0200
-#define IEEE80211_HT_STBC_PARAM_PCO_ACTIVE 0x0400
-#define IEEE80211_HT_STBC_PARAM_PCO_PHASE 0x0800
-
-
-/* block-ack parameters */
-#define IEEE80211_ADDBA_PARAM_AMSDU_MASK 0x0001
-#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
-#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
-#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
-#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
-#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
-
-/*
- * A-MPDU buffer sizes
- * According to HT size varies from 8 to 64 frames
- * HE adds the ability to have up to 256 frames.
- */
-#define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF_HT 0x40
-#define IEEE80211_MAX_AMPDU_BUF 0x100
-
-
-/* Spatial Multiplexing Power Save Modes (for capability) */
-#define WLAN_HT_CAP_SM_PS_STATIC 0
-#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
-#define WLAN_HT_CAP_SM_PS_INVALID 2
-#define WLAN_HT_CAP_SM_PS_DISABLED 3
-
-/* for SM power control field lower two bits */
-#define WLAN_HT_SMPS_CONTROL_DISABLED 0
-#define WLAN_HT_SMPS_CONTROL_STATIC 1
-#define WLAN_HT_SMPS_CONTROL_DYNAMIC 3
-
-/**
- * struct ieee80211_vht_mcs_info - VHT MCS information
- * @rx_mcs_map: RX MCS map 2 bits for each stream, total 8 streams
- * @rx_highest: Indicates highest long GI VHT PPDU data rate
- * STA can receive. Rate expressed in units of 1 Mbps.
- * If this field is 0 this value should not be used to
- * consider the highest RX data rate supported.
- * The top 3 bits of this field indicate the Maximum NSTS,total
- * (a beamformee capability.)
- * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams
- * @tx_highest: Indicates highest long GI VHT PPDU data rate
- * STA can transmit. Rate expressed in units of 1 Mbps.
- * If this field is 0 this value should not be used to
- * consider the highest TX data rate supported.
- * The top 2 bits of this field are reserved, the
- * 3rd bit from the top indiciates VHT Extended NSS BW
- * Capability.
- */
-struct ieee80211_vht_mcs_info {
- __le16 rx_mcs_map;
- __le16 rx_highest;
- __le16 tx_mcs_map;
- __le16 tx_highest;
-} __packed;
-
-/* for rx_highest */
-#define IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT 13
-#define IEEE80211_VHT_MAX_NSTS_TOTAL_MASK (7 << IEEE80211_VHT_MAX_NSTS_TOTAL_SHIFT)
-
-/* for tx_highest */
-#define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13)
-
-/**
- * enum ieee80211_vht_mcs_support - VHT MCS support definitions
- * @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
- * number of streams
- * @IEEE80211_VHT_MCS_SUPPORT_0_8: MCSes 0-8 are supported
- * @IEEE80211_VHT_MCS_SUPPORT_0_9: MCSes 0-9 are supported
- * @IEEE80211_VHT_MCS_NOT_SUPPORTED: This number of streams isn't supported
- *
- * These definitions are used in each 2-bit subfield of the @rx_mcs_map
- * and @tx_mcs_map fields of &struct ieee80211_vht_mcs_info, which are
- * both split into 8 subfields by number of streams. These values indicate
- * which MCSes are supported for the number of streams the value appears
- * for.
- */
-enum ieee80211_vht_mcs_support {
- IEEE80211_VHT_MCS_SUPPORT_0_7 = 0,
- IEEE80211_VHT_MCS_SUPPORT_0_8 = 1,
- IEEE80211_VHT_MCS_SUPPORT_0_9 = 2,
- IEEE80211_VHT_MCS_NOT_SUPPORTED = 3,
-};
-
-/**
- * struct ieee80211_vht_cap - VHT capabilities
- *
- * This structure is the "VHT capabilities element" as
- * described in 802.11ac D3.0 8.4.2.160
- * @vht_cap_info: VHT capability info
- * @supp_mcs: VHT MCS supported rates
- */
-struct ieee80211_vht_cap {
- __le32 vht_cap_info;
- struct ieee80211_vht_mcs_info supp_mcs;
-} __packed;
-
-/**
- * enum ieee80211_vht_chanwidth - VHT channel width
- * @IEEE80211_VHT_CHANWIDTH_USE_HT: use the HT operation IE to
- * determine the channel width (20 or 40 MHz)
- * @IEEE80211_VHT_CHANWIDTH_80MHZ: 80 MHz bandwidth
- * @IEEE80211_VHT_CHANWIDTH_160MHZ: 160 MHz bandwidth
- * @IEEE80211_VHT_CHANWIDTH_80P80MHZ: 80+80 MHz bandwidth
- */
-enum ieee80211_vht_chanwidth {
- IEEE80211_VHT_CHANWIDTH_USE_HT = 0,
- IEEE80211_VHT_CHANWIDTH_80MHZ = 1,
- IEEE80211_VHT_CHANWIDTH_160MHZ = 2,
- IEEE80211_VHT_CHANWIDTH_80P80MHZ = 3,
-};
-
-/**
- * struct ieee80211_vht_operation - VHT operation IE
- *
- * This structure is the "VHT operation element" as
- * described in 802.11ac D3.0 8.4.2.161
- * @chan_width: Operating channel width
- * @center_freq_seg0_idx: center freq segment 0 index
- * @center_freq_seg1_idx: center freq segment 1 index
- * @basic_mcs_set: VHT Basic MCS rate set
- */
-struct ieee80211_vht_operation {
- u8 chan_width;
- u8 center_freq_seg0_idx;
- u8 center_freq_seg1_idx;
- __le16 basic_mcs_set;
-} __packed;
-
-/**
- * struct ieee80211_he_cap_elem - HE capabilities element
- *
- * This structure is the "HE capabilities element" fixed fields as
- * described in P802.11ax_D4.0 section 9.4.2.242.2 and 9.4.2.242.3
- */
-struct ieee80211_he_cap_elem {
- u8 mac_cap_info[6];
- u8 phy_cap_info[11];
-} __packed;
-
-#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5
-
-/**
- * enum ieee80211_he_mcs_support - HE MCS support definitions
- * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
- * number of streams
- * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported
- * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported
- * @IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported
- *
- * These definitions are used in each 2-bit subfield of the rx_mcs_*
- * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are
- * both split into 8 subfields by number of streams. These values indicate
- * which MCSes are supported for the number of streams the value appears
- * for.
- */
-enum ieee80211_he_mcs_support {
- IEEE80211_HE_MCS_SUPPORT_0_7 = 0,
- IEEE80211_HE_MCS_SUPPORT_0_9 = 1,
- IEEE80211_HE_MCS_SUPPORT_0_11 = 2,
- IEEE80211_HE_MCS_NOT_SUPPORTED = 3,
-};
-
-/**
- * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field
- *
- * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field
- * described in P802.11ax_D2.0 section 9.4.2.237.4
- *
- * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel
- * widths less than 80MHz.
- * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel
- * widths less than 80MHz.
- * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel
- * width 160MHz.
- * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel
- * width 160MHz.
- * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for
- * channel width 80p80MHz.
- * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for
- * channel width 80p80MHz.
- */
-struct ieee80211_he_mcs_nss_supp {
- __le16 rx_mcs_80;
- __le16 tx_mcs_80;
- __le16 rx_mcs_160;
- __le16 tx_mcs_160;
- __le16 rx_mcs_80p80;
- __le16 tx_mcs_80p80;
-} __packed;
-
-/**
- * struct ieee80211_he_operation - HE capabilities element
- *
- * This structure is the "HE operation element" fields as
- * described in P802.11ax_D4.0 section 9.4.2.243
- */
-struct ieee80211_he_operation {
- __le32 he_oper_params;
- __le16 he_mcs_nss_set;
- /* Optional 0,1,3,4,5,7 or 8 bytes: depends on @he_oper_params */
- u8 optional[];
-} __packed;
-
-/**
- * struct ieee80211_he_spr - HE spatial reuse element
- *
- * This structure is the "HE spatial reuse element" element as
- * described in P802.11ax_D4.0 section 9.4.2.241
- */
-struct ieee80211_he_spr {
- u8 he_sr_control;
- /* Optional 0 to 19 bytes: depends on @he_sr_control */
- u8 optional[];
-} __packed;
-
-/**
- * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field
- *
- * This structure is the "MU AC Parameter Record" fields as
- * described in P802.11ax_D4.0 section 9.4.2.245
- */
-struct ieee80211_he_mu_edca_param_ac_rec {
- u8 aifsn;
- u8 ecw_min_max;
- u8 mu_edca_timer;
-} __packed;
-
-/**
- * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element
- *
- * This structure is the "MU EDCA Parameter Set element" fields as
- * described in P802.11ax_D4.0 section 9.4.2.245
- */
-struct ieee80211_mu_edca_param_set {
- u8 mu_qos_info;
- struct ieee80211_he_mu_edca_param_ac_rec ac_be;
- struct ieee80211_he_mu_edca_param_ac_rec ac_bk;
- struct ieee80211_he_mu_edca_param_ac_rec ac_vi;
- struct ieee80211_he_mu_edca_param_ac_rec ac_vo;
-} __packed;
-
-/* 802.11ac VHT Capabilities */
-#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
-#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
-#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002
-#define IEEE80211_VHT_CAP_MAX_MPDU_MASK 0x00000003
-#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
-#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
-#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C
-#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_SHIFT 2
-#define IEEE80211_VHT_CAP_RXLDPC 0x00000010
-#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020
-#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040
-#define IEEE80211_VHT_CAP_TXSTBC 0x00000080
-#define IEEE80211_VHT_CAP_RXSTBC_1 0x00000100
-#define IEEE80211_VHT_CAP_RXSTBC_2 0x00000200
-#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
-#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
-#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
-#define IEEE80211_VHT_CAP_RXSTBC_SHIFT 8
-#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
-#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
-#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13
-#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK \
- (7 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT)
-#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT 16
-#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK \
- (7 << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT)
-#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000
-#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000
-#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000
-#define IEEE80211_VHT_CAP_HTC_VHT 0x00400000
-#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT 23
-#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK \
- (7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT)
-#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB 0x08000000
-#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000
-#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
-#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
-#define IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT 30
-#define IEEE80211_VHT_CAP_EXT_NSS_BW_MASK 0xc0000000
-
-/**
- * ieee80211_get_vht_max_nss - return max NSS for a given bandwidth/MCS
- * @cap: VHT capabilities of the peer
- * @bw: bandwidth to use
- * @mcs: MCS index to use
- * @ext_nss_bw_capable: indicates whether or not the local transmitter
- * (rate scaling algorithm) can deal with the new logic
- * (dot11VHTExtendedNSSBWCapable)
- * @max_vht_nss: current maximum NSS as advertised by the STA in
- * operating mode notification, can be 0 in which case the
- * capability data will be used to derive this (from MCS support)
- *
- * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can
- * vary for a given BW/MCS. This function parses the data.
- *
- * Note: This function is exported by cfg80211.
- */
-int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
- enum ieee80211_vht_chanwidth bw,
- int mcs, bool ext_nss_bw_capable,
- unsigned int max_vht_nss);
-
-/* 802.11ax HE MAC capabilities */
-#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
-#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02
-#define IEEE80211_HE_MAC_CAP0_TWT_RES 0x04
-#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP 0x00
-#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1 0x08
-#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2 0x10
-#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3 0x18
-#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK 0x18
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1 0x00
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2 0x20
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4 0x40
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8 0x60
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16 0x80
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32 0xa0
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64 0xc0
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED 0xe0
-#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK 0xe0
-
-#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED 0x00
-#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128 0x01
-#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256 0x02
-#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512 0x03
-#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK 0x03
-#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US 0x00
-#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04
-#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08
-#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1 0x00
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_2 0x10
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_3 0x20
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_4 0x30
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_5 0x40
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_6 0x50
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_7 0x60
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8 0x70
-#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_MASK 0x70
-
-/* Link adaptation is split between byte HE_MAC_CAP1 and
- * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE
- * in which case the following values apply:
- * 0 = No feedback.
- * 1 = reserved.
- * 2 = Unsolicited feedback.
- * 3 = both
- */
-#define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION 0x80
-
-#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01
-#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02
-#define IEEE80211_HE_MAC_CAP2_TRS 0x04
-#define IEEE80211_HE_MAC_CAP2_BSR 0x08
-#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10
-#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20
-#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40
-#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80
-
-#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02
-#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04
-
-/* The maximum length of an A-MDPU is defined by the combination of the Maximum
- * A-MDPU Length Exponent field in the HT capabilities, VHT capabilities and the
- * same field in the HE capabilities.
- */
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_0 0x00
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1 0x08
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2 0x10
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3 0x18
-#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK 0x18
-#define IEEE80211_HE_MAC_CAP3_AMSDU_FRAG 0x20
-#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40
-#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80
-
-#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01
-#define IEEE80211_HE_MAC_CAP4_QTP 0x02
-#define IEEE80211_HE_MAC_CAP4_BQR 0x04
-#define IEEE80211_HE_MAC_CAP4_PSR_RESP 0x08
-#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10
-#define IEEE80211_HE_MAC_CAP4_OPS 0x20
-#define IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU 0x40
-/* Multi TID agg TX is split between byte #4 and #5
- * The value is a combination of B39,B40,B41
- */
-#define IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39 0x80
-
-#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 0x01
-#define IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 0x02
-#define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION 0x04
-#define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08
-#define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10
-#define IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS 0x20
-#define IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING 0x40
-#define IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX 0x80
-
-#define IEEE80211_HE_VHT_MAX_AMPDU_FACTOR 20
-#define IEEE80211_HE_HT_MAX_AMPDU_FACTOR 16
-
-/* 802.11ax HE PHY capabilities */
-#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
-#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
-#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08
-#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10
-#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20
-#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40
-#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe
-
-#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ 0x01
-#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ 0x02
-#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ 0x04
-#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ 0x08
-#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK 0x0f
-#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10
-#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20
-#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40
-/* Midamble RX/TX Max NSTS is split between byte #2 and byte #3 */
-#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS 0x80
-
-#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS 0x01
-#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02
-#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04
-#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08
-#define IEEE80211_HE_PHY_CAP2_DOPPLER_TX 0x10
-#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20
-
-/* Note that the meaning of UL MU below is different between an AP and a non-AP
- * sta, where in the AP case it indicates support for Rx and in the non-AP sta
- * case it indicates support for Tx.
- */
-#define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO 0x40
-#define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO 0x80
-
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM 0x00
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK 0x01
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK 0x02
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM 0x03
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK 0x03
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 0x00
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2 0x04
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM 0x00
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK 0x08
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK 0x10
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00
-#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20
-#define IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU 0x40
-#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80
-
-#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01
-#define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER 0x02
-
-/* Minimal allowed value of Max STS under 80MHz is 3 */
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 0x10
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6 0x14
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7 0x18
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8 0x1c
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK 0x1c
-
-/* Minimal allowed value of Max STS above 80MHz is 3 */
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 0x60
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 0x80
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6 0xa0
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7 0xc0
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 0xe0
-#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK 0xe0
-
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1 0x00
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 0x01
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3 0x02
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4 0x03
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5 0x04
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6 0x05
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7 0x06
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8 0x07
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK 0x07
-
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1 0x00
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 0x08
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3 0x10
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4 0x18
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5 0x20
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6 0x28
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7 0x30
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8 0x38
-#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK 0x38
-
-#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40
-#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80
-
-#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01
-#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02
-#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB 0x04
-#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB 0x08
-#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10
-#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20
-#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40
-#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80
-
-#define IEEE80211_HE_PHY_CAP7_PSR_BASED_SR 0x01
-#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP 0x02
-#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04
-#define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08
-#define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10
-#define IEEE80211_HE_PHY_CAP7_MAX_NC_3 0x18
-#define IEEE80211_HE_PHY_CAP7_MAX_NC_4 0x20
-#define IEEE80211_HE_PHY_CAP7_MAX_NC_5 0x28
-#define IEEE80211_HE_PHY_CAP7_MAX_NC_6 0x30
-#define IEEE80211_HE_PHY_CAP7_MAX_NC_7 0x38
-#define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK 0x38
-#define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ 0x40
-#define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ 0x80
-
-#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI 0x01
-#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02
-#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04
-#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08
-#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10
-#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20
-#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242 0x00
-#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484 0x40
-#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996 0x80
-#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996 0xc0
-#define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK 0xc0
-
-#define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01
-#define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02
-#define IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU 0x04
-#define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08
-#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10
-#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20
-#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US 0x00
-#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US 0x40
-#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US 0x80
-#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED 0xc0
-#define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK 0xc0
-
-/* 802.11ax HE TX/RX MCS NSS Support */
-#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
-#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6)
-#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS (11)
-#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK 0x07c0
-#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK 0xf800
-
-/* TX/RX HE MCS Support field Highest MCS subfield encoding */
-enum ieee80211_he_highest_mcs_supported_subfield_enc {
- HIGHEST_MCS_SUPPORTED_MCS7 = 0,
- HIGHEST_MCS_SUPPORTED_MCS8,
- HIGHEST_MCS_SUPPORTED_MCS9,
- HIGHEST_MCS_SUPPORTED_MCS10,
- HIGHEST_MCS_SUPPORTED_MCS11,
-};
-
-/* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */
-static inline u8
-ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap)
-{
- u8 count = 4;
-
- if (he_cap->phy_cap_info[0] &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
- count += 4;
-
- if (he_cap->phy_cap_info[0] &
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
- count += 4;
-
- return count;
-}
-
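As a worked example of the size calculation in the helper above: a device that sets both the 160 MHz and 80+80 MHz channel-width bits ends up with 4 + 4 + 4 = 12 bytes, the maximum size of the Tx/Rx HE MCS NSS Support field.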
-/* 802.11ax HE PPE Thresholds */
-#define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS (1)
-#define IEEE80211_PPE_THRES_NSS_POS (0)
-#define IEEE80211_PPE_THRES_NSS_MASK (7)
-#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU \
- (BIT(5) | BIT(6))
-#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78
-#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3)
-#define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3)
-
-/*
- * Calculate 802.11ax HE capabilities IE PPE field size
- * Input: Header byte of ppe_thres (first byte), and HE capa IE's PHY cap u8*
- */
-static inline u8
-ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
-{
- u8 n;
-
- if ((phy_cap_info[6] &
- IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0)
- return 0;
-
- n = hweight8(ppe_thres_hdr &
- IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
- n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >>
- IEEE80211_PPE_THRES_NSS_POS));
-
- /*
- * Each pair is 6 bits, and we need to add the 7 "header" bits to the
- * total size.
- */
- n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7;
- n = DIV_ROUND_UP(n, 8);
-
- return n;
-}
-
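The bit-counting above can be checked with a small standalone sketch; the mask values are hard-coded from the definitions being removed, so this is illustrative only:

	/* Standalone check of the PPE-size arithmetic (illustrative only). */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Example header: NSS subfield = 1 (two spatial streams),
		 * RU index bitmask = 0b0011 (two RU sizes present). */
		uint8_t hdr = 0x1 | (0x3 << 3);
		unsigned int n = __builtin_popcount(hdr & 0x78) * (1 + (hdr & 0x7));

		/* Each (NSS, RU) pair carries two 3-bit PPET fields; add the
		 * 7 header bits and round up to whole octets. */
		unsigned int bytes = (n * 3 * 2 + 7 + 7) / 8;

		printf("PPE field size: %u bytes\n", bytes);	/* prints 4 */
		return 0;
	}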
-/* HE Operation defines */
-#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007
-#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008
-#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0
-#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
-#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000
-#define IEEE80211_HE_OPERATION_CO_HOSTED_BSS 0x00008000
-#define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000
-#define IEEE80211_HE_OPERATION_6GHZ_OP_INFO 0x00020000
-#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000
-#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24
-#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
-#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
-
-/**
- * ieee80211_he_6ghz_oper - HE 6 GHz operation Information field
- * @primary: primary channel
- * @control: control flags
- * @ccfs0: channel center frequency segment 0
- * @ccfs1: channel center frequency segment 1
- * @minrate: minimum rate (in 1 Mbps units)
- */
-struct ieee80211_he_6ghz_oper {
- u8 primary;
-#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH 0x3
-#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ 0
-#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ 1
-#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2
-#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3
-#define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4
- u8 control;
- u8 ccfs0;
- u8 ccfs1;
- u8 minrate;
-} __packed;
-
-/*
- * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
- * @he_oper_ie: byte data of the He Operations IE, stating from the byte
- * after the ext ID byte. It is assumed that he_oper_ie has at least
- * sizeof(struct ieee80211_he_operation) bytes, the caller must have
- * validated this.
- * @return the actual size of the IE data (not including header), or 0 on error
- */
-static inline u8
-ieee80211_he_oper_size(const u8 *he_oper_ie)
-{
- struct ieee80211_he_operation *he_oper = (void *)he_oper_ie;
- u8 oper_len = sizeof(struct ieee80211_he_operation);
- u32 he_oper_params;
-
- /* Make sure the input is not NULL */
- if (!he_oper_ie)
- return 0;
-
- /* Calc required length */
- he_oper_params = le32_to_cpu(he_oper->he_oper_params);
- if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
- oper_len += 3;
- if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
- oper_len++;
- if (he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO)
- oper_len += sizeof(struct ieee80211_he_6ghz_oper);
-
- /* Add the first byte (extension ID) to the total length */
- oper_len++;
-
- return oper_len;
-}
-
-/**
- * ieee80211_he_6ghz_oper - obtain 6 GHz operation field
- * @he_oper: HE operation element (must be pre-validated for size)
- * but may be %NULL
- *
- * Return: a pointer to the 6 GHz operation field, or %NULL
- */
-static inline const struct ieee80211_he_6ghz_oper *
-ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
-{
- const u8 *ret = (void *)&he_oper->optional;
- u32 he_oper_params;
-
- if (!he_oper)
- return NULL;
-
- he_oper_params = le32_to_cpu(he_oper->he_oper_params);
-
- if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO))
- return NULL;
- if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
- ret += 3;
- if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
- ret++;
-
- return (void *)ret;
-}
-
-/* HE Spatial Reuse defines */
-#define IEEE80211_HE_SPR_PSR_DISALLOWED BIT(0)
-#define IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED BIT(1)
-#define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT BIT(2)
-#define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT BIT(3)
-#define IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED BIT(4)
-
-/*
- * ieee80211_he_spr_size - calculate 802.11ax HE Spatial Reuse IE size
- * @he_spr_ie: byte data of the He Spatial Reuse IE, stating from the byte
- * after the ext ID byte. It is assumed that he_spr_ie has at least
- * sizeof(struct ieee80211_he_spr) bytes, the caller must have validated
- * this
- * @return the actual size of the IE data (not including header), or 0 on error
- */
-static inline u8
-ieee80211_he_spr_size(const u8 *he_spr_ie)
-{
- struct ieee80211_he_spr *he_spr = (void *)he_spr_ie;
- u8 spr_len = sizeof(struct ieee80211_he_spr);
- u8 he_spr_params;
-
- /* Make sure the input is not NULL */
- if (!he_spr_ie)
- return 0;
-
- /* Calc required length */
- he_spr_params = he_spr->he_sr_control;
- if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
- spr_len++;
- if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
- spr_len += 18;
-
- /* Add the first byte (extension ID) to the total length */
- spr_len++;
-
- return spr_len;
-}
-
-/* S1G Capabilities Information field */
-#define IEEE80211_S1G_CAPABILITY_LEN 15
-
-#define S1G_CAP0_S1G_LONG BIT(0)
-#define S1G_CAP0_SGI_1MHZ BIT(1)
-#define S1G_CAP0_SGI_2MHZ BIT(2)
-#define S1G_CAP0_SGI_4MHZ BIT(3)
-#define S1G_CAP0_SGI_8MHZ BIT(4)
-#define S1G_CAP0_SGI_16MHZ BIT(5)
-#define S1G_CAP0_SUPP_CH_WIDTH GENMASK(7, 6)
-
-#define S1G_SUPP_CH_WIDTH_2 0
-#define S1G_SUPP_CH_WIDTH_4 1
-#define S1G_SUPP_CH_WIDTH_8 2
-#define S1G_SUPP_CH_WIDTH_16 3
-#define S1G_SUPP_CH_WIDTH_MAX(cap) ((1 << FIELD_GET(S1G_CAP0_SUPP_CH_WIDTH, \
- cap[0])) << 1)
-
-#define S1G_CAP1_RX_LDPC BIT(0)
-#define S1G_CAP1_TX_STBC BIT(1)
-#define S1G_CAP1_RX_STBC BIT(2)
-#define S1G_CAP1_SU_BFER BIT(3)
-#define S1G_CAP1_SU_BFEE BIT(4)
-#define S1G_CAP1_BFEE_STS GENMASK(7, 5)
-
-#define S1G_CAP2_SOUNDING_DIMENSIONS GENMASK(2, 0)
-#define S1G_CAP2_MU_BFER BIT(3)
-#define S1G_CAP2_MU_BFEE BIT(4)
-#define S1G_CAP2_PLUS_HTC_VHT BIT(5)
-#define S1G_CAP2_TRAVELING_PILOT GENMASK(7, 6)
-
-#define S1G_CAP3_RD_RESPONDER BIT(0)
-#define S1G_CAP3_HT_DELAYED_BA BIT(1)
-#define S1G_CAP3_MAX_MPDU_LEN BIT(2)
-#define S1G_CAP3_MAX_AMPDU_LEN_EXP GENMASK(4, 3)
-#define S1G_CAP3_MIN_MPDU_START GENMASK(7, 5)
-
-#define S1G_CAP4_UPLINK_SYNC BIT(0)
-#define S1G_CAP4_DYNAMIC_AID BIT(1)
-#define S1G_CAP4_BAT BIT(2)
-#define S1G_CAP4_TIME_ADE BIT(3)
-#define S1G_CAP4_NON_TIM BIT(4)
-#define S1G_CAP4_GROUP_AID BIT(5)
-#define S1G_CAP4_STA_TYPE GENMASK(7, 6)
-
-#define S1G_CAP5_CENT_AUTH_CONTROL BIT(0)
-#define S1G_CAP5_DIST_AUTH_CONTROL BIT(1)
-#define S1G_CAP5_AMSDU BIT(2)
-#define S1G_CAP5_AMPDU BIT(3)
-#define S1G_CAP5_ASYMMETRIC_BA BIT(4)
-#define S1G_CAP5_FLOW_CONTROL BIT(5)
-#define S1G_CAP5_SECTORIZED_BEAM GENMASK(7, 6)
-
-#define S1G_CAP6_OBSS_MITIGATION BIT(0)
-#define S1G_CAP6_FRAGMENT_BA BIT(1)
-#define S1G_CAP6_NDP_PS_POLL BIT(2)
-#define S1G_CAP6_RAW_OPERATION BIT(3)
-#define S1G_CAP6_PAGE_SLICING BIT(4)
-#define S1G_CAP6_TXOP_SHARING_IMP_ACK BIT(5)
-#define S1G_CAP6_VHT_LINK_ADAPT GENMASK(7, 6)
-
-#define S1G_CAP7_TACK_AS_PS_POLL BIT(0)
-#define S1G_CAP7_DUP_1MHZ BIT(1)
-#define S1G_CAP7_MCS_NEGOTIATION BIT(2)
-#define S1G_CAP7_1MHZ_CTL_RESPONSE_PREAMBLE BIT(3)
-#define S1G_CAP7_NDP_BFING_REPORT_POLL BIT(4)
-#define S1G_CAP7_UNSOLICITED_DYN_AID BIT(5)
-#define S1G_CAP7_SECTOR_TRAINING_OPERATION BIT(6)
-#define S1G_CAP7_TEMP_PS_MODE_SWITCH BIT(7)
-
-#define S1G_CAP8_TWT_GROUPING BIT(0)
-#define S1G_CAP8_BDT BIT(1)
-#define S1G_CAP8_COLOR GENMASK(4, 2)
-#define S1G_CAP8_TWT_REQUEST BIT(5)
-#define S1G_CAP8_TWT_RESPOND BIT(6)
-#define S1G_CAP8_PV1_FRAME BIT(7)
-
-#define S1G_CAP9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0)
-
-#define S1G_OPER_CH_WIDTH_PRIMARY_1MHZ BIT(0)
-#define S1G_OPER_CH_WIDTH_OPER GENMASK(4, 1)
-
-
-#define LISTEN_INT_USF GENMASK(15, 14)
-#define LISTEN_INT_UI GENMASK(13, 0)
-
-#define IEEE80211_MAX_USF FIELD_MAX(LISTEN_INT_USF)
-#define IEEE80211_MAX_UI FIELD_MAX(LISTEN_INT_UI)
-
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1
@@ -2626,10 +1493,12 @@ enum ieee80211_statuscode {
WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99,
WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103,
/* 802.11ai */
- WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108,
- WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109,
+ WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 112,
+ WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 113,
WLAN_STATUS_SAE_HASH_TO_ELEMENT = 126,
WLAN_STATUS_SAE_PK = 127,
+ WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING = 133,
+ WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED = 134,
};
@@ -2867,7 +1736,7 @@ enum ieee80211_eid {
WLAN_EID_VHT_OPERATION = 192,
WLAN_EID_EXTENDED_BSS_LOAD = 193,
WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
- WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
+ WLAN_EID_TX_POWER_ENVELOPE = 195,
WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
WLAN_EID_AID = 197,
WLAN_EID_QUIET_CHANNEL = 198,
@@ -2879,6 +1748,7 @@ enum ieee80211_eid {
WLAN_EID_AID_RESPONSE = 211,
WLAN_EID_S1G_BCN_COMPAT = 213,
WLAN_EID_S1G_SHORT_BCN_INTERVAL = 214,
+ WLAN_EID_S1G_TWT = 216,
WLAN_EID_S1G_CAPABILITIES = 217,
WLAN_EID_VENDOR_SPECIFIC = 221,
WLAN_EID_QOS_PARAMETER = 222,
@@ -2905,6 +1775,7 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_FILS_PUBLIC_KEY = 12,
WLAN_EID_EXT_FILS_NONCE = 13,
WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14,
+ WLAN_EID_EXT_DH_PARAMETER = 32,
WLAN_EID_EXT_HE_CAPABILITY = 35,
WLAN_EID_EXT_HE_OPERATION = 36,
WLAN_EID_EXT_UORA = 37,
@@ -2923,6 +1794,13 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_SHORT_SSID_LIST = 58,
WLAN_EID_EXT_HE_6GHZ_CAPA = 59,
WLAN_EID_EXT_UL_MU_POWER_CAPA = 60,
+ WLAN_EID_EXT_EHT_OPERATION = 106,
+ WLAN_EID_EXT_EHT_MULTI_LINK = 107,
+ WLAN_EID_EXT_EHT_CAPABILITY = 108,
+ WLAN_EID_EXT_TID_TO_LINK_MAPPING = 109,
+ WLAN_EID_EXT_BANDWIDTH_INDICATION = 135,
+ WLAN_EID_EXT_KNOWN_STA_IDENTIFCATION = 136,
+ WLAN_EID_EXT_NON_AP_STA_REG_CON = 137,
};
/* Action category code */
@@ -2933,6 +1811,7 @@ enum ieee80211_category {
WLAN_CATEGORY_BACK = 3,
WLAN_CATEGORY_PUBLIC = 4,
WLAN_CATEGORY_RADIO_MEASUREMENT = 5,
+ WLAN_CATEGORY_FAST_BBS_TRANSITION = 6,
WLAN_CATEGORY_HT = 7,
WLAN_CATEGORY_SA_QUERY = 8,
WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
@@ -2947,6 +1826,8 @@ enum ieee80211_category {
WLAN_CATEGORY_FST = 18,
WLAN_CATEGORY_UNPROT_DMG = 20,
WLAN_CATEGORY_VHT = 21,
+ WLAN_CATEGORY_S1G = 22,
+ WLAN_CATEGORY_PROTECTED_EHT = 37,
WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
};
@@ -2960,25 +1841,6 @@ enum ieee80211_spectrum_mgmt_actioncode {
WLAN_ACTION_SPCT_CHL_SWITCH = 4,
};
-/* HT action codes */
-enum ieee80211_ht_actioncode {
- WLAN_HT_ACTION_NOTIFY_CHANWIDTH = 0,
- WLAN_HT_ACTION_SMPS = 1,
- WLAN_HT_ACTION_PSMP = 2,
- WLAN_HT_ACTION_PCO_PHASE = 3,
- WLAN_HT_ACTION_CSI = 4,
- WLAN_HT_ACTION_NONCOMPRESSED_BF = 5,
- WLAN_HT_ACTION_COMPRESSED_BF = 6,
- WLAN_HT_ACTION_ASEL_IDX_FEEDBACK = 7,
-};
-
-/* VHT action codes */
-enum ieee80211_vht_actioncode {
- WLAN_VHT_ACTION_COMPRESSED_BF = 0,
- WLAN_VHT_ACTION_GROUPID_MGMT = 1,
- WLAN_VHT_ACTION_OPMODE_NOTIF = 2,
-};
-
/* Self Protected Action codes */
enum ieee80211_self_protected_actioncode {
WLAN_SP_RESERVED = 0,
@@ -2989,19 +1851,10 @@ enum ieee80211_self_protected_actioncode {
WLAN_SP_MGK_ACK = 5,
};
-/* Mesh action codes */
-enum ieee80211_mesh_actioncode {
- WLAN_MESH_ACTION_LINK_METRIC_REPORT,
- WLAN_MESH_ACTION_HWMP_PATH_SELECTION,
- WLAN_MESH_ACTION_GATE_ANNOUNCEMENT,
- WLAN_MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION,
- WLAN_MESH_ACTION_MCCA_SETUP_REQUEST,
- WLAN_MESH_ACTION_MCCA_SETUP_REPLY,
- WLAN_MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST,
- WLAN_MESH_ACTION_MCCA_ADVERTISEMENT,
- WLAN_MESH_ACTION_MCCA_TEARDOWN,
- WLAN_MESH_ACTION_TBTT_ADJUSTMENT_REQUEST,
- WLAN_MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE,
+/* Unprotected WNM action codes */
+enum ieee80211_unprotected_wnm_actioncode {
+ WLAN_UNPROTECTED_WNM_ACTION_TIM = 0,
+ WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE = 1,
};
/* Security key length */
@@ -3020,6 +1873,16 @@ enum ieee80211_key_len {
WLAN_KEY_LEN_BIP_GMAC_256 = 32,
};
+/* Radio measurement action codes as defined in IEEE 802.11-2024 - Table 9-470 */
+enum ieee80211_radio_measurement_actioncode {
+ WLAN_RM_ACTION_RADIO_MEASUREMENT_REQUEST = 0,
+ WLAN_RM_ACTION_RADIO_MEASUREMENT_REPORT = 1,
+ WLAN_RM_ACTION_LINK_MEASUREMENT_REQUEST = 2,
+ WLAN_RM_ACTION_LINK_MEASUREMENT_REPORT = 3,
+ WLAN_RM_ACTION_NEIGHBOR_REPORT_REQUEST = 4,
+ WLAN_RM_ACTION_NEIGHBOR_REPORT_RESPONSE = 5,
+};
+
#define IEEE80211_WEP_IV_LEN 4
#define IEEE80211_WEP_ICV_LEN 4
#define IEEE80211_CCMP_HDR_LEN 8
@@ -3035,6 +1898,9 @@ enum ieee80211_key_len {
#define IEEE80211_GCMP_HDR_LEN 8
#define IEEE80211_GCMP_MIC_LEN 16
#define IEEE80211_GCMP_PN_LEN 6
+#define IEEE80211_CMAC_128_MIC_LEN 8
+#define IEEE80211_CMAC_256_MIC_LEN 16
+#define IEEE80211_GMAC_MIC_LEN 16
#define FILS_NONCE_LEN 16
#define FILS_MAX_KEK_LEN 64
@@ -3081,7 +1947,7 @@ enum ieee80211_pub_actioncode {
WLAN_PUB_ACTION_NETWORK_CHANNEL_CONTROL = 30,
WLAN_PUB_ACTION_WHITE_SPACE_MAP_ANN = 31,
WLAN_PUB_ACTION_FTM_REQUEST = 32,
- WLAN_PUB_ACTION_FTM = 33,
+ WLAN_PUB_ACTION_FTM_RESPONSE = 33,
WLAN_PUB_ACTION_FILS_DISCOVERY = 34,
};
@@ -3110,6 +1976,11 @@ enum ieee80211_tdls_actioncode {
*/
#define WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT BIT(6)
+/* Timing Measurement protocol for time sync is set in the 7th bit of 3rd byte
+ * of the @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA3_TIMING_MEASUREMENT_SUPPORT BIT(7)
+
/* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */
#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4)
#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5)
@@ -3155,71 +2026,15 @@ enum ieee80211_tdls_actioncode {
/* Defines support for enhanced multi-bssid advertisement*/
#define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(3)
+/* Enable Beacon Protection */
+#define WLAN_EXT_CAPA11_BCN_PROTECT BIT(4)
+
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
/* BSS Coex IE information field bits */
#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0)
-/**
- * enum ieee80211_mesh_sync_method - mesh synchronization method identifier
- *
- * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method
- * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method
- * that will be specified in a vendor specific information element
- */
-enum ieee80211_mesh_sync_method {
- IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1,
- IEEE80211_SYNC_METHOD_VENDOR = 255,
-};
-
-/**
- * enum ieee80211_mesh_path_protocol - mesh path selection protocol identifier
- *
- * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol
- * @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will
- * be specified in a vendor specific information element
- */
-enum ieee80211_mesh_path_protocol {
- IEEE80211_PATH_PROTOCOL_HWMP = 1,
- IEEE80211_PATH_PROTOCOL_VENDOR = 255,
-};
-
-/**
- * enum ieee80211_mesh_path_metric - mesh path selection metric identifier
- *
- * @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric
- * @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be
- * specified in a vendor specific information element
- */
-enum ieee80211_mesh_path_metric {
- IEEE80211_PATH_METRIC_AIRTIME = 1,
- IEEE80211_PATH_METRIC_VENDOR = 255,
-};
-
-/**
- * enum ieee80211_root_mode_identifier - root mesh STA mode identifier
- *
- * These attribute are used by dot11MeshHWMPRootMode to set root mesh STA mode
- *
- * @IEEE80211_ROOTMODE_NO_ROOT: the mesh STA is not a root mesh STA (default)
- * @IEEE80211_ROOTMODE_ROOT: the mesh STA is a root mesh STA if greater than
- * this value
- * @IEEE80211_PROACTIVE_PREQ_NO_PREP: the mesh STA is a root mesh STA supports
- * the proactive PREQ with proactive PREP subfield set to 0
- * @IEEE80211_PROACTIVE_PREQ_WITH_PREP: the mesh STA is a root mesh STA
- * supports the proactive PREQ with proactive PREP subfield set to 1
- * @IEEE80211_PROACTIVE_RANN: the mesh STA is a root mesh STA supports
- * the proactive RANN
- */
-enum ieee80211_root_mode_identifier {
- IEEE80211_ROOTMODE_NO_ROOT = 0,
- IEEE80211_ROOTMODE_ROOT = 1,
- IEEE80211_PROACTIVE_PREQ_NO_PREP = 2,
- IEEE80211_PROACTIVE_PREQ_WITH_PREP = 3,
- IEEE80211_PROACTIVE_RANN = 4,
-};
-
/*
* IEEE 802.11-2007 7.3.2.9 Country information element
*
@@ -3312,7 +2127,7 @@ enum ieee80211_idle_options {
};
/**
- * struct ieee80211_bss_max_idle_period_ie
+ * struct ieee80211_bss_max_idle_period_ie - BSS max idle period element struct
*
* This structure refers to "BSS Max idle period element"
*
@@ -3327,19 +2142,6 @@ struct ieee80211_bss_max_idle_period_ie {
u8 idle_options;
} __packed;
-/* BACK action code */
-enum ieee80211_back_actioncode {
- WLAN_ACTION_ADDBA_REQ = 0,
- WLAN_ACTION_ADDBA_RESP = 1,
- WLAN_ACTION_DELBA = 2,
-};
-
-/* BACK (block-ack) parties */
-enum ieee80211_back_parties {
- WLAN_BACK_RECIPIENT = 0,
- WLAN_BACK_INITIATOR = 1,
-};
-
/* SA Query action */
enum ieee80211_sa_query_action {
WLAN_ACTION_SA_QUERY_REQUEST = 0,
@@ -3347,7 +2149,7 @@ enum ieee80211_sa_query_action {
};
/**
- * struct ieee80211_bssid_index
+ * struct ieee80211_bssid_index - multiple BSSID index element structure
*
* This structure refers to "Multiple BSSID-index element"
*
@@ -3362,7 +2164,8 @@ struct ieee80211_bssid_index {
};
/**
- * struct ieee80211_multiple_bssid_configuration
+ * struct ieee80211_multiple_bssid_configuration - multiple BSSID configuration
+ * element structure
*
* This structure refers to "Multiple BSSID Configuration element"
*
@@ -3472,44 +2275,33 @@ struct ieee80211_tspec_ie {
__le16 medium_time;
} __packed;
-struct ieee80211_he_6ghz_capa {
- /* uses IEEE80211_HE_6GHZ_CAP_* below */
- __le16 capa;
-} __packed;
-
-/* HE 6 GHz band capabilities */
-/* uses enum ieee80211_min_mpdu_spacing values */
-#define IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START 0x0007
-/* uses enum ieee80211_vht_max_ampdu_length_exp values */
-#define IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP 0x0038
-/* uses IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_* values */
-#define IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN 0x00c0
-/* WLAN_HT_CAP_SM_PS_* values */
-#define IEEE80211_HE_6GHZ_CAP_SM_PS 0x0600
-#define IEEE80211_HE_6GHZ_CAP_RD_RESPONDER 0x0800
-#define IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS 0x1000
-#define IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS 0x2000
-
/**
* ieee80211_get_qos_ctl - get pointer to qos control bytes
* @hdr: the frame
+ * Return: a pointer to the QoS control field in the frame header
*
* The qos ctrl bytes come after the frame_control, duration, seq_num
- * and 3 or 4 addresses of length ETH_ALEN.
- * 3 addr: 2 + 2 + 2 + 3*6 = 24
- * 4 addr: 2 + 2 + 2 + 4*6 = 30
+ * and 3 or 4 addresses of length ETH_ALEN. Checks frame_control to choose
+ * between struct ieee80211_qos_hdr_4addr and struct ieee80211_qos_hdr.
*/
static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr)
{
- if (ieee80211_has_a4(hdr->frame_control))
- return (u8 *)hdr + 30;
+ union {
+ struct ieee80211_qos_hdr addr3;
+ struct ieee80211_qos_hdr_4addr addr4;
+ } *qos;
+
+ qos = (void *)hdr;
+ if (ieee80211_has_a4(qos->addr3.frame_control))
+ return (u8 *)&qos->addr4.qos_ctrl;
else
- return (u8 *)hdr + 24;
+ return (u8 *)&qos->addr3.qos_ctrl;
}
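The union-based version resolves to the same offsets as the old open-coded arithmetic (2 + 2 + 2 + 3*6 = 24 bytes for three addresses, 30 with a fourth). A standalone sketch with mirrored packed layouts (field layout assumed to match the kernel structs) shows this:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct qos_hdr {		/* assumed mirror of struct ieee80211_qos_hdr */
		uint16_t frame_control, duration_id;
		uint8_t addr1[6], addr2[6], addr3[6];
		uint16_t seq_ctrl, qos_ctrl;
	} __attribute__((packed));

	struct qos_hdr_4addr {		/* assumed mirror of the 4-address variant */
		uint16_t frame_control, duration_id;
		uint8_t addr1[6], addr2[6], addr3[6];
		uint16_t seq_ctrl;
		uint8_t addr4[6];
		uint16_t qos_ctrl;
	} __attribute__((packed));

	int main(void)
	{
		printf("%zu %zu\n", offsetof(struct qos_hdr, qos_ctrl),
		       offsetof(struct qos_hdr_4addr, qos_ctrl));	/* 24 30 */
		return 0;
	}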
/**
* ieee80211_get_tid - get qos TID
* @hdr: the frame
+ * Return: the TID from the QoS control field
*/
static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr)
{
@@ -3521,6 +2313,7 @@ static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr)
/**
* ieee80211_get_SA - get pointer to SA
* @hdr: the frame
+ * Return: a pointer to the source address (SA)
*
* Given an 802.11 frame, this function returns the offset
* to the source address (SA). It does not verify that the
@@ -3540,6 +2333,7 @@ static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr)
/**
* ieee80211_get_DA - get pointer to DA
* @hdr: the frame
+ * Return: a pointer to the destination address (DA)
*
* Given an 802.11 frame, this function returns the offset
* to the destination address (DA). It does not verify that
@@ -3556,8 +2350,48 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
}
/**
+ * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
+ * @skb: the skb to check, starting with the 802.11 header
+ * Return: whether or not the MMPDU is bufferable
+ */
+static inline bool ieee80211_is_bufferable_mmpdu(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ __le16 fc = mgmt->frame_control;
+
+ /*
+ * IEEE 802.11 REVme D2.0 definition of bufferable MMPDU;
+ * note that this ignores the IBSS special case.
+ */
+ if (!ieee80211_is_mgmt(fc))
+ return false;
+
+ if (ieee80211_is_disassoc(fc) || ieee80211_is_deauth(fc))
+ return true;
+
+ if (!ieee80211_is_action(fc))
+ return false;
+
+ if (skb->len < offsetofend(typeof(*mgmt), u.action.u.ftm.action_code))
+ return true;
+
+ /* action frame - additionally check for non-bufferable FTM */
+
+ if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
+ mgmt->u.action.category != WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION)
+ return true;
+
+ if (mgmt->u.action.u.ftm.action_code == WLAN_PUB_ACTION_FTM_REQUEST ||
+ mgmt->u.action.u.ftm.action_code == WLAN_PUB_ACTION_FTM_RESPONSE)
+ return false;
+
+ return true;
+}
+
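In other words, a deauthentication or disassociation frame is always bufferable, while e.g. an FTM request sent as a public action frame is not.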
+/**
* _ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame
* @hdr: the frame (buffer must include at least the first octet of payload)
+ * Return: whether or not the frame is a robust management frame
*/
static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
{
@@ -3584,6 +2418,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
*category != WLAN_CATEGORY_SELF_PROTECTED &&
*category != WLAN_CATEGORY_UNPROT_DMG &&
*category != WLAN_CATEGORY_VHT &&
+ *category != WLAN_CATEGORY_S1G &&
*category != WLAN_CATEGORY_VENDOR_SPECIFIC;
}
@@ -3593,6 +2428,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
/**
* ieee80211_is_robust_mgmt_frame - check if skb contains a robust mgmt frame
* @skb: the skb containing the frame, length will be checked
+ * Return: whether or not the frame is a robust management frame
*/
static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
{
@@ -3605,6 +2441,7 @@ static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
* ieee80211_is_public_action - check if frame is a public action frame
* @hdr: the frame
* @len: length of the frame
+ * Return: whether or not the frame is a public action frame
*/
static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
size_t len)
@@ -3619,9 +2456,40 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
}
/**
+ * ieee80211_is_protected_dual_of_public_action - check if skb contains a
+ * protected dual of public action management frame
+ * @skb: the skb containing the frame, length will be checked
+ *
+ * Return: true if the skb contains a protected dual of public action
+ * management frame, false otherwise.
+ */
+static inline bool
+ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
+{
+ u8 action;
+
+ if (!ieee80211_is_public_action((void *)skb->data, skb->len) ||
+ skb->len < IEEE80211_MIN_ACTION_SIZE + 1)
+ return false;
+
+ action = *(u8 *)(skb->data + IEEE80211_MIN_ACTION_SIZE);
+
+ return action != WLAN_PUB_ACTION_20_40_BSS_COEX &&
+ action != WLAN_PUB_ACTION_DSE_REG_LOC_ANN &&
+ action != WLAN_PUB_ACTION_MSMT_PILOT &&
+ action != WLAN_PUB_ACTION_TDLS_DISCOVER_RES &&
+ action != WLAN_PUB_ACTION_LOC_TRACK_NOTI &&
+ action != WLAN_PUB_ACTION_FTM_REQUEST &&
+ action != WLAN_PUB_ACTION_FTM_RESPONSE &&
+ action != WLAN_PUB_ACTION_FILS_DISCOVERY &&
+ action != WLAN_PUB_ACTION_VENDOR_SPECIFIC;
+}
+
+/**
* _ieee80211_is_group_privacy_action - check if frame is a group addressed
- * privacy action frame
+ * privacy action frame
* @hdr: the frame
+ * Return: whether or not the frame is a group addressed privacy action frame
*/
static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr)
{
@@ -3637,8 +2505,9 @@ static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr)
/**
* ieee80211_is_group_privacy_action - check if frame is a group addressed
- * privacy action frame
+ * privacy action frame
* @skb: the skb containing the frame, length will be checked
+ * Return: whether or not the frame is a group addressed privacy action frame
*/
static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb)
{
@@ -3650,20 +2519,15 @@ static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb)
/**
* ieee80211_tu_to_usec - convert time units (TU) to microseconds
* @tu: the TUs
+ * Return: the time value converted to microseconds
*/
static inline unsigned long ieee80211_tu_to_usec(unsigned long tu)
{
return 1024 * tu;
}
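For example, the common default beacon interval of 100 TU converts to 100 * 1024 = 102400 microseconds, i.e. roughly 102.4 ms.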
-/**
- * ieee80211_check_tim - check if AID bit is set in TIM
- * @tim: the TIM IE
- * @tim_len: length of the TIM IE
- * @aid: the AID to look for
- */
-static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
- u8 tim_len, u16 aid)
+static inline bool __ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid)
{
u8 mask;
u8 index, indexn1, indexn2;
@@ -3687,14 +2551,15 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
}
/**
- * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet)
+ * ieee80211_get_tdls_action - get TDLS action code
* @skb: the skb containing the frame, length will not be checked
- * @hdr_size: the size of the ieee80211_hdr that starts at skb->data
+ * Return: the TDLS action code, or -1 if it's not an encapsulated TDLS action
+ * frame
*
* This function assumes the frame is a data frame, and that the network header
* is in the correct place.
*/
-static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size)
+static inline int ieee80211_get_tdls_action(struct sk_buff *skb)
{
if (!skb_is_nonlinear(skb) &&
skb->len > (skb_network_offset(skb) + 2)) {
@@ -3729,6 +2594,7 @@ static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size)
/**
* ieee80211_action_contains_tpc - checks if the frame contains TPC element
* @skb: the skb containing the frame, length will be checked
+ * Return: %true if the frame contains a TPC element, %false otherwise
*
* This function checks if it's either TPC report action frame or Link
* Measurement report action frame as defined in IEEE Std. 802.11-2012 8.5.2.5
@@ -3773,6 +2639,50 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
return true;
}
+/**
+ * ieee80211_is_timing_measurement - check if frame is timing measurement response
+ * @skb: the SKB to check
+ * Return: whether or not the frame is a valid timing measurement response
+ */
+static inline bool ieee80211_is_timing_measurement(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE)
+ return false;
+
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return false;
+
+ if (mgmt->u.action.category == WLAN_CATEGORY_WNM_UNPROTECTED &&
+ mgmt->u.action.u.wnm_timing_msr.action_code ==
+ WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE &&
+ skb->len >= offsetofend(typeof(*mgmt), u.action.u.wnm_timing_msr))
+ return true;
+
+ return false;
+}
+
+/**
+ * ieee80211_is_ftm - check if frame is FTM response
+ * @skb: the SKB to check
+ * Return: whether or not the frame is a valid FTM response action frame
+ */
+static inline bool ieee80211_is_ftm(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (!ieee80211_is_public_action((void *)mgmt, skb->len))
+ return false;
+
+ if (mgmt->u.action.u.ftm.action_code ==
+ WLAN_PUB_ACTION_FTM_RESPONSE &&
+ skb->len >= offsetofend(typeof(*mgmt), u.action.u.ftm))
+ return true;
+
+ return false;
+}
+
struct element {
u8 id;
u8 datalen;
@@ -3812,6 +2722,7 @@ struct element {
* @element: element pointer after for_each_element() or friends
* @data: same data pointer as passed to for_each_element() or friends
* @datalen: same data length as passed to for_each_element() or friends
+ * Return: %true if all elements were iterated, %false otherwise; see notes
*
* This function returns %true if all the data was parsed or considered
* while walking the elements. Only use this if your for_each_element()
@@ -3826,7 +2737,7 @@ static inline bool for_each_element_completed(const struct element *element,
return (const u8 *)element == (const u8 *)data + datalen;
}
-/**
+/*
* RSNX Capabilities:
* bits 0-3: Field length (n-1)
*/
@@ -3841,8 +2752,8 @@ static inline bool for_each_element_completed(const struct element *element,
#define IEEE80211_AP_INFO_TBTT_HDR_FILTERED 0x04
#define IEEE80211_AP_INFO_TBTT_HDR_COLOC 0x08
#define IEEE80211_AP_INFO_TBTT_HDR_COUNT 0xF0
-#define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM 9
-#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM 13
+#define IEEE80211_TBTT_INFO_TYPE_TBTT 0
+#define IEEE80211_TBTT_INFO_TYPE_MLD 1
#define IEEE80211_RNR_TBTT_PARAMS_OCT_RECOMMENDED 0x01
#define IEEE80211_RNR_TBTT_PARAMS_SAME_SSID 0x02
@@ -3852,11 +2763,14 @@ static inline bool for_each_element_completed(const struct element *element,
#define IEEE80211_RNR_TBTT_PARAMS_PROBE_ACTIVE 0x20
#define IEEE80211_RNR_TBTT_PARAMS_COLOC_AP 0x40
+#define IEEE80211_RNR_TBTT_PARAMS_PSD_NO_LIMIT 127
+#define IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED -128
+
struct ieee80211_neighbor_ap_info {
- u8 tbtt_info_hdr;
- u8 tbtt_info_len;
- u8 op_class;
- u8 channel;
+ u8 tbtt_info_hdr;
+ u8 tbtt_info_len;
+ u8 op_class;
+ u8 channel;
} __packed;
enum ieee80211_range_params_max_total_ltf {
@@ -3866,4 +2780,64 @@ enum ieee80211_range_params_max_total_ltf {
IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_UNSPECIFIED,
};
+/*
+ * reduced neighbor report, based on Draft P802.11be_D3.0,
+ * section 9.4.2.170.2.
+ */
+struct ieee80211_rnr_mld_params {
+ u8 mld_id;
+ __le16 params;
+} __packed;
+
+#define IEEE80211_RNR_MLD_PARAMS_LINK_ID 0x000F
+#define IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT 0x0FF0
+#define IEEE80211_RNR_MLD_PARAMS_UPDATES_INCLUDED 0x1000
+#define IEEE80211_RNR_MLD_PARAMS_DISABLED_LINK 0x2000
+
+/* Format of the TBTT information element if it has 7, 8 or 9 bytes */
+struct ieee80211_tbtt_info_7_8_9 {
+ u8 tbtt_offset;
+ u8 bssid[ETH_ALEN];
+
+ /* The following element is optional, structure may not grow */
+ u8 bss_params;
+ s8 psd_20;
+} __packed;
+
+/* Format of the TBTT information element if it has >= 11 bytes */
+struct ieee80211_tbtt_info_ge_11 {
+ u8 tbtt_offset;
+ u8 bssid[ETH_ALEN];
+ __le32 short_ssid;
+
+ /* The following elements are optional, structure may grow */
+ u8 bss_params;
+ s8 psd_20;
+ struct ieee80211_rnr_mld_params mld_params;
+} __packed;
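The naming reflects the resulting sizes: the first layout covers 1 + 6 = 7 bytes (TBTT offset plus BSSID), growing to 8 or 9 with bss_params and psd_20, while the second starts at 1 + 6 + 4 = 11 bytes and grows to 12, 13, or 16 as bss_params, psd_20, and the 3-byte MLD parameters are appended.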
+
+#include "ieee80211-ht.h"
+#include "ieee80211-vht.h"
+#include "ieee80211-he.h"
+#include "ieee80211-eht.h"
+#include "ieee80211-mesh.h"
+#include "ieee80211-s1g.h"
+#include "ieee80211-p2p.h"
+#include "ieee80211-nan.h"
+
+/**
+ * ieee80211_check_tim - check if AID bit is set in TIM
+ * @tim: the TIM IE
+ * @tim_len: length of the TIM IE
+ * @aid: the AID to look for
+ * @s1g: whether the TIM is from an S1G PPDU
+ * Return: whether or not traffic is indicated in the TIM for the given AID
+ */
+static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid, bool s1g)
+{
+ return s1g ? ieee80211_s1g_check_tim(tim, tim_len, aid) :
+ __ieee80211_check_tim(tim, tim_len, aid);
+}
+
#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
index 95c831162212..140f61ec0f5f 100644
--- a/include/linux/ieee802154.h
+++ b/include/linux/ieee802154.h
@@ -44,6 +44,13 @@
#define IEEE802154_SHORT_ADDR_LEN 2
#define IEEE802154_PAN_ID_LEN 2
+/* Duration in superframe order */
+#define IEEE802154_MAX_SCAN_DURATION 14
+#define IEEE802154_ACTIVE_SCAN_DURATION 15
+/* Superframe duration in slots */
+#define IEEE802154_SUPERFRAME_PERIOD 16
+/* Various periods expressed in symbols */
+#define IEEE802154_SLOT_PERIOD 60
#define IEEE802154_LIFS_PERIOD 40
#define IEEE802154_SIFS_PERIOD 12
#define IEEE802154_MAX_SIFS_FRAME_SIZE 18
@@ -134,18 +141,46 @@ enum {
* a successful transmission.
*/
IEEE802154_SUCCESS = 0x0,
-
+ /* The requested operation failed. */
+ IEEE802154_MAC_ERROR = 0x1,
+ /* The requested operation has been cancelled. */
+ IEEE802154_CANCELLED = 0x2,
+ /*
+	 * Device is ready to poll the coordinator for data in a
+	 * non-beacon-enabled PAN.
+ */
+ IEEE802154_READY_FOR_POLL = 0x3,
+ /* Wrong frame counter. */
+ IEEE802154_COUNTER_ERROR = 0xdb,
+ /*
+	 * The frame does not conform to the incoming key usage policy checking
+ * procedure.
+ */
+ IEEE802154_IMPROPER_KEY_TYPE = 0xdc,
+ /*
+	 * The frame does not conform to the incoming security level usage
+ * policy checking procedure.
+ */
+ IEEE802154_IMPROPER_SECURITY_LEVEL = 0xdd,
+ /* Secured frame received with an empty Frame Version field. */
+ IEEE802154_UNSUPPORTED_LEGACY = 0xde,
+ /*
+ * A secured frame is received or must be sent but security is not
+ * enabled in the device. Or, the Auxiliary Security Header has security
+ * level of zero in it.
+ */
+ IEEE802154_UNSUPPORTED_SECURITY = 0xdf,
/* The beacon was lost following a synchronization request. */
- IEEE802154_BEACON_LOSS = 0xe0,
+ IEEE802154_BEACON_LOST = 0xe0,
/*
* A transmission could not take place due to activity on the
* channel, i.e., the CSMA-CA mechanism has failed.
*/
- IEEE802154_CHNL_ACCESS_FAIL = 0xe1,
+ IEEE802154_CHANNEL_ACCESS_FAILURE = 0xe1,
/* The GTS request has been denied by the PAN coordinator. */
- IEEE802154_DENINED = 0xe2,
+ IEEE802154_DENIED = 0xe2,
/* The attempt to disable the transceiver has failed. */
- IEEE802154_DISABLE_TRX_FAIL = 0xe3,
+ IEEE802154_DISABLE_TRX_FAILURE = 0xe3,
/*
* The received frame induces a failed security check according to
* the security suite.
@@ -185,9 +220,9 @@ enum {
* A PAN identifier conflict has been detected and communicated to the
* PAN coordinator.
*/
- IEEE802154_PANID_CONFLICT = 0xee,
+ IEEE802154_PAN_ID_CONFLICT = 0xee,
/* A coordinator realignment command has been received. */
- IEEE802154_REALIGMENT = 0xef,
+ IEEE802154_REALIGNMENT = 0xef,
/* The transaction has expired and its information discarded. */
IEEE802154_TRANSACTION_EXPIRED = 0xf0,
/* There is no capacity to store the transaction. */
@@ -203,12 +238,73 @@ enum {
* A SET/GET request was issued with the identifier of a PIB attribute
* that is not supported.
*/
- IEEE802154_UNSUPPORTED_ATTR = 0xf4,
+ IEEE802154_UNSUPPORTED_ATTRIBUTE = 0xf4,
+ /* Missing source or destination address or address mode. */
+ IEEE802154_INVALID_ADDRESS = 0xf5,
+ /*
+ * MLME asked to turn the receiver on, but the on time duration is too
+ * big compared to the macBeaconOrder.
+ */
+ IEEE802154_ON_TIME_TOO_LONG = 0xf6,
+ /*
+	 * MLME asked to turn the receiver on, but the request was delayed for
+ * too long before getting processed.
+ */
+ IEEE802154_PAST_TIME = 0xf7,
+ /*
+ * The StartTime parameter is nonzero, and the MLME is not currently
+ * tracking the beacon of the coordinator through which it is
+ * associated.
+ */
+ IEEE802154_TRACKING_OFF = 0xf8,
+ /*
+ * The index inside the hierarchical values in PIBAttribute is out of
+ * range.
+ */
+ IEEE802154_INVALID_INDEX = 0xf9,
+ /*
+	 * The maximum number of PAN descriptors that can be discovered during
+	 * a scan has been reached.
+ */
+ IEEE802154_LIMIT_REACHED = 0xfa,
+ /*
+ * The PIBAttribute parameter specifies an attribute that is a read-only
+ * attribute.
+ */
+ IEEE802154_READ_ONLY = 0xfb,
/*
* A request to perform a scan operation failed because the MLME was
* in the process of performing a previously initiated scan operation.
*/
IEEE802154_SCAN_IN_PROGRESS = 0xfc,
+ /* The outgoing superframe overlaps the incoming superframe. */
+ IEEE802154_SUPERFRAME_OVERLAP = 0xfd,
+ /* Any other error situation. */
+ IEEE802154_SYSTEM_ERROR = 0xff,
+};
+
+/**
+ * enum ieee802154_filtering_level - Filtering levels applicable to a PHY
+ *
+ * @IEEE802154_FILTERING_NONE: No filtering at all, what is received is
+ * forwarded to the softMAC
+ * @IEEE802154_FILTERING_1_FCS: First filtering level, frames with an invalid
+ * FCS should be dropped
+ * @IEEE802154_FILTERING_2_PROMISCUOUS: Second filtering level, promiscuous
+ *	mode as described in the spec; on the PHY side it filters exactly
+ *	like level one, but at the MAC level the frame should be
+ * forwarded to the upper layer directly
+ * @IEEE802154_FILTERING_3_SCAN: Third filtering level, scan related, where
+ * only beacons must be processed, all remaining traffic gets dropped
+ * @IEEE802154_FILTERING_4_FRAME_FIELDS: Fourth filtering level actually
+ * enforcing the validity of the content of the frame with various checks
+ */
+enum ieee802154_filtering_level {
+ IEEE802154_FILTERING_NONE,
+ IEEE802154_FILTERING_1_FCS,
+ IEEE802154_FILTERING_2_PROMISCUOUS,
+ IEEE802154_FILTERING_3_SCAN,
+ IEEE802154_FILTERING_4_FRAME_FIELDS,
};
/* frame control handling */
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index bf5c5f32c65e..10a1e81434cb 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -48,9 +48,15 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev)
case ARPHRD_TUNNEL6:
case ARPHRD_SIT:
case ARPHRD_IPGRE:
+ case ARPHRD_IP6GRE:
case ARPHRD_VOID:
case ARPHRD_NONE:
case ARPHRD_RAWIP:
+ case ARPHRD_PIMREG:
+ /* PPP adds its l2 header automatically in ppp_start_xmit().
+ * This makes it look like an l3 device to __bpf_redirect() and tcf_mirred_init().
+ */
+ case ARPHRD_PPP:
return false;
default:
return true;
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 2cc35038a8ca..c5fe3b2a53e8 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -57,20 +57,26 @@ struct br_ip_list {
#define BR_MRP_AWARE BIT(17)
#define BR_MRP_LOST_CONT BIT(18)
#define BR_MRP_LOST_IN_CONT BIT(19)
+#define BR_TX_FWD_OFFLOAD BIT(20)
+#define BR_PORT_LOCKED BIT(21)
+#define BR_PORT_MAB BIT(22)
+#define BR_NEIGH_VLAN_SUPPRESS BIT(23)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
-extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
+struct net_bridge;
+void brioctl_set(int (*hook)(struct net *net, unsigned int cmd,
+ void __user *uarg));
+int br_ioctl_call(struct net *net, unsigned int cmd, void __user *uarg);
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto);
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
+bool br_multicast_has_router_adjacent(struct net_device *dev, int proto);
bool br_multicast_enabled(const struct net_device *dev);
bool br_multicast_router(const struct net_device *dev);
-int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
- struct notifier_block *nb, struct netlink_ext_ack *extack);
#else
static inline int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list)
@@ -87,6 +93,13 @@ static inline bool br_multicast_has_querier_adjacent(struct net_device *dev,
{
return false;
}
+
+static inline bool br_multicast_has_router_adjacent(struct net_device *dev,
+ int proto)
+{
+ return true;
+}
+
static inline bool br_multicast_enabled(const struct net_device *dev)
{
return false;
@@ -95,13 +108,6 @@ static inline bool br_multicast_router(const struct net_device *dev)
{
return false;
}
-static inline int br_mdb_replay(struct net_device *br_dev,
- struct net_device *dev,
- struct notifier_block *nb,
- struct netlink_ext_ack *extack)
-{
- return -EOPNOTSUPP;
-}
#endif
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
@@ -111,8 +117,11 @@ int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid);
int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto);
int br_vlan_get_info(const struct net_device *dev, u16 vid,
struct bridge_vlan_info *p_vinfo);
-int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
- struct notifier_block *nb, struct netlink_ext_ack *extack);
+int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo);
+bool br_mst_enabled(const struct net_device *dev);
+int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids);
+int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state);
#else
static inline bool br_vlan_enabled(const struct net_device *dev)
{
@@ -140,12 +149,26 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
return -EINVAL;
}
-static inline int br_vlan_replay(struct net_device *br_dev,
- struct net_device *dev,
- struct notifier_block *nb,
- struct netlink_ext_ack *extack)
+static inline int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
+ struct bridge_vlan_info *p_vinfo)
{
- return -EOPNOTSUPP;
+ return -EINVAL;
+}
+
+static inline bool br_mst_enabled(const struct net_device *dev)
+{
+ return false;
+}
+
+static inline int br_mst_get_info(const struct net_device *dev, u16 msti,
+ unsigned long *vids)
+{
+ return -EINVAL;
+}
+static inline int br_mst_get_state(const struct net_device *dev, u16 msti,
+ u8 *state)
+{
+ return -EINVAL;
}
#endif
@@ -156,9 +179,7 @@ struct net_device *br_fdb_find_port(const struct net_device *br_dev,
void br_fdb_clear_offload(const struct net_device *dev, u16 vid);
bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag);
u8 br_port_get_stp_state(const struct net_device *dev);
-clock_t br_get_ageing_time(struct net_device *br_dev);
-int br_fdb_replay(struct net_device *br_dev, struct net_device *dev,
- struct notifier_block *nb);
+clock_t br_get_ageing_time(const struct net_device *br_dev);
#else
static inline struct net_device *
br_fdb_find_port(const struct net_device *br_dev,
@@ -183,17 +204,10 @@ static inline u8 br_port_get_stp_state(const struct net_device *dev)
return BR_STATE_DISABLED;
}
-static inline clock_t br_get_ageing_time(struct net_device *br_dev)
+static inline clock_t br_get_ageing_time(const struct net_device *br_dev)
{
return 0;
}
-
-static inline int br_fdb_replay(struct net_device *br_dev,
- struct net_device *dev,
- struct notifier_block *nb)
-{
- return -EOPNOTSUPP;
-}
#endif
#endif
diff --git a/include/linux/if_eql.h b/include/linux/if_eql.h
index d984694c384d..07f9b660b741 100644
--- a/include/linux/if_eql.h
+++ b/include/linux/if_eql.h
@@ -21,11 +21,13 @@
#include <linux/timer.h>
#include <linux/spinlock.h>
+#include <net/net_trackers.h>
#include <uapi/linux/if_eql.h>
typedef struct slave {
struct list_head list;
struct net_device *dev;
+ netdevice_tracker dev_tracker;
long priority;
long priority_bps;
long priority_Bps;
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 8a9792a6427a..61b7335aa037 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -19,6 +19,9 @@
#include <linux/skbuff.h>
#include <uapi/linux/if_ether.h>
+/* XX:XX:XX:XX:XX:XX */
+#define MAC_ADDR_STR_LEN (3 * ETH_ALEN - 1)
+
static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb_mac_header(skb);
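With ETH_ALEN = 6, the MAC_ADDR_STR_LEN added above evaluates to 3 * 6 - 1 = 17: six two-digit octets plus five separating colons, not counting the terminating NUL.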
diff --git a/include/linux/if_hsr.h b/include/linux/if_hsr.h
index 38bbc537d4e4..f4cf2dd36d19 100644
--- a/include/linux/if_hsr.h
+++ b/include/linux/if_hsr.h
@@ -2,6 +2,10 @@
#ifndef _LINUX_IF_HSR_H_
#define _LINUX_IF_HSR_H_
+#include <linux/types.h>
+
+struct net_device;
+
/* used to differentiate various protocols */
enum hsr_version {
HSR_V0 = 0,
@@ -9,9 +13,38 @@ enum hsr_version {
PRP_V1,
};
+enum hsr_port_type {
+ HSR_PT_NONE = 0, /* Must be 0, used by framereg */
+ HSR_PT_SLAVE_A,
+ HSR_PT_SLAVE_B,
+ HSR_PT_INTERLINK,
+ HSR_PT_MASTER,
+ HSR_PT_PORTS, /* This must be the last item in the enum */
+};
+
+/* HSR Tag.
+ * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB,
+ * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
+ * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr,
+ * encapsulated protocol } instead.
+ *
+ * Field names as defined in the IEC:2010 standard for HSR.
+ */
+struct hsr_tag {
+ __be16 path_and_LSDU_size;
+ __be16 sequence_nr;
+ __be16 encap_proto;
+} __packed;
+
+#define HSR_HLEN 6
+
#if IS_ENABLED(CONFIG_HSR)
extern bool is_hsr_master(struct net_device *dev);
extern int hsr_get_version(struct net_device *dev, enum hsr_version *ver);
+struct net_device *hsr_get_port_ndev(struct net_device *ndev,
+ enum hsr_port_type pt);
+int hsr_get_port_type(struct net_device *hsr_dev, struct net_device *dev,
+ enum hsr_port_type *type);
#else
static inline bool is_hsr_master(struct net_device *dev)
{
@@ -22,6 +55,19 @@ static inline int hsr_get_version(struct net_device *dev,
{
return -EINVAL;
}
+
+static inline struct net_device *hsr_get_port_ndev(struct net_device *ndev,
+ enum hsr_port_type pt)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int hsr_get_port_type(struct net_device *hsr_dev,
+ struct net_device *dev,
+ enum hsr_port_type *type)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_HSR */
#endif /*_LINUX_IF_HSR_H_*/
diff --git a/include/linux/if_ltalk.h b/include/linux/if_ltalk.h
deleted file mode 100644
index 4cc1c0b77870..000000000000
--- a/include/linux/if_ltalk.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_LTALK_H
-#define __LINUX_LTALK_H
-
-#include <uapi/linux/if_ltalk.h>
-
-extern struct net_device *alloc_ltalkdev(int sizeof_priv);
-#endif
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 10c94a3936ca..0f7281e3e448 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -21,6 +21,7 @@ struct macvlan_dev {
struct hlist_node hlist;
struct macvlan_port *port;
struct net_device *lowerdev;
+ netdevice_tracker dev_tracker;
void *accel_priv;
struct vlan_pcpu_stats __percpu *pcpu_stats;
@@ -45,10 +46,10 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
pcpu_stats = get_cpu_ptr(vlan->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
- pcpu_stats->rx_packets++;
- pcpu_stats->rx_bytes += len;
+ u64_stats_inc(&pcpu_stats->rx_packets);
+ u64_stats_add(&pcpu_stats->rx_bytes, len);
if (multicast)
- pcpu_stats->rx_multicast++;
+ u64_stats_inc(&pcpu_stats->rx_multicast);
u64_stats_update_end(&pcpu_stats->syncp);
put_cpu_ptr(vlan->pcpu_stats);
} else {
@@ -58,8 +59,10 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
extern void macvlan_common_setup(struct net_device *dev);
-extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
+struct rtnl_newlink_params;
+
+extern int macvlan_common_newlink(struct net_device *dev,
+ struct rtnl_newlink_params *params,
struct netlink_ext_ack *extack);
extern void macvlan_dellink(struct net_device *dev, struct list_head *head);
diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h
index 96d40942e5a3..c87efd333faa 100644
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -4,8 +4,6 @@
*
* This file supplies definitions required by the PPP over L2TP driver
* (l2tp_ppp.c). All version information wrt this file is located in l2tp_ppp.c
- *
- * License:
*/
#ifndef __LINUX_IF_PPPOL2TP_H
#define __LINUX_IF_PPPOL2TP_H
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 69e813bcb947..db45d6f1c4f4 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -5,8 +5,6 @@
*
* This file supplies definitions required by the PPP over Ethernet driver
* (pppox.c). All version information wrt this file is located in pppox.c
- *
- * License:
*/
#ifndef __LINUX_IF_PPPOX_H
#define __LINUX_IF_PPPOX_H
@@ -45,7 +43,7 @@ struct pppox_sock {
/* struct sock must be the first member of pppox_sock */
struct sock sk;
struct ppp_channel chan;
- struct pppox_sock *next; /* for hash table */
+ struct pppox_sock __rcu *next; /* for hash table */
union {
struct pppoe_opt pppoe;
struct pptp_opt pptp;
diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h
index 4efb537f57f3..c44bf6e80ecb 100644
--- a/include/linux/if_rmnet.h
+++ b/include/linux/if_rmnet.h
@@ -1,10 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-only
- * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2019, 2021 The Linux Foundation. All rights reserved.
*/
#ifndef _LINUX_IF_RMNET_H_
#define _LINUX_IF_RMNET_H_
+#include <linux/types.h>
+
struct rmnet_map_header {
u8 flags; /* MAP_CMD_FLAG, MAP_PAD_LEN_MASK */
u8 mux_id;
@@ -12,10 +14,12 @@ struct rmnet_map_header {
} __aligned(1);
/* rmnet_map_header flags field:
- * PAD_LEN: number of pad bytes following packet data
- * CMD: 1 = packet contains a MAP command; 0 = packet contains data
+ * PAD_LEN: number of pad bytes following packet data
+ * CMD: 1 = packet contains a MAP command; 0 = packet contains data
+ * NEXT_HEADER: 1 = packet contains V5 CSUM header 0 = no V5 CSUM header
*/
#define MAP_PAD_LEN_MASK GENMASK(5, 0)
+#define MAP_NEXT_HEADER_FLAG BIT(6)
#define MAP_CMD_FLAG BIT(7)
struct rmnet_map_dl_csum_trailer {
@@ -23,7 +27,7 @@ struct rmnet_map_dl_csum_trailer {
u8 flags; /* MAP_CSUM_DL_VALID_FLAG */
__be16 csum_start_offset;
__be16 csum_length;
- __be16 csum_value;
+ __sum16 csum_value;
} __aligned(1);
/* rmnet_map_dl_csum_trailer flags field:
@@ -38,11 +42,33 @@ struct rmnet_map_ul_csum_header {
/* csum_info field:
* OFFSET: where (offset in bytes) to insert computed checksum
- * UDP: 1 = UDP checksum (zero checkum means no checksum)
+ * UDP: 1 = UDP checksum (zero checksum means no checksum)
* ENABLED: 1 = checksum computation requested
*/
#define MAP_CSUM_UL_OFFSET_MASK GENMASK(13, 0)
#define MAP_CSUM_UL_UDP_FLAG BIT(14)
#define MAP_CSUM_UL_ENABLED_FLAG BIT(15)
+/* MAP CSUM headers */
+struct rmnet_map_v5_csum_header {
+ u8 header_info;
+ u8 csum_info;
+ __be16 reserved;
+} __aligned(1);
+
+/* v5 header_info field
+ * NEXT_HEADER: represents whether there is any next header
+ * HEADER_TYPE: represents the type of this header
+ *
+ * csum_info field
+ * CSUM_VALID_OR_REQ:
+ * 1 = for UL, checksum computation is requested.
+ *	1 = for DL, the checksum was validated and found to be valid
+ */
+
+#define MAPV5_HDRINFO_NXT_HDR_FLAG BIT(0)
+#define MAPV5_HDRINFO_HDR_TYPE_FMASK GENMASK(7, 1)
+#define MAPV5_CSUMINFO_VALID_FLAG BIT(7)
+
+#define RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD 2
#endif /* !(_LINUX_IF_RMNET_H_) */
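A short standalone sketch of how the new v5 fields compose (bit positions taken from the defines above; the surrounding parsing context is assumed):

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed mirror of struct rmnet_map_v5_csum_header (illustrative). */
	struct map_v5_csum {
		uint8_t header_info;
		uint8_t csum_info;
		uint16_t reserved;
	};

	int main(void)
	{
		/* HDR_TYPE lives in bits 7..1; CSUM_OFFLOAD is type 2. */
		struct map_v5_csum h = {
			.header_info = 2 << 1,	/* RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD */
			.csum_info = 1 << 7,	/* MAPV5_CSUMINFO_VALID_FLAG */
		};

		if (((h.header_info >> 1) & 0x7f) == 2 && (h.csum_info & 0x80))
			printf("DL checksum validated by hardware\n");
		return 0;
	}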
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 915a187cfabd..553552fa635c 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -2,14 +2,18 @@
#ifndef _LINUX_IF_TAP_H_
#define _LINUX_IF_TAP_H_
+#include <net/sock.h>
+#include <linux/skb_array.h>
+
+struct file;
+struct socket;
+
#if IS_ENABLED(CONFIG_TAP)
struct socket *tap_get_socket(struct file *);
struct ptr_ring *tap_get_ptr_ring(struct file *file);
#else
#include <linux/err.h>
#include <linux/errno.h>
-struct file;
-struct socket;
static inline struct socket *tap_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
@@ -20,9 +24,6 @@ static inline struct ptr_ring *tap_get_ptr_ring(struct file *f)
}
#endif /* CONFIG_TAP */
-#include <net/sock.h>
-#include <linux/skb_array.h>
-
/*
* Maximum times a tap device can be opened. This can be used to
 * configure the number of receive queues, e.g. for multiqueue virtio.
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index add607943c95..ce97d891cf72 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -12,11 +12,11 @@
#include <uapi/linux/if_team.h>
struct team_pcpu_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_multicast;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_multicast;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
u32 rx_dropped;
u32 tx_dropped;
@@ -162,8 +162,8 @@ struct team_option {
bool per_port;
	unsigned int array_size; /* != 0 means the option is an array */
enum team_option_type type;
- int (*init)(struct team *team, struct team_option_inst_info *info);
- int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
+ void (*init)(struct team *team, struct team_option_inst_info *info);
+ void (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
};
@@ -189,7 +189,7 @@ struct team {
struct net_device *dev; /* associated netdevice */
struct team_pcpu_stats __percpu *pcpu_stats;
- struct mutex lock; /* used for overall locking, e.g. port lists write */
+ const struct header_ops *header_ops_cache;
/*
* List of enabled ports and their count
@@ -208,6 +208,7 @@ struct team {
bool queue_override_enabled;
struct list_head *qom_lists; /* array of queue override mapping lists */
bool port_mtu_change_allowed;
+ bool notifier_ctx;
struct {
unsigned int count;
unsigned int interval; /* in ms */
@@ -220,7 +221,6 @@ struct team {
atomic_t count_pending;
struct delayed_work dw;
} mcast_rejoin;
- struct lock_class_key team_lock_key;
long mode_priv[TEAM_MODE_PRIV_LONGS];
};
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 2a7660843444..80166eb62f41 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -19,52 +19,57 @@ struct tun_msg_ctl {
void *ptr;
};
-struct tun_xdp_hdr {
- int buflen;
- struct virtio_net_hdr gso;
-};
-
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
struct ptr_ring *tun_get_tx_ring(struct file *file);
+
static inline bool tun_is_xdp_frame(void *ptr)
{
- return (unsigned long)ptr & TUN_XDP_FLAG;
+ return (unsigned long)ptr & TUN_XDP_FLAG;
}
+
static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp)
{
- return (void *)((unsigned long)xdp | TUN_XDP_FLAG);
+ return (void *)((unsigned long)xdp | TUN_XDP_FLAG);
}
+
static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr)
{
- return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
+ return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
}
+
void tun_ptr_free(void *ptr);
#else
#include <linux/err.h>
#include <linux/errno.h>
struct file;
struct socket;
+
static inline struct socket *tun_get_socket(struct file *f)
{
return ERR_PTR(-EINVAL);
}
+
static inline struct ptr_ring *tun_get_tx_ring(struct file *f)
{
return ERR_PTR(-EINVAL);
}
+
static inline bool tun_is_xdp_frame(void *ptr)
{
return false;
}
+
static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp)
{
return NULL;
}
+
static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr)
{
return NULL;
}
+
static inline void tun_ptr_free(void *ptr)
{
}
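tun's tx ring multiplexes sk_buff and struct xdp_frame pointers in a single ptr_ring by tagging the low bit (TUN_XDP_FLAG); the reindented helpers above do the tagging and untagging. A consumer sketch, with the consume function itself hypothetical:

	#include <linux/if_tun.h>
	#include <linux/ptr_ring.h>
	#include <linux/skbuff.h>
	#include <net/xdp.h>

	/* Hypothetical consumer: untags entries pulled from the tx ring. */
	static void example_consume(struct ptr_ring *ring)
	{
		void *ptr = ptr_ring_consume(ring);

		if (!ptr)
			return;

		if (tun_is_xdp_frame(ptr))
			xdp_return_frame(tun_ptr_to_xdp(ptr));
		else
			consume_skb(ptr);
	}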
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 41a518336673..f7f34eb15e06 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -46,8 +46,10 @@ struct vlan_hdr {
* @h_vlan_encapsulated_proto: packet type ID or len
*/
struct vlan_ethhdr {
- unsigned char h_dest[ETH_ALEN];
- unsigned char h_source[ETH_ALEN];
+ struct_group(addrs,
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ );
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
@@ -60,6 +62,14 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
return (struct vlan_ethhdr *)skb_mac_header(skb);
}
+/* Prefer this version in TX path, instead of
+ * skb_reset_mac_header() + vlan_eth_hdr()
+ */
+static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct vlan_ethhdr *)skb->data;
+}
+
#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT 13
#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
@@ -69,12 +79,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
-static inline bool is_vlan_dev(const struct net_device *dev)
-{
- return dev->priv_flags & IFF_802_1Q_VLAN;
-}
-
-#define skb_vlan_tag_present(__skb) ((__skb)->vlan_present)
+#define skb_vlan_tag_present(__skb) (!!(__skb)->vlan_all)
#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci)
#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
#define skb_vlan_tag_get_cfi(__skb) (!!((__skb)->vlan_tci & VLAN_CFI_MASK))
@@ -116,17 +121,17 @@ static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev)
* @tx_dropped: number of tx drops
*/
struct vlan_pcpu_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 rx_multicast;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_multicast;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
u32 rx_errors;
u32 tx_dropped;
};
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id);
@@ -162,9 +167,11 @@ struct netpoll;
* @vlan_id: VLAN identifier
* @flags: device flags
* @real_dev: underlying netdevice
+ * @dev_tracker: refcount tracker for @real_dev reference
* @real_dev_addr: address of underlying netdevice
* @dent: proc dir entry
* @vlan_pcpu_stats: ptr to percpu rx stats
+ * @netpoll: netpoll instance "propagated" down to @real_dev
*/
struct vlan_dev_priv {
unsigned int nr_ingress_mappings;
@@ -177,6 +184,8 @@ struct vlan_dev_priv {
u16 flags;
struct net_device *real_dev;
+ netdevice_tracker dev_tracker;
+
unsigned char real_dev_addr[ETH_ALEN];
struct proc_dir_entry *dent;
@@ -186,6 +195,11 @@ struct vlan_dev_priv {
#endif
};
+static inline bool is_vlan_dev(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_802_1Q_VLAN;
+}
+
static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
{
return netdev_priv(dev);
@@ -223,6 +237,11 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
extern bool vlan_uses_dev(const struct net_device *dev);
#else
+static inline bool is_vlan_dev(const struct net_device *dev)
+{
+ return false;
+}
+
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id)
@@ -240,19 +259,19 @@ vlan_for_each(struct net_device *dev,
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
- BUG();
+ WARN_ON_ONCE(1);
return NULL;
}
static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
- BUG();
+ WARN_ON_ONCE(1);
return 0;
}
static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
- BUG();
+ WARN_ON_ONCE(1);
return 0;
}
@@ -297,7 +316,7 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
* eth_type_vlan - check for valid vlan ether type.
* @ethertype: ether type to check
*
- * Returns true if the ether type is a vlan ether type.
+ * Returns: true if the ether type is a vlan ether type.
*/
static inline bool eth_type_vlan(__be16 ethertype)
{
@@ -328,25 +347,27 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
* @mac_len: MAC header length including outer vlan headers
*
* Inserts the VLAN tag into @skb as part of the payload at offset mac_len
- * Returns error if skb_cow_head fails.
- *
* Does not change skb->protocol so this function can be used during receive.
+ *
+ * Returns: error if skb_cow_head fails.
*/
static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci,
unsigned int mac_len)
{
+ const u8 meta_len = mac_len > ETH_TLEN ? skb_metadata_len(skb) : 0;
struct vlan_ethhdr *veth;
- if (skb_cow_head(skb, VLAN_HLEN) < 0)
+ if (skb_cow_head(skb, meta_len + VLAN_HLEN) < 0)
return -ENOMEM;
skb_push(skb, VLAN_HLEN);
/* Move the mac header sans proto to the beginning of the new header. */
if (likely(mac_len > ETH_TLEN))
- memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
- skb->mac_header -= VLAN_HLEN;
+ skb_postpush_data_move(skb, VLAN_HLEN, mac_len - ETH_TLEN);
+ if (skb_mac_header_was_set(skb))
+ skb->mac_header -= VLAN_HLEN;
veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);
@@ -376,9 +397,9 @@ static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
- * Returns error if skb_cow_head fails.
- *
* Does not change skb->protocol so this function can be used during receive.
+ *
+ * Returns: error if skb_cow_head fails.
*/
static inline int __vlan_insert_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
@@ -394,12 +415,14 @@ static inline int __vlan_insert_tag(struct sk_buff *skb,
* @mac_len: MAC header length including outer vlan headers
*
* Inserts the VLAN tag into @skb as part of the payload at offset mac_len
- * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ * Returns a VLAN tagged skb. This might change skb->head.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*
* Does not change skb->protocol so this function can be used during receive.
+ *
+ * Return: modified @skb on success, NULL on error (@skb is freed).
*/
static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
__be16 vlan_proto,
@@ -423,12 +446,14 @@ static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
- * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ * Returns a VLAN tagged skb. This might change skb->head.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
*
* Does not change skb->protocol so this function can be used during receive.
+ *
+ * Return: modified @skb on success, NULL on error (@skb is freed).
*/
static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
@@ -443,10 +468,12 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
* @vlan_tci: VLAN TCI to insert
*
* Inserts the VLAN tag into @skb as part of the payload
- * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ * Returns a VLAN tagged skb. This might change skb->head.
*
* Following the skb_unshare() example, in case of error, the calling function
* doesn't have to worry about freeing the original skb.
+ *
+ * Return: modified @skb on success, NULL on error (@skb is freed).
*/
static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
__be16 vlan_proto,
@@ -466,7 +493,7 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
*/
static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
{
- skb->vlan_present = 0;
+ skb->vlan_all = 0;
}
/**
@@ -478,9 +505,7 @@ static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
*/
static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
{
- dst->vlan_present = src->vlan_present;
- dst->vlan_proto = src->vlan_proto;
- dst->vlan_tci = src->vlan_tci;
+ dst->vlan_all = src->vlan_all;
}
/*
@@ -514,7 +539,6 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
{
skb->vlan_proto = vlan_proto;
skb->vlan_tci = vlan_tci;
- skb->vlan_present = 1;
}
/**
@@ -522,14 +546,14 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
- * Returns error if the skb is not of VLAN type
+ * Returns: error if the skb is not of VLAN type
*/
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
- struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
+ struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
if (!eth_type_vlan(veth->h_vlan_proto))
- return -EINVAL;
+ return -ENODATA;
*vlan_tci = ntohs(veth->h_vlan_TCI);
return 0;
@@ -540,7 +564,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
- * Returns error if @skb->vlan_tci is not set correctly
+ * Returns: error if @skb->vlan_tci is not set correctly
*/
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
u16 *vlan_tci)
@@ -550,7 +574,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
return 0;
} else {
*vlan_tci = 0;
- return -EINVAL;
+ return -ENODATA;
}
}
@@ -559,7 +583,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
* @skb: skbuff to query
* @vlan_tci: buffer to store value
*
- * Returns error if the skb is not VLAN tagged
+ * Returns: error if the skb is not VLAN tagged
*/
static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
@@ -571,16 +595,19 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
}
/**
- * vlan_get_protocol - get protocol EtherType.
+ * __vlan_get_protocol_offset() - get protocol EtherType.
* @skb: skbuff to query
* @type: first vlan protocol
+ * @mac_offset: MAC offset
* @depth: buffer to store length of eth and vlan tags in bytes
*
- * Returns the EtherType of the packet, regardless of whether it is
+ * Returns: the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
-static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
- int *depth)
+static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
+ __be16 type,
+ int mac_offset,
+ int *depth)
{
unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
@@ -599,7 +626,8 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
do {
struct vlan_hdr vhdr, *vh;
- vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
+ vh = skb_header_pointer(skb, mac_offset + vlan_depth,
+ sizeof(vhdr), &vhdr);
if (unlikely(!vh || !--parse_depth))
return 0;
@@ -614,11 +642,17 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
return type;
}
+static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
+ int *depth)
+{
+ return __vlan_get_protocol_offset(skb, type, 0, depth);
+}
+
/**
* vlan_get_protocol - get protocol EtherType.
* @skb: skbuff to query
*
- * Returns the EtherType of the packet, regardless of whether it is
+ * Returns: the EtherType of the packet, regardless of whether it is
* vlan encapsulated (normal or hardware accelerated) or not.
*/
static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
@@ -626,6 +660,23 @@ static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
return __vlan_get_protocol(skb, skb->protocol, NULL);
}
+/* This version of __vlan_get_protocol() also pulls mac header in skb->head */
+static inline __be16 vlan_get_protocol_and_depth(struct sk_buff *skb,
+ __be16 type, int *depth)
+{
+ int maclen;
+
+ type = __vlan_get_protocol(skb, type, &maclen);
+
+ if (type) {
+ if (!pskb_may_pull(skb, maclen))
+ type = 0;
+ else if (depth)
+ *depth = maclen;
+ }
+ return type;
+}
+
/* A getter for the SKB protocol field which will handle VLAN tags consistently
* whether VLAN acceleration is enabled or not.
*/
@@ -675,10 +726,29 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
}
/**
+ * vlan_remove_tag - remove outer VLAN tag from payload
+ * @skb: skbuff to remove tag from
+ * @vlan_tci: buffer to store value
+ *
+ * Expects the skb to contain a VLAN tag in the payload, and to have skb->data
+ * pointing at the MAC header.
+ */
+static inline void vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci)
+{
+ struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+
+ *vlan_tci = ntohs(vhdr->h_vlan_TCI);
+
+ vlan_set_encap_proto(skb, vhdr);
+ __skb_pull(skb, VLAN_HLEN);
+ skb_postpull_data_move(skb, VLAN_HLEN, 2 * ETH_ALEN);
+}
+
+/**
* skb_vlan_tagged - check if skb is vlan tagged.
* @skb: skbuff to query
*
- * Returns true if the skb is tagged, regardless of whether it is hardware
+ * Returns: true if the skb is tagged, regardless of whether it is hardware
* accelerated or not.
*/
static inline bool skb_vlan_tagged(const struct sk_buff *skb)
@@ -694,7 +764,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
* skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
* @skb: skbuff to query
*
- * Returns true if the skb is tagged with multiple vlan headers, regardless
+ * Returns: true if the skb is tagged with multiple vlan headers, regardless
* of whether it is hardware accelerated or not.
*/
static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
@@ -710,7 +780,7 @@ static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
return false;
- veh = (struct vlan_ethhdr *)skb->data;
+ veh = skb_vlan_eth_hdr(skb);
protocol = veh->h_vlan_encapsulated_proto;
}
@@ -725,7 +795,7 @@ static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
* @skb: skbuff to query
* @features: features to be checked
*
- * Returns features without unsafe ones if the skb has multiple tags.
+ * Returns: features without unsafe ones if the skb has multiple tags.
*/
static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
netdev_features_t features)
@@ -749,9 +819,11 @@ static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
* @h1: Pointer to vlan header
* @h2: Pointer to vlan header
*
- * Compare two vlan headers, returns 0 if equal.
+ * Compare two vlan headers.
*
 * Please note that alignment of h1 & h2 is only guaranteed to be 16 bits.
+ *
+ * Return: 0 if equal, arbitrary non-zero value if not equal.
*/
static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
const struct vlan_hdr *h2)
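Two behavioural changes above are easy to miss: skb_vlan_eth_hdr() reads the header at skb->data (TX path), and the tag getters now return -ENODATA instead of -EINVAL for untagged frames. A small caller sketch (function name hypothetical):

	#include <linux/if_vlan.h>

	/* Hypothetical TX-path helper: read the VLAN ID, treating an
	 * untagged frame (-ENODATA) as VID 0.
	 */
	static u16 example_tx_vid(const struct sk_buff *skb)
	{
		u16 tci;

		if (__vlan_get_tag(skb, &tci))
			return 0;

		return tci & VLAN_VID_MASK;
	}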
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 64ce8cd1cfaf..073b30a9b850 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -15,6 +15,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/refcount.h>
+#include <linux/sockptr.h>
#include <uapi/linux/igmp.h>
static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb)
@@ -38,12 +39,9 @@ struct ip_sf_socklist {
unsigned int sl_max;
unsigned int sl_count;
struct rcu_head rcu;
- __be32 sl_addr[];
+ __be32 sl_addr[] __counted_by(sl_max);
};
-#define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \
- (count) * sizeof(__be32))
-
#define IP_SFBLOCK 10 /* allocate this many at once */
/* ip_mc_socklist is real list now. Speed is not argument;
@@ -89,6 +87,8 @@ struct ip_mc_list {
char loaded;
unsigned char gsquery; /* check source marks? */
unsigned char crcount;
+ unsigned long mca_cstamp;
+ unsigned long mca_tstamp;
struct rcu_head rcu;
};
@@ -121,10 +121,10 @@ extern int ip_mc_source(int add, int omode, struct sock *sk,
struct ip_mreq_source *mreqs, int ifindex);
extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex);
extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
- struct ip_msfilter __user *optval, int __user *optlen);
+ sockptr_t optval, sockptr_t optlen);
extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
- struct sockaddr_storage __user *p);
-extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt,
+ sockptr_t optval, size_t offset);
+extern int ip_mc_sf_allow(const struct sock *sk, __be32 local, __be32 rmt,
int dif, int sdif);
extern void ip_mc_init_dev(struct in_device *);
extern void ip_mc_destroy_dev(struct in_device *);
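With IP_SFLSIZE() removed and sl_addr[] annotated __counted_by(sl_max), allocations are expected to size the flexible array with struct_size() and keep sl_max in sync. A sketch under that assumption:

	#include <linux/igmp.h>
	#include <linux/overflow.h>
	#include <linux/slab.h>

	/* Hypothetical allocator: struct_size() replaces the old
	 * IP_SFLSIZE() macro; sl_max bounds the __counted_by() array.
	 */
	static struct ip_sf_socklist *example_alloc_sfl(unsigned int count)
	{
		struct ip_sf_socklist *psl;

		psl = kzalloc(struct_size(psl, sl_addr, count), GFP_KERNEL);
		if (!psl)
			return NULL;

		psl->sl_max = count;
		return psl;
	}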
diff --git a/include/linux/iio/adc-helpers.h b/include/linux/iio/adc-helpers.h
new file mode 100644
index 000000000000..56b092a2a4c4
--- /dev/null
+++ b/include/linux/iio/adc-helpers.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/*
+ * The industrial I/O ADC firmware property parsing helpers
+ *
+ * Copyright (c) 2025 Matti Vaittinen <mazziesaccount@gmail.com>
+ */
+
+#ifndef _INDUSTRIAL_IO_ADC_HELPERS_H_
+#define _INDUSTRIAL_IO_ADC_HELPERS_H_
+
+#include <linux/property.h>
+
+struct device;
+struct iio_chan_spec;
+
+static inline int iio_adc_device_num_channels(struct device *dev)
+{
+ return device_get_named_child_node_count(dev, "channel");
+}
+
+int devm_iio_adc_device_alloc_chaninfo_se(struct device *dev,
+ const struct iio_chan_spec *template,
+ int max_chan_id,
+ struct iio_chan_spec **cs);
+
+#endif /* _INDUSTRIAL_IO_ADC_HELPERS_H_ */
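A probe-time sketch of the new helpers; the channel template, the max_chan_id value of 15, and the assumption that the function returns the number of channels found are all illustrative:

	#include <linux/bits.h>
	#include <linux/iio/adc-helpers.h>
	#include <linux/iio/iio.h>

	static const struct iio_chan_spec example_template = {
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
	};

	/* Hypothetical probe fragment using the devm single-ended helper */
	static int example_parse_channels(struct device *dev)
	{
		struct iio_chan_spec *chans;
		int num;

		num = devm_iio_adc_device_alloc_chaninfo_se(dev,
							    &example_template,
							    15, &chans);
		if (num < 0)
			return num;

		/* assumed: num is the number of "channel" nodes found */
		return num;
	}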
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 7199280d89ca..6e70a412e218 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -8,6 +8,8 @@
#ifndef __AD_SIGMA_DELTA_H__
#define __AD_SIGMA_DELTA_H__
+#include <linux/iio/iio.h>
+
enum ad_sigma_delta_mode {
AD_SD_MODE_CONTINUOUS = 0,
AD_SD_MODE_SINGLE = 1,
@@ -26,31 +28,57 @@ struct ad_sd_calib_data {
};
struct ad_sigma_delta;
+struct device;
+struct gpio_desc;
struct iio_dev;
+struct spi_offload;
+struct spi_offload_trigger;
/**
* struct ad_sigma_delta_info - Sigma Delta driver specific callbacks and options
* @set_channel: Will be called to select the current channel, may be NULL.
+ * @append_status: Will be called to enable status append at the end of the sample, may be NULL.
* @set_mode: Will be called to select the current mode, may be NULL.
+ * @disable_all: Will be called to disable all channels, may be NULL.
+ * @disable_one: Will be called to disable a single channel after
+ * ad_sigma_delta_single_conversion(), may be NULL.
+ * Usage of this callback expects iio_chan_spec.address to contain
+ * the value required for the driver to identify the channel.
* @postprocess_sample: Is called for each sampled data word, can be used to
 *		modify or drop the sample data; may be NULL.
* @has_registers: true if the device has writable and readable registers, false
* if there is just one read-only sample data shift register.
+ * @has_named_irqs: Set to true if there is more than one IRQ line.
+ * @supports_spi_offload: Set to true if the driver supports SPI offload. Often
+ * special considerations are needed for scan_type and other channel
+ * info, so individual drivers have to set this to let the core
+ * code know that it can use SPI offload if it is available.
* @addr_shift: Shift of the register address in the communications register.
* @read_mask: Mask for the communications register having the read bit set.
+ * @status_ch_mask: Mask for the channel number stored in status register.
* @data_reg: Address of the data register, if 0 the default address of 0x3 will
* be used.
* @irq_flags: flags for the interrupt used by the triggered buffer
+ * @num_slots: Number of sequencer slots
+ * @num_resetclks: Number of SPI clk cycles with MOSI=1 to reset the chip.
*/
struct ad_sigma_delta_info {
int (*set_channel)(struct ad_sigma_delta *, unsigned int channel);
+ int (*append_status)(struct ad_sigma_delta *, bool append);
int (*set_mode)(struct ad_sigma_delta *, enum ad_sigma_delta_mode mode);
+ int (*disable_all)(struct ad_sigma_delta *);
+ int (*disable_one)(struct ad_sigma_delta *, unsigned int chan);
int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample);
bool has_registers;
+ bool has_named_irqs;
+ bool supports_spi_offload;
unsigned int addr_shift;
unsigned int read_mask;
+ unsigned int status_ch_mask;
unsigned int data_reg;
unsigned long irq_flags;
+ unsigned int num_slots;
+ unsigned int num_resetclks;
};
/**
@@ -67,14 +95,28 @@ struct ad_sigma_delta {
/* private: */
struct completion completion;
+ spinlock_t irq_lock; /* protects .irq_dis and irq en/disable state */
bool irq_dis;
bool bus_locked;
bool keep_cs_asserted;
- uint8_t comm;
+ u8 comm;
const struct ad_sigma_delta_info *info;
+ unsigned int active_slots;
+ unsigned int current_slot;
+ unsigned int num_slots;
+ struct gpio_desc *rdy_gpiod;
+ int irq_line;
+ bool status_appended;
+ /* map slots to channels in order to know what to expect from devices */
+ unsigned int *slots;
+ struct spi_message sample_msg;
+ struct spi_transfer sample_xfer[2];
+ u8 *samples_buf;
+ struct spi_offload *offload;
+ struct spi_offload_trigger *offload_trigger;
/*
* DMA (thus cache coherency maintenance) requires the
@@ -83,10 +125,16 @@ struct ad_sigma_delta {
* 'rx_buf' is up to 32 bits per sample + 64 bit timestamp,
* rounded to 16 bytes to take into account padding.
*/
- uint8_t tx_buf[4] ____cacheline_aligned;
- uint8_t rx_buf[16] __aligned(8);
+ u8 tx_buf[4] __aligned(IIO_DMA_MINALIGN);
+ u8 rx_buf[16] __aligned(8);
+ u8 sample_addr;
};
+static inline bool ad_sigma_delta_has_spi_offload(struct ad_sigma_delta *sd)
+{
+ return sd->offload != NULL;
+}
+
static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
unsigned int channel)
{
@@ -96,6 +144,38 @@ static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
return 0;
}
+static inline int ad_sigma_delta_append_status(struct ad_sigma_delta *sd, bool append)
+{
+ int ret;
+
+ if (sd->info->append_status) {
+ ret = sd->info->append_status(sd, append);
+ if (ret < 0)
+ return ret;
+
+ sd->status_appended = append;
+ }
+
+ return 0;
+}
+
+static inline int ad_sigma_delta_disable_all(struct ad_sigma_delta *sd)
+{
+ if (sd->info->disable_all)
+ return sd->info->disable_all(sd);
+
+ return 0;
+}
+
+static inline int ad_sigma_delta_disable_one(struct ad_sigma_delta *sd,
+ unsigned int chan)
+{
+ if (sd->info->disable_one)
+ return sd->info->disable_one(sd, chan);
+
+ return 0;
+}
+
static inline int ad_sigma_delta_set_mode(struct ad_sigma_delta *sd,
unsigned int mode)
{
@@ -114,14 +194,13 @@ static inline int ad_sigma_delta_postprocess_sample(struct ad_sigma_delta *sd,
return 0;
}
-void ad_sd_set_comm(struct ad_sigma_delta *sigma_delta, uint8_t comm);
+void ad_sd_set_comm(struct ad_sigma_delta *sigma_delta, u8 comm);
int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
unsigned int size, unsigned int val);
int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
unsigned int size, unsigned int *val);
-int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
- unsigned int reset_length);
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta);
int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, int *val);
@@ -132,8 +211,7 @@ int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
struct spi_device *spi, const struct ad_sigma_delta_info *info);
-int ad_sd_setup_buffer_and_trigger(struct iio_dev *indio_dev);
-void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev);
+int devm_ad_sd_setup_buffer_and_trigger(struct device *dev, struct iio_dev *indio_dev);
int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
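Per the kdoc above, @disable_one receives the value the driver stored in iio_chan_spec.address. A wiring sketch; the register address and field values are hypothetical:

	#include <linux/bits.h>
	#include <linux/iio/adc/ad_sigma_delta.h>

	/* Hypothetical callback: chan carries iio_chan_spec.address */
	static int example_disable_one(struct ad_sigma_delta *sd,
				       unsigned int chan)
	{
		/* write 0 to a (hypothetical) 2-byte channel register */
		return ad_sd_write_reg(sd, 0x09, 2, 0);
	}

	static const struct ad_sigma_delta_info example_info = {
		.disable_one = example_disable_one,
		.has_registers = true,
		.addr_shift = 3,
		.read_mask = BIT(6),
		.num_resetclks = 64,
	};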
diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h
deleted file mode 100644
index 52620e5b8052..000000000000
--- a/include/linux/iio/adc/adi-axi-adc.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Analog Devices Generic AXI ADC IP core driver/library
- * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
- *
- * Copyright 2012-2020 Analog Devices Inc.
- */
-#ifndef __ADI_AXI_ADC_H__
-#define __ADI_AXI_ADC_H__
-
-struct device;
-struct iio_chan_spec;
-
-/**
- * struct adi_axi_adc_chip_info - Chip specific information
- * @name Chip name
- * @id Chip ID (usually product ID)
- * @channels Channel specifications of type @struct iio_chan_spec
- * @num_channels Number of @channels
- * @scale_table Supported scales by the chip; tuples of 2 ints
- * @num_scales Number of scales in the table
- * @max_rate Maximum sampling rate supported by the device
- */
-struct adi_axi_adc_chip_info {
- const char *name;
- unsigned int id;
-
- const struct iio_chan_spec *channels;
- unsigned int num_channels;
-
- const unsigned int (*scale_table)[2];
- int num_scales;
-
- unsigned long max_rate;
-};
-
-/**
- * struct adi_axi_adc_conv - data of the ADC attached to the AXI ADC
- * @chip_info chip info details for the client ADC
- * @preenable_setup op to run in the client before enabling the AXI ADC
- * @reg_access IIO debugfs_reg_access hook for the client ADC
- * @read_raw IIO read_raw hook for the client ADC
- * @write_raw IIO write_raw hook for the client ADC
- */
-struct adi_axi_adc_conv {
- const struct adi_axi_adc_chip_info *chip_info;
-
- int (*preenable_setup)(struct adi_axi_adc_conv *conv);
- int (*reg_access)(struct adi_axi_adc_conv *conv, unsigned int reg,
- unsigned int writeval, unsigned int *readval);
- int (*read_raw)(struct adi_axi_adc_conv *conv,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask);
- int (*write_raw)(struct adi_axi_adc_conv *conv,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask);
-};
-
-struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
- size_t sizeof_priv);
-
-void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv);
-
-#endif
diff --git a/include/linux/iio/adc/qcom-vadc-common.h b/include/linux/iio/adc/qcom-vadc-common.h
index 33f60f43e1aa..3bf4c49726a7 100644
--- a/include/linux/iio/adc/qcom-vadc-common.h
+++ b/include/linux/iio/adc/qcom-vadc-common.h
@@ -6,6 +6,7 @@
#ifndef QCOM_VADC_COMMON_H
#define QCOM_VADC_COMMON_H
+#include <linux/math.h>
#include <linux/types.h>
#define VADC_CONV_TIME_MIN_US 2000
@@ -80,39 +81,29 @@ struct vadc_linear_graph {
};
/**
- * struct vadc_prescale_ratio - Represent scaling ratio for ADC input.
- * @num: the inverse numerator of the gain applied to the input channel.
- * @den: the inverse denominator of the gain applied to the input channel.
- */
-struct vadc_prescale_ratio {
- u32 num;
- u32 den;
-};
-
-/**
* enum vadc_scale_fn_type - Scaling function to convert ADC code to
* physical scaled units for the channel.
- * SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV).
- * SCALE_THERM_100K_PULLUP: Returns temperature in millidegC.
+ * @SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV).
+ * @SCALE_THERM_100K_PULLUP: Returns temperature in millidegC.
* Uses a mapping table with 100K pullup.
- * SCALE_PMIC_THERM: Returns result in milli degree's Centigrade.
- * SCALE_XOTHERM: Returns XO thermistor voltage in millidegC.
- * SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp
- * SCALE_HW_CALIB_DEFAULT: Default scaling to convert raw adc code to
+ * @SCALE_PMIC_THERM: Returns result in millidegrees Centigrade.
+ * @SCALE_XOTHERM: Returns XO thermistor voltage in millidegC.
+ * @SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp
+ * @SCALE_HW_CALIB_DEFAULT: Default scaling to convert raw adc code to
* voltage (uV) with hardware applied offset/slope values to adc code.
- * SCALE_HW_CALIB_THERM_100K_PULLUP: Returns temperature in millidegC using
+ * @SCALE_HW_CALIB_THERM_100K_PULLUP: Returns temperature in millidegC using
* lookup table. The hardware applies offset/slope to adc code.
- * SCALE_HW_CALIB_XOTHERM: Returns XO thermistor voltage in millidegC using
+ * @SCALE_HW_CALIB_XOTHERM: Returns XO thermistor voltage in millidegC using
* 100k pullup. The hardware applies offset/slope to adc code.
- * SCALE_HW_CALIB_THERM_100K_PU_PM7: Returns temperature in millidegC using
+ * @SCALE_HW_CALIB_THERM_100K_PU_PM7: Returns temperature in millidegC using
* lookup table for PMIC7. The hardware applies offset/slope to adc code.
- * SCALE_HW_CALIB_PMIC_THERM: Returns result in milli degree's Centigrade.
+ * @SCALE_HW_CALIB_PMIC_THERM: Returns result in millidegrees Centigrade.
* The hardware applies offset/slope to adc code.
- * SCALE_HW_CALIB_PMIC_THERM: Returns result in milli degree's Centigrade.
+ * @SCALE_HW_CALIB_PMIC_THERM_PM7: Returns result in millidegrees Centigrade.
* The hardware applies offset/slope to adc code. This is for PMIC7.
- * SCALE_HW_CALIB_PM5_CHG_TEMP: Returns result in millidegrees for PMIC5
+ * @SCALE_HW_CALIB_PM5_CHG_TEMP: Returns result in millidegrees for PMIC5
* charger temperature.
- * SCALE_HW_CALIB_PM5_SMB_TEMP: Returns result in millidegrees for PMIC5
+ * @SCALE_HW_CALIB_PM5_SMB_TEMP: Returns result in millidegrees for PMIC5
* SMB1390 temperature.
*/
enum vadc_scale_fn_type {
@@ -129,6 +120,7 @@ enum vadc_scale_fn_type {
SCALE_HW_CALIB_PMIC_THERM_PM7,
SCALE_HW_CALIB_PM5_CHG_TEMP,
SCALE_HW_CALIB_PM5_SMB_TEMP,
+ /* private: */
SCALE_HW_CALIB_INVALID,
};
@@ -144,12 +136,12 @@ struct adc5_data {
int qcom_vadc_scale(enum vadc_scale_fn_type scaletype,
const struct vadc_linear_graph *calib_graph,
- const struct vadc_prescale_ratio *prescale,
+ const struct u32_fract *prescale,
bool absolute,
u16 adc_code, int *result_mdec);
struct qcom_adc5_scale_type {
- int (*scale_fn)(const struct vadc_prescale_ratio *prescale,
+ int (*scale_fn)(const struct u32_fract *prescale,
const struct adc5_data *data, u16 adc_code, int *result);
};
@@ -161,6 +153,8 @@ int qcom_adc5_hw_scale(enum vadc_scale_fn_type scaletype,
u16 qcom_adc_tm5_temp_volt_scale(unsigned int prescale_ratio,
u32 full_scale_code_volt, int temp);
+u16 qcom_adc_tm5_gen2_temp_res_scale(int temp);
+
int qcom_adc5_prescaling_from_dt(u32 num, u32 den);
int qcom_adc5_hw_settle_time_from_dt(u32 value, const unsigned int *hw_settle);
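The prescale tables move from the driver-private vadc_prescale_ratio to the generic struct u32_fract from <linux/math.h>; the old num/den pair maps directly onto numerator/denominator. A sketch of one converted table entry (values illustrative):

	#include <linux/math.h>

	/* Hypothetical 1:3 prescale entry in the new representation */
	static const struct u32_fract example_prescale = {
		.numerator = 1,
		.denominator = 3,
	};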
diff --git a/include/linux/iio/afe/rescale.h b/include/linux/iio/afe/rescale.h
new file mode 100644
index 000000000000..6eecb435488f
--- /dev/null
+++ b/include/linux/iio/afe/rescale.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2018 Axentia Technologies AB
+ */
+
+#ifndef __IIO_RESCALE_H__
+#define __IIO_RESCALE_H__
+
+#include <linux/types.h>
+#include <linux/iio/iio.h>
+
+struct device;
+struct rescale;
+
+struct rescale_cfg {
+ enum iio_chan_type type;
+ int (*props)(struct device *dev, struct rescale *rescale);
+};
+
+struct rescale {
+ const struct rescale_cfg *cfg;
+ struct iio_channel *source;
+ struct iio_chan_spec chan;
+ struct iio_chan_spec_ext_info *ext_info;
+ bool chan_processed;
+ s32 numerator;
+ s32 denominator;
+ s32 offset;
+};
+
+int rescale_process_scale(struct rescale *rescale, int scale_type,
+ int *val, int *val2);
+int rescale_process_offset(struct rescale *rescale, int scale_type,
+ int scale, int scale2, int schan_off,
+ int *val, int *val2);
+#endif /* __IIO_RESCALE_H__ */
diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h
new file mode 100644
index 000000000000..7f815f3fed6a
--- /dev/null
+++ b/include/linux/iio/backend.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _IIO_BACKEND_H_
+#define _IIO_BACKEND_H_
+
+#include <linux/types.h>
+#include <linux/iio/iio.h>
+
+struct iio_chan_spec;
+struct fwnode_handle;
+struct iio_backend;
+struct device;
+struct iio_dev;
+
+enum iio_backend_data_type {
+ IIO_BACKEND_TWOS_COMPLEMENT,
+ IIO_BACKEND_OFFSET_BINARY,
+ IIO_BACKEND_DATA_UNSIGNED,
+ IIO_BACKEND_DATA_TYPE_MAX
+};
+
+enum iio_backend_data_source {
+ IIO_BACKEND_INTERNAL_CONTINUOUS_WAVE,
+ IIO_BACKEND_EXTERNAL,
+ IIO_BACKEND_INTERNAL_RAMP_16BIT,
+ IIO_BACKEND_DATA_SOURCE_MAX
+};
+
+#define iio_backend_debugfs_ptr(ptr) PTR_IF(IS_ENABLED(CONFIG_DEBUG_FS), ptr)
+
+/**
+ * IIO_BACKEND_EX_INFO - Helper for an IIO extended channel attribute
+ * @_name: Attribute name
+ * @_shared: Whether the attribute is shared between all channels
+ * @_what: Data private to the driver
+ */
+#define IIO_BACKEND_EX_INFO(_name, _shared, _what) { \
+ .name = (_name), \
+ .shared = (_shared), \
+ .read = iio_backend_ext_info_get, \
+ .write = iio_backend_ext_info_set, \
+ .private = (_what), \
+}
+
+/**
+ * struct iio_backend_data_fmt - Backend data format
+ * @type: Data type.
+ * @sign_extend: Bool to tell if the data is sign extended.
+ * @enable: Enable/Disable the data format module. If disabled,
+ *	no formatting will happen.
+ */
+struct iio_backend_data_fmt {
+ enum iio_backend_data_type type;
+ bool sign_extend;
+ bool enable;
+};
+
+/* vendor specific from 32 */
+enum iio_backend_test_pattern {
+ IIO_BACKEND_NO_TEST_PATTERN,
+ /* modified prbs9 */
+ IIO_BACKEND_ADI_PRBS_9A = 32,
+ /* modified prbs23 */
+ IIO_BACKEND_ADI_PRBS_23A,
+ IIO_BACKEND_TEST_PATTERN_MAX
+};
+
+enum iio_backend_sample_trigger {
+ IIO_BACKEND_SAMPLE_TRIGGER_EDGE_FALLING,
+ IIO_BACKEND_SAMPLE_TRIGGER_EDGE_RISING,
+ IIO_BACKEND_SAMPLE_TRIGGER_MAX
+};
+
+enum iio_backend_interface_type {
+ IIO_BACKEND_INTERFACE_SERIAL_LVDS,
+ IIO_BACKEND_INTERFACE_SERIAL_CMOS,
+ IIO_BACKEND_INTERFACE_MAX
+};
+
+enum iio_backend_filter_type {
+ IIO_BACKEND_FILTER_TYPE_DISABLED,
+ IIO_BACKEND_FILTER_TYPE_SINC1,
+ IIO_BACKEND_FILTER_TYPE_SINC5,
+ IIO_BACKEND_FILTER_TYPE_SINC5_PLUS_COMP,
+ IIO_BACKEND_FILTER_TYPE_MAX
+};
+
+/**
+ * struct iio_backend_ops - operations structure for an iio_backend
+ * @enable: Enable backend.
+ * @disable: Disable backend.
+ * @chan_enable: Enable one channel.
+ * @chan_disable: Disable one channel.
+ * @data_format_set: Configure the data format for a specific channel.
+ * @data_source_set: Configure the data source for a specific channel.
+ * @data_source_get: Data source getter for a specific channel.
+ * @set_sample_rate: Configure the sampling rate for a specific channel.
+ * @test_pattern_set: Configure a test pattern.
+ * @chan_status: Get the channel status.
+ * @iodelay_set: Set digital I/O delay.
+ * @data_sample_trigger: Control when to sample data.
+ * @request_buffer: Request an IIO buffer.
+ * @free_buffer: Free an IIO buffer.
+ * @extend_chan_spec: Extend an IIO channel.
+ * @ext_info_set: Extended info setter.
+ * @ext_info_get: Extended info getter.
+ * @interface_type_get: Interface type.
+ * @data_size_set: Data size.
+ * @oversampling_ratio_set: Set Oversampling ratio.
+ * @read_raw: Read a channel attribute from a backend device
+ * @debugfs_print_chan_status: Print channel status into a buffer.
+ * @debugfs_reg_access: Read or write register value of backend.
+ * @filter_type_set: Set filter type.
+ * @interface_data_align: Perform the data alignment process.
+ * @num_lanes_set: Set the number of lanes enabled.
+ * @ddr_enable: Enable interface DDR (Double Data Rate) mode.
+ * @ddr_disable: Disable interface DDR (Double Data Rate) mode.
+ * @data_stream_enable: Enable data stream.
+ * @data_stream_disable: Disable data stream.
+ * @data_transfer_addr: Set data address.
+ **/
+struct iio_backend_ops {
+ int (*enable)(struct iio_backend *back);
+ void (*disable)(struct iio_backend *back);
+ int (*chan_enable)(struct iio_backend *back, unsigned int chan);
+ int (*chan_disable)(struct iio_backend *back, unsigned int chan);
+ int (*data_format_set)(struct iio_backend *back, unsigned int chan,
+ const struct iio_backend_data_fmt *data);
+ int (*data_source_set)(struct iio_backend *back, unsigned int chan,
+ enum iio_backend_data_source data);
+ int (*data_source_get)(struct iio_backend *back, unsigned int chan,
+ enum iio_backend_data_source *data);
+ int (*set_sample_rate)(struct iio_backend *back, unsigned int chan,
+ u64 sample_rate_hz);
+ int (*test_pattern_set)(struct iio_backend *back,
+ unsigned int chan,
+ enum iio_backend_test_pattern pattern);
+ int (*chan_status)(struct iio_backend *back, unsigned int chan,
+ bool *error);
+ int (*iodelay_set)(struct iio_backend *back, unsigned int chan,
+ unsigned int taps);
+ int (*data_sample_trigger)(struct iio_backend *back,
+ enum iio_backend_sample_trigger trigger);
+ struct iio_buffer *(*request_buffer)(struct iio_backend *back,
+ struct iio_dev *indio_dev);
+ void (*free_buffer)(struct iio_backend *back,
+ struct iio_buffer *buffer);
+ int (*extend_chan_spec)(struct iio_backend *back,
+ struct iio_chan_spec *chan);
+ int (*ext_info_set)(struct iio_backend *back, uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len);
+ int (*ext_info_get)(struct iio_backend *back, uintptr_t private,
+ const struct iio_chan_spec *chan, char *buf);
+ int (*interface_type_get)(struct iio_backend *back,
+ enum iio_backend_interface_type *type);
+ int (*data_size_set)(struct iio_backend *back, unsigned int size);
+ int (*oversampling_ratio_set)(struct iio_backend *back,
+ unsigned int chan, unsigned int ratio);
+ int (*read_raw)(struct iio_backend *back,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask);
+ int (*debugfs_print_chan_status)(struct iio_backend *back,
+ unsigned int chan, char *buf,
+ size_t len);
+ int (*debugfs_reg_access)(struct iio_backend *back, unsigned int reg,
+ unsigned int writeval, unsigned int *readval);
+ int (*filter_type_set)(struct iio_backend *back,
+ enum iio_backend_filter_type type);
+ int (*interface_data_align)(struct iio_backend *back, u32 timeout_us);
+ int (*num_lanes_set)(struct iio_backend *back, unsigned int num_lanes);
+ int (*ddr_enable)(struct iio_backend *back);
+ int (*ddr_disable)(struct iio_backend *back);
+ int (*data_stream_enable)(struct iio_backend *back);
+ int (*data_stream_disable)(struct iio_backend *back);
+ int (*data_transfer_addr)(struct iio_backend *back, u32 address);
+};
+
+/**
+ * struct iio_backend_info - info structure for an iio_backend
+ * @name: Backend name.
+ * @ops: Backend operations.
+ */
+struct iio_backend_info {
+ const char *name;
+ const struct iio_backend_ops *ops;
+};
+
+int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan);
+int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan);
+int devm_iio_backend_enable(struct device *dev, struct iio_backend *back);
+int iio_backend_enable(struct iio_backend *back);
+void iio_backend_disable(struct iio_backend *back);
+int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
+ const struct iio_backend_data_fmt *data);
+int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan,
+ enum iio_backend_data_source data);
+int iio_backend_data_source_get(struct iio_backend *back, unsigned int chan,
+ enum iio_backend_data_source *data);
+int iio_backend_set_sampling_freq(struct iio_backend *back, unsigned int chan,
+ u64 sample_rate_hz);
+int iio_backend_test_pattern_set(struct iio_backend *back,
+ unsigned int chan,
+ enum iio_backend_test_pattern pattern);
+int iio_backend_chan_status(struct iio_backend *back, unsigned int chan,
+ bool *error);
+int iio_backend_iodelay_set(struct iio_backend *back, unsigned int lane,
+ unsigned int taps);
+int iio_backend_data_sample_trigger(struct iio_backend *back,
+ enum iio_backend_sample_trigger trigger);
+int devm_iio_backend_request_buffer(struct device *dev,
+ struct iio_backend *back,
+ struct iio_dev *indio_dev);
+int iio_backend_filter_type_set(struct iio_backend *back,
+ enum iio_backend_filter_type type);
+int iio_backend_interface_data_align(struct iio_backend *back, u32 timeout_us);
+int iio_backend_num_lanes_set(struct iio_backend *back, unsigned int num_lanes);
+int iio_backend_ddr_enable(struct iio_backend *back);
+int iio_backend_ddr_disable(struct iio_backend *back);
+int iio_backend_data_stream_enable(struct iio_backend *back);
+int iio_backend_data_stream_disable(struct iio_backend *back);
+int iio_backend_data_transfer_addr(struct iio_backend *back, u32 address);
+ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len);
+ssize_t iio_backend_ext_info_get(struct iio_dev *indio_dev, uintptr_t private,
+ const struct iio_chan_spec *chan, char *buf);
+int iio_backend_interface_type_get(struct iio_backend *back,
+ enum iio_backend_interface_type *type);
+int iio_backend_data_size_set(struct iio_backend *back, unsigned int size);
+int iio_backend_oversampling_ratio_set(struct iio_backend *back,
+ unsigned int chan,
+ unsigned int ratio);
+int iio_backend_read_raw(struct iio_backend *back,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask);
+int iio_backend_extend_chan_spec(struct iio_backend *back,
+ struct iio_chan_spec *chan);
+void *iio_backend_get_priv(const struct iio_backend *conv);
+struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name);
+struct iio_backend *devm_iio_backend_fwnode_get(struct device *dev,
+ const char *name,
+ struct fwnode_handle *fwnode);
+struct iio_backend *
+__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
+ struct fwnode_handle *fwnode);
+
+int devm_iio_backend_register(struct device *dev,
+ const struct iio_backend_info *info, void *priv);
+
+static inline int iio_backend_read_scale(struct iio_backend *back,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ return iio_backend_read_raw(back, chan, val, val2, IIO_CHAN_INFO_SCALE);
+}
+
+static inline int iio_backend_read_offset(struct iio_backend *back,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ return iio_backend_read_raw(back, chan, val, val2,
+ IIO_CHAN_INFO_OFFSET);
+}
+
+ssize_t iio_backend_debugfs_print_chan_status(struct iio_backend *back,
+ unsigned int chan, char *buf,
+ size_t len);
+void iio_backend_debugfs_add(struct iio_backend *back,
+ struct iio_dev *indio_dev);
+#endif
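A minimal sketch of hooking into the new backend framework: implement a couple of iio_backend_ops callbacks and register with devm_iio_backend_register(). Everything named example_* is hypothetical:

	#include <linux/iio/backend.h>

	static int example_chan_enable(struct iio_backend *back,
				       unsigned int chan)
	{
		/* program the IP core's per-channel enable bit here */
		return 0;
	}

	static int example_chan_disable(struct iio_backend *back,
					unsigned int chan)
	{
		return 0;
	}

	static const struct iio_backend_ops example_ops = {
		.chan_enable = example_chan_enable,
		.chan_disable = example_chan_disable,
	};

	static const struct iio_backend_info example_info = {
		.name = "example-backend",
		.ops = &example_ops,
	};

	static int example_register(struct device *dev, void *priv)
	{
		return devm_iio_backend_register(dev, &example_info, priv);
	}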
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index ff15c61bf319..4f33e6a39797 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -7,6 +7,7 @@
#ifndef __INDUSTRIALIO_DMA_BUFFER_H__
#define __INDUSTRIALIO_DMA_BUFFER_H__
+#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
@@ -16,22 +17,18 @@
struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device;
-
-struct iio_buffer_block {
- u32 size;
- u32 bytes_used;
-};
+struct dma_buf_attachment;
+struct dma_fence;
+struct sg_table;
/**
* enum iio_block_state - State of a struct iio_dma_buffer_block
- * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued
* @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
* @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
* @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
* @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed
*/
enum iio_block_state {
- IIO_BLOCK_STATE_DEQUEUED,
IIO_BLOCK_STATE_QUEUED,
IIO_BLOCK_STATE_ACTIVE,
IIO_BLOCK_STATE_DONE,
@@ -48,6 +45,10 @@ enum iio_block_state {
* @queue: Parent DMA buffer queue
* @kref: kref used to manage the lifetime of block
* @state: Current state of the block
+ * @cyclic: True if this is a cyclic buffer
+ * @fileio: True if this buffer is used for fileio mode
+ * @sg_table: DMA table for the transfer when transferring a DMABUF
+ * @fence: DMA fence to be signaled when a DMABUF transfer is complete
*/
struct iio_dma_buffer_block {
/* May only be accessed by the owner of the block */
@@ -70,6 +71,12 @@ struct iio_dma_buffer_block {
* queue->list_lock if the block is not owned by the core.
*/
enum iio_block_state state;
+
+ bool cyclic;
+ bool fileio;
+
+ struct sg_table *sg_table;
+ struct dma_fence *fence;
};
/**
@@ -78,12 +85,17 @@ struct iio_dma_buffer_block {
* @active_block: Block being used in read()
* @pos: Read offset in the active block
* @block_size: Size of each block
+ * @next_dequeue: index of next block that will be dequeued
+ * @enabled: Whether the buffer is operating in fileio mode
*/
struct iio_dma_buffer_queue_fileio {
struct iio_dma_buffer_block *blocks[2];
struct iio_dma_buffer_block *active_block;
size_t pos;
size_t block_size;
+
+ unsigned int next_dequeue;
+ bool enabled;
};
/**
@@ -98,8 +110,8 @@ struct iio_dma_buffer_queue_fileio {
* list and typically also a list of active blocks in the part that handles
* the DMA controller
* @incoming: List of buffers on the incoming queue
- * @outgoing: List of buffers on the outgoing queue
* @active: Whether the buffer is currently active
+ * @num_dmabufs: Total number of DMABUFs attached to this queue
* @fileio: FileIO state
*/
struct iio_dma_buffer_queue {
@@ -110,9 +122,9 @@ struct iio_dma_buffer_queue {
struct mutex lock;
spinlock_t list_lock;
struct list_head incoming;
- struct list_head outgoing;
bool active;
+ atomic_t num_dmabufs;
struct iio_dma_buffer_queue_fileio fileio;
};
@@ -138,7 +150,9 @@ int iio_dma_buffer_disable(struct iio_buffer *buffer,
struct iio_dev *indio_dev);
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
char __user *user_buffer);
-size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
+int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
+ const char __user *user_buffer);
+size_t iio_dma_buffer_usage(struct iio_buffer *buffer);
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
int iio_dma_buffer_request_update(struct iio_buffer *buffer);
@@ -148,4 +162,18 @@ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
+struct iio_dma_buffer_block *
+iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
+ struct dma_buf_attachment *attach);
+void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block);
+int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block,
+ struct dma_fence *fence,
+ struct sg_table *sgt,
+ size_t size, bool cyclic);
+void iio_dma_buffer_lock_queue(struct iio_buffer *buffer);
+void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer);
+struct device *iio_dma_buffer_get_dma_dev(struct iio_buffer *buffer);
+
#endif
diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h
index 5c355be89814..37f27545f69f 100644
--- a/include/linux/iio/buffer-dmaengine.h
+++ b/include/linux/iio/buffer-dmaengine.h
@@ -7,11 +7,33 @@
#ifndef __IIO_DMAENGINE_H__
#define __IIO_DMAENGINE_H__
+#include <linux/iio/buffer.h>
+
struct iio_dev;
struct device;
+struct dma_chan;
+
+void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer);
+struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
+ struct iio_dev *indio_dev,
+ const char *channel,
+ enum iio_buffer_direction dir);
+
+#define iio_dmaengine_buffer_setup(dev, indio_dev, channel) \
+ iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, \
+ IIO_BUFFER_DIRECTION_IN)
+
+int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
+ struct iio_dev *indio_dev,
+ const char *channel,
+ enum iio_buffer_direction dir);
+int devm_iio_dmaengine_buffer_setup_with_handle(struct device *dev,
+ struct iio_dev *indio_dev,
+ struct dma_chan *chan,
+ enum iio_buffer_direction dir);
-int devm_iio_dmaengine_buffer_setup(struct device *dev,
- struct iio_dev *indio_dev,
- const char *channel);
+#define devm_iio_dmaengine_buffer_setup(dev, indio_dev, channel) \
+ devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, \
+ IIO_BUFFER_DIRECTION_IN)
#endif
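With the old devm_iio_dmaengine_buffer_setup() function replaced by a macro over the _ext() variant, existing callers keep compiling unchanged; a sketch (the channel name "rx" is illustrative):

	#include <linux/iio/buffer-dmaengine.h>

	/* Hypothetical probe fragment: capture-direction DMA buffer */
	static int example_setup_rx(struct device *dev,
				    struct iio_dev *indio_dev)
	{
		return devm_iio_dmaengine_buffer_setup(dev, indio_dev, "rx");
	}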
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index b6928ac5c63d..d37f82678f71 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -11,26 +11,29 @@
struct iio_buffer;
+enum iio_buffer_direction {
+ IIO_BUFFER_DIRECTION_IN,
+ IIO_BUFFER_DIRECTION_OUT,
+};
+
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data);
+int iio_pop_from_buffer(struct iio_buffer *buffer, void *data);
+
/**
* iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers
* @indio_dev: iio_dev structure for device.
* @data: sample data
* @timestamp: timestamp for the sample data
*
- * Pushes data to the IIO device's buffers. If timestamps are enabled for the
- * device the function will store the supplied timestamp as the last element in
- * the sample data buffer before pushing it to the device buffers. The sample
- * data buffer needs to be large enough to hold the additional timestamp
- * (usually the buffer should be indio->scan_bytes bytes large).
+ * DEPRECATED: Use iio_push_to_buffers_with_ts() instead.
*
* Returns 0 on success, a negative error code otherwise.
*/
static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
void *data, int64_t timestamp)
{
- if (indio_dev->scan_timestamp) {
+ if (ACCESS_PRIVATE(indio_dev, scan_timestamp)) {
size_t ts_offset = indio_dev->scan_bytes / sizeof(int64_t) - 1;
((int64_t *)data)[ts_offset] = timestamp;
}
@@ -38,6 +41,38 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
return iio_push_to_buffers(indio_dev, data);
}
+/**
+ * iio_push_to_buffers_with_ts() - push data and timestamp to buffers
+ * @indio_dev: iio_dev structure for device.
+ * @data: Pointer to sample data buffer.
+ * @data_total_len: The size of @data in bytes.
+ * @timestamp: Timestamp for the sample data.
+ *
+ * Pushes data to the IIO device's buffers. If timestamps are enabled for the
+ * device the function will store the supplied timestamp as the last element in
+ * the sample data buffer before pushing it to the device buffers. The sample
+ * data buffer needs to be large enough to hold the additional timestamp
+ * (usually the buffer should be at least indio->scan_bytes bytes large).
+ *
+ * Context: Any context.
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static inline int iio_push_to_buffers_with_ts(struct iio_dev *indio_dev,
+ void *data, size_t data_total_len,
+ s64 timestamp)
+{
+ if (unlikely(data_total_len < indio_dev->scan_bytes)) {
+ dev_err(&indio_dev->dev, "Undersized storage pushed to buffer\n");
+ return -ENOSPC;
+ }
+
+ return iio_push_to_buffers_with_timestamp(indio_dev, data, timestamp);
+}
+
+int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
+ const void *data, size_t data_sz,
+ int64_t timestamp);
+
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
const unsigned long *mask);
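The new iio_push_to_buffers_with_ts() takes the total size of the scan storage so undersized pushes fail with -ENOSPC instead of overrunning. A trigger-handler sketch, assuming a two-channel 16-bit scan layout:

	#include <linux/iio/buffer.h>
	#include <linux/iio/iio.h>
	#include <linux/iio/trigger.h>
	#include <linux/iio/trigger_consumer.h>
	#include <linux/irqreturn.h>

	static irqreturn_t example_trigger_handler(int irq, void *p)
	{
		struct iio_poll_func *pf = p;
		struct iio_dev *indio_dev = pf->indio_dev;
		struct {
			s16 chans[2];
			s64 timestamp __aligned(8);
		} scan = { };

		/* fill scan.chans[] from the device here ... */

		iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
					    iio_get_time_ns(indio_dev));

		iio_trigger_notify_done(indio_dev->trig);
		return IRQ_HANDLED;
	}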
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
index 245b32918ae1..c0b0e0992a85 100644
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -7,9 +7,14 @@
#ifdef CONFIG_IIO_BUFFER
#include <uapi/linux/iio/buffer.h>
+#include <linux/iio/buffer.h>
+struct dma_buf_attachment;
+struct dma_fence;
struct iio_dev;
+struct iio_dma_buffer_block;
struct iio_buffer;
+struct sg_table;
/**
* INDIO_BUFFER_FLAG_FIXED_WATERMARK - Watermark level of the buffer can not be
@@ -19,10 +24,15 @@ struct iio_buffer;
/**
* struct iio_buffer_access_funcs - access functions for buffers.
- * @store_to: actually store stuff to the buffer
+ * @store_to: actually store stuff to the buffer - must be safe to
+ * call from any context (e.g. must not sleep).
* @read: try to get a specified number of bytes (must exist)
* @data_available: indicates how much data is available for reading from
* the buffer.
+ * @remove_from: remove scan from buffer. Drivers should call this to
+ * remove a scan from a buffer.
+ * @write: try to write a number of bytes
+ * @space_available: returns the number of bytes available in a buffer
* @request_update: if a parameter change has been marked, update underlying
* storage.
 * @set_bytes_per_datum: set number of bytes per datum
@@ -34,6 +44,17 @@ struct iio_buffer;
 *		device stops sampling. Calls are balanced with @enable.
* @release: called when the last reference to the buffer is dropped,
* should free all resources allocated by the buffer.
+ * @attach_dmabuf: called from userspace via ioctl to attach one external
+ * DMABUF.
+ * @detach_dmabuf: called from userspace via ioctl to detach one previously
+ * attached DMABUF.
+ * @enqueue_dmabuf: called from userspace via ioctl to queue this DMABUF
+ * object to this buffer. Requires a valid DMABUF fd, that
+ *			was previously attached to this buffer.
+ * @get_dma_dev: called to get the DMA device associated with this buffer.
+ * @lock_queue: called when the core needs to lock the buffer queue;
+ * it is used when enqueueing DMABUF objects.
+ * @unlock_queue: used to unlock a previously locked buffer queue
* @modes: Supported operating modes by this buffer type
* @flags: A bitmask combination of INDIO_BUFFER_FLAG_*
*
@@ -49,6 +70,9 @@ struct iio_buffer_access_funcs {
int (*store_to)(struct iio_buffer *buffer, const void *data);
int (*read)(struct iio_buffer *buffer, size_t n, char __user *buf);
size_t (*data_available)(struct iio_buffer *buffer);
+ int (*remove_from)(struct iio_buffer *buffer, void *data);
+ int (*write)(struct iio_buffer *buffer, size_t n, const char __user *buf);
+ size_t (*space_available)(struct iio_buffer *buffer);
int (*request_update)(struct iio_buffer *buffer);
@@ -60,6 +84,18 @@ struct iio_buffer_access_funcs {
void (*release)(struct iio_buffer *buffer);
+ struct iio_dma_buffer_block * (*attach_dmabuf)(struct iio_buffer *buffer,
+ struct dma_buf_attachment *attach);
+ void (*detach_dmabuf)(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block);
+ int (*enqueue_dmabuf)(struct iio_buffer *buffer,
+ struct iio_dma_buffer_block *block,
+ struct dma_fence *fence, struct sg_table *sgt,
+ size_t size, bool cyclic);
+ struct device * (*get_dma_dev)(struct iio_buffer *buffer);
+ void (*lock_queue)(struct iio_buffer *buffer);
+ void (*unlock_queue)(struct iio_buffer *buffer);
+
unsigned int modes;
unsigned int flags;
};
@@ -80,6 +116,9 @@ struct iio_buffer {
/** @bytes_per_datum: Size of individual datum including timestamp. */
size_t bytes_per_datum;
+ /* @direction: Direction of the data stream (in/out). */
+ enum iio_buffer_direction direction;
+
/**
* @access: Buffer access functions associated with the
* implementation.
@@ -112,7 +151,7 @@ struct iio_buffer {
struct attribute_group buffer_group;
/* @attrs: Standard attributes of the buffer. */
- const struct attribute **attrs;
+ const struct iio_dev_attr **attrs;
/* @demux_bounce: Buffer for doing gather from incoming scan. */
void *demux_bounce;
@@ -125,6 +164,12 @@ struct iio_buffer {
/* @ref: Reference count of the buffer. */
struct kref ref;
+
+ /* @dmabufs: List of DMABUF attachments */
+ struct list_head dmabufs; /* P: dmabufs_mutex */
+
+ /* @dmabufs_mutex: Protects dmabufs */
+ struct mutex dmabufs_mutex;
};
/**
@@ -148,6 +193,8 @@ void iio_buffer_init(struct iio_buffer *buffer);
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer);
void iio_buffer_put(struct iio_buffer *buffer);
+void iio_buffer_signal_dmabuf_done(struct dma_fence *fence, int ret);
+
#else /* CONFIG_IIO_BUFFER */
static inline void iio_buffer_get(struct iio_buffer *buffer) {}
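
To illustrate how a buffer implementation might wire up the new output-direction callbacks, a hedged sketch with hypothetical backing-store helpers (my_read, my_request_update, my_fifo_from_user, my_fifo_free_bytes); only the shape of the ops table comes from this header:

static int my_write(struct iio_buffer *buffer, size_t n,
		    const char __user *buf)
{
	/* Copy userspace data into the backing store (hypothetical helper). */
	return my_fifo_from_user(buffer, buf, n);
}

static size_t my_space_available(struct iio_buffer *buffer)
{
	return my_fifo_free_bytes(buffer);	/* hypothetical helper */
}

static const struct iio_buffer_access_funcs my_access_funcs = {
	.read = my_read,
	.write = my_write,
	.space_available = my_space_available,
	.request_update = my_request_update,
	.modes = INDIO_BUFFER_SOFTWARE,
};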
diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h
index 7ce8a8adad58..bb966abcde53 100644
--- a/include/linux/iio/common/cros_ec_sensors_core.h
+++ b/include/linux/iio/common/cros_ec_sensors_core.h
@@ -41,7 +41,6 @@ typedef irqreturn_t (*cros_ec_sensors_capture_t)(int irq, void *p);
* @param: motion sensor parameters structure
* @resp: motion sensor response structure
* @type: type of motion sensor
- * @loc: location where the motion sensor is placed
* @range_updated: True if the range of the sensor has been
* updated.
* @curr_range: If updated, the current range value.
@@ -67,7 +66,6 @@ struct cros_ec_sensors_core_state {
struct ec_response_motion_sense *resp;
enum motionsensor_type type;
- enum motionsensor_location loc;
bool range_updated;
int curr_range;
@@ -77,7 +75,7 @@ struct cros_ec_sensors_core_state {
u16 scale;
} calib[CROS_EC_SENSOR_MAX_AXIS];
s8 sign[CROS_EC_SENSOR_MAX_AXIS];
- u8 samples[CROS_EC_SAMPLE_SIZE];
+ u8 samples[CROS_EC_SAMPLE_SIZE] __aligned(8);
int (*read_ec_sensors_data)(struct iio_dev *indio_dev,
unsigned long scan_mask, s16 *data);
@@ -95,8 +93,11 @@ int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask,
struct platform_device;
int cros_ec_sensors_core_init(struct platform_device *pdev,
struct iio_dev *indio_dev, bool physical_device,
- cros_ec_sensors_capture_t trigger_capture,
- cros_ec_sensorhub_push_data_cb_t push_data);
+ cros_ec_sensors_capture_t trigger_capture);
+
+int cros_ec_sensors_core_register(struct device *dev,
+ struct iio_dev *indio_dev,
+ cros_ec_sensorhub_push_data_cb_t push_data);
irqreturn_t cros_ec_sensors_capture(int irq, void *p);
int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
@@ -125,5 +126,6 @@ extern const struct dev_pm_ops cros_ec_sensors_pm_ops;
/* List of extended channel specification for all sensors. */
extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[];
+extern const struct iio_chan_spec_ext_info cros_ec_sensors_limited_info[];
#endif /* __CROS_EC_SENSORS_CORE_H */
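
The old combined init is split here into cros_ec_sensors_core_init() plus cros_ec_sensors_core_register(). A hedged sketch of how a sensor probe path would consume the split API (error handling trimmed, channel setup elided):

static int my_cros_ec_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	int ret;

	indio_dev = devm_iio_device_alloc(&pdev->dev,
					  sizeof(struct cros_ec_sensors_core_state));
	if (!indio_dev)
		return -ENOMEM;

	ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
					cros_ec_sensors_capture);
	if (ret)
		return ret;

	/* ... channel and triggered-buffer setup ... */

	return cros_ec_sensors_core_register(&pdev->dev, indio_dev,
					     cros_ec_sensors_push_data);
}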
diff --git a/include/linux/iio/common/inv_sensors_timestamp.h b/include/linux/iio/common/inv_sensors_timestamp.h
new file mode 100644
index 000000000000..8d506f1e9df2
--- /dev/null
+++ b/include/linux/iio/common/inv_sensors_timestamp.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 Invensense, Inc.
+ */
+
+#ifndef INV_SENSORS_TIMESTAMP_H_
+#define INV_SENSORS_TIMESTAMP_H_
+
+/**
+ * struct inv_sensors_timestamp_chip - chip internal properties
+ * @clock_period: internal clock period in ns
+ * @jitter: acceptable jitter in per-mille
+ * @init_period: chip initial period at reset in ns
+ */
+struct inv_sensors_timestamp_chip {
+ uint32_t clock_period;
+ uint32_t jitter;
+ uint32_t init_period;
+};
+
+/**
+ * struct inv_sensors_timestamp_interval - timestamps interval
+ * @lo: interval lower bound
+ * @up: interval upper bound
+ */
+struct inv_sensors_timestamp_interval {
+ int64_t lo;
+ int64_t up;
+};
+
+/**
+ * struct inv_sensors_timestamp_acc - accumulator for computing an estimation
+ * @val: current estimation of the value, the mean of all values
+ * @idx: current index of the next free place in values table
+ * @values: table of all measured values, used for computing the mean
+ */
+struct inv_sensors_timestamp_acc {
+ uint32_t val;
+ size_t idx;
+ uint32_t values[32];
+};
+
+/**
+ * struct inv_sensors_timestamp - timestamp management states
+ * @chip: chip internal characteristics
+ * @min_period: minimal acceptable clock period
+ * @max_period: maximal acceptable clock period
+ * @it: interrupts interval timestamps
+ * @timestamp: store last timestamp for computing next data timestamp
+ * @mult: current internal period multiplier
+ * @new_mult: new set internal period multiplier (not yet effective)
+ * @period: measured current period of the sensor
+ * @chip_period: accumulator for computing internal chip period
+ */
+struct inv_sensors_timestamp {
+ struct inv_sensors_timestamp_chip chip;
+ uint32_t min_period;
+ uint32_t max_period;
+ struct inv_sensors_timestamp_interval it;
+ int64_t timestamp;
+ uint32_t mult;
+ uint32_t new_mult;
+ uint32_t period;
+ struct inv_sensors_timestamp_acc chip_period;
+};
+
+void inv_sensors_timestamp_init(struct inv_sensors_timestamp *ts,
+ const struct inv_sensors_timestamp_chip *chip);
+
+int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts,
+ uint32_t period, bool fifo);
+
+void inv_sensors_timestamp_interrupt(struct inv_sensors_timestamp *ts,
+ size_t sample_nb, int64_t timestamp);
+
+static inline int64_t inv_sensors_timestamp_pop(struct inv_sensors_timestamp *ts)
+{
+ ts->timestamp += ts->period;
+ return ts->timestamp;
+}
+
+void inv_sensors_timestamp_apply_odr(struct inv_sensors_timestamp *ts,
+ uint32_t fifo_period, size_t fifo_nb,
+ unsigned int fifo_no);
+
+static inline void inv_sensors_timestamp_reset(struct inv_sensors_timestamp *ts)
+{
+ const struct inv_sensors_timestamp_interval interval_init = {0LL, 0LL};
+
+ ts->it = interval_init;
+ ts->timestamp = 0;
+}
+
+#endif
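
A sketch of the intended call flow for this timestamping helper, assuming a hypothetical FIFO-draining interrupt path; the chip constants are illustrative only:

static const struct inv_sensors_timestamp_chip my_ts_chip = {
	.clock_period = 32000,		/* 32 us internal clock (illustrative) */
	.jitter = 20,			/* 2 %, expressed in per-mille */
	.init_period = 1000000,		/* 1 ms period at reset (illustrative) */
};

/* Once at init time: inv_sensors_timestamp_init(ts, &my_ts_chip); */

static void my_drain_fifo(struct inv_sensors_timestamp *ts,
			  size_t nb_samples, int64_t irq_timestamp)
{
	size_t i;

	/* Feed the interrupt timestamp so the period estimate converges. */
	inv_sensors_timestamp_interrupt(ts, nb_samples, irq_timestamp);

	for (i = 0; i < nb_samples; i++) {
		int64_t sample_ts = inv_sensors_timestamp_pop(ts);
		/* ... push the sample to the IIO buffers with sample_ts ... */
	}
}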
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 33e939977444..f9ae5cdd884f 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -13,6 +13,7 @@
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/irqreturn.h>
+#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
@@ -20,6 +21,9 @@
#include <linux/platform_data/st_sensors_pdata.h>
+#define LSM9DS0_IMU_DEV_NAME "lsm9ds0"
+#define LSM303D_IMU_DEV_NAME "lsm303d"
+
/*
* Buffer size max case: 2bytes per channel, 3 channels in total +
* 8bytes timestamp channel (s64)
@@ -46,8 +50,8 @@
#define ST_SENSORS_MAX_NAME 17
#define ST_SENSORS_MAX_4WAI 8
-#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \
- ch2, s, endian, rbits, sbits, addr) \
+#define ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr, ext) \
{ \
.type = device_type, \
.modified = mod, \
@@ -63,8 +67,14 @@
.storagebits = sbits, \
.endianness = endian, \
}, \
+ .ext_info = ext, \
}
+#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr) \
+ ST_SENSORS_LSM_CHANNELS_EXT(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr, NULL)
+
#define ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL() \
IIO_DEV_ATTR_SAMP_FREQ_AVAIL( \
st_sensors_sysfs_sampling_frequency_avail)
@@ -211,12 +221,10 @@ struct st_sensor_settings {
/**
* struct st_sensor_data - ST sensor device status
- * @dev: Pointer to instance of struct device (I2C or SPI).
* @trig: The trigger in use by the core driver.
+ * @mount_matrix: The mounting matrix of the sensor.
* @sensor_settings: Pointer to the specific sensor settings in use.
* @current_fullscale: Maximum range of measure by the sensor.
- * @vdd: Pointer to sensor's Vdd power supply
- * @vdd_io: Pointer to sensor's Vdd-IO power supply
* @regmap: Pointer to specific sensor regmap configuration.
* @enabled: Status of the sensor (false->off, true->on).
* @odr: Output data rate of the sensor [Hz].
@@ -228,15 +236,13 @@ struct st_sensor_settings {
* @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
* @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
* @buffer_data: Data used by buffer part.
+ * @odr_lock: Local lock for preventing concurrent ODR accesses/changes
*/
struct st_sensor_data {
- struct device *dev;
struct iio_trigger *trig;
- struct iio_mount_matrix *mount_matrix;
+ struct iio_mount_matrix mount_matrix;
struct st_sensor_settings *sensor_settings;
struct st_sensor_fullscale_avl *current_fullscale;
- struct regulator *vdd;
- struct regulator *vdd_io;
struct regmap *regmap;
bool enabled;
@@ -252,7 +258,9 @@ struct st_sensor_data {
bool hw_irq_trigger;
s64 hw_timestamp;
- char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned;
+ struct mutex odr_lock;
+
+ char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] __aligned(IIO_DMA_MINALIGN);
};
#ifdef CONFIG_IIO_BUFFER
@@ -263,7 +271,6 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p);
int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
const struct iio_trigger_ops *trigger_ops);
-void st_sensors_deallocate_trigger(struct iio_dev *indio_dev);
int st_sensors_validate_device(struct iio_trigger *trig,
struct iio_dev *indio_dev);
#else
@@ -272,10 +279,6 @@ static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
{
return 0;
}
-static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
-{
- return;
-}
#define st_sensors_validate_device NULL
#endif
@@ -288,8 +291,6 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable);
int st_sensors_power_enable(struct iio_dev *indio_dev);
-void st_sensors_power_disable(struct iio_dev *indio_dev);
-
int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev,
unsigned reg, unsigned writeval,
unsigned *readval);
@@ -317,4 +318,20 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
void st_sensors_dev_name_probe(struct device *dev, char *name, int len);
+/* Accelerometer */
+const struct st_sensor_settings *st_accel_get_settings(const char *name);
+int st_accel_common_probe(struct iio_dev *indio_dev);
+
+/* Gyroscope */
+const struct st_sensor_settings *st_gyro_get_settings(const char *name);
+int st_gyro_common_probe(struct iio_dev *indio_dev);
+
+/* Magnetometer */
+const struct st_sensor_settings *st_magn_get_settings(const char *name);
+int st_magn_common_probe(struct iio_dev *indio_dev);
+
+/* Pressure */
+const struct st_sensor_settings *st_press_get_settings(const char *name);
+int st_press_common_probe(struct iio_dev *indio_dev);
+
#endif /* ST_SENSORS_H */
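
The new _EXT channel macro exists so a channel can carry ext_info; a hedged sketch of a single accelerometer X-axis channel, where my_ext_info and the 0x28 output register address are hypothetical:

static const struct iio_chan_spec my_accel_channels[] = {
	ST_SENSORS_LSM_CHANNELS_EXT(IIO_ACCEL,
			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
			0, 1, IIO_MOD_X, 's', IIO_LE,
			16, 16, 0x28, my_ext_info),
	/* ... remaining axes and the soft timestamp channel ... */
};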
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 5fa5957586cf..5039558267e4 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -13,7 +13,7 @@
struct iio_dev;
struct iio_chan_spec;
struct device;
-struct device_node;
+struct fwnode_handle;
/**
* struct iio_channel - everything needed for a consumer to use a channel
@@ -99,26 +99,20 @@ void iio_channel_release_all(struct iio_channel *chan);
struct iio_channel *devm_iio_channel_get_all(struct device *dev);
/**
- * of_iio_channel_get_by_name() - get description of all that is needed to access channel.
- * @np: Pointer to consumer device tree node
+ * fwnode_iio_channel_get_by_name() - get description of all that is needed to access channel.
+ * @fwnode: Pointer to consumer Firmware node
* @consumer_channel: Unique name to identify the channel on the consumer
 * side. This typically describes the channel's use within
* the consumer. E.g. 'battery_voltage'
*/
-#ifdef CONFIG_OF
-struct iio_channel *of_iio_channel_get_by_name(struct device_node *np, const char *name);
-#else
-static inline struct iio_channel *
-of_iio_channel_get_by_name(struct device_node *np, const char *name)
-{
- return NULL;
-}
-#endif
+struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
+ const char *name);
/**
- * devm_of_iio_channel_get_by_name() - Resource managed version of of_iio_channel_get_by_name().
+ * devm_fwnode_iio_channel_get_by_name() - Resource managed version of
+ * fwnode_iio_channel_get_by_name().
* @dev: Pointer to consumer device.
- * @np: Pointer to consumer device tree node
+ * @fwnode: Pointer to consumer Firmware node
* @consumer_channel: Unique name to identify the channel on the consumer
 * side. This typically describes the channel's use within
* the consumer. E.g. 'battery_voltage'
@@ -129,15 +123,16 @@ of_iio_channel_get_by_name(struct device_node *np, const char *name)
* The allocated iio channel is automatically released when the device is
* unbound.
*/
-struct iio_channel *devm_of_iio_channel_get_by_name(struct device *dev,
- struct device_node *np,
- const char *consumer_channel);
+struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *consumer_channel);
struct iio_cb_buffer;
/**
* iio_channel_get_all_cb() - register callback for triggered capture
* @dev: Pointer to client device.
- * @cb: Callback function.
+ * @cb: Callback function. Must be safe to call from any context
+ * (e.g. must not sleep).
* @private: Private data passed to callback.
*
* NB right now we have no ability to mux data from multiple devices.
@@ -207,8 +202,9 @@ struct iio_dev
* @chan: The channel being queried.
* @val: Value read back.
*
- * Note raw reads from iio channels are in adc counts and hence
- * scale will need to be applied if standard units required.
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*/
int iio_read_channel_raw(struct iio_channel *chan,
int *val);
@@ -218,8 +214,9 @@ int iio_read_channel_raw(struct iio_channel *chan,
* @chan: The channel being queried.
* @val: Value read back.
*
- * Note raw reads from iio channels are in adc counts and hence
- * scale will need to be applied if standard units required.
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*
 * In contrast to the normal iio_read_channel_raw, this function
* returns the average of multiple reads.
@@ -287,8 +284,9 @@ int iio_read_channel_attribute(struct iio_channel *chan, int *val,
* @chan: The channel being queried.
* @val: Value being written.
*
- * Note raw writes to iio channels are in dac counts and hence
- * scale will need to be applied if standard units required.
+ * Note that for raw writes to iio channels, if the value provided is
+ * in standard units, the affect of the scale and offset must be removed
+ * as (value / scale) - offset.
*/
int iio_write_channel_raw(struct iio_channel *chan, int val);
@@ -298,12 +296,25 @@ int iio_write_channel_raw(struct iio_channel *chan, int val);
* @chan: The channel being queried.
* @val: Value read back.
*
- * Note raw reads from iio channels are in adc counts and hence
- * scale will need to be applied if standard units are required.
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*/
int iio_read_max_channel_raw(struct iio_channel *chan, int *val);
/**
+ * iio_read_min_channel_raw() - read minimum available raw value from a given
+ * channel, i.e. the minimum possible value.
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ *
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
+ */
+int iio_read_min_channel_raw(struct iio_channel *chan, int *val);
+
+/**
* iio_read_avail_channel_raw() - read available raw values from a given channel
* @chan: The channel being queried.
* @vals: Available values read back.
@@ -314,8 +325,9 @@ int iio_read_max_channel_raw(struct iio_channel *chan, int *val);
* For ranges, three vals are always returned; min, step and max.
* For lists, all the possible values are enumerated.
*
- * Note raw available values from iio channels are in adc counts and
- * hence scale will need to be applied if standard units are required.
+ * Note, if standard units are required, raw available values from iio
+ * channels need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*/
int iio_read_avail_channel_raw(struct iio_channel *chan,
const int **vals, int *length);
@@ -371,6 +383,24 @@ int iio_read_channel_scale(struct iio_channel *chan, int *val,
int *val2);
/**
+ * iio_multiply_value() - Multiply an IIO value
+ * @result: Destination pointer for the multiplication result
+ * @multiplier: Multiplier.
+ * @type: One of the IIO_VAL_* constants. This decides how the @val and
+ * @val2 parameters are interpreted.
+ * @val: Value being multiplied.
+ * @val2: Value being multiplied. How @val2 is used depends on @type.
+ *
+ * Multiply an IIO value with a s64 multiplier storing the result as
+ * IIO_VAL_INT. This is typically used for scaling.
+ *
+ * Returns:
+ * IIO_VAL_INT on success or a negative error number on failure.
+ */
+int iio_multiply_value(int *result, s64 multiplier,
+ unsigned int type, int val, int val2);
+
+/**
* iio_convert_raw_to_processed() - Converts a raw value to a processed value
* @chan: The channel being queried
* @raw: The raw IIO to convert
@@ -407,7 +437,7 @@ unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan);
* @chan: The channel being queried.
* @attr: The ext_info attribute to read.
* @buf: Where to store the attribute value. Assumed to hold
- * at least PAGE_SIZE bytes.
+ * at least PAGE_SIZE bytes and to be aligned to PAGE_SIZE.
*
* Returns the number of bytes written to buf (perhaps w/o zero termination;
* it need not even be a string), or an error code.
@@ -430,4 +460,14 @@ ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
const char *buf, size_t len);
+/**
+ * iio_read_channel_label() - read label for a given channel
+ * @chan: The channel being queried.
+ * @buf: Where to store the attribute value. Assumed to hold
+ * at least PAGE_SIZE bytes and to be aligned to PAGE_SIZE.
+ *
+ * Returns the number of bytes written to buf, or an error code.
+ */
+ssize_t iio_read_channel_label(struct iio_channel *chan, char *buf);
+
#endif
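
A consumer applying the documented (raw + offset) * scale conversion by hand might look like the sketch below; in practice iio_read_channel_processed() does this for you, and for brevity the scale is assumed to come back as IIO_VAL_INT:

static int my_read_scaled(struct iio_channel *chan, int *result)
{
	int raw, offset = 0, scale, scale2;
	int ret;

	ret = iio_read_channel_raw(chan, &raw);
	if (ret < 0)
		return ret;

	/* Offset is optional; it defaults to 0 when the channel has none. */
	iio_read_channel_offset(chan, &offset, NULL);

	ret = iio_read_channel_scale(chan, &scale, &scale2);
	if (ret < 0)
		return ret;

	/* Assumes ret == IIO_VAL_INT; real code must honour the value type. */
	*result = (raw + offset) * scale;
	return 0;
}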
diff --git a/include/linux/iio/driver.h b/include/linux/iio/driver.h
index 36de60a5da7a..7f8b55551ed0 100644
--- a/include/linux/iio/driver.h
+++ b/include/linux/iio/driver.h
@@ -8,6 +8,7 @@
#ifndef _IIO_INKERN_H_
#define _IIO_INKERN_H_
+struct device;
struct iio_dev;
struct iio_map;
@@ -17,7 +18,7 @@ struct iio_map;
* @map: array of mappings specifying association of channel with client
*/
int iio_map_array_register(struct iio_dev *indio_dev,
- struct iio_map *map);
+ const struct iio_map *map);
/**
* iio_map_array_unregister() - tell the core to remove consumer mappings for
@@ -26,4 +27,18 @@ int iio_map_array_register(struct iio_dev *indio_dev,
*/
int iio_map_array_unregister(struct iio_dev *indio_dev);
+/**
+ * devm_iio_map_array_register - device-managed version of iio_map_array_register
+ * @dev: Device object to which to bind the unwinding of this registration
+ * @indio_dev: Pointer to the iio_dev structure
+ * @maps: Pointer to an IIO map object which is to be registered to this IIO device
+ *
+ * This function will call iio_map_array_register() to register an IIO map object
+ * and will also hook a callback to the iio_map_array_unregister() function to
+ * handle de-registration of the IIO map object when the device's refcount goes to
+ * zero.
+ */
+int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev,
+ const struct iio_map *maps);
+
#endif
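
Usage of the device-managed variant drops the explicit unregister from driver remove paths. A hedged sketch for a hypothetical ADC driver exposing one channel to a named consumer:

static const struct iio_map my_maps[] = {
	{
		.consumer_dev_name = "my-consumer",
		.consumer_channel = "battery_voltage",
		.adc_channel_label = "channel_0",
	},
	{ } /* sentinel */
};

/* In probe, after the channels have been set up: */
ret = devm_iio_map_array_register(dev, indio_dev, my_maps);
if (ret)
	return ret;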
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
index a4558c45a548..72062a0c7c87 100644
--- a/include/linux/iio/events.h
+++ b/include/linux/iio/events.h
@@ -10,7 +10,7 @@
#include <uapi/linux/iio/events.h>
/**
- * IIO_EVENT_CODE() - create event identifier
+ * _IIO_EVENT_CODE() - create event identifier
* @chan_type: Type of the channel. Should be one of enum iio_chan_type.
 * @diff: Whether the event is for a differential channel or not.
* @modifier: Modifier for the channel. Should be one of enum iio_modifier.
@@ -19,10 +19,13 @@
* @chan: Channel number for non-differential channels.
* @chan1: First channel number for differential channels.
* @chan2: Second channel number for differential channels.
+ *
+ * Drivers should use the specialized macros below instead of using this one
+ * directly.
*/
-#define IIO_EVENT_CODE(chan_type, diff, modifier, direction, \
- type, chan, chan1, chan2) \
+#define _IIO_EVENT_CODE(chan_type, diff, modifier, direction, \
+ type, chan, chan1, chan2) \
(((u64)type << 56) | ((u64)diff << 55) | \
((u64)direction << 48) | ((u64)modifier << 40) | \
((u64)chan_type << 32) | (((u16)chan2) << 16) | ((u16)chan1) | \
@@ -30,7 +33,8 @@
/**
- * IIO_MOD_EVENT_CODE() - create event identifier for modified channels
+ * IIO_MOD_EVENT_CODE() - create event identifier for modified (non
+ * differential) channels
* @chan_type: Type of the channel. Should be one of enum iio_chan_type.
* @number: Channel number.
* @modifier: Modifier for the channel. Should be one of enum iio_modifier.
@@ -40,10 +44,11 @@
#define IIO_MOD_EVENT_CODE(chan_type, number, modifier, \
type, direction) \
- IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0)
+ _IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0)
/**
- * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified channels
+ * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified (non
+ * differential) channels
* @chan_type: Type of the channel. Should be one of enum iio_chan_type.
* @number: Channel number.
* @type: Type of the event. Should be one of enum iio_event_type.
@@ -51,6 +56,18 @@
*/
#define IIO_UNMOD_EVENT_CODE(chan_type, number, type, direction) \
- IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0)
+ _IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0)
+
+/**
+ * IIO_DIFF_EVENT_CODE() - create event identifier for differential channels
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @chan1: First channel number for differential channels.
+ * @chan2: Second channel number for differential channels.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ */
+#define IIO_DIFF_EVENT_CODE(chan_type, chan1, chan2, type, direction) \
+ _IIO_EVENT_CODE(chan_type, 1, 0, direction, type, 0, chan1, chan2)
#endif
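
A sketch of how a driver would use the new differential macro to report a threshold crossing; the channel numbers are illustrative:

/* Rising threshold event on the differential pair (chan 0, chan 1). */
iio_push_event(indio_dev,
	       IIO_DIFF_EVENT_CODE(IIO_VOLTAGE, 0, 1,
				   IIO_EV_TYPE_THRESH,
				   IIO_EV_DIR_RISING),
	       iio_get_time_ns(indio_dev));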
diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h
index de45cf2ee1e4..ce2086f97e3f 100644
--- a/include/linux/iio/frequency/adf4350.h
+++ b/include/linux/iio/frequency/adf4350.h
@@ -51,7 +51,7 @@
/* REG3 Bit Definitions */
#define ADF4350_REG3_12BIT_CLKDIV(x) ((x) << 3)
-#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 16)
+#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 15)
#define ADF4350_REG3_12BIT_CSR_EN (1 << 18)
#define ADF4351_REG3_CHARGE_CANCELLATION_EN (1 << 21)
#define ADF4351_REG3_ANTI_BACKLASH_3ns_EN (1 << 22)
diff --git a/include/linux/iio/gyro/itg3200.h b/include/linux/iio/gyro/itg3200.h
index a602fe7b84fa..74b6d1cadc86 100644
--- a/include/linux/iio/gyro/itg3200.h
+++ b/include/linux/iio/gyro/itg3200.h
@@ -102,6 +102,8 @@ struct itg3200 {
struct i2c_client *i2c;
struct iio_trigger *trig;
struct iio_mount_matrix orientation;
+ /* lock to protect against multiple access to the device */
+ struct mutex lock;
};
enum ITG3200_SCAN_INDEX {
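
The new mutex is there to serialize multi-register transactions; a minimal sketch of the expected locking pattern (register access elided):

struct itg3200 *st = iio_priv(indio_dev);

mutex_lock(&st->lock);
/* ... read-modify-write the device registers as one transaction ... */
mutex_unlock(&st->lock);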
diff --git a/include/linux/iio/iio-gts-helper.h b/include/linux/iio/iio-gts-helper.h
new file mode 100644
index 000000000000..66f830ab9b49
--- /dev/null
+++ b/include/linux/iio/iio-gts-helper.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* gain-time-scale conversion helpers for IIO light sensors
+ *
+ * Copyright (c) 2023 Matti Vaittinen <mazziesaccount@gmail.com>
+ */
+
+#ifndef __IIO_GTS_HELPER__
+#define __IIO_GTS_HELPER__
+
+#include <linux/types.h>
+
+struct device;
+
+/**
+ * struct iio_gain_sel_pair - gain - selector values
+ *
+ * In many cases devices like light sensors allow setting signal amplification
+ * (gain) using a register interface. This structure describes amplification
+ * and corresponding selector (register value)
+ *
+ * @gain: Gain (multiplication) value. Gain must be positive, negative
+ * values are reserved for error handling.
+ * @sel: Selector (usually register value) used to indicate this gain.
+ * NOTE: Only selectors >= 0 supported.
+ */
+struct iio_gain_sel_pair {
+ int gain;
+ int sel;
+};
+
+/**
+ * struct iio_itime_sel_mul - integration time description
+ *
+ * In many cases devices like light sensors allow setting the duration of
+ * collecting data. Typically this duration also has an impact on the magnitude
+ * of measured values (gain). This structure describes the relation between
+ * integration time and amplification, as well as the corresponding selector
+ * (register value).
+ *
+ * An example could be a sensor allowing 50, 100, 200 and 400 ms times. The
+ * respective multiplication values could be 50 ms => 1, 100 ms => 2,
+ * 200 ms => 4 and 400 ms => 8, assuming the impact of integration time is
+ * linear, so that if collecting data for 50 ms produces value X, doubling
+ * the collection time produces 2X, and so on.
+ *
+ * @time_us: Integration time in microseconds. Time values must be positive,
+ * negative values are reserved for error handling.
+ * @sel: Selector (usually register value) used to indicate this time
+ * NOTE: Only selectors >= 0 supported.
+ * @mul: Multiplier applied to the values measured with this time.
+ * NOTE: Only multipliers > 0 supported.
+ */
+struct iio_itime_sel_mul {
+ int time_us;
+ int sel;
+ int mul;
+};
+
+struct iio_gts {
+ u64 max_scale;
+ const struct iio_gain_sel_pair *hwgain_table;
+ int num_hwgain;
+ const struct iio_itime_sel_mul *itime_table;
+ int num_itime;
+ int **per_time_avail_scale_tables;
+ int *avail_all_scales_table;
+ int num_avail_all_scales;
+ int *avail_time_tables;
+ int num_avail_time_tables;
+};
+
+#define GAIN_SCALE_GAIN(_gain, _sel) \
+{ \
+ .gain = (_gain), \
+ .sel = (_sel), \
+}
+
+#define GAIN_SCALE_ITIME_US(_itime, _sel, _mul) \
+{ \
+ .time_us = (_itime), \
+ .sel = (_sel), \
+ .mul = (_mul), \
+}
+
+static inline const struct iio_itime_sel_mul *
+iio_gts_find_itime_by_time(struct iio_gts *gts, int time)
+{
+ int i;
+
+ if (!gts->num_itime)
+ return NULL;
+
+ for (i = 0; i < gts->num_itime; i++)
+ if (gts->itime_table[i].time_us == time)
+ return &gts->itime_table[i];
+
+ return NULL;
+}
+
+static inline const struct iio_itime_sel_mul *
+iio_gts_find_itime_by_sel(struct iio_gts *gts, int sel)
+{
+ int i;
+
+ for (i = 0; i < gts->num_itime; i++)
+ if (gts->itime_table[i].sel == sel)
+ return &gts->itime_table[i];
+
+ return NULL;
+}
+
+int devm_iio_init_iio_gts(struct device *dev, int max_scale_int, int max_scale_nano,
+ const struct iio_gain_sel_pair *gain_tbl, int num_gain,
+ const struct iio_itime_sel_mul *tim_tbl, int num_times,
+ struct iio_gts *gts);
+/**
+ * iio_gts_find_int_time_by_sel - find integration time matching a selector
+ * @gts: Gain time scale descriptor
+ * @sel: selector for which matching integration time is searched for
+ *
+ * Return: integration time matching given selector or -EINVAL if
+ * integration time was not found.
+ */
+static inline int iio_gts_find_int_time_by_sel(struct iio_gts *gts, int sel)
+{
+ const struct iio_itime_sel_mul *itime;
+
+ itime = iio_gts_find_itime_by_sel(gts, sel);
+ if (!itime)
+ return -EINVAL;
+
+ return itime->time_us;
+}
+
+/**
+ * iio_gts_find_sel_by_int_time - find selector matching integration time
+ * @gts: Gain time scale descriptor
+ * @time: Integration time for which matching selector is searched for
+ *
+ * Return: a selector matching given integration time or -EINVAL if
+ * selector was not found.
+ */
+static inline int iio_gts_find_sel_by_int_time(struct iio_gts *gts, int time)
+{
+ const struct iio_itime_sel_mul *itime;
+
+ itime = iio_gts_find_itime_by_time(gts, time);
+ if (!itime)
+ return -EINVAL;
+
+ return itime->sel;
+}
+
+/**
+ * iio_gts_valid_time - check if given integration time is valid
+ * @gts: Gain time scale descriptor
+ * @time_us: Integration time to check
+ *
+ * Return: True if given time is supported by device. False if not.
+ */
+static inline bool iio_gts_valid_time(struct iio_gts *gts, int time_us)
+{
+ return iio_gts_find_itime_by_time(gts, time_us) != NULL;
+}
+
+int iio_gts_find_sel_by_gain(struct iio_gts *gts, int gain);
+
+/**
+ * iio_gts_valid_gain - check if given HW-gain is valid
+ * @gts: Gain time scale descriptor
+ * @gain: HW-gain to check
+ *
+ * Return: True if given gain is supported by device. False if not.
+ */
+static inline bool iio_gts_valid_gain(struct iio_gts *gts, int gain)
+{
+ return iio_gts_find_sel_by_gain(gts, gain) >= 0;
+}
+
+int iio_find_closest_gain_low(struct iio_gts *gts, int gain, bool *in_range);
+int iio_gts_find_gain_by_sel(struct iio_gts *gts, int sel);
+int iio_gts_get_min_gain(struct iio_gts *gts);
+int iio_gts_find_int_time_by_sel(struct iio_gts *gts, int sel);
+int iio_gts_find_sel_by_int_time(struct iio_gts *gts, int time);
+
+int iio_gts_total_gain_to_scale(struct iio_gts *gts, int total_gain,
+ int *scale_int, int *scale_nano);
+int iio_gts_find_gain_sel_for_scale_using_time(struct iio_gts *gts, int time_sel,
+ int scale_int, int scale_nano,
+ int *gain_sel);
+int iio_gts_find_gain_time_sel_for_scale(struct iio_gts *gts, int scale_int,
+ int scale_nano, int *gain_sel,
+ int *time_sel);
+int iio_gts_get_scale(struct iio_gts *gts, int gain, int time, int *scale_int,
+ int *scale_nano);
+int iio_gts_find_new_gain_sel_by_old_gain_time(struct iio_gts *gts,
+ int old_gain, int old_time_sel,
+ int new_time_sel, int *new_gain);
+int iio_gts_find_new_gain_by_old_gain_time(struct iio_gts *gts, int old_gain,
+ int old_time, int new_time,
+ int *new_gain);
+int iio_gts_find_new_gain_by_gain_time_min(struct iio_gts *gts, int old_gain,
+ int old_time, int new_time,
+ int *new_gain, bool *in_range);
+int iio_gts_avail_times(struct iio_gts *gts, const int **vals, int *type,
+ int *length);
+int iio_gts_all_avail_scales(struct iio_gts *gts, const int **vals, int *type,
+ int *length);
+int iio_gts_avail_scales_for_time(struct iio_gts *gts, int time,
+ const int **vals, int *type, int *length);
+int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time);
+
+#endif
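
Putting the GTS tables together, a hedged sketch for a hypothetical light sensor with two gains and two integration times; the selector and max-scale values are illustrative, not taken from any real part:

static const struct iio_gain_sel_pair my_gains[] = {
	GAIN_SCALE_GAIN(1, 0x0),
	GAIN_SCALE_GAIN(4, 0x1),
};

static const struct iio_itime_sel_mul my_itimes[] = {
	GAIN_SCALE_ITIME_US(400000, 0x1, 4),
	GAIN_SCALE_ITIME_US(100000, 0x0, 1),
};

/* In probe; st->gts is the driver's struct iio_gts instance. */
ret = devm_iio_init_iio_gts(&pdev->dev, 0, 400000000, /* max scale 0.4 */
			    my_gains, ARRAY_SIZE(my_gains),
			    my_itimes, ARRAY_SIZE(my_itimes), &st->gts);
if (ret)
	return ret;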
diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h
index 32addd5e790e..4247497f3f8b 100644
--- a/include/linux/iio/iio-opaque.h
+++ b/include/linux/iio/iio-opaque.h
@@ -6,6 +6,15 @@
/**
* struct iio_dev_opaque - industrial I/O device opaque information
* @indio_dev: public industrial I/O device information
+ * @id: used to identify device internally
+ * @currentmode: operating mode currently in use, may be eventually
+ * checked by device drivers but should be considered
+ * read-only as this is a core internal bit
+ * @driver_module: used to make it harder to undercut users
+ * @mlock: lock used to prevent simultaneous device state changes
+ * @mlock_key: lockdep class for iio_dev lock
+ * @info_exist_lock: lock to prevent use during removal
+ * @trig_readonly: mark the current trigger immutable
* @event_interface: event chrdevs associated with interrupt lines
* @attached_buffers: array of buffers statically attached by the driver
* @attached_buffers_cnt: number of buffers in the array of statically attached buffers
@@ -19,6 +28,12 @@
* @groupcounter: index of next attribute group
* @legacy_scan_el_group: attribute group for legacy scan elements attribute group
* @legacy_buffer_group: attribute group for legacy buffer attributes group
+ * @bounce_buffer: for devices that call iio_push_to_buffers_with_ts_unaligned()
+ * @bounce_buffer_size: size of the currently allocated bounce buffer
+ * @scan_index_timestamp: cache of the index to the timestamp
+ * @clock_id: timestamping clock posix identifier
+ * @chrdev: associated character device
+ * @flags: file ops related flags including busy flag.
* @debugfs_dentry: device specific debugfs dentry
* @cached_reg_addr: cached register address for debugfs reads
* @read_buf: read buffer to be used for the initial reg read
@@ -26,6 +41,13 @@
*/
struct iio_dev_opaque {
struct iio_dev indio_dev;
+ int currentmode;
+ int id;
+ struct module *driver_module;
+ struct mutex mlock;
+ struct lock_class_key mlock_key;
+ struct mutex info_exist_lock;
+ bool trig_readonly;
struct iio_event_interface *event_interface;
struct iio_buffer **attached_buffers;
unsigned int attached_buffers_cnt;
@@ -38,15 +60,23 @@ struct iio_dev_opaque {
int groupcounter;
struct attribute_group legacy_scan_el_group;
struct attribute_group legacy_buffer_group;
+ void *bounce_buffer;
+ size_t bounce_buffer_size;
+
+ unsigned int scan_index_timestamp;
+ clockid_t clock_id;
+ struct cdev chrdev;
+ unsigned long flags;
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_dentry;
- unsigned cached_reg_addr;
+ unsigned int cached_reg_addr;
char read_buf[20];
unsigned int read_buf_len;
#endif
};
-#define to_iio_dev_opaque(indio_dev) \
- container_of(indio_dev, struct iio_dev_opaque, indio_dev)
+#define to_iio_dev_opaque(_indio_dev) \
+ container_of((_indio_dev), struct iio_dev_opaque, indio_dev)
#endif
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index f2d65e2e88b6..872ebdf0dd77 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -7,16 +7,21 @@
#ifndef _INDUSTRIAL_IO_H_
#define _INDUSTRIAL_IO_H_
+#include <linux/align.h>
#include <linux/device.h>
#include <linux/cdev.h>
+#include <linux/compiler_types.h>
+#include <linux/minmax.h>
+#include <linux/slab.h>
#include <linux/iio/types.h>
-#include <linux/of.h>
/* IIO TODO LIST */
/*
* Provide means of adjusting timer accuracy.
* Currently assumes nano seconds.
*/
+struct fwnode_reference_args;
+
enum iio_shared_by {
IIO_SEPARATE,
IIO_SHARED_BY_TYPE,
@@ -103,15 +108,16 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
/**
* IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute
* @_name: Attribute name ("_available" will be appended to the name)
+ * @_shared: Whether the attribute is shared between all channels
* @_e: Pointer to an iio_enum struct
*
* Creates a read only attribute which lists all the available enum items in a
* space separated list. This should usually be used together with IIO_ENUM()
*/
-#define IIO_ENUM_AVAILABLE(_name, _e) \
+#define IIO_ENUM_AVAILABLE(_name, _shared, _e) \
{ \
.name = (_name "_available"), \
- .shared = IIO_SHARED_BY_TYPE, \
+ .shared = _shared, \
.read = iio_enum_available_read, \
.private = (uintptr_t)(_e), \
}
@@ -127,8 +133,7 @@ struct iio_mount_matrix {
ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
const struct iio_chan_spec *chan, char *buf);
-int iio_read_mount_matrix(struct device *dev, const char *propname,
- struct iio_mount_matrix *matrix);
+int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix);
typedef const struct iio_mount_matrix *
(iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
@@ -171,6 +176,27 @@ struct iio_event_spec {
};
/**
+ * struct iio_scan_type - specification for channel data format in buffer
+ * @sign: 's' or 'u' to specify signed or unsigned
+ * @realbits: Number of valid bits of data
+ * @storagebits: Realbits + padding
+ * @shift: Shift right by this before masking out realbits.
+ * @repeat: Number of times real/storage bits repeats. When the
+ * repeat element is more than 1, then the type element in
+ * sysfs will show a repeat value. Otherwise, the number
+ * of repetitions is omitted.
+ * @endianness: little or big endian
+ */
+struct iio_scan_type {
+ char sign;
+ u8 realbits;
+ u8 storagebits;
+ u8 shift;
+ u8 repeat;
+ enum iio_endian endianness;
+};
+
+/**
* struct iio_chan_spec - specification of a single channel
* @type: What type of measurement is the channel making.
* @channel: What number do we wish to assign the channel.
@@ -180,18 +206,13 @@ struct iio_event_spec {
* @address: Driver specific identifier.
* @scan_index: Monotonic index to give ordering in scans when read
* from a buffer.
- * @scan_type: struct describing the scan type
- * @scan_type.sign: 's' or 'u' to specify signed or unsigned
- * @scan_type.realbits: Number of valid bits of data
- * @scan_type.storagebits: Realbits + padding
- * @scan_type.shift: Shift right by this before masking out
- * realbits.
- * @scan_type.repeat: Number of times real/storage bits repeats.
- * When the repeat element is more than 1, then
- * the type element in sysfs will show a repeat
- * value. Otherwise, the number of repetitions
- * is omitted.
- * @scan_type.endianness: little or big endian
+ * @scan_type: struct describing the scan type - mutually exclusive
+ * with ext_scan_type.
+ * @ext_scan_type: Used in rare cases where there is more than one scan
+ * format for a channel. When this is used, the flag
+ * has_ext_scan_type must be set and the driver must
+ * implement get_current_scan_type in struct iio_info.
+ * @num_ext_scan_type: Number of elements in ext_scan_type.
* @info_mask_separate: What information is to be exported that is specific to
* this channel.
* @info_mask_separate_available: What availability information is to be
@@ -219,6 +240,9 @@ struct iio_event_spec {
* @extend_name: Allows labeling of channel attributes with an
* informative name. Note this has no effect codes etc,
* unlike modifiers.
+ * This field is deprecated in favour of providing
+ * iio_info->read_label() to override the label, which
+ * unlike @extend_name does not affect sysfs filenames.
* @datasheet_name: A name used in in-kernel mapping of channels. It should
* correspond to the first name that the channel is referred
* to by in the datasheet (e.g. IND), or the nearest
@@ -232,6 +256,7 @@ struct iio_event_spec {
* attributes but not for event codes.
* @output: Channel is output.
* @differential: Channel is differential.
+ * @has_ext_scan_type: True if ext_scan_type is used instead of scan_type.
*/
struct iio_chan_spec {
enum iio_chan_type type;
@@ -239,31 +264,31 @@ struct iio_chan_spec {
int channel2;
unsigned long address;
int scan_index;
- struct {
- char sign;
- u8 realbits;
- u8 storagebits;
- u8 shift;
- u8 repeat;
- enum iio_endian endianness;
- } scan_type;
- long info_mask_separate;
- long info_mask_separate_available;
- long info_mask_shared_by_type;
- long info_mask_shared_by_type_available;
- long info_mask_shared_by_dir;
- long info_mask_shared_by_dir_available;
- long info_mask_shared_by_all;
- long info_mask_shared_by_all_available;
+ union {
+ struct iio_scan_type scan_type;
+ struct {
+ const struct iio_scan_type *ext_scan_type;
+ unsigned int num_ext_scan_type;
+ };
+ };
+ unsigned long info_mask_separate;
+ unsigned long info_mask_separate_available;
+ unsigned long info_mask_shared_by_type;
+ unsigned long info_mask_shared_by_type_available;
+ unsigned long info_mask_shared_by_dir;
+ unsigned long info_mask_shared_by_dir_available;
+ unsigned long info_mask_shared_by_all;
+ unsigned long info_mask_shared_by_all_available;
const struct iio_event_spec *event_spec;
unsigned int num_event_specs;
const struct iio_chan_spec_ext_info *ext_info;
const char *extend_name;
const char *datasheet_name;
- unsigned modified:1;
- unsigned indexed:1;
- unsigned output:1;
- unsigned differential:1;
+ unsigned int modified:1;
+ unsigned int indexed:1;
+ unsigned int output:1;
+ unsigned int differential:1;
+ unsigned int has_ext_scan_type:1;
};
@@ -313,9 +338,55 @@ static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
}
s64 iio_get_time_ns(const struct iio_dev *indio_dev);
-unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
-/* Device operating modes */
+/*
+ * Device operating modes
+ * @INDIO_DIRECT_MODE: There is an access to either:
+ * a) The last single value available for devices that do not provide
+ * on-demand reads.
+ * b) A new value after performing an on-demand read otherwise.
+ * On most devices, this is a single-shot read. On some devices with data
+ * streams without an 'on-demand' function, this might also be the 'last value'
+ * feature. Above all, this mode internally means that we are not in any of the
+ * other modes, and sysfs reads should work.
+ * Device drivers should inform the core if they support this mode.
+ * @INDIO_BUFFER_TRIGGERED: Common mode when dealing with kfifo buffers.
+ * It indicates that an explicit trigger is required. This requests the core to
+ * attach a poll function when enabling the buffer, which is indicated by the
+ * _TRIGGERED suffix.
+ * The core will ensure this mode is set when registering a triggered buffer
+ * with iio_triggered_buffer_setup().
+ * @INDIO_BUFFER_SOFTWARE: Another kfifo buffer mode, but not event triggered.
+ * No poll function can be attached because there is no triggered infrastructure
+ * we can use to cause capture. There is a kfifo that the driver will fill, but
+ * not "only one scan at a time". Typically, hardware will have a buffer that
+ * can hold multiple scans. Software may read one or more scans at a time
+ * and push the available data to a kfifo. This means the core will not attach
+ * any poll function when enabling the buffer.
+ * The core will ensure this mode is set when registering a simple kfifo buffer
+ * with devm_iio_kfifo_buffer_setup().
+ * @INDIO_BUFFER_HARDWARE: For specific hardware, if unsure do not use this mode.
+ * Same as above but this time the buffer is not a kfifo where we have direct
+ * access to the data. Instead, the consumer driver must access the data through
+ * non-software-visible channels (or DMA when there is no demux possible in
+ * software).
+ * The core will ensure this mode is set when registering a dmaengine buffer
+ * with devm_iio_dmaengine_buffer_setup().
+ * @INDIO_EVENT_TRIGGERED: Very unusual mode.
+ * Triggers usually refer to an external event which will start data capture.
+ * Here it is somewhat the opposite: a particular state of the data produces
+ * an event rather than starting a capture. We don't necessarily have access
+ * to the data itself, only to the event produced. For example, this
+ * can be a threshold detector. The internal path of this mode is very close to
+ * the INDIO_BUFFER_TRIGGERED mode.
+ * The core will ensure this mode is set when registering a triggered event.
+ * @INDIO_HARDWARE_TRIGGERED: Very unusual mode.
+ * Here, triggers can result in data capture and can be routed to multiple
+ * hardware components, which makes them close to regular triggers in the way
+ * they must be managed by the core, but without the entire interrupts/poll
+ * functions burden. Interrupts are irrelevant as the data flow is hardware
+ * mediated and distributed.
+ */
#define INDIO_DIRECT_MODE 0x01
#define INDIO_BUFFER_TRIGGERED 0x02
#define INDIO_BUFFER_SOFTWARE 0x04
@@ -333,6 +404,11 @@ unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
#define INDIO_MAX_RAW_ELEMENTS 4
+struct iio_val_int_plus_micro {
+ int integer;
+ int micro;
+};
+
struct iio_trigger; /* forward declaration */
/**
@@ -371,16 +447,17 @@ struct iio_trigger; /* forward declaration */
* @write_event_config: set if the event is enabled.
* @read_event_value: read a configuration value associated with the event.
* @write_event_value: write a configuration value for the event.
+ * @read_event_label: function to request label name for a specified label,
+ * for better event identification.
* @validate_trigger: function to validate the trigger when the
* current trigger gets changed.
+ * @get_current_scan_type: must be implemented by drivers that use ext_scan_type
+ * in the channel spec to return the index of the currently
+ * active ext_scan_type for a channel.
* @update_scan_mode: function to configure device and scan buffer when
* channels have changed
* @debugfs_reg_access: function to read or write register value of device
- * @of_xlate: function pointer to obtain channel specifier index.
- * When #iio-cells is greater than '0', the driver could
- * provide a custom of_xlate function that reads the
- * *args* and returns the appropriate index in registered
- * IIO channels array.
+ * @fwnode_xlate: fwnode based function pointer to obtain channel specifier index.
* @hwfifo_set_watermark: function pointer to set the current hardware
* fifo watermark level; see hwfifo_* entries in
* Documentation/ABI/testing/sysfs-bus-iio for details on
@@ -439,7 +516,7 @@ struct iio_info {
const struct iio_chan_spec *chan,
enum iio_event_type type,
enum iio_event_direction dir,
- int state);
+ bool state);
int (*read_event_value)(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
@@ -453,18 +530,26 @@ struct iio_info {
enum iio_event_direction dir,
enum iio_event_info info, int val, int val2);
+ int (*read_event_label)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ char *label);
+
int (*validate_trigger)(struct iio_dev *indio_dev,
struct iio_trigger *trig);
+ int (*get_current_scan_type)(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan);
int (*update_scan_mode)(struct iio_dev *indio_dev,
const unsigned long *scan_mask);
int (*debugfs_reg_access)(struct iio_dev *indio_dev,
- unsigned reg, unsigned writeval,
- unsigned *readval);
- int (*of_xlate)(struct iio_dev *indio_dev,
- const struct of_phandle_args *iiospec);
- int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val);
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval);
+ int (*fwnode_xlate)(struct iio_dev *indio_dev,
+ const struct fwnode_reference_args *iiospec);
+ int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned int val);
int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
- unsigned count);
+ unsigned int count);
};
/**
@@ -488,24 +573,24 @@ struct iio_buffer_setup_ops {
/**
* struct iio_dev - industrial I/O device
- * @id: [INTERN] used to identify device internally
- * @driver_module: [INTERN] used to make it harder to undercut users
- * @modes: [DRIVER] operating modes supported by device
- * @currentmode: [DRIVER] current operating mode
+ * @modes: [DRIVER] bitmask listing all the operating modes
+ * supported by the IIO device. This list should be
+ * initialized before registering the IIO device. It can
+ * also be filed up by the IIO core, as a result of
+ * enabling particular features in the driver
+ * (see iio_triggered_event_setup()).
* @dev: [DRIVER] device structure, should be assigned a parent
* and owner
* @buffer: [DRIVER] any buffer present
* @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
- * @mlock: [INTERN] lock used to prevent simultaneous device state
- * changes
- * @available_scan_masks: [DRIVER] optional array of allowed bitmasks
+ * @available_scan_masks: [DRIVER] optional array of allowed bitmasks. Sort the
+ * array in order of preference, the most preferred
+ * masks first.
* @masklength: [INTERN] the length of the mask established from
* channels
* @active_scan_mask: [INTERN] union of all scan masks requested by buffers
* @scan_timestamp: [INTERN] set if any buffers have requested timestamp
- * @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @trig: [INTERN] current device trigger (buffer modes)
- * @trig_readonly: [INTERN] mark the current trigger immutable
* @pollfunc: [DRIVER] function run on trigger being received
* @pollfunc_event: [DRIVER] function run on events trigger being received
* @channels: [DRIVER] channel specification structure table
@@ -513,34 +598,23 @@ struct iio_buffer_setup_ops {
* @name: [DRIVER] name of the device.
* @label: [DRIVER] unique name to identify which device this is
* @info: [DRIVER] callbacks and constant info from driver
- * @clock_id: [INTERN] timestamping clock posix identifier
- * @info_exist_lock: [INTERN] lock to prevent use during removal
* @setup_ops: [DRIVER] callbacks to call before and after buffer
* enable/disable
- * @chrdev: [INTERN] associated character device
- * @flags: [INTERN] file ops related flags including busy flag.
* @priv: [DRIVER] reference to driver's private information
* **MUST** be accessed **ONLY** via iio_priv() helper
*/
struct iio_dev {
- int id;
- struct module *driver_module;
-
int modes;
- int currentmode;
struct device dev;
struct iio_buffer *buffer;
int scan_bytes;
- struct mutex mlock;
const unsigned long *available_scan_masks;
- unsigned masklength;
+ unsigned int __private masklength;
const unsigned long *active_scan_mask;
- bool scan_timestamp;
- unsigned scan_index_timestamp;
+ bool __private scan_timestamp;
struct iio_trigger *trig;
- bool trig_readonly;
struct iio_poll_func *pollfunc;
struct iio_poll_func *pollfunc_event;
@@ -550,15 +624,15 @@ struct iio_dev {
const char *name;
const char *label;
const struct iio_info *info;
- clockid_t clock_id;
- struct mutex info_exist_lock;
const struct iio_buffer_setup_ops *setup_ops;
- struct cdev chrdev;
- unsigned long flags;
- void *priv;
+ void *__private priv;
};
+int iio_device_id(struct iio_dev *indio_dev);
+int iio_device_get_current_mode(struct iio_dev *indio_dev);
+bool iio_buffer_enabled(struct iio_dev *indio_dev);
+
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
/**
@@ -587,10 +661,36 @@ void iio_device_unregister(struct iio_dev *indio_dev);
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
struct module *this_mod);
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
-int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
-void iio_device_release_direct_mode(struct iio_dev *indio_dev);
+bool __iio_device_claim_direct(struct iio_dev *indio_dev);
+void __iio_device_release_direct(struct iio_dev *indio_dev);
+
+/*
+ * Helper functions that allow claim and release of direct mode
+ * in a fashion that doesn't generate many false positives from sparse.
+ * Note this must remain static inline in the header so that sparse
+ * can see the __acquire() marking. Revisit when sparse supports
+ * __cond_acquires()
+ */
+static inline bool iio_device_claim_direct(struct iio_dev *indio_dev)
+{
+ if (!__iio_device_claim_direct(indio_dev))
+ return false;
+
+ __acquire(iio_dev);
+
+ return true;
+}
-extern struct bus_type iio_bus_type;
+static inline void iio_device_release_direct(struct iio_dev *indio_dev)
+{
+ __iio_device_release_direct(indio_dev);
+ __release(indio_dev);
+}
+
+int iio_device_claim_buffer_mode(struct iio_dev *indio_dev);
+void iio_device_release_buffer_mode(struct iio_dev *indio_dev);
+
+extern const struct bus_type iio_bus_type;
/**
* iio_device_put() - reference counted deallocation of struct device
@@ -602,15 +702,7 @@ static inline void iio_device_put(struct iio_dev *indio_dev)
put_device(&indio_dev->dev);
}
-/**
- * iio_device_get_clock() - Retrieve current timestamping clock for the device
- * @indio_dev: IIO device structure containing the device
- */
-static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
-{
- return indio_dev->clock_id;
-}
-
+clockid_t iio_device_get_clock(const struct iio_dev *indio_dev);
int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id);
/**
@@ -680,32 +772,67 @@ static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
return dev_get_drvdata(&indio_dev->dev);
}
-/* Can we make this smaller? */
-#define IIO_ALIGN L1_CACHE_BYTES
+/*
+ * Used to ensure the iio_priv() structure is aligned to allow that structure
+ * to in turn include IIO_DMA_MINALIGN'd elements such as buffers which
+ * must not share cachelines with the rest of the structure, thus making
+ * them safe for use with non-coherent DMA.
+ *
+ * A number of drivers also use this on buffers that include a 64-bit timestamp
+ * that is used with iio_push_to_buffers_with_ts(). Therefore, in the case where
+ * DMA alignment is not sufficient for proper timestamp alignment, we align to
+ * 8 bytes instead.
+ */
+#define IIO_DMA_MINALIGN MAX(ARCH_DMA_MINALIGN, sizeof(s64))
+
+#define __IIO_DECLARE_BUFFER_WITH_TS(type, name, count) \
+ type name[ALIGN((count), sizeof(s64) / sizeof(type)) + sizeof(s64) / sizeof(type)]
+
+/**
+ * IIO_DECLARE_BUFFER_WITH_TS() - Declare a buffer with timestamp
+ * @type: element type of the buffer
+ * @name: identifier name of the buffer
+ * @count: number of elements in the buffer
+ *
+ * Declares a buffer that is safe to use with iio_push_to_buffers_with_ts(). In
+ * addition to allocating enough space for @count elements of @type, it also
+ * allocates space for a s64 timestamp at the end of the buffer and ensures
+ * proper alignment of the timestamp.
+ */
+#define IIO_DECLARE_BUFFER_WITH_TS(type, name, count) \
+ __IIO_DECLARE_BUFFER_WITH_TS(type, name, count) __aligned(sizeof(s64))
+
+/**
+ * IIO_DECLARE_DMA_BUFFER_WITH_TS() - Declare a DMA-aligned buffer with timestamp
+ * @type: element type of the buffer
+ * @name: identifier name of the buffer
+ * @count: number of elements in the buffer
+ *
+ * Same as IIO_DECLARE_BUFFER_WITH_TS(), but it uses __aligned(IIO_DMA_MINALIGN)
+ * to ensure that the buffer doesn't share cachelines with anything that comes
+ * before it in a struct. This should not be used for stack-allocated buffers
+ * as stack memory cannot generally be used for DMA.
+ */
+#define IIO_DECLARE_DMA_BUFFER_WITH_TS(type, name, count) \
+ __IIO_DECLARE_BUFFER_WITH_TS(type, name, count) __aligned(IIO_DMA_MINALIGN)
+
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);
/* The information at the returned address is guaranteed to be cacheline aligned */
static inline void *iio_priv(const struct iio_dev *indio_dev)
{
- return indio_dev->priv;
+ return ACCESS_PRIVATE(indio_dev, priv);
}
void iio_device_free(struct iio_dev *indio_dev);
struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv);
-__printf(2, 3)
-struct iio_trigger *devm_iio_trigger_alloc(struct device *parent,
- const char *fmt, ...);
-/**
- * iio_buffer_enabled() - helper function to test if the buffer is enabled
- * @indio_dev: IIO device structure for device
- **/
-static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
-{
- return indio_dev->currentmode
- & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
- INDIO_BUFFER_SOFTWARE);
-}
+#define devm_iio_trigger_alloc(parent, fmt, ...) \
+ __devm_iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
+__printf(3, 4)
+struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
+ struct module *this_mod,
+ const char *fmt, ...);
/**
* iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
* @indio_dev: IIO device structure for device
@@ -719,6 +846,98 @@ static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
}
#endif
+/**
+ * iio_device_suspend_triggering() - suspend trigger attached to an iio_dev
+ * @indio_dev: iio_dev associated with the device that will have triggers suspended
+ *
+ * Return 0 if successful, negative otherwise
+ **/
+int iio_device_suspend_triggering(struct iio_dev *indio_dev);
+
+/**
+ * iio_device_resume_triggering() - resume trigger attached to an iio_dev
+ * that was previously suspended with iio_device_suspend_triggering()
+ * @indio_dev: iio_dev associated with the device that will have triggers resumed
+ *
+ * Returns: 0 if successful, a negative error code otherwise
+ **/
+int iio_device_resume_triggering(struct iio_dev *indio_dev);
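An illustrative pairing in a driver's system PM callbacks (function names hypothetical):

static int my_driver_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	/* Quiesce triggered capture before powering the device down. */
	return iio_device_suspend_triggering(indio_dev);
}

static int my_driver_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	return iio_device_resume_triggering(indio_dev);
}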
+
+#ifdef CONFIG_ACPI
+bool iio_read_acpi_mount_matrix(struct device *dev,
+ struct iio_mount_matrix *orientation,
+ char *acpi_method);
+const char *iio_get_acpi_device_name_and_data(struct device *dev, const void **data);
+#else
+static inline bool iio_read_acpi_mount_matrix(struct device *dev,
+ struct iio_mount_matrix *orientation,
+ char *acpi_method)
+{
+ return false;
+}
+static inline const char *
+iio_get_acpi_device_name_and_data(struct device *dev, const void **data)
+{
+ return NULL;
+}
+#endif
+static inline const char *iio_get_acpi_device_name(struct device *dev)
+{
+ return iio_get_acpi_device_name_and_data(dev, NULL);
+}
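Probe-time sketch of the ACPI helpers (hypothetical driver; "ROTM" is a commonly used mount-matrix method name, but the method is device-specific):

if (!iio_read_acpi_mount_matrix(&client->dev, &st->orientation, "ROTM"))
	dev_dbg(&client->dev, "no ACPI mount matrix, using identity\n");

name = iio_get_acpi_device_name_and_data(&client->dev, &ddata);
if (!name)
	name = id->name;	/* fall back for non-ACPI enumeration */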
+
+/**
+ * iio_get_current_scan_type() - Get the current scan type for a channel
+ * @indio_dev: the IIO device to get the scan type for
+ * @chan: the channel to get the scan type for
+ *
+ * Most devices only have one scan type per channel and can just access it
+ * directly without calling this function. Core IIO code and drivers that
+ * implement ext_scan_type in the channel spec should use this function to
+ * get the current scan type for a channel.
+ *
+ * Returns: the current scan type for the channel, or an ERR_PTR() on error.
+ */
+static inline const struct iio_scan_type
+*iio_get_current_scan_type(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ int ret;
+
+ if (chan->has_ext_scan_type) {
+ ret = indio_dev->info->get_current_scan_type(indio_dev, chan);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (ret >= chan->num_ext_scan_type)
+ return ERR_PTR(-EINVAL);
+
+ return &chan->ext_scan_type[ret];
+ }
+
+ return &chan->scan_type;
+}
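Call-pattern sketch, e.g. when sizing samples in buffer handling code; the IS_ERR() check matters because the helper can return an ERR_PTR():

const struct iio_scan_type *scan_type;

scan_type = iio_get_current_scan_type(indio_dev, chan);
if (IS_ERR(scan_type))
	return PTR_ERR(scan_type);

sample_bytes = BITS_TO_BYTES(scan_type->storagebits);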
+
+/**
+ * iio_get_masklength() - Get the length of the channels mask
+ * @indio_dev: the IIO device to get the masklength for
+ */
+static inline unsigned int iio_get_masklength(const struct iio_dev *indio_dev)
+{
+ return ACCESS_PRIVATE(indio_dev, masklength);
+}
+
+int iio_active_scan_mask_index(struct iio_dev *indio_dev);
+
+/**
+ * iio_for_each_active_channel() - Iterate over active channels
+ * @indio_dev: the IIO device
+ * @chan: Holds the index of the enabled channel
+ */
+#define iio_for_each_active_channel(indio_dev, chan) \
+ for_each_set_bit((chan), (indio_dev)->active_scan_mask, \
+ iio_get_masklength(indio_dev))
+
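Illustrative use in a trigger handler (helper name hypothetical), packing only the channels enabled in the active scan mask:

unsigned int i = 0, chan;

iio_for_each_active_channel(indio_dev, chan)
	st->scan[i++] = my_read_channel_raw(st, chan);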
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);
int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index f9b728d490b1..bfb6df68e6c9 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -9,8 +9,10 @@
#ifndef __IIO_ADIS_H__
#define __IIO_ADIS_H__
+#include <linux/cleanup.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
+#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#define ADIS_WRITE_REG(reg) ((0x80 | (reg)))
@@ -20,6 +22,7 @@
#define ADIS_REG_PAGE_ID 0x00
struct adis;
+struct iio_dev_attr;
/**
* struct adis_timeouts - ADIS chip variant timeouts
@@ -32,6 +35,7 @@ struct adis_timeout {
u16 sw_reset_ms;
u16 self_test_ms;
};
+
/**
* struct adis_data - ADIS chip variant specific data
* @read_delay: SPI delay for read operations in us
@@ -40,21 +44,26 @@ struct adis_timeout {
* @glob_cmd_reg: Register address of the GLOB_CMD register
* @msc_ctrl_reg: Register address of the MSC_CTRL register
* @diag_stat_reg: Register address of the DIAG_STAT register
+ * @diag_stat_size: Length (in bytes) of the DIAG_STAT register. If 0, the
+ *	default length of 2 bytes is used.
* @prod_id_reg: Register address of the PROD_ID register
* @prod_id: Product ID code that should be expected when reading @prod_id_reg
* @self_test_mask: Bitmask of supported self-test operations
* @self_test_reg: Register address to request self test command
* @self_test_no_autoclear: True if device's self-test needs clear of ctrl reg
- * @status_error_msgs: Array of error messgaes
+ * @status_error_msgs: Array of error messages
* @status_error_mask: Bitmask of errors supported by the device
* @timeouts: Chip specific delays
* @enable_irq: Hook for ADIS devices that have a special IRQ enable/disable
+ * @unmasked_drdy: True for devices that cannot mask/unmask the data ready pin
* @has_paging: True if ADIS device has paged registers
+ * @has_fifo: True if ADIS device has a hardware FIFO
* @burst_reg_cmd: Register command that triggers burst
* @burst_len: Burst size in the SPI RX buffer. If @burst_max_len is defined,
* this should be the minimum size supported by the device.
* @burst_max_len: Holds the maximum burst size when the device supports
* more than one burst mode with different sizes
+ * @burst_max_speed_hz: Maximum SPI speed that can be used in burst mode
*/
struct adis_data {
unsigned int read_delay;
@@ -64,6 +73,7 @@ struct adis_data {
unsigned int glob_cmd_reg;
unsigned int msc_ctrl_reg;
unsigned int diag_stat_reg;
+ unsigned int diag_stat_size;
unsigned int prod_id_reg;
unsigned int prod_id;
@@ -77,12 +87,30 @@ struct adis_data {
unsigned int status_error_mask;
int (*enable_irq)(struct adis *adis, bool enable);
+ bool unmasked_drdy;
bool has_paging;
+ bool has_fifo;
unsigned int burst_reg_cmd;
unsigned int burst_len;
unsigned int burst_max_len;
+ unsigned int burst_max_speed_hz;
+};
+
+/**
+ * struct adis_ops - Custom ops for adis devices.
+ * @write: Custom SPI write implementation.
+ * @read: Custom SPI read implementation.
+ * @reset: Custom sw reset implementation. The custom implementation does not
+ * need to sleep after the reset; the library already handles that.
+ */
+struct adis_ops {
+ int (*write)(struct adis *adis, unsigned int reg, unsigned int value,
+ unsigned int size);
+ int (*read)(struct adis *adis, unsigned int reg, unsigned int *value,
+ unsigned int size);
+ int (*reset)(struct adis *adis);
};
/**
@@ -90,9 +118,9 @@ struct adis_data {
* @spi: Reference to SPI device which owns this ADIS IIO device
* @trig: IIO trigger object data
* @data: ADIS chip variant specific data
- * @burst: ADIS burst transfer information
* @burst_extra_len: Burst extra length. Should only be used by devices that can
* dynamically change their burst mode length.
+ * @ops: ops struct for custom read and write functions
* @state_lock: Lock used by the device to protect state
* @msg: SPI message object
* @xfer: SPI transfer objects to be used for a @msg
@@ -108,7 +136,8 @@ struct adis {
const struct adis_data *data;
unsigned int burst_extra_len;
- /**
+ const struct adis_ops *ops;
+ /*
* The state_lock is meant to be used during operations that require
* a sequence of SPI R/W in order to protect the SPI transfer
* information (fields 'xfer', 'msg' & 'current_page') between
@@ -126,70 +155,71 @@ struct adis {
unsigned long irq_flag;
void *buffer;
- uint8_t tx[10] ____cacheline_aligned;
- uint8_t rx[4];
+ u8 tx[10] __aligned(IIO_DMA_MINALIGN);
+ u8 rx[4];
};
int adis_init(struct adis *adis, struct iio_dev *indio_dev,
- struct spi_device *spi, const struct adis_data *data);
+ struct spi_device *spi, const struct adis_data *data);
int __adis_reset(struct adis *adis);
/**
* adis_reset() - Reset the device
* @adis: The adis device
*
- * Returns 0 on success, a negative error code otherwise
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_reset(struct adis *adis)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_reset(adis);
- mutex_unlock(&adis->state_lock);
-
- return ret;
+ guard(mutex)(&adis->state_lock);
+ return __adis_reset(adis);
}
int __adis_write_reg(struct adis *adis, unsigned int reg,
- unsigned int val, unsigned int size);
+ unsigned int val, unsigned int size);
int __adis_read_reg(struct adis *adis, unsigned int reg,
- unsigned int *val, unsigned int size);
+ unsigned int *val, unsigned int size);
/**
* __adis_write_reg_8() - Write single byte to a register (unlocked)
* @adis: The adis device
* @reg: The address of the register to be written
- * @value: The value to write
+ * @val: The value to write
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg,
- uint8_t val)
+ u8 val)
{
- return __adis_write_reg(adis, reg, val, 1);
+ return adis->ops->write(adis, reg, val, 1);
}
/**
* __adis_write_reg_16() - Write 2 bytes to a pair of registers (unlocked)
* @adis: The adis device
* @reg: The address of the lower of the two registers
- * @value: Value to be written
+ * @val: Value to be written
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg,
- uint16_t val)
+ u16 val)
{
- return __adis_write_reg(adis, reg, val, 2);
+ return adis->ops->write(adis, reg, val, 2);
}
/**
* __adis_write_reg_32() - write 4 bytes to four registers (unlocked)
* @adis: The adis device
* @reg: The address of the lower of the four register
- * @value: Value to be written
+ * @val: Value to be written
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg,
- uint32_t val)
+ u32 val)
{
- return __adis_write_reg(adis, reg, val, 4);
+ return adis->ops->write(adis, reg, val, 4);
}
/**
@@ -197,14 +227,16 @@ static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg,
* @adis: The adis device
* @reg: The address of the lower of the two registers
* @val: The value read back from the device
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg,
- uint16_t *val)
+ u16 *val)
{
unsigned int tmp;
int ret;
- ret = __adis_read_reg(adis, reg, &tmp, 2);
+ ret = adis->ops->read(adis, reg, &tmp, 2);
if (ret == 0)
*val = tmp;
@@ -216,14 +248,16 @@ static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg,
* @adis: The adis device
* @reg: The address of the lower of the two registers
* @val: The value read back from the device
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg,
- uint32_t *val)
+ u32 *val)
{
unsigned int tmp;
int ret;
- ret = __adis_read_reg(adis, reg, &tmp, 4);
+ ret = adis->ops->read(adis, reg, &tmp, 4);
if (ret == 0)
*val = tmp;
@@ -234,19 +268,16 @@ static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg,
* adis_write_reg() - write N bytes to register
* @adis: The adis device
* @reg: The address of the lower of the two registers
- * @value: The value to write to device (up to 4 bytes)
+ * @val: The value to write to device (up to 4 bytes)
* @size: The size of the @value (in bytes)
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_write_reg(struct adis *adis, unsigned int reg,
- unsigned int val, unsigned int size)
+ unsigned int val, unsigned int size)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_write_reg(adis, reg, val, size);
- mutex_unlock(&adis->state_lock);
-
- return ret;
+ guard(mutex)(&adis->state_lock);
+ return adis->ops->write(adis, reg, val, size);
}
/**
@@ -255,27 +286,26 @@ static inline int adis_write_reg(struct adis *adis, unsigned int reg,
* @reg: The address of the lower of the two registers
* @val: The value read back from the device
* @size: The size of the @val buffer
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static int adis_read_reg(struct adis *adis, unsigned int reg,
- unsigned int *val, unsigned int size)
+ unsigned int *val, unsigned int size)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_read_reg(adis, reg, val, size);
- mutex_unlock(&adis->state_lock);
-
- return ret;
+ guard(mutex)(&adis->state_lock);
+ return adis->ops->read(adis, reg, val, size);
}
/**
* adis_write_reg_8() - Write single byte to a register
* @adis: The adis device
* @reg: The address of the register to be written
- * @value: The value to write
+ * @val: The value to write
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_write_reg_8(struct adis *adis, unsigned int reg,
- uint8_t val)
+ u8 val)
{
return adis_write_reg(adis, reg, val, 1);
}
@@ -284,10 +314,12 @@ static inline int adis_write_reg_8(struct adis *adis, unsigned int reg,
* adis_write_reg_16() - Write 2 bytes to a pair of registers
* @adis: The adis device
* @reg: The address of the lower of the two registers
- * @value: Value to be written
+ * @val: Value to be written
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_write_reg_16(struct adis *adis, unsigned int reg,
- uint16_t val)
+ u16 val)
{
return adis_write_reg(adis, reg, val, 2);
}
@@ -296,10 +328,12 @@ static inline int adis_write_reg_16(struct adis *adis, unsigned int reg,
* adis_write_reg_32() - write 4 bytes to four registers
* @adis: The adis device
* @reg: The address of the lower of the four register
- * @value: Value to be written
+ * @val: Value to be written
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_write_reg_32(struct adis *adis, unsigned int reg,
- uint32_t val)
+ u32 val)
{
return adis_write_reg(adis, reg, val, 4);
}
@@ -309,9 +343,11 @@ static inline int adis_write_reg_32(struct adis *adis, unsigned int reg,
* @adis: The adis device
* @reg: The address of the lower of the two registers
* @val: The value read back from the device
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
- uint16_t *val)
+ u16 *val)
{
unsigned int tmp;
int ret;
@@ -328,9 +364,11 @@ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
* @adis: The adis device
* @reg: The address of the lower of the two registers
* @val: The value read back from the device
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_read_reg_32(struct adis *adis, unsigned int reg,
- uint32_t *val)
+ u32 *val)
{
unsigned int tmp;
int ret;
@@ -353,16 +391,14 @@ int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask,
* @size: Size of the register to update
*
* Updates the desired bits of @reg in accordance with @mask and @val.
+ *
+ * Returns: %0 on success, a negative error code otherwise
*/
static inline int adis_update_bits_base(struct adis *adis, unsigned int reg,
const u32 mask, const u32 val, u8 size)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_update_bits_base(adis, reg, mask, val, size);
- mutex_unlock(&adis->state_lock);
- return ret;
+ guard(mutex)(&adis->state_lock);
+ return __adis_update_bits_base(adis, reg, mask, val, size);
}
/**
@@ -377,10 +413,8 @@ static inline int adis_update_bits_base(struct adis *adis, unsigned int reg,
* @val can lead to undesired behavior if the register to update is 16bit.
*/
#define adis_update_bits(adis, reg, mask, val) ({ \
- BUILD_BUG_ON(sizeof(val) == 1 || sizeof(val) == 8); \
- __builtin_choose_expr(sizeof(val) == 4, \
- adis_update_bits_base(adis, reg, mask, val, 4), \
- adis_update_bits_base(adis, reg, mask, val, 2)); \
+ BUILD_BUG_ON(sizeof(val) != 2 && sizeof(val) != 4); \
+ adis_update_bits_base(adis, reg, mask, val, sizeof(val)); \
})
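With the BUILD_BUG_ON above, the register width is taken from sizeof(@val), so callers targeting a 16-bit register should make the size explicit; a sketch with a hypothetical register:

/* The cast selects the 2-byte variant at compile time. */
ret = adis_update_bits(adis, MY_CTRL_REG, MY_CTRL_MASK, (u16)enable);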
/**
@@ -395,52 +429,33 @@ static inline int adis_update_bits_base(struct adis *adis, unsigned int reg,
* @val can lead to undesired behavior if the register to update is 16bit.
*/
#define __adis_update_bits(adis, reg, mask, val) ({ \
- BUILD_BUG_ON(sizeof(val) == 1 || sizeof(val) == 8); \
- __builtin_choose_expr(sizeof(val) == 4, \
- __adis_update_bits_base(adis, reg, mask, val, 4), \
- __adis_update_bits_base(adis, reg, mask, val, 2)); \
+ BUILD_BUG_ON(sizeof(val) != 2 && sizeof(val) != 4); \
+ __adis_update_bits_base(adis, reg, mask, val, sizeof(val)); \
})
-int adis_enable_irq(struct adis *adis, bool enable);
int __adis_check_status(struct adis *adis);
int __adis_initial_startup(struct adis *adis);
+int __adis_enable_irq(struct adis *adis, bool enable);
-static inline int adis_check_status(struct adis *adis)
-{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_check_status(adis);
- mutex_unlock(&adis->state_lock);
-
- return ret;
-}
-
-/* locked version of __adis_initial_startup() */
-static inline int adis_initial_startup(struct adis *adis)
+static inline int adis_enable_irq(struct adis *adis, bool enable)
{
- int ret;
-
- mutex_lock(&adis->state_lock);
- ret = __adis_initial_startup(adis);
- mutex_unlock(&adis->state_lock);
-
- return ret;
+ guard(mutex)(&adis->state_lock);
+ return __adis_enable_irq(adis, enable);
}
-static inline void adis_dev_lock(struct adis *adis)
+static inline int adis_check_status(struct adis *adis)
{
- mutex_lock(&adis->state_lock);
+ guard(mutex)(&adis->state_lock);
+ return __adis_check_status(adis);
}
-static inline void adis_dev_unlock(struct adis *adis)
-{
- mutex_unlock(&adis->state_lock);
-}
+#define adis_dev_auto_lock(adis) guard(mutex)(&(adis)->state_lock)
+#define adis_dev_auto_scoped_lock(adis) \
+ scoped_guard(mutex, &(adis)->state_lock)
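Usage sketch for the cleanup-based lock helpers (function and registers hypothetical); the guard drops state_lock on every return path:

static int my_read_pair(struct adis *adis, u16 *a, u16 *b)
{
	int ret;

	adis_dev_auto_lock(adis);

	ret = __adis_read_reg_16(adis, MY_REG_A, a);
	if (ret)
		return ret;

	return __adis_read_reg_16(adis, MY_REG_B, b);
}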
int adis_single_conversion(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, unsigned int error_mask,
- int *val);
+ const struct iio_chan_spec *chan,
+ unsigned int error_mask, int *val);
#define ADIS_VOLTAGE_CHAN(addr, si, chan, name, info_all, bits) { \
.type = IIO_VOLTAGE, \
@@ -489,7 +504,7 @@ int adis_single_conversion(struct iio_dev *indio_dev,
.modified = 1, \
.channel2 = IIO_MOD_ ## mod, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
- info_sep, \
+ (info_sep), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.info_mask_shared_by_all = info_all, \
.address = (addr), \
@@ -514,22 +529,33 @@ int adis_single_conversion(struct iio_dev *indio_dev,
#define ADIS_ROT_CHAN(mod, addr, si, info_sep, info_all, bits) \
ADIS_MOD_CHAN(IIO_ROT, mod, addr, si, info_sep, info_all, bits)
+#define devm_adis_setup_buffer_and_trigger(adis, indio_dev, trigger_handler) \
+ devm_adis_setup_buffer_and_trigger_with_attrs((adis), (indio_dev), \
+ (trigger_handler), NULL, \
+ NULL)
+
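Probe-time sketch (handler name hypothetical); the plain wrapper passes NULL setup ops and buffer attributes to the _with_attrs() variant:

ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
					 my_trigger_handler);
if (ret)
	return ret;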
#ifdef CONFIG_IIO_ADIS_LIB_BUFFER
int
-devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
- irq_handler_t trigger_handler);
+devm_adis_setup_buffer_and_trigger_with_attrs(struct adis *adis,
+ struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler,
+ const struct iio_buffer_setup_ops *ops,
+ const struct iio_dev_attr **buffer_attrs);
int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
int adis_update_scan_mode(struct iio_dev *indio_dev,
- const unsigned long *scan_mask);
+ const unsigned long *scan_mask);
#else /* CONFIG_IIO_BUFFER */
static inline int
-devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev,
- irq_handler_t trigger_handler)
+devm_adis_setup_buffer_and_trigger_with_attrs(struct adis *adis,
+ struct iio_dev *indio_dev,
+ irq_handler_t trigger_handler,
+ const struct iio_buffer_setup_ops *ops,
+ const struct iio_dev_attr **buffer_attrs)
{
return 0;
}
@@ -547,7 +573,8 @@ static inline int devm_adis_probe_trigger(struct adis *adis,
#ifdef CONFIG_DEBUG_FS
int adis_debugfs_reg_access(struct iio_dev *indio_dev,
- unsigned int reg, unsigned int writeval, unsigned int *readval);
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval);
#else
diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h
index ccd2ceae7b25..22874da0c8be 100644
--- a/include/linux/iio/kfifo_buf.h
+++ b/include/linux/iio/kfifo_buf.h
@@ -5,6 +5,7 @@
struct iio_buffer;
struct iio_buffer_setup_ops;
struct iio_dev;
+struct iio_dev_attr;
struct device;
struct iio_buffer *iio_kfifo_allocate(void);
@@ -12,11 +13,10 @@ void iio_kfifo_free(struct iio_buffer *r);
int devm_iio_kfifo_buffer_setup_ext(struct device *dev,
struct iio_dev *indio_dev,
- int mode_flags,
const struct iio_buffer_setup_ops *setup_ops,
- const struct attribute **buffer_attrs);
+ const struct iio_dev_attr **buffer_attrs);
-#define devm_iio_kfifo_buffer_setup(dev, indio_dev, mode_flags, setup_ops) \
- devm_iio_kfifo_buffer_setup_ext((dev), (indio_dev), (mode_flags), (setup_ops), NULL)
+#define devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops) \
+ devm_iio_kfifo_buffer_setup_ext((dev), (indio_dev), (setup_ops), NULL)
#endif
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
index eff1e6b2595c..0f7fe7b522e3 100644
--- a/include/linux/iio/sw_device.h
+++ b/include/linux/iio/sw_device.h
@@ -51,9 +51,6 @@ void iio_unregister_sw_device_type(struct iio_sw_device_type *dt);
struct iio_sw_device *iio_sw_device_create(const char *, const char *);
void iio_sw_device_destroy(struct iio_sw_device *);
-int iio_sw_device_type_configfs_register(struct iio_sw_device_type *dt);
-void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt);
-
static inline
void iio_swd_group_init_type_name(struct iio_sw_device *d,
const char *name,
diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h
index 47de2443e984..bc77f88df303 100644
--- a/include/linux/iio/sw_trigger.h
+++ b/include/linux/iio/sw_trigger.h
@@ -51,9 +51,6 @@ void iio_unregister_sw_trigger_type(struct iio_sw_trigger_type *tt);
struct iio_sw_trigger *iio_sw_trigger_create(const char *, const char *);
void iio_sw_trigger_destroy(struct iio_sw_trigger *);
-int iio_sw_trigger_type_configfs_register(struct iio_sw_trigger_type *tt);
-void iio_sw_trigger_type_configfs_unregister(struct iio_sw_trigger_type *tt);
-
static inline
void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
const char *name,
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index e51fba66de4b..de5bb125815c 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -97,6 +97,17 @@ struct iio_const_attr {
= { .string = _string, \
.dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)}
+#define IIO_STATIC_CONST_DEVICE_ATTR(_name, _string) \
+ static ssize_t iio_const_dev_attr_show_##_name( \
+ struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+ { \
+ return sysfs_emit(buf, "%s\n", _string); \
+ } \
+ static IIO_DEVICE_ATTR(_name, 0444, \
+ iio_const_dev_attr_show_##_name, NULL, 0)
+
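For example (attribute name and value illustrative), this expands to a generated show function plus a read-only struct iio_dev_attr named iio_dev_attr_in_voltage_scale_available:

IIO_STATIC_CONST_DEVICE_ATTR(in_voltage_scale_available, "0.5 1 2");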
/* Generic attributes of one type or another */
/**
diff --git a/include/linux/iio/timer/stm32-lptim-trigger.h b/include/linux/iio/timer/stm32-lptim-trigger.h
index a34dcf6a6001..ce3cf0addb2e 100644
--- a/include/linux/iio/timer/stm32-lptim-trigger.h
+++ b/include/linux/iio/timer/stm32-lptim-trigger.h
@@ -14,6 +14,15 @@
#define LPTIM1_OUT "lptim1_out"
#define LPTIM2_OUT "lptim2_out"
#define LPTIM3_OUT "lptim3_out"
+#define LPTIM4_OUT "lptim4_out"
+#define LPTIM5_OUT "lptim5_out"
+
+#define LPTIM1_CH1 "lptim1_ch1"
+#define LPTIM1_CH2 "lptim1_ch2"
+#define LPTIM2_CH1 "lptim2_ch1"
+#define LPTIM2_CH2 "lptim2_ch2"
+#define LPTIM3_CH1 "lptim3_ch1"
+#define LPTIM4_CH1 "lptim4_ch1"
#if IS_REACHABLE(CONFIG_IIO_STM32_LPTIMER_TRIGGER)
bool is_stm32_lptim_trigger(struct iio_trigger *trig);
diff --git a/include/linux/iio/timer/stm32-timer-trigger.h b/include/linux/iio/timer/stm32-timer-trigger.h
index 37572e4dc73a..1ee237b56183 100644
--- a/include/linux/iio/timer/stm32-timer-trigger.h
+++ b/include/linux/iio/timer/stm32-timer-trigger.h
@@ -72,6 +72,12 @@
#define TIM17_OC1 "tim17_oc1"
+#define TIM20_OC1 "tim20_oc1"
+#define TIM20_OC2 "tim20_oc2"
+#define TIM20_OC3 "tim20_oc3"
+#define TIM20_TRGO "tim20_trgo"
+#define TIM20_TRGO2 "tim20_trgo2"
+
#if IS_REACHABLE(CONFIG_IIO_STM32_TIMER_TRIGGER)
bool is_stm32_timer_trigger(struct iio_trigger *trig);
#else
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 096f68dd2e0c..bce3b1788199 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -55,6 +55,7 @@ struct iio_trigger_ops {
* @attached_own_device:[INTERN] if we are using our own device as trigger,
* i.e. if we registered a poll function to the same
* device as the one providing the trigger.
+ * @reenable_work: [INTERN] work item used to ensure reenable can sleep.
**/
struct iio_trigger {
const struct iio_trigger_ops *ops;
@@ -74,6 +75,7 @@ struct iio_trigger {
unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)];
struct mutex pool_lock;
bool attached_own_device;
+ struct work_struct reenable_work;
};
@@ -91,6 +93,11 @@ static inline void iio_trigger_put(struct iio_trigger *trig)
static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
{
get_device(&trig->dev);
+
+ WARN_ONCE(list_empty(&trig->list),
+ "Getting non-registered iio trigger %s is prohibited\n",
+ trig->name);
+
__module_get(trig->owner);
return trig;
@@ -124,16 +131,10 @@ static inline void *iio_trigger_get_drvdata(struct iio_trigger *trig)
* iio_trigger_register() - register a trigger with the IIO core
* @trig_info: trigger to be registered
**/
-#define iio_trigger_register(trig_info) \
- __iio_trigger_register((trig_info), THIS_MODULE)
-int __iio_trigger_register(struct iio_trigger *trig_info,
- struct module *this_mod);
+int iio_trigger_register(struct iio_trigger *trig_info);
-#define devm_iio_trigger_register(dev, trig_info) \
- __devm_iio_trigger_register((dev), (trig_info), THIS_MODULE)
-int __devm_iio_trigger_register(struct device *dev,
- struct iio_trigger *trig_info,
- struct module *this_mod);
+int devm_iio_trigger_register(struct device *dev,
+ struct iio_trigger *trig_info);
/**
* iio_trigger_unregister() - unregister a trigger from the core
@@ -150,19 +151,18 @@ void iio_trigger_unregister(struct iio_trigger *trig_info);
**/
int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig);
-/**
- * iio_trigger_poll() - called on a trigger occurring
- * @trig: trigger which occurred
- *
- * Typically called in relevant hardware interrupt handler.
- **/
void iio_trigger_poll(struct iio_trigger *trig);
-void iio_trigger_poll_chained(struct iio_trigger *trig);
+void iio_trigger_poll_nested(struct iio_trigger *trig);
irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private);
-__printf(2, 3)
-struct iio_trigger *iio_trigger_alloc(struct device *parent, const char *fmt, ...);
+#define iio_trigger_alloc(parent, fmt, ...) \
+ __iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
+
+__printf(3, 4)
+struct iio_trigger *__iio_trigger_alloc(struct device *parent,
+ struct module *this_mod,
+ const char *fmt, ...);
void iio_trigger_free(struct iio_trigger *trig);
/**
@@ -171,6 +171,7 @@ void iio_trigger_free(struct iio_trigger *trig);
*/
bool iio_trigger_using_own(struct iio_dev *indio_dev);
+int iio_validate_own_trigger(struct iio_dev *idev, struct iio_trigger *trig);
int iio_trigger_validate_own_device(struct iio_trigger *trig,
struct iio_dev *indio_dev);
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
index 7f154d1f8739..29e1fe146879 100644
--- a/include/linux/iio/triggered_buffer.h
+++ b/include/linux/iio/triggered_buffer.h
@@ -2,30 +2,37 @@
#ifndef _LINUX_IIO_TRIGGERED_BUFFER_H_
#define _LINUX_IIO_TRIGGERED_BUFFER_H_
+#include <linux/iio/buffer.h>
#include <linux/interrupt.h>
-struct attribute;
struct iio_dev;
+struct iio_dev_attr;
struct iio_buffer_setup_ops;
int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
+ enum iio_buffer_direction direction,
const struct iio_buffer_setup_ops *setup_ops,
- const struct attribute **buffer_attrs);
+ const struct iio_dev_attr **buffer_attrs);
void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
#define iio_triggered_buffer_setup(indio_dev, h, thread, setup_ops) \
- iio_triggered_buffer_setup_ext((indio_dev), (h), (thread), (setup_ops), NULL)
+ iio_triggered_buffer_setup_ext((indio_dev), (h), (thread), \
+ IIO_BUFFER_DIRECTION_IN, (setup_ops), \
+ NULL)
int devm_iio_triggered_buffer_setup_ext(struct device *dev,
struct iio_dev *indio_dev,
irqreturn_t (*h)(int irq, void *p),
irqreturn_t (*thread)(int irq, void *p),
+ enum iio_buffer_direction direction,
const struct iio_buffer_setup_ops *ops,
- const struct attribute **buffer_attrs);
+ const struct iio_dev_attr **buffer_attrs);
#define devm_iio_triggered_buffer_setup(dev, indio_dev, h, thread, setup_ops) \
- devm_iio_triggered_buffer_setup_ext((dev), (indio_dev), (h), (thread), (setup_ops), NULL)
+ devm_iio_triggered_buffer_setup_ext((dev), (indio_dev), (h), (thread), \
+ IIO_BUFFER_DIRECTION_IN, \
+ (setup_ops), NULL)
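An illustrative probe call using the extended form directly, e.g. for an output (DAC) buffer (handler and setup ops hypothetical):

ret = devm_iio_triggered_buffer_setup_ext(dev, indio_dev,
					  NULL, my_trigger_handler,
					  IIO_BUFFER_DIRECTION_OUT,
					  &my_setup_ops, NULL);
if (ret)
	return ret;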
#endif
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 84b3f8175cc6..34eebad12d2c 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -17,6 +17,10 @@ enum iio_event_info {
IIO_EV_INFO_HIGH_PASS_FILTER_3DB,
IIO_EV_INFO_LOW_PASS_FILTER_3DB,
IIO_EV_INFO_TIMEOUT,
+ IIO_EV_INFO_RESET_TIMEOUT,
+ IIO_EV_INFO_TAP2_MIN_DELAY,
+ IIO_EV_INFO_RUNNING_PERIOD,
+ IIO_EV_INFO_RUNNING_COUNT,
};
#define IIO_VAL_INT 1
@@ -24,6 +28,7 @@ enum iio_event_info {
#define IIO_VAL_INT_PLUS_NANO 3
#define IIO_VAL_INT_PLUS_MICRO_DB 4
#define IIO_VAL_INT_MULTIPLE 5
+#define IIO_VAL_INT_64 6 /* 64-bit data, val is lower 32 bits */
#define IIO_VAL_FRACTIONAL 10
#define IIO_VAL_FRACTIONAL_LOG2 11
#define IIO_VAL_CHAR 12
@@ -62,6 +67,10 @@ enum iio_chan_info_enum {
IIO_CHAN_INFO_OVERSAMPLING_RATIO,
IIO_CHAN_INFO_THERMOCOUPLE_TYPE,
IIO_CHAN_INFO_CALIBAMBIENT,
+ IIO_CHAN_INFO_ZEROPOINT,
+ IIO_CHAN_INFO_TROUGH,
+ IIO_CHAN_INFO_CONVDELAY,
+ IIO_CHAN_INFO_POWERFACTOR,
};
#endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 61d5723ec303..8e29cb4e6a01 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -11,32 +11,18 @@
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/kexec.h>
+#include <crypto/hash_info.h>
struct linux_binprm;
#ifdef CONFIG_IMA
-extern int ima_bprm_check(struct linux_binprm *bprm);
-extern int ima_file_check(struct file *file, int mask);
-extern void ima_post_create_tmpfile(struct user_namespace *mnt_userns,
- struct inode *inode);
-extern void ima_file_free(struct file *file);
-extern int ima_file_mmap(struct file *file, unsigned long prot);
-extern int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot);
-extern int ima_load_data(enum kernel_load_data_id id, bool contents);
-extern int ima_post_load_data(char *buf, loff_t size,
- enum kernel_load_data_id id, char *description);
-extern int ima_read_file(struct file *file, enum kernel_read_file_id id,
- bool contents);
-extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
- enum kernel_read_file_id id);
-extern void ima_post_path_mknod(struct user_namespace *mnt_userns,
- struct dentry *dentry);
+extern enum hash_algo ima_get_current_hash_algo(void);
extern int ima_file_hash(struct file *file, char *buf, size_t buf_size);
extern int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size);
extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size);
-extern void ima_measure_critical_data(const char *event_label,
- const char *event_name,
- const void *buf, size_t buf_len,
- bool hash);
+extern int ima_measure_critical_data(const char *event_label,
+ const char *event_name,
+ const void *buf, size_t buf_len,
+ bool hash, u8 *digest, size_t digest_len);
#ifdef CONFIG_IMA_APPRAISE_BOOTPARAM
extern void ima_appraise_parse_cmdline(void);
@@ -46,103 +32,59 @@ static inline void ima_appraise_parse_cmdline(void) {}
#ifdef CONFIG_IMA_KEXEC
extern void ima_add_kexec_buffer(struct kimage *image);
-#endif
-
-#ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT
-extern bool arch_ima_get_secureboot(void);
-extern const char * const *arch_get_ima_policy(void);
+extern void ima_kexec_post_load(struct kimage *image);
#else
-static inline bool arch_ima_get_secureboot(void)
-{
- return false;
-}
-
-static inline const char * const *arch_get_ima_policy(void)
-{
- return NULL;
-}
+static inline void ima_kexec_post_load(struct kimage *image) {}
#endif
#else
-static inline int ima_bprm_check(struct linux_binprm *bprm)
-{
- return 0;
-}
-
-static inline int ima_file_check(struct file *file, int mask)
+static inline enum hash_algo ima_get_current_hash_algo(void)
{
- return 0;
+ return HASH_ALGO__LAST;
}
-static inline void ima_post_create_tmpfile(struct user_namespace *mnt_userns,
- struct inode *inode)
-{
-}
-
-static inline void ima_file_free(struct file *file)
-{
- return;
-}
-
-static inline int ima_file_mmap(struct file *file, unsigned long prot)
-{
- return 0;
-}
-
-static inline int ima_file_mprotect(struct vm_area_struct *vma,
- unsigned long prot)
+static inline int ima_file_hash(struct file *file, char *buf, size_t buf_size)
{
- return 0;
+ return -EOPNOTSUPP;
}
-static inline int ima_load_data(enum kernel_load_data_id id, bool contents)
+static inline int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size)
{
- return 0;
+ return -EOPNOTSUPP;
}
-static inline int ima_post_load_data(char *buf, loff_t size,
- enum kernel_load_data_id id,
- char *description)
-{
- return 0;
-}
+static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {}
-static inline int ima_read_file(struct file *file, enum kernel_read_file_id id,
- bool contents)
+static inline int ima_measure_critical_data(const char *event_label,
+ const char *event_name,
+ const void *buf, size_t buf_len,
+ bool hash, u8 *digest,
+ size_t digest_len)
{
- return 0;
+ return -ENOENT;
}
-static inline int ima_post_read_file(struct file *file, void *buf, loff_t size,
- enum kernel_read_file_id id)
-{
- return 0;
-}
+#endif /* CONFIG_IMA */
-static inline void ima_post_path_mknod(struct user_namespace *mnt_userns,
- struct dentry *dentry)
-{
- return;
-}
+#ifdef CONFIG_HAVE_IMA_KEXEC
+int __init ima_free_kexec_buffer(void);
+int __init ima_get_kexec_buffer(void **addr, size_t *size);
+#endif
-static inline int ima_file_hash(struct file *file, char *buf, size_t buf_size)
+#ifdef CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT
+extern bool arch_ima_get_secureboot(void);
+extern const char * const *arch_get_ima_policy(void);
+#else
+static inline bool arch_ima_get_secureboot(void)
{
- return -EOPNOTSUPP;
+ return false;
}
-static inline int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size)
+static inline const char * const *arch_get_ima_policy(void)
{
- return -EOPNOTSUPP;
+ return NULL;
}
-
-static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {}
-
-static inline void ima_measure_critical_data(const char *event_label,
- const char *event_name,
- const void *buf, size_t buf_len,
- bool hash) {}
-
-#endif /* CONFIG_IMA */
+#endif
#ifndef CONFIG_IMA_KEXEC
struct kimage;
@@ -151,52 +93,13 @@ static inline void ima_add_kexec_buffer(struct kimage *image)
{}
#endif
-#ifdef CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS
-extern void ima_post_key_create_or_update(struct key *keyring,
- struct key *key,
- const void *payload, size_t plen,
- unsigned long flags, bool create);
-#else
-static inline void ima_post_key_create_or_update(struct key *keyring,
- struct key *key,
- const void *payload,
- size_t plen,
- unsigned long flags,
- bool create) {}
-#endif /* CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS */
-
#ifdef CONFIG_IMA_APPRAISE
extern bool is_ima_appraise_enabled(void);
-extern void ima_inode_post_setattr(struct user_namespace *mnt_userns,
- struct dentry *dentry);
-extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
- const void *xattr_value, size_t xattr_value_len);
-extern int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name);
#else
static inline bool is_ima_appraise_enabled(void)
{
return 0;
}
-
-static inline void ima_inode_post_setattr(struct user_namespace *mnt_userns,
- struct dentry *dentry)
-{
- return;
-}
-
-static inline int ima_inode_setxattr(struct dentry *dentry,
- const char *xattr_name,
- const void *xattr_value,
- size_t xattr_value_len)
-{
- return 0;
-}
-
-static inline int ima_inode_removexattr(struct dentry *dentry,
- const char *xattr_name)
-{
- return 0;
-}
#endif /* CONFIG_IMA_APPRAISE */
#if defined(CONFIG_IMA_APPRAISE) && defined(CONFIG_INTEGRITY_TRUSTED_KEYRING)
diff --git a/include/linux/in6.h b/include/linux/in6.h
index 0777a21cbf86..403f926d33d8 100644
--- a/include/linux/in6.h
+++ b/include/linux/in6.h
@@ -18,6 +18,13 @@
#include <uapi/linux/in6.h>
+/* Large enough to hold both sockaddr_in and sockaddr_in6. */
+struct sockaddr_inet {
+ unsigned short sa_family;
+ char sa_data[sizeof(struct sockaddr_in6) -
+ sizeof(unsigned short)];
+};
+
/* IPv6 Wildcard Address (::) and Loopback Address (::1) defined in RFC2553
* NOTE: Be aware the IN6ADDR_* constants and in6addr_* externals are defined
* in network byte order, not in host byte order as are the IPv4 equivalents
diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
index c1c76a70a6ce..35227d47cfc9 100644
--- a/include/linux/indirect_call_wrapper.h
+++ b/include/linux/indirect_call_wrapper.h
@@ -2,7 +2,7 @@
#ifndef _LINUX_INDIRECT_CALL_WRAPPER_H
#define _LINUX_INDIRECT_CALL_WRAPPER_H
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
/*
* INDIRECT_CALL_$NR - wrapper for indirect calls with $NR known builtin
@@ -11,7 +11,7 @@
* @__VA_ARGS__: arguments for @f
*
* Avoid retpoline overhead for known builtin, checking @f vs each of them and
- * eventually invoking directly the builtin function. The functions are check
+ * eventually invoking directly the builtin function. The functions are checked
* in the given order. Fallback to the indirect call.
*/
#define INDIRECT_CALL_1(f, f1, ...) \
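INDIRECT_CALL_1() expands to roughly likely(f == f1) ? f1(args) : f(args), so the known builtin is invoked directly; a dispatch sketch with hypothetical names:

/* Direct call when the handler is the known builtin, else indirect. */
ret = INDIRECT_CALL_1(proto->handler, my_builtin_rcv, skb);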
diff --git a/include/linux/inet.h b/include/linux/inet.h
index bd8276e96e60..9158772f3559 100644
--- a/include/linux/inet.h
+++ b/include/linux/inet.h
@@ -55,6 +55,6 @@ extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char
extern int inet_pton_with_scope(struct net *net, unsigned short af,
const char *src, const char *port, struct sockaddr_storage *addr);
-extern bool inet_addr_is_any(struct sockaddr *addr);
+bool inet_addr_is_any(struct sockaddr_storage *addr);
#endif /* _LINUX_INET_H */
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 84abb30a3fbb..704fd415c2b4 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -8,6 +8,7 @@
struct inet_hashinfo;
struct inet_diag_handler {
+ struct module *owner;
void (*dump)(struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r);
@@ -23,9 +24,6 @@ struct inet_diag_handler {
bool net_admin,
struct sk_buff *skb);
- size_t (*idiag_get_aux_size)(struct sock *sk,
- bool net_admin);
-
int (*destroy)(struct sk_buff *in_skb,
const struct inet_diag_req_v2 *req);
@@ -40,6 +38,11 @@ struct inet_diag_dump_data {
#define inet_diag_nla_bpf_stgs req_nlas[INET_DIAG_REQ_SK_BPF_STORAGES]
struct bpf_sk_storage_diag *bpf_stg_diag;
+ bool mark_needed; /* INET_DIAG_BC_MARK_COND present. */
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ bool cgroup_needed; /* INET_DIAG_BC_CGROUP_COND present. */
+#endif
+ bool userlocks_needed; /* INET_DIAG_BC_AUTO present. */
};
struct inet_connection_sock;
@@ -47,18 +50,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
struct sk_buff *skb, struct netlink_callback *cb,
const struct inet_diag_req_v2 *req,
u16 nlmsg_flags, bool net_admin);
-void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
- struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r);
-int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
- struct netlink_callback *cb,
- const struct inet_diag_req_v2 *req);
-
-struct sock *inet_diag_find_one_icsk(struct net *net,
- struct inet_hashinfo *hashinfo,
- const struct inet_diag_req_v2 *req);
-int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
+int inet_diag_bc_sk(const struct inet_diag_dump_data *cb_data, struct sock *sk);
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 53aa0343bf69..5730ba6b1cfa 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -24,6 +24,8 @@ struct ipv4_devconf {
struct in_device {
struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
refcount_t refcnt;
int dead;
struct in_ifaddr __rcu *ifa_list;/* IP ifaddr chain */
@@ -41,7 +43,7 @@ struct in_device {
unsigned long mr_qri; /* Query Response Interval */
unsigned char mr_qrv; /* Query Robustness Variable */
unsigned char mr_gq_running;
- unsigned char mr_ifc_count;
+ u32 mr_ifc_count;
struct timer_list mr_gq_timer; /* general query timer */
struct timer_list mr_ifc_timer; /* interface change timer */
@@ -51,13 +53,15 @@ struct in_device {
};
#define IPV4_DEVCONF(cnf, attr) ((cnf).data[IPV4_DEVCONF_ ## attr - 1])
+#define IPV4_DEVCONF_RO(cnf, attr) READ_ONCE(IPV4_DEVCONF(cnf, attr))
#define IPV4_DEVCONF_ALL(net, attr) \
IPV4_DEVCONF((*(net)->ipv4.devconf_all), attr)
+#define IPV4_DEVCONF_ALL_RO(net, attr) READ_ONCE(IPV4_DEVCONF_ALL(net, attr))
-static inline int ipv4_devconf_get(struct in_device *in_dev, int index)
+static inline int ipv4_devconf_get(const struct in_device *in_dev, int index)
{
index--;
- return in_dev->cnf.data[index];
+ return READ_ONCE(in_dev->cnf.data[index]);
}
static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
@@ -65,7 +69,7 @@ static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
{
index--;
set_bit(index, in_dev->cnf.state);
- in_dev->cnf.data[index] = val;
+ WRITE_ONCE(in_dev->cnf.data[index], val);
}
static inline void ipv4_devconf_setall(struct in_device *in_dev)
@@ -79,18 +83,18 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
ipv4_devconf_set((in_dev), IPV4_DEVCONF_ ## attr, (val))
#define IN_DEV_ANDCONF(in_dev, attr) \
- (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \
+ (IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), attr) && \
IN_DEV_CONF_GET((in_dev), attr))
#define IN_DEV_NET_ORCONF(in_dev, net, attr) \
- (IPV4_DEVCONF_ALL(net, attr) || \
+ (IPV4_DEVCONF_ALL_RO(net, attr) || \
IN_DEV_CONF_GET((in_dev), attr))
#define IN_DEV_ORCONF(in_dev, attr) \
IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr)
#define IN_DEV_MAXCONF(in_dev, attr) \
- (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \
+ (max(IPV4_DEVCONF_ALL_RO(dev_net(in_dev->dev), attr), \
IN_DEV_CONF_GET((in_dev), attr)))
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
@@ -129,13 +133,15 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
IN_DEV_ORCONF((in_dev), IGNORE_ROUTES_WITH_LINKDOWN)
#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER)
-#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT)
+#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ACCEPT)
#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
#define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
+#define IN_DEV_ARP_EVICT_NOCARRIER(in_dev) IN_DEV_ANDCONF((in_dev), \
+ ARP_EVICT_NOCARRIER)
struct in_ifaddr {
- struct hlist_node hash;
+ struct hlist_node addr_lst;
struct in_ifaddr __rcu *ifa_next;
struct in_device *ifa_dev;
struct rcu_head rcu_head;
@@ -146,6 +152,7 @@ struct in_ifaddr {
__be32 ifa_broadcast;
unsigned char ifa_scope;
unsigned char ifa_prefixlen;
+ unsigned char ifa_proto;
__u32 ifa_flags;
char ifa_label[IFNAMSIZ];
@@ -178,6 +185,15 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *);
+#ifdef CONFIG_INET
+int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size);
+#else
+static inline int inet_gifconf(struct net_device *dev, char __user *buf,
+ int len, int size)
+{
+ return 0;
+}
+#endif
void devinet_init(void);
struct in_device *inetdev_by_index(struct net *, int);
__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
@@ -210,6 +226,10 @@ static __inline__ bool bad_mask(__be32 mask, __be32 addr)
for (ifa = rtnl_dereference((in_dev)->ifa_list); ifa; \
ifa = rtnl_dereference(ifa->ifa_next))
+#define in_dev_for_each_ifa_rtnl_net(net, ifa, in_dev) \
+ for (ifa = rtnl_net_dereference(net, (in_dev)->ifa_list); ifa; \
+ ifa = rtnl_net_dereference(net, ifa->ifa_next))
+
#define in_dev_for_each_ifa_rcu(ifa, in_dev) \
for (ifa = rcu_dereference((in_dev)->ifa_list); ifa; \
ifa = rcu_dereference(ifa->ifa_next))
@@ -236,6 +256,11 @@ static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
return rtnl_dereference(dev->ip_ptr);
}
+static inline struct in_device *__in_dev_get_rtnl_net(const struct net_device *dev)
+{
+ return rtnl_net_dereference(dev_net(dev), dev->ip_ptr);
+}
+
/* called with rcu_read_lock or rtnl held */
static inline bool ip_ignore_linkdown(const struct net_device *dev)
{
diff --git a/include/linux/init.h b/include/linux/init.h
index 045ad1650ed1..40331923b9f4 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -2,16 +2,11 @@
#ifndef _LINUX_INIT_H
#define _LINUX_INIT_H
+#include <linux/build_bug.h>
#include <linux/compiler.h>
+#include <linux/stringify.h>
#include <linux/types.h>
-/* Built-in __init functions needn't be compiled with retpoline */
-#if defined(__noretpoline) && !defined(MODULE)
-#define __noinitretpoline __noretpoline
-#else
-#define __noinitretpoline
-#endif
-
/* These macros are used to mark some functions or
* initialized data (doesn't apply to uninitialized data)
* as `initialization' functions. The kernel can take this
@@ -47,7 +42,8 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
-#define __init __section(".init.text") __cold __latent_entropy __noinitretpoline __nocfi
+#define __init __section(".init.text") __cold __latent_entropy \
+ __no_kstack_erase
#define __initdata __section(".init.data")
#define __initconst __section(".init.rodata")
#define __exitdata __section(".exit.data")
@@ -82,14 +78,15 @@
#define __exit __section(".exit.text") __exitused __cold notrace
-/* Used for MEMORY_HOTPLUG */
-#define __meminit __section(".meminit.text") __cold notrace \
- __latent_entropy
-#define __meminitdata __section(".meminit.data")
-#define __meminitconst __section(".meminit.rodata")
-#define __memexit __section(".memexit.text") __exitused __cold notrace
-#define __memexitdata __section(".memexit.data")
-#define __memexitconst __section(".memexit.rodata")
+#ifdef CONFIG_MEMORY_HOTPLUG
+#define __meminit
+#define __meminitdata
+#define __meminitconst
+#else
+#define __meminit __init
+#define __meminitdata __initdata
+#define __meminitconst __initconst
+#endif
/* For assembly routines */
#define __HEAD .section ".head.text","ax"
@@ -100,10 +97,6 @@
#define __INITRODATA .section ".init.rodata","a",%progbits
#define __FINITDATA .previous
-#define __MEMINIT .section ".meminit.text", "ax"
-#define __MEMINITDATA .section ".meminit.data", "aw"
-#define __MEMINITRODATA .section ".meminit.rodata", "a"
-
/* silence warnings when references are OK */
#define __REF .section ".ref.text", "ax"
#define __REFDATA .section ".ref.data", "aw"
@@ -134,7 +127,7 @@ static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
extern initcall_entry_t __con_initcall_start[], __con_initcall_end[];
-/* Used for contructor calls. */
+/* Used for constructor calls. */
typedef void (*ctor_fn_t)(void);
struct file_system_type;
@@ -143,25 +136,46 @@ struct file_system_type;
extern int do_one_initcall(initcall_t fn);
extern char __initdata boot_command_line[];
extern char *saved_command_line;
+extern unsigned int saved_command_line_len;
extern unsigned int reset_devices;
/* used by init/main.c */
void setup_arch(char **);
void prepare_namespace(void);
void __init init_rootfs(void);
+
+void init_IRQ(void);
+void time_init(void);
+void poking_init(void);
+void pgtable_cache_init(void);
+
+extern initcall_entry_t __initcall_start[];
+extern initcall_entry_t __initcall0_start[];
+extern initcall_entry_t __initcall1_start[];
+extern initcall_entry_t __initcall2_start[];
+extern initcall_entry_t __initcall3_start[];
+extern initcall_entry_t __initcall4_start[];
+extern initcall_entry_t __initcall5_start[];
+extern initcall_entry_t __initcall6_start[];
+extern initcall_entry_t __initcall7_start[];
+extern initcall_entry_t __initcall_end[];
+
extern struct file_system_type rootfs_fs_type;
-#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
extern bool rodata_enabled;
-#endif
-#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void);
-#endif
extern void (*late_time_init)(void);
extern bool initcall_debug;
+#ifdef MODULE
+extern struct module __this_module;
+#define THIS_MODULE (&__this_module)
+#else
+#define THIS_MODULE ((struct module *)0)
+#endif
+
#endif
#ifndef MODULE
@@ -186,12 +200,13 @@ extern bool initcall_debug;
/* Format: <modname>__<counter>_<line>_<fn> */
#define __initcall_id(fn) \
+ __PASTE(kmod_, \
__PASTE(__KBUILD_MODNAME, \
__PASTE(__, \
__PASTE(__COUNTER__, \
__PASTE(_, \
__PASTE(__LINE__, \
- __PASTE(_, fn))))))
+ __PASTE(_, fn)))))))
/* Format: __<prefix>__<iid><id> */
#define __initcall_name(prefix, __iid, id) \
@@ -220,8 +235,8 @@ extern bool initcall_debug;
__initcall_name(initstub, __iid, id)
#define __define_initcall_stub(__stub, fn) \
- int __init __cficanonical __stub(void); \
- int __init __cficanonical __stub(void) \
+ int __init __stub(void); \
+ int __init __stub(void) \
{ \
return fn(); \
} \
@@ -242,7 +257,8 @@ extern bool initcall_debug;
asm(".section \"" __sec "\", \"a\" \n" \
__stringify(__name) ": \n" \
".long " __stringify(__stub) " - . \n" \
- ".previous \n");
+ ".previous \n"); \
+ static_assert(__same_type(initcall_t, &fn));
#else
#define ____define_initcall(fn, __unused, __name, __sec) \
static initcall_t __name __used \
@@ -305,6 +321,8 @@ struct obs_kernel_param {
int early;
};
+extern const struct obs_kernel_param __setup_start[], __setup_end[];
+
/*
* Only for really core code. See moduleparam.h for the normal way.
*
@@ -319,12 +337,19 @@ struct obs_kernel_param {
__aligned(__alignof__(struct obs_kernel_param)) \
= { __setup_str_##unique_id, fn, early }
+/*
+ * NOTE: __setup function return values:
+ * @fn returns 1 (or non-zero) if the option argument is "handled"
+ * and returns 0 if the option argument is "not handled".
+ */
#define __setup(str, fn) \
__setup_param(str, fn, fn, 0)
/*
- * NOTE: fn is as per module_param, not __setup!
- * Emits warning if fn returns non-zero.
+ * NOTE: @fn is as per module_param, not __setup!
+ * I.e., @fn returns 0 on success or non-zero on error
+ * (it may return a -errno value, but the exact value does not matter).
+ * Emits warning if @fn returns non-zero.
*/
#define early_param(str, fn) \
__setup_param(str, fn, fn, 1)
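A sketch of both flavours under the return conventions above (option names and variables hypothetical):

static int __init my_opt_setup(char *str)
{
	my_opt_enabled = !strcmp(str, "on");
	return 1;	/* __setup: non-zero means "handled" */
}
__setup("myopt=", my_opt_setup);

static int __init my_early_setup(char *str)
{
	/* early_param: 0 means no error; kstrtouint() returns 0 or -errno */
	return kstrtouint(str, 0, &my_early_val);
}
early_param("my_early", my_early_setup);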
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 40fc5813cf93..a6cb241ea00c 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -25,7 +25,6 @@
extern struct files_struct init_files;
extern struct fs_struct init_fs;
extern struct nsproxy init_nsproxy;
-extern struct cred init_cred;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define INIT_PREV_CPUTIME(x) .prev_cputime = { \
@@ -37,13 +36,6 @@ extern struct cred init_cred;
#define INIT_TASK_COMM "swapper"
-/* Attach to the init_task data structure for proper alignment */
-#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
-#define __init_task_data __section(".data..init_task")
-#else
-#define __init_task_data /**/
-#endif
-
/* Attach to the thread_info data structure for proper alignment */
#define __init_thread_info __section(".data..init_thread_info")
diff --git a/include/linux/initrd.h b/include/linux/initrd.h
index 1bbe9af48dc3..f1a1f4c92ded 100644
--- a/include/linux/initrd.h
+++ b/include/linux/initrd.h
@@ -29,8 +29,6 @@ static inline void wait_for_initramfs(void) {}
extern phys_addr_t phys_initrd_start;
extern unsigned long phys_initrd_size;
-extern unsigned int real_root_dev;
-
extern char __initramfs_start[];
extern unsigned long __initramfs_size;
diff --git a/include/linux/inotify.h b/include/linux/inotify.h
index 6a24905f6e1e..8d20caa1b268 100644
--- a/include/linux/inotify.h
+++ b/include/linux/inotify.h
@@ -7,11 +7,8 @@
#ifndef _LINUX_INOTIFY_H
#define _LINUX_INOTIFY_H
-#include <linux/sysctl.h>
#include <uapi/linux/inotify.h>
-extern struct ctl_table inotify_table[]; /* for sysctl */
-
#define ALL_INOTIFY_BITS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \
IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \
IN_MOVED_TO | IN_CREATE | IN_DELETE | \
diff --git a/include/linux/input.h b/include/linux/input.h
index 0354b298d874..7d7cb0593a63 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -275,7 +275,8 @@ struct input_handle;
* it may not sleep
* @events: event sequence handler. This method is being called by
* input core with interrupts disabled and dev->event_lock
- * spinlock held and so it may not sleep
+ * spinlock held and so it may not sleep. The method must return
+ * the number of events passed to it.
* @filter: similar to @event; separates normal event handlers from
* "filters".
* @match: called after comparing device's id with handler's id_table
@@ -285,6 +286,10 @@ struct input_handle;
* @start: starts handler for given handle. This function is called by
* input core right after connect() method and also when a process
* that "grabbed" a device releases it
+ * @passive_observer: set to %true by drivers only interested in observing
+ * the data stream from devices when other users are present. Such
+ * drivers will not cause the underlying hardware device to be started
+ * when input_open_device() is called for their handles
* @legacy_minors: set to %true by drivers using legacy minor ranges
* @minor: beginning of range of 32 legacy minors for devices this driver
* can provide
@@ -312,14 +317,15 @@ struct input_handler {
void *private;
void (*event)(struct input_handle *handle, unsigned int type, unsigned int code, int value);
- void (*events)(struct input_handle *handle,
- const struct input_value *vals, unsigned int count);
+ unsigned int (*events)(struct input_handle *handle,
+ struct input_value *vals, unsigned int count);
bool (*filter)(struct input_handle *handle, unsigned int type, unsigned int code, int value);
bool (*match)(struct input_handler *handler, struct input_dev *dev);
int (*connect)(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id);
void (*disconnect)(struct input_handle *handle);
void (*start)(struct input_handle *handle);
+ bool passive_observer;
bool legacy_minors;
int minor;
const char *name;
@@ -338,12 +344,16 @@ struct input_handler {
* @name: name given to the handle by handler that created it
* @dev: input device the handle is attached to
* @handler: handler that works with the device through this handle
+ * @handle_events: event sequence handler. It is set up by the input core
+ * according to event handling method specified in the @handler. See
+ * input_handle_setup_event_handler().
+ * This method is being called by the input core with interrupts disabled
+ * and dev->event_lock spinlock held and so it may not sleep.
* @d_node: used to put the handle on device's list of attached handles
* @h_node: used to put the handle on handler's list of handles from which
* it gets events
*/
struct input_handle {
-
void *private;
int open;
@@ -352,6 +362,10 @@ struct input_handle {
struct input_dev *dev;
struct input_handler *handler;
+ unsigned int (*handle_events)(struct input_handle *handle,
+ struct input_value *vals,
+ unsigned int count);
+
struct list_head d_node;
struct list_head h_node;
};
@@ -475,6 +489,8 @@ static inline void input_set_events_per_packet(struct input_dev *dev, int n_even
void input_alloc_absinfo(struct input_dev *dev);
void input_set_abs_params(struct input_dev *dev, unsigned int axis,
int min, int max, int fuzz, int flat);
+void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
+ const struct input_dev *src, unsigned int src_axis);
#define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item) \
static inline int input_abs_get_##_suffix(struct input_dev *dev, \
@@ -512,7 +528,7 @@ void input_enable_softrepeat(struct input_dev *dev, int delay, int period);
bool input_device_enabled(struct input_dev *dev);
-extern struct class input_class;
+extern const struct class input_class;
/**
* struct ff_device - force-feedback part of an input device
@@ -560,7 +576,7 @@ struct ff_device {
int max_effects;
struct ff_effect *effects;
- struct file *effect_owners[];
+ struct file *effect_owners[] __counted_by(max_effects);
};
int input_ff_create(struct input_dev *dev, unsigned int max_effects);
diff --git a/include/linux/input/as5011.h b/include/linux/input/as5011.h
index 5fba52a56cd6..5705d5de3aea 100644
--- a/include/linux/input/as5011.h
+++ b/include/linux/input/as5011.h
@@ -7,7 +7,6 @@
*/
struct as5011_platform_data {
- unsigned int button_gpio;
unsigned int axis_irq; /* irq number */
unsigned long axis_irqflags;
char xp, xn; /* threshold for x axis */
diff --git a/include/linux/input/auo-pixcir-ts.h b/include/linux/input/auo-pixcir-ts.h
deleted file mode 100644
index ed0776997a7a..000000000000
--- a/include/linux/input/auo-pixcir-ts.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Driver for AUO in-cell touchscreens
- *
- * Copyright (c) 2011 Heiko Stuebner <heiko@sntech.de>
- *
- * based on auo_touch.h from Dell Streak kernel
- *
- * Copyright (c) 2008 QUALCOMM Incorporated.
- * Copyright (c) 2008 QUALCOMM USA, INC.
- */
-
-#ifndef __AUO_PIXCIR_TS_H__
-#define __AUO_PIXCIR_TS_H__
-
-/*
- * Interrupt modes:
- * periodical: interrupt is asserted periodicaly
- * compare coordinates: interrupt is asserted when coordinates change
- * indicate touch: interrupt is asserted during touch
- */
-#define AUO_PIXCIR_INT_PERIODICAL 0x00
-#define AUO_PIXCIR_INT_COMP_COORD 0x01
-#define AUO_PIXCIR_INT_TOUCH_IND 0x02
-
-/*
- * @gpio_int interrupt gpio
- * @int_setting one of AUO_PIXCIR_INT_*
- * @init_hw hardwarespecific init
- * @exit_hw hardwarespecific shutdown
- * @x_max x-resolution
- * @y_max y-resolution
- */
-struct auo_pixcir_ts_platdata {
- int gpio_int;
- int gpio_rst;
-
- int int_setting;
-
- unsigned int x_max;
- unsigned int y_max;
-};
-
-#endif
diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h
deleted file mode 100644
index 77582ae1745a..000000000000
--- a/include/linux/input/cy8ctmg110_pdata.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_CY8CTMG110_PDATA_H
-#define _LINUX_CY8CTMG110_PDATA_H
-
-struct cy8ctmg110_pdata
-{
- int reset_pin; /* Reset pin is wired to this GPIO (optional) */
- int irq_pin; /* IRQ pin is wired to this GPIO */
-};
-
-#endif
diff --git a/include/linux/input/cyttsp.h b/include/linux/input/cyttsp.h
deleted file mode 100644
index 118b9af6e01a..000000000000
--- a/include/linux/input/cyttsp.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Header file for:
- * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers.
- * For use with Cypress Txx3xx parts.
- * Supported parts include:
- * CY8CTST341
- * CY8CTMA340
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- *
- * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com)
- */
-#ifndef _CYTTSP_H_
-#define _CYTTSP_H_
-
-#define CY_SPI_NAME "cyttsp-spi"
-#define CY_I2C_NAME "cyttsp-i2c"
-/* Active Power state scanning/processing refresh interval */
-#define CY_ACT_INTRVL_DFLT 0x00 /* ms */
-/* touch timeout for the Active power */
-#define CY_TCH_TMOUT_DFLT 0xFF /* ms */
-/* Low Power state scanning/processing refresh interval */
-#define CY_LP_INTRVL_DFLT 0x0A /* ms */
-/* Active distance in pixels for a gesture to be reported */
-#define CY_ACT_DIST_DFLT 0xF8 /* pixels */
-
-#endif /* _CYTTSP_H_ */
diff --git a/include/linux/input/elan-i2c-ids.h b/include/linux/input/elan-i2c-ids.h
index 520858d12680..51cca17ee94c 100644
--- a/include/linux/input/elan-i2c-ids.h
+++ b/include/linux/input/elan-i2c-ids.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Elan I2C/SMBus Touchpad device whitelist
*
@@ -11,10 +12,6 @@
* copyright (c) 2011-2012 Cypress Semiconductor, Inc.
* copyright (c) 2011-2012 Google, Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
* Trademarks are the property of their respective owners.
*/
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 9476768c3b90..90867f44ab4d 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -3,8 +3,9 @@
#define _MATRIX_KEYPAD_H
#include <linux/types.h>
-#include <linux/input.h>
-#include <linux/of.h>
+
+struct device;
+struct input_dev;
#define MATRIX_MAX_ROWS 32
#define MATRIX_MAX_COLS 32
@@ -33,52 +34,6 @@ struct matrix_keymap_data {
unsigned int keymap_size;
};
-/**
- * struct matrix_keypad_platform_data - platform-dependent keypad data
- * @keymap_data: pointer to &matrix_keymap_data
- * @row_gpios: pointer to array of gpio numbers representing rows
- * @col_gpios: pointer to array of gpio numbers reporesenting colums
- * @num_row_gpios: actual number of row gpios used by device
- * @num_col_gpios: actual number of col gpios used by device
- * @col_scan_delay_us: delay, measured in microseconds, that is
- * needed before we can keypad after activating column gpio
- * @debounce_ms: debounce interval in milliseconds
- * @clustered_irq: may be specified if interrupts of all row/column GPIOs
- * are bundled to one single irq
- * @clustered_irq_flags: flags that are needed for the clustered irq
- * @active_low: gpio polarity
- * @wakeup: controls whether the device should be set up as wakeup
- * source
- * @no_autorepeat: disable key autorepeat
- * @drive_inactive_cols: drive inactive columns during scan, rather than
- * making them inputs.
- *
- * This structure represents platform-specific data that use used by
- * matrix_keypad driver to perform proper initialization.
- */
-struct matrix_keypad_platform_data {
- const struct matrix_keymap_data *keymap_data;
-
- const unsigned int *row_gpios;
- const unsigned int *col_gpios;
-
- unsigned int num_row_gpios;
- unsigned int num_col_gpios;
-
- unsigned int col_scan_delay_us;
-
- /* key debounce interval in milli-second */
- unsigned int debounce_ms;
-
- unsigned int clustered_irq;
- unsigned int clustered_irq_flags;
-
- bool active_low;
- bool wakeup;
- bool no_autorepeat;
- bool drive_inactive_cols;
-};
-
int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
const char *keymap_name,
unsigned int rows, unsigned int cols,
@@ -87,6 +42,4 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
int matrix_keypad_parse_properties(struct device *dev,
unsigned int *rows, unsigned int *cols);
-#define matrix_keypad_parse_of_params matrix_keypad_parse_properties
-
#endif /* _MATRIX_KEYPAD_H */
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index 3b8580bd33c1..d30286298a00 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -17,6 +17,7 @@
#define INPUT_MT_DROP_UNUSED 0x0004 /* drop contacts not seen in frame */
#define INPUT_MT_TRACK 0x0008 /* use in-kernel tracking */
#define INPUT_MT_SEMI_MT 0x0010 /* semi-mt device, finger count handled manually */
+#define INPUT_MT_TOTAL_FORCE 0x0020 /* calculate total force from slots pressure */
/**
* struct input_mt_slot - represents the state of an input MT slot
@@ -47,7 +48,7 @@ struct input_mt {
unsigned int flags;
unsigned int frame;
int *red;
- struct input_mt_slot slots[];
+ struct input_mt_slot slots[] __counted_by(num_slots);
};
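
A hedged sketch of the new INPUT_MT_TOTAL_FORCE flag from this hunk: a
multi-touch driver passes it at slot initialization so the core can derive a
total force value from the per-slot pressure (num_slots here is hypothetical
driver data):

	error = input_mt_init_slots(input, num_slots,
				    INPUT_MT_DIRECT | INPUT_MT_TOTAL_FORCE);
	if (error)
		return error;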
static inline void input_mt_set_value(struct input_mt_slot *slot,
diff --git a/include/linux/input/navpoint.h b/include/linux/input/navpoint.h
deleted file mode 100644
index d464ffb4db52..000000000000
--- a/include/linux/input/navpoint.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 Paul Parsons <lost.distance@yahoo.com>
- */
-
-struct navpoint_platform_data {
- int port; /* PXA SSP port for pxa_ssp_request() */
- int gpio; /* GPIO for power on/off */
-};
diff --git a/include/linux/input/touch-overlay.h b/include/linux/input/touch-overlay.h
new file mode 100644
index 000000000000..0253e554d3cd
--- /dev/null
+++ b/include/linux/input/touch-overlay.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Javier Carrasco <javier.carrasco@wolfvision.net>
+ */
+
+#ifndef _TOUCH_OVERLAY
+#define _TOUCH_OVERLAY
+
+#include <linux/types.h>
+
+struct input_dev;
+
+int touch_overlay_map(struct list_head *list, struct input_dev *input);
+
+void touch_overlay_get_touchscreen_abs(struct list_head *list, u16 *x, u16 *y);
+
+bool touch_overlay_mapped_touchscreen(struct list_head *list);
+
+bool touch_overlay_process_contact(struct list_head *list,
+ struct input_dev *input,
+ struct input_mt_pos *pos, int slot);
+
+void touch_overlay_sync_frame(struct list_head *list, struct input_dev *input);
+
+#endif
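
A hedged usage sketch for this new interface, assuming touch_overlay_map()
populates @list from firmware-described overlay objects at probe time and
touch_overlay_process_contact() returns true when a contact was consumed by
an overlay object such as a button:

	static LIST_HEAD(overlay_list);

	/* probe: */
	error = touch_overlay_map(&overlay_list, input);
	if (error)
		return error;

	/* per contact, in the interrupt handler: */
	if (!touch_overlay_process_contact(&overlay_list, input, &pos, slot))
		input_mt_report_slot_state(input, MT_TOOL_FINGER, true);

	/* once per frame: */
	touch_overlay_sync_frame(&overlay_list, input);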
diff --git a/include/linux/input/vivaldi-fmap.h b/include/linux/input/vivaldi-fmap.h
new file mode 100644
index 000000000000..7e4b7023bf04
--- /dev/null
+++ b/include/linux/input/vivaldi-fmap.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VIVALDI_FMAP_H
+#define _VIVALDI_FMAP_H
+
+#include <linux/types.h>
+
+#define VIVALDI_MAX_FUNCTION_ROW_KEYS 24
+
+/**
+ * struct vivaldi_data - Function row map data for ChromeOS Vivaldi keyboards
+ * @function_row_physmap: An array of scancodes or their equivalent (HID usage
+ * codes, encoded rows/columns, etc) for the top
+ * row function keys, in order from left to right
+ * @num_function_row_keys: The number of top row keys in a custom keyboard
+ *
+ * This structure is intended for use by ChromeOS keyboards that implement
+ * the Vivaldi keyboard function row design.
+ */
+struct vivaldi_data {
+ u32 function_row_physmap[VIVALDI_MAX_FUNCTION_ROW_KEYS];
+ unsigned int num_function_row_keys;
+};
+
+ssize_t vivaldi_function_row_physmap_show(const struct vivaldi_data *data,
+ char *buf);
+
+#endif /* _VIVALDI_FMAP_H */
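
A hedged sketch of the intended consumer: a keyboard driver embeds struct
vivaldi_data and delegates formatting of its function_row_physmap sysfs
attribute to the helper (my_kbd is a hypothetical driver structure):

	static ssize_t function_row_physmap_show(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
	{
		const struct my_kbd *kbd = dev_get_drvdata(dev);

		return vivaldi_function_row_physmap_show(&kbd->vivaldi, buf);
	}
	static DEVICE_ATTR_RO(function_row_physmap);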
diff --git a/include/linux/instruction_pointer.h b/include/linux/instruction_pointer.h
new file mode 100644
index 000000000000..aa0b3ffea935
--- /dev/null
+++ b/include/linux/instruction_pointer.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_INSTRUCTION_POINTER_H
+#define _LINUX_INSTRUCTION_POINTER_H
+
+#include <asm/linkage.h>
+
+#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+
+#ifndef _THIS_IP_
+#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+#endif
+
+#endif /* _LINUX_INSTRUCTION_POINTER_H */
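
Both macros evaluate to an unsigned long: _THIS_IP_ takes the address of a
local label at the point of expansion, while _RET_IP_ is the caller's return
address. A hedged sketch, with my_lock_acquire()/my_trace_acquire()
hypothetical:

	void my_lock_acquire(struct my_lock *lock)
	{
		my_trace_acquire(lock, _RET_IP_);	/* record who called us */
		pr_debug("acquiring at %pS\n", (void *)_THIS_IP_);
	}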
diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h
index 93e2ad67fc10..bf675a8aef8a 100644
--- a/include/linux/instrumentation.h
+++ b/include/linux/instrumentation.h
@@ -2,15 +2,18 @@
#ifndef __LINUX_INSTRUMENTATION_H
#define __LINUX_INSTRUMENTATION_H
-#if defined(CONFIG_DEBUG_ENTRY) && defined(CONFIG_STACK_VALIDATION)
+#ifdef CONFIG_NOINSTR_VALIDATION
+
+#include <linux/objtool.h>
+#include <linux/stringify.h>
/* Begin/end of an instrumentation safe region */
-#define instrumentation_begin() ({ \
- asm volatile("%c0: nop\n\t" \
- ".pushsection .discard.instr_begin\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+#define __instrumentation_begin(c) ({ \
+ asm volatile(__stringify(c) ": nop\n\t" \
+ ANNOTATE_INSTR_BEGIN(__ASM_BREF(c)) \
+ : : "i" (c)); \
})
+#define instrumentation_begin() __instrumentation_begin(__COUNTER__)
/*
* Because instrumentation_{begin,end}() can nest, objtool validation considers
@@ -43,15 +46,15 @@
* To avoid this, have _end() be a NOP instruction, this ensures it will be
* part of the condition block and does not escape.
*/
-#define instrumentation_end() ({ \
- asm volatile("%c0: nop\n\t" \
- ".pushsection .discard.instr_end\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+#define __instrumentation_end(c) ({ \
+ asm volatile(__stringify(c) ": nop\n\t" \
+ ANNOTATE_INSTR_END(__ASM_BREF(c)) \
+ : : "i" (c)); \
})
-#else
+#define instrumentation_end() __instrumentation_end(__COUNTER__)
+#else /* !CONFIG_NOINSTR_VALIDATION */
# define instrumentation_begin() do { } while(0)
# define instrumentation_end() do { } while(0)
-#endif
+#endif /* CONFIG_NOINSTR_VALIDATION */
#endif /* __LINUX_INSTRUMENTATION_H */
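
These annotations are consumed by objtool under CONFIG_NOINSTR_VALIDATION:
inside a noinstr function, any call into instrumentable code must be
bracketed by the pair. A hedged sketch of the idiom:

	noinstr void my_entry_handler(void)	/* hypothetical entry-path code */
	{
		/* ... work that must stay free of instrumentation ... */

		instrumentation_begin();
		do_regular_kernel_work();	/* hypothetical; may be traced/KASAN'd */
		instrumentation_end();
	}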
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
index 42faebbaa202..711a1f0d1a73 100644
--- a/include/linux/instrumented.h
+++ b/include/linux/instrumented.h
@@ -2,7 +2,7 @@
/*
* This header provides generic wrappers for memory access instrumentation that
- * the compiler cannot emit for: KASAN, KCSAN.
+ * the compiler cannot emit for: KASAN, KCSAN, KMSAN.
*/
#ifndef _LINUX_INSTRUMENTED_H
#define _LINUX_INSTRUMENTED_H
@@ -10,16 +10,16 @@
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>
+#include <linux/kmsan-checks.h>
#include <linux/types.h>
/**
* instrument_read - instrument regular read access
+ * @v: address of access
+ * @size: size of access
*
* Instrument a regular read access. The instrumentation should be inserted
* before the actual read happens.
- *
- * @ptr address of access
- * @size size of access
*/
static __always_inline void instrument_read(const volatile void *v, size_t size)
{
@@ -29,12 +29,11 @@ static __always_inline void instrument_read(const volatile void *v, size_t size)
/**
* instrument_write - instrument regular write access
+ * @v: address of access
+ * @size: size of access
*
* Instrument a regular write access. The instrumentation should be inserted
* before the actual write happens.
- *
- * @ptr address of access
- * @size size of access
*/
static __always_inline void instrument_write(const volatile void *v, size_t size)
{
@@ -44,12 +43,11 @@ static __always_inline void instrument_write(const volatile void *v, size_t size
/**
* instrument_read_write - instrument regular read-write access
+ * @v: address of access
+ * @size: size of access
*
 * Instrument a regular read-write access. The instrumentation should be inserted
* before the actual write happens.
- *
- * @ptr address of access
- * @size size of access
*/
static __always_inline void instrument_read_write(const volatile void *v, size_t size)
{
@@ -59,12 +57,11 @@ static __always_inline void instrument_read_write(const volatile void *v, size_t
/**
* instrument_atomic_read - instrument atomic read access
+ * @v: address of access
+ * @size: size of access
*
* Instrument an atomic read access. The instrumentation should be inserted
* before the actual read happens.
- *
- * @ptr address of access
- * @size size of access
*/
static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
{
@@ -74,12 +71,11 @@ static __always_inline void instrument_atomic_read(const volatile void *v, size_
/**
* instrument_atomic_write - instrument atomic write access
+ * @v: address of access
+ * @size: size of access
*
* Instrument an atomic write access. The instrumentation should be inserted
* before the actual write happens.
- *
- * @ptr address of access
- * @size size of access
*/
static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
{
@@ -89,12 +85,11 @@ static __always_inline void instrument_atomic_write(const volatile void *v, size
/**
* instrument_atomic_read_write - instrument atomic read-write access
+ * @v: address of access
+ * @size: size of access
*
* Instrument an atomic read-write access. The instrumentation should be
* inserted before the actual write happens.
- *
- * @ptr address of access
- * @size size of access
*/
static __always_inline void instrument_atomic_read_write(const volatile void *v, size_t size)
{
@@ -104,36 +99,118 @@ static __always_inline void instrument_atomic_read_write(const volatile void *v,
/**
* instrument_copy_to_user - instrument reads of copy_to_user
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
*
* Instrument reads from kernel memory, that are due to copy_to_user (and
* variants). The instrumentation must be inserted before the accesses.
- *
- * @to destination address
- * @from source address
- * @n number of bytes to copy
*/
static __always_inline void
instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
kcsan_check_read(from, n);
+ kmsan_copy_to_user(to, from, n, 0);
}
/**
- * instrument_copy_from_user - instrument writes of copy_from_user
+ * instrument_copy_from_user_before - add instrumentation before copy_from_user
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
*
* Instrument writes to kernel memory, that are due to copy_from_user (and
* variants). The instrumentation should be inserted before the accesses.
+ */
+static __always_inline void
+instrument_copy_from_user_before(const void *to, const void __user *from, unsigned long n)
+{
+ kasan_check_write(to, n);
+ kcsan_check_write(to, n);
+}
+
+/**
+ * instrument_copy_from_user_after - add instrumentation after copy_from_user
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ * @left: number of bytes not copied (as returned by copy_from_user)
*
- * @to destination address
- * @from source address
- * @n number of bytes to copy
+ * Instrument writes to kernel memory that are due to copy_from_user() (and
+ * variants). The instrumentation should be inserted after the accesses.
*/
static __always_inline void
-instrument_copy_from_user(const void *to, const void __user *from, unsigned long n)
+instrument_copy_from_user_after(const void *to, const void __user *from,
+ unsigned long n, unsigned long left)
+{
+ kmsan_unpoison_memory(to, n - left);
+}
+
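+
The before/after split exists because KMSAN can only mark destination bytes
initialized once the number actually copied is known. A hedged sketch of the
calling convention, modeled on the generic usercopy path:

	unsigned long res;

	instrument_copy_from_user_before(to, from, n);
	res = raw_copy_from_user(to, from, n);	/* returns bytes NOT copied */
	instrument_copy_from_user_after(to, from, n, res);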
+/**
+ * instrument_memcpy_before - add instrumentation before non-instrumented memcpy
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ *
+ * Instrument memory accesses that happen in custom memcpy implementations. The
+ * instrumentation should be inserted before the memcpy call.
+ */
+static __always_inline void instrument_memcpy_before(void *to, const void *from,
+ unsigned long n)
{
kasan_check_write(to, n);
+ kasan_check_read(from, n);
kcsan_check_write(to, n);
+ kcsan_check_read(from, n);
+}
+
+/**
+ * instrument_memcpy_after - add instrumentation after non-instrumented memcpy
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ * @left: number of bytes not copied (if known)
+ *
+ * Instrument memory accesses that happen in custom memcpy implementations. The
+ * instrumentation should be inserted after the memcpy call.
+ */
+static __always_inline void instrument_memcpy_after(void *to, const void *from,
+ unsigned long n,
+ unsigned long left)
+{
+ kmsan_memmove(to, from, n - left);
}
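
The same pattern wraps hand-rolled copies the compiler cannot instrument. A
hedged sketch, with my_uninstrumented_memcpy() hypothetical:

	instrument_memcpy_before(dst, src, len);
	my_uninstrumented_memcpy(dst, src, len);	/* cannot fail here */
	instrument_memcpy_after(dst, src, len, 0);	/* left == 0: all copied */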
+/**
+ * instrument_get_user() - add instrumentation to get_user()-like macros
+ * @to: destination variable, may not be address-taken
+ *
+ * get_user() and friends are fragile, so it may depend on the implementation
+ * whether the instrumentation happens before or after the data is copied
+ * from userspace.
+ */
+#define instrument_get_user(to) \
+({ \
+ u64 __tmp = (u64)(to); \
+ kmsan_unpoison_memory(&__tmp, sizeof(__tmp)); \
+ to = __tmp; \
+})
+
+/**
+ * instrument_put_user() - add instrumentation to put_user()-like macros
+ * @from: source address
+ * @ptr: userspace pointer to copy to
+ * @size: number of bytes to copy
+ *
+ * put_user() and friends are fragile, so it may depend on the implementation
+ * whether the instrumentation happens before or after the data is copied
+ * to userspace.
+ */
+#define instrument_put_user(from, ptr, size) \
+({ \
+ kmsan_copy_to_user(ptr, &from, sizeof(from), 0); \
+})
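
A hedged sketch of where an architecture might place these in its uaccess
macros; arch_get_user()/arch_put_user() are hypothetical primitives:

	err = arch_get_user(x, uptr);
	if (!err)
		instrument_get_user(x);		/* mark x initialized for KMSAN */

	instrument_put_user(x, uptr, sizeof(x));	/* check x before the store */
	err = arch_put_user(x, uptr);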
+
#endif /* _LINUX_INSTRUMENTED_H */
diff --git a/include/media/dvb_math.h b/include/linux/int_log.h
index 8690ec42954d..0a6f58c38b61 100644
--- a/include/media/dvb_math.h
+++ b/include/linux/int_log.h
@@ -1,22 +1,12 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * dvb-math provides some complex fixed-point math
- * operations shared between the dvb related stuff
+ * Provides fixed-point logarithm operations.
*
* Copyright (C) 2006 Christoph Pfister (christophpfister@gmail.com)
- *
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
*/
-#ifndef __DVB_MATH_H
-#define __DVB_MATH_H
+#ifndef __LINUX_INT_LOG_H
+#define __LINUX_INT_LOG_H
#include <linux/types.h>
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index 2271939c5c31..f5842372359b 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -8,50 +8,58 @@
#define _LINUX_INTEGRITY_H
#include <linux/fs.h>
+#include <linux/iversion.h>
enum integrity_status {
INTEGRITY_PASS = 0,
INTEGRITY_PASS_IMMUTABLE,
INTEGRITY_FAIL,
+ INTEGRITY_FAIL_IMMUTABLE,
INTEGRITY_NOLABEL,
INTEGRITY_NOXATTRS,
INTEGRITY_UNKNOWN,
};
-/* List of EVM protected security xattrs */
#ifdef CONFIG_INTEGRITY
-extern struct integrity_iint_cache *integrity_inode_get(struct inode *inode);
-extern void integrity_inode_free(struct inode *inode);
extern void __init integrity_load_keys(void);
#else
-static inline struct integrity_iint_cache *
- integrity_inode_get(struct inode *inode)
-{
- return NULL;
-}
-
-static inline void integrity_inode_free(struct inode *inode)
-{
- return;
-}
-
static inline void integrity_load_keys(void)
{
}
#endif /* CONFIG_INTEGRITY */
-#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
-
-extern int integrity_kernel_module_request(char *kmod_name);
+/* An inode's attributes for detection of changes */
+struct integrity_inode_attributes {
+ u64 version; /* track inode changes */
+ unsigned long ino;
+ dev_t dev;
+};
-#else
+/*
+ * On stacked filesystems, the i_version alone is not enough to detect file
+ * data or metadata changes. Additional metadata is required.
+ */
+static inline void
+integrity_inode_attrs_store(struct integrity_inode_attributes *attrs,
+ u64 i_version, const struct inode *inode)
+{
+ attrs->version = i_version;
+ attrs->dev = inode->i_sb->s_dev;
+ attrs->ino = inode->i_ino;
+}
-static inline int integrity_kernel_module_request(char *kmod_name)
+/*
+ * On stacked filesystems, detect whether the inode or its content has changed.
+ */
+static inline bool
+integrity_inode_attrs_changed(const struct integrity_inode_attributes *attrs,
+ const struct inode *inode)
{
- return 0;
+ return (inode->i_sb->s_dev != attrs->dev ||
+ inode->i_ino != attrs->ino ||
+ !inode_eq_iversion(inode, attrs->version));
}
-#endif /* CONFIG_INTEGRITY_ASYMMETRIC_KEYS */
#endif /* _LINUX_INTEGRITY_H */
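
A hedged sketch of the intended pattern: cache the attributes when a file is
measured, then re-verify only if something observable changed:

	struct integrity_inode_attributes attrs;	/* stored with the measurement */

	integrity_inode_attrs_store(&attrs, inode_query_iversion(inode), inode);
	/* ... later ... */
	if (integrity_inode_attrs_changed(&attrs, inode))
		remeasure_file(inode);	/* hypothetical: i_version, ino or sb changed */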
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
deleted file mode 100644
index 03faf20a6817..000000000000
--- a/include/linux/intel-iommu.h
+++ /dev/null
@@ -1,830 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright © 2006-2015, Intel Corporation.
- *
- * Authors: Ashok Raj <ashok.raj@intel.com>
- * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
- * David Woodhouse <David.Woodhouse@intel.com>
- */
-
-#ifndef _INTEL_IOMMU_H_
-#define _INTEL_IOMMU_H_
-
-#include <linux/types.h>
-#include <linux/iova.h>
-#include <linux/io.h>
-#include <linux/idr.h>
-#include <linux/mmu_notifier.h>
-#include <linux/list.h>
-#include <linux/iommu.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-#include <linux/dmar.h>
-#include <linux/ioasid.h>
-#include <linux/bitfield.h>
-
-#include <asm/cacheflush.h>
-#include <asm/iommu.h>
-
-/*
- * VT-d hardware uses 4KiB page size regardless of host page size.
- */
-#define VTD_PAGE_SHIFT (12)
-#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
-#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
-#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
-
-#define VTD_STRIDE_SHIFT (9)
-#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
-
-#define DMA_PTE_READ BIT_ULL(0)
-#define DMA_PTE_WRITE BIT_ULL(1)
-#define DMA_PTE_LARGE_PAGE BIT_ULL(7)
-#define DMA_PTE_SNP BIT_ULL(11)
-
-#define DMA_FL_PTE_PRESENT BIT_ULL(0)
-#define DMA_FL_PTE_US BIT_ULL(2)
-#define DMA_FL_PTE_ACCESS BIT_ULL(5)
-#define DMA_FL_PTE_DIRTY BIT_ULL(6)
-#define DMA_FL_PTE_XD BIT_ULL(63)
-
-#define ADDR_WIDTH_5LEVEL (57)
-#define ADDR_WIDTH_4LEVEL (48)
-
-#define CONTEXT_TT_MULTI_LEVEL 0
-#define CONTEXT_TT_DEV_IOTLB 1
-#define CONTEXT_TT_PASS_THROUGH 2
-#define CONTEXT_PASIDE BIT_ULL(3)
-
-/*
- * Intel IOMMU register specification per version 1.0 public spec.
- */
-#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
-#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
-#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
-#define DMAR_GCMD_REG 0x18 /* Global command register */
-#define DMAR_GSTS_REG 0x1c /* Global status register */
-#define DMAR_RTADDR_REG 0x20 /* Root entry table */
-#define DMAR_CCMD_REG 0x28 /* Context command reg */
-#define DMAR_FSTS_REG 0x34 /* Fault Status register */
-#define DMAR_FECTL_REG 0x38 /* Fault control register */
-#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
-#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
-#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
-#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
-#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
-#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
-#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
-#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
-#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
-#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
-#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
-#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
-#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
-#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
-#define DMAR_IQER_REG 0xb0 /* Invalidation queue error record register */
-#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
-#define DMAR_PQH_REG 0xc0 /* Page request queue head register */
-#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */
-#define DMAR_PQA_REG 0xd0 /* Page request queue address register */
-#define DMAR_PRS_REG 0xdc /* Page request status register */
-#define DMAR_PECTL_REG 0xe0 /* Page request event control register */
-#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
-#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
-#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
-#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability register */
-#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type register */
-#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
-#define DMAR_MTRR_FIX16K_80000_REG 0x128
-#define DMAR_MTRR_FIX16K_A0000_REG 0x130
-#define DMAR_MTRR_FIX4K_C0000_REG 0x138
-#define DMAR_MTRR_FIX4K_C8000_REG 0x140
-#define DMAR_MTRR_FIX4K_D0000_REG 0x148
-#define DMAR_MTRR_FIX4K_D8000_REG 0x150
-#define DMAR_MTRR_FIX4K_E0000_REG 0x158
-#define DMAR_MTRR_FIX4K_E8000_REG 0x160
-#define DMAR_MTRR_FIX4K_F0000_REG 0x168
-#define DMAR_MTRR_FIX4K_F8000_REG 0x170
-#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
-#define DMAR_MTRR_PHYSMASK0_REG 0x188
-#define DMAR_MTRR_PHYSBASE1_REG 0x190
-#define DMAR_MTRR_PHYSMASK1_REG 0x198
-#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
-#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
-#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
-#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
-#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
-#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
-#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
-#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
-#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
-#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
-#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
-#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
-#define DMAR_MTRR_PHYSBASE8_REG 0x200
-#define DMAR_MTRR_PHYSMASK8_REG 0x208
-#define DMAR_MTRR_PHYSBASE9_REG 0x210
-#define DMAR_MTRR_PHYSMASK9_REG 0x218
-#define DMAR_VCCAP_REG 0xe00 /* Virtual command capability register */
-#define DMAR_VCMD_REG 0xe10 /* Virtual command register */
-#define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */
-
-#define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg)
-#define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg)
-#define DMAR_IQER_REG_ICESID(reg) FIELD_GET(GENMASK_ULL(63, 48), reg)
-
-#define OFFSET_STRIDE (9)
-
-#define dmar_readq(a) readq(a)
-#define dmar_writeq(a,v) writeq(v,a)
-#define dmar_readl(a) readl(a)
-#define dmar_writel(a, v) writel(v, a)
-
-#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
-#define DMAR_VER_MINOR(v) ((v) & 0x0f)
-
-/*
- * Decoding Capability Register
- */
-#define cap_5lp_support(c) (((c) >> 60) & 1)
-#define cap_pi_support(c) (((c) >> 59) & 1)
-#define cap_fl1gp_support(c) (((c) >> 56) & 1)
-#define cap_read_drain(c) (((c) >> 55) & 1)
-#define cap_write_drain(c) (((c) >> 54) & 1)
-#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
-#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
-#define cap_pgsel_inv(c) (((c) >> 39) & 1)
-
-#define cap_super_page_val(c) (((c) >> 34) & 0xf)
-#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
- * OFFSET_STRIDE) + 21)
-
-#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
-#define cap_max_fault_reg_offset(c) \
- (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
-
-#define cap_zlr(c) (((c) >> 22) & 1)
-#define cap_isoch(c) (((c) >> 23) & 1)
-#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
-#define cap_sagaw(c) (((c) >> 8) & 0x1f)
-#define cap_caching_mode(c) (((c) >> 7) & 1)
-#define cap_phmr(c) (((c) >> 6) & 1)
-#define cap_plmr(c) (((c) >> 5) & 1)
-#define cap_rwbf(c) (((c) >> 4) & 1)
-#define cap_afl(c) (((c) >> 3) & 1)
-#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
-/*
- * Extended Capability Register
- */
-
-#define ecap_rps(e) (((e) >> 49) & 0x1)
-#define ecap_smpwc(e) (((e) >> 48) & 0x1)
-#define ecap_flts(e) (((e) >> 47) & 0x1)
-#define ecap_slts(e) (((e) >> 46) & 0x1)
-#define ecap_slads(e) (((e) >> 45) & 0x1)
-#define ecap_vcs(e) (((e) >> 44) & 0x1)
-#define ecap_smts(e) (((e) >> 43) & 0x1)
-#define ecap_dit(e) (((e) >> 41) & 0x1)
-#define ecap_pds(e) (((e) >> 42) & 0x1)
-#define ecap_pasid(e) (((e) >> 40) & 0x1)
-#define ecap_pss(e) (((e) >> 35) & 0x1f)
-#define ecap_eafs(e) (((e) >> 34) & 0x1)
-#define ecap_nwfs(e) (((e) >> 33) & 0x1)
-#define ecap_srs(e) (((e) >> 31) & 0x1)
-#define ecap_ers(e) (((e) >> 30) & 0x1)
-#define ecap_prs(e) (((e) >> 29) & 0x1)
-#define ecap_broken_pasid(e) (((e) >> 28) & 0x1)
-#define ecap_dis(e) (((e) >> 27) & 0x1)
-#define ecap_nest(e) (((e) >> 26) & 0x1)
-#define ecap_mts(e) (((e) >> 25) & 0x1)
-#define ecap_ecs(e) (((e) >> 24) & 0x1)
-#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
-#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
-#define ecap_coherent(e) ((e) & 0x1)
-#define ecap_qis(e) ((e) & 0x2)
-#define ecap_pass_through(e) (((e) >> 6) & 0x1)
-#define ecap_eim_support(e) (((e) >> 4) & 0x1)
-#define ecap_ir_support(e) (((e) >> 3) & 0x1)
-#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
-#define ecap_max_handle_mask(e) (((e) >> 20) & 0xf)
-#define ecap_sc_support(e) (((e) >> 7) & 0x1) /* Snooping Control */
-
-/* Virtual command interface capability */
-#define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */
-
-/* IOTLB_REG */
-#define DMA_TLB_FLUSH_GRANU_OFFSET 60
-#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
-#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
-#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
-#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
-#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
-#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
-#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
-#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
-#define DMA_TLB_IVT (((u64)1) << 63)
-#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
-#define DMA_TLB_MAX_SIZE (0x3f)
-
-/* INVALID_DESC */
-#define DMA_CCMD_INVL_GRANU_OFFSET 61
-#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
-#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4)
-#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4)
-#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
-#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
-#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
-#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6)
-#define DMA_ID_TLB_ADDR(addr) (addr)
-#define DMA_ID_TLB_ADDR_MASK(mask) (mask)
-
-/* PMEN_REG */
-#define DMA_PMEN_EPM (((u32)1)<<31)
-#define DMA_PMEN_PRS (((u32)1)<<0)
-
-/* GCMD_REG */
-#define DMA_GCMD_TE (((u32)1) << 31)
-#define DMA_GCMD_SRTP (((u32)1) << 30)
-#define DMA_GCMD_SFL (((u32)1) << 29)
-#define DMA_GCMD_EAFL (((u32)1) << 28)
-#define DMA_GCMD_WBF (((u32)1) << 27)
-#define DMA_GCMD_QIE (((u32)1) << 26)
-#define DMA_GCMD_SIRTP (((u32)1) << 24)
-#define DMA_GCMD_IRE (((u32) 1) << 25)
-#define DMA_GCMD_CFI (((u32) 1) << 23)
-
-/* GSTS_REG */
-#define DMA_GSTS_TES (((u32)1) << 31)
-#define DMA_GSTS_RTPS (((u32)1) << 30)
-#define DMA_GSTS_FLS (((u32)1) << 29)
-#define DMA_GSTS_AFLS (((u32)1) << 28)
-#define DMA_GSTS_WBFS (((u32)1) << 27)
-#define DMA_GSTS_QIES (((u32)1) << 26)
-#define DMA_GSTS_IRTPS (((u32)1) << 24)
-#define DMA_GSTS_IRES (((u32)1) << 25)
-#define DMA_GSTS_CFIS (((u32)1) << 23)
-
-/* DMA_RTADDR_REG */
-#define DMA_RTADDR_RTT (((u64)1) << 11)
-#define DMA_RTADDR_SMT (((u64)1) << 10)
-
-/* CCMD_REG */
-#define DMA_CCMD_ICC (((u64)1) << 63)
-#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
-#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
-#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
-#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
-#define DMA_CCMD_MASK_NOBIT 0
-#define DMA_CCMD_MASK_1BIT 1
-#define DMA_CCMD_MASK_2BIT 2
-#define DMA_CCMD_MASK_3BIT 3
-#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
-#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
-
-/* FECTL_REG */
-#define DMA_FECTL_IM (((u32)1) << 31)
-
-/* FSTS_REG */
-#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
-#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
-#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
-#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
-#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
-#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
-#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
-
-/* FRCD_REG, 32 bits access */
-#define DMA_FRCD_F (((u32)1) << 31)
-#define dma_frcd_type(d) ((d >> 30) & 1)
-#define dma_frcd_fault_reason(c) (c & 0xff)
-#define dma_frcd_source_id(c) (c & 0xffff)
-#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
-#define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
-/* low 64 bit */
-#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
-
-/* PRS_REG */
-#define DMA_PRS_PPR ((u32)1)
-#define DMA_PRS_PRO ((u32)2)
-
-#define DMA_VCS_PAS ((u64)1)
-
-#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
-do { \
- cycles_t start_time = get_cycles(); \
- while (1) { \
- sts = op(iommu->reg + offset); \
- if (cond) \
- break; \
- if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
- panic("DMAR hardware is malfunctioning\n"); \
- cpu_relax(); \
- } \
-} while (0)
-
-#define QI_LENGTH 256 /* queue length */
-
-enum {
- QI_FREE,
- QI_IN_USE,
- QI_DONE,
- QI_ABORT
-};
-
-#define QI_CC_TYPE 0x1
-#define QI_IOTLB_TYPE 0x2
-#define QI_DIOTLB_TYPE 0x3
-#define QI_IEC_TYPE 0x4
-#define QI_IWD_TYPE 0x5
-#define QI_EIOTLB_TYPE 0x6
-#define QI_PC_TYPE 0x7
-#define QI_DEIOTLB_TYPE 0x8
-#define QI_PGRP_RESP_TYPE 0x9
-#define QI_PSTRM_RESP_TYPE 0xa
-
-#define QI_IEC_SELECTIVE (((u64)1) << 4)
-#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
-#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27))
-
-#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
-#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
-#define QI_IWD_FENCE (((u64)1) << 6)
-#define QI_IWD_PRQ_DRAIN (((u64)1) << 7)
-
-#define QI_IOTLB_DID(did) (((u64)did) << 16)
-#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
-#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
-#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
-#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
-#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
-#define QI_IOTLB_AM(am) (((u8)am) & 0x3f)
-
-#define QI_CC_FM(fm) (((u64)fm) << 48)
-#define QI_CC_SID(sid) (((u64)sid) << 32)
-#define QI_CC_DID(did) (((u64)did) << 16)
-#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
-
-#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
-#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
-#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
-#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
- ((u64)((pfsid >> 4) & 0xfff) << 52))
-#define QI_DEV_IOTLB_SIZE 1
-#define QI_DEV_IOTLB_MAX_INVS 32
-
-#define QI_PC_PASID(pasid) (((u64)pasid) << 32)
-#define QI_PC_DID(did) (((u64)did) << 16)
-#define QI_PC_GRAN(gran) (((u64)gran) << 4)
-
-/* PASID cache invalidation granu */
-#define QI_PC_ALL_PASIDS 0
-#define QI_PC_PASID_SEL 1
-#define QI_PC_GLOBAL 3
-
-#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
-#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
-#define QI_EIOTLB_AM(am) (((u64)am) & 0x3f)
-#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
-#define QI_EIOTLB_DID(did) (((u64)did) << 16)
-#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4)
-
-/* QI Dev-IOTLB inv granu */
-#define QI_DEV_IOTLB_GRAN_ALL 1
-#define QI_DEV_IOTLB_GRAN_PASID_SEL 0
-
-#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
-#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
-#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32)
-#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
-#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
-#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
- ((u64)((pfsid >> 4) & 0xfff) << 52))
-#define QI_DEV_EIOTLB_MAX_INVS 32
-
-/* Page group response descriptor QW0 */
-#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
-#define QI_PGRP_PDP(p) (((u64)(p)) << 5)
-#define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12)
-#define QI_PGRP_DID(rid) (((u64)(rid)) << 16)
-#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
-
-/* Page group response descriptor QW1 */
-#define QI_PGRP_LPIG(x) (((u64)(x)) << 2)
-#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3)
-
-
-#define QI_RESP_SUCCESS 0x0
-#define QI_RESP_INVALID 0x1
-#define QI_RESP_FAILURE 0xf
-
-#define QI_GRAN_NONG_PASID 2
-#define QI_GRAN_PSI_PASID 3
-
-#define qi_shift(iommu) (DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))
-
-struct qi_desc {
- u64 qw0;
- u64 qw1;
- u64 qw2;
- u64 qw3;
-};
-
-struct q_inval {
- raw_spinlock_t q_lock;
- void *desc; /* invalidation queue */
- int *desc_status; /* desc status */
- int free_head; /* first free entry */
- int free_tail; /* last free entry */
- int free_cnt;
-};
-
-struct dmar_pci_notify_info;
-
-#ifdef CONFIG_IRQ_REMAP
-/* 1MB - maximum possible interrupt remapping table size */
-#define INTR_REMAP_PAGE_ORDER 8
-#define INTR_REMAP_TABLE_REG_SIZE 0xf
-#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
-
-#define INTR_REMAP_TABLE_ENTRIES 65536
-
-struct irq_domain;
-
-struct ir_table {
- struct irte *base;
- unsigned long *bitmap;
-};
-
-void intel_irq_remap_add_device(struct dmar_pci_notify_info *info);
-#else
-static inline void
-intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { }
-#endif
-
-struct iommu_flush {
- void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
- u8 fm, u64 type);
- void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type);
-};
-
-enum {
- SR_DMAR_FECTL_REG,
- SR_DMAR_FEDATA_REG,
- SR_DMAR_FEADDR_REG,
- SR_DMAR_FEUADDR_REG,
- MAX_SR_DMAR_REGS
-};
-
-#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
-#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
-#define VTD_FLAG_SVM_CAPABLE (1 << 2)
-
-extern int intel_iommu_sm;
-extern spinlock_t device_domain_lock;
-
-#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
-#define pasid_supported(iommu) (sm_supported(iommu) && \
- ecap_pasid((iommu)->ecap))
-
-struct pasid_entry;
-struct pasid_state_entry;
-struct page_req_dsc;
-
-/*
- * 0: Present
- * 1-11: Reserved
- * 12-63: Context Ptr (12 - (haw-1))
- * 64-127: Reserved
- */
-struct root_entry {
- u64 lo;
- u64 hi;
-};
-
-/*
- * low 64 bits:
- * 0: present
- * 1: fault processing disable
- * 2-3: translation type
- * 12-63: address space root
- * high 64 bits:
- * 0-2: address width
- * 3-6: aval
- * 8-23: domain id
- */
-struct context_entry {
- u64 lo;
- u64 hi;
-};
-
-/* si_domain contains mulitple devices */
-#define DOMAIN_FLAG_STATIC_IDENTITY BIT(0)
-
-/*
- * When VT-d works in the scalable mode, it allows DMA translation to
- * happen through either first level or second level page table. This
- * bit marks that the DMA translation for the domain goes through the
- * first level page table, otherwise, it goes through the second level.
- */
-#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1)
-
-/*
- * Domain represents a virtual machine which demands iommu nested
- * translation mode support.
- */
-#define DOMAIN_FLAG_NESTING_MODE BIT(2)
-
-struct dmar_domain {
- int nid; /* node id */
-
- unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
- /* Refcount of devices per iommu */
-
-
- u16 iommu_did[DMAR_UNITS_SUPPORTED];
- /* Domain ids per IOMMU. Use u16 since
- * domain ids are 16 bit wide according
- * to VT-d spec, section 9.3 */
-
- bool has_iotlb_device;
- struct list_head devices; /* all devices' list */
- struct list_head subdevices; /* all subdevices' list */
- struct iova_domain iovad; /* iova's that belong to this domain */
-
- struct dma_pte *pgd; /* virtual address */
- int gaw; /* max guest address width */
-
- /* adjusted guest address width, 0 is level 2 30-bit */
- int agaw;
-
- int flags; /* flags to find out type of domain */
-
- int iommu_coherency;/* indicate coherency of iommu access */
- int iommu_snooping; /* indicate snooping control feature*/
- int iommu_count; /* reference count of iommu */
- int iommu_superpage;/* Level of superpages supported:
- 0 == 4KiB (no superpages), 1 == 2MiB,
- 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
- u64 max_addr; /* maximum mapped address */
-
- u32 default_pasid; /*
- * The default pasid used for non-SVM
- * traffic on mediated devices.
- */
-
- struct iommu_domain domain; /* generic domain data structure for
- iommu core */
-};
-
-struct intel_iommu {
- void __iomem *reg; /* Pointer to hardware regs, virtual addr */
- u64 reg_phys; /* physical address of hw register set */
- u64 reg_size; /* size of hw register set */
- u64 cap;
- u64 ecap;
- u64 vccap;
- u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
- raw_spinlock_t register_lock; /* protect register handling */
- int seq_id; /* sequence id of the iommu */
- int agaw; /* agaw of this iommu */
- int msagaw; /* max sagaw of this iommu */
- unsigned int irq, pr_irq;
- u16 segment; /* PCI segment# */
- unsigned char name[13]; /* Device Name */
-
-#ifdef CONFIG_INTEL_IOMMU
- unsigned long *domain_ids; /* bitmap of domains */
- struct dmar_domain ***domains; /* ptr to domains */
- spinlock_t lock; /* protect context, domain ids */
- struct root_entry *root_entry; /* virtual address */
-
- struct iommu_flush flush;
-#endif
-#ifdef CONFIG_INTEL_IOMMU_SVM
- struct page_req_dsc *prq;
- unsigned char prq_name[16]; /* Name for PRQ interrupt */
- struct completion prq_complete;
- struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
-#endif
- struct q_inval *qi; /* Queued invalidation info */
- u32 *iommu_state; /* Store iommu states between suspend and resume.*/
-
-#ifdef CONFIG_IRQ_REMAP
- struct ir_table *ir_table; /* Interrupt remapping info */
- struct irq_domain *ir_domain;
- struct irq_domain *ir_msi_domain;
-#endif
- struct iommu_device iommu; /* IOMMU core code handle */
- int node;
- u32 flags; /* Software defined flags */
-
- struct dmar_drhd_unit *drhd;
-};
-
-/* Per subdevice private data */
-struct subdev_domain_info {
- struct list_head link_phys; /* link to phys device siblings */
- struct list_head link_domain; /* link to domain siblings */
- struct device *pdev; /* physical device derived from */
- struct dmar_domain *domain; /* aux-domain */
- int users; /* user count */
-};
-
-/* PCI domain-device relationship */
-struct device_domain_info {
- struct list_head link; /* link to domain siblings */
- struct list_head global; /* link to global list */
- struct list_head table; /* link to pasid table */
- struct list_head subdevices; /* subdevices sibling */
- u32 segment; /* PCI segment number */
- u8 bus; /* PCI bus number */
- u8 devfn; /* PCI devfn number */
- u16 pfsid; /* SRIOV physical function source ID */
- u8 pasid_supported:3;
- u8 pasid_enabled:1;
- u8 pri_supported:1;
- u8 pri_enabled:1;
- u8 ats_supported:1;
- u8 ats_enabled:1;
- u8 auxd_enabled:1; /* Multiple domains per device */
- u8 ats_qdep;
- struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
- struct intel_iommu *iommu; /* IOMMU used by this device */
- struct dmar_domain *domain; /* pointer to domain */
- struct pasid_table *pasid_table; /* pasid table */
-};
-
-static inline void __iommu_flush_cache(
- struct intel_iommu *iommu, void *addr, int size)
-{
- if (!ecap_coherent(iommu->ecap))
- clflush_cache_range(addr, size);
-}
-
-/* Convert generic struct iommu_domain to private struct dmar_domain */
-static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
-{
- return container_of(dom, struct dmar_domain, domain);
-}
-
-/*
- * 0: readable
- * 1: writable
- * 2-6: reserved
- * 7: super page
- * 8-10: available
- * 11: snoop behavior
- * 12-63: Host physical address
- */
-struct dma_pte {
- u64 val;
-};
-
-static inline void dma_clear_pte(struct dma_pte *pte)
-{
- pte->val = 0;
-}
-
-static inline u64 dma_pte_addr(struct dma_pte *pte)
-{
-#ifdef CONFIG_64BIT
- return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
-#else
- /* Must have a full atomic 64-bit read */
- return __cmpxchg64(&pte->val, 0ULL, 0ULL) &
- VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
-#endif
-}
-
-static inline bool dma_pte_present(struct dma_pte *pte)
-{
- return (pte->val & 3) != 0;
-}
-
-static inline bool dma_pte_superpage(struct dma_pte *pte)
-{
- return (pte->val & DMA_PTE_LARGE_PAGE);
-}
-
-static inline int first_pte_in_page(struct dma_pte *pte)
-{
- return !((unsigned long)pte & ~VTD_PAGE_MASK);
-}
-
-extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
-extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
-
-extern int dmar_enable_qi(struct intel_iommu *iommu);
-extern void dmar_disable_qi(struct intel_iommu *iommu);
-extern int dmar_reenable_qi(struct intel_iommu *iommu);
-extern void qi_global_iec(struct intel_iommu *iommu);
-
-extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
- u8 fm, u64 type);
-extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type);
-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
- u16 qdep, u64 addr, unsigned mask);
-
-void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
- unsigned long npages, bool ih);
-
-void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
- u32 pasid, u16 qdep, u64 addr,
- unsigned int size_order);
-void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
- u32 pasid);
-
-int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
- unsigned int count, unsigned long options);
-/*
- * Options used in qi_submit_sync:
- * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
- */
-#define QI_OPT_WAIT_DRAIN BIT(0)
-
-extern int dmar_ir_support(void);
-
-void *alloc_pgtable_page(int node);
-void free_pgtable_page(void *vaddr);
-struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
-int for_each_device_domain(int (*fn)(struct device_domain_info *info,
- void *data), void *data);
-void iommu_flush_write_buffer(struct intel_iommu *iommu);
-int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
-struct dmar_domain *find_domain(struct device *dev);
-struct device_domain_info *get_domain_info(struct device *dev);
-struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
-
-#ifdef CONFIG_INTEL_IOMMU_SVM
-extern void intel_svm_check(struct intel_iommu *iommu);
-extern int intel_svm_enable_prq(struct intel_iommu *iommu);
-extern int intel_svm_finish_prq(struct intel_iommu *iommu);
-int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
- struct iommu_gpasid_bind_data *data);
-int intel_svm_unbind_gpasid(struct device *dev, u32 pasid);
-struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
- void *drvdata);
-void intel_svm_unbind(struct iommu_sva *handle);
-u32 intel_svm_get_pasid(struct iommu_sva *handle);
-int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
- struct iommu_page_response *msg);
-
-struct intel_svm_dev {
- struct list_head list;
- struct rcu_head rcu;
- struct device *dev;
- struct intel_iommu *iommu;
- struct iommu_sva sva;
- u32 pasid;
- int users;
- u16 did;
- u16 dev_iotlb:1;
- u16 sid, qdep;
-};
-
-struct intel_svm {
- struct mmu_notifier notifier;
- struct mm_struct *mm;
-
- unsigned int flags;
- u32 pasid;
- int gpasid; /* In case that guest PASID is different from host PASID */
- struct list_head devs;
- struct list_head list;
-};
-#else
-static inline void intel_svm_check(struct intel_iommu *iommu) {}
-#endif
-
-#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
-void intel_iommu_debugfs_init(void);
-#else
-static inline void intel_iommu_debugfs_init(void) {}
-#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
-
-extern const struct attribute_group *intel_iommu_groups[];
-bool context_present(struct context_entry *context);
-struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
- u8 devfn, int alloc);
-
-#ifdef CONFIG_INTEL_IOMMU
-extern int iommu_calculate_agaw(struct intel_iommu *iommu);
-extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
-extern int dmar_disabled;
-extern int intel_iommu_enabled;
-extern int intel_iommu_gfx_mapped;
-#else
-static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
-{
- return 0;
-}
-static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
-{
- return 0;
-}
-#define dmar_disabled (1)
-#define intel_iommu_enabled (0)
-#endif
-
-#endif
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h
index 0d6b4bc191c5..2cd4f65aaa37 100644
--- a/include/linux/intel-ish-client-if.h
+++ b/include/linux/intel-ish-client-if.h
@@ -8,11 +8,17 @@
#ifndef _INTEL_ISH_CLIENT_IF_H_
#define _INTEL_ISH_CLIENT_IF_H_
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
struct ishtp_cl_device;
struct ishtp_device;
struct ishtp_cl;
struct ishtp_fw_client;
+typedef __printf(2, 3) void (*ishtp_print_log)(struct ishtp_device *dev,
+ const char *format, ...);
+
/* Client state */
enum cl_state {
ISHTP_CL_INITIALIZING = 0,
@@ -34,9 +40,9 @@ enum cl_state {
struct ishtp_cl_driver {
struct device_driver driver;
const char *name;
- const guid_t *guid;
+ const struct ishtp_device_id *id;
int (*probe)(struct ishtp_cl_device *dev);
- int (*remove)(struct ishtp_cl_device *dev);
+ void (*remove)(struct ishtp_cl_device *dev);
int (*reset)(struct ishtp_cl_device *dev);
const struct dev_pm_ops *pm;
};
@@ -75,10 +81,14 @@ int ishtp_register_event_cb(struct ishtp_cl_device *device,
/* Get the device * from ishtp device instance */
struct device *ishtp_device(struct ishtp_cl_device *cl_device);
+/* wait for IPC resume */
+bool ishtp_wait_resume(struct ishtp_device *dev);
/* Trace interface for clients */
-void *ishtp_trace_callback(struct ishtp_cl_device *cl_device);
+ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device);
/* Get device pointer of PCI device for DMA access */
struct device *ishtp_get_pci_device(struct ishtp_cl_device *cl_device);
+/* Get the ISHTP workqueue */
+struct workqueue_struct *ishtp_get_workqueue(struct ishtp_cl_device *cl_device);
struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device);
void ishtp_cl_free(struct ishtp_cl *cl);
@@ -86,10 +96,12 @@ int ishtp_cl_link(struct ishtp_cl *cl);
void ishtp_cl_unlink(struct ishtp_cl *cl);
int ishtp_cl_disconnect(struct ishtp_cl *cl);
int ishtp_cl_connect(struct ishtp_cl *cl);
+int ishtp_cl_establish_connection(struct ishtp_cl *cl, const guid_t *uuid,
+ int tx_size, int rx_size, bool reset);
+void ishtp_cl_destroy_connection(struct ishtp_cl *cl, bool reset);
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
int ishtp_cl_flush_queues(struct ishtp_cl *cl);
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
-bool ishtp_cl_tx_empty(struct ishtp_cl *cl);
struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl);
void *ishtp_get_client_data(struct ishtp_cl *cl);
void ishtp_set_client_data(struct ishtp_cl *cl, void *data);
@@ -97,6 +109,7 @@ struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl);
void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size);
void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size);
void ishtp_set_connection_state(struct ishtp_cl *cl, int state);
+int ishtp_get_connection_state(struct ishtp_cl *cl);
void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id);
void ishtp_put_device(struct ishtp_cl_device *cl_dev);
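
The new establish/destroy pair bundles link, bind and connect into single
calls. A hedged probe/remove sketch; the my_client_* names, lookup helper and
ring sizes are hypothetical:

	static int my_client_probe(struct ishtp_cl_device *cl_device)
	{
		struct ishtp_cl *cl = ishtp_cl_allocate(cl_device);

		if (!cl)
			return -ENOMEM;

		return ishtp_cl_establish_connection(cl, &my_client_guid,
						     4, 4, false /* not a reset */);
	}

	static void my_client_remove(struct ishtp_cl_device *cl_device)
	{
		struct ishtp_cl *cl = my_get_cl(cl_device);	/* hypothetical lookup */

		ishtp_cl_destroy_connection(cl, false);
		ishtp_cl_free(cl);
	}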
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
deleted file mode 100644
index 10fa80eef13a..000000000000
--- a/include/linux/intel-svm.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright © 2015 Intel Corporation.
- *
- * Authors: David Woodhouse <David.Woodhouse@intel.com>
- */
-
-#ifndef __INTEL_SVM_H__
-#define __INTEL_SVM_H__
-
-/* Values for rxwp in fault_cb callback */
-#define SVM_REQ_READ (1<<3)
-#define SVM_REQ_WRITE (1<<2)
-#define SVM_REQ_EXEC (1<<1)
-#define SVM_REQ_PRIV (1<<0)
-
-/*
- * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only
- * for access to kernel addresses. No IOTLB flushes are automatically done
- * for kernel mappings; it is valid only for access to the kernel's static
- * 1:1 mapping of physical memory — not to vmalloc or even module mappings.
- * A future API addition may permit the use of such ranges, by means of an
- * explicit IOTLB flush call (akin to the DMA API's unmap method).
- *
- * It is unlikely that we will ever hook into flush_tlb_kernel_range() to
- * do such IOTLB flushes automatically.
- */
-#define SVM_FLAG_SUPERVISOR_MODE BIT(0)
-/*
- * The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest
- * processes. Compared to the host bind, the primary differences are:
- * 1. mm life cycle management
- * 2. fault reporting
- */
-#define SVM_FLAG_GUEST_MODE BIT(1)
-/*
- * The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space,
- * which requires guest and host PASID translation at both directions.
- */
-#define SVM_FLAG_GUEST_PASID BIT(2)
-
-#endif /* __INTEL_SVM_H__ */
diff --git a/include/linux/intel_dg_nvm_aux.h b/include/linux/intel_dg_nvm_aux.h
new file mode 100644
index 000000000000..625d46a6b96e
--- /dev/null
+++ b/include/linux/intel_dg_nvm_aux.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_DG_NVM_AUX_H__
+#define __INTEL_DG_NVM_AUX_H__
+
+#include <linux/auxiliary_bus.h>
+#include <linux/container_of.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+
+#define INTEL_DG_NVM_REGIONS 13
+
+struct intel_dg_nvm_region {
+ const char *name;
+};
+
+struct intel_dg_nvm_dev {
+ struct auxiliary_device aux_dev;
+ bool writable_override;
+ bool non_posted_erase;
+ struct resource bar;
+ struct resource bar2;
+ const struct intel_dg_nvm_region *regions;
+};
+
+#define auxiliary_dev_to_intel_dg_nvm_dev(auxiliary_dev) \
+ container_of(auxiliary_dev, struct intel_dg_nvm_dev, aux_dev)
+
+#endif /* __INTEL_DG_NVM_AUX_H__ */
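
A hedged sketch of the consumer side: the NVM driver bound to this auxiliary
device recovers the enclosing structure with the accessor (my_nvm_probe is
hypothetical):

	static int my_nvm_probe(struct auxiliary_device *aux_dev,
				const struct auxiliary_device_id *id)
	{
		struct intel_dg_nvm_dev *nvm =
			auxiliary_dev_to_intel_dg_nvm_dev(aux_dev);

		dev_info(&aux_dev->dev, "%u regions, writable override: %d\n",
			 INTEL_DG_NVM_REGIONS, nvm->writable_override);
		return 0;
	}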
diff --git a/include/linux/intel_pmt_features.h b/include/linux/intel_pmt_features.h
new file mode 100644
index 000000000000..53573a4a49b7
--- /dev/null
+++ b/include/linux/intel_pmt_features.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _FEATURES_H
+#define _FEATURES_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/* Common masks */
+#define PMT_CAP_TELEM BIT(0)
+#define PMT_CAP_WATCHER BIT(1)
+#define PMT_CAP_CRASHLOG BIT(2)
+#define PMT_CAP_STREAMING BIT(3)
+#define PMT_CAP_THRESHOLD BIT(4)
+#define PMT_CAP_WINDOW BIT(5)
+#define PMT_CAP_CONFIG BIT(6)
+#define PMT_CAP_TRACING BIT(7)
+#define PMT_CAP_INBAND BIT(8)
+#define PMT_CAP_OOB BIT(9)
+#define PMT_CAP_SECURED_CHAN BIT(10)
+
+#define PMT_CAP_PMT_SP BIT(11)
+#define PMT_CAP_PMT_SP_POLICY GENMASK(17, 12)
+
+/* Per Core Performance Telemetry (PCPT) specific masks */
+#define PMT_CAP_PCPT_CORE_PERF BIT(18)
+#define PMT_CAP_PCPT_CORE_C0_RES BIT(19)
+#define PMT_CAP_PCPT_CORE_ACTIVITY BIT(20)
+#define PMT_CAP_PCPT_CACHE_PERF BIT(21)
+#define PMT_CAP_PCPT_QUALITY_TELEM BIT(22)
+
+/* Per Core Environmental Telemetry (PCET) specific masks */
+#define PMT_CAP_PCET_WORKPOINT_HIST BIT(18)
+#define PMT_CAP_PCET_CORE_CURR_TEMP BIT(19)
+#define PMT_CAP_PCET_CORE_INST_RES BIT(20)
+#define PMT_CAP_PCET_QUALITY_TELEM BIT(21) /* Same as PMT_CAP_PCPT */
+#define PMT_CAP_PCET_CORE_CDYN_LVL BIT(22)
+#define PMT_CAP_PCET_CORE_STRESS_LVL BIT(23)
+#define PMT_CAP_PCET_CORE_DAS BIT(24)
+#define PMT_CAP_PCET_FIVR_HEALTH BIT(25)
+#define PMT_CAP_PCET_ENERGY BIT(26)
+#define PMT_CAP_PCET_PEM_STATUS BIT(27)
+#define PMT_CAP_PCET_CORE_C_STATE BIT(28)
+
+/* Per RMID Performance Telemetry specific masks */
+#define PMT_CAP_RMID_CORES_PERF BIT(18)
+#define PMT_CAP_RMID_CACHE_PERF BIT(19)
+#define PMT_CAP_RMID_PERF_QUAL BIT(20)
+
+/* Accelerator Telemetry specific masks */
+#define PMT_CAP_ACCEL_CPM_TELEM BIT(18)
+#define PMT_CAP_ACCEL_TIP_TELEM BIT(19)
+
+/* Uncore Telemetry specific masks */
+#define PMT_CAP_UNCORE_IO_CA_TELEM BIT(18)
+#define PMT_CAP_UNCORE_RMID_TELEM BIT(19)
+#define PMT_CAP_UNCORE_D2D_ULA_TELEM BIT(20)
+#define PMT_CAP_UNCORE_PKGC_TELEM BIT(21)
+
+/* Crash Log specific masks */
+#define PMT_CAP_CRASHLOG_MAN_TRIG BIT(11)
+#define PMT_CAP_CRASHLOG_CORE BIT(12)
+#define PMT_CAP_CRASHLOG_UNCORE BIT(13)
+#define PMT_CAP_CRASHLOG_TOR BIT(14)
+#define PMT_CAP_CRASHLOG_S3M BIT(15)
+#define PMT_CAP_CRASHLOG_PERSISTENCY BIT(16)
+#define PMT_CAP_CRASHLOG_CLIP_GPIO BIT(17)
+#define PMT_CAP_CRASHLOG_PRE_RESET BIT(18)
+#define PMT_CAP_CRASHLOG_POST_RESET BIT(19)
+
+/* PeTe Log specific masks */
+#define PMT_CAP_PETE_MAN_TRIG BIT(11)
+#define PMT_CAP_PETE_ENCRYPTION BIT(12)
+#define PMT_CAP_PETE_PERSISTENCY BIT(13)
+#define PMT_CAP_PETE_REQ_TOKENS BIT(14)
+#define PMT_CAP_PETE_PROD_ENABLED BIT(15)
+#define PMT_CAP_PETE_DEBUG_ENABLED BIT(16)
+
+/* TPMI control specific masks */
+#define PMT_CAP_TPMI_MAILBOX BIT(11)
+#define PMT_CAP_TPMI_LOCK BIT(12)
+
+/* Tracing specific masks */
+#define PMT_CAP_TRACE_SRAR BIT(11)
+#define PMT_CAP_TRACE_CORRECTABLE BIT(12)
+#define PMT_CAP_TRACE_MCTP BIT(13)
+#define PMT_CAP_TRACE_MRT BIT(14)
+
+/* Per RMID Energy Telemetry specific masks */
+#define PMT_CAP_RMID_ENERGY BIT(18)
+#define PMT_CAP_RMID_ACTIVITY BIT(19)
+#define PMT_CAP_RMID_ENERGY_QUAL BIT(20)
+
+enum pmt_feature_id {
+ FEATURE_INVALID = 0x0,
+ FEATURE_PER_CORE_PERF_TELEM = 0x1,
+ FEATURE_PER_CORE_ENV_TELEM = 0x2,
+ FEATURE_PER_RMID_PERF_TELEM = 0x3,
+ FEATURE_ACCEL_TELEM = 0x4,
+ FEATURE_UNCORE_TELEM = 0x5,
+ FEATURE_CRASH_LOG = 0x6,
+ FEATURE_PETE_LOG = 0x7,
+ FEATURE_TPMI_CTRL = 0x8,
+ FEATURE_RESERVED = 0x9,
+ FEATURE_TRACING = 0xA,
+ FEATURE_PER_RMID_ENERGY_TELEM = 0xB,
+ FEATURE_MAX = 0xB,
+};
+
+enum feature_layout {
+ LAYOUT_RMID,
+ LAYOUT_WATCHER,
+ LAYOUT_COMMAND,
+ LAYOUT_CAPS_ONLY,
+};
+
+struct pmt_cap {
+ u32 mask;
+ const char *name;
+};
+
+extern const char * const pmt_feature_names[];
+extern enum feature_layout feature_layout[];
+extern struct pmt_cap pmt_cap_common[];
+extern struct pmt_cap pmt_cap_pcpt[];
+extern struct pmt_cap *pmt_caps_pcpt[];
+extern struct pmt_cap pmt_cap_pcet[];
+extern struct pmt_cap *pmt_caps_pcet[];
+extern struct pmt_cap pmt_cap_rmid_perf[];
+extern struct pmt_cap *pmt_caps_rmid_perf[];
+extern struct pmt_cap pmt_cap_accel[];
+extern struct pmt_cap *pmt_caps_accel[];
+extern struct pmt_cap pmt_cap_uncore[];
+extern struct pmt_cap *pmt_caps_uncore[];
+extern struct pmt_cap pmt_cap_crashlog[];
+extern struct pmt_cap *pmt_caps_crashlog[];
+extern struct pmt_cap pmt_cap_pete[];
+extern struct pmt_cap *pmt_caps_pete[];
+extern struct pmt_cap pmt_cap_tpmi[];
+extern struct pmt_cap *pmt_caps_tpmi[];
+extern struct pmt_cap pmt_cap_s3m[];
+extern struct pmt_cap *pmt_caps_s3m[];
+extern struct pmt_cap pmt_cap_tracing[];
+extern struct pmt_cap *pmt_caps_tracing[];
+extern struct pmt_cap pmt_cap_rmid_energy[];
+extern struct pmt_cap *pmt_caps_rmid_energy[];
+
+static inline bool pmt_feature_id_is_valid(enum pmt_feature_id id)
+{
+ if (id > FEATURE_MAX)
+ return false;
+
+ if (id == FEATURE_INVALID || id == FEATURE_RESERVED)
+ return false;
+
+ return true;
+}
+#endif
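
A hedged sketch of consuming these tables: validate the feature id, then walk the common capability names. The NULL-name sentinel on pmt_cap_common[] is an assumption here, since the header does not export an explicit count:

	#include <linux/intel_pmt_features.h>
	#include <linux/printk.h>

	/* Hypothetical helper: log the common capabilities set in @caps. */
	static void report_common_caps(enum pmt_feature_id id, u32 caps)
	{
		const struct pmt_cap *cap;

		if (!pmt_feature_id_is_valid(id))
			return;

		/* Assumes the array ends with a NULL-named sentinel entry. */
		for (cap = pmt_cap_common; cap->name; cap++)
			if (caps & cap->mask)
				pr_info("%s: %s\n",
					pmt_feature_names[id], cap->name);
	}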
diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h
index 93780834fc8f..e9ade2ff4af6 100644
--- a/include/linux/intel_rapl.h
+++ b/include/linux/intel_rapl.h
@@ -14,6 +14,12 @@
#include <linux/powercap.h>
#include <linux/cpuhotplug.h>
+enum rapl_if_type {
+ RAPL_IF_MSR, /* RAPL I/F using MSR registers */
+ RAPL_IF_MMIO, /* RAPL I/F using MMIO registers */
+ RAPL_IF_TPMI, /* RAPL I/F using TPMI registers */
+};
+
enum rapl_domain_type {
RAPL_DOMAIN_PACKAGE, /* entire package/socket */
RAPL_DOMAIN_PP0, /* core power plane */
@@ -30,17 +36,23 @@ enum rapl_domain_reg_id {
RAPL_DOMAIN_REG_POLICY,
RAPL_DOMAIN_REG_INFO,
RAPL_DOMAIN_REG_PL4,
+ RAPL_DOMAIN_REG_UNIT,
+ RAPL_DOMAIN_REG_PL2,
RAPL_DOMAIN_REG_MAX,
};
struct rapl_domain;
enum rapl_primitives {
- ENERGY_COUNTER,
POWER_LIMIT1,
POWER_LIMIT2,
POWER_LIMIT4,
+ ENERGY_COUNTER,
FW_LOCK,
+ FW_HIGH_LOCK,
+ PL1_LOCK,
+ PL2_LOCK,
+ PL4_LOCK,
PL1_ENABLE, /* power limit 1, aka long term */
PL1_CLAMP, /* allow frequency to go below OS request */
@@ -58,6 +70,12 @@ enum rapl_primitives {
THROTTLED_TIME,
PRIORITY_LEVEL,
+ PSYS_POWER_LIMIT1,
+ PSYS_POWER_LIMIT2,
+ PSYS_PL1_ENABLE,
+ PSYS_PL2_ENABLE,
+ PSYS_TIME_WINDOW1,
+ PSYS_TIME_WINDOW2,
/* below are not raw primitive data */
AVERAGE_POWER,
NR_RAPL_PRIMITIVES,
@@ -68,12 +86,13 @@ struct rapl_domain_data {
unsigned long timestamp;
};
-#define NR_POWER_LIMITS (3)
+#define NR_POWER_LIMITS (POWER_LIMIT4 + 1)
+
struct rapl_power_limit {
struct powercap_zone_constraint *constraint;
- int prim_id; /* primitive ID used to enable */
struct rapl_domain *domain;
const char *name;
+ bool locked;
u64 last_power_limit;
};
@@ -81,21 +100,29 @@ struct rapl_package;
#define RAPL_DOMAIN_NAME_LENGTH 16
+union rapl_reg {
+ void __iomem *mmio;
+ u32 msr;
+ u64 val;
+};
+
struct rapl_domain {
char name[RAPL_DOMAIN_NAME_LENGTH];
enum rapl_domain_type id;
- u64 regs[RAPL_DOMAIN_REG_MAX];
+ union rapl_reg regs[RAPL_DOMAIN_REG_MAX];
struct powercap_zone power_zone;
struct rapl_domain_data rdd;
struct rapl_power_limit rpl[NR_POWER_LIMITS];
u64 attr_map; /* track capabilities */
unsigned int state;
- unsigned int domain_energy_unit;
+ unsigned int power_unit;
+ unsigned int energy_unit;
+ unsigned int time_unit;
struct rapl_package *rp;
};
struct reg_action {
- u64 reg;
+ union rapl_reg reg;
u64 mask;
u64 value;
int err;
@@ -115,18 +142,42 @@ struct reg_action {
* registers.
* @write_raw: Callback for writing RAPL interface specific
* registers.
+ * @defaults: internal pointer to interface default settings
+ * @rpi: internal pointer to interface primitive info
*/
struct rapl_if_priv {
+ enum rapl_if_type type;
struct powercap_control_type *control_type;
- struct rapl_domain *platform_rapl_domain;
enum cpuhp_state pcap_rapl_online;
- u64 reg_unit;
- u64 regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];
+ union rapl_reg reg_unit;
+ union rapl_reg regs[RAPL_DOMAIN_MAX][RAPL_DOMAIN_REG_MAX];
int limits[RAPL_DOMAIN_MAX];
- int (*read_raw)(int cpu, struct reg_action *ra);
- int (*write_raw)(int cpu, struct reg_action *ra);
+ int (*read_raw)(int id, struct reg_action *ra, bool atomic);
+ int (*write_raw)(int id, struct reg_action *ra);
+ void *defaults;
+ void *rpi;
};
+#ifdef CONFIG_PERF_EVENTS
+/**
+ * struct rapl_package_pmu_data: Per package data for PMU support
+ * @scale: Scale of 2^-32 Joules for each energy counter increase.
+ * @lock: Lock to protect n_active and active_list.
+ * @n_active: Number of active events.
+ * @active_list: List of active events.
+ * @timer_interval: Maximum timer expiration time before counter overflow.
+ * @hrtimer: Periodically update the counter to prevent overflow.
+ */
+struct rapl_package_pmu_data {
+ u64 scale[RAPL_DOMAIN_MAX];
+ raw_spinlock_t lock;
+ int n_active;
+ struct list_head active_list;
+ ktime_t timer_interval;
+ struct hrtimer hrtimer;
+};
+#endif
+
/* maximum rapl package domain name: package-%d-die-%d */
#define PACKAGE_DOMAIN_NAME_LENGTH 30
@@ -134,9 +185,6 @@ struct rapl_package {
	unsigned int id;	/* logical die id, equals physical id on 1-die systems */
unsigned int nr_domains;
unsigned long domain_map; /* bit map of active domains */
- unsigned int power_unit;
- unsigned int energy_unit;
- unsigned int time_unit;
struct rapl_domain *domains; /* array of domains, sized at runtime */
struct powercap_zone *power_zone; /* keep track of parent zone */
unsigned long power_limit_irq; /* keep track of package power limit
@@ -148,10 +196,28 @@ struct rapl_package {
struct cpumask cpumask;
char name[PACKAGE_DOMAIN_NAME_LENGTH];
struct rapl_if_priv *priv;
+#ifdef CONFIG_PERF_EVENTS
+ bool has_pmu;
+ struct rapl_package_pmu_data pmu_data;
+#endif
};
-struct rapl_package *rapl_find_package_domain(int cpu, struct rapl_if_priv *priv);
-struct rapl_package *rapl_add_package(int cpu, struct rapl_if_priv *priv);
+struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv,
+ bool id_is_cpu);
+struct rapl_package *rapl_add_package_cpuslocked(int id, struct rapl_if_priv *priv,
+ bool id_is_cpu);
+void rapl_remove_package_cpuslocked(struct rapl_package *rp);
+
+struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu);
+struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu);
void rapl_remove_package(struct rapl_package *rp);
+#ifdef CONFIG_PERF_EVENTS
+int rapl_package_add_pmu(struct rapl_package *rp);
+void rapl_package_remove_pmu(struct rapl_package *rp);
+#else
+static inline int rapl_package_add_pmu(struct rapl_package *rp) { return 0; }
+static inline void rapl_package_remove_pmu(struct rapl_package *rp) { }
+#endif
+
#endif /* __INTEL_RAPL_H__ */
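
To make the reworked interface concrete, a hedged sketch of an MMIO backend filling in struct rapl_if_priv; the callbacks are illustrative, not taken from an actual driver:

	#include <linux/intel_rapl.h>
	#include <linux/io.h>

	/* Hypothetical MMIO read: fetch the register and apply the mask. */
	static int my_read_raw(int id, struct reg_action *ra, bool atomic)
	{
		ra->value = readq(ra->reg.mmio) & ra->mask;
		return 0;
	}

	/* Hypothetical MMIO write: read-modify-write under the mask. */
	static int my_write_raw(int id, struct reg_action *ra)
	{
		u64 val = (readq(ra->reg.mmio) & ~ra->mask) | ra->value;

		writeq(val, ra->reg.mmio);
		return 0;
	}

	static struct rapl_if_priv my_priv = {
		.type		= RAPL_IF_MMIO,
		.read_raw	= my_read_raw,
		.write_raw	= my_write_raw,
	};

The backend would then call rapl_add_package(), with id_is_cpu selecting how the first argument is interpreted.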
diff --git a/include/linux/intel_tcc.h b/include/linux/intel_tcc.h
new file mode 100644
index 000000000000..fa788817acfc
--- /dev/null
+++ b/include/linux/intel_tcc.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * header for Intel TCC (thermal control circuitry) library
+ *
+ * Copyright (C) 2022 Intel Corporation.
+ */
+
+#ifndef __INTEL_TCC_H__
+#define __INTEL_TCC_H__
+
+#include <linux/types.h>
+
+int intel_tcc_get_tjmax(int cpu);
+int intel_tcc_get_offset(int cpu);
+int intel_tcc_set_offset(int cpu, int offset);
+int intel_tcc_get_temp(int cpu, int *temp, bool pkg);
+u32 intel_tcc_get_offset_mask(void);
+
+#endif /* __INTEL_TCC_H__ */
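
A short usage sketch of the library calls declared above; the millidegree conversion assumes the library reports temperatures in degrees Celsius:

	#include <linux/intel_tcc.h>

	/* Hedged sketch: read the package temperature seen from CPU 0. */
	static int read_pkg_temp_mC(int *temp_mC)
	{
		int temp, ret;

		ret = intel_tcc_get_temp(0, &temp, true);
		if (ret)
			return ret;

		*temp_mC = temp * 1000;	/* assuming degrees C from the library */
		return 0;
	}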
diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h
new file mode 100644
index 000000000000..94c06bf214fb
--- /dev/null
+++ b/include/linux/intel_tpmi.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * intel_tpmi.h: Intel TPMI core external interface
+ */
+
+#ifndef _INTEL_TPMI_H_
+#define _INTEL_TPMI_H_
+
+#include <linux/bitfield.h>
+
+struct oobmsm_plat_info;
+
+#define TPMI_VERSION_INVALID 0xff
+#define TPMI_MINOR_VERSION(val) FIELD_GET(GENMASK(4, 0), val)
+#define TPMI_MAJOR_VERSION(val) FIELD_GET(GENMASK(7, 5), val)
+
+/*
+ * List of supported TPMI IDs.
+ * Some TPMI IDs are not used by Linux, so the numbers are not consecutive.
+ */
+enum intel_tpmi_id {
+ TPMI_ID_RAPL = 0, /* Running Average Power Limit */
+ TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */
+ TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */
+ TPMI_ID_SST = 5, /* Speed Select Technology */
+ TPMI_ID_PLR = 0xc, /* Performance Limit Reasons */
+ TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */
+ TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */
+};
+
+struct oobmsm_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev);
+struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index);
+int tpmi_get_resource_count(struct auxiliary_device *auxdev);
+int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, bool *read_blocked,
+ bool *write_blocked);
+struct dentry *tpmi_get_debugfs_dir(struct auxiliary_device *auxdev);
+#endif
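
The version macros pack major.minor into one byte and unpack it with FIELD_GET(); a minimal decoding sketch:

	#include <linux/intel_tpmi.h>
	#include <linux/printk.h>

	/* Print a TPMI interface version, skipping the invalid sentinel. */
	static void tpmi_show_version(u8 version)
	{
		if (version == TPMI_VERSION_INVALID)
			return;

		pr_info("TPMI version %lu.%lu\n",
			TPMI_MAJOR_VERSION(version),
			TPMI_MINOR_VERSION(version));
	}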
diff --git a/include/linux/intel_vsec.h b/include/linux/intel_vsec.h
new file mode 100644
index 000000000000..53f6fe88e369
--- /dev/null
+++ b/include/linux/intel_vsec.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _INTEL_VSEC_H
+#define _INTEL_VSEC_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/intel_pmt_features.h>
+
+/*
+ * VSEC_CAP_UNUSED is reserved. It exists to prevent zero-initialized
+ * intel_vsec devices from being automatically set to a known
+ * capability with ID 0.
+ */
+#define VSEC_CAP_UNUSED BIT(0)
+#define VSEC_CAP_TELEMETRY BIT(1)
+#define VSEC_CAP_WATCHER BIT(2)
+#define VSEC_CAP_CRASHLOG BIT(3)
+#define VSEC_CAP_SDSI BIT(4)
+#define VSEC_CAP_TPMI BIT(5)
+#define VSEC_CAP_DISCOVERY BIT(6)
+#define VSEC_FEATURE_COUNT 7
+
+/* Intel DVSEC offsets */
+#define INTEL_DVSEC_ENTRIES 0xA
+#define INTEL_DVSEC_SIZE 0xB
+#define INTEL_DVSEC_TABLE 0xC
+#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0))
+#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3))
+#define TABLE_OFFSET_SHIFT 3
+
+struct pci_dev;
+struct resource;
+
+enum intel_vsec_id {
+ VSEC_ID_TELEMETRY = 2,
+ VSEC_ID_WATCHER = 3,
+ VSEC_ID_CRASHLOG = 4,
+ VSEC_ID_DISCOVERY = 12,
+ VSEC_ID_SDSI = 65,
+ VSEC_ID_TPMI = 66,
+};
+
+/**
+ * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
+ * @rev: Revision ID of the VSEC/DVSEC register space
+ * @length: Length of the VSEC/DVSEC register space
+ * @id: ID of the feature
+ * @num_entries: Number of instances of the feature
+ * @entry_size: Size of the discovery table for each feature
+ * @tbir: BAR containing the discovery tables
+ * @offset: BAR offset of start of the first discovery table
+ */
+struct intel_vsec_header {
+ u8 rev;
+ u16 length;
+ u16 id;
+ u8 num_entries;
+ u8 entry_size;
+ u8 tbir;
+ u32 offset;
+};
+
+enum intel_vsec_quirks {
+ /* Watcher feature not supported */
+ VSEC_QUIRK_NO_WATCHER = BIT(0),
+
+ /* Crashlog feature not supported */
+ VSEC_QUIRK_NO_CRASHLOG = BIT(1),
+
+ /* Use shift instead of mask to read discovery table offset */
+ VSEC_QUIRK_TABLE_SHIFT = BIT(2),
+
+ /* DVSEC not present (provided in driver data) */
+ VSEC_QUIRK_NO_DVSEC = BIT(3),
+
+ /* Platforms requiring quirk in the auxiliary driver */
+ VSEC_QUIRK_EARLY_HW = BIT(4),
+};
+
+/**
+ * struct pmt_callbacks - Callback infrastructure for PMT devices
+ * @read_telem: when specified, called by the client driver to access PMT data
+ * (instead of a direct copy)
+ * @pdev: PCI device reference for the callback's use
+ * @guid: ID of data to access
+ * @data: buffer for the data to be copied
+ * @off: offset into the requested buffer
+ * @count: size of buffer
+ */
+struct pmt_callbacks {
+ int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, loff_t off, u32 count);
+};
+
+struct vsec_feature_dependency {
+ unsigned long feature;
+ unsigned long supplier_bitmap;
+};
+
+/**
+ * struct intel_vsec_platform_info - Platform specific data
+ * @parent: parent device in the auxbus chain
+ * @headers: list of headers to define the PMT client devices to create
+ * @deps: array of feature dependencies
+ * @priv_data: private data, usable by parent devices, currently a callback
+ * @caps: bitmask of PMT capabilities for the given headers
+ * @quirks: bitmask of VSEC device quirks
+ * @base_addr: allow a base address to be specified (rather than derived)
+ * @num_deps: count of feature dependencies
+ */
+struct intel_vsec_platform_info {
+ struct device *parent;
+ struct intel_vsec_header **headers;
+ const struct vsec_feature_dependency *deps;
+ void *priv_data;
+ unsigned long caps;
+ unsigned long quirks;
+ u64 base_addr;
+ int num_deps;
+};
+
+/**
+ * struct intel_vsec_device - Auxbus specific device information
+ * @auxdev: auxbus device struct for auxbus access
+ * @pcidev: pci device associated with the device
+ * @resource: any resources shared by the parent
+ * @ida: id reference
+ * @num_resources: number of resources
+ * @id: xarray id
+ * @priv_data: any private data needed
+ * @priv_data_size: size of the private data
+ * @quirks: specified quirks
+ * @base_addr: base address of entries (if specified)
+ * @cap_id: the enumerated id of the vsec feature
+ */
+struct intel_vsec_device {
+ struct auxiliary_device auxdev;
+ struct pci_dev *pcidev;
+ struct resource *resource;
+ struct ida *ida;
+ int num_resources;
+ int id; /* xa */
+ void *priv_data;
+ size_t priv_data_size;
+ unsigned long quirks;
+ u64 base_addr;
+ unsigned long cap_id;
+};
+
+/**
+ * struct oobmsm_plat_info - Platform information for a device instance
+ * @cdie_mask: Mask of all compute dies in the partition
+ * @package_id: CPU Package id
+ * @partition: Package partition id when multiple VSEC PCI devices per package
+ * @segment: PCI segment ID
+ * @bus_number: PCI bus number
+ * @device_number: PCI device number
+ * @function_number: PCI function number
+ *
+ * Structure to store platform data for an OOBMSM device instance.
+ */
+struct oobmsm_plat_info {
+ u16 cdie_mask;
+ u8 package_id;
+ u8 partition;
+ u8 segment;
+ u8 bus_number;
+ u8 device_number;
+ u8 function_number;
+};
+
+struct telemetry_region {
+ struct oobmsm_plat_info plat_info;
+ void __iomem *addr;
+ size_t size;
+ u32 guid;
+ u32 num_rmids;
+};
+
+struct pmt_feature_group {
+ enum pmt_feature_id id;
+ int count;
+ struct kref kref;
+ struct telemetry_region regions[];
+};
+
+int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
+ struct intel_vsec_device *intel_vsec_dev,
+ const char *name);
+
+static inline struct intel_vsec_device *dev_to_ivdev(struct device *dev)
+{
+ return container_of(dev, struct intel_vsec_device, auxdev.dev);
+}
+
+static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device *auxdev)
+{
+ return container_of(auxdev, struct intel_vsec_device, auxdev);
+}
+
+#if IS_ENABLED(CONFIG_INTEL_VSEC)
+int intel_vsec_register(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info);
+int intel_vsec_set_mapping(struct oobmsm_plat_info *plat_info,
+ struct intel_vsec_device *vsec_dev);
+struct oobmsm_plat_info *intel_vsec_get_mapping(struct pci_dev *pdev);
+#else
+static inline int intel_vsec_register(struct pci_dev *pdev,
+ struct intel_vsec_platform_info *info)
+{
+ return -ENODEV;
+}
+static inline int intel_vsec_set_mapping(struct oobmsm_plat_info *plat_info,
+ struct intel_vsec_device *vsec_dev)
+{
+ return -ENODEV;
+}
+static inline struct oobmsm_plat_info *intel_vsec_get_mapping(struct pci_dev *pdev)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_INTEL_PMT_TELEMETRY)
+struct pmt_feature_group *
+intel_pmt_get_regions_by_feature(enum pmt_feature_id id);
+
+void intel_pmt_put_feature_group(struct pmt_feature_group *feature_group);
+#else
+static inline struct pmt_feature_group *
+intel_pmt_get_regions_by_feature(enum pmt_feature_id id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void
+intel_pmt_put_feature_group(struct pmt_feature_group *feature_group) {}
+#endif
+
+#endif
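
The telemetry lookup above is reference counted; a hedged sketch of the expected get/put pattern, with the walker function being hypothetical:

	#include <linux/intel_vsec.h>
	#include <linux/printk.h>

	static void walk_crashlog_regions(void)
	{
		struct pmt_feature_group *group;
		int i;

		group = intel_pmt_get_regions_by_feature(FEATURE_CRASH_LOG);
		if (IS_ERR_OR_NULL(group))
			return;

		for (i = 0; i < group->count; i++)
			pr_info("crashlog guid %#x, %zu bytes\n",
				group->regions[i].guid, group->regions[i].size);

		/* Drops the reference taken by the lookup. */
		intel_pmt_put_feature_group(group);
	}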
diff --git a/include/linux/interconnect-clk.h b/include/linux/interconnect-clk.h
new file mode 100644
index 000000000000..9bcee3e9c56c
--- /dev/null
+++ b/include/linux/interconnect-clk.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Ltd.
+ */
+
+#ifndef __LINUX_INTERCONNECT_CLK_H
+#define __LINUX_INTERCONNECT_CLK_H
+
+struct device;
+
+struct icc_clk_data {
+ struct clk *clk;
+ const char *name;
+ unsigned int master_id;
+ unsigned int slave_id;
+};
+
+struct icc_provider *icc_clk_register(struct device *dev,
+ unsigned int first_id,
+ unsigned int num_clocks,
+ const struct icc_clk_data *data);
+int devm_icc_clk_register(struct device *dev, unsigned int first_id,
+ unsigned int num_clocks, const struct icc_clk_data *data);
+void icc_clk_unregister(struct icc_provider *provider);
+
+#endif
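
A minimal sketch of exposing a single clock as an interconnect provider through this helper; the ids and name are made up for illustration:

	#include <linux/clk.h>
	#include <linux/interconnect-clk.h>

	static int my_probe(struct device *dev, struct clk *bus_clk)
	{
		struct icc_clk_data data = {
			.clk		= bus_clk,
			.name		= "bus",
			.master_id	= 0,
			.slave_id	= 1,
		};

		/* first_id = 0: base id for the generated icc nodes. */
		return devm_icc_clk_register(dev, 0, 1, &data);
	}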
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
index 6bd01f7159c6..8a2f652a05ff 100644
--- a/include/linux/interconnect-provider.h
+++ b/include/linux/interconnect-provider.h
@@ -33,10 +33,10 @@ struct icc_node_data {
*/
struct icc_onecell_data {
unsigned int num_nodes;
- struct icc_node *nodes[];
+ struct icc_node *nodes[] __counted_by(num_nodes);
};
-struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
+struct icc_node *of_icc_xlate_onecell(const struct of_phandle_args *spec,
void *data);
/**
@@ -65,8 +65,9 @@ struct icc_provider {
u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
void (*pre_aggregate)(struct icc_node *node);
int (*get_bw)(struct icc_node *node, u32 *avg, u32 *peak);
- struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data);
- struct icc_node_data* (*xlate_extended)(struct of_phandle_args *spec, void *data);
+ struct icc_node* (*xlate)(const struct of_phandle_args *spec, void *data);
+ struct icc_node_data* (*xlate_extended)(const struct of_phandle_args *spec,
+ void *data);
struct device *dev;
int users;
bool inter_set;
@@ -115,16 +116,19 @@ struct icc_node {
int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
+struct icc_node *icc_node_create_dyn(void);
struct icc_node *icc_node_create(int id);
void icc_node_destroy(int id);
+int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider, const char *name);
+int icc_link_nodes(struct icc_node *src_node, struct icc_node **dst_node);
int icc_link_create(struct icc_node *node, const int dst_id);
-int icc_link_destroy(struct icc_node *src, struct icc_node *dst);
void icc_node_add(struct icc_node *node, struct icc_provider *provider);
void icc_node_del(struct icc_node *node);
int icc_nodes_remove(struct icc_provider *provider);
-int icc_provider_add(struct icc_provider *provider);
-int icc_provider_del(struct icc_provider *provider);
-struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec);
+void icc_provider_init(struct icc_provider *provider);
+int icc_provider_register(struct icc_provider *provider);
+void icc_provider_deregister(struct icc_provider *provider);
+struct icc_node_data *of_icc_get_from_provider(const struct of_phandle_args *spec);
void icc_sync_state(struct device *dev);
#else
@@ -135,6 +139,11 @@ static inline int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
return -ENOTSUPP;
}
+static inline struct icc_node *icc_node_create_dyn(void)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline struct icc_node *icc_node_create(int id)
{
return ERR_PTR(-ENOTSUPP);
@@ -144,12 +153,18 @@ static inline void icc_node_destroy(int id)
{
}
-static inline int icc_link_create(struct icc_node *node, const int dst_id)
+static inline int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider,
+ const char *name)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
-static inline int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
+static inline int icc_link_nodes(struct icc_node *src_node, struct icc_node **dst_node)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int icc_link_create(struct icc_node *node, const int dst_id)
{
return -ENOTSUPP;
}
@@ -167,17 +182,16 @@ static inline int icc_nodes_remove(struct icc_provider *provider)
return -ENOTSUPP;
}
-static inline int icc_provider_add(struct icc_provider *provider)
-{
- return -ENOTSUPP;
-}
+static inline void icc_provider_init(struct icc_provider *provider) { }
-static inline int icc_provider_del(struct icc_provider *provider)
+static inline int icc_provider_register(struct icc_provider *provider)
{
return -ENOTSUPP;
}
-static inline struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
+static inline void icc_provider_deregister(struct icc_provider *provider) { }
+
+static inline struct icc_node_data *of_icc_get_from_provider(const struct of_phandle_args *spec)
{
return ERR_PTR(-ENOTSUPP);
}
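
With icc_provider_add()/icc_provider_del() gone, provider bring-up now splits into init, node population, and registration; a hedged sketch of the sequence for a provider with one node:

	#include <linux/interconnect-provider.h>

	static int my_icc_setup(struct icc_provider *provider,
				struct icc_node *node)
	{
		/* Initialize lists/fields before any node is attached. */
		icc_provider_init(provider);

		icc_node_add(node, provider);

		/* Publish the provider only once its topology is in place. */
		return icc_provider_register(provider);
	}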
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
index f2dd2fc8d3cd..4b12821528a6 100644
--- a/include/linux/interconnect.h
+++ b/include/linux/interconnect.h
@@ -16,10 +16,13 @@
#define MBps_to_icc(x) ((x) * 1000)
#define GBps_to_icc(x) ((x) * 1000 * 1000)
#define bps_to_icc(x) (1)
-#define kbps_to_icc(x) ((x) / 8 + ((x) % 8 ? 1 : 0))
+#define kbps_to_icc(x) (((x) + 7) / 8)
#define Mbps_to_icc(x) ((x) * 1000 / 8)
#define Gbps_to_icc(x) ((x) * 1000 * 1000 / 8)
+/* macro to indicate dynamic id allocation */
+#define ICC_ALLOC_DYN_ID -1
+
struct icc_path;
struct device;
@@ -38,19 +41,11 @@ struct icc_bulk_data {
u32 peak_bw;
};
-int __must_check of_icc_bulk_get(struct device *dev, int num_paths,
- struct icc_bulk_data *paths);
-void icc_bulk_put(int num_paths, struct icc_bulk_data *paths);
-int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths);
-int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths);
-void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths);
-
#if IS_ENABLED(CONFIG_INTERCONNECT)
-struct icc_path *icc_get(struct device *dev, const int src_id,
- const int dst_id);
struct icc_path *of_icc_get(struct device *dev, const char *name);
struct icc_path *devm_of_icc_get(struct device *dev, const char *name);
+int devm_of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths);
struct icc_path *of_icc_get_by_index(struct device *dev, int idx);
void icc_put(struct icc_path *path);
int icc_enable(struct icc_path *path);
@@ -58,15 +53,15 @@ int icc_disable(struct icc_path *path);
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
void icc_set_tag(struct icc_path *path, u32 tag);
const char *icc_get_name(struct icc_path *path);
+int __must_check of_icc_bulk_get(struct device *dev, int num_paths,
+ struct icc_bulk_data *paths);
+void icc_bulk_put(int num_paths, struct icc_bulk_data *paths);
+int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths);
+int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths);
+void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths);
#else
-static inline struct icc_path *icc_get(struct device *dev, const int src_id,
- const int dst_id)
-{
- return NULL;
-}
-
static inline struct icc_path *of_icc_get(struct device *dev,
const char *name)
{
@@ -112,6 +107,35 @@ static inline const char *icc_get_name(struct icc_path *path)
return NULL;
}
+static inline int of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline int devm_of_icc_bulk_get(struct device *dev, int num_paths,
+ struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline void icc_bulk_put(int num_paths, struct icc_bulk_data *paths)
+{
+}
+
+static inline int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths)
+{
+ return 0;
+}
+
+static inline void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths)
+{
+}
+
#endif /* CONFIG_INTERCONNECT */
#endif /* __LINUX_INTERCONNECT_H */
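
The kbps_to_icc() change above is the usual round-up division rewrite; both forms compute ceil(x / 8) for non-negative x, e.g. x = 9:

	/* old: (9) / 8 + ((9) % 8 ? 1 : 0) == 1 + 1 == 2 */
	/* new: ((9) + 7) / 8 == 16 / 8 == 2 */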
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 4777850a6dc7..266f2b39213a 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -5,14 +5,16 @@
#include <linux/kernel.h>
#include <linux/bitops.h>
-#include <linux/cpumask.h>
+#include <linux/cleanup.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
+#include <linux/cpumask_types.h>
#include <linux/workqueue.h>
+#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/ptrace.h>
@@ -64,6 +66,10 @@
* IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
* Users will enable it explicitly by enable_irq() or enable_nmi()
* later.
+ * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
+ * depends on IRQF_PERCPU.
+ * IRQF_COND_ONESHOT - Agree to do IRQF_ONESHOT if already set for a shared
+ * interrupt.
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
@@ -78,6 +84,8 @@
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000
#define IRQF_NO_AUTOEN 0x00080000
+#define IRQF_NO_DEBUG 0x00100000
+#define IRQF_COND_ONESHOT 0x00200000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
@@ -101,6 +109,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
* @name: name of the device
* @dev_id: cookie to identify the device
* @percpu_dev_id: cookie to identify the device
+ * @affinity: CPUs this irqaction is allowed to run on
* @next: pointer to the next irqaction for shared interrupts
* @irq: interrupt number
* @flags: flags (see IRQF_* above)
@@ -113,8 +122,11 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
*/
struct irqaction {
irq_handler_t handler;
- void *dev_id;
- void __percpu *percpu_dev_id;
+ union {
+ void *dev_id;
+ void __percpu *percpu_dev_id;
+ };
+ const struct cpumask *affinity;
struct irqaction *next;
irq_handler_t thread_fn;
struct task_struct *thread;
@@ -132,7 +144,7 @@ extern irqreturn_t no_action(int cpl, void *dev_id);
/*
* If a (PCI) device interrupt is not connected we set dev->irq to
* IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
- * can distingiush that case from other error returns.
+ * can distinguish that case from other error returns.
*
* 0x80000000 is guaranteed to be outside the available range of interrupts
* and easy to distinguish from other possible incorrect values.
@@ -161,7 +173,7 @@ static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
const char *name, void *dev)
{
- return request_threaded_irq(irq, handler, NULL, flags, name, dev);
+ return request_threaded_irq(irq, handler, NULL, flags | IRQF_COND_ONESHOT, name, dev);
}
extern int __must_check
@@ -171,7 +183,7 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *devname,
- void __percpu *percpu_dev_id);
+ const cpumask_t *affinity, void __percpu *percpu_dev_id);
extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
@@ -182,12 +194,21 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id)
{
return __request_percpu_irq(irq, handler, 0,
- devname, percpu_dev_id);
+ devname, NULL, percpu_dev_id);
+}
+
+static inline int __must_check
+request_percpu_irq_affinity(unsigned int irq, irq_handler_t handler,
+ const char *devname, const cpumask_t *affinity,
+ void __percpu *percpu_dev_id)
+{
+ return __request_percpu_irq(irq, handler, 0,
+ devname, affinity, percpu_dev_id);
}
extern int __must_check
-request_percpu_nmi(unsigned int irq, irq_handler_t handler,
- const char *devname, void __percpu *dev);
+request_percpu_nmi(unsigned int irq, irq_handler_t handler, const char *name,
+ const struct cpumask *affinity, void __percpu *dev_id);
extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
@@ -218,24 +239,6 @@ devm_request_any_context_irq(struct device *dev, unsigned int irq,
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
-/*
- * On lockdep we dont want to enable hardirqs in hardirq
- * context. Use local_irq_enable_in_hardirq() to annotate
- * kernel code that has to do this nevertheless (pretty much
- * the only valid case is for old/broken hardware that is
- * insanely slow).
- *
- * NOTE: in theory this might break fragile code that relies
- * on hardirq delivery - in practice we dont seem to have such
- * places left. So the only effect should be slightly increased
- * irqs-off latencies.
- */
-#ifdef CONFIG_LOCKDEP
-# define local_irq_enable_in_hardirq() do { } while (0)
-#else
-# define local_irq_enable_in_hardirq() local_irq_enable()
-#endif
-
bool irq_has_action(unsigned int irq);
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
@@ -246,6 +249,9 @@ extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);
+DEFINE_LOCK_GUARD_1(disable_irq, int,
+ disable_irq(*_T->lock), enable_irq(*_T->lock))
+
extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
@@ -283,7 +289,7 @@ struct irq_affinity_notify {
#define IRQ_AFFINITY_MAX_SETS 4
/**
- * struct irq_affinity - Description for automatic irq affinity assignements
+ * struct irq_affinity - Description for automatic irq affinity assignments
* @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
* the MSI(-X) vector space
* @post_vectors: Don't apply affinity to @post_vectors at end of
@@ -319,44 +325,52 @@ struct irq_affinity_desc {
extern cpumask_var_t irq_default_affinity;
-/* Internal implementation. Use the helpers below */
-extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
- bool force);
+extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
+extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
+
+extern int irq_can_set_affinity(unsigned int irq);
+extern int irq_select_affinity(unsigned int irq);
+
+extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
+ bool setaffinity);
/**
- * irq_set_affinity - Set the irq affinity of a given irq
- * @irq: Interrupt to set affinity
- * @cpumask: cpumask
+ * irq_update_affinity_hint - Update the affinity hint
+ * @irq: Interrupt to update
+ * @m: cpumask pointer (NULL to clear the hint)
*
- * Fails if cpumask does not contain an online CPU
+ * Updates the affinity hint, but does not change the affinity of the interrupt.
*/
static inline int
-irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
{
- return __irq_set_affinity(irq, cpumask, false);
+ return __irq_apply_affinity_hint(irq, m, false);
}
/**
- * irq_force_affinity - Force the irq affinity of a given irq
- * @irq: Interrupt to set affinity
- * @cpumask: cpumask
+ * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
+ * cpumask to the interrupt
+ * @irq: Interrupt to update
+ * @m: cpumask pointer (NULL to clear the hint)
*
- * Same as irq_set_affinity, but without checking the mask against
- * online cpus.
- *
- * Solely for low level cpu hotplug code, where we need to make per
- * cpu interrupts affine before the cpu becomes online.
+ * Updates the affinity hint and if @m is not NULL it applies it as the
+ * affinity of that interrupt.
*/
static inline int
-irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
{
- return __irq_set_affinity(irq, cpumask, true);
+ return __irq_apply_affinity_hint(irq, m, true);
}
-extern int irq_can_set_affinity(unsigned int irq);
-extern int irq_select_affinity(unsigned int irq);
+/*
+ * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
+ * instead.
+ */
+static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+ return irq_set_affinity_and_hint(irq, m);
+}
-extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
extern int irq_update_affinity_desc(unsigned int irq,
struct irq_affinity_desc *affinity);
@@ -388,6 +402,18 @@ static inline int irq_can_set_affinity(unsigned int irq)
static inline int irq_select_affinity(unsigned int irq) { return 0; }
+static inline int irq_update_affinity_hint(unsigned int irq,
+ const struct cpumask *m)
+{
+ return -EINVAL;
+}
+
+static inline int irq_set_affinity_and_hint(unsigned int irq,
+ const struct cpumask *m)
+{
+ return -EINVAL;
+}
+
static inline int irq_set_affinity_hint(unsigned int irq,
const struct cpumask *m)
{
@@ -435,7 +461,7 @@ irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
disable_irq_nosync(irq);
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
local_irq_disable();
#endif
}
@@ -443,22 +469,14 @@ static inline void disable_irq_nosync_lockdep(unsigned int irq)
static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
disable_irq_nosync(irq);
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
local_irq_save(*flags);
#endif
}
-static inline void disable_irq_lockdep(unsigned int irq)
-{
- disable_irq(irq);
-#ifdef CONFIG_LOCKDEP
- local_irq_disable();
-#endif
-}
-
static inline void enable_irq_lockdep(unsigned int irq)
{
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
local_irq_enable();
#endif
enable_irq(irq);
@@ -466,7 +484,7 @@ static inline void enable_irq_lockdep(unsigned int irq)
static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT)
local_irq_restore(*flags);
#endif
enable_irq(irq);
@@ -502,12 +520,13 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
-# define force_irqthreads (true)
+# define force_irqthreads() (true)
# else
-extern bool force_irqthreads;
+DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
+# define force_irqthreads() (static_branch_unlikely(&force_irqthreads_key))
# endif
#else
-#define force_irqthreads (0)
+#define force_irqthreads() (false)
#endif
#ifndef local_softirq_pending
@@ -554,7 +573,20 @@ enum
NR_SOFTIRQS
};
-#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/*
+ * The following vectors can be safely ignored after ksoftirqd is parked:
+ *
+ * _ RCU:
+ * 1) rcutree_migrate_callbacks() migrates the queue.
+ * 2) rcutree_report_cpu_dead() reports the final quiescent states.
+ *
+ * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
+ *
+ * _ (HR)TIMER_SOFTIRQ: (hr)timers_dead_cpu() migrates the queue
+ */
+#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\
+ BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ))
+
/* map softirq index to softirq name. update 'softirq_to_name' in
* kernel/softirq.c when adding a new softirq.
@@ -567,19 +599,75 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
struct softirq_action
{
- void (*action)(struct softirq_action *);
+ void (*action)(void);
};
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+#ifdef CONFIG_PREEMPT_RT
+extern void do_softirq_post_smp_call_flush(unsigned int was_pending);
+#else
+static inline void do_softirq_post_smp_call_flush(unsigned int unused)
+{
+ do_softirq();
+}
+#endif
+
+extern void open_softirq(int nr, void (*action)(void));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
+/*
+ * With forced-threaded interrupts enabled a raised softirq is deferred to
+ * ksoftirqd unless it can be handled within the threaded interrupt. This
+ * affects timer_list timers and hrtimers which are explicitly marked with
+ * HRTIMER_MODE_SOFT.
+ * With PREEMPT_RT enabled more hrtimers are moved to softirq for processing,
+ * which includes all timers that are not explicitly marked HRTIMER_MODE_HARD.
+ * Userspace controlled timers (like the clock_nanosleep() interface) are
+ * divided into two categories: tasks with an elevated scheduling policy,
+ * i.e. SCHED_{FIFO|RR|DL}, and the remaining tasks. Tasks with an elevated
+ * scheduling policy are woken up directly from the HARDIRQ while all other
+ * wake ups are delayed to softirq and so to ksoftirqd.
+ *
+ * ksoftirqd runs at SCHED_OTHER policy, at which it should remain since it
+ * handles the softirq in an overloaded situation (i.e. it could not handle
+ * everything within its last run).
+ * If the timers were handled at SCHED_OTHER priority then they would compete
+ * with all other SCHED_OTHER tasks for CPU resources and possibly be delayed.
+ * Moving timer softirqs to a low priority SCHED_FIFO thread instead ensures
+ * that timers are handled before scheduling any SCHED_OTHER thread.
+ */
+DECLARE_PER_CPU(struct task_struct *, ktimerd);
+DECLARE_PER_CPU(unsigned long, pending_timer_softirq);
+void raise_ktimers_thread(unsigned int nr);
+
+static inline unsigned int local_timers_pending_force_th(void)
+{
+ return __this_cpu_read(pending_timer_softirq);
+}
+
+static inline void raise_timer_softirq(unsigned int nr)
+{
+ lockdep_assert_in_irq();
+ if (force_irqthreads())
+ raise_ktimers_thread(nr);
+ else
+ __raise_softirq_irqoff(nr);
+}
+
+static inline unsigned int local_timers_pending(void)
+{
+ if (force_irqthreads())
+ return local_timers_pending_force_th();
+ else
+ return local_softirq_pending();
+}
+
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
static inline struct task_struct *this_cpu_ksoftirqd(void)
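
Among the additions above, the DEFINE_LOCK_GUARD_1() wrapper gives disable_irq()/enable_irq() scope-based pairing via linux/cleanup.h; a minimal sketch with a hypothetical caller:

	#include <linux/interrupt.h>

	static void poke_shared_state(int irq)
	{
		/* Masks the IRQ here, re-enables it on every scope exit. */
		guard(disable_irq)(&irq);

		/* ... touch state normally raced against the handler ... */
	}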
diff --git a/include/linux/interval_tree.h b/include/linux/interval_tree.h
index 288c26f50732..9d5791e9f737 100644
--- a/include/linux/interval_tree.h
+++ b/include/linux/interval_tree.h
@@ -20,6 +20,10 @@ interval_tree_remove(struct interval_tree_node *node,
struct rb_root_cached *root);
extern struct interval_tree_node *
+interval_tree_subtree_search(struct interval_tree_node *node,
+ unsigned long start, unsigned long last);
+
+extern struct interval_tree_node *
interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start, unsigned long last);
@@ -27,4 +31,62 @@ extern struct interval_tree_node *
interval_tree_iter_next(struct interval_tree_node *node,
unsigned long start, unsigned long last);
+/**
+ * struct interval_tree_span_iter - Find used and unused spans.
+ * @start_hole: Start of an interval for a hole when is_hole == 1
+ * @last_hole: Inclusive end of an interval for a hole when is_hole == 1
+ * @start_used: Start of a used interval when is_hole == 0
+ * @last_used: Inclusive end of a used interval when is_hole == 0
+ * @is_hole: 0 == used, 1 == is_hole, -1 == done iteration
+ *
+ * This iterator travels over spans in an interval tree. It does not return
+ * nodes but classifies each span as either a hole, where no nodes intersect, or
+ * a used, which is fully covered by nodes. Each iteration step toggles between
+ * hole and used until the entire range is covered. The returned spans always
+ * fully cover the requested range.
+ *
+ * The iterator is greedy, it always returns the largest hole or used possible,
+ * consolidating all consecutive nodes.
+ *
+ * Use interval_tree_span_iter_done() to detect end of iteration.
+ */
+struct interval_tree_span_iter {
+ /* private: not for use by the caller */
+ struct interval_tree_node *nodes[2];
+ unsigned long first_index;
+ unsigned long last_index;
+
+ /* public: */
+ union {
+ unsigned long start_hole;
+ unsigned long start_used;
+ };
+ union {
+ unsigned long last_hole;
+ unsigned long last_used;
+ };
+ int is_hole;
+};
+
+void interval_tree_span_iter_first(struct interval_tree_span_iter *state,
+ struct rb_root_cached *itree,
+ unsigned long first_index,
+ unsigned long last_index);
+void interval_tree_span_iter_advance(struct interval_tree_span_iter *iter,
+ struct rb_root_cached *itree,
+ unsigned long new_index);
+void interval_tree_span_iter_next(struct interval_tree_span_iter *state);
+
+static inline bool
+interval_tree_span_iter_done(struct interval_tree_span_iter *state)
+{
+ return state->is_hole == -1;
+}
+
+#define interval_tree_for_each_span(span, itree, first_index, last_index) \
+ for (interval_tree_span_iter_first(span, itree, \
+ first_index, last_index); \
+ !interval_tree_span_iter_done(span); \
+ interval_tree_span_iter_next(span))
+
#endif /* _LINUX_INTERVAL_TREE_H */
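
A short sketch of the span iterator in use, counting the indexes in a range that no node covers:

	#include <linux/interval_tree.h>

	static unsigned long count_hole_indexes(struct rb_root_cached *itree,
						unsigned long first,
						unsigned long last)
	{
		struct interval_tree_span_iter span;
		unsigned long holes = 0;

		interval_tree_for_each_span(&span, itree, first, last)
			if (span.is_hole)
				holes += span.last_hole - span.start_hole + 1;

		return holes;
	}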
diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h
index aaa8a0767aa3..c5a2fed49eb0 100644
--- a/include/linux/interval_tree_generic.h
+++ b/include/linux/interval_tree_generic.h
@@ -77,7 +77,7 @@ ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, \
* Cond2: start <= ITLAST(node) \
*/ \
\
-static ITSTRUCT * \
+ITSTATIC ITSTRUCT * \
ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
{ \
while (true) { \
@@ -104,12 +104,8 @@ ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
if (ITSTART(node) <= last) { /* Cond1 */ \
if (start <= ITLAST(node)) /* Cond2 */ \
return node; /* node is leftmost match */ \
- if (node->ITRB.rb_right) { \
- node = rb_entry(node->ITRB.rb_right, \
- ITSTRUCT, ITRB); \
- if (start <= node->ITSUBTREE) \
- continue; \
- } \
+ node = rb_entry(node->ITRB.rb_right, ITSTRUCT, ITRB); \
+ continue; \
} \
return NULL; /* No match */ \
} \
diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h
index f32522bb3aa5..d3eade7cf663 100644
--- a/include/linux/io-64-nonatomic-hi-lo.h
+++ b/include/linux/io-64-nonatomic-hi-lo.h
@@ -101,22 +101,38 @@ static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr)
#ifndef ioread64
#define ioread64_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define ioread64 __ioread64_hi_lo
+#else
#define ioread64 ioread64_hi_lo
#endif
+#endif
#ifndef iowrite64
#define iowrite64_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define iowrite64 __iowrite64_hi_lo
+#else
#define iowrite64 iowrite64_hi_lo
#endif
+#endif
#ifndef ioread64be
#define ioread64be_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define ioread64be __ioread64be_hi_lo
+#else
#define ioread64be ioread64be_hi_lo
#endif
+#endif
#ifndef iowrite64be
#define iowrite64be_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define iowrite64be __iowrite64be_hi_lo
+#else
#define iowrite64be iowrite64be_hi_lo
#endif
+#endif
#endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */
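
For context, the hi_lo accessors these macros fall back to split one 64-bit access into two 32-bit ones, upper half first; roughly this pattern (a sketch, not the exact header contents):

	static inline u64 example_hi_lo_readq(const volatile void __iomem *addr)
	{
		const volatile u32 __iomem *p = addr;
		u32 low, high;

		high = readl(p + 1);	/* upper 32 bits first ... */
		low = readl(p);		/* ... then the lower 32 bits */

		return low + ((u64)high << 32);
	}

The lo-hi variant below is identical except that the low word is read first, which matters for hardware that latches the other half on the first access.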
diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h
index 448a21435dba..94e676ec3d3f 100644
--- a/include/linux/io-64-nonatomic-lo-hi.h
+++ b/include/linux/io-64-nonatomic-lo-hi.h
@@ -101,22 +101,38 @@ static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr)
#ifndef ioread64
#define ioread64_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define ioread64 __ioread64_lo_hi
+#else
#define ioread64 ioread64_lo_hi
#endif
+#endif
#ifndef iowrite64
#define iowrite64_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define iowrite64 __iowrite64_lo_hi
+#else
#define iowrite64 iowrite64_lo_hi
#endif
+#endif
#ifndef ioread64be
#define ioread64be_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define ioread64be __ioread64be_lo_hi
+#else
#define ioread64be ioread64be_lo_hi
#endif
+#endif
#ifndef iowrite64be
#define iowrite64be_is_nonatomic
+#if defined(CONFIG_GENERIC_IOMAP) && defined(CONFIG_64BIT)
+#define iowrite64be __iowrite64be_lo_hi
+#else
#define iowrite64be iowrite64be_lo_hi
#endif
+#endif
#endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index e9743cfd8585..c16353cc6e3c 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -69,7 +69,10 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
BUG_ON(offset >= mapping->size);
phys_addr = mapping->base + offset;
- preempt_disable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+ else
+ migrate_disable();
pagefault_disable();
return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
}
@@ -79,7 +82,10 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
{
kunmap_local_indexed((void __force *)vaddr);
pagefault_enable();
- preempt_enable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+ else
+ migrate_enable();
}
static inline void __iomem *
@@ -132,13 +138,7 @@ io_mapping_init_wc(struct io_mapping *iomap,
iomap->base = base;
iomap->size = size;
-#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
- iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
-#elif defined(pgprot_writecombine)
iomap->prot = pgprot_writecombine(PAGE_KERNEL);
-#else
- iomap->prot = pgprot_noncached(PAGE_KERNEL);
-#endif
return iomap;
}
@@ -168,7 +168,10 @@ static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
- preempt_disable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
+ else
+ migrate_disable();
pagefault_disable();
return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
}
@@ -178,7 +181,10 @@ io_mapping_unmap_atomic(void __iomem *vaddr)
{
io_mapping_unmap(vaddr);
pagefault_enable();
- preempt_enable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+ else
+ migrate_enable();
}
static inline void __iomem *
@@ -220,6 +226,3 @@ io_mapping_free(struct io_mapping *iomap)
}
#endif /* _LINUX_IO_MAPPING_H */
-
-int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn, unsigned long size);
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 4d40dfa75b55..7a1516011ccf 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -15,7 +15,8 @@ enum io_pgtable_fmt {
ARM_64_LPAE_S2,
ARM_V7S,
ARM_MALI_LPAE,
- AMD_IOMMU_V1,
+ APPLE_DART,
+ APPLE_DART2,
IO_PGTABLE_NUM_FMTS,
};
@@ -73,22 +74,35 @@ struct io_pgtable_cfg {
* to support up to 35 bits PA where the bit32, bit33 and bit34 are
* encoded in the bit9, bit4 and bit5 of the PTE respectively.
*
- * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
- * on unmap, for DMA domains using the flush queue mechanism for
- * delayed invalidation.
+ * IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT: (ARM v7s format) MediaTek IOMMUs
+ * extend the translation table base support up to 35 bits PA; the
+ * encoding format is the same as IO_PGTABLE_QUIRK_ARM_MTK_EXT.
*
* IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
* for use in the upper half of a split address space.
*
* IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
* attributes set in the TCR for a non-coherent page-table walker.
+ *
+ * IO_PGTABLE_QUIRK_ARM_HD: Enables dirty tracking in stage 1 pagetable.
+ * IO_PGTABLE_QUIRK_ARM_S2FWB: Use the FWB format for the MemAttrs bits.
+ *
+ * IO_PGTABLE_QUIRK_NO_WARN: Do not WARN_ON() on conflicting
+ * mappings, but silently return -EEXIST. Normally an attempt
+ * to map over an existing mapping would indicate some sort of
+ * kernel bug, which would justify the WARN_ON(). But for GPU
+ * drivers, this could be under the control of userspace, which
+ * deserves an error return but should not spam dmesg.
*/
- #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
- #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
- #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
- #define IO_PGTABLE_QUIRK_NON_STRICT BIT(4)
- #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
- #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
+ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
+ #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
+ #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
+ #define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT BIT(4)
+ #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
+ #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
+ #define IO_PGTABLE_QUIRK_ARM_HD BIT(7)
+ #define IO_PGTABLE_QUIRK_ARM_S2FWB BIT(8)
+ #define IO_PGTABLE_QUIRK_NO_WARN BIT(9)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
@@ -97,6 +111,30 @@ struct io_pgtable_cfg {
const struct iommu_flush_ops *tlb;
struct device *iommu_dev;
+ /**
+ * @alloc: Custom page allocator.
+ *
+ * Optional hook used to allocate page tables. If this function is NULL,
+ * @free must be NULL too.
+ *
+ * Memory returned should be zeroed and suitable for dma_map_single() and
+ * virt_to_phys().
+ *
+ * Not all formats support custom page allocators. Before considering
+ * passing a non-NULL value, make sure the chosen page format supports
+ * this feature.
+ */
+ void *(*alloc)(void *cookie, size_t size, gfp_t gfp);
+
+ /**
+ * @free: Custom page de-allocator.
+ *
+ * Optional hook used to free page tables allocated with the @alloc
+ * hook. Must be non-NULL if @alloc is not NULL, must be NULL
+ * otherwise.
+ */
+ void (*free)(void *cookie, void *pages, size_t size);
+
/* Low-level data specific to the table format */
union {
struct {
@@ -136,26 +174,53 @@ struct io_pgtable_cfg {
u64 transtab;
u64 memattr;
} arm_mali_lpae_cfg;
+
+ struct {
+ u64 ttbr[4];
+ u32 n_ttbrs;
+ u32 n_levels;
+ } apple_dart_cfg;
+
+ struct {
+ int nid;
+ } amd;
};
};
/**
+ * struct arm_lpae_io_pgtable_walk_data - information from a pgtable walk
+ *
+ * @ptes: The recorded PTE values from the walk
+ */
+struct arm_lpae_io_pgtable_walk_data {
+ u64 ptes[4];
+};
+
+/**
* struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
*
- * @map: Map a physically contiguous memory region.
- * @unmap: Unmap a physically contiguous memory region.
+ * @map_pages: Map a physically contiguous range of pages of the same size.
+ * @unmap_pages: Unmap a range of virtually contiguous pages of the same size.
* @iova_to_phys: Translate iova to physical address.
+ * @pgtable_walk: (optional) Perform a page table walk for a given iova.
*
* These functions map directly onto the iommu_ops member functions with
* the same names.
*/
struct io_pgtable_ops {
- int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
- size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather);
+ int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped);
+ size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather);
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
unsigned long iova);
+ int (*pgtable_walk)(struct io_pgtable_ops *ops, unsigned long iova, void *wd);
+ int (*read_and_clear_dirty)(struct io_pgtable_ops *ops,
+ unsigned long iova, size_t size,
+ unsigned long flags,
+ struct iommu_dirty_bitmap *dirty);
};
/**
@@ -228,15 +293,25 @@ io_pgtable_tlb_add_page(struct io_pgtable *iop,
}
/**
+ * enum io_pgtable_caps - IO page table backend capabilities.
+ */
+enum io_pgtable_caps {
+ /** @IO_PGTABLE_CAP_CUSTOM_ALLOCATOR: Backend accepts custom page table allocators. */
+ IO_PGTABLE_CAP_CUSTOM_ALLOCATOR = BIT(0),
+};
+
+/**
* struct io_pgtable_init_fns - Alloc/free a set of page tables for a
* particular format.
*
* @alloc: Allocate a set of page tables described by cfg.
* @free: Free the page tables associated with iop.
+ * @caps: Combination of @io_pgtable_caps flags encoding the backend capabilities.
*/
struct io_pgtable_init_fns {
struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
void (*free)(struct io_pgtable *iop);
+ u32 caps;
};
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
@@ -246,5 +321,7 @@ extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;
extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns;
#endif /* __IO_PGTABLE_H */
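
A hedged sketch of the custom allocator hooks documented above, built on __get_free_pages(); a real user must first check that the chosen format advertises IO_PGTABLE_CAP_CUSTOM_ALLOCATOR:

	#include <linux/gfp.h>
	#include <linux/io-pgtable.h>

	/* Zeroed, physically contiguous, usable with dma_map_single(). */
	static void *my_pt_alloc(void *cookie, size_t size, gfp_t gfp)
	{
		return (void *)__get_free_pages(gfp | __GFP_ZERO,
						get_order(size));
	}

	static void my_pt_free(void *cookie, void *pages, size_t size)
	{
		free_pages((unsigned long)pages, get_order(size));
	}

Both pointers go into struct io_pgtable_cfg (cfg.alloc/cfg.free) before the page table is allocated.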
diff --git a/include/linux/io.h b/include/linux/io.h
index 9595151d800d..0642c7ee41db 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -6,29 +6,40 @@
#ifndef _LINUX_IO_H
#define _LINUX_IO_H
+#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/init.h>
-#include <linux/bug.h>
-#include <linux/err.h>
#include <asm/io.h>
#include <asm/page.h>
struct device;
-struct resource;
-__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
+#ifndef __iowrite32_copy
+void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
+#endif
+
void __ioread32_copy(void *to, const void __iomem *from, size_t count);
+
+#ifndef __iowrite64_copy
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
+#endif
#ifdef CONFIG_MMU
int ioremap_page_range(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot);
+int vmap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot);
#else
static inline int ioremap_page_range(unsigned long addr, unsigned long end,
phys_addr_t phys_addr, pgprot_t prot)
{
return 0;
}
+static inline int vmap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot)
+{
+ return 0;
+}
#endif
/*
@@ -51,16 +62,12 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
}
#endif
-#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
-
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
resource_size_t size);
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
resource_size_t size);
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
resource_size_t size);
-void __iomem *devm_ioremap_np(struct device *dev, resource_size_t offset,
- resource_size_t size);
void devm_iounmap(struct device *dev, void __iomem *addr);
int check_signature(const volatile void __iomem *io_addr,
const unsigned char *signature, int length);
@@ -70,6 +77,11 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
size_t size, unsigned long flags);
void devm_memunmap(struct device *dev, void *addr);
+/* architectures can override this */
+pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
+ unsigned long size, pgprot_t prot);
+
+
#ifdef CONFIG_PCI
/*
* The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and
@@ -132,6 +144,8 @@ static inline int arch_phys_wc_index(int handle)
#endif
#endif
+int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size);
+
enum {
/* See memremap() kernel-doc for usage description... */
MEMREMAP_WB = 1 << 0,
@@ -166,4 +180,28 @@ static inline void arch_io_free_memtype_wc(resource_size_t base,
}
#endif
+int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
+ resource_size_t size);
+
+#ifdef CONFIG_STRICT_DEVMEM
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+ u64 from = ((u64)pfn) << PAGE_SHIFT;
+ u64 to = from + size;
+ u64 cursor = from;
+
+ while (cursor < to) {
+ if (!devmem_is_allowed(pfn))
+ return 0;
+ cursor += PAGE_SIZE;
+ pfn++;
+ }
+ return 1;
+}
+#else
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+ return 1;
+}
+#endif
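
A sketch of the intended call site, assuming a /dev/mem-style driver (the function name is illustrative); every page of the requested window must pass devmem_is_allowed() before any access is performed:

static int example_check_phys_access(unsigned long pfn, size_t count)
{
	if (!range_is_allowed(pfn, count))
		return -EPERM;
	/* ... safe to map and copy the range from here ... */
	return 0;
}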
#endif /* _LINUX_IO_H */
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 04b650bcbbe5..85fe4e6b275c 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -4,20 +4,24 @@
#include <linux/sched.h>
#include <linux/xarray.h>
+#include <uapi/linux/io_uring.h>
#if defined(CONFIG_IO_URING)
-struct sock *io_uring_get_socket(struct file *file);
-void __io_uring_cancel(struct files_struct *files);
+void __io_uring_cancel(bool cancel_all);
void __io_uring_free(struct task_struct *tsk);
+void io_uring_unreg_ringfd(void);
+const char *io_uring_get_opcode(u8 opcode);
+bool io_is_uring_fops(struct file *file);
-static inline void io_uring_files_cancel(struct files_struct *files)
+static inline void io_uring_files_cancel(void)
{
if (current->io_uring)
- __io_uring_cancel(files);
+ __io_uring_cancel(false);
}
static inline void io_uring_task_cancel(void)
{
- return io_uring_files_cancel(NULL);
+ if (current->io_uring)
+ __io_uring_cancel(true);
}
static inline void io_uring_free(struct task_struct *tsk)
{
@@ -25,19 +29,23 @@ static inline void io_uring_free(struct task_struct *tsk)
__io_uring_free(tsk);
}
#else
-static inline struct sock *io_uring_get_socket(struct file *file)
-{
- return NULL;
-}
static inline void io_uring_task_cancel(void)
{
}
-static inline void io_uring_files_cancel(struct files_struct *files)
+static inline void io_uring_files_cancel(void)
{
}
static inline void io_uring_free(struct task_struct *tsk)
{
}
+static inline const char *io_uring_get_opcode(u8 opcode)
+{
+ return "";
+}
+static inline bool io_is_uring_fops(struct file *file)
+{
+ return false;
+}
#endif
#endif
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
new file mode 100644
index 000000000000..375fd048c4cb
--- /dev/null
+++ b/include/linux/io_uring/cmd.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_IO_URING_CMD_H
+#define _LINUX_IO_URING_CMD_H
+
+#include <uapi/linux/io_uring.h>
+#include <linux/io_uring_types.h>
+#include <linux/blk-mq.h>
+
+/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
+#define IORING_URING_CMD_CANCELABLE (1U << 30)
+/* io_uring_cmd is being issued again */
+#define IORING_URING_CMD_REISSUE (1U << 31)
+
+struct io_uring_cmd {
+ struct file *file;
+ const struct io_uring_sqe *sqe;
+ u32 cmd_op;
+ u32 flags;
+ u8 pdu[32]; /* available inline for free use */
+ u8 unused[8];
+};
+
+static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
+{
+ return sqe->cmd;
+}
+
+static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
+{
+ BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
+}
+#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
+ io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
+ ((pdu_type *)&(cmd)->pdu) \
+)
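
A usage sketch with a hypothetical driver PDU; the compile-time check in io_uring_cmd_private_sz_check() rejects anything larger than the 32-byte pdu[] area:

struct example_cmd_pdu {		/* hypothetical driver type */
	u32 tag;
	u32 status;
};

static void example_fill_pdu(struct io_uring_cmd *ioucmd, u32 tag)
{
	struct example_cmd_pdu *pdu =
		io_uring_cmd_to_pdu(ioucmd, struct example_cmd_pdu);

	pdu->tag = tag;
	pdu->status = 0;
}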
+
+#if defined(CONFIG_IO_URING)
+int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter,
+ struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags);
+int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
+ const struct iovec __user *uvec,
+ size_t uvec_segs,
+ int ddir, struct iov_iter *iter,
+ unsigned issue_flags);
+
+/*
+ * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
+ * and the corresponding io_uring request.
+ *
+ * Note: the caller should never hard code @issue_flags and is only allowed
+ * to pass the mask provided by the core io_uring code.
+ */
+void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret, u64 res2,
+ unsigned issue_flags, bool is_cqe32);
+
+void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
+ io_req_tw_func_t task_work_cb,
+ unsigned flags);
+
+/*
+ * Note: the caller should never hard code @issue_flags and only use the
+ * mask provided by the core io_uring code.
+ */
+void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+ unsigned int issue_flags);
+
+/* Execute the request from a blocking context */
+void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);
+
+/*
+ * Select a buffer from the provided buffer group for multishot uring_cmd.
+ * Returns the selected buffer address and size.
+ */
+struct io_br_sel io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd,
+ unsigned buf_group, size_t *len,
+ unsigned int issue_flags);
+
+/*
+ * Complete a multishot uring_cmd event. This will post a CQE to the completion
+ * queue and update the provided buffer.
+ */
+bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
+ struct io_br_sel *sel, unsigned int issue_flags);
+
+#else
+static inline int
+io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+ struct iov_iter *iter, struct io_uring_cmd *ioucmd,
+ unsigned int issue_flags)
+{
+ return -EOPNOTSUPP;
+}
+static inline int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
+ const struct iovec __user *uvec,
+ size_t uvec_segs,
+ int ddir, struct iov_iter *iter,
+ unsigned issue_flags)
+{
+ return -EOPNOTSUPP;
+}
+static inline void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret,
+ u64 ret2, unsigned issue_flags, bool is_cqe32)
+{
+}
+static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
+ io_req_tw_func_t task_work_cb, unsigned flags)
+{
+}
+static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+}
+static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
+{
+}
+static inline struct io_br_sel
+io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd, unsigned buf_group,
+ size_t *len, unsigned int issue_flags)
+{
+ return (struct io_br_sel) { .val = -EOPNOTSUPP };
+}
+static inline bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
+ struct io_br_sel *sel, unsigned int issue_flags)
+{
+ return true;
+}
+#endif
+
+static inline struct io_uring_cmd *io_uring_cmd_from_tw(struct io_tw_req tw_req)
+{
+ return io_kiocb_to_cmd(tw_req.req, struct io_uring_cmd);
+}
+
+/* task_work executor checks the deferred list completion */
+#define IO_URING_CMD_TASK_WORK_ISSUE_FLAGS IO_URING_F_COMPLETE_DEFER
+
+/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
+static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
+ io_req_tw_func_t task_work_cb)
+{
+ __io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
+}
+
+static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
+ io_req_tw_func_t task_work_cb)
+{
+ __io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
+}
+
+static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
+{
+ return cmd_to_io_kiocb(cmd)->tctx->task;
+}
+
+/*
+ * Return uring_cmd's context reference as its context handle for driver to
+ * track per-context resource, such as registered kernel IO buffer
+ */
+static inline void *io_uring_cmd_ctx_handle(struct io_uring_cmd *cmd)
+{
+ return cmd_to_io_kiocb(cmd)->ctx;
+}
+
+static inline void io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret,
+ unsigned issue_flags)
+{
+ return __io_uring_cmd_done(ioucmd, ret, 0, issue_flags, false);
+}
+
+static inline void io_uring_cmd_done32(struct io_uring_cmd *ioucmd, s32 ret,
+ u64 res2, unsigned issue_flags)
+{
+ return __io_uring_cmd_done(ioucmd, ret, res2, issue_flags, true);
+}
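
To illustrate the completion path, a sketch of deferring a CQE post from interrupt context to task_work (the driver-side names are hypothetical); the CQE must be posted from task context, and the callback uses the issue-flags mask reserved for the task_work executor:

static void example_cmd_tw_cb(struct io_tw_req tw_req, io_tw_token_t tw)
{
	struct io_uring_cmd *ioucmd = io_uring_cmd_from_tw(tw_req);

	io_uring_cmd_done(ioucmd, 0, IO_URING_CMD_TASK_WORK_ISSUE_FLAGS);
}

static void example_irq_complete(struct io_uring_cmd *ioucmd)
{
	/* Punt the completion to the submitting task. */
	io_uring_cmd_complete_in_task(ioucmd, example_cmd_tw_cb);
}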
+
+int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
+ void (*release)(void *), unsigned int index,
+ unsigned int issue_flags);
+int io_buffer_unregister_bvec(struct io_uring_cmd *cmd, unsigned int index,
+ unsigned int issue_flags);
+
+#endif /* _LINUX_IO_URING_CMD_H */
diff --git a/include/linux/io_uring/net.h b/include/linux/io_uring/net.h
new file mode 100644
index 000000000000..b58f39fed4d5
--- /dev/null
+++ b/include/linux/io_uring/net.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_IO_URING_NET_H
+#define _LINUX_IO_URING_NET_H
+
+struct io_uring_cmd;
+
+#if defined(CONFIG_IO_URING)
+int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
+
+#else
+static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#endif
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
new file mode 100644
index 000000000000..e1adb0d20a0a
--- /dev/null
+++ b/include/linux/io_uring_types.h
@@ -0,0 +1,744 @@
+#ifndef IO_URING_TYPES_H
+#define IO_URING_TYPES_H
+
+#include <linux/blkdev.h>
+#include <linux/hashtable.h>
+#include <linux/task_work.h>
+#include <linux/bitmap.h>
+#include <linux/llist.h>
+#include <uapi/linux/io_uring.h>
+
+enum {
+ /*
+	 * A hint not to wake right away, but to delay until enough task_work
+	 * items are queued to match the number of CQEs the task is waiting for.
+ *
+ * Must not be used with requests generating more than one CQE.
+ * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
+ */
+ IOU_F_TWQ_LAZY_WAKE = 1,
+};
+
+enum io_uring_cmd_flags {
+ IO_URING_F_COMPLETE_DEFER = 1,
+ IO_URING_F_UNLOCKED = 2,
+	/* the request is executed from poll; it should not be freed */
+ IO_URING_F_MULTISHOT = 4,
+ /* executed by io-wq */
+ IO_URING_F_IOWQ = 8,
+ /* executed inline from syscall */
+ IO_URING_F_INLINE = 16,
+ /* int's last bit, sign checks are usually faster than a bit test */
+ IO_URING_F_NONBLOCK = INT_MIN,
+
+ /* ctx state flags, for URING_CMD */
+ IO_URING_F_SQE128 = (1 << 8),
+ IO_URING_F_CQE32 = (1 << 9),
+ IO_URING_F_IOPOLL = (1 << 10),
+
+ /* set when uring wants to cancel a previously issued command */
+ IO_URING_F_CANCEL = (1 << 11),
+ IO_URING_F_COMPAT = (1 << 12),
+};
+
+struct io_wq_work_node {
+ struct io_wq_work_node *next;
+};
+
+struct io_wq_work_list {
+ struct io_wq_work_node *first;
+ struct io_wq_work_node *last;
+};
+
+struct io_wq_work {
+ struct io_wq_work_node list;
+ atomic_t flags;
+ /* place it here instead of io_kiocb as it fills padding and saves 4B */
+ int cancel_seq;
+};
+
+struct io_rsrc_data {
+ unsigned int nr;
+ struct io_rsrc_node **nodes;
+};
+
+struct io_file_table {
+ struct io_rsrc_data data;
+ unsigned long *bitmap;
+ unsigned int alloc_hint;
+};
+
+struct io_hash_bucket {
+ struct hlist_head list;
+} ____cacheline_aligned_in_smp;
+
+struct io_hash_table {
+ struct io_hash_bucket *hbs;
+ unsigned hash_bits;
+};
+
+struct io_mapped_region {
+ struct page **pages;
+ void *ptr;
+ unsigned nr_pages;
+ unsigned flags;
+};
+
+/*
+ * Return value from io_buffer_list selection, to avoid stashing it in
+ * struct io_kiocb. For legacy/classic provided buffers, keeping a reference
+ * across execution contexts is fine. But for ring provided buffers, the
+ * list may go away as soon as ->uring_lock is dropped. As the io_kiocb
+ * persists, it's better to just keep the buffer local for those cases.
+ */
+struct io_br_sel {
+ struct io_buffer_list *buf_list;
+ /*
+ * Some selection parts return the user address, others return an error.
+ */
+ union {
+ void __user *addr;
+ ssize_t val;
+ };
+};
+
+
+/*
+ * Arbitrary limit, can be raised if need be
+ */
+#define IO_RINGFD_REG_MAX 16
+
+struct io_uring_task {
+ /* submission side */
+ int cached_refs;
+ const struct io_ring_ctx *last;
+ struct task_struct *task;
+ struct io_wq *io_wq;
+ struct file *registered_rings[IO_RINGFD_REG_MAX];
+
+ struct xarray xa;
+ struct wait_queue_head wait;
+ atomic_t in_cancel;
+ atomic_t inflight_tracked;
+ struct percpu_counter inflight;
+
+ struct { /* task_work */
+ struct llist_head task_list;
+ struct callback_head task_work;
+ } ____cacheline_aligned_in_smp;
+};
+
+struct iou_vec {
+ union {
+ struct iovec *iovec;
+ struct bio_vec *bvec;
+ };
+ unsigned nr; /* number of struct iovec it can hold */
+};
+
+struct io_uring {
+ u32 head;
+ u32 tail;
+};
+
+/*
+ * This data is shared with the application through the mmap at offsets
+ * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
+ *
+ * The offsets to the member fields are published through struct
+ * io_sqring_offsets when calling io_uring_setup.
+ */
+struct io_rings {
+ /*
+ * Head and tail offsets into the ring; the offsets need to be
+ * masked to get valid indices.
+ *
+ * The kernel controls head of the sq ring and the tail of the cq ring,
+ * and the application controls tail of the sq ring and the head of the
+ * cq ring.
+ */
+ struct io_uring sq, cq;
+ /*
+ * Bitmasks to apply to head and tail offsets (constant, equals
+ * ring_entries - 1)
+ */
+ u32 sq_ring_mask, cq_ring_mask;
+ /* Ring sizes (constant, power of 2) */
+ u32 sq_ring_entries, cq_ring_entries;
+ /*
+ * Number of invalid entries dropped by the kernel due to
+ * invalid index stored in array
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application (i.e. get number of "new events" by comparing to
+ * cached value).
+ *
+ * After a new SQ head value was read by the application this
+ * counter includes all submissions that were dropped reaching
+ * the new SQ head (and possibly more).
+ */
+ u32 sq_dropped;
+ /*
+ * Runtime SQ flags
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application.
+ *
+ * The application needs a full memory barrier before checking
+ * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
+ */
+ atomic_t sq_flags;
+ /*
+ * Runtime CQ flags
+ *
+ * Written by the application, shouldn't be modified by the
+ * kernel.
+ */
+ u32 cq_flags;
+ /*
+ * Number of completion events lost because the queue was full;
+ * this should be avoided by the application by making sure
+ * there are not more requests pending than there is space in
+ * the completion queue.
+ *
+ * Written by the kernel, shouldn't be modified by the
+ * application (i.e. get number of "new events" by comparing to
+ * cached value).
+ *
+ * As completion events come in out of order this counter is not
+ * ordered with any other data.
+ */
+ u32 cq_overflow;
+ /*
+ * Ring buffer of completion events.
+ *
+ * The kernel writes completion events fresh every time they are
+ * produced, so the application is allowed to modify pending
+ * entries.
+ */
+ struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
+};
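
Conceptually, a consumer walks the CQ ring with the published head/tail and mask; a sketch follows (applications normally go through liburing, and the tail load needs acquire semantics since the kernel publishes it):

static inline struct io_uring_cqe *example_peek_cqe(struct io_rings *rings)
{
	u32 head = rings->cq.head;

	if (head == smp_load_acquire(&rings->cq.tail))
		return NULL;				/* ring is empty */
	return &rings->cqes[head & rings->cq_ring_mask];
}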
+
+struct io_restriction {
+ DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
+ DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
+ u8 sqe_flags_allowed;
+ u8 sqe_flags_required;
+ bool registered;
+};
+
+struct io_submit_link {
+ struct io_kiocb *head;
+ struct io_kiocb *last;
+};
+
+struct io_submit_state {
+ /* inline/task_work completion list, under ->uring_lock */
+ struct io_wq_work_node free_list;
+ /* batch completion logic */
+ struct io_wq_work_list compl_reqs;
+ struct io_submit_link link;
+
+ bool plug_started;
+ bool need_plug;
+ bool cq_flush;
+ unsigned short submit_nr;
+ struct blk_plug plug;
+};
+
+struct io_alloc_cache {
+ void **entries;
+ unsigned int nr_cached;
+ unsigned int max_cached;
+ unsigned int elem_size;
+ unsigned int init_clear;
+};
+
+struct io_ring_ctx {
+ /* const or read-mostly hot data */
+ struct {
+ unsigned int flags;
+ unsigned int drain_next: 1;
+ unsigned int restricted: 1;
+ unsigned int off_timeout_used: 1;
+ unsigned int drain_active: 1;
+ unsigned int has_evfd: 1;
+ /* all CQEs should be posted only by the submitter task */
+ unsigned int task_complete: 1;
+ unsigned int lockless_cq: 1;
+ unsigned int syscall_iopoll: 1;
+ unsigned int poll_activated: 1;
+ unsigned int drain_disabled: 1;
+ unsigned int compat: 1;
+ unsigned int iowq_limits_set : 1;
+
+ struct task_struct *submitter_task;
+ struct io_rings *rings;
+ struct percpu_ref refs;
+
+ clockid_t clockid;
+ enum tk_offsets clock_offset;
+
+ enum task_work_notify_mode notify_method;
+ unsigned sq_thread_idle;
+ } ____cacheline_aligned_in_smp;
+
+ /* submission data */
+ struct {
+ struct mutex uring_lock;
+
+ /*
+ * Ring buffer of indices into array of io_uring_sqe, which is
+ * mmapped by the application using the IORING_OFF_SQES offset.
+ *
+ * This indirection could e.g. be used to assign fixed
+ * io_uring_sqe entries to operations and only submit them to
+ * the queue when needed.
+ *
+ * The kernel modifies neither the indices array nor the entries
+ * array.
+ */
+ u32 *sq_array;
+ struct io_uring_sqe *sq_sqes;
+ unsigned cached_sq_head;
+ unsigned sq_entries;
+
+ /*
+ * Fixed resources fast path, should be accessed only under
+ * uring_lock, and updated through io_uring_register(2)
+ */
+ atomic_t cancel_seq;
+
+ /*
+ * ->iopoll_list is protected by the ctx->uring_lock for
+ * io_uring instances that don't use IORING_SETUP_SQPOLL.
+ * For SQPOLL, only the single threaded io_sq_thread() will
+ * manipulate the list, hence no extra locking is needed there.
+ */
+ bool poll_multi_queue;
+ struct io_wq_work_list iopoll_list;
+
+ struct io_file_table file_table;
+ struct io_rsrc_data buf_table;
+ struct io_alloc_cache node_cache;
+ struct io_alloc_cache imu_cache;
+
+ struct io_submit_state submit_state;
+
+ /*
+ * Modifications are protected by ->uring_lock and ->mmap_lock.
+ * The buffer list's io mapped region should be stable once
+ * published.
+ */
+ struct xarray io_bl_xa;
+
+ struct io_hash_table cancel_table;
+ struct io_alloc_cache apoll_cache;
+ struct io_alloc_cache netmsg_cache;
+ struct io_alloc_cache rw_cache;
+ struct io_alloc_cache cmd_cache;
+
+ /*
+ * Any cancelable uring_cmd is added to this list in
+ * ->uring_cmd() by io_uring_cmd_insert_cancelable()
+ */
+ struct hlist_head cancelable_uring_cmd;
+ /*
+		 * For hybrid IOPOLL: runtime spent in hybrid polling, excluding
+		 * scheduling time
+ */
+ u64 hybrid_poll_time;
+ } ____cacheline_aligned_in_smp;
+
+ struct {
+ /*
+		 * We cache a range of free CQEs we can use; once exhausted, it
+		 * should go through a slower range setup, see __io_get_cqe()
+ */
+ struct io_uring_cqe *cqe_cached;
+ struct io_uring_cqe *cqe_sentinel;
+
+ unsigned cached_cq_tail;
+ unsigned cq_entries;
+ struct io_ev_fd __rcu *io_ev_fd;
+
+ void *cq_wait_arg;
+ size_t cq_wait_size;
+ } ____cacheline_aligned_in_smp;
+
+ /*
+ * task_work and async notification delivery cacheline. Expected to
+	 * regularly bounce between CPUs.
+ */
+ struct {
+ struct llist_head work_llist;
+ struct llist_head retry_llist;
+ unsigned long check_cq;
+ atomic_t cq_wait_nr;
+ atomic_t cq_timeouts;
+ struct wait_queue_head cq_wait;
+ } ____cacheline_aligned_in_smp;
+
+ /* timeouts */
+ struct {
+ raw_spinlock_t timeout_lock;
+ struct list_head timeout_list;
+ struct list_head ltimeout_list;
+ unsigned cq_last_tm_flush;
+ } ____cacheline_aligned_in_smp;
+
+ spinlock_t completion_lock;
+
+ struct list_head cq_overflow_list;
+
+ struct hlist_head waitid_list;
+
+#ifdef CONFIG_FUTEX
+ struct hlist_head futex_list;
+ struct io_alloc_cache futex_cache;
+#endif
+
+ const struct cred *sq_creds; /* cred used for __io_sq_thread() */
+ struct io_sq_data *sq_data; /* if using sq thread polling */
+
+ struct wait_queue_head sqo_sq_wait;
+ struct list_head sqd_list;
+
+ unsigned int file_alloc_start;
+ unsigned int file_alloc_end;
+
+ /* Keep this last, we don't need it for the fast path */
+ struct wait_queue_head poll_wq;
+ struct io_restriction restrictions;
+
+ /* Stores zcrx object pointers of type struct io_zcrx_ifq */
+ struct xarray zcrx_ctxs;
+
+ u32 pers_next;
+ struct xarray personalities;
+
+ /* hashed buffered write serialization */
+ struct io_wq_hash *hash_map;
+
+ /* Only used for accounting purposes */
+ struct user_struct *user;
+ struct mm_struct *mm_account;
+
+ /* ctx exit and cancelation */
+ struct llist_head fallback_llist;
+ struct delayed_work fallback_work;
+ struct work_struct exit_work;
+ struct list_head tctx_list;
+ struct completion ref_comp;
+
+ /* io-wq management, e.g. thread count */
+ u32 iowq_limits[2];
+
+ struct callback_head poll_wq_task_work;
+ struct list_head defer_list;
+ unsigned nr_drained;
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ struct list_head napi_list; /* track busy poll napi_id */
+ spinlock_t napi_lock; /* napi_list lock */
+
+ /* napi busy poll default timeout */
+ ktime_t napi_busy_poll_dt;
+ bool napi_prefer_busy_poll;
+ u8 napi_track_mode;
+
+ DECLARE_HASHTABLE(napi_ht, 4);
+#endif
+
+ /* protected by ->completion_lock */
+ unsigned evfd_last_cq_tail;
+ unsigned nr_req_allocated;
+
+ /*
+ * Protection for resize vs mmap races - both the mmap and resize
+ * side will need to grab this lock, to prevent either side from
+ * being run concurrently with the other.
+ */
+ struct mutex mmap_lock;
+
+ struct io_mapped_region sq_region;
+ struct io_mapped_region ring_region;
+ /* used for optimised request parameter and wait argument passing */
+ struct io_mapped_region param_region;
+};
+
+/*
+ * Token indicating function is called in task work context:
+ * ctx->uring_lock is held and any completions generated will be flushed.
+ * ONLY core io_uring.c should instantiate this struct.
+ */
+struct io_tw_state {
+ bool cancel;
+};
+/* Alias to use in code that doesn't instantiate struct io_tw_state */
+typedef struct io_tw_state io_tw_token_t;
+
+enum {
+ REQ_F_FIXED_FILE_BIT = IOSQE_FIXED_FILE_BIT,
+ REQ_F_IO_DRAIN_BIT = IOSQE_IO_DRAIN_BIT,
+ REQ_F_LINK_BIT = IOSQE_IO_LINK_BIT,
+ REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
+ REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
+ REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
+ REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT,
+
+ /* first byte is taken by user flags, shift it to not overlap */
+ REQ_F_FAIL_BIT = 8,
+ REQ_F_INFLIGHT_BIT,
+ REQ_F_CUR_POS_BIT,
+ REQ_F_NOWAIT_BIT,
+ REQ_F_LINK_TIMEOUT_BIT,
+ REQ_F_NEED_CLEANUP_BIT,
+ REQ_F_POLLED_BIT,
+ REQ_F_HYBRID_IOPOLL_STATE_BIT,
+ REQ_F_BUFFER_SELECTED_BIT,
+ REQ_F_BUFFER_RING_BIT,
+ REQ_F_REISSUE_BIT,
+ REQ_F_CREDS_BIT,
+ REQ_F_REFCOUNT_BIT,
+ REQ_F_ARM_LTIMEOUT_BIT,
+ REQ_F_ASYNC_DATA_BIT,
+ REQ_F_SKIP_LINK_CQES_BIT,
+ REQ_F_SINGLE_POLL_BIT,
+ REQ_F_DOUBLE_POLL_BIT,
+ REQ_F_MULTISHOT_BIT,
+ REQ_F_APOLL_MULTISHOT_BIT,
+ REQ_F_CLEAR_POLLIN_BIT,
+ /* keep async read/write and isreg together and in order */
+ REQ_F_SUPPORT_NOWAIT_BIT,
+ REQ_F_ISREG_BIT,
+ REQ_F_POLL_NO_LAZY_BIT,
+ REQ_F_CAN_POLL_BIT,
+ REQ_F_BL_EMPTY_BIT,
+ REQ_F_BL_NO_RECYCLE_BIT,
+ REQ_F_BUFFERS_COMMIT_BIT,
+ REQ_F_BUF_NODE_BIT,
+ REQ_F_HAS_METADATA_BIT,
+ REQ_F_IMPORT_BUFFER_BIT,
+ REQ_F_SQE_COPIED_BIT,
+
+ /* not a real bit, just to check we're not overflowing the space */
+ __REQ_F_LAST_BIT,
+};
+
+typedef u64 __bitwise io_req_flags_t;
+#define IO_REQ_FLAG(bitno) ((__force io_req_flags_t) BIT_ULL((bitno)))
+
+enum {
+ /* ctx owns file */
+ REQ_F_FIXED_FILE = IO_REQ_FLAG(REQ_F_FIXED_FILE_BIT),
+ /* drain existing IO first */
+ REQ_F_IO_DRAIN = IO_REQ_FLAG(REQ_F_IO_DRAIN_BIT),
+ /* linked sqes */
+ REQ_F_LINK = IO_REQ_FLAG(REQ_F_LINK_BIT),
+ /* doesn't sever on completion < 0 */
+ REQ_F_HARDLINK = IO_REQ_FLAG(REQ_F_HARDLINK_BIT),
+ /* IOSQE_ASYNC */
+ REQ_F_FORCE_ASYNC = IO_REQ_FLAG(REQ_F_FORCE_ASYNC_BIT),
+ /* IOSQE_BUFFER_SELECT */
+ REQ_F_BUFFER_SELECT = IO_REQ_FLAG(REQ_F_BUFFER_SELECT_BIT),
+ /* IOSQE_CQE_SKIP_SUCCESS */
+ REQ_F_CQE_SKIP = IO_REQ_FLAG(REQ_F_CQE_SKIP_BIT),
+
+ /* fail rest of links */
+ REQ_F_FAIL = IO_REQ_FLAG(REQ_F_FAIL_BIT),
+ /* on inflight list, should be cancelled and waited on exit reliably */
+ REQ_F_INFLIGHT = IO_REQ_FLAG(REQ_F_INFLIGHT_BIT),
+ /* read/write uses file position */
+ REQ_F_CUR_POS = IO_REQ_FLAG(REQ_F_CUR_POS_BIT),
+ /* must not punt to workers */
+ REQ_F_NOWAIT = IO_REQ_FLAG(REQ_F_NOWAIT_BIT),
+ /* has or had linked timeout */
+ REQ_F_LINK_TIMEOUT = IO_REQ_FLAG(REQ_F_LINK_TIMEOUT_BIT),
+ /* needs cleanup */
+ REQ_F_NEED_CLEANUP = IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT),
+ /* already went through poll handler */
+ REQ_F_POLLED = IO_REQ_FLAG(REQ_F_POLLED_BIT),
+ /* every req only blocks once in hybrid poll */
+ REQ_F_IOPOLL_STATE = IO_REQ_FLAG(REQ_F_HYBRID_IOPOLL_STATE_BIT),
+ /* buffer already selected */
+ REQ_F_BUFFER_SELECTED = IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT),
+ /* buffer selected from ring, needs commit */
+ REQ_F_BUFFER_RING = IO_REQ_FLAG(REQ_F_BUFFER_RING_BIT),
+ /* caller should reissue async */
+ REQ_F_REISSUE = IO_REQ_FLAG(REQ_F_REISSUE_BIT),
+ /* supports async reads/writes */
+ REQ_F_SUPPORT_NOWAIT = IO_REQ_FLAG(REQ_F_SUPPORT_NOWAIT_BIT),
+ /* regular file */
+ REQ_F_ISREG = IO_REQ_FLAG(REQ_F_ISREG_BIT),
+ /* has creds assigned */
+ REQ_F_CREDS = IO_REQ_FLAG(REQ_F_CREDS_BIT),
+ /* skip refcounting if not set */
+ REQ_F_REFCOUNT = IO_REQ_FLAG(REQ_F_REFCOUNT_BIT),
+ /* there is a linked timeout that has to be armed */
+ REQ_F_ARM_LTIMEOUT = IO_REQ_FLAG(REQ_F_ARM_LTIMEOUT_BIT),
+ /* ->async_data allocated */
+ REQ_F_ASYNC_DATA = IO_REQ_FLAG(REQ_F_ASYNC_DATA_BIT),
+ /* don't post CQEs while failing linked requests */
+ REQ_F_SKIP_LINK_CQES = IO_REQ_FLAG(REQ_F_SKIP_LINK_CQES_BIT),
+ /* single poll may be active */
+ REQ_F_SINGLE_POLL = IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
+	/* double poll may be active */
+ REQ_F_DOUBLE_POLL = IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
+ /* request posts multiple completions, should be set at prep time */
+ REQ_F_MULTISHOT = IO_REQ_FLAG(REQ_F_MULTISHOT_BIT),
+ /* fast poll multishot mode */
+ REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
+ /* recvmsg special flag, clear EPOLLIN */
+ REQ_F_CLEAR_POLLIN = IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT),
+ /* don't use lazy poll wake for this request */
+ REQ_F_POLL_NO_LAZY = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
+ /* file is pollable */
+ REQ_F_CAN_POLL = IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
+ /* buffer list was empty after selection of buffer */
+ REQ_F_BL_EMPTY = IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT),
+ /* don't recycle provided buffers for this request */
+ REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
+ /* buffer ring head needs incrementing on put */
+ REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
+ /* buf node is valid */
+ REQ_F_BUF_NODE = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
+ /* request has read/write metadata assigned */
+ REQ_F_HAS_METADATA = IO_REQ_FLAG(REQ_F_HAS_METADATA_BIT),
+ /*
+ * For vectored fixed buffers, resolve iovec to registered buffers.
+ * For SEND_ZC, whether to import buffers (i.e. the first issue).
+ */
+ REQ_F_IMPORT_BUFFER = IO_REQ_FLAG(REQ_F_IMPORT_BUFFER_BIT),
+ /* ->sqe_copy() has been called, if necessary */
+ REQ_F_SQE_COPIED = IO_REQ_FLAG(REQ_F_SQE_COPIED_BIT),
+};
+
+struct io_tw_req {
+ struct io_kiocb *req;
+};
+
+typedef void (*io_req_tw_func_t)(struct io_tw_req tw_req, io_tw_token_t tw);
+
+struct io_task_work {
+ struct llist_node node;
+ io_req_tw_func_t func;
+};
+
+struct io_cqe {
+ __u64 user_data;
+ __s32 res;
+ /* fd initially, then cflags for completion */
+ union {
+ __u32 flags;
+ int fd;
+ };
+};
+
+/*
+ * Each request type overlays its private data structure on top of this one.
+ * They must not exceed this one in size.
+ */
+struct io_cmd_data {
+ struct file *file;
+ /* each command gets 56 bytes of data */
+ __u8 data[56];
+};
+
+static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
+{
+ BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
+}
+#define io_kiocb_to_cmd(req, cmd_type) ( \
+ io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
+ ((cmd_type *)&(req)->cmd) \
+)
+
+static inline struct io_kiocb *cmd_to_io_kiocb(void *ptr)
+{
+ return ptr;
+}
+
+struct io_kiocb {
+ union {
+ /*
+ * NOTE! Each of the io_kiocb union members has the file pointer
+ * as the first entry in their struct definition. So you can
+ * access the file pointer through any of the sub-structs,
+ * or directly as just 'file' in this struct.
+ */
+ struct file *file;
+ struct io_cmd_data cmd;
+ };
+
+ u8 opcode;
+ /* polled IO has completed */
+ u8 iopoll_completed;
+ /*
+ * Can be either a fixed buffer index, or used with provided buffers.
+ * For the latter, it points to the selected buffer ID.
+ */
+ u16 buf_index;
+
+ unsigned nr_tw;
+
+ /* REQ_F_* flags */
+ io_req_flags_t flags;
+
+ struct io_cqe cqe;
+
+ struct io_ring_ctx *ctx;
+ struct io_uring_task *tctx;
+
+ union {
+ /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
+ struct io_buffer *kbuf;
+
+ struct io_rsrc_node *buf_node;
+ };
+
+ union {
+ /* used by request caches, completion batching and iopoll */
+ struct io_wq_work_node comp_list;
+ /* cache ->apoll->events */
+ __poll_t apoll_events;
+ };
+
+ struct io_rsrc_node *file_node;
+
+ atomic_t refs;
+ bool cancel_seq_set;
+ struct io_task_work io_task_work;
+ union {
+ /*
+ * for polled requests, i.e. IORING_OP_POLL_ADD and async armed
+ * poll
+ */
+ struct hlist_node hash_node;
+ /* For IOPOLL setup queues, with hybrid polling */
+ u64 iopoll_start;
+ /* for private io_kiocb freeing */
+ struct rcu_head rcu_head;
+ };
+ /* internal polling, see IORING_FEAT_FAST_POLL */
+ struct async_poll *apoll;
+ /* opcode allocated if it needs to store data for async defer */
+ void *async_data;
+ /* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
+ atomic_t poll_refs;
+ struct io_kiocb *link;
+ /* custom credentials, valid IFF REQ_F_CREDS is set */
+ const struct cred *creds;
+ struct io_wq_work work;
+
+ struct io_big_cqe {
+ u64 extra1;
+ u64 extra2;
+ } big_cqe;
+};
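
A sketch of the io_kiocb_to_cmd() overlay with a hypothetical per-opcode struct; the file pointer must come first so the struct lines up with struct io_cmd_data, and the size check above rejects anything bigger:

struct example_op {			/* hypothetical opcode data */
	struct file *file;
	u64 user_value;
};

static inline void example_prep(struct io_kiocb *req, u64 val)
{
	struct example_op *op = io_kiocb_to_cmd(req, struct example_op);

	op->user_value = val;
}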
+
+struct io_overflow_cqe {
+ struct list_head list;
+ struct io_uring_cqe cqe;
+};
+#endif /* IO_URING_TYPES_H */
diff --git a/include/linux/ioam6.h b/include/linux/ioam6.h
new file mode 100644
index 000000000000..94a24b36998f
--- /dev/null
+++ b/include/linux/ioam6.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_H
+#define _LINUX_IOAM6_H
+
+#include <uapi/linux/ioam6.h>
+
+#endif /* _LINUX_IOAM6_H */
diff --git a/include/linux/ioam6_genl.h b/include/linux/ioam6_genl.h
new file mode 100644
index 000000000000..176e67919de3
--- /dev/null
+++ b/include/linux/ioam6_genl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM Generic Netlink API
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_GENL_H
+#define _LINUX_IOAM6_GENL_H
+
+#include <uapi/linux/ioam6_genl.h>
+
+#endif /* _LINUX_IOAM6_GENL_H */
diff --git a/include/linux/ioam6_iptunnel.h b/include/linux/ioam6_iptunnel.h
new file mode 100644
index 000000000000..07d9dfedd29d
--- /dev/null
+++ b/include/linux/ioam6_iptunnel.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * IPv6 IOAM Lightweight Tunnel API
+ *
+ * Author:
+ * Justin Iurman <justin.iurman@uliege.be>
+ */
+#ifndef _LINUX_IOAM6_IPTUNNEL_H
+#define _LINUX_IOAM6_IPTUNNEL_H
+
+#include <uapi/linux/ioam6_iptunnel.h>
+
+#endif /* _LINUX_IOAM6_IPTUNNEL_H */
diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h
deleted file mode 100644
index e9dacd4b9f6b..000000000000
--- a/include/linux/ioasid.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_IOASID_H
-#define __LINUX_IOASID_H
-
-#include <linux/types.h>
-#include <linux/errno.h>
-
-#define INVALID_IOASID ((ioasid_t)-1)
-typedef unsigned int ioasid_t;
-typedef ioasid_t (*ioasid_alloc_fn_t)(ioasid_t min, ioasid_t max, void *data);
-typedef void (*ioasid_free_fn_t)(ioasid_t ioasid, void *data);
-
-struct ioasid_set {
- int dummy;
-};
-
-/**
- * struct ioasid_allocator_ops - IOASID allocator helper functions and data
- *
- * @alloc: helper function to allocate IOASID
- * @free: helper function to free IOASID
- * @list: for tracking ops that share helper functions but not data
- * @pdata: data belong to the allocator, provided when calling alloc()
- */
-struct ioasid_allocator_ops {
- ioasid_alloc_fn_t alloc;
- ioasid_free_fn_t free;
- struct list_head list;
- void *pdata;
-};
-
-#define DECLARE_IOASID_SET(name) struct ioasid_set name = { 0 }
-
-#if IS_ENABLED(CONFIG_IOASID)
-ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
- void *private);
-void ioasid_get(ioasid_t ioasid);
-bool ioasid_put(ioasid_t ioasid);
-void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
- bool (*getter)(void *));
-int ioasid_register_allocator(struct ioasid_allocator_ops *allocator);
-void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator);
-int ioasid_set_data(ioasid_t ioasid, void *data);
-
-#else /* !CONFIG_IOASID */
-static inline ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min,
- ioasid_t max, void *private)
-{
- return INVALID_IOASID;
-}
-
-static inline void ioasid_get(ioasid_t ioasid)
-{
-}
-
-static inline bool ioasid_put(ioasid_t ioasid)
-{
- return false;
-}
-
-static inline void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
- bool (*getter)(void *))
-{
- return NULL;
-}
-
-static inline int ioasid_register_allocator(struct ioasid_allocator_ops *allocator)
-{
- return -ENOTSUPP;
-}
-
-static inline void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator)
-{
-}
-
-static inline int ioasid_set_data(ioasid_t ioasid, void *data)
-{
- return -ENOTSUPP;
-}
-
-#endif /* CONFIG_IOASID */
-#endif /* __LINUX_IOASID_H */
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 0a9dc40b7be8..079d8773790c 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -99,55 +99,40 @@ struct io_cq {
struct io_context {
atomic_long_t refcount;
atomic_t active_ref;
- atomic_t nr_tasks;
+ unsigned short ioprio;
+
+#ifdef CONFIG_BLK_ICQ
/* all the fields below are protected by this lock */
spinlock_t lock;
- unsigned short ioprio;
-
struct radix_tree_root icq_tree;
struct io_cq __rcu *icq_hint;
struct hlist_head icq_list;
struct work_struct release_work;
+#endif /* CONFIG_BLK_ICQ */
};
-/**
- * get_io_context_active - get active reference on ioc
- * @ioc: ioc of interest
- *
- * Only iocs with active reference can issue new IOs. This function
- * acquires an active reference on @ioc. The caller must already have an
- * active reference on @ioc.
- */
-static inline void get_io_context_active(struct io_context *ioc)
-{
- WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
- WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
- atomic_long_inc(&ioc->refcount);
- atomic_inc(&ioc->active_ref);
-}
-
-static inline void ioc_task_link(struct io_context *ioc)
-{
- get_io_context_active(ioc);
-
- WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
- atomic_inc(&ioc->nr_tasks);
-}
-
struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
-void put_io_context_active(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
-struct io_context *get_task_io_context(struct task_struct *task,
- gfp_t gfp_flags, int node);
+int __copy_io(u64 clone_flags, struct task_struct *tsk);
+static inline int copy_io(u64 clone_flags, struct task_struct *tsk)
+{
+ if (!current->io_context)
+ return 0;
+ return __copy_io(clone_flags, tsk);
+}
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
-#endif
+static inline int copy_io(u64 clone_flags, struct task_struct *tsk)
+{
+ return 0;
+}
+#endif /* CONFIG_BLOCK */
-#endif
+#endif /* IOCONTEXT_H */
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index c87d0cb0de6d..520e967cb501 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -9,12 +9,15 @@
#include <linux/types.h>
#include <linux/mm_types.h>
#include <linux/blkdev.h>
+#include <linux/pagevec.h>
struct address_space;
struct fiemap_extent_info;
struct inode;
+struct iomap_iter;
struct iomap_dio;
struct iomap_writepage_ctx;
+struct iomap_read_folio_ctx;
struct iov_iter;
struct kiocb;
struct page;
@@ -49,35 +52,57 @@ struct vm_fault;
*
* IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of
* buffer heads for this mapping.
+ *
+ * IOMAP_F_XATTR indicates that the iomap is for an extended attribute extent
+ * rather than a file data extent.
+ *
+ * IOMAP_F_BOUNDARY indicates that I/O and I/O completions for this iomap must
+ * never be merged with the mapping before it.
+ *
+ * IOMAP_F_ANON_WRITE indicates that (write) I/O does not have a target block
+ * assigned to it yet and the file system will do that in the bio submission
+ * handler, splitting the I/O as needed.
+ *
+ * IOMAP_F_ATOMIC_BIO indicates that (write) I/O will be issued as an atomic
+ * bio, i.e. set REQ_ATOMIC.
+ */
+#define IOMAP_F_NEW (1U << 0)
+#define IOMAP_F_DIRTY (1U << 1)
+#define IOMAP_F_SHARED (1U << 2)
+#define IOMAP_F_MERGED (1U << 3)
+#ifdef CONFIG_BUFFER_HEAD
+#define IOMAP_F_BUFFER_HEAD (1U << 4)
+#else
+#define IOMAP_F_BUFFER_HEAD 0
+#endif /* CONFIG_BUFFER_HEAD */
+#define IOMAP_F_XATTR (1U << 5)
+#define IOMAP_F_BOUNDARY (1U << 6)
+#define IOMAP_F_ANON_WRITE (1U << 7)
+#define IOMAP_F_ATOMIC_BIO (1U << 8)
+
+/*
+ * Flag reserved for file system specific usage
*/
-#define IOMAP_F_NEW 0x01
-#define IOMAP_F_DIRTY 0x02
-#define IOMAP_F_SHARED 0x04
-#define IOMAP_F_MERGED 0x08
-#define IOMAP_F_BUFFER_HEAD 0x10
-#define IOMAP_F_ZONE_APPEND 0x20
+#define IOMAP_F_PRIVATE (1U << 12)
/*
* Flags set by the core iomap code during operations:
*
* IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
* has changed as the result of this write operation.
+ *
+ * IOMAP_F_STALE indicates that the iomap is not valid any longer and the file
+ * range it covers needs to be remapped by the high level before the operation
+ * can proceed.
*/
-#define IOMAP_F_SIZE_CHANGED 0x100
-
-/*
- * Flags from 0x1000 up are for file system specific usage:
- */
-#define IOMAP_F_PRIVATE 0x1000
-
+#define IOMAP_F_SIZE_CHANGED (1U << 14)
+#define IOMAP_F_STALE (1U << 15)
/*
* Magic value for addr:
*/
#define IOMAP_NULL_ADDR -1ULL /* addr is not valid */
-struct iomap_page_ops;
-
struct iomap {
u64 addr; /* disk offset of mapping, bytes */
loff_t offset; /* file offset of mapping, bytes */
@@ -88,30 +113,71 @@ struct iomap {
struct dax_device *dax_dev; /* dax_dev for dax operations */
void *inline_data;
void *private; /* filesystem private */
- const struct iomap_page_ops *page_ops;
+ u64 validity_cookie; /* used with .iomap_valid() */
};
-static inline sector_t
-iomap_sector(struct iomap *iomap, loff_t pos)
+static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos)
{
+ if (iomap->flags & IOMAP_F_ANON_WRITE)
+ return U64_MAX; /* invalid */
return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}
/*
- * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
- * and page_done will be called for each page written to. This only applies to
- * buffered writes as unbuffered writes will not typically have pages
- * associated with them.
- *
- * When page_prepare succeeds, page_done will always be called to do any
- * cleanup work necessary. In that page_done call, @page will be NULL if the
- * associated page could not be obtained.
+ * Returns the inline data pointer for logical offset @pos.
+ */
+static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos)
+{
+ return iomap->inline_data + pos - iomap->offset;
+}
+
+/*
+ * Check if the mapping's length is within the valid range for inline data.
+ * This is used to guard against accessing data beyond the page inline_data
+ * points at.
+ */
+static inline bool iomap_inline_data_valid(const struct iomap *iomap)
+{
+ return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data);
+}
+
+/*
+ * When get_folio succeeds, put_folio will always be called to do any
+ * cleanup work necessary. put_folio is responsible for unlocking and putting
+ * @folio.
*/
-struct iomap_page_ops {
- int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len,
- struct iomap *iomap);
- void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
- struct page *page, struct iomap *iomap);
+struct iomap_write_ops {
+ struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos,
+ unsigned len);
+ void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied,
+ struct folio *folio);
+
+ /*
+ * Check that the cached iomap still maps correctly to the filesystem's
+ * internal extent map. FS internal extent maps can change while iomap
+ * is iterating a cached iomap, so this hook allows iomap to detect that
+ * the iomap needs to be refreshed during a long running write
+ * operation.
+ *
+ * The filesystem can store internal state (e.g. a sequence number) in
+ * iomap->validity_cookie when the iomap is first mapped to be able to
+ * detect changes between mapping time and whenever .iomap_valid() is
+ * called.
+ *
+ * This is called with the folio over the specified file position held
+ * locked by the iomap code.
+ */
+ bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap);
+
+ /*
+	 * Optional. Allows the filesystem to provide a custom handler for
+	 * reading in the contents of a folio; otherwise iomap defaults to
+	 * submitting a bio read request.
+ *
+ * The read must be done synchronously.
+ */
+ int (*read_folio_range)(const struct iomap_iter *iter,
+ struct folio *folio, loff_t pos, size_t len);
};
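
A sketch of a sequence-counter based ->iomap_valid, assuming a hypothetical filesystem that bumps a per-inode sequence number on every extent-map change and samples it into iomap->validity_cookie in ->iomap_begin:

struct example_inode {				/* hypothetical */
	struct inode vfs_inode;
	u64 i_map_seq;				/* bumped on extent changes */
};

static bool example_iomap_valid(struct inode *inode, const struct iomap *iomap)
{
	struct example_inode *ei =
		container_of(inode, struct example_inode, vfs_inode);

	return iomap->validity_cookie == READ_ONCE(ei->i_map_seq);
}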
/*
@@ -124,6 +190,14 @@ struct iomap_page_ops {
#define IOMAP_DIRECT (1 << 4) /* direct I/O */
#define IOMAP_NOWAIT (1 << 5) /* do not block */
#define IOMAP_OVERWRITE_ONLY (1 << 6) /* only pure overwrites allowed */
+#define IOMAP_UNSHARE (1 << 7) /* unshare_file_range */
+#ifdef CONFIG_FS_DAX
+#define IOMAP_DAX (1 << 8) /* DAX mapping */
+#else
+#define IOMAP_DAX 0
+#endif /* CONFIG_FS_DAX */
+#define IOMAP_ATOMIC (1 << 9) /* torn-write protection */
+#define IOMAP_DONTCACHE (1 << 10)
struct iomap_ops {
/*
@@ -145,40 +219,155 @@ struct iomap_ops {
ssize_t written, unsigned flags, struct iomap *iomap);
};
+/**
+ * struct iomap_iter - Iterate through a range of a file
+ * @inode: Set at the start of the iteration and should not change.
+ * @pos: The current file position we are operating on. It is updated by
+ * calls to iomap_iter(). Treat as read-only in the body.
+ * @len: The remaining length of the file segment we're operating on.
+ * It is updated at the same time as @pos.
+ * @iter_start_pos: The original start pos for the current iomap. Used for
+ * incremental iter advance.
+ * @status: Status of the most recent iteration. Zero on success or a negative
+ * errno on error.
+ * @flags: Zero or more of the iomap_begin flags above.
+ * @iomap: Map describing the I/O iteration
+ * @srcmap: Source map for COW operations
+ * @fbatch: Optional batch of folios to operate on (see
+ *	iomap_fill_dirty_folios())
+ * @private: Caller-private data, passed through untouched by the core
+ */
+struct iomap_iter {
+ struct inode *inode;
+ loff_t pos;
+ u64 len;
+ loff_t iter_start_pos;
+ int status;
+ unsigned flags;
+ struct iomap iomap;
+ struct iomap srcmap;
+ struct folio_batch *fbatch;
+ void *private;
+};
+
+int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);
+int iomap_iter_advance(struct iomap_iter *iter, u64 count);
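
The canonical iteration shape, sketched with a hypothetical per-mapping helper that records its progress via iomap_iter_advance(); iomap_iter() returns a positive value while another mapping remains to be processed:

static int example_process_mapping(struct iomap_iter *iter); /* hypothetical */

static int example_iterate(struct inode *inode, loff_t pos, u64 len,
			   const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= inode,
		.pos	= pos,
		.len	= len,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.status = example_process_mapping(&iter);
	return ret;
}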
+
+/**
+ * iomap_length_trim - trimmed length of the current iomap iteration
+ * @iter: iteration structure
+ * @pos: File position to trim from.
+ * @len: Length of the mapping to trim to.
+ *
+ * Returns a trimmed length that the operation applies to for the current
+ * iteration.
+ */
+static inline u64 iomap_length_trim(const struct iomap_iter *iter, loff_t pos,
+ u64 len)
+{
+ u64 end = iter->iomap.offset + iter->iomap.length;
+
+ if (iter->srcmap.type != IOMAP_HOLE)
+ end = min(end, iter->srcmap.offset + iter->srcmap.length);
+ return min(len, end - pos);
+}
+
+/**
+ * iomap_length - length of the current iomap iteration
+ * @iter: iteration structure
+ *
+ * Returns the length that the operation applies to for the current iteration.
+ */
+static inline u64 iomap_length(const struct iomap_iter *iter)
+{
+ return iomap_length_trim(iter, iter->pos, iter->len);
+}
+
+/**
+ * iomap_iter_advance_full - advance by the full length of the current map
+ * @iter: iteration structure
+ */
+static inline int iomap_iter_advance_full(struct iomap_iter *iter)
+{
+ return iomap_iter_advance(iter, iomap_length(iter));
+}
+
+/**
+ * iomap_iter_srcmap - return the source map for the current iomap iteration
+ * @i: iteration structure
+ *
+ * Write operations on file systems with reflink support might require a
+ * source and a destination map. This function retourns the source map
+ * for a given operation, which may or may no be identical to the destination
+ * map in &i->iomap.
+ */
+static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i)
+{
+ if (i->srcmap.type != IOMAP_HOLE)
+ return &i->srcmap;
+ return &i->iomap;
+}
+
/*
- * Main iomap iterator function.
+ * Return the file offset for the first unchanged block after a short write.
+ *
+ * If nothing was written, round @pos down to point at the first block in
+ * the range, else round up to include the partially written block.
*/
-typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
- void *data, struct iomap *iomap, struct iomap *srcmap);
+static inline loff_t iomap_last_written_block(struct inode *inode, loff_t pos,
+ ssize_t written)
+{
+ if (unlikely(!written))
+ return round_down(pos, i_blocksize(inode));
+ return round_up(pos + written, i_blocksize(inode));
+}
-loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
- unsigned flags, const struct iomap_ops *ops, void *data,
- iomap_actor_t actor);
+/*
+ * Check if the range needs to be unshared for a FALLOC_FL_UNSHARE_RANGE
+ * operation.
+ *
+ * Don't bother with blocks that are not shared to start with; or mappings that
+ * cannot be shared, such as inline data, delalloc reservations, holes or
+ * unwritten extents.
+ *
+ * Note that we use srcmap directly instead of iomap_iter_srcmap as unsharing
+ * requires providing a separate source map, and the presence of one is a good
+ * indicator that unsharing is needed, unlike IOMAP_F_SHARED which can be set
+ * for any data that goes into the COW fork for XFS.
+ */
+static inline bool iomap_want_unshare_iter(const struct iomap_iter *iter)
+{
+ return (iter->iomap.flags & IOMAP_F_SHARED) &&
+ iter->srcmap.type == IOMAP_MAPPED;
+}
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
- const struct iomap_ops *ops);
-int iomap_readpage(struct page *page, const struct iomap_ops *ops);
-void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
-int iomap_set_page_dirty(struct page *page);
-int iomap_is_partially_uptodate(struct page *page, unsigned long from,
- unsigned long count);
-int iomap_releasepage(struct page *page, gfp_t gfp_mask);
-void iomap_invalidatepage(struct page *page, unsigned int offset,
- unsigned int len);
-#ifdef CONFIG_MIGRATION
-int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
- struct page *page, enum migrate_mode mode);
-#else
-#define iomap_migrate_page NULL
-#endif
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private);
+void iomap_read_folio(const struct iomap_ops *ops,
+ struct iomap_read_folio_ctx *ctx);
+void iomap_readahead(const struct iomap_ops *ops,
+ struct iomap_read_folio_ctx *ctx);
+bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count);
+struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len);
+bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags);
+void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
+bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio);
int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
- const struct iomap_ops *ops);
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops);
+loff_t iomap_fill_dirty_folios(struct iomap_iter *iter, loff_t offset,
+ loff_t length);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
- bool *did_zero, const struct iomap_ops *ops);
+ bool *did_zero, const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
- const struct iomap_ops *ops);
-vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
- const struct iomap_ops *ops);
+ const struct iomap_ops *ops,
+ const struct iomap_write_ops *write_ops, void *private);
+vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops,
+ void *private);
+typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length,
+ struct iomap *iomap);
+void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
+ loff_t end_byte, unsigned flags, struct iomap *iomap,
+ iomap_punch_t punch);
+
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len, const struct iomap_ops *ops);
loff_t iomap_seek_hole(struct inode *inode, loff_t offset,
@@ -189,58 +378,138 @@ sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
const struct iomap_ops *ops);
/*
+ * Flags for iomap_ioend->io_flags.
+ */
+/* shared COW extent */
+#define IOMAP_IOEND_SHARED (1U << 0)
+/* unwritten extent */
+#define IOMAP_IOEND_UNWRITTEN (1U << 1)
+/* don't merge into previous ioend */
+#define IOMAP_IOEND_BOUNDARY (1U << 2)
+/* is direct I/O */
+#define IOMAP_IOEND_DIRECT (1U << 3)
+/* is DONTCACHE I/O */
+#define IOMAP_IOEND_DONTCACHE (1U << 4)
+
+/*
+ * Flags that if set on either ioend prevent the merge of two ioends.
+ * (IOMAP_IOEND_BOUNDARY also prevents merges, but only one-way)
+ */
+#define IOMAP_IOEND_NOMERGE_FLAGS \
+ (IOMAP_IOEND_SHARED | IOMAP_IOEND_UNWRITTEN | IOMAP_IOEND_DIRECT | \
+ IOMAP_IOEND_DONTCACHE)
+
+/*
* Structure for writeback I/O completions.
+ *
+ * File systems can split a bio generated by iomap. In that case the parent
+ * ioend it was split from is recorded in ioend->io_parent.
*/
struct iomap_ioend {
struct list_head io_list; /* next ioend in chain */
- u16 io_type;
- u16 io_flags; /* IOMAP_F_* */
+ u16 io_flags; /* IOMAP_IOEND_* */
struct inode *io_inode; /* file being written to */
size_t io_size; /* size of the extent */
+	atomic_t		io_remaining;	/* completion defer count */
+ int io_error; /* stashed away status */
+ struct iomap_ioend *io_parent; /* parent for completions */
loff_t io_offset; /* offset in the file */
- struct bio *io_bio; /* bio being built */
- struct bio io_inline_bio; /* MUST BE LAST! */
+ sector_t io_sector; /* start sector of ioend */
+ void *io_private; /* file system private data */
+ struct bio io_bio; /* MUST BE LAST! */
};
-struct iomap_writeback_ops {
- /*
- * Required, maps the blocks so that writeback can be performed on
- * the range starting at offset.
- */
- int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
- loff_t offset);
+static inline struct iomap_ioend *iomap_ioend_from_bio(struct bio *bio)
+{
+ return container_of(bio, struct iomap_ioend, io_bio);
+}
+struct iomap_writeback_ops {
/*
- * Optional, allows the file systems to perform actions just before
- * submitting the bio and/or override the bio end_io handler for complex
- * operations like copy on write extent manipulation or unwritten extent
- * conversions.
+	 * Performs writeback on the passed-in range
+ *
+ * Can map arbitrarily large regions, but we need to call into it at
+ * least once per folio to allow the file systems to synchronize with
+ * the write path that could be invalidating mappings.
+ *
+ * An existing mapping from a previous call to this method can be reused
+ * by the file system if it is still valid.
+ *
+ * If this succeeds, iomap_finish_folio_write() must be called once
+ * writeback completes for the range, regardless of whether the
+ * writeback succeeded or failed.
+ *
+ * Returns the number of bytes processed or a negative errno.
*/
- int (*prepare_ioend)(struct iomap_ioend *ioend, int status);
+ ssize_t (*writeback_range)(struct iomap_writepage_ctx *wpc,
+ struct folio *folio, u64 pos, unsigned int len,
+ u64 end_pos);
/*
- * Optional, allows the file system to discard state on a page where
- * we failed to submit any I/O.
+	 * Submit a writeback context previously built up by ->writeback_range.
+ *
+ * Returns 0 if the context was successfully submitted, or a negative
+ * error code if not. If @error is non-zero a failure occurred, and
+ * the writeback context should be completed with an error.
*/
- void (*discard_page)(struct page *page, loff_t fileoff);
+ int (*writeback_submit)(struct iomap_writepage_ctx *wpc, int error);
};
struct iomap_writepage_ctx {
struct iomap iomap;
- struct iomap_ioend *ioend;
+ struct inode *inode;
+ struct writeback_control *wbc;
const struct iomap_writeback_ops *ops;
+ u32 nr_folios; /* folios added to the ioend */
+ void *wb_ctx; /* pending writeback context */
};
+struct iomap_ioend *iomap_init_ioend(struct inode *inode, struct bio *bio,
+ loff_t file_offset, u16 ioend_flags);
+struct iomap_ioend *iomap_split_ioend(struct iomap_ioend *ioend,
+ unsigned int max_len, bool is_append);
void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
void iomap_ioend_try_merge(struct iomap_ioend *ioend,
struct list_head *more_ioends);
void iomap_sort_ioends(struct list_head *ioend_list);
-int iomap_writepage(struct page *page, struct writeback_control *wbc,
- struct iomap_writepage_ctx *wpc,
- const struct iomap_writeback_ops *ops);
-int iomap_writepages(struct address_space *mapping,
- struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
- const struct iomap_writeback_ops *ops);
+ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
+ loff_t pos, loff_t end_pos, unsigned int dirty_len);
+int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error);
+
+void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len,
+ int error);
+void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
+ size_t len);
+
+int iomap_writeback_folio(struct iomap_writepage_ctx *wpc, struct folio *folio);
+int iomap_writepages(struct iomap_writepage_ctx *wpc);
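
With the context now carrying the inode, wbc and ops, a filesystem's ->writepages reduces to filling it in and calling iomap_writepages(); a sketch with a hypothetical ops table:

static const struct iomap_writeback_ops example_writeback_ops; /* hypothetical */

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = {
		.inode	= mapping->host,
		.wbc	= wbc,
		.ops	= &example_writeback_ops,
	};

	return iomap_writepages(&wpc);
}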
+
+struct iomap_read_folio_ctx {
+ const struct iomap_read_ops *ops;
+ struct folio *cur_folio;
+ struct readahead_control *rac;
+ void *read_ctx;
+};
+
+struct iomap_read_ops {
+ /*
+ * Read in a folio range.
+ *
+ * If this succeeds, iomap_finish_folio_read() must be called after the
+ * range is read in, regardless of whether the read succeeded or failed.
+ *
+ * Returns 0 on success or a negative error on failure.
+ */
+ int (*read_folio_range)(const struct iomap_iter *iter,
+ struct iomap_read_folio_ctx *ctx, size_t len);
+
+ /*
+ * Submit any pending read requests.
+ *
+ * This is optional.
+ */
+ void (*submit_read)(struct iomap_read_folio_ctx *ctx);
+};
/*
* Flags for direct I/O ->end_io:
@@ -251,8 +520,18 @@ int iomap_writepages(struct address_space *mapping,
struct iomap_dio_ops {
int (*end_io)(struct kiocb *iocb, ssize_t size, int error,
unsigned flags);
- blk_qc_t (*submit_io)(struct inode *inode, struct iomap *iomap,
- struct bio *bio, loff_t file_offset);
+ void (*submit_io)(const struct iomap_iter *iter, struct bio *bio,
+ loff_t file_offset);
+
+ /*
+ * Filesystems wishing to attach private information to a direct io bio
+ * must provide a ->submit_io method that attaches the additional
+ * information to the bio and changes the ->bi_end_io callback to a
+ * custom function. This function should, at a minimum, perform any
+ * relevant post-processing of the bio and end with a call to
+ * iomap_dio_bio_end_io.
+ */
+ struct bio_set *bio_set;
};
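A minimal sketch of the pattern the comment above describes, with hypothetical "myfs" names throughout: the filesystem embeds the bio in a larger structure allocated from its own @bio_set, recovers it in a custom ->bi_end_io via container_of(), and finishes by calling iomap_dio_bio_end_io():

	struct myfs_dio_bio {
		u32		checksum_seed;	/* hypothetical per-bio state */
		struct bio	bio;		/* must come last */
	};

	static void myfs_dio_bio_end_io(struct bio *bio)
	{
		struct myfs_dio_bio *mbio =
			container_of(bio, struct myfs_dio_bio, bio);

		myfs_verify_checksums(mbio);	/* hypothetical post-processing */
		iomap_dio_bio_end_io(bio);	/* hand completion back to iomap */
	}

	static void myfs_dio_submit_io(const struct iomap_iter *iter,
				       struct bio *bio, loff_t file_offset)
	{
		bio->bi_end_io = myfs_dio_bio_end_io;
		submit_bio(bio);
	}

	static const struct iomap_dio_ops myfs_dio_ops = {
		.submit_io	= myfs_dio_submit_io,
		/* bioset_init()'d with front_pad = offsetof(struct myfs_dio_bio, bio) */
		.bio_set	= &myfs_dio_bioset,
	};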
/*
@@ -268,14 +547,29 @@ struct iomap_dio_ops {
*/
#define IOMAP_DIO_OVERWRITE_ONLY (1 << 1)
+/*
+ * When a page fault occurs, return a partial synchronous result and allow
+ * the caller to retry the rest of the operation after dealing with the page
+ * fault.
+ */
+#define IOMAP_DIO_PARTIAL (1 << 2)
+
+/*
+ * Ensure each bio is aligned to fs block size.
+ *
+ * For filesystems that need to calculate/verify the checksum of each fs
+ * block, and therefore may not be able to handle unaligned bios.
+ */
+#define IOMAP_DIO_FSBLOCK_ALIGNED (1 << 3)
+
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
- unsigned int dio_flags);
+ unsigned int dio_flags, void *private, size_t done_before);
struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
- unsigned int dio_flags);
+ unsigned int dio_flags, void *private, size_t done_before);
ssize_t iomap_dio_complete(struct iomap_dio *dio);
-int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
+void iomap_dio_bio_end_io(struct bio *bio);
#ifdef CONFIG_SWAP
struct file;
@@ -288,4 +582,32 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
# define iomap_swapfile_activate(sis, swapfile, pagespan, ops) (-EIO)
#endif /* CONFIG_SWAP */
+extern struct bio_set iomap_ioend_bioset;
+
+#ifdef CONFIG_BLOCK
+extern const struct iomap_read_ops iomap_bio_read_ops;
+
+static inline void iomap_bio_read_folio(struct folio *folio,
+ const struct iomap_ops *ops)
+{
+ struct iomap_read_folio_ctx ctx = {
+ .ops = &iomap_bio_read_ops,
+ .cur_folio = folio,
+ };
+
+ iomap_read_folio(ops, &ctx);
+}
+
+static inline void iomap_bio_readahead(struct readahead_control *rac,
+ const struct iomap_ops *ops)
+{
+ struct iomap_read_folio_ctx ctx = {
+ .ops = &iomap_bio_read_ops,
+ .rac = rac,
+ };
+
+ iomap_readahead(ops, &ctx);
+}
+#endif /* CONFIG_BLOCK */
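A hedged wiring sketch for the two helpers above (the "myfs" names and myfs_iomap_ops are hypothetical); a block-based filesystem can point its address_space operations straight at them:

	static int myfs_read_folio(struct file *file, struct folio *folio)
	{
		iomap_bio_read_folio(folio, &myfs_iomap_ops);
		return 0;
	}

	static void myfs_readahead(struct readahead_control *rac)
	{
		iomap_bio_readahead(rac, &myfs_iomap_ops);
	}

	static const struct address_space_operations myfs_aops = {
		.read_folio	= myfs_read_folio,
		.readahead	= myfs_readahead,
	};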
+
#endif /* LINUX_IOMAP_H */
diff --git a/include/linux/iommu-dma.h b/include/linux/iommu-dma.h
new file mode 100644
index 000000000000..a92b3ff9b934
--- /dev/null
+++ b/include/linux/iommu-dma.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ *
+ * DMA operations that map physical memory through IOMMU.
+ */
+#ifndef _LINUX_IOMMU_DMA_H
+#define _LINUX_IOMMU_DMA_H
+
+#include <linux/dma-direction.h>
+
+#ifdef CONFIG_IOMMU_DMA
+static inline bool use_dma_iommu(struct device *dev)
+{
+ return dev->dma_iommu;
+}
+#else
+static inline bool use_dma_iommu(struct device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_IOMMU_DMA */
+
+dma_addr_t iommu_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, unsigned long attrs);
+void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, unsigned long attrs);
+int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+unsigned long iommu_dma_get_merge_boundary(struct device *dev);
+size_t iommu_dma_opt_mapping_size(void);
+size_t iommu_dma_max_mapping_size(struct device *dev);
+void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, unsigned long attrs);
+struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
+ enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
+void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt, enum dma_data_direction dir);
+void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
+ struct sg_table *sgt);
+#define iommu_dma_vunmap_noncontiguous(dev, vaddr) \
+ vunmap(vaddr);
+int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+ size_t size, struct sg_table *sgt);
+void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir);
+void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir);
+void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir);
+
+#endif /* _LINUX_IOMMU_DMA_H */
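As a hedged sketch of how these entry points are typically consumed, the dma-mapping core can branch on use_dma_iommu() and fall back to the direct path otherwise (the example_ name is hypothetical; DMA_MAPPING_ERROR comes from linux/dma-mapping.h):

	static dma_addr_t example_map(struct device *dev, phys_addr_t phys,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
	{
		if (use_dma_iommu(dev))
			return iommu_dma_map_phys(dev, phys, size, dir, attrs);
		/* otherwise take the direct-mapping path (not shown) */
		return DMA_MAPPING_ERROR;
	}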
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 32d448050bf7..8c66284a91a8 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -13,8 +13,8 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/ioasid.h>
-#include <uapi/linux/iommu.h>
+#include <linux/iova_bitmap.h>
+#include <uapi/linux/iommufd.h>
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
@@ -37,9 +37,122 @@ struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
+struct iommu_domain_ops;
+struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
-struct iommu_fault_event;
+struct iommu_dma_cookie;
+struct iommu_dma_msi_cookie;
+struct iommu_fault_param;
+struct iommufd_ctx;
+struct iommufd_viommu;
+struct msi_desc;
+struct msi_msg;
+
+#define IOMMU_FAULT_PERM_READ (1 << 0) /* read */
+#define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */
+#define IOMMU_FAULT_PERM_EXEC (1 << 2) /* exec */
+#define IOMMU_FAULT_PERM_PRIV (1 << 3) /* privileged */
+
+/* Generic fault types; can be expanded to cover IRQ remapping faults */
+enum iommu_fault_type {
+ IOMMU_FAULT_PAGE_REQ = 1, /* page request fault */
+};
+
+/**
+ * struct iommu_fault_page_request - Page Request data
+ * @flags: encodes whether the corresponding fields are valid and whether this
+ * is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
+ * When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
+ * must have the same PASID value as the page request. When it is clear,
+ * the page response should not have a PASID.
+ * @pasid: Process Address Space ID
+ * @grpid: Page Request Group Index
+ * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
+ * @addr: page address
+ * @private_data: device-specific private information
+ */
+struct iommu_fault_page_request {
+#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0)
+#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1)
+#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 2)
+ u32 flags;
+ u32 pasid;
+ u32 grpid;
+ u32 perm;
+ u64 addr;
+ u64 private_data[2];
+};
+
+/**
+ * struct iommu_fault - Generic fault data
+ * @type: fault type from &enum iommu_fault_type
+ * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
+ */
+struct iommu_fault {
+ u32 type;
+ struct iommu_fault_page_request prm;
+};
+
+/**
+ * enum iommu_page_response_code - Return status of fault handlers
+ * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
+ * populated, retry the access. This is "Success" in PCI PRI.
+ * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
+ * this device if possible. This is "Response Failure" in PCI PRI.
+ * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
+ * access. This is "Invalid Request" in PCI PRI.
+ */
+enum iommu_page_response_code {
+ IOMMU_PAGE_RESP_SUCCESS = 0,
+ IOMMU_PAGE_RESP_INVALID,
+ IOMMU_PAGE_RESP_FAILURE,
+};
+
+/**
+ * struct iommu_page_response - Generic page response information
+ * @pasid: Process Address Space ID
+ * @grpid: Page Request Group Index
+ * @code: response code from &enum iommu_page_response_code
+ */
+struct iommu_page_response {
+ u32 pasid;
+ u32 grpid;
+ u32 code;
+};
+
+struct iopf_fault {
+ struct iommu_fault fault;
+ /* node for pending lists */
+ struct list_head list;
+};
+
+struct iopf_group {
+ struct iopf_fault last_fault;
+ struct list_head faults;
+ size_t fault_count;
+ /* list node for iommu_fault_param::faults */
+ struct list_head pending_node;
+ struct work_struct work;
+ struct iommu_attach_handle *attach_handle;
+ /* The device's fault data parameter. */
+ struct iommu_fault_param *fault_param;
+ /* Used by handler provider to hook the group on its own lists. */
+ struct list_head node;
+ u32 cookie;
+};
+
+/**
+ * struct iopf_queue - IO Page Fault queue
+ * @wq: the fault workqueue
+ * @devices: devices attached to this queue
+ * @lock: protects the device list
+ */
+struct iopf_queue {
+ struct workqueue_struct *wq;
+ struct list_head devices;
+ struct mutex lock;
+};
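A minimal usage sketch, assuming the iopf_queue_alloc()/iopf_queue_add_device() helpers declared elsewhere in the IOMMU headers; an IOMMU driver usually allocates one queue per IOMMU instance and enrolls each faulting device (the example_ name is hypothetical):

	static int example_enable_iopf(struct iopf_queue **queuep,
				       struct device *dev, const char *name)
	{
		if (!*queuep) {
			*queuep = iopf_queue_alloc(name);
			if (!*queuep)
				return -ENOMEM;
		}
		return iopf_queue_add_device(*queuep, dev);
	}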
/* iommu fault flags */
#define IOMMU_FAULT_READ 0x0
@@ -47,7 +160,6 @@ struct iommu_fault_event;
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int, void *);
-typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
struct iommu_domain_geometry {
dma_addr_t aperture_start; /* First address that can be mapped */
@@ -55,12 +167,29 @@ struct iommu_domain_geometry {
bool force_aperture; /* DMA only allowed in mappable range? */
};
+enum iommu_domain_cookie_type {
+ IOMMU_COOKIE_NONE,
+ IOMMU_COOKIE_DMA_IOVA,
+ IOMMU_COOKIE_DMA_MSI,
+ IOMMU_COOKIE_FAULT_HANDLER,
+ IOMMU_COOKIE_SVA,
+ IOMMU_COOKIE_IOMMUFD,
+};
+
/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
implementation */
#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
+#define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */
+
+#define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */
+#define __IOMMU_DOMAIN_PLATFORM (1U << 5)
+#define __IOMMU_DOMAIN_NESTED (1U << 6) /* User-managed address space nested
+ on a stage-2 translation */
+
+#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/*
* These are the possible domain types
*
@@ -72,28 +201,76 @@ struct iommu_domain_geometry {
* IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
* This flag allows IOMMU drivers to implement
* certain optimizations for these domains
+ * IOMMU_DOMAIN_DMA_FQ - As above, but definitely using batched TLB
+ * invalidation.
+ * IOMMU_DOMAIN_SVA - DMA addresses are shared process addresses
+ * represented by mm_struct's.
+ * IOMMU_DOMAIN_PLATFORM - Legacy domain for drivers that handle the
+ * DMA API themselves. Do not use in new drivers.
*/
#define IOMMU_DOMAIN_BLOCKED (0U)
#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
__IOMMU_DOMAIN_DMA_API)
+#define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING | \
+ __IOMMU_DOMAIN_DMA_API | \
+ __IOMMU_DOMAIN_DMA_FQ)
+#define IOMMU_DOMAIN_SVA (__IOMMU_DOMAIN_SVA)
+#define IOMMU_DOMAIN_PLATFORM (__IOMMU_DOMAIN_PLATFORM)
+#define IOMMU_DOMAIN_NESTED (__IOMMU_DOMAIN_NESTED)
struct iommu_domain {
unsigned type;
- const struct iommu_ops *ops;
+ enum iommu_domain_cookie_type cookie_type;
+ const struct iommu_domain_ops *ops;
+ const struct iommu_dirty_ops *dirty_ops;
+ const struct iommu_ops *owner; /* Whose domain_alloc we came from */
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
- iommu_fault_handler_t handler;
- void *handler_token;
struct iommu_domain_geometry geometry;
- void *iova_cookie;
+ int (*iopf_handler)(struct iopf_group *group);
+
+ union { /* cookie */
+ struct iommu_dma_cookie *iova_cookie;
+ struct iommu_dma_msi_cookie *msi_cookie;
+ struct iommufd_hw_pagetable *iommufd_hwpt;
+ struct {
+ iommu_fault_handler_t handler;
+ void *handler_token;
+ };
+ struct { /* IOMMU_DOMAIN_SVA */
+ struct mm_struct *mm;
+ int users;
+ /*
+ * Next iommu_domain in mm->iommu_mm->sva-domains list
+ * protected by iommu_sva_lock.
+ */
+ struct list_head next;
+ };
+ };
};
+static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
+{
+ return domain->type & __IOMMU_DOMAIN_DMA_API;
+}
+
enum iommu_cap {
- IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
- transactions */
- IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
+ IOMMU_CAP_CACHE_COHERENCY, /* IOMMU_CACHE is supported */
IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
+ IOMMU_CAP_PRE_BOOT_PROTECTION, /* Firmware says it used the IOMMU for
+ DMA protection and we should too */
+ /*
+ * Per-device flag indicating if enforce_cache_coherency() will work on
+ * this device.
+ */
+ IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
+ /*
+ * IOMMU driver does not issue TLB maintenance during .unmap, so can
+ * usefully support the non-strict DMA flush queue.
+ */
+ IOMMU_CAP_DEFERRED_FLUSH,
+ IOMMU_CAP_DIRTY_TRACKING, /* IOMMU supports dirty tracking */
};
/* These are the possible reserved region types */
@@ -121,6 +298,7 @@ enum iommu_resv_type {
* @length: Length of the region in bytes
* @prot: IOMMU Protection flags (READ/WRITE/...)
* @type: Type of the reserved region
+ * @free: Callback to free associated memory allocations
*/
struct iommu_resv_region {
struct list_head list;
@@ -128,29 +306,36 @@ struct iommu_resv_region {
size_t length;
int prot;
enum iommu_resv_type type;
+ void (*free)(struct device *dev, struct iommu_resv_region *region);
};
-/**
- * enum iommu_dev_features - Per device IOMMU features
- * @IOMMU_DEV_FEAT_AUX: Auxiliary domain feature
- * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
- * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
- * enabling %IOMMU_DEV_FEAT_SVA requires
- * %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
- * Faults themselves instead of relying on the IOMMU. When
- * supported, this feature must be enabled before and
- * disabled after %IOMMU_DEV_FEAT_SVA.
- *
- * Device drivers query whether a feature is supported using
- * iommu_dev_has_feature(), and enable it using iommu_dev_enable_feature().
- */
-enum iommu_dev_features {
- IOMMU_DEV_FEAT_AUX,
- IOMMU_DEV_FEAT_SVA,
- IOMMU_DEV_FEAT_IOPF,
+struct iommu_iort_rmr_data {
+ struct iommu_resv_region rr;
+
+ /* Stream IDs associated with IORT RMR entry */
+ const u32 *sids;
+ u32 num_sids;
};
+#define IOMMU_NO_PASID (0U) /* Reserved for DMA w/o PASID */
+#define IOMMU_FIRST_GLOBAL_PASID (1U) /* starting range for allocation */
#define IOMMU_PASID_INVALID (-1U)
+typedef unsigned int ioasid_t;
+
+/* Read but do not clear any dirty bits */
+#define IOMMU_DIRTY_NO_CLEAR (1 << 0)
+
+/*
+ * Pages allocated through iommu_alloc_pages_node_sz() can be placed on this
+ * list using iommu_pages_list_add(). Note: ONLY pages from
+ * iommu_alloc_pages_node_sz() can be used this way!
+ */
+struct iommu_pages_list {
+ struct list_head pages;
+};
+
+#define IOMMU_PAGES_LIST_INIT(name) \
+ ((struct iommu_pages_list){ .pages = LIST_HEAD_INIT(name.pages) })
#ifdef CONFIG_IOMMU_API
@@ -160,131 +345,440 @@ enum iommu_dev_features {
* @start: IOVA representing the start of the range to be flushed
* @end: IOVA representing the end of the range to be flushed (inclusive)
* @pgsize: The interval at which to perform the flush
+ * @freelist: Removed pages to free after sync
+ * @queued: Indicates that the flush will be queued
*
* This structure is intended to be updated by multiple calls to the
* ->unmap() function in struct iommu_ops before eventually being passed
- * into ->iotlb_sync().
+ * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
+ * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
+ * them. @queued is set to indicate when ->iotlb_flush_all() will be called
+ * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
*/
struct iommu_iotlb_gather {
unsigned long start;
unsigned long end;
size_t pgsize;
- struct page *freelist;
+ struct iommu_pages_list freelist;
+ bool queued;
};
/**
+ * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
+ * @bitmap: IOVA bitmap
+ * @gather: Range information for a pending IOTLB flush
+ */
+struct iommu_dirty_bitmap {
+ struct iova_bitmap *bitmap;
+ struct iommu_iotlb_gather *gather;
+};
+
+/**
+ * struct iommu_dirty_ops - domain specific dirty tracking operations
+ * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain
+ * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
+ * into a bitmap, with each bit representing a page.
+ * Reads the dirty PTE bits and clears them from the
+ * IO page tables.
+ */
+struct iommu_dirty_ops {
+ int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
+ int (*read_and_clear_dirty)(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ unsigned long flags,
+ struct iommu_dirty_bitmap *dirty);
+};
+
+/**
+ * struct iommu_user_data - iommu driver specific user space data info
+ * @type: The data type of the user buffer
+ * @uptr: Pointer to the user buffer for copy_from_user()
+ * @len: The length of the user buffer in bytes
+ *
+ * The user space data is a uAPI structure defined in
+ * include/uapi/linux/iommufd.h; @type, @uptr and @len should be copied
+ * straight from an iommufd core uAPI struct.
+ */
+struct iommu_user_data {
+ unsigned int type;
+ void __user *uptr;
+ size_t len;
+};
+
+/**
+ * struct iommu_user_data_array - iommu driver specific user space data array
+ * @type: The data type of all the entries in the user buffer array
+ * @uptr: Pointer to the user buffer array
+ * @entry_len: The fixed-width length of an entry in the array, in bytes
+ * @entry_num: The number of total entries in the array
+ *
+ * The user buffer includes an array of requests with format defined in
+ * include/uapi/linux/iommufd.h
+ */
+struct iommu_user_data_array {
+ unsigned int type;
+ void __user *uptr;
+ size_t entry_len;
+ u32 entry_num;
+};
+
+/**
+ * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
+ * @dst_data: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @src_data: Pointer to a struct iommu_user_data for user space data info
+ * @data_type: The data type of the @dst_data. Must match with @src_data.type
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ * This should be offsetofend using the last member in the user data
+ * struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int __iommu_copy_struct_from_user(
+ void *dst_data, const struct iommu_user_data *src_data,
+ unsigned int data_type, size_t data_len, size_t min_len)
+{
+ if (WARN_ON(!dst_data || !src_data))
+ return -EINVAL;
+ if (src_data->type != data_type)
+ return -EINVAL;
+ if (src_data->len < min_len || data_len < src_data->len)
+ return -EINVAL;
+ return copy_struct_from_user(dst_data, data_len, src_data->uptr,
+ src_data->len);
+}
+
+/**
+ * iommu_copy_struct_from_user - Copy iommu driver specific user space data
+ * @kdst: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @user_data: Pointer to a struct iommu_user_data for user space data info
+ * @data_type: The data type of the @kdst. Must match with @user_data->type
+ * @min_last: The last member of the data structure @kdst points to, in the
+ *            initial version.
+ * Return 0 for success, otherwise -error.
+ */
+#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
+ __iommu_copy_struct_from_user(kdst, user_data, data_type, \
+ sizeof(*kdst), \
+ offsetofend(typeof(*kdst), min_last))
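A hedged usage sketch with a hypothetical driver uAPI struct; the point is that @min_last names the final member of the structure's first published version, so older, shorter user buffers still pass the min_len check:

	struct iommu_hwpt_mydrv_data {	/* hypothetical uAPI struct */
		__u32 flags;
		__u32 pasid_bits;	/* last member of the initial version */
		__u64 s1_pgtbl;		/* added in a later revision */
	};

	static int mydrv_parse_hwpt_data(const struct iommu_user_data *user_data)
	{
		struct iommu_hwpt_mydrv_data data;
		int ret;

		/* IOMMU_HWPT_DATA_MYDRV is a hypothetical type tag */
		ret = iommu_copy_struct_from_user(&data, user_data,
						  IOMMU_HWPT_DATA_MYDRV,
						  pasid_bits);
		if (ret)
			return ret;
		/* ... validate and consume data ... */
		return 0;
	}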
+
+/**
+ * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space
+ * data from an iommu_user_data_array
+ * @dst_data: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @src_array: Pointer to a struct iommu_user_data_array for a user space array
+ * @data_type: The data type of the @dst_data. Must match with @src_array.type
+ * @index: Index to the location in the array to copy user data from
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ * This should be offsetofend using the last member in the user data
+ * struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int __iommu_copy_struct_from_user_array(
+ void *dst_data, const struct iommu_user_data_array *src_array,
+ unsigned int data_type, unsigned int index, size_t data_len,
+ size_t min_len)
+{
+ struct iommu_user_data src_data;
+
+ if (WARN_ON(!src_array || index >= src_array->entry_num))
+ return -EINVAL;
+ if (!src_array->entry_num)
+ return -EINVAL;
+ src_data.uptr = src_array->uptr + src_array->entry_len * index;
+ src_data.len = src_array->entry_len;
+ src_data.type = src_array->type;
+
+ return __iommu_copy_struct_from_user(dst_data, &src_data, data_type,
+ data_len, min_len);
+}
+
+/**
+ * iommu_copy_struct_from_user_array - Copy iommu driver specific user space
+ * data from an iommu_user_data_array
+ * @kdst: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @user_array: Pointer to a struct iommu_user_data_array for a user space
+ * array
+ * @data_type: The data type of the @kdst. Must match with @user_array->type
+ * @index: Index to the location in the array to copy user data from
+ * @min_last: The last member of the data structure @kdst points to, in the
+ *            initial version.
+ *
+ * Copy a single entry from a user array. Return 0 for success, otherwise
+ * -error.
+ */
+#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
+ min_last) \
+ __iommu_copy_struct_from_user_array( \
+ kdst, user_array, data_type, index, sizeof(*(kdst)), \
+ offsetofend(typeof(*(kdst)), min_last))
+
+/**
+ * iommu_copy_struct_from_full_user_array - Copy iommu driver specific user
+ * space data from an iommu_user_data_array
+ * @kdst: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @kdst_entry_size: sizeof(*kdst)
+ * @user_array: Pointer to a struct iommu_user_data_array for a user space
+ * array
+ * @data_type: The data type of the @kdst. Must match with @user_array->type
+ *
+ * Copy the entire user array. kdst must have room for kdst_entry_size *
+ * user_array->entry_num bytes. Return 0 for success, otherwise -error.
+ */
+static inline int
+iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
+ struct iommu_user_data_array *user_array,
+ unsigned int data_type)
+{
+ unsigned int i;
+ int ret;
+
+ if (user_array->type != data_type)
+ return -EINVAL;
+ if (!user_array->entry_num)
+ return -EINVAL;
+ if (likely(user_array->entry_len == kdst_entry_size)) {
+ if (copy_from_user(kdst, user_array->uptr,
+ user_array->entry_num *
+ user_array->entry_len))
+ return -EFAULT;
+ /* fast path done; the loop below only runs for mismatched entry sizes */
+ return 0;
+ }
+
+ /* Copy item by item */
+ for (i = 0; i != user_array->entry_num; i++) {
+ ret = copy_struct_from_user(
+ kdst + kdst_entry_size * i, kdst_entry_size,
+ user_array->uptr + user_array->entry_len * i,
+ user_array->entry_len);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * __iommu_copy_struct_to_user - Report iommu driver specific user space data
+ * @dst_data: Pointer to a struct iommu_user_data for user space data location
+ * @src_data: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @data_type: The data type of the @src_data. Must match with @dst_data.type
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _src)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ * This should be offsetofend using the last member in the user data
+ * struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int
+__iommu_copy_struct_to_user(const struct iommu_user_data *dst_data,
+ void *src_data, unsigned int data_type,
+ size_t data_len, size_t min_len)
+{
+ if (WARN_ON(!dst_data || !src_data))
+ return -EINVAL;
+ if (dst_data->type != data_type)
+ return -EINVAL;
+ if (dst_data->len < min_len || data_len < dst_data->len)
+ return -EINVAL;
+ return copy_struct_to_user(dst_data->uptr, dst_data->len, src_data,
+ data_len, NULL);
+}
+
+/**
+ * iommu_copy_struct_to_user - Report iommu driver specific user space data
+ * @user_data: Pointer to a struct iommu_user_data for user space data location
+ * @ksrc: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @data_type: The data type of the @ksrc. Must match with @user_data->type
+ * @min_last: The last member of the data structure @ksrc points to, in the
+ *            initial version.
+ * Return 0 for success, otherwise -error.
+ */
+#define iommu_copy_struct_to_user(user_data, ksrc, data_type, min_last) \
+ __iommu_copy_struct_to_user(user_data, ksrc, data_type, sizeof(*ksrc), \
+ offsetofend(typeof(*ksrc), min_last))
+
+/**
* struct iommu_ops - iommu ops and capabilities
* @capable: check capability
- * @domain_alloc: allocate iommu domain
- * @domain_free: free iommu domain
- * @attach_dev: attach device to an iommu domain
- * @detach_dev: detach device from an iommu domain
- * @map: map a physically contiguous memory region to an iommu domain
- * @unmap: unmap a physically contiguous memory region from an iommu domain
- * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
- * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
- * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
- * queue
- * @iova_to_phys: translate iova to physical address
+ * @hw_info: report iommu hardware information. The data buffer returned by this
+ *           op is allocated in the iommu driver and freed by the caller after
+ *           use. @type passes in a requested type and returns the supported
+ *           type; the driver should reject an unsupported data @type input.
+ * @domain_alloc: Do not use in new drivers
+ * @domain_alloc_identity: allocate an IDENTITY domain. Drivers should prefer to
+ * use identity_domain instead. This should only be used
+ * if dynamic logic is necessary.
+ * @domain_alloc_paging_flags: Allocate an iommu domain corresponding to the
+ * input parameters as defined in
+ *                             optionally provided; the new domain must support
+ * optionally provided, the new domain must support
+ * __IOMMU_DOMAIN_PAGING. Upon failure, ERR_PTR must be
+ * returned.
+ * @domain_alloc_paging: Allocate an iommu_domain that can be used for
+ * UNMANAGED, DMA, and DMA_FQ domain types. This is the
+ * same as invoking domain_alloc_paging_flags() with
+ * @flags=0, @user_data=NULL. A driver should implement
+ * only one of the two ops.
+ * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
+ * @domain_alloc_nested: Allocate an iommu_domain for nested translation.
* @probe_device: Add device to iommu driver handling
* @release_device: Remove device from iommu driver handling
* @probe_finalize: Do final setup work after the device is added to an IOMMU
* group and attached to the groups domain
* @device_group: find iommu group for a particular device
- * @enable_nesting: Enable nesting
- * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
* @get_resv_regions: Request list of reserved regions for a device
- * @put_resv_regions: Free list of reserved regions for a device
- * @apply_resv_region: Temporary helper call-back for iova reserved ranges
* @of_xlate: add OF master IDs to iommu grouping
* @is_attach_deferred: Check if domain attach should be deferred from iommu
* driver init to device driver init (default no)
- * @dev_has/enable/disable_feat: per device entries to check/enable/disable
- * iommu specific features.
- * @dev_feat_enabled: check enabled feature
- * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
- * @aux_get_pasid: get the pasid given an aux-domain
- * @sva_bind: Bind process address space to device
- * @sva_unbind: Unbind process address space from device
- * @sva_get_pasid: Get PASID associated to a SVA handle
* @page_response: handle page request response
- * @cache_invalidate: invalidate translation caches
- * @sva_bind_gpasid: bind guest pasid and mm
- * @sva_unbind_gpasid: unbind guest pasid and mm
* @def_domain_type: device default domain type, return value:
* - IOMMU_DOMAIN_IDENTITY: must use an identity domain
* - IOMMU_DOMAIN_DMA: must use a dma domain
* - 0: use the default setting
- * @pgsize_bitmap: bitmap of all possible supported page sizes
+ * @default_domain_ops: the default ops for domains
+ * @get_viommu_size: Get the size of a driver-level vIOMMU structure for a given
+ *                   @dev corresponding to @viommu_type. The driver should
+ *                   return 0 if the vIOMMU type isn't supported. The driver is
+ *                   required to use the VIOMMU_STRUCT_SIZE macro to size the
+ *                   driver-level vIOMMU structure around the core one
+ * @viommu_init: Init the driver-level struct of an iommufd_viommu on a physical
+ *               IOMMU instance @viommu->iommu_dev, as the set of virtualization
+ *               resources shared/passed to a user space IOMMU instance.
+ *               Associate it with a nesting @parent_domain. The driver is
+ *               required to set @viommu->ops to point at its own viommu_ops
* @owner: Driver module providing these ops
+ * @identity_domain: An always available, always attachable identity
+ * translation.
+ * @blocked_domain: An always available, always attachable blocking
+ * translation.
+ * @default_domain: If not NULL this will always be set as the default domain.
+ * This should be an IDENTITY/BLOCKED/PLATFORM domain.
+ * Do not use in new drivers.
+ * @user_pasid_table: IOMMU driver supports user-managed PASID table. There is
+ * no user domain for each PASID and the I/O page faults are
+ * forwarded through the user domain attached to the device
+ * RID.
*/
struct iommu_ops {
- bool (*capable)(enum iommu_cap);
+ bool (*capable)(struct device *dev, enum iommu_cap);
+ void *(*hw_info)(struct device *dev, u32 *length,
+ enum iommu_hw_info_type *type);
/* Domain allocation and freeing by the iommu driver */
+#if IS_ENABLED(CONFIG_FSL_PAMU)
struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
- void (*domain_free)(struct iommu_domain *);
-
- int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
- void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
- int (*map)(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
- size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *iotlb_gather);
- void (*flush_iotlb_all)(struct iommu_domain *domain);
- void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
- size_t size);
- void (*iotlb_sync)(struct iommu_domain *domain,
- struct iommu_iotlb_gather *iotlb_gather);
- phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
+#endif
+ struct iommu_domain *(*domain_alloc_identity)(struct device *dev);
+ struct iommu_domain *(*domain_alloc_paging_flags)(
+ struct device *dev, u32 flags,
+ const struct iommu_user_data *user_data);
+ struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
+ struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
+ struct mm_struct *mm);
+ struct iommu_domain *(*domain_alloc_nested)(
+ struct device *dev, struct iommu_domain *parent, u32 flags,
+ const struct iommu_user_data *user_data);
+
struct iommu_device *(*probe_device)(struct device *dev);
void (*release_device)(struct device *dev);
void (*probe_finalize)(struct device *dev);
struct iommu_group *(*device_group)(struct device *dev);
- int (*enable_nesting)(struct iommu_domain *domain);
- int (*set_pgtable_quirks)(struct iommu_domain *domain,
- unsigned long quirks);
/* Request/Free a list of reserved regions for a device */
void (*get_resv_regions)(struct device *dev, struct list_head *list);
- void (*put_resv_regions)(struct device *dev, struct list_head *list);
- void (*apply_resv_region)(struct device *dev,
- struct iommu_domain *domain,
- struct iommu_resv_region *region);
- int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
- bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);
+ int (*of_xlate)(struct device *dev, const struct of_phandle_args *args);
+ bool (*is_attach_deferred)(struct device *dev);
/* Per device IOMMU features */
- bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
- bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
- int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
- int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
-
- /* Aux-domain specific attach/detach entries */
- int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
- void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
- int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);
-
- struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
- void *drvdata);
- void (*sva_unbind)(struct iommu_sva *handle);
- u32 (*sva_get_pasid)(struct iommu_sva *handle);
-
- int (*page_response)(struct device *dev,
- struct iommu_fault_event *evt,
- struct iommu_page_response *msg);
- int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
- struct iommu_cache_invalidate_info *inv_info);
- int (*sva_bind_gpasid)(struct iommu_domain *domain,
- struct device *dev, struct iommu_gpasid_bind_data *data);
-
- int (*sva_unbind_gpasid)(struct device *dev, u32 pasid);
+ void (*page_response)(struct device *dev, struct iopf_fault *evt,
+ struct iommu_page_response *msg);
int (*def_domain_type)(struct device *dev);
- unsigned long pgsize_bitmap;
+ size_t (*get_viommu_size)(struct device *dev,
+ enum iommu_viommu_type viommu_type);
+ int (*viommu_init)(struct iommufd_viommu *viommu,
+ struct iommu_domain *parent_domain,
+ const struct iommu_user_data *user_data);
+
+ const struct iommu_domain_ops *default_domain_ops;
struct module *owner;
+ struct iommu_domain *identity_domain;
+ struct iommu_domain *blocked_domain;
+ struct iommu_domain *release_domain;
+ struct iommu_domain *default_domain;
+ u8 user_pasid_table:1;
+};
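For orientation, a minimal sketch of what a modern driver's iommu_ops might look like (all mydrv_* symbols are hypothetical); a statically allocated identity domain replaces the old domain_alloc(IOMMU_DOMAIN_IDENTITY) path:

	static const struct iommu_ops mydrv_iommu_ops = {
		.capable		= mydrv_capable,
		.domain_alloc_paging	= mydrv_domain_alloc_paging,
		.probe_device		= mydrv_probe_device,
		.release_device		= mydrv_release_device,
		.device_group		= generic_device_group,
		.of_xlate		= mydrv_of_xlate,
		.identity_domain	= &mydrv_identity_domain,
		.owner			= THIS_MODULE,
	};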
+
+/**
+ * struct iommu_domain_ops - domain specific operations
+ * @attach_dev: attach an iommu domain to a device
+ * Return:
+ * * 0 - success
+ * * EINVAL - can indicate that device and domain are incompatible due to
+ * some previous configuration of the domain, in which case the
+ * driver shouldn't log an error, since it is legitimate for a
+ * caller to test reuse of existing domains. Otherwise, it may
+ * still represent some other fundamental problem
+ * * ENOMEM - out of memory
+ * * ENOSPC - non-ENOMEM type of resource allocation failures
+ * * EBUSY - device is attached to a domain and cannot be changed
+ * * ENODEV - device specific errors, not able to be attached
+ * * <others> - treated as ENODEV by the caller. Use is discouraged
+ * @set_dev_pasid: set or replace an iommu domain for a pasid of a device. On
+ *             error, the pasid of the device should be left in its old config.
+ * @map_pages: map a physically contiguous set of pages of the same size to
+ * an iommu domain.
+ * @unmap_pages: unmap a number of pages of the same size from an iommu domain
+ * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
+ * @iotlb_sync_map: Sync mappings created recently using @map_pages to the hardware
+ * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
+ * queue
+ * @cache_invalidate_user: Flush hardware cache for user space IO page table.
+ * The @domain must be IOMMU_DOMAIN_NESTED. The @array
+ * passes in the cache invalidation requests, in form
+ * of a driver data structure. The driver must update
+ * array->entry_num to report the number of handled
+ * invalidation requests. The driver data structure
+ * must be defined in include/uapi/linux/iommufd.h
+ * @iova_to_phys: translate iova to physical address
+ * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
+ * including no-snoop TLPs on PCIe or other platform
+ * specific mechanisms.
+ * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
+ * @free: Release the domain after use.
+ */
+struct iommu_domain_ops {
+ int (*attach_dev)(struct iommu_domain *domain, struct device *dev,
+ struct iommu_domain *old);
+ int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
+ ioasid_t pasid, struct iommu_domain *old);
+
+ int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped);
+ size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *iotlb_gather);
+
+ void (*flush_iotlb_all)(struct iommu_domain *domain);
+ int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
+ void (*iotlb_sync)(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *iotlb_gather);
+ int (*cache_invalidate_user)(struct iommu_domain *domain,
+ struct iommu_user_data_array *array);
+
+ phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
+ dma_addr_t iova);
+
+ bool (*enforce_cache_coherency)(struct iommu_domain *domain);
+ int (*set_pgtable_quirks)(struct iommu_domain *domain,
+ unsigned long quirks);
+
+ void (*free)(struct iommu_domain *domain);
};
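The matching domain-level sketch; a paging-domain driver typically installs these from its domain_alloc_paging() implementation (mydrv_* names are hypothetical):

	static const struct iommu_domain_ops mydrv_domain_ops = {
		.attach_dev	 = mydrv_attach_dev,
		.map_pages	 = mydrv_map_pages,
		.unmap_pages	 = mydrv_unmap_pages,
		.flush_iotlb_all = mydrv_flush_iotlb_all,
		.iotlb_sync	 = mydrv_iotlb_sync,
		.iova_to_phys	 = mydrv_iova_to_phys,
		.free		 = mydrv_domain_free,
	};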
/**
@@ -293,61 +787,72 @@ struct iommu_ops {
* @list: Used by the iommu-core to keep a list of registered iommus
* @ops: iommu-ops for talking to this iommu
* @dev: struct device for sysfs handling
+ * @singleton_group: Used internally for drivers that have only one group
+ * @max_pasids: number of supported PASIDs
+ * @ready: set once iommu_device_register() has completed successfully
*/
struct iommu_device {
struct list_head list;
const struct iommu_ops *ops;
struct fwnode_handle *fwnode;
struct device *dev;
-};
-
-/**
- * struct iommu_fault_event - Generic fault event
- *
- * Can represent recoverable faults such as a page requests or
- * unrecoverable faults such as DMA or IRQ remapping faults.
- *
- * @fault: fault descriptor
- * @list: pending fault event list, used for tracking responses
- */
-struct iommu_fault_event {
- struct iommu_fault fault;
- struct list_head list;
+ struct iommu_group *singleton_group;
+ u32 max_pasids;
+ bool ready;
};
/**
* struct iommu_fault_param - per-device IOMMU fault data
- * @handler: Callback function to handle IOMMU faults at device level
- * @data: handler private data
- * @faults: holds the pending faults which needs response
* @lock: protect pending faults list
+ * @users: user counter to manage the lifetime of the data
+ * @rcu: rcu head for kfree_rcu()
+ * @dev: the device that owns this param
+ * @queue: IOPF queue
+ * @queue_list: index into queue->devices
+ * @partial: faults that are part of a Page Request Group for which the last
+ * request hasn't been submitted yet.
+ * @faults: holds the pending faults which need response
*/
struct iommu_fault_param {
- iommu_dev_fault_handler_t handler;
- void *data;
- struct list_head faults;
struct mutex lock;
+ refcount_t users;
+ struct rcu_head rcu;
+
+ struct device *dev;
+ struct iopf_queue *queue;
+ struct list_head queue_list;
+
+ struct list_head partial;
+ struct list_head faults;
};
/**
* struct dev_iommu - Collection of per-device IOMMU data
*
* @fault_param: IOMMU detected device fault reporting data
- * @iopf_param: I/O Page Fault queue and data
* @fwspec: IOMMU fwspec data
* @iommu_dev: IOMMU device this device is linked to
* @priv: IOMMU Driver private data
+ * @max_pasids: number of PASIDs this device can consume
+ * @attach_deferred: the dma domain attachment is deferred
+ * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
+ * @require_direct: device requires IOMMU_RESV_DIRECT regions
+ * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
*
* TODO: migrate other per device data pointers under iommu_dev_data, e.g.
* struct iommu_group *iommu_group;
*/
struct dev_iommu {
struct mutex lock;
- struct iommu_fault_param *fault_param;
- struct iopf_device_param *iopf_param;
+ struct iommu_fault_param __rcu *fault_param;
struct iommu_fwspec *fwspec;
struct iommu_device *iommu_dev;
void *priv;
+ u32 max_pasids;
+ u32 attach_deferred:1;
+ u32 pci_32bit_workaround:1;
+ u32 require_direct:1;
+ u32 shadow_on_flush:1;
};
int iommu_device_register(struct iommu_device *iommu,
@@ -368,71 +873,70 @@ static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
return (struct iommu_device *)dev_get_drvdata(dev);
}
+/**
+ * iommu_get_iommu_dev - Get iommu_device for a device
+ * @dev: an end-point device
+ *
+ * Note that this function must be called from within the iommu_ops
+ * to retrieve the iommu_device for a device; the core code
+ * guarantees it will not invoke an op without an attached iommu.
+ */
+static inline struct iommu_device *__iommu_get_iommu_dev(struct device *dev)
+{
+ return dev->iommu->iommu_dev;
+}
+
+#define iommu_get_iommu_dev(dev, type, member) \
+ container_of(__iommu_get_iommu_dev(dev), type, member)
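A short sketch of the intended pattern, assuming a hypothetical driver that embeds struct iommu_device in its per-instance state:

	struct mydrv_iommu {			/* hypothetical instance state */
		void __iomem	*regs;
		struct iommu_device iommu;	/* passed to iommu_device_register() */
	};

	static int mydrv_attach_dev(struct iommu_domain *domain,
				    struct device *dev,
				    struct iommu_domain *old)
	{
		struct mydrv_iommu *m =
			iommu_get_iommu_dev(dev, struct mydrv_iommu, iommu);

		/* program m->regs for dev ... */
		return 0;
	}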
+
static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
*gather = (struct iommu_iotlb_gather) {
.start = ULONG_MAX,
+ .freelist = IOMMU_PAGES_LIST_INIT(gather->freelist),
};
}
-#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
-#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
-#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
-#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4 /* Post Driver bind */
-#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */
-#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
-
-extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
-extern int bus_iommu_probe(struct bus_type *bus);
-extern bool iommu_present(struct bus_type *bus);
-extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
-extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
-extern struct iommu_group *iommu_group_get_by_id(int id);
+extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
+extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
+struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsigned int flags);
+static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
+{
+ return iommu_paging_domain_alloc_flags(dev, 0);
+}
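A hedged consumer-side sketch tying together the allocation, attach and map entry points below (error paths kept minimal; the example_ name is hypothetical):

	static int example_direct_window(struct device *dev, unsigned long iova,
					 phys_addr_t paddr, size_t size)
	{
		struct iommu_domain *domain;
		int ret;

		domain = iommu_paging_domain_alloc(dev);
		if (IS_ERR(domain))
			return PTR_ERR(domain);

		ret = iommu_attach_device(domain, dev);
		if (ret)
			goto out_free;

		ret = iommu_map(domain, iova, paddr, size,
				IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
		if (ret)
			goto out_detach;
		return 0;

	out_detach:
		iommu_detach_device(domain, dev);
	out_free:
		iommu_domain_free(domain);
		return ret;
	}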
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
-extern int iommu_uapi_cache_invalidate(struct iommu_domain *domain,
- struct device *dev,
- void __user *uinfo);
-
-extern int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain,
- struct device *dev, void __user *udata);
-extern int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
- struct device *dev, void __user *udata);
-extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
- struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
-extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
+int iommu_map_nosync(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
+int iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather);
-extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg,unsigned int nents, int prot);
-extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot);
+extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents,
+ int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
-extern void generic_iommu_put_resv_regions(struct device *dev,
- struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
- enum iommu_resv_type type);
+ enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
struct list_head *head);
@@ -454,30 +958,14 @@ extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
-extern int iommu_group_register_notifier(struct iommu_group *group,
- struct notifier_block *nb);
-extern int iommu_group_unregister_notifier(struct iommu_group *group,
- struct notifier_block *nb);
-extern int iommu_register_device_fault_handler(struct device *dev,
- iommu_dev_fault_handler_t handler,
- void *data);
-
-extern int iommu_unregister_device_fault_handler(struct device *dev);
-
-extern int iommu_report_device_fault(struct device *dev,
- struct iommu_fault_event *evt);
-extern int iommu_page_response(struct device *dev,
- struct iommu_page_response *msg);
extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
-int iommu_enable_nesting(struct iommu_domain *domain);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
unsigned long quirks);
-void iommu_set_dma_strict(bool val);
-bool iommu_get_dma_strict(struct iommu_domain *domain);
+void iommu_set_dma_strict(void);
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags);
@@ -497,29 +985,102 @@ static inline void iommu_iotlb_sync(struct iommu_domain *domain,
iommu_iotlb_gather_init(iotlb_gather);
}
+/**
+ * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
+ *
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to check whether a new range and the gathered range
+ * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
+ * than merging the two, which might lead to unnecessary invalidations.
+ */
+static inline
+bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+ unsigned long start = iova, end = start + size - 1;
+
+ return gather->end != 0 &&
+ (end + 1 < gather->start || start > gather->end + 1);
+}
+
+
+/**
+ * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
+ * where only the address range matters, and simply minimising intermediate
+ * syncs is preferred.
+ */
+static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+ unsigned long end = iova + size - 1;
+
+ if (gather->start > iova)
+ gather->start = iova;
+ if (gather->end < end)
+ gather->end = end;
+}
+
+/**
+ * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
+ * @domain: IOMMU domain to be invalidated
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to build invalidation commands based on individual
+ * pages, or with page size/table level hints which cannot be gathered if they
+ * differ.
+ */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size)
{
- unsigned long start = iova, end = start + size - 1;
-
/*
* If the new page is disjoint from the current range or is mapped at
* a different granularity, then sync the TLB so that the gather
* structure can be rewritten.
*/
- if (gather->pgsize != size ||
- end + 1 < gather->start || start > gather->end + 1) {
- if (gather->pgsize)
- iommu_iotlb_sync(domain, gather);
- gather->pgsize = size;
- }
+ if ((gather->pgsize && gather->pgsize != size) ||
+ iommu_iotlb_gather_is_disjoint(gather, iova, size))
+ iommu_iotlb_sync(domain, gather);
- if (gather->end < end)
- gather->end = end;
+ gather->pgsize = size;
+ iommu_iotlb_gather_add_range(gather, iova, size);
+}
+
+static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
+{
+ return gather && gather->queued;
+}
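A short sketch of the canonical unmap pattern these gather helpers support: initialise the gather, batch the unmaps, then issue a single sync (the example_ name is hypothetical):

	static size_t example_unmap_range(struct iommu_domain *domain,
					  unsigned long iova, size_t size)
	{
		struct iommu_iotlb_gather gather;
		size_t unmapped;

		iommu_iotlb_gather_init(&gather);
		unmapped = iommu_unmap_fast(domain, iova, size, &gather);
		iommu_iotlb_sync(domain, &gather);	/* flush gathered ranges */
		return unmapped;
	}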
+
+static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
+ struct iova_bitmap *bitmap,
+ struct iommu_iotlb_gather *gather)
+{
+ if (gather)
+ iommu_iotlb_gather_init(gather);
- if (gather->start > start)
- gather->start = start;
+ dirty->bitmap = bitmap;
+ dirty->gather = gather;
+}
+
+static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
+ unsigned long iova,
+ unsigned long length)
+{
+ if (dirty->bitmap)
+ iova_bitmap_set(dirty->bitmap, iova, length);
+
+ if (dirty->gather)
+ iommu_iotlb_gather_add_range(dirty->gather, iova, length);
}
/* PCI device grouping function */
@@ -528,17 +1089,20 @@ extern struct iommu_group *pci_device_group(struct device *dev);
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);
+extern struct iommu_group *generic_single_device_group(struct device *dev);
/**
* struct iommu_fwspec - per-device IOMMU instance data
- * @ops: ops for this device's IOMMU
* @iommu_fwnode: firmware handle for this device's IOMMU
* @flags: IOMMU_FWSPEC_* flags
* @num_ids: number of associated device IDs
* @ids: IDs which this device may present to the IOMMU
+ *
+ * Note that the IDs (and any other information, really) stored in this structure should be
+ * considered private to the IOMMU device driver and are not to be used directly by IOMMU
+ * consumers.
*/
struct iommu_fwspec {
- const struct iommu_ops *ops;
struct fwnode_handle *iommu_fwnode;
u32 flags;
unsigned int num_ids;
@@ -547,19 +1111,37 @@ struct iommu_fwspec {
/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0)
+/* CANWBS is supported */
+#define IOMMU_FWSPEC_PCI_RC_CANWBS (1 << 1)
+
+/*
+ * An iommu attach handle represents a relationship between an iommu domain
+ * and a PASID or RID of a device. It is allocated and managed by the component
+ * that manages the domain and is stored in the iommu group during the time the
+ * domain is attached.
+ */
+struct iommu_attach_handle {
+ struct iommu_domain *domain;
+};
/**
* struct iommu_sva - handle to a device-mm bond
*/
struct iommu_sva {
+ struct iommu_attach_handle handle;
struct device *dev;
+ refcount_t users;
+};
+
+struct iommu_mm_data {
+ u32 pasid;
+ struct mm_struct *mm;
+ struct list_head sva_domains;
+ struct list_head mm_list_elm;
};
-int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
- const struct iommu_ops *ops);
-void iommu_fwspec_free(struct device *dev);
-int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
-const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
+int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode);
+int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids);
static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
@@ -583,27 +1165,28 @@ static inline void *dev_iommu_priv_get(struct device *dev)
return NULL;
}
-static inline void dev_iommu_priv_set(struct device *dev, void *priv)
-{
- dev->iommu->priv = priv;
-}
+void dev_iommu_priv_set(struct device *dev, void *priv);
+extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);
-void iommu_release_device(struct device *dev);
-int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
-int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
-bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
-int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
-void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
-int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);
+int iommu_device_use_default_domain(struct device *dev);
+void iommu_device_unuse_default_domain(struct device *dev);
-struct iommu_sva *iommu_sva_bind_device(struct device *dev,
- struct mm_struct *mm,
- void *drvdata);
-void iommu_sva_unbind_device(struct iommu_sva *handle);
-u32 iommu_sva_get_pasid(struct iommu_sva *handle);
+int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
+void iommu_group_release_dma_owner(struct iommu_group *group);
+bool iommu_group_dma_owner_claimed(struct iommu_group *group);
+int iommu_device_claim_dma_owner(struct device *dev, void *owner);
+void iommu_device_release_dma_owner(struct device *dev);
+
+int iommu_attach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_attach_handle *handle);
+void iommu_detach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
+ioasid_t iommu_alloc_global_pasid(struct device *dev);
+void iommu_free_global_pasid(ioasid_t pasid);
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
@@ -612,25 +1195,23 @@ struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};
+struct iommu_dirty_bitmap {};
+struct iommu_dirty_ops {};
-static inline bool iommu_present(struct bus_type *bus)
-{
- return false;
-}
-
-static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
+static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
return false;
}
-static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
+static inline struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
+ unsigned int flags)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
-static inline struct iommu_group *iommu_group_get_by_id(int id)
+static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline void iommu_domain_free(struct iommu_domain *domain)
@@ -654,14 +1235,7 @@ static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
}
static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
-{
- return -ENODEV;
-}
-
-static inline int iommu_map_atomic(struct iommu_domain *domain,
- unsigned long iova, phys_addr_t paddr,
- size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
return -ENODEV;
}
@@ -679,18 +1253,11 @@ static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
return 0;
}
-static inline size_t iommu_map_sg(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot)
+static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot, gfp_t gfp)
{
- return 0;
-}
-
-static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot)
-{
- return 0;
+ return -ENODEV;
}
static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
@@ -800,43 +1367,6 @@ static inline void iommu_group_put(struct iommu_group *group)
{
}
-static inline int iommu_group_register_notifier(struct iommu_group *group,
- struct notifier_block *nb)
-{
- return -ENODEV;
-}
-
-static inline int iommu_group_unregister_notifier(struct iommu_group *group,
- struct notifier_block *nb)
-{
- return 0;
-}
-
-static inline
-int iommu_register_device_fault_handler(struct device *dev,
- iommu_dev_fault_handler_t handler,
- void *data)
-{
- return -ENODEV;
-}
-
-static inline int iommu_unregister_device_fault_handler(struct device *dev)
-{
- return 0;
-}
-
-static inline
-int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
-{
- return -ENODEV;
-}
-
-static inline int iommu_page_response(struct device *dev,
- struct iommu_page_response *msg)
-{
- return -ENODEV;
-}
-
static inline int iommu_group_id(struct iommu_group *group)
{
return -ENODEV;
@@ -870,6 +1400,23 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
{
}
+static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
+{
+ return false;
+}
+
+static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
+ struct iova_bitmap *bitmap,
+ struct iommu_iotlb_gather *gather)
+{
+}
+
+static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
+ unsigned long iova,
+ unsigned long length)
+{
+}
+
static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}
@@ -896,110 +1443,94 @@ static inline void iommu_device_unlink(struct device *dev, struct device *link)
}
static inline int iommu_fwspec_init(struct device *dev,
- struct fwnode_handle *iommu_fwnode,
- const struct iommu_ops *ops)
+ struct fwnode_handle *iommu_fwnode)
{
return -ENODEV;
}
-static inline void iommu_fwspec_free(struct device *dev)
-{
-}
-
static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
int num_ids)
{
return -ENODEV;
}
-static inline
-const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
+static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
return NULL;
}
-static inline bool
-iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
+static inline int iommu_device_use_default_domain(struct device *dev)
{
- return false;
-}
-
-static inline int
-iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
-{
- return -ENODEV;
+ return 0;
}
-static inline int
-iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
+static inline void iommu_device_unuse_default_domain(struct device *dev)
{
- return -ENODEV;
}
static inline int
-iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
+iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
return -ENODEV;
}
-static inline void
-iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
+static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
}
-static inline int
-iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
- return -ENODEV;
-}
-
-static inline struct iommu_sva *
-iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
-{
- return NULL;
+ return false;
}
-static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
+static inline void iommu_device_release_dma_owner(struct device *dev)
{
}
-static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
+static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
- return IOMMU_PASID_INVALID;
+ return -ENODEV;
}
-static inline int
-iommu_uapi_cache_invalidate(struct iommu_domain *domain,
- struct device *dev,
- struct iommu_cache_invalidate_info *inv_info)
+static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid,
+ struct iommu_attach_handle *handle)
{
return -ENODEV;
}
-static inline int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain,
- struct device *dev, void __user *udata)
+static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
{
- return -ENODEV;
}
-static inline int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
- struct device *dev, void __user *udata)
+static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
- return -ENODEV;
+ return IOMMU_PASID_INVALID;
}
-static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
- struct device *dev,
- ioasid_t pasid)
+static inline void iommu_free_global_pasid(ioasid_t pasid) {}
+#endif /* CONFIG_IOMMU_API */
+
+#ifdef CONFIG_IRQ_MSI_IOMMU
+#ifdef CONFIG_IOMMU_API
+int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
+#else
+static inline int iommu_dma_prepare_msi(struct msi_desc *desc,
+ phys_addr_t msi_addr)
{
- return -ENODEV;
+ return 0;
}
+#endif /* CONFIG_IOMMU_API */
+#endif /* CONFIG_IRQ_MSI_IOMMU */
-static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
+#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
+void iommu_group_mutex_assert(struct device *dev);
+#else
+static inline void iommu_group_mutex_assert(struct device *dev)
{
- return NULL;
}
-#endif /* CONFIG_IOMMU_API */
+#endif
/**
* iommu_map_sgtable - Map the given buffer to the IOMMU domain
@@ -1011,10 +1542,11 @@ static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
* Creates a mapping at @iova for the buffer described by a scatterlist
* stored in the given sg_table object in the provided IOMMU domain.
*/
-static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
+static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
unsigned long iova, struct sg_table *sgt, int prot)
{
- return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
+ return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
+ GFP_KERNEL);
}
#ifdef CONFIG_IOMMU_DEBUGFS
@@ -1024,4 +1556,151 @@ void iommu_debugfs_setup(void);
static inline void iommu_debugfs_setup(void) {}
#endif
+#ifdef CONFIG_IOMMU_DMA
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
+#else /* CONFIG_IOMMU_DMA */
+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_IOMMU_DMA */
+
+/*
+ * Newer generations of Tegra SoCs require devices' stream IDs to be directly programmed into
+ * some registers. These are always paired with a Tegra SMMU or ARM SMMU, for which the contents
+ * of the struct iommu_fwspec are known. Use this helper to formalize access to these internals.
+ */
+#define TEGRA_STREAM_ID_BYPASS 0x7f
+
+static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
+{
+#ifdef CONFIG_IOMMU_API
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+ if (fwspec && fwspec->num_ids == 1) {
+ *stream_id = fwspec->ids[0] & 0xffff;
+ return true;
+ }
+#endif
+
+ return false;
+}
+
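As a hedged usage sketch (the host1x device, the register block, and the 0x30 offset are invented for illustration), a Tegra platform driver would pre-load the bypass value and overwrite it only when the helper finds a usable fwspec ID:

static void host1x_program_stream_id(struct device *dev, void __iomem *regs)
{
	u32 sid = TEGRA_STREAM_ID_BYPASS;	/* fallback when no IOMMU is attached */

	/* Overwrites sid only when exactly one fwspec ID is available */
	tegra_dev_iommu_get_stream_id(dev, &sid);

	writel(sid, regs + 0x30);		/* hypothetical STREAM_ID register */
}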
+#ifdef CONFIG_IOMMU_MM_DATA
+static inline void mm_pasid_init(struct mm_struct *mm)
+{
+ /*
+ * During dup_mm(), the new mm is memcpy'd from the old one, so both mms
+ * end up pointing to the same iommu_mm instance. When either of the two
+ * mms is released, the iommu_mm instance is freed, leaving the other mm
+ * exposed to a use-after-free/double-free. To avoid that, zero the new
+ * mm's iommu_mm pointer here.
+ */
+ mm->iommu_mm = NULL;
+}
+
+static inline bool mm_valid_pasid(struct mm_struct *mm)
+{
+ return READ_ONCE(mm->iommu_mm);
+}
+
+static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
+{
+ struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);
+
+ if (!iommu_mm)
+ return IOMMU_PASID_INVALID;
+ return iommu_mm->pasid;
+}
+
+void mm_pasid_drop(struct mm_struct *mm);
+struct iommu_sva *iommu_sva_bind_device(struct device *dev,
+ struct mm_struct *mm);
+void iommu_sva_unbind_device(struct iommu_sva *handle);
+u32 iommu_sva_get_pasid(struct iommu_sva *handle);
+void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end);
+#else
+static inline struct iommu_sva *
+iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
+{
+}
+
+static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
+{
+ return IOMMU_PASID_INVALID;
+}
+static inline void mm_pasid_init(struct mm_struct *mm) {}
+static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }
+
+static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
+{
+ return IOMMU_PASID_INVALID;
+}
+
+static inline void mm_pasid_drop(struct mm_struct *mm) {}
+static inline void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end) {}
+#endif /* CONFIG_IOMMU_SVA */
+
+#ifdef CONFIG_IOMMU_IOPF
+int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
+void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
+int iopf_queue_flush_dev(struct device *dev);
+struct iopf_queue *iopf_queue_alloc(const char *name);
+void iopf_queue_free(struct iopf_queue *queue);
+int iopf_queue_discard_partial(struct iopf_queue *queue);
+void iopf_free_group(struct iopf_group *group);
+int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
+void iopf_group_response(struct iopf_group *group,
+ enum iommu_page_response_code status);
+#else
+static inline int
+iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline void
+iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
+{
+}
+
+static inline int iopf_queue_flush_dev(struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline struct iopf_queue *iopf_queue_alloc(const char *name)
+{
+ return NULL;
+}
+
+static inline void iopf_queue_free(struct iopf_queue *queue)
+{
+}
+
+static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
+{
+ return -ENODEV;
+}
+
+static inline void iopf_free_group(struct iopf_group *group)
+{
+}
+
+static inline int
+iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
+{
+ return -ENODEV;
+}
+
+static inline void iopf_group_response(struct iopf_group *group,
+ enum iommu_page_response_code status)
+{
+}
+#endif /* CONFIG_IOMMU_IOPF */
#endif /* __LINUX_IOMMU_H */
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
new file mode 100644
index 000000000000..6e7efe83bc5d
--- /dev/null
+++ b/include/linux/iommufd.h
@@ -0,0 +1,400 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __LINUX_IOMMUFD_H
+#define __LINUX_IOMMUFD_H
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/iommu.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
+#include <linux/xarray.h>
+#include <uapi/linux/iommufd.h>
+
+struct device;
+struct file;
+struct iommu_group;
+struct iommu_user_data;
+struct iommu_user_data_array;
+struct iommufd_access;
+struct iommufd_ctx;
+struct iommufd_device;
+struct iommufd_viommu_ops;
+struct page;
+
+enum iommufd_object_type {
+ IOMMUFD_OBJ_NONE,
+ IOMMUFD_OBJ_ANY = IOMMUFD_OBJ_NONE,
+ IOMMUFD_OBJ_DEVICE,
+ IOMMUFD_OBJ_HWPT_PAGING,
+ IOMMUFD_OBJ_HWPT_NESTED,
+ IOMMUFD_OBJ_IOAS,
+ IOMMUFD_OBJ_ACCESS,
+ IOMMUFD_OBJ_FAULT,
+ IOMMUFD_OBJ_VIOMMU,
+ IOMMUFD_OBJ_VDEVICE,
+ IOMMUFD_OBJ_VEVENTQ,
+ IOMMUFD_OBJ_HW_QUEUE,
+#ifdef CONFIG_IOMMUFD_TEST
+ IOMMUFD_OBJ_SELFTEST,
+#endif
+ IOMMUFD_OBJ_MAX,
+};
+
+/* Base struct for all objects with a userspace ID handle. */
+struct iommufd_object {
+ /*
+ * Destroy will sleep and wait for wait_cnt to go to zero. This allows
+ * concurrent users of the ID to reliably avoid causing a spurious
+ * destroy failure. Increments of this count should either be
+ * short-lived or be revoked and blocked during pre_destroy().
+ */
+ refcount_t wait_cnt;
+ refcount_t users;
+ enum iommufd_object_type type;
+ unsigned int id;
+};
+
+struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
+ struct device *dev, u32 *id);
+void iommufd_device_unbind(struct iommufd_device *idev);
+
+int iommufd_device_attach(struct iommufd_device *idev, ioasid_t pasid,
+ u32 *pt_id);
+int iommufd_device_replace(struct iommufd_device *idev, ioasid_t pasid,
+ u32 *pt_id);
+void iommufd_device_detach(struct iommufd_device *idev, ioasid_t pasid);
+
+struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev);
+u32 iommufd_device_to_id(struct iommufd_device *idev);
+
+struct iommufd_access_ops {
+ u8 needs_pin_pages : 1;
+ void (*unmap)(void *data, unsigned long iova, unsigned long length);
+};
+
+enum {
+ IOMMUFD_ACCESS_RW_READ = 0,
+ IOMMUFD_ACCESS_RW_WRITE = 1 << 0,
+ /* Set if the caller is in a kthread; rw will then use kthread_use_mm() */
+ IOMMUFD_ACCESS_RW_KTHREAD = 1 << 1,
+
+ /* Only for use by selftest */
+ __IOMMUFD_ACCESS_RW_SLOW_PATH = 1 << 2,
+};
+
+struct iommufd_access *
+iommufd_access_create(struct iommufd_ctx *ictx,
+ const struct iommufd_access_ops *ops, void *data, u32 *id);
+void iommufd_access_destroy(struct iommufd_access *access);
+int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id);
+int iommufd_access_replace(struct iommufd_access *access, u32 ioas_id);
+void iommufd_access_detach(struct iommufd_access *access);
+
+void iommufd_ctx_get(struct iommufd_ctx *ictx);
+
+struct iommufd_viommu {
+ struct iommufd_object obj;
+ struct iommufd_ctx *ictx;
+ struct iommu_device *iommu_dev;
+ struct iommufd_hwpt_paging *hwpt;
+
+ const struct iommufd_viommu_ops *ops;
+
+ struct xarray vdevs;
+ struct list_head veventqs;
+ struct rw_semaphore veventqs_rwsem;
+
+ enum iommu_viommu_type type;
+};
+
+struct iommufd_vdevice {
+ struct iommufd_object obj;
+ struct iommufd_viommu *viommu;
+ struct iommufd_device *idev;
+
+ /*
+ * Virtual device ID per vIOMMU, e.g. vSID of ARM SMMUv3, vDeviceID of
+ * AMD IOMMU, and vRID of Intel VT-d
+ */
+ u64 virt_id;
+
+ /* Clean up all driver-specific parts of an iommufd_vdevice */
+ void (*destroy)(struct iommufd_vdevice *vdev);
+};
+
+struct iommufd_hw_queue {
+ struct iommufd_object obj;
+ struct iommufd_viommu *viommu;
+ struct iommufd_access *access;
+
+ u64 base_addr; /* in guest physical address space */
+ size_t length;
+
+ enum iommu_hw_queue_type type;
+
+ /* Clean up all driver-specific parts of an iommufd_hw_queue */
+ void (*destroy)(struct iommufd_hw_queue *hw_queue);
+};
+
+/**
+ * struct iommufd_viommu_ops - vIOMMU specific operations
+ * @destroy: Clean up all driver-specific parts of an iommufd_viommu. The memory
+ * of the vIOMMU will be freed by the iommufd core after calling this op
+ * @alloc_domain_nested: Allocate an IOMMU_DOMAIN_NESTED on a vIOMMU that holds a
+ * nesting parent domain (IOMMU_DOMAIN_PAGING). @user_data
+ * must be defined in include/uapi/linux/iommufd.h.
+ * It must fully initialize the new iommu_domain before
+ * returning. Upon failure, ERR_PTR must be returned.
+ * @cache_invalidate: Flush hardware cache used by a vIOMMU. It can be used for
+ * any IOMMU hardware-specific cache, e.g. the TLB or a device cache.
+ * @array passes in the cache invalidation requests in the
+ * form of a driver data structure. A driver must update
+ * array->entry_num to report the number of handled requests.
+ * The data structure of the array entry must be defined in
+ * include/uapi/linux/iommufd.h
+ * @vdevice_size: Size of the driver-defined vDEVICE structure per this vIOMMU
+ * @vdevice_init: Initialize the driver-level structure of a vDEVICE object, or
+ * related HW procedure. @vdev is already initialized by iommufd
+ * core: vdev->dev and vdev->viommu pointers; vdev->id carries a
+ * per-vIOMMU virtual ID (refer to struct iommu_vdevice_alloc in
+ * include/uapi/linux/iommufd.h)
+ * If the driver has a deinit function to revert what the
+ * vdevice_init op does, it should set it to the @vdev->destroy
+ * function pointer
+ * @get_hw_queue_size: Get the size of a driver-defined HW queue structure for a
+ * given @viommu corresponding to @queue_type. The driver
+ * should return 0 if the HW queue type isn't supported. The
+ * driver is required to use the HW_QUEUE_STRUCT_SIZE macro
+ * so that the driver-level HW queue structure is validated
+ * against the core one
+ * @hw_queue_init_phys: Initialize the driver-level structure of a HW queue. Its
+ * core-level structure, which holds all the info about the
+ * guest queue memory, is already initialized.
+ * A driver providing this op indicates that the HW accesses
+ * the guest queue memory via physical addresses.
+ * @index carries the logical HW queue ID per vIOMMU in a
+ * guest VM, for a multi-queue model. @base_addr_pa carries
+ * the physical location of the guest queue
+ * If the driver has a deinit function to revert what this op
+ * does, it should set it to the @hw_queue->destroy pointer
+ */
+struct iommufd_viommu_ops {
+ void (*destroy)(struct iommufd_viommu *viommu);
+ struct iommu_domain *(*alloc_domain_nested)(
+ struct iommufd_viommu *viommu, u32 flags,
+ const struct iommu_user_data *user_data);
+ int (*cache_invalidate)(struct iommufd_viommu *viommu,
+ struct iommu_user_data_array *array);
+ const size_t vdevice_size;
+ int (*vdevice_init)(struct iommufd_vdevice *vdev);
+ size_t (*get_hw_queue_size)(struct iommufd_viommu *viommu,
+ enum iommu_hw_queue_type queue_type);
+ /* AMD's HW will add hw_queue_init simply using @hw_queue->base_addr */
+ int (*hw_queue_init_phys)(struct iommufd_hw_queue *hw_queue, u32 index,
+ phys_addr_t base_addr_pa);
+};
+
+#if IS_ENABLED(CONFIG_IOMMUFD)
+struct iommufd_ctx *iommufd_ctx_from_file(struct file *file);
+struct iommufd_ctx *iommufd_ctx_from_fd(int fd);
+void iommufd_ctx_put(struct iommufd_ctx *ictx);
+bool iommufd_ctx_has_group(struct iommufd_ctx *ictx, struct iommu_group *group);
+
+int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
+ unsigned long length, struct page **out_pages,
+ unsigned int flags);
+void iommufd_access_unpin_pages(struct iommufd_access *access,
+ unsigned long iova, unsigned long length);
+int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
+ void *data, size_t len, unsigned int flags);
+int iommufd_vfio_compat_ioas_get_id(struct iommufd_ctx *ictx, u32 *out_ioas_id);
+int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx);
+int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx);
+#else /* !CONFIG_IOMMUFD */
+static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void iommufd_ctx_put(struct iommufd_ctx *ictx)
+{
+}
+
+static inline int iommufd_access_pin_pages(struct iommufd_access *access,
+ unsigned long iova,
+ unsigned long length,
+ struct page **out_pages,
+ unsigned int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void iommufd_access_unpin_pages(struct iommufd_access *access,
+ unsigned long iova,
+ unsigned long length)
+{
+}
+
+static inline int iommufd_access_rw(struct iommufd_access *access,
+ unsigned long iova, void *data, size_t len,
+ unsigned int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_IOMMUFD */
+
+#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER_CORE)
+int _iommufd_object_depend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended);
+void _iommufd_object_undepend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended);
+int _iommufd_alloc_mmap(struct iommufd_ctx *ictx, struct iommufd_object *owner,
+ phys_addr_t mmio_addr, size_t length,
+ unsigned long *offset);
+void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner, unsigned long offset);
+struct device *iommufd_vdevice_to_device(struct iommufd_vdevice *vdev);
+struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
+ unsigned long vdev_id);
+int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
+ struct device *dev, unsigned long *vdev_id);
+int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
+ enum iommu_veventq_type type, void *event_data,
+ size_t data_len);
+#else /* !CONFIG_IOMMUFD_DRIVER_CORE */
+static inline int _iommufd_object_depend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+_iommufd_object_undepend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended)
+{
+}
+
+static inline int _iommufd_alloc_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner,
+ phys_addr_t mmio_addr, size_t length,
+ unsigned long *offset)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner,
+ unsigned long offset)
+{
+}
+
+static inline struct device *
+iommufd_vdevice_to_device(struct iommufd_vdevice *vdev)
+{
+ return NULL;
+}
+
+static inline struct device *
+iommufd_viommu_find_dev(struct iommufd_viommu *viommu, unsigned long vdev_id)
+{
+ return NULL;
+}
+
+static inline int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
+ struct device *dev,
+ unsigned long *vdev_id)
+{
+ return -ENOENT;
+}
+
+static inline int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
+ enum iommu_veventq_type type,
+ void *event_data, size_t data_len)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_IOMMUFD_DRIVER_CORE */
+
+#define VIOMMU_STRUCT_SIZE(drv_struct, member) \
+ (sizeof(drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_viommu, \
+ ((drv_struct *)NULL)->member)))
+
+#define VDEVICE_STRUCT_SIZE(drv_struct, member) \
+ (sizeof(drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_vdevice, \
+ ((drv_struct *)NULL)->member)))
+
+#define HW_QUEUE_STRUCT_SIZE(drv_struct, member) \
+ (sizeof(drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_hw_queue, \
+ ((drv_struct *)NULL)->member)))
+
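A minimal sketch of how these pieces fit together (all my_* names are hypothetical): the driver embeds struct iommufd_viommu as the first member of its own structure, reports the total size through VIOMMU_STRUCT_SIZE() so the BUILD_BUG_ON_ZERO() checks catch a misplaced or mistyped member at compile time, and recovers its wrapper with container_of() in the ops:

struct my_viommu {
	struct iommufd_viommu core;	/* must be first; validated by the macro */
	u32 vmid;			/* driver-private state */
};

static void my_viommu_destroy(struct iommufd_viommu *viommu)
{
	struct my_viommu *mv = container_of(viommu, struct my_viommu, core);

	/* release mv->vmid etc.; the iommufd core frees the memory afterwards */
}

static const struct iommufd_viommu_ops my_viommu_ops = {
	.destroy = my_viommu_destroy,
};

/* Size the core allocates for this driver's vIOMMU object */
#define MY_VIOMMU_SIZE	VIOMMU_STRUCT_SIZE(struct my_viommu, core)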
+/*
+ * Helpers for an IOMMU driver to build/destroy a dependency between two sibling
+ * structures created by one of the allocators above.
+ */
+#define iommufd_hw_queue_depend(dependent, depended, member) \
+ ({ \
+ int ret = -EINVAL; \
+ \
+ static_assert(__same_type(struct iommufd_hw_queue, \
+ dependent->member)); \
+ static_assert(__same_type(typeof(*dependent), *depended)); \
+ if (!WARN_ON_ONCE(dependent->member.viommu != \
+ depended->member.viommu)) \
+ ret = _iommufd_object_depend(&dependent->member.obj, \
+ &depended->member.obj); \
+ ret; \
+ })
+
+#define iommufd_hw_queue_undepend(dependent, depended, member) \
+ ({ \
+ static_assert(__same_type(struct iommufd_hw_queue, \
+ dependent->member)); \
+ static_assert(__same_type(typeof(*dependent), *depended)); \
+ WARN_ON_ONCE(dependent->member.viommu != \
+ depended->member.viommu); \
+ _iommufd_object_undepend(&dependent->member.obj, \
+ &depended->member.obj); \
+ })
+
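For instance (names hypothetical), a driver whose queue N must not outlive queue 0 could wire the dependency as below; both wrappers embed struct iommufd_hw_queue as a member named core, which is what the macro's type checks expect:

struct my_vcmdq {
	struct iommufd_hw_queue core;
};

static int my_vcmdq_link(struct my_vcmdq *vcmdq, struct my_vcmdq *vcmdq0)
{
	/* Returns -EINVAL if the two queues belong to different vIOMMUs */
	return iommufd_hw_queue_depend(vcmdq, vcmdq0, core);
}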
+/*
+ * Helpers for an IOMMU driver to alloc/destroy an mmappable area for a structure.
+ *
+ * To support an mmappable MMIO region, a kernel driver must first register it
+ * with the iommufd core to allocate an @offset, during driver-structure
+ * initialization (e.g. the viommu_init op). Then, it should report this @offset
+ * and the @length of the MMIO region to user space for the mmap syscall.
+ */
+static inline int iommufd_viommu_alloc_mmap(struct iommufd_viommu *viommu,
+ phys_addr_t mmio_addr,
+ size_t length,
+ unsigned long *offset)
+{
+ return _iommufd_alloc_mmap(viommu->ictx, &viommu->obj, mmio_addr,
+ length, offset);
+}
+
+static inline void iommufd_viommu_destroy_mmap(struct iommufd_viommu *viommu,
+ unsigned long offset)
+{
+ _iommufd_destroy_mmap(viommu->ictx, &viommu->obj, offset);
+}
+#endif
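A hedged sketch of that flow, reusing the hypothetical my_viommu wrapper from the earlier sketch and invented out_* fields; it would run from a viommu_init-style op:

static int my_viommu_expose_page(struct my_viommu *mv, phys_addr_t mmio_pa,
				 u64 *out_offset, u64 *out_length)
{
	unsigned long offset;
	int rc;

	rc = iommufd_viommu_alloc_mmap(&mv->core, mmio_pa, PAGE_SIZE, &offset);
	if (rc)
		return rc;

	/* Reported to user space, which passes them to the mmap syscall */
	*out_offset = offset;
	*out_length = PAGE_SIZE;
	return 0;
}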
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index 2c8860e406bd..bdd2e0652bc3 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -14,108 +14,180 @@
#include <linux/io.h>
/**
- * read_poll_timeout - Periodically poll an address until a condition is
- * met or a timeout occurs
- * @op: accessor function (takes @args as its arguments)
- * @val: Variable to read the value into
- * @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0
- * tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.rst).
- * @timeout_us: Timeout in us, 0 means never timeout
- * @sleep_before_read: if it is true, sleep @sleep_us before read.
- * @args: arguments for @op poll
+ * poll_timeout_us - Periodically poll and perform an operation until
+ * a condition is met or a timeout occurs
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @args is stored in @val. Must not
- * be called from atomic context if sleep_us or timeout_us are used.
+ * @op: Operation
+ * @cond: Break condition
+ * @sleep_us: Maximum time to sleep between operations in us (0 tight-loops).
+ * Please read usleep_range() function description for details and
+ * limitations.
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @sleep_before_op: if true, sleep @sleep_us before the first operation.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
*/
-#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \
- sleep_before_read, args...) \
+#define poll_timeout_us(op, cond, sleep_us, timeout_us, sleep_before_op) \
({ \
u64 __timeout_us = (timeout_us); \
unsigned long __sleep_us = (sleep_us); \
ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ int ___ret; \
might_sleep_if((__sleep_us) != 0); \
- if (sleep_before_read && __sleep_us) \
+ if ((sleep_before_op) && __sleep_us) \
usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
for (;;) { \
- (val) = op(args); \
- if (cond) \
+ bool __expired = __timeout_us && \
+ ktime_compare(ktime_get(), __timeout) > 0; \
+ /* guarantee 'op' and 'cond' are evaluated after timeout expired */ \
+ barrier(); \
+ op; \
+ if (cond) { \
+ ___ret = 0; \
break; \
- if (__timeout_us && \
- ktime_compare(ktime_get(), __timeout) > 0) { \
- (val) = op(args); \
+ } \
+ if (__expired) { \
+ ___ret = -ETIMEDOUT; \
break; \
} \
if (__sleep_us) \
usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
+ cpu_relax(); \
} \
- (cond) ? 0 : -ETIMEDOUT; \
+ ___ret; \
})
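To illustrate (the register offset and READY bit are invented): unlike the read_poll_timeout() wrappers below, @op here is a full statement, so the read and the value assignment are written out explicitly:

u32 status;
int ret;

/* Poll up to 100 ms, sleeping around 50 us between reads */
ret = poll_timeout_us(status = readl(regs + 0x04),
		      status & BIT(0),		/* hypothetical READY bit */
		      50, 100 * USEC_PER_MSEC, false);
if (ret)		/* -ETIMEDOUT */
	return ret;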
/**
- * read_poll_timeout_atomic - Periodically poll an address until a condition is
- * met or a timeout occurs
- * @op: accessor function (takes @args as its arguments)
- * @val: Variable to read the value into
- * @cond: Break condition (usually involving @val)
- * @delay_us: Time to udelay between reads in us (0 tight-loops). Should
- * be less than ~10us since udelay is used (see
- * Documentation/timers/timers-howto.rst).
+ * poll_timeout_us_atomic - Periodically poll and perform an operation until
+ * a condition is met or a timeout occurs
+ *
+ * @op: Operation
+ * @cond: Break condition
+ * @delay_us: Time to udelay between operations in us (0 tight-loops).
+ * Please read udelay() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
- * @delay_before_read: if it is true, delay @delay_us before read.
- * @args: arguments for @op poll
+ * @delay_before_op: if true, delay @delay_us before the first operation.
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @args is stored in @val.
+ * This macro does not rely on timekeeping. Hence it is safe to call even when
+ * timekeeping is suspended, at the expense of an underestimation of wall clock
+ * time, which is rather minimal with a non-zero delay_us.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout.
*/
-#define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \
- delay_before_read, args...) \
+#define poll_timeout_us_atomic(op, cond, delay_us, timeout_us, \
+ delay_before_op) \
({ \
u64 __timeout_us = (timeout_us); \
+ s64 __left_ns = __timeout_us * NSEC_PER_USEC; \
unsigned long __delay_us = (delay_us); \
- ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
- if (delay_before_read && __delay_us) \
+ u64 __delay_ns = __delay_us * NSEC_PER_USEC; \
+ int ___ret; \
+ if ((delay_before_op) && __delay_us) { \
udelay(__delay_us); \
+ if (__timeout_us) \
+ __left_ns -= __delay_ns; \
+ } \
for (;;) { \
- (val) = op(args); \
- if (cond) \
+ bool __expired = __timeout_us && __left_ns < 0; \
+ /* guarantee 'op' and 'cond' are evaluated after timeout expired */ \
+ barrier(); \
+ op; \
+ if (cond) { \
+ ___ret = 0; \
break; \
- if (__timeout_us && \
- ktime_compare(ktime_get(), __timeout) > 0) { \
- (val) = op(args); \
+ } \
+ if (__expired) { \
+ ___ret = -ETIMEDOUT; \
break; \
} \
- if (__delay_us) \
+ if (__delay_us) { \
udelay(__delay_us); \
+ if (__timeout_us) \
+ __left_ns -= __delay_ns; \
+ } \
+ cpu_relax(); \
+ if (__timeout_us) \
+ __left_ns--; \
} \
- (cond) ? 0 : -ETIMEDOUT; \
+ ___ret; \
})
/**
+ * read_poll_timeout - Periodically poll an address until a condition is
+ * met or a timeout occurs
+ * @op: accessor function (takes @args as its arguments)
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ * read usleep_range() function description for details and
+ * limitations.
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @sleep_before_read: if true, sleep @sleep_us before the first read.
+ * @args: arguments for @op poll
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @args is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ */
+#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \
+ sleep_before_read, args...) \
+ poll_timeout_us((val) = op(args), cond, sleep_us, timeout_us, sleep_before_read)
+
+/**
+ * read_poll_timeout_atomic - Periodically poll an address until a condition is
+ * met or a timeout occurs
+ * @op: accessor function (takes @args as its arguments)
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Please
+ * read udelay() function description for details and
+ * limitations.
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @delay_before_read: if true, delay @delay_us before the first read.
+ * @args: arguments for @op poll
+ *
+ * This macro does not rely on timekeeping. Hence it is safe to call even when
+ * timekeeping is suspended, at the expense of an underestimation of wall clock
+ * time, which is rather minimal with a non-zero delay_us.
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @args is stored in @val.
+ */
+#define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \
+ delay_before_read, args...) \
+ poll_timeout_us_atomic((val) = op(args), cond, delay_us, timeout_us, delay_before_read)
+
+/**
* readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
* @op: accessor function (takes @addr as its only argument)
* @addr: Address to poll
* @val: Variable to read the value into
* @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0
- * tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.rst).
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ * read usleep_range() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @addr is stored in @val. Must not
- * be called from atomic context if sleep_us or timeout_us are used.
- *
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
*/
#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
read_poll_timeout(op, val, cond, sleep_us, timeout_us, false, addr)
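For example (device and bit layout invented), waiting for a soft-reset bit to clear with readl() as the accessor; the last value read lands in ctrl either way:

u32 ctrl;
int err;

err = readx_poll_timeout(readl, base + 0x00, ctrl,
			 !(ctrl & BIT(31)),	/* hypothetical SOFT_RESET bit */
			 100, 50000);		/* 100 us sleeps, 50 ms timeout */
if (err)
	dev_warn(dev, "reset did not complete: %d\n", err);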
@@ -126,16 +198,16 @@
* @addr: Address to poll
* @val: Variable to read the value into
* @cond: Break condition (usually involving @val)
- * @delay_us: Time to udelay between reads in us (0 tight-loops). Should
- * be less than ~10us since udelay is used (see
- * Documentation/timers/timers-howto.rst).
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Please
+ * read udelay() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @addr is stored in @val.
- *
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val.
*/
#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, false, addr)
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 8359c50f9988..9afa30f9346f 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -79,7 +79,8 @@ struct resource {
#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
#define IORESOURCE_IRQ_SHAREABLE (1<<4)
-#define IORESOURCE_IRQ_OPTIONAL (1<<5)
+#define IORESOURCE_IRQ_OPTIONAL (1<<5)
+#define IORESOURCE_IRQ_WAKECAPABLE (1<<6)
/* PnP DMA specific bits (IORESOURCE_BITS) */
#define IORESOURCE_DMA_TYPE_MASK (3<<0)
@@ -141,6 +142,7 @@ enum {
IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
IORES_DESC_RESERVED = 7,
IORES_DESC_SOFT_RESERVED = 8,
+ IORES_DESC_CXL = 9,
};
/*
@@ -152,15 +154,20 @@ enum {
};
/* helpers to define resources */
-#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
- { \
+#define DEFINE_RES_NAMED_DESC(_start, _size, _name, _flags, _desc) \
+(struct resource) { \
.start = (_start), \
.end = (_start) + (_size) - 1, \
.name = (_name), \
.flags = (_flags), \
- .desc = IORES_DESC_NONE, \
+ .desc = (_desc), \
}
+#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
+ DEFINE_RES_NAMED_DESC(_start, _size, _name, _flags, IORES_DESC_NONE)
+#define DEFINE_RES(_start, _size, _flags) \
+ DEFINE_RES_NAMED(_start, _size, NULL, _flags)
+
#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO)
#define DEFINE_RES_IO(_start, _size) \
@@ -171,6 +178,11 @@ enum {
#define DEFINE_RES_MEM(_start, _size) \
DEFINE_RES_MEM_NAMED((_start), (_size), NULL)
+#define DEFINE_RES_REG_NAMED(_start, _size, _name) \
+ DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_REG)
+#define DEFINE_RES_REG(_start, _size) \
+ DEFINE_RES_REG_NAMED((_start), (_size), NULL)
+
#define DEFINE_RES_IRQ_NAMED(_irq, _name) \
DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ)
#define DEFINE_RES_IRQ(_irq) \
@@ -181,6 +193,42 @@ enum {
#define DEFINE_RES_DMA(_dma) \
DEFINE_RES_DMA_NAMED((_dma), NULL)
+/**
+ * typedef resource_alignf - Resource alignment callback
+ * @data: Private data used by the callback
+ * @res: Resource candidate range (an empty resource space)
+ * @size: The minimum size of the empty space
+ * @align: Alignment from the constraints
+ *
+ * The callback allows calculating resource placement and alignment beyond the
+ * min, max, and align fields in struct resource_constraint.
+ *
+ * Return: Start address for the resource.
+ */
+typedef resource_size_t (*resource_alignf)(void *data,
+ const struct resource *res,
+ resource_size_t size,
+ resource_size_t align);
+
+/**
+ * struct resource_constraint - constraints to be met while searching empty
+ * resource space
+ * @min: The minimum address for the memory range
+ * @max: The maximum address for the memory range
+ * @align: Alignment for the start address of the empty space
+ * @alignf: Additional alignment constraints callback
+ * @alignf_data: Data provided for @alignf callback
+ *
+ * Contains the range and alignment constraints that have to be met during
+ * find_resource_space(). @alignf can be NULL indicating no alignment beyond
+ * @align is necessary.
+ */
+struct resource_constraint {
+ resource_size_t min, max, align;
+ resource_alignf alignf;
+ void *alignf_data;
+};
+
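A minimal sketch of an @alignf callback (the 1 MiB floor is invented for illustration): it receives a candidate empty range and returns the start address to actually use, which lets a caller express placement rules beyond what min, max, and align can encode:

static resource_size_t my_alignf(void *data, const struct resource *res,
				 resource_size_t size, resource_size_t align)
{
	resource_size_t start = res->start;

	/* Hypothetical rule: never place the window below 1 MiB */
	if (start < SZ_1M)
		start = SZ_1M;

	return ALIGN(start, align);
}

/* Wired up as: constraint.alignf = my_alignf; constraint.alignf_data = NULL; */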
/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
extern struct resource ioport_resource;
extern struct resource iomem_resource;
@@ -200,15 +248,44 @@ extern void arch_remove_reservations(struct resource *avail);
extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
resource_size_t max, resource_size_t align,
- resource_size_t (*alignf)(void *,
- const struct resource *,
- resource_size_t,
- resource_size_t),
+ resource_alignf alignf,
void *alignf_data);
struct resource *lookup_resource(struct resource *root, resource_size_t start);
int adjust_resource(struct resource *res, resource_size_t start,
resource_size_t size);
resource_size_t resource_alignment(struct resource *res);
+
+/**
+ * resource_set_size - Calculate resource end address from size and start
+ * @res: Resource descriptor
+ * @size: Size of the resource
+ *
+ * Calculate the end address for @res based on @size.
+ *
+ * Note: The start address of @res must be set when calling this function.
+ * Prefer resource_set_range() if setting both the start address and @size.
+ */
+static inline void resource_set_size(struct resource *res, resource_size_t size)
+{
+ res->end = res->start + size - 1;
+}
+
+/**
+ * resource_set_range - Set resource start and end addresses
+ * @res: Resource descriptor
+ * @start: Start address for the resource
+ * @size: Size of the resource
+ *
+ * Set @res start address and calculate the end address based on @size.
+ */
+static inline void resource_set_range(struct resource *res,
+ resource_size_t start,
+ resource_size_t size)
+{
+ res->start = start;
+ resource_set_size(res, size);
+}
+
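For example (addresses invented), both helpers compute the inclusive end address, so a 4 KiB window starting at 0xfed00000 ends at 0xfed00fff:

struct resource res = {};

/* Sets start and end in one call */
resource_set_range(&res, 0xfed00000, SZ_4K);

/* Equivalent when res.start is already set */
res.start = 0xfed00000;
resource_set_size(&res, SZ_4K);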
static inline resource_size_t resource_size(const struct resource *res)
{
return res->end - res->start + 1;
@@ -222,7 +299,7 @@ static inline unsigned long resource_ext_type(const struct resource *res)
return res->flags & IORESOURCE_EXT_TYPE_BITS;
}
/* True iff r1 completely contains r2 */
-static inline bool resource_contains(struct resource *r1, struct resource *r2)
+static inline bool resource_contains(const struct resource *r1, const struct resource *r2)
{
if (resource_type(r1) != resource_type(r2))
return false;
@@ -232,13 +309,13 @@ static inline bool resource_contains(struct resource *r1, struct resource *r2)
}
/* True if any part of r1 overlaps r2 */
-static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
+static inline bool resource_overlaps(const struct resource *r1, const struct resource *r2)
{
return r1->start <= r2->end && r1->end >= r2->start;
}
-static inline bool
-resource_intersection(struct resource *r1, struct resource *r2, struct resource *r)
+static inline bool resource_intersection(const struct resource *r1, const struct resource *r2,
+ struct resource *r)
{
if (!resource_overlaps(r1, r2))
return false;
@@ -247,8 +324,8 @@ resource_intersection(struct resource *r1, struct resource *r2, struct resource
return true;
}
-static inline bool
-resource_union(struct resource *r1, struct resource *r2, struct resource *r)
+static inline bool resource_union(const struct resource *r1, const struct resource *r2,
+ struct resource *r)
{
if (!resource_overlaps(r1, r2))
return false;
@@ -257,11 +334,25 @@ resource_union(struct resource *r1, struct resource *r2, struct resource *r)
return true;
}
+/*
+ * Check if this resource is added to a resource tree or detached. Caller is
+ * responsible for not racing assignment.
+ */
+static inline bool resource_assigned(struct resource *res)
+{
+ return res->parent;
+}
+
+int find_resource_space(struct resource *root, struct resource *new,
+ resource_size_t size, struct resource_constraint *constraint);
+
/* Convenience shorthand with allocation */
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0)
#define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED)
#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
+#define request_mem_region_muxed(start, n, name) \
+ __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_MUXED)
#define request_mem_region_exclusive(start,n,name) \
__request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE)
#define rename_region(region, newname) do { (region)->name = (newname); } while (0)
@@ -309,6 +400,8 @@ extern void __devm_release_region(struct device *dev, struct resource *parent,
resource_size_t start, resource_size_t n);
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
extern bool iomem_is_exclusive(u64 addr);
+extern bool resource_is_exclusive(struct resource *resource, u64 addr,
+ resource_size_t size);
extern int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
@@ -320,6 +413,9 @@ extern int
walk_system_ram_res(u64 start, u64 end, void *arg,
int (*func)(struct resource *, void *));
extern int
+walk_system_ram_res_rev(u64 start, u64 end, void *arg,
+ int (*func)(struct resource *, void *));
+extern int
walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
void *arg, int (*func)(struct resource *, void *));
@@ -327,6 +423,8 @@ struct resource *devm_request_free_mem_region(struct device *dev,
struct resource *base, unsigned long size);
struct resource *request_free_mem_region(struct resource *base,
unsigned long size, const char *name);
+struct resource *alloc_free_mem_region(struct resource *base,
+ unsigned long size, unsigned long align, const char *name);
static inline void irqresource_disabled(struct resource *res, u32 irq)
{
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index e9bfe6972aed..5210e8371238 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -6,46 +6,22 @@
#include <linux/sched/rt.h>
#include <linux/iocontext.h>
-/*
- * Gives us 8 prio classes with 13-bits of data for each class
- */
-#define IOPRIO_CLASS_SHIFT (13)
-#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
-
-#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
-#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK)
-#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data)
-
-#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE)
+#include <uapi/linux/ioprio.h>
/*
- * These are the io priority groups as implemented by CFQ. RT is the realtime
- * class, it always gets premium service. BE is the best-effort scheduling
- * class, the default for any process. IDLE is the idle scheduling class, it
- * is only served when no one else is using the disk.
+ * Default IO priority.
*/
-enum {
- IOPRIO_CLASS_NONE,
- IOPRIO_CLASS_RT,
- IOPRIO_CLASS_BE,
- IOPRIO_CLASS_IDLE,
-};
+#define IOPRIO_DEFAULT IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0)
/*
- * 8 best effort priority levels are supported
+ * Check that a priority value has a valid class.
*/
-#define IOPRIO_BE_NR (8)
-
-enum {
- IOPRIO_WHO_PROCESS = 1,
- IOPRIO_WHO_PGRP,
- IOPRIO_WHO_USER,
-};
+static inline bool ioprio_valid(unsigned short ioprio)
+{
+ unsigned short class = IOPRIO_PRIO_CLASS(ioprio);
-/*
- * Fallback BE priority
- */
-#define IOPRIO_NORM (4)
+ return class > IOPRIO_CLASS_NONE && class <= IOPRIO_CLASS_IDLE;
+}
/*
* if process has set io priority explicitly, use that. if not, convert
@@ -64,29 +40,49 @@ static inline int task_nice_ioclass(struct task_struct *task)
{
if (task->policy == SCHED_IDLE)
return IOPRIO_CLASS_IDLE;
- else if (task_is_realtime(task))
+ else if (rt_or_dl_task_policy(task))
return IOPRIO_CLASS_RT;
else
return IOPRIO_CLASS_BE;
}
+#ifdef CONFIG_BLOCK
/*
- * If the calling process has set an I/O priority, use that. Otherwise, return
+ * If the task has set an I/O priority, use that. Otherwise, return
* the default I/O priority.
+ *
+ * Expected to be called for current task or with task_lock() held to keep
+ * io_context stable.
*/
-static inline int get_current_ioprio(void)
+static inline int __get_task_ioprio(struct task_struct *p)
{
- struct io_context *ioc = current->io_context;
+ struct io_context *ioc = p->io_context;
+ int prio;
- if (ioc)
- return ioc->ioprio;
- return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+ if (!ioc)
+ return IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
+ task_nice_ioprio(p));
+
+ if (p != current)
+ lockdep_assert_held(&p->alloc_lock);
+
+ prio = ioc->ioprio;
+ if (IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_NONE)
+ prio = IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
+ task_nice_ioprio(p));
+ return prio;
+}
+#else
+static inline int __get_task_ioprio(struct task_struct *p)
+{
+ return IOPRIO_DEFAULT;
}
+#endif /* CONFIG_BLOCK */
-/*
- * For inheritance, return the highest of the two given priorities
- */
-extern int ioprio_best(unsigned short aprio, unsigned short bprio);
+static inline int get_current_ioprio(void)
+{
+ return __get_task_ioprio(current);
+}
extern int set_task_ioprio(struct task_struct *task, int ioprio);
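A hedged sketch tying the pieces together (the chosen level 4 mirrors the historical best-effort default): compose a value with the uapi macros, validate its class, apply it, and read back the effective priority:

int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);
int effective;

if (ioprio_valid(prio))		/* class is RT, BE or IDLE */
	set_task_ioprio(current, prio);

/* Without an explicit priority this would instead be derived from the
 * task's scheduling class and nice value.
 */
effective = get_current_ioprio();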
diff --git a/include/linux/ioremap.h b/include/linux/ioremap.h
new file mode 100644
index 000000000000..2bd1661fe9ad
--- /dev/null
+++ b/include/linux/ioremap.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IOREMAP_H
+#define _LINUX_IOREMAP_H
+
+#include <linux/kasan.h>
+#include <asm/pgtable.h>
+#include <asm/vmalloc.h>
+
+#if defined(CONFIG_HAS_IOMEM) || defined(CONFIG_GENERIC_IOREMAP)
+/*
+ * Ioremap often, but not always, uses the generic vmalloc area. E.g. on
+ * powerpc, it can have a separate ioremap space.
+ */
+#ifndef IOREMAP_START
+#define IOREMAP_START VMALLOC_START
+#define IOREMAP_END VMALLOC_END
+#endif
+static inline bool is_ioremap_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)kasan_reset_tag(x);
+
+ return addr >= IOREMAP_START && addr < IOREMAP_END;
+}
+#else
+static inline bool is_ioremap_addr(const void *x)
+{
+ return false;
+}
+#endif
+
+#endif /* _LINUX_IOREMAP_H */
diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
new file mode 100644
index 000000000000..3e85afe794c0
--- /dev/null
+++ b/include/linux/iosys-map.h
@@ -0,0 +1,511 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Pointer abstraction for IO/system memory
+ */
+
+#ifndef __IOSYS_MAP_H__
+#define __IOSYS_MAP_H__
+
+#include <linux/compiler_types.h>
+#include <linux/io.h>
+#include <linux/string.h>
+
+/**
+ * DOC: overview
+ *
+ * When accessing a memory region, depending on its location, users may have to
+ * access it with I/O operations or memory load/store operations. For example,
+ * copying to system memory could be done with memcpy(), copying to I/O memory
+ * would be done with memcpy_toio().
+ *
+ * .. code-block:: c
+ *
+ * void *vaddr = ...; // pointer to system memory
+ * memcpy(vaddr, src, len);
+ *
+ * void *vaddr_iomem = ...; // pointer to I/O memory
+ * memcpy_toio(vaddr_iomem, src, len);
+ *
+ * The user of such a pointer may not have information about the mapping of that
+ * region or may want to have a single code path to handle operations on that
+ * buffer, regardless of whether it's located in system or I/O memory. The type
+ * :c:type:`struct iosys_map <iosys_map>` and its helpers abstract that so the
+ * buffer can be passed around to other drivers or have separate duties inside
+ * the same driver for allocation, read and write operations.
+ *
+ * Open-coding access to :c:type:`struct iosys_map <iosys_map>` is considered
+ * bad style. Rather than accessing its fields directly, use one of the provided
+ * helper functions, or implement your own. For example, instances of
+ * :c:type:`struct iosys_map <iosys_map>` can be initialized statically with
+ * IOSYS_MAP_INIT_VADDR(), or at runtime with iosys_map_set_vaddr(). These
+ * helpers will set an address in system memory.
+ *
+ * .. code-block:: c
+ *
+ * struct iosys_map map = IOSYS_MAP_INIT_VADDR(0xdeadbeaf);
+ *
+ * iosys_map_set_vaddr(&map, 0xdeadbeaf);
+ *
+ * To set an address in I/O memory, use IOSYS_MAP_INIT_VADDR_IOMEM() or
+ * iosys_map_set_vaddr_iomem().
+ *
+ * .. code-block:: c
+ *
+ * struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(0xdeadbeaf);
+ *
+ * iosys_map_set_vaddr_iomem(&map, 0xdeadbeaf);
+ *
+ * Instances of struct iosys_map do not have to be cleaned up, but
+ * can be cleared to NULL with iosys_map_clear(). Cleared mappings
+ * always refer to system memory.
+ *
+ * .. code-block:: c
+ *
+ * iosys_map_clear(&map);
+ *
+ * Test if a mapping is valid with either iosys_map_is_set() or
+ * iosys_map_is_null().
+ *
+ * .. code-block:: c
+ *
+ * if (iosys_map_is_set(&map) != iosys_map_is_null(&map))
+ * // always true
+ *
+ * Instances of :c:type:`struct iosys_map <iosys_map>` can be compared for
+ * equality with iosys_map_is_equal(). Mappings that point to different memory
+ * spaces, system or I/O, are never equal. That's even true if both spaces are
+ * located in the same address space, both mappings contain the same address
+ * value, or both mappings refer to NULL.
+ *
+ * .. code-block:: c
+ *
+ * struct iosys_map sys_map; // refers to system memory
+ * struct iosys_map io_map; // refers to I/O memory
+ *
+ * if (iosys_map_is_equal(&sys_map, &io_map))
+ * // always false
+ *
+ * A set up instance of struct iosys_map can be used to access or manipulate the
+ * buffer memory. Depending on the location of the memory, the provided helpers
+ * will pick the correct operations. Data can be copied into the memory with
+ * iosys_map_memcpy_to(). The address can be manipulated with iosys_map_incr().
+ *
+ * .. code-block:: c
+ *
+ * const void *src = ...; // source buffer
+ * size_t len = ...; // length of src
+ *
+ * iosys_map_memcpy_to(&map, src, len);
+ * iosys_map_incr(&map, len); // go to first byte after the memcpy
+ */
+
+/**
+ * struct iosys_map - Pointer to IO/system memory
+ * @vaddr_iomem: The buffer's address if in I/O memory
+ * @vaddr: The buffer's address if in system memory
+ * @is_iomem: True if the buffer is located in I/O memory, or false
+ * otherwise.
+ */
+struct iosys_map {
+ union {
+ void __iomem *vaddr_iomem;
+ void *vaddr;
+ };
+ bool is_iomem;
+};
+
+/**
+ * IOSYS_MAP_INIT_VADDR - Initializes struct iosys_map to an address in system memory
+ * @vaddr_: A system-memory address
+ */
+#define IOSYS_MAP_INIT_VADDR(vaddr_) \
+ { \
+ .vaddr = (vaddr_), \
+ .is_iomem = false, \
+ }
+
+/**
+ * IOSYS_MAP_INIT_VADDR_IOMEM - Initializes struct iosys_map to an address in I/O memory
+ * @vaddr_iomem_: An I/O-memory address
+ */
+#define IOSYS_MAP_INIT_VADDR_IOMEM(vaddr_iomem_) \
+ { \
+ .vaddr_iomem = (vaddr_iomem_), \
+ .is_iomem = true, \
+ }
+
+/**
+ * IOSYS_MAP_INIT_OFFSET - Initializes struct iosys_map from another iosys_map
+ * @map_: The dma-buf mapping structure to copy from
+ * @offset_: Offset to add to the other mapping
+ *
+ * Initializes a new iosys_map struct based on another passed as argument. It
+ * does a shallow copy of the struct so it's possible to update the back storage
+ * without changing where the original map points to. It is the equivalent of
+ * doing:
+ *
+ * .. code-block:: c
+ *
+ * struct iosys_map map = other_map;
+ * iosys_map_incr(&map, offset);
+ *
+ * Example usage:
+ *
+ * .. code-block:: c
+ *
+ * void foo(struct device *dev, struct iosys_map *base_map)
+ * {
+ * ...
+ * struct iosys_map map = IOSYS_MAP_INIT_OFFSET(base_map, FIELD_OFFSET);
+ * ...
+ * }
+ *
+ * The advantage of using the initializer over just increasing the offset with
+ * iosys_map_incr() like above is that the new map will always point to the
+ * right place of the buffer during its scope. It reduces the risk of updating
+ * the wrong part of the buffer and having no compiler warning about that. If
+ * the assignment to IOSYS_MAP_INIT_OFFSET() is forgotten, the compiler can warn
+ * about the use of an uninitialized variable.
+ */
+#define IOSYS_MAP_INIT_OFFSET(map_, offset_) ({ \
+ struct iosys_map copy_ = *map_; \
+ iosys_map_incr(&copy_, offset_); \
+ copy_; \
+})
+
+/**
+ * iosys_map_set_vaddr - Sets an iosys mapping structure to an address in system memory
+ * @map: The iosys_map structure
+ * @vaddr: A system-memory address
+ *
+ * Sets the address and clears the I/O-memory flag.
+ */
+static inline void iosys_map_set_vaddr(struct iosys_map *map, void *vaddr)
+{
+ map->vaddr = vaddr;
+ map->is_iomem = false;
+}
+
+/**
+ * iosys_map_set_vaddr_iomem - Sets an iosys mapping structure to an address in I/O memory
+ * @map: The iosys_map structure
+ * @vaddr_iomem: An I/O-memory address
+ *
+ * Sets the address and the I/O-memory flag.
+ */
+static inline void iosys_map_set_vaddr_iomem(struct iosys_map *map,
+ void __iomem *vaddr_iomem)
+{
+ map->vaddr_iomem = vaddr_iomem;
+ map->is_iomem = true;
+}
+
+/**
+ * iosys_map_is_equal - Compares two iosys mapping structures for equality
+ * @lhs: The iosys_map structure
+ * @rhs: An iosys_map structure to compare with
+ *
+ * Two iosys mapping structures are equal if they both refer to the same type of memory
+ * and to the same address within that memory.
+ *
+ * Returns:
+ * True if both structures are equal, or false otherwise.
+ */
+static inline bool iosys_map_is_equal(const struct iosys_map *lhs,
+ const struct iosys_map *rhs)
+{
+ if (lhs->is_iomem != rhs->is_iomem)
+ return false;
+ else if (lhs->is_iomem)
+ return lhs->vaddr_iomem == rhs->vaddr_iomem;
+ else
+ return lhs->vaddr == rhs->vaddr;
+}
+
+/**
+ * iosys_map_is_null - Tests whether an iosys mapping is NULL
+ * @map: The iosys_map structure
+ *
+ * Depending on the state of struct iosys_map.is_iomem, tests if the
+ * mapping is NULL.
+ *
+ * Returns:
+ * True if the mapping is NULL, or false otherwise.
+ */
+static inline bool iosys_map_is_null(const struct iosys_map *map)
+{
+ if (map->is_iomem)
+ return !map->vaddr_iomem;
+ return !map->vaddr;
+}
+
+/**
+ * iosys_map_is_set - Tests if the iosys mapping has been set
+ * @map: The iosys_map structure
+ *
+ * Depending on the state of struct iosys_map.is_iomem, tests if the
+ * mapping has been set.
+ *
+ * Returns:
+ * True if the mapping has been set, or false otherwise.
+ */
+static inline bool iosys_map_is_set(const struct iosys_map *map)
+{
+ return !iosys_map_is_null(map);
+}
+
+/**
+ * iosys_map_clear - Clears an iosys mapping structure
+ * @map: The iosys_map structure
+ *
+ * Clears all fields to zero, including struct iosys_map.is_iomem, so
+ * mapping structures that were set to point to I/O memory are reset for
+ * system memory. Pointers are cleared to NULL. This is the default.
+ */
+static inline void iosys_map_clear(struct iosys_map *map)
+{
+ memset(map, 0, sizeof(*map));
+}
+
+/**
+ * iosys_map_memcpy_to - Memcpy into offset of iosys_map
+ * @dst: The iosys_map structure
+ * @dst_offset: The offset in @dst at which the copy starts
+ * @src: The source buffer
+ * @len: The number of bytes to copy
+ *
+ * Copies data into an iosys_map at an offset. The source buffer is in
+ * system memory. Depending on the buffer's location, the helper picks the
+ * correct method of accessing the memory.
+ */
+static inline void iosys_map_memcpy_to(struct iosys_map *dst, size_t dst_offset,
+ const void *src, size_t len)
+{
+ if (dst->is_iomem)
+ memcpy_toio(dst->vaddr_iomem + dst_offset, src, len);
+ else
+ memcpy(dst->vaddr + dst_offset, src, len);
+}
+
+/**
+ * iosys_map_memcpy_from - Memcpy from iosys_map into system memory
+ * @dst: Destination in system memory
+ * @src: The iosys_map structure
+ * @src_offset: The offset from which to copy
+ * @len: The number of bytes to copy from @src
+ *
+ * Copies data from an iosys_map at an offset. The destination buffer is in
+ * system memory. Depending on the mapping location, the helper picks the
+ * correct method of accessing the memory.
+ */
+static inline void iosys_map_memcpy_from(void *dst, const struct iosys_map *src,
+ size_t src_offset, size_t len)
+{
+ if (src->is_iomem)
+ memcpy_fromio(dst, src->vaddr_iomem + src_offset, len);
+ else
+ memcpy(dst, src->vaddr + src_offset, len);
+}
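
A minimal round-trip sketch using the two helpers above (editorial
illustration; ``map`` is assumed to have been initialized by one of the
setters):

.. code-block:: c

    static void bounce(struct iosys_map *map)
    {
            u8 tmp[64];

            /* mapping -> system memory, then back, both at byte offset 0 */
            iosys_map_memcpy_from(tmp, map, 0, sizeof(tmp));
            iosys_map_memcpy_to(map, 0, tmp, sizeof(tmp));
    }
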
+
+/**
+ * iosys_map_incr - Increments the address stored in an iosys mapping
+ * @map: The iosys_map structure
+ * @incr: The number of bytes to increment
+ *
+ * Increments the address stored in an iosys mapping. Depending on the
+ * buffer's location, the appropriate address field is updated.
+ */
+static inline void iosys_map_incr(struct iosys_map *map, size_t incr)
+{
+ if (map->is_iomem)
+ map->vaddr_iomem += incr;
+ else
+ map->vaddr += incr;
+}
+
+/**
+ * iosys_map_memset - Memset iosys_map
+ * @dst: The iosys_map structure
+ * @offset: The offset within @dst at which to start setting @value
+ * @value: The value to set
+ * @len: The number of bytes to set in dst
+ *
+ * Sets @value in the iosys_map. Depending on the buffer's location, the helper
+ * picks the correct method of accessing the memory.
+ */
+static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
+ int value, size_t len)
+{
+ if (dst->is_iomem)
+ memset_io(dst->vaddr_iomem + offset, value, len);
+ else
+ memset(dst->vaddr + offset, value, len);
+}
+
+#ifdef CONFIG_64BIT
+#define __iosys_map_rd_io_u64_case(val_, vaddr_iomem_) \
+ u64: val_ = readq(vaddr_iomem_)
+#define __iosys_map_wr_io_u64_case(val_, vaddr_iomem_) \
+ u64: writeq(val_, vaddr_iomem_)
+#else
+#define __iosys_map_rd_io_u64_case(val_, vaddr_iomem_) \
+ u64: memcpy_fromio(&(val_), vaddr_iomem_, sizeof(u64))
+#define __iosys_map_wr_io_u64_case(val_, vaddr_iomem_) \
+ u64: memcpy_toio(vaddr_iomem_, &(val_), sizeof(u64))
+#endif
+
+#define __iosys_map_rd_io(val__, vaddr_iomem__, type__) _Generic(val__, \
+ u8: val__ = readb(vaddr_iomem__), \
+ u16: val__ = readw(vaddr_iomem__), \
+ u32: val__ = readl(vaddr_iomem__), \
+ __iosys_map_rd_io_u64_case(val__, vaddr_iomem__))
+
+#define __iosys_map_rd_sys(val__, vaddr__, type__) \
+ val__ = READ_ONCE(*(type__ *)(vaddr__))
+
+#define __iosys_map_wr_io(val__, vaddr_iomem__, type__) _Generic(val__, \
+ u8: writeb(val__, vaddr_iomem__), \
+ u16: writew(val__, vaddr_iomem__), \
+ u32: writel(val__, vaddr_iomem__), \
+ __iosys_map_wr_io_u64_case(val__, vaddr_iomem__))
+
+#define __iosys_map_wr_sys(val__, vaddr__, type__) \
+ WRITE_ONCE(*(type__ *)(vaddr__), val__)
+
+/**
+ * iosys_map_rd - Read a C-type value from the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @offset__: The offset from which to read
+ * @type__: Type of the value being read
+ *
+ * Read a C type value (u8, u16, u32 or u64) from the iosys_map. For other
+ * types, or if the pointer may be unaligned (and problematic on the
+ * architecture in use), use iosys_map_memcpy_from().
+ *
+ * Returns:
+ * The value read from the mapping.
+ */
+#define iosys_map_rd(map__, offset__, type__) ({ \
+ type__ val_; \
+ if ((map__)->is_iomem) { \
+ __iosys_map_rd_io(val_, (map__)->vaddr_iomem + (offset__), type__); \
+ } else { \
+ __iosys_map_rd_sys(val_, (map__)->vaddr + (offset__), type__); \
+ } \
+ val_; \
+})
+
+/**
+ * iosys_map_wr - Write a C-type value to the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @offset__: The offset from the mapping to write to
+ * @type__: Type of the value being written
+ * @val__: Value to write
+ *
+ * Write a C type value (u8, u16, u32 or u64) to the iosys_map. For other
+ * types, or if the pointer may be unaligned (and problematic on the
+ * architecture in use), use iosys_map_memcpy_to().
+ */
+#define iosys_map_wr(map__, offset__, type__, val__) ({ \
+ type__ val_ = (val__); \
+ if ((map__)->is_iomem) { \
+ __iosys_map_wr_io(val_, (map__)->vaddr_iomem + (offset__), type__); \
+ } else { \
+ __iosys_map_wr_sys(val_, (map__)->vaddr + (offset__), type__); \
+ } \
+})
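
For example, writing and reading back a 32-bit value at a byte offset
(editorial sketch; ``map`` and the offset/value are illustrative):

.. code-block:: c

    static u32 poke_and_peek(struct iosys_map *map)
    {
            /* write a u32 at byte offset 0x10, then read it back */
            iosys_map_wr(map, 0x10, u32, 0xdeadbeef);
            return iosys_map_rd(map, 0x10, u32);
    }
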
+
+/**
+ * iosys_map_rd_field - Read a member from a struct in the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @struct_offset__: Offset from the beginning of the map, where the struct
+ * is located
+ * @struct_type__: The struct describing the layout of the mapping
+ * @field__: Member of the struct to read
+ *
+ * Read a value from the iosys_map assuming its layout is described by a C
+ * struct starting at @struct_offset__. The field offset and size are
+ * calculated and the value is read. If the field access would incur an
+ * unaligned access, either iosys_map_memcpy_from() needs to be used or the
+ * architecture must support it. For example, suppose a struct foo is
+ * defined as below and the value ``foo.field2.inner2`` needs to be read
+ * from the iosys_map:
+ *
+ * .. code-block:: c
+ *
+ * struct foo {
+ * int field1;
+ * struct {
+ * int inner1;
+ * int inner2;
+ * } field2;
+ * int field3;
+ * } __packed;
+ *
+ * This is the expected memory layout of a buffer using iosys_map_rd_field():
+ *
+ * +------------------------------+--------------------------+
+ * | Address                      | Content                  |
+ * +==============================+==========================+
+ * | buffer + 0000                | start of mmapped buffer  |
+ * |                              | pointed by iosys_map     |
+ * +------------------------------+--------------------------+
+ * | ...                          | ...                      |
+ * +------------------------------+--------------------------+
+ * | buffer + ``struct_offset__`` | start of ``struct foo``  |
+ * +------------------------------+--------------------------+
+ * | ...                          | ...                      |
+ * +------------------------------+--------------------------+
+ * | buffer + wwww                | ``foo.field2.inner2``    |
+ * +------------------------------+--------------------------+
+ * | ...                          | ...                      |
+ * +------------------------------+--------------------------+
+ * | buffer + yyyy                | end of ``struct foo``    |
+ * +------------------------------+--------------------------+
+ * | ...                          | ...                      |
+ * +------------------------------+--------------------------+
+ * | buffer + zzzz                | end of mmapped buffer    |
+ * +------------------------------+--------------------------+
+ *
+ * The offsets denoted by wwww, yyyy and zzzz are either calculated
+ * automatically by this macro or not needed. This is the code to read that
+ * value:
+ *
+ * .. code-block:: c
+ *
+ * x = iosys_map_rd_field(&map, offset, struct foo, field2.inner2);
+ *
+ * Returns:
+ * The value read from the mapping.
+ */
+#define iosys_map_rd_field(map__, struct_offset__, struct_type__, field__) ({ \
+ struct_type__ *s_; \
+ iosys_map_rd(map__, struct_offset__ + offsetof(struct_type__, field__), \
+ typeof(s_->field__)); \
+})
+
+/**
+ * iosys_map_wr_field - Write to a member of a struct in the iosys_map
+ *
+ * @map__: The iosys_map structure
+ * @struct_offset__: Offset from the beginning of the map, where the struct
+ * is located
+ * @struct_type__: The struct describing the layout of the mapping
+ * @field__: Member of the struct to write
+ * @val__: Value to write
+ *
+ * Write a value to the iosys_map assuming its layout is described by a C
+ * struct starting at @struct_offset__. The field offset and size are
+ * calculated and @val__ is written. If the field access would incur an
+ * unaligned access, either iosys_map_memcpy_to() needs to be used or the
+ * architecture must support it. Refer to iosys_map_rd_field() for expected
+ * usage and memory layout.
+ */
+#define iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__) ({ \
+ struct_type__ *s_; \
+ iosys_map_wr(map__, struct_offset__ + offsetof(struct_type__, field__), \
+ typeof(s_->field__), val__); \
+})
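
Mirroring the ``struct foo`` example from iosys_map_rd_field(), the
corresponding write would read as follows (the value is illustrative):

.. code-block:: c

    iosys_map_wr_field(&map, offset, struct foo, field2.inner2, 0x1234);
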
+
+#endif /* __IOSYS_MAP_H__ */
diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h
new file mode 100644
index 000000000000..f9a17fbbd398
--- /dev/null
+++ b/include/linux/iov_iter.h
@@ -0,0 +1,380 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* I/O iterator iteration building functions.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _LINUX_IOV_ITER_H
+#define _LINUX_IOV_ITER_H
+
+#include <linux/uio.h>
+#include <linux/bvec.h>
+#include <linux/folio_queue.h>
+
+typedef size_t (*iov_step_f)(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2);
+typedef size_t (*iov_ustep_f)(void __user *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2);
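
As an editorial illustration of this contract (a sketch, not part of the
header), a kernel-address step function of type iov_step_f that copies each
segment into a flat buffer passed through @priv could look like this:

.. code-block:: c

    static size_t copy_out_step(void *iter_base, size_t progress, size_t len,
                                void *priv, void *priv2)
    {
            /* @progress bytes were already handled by earlier segments */
            memcpy(priv + progress, iter_base, len);
            return 0;       /* nothing left unprocessed */
    }
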
+
+/*
+ * Handle ITER_UBUF.
+ */
+static __always_inline
+size_t iterate_ubuf(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_ustep_f step)
+{
+ void __user *base = iter->ubuf;
+ size_t progress = 0, remain;
+
+ remain = step(base + iter->iov_offset, 0, len, priv, priv2);
+ progress = len - remain;
+ iter->iov_offset += progress;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_IOVEC.
+ */
+static __always_inline
+size_t iterate_iovec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_ustep_f step)
+{
+ const struct iovec *p = iter->__iov;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t part = min(len, p->iov_len - skip);
+
+ if (likely(part)) {
+ remain = step(p->iov_base + skip, progress, part, priv, priv2);
+ consumed = part - remain;
+ progress += consumed;
+ skip += consumed;
+ len -= consumed;
+ if (skip < p->iov_len)
+ break;
+ }
+ p++;
+ skip = 0;
+ } while (len);
+
+ iter->nr_segs -= p - iter->__iov;
+ iter->__iov = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_KVEC.
+ */
+static __always_inline
+size_t iterate_kvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct kvec *p = iter->kvec;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t part = min(len, p->iov_len - skip);
+
+ if (likely(part)) {
+ remain = step(p->iov_base + skip, progress, part, priv, priv2);
+ consumed = part - remain;
+ progress += consumed;
+ skip += consumed;
+ len -= consumed;
+ if (skip < p->iov_len)
+ break;
+ }
+ p++;
+ skip = 0;
+ } while (len);
+
+ iter->nr_segs -= p - iter->kvec;
+ iter->kvec = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_BVEC.
+ */
+static __always_inline
+size_t iterate_bvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct bio_vec *p = iter->bvec;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t offset = p->bv_offset + skip, part;
+ void *kaddr = kmap_local_page(p->bv_page + offset / PAGE_SIZE);
+
+ part = min3(len,
+ (size_t)(p->bv_len - skip),
+ (size_t)(PAGE_SIZE - offset % PAGE_SIZE));
+ remain = step(kaddr + offset % PAGE_SIZE, progress, part, priv, priv2);
+ kunmap_local(kaddr);
+ consumed = part - remain;
+ len -= consumed;
+ progress += consumed;
+ skip += consumed;
+ if (skip >= p->bv_len) {
+ skip = 0;
+ p++;
+ }
+ if (remain)
+ break;
+ } while (len);
+
+ iter->nr_segs -= p - iter->bvec;
+ iter->bvec = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_FOLIOQ.
+ */
+static __always_inline
+size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct folio_queue *folioq = iter->folioq;
+ unsigned int slot = iter->folioq_slot;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ if (slot == folioq_nr_slots(folioq)) {
+ /* The iterator may have been extended. */
+ folioq = folioq->next;
+ slot = 0;
+ }
+
+ do {
+ struct folio *folio = folioq_folio(folioq, slot);
+ size_t part, remain = 0, consumed;
+ size_t fsize;
+ void *base;
+
+ if (!folio)
+ break;
+
+ fsize = folioq_folio_size(folioq, slot);
+ if (skip < fsize) {
+ base = kmap_local_folio(folio, skip);
+ part = umin(len, PAGE_SIZE - skip % PAGE_SIZE);
+ remain = step(base, progress, part, priv, priv2);
+ kunmap_local(base);
+ consumed = part - remain;
+ len -= consumed;
+ progress += consumed;
+ skip += consumed;
+ }
+ if (skip >= fsize) {
+ skip = 0;
+ slot++;
+ if (slot == folioq_nr_slots(folioq) && folioq->next) {
+ folioq = folioq->next;
+ slot = 0;
+ }
+ }
+ if (remain)
+ break;
+ } while (len);
+
+ iter->folioq_slot = slot;
+ iter->folioq = folioq;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_XARRAY.
+ */
+static __always_inline
+size_t iterate_xarray(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ struct folio *folio;
+ size_t progress = 0;
+ loff_t start = iter->xarray_start + iter->iov_offset;
+ pgoff_t index = start / PAGE_SIZE;
+ XA_STATE(xas, iter->xarray, index);
+
+ rcu_read_lock();
+ xas_for_each(&xas, folio, ULONG_MAX) {
+ size_t remain, consumed, offset, part, flen;
+
+ if (xas_retry(&xas, folio))
+ continue;
+ if (WARN_ON(xa_is_value(folio)))
+ break;
+ if (WARN_ON(folio_test_hugetlb(folio)))
+ break;
+
+ offset = offset_in_folio(folio, start + progress);
+ flen = min(folio_size(folio) - offset, len);
+
+ while (flen) {
+ void *base = kmap_local_folio(folio, offset);
+
+ part = min_t(size_t, flen,
+ PAGE_SIZE - offset_in_page(offset));
+ remain = step(base, progress, part, priv, priv2);
+ kunmap_local(base);
+
+ consumed = part - remain;
+ progress += consumed;
+ len -= consumed;
+
+ if (remain || len == 0)
+ goto out;
+ flen -= consumed;
+ offset += consumed;
+ }
+ }
+
+out:
+ rcu_read_unlock();
+ iter->iov_offset += progress;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_DISCARD.
+ */
+static __always_inline
+size_t iterate_discard(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ size_t progress = len;
+
+ iter->count -= progress;
+ return progress;
+}
+
+/**
+ * iterate_and_advance2 - Iterate over an iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @priv2: More data for the step functions.
+ * @ustep: Function for UBUF/IOVEC iterators; given __user addresses.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * Iterate over the next part of an iterator, up to the specified length. The
+ * buffer is presented in segments, which for kernel iteration are broken up by
+ * physical pages and mapped, with the mapped address being presented.
+ *
+ * Two step functions, @step and @ustep, must be provided: one to handle
+ * mapped kernel addresses and the other to handle user addresses, which
+ * have the potential to fault since no pinning is performed.
+ *
+ * The step functions are passed the address and length of the segment, @priv,
+ * @priv2 and the amount of data so far iterated over (which can, for example,
+ * be added to @priv to point to the right part of a second buffer). The step
+ * functions should return the amount of the segment they didn't process
+ * (i.e. 0 indicates complete processing).
+ *
+ * This function returns the amount of data processed (i.e. 0 means nothing
+ * was processed and a return value of @len means it was processed to
+ * completion).
+ */
+static __always_inline
+size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv,
+ void *priv2, iov_ustep_f ustep, iov_step_f step)
+{
+ if (unlikely(iter->count < len))
+ len = iter->count;
+ if (unlikely(!len))
+ return 0;
+
+ if (likely(iter_is_ubuf(iter)))
+ return iterate_ubuf(iter, len, priv, priv2, ustep);
+ if (likely(iter_is_iovec(iter)))
+ return iterate_iovec(iter, len, priv, priv2, ustep);
+ if (iov_iter_is_bvec(iter))
+ return iterate_bvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_kvec(iter))
+ return iterate_kvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_folioq(iter))
+ return iterate_folioq(iter, len, priv, priv2, step);
+ if (iov_iter_is_xarray(iter))
+ return iterate_xarray(iter, len, priv, priv2, step);
+ return iterate_discard(iter, len, priv, priv2, step);
+}
+
+/**
+ * iterate_and_advance - Iterate over an iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @ustep: Function for UBUF/IOVEC iterators; given __user addresses.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * As iterate_and_advance2(), but priv2 is always NULL.
+ */
+static __always_inline
+size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv,
+ iov_ustep_f ustep, iov_step_f step)
+{
+ return iterate_and_advance2(iter, len, priv, NULL, ustep, step);
+}
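
Combining this with the hypothetical copy_out_step() sketched earlier, a
caller copying from any iterator type into a flat buffer might look roughly
like this (editorial sketch; copy_from_user() returns the number of bytes it
could not copy, which matches the "remaining" convention of the step
functions):

.. code-block:: c

    static size_t copy_out_ustep(void __user *iter_base, size_t progress,
                                 size_t len, void *priv, void *priv2)
    {
            /* may fault; the return value is the amount left uncopied */
            return copy_from_user(priv + progress, iter_base, len);
    }

    static size_t extract_to_buf(struct iov_iter *iter, void *buf, size_t len)
    {
            /* copies up to @len bytes into @buf, advancing @iter */
            return iterate_and_advance(iter, len, buf, copy_out_ustep,
                                       copy_out_step);
    }
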
+
+/**
+ * iterate_and_advance_kernel - Iterate over a kernel-internal iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @priv2: More data for the step functions.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * Iterate over the next part of an iterator, up to the specified length. The
+ * buffer is presented in segments, which for kernel iteration are broken up by
+ * physical pages and mapped, with the mapped address being presented.
+ *
+ * [!] Note: This will only handle BVEC, KVEC, FOLIOQ, XARRAY and
+ * DISCARD-type iterators; it will not handle UBUF or IOVEC-type iterators.
+ *
+ * A single step function, @step, must be provided to handle mapped kernel
+ * addresses; there is no user-address variant since UBUF and IOVEC
+ * iterators are not handled here.
+ *
+ * The step function is passed the address and length of the segment, @priv,
+ * @priv2 and the amount of data so far iterated over (which can, for
+ * example, be added to @priv to point to the right part of a second
+ * buffer). The step function should return the amount of the segment it
+ * didn't process (i.e. 0 indicates complete processing).
+ *
+ * This function returns the amount of data processed (i.e. 0 means nothing
+ * was processed and a return value of @len means it was processed to
+ * completion).
+ */
+static __always_inline
+size_t iterate_and_advance_kernel(struct iov_iter *iter, size_t len, void *priv,
+ void *priv2, iov_step_f step)
+{
+ if (unlikely(iter->count < len))
+ len = iter->count;
+ if (unlikely(!len))
+ return 0;
+ if (iov_iter_is_bvec(iter))
+ return iterate_bvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_kvec(iter))
+ return iterate_kvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_folioq(iter))
+ return iterate_folioq(iter, len, priv, priv2, step);
+ if (iov_iter_is_xarray(iter))
+ return iterate_xarray(iter, len, priv, priv2, step);
+ return iterate_discard(iter, len, priv, priv2, step);
+}
+
+#endif /* _LINUX_IOV_ITER_H */
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 71d8a2de6635..d2c4fd923efa 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -12,7 +12,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
-#include <linux/atomic.h>
#include <linux/dma-mapping.h>
/* iova structure */
@@ -22,47 +21,8 @@ struct iova {
unsigned long pfn_lo; /* Lowest allocated pfn */
};
-struct iova_magazine;
-struct iova_cpu_rcache;
-#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
-#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
-
-struct iova_rcache {
- spinlock_t lock;
- unsigned long depot_size;
- struct iova_magazine *depot[MAX_GLOBAL_MAGS];
- struct iova_cpu_rcache __percpu *cpu_rcaches;
-};
-
-struct iova_domain;
-
-/* Call-Back from IOVA code into IOMMU drivers */
-typedef void (* iova_flush_cb)(struct iova_domain *domain);
-
-/* Destructor for per-entry data */
-typedef void (* iova_entry_dtor)(unsigned long data);
-
-/* Number of entries per Flush Queue */
-#define IOVA_FQ_SIZE 256
-
-/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
-#define IOVA_FQ_TIMEOUT 10
-
-/* Flush Queue entry for defered flushing */
-struct iova_fq_entry {
- unsigned long iova_pfn;
- unsigned long pages;
- unsigned long data;
- u64 counter; /* Flush counter when this entrie was added */
-};
-
-/* Per-CPU Flush Queue structure */
-struct iova_fq {
- struct iova_fq_entry entries[IOVA_FQ_SIZE];
- unsigned head, tail;
- spinlock_t lock;
-};
+struct iova_rcache;
/* holds all the iova translations for a domain */
struct iova_domain {
@@ -74,27 +34,9 @@ struct iova_domain {
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
unsigned long max32_alloc_size; /* Size of last failed allocation */
- struct iova_fq __percpu *fq; /* Flush Queue */
-
- atomic64_t fq_flush_start_cnt; /* Number of TLB flushes that
- have been started */
-
- atomic64_t fq_flush_finish_cnt; /* Number of TLB flushes that
- have been finished */
-
struct iova anchor; /* rbtree lookup anchor */
- struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
-
- iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU
- TLBs */
- iova_entry_dtor entry_dtor; /* IOMMU driver specific destructor for
- iova entry */
-
- struct timer_list fq_timer; /* Timer to regularily empty the
- flush-queues */
- atomic_t fq_timer_on; /* 1 when timer is active, 0
- when not */
+ struct iova_rcache *rcaches;
struct hlist_node cpuhp_dead;
};
@@ -123,6 +65,11 @@ static inline size_t iova_align(struct iova_domain *iovad, size_t size)
return ALIGN(size, iovad->granule);
}
+static inline size_t iova_align_down(struct iova_domain *iovad, size_t size)
+{
+ return ALIGN_DOWN(size, iovad->granule);
+}
+
static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
{
return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
@@ -133,10 +80,12 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
return iova >> iova_shift(iovad);
}
-#if IS_ENABLED(CONFIG_IOMMU_IOVA)
+#if IS_REACHABLE(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);
+unsigned long iova_rcache_range(void);
+
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
@@ -144,17 +93,13 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
unsigned long size);
-void queue_iova(struct iova_domain *iovad,
- unsigned long pfn, unsigned long pages,
- unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn);
-int init_iova_flush_queue(struct iova_domain *iovad,
- iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
+int iova_domain_init_rcaches(struct iova_domain *iovad);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
#else
@@ -189,12 +134,6 @@ static inline void free_iova_fast(struct iova_domain *iovad,
{
}
-static inline void queue_iova(struct iova_domain *iovad,
- unsigned long pfn, unsigned long pages,
- unsigned long data)
-{
-}
-
static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn,
@@ -216,13 +155,6 @@ static inline void init_iova_domain(struct iova_domain *iovad,
{
}
-static inline int init_iova_flush_queue(struct iova_domain *iovad,
- iova_flush_cb flush_cb,
- iova_entry_dtor entry_dtor)
-{
- return -ENODEV;
-}
-
static inline struct iova *find_iova(struct iova_domain *iovad,
unsigned long pfn)
{
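
With the flush-queue machinery removed from this header, a caller of the
fast-path allocation API now initializes the rcaches explicitly. A rough
editorial sketch of the new flow (``base_pfn``, ``limit_pfn`` and
``nr_pages`` are placeholders):

.. code-block:: c

    static int iova_smoke_test(unsigned long base_pfn, unsigned long limit_pfn,
                               unsigned long nr_pages)
    {
            struct iova_domain iovad;
            unsigned long pfn;
            int ret;

            init_iova_domain(&iovad, SZ_4K, base_pfn);
            ret = iova_domain_init_rcaches(&iovad);
            if (ret)
                    return ret;

            pfn = alloc_iova_fast(&iovad, nr_pages, limit_pfn, true);
            if (pfn)
                    free_iova_fast(&iovad, pfn, nr_pages);

            put_iova_domain(&iovad);
            return 0;
    }
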
diff --git a/include/linux/iova_bitmap.h b/include/linux/iova_bitmap.h
new file mode 100644
index 000000000000..1c338f5e5b7a
--- /dev/null
+++ b/include/linux/iova_bitmap.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Oracle and/or its affiliates.
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+#ifndef _IOVA_BITMAP_H_
+#define _IOVA_BITMAP_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+struct iova_bitmap;
+
+typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length,
+ void *opaque);
+
+#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER)
+struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
+ unsigned long page_size,
+ u64 __user *data);
+void iova_bitmap_free(struct iova_bitmap *bitmap);
+int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+ iova_bitmap_fn_t fn);
+void iova_bitmap_set(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length);
+#else
+static inline struct iova_bitmap *iova_bitmap_alloc(unsigned long iova,
+ size_t length,
+ unsigned long page_size,
+ u64 __user *data)
+{
+ return NULL;
+}
+
+static inline void iova_bitmap_free(struct iova_bitmap *bitmap)
+{
+}
+
+static inline int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+ iova_bitmap_fn_t fn)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void iova_bitmap_set(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length)
+{
+}
+#endif
+
+#endif
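
A rough usage sketch of this API (editorial; everything other than the
iova_bitmap_* calls is a placeholder, and IS_ERR_OR_NULL() is used to cover
both the error-pointer and the stubbed-out NULL return): a driver reporting
dirty IOVAs allocates the bitmap over the user-supplied buffer, walks it, and
marks dirty ranges from its callback:

.. code-block:: c

    static int report_dirty(struct iova_bitmap *bitmap, unsigned long iova,
                            size_t length, void *opaque)
    {
            /*
             * Placeholder: a real driver would consult its own state and
             * only mark the sub-ranges that are actually dirty.
             */
            iova_bitmap_set(bitmap, iova, length);
            return 0;
    }

    static int log_dirty(unsigned long iova, size_t length,
                         u64 __user *udata, void *drvdata)
    {
            struct iova_bitmap *bitmap;
            int ret;

            bitmap = iova_bitmap_alloc(iova, length, PAGE_SIZE, udata);
            if (IS_ERR_OR_NULL(bitmap))
                    return bitmap ? PTR_ERR(bitmap) : -EOPNOTSUPP;

            ret = iova_bitmap_for_each(bitmap, drvdata, report_dirty);
            iova_bitmap_free(bitmap);
            return ret;
    }
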
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 3d9c6750af62..d11c25f5030a 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -35,4 +35,25 @@ static inline unsigned int ip_transport_len(const struct sk_buff *skb)
{
return ntohs(ip_hdr(skb)->tot_len) - skb_network_header_len(skb);
}
+
+static inline unsigned int iph_totlen(const struct sk_buff *skb, const struct iphdr *iph)
+{
+ u32 len = ntohs(iph->tot_len);
+
+ return (len || !skb_is_gso(skb) || !skb_is_gso_tcp(skb)) ?
+ len : skb->len - skb_network_offset(skb);
+}
+
+static inline unsigned int skb_ip_totlen(const struct sk_buff *skb)
+{
+ return iph_totlen(skb, ip_hdr(skb));
+}
+
+/* IPv4 datagram length is stored into 16bit field (tot_len) */
+#define IP_MAX_MTU 0xFFFFU
+
+static inline void iph_set_totlen(struct iphdr *iph, unsigned int len)
+{
+ iph->tot_len = len <= IP_MAX_MTU ? htons(len) : 0;
+}
#endif /* _LINUX_IP_H */
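
These helpers accommodate IPv4 GSO ("BIG TCP") packets whose real length does
not fit in the 16-bit tot_len field: iph_set_totlen() stores 0 when @len is
too large, and iph_totlen() falls back to deriving the length from the skb in
that case. Schematically (editorial sketch):

.. code-block:: c

    static unsigned int set_and_read_totlen(struct sk_buff *skb,
                                            struct iphdr *iph,
                                            unsigned int len)
    {
            iph_set_totlen(iph, len);   /* stores 0 if len > IP_MAX_MTU */
            /* recovers the length from the skb for GSO TCP with tot_len 0 */
            return skb_ip_totlen(skb);
    }
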
diff --git a/include/linux/ipack.h b/include/linux/ipack.h
index 2c6936b8371f..455f6c2a1903 100644
--- a/include/linux/ipack.h
+++ b/include/linux/ipack.h
@@ -70,15 +70,13 @@ enum ipack_space {
IPACK_SPACE_COUNT,
};
-/**
- */
struct ipack_region {
phys_addr_t start;
size_t size;
};
/**
- * struct ipack_device
+ * struct ipack_device - subsystem representation of an IPack device
*
* @slot: Slot where the device is plugged in the carrier board
* @bus: ipack_bus_device where the device is plugged to.
@@ -89,7 +87,7 @@ struct ipack_region {
*
* Warning: Direct access to mapped memory is possible but the endianness
* is not the same with PCI carrier or VME carrier. The endianness is managed
- * by the carrier board throught bus->ops.
+ * by the carrier board through bus->ops.
*/
struct ipack_device {
unsigned int slot;
@@ -124,6 +122,7 @@ struct ipack_driver_ops {
* struct ipack_driver -- Specific data to each ipack device driver
*
* @driver: Device driver kernel representation
+ * @id_table: Device ID table for this driver
* @ops: Callbacks provided by the IPack device driver
*/
struct ipack_driver {
@@ -161,7 +160,7 @@ struct ipack_bus_ops {
};
/**
- * struct ipack_bus_device
+ * struct ipack_bus_device - IPack bus representation
*
* @dev: pointer to carrier device
* @slots: number of slots available
@@ -185,6 +184,8 @@ struct ipack_bus_device {
*
* The carrier board device should call this function to register itself as
* available bus device in ipack.
+ *
+ * Return: %NULL on error or &struct ipack_bus_device on success
*/
struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
const struct ipack_bus_ops *ops,
@@ -192,6 +193,8 @@ struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
/**
* ipack_bus_unregister -- unregister an ipack bus
+ *
+ * Return: %0
*/
int ipack_bus_unregister(struct ipack_bus_device *bus);
@@ -200,6 +203,8 @@ int ipack_bus_unregister(struct ipack_bus_device *bus);
*
* Called by a ipack driver to register itself as a driver
* that can manage ipack devices.
+ *
+ * Return: zero on success or error code on failure.
*/
int ipack_driver_register(struct ipack_driver *edrv, struct module *owner,
const char *name);
@@ -215,7 +220,7 @@ void ipack_driver_unregister(struct ipack_driver *edrv);
* function. The rest of the fields will be allocated and populated
* during initalization.
*
- * Return zero on success or error code on failure.
+ * Return: zero on success or error code on failure.
*
* NOTE: _Never_ directly free @dev after calling this function, even
* if it returned an error! Always use ipack_put_device() to give up the
@@ -230,7 +235,7 @@ int ipack_device_init(struct ipack_device *dev);
* Add a new IPack device. The call is done by the carrier driver
* after calling ipack_device_init().
*
- * Return zero on success or error code on failure.
+ * Return: zero on success or error code on failure.
*
* NOTE: _Never_ directly free @dev after calling this function, even
* if it returned an error! Always use ipack_put_device() to give up the
@@ -266,9 +271,11 @@ void ipack_put_device(struct ipack_device *dev);
.device = (dev)
/**
- * ipack_get_carrier - it increase the carrier ref. counter of
+ * ipack_get_carrier - try to increase the carrier ref. counter of
* the carrier module
* @dev: mezzanine device which wants to get the carrier
+ *
+ * Return: true on success.
*/
static inline int ipack_get_carrier(struct ipack_device *dev)
{
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index e1c9eea6015b..9b1434247aab 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -2,7 +2,7 @@
#ifndef _LINUX_IPC_H
#define _LINUX_IPC_H
-#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
#include <linux/uidgid.h>
#include <linux/rhashtable-types.h>
#include <uapi/linux/ipc.h>
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 05e22770af51..12faca29bbb9 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -10,6 +10,8 @@
#include <linux/ns_common.h>
#include <linux/refcount.h>
#include <linux/rhashtable-types.h>
+#include <linux/sysctl.h>
+#include <linux/percpu_counter.h>
struct user_namespace;
@@ -35,8 +37,8 @@ struct ipc_namespace {
unsigned int msg_ctlmax;
unsigned int msg_ctlmnb;
unsigned int msg_ctlmni;
- atomic_t msg_bytes;
- atomic_t msg_hdrs;
+ struct percpu_counter percpu_msg_bytes;
+ struct percpu_counter percpu_msg_hdrs;
size_t shm_ctlmax;
size_t shm_ctlall;
@@ -63,6 +65,12 @@ struct ipc_namespace {
unsigned int mq_msg_default;
unsigned int mq_msgsize_default;
+ struct ctl_table_set mq_set;
+ struct ctl_table_header *mq_sysctls;
+
+ struct ctl_table_set ipc_set;
+ struct ctl_table_header *ipc_sysctls;
+
/* user_ns which owns the ipc ns */
struct user_namespace *user_ns;
struct ucounts *ucounts;
@@ -121,19 +129,34 @@ static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
#endif
#if defined(CONFIG_IPC_NS)
-extern struct ipc_namespace *copy_ipcs(unsigned long flags,
+static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct ipc_namespace, ns);
+}
+
+extern struct ipc_namespace *copy_ipcs(u64 flags,
struct user_namespace *user_ns, struct ipc_namespace *ns);
static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
{
if (ns)
- refcount_inc(&ns->ns.count);
+ ns_ref_inc(ns);
return ns;
}
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
+{
+ if (ns) {
+ if (ns_ref_get(ns))
+ return ns;
+ }
+
+ return NULL;
+}
+
extern void put_ipc_ns(struct ipc_namespace *ns);
#else
-static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
+static inline struct ipc_namespace *copy_ipcs(u64 flags,
struct user_namespace *user_ns, struct ipc_namespace *ns)
{
if (flags & CLONE_NEWIPC)
@@ -147,6 +170,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
return ns;
}
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
+{
+ return ns;
+}
+
static inline void put_ipc_ns(struct ipc_namespace *ns)
{
}
@@ -154,15 +182,37 @@ static inline void put_ipc_ns(struct ipc_namespace *ns)
#ifdef CONFIG_POSIX_MQUEUE_SYSCTL
-struct ctl_table_header;
-extern struct ctl_table_header *mq_register_sysctl_table(void);
+void retire_mq_sysctls(struct ipc_namespace *ns);
+bool setup_mq_sysctls(struct ipc_namespace *ns);
#else /* CONFIG_POSIX_MQUEUE_SYSCTL */
-static inline struct ctl_table_header *mq_register_sysctl_table(void)
+static inline void retire_mq_sysctls(struct ipc_namespace *ns)
{
- return NULL;
+}
+
+static inline bool setup_mq_sysctls(struct ipc_namespace *ns)
+{
+ return true;
}
#endif /* CONFIG_POSIX_MQUEUE_SYSCTL */
+
+#ifdef CONFIG_SYSVIPC_SYSCTL
+
+bool setup_ipc_sysctls(struct ipc_namespace *ns);
+void retire_ipc_sysctls(struct ipc_namespace *ns);
+
+#else /* CONFIG_SYSVIPC_SYSCTL */
+
+static inline void retire_ipc_sysctls(struct ipc_namespace *ns)
+{
+}
+
+static inline bool setup_ipc_sysctls(struct ipc_namespace *ns)
+{
+ return true;
+}
+
+#endif /* CONFIG_SYSVIPC_SYSCTL */
#endif
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 52850a02a3d0..7da6602eab71 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -72,6 +72,11 @@ struct ipmi_recv_msg {
unsigned char msg_data[IPMI_MAX_MSG_LENGTH];
};
+#define INIT_IPMI_RECV_MSG(done_handler) \
+{ \
+ .done = done_handler \
+}
+
/* Allocate and free the receive message. */
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg);
@@ -88,7 +93,8 @@ struct ipmi_user_hndl {
/*
* Called when the interface detects a watchdog pre-timeout. If
- * this is NULL, it will be ignored for the user.
+ * this is NULL, it will be ignored for the user. Note that you
+ * can't do any IPMI calls from here, it's called with locks held.
*/
void (*ipmi_watchdog_pretimeout)(void *handler_data);
@@ -121,7 +127,7 @@ int ipmi_create_user(unsigned int if_num,
* the users before you destroy the callback structures, it should be
* safe, too.
*/
-int ipmi_destroy_user(struct ipmi_user *user);
+void ipmi_destroy_user(struct ipmi_user *user);
/* Get the IPMI version of the BMC we are talking to. */
int ipmi_get_version(struct ipmi_user *user,
@@ -335,4 +341,17 @@ extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data);
#define GET_DEVICE_ID_MAX_RETRY 5
+/* Helper function for computing the IPMB checksum of some data. */
+unsigned char ipmb_checksum(unsigned char *data, int size);
+
+/*
+ * For things that must send messages at panic time, like the IPMI watchdog
+ * driver that extends the reset time on a panic, use this to send messages
+ * from panic context. Note that this puts the driver into a mode that
+ * only works at panic time, so only use it then.
+ */
+void ipmi_panic_request_and_wait(struct ipmi_user *user,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg);
+
#endif /* __LINUX_IPMI_H */
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index deec18b8944a..892e2d656e1e 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -39,6 +39,59 @@ struct ipmi_smi;
#define IPMI_WATCH_MASK_CHECK_COMMANDS (1 << 2)
/*
+ * SMI messages
+ *
+ * When communicating with an SMI, messages come in two formats:
+ *
+ * * Normal (to a BMC over a BMC interface)
+ *
+ * * IPMB (over a IPMB to another MC)
+ *
+ * When normal, commands are sent using the format defined by a
+ * standard message over KCS (NetFn must be even):
+ *
+ * +-----------+-----+------+
+ * | NetFn/LUN | Cmd | Data |
+ * +-----------+-----+------+
+ *
+ * And responses, similarly, with a completion code added (NetFn must
+ * be odd):
+ *
+ * +-----------+-----+------+------+
+ * | NetFn/LUN | Cmd | CC | Data |
+ * +-----------+-----+------+------+
+ *
+ * With normal messages, only commands are sent and only responses are
+ * received.
+ *
+ * In IPMB mode, we are acting as an IPMB device. Commands will be in
+ * the following format (NetFn must be even):
+ *
+ * +-------------+------+-------------+-----+------+
+ * | NetFn/rsLUN | Addr | rqSeq/rqLUN | Cmd | Data |
+ * +-------------+------+-------------+-----+------+
+ *
+ * Responses use the following format:
+ *
+ * +-------------+------+-------------+-----+------+------+
+ * | NetFn/rqLUN | Addr | rqSeq/rsLUN | Cmd | CC | Data |
+ * +-------------+------+-------------+-----+------+------+
+ *
+ * This is similar to the format defined in the IPMB manual section
+ * 2.11.1 with the checksums and the first address removed. Also, the
+ * address is always the remote address.
+ *
+ * IPMB messages can be commands and responses in both directions; received
+ * commands are delivered like any other received command from the message
+ * queue.
+ */
+
+enum ipmi_smi_msg_type {
+ IPMI_SMI_MSG_TYPE_NORMAL = 0,
+ IPMI_SMI_MSG_TYPE_IPMB_DIRECT
+};
+
+/*
* Messages to/from the lower layer. The smi interface will take one
* of these to send. After the send has occurred and a response has
* been received, it will report this same data structure back up to
@@ -54,8 +107,11 @@ struct ipmi_smi;
struct ipmi_smi_msg {
struct list_head link;
- long msgid;
- void *user_data;
+ enum ipmi_smi_msg_type type;
+
+ long msgid;
+ /* Response to this message, will be NULL if not from a user request. */
+ struct ipmi_recv_msg *recv_msg;
int data_size;
unsigned char data[IPMI_MAX_MSG_LENGTH];
@@ -70,9 +126,19 @@ struct ipmi_smi_msg {
void (*done)(struct ipmi_smi_msg *msg);
};
+#define INIT_IPMI_SMI_MSG(done_handler) \
+{ \
+ .done = done_handler, \
+ .type = IPMI_SMI_MSG_TYPE_NORMAL \
+}
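
A minimal sketch of using the initializer (editorial; the handler body is a
placeholder):

.. code-block:: c

    static void my_msg_done(struct ipmi_smi_msg *msg)
    {
            /* placeholder: recycle or free the message */
    }

    static struct ipmi_smi_msg my_msg = INIT_IPMI_SMI_MSG(my_msg_done);
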
+
struct ipmi_smi_handlers {
struct module *owner;
+ /* Capabilities of the SMI. */
+#define IPMI_SMI_CAN_HANDLE_IPMB_DIRECT (1 << 0)
+ unsigned int flags;
+
/*
* The low-level interface cannot start sending messages to
* the upper layer until this function is called. This may
@@ -103,9 +169,11 @@ struct ipmi_smi_handlers {
* are held when this is run. Message are delivered one at
* a time by the message handler, a new message will not be
* delivered until the previous message is returned.
+ *
+ * This can return an error if the SMI is not in a state where it
+ * can send a message.
*/
- void (*sender)(void *send_info,
- struct ipmi_smi_msg *msg);
+ int (*sender)(void *send_info, struct ipmi_smi_msg *msg);
/*
* Called by the upper layer to request that we try to get
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 70b2ad3b9884..7294e4e89b79 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -3,6 +3,7 @@
#define _IPV6_H
#include <uapi/linux/ipv6.h>
+#include <linux/cache.h>
#define ipv6_optlen(p) (((p)->hdrlen+1) << 3)
#define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
@@ -10,9 +11,17 @@
* This structure contains configuration options per IPv6 link.
*/
struct ipv6_devconf {
- __s32 forwarding;
+ /* RX & TX fastpath fields. */
+ __cacheline_group_begin(ipv6_devconf_read_txrx);
+ __s32 disable_ipv6;
__s32 hop_limit;
__s32 mtu6;
+ __s32 forwarding;
+ __s32 force_forwarding;
+ __s32 disable_policy;
+ __s32 proxy_ndp;
+ __cacheline_group_end(ipv6_devconf_read_txrx);
+
__s32 accept_ra;
__s32 accept_redirects;
__s32 autoconf;
@@ -27,12 +36,14 @@ struct ipv6_devconf {
__s32 use_tempaddr;
__s32 temp_valid_lft;
__s32 temp_prefered_lft;
+ __s32 regen_min_advance;
__s32 regen_max_retry;
__s32 max_desync_factor;
__s32 max_addresses;
__s32 accept_ra_defrtr;
__u32 ra_defrtr_metric;
__s32 accept_ra_min_hop_limit;
+ __s32 accept_ra_min_lft;
__s32 accept_ra_pinfo;
__s32 ignore_routes_with_linkdown;
#ifdef CONFIG_IPV6_ROUTER_PREF
@@ -43,7 +54,6 @@ struct ipv6_devconf {
__s32 accept_ra_rt_info_max_plen;
#endif
#endif
- __s32 proxy_ndp;
__s32 accept_source_route;
__s32 accept_ra_from_local;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -51,9 +61,8 @@ struct ipv6_devconf {
__s32 use_optimistic;
#endif
#ifdef CONFIG_IPV6_MROUTE
- __s32 mc_forwarding;
+ atomic_t mc_forwarding;
#endif
- __s32 disable_ipv6;
__s32 drop_unicast_in_l2_multicast;
__s32 accept_dad;
__s32 force_tllao;
@@ -61,6 +70,7 @@ struct ipv6_devconf {
__s32 suppress_frag_ndisc;
__s32 accept_ra_mtu;
__s32 drop_unsolicited_na;
+ __s32 accept_untracked_na;
struct ipv6_stable_secret {
bool initialized;
struct in6_addr secret;
@@ -73,9 +83,14 @@ struct ipv6_devconf {
#endif
__u32 enhanced_dad;
__u32 addr_gen_mode;
- __s32 disable_policy;
__s32 ndisc_tclass;
__s32 rpl_seg_enabled;
+ __u32 ioam6_id;
+ __u32 ioam6_id_wide;
+ __u8 ioam6_enabled;
+ __u8 ndisc_evict_nocarrier;
+ __u8 ra_honor_pio_life;
+ __u8 ra_honor_pio_pflag;
struct ctl_table_header *sysctl_header;
};
@@ -129,6 +144,7 @@ struct inet6_skb_parm {
__u16 dsthao;
#endif
__u16 frag_max_size;
+ __u16 srhoff;
#define IP6SKB_XFRM_TRANSFORMED 1
#define IP6SKB_FORWARDED 2
@@ -138,6 +154,10 @@ struct inet6_skb_parm {
#define IP6SKB_HOPBYHOP 32
#define IP6SKB_L3SLAVE 64
#define IP6SKB_JUMBOGRAM 128
+#define IP6SKB_SEG6 256
+#define IP6SKB_FAKEJUMBO 512
+#define IP6SKB_MULTIPATH 1024
+#define IP6SKB_MCROUTE 2048
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
@@ -189,56 +209,34 @@ struct inet6_cork {
struct ipv6_txoptions *opt;
u8 hop_limit;
u8 tclass;
+ u8 dontfrag:1;
};
-/**
- * struct ipv6_pinfo - ipv6 private area
- *
- * In the struct sock hierarchy (tcp6_sock, upd6_sock, etc)
- * this _must_ be the last member, so that inet6_sk_generic
- * is able to calculate its offset from the base struct sock
- * by using the struct proto->slab_obj_size member. -acme
- */
+/* struct ipv6_pinfo - ipv6 private area */
struct ipv6_pinfo {
+ /* Used in tx path (inet6_csk_route_socket(), ip6_xmit()) */
struct in6_addr saddr;
- struct in6_pktinfo sticky_pktinfo;
- const struct in6_addr *daddr_cache;
+ __be32 flow_label;
+ u32 dst_cookie;
+ struct ipv6_txoptions __rcu *opt;
+ s16 hop_limit;
+ u8 pmtudisc;
+ u8 tclass;
#ifdef CONFIG_IPV6_SUBTREES
- const struct in6_addr *saddr_cache;
+ bool saddr_cache;
#endif
+ bool daddr_cache;
- __be32 flow_label;
- __u32 frag_size;
-
- /*
- * Packed in 16bits.
- * Omit one shift by putting the signed field at MSB.
- */
-#if defined(__BIG_ENDIAN_BITFIELD)
- __s16 hop_limit:9;
- __u16 __unused_1:7;
-#else
- __u16 __unused_1:7;
- __s16 hop_limit:9;
-#endif
+ u8 mcast_hops;
+ u32 frag_size;
-#if defined(__BIG_ENDIAN_BITFIELD)
- /* Packed in 16bits. */
- __s16 mcast_hops:9;
- __u16 __unused_2:6,
- mc_loop:1;
-#else
- __u16 mc_loop:1,
- __unused_2:6;
- __s16 mcast_hops:9;
-#endif
int ucast_oif;
int mcast_oif;
/* pktoption flags */
union {
struct {
- __u16 srcrt:1,
+ u16 srcrt:1,
osrcrt:1,
rxinfo:1,
rxoinfo:1,
@@ -255,42 +253,38 @@ struct ipv6_pinfo {
recvfragsize:1;
/* 1 bits hole */
} bits;
- __u16 all;
+ u16 all;
} rxopt;
/* sockopt flags */
- __u16 recverr:1,
- sndflow:1,
- repflow:1,
- pmtudisc:3,
- padding:1, /* 1 bit hole */
- srcprefs:3, /* 001: prefer temporary address
+ u8 srcprefs; /* 001: prefer temporary address
* 010: prefer public address
* 100: prefer care-of address
*/
- dontfrag:1,
- autoflowlabel:1,
- autoflowlabel_set:1,
- mc_all:1,
- recverr_rfc4884:1,
- rtalert_isolate:1;
- __u8 min_hopcount;
- __u8 tclass;
+ u8 min_hopcount;
__be32 rcv_flowinfo;
+ struct in6_pktinfo sticky_pktinfo;
- __u32 dst_cookie;
- __u32 rx_dst_cookie;
-
- struct ipv6_mc_socklist __rcu *ipv6_mc_list;
- struct ipv6_ac_socklist *ipv6_ac_list;
- struct ipv6_fl_socklist __rcu *ipv6_fl_list;
-
- struct ipv6_txoptions __rcu *opt;
struct sk_buff *pktoptions;
struct sk_buff *rxpmtu;
struct inet6_cork cork;
+
+ struct ipv6_mc_socklist __rcu *ipv6_mc_list;
+ struct ipv6_ac_socklist *ipv6_ac_list;
};
+/* We currently use available bits from inet_sk(sk)->inet_flags,
+ * this could change in the future.
+ */
+#define inet6_test_bit(nr, sk) \
+ test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_set_bit(nr, sk) \
+ set_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_clear_bit(nr, sk) \
+ clear_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_assign_bit(nr, sk, val) \
+ assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
+
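
For example, assuming INET_FLAGS_SNDFLOW is among the inet_flags bits, a test
reads as follows (editorial sketch):

.. code-block:: c

    static bool sk_sndflow(const struct sock *sk)
    {
            /* same as test_bit(INET_FLAGS_SNDFLOW, &inet_sk(sk)->inet_flags) */
            return inet6_test_bit(SNDFLOW, sk);
    }
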
/* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */
struct raw6_sock {
/* inet_sock has to be the first member of raw6_sock */
@@ -299,19 +293,19 @@ struct raw6_sock {
__u32 offset; /* checksum offset */
struct icmp6_filter filter;
__u32 ip6mr_table;
- /* ipv6_pinfo has to be the last member of raw6_sock, see inet6_sk_generic */
+ struct numa_drop_counters drop_counters;
struct ipv6_pinfo inet6;
};
struct udp6_sock {
struct udp_sock udp;
- /* ipv6_pinfo has to be the last member of udp6_sock, see inet6_sk_generic */
+
struct ipv6_pinfo inet6;
};
struct tcp6_sock {
struct tcp_sock tcp;
- /* ipv6_pinfo has to be the last member of tcp6_sock, see inet6_sk_generic */
+
struct ipv6_pinfo inet6;
};
@@ -329,13 +323,9 @@ static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk)
return sk_fullsock(__sk) ? inet_sk(__sk)->pinet6 : NULL;
}
-static inline struct raw6_sock *raw6_sk(const struct sock *sk)
-{
- return (struct raw6_sock *)sk;
-}
+#define raw6_sk(ptr) container_of_const(ptr, struct raw6_sock, inet.sk)
-#define __ipv6_only_sock(sk) (sk->sk_ipv6only)
-#define ipv6_only_sock(sk) (__ipv6_only_sock(sk))
+#define ipv6_only_sock(sk) (sk->sk_ipv6only)
#define ipv6_sk_rxinfo(sk) ((sk)->sk_family == PF_INET6 && \
inet6_sk(sk)->rxopt.bits.rxinfo)
@@ -352,7 +342,6 @@ static inline int inet_v6_ipv6only(const struct sock *sk)
return ipv6_only_sock(sk);
}
#else
-#define __ipv6_only_sock(sk) 0
#define ipv6_only_sock(sk) 0
#define ipv6_sk_rxinfo(sk) 0
@@ -366,19 +355,12 @@ static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
return NULL;
}
-static inline struct inet6_request_sock *
- inet6_rsk(const struct request_sock *rsk)
-{
- return NULL;
-}
-
static inline struct raw6_sock *raw6_sk(const struct sock *sk)
{
return NULL;
}
#define inet6_rcv_saddr(__sk) NULL
-#define tcp_twsk_ipv6only(__sk) 0
#define inet_v6_ipv6only(__sk) 0
#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif /* _IPV6_H */
diff --git a/include/linux/irq-entry-common.h b/include/linux/irq-entry-common.h
new file mode 100644
index 000000000000..6ab913e57da0
--- /dev/null
+++ b/include/linux/irq-entry-common.h
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_IRQENTRYCOMMON_H
+#define __LINUX_IRQENTRYCOMMON_H
+
+#include <linux/context_tracking.h>
+#include <linux/kmsan.h>
+#include <linux/rseq_entry.h>
+#include <linux/static_call_types.h>
+#include <linux/syscalls.h>
+#include <linux/tick.h>
+#include <linux/unwind_deferred.h>
+
+#include <asm/entry-common.h>
+
+/*
+ * Define dummy _TIF work flags if not defined by the architecture or for
+ * disabled functionality.
+ */
+#ifndef _TIF_PATCH_PENDING
+# define _TIF_PATCH_PENDING (0)
+#endif
+
+/*
+ * TIF flags handled in exit_to_user_mode_loop()
+ */
+#ifndef ARCH_EXIT_TO_USER_MODE_WORK
+# define ARCH_EXIT_TO_USER_MODE_WORK (0)
+#endif
+
+#define EXIT_TO_USER_MODE_WORK \
+ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
+ _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | _TIF_RSEQ | \
+ ARCH_EXIT_TO_USER_MODE_WORK)
+
+/**
+ * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
+ * @regs: Pointer to current's pt_regs
+ *
+ * Defaults to an empty implementation. Can be replaced by architecture
+ * specific code.
+ *
+ * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
+ * section. Use __always_inline so the compiler cannot push it out of line
+ * and make it instrumentable.
+ */
+static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs);
+
+#ifndef arch_enter_from_user_mode
+static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
+#endif
+
+/**
+ * arch_in_rcu_eqs - Architecture specific check for RCU extended quiescent
+ * states.
+ *
+ * Returns: true if the CPU is potentially in an RCU EQS, false otherwise.
+ *
+ * Architectures only need to define this if threads other than the idle thread
+ * may have an interruptible EQS. This does not need to handle idle threads. It
+ * is safe to over-estimate at the cost of redundant RCU management work.
+ *
+ * Invoked from irqentry_enter()
+ */
+#ifndef arch_in_rcu_eqs
+static __always_inline bool arch_in_rcu_eqs(void) { return false; }
+#endif
+
+/**
+ * enter_from_user_mode - Establish state when coming from user mode
+ * @regs: Pointer to current's pt_regs
+ *
+ * Syscall/interrupt entry disables interrupts, but user mode is traced as
+ * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct and interrupts are still
+ * disabled. The subsequent functions can be instrumented.
+ *
+ * This is invoked when there is architecture specific functionality to be
+ * done between establishing state and enabling interrupts. The caller must
+ * enable interrupts before invoking syscall_enter_from_user_mode_work().
+ */
+static __always_inline void enter_from_user_mode(struct pt_regs *regs)
+{
+ arch_enter_from_user_mode(regs);
+ lockdep_hardirqs_off(CALLER_ADDR0);
+
+ CT_WARN_ON(__ct_state() != CT_STATE_USER);
+ user_exit_irqoff();
+
+ instrumentation_begin();
+ kmsan_unpoison_entry_regs(regs);
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
+
+/**
+ * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Defaults to local_irq_enable(). Can be supplied by architecture specific
+ * code.
+ */
+static inline void local_irq_enable_exit_to_user(unsigned long ti_work);
+
+#ifndef local_irq_enable_exit_to_user
+static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
+{
+ local_irq_enable();
+}
+#endif
+
+/**
+ * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
+ *
+ * Defaults to local_irq_disable(). Can be supplied by architecture specific
+ * code.
+ */
+static inline void local_irq_disable_exit_to_user(void);
+
+#ifndef local_irq_disable_exit_to_user
+static inline void local_irq_disable_exit_to_user(void)
+{
+ local_irq_disable();
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
+ * to user mode.
+ * @regs: Pointer to current's pt_regs
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Invoked from exit_to_user_mode_loop() with interrupts enabled
+ *
+ * Defaults to NOOP. Can be supplied by architecture specific code.
+ */
+static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+ unsigned long ti_work);
+
+#ifndef arch_exit_to_user_mode_work
+static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode_prepare - Architecture specific preparation for
+ * exit to user mode.
+ * @regs: Pointer to current's pt_regs
+ * @ti_work: Cached TIF flags gathered with interrupts disabled
+ *
+ * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the last
+ * function before return. Defaults to NOOP.
+ */
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work);
+
+#ifndef arch_exit_to_user_mode_prepare
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+}
+#endif
+
+/**
+ * arch_exit_to_user_mode - Architecture specific final work before
+ * exit to user mode.
+ *
+ * Invoked from exit_to_user_mode() with interrupts disabled as the last
+ * function before return. Defaults to NOOP.
+ *
+ * This needs to be __always_inline because it is non-instrumentable code
+ * invoked after context tracking switched to user mode.
+ *
+ * An architecture implementation must not do anything complex, no locking
+ * etc. The main purpose is for speculation mitigations.
+ */
+static __always_inline void arch_exit_to_user_mode(void);
+
+#ifndef arch_exit_to_user_mode
+static __always_inline void arch_exit_to_user_mode(void) { }
+#endif
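
An architecture overrides the #ifndef default above by defining both the
function and the macro in its <asm/entry-common.h>; schematically (the body
here is hypothetical, illustrating the speculation-mitigation use case):

.. code-block:: c

    /* arch/<arch>/include/asm/entry-common.h */
    static __always_inline void arch_exit_to_user_mode(void)
    {
            /* e.g. clear CPU buffers as a last-minute speculation mitigation */
    }
    #define arch_exit_to_user_mode arch_exit_to_user_mode
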
+
+/**
+ * arch_do_signal_or_restart - Architecture specific signal delivery function
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked from exit_to_user_mode_loop().
+ */
+void arch_do_signal_or_restart(struct pt_regs *regs);
+
+/* Handle pending TIF work */
+unsigned long exit_to_user_mode_loop(struct pt_regs *regs, unsigned long ti_work);
+
+/**
+ * __exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * 1) check that interrupts are disabled
+ * 2) call tick_nohz_user_enter_prepare()
+ * 3) call exit_to_user_mode_loop() if any flags from
+ * EXIT_TO_USER_MODE_WORK are set
+ * 4) check that interrupts are still disabled
+ *
+ * Don't invoke directly, use the syscall/irqentry_ prefixed variants below
+ */
+static __always_inline void __exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+ unsigned long ti_work;
+
+ lockdep_assert_irqs_disabled();
+
+ /* Flush pending rcuog wakeup before the last need_resched() check */
+ tick_nohz_user_enter_prepare();
+
+ ti_work = read_thread_flags();
+ if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
+ ti_work = exit_to_user_mode_loop(regs, ti_work);
+
+ arch_exit_to_user_mode_prepare(regs, ti_work);
+}
+
+static __always_inline void __exit_to_user_mode_validate(void)
+{
+ /* Ensure that kernel state is sane for a return to userspace */
+ kmap_assert_nomap();
+ lockdep_assert_irqs_disabled();
+ lockdep_sys_exit();
+}
+
+/* Temporary workaround to keep ARM64 alive */
+static __always_inline void exit_to_user_mode_prepare_legacy(struct pt_regs *regs)
+{
+ __exit_to_user_mode_prepare(regs);
+ rseq_exit_to_user_mode_legacy();
+ __exit_to_user_mode_validate();
+}
+
+/**
+ * syscall_exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * Wrapper around __exit_to_user_mode_prepare() to separate the exit work for
+ * syscalls and interrupts.
+ */
+static __always_inline void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+ __exit_to_user_mode_prepare(regs);
+ rseq_syscall_exit_to_user_mode();
+ __exit_to_user_mode_validate();
+}
+
+/**
+ * irqentry_exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
+ * @regs: Pointer to pt_regs on entry stack
+ *
+ * Wrapper around __exit_to_user_mode_prepare() to separate the exit work for
+ * syscalls and interrupts.
+ */
+static __always_inline void irqentry_exit_to_user_mode_prepare(struct pt_regs *regs)
+{
+ __exit_to_user_mode_prepare(regs);
+ rseq_irqentry_exit_to_user_mode();
+ __exit_to_user_mode_validate();
+}
+
+/**
+ * exit_to_user_mode - Fixup state when exiting to user mode
+ *
+ * Syscall/interrupt exit enables interrupts, but the kernel state is
+ * interrupts disabled when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Invoke architecture specific last minute exit code, e.g. speculation
+ * mitigations, etc.: arch_exit_to_user_mode()
+ * 4) Tell lockdep that interrupts are enabled
+ *
+ * Invoked from architecture specific code when syscall_exit_to_user_mode()
+ * is not suitable as the last step before returning to userspace. Must be
+ * invoked with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke syscall_exit_to_user_mode_work() before this.
+ */
+static __always_inline void exit_to_user_mode(void)
+{
+ instrumentation_begin();
+ unwind_reset_info();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+
+ user_enter_irqoff();
+ arch_exit_to_user_mode();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+/**
+ * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked from architecture specific entry code with interrupts disabled.
+ * Can only be called when the interrupt entry came from user mode. The
+ * calling code must be non-instrumentable. When the function returns all
+ * state is correct and the subsequent functions can be instrumented.
+ *
+ * The function establishes state (lockdep, RCU (context tracking), tracing)
+ */
+static __always_inline void irqentry_enter_from_user_mode(struct pt_regs *regs)
+{
+ enter_from_user_mode(regs);
+ rseq_note_user_irq_entry();
+}
+
+/**
+ * irqentry_exit_to_user_mode - Interrupt exit work
+ * @regs: Pointer to current's pt_regs
+ *
+ * Invoked with interrupts disabled and fully valid regs. Returns with all
+ * work handled, interrupts disabled such that the caller can immediately
+ * switch to user mode. Called from architecture specific interrupt
+ * handling code.
+ *
+ * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
+ * Interrupt exit does not invoke #1, which is the syscall specific one time
+ * work.
+ */
+static __always_inline void irqentry_exit_to_user_mode(struct pt_regs *regs)
+{
+ instrumentation_begin();
+ irqentry_exit_to_user_mode_prepare(regs);
+ instrumentation_end();
+ exit_to_user_mode();
+}
+
+#ifndef irqentry_state
+/**
+ * struct irqentry_state - Opaque object for exception state storage
+ * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
+ * exit path has to invoke ct_irq_exit().
+ * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
+ * lockdep state is restored correctly on exit from nmi.
+ *
+ * This opaque object is filled in by the irqentry_*_enter() functions and
+ * must be passed back into the corresponding irqentry_*_exit() functions
+ * when the exception is complete.
+ *
+ * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
+ * and all members private. Descriptions of the members are provided to aid in
+ * the maintenance of the irqentry_*() functions.
+ */
+typedef struct irqentry_state {
+ union {
+ bool exit_rcu;
+ bool lockdep;
+ };
+} irqentry_state_t;
+#endif
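
A hedged sketch of the enter/exit pairing: the opaque state returned by
irqentry_enter() must be handed back to the matching irqentry_exit().
handle_my_exception() is hypothetical:

static void noinstr my_exception_handler(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	instrumentation_begin();
	handle_my_exception(regs);	/* instrumentable handler body */
	instrumentation_end();

	irqentry_exit(regs, state);
}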
+
+/**
+ * irqentry_enter - Handle state tracking on ordinary interrupt entries
+ * @regs: Pointer to pt_regs of interrupted context
+ *
+ * Invokes:
+ * - lockdep irqflag state tracking as low level ASM entry disabled
+ * interrupts.
+ *
+ * - Context tracking if the exception hit user mode.
+ *
+ * - The hardirq tracer to keep the state consistent as low level ASM
+ * entry disabled interrupts.
+ *
+ * As a precondition, this requires that the entry came from user mode,
+ * idle, or a kernel context in which RCU is watching.
+ *
+ * For kernel mode entries RCU handling is done conditionally. If RCU is
+ * watching then the only RCU requirement is to check whether the tick has
+ * to be restarted. If RCU is not watching then ct_irq_enter() has to be
+ * invoked on entry and ct_irq_exit() on exit.
+ *
+ * Avoiding the ct_irq_enter/exit() calls is an optimization but also
+ * solves the problem of kernel mode pagefaults which can schedule, which
+ * is not possible after invoking ct_irq_enter() without undoing it.
+ *
+ * For user mode entries irqentry_enter_from_user_mode() is invoked to
+ * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
+ * would not be possible.
+ *
+ * Returns: An opaque object that must be passed to irqentry_exit()
+ */
+irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
+
+/**
+ * raw_irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
+ *
+ * Conditional reschedule with additional sanity checks.
+ */
+void raw_irqentry_exit_cond_resched(void);
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched
+#define irqentry_exit_cond_resched_dynamic_disabled NULL
+DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
+#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)()
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_irqentry_exit_cond_resched(void);
+#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()
+#endif
+#else /* CONFIG_PREEMPT_DYNAMIC */
+#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
+#endif /* CONFIG_PREEMPT_DYNAMIC */
+
+/**
+ * irqentry_exit - Handle return from exception that used irqentry_enter()
+ * @regs: Pointer to pt_regs (exception entry regs)
+ * @state: Return value from matching call to irqentry_enter()
+ *
+ * Depending on the return target (kernel/user) this runs the necessary
+ * preemption and work checks if possible and required and returns to
+ * the caller with interrupts disabled and no further work pending.
+ *
+ * This is the last action before returning to the low level ASM code which
+ * just needs to return to the appropriate context.
+ *
+ * Counterpart to irqentry_enter().
+ */
+void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
+
+/**
+ * irqentry_nmi_enter - Handle NMI entry
+ * @regs: Pointer to current's pt_regs
+ *
+ * Similar to irqentry_enter() but taking care of the NMI constraints.
+ */
+irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);
+
+/**
+ * irqentry_nmi_exit - Handle return from NMI handling
+ * @regs: Pointer to pt_regs (NMI entry regs)
+ * @irq_state: Return value from matching call to irqentry_nmi_enter()
+ *
+ * Last action before returning to the low level assembly code.
+ *
+ * Counterpart to irqentry_nmi_enter().
+ */
+void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);
+
+#endif
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 31b347c9f8dd..4a9f1d7b08c3 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -64,7 +64,6 @@ enum irqchip_irq_state;
* IRQ_NOAUTOEN - Interrupt is not automatically enabled in
* request/setup_irq()
* IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
- * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
* IRQ_NESTED_THREAD - Interrupt nests into another thread
* IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
* IRQ_IS_POLLED - Always polled by another interrupt. Exclude
@@ -72,6 +71,7 @@ enum irqchip_irq_state;
* mechanism and from core side polling.
* IRQ_DISABLE_UNLAZY - Disable lazy irq disable
* IRQ_HIDDEN - Don't show up in /proc/interrupts
+ * IRQ_NO_DEBUG - Exclude from note_interrupt() debugging
*/
enum {
IRQ_TYPE_NONE = 0x00000000,
@@ -92,18 +92,18 @@ enum {
IRQ_NOREQUEST = (1 << 11),
IRQ_NOAUTOEN = (1 << 12),
IRQ_NO_BALANCING = (1 << 13),
- IRQ_MOVE_PCNTXT = (1 << 14),
IRQ_NESTED_THREAD = (1 << 15),
IRQ_NOTHREAD = (1 << 16),
IRQ_PER_CPU_DEVID = (1 << 17),
IRQ_IS_POLLED = (1 << 18),
IRQ_DISABLE_UNLAZY = (1 << 19),
IRQ_HIDDEN = (1 << 20),
+ IRQ_NO_DEBUG = (1 << 21),
};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
- IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
+ IRQ_NOAUTOEN | IRQ_LEVEL | IRQ_NO_BALANCING | \
IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN)
@@ -113,7 +113,7 @@ enum {
* Return value for chip->irq_set_affinity()
*
* IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity
- * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity
+ * IRQ_SET_MASK_NOCOPY - OK, chip did update irq_common_data.affinity
* IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
* support stacked irqchips, which indicates skipping
* all descendant irqchips.
@@ -149,7 +149,9 @@ struct irq_common_data {
#endif
void *handler_data;
struct msi_desc *msi_desc;
+#ifdef CONFIG_SMP
cpumask_var_t affinity;
+#endif
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
cpumask_var_t effective_affinity;
#endif
@@ -175,7 +177,7 @@ struct irq_common_data {
struct irq_data {
u32 mask;
unsigned int irq;
- unsigned long hwirq;
+ irq_hw_number_t hwirq;
struct irq_common_data *common;
struct irq_chip *chip;
struct irq_domain *domain;
@@ -197,8 +199,6 @@ struct irq_data {
* IRQD_LEVEL - Interrupt is level triggered
* IRQD_WAKEUP_STATE - Interrupt is configured for wakeup
* from suspend
- * IRQD_MOVE_PCNTXT - Interrupt can be moved in process
- * context
* IRQD_IRQ_DISABLED - Disabled state of the interrupt
* IRQD_IRQ_MASKED - Masked state of the interrupt
* IRQD_IRQ_INPROGRESS - In progress state of the interrupt
@@ -211,40 +211,39 @@ struct irq_data {
* IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
* IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
* IRQD_CAN_RESERVE - Can use reservation mode
- * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
- * required
* IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked
* from actual interrupt context.
* IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
* irq_chip::irq_set_affinity() when deactivated.
* IRQD_IRQ_ENABLED_ON_SUSPEND - Interrupt is enabled on suspend by irq pm if
* irqchip have flag IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND set.
+ * IRQD_RESEND_WHEN_IN_PROGRESS - Interrupt may fire when already in progress,
+ * in which case it must be resent at the next available opportunity.
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
- IRQD_SETAFFINITY_PENDING = (1 << 8),
- IRQD_ACTIVATED = (1 << 9),
- IRQD_NO_BALANCING = (1 << 10),
- IRQD_PER_CPU = (1 << 11),
- IRQD_AFFINITY_SET = (1 << 12),
- IRQD_LEVEL = (1 << 13),
- IRQD_WAKEUP_STATE = (1 << 14),
- IRQD_MOVE_PCNTXT = (1 << 15),
- IRQD_IRQ_DISABLED = (1 << 16),
- IRQD_IRQ_MASKED = (1 << 17),
- IRQD_IRQ_INPROGRESS = (1 << 18),
- IRQD_WAKEUP_ARMED = (1 << 19),
- IRQD_FORWARDED_TO_VCPU = (1 << 20),
- IRQD_AFFINITY_MANAGED = (1 << 21),
- IRQD_IRQ_STARTED = (1 << 22),
- IRQD_MANAGED_SHUTDOWN = (1 << 23),
- IRQD_SINGLE_TARGET = (1 << 24),
- IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
- IRQD_CAN_RESERVE = (1 << 26),
- IRQD_MSI_NOMASK_QUIRK = (1 << 27),
- IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28),
- IRQD_AFFINITY_ON_ACTIVATE = (1 << 29),
- IRQD_IRQ_ENABLED_ON_SUSPEND = (1 << 30),
+ IRQD_SETAFFINITY_PENDING = BIT(8),
+ IRQD_ACTIVATED = BIT(9),
+ IRQD_NO_BALANCING = BIT(10),
+ IRQD_PER_CPU = BIT(11),
+ IRQD_AFFINITY_SET = BIT(12),
+ IRQD_LEVEL = BIT(13),
+ IRQD_WAKEUP_STATE = BIT(14),
+ IRQD_IRQ_DISABLED = BIT(16),
+ IRQD_IRQ_MASKED = BIT(17),
+ IRQD_IRQ_INPROGRESS = BIT(18),
+ IRQD_WAKEUP_ARMED = BIT(19),
+ IRQD_FORWARDED_TO_VCPU = BIT(20),
+ IRQD_AFFINITY_MANAGED = BIT(21),
+ IRQD_IRQ_STARTED = BIT(22),
+ IRQD_MANAGED_SHUTDOWN = BIT(23),
+ IRQD_SINGLE_TARGET = BIT(24),
+ IRQD_DEFAULT_TRIGGER_SET = BIT(25),
+ IRQD_CAN_RESERVE = BIT(26),
+ IRQD_HANDLE_ENFORCE_IRQCTX = BIT(27),
+ IRQD_AFFINITY_ON_ACTIVATE = BIT(28),
+ IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(29),
+ IRQD_RESEND_WHEN_IN_PROGRESS = BIT(30),
};
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -334,11 +333,6 @@ static inline bool irqd_is_wakeup_set(struct irq_data *d)
return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
}
-static inline bool irqd_can_move_in_process_context(struct irq_data *d)
-{
- return __irqd_to_state(d) & IRQD_MOVE_PCNTXT;
-}
-
static inline bool irqd_irq_disabled(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_IRQ_DISABLED;
@@ -419,29 +413,24 @@ static inline bool irqd_can_reserve(struct irq_data *d)
return __irqd_to_state(d) & IRQD_CAN_RESERVE;
}
-static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
-{
- __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
-}
-
-static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
+static inline void irqd_set_affinity_on_activate(struct irq_data *d)
{
- __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
+ __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
}
-static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
+static inline bool irqd_affinity_on_activate(struct irq_data *d)
{
- return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
+ return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
}
-static inline void irqd_set_affinity_on_activate(struct irq_data *d)
+static inline void irqd_set_resend_when_in_progress(struct irq_data *d)
{
- __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
+ __irqd_to_state(d) |= IRQD_RESEND_WHEN_IN_PROGRESS;
}
-static inline bool irqd_affinity_on_activate(struct irq_data *d)
+static inline bool irqd_needs_resend_when_in_progress(struct irq_data *d)
{
- return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE;
+ return __irqd_to_state(d) & IRQD_RESEND_WHEN_IN_PROGRESS;
}
#undef __irqd_to_state
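
An illustrative (hypothetical) use of the new resend flag: an irqchip whose
interrupts can arrive again while still in progress marks them at allocation
time, so the core resends them instead of dropping them:

static int my_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
			       unsigned int nr_irqs, void *arg)
{
	/* ... hypothetical hwirq mapping omitted ... */
	irqd_set_resend_when_in_progress(irq_get_irq_data(virq));
	return 0;
}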
@@ -454,7 +443,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
/**
* struct irq_chip - hardware interrupt chip descriptor
*
- * @parent_device: pointer to parent device for irqchip
* @name: name for /proc/interrupts
* @irq_startup: start up the interrupt (defaults to ->enable if NULL)
* @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL)
@@ -498,10 +486,10 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @ipi_send_mask: send an IPI to destination cpus in cpumask
* @irq_nmi_setup: function called from core code before enabling an NMI
* @irq_nmi_teardown: function called from core code after disabling an NMI
+ * @irq_force_complete_move: optional function to force complete pending irq move
* @flags: chip specific flags
*/
struct irq_chip {
- struct device *parent_device;
const char *name;
unsigned int (*irq_startup)(struct irq_data *data);
void (*irq_shutdown)(struct irq_data *data);
@@ -522,9 +510,10 @@ struct irq_chip {
void (*irq_bus_lock)(struct irq_data *data);
void (*irq_bus_sync_unlock)(struct irq_data *data);
+#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
void (*irq_cpu_online)(struct irq_data *data);
void (*irq_cpu_offline)(struct irq_data *data);
-
+#endif
void (*irq_suspend)(struct irq_data *data);
void (*irq_resume)(struct irq_data *data);
void (*irq_pm_shutdown)(struct irq_data *data);
@@ -549,6 +538,8 @@ struct irq_chip {
int (*irq_nmi_setup)(struct irq_data *data);
void (*irq_nmi_teardown)(struct irq_data *data);
+ void (*irq_force_complete_move)(struct irq_data *data);
+
unsigned long flags;
};
@@ -567,6 +558,9 @@ struct irq_chip {
* IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips
* IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND: Invokes __enable_irq()/__disable_irq() for wake irqs
* in the suspend path if they are in disabled state
+ * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup
+ * IRQCHIP_IMMUTABLE: Don't ever change anything in this chip
+ * IRQCHIP_MOVE_DEFERRED: Move the interrupt in actual interrupt context
*/
enum {
IRQCHIP_SET_TYPE_MASKED = (1 << 0),
@@ -579,6 +573,9 @@ enum {
IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7),
IRQCHIP_SUPPORTS_NMI = (1 << 8),
IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9),
+ IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10),
+ IRQCHIP_IMMUTABLE = (1 << 11),
+ IRQCHIP_MOVE_DEFERRED = (1 << 12),
};
#include <linux/irqdesc.h>
@@ -600,10 +597,11 @@ enum {
struct irqaction;
extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
-extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
+#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
+#endif
extern int irq_set_affinity_locked(struct irq_data *data,
const struct cpumask *cpumask, bool force);
extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
@@ -616,6 +614,7 @@ extern int irq_affinity_online_cpu(unsigned int cpu);
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
+bool irq_can_move_in_process_context(struct irq_data *data);
void __irq_move_irq(struct irq_data *data);
static inline void irq_move_irq(struct irq_data *data)
{
@@ -623,11 +622,10 @@ static inline void irq_move_irq(struct irq_data *data)
__irq_move_irq(data);
}
void irq_move_masked_irq(struct irq_data *data);
-void irq_force_complete_move(struct irq_desc *desc);
#else
+static inline bool irq_can_move_in_process_context(struct irq_data *data) { return true; }
static inline void irq_move_irq(struct irq_data *data) { }
static inline void irq_move_masked_irq(struct irq_data *data) { }
-static inline void irq_force_complete_move(struct irq_desc *desc) { }
#endif
extern int no_irq_affinity;
@@ -657,7 +655,6 @@ extern void handle_bad_irq(struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
extern void handle_fasteoi_nmi(struct irq_desc *desc);
-extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc);
extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data);
@@ -671,6 +668,8 @@ extern int irq_chip_set_parent_state(struct irq_data *data,
extern int irq_chip_get_parent_state(struct irq_data *data,
enum irqchip_irq_state which,
bool *state);
+extern void irq_chip_shutdown_parent(struct irq_data *data);
+extern unsigned int irq_chip_startup_parent(struct irq_data *data);
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
@@ -690,6 +689,9 @@ extern int irq_chip_request_resources_parent(struct irq_data *data);
extern void irq_chip_release_resources_parent(struct irq_data *data);
#endif
+/* Disable or mask interrupts during a kernel kexec */
+extern void machine_kexec_mask_interrupts(void);
+
/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
@@ -698,27 +700,24 @@ extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
extern int noirqdebug_setup(char *str);
/* Checks whether the interrupt can be requested by request_irq(): */
-extern int can_request_irq(unsigned int irq, unsigned long irqflags);
+extern bool can_request_irq(unsigned int irq, unsigned long irqflags);
/* Dummy irq-chip implementations: */
extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;
extern void
-irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
irq_flow_handler_t handle, const char *name);
-static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+static inline void irq_set_chip_and_handler(unsigned int irq,
+ const struct irq_chip *chip,
irq_flow_handler_t handle)
{
irq_set_chip_and_handler_name(irq, chip, handle, NULL);
}
extern int irq_set_percpu_devid(unsigned int irq);
-extern int irq_set_percpu_devid_partition(unsigned int irq,
- const struct cpumask *affinity);
-extern int irq_get_percpu_devid_partition(unsigned int irq,
- struct cpumask *affinity);
extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
@@ -798,7 +797,7 @@ static inline void irq_set_percpu_devid_flags(unsigned int irq)
}
/* Set/get chip/data for an IRQ: */
-extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
+extern int irq_set_chip(unsigned int irq, const struct irq_chip *chip);
extern int irq_set_handler_data(unsigned int irq, void *data);
extern int irq_set_chip_data(unsigned int irq, void *data);
extern int irq_set_irq_type(unsigned int irq, unsigned int type);
@@ -871,21 +870,34 @@ static inline int irq_data_get_node(struct irq_data *d)
return irq_common_data_get_node(d->common);
}
-static inline struct cpumask *irq_get_affinity_mask(int irq)
+static inline
+const struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
{
- struct irq_data *d = irq_get_irq_data(irq);
+#ifdef CONFIG_SMP
+ return d->common->affinity;
+#else
+ return cpumask_of(0);
+#endif
+}
- return d ? d->common->affinity : NULL;
+static inline void irq_data_update_affinity(struct irq_data *d,
+ const struct cpumask *m)
+{
+#ifdef CONFIG_SMP
+ cpumask_copy(d->common->affinity, m);
+#endif
}
-static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
+static inline const struct cpumask *irq_get_affinity_mask(int irq)
{
- return d->common->affinity;
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ return d ? irq_data_get_affinity_mask(d) : NULL;
}
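
A hedged sketch of a chip that manages the affinity mask itself with the new
const-correct helpers; my_hw_route_irq() is hypothetical. Such a callback
returns IRQ_SET_MASK_OK_NOCOPY so the core skips its own copy of the mask:

static int my_chip_set_affinity(struct irq_data *d,
				const struct cpumask *mask, bool force)
{
	unsigned int cpu = cpumask_first(mask);

	my_hw_route_irq(irqd_to_hwirq(d), cpu);	/* program the hardware */

	/* The chip updated the masks, tell the core not to copy them */
	irq_data_update_affinity(d, cpumask_of(cpu));
	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	return IRQ_SET_MASK_OK_NOCOPY;
}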
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static inline
-struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
return d->common->effective_affinity;
}
@@ -900,13 +912,14 @@ static inline void irq_data_update_effective_affinity(struct irq_data *d,
{
}
static inline
-struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
+const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
- return d->common->affinity;
+ return irq_data_get_affinity_mask(d);
}
#endif
-static inline struct cpumask *irq_get_effective_affinity_mask(unsigned int irq)
+static inline
+const struct cpumask *irq_get_effective_affinity_mask(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
@@ -960,10 +973,6 @@ static inline void irq_free_desc(unsigned int irq)
irq_free_descs(irq, 1);
}
-#ifdef CONFIG_GENERIC_IRQ_LEGACY
-void irq_init_desc(unsigned int irq);
-#endif
-
/**
* struct irq_chip_regs - register offsets for struct irq_gci
* @enable: Enable register offset to reg_base
@@ -972,7 +981,6 @@ void irq_init_desc(unsigned int irq);
* @ack: Ack register offset to reg_base
* @eoi: Eoi register offset to reg_base
* @type: Type configuration register offset to reg_base
- * @polarity: Polarity configuration register offset to reg_base
*/
struct irq_chip_regs {
unsigned long enable;
@@ -981,7 +989,6 @@ struct irq_chip_regs {
unsigned long ack;
unsigned long eoi;
unsigned long type;
- unsigned long polarity;
};
/**
@@ -1021,8 +1028,6 @@ struct irq_chip_type {
* @irq_base: Interrupt base nr for this chip
* @irq_cnt: Number of interrupts handled by this chip
* @mask_cache: Cached mask register shared between all chip types
- * @type_cache: Cached type register
- * @polarity_cache: Cached polarity register
* @wake_enabled: Interrupt can wakeup from suspend
 * @wake_active: Interrupt is marked as a wakeup from suspend source
* @num_ct: Number of available irq_chip_type instances (usually 1)
@@ -1049,8 +1054,6 @@ struct irq_chip_generic {
unsigned int irq_base;
unsigned int irq_cnt;
u32 mask_cache;
- u32 type_cache;
- u32 polarity_cache;
u32 wake_enabled;
u32 wake_active;
unsigned int num_ct;
@@ -1087,6 +1090,7 @@ enum irq_gc_flags {
* @irq_flags_to_set: IRQ* flags to set on irq setup
* @irq_flags_to_clear: IRQ* flags to clear on irq setup
* @gc_flags: Generic chip specific setup flags
+ * @exit: Function called on each chip when it is destroyed.
* @gc: Array of pointers to generic interrupt chips
*/
struct irq_domain_chip_generic {
@@ -1095,9 +1099,37 @@ struct irq_domain_chip_generic {
unsigned int irq_flags_to_clear;
unsigned int irq_flags_to_set;
enum irq_gc_flags gc_flags;
+ void (*exit)(struct irq_chip_generic *gc);
struct irq_chip_generic *gc[];
};
+/**
+ * struct irq_domain_chip_generic_info - Generic chip information structure
+ * @name: Name of the generic interrupt chip
+ * @handler: Interrupt handler used by the generic interrupt chip
+ * @irqs_per_chip: Number of interrupts each chip handles (max 32)
+ * @num_ct: Number of irq_chip_type instances associated with each
+ * chip
+ * @irq_flags_to_clear: IRQ_* bits to clear in the mapping function
+ * @irq_flags_to_set: IRQ_* bits to set in the mapping function
+ * @gc_flags: Generic chip specific setup flags
+ * @init: Function called on each chip when it is created.
+ * Allows some additional chip initialisation.
+ * @exit: Function called on each chip when it is destroyed.
+ * Allows some chip cleanup operations.
+ */
+struct irq_domain_chip_generic_info {
+ const char *name;
+ irq_flow_handler_t handler;
+ unsigned int irqs_per_chip;
+ unsigned int num_ct;
+ unsigned int irq_flags_to_clear;
+ unsigned int irq_flags_to_set;
+ enum irq_gc_flags gc_flags;
+ int (*init)(struct irq_chip_generic *gc);
+ void (*exit)(struct irq_chip_generic *gc);
+};
+
/* Generic chip callback functions */
void irq_gc_noop(struct irq_data *d);
void irq_gc_mask_disable_reg(struct irq_data *d);
@@ -1113,6 +1145,7 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on);
/* Setup functions for irq_chip_generic */
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw_irq);
+void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq);
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
void __iomem *reg_base, irq_flow_handler_t handler);
@@ -1133,6 +1166,20 @@ int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc,
struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
+#ifdef CONFIG_GENERIC_IRQ_CHIP
+int irq_domain_alloc_generic_chips(struct irq_domain *d,
+ const struct irq_domain_chip_generic_info *info);
+void irq_domain_remove_generic_chips(struct irq_domain *d);
+#else
+static inline int
+irq_domain_alloc_generic_chips(struct irq_domain *d,
+ const struct irq_domain_chip_generic_info *info)
+{
+ return -EINVAL;
+}
+static inline void irq_domain_remove_generic_chips(struct irq_domain *d) { }
+#endif /* CONFIG_GENERIC_IRQ_CHIP */
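
A usage sketch for the info-based allocator (field values illustrative, my_
names hypothetical):

static int my_intc_init_chips(struct irq_domain *domain)
{
	struct irq_domain_chip_generic_info info = {
		.name			= "my-intc",
		.handler		= handle_level_irq,
		.irqs_per_chip		= 32,
		.num_ct			= 1,
		.irq_flags_to_clear	= IRQ_NOREQUEST | IRQ_NOPROBE,
		.init			= my_gc_init,	/* hypothetical */
	};

	return irq_domain_alloc_generic_chips(domain, &info);
}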
+
int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
int num_ct, const char *name,
irq_flow_handler_t handler,
@@ -1167,31 +1214,6 @@ static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)
-#ifdef CONFIG_SMP
-static inline void irq_gc_lock(struct irq_chip_generic *gc)
-{
- raw_spin_lock(&gc->lock);
-}
-
-static inline void irq_gc_unlock(struct irq_chip_generic *gc)
-{
- raw_spin_unlock(&gc->lock);
-}
-#else
-static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
-static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
-#endif
-
-/*
- * The irqsave variants are for usage in non interrupt code. Do not use
- * them in irq_chip callbacks. Use irq_gc_lock() instead.
- */
-#define irq_gc_lock_irqsave(gc, flags) \
- raw_spin_lock_irqsave(&(gc)->lock, flags)
-
-#define irq_gc_unlock_irqrestore(gc, flags) \
- raw_spin_unlock_irqrestore(&(gc)->lock, flags)
-
static inline void irq_reg_writel(struct irq_chip_generic *gc,
u32 val, int reg_offset)
{
@@ -1241,6 +1263,9 @@ int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
int ipi_send_single(unsigned int virq, unsigned int cpu);
int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
+void ipi_mux_process(void);
+int ipi_mux_create(unsigned int nr_ipi, void (*mux_send)(unsigned int cpu));
+
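A hedged sketch of the IPI mux pair: ipi_mux_create() takes a per-CPU send hook
and returns the base virq of nr_ipi muxed IPIs, while the parent interrupt's
handler demultiplexes via ipi_mux_process(). The my_ names are hypothetical:

static void my_mux_send(unsigned int cpu)
{
	/* hypothetical: ring the doorbell backing the muxed IPIs */
}

static irqreturn_t my_parent_ipi_handler(int irq, void *data)
{
	ipi_mux_process();
	return IRQ_HANDLED;
}

/* setup: int base_virq = ipi_mux_create(8, my_mux_send); */
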
#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
/*
* Registers a generic IRQ handling function as the top-level IRQ handler in
@@ -1257,6 +1282,7 @@ int __init set_handle_irq(void (*handle_irq)(struct pt_regs *));
* top-level IRQ handler.
*/
extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
+asmlinkage void generic_handle_arch_irq(struct pt_regs *regs);
#else
#ifndef set_handle_irq
#define set_handle_irq(handle_irq) \
diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h
index ab831e5ae748..89b4d8ff274b 100644
--- a/include/linux/irq_sim.h
+++ b/include/linux/irq_sim.h
@@ -16,11 +16,28 @@
* requested like normal irqs and enqueued from process context.
*/
+struct irq_sim_ops {
+ int (*irq_sim_irq_requested)(struct irq_domain *domain,
+ irq_hw_number_t hwirq, void *data);
+ void (*irq_sim_irq_released)(struct irq_domain *domain,
+ irq_hw_number_t hwirq, void *data);
+};
+
struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
unsigned int num_irqs);
struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
struct fwnode_handle *fwnode,
unsigned int num_irqs);
+struct irq_domain *irq_domain_create_sim_full(struct fwnode_handle *fwnode,
+ unsigned int num_irqs,
+ const struct irq_sim_ops *ops,
+ void *data);
+struct irq_domain *
+devm_irq_domain_create_sim_full(struct device *dev,
+ struct fwnode_handle *fwnode,
+ unsigned int num_irqs,
+ const struct irq_sim_ops *ops,
+ void *data);
void irq_domain_remove_sim(struct irq_domain *domain);
#endif /* _LINUX_IRQ_SIM_H */
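
A usage sketch of the extended simulator API (the ops and data are optional;
my_ names hypothetical):

static int my_sim_irq_requested(struct irq_domain *domain,
				irq_hw_number_t hwirq, void *data)
{
	/* hypothetical: start the model backing this simulated line */
	return 0;
}

static const struct irq_sim_ops my_sim_ops = {
	.irq_sim_irq_requested	= my_sim_irq_requested,
};

/* in probe:
 * domain = devm_irq_domain_create_sim_full(dev, fwnode, 16,
 *					     &my_sim_ops, my_data);
 */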
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index ec2a47a81e42..c5afd053ae32 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -2,6 +2,8 @@
#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H
+#include <linux/irq_work_types.h>
+#include <linux/rcuwait.h>
#include <linux/smp_types.h>
/*
@@ -13,14 +15,10 @@
* busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
*/
-struct irq_work {
- struct __call_single_node node;
- void (*func)(struct irq_work *);
-};
-
#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){ \
.node = { .u_flags = (_flags), }, \
.func = (_func), \
+ .irqwait = __RCUWAIT_INITIALIZER(irqwait), \
}
#define IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0)
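
A minimal usage sketch, with my_ names hypothetical: initialize an irq_work
statically and queue it from any context, including NMI-safe paths; the
callback then runs from hard interrupt (irq_work) context:

static void my_work_fn(struct irq_work *work)
{
	/* deferred work, runs in hard interrupt context */
}

static struct irq_work my_work = IRQ_WORK_INIT(my_work_fn);

/* from atomic or NMI context: irq_work_queue(&my_work); */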
@@ -46,6 +44,11 @@ static inline bool irq_work_is_busy(struct irq_work *work)
return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
}
+static inline bool irq_work_is_hard(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ;
+}
+
bool irq_work_queue(struct irq_work *work);
bool irq_work_queue_on(struct irq_work *work, int cpu);
@@ -58,6 +61,9 @@ void irq_work_sync(struct irq_work *work);
void irq_work_run(void);
bool irq_work_needs_cpu(void);
void irq_work_single(void *arg);
+
+void arch_irq_work_raise(void);
+
#else
static inline bool irq_work_needs_cpu(void) { return false; }
static inline void irq_work_run(void) { }
diff --git a/include/linux/irq_work_types.h b/include/linux/irq_work_types.h
new file mode 100644
index 000000000000..73abec5bb06e
--- /dev/null
+++ b/include/linux/irq_work_types.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IRQ_WORK_TYPES_H
+#define _LINUX_IRQ_WORK_TYPES_H
+
+#include <linux/smp_types.h>
+#include <linux/types.h>
+
+struct irq_work {
+ struct __call_single_node node;
+ void (*func)(struct irq_work *);
+ struct rcuwait irqwait;
+};
+
+#endif
diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h
index 9bdb2a781841..ede1fa938152 100644
--- a/include/linux/irqbypass.h
+++ b/include/linux/irqbypass.h
@@ -10,6 +10,7 @@
#include <linux/list.h>
+struct eventfd_ctx;
struct irq_bypass_consumer;
/*
@@ -18,20 +19,22 @@ struct irq_bypass_consumer;
* The IRQ bypass manager is a simple set of lists and callbacks that allows
* IRQ producers (ex. physical interrupt sources) to be matched to IRQ
* consumers (ex. virtualization hardware that allows IRQ bypass or offload)
- * via a shared token (ex. eventfd_ctx). Producers and consumers register
- * independently. When a token match is found, the optional @stop callback
- * will be called for each participant. The pair will then be connected via
- * the @add_* callbacks, and finally the optional @start callback will allow
- * any final coordination. When either participant is unregistered, the
- * process is repeated using the @del_* callbacks in place of the @add_*
- * callbacks. Match tokens must be unique per producer/consumer, 1:N pairings
- * are not supported.
+ * via a shared eventfd_ctx. Producers and consumers register independently.
+ * When a producer and consumer are paired, i.e. an eventfd match is found, the
+ * optional @stop callback will be called for each participant. The pair will
+ * then be connected via the @add_* callbacks, and finally the optional @start
+ * callback will allow any final coordination. When either participant is
+ * unregistered, the process is repeated using the @del_* callbacks in place of
+ * the @add_* callbacks. eventfds must be unique per producer/consumer, 1:N
+ * pairings are not supported.
*/
+struct irq_bypass_consumer;
+
/**
* struct irq_bypass_producer - IRQ bypass producer definition
- * @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer (non-NULL)
+ * @eventfd: eventfd context used to match producers and consumers
+ * @consumer: The connected consumer (NULL if no connection)
* @irq: Linux IRQ number for the producer device
* @add_consumer: Connect the IRQ producer to an IRQ consumer (optional)
* @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional)
@@ -43,8 +46,8 @@ struct irq_bypass_consumer;
* for a physical device assigned to a VM.
*/
struct irq_bypass_producer {
- struct list_head node;
- void *token;
+ struct eventfd_ctx *eventfd;
+ struct irq_bypass_consumer *consumer;
int irq;
int (*add_consumer)(struct irq_bypass_producer *,
struct irq_bypass_consumer *);
@@ -56,8 +59,8 @@ struct irq_bypass_producer {
/**
* struct irq_bypass_consumer - IRQ bypass consumer definition
- * @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer (non-NULL)
+ * @eventfd: eventfd context used to match producers and consumers
+ * @producer: The connected producer (NULL if no connection)
* @add_producer: Connect the IRQ consumer to an IRQ producer
* @del_producer: Disconnect the IRQ consumer from an IRQ producer
* @stop: Perform any quiesce operations necessary prior to add/del (optional)
@@ -69,8 +72,9 @@ struct irq_bypass_producer {
* portions of the interrupt handling to the VM.
*/
struct irq_bypass_consumer {
- struct list_head node;
- void *token;
+ struct eventfd_ctx *eventfd;
+ struct irq_bypass_producer *producer;
+
int (*add_producer)(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
void (*del_producer)(struct irq_bypass_consumer *,
@@ -79,9 +83,11 @@ struct irq_bypass_consumer {
void (*start)(struct irq_bypass_consumer *);
};
-int irq_bypass_register_producer(struct irq_bypass_producer *);
-void irq_bypass_unregister_producer(struct irq_bypass_producer *);
-int irq_bypass_register_consumer(struct irq_bypass_consumer *);
-void irq_bypass_unregister_consumer(struct irq_bypass_consumer *);
+int irq_bypass_register_producer(struct irq_bypass_producer *producer,
+ struct eventfd_ctx *eventfd, int irq);
+void irq_bypass_unregister_producer(struct irq_bypass_producer *producer);
+int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer,
+ struct eventfd_ctx *eventfd);
+void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer);
#endif /* IRQBYPASS_H */
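
A hedged sketch of the reworked registration flow: producers and consumers are
now matched on an eventfd_ctx passed at registration time instead of an opaque
token. Variable names are illustrative:

/* producer side (e.g. a VFIO-like driver); irq backs the eventfd */
ret = irq_bypass_register_producer(&producer, trigger_eventfd, irq);

/* consumer side (e.g. a hypervisor), matched via the same eventfd */
consumer.add_producer = my_add_producer;	/* hypothetical callbacks */
consumer.del_producer = my_del_producer;
ret = irq_bypass_register_consumer(&consumer, trigger_eventfd);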
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h
index 67351aac65ef..bc4ddacd6ddc 100644
--- a/include/linux/irqchip.h
+++ b/include/linux/irqchip.h
@@ -14,8 +14,21 @@
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
+typedef int (*platform_irq_probe_t)(struct platform_device *, struct device_node *);
+
+/* Undefined on purpose */
+extern of_irq_init_cb_t typecheck_irq_init_cb;
+extern platform_irq_probe_t typecheck_irq_probe;
+
+#define typecheck_irq_init_cb(fn) \
+ (__typecheck(typecheck_irq_init_cb, &fn) ? fn : fn)
+
+#define typecheck_irq_probe(fn) \
+ (__typecheck(typecheck_irq_probe, &fn) ? fn : fn)
+
/*
* This macro must be used by the different irqchip drivers to declare
* the association between their DT compatible string and their
@@ -23,29 +36,34 @@
*
* @name: name that must be unique across all IRQCHIP_DECLARE of the
* same file.
- * @compstr: compatible string of the irqchip driver
+ * @compat: compatible string of the irqchip driver
* @fn: initialization function
*/
-#define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn)
+#define IRQCHIP_DECLARE(name, compat, fn) \
+ OF_DECLARE_2(irqchip, name, compat, typecheck_irq_init_cb(fn))
extern int platform_irqchip_probe(struct platform_device *pdev);
#define IRQCHIP_PLATFORM_DRIVER_BEGIN(drv_name) \
static const struct of_device_id drv_name##_irqchip_match_table[] = {
-#define IRQCHIP_MATCH(compat, fn) { .compatible = compat, .data = fn },
+#define IRQCHIP_MATCH(compat, fn) { .compatible = compat, \
+ .data = typecheck_irq_probe(fn), },
+
-#define IRQCHIP_PLATFORM_DRIVER_END(drv_name) \
+#define IRQCHIP_PLATFORM_DRIVER_END(drv_name, ...) \
{}, \
}; \
MODULE_DEVICE_TABLE(of, drv_name##_irqchip_match_table); \
-static struct platform_driver drv_name##_driver = { \
- .probe = platform_irqchip_probe, \
+static struct platform_driver drv_name##_driver = { \
+ .probe = IS_ENABLED(CONFIG_IRQCHIP) ? \
+ platform_irqchip_probe : NULL, \
.driver = { \
.name = #drv_name, \
.owner = THIS_MODULE, \
.of_match_table = drv_name##_irqchip_match_table, \
.suppress_bind_attrs = true, \
+ __VA_ARGS__ \
}, \
}; \
builtin_platform_driver(drv_name##_driver)
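
A usage sketch of the macro trio with the new prototype enforcement; a probe
function with a mismatched signature now trips the __typecheck() pointer
comparison at compile time. The my_intc names are hypothetical:

static int my_intc_probe(struct platform_device *pdev,
			 struct device_node *parent)
{
	return 0;	/* hypothetical probe body */
}

IRQCHIP_PLATFORM_DRIVER_BEGIN(my_intc)
IRQCHIP_MATCH("vendor,my-intc", my_intc_probe)
IRQCHIP_PLATFORM_DRIVER_END(my_intc)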
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index fa8c0455c352..fc0246cc05ac 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -7,36 +7,9 @@
#ifndef __LINUX_IRQCHIP_ARM_GIC_COMMON_H
#define __LINUX_IRQCHIP_ARM_GIC_COMMON_H
-#include <linux/types.h>
-#include <linux/ioport.h>
+#include <linux/irqchip/arm-vgic-info.h>
#define GICD_INT_DEF_PRI 0xa0
-#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
- (GICD_INT_DEF_PRI << 16) |\
- (GICD_INT_DEF_PRI << 8) |\
- GICD_INT_DEF_PRI)
-
-enum gic_type {
- GIC_V2,
- GIC_V3,
-};
-
-struct gic_kvm_info {
- /* GIC type */
- enum gic_type type;
- /* Virtual CPU interface */
- struct resource vcpu;
- /* Interrupt number */
- unsigned int maint_irq;
- /* Virtual control interface */
- struct resource vctrl;
- /* vlpi support */
- bool has_v4;
- /* rvpeid support */
- bool has_v4_1;
-};
-
-const struct gic_kvm_info *gic_get_kvm_info(void);
struct irq_domain;
struct fwnode_handle;
diff --git a/include/linux/irqchip/arm-gic-v3-prio.h b/include/linux/irqchip/arm-gic-v3-prio.h
new file mode 100644
index 000000000000..44157c9abb78
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v3-prio.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H
+#define __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H
+
+/*
+ * GIC priorities from the view of the PMR/RPR.
+ *
+ * These values are chosen to be valid in either the absolute priority space or
+ * the NS view of the priority space. The value programmed into the distributor
+ * and ITS will be chosen at boot time such that these values appear in the
+ * PMR/RPR.
+ *
+ * GICV3_PRIO_UNMASKED is the PMR view of the priority to use to permit both
+ * IRQs and pseudo-NMIs.
+ *
+ * GICV3_PRIO_IRQ is the PMR view of the priority of regular interrupts. This
+ * can be written to the PMR to mask regular IRQs.
+ *
+ * GICV3_PRIO_NMI is the PMR view of the priority of pseudo-NMIs. This can be
+ * written to the PMR to mask pseudo-NMIs.
+ *
+ * On arm64 some code sections either automatically switch back to PSR.I or
+ * explicitly require that priority masking not be used. If the bit
+ * GICV3_PRIO_PSR_I_SET is included in the priority mask, it indicates that
+ * PSR.I should be set and that interrupt disabling temporarily does not rely
+ * on IRQ priorities.
+ */
+#define GICV3_PRIO_UNMASKED 0xe0
+#define GICV3_PRIO_IRQ 0xc0
+#define GICV3_PRIO_NMI 0x80
+
+#define GICV3_PRIO_PSR_I_SET (1 << 4)
+
+#ifndef __ASSEMBLER__
+
+#define __gicv3_prio_to_ns(p) (0xff & ((p) << 1))
+#define __gicv3_ns_to_prio(ns) (0x80 | ((ns) >> 1))
+
+#define __gicv3_prio_valid_ns(p) \
+ (__gicv3_ns_to_prio(__gicv3_prio_to_ns(p)) == (p))
+
+static_assert(__gicv3_prio_valid_ns(GICV3_PRIO_NMI));
+static_assert(__gicv3_prio_valid_ns(GICV3_PRIO_IRQ));
+
+static_assert(GICV3_PRIO_NMI < GICV3_PRIO_IRQ);
+static_assert(GICV3_PRIO_IRQ < GICV3_PRIO_UNMASKED);
+
+static_assert(GICV3_PRIO_IRQ < (GICV3_PRIO_IRQ | GICV3_PRIO_PSR_I_SET));
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* __LINUX_IRQCHIP_ARM_GIC_V3_PRIO_H */
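
A worked example of the helpers above (editorial): the NS view of the priority
space shifts values left by one bit, and the round trip must be the identity
for a priority to be valid in both views:

	__gicv3_prio_to_ns(0x80) == (0xff & (0x80 << 1)) == 0x00
	__gicv3_ns_to_prio(0x00) == (0x80 | (0x00 >> 1)) == 0x80

which is exactly what __gicv3_prio_valid_ns(GICV3_PRIO_NMI) asserts; the same
round trip holds for GICV3_PRIO_IRQ (0xc0 <-> 0x80).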
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 81cbf85f73de..70c0948f978e 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -127,6 +127,8 @@
#define GICR_PIDR2 GICD_PIDR2
#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
+#define GICR_CTLR_CES (1UL << 1)
+#define GICR_CTLR_IR (1UL << 2)
#define GICR_CTLR_RWP (1UL << 3)
#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff)
@@ -615,7 +617,7 @@ struct rdists {
void __iomem *rd_base;
struct page *pend_page;
phys_addr_t phys_base;
- bool lpi_enabled;
+ u64 flags;
cpumask_t *vpe_table_mask;
void *vpe_l1_base;
} __percpu *rdist;
@@ -624,6 +626,7 @@ struct rdists {
u64 flags;
u32 gicd_typer;
u32 gicd_typer2;
+ int cpuhp_memreserve_state;
bool has_vlpis;
bool has_rvpeid;
bool has_direct_lpi;
@@ -632,9 +635,10 @@ struct rdists {
struct irq_domain;
struct fwnode_handle;
+int __init its_lpi_memreserve_init(void);
int its_cpu_init(void);
int its_init(struct fwnode_handle *handle, struct rdists *rdists,
- struct irq_domain *domain);
+ struct irq_domain *domain, u8 irq_prio);
int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent);
static inline bool gic_enable_sre(void)
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
index 2c63375bbd43..0b0887099fd7 100644
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -25,6 +25,14 @@ struct its_vm {
irq_hw_number_t db_lpi_base;
unsigned long *db_bitmap;
int nr_db_lpis;
+ /*
+ * Ensures mutual exclusion between updates to vlpi_count[]
+ * and map/unmap when using the ITSList mechanism.
+ *
+ * The lock order for any sequence involving the ITSList is
+ * vmapp_lock -> vpe_lock -> vmovp_lock.
+ */
+ raw_spinlock_t vmapp_lock;
u32 vlpi_count[GICv4_ITS_LIST_MAX];
};
@@ -58,10 +66,12 @@ struct its_vpe {
bool enabled;
bool group;
} sgi_config[16];
- atomic_t vmapp_count;
};
};
+ /* Track the VPE being mapped */
+ atomic_t vmapp_count;
+
/*
* Ensures mutual exclusion between affinity setting of the
* vPE and vLPI operations using vpe->col_idx.
@@ -136,7 +146,7 @@ int its_commit_vpe(struct its_vpe *vpe);
int its_invall_vpe(struct its_vpe *vpe);
int its_map_vlpi(int irq, struct its_vlpi_map *map);
int its_get_vlpi(int irq, struct its_vlpi_map *map);
-int its_unmap_vlpi(int irq);
+void its_unmap_vlpi(int irq);
int its_prop_update_vlpi(int irq, u8 config, bool inv);
int its_prop_update_vsgi(int irq, u8 priority, bool group);
diff --git a/include/linux/irqchip/arm-gic-v5.h b/include/linux/irqchip/arm-gic-v5.h
new file mode 100644
index 000000000000..68ddcdb1cec5
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v5.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 ARM Limited, All Rights Reserved.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V5_H
+#define __LINUX_IRQCHIP_ARM_GIC_V5_H
+
+#include <linux/iopoll.h>
+
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include <asm/sysreg.h>
+
+#define GICV5_IPIS_PER_CPU MAX_IPI
+
+/*
+ * INTID handling
+ */
+#define GICV5_HWIRQ_ID GENMASK(23, 0)
+#define GICV5_HWIRQ_TYPE GENMASK(31, 29)
+#define GICV5_HWIRQ_INTID GENMASK_ULL(31, 0)
+
+#define GICV5_HWIRQ_TYPE_PPI UL(0x1)
+#define GICV5_HWIRQ_TYPE_LPI UL(0x2)
+#define GICV5_HWIRQ_TYPE_SPI UL(0x3)
+
+/*
+ * Tables attributes
+ */
+#define GICV5_NO_READ_ALLOC 0b0
+#define GICV5_READ_ALLOC 0b1
+#define GICV5_NO_WRITE_ALLOC 0b0
+#define GICV5_WRITE_ALLOC 0b1
+
+#define GICV5_NON_CACHE 0b00
+#define GICV5_WB_CACHE 0b01
+#define GICV5_WT_CACHE 0b10
+
+#define GICV5_NON_SHARE 0b00
+#define GICV5_OUTER_SHARE 0b10
+#define GICV5_INNER_SHARE 0b11
+
+/*
+ * IRS registers and tables structures
+ */
+#define GICV5_IRS_IDR1 0x0004
+#define GICV5_IRS_IDR2 0x0008
+#define GICV5_IRS_IDR5 0x0014
+#define GICV5_IRS_IDR6 0x0018
+#define GICV5_IRS_IDR7 0x001c
+#define GICV5_IRS_CR0 0x0080
+#define GICV5_IRS_CR1 0x0084
+#define GICV5_IRS_SYNCR 0x00c0
+#define GICV5_IRS_SYNC_STATUSR 0x00c4
+#define GICV5_IRS_SPI_SELR 0x0108
+#define GICV5_IRS_SPI_CFGR 0x0114
+#define GICV5_IRS_SPI_STATUSR 0x0118
+#define GICV5_IRS_PE_SELR 0x0140
+#define GICV5_IRS_PE_STATUSR 0x0144
+#define GICV5_IRS_PE_CR0 0x0148
+#define GICV5_IRS_IST_BASER 0x0180
+#define GICV5_IRS_IST_CFGR 0x0190
+#define GICV5_IRS_IST_STATUSR 0x0194
+#define GICV5_IRS_MAP_L2_ISTR 0x01c0
+
+#define GICV5_IRS_IDR1_PRIORITY_BITS GENMASK(22, 20)
+#define GICV5_IRS_IDR1_IAFFID_BITS GENMASK(19, 16)
+
+#define GICV5_IRS_IDR1_PRIORITY_BITS_1BITS 0b000
+#define GICV5_IRS_IDR1_PRIORITY_BITS_2BITS 0b001
+#define GICV5_IRS_IDR1_PRIORITY_BITS_3BITS 0b010
+#define GICV5_IRS_IDR1_PRIORITY_BITS_4BITS 0b011
+#define GICV5_IRS_IDR1_PRIORITY_BITS_5BITS 0b100
+
+#define GICV5_IRS_IDR2_ISTMD_SZ GENMASK(19, 15)
+#define GICV5_IRS_IDR2_ISTMD BIT(14)
+#define GICV5_IRS_IDR2_IST_L2SZ GENMASK(13, 11)
+#define GICV5_IRS_IDR2_IST_LEVELS BIT(10)
+#define GICV5_IRS_IDR2_MIN_LPI_ID_BITS GENMASK(9, 6)
+#define GICV5_IRS_IDR2_LPI BIT(5)
+#define GICV5_IRS_IDR2_ID_BITS GENMASK(4, 0)
+
+#define GICV5_IRS_IDR5_SPI_RANGE GENMASK(24, 0)
+#define GICV5_IRS_IDR6_SPI_IRS_RANGE GENMASK(24, 0)
+#define GICV5_IRS_IDR7_SPI_BASE GENMASK(23, 0)
+
+#define GICV5_IRS_IST_L2SZ_SUPPORT_4KB(r) FIELD_GET(BIT(11), (r))
+#define GICV5_IRS_IST_L2SZ_SUPPORT_16KB(r) FIELD_GET(BIT(12), (r))
+#define GICV5_IRS_IST_L2SZ_SUPPORT_64KB(r) FIELD_GET(BIT(13), (r))
+
+#define GICV5_IRS_CR0_IDLE BIT(1)
+#define GICV5_IRS_CR0_IRSEN BIT(0)
+
+#define GICV5_IRS_CR1_VPED_WA BIT(15)
+#define GICV5_IRS_CR1_VPED_RA BIT(14)
+#define GICV5_IRS_CR1_VMD_WA BIT(13)
+#define GICV5_IRS_CR1_VMD_RA BIT(12)
+#define GICV5_IRS_CR1_VPET_WA BIT(11)
+#define GICV5_IRS_CR1_VPET_RA BIT(10)
+#define GICV5_IRS_CR1_VMT_WA BIT(9)
+#define GICV5_IRS_CR1_VMT_RA BIT(8)
+#define GICV5_IRS_CR1_IST_WA BIT(7)
+#define GICV5_IRS_CR1_IST_RA BIT(6)
+#define GICV5_IRS_CR1_IC GENMASK(5, 4)
+#define GICV5_IRS_CR1_OC GENMASK(3, 2)
+#define GICV5_IRS_CR1_SH GENMASK(1, 0)
+
+#define GICV5_IRS_SYNCR_SYNC BIT(31)
+
+#define GICV5_IRS_SYNC_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_SPI_STATUSR_V BIT(1)
+#define GICV5_IRS_SPI_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_SPI_SELR_ID GENMASK(23, 0)
+
+#define GICV5_IRS_SPI_CFGR_TM BIT(0)
+
+#define GICV5_IRS_PE_SELR_IAFFID GENMASK(15, 0)
+
+#define GICV5_IRS_PE_STATUSR_V BIT(1)
+#define GICV5_IRS_PE_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_PE_CR0_DPS BIT(0)
+
+#define GICV5_IRS_IST_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_IST_CFGR_STRUCTURE BIT(16)
+#define GICV5_IRS_IST_CFGR_ISTSZ GENMASK(8, 7)
+#define GICV5_IRS_IST_CFGR_L2SZ GENMASK(6, 5)
+#define GICV5_IRS_IST_CFGR_LPI_ID_BITS GENMASK(4, 0)
+
+#define GICV5_IRS_IST_CFGR_STRUCTURE_LINEAR 0b0
+#define GICV5_IRS_IST_CFGR_STRUCTURE_TWO_LEVEL 0b1
+
+#define GICV5_IRS_IST_CFGR_ISTSZ_4 0b00
+#define GICV5_IRS_IST_CFGR_ISTSZ_8 0b01
+#define GICV5_IRS_IST_CFGR_ISTSZ_16 0b10
+
+#define GICV5_IRS_IST_CFGR_L2SZ_4K 0b00
+#define GICV5_IRS_IST_CFGR_L2SZ_16K 0b01
+#define GICV5_IRS_IST_CFGR_L2SZ_64K 0b10
+
+#define GICV5_IRS_IST_BASER_ADDR_MASK GENMASK_ULL(55, 6)
+#define GICV5_IRS_IST_BASER_VALID BIT_ULL(0)
+
+#define GICV5_IRS_MAP_L2_ISTR_ID GENMASK(23, 0)
+
+#define GICV5_ISTL1E_VALID BIT_ULL(0)
+
+#define GICV5_ISTL1E_L2_ADDR_MASK GENMASK_ULL(55, 12)
+
+/*
+ * ITS registers and tables structures
+ */
+#define GICV5_ITS_IDR1 0x0004
+#define GICV5_ITS_IDR2 0x0008
+#define GICV5_ITS_CR0 0x0080
+#define GICV5_ITS_CR1 0x0084
+#define GICV5_ITS_DT_BASER 0x00c0
+#define GICV5_ITS_DT_CFGR 0x00d0
+#define GICV5_ITS_DIDR 0x0100
+#define GICV5_ITS_EIDR 0x0108
+#define GICV5_ITS_INV_EVENTR 0x010c
+#define GICV5_ITS_INV_DEVICER 0x0110
+#define GICV5_ITS_STATUSR 0x0120
+#define GICV5_ITS_SYNCR 0x0140
+#define GICV5_ITS_SYNC_STATUSR 0x0148
+
+#define GICV5_ITS_IDR1_L2SZ GENMASK(10, 8)
+#define GICV5_ITS_IDR1_ITT_LEVELS BIT(7)
+#define GICV5_ITS_IDR1_DT_LEVELS BIT(6)
+#define GICV5_ITS_IDR1_DEVICEID_BITS GENMASK(5, 0)
+
+#define GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(r) FIELD_GET(BIT(8), (r))
+#define GICV5_ITS_IDR1_L2SZ_SUPPORT_16KB(r) FIELD_GET(BIT(9), (r))
+#define GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(r) FIELD_GET(BIT(10), (r))
+
+#define GICV5_ITS_IDR2_XDMN_EVENTs GENMASK(6, 5)
+#define GICV5_ITS_IDR2_EVENTID_BITS GENMASK(4, 0)
+
+#define GICV5_ITS_CR0_IDLE BIT(1)
+#define GICV5_ITS_CR0_ITSEN BIT(0)
+
+#define GICV5_ITS_CR1_ITT_RA BIT(7)
+#define GICV5_ITS_CR1_DT_RA BIT(6)
+#define GICV5_ITS_CR1_IC GENMASK(5, 4)
+#define GICV5_ITS_CR1_OC GENMASK(3, 2)
+#define GICV5_ITS_CR1_SH GENMASK(1, 0)
+
+#define GICV5_ITS_DT_CFGR_STRUCTURE BIT(16)
+#define GICV5_ITS_DT_CFGR_L2SZ GENMASK(7, 6)
+#define GICV5_ITS_DT_CFGR_DEVICEID_BITS GENMASK(5, 0)
+
+#define GICV5_ITS_DT_BASER_ADDR_MASK GENMASK_ULL(55, 3)
+
+#define GICV5_ITS_INV_DEVICER_I BIT(31)
+#define GICV5_ITS_INV_DEVICER_EVENTID_BITS GENMASK(5, 1)
+#define GICV5_ITS_INV_DEVICER_L1 BIT(0)
+
+#define GICV5_ITS_DIDR_DEVICEID GENMASK_ULL(31, 0)
+
+#define GICV5_ITS_EIDR_EVENTID GENMASK(15, 0)
+
+#define GICV5_ITS_INV_EVENTR_I BIT(31)
+#define GICV5_ITS_INV_EVENTR_ITT_L2SZ GENMASK(2, 1)
+#define GICV5_ITS_INV_EVENTR_L1 BIT(0)
+
+#define GICV5_ITS_STATUSR_IDLE BIT(0)
+
+#define GICV5_ITS_SYNCR_SYNC BIT_ULL(63)
+#define GICV5_ITS_SYNCR_SYNCALL BIT_ULL(32)
+#define GICV5_ITS_SYNCR_DEVICEID GENMASK_ULL(31, 0)
+
+#define GICV5_ITS_SYNC_STATUSR_IDLE BIT(0)
+
+#define GICV5_DTL1E_VALID BIT_ULL(0)
+/* Note that there is no shift for the address by design */
+#define GICV5_DTL1E_L2_ADDR_MASK GENMASK_ULL(55, 3)
+#define GICV5_DTL1E_SPAN GENMASK_ULL(63, 60)
+
+#define GICV5_DTL2E_VALID BIT_ULL(0)
+#define GICV5_DTL2E_ITT_L2SZ GENMASK_ULL(2, 1)
+/* Note that there is no shift for the address by design */
+#define GICV5_DTL2E_ITT_ADDR_MASK GENMASK_ULL(55, 3)
+#define GICV5_DTL2E_ITT_DSWE BIT_ULL(57)
+#define GICV5_DTL2E_ITT_STRUCTURE BIT_ULL(58)
+#define GICV5_DTL2E_EVENT_ID_BITS GENMASK_ULL(63, 59)
+
+#define GICV5_ITTL1E_VALID BIT_ULL(0)
+/* Note that there is no shift for the address by design */
+#define GICV5_ITTL1E_L2_ADDR_MASK GENMASK_ULL(55, 3)
+#define GICV5_ITTL1E_SPAN GENMASK_ULL(63, 60)
+
+#define GICV5_ITTL2E_LPI_ID GENMASK_ULL(23, 0)
+#define GICV5_ITTL2E_DAC GENMASK_ULL(29, 28)
+#define GICV5_ITTL2E_VIRTUAL BIT_ULL(30)
+#define GICV5_ITTL2E_VALID BIT_ULL(31)
+#define GICV5_ITTL2E_VM_ID GENMASK_ULL(47, 32)
+
+#define GICV5_ITS_DT_ITT_CFGR_L2SZ_4k 0b00
+#define GICV5_ITS_DT_ITT_CFGR_L2SZ_16k 0b01
+#define GICV5_ITS_DT_ITT_CFGR_L2SZ_64k 0b10
+
+#define GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR 0
+#define GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL 1
+
+#define GICV5_ITS_HWIRQ_DEVICE_ID GENMASK_ULL(31, 0)
+#define GICV5_ITS_HWIRQ_EVENT_ID GENMASK_ULL(63, 32)
+
+/*
+ * IWB registers
+ */
+#define GICV5_IWB_IDR0 0x0000
+#define GICV5_IWB_CR0 0x0080
+#define GICV5_IWB_WENABLE_STATUSR 0x00c0
+#define GICV5_IWB_WENABLER 0x2000
+#define GICV5_IWB_WTMR 0x4000
+
+#define GICV5_IWB_IDR0_INT_DOMS GENMASK(14, 11)
+#define GICV5_IWB_IDR0_IW_RANGE GENMASK(10, 0)
+
+#define GICV5_IWB_CR0_IDLE BIT(1)
+#define GICV5_IWB_CR0_IWBEN BIT(0)
+
+#define GICV5_IWB_WENABLE_STATUSR_IDLE BIT(0)
+
+/*
+ * Global Data structures and functions
+ */
+struct gicv5_chip_data {
+ struct fwnode_handle *fwnode;
+ struct irq_domain *ppi_domain;
+ struct irq_domain *spi_domain;
+ struct irq_domain *lpi_domain;
+ struct irq_domain *ipi_domain;
+ u32 global_spi_count;
+ u8 cpuif_pri_bits;
+ u8 cpuif_id_bits;
+ u8 irs_pri_bits;
+ struct {
+ __le64 *l1ist_addr;
+ u32 l2_size;
+ u8 l2_bits;
+ bool l2;
+ } ist;
+};
+
+extern struct gicv5_chip_data gicv5_global_data __read_mostly;
+
+struct gicv5_irs_chip_data {
+ struct list_head entry;
+ struct fwnode_handle *fwnode;
+ void __iomem *irs_base;
+ u32 flags;
+ u32 spi_min;
+ u32 spi_range;
+ raw_spinlock_t spi_config_lock;
+};
+
+static inline int gicv5_wait_for_op_s_atomic(void __iomem *addr, u32 offset,
+ const char *reg_s, u32 mask,
+ u32 *val)
+{
+ void __iomem *reg = addr + offset;
+ u32 tmp;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(reg, tmp, tmp & mask, 1, 10 * USEC_PER_MSEC);
+ if (unlikely(ret == -ETIMEDOUT)) {
+ pr_err_ratelimited("%s timeout...\n", reg_s);
+ return ret;
+ }
+
+ if (val)
+ *val = tmp;
+
+ return 0;
+}
+
+static inline int gicv5_wait_for_op_s(void __iomem *addr, u32 offset,
+ const char *reg_s, u32 mask)
+{
+ void __iomem *reg = addr + offset;
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout(reg, val, val & mask, 1, 10 * USEC_PER_MSEC);
+ if (unlikely(ret == -ETIMEDOUT)) {
+ pr_err_ratelimited("%s timeout...\n", reg_s);
+ return ret;
+ }
+
+ return 0;
+}
+
+#define gicv5_wait_for_op_atomic(base, reg, mask, val) \
+ gicv5_wait_for_op_s_atomic(base, reg, #reg, mask, val)
+
+#define gicv5_wait_for_op(base, reg, mask) \
+ gicv5_wait_for_op_s(base, reg, #reg, mask)
+
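A usage sketch of the polling macros above, assuming an IRS sync sequence; the
macro stringifies the register name for the ratelimited timeout message:

	writel_relaxed(GICV5_IRS_SYNCR_SYNC, irs_base + GICV5_IRS_SYNCR);
	ret = gicv5_wait_for_op(irs_base, GICV5_IRS_SYNC_STATUSR,
				GICV5_IRS_SYNC_STATUSR_IDLE);
	if (ret)
		return ret;
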
+void __init gicv5_init_lpi_domain(void);
+void __init gicv5_free_lpi_domain(void);
+
+int gicv5_irs_of_probe(struct device_node *parent);
+void gicv5_irs_remove(void);
+int gicv5_irs_enable(void);
+void gicv5_irs_its_probe(void);
+int gicv5_irs_register_cpu(int cpuid);
+int gicv5_irs_cpu_to_iaffid(int cpu_id, u16 *iaffid);
+struct gicv5_irs_chip_data *gicv5_irs_lookup_by_spi_id(u32 spi_id);
+int gicv5_spi_irq_set_type(struct irq_data *d, unsigned int type);
+int gicv5_irs_iste_alloc(u32 lpi);
+void gicv5_irs_syncr(void);
+
+struct gicv5_its_devtab_cfg {
+ union {
+ struct {
+ __le64 *devtab;
+ } linear;
+ struct {
+ __le64 *l1devtab;
+ __le64 **l2ptrs;
+ } l2;
+ };
+ u32 cfgr;
+};
+
+struct gicv5_its_itt_cfg {
+ union {
+ struct {
+ __le64 *itt;
+ unsigned int num_ents;
+ } linear;
+ struct {
+ __le64 *l1itt;
+ __le64 **l2ptrs;
+ unsigned int num_l1_ents;
+ u8 l2sz;
+ } l2;
+ };
+ u8 event_id_bits;
+ bool l2itt;
+};
+
+void gicv5_init_lpis(u32 max);
+void gicv5_deinit_lpis(void);
+
+int gicv5_alloc_lpi(void);
+void gicv5_free_lpi(u32 lpi);
+
+void __init gicv5_its_of_probe(struct device_node *parent);
+#endif
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 5686711b0f40..d45fa19f9e47 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -86,7 +86,13 @@
#define GICH_HCR_EN (1 << 0)
#define GICH_HCR_UIE (1 << 1)
+#define GICH_HCR_LRENPIE (1 << 2)
#define GICH_HCR_NPIE (1 << 3)
+#define GICH_HCR_VGrp0EIE (1 << 4)
+#define GICH_HCR_VGrp0DIE (1 << 5)
+#define GICH_HCR_VGrp1EIE (1 << 6)
+#define GICH_HCR_VGrp1DIE (1 << 7)
+#define GICH_HCR_EOICOUNT GENMASK(31, 27)
#define GICH_LR_VIRTUALID (0x3ff << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
@@ -151,12 +157,6 @@ int gic_of_init(struct device_node *node, struct device_node *parent);
*/
int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq);
-/*
- * Legacy platforms not converted to DT yet must use this to init
- * their GIC
- */
-void gic_init(void __iomem *dist , void __iomem *cpu);
-
void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
int gic_get_cpu_id(unsigned int cpu);
void gic_migrate_target(unsigned int new_cpu_id);
diff --git a/include/linux/irqchip/arm-vgic-info.h b/include/linux/irqchip/arm-vgic-info.h
new file mode 100644
index 000000000000..67d9d960273b
--- /dev/null
+++ b/include/linux/irqchip/arm-vgic-info.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * include/linux/irqchip/arm-vgic-info.h
+ *
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_VGIC_INFO_H
+#define __LINUX_IRQCHIP_ARM_VGIC_INFO_H
+
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+enum gic_type {
+ /* Full GICv2 */
+ GIC_V2,
+ /* Full GICv3, optionally with v2 compat */
+ GIC_V3,
+ /* Full GICv5, optionally with v3 compat */
+ GIC_V5,
+};
+
+struct gic_kvm_info {
+ /* GIC type */
+ enum gic_type type;
+ /* Virtual CPU interface */
+ struct resource vcpu;
+ /* GICv2 GICC VA */
+ void __iomem *gicc_base;
+ /* Interrupt number */
+ unsigned int maint_irq;
+ /* No interrupt mask, no need to use the above field */
+ bool no_maint_irq_mask;
+ /* Virtual control interface */
+ struct resource vctrl;
+ /* vlpi support */
+ bool has_v4;
+ /* rvpeid support */
+ bool has_v4_1;
+	/* Deactivation impaired, subpar stuff */
+ bool no_hw_deactivation;
+};
+
+#ifdef CONFIG_KVM
+void vgic_set_kvm_info(const struct gic_kvm_info *info);
+#else
+static inline void vgic_set_kvm_info(const struct gic_kvm_info *info) {}
+#endif
+
+#endif
diff --git a/include/linux/irqchip/irq-davinci-aintc.h b/include/linux/irqchip/irq-davinci-aintc.h
deleted file mode 100644
index ea4e087fac98..000000000000
--- a/include/linux/irqchip/irq-davinci-aintc.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019 Texas Instruments
- */
-
-#ifndef _LINUX_IRQ_DAVINCI_AINTC_
-#define _LINUX_IRQ_DAVINCI_AINTC_
-
-#include <linux/ioport.h>
-
-/**
- * struct davinci_aintc_config - configuration data for davinci-aintc driver.
- *
- * @reg: register range to map
- * @num_irqs: number of HW interrupts supported by the controller
- * @prios: an array of size num_irqs containing priority settings for
- * each interrupt
- */
-struct davinci_aintc_config {
- struct resource reg;
- unsigned int num_irqs;
- u8 *prios;
-};
-
-void davinci_aintc_init(const struct davinci_aintc_config *config);
-
-#endif /* _LINUX_IRQ_DAVINCI_AINTC_ */
diff --git a/include/linux/irqchip/irq-davinci-cp-intc.h b/include/linux/irqchip/irq-davinci-cp-intc.h
deleted file mode 100644
index 8d71ed5b5a61..000000000000
--- a/include/linux/irqchip/irq-davinci-cp-intc.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2019 Texas Instruments
- */
-
-#ifndef _LINUX_IRQ_DAVINCI_CP_INTC_
-#define _LINUX_IRQ_DAVINCI_CP_INTC_
-
-#include <linux/ioport.h>
-
-/**
- * struct davinci_cp_intc_config - configuration data for davinci-cp-intc
- * driver.
- *
- * @reg: register range to map
- * @num_irqs: number of HW interrupts supported by the controller
- */
-struct davinci_cp_intc_config {
- struct resource reg;
- unsigned int num_irqs;
-};
-
-int davinci_cp_intc_init(const struct davinci_cp_intc_config *config);
-
-#endif /* _LINUX_IRQ_DAVINCI_CP_INTC_ */
diff --git a/include/linux/irqchip/irq-ixp4xx.h b/include/linux/irqchip/irq-ixp4xx.h
deleted file mode 100644
index 9395917d6936..000000000000
--- a/include/linux/irqchip/irq-ixp4xx.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IRQ_IXP4XX_H
-#define __IRQ_IXP4XX_H
-
-#include <linux/ioport.h>
-struct irq_domain;
-
-void ixp4xx_irq_init(resource_size_t irqbase,
- bool is_356);
-struct irq_domain *ixp4xx_get_irq_domain(void);
-
-#endif /* __IRQ_IXP4XX_H */
diff --git a/include/linux/irqchip/irq-msi-lib.h b/include/linux/irqchip/irq-msi-lib.h
new file mode 100644
index 000000000000..224ac28e88d7
--- /dev/null
+++ b/include/linux/irqchip/irq-msi-lib.h
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2022 Linutronix GmbH
+// Copyright (C) 2022 Intel
+
+#ifndef _IRQCHIP_IRQ_MSI_LIB_H
+#define _IRQCHIP_IRQ_MSI_LIB_H
+
+#include <linux/bits.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+
+#ifdef CONFIG_PCI_MSI
+#define MATCH_PCI_MSI BIT(DOMAIN_BUS_PCI_MSI)
+#else
+#define MATCH_PCI_MSI (0)
+#endif
+
+#define MATCH_PLATFORM_MSI BIT(DOMAIN_BUS_PLATFORM_MSI)
+
+struct msi_domain_info;
+int msi_lib_irq_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
+
+bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent,
+ struct msi_domain_info *info);
+
+#endif /* _IRQCHIP_IRQ_MSI_LIB_H */
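For orientation, a sketch of how an MSI parent irqchip driver is expected to plug these two helpers in; the flag choices and all example_* names are illustrative assumptions, not requirements of the library:

#include <linux/irqchip/irq-msi-lib.h>

static const struct msi_parent_ops example_msi_parent_ops = {
	.supported_flags	= MSI_GENERIC_FLAGS_MASK,
	.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS |
				  MSI_FLAG_USE_DEF_CHIP_OPS,
	.bus_select_token	= DOMAIN_BUS_NEXUS,
	.bus_select_mask	= MATCH_PCI_MSI | MATCH_PLATFORM_MSI,
	.prefix			= "EXAMPLE-",
	/* Fills in per-device MSI domain info on behalf of the parent. */
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

static const struct irq_domain_ops example_domain_ops = {
	/* Lets the library arbitrate domain selection by bus token. */
	.select	= msi_lib_irq_domain_select,
	/* .alloc/.free remain driver specific and are omitted here. */
};

The parent domain would additionally carry IRQ_DOMAIN_FLAG_MSI_PARENT and point its msi_parent_ops at the structure above.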
diff --git a/include/linux/irqchip/irq-partition-percpu.h b/include/linux/irqchip/irq-partition-percpu.h
deleted file mode 100644
index 2f6ae7551748..000000000000
--- a/include/linux/irqchip/irq-partition-percpu.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2016 ARM Limited, All Rights Reserved.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#ifndef __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H
-#define __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H
-
-#include <linux/fwnode.h>
-#include <linux/cpumask.h>
-#include <linux/irqdomain.h>
-
-struct partition_affinity {
- cpumask_t mask;
- void *partition_id;
-};
-
-struct partition_desc;
-
-#ifdef CONFIG_PARTITION_PERCPU
-int partition_translate_id(struct partition_desc *desc, void *partition_id);
-struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
- struct partition_affinity *parts,
- int nr_parts,
- int chained_irq,
- const struct irq_domain_ops *ops);
-struct irq_domain *partition_get_domain(struct partition_desc *dsc);
-#else
-static inline int partition_translate_id(struct partition_desc *desc,
- void *partition_id)
-{
- return -EINVAL;
-}
-
-static inline
-struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
- struct partition_affinity *parts,
- int nr_parts,
- int chained_irq,
- const struct irq_domain_ops *ops)
-{
- return NULL;
-}
-
-static inline
-struct irq_domain *partition_get_domain(struct partition_desc *dsc)
-{
- return NULL;
-}
-#endif
-
-#endif /* __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H */
diff --git a/include/linux/irqchip/irq-renesas-rzv2h.h b/include/linux/irqchip/irq-renesas-rzv2h.h
new file mode 100644
index 000000000000..618a60d2eac0
--- /dev/null
+++ b/include/linux/irqchip/irq-renesas-rzv2h.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Renesas RZ/V2H(P) Interrupt Control Unit (ICU)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation.
+ */
+
+#ifndef __LINUX_IRQ_RENESAS_RZV2H
+#define __LINUX_IRQ_RENESAS_RZV2H
+
+#include <linux/platform_device.h>
+
+#define RZV2H_ICU_DMAC_REQ_NO_DEFAULT 0x3ff
+
+#ifdef CONFIG_RENESAS_RZV2H_ICU
+void rzv2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, u8 dmac_channel,
+ u16 req_no);
+#else
+static inline void rzv2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index,
+ u8 dmac_channel, u16 req_no) { }
+#endif
+
+#endif /* __LINUX_IRQ_RENESAS_RZV2H */
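By way of example, a consumer such as a DMAC driver would advertise its request line to the ICU roughly as below; the ICU platform device is assumed to be looked up elsewhere, and all numbers are placeholders:

#include <linux/irqchip/irq-renesas-rzv2h.h>

static void example_route_dma_req(struct platform_device *icu_pdev)
{
	u8 dmac_index = 0;	/* first DMAC instance */
	u8 dmac_channel = 3;	/* channel within that DMAC */
	u16 req_no = 0x21;	/* peripheral request line number */

	/* Compiles to an empty stub when CONFIG_RENESAS_RZV2H_ICU=n. */
	rzv2h_icu_register_dma_req(icu_pdev, dmac_index, dmac_channel, req_no);
}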
diff --git a/include/linux/irqchip/mmp.h b/include/linux/irqchip/mmp.h
deleted file mode 100644
index cb8455c87c8a..000000000000
--- a/include/linux/irqchip/mmp.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IRQCHIP_MMP_H
-#define __IRQCHIP_MMP_H
-
-extern struct irq_chip icu_irq_chip;
-
-#endif /* __IRQCHIP_MMP_H */
diff --git a/include/linux/irqchip/mxs.h b/include/linux/irqchip/mxs.h
deleted file mode 100644
index 4f447e3f0f3a..000000000000
--- a/include/linux/irqchip/mxs.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2013 Freescale Semiconductor, Inc.
- */
-
-#ifndef __LINUX_IRQCHIP_MXS_H
-#define __LINUX_IRQCHIP_MXS_H
-
-extern void icoll_handle_irq(struct pt_regs *);
-
-#endif
diff --git a/include/linux/irqchip/riscv-aplic.h b/include/linux/irqchip/riscv-aplic.h
new file mode 100644
index 000000000000..ec8f7df50583
--- /dev/null
+++ b/include/linux/irqchip/riscv-aplic.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+#ifndef __LINUX_IRQCHIP_RISCV_APLIC_H
+#define __LINUX_IRQCHIP_RISCV_APLIC_H
+
+#include <linux/bitops.h>
+
+#define APLIC_MAX_IDC BIT(14)
+#define APLIC_MAX_SOURCE 1024
+
+#define APLIC_DOMAINCFG 0x0000
+#define APLIC_DOMAINCFG_RDONLY 0x80000000
+#define APLIC_DOMAINCFG_IE BIT(8)
+#define APLIC_DOMAINCFG_DM BIT(2)
+#define APLIC_DOMAINCFG_BE BIT(0)
+
+#define APLIC_SOURCECFG_BASE 0x0004
+#define APLIC_SOURCECFG_D BIT(10)
+#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff
+#define APLIC_SOURCECFG_SM_MASK 0x00000007
+#define APLIC_SOURCECFG_SM_INACTIVE 0x0
+#define APLIC_SOURCECFG_SM_DETACH 0x1
+#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4
+#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5
+#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6
+#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7
+
+#define APLIC_MMSICFGADDR 0x1bc0
+#define APLIC_MMSICFGADDRH 0x1bc4
+#define APLIC_SMSICFGADDR 0x1bc8
+#define APLIC_SMSICFGADDRH 0x1bcc
+
+#ifdef CONFIG_RISCV_M_MODE
+#define APLIC_xMSICFGADDR APLIC_MMSICFGADDR
+#define APLIC_xMSICFGADDRH APLIC_MMSICFGADDRH
+#else
+#define APLIC_xMSICFGADDR APLIC_SMSICFGADDR
+#define APLIC_xMSICFGADDRH APLIC_SMSICFGADDRH
+#endif
+
+#define APLIC_xMSICFGADDRH_L BIT(31)
+#define APLIC_xMSICFGADDRH_HHXS_MASK 0x1f
+#define APLIC_xMSICFGADDRH_HHXS_SHIFT 24
+#define APLIC_xMSICFGADDRH_HHXS (APLIC_xMSICFGADDRH_HHXS_MASK << \
+ APLIC_xMSICFGADDRH_HHXS_SHIFT)
+#define APLIC_xMSICFGADDRH_LHXS_MASK 0x7
+#define APLIC_xMSICFGADDRH_LHXS_SHIFT 20
+#define APLIC_xMSICFGADDRH_LHXS (APLIC_xMSICFGADDRH_LHXS_MASK << \
+ APLIC_xMSICFGADDRH_LHXS_SHIFT)
+#define APLIC_xMSICFGADDRH_HHXW_MASK 0x7
+#define APLIC_xMSICFGADDRH_HHXW_SHIFT 16
+#define APLIC_xMSICFGADDRH_HHXW (APLIC_xMSICFGADDRH_HHXW_MASK << \
+ APLIC_xMSICFGADDRH_HHXW_SHIFT)
+#define APLIC_xMSICFGADDRH_LHXW_MASK 0xf
+#define APLIC_xMSICFGADDRH_LHXW_SHIFT 12
+#define APLIC_xMSICFGADDRH_LHXW (APLIC_xMSICFGADDRH_LHXW_MASK << \
+ APLIC_xMSICFGADDRH_LHXW_SHIFT)
+#define APLIC_xMSICFGADDRH_BAPPN_MASK 0xfff
+#define APLIC_xMSICFGADDRH_BAPPN_SHIFT 0
+#define APLIC_xMSICFGADDRH_BAPPN (APLIC_xMSICFGADDRH_BAPPN_MASK << \
+ APLIC_xMSICFGADDRH_BAPPN_SHIFT)
+
+#define APLIC_xMSICFGADDR_PPN_SHIFT 12
+
+#define APLIC_xMSICFGADDR_PPN_HART(__lhxs) \
+ (BIT(__lhxs) - 1)
+
+#define APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) \
+ (BIT(__lhxw) - 1)
+#define APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs) \
+ ((__lhxs))
+#define APLIC_xMSICFGADDR_PPN_LHX(__lhxw, __lhxs) \
+ (APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) << \
+ APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs))
+
+#define APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) \
+ (BIT(__hhxw) - 1)
+#define APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs) \
+ ((__hhxs) + APLIC_xMSICFGADDR_PPN_SHIFT)
+#define APLIC_xMSICFGADDR_PPN_HHX(__hhxw, __hhxs) \
+ (APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) << \
+ APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs))
+
+#define APLIC_IRQBITS_PER_REG 32
+
+#define APLIC_SETIP_BASE 0x1c00
+#define APLIC_SETIPNUM 0x1cdc
+
+#define APLIC_CLRIP_BASE 0x1d00
+#define APLIC_CLRIPNUM 0x1ddc
+
+#define APLIC_SETIE_BASE 0x1e00
+#define APLIC_SETIENUM 0x1edc
+
+#define APLIC_CLRIE_BASE 0x1f00
+#define APLIC_CLRIENUM 0x1fdc
+
+#define APLIC_SETIPNUM_LE 0x2000
+#define APLIC_SETIPNUM_BE 0x2004
+
+#define APLIC_GENMSI 0x3000
+
+#define APLIC_TARGET_BASE 0x3004
+#define APLIC_TARGET_HART_IDX_SHIFT 18
+#define APLIC_TARGET_HART_IDX_MASK 0x3fff
+#define APLIC_TARGET_HART_IDX (APLIC_TARGET_HART_IDX_MASK << \
+ APLIC_TARGET_HART_IDX_SHIFT)
+#define APLIC_TARGET_GUEST_IDX_SHIFT 12
+#define APLIC_TARGET_GUEST_IDX_MASK 0x3f
+#define APLIC_TARGET_GUEST_IDX (APLIC_TARGET_GUEST_IDX_MASK << \
+ APLIC_TARGET_GUEST_IDX_SHIFT)
+#define APLIC_TARGET_IPRIO_SHIFT 0
+#define APLIC_TARGET_IPRIO_MASK 0xff
+#define APLIC_TARGET_IPRIO (APLIC_TARGET_IPRIO_MASK << \
+ APLIC_TARGET_IPRIO_SHIFT)
+#define APLIC_TARGET_EIID_SHIFT 0
+#define APLIC_TARGET_EIID_MASK 0x7ff
+#define APLIC_TARGET_EIID (APLIC_TARGET_EIID_MASK << \
+ APLIC_TARGET_EIID_SHIFT)
+
+#define APLIC_IDC_BASE 0x4000
+#define APLIC_IDC_SIZE 32
+
+#define APLIC_IDC_IDELIVERY 0x00
+
+#define APLIC_IDC_IFORCE 0x04
+
+#define APLIC_IDC_ITHRESHOLD 0x08
+
+#define APLIC_IDC_TOPI 0x18
+#define APLIC_IDC_TOPI_ID_SHIFT 16
+#define APLIC_IDC_TOPI_ID_MASK 0x3ff
+#define APLIC_IDC_TOPI_ID (APLIC_IDC_TOPI_ID_MASK << \
+ APLIC_IDC_TOPI_ID_SHIFT)
+#define APLIC_IDC_TOPI_PRIO_SHIFT 0
+#define APLIC_IDC_TOPI_PRIO_MASK 0xff
+#define APLIC_IDC_TOPI_PRIO (APLIC_IDC_TOPI_PRIO_MASK << \
+ APLIC_IDC_TOPI_PRIO_SHIFT)
+
+#define APLIC_IDC_CLAIMI 0x1c
+
+#endif
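Since these are plain mask/shift definitions, here is a short sketch of composing target register values from them; the helpers are illustrative only and touch no hardware:

#include <linux/irqchip/riscv-aplic.h>

/* Direct delivery: the low bits of the target register carry a priority. */
static u32 example_aplic_target_direct(u32 hart_idx, u32 prio)
{
	return ((hart_idx & APLIC_TARGET_HART_IDX_MASK) <<
		APLIC_TARGET_HART_IDX_SHIFT) |
	       ((prio & APLIC_TARGET_IPRIO_MASK) << APLIC_TARGET_IPRIO_SHIFT);
}

/* MSI delivery: the low bits carry an external interrupt ID instead. */
static u32 example_aplic_target_msi(u32 hart_idx, u32 guest_idx, u32 eiid)
{
	return ((hart_idx & APLIC_TARGET_HART_IDX_MASK) <<
		APLIC_TARGET_HART_IDX_SHIFT) |
	       ((guest_idx & APLIC_TARGET_GUEST_IDX_MASK) <<
		APLIC_TARGET_GUEST_IDX_SHIFT) |
	       ((eiid & APLIC_TARGET_EIID_MASK) << APLIC_TARGET_EIID_SHIFT);
}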
diff --git a/include/linux/irqchip/riscv-imsic.h b/include/linux/irqchip/riscv-imsic.h
new file mode 100644
index 000000000000..7f3ff5c5ea53
--- /dev/null
+++ b/include/linux/irqchip/riscv-imsic.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+#ifndef __LINUX_IRQCHIP_RISCV_IMSIC_H
+#define __LINUX_IRQCHIP_RISCV_IMSIC_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/fwnode.h>
+
+#define IMSIC_MMIO_PAGE_SHIFT 12
+#define IMSIC_MMIO_PAGE_SZ BIT(IMSIC_MMIO_PAGE_SHIFT)
+#define IMSIC_MMIO_PAGE_LE 0x00
+#define IMSIC_MMIO_PAGE_BE 0x04
+
+#define IMSIC_MIN_ID 63
+#define IMSIC_MAX_ID 2048
+
+#define IMSIC_EIDELIVERY 0x70
+
+#define IMSIC_EITHRESHOLD 0x72
+
+#define IMSIC_EIP0 0x80
+#define IMSIC_EIP63 0xbf
+#define IMSIC_EIPx_BITS 32
+
+#define IMSIC_EIE0 0xc0
+#define IMSIC_EIE63 0xff
+#define IMSIC_EIEx_BITS 32
+
+#define IMSIC_FIRST IMSIC_EIDELIVERY
+#define IMSIC_LAST IMSIC_EIE63
+
+#define IMSIC_MMIO_SETIPNUM_LE 0x00
+#define IMSIC_MMIO_SETIPNUM_BE 0x04
+
+struct imsic_local_config {
+ phys_addr_t msi_pa;
+ void __iomem *msi_va;
+};
+
+struct imsic_global_config {
+ /*
+ * MSI Target Address Scheme
+ *
+ * XLEN-1 12 0
+ * | | |
+ * -------------------------------------------------------------
+ * |xxxxxx|Group Index|xxxxxxxxxxx|HART Index|Guest Index| 0 |
+ * -------------------------------------------------------------
+ */
+
+ /* Bits representing Guest index, HART index, and Group index */
+ u32 guest_index_bits;
+ u32 hart_index_bits;
+ u32 group_index_bits;
+ u32 group_index_shift;
+
+ /* Global base address matching all target MSI addresses */
+ phys_addr_t base_addr;
+
+ /* Number of interrupt identities */
+ u32 nr_ids;
+
+ /* Number of guest interrupt identities */
+ u32 nr_guest_ids;
+
+ /* Per-CPU IMSIC addresses */
+ struct imsic_local_config __percpu *local;
+};
+
+#ifdef CONFIG_RISCV_IMSIC
+
+const struct imsic_global_config *imsic_get_global_config(void);
+
+#else
+
+static inline const struct imsic_global_config *imsic_get_global_config(void)
+{
+ return NULL;
+}
+
+#endif
+
+#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_RISCV_IMSIC)
+int imsic_platform_acpi_probe(struct fwnode_handle *fwnode);
+struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev);
+#else
+static inline struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev) { return NULL; }
+#endif
+
+#endif
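As an illustration of the address scheme drawn in the struct comment above, a sketch that recomputes a target MSI page address from the global configuration; real consumers would normally use the cached per-CPU imsic_local_config instead:

#include <linux/irqchip/riscv-imsic.h>

static phys_addr_t example_imsic_msi_addr(u32 group, u32 hart, u32 guest)
{
	const struct imsic_global_config *gc = imsic_get_global_config();
	phys_addr_t pa;

	if (!gc)		/* IMSIC absent or not yet probed */
		return 0;

	/* Guest index sits just above the 4K page offset, hart above it. */
	pa  = gc->base_addr;
	pa |= (phys_addr_t)guest << IMSIC_MMIO_PAGE_SHIFT;
	pa |= (phys_addr_t)hart << (gc->guest_index_bits + IMSIC_MMIO_PAGE_SHIFT);
	pa |= (phys_addr_t)group << gc->group_index_shift;

	return pa;
}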
diff --git a/include/linux/irqchip/versatile-fpga.h b/include/linux/irqchip/versatile-fpga.h
deleted file mode 100644
index a978fc8c7996..000000000000
--- a/include/linux/irqchip/versatile-fpga.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef PLAT_FPGA_IRQ_H
-#define PLAT_FPGA_IRQ_H
-
-struct device_node;
-struct pt_regs;
-
-void fpga_handle_irq(struct pt_regs *regs);
-void fpga_irq_init(void __iomem *, const char *, int, int, u32,
- struct device_node *node);
-int fpga_irq_of_init(struct device_node *node,
- struct device_node *parent);
-
-#endif
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index df4651250785..37e0b5b5600a 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -18,6 +18,18 @@ struct irq_domain;
struct pt_regs;
/**
+ * struct irqstat - interrupt statistics
+ * @cnt: real-time interrupt count
+ * @ref: snapshot of interrupt count
+ */
+struct irqstat {
+ unsigned int cnt;
+#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT
+ unsigned int ref;
+#endif
+};
+
+/**
* struct irq_desc - interrupt descriptor
* @irq_common_data: per irq and chip data passed down to chip functions
* @kstat_irqs: irq stats per cpu
@@ -55,7 +67,7 @@ struct pt_regs;
struct irq_desc {
struct irq_common_data irq_common_data;
struct irq_data irq_data;
- unsigned int __percpu *kstat_irqs;
+ struct irqstat __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
struct irqaction *action; /* IRQ action list */
unsigned int status_use_accessors;
@@ -70,7 +82,6 @@ struct irq_desc {
int threads_handled_last;
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
- const struct cpumask *percpu_affinity;
#ifdef CONFIG_SMP
const struct cpumask *affinity_hint;
struct irq_affinity_notify *affinity_notify;
@@ -102,6 +113,9 @@ struct irq_desc {
int parent_irq;
struct module *owner;
const char *name;
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+ struct hlist_node resend_node;
+#endif
} ____cacheline_internodealigned_in_smp;
#ifdef CONFIG_SPARSE_IRQ
@@ -116,7 +130,7 @@ extern struct irq_desc irq_desc[NR_IRQS];
static inline unsigned int irq_desc_kstat_cpu(struct irq_desc *desc,
unsigned int cpu)
{
- return desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
+ return desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0;
}
static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
@@ -158,28 +172,19 @@ static inline void generic_handle_irq_desc(struct irq_desc *desc)
desc->handle_irq(desc);
}
+int handle_irq_desc(struct irq_desc *desc);
int generic_handle_irq(unsigned int irq);
+int generic_handle_irq_safe(unsigned int irq);
-#ifdef CONFIG_HANDLE_DOMAIN_IRQ
+#ifdef CONFIG_IRQ_DOMAIN
/*
* Convert a HW interrupt number to a logical one using an IRQ domain,
* and handle the resulting interrupt number. Return -EINVAL if
- * conversion failed. Providing a NULL domain indicates that the
- * conversion has already been done.
+ * conversion failed.
*/
-int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
- bool lookup, struct pt_regs *regs);
-
-static inline int handle_domain_irq(struct irq_domain *domain,
- unsigned int hwirq, struct pt_regs *regs)
-{
- return __handle_domain_irq(domain, hwirq, true, regs);
-}
-
-#ifdef CONFIG_IRQ_DOMAIN
-int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
- struct pt_regs *regs);
-#endif
+int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq);
+int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq);
+int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq);
#endif
/* Test to see if a driver has successfully requested an irq */
@@ -219,14 +224,15 @@ static inline void irq_set_handler_locked(struct irq_data *data,
* Must be called with irq_desc locked and valid parameters.
*/
static inline void
-irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
+irq_set_chip_handler_name_locked(struct irq_data *data,
+ const struct irq_chip *chip,
irq_flow_handler_t handler, const char *name)
{
struct irq_desc *desc = irq_data_to_desc(data);
desc->handle_irq = handler;
desc->name = name;
- data->chip = chip;
+ data->chip = (struct irq_chip *)chip;
}
bool irq_check_status_bit(unsigned int irq, unsigned int bitmask);
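With handle_domain_irq() gone, drivers resolve and handle a hwirq in a single call to generic_handle_domain_irq(); in a chained flow handler the conversion looks roughly like the sketch below, where example_read_pending() is a hypothetical stand-in for the controller's pending-register read:

#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>

/* Hypothetical stand-in for reading the controller's pending bits. */
static unsigned long example_read_pending(void)
{
	return 0;
}

static void example_chained_handler(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	unsigned int hwirq;

	chained_irq_enter(chip, desc);

	pending = example_read_pending();
	for_each_set_bit(hwirq, &pending, BITS_PER_LONG)
		generic_handle_domain_irq(domain, hwirq);

	chained_irq_exit(chip, desc);
}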
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 62a8e3d23829..952d3c8dd6b7 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -1,36 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * irq_domain - IRQ translation domains
+ * irq_domain - IRQ Translation Domains
*
- * Translation infrastructure between hw and linux irq numbers. This is
- * helpful for interrupt controllers to implement mapping between hardware
- * irq numbers and the Linux irq number space.
- *
- * irq_domains also have hooks for translating device tree or other
- * firmware interrupt representations into a hardware irq number that
- * can be mapped back to a Linux irq number without any extra platform
- * support code.
- *
- * Interrupt controller "domain" data structure. This could be defined as a
- * irq domain controller. That is, it handles the mapping between hardware
- * and virtual interrupt numbers for a given interrupt domain. The domain
- * structure is generally created by the PIC code for a given PIC instance
- * (though a domain can cover more than one PIC if they have a flat number
- * model). It's the domain callbacks that are responsible for setting the
- * irq_chip on a given irq_desc after it's been mapped.
- *
- * The host code and data structures use a fwnode_handle pointer to
- * identify the domain. In some cases, and in order to preserve source
- * code compatibility, this fwnode pointer is "upgraded" to a DT
- * device_node. For those firmware infrastructures that do not provide
- * a unique identifier for an interrupt controller, the irq_domain
- * code offers a fwnode allocator.
+ * See Documentation/core-api/irq/irq-domain.rst for the details.
*/
#ifndef _LINUX_IRQDOMAIN_H
#define _LINUX_IRQDOMAIN_H
#include <linux/types.h>
+#include <linux/irqdomain_defs.h>
#include <linux/irqhandler.h>
#include <linux/of.h>
#include <linux/mutex.h>
@@ -41,12 +20,11 @@ struct fwnode_handle;
struct irq_domain;
struct irq_chip;
struct irq_data;
+struct irq_desc;
struct cpumask;
struct seq_file;
struct irq_affinity_desc;
-
-/* Number of irqs reserved for a legacy isa controller */
-#define NUM_ISA_INTERRUPTS 16
+struct msi_parent_ops;
#define IRQ_DOMAIN_IRQ_SPEC_PARAMS 16
@@ -61,41 +39,57 @@ struct irq_affinity_desc;
* pass a device-specific description of an interrupt.
*/
struct irq_fwspec {
- struct fwnode_handle *fwnode;
- int param_count;
- u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
+ struct fwnode_handle *fwnode;
+ int param_count;
+ u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];
};
-/*
- * Should several domains have the same device node, but serve
- * different purposes (for example one domain is for PCI/MSI, and the
- * other for wired IRQs), they can be distinguished using a
- * bus-specific token. Most domains are expected to only carry
- * DOMAIN_BUS_ANY.
+/**
+ * struct irq_fwspec_info - firmware-provided IRQ information structure
+ *
+ * @flags: Information validity flags
+ * @affinity: Affinity mask for this interrupt
+ *
+ * This structure reports firmware-specific information about an
+ * interrupt. The only significant information is the affinity of a
+ * per-CPU interrupt, but this is designed to be extended as required.
*/
-enum irq_domain_bus_token {
- DOMAIN_BUS_ANY = 0,
- DOMAIN_BUS_WIRED,
- DOMAIN_BUS_GENERIC_MSI,
- DOMAIN_BUS_PCI_MSI,
- DOMAIN_BUS_PLATFORM_MSI,
- DOMAIN_BUS_NEXUS,
- DOMAIN_BUS_IPI,
- DOMAIN_BUS_FSL_MC_MSI,
- DOMAIN_BUS_TI_SCI_INTA_MSI,
- DOMAIN_BUS_WAKEUP,
- DOMAIN_BUS_VMD_MSI,
+struct irq_fwspec_info {
+ unsigned long flags;
+ const struct cpumask *affinity;
};
+#define IRQ_FWSPEC_INFO_AFFINITY_VALID BIT(0)
+
+/* Conversion function from of_phandle_args fields to fwspec */
+void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args,
+ unsigned int count, struct irq_fwspec *fwspec);
+
/**
* struct irq_domain_ops - Methods for irq_domain objects
- * @match: Match an interrupt controller device node to a host, returns
- * 1 on a match
- * @map: Create or update a mapping between a virtual irq number and a hw
- * irq number. This is called only once for a given mapping.
- * @unmap: Dispose of such a mapping
- * @xlate: Given a device tree node and interrupt specifier, decode
- * the hardware irq number and linux irq type value.
+ * @match: Match an interrupt controller device node to a domain, returns
+ * 1 on a match
+ * @select: Match an interrupt controller fw specification. It is more generic
+ * than @match as it receives a complete struct irq_fwspec. Therefore,
+ * @select is preferred if provided. Returns 1 on a match.
+ * @map: Create or update a mapping between a virtual irq number and a hw
+ * irq number. This is called only once for a given mapping.
+ * @unmap: Dispose of such a mapping
+ * @xlate: Given a device tree node and interrupt specifier, decode
+ * the hardware irq number and linux irq type value.
+ * @alloc: Allocate @nr_irqs interrupts starting from @virq.
+ * @free: Free @nr_irqs interrupts starting from @virq.
+ * @activate: Activate one interrupt in HW (@irqd). If @reserve is set, only
+ * reserve the vector. If unset, assign the vector (called from
+ * request_irq()).
+ * @deactivate: Disarm one interrupt (@irqd).
+ * @translate: Given @fwspec, decode the hardware irq number (@out_hwirq) and
+ * linux irq type value (@out_type). This is a generalised @xlate
+ * (over struct irq_fwspec) and is preferred if provided.
+ * @get_fwspec_info:
+ * Given @fwspec, report additional firmware-provided information in
+ * @info. Optional.
+ * @debug_show: For domains to show specific data for an interrupt in debugfs.
*
* Functions below are provided by the driver and called whenever a new mapping
* is created or an old mapping is disposed. The driver can then proceed to
@@ -103,84 +97,103 @@ enum irq_domain_bus_token {
* to set up the irq_desc when returning from map().
*/
struct irq_domain_ops {
- int (*match)(struct irq_domain *d, struct device_node *node,
- enum irq_domain_bus_token bus_token);
- int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec,
- enum irq_domain_bus_token bus_token);
- int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
- void (*unmap)(struct irq_domain *d, unsigned int virq);
- int (*xlate)(struct irq_domain *d, struct device_node *node,
- const u32 *intspec, unsigned int intsize,
- unsigned long *out_hwirq, unsigned int *out_type);
+ int (*match)(struct irq_domain *d, struct device_node *node,
+ enum irq_domain_bus_token bus_token);
+ int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
+ int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
+ void (*unmap)(struct irq_domain *d, unsigned int virq);
+ int (*xlate)(struct irq_domain *d, struct device_node *node,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/* extended V2 interfaces to support hierarchy irq_domains */
- int (*alloc)(struct irq_domain *d, unsigned int virq,
- unsigned int nr_irqs, void *arg);
- void (*free)(struct irq_domain *d, unsigned int virq,
- unsigned int nr_irqs);
- int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
- void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
- int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
- unsigned long *out_hwirq, unsigned int *out_type);
+ int (*alloc)(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg);
+ void (*free)(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs);
+ int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve);
+ void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
+ int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
+ int (*get_fwspec_info)(struct irq_fwspec *fwspec, struct irq_fwspec_info *info);
#endif
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
- void (*debug_show)(struct seq_file *m, struct irq_domain *d,
- struct irq_data *irqd, int ind);
+ void (*debug_show)(struct seq_file *m, struct irq_domain *d,
+ struct irq_data *irqd, int ind);
#endif
};
-extern struct irq_domain_ops irq_generic_chip_ops;
+extern const struct irq_domain_ops irq_generic_chip_ops;
struct irq_domain_chip_generic;
/**
* struct irq_domain - Hardware interrupt number translation object
- * @link: Element in global irq_domain list.
- * @name: Name of interrupt domain
- * @ops: pointer to irq_domain methods
- * @host_data: private data pointer for use by owner. Not touched by irq_domain
- * core code.
- * @flags: host per irq_domain flags
- * @mapcount: The number of mapped interrupts
+ * @link: Element in global irq_domain list.
+ * @name: Name of interrupt domain
+ * @ops: Pointer to irq_domain methods
+ * @host_data: Private data pointer for use by owner. Not touched by irq_domain
+ * core code.
+ * @flags: Per irq_domain flags
+ * @mapcount: The number of mapped interrupts
+ * @mutex: Domain lock, hierarchical domains use root domain's lock
+ * @root: Pointer to root domain, or containing structure if non-hierarchical
*
- * Optional elements
- * @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy
- * to swap it for the of_node via the irq_domain_get_of_node accessor
- * @gc: Pointer to a list of generic chips. There is a helper function for
- * setting up one or more generic chips for interrupt controllers
- * drivers using the generic chip library which uses this pointer.
- * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
+ * Optional elements:
+ * @fwnode: Pointer to firmware node associated with the irq_domain. Pretty easy
+ * to swap it for the of_node via the irq_domain_get_of_node accessor
+ * @bus_token: @fwnode's device_node might be used for several irq domains. But
+ * in connection with @bus_token, the pair shall be unique in a
+ * system.
+ * @gc: Pointer to a list of generic chips. There is a helper function for
+ * setting up one or more generic chips for interrupt controllers
+ * drivers using the generic chip library which uses this pointer.
+ * @dev: Pointer to the device which instantiated the irqdomain.
+ * With per device irq domains this is not necessarily the same
+ * as @pm_dev.
+ * @pm_dev: Pointer to a device that can be utilized for power management
+ * purposes related to the irq domain.
+ * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
+ * @msi_parent_ops: Pointer to MSI parent domain methods for per device domain init
+ * @exit: Function called when the domain is destroyed
*
- * Revmap data, used internally by irq_domain
- * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
- * support direct mapping
- * @revmap_size: Size of the linear map table @linear_revmap[]
- * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
- * @linear_revmap: Linear table of hwirq->virq reverse mappings
+ * Revmap data, used internally by the irq domain code:
+ * @hwirq_max: Top limit for the HW irq number, chiefly to avoid
+ * conflicts/failures with reserved HW irqs. Can be ~0.
+ * @revmap_size: Size of the linear map table @revmap
+ * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
+ * @revmap: Linear table of irq_data pointers
*/
struct irq_domain {
- struct list_head link;
- const char *name;
- const struct irq_domain_ops *ops;
- void *host_data;
- unsigned int flags;
- unsigned int mapcount;
+ struct list_head link;
+ const char *name;
+ const struct irq_domain_ops *ops;
+ void *host_data;
+ unsigned int flags;
+ unsigned int mapcount;
+ struct mutex mutex;
+ struct irq_domain *root;
/* Optional data */
- struct fwnode_handle *fwnode;
- enum irq_domain_bus_token bus_token;
- struct irq_domain_chip_generic *gc;
+ struct fwnode_handle *fwnode;
+ enum irq_domain_bus_token bus_token;
+ struct irq_domain_chip_generic *gc;
+ struct device *dev;
+ struct device *pm_dev;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
- struct irq_domain *parent;
+ struct irq_domain *parent;
+#endif
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ const struct msi_parent_ops *msi_parent_ops;
#endif
+ void (*exit)(struct irq_domain *d);
/* reverse map data. The linear map gets appended to the irq_domain */
- irq_hw_number_t hwirq_max;
- unsigned int revmap_direct_max_irq;
- unsigned int revmap_size;
- struct radix_tree_root revmap_tree;
- struct mutex revmap_tree_mutex;
- unsigned int linear_revmap[];
+ irq_hw_number_t hwirq_max;
+ unsigned int revmap_size;
+ struct radix_tree_root revmap_tree;
+ struct irq_data __rcu *revmap[] __counted_by(revmap_size);
};
/* Irq domain flags */
@@ -188,7 +201,7 @@ enum {
/* Irq domain is hierarchical */
IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
- /* Irq domain name was allocated in __irq_domain_add() */
+ /* Irq domain name was allocated internally */
IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1),
/* Irq domain is an IPI domain with virq per cpu */
@@ -200,15 +213,28 @@ enum {
/* Irq domain implements MSIs */
IRQ_DOMAIN_FLAG_MSI = (1 << 4),
- /* Irq domain implements MSI remapping */
- IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5),
-
/*
- * Quirk to handle MSI implementations which do not provide
- * masking. Currently known to affect x86, but partially
- * handled in core code.
+ * Irq domain implements isolated MSI, see msi_device_has_isolated_msi()
*/
- IRQ_DOMAIN_MSI_NOMASK_QUIRK = (1 << 6),
+ IRQ_DOMAIN_FLAG_ISOLATED_MSI = (1 << 5),
+
+ /* Irq domain doesn't translate anything */
+ IRQ_DOMAIN_FLAG_NO_MAP = (1 << 6),
+
+ /* Irq domain is a MSI parent domain */
+ IRQ_DOMAIN_FLAG_MSI_PARENT = (1 << 8),
+
+ /* Irq domain is a MSI device domain */
+ IRQ_DOMAIN_FLAG_MSI_DEVICE = (1 << 9),
+
+ /* Irq domain must destroy generic chips when removed */
+ IRQ_DOMAIN_FLAG_DESTROY_GC = (1 << 10),
+
+ /* The MSI address/data pair is immutable, even across irq_set_affinity() */
+ IRQ_DOMAIN_FLAG_MSI_IMMUTABLE = (1 << 11),
+
+ /* IRQ domain requires parent fwnode matching */
+ IRQ_DOMAIN_FLAG_FWNODE_PARENT = (1 << 12),
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
@@ -223,6 +249,12 @@ static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
return to_of_node(d->fwnode);
}
+static inline void irq_domain_set_pm_device(struct irq_domain *d, struct device *dev)
+{
+ if (d)
+ d->pm_dev = dev;
+}
+
#ifdef CONFIG_IRQ_DOMAIN
struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
const char *name, phys_addr_t *pa);
@@ -233,14 +265,12 @@ enum {
IRQCHIP_FWNODE_NAMED_ID,
};
-static inline
-struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name)
+static inline struct fwnode_handle *irq_domain_alloc_named_fwnode(const char *name)
{
return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0, name, NULL);
}
-static inline
-struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id)
+static inline struct fwnode_handle *irq_domain_alloc_named_id_fwnode(const char *name, int id)
{
return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED_ID, id, name,
NULL);
@@ -252,54 +282,87 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
}
void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
- irq_hw_number_t hwirq_max, int direct_max,
- const struct irq_domain_ops *ops,
- void *host_data);
-struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode,
- unsigned int size,
- unsigned int first_irq,
- const struct irq_domain_ops *ops,
- void *host_data);
-struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
- unsigned int size,
- unsigned int first_irq,
- irq_hw_number_t first_hwirq,
- const struct irq_domain_ops *ops,
- void *host_data);
-struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode,
- unsigned int size,
- unsigned int first_irq,
- irq_hw_number_t first_hwirq,
- const struct irq_domain_ops *ops,
- void *host_data);
-extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
- enum irq_domain_bus_token bus_token);
-extern bool irq_domain_check_msi_remap(void);
-extern void irq_set_default_host(struct irq_domain *host);
-extern struct irq_domain *irq_get_default_host(void);
-extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
- irq_hw_number_t hwirq, int node,
- const struct irq_affinity_desc *affinity);
-static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
-{
- return node ? &node->fwnode : NULL;
-}
+DEFINE_FREE(irq_domain_free_fwnode, struct fwnode_handle *, if (_T) irq_domain_free_fwnode(_T))
+
+struct irq_domain_chip_generic_info;
+
+/**
+ * struct irq_domain_info - Domain information structure
+ * @fwnode: firmware node for the interrupt controller
+ * @domain_flags: Additional flags to add to the domain flags
+ * @size: Size of linear map; 0 for radix mapping only
+ * @hwirq_max: Maximum number of interrupts supported by controller
+ * @direct_max: Maximum value of direct maps;
+ * Use ~0 for no limit; 0 for no direct mapping
+ * @hwirq_base: The first hardware interrupt number (legacy domains only)
+ * @virq_base: The first Linux interrupt number for legacy domains to
+ * immediately associate the interrupts after domain creation
+ * @bus_token: Domain bus token
+ * @name_suffix: Optional name suffix to avoid collisions when multiple
+ * domains are added using the same fwnode
+ * @ops: Domain operation callbacks
+ * @host_data: Controller private data pointer
+ * @dev: Device which creates the domain
+ * @dgc_info: Generic chip information structure pointer used to
+ * create generic chips for the domain if not NULL.
+ * @init: Function called when the domain is created.
+ * Allows some additional domain initialisation.
+ * @exit: Function called when the domain is destroyed.
+ * Allows some additional cleanup operations.
+ */
+struct irq_domain_info {
+ struct fwnode_handle *fwnode;
+ unsigned int domain_flags;
+ unsigned int size;
+ irq_hw_number_t hwirq_max;
+ int direct_max;
+ unsigned int hwirq_base;
+ unsigned int virq_base;
+ enum irq_domain_bus_token bus_token;
+ const char *name_suffix;
+ const struct irq_domain_ops *ops;
+ void *host_data;
+ struct device *dev;
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ /**
+ * @parent: Pointer to the parent irq domain used in a hierarchy domain
+ */
+ struct irq_domain *parent;
+#endif
+ struct irq_domain_chip_generic_info *dgc_info;
+ int (*init)(struct irq_domain *d);
+ void (*exit)(struct irq_domain *d);
+};
+
+struct irq_domain *irq_domain_instantiate(const struct irq_domain_info *info);
+struct irq_domain *devm_irq_domain_instantiate(struct device *dev,
+ const struct irq_domain_info *info);
+
+struct irq_domain *irq_domain_create_simple(struct fwnode_handle *fwnode, unsigned int size,
+ unsigned int first_irq,
+ const struct irq_domain_ops *ops, void *host_data);
+struct irq_domain *irq_domain_create_legacy(struct fwnode_handle *fwnode, unsigned int size,
+ unsigned int first_irq, irq_hw_number_t first_hwirq,
+ const struct irq_domain_ops *ops, void *host_data);
+struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token);
+void irq_set_default_domain(struct irq_domain *domain);
+struct irq_domain *irq_get_default_domain(void);
+int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, irq_hw_number_t hwirq, int node,
+ const struct irq_affinity_desc *affinity);
extern const struct fwnode_operations irqchip_fwnode_ops;
-static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode)
+static inline bool is_fwnode_irqchip(const struct fwnode_handle *fwnode)
{
return fwnode && fwnode->ops == &irqchip_fwnode_ops;
}
-extern void irq_domain_update_bus_token(struct irq_domain *domain,
- enum irq_domain_bus_token bus_token);
+void irq_domain_update_bus_token(struct irq_domain *domain, enum irq_domain_bus_token bus_token);
-static inline
-struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
- enum irq_domain_bus_token bus_token)
+static inline struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+ enum irq_domain_bus_token bus_token)
{
struct irq_fwspec fwspec = {
.fwnode = fwnode,
@@ -311,7 +374,7 @@ struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
static inline struct irq_domain *irq_find_matching_host(struct device_node *node,
enum irq_domain_bus_token bus_token)
{
- return irq_find_matching_fwnode(of_node_to_fwnode(node), bus_token);
+ return irq_find_matching_fwnode(of_fwnode_handle(node), bus_token);
}
static inline struct irq_domain *irq_find_host(struct device_node *node)
@@ -325,201 +388,242 @@ static inline struct irq_domain *irq_find_host(struct device_node *node)
return d;
}
-static inline struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
- unsigned int size,
- unsigned int first_irq,
- const struct irq_domain_ops *ops,
- void *host_data)
+#ifdef CONFIG_IRQ_DOMAIN_NOMAP
+static inline struct irq_domain *irq_domain_create_nomap(struct fwnode_handle *fwnode,
+ unsigned int max_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
{
- return irq_domain_create_simple(of_node_to_fwnode(of_node), size, first_irq, ops, host_data);
+ const struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .hwirq_max = max_irq,
+ .direct_max = max_irq,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d = irq_domain_instantiate(&info);
+
+ return IS_ERR(d) ? NULL : d;
}
+unsigned int irq_create_direct_mapping(struct irq_domain *domain);
+#endif
+
/**
- * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
- * @of_node: pointer to interrupt controller's device tree node.
- * @size: Number of interrupts in the domain.
- * @ops: map/unmap domain callbacks
- * @host_data: Controller private data pointer
+ * irq_domain_create_linear - Allocate and register a linear revmap irq_domain.
+ * @fwnode: pointer to interrupt controller's FW node.
+ * @size: Number of interrupts in the domain.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Returns: Newly created irq_domain
*/
-static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
- unsigned int size,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
-}
-static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
- unsigned int max_irq,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return __irq_domain_add(of_node_to_fwnode(of_node), 0, max_irq, max_irq, ops, host_data);
-}
-static inline struct irq_domain *irq_domain_add_legacy_isa(
- struct device_node *of_node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops,
- host_data);
-}
-static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return __irq_domain_add(of_node_to_fwnode(of_node), 0, ~0, 0, ops, host_data);
-}
-
static inline struct irq_domain *irq_domain_create_linear(struct fwnode_handle *fwnode,
- unsigned int size,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return __irq_domain_add(fwnode, size, size, 0, ops, host_data);
+ unsigned int size,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ const struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .size = size,
+ .hwirq_max = size,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d = irq_domain_instantiate(&info);
+
+ return IS_ERR(d) ? NULL : d;
}
static inline struct irq_domain *irq_domain_create_tree(struct fwnode_handle *fwnode,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return __irq_domain_add(fwnode, 0, ~0, 0, ops, host_data);
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ const struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .hwirq_max = ~0,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d = irq_domain_instantiate(&info);
+
+ return IS_ERR(d) ? NULL : d;
}
-extern void irq_domain_remove(struct irq_domain *host);
+void irq_domain_remove(struct irq_domain *domain);
-extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq);
-extern void irq_domain_associate_many(struct irq_domain *domain,
- unsigned int irq_base,
- irq_hw_number_t hwirq_base, int count);
+int irq_domain_associate(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq);
+void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
+ irq_hw_number_t hwirq_base, int count);
-extern unsigned int irq_create_mapping_affinity(struct irq_domain *host,
- irq_hw_number_t hwirq,
- const struct irq_affinity_desc *affinity);
-extern unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
-extern void irq_dispose_mapping(unsigned int virq);
+unsigned int irq_create_mapping_affinity(struct irq_domain *domain, irq_hw_number_t hwirq,
+ const struct irq_affinity_desc *affinity);
+unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec);
+void irq_dispose_mapping(unsigned int virq);
-static inline unsigned int irq_create_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq)
+/**
+ * irq_create_mapping - Map a hardware interrupt into linux irq space
+ * @domain: domain owning this hardware interrupt or NULL for default domain
+ * @hwirq: hardware irq number in that domain space
+ *
+ * Only one mapping per hardware interrupt is permitted.
+ *
+ * If the sense/trigger is to be specified, set_irq_type() should be called
+ * on the number returned from that call.
+ *
+ * Returns: Linux irq number or 0 on error
+ */
+static inline unsigned int irq_create_mapping(struct irq_domain *domain, irq_hw_number_t hwirq)
{
- return irq_create_mapping_affinity(host, hwirq, NULL);
+ return irq_create_mapping_affinity(domain, hwirq, NULL);
}
+struct irq_desc *__irq_resolve_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq,
+ unsigned int *irq);
/**
- * irq_linear_revmap() - Find a linux irq from a hw irq number.
- * @domain: domain owning this hardware interrupt
- * @hwirq: hardware irq number in that domain space
+ * irq_resolve_mapping - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
*
- * This is a fast path alternative to irq_find_mapping() that can be
- * called directly by irq controller code to save a handful of
- * instructions. It is always safe to call, but won't find irqs mapped
- * using the radix tree.
+ * Returns: Interrupt descriptor
*/
-static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
- irq_hw_number_t hwirq)
+static inline struct irq_desc *irq_resolve_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
{
- return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0;
+ return __irq_resolve_mapping(domain, hwirq, NULL);
+}
+
+/**
+ * irq_find_mapping() - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * Returns: Linux irq number or 0 if not found
+ */
+static inline unsigned int irq_find_mapping(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
+{
+ unsigned int irq;
+
+ if (__irq_resolve_mapping(domain, hwirq, &irq))
+ return irq;
+
+ return 0;
}
-extern unsigned int irq_find_mapping(struct irq_domain *host,
- irq_hw_number_t hwirq);
-extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
extern const struct irq_domain_ops irq_domain_simple_ops;
/* stock xlate functions */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq, unsigned int *out_type);
-
-int irq_domain_translate_twocell(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- unsigned long *out_hwirq,
- unsigned int *out_type);
-
-int irq_domain_translate_onecell(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- unsigned long *out_hwirq,
- unsigned int *out_type);
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+int irq_domain_xlate_twothreecell(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+
+int irq_domain_translate_onecell(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
+int irq_domain_translate_twocell(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
+int irq_domain_translate_twothreecell(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq, unsigned int *out_type);
/* IPI functions */
int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest);
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
/* V2 interfaces to support hierarchy IRQ domains. */
-extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
- unsigned int virq);
-extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hwirq, struct irq_chip *chip,
- void *chip_data, irq_flow_handler_t handler,
- void *handler_data, const char *handler_name);
-extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
+struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, unsigned int virq);
+void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq,
+ const struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler,
+ void *handler_data, const char *handler_name);
+void irq_domain_reset_irq_data(struct irq_data *irq_data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
-extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
- unsigned int flags, unsigned int size,
- struct fwnode_handle *fwnode,
- const struct irq_domain_ops *ops, void *host_data);
-
-static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
- unsigned int flags,
- unsigned int size,
- struct device_node *node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- return irq_domain_create_hierarchy(parent, flags, size,
- of_node_to_fwnode(node),
- ops, host_data);
-}
-
-extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
- unsigned int nr_irqs, int node, void *arg,
- bool realloc,
- const struct irq_affinity_desc *affinity);
-extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
-extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
-extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
-
-static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
- unsigned int nr_irqs, int node, void *arg)
-{
- return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false,
- NULL);
-}
-
-extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
- unsigned int irq_base,
- unsigned int nr_irqs, void *arg);
-extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
- unsigned int virq,
- irq_hw_number_t hwirq,
- struct irq_chip *chip,
- void *chip_data);
-extern void irq_domain_free_irqs_common(struct irq_domain *domain,
- unsigned int virq,
- unsigned int nr_irqs);
-extern void irq_domain_free_irqs_top(struct irq_domain *domain,
- unsigned int virq, unsigned int nr_irqs);
-
-extern int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
-extern int irq_domain_pop_irq(struct irq_domain *domain, int virq);
-
-extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
- unsigned int irq_base,
- unsigned int nr_irqs, void *arg);
-
-extern void irq_domain_free_irqs_parent(struct irq_domain *domain,
- unsigned int irq_base,
- unsigned int nr_irqs);
-
-extern int irq_domain_disconnect_hierarchy(struct irq_domain *domain,
- unsigned int virq);
+/**
+ * irq_domain_create_hierarchy - Add an irqdomain into the hierarchy
+ * @parent: Parent irq domain to associate with the new domain
+ * @flags: Irq domain flags associated with the domain
+ * @size: Size of the domain. See below
+ * @fwnode: Optional fwnode of the interrupt controller
+ * @ops: Pointer to the interrupt domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * If @size is 0 a tree domain is created, otherwise a linear domain.
+ *
+ * If successful the parent is associated to the new domain and the
+ * domain flags are set.
+ *
+ * Returns: A pointer to IRQ domain, or %NULL on failure.
+ */
+static inline struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
+ unsigned int flags, unsigned int size,
+ struct fwnode_handle *fwnode,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ const struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .size = size,
+ .hwirq_max = size ? : ~0U,
+ .ops = ops,
+ .host_data = host_data,
+ .domain_flags = flags,
+ .parent = parent,
+ };
+ struct irq_domain *d = irq_domain_instantiate(&info);
+
+ return IS_ERR(d) ? NULL : d;
+}
+
+int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs,
+ int node, void *arg, bool realloc,
+ const struct irq_affinity_desc *affinity);
+void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
+int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
+void irq_domain_deactivate_irq(struct irq_data *irq_data);
+
+/**
+ * irq_domain_alloc_irqs - Allocate IRQs from domain
+ * @domain: domain to allocate from
+ * @nr_irqs: number of IRQs to allocate
+ * @node: NUMA node id for memory allocation
+ * @arg: domain specific argument
+ *
+ * See the documentation of __irq_domain_alloc_irqs().
+ */
+static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs,
+ int node, void *arg)
+{
+ return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false, NULL);
+}
+
+int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq, const struct irq_chip *chip,
+ void *chip_data);
+void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs);
+void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs);
+
+int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
+int irq_domain_pop_irq(struct irq_domain *domain, int virq);
+
+int irq_domain_alloc_irqs_parent(struct irq_domain *domain, unsigned int irq_base,
+ unsigned int nr_irqs, void *arg);
+
+void irq_domain_free_irqs_parent(struct irq_domain *domain, unsigned int irq_base,
+ unsigned int nr_irqs);
+
+int irq_domain_disconnect_hierarchy(struct irq_domain *domain, unsigned int virq);
+
+int irq_populate_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info);
static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
@@ -528,8 +632,7 @@ static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
static inline bool irq_domain_is_ipi(struct irq_domain *domain)
{
- return domain->flags &
- (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE);
+ return domain->flags & (IRQ_DOMAIN_FLAG_IPI_PER_CPU | IRQ_DOMAIN_FLAG_IPI_SINGLE);
}
static inline bool irq_domain_is_ipi_per_cpu(struct irq_domain *domain)
@@ -547,22 +650,28 @@ static inline bool irq_domain_is_msi(struct irq_domain *domain)
return domain->flags & IRQ_DOMAIN_FLAG_MSI;
}
-static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+static inline bool irq_domain_is_msi_parent(struct irq_domain *domain)
{
- return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP;
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI_PARENT;
}
-extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
+static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI_DEVICE;
+}
+static inline bool irq_domain_is_msi_immutable(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_MSI_IMMUTABLE;
+}
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
-static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
- unsigned int nr_irqs, int node, void *arg)
+static inline int irq_domain_alloc_irqs(struct irq_domain *domain, unsigned int nr_irqs,
+ int node, void *arg)
{
return -1;
}
-static inline void irq_domain_free_irqs(unsigned int virq,
- unsigned int nr_irqs) { }
+static inline void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs) { }
static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
{
@@ -589,29 +698,79 @@ static inline bool irq_domain_is_msi(struct irq_domain *domain)
return false;
}
-static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
+static inline bool irq_domain_is_msi_parent(struct irq_domain *domain)
{
return false;
}
-static inline bool
-irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
+static inline bool irq_domain_is_msi_device(struct irq_domain *domain)
{
return false;
}
+
+static inline int irq_populate_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+#ifdef CONFIG_GENERIC_MSI_IRQ
+int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq, unsigned int type);
+void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq);
+#else
+static inline int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq,
+ unsigned int type)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+static inline void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq)
+{
+ WARN_ON_ONCE(1);
+}
+#endif
+
+static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(of_node),
+ .hwirq_max = ~0U,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d;
+
+ d = irq_domain_instantiate(&info);
+ return IS_ERR(d) ? NULL : d;
+}
+
+static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
+ unsigned int size,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(of_node),
+ .size = size,
+ .hwirq_max = size,
+ .ops = ops,
+ .host_data = host_data,
+ };
+ struct irq_domain *d;
+
+ d = irq_domain_instantiate(&info);
+ return IS_ERR(d) ? NULL : d;
+}
+
#else /* CONFIG_IRQ_DOMAIN */
static inline void irq_dispose_mapping(unsigned int virq) { }
-static inline struct irq_domain *irq_find_matching_fwnode(
- struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token)
+static inline struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
+ enum irq_domain_bus_token bus_token)
{
return NULL;
}
-static inline bool irq_domain_check_msi_remap(void)
-{
- return false;
-}
#endif /* !CONFIG_IRQ_DOMAIN */
#endif /* _LINUX_IRQDOMAIN_H */
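Pulling the new creation path together, a sketch of a driver instantiating a small linear domain through struct irq_domain_info directly; the example_* names are hypothetical, and a real driver would supply .map or .alloc callbacks as well:

#include <linux/irqdomain.h>

static const struct irq_domain_ops example_ops = {
	.xlate = irq_domain_xlate_onecell,
};

static struct irq_domain *example_create_domain(struct fwnode_handle *fwnode)
{
	const struct irq_domain_info info = {
		.fwnode		= fwnode,
		.size		= 32,	/* linear map for 32 interrupt lines */
		.hwirq_max	= 32,
		.ops		= &example_ops,
	};

	/*
	 * Unlike the irq_domain_create_linear() wrapper, the raw call
	 * propagates an ERR_PTR() instead of folding errors into NULL.
	 */
	return irq_domain_instantiate(&info);
}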
diff --git a/include/linux/irqdomain_defs.h b/include/linux/irqdomain_defs.h
new file mode 100644
index 000000000000..36653e2ee1c9
--- /dev/null
+++ b/include/linux/irqdomain_defs.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IRQDOMAIN_DEFS_H
+#define _LINUX_IRQDOMAIN_DEFS_H
+
+/*
+ * Should several domains have the same device node, but serve
+ * different purposes (for example one domain is for PCI/MSI, and the
+ * other for wired IRQs), they can be distinguished using a
+ * bus-specific token. Most domains are expected to only carry
+ * DOMAIN_BUS_ANY.
+ */
+enum irq_domain_bus_token {
+ DOMAIN_BUS_ANY = 0,
+ DOMAIN_BUS_WIRED,
+ DOMAIN_BUS_GENERIC_MSI,
+ DOMAIN_BUS_PCI_MSI,
+ DOMAIN_BUS_PLATFORM_MSI,
+ DOMAIN_BUS_NEXUS,
+ DOMAIN_BUS_IPI,
+ DOMAIN_BUS_FSL_MC_MSI,
+ DOMAIN_BUS_TI_SCI_INTA_MSI,
+ DOMAIN_BUS_WAKEUP,
+ DOMAIN_BUS_VMD_MSI,
+ DOMAIN_BUS_PCI_DEVICE_MSI,
+ DOMAIN_BUS_PCI_DEVICE_MSIX,
+ DOMAIN_BUS_DMAR,
+ DOMAIN_BUS_AMDVI,
+ DOMAIN_BUS_DEVICE_MSI,
+ DOMAIN_BUS_WIRED_TO_MSI,
+};
+
+#endif /* _LINUX_IRQDOMAIN_DEFS_H */
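
A short sketch of what the bus tokens are for, assuming the irq_find_matching_fwnode() declaration visible earlier in this patch; the function name is illustrative:

#include <linux/irqdomain.h>

/* When one fwnode hosts several domains (e.g. wired IRQs plus MSI),
 * the bus token disambiguates which one a lookup should return. */
static struct irq_domain *demo_find_wired_domain(struct fwnode_handle *fwnode)
{
        return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_WIRED);
}
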
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 600c10da321a..57b074e0cfbb 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -12,40 +12,35 @@
#ifndef _LINUX_TRACE_IRQFLAGS_H
#define _LINUX_TRACE_IRQFLAGS_H
+#include <linux/irqflags_types.h>
#include <linux/typecheck.h>
+#include <linux/cleanup.h>
#include <asm/irqflags.h>
#include <asm/percpu.h>
+struct task_struct;
+
/* Currently lockdep_softirqs_on/off is used only by lockdep */
#ifdef CONFIG_PROVE_LOCKING
extern void lockdep_softirqs_on(unsigned long ip);
extern void lockdep_softirqs_off(unsigned long ip);
- extern void lockdep_hardirqs_on_prepare(unsigned long ip);
+ extern void lockdep_hardirqs_on_prepare(void);
extern void lockdep_hardirqs_on(unsigned long ip);
extern void lockdep_hardirqs_off(unsigned long ip);
+ extern void lockdep_cleanup_dead_cpu(unsigned int cpu,
+ struct task_struct *idle);
#else
static inline void lockdep_softirqs_on(unsigned long ip) { }
static inline void lockdep_softirqs_off(unsigned long ip) { }
- static inline void lockdep_hardirqs_on_prepare(unsigned long ip) { }
+ static inline void lockdep_hardirqs_on_prepare(void) { }
static inline void lockdep_hardirqs_on(unsigned long ip) { }
static inline void lockdep_hardirqs_off(unsigned long ip) { }
+ static inline void lockdep_cleanup_dead_cpu(unsigned int cpu,
+ struct task_struct *idle) {}
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
-/* Per-task IRQ trace events information. */
-struct irqtrace_events {
- unsigned int irq_events;
- unsigned long hardirq_enable_ip;
- unsigned long hardirq_disable_ip;
- unsigned int hardirq_enable_event;
- unsigned int hardirq_disable_event;
- unsigned long softirq_disable_ip;
- unsigned long softirq_enable_ip;
- unsigned int softirq_disable_event;
- unsigned int softirq_enable_event;
-};
-
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
@@ -71,14 +66,6 @@ do { \
do { \
__this_cpu_dec(hardirq_context); \
} while (0)
-# define lockdep_softirq_enter() \
-do { \
- current->softirq_context++; \
-} while (0)
-# define lockdep_softirq_exit() \
-do { \
- current->softirq_context--; \
-} while (0)
# define lockdep_hrtimer_enter(__hrtimer) \
({ \
@@ -133,13 +120,28 @@ do { \
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
# define lockdep_hrtimer_enter(__hrtimer) false
-# define lockdep_hrtimer_exit(__context) do { } while (0)
+# define lockdep_hrtimer_exit(__context) do { (void)(__context); } while (0)
# define lockdep_posixtimer_enter() do { } while (0)
# define lockdep_posixtimer_exit() do { } while (0)
# define lockdep_irq_work_enter(__work) do { } while (0)
# define lockdep_irq_work_exit(__work) do { } while (0)
#endif
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT)
+# define lockdep_softirq_enter() \
+do { \
+ current->softirq_context++; \
+} while (0)
+# define lockdep_softirq_exit() \
+do { \
+ current->softirq_context--; \
+} while (0)
+
+#else
+# define lockdep_softirq_enter() do { } while (0)
+# define lockdep_softirq_exit() do { } while (0)
+#endif
+
#if defined(CONFIG_IRQSOFF_TRACER) || \
defined(CONFIG_PREEMPT_TRACER)
extern void stop_critical_timings(void);
@@ -260,4 +262,10 @@ extern void warn_bogus_irq_restore(void);
#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
+DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable())
+DEFINE_LOCK_GUARD_0(irqsave,
+ local_irq_save(_T->flags),
+ local_irq_restore(_T->flags),
+ unsigned long flags)
+
#endif
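
A minimal usage sketch for the scope guards defined above, assuming the <linux/cleanup.h> guard() semantics; the counter and function name are illustrative:

#include <linux/irqflags.h>

static unsigned long demo_counter;

static void demo_update(void)
{
        /* Saves and disables local interrupts here; local_irq_restore()
         * runs automatically at scope exit, even on early return. */
        guard(irqsave)();
        demo_counter++;
}
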
diff --git a/include/linux/irqflags_types.h b/include/linux/irqflags_types.h
new file mode 100644
index 000000000000..c13f0d915097
--- /dev/null
+++ b/include/linux/irqflags_types.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IRQFLAGS_TYPES_H
+#define _LINUX_IRQFLAGS_TYPES_H
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+/* Per-task IRQ trace events information. */
+struct irqtrace_events {
+ unsigned int irq_events;
+ unsigned long hardirq_enable_ip;
+ unsigned long hardirq_disable_ip;
+ unsigned int hardirq_enable_event;
+ unsigned int hardirq_disable_event;
+ unsigned long softirq_disable_ip;
+ unsigned long softirq_enable_ip;
+ unsigned int softirq_disable_event;
+ unsigned int softirq_enable_event;
+};
+
+#endif
+
+#endif /* _LINUX_IRQFLAGS_TYPES_H */
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
index c30f454a9518..72dd1eb3a0e7 100644
--- a/include/linux/irqhandler.h
+++ b/include/linux/irqhandler.h
@@ -8,7 +8,7 @@
*/
struct irq_desc;
-struct irq_data;
+
typedef void (*irq_flow_handler_t)(struct irq_desc *desc);
#endif
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 3496baa0b07f..e97206c721a0 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -5,30 +5,36 @@
#include <uapi/linux/irqnr.h>
-extern int nr_irqs;
+unsigned int irq_get_nr_irqs(void) __pure;
+unsigned int irq_set_nr_irqs(unsigned int nr);
extern struct irq_desc *irq_to_desc(unsigned int irq);
unsigned int irq_get_next_irq(unsigned int offset);
-# define for_each_irq_desc(irq, desc) \
- for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \
- irq++, desc = irq_to_desc(irq)) \
- if (!desc) \
- ; \
- else
-
+#define for_each_irq_desc(irq, desc) \
+ for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \
+ __nr_irqs__ = 0) \
+ for (irq = 0, desc = irq_to_desc(irq); irq < __nr_irqs__; \
+ irq++, desc = irq_to_desc(irq)) \
+ if (!desc) \
+ ; \
+ else
# define for_each_irq_desc_reverse(irq, desc) \
- for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \
- irq--, desc = irq_to_desc(irq)) \
+ for (irq = irq_get_nr_irqs() - 1, desc = irq_to_desc(irq); \
+ irq >= 0; irq--, desc = irq_to_desc(irq)) \
if (!desc) \
; \
else
-# define for_each_active_irq(irq) \
- for (irq = irq_get_next_irq(0); irq < nr_irqs; \
- irq = irq_get_next_irq(irq + 1))
+#define for_each_active_irq(irq) \
+ for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \
+ __nr_irqs__ = 0) \
+ for (irq = irq_get_next_irq(0); irq < __nr_irqs__; \
+ irq = irq_get_next_irq(irq + 1))
-#define for_each_irq_nr(irq) \
- for (irq = 0; irq < nr_irqs; irq++)
+#define for_each_irq_nr(irq) \
+ for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \
+ __nr_irqs__ = 0) \
+ for (irq = 0; irq < __nr_irqs__; irq++)
#endif
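
A sketch of the reworked iterator, assuming built-in (non-modular) kernel context since irq_to_desc() is not exported; names are illustrative:

#include <linux/irqnr.h>

static unsigned int demo_count_descs(void)
{
        struct irq_desc *desc;
        unsigned int irq, n = 0;

        /* The converted macro samples irq_get_nr_irqs() exactly once
         * and silently skips NULL descriptors. */
        for_each_irq_desc(irq, desc)
                n++;
        return n;
}
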
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index bd4c066ad39b..d426c7ad92bf 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -3,10 +3,10 @@
#define _LINUX_IRQRETURN_H
/**
- * enum irqreturn
- * @IRQ_NONE interrupt was not from this device or was not handled
- * @IRQ_HANDLED interrupt was handled by this device
- * @IRQ_WAKE_THREAD handler requests to wake the handler thread
+ * enum irqreturn - irqreturn type values
+ * @IRQ_NONE: interrupt was not from this device or was not handled
+ * @IRQ_HANDLED: interrupt was handled by this device
+ * @IRQ_WAKE_THREAD: handler requests to wake the handler thread
*/
enum irqreturn {
IRQ_NONE = (0 << 0),
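
A conventional handler illustrating the documented return values; the device structure and names are illustrative assumptions:

#include <linux/interrupt.h>

struct demo_dev {
        bool pending;   /* assumed device state */
};

static irqreturn_t demo_irq(int irq, void *dev_id)
{
        struct demo_dev *dd = dev_id;

        if (!dd->pending)
                return IRQ_NONE;        /* not from this device */
        dd->pending = false;
        return IRQ_HANDLED;             /* fully handled in hard-IRQ context */
}
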
diff --git a/include/linux/isa-dma.h b/include/linux/isa-dma.h
new file mode 100644
index 000000000000..61504a8c1b9e
--- /dev/null
+++ b/include/linux/isa-dma.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_ISA_DMA_H
+#define __LINUX_ISA_DMA_H
+
+#include <asm/dma.h>
+
+#if defined(CONFIG_PCI) && defined(CONFIG_X86_32)
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+#endif /* __LINUX_ISA_DMA_H */
diff --git a/include/linux/isa.h b/include/linux/isa.h
index e30963190968..4fbbf5e36e08 100644
--- a/include/linux/isa.h
+++ b/include/linux/isa.h
@@ -38,6 +38,32 @@ static inline void isa_unregister_driver(struct isa_driver *d)
}
#endif
+#define module_isa_driver_init(__isa_driver, __num_isa_dev) \
+static int __init __isa_driver##_init(void) \
+{ \
+ return isa_register_driver(&(__isa_driver), __num_isa_dev); \
+} \
+module_init(__isa_driver##_init)
+
+#define module_isa_driver_with_irq_init(__isa_driver, __num_isa_dev, __num_irq) \
+static int __init __isa_driver##_init(void) \
+{ \
+ if (__num_irq != __num_isa_dev) { \
+ pr_err("%s: Number of irq (%u) does not match number of base (%u)\n", \
+ __isa_driver.driver.name, __num_irq, __num_isa_dev); \
+ return -EINVAL; \
+ } \
+ return isa_register_driver(&(__isa_driver), __num_isa_dev); \
+} \
+module_init(__isa_driver##_init)
+
+#define module_isa_driver_exit(__isa_driver) \
+static void __exit __isa_driver##_exit(void) \
+{ \
+ isa_unregister_driver(&(__isa_driver)); \
+} \
+module_exit(__isa_driver##_exit)
+
/**
* module_isa_driver() - Helper macro for registering an ISA driver
* @__isa_driver: isa_driver struct
@@ -48,16 +74,22 @@ static inline void isa_unregister_driver(struct isa_driver *d)
* use this macro once, and calling it replaces module_init and module_exit.
*/
#define module_isa_driver(__isa_driver, __num_isa_dev) \
-static int __init __isa_driver##_init(void) \
-{ \
- return isa_register_driver(&(__isa_driver), __num_isa_dev); \
-} \
-module_init(__isa_driver##_init); \
-static void __exit __isa_driver##_exit(void) \
-{ \
- isa_unregister_driver(&(__isa_driver)); \
-} \
-module_exit(__isa_driver##_exit);
+module_isa_driver_init(__isa_driver, __num_isa_dev); \
+module_isa_driver_exit(__isa_driver)
+
+/**
+ * module_isa_driver_with_irq() - Helper macro for registering an ISA driver with irq
+ * @__isa_driver: isa_driver struct
+ * @__num_isa_dev: number of devices to register
+ * @__num_irq: number of IRQs to register
+ *
+ * Helper macro for ISA drivers with irq that do not do anything special in
+ * module init/exit. Each module may only use this macro once, and calling it
+ * replaces module_init and module_exit.
+ */
+#define module_isa_driver_with_irq(__isa_driver, __num_isa_dev, __num_irq) \
+module_isa_driver_with_irq_init(__isa_driver, __num_isa_dev, __num_irq); \
+module_isa_driver_exit(__isa_driver)
/**
* max_num_isa_dev() - Maximum possible number of registered ISA devices
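
A usage sketch for the new macro, under the assumption of a single device with a single IRQ line so the count check above passes; the driver name is illustrative:

#include <linux/isa.h>
#include <linux/module.h>

static struct isa_driver demo_isa_driver = {
        .driver = {
                .name = "demo-isa",
        },
};

/* Expands to module_init/module_exit pairs that register and
 * unregister the driver; mismatched counts fail with -EINVAL. */
module_isa_driver_with_irq(demo_isa_driver, 1, 1);
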
diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h
index b7b45ca82bea..e2742748104d 100644
--- a/include/linux/iscsi_ibft.h
+++ b/include/linux/iscsi_ibft.h
@@ -13,26 +13,30 @@
#ifndef ISCSI_IBFT_H
#define ISCSI_IBFT_H
-#include <linux/acpi.h>
+#include <linux/types.h>
/*
- * Logical location of iSCSI Boot Format Table.
- * If the value is NULL there is no iBFT on the machine.
+ * Physical location of iSCSI Boot Format Table.
+ * If the value is 0 there is no iBFT on the machine.
*/
-extern struct acpi_table_ibft *ibft_addr;
+extern phys_addr_t ibft_phys_addr;
+
+#ifdef CONFIG_ISCSI_IBFT_FIND
/*
* Routine used to find and reserve the iSCSI Boot Format Table. The
- * mapped address is set in the ibft_addr variable.
+ * physical address is set in the ibft_phys_addr variable.
*/
-#ifdef CONFIG_ISCSI_IBFT_FIND
-unsigned long find_ibft_region(unsigned long *sizep);
+void reserve_ibft_region(void);
+
+/*
+ * Physical bounds to search for the iSCSI Boot Format Table.
+ */
+#define IBFT_START 0x80000 /* 512kB */
+#define IBFT_END 0x100000 /* 1MB */
+
#else
-static inline unsigned long find_ibft_region(unsigned long *sizep)
-{
- *sizep = 0;
- return 0;
-}
+static inline void reserve_ibft_region(void) {}
#endif
#endif /* ISCSI_IBFT_H */
diff --git a/include/linux/ism.h b/include/linux/ism.h
new file mode 100644
index 000000000000..b7feb4dcd5a8
--- /dev/null
+++ b/include/linux/ism.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Internal Shared Memory
+ *
+ * Definitions for the ISM module
+ *
+ * Copyright IBM Corp. 2022
+ */
+#ifndef _ISM_H
+#define _ISM_H
+
+#include <linux/workqueue.h>
+
+/* Unless we gain unexpected popularity, this limit should hold for a while */
+#define MAX_CLIENTS 8
+#define ISM_NR_DMBS 1920
+
+struct ism_dev {
+ spinlock_t lock; /* protects the ism device */
+ spinlock_t cmd_lock; /* serializes cmds */
+ struct list_head list;
+ struct dibs_dev *dibs;
+ struct pci_dev *pdev;
+
+ struct ism_sba *sba;
+ dma_addr_t sba_dma_addr;
+ DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
+ void *priv[MAX_CLIENTS];
+
+ struct ism_eq *ieq;
+ dma_addr_t ieq_dma_addr;
+
+ int ieq_idx;
+
+ struct ism_client *subs[MAX_CLIENTS];
+};
+
+struct ism_event {
+ u32 type;
+ u32 code;
+ u64 tok;
+ u64 time;
+ u64 info;
+};
+
+struct ism_client {
+ const char *name;
+ void (*handle_event)(struct ism_dev *dev, struct ism_event *event);
+ /* Private area - don't touch! */
+ u8 id;
+};
+
+int ism_register_client(struct ism_client *client);
+int ism_unregister_client(struct ism_client *client);
+static inline void *ism_get_priv(struct ism_dev *dev,
+ struct ism_client *client)
+{
+ return dev->priv[client->id];
+}
+
+static inline void ism_set_priv(struct ism_dev *dev, struct ism_client *client,
+ void *priv)
+{
+ dev->priv[client->id] = priv;
+}
+
+const struct smcd_ops *ism_get_smcd_ops(void);
+
+#endif /* _ISM_H */
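
A client-registration sketch against the API introduced here; the client name and callback body are illustrative assumptions:

#include <linux/ism.h>

static struct ism_client demo_client;

static void demo_handle_event(struct ism_dev *dev, struct ism_event *event)
{
        /* Per-device state stored earlier via ism_set_priv(). */
        void *state = ism_get_priv(dev, &demo_client);

        (void)state;    /* event handling elided */
}

static struct ism_client demo_client = {
        .name           = "demo",
        .handle_event   = demo_handle_event,
};

static int __init demo_init(void)
{
        return ism_register_client(&demo_client);
}
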
diff --git a/include/linux/iversion.h b/include/linux/iversion.h
index 3bfebde5a1a6..8f972eaca2ed 100644
--- a/include/linux/iversion.h
+++ b/include/linux/iversion.h
@@ -9,8 +9,26 @@
* ---------------------------
* The change attribute (i_version) is mandated by NFSv4 and is mostly for
* knfsd, but is also used for other purposes (e.g. IMA). The i_version must
- * appear different to observers if there was a change to the inode's data or
- * metadata since it was last queried.
+ * appear larger to observers if there was an explicit change to the inode's
+ * data or metadata since it was last queried.
+ *
+ * An explicit change is one that would ordinarily result in a change to the
+ * inode status change time (aka ctime). i_version must appear to change, even
+ * if the ctime does not (since the whole point is to avoid missing updates due
+ * to timestamp granularity). If POSIX or other relevant spec mandates that the
+ * ctime must change due to an operation, then the i_version counter must be
+ * incremented as well.
+ *
+ * Making the i_version update completely atomic with the operation itself would
+ * be prohibitively expensive. Traditionally the kernel has updated the times on
+ * directories after an operation that changes its contents. For regular files,
+ * the ctime is usually updated before the data is copied into the cache for a
+ * write. This means that there is a window of time when an observer can
+ * associate a new timestamp with old file contents. Since the purpose of the
+ * i_version is to allow for better cache coherency, the i_version must always
+ * be updated after the results of the operation are visible. Updating it before
+ * and after a change is also permitted. (Note that no filesystems currently do
+ * this. Fixing that is a work-in-progress).
*
* Observers see the i_version as a 64-bit number that never decreases. If it
* remains the same since it was last checked, then nothing has changed in the
@@ -123,17 +141,12 @@ inode_peek_iversion_raw(const struct inode *inode)
static inline void
inode_set_max_iversion_raw(struct inode *inode, u64 val)
{
- u64 cur, old;
+ u64 cur = inode_peek_iversion_raw(inode);
- cur = inode_peek_iversion_raw(inode);
- for (;;) {
+ do {
if (cur > val)
break;
- old = atomic64_cmpxchg(&inode->i_version, cur, val);
- if (likely(old == cur))
- break;
- cur = old;
- }
+ } while (!atomic64_try_cmpxchg(&inode->i_version, &cur, val));
}
/**
@@ -177,56 +190,7 @@ inode_set_iversion_queried(struct inode *inode, u64 val)
I_VERSION_QUERIED);
}
-/**
- * inode_maybe_inc_iversion - increments i_version
- * @inode: inode with the i_version that should be updated
- * @force: increment the counter even if it's not necessary?
- *
- * Every time the inode is modified, the i_version field must be seen to have
- * changed by any observer.
- *
- * If "force" is set or the QUERIED flag is set, then ensure that we increment
- * the value, and clear the queried flag.
- *
- * In the common case where neither is set, then we can return "false" without
- * updating i_version.
- *
- * If this function returns false, and no other metadata has changed, then we
- * can avoid logging the metadata.
- */
-static inline bool
-inode_maybe_inc_iversion(struct inode *inode, bool force)
-{
- u64 cur, old, new;
-
- /*
- * The i_version field is not strictly ordered with any other inode
- * information, but the legacy inode_inc_iversion code used a spinlock
- * to serialize increments.
- *
- * Here, we add full memory barriers to ensure that any de-facto
- * ordering with other info is preserved.
- *
- * This barrier pairs with the barrier in inode_query_iversion()
- */
- smp_mb();
- cur = inode_peek_iversion_raw(inode);
- for (;;) {
- /* If flag is clear then we needn't do anything */
- if (!force && !(cur & I_VERSION_QUERIED))
- return false;
-
- /* Since lowest bit is flag, add 2 to avoid it */
- new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
-
- old = atomic64_cmpxchg(&inode->i_version, cur, new);
- if (likely(old == cur))
- break;
- cur = old;
- }
- return true;
-}
-
+bool inode_maybe_inc_iversion(struct inode *inode, bool force);
/**
* inode_inc_iversion - forcibly increment i_version
@@ -288,51 +252,11 @@ inode_peek_iversion(const struct inode *inode)
return inode_peek_iversion_raw(inode) >> I_VERSION_QUERIED_SHIFT;
}
-/**
- * inode_query_iversion - read i_version for later use
- * @inode: inode from which i_version should be read
- *
- * Read the inode i_version counter. This should be used by callers that wish
- * to store the returned i_version for later comparison. This will guarantee
- * that a later query of the i_version will result in a different value if
- * anything has changed.
- *
- * In this implementation, we fetch the current value, set the QUERIED flag and
- * then try to swap it into place with a cmpxchg, if it wasn't already set. If
- * that fails, we try again with the newly fetched value from the cmpxchg.
- */
-static inline u64
-inode_query_iversion(struct inode *inode)
-{
- u64 cur, old, new;
-
- cur = inode_peek_iversion_raw(inode);
- for (;;) {
- /* If flag is already set, then no need to swap */
- if (cur & I_VERSION_QUERIED) {
- /*
- * This barrier (and the implicit barrier in the
- * cmpxchg below) pairs with the barrier in
- * inode_maybe_inc_iversion().
- */
- smp_mb();
- break;
- }
-
- new = cur | I_VERSION_QUERIED;
- old = atomic64_cmpxchg(&inode->i_version, cur, new);
- if (likely(old == cur))
- break;
- cur = old;
- }
- return cur >> I_VERSION_QUERIED_SHIFT;
-}
-
/*
* For filesystems without any sort of change attribute, the best we can
* do is fake one up from the ctime:
*/
-static inline u64 time_to_chattr(struct timespec64 *t)
+static inline u64 time_to_chattr(const struct timespec64 *t)
{
u64 chattr = t->tv_sec;
@@ -341,6 +265,8 @@ static inline u64 time_to_chattr(struct timespec64 *t)
return chattr;
}
+u64 inode_query_iversion(struct inode *inode);
+
/**
* inode_eq_iversion_raw - check whether the raw i_version counter has changed
* @inode: inode to check
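
A cache-validation sketch in the style the comments above describe, assuming inode_eq_iversion() from the same header; the cache structure is illustrative:

#include <linux/iversion.h>

struct demo_cache {
        u64 ver;
};

static void demo_cache_refresh(struct demo_cache *c, struct inode *inode)
{
        /* Sets the QUERIED flag, so any later explicit change is
         * guaranteed to increment the counter. */
        c->ver = inode_query_iversion(inode);
}

static bool demo_cache_stale(const struct demo_cache *c, struct inode *inode)
{
        return !inode_eq_iversion(inode, c->ver);
}
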
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index db0e1920cb12..f5eaf76198f3 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -28,7 +28,7 @@
#include <linux/slab.h>
#include <linux/bit_spinlock.h>
#include <linux/blkdev.h>
-#include <crypto/hash.h>
+#include <linux/crc32c.h>
#endif
#define journal_oom_retry 1
@@ -54,14 +54,13 @@
* CONFIG_JBD2_DEBUG is on.
*/
#define JBD2_EXPENSIVE_CHECKING
-extern ushort jbd2_journal_enable_debug;
void __jbd2_debug(int level, const char *file, const char *func,
unsigned int line, const char *fmt, ...);
-#define jbd_debug(n, fmt, a...) \
+#define jbd2_debug(n, fmt, a...) \
__jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
#else
-#define jbd_debug(n, fmt, a...) no_printk(fmt, ##a)
+#define jbd2_debug(n, fmt, a...) no_printk(fmt, ##a)
#endif
extern void *jbd2_alloc(size_t size, gfp_t flags);
@@ -266,8 +265,10 @@ typedef struct journal_superblock_s
__u8 s_padding2[3];
/* 0x0054 */
__be32 s_num_fc_blks; /* Number of fast commit blocks */
-/* 0x0058 */
- __u32 s_padding[41];
+ __be32 s_head; /* blocknr of head of log, only uptodate
+ * while the filesystem is clean */
+/* 0x005C */
+ __u32 s_padding[40];
__be32 s_checksum; /* crc32c(superblock) */
/* 0x0100 */
@@ -275,17 +276,6 @@ typedef struct journal_superblock_s
/* 0x0400 */
} journal_superblock_t;
-/* Use the jbd2_{has,set,clear}_feature_* helpers; these will be removed */
-#define JBD2_HAS_COMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
-#define JBD2_HAS_RO_COMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
-#define JBD2_HAS_INCOMPAT_FEATURE(j,mask) \
- ((j)->j_format_version >= 2 && \
- ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
-
#define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001
#define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001
@@ -469,7 +459,6 @@ struct jbd2_revoke_table_s;
* @h_ref: Reference count on this handle.
* @h_err: Field for caller's use to track errors through large fs operations.
* @h_sync: Flag for sync-on-close.
- * @h_jdata: Flag to force data journaling.
* @h_reserved: Flag for handle for reserved credits.
* @h_aborted: Flag indicating fatal error on handle.
* @h_type: For handle statistics.
@@ -501,7 +490,6 @@ struct jbd2_journal_handle
/* Flags [no locking] */
unsigned int h_sync: 1;
- unsigned int h_jdata: 1;
unsigned int h_reserved: 1;
unsigned int h_aborted: 1;
unsigned int h_type: 8;
@@ -554,9 +542,6 @@ struct transaction_chp_stats_s {
* ->j_list_lock
*
* j_state_lock
- * ->t_handle_lock
- *
- * j_state_lock
* ->j_list_lock (journal_unmap_buffer)
*
*/
@@ -594,7 +579,7 @@ struct transaction_s
*/
unsigned long t_log_start;
- /*
+ /*
* Number of buffers on the t_buffers list [j_list_lock, no locks
* needed for jbd2 thread]
*/
@@ -627,12 +612,6 @@ struct transaction_s
struct journal_head *t_checkpoint_list;
/*
- * Doubly-linked circular list of all buffers submitted for IO while
- * checkpointing. [j_list_lock]
- */
- struct journal_head *t_checkpoint_io_list;
-
- /*
* Doubly-linked circular list of metadata buffers being
* shadowed by log IO. The IO buffers on the iobuf list and
* the shadow buffers on this list match each other one for
@@ -650,11 +629,6 @@ struct transaction_s
struct list_head t_inode_list;
/*
- * Protects info related to handles
- */
- spinlock_t t_handle_lock;
-
- /*
* Longest time some handle had to wait for running transaction
*/
unsigned long t_max_wait;
@@ -724,12 +698,6 @@ struct transaction_s
/* Disk flush needs to be sent to fs partition [no locking] */
int t_need_data_flush;
-
- /*
- * For use by the filesystem to store fs-specific data
- * structures associated with the transaction
- */
- struct list_head t_private_list;
};
struct transaction_run_stats_s {
@@ -803,11 +771,6 @@ struct journal_s
journal_superblock_t *j_superblock;
/**
- * @j_format_version: Version of the superblock format.
- */
- int j_format_version;
-
- /**
* @j_state_lock: Protect the various scalars in the journal.
*/
rwlock_t j_state_lock;
@@ -905,6 +868,29 @@ struct journal_s
struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH];
/**
+ * @j_shrinker:
+ *
+ * Journal head shrinker; reclaims a buffer's journal head once the
+ * buffer has been written back.
+ */
+ struct shrinker *j_shrinker;
+
+ /**
+ * @j_checkpoint_jh_count:
+ *
+ * Number of journal buffers on the checkpoint list. [j_list_lock]
+ */
+ struct percpu_counter j_checkpoint_jh_count;
+
+ /**
+ * @j_shrink_transaction:
+ *
+ * Records the next transaction to be shrunk on the checkpoint list.
+ * [j_list_lock]
+ */
+ transaction_t *j_shrink_transaction;
+
+ /**
* @j_head:
*
* Journal head: identifies the first unused block in the journal.
@@ -1000,6 +986,13 @@ struct journal_s
struct block_device *j_fs_dev;
/**
+ * @j_fs_dev_wb_err:
+ *
+ * Records the errseq of the client fs's backing block device.
+ */
+ errseq_t j_fs_dev_wb_err;
+
+ /**
* @j_total_len: Total maximum capacity of the journal region on disk.
*/
unsigned int j_total_len;
@@ -1085,6 +1078,13 @@ struct journal_s
int j_revoke_records_per_block;
/**
+ * @j_transaction_overhead_buffers:
+ *
+ * Number of blocks each transaction needs for its own bookkeeping
+ */
+ int j_transaction_overhead_buffers;
+
+ /**
* @j_commit_interval:
*
* What is the maximum transaction lifetime before we begin a commit?
@@ -1234,13 +1234,6 @@ struct journal_s
void *j_private;
/**
- * @j_chksum_driver:
- *
- * Reference to checksum algorithm driver via cryptoapi.
- */
- struct crypto_shash *j_chksum_driver;
-
- /**
* @j_csum_seed:
*
* Precomputed journal UUID checksum for seeding other checksums.
@@ -1260,6 +1253,12 @@ struct journal_s
*/
struct lockdep_map j_trans_commit_map;
#endif
+ /**
+ * @jbd2_trans_commit_key:
+ *
+ * "struct lock_class_key" for @j_trans_commit_map
+ */
+ struct lock_class_key jbd2_trans_commit_key;
/**
* @j_fc_cleanup_callback:
@@ -1267,7 +1266,7 @@ struct journal_s
* Clean-up after fast commit or full commit. JBD2 calls this function
* after every commit operation.
*/
- void (*j_fc_cleanup_callback)(struct journal_s *journal, int);
+ void (*j_fc_cleanup_callback)(struct journal_s *journal, int full, tid_t tid);
/**
* @j_fc_replay_callback:
@@ -1284,6 +1283,14 @@ struct journal_s
struct buffer_head *bh,
enum passtype pass, int off,
tid_t expected_commit_id);
+
+ /**
+ * @j_bmap:
+ *
+ * Bmap function that should be used instead of the generic
+ * VFS bmap function.
+ */
+ int (*j_bmap)(struct journal_s *journal, sector_t *block);
};
#define jbd2_might_wait_for_commit(j) \
@@ -1292,11 +1299,22 @@ struct journal_s
rwsem_release(&j->j_trans_commit_map, _THIS_IP_); \
} while (0)
+/*
+ * We can support any known requested features iff the
+ * superblock is not in version 1. Otherwise we fail to support any
+ * extended sb features.
+ */
+static inline bool jbd2_format_support_feature(journal_t *j)
+{
+ return j->j_superblock->s_header.h_blocktype !=
+ cpu_to_be32(JBD2_SUPERBLOCK_V1);
+}
+
/* journal feature predicate functions */
#define JBD2_FEATURE_COMPAT_FUNCS(name, flagname) \
static inline bool jbd2_has_feature_##name(journal_t *j) \
{ \
- return ((j)->j_format_version >= 2 && \
+ return (jbd2_format_support_feature(j) && \
((j)->j_superblock->s_feature_compat & \
cpu_to_be32(JBD2_FEATURE_COMPAT_##flagname)) != 0); \
} \
@@ -1314,7 +1332,7 @@ static inline void jbd2_clear_feature_##name(journal_t *j) \
#define JBD2_FEATURE_RO_COMPAT_FUNCS(name, flagname) \
static inline bool jbd2_has_feature_##name(journal_t *j) \
{ \
- return ((j)->j_format_version >= 2 && \
+ return (jbd2_format_support_feature(j) && \
((j)->j_superblock->s_feature_ro_compat & \
cpu_to_be32(JBD2_FEATURE_RO_COMPAT_##flagname)) != 0); \
} \
@@ -1332,7 +1350,7 @@ static inline void jbd2_clear_feature_##name(journal_t *j) \
#define JBD2_FEATURE_INCOMPAT_FUNCS(name, flagname) \
static inline bool jbd2_has_feature_##name(journal_t *j) \
{ \
- return ((j)->j_format_version >= 2 && \
+ return (jbd2_format_support_feature(j) && \
((j)->j_superblock->s_feature_incompat & \
cpu_to_be32(JBD2_FEATURE_INCOMPAT_##flagname)) != 0); \
} \
@@ -1356,6 +1374,9 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum2, CSUM_V2)
JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT)
+/* Journal high priority write IO operation flags */
+#define JBD2_JOURNAL_REQ_FLAGS (REQ_META | REQ_SYNC | REQ_IDLE)
+
/*
* Journal flag definitions
*/
@@ -1365,11 +1386,15 @@ JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT)
#define JBD2_FLUSHED 0x008 /* The journal superblock has been flushed */
#define JBD2_LOADED 0x010 /* The journal superblock has been loaded */
#define JBD2_BARRIER 0x020 /* Use IDE barriers */
-#define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
- * data write error in ordered
- * mode */
+#define JBD2_CYCLE_RECORD 0x080 /* Journal cycled record log on
+ * clean and empty filesystem
+ * logging area */
#define JBD2_FAST_COMMIT_ONGOING 0x100 /* Fast commit is ongoing */
#define JBD2_FULL_COMMIT_ONGOING 0x200 /* Full commit is ongoing */
+#define JBD2_JOURNAL_FLUSH_DISCARD 0x0001
+#define JBD2_JOURNAL_FLUSH_ZEROOUT 0x0002
+#define JBD2_JOURNAL_FLUSH_VALID (JBD2_JOURNAL_FLUSH_DISCARD | \
+ JBD2_JOURNAL_FLUSH_ZEROOUT)
/*
* Function declarations for the journaling transaction and buffer
@@ -1377,13 +1402,10 @@ JBD2_FEATURE_INCOMPAT_FUNCS(fast_commit, FAST_COMMIT)
*/
/* Filing buffers */
-extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
extern bool __jbd2_journal_refile_buffer(struct journal_head *);
extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_free_buffer(struct journal_head *bh);
extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_clean_data_list(transaction_t *transaction);
static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh)
{
list_add_tail(&bh->b_assoc_buffers, head);
@@ -1406,8 +1428,12 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
extern void jbd2_journal_commit_transaction(journal_t *);
/* Checkpoint list management */
-void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
+enum jbd2_shrink_type {JBD2_SHRINK_DESTROY, JBD2_SHRINK_BUSY_STOP, JBD2_SHRINK_BUSY_SKIP};
+
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal, enum jbd2_shrink_type type);
+unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
+int jbd2_journal_try_remove_checkpoint(struct journal_head *jh);
void jbd2_journal_destroy_checkpoint(journal_t *journal);
void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
@@ -1447,9 +1473,6 @@ extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
struct buffer_head **bh_out,
sector_t blocknr);
-/* Transaction locking */
-extern void __wait_on_journal (journal_t *);
-
/* Transaction cache support */
extern void jbd2_journal_destroy_transaction_cache(void);
extern int __init jbd2_journal_init_transaction_cache(void);
@@ -1496,14 +1519,16 @@ void jbd2_journal_set_triggers(struct buffer_head *,
struct jbd2_buffer_trigger_type *type);
extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
-extern int jbd2_journal_invalidatepage(journal_t *,
- struct page *, unsigned int, unsigned int);
-extern int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page);
+int jbd2_journal_invalidate_folio(journal_t *, struct folio *,
+ size_t offset, size_t length);
+bool jbd2_journal_try_to_free_buffers(journal_t *journal, struct folio *folio);
extern int jbd2_journal_stop(handle_t *);
-extern int jbd2_journal_flush (journal_t *);
+extern int jbd2_journal_flush(journal_t *journal, unsigned int flags);
extern void jbd2_journal_lock_updates (journal_t *);
extern void jbd2_journal_unlock_updates (journal_t *);
+void jbd2_journal_wait_updates(journal_t *);
+
extern journal_t * jbd2_journal_init_dev(struct block_device *bdev,
struct block_device *fs_dev,
unsigned long long start, int len, int bsize);
@@ -1524,7 +1549,7 @@ extern int jbd2_journal_wipe (journal_t *, int);
extern int jbd2_journal_skip_recovery (journal_t *);
extern void jbd2_journal_update_sb_errno(journal_t *);
extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
- unsigned long, int);
+ unsigned long, blk_opf_t);
extern void jbd2_journal_abort (journal_t *, int);
extern int jbd2_journal_errno (journal_t *);
extern void jbd2_journal_ack_err (journal_t *);
@@ -1538,8 +1563,6 @@ extern int jbd2_journal_inode_ranged_write(handle_t *handle,
extern int jbd2_journal_inode_ranged_wait(handle_t *handle,
struct jbd2_inode *inode, loff_t start_byte,
loff_t length);
-extern int jbd2_journal_submit_inode_data_buffers(
- struct jbd2_inode *jinode);
extern int jbd2_journal_finish_inode_data_buffers(
struct jbd2_inode *jinode);
extern int jbd2_journal_begin_ordered_truncate(journal_t *journal,
@@ -1559,10 +1582,13 @@ void jbd2_journal_put_journal_head(struct journal_head *jh);
*/
extern struct kmem_cache *jbd2_handle_cache;
-static inline handle_t *jbd2_alloc_handle(gfp_t gfp_flags)
-{
- return kmem_cache_zalloc(jbd2_handle_cache, gfp_flags);
-}
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag). The typecast is
+ * intentional to enforce typesafety.
+ */
+#define jbd2_alloc_handle(_gfp_flags) \
+ ((handle_t *)kmem_cache_zalloc(jbd2_handle_cache, _gfp_flags))
static inline void jbd2_free_handle(handle_t *handle)
{
@@ -1575,10 +1601,13 @@ static inline void jbd2_free_handle(handle_t *handle)
*/
extern struct kmem_cache *jbd2_inode_cache;
-static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags)
-{
- return kmem_cache_alloc(jbd2_inode_cache, gfp_flags);
-}
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag). The typecast is
+ * intentional to enforce typesafety.
+ */
+#define jbd2_alloc_inode(_gfp_flags) \
+ ((struct jbd2_inode *)kmem_cache_alloc(jbd2_inode_cache, _gfp_flags))
static inline void jbd2_free_inode(struct jbd2_inode *jinode)
{
@@ -1592,10 +1621,12 @@ extern void jbd2_journal_destroy_revoke_record_cache(void);
extern void jbd2_journal_destroy_revoke_table_cache(void);
extern int __init jbd2_journal_init_revoke_record_cache(void);
extern int __init jbd2_journal_init_revoke_table_cache(void);
+struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size);
+void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table);
extern void jbd2_journal_destroy_revoke(journal_t *);
extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
-extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
+extern void jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
extern void jbd2_journal_write_revoke_records(transaction_t *transaction,
struct list_head *log_bufs);
@@ -1614,7 +1645,6 @@ extern void jbd2_clear_buffer_revoked_flags(journal_t *journal);
*/
int jbd2_log_start_commit(journal_t *journal, tid_t tid);
-int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
int jbd2_transaction_committed(journal_t *journal, tid_t tid);
@@ -1631,15 +1661,10 @@ int jbd2_fc_begin_commit(journal_t *journal, tid_t tid);
int jbd2_fc_end_commit(journal_t *journal);
int jbd2_fc_end_commit_fallback(journal_t *journal);
int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out);
-int jbd2_submit_inode_data(struct jbd2_inode *jinode);
+int jbd2_submit_inode_data(journal_t *journal, struct jbd2_inode *jinode);
int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode);
int jbd2_fc_wait_bufs(journal_t *journal, int num_blks);
-int jbd2_fc_release_bufs(journal_t *journal);
-
-static inline int jbd2_journal_get_max_txn_bufs(journal_t *journal)
-{
- return (journal->j_total_len - journal->j_fc_wbufsize) / 4;
-}
+void jbd2_fc_release_bufs(journal_t *journal);
/*
* is_journal_abort
@@ -1668,6 +1693,25 @@ static inline void jbd2_journal_abort_handle(handle_t *handle)
handle->h_aborted = 1;
}
+static inline void jbd2_init_fs_dev_write_error(journal_t *journal)
+{
+ struct address_space *mapping = journal->j_fs_dev->bd_mapping;
+
+ /*
+ * Save the original wb_err value of client fs's bdev mapping which
+ * could be used to detect the client fs's metadata async write error.
+ */
+ errseq_check_and_advance(&mapping->wb_err, &journal->j_fs_dev_wb_err);
+}
+
+static inline int jbd2_check_fs_dev_write_error(journal_t *journal)
+{
+ struct address_space *mapping = journal->j_fs_dev->bd_mapping;
+
+ return errseq_check(&mapping->wb_err,
+ READ_ONCE(journal->j_fs_dev_wb_err));
+}
+
#endif /* __KERNEL__ */
/* Comparison functions for transaction IDs: perform comparisons using
@@ -1685,20 +1729,13 @@ static inline int tid_geq(tid_t x, tid_t y)
return (difference >= 0);
}
-extern int jbd2_journal_blocks_per_page(struct inode *inode);
+extern int jbd2_journal_blocks_per_folio(struct inode *inode);
extern size_t journal_tag_bytes(journal_t *journal);
-static inline bool jbd2_journal_has_csum_v2or3_feature(journal_t *j)
-{
- return jbd2_has_feature_csum2(j) || jbd2_has_feature_csum3(j);
-}
-
static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
{
- WARN_ON_ONCE(jbd2_journal_has_csum_v2or3_feature(journal) &&
- journal->j_chksum_driver == NULL);
-
- return journal->j_chksum_driver != NULL;
+ return jbd2_has_feature_csum2(journal) ||
+ jbd2_has_feature_csum3(journal);
}
static inline int jbd2_journal_get_num_fc_blks(journal_superblock_t *jsb)
@@ -1735,30 +1772,9 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal)
#define BJ_Reserved 4 /* Buffer is reserved for access by journal */
#define BJ_Types 5
-extern int jbd_blocks_per_page(struct inode *inode);
-
-/* JBD uses a CRC32 checksum */
-#define JBD_MAX_CHECKSUM_SIZE 4
-
-static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
- const void *address, unsigned int length)
+static inline u32 jbd2_chksum(u32 crc, const void *address, unsigned int length)
{
- struct {
- struct shash_desc shash;
- char ctx[JBD_MAX_CHECKSUM_SIZE];
- } desc;
- int err;
-
- BUG_ON(crypto_shash_descsize(journal->j_chksum_driver) >
- JBD_MAX_CHECKSUM_SIZE);
-
- desc.shash.tfm = journal->j_chksum_driver;
- *(u32 *)desc.ctx = crc;
-
- err = crypto_shash_update(&desc.shash, address, length);
- BUG_ON(err);
-
- return *(u32 *)desc.ctx;
+ return crc32c(crc, address, length);
}
/* Return most recent uncommitted transaction */
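
A sketch of checksumming under the new crc32c-only scheme, using only fields and helpers visible in this patch; the function name is illustrative:

#include <linux/jbd2.h>

static u32 demo_block_csum(journal_t *j, const void *buf, unsigned int len)
{
        if (!jbd2_journal_has_csum_v2or3(j))
                return 0;       /* checksumming not enabled */

        /* Seed with the precomputed journal UUID checksum, then fold
         * in the buffer; no crypto_shash state is needed any more. */
        return jbd2_chksum(j->j_csum_seed, buf, len);
}
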
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index ab7f8c152b89..7c1c1821c694 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -24,14 +24,14 @@
* Jozsef
*/
#include <linux/bitops.h>
-#include <linux/unaligned/packed_struct.h>
+#include <linux/unaligned.h>
/* Best hash sizes are of power of two */
#define jhash_size(n) ((u32)1<<(n))
/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */
#define jhash_mask(n) (jhash_size(n)-1)
-/* __jhash_mix -- mix 3 32-bit values reversibly. */
+/* __jhash_mix - mix 3 32-bit values reversibly. */
#define __jhash_mix(a, b, c) \
{ \
a -= c; a ^= rol32(c, 4); c += b; \
@@ -60,7 +60,7 @@
/* jhash - hash an arbitrary key
* @k: sequence of bytes as key
* @length: the length of the key
- * @initval: the previous hash, or an arbitray value
+ * @initval: the previous hash, or an arbitrary value
*
* The generic version, hashes an arbitrary sequence of bytes.
* No alignment or length assumptions are made about the input key.
@@ -77,9 +77,9 @@ static inline u32 jhash(const void *key, u32 length, u32 initval)
/* All but the last block: affect some 32 bits of (a,b,c) */
while (length > 12) {
- a += __get_unaligned_cpu32(k);
- b += __get_unaligned_cpu32(k + 4);
- c += __get_unaligned_cpu32(k + 8);
+ a += get_unaligned((u32 *)k);
+ b += get_unaligned((u32 *)(k + 4));
+ c += get_unaligned((u32 *)(k + 8));
__jhash_mix(a, b, c);
length -= 12;
k += 12;
@@ -110,7 +110,7 @@ static inline u32 jhash(const void *key, u32 length, u32 initval)
/* jhash2 - hash an array of u32's
* @k: the key which must be an array of u32's
* @length: the number of u32's in the key
- * @initval: the previous hash, or an arbitray value
+ * @initval: the previous hash, or an arbitrary value
*
* Returns the hash value of the key.
*/
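
A bucket-selection sketch using the helpers above; the table size and salt are illustrative:

#include <linux/jhash.h>

/* Hash an arbitrary key into a 1024-bucket (2^10) table. */
static u32 demo_bucket(const void *key, u32 len, u32 salt)
{
        return jhash(key, len, salt) & jhash_mask(10);
}
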
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 5e13f801c902..fdef2c155c27 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -59,9 +59,9 @@
/* LATCH is used in the interval timer and ftape setup. */
#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
-extern int register_refined_jiffies(long clock_tick_rate);
+extern void register_refined_jiffies(long clock_tick_rate);
-/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */
+/* TICK_USEC is the time between ticks in usec */
#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ)
/* USER_TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
@@ -72,9 +72,15 @@ extern int register_refined_jiffies(long clock_tick_rate);
#endif
/*
- * The 64-bit value is not atomic - you MUST NOT read it
+ * The 64-bit value is not atomic on 32-bit systems - you MUST NOT read it
* without sampling the sequence number in jiffies_lock.
* get_jiffies_64() will do this for you as appropriate.
+ *
+ * jiffies and jiffies_64 are at the same address for little-endian systems
+ * and for 64-bit big-endian systems.
+ * On 32-bit big-endian systems, jiffies is the lower 32 bits of jiffies_64
+ * (i.e., at address @jiffies_64 + 4).
+ * See arch/ARCH/kernel/vmlinux.lds.S
*/
extern u64 __cacheline_aligned_in_smp jiffies_64;
extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
@@ -82,46 +88,94 @@ extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffi
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
#else
+/**
+ * get_jiffies_64 - read the 64-bit non-atomic jiffies_64 value
+ *
+ * When BITS_PER_LONG < 64, this uses sequence number sampling using
+ * jiffies_lock to protect the 64-bit read.
+ *
+ * Return: current 64-bit jiffies value
+ */
static inline u64 get_jiffies_64(void)
{
return (u64)jiffies;
}
#endif
-/*
- * These inlines deal with timer wrapping correctly. You are
- * strongly encouraged to use them
- * 1. Because people otherwise forget
- * 2. Because if the timer wrap changes in future you won't have to
- * alter your driver code.
+/**
+ * DOC: General information about time_* inlines
+ *
+ * These inlines deal with timer wrapping correctly. You are strongly encouraged
+ * to use them:
*
- * time_after(a,b) returns true if the time a is after time b.
+ * #. Because people otherwise forget
+ * #. Because if the timer wrap changes in future you won't have to alter your
+ * driver code.
+ */
+
+/**
+ * time_after - returns true if the time a is after time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
*
* Do this with "<0" and ">=0" to only test the sign of the result. A
* good compiler would generate better code (and a really good compiler
* wouldn't care). Gcc is currently neither.
+ *
+ * Return: %true if time a is after time b, otherwise %false.
*/
#define time_after(a,b) \
(typecheck(unsigned long, a) && \
typecheck(unsigned long, b) && \
((long)((b) - (a)) < 0))
+/**
+ * time_before - returns true if the time a is before time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
+ *
+ * Return: %true if time a is before time b, otherwise %false.
+ */
#define time_before(a,b) time_after(b,a)
+/**
+ * time_after_eq - returns true if the time a is after or the same as time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
+ *
+ * Return: %true if time a is after or the same as time b, otherwise %false.
+ */
#define time_after_eq(a,b) \
(typecheck(unsigned long, a) && \
typecheck(unsigned long, b) && \
((long)((a) - (b)) >= 0))
+/**
+ * time_before_eq - returns true if the time a is before or the same as time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
+ *
+ * Return: %true if time a is before or the same as time b, otherwise %false.
+ */
#define time_before_eq(a,b) time_after_eq(b,a)
-/*
- * Calculate whether a is in the range of [b, c].
+/**
+ * time_in_range - Calculate whether a is in the range of [b, c].
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c], otherwise %false.
*/
#define time_in_range(a,b,c) \
(time_after_eq(a,b) && \
time_before_eq(a,c))
-/*
- * Calculate whether a is in the range of [b, c).
+/**
+ * time_in_range_open - Calculate whether a is in the range of [b, c).
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c), otherwise %false.
*/
#define time_in_range_open(a,b,c) \
(time_after_eq(a,b) && \
@@ -129,45 +183,138 @@ static inline u64 get_jiffies_64(void)
/* Same as above, but does so with platform independent 64bit types.
* These must be used when utilizing jiffies_64 (i.e. return value of
- * get_jiffies_64() */
+ * get_jiffies_64()). */
+
+/**
+ * time_after64 - returns true if the time a is after time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is after time b, otherwise %false.
+ */
#define time_after64(a,b) \
(typecheck(__u64, a) && \
typecheck(__u64, b) && \
((__s64)((b) - (a)) < 0))
+/**
+ * time_before64 - returns true if the time a is before time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is before time b, otherwise %false.
+ */
#define time_before64(a,b) time_after64(b,a)
+/**
+ * time_after_eq64 - returns true if the time a is after or the same as time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is after or the same as time b, otherwise %false.
+ */
#define time_after_eq64(a,b) \
(typecheck(__u64, a) && \
typecheck(__u64, b) && \
((__s64)((a) - (b)) >= 0))
+/**
+ * time_before_eq64 - returns true if the time a is before or the same as time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is before or the same as time b, otherwise %false.
+ */
#define time_before_eq64(a,b) time_after_eq64(b,a)
+/**
+ * time_in_range64 - Calculate whether a is in the range of [b, c].
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c], otherwise %false.
+ */
#define time_in_range64(a, b, c) \
(time_after_eq64(a, b) && \
time_before_eq64(a, c))
/*
- * These four macros compare jiffies and 'a' for convenience.
+ * These eight macros compare jiffies[_64] and 'a' for convenience.
*/
-/* time_is_before_jiffies(a) return true if a is before jiffies */
+/**
+ * time_is_before_jiffies - return true if a is before jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is before jiffies, otherwise %false.
+ */
#define time_is_before_jiffies(a) time_after(jiffies, a)
+/**
+ * time_is_before_jiffies64 - return true if a is before jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true if time a is before jiffies_64, otherwise %false.
+ */
#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a)
-/* time_is_after_jiffies(a) return true if a is after jiffies */
+/**
+ * time_is_after_jiffies - return true if a is after jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is after jiffies, otherwise %false.
+ */
#define time_is_after_jiffies(a) time_before(jiffies, a)
+/**
+ * time_is_after_jiffies64 - return true if a is after jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true if time a is after jiffies_64, otherwise %false.
+ */
#define time_is_after_jiffies64(a) time_before64(get_jiffies_64(), a)
-/* time_is_before_eq_jiffies(a) return true if a is before or equal to jiffies*/
+/**
+ * time_is_before_eq_jiffies - return true if a is before or equal to jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is before or the same as jiffies, otherwise %false.
+ */
#define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a)
+/**
+ * time_is_before_eq_jiffies64 - return true if a is before or equal to jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true if time a is before or the same as jiffies_64, otherwise %false.
+ */
#define time_is_before_eq_jiffies64(a) time_after_eq64(get_jiffies_64(), a)
-/* time_is_after_eq_jiffies(a) return true if a is after or equal to jiffies*/
+/**
+ * time_is_after_eq_jiffies - return true if a is after or equal to jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is after or the same as jiffies, otherwise %false.
+ */
#define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a)
+/**
+ * time_is_after_eq_jiffies64 - return true if a is after or equal to jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true if time a is after or the same as jiffies_64, otherwise %false.
+ */
#define time_is_after_eq_jiffies64(a) time_before_eq64(get_jiffies_64(), a)
/*
- * Have the 32 bit jiffies value wrap 5 minutes after boot
+ * Have the 32-bit jiffies value wrap 5 minutes after boot
* so jiffies wrap bugs show up earlier.
*/
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
@@ -271,14 +418,14 @@ extern unsigned long preset_lpj;
#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
TICK_NSEC -1) / (u64)TICK_NSEC))
/*
- * The maximum jiffie value is (MAX_INT >> 1). Here we translate that
+ * The maximum jiffy value is (MAX_INT >> 1). Here we translate that
* into seconds. The 64-bit case will overflow if we are not careful,
* so use the messy SH_DIV macro to do it. Still all constants.
*/
#if BITS_PER_LONG < 64
# define MAX_SEC_IN_JIFFIES \
(long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC)
-#else /* take care of overflow on 64 bits machines */
+#else /* take care of overflow on 64-bit machines */
# define MAX_SEC_IN_JIFFIES \
(SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1)
@@ -290,6 +437,12 @@ extern unsigned long preset_lpj;
extern unsigned int jiffies_to_msecs(const unsigned long j);
extern unsigned int jiffies_to_usecs(const unsigned long j);
+/**
+ * jiffies_to_nsecs - Convert jiffies to nanoseconds
+ * @j: jiffies value
+ *
+ * Return: nanoseconds value
+ */
static inline u64 jiffies_to_nsecs(const unsigned long j)
{
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
@@ -349,16 +502,18 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m)
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it with a factor and
* handling any 32-bit overflows.
- * for the details see __msecs_to_jiffies()
+ * For the details see _msecs_to_jiffies()
*
* msecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
- * code, __msecs_to_jiffies() is called if the value passed does not
+ * code. __msecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
- * the HZ range specific helpers _msecs_to_jiffies() are called both
+ * The HZ range specific helpers _msecs_to_jiffies() are called both
* directly here and from __msecs_to_jiffies() in the case where
* constant folding is not possible.
+ *
+ * Return: jiffies value
*/
static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
{
@@ -371,6 +526,19 @@ static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
}
}
+/**
+ * secs_to_jiffies - convert seconds to jiffies
+ * @_secs: time in seconds
+ *
+ * Conversion is done by simple multiplication with HZ
+ *
+ * secs_to_jiffies() is defined as a macro rather than a static inline
+ * function so it can be used in static initializers.
+ *
+ * Return: jiffies value
+ */
+#define secs_to_jiffies(_secs) (unsigned long)((_secs) * HZ)
+
extern unsigned long __usecs_to_jiffies(const unsigned int u);
#if !(USEC_PER_SEC % HZ)
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
@@ -400,12 +568,14 @@ static inline unsigned long _usecs_to_jiffies(const unsigned int u)
*
* usecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
- * code, __usecs_to_jiffies() is called if the value passed does not
+ * code. __usecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
- * the HZ range specific helpers _usecs_to_jiffies() are called both
+ * The HZ range specific helpers _usecs_to_jiffies() are called both
* directly here and from __usecs_to_jiffies() in the case where
* constant folding is not possible.
+ *
+ * Return: jiffies value
*/
static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
{
@@ -422,6 +592,7 @@ extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
extern void jiffies_to_timespec64(const unsigned long jiffies,
struct timespec64 *value);
extern clock_t jiffies_to_clock_t(unsigned long x);
+
static inline clock_t jiffies_delta_to_clock_t(long delta)
{
return jiffies_to_clock_t(max(0L, delta));
@@ -440,4 +611,16 @@ extern unsigned long nsecs_to_jiffies(u64 n);
#define TIMESTAMP_SIZE 30
+struct ctl_table;
+int proc_dointvec_jiffies(const struct ctl_table *table, int dir, void *buffer,
+ size_t *lenp, loff_t *ppos);
+int proc_dointvec_ms_jiffies_minmax(const struct ctl_table *table, int dir,
+ void *buffer, size_t *lenp, loff_t *ppos);
+int proc_dointvec_userhz_jiffies(const struct ctl_table *table, int dir,
+ void *buffer, size_t *lenp, loff_t *ppos);
+int proc_dointvec_ms_jiffies(const struct ctl_table *table, int dir, void *buffer,
+ size_t *lenp, loff_t *ppos);
+int proc_doulongvec_ms_jiffies_minmax(const struct ctl_table *table, int dir,
+ void *buffer, size_t *lenp, loff_t *ppos);
+
#endif
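
A wrap-safe timeout sketch combining the documented comparison helpers with the new secs_to_jiffies(); the 5-second value and names are illustrative:

#include <linux/jiffies.h>

static unsigned long demo_deadline;

static void demo_arm_timeout(void)
{
        demo_deadline = jiffies + secs_to_jiffies(5);
}

static bool demo_timed_out(void)
{
        /* Correct across jiffies wraparound, unlike a plain '>'. */
        return time_after(jiffies, demo_deadline);
}
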
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 05f5554d860f..fdb79dd1ebd8 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -75,6 +75,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/cleanup.h>
extern bool static_key_initialized;
@@ -82,10 +83,9 @@ extern bool static_key_initialized;
"%s(): static key '%pS' used before call to jump_label_init()", \
__func__, (key))
-#ifdef CONFIG_JUMP_LABEL
-
struct static_key {
atomic_t enabled;
+#ifdef CONFIG_JUMP_LABEL
/*
* Note:
* To make anonymous unions work with old compilers, the static
@@ -104,13 +104,9 @@ struct static_key {
struct jump_entry *entries;
struct static_key_mod *next;
};
+#endif /* CONFIG_JUMP_LABEL */
};
-#else
-struct static_key {
- atomic_t enabled;
-};
-#endif /* CONFIG_JUMP_LABEL */
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_JUMP_LABEL
@@ -171,9 +167,21 @@ static inline bool jump_entry_is_init(const struct jump_entry *entry)
return (unsigned long)entry->key & 2UL;
}
-static inline void jump_entry_set_init(struct jump_entry *entry)
+static inline void jump_entry_set_init(struct jump_entry *entry, bool set)
{
- entry->key |= 2;
+ if (set)
+ entry->key |= 2;
+ else
+ entry->key &= ~2;
+}
+
+static inline int jump_entry_size(struct jump_entry *entry)
+{
+#ifdef JUMP_LABEL_NOP_SIZE
+ return JUMP_LABEL_NOP_SIZE;
+#else
+ return arch_jump_entry_size(entry);
+#endif
}
#endif
@@ -209,26 +217,26 @@ extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];
extern void jump_label_init(void);
+extern void jump_label_init_ro(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type);
-extern void arch_jump_label_transform_static(struct jump_entry *entry,
- enum jump_label_type type);
extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
enum jump_label_type type);
extern void arch_jump_label_transform_apply(void);
extern int jump_label_text_reserved(void *start, void *end);
-extern void static_key_slow_inc(struct static_key *key);
+extern bool static_key_slow_inc(struct static_key *key);
+extern bool static_key_fast_inc_not_disabled(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
-extern void static_key_slow_inc_cpuslocked(struct static_key *key);
+extern bool static_key_slow_inc_cpuslocked(struct static_key *key);
extern void static_key_slow_dec_cpuslocked(struct static_key *key);
-extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);
+extern enum jump_label_type jump_label_init_type(struct jump_entry *entry);
/*
* We should be using ATOMIC_INIT() for initializing .enabled, but
@@ -239,19 +247,19 @@ extern void static_key_disable_cpuslocked(struct static_key *key);
*/
#define STATIC_KEY_INIT_TRUE \
{ .enabled = { 1 }, \
- { .entries = (void *)JUMP_TYPE_TRUE } }
+ { .type = JUMP_TYPE_TRUE } }
#define STATIC_KEY_INIT_FALSE \
{ .enabled = { 0 }, \
- { .entries = (void *)JUMP_TYPE_FALSE } }
+ { .type = JUMP_TYPE_FALSE } }
#else /* !CONFIG_JUMP_LABEL */
#include <linux/atomic.h>
#include <linux/bug.h>
-static inline int static_key_count(struct static_key *key)
+static __always_inline int static_key_count(struct static_key *key)
{
- return atomic_read(&key->enabled);
+ return raw_atomic_read(&key->enabled);
}
static __always_inline void jump_label_init(void)
@@ -259,6 +267,8 @@ static __always_inline void jump_label_init(void)
static_key_initialized = true;
}
+static __always_inline void jump_label_init_ro(void) { }
+
static __always_inline bool static_key_false(struct static_key *key)
{
if (unlikely_notrace(static_key_count(key) > 0))
@@ -273,11 +283,23 @@ static __always_inline bool static_key_true(struct static_key *key)
return false;
}
-static inline void static_key_slow_inc(struct static_key *key)
+static inline bool static_key_fast_inc_not_disabled(struct static_key *key)
{
+ int v;
+
STATIC_KEY_CHECK_USE(key);
- atomic_inc(&key->enabled);
+	/*
+	 * Prevent key->enabled from going negative, to follow the same
+	 * semantics as for CONFIG_JUMP_LABEL=y; see the comment in
+	 * kernel/jump_label.c.
+	 */
+ v = atomic_read(&key->enabled);
+ do {
+ if (v < 0 || (v + 1) < 0)
+ return false;
+ } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
+ return true;
}
+#define static_key_slow_inc(key) static_key_fast_inc_not_disabled(key)
static inline void static_key_slow_dec(struct static_key *key)
{
@@ -296,11 +318,6 @@ static inline int jump_label_text_reserved(void *start, void *end)
static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}
-static inline int jump_label_apply_nops(struct module *mod)
-{
- return 0;
-}
-
static inline void static_key_enable(struct static_key *key)
{
STATIC_KEY_CHECK_USE(key);
@@ -331,6 +348,8 @@ static inline void static_key_disable(struct static_key *key)
#endif /* CONFIG_JUMP_LABEL */
+DEFINE_LOCK_GUARD_0(jump_label_lock, jump_label_lock(), jump_label_unlock())
+
#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled
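
With the refcount semantics above, static_key_slow_inc() now reports whether
the reference was actually taken (a negative ->enabled marks the key as
disabled). A minimal sketch of a caller, assuming a key defined with
STATIC_KEY_INIT_FALSE:

	static struct static_key my_key = STATIC_KEY_INIT_FALSE;

	static int my_feature_ref(void)
	{
		/* Fails instead of wrapping when the key is disabled. */
		if (!static_key_slow_inc(&my_key))
			return -EBUSY;
		return 0;
	}

	static void my_feature_unref(void)
	{
		static_key_slow_dec(&my_key);
	}
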
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 465060acc981..d5dd54c53ace 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -7,6 +7,7 @@
#define _LINUX_KALLSYMS_H
#include <linux/errno.h>
+#include <linux/buildid.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/mm.h>
@@ -14,32 +15,25 @@
#include <asm/sections.h>
-#define KSYM_NAME_LEN 128
-#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
- 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
+#define KSYM_NAME_LEN 512
+#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s %s]") + \
+ (KSYM_NAME_LEN - 1) + \
+ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + \
+ (BUILD_ID_SIZE_MAX * 2) + 1)
struct cred;
struct module;
-static inline int is_kernel_inittext(unsigned long addr)
-{
- if (addr >= (unsigned long)_sinittext
- && addr <= (unsigned long)_einittext)
- return 1;
- return 0;
-}
-
static inline int is_kernel_text(unsigned long addr)
{
- if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
- arch_is_kernel_text(addr))
+ if (__is_kernel_text(addr))
return 1;
return in_gate_area_no_mm(addr);
}
static inline int is_kernel(unsigned long addr)
{
- if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
+ if (__is_kernel(addr))
return 1;
return in_gate_area_no_mm(addr);
}
@@ -54,16 +48,15 @@ static inline int is_ksym_addr(unsigned long addr)
static inline void *dereference_symbol_descriptor(void *ptr)
{
-#ifdef HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR
+#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
struct module *mod;
ptr = dereference_kernel_function_descriptor(ptr);
if (is_ksym_addr((unsigned long)ptr))
return ptr;
- preempt_disable();
+ guard(rcu)();
mod = __module_address((unsigned long)ptr);
- preempt_enable();
if (mod)
ptr = dereference_module_function_descriptor(mod, ptr);
@@ -71,11 +64,16 @@ static inline void *dereference_symbol_descriptor(void *ptr)
return ptr;
}
-int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
- unsigned long),
- void *data);
+/* How and when do we show kallsyms values? */
+extern bool kallsyms_show_value(const struct cred *cred);
#ifdef CONFIG_KALLSYMS
+unsigned long kallsyms_sym_address(int idx);
+int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
+ void *data);
+int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
+ const char *name, void *data);
+
/* Lookup the address for a symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name);
@@ -91,14 +89,12 @@ const char *kallsyms_lookup(unsigned long addr,
/* Look up a kernel symbol and return it in a text buffer. */
extern int sprint_symbol(char *buffer, unsigned long address);
+extern int sprint_symbol_build_id(char *buffer, unsigned long address);
extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
extern int sprint_backtrace(char *buffer, unsigned long address);
+extern int sprint_backtrace_build_id(char *buffer, unsigned long address);
int lookup_symbol_name(unsigned long addr, char *symname);
-int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
-
-/* How and when do we show kallsyms values? */
-extern bool kallsyms_show_value(const struct cred *cred);
#else /* !CONFIG_KALLSYMS */
@@ -128,6 +124,12 @@ static inline int sprint_symbol(char *buffer, unsigned long addr)
return 0;
}
+static inline int sprint_symbol_build_id(char *buffer, unsigned long address)
+{
+ *buffer = '\0';
+ return 0;
+}
+
static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr)
{
*buffer = '\0';
@@ -140,21 +142,28 @@ static inline int sprint_backtrace(char *buffer, unsigned long addr)
return 0;
}
-static inline int lookup_symbol_name(unsigned long addr, char *symname)
+static inline int sprint_backtrace_build_id(char *buffer, unsigned long addr)
{
- return -ERANGE;
+ *buffer = '\0';
+ return 0;
}
-static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name)
+static inline int lookup_symbol_name(unsigned long addr, char *symname)
{
return -ERANGE;
}
-static inline bool kallsyms_show_value(const struct cred *cred)
+static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
+ void *data)
{
- return false;
+ return -EOPNOTSUPP;
}
+static inline int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
+ const char *name, void *data)
+{
+ return -EOPNOTSUPP;
+}
#endif /*CONFIG_KALLSYMS*/
static inline void print_ip_sym(const char *loglvl, unsigned long ip)
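
kallsyms_on_each_symbol() above drops the module argument from its callback;
the walk stops once the callback returns non-zero. A minimal sketch (the
matching helper is illustrative):

	static int match_name(void *data, const char *name, unsigned long addr)
	{
		if (strcmp(name, data))
			return 0;	/* keep iterating */
		pr_info("%s is at %#lx\n", name, addr);
		return 1;		/* non-zero stops the walk */
	}

	/* somewhere with CONFIG_KALLSYMS=y: */
	kallsyms_on_each_symbol(match_name, (void *)"jiffies");
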
diff --git a/include/linux/kasan-enabled.h b/include/linux/kasan-enabled.h
new file mode 100644
index 000000000000..9eca967d8526
--- /dev/null
+++ b/include/linux/kasan-enabled.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KASAN_ENABLED_H
+#define _LINUX_KASAN_ENABLED_H
+
+#include <linux/static_key.h>
+
+#if defined(CONFIG_ARCH_DEFER_KASAN) || defined(CONFIG_KASAN_HW_TAGS)
+/*
+ * Global runtime flag for KASAN modes that need runtime control.
+ * Used by ARCH_DEFER_KASAN architectures and HW_TAGS mode.
+ */
+DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
+
+/*
+ * Runtime control for shadow memory initialization or HW_TAGS mode.
+ * Uses a static key for architectures that need deferred KASAN or HW_TAGS.
+ */
+static __always_inline bool kasan_enabled(void)
+{
+ return static_branch_likely(&kasan_flag_enabled);
+}
+
+static inline void kasan_enable(void)
+{
+ static_branch_enable(&kasan_flag_enabled);
+}
+#else
+/* For architectures that can enable KASAN early, use compile-time check. */
+static __always_inline bool kasan_enabled(void)
+{
+ return IS_ENABLED(CONFIG_KASAN);
+}
+
+static inline void kasan_enable(void) {}
+#endif /* CONFIG_ARCH_DEFER_KASAN || CONFIG_KASAN_HW_TAGS */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+static inline bool kasan_hw_tags_enabled(void)
+{
+ return kasan_enabled();
+}
+#else
+static inline bool kasan_hw_tags_enabled(void)
+{
+ return false;
+}
+#endif /* CONFIG_KASAN_HW_TAGS */
+
+#endif /* LINUX_KASAN_ENABLED_H */
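
The intended flow for an ARCH_DEFER_KASAN architecture is to boot with the
static key off and flip it once the shadow is usable; a rough sketch, where
the arch hook and shadow-setup function are hypothetical names:

	void __init arch_kasan_late_init(void)	/* hypothetical arch hook */
	{
		setup_kasan_shadow();	/* hypothetical: map shadow memory */
		kasan_enable();		/* from here on, kasan_enabled() is true */
	}
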
diff --git a/include/linux/kasan-tags.h b/include/linux/kasan-tags.h
new file mode 100644
index 000000000000..4f85f562512c
--- /dev/null
+++ b/include/linux/kasan-tags.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KASAN_TAGS_H
+#define _LINUX_KASAN_TAGS_H
+
+#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
+#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
+#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+#define KASAN_TAG_MIN 0xF0 /* minimum value for random tags */
+#else
+#define KASAN_TAG_MIN 0x00 /* minimum value for random tags */
+#endif
+
+#endif /* LINUX_KASAN_TAGS_H */
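
For the tag-based modes these constants describe the tag carried in a
pointer's top byte. A sketch of extracting it, assuming top-byte tagging as in
the software tag-based mode (the helper name is illustrative):

	static inline u8 ptr_tag(const void *ptr)
	{
		/* KASAN_TAG_KERNEL (0xFF) means an untagged native pointer. */
		return (u8)((unsigned long)ptr >> 56);
	}
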
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index b1678a61e6a7..f335c1d7b61d 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -2,11 +2,16 @@
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H
+#include <linux/bug.h>
+#include <linux/kasan-enabled.h>
+#include <linux/kasan-tags.h>
+#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>
struct kmem_cache;
struct page;
+struct slab;
struct vm_struct;
struct task_struct;
@@ -15,14 +20,18 @@ struct task_struct;
#include <linux/linkage.h>
#include <asm/kasan.h>
-/* kasan_data struct is used in KUnit tests for KASAN expected failures */
-struct kunit_kasan_expectation {
- bool report_expected;
- bool report_found;
-};
-
#endif
+typedef unsigned int __bitwise kasan_vmalloc_flags_t;
+
+#define KASAN_VMALLOC_NONE ((__force kasan_vmalloc_flags_t)0x00u)
+#define KASAN_VMALLOC_INIT ((__force kasan_vmalloc_flags_t)0x01u)
+#define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u)
+#define KASAN_VMALLOC_PROT_NORMAL ((__force kasan_vmalloc_flags_t)0x04u)
+
+#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */
+#define KASAN_VMALLOC_TLB_FLUSH 0x2 /* TLB flush */
+
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#include <linux/pgtable.h>
@@ -41,19 +50,21 @@ struct kunit_kasan_expectation {
#endif
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
-extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
-extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
-extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
+extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
+extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
+extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
+#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ KASAN_SHADOW_OFFSET;
}
+#endif
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);
@@ -79,50 +90,18 @@ static inline void kasan_disable_current(void) {}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
-#ifdef CONFIG_KASAN
-
-struct kasan_cache {
- int alloc_meta_offset;
- int free_meta_offset;
- bool is_kmalloc;
-};
-
#ifdef CONFIG_KASAN_HW_TAGS
-DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
-
-static __always_inline bool kasan_enabled(void)
-{
- return static_branch_likely(&kasan_flag_enabled);
-}
-
-static inline bool kasan_has_integrated_init(void)
-{
- return kasan_enabled();
-}
-
#else /* CONFIG_KASAN_HW_TAGS */
-static inline bool kasan_enabled(void)
-{
- return true;
-}
-
-static inline bool kasan_has_integrated_init(void)
-{
- return false;
-}
-
#endif /* CONFIG_KASAN_HW_TAGS */
-slab_flags_t __kasan_never_merge(void);
-static __always_inline slab_flags_t kasan_never_merge(void)
+static inline bool kasan_has_integrated_init(void)
{
- if (kasan_enabled())
- return __kasan_never_merge();
- return 0;
+ return kasan_hw_tags_enabled();
}
+#ifdef CONFIG_KASAN
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
@@ -130,67 +109,63 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
__kasan_unpoison_range(addr, size);
}
-void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
-static __always_inline void kasan_alloc_pages(struct page *page,
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_poison_pages(struct page *page,
unsigned int order, bool init)
{
if (kasan_enabled())
- __kasan_alloc_pages(page, order, init);
-}
-
-void __kasan_free_pages(struct page *page, unsigned int order, bool init);
-static __always_inline void kasan_free_pages(struct page *page,
- unsigned int order, bool init)
-{
- if (kasan_enabled())
- __kasan_free_pages(page, order, init);
-}
-
-void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
- slab_flags_t *flags);
-static __always_inline void kasan_cache_create(struct kmem_cache *cache,
- unsigned int *size, slab_flags_t *flags)
-{
- if (kasan_enabled())
- __kasan_cache_create(cache, size, flags);
-}
-
-void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
-static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
-{
- if (kasan_enabled())
- __kasan_cache_create_kmalloc(cache);
+ __kasan_poison_pages(page, order, init);
}
-size_t __kasan_metadata_size(struct kmem_cache *cache);
-static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
+bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline bool kasan_unpoison_pages(struct page *page,
+ unsigned int order, bool init)
{
if (kasan_enabled())
- return __kasan_metadata_size(cache);
- return 0;
+ return __kasan_unpoison_pages(page, order, init);
+ return false;
}
-void __kasan_poison_slab(struct page *page);
-static __always_inline void kasan_poison_slab(struct page *page)
+void __kasan_poison_slab(struct slab *slab);
+static __always_inline void kasan_poison_slab(struct slab *slab)
{
if (kasan_enabled())
- __kasan_poison_slab(page);
+ __kasan_poison_slab(slab);
}
-void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * temporarily unpoisons an object from a newly allocated slab without doing
+ * anything else. The object must later be repoisoned by
+ * kasan_poison_new_object().
+ */
+static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
void *object)
{
if (kasan_enabled())
- __kasan_unpoison_object_data(cache, object);
+ __kasan_unpoison_new_object(cache, object);
}
-void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
-static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
+void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
+/**
+ * kasan_poison_new_object - Repoison a new slab object.
+ * @cache: Cache the object belongs to.
+ * @object: Pointer to the object.
+ *
+ * This function is intended for the slab allocator's internal use. It
+ * repoisons an object that was previously unpoisoned by
+ * kasan_unpoison_new_object() without doing anything else.
+ */
+static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
void *object)
{
if (kasan_enabled())
- __kasan_poison_object_data(cache, object);
+ __kasan_poison_new_object(cache, object);
}
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
@@ -203,28 +178,69 @@ static __always_inline void * __must_check kasan_init_slab_obj(
return (void *)object;
}
-bool __kasan_slab_free(struct kmem_cache *s, void *object,
- unsigned long ip, bool init);
-static __always_inline bool kasan_slab_free(struct kmem_cache *s,
- void *object, bool init)
+bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
+ unsigned long ip);
+/**
+ * kasan_slab_pre_free - Check whether freeing a slab object is safe.
+ * @object: Object to be freed.
+ *
+ * This function checks whether freeing the given object is safe. It may
+ * check for double-free and invalid-free bugs and report them.
+ *
+ * This function is intended only for use by the slab allocator.
+ *
+ * Return: true if freeing the object is unsafe; false otherwise.
+ */
+static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
+ void *object)
{
if (kasan_enabled())
- return __kasan_slab_free(s, object, _RET_IP_, init);
+ return __kasan_slab_pre_free(s, object, _RET_IP_);
return false;
}
-void __kasan_kfree_large(void *ptr, unsigned long ip);
-static __always_inline void kasan_kfree_large(void *ptr)
+bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
+ bool still_accessible, bool no_quarantine);
+/**
+ * kasan_slab_free - Poison, initialize, and quarantine a slab object.
+ * @object: Object to be freed.
+ * @init: Whether to initialize the object.
+ * @still_accessible: Whether the object contents are still accessible.
+ *
+ * This function informs that a slab object has been freed and is not
+ * supposed to be accessed anymore, except when @still_accessible is set
+ * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU
+ * grace period might not have passed yet).
+ *
+ * For KASAN modes that have integrated memory initialization
+ * (kasan_has_integrated_init() == true), this function also initializes
+ * the object's memory. For other modes, the @init argument is ignored.
+ *
+ * This function might also take ownership of the object to quarantine it.
+ * When this happens, KASAN will defer freeing the object to a later
+ * stage and handle it internally until then. The return value indicates
+ * whether KASAN took ownership of the object.
+ *
+ * This function is intended only for use by the slab allocator.
+ *
+ * Return: true if KASAN took ownership of the object; false otherwise.
+ */
+static __always_inline bool kasan_slab_free(struct kmem_cache *s,
+ void *object, bool init,
+ bool still_accessible,
+ bool no_quarantine)
{
if (kasan_enabled())
- __kasan_kfree_large(ptr, _RET_IP_);
+ return __kasan_slab_free(s, object, init, still_accessible,
+ no_quarantine);
+ return false;
}
-void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
-static __always_inline void kasan_slab_free_mempool(void *ptr)
+void __kasan_kfree_large(void *ptr, unsigned long ip);
+static __always_inline void kasan_kfree_large(void *ptr)
{
if (kasan_enabled())
- __kasan_slab_free_mempool(ptr, _RET_IP_);
+ __kasan_kfree_large(ptr, _RET_IP_);
}
void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
@@ -267,6 +283,113 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
return (void *)object;
}
+bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
+ unsigned long ip);
+/**
+ * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function is similar to kasan_mempool_poison_object() but operates on
+ * page allocations.
+ *
+ * Before the poisoned allocation can be reused, it must be unpoisoned via
+ * kasan_mempool_unpoison_pages().
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
+ */
+static __always_inline bool kasan_mempool_poison_pages(struct page *page,
+ unsigned int order)
+{
+ if (kasan_enabled())
+ return __kasan_mempool_poison_pages(page, order, _RET_IP_);
+ return true;
+}
+
+void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
+ unsigned long ip);
+/**
+ * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
+ * @page: Pointer to the page allocation.
+ * @order: Order of the allocation.
+ *
+ * This function is intended for kernel subsystems that cache page allocations
+ * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
+ *
+ * This function unpoisons a page allocation that was previously poisoned by
+ * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
+ * the tag-based modes, this function assigns a new tag to the allocation.
+ */
+static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
+ unsigned int order)
+{
+ if (kasan_enabled())
+ __kasan_mempool_unpoison_pages(page, order, _RET_IP_);
+}
+
+bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
+/**
+ * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
+ * @ptr: Pointer to the slab allocation.
+ *
+ * This function is intended for kernel subsystems that cache slab allocations
+ * to reuse them instead of freeing them back to the slab allocator (e.g.
+ * mempool).
+ *
+ * This function poisons a slab allocation and saves a free stack trace for it
+ * without initializing the allocation's memory and without putting it into the
+ * quarantine (for the Generic mode).
+ *
+ * This function also performs checks to detect double-free and invalid-free
+ * bugs and reports them. The caller can use the return value of this function
+ * to find out if the allocation is buggy.
+ *
+ * Before the poisoned allocation can be reused, it must be unpoisoned via
+ * kasan_mempool_unpoison_object().
+ *
+ * This function operates on all slab allocations including large kmalloc
+ * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
+ * size > KMALLOC_MAX_CACHE_SIZE).
+ *
+ * Return: true if the allocation can be safely reused; false otherwise.
+ */
+static __always_inline bool kasan_mempool_poison_object(void *ptr)
+{
+ if (kasan_enabled())
+ return __kasan_mempool_poison_object(ptr, _RET_IP_);
+ return true;
+}
+
+void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
+/**
+ * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
+ * @ptr: Pointer to the slab allocation.
+ * @size: Size to be unpoisoned.
+ *
+ * This function is intended for kernel subsystems that cache slab allocations
+ * to reuse them instead of freeing them back to the slab allocator (e.g.
+ * mempool).
+ *
+ * This function unpoisons a slab allocation that was previously poisoned via
+ * kasan_mempool_poison_object() and saves an alloc stack trace for it without
+ * initializing the allocation's memory. For the tag-based modes, this function
+ * does not assign a new tag to the allocation and instead restores the
+ * original tags based on the pointer value.
+ *
+ * This function operates on all slab allocations including large kmalloc
+ * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
+ * size > KMALLOC_MAX_CACHE_SIZE).
+ */
+static __always_inline void kasan_mempool_unpoison_object(void *ptr,
+ size_t size)
+{
+ if (kasan_enabled())
+ __kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
+}
+
/*
* Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
* the hardware tag-based mode that doesn't rely on compiler instrumentation.
@@ -279,48 +402,39 @@ static __always_inline bool kasan_check_byte(const void *addr)
return true;
}
-
-bool kasan_save_enable_multi_shot(void);
-void kasan_restore_multi_shot(bool enabled);
-
#else /* CONFIG_KASAN */
-static inline bool kasan_enabled(void)
-{
- return false;
-}
-static inline bool kasan_has_integrated_init(void)
+static inline void kasan_unpoison_range(const void *address, size_t size) {}
+static inline void kasan_poison_pages(struct page *page, unsigned int order,
+ bool init) {}
+static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
+ bool init)
{
return false;
}
-static inline slab_flags_t kasan_never_merge(void)
-{
- return 0;
-}
-static inline void kasan_unpoison_range(const void *address, size_t size) {}
-static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
-static inline void kasan_cache_create(struct kmem_cache *cache,
- unsigned int *size,
- slab_flags_t *flags) {}
-static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
-static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
-static inline void kasan_poison_slab(struct page *page) {}
-static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+static inline void kasan_poison_slab(struct slab *slab) {}
+static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
void *object) {}
-static inline void kasan_poison_object_data(struct kmem_cache *cache,
+static inline void kasan_poison_new_object(struct kmem_cache *cache,
void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
const void *object)
{
return (void *)object;
}
-static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
+
+static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
+{
+ return false;
+}
+
+static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
+ bool init, bool still_accessible,
+ bool no_quarantine)
{
return false;
}
static inline void kasan_kfree_large(void *ptr) {}
-static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags, bool init)
{
@@ -340,6 +454,17 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
{
return (void *)object;
}
+static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
+{
+ return true;
+}
+static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
+static inline bool kasan_mempool_poison_object(void *ptr)
+{
+ return true;
+}
+static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}
+
static inline bool kasan_check_byte(const void *address)
{
return true;
@@ -349,18 +474,40 @@ static inline bool kasan_check_byte(const void *address)
#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
+asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif
#ifdef CONFIG_KASAN_GENERIC
+struct kasan_cache {
+ int alloc_meta_offset;
+ int free_meta_offset;
+};
+
+size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
+ slab_flags_t *flags);
+
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
#else /* CONFIG_KASAN_GENERIC */
+/* Tag-based KASAN modes do not use per-object metadata. */
+static inline size_t kasan_metadata_size(struct kmem_cache *cache,
+ bool in_object)
+{
+ return 0;
+}
+/* And no cache-related metadata initialization is required. */
+static inline void kasan_cache_create(struct kmem_cache *cache,
+ unsigned int *size,
+ slab_flags_t *flags) {}
+
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
@@ -381,7 +528,7 @@ static inline void *kasan_reset_tag(const void *addr)
* @is_write: whether the bad access is a write or a read
* @ip: instruction pointer for the accessibility check or the bad access itself
*/
-bool kasan_report(unsigned long addr, size_t size,
+bool kasan_report(const void *addr, size_t size,
bool is_write, unsigned long ip);
#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
@@ -399,6 +546,12 @@ void kasan_report_async(void);
#endif /* CONFIG_KASAN_HW_TAGS */
+#ifdef CONFIG_KASAN_GENERIC
+void __init kasan_init_generic(void);
+#else
+static inline void kasan_init_generic(void) { }
+#endif
+
#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
@@ -415,29 +568,91 @@ static inline void kasan_init_hw_tags(void) { }
#ifdef CONFIG_KASAN_VMALLOC
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
-void kasan_poison_vmalloc(const void *start, unsigned long size);
-void kasan_unpoison_vmalloc(const void *start, unsigned long size);
-void kasan_release_vmalloc(unsigned long start, unsigned long end,
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+
+void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
+int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
+static inline int kasan_populate_vmalloc(unsigned long addr,
+ unsigned long size, gfp_t gfp_mask)
+{
+ if (kasan_enabled())
+ return __kasan_populate_vmalloc(addr, size, gfp_mask);
+ return 0;
+}
+void __kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long free_region_start,
- unsigned long free_region_end);
+ unsigned long free_region_end,
+ unsigned long flags);
+static inline void kasan_release_vmalloc(unsigned long start, unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end,
+ unsigned long flags)
+{
+ if (kasan_enabled())
+ return __kasan_release_vmalloc(start, end, free_region_start,
+ free_region_end, flags);
+}
-#else /* CONFIG_KASAN_VMALLOC */
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+static inline void kasan_populate_early_vm_area_shadow(void *start,
+ unsigned long size)
+{ }
static inline int kasan_populate_vmalloc(unsigned long start,
- unsigned long size)
+ unsigned long size, gfp_t gfp_mask)
{
return 0;
}
+static inline void kasan_release_vmalloc(unsigned long start,
+ unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end,
+ unsigned long flags) { }
-static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
-{ }
-static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
-{ }
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+
+void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
+ kasan_vmalloc_flags_t flags);
+static __always_inline void *kasan_unpoison_vmalloc(const void *start,
+ unsigned long size,
+ kasan_vmalloc_flags_t flags)
+{
+ if (kasan_enabled())
+ return __kasan_unpoison_vmalloc(start, size, flags);
+ return (void *)start;
+}
+
+void __kasan_poison_vmalloc(const void *start, unsigned long size);
+static __always_inline void kasan_poison_vmalloc(const void *start,
+ unsigned long size)
+{
+ if (kasan_enabled())
+ __kasan_poison_vmalloc(start, size);
+}
+
+#else /* CONFIG_KASAN_VMALLOC */
+
+static inline void kasan_populate_early_vm_area_shadow(void *start,
+ unsigned long size) { }
+static inline int kasan_populate_vmalloc(unsigned long start,
+ unsigned long size, gfp_t gfp_mask)
+{
+ return 0;
+}
static inline void kasan_release_vmalloc(unsigned long start,
unsigned long end,
unsigned long free_region_start,
- unsigned long free_region_end) {}
+ unsigned long free_region_end,
+ unsigned long flags) { }
+
+static inline void *kasan_unpoison_vmalloc(const void *start,
+ unsigned long size,
+ kasan_vmalloc_flags_t flags)
+{
+ return (void *)start;
+}
+static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
+{ }
#endif /* CONFIG_KASAN_VMALLOC */
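
A sketch of how a vmalloc-style allocator might use the flag-based API above:
unpoison (and possibly retag) at allocation time, poison at free time. The
addr/size variables and want_init condition are illustrative:

	/* at allocation time, after mapping the pages: */
	addr = kasan_unpoison_vmalloc(addr, size,
				      KASAN_VMALLOC_VM_ALLOC |
				      KASAN_VMALLOC_PROT_NORMAL |
				      (want_init ? KASAN_VMALLOC_INIT :
						   KASAN_VMALLOC_NONE));

	/* at free time, before tearing down the mapping: */
	kasan_poison_vmalloc(addr, size);
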
@@ -445,24 +660,24 @@ static inline void kasan_release_vmalloc(unsigned long start,
!defined(CONFIG_KASAN_VMALLOC)
/*
- * These functions provide a special case to support backing module
- * allocations with real shadow memory. With KASAN vmalloc, the special
- * case is unnecessary, as the work is handled in the generic case.
+ * These functions allocate and free shadow memory for kernel modules.
+ * They are only required when KASAN_VMALLOC is not supported, as otherwise
+ * shadow memory is allocated by the generic vmalloc handlers.
*/
-int kasan_module_alloc(void *addr, size_t size);
-void kasan_free_shadow(const struct vm_struct *vm);
+int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
+void kasan_free_module_shadow(const struct vm_struct *vm);
#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
-static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
-static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
+static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
-#else /* CONFIG_KASAN_INLINE */
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
-#endif /* CONFIG_KASAN_INLINE */
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
#endif /* LINUX_KASAN_H */
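
Putting the mempool helpers documented above together, the cache-and-reuse
pattern they describe looks roughly like this; the pool structure and field
names are illustrative:

	/* caching a freed element instead of returning it to the allocator: */
	static bool stash_element(struct my_pool *pool, void *elem)	/* hypothetical */
	{
		if (!kasan_mempool_poison_object(elem))
			return false;	/* double-/invalid-free: do not reuse */
		pool->cached = elem;
		return true;
	}

	/* handing a cached element back out: */
	static void *take_element(struct my_pool *pool, size_t size)
	{
		void *elem = pool->cached;

		kasan_mempool_unpoison_object(elem, size);
		return elem;
	}
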
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index cc8fa109cfa3..20d1079e92b4 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -51,7 +51,8 @@
/*
* IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
- * otherwise.
+ * otherwise. CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1" in
+ * autoconf.h.
*/
#define IS_MODULE(option) __is_defined(option##_MODULE)
@@ -66,7 +67,8 @@
/*
* IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
- * 0 otherwise.
+ * 0 otherwise. Note that CONFIG_FOO=y results in "#define CONFIG_FOO 1" in
+ * autoconf.h, while CONFIG_FOO=m results in "#define CONFIG_FOO_MODULE 1".
*/
#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
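
Because IS_ENABLED() always expands to a constant 0 or 1, it can be used in
ordinary C conditionals and the compiler eliminates the dead branch. A sketch
with hypothetical helpers:

	if (IS_ENABLED(CONFIG_NUMA))
		setup_numa_policy();	/* compiled out when CONFIG_NUMA is unset */
	else
		setup_flat_policy();
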
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index da676cdbd727..9a2fa013c91d 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -11,31 +11,15 @@ enum kcore_type {
KCORE_RAM,
KCORE_VMEMMAP,
KCORE_USER,
- KCORE_OTHER,
- KCORE_REMAP,
};
struct kcore_list {
struct list_head list;
unsigned long addr;
- unsigned long vaddr;
size_t size;
int type;
};
-struct vmcore {
- struct list_head list;
- unsigned long long paddr;
- unsigned long long size;
- loff_t offset;
-};
-
-struct vmcoredd_node {
- struct list_head list; /* List of dumps */
- void *buf; /* Buffer containing device's dump */
- unsigned int size; /* Size of the buffer */
-};
-
#ifdef CONFIG_PROC_KCORE
void __init kclist_add(struct kcore_list *, void *, size_t, int type);
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index 55dc338f6bcd..0143358874b0 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -21,6 +21,8 @@ enum kcov_mode {
KCOV_MODE_TRACE_PC = 2,
/* Collecting comparison operands mode. */
KCOV_MODE_TRACE_CMP = 3,
+ /* The process owns a KCOV remote reference. */
+ KCOV_MODE_REMOTE = 4,
};
#define KCOV_IN_CTXSW (1 << 30)
@@ -56,22 +58,39 @@ static inline void kcov_remote_start_usb(u64 id)
/*
* The softirq flavor of kcov_remote_*() functions is introduced as a temporary
* work around for kcov's lack of nested remote coverage sections support in
- * task context. Adding suport for nested sections is tracked in:
+ * task context. Adding support for nested sections is tracked in:
* https://bugzilla.kernel.org/show_bug.cgi?id=210337
*/
static inline void kcov_remote_start_usb_softirq(u64 id)
{
- if (in_serving_softirq())
+ if (in_serving_softirq() && !in_hardirq())
kcov_remote_start_usb(id);
}
static inline void kcov_remote_stop_softirq(void)
{
- if (in_serving_softirq())
+ if (in_serving_softirq() && !in_hardirq())
kcov_remote_stop();
}
+#ifdef CONFIG_64BIT
+typedef unsigned long kcov_u64;
+#else
+typedef unsigned long long kcov_u64;
+#endif
+
+void __sanitizer_cov_trace_pc(void);
+void __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2);
+void __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2);
+void __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2);
+void __sanitizer_cov_trace_cmp8(kcov_u64 arg1, kcov_u64 arg2);
+void __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2);
+void __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2);
+void __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2);
+void __sanitizer_cov_trace_const_cmp8(kcov_u64 arg1, kcov_u64 arg2);
+void __sanitizer_cov_trace_switch(kcov_u64 val, void *cases);
+
#else
static inline void kcov_task_init(struct task_struct *t) {}
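
A sketch of how the softirq variants above are meant to bracket a remote
coverage section in a completion handler, modeled on the USB HCD usage; the
handler and work function are illustrative:

	static void my_complete(struct urb *urb)	/* hypothetical handler */
	{
		kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
		handle_urb(urb);	/* hypothetical: coverage attributed remotely */
		kcov_remote_stop_softirq();
	}
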
diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index 9fd0ad80fef6..92f3843d9ebb 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -36,6 +36,36 @@
*/
void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
+/*
+ * See definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
+ * Note: The mappings are arbitrary, and do not reflect any real mappings of C11
+ * memory orders to the LKMM memory orders and vice-versa!
+ */
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_mb __ATOMIC_SEQ_CST
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb __ATOMIC_ACQ_REL
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_rmb __ATOMIC_ACQUIRE
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_release __ATOMIC_RELEASE
+
+/**
+ * __kcsan_mb - full memory barrier instrumentation
+ */
+void __kcsan_mb(void);
+
+/**
+ * __kcsan_wmb - write memory barrier instrumentation
+ */
+void __kcsan_wmb(void);
+
+/**
+ * __kcsan_rmb - read memory barrier instrumentation
+ */
+void __kcsan_rmb(void);
+
+/**
+ * __kcsan_release - release barrier instrumentation
+ */
+void __kcsan_release(void);
+
/**
* kcsan_disable_current - disable KCSAN for the current context
*
@@ -99,10 +129,21 @@ void kcsan_set_access_mask(unsigned long mask);
/* Scoped access information. */
struct kcsan_scoped_access {
- struct list_head list;
+ union {
+ struct list_head list; /* scoped_accesses list */
+ /*
+ * Not an entry in scoped_accesses list; stack depth from where
+ * the access was initialized.
+ */
+ int stack_depth;
+ };
+
+ /* Access information. */
const volatile void *ptr;
size_t size;
int type;
+ /* Location where scoped access was set up. */
+ unsigned long ip;
};
/*
* Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes
@@ -148,6 +189,10 @@ void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
int type) { }
+static inline void __kcsan_mb(void) { }
+static inline void __kcsan_wmb(void) { }
+static inline void __kcsan_rmb(void) { }
+static inline void __kcsan_release(void) { }
static inline void kcsan_disable_current(void) { }
static inline void kcsan_enable_current(void) { }
static inline void kcsan_enable_current_nowarn(void) { }
@@ -180,12 +225,47 @@ static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }
*/
#define __kcsan_disable_current kcsan_disable_current
#define __kcsan_enable_current kcsan_enable_current_nowarn
-#else
+#else /* __SANITIZE_THREAD__ */
static inline void kcsan_check_access(const volatile void *ptr, size_t size,
int type) { }
static inline void __kcsan_enable_current(void) { }
static inline void __kcsan_disable_current(void) { }
-#endif
+#endif /* __SANITIZE_THREAD__ */
+
+#if defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__SANITIZE_THREAD__)
+/*
+ * Normal barrier instrumentation is not done via explicit calls, but by mapping
+ * to a repurposed __atomic_signal_fence(), which normally does not generate any
+ * real instructions, but is still intercepted by -fsanitize=thread. This means,
+ * like any other compile-time instrumentation, barrier instrumentation can be
+ * disabled with the __no_kcsan function attribute.
+ *
+ * Also see definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
+ *
+ * These are all macros, like <asm/barrier.h>, since some architectures use them
+ * in non-static inline functions.
+ */
+#define __KCSAN_BARRIER_TO_SIGNAL_FENCE(name) \
+ do { \
+ barrier(); \
+ __atomic_signal_fence(__KCSAN_BARRIER_TO_SIGNAL_FENCE_##name); \
+ barrier(); \
+ } while (0)
+#define kcsan_mb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(mb)
+#define kcsan_wmb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(wmb)
+#define kcsan_rmb() __KCSAN_BARRIER_TO_SIGNAL_FENCE(rmb)
+#define kcsan_release() __KCSAN_BARRIER_TO_SIGNAL_FENCE(release)
+#elif defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__KCSAN_INSTRUMENT_BARRIERS__)
+#define kcsan_mb __kcsan_mb
+#define kcsan_wmb __kcsan_wmb
+#define kcsan_rmb __kcsan_rmb
+#define kcsan_release __kcsan_release
+#else /* CONFIG_KCSAN_WEAK_MEMORY && ... */
+#define kcsan_mb() do { } while (0)
+#define kcsan_wmb() do { } while (0)
+#define kcsan_rmb() do { } while (0)
+#define kcsan_release() do { } while (0)
+#endif /* CONFIG_KCSAN_WEAK_MEMORY && ... */
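
The kcsan_*() macros above are meant to sit next to the real barrier so KCSAN
can model its ordering effect. A sketch of how an architecture (or
asm-generic/barrier.h) might pair them, with __arch_*() standing in for the
real instructions:

	#define mb()	do { kcsan_mb();  __arch_mb();  } while (0)
	#define wmb()	do { kcsan_wmb(); __arch_wmb(); } while (0)
	#define rmb()	do { kcsan_rmb(); __arch_rmb(); } while (0)
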
/**
* __kcsan_check_read - check regular read access for races
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h
index fc266ecb2a4d..c07c71f5ba4f 100644
--- a/include/linux/kcsan.h
+++ b/include/linux/kcsan.h
@@ -21,6 +21,7 @@
*/
struct kcsan_ctx {
int disable_count; /* disable counter */
+ int disable_scoped; /* disable scoped access counter */
int atomic_next; /* number of following atomic ops */
/*
@@ -48,8 +49,16 @@ struct kcsan_ctx {
*/
unsigned long access_mask;
- /* List of scoped accesses. */
+ /* List of scoped accesses; likely to be empty. */
struct list_head scoped_accesses;
+
+#ifdef CONFIG_KCSAN_WEAK_MEMORY
+ /*
+ * Scoped access for modeling access reordering to detect missing memory
+ * barriers; only keep 1 to keep fast-path complexity manageable.
+ */
+ struct kcsan_scoped_access reorder_access;
+#endif
};
/**
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 0125a677b67f..741c58e86431 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -13,6 +13,9 @@
* Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
*/
+#include <linux/list.h>
+#include <linux/smp.h>
+
/* Shifted versions of the command enable bits are used if the command
* has no arguments (see kdb_check_flags). This allows commands, such as
* go, to have different permissions depending upon whether it is called
@@ -64,6 +67,17 @@ typedef enum {
typedef int (*kdb_func_t)(int, const char **);
+/* The KDB shell command table */
+typedef struct _kdbtab {
+ char *name; /* Command name */
+ kdb_func_t func; /* Function to execute command */
+ char *usage; /* Usage String for this command */
+ char *help; /* Help message for this command */
+ short minlen; /* Minimum legal # cmd chars required */
+ kdb_cmdflags_t flags; /* Command behaviour flags */
+ struct list_head list_node; /* Command list */
+} kdbtab_t;
+
#ifdef CONFIG_KGDB_KDB
#include <linux/init.h>
#include <linux/sched.h>
@@ -91,7 +105,7 @@ extern int kdb_initial_cpu;
#define KDB_NOENVVALUE (-6)
#define KDB_NOTIMP (-7)
#define KDB_ENVFULL (-8)
-#define KDB_ENVBUFFULL (-9)
+#define KDB_KMALLOCFAILED (-9)
#define KDB_TOOMANYBPT (-10)
#define KDB_TOOMANYDBREGS (-11)
#define KDB_DUPBPT (-12)
@@ -127,9 +141,6 @@ extern const char *kdb_diemsg;
extern unsigned int kdb_flags; /* Global flags, see kdb_state for per cpu state */
-extern void kdb_save_flags(void);
-extern void kdb_restore_flags(void);
-
#define KDB_FLAG(flag) (kdb_flags & KDB_FLAG_##flag)
#define KDB_FLAG_SET(flag) ((void)(kdb_flags |= KDB_FLAG_##flag))
#define KDB_FLAG_CLEAR(flag) ((void)(kdb_flags &= ~KDB_FLAG_##flag))
@@ -183,6 +194,8 @@ int kdb_process_cpu(const struct task_struct *p)
return cpu;
}
+extern void kdb_send_sig(struct task_struct *p, int sig);
+
#ifdef CONFIG_KALLSYMS
extern const char *kdb_walk_kallsyms(loff_t *pos);
#else /* ! CONFIG_KALLSYMS */
@@ -193,19 +206,28 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
#endif /* ! CONFIG_KALLSYMS */
/* Dynamic kdb shell command registration */
-extern int kdb_register(char *, kdb_func_t, char *, char *, short);
-extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
- short, kdb_cmdflags_t);
-extern int kdb_unregister(char *);
+extern int kdb_register(kdbtab_t *cmd);
+extern void kdb_unregister(kdbtab_t *cmd);
+
+/* Return true when KDB has locked message printing to this CPU. */
+static inline
+bool kdb_printf_on_this_cpu(void)
+{
+ /*
+	 * We can use raw_smp_processor_id() here because the task cannot
+	 * be migrated while KDB has message printing locked to this CPU.
+ */
+ return unlikely(READ_ONCE(kdb_printf_cpu) == raw_smp_processor_id());
+}
+
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
static inline void kdb_init(int level) {}
-static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
- char *help, short minlen) { return 0; }
-static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
- char *help, short minlen,
- kdb_cmdflags_t flags) { return 0; }
-static inline int kdb_unregister(char *cmd) { return 0; }
+static inline int kdb_register(kdbtab_t *cmd) { return 0; }
+static inline void kdb_unregister(kdbtab_t *cmd) {}
+
+static inline bool kdb_printf_on_this_cpu(void) { return false; }
+
#endif /* CONFIG_KGDB_KDB */
enum {
KDB_NOT_INITIALIZED,
@@ -215,5 +237,6 @@ enum {
extern int kdbgetintenv(const char *, int *);
extern int kdb_set(int, const char **);
+int kdb_lsmod(int argc, const char **argv);
#endif /* !_KDB_H */
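
Registration is now table-based: callers fill in a kdbtab_t and pass its
address, rather than using the old multi-argument kdb_register_flags(). A
minimal sketch (the command body is illustrative):

	static int kdb_hello(int argc, const char **argv)
	{
		kdb_printf("hello from kdb\n");
		return 0;
	}

	static kdbtab_t hello_cmd = {
		.name	= "hello",
		.func	= kdb_hello,
		.usage	= "hello",
		.help	= "Print a test greeting",
	};

	/* during subsystem init: */
	kdb_register(&hello_cmd);
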
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index eee1877a354e..196778a087c4 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -10,13 +10,13 @@
*/
#define KPF_RESERVED 32
#define KPF_MLOCKED 33
-#define KPF_MAPPEDTODISK 34
+#define KPF_OWNER_2 34
#define KPF_PRIVATE 35
#define KPF_PRIVATE_2 36
#define KPF_OWNER_PRIVATE 37
#define KPF_ARCH 38
-#define KPF_UNCACHED 39
#define KPF_SOFTDIRTY 40
#define KPF_ARCH_2 41
+#define KPF_ARCH_3 42
#endif /* LINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 15d8bad3d2f2..5b46924fdff5 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -1,86 +1,56 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NOTE:
+ *
+ * This header combines a lot of mutually unrelated stuff.
+ * The process of splitting its content up is in progress while keeping
+ * backward compatibility. That's why it's highly recommended NOT to
+ * include this header inside another header file, especially under a
+ * generic or architectural include/ directory.
+ */
#ifndef _LINUX_KERNEL_H
#define _LINUX_KERNEL_H
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/align.h>
+#include <linux/array_size.h>
#include <linux/limits.h>
#include <linux/linkage.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/compiler.h>
+#include <linux/container_of.h>
#include <linux/bitops.h>
+#include <linux/hex.h>
+#include <linux/kstrtox.h>
#include <linux/log2.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/typecheck.h>
+#include <linux/panic.h>
#include <linux/printk.h>
#include <linux/build_bug.h>
+#include <linux/sprintf.h>
#include <linux/static_call_types.h>
+#include <linux/instruction_pointer.h>
+#include <linux/util_macros.h>
+#include <linux/wordpart.h>
+
#include <asm/byteorder.h>
#include <uapi/linux/kernel.h>
#define STACK_MAGIC 0xdeadbeef
-/**
- * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
- * @x: value to repeat
- *
- * NOTE: @x is not checked for > 0xff; larger values produce odd results.
- */
-#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
-
-/* generic data direction definitions */
-#define READ 0
-#define WRITE 1
-
-/**
- * ARRAY_SIZE - get the number of elements in array @arr
- * @arr: array to be sized
- */
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
-
-#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL)
-
-#define u64_to_user_ptr(x) ( \
-{ \
- typecheck(u64, (x)); \
- (void __user *)(uintptr_t)(x); \
-} \
-)
-
-#define typeof_member(T, m) typeof(((T*)0)->m)
-
-#define _RET_IP_ (unsigned long)__builtin_return_address(0)
-#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
-
-/**
- * upper_32_bits - return bits 32-63 of a number
- * @n: the number we're accessing
- *
- * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
- * the "right shift count >= width of type" warning when that quantity is
- * 32-bits.
- */
-#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
-
-/**
- * lower_32_bits - return bits 0-31 of a number
- * @n: the number we're accessing
- */
-#define lower_32_bits(n) ((u32)((n) & 0xffffffff))
-
struct completion;
-struct pt_regs;
struct user;
-#ifdef CONFIG_PREEMPT_VOLUNTARY
+#ifdef CONFIG_PREEMPT_VOLUNTARY_BUILD
extern int __cond_resched(void);
# define might_resched() __cond_resched()
-#elif defined(CONFIG_PREEMPT_DYNAMIC)
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
extern int __cond_resched(void);
@@ -91,6 +61,11 @@ static __always_inline void might_resched(void)
static_call_mod(might_resched)();
}
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+extern int dynamic_might_resched(void);
+# define might_resched() dynamic_might_resched()
+
#else
# define might_resched() do { } while (0)
@@ -98,8 +73,8 @@ static __always_inline void might_resched(void)
#endif /* CONFIG_PREEMPT_* */
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-extern void ___might_sleep(const char *file, int line, int preempt_offset);
-extern void __might_sleep(const char *file, int line, int preempt_offset);
+extern void __might_resched(const char *file, int line, unsigned int offsets);
+extern void __might_sleep(const char *file, int line);
extern void __cant_sleep(const char *file, int line, int preempt_offset);
extern void __cant_migrate(const char *file, int line);
@@ -116,7 +91,7 @@ extern void __cant_migrate(const char *file, int line);
* supposed to.
*/
# define might_sleep() \
- do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
+ do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
/**
* cant_sleep - annotation for functions that cannot sleep
*
@@ -155,10 +130,9 @@ extern void __cant_migrate(const char *file, int line);
*/
# define non_block_end() WARN_ON(current->non_block_count-- == 0)
#else
- static inline void ___might_sleep(const char *file, int line,
- int preempt_offset) { }
- static inline void __might_sleep(const char *file, int line,
- int preempt_offset) { }
+ static inline void __might_resched(const char *file, int line,
+ unsigned int offsets) { }
+static inline void __might_sleep(const char *file, int line) { }
# define might_sleep() do { might_resched(); } while (0)
# define cant_sleep() do { } while (0)
# define cant_migrate() do { } while (0)
@@ -177,320 +151,46 @@ void __might_fault(const char *file, int line);
static inline void might_fault(void) { }
#endif
-extern struct atomic_notifier_head panic_notifier_list;
-extern long (*panic_blink)(int state);
-__printf(1, 2)
-void panic(const char *fmt, ...) __noreturn __cold;
-void nmi_panic(struct pt_regs *regs, const char *msg);
-extern void oops_enter(void);
-extern void oops_exit(void);
-extern bool oops_may_print(void);
void do_exit(long error_code) __noreturn;
-void complete_and_exit(struct completion *, long) __noreturn;
-
-/* Internal, do not use. */
-int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
-int __must_check _kstrtol(const char *s, unsigned int base, long *res);
-
-int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
-int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
-
-/**
- * kstrtoul - convert a string to an unsigned long
- * @s: The start of the string. The string must be null-terminated, and may also
- * include a single newline before its terminating null. The first character
- * may also be a plus sign, but not a minus sign.
- * @base: The number base to use. The maximum supported base is 16. If base is
- * given as 0, then the base of the string is automatically detected with the
- * conventional semantics - If it begins with 0x the number will be parsed as a
- * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
- * parsed as an octal number. Otherwise it will be parsed as a decimal.
- * @res: Where to write the result of the conversion on success.
- *
- * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Preferred over simple_strtoul(). Return code must be checked.
-*/
-static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
-{
- /*
- * We want to shortcut function call, but
- * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
- */
- if (sizeof(unsigned long) == sizeof(unsigned long long) &&
- __alignof__(unsigned long) == __alignof__(unsigned long long))
- return kstrtoull(s, base, (unsigned long long *)res);
- else
- return _kstrtoul(s, base, res);
-}
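
Usage of these helpers is unchanged by the move (they now reach kernel.h
through the <linux/kstrtox.h> include added above). A typical call, sketched;
base 0 auto-detects hex, octal, or decimal:

	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);	/* buf: NUL-terminated numeric string */
	if (ret)
		return ret;	/* -EINVAL (parse error) or -ERANGE (overflow) */
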
-
-/**
- * kstrtol - convert a string to a long
- * @s: The start of the string. The string must be null-terminated, and may also
- * include a single newline before its terminating null. The first character
- * may also be a plus sign or a minus sign.
- * @base: The number base to use. The maximum supported base is 16. If base is
- * given as 0, then the base of the string is automatically detected with the
- * conventional semantics - If it begins with 0x the number will be parsed as a
- * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
- * parsed as an octal number. Otherwise it will be parsed as a decimal.
- * @res: Where to write the result of the conversion on success.
- *
- * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Preferred over simple_strtol(). Return code must be checked.
- */
-static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
-{
- /*
- * We want to shortcut function call, but
- * __builtin_types_compatible_p(long, long long) = 0.
- */
- if (sizeof(long) == sizeof(long long) &&
- __alignof__(long) == __alignof__(long long))
- return kstrtoll(s, base, (long long *)res);
- else
- return _kstrtol(s, base, res);
-}
-
-int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
-int __must_check kstrtoint(const char *s, unsigned int base, int *res);
-
-static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
-{
- return kstrtoull(s, base, res);
-}
-
-static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
-{
- return kstrtoll(s, base, res);
-}
-
-static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
-{
- return kstrtouint(s, base, res);
-}
-
-static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
-{
- return kstrtoint(s, base, res);
-}
-
-int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
-int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
-int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
-int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
-int __must_check kstrtobool(const char *s, bool *res);
-
-int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
-int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
-int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
-int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
-int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
-int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
-int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
-int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
-int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
-int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
-int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
-
-static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
-{
- return kstrtoull_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
-{
- return kstrtoll_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
-{
- return kstrtouint_from_user(s, count, base, res);
-}
-
-static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
-{
- return kstrtoint_from_user(s, count, base, res);
-}
-
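/*
 * Usage sketch for the kstrto*() helpers above — a hypothetical sysfs
 * store handler; the attribute and backing variable are invented for
 * illustration and are not part of this header.
 */
static unsigned long demo_threshold;

static ssize_t threshold_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	unsigned long val;
	int ret;

	/* base 0 auto-detects the 0x (hex) and 0 (octal) prefixes */
	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;	/* -EINVAL on bad input, -ERANGE on overflow */

	demo_threshold = val;
	return count;
}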
-/*
- * Use kstrto<foo> instead.
- *
- * NOTE: simple_strto<foo> does not check for the range overflow and,
- * depending on the input, may give interesting results.
- *
- * Use these functions if and only if you cannot use kstrto<foo>, because
- * the conversion ends on the first non-digit character, which may be far
- * beyond the supported range. It might be useful to parse the strings like
- * 10x50 or 12:21 without altering original string or temporary buffer in use.
- * Keep in mind above caveat.
- */
-
-extern unsigned long simple_strtoul(const char *,char **,unsigned int);
-extern long simple_strtol(const char *,char **,unsigned int);
-extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
-extern long long simple_strtoll(const char *,char **,unsigned int);
-
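/*
 * Hedged sketch of the one legitimate simple_strtoul() use case named in
 * the comment above: parsing digits that end before the terminator, e.g.
 * a "1920x1080" mode string. The helper is invented; note the complete
 * absence of overflow checking.
 */
static int demo_parse_mode(const char *s, unsigned long *w, unsigned long *h)
{
	char *end;

	*w = simple_strtoul(s, &end, 10);
	if (*end != 'x')
		return -EINVAL;
	*h = simple_strtoul(end + 1, &end, 10);
	return *end ? -EINVAL : 0;
}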
-extern int num_to_str(char *buf, int size,
- unsigned long long num, unsigned int width);
-
-/* lib/printf utilities */
-
-extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
-extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
-extern __printf(3, 4)
-int snprintf(char *buf, size_t size, const char *fmt, ...);
-extern __printf(3, 0)
-int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
-extern __printf(3, 4)
-int scnprintf(char *buf, size_t size, const char *fmt, ...);
-extern __printf(3, 0)
-int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
-extern __printf(2, 3) __malloc
-char *kasprintf(gfp_t gfp, const char *fmt, ...);
-extern __printf(2, 0) __malloc
-char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
-extern __printf(2, 0)
-const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
-
-extern __scanf(2, 3)
-int sscanf(const char *, const char *, ...);
-extern __scanf(2, 0)
-int vsscanf(const char *, const char *, va_list);
-
-extern int get_option(char **str, int *pint);
-extern char *get_options(const char *str, int nints, int *ints);
-extern unsigned long long memparse(const char *ptr, char **retptr);
-extern bool parse_option_str(const char *str, const char *option);
-extern char *next_arg(char *args, char **param, char **val);
extern int core_kernel_text(unsigned long addr);
-extern int init_kernel_text(unsigned long addr);
-extern int core_kernel_data(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);
-#ifdef CONFIG_SMP
-extern unsigned int sysctl_oops_all_cpu_backtrace;
-#else
-#define sysctl_oops_all_cpu_backtrace 0
-#endif /* CONFIG_SMP */
-
extern void bust_spinlocks(int yes);
-extern int panic_timeout;
-extern unsigned long panic_print;
-extern int panic_on_oops;
-extern int panic_on_unrecovered_nmi;
-extern int panic_on_io_nmi;
-extern int panic_on_warn;
-extern unsigned long panic_on_taint;
-extern bool panic_on_taint_nousertaint;
-extern int sysctl_panic_on_rcu_stall;
-extern int sysctl_max_rcu_stall_to_panic;
-extern int sysctl_panic_on_stackoverflow;
-
-extern bool crash_kexec_post_notifiers;
-/*
- * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It
- * holds a CPU number which is executing panic() currently. A value of
- * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
- */
-extern atomic_t panic_cpu;
-#define PANIC_CPU_INVALID -1
-
-/*
- * Only to be used by arch init code. If the user over-wrote the default
- * CONFIG_PANIC_TIMEOUT, honor it.
- */
-static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
-{
- if (panic_timeout == arch_default_timeout)
- panic_timeout = timeout;
-}
-extern const char *print_tainted(void);
-enum lockdep_ok {
- LOCKDEP_STILL_OK,
- LOCKDEP_NOW_UNRELIABLE
-};
-extern void add_taint(unsigned flag, enum lockdep_ok);
-extern int test_taint(unsigned flag);
-extern unsigned long get_taint(void);
extern int root_mountflags;
extern bool early_boot_irqs_disabled;
-/*
- * Values used for system_state. Ordering of the states must not be changed
+/**
+ * enum system_states - Values used for system_state.
+ *
+ * @SYSTEM_BOOTING: %0, no init needed
+ * @SYSTEM_SCHEDULING: system is ready for scheduling; OK to use RCU
+ * @SYSTEM_FREEING_INITMEM: system is freeing all of initmem; almost running
+ * @SYSTEM_RUNNING: system is up and running
+ * @SYSTEM_HALT: system entered clean system halt state
+ * @SYSTEM_POWER_OFF: system entered shutdown/clean power off state
+ * @SYSTEM_RESTART: system entered emergency power off or normal restart
+ * @SYSTEM_SUSPEND: system entered suspend or hibernate state
+ *
+ * Note:
+ * Ordering of the states must not be changed
* as code checks for <, <=, >, >= STATE.
*/
-extern enum system_states {
+enum system_states {
SYSTEM_BOOTING,
SYSTEM_SCHEDULING,
+ SYSTEM_FREEING_INITMEM,
SYSTEM_RUNNING,
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
SYSTEM_SUSPEND,
-} system_state;
-
-/* This cannot be an enum because some may be used in assembly source. */
-#define TAINT_PROPRIETARY_MODULE 0
-#define TAINT_FORCED_MODULE 1
-#define TAINT_CPU_OUT_OF_SPEC 2
-#define TAINT_FORCED_RMMOD 3
-#define TAINT_MACHINE_CHECK 4
-#define TAINT_BAD_PAGE 5
-#define TAINT_USER 6
-#define TAINT_DIE 7
-#define TAINT_OVERRIDDEN_ACPI_TABLE 8
-#define TAINT_WARN 9
-#define TAINT_CRAP 10
-#define TAINT_FIRMWARE_WORKAROUND 11
-#define TAINT_OOT_MODULE 12
-#define TAINT_UNSIGNED_MODULE 13
-#define TAINT_SOFTLOCKUP 14
-#define TAINT_LIVEPATCH 15
-#define TAINT_AUX 16
-#define TAINT_RANDSTRUCT 17
-#define TAINT_FLAGS_COUNT 18
-#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1)
-
-struct taint_flag {
- char c_true; /* character printed when tainted */
- char c_false; /* character printed when not tainted */
- bool module; /* also show as a per-module taint flag */
};
-
-extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
-
-extern const char hex_asc[];
-#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
-#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
-
-static inline char *hex_byte_pack(char *buf, u8 byte)
-{
- *buf++ = hex_asc_hi(byte);
- *buf++ = hex_asc_lo(byte);
- return buf;
-}
-
-extern const char hex_asc_upper[];
-#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
-#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
-
-static inline char *hex_byte_pack_upper(char *buf, u8 byte)
-{
- *buf++ = hex_asc_upper_hi(byte);
- *buf++ = hex_asc_upper_lo(byte);
- return buf;
-}
-
-extern int hex_to_bin(char ch);
-extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
-extern char *bin2hex(char *dst, const void *src, size_t count);
-
-bool mac_pton(const char *s, u8 *mac);
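/*
 * Illustrative use of hex_byte_pack() from the block above: formatting a
 * 6-byte MAC address. The helper name is invented; bin2hex() covers the
 * general buffer case.
 */
static void demo_format_mac(char *out /* >= 18 bytes */, const u8 *mac)
{
	int i;

	for (i = 0; i < 6; i++) {
		out = hex_byte_pack(out, mac[i]);
		if (i < 5)
			*out++ = ':';
	}
	*out = '\0';
}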
+extern enum system_states system_state;
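/*
 * Illustrative check, not from this header: callers commonly gate work on
 * system_state, relying on the ordering guarantee documented above.
 */
static bool demo_rcu_usable(void)
{
	/* per the kdoc, RCU is OK to use from SYSTEM_SCHEDULING onward */
	return system_state >= SYSTEM_SCHEDULING;
}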
/*
* General tracing related utility functions - trace_printk(),
@@ -498,7 +198,7 @@ bool mac_pton(const char *s, u8 *mac);
*
* Use tracing_on/tracing_off when you want to quickly turn on or off
* tracing. It simply enables or disables the recording of the trace events.
- * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on
+ * This also corresponds to the user space /sys/kernel/tracing/tracing_on
* file, which gives a means for the kernel and userspace to interact.
* Place a tracing_off() in the kernel where you want tracing to end.
* From user space, examine the trace, and then echo 1 > tracing_on
@@ -516,6 +216,7 @@ enum ftrace_dump_mode {
DUMP_NONE,
DUMP_ALL,
DUMP_ORIG,
+ DUMP_PARAM,
};
#ifdef CONFIG_TRACING
@@ -685,46 +386,9 @@ ftrace_vprintk(const char *fmt, va_list ap)
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
-/* This counts to 12. Any more, it will return 13th argument. */
-#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
-#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
-
-#define __CONCAT(a, b) a ## b
-#define CONCATENATE(a, b) __CONCAT(a, b)
-
-/**
- * container_of - cast a member of a structure out to the containing structure
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- */
-#define container_of(ptr, type, member) ({ \
- void *__mptr = (void *)(ptr); \
- BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
- !__same_type(*(ptr), void), \
- "pointer type mismatch in container_of()"); \
- ((type *)(__mptr - offsetof(type, member))); })
-
-/**
- * container_of_safe - cast a member of a structure out to the containing structure
- * @ptr: the pointer to the member.
- * @type: the type of the container struct this is embedded in.
- * @member: the name of the member within the struct.
- *
- * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged.
- */
-#define container_of_safe(ptr, type, member) ({ \
- void *__mptr = (void *)(ptr); \
- BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
- !__same_type(*(ptr), void), \
- "pointer type mismatch in container_of()"); \
- IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) : \
- ((type *)(__mptr - offsetof(type, member))); })
-
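/*
 * The canonical container_of() pattern, with an invented struct for
 * illustration: recover the enclosing object from a pointer to one of
 * its members.
 */
struct demo_device {
	int id;
	struct list_head node;
};

static struct demo_device *demo_from_node(struct list_head *n)
{
	return container_of(n, struct demo_device, node);
}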
-/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
-# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
+/* Rebuild everything on CONFIG_DYNAMIC_FTRACE */
+#ifdef CONFIG_DYNAMIC_FTRACE
+# define REBUILD_DUE_TO_DYNAMIC_FTRACE
#endif
/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
diff --git a/include/linux/kernel_read_file.h b/include/linux/kernel_read_file.h
index 575ffa1031d3..d613a7b4dd35 100644
--- a/include/linux/kernel_read_file.h
+++ b/include/linux/kernel_read_file.h
@@ -14,6 +14,7 @@
id(KEXEC_INITRAMFS, kexec-initramfs) \
id(POLICY, security-policy) \
id(X509_CERTIFICATE, x509-certificate) \
+ id(MODULE_COMPRESSED, kernel-module-compressed) \
id(MAX_ID, )
#define __fid_enumify(ENUM, dummy) READING_ ## ENUM,
@@ -35,21 +36,21 @@ static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
return kernel_read_file_str[id];
}
-int kernel_read_file(struct file *file, loff_t offset,
- void **buf, size_t buf_size,
- size_t *file_size,
- enum kernel_read_file_id id);
-int kernel_read_file_from_path(const char *path, loff_t offset,
- void **buf, size_t buf_size,
- size_t *file_size,
- enum kernel_read_file_id id);
-int kernel_read_file_from_path_initns(const char *path, loff_t offset,
- void **buf, size_t buf_size,
- size_t *file_size,
- enum kernel_read_file_id id);
-int kernel_read_file_from_fd(int fd, loff_t offset,
- void **buf, size_t buf_size,
- size_t *file_size,
- enum kernel_read_file_id id);
+ssize_t kernel_read_file(struct file *file, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_path(const char *path, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_path_initns(const char *path, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_fd(int fd, loff_t offset,
+ void **buf, size_t buf_size,
+ size_t *file_size,
+ enum kernel_read_file_id id);
#endif /* _LINUX_KERNEL_READ_FILE_H */
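/*
 * Hedged sketch of a caller adapted to the int -> ssize_t change above;
 * the path, size cap, and helper name are placeholders.
 */
static int demo_load_policy(void)
{
	void *buf = NULL;
	size_t file_size;
	ssize_t ret;

	ret = kernel_read_file_from_path("/etc/demo-policy", 0, &buf, SZ_1M,
					 &file_size, READING_POLICY);
	if (ret < 0)
		return ret;	/* negative errno */

	/* on success, ret is the byte count and buf was vmalloc()-ed */
	vfree(buf);
	return 0;
}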
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 44ae1a7eb9e3..b97ce2df376f 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -5,7 +5,6 @@
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
-#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/vtime.h>
@@ -28,6 +27,9 @@ enum cpu_usage_stat {
CPUTIME_STEAL,
CPUTIME_GUEST,
CPUTIME_GUEST_NICE,
+#ifdef CONFIG_SCHED_CORE
+ CPUTIME_FORCEIDLE,
+#endif
NR_STATS,
};
@@ -49,6 +51,7 @@ DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
+extern unsigned long long nr_context_switches_cpu(int cpu);
extern unsigned long long nr_context_switches(void);
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
@@ -64,6 +67,25 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
return kstat_cpu(cpu).softirqs[irq];
}
+static inline unsigned int kstat_cpu_softirqs_sum(int cpu)
+{
+ int i;
+ unsigned int sum = 0;
+
+ for (i = 0; i < NR_SOFTIRQS; i++)
+ sum += kstat_softirqs_cpu(i, cpu);
+
+ return sum;
+}
+
+#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT
+extern void kstat_snapshot_irqs(void);
+extern unsigned int kstat_get_irq_since_snapshot(unsigned int irq);
+#else
+static inline void kstat_snapshot_irqs(void) { }
+static inline unsigned int kstat_get_irq_since_snapshot(unsigned int irq) { return 0; }
+#endif
+
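/*
 * Assumed usage of the snapshot helpers introduced above: count how many
 * times @irq fires while an operation runs (returns 0 when
 * CONFIG_GENERIC_IRQ_STAT_SNAPSHOT is off, per the stubs).
 */
static unsigned int demo_irqs_during(unsigned int irq, void (*op)(void))
{
	kstat_snapshot_irqs();		/* latch the current per-IRQ counts */
	op();
	return kstat_get_irq_since_snapshot(irq);
}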
/*
* Number of interrupts per specific IRQ source, since bootup
*/
@@ -72,7 +94,7 @@ extern unsigned int kstat_irqs_usr(unsigned int irq);
/*
* Number of interrupts per cpu, since bootup
*/
-static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
+static inline unsigned long kstat_cpu_irqs_sum(unsigned int cpu)
{
return kstat_cpu(cpu).irqs_sum;
}
@@ -102,6 +124,7 @@ extern void account_system_index_time(struct task_struct *, u64,
enum cpu_usage_stat);
extern void account_steal_time(u64);
extern void account_idle_time(u64);
+extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static inline void account_process_tick(struct task_struct *tsk, int user)
@@ -114,4 +137,8 @@ extern void account_process_tick(struct task_struct *, int user);
extern void account_idle_ticks(unsigned long ticks);
+#ifdef CONFIG_SCHED_CORE
+extern void __account_forceidle_time(struct task_struct *tsk, u64 delta);
+#endif
+
#endif /* _LINUX_KERNEL_STAT_H */
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 9e8ca8743c26..b5a5f32fdfd1 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -6,7 +6,6 @@
#ifndef __LINUX_KERNFS_H
#define __LINUX_KERNFS_H
-#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -14,14 +13,19 @@
#include <linux/lockdep.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/types.h>
#include <linux/uidgid.h>
#include <linux/wait.h>
+#include <linux/rwsem.h>
+#include <linux/cache.h>
struct file;
struct dentry;
struct iattr;
struct seq_file;
struct vm_area_struct;
+struct vm_operations_struct;
struct super_block;
struct file_system_type;
struct poll_table_struct;
@@ -31,6 +35,62 @@ struct kernfs_fs_context;
struct kernfs_open_node;
struct kernfs_iattrs;
+/*
+ * NR_KERNFS_LOCK_BITS determines size (NR_KERNFS_LOCKS) of hash
+ * table of locks.
+ * Having a small hash table would impact scalability, since
+ * more and more kernfs_node objects will end up using same lock
+ * and having a very large hash table would waste memory.
+ *
+ * At the moment size of hash table of locks is being set based on
+ * the number of CPUs as follows:
+ *
+ * NR_CPU NR_KERNFS_LOCK_BITS NR_KERNFS_LOCKS
+ * 1 1 2
+ * 2-3 2 4
+ * 4-7 4 16
+ * 8-15 6 64
+ * 16-31 8 256
+ * 32 and more 10 1024
+ *
+ * The above relation between NR_CPU and number of locks is based
+ * on some internal experimentation which involved booting qemu
+ * with different values of smp, performing some sysfs operations
+ * on all CPUs and observing how an increase in the number of locks
+ * impacts the completion time of these sysfs operations on each CPU.
+ */
+#ifdef CONFIG_SMP
+#define NR_KERNFS_LOCK_BITS (2 * (ilog2(NR_CPUS < 32 ? NR_CPUS : 32)))
+#else
+#define NR_KERNFS_LOCK_BITS 1
+#endif
+
+#define NR_KERNFS_LOCKS (1 << NR_KERNFS_LOCK_BITS)
+
+/*
+ * There's one kernfs_open_file for each open file and one kernfs_open_node
+ * for each kernfs_node with one or more open files.
+ *
+ * filp->private_data points to seq_file whose ->private points to
+ * kernfs_open_file.
+ *
+ * kernfs_open_files are chained at kernfs_open_node->files, which is
+ * protected by kernfs_global_locks.open_file_mutex[i].
+ *
+ * To reduce possible contention in sysfs access arising from a single
+ * lock, use an array of locks (e.g. open_file_mutex) and hash the
+ * kernfs_node object address to pick an index into it.
+ *
+ * Hashed mutexes are safe to use here because operations using these don't
+ * rely on global exclusion.
+ *
+ * In future we intend to replace other global locks with hashed ones as well.
+ * kernfs_global_locks acts as a holder for all such hash tables.
+ */
+struct kernfs_global_locks {
+ struct mutex open_file_mutex[NR_KERNFS_LOCKS];
+};
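/*
 * Sketch of the hashed-lock lookup described above; the helper name and
 * the hash_ptr() choice are assumptions about the implementation, not
 * part of this header.
 */
static struct mutex *demo_open_file_mutex(struct kernfs_global_locks *locks,
					  struct kernfs_node *kn)
{
	int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);

	return &locks->open_file_mutex[idx];
}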
+
enum kernfs_node_type {
KERNFS_DIR = 0x0001,
KERNFS_FILE = 0x0002,
@@ -48,10 +108,12 @@ enum kernfs_node_flag {
KERNFS_HAS_SEQ_SHOW = 0x0040,
KERNFS_HAS_MMAP = 0x0080,
KERNFS_LOCKDEP = 0x0100,
+ KERNFS_HIDDEN = 0x0200,
KERNFS_SUICIDAL = 0x0400,
KERNFS_SUICIDED = 0x0800,
KERNFS_EMPTY_DIR = 0x1000,
KERNFS_HAS_RELEASE = 0x2000,
+ KERNFS_REMOVING = 0x4000,
};
/* @flags for kernfs_create_root() */
@@ -85,6 +147,11 @@ enum kernfs_root_flag {
* Support user xattrs to be written to nodes rooted at this root.
*/
KERNFS_ROOT_SUPPORT_USER_XATTR = 0x0008,
+
+ /*
+ * Renames must not change the parent node.
+ */
+ KERNFS_ROOT_INVARIANT_PARENT = 0x0010,
};
/* type-specific structures for kernfs_node union members */
@@ -98,6 +165,11 @@ struct kernfs_elem_dir {
* better directly in kernfs_node but is here to save space.
*/
struct kernfs_root *root;
+ /*
+ * Monotonic revision counter, used to identify if a directory
+ * node has changed during negative dentry revalidation.
+ */
+ unsigned long rev;
};
struct kernfs_elem_symlink {
@@ -106,7 +178,7 @@ struct kernfs_elem_symlink {
struct kernfs_elem_attr {
const struct kernfs_ops *ops;
- struct kernfs_open_node *open;
+ struct kernfs_open_node __rcu *open;
loff_t size;
struct kernfs_node *notify_next; /* for kernfs_notify() */
};
@@ -132,30 +204,32 @@ struct kernfs_node {
* never moved to a different parent, it is safe to access the
* parent directly.
*/
- struct kernfs_node *parent;
- const char *name;
+ struct kernfs_node __rcu *__parent;
+ const char __rcu *name;
struct rb_node rb;
const void *ns; /* namespace tag */
unsigned int hash; /* ns + name hash */
+ unsigned short flags;
+ umode_t mode;
+
union {
struct kernfs_elem_dir dir;
struct kernfs_elem_symlink symlink;
struct kernfs_elem_attr attr;
};
- void *priv;
-
/*
* 64bit unique ID. On 64bit ino setups, id is the ino. On 32bit,
* the low 32bits are ino and upper generation.
*/
u64 id;
- unsigned short flags;
- umode_t mode;
+ void *priv;
struct kernfs_iattrs *iattr;
+
+ struct rcu_head rcu;
};
/*
@@ -177,22 +251,7 @@ struct kernfs_syscall_ops {
struct kernfs_root *root);
};
-struct kernfs_root {
- /* published fields */
- struct kernfs_node *kn;
- unsigned int flags; /* KERNFS_ROOT_* flags */
-
- /* private fields, do not use outside kernfs proper */
- struct idr ino_idr;
- u32 last_id_lowbits;
- u32 id_highbits;
- struct kernfs_syscall_ops *syscall_ops;
-
- /* list of kernfs_super_info of this root, protected by kernfs_mutex */
- struct list_head supers;
-
- wait_queue_head_t deactivate_waitq;
-};
+struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root);
struct kernfs_open_file {
/* published fields */
@@ -264,10 +323,7 @@ struct kernfs_ops {
struct poll_table_struct *pt);
int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lock_class_key lockdep_key;
-#endif
+ loff_t (*llseek)(struct kernfs_open_file *of, loff_t offset, int whence);
};
/*
@@ -344,7 +400,7 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
}
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
-int kernfs_path_from_node(struct kernfs_node *root_kn, struct kernfs_node *kn,
+int kernfs_path_from_node(struct kernfs_node *kn_to, struct kernfs_node *kn_from,
char *buf, size_t buflen);
void pr_cont_kernfs_name(struct kernfs_node *kn);
void pr_cont_kernfs_path(struct kernfs_node *kn);
@@ -365,6 +421,7 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
unsigned int flags, void *priv);
void kernfs_destroy_root(struct kernfs_root *root);
+unsigned int kernfs_root_flags(struct kernfs_node *kn);
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
const char *name, umode_t mode,
@@ -383,6 +440,7 @@ struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
const char *name,
struct kernfs_node *target);
void kernfs_activate(struct kernfs_node *kn);
+void kernfs_show(struct kernfs_node *kn, bool show);
void kernfs_remove(struct kernfs_node *kn);
void kernfs_break_active_protection(struct kernfs_node *kn);
void kernfs_unbreak_active_protection(struct kernfs_node *kn);
@@ -462,6 +520,8 @@ kernfs_create_root(struct kernfs_syscall_ops *scops, unsigned int flags,
{ return ERR_PTR(-ENOSYS); }
static inline void kernfs_destroy_root(struct kernfs_root *root) { }
+static inline unsigned int kernfs_root_flags(struct kernfs_node *kn)
+{ return 0; }
static inline struct kernfs_node *
kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
@@ -501,6 +561,10 @@ static inline int kernfs_setattr(struct kernfs_node *kn,
const struct iattr *iattr)
{ return -ENOSYS; }
+static inline __poll_t kernfs_generic_poll(struct kernfs_open_file *of,
+ struct poll_table_struct *pt)
+{ return -ENOSYS; }
+
static inline void kernfs_notify(struct kernfs_node *kn) { }
static inline int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
@@ -563,30 +627,6 @@ kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
priv, NULL);
}
-static inline struct kernfs_node *
-kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
- umode_t mode, kuid_t uid, kgid_t gid,
- loff_t size, const struct kernfs_ops *ops,
- void *priv, const void *ns)
-{
- struct lock_class_key *key = NULL;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- key = (struct lock_class_key *)&ops->lockdep_key;
-#endif
- return __kernfs_create_file(parent, name, mode, uid, gid,
- size, ops, priv, ns, key);
-}
-
-static inline struct kernfs_node *
-kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode,
- loff_t size, const struct kernfs_ops *ops, void *priv)
-{
- return kernfs_create_file_ns(parent, name, mode,
- GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
- size, ops, priv, NULL);
-}
-
static inline int kernfs_remove_by_name(struct kernfs_node *parent,
const char *name)
{
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 0c994ae37729..ff7e231b0485 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -15,17 +15,28 @@
#if !defined(__ASSEMBLY__)
-#include <linux/crash_core.h>
+#include <linux/vmcore_info.h>
+#include <linux/crash_reserve.h>
#include <asm/io.h>
+#include <linux/range.h>
#include <uapi/linux/kexec.h>
+#include <linux/verification.h>
+
+extern note_buf_t __percpu *crash_notes;
+
+#ifdef CONFIG_CRASH_DUMP
+#include <linux/prandom.h>
+#endif
#ifdef CONFIG_KEXEC_CORE
#include <linux/list.h>
#include <linux/compat.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/highmem.h>
#include <asm/kexec.h>
+#include <linux/crash_core.h>
/* Verify architecture specific macros are defined */
@@ -61,8 +72,6 @@
#define KEXEC_CRASH_MEM_ALIGN PAGE_SIZE
#endif
-#define KEXEC_CORE_NOTE_NAME CRASH_CORE_NOTE_NAME
-
/*
* This structure is used to hold the arguments that are used when loading
* kernel binaries.
@@ -70,6 +79,12 @@
typedef unsigned long kimage_entry_t;
+/*
+ * This is a copy of the UAPI struct kexec_segment and must be identical
+ * to it because it gets copied straight from user space into kernel
+ * memory. Do not modify this structure unless you change the way segments
+ * get ingested from user space.
+ */
struct kexec_segment {
/*
* This pointer can point to user memory if kexec_load() system
@@ -163,7 +178,9 @@ int kexec_image_post_load_cleanup_default(struct kimage *image);
* @buf_align: Minimum alignment needed.
* @buf_min: The buffer can't be placed below this address.
* @buf_max: The buffer can't be placed above this address.
+ * @cma: CMA page if the buffer is backed by CMA.
* @top_down: Allocate from top of memory.
+ * @random: Place the buffer at a random position.
*/
struct kexec_buf {
struct kimage *image;
@@ -174,55 +191,126 @@ struct kexec_buf {
unsigned long buf_align;
unsigned long buf_min;
unsigned long buf_max;
+ struct page *cma;
bool top_down;
+#ifdef CONFIG_CRASH_DUMP
+ bool random;
+#endif
};
+
+#ifdef CONFIG_CRASH_DUMP
+static inline void kexec_random_range_start(unsigned long start,
+ unsigned long end,
+ struct kexec_buf *kbuf,
+ unsigned long *temp_start)
+{
+ unsigned short i;
+
+ if (kbuf->random) {
+ get_random_bytes(&i, sizeof(unsigned short));
+ *temp_start = start + (end - start) / USHRT_MAX * i;
+ }
+}
+#else
+static inline void kexec_random_range_start(unsigned long start,
+ unsigned long end,
+ struct kexec_buf *kbuf,
+ unsigned long *temp_start)
+{}
+#endif
+
int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf);
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
void *buf, unsigned int size,
bool get_value);
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
-/* Architectures may override the below functions */
-int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
- unsigned long buf_len);
-void *arch_kexec_kernel_image_load(struct kimage *image);
-int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
- Elf_Shdr *section,
- const Elf_Shdr *relsec,
- const Elf_Shdr *symtab);
-int arch_kexec_apply_relocations(struct purgatory_info *pi,
- Elf_Shdr *section,
- const Elf_Shdr *relsec,
- const Elf_Shdr *symtab);
-int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#ifndef arch_kexec_kernel_image_probe
+static inline int
+arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len)
+{
+ return kexec_image_probe_default(image, buf, buf_len);
+}
+#endif
+
+#ifndef arch_kimage_file_post_load_cleanup
+static inline int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+ return kexec_image_post_load_cleanup_default(image);
+}
+#endif
+
+#ifndef arch_check_excluded_range
+static inline int arch_check_excluded_range(struct kimage *image,
+ unsigned long start,
+ unsigned long end)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_KEXEC_SIG
-int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
- unsigned long buf_len);
+#ifdef CONFIG_SIGNED_PE_FILE_VERIFICATION
+int kexec_kernel_verify_pe_sig(const char *kernel, unsigned long kernel_len);
+#endif
#endif
-int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf);
extern int kexec_add_buffer(struct kexec_buf *kbuf);
int kexec_locate_mem_hole(struct kexec_buf *kbuf);
-/* Alignment required for elf header segment */
-#define ELF_CORE_HEADER_ALIGN 4096
-
-struct crash_mem_range {
- u64 start, end;
-};
+#ifndef arch_kexec_locate_mem_hole
+/**
+ * arch_kexec_locate_mem_hole - Find free memory to place the segments.
+ * @kbuf: Parameters for the memory search.
+ *
+ * On success, kbuf->mem will have the start address of the memory region found.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
+{
+ return kexec_locate_mem_hole(kbuf);
+}
+#endif
-struct crash_mem {
- unsigned int max_nr_ranges;
- unsigned int nr_ranges;
- struct crash_mem_range ranges[];
-};
+#ifndef arch_kexec_apply_relocations_add
+/*
+ * arch_kexec_apply_relocations_add - apply relocations of type RELA
+ * @pi: Purgatory to be relocated.
+ * @section: Section relocations applying to.
+ * @relsec: Section containing RELAs.
+ * @symtab: Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int
+arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
+ const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+{
+ pr_err("RELA relocation unsupported.\n");
+ return -ENOEXEC;
+}
+#endif
-extern int crash_exclude_mem_range(struct crash_mem *mem,
- unsigned long long mstart,
- unsigned long long mend);
-extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
- void **addr, unsigned long *sz);
+#ifndef arch_kexec_apply_relocations
+/*
+ * arch_kexec_apply_relocations - apply relocations of type REL
+ * @pi: Purgatory to be relocated.
+ * @section: Section relocations applying to.
+ * @relsec: Section containing RELs.
+ * @symtab: Corresponding symtab.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+static inline int
+arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
+ const Elf_Shdr *relsec, const Elf_Shdr *symtab)
+{
+ pr_err("REL relocation unsupported.\n");
+ return -ENOEXEC;
+}
+#endif
#endif /* CONFIG_KEXEC_FILE */
#ifdef CONFIG_KEXEC_ELF
@@ -260,6 +348,7 @@ struct kimage {
unsigned long nr_segments;
struct kexec_segment segment[KEXEC_SEGMENT_MAX];
+ struct page *segment_cma[KEXEC_SEGMENT_MAX];
struct list_head control_pages;
struct list_head dest_pages;
@@ -275,6 +364,13 @@ struct kimage {
unsigned int preserve_context : 1;
/* If set, we are using file mode kexec syscall */
unsigned int file_mode:1;
+#ifdef CONFIG_CRASH_HOTPLUG
+ /* If set, it is safe to update kexec segments that are
+ * excluded from SHA calculation.
+ */
+ unsigned int hotplug_support:1;
+#endif
+ unsigned int no_cma:1;
#ifdef ARCH_HAS_KIMAGE_ARCH
struct kimage_arch arch;
@@ -299,6 +395,15 @@ struct kimage {
/* Information for loading purgatory */
struct purgatory_info purgatory_info;
+
+ /* Force carrying over the DTB from the current boot */
+ bool force_dtb;
+#endif
+
+#ifdef CONFIG_CRASH_HOTPLUG
+ int hp_action;
+ int elfcorehdr_index;
+ bool elfcorehdr_updated;
#endif
#ifdef CONFIG_IMA_KEXEC
@@ -307,12 +412,24 @@ struct kimage {
phys_addr_t ima_buffer_addr;
size_t ima_buffer_size;
+
+ unsigned long ima_segment_index;
+ bool is_ima_segment_index_set;
#endif
+ struct {
+ struct kexec_segment *scratch;
+ phys_addr_t fdt;
+ } kho;
+
/* Core ELF header buffer */
void *elf_headers;
unsigned long elf_headers_sz;
unsigned long elf_load_addr;
+
+ /* dm crypt keys buffer */
+ unsigned long dm_crypt_keys_addr;
+ unsigned long dm_crypt_keys_sz;
};
/* kexec interface functions */
@@ -322,18 +439,15 @@ extern void machine_kexec_cleanup(struct kimage *image);
extern int kernel_kexec(void);
extern struct page *kimage_alloc_control_pages(struct kimage *image,
unsigned int order);
-int machine_kexec_post_load(struct kimage *image);
-extern void __crash_kexec(struct pt_regs *);
-extern void crash_kexec(struct pt_regs *);
-int kexec_should_crash(struct task_struct *);
-int kexec_crash_loaded(void);
-void crash_save_cpu(struct pt_regs *regs, int cpu);
-extern int kimage_crash_copy_vmcoreinfo(struct kimage *image);
+#ifndef machine_kexec_post_load
+static inline int machine_kexec_post_load(struct kimage *image) { return 0; }
+#endif
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;
-extern int kexec_load_disabled;
+
+bool kexec_load_permitted(int kexec_image_type);
#ifndef kexec_flush_icache_page
#define kexec_flush_icache_page(page)
@@ -341,31 +455,20 @@ extern int kexec_load_disabled;
/* List of defined/legal kexec flags */
#ifndef CONFIG_KEXEC_JUMP
-#define KEXEC_FLAGS KEXEC_ON_CRASH
+#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_UPDATE_ELFCOREHDR | KEXEC_CRASH_HOTPLUG_SUPPORT)
#else
-#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT)
+#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT | KEXEC_UPDATE_ELFCOREHDR | \
+ KEXEC_CRASH_HOTPLUG_SUPPORT)
#endif
/* List of defined/legal kexec file flags */
#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
- KEXEC_FILE_NO_INITRAMFS)
-
-/* Location of a reserved region to hold the crash kernel.
- */
-extern struct resource crashk_res;
-extern struct resource crashk_low_res;
-extern note_buf_t __percpu *crash_notes;
+ KEXEC_FILE_NO_INITRAMFS | KEXEC_FILE_DEBUG | \
+ KEXEC_FILE_NO_CMA | KEXEC_FILE_FORCE_DTB)
/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
-int crash_shrink_memory(unsigned long new_size);
-size_t crash_get_memory_size(void);
-void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
-
-void arch_kexec_protect_crashkres(void);
-void arch_kexec_unprotect_crashkres(void);
-
#ifndef page_to_boot_pfn
static inline unsigned long page_to_boot_pfn(struct page *page)
{
@@ -394,6 +497,16 @@ static inline phys_addr_t boot_phys_to_phys(unsigned long boot_phys)
}
#endif
+#ifndef crash_free_reserved_phys_range
+static inline void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+ unsigned long addr;
+
+ for (addr = begin; addr < end; addr += PAGE_SIZE)
+ free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
+}
+#endif
+
static inline unsigned long virt_to_boot_phys(void *addr)
{
return phys_to_boot_phys(__pa((unsigned long)addr));
@@ -412,16 +525,33 @@ static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, g
static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
#endif
+extern bool kexec_file_dbg_print;
+
+#define kexec_dprintk(fmt, arg...) \
+ do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
+
+extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size);
+extern void kimage_unmap_segment(void *buffer);
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
+struct kimage;
static inline void __crash_kexec(struct pt_regs *regs) { }
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
static inline int kexec_crash_loaded(void) { return 0; }
+static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size)
+{ return NULL; }
+static inline void kimage_unmap_segment(void *buffer) { }
#define kexec_in_progress false
#endif /* CONFIG_KEXEC_CORE */
+#ifdef CONFIG_KEXEC_SIG
+void set_kexec_sig_enforced(void);
+#else
+static inline void set_kexec_sig_enforced(void) {}
+#endif
+
#endif /* !defined(__ASSEMBLY__) */
#endif /* LINUX_KEXEC_H */
diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h
new file mode 100644
index 000000000000..5f7b9de97e8d
--- /dev/null
+++ b/include/linux/kexec_handover.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_KEXEC_HANDOVER_H
+#define LINUX_KEXEC_HANDOVER_H
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct kho_scratch {
+ phys_addr_t addr;
+ phys_addr_t size;
+};
+
+struct folio;
+struct page;
+
+#define DECLARE_KHOSER_PTR(name, type) \
+ union { \
+ phys_addr_t phys; \
+ type ptr; \
+ } name
+#define KHOSER_STORE_PTR(dest, val) \
+ ({ \
+ typeof(val) v = val; \
+ typecheck(typeof((dest).ptr), v); \
+ (dest).phys = virt_to_phys(v); \
+ })
+#define KHOSER_LOAD_PTR(src) \
+ ({ \
+ typeof(src) s = src; \
+ (typeof((s).ptr))((s).phys ? phys_to_virt((s).phys) : NULL); \
+ })
+
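/*
 * Hedged example of the KHOSER helpers above: stash a pointer as a
 * physical address so it can survive a kexec handover. The struct and
 * helpers are invented for illustration.
 */
struct demo_state {
	DECLARE_KHOSER_PTR(buf, void *);
};

static void demo_save(struct demo_state *st, void *buffer)
{
	KHOSER_STORE_PTR(st->buf, buffer);	/* records virt_to_phys(buffer) */
}

static void *demo_load(struct demo_state *st)
{
	return KHOSER_LOAD_PTR(st->buf);	/* NULL if nothing was stored */
}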
+struct kho_vmalloc_chunk;
+struct kho_vmalloc {
+ DECLARE_KHOSER_PTR(first, struct kho_vmalloc_chunk *);
+ unsigned int total_pages;
+ unsigned short flags;
+ unsigned short order;
+};
+
+#ifdef CONFIG_KEXEC_HANDOVER
+bool kho_is_enabled(void);
+bool is_kho_boot(void);
+
+int kho_preserve_folio(struct folio *folio);
+void kho_unpreserve_folio(struct folio *folio);
+int kho_preserve_pages(struct page *page, unsigned int nr_pages);
+void kho_unpreserve_pages(struct page *page, unsigned int nr_pages);
+int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation);
+void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation);
+void *kho_alloc_preserve(size_t size);
+void kho_unpreserve_free(void *mem);
+void kho_restore_free(void *mem);
+struct folio *kho_restore_folio(phys_addr_t phys);
+struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages);
+void *kho_restore_vmalloc(const struct kho_vmalloc *preservation);
+int kho_add_subtree(const char *name, void *fdt);
+void kho_remove_subtree(void *fdt);
+int kho_retrieve_subtree(const char *name, phys_addr_t *phys);
+
+void kho_memory_init(void);
+
+void kho_populate(phys_addr_t fdt_phys, u64 fdt_len, phys_addr_t scratch_phys,
+ u64 scratch_len);
+#else
+static inline bool kho_is_enabled(void)
+{
+ return false;
+}
+
+static inline bool is_kho_boot(void)
+{
+ return false;
+}
+
+static inline int kho_preserve_folio(struct folio *folio)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void kho_unpreserve_folio(struct folio *folio) { }
+
+static inline int kho_preserve_pages(struct page *page, unsigned int nr_pages)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void kho_unpreserve_pages(struct page *page, unsigned int nr_pages) { }
+
+static inline int kho_preserve_vmalloc(void *ptr,
+ struct kho_vmalloc *preservation)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation) { }
+
+static inline void *kho_alloc_preserve(size_t size)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void kho_unpreserve_free(void *mem) { }
+static inline void kho_restore_free(void *mem) { }
+
+static inline struct folio *kho_restore_folio(phys_addr_t phys)
+{
+ return NULL;
+}
+
+static inline struct page *kho_restore_pages(phys_addr_t phys,
+ unsigned int nr_pages)
+{
+ return NULL;
+}
+
+static inline void *kho_restore_vmalloc(const struct kho_vmalloc *preservation)
+{
+ return NULL;
+}
+
+static inline int kho_add_subtree(const char *name, void *fdt)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void kho_remove_subtree(void *fdt) { }
+
+static inline int kho_retrieve_subtree(const char *name, phys_addr_t *phys)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void kho_memory_init(void) { }
+
+static inline void kho_populate(phys_addr_t fdt_phys, u64 fdt_len,
+ phys_addr_t scratch_phys, u64 scratch_len)
+{
+}
+#endif /* CONFIG_KEXEC_HANDOVER */
+
+#endif /* LINUX_KEXEC_HANDOVER_H */
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 7d985a1dfe4a..bb97bd3e5af4 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -73,6 +73,7 @@ struct key_type {
unsigned int flags;
#define KEY_TYPE_NET_DOMAIN 0x00000001 /* Keys of this type have a net namespace domain */
+#define KEY_TYPE_INSTANT_REAP 0x00000002 /* Keys of this type don't have a delay after expiring */
/* vet a description */
int (*vet_description)(const char *description);
@@ -106,11 +107,14 @@ struct key_type {
*/
int (*match_preparse)(struct key_match_data *match_data);
- /* Free preparsed match data (optional). This should be supplied it
- * ->match_preparse() is supplied. */
+ /*
+ * Free preparsed match data (optional). This should be supplied if
+ * ->match_preparse() is supplied.
+ */
void (*match_free)(struct key_match_data *match_data);
- /* clear some of the data from a key on revokation (optional)
+ /*
+ * Clear some of the data from a key on revocation (optional).
* - the key's semaphore will be write-locked by the caller
*/
void (*revoke)(struct key *key);
diff --git a/include/linux/key.h b/include/linux/key.h
index 7febc4881363..81b8f05c6898 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -88,6 +88,12 @@ enum key_need_perm {
KEY_DEFER_PERM_CHECK, /* Special: permission check is deferred */
};
+enum key_lookup_flag {
+ KEY_LOOKUP_CREATE = 0x01,
+ KEY_LOOKUP_PARTIAL = 0x02,
+ KEY_LOOKUP_ALL = (KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL),
+};
+
struct seq_file;
struct user_struct;
struct signal_struct;
@@ -230,6 +236,7 @@ struct key {
#define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */
#define KEY_FLAG_KEEP 8 /* set if key should not be removed */
#define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */
+#define KEY_FLAG_USER_ALIVE 10 /* set if final put has not happened on key yet */
/* the key type and key description string
* - the desc is used to match a key against search criteria
@@ -380,6 +387,14 @@ extern int wait_for_key_construction(struct key *key, bool intr);
extern int key_validate(const struct key *key);
+extern key_ref_t key_create(key_ref_t keyring,
+ const char *type,
+ const char *description,
+ const void *payload,
+ size_t plen,
+ key_perm_t perm,
+ unsigned long flags);
+
extern key_ref_t key_create_or_update(key_ref_t keyring,
const char *type,
const char *description,
@@ -422,9 +437,6 @@ extern key_ref_t keyring_search(key_ref_t keyring,
const char *description,
bool recurse);
-extern int keyring_add_key(struct key *keyring,
- struct key *key);
-
extern int keyring_restrict(key_ref_t keyring, const char *type,
const char *restriction);
@@ -476,9 +488,6 @@ do { \
rcu_assign_pointer((KEY)->payload.rcu_data0, (PAYLOAD)); \
} while (0)
-#ifdef CONFIG_SYSCTL
-extern struct ctl_table key_sysctls[];
-#endif
/*
* the userspace interface
*/
@@ -504,6 +513,7 @@ extern void key_init(void);
#define key_init() do { } while(0)
#define key_free_user_ns(ns) do { } while(0)
#define key_remove_domain(d) do { } while(0)
+#define key_lookup(k) NULL
#endif /* CONFIG_KEYS */
#endif /* __KERNEL__ */
diff --git a/include/linux/keyslot-manager.h b/include/linux/keyslot-manager.h
deleted file mode 100644
index a27605e2f826..000000000000
--- a/include/linux/keyslot-manager.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright 2019 Google LLC
- */
-
-#ifndef __LINUX_KEYSLOT_MANAGER_H
-#define __LINUX_KEYSLOT_MANAGER_H
-
-#include <linux/bio.h>
-#include <linux/blk-crypto.h>
-
-struct blk_keyslot_manager;
-
-/**
- * struct blk_ksm_ll_ops - functions to manage keyslots in hardware
- * @keyslot_program: Program the specified key into the specified slot in the
- * inline encryption hardware.
- * @keyslot_evict: Evict key from the specified keyslot in the hardware.
- * The key is provided so that e.g. dm layers can evict
- * keys from the devices that they map over.
- * Returns 0 on success, -errno otherwise.
- *
- * This structure should be provided by storage device drivers when they set up
- * a keyslot manager - this structure holds the function ptrs that the keyslot
- * manager will use to manipulate keyslots in the hardware.
- */
-struct blk_ksm_ll_ops {
- int (*keyslot_program)(struct blk_keyslot_manager *ksm,
- const struct blk_crypto_key *key,
- unsigned int slot);
- int (*keyslot_evict)(struct blk_keyslot_manager *ksm,
- const struct blk_crypto_key *key,
- unsigned int slot);
-};
-
-struct blk_keyslot_manager {
- /*
- * The struct blk_ksm_ll_ops that this keyslot manager will use
- * to perform operations like programming and evicting keys on the
- * device
- */
- struct blk_ksm_ll_ops ksm_ll_ops;
-
- /*
- * The maximum number of bytes supported for specifying the data unit
- * number.
- */
- unsigned int max_dun_bytes_supported;
-
- /*
- * Array of size BLK_ENCRYPTION_MODE_MAX of bitmasks that represents
- * whether a crypto mode and data unit size are supported. The i'th
- * bit of crypto_mode_supported[crypto_mode] is set iff a data unit
- * size of (1 << i) is supported. We only support data unit sizes
- * that are powers of 2.
- */
- unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX];
-
- /* Device for runtime power management (NULL if none) */
- struct device *dev;
-
- /* Here onwards are *private* fields for internal keyslot manager use */
-
- unsigned int num_slots;
-
- /* Protects programming and evicting keys from the device */
- struct rw_semaphore lock;
-
- /* List of idle slots, with least recently used slot at front */
- wait_queue_head_t idle_slots_wait_queue;
- struct list_head idle_slots;
- spinlock_t idle_slots_lock;
-
- /*
- * Hash table which maps struct *blk_crypto_key to keyslots, so that we
- * can find a key's keyslot in O(1) time rather than O(num_slots).
- * Protected by 'lock'.
- */
- struct hlist_head *slot_hashtable;
- unsigned int log_slot_ht_size;
-
- /* Per-keyslot data */
- struct blk_ksm_keyslot *slots;
-};
-
-int blk_ksm_init(struct blk_keyslot_manager *ksm, unsigned int num_slots);
-
-int devm_blk_ksm_init(struct device *dev, struct blk_keyslot_manager *ksm,
- unsigned int num_slots);
-
-blk_status_t blk_ksm_get_slot_for_key(struct blk_keyslot_manager *ksm,
- const struct blk_crypto_key *key,
- struct blk_ksm_keyslot **slot_ptr);
-
-unsigned int blk_ksm_get_slot_idx(struct blk_ksm_keyslot *slot);
-
-void blk_ksm_put_slot(struct blk_ksm_keyslot *slot);
-
-bool blk_ksm_crypto_cfg_supported(struct blk_keyslot_manager *ksm,
- const struct blk_crypto_config *cfg);
-
-int blk_ksm_evict_key(struct blk_keyslot_manager *ksm,
- const struct blk_crypto_key *key);
-
-void blk_ksm_reprogram_all_keys(struct blk_keyslot_manager *ksm);
-
-void blk_ksm_destroy(struct blk_keyslot_manager *ksm);
-
-void blk_ksm_intersect_modes(struct blk_keyslot_manager *parent,
- const struct blk_keyslot_manager *child);
-
-void blk_ksm_init_passthrough(struct blk_keyslot_manager *ksm);
-
-bool blk_ksm_is_superset(struct blk_keyslot_manager *ksm_superset,
- struct blk_keyslot_manager *ksm_subset);
-
-void blk_ksm_update_capabilities(struct blk_keyslot_manager *target_ksm,
- struct blk_keyslot_manager *reference_ksm);
-
-#endif /* __LINUX_KEYSLOT_MANAGER_H */
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index a70d1ea03532..0ad1ddbb8b99 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -14,6 +14,11 @@
#ifdef CONFIG_KFENCE
+#include <linux/atomic.h>
+#include <linux/static_key.h>
+
+extern unsigned long kfence_sample_interval;
+
/*
* We allocate an even number of pages, as it simplifies calculations to map
* address to metadata indices; effectively, the very first page serves as an
@@ -22,13 +27,8 @@
#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
extern char *__kfence_pool;
-#ifdef CONFIG_KFENCE_STATIC_KEYS
-#include <linux/static_key.h>
DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
-#else
-#include <linux/atomic.h>
extern atomic_t kfence_allocation_gate;
-#endif
/**
* is_kfence_address() - check if an address belongs to KFENCE pool
@@ -51,22 +51,24 @@ extern atomic_t kfence_allocation_gate;
static __always_inline bool is_kfence_address(const void *addr)
{
/*
- * The non-NULL check is required in case the __kfence_pool pointer was
- * never initialized; keep it in the slow-path after the range-check.
+ * The __kfence_pool != NULL check is required to deal with the case
+ * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in
+ * the slow-path after the range-check!
*/
- return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr);
+ return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool);
}
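/*
 * Illustrative caller, not from this header: route a free through KFENCE
 * when the pointer falls inside the pool.
 */
static bool demo_try_kfence_free(void *ptr)
{
	/* is_kfence_address() is cheap; kfence_free() is __must_check */
	return is_kfence_address(ptr) && kfence_free(ptr);
}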
/**
- * kfence_alloc_pool() - allocate the KFENCE pool via memblock
+ * kfence_alloc_pool_and_metadata() - allocate the KFENCE pool and KFENCE
+ * metadata via memblock
*/
-void __init kfence_alloc_pool(void);
+void __init kfence_alloc_pool_and_metadata(void);
/**
* kfence_init() - perform KFENCE initialization at boot time
*
- * Requires that kfence_alloc_pool() was called before. This sets up the
- * allocation gate timer, and requires that workqueues are available.
+ * Requires that kfence_alloc_pool_and_metadata() was called before. This sets
+ * up the allocation gate timer, and requires that workqueues are available.
*/
void __init kfence_init(void);
@@ -115,13 +117,16 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);
*/
static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
-#ifdef CONFIG_KFENCE_STATIC_KEYS
- if (static_branch_unlikely(&kfence_allocation_key))
+#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0
+ if (!static_branch_unlikely(&kfence_allocation_key))
+ return NULL;
#else
- if (unlikely(!atomic_read(&kfence_allocation_gate)))
+ if (!static_branch_likely(&kfence_allocation_key))
+ return NULL;
#endif
- return __kfence_alloc(s, size, flags);
- return NULL;
+ if (likely(atomic_read(&kfence_allocation_gate) > 0))
+ return NULL;
+ return __kfence_alloc(s, size, flags);
}
/**
@@ -200,10 +205,28 @@ static __always_inline __must_check bool kfence_free(void *addr)
*/
bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+/**
+ * __kfence_obj_info() - fill kmem_obj_info struct
+ * @kpp: kmem_obj_info to be filled
+ * @object: the object
+ *
+ * Return:
+ * * false - not a KFENCE object
+ * * true - a KFENCE object, filled @kpp
+ *
+ * Copies information to @kpp for KFENCE objects.
+ */
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+#endif
+
#else /* CONFIG_KFENCE */
+#define kfence_sample_interval (0)
+
static inline bool is_kfence_address(const void *addr) { return false; }
-static inline void kfence_alloc_pool(void) { }
+static inline void kfence_alloc_pool_and_metadata(void) { }
static inline void kfence_init(void) { }
static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
@@ -217,6 +240,14 @@ static inline bool __must_check kfence_handle_page_fault(unsigned long addr, boo
return false;
}
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+ return false;
+}
+#endif
+
#endif
#endif /* _LINUX_KFENCE_H */
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 86249476b57f..8b81ac74829c 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -36,10 +36,15 @@
* to lock the reader.
*/
-#include <linux/kernel.h>
+#include <linux/array_size.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
-#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+#include <asm/barrier.h>
+#include <asm/errno.h>
+
+struct scatterlist;
struct __kfifo {
unsigned int in;
@@ -304,19 +309,25 @@ __kfifo_uint_must_check_helper( \
)
/**
- * kfifo_skip - skip output data
+ * kfifo_skip_count - skip output data
* @fifo: address of the fifo to be used
+ * @count: count of data to skip
*/
-#define kfifo_skip(fifo) \
-(void)({ \
+#define kfifo_skip_count(fifo, count) do { \
typeof((fifo) + 1) __tmp = (fifo); \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
if (__recsize) \
__kfifo_skip_r(__kfifo, __recsize); \
else \
- __kfifo->out++; \
-})
+ __kfifo->out += (count); \
+} while (0)
+
+/**
+ * kfifo_skip - skip output data
+ * @fifo: address of the fifo to be used
+ */
+#define kfifo_skip(fifo) kfifo_skip_count(fifo, 1)
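/*
 * Assumed usage of kfifo_skip_count()/kfifo_skip(): discard elements a
 * consumer has decided not to process. The fifo definition is
 * illustrative.
 */
static DEFINE_KFIFO(demo_fifo, int, 16);

static void demo_drop_two(void)
{
	kfifo_skip_count(&demo_fifo, 2);	/* discard two ints */
}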
/**
* kfifo_peek_len - gets the size of the next fifo record
@@ -359,6 +370,30 @@ __kfifo_int_must_check_helper( \
)
/**
+ * kfifo_alloc_node - dynamically allocates a new fifo buffer on a NUMA node
+ * @fifo: pointer to the fifo
+ * @size: the number of elements in the fifo, this must be a power of 2
+ * @gfp_mask: get_free_pages mask, passed to kmalloc()
+ * @node: NUMA node to allocate memory on
+ *
+ * This macro dynamically allocates a new fifo buffer with NUMA node awareness.
+ *
+ * The number of elements will be rounded up to a power of 2.
+ * The fifo will be released with kfifo_free().
+ * Returns 0 on success, otherwise an error code.
+ */
+#define kfifo_alloc_node(fifo, size, gfp_mask, node) \
+__kfifo_int_must_check_helper( \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ __is_kfifo_ptr(__tmp) ? \
+ __kfifo_alloc_node(__kfifo, size, sizeof(*__tmp->type), gfp_mask, node) : \
+ -EINVAL; \
+}) \
+)
+
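/*
 * Minimal sketch of the new kfifo_alloc_node(); the element count and
 * NUMA node choice are illustrative. The size is rounded up to a power
 * of 2 internally, and the buffer is later released with kfifo_free().
 */
static int demo_init_fifo_on_node(struct kfifo *fifo, int node)
{
	return kfifo_alloc_node(fifo, 1024, GFP_KERNEL, node);
}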
+/**
* kfifo_free - frees the fifo
* @fifo: the fifo to be freed
*/
@@ -578,7 +613,7 @@ __kfifo_uint_must_check_helper( \
* @buf: pointer to the storage buffer
* @n: max. number of elements to get
*
- * This macro get some data from the fifo and return the numbers of elements
+ * This macro gets some data from the fifo and returns the number of elements
* copied.
*
* Note that with only one concurrent reader and one concurrent
@@ -605,7 +640,7 @@ __kfifo_uint_must_check_helper( \
* @n: max. number of elements to get
* @lock: pointer to the spinlock to use for locking
*
- * This macro get the data from the fifo and return the numbers of elements
+ * This macro gets the data from the fifo and returns the number of elements
* copied.
*/
#define kfifo_out_spinlocked(fifo, buf, n, lock) \
@@ -688,7 +723,7 @@ __kfifo_uint_must_check_helper( \
* writer, you don't need extra locking to use these macro.
*/
#define kfifo_to_user(fifo, to, len, copied) \
-__kfifo_uint_must_check_helper( \
+__kfifo_int_must_check_helper( \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
void __user *__to = (to); \
@@ -703,11 +738,12 @@ __kfifo_uint_must_check_helper( \
)
/**
- * kfifo_dma_in_prepare - setup a scatterlist for DMA input
+ * kfifo_dma_in_prepare_mapped - setup a scatterlist for DMA input
* @fifo: address of the fifo to be used
* @sgl: pointer to the scatterlist array
* @nents: number of entries in the scatterlist array
* @len: number of elements to transfer
+ * @dma: mapped dma address to fill into @sgl
*
* This macro fills a scatterlist for DMA input.
 * It returns the number of entries in the scatterlist array.
@@ -715,7 +751,7 @@ __kfifo_uint_must_check_helper( \
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these macros.
*/
-#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \
+#define kfifo_dma_in_prepare_mapped(fifo, sgl, nents, len, dma) \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
struct scatterlist *__sgl = (sgl); \
@@ -724,16 +760,20 @@ __kfifo_uint_must_check_helper( \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
(__recsize) ? \
- __kfifo_dma_in_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \
- __kfifo_dma_in_prepare(__kfifo, __sgl, __nents, __len); \
+ __kfifo_dma_in_prepare_r(__kfifo, __sgl, __nents, __len, __recsize, \
+ dma) : \
+ __kfifo_dma_in_prepare(__kfifo, __sgl, __nents, __len, dma); \
})
+#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \
+ kfifo_dma_in_prepare_mapped(fifo, sgl, nents, len, DMA_MAPPING_ERROR)
+
/**
* kfifo_dma_in_finish - finish a DMA IN operation
* @fifo: address of the fifo to be used
 * @len: number of bytes received
*
- * This macro finish a DMA IN operation. The in counter will be updated by
+ * This macro finishes a DMA IN operation. The in counter will be updated by
* the len parameter. No error checking will be done.
*
* Note that with only one concurrent reader and one concurrent
@@ -752,11 +792,12 @@ __kfifo_uint_must_check_helper( \
})
/**
- * kfifo_dma_out_prepare - setup a scatterlist for DMA output
+ * kfifo_dma_out_prepare_mapped - setup a scatterlist for DMA output
* @fifo: address of the fifo to be used
* @sgl: pointer to the scatterlist array
* @nents: number of entries in the scatterlist array
* @len: number of elements to transfer
+ * @dma: mapped dma address to fill into @sgl
*
 * This macro fills a scatterlist for DMA output, transferring at most @len
 * bytes.
@@ -766,7 +807,7 @@ __kfifo_uint_must_check_helper( \
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these macros.
*/
-#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \
+#define kfifo_dma_out_prepare_mapped(fifo, sgl, nents, len, dma) \
({ \
typeof((fifo) + 1) __tmp = (fifo); \
struct scatterlist *__sgl = (sgl); \
@@ -775,32 +816,29 @@ __kfifo_uint_must_check_helper( \
const size_t __recsize = sizeof(*__tmp->rectype); \
struct __kfifo *__kfifo = &__tmp->kfifo; \
(__recsize) ? \
- __kfifo_dma_out_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \
- __kfifo_dma_out_prepare(__kfifo, __sgl, __nents, __len); \
+ __kfifo_dma_out_prepare_r(__kfifo, __sgl, __nents, __len, __recsize, \
+ dma) : \
+ __kfifo_dma_out_prepare(__kfifo, __sgl, __nents, __len, dma); \
})
+#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \
+ kfifo_dma_out_prepare_mapped(fifo, sgl, nents, len, DMA_MAPPING_ERROR)
+
/**
* kfifo_dma_out_finish - finish a DMA OUT operation
* @fifo: address of the fifo to be used
* @len: number of bytes transferred
*
- * This macro finish a DMA OUT operation. The out counter will be updated by
+ * This macro finishes a DMA OUT operation. The out counter will be updated by
* the len parameter. No error checking will be done.
*
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these macros.
*/
-#define kfifo_dma_out_finish(fifo, len) \
-(void)({ \
- typeof((fifo) + 1) __tmp = (fifo); \
- unsigned int __len = (len); \
- const size_t __recsize = sizeof(*__tmp->rectype); \
- struct __kfifo *__kfifo = &__tmp->kfifo; \
- if (__recsize) \
- __kfifo_dma_out_finish_r(__kfifo, __recsize); \
- else \
- __kfifo->out += __len / sizeof(*__tmp->type); \
-})
+#define kfifo_dma_out_finish(fifo, len) do { \
+ typeof((fifo) + 1) ___tmp = (fifo); \
+ kfifo_skip_count(___tmp, (len) / sizeof(*___tmp->type)); \
+} while (0)
/**
* kfifo_out_peek - gets some data from the fifo
@@ -808,7 +846,7 @@ __kfifo_uint_must_check_helper( \
* @buf: pointer to the storage buffer
* @n: max. number of elements to get
*
- * This macro get the data from the fifo and return the numbers of elements
+ * This macro gets the data from the fifo and returns the number of elements
* copied. The data is not removed from the fifo.
*
* Note that with only one concurrent reader and one concurrent
@@ -828,8 +866,71 @@ __kfifo_uint_must_check_helper( \
}) \
)
-extern int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
- size_t esize, gfp_t gfp_mask);
+/**
+ * kfifo_out_linear - gets a tail of/offset to available data
+ * @fifo: address of the fifo to be used
+ * @tail: pointer to an unsigned int to store the value of tail
+ * @n: max. number of elements to point at
+ *
+ * This macro obtains the offset (tail) of the available data in the fifo
+ * buffer and returns the number of elements available. The returned count
+ * runs to the end of the available data or to the end of the buffer,
+ * whichever comes first, so it can be used for linear data processing
+ * (like a memcpy() from (@fifo->data + @tail) with the returned count).
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use this macro.
+ */
+#define kfifo_out_linear(fifo, tail, n) \
+__kfifo_uint_must_check_helper( \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ unsigned int *__tail = (tail); \
+ unsigned long __n = (n); \
+ const size_t __recsize = sizeof(*__tmp->rectype); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ (__recsize) ? \
+ __kfifo_out_linear_r(__kfifo, __tail, __n, __recsize) : \
+ __kfifo_out_linear(__kfifo, __tail, __n); \
+}) \
+)
+
+/**
+ * kfifo_out_linear_ptr - gets a pointer to the available data
+ * @fifo: address of the fifo to be used
+ * @ptr: pointer to data to store the pointer to tail
+ * @n: max. number of elements to point at
+ *
+ * Like kfifo_out_linear(), this macro obtains a pointer to the available
+ * data in the fifo buffer and returns the number of elements available. The
+ * returned count runs to the end of the available data or to the end of the
+ * buffer, whichever comes first, so it can be used for linear data
+ * processing (like a memcpy() from @ptr with the returned count).
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use this macro.
+ */
+#define kfifo_out_linear_ptr(fifo, ptr, n) \
+__kfifo_uint_must_check_helper( \
+({ \
+ typeof((fifo) + 1) ___tmp = (fifo); \
+ unsigned int ___tail; \
+ unsigned int ___n = kfifo_out_linear(___tmp, &___tail, (n)); \
+ *(ptr) = ___tmp->kfifo.data + ___tail * kfifo_esize(___tmp); \
+ ___n; \
+}) \
+)
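To make the linear-processing pattern concrete, a minimal sketch (a byte fifo declared elsewhere is assumed; kfifo_skip_count() consumes what was processed):

	unsigned char *ptr;
	unsigned int n;

	n = kfifo_out_linear_ptr(&fifo, &ptr, 64);
	/* process n contiguous elements starting at ptr, then consume them */
	kfifo_skip_count(&fifo, n);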
+
+extern int __kfifo_alloc_node(struct __kfifo *fifo, unsigned int size,
+ size_t esize, gfp_t gfp_mask, int node);
+
+static inline int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
+ size_t esize, gfp_t gfp_mask)
+{
+ return __kfifo_alloc_node(fifo, size, esize, gfp_mask, NUMA_NO_NODE);
+}
extern void __kfifo_free(struct __kfifo *fifo);
@@ -849,14 +950,17 @@ extern int __kfifo_to_user(struct __kfifo *fifo,
void __user *to, unsigned long len, unsigned int *copied);
extern unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
- struct scatterlist *sgl, int nents, unsigned int len);
+ struct scatterlist *sgl, int nents, unsigned int len, dma_addr_t dma);
extern unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
- struct scatterlist *sgl, int nents, unsigned int len);
+ struct scatterlist *sgl, int nents, unsigned int len, dma_addr_t dma);
extern unsigned int __kfifo_out_peek(struct __kfifo *fifo,
void *buf, unsigned int len);
+extern unsigned int __kfifo_out_linear(struct __kfifo *fifo,
+ unsigned int *tail, unsigned int n);
+
extern unsigned int __kfifo_in_r(struct __kfifo *fifo,
const void *buf, unsigned int len, size_t recsize);
@@ -871,15 +975,15 @@ extern int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
unsigned long len, unsigned int *copied, size_t recsize);
extern unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
- struct scatterlist *sgl, int nents, unsigned int len, size_t recsize);
+ struct scatterlist *sgl, int nents, unsigned int len, size_t recsize,
+ dma_addr_t dma);
extern void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
unsigned int len, size_t recsize);
extern unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
- struct scatterlist *sgl, int nents, unsigned int len, size_t recsize);
-
-extern void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize);
+ struct scatterlist *sgl, int nents, unsigned int len, size_t recsize,
+ dma_addr_t dma);
extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize);
@@ -888,6 +992,9 @@ extern void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize);
extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo,
void *buf, unsigned int len, size_t recsize);
+extern unsigned int __kfifo_out_linear_r(struct __kfifo *fifo,
+ unsigned int *tail, unsigned int n, size_t recsize);
+
extern unsigned int __kfifo_max_r(unsigned int len, size_t recsize);
#endif
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 392a3670944c..5eebbe7a3545 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -105,9 +105,9 @@ extern int dbg_set_reg(int regno, void *mem, struct pt_regs *regs);
*/
/**
- * kgdb_arch_init - Perform any architecture specific initalization.
+ * kgdb_arch_init - Perform any architecture specific initialization.
*
- * This function will handle the initalization of any architecture
+ * This function will handle the initialization of any architecture
* specific callbacks.
*/
extern int kgdb_arch_init(void);
@@ -229,9 +229,9 @@ extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt);
extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt);
/**
- * kgdb_arch_late - Perform any architecture specific initalization.
+ * kgdb_arch_late - Perform any architecture specific initialization.
*
- * This function will handle the late initalization of any
+ * This function will handle the late initialization of any
* architecture specific callbacks. This is an optional function for
* handling things like late initialization of hw breakpoints. The
* default implementation does nothing.
@@ -257,7 +257,6 @@ extern void kgdb_arch_late(void);
* hardware breakpoints.
* @correct_hw_break: Allow an architecture to specify how to correct the
* hardware debug registers.
- * @enable_nmi: Manage NMI-triggered entry to KGDB
*/
struct kgdb_arch {
unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
@@ -270,8 +269,6 @@ struct kgdb_arch {
void (*disable_hw_break)(struct pt_regs *regs);
void (*remove_all_hw_break)(void);
void (*correct_hw_break)(void);
-
- void (*enable_nmi)(bool on);
};
/**
@@ -306,16 +303,6 @@ extern const struct kgdb_arch arch_kgdb_ops;
extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
-#ifdef CONFIG_SERIAL_KGDB_NMI
-extern int kgdb_register_nmi_console(void);
-extern int kgdb_unregister_nmi_console(void);
-extern bool kgdb_nmi_poll_knock(void);
-#else
-static inline int kgdb_register_nmi_console(void) { return 0; }
-static inline int kgdb_unregister_nmi_console(void) { return 0; }
-static inline bool kgdb_nmi_poll_knock(void) { return true; }
-#endif
-
extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
extern struct kgdb_io *dbg_io_ops;
@@ -365,5 +352,6 @@ extern void kgdb_free_init_mem(void);
#define dbg_late_init()
static inline void kgdb_panic(const char *msg) {}
static inline void kgdb_free_init_mem(void) { }
+static inline int kgdb_nmicallback(int cpu, void *regs) { return 1; }
#endif /* ! CONFIG_KGDB */
#endif /* _KGDB_H_ */
diff --git a/include/linux/kho/abi/luo.h b/include/linux/kho/abi/luo.h
new file mode 100644
index 000000000000..bb099c92e469
--- /dev/null
+++ b/include/linux/kho/abi/luo.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+
+/**
+ * DOC: Live Update Orchestrator ABI
+ *
+ * This header defines the stable Application Binary Interface used by the
+ * Live Update Orchestrator to pass state from a pre-update kernel to a
+ * post-update kernel. The ABI is built upon the Kexec HandOver framework
+ * and uses a Flattened Device Tree to describe the preserved data.
+ *
+ * This interface is a contract. Any modification to the FDT structure, node
+ * properties, compatible strings, or the layout of the `__packed` serialization
+ * structures defined here constitutes a breaking change. Such changes require
+ * incrementing the version number in the relevant `_COMPATIBLE` string to
+ * prevent a new kernel from misinterpreting data from an old kernel.
+ *
+ * Changes are allowed provided the compatibility version is incremented;
+ * however, backward/forward compatibility is only guaranteed for kernels
+ * supporting the same ABI version.
+ *
+ * FDT Structure Overview:
+ * The entire LUO state is encapsulated within a single KHO entry named "LUO".
+ * This entry contains an FDT with the following layout:
+ *
+ * .. code-block:: none
+ *
+ * / {
+ * compatible = "luo-v1";
+ * liveupdate-number = <...>;
+ *
+ * luo-session {
+ * compatible = "luo-session-v1";
+ * luo-session-header = <phys_addr_of_session_header_ser>;
+ * };
+ * };
+ *
+ * Main LUO Node (/):
+ *
+ * - compatible: "luo-v1"
+ * Identifies the overall LUO ABI version.
+ * - liveupdate-number: u64
+ * A counter tracking the number of successful live updates performed.
+ *
+ * Session Node (luo-session):
+ * This node describes all preserved user-space sessions.
+ *
+ * - compatible: "luo-session-v1"
+ * Identifies the session ABI version.
+ * - luo-session-header: u64
+ * The physical address of a `struct luo_session_header_ser`. This structure
+ * is the header for a contiguous block of memory containing an array of
+ * `struct luo_session_ser`, one for each preserved session.
+ *
+ * Serialization Structures:
+ * The FDT properties point to memory regions containing arrays of simple,
+ * `__packed` structures. These structures contain the actual preserved state.
+ *
+ * - struct luo_session_header_ser:
+ * Header for the session array. Contains the number of
+ * `struct luo_session_ser` entries that immediately follow it in the
+ * preserved memory block.
+ *
+ * - struct luo_session_ser:
+ * Metadata for a single session, including its name and a physical pointer
+ * to another preserved memory block containing an array of
+ * `struct luo_file_ser` for all files in that session.
+ *
+ * - struct luo_file_ser:
+ * Metadata for a single preserved file. Contains the `compatible` string to
+ * find the correct handler in the new kernel, a user-provided `token` for
+ * identification, and an opaque `data` handle for the handler to use.
+ */
+
+#ifndef _LINUX_KHO_ABI_LUO_H
+#define _LINUX_KHO_ABI_LUO_H
+
+#include <uapi/linux/liveupdate.h>
+
+/*
+ * The LUO FDT hooks all LUO state for sessions, fds, etc.
+ * In the root it also carries "liveupdate-number" 64-bit property that
+ * corresponds to the number of live-updates performed on this machine.
+ */
+#define LUO_FDT_SIZE PAGE_SIZE
+#define LUO_FDT_KHO_ENTRY_NAME "LUO"
+#define LUO_FDT_COMPATIBLE "luo-v1"
+#define LUO_FDT_LIVEUPDATE_NUM "liveupdate-number"
+
+#define LIVEUPDATE_HNDL_COMPAT_LENGTH 48
+
+/**
+ * struct luo_file_ser - Represents a serialized preserved file.
+ * @compatible: File handler compatible string.
+ * @data: Opaque private data handle for the file handler.
+ * @token: User-provided token for this file.
+ *
+ * If this structure is modified, `LUO_FDT_SESSION_COMPATIBLE` must be updated.
+ */
+struct luo_file_ser {
+ char compatible[LIVEUPDATE_HNDL_COMPAT_LENGTH];
+ u64 data;
+ u64 token;
+} __packed;
+
+/**
+ * struct luo_file_set_ser - Represents the serialized metadata for a file set.
+ * @files: The physical address of a contiguous memory block that holds
+ * the serialized state of files (array of luo_file_ser) in this file
+ * set.
+ * @count: The total number of files that were part of this session during
+ * serialization. Used for iteration and validation during
+ * restoration.
+ */
+struct luo_file_set_ser {
+ u64 files;
+ u64 count;
+} __packed;
+
+/*
+ * LUO FDT session node
+ * LUO_FDT_SESSION_HEADER: is a u64 physical address of struct
+ * luo_session_header_ser
+ */
+#define LUO_FDT_SESSION_NODE_NAME "luo-session"
+#define LUO_FDT_SESSION_COMPATIBLE "luo-session-v2"
+#define LUO_FDT_SESSION_HEADER "luo-session-header"
+
+/**
+ * struct luo_session_header_ser - Header for the serialized session data block.
+ * @count: The number of `struct luo_session_ser` entries that immediately
+ * follow this header in the memory block.
+ *
+ * This structure is located at the beginning of a contiguous block of
+ * physical memory preserved across the kexec. It provides the necessary
+ * metadata to interpret the array of session entries that follow.
+ *
+ * If this structure is modified, `LUO_FDT_SESSION_COMPATIBLE` must be updated.
+ */
+struct luo_session_header_ser {
+ u64 count;
+} __packed;
+
+/**
+ * struct luo_session_ser - Represents the serialized metadata for a LUO session.
+ * @name: The unique name of the session, provided by userspace at
+ * the time of session creation.
+ * @file_set_ser: Serialized files belonging to this session.
+ *
+ * This structure is used to package session-specific metadata for transfer
+ * between kernels via Kexec Handover. An array of these structures (one per
+ * session) is created and passed to the new kernel, allowing it to reconstruct
+ * the session context.
+ *
+ * If this structure is modified, `LUO_FDT_SESSION_COMPATIBLE` must be updated.
+ */
+struct luo_session_ser {
+ char name[LIVEUPDATE_SESSION_NAME_LENGTH];
+ struct luo_file_set_ser file_set_ser;
+} __packed;
+
+#endif /* _LINUX_KHO_ABI_LUO_H */
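To make the pointer chain concrete, a hedged restoration-side sketch (the session_header_pa value would come from the "luo-session-header" FDT property; names and error handling are hypothetical, not part of this header):

	struct luo_session_header_ser *hdr = phys_to_virt(session_header_pa);
	struct luo_session_ser *s = (struct luo_session_ser *)(hdr + 1);
	u64 i, j;

	for (i = 0; i < hdr->count; i++, s++) {
		struct luo_file_ser *f = phys_to_virt(s->file_set_ser.files);

		for (j = 0; j < s->file_set_ser.count; j++, f++)
			/* match f->compatible to a handler, hand it
			 * f->data and f->token */;
	}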
diff --git a/include/linux/kho/abi/memfd.h b/include/linux/kho/abi/memfd.h
new file mode 100644
index 000000000000..da7d063474a1
--- /dev/null
+++ b/include/linux/kho/abi/memfd.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ *
+ * Copyright (C) 2025 Amazon.com Inc. or its affiliates.
+ * Pratyush Yadav <ptyadav@amazon.de>
+ */
+
+#ifndef _LINUX_KHO_ABI_MEMFD_H
+#define _LINUX_KHO_ABI_MEMFD_H
+
+#include <linux/types.h>
+#include <linux/kexec_handover.h>
+
+/**
+ * DOC: memfd Live Update ABI
+ *
+ * This header defines the ABI for preserving the state of a memfd across a
+ * kexec reboot using the LUO.
+ *
+ * The state is serialized into a packed structure `struct memfd_luo_ser`
+ * which is handed over to the next kernel via the KHO mechanism.
+ *
+ * This interface is a contract. Any modification to the structure layout
+ * constitutes a breaking change. Such changes require incrementing the
+ * version number in the MEMFD_LUO_FH_COMPATIBLE string.
+ */
+
+/**
+ * MEMFD_LUO_FOLIO_DIRTY - The folio is dirty.
+ *
+ * This flag indicates the folio contains user data. A non-dirty folio is
+ * one that was allocated (say using fallocate(2)) but not written to.
+ */
+#define MEMFD_LUO_FOLIO_DIRTY BIT(0)
+
+/**
+ * MEMFD_LUO_FOLIO_UPTODATE - The folio is up-to-date.
+ *
+ * An up-to-date folio has been zeroed out. shmem zeroes out folios on first
+ * use. This flag tracks which folios need zeroing.
+ */
+#define MEMFD_LUO_FOLIO_UPTODATE BIT(1)
+
+/**
+ * struct memfd_luo_folio_ser - Serialized state of a single folio.
+ * @pfn: The page frame number of the folio.
+ * @flags: Flags to describe the state of the folio.
+ * @index: The page offset (pgoff_t) of the folio within the original file.
+ */
+struct memfd_luo_folio_ser {
+ u64 pfn:52;
+ u64 flags:12;
+ u64 index;
+} __packed;
+
+/**
+ * struct memfd_luo_ser - Main serialization structure for a memfd.
+ * @pos: The file's current position (f_pos).
+ * @size: The total size of the file in bytes (i_size).
+ * @nr_folios: Number of folios in the folios array.
+ * @folios: KHO vmalloc descriptor pointing to the array of
+ * struct memfd_luo_folio_ser.
+ */
+struct memfd_luo_ser {
+ u64 pos;
+ u64 size;
+ u64 nr_folios;
+ struct kho_vmalloc folios;
+} __packed;
+
+/* The compatibility string for memfd file handler */
+#define MEMFD_LUO_FH_COMPATIBLE "memfd-v1"
+
+#endif /* _LINUX_KHO_ABI_MEMFD_H */
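As an illustration of the 52/12-bit folio layout above (helper name hypothetical, not part of this header):

	static void memfd_demo_fill(struct memfd_luo_folio_ser *fs,
				    struct folio *folio, pgoff_t index)
	{
		fs->pfn = folio_pfn(folio);	/* PFNs fit in 52 bits */
		fs->flags = MEMFD_LUO_FOLIO_DIRTY | MEMFD_LUO_FOLIO_UPTODATE;
		fs->index = index;
	}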
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 2fcc01891b47..eb1946a70cff 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -2,95 +2,59 @@
#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H
-#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
-#include <linux/shmem_fs.h>
-
+#include <linux/mm.h>
+extern unsigned int khugepaged_max_ptes_none __read_mostly;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;
extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
extern int start_stop_khugepaged(void);
-extern int __khugepaged_enter(struct mm_struct *mm);
+extern void __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
-extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
- unsigned long vm_flags);
+extern void khugepaged_enter_vma(struct vm_area_struct *vma,
+ vm_flags_t vm_flags);
extern void khugepaged_min_free_kbytes_update(void);
-#ifdef CONFIG_SHMEM
-extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
-#else
-static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr)
-{
-}
-#endif
-
-#define khugepaged_enabled() \
- (transparent_hugepage_flags & \
- ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
-#define khugepaged_always() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_FLAG))
-#define khugepaged_req_madv() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
-#define khugepaged_defrag() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
+extern bool current_is_khugepaged(void);
+extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+ bool install_pmd);
-static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
- if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
- return __khugepaged_enter(mm);
- return 0;
+ if (mm_flags_test(MMF_VM_HUGEPAGE, oldmm))
+ __khugepaged_enter(mm);
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
- if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
+ if (mm_flags_test(MMF_VM_HUGEPAGE, mm))
__khugepaged_exit(mm);
}
-
-static inline int khugepaged_enter(struct vm_area_struct *vma,
- unsigned long vm_flags)
-{
- if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
- if ((khugepaged_always() ||
- (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) ||
- (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
- !(vm_flags & VM_NOHUGEPAGE) &&
- !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
- if (__khugepaged_enter(vma->vm_mm))
- return -ENOMEM;
- return 0;
-}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
- return 0;
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
-static inline int khugepaged_enter(struct vm_area_struct *vma,
- unsigned long vm_flags)
+static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
+ vm_flags_t vm_flags)
{
- return 0;
}
-static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
- unsigned long vm_flags)
+static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr, bool install_pmd)
{
return 0;
}
-static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr)
+
+static inline void khugepaged_min_free_kbytes_update(void)
{
}
-static inline void khugepaged_min_free_kbytes_update(void)
+static inline bool current_is_khugepaged(void)
{
+ return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 34684b2026ab..fbd424b2abb1 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -26,13 +26,14 @@ extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
extern void kmemleak_update_trace(const void *ptr) __ref;
extern void kmemleak_not_leak(const void *ptr) __ref;
+extern void kmemleak_transient_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
+extern void kmemleak_ignore_percpu(const void __percpu *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;
-extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
gfp_t gfp) __ref;
extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref;
-extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref;
extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
@@ -94,6 +95,12 @@ static inline void kmemleak_update_trace(const void *ptr)
static inline void kmemleak_not_leak(const void *ptr)
{
}
+static inline void kmemleak_transient_leak(const void *ptr)
+{
+}
+static inline void kmemleak_ignore_percpu(const void __percpu *ptr)
+{
+}
static inline void kmemleak_ignore(const void *ptr)
{
}
@@ -107,15 +114,12 @@ static inline void kmemleak_no_scan(const void *ptr)
{
}
static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
- int min_count, gfp_t gfp)
+ gfp_t gfp)
{
}
static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
}
-static inline void kmemleak_not_leak_phys(phys_addr_t phys)
-{
-}
static inline void kmemleak_ignore_phys(phys_addr_t phys)
{
}
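A short usage sketch for the new per-CPU ignore hook (caller context hypothetical): the object stays reachable for the device's lifetime, so leak reports for it are suppressed.

	int __percpu *counters = alloc_percpu(int);

	if (!counters)
		return -ENOMEM;
	/* Reachable for the lifetime of the device; do not report as a leak. */
	kmemleak_ignore_percpu(counters);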
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 68f69362d427..9a07c3215389 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -14,10 +14,7 @@
#include <linux/workqueue.h>
#include <linux/sysctl.h>
-#define KMOD_PATH_LEN 256
-
#ifdef CONFIG_MODULES
-extern char modprobe_path[]; /* for sysctl */
/* modprobe exit status on success, -ve on error. Return value
* usually useless though. */
extern __printf(2, 3)
diff --git a/include/linux/kmsan-checks.h b/include/linux/kmsan-checks.h
new file mode 100644
index 000000000000..e1082dc40abc
--- /dev/null
+++ b/include/linux/kmsan-checks.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN checks to be used for one-off annotations in subsystems.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+
+#ifndef _LINUX_KMSAN_CHECKS_H
+#define _LINUX_KMSAN_CHECKS_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_KMSAN
+
+/**
+ * kmsan_poison_memory() - Mark the memory range as uninitialized.
+ * @address: address to start with.
+ * @size: size of buffer to poison.
+ * @flags: GFP flags for allocations done by this function.
+ *
+ * Until other data is written to this range, KMSAN will treat it as
+ * uninitialized. Error reports for this memory will reference the call site of
+ * kmsan_poison_memory() as origin.
+ */
+void kmsan_poison_memory(const void *address, size_t size, gfp_t flags);
+
+/**
+ * kmsan_unpoison_memory() - Mark the memory range as initialized.
+ * @address: address to start with.
+ * @size: size of buffer to unpoison.
+ *
+ * Until other data is written to this range, KMSAN will treat it as
+ * initialized.
+ */
+void kmsan_unpoison_memory(const void *address, size_t size);
+
+/**
+ * kmsan_check_memory() - Check the memory range for being initialized.
+ * @address: address to start with.
+ * @size: size of buffer to check.
+ *
+ * If any piece of the given range is marked as uninitialized, KMSAN will report
+ * an error.
+ */
+void kmsan_check_memory(const void *address, size_t size);
+
+/**
+ * kmsan_copy_to_user() - Notify KMSAN about a data transfer to userspace.
+ * @to: destination address in the userspace.
+ * @from: source address in the kernel.
+ * @to_copy: number of bytes to copy.
+ * @left: number of bytes not copied.
+ *
+ * If this is a real userspace data transfer, KMSAN checks the bytes that were
+ * actually copied to ensure there was no information leak. If @to belongs to
+ * the kernel space (which is possible for compat syscalls), KMSAN just copies
+ * the metadata.
+ */
+void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
+ size_t left);
+
+/**
+ * kmsan_memmove() - Notify KMSAN about a data copy within kernel.
+ * @to: destination address in the kernel.
+ * @from: source address in the kernel.
+ * @to_copy: number of bytes to copy.
+ *
+ * Invoked after non-instrumented version (e.g. implemented using assembly
+ * code) of memmove()/memcpy() is called, in order to copy KMSAN's metadata.
+ */
+void kmsan_memmove(void *to, const void *from, size_t to_copy);
+
+#else
+
+static inline void kmsan_poison_memory(const void *address, size_t size,
+ gfp_t flags)
+{
+}
+static inline void kmsan_unpoison_memory(const void *address, size_t size)
+{
+}
+static inline void kmsan_check_memory(const void *address, size_t size)
+{
+}
+static inline void kmsan_copy_to_user(void __user *to, const void *from,
+ size_t to_copy, size_t left)
+{
+}
+
+static inline void kmsan_memmove(void *to, const void *from, size_t to_copy)
+{
+}
+
+#endif
+
+#endif /* _LINUX_KMSAN_CHECKS_H */
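A minimal annotation sketch, assuming a buffer filled by means KMSAN cannot observe (e.g. a device); the buffer and flow are hypothetical:

	char buf[64];

	kmsan_poison_memory(buf, sizeof(buf), GFP_KERNEL);
	/* ... hardware fills buf behind KMSAN's back ... */
	kmsan_unpoison_memory(buf, sizeof(buf));
	kmsan_check_memory(buf, sizeof(buf));	/* should not report now */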
diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
new file mode 100644
index 000000000000..7da9fd506b39
--- /dev/null
+++ b/include/linux/kmsan.h
@@ -0,0 +1,411 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN API for subsystems.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_H
+#define _LINUX_KMSAN_H
+
+#include <linux/dma-direction.h>
+#include <linux/gfp.h>
+#include <linux/kmsan-checks.h>
+#include <linux/types.h>
+
+struct page;
+struct kmem_cache;
+struct task_struct;
+struct scatterlist;
+struct urb;
+
+#ifdef CONFIG_KMSAN
+
+/**
+ * kmsan_task_create() - Initialize KMSAN state for the task.
+ * @task: task to initialize.
+ */
+void kmsan_task_create(struct task_struct *task);
+
+/**
+ * kmsan_task_exit() - Notify KMSAN that a task has exited.
+ * @task: task about to finish.
+ */
+void kmsan_task_exit(struct task_struct *task);
+
+/**
+ * kmsan_init_shadow() - Initialize KMSAN shadow at boot time.
+ *
+ * Allocate and initialize KMSAN metadata for early allocations.
+ */
+void __init kmsan_init_shadow(void);
+
+/**
+ * kmsan_init_runtime() - Initialize KMSAN state and enable KMSAN.
+ */
+void __init kmsan_init_runtime(void);
+
+/**
+ * kmsan_memblock_free_pages() - handle freeing of memblock pages.
+ * @page: struct page to free.
+ * @order: order of @page.
+ *
+ * Freed pages are either returned to buddy allocator or held back to be used
+ * as metadata pages.
+ */
+bool __init __must_check kmsan_memblock_free_pages(struct page *page,
+ unsigned int order);
+
+/**
+ * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
+ * @page: struct page pointer returned by alloc_pages().
+ * @order: order of allocated struct page.
+ * @flags: GFP flags used by alloc_pages()
+ *
+ * KMSAN marks 1<<@order pages starting at @page as uninitialized, unless
+ * @flags contain __GFP_ZERO.
+ */
+void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
+
+/**
+ * kmsan_free_page() - Notify KMSAN about a free_pages() call.
+ * @page: struct page pointer passed to free_pages().
+ * @order: order of deallocated struct page.
+ *
+ * KMSAN marks freed memory as uninitialized.
+ */
+void kmsan_free_page(struct page *page, unsigned int order);
+
+/**
+ * kmsan_copy_page_meta() - Copy KMSAN metadata between two pages.
+ * @dst: destination page.
+ * @src: source page.
+ *
+ * KMSAN copies the contents of metadata pages for @src into the metadata pages
+ * for @dst. If @dst has no associated metadata pages, nothing happens.
+ * If @src has no associated metadata pages, @dst metadata pages are unpoisoned.
+ */
+void kmsan_copy_page_meta(struct page *dst, struct page *src);
+
+/**
+ * kmsan_slab_alloc() - Notify KMSAN about a slab allocation.
+ * @s: slab cache the object belongs to.
+ * @object: object pointer.
+ * @flags: GFP flags passed to the allocator.
+ *
+ * Depending on cache flags and GFP flags, KMSAN sets up the metadata of the
+ * newly created object, marking it as initialized or uninitialized.
+ */
+void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
+
+/**
+ * kmsan_slab_free() - Notify KMSAN about a slab deallocation.
+ * @s: slab cache the object belongs to.
+ * @object: object pointer.
+ *
+ * KMSAN marks the freed object as uninitialized.
+ */
+void kmsan_slab_free(struct kmem_cache *s, void *object);
+
+/**
+ * kmsan_kmalloc_large() - Notify KMSAN about a large slab allocation.
+ * @ptr: object pointer.
+ * @size: object size.
+ * @flags: GFP flags passed to the allocator.
+ *
+ * Similar to kmsan_slab_alloc(), but for large allocations.
+ */
+void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
+
+/**
+ * kmsan_kfree_large() - Notify KMSAN about a large slab deallocation.
+ * @ptr: object pointer.
+ *
+ * Similar to kmsan_slab_free(), but for large allocations.
+ */
+void kmsan_kfree_large(const void *ptr);
+
+/**
+ * kmsan_vmap_pages_range_noflush() - Notify KMSAN about a vmap.
+ * @start: start of vmapped range.
+ * @end: end of vmapped range.
+ * @prot: page protection flags used for vmap.
+ * @pages: array of pages.
+ * @page_shift: page_shift passed to vmap_range_noflush().
+ * @gfp_mask: gfp_mask to use internally.
+ *
+ * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
+ * vmalloc metadata address range. Returns 0 on success, callers must check
+ * for non-zero return value.
+ */
+int __must_check kmsan_vmap_pages_range_noflush(unsigned long start,
+ unsigned long end,
+ pgprot_t prot,
+ struct page **pages,
+ unsigned int page_shift,
+ gfp_t gfp_mask);
+
+/**
+ * kmsan_vunmap_range_noflush() - Notify KMSAN about a vunmap.
+ * @start: start of vunmapped range.
+ * @end: end of vunmapped range.
+ *
+ * KMSAN unmaps the contiguous metadata ranges created by
+ * kmsan_vmap_pages_range_noflush().
+ */
+void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
+
+/**
+ * kmsan_ioremap_page_range() - Notify KMSAN about an ioremap_page_range() call.
+ * @addr: range start.
+ * @end: range end.
+ * @phys_addr: physical range start.
+ * @prot: page protection flags used for ioremap_page_range().
+ * @page_shift: page_shift argument passed to vmap_range_noflush().
+ *
+ * KMSAN creates new metadata pages for the physical pages mapped into the
+ * virtual memory. Returns 0 on success; callers must check for a non-zero
+ * return value.
+ */
+int __must_check kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int page_shift);
+
+/**
+ * kmsan_iounmap_page_range() - Notify KMSAN about an iounmap_page_range() call.
+ * @start: range start.
+ * @end: range end.
+ *
+ * KMSAN unmaps the metadata pages for the given range and, unlike for
+ * vunmap_page_range(), also deallocates them.
+ */
+void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
+
+/**
+ * kmsan_handle_dma() - Handle a DMA data transfer.
+ * @phys: physical address of the buffer.
+ * @size: buffer size.
+ * @dir: one of possible dma_data_direction values.
+ *
+ * Depending on @dir, KMSAN:
+ * * checks the buffer, if it is copied to device;
+ * * initializes the buffer, if it is copied from device;
+ * * does both, if this is a DMA_BIDIRECTIONAL transfer.
+ */
+void kmsan_handle_dma(phys_addr_t phys, size_t size,
+ enum dma_data_direction dir);
+
+/**
+ * kmsan_handle_dma_sg() - Handle a DMA transfer using scatterlist.
+ * @sg: scatterlist holding DMA buffers.
+ * @nents: number of scatterlist entries.
+ * @dir: one of possible dma_data_direction values.
+ *
+ * Depending on @dir, KMSAN:
+ * * checks the buffers in the scatterlist, if they are copied to device;
+ * * initializes the buffers, if they are copied from device;
+ * * does both, if this is a DMA_BIDIRECTIONAL transfer.
+ */
+void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+
+/**
+ * kmsan_handle_urb() - Handle a USB data transfer.
+ * @urb: struct urb pointer.
+ * @is_out: data transfer direction (true means output to hardware).
+ *
+ * If @is_out is true, KMSAN checks the transfer buffer of @urb. Otherwise,
+ * KMSAN initializes the transfer buffer.
+ */
+void kmsan_handle_urb(const struct urb *urb, bool is_out);
+
+/**
+ * kmsan_unpoison_entry_regs() - Handle pt_regs in low-level entry code.
+ * @regs: struct pt_regs pointer received from assembly code.
+ *
+ * KMSAN unpoisons the contents of the passed pt_regs, preventing potential
+ * false positive reports. Unlike kmsan_unpoison_memory(),
+ * kmsan_unpoison_entry_regs() can be called from the regions where
+ * kmsan_in_runtime() returns true, which is the case in early entry code.
+ */
+void kmsan_unpoison_entry_regs(const struct pt_regs *regs);
+
+/**
+ * kmsan_get_metadata() - Return a pointer to KMSAN shadow or origins.
+ * @addr: kernel address.
+ * @is_origin: whether to return origins or shadow.
+ *
+ * Return NULL if metadata cannot be found.
+ */
+void *kmsan_get_metadata(void *addr, bool is_origin);
+
+/**
+ * kmsan_enable_current(): Enable KMSAN for the current task.
+ *
+ * Each kmsan_enable_current() call must be preceded by a
+ * kmsan_disable_current() call. These call pairs may be nested.
+ */
+void kmsan_enable_current(void);
+
+/**
+ * kmsan_disable_current(): Disable KMSAN for the current task.
+ *
+ * Each kmsan_disable_current() call must be followed by a
+ * kmsan_enable_current() call. These call pairs may be nested.
+ */
+void kmsan_disable_current(void);
+
+/**
+ * memset_no_sanitize_memory(): Fill memory without KMSAN instrumentation.
+ * @s: address of kernel memory to fill.
+ * @c: constant byte to fill the memory with.
+ * @n: number of bytes to fill.
+ *
+ * This is like memset(), but without KMSAN instrumentation.
+ */
+static inline void *memset_no_sanitize_memory(void *s, int c, size_t n)
+{
+ return __memset(s, c, n);
+}
+
+extern bool kmsan_enabled;
+extern int panic_on_kmsan;
+
+/*
+ * KMSAN performs a lot of consistency checks that are currently enabled by
+ * default. BUG_ON is normally discouraged in the kernel, unless used for
+ * debugging, but KMSAN itself is a debugging tool, so it makes little sense to
+ * recover if something goes wrong.
+ */
+#define KMSAN_WARN_ON(cond) \
+ ({ \
+ const bool __cond = WARN_ON(cond); \
+ if (unlikely(__cond)) { \
+ WRITE_ONCE(kmsan_enabled, false); \
+ if (panic_on_kmsan) { \
+ /* Can't call panic() here because */ \
+ /* of uaccess checks. */ \
+ BUG(); \
+ } \
+ } \
+ __cond; \
+ })
+
+#else
+
+static inline void kmsan_init_shadow(void)
+{
+}
+
+static inline void kmsan_init_runtime(void)
+{
+}
+
+static inline bool __must_check kmsan_memblock_free_pages(struct page *page,
+ unsigned int order)
+{
+ return true;
+}
+
+static inline void kmsan_task_create(struct task_struct *task)
+{
+}
+
+static inline void kmsan_task_exit(struct task_struct *task)
+{
+}
+
+static inline void kmsan_alloc_page(struct page *page, unsigned int order,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_free_page(struct page *page, unsigned int order)
+{
+}
+
+static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
+{
+}
+
+static inline void kmsan_slab_alloc(struct kmem_cache *s, void *object,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_slab_free(struct kmem_cache *s, void *object)
+{
+}
+
+static inline void kmsan_kmalloc_large(const void *ptr, size_t size,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_kfree_large(const void *ptr)
+{
+}
+
+static inline int __must_check kmsan_vmap_pages_range_noflush(
+ unsigned long start, unsigned long end, pgprot_t prot,
+ struct page **pages, unsigned int page_shift, gfp_t gfp_mask)
+{
+ return 0;
+}
+
+static inline void kmsan_vunmap_range_noflush(unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline int __must_check kmsan_ioremap_page_range(unsigned long start,
+ unsigned long end,
+ phys_addr_t phys_addr,
+ pgprot_t prot,
+ unsigned int page_shift)
+{
+ return 0;
+}
+
+static inline void kmsan_iounmap_page_range(unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline void kmsan_handle_dma(phys_addr_t phys, size_t size,
+ enum dma_data_direction dir)
+{
+}
+
+static inline void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+}
+
+static inline void kmsan_handle_urb(const struct urb *urb, bool is_out)
+{
+}
+
+static inline void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
+{
+}
+
+static inline void kmsan_enable_current(void)
+{
+}
+
+static inline void kmsan_disable_current(void)
+{
+}
+
+static inline void *memset_no_sanitize_memory(void *s, int c, size_t n)
+{
+ return memset(s, c, n);
+}
+
+#define KMSAN_WARN_ON WARN_ON
+
+#endif
+
+#endif /* _LINUX_KMSAN_H */
diff --git a/include/linux/kmsan_string.h b/include/linux/kmsan_string.h
new file mode 100644
index 000000000000..7287da6f52ef
--- /dev/null
+++ b/include/linux/kmsan_string.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN string functions API used in other headers.
+ *
+ * Copyright (C) 2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_STRING_H
+#define _LINUX_KMSAN_STRING_H
+
+/*
+ * KMSAN overrides the default memcpy/memset/memmove implementations in the
+ * kernel, which requires having __msan_XXX function prototypes in several other
+ * headers. Keep them in one place instead of open-coding.
+ */
+void *__msan_memcpy(void *dst, const void *src, size_t size);
+void *__msan_memset(void *s, int c, size_t n);
+void *__msan_memmove(void *dest, const void *src, size_t len);
+
+#endif /* _LINUX_KMSAN_STRING_H */
diff --git a/include/linux/kmsan_types.h b/include/linux/kmsan_types.h
new file mode 100644
index 000000000000..dfc59918b3c0
--- /dev/null
+++ b/include/linux/kmsan_types.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A minimal header declaring types added by KMSAN to existing kernel structs.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_TYPES_H
+#define _LINUX_KMSAN_TYPES_H
+
+#include <linux/types.h>
+
+/* These constants are defined in the MSan LLVM instrumentation pass. */
+#define KMSAN_RETVAL_SIZE 800
+#define KMSAN_PARAM_SIZE 800
+
+struct kmsan_context_state {
+ char param_tls[KMSAN_PARAM_SIZE];
+ char retval_tls[KMSAN_RETVAL_SIZE];
+ char va_arg_tls[KMSAN_PARAM_SIZE];
+ char va_arg_origin_tls[KMSAN_PARAM_SIZE];
+ u64 va_arg_overflow_size_tls;
+ char param_origin_tls[KMSAN_PARAM_SIZE];
+ u32 retval_origin_tls;
+};
+
+#undef KMSAN_PARAM_SIZE
+#undef KMSAN_RETVAL_SIZE
+
+struct kmsan_ctx {
+ struct kmsan_context_state cstate;
+ int kmsan_in_runtime;
+ unsigned int depth;
+};
+
+#endif /* _LINUX_KMSAN_TYPES_H */
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 906521c2329c..6055fc969877 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -40,6 +40,17 @@ struct kmsg_dump_iter {
};
/**
+ * struct kmsg_dump_detail - kernel crash detail
+ * @reason: reason for the crash, see enum kmsg_dump_reason.
+ * @description: optional short string, to provide additional information.
+ */
+struct kmsg_dump_detail {
+ enum kmsg_dump_reason reason;
+ const char *description;
+};
+
+/**
* struct kmsg_dumper - kernel crash message dumper structure
* @list: Entry in the dumper list (private)
* @dump: Call into dumping code which will retrieve the data with
@@ -49,13 +60,13 @@ struct kmsg_dump_iter {
*/
struct kmsg_dumper {
struct list_head list;
- void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
+ void (*dump)(struct kmsg_dumper *dumper, struct kmsg_dump_detail *detail);
enum kmsg_dump_reason max_reason;
bool registered;
};
#ifdef CONFIG_PRINTK
-void kmsg_dump(enum kmsg_dump_reason reason);
+void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc);
bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
char *line, size_t size, size_t *len);
@@ -71,7 +82,7 @@ int kmsg_dump_unregister(struct kmsg_dumper *dumper);
const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason);
#else
-static inline void kmsg_dump(enum kmsg_dump_reason reason)
+static inline void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc)
{
}
@@ -107,4 +118,9 @@ static inline const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
}
#endif
+static inline void kmsg_dump(enum kmsg_dump_reason reason)
+{
+ kmsg_dump_desc(reason, NULL);
+}
+
#endif /* _LINUX_KMSG_DUMP_H */
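To show the new callback shape, a hedged registration sketch against the detail-based API above (dumper body hypothetical; real dumpers usually write to persistent storage rather than the log):

	static void demo_dump(struct kmsg_dumper *dumper,
			      struct kmsg_dump_detail *detail)
	{
		pr_info("dump: %s (%s)\n",
			kmsg_dump_reason_str(detail->reason),
			detail->description ?: "no description");
	}

	static struct kmsg_dumper demo_dumper = {
		.dump		= demo_dump,
		.max_reason	= KMSG_DUMP_PANIC,
	};

	/* registered with kmsg_dump_register(&demo_dumper) */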
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index ea30529fba08..c8219505a79f 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -19,10 +19,10 @@
#include <linux/list.h>
#include <linux/sysfs.h>
#include <linux/compiler.h>
+#include <linux/container_of.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/kobject_ns.h>
-#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
@@ -38,7 +38,7 @@ extern char uevent_helper[];
#endif
/* counter to tag the uevent, read only except for the kobject core */
-extern u64 uevent_seqnum;
+extern atomic64_t uevent_seqnum;
/*
* The actions here must match the index to the string array
@@ -66,83 +66,60 @@ struct kobject {
struct list_head entry;
struct kobject *parent;
struct kset *kset;
- struct kobj_type *ktype;
+ const struct kobj_type *ktype;
struct kernfs_node *sd; /* sysfs directory entry */
struct kref kref;
-#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
- struct delayed_work release;
-#endif
+
unsigned int state_initialized:1;
unsigned int state_in_sysfs:1;
unsigned int state_add_uevent_sent:1;
unsigned int state_remove_uevent_sent:1;
unsigned int uevent_suppress:1;
+
+#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
+ struct delayed_work release;
+#endif
};
-extern __printf(2, 3)
-int kobject_set_name(struct kobject *kobj, const char *name, ...);
-extern __printf(2, 0)
-int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
- va_list vargs);
+__printf(2, 3) int kobject_set_name(struct kobject *kobj, const char *name, ...);
+__printf(2, 0) int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list vargs);
static inline const char *kobject_name(const struct kobject *kobj)
{
return kobj->name;
}
-extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype);
-extern __printf(3, 4) __must_check
-int kobject_add(struct kobject *kobj, struct kobject *parent,
- const char *fmt, ...);
-extern __printf(4, 5) __must_check
-int kobject_init_and_add(struct kobject *kobj,
- struct kobj_type *ktype, struct kobject *parent,
- const char *fmt, ...);
+void kobject_init(struct kobject *kobj, const struct kobj_type *ktype);
+__printf(3, 4) __must_check int kobject_add(struct kobject *kobj,
+ struct kobject *parent,
+ const char *fmt, ...);
+__printf(4, 5) __must_check int kobject_init_and_add(struct kobject *kobj,
+ const struct kobj_type *ktype,
+ struct kobject *parent,
+ const char *fmt, ...);
-extern void kobject_del(struct kobject *kobj);
+void kobject_del(struct kobject *kobj);
-extern struct kobject * __must_check kobject_create(void);
-extern struct kobject * __must_check kobject_create_and_add(const char *name,
- struct kobject *parent);
+struct kobject * __must_check kobject_create_and_add(const char *name, struct kobject *parent);
-extern int __must_check kobject_rename(struct kobject *, const char *new_name);
-extern int __must_check kobject_move(struct kobject *, struct kobject *);
+int __must_check kobject_rename(struct kobject *, const char *new_name);
+int __must_check kobject_move(struct kobject *, struct kobject *);
-extern struct kobject *kobject_get(struct kobject *kobj);
-extern struct kobject * __must_check kobject_get_unless_zero(
- struct kobject *kobj);
-extern void kobject_put(struct kobject *kobj);
+struct kobject *kobject_get(struct kobject *kobj);
+struct kobject * __must_check kobject_get_unless_zero(struct kobject *kobj);
+void kobject_put(struct kobject *kobj);
-extern const void *kobject_namespace(struct kobject *kobj);
-extern void kobject_get_ownership(struct kobject *kobj,
- kuid_t *uid, kgid_t *gid);
-extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
-
-/**
- * kobject_has_children - Returns whether a kobject has children.
- * @kobj: the object to test
- *
- * This will return whether a kobject has other kobjects as children.
- *
- * It does NOT account for the presence of attribute files, only sub
- * directories. It also assumes there is no concurrent addition or
- * removal of such children, and thus relies on external locking.
- */
-static inline bool kobject_has_children(struct kobject *kobj)
-{
- WARN_ON_ONCE(kref_read(&kobj->kref) == 0);
-
- return kobj->sd && kobj->sd->dir.subdirs;
-}
+const void *kobject_namespace(const struct kobject *kobj);
+void kobject_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid);
+char *kobject_get_path(const struct kobject *kobj, gfp_t flag);
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
- struct attribute **default_attrs; /* use default_groups instead */
const struct attribute_group **default_groups;
- const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
- const void *(*namespace)(struct kobject *kobj);
- void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid);
+ const struct kobj_ns_type_operations *(*child_ns_type)(const struct kobject *kobj);
+ const void *(*namespace)(const struct kobject *kobj);
+ void (*get_ownership)(const struct kobject *kobj, kuid_t *uid, kgid_t *gid);
};
struct kobj_uevent_env {
@@ -154,10 +131,9 @@ struct kobj_uevent_env {
};
struct kset_uevent_ops {
- int (* const filter)(struct kset *kset, struct kobject *kobj);
- const char *(* const name)(struct kset *kset, struct kobject *kobj);
- int (* const uevent)(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env);
+ int (* const filter)(const struct kobject *kobj);
+ const char *(* const name)(const struct kobject *kobj);
+ int (* const uevent)(const struct kobject *kobj, struct kobj_uevent_env *env);
};
struct kobj_attribute {
@@ -196,12 +172,11 @@ struct kset {
const struct kset_uevent_ops *uevent_ops;
} __randomize_layout;
-extern void kset_init(struct kset *kset);
-extern int __must_check kset_register(struct kset *kset);
-extern void kset_unregister(struct kset *kset);
-extern struct kset * __must_check kset_create_and_add(const char *name,
- const struct kset_uevent_ops *u,
- struct kobject *parent_kobj);
+void kset_init(struct kset *kset);
+int __must_check kset_register(struct kset *kset);
+void kset_unregister(struct kset *kset);
+struct kset * __must_check kset_create_and_add(const char *name, const struct kset_uevent_ops *u,
+ struct kobject *parent_kobj);
static inline struct kset *to_kset(struct kobject *kobj)
{
@@ -218,12 +193,12 @@ static inline void kset_put(struct kset *k)
kobject_put(&k->kobj);
}
-static inline struct kobj_type *get_ktype(struct kobject *kobj)
+static inline const struct kobj_type *get_ktype(const struct kobject *kobj)
{
return kobj->ktype;
}
-extern struct kobject *kset_find_obj(struct kset *, const char *);
+struct kobject *kset_find_obj(struct kset *, const char *);
/* The global /sys/kernel/ kobject for people to chain off of */
extern struct kobject *kernel_kobj;
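For the const-qualified callback signatures above, a conforming (hypothetical) kset_uevent_ops might read:

	static int demo_filter(const struct kobject *kobj)
	{
		return 1;	/* emit uevents for every child kobject */
	}

	static int demo_uevent(const struct kobject *kobj,
			       struct kobj_uevent_env *env)
	{
		return add_uevent_var(env, "DEMO=%s", kobject_name(kobj));
	}

	static const struct kset_uevent_ops demo_uevent_ops = {
		.filter	= demo_filter,
		.uevent	= demo_uevent,
	};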
diff --git a/include/linux/kobject_api.h b/include/linux/kobject_api.h
new file mode 100644
index 000000000000..6e36a054c2d6
--- /dev/null
+++ b/include/linux/kobject_api.h
@@ -0,0 +1 @@
+#include <linux/kobject.h>
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index 2b5b64256cf4..150fe2ae1b6b 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -47,13 +47,11 @@ struct kobj_ns_type_operations {
int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
int kobj_ns_type_registered(enum kobj_ns_type type);
-const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
-const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
+const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *parent);
+const struct kobj_ns_type_operations *kobj_ns_ops(const struct kobject *kobj);
bool kobj_ns_current_may_mount(enum kobj_ns_type type);
void *kobj_ns_grab_current(enum kobj_ns_type type);
-const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
-const void *kobj_ns_initial(enum kobj_ns_type type);
void kobj_ns_drop(enum kobj_ns_type type, void *ns);
#endif /* _LINUX_KOBJECT_NS_H */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 1883a4a9f16a..8c4f3bb24429 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -3,7 +3,6 @@
#define _LINUX_KPROBES_H
/*
* Kernel Probes (KProbes)
- * include/linux/kprobes.h
*
* Copyright (C) IBM Corporation, 2002, 2004
*
@@ -27,8 +26,8 @@
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
-#include <linux/refcount.h>
-#include <linux/freelist.h>
+#include <linux/objpool.h>
+#include <linux/rethook.h>
#include <asm/kprobes.h>
#ifdef CONFIG_KPROBES
@@ -39,7 +38,7 @@
#define KPROBE_REENTER 0x00000004
#define KPROBE_HIT_SSDONE 0x00000008
-#else /* CONFIG_KPROBES */
+#else /* !CONFIG_KPROBES */
#include <asm-generic/kprobes.h>
typedef int kprobe_opcode_t;
struct arch_specific_insn {
@@ -54,8 +53,6 @@ struct kretprobe_instance;
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
unsigned long flags);
-typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
- int trapnr);
typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
struct pt_regs *);
@@ -83,12 +80,6 @@ struct kprobe {
/* Called after addr is executed, unless... */
kprobe_post_handler_t post_handler;
- /*
- * ... called if executing addr causes a fault (eg. page fault).
- * Return 1 if it handled fault, otherwise kernel will see it.
- */
- kprobe_fault_handler_t fault_handler;
-
/* Saved opcode (which has been replaced with breakpoint) */
kprobe_opcode_t opcode;
@@ -111,27 +102,28 @@ struct kprobe {
* this flag is only for optimized_kprobe.
*/
#define KPROBE_FLAG_FTRACE 8 /* probe is using ftrace */
+#define KPROBE_FLAG_ON_FUNC_ENTRY 16 /* probe is on the function entry */
/* Has this kprobe gone ? */
-static inline int kprobe_gone(struct kprobe *p)
+static inline bool kprobe_gone(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_GONE;
}
/* Is this kprobe disabled ? */
-static inline int kprobe_disabled(struct kprobe *p)
+static inline bool kprobe_disabled(struct kprobe *p)
{
return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
}
/* Is this kprobe really running the optimized path? */
-static inline int kprobe_optimized(struct kprobe *p)
+static inline bool kprobe_optimized(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_OPTIMIZED;
}
/* Does this kprobe use ftrace? */
-static inline int kprobe_ftrace(struct kprobe *p)
+static inline bool kprobe_ftrace(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_FTRACE;
}
@@ -147,8 +139,8 @@ static inline int kprobe_ftrace(struct kprobe *p)
*
*/
struct kretprobe_holder {
- struct kretprobe *rp;
- refcount_t ref;
+ struct kretprobe __rcu *rp;
+ struct objpool_head pool;
};
struct kretprobe {
@@ -158,19 +150,25 @@ struct kretprobe {
int maxactive;
int nmissed;
size_t data_size;
- struct freelist_head freelist;
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ struct rethook *rh;
+#else
struct kretprobe_holder *rph;
+#endif
};
+#define KRETPROBE_MAX_DATA_SIZE 4096
+
struct kretprobe_instance {
- union {
- struct freelist_node freelist;
- struct rcu_head rcu;
- };
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+ struct rethook_node node;
+#else
+ struct rcu_head rcu;
struct llist_node llist;
struct kretprobe_holder *rph;
kprobe_opcode_t *ret_addr;
void *fp;
+#endif
char data[];
};
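As a sketch of how the per-instance data area is used with either backing layout (probe target and handler names hypothetical; data_size must stay within KRETPROBE_MAX_DATA_SIZE):

	static int demo_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
	{
		*(u64 *)ri->data = ktime_get_ns();	/* timestamp at entry */
		return 0;
	}

	static int demo_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
	{
		pr_debug("latency: %llu ns\n",
			 ktime_get_ns() - *(u64 *)ri->data);
		return 0;
	}

	static struct kretprobe demo_krp = {
		.kp.symbol_name	= "do_sys_open",	/* hypothetical target */
		.entry_handler	= demo_entry,
		.handler	= demo_ret,
		.data_size	= sizeof(u64),
	};

	/* register_kretprobe(&demo_krp) arms the probe */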
@@ -189,31 +187,46 @@ struct kprobe_blacklist_entry {
DECLARE_PER_CPU(struct kprobe *, current_kprobe);
DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
-/*
- * For #ifdef avoidance:
- */
-static inline int kprobes_built_in(void)
-{
- return 1;
-}
-
extern void kprobe_busy_begin(void);
extern void kprobe_busy_end(void);
#ifdef CONFIG_KRETPROBES
+/* Check whether @p is used for implementing a trampoline. */
+extern int arch_trampoline_kprobe(struct kprobe *p);
+
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
+{
+	/* rethook::data is a non-changing field, so it can be accessed freely. */
+ return (struct kretprobe *)ri->node.rethook->data;
+}
+static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+{
+ return ri->node.ret_addr;
+}
+#else
extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs);
-extern int arch_trampoline_kprobe(struct kprobe *p);
+void arch_kretprobe_fixup_return(struct pt_regs *regs,
+ kprobe_opcode_t *correct_ret_addr);
+
+void __kretprobe_trampoline(void);
+/*
+ * Since some architectures use structured function pointers, use
+ * dereference_kernel_function_descriptor() to get the real function address.
+ */
+static nokprobe_inline void *kretprobe_trampoline_addr(void)
+{
+ return dereference_kernel_function_descriptor(__kretprobe_trampoline);
+}
/* If the trampoline handler is called from a kprobe, use this version */
unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
- void *trampoline_address,
- void *frame_pointer);
+ void *frame_pointer);
static nokprobe_inline
unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
- void *trampoline_address,
- void *frame_pointer)
+ void *frame_pointer)
{
unsigned long ret;
/*
@@ -222,7 +235,7 @@ unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
* be running at this point.
*/
kprobe_busy_begin();
- ret = __kretprobe_trampoline_handler(regs, trampoline_address, frame_pointer);
+ ret = __kretprobe_trampoline_handler(regs, frame_pointer);
kprobe_busy_end();
return ret;
@@ -230,13 +243,16 @@ unsigned long kretprobe_trampoline_handler(struct pt_regs *regs,
static nokprobe_inline struct kretprobe *get_kretprobe(struct kretprobe_instance *ri)
{
- RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
- "Kretprobe is accessed from instance under preemptive context");
+ return rcu_dereference_check(ri->rph->rp, rcu_read_lock_any_held());
+}
- return READ_ONCE(ri->rph->rp);
+static nokprobe_inline unsigned long get_kretprobe_retaddr(struct kretprobe_instance *ri)
+{
+ return (unsigned long)ri->ret_addr;
}
+#endif /* CONFIG_KRETPROBE_ON_RETHOOK */
-#else /* CONFIG_KRETPROBES */
+#else /* !CONFIG_KRETPROBES */
static inline void arch_prepare_kretprobe(struct kretprobe *rp,
struct pt_regs *regs)
{
@@ -247,16 +263,11 @@ static inline int arch_trampoline_kprobe(struct kprobe *p)
}
#endif /* CONFIG_KRETPROBES */
-extern struct kretprobe_blackpoint kretprobe_blacklist[];
+/* Markers of '_kprobe_blacklist' section */
+extern unsigned long __start_kprobe_blacklist[];
+extern unsigned long __stop_kprobe_blacklist[];
-#ifdef CONFIG_KPROBES_SANITY_TEST
-extern int init_test_probes(void);
-#else
-static inline int init_test_probes(void)
-{
- return 0;
-}
-#endif /* CONFIG_KPROBES_SANITY_TEST */
+extern struct kretprobe_blackpoint kretprobe_blacklist[];
extern int arch_prepare_kprobe(struct kprobe *p);
extern void arch_arm_kprobe(struct kprobe *p);
@@ -265,7 +276,6 @@ extern int arch_init_kprobes(void);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
extern int arch_populate_kprobe_blacklist(void);
-extern bool arch_kprobe_on_func_entry(unsigned long offset);
extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
extern bool within_kprobe_blacklist(unsigned long addr);
@@ -311,7 +321,7 @@ static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
#define KPROBE_OPTINSN_PAGE_SYM "kprobe_optinsn_page"
int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
unsigned long *value, char *type, char *sym);
-#else /* __ARCH_WANT_KPROBES_INSN_SLOT */
+#else /* !__ARCH_WANT_KPROBES_INSN_SLOT */
#define DEFINE_INSN_CACHE_OPS(__name) \
static inline bool is_kprobe_##__name##_slot(unsigned long addr) \
{ \
@@ -342,29 +352,33 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist,
struct list_head *done_list);
extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
- unsigned long addr);
+ kprobe_opcode_t *addr);
extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
DEFINE_INSN_CACHE_OPS(optinsn);
-#ifdef CONFIG_SYSCTL
-extern int sysctl_kprobes_optimization;
-extern int proc_kprobes_optimization_handler(struct ctl_table *table,
- int write, void *buffer,
- size_t *length, loff_t *ppos);
-#endif
extern void wait_for_kprobe_optimizer(void);
-#else
+bool optprobe_queued_unopt(struct optimized_kprobe *op);
+bool kprobe_disarmed(struct kprobe *p);
+#else /* !CONFIG_OPTPROBES */
static inline void wait_for_kprobe_optimizer(void) { }
#endif /* CONFIG_OPTPROBES */
+
#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct ftrace_regs *fregs);
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
-#endif
-
-int arch_check_ftrace_location(struct kprobe *p);
+/* Set when ftrace has been killed: kprobes on ftrace must be disabled for safety */
+extern bool kprobe_ftrace_disabled __read_mostly;
+extern void kprobe_ftrace_kill(void);
+#else
+static inline int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ return -EINVAL;
+}
+static inline void kprobe_ftrace_kill(void) {}
+#endif /* CONFIG_KPROBES_ON_FTRACE */
/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);
@@ -372,7 +386,7 @@ struct kprobe *get_kprobe(void *addr);
/* kprobe_running() will just return the current_kprobe on this CPU */
static inline struct kprobe *kprobe_running(void)
{
- return (__this_cpu_read(current_kprobe));
+ return __this_cpu_read(current_kprobe);
}
static inline void reset_current_kprobe(void)
@@ -386,18 +400,23 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
}
kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
+kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset, bool *on_func_entry);
+
int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);
-unsigned long arch_deref_entry_point(void *);
int register_kretprobe(struct kretprobe *rp);
void unregister_kretprobe(struct kretprobe *rp);
int register_kretprobes(struct kretprobe **rps, int num);
void unregister_kretprobes(struct kretprobe **rps, int num);
+#if defined(CONFIG_KRETPROBE_ON_RETHOOK) || !defined(CONFIG_KRETPROBES)
+#define kprobe_flush_task(tk) do {} while (0)
+#else
void kprobe_flush_task(struct task_struct *tk);
+#endif
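Usage note: the registration API above follows the usual pattern; a hedged sketch with illustrative names (the probed symbol is arbitrary):

static int my_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre-handler hit at %ps\n", (void *)instruction_pointer(regs));
	return 0;	/* let the probed instruction execute */
}

static struct kprobe my_kp = {
	.symbol_name	= "kernel_clone",
	.pre_handler	= my_pre,
};

/* register_kprobe(&my_kp) at init, unregister_kprobe(&my_kp) at exit;
 * the !CONFIG_KPROBES stubs further down make registration return
 * -EOPNOTSUPP instead. */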
void kprobe_free_init_mem(void);
@@ -407,19 +426,21 @@ int enable_kprobe(struct kprobe *kp);
void dump_kprobe(struct kprobe *kp);
void *alloc_insn_page(void);
-void free_insn_page(void *page);
+
+void *alloc_optinsn_page(void);
+void free_optinsn_page(void *page);
int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym);
int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
char *type, char *sym);
+
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
#else /* !CONFIG_KPROBES: */
-static inline int kprobes_built_in(void)
-{
- return 0;
-}
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
return 0;
@@ -432,13 +453,16 @@ static inline struct kprobe *kprobe_running(void)
{
return NULL;
}
+#define kprobe_busy_begin() do {} while (0)
+#define kprobe_busy_end() do {} while (0)
+
static inline int register_kprobe(struct kprobe *p)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int register_kprobes(struct kprobe **kps, int num)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline void unregister_kprobe(struct kprobe *p)
{
@@ -448,11 +472,11 @@ static inline void unregister_kprobes(struct kprobe **kps, int num)
}
static inline int register_kretprobe(struct kretprobe *rp)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int register_kretprobes(struct kretprobe **rps, int num)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline void unregister_kretprobe(struct kretprobe *rp)
{
@@ -466,13 +490,16 @@ static inline void kprobe_flush_task(struct task_struct *tk)
static inline void kprobe_free_init_mem(void)
{
}
+static inline void kprobe_ftrace_kill(void)
+{
+}
static inline int disable_kprobe(struct kprobe *kp)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int enable_kprobe(struct kprobe *kp)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline bool within_kprobe_blacklist(unsigned long addr)
@@ -485,6 +512,7 @@ static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value,
return -ERANGE;
}
#endif /* CONFIG_KPROBES */
+
static inline int disable_kretprobe(struct kretprobe *rp)
{
return disable_kprobe(&rp->kp);
@@ -499,19 +527,56 @@ static inline bool is_kprobe_insn_slot(unsigned long addr)
{
return false;
}
-#endif
+#endif /* !CONFIG_KPROBES */
+
#ifndef CONFIG_OPTPROBES
static inline bool is_kprobe_optinsn_slot(unsigned long addr)
{
return false;
}
+#endif /* !CONFIG_OPTPROBES */
+
+#ifdef CONFIG_KRETPROBES
+#ifdef CONFIG_KRETPROBE_ON_RETHOOK
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return is_rethook_trampoline(addr);
+}
+
+static nokprobe_inline
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur)
+{
+ return rethook_find_ret_addr(tsk, (unsigned long)fp, cur);
+}
+#else
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return (void *)addr == kretprobe_trampoline_addr();
+}
+
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur);
+#endif
+#else
+static nokprobe_inline bool is_kretprobe_trampoline(unsigned long addr)
+{
+ return false;
+}
+
+static nokprobe_inline
+unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp,
+ struct llist_node **cur)
+{
+ return 0;
+}
#endif
/* Returns true if kprobes handled the fault */
static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
unsigned int trap)
{
- if (!kprobes_built_in())
+ if (!IS_ENABLED(CONFIG_KPROBES))
return false;
if (user_mode(regs))
return false;
diff --git a/include/linux/kref.h b/include/linux/kref.h
index d32e21a2538c..88e82ab1367c 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -46,18 +46,18 @@ static inline void kref_get(struct kref *kref)
}
/**
- * kref_put - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
+ * kref_put - Decrement refcount for object
+ * @kref: Object
+ * @release: Pointer to the function that will clean up the object when the
* last reference to the object is released.
- * This pointer is required, and it is not acceptable to pass kfree
- * in as this function.
*
- * Decrement the refcount, and if 0, call release().
- * Return 1 if the object was removed, otherwise return 0. Beware, if this
- * function returns 0, you still can not count on the kref from remaining in
- * memory. Only use the return value if you want to see if the kref is now
- * gone, not present.
+ * Decrement the refcount, and if 0, call @release. The caller may not
+ * pass NULL or kfree() as the release function.
+ *
+ * Return: 1 if this call removed the object, otherwise return 0. Beware,
+ * if this function returns 0, another caller may have removed the object
+ * by the time this function returns. Only rely on the return value to
+ * learn whether this particular call released the object.
*/
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
@@ -68,17 +68,37 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
return 0;
}
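Usage note: the classic shape of an object using this API, as a hedged sketch (struct and names are illustrative):

struct foo {
	struct kref ref;
	/* ... payload ... */
};

static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, ref);

	kfree(f);
}

/* kref_init(&f->ref) at creation, kref_get(&f->ref) per extra user,
 * and kref_put(&f->ref, foo_release) when each user is done. */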
+/**
+ * kref_put_mutex - Decrement refcount for object
+ * @kref: Object
+ * @release: Pointer to the function that will clean up the object when the
+ * last reference to the object is released.
+ * @mutex: Mutex which protects the release function.
+ *
+ * This variant of kref_put() calls the @release function with the @mutex
+ * held. The @release function will release the mutex.
+ */
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
- struct mutex *lock)
+ struct mutex *mutex)
{
- if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) {
+ if (refcount_dec_and_mutex_lock(&kref->refcount, mutex)) {
release(kref);
return 1;
}
return 0;
}
+/**
+ * kref_put_lock - Decrement refcount for object
+ * @kref: Object
+ * @release: Pointer to the function that will clean up the object when the
+ * last reference to the object is released.
+ * @lock: Spinlock which protects the release function.
+ *
+ * This variant of kref_put() calls the @release function with the @lock
+ * held. The @release function will release the lock.
+ */
static inline int kref_put_lock(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
@@ -94,8 +114,6 @@ static inline int kref_put_lock(struct kref *kref,
* kref_get_unless_zero - Increment refcount for object unless it is zero.
* @kref: object.
*
- * Return non-zero if the increment succeeded. Otherwise return 0.
- *
* This function is intended to simplify locking around refcounting for
* objects that can be looked up from a lookup structure, and which are
* removed from that lookup structure in the object destructor.
@@ -105,6 +123,8 @@ static inline int kref_put_lock(struct kref *kref,
* With a lookup followed by a kref_get_unless_zero *with return value check*
* locking in the kref_put path can be deferred to the actual removal from
* the lookup structure and RCU lookups become trivial.
+ *
+ * Return: non-zero if the increment succeeded. Otherwise return 0.
*/
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
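Usage note: the lookup pattern this kernel-doc describes, as a hedged sketch; foo_find_rcu() stands in for a real RCU-safe lookup structure:

struct foo *foo_lookup(unsigned long key)
{
	struct foo *f;

	rcu_read_lock();
	f = foo_find_rcu(key);
	if (f && !kref_get_unless_zero(&f->ref))
		f = NULL;	/* raced with the final kref_put() */
	rcu_read_unlock();

	return f;
}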
diff --git a/include/linux/kref_api.h b/include/linux/kref_api.h
new file mode 100644
index 000000000000..d67e554721d2
--- /dev/null
+++ b/include/linux/kref_api.h
@@ -0,0 +1 @@
+#include <linux/kref.h>
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 161e8164abcf..c982694c987b 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -13,27 +13,70 @@
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>
-#include <linux/sched/coredump.h>
-
-struct stable_node;
-struct mem_cgroup;
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, int advice, unsigned long *vm_flags);
+ unsigned long end, int advice, vm_flags_t *vm_flags);
+vm_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file,
+ vm_flags_t vm_flags);
+int ksm_enable_merge_any(struct mm_struct *mm);
+int ksm_disable_merge_any(struct mm_struct *mm);
+int ksm_disable(struct mm_struct *mm);
+
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
+/*
+ * To identify zeropages that were mapped by KSM, we reuse the dirty bit
+ * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
+ * deduplicating memory.
+ */
+#define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
+
+extern atomic_long_t ksm_zero_pages;
+
+static inline void ksm_map_zero_page(struct mm_struct *mm)
+{
+ atomic_long_inc(&ksm_zero_pages);
+ atomic_long_inc(&mm->ksm_zero_pages);
+}
+
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
+{
+ if (is_ksm_zero_pte(pte)) {
+ atomic_long_dec(&ksm_zero_pages);
+ atomic_long_dec(&mm->ksm_zero_pages);
+ }
+}
+
+static inline long mm_ksm_zero_pages(struct mm_struct *mm)
+{
+ return atomic_long_read(&mm->ksm_zero_pages);
+}
+
+static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ /* Adding mm to ksm is best effort on fork. */
+ if (mm_flags_test(MMF_VM_MERGEABLE, oldmm)) {
+ long nr_ksm_zero_pages = atomic_long_read(&mm->ksm_zero_pages);
+
+ mm->ksm_merging_pages = 0;
+ mm->ksm_rmap_items = 0;
+ atomic_long_add(nr_ksm_zero_pages, &ksm_zero_pages);
+ __ksm_enter(mm);
+ }
+}
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline int ksm_execve(struct mm_struct *mm)
{
- if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
+ if (mm_flags_test(MMF_VM_MERGE_ANY, mm))
return __ksm_enter(mm);
+
return 0;
}
static inline void ksm_exit(struct mm_struct *mm)
{
- if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
+ if (mm_flags_test(MMF_VM_MERGEABLE, mm))
__ksm_exit(mm);
}
@@ -48,15 +91,34 @@ static inline void ksm_exit(struct mm_struct *mm)
* We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
* but what if the vma was unmerged while the page was swapped out?
*/
-struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address);
+struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr);
-void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
-void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
+void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
+void collect_procs_ksm(const struct folio *folio, const struct page *page,
+ struct list_head *to_kill, int force_early);
+long ksm_process_profit(struct mm_struct *);
+bool ksm_process_mergeable(struct mm_struct *mm);
#else /* !CONFIG_KSM */
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline vm_flags_t ksm_vma_flags(struct mm_struct *mm,
+ const struct file *file, vm_flags_t vm_flags)
+{
+ return vm_flags;
+}
+
+static inline int ksm_disable(struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+}
+
+static inline int ksm_execve(struct mm_struct *mm)
{
return 0;
}
@@ -65,25 +127,35 @@ static inline void ksm_exit(struct mm_struct *mm)
{
}
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
+{
+}
+
+static inline void collect_procs_ksm(const struct folio *folio,
+ const struct page *page, struct list_head *to_kill,
+ int force_early)
+{
+}
+
#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, int advice, unsigned long *vm_flags)
+ unsigned long end, int advice, vm_flags_t *vm_flags)
{
return 0;
}
-static inline struct page *ksm_might_need_to_copy(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+static inline struct folio *ksm_might_need_to_copy(struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr)
{
- return page;
+ return folio;
}
-static inline void rmap_walk_ksm(struct page *page,
+static inline void rmap_walk_ksm(struct folio *folio,
struct rmap_walk_control *rwc)
{
}
-static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
{
}
#endif /* CONFIG_MMU */
diff --git a/include/linux/kstack_erase.h b/include/linux/kstack_erase.h
new file mode 100644
index 000000000000..bf3bf1905557
--- /dev/null
+++ b/include/linux/kstack_erase.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KSTACK_ERASE_H
+#define _LINUX_KSTACK_ERASE_H
+
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+/*
+ * Check that the poison value points to the unused hole in the
+ * virtual memory map for your platform.
+ */
+#define KSTACK_ERASE_POISON -0xBEEF
+#define KSTACK_ERASE_SEARCH_DEPTH 128
+
+#ifdef CONFIG_KSTACK_ERASE
+#include <asm/stacktrace.h>
+#include <linux/linkage.h>
+
+/*
+ * The lowest address on tsk's stack which we can plausibly erase.
+ */
+static __always_inline unsigned long
+stackleak_task_low_bound(const struct task_struct *tsk)
+{
+ /*
+ * The lowest unsigned long on the task stack contains STACK_END_MAGIC,
+ * which we must not corrupt.
+ */
+ return (unsigned long)end_of_stack(tsk) + sizeof(unsigned long);
+}
+
+/*
+ * The address immediately after the highest address on tsk's stack which we
+ * can plausibly erase.
+ */
+static __always_inline unsigned long
+stackleak_task_high_bound(const struct task_struct *tsk)
+{
+ /*
+ * The task's pt_regs lives at the top of the task stack and will be
+ * overwritten by exception entry, so there's no need to erase them.
+ */
+ return (unsigned long)task_pt_regs(tsk);
+}
+
+/*
+ * Find the address immediately above the poisoned region of the stack, where
+ * that region falls between 'low' (inclusive) and 'high' (exclusive).
+ */
+static __always_inline unsigned long
+stackleak_find_top_of_poison(const unsigned long low, const unsigned long high)
+{
+ const unsigned int depth = KSTACK_ERASE_SEARCH_DEPTH / sizeof(unsigned long);
+ unsigned int poison_count = 0;
+ unsigned long poison_high = high;
+ unsigned long sp = high;
+
+ while (sp > low && poison_count < depth) {
+ sp -= sizeof(unsigned long);
+
+ if (*(unsigned long *)sp == KSTACK_ERASE_POISON) {
+ poison_count++;
+ } else {
+ poison_count = 0;
+ poison_high = sp;
+ }
+ }
+
+ return poison_high;
+}
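Usage note: a hedged sketch of how an erase pass can combine these helpers, loosely modeled on kernel/stackleak.c (this is not the actual stackleak_erase() body):

	unsigned long low = stackleak_task_low_bound(current);
	unsigned long high = stackleak_task_high_bound(current);
	unsigned long erase_low;

	/* Skip the part of the stack that is already poisoned... */
	erase_low = stackleak_find_top_of_poison(low, current->lowest_stack);

	/* ...and re-poison the unused region above it. */
	while (erase_low < high) {
		*(unsigned long *)erase_low = KSTACK_ERASE_POISON;
		erase_low += sizeof(unsigned long);
	}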
+
+static inline void stackleak_task_init(struct task_struct *t)
+{
+ t->lowest_stack = stackleak_task_low_bound(t);
+# ifdef CONFIG_KSTACK_ERASE_METRICS
+ t->prev_lowest_stack = t->lowest_stack;
+# endif
+}
+
+asmlinkage void noinstr stackleak_erase(void);
+asmlinkage void noinstr stackleak_erase_on_task_stack(void);
+asmlinkage void noinstr stackleak_erase_off_task_stack(void);
+void __no_caller_saved_registers noinstr __sanitizer_cov_stack_depth(void);
+
+#else /* !CONFIG_KSTACK_ERASE */
+static inline void stackleak_task_init(struct task_struct *t) { }
+#endif
+
+#endif
diff --git a/include/linux/kstrtox.h b/include/linux/kstrtox.h
new file mode 100644
index 000000000000..6ea897222af1
--- /dev/null
+++ b/include/linux/kstrtox.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KSTRTOX_H
+#define _LINUX_KSTRTOX_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/* Internal, do not use. */
+int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
+int __must_check _kstrtol(const char *s, unsigned int base, long *res);
+
+int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
+int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
+
+/**
+ * kstrtoul - convert a string to an unsigned long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign, but not a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics - If it begins with 0x the number will be parsed as a
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Preferred over simple_strtoul(). Return code must be checked.
+ */
+static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
+{
+ /*
+ * We want to shortcut function call, but
+ * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
+ */
+ if (sizeof(unsigned long) == sizeof(unsigned long long) &&
+ __alignof__(unsigned long) == __alignof__(unsigned long long))
+ return kstrtoull(s, base, (unsigned long long *)res);
+ else
+ return _kstrtoul(s, base, res);
+}
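Usage note: a hedged sketch of the common sysfs-store pattern (attribute and names are illustrative):

static ssize_t limit_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = kstrtoul(buf, 0, &val);	/* base 0 auto-detects 0x/0 prefixes */
	if (err)
		return err;

	/* ... apply val ... */
	return count;
}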
+
+/**
+ * kstrtol - convert a string to a long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign or a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics - If it begins with 0x the number will be parsed as a
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Preferred over simple_strtol(). Return code must be checked.
+ */
+static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
+{
+ /*
+ * We want to shortcut function call, but
+ * __builtin_types_compatible_p(long, long long) = 0.
+ */
+ if (sizeof(long) == sizeof(long long) &&
+ __alignof__(long) == __alignof__(long long))
+ return kstrtoll(s, base, (long long *)res);
+ else
+ return _kstrtol(s, base, res);
+}
+
+int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
+int __must_check kstrtoint(const char *s, unsigned int base, int *res);
+
+static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
+{
+ return kstrtoull(s, base, res);
+}
+
+static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
+{
+ return kstrtoll(s, base, res);
+}
+
+static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
+{
+ return kstrtouint(s, base, res);
+}
+
+static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
+{
+ return kstrtoint(s, base, res);
+}
+
+int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
+int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
+int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
+int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
+int __must_check kstrtobool(const char *s, bool *res);
+
+int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
+int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
+int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
+int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
+int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
+int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
+int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
+int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
+int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
+int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
+int __must_check kstrtobool_from_user(const char __user *s, size_t count, bool *res);
+
+static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
+{
+ return kstrtoull_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
+{
+ return kstrtoll_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
+{
+ return kstrtouint_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
+{
+ return kstrtoint_from_user(s, count, base, res);
+}
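Usage note: the *_from_user() variants copy and parse a userspace buffer in one step; a hedged sketch of a debugfs-style write handler (names illustrative):

static ssize_t limit_write(struct file *file, const char __user *ubuf,
			   size_t count, loff_t *ppos)
{
	unsigned int val;
	int err;

	err = kstrtouint_from_user(ubuf, count, 0, &val);
	if (err)
		return err;

	WRITE_ONCE(my_limit, val);	/* my_limit is an illustrative global */
	return count;
}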
+
+/*
+ * Use kstrto<foo> instead.
+ *
+ * NOTE: simple_strto<foo> does not check for the range overflow and,
+ * depending on the input, may give interesting results.
+ *
+ * Use these functions if and only if you cannot use kstrto<foo>, because
+ * the conversion ends on the first non-digit character, which may be far
+ * beyond the supported range. This can be useful for parsing strings like
+ * 10x50 or 12:21 without altering the original string or a temporary buffer.
+ * Keep the above caveat in mind.
+ */
+
+extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+extern unsigned long simple_strntoul(const char *,char **,unsigned int,size_t);
+extern long simple_strtol(const char *,char **,unsigned int);
+extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
+extern long long simple_strtoll(const char *,char **,unsigned int);
+
+#endif /* _LINUX_KSTRTOX_H */
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 2484ed97e72f..8d27403888ce 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -18,7 +18,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
* @threadfn: the function to run in the thread
* @data: data pointer for @threadfn()
* @namefmt: printf-style format string for the thread name
- * @arg...: arguments for @namefmt.
+ * @arg: arguments for @namefmt.
*
* This macro will create a kthread on the current node, leaving it in
* the stopped state. This is just a helper for kthread_create_on_node();
@@ -33,6 +33,9 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
unsigned int cpu,
const char *namefmt);
+void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk);
+bool set_kthread_struct(struct task_struct *p);
+
void kthread_set_per_cpu(struct task_struct *k, int cpu);
bool kthread_is_per_cpu(struct task_struct *k);
@@ -54,13 +57,40 @@ bool kthread_is_per_cpu(struct task_struct *k);
__k; \
})
+/**
+ * kthread_run_on_cpu - create and wake a cpu bound thread.
+ * @threadfn: the function to run until signal_pending(current).
+ * @data: data ptr for @threadfn.
+ * @cpu: The cpu on which the thread should be bound,
+ * @namefmt: printf-style name for the thread. Format is restricted
+ * to "name.*%u". Code fills in cpu number.
+ *
+ * Description: Convenient wrapper for kthread_create_on_cpu()
+ * followed by wake_up_process(). Returns the kthread or
+ * ERR_PTR(-ENOMEM).
+ */
+static inline struct task_struct *
+kthread_run_on_cpu(int (*threadfn)(void *data), void *data,
+ unsigned int cpu, const char *namefmt)
+{
+ struct task_struct *p;
+
+ p = kthread_create_on_cpu(threadfn, data, cpu, namefmt);
+ if (!IS_ERR(p))
+ wake_up_process(p);
+
+ return p;
+}
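Usage note (hedged; poll_fn and the thread name are illustrative, and the format must contain %u for the cpu number):

	struct task_struct *t;

	t = kthread_run_on_cpu(poll_fn, NULL, cpu, "poller/%u");
	if (IS_ERR(t))
		return PTR_ERR(t);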
+
void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
+int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
+int kthread_stop_put(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
-bool __kthread_should_park(struct task_struct *k);
+bool kthread_should_stop_or_park(void);
bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_func(struct task_struct *k);
void *kthread_data(struct task_struct *k);
@@ -68,6 +98,8 @@ void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
+void kthread_exit(long result) __noreturn;
+void kthread_complete_and_exit(struct completion *, long) __noreturn;
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
@@ -111,12 +143,6 @@ struct kthread_delayed_work {
struct timer_list timer;
};
-#define KTHREAD_WORKER_INIT(worker) { \
- .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \
- .work_list = LIST_HEAD_INIT((worker).work_list), \
- .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
- }
-
#define KTHREAD_WORK_INIT(work, fn) { \
.node = LIST_HEAD_INIT((work).node), \
.func = (fn), \
@@ -128,9 +154,6 @@ struct kthread_delayed_work {
TIMER_IRQSAFE), \
}
-#define DEFINE_KTHREAD_WORKER(worker) \
- struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
-
#define DEFINE_KTHREAD_WORK(work, fn) \
struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
@@ -138,19 +161,6 @@ struct kthread_delayed_work {
struct kthread_delayed_work dwork = \
KTHREAD_DELAYED_WORK_INIT(dwork, fn)
-/*
- * kthread_worker.lock needs its own lockdep class key when defined on
- * stack with lockdep enabled. Use the following macros in such cases.
- */
-#ifdef CONFIG_LOCKDEP
-# define KTHREAD_WORKER_INIT_ONSTACK(worker) \
- ({ kthread_init_worker(&worker); worker; })
-# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \
- struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
-#else
-# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
-#endif
-
extern void __kthread_init_worker(struct kthread_worker *worker,
const char *name, struct lock_class_key *key);
@@ -177,13 +187,58 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
int kthread_worker_fn(void *worker_ptr);
-__printf(2, 3)
-struct kthread_worker *
-kthread_create_worker(unsigned int flags, const char namefmt[], ...);
+__printf(3, 4)
+struct kthread_worker *kthread_create_worker_on_node(unsigned int flags,
+ int node,
+ const char namefmt[], ...);
+
+#define kthread_create_worker(flags, namefmt, ...) \
+ kthread_create_worker_on_node(flags, NUMA_NO_NODE, namefmt, ## __VA_ARGS__)
+
+/**
+ * kthread_run_worker - create and wake a kthread worker.
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the thread.
+ *
+ * Description: Convenient wrapper for kthread_create_worker() followed by
+ * wake_up_process(). Returns the kthread_worker or ERR_PTR(-ENOMEM).
+ */
+#define kthread_run_worker(flags, namefmt, ...) \
+({ \
+ struct kthread_worker *__kw \
+ = kthread_create_worker(flags, namefmt, ## __VA_ARGS__); \
+ if (!IS_ERR(__kw)) \
+ wake_up_process(__kw->task); \
+ __kw; \
+})
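Usage note for the wrapper above (hedged; worker and work item names are illustrative):

	struct kthread_worker *worker;

	worker = kthread_run_worker(0, "myworker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);
	kthread_queue_work(worker, &my_work);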
-__printf(3, 4) struct kthread_worker *
+struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
- const char namefmt[], ...);
+ const char namefmt[]);
+
+/**
+ * kthread_run_worker_on_cpu - create and wake a cpu bound kthread worker.
+ * @cpu: CPU number
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the thread. Format is restricted
+ * to "name.*%u". Code fills in cpu number.
+ *
+ * Description: Convenient wrapper for kthread_create_worker_on_cpu()
+ * followed by wake_up_process(). Returns the kthread_worker or
+ * ERR_PTR(-ENOMEM).
+ */
+static inline struct kthread_worker *
+kthread_run_worker_on_cpu(int cpu, unsigned int flags,
+ const char namefmt[])
+{
+ struct kthread_worker *kw;
+
+ kw = kthread_create_worker_on_cpu(cpu, flags, namefmt);
+ if (!IS_ERR(kw))
+ wake_up_process(kw->task);
+
+ return kw;
+}
bool kthread_queue_work(struct kthread_worker *worker,
struct kthread_work *work);
@@ -214,9 +269,5 @@ void kthread_associate_blkcg(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *kthread_blkcg(void);
#else
static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
-static inline struct cgroup_subsys_state *kthread_blkcg(void)
-{
- return NULL;
-}
#endif
#endif /* _LINUX_KTHREAD_H */
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 73f20deb497d..383ed9985802 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -21,12 +21,10 @@
#ifndef _LINUX_KTIME_H
#define _LINUX_KTIME_H
-#include <linux/time.h>
-#include <linux/jiffies.h>
#include <asm/bug.h>
-
-/* Nanosecond scalar representation for kernel time values */
-typedef s64 ktime_t;
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <linux/types.h>
/**
* ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
@@ -224,6 +222,11 @@ static inline ktime_t ns_to_ktime(u64 ns)
return ns;
}
+static inline ktime_t us_to_ktime(u64 us)
+{
+ return us * NSEC_PER_USEC;
+}
+
static inline ktime_t ms_to_ktime(u64 ms)
{
return ms * NSEC_PER_MSEC;
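Usage note: these scalar helpers compose with the ktime arithmetic elsewhere in this header; a small hedged sketch:

	ktime_t deadline = ktime_add(ktime_get(), ms_to_ktime(50));	/* 50 ms from now */
	ktime_t slack = us_to_ktime(200);				/* 200 us */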
diff --git a/include/linux/ktime_api.h b/include/linux/ktime_api.h
new file mode 100644
index 000000000000..f697d493960f
--- /dev/null
+++ b/include/linux/ktime_api.h
@@ -0,0 +1 @@
+#include <linux/ktime.h>
diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h
index 120e5e90fa1d..eb10d87adf7d 100644
--- a/include/linux/kvm_dirty_ring.h
+++ b/include/linux/kvm_dirty_ring.h
@@ -27,34 +27,35 @@ struct kvm_dirty_ring {
int index;
};
-#if (KVM_DIRTY_LOG_PAGE_OFFSET == 0)
+#ifndef CONFIG_HAVE_KVM_DIRTY_RING
/*
- * If KVM_DIRTY_LOG_PAGE_OFFSET not defined, kvm_dirty_ring.o should
+ * If CONFIG_HAVE_KVM_DIRTY_RING is not defined, kvm_dirty_ring.o should
* not be included as well, so define these nop functions for the arch.
*/
-static inline u32 kvm_dirty_ring_get_rsvd_entries(void)
+static inline u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm)
{
return 0;
}
-static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring,
- int index, u32 size)
+static inline bool kvm_use_dirty_bitmap(struct kvm *kvm)
{
- return 0;
+ return true;
}
-static inline struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm)
+static inline int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring,
+ int index, u32 size)
{
- return NULL;
+ return 0;
}
static inline int kvm_dirty_ring_reset(struct kvm *kvm,
- struct kvm_dirty_ring *ring)
+ struct kvm_dirty_ring *ring,
+ int *nr_entries_reset)
{
- return 0;
+ return -ENOENT;
}
-static inline void kvm_dirty_ring_push(struct kvm_dirty_ring *ring,
+static inline void kvm_dirty_ring_push(struct kvm_vcpu *vcpu,
u32 slot, u64 offset)
{
}
@@ -69,35 +70,25 @@ static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
}
-static inline bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
-{
- return true;
-}
-
-#else /* KVM_DIRTY_LOG_PAGE_OFFSET == 0 */
-
-u32 kvm_dirty_ring_get_rsvd_entries(void);
-int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size);
-struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm);
+#else /* CONFIG_HAVE_KVM_DIRTY_RING */
-/*
- * called with kvm->slots_lock held, returns the number of
- * processed pages.
- */
-int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring);
+int kvm_cpu_dirty_log_size(struct kvm *kvm);
+bool kvm_use_dirty_bitmap(struct kvm *kvm);
+bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm);
+u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm);
+int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring,
+ int index, u32 size);
+int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring,
+ int *nr_entries_reset);
+void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset);
-/*
- * returns =0: successfully pushed
- * <0: unable to push, need to wait
- */
-void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset);
+bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu);
/* for use in vm_operations_struct */
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset);
void kvm_dirty_ring_free(struct kvm_dirty_ring *ring);
-bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring);
-#endif /* KVM_DIRTY_LOG_PAGE_OFFSET == 0 */
+#endif /* CONFIG_HAVE_KVM_DIRTY_RING */
#endif /* KVM_DIRTY_RING_H */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2f34487e21f2..d93f75b05ae2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2,7 +2,7 @@
#ifndef __KVM_HOST_H
#define __KVM_HOST_H
-
+#include <linux/entry-virt.h>
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
@@ -10,6 +10,7 @@
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
+#include <linux/sched/stat.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
@@ -27,6 +28,13 @@
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
+#include <linux/notifier.h>
+#include <linux/ftrace.h>
+#include <linux/hashtable.h>
+#include <linux/instrumentation.h>
+#include <linux/interval_tree.h>
+#include <linux/rbtree.h>
+#include <linux/xarray.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -37,20 +45,21 @@
#include <asm/kvm_host.h>
#include <linux/kvm_dirty_ring.h>
-#ifndef KVM_MAX_VCPU_ID
-#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
+#ifndef KVM_MAX_VCPU_IDS
+#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
#endif
/*
- * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used
- * in kvm, other bits are visible for userspace which are defined in
- * include/linux/kvm_h.
+ * Bits 16 ~ 31 of kvm_userspace_memory_region::flags are used internally
+ * in kvm; the other bits are visible to userspace and are defined in
+ * include/uapi/linux/kvm.h.
*/
-#define KVM_MEMSLOT_INVALID (1UL << 16)
+#define KVM_MEMSLOT_INVALID (1UL << 16)
+#define KVM_MEMSLOT_GMEM_ONLY (1UL << 17)
/*
* Bit 63 of the memslot generation number is an "update in-progress flag",
- * e.g. is temporarily set for the duration of install_new_memslots().
+ * e.g. is temporarily set for the duration of kvm_swap_active_memslots().
* This flag effectively creates a unique generation number that is used to
* mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
* i.e. may (or may not) have come from the previous memslots generation.
@@ -72,8 +81,8 @@
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS 2
-#ifndef KVM_ADDRESS_SPACE_NUM
-#define KVM_ADDRESS_SPACE_NUM 1
+#ifndef KVM_MAX_NR_ADDRESS_SPACES
+#define KVM_MAX_NR_ADDRESS_SPACES 1
#endif
/*
@@ -88,6 +97,8 @@
#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
+#define KVM_PFN_ERR_SIGPENDING (KVM_PFN_ERR_MASK + 3)
+#define KVM_PFN_ERR_NEEDS_IO (KVM_PFN_ERR_MASK + 4)
/*
* error pfns indicate that the gfn is in a slot but failed to
@@ -99,6 +110,15 @@ static inline bool is_error_pfn(kvm_pfn_t pfn)
}
/*
+ * KVM_PFN_ERR_SIGPENDING indicates that fetching the PFN was interrupted
+ * by a pending signal. Note, the signal may or may not be fatal.
+ */
+static inline bool is_sigpending_pfn(kvm_pfn_t pfn)
+{
+ return pfn == KVM_PFN_ERR_SIGPENDING;
+}
+
+/*
* error_noslot pfns indicate that the gfn cannot be
* translated to a pfn - it is not in a slot or failed to
* translate it to a pfn.
@@ -130,25 +150,34 @@ static inline bool kvm_is_error_hva(unsigned long addr)
#endif
-#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))
-
-static inline bool is_error_page(struct page *page)
+static inline bool kvm_is_error_gpa(gpa_t gpa)
{
- return IS_ERR(page);
+ return gpa == INVALID_GPA;
}
#define KVM_REQUEST_MASK GENMASK(7,0)
#define KVM_REQUEST_NO_WAKEUP BIT(8)
#define KVM_REQUEST_WAIT BIT(9)
+#define KVM_REQUEST_NO_ACTION BIT(10)
/*
* Architecture-independent vcpu->requests bit members
- * Bits 4-7 are reserved for more arch-independent bits.
+ * Bits 3-7 are reserved for more arch-independent bits.
+ */
+#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_UNBLOCK 2
+#define KVM_REQ_DIRTY_RING_SOFT_FULL 3
+#define KVM_REQUEST_ARCH_BASE 8
+
+/*
+ * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
+ * OUTSIDE_GUEST_MODE. KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
+ * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
+ * on. A kick only guarantees that the vCPU is on its way out, e.g. a previous
+ * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
+ * guarantee the vCPU received an IPI and has actually exited guest mode.
*/
-#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_PENDING_TIMER 2
-#define KVM_REQ_UNHALT 3
-#define KVM_REQUEST_ARCH_BASE 8
+#define KVM_REQ_OUTSIDE_GUEST_MODE (KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
@@ -156,8 +185,13 @@ static inline bool is_error_page(struct page *page)
})
#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
+bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+ unsigned long *vcpu_bitmap);
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
+#define KVM_PIT_IRQ_SOURCE_ID 2
extern struct mutex kvm_lock;
extern struct list_head vm_list;
@@ -173,6 +207,7 @@ struct kvm_io_range {
struct kvm_io_bus {
int dev_count;
int ioeventfd_count;
+ struct rcu_head rcu;
struct kvm_io_range range[];
};
@@ -181,6 +216,7 @@ enum kvm_bus {
KVM_PIO_BUS,
KVM_VIRTIO_CCW_NOTIFY_BUS,
KVM_FAST_MMIO_BUS,
+ KVM_IOCSR_BUS,
KVM_NR_BUSES
};
@@ -203,7 +239,6 @@ struct kvm_async_pf {
struct list_head link;
struct list_head queue;
struct kvm_vcpu *vcpu;
- struct mm_struct *mm;
gpa_t cr2_or_gpa;
unsigned long addr;
struct kvm_arch_async_pf arch;
@@ -218,18 +253,28 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
-#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
+union kvm_mmu_notifier_arg {
+ unsigned long attributes;
+};
+
+enum kvm_gfn_range_filter {
+ KVM_FILTER_SHARED = BIT(0),
+ KVM_FILTER_PRIVATE = BIT(1),
+};
+
struct kvm_gfn_range {
struct kvm_memory_slot *slot;
gfn_t start;
gfn_t end;
- pte_t pte;
+ union kvm_mmu_notifier_arg arg;
+ enum kvm_gfn_range_filter attr_filter;
bool may_block;
+ bool lockless;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
-bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
#endif
enum {
@@ -239,21 +284,19 @@ enum {
READING_SHADOW_PAGE_TABLES,
};
-#define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA)
-
struct kvm_host_map {
/*
* Only valid if the 'pfn' is managed by the host kernel (i.e. there is
* a 'struct page' for it). When using the mem= kernel parameter, some
* memory can be used as guest memory without being managed by the
* host kernel.
- * If 'pfn' is not managed by the host kernel, this field is
- * initialized to KVM_UNMAPPED_PAGE.
*/
+ struct page *pinned_page;
struct page *page;
void *hva;
kvm_pfn_t pfn;
kvm_pfn_t gfn;
+ bool writable;
};
/*
@@ -265,6 +308,11 @@ static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
return !!map->hva;
}
+static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
+{
+ return single_task_running() && !need_resched() && ktime_before(cur, stop);
+}
+
/*
* Sometimes a large or cross-page mmio needs to be broken up into separate
* exits for userspace servicing.
@@ -282,23 +330,25 @@ struct kvm_vcpu {
#endif
int cpu;
int vcpu_id; /* id given by userspace at creation */
- int vcpu_idx; /* index in kvm->vcpus array */
- int srcu_idx;
+ int vcpu_idx; /* index into kvm->vcpu_array */
+ int ____srcu_idx; /* Don't use this directly. You've been warned. */
+#ifdef CONFIG_PROVE_RCU
+ int srcu_depth;
+#endif
int mode;
u64 requests;
unsigned long guest_debug;
- int pre_pcpu;
- struct list_head blocked_vcpu_list;
-
struct mutex mutex;
struct kvm_run *run;
+#ifndef __KVM_HAVE_ARCH_WQP
struct rcuwait wait;
- struct pid __rcu *pid;
+#endif
+ struct pid *pid;
+ rwlock_t pid_lock;
int sigset_active;
sigset_t sigset;
- struct kvm_vcpu_stat stat;
unsigned int halt_poll_ns;
bool valid_wakeup;
@@ -332,14 +382,30 @@ struct kvm_vcpu {
bool dy_eligible;
} spin_loop;
#endif
+ bool wants_to_run;
bool preempted;
bool ready;
+ bool scheduled_out;
struct kvm_vcpu_arch arch;
+ struct kvm_vcpu_stat stat;
+ char stats_id[KVM_STATS_NAME_SIZE];
struct kvm_dirty_ring dirty_ring;
+
+ /*
+ * The most recently used memslot by this vCPU and the slots generation
+ * for which it is valid.
+ * No wraparound protection is needed since generations won't overflow in
+ * thousands of years, even assuming 1M memslot operations per second.
+ */
+ struct kvm_memory_slot *last_used_slot;
+ u64 last_used_slot_gen;
};
-/* must be called with irqs disabled */
-static __always_inline void guest_enter_irqoff(void)
+/*
+ * Start accounting time towards a guest.
+ * Must be called before entering guest context.
+ */
+static __always_inline void guest_timing_enter_irqoff(void)
{
/*
* This is running in ioctl context so it's safe to assume that it's the
@@ -348,7 +414,18 @@ static __always_inline void guest_enter_irqoff(void)
instrumentation_begin();
vtime_account_guest_enter();
instrumentation_end();
+}
+/*
+ * Enter guest context and enter an RCU extended quiescent state.
+ *
+ * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
+ * unsafe to use any code which may directly or indirectly use RCU, tracing
+ * (including IRQ flag tracing), or lockdep. All code in this period must be
+ * non-instrumentable.
+ */
+static __always_inline void guest_context_enter_irqoff(void)
+{
/*
* KVM does not hold any references to rcu protected data when it
* switches CPU into a guest mode. In fact switching to a guest mode
@@ -359,21 +436,92 @@ static __always_inline void guest_enter_irqoff(void)
*/
if (!context_tracking_guest_enter()) {
instrumentation_begin();
- rcu_virt_note_context_switch(smp_processor_id());
+ rcu_virt_note_context_switch();
instrumentation_end();
}
}
-static __always_inline void guest_exit_irqoff(void)
+/*
+ * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
+ * guest_state_enter_irqoff().
+ */
+static __always_inline void guest_enter_irqoff(void)
{
- context_tracking_guest_exit();
+ guest_timing_enter_irqoff();
+ guest_context_enter_irqoff();
+}
+
+/**
+ * guest_state_enter_irqoff - Fixup state when entering a guest
+ *
+ * Entry to a guest will enable interrupts, but the kernel runs with
+ * interrupts disabled when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Tell lockdep that interrupts are enabled
+ *
+ * Invoked from architecture specific code before entering a guest.
+ * Must be called with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke guest_timing_enter_irqoff() before this.
+ *
+ * Note: this is analogous to exit_to_user_mode().
+ */
+static __always_inline void guest_state_enter_irqoff(void)
+{
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+
+ guest_context_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+/*
+ * Exit guest context and exit an RCU extended quiescent state.
+ *
+ * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
+ * unsafe to use any code which may directly or indirectly use RCU, tracing
+ * (including IRQ flag tracing), or lockdep. All code in this period must be
+ * non-instrumentable.
+ */
+static __always_inline void guest_context_exit_irqoff(void)
+{
+ /*
+ * Guest mode is treated as a quiescent state, see
+ * guest_context_enter_irqoff() for more details.
+ */
+ if (!context_tracking_guest_exit()) {
+ instrumentation_begin();
+ rcu_virt_note_context_switch();
+ instrumentation_end();
+ }
+}
+/*
+ * Stop accounting time towards a guest.
+ * Must be called after exiting guest context.
+ */
+static __always_inline void guest_timing_exit_irqoff(void)
+{
instrumentation_begin();
/* Flush the guest cputime we spent on the guest */
vtime_account_guest_exit();
instrumentation_end();
}
+/*
+ * Deprecated. Architectures should move to guest_state_exit_irqoff() and
+ * guest_timing_exit_irqoff().
+ */
+static __always_inline void guest_exit_irqoff(void)
+{
+ guest_context_exit_irqoff();
+ guest_timing_exit_irqoff();
+}
+
static inline void guest_exit(void)
{
unsigned long flags;
@@ -383,6 +531,33 @@ static inline void guest_exit(void)
local_irq_restore(flags);
}
+/**
+ * guest_state_exit_irqoff - Establish state when returning from guest mode
+ *
+ * Entry from a guest disables interrupts, but guest mode is traced as
+ * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ *
+ * Invoked from architecture specific code after exiting a guest.
+ * Must be invoked with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke guest_timing_exit_irqoff() after this.
+ *
+ * Note: this is analogous to enter_from_user_mode().
+ */
+static __always_inline void guest_state_exit_irqoff(void)
+{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ guest_context_exit_irqoff();
+
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
+
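Usage note: taken together, the documented ordering for an architecture's run loop looks like this hedged sketch (enter_guest() is an illustrative stand-in for the real world switch):

	local_irq_disable();

	guest_timing_enter_irqoff();	/* start guest time accounting */
	guest_state_enter_irqoff();	/* context tracking, lockdep, tracing */

	enter_guest(vcpu);		/* illustrative world switch */

	guest_state_exit_irqoff();	/* re-establish kernel state */
	guest_timing_exit_irqoff();	/* stop guest time accounting */

	local_irq_enable();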
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
/*
@@ -400,7 +575,26 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
*/
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
+/*
+ * Since at idle each memslot belongs to two memslot sets it has to contain
+ * two embedded nodes for each data structure that it forms a part of.
+ *
+ * Two memslot sets (one active and one inactive) are necessary so the VM
+ * continues to run on one memslot set while the other is being modified.
+ *
+ * These two memslot sets normally point to the same set of memslots.
+ * They can, however, be desynchronized when performing a memslot management
+ * operation by replacing the memslot to be modified by its copy.
+ * After the operation is complete, both memslot sets once again point to
+ * the same, common set of memslot data.
+ *
+ * The memslots themselves are independent of each other so they can be
+ * individually added or deleted.
+ */
struct kvm_memory_slot {
+ struct hlist_node id_node[2];
+ struct interval_tree_node hva_node[2];
+ struct rb_node gfn_node[2];
gfn_t base_gfn;
unsigned long npages;
unsigned long *dirty_bitmap;
@@ -409,9 +603,26 @@ struct kvm_memory_slot {
u32 flags;
short id;
u16 as_id;
+
+#ifdef CONFIG_KVM_GUEST_MEMFD
+ struct {
+ /*
+ * Writes protected by kvm->slots_lock. Acquiring a
+ * reference via kvm_gmem_get_file() is protected by
+ * either kvm->slots_lock or kvm->srcu.
+ */
+ struct file *file;
+ pgoff_t pgoff;
+ } gmem;
+#endif
};
-static inline bool kvm_slot_dirty_track_enabled(struct kvm_memory_slot *slot)
+static inline bool kvm_slot_has_gmem(const struct kvm_memory_slot *slot)
+{
+ return slot && (slot->flags & KVM_MEM_GUEST_MEMFD);
+}
+
+static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
{
return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
}
@@ -445,6 +656,13 @@ struct kvm_hv_sint {
u32 sint;
};
+struct kvm_xen_evtchn {
+ u32 port;
+ u32 vcpu_id;
+ int vcpu_idx;
+ u32 priority;
+};
+
struct kvm_kernel_irq_routing_entry {
u32 gsi;
u32 type;
@@ -465,6 +683,7 @@ struct kvm_kernel_irq_routing_entry {
} msi;
struct kvm_s390_adapter_int adapter;
struct kvm_hv_sint hv_sint;
+ struct kvm_xen_evtchn xen_evtchn;
};
struct hlist_node link;
};
@@ -477,36 +696,74 @@ struct kvm_irq_routing_table {
* Array indexed by gsi. Each entry contains list of irq chips
* the gsi is connected to.
*/
- struct hlist_head map[];
+ struct hlist_head map[] __counted_by(nr_rt_entries);
};
#endif
-#ifndef KVM_PRIVATE_MEM_SLOTS
-#define KVM_PRIVATE_MEM_SLOTS 0
+bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);
+
+#ifndef KVM_INTERNAL_MEM_SLOTS
+#define KVM_INTERNAL_MEM_SLOTS 0
#endif
#define KVM_MEM_SLOTS_NUM SHRT_MAX
-#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS)
+#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
+
+#if KVM_MAX_NR_ADDRESS_SPACES == 1
+static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
+{
+ return KVM_MAX_NR_ADDRESS_SPACES;
+}
-#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
return 0;
}
#endif
-/*
- * Note:
- * memslots are not sorted by id anymore, please use id_to_memslot()
- * to get the memslot by its id.
- */
+#ifndef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
+{
+ return false;
+}
+#endif
+
+#ifdef CONFIG_KVM_GUEST_MEMFD
+bool kvm_arch_supports_gmem_init_shared(struct kvm *kvm);
+
+static inline u64 kvm_gmem_get_supported_flags(struct kvm *kvm)
+{
+ u64 flags = GUEST_MEMFD_FLAG_MMAP;
+
+ if (!kvm || kvm_arch_supports_gmem_init_shared(kvm))
+ flags |= GUEST_MEMFD_FLAG_INIT_SHARED;
+
+ return flags;
+}
+#endif
+
+#ifndef kvm_arch_has_readonly_mem
+static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm)
+{
+ return IS_ENABLED(CONFIG_HAVE_KVM_READONLY_MEM);
+}
+#endif
+
struct kvm_memslots {
u64 generation;
- /* The mapping table from slot id to the index in memslots[]. */
- short id_to_index[KVM_MEM_SLOTS_NUM];
- atomic_t lru_slot;
- int used_slots;
- struct kvm_memory_slot memslots[];
+ atomic_long_t last_used_slot;
+ struct rb_root_cached hva_tree;
+ struct rb_root gfn_tree;
+ /*
+ * The mapping table from slot id to memslot.
+ *
+ * 7-bit bucket count matches the size of the old id to index array for
+ * 512 slots, while giving good performance with this slot count.
+ * Higher bucket counts bring only small performance improvements but
+ * always result in higher memory usage (even for lower memslot counts).
+ */
+ DECLARE_HASHTABLE(id_hash, 7);
+ int node_idx;
};
struct kvm {
@@ -517,9 +774,36 @@ struct kvm {
#endif /* KVM_HAVE_MMU_RWLOCK */
struct mutex slots_lock;
+
+ /*
+	 * Protects the arch-specific fields of struct kvm_memory_slot in
+	 * use by the VM. To be used under the slots_lock (above) or in a
+	 * kvm->srcu critical section where acquiring the slots_lock would
+	 * lead to deadlock with the synchronize_srcu() in
+	 * kvm_swap_active_memslots().
+ */
+ struct mutex slots_arch_lock;
struct mm_struct *mm; /* userspace tied to this vm */
- struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
- struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+ unsigned long nr_memslot_pages;
+ /* The two memslot sets - active and inactive (per address space) */
+ struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2];
+ /* The current active memslot set for each address space */
+ struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES];
+ struct xarray vcpu_array;
+ /*
+ * Protected by slots_lock, but can be read outside if an
+ * incorrect answer is acceptable.
+ */
+ atomic_t nr_memslots_dirty_logging;
+
+ /* Used to wait for completion of MMU notifiers. */
+ spinlock_t mn_invalidate_lock;
+ unsigned long mn_active_invalidate_count;
+ struct rcuwait mn_memslots_update_rcuwait;
+
+ /* For management / invalidation of gfn_to_pfn_caches */
+ spinlock_t gpc_lock;
+ struct list_head gpc_list;
/*
* created_vcpus is protected by kvm->lock, and is incremented
@@ -528,20 +812,22 @@ struct kvm {
* and is accessed atomically.
*/
atomic_t online_vcpus;
+ int max_vcpus;
int created_vcpus;
int last_boosted_vcpu;
struct list_head vm_list;
struct mutex lock;
struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
-#ifdef CONFIG_HAVE_KVM_EVENTFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
struct {
spinlock_t lock;
struct list_head items;
+ /* resampler_list update side is protected by resampler_lock. */
struct list_head resampler_list;
struct mutex resampler_lock;
} irqfds;
- struct list_head ioeventfds;
#endif
+ struct list_head ioeventfds;
struct kvm_vm_stat stat;
struct kvm_arch arch;
refcount_t users_count;
@@ -557,19 +843,17 @@ struct kvm {
* Update side is protected by irq_lock.
*/
struct kvm_irq_routing_table __rcu *irq_routing;
-#endif
-#ifdef CONFIG_HAVE_KVM_IRQFD
+
struct hlist_head irq_ack_notifier_list;
#endif
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
struct mmu_notifier mmu_notifier;
- unsigned long mmu_notifier_seq;
- long mmu_notifier_count;
- unsigned long mmu_notifier_range_start;
- unsigned long mmu_notifier_range_end;
+ unsigned long mmu_invalidate_seq;
+ long mmu_invalidate_in_progress;
+ gfn_t mmu_invalidate_range_start;
+ gfn_t mmu_invalidate_range_end;
#endif
- long tlbs_dirty;
struct list_head devices;
u64 manual_dirty_log_protect;
struct dentry *debugfs_dentry;
@@ -577,8 +861,21 @@ struct kvm {
struct srcu_struct srcu;
struct srcu_struct irq_srcu;
pid_t userspace_pid;
+ bool override_halt_poll_ns;
unsigned int max_halt_poll_ns;
u32 dirty_ring_size;
+ bool dirty_ring_with_bitmap;
+ bool vm_bugged;
+ bool vm_dead;
+
+#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+ struct notifier_block pm_notifier;
+#endif
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+ /* Protected by slots_lock (for writes) and RCU (for reads) */
+ struct xarray mem_attr_array;
+#endif
+ char stats_id[KVM_STATS_NAME_SIZE];
};
#define kvm_err(fmt, ...) \
@@ -607,38 +904,119 @@ struct kvm {
#define vcpu_err(vcpu, fmt, ...) \
kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+static inline void kvm_vm_dead(struct kvm *kvm)
+{
+ kvm->vm_dead = true;
+ kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
+}
+
+static inline void kvm_vm_bugged(struct kvm *kvm)
+{
+ kvm->vm_bugged = true;
+ kvm_vm_dead(kvm);
+}
+
+#define KVM_BUG(cond, kvm, fmt...) \
+({ \
+ bool __ret = !!(cond); \
+ \
+ if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
+
+#define KVM_BUG_ON(cond, kvm) \
+({ \
+ bool __ret = !!(cond); \
+ \
+ if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
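
A minimal usage sketch (editorial, not part of the patch): KVM_BUG_ON() WARNs once and marks the VM dead, and its return value lets the caller bail out. The handler name and EXAMPLE_MAX_EXIT_CODE are hypothetical.

static int example_handle_exit(struct kvm_vcpu *vcpu, u64 exit_code)
{
	/* A "can't happen" exit code implies corrupted KVM state. */
	if (KVM_BUG_ON(exit_code > EXAMPLE_MAX_EXIT_CODE, vcpu->kvm))
		return -EIO;

	return 0;
}
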
+
+/*
+ * Note, "data corruption" refers to corruption of host kernel data structures,
+ * not guest data. Guest data corruption, suspected or confirmed, that is
+ * confined to a single VM should *never* BUG() and potentially panic the
+ * host, i.e. use this variant of KVM_BUG() if and only if a KVM data
+ * structure is corrupted and that corruption can have a cascading effect to
+ * other parts of the host and/or to other VMs.
+ */
+#define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \
+({ \
+ bool __ret = !!(cond); \
+ \
+ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) \
+ BUG_ON(__ret); \
+ else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
+
+static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PROVE_RCU
+ WARN_ONCE(vcpu->srcu_depth++,
+ "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
+#endif
+ vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+}
+
+static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
+{
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);
+
+#ifdef CONFIG_PROVE_RCU
+ WARN_ONCE(--vcpu->srcu_depth,
+ "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
+#endif
+}
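
An illustrative pairing of the helpers around a guest-memory access from vCPU context; under CONFIG_PROVE_RCU the depth counter catches nested or unbalanced calls. The gpa and data locals are assumptions.

	kvm_vcpu_srcu_read_lock(vcpu);
	ret = kvm_read_guest(vcpu->kvm, gpa, &data, sizeof(data));
	kvm_vcpu_srcu_read_unlock(vcpu);
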
+
static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}
+/*
+ * Get a bus reference under the update-side lock. No long-term SRCU reader
+ * references are permitted, to avoid stale reads vs concurrent IO
+ * registrations.
+ */
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
- return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
- lockdep_is_held(&kvm->slots_lock) ||
- !refcount_read(&kvm->users_count));
+ return rcu_dereference_protected(kvm->buses[idx],
+ lockdep_is_held(&kvm->slots_lock));
}
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
int num_vcpus = atomic_read(&kvm->online_vcpus);
+
+ /*
+ * Explicitly verify the target vCPU is online, as the anti-speculation
+ * logic only limits the CPU's ability to speculate, e.g. given a "bad"
+ * index, clamping the index to 0 would return vCPU0, not NULL.
+ */
+ if (i >= num_vcpus)
+ return NULL;
+
i = array_index_nospec(i, num_vcpus);
/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
smp_rmb();
- return kvm->vcpus[i];
+ return xa_load(&kvm->vcpu_array, i);
}
-#define kvm_for_each_vcpu(idx, vcpup, kvm) \
- for (idx = 0; \
- idx < atomic_read(&kvm->online_vcpus) && \
- (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
- idx++)
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+ if (atomic_read(&kvm->online_vcpus)) \
+ xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
+ (atomic_read(&kvm->online_vcpus) - 1))
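
Since vcpu_array is an xarray, the iteration index must be an unsigned long. A sketch (not from the patch) that kicks every online vCPU:

	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_kick(vcpu);
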
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
struct kvm_vcpu *vcpu = NULL;
- int i;
+ unsigned long i;
if (id < 0)
return NULL;
@@ -652,35 +1030,24 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
return NULL;
}
-static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
-{
- return vcpu->vcpu_idx;
-}
-
-#define kvm_for_each_memslot(memslot, slots) \
- for (memslot = &slots->memslots[0]; \
- memslot < slots->memslots + slots->used_slots; memslot++) \
- if (WARN_ON_ONCE(!memslot->npages)) { \
- } else
+void kvm_destroy_vcpus(struct kvm *kvm);
-void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);
+int kvm_trylock_all_vcpus(struct kvm *kvm);
+int kvm_lock_all_vcpus(struct kvm *kvm);
+void kvm_unlock_all_vcpus(struct kvm *kvm);
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
-#ifdef __KVM_HAVE_IOAPIC
+#ifdef CONFIG_KVM_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
-void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
-static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
-{
-}
#endif
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
@@ -693,18 +1060,18 @@ static inline void kvm_irqfd_exit(void)
{
}
#endif
-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
- struct module *module);
+int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module);
void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
+bool kvm_get_kvm_safe(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
- as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
+ as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES);
return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
lockdep_is_held(&kvm->slots_lock) ||
!refcount_read(&kvm->users_count));
@@ -722,21 +1089,130 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
return __kvm_memslots(vcpu->kvm, as_id);
}
+static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
+{
+ return RB_EMPTY_ROOT(&slots->gfn_tree);
+}
+
+bool kvm_are_all_memslots_empty(struct kvm *kvm);
+
+#define kvm_for_each_memslot(memslot, bkt, slots) \
+ hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
+ if (WARN_ON_ONCE(!memslot->npages)) { \
+ } else
+
static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
- int index = slots->id_to_index[id];
struct kvm_memory_slot *slot;
+ int idx = slots->node_idx;
- if (index < 0)
- return NULL;
+ hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
+ if (slot->id == id)
+ return slot;
+ }
+
+ return NULL;
+}
+
+/* Iterator used for walking memslots that overlap a gfn range. */
+struct kvm_memslot_iter {
+ struct kvm_memslots *slots;
+ struct rb_node *node;
+ struct kvm_memory_slot *slot;
+};
+
+static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
+{
+ iter->node = rb_next(iter->node);
+ if (!iter->node)
+ return;
+
+ iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
+}
+
+static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
+ struct kvm_memslots *slots,
+ gfn_t start)
+{
+ int idx = slots->node_idx;
+ struct rb_node *tmp;
+ struct kvm_memory_slot *slot;
- slot = &slots->memslots[index];
+ iter->slots = slots;
+
+ /*
+	 * Find the so-called "upper bound" of a key - the first node that has
+ * its key strictly greater than the searched one (the start gfn in our case).
+ */
+ iter->node = NULL;
+ for (tmp = slots->gfn_tree.rb_node; tmp; ) {
+ slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
+ if (start < slot->base_gfn) {
+ iter->node = tmp;
+ tmp = tmp->rb_left;
+ } else {
+ tmp = tmp->rb_right;
+ }
+ }
+
+ /*
+ * Find the slot with the lowest gfn that can possibly intersect with
+ * the range, so we'll ideally have slot start <= range start
+ */
+ if (iter->node) {
+ /*
+ * A NULL previous node means that the very first slot
+ * already has a higher start gfn.
+ * In this case slot start > range start.
+ */
+ tmp = rb_prev(iter->node);
+ if (tmp)
+ iter->node = tmp;
+ } else {
+ /* a NULL node below means no slots */
+ iter->node = rb_last(&slots->gfn_tree);
+ }
+
+ if (iter->node) {
+ iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);
+
+ /*
+ * It is possible in the slot start < range start case that the
+ * found slot ends before or at range start (slot end <= range start)
+ * and so it does not overlap the requested range.
+ *
+ * In such non-overlapping case the next slot (if it exists) will
+ * already have slot start > range start, otherwise the logic above
+ * would have found it instead of the current slot.
+ */
+ if (iter->slot->base_gfn + iter->slot->npages <= start)
+ kvm_memslot_iter_next(iter);
+ }
+}
+
+static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
+{
+ if (!iter->node)
+ return false;
- WARN_ON(slot->id != id);
- return slot;
+ /*
+	 * If this slot starts beyond or at the end of the range, so does
+	 * every following one.
+ */
+ return iter->slot->base_gfn < end;
}
+/* Iterate over each memslot at least partially intersecting the [start, end) range */
+#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) \
+ for (kvm_memslot_iter_start(iter, slots, start); \
+ kvm_memslot_iter_is_valid(iter, end); \
+ kvm_memslot_iter_next(iter))
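
An editorial sketch of the iterator in use, totaling the pages of every memslot that overlaps [start, end):

	struct kvm_memslot_iter iter;
	unsigned long total = 0;

	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end)
		total += iter.slot->npages;
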
+
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
+struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
+struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
+
/*
* KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
* - create a new memory slot
@@ -746,7 +1222,7 @@ struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
* -- just change its flags
*
* Since flags can be changed by some of these operations, the following
- * differentiation is the best we can do for __kvm_set_memory_region():
+ * differentiation is the best we can do for kvm_set_memory_region():
*/
enum kvm_mr_change {
KVM_MR_CREATE,
@@ -755,18 +1231,15 @@ enum kvm_mr_change {
KVM_MR_FLAGS_ONLY,
};
-int kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
-int __kvm_set_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem);
+int kvm_set_internal_memslot(struct kvm *kvm,
+ const struct kvm_userspace_memory_region2 *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change);
@@ -776,35 +1249,70 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm);
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot);
-int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
- struct page **pages, int nr_pages);
+int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
+ struct page **pages, int nr_pages);
+
+struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write);
+static inline struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+{
+ return __gfn_to_page(kvm, gfn, true);
+}
-struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
bool *writable);
+
+static inline void kvm_release_page_unused(struct page *page)
+{
+ if (!page)
+ return;
+
+ put_page(page);
+}
+
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
-void kvm_set_page_accessed(struct page *page);
-
-kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
-kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
- bool *writable);
-kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
- bool atomic, bool *async, bool write_fault,
- bool *writable, hva_t *hva);
-
-void kvm_release_pfn_clean(kvm_pfn_t pfn);
-void kvm_release_pfn_dirty(kvm_pfn_t pfn);
-void kvm_set_pfn_dirty(kvm_pfn_t pfn);
-void kvm_set_pfn_accessed(kvm_pfn_t pfn);
-void kvm_get_pfn(kvm_pfn_t pfn);
-
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
+
+static inline void kvm_release_faultin_page(struct kvm *kvm, struct page *page,
+ bool unused, bool dirty)
+{
+ lockdep_assert_once(lockdep_is_held(&kvm->mmu_lock) || unused);
+
+ if (!page)
+ return;
+
+ /*
+ * If the page that KVM got from the *primary MMU* is writable, and KVM
+ * installed or reused a SPTE, mark the page/folio dirty. Note, this
+ * may mark a folio dirty even if KVM created a read-only SPTE, e.g. if
+ * the GFN is write-protected. Folios can't be safely marked dirty
+ * outside of mmu_lock as doing so could race with writeback on the
+ * folio. As a result, KVM can't mark folios dirty in the fast page
+ * fault handler, and so KVM must (somewhat) speculatively mark the
+ * folio dirty if KVM could locklessly make the SPTE writable.
+ */
+ if (unused)
+ kvm_release_page_unused(page);
+ else if (dirty)
+ kvm_release_page_dirty(page);
+ else
+ kvm_release_page_clean(page);
+}
+
+kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
+ unsigned int foll, bool *writable,
+ struct page **refcounted_page);
+
+static inline kvm_pfn_t kvm_faultin_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
+ bool write, bool *writable,
+ struct page **refcounted_page)
+{
+ return __kvm_faultin_pfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn,
+ write ? FOLL_WRITE : 0, writable, refcounted_page);
+}
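
A hedged sketch of the intended fault-path flow, assuming the rwlock flavor of mmu_lock: fault in the pfn outside mmu_lock, then install the translation and release the page under mmu_lock, since kvm_release_faultin_page() asserts mmu_lock is held unless the page is unused. The SPTE installation is elided.

	struct page *page;
	bool writable;
	kvm_pfn_t pfn;

	pfn = kvm_faultin_pfn(vcpu, gfn, true, &writable, &page);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	write_lock(&vcpu->kvm->mmu_lock);
	/* ... install or reuse the SPTE for @pfn ... */
	kvm_release_faultin_page(vcpu->kvm, page, false, writable);
	write_unlock(&vcpu->kvm->mmu_lock);
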
+
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
@@ -868,24 +1376,28 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
})
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
-void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn);
+void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
-struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
-struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
-kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
-kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
-int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
- struct gfn_to_pfn_cache *cache, bool atomic);
-struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
- struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
+int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map,
+ bool writable);
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map);
+
+static inline int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa,
+ struct kvm_host_map *map)
+{
+ return __kvm_vcpu_map(vcpu, gpa, map, true);
+}
+
+static inline int kvm_vcpu_map_readonly(struct kvm_vcpu *vcpu, gpa_t gpa,
+ struct kvm_host_map *map)
+{
+ return __kvm_vcpu_map(vcpu, gpa, map, false);
+}
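
A usage sketch (editorial): kvm_vcpu_map() returns 0 on success, and every successful map must be undone with kvm_vcpu_unmap(). The gpa and val locals are assumptions.

	struct kvm_host_map map;

	if (kvm_vcpu_map(vcpu, gpa, &map))
		return -EFAULT;

	memcpy(map.hva + offset_in_page(gpa), &val, sizeof(val));
	kvm_vcpu_unmap(vcpu, &map);
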
+
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
@@ -900,40 +1412,153 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
+/**
+ * kvm_gpc_init - initialize gfn_to_pfn_cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @kvm: pointer to kvm instance.
+ *
+ * This sets up a gfn_to_pfn_cache by initializing locks and assigning the
+ * immutable attributes. Note, the cache must be zero-allocated (or zeroed by
+ * the caller before init).
+ */
+void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
+
+/**
+ * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
+ * physical address.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @gpa: guest physical address to map.
+ * @len:	   sanity check; the range being accessed must fit a single page.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ * -EFAULT for an untranslatable guest physical address.
+ *
+ * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for
+ * invalidations to be processed. Callers are required to use kvm_gpc_check()
+ * to ensure that the cache is valid before accessing the target page.
+ */
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
+
+/**
+ * kvm_gpc_activate_hva - prepare a cached kernel mapping and HPA for a given HVA.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @hva: userspace virtual address to map.
+ * @len:	   sanity check; the range being accessed must fit a single page.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ *		   -EFAULT for an unmappable userspace virtual address.
+ *
+ * The semantics of this function are the same as those of kvm_gpc_activate(). It
+ * merely bypasses a layer of address translation.
+ */
+int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len);
+
+/**
+ * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @len:	   sanity check; the range being accessed must fit a single page.
+ *
+ * @return: %true if the cache is still valid and the address matches.
+ * %false if the cache is not valid.
+ *
+ * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
+ * while calling this function, and then continue to hold the lock until the
+ * access is complete.
+ *
+ * Callers in IN_GUEST_MODE may do so without locking, although they should
+ * still hold a read lock on kvm->srcu for the memslot checks.
+ */
+bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);
+
+/**
+ * kvm_gpc_refresh - update a previously initialized cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @len:	   sanity check; the range being accessed must fit a single page.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ * -EFAULT for an untranslatable guest physical address.
+ *
+ * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
+ * return from this function does not mean the page can be immediately
+ * accessed because it may have raced with an invalidation. Callers must
+ * still lock and check the cache status, as this function does not return
+ * with the lock still held to permit access.
+ */
+int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
+
+/**
+ * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ *
+ * This removes a cache from the VM's list to be processed on MMU notifier
+ * invocation.
+ */
+void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
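
Tying the helpers together, a minimal access sketch under the documented check/refresh contract, taking gpc->lock for read as prescribed above; the cache is assumed zeroed, kvm_gpc_init()'d and kvm_gpc_activate()'d beforehand, and the function name is hypothetical. kvm_gpc_deactivate() is then called once at teardown.

static void example_write_guest_u64(struct gfn_to_pfn_cache *gpc, u64 value)
{
	read_lock(&gpc->lock);
	while (!kvm_gpc_check(gpc, sizeof(value))) {
		read_unlock(&gpc->lock);
		if (kvm_gpc_refresh(gpc, sizeof(value)))
			return;
		read_lock(&gpc->lock);
	}
	*(u64 *)gpc->khva = value;
	read_unlock(&gpc->lock);
}
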
+
+static inline bool kvm_gpc_is_gpa_active(struct gfn_to_pfn_cache *gpc)
+{
+ return gpc->active && !kvm_is_error_gpa(gpc->gpa);
+}
+
+static inline bool kvm_gpc_is_hva_active(struct gfn_to_pfn_cache *gpc)
+{
+ return gpc->active && kvm_is_error_gpa(gpc->gpa);
+}
+
void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
-void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+
+#ifndef CONFIG_S390
+void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait);
+
+static inline void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ __kvm_vcpu_kick(vcpu, false);
+}
+#endif
+
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
+void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
void kvm_flush_remote_tlbs(struct kvm *kvm);
-void kvm_reload_remote_mmus(struct kvm *kvm);
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
+void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot);
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
+int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif
-bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
- struct kvm_vcpu *except,
- unsigned long *vcpu_bitmap, cpumask_var_t tmp);
-bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
-bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
- struct kvm_vcpu *except);
-bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
- unsigned long *vcpu_bitmap);
+void kvm_mmu_invalidate_begin(struct kvm *kvm);
+void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
+void kvm_mmu_invalidate_end(struct kvm *kvm);
+bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
+long kvm_arch_vcpu_unlocked_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
@@ -944,10 +1569,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
-#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
- const struct kvm_memory_slot *memslot);
-#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
+#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
int *is_dirty, struct kvm_memory_slot **memslot);
@@ -957,8 +1579,9 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
bool line_status);
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
struct kvm_enable_cap *cap);
-long kvm_arch_vm_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg);
+int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
+long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg);
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
@@ -980,11 +1603,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);
-int kvm_arch_init(void *opaque);
-void kvm_arch_exit(void);
-
-void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
-
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
@@ -992,22 +1610,43 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
+#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
+#endif
+
#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
+#else
+static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
#endif
-int kvm_arch_hardware_enable(void);
-void kvm_arch_hardware_disable(void);
-int kvm_arch_hardware_setup(void *opaque);
-void kvm_arch_hardware_unsetup(void);
-int kvm_arch_check_processor_compat(void *opaque);
+#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
+/*
+ * kvm_arch_{enable,disable}_virtualization() are called on one CPU, under
+ * kvm_usage_lock, immediately after/before 0=>1 and 1=>0 transitions of
+ * kvm_usage_count, i.e. at the beginning of the generic hardware enabling
+ * sequence, and at the end of the generic hardware disabling sequence.
+ */
+void kvm_arch_enable_virtualization(void);
+void kvm_arch_disable_virtualization(void);
+/*
+ * kvm_arch_{enable,disable}_virtualization_cpu() are called on "every" CPU to
+ * do the actual twiddling of hardware bits. The hooks are called on all
+ * online CPUs when KVM enables/disables virtualization, and on a single CPU
+ * when that CPU is onlined/offlined (including for Resume/Suspend).
+ */
+int kvm_arch_enable_virtualization_cpu(void);
+void kvm_arch_disable_virtualization_cpu(void);
+#endif
+bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
-int kvm_arch_post_init_vm(struct kvm *kvm);
+bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
+void kvm_arch_create_vm_debugfs(struct kvm *kvm);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
@@ -1016,20 +1655,39 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm);
*/
static inline struct kvm *kvm_arch_alloc_vm(void)
{
- return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+ return kzalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT);
+}
+#endif
+
+static inline void __kvm_arch_free_vm(struct kvm *kvm)
+{
+ kvfree(kvm);
}
+#ifndef __KVM_HAVE_ARCH_VM_FREE
static inline void kvm_arch_free_vm(struct kvm *kvm)
{
- kfree(kvm);
+ __kvm_arch_free_vm(kvm);
}
#endif
-#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
-static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
+static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
return -ENOTSUPP;
}
+#else
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
+#endif
+
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
+ gfn_t gfn, u64 nr_pages)
+{
+ return -EOPNOTSUPP;
+}
+#else
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
#endif
#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
@@ -1050,24 +1708,6 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
return false;
}
#endif
-#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
-void kvm_arch_start_assignment(struct kvm *kvm);
-void kvm_arch_end_assignment(struct kvm *kvm);
-bool kvm_arch_has_assigned_device(struct kvm *kvm);
-#else
-static inline void kvm_arch_start_assignment(struct kvm *kvm)
-{
-}
-
-static inline void kvm_arch_end_assignment(struct kvm *kvm)
-{
-}
-
-static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
-{
- return false;
-}
-#endif
static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
@@ -1078,6 +1718,20 @@ static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
#endif
}
+/*
+ * Wake a vCPU if necessary, but don't do any stats/metadata updates. Returns
+ * true if the vCPU was blocking and was awakened, false otherwise.
+ */
+static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
+{
+ return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
+}
+
+static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
+{
+ return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
+}
+
#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
* returns true if the virtual interrupt controller is initialized and
@@ -1092,16 +1746,21 @@ static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
}
#endif
+#ifdef CONFIG_GUEST_PERF_EVENTS
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
+
+void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
+void kvm_unregister_perf_callbacks(void);
+#else
+static inline void kvm_register_perf_callbacks(void *ign) {}
+static inline void kvm_unregister_perf_callbacks(void) {}
+#endif /* CONFIG_GUEST_PERF_EVENTS */
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
-void kvm_arch_sync_events(struct kvm *kvm);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
-bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
-bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
-bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);
-
struct kvm_irq_ack_notifier {
struct hlist_node link;
unsigned gsi;
@@ -1126,60 +1785,94 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
-int kvm_request_irq_source_id(struct kvm *kvm);
-void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
/*
- * search_memslots() and __gfn_to_memslot() are here because they are
- * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
- * gfn_to_memslot() itself isn't here as an inline because that would
- * bloat other code too much.
- *
- * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
+ * Returns a pointer to the memslot if it contains gfn.
+ * Otherwise returns NULL.
*/
static inline struct kvm_memory_slot *
-search_memslots(struct kvm_memslots *slots, gfn_t gfn)
+try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
- int start = 0, end = slots->used_slots;
- int slot = atomic_read(&slots->lru_slot);
- struct kvm_memory_slot *memslots = slots->memslots;
+ if (!slot)
+ return NULL;
- if (unlikely(!slots->used_slots))
+ if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
+ return slot;
+ else
return NULL;
+}
- if (gfn >= memslots[slot].base_gfn &&
- gfn < memslots[slot].base_gfn + memslots[slot].npages)
- return &memslots[slot];
+/*
+ * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL.
+ *
+ * With "approx" set returns the memslot also when the address falls
+ * in a hole. In that case one of the memslots bordering the hole is
+ * returned.
+ */
+static inline struct kvm_memory_slot *
+search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
+{
+ struct kvm_memory_slot *slot;
+ struct rb_node *node;
+ int idx = slots->node_idx;
+
+ slot = NULL;
+ for (node = slots->gfn_tree.rb_node; node; ) {
+ slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
+ if (gfn >= slot->base_gfn) {
+ if (gfn < slot->base_gfn + slot->npages)
+ return slot;
+ node = node->rb_right;
+ } else
+ node = node->rb_left;
+ }
- while (start < end) {
- slot = start + (end - start) / 2;
+ return approx ? slot : NULL;
+}
- if (gfn >= memslots[slot].base_gfn)
- end = slot;
- else
- start = slot + 1;
- }
+static inline struct kvm_memory_slot *
+____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
+{
+ struct kvm_memory_slot *slot;
- if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
- gfn < memslots[start].base_gfn + memslots[start].npages) {
- atomic_set(&slots->lru_slot, start);
- return &memslots[start];
+ slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
+ slot = try_get_memslot(slot, gfn);
+ if (slot)
+ return slot;
+
+ slot = search_memslots(slots, gfn, approx);
+ if (slot) {
+ atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
+ return slot;
}
return NULL;
}
+/*
+ * __gfn_to_memslot() and its descendants are here to allow arch code to inline
+ * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline
+ * because that would bloat other code too much.
+ */
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
- return search_memslots(slots, gfn);
+ return ____gfn_to_memslot(slots, gfn, false);
}
static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
- return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+ /*
+ * The index was checked originally in search_memslots. To avoid
+ * that a malicious guest builds a Spectre gadget out of e.g. page
+ * table walks, do not let the processor speculate loads outside
+ * the guest's registered memslots.
+ */
+ unsigned long offset = gfn - slot->base_gfn;
+ offset = array_index_nospec(offset, slot->npages);
+ return slot->userspace_addr + offset * PAGE_SIZE;
}
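
For illustration only, the two inlines compose into a bounds-checked gfn-to-hva lookup:

	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
	unsigned long hva = KVM_HVA_ERR_BAD;

	if (slot)
		hva = __gfn_to_hva_memslot(slot, gfn);
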
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
@@ -1210,17 +1903,21 @@ static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
return (hpa_t)pfn << PAGE_SHIFT;
}
-static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
- gpa_t gpa)
+static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
{
- return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
+ unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
+
+ return !kvm_is_error_hva(hva);
}
-static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
+static inline void kvm_gpc_mark_dirty_in_slot(struct gfn_to_pfn_cache *gpc)
{
- unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
+ lockdep_assert_held(&gpc->lock);
- return kvm_is_error_hva(hva);
+ if (!gpc->memslot)
+ return;
+
+ mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpa_to_gfn(gpc->gpa));
}
enum kvm_stat_kind {
@@ -1230,67 +1927,259 @@ enum kvm_stat_kind {
struct kvm_stat_data {
struct kvm *kvm;
- struct kvm_stats_debugfs_item *dbgfs_item;
+ const struct _kvm_stats_desc *desc;
+ enum kvm_stat_kind kind;
};
-struct kvm_stats_debugfs_item {
- const char *name;
- int offset;
- enum kvm_stat_kind kind;
- int mode;
+struct _kvm_stats_desc {
+ struct kvm_stats_desc desc;
+ char name[KVM_STATS_NAME_SIZE];
};
-#define KVM_DBGFS_GET_MODE(dbgfs_item) \
- ((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)
+#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \
+ .flags = type | unit | base | \
+ BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
+ BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
+ BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
+ .exponent = exp, \
+ .size = sz, \
+ .bucket_size = bsz
+
+#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vm_stat, generic.stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vm_stat, stat) \
+ }, \
+ .name = #stat, \
+ }
+#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+ { \
+ { \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vcpu_stat, stat) \
+ }, \
+ .name = #stat, \
+ }
+/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
+#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \
+ SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)
+
+#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, \
+ unit, base, exponent, 1, 0)
+#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, \
+ unit, base, exponent, 1, 0)
+#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, \
+ unit, base, exponent, 1, 0)
+#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST, \
+ unit, base, exponent, sz, bsz)
+#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) \
+ STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST, \
+ unit, base, exponent, sz, 0)
+
+/* Cumulative counter, read/write */
+#define STATS_DESC_COUNTER(SCOPE, name) \
+ STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+/* Instantaneous counter, read only */
+#define STATS_DESC_ICOUNTER(SCOPE, name) \
+ STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+/* Peak counter, read/write */
+#define STATS_DESC_PCOUNTER(SCOPE, name) \
+ STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE, \
+ KVM_STATS_BASE_POW10, 0)
+
+/* Instantaneous boolean value, read only */
+#define STATS_DESC_IBOOLEAN(SCOPE, name) \
+ STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
+ KVM_STATS_BASE_POW10, 0)
+/* Peak (sticky) boolean value, read/write */
+#define STATS_DESC_PBOOLEAN(SCOPE, name) \
+ STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN, \
+ KVM_STATS_BASE_POW10, 0)
+
+/* Cumulative time in nanosecond */
+#define STATS_DESC_TIME_NSEC(SCOPE, name) \
+ STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
+ KVM_STATS_BASE_POW10, -9)
+/* Linear histogram for time in nanosecond */
+#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) \
+ STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
+ KVM_STATS_BASE_POW10, -9, sz, bsz)
+/* Logarithmic histogram for time in nanosecond */
+#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) \
+ STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \
+ KVM_STATS_BASE_POW10, -9, sz)
+
+#define KVM_GENERIC_VM_STATS() \
+ STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush), \
+ STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)
+
+#define KVM_GENERIC_VCPU_STATS() \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \
+ STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns), \
+ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns), \
+ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \
+ HALT_POLL_HIST_COUNT), \
+ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \
+ HALT_POLL_HIST_COUNT), \
+ STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \
+ HALT_POLL_HIST_COUNT), \
+ STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)
+
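
An arch composes its descriptor arrays from these macros; a sketch assuming a hypothetical pf_fixed counter in struct kvm_vm_stat:

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, pf_fixed),	/* hypothetical arch stat */
};
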
+ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
+ const struct _kvm_stats_desc *desc,
+ void *stats, size_t size_stats,
+ char __user *user_buffer, size_t size, loff_t *offset);
+
+/**
+ * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
+ * statistics data.
+ *
+ * @data: start address of the stats data
+ * @size: the number of buckets in the stats data
+ * @value: the new value used to update the linear histogram's bucket
+ * @bucket_size: the size (width) of a bucket
+ */
+static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
+ u64 value, size_t bucket_size)
+{
+ size_t index = div64_u64(value, bucket_size);
+
+ index = min(index, size - 1);
+ ++data[index];
+}
+
+/**
+ * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
+ * statistics data.
+ *
+ * @data: start address of the stats data
+ * @size: the number of buckets in the stats data
+ * @value: the new value used to update the logarithmic histogram's bucket
+ */
+static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
+{
+ size_t index = fls64(value);
+
+ index = min(index, size - 1);
+ ++data[index];
+}
+
+#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) \
+ kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
+#define KVM_STATS_LOG_HIST_UPDATE(array, value) \
+ kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)
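
A usage sketch (editorial): the wrappers derive the bucket count from the array itself, e.g. for the generic halt-poll histogram, with poll_ns an assumed ktime_t duration:

	KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_poll_success_hist,
				  ktime_to_ns(poll_ns));
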
-#define VM_STAT(n, x, ...) \
- { n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ }
-#define VCPU_STAT(n, x, ...) \
- { n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ }
-extern struct kvm_stats_debugfs_item debugfs_entries[];
-extern struct dentry *kvm_debugfs_dir;
+extern const struct kvm_stats_header kvm_vm_stats_header;
+extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
+extern const struct kvm_stats_header kvm_vcpu_stats_header;
+extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
+#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
+static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{
- if (unlikely(kvm->mmu_notifier_count))
+ if (unlikely(kvm->mmu_invalidate_in_progress))
return 1;
/*
- * Ensure the read of mmu_notifier_count happens before the read
- * of mmu_notifier_seq. This interacts with the smp_wmb() in
- * mmu_notifier_invalidate_range_end to make sure that the caller
- * either sees the old (non-zero) value of mmu_notifier_count or
- * the new (incremented) value of mmu_notifier_seq.
- * PowerPC Book3s HV KVM calls this under a per-page lock
- * rather than under kvm->mmu_lock, for scalability, so
- * can't rely on kvm->mmu_lock to keep things ordered.
+ * Ensure the read of mmu_invalidate_in_progress happens before
+ * the read of mmu_invalidate_seq. This interacts with the
+ * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
+ * that the caller either sees the old (non-zero) value of
+ * mmu_invalidate_in_progress or the new (incremented) value of
+ * mmu_invalidate_seq.
+ *
+ * PowerPC Book3s HV KVM calls this under a per-page lock rather
+ * than under kvm->mmu_lock, for scalability, so can't rely on
+ * kvm->mmu_lock to keep things ordered.
*/
smp_rmb();
- if (kvm->mmu_notifier_seq != mmu_seq)
+ if (kvm->mmu_invalidate_seq != mmu_seq)
return 1;
return 0;
}
-static inline int mmu_notifier_retry_hva(struct kvm *kvm,
- unsigned long mmu_seq,
- unsigned long hva)
+static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
+ unsigned long mmu_seq,
+ gfn_t gfn)
{
lockdep_assert_held(&kvm->mmu_lock);
/*
- * If mmu_notifier_count is non-zero, then the range maintained by
- * kvm_mmu_notifier_invalidate_range_start contains all addresses that
- * might be being invalidated. Note that it may include some false
+ * If mmu_invalidate_in_progress is non-zero, then the range maintained
+ * by kvm_mmu_notifier_invalidate_range_start contains all addresses
+ * that might be being invalidated. Note that it may include some false
* positives, due to shortcuts when handing concurrent invalidations.
*/
- if (unlikely(kvm->mmu_notifier_count) &&
- hva >= kvm->mmu_notifier_range_start &&
- hva < kvm->mmu_notifier_range_end)
- return 1;
- if (kvm->mmu_notifier_seq != mmu_seq)
+ if (unlikely(kvm->mmu_invalidate_in_progress)) {
+ /*
+ * Dropping mmu_lock after bumping mmu_invalidate_in_progress
+ * but before updating the range is a KVM bug.
+ */
+ if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA ||
+ kvm->mmu_invalidate_range_end == INVALID_GPA))
+ return 1;
+
+ if (gfn >= kvm->mmu_invalidate_range_start &&
+ gfn < kvm->mmu_invalidate_range_end)
+ return 1;
+ }
+
+ if (kvm->mmu_invalidate_seq != mmu_seq)
return 1;
return 0;
}
+
+/*
+ * This lockless version of the range-based retry check *must* be paired with a
+ * call to the locked version after acquiring mmu_lock, i.e. this is safe to
+ * use only as a pre-check to avoid contending mmu_lock. This version *will*
+ * get false negatives and false positives.
+ */
+static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm,
+ unsigned long mmu_seq,
+ gfn_t gfn)
+{
+ /*
+ * Use READ_ONCE() to ensure the in-progress flag and sequence counter
+ * are always read from memory, e.g. so that checking for retry in a
+ * loop won't result in an infinite retry loop. Don't force loads for
+ * start+end, as the key to avoiding infinite retry loops is observing
+ * the 1=>0 transition of in-progress, i.e. getting false negatives
+ * due to stale start+end values is acceptable.
+ */
+ if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) &&
+ gfn >= kvm->mmu_invalidate_range_start &&
+ gfn < kvm->mmu_invalidate_range_end)
+ return true;
+
+ return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq;
+}
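
The documented pairing, sketched with an assumed RET_RETRY code and the rwlock flavor of mmu_lock: the unsafe check cheaply skips taking mmu_lock while an invalidation is in progress, and the locked check makes the final decision.

	if (mmu_invalidate_retry_gfn_unsafe(kvm, mmu_seq, gfn))
		return RET_RETRY;

	write_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
		write_unlock(&kvm->mmu_lock);
		return RET_RETRY;
	}
	/* ... safe to install the translation ... */
	write_unlock(&kvm->mmu_lock);
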
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
@@ -1302,6 +2191,7 @@ int kvm_set_irq_routing(struct kvm *kvm,
const struct kvm_irq_routing_entry *entries,
unsigned nr,
unsigned flags);
+int kvm_init_irq_routing(struct kvm *kvm);
int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue);
@@ -1311,18 +2201,24 @@ void kvm_free_irq_routing(struct kvm *kvm);
static inline void kvm_free_irq_routing(struct kvm *kvm) {}
+static inline int kvm_init_irq_routing(struct kvm *kvm)
+{
+ return 0;
+}
+
#endif
int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
-#ifdef CONFIG_HAVE_KVM_EVENTFD
-
void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
-#ifdef CONFIG_HAVE_KVM_IRQFD
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
+bool kvm_notify_irqfd_resampler(struct kvm *kvm,
+ unsigned int irqchip,
+ unsigned int pin);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
@@ -1331,35 +2227,18 @@ static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
}
static inline void kvm_irqfd_release(struct kvm *kvm) {}
-#endif
-
-#else
-static inline void kvm_eventfd_init(struct kvm *kvm) {}
-
-static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
+static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm,
+ unsigned int irqchip,
+ unsigned int pin)
{
- return -EINVAL;
-}
-
-static inline void kvm_irqfd_release(struct kvm *kvm) {}
-
-#ifdef CONFIG_HAVE_KVM_IRQCHIP
-static inline void kvm_irq_routing_update(struct kvm *kvm)
-{
-}
-#endif
-
-static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
-{
- return -ENOSYS;
+ return false;
}
-
-#endif /* CONFIG_HAVE_KVM_EVENTFD */
+#endif /* CONFIG_HAVE_KVM_IRQCHIP */
void kvm_arch_irq_routing_update(struct kvm *kvm);
-static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
/*
* Ensure the rest of the request is published to kvm_check_request's
@@ -1369,6 +2248,27 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}
+static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+{
+ /*
+	 * Requests that don't require vCPU action should never be logged in
+ * vcpu->requests. The vCPU won't clear the request, so it will stay
+ * logged indefinitely and prevent the vCPU from entering the guest.
+ */
+ BUILD_BUG_ON(!__builtin_constant_p(req) ||
+ (req & KVM_REQUEST_NO_ACTION));
+
+ __kvm_make_request(req, vcpu);
+}
+
+#ifndef CONFIG_S390
+static inline void kvm_make_request_and_kick(int req, struct kvm_vcpu *vcpu)
+{
+ kvm_make_request(req, vcpu);
+ __kvm_vcpu_kick(vcpu, req & KVM_REQUEST_WAIT);
+}
+#endif
+
static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
return READ_ONCE(vcpu->requests);
@@ -1400,7 +2300,10 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
}
}
+#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
+extern bool enable_virt_at_load;
extern bool kvm_rebooting;
+#endif
extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
@@ -1458,8 +2361,6 @@ struct kvm_device_ops {
int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};
-void kvm_device_get(struct kvm_device *dev);
-void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
@@ -1499,7 +2400,9 @@ static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
-#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
+struct kvm_kernel_irqfd;
+
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
@@ -1507,8 +2410,9 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
-int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
- uint32_t guest_irq, bool set);
+void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
+ struct kvm_kernel_irq_routing_entry *old,
+ struct kvm_kernel_irq_routing_entry *new);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
@@ -1535,20 +2439,7 @@ static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_HAVE_KVM_NO_POLL */
-#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
-long kvm_arch_vcpu_async_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg);
-#else
-static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
- unsigned int ioctl,
- unsigned long arg)
-{
- return -ENOIOCTLCMD;
-}
-#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
-
-void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
@@ -1559,19 +2450,37 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
-typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
-
-int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
- uintptr_t data, const char *name,
- struct task_struct **thread_ptr);
-
-#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
+#ifdef CONFIG_VIRT_XFER_TO_GUEST_WORK
static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
{
vcpu->run->exit_reason = KVM_EXIT_INTR;
vcpu->stat.signal_exits++;
}
-#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
+
+static inline int kvm_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
+{
+ int r = xfer_to_guest_mode_handle_work();
+
+ if (r) {
+ WARN_ON_ONCE(r != -EINTR);
+ kvm_handle_signal_exit(vcpu);
+ }
+ return r;
+}
+#endif /* CONFIG_VIRT_XFER_TO_GUEST_WORK */
+
+/*
+ * If more than one page is being (un)accounted, @virt must be the address of
+ * the first page of a block of pages that were allocated together (i.e.
+ * accounted together).
+ *
+ * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
+ * is thread-safe.
+ */
+static inline void kvm_account_pgtable_pages(void *virt, int nr)
+{
+ mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
+}
/*
* This defines how many reserved entries we want to keep before we
@@ -1583,4 +2492,116 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
/* Max number of entries allowed for each kvm dirty ring */
#define KVM_DIRTY_RING_MAX_ENTRIES 65536
+static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
+ gpa_t gpa, gpa_t size,
+ bool is_write, bool is_exec,
+ bool is_private)
+{
+ vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
+ vcpu->run->memory_fault.gpa = gpa;
+ vcpu->run->memory_fault.size = size;
+
+ /* RWX flags are not (yet) defined or communicated to userspace. */
+ vcpu->run->memory_fault.flags = 0;
+ if (is_private)
+ vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE;
+}
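
An illustrative caller, with fault_is_private, is_write and is_exec as assumed locals: on a private/shared mismatch, fill the exit and return -EFAULT so the fault is forwarded to userspace.

	if (fault_is_private != kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(gpa))) {
		kvm_prepare_memory_fault_exit(vcpu, gpa, PAGE_SIZE, is_write,
					      is_exec, fault_is_private);
		return -EFAULT;
	}
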
+
+static inline bool kvm_memslot_is_gmem_only(const struct kvm_memory_slot *slot)
+{
+ if (!IS_ENABLED(CONFIG_KVM_GUEST_MEMFD))
+ return false;
+
+ return slot->flags & KVM_MEMSLOT_GMEM_ONLY;
+}
+
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
+{
+ return xa_to_value(xa_load(&kvm->mem_attr_array, gfn));
+}
+
+bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
+ unsigned long mask, unsigned long attrs);
+bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range);
+bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
+ struct kvm_gfn_range *range);
+
+static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
+{
+ return kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
+}
+#else
+static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
+{
+ return false;
+}
+#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
+
+#ifdef CONFIG_KVM_GUEST_MEMFD
+int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
+ int *max_order);
+#else
+static inline int kvm_gmem_get_pfn(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ kvm_pfn_t *pfn, struct page **page,
+ int *max_order)
+{
+ KVM_BUG_ON(1, kvm);
+ return -EIO;
+}
+#endif /* CONFIG_KVM_GUEST_MEMFD */
+
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
+int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
+#endif
+
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE
+/**
+ * kvm_gmem_populate() - Populate/prepare a GPA range with guest data
+ *
+ * @kvm: KVM instance
+ * @gfn: starting GFN to be populated
+ * @src: userspace-provided buffer containing data to copy into GFN range
+ * (passed to @post_populate, and incremented on each iteration
+ * if not NULL)
+ * @npages: number of pages to copy from the userspace buffer
+ * @post_populate: callback to issue for each gmem page that backs the GPA
+ * range
+ * @opaque: opaque data to pass to @post_populate callback
+ *
+ * This is primarily intended for cases where a gmem-backed GPA range needs
+ * to be initialized with userspace-provided data prior to being mapped into
+ * the guest as a private page. This should be called with the slots->lock
+ * held so that caller-enforced invariants regarding the expected memory
+ * attributes of the GPA range do not race with KVM_SET_MEMORY_ATTRIBUTES.
+ *
+ * Returns the number of pages that were populated.
+ */
+typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
+ void __user *src, int order, void *opaque);
+
+long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
+ kvm_gmem_populate_cb post_populate, void *opaque);
+#endif
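
A sketch of a matching @post_populate callback, assuming 4K granularity (order 0) and ignoring the measurement/encryption work a real implementation such as SEV-SNP's would also perform:

/* Hypothetical callback: copy userspace data into the gmem page at @pfn. */
static int example_post_populate(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
				 void __user *src, int order, void *opaque)
{
	if (src && copy_from_user(page_address(pfn_to_page(pfn)), src,
				  PAGE_SIZE))
		return -EFAULT;
	return 0;
}

/* With slots->lock held: */
/* ret = kvm_gmem_populate(kvm, gfn, src, npages, example_post_populate, NULL); */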
+
+#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
+void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
+#endif
+
+#ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
+long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
+ struct kvm_pre_fault_memory *range);
+#endif
+
+#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
+int kvm_enable_virtualization(void);
+void kvm_disable_virtualization(void);
+#else
+static inline int kvm_enable_virtualization(void) { return 0; }
+static inline void kvm_disable_virtualization(void) { }
+#endif
+
#endif
diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h
index dac047abdba7..ef8c134ded8a 100644
--- a/include/linux/kvm_irqfd.h
+++ b/include/linux/kvm_irqfd.h
@@ -31,7 +31,7 @@ struct kvm_kernel_irqfd_resampler {
/*
* Entry in list of kvm->irqfd.resampler_list. Use for sharing
* resamplers among irqfds on the same gsi.
- * Accessed and modified under kvm->irqfds.resampler_lock
+ * RCU list modified under kvm->irqfds.resampler_lock
*/
struct list_head link;
};
@@ -55,10 +55,13 @@ struct kvm_kernel_irqfd {
/* Used for setup/shutdown */
struct eventfd_ctx *eventfd;
struct list_head list;
- poll_table pt;
struct work_struct shutdown;
struct irq_bypass_consumer consumer;
struct irq_bypass_producer *producer;
+
+ struct kvm_vcpu *irq_bypass_vcpu;
+ struct list_head vcpu_list;
+ void *irq_bypass_data;
};
#endif /* __LINUX_KVM_IRQFD_H */
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index a7580f69dda0..a568d8e6f4e8 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -3,9 +3,41 @@
#ifndef __KVM_TYPES_H__
#define __KVM_TYPES_H__
+#include <linux/bits.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <asm/kvm_types.h>
+
+#ifdef KVM_SUB_MODULES
+#define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol) \
+ EXPORT_SYMBOL_FOR_MODULES(symbol, __stringify(KVM_SUB_MODULES))
+#define EXPORT_SYMBOL_FOR_KVM(symbol) \
+ EXPORT_SYMBOL_FOR_MODULES(symbol, "kvm," __stringify(KVM_SUB_MODULES))
+#else
+#define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol)
+/*
+ * Allow architectures to provide a custom EXPORT_SYMBOL_FOR_KVM, but only
+ * if there are no submodules, e.g. to allow suppressing exports if KVM=m, but
+ * kvm.ko won't actually be built (due to lack of at least one submodule).
+ */
+#ifndef EXPORT_SYMBOL_FOR_KVM
+#if IS_MODULE(CONFIG_KVM)
+#define EXPORT_SYMBOL_FOR_KVM(symbol) EXPORT_SYMBOL_FOR_MODULES(symbol, "kvm")
+#else
+#define EXPORT_SYMBOL_FOR_KVM(symbol)
+#endif /* IS_MODULE(CONFIG_KVM) */
+#endif /* EXPORT_SYMBOL_FOR_KVM */
+#endif
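
Illustrative use, with a hypothetical symbol name:

/* Visible only to the sub-modules named in KVM_SUB_MODULES: */
/* EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_example_helper); */

/* Visible to kvm.ko as well as its sub-modules: */
/* EXPORT_SYMBOL_FOR_KVM(kvm_example_helper); */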
+
+#ifndef __ASSEMBLER__
+
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
+
struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
+struct kvm_gfn_range;
struct kvm_interrupt;
struct kvm_irq_routing_table;
struct kvm_memory_slot;
@@ -18,10 +50,6 @@ struct kvm_memslots;
enum kvm_mr_change;
-#include <linux/types.h>
-
-#include <asm/kvm_types.h>
-
/*
* Address types:
*
@@ -37,7 +65,7 @@ typedef unsigned long gva_t;
typedef u64 gpa_t;
typedef u64 gfn_t;
-#define GPA_INVALID (~(gpa_t)0)
+#define INVALID_GPA (~(gpa_t)0)
typedef unsigned long hva_t;
typedef u64 hpa_t;
@@ -55,9 +83,17 @@ struct gfn_to_hva_cache {
struct gfn_to_pfn_cache {
u64 generation;
- gfn_t gfn;
+ gpa_t gpa;
+ unsigned long uhva;
+ struct kvm_memory_slot *memslot;
+ struct kvm *kvm;
+ struct list_head list;
+ rwlock_t lock;
+ struct mutex refresh_lock;
+ void *khva;
kvm_pfn_t pfn;
- bool dirty;
+ bool active;
+ bool valid;
};
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
@@ -67,14 +103,43 @@ struct gfn_to_pfn_cache {
* MMU flows is problematic, as is triggering reclaim, I/O, etc... while
* holding MMU locks. Note, these caches act more like prefetch buffers than
* classical caches, i.e. objects are not returned to the cache on being freed.
+ *
+ * The @capacity field and @objects array are lazily initialized when the cache
+ * is topped up (__kvm_mmu_topup_memory_cache()).
*/
struct kvm_mmu_memory_cache {
- int nobjs;
gfp_t gfp_zero;
+ gfp_t gfp_custom;
+ u64 init_value;
struct kmem_cache *kmem_cache;
- void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
+ int capacity;
+ int nobjs;
+ void **objects;
};
#endif
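
A hedged sketch of the intended prefetch pattern, assuming the generic kvm_mmu_topup_memory_cache()/kvm_mmu_memory_cache_alloc() helpers; the mmu_page_cache field and rwlock-style mmu_lock shown here are illustrative and vary by architecture:

/* Hypothetical: allocate a page-table object without sleeping under mmu_lock. */
static void *example_alloc_pt_object(struct kvm_vcpu *vcpu)
{
	void *obj;

	/* Outside mmu_lock: topping up may sleep and trigger reclaim. */
	if (kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache, 4))
		return NULL;

	/* Under mmu_lock: this just pops a prefetched object. */
	write_lock(&vcpu->kvm->mmu_lock);
	obj = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
	write_unlock(&vcpu->kvm->mmu_lock);

	return obj;
}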
+#define HALT_POLL_HIST_COUNT 32
+
+struct kvm_vm_stat_generic {
+ u64 remote_tlb_flush;
+ u64 remote_tlb_flush_requests;
+};
+
+struct kvm_vcpu_stat_generic {
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+ u64 halt_poll_invalid;
+ u64 halt_wakeup;
+ u64 halt_poll_success_ns;
+ u64 halt_poll_fail_ns;
+ u64 halt_wait_ns;
+ u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT];
+ u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];
+ u64 halt_wait_hist[HALT_POLL_HIST_COUNT];
+ u64 blocking;
+};
+
+#define KVM_STATS_NAME_SIZE 48
+#endif /* !__ASSEMBLER__ */
#endif /* __KVM_TYPES_H__ */
diff --git a/include/linux/lapb.h b/include/linux/lapb.h
index eb56472f23b2..b5333f9413dc 100644
--- a/include/linux/lapb.h
+++ b/include/linux/lapb.h
@@ -6,6 +6,11 @@
#ifndef LAPB_KERNEL_H
#define LAPB_KERNEL_H
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+
+struct net_device;
+
#define LAPB_OK 0
#define LAPB_BADTOKEN 1
#define LAPB_INVALUE 2
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index abe3d95f795b..84f1053cf2a8 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -38,9 +38,6 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter)
void clear_tsk_latency_tracing(struct task_struct *p);
-int sysctl_latencytop(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos);
-
#else
static inline void
diff --git a/include/linux/lcd.h b/include/linux/lcd.h
index 238fb1dfed98..d4fa03722b72 100644
--- a/include/linux/lcd.h
+++ b/include/linux/lcd.h
@@ -11,8 +11,11 @@
#include <linux/device.h>
#include <linux/mutex.h>
-#include <linux/notifier.h>
-#include <linux/fb.h>
+
+#define LCD_POWER_ON (0)
+#define LCD_POWER_REDUCED (1) // deprecated; don't use in new code
+#define LCD_POWER_REDUCED_VSYNC_SUSPEND (2) // deprecated; don't use in new code
+#define LCD_POWER_OFF (4)
/* Notes on locking:
*
@@ -30,7 +33,6 @@
*/
struct lcd_device;
-struct fb_info;
struct lcd_properties {
/* The maximum value for contrast (read-only) */
@@ -47,11 +49,23 @@ struct lcd_ops {
int (*get_contrast)(struct lcd_device *);
/* Set LCD panel contrast */
int (*set_contrast)(struct lcd_device *, int contrast);
- /* Set LCD panel mode (resolutions ...) */
- int (*set_mode)(struct lcd_device *, struct fb_videomode *);
- /* Check if given framebuffer device is the one LCD is bound to;
- return 0 if not, !=0 if it is. If NULL, lcd always matches the fb. */
- int (*check_fb)(struct lcd_device *, struct fb_info *);
+
+ /*
+ * Set LCD panel mode (resolutions ...)
+ */
+ int (*set_mode)(struct lcd_device *lcd, u32 xres, u32 yres);
+
+ /*
+ * Check if the LCD controls the given display device. This
+ * operation is optional and if not implemented it is assumed that
+ * the display is always the one controlled by the LCD.
+ *
+ * RETURNS:
+ *
+ * If display_dev is NULL or display_dev matches the device controlled by
+ * the LCD, return true. Otherwise return false.
+ */
+ bool (*controls_device)(struct lcd_device *lcd, struct device *display_device);
};
struct lcd_device {
@@ -61,11 +75,14 @@ struct lcd_device {
points to something in the body of that driver, it is also invalid. */
struct mutex ops_lock;
/* If this is NULL, the backing module is unloaded */
- struct lcd_ops *ops;
+ const struct lcd_ops *ops;
/* Serialise access to set_power method */
struct mutex update_lock;
- /* The framebuffer notifier block */
- struct notifier_block fb_notif;
+
+ /**
+ * @entry: List entry of all registered lcd devices
+ */
+ struct list_head entry;
struct device dev;
};
@@ -102,14 +119,27 @@ static inline void lcd_set_power(struct lcd_device *ld, int power)
}
extern struct lcd_device *lcd_device_register(const char *name,
- struct device *parent, void *devdata, struct lcd_ops *ops);
+ struct device *parent, void *devdata, const struct lcd_ops *ops);
extern struct lcd_device *devm_lcd_device_register(struct device *dev,
const char *name, struct device *parent,
- void *devdata, struct lcd_ops *ops);
+ void *devdata, const struct lcd_ops *ops);
extern void lcd_device_unregister(struct lcd_device *ld);
extern void devm_lcd_device_unregister(struct device *dev,
struct lcd_device *ld);
+#if IS_REACHABLE(CONFIG_LCD_CLASS_DEVICE)
+void lcd_notify_blank_all(struct device *display_dev, int power);
+void lcd_notify_mode_change_all(struct device *display_dev,
+ unsigned int width, unsigned int height);
+#else
+static inline void lcd_notify_blank_all(struct device *display_dev, int power)
+{}
+
+static inline void lcd_notify_mode_change_all(struct device *display_dev,
+ unsigned int width, unsigned int height)
+{}
+#endif
+
#define to_lcd_device(obj) container_of(obj, struct lcd_device, dev)
static inline void * lcd_get_data(struct lcd_device *ld_dev)
diff --git a/include/linux/leafops.h b/include/linux/leafops.h
new file mode 100644
index 000000000000..cfafe7a5e7b1
--- /dev/null
+++ b/include/linux/leafops.h
@@ -0,0 +1,619 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Describes operations that can be performed on software-defined page table
+ * leaf entries. These are abstracted from the hardware page table entries
+ * themselves by the softleaf_t type, see mm_types.h.
+ */
+#ifndef _LINUX_LEAFOPS_H
+#define _LINUX_LEAFOPS_H
+
+#include <linux/mm_types.h>
+#include <linux/swapops.h>
+#include <linux/swap.h>
+
+#ifdef CONFIG_MMU
+
+/* Temporary until swp_entry_t eliminated. */
+#define LEAF_TYPE_SHIFT SWP_TYPE_SHIFT
+
+enum softleaf_type {
+ /* Fundamental types. */
+ SOFTLEAF_NONE,
+ SOFTLEAF_SWAP,
+ /* Migration types. */
+ SOFTLEAF_MIGRATION_READ,
+ SOFTLEAF_MIGRATION_READ_EXCLUSIVE,
+ SOFTLEAF_MIGRATION_WRITE,
+ /* Device types. */
+ SOFTLEAF_DEVICE_PRIVATE_READ,
+ SOFTLEAF_DEVICE_PRIVATE_WRITE,
+ SOFTLEAF_DEVICE_EXCLUSIVE,
+ /* H/W poison types. */
+ SOFTLEAF_HWPOISON,
+ /* Marker types. */
+ SOFTLEAF_MARKER,
+};
+
+/**
+ * softleaf_mk_none() - Create an empty ('none') leaf entry.
+ * Returns: empty leaf entry.
+ */
+static inline softleaf_t softleaf_mk_none(void)
+{
+ return ((softleaf_t) { 0 });
+}
+
+/**
+ * softleaf_from_pte() - Obtain a leaf entry from a PTE entry.
+ * @pte: PTE entry.
+ *
+ * If @pte is present or none (and therefore encodes no leaf entry), the
+ * function returns an empty leaf entry. Otherwise, it returns a leaf entry.
+ *
+ * Returns: Leaf entry.
+ */
+static inline softleaf_t softleaf_from_pte(pte_t pte)
+{
+ softleaf_t arch_entry;
+
+ if (pte_present(pte) || pte_none(pte))
+ return softleaf_mk_none();
+
+ pte = pte_swp_clear_flags(pte);
+ arch_entry = __pte_to_swp_entry(pte);
+
+ /* Temporary until swp_entry_t eliminated. */
+ return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+}
+
+/**
+ * softleaf_to_pte() - Obtain a PTE entry from a leaf entry.
+ * @entry: Leaf entry.
+ *
+ * This generates an architecture-specific PTE entry that can be utilised to
+ * encode the metadata the leaf entry encodes.
+ *
+ * Returns: Architecture-specific PTE entry encoding leaf entry.
+ */
+static inline pte_t softleaf_to_pte(softleaf_t entry)
+{
+ /* Temporary until swp_entry_t eliminated. */
+ return swp_entry_to_pte(entry);
+}
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+/**
+ * softleaf_from_pmd() - Obtain a leaf entry from a PMD entry.
+ * @pmd: PMD entry.
+ *
+ * If @pmd is present or none (and therefore encodes no leaf entry), the
+ * function returns an empty leaf entry. Otherwise, it returns a leaf entry.
+ *
+ * Returns: Leaf entry.
+ */
+static inline softleaf_t softleaf_from_pmd(pmd_t pmd)
+{
+ softleaf_t arch_entry;
+
+ if (pmd_present(pmd) || pmd_none(pmd))
+ return softleaf_mk_none();
+
+ if (pmd_swp_soft_dirty(pmd))
+ pmd = pmd_swp_clear_soft_dirty(pmd);
+ if (pmd_swp_uffd_wp(pmd))
+ pmd = pmd_swp_clear_uffd_wp(pmd);
+ arch_entry = __pmd_to_swp_entry(pmd);
+
+ /* Temporary until swp_entry_t eliminated. */
+ return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+}
+
+#else
+
+static inline softleaf_t softleaf_from_pmd(pmd_t pmd)
+{
+ return softleaf_mk_none();
+}
+
+#endif
+
+/**
+ * softleaf_is_none() - Is the leaf entry empty?
+ * @entry: Leaf entry.
+ *
+ * Empty entries are typically the result of a 'none' page table leaf entry
+ * being converted to a leaf entry.
+ *
+ * Returns: true if the entry is empty, false otherwise.
+ */
+static inline bool softleaf_is_none(softleaf_t entry)
+{
+ return entry.val == 0;
+}
+
+/**
+ * softleaf_type() - Identify the type of leaf entry.
+ * @entry: Leaf entry.
+ *
+ * Returns: the leaf entry type associated with @entry.
+ */
+static inline enum softleaf_type softleaf_type(softleaf_t entry)
+{
+ unsigned int type_num;
+
+ if (softleaf_is_none(entry))
+ return SOFTLEAF_NONE;
+
+ type_num = entry.val >> LEAF_TYPE_SHIFT;
+
+ if (type_num < MAX_SWAPFILES)
+ return SOFTLEAF_SWAP;
+
+ switch (type_num) {
+#ifdef CONFIG_MIGRATION
+ case SWP_MIGRATION_READ:
+ return SOFTLEAF_MIGRATION_READ;
+ case SWP_MIGRATION_READ_EXCLUSIVE:
+ return SOFTLEAF_MIGRATION_READ_EXCLUSIVE;
+ case SWP_MIGRATION_WRITE:
+ return SOFTLEAF_MIGRATION_WRITE;
+#endif
+#ifdef CONFIG_DEVICE_PRIVATE
+ case SWP_DEVICE_WRITE:
+ return SOFTLEAF_DEVICE_PRIVATE_WRITE;
+ case SWP_DEVICE_READ:
+ return SOFTLEAF_DEVICE_PRIVATE_READ;
+ case SWP_DEVICE_EXCLUSIVE:
+ return SOFTLEAF_DEVICE_EXCLUSIVE;
+#endif
+#ifdef CONFIG_MEMORY_FAILURE
+ case SWP_HWPOISON:
+ return SOFTLEAF_HWPOISON;
+#endif
+ case SWP_PTE_MARKER:
+ return SOFTLEAF_MARKER;
+ }
+
+ /* Unknown entry type. */
+ VM_WARN_ON_ONCE(1);
+ return SOFTLEAF_NONE;
+}
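
As a usage sketch, a page-table walker can dispatch on the decoded type instead of probing each predicate in turn (the handler is hypothetical):

/* Hypothetical handler for a non-present PTE. */
static void example_handle_nonpresent_pte(pte_t pte)
{
	const softleaf_t entry = softleaf_from_pte(pte);

	switch (softleaf_type(entry)) {
	case SOFTLEAF_SWAP:
		/* Genuine swap entry: the page lives on a swap device. */
		break;
	case SOFTLEAF_MIGRATION_READ:
	case SOFTLEAF_MIGRATION_READ_EXCLUSIVE:
	case SOFTLEAF_MIGRATION_WRITE:
		/* Wait on the folio locked by the migration code. */
		break;
	case SOFTLEAF_MARKER:
		/* Inspect softleaf_to_marker(entry). */
		break;
	default:
		break;
	}
}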
+
+/**
+ * softleaf_is_swap() - Is this leaf entry a swap entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a swap entry, otherwise false.
+ */
+static inline bool softleaf_is_swap(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_SWAP;
+}
+
+/**
+ * softleaf_is_migration_write() - Is this leaf entry a writable migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a writable migration entry, otherwise
+ * false.
+ */
+static inline bool softleaf_is_migration_write(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_MIGRATION_WRITE;
+}
+
+/**
+ * softleaf_is_migration_read() - Is this leaf entry a readable migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a readable migration entry, otherwise
+ * false.
+ */
+static inline bool softleaf_is_migration_read(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_MIGRATION_READ;
+}
+
+/**
+ * softleaf_is_migration_read_exclusive() - Is this leaf entry an exclusive
+ * readable migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is an exclusive readable migration entry,
+ * otherwise false.
+ */
+static inline bool softleaf_is_migration_read_exclusive(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_MIGRATION_READ_EXCLUSIVE;
+}
+
+/**
+ * softleaf_is_migration() - Is this leaf entry a migration entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a migration entry, otherwise false.
+ */
+static inline bool softleaf_is_migration(softleaf_t entry)
+{
+ switch (softleaf_type(entry)) {
+ case SOFTLEAF_MIGRATION_READ:
+ case SOFTLEAF_MIGRATION_READ_EXCLUSIVE:
+ case SOFTLEAF_MIGRATION_WRITE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * softleaf_is_device_private_write() - Is this leaf entry a device private
+ * writable entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a device private writable entry, otherwise
+ * false.
+ */
+static inline bool softleaf_is_device_private_write(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_DEVICE_PRIVATE_WRITE;
+}
+
+/**
+ * softleaf_is_device_private() - Is this leaf entry a device private entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a device private entry, otherwise false.
+ */
+static inline bool softleaf_is_device_private(softleaf_t entry)
+{
+ switch (softleaf_type(entry)) {
+ case SOFTLEAF_DEVICE_PRIVATE_WRITE:
+ case SOFTLEAF_DEVICE_PRIVATE_READ:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * softleaf_is_device_exclusive() - Is this leaf entry a device-exclusive entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a device-exclusive entry, otherwise false.
+ */
+static inline bool softleaf_is_device_exclusive(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_DEVICE_EXCLUSIVE;
+}
+
+/**
+ * softleaf_is_hwpoison() - Is this leaf entry a hardware poison entry?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a hardware poison entry, otherwise false.
+ */
+static inline bool softleaf_is_hwpoison(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_HWPOISON;
+}
+
+/**
+ * softleaf_is_marker() - Is this leaf entry a marker?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a marker entry, otherwise false.
+ */
+static inline bool softleaf_is_marker(softleaf_t entry)
+{
+ return softleaf_type(entry) == SOFTLEAF_MARKER;
+}
+
+/**
+ * softleaf_to_marker() - Obtain marker associated with leaf entry.
+ * @entry: Leaf entry, softleaf_is_marker(@entry) must return true.
+ *
+ * Returns: Marker associated with the leaf entry.
+ */
+static inline pte_marker softleaf_to_marker(softleaf_t entry)
+{
+ VM_WARN_ON_ONCE(!softleaf_is_marker(entry));
+
+ return swp_offset(entry) & PTE_MARKER_MASK;
+}
+
+/**
+ * softleaf_has_pfn() - Does this leaf entry encode a valid PFN number?
+ * @entry: Leaf entry.
+ *
+ * A PFN swap entry is a special type of swap entry that always has a PFN
+ * stored in the swap offset. Such entries can be used to represent
+ * unaddressable device memory, to restrict access to a page undergoing
+ * migration, or to represent a PFN which has been hwpoisoned and unmapped.
+ *
+ * Returns: true if the leaf entry encodes a PFN, otherwise false.
+ */
+static inline bool softleaf_has_pfn(softleaf_t entry)
+{
+ /* Make sure the swp offset can always store the needed fields. */
+ BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
+
+ if (softleaf_is_migration(entry))
+ return true;
+ if (softleaf_is_device_private(entry))
+ return true;
+ if (softleaf_is_device_exclusive(entry))
+ return true;
+ if (softleaf_is_hwpoison(entry))
+ return true;
+
+ return false;
+}
+
+/**
+ * softleaf_to_pfn() - Obtain PFN encoded within leaf entry.
+ * @entry: Leaf entry, softleaf_has_pfn(@entry) must return true.
+ *
+ * Returns: The PFN associated with the leaf entry.
+ */
+static inline unsigned long softleaf_to_pfn(softleaf_t entry)
+{
+ VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
+
+ /* Temporary until swp_entry_t eliminated. */
+ return swp_offset(entry) & SWP_PFN_MASK;
+}
+
+/**
+ * softleaf_to_page() - Obtains struct page for PFN encoded within leaf entry.
+ * @entry: Leaf entry, softleaf_has_pfn(@entry) must return true.
+ *
+ * Returns: Pointer to the struct page associated with the leaf entry's PFN.
+ */
+static inline struct page *softleaf_to_page(softleaf_t entry)
+{
+ struct page *page = pfn_to_page(softleaf_to_pfn(entry));
+
+ VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding page is locked
+ */
+ VM_WARN_ON_ONCE(softleaf_is_migration(entry) && !PageLocked(page));
+
+ return page;
+}
+
+/**
+ * softleaf_to_folio() - Obtains struct folio for PFN encoded within leaf entry.
+ * @entry: Leaf entry, softleaf_has_pfn(@entry) must return true.
+ *
+ * Returns: Pointer to the struct folio associated with the leaf entry's PFN.
+ */
+static inline struct folio *softleaf_to_folio(softleaf_t entry)
+{
+ struct folio *folio = pfn_folio(softleaf_to_pfn(entry));
+
+ VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding folio is locked.
+ */
+ VM_WARN_ON_ONCE(softleaf_is_migration(entry) &&
+ !folio_test_locked(folio));
+
+ return folio;
+}
+
+/**
+ * softleaf_is_poison_marker() - Is this leaf entry a poison marker?
+ * @entry: Leaf entry.
+ *
+ * The poison marker is set via UFFDIO_POISON. Userfaultfd-specific.
+ *
+ * Returns: true if the leaf entry is a poison marker, otherwise false.
+ */
+static inline bool softleaf_is_poison_marker(softleaf_t entry)
+{
+ if (!softleaf_is_marker(entry))
+ return false;
+
+ return softleaf_to_marker(entry) & PTE_MARKER_POISONED;
+}
+
+/**
+ * softleaf_is_guard_marker() - Is this leaf entry a guard region marker?
+ * @entry: Leaf entry.
+ *
+ * Returns: true if the leaf entry is a guard marker, otherwise false.
+ */
+static inline bool softleaf_is_guard_marker(softleaf_t entry)
+{
+ if (!softleaf_is_marker(entry))
+ return false;
+
+ return softleaf_to_marker(entry) & PTE_MARKER_GUARD;
+}
+
+/**
+ * softleaf_is_uffd_wp_marker() - Is this leaf entry a userfaultfd write protect
+ * marker?
+ * @entry: Leaf entry.
+ *
+ * Userfaultfd-specific.
+ *
+ * Returns: true if the leaf entry is a UFFD WP marker, otherwise false.
+ */
+static inline bool softleaf_is_uffd_wp_marker(softleaf_t entry)
+{
+ if (!softleaf_is_marker(entry))
+ return false;
+
+ return softleaf_to_marker(entry) & PTE_MARKER_UFFD_WP;
+}
+
+#ifdef CONFIG_MIGRATION
+
+/**
+ * softleaf_is_migration_young() - Does this migration entry contain an accessed
+ * bit?
+ * @entry: Leaf entry.
+ *
+ * If the architecture can support storing A/D bits in migration entries, this
+ * determines whether the accessed (or 'young') bit was set on the migrated page
+ * table entry.
+ *
+ * Returns: true if the entry contains an accessed bit, otherwise false.
+ */
+static inline bool softleaf_is_migration_young(softleaf_t entry)
+{
+ VM_WARN_ON_ONCE(!softleaf_is_migration(entry));
+
+ if (migration_entry_supports_ad())
+ return swp_offset(entry) & SWP_MIG_YOUNG;
+ /* Keep the old behavior of aging page after migration */
+ return false;
+}
+
+/**
+ * softleaf_is_migration_dirty() - Does this migration entry contain a dirty bit?
+ * @entry: Leaf entry.
+ *
+ * If the architecture can support storing A/D bits in migration entries, this
+ * determines whether the dirty bit was set on the migrated page table entry.
+ *
+ * Returns: true if the entry contains a dirty bit, otherwise false.
+ */
+static inline bool softleaf_is_migration_dirty(softleaf_t entry)
+{
+ VM_WARN_ON_ONCE(!softleaf_is_migration(entry));
+
+ if (migration_entry_supports_ad())
+ return swp_offset(entry) & SWP_MIG_DIRTY;
+ /* Keep the old behavior of clean page after migration */
+ return false;
+}
+
+#else /* CONFIG_MIGRATION */
+
+static inline bool softleaf_is_migration_young(softleaf_t entry)
+{
+ return false;
+}
+
+static inline bool softleaf_is_migration_dirty(softleaf_t entry)
+{
+ return false;
+}
+#endif /* CONFIG_MIGRATION */
+
+/**
+ * pte_is_marker() - Does the PTE entry encode a marker leaf entry?
+ * @pte: PTE entry.
+ *
+ * Returns: true if this PTE is a marker leaf entry, otherwise false.
+ */
+static inline bool pte_is_marker(pte_t pte)
+{
+ return softleaf_is_marker(softleaf_from_pte(pte));
+}
+
+/**
+ * pte_is_uffd_wp_marker() - Does this PTE entry encode a userfaultfd write
+ * protect marker leaf entry?
+ * @pte: PTE entry.
+ *
+ * Returns: true if this PTE is a UFFD WP marker leaf entry, otherwise false.
+ */
+static inline bool pte_is_uffd_wp_marker(pte_t pte)
+{
+ const softleaf_t entry = softleaf_from_pte(pte);
+
+ return softleaf_is_uffd_wp_marker(entry);
+}
+
+/**
+ * pte_is_uffd_marker() - Does this PTE entry encode a userfaultfd-specific
+ * marker leaf entry?
+ * @pte: PTE entry.
+ *
+ * It's useful to be able to determine which leaf entries encode UFFD-specific
+ * markers so we can handle these correctly.
+ *
+ * Returns: true if this PTE entry is a UFFD-specific marker, otherwise false.
+ */
+static inline bool pte_is_uffd_marker(pte_t pte)
+{
+ const softleaf_t entry = softleaf_from_pte(pte);
+
+ if (!softleaf_is_marker(entry))
+ return false;
+
+ /* UFFD WP, poisoned swap entries are UFFD-handled. */
+ if (softleaf_is_uffd_wp_marker(entry))
+ return true;
+ if (softleaf_is_poison_marker(entry))
+ return true;
+
+ return false;
+}
+
+#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_ENABLE_THP_MIGRATION)
+
+/**
+ * pmd_is_device_private_entry() - Check if PMD contains a device private swap
+ * entry.
+ * @pmd: The PMD to check.
+ *
+ * Returns true if the PMD contains a swap entry that represents a device private
+ * page mapping. This is used for zone device private pages that have been
+ * swapped out but still need special handling during various memory management
+ * operations.
+ *
+ * Return: true if PMD contains device private entry, false otherwise
+ */
+static inline bool pmd_is_device_private_entry(pmd_t pmd)
+{
+ return softleaf_is_device_private(softleaf_from_pmd(pmd));
+}
+
+#else /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
+static inline bool pmd_is_device_private_entry(pmd_t pmd)
+{
+ return false;
+}
+
+#endif /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
+/**
+ * pmd_is_migration_entry() - Does this PMD entry encode a migration entry?
+ * @pmd: PMD entry.
+ *
+ * Returns: true if the PMD encodes a migration entry, otherwise false.
+ */
+static inline bool pmd_is_migration_entry(pmd_t pmd)
+{
+ return softleaf_is_migration(softleaf_from_pmd(pmd));
+}
+
+/**
+ * pmd_is_valid_softleaf() - Is this PMD entry a valid leaf entry?
+ * @pmd: PMD entry.
+ *
+ * PMD leaf entries are valid only if they are device private or migration
+ * entries. This function checks that a PMD leaf entry is valid in this
+ * respect.
+ *
+ * Returns: true if the PMD entry is a valid leaf entry, otherwise false.
+ */
+static inline bool pmd_is_valid_softleaf(pmd_t pmd)
+{
+ const softleaf_t entry = softleaf_from_pmd(pmd);
+
+ /* Only device private, migration entries valid for PMD. */
+ return softleaf_is_device_private(entry) ||
+ softleaf_is_migration(entry);
+}
+
+#endif /* CONFIG_MMU */
+#endif /* _LINUX_LEAFOPS_H */
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
index 612b4cab3819..775a96217518 100644
--- a/include/linux/led-class-flash.h
+++ b/include/linux/led-class-flash.h
@@ -45,6 +45,8 @@ struct led_flash_ops {
int (*timeout_set)(struct led_classdev_flash *fled_cdev, u32 timeout);
/* get the flash LED fault */
int (*fault_get)(struct led_classdev_flash *fled_cdev, u32 *fault);
+ /* set flash duration */
+ int (*duration_set)(struct led_classdev_flash *fled_cdev, u32 duration);
};
/*
@@ -75,6 +77,9 @@ struct led_classdev_flash {
/* flash timeout value in microseconds along with its constraints */
struct led_flash_setting timeout;
+ /* flash duration value in microseconds along with its constraints */
+ struct led_flash_setting duration;
+
/* LED Flash class sysfs groups */
const struct attribute_group *sysfs_groups[LED_FLASH_SYSFS_GROUPS_SIZE];
};
@@ -85,7 +90,6 @@ static inline struct led_classdev_flash *lcdev_to_flcdev(
return container_of(lcdev, struct led_classdev_flash, led_cdev);
}
-#if IS_ENABLED(CONFIG_LEDS_CLASS_FLASH)
/**
* led_classdev_flash_register_ext - register a new object of LED class with
* init data and with support for flash LEDs
@@ -116,29 +120,6 @@ int devm_led_classdev_flash_register_ext(struct device *parent,
void devm_led_classdev_flash_unregister(struct device *parent,
struct led_classdev_flash *fled_cdev);
-#else
-
-static inline int led_classdev_flash_register_ext(struct device *parent,
- struct led_classdev_flash *fled_cdev,
- struct led_init_data *init_data)
-{
- return 0;
-}
-
-static inline void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev) {};
-static inline int devm_led_classdev_flash_register_ext(struct device *parent,
- struct led_classdev_flash *fled_cdev,
- struct led_init_data *init_data)
-{
- return 0;
-}
-
-static inline void devm_led_classdev_flash_unregister(struct device *parent,
- struct led_classdev_flash *fled_cdev)
-{};
-
-#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_FLASH) */
-
static inline int led_classdev_flash_register(struct device *parent,
struct led_classdev_flash *fled_cdev)
{
@@ -216,7 +197,7 @@ int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
* @fled_cdev: the flash LED to set
* @timeout: the flash timeout to set it to
*
- * Set the flash strobe duration.
+ * Set the flash strobe timeout.
*
* Returns: 0 on success or negative error value on failure
*/
@@ -233,4 +214,15 @@ int led_set_flash_timeout(struct led_classdev_flash *fled_cdev, u32 timeout);
*/
int led_get_flash_fault(struct led_classdev_flash *fled_cdev, u32 *fault);
+/**
+ * led_set_flash_duration - set flash LED duration
+ * @fled_cdev: the flash LED to set
+ * @duration: the flash duration to set it to
+ *
+ * Set the flash strobe duration.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+int led_set_flash_duration(struct led_classdev_flash *fled_cdev, u32 duration);
+
#endif /* __LINUX_FLASH_LEDS_H_INCLUDED */
diff --git a/include/linux/led-class-multicolor.h b/include/linux/led-class-multicolor.h
index 210d57bcd767..db9f34c6736e 100644
--- a/include/linux/led-class-multicolor.h
+++ b/include/linux/led-class-multicolor.h
@@ -30,7 +30,6 @@ static inline struct led_classdev_mc *lcdev_to_mccdev(
return container_of(led_cdev, struct led_classdev_mc, led_cdev);
}
-#if IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR)
/**
* led_classdev_multicolor_register_ext - register a new object of led_classdev
* class with support for multicolor LEDs
@@ -64,34 +63,6 @@ int devm_led_classdev_multicolor_register_ext(struct device *parent,
void devm_led_classdev_multicolor_unregister(struct device *parent,
struct led_classdev_mc *mcled_cdev);
-#else
-
-static inline int led_classdev_multicolor_register_ext(struct device *parent,
- struct led_classdev_mc *mcled_cdev,
- struct led_init_data *init_data)
-{
- return 0;
-}
-
-static inline void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev) {};
-static inline int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev,
- enum led_brightness brightness)
-{
- return 0;
-}
-
-static inline int devm_led_classdev_multicolor_register_ext(struct device *parent,
- struct led_classdev_mc *mcled_cdev,
- struct led_init_data *init_data)
-{
- return 0;
-}
-
-static inline void devm_led_classdev_multicolor_unregister(struct device *parent,
- struct led_classdev_mc *mcled_cdev)
-{};
-
-#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR) */
static inline int led_classdev_multicolor_register(struct device *parent,
struct led_classdev_mc *mcled_cdev)
diff --git a/include/linux/leds-expresswire.h b/include/linux/leds-expresswire.h
new file mode 100644
index 000000000000..a422921f4159
--- /dev/null
+++ b/include/linux/leds-expresswire.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Shared library for Kinetic's ExpressWire protocol.
+ * This protocol works by pulsing the ExpressWire IC's control GPIO.
+ * ktd2692 and ktd2801 are known to use this protocol.
+ */
+
+#ifndef _LEDS_EXPRESSWIRE_H
+#define _LEDS_EXPRESSWIRE_H
+
+#include <linux/types.h>
+
+struct gpio_desc;
+
+struct expresswire_timing {
+ unsigned long poweroff_us;
+ unsigned long detect_delay_us;
+ unsigned long detect_us;
+ unsigned long data_start_us;
+ unsigned long end_of_data_low_us;
+ unsigned long end_of_data_high_us;
+ unsigned long short_bitset_us;
+ unsigned long long_bitset_us;
+};
+
+struct expresswire_common_props {
+ struct gpio_desc *ctrl_gpio;
+ struct expresswire_timing timing;
+};
+
+void expresswire_power_off(struct expresswire_common_props *props);
+void expresswire_enable(struct expresswire_common_props *props);
+void expresswire_start(struct expresswire_common_props *props);
+void expresswire_end(struct expresswire_common_props *props);
+void expresswire_set_bit(struct expresswire_common_props *props, bool bit);
+void expresswire_write_u8(struct expresswire_common_props *props, u8 val);
+
+#endif /* _LEDS_EXPRESSWIRE_H */
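
A hedged sketch of driver usage; whether expresswire_write_u8() frames the transfer itself or expresswire_start()/expresswire_end() must be called around it is not specified here, so see the ktd2692/ktd2801 drivers for real call sequences:

/* Hypothetical: wake the chip, then send one byte over ExpressWire. */
static void example_expresswire_write(struct expresswire_common_props *props,
				      u8 val)
{
	expresswire_enable(props);	/* detect/wake pulse sequence */
	expresswire_write_u8(props, val);
}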
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 329fd914cf24..b16b803cc1ac 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -10,17 +10,21 @@
#include <dt-bindings/leds/common.h>
#include <linux/device.h>
-#include <linux/kernfs.h>
-#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
+#include <linux/types.h>
#include <linux/workqueue.h>
-struct device;
-struct led_pattern;
+struct attribute_group;
struct device_node;
+struct fwnode_handle;
+struct gpio_desc;
+struct kernfs_node;
+struct led_pattern;
+struct platform_device;
+
/*
* LED Core
*/
@@ -33,6 +37,27 @@ enum led_brightness {
LED_FULL = 255,
};
+enum led_default_state {
+ LEDS_DEFSTATE_OFF = 0,
+ LEDS_DEFSTATE_ON = 1,
+ LEDS_DEFSTATE_KEEP = 2,
+};
+
+/**
+ * struct led_lookup_data - represents a single LED lookup entry
+ *
+ * @list: internal list of all LED lookup entries
+ * @provider: name of led_classdev providing the LED
+ * @dev_id: name of the device associated with this LED
+ * @con_id: name of the LED from the device's point of view
+ */
+struct led_lookup_data {
+ struct list_head list;
+ const char *provider;
+ const char *dev_id;
+ const char *con_id;
+};
+
struct led_init_data {
/* device fwnode handle */
struct fwnode_handle *fwnode;
@@ -57,6 +82,8 @@ struct led_init_data {
bool devname_mandatory;
};
+enum led_default_state led_init_default_state_get(struct fwnode_handle *fwnode);
+
struct led_hw_trigger_type {
int dummy;
};
@@ -65,6 +92,7 @@ struct led_classdev {
const char *name;
unsigned int brightness;
unsigned int max_brightness;
+ unsigned int color;
int flags;
/* Lower 16 bits reflect status */
@@ -79,6 +107,8 @@ struct led_classdev {
#define LED_BRIGHT_HW_CHANGED BIT(21)
#define LED_RETAIN_AT_SHUTDOWN BIT(22)
#define LED_INIT_DEFAULT_TRIGGER BIT(23)
+#define LED_REJECT_NAME_CONFLICT BIT(24)
+#define LED_MULTI_COLOR BIT(25)
/* set_brightness_work / blink_timer flags, atomic, private. */
unsigned long work_flags;
@@ -89,6 +119,10 @@ struct led_classdev {
#define LED_BLINK_INVERT 3
#define LED_BLINK_BRIGHTNESS_CHANGE 4
#define LED_BLINK_DISABLE 5
+ /* Brightness off also disables hw-blinking so it is a separate action */
+#define LED_SET_BRIGHTNESS_OFF 6
+#define LED_SET_BRIGHTNESS 7
+#define LED_SET_BLINK 8
/* Set LED brightness level
* Must not sleep. Use brightness_set_blocking for drivers
@@ -112,6 +146,10 @@ struct led_classdev {
* match the values specified exactly.
* Deactivate blinking again when the brightness is set to LED_OFF
* via the brightness_set() callback.
+ * For led_blink_set_nosleep() the LED core assumes that blink_set
+ * implementations of drivers which do not use brightness_set_blocking
+ * will not sleep. Therefore, if brightness_set_blocking is not set,
+ * this function must not sleep!
*/
int (*blink_set)(struct led_classdev *led_cdev,
unsigned long *delay_on,
@@ -133,8 +171,11 @@ struct led_classdev {
int new_blink_brightness;
void (*flash_resume)(struct led_classdev *led_cdev);
+ struct workqueue_struct *wq; /* LED workqueue */
struct work_struct set_brightness_work;
int delayed_set_value;
+ unsigned long delayed_delay_on;
+ unsigned long delayed_delay_off;
#ifdef CONFIG_LEDS_TRIGGERS
/* Protects the trigger data below */
@@ -148,6 +189,49 @@ struct led_classdev {
/* LEDs that have private triggers have this set */
struct led_hw_trigger_type *trigger_type;
+
+ /* Unique trigger name supported by LED set in hw control mode */
+ const char *hw_control_trigger;
+ /*
+ * Check if the LED driver supports the requested mode provided by the
+ * defined supported trigger to setup the LED to hw control mode.
+ *
+ * Return 0 on success. Return -EOPNOTSUPP when the passed flags are not
+ * supported and software fallback needs to be used.
+ * Return a negative error number on any other case for check fail due
+ * to various reason like device not ready or timeouts.
+ */
+ int (*hw_control_is_supported)(struct led_classdev *led_cdev,
+ unsigned long flags);
+ /*
+ * Activate hardware control, LED driver will use the provided flags
+ * from the supported trigger and setup the LED to be driven by hardware
+ * following the requested mode from the trigger flags.
+ * Deactivate hardware blink control by setting brightness to LED_OFF via
+ * the brightness_set() callback.
+ *
+ * Return 0 on success, a negative error number on flags apply fail.
+ */
+ int (*hw_control_set)(struct led_classdev *led_cdev,
+ unsigned long flags);
+ /*
+ * Get from the LED driver the current mode that the LED is set in hw
+ * control mode and put them in flags.
+ * Trigger can use this to get the initial state of a LED already set in
+ * hardware blink control.
+ *
+ * Return 0 on success, a negative error number on failing parsing the
+ * initial mode. Error from this function is NOT FATAL as the device
+ * may be in a not supported initial state by the attached LED trigger.
+ */
+ int (*hw_control_get)(struct led_classdev *led_cdev,
+ unsigned long *flags);
+ /*
+ * Get the device this LED blinks in response to.
+ * e.g. for a PHY LED, it is the network device. If the LED is
+ * not yet associated to a device, return NULL.
+ */
+ struct device *(*hw_control_get_device)(struct led_classdev *led_cdev);
#endif
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
@@ -155,7 +239,7 @@ struct led_classdev {
struct kernfs_node *brightness_hw_changed_kn;
#endif
- /* Ensures consistent access to the LED Flash Class device */
+ /* Ensures consistent access to the LED class device */
struct mutex led_access;
};
@@ -193,7 +277,6 @@ static inline int led_classdev_register(struct device *parent,
int devm_led_classdev_register_ext(struct device *parent,
struct led_classdev *led_cdev,
struct led_init_data *init_data);
-
static inline int devm_led_classdev_register(struct device *parent,
struct led_classdev *led_cdev)
{
@@ -205,10 +288,17 @@ void devm_led_classdev_unregister(struct device *parent,
void led_classdev_suspend(struct led_classdev *led_cdev);
void led_classdev_resume(struct led_classdev *led_cdev);
-extern struct led_classdev *of_led_get(struct device_node *np, int index);
+void led_add_lookup(struct led_lookup_data *led_lookup);
+void led_remove_lookup(struct led_lookup_data *led_lookup);
+
+struct led_classdev *__must_check led_get(struct device *dev, char *con_id);
+struct led_classdev *__must_check devm_led_get(struct device *dev, char *con_id);
+
extern void led_put(struct led_classdev *led_cdev);
struct led_classdev *__must_check devm_of_led_get(struct device *dev,
int index);
+struct led_classdev *__must_check devm_of_led_get_optional(struct device *dev,
+ int index);
/**
* led_blink_set - set blinking with software fallback
@@ -221,12 +311,27 @@ struct led_classdev *__must_check devm_of_led_get(struct device *dev,
* software blinking if there is no hardware blinking or if
* the LED refuses the passed values.
*
+ * This function may sleep!
+ *
* Note that if software blinking is active, simply calling
* led_cdev->brightness_set() will not stop the blinking,
- * use led_classdev_brightness_set() instead.
+ * use led_set_brightness() instead.
*/
void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
unsigned long *delay_off);
+
+/**
+ * led_blink_set_nosleep - set blinking, guaranteed to not sleep
+ * @led_cdev: the LED to start blinking
+ * @delay_on: the time it should be on (in ms)
+ * @delay_off: the time it should be off (in ms)
+ *
+ * This function makes the LED blink and is guaranteed to not sleep. Otherwise
+ * this is the same as led_blink_set(), see led_blink_set() for details.
+ */
+void led_blink_set_nosleep(struct led_classdev *led_cdev, unsigned long delay_on,
+ unsigned long delay_off);
+
/**
* led_blink_set_oneshot - do a oneshot software blink
* @led_cdev: the LED to start blinking
@@ -240,6 +345,8 @@ void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
*
* If invert is set, led blinks for delay_off first, then for
* delay_on and leave the led on after the on-off cycle.
+ *
+ * This function is guaranteed not to sleep.
*/
void led_blink_set_oneshot(struct led_classdev *led_cdev,
unsigned long *delay_on, unsigned long *delay_off,
@@ -269,6 +376,25 @@ void led_set_brightness(struct led_classdev *led_cdev, unsigned int brightness);
int led_set_brightness_sync(struct led_classdev *led_cdev, unsigned int value);
/**
+ * led_mc_set_brightness - set mc LED color intensity values and brightness
+ * @led_cdev: the LED to set
+ * @intensity_value: array of per-color intensity values to set
+ * @num_colors: number of entries in the intensity_value array
+ * @brightness: the brightness to set the LED to
+ *
+ * Set a multi-color LED's per-color intensity values and brightness.
+ * If necessary, this cancels the software blink timer. This function is
+ * guaranteed not to sleep.
+ *
+ * Calling this function on a non-multicolor led_classdev or with the wrong
+ * num_colors value is an error. In this case an error will be logged once
+ * and the call will do nothing.
+ */
+void led_mc_set_brightness(struct led_classdev *led_cdev,
+ unsigned int *intensity_value, unsigned int num_colors,
+ unsigned int brightness);
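
A usage sketch, assuming an RGB multicolor LED whose channels are declared in R, G, B order:

/* Hypothetical: set half-brightness yellow on an RGB mc LED. */
static void example_set_yellow(struct led_classdev *led_cdev)
{
	unsigned int intensity[3] = { 255, 255, 0 };	/* R, G, B */

	led_mc_set_brightness(led_cdev, intensity, 3, 128);
}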
+
+/**
* led_update_brightness - update LED brightness
* @led_cdev: the LED to query
*
@@ -323,6 +449,16 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
char *led_classdev_name);
/**
+ * led_get_color_name - get string representation of color ID
+ * @color_id: The LED_COLOR_ID_* constant
+ *
+ * Get the string name of a LED_COLOR_ID_* constant.
+ *
+ * Returns: A string constant or NULL on an invalid ID.
+ */
+const char *led_get_color_name(u8 color_id);
+
+/**
* led_sysfs_is_disabled - check if LED sysfs interface is disabled
* @led_cdev: the LED to query
*
@@ -350,11 +486,14 @@ struct led_trigger {
int (*activate)(struct led_classdev *led_cdev);
void (*deactivate)(struct led_classdev *led_cdev);
+ /* Brightness set by led_trigger_event */
+ enum led_brightness brightness;
+
/* LED-private triggers have this set */
struct led_hw_trigger_type *trigger_type;
/* LEDs under control by this trigger (for simple triggers) */
- rwlock_t leddev_list_lock;
+ spinlock_t leddev_list_lock;
struct list_head led_cdevs;
/* Link to next registered trigger */
@@ -382,11 +521,14 @@ void led_trigger_register_simple(const char *name,
struct led_trigger **trigger);
void led_trigger_unregister_simple(struct led_trigger *trigger);
void led_trigger_event(struct led_trigger *trigger, enum led_brightness event);
-void led_trigger_blink(struct led_trigger *trigger, unsigned long *delay_on,
- unsigned long *delay_off);
+void led_mc_trigger_event(struct led_trigger *trig,
+ unsigned int *intensity_value, unsigned int num_colors,
+ enum led_brightness brightness);
+void led_trigger_blink(struct led_trigger *trigger, unsigned long delay_on,
+ unsigned long delay_off);
void led_trigger_blink_oneshot(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off,
+ unsigned long delay_on,
+ unsigned long delay_off,
int invert);
void led_trigger_set_default(struct led_classdev *led_cdev);
int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger);
@@ -403,22 +545,11 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
return led_cdev->trigger_data;
}
-/**
- * led_trigger_rename_static - rename a trigger
- * @name: the new trigger name
- * @trig: the LED trigger to rename
- *
- * Change a LED trigger name by copying the string passed in
- * name into current trigger name, which MUST be large
- * enough for the new string.
- *
- * Note that name must NOT point to the same string used
- * during LED registration, as that could lead to races.
- *
- * This is meant to be used on triggers with statically
- * allocated name.
- */
-void led_trigger_rename_static(const char *name, struct led_trigger *trig);
+static inline enum led_brightness
+led_trigger_get_brightness(const struct led_trigger *trigger)
+{
+ return trigger ? trigger->brightness : LED_OFF;
+}
#define module_led_trigger(__led_trigger) \
module_driver(__led_trigger, led_trigger_register, \
@@ -435,12 +566,15 @@ static inline void led_trigger_register_simple(const char *name,
static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
static inline void led_trigger_event(struct led_trigger *trigger,
enum led_brightness event) {}
+static inline void led_mc_trigger_event(struct led_trigger *trig,
+ unsigned int *intensity_value, unsigned int num_colors,
+ enum led_brightness brightness) {}
static inline void led_trigger_blink(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off) {}
+ unsigned long delay_on,
+ unsigned long delay_off) {}
static inline void led_trigger_blink_oneshot(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off,
+ unsigned long delay_on,
+ unsigned long delay_off,
int invert) {}
static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
static inline int led_trigger_set(struct led_classdev *led_cdev,
@@ -456,8 +590,34 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
return NULL;
}
+static inline enum led_brightness
+led_trigger_get_brightness(const struct led_trigger *trigger)
+{
+ return LED_OFF;
+}
+
#endif /* CONFIG_LEDS_TRIGGERS */
+/* Trigger specific enum */
+enum led_trigger_netdev_modes {
+ TRIGGER_NETDEV_LINK = 0,
+ TRIGGER_NETDEV_LINK_10,
+ TRIGGER_NETDEV_LINK_100,
+ TRIGGER_NETDEV_LINK_1000,
+ TRIGGER_NETDEV_LINK_2500,
+ TRIGGER_NETDEV_LINK_5000,
+ TRIGGER_NETDEV_LINK_10000,
+ TRIGGER_NETDEV_HALF_DUPLEX,
+ TRIGGER_NETDEV_FULL_DUPLEX,
+ TRIGGER_NETDEV_TX,
+ TRIGGER_NETDEV_RX,
+ TRIGGER_NETDEV_TX_ERR,
+ TRIGGER_NETDEV_RX_ERR,
+
+ /* Keep last */
+ __TRIGGER_NETDEV_MAX,
+};
+
/* Trigger specific functions */
#ifdef CONFIG_LEDS_TRIGGER_DISK
void ledtrig_disk_activity(bool write);
@@ -479,6 +639,12 @@ static inline void ledtrig_flash_ctrl(bool on) {}
static inline void ledtrig_torch_ctrl(bool on) {}
#endif
+#if IS_REACHABLE(CONFIG_LEDS_TRIGGER_BACKLIGHT)
+void ledtrig_backlight_blank(bool blank);
+#else
+static inline void ledtrig_backlight_blank(bool blank) {}
+#endif
+
/*
* Generic LED platform data for describing LED names and default triggers.
*/
@@ -502,7 +668,6 @@ struct led_properties {
const char *label;
};
-struct gpio_desc;
typedef int (*gpio_blink_set_t)(struct gpio_desc *desc, int state,
unsigned long *delay_on,
unsigned long *delay_off);
@@ -520,9 +685,9 @@ struct gpio_led {
/* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
struct gpio_desc *gpiod;
};
-#define LEDS_GPIO_DEFSTATE_OFF 0
-#define LEDS_GPIO_DEFSTATE_ON 1
-#define LEDS_GPIO_DEFSTATE_KEEP 2
+#define LEDS_GPIO_DEFSTATE_OFF LEDS_DEFSTATE_OFF
+#define LEDS_GPIO_DEFSTATE_ON LEDS_DEFSTATE_ON
+#define LEDS_GPIO_DEFSTATE_KEEP LEDS_DEFSTATE_KEEP
struct gpio_led_platform_data {
int num_leds;
@@ -534,7 +699,7 @@ struct gpio_led_platform_data {
gpio_blink_set_t gpio_blink_set;
};
-#ifdef CONFIG_NEW_LEDS
+#ifdef CONFIG_LEDS_GPIO_REGISTER
struct platform_device *gpio_led_register_device(
int id, const struct gpio_led_platform_data *pdata);
#else
@@ -585,18 +750,4 @@ enum led_audio {
NUM_AUDIO_LEDS
};
-#if IS_ENABLED(CONFIG_LEDS_TRIGGER_AUDIO)
-enum led_brightness ledtrig_audio_get(enum led_audio type);
-void ledtrig_audio_set(enum led_audio type, enum led_brightness state);
-#else
-static inline enum led_brightness ledtrig_audio_get(enum led_audio type)
-{
- return LED_OFF;
-}
-static inline void ledtrig_audio_set(enum led_audio type,
- enum led_brightness state)
-{
-}
-#endif
-
#endif /* __LINUX_LEDS_H_INCLUDED */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5f550eb27f81..39534fafa36a 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -39,70 +39,92 @@
* compile-time options: to be removed as soon as all the drivers are
* converted to the new debugging mechanism
*/
-#undef ATA_DEBUG /* debugging output */
-#undef ATA_VERBOSE_DEBUG /* yet more debugging output */
#undef ATA_IRQ_TRAP /* define to ack screaming irqs */
-#undef ATA_NDEBUG /* define to disable quick runtime checks */
+/* defines only for the constants which don't work well as enums */
+#define ATA_TAG_POISON 0xfafbfcfdU
-/* note: prints function name for you */
-#ifdef ATA_DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#ifdef ATA_VERBOSE_DEBUG
-#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#else
-#define VPRINTK(fmt, args...)
-#endif /* ATA_VERBOSE_DEBUG */
-#else
-#define DPRINTK(fmt, args...)
-#define VPRINTK(fmt, args...)
-#endif /* ATA_DEBUG */
-
-#define ata_print_version_once(dev, version) \
-({ \
- static bool __print_once; \
- \
- if (!__print_once) { \
- __print_once = true; \
- ata_print_version(dev, version); \
- } \
-})
-
-/* NEW: debug levels */
-#define HAVE_LIBATA_MSG 1
+/*
+ * Quirk flag bits.
+ * ata_device->quirks is an unsigned int, so __ATA_QUIRK_MAX must not exceed 32.
+ */
+enum ata_quirks {
+ __ATA_QUIRK_DIAGNOSTIC, /* Failed boot diag */
+ __ATA_QUIRK_NODMA, /* DMA problems */
+ __ATA_QUIRK_NONCQ, /* Don't use NCQ */
+ __ATA_QUIRK_MAX_SEC_128, /* Limit max sects to 128 */
+ __ATA_QUIRK_BROKEN_HPA, /* Broken HPA */
+ __ATA_QUIRK_DISABLE, /* Disable it */
+ __ATA_QUIRK_HPA_SIZE, /* Native size off by one */
+ __ATA_QUIRK_IVB, /* cbl det validity bit bugs */
+ __ATA_QUIRK_STUCK_ERR, /* Stuck ERR on next PACKET */
+ __ATA_QUIRK_BRIDGE_OK, /* No bridge limits */
+ __ATA_QUIRK_ATAPI_MOD16_DMA, /* Use ATAPI DMA for commands that */
+ /* are not a multiple of 16 bytes */
+ __ATA_QUIRK_FIRMWARE_WARN, /* Firmware update warning */
+ __ATA_QUIRK_1_5_GBPS, /* Force 1.5 Gbps */
+ __ATA_QUIRK_NOSETXFER, /* Skip SETXFER, SATA only */
+ __ATA_QUIRK_BROKEN_FPDMA_AA, /* Skip AA */
+ __ATA_QUIRK_DUMP_ID, /* Dump IDENTIFY data */
+ __ATA_QUIRK_MAX_SEC_LBA48, /* Set max sects to 65535 */
+ __ATA_QUIRK_ATAPI_DMADIR, /* Device requires dmadir */
+ __ATA_QUIRK_NO_NCQ_TRIM, /* Do not use queued TRIM */
+ __ATA_QUIRK_NOLPM, /* Do not use LPM */
+ __ATA_QUIRK_WD_BROKEN_LPM, /* Some WDs have broken LPM */
+ __ATA_QUIRK_ZERO_AFTER_TRIM, /* Guarantees zero after trim */
+ __ATA_QUIRK_NO_DMA_LOG, /* Do not use DMA for log read */
+ __ATA_QUIRK_NOTRIM, /* Do not use TRIM */
+ __ATA_QUIRK_MAX_SEC_1024, /* Limit max sects to 1024 */
+ __ATA_QUIRK_MAX_SEC_8191, /* Limit max sects to 8191 */
+ __ATA_QUIRK_MAX_TRIM_128M, /* Limit max trim size to 128M */
+ __ATA_QUIRK_NO_NCQ_ON_ATI, /* Disable NCQ on ATI chipset */
+ __ATA_QUIRK_NO_LPM_ON_ATI, /* Disable LPM on ATI chipset */
+ __ATA_QUIRK_NO_ID_DEV_LOG, /* Identify device log missing */
+ __ATA_QUIRK_NO_LOG_DIR, /* Do not read log directory */
+ __ATA_QUIRK_NO_FUA, /* Do not use FUA */
+
+ __ATA_QUIRK_MAX,
+};
+/*
+ * Quirk flags: may be set by libata or controller drivers on drives.
+ * Some quirks may be drive/controller pair dependent.
+ */
enum {
- ATA_MSG_DRV = 0x0001,
- ATA_MSG_INFO = 0x0002,
- ATA_MSG_PROBE = 0x0004,
- ATA_MSG_WARN = 0x0008,
- ATA_MSG_MALLOC = 0x0010,
- ATA_MSG_CTL = 0x0020,
- ATA_MSG_INTR = 0x0040,
- ATA_MSG_ERR = 0x0080,
+ ATA_QUIRK_DIAGNOSTIC = (1U << __ATA_QUIRK_DIAGNOSTIC),
+ ATA_QUIRK_NODMA = (1U << __ATA_QUIRK_NODMA),
+ ATA_QUIRK_NONCQ = (1U << __ATA_QUIRK_NONCQ),
+ ATA_QUIRK_MAX_SEC_128 = (1U << __ATA_QUIRK_MAX_SEC_128),
+ ATA_QUIRK_BROKEN_HPA = (1U << __ATA_QUIRK_BROKEN_HPA),
+ ATA_QUIRK_DISABLE = (1U << __ATA_QUIRK_DISABLE),
+ ATA_QUIRK_HPA_SIZE = (1U << __ATA_QUIRK_HPA_SIZE),
+ ATA_QUIRK_IVB = (1U << __ATA_QUIRK_IVB),
+ ATA_QUIRK_STUCK_ERR = (1U << __ATA_QUIRK_STUCK_ERR),
+ ATA_QUIRK_BRIDGE_OK = (1U << __ATA_QUIRK_BRIDGE_OK),
+ ATA_QUIRK_ATAPI_MOD16_DMA = (1U << __ATA_QUIRK_ATAPI_MOD16_DMA),
+ ATA_QUIRK_FIRMWARE_WARN = (1U << __ATA_QUIRK_FIRMWARE_WARN),
+ ATA_QUIRK_1_5_GBPS = (1U << __ATA_QUIRK_1_5_GBPS),
+ ATA_QUIRK_NOSETXFER = (1U << __ATA_QUIRK_NOSETXFER),
+ ATA_QUIRK_BROKEN_FPDMA_AA = (1U << __ATA_QUIRK_BROKEN_FPDMA_AA),
+ ATA_QUIRK_DUMP_ID = (1U << __ATA_QUIRK_DUMP_ID),
+ ATA_QUIRK_MAX_SEC_LBA48 = (1U << __ATA_QUIRK_MAX_SEC_LBA48),
+ ATA_QUIRK_ATAPI_DMADIR = (1U << __ATA_QUIRK_ATAPI_DMADIR),
+ ATA_QUIRK_NO_NCQ_TRIM = (1U << __ATA_QUIRK_NO_NCQ_TRIM),
+ ATA_QUIRK_NOLPM = (1U << __ATA_QUIRK_NOLPM),
+ ATA_QUIRK_WD_BROKEN_LPM = (1U << __ATA_QUIRK_WD_BROKEN_LPM),
+ ATA_QUIRK_ZERO_AFTER_TRIM = (1U << __ATA_QUIRK_ZERO_AFTER_TRIM),
+ ATA_QUIRK_NO_DMA_LOG = (1U << __ATA_QUIRK_NO_DMA_LOG),
+ ATA_QUIRK_NOTRIM = (1U << __ATA_QUIRK_NOTRIM),
+ ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024),
+ ATA_QUIRK_MAX_SEC_8191 = (1U << __ATA_QUIRK_MAX_SEC_8191),
+ ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M),
+ ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI),
+ ATA_QUIRK_NO_LPM_ON_ATI = (1U << __ATA_QUIRK_NO_LPM_ON_ATI),
+ ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG),
+ ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR),
+ ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA),
};
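
Drivers and core paths test these masks against ata_device->quirks; a hedged sketch with a hypothetical helper:

/* Hypothetical helper: decide whether NCQ may be used on @dev. */
static bool example_ncq_allowed(struct ata_device *dev)
{
	return !(dev->quirks & ATA_QUIRK_NONCQ);
}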
-#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
-#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
-#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
-#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
-#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
-#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
-#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
-#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
-
-static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
-{
- if (dval < 0 || dval >= (sizeof(u32) * 8))
- return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
- if (!dval)
- return 0;
- return (1 << dval) - 1;
-}
-
-/* defines only for the constants which don't work well as enums */
-#define ATA_TAG_POISON 0xfafbfcfdU
-
enum {
/* various global constants */
LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
@@ -138,28 +160,35 @@ enum {
ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */
ATA_DFLAG_AN = (1 << 7), /* AN configured */
ATA_DFLAG_TRUSTED = (1 << 8), /* device supports trusted send/recv */
+ ATA_DFLAG_FUA = (1 << 9), /* device supports FUA */
ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */
- ATA_DFLAG_CFG_MASK = (1 << 12) - 1,
-
- ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */
- ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */
- ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
- ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
- ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
- ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */
- ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */
- ATA_DFLAG_NCQ_PRIO = (1 << 20), /* device supports NCQ priority */
- ATA_DFLAG_NCQ_PRIO_ENABLE = (1 << 21), /* Priority cmds sent to dev */
- ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
-
+ ATA_DFLAG_NCQ_SEND_RECV = (1 << 11), /* device supports NCQ SEND and RECV */
+ ATA_DFLAG_NCQ_PRIO = (1 << 12), /* device supports NCQ priority */
+ ATA_DFLAG_CDL = (1 << 13), /* supports cmd duration limits */
+ ATA_DFLAG_CFG_MASK = (1 << 14) - 1,
+
+ ATA_DFLAG_PIO = (1 << 14), /* device limited to PIO mode */
+ ATA_DFLAG_NCQ_OFF = (1 << 15), /* device limited to non-NCQ mode */
+ ATA_DFLAG_SLEEPING = (1 << 16), /* device is sleeping */
+ ATA_DFLAG_DUBIOUS_XFER = (1 << 17), /* data transfer not verified */
+ ATA_DFLAG_NO_UNLOAD = (1 << 18), /* device doesn't support unload */
+ ATA_DFLAG_UNLOCK_HPA = (1 << 19), /* unlock HPA */
+ ATA_DFLAG_INIT_MASK = (1 << 20) - 1,
+
+ ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 20), /* Priority cmds sent to dev */
+ ATA_DFLAG_CDL_ENABLED = (1 << 21), /* cmd duration limits are enabled */
+ ATA_DFLAG_RESUMING = (1 << 22), /* Device is resuming */
ATA_DFLAG_DETACH = (1 << 24),
ATA_DFLAG_DETACHED = (1 << 25),
-
ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */
ATA_DFLAG_DEVSLP = (1 << 27), /* device supports Device Sleep */
ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */
ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */
- ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */
+
+ ATA_DFLAG_FEATURES_MASK = (ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \
+ ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \
+ ATA_DFLAG_NCQ_PRIO | ATA_DFLAG_FUA | \
+ ATA_DFLAG_CDL),
ATA_DEV_UNKNOWN = 0, /* unknown device */
ATA_DEV_ATA = 1, /* ATA device */
@@ -187,7 +216,7 @@ enum {
ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */
ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */
ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */
- ATA_LFLAG_NO_DB_DELAY = (1 << 11), /* no debounce delay on link resume */
+ ATA_LFLAG_NO_DEBOUNCE_DELAY = (1 << 11), /* no debounce delay on link resume */
/* struct ata_port flags */
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
@@ -233,6 +262,7 @@ enum {
ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */
ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */
+ ATA_PFLAG_RESUMING = (1 << 16), /* port is being resumed */
ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */
@@ -244,15 +274,18 @@ enum {
/* struct ata_queued_cmd flags */
 ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi layer */
ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */
+ ATA_QCFLAG_RTF_FILLED = (1 << 2), /* result TF has been filled */
ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */
ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */
ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */
+ ATA_QCFLAG_HAS_CDL = (1 << 8), /* qc has a CDL descriptor set */
- ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */
+ ATA_QCFLAG_EH = (1 << 16), /* cmd aborted and owned by EH */
ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
+ ATA_QCFLAG_EH_SUCCESS_CMD = (1 << 19), /* EH should fetch sense for this successful cmd */
/* host set flags */
ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host only */
@@ -260,11 +293,13 @@ enum {
ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */
ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */
+ ATA_HOST_NO_PART = (1 << 4), /* Host does not support partial */
+ ATA_HOST_NO_SSC = (1 << 5), /* Host does not support slumber */
+ ATA_HOST_NO_DEVSLP = (1 << 6), /* Host does not support devslp */
+
/* bits 24:31 of host->flags are reserved for LLD specific flags */
- /* various lengths of time */
- ATA_TMOUT_BOOT = 30000, /* heuristic */
- ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */
+ /* Various lengths of time */
ATA_TMOUT_INTERNAL_QUICK = 5000,
ATA_TMOUT_MAX_PARK = 30000,
@@ -293,7 +328,7 @@ enum {
* advised to wait only for the following duration before
* doing SRST.
*/
- ATA_TMOUT_PMP_SRST_WAIT = 5000,
+ ATA_TMOUT_PMP_SRST_WAIT = 10000,
/* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
* be a spurious PHY event, so ignore the first PHY event that
@@ -319,7 +354,7 @@ enum {
PORT_DISABLED = 2,
/* encoding various smaller bitmaps into a single
- * unsigned long bitmap
+ * unsigned int bitmap
*/
ATA_NR_PIO_MODES = 7,
ATA_NR_MWDMA_MODES = 5,
@@ -351,8 +386,11 @@ enum {
ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
ATA_EH_ENABLE_LINK = (1 << 3),
ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */
+ ATA_EH_GET_SUCCESS_SENSE = (1 << 6), /* Get sense data for successful cmd */
+ ATA_EH_SET_ACTIVE = (1 << 7), /* Set a device to active power mode */
- ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK,
+ ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK |
+ ATA_EH_GET_SUCCESS_SENSE | ATA_EH_SET_ACTIVE,
ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET |
ATA_EH_ENABLE_LINK,
@@ -367,6 +405,7 @@ enum {
ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */
ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */
ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */
+ ATA_EHI_DID_PRINT_QUIRKS = (1 << 21), /* already printed quirks info */
ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
@@ -380,7 +419,6 @@ enum {
ATA_LINK_RESUME_TRIES = 5,
/* how hard are we gonna try to probe/recover devices */
- ATA_PROBE_MAX_TRIES = 3,
ATA_EH_DEV_TRIES = 3,
ATA_EH_PMP_TRIES = 5,
ATA_EH_PMP_LINK_TRIES = 3,
@@ -390,41 +428,9 @@ enum {
/* This should match the actual table size of
* ata_eh_cmd_timeout_table in libata-eh.c.
*/
- ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 6,
-
- /* Horkage types. May be set by libata or controller on drives
- (some horkage may be drive/controller pair dependent */
-
- ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */
- ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */
- ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */
- ATA_HORKAGE_MAX_SEC_128 = (1 << 3), /* Limit max sects to 128 */
- ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */
- ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */
- ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */
- ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */
- ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */
- ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
- ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
- not multiple of 16 bytes */
- ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
- ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
- ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
- ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
- ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
- ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
- ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
- ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
- ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
- ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
- ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
- ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */
- ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
- ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
- ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
-
- /* DMA mask for user DMA control: User visible values; DO NOT
- renumber */
+ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8,
+
+ /* User visible DMA mask for DMA control. DO NOT renumber. */
ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */
ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */
ATA_DMA_MASK_CFA = (1 << 2), /* DMA on CF Card */
@@ -467,12 +473,9 @@ enum {
};
enum ata_xfer_mask {
- ATA_MASK_PIO = ((1LU << ATA_NR_PIO_MODES) - 1)
- << ATA_SHIFT_PIO,
- ATA_MASK_MWDMA = ((1LU << ATA_NR_MWDMA_MODES) - 1)
- << ATA_SHIFT_MWDMA,
- ATA_MASK_UDMA = ((1LU << ATA_NR_UDMA_MODES) - 1)
- << ATA_SHIFT_UDMA,
+ ATA_MASK_PIO = ((1U << ATA_NR_PIO_MODES) - 1) << ATA_SHIFT_PIO,
+ ATA_MASK_MWDMA = ((1U << ATA_NR_MWDMA_MODES) - 1) << ATA_SHIFT_MWDMA,
+ ATA_MASK_UDMA = ((1U << ATA_NR_UDMA_MODES) - 1) << ATA_SHIFT_UDMA,
};
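As a hedged sketch of the narrowed mask type: the per-type masks combine via ata_pack_xfermask(), whose prototype is changed to unsigned int later in this patch (ATA_PIO4/ATA_MWDMA2/ATA_UDMA6 are the mode masks from <linux/ata.h>):

	/* sketch: build a combined transfer mask for a port */
	unsigned int xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA6);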
enum hsm_task_states {
@@ -500,16 +503,28 @@ enum ata_completion_errors {
};
/*
- * Link power management policy: If you alter this, you also need to
- * alter libata-scsi.c (for the ascii descriptions)
+ * Link Power Management (LPM) policies.
+ *
+ * The default LPM policy to use for a device link is defined using these values
+ * with the CONFIG_SATA_MOBILE_LPM_POLICY config option and applied through the
+ * target_lpm_policy field of struct ata_port.
+ *
+ * If you alter this, you also need to alter the policy names used with the
+ * sysfs attribute link_power_management_policy defined in libata-sata.c.
*/
enum ata_lpm_policy {
+ /* Keep firmware settings */
ATA_LPM_UNKNOWN,
+ /* No power savings (maximum performance) */
ATA_LPM_MAX_POWER,
+ /* HIPM (Partial) */
ATA_LPM_MED_POWER,
- ATA_LPM_MED_POWER_WITH_DIPM, /* Med power + DIPM as win IRST does */
- ATA_LPM_MIN_POWER_WITH_PARTIAL, /* Min Power + partial and slumber */
- ATA_LPM_MIN_POWER, /* Min power + no partial (slumber only) */
+ /* HIPM (Partial) and DIPM (Partial and Slumber) */
+ ATA_LPM_MED_POWER_WITH_DIPM,
+ /* HIPM (Partial and DevSleep) and DIPM (Partial and Slumber) */
+ ATA_LPM_MIN_POWER_WITH_PARTIAL,
+ /* HIPM (Slumber and DevSleep) and DIPM (Partial and Slumber) */
+ ATA_LPM_MIN_POWER,
};
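A small sketch of how a driver consumes these values; the target_lpm_policy field of struct ata_port (shown further below) carries the active policy:

static void example_port_init(struct ata_port *ap)
{
	/* sketch: force maximum performance (no link power savings) */
	ap->target_lpm_policy = ATA_LPM_MAX_POWER;
}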
enum ata_lpm_hints {
@@ -534,7 +549,9 @@ typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes)
extern struct device_attribute dev_attr_unload_heads;
#ifdef CONFIG_SATA_HOST
+extern struct device_attribute dev_attr_link_power_management_supported;
extern struct device_attribute dev_attr_link_power_management_policy;
+extern struct device_attribute dev_attr_ncq_prio_supported;
extern struct device_attribute dev_attr_ncq_prio_enable;
extern struct device_attribute dev_attr_em_message_type;
extern struct device_attribute dev_attr_em_message;
@@ -559,7 +576,10 @@ struct ata_taskfile {
u8 hob_lbam;
u8 hob_lbah;
- u8 feature;
+ union {
+ u8 error;
+ u8 feature;
+ };
u8 nsect;
u8 lbal;
u8 lbam;
@@ -567,7 +587,10 @@ struct ata_taskfile {
u8 device;
- u8 command; /* IO operation */
+ union {
+ u8 status;
+ u8 command;
+ };
u32 auxiliary; /* auxiliary field */
/* from SATA 3.1 and */
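With the unions above, completion paths can read the result registers under their device-to-host names; a sketch, assuming ATA_ERR from <linux/ata.h>:

static void example_dump_result(const struct ata_taskfile *rtf)
{
	/* sketch: same bytes as command/feature, read back as status/error */
	if (rtf->status & ATA_ERR)
		pr_err("error register 0x%02x\n", rtf->error);
}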
@@ -670,10 +693,37 @@ struct ata_ering {
struct ata_ering_entry ring[ATA_ERING_SIZE];
};
+struct ata_cpr {
+ u8 num;
+ u8 num_storage_elements;
+ u64 start_lba;
+ u64 num_lbas;
+};
+
+struct ata_cpr_log {
+ u8 nr_cpr;
+ struct ata_cpr cpr[] __counted_by(nr_cpr);
+};
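Because cpr[] is a __counted_by() flexible array, allocations should be sized with struct_size() from <linux/overflow.h>; a sketch, where nr_cpr is a hypothetical local:

	struct ata_cpr_log *cpr_log;

	cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
	if (cpr_log)
		cpr_log->nr_cpr = nr_cpr;	/* keep the counter in sync */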
+
+struct ata_cdl {
+ /*
+ * Buffer to cache the CDL log page 18h (command duration descriptors)
+ * for SCSI-ATA translation.
+ */
+ u8 desc_log_buf[ATA_LOG_CDL_SIZE];
+
+ /*
+ * Buffer to handle reading the sense data for successful NCQ Commands
+ * log page for commands using a CDL with one of the limits policy set
+ * to 0xD (successful completion with sense data available bit set).
+ */
+ u8 ncq_sense_log_buf[ATA_LOG_SENSE_NCQ_SIZE];
+};
+
struct ata_device {
struct ata_link *link;
unsigned int devno; /* 0 or 1 */
- unsigned int horkage; /* List of broken features */
+ unsigned int quirks; /* List of broken features */
unsigned long flags; /* ATA_DFLAG_xxx */
struct scsi_device *sdev; /* attached SCSI device */
void *private_data;
@@ -702,9 +752,9 @@ struct ata_device {
unsigned int cdb_len;
/* per-dev xfer mask */
- unsigned long pio_mask;
- unsigned long mwdma_mask;
- unsigned long udma_mask;
+ unsigned int pio_mask;
+ unsigned int mwdma_mask;
+ unsigned int udma_mask;
/* for CHS addressing */
u16 cylinders; /* Number of cylinders */
@@ -716,6 +766,9 @@ struct ata_device {
u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
} ____cacheline_aligned;
+ /* General Purpose Log Directory log page */
+ u8 gp_log_dir[ATA_SECT_SIZE] ____cacheline_aligned;
+
/* DEVSLP Timing Variables from Identify Device Data Log */
u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
@@ -729,10 +782,19 @@ struct ata_device {
u32 zac_zones_optimal_nonseq;
u32 zac_zones_max_open;
+ /* Concurrent positioning ranges */
+ struct ata_cpr_log *cpr_log;
+
+ /* Command Duration Limits support */
+ struct ata_cdl *cdl;
+
/* error history */
int spdn_cnt;
/* ering is CLEAR_END, read comment above CLEAR_END */
struct ata_ering ering;
+
+ /* For EH */
+ u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
};
/* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are
@@ -818,7 +880,6 @@ struct ata_port {
/* Flags that change dynamically, protected by ap->lock */
unsigned int pflags; /* ATA_PFLAG_xxx */
unsigned int print_id; /* user visible unique port ID */
- unsigned int local_port_no; /* host local port num */
unsigned int port_no; /* 0 based port no. inside the host */
#ifdef CONFIG_ATA_SFF
@@ -839,10 +900,8 @@ struct ata_port {
unsigned int cbl; /* cable type; ATA_CBL_xxx */
struct ata_queued_cmd qcmd[ATA_MAX_QUEUE + 1];
- unsigned long sas_tag_allocated; /* for sas tag allocation only */
u64 qc_active;
int nr_active_links; /* #links with active qcs */
- unsigned int sas_last_tag; /* track next tag hw expects */
struct ata_link link; /* host default link */
struct ata_link *slave_link; /* see ata_slave_link_init() */
@@ -858,11 +917,10 @@ struct ata_port {
struct mutex scsi_scan_mutex;
struct delayed_work hotplug_task;
- struct work_struct scsi_rescan_task;
+ struct delayed_work scsi_rescan_task;
unsigned int hsm_task_state;
- u32 msg_enable;
struct list_head eh_done_q;
wait_queue_head_t eh_wait_q;
int eh_tries;
@@ -872,7 +930,7 @@ struct ata_port {
enum ata_lpm_policy target_lpm_policy;
struct timer_list fastdrain_timer;
- unsigned long fastdrain_cnt;
+ unsigned int fastdrain_cnt;
async_cookie_t cookie;
@@ -882,8 +940,6 @@ struct ata_port {
#ifdef CONFIG_ATA_ACPI
struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
#endif
- /* owned by EH */
- u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
};
/* The following initializer overrides a method to NULL whether one of
@@ -893,6 +949,13 @@ struct ata_port {
*/
#define ATA_OP_NULL (void *)(unsigned long)(-ENOENT)
+struct ata_reset_operations {
+ ata_prereset_fn_t prereset;
+ ata_reset_fn_t softreset;
+ ata_reset_fn_t hardreset;
+ ata_postreset_fn_t postreset;
+};
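With the reset methods grouped into a struct (replacing the individual function pointers in struct ata_port_operations below), drivers can use nested designated initializers; a sketch with a hypothetical driver:

static struct ata_port_operations example_port_ops = {
	.inherits	 = &sata_port_ops,
	.reset.hardreset = sata_std_hardreset,
};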
+
struct ata_port_operations {
/*
* Command execution
@@ -901,30 +964,26 @@ struct ata_port_operations {
int (*check_atapi_dma)(struct ata_queued_cmd *qc);
enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc);
unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
- bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
+ void (*qc_fill_rtf)(struct ata_queued_cmd *qc);
+ void (*qc_ncq_fill_rtf)(struct ata_port *ap, u64 done_mask);
/*
* Configuration and exception handling
*/
int (*cable_detect)(struct ata_port *ap);
- unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask);
+ unsigned int (*mode_filter)(struct ata_device *dev, unsigned int xfer_mask);
void (*set_piomode)(struct ata_port *ap, struct ata_device *dev);
void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev);
int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev);
- unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id);
+ unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf,
+ __le16 *id);
void (*dev_config)(struct ata_device *dev);
void (*freeze)(struct ata_port *ap);
void (*thaw)(struct ata_port *ap);
- ata_prereset_fn_t prereset;
- ata_reset_fn_t softreset;
- ata_reset_fn_t hardreset;
- ata_postreset_fn_t postreset;
- ata_prereset_fn_t pmp_prereset;
- ata_reset_fn_t pmp_softreset;
- ata_reset_fn_t pmp_hardreset;
- ata_postreset_fn_t pmp_postreset;
+ struct ata_reset_operations reset;
+ struct ata_reset_operations pmp_reset;
void (*error_handler)(struct ata_port *ap);
void (*lost_interrupt)(struct ata_port *ap);
void (*post_internal_cmd)(struct ata_queued_cmd *qc);
@@ -987,12 +1046,6 @@ struct ata_port_operations {
ssize_t size);
/*
- * Obsolete
- */
- void (*phy_reset)(struct ata_port *ap);
- void (*eng_timeout)(struct ata_port *ap);
-
- /*
* ->inherits must be the last field and all the preceding
* fields must be pointers.
*/
@@ -1002,9 +1055,9 @@ struct ata_port_operations {
struct ata_port_info {
unsigned long flags;
unsigned long link_flags;
- unsigned long pio_mask;
- unsigned long mwdma_mask;
- unsigned long udma_mask;
+ unsigned int pio_mask;
+ unsigned int mwdma_mask;
+ unsigned int udma_mask;
struct ata_port_operations *port_ops;
void *private_data;
};
@@ -1067,27 +1120,29 @@ static inline int ata_port_is_dummy(struct ata_port *ap)
return ap->ops == &ata_dummy_port_ops;
}
+static inline bool ata_port_is_frozen(const struct ata_port *ap)
+{
+ return ap->pflags & ATA_PFLAG_FROZEN;
+}
+
extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
-extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
-extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
+extern struct ata_host *ata_host_alloc(struct device *dev, int n_ports);
extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi, int n_ports);
extern void ata_host_get(struct ata_host *host);
extern void ata_host_put(struct ata_host *host);
extern int ata_host_start(struct ata_host *host);
extern int ata_host_register(struct ata_host *host,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
extern int ata_host_activate(struct ata_host *host, int irq,
irq_handler_t irq_handler, unsigned long irq_flags,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
extern void ata_host_detach(struct ata_host *host);
extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *);
-extern int ata_scsi_detect(struct scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd,
void __user *arg);
#ifdef CONFIG_COMPAT
@@ -1106,7 +1161,7 @@ extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
extern bool ata_link_online(struct ata_link *link);
extern bool ata_link_offline(struct ata_link *link);
#ifdef CONFIG_PM
-extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
+extern void ata_host_suspend(struct ata_host *host, pm_message_t mesg);
extern void ata_host_resume(struct ata_host *host);
extern void ata_sas_port_suspend(struct ata_port *ap);
extern void ata_sas_port_resume(struct ata_port *ap);
@@ -1121,45 +1176,55 @@ static inline void ata_sas_port_resume(struct ata_port *ap)
extern int ata_ratelimit(void);
extern void ata_msleep(struct ata_port *ap, unsigned int msecs);
extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask,
- u32 val, unsigned long interval, unsigned long timeout);
+ u32 val, unsigned int interval, unsigned int timeout);
extern int atapi_cmd_type(u8 opcode);
-extern unsigned long ata_pack_xfermask(unsigned long pio_mask,
- unsigned long mwdma_mask, unsigned long udma_mask);
-extern void ata_unpack_xfermask(unsigned long xfer_mask,
- unsigned long *pio_mask, unsigned long *mwdma_mask,
- unsigned long *udma_mask);
-extern u8 ata_xfer_mask2mode(unsigned long xfer_mask);
-extern unsigned long ata_xfer_mode2mask(u8 xfer_mode);
-extern int ata_xfer_mode2shift(unsigned long xfer_mode);
-extern const char *ata_mode_string(unsigned long xfer_mask);
-extern unsigned long ata_id_xfermask(const u16 *id);
+extern unsigned int ata_pack_xfermask(unsigned int pio_mask,
+ unsigned int mwdma_mask,
+ unsigned int udma_mask);
+extern void ata_unpack_xfermask(unsigned int xfer_mask,
+ unsigned int *pio_mask,
+ unsigned int *mwdma_mask,
+ unsigned int *udma_mask);
+extern u8 ata_xfer_mask2mode(unsigned int xfer_mask);
+extern unsigned int ata_xfer_mode2mask(u8 xfer_mode);
+extern int ata_xfer_mode2shift(u8 xfer_mode);
+extern const char *ata_mode_string(unsigned int xfer_mask);
+extern unsigned int ata_id_xfermask(const u16 *id);
extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
-extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
+extern unsigned int ata_port_classify(struct ata_port *ap,
+ const struct ata_taskfile *tf);
extern void ata_dev_disable(struct ata_device *adev);
extern void ata_id_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern void ata_id_c_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
- struct ata_taskfile *tf, u16 *id);
+ struct ata_taskfile *tf, __le16 *id);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern u64 ata_qc_get_active(struct ata_port *ap);
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
extern int ata_std_bios_param(struct scsi_device *sdev,
- struct block_device *bdev,
+ struct gendisk *unused,
sector_t capacity, int geom[]);
extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
-extern int ata_scsi_slave_config(struct scsi_device *sdev);
-extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
+extern int ata_scsi_sdev_init(struct scsi_device *sdev);
+int ata_scsi_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim);
+extern void ata_scsi_sdev_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
-extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth);
+extern int ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
+ int queue_depth);
+extern int ata_ncq_prio_supported(struct ata_port *ap, struct scsi_device *sdev,
+ bool *supported);
+extern int ata_ncq_prio_enabled(struct ata_port *ap, struct scsi_device *sdev,
+ bool *enabled);
+extern int ata_ncq_prio_enable(struct ata_port *ap, struct scsi_device *sdev,
+ bool enable);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
-extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
+int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
@@ -1167,11 +1232,11 @@ extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *
* SATA specific code - drivers/ata/libata-sata.c
*/
#ifdef CONFIG_SATA_HOST
-extern const unsigned long sata_deb_timing_normal[];
-extern const unsigned long sata_deb_timing_hotplug[];
-extern const unsigned long sata_deb_timing_long[];
+extern const unsigned int sata_deb_timing_normal[];
+extern const unsigned int sata_deb_timing_hotplug[];
+extern const unsigned int sata_deb_timing_long[];
-static inline const unsigned long *
+static inline const unsigned int *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
@@ -1185,14 +1250,16 @@ extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
extern int sata_set_spd(struct ata_link *link);
+int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
extern int sata_link_hardreset(struct ata_link *link,
- const unsigned long *timing, unsigned long deadline,
+ const unsigned int *timing, unsigned long deadline,
bool *online, int (*check_ready)(struct ata_link *));
-extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
+extern int sata_link_resume(struct ata_link *link, const unsigned int *params,
unsigned long deadline);
extern void ata_eh_analyze_ncq_error(struct ata_link *link);
#else
-static inline const unsigned long *
+static inline const unsigned int *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
return NULL;
@@ -1211,8 +1278,13 @@ static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
return -EOPNOTSUPP;
}
static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; }
+static inline int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ return -EOPNOTSUPP;
+}
static inline int sata_link_hardreset(struct ata_link *link,
- const unsigned long *timing,
+ const unsigned int *timing,
unsigned long deadline,
bool *online,
int (*check_ready)(struct ata_link *))
@@ -1222,7 +1294,7 @@ static inline int sata_link_hardreset(struct ata_link *link,
return -EOPNOTSUPP;
}
static inline int sata_link_resume(struct ata_link *link,
- const unsigned long *params,
+ const unsigned int *params,
unsigned long deadline)
{
return -EOPNOTSUPP;
@@ -1230,21 +1302,17 @@ static inline int sata_link_resume(struct ata_link *link,
static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { }
#endif
extern int sata_link_debounce(struct ata_link *link,
- const unsigned long *params, unsigned long deadline);
+ const unsigned int *params, unsigned long deadline);
extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
bool spm_wakeup);
extern int ata_slave_link_init(struct ata_port *ap);
-extern void ata_sas_port_destroy(struct ata_port *);
-extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
- struct ata_port_info *, struct Scsi_Host *);
-extern void ata_sas_async_probe(struct ata_port *ap);
-extern int ata_sas_sync_probe(struct ata_port *ap);
-extern int ata_sas_port_init(struct ata_port *);
-extern int ata_sas_port_start(struct ata_port *ap);
-extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
-extern void ata_sas_tport_delete(struct ata_port *ap);
-extern void ata_sas_port_stop(struct ata_port *ap);
-extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
+extern void ata_port_probe(struct ata_port *ap);
+extern struct ata_port *ata_port_alloc(struct ata_host *host);
+extern void ata_port_free(struct ata_port *ap);
+extern int ata_tport_add(struct device *parent, struct ata_port *ap);
+extern void ata_tport_delete(struct ata_port *ap);
+int ata_sas_sdev_configure(struct scsi_device *sdev, struct queue_limits *lim,
+ struct ata_port *ap);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
extern void ata_tf_to_fis(const struct ata_taskfile *tf,
u8 pmp, int is_cmd, u8 *fis);
@@ -1288,7 +1356,7 @@ extern int ata_pci_device_resume(struct pci_dev *pdev);
struct platform_device;
-extern int ata_platform_remove_one(struct platform_device *pdev);
+extern void ata_platform_remove_one(struct platform_device *pdev);
/*
* ACPI - drivers/ata/libata-acpi.c
@@ -1302,9 +1370,9 @@ static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
}
int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm);
int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm);
-unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev,
- const struct ata_acpi_gtm *gtm);
-int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm);
+unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
+ const struct ata_acpi_gtm *gtm);
+int ata_acpi_cbl_pata_type(struct ata_port *ap);
#else
static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
{
@@ -1329,10 +1397,9 @@ static inline unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
return 0;
}
-static inline int ata_acpi_cbl_80wire(struct ata_port *ap,
- const struct ata_acpi_gtm *gtm)
+static inline int ata_acpi_cbl_pata_type(struct ata_port *ap)
{
- return 0;
+ return ATA_CBL_PATA40;
}
#endif
@@ -1351,9 +1418,6 @@ extern void ata_eh_thaw_port(struct ata_port *ap);
extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
-extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
- ata_postreset_fn_t postreset);
extern void ata_std_error_handler(struct ata_port *ap);
extern void ata_std_sched_eh(struct ata_port *ap);
extern void ata_std_end_eh(struct ata_port *ap);
@@ -1382,7 +1446,7 @@ extern int ata_link_nr_enabled(struct ata_link *link);
*/
extern const struct ata_port_operations ata_base_port_ops;
extern const struct ata_port_operations sata_port_ops;
-extern struct device_attribute *ata_common_sdev_attrs[];
+extern const struct attribute_group *ata_common_sdev_groups[];
/*
* All sht initializers (BASE, PIO, BMDMA, NCQ) must be instantiated
@@ -1397,26 +1461,42 @@ extern struct device_attribute *ata_common_sdev_attrs[];
ATA_SCSI_COMPAT_IOCTL \
.queuecommand = ata_scsi_queuecmd, \
.dma_need_drain = ata_scsi_dma_need_drain, \
- .can_queue = ATA_DEF_QUEUE, \
- .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.this_id = ATA_SHT_THIS_ID, \
.emulated = ATA_SHT_EMULATED, \
.proc_name = drv_name, \
- .slave_configure = ata_scsi_slave_config, \
- .slave_destroy = ata_scsi_slave_destroy, \
+ .sdev_init = ata_scsi_sdev_init, \
+ .sdev_destroy = ata_scsi_sdev_destroy, \
.bios_param = ata_std_bios_param, \
- .unlock_native_capacity = ata_scsi_unlock_native_capacity
+ .unlock_native_capacity = ata_scsi_unlock_native_capacity,\
+ .max_sectors = ATA_MAX_SECTORS_LBA48
-#define ATA_BASE_SHT(drv_name) \
+#define ATA_SUBBASE_SHT(drv_name) \
__ATA_BASE_SHT(drv_name), \
- .sdev_attrs = ata_common_sdev_attrs
+ .can_queue = ATA_DEF_QUEUE, \
+ .tag_alloc_policy_rr = true, \
+ .sdev_configure = ata_scsi_sdev_configure
+
+#define ATA_SUBBASE_SHT_QD(drv_name, drv_qd) \
+ __ATA_BASE_SHT(drv_name), \
+ .can_queue = drv_qd, \
+ .tag_alloc_policy_rr = true, \
+ .sdev_configure = ata_scsi_sdev_configure
+
+#define ATA_BASE_SHT(drv_name) \
+ ATA_SUBBASE_SHT(drv_name), \
+ .sdev_groups = ata_common_sdev_groups
#ifdef CONFIG_SATA_HOST
-extern struct device_attribute *ata_ncq_sdev_attrs[];
+extern const struct attribute_group *ata_ncq_sdev_groups[];
#define ATA_NCQ_SHT(drv_name) \
- __ATA_BASE_SHT(drv_name), \
- .sdev_attrs = ata_ncq_sdev_attrs, \
+ ATA_SUBBASE_SHT(drv_name), \
+ .sdev_groups = ata_ncq_sdev_groups, \
+ .change_queue_depth = ata_scsi_change_queue_depth
+
+#define ATA_NCQ_SHT_QD(drv_name, drv_qd) \
+ ATA_SUBBASE_SHT_QD(drv_name, drv_qd), \
+ .sdev_groups = ata_ncq_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth
#endif
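A sketch of a hypothetical driver instantiating the new fixed-queue-depth template; the const qualifier matches the scsi_host_template constification elsewhere in this patch:

static const struct scsi_host_template example_sht = {
	ATA_NCQ_SHT_QD("example_drv", 32),	/* sketch: 32-deep NCQ queue */
};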
@@ -1451,7 +1531,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap)
static inline bool ata_is_host_link(const struct ata_link *link)
{
- return 1;
+ return true;
}
#endif /* CONFIG_SATA_PMP */
@@ -1462,53 +1542,73 @@ static inline int sata_srst_pmp(struct ata_link *link)
return link->pmp;
}
-/*
- * printk helpers
- */
-__printf(3, 4)
-void ata_port_printk(const struct ata_port *ap, const char *level,
- const char *fmt, ...);
-__printf(3, 4)
-void ata_link_printk(const struct ata_link *link, const char *level,
- const char *fmt, ...);
-__printf(3, 4)
-void ata_dev_printk(const struct ata_device *dev, const char *level,
- const char *fmt, ...);
+#define ata_port_printk(level, ap, fmt, ...) \
+ pr_ ## level ("ata%u: " fmt, (ap)->print_id, ##__VA_ARGS__)
#define ata_port_err(ap, fmt, ...) \
- ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_port_printk(err, ap, fmt, ##__VA_ARGS__)
#define ata_port_warn(ap, fmt, ...) \
- ata_port_printk(ap, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_port_printk(warn, ap, fmt, ##__VA_ARGS__)
#define ata_port_notice(ap, fmt, ...) \
- ata_port_printk(ap, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_port_printk(notice, ap, fmt, ##__VA_ARGS__)
#define ata_port_info(ap, fmt, ...) \
- ata_port_printk(ap, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_port_printk(info, ap, fmt, ##__VA_ARGS__)
#define ata_port_dbg(ap, fmt, ...) \
- ata_port_printk(ap, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_port_printk(debug, ap, fmt, ##__VA_ARGS__)
+
+#define ata_link_printk(level, link, fmt, ...) \
+do { \
+ if (sata_pmp_attached((link)->ap) || \
+ (link)->ap->slave_link) \
+ pr_ ## level ("ata%u.%02u: " fmt, \
+ (link)->ap->print_id, \
+ (link)->pmp, \
+ ##__VA_ARGS__); \
+ else \
+ pr_ ## level ("ata%u: " fmt, \
+ (link)->ap->print_id, \
+ ##__VA_ARGS__); \
+} while (0)
#define ata_link_err(link, fmt, ...) \
- ata_link_printk(link, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_link_printk(err, link, fmt, ##__VA_ARGS__)
#define ata_link_warn(link, fmt, ...) \
- ata_link_printk(link, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_link_printk(warn, link, fmt, ##__VA_ARGS__)
#define ata_link_notice(link, fmt, ...) \
- ata_link_printk(link, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_link_printk(notice, link, fmt, ##__VA_ARGS__)
#define ata_link_info(link, fmt, ...) \
- ata_link_printk(link, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_link_printk(info, link, fmt, ##__VA_ARGS__)
#define ata_link_dbg(link, fmt, ...) \
- ata_link_printk(link, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_link_printk(debug, link, fmt, ##__VA_ARGS__)
+
+#define ata_dev_printk(level, dev, fmt, ...) \
+ pr_ ## level("ata%u.%02u: " fmt, \
+ (dev)->link->ap->print_id, \
+ (dev)->link->pmp + (dev)->devno, \
+ ##__VA_ARGS__)
#define ata_dev_err(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_dev_printk(err, dev, fmt, ##__VA_ARGS__)
#define ata_dev_warn(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_dev_printk(warn, dev, fmt, ##__VA_ARGS__)
#define ata_dev_notice(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_dev_printk(notice, dev, fmt, ##__VA_ARGS__)
#define ata_dev_info(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_dev_printk(info, dev, fmt, ##__VA_ARGS__)
#define ata_dev_dbg(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_dev_printk(debug, dev, fmt, ##__VA_ARGS__)
-void ata_print_version(const struct device *dev, const char *version);
+#define ata_dev_warn_once(dev, fmt, ...) \
+ pr_warn_once("ata%u.%02u: " fmt, \
+ (dev)->link->ap->print_id, \
+ (dev)->link->pmp + (dev)->devno, \
+ ##__VA_ARGS__)
+
+static inline void ata_print_version_once(const struct device *dev,
+ const char *version)
+{
+ dev_dbg_once(dev, "version %s\n", version);
+}
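Callers are unchanged by the macro conversion; a usage fragment as a sketch (dev and rc are hypothetical locals):

	/* expands to pr_warn() with the "ata%u.%02u:" port/device prefix */
	ata_dev_warn(dev, "revalidation failed (errno=%d)\n", rc);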
/*
* ata_eh_info helpers
@@ -1536,6 +1636,13 @@ void ata_port_desc(struct ata_port *ap, const char *fmt, ...);
extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
const char *name);
#endif
+static inline void ata_port_desc_misc(struct ata_port *ap, int irq)
+{
+ ata_port_desc(ap, "irq %d", irq);
+ ata_port_desc(ap, "lpm-pol %d", ap->target_lpm_policy);
+ if (ap->pflags & ATA_PFLAG_EXTERNAL)
+ ata_port_desc(ap, "ext");
+}
static inline bool ata_tag_internal(unsigned int tag)
{
@@ -1680,21 +1787,35 @@ extern struct ata_device *ata_dev_next(struct ata_device *dev,
(dev) = ata_dev_next((dev), (link), ATA_DITER_##mode))
/**
- * ata_ncq_enabled - Test whether NCQ is enabled
- * @dev: ATA device to test for
+ * ata_ncq_supported - Test whether NCQ is supported
+ * @dev: ATA device to test
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
- * 1 if NCQ is enabled for @dev, 0 otherwise.
+ * true if @dev supports NCQ, false otherwise.
*/
-static inline int ata_ncq_enabled(struct ata_device *dev)
+static inline bool ata_ncq_supported(struct ata_device *dev)
{
if (!IS_ENABLED(CONFIG_SATA_HOST))
- return 0;
- return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
- ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
+ return false;
+ return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
+}
+
+/**
+ * ata_ncq_enabled - Test whether NCQ is enabled
+ * @dev: ATA device to test
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * true if NCQ is enabled for @dev, false otherwise.
+ */
+static inline bool ata_ncq_enabled(struct ata_device *dev)
+{
+ return ata_ncq_supported(dev) && !(dev->flags & ATA_DFLAG_NCQ_OFF);
}
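Splitting support from enablement lets callers distinguish the two states; a sketch fragment:

	/* sketch: NCQ present but switched off via ATA_DFLAG_NCQ_OFF */
	if (ata_ncq_supported(dev) && !ata_ncq_enabled(dev))
		ata_dev_info(dev, "NCQ supported but disabled\n");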
static inline bool ata_fpdma_dsm_supported(struct ata_device *dev)
@@ -1742,11 +1863,11 @@ static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
{
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
- if (unlikely(!qc) || !ap->ops->error_handler)
+ if (unlikely(!qc))
return qc;
if ((qc->flags & (ATA_QCFLAG_ACTIVE |
- ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
+ ATA_QCFLAG_EH)) == ATA_QCFLAG_ACTIVE)
return qc;
return NULL;
@@ -1833,7 +1954,7 @@ static inline int ata_check_ready(u8 status)
}
static inline unsigned long ata_deadline(unsigned long from_jiffies,
- unsigned long timeout_msecs)
+ unsigned int timeout_msecs)
{
return from_jiffies + msecs_to_jiffies(timeout_msecs);
}
@@ -1842,23 +1963,21 @@ static inline unsigned long ata_deadline(unsigned long from_jiffies,
change in future hardware and specs, secondly 0xFF means 'no DMA' but is
   > UDMA_0. Dyma ddreigiau (Welsh: "here be dragons") */
-static inline int ata_using_mwdma(struct ata_device *adev)
+static inline bool ata_using_mwdma(struct ata_device *adev)
{
- if (adev->dma_mode >= XFER_MW_DMA_0 && adev->dma_mode <= XFER_MW_DMA_4)
- return 1;
- return 0;
+ return adev->dma_mode >= XFER_MW_DMA_0 &&
+ adev->dma_mode <= XFER_MW_DMA_4;
}
-static inline int ata_using_udma(struct ata_device *adev)
+static inline bool ata_using_udma(struct ata_device *adev)
{
- if (adev->dma_mode >= XFER_UDMA_0 && adev->dma_mode <= XFER_UDMA_7)
- return 1;
- return 0;
+ return adev->dma_mode >= XFER_UDMA_0 &&
+ adev->dma_mode <= XFER_UDMA_7;
}
-static inline int ata_dma_enabled(struct ata_device *adev)
+static inline bool ata_dma_enabled(struct ata_device *adev)
{
- return (adev->dma_mode == 0xFF ? 0 : 1);
+ return adev->dma_mode != 0xFF;
}
/**************************************************************************
@@ -1908,8 +2027,6 @@ extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device);
extern u8 ata_sff_check_status(struct ata_port *ap);
extern void ata_sff_pause(struct ata_port *ap);
extern void ata_sff_dma_pause(struct ata_port *ap);
-extern int ata_sff_busy_sleep(struct ata_port *ap,
- unsigned long timeout_pat, unsigned long timeout);
extern int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline);
extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
@@ -1920,7 +2037,6 @@ extern unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc,
extern unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc,
unsigned char *buf, unsigned int buflen, int rw);
extern void ata_sff_irq_on(struct ata_port *ap);
-extern void ata_sff_irq_clear(struct ata_port *ap);
extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status, int in_wq);
extern void ata_sff_queue_work(struct work_struct *work);
@@ -1928,7 +2044,7 @@ extern void ata_sff_queue_delayed_work(struct delayed_work *dwork,
unsigned long delay);
extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
-extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
+extern void ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
extern unsigned int ata_sff_port_intr(struct ata_port *ap,
struct ata_queued_cmd *qc);
extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance);
@@ -1955,10 +2071,10 @@ extern int ata_pci_sff_prepare_host(struct pci_dev *pdev,
struct ata_host **r_host);
extern int ata_pci_sff_activate_host(struct ata_host *host,
irq_handler_t irq_handler,
- struct scsi_host_template *sht);
+ const struct scsi_host_template *sht);
extern int ata_pci_sff_init_one(struct pci_dev *pdev,
const struct ata_port_info * const * ppi,
- struct scsi_host_template *sht, void *host_priv, int hflags);
+ const struct scsi_host_template *sht, void *host_priv, int hflags);
#endif /* CONFIG_PCI */
#ifdef CONFIG_ATA_BMDMA
@@ -1994,7 +2110,7 @@ extern int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
struct ata_host **r_host);
extern int ata_pci_bmdma_init_one(struct pci_dev *pdev,
const struct ata_port_info * const * ppi,
- struct scsi_host_template *sht,
+ const struct scsi_host_template *sht,
void *host_priv, int hflags);
#endif /* CONFIG_PCI */
#endif /* CONFIG_ATA_BMDMA */
@@ -2040,14 +2156,17 @@ static inline u8 ata_wait_idle(struct ata_port *ap)
{
u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
-#ifdef ATA_DEBUG
if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ)))
- ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n",
- status);
-#endif
+ ata_port_dbg(ap, "abnormal Status 0x%X\n", status);
return status;
}
+#else /* CONFIG_ATA_SFF */
+static inline int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_ATA_SFF */
#endif /* __LINUX_LIBATA_H__ */
diff --git a/include/linux/libgcc.h b/include/linux/libgcc.h
index b8dc75f0c830..0d68f9d6a6a7 100644
--- a/include/linux/libgcc.h
+++ b/include/linux/libgcc.h
@@ -27,4 +27,15 @@ typedef union {
long long ll;
} DWunion;
+long long notrace __ashldi3(long long u, word_type b);
+long long notrace __ashrdi3(long long u, word_type b);
+word_type notrace __cmpdi2(long long a, long long b);
+long long notrace __lshrdi3(long long u, word_type b);
+long long notrace __muldi3(long long u, long long v);
+word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b);
+
+#ifdef CONFIG_HAVE_ARCH_LIBGCC_H
+#include <asm/libgcc.h>
+#endif
+
#endif /* __ASM_LIBGCC_H */
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 89b69e645ac7..28f086c4a187 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -6,12 +6,12 @@
*/
#ifndef __LIBNVDIMM_H__
#define __LIBNVDIMM_H__
-#include <linux/kernel.h>
+
+#include <linux/io.h>
#include <linux/sizes.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uuid.h>
-#include <linux/spinlock.h>
-#include <linux/bio.h>
struct badrange_entry {
u64 start;
@@ -25,8 +25,6 @@ struct badrange {
};
enum {
- /* when a dimm supports both PMEM and BLK access a label is required */
- NDD_ALIASING = 0,
/* unarmed memory devices may not persist writes */
NDD_UNARMED = 1,
/* locked memory devices should not be accessed */
@@ -35,10 +33,16 @@ enum {
NDD_SECURITY_OVERWRITE = 3,
/* tracking whether or not there is a pending device reference */
NDD_WORK_PENDING = 4,
- /* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */
- NDD_NOBLK = 5,
/* dimm supports namespace labels */
NDD_LABELING = 6,
+ /*
+ * dimm contents have changed requiring invalidation of CPU caches prior
+ * to activation of a region that includes this device
+ */
+ NDD_INCOHERENT = 7,
+
+ /* dimm provider wants synchronous registration by __nvdimm_create() */
+ NDD_REGISTER_SYNC = 8,
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
@@ -63,6 +67,9 @@ enum {
/* Platform provides asynchronous flush mechanism */
ND_REGION_ASYNC = 3,
+ /* Region was created by CXL subsystem */
+ ND_REGION_CXL = 4,
+
/* mark newly adjusted resources as requiring a label update */
DPA_RESOURCE_ADJUSTED = 1 << 0,
};
@@ -73,7 +80,9 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf,
unsigned int buf_len, int *cmd_rc);
+struct attribute_group;
struct device_node;
+struct module;
struct nvdimm_bus_descriptor {
const struct attribute_group **attr_groups;
unsigned long cmd_mask;
@@ -114,6 +123,8 @@ struct nd_mapping_desc {
int position;
};
+struct bio;
+struct resource;
struct nd_region;
struct nd_region_desc {
struct resource *res;
@@ -126,6 +137,7 @@ struct nd_region_desc {
int numa_node;
int target_node;
unsigned long flags;
+ int memregion;
struct device_node *of_node;
int (*flush)(struct nd_region *nd_region, struct bio *bio);
};
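A sketch of a region provider filling the new memregion field, assuming the memregion_alloc() helper from <linux/memregion.h>:

	ndr_desc->memregion = memregion_alloc(GFP_KERNEL);
	if (ndr_desc->memregion < 0)
		return ndr_desc->memregion;	/* sketch: propagate allocation failure */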
@@ -139,23 +151,6 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev,
return (void __iomem *) devm_nvdimm_memremap(dev, offset, size, 0);
}
-struct nvdimm_bus;
-struct module;
-struct nd_blk_region;
-struct nd_blk_region_desc {
- int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
- int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
- void *iobuf, u64 len, int rw);
- struct nd_region_desc ndr_desc;
-};
-
-static inline struct nd_blk_region_desc *to_blk_region_desc(
- struct nd_region_desc *ndr_desc)
-{
- return container_of(ndr_desc, struct nd_blk_region_desc, ndr_desc);
-
-}
-
/*
* Note that separate bits for locked + unlocked are defined so that
* 'flags == 0' corresponds to an error / not-supported state.
@@ -198,6 +193,8 @@ struct nvdimm_security_ops {
int (*overwrite)(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data);
int (*query_overwrite)(struct nvdimm *nvdimm);
+ int (*disable_master)(struct nvdimm *nvdimm,
+ const struct nvdimm_key_data *key_data);
};
enum nvdimm_fwa_state {
@@ -243,6 +240,9 @@ struct nvdimm_fw_ops {
int (*arm)(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arg);
};
+struct kobject;
+struct nvdimm_bus;
+
void badrange_init(struct badrange *badrange);
int badrange_add(struct badrange *badrange, u64 addr, u64 length);
void badrange_forget(struct badrange *badrange, phys_addr_t start,
@@ -257,7 +257,6 @@ struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm);
struct nvdimm *to_nvdimm(struct device *dev);
struct nd_region *to_nd_region(struct device *dev);
struct device *nd_region_dev(struct nd_region *nd_region);
-struct nd_blk_region *to_nd_blk_region(struct device *dev);
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus);
struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus);
const char *nvdimm_name(struct nvdimm *nvdimm);
@@ -278,6 +277,8 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus,
return __nvdimm_create(nvdimm_bus, provider_data, groups, flags,
cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL);
}
+void nvdimm_delete(struct nvdimm *nvdimm);
+void nvdimm_region_delete(struct nd_region *nd_region);
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd);
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd);
@@ -294,10 +295,6 @@ struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc);
void *nd_region_provider_data(struct nd_region *nd_region);
-void *nd_blk_region_provider_data(struct nd_blk_region *ndbr);
-void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data);
-struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr);
-unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr);
unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
u64 nd_fletcher64(void *addr, size_t len, bool le);
diff --git a/include/linux/libps2.h b/include/linux/libps2.h
index 53f7e4d0f4b7..9ca9ce4e6e64 100644
--- a/include/linux/libps2.h
+++ b/include/linux/libps2.h
@@ -8,44 +8,59 @@
*/
#include <linux/bitops.h>
+#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/wait.h>
-#define PS2_CMD_SETSCALE11 0x00e6
-#define PS2_CMD_SETRES 0x10e8
-#define PS2_CMD_GETID 0x02f2
-#define PS2_CMD_RESET_BAT 0x02ff
+struct ps2dev;
-#define PS2_RET_BAT 0xaa
-#define PS2_RET_ID 0x00
-#define PS2_RET_ACK 0xfa
-#define PS2_RET_NAK 0xfe
-#define PS2_RET_ERR 0xfc
+/**
+ * enum ps2_disposition - indicates how received byte should be handled
+ * @PS2_PROCESS: pass to the main protocol handler, process normally
+ * @PS2_IGNORE: skip the byte
+ * @PS2_ERROR: do not process the byte, abort command in progress
+ */
+enum ps2_disposition {
+ PS2_PROCESS,
+ PS2_IGNORE,
+ PS2_ERROR,
+};
-#define PS2_FLAG_ACK BIT(0) /* Waiting for ACK/NAK */
-#define PS2_FLAG_CMD BIT(1) /* Waiting for a command to finish */
-#define PS2_FLAG_CMD1 BIT(2) /* Waiting for the first byte of command response */
-#define PS2_FLAG_WAITID BIT(3) /* Command executing is GET ID */
-#define PS2_FLAG_NAK BIT(4) /* Last transmission was NAKed */
-#define PS2_FLAG_ACK_CMD BIT(5) /* Waiting to ACK the command (first) byte */
+typedef enum ps2_disposition (*ps2_pre_receive_handler_t)(struct ps2dev *, u8,
+ unsigned int);
+typedef void (*ps2_receive_handler_t)(struct ps2dev *, u8);
+/**
+ * struct ps2dev - represents a device using PS/2 protocol
+ * @serio: a serio port used by the PS/2 device
+ * @cmd_mutex: a mutex ensuring that only one command is executing at a time
+ * @wait: a waitqueue used to signal completion from the serio interrupt handler
+ * @flags: various internal flags indicating stages of PS/2 command execution
+ * @cmdbuf: buffer holding command response
+ * @cmdcnt: outstanding number of bytes of the command response
+ * @nak: a byte transmitted by the device when it refuses command
+ * @pre_receive_handler: checks communication errors and returns disposition
+ * (&enum ps2_disposition) of the received data byte
+ * @receive_handler: main handler of particular PS/2 protocol, such as keyboard
+ * or mouse protocol
+ */
struct ps2dev {
struct serio *serio;
-
- /* Ensures that only one command is executing at a time */
struct mutex cmd_mutex;
-
- /* Used to signal completion from interrupt handler */
wait_queue_head_t wait;
-
unsigned long flags;
u8 cmdbuf[8];
u8 cmdcnt;
u8 nak;
+
+ ps2_pre_receive_handler_t pre_receive_handler;
+ ps2_receive_handler_t receive_handler;
};
-void ps2_init(struct ps2dev *ps2dev, struct serio *serio);
+void ps2_init(struct ps2dev *ps2dev, struct serio *serio,
+ ps2_pre_receive_handler_t pre_receive_handler,
+ ps2_receive_handler_t receive_handler);
int ps2_sendbyte(struct ps2dev *ps2dev, u8 byte, unsigned int timeout);
void ps2_drain(struct ps2dev *ps2dev, size_t maxbytes, unsigned int timeout);
void ps2_begin_command(struct ps2dev *ps2dev);
@@ -53,9 +68,8 @@ void ps2_end_command(struct ps2dev *ps2dev);
int __ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command);
int ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command);
int ps2_sliced_command(struct ps2dev *ps2dev, u8 command);
-bool ps2_handle_ack(struct ps2dev *ps2dev, u8 data);
-bool ps2_handle_response(struct ps2dev *ps2dev, u8 data);
-void ps2_cmd_aborted(struct ps2dev *ps2dev);
bool ps2_is_keyboard_id(u8 id);
+irqreturn_t ps2_interrupt(struct serio *serio, u8 data, unsigned int flags);
+
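A sketch of the new registration flow; the two callbacks replace the removed ps2_handle_ack()/ps2_handle_response() plumbing (the example_* names, ps2dev, and serio are hypothetical):

static enum ps2_disposition example_pre_receive(struct ps2dev *ps2dev, u8 data,
						unsigned int flags)
{
	return PS2_PROCESS;	/* sketch: no transport-level errors to filter */
}

static void example_receive(struct ps2dev *ps2dev, u8 data)
{
	/* sketch: protocol-specific byte handling */
}

	/* in the driver's connect() path: */
	ps2_init(ps2dev, serio, example_pre_receive, example_receive);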
#endif /* _LIBPS2_H */
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
deleted file mode 100644
index 0908abda9c1b..000000000000
--- a/include/linux/lightnvm.h
+++ /dev/null
@@ -1,697 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef NVM_H
-#define NVM_H
-
-#include <linux/blkdev.h>
-#include <linux/types.h>
-#include <uapi/linux/lightnvm.h>
-
-enum {
- NVM_IO_OK = 0,
- NVM_IO_REQUEUE = 1,
- NVM_IO_DONE = 2,
- NVM_IO_ERR = 3,
-
- NVM_IOTYPE_NONE = 0,
- NVM_IOTYPE_GC = 1,
-};
-
-/* common format */
-#define NVM_GEN_CH_BITS (8)
-#define NVM_GEN_LUN_BITS (8)
-#define NVM_GEN_BLK_BITS (16)
-#define NVM_GEN_RESERVED (32)
-
-/* 1.2 format */
-#define NVM_12_PG_BITS (16)
-#define NVM_12_PL_BITS (4)
-#define NVM_12_SEC_BITS (4)
-#define NVM_12_RESERVED (8)
-
-/* 2.0 format */
-#define NVM_20_SEC_BITS (24)
-#define NVM_20_RESERVED (8)
-
-enum {
- NVM_OCSSD_SPEC_12 = 12,
- NVM_OCSSD_SPEC_20 = 20,
-};
-
-struct ppa_addr {
- /* Generic structure for all addresses */
- union {
- /* generic device format */
- struct {
- u64 ch : NVM_GEN_CH_BITS;
- u64 lun : NVM_GEN_LUN_BITS;
- u64 blk : NVM_GEN_BLK_BITS;
- u64 reserved : NVM_GEN_RESERVED;
- } a;
-
- /* 1.2 device format */
- struct {
- u64 ch : NVM_GEN_CH_BITS;
- u64 lun : NVM_GEN_LUN_BITS;
- u64 blk : NVM_GEN_BLK_BITS;
- u64 pg : NVM_12_PG_BITS;
- u64 pl : NVM_12_PL_BITS;
- u64 sec : NVM_12_SEC_BITS;
- u64 reserved : NVM_12_RESERVED;
- } g;
-
- /* 2.0 device format */
- struct {
- u64 grp : NVM_GEN_CH_BITS;
- u64 pu : NVM_GEN_LUN_BITS;
- u64 chk : NVM_GEN_BLK_BITS;
- u64 sec : NVM_20_SEC_BITS;
- u64 reserved : NVM_20_RESERVED;
- } m;
-
- struct {
- u64 line : 63;
- u64 is_cached : 1;
- } c;
-
- u64 ppa;
- };
-};
-
-struct nvm_rq;
-struct nvm_id;
-struct nvm_dev;
-struct nvm_tgt_dev;
-struct nvm_chk_meta;
-
-typedef int (nvm_id_fn)(struct nvm_dev *);
-typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
-typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
-typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
- struct nvm_chk_meta *);
-typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *, void *);
-typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
-typedef void (nvm_destroy_dma_pool_fn)(void *);
-typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
- dma_addr_t *);
-typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
-
-struct nvm_dev_ops {
- nvm_id_fn *identity;
- nvm_op_bb_tbl_fn *get_bb_tbl;
- nvm_op_set_bb_fn *set_bb_tbl;
-
- nvm_get_chk_meta_fn *get_chk_meta;
-
- nvm_submit_io_fn *submit_io;
-
- nvm_create_dma_pool_fn *create_dma_pool;
- nvm_destroy_dma_pool_fn *destroy_dma_pool;
- nvm_dev_dma_alloc_fn *dev_dma_alloc;
- nvm_dev_dma_free_fn *dev_dma_free;
-};
-
-#ifdef CONFIG_NVM
-
-#include <linux/file.h>
-#include <linux/dmapool.h>
-
-enum {
- /* HW Responsibilities */
- NVM_RSP_L2P = 1 << 0,
- NVM_RSP_ECC = 1 << 1,
-
- /* Physical Adressing Mode */
- NVM_ADDRMODE_LINEAR = 0,
- NVM_ADDRMODE_CHANNEL = 1,
-
- /* Plane programming mode for LUN */
- NVM_PLANE_SINGLE = 1,
- NVM_PLANE_DOUBLE = 2,
- NVM_PLANE_QUAD = 4,
-
- /* Status codes */
- NVM_RSP_SUCCESS = 0x0,
- NVM_RSP_NOT_CHANGEABLE = 0x1,
- NVM_RSP_ERR_FAILWRITE = 0x40ff,
- NVM_RSP_ERR_EMPTYPAGE = 0x42ff,
- NVM_RSP_ERR_FAILECC = 0x4281,
- NVM_RSP_ERR_FAILCRC = 0x4004,
- NVM_RSP_WARN_HIGHECC = 0x4700,
-
- /* Device opcodes */
- NVM_OP_PWRITE = 0x91,
- NVM_OP_PREAD = 0x92,
- NVM_OP_ERASE = 0x90,
-
- /* PPA Command Flags */
- NVM_IO_SNGL_ACCESS = 0x0,
- NVM_IO_DUAL_ACCESS = 0x1,
- NVM_IO_QUAD_ACCESS = 0x2,
-
- /* NAND Access Modes */
- NVM_IO_SUSPEND = 0x80,
- NVM_IO_SLC_MODE = 0x100,
- NVM_IO_SCRAMBLE_ENABLE = 0x200,
-
- /* Block Types */
- NVM_BLK_T_FREE = 0x0,
- NVM_BLK_T_BAD = 0x1,
- NVM_BLK_T_GRWN_BAD = 0x2,
- NVM_BLK_T_DEV = 0x4,
- NVM_BLK_T_HOST = 0x8,
-
- /* Memory capabilities */
- NVM_ID_CAP_SLC = 0x1,
- NVM_ID_CAP_CMD_SUSPEND = 0x2,
- NVM_ID_CAP_SCRAMBLE = 0x4,
- NVM_ID_CAP_ENCRYPT = 0x8,
-
- /* Memory types */
- NVM_ID_FMTYPE_SLC = 0,
- NVM_ID_FMTYPE_MLC = 1,
-
- /* Device capabilities */
- NVM_ID_DCAP_BBLKMGMT = 0x1,
- NVM_UD_DCAP_ECC = 0x2,
-};
-
-struct nvm_id_lp_mlc {
- u16 num_pairs;
- u8 pairs[886];
-};
-
-struct nvm_id_lp_tbl {
- __u8 id[8];
- struct nvm_id_lp_mlc mlc;
-};
-
-struct nvm_addrf_12 {
- u8 ch_len;
- u8 lun_len;
- u8 blk_len;
- u8 pg_len;
- u8 pln_len;
- u8 sec_len;
-
- u8 ch_offset;
- u8 lun_offset;
- u8 blk_offset;
- u8 pg_offset;
- u8 pln_offset;
- u8 sec_offset;
-
- u64 ch_mask;
- u64 lun_mask;
- u64 blk_mask;
- u64 pg_mask;
- u64 pln_mask;
- u64 sec_mask;
-};
-
-struct nvm_addrf {
- u8 ch_len;
- u8 lun_len;
- u8 chk_len;
- u8 sec_len;
- u8 rsv_len[2];
-
- u8 ch_offset;
- u8 lun_offset;
- u8 chk_offset;
- u8 sec_offset;
- u8 rsv_off[2];
-
- u64 ch_mask;
- u64 lun_mask;
- u64 chk_mask;
- u64 sec_mask;
- u64 rsv_mask[2];
-};
-
-enum {
- /* Chunk states */
- NVM_CHK_ST_FREE = 1 << 0,
- NVM_CHK_ST_CLOSED = 1 << 1,
- NVM_CHK_ST_OPEN = 1 << 2,
- NVM_CHK_ST_OFFLINE = 1 << 3,
-
- /* Chunk types */
- NVM_CHK_TP_W_SEQ = 1 << 0,
- NVM_CHK_TP_W_RAN = 1 << 1,
- NVM_CHK_TP_SZ_SPEC = 1 << 4,
-};
-
-/*
- * Note: The structure size is linked to nvme_nvm_chk_meta such that the same
- * buffer can be used when converting from little endian to cpu addressing.
- */
-struct nvm_chk_meta {
- u8 state;
- u8 type;
- u8 wi;
- u8 rsvd[5];
- u64 slba;
- u64 cnlb;
- u64 wp;
-};
-
-struct nvm_target {
- struct list_head list;
- struct nvm_tgt_dev *dev;
- struct nvm_tgt_type *type;
- struct gendisk *disk;
-};
-
-#define ADDR_EMPTY (~0ULL)
-
-#define NVM_TARGET_DEFAULT_OP (101)
-#define NVM_TARGET_MIN_OP (3)
-#define NVM_TARGET_MAX_OP (80)
-
-#define NVM_VERSION_MAJOR 1
-#define NVM_VERSION_MINOR 0
-#define NVM_VERSION_PATCH 0
-
-#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */
-
-struct nvm_rq;
-typedef void (nvm_end_io_fn)(struct nvm_rq *);
-
-struct nvm_rq {
- struct nvm_tgt_dev *dev;
-
- struct bio *bio;
-
- union {
- struct ppa_addr ppa_addr;
- dma_addr_t dma_ppa_list;
- };
-
- struct ppa_addr *ppa_list;
-
- void *meta_list;
- dma_addr_t dma_meta_list;
-
- nvm_end_io_fn *end_io;
-
- uint8_t opcode;
- uint16_t nr_ppas;
- uint16_t flags;
-
- u64 ppa_status; /* ppa media status */
- int error;
-
- int is_seq; /* Sequential hint flag. 1.2 only */
-
- void *private;
-};
-
-static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
-{
- return pdu - sizeof(struct nvm_rq);
-}
-
-static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
-{
- return rqdata + 1;
-}
-
-static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
-{
- return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
-}
-
-enum {
- NVM_BLK_ST_FREE = 0x1, /* Free block */
- NVM_BLK_ST_TGT = 0x2, /* Block in use by target */
- NVM_BLK_ST_BAD = 0x8, /* Bad block */
-};
-
-/* Instance geometry */
-struct nvm_geo {
- /* device reported version */
- u8 major_ver_id;
- u8 minor_ver_id;
-
- /* kernel short version */
- u8 version;
-
- /* instance specific geometry */
- int num_ch;
- int num_lun; /* per channel */
-
- /* calculated values */
- int all_luns; /* across channels */
- int all_chunks; /* across channels */
-
- int op; /* over-provision in instance */
-
- sector_t total_secs; /* across channels */
-
- /* chunk geometry */
- u32 num_chk; /* chunks per lun */
- u32 clba; /* sectors per chunk */
- u16 csecs; /* sector size */
- u16 sos; /* out-of-band area size */
- bool ext; /* metadata in extended data buffer */
-	u32 mdts;	/* Max data transfer size */
-
-	/* device write constraints */
- u32 ws_min; /* minimum write size */
- u32 ws_opt; /* optimal write size */
- u32 mw_cunits; /* distance required for successful read */
- u32 maxoc; /* maximum open chunks */
- u32 maxocpu; /* maximum open chunks per parallel unit */
-
- /* device capabilities */
- u32 mccap;
-
- /* device timings */
- u32 trdt; /* Avg. Tread (ns) */
- u32 trdm; /* Max Tread (ns) */
- u32 tprt; /* Avg. Tprog (ns) */
- u32 tprm; /* Max Tprog (ns) */
- u32 tbet; /* Avg. Terase (ns) */
- u32 tbem; /* Max Terase (ns) */
-
- /* generic address format */
- struct nvm_addrf addrf;
-
- /* 1.2 compatibility */
- u8 vmnt;
- u32 cap;
- u32 dom;
-
- u8 mtype;
- u8 fmtype;
-
- u16 cpar;
- u32 mpos;
-
- u8 num_pln;
- u8 pln_mode;
- u16 num_pg;
- u16 fpg_sz;
-};
-
-/* sub-device structure */
-struct nvm_tgt_dev {
- /* Device information */
- struct nvm_geo geo;
-
- /* Base ppas for target LUNs */
- struct ppa_addr *luns;
-
- struct request_queue *q;
-
- struct nvm_dev *parent;
- void *map;
-};
-
-struct nvm_dev {
- struct nvm_dev_ops *ops;
-
- struct list_head devices;
-
- /* Device information */
- struct nvm_geo geo;
-
- unsigned long *lun_map;
- void *dma_pool;
-
- /* Backend device */
- struct request_queue *q;
- char name[DISK_NAME_LEN];
- void *private_data;
-
- struct kref ref;
- void *rmap;
-
- struct mutex mlock;
- spinlock_t lock;
-
- /* target management */
- struct list_head area_list;
- struct list_head targets;
-};
-
-static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
- struct ppa_addr r)
-{
- struct nvm_geo *geo = &dev->geo;
- struct ppa_addr l;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;
-
- l.ppa = ((u64)r.g.ch) << ppaf->ch_offset;
- l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset;
- l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset;
- l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset;
- l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset;
- l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset;
- } else {
- struct nvm_addrf *lbaf = &geo->addrf;
-
- l.ppa = ((u64)r.m.grp) << lbaf->ch_offset;
- l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset;
- l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset;
- l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset;
- }
-
- return l;
-}
-
-static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
- struct ppa_addr r)
-{
- struct nvm_geo *geo = &dev->geo;
- struct ppa_addr l;
-
- l.ppa = 0;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf;
-
- l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset;
- l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset;
- l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset;
- l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset;
- l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset;
- l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset;
- } else {
- struct nvm_addrf *lbaf = &geo->addrf;
-
- l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset;
- l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset;
- l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset;
- l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset;
- }
-
- return l;
-}
-
-static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf,
- struct ppa_addr p)
-{
- struct nvm_geo *geo = &dev->geo;
- u64 caddr;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf;
-
- caddr = (u64)p.g.pg << ppaf->pg_offset;
- caddr |= (u64)p.g.pl << ppaf->pln_offset;
- caddr |= (u64)p.g.sec << ppaf->sec_offset;
- } else {
- caddr = p.m.sec;
- }
-
- return caddr;
-}
-
-static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev,
- void *addrf, u32 ppa32)
-{
- struct ppa_addr ppa64;
-
- ppa64.ppa = 0;
-
- if (ppa32 == -1) {
- ppa64.ppa = ADDR_EMPTY;
- } else if (ppa32 & (1U << 31)) {
- ppa64.c.line = ppa32 & ((~0U) >> 1);
- ppa64.c.is_cached = 1;
- } else {
- struct nvm_geo *geo = &dev->geo;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf = addrf;
-
- ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
- ppaf->ch_offset;
- ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
- ppaf->lun_offset;
- ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
- ppaf->blk_offset;
- ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
- ppaf->pg_offset;
- ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
- ppaf->pln_offset;
- ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
- ppaf->sec_offset;
- } else {
- struct nvm_addrf *lbaf = addrf;
-
- ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
- lbaf->ch_offset;
- ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
- lbaf->lun_offset;
- ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
- lbaf->chk_offset;
- ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
- lbaf->sec_offset;
- }
- }
-
- return ppa64;
-}
-
-static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev,
- void *addrf, struct ppa_addr ppa64)
-{
- u32 ppa32 = 0;
-
- if (ppa64.ppa == ADDR_EMPTY) {
- ppa32 = ~0U;
- } else if (ppa64.c.is_cached) {
- ppa32 |= ppa64.c.line;
- ppa32 |= 1U << 31;
- } else {
- struct nvm_geo *geo = &dev->geo;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf = addrf;
-
- ppa32 |= ppa64.g.ch << ppaf->ch_offset;
- ppa32 |= ppa64.g.lun << ppaf->lun_offset;
- ppa32 |= ppa64.g.blk << ppaf->blk_offset;
- ppa32 |= ppa64.g.pg << ppaf->pg_offset;
- ppa32 |= ppa64.g.pl << ppaf->pln_offset;
- ppa32 |= ppa64.g.sec << ppaf->sec_offset;
- } else {
- struct nvm_addrf *lbaf = addrf;
-
- ppa32 |= ppa64.m.grp << lbaf->ch_offset;
- ppa32 |= ppa64.m.pu << lbaf->lun_offset;
- ppa32 |= ppa64.m.chk << lbaf->chk_offset;
- ppa32 |= ppa64.m.sec << lbaf->sec_offset;
- }
- }
-
- return ppa32;
-}
-
-static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
- struct ppa_addr *ppa)
-{
- struct nvm_geo *geo = &dev->geo;
- int last = 0;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- int sec = ppa->g.sec;
-
- sec++;
- if (sec == geo->ws_min) {
- int pg = ppa->g.pg;
-
- sec = 0;
- pg++;
- if (pg == geo->num_pg) {
- int pl = ppa->g.pl;
-
- pg = 0;
- pl++;
- if (pl == geo->num_pln)
- last = 1;
-
- ppa->g.pl = pl;
- }
- ppa->g.pg = pg;
- }
- ppa->g.sec = sec;
- } else {
- ppa->m.sec++;
- if (ppa->m.sec == geo->clba)
- last = 1;
- }
-
- return last;
-}
-
-typedef sector_t (nvm_tgt_capacity_fn)(void *);
-typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
- int flags);
-typedef void (nvm_tgt_exit_fn)(void *, bool);
-typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
-typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
-
-enum {
- NVM_TGT_F_DEV_L2P = 0,
- NVM_TGT_F_HOST_L2P = 1 << 0,
-};
-
-struct nvm_tgt_type {
- const char *name;
- unsigned int version[3];
- int flags;
-
- /* target entry points */
- const struct block_device_operations *bops;
- nvm_tgt_capacity_fn *capacity;
-
- /* module-specific init/teardown */
- nvm_tgt_init_fn *init;
- nvm_tgt_exit_fn *exit;
-
- /* sysfs */
- nvm_tgt_sysfs_init_fn *sysfs_init;
- nvm_tgt_sysfs_exit_fn *sysfs_exit;
-
- /* For internal use */
- struct list_head list;
- struct module *owner;
-};
-
-extern int nvm_register_tgt_type(struct nvm_tgt_type *);
-extern void nvm_unregister_tgt_type(struct nvm_tgt_type *);
-
-extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *);
-extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t);
-
-extern struct nvm_dev *nvm_alloc_dev(int);
-extern int nvm_register(struct nvm_dev *);
-extern void nvm_unregister(struct nvm_dev *);
-
-extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
- int, struct nvm_chk_meta *);
-extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
- int, int);
-extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *, void *);
-extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *, void *);
-extern void nvm_end_io(struct nvm_rq *);
-
-#else /* CONFIG_NVM */
-struct nvm_dev_ops;
-
-static inline struct nvm_dev *nvm_alloc_dev(int node)
-{
- return ERR_PTR(-EINVAL);
-}
-static inline int nvm_register(struct nvm_dev *dev)
-{
- return -EINVAL;
-}
-static inline void nvm_unregister(struct nvm_dev *dev) {}
-#endif /* CONFIG_NVM */
-#endif /* LIGHTNVM.H */
diff --git a/include/linux/limits.h b/include/linux/limits.h
index b568b9c30bbf..38eb7f6f7e88 100644
--- a/include/linux/limits.h
+++ b/include/linux/limits.h
@@ -7,8 +7,11 @@
#include <vdso/limits.h>
#define SIZE_MAX (~(size_t)0)
+#define SSIZE_MAX ((ssize_t)(SIZE_MAX >> 1))
#define PHYS_ADDR_MAX (~(phys_addr_t)0)
+#define RESOURCE_SIZE_MAX ((resource_size_t)~0)
+
#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX >> 1))
#define S8_MIN ((s8)(-S8_MAX - 1))
diff --git a/include/linux/linear_range.h b/include/linux/linear_range.h
index 17b5943727d5..2e4f4c3539c0 100644
--- a/include/linux/linear_range.h
+++ b/include/linux/linear_range.h
@@ -26,6 +26,17 @@ struct linear_range {
unsigned int step;
};
+#define LINEAR_RANGE(_min, _min_sel, _max_sel, _step) \
+ { \
+ .min = _min, \
+ .min_sel = _min_sel, \
+ .max_sel = _max_sel, \
+ .step = _step, \
+ }
+
+#define LINEAR_RANGE_IDX(_idx, _min, _min_sel, _max_sel, _step) \
+ [_idx] = LINEAR_RANGE(_min, _min_sel, _max_sel, _step)
+
unsigned int linear_range_values_in_range(const struct linear_range *r);
unsigned int linear_range_values_in_range_array(const struct linear_range *r,
int ranges);
@@ -41,6 +52,8 @@ int linear_range_get_selector_low(const struct linear_range *r,
int linear_range_get_selector_high(const struct linear_range *r,
unsigned int val, unsigned int *selector,
bool *found);
+void linear_range_get_selector_within(const struct linear_range *r,
+ unsigned int val, unsigned int *selector);
int linear_range_get_selector_low_array(const struct linear_range *r,
int ranges, unsigned int val,
unsigned int *selector, bool *found);
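
A minimal sketch of how a consumer might use the new LINEAR_RANGE() initializer together with linear_range_get_selector_within(); the range values here are illustrative assumptions, not taken from any real driver:

	/* hypothetical part: selectors 0..15 encode 800000 uV + sel * 50000 uV */
	static const struct linear_range vout_range =
		LINEAR_RANGE(800000, 0, 15, 50000);

	static unsigned int vout_to_sel(unsigned int uv)
	{
		unsigned int sel;

		/* clamps uv into the range, so a selector is always produced */
		linear_range_get_selector_within(&vout_range, uv, &sel);
		return sel;
	}
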
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index dbf8506decca..b11660b706c5 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -69,8 +69,8 @@
#endif
#ifndef __ALIGN
-#define __ALIGN .align 4,0x90
-#define __ALIGN_STR ".align 4,0x90"
+#define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT
+#define __ALIGN_STR __stringify(__ALIGN)
#endif
#ifdef __ASSEMBLY__
@@ -134,10 +134,6 @@
.size name, .-name
#endif
-/* If symbol 'name' is treated as a subroutine (gets called, and returns)
- * then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of
- * static analysis tools such as stack depth analyzer.
- */
#ifndef ENDPROC
/* deprecated, use SYM_FUNC_END */
#define ENDPROC(name) \
@@ -165,7 +161,15 @@
#ifndef SYM_END
#define SYM_END(name, sym_type) \
.type name sym_type ASM_NL \
- .size name, .-name
+ .set .L__sym_size_##name, .-name ASM_NL \
+ .size name, .L__sym_size_##name
+#endif
+
+/* SYM_ALIAS -- use only if you have to */
+#ifndef SYM_ALIAS
+#define SYM_ALIAS(alias, name, linkage) \
+ linkage(alias) ASM_NL \
+ .set alias, name ASM_NL
#endif
/* === code annotations === */
@@ -200,30 +204,8 @@
SYM_ENTRY(name, linkage, SYM_A_NONE)
#endif
-/*
- * SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for one
- * function
- */
-#ifndef SYM_FUNC_START_LOCAL_ALIAS
-#define SYM_FUNC_START_LOCAL_ALIAS(name) \
- SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
-#endif
-
-/*
- * SYM_FUNC_START_ALIAS -- use where there are two global names for one
- * function
- */
-#ifndef SYM_FUNC_START_ALIAS
-#define SYM_FUNC_START_ALIAS(name) \
- SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
-#endif
-
/* SYM_FUNC_START -- use for global functions */
#ifndef SYM_FUNC_START
-/*
- * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two
- * later.
- */
#define SYM_FUNC_START(name) \
SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
#endif
@@ -236,7 +218,6 @@
/* SYM_FUNC_START_LOCAL -- use for local functions */
#ifndef SYM_FUNC_START_LOCAL
-/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */
#define SYM_FUNC_START_LOCAL(name) \
SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
#endif
@@ -259,22 +240,39 @@
SYM_START(name, SYM_L_WEAK, SYM_A_NONE)
#endif
-/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */
-#ifndef SYM_FUNC_END_ALIAS
-#define SYM_FUNC_END_ALIAS(name) \
- SYM_END(name, SYM_T_FUNC)
-#endif
-
/*
* SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
* SYM_FUNC_START_WEAK, ...
*/
#ifndef SYM_FUNC_END
-/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */
#define SYM_FUNC_END(name) \
SYM_END(name, SYM_T_FUNC)
#endif
+/*
+ * SYM_FUNC_ALIAS -- define a global alias for an existing function
+ */
+#ifndef SYM_FUNC_ALIAS
+#define SYM_FUNC_ALIAS(alias, name) \
+ SYM_ALIAS(alias, name, SYM_L_GLOBAL)
+#endif
+
+/*
+ * SYM_FUNC_ALIAS_LOCAL -- define a local alias for an existing function
+ */
+#ifndef SYM_FUNC_ALIAS_LOCAL
+#define SYM_FUNC_ALIAS_LOCAL(alias, name) \
+ SYM_ALIAS(alias, name, SYM_L_LOCAL)
+#endif
+
+/*
+ * SYM_FUNC_ALIAS_WEAK -- define a weak global alias for an existing function
+ */
+#ifndef SYM_FUNC_ALIAS_WEAK
+#define SYM_FUNC_ALIAS_WEAK(alias, name) \
+ SYM_ALIAS(alias, name, SYM_L_WEAK)
+#endif
+
/* SYM_CODE_START -- use for non-C (special) functions */
#ifndef SYM_CODE_START
#define SYM_CODE_START(name) \
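
With the *_ALIAS start/end variants removed, the replacement pattern is to define the function once and attach the alias after SYM_FUNC_END(); a sketch in the style of the x86 string routines (the names and the RET are illustrative only):

	SYM_FUNC_START(__memcpy)
		/* real implementation body */
		RET
	SYM_FUNC_END(__memcpy)

	/* memcpy becomes an STT_FUNC symbol resolving to the same address */
	SYM_FUNC_ALIAS(memcpy, __memcpy)
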
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
index f8397f300fcd..3b9de09871f6 100644
--- a/include/linux/linkmode.h
+++ b/include/linux/linkmode.h
@@ -10,6 +10,11 @@ static inline void linkmode_zero(unsigned long *dst)
bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
+static inline void linkmode_fill(unsigned long *dst)
+{
+ bitmap_fill(dst, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
static inline void linkmode_copy(unsigned long *dst, const unsigned long *src)
{
bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -32,16 +37,17 @@ static inline bool linkmode_empty(const unsigned long *src)
return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2)
+static inline bool linkmode_andnot(unsigned long *dst,
+ const unsigned long *src1,
+ const unsigned long *src2)
{
return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static inline void linkmode_set_bit(int nr, volatile unsigned long *addr)
-{
- __set_bit(nr, addr);
-}
+#define linkmode_test_bit test_bit
+#define linkmode_set_bit __set_bit
+#define linkmode_clear_bit __clear_bit
+#define linkmode_mod_bit __assign_bit
static inline void linkmode_set_bit_array(const int *array, int array_size,
unsigned long *addr)
@@ -52,30 +58,6 @@ static inline void linkmode_set_bit_array(const int *array, int array_size,
linkmode_set_bit(array[i], addr);
}
-static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr)
-{
- __clear_bit(nr, addr);
-}
-
-static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr,
- int set)
-{
- if (set)
- linkmode_set_bit(nr, addr);
- else
- linkmode_clear_bit(nr, addr);
-}
-
-static inline void linkmode_change_bit(int nr, volatile unsigned long *addr)
-{
- __change_bit(nr, addr);
-}
-
-static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr)
-{
- return test_bit(nr, addr);
-}
-
static inline int linkmode_equal(const unsigned long *src1,
const unsigned long *src2)
{
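
Callers are unaffected by the conversion of the bit helpers to macros; a small sketch of typical PHY-driver usage, assuming the __ETHTOOL_DECLARE_LINK_MODE_MASK() helper from linux/ethtool.h:

	static void my_phy_setup(bool autoneg)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);

		linkmode_zero(supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported);
		linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported, autoneg);

		if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported))
			; /* driver-specific handling */
	}
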
diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h
index d4d5b93efe84..e37699b7e839 100644
--- a/include/linux/linux_logo.h
+++ b/include/linux/linux_logo.h
@@ -10,9 +10,6 @@
* Copyright (C) 2001 Greg Banks <gnb@alphalink.com.au>
* Copyright (C) 2001 Jan-Benedict Glaw <jbglaw@lug-owl.de>
* Copyright (C) 2003 Geert Uytterhoeven <geert@linux-m68k.org>
- *
- * Serial_console ascii image can be any size,
- * but should contain %s to display the version
*/
#include <linux/init.h>
diff --git a/include/linux/list.h b/include/linux/list.h
index f2af4b4aa4e9..00ea8e5fb88b 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -2,11 +2,13 @@
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H
+#include <linux/container_of.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/poison.h>
#include <linux/const.h>
-#include <linux/kernel.h>
+
+#include <asm/barrier.h>
/*
* Circular doubly linked list implementation.
@@ -18,8 +20,16 @@
* using the generic single-entry routines.
*/
+/**
+ * LIST_HEAD_INIT - initialize a &struct list_head's links to point to itself
+ * @name: name of the list_head
+ */
#define LIST_HEAD_INIT(name) { &(name), &(name) }
+/**
+ * LIST_HEAD - definition of a &struct list_head with initialization values
+ * @name: name of the list_head
+ */
#define LIST_HEAD(name) \
struct list_head name = LIST_HEAD_INIT(name)
@@ -33,14 +43,95 @@
static inline void INIT_LIST_HEAD(struct list_head *list)
{
WRITE_ONCE(list->next, list);
- list->prev = list;
+ WRITE_ONCE(list->prev, list);
}
+#ifdef CONFIG_LIST_HARDENED
+
#ifdef CONFIG_DEBUG_LIST
-extern bool __list_add_valid(struct list_head *new,
- struct list_head *prev,
- struct list_head *next);
-extern bool __list_del_entry_valid(struct list_head *entry);
+# define __list_valid_slowpath
+#else
+# define __list_valid_slowpath __cold __preserve_most
+#endif
+
+/*
+ * Performs the full set of list corruption checks before __list_add().
+ * On list corruption reports a warning, and returns false.
+ */
+bool __list_valid_slowpath __list_add_valid_or_report(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next);
+
+/*
+ * Performs list corruption checks before __list_add(). Returns false if a
+ * corruption is detected, true otherwise.
+ *
+ * With CONFIG_LIST_HARDENED only, performs minimal list integrity checking
+ * inline to catch non-faulting corruptions, and only if a corruption is
+ * detected calls the reporting function __list_add_valid_or_report().
+ */
+static __always_inline bool __list_add_valid(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ bool ret = true;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_LIST)) {
+ /*
+ * With the hardening version, elide checking if next and prev
+ * are NULL, since the immediate dereference of them below would
+ * result in a fault if NULL.
+ *
+ * With the reduced set of checks, we can afford to inline the
+ * checks, which also gives the compiler a chance to elide some
+ * of them completely if they can be proven at compile-time. If
+ * one of the pre-conditions does not hold, the slow-path will
+ * show a report which pre-condition failed.
+ */
+ if (likely(next->prev == prev && prev->next == next && new != prev && new != next))
+ return true;
+ ret = false;
+ }
+
+ ret &= __list_add_valid_or_report(new, prev, next);
+ return ret;
+}
+
+/*
+ * Performs the full set of list corruption checks before __list_del_entry().
+ * On list corruption reports a warning, and returns false.
+ */
+bool __list_valid_slowpath __list_del_entry_valid_or_report(struct list_head *entry);
+
+/*
+ * Performs list corruption checks before __list_del_entry(). Returns false if a
+ * corruption is detected, true otherwise.
+ *
+ * With CONFIG_LIST_HARDENED only, performs minimal list integrity checking
+ * inline to catch non-faulting corruptions, and only if a corruption is
+ * detected calls the reporting function __list_del_entry_valid_or_report().
+ */
+static __always_inline bool __list_del_entry_valid(struct list_head *entry)
+{
+ bool ret = true;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_LIST)) {
+ struct list_head *prev = entry->prev;
+ struct list_head *next = entry->next;
+
+ /*
+ * With the hardening version, elide checking if next and prev
+ * are NULL, LIST_POISON1 or LIST_POISON2, since the immediate
+ * dereference of them below would result in a fault.
+ */
+ if (likely(prev->next == entry && next->prev == entry))
+ return true;
+ ret = false;
+ }
+
+ ret &= __list_del_entry_valid_or_report(entry);
+ return ret;
+}
#else
static inline bool __list_add_valid(struct list_head *new,
struct list_head *prev,
@@ -256,8 +347,7 @@ static inline void list_bulk_move_tail(struct list_head *head,
* @list: the entry to test
* @head: the head of the list
*/
-static inline int list_is_first(const struct list_head *list,
- const struct list_head *head)
+static inline int list_is_first(const struct list_head *list, const struct list_head *head)
{
return list->prev == head;
}
@@ -267,13 +357,22 @@ static inline int list_is_first(const struct list_head *list,
* @list: the entry to test
* @head: the head of the list
*/
-static inline int list_is_last(const struct list_head *list,
- const struct list_head *head)
+static inline int list_is_last(const struct list_head *list, const struct list_head *head)
{
return list->next == head;
}
/**
+ * list_is_head - tests whether @list is the list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_head(const struct list_head *list, const struct list_head *head)
+{
+ return list == head;
+}
+
+/**
* list_empty - tests whether a list is empty
* @head: the list to test.
*/
@@ -296,7 +395,7 @@ static inline int list_empty(const struct list_head *head)
static inline void list_del_init_careful(struct list_head *entry)
{
__list_del_entry(entry);
- entry->prev = entry;
+ WRITE_ONCE(entry->prev, entry);
smp_store_release(&entry->next, entry);
}
@@ -316,7 +415,7 @@ static inline void list_del_init_careful(struct list_head *entry)
static inline int list_empty_careful(const struct list_head *head)
{
struct list_head *next = smp_load_acquire(&head->next);
- return (next == head) && (next == head->prev);
+ return list_is_head(next, head) && (next == READ_ONCE(head->prev));
}
/**
@@ -391,10 +490,9 @@ static inline void list_cut_position(struct list_head *list,
{
if (list_empty(head))
return;
- if (list_is_singular(head) &&
- (head->next != entry && head != entry))
+ if (list_is_singular(head) && !list_is_head(entry, head) && (entry != head->next))
return;
- if (entry == head)
+ if (list_is_head(entry, head))
INIT_LIST_HEAD(list);
else
__list_cut_position(list, head, entry);
@@ -547,6 +645,20 @@ static inline void list_splice_tail_init(struct list_head *list,
})
/**
+ * list_last_entry_or_null - get the last element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note that if the list is empty, it returns NULL.
+ */
+#define list_last_entry_or_null(ptr, type, member) ({ \
+ struct list_head *head__ = (ptr); \
+ struct list_head *pos__ = READ_ONCE(head__->prev); \
+ pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
+})
+
+/**
* list_next_entry - get the next element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
@@ -555,6 +667,19 @@ static inline void list_splice_tail_init(struct list_head *list,
list_entry((pos)->member.next, typeof(*(pos)), member)
/**
+ * list_next_entry_circular - get the next element in list
+ * @pos: the type * to cursor.
+ * @head: the list head to take the element from.
+ * @member: the name of the list_head within the struct.
+ *
+ * Wraps around if @pos is the last element (returns the first element).
+ * Note that the list is expected to be non-empty.
+ */
+#define list_next_entry_circular(pos, head, member) \
+ (list_is_last(&(pos)->member, head) ? \
+ list_first_entry(head, typeof(*(pos)), member) : list_next_entry(pos, member))
+
+/**
* list_prev_entry - get the prev element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
@@ -563,12 +688,25 @@ static inline void list_splice_tail_init(struct list_head *list,
list_entry((pos)->member.prev, typeof(*(pos)), member)
/**
+ * list_prev_entry_circular - get the prev element in list
+ * @pos: the type * to cursor.
+ * @head: the list head to take the element from.
+ * @member: the name of the list_head within the struct.
+ *
+ * Wraps around if @pos is the first element (returns the last element).
+ * Note that the list is expected to be non-empty.
+ */
+#define list_prev_entry_circular(pos, head, member) \
+ (list_is_first(&(pos)->member, head) ? \
+ list_last_entry(head, typeof(*(pos)), member) : list_prev_entry(pos, member))
+
+/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*/
#define list_for_each(pos, head) \
- for (pos = (head)->next; pos != (head); pos = pos->next)
+ for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next)
/**
* list_for_each_continue - continue iteration over a list
@@ -578,7 +716,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* Continue to iterate over a list, continuing after the current position.
*/
#define list_for_each_continue(pos, head) \
- for (pos = pos->next; pos != (head); pos = pos->next)
+ for (pos = pos->next; !list_is_head(pos, (head)); pos = pos->next)
/**
* list_for_each_prev - iterate over a list backwards
@@ -586,7 +724,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @head: the head for your list.
*/
#define list_for_each_prev(pos, head) \
- for (pos = (head)->prev; pos != (head); pos = pos->prev)
+ for (pos = (head)->prev; !list_is_head(pos, (head)); pos = pos->prev)
/**
* list_for_each_safe - iterate over a list safe against removal of list entry
@@ -595,8 +733,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* @head: the head for your list.
*/
#define list_for_each_safe(pos, n, head) \
- for (pos = (head)->next, n = pos->next; pos != (head); \
- pos = n, n = pos->next)
+ for (pos = (head)->next, n = pos->next; \
+ !list_is_head(pos, (head)); \
+ pos = n, n = pos->next)
/**
* list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
@@ -606,17 +745,32 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_prev_safe(pos, n, head) \
for (pos = (head)->prev, n = pos->prev; \
- pos != (head); \
+ !list_is_head(pos, (head)); \
pos = n, n = pos->prev)
/**
+ * list_count_nodes - count nodes in the list
+ * @head: the head for your list.
+ */
+static inline size_t list_count_nodes(struct list_head *head)
+{
+ struct list_head *pos;
+ size_t count = 0;
+
+ list_for_each(pos, head)
+ count++;
+
+ return count;
+}
+
+/**
* list_entry_is_head - test if the entry points to the head of the list
* @pos: the type * to cursor
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*/
#define list_entry_is_head(pos, head, member) \
- (&pos->member == (head))
+ list_is_head(&pos->member, (head))
/**
* list_for_each_entry - iterate over list of given type
@@ -969,6 +1123,26 @@ static inline void hlist_move_list(struct hlist_head *old,
old->first = NULL;
}
+/**
+ * hlist_splice_init() - move all entries from one list to another
+ * @from: hlist_head from which entries will be moved
+ * @last: last entry on the @from list
+ * @to: hlist_head to which entries will be moved
+ *
+ * @to can be empty, @from must contain at least @last.
+ */
+static inline void hlist_splice_init(struct hlist_head *from,
+ struct hlist_node *last,
+ struct hlist_head *to)
+{
+ if (to->first)
+ to->first->pprev = &last->next;
+ last->next = to->first;
+ to->first = from->first;
+ from->first->pprev = &to->first;
+ from->first = NULL;
+}
+
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
@@ -1025,4 +1199,19 @@ static inline void hlist_move_list(struct hlist_head *old,
pos && ({ n = pos->member.next; 1; }); \
pos = hlist_entry_safe(n, typeof(*pos), member))
+/**
+ * hlist_count_nodes - count nodes in the hlist
+ * @head: the head for your hlist.
+ */
+static inline size_t hlist_count_nodes(struct hlist_head *head)
+{
+ struct hlist_node *pos;
+ size_t count = 0;
+
+ hlist_for_each(pos, head)
+ count++;
+
+ return count;
+}
+
#endif
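
Taken together, the new list helpers cover the usual peek-tail, wraparound-walk and count patterns without open-coded pointer comparisons; a minimal sketch with a hypothetical item type:

	struct item {
		struct list_head node;
		int val;
	};

	static LIST_HEAD(items);

	static struct item *demo(struct item *pos)
	{
		struct item *last = list_last_entry_or_null(&items, struct item, node);
		size_t n = list_count_nodes(&items);

		/* wraps to the first entry when pos is the last one */
		if (last && n > 1)
			pos = list_next_entry_circular(pos, &items, node);
		return pos;
	}
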
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 9dcaa3e582c9..fe739d35a864 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -11,6 +11,7 @@
#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>
+#include <linux/xarray.h>
struct mem_cgroup;
@@ -23,85 +24,149 @@ enum lru_status {
LRU_SKIP, /* item cannot be locked, skip */
LRU_RETRY, /* item not freeable. May drop the lock
internally, but has to return locked. */
+ LRU_STOP, /* stop lru list walking. May drop the lock
+ internally, but has to return locked. */
};
struct list_lru_one {
struct list_head list;
/* may become negative during memcg reparenting */
long nr_items;
+ /* protects all fields above */
+ spinlock_t lock;
};
struct list_lru_memcg {
struct rcu_head rcu;
- /* array of per cgroup lists, indexed by memcg_cache_id */
- struct list_lru_one *lru[];
+ /* array of per cgroup per node lists, indexed by node id */
+ struct list_lru_one node[];
};
struct list_lru_node {
- /* protects all lists on the node, including per cgroup */
- spinlock_t lock;
/* global list, used for the root cgroup in cgroup aware lrus */
struct list_lru_one lru;
-#ifdef CONFIG_MEMCG_KMEM
- /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
- struct list_lru_memcg __rcu *memcg_lrus;
-#endif
- long nr_items;
+ atomic_long_t nr_items;
} ____cacheline_aligned_in_smp;
struct list_lru {
struct list_lru_node *node;
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
struct list_head list;
int shrinker_id;
bool memcg_aware;
+ struct xarray xa;
+#endif
+#ifdef CONFIG_LOCKDEP
+ struct lock_class_key *key;
#endif
};
void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
- struct lock_class_key *key, struct shrinker *shrinker);
+ struct shrinker *shrinker);
#define list_lru_init(lru) \
- __list_lru_init((lru), false, NULL, NULL)
-#define list_lru_init_key(lru, key) \
- __list_lru_init((lru), false, (key), NULL)
+ __list_lru_init((lru), false, NULL)
#define list_lru_init_memcg(lru, shrinker) \
- __list_lru_init((lru), true, NULL, shrinker)
+ __list_lru_init((lru), true, shrinker)
+
+static inline int list_lru_init_memcg_key(struct list_lru *lru, struct shrinker *shrinker,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_LOCKDEP
+ lru->key = key;
+#endif
+ return list_lru_init_memcg(lru, shrinker);
+}
-int memcg_update_all_list_lrus(int num_memcgs);
-void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg);
+int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
+ gfp_t gfp);
+void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent);
/**
* list_lru_add: add an element to the lru list's tail
- * @list_lru: the lru pointer
+ * @lru: the lru pointer
* @item: the item to be added.
+ * @nid: the node id of the sublist to add the item to.
+ * @memcg: the cgroup of the sublist to add the item to.
*
* If the element is already part of a list, this function returns doing
- * nothing. Therefore the caller does not need to keep state about whether or
- * not the element already belongs in the list and is allowed to lazy update
- * it. Note however that this is valid for *a* list, not *this* list. If
- * the caller organize itself in a way that elements can be in more than
- * one type of list, it is up to the caller to fully remove the item from
- * the previous list (with list_lru_del() for instance) before moving it
- * to @list_lru
- *
- * Return value: true if the list was updated, false otherwise
+ * nothing. This means that it is not necessary to keep state about whether or
+ * not the element already belongs in the list. That said, this logic only
+ * works if the item is in *this* list. If the item might be in some other
+ * list, then you cannot rely on this check and you must remove it from the
+ * other list before trying to insert it.
+ *
+ * The lru list consists of many sublists internally; the @nid and @memcg
+ * parameters are used to determine which sublist to insert the item into.
+ * It's important to use the right value of @nid and @memcg when deleting the
+ * item, since it might otherwise get deleted from the wrong sublist.
+ *
+ * This also applies when attempting to insert the item multiple times - if
+ * the item is currently in one sublist and you call list_lru_add() again, you
+ * must pass the right @nid and @memcg parameters so that the same sublist is
+ * used.
+ *
+ * You must ensure that the memcg is not freed during this call (e.g., with
+ * rcu or by taking a css refcnt).
+ *
+ * Return: true if the list was updated, false otherwise
+ */
+bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
+
+/**
+ * list_lru_add_obj: add an element to the lru list's tail
+ * @lru: the lru pointer
+ * @item: the item to be added.
+ *
+ * This function is similar to list_lru_add(), but the NUMA node and the
+ * memcg of the sublist are determined by the @item list_head. This
+ * assumption is valid for slab-object LRUs such as dentries, inodes, etc.
+ *
+ * Return: true if the list was updated, false otherwise
+ */
+bool list_lru_add_obj(struct list_lru *lru, struct list_head *item);
+
+/**
+ * list_lru_del: delete an element from the lru list
+ * @lru: the lru pointer
+ * @item: the item to be deleted.
+ * @nid: the node id of the sublist to delete the item from.
+ * @memcg: the cgroup of the sublist to delete the item from.
+ *
+ * This function works analogously as list_lru_add() in terms of list
+ * manipulation.
+ *
+ * The comments in list_lru_add() about an element already being in a list are
+ * also valid for list_lru_del(), that is, you can delete an item that has
+ * already been removed or never been added. However, if the item is in a
+ * list, it must be in *this* list, and you must pass the right value of @nid
+ * and @memcg so that the right sublist is used.
+ *
+ * You must ensure that the memcg is not freed during this call (e.g., with
+ * rcu or by taking a css refcnt). When a memcg is deleted, list_lru entries
+ * are automatically moved to the parent memcg. This is done in a race-free
+ * way, so during deletion of a memcg both the old and new memcg will resolve
+ * to the same sublist internally.
+ *
+ * Return: true if the list was updated, false otherwise
*/
-bool list_lru_add(struct list_lru *lru, struct list_head *item);
+bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
+ struct mem_cgroup *memcg);
/**
- * list_lru_del: delete an element to the lru list
- * @list_lru: the lru pointer
+ * list_lru_del_obj: delete an element from the lru list
+ * @lru: the lru pointer
* @item: the item to be deleted.
*
- * This function works analogously as list_lru_add in terms of list
- * manipulation. The comments about an element already pertaining to
- * a list are also valid for list_lru_del.
+ * This function is similar to list_lru_del(), but the NUMA node and the
+ * memcg of the sublist are determined by the @item list_head. This
+ * assumption is valid for slab-object LRUs such as dentries, inodes, etc.
*
- * Return value: true if the list was updated, false otherwise
+ * Return: true if the list was updated, false otherwise.
*/
-bool list_lru_del(struct list_lru *lru, struct list_head *item);
+bool list_lru_del_obj(struct list_lru *lru, struct list_head *item);
/**
* list_lru_count_one: return the number of objects currently held by @lru
@@ -109,9 +174,11 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item);
* @nid: the node id to count from.
* @memcg: the cgroup to count from.
*
- * Always return a non-negative number, 0 for empty lists. There is no
- * guarantee that the list is not updated while the count is being computed.
- * Callers that want such a guarantee need to provide an outer lock.
+ * There is no guarantee that the list is not updated while the count is being
+ * computed. Callers that want such a guarantee need to provide an outer lock.
+ *
+ * Return: 0 for empty lists, otherwise the number of objects
+ * currently held by @lru.
*/
unsigned long list_lru_count_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg);
@@ -139,45 +206,45 @@ void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
struct list_head *head);
typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
- struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
+ struct list_lru_one *list, void *cb_arg);
/**
- * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
+ * list_lru_walk_one: walk a @lru, isolating and disposing freeable items.
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
- * @isolate: callback function that is resposible for deciding what to do with
+ * @isolate: callback function that is responsible for deciding what to do with
* the item currently being scanned
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
*
- * This function will scan all elements in a particular list_lru, calling the
+ * This function will scan all elements in a particular @lru, calling the
* @isolate callback for each of those items, along with the current list
* spinlock and a caller-provided opaque. The @isolate callback can choose to
* drop the lock internally, but *must* return with the lock held. The callback
- * will return an enum lru_status telling the list_lru infrastructure what to
+ * will return an enum lru_status telling the @lru infrastructure what to
* do with the object being scanned.
*
- * Please note that nr_to_walk does not mean how many objects will be freed,
+ * Please note that @nr_to_walk does not mean how many objects will be freed,
* just how many objects will be scanned.
*
- * Return value: the number of objects effectively removed from the LRU.
+ * Return: the number of objects effectively removed from the LRU.
*/
unsigned long list_lru_walk_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
/**
- * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
+ * list_lru_walk_one_irq: walk a @lru, isolating and disposing freeable items.
* @lru: the lru pointer.
* @nid: the node id to scan from.
* @memcg: the cgroup to scan from.
- * @isolate: callback function that is resposible for deciding what to do with
+ * @isolate: callback function that is responsible for deciding what to do with
* the item currently being scanned
* @cb_arg: opaque type that will be passed to @isolate
* @nr_to_walk: how many items to scan.
*
- * Same as @list_lru_walk_one except that the spinlock is acquired with
+ * Same as list_lru_walk_one() except that the spinlock is acquired with
* spin_lock_irq().
*/
unsigned long list_lru_walk_one_irq(struct list_lru *lru,
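
Under the reworked API a caller either resolves the sublist explicitly via @nid and @memcg, or lets the _obj variants derive both from the object; a sketch with assumed my_lru/my_shrinker names:

	static struct list_lru my_lru;

	/* setup: a memcg-aware lru wired to an (assumed) shrinker */
	static int my_cache_init(struct shrinker *my_shrinker)
	{
		return list_lru_init_memcg(&my_lru, my_shrinker);
	}

	/* fast path: nid and memcg are derived from the object itself */
	static void my_cache_park(struct list_head *entry)
	{
		if (list_lru_add_obj(&my_lru, entry))
			; /* first insertion, account it */
	}
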
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index fa6e8471bd22..248db9b77ee2 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -28,6 +28,7 @@ struct hlist_nulls_node {
#define NULLS_MARKER(value) (1UL | (((long)value) << 1))
#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
+#define HLIST_NULLS_HEAD_INIT(nulls) {.first = (struct hlist_nulls_node *)NULLS_MARKER(nulls)}
#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
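
The new initializer is the file-scope counterpart of INIT_HLIST_NULLS_HEAD(); for instance (the nulls value 0 is an arbitrary illustration):

	static struct hlist_nulls_head cache_chain = HLIST_NULLS_HEAD_INIT(0);
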
diff --git a/include/linux/litex.h b/include/linux/litex.h
index 5ea9ccf5cce4..f2edb86d5f44 100644
--- a/include/linux/litex.h
+++ b/include/linux/litex.h
@@ -11,18 +11,6 @@
#include <linux/io.h>
-/* LiteX SoCs support 8- or 32-bit CSR Bus data width (i.e., subreg. size) */
-#if defined(CONFIG_LITEX_SUBREG_SIZE) && \
- (CONFIG_LITEX_SUBREG_SIZE == 1 || CONFIG_LITEX_SUBREG_SIZE == 4)
-#define LITEX_SUBREG_SIZE CONFIG_LITEX_SUBREG_SIZE
-#else
-#error LiteX subregister size (LITEX_SUBREG_SIZE) must be 4 or 1!
-#endif
-#define LITEX_SUBREG_SIZE_BIT (LITEX_SUBREG_SIZE * 8)
-
-/* LiteX subregisters of any width are always aligned on a 4-byte boundary */
-#define LITEX_SUBREG_ALIGN 0x4
-
static inline void _write_litex_subregister(u32 val, void __iomem *addr)
{
writel((u32 __force)cpu_to_le32(val), addr);
@@ -42,115 +30,54 @@ static inline u32 _read_litex_subregister(void __iomem *addr)
* 32-bit wide logical CSR will be laid out as four 32-bit physical
* subregisters, each one containing one byte of meaningful data.
*
- * For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus
- */
-
-/* number of LiteX subregisters needed to store a register of given reg_size */
-#define _litex_num_subregs(reg_size) \
- (((reg_size) - 1) / LITEX_SUBREG_SIZE + 1)
-
-/*
- * since the number of 4-byte aligned subregisters required to store a single
- * LiteX CSR (MMIO) register varies with LITEX_SUBREG_SIZE, the offset of the
- * next adjacent LiteX CSR register w.r.t. the offset of the current one also
- * depends on how many subregisters the latter is spread across
- */
-#define _next_reg_off(off, size) \
- ((off) + _litex_num_subregs(size) * LITEX_SUBREG_ALIGN)
-
-/*
- * The purpose of `_litex_[set|get]_reg()` is to implement the logic of
- * writing to/reading from the LiteX CSR in a single place that can be then
- * reused by all LiteX drivers via the `litex_[write|read][8|16|32|64]()`
- * accessors for the appropriate data width.
- * NOTE: direct use of `_litex_[set|get]_reg()` by LiteX drivers is strongly
- * discouraged, as they perform no error checking on the requested data width!
- */
-
-/**
- * _litex_set_reg() - Writes a value to the LiteX CSR (Control&Status Register)
- * @reg: Address of the CSR
- * @reg_size: The width of the CSR expressed in the number of bytes
- * @val: Value to be written to the CSR
+ * For Linux support, upstream LiteX enforces a 32-bit wide CSR bus, which
+ * means that only larger-than-32-bit CSRs will be split across multiple
+ * subregisters (e.g., a 64-bit CSR will be spread across two consecutive
+ * 32-bit subregisters).
*
- * This function splits a single (possibly multi-byte) LiteX CSR write into
- * a series of subregister writes with a proper offset.
- * NOTE: caller is responsible for ensuring (0 < reg_size <= sizeof(u64)).
- */
-static inline void _litex_set_reg(void __iomem *reg, size_t reg_size, u64 val)
-{
- u8 shift = _litex_num_subregs(reg_size) * LITEX_SUBREG_SIZE_BIT;
-
- while (shift > 0) {
- shift -= LITEX_SUBREG_SIZE_BIT;
- _write_litex_subregister(val >> shift, reg);
- reg += LITEX_SUBREG_ALIGN;
- }
-}
-
-/**
- * _litex_get_reg() - Reads a value of the LiteX CSR (Control&Status Register)
- * @reg: Address of the CSR
- * @reg_size: The width of the CSR expressed in the number of bytes
- *
- * Return: Value read from the CSR
- *
- * This function generates a series of subregister reads with a proper offset
- * and joins their results into a single (possibly multi-byte) LiteX CSR value.
- * NOTE: caller is responsible for ensuring (0 < reg_size <= sizeof(u64)).
+ * For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus
*/
-static inline u64 _litex_get_reg(void __iomem *reg, size_t reg_size)
-{
- u64 r;
- u8 i;
-
- r = _read_litex_subregister(reg);
- for (i = 1; i < _litex_num_subregs(reg_size); i++) {
- r <<= LITEX_SUBREG_SIZE_BIT;
- reg += LITEX_SUBREG_ALIGN;
- r |= _read_litex_subregister(reg);
- }
- return r;
-}
static inline void litex_write8(void __iomem *reg, u8 val)
{
- _litex_set_reg(reg, sizeof(u8), val);
+ _write_litex_subregister(val, reg);
}
static inline void litex_write16(void __iomem *reg, u16 val)
{
- _litex_set_reg(reg, sizeof(u16), val);
+ _write_litex_subregister(val, reg);
}
static inline void litex_write32(void __iomem *reg, u32 val)
{
- _litex_set_reg(reg, sizeof(u32), val);
+ _write_litex_subregister(val, reg);
}
static inline void litex_write64(void __iomem *reg, u64 val)
{
- _litex_set_reg(reg, sizeof(u64), val);
+ _write_litex_subregister(val >> 32, reg);
+ _write_litex_subregister(val, reg + 4);
}
static inline u8 litex_read8(void __iomem *reg)
{
- return _litex_get_reg(reg, sizeof(u8));
+ return _read_litex_subregister(reg);
}
static inline u16 litex_read16(void __iomem *reg)
{
- return _litex_get_reg(reg, sizeof(u16));
+ return _read_litex_subregister(reg);
}
static inline u32 litex_read32(void __iomem *reg)
{
- return _litex_get_reg(reg, sizeof(u32));
+ return _read_litex_subregister(reg);
}
static inline u64 litex_read64(void __iomem *reg)
{
- return _litex_get_reg(reg, sizeof(u64));
+ return ((u64)_read_litex_subregister(reg) << 32) |
+ _read_litex_subregister(reg + 4);
}
#endif /* _LINUX_LITEX_H */
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 2614247a9781..772919e8096a 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -13,15 +13,15 @@
#include <linux/ftrace.h>
#include <linux/completion.h>
#include <linux/list.h>
+#include <linux/livepatch_external.h>
+#include <linux/livepatch_sched.h>
#if IS_ENABLED(CONFIG_LIVEPATCH)
-#include <asm/livepatch.h>
-
/* task patch states */
-#define KLP_UNDEFINED -1
-#define KLP_UNPATCHED 0
-#define KLP_PATCHED 1
+#define KLP_TRANSITION_IDLE -1
+#define KLP_TRANSITION_UNPATCHED 0
+#define KLP_TRANSITION_PATCHED 1
/**
* struct klp_func - function structure for live patching
@@ -78,30 +78,6 @@ struct klp_func {
bool transition;
};
-struct klp_object;
-
-/**
- * struct klp_callbacks - pre/post live-(un)patch callback structure
- * @pre_patch: executed before code patching
- * @post_patch: executed after code patching
- * @pre_unpatch: executed before code unpatching
- * @post_unpatch: executed after code unpatching
- * @post_unpatch_enabled: flag indicating if post-unpatch callback
- * should run
- *
- * All callbacks are optional. Only the pre-patch callback, if provided,
- * will be unconditionally executed. If the parent klp_object fails to
- * patch for any reason, including a non-zero error status returned from
- * the pre-patch callback, no further callbacks will be executed.
- */
-struct klp_callbacks {
- int (*pre_patch)(struct klp_object *obj);
- void (*post_patch)(struct klp_object *obj);
- void (*pre_unpatch)(struct klp_object *obj);
- void (*post_unpatch)(struct klp_object *obj);
- bool post_unpatch_enabled;
-};
-
/**
* struct klp_object - kernel object structure for live patching
* @name: module name (or NULL for vmlinux)
diff --git a/include/linux/livepatch_external.h b/include/linux/livepatch_external.h
new file mode 100644
index 000000000000..138af19b0f5c
--- /dev/null
+++ b/include/linux/livepatch_external.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * External livepatch interfaces for patch creation tooling
+ */
+
+#ifndef _LINUX_LIVEPATCH_EXTERNAL_H_
+#define _LINUX_LIVEPATCH_EXTERNAL_H_
+
+#include <linux/types.h>
+
+#define KLP_RELOC_SEC_PREFIX ".klp.rela."
+#define KLP_SYM_PREFIX ".klp.sym."
+
+#define __KLP_PRE_PATCH_PREFIX __klp_pre_patch_callback_
+#define __KLP_POST_PATCH_PREFIX __klp_post_patch_callback_
+#define __KLP_PRE_UNPATCH_PREFIX __klp_pre_unpatch_callback_
+#define __KLP_POST_UNPATCH_PREFIX __klp_post_unpatch_callback_
+
+#define KLP_PRE_PATCH_PREFIX __stringify(__KLP_PRE_PATCH_PREFIX)
+#define KLP_POST_PATCH_PREFIX __stringify(__KLP_POST_PATCH_PREFIX)
+#define KLP_PRE_UNPATCH_PREFIX __stringify(__KLP_PRE_UNPATCH_PREFIX)
+#define KLP_POST_UNPATCH_PREFIX __stringify(__KLP_POST_UNPATCH_PREFIX)
+
+struct klp_object;
+
+typedef int (*klp_pre_patch_t)(struct klp_object *obj);
+typedef void (*klp_post_patch_t)(struct klp_object *obj);
+typedef void (*klp_pre_unpatch_t)(struct klp_object *obj);
+typedef void (*klp_post_unpatch_t)(struct klp_object *obj);
+
+/**
+ * struct klp_callbacks - pre/post live-(un)patch callback structure
+ * @pre_patch: executed before code patching
+ * @post_patch: executed after code patching
+ * @pre_unpatch: executed before code unpatching
+ * @post_unpatch: executed after code unpatching
+ * @post_unpatch_enabled: flag indicating if post-unpatch callback
+ * should run
+ *
+ * All callbacks are optional. Only the pre-patch callback, if provided,
+ * will be unconditionally executed. If the parent klp_object fails to
+ * patch for any reason, including a non-zero error status returned from
+ * the pre-patch callback, no further callbacks will be executed.
+ */
+struct klp_callbacks {
+ klp_pre_patch_t pre_patch;
+ klp_post_patch_t post_patch;
+ klp_pre_unpatch_t pre_unpatch;
+ klp_post_unpatch_t post_unpatch;
+ bool post_unpatch_enabled;
+};
+
+/*
+ * 'struct klp_{func,object}_ext' are compact "external" representations of
+ * 'struct klp_{func,object}'. They are used by objtool for livepatch
+ * generation. The structs are then read by the livepatch module and converted
+ * to the real structs before calling klp_enable_patch().
+ *
+ * TODO make these the official API for klp_enable_patch(). That should
+ * simplify livepatch's interface as well as its data structure lifetime
+ * management.
+ */
+struct klp_func_ext {
+ const char *old_name;
+ void *new_func;
+ unsigned long sympos;
+};
+
+struct klp_object_ext {
+ const char *name;
+ struct klp_func_ext *funcs;
+ struct klp_callbacks callbacks;
+ unsigned int nr_funcs;
+};
+
+#endif /* _LINUX_LIVEPATCH_EXTERNAL_H_ */
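
As a sketch of how the compact representation is meant to be populated (the symbol names are purely illustrative, and new_cmdline_proc_show is an assumed replacement function):

	static struct klp_func_ext ext_funcs[] = {
		{ .old_name = "cmdline_proc_show", .new_func = new_cmdline_proc_show },
	};

	static struct klp_object_ext ext_obj = {
		.name		= NULL,	/* NULL selects vmlinux */
		.funcs		= ext_funcs,
		.nr_funcs	= ARRAY_SIZE(ext_funcs),
	};
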
diff --git a/include/linux/livepatch_helpers.h b/include/linux/livepatch_helpers.h
new file mode 100644
index 000000000000..99d68d0773fa
--- /dev/null
+++ b/include/linux/livepatch_helpers.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LIVEPATCH_HELPERS_H
+#define _LINUX_LIVEPATCH_HELPERS_H
+
+/*
+ * Interfaces for use by livepatch patches
+ */
+
+#include <linux/syscalls.h>
+#include <linux/livepatch.h>
+
+#ifdef MODULE
+#define KLP_OBJNAME __KBUILD_MODNAME
+#else
+#define KLP_OBJNAME vmlinux
+#endif
+
+/* Livepatch callback registration */
+
+#define KLP_CALLBACK_PTRS ".discard.klp_callback_ptrs"
+
+#define KLP_PRE_PATCH_CALLBACK(func) \
+ klp_pre_patch_t __used __section(KLP_CALLBACK_PTRS) \
+ __PASTE(__KLP_PRE_PATCH_PREFIX, KLP_OBJNAME) = func
+
+#define KLP_POST_PATCH_CALLBACK(func) \
+ klp_post_patch_t __used __section(KLP_CALLBACK_PTRS) \
+ __PASTE(__KLP_POST_PATCH_PREFIX, KLP_OBJNAME) = func
+
+#define KLP_PRE_UNPATCH_CALLBACK(func) \
+ klp_pre_unpatch_t __used __section(KLP_CALLBACK_PTRS) \
+ __PASTE(__KLP_PRE_UNPATCH_PREFIX, KLP_OBJNAME) = func
+
+#define KLP_POST_UNPATCH_CALLBACK(func) \
+ klp_post_unpatch_t __used __section(KLP_CALLBACK_PTRS) \
+ __PASTE(__KLP_POST_UNPATCH_PREFIX, KLP_OBJNAME) = func
+
+/*
+ * Replace static_call() usage with this macro when create-diff-object
+ * recommends it due to the original static call key living in a module.
+ *
+ * This converts the static call to a regular indirect call.
+ */
+#define KLP_STATIC_CALL(name) \
+ ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
+
+/* Syscall patching */
+
+#define KLP_SYSCALL_DEFINE1(name, ...) KLP_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
+#define KLP_SYSCALL_DEFINE2(name, ...) KLP_SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
+#define KLP_SYSCALL_DEFINE3(name, ...) KLP_SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
+#define KLP_SYSCALL_DEFINE4(name, ...) KLP_SYSCALL_DEFINEx(4, _##name, __VA_ARGS__)
+#define KLP_SYSCALL_DEFINE5(name, ...) KLP_SYSCALL_DEFINEx(5, _##name, __VA_ARGS__)
+#define KLP_SYSCALL_DEFINE6(name, ...) KLP_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
+
+#define KLP_SYSCALL_DEFINEx(x, sname, ...) \
+ __KLP_SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
+
+#ifdef CONFIG_X86_64
+// TODO move this to arch/x86/include/asm/syscall_wrapper.h and share code
+#define __KLP_SYSCALL_DEFINEx(x, name, ...) \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __klp_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+ __X64_SYS_STUBx(x, name, __VA_ARGS__) \
+ __IA32_SYS_STUBx(x, name, __VA_ARGS__) \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = __klp_do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__));\
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
+ return ret; \
+ } \
+ static inline long __klp_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#endif
+
+#endif /* _LINUX_LIVEPATCH_HELPERS_H */
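
A patch module then only defines the callback and names it through the macro, which drops a typed pointer into .discard.klp_callback_ptrs for the tooling; a hedged sketch:

	static int my_pre_patch(struct klp_object *obj)
	{
		/* allocate whatever the patched functions will need */
		return 0;
	}

	/* emits __klp_pre_patch_callback_<objname> for this object */
	KLP_PRE_PATCH_CALLBACK(my_pre_patch);
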
diff --git a/include/linux/livepatch_sched.h b/include/linux/livepatch_sched.h
new file mode 100644
index 000000000000..065c185f2763
--- /dev/null
+++ b/include/linux/livepatch_sched.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_LIVEPATCH_SCHED_H_
+#define _LINUX_LIVEPATCH_SCHED_H_
+
+#include <linux/jump_label.h>
+#include <linux/sched.h>
+
+#ifdef CONFIG_LIVEPATCH
+
+void __klp_sched_try_switch(void);
+
+DECLARE_STATIC_KEY_FALSE(klp_sched_try_switch_key);
+
+static __always_inline void klp_sched_try_switch(struct task_struct *curr)
+{
+ if (static_branch_unlikely(&klp_sched_try_switch_key) &&
+ READ_ONCE(curr->__state) & TASK_FREEZABLE)
+ __klp_sched_try_switch();
+}
+
+#else /* !CONFIG_LIVEPATCH */
+static inline void klp_sched_try_switch(struct task_struct *curr) {}
+#endif /* CONFIG_LIVEPATCH */
+
+#endif /* _LINUX_LIVEPATCH_SCHED_H_ */
diff --git a/include/linux/liveupdate.h b/include/linux/liveupdate.h
new file mode 100644
index 000000000000..a7f6ee5b6771
--- /dev/null
+++ b/include/linux/liveupdate.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2025, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+#ifndef _LINUX_LIVEUPDATE_H
+#define _LINUX_LIVEUPDATE_H
+
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/kho/abi/luo.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <uapi/linux/liveupdate.h>
+
+struct liveupdate_file_handler;
+struct file;
+
+/**
+ * struct liveupdate_file_op_args - Arguments for file operation callbacks.
+ * @handler: The file handler being called.
+ * @retrieved: The retrieve status for the 'can_finish / finish'
+ * operation.
+ * @file: The file object. For retrieve: [OUT] The callback sets
+ * this to the new file. For other ops: [IN] The caller sets
+ * this to the file being operated on.
+ * @serialized_data: The opaque u64 handle, preserve/prepare/freeze may update
+ * this field.
+ * @private_data: Private data for the file used to hold runtime state that
+ * is not preserved. Set by the handler's .preserve()
+ * callback, and must be freed in the handler's
+ * .unpreserve() callback.
+ *
+ * This structure bundles all parameters for the file operation callbacks.
+ * The @serialized_data and @file fields are used for both input and output.
+ */
+struct liveupdate_file_op_args {
+ struct liveupdate_file_handler *handler;
+ bool retrieved;
+ struct file *file;
+ u64 serialized_data;
+ void *private_data;
+};
+
+/**
+ * struct liveupdate_file_ops - Callbacks for live-updatable files.
+ * @can_preserve: Required. Lightweight check to see if this handler is
+ * compatible with the given file.
+ * @preserve: Required. Performs state-saving for the file.
+ * @unpreserve: Required. Cleans up any resources allocated by @preserve.
+ * @freeze: Optional. Final actions just before kernel transition.
+ * @unfreeze: Optional. Undo freeze operations.
+ * @retrieve: Required. Restores the file in the new kernel.
+ * @can_finish:	Optional. Checks whether this FD can finish, i.e. whether
+ *			all restoration prerequisites for this FD are satisfied.
+ *			Called before finish so that finish can then be called
+ *			successfully for all resources in the session.
+ * @finish: Required. Final cleanup in the new kernel.
+ * @owner: Module reference
+ *
+ * All operations (except can_preserve) receive a pointer to a
+ * 'struct liveupdate_file_op_args' containing the necessary context.
+ */
+struct liveupdate_file_ops {
+ bool (*can_preserve)(struct liveupdate_file_handler *handler,
+ struct file *file);
+ int (*preserve)(struct liveupdate_file_op_args *args);
+ void (*unpreserve)(struct liveupdate_file_op_args *args);
+ int (*freeze)(struct liveupdate_file_op_args *args);
+ void (*unfreeze)(struct liveupdate_file_op_args *args);
+ int (*retrieve)(struct liveupdate_file_op_args *args);
+ bool (*can_finish)(struct liveupdate_file_op_args *args);
+ void (*finish)(struct liveupdate_file_op_args *args);
+ struct module *owner;
+};
+
+/**
+ * struct liveupdate_file_handler - Represents a handler for a live-updatable file type.
+ * @ops: Callback functions
+ * @compatible: The compatibility string (e.g., "memfd-v1", "vfiofd-v1")
+ * that uniquely identifies the file type this handler
+ * supports. This is matched against the compatible string
+ * associated with individual &struct file instances.
+ *
+ * Modules that want to support live update for specific file types should
+ * register an instance of this structure. LUO uses this registration to
+ * determine if a given file can be preserved and to find the appropriate
+ * operations to manage its state across the update.
+ */
+struct liveupdate_file_handler {
+ const struct liveupdate_file_ops *ops;
+ const char compatible[LIVEUPDATE_HNDL_COMPAT_LENGTH];
+
+ /* private: */
+
+ /*
+ * Used for linking this handler instance into a global list of
+ * registered file handlers.
+ */
+ struct list_head __private list;
+};
+
+#ifdef CONFIG_LIVEUPDATE
+
+/* Return true if live update orchestrator is enabled */
+bool liveupdate_enabled(void);
+
+/* Called during kexec to tell LUO that the kernel has entered reboot */
+int liveupdate_reboot(void);
+
+int liveupdate_register_file_handler(struct liveupdate_file_handler *fh);
+int liveupdate_unregister_file_handler(struct liveupdate_file_handler *fh);
+
+#else /* CONFIG_LIVEUPDATE */
+
+static inline bool liveupdate_enabled(void)
+{
+ return false;
+}
+
+static inline int liveupdate_reboot(void)
+{
+ return 0;
+}
+
+static inline int liveupdate_register_file_handler(struct liveupdate_file_handler *fh)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int liveupdate_unregister_file_handler(struct liveupdate_file_handler *fh)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_LIVEUPDATE */
+#endif /* _LINUX_LIVEUPDATE_H */
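A minimal sketch of registering a handler against this API. The callback bodies, names and the "demo-v1" compatible string are illustrative; only the required ops are populated:

static bool demo_can_preserve(struct liveupdate_file_handler *h,
			      struct file *file)
{
	return false;	/* sketch: claim no files */
}

static int demo_preserve(struct liveupdate_file_op_args *args)
{
	args->serialized_data = 0;	/* opaque u64 handle for the new kernel */
	return 0;
}

static void demo_unpreserve(struct liveupdate_file_op_args *args) { }

static int demo_retrieve(struct liveupdate_file_op_args *args)
{
	return -ENOENT;	/* would set args->file to the recreated file */
}

static void demo_finish(struct liveupdate_file_op_args *args) { }

static const struct liveupdate_file_ops demo_file_ops = {
	.can_preserve	= demo_can_preserve,
	.preserve	= demo_preserve,
	.unpreserve	= demo_unpreserve,
	.retrieve	= demo_retrieve,
	.finish		= demo_finish,
	.owner		= THIS_MODULE,
};

static struct liveupdate_file_handler demo_handler = {
	.ops		= &demo_file_ops,
	.compatible	= "demo-v1",
};

static int __init demo_init(void)
{
	return liveupdate_register_file_handler(&demo_handler);
}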
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 24f207b0190b..607b2360c938 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -49,7 +49,9 @@
*/
#include <linux/atomic.h>
-#include <linux/kernel.h>
+#include <linux/container_of.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
struct llist_head {
struct llist_node *first;
@@ -72,6 +74,33 @@ static inline void init_llist_head(struct llist_head *list)
}
/**
+ * init_llist_node - initialize lock-less list node
+ * @node: the node to be initialised
+ *
+ * In cases where there is a need to test whether a node is on
+ * a list or not, this initialises the node so that it is clearly
+ * not on any list.
+ */
+static inline void init_llist_node(struct llist_node *node)
+{
+ WRITE_ONCE(node->next, node);
+}
+
+/**
+ * llist_on_list - test if a lock-less list node is on a list
+ * @node: the node to test
+ *
+ * When a node is on a list the ->next pointer will be NULL or point
+ * to some other node. It can never point to itself. We use that
+ * in init_llist_node() to record that a node is not on any list,
+ * and here to test whether it is on any list.
+ */
+static inline bool llist_on_list(const struct llist_node *node)
+{
+ return READ_ONCE(node->next) != node;
+}
+
+/**
* llist_entry - get the struct of this entry
* @ptr: the &struct llist_node pointer.
* @type: the type of the struct this is embedded in.
@@ -191,12 +220,29 @@ static inline bool llist_empty(const struct llist_head *head)
static inline struct llist_node *llist_next(struct llist_node *node)
{
- return node->next;
+ return READ_ONCE(node->next);
}
-extern bool llist_add_batch(struct llist_node *new_first,
- struct llist_node *new_last,
- struct llist_head *head);
+/**
+ * llist_add_batch - add several linked entries in batch
+ * @new_first: first entry in batch to be added
+ * @new_last: last entry in batch to be added
+ * @head: the head for your lock-less list
+ *
+ * Return whether list is empty before adding.
+ */
+static inline bool llist_add_batch(struct llist_node *new_first,
+ struct llist_node *new_last,
+ struct llist_head *head)
+{
+ struct llist_node *first = READ_ONCE(head->first);
+
+ do {
+ new_last->next = first;
+ } while (!try_cmpxchg(&head->first, &first, new_first));
+
+ return !first;
+}
static inline bool __llist_add_batch(struct llist_node *new_first,
struct llist_node *new_last,
@@ -247,6 +293,25 @@ static inline struct llist_node *__llist_del_all(struct llist_head *head)
extern struct llist_node *llist_del_first(struct llist_head *head);
+/**
+ * llist_del_first_init - delete first entry from lock-less list and mark it as being off-list
+ * @head: the head of lock-less list to delete from.
+ *
+ * This behaves the same as llist_del_first() except that init_llist_node() is called
+ * on the returned node so that llist_on_list() will report false for the node.
+ */
+static inline struct llist_node *llist_del_first_init(struct llist_head *head)
+{
+ struct llist_node *n = llist_del_first(head);
+
+ if (n)
+ init_llist_node(n);
+ return n;
+}
+
+extern bool llist_del_first_this(struct llist_head *head,
+ struct llist_node *this);
+
struct llist_node *llist_reverse_order(struct llist_node *head);
#endif /* LLIST_H */
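The two additions make "is this node queued?" a supported question. A sketch of the intended pattern, assuming queueing of a given node is serialised against itself (type and function names are illustrative):

struct work_item {
	struct llist_node node;
	int payload;
};

static LLIST_HEAD(pending);

static void work_item_setup(struct work_item *w)
{
	init_llist_node(&w->node);	/* marks the node as off-list */
}

static void work_item_queue(struct work_item *w)
{
	if (!llist_on_list(&w->node))	/* reliable after the init above */
		llist_add(&w->node, &pending);
}

static void work_drain(void)	/* single consumer, as llist_del_first() requires */
{
	struct llist_node *n;

	while ((n = llist_del_first_init(&pending))) {
		struct work_item *w = llist_entry(n, struct work_item, node);
		/* process w; llist_on_list(&w->node) now reports false */
	}
}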
diff --git a/include/linux/llist_api.h b/include/linux/llist_api.h
new file mode 100644
index 000000000000..625bec0393a1
--- /dev/null
+++ b/include/linux/llist_api.h
@@ -0,0 +1 @@
+#include <linux/llist.h>
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index e55010fa7329..b0e6ab329b00 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -6,6 +6,7 @@
/**
* local_lock_init - Runtime initialize a lock instance
+ * @lock: The lock variable
*/
#define local_lock_init(lock) __local_lock_init(lock)
@@ -13,13 +14,13 @@
* local_lock - Acquire a per CPU local lock
* @lock: The lock variable
*/
-#define local_lock(lock) __local_lock(lock)
+#define local_lock(lock) __local_lock(this_cpu_ptr(lock))
/**
* local_lock_irq - Acquire a per CPU local lock and disable interrupts
* @lock: The lock variable
*/
-#define local_lock_irq(lock) __local_lock_irq(lock)
+#define local_lock_irq(lock) __local_lock_irq(this_cpu_ptr(lock))
/**
* local_lock_irqsave - Acquire a per CPU local lock, save and disable
@@ -28,19 +29,19 @@
* @flags: Storage for interrupt flags
*/
#define local_lock_irqsave(lock, flags) \
- __local_lock_irqsave(lock, flags)
+ __local_lock_irqsave(this_cpu_ptr(lock), flags)
/**
* local_unlock - Release a per CPU local lock
* @lock: The lock variable
*/
-#define local_unlock(lock) __local_unlock(lock)
+#define local_unlock(lock) __local_unlock(this_cpu_ptr(lock))
/**
* local_unlock_irq - Release a per CPU local lock and enable interrupts
* @lock: The lock variable
*/
-#define local_unlock_irq(lock) __local_unlock_irq(lock)
+#define local_unlock_irq(lock) __local_unlock_irq(this_cpu_ptr(lock))
/**
* local_unlock_irqrestore - Release a per CPU local lock and restore
@@ -49,6 +50,58 @@
* @flags: Interrupt flags to restore
*/
#define local_unlock_irqrestore(lock, flags) \
- __local_unlock_irqrestore(lock, flags)
+ __local_unlock_irqrestore(this_cpu_ptr(lock), flags)
+
+/**
+ * local_trylock_init - Runtime initialize a lock instance
+ * @lock: The lock variable
+ */
+#define local_trylock_init(lock) __local_trylock_init(lock)
+
+/**
+ * local_trylock - Try to acquire a per CPU local lock
+ * @lock: The lock variable
+ *
+ * The function can be used in any context such as NMI or HARDIRQ. Due to
+ * locking constraints it will _always_ fail to acquire the lock in NMI or
+ * HARDIRQ context on PREEMPT_RT.
+ */
+#define local_trylock(lock) __local_trylock(this_cpu_ptr(lock))
+
+#define local_lock_is_locked(lock) __local_lock_is_locked(lock)
+
+/**
+ * local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
+ * interrupts if acquired
+ * @lock: The lock variable
+ * @flags: Storage for interrupt flags
+ *
+ * The function can be used in any context such as NMI or HARDIRQ. Due to
+ * locking constraints it will _always_ fail to acquire the lock in NMI or
+ * HARDIRQ context on PREEMPT_RT.
+ */
+#define local_trylock_irqsave(lock, flags) \
+ __local_trylock_irqsave(this_cpu_ptr(lock), flags)
+
+DEFINE_GUARD(local_lock, local_lock_t __percpu*,
+ local_lock(_T),
+ local_unlock(_T))
+DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*,
+ local_lock_irq(_T),
+ local_unlock_irq(_T))
+DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
+ local_lock_irqsave(_T->lock, _T->flags),
+ local_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+#define local_lock_nested_bh(_lock) \
+ __local_lock_nested_bh(this_cpu_ptr(_lock))
+
+#define local_unlock_nested_bh(_lock) \
+ __local_unlock_nested_bh(this_cpu_ptr(_lock))
+
+DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
+ local_lock_nested_bh(_T),
+ local_unlock_nested_bh(_T))
#endif
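With this change the per-CPU dereference moves into the API: callers pass the per-CPU address of the lock member and the macros apply this_cpu_ptr() themselves. A sketch under that convention (struct and field names are illustrative):

struct counter_pcp {
	local_lock_t	lock;
	u64		events;
};

static DEFINE_PER_CPU(struct counter_pcp, counter_pcp) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void counter_bump(void)
{
	local_lock(&counter_pcp.lock);		/* this_cpu_ptr() done inside */
	this_cpu_ptr(&counter_pcp)->events++;
	local_unlock(&counter_pcp.lock);
}

The DEFINE_GUARD lines make the scoped form available as well, e.g. guard(local_lock)(&counter_pcp.lock) releases the lock automatically at end of scope.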
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index ded90b097e6e..8f82b4eb542f 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -6,6 +6,8 @@
#include <linux/percpu-defs.h>
#include <linux/lockdep.h>
+#ifndef CONFIG_PREEMPT_RT
+
typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -13,30 +15,27 @@ typedef struct {
#endif
} local_lock_t;
+/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
+typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define LL_DEP_MAP_INIT(lockname) \
+ struct lockdep_map dep_map;
+ struct task_struct *owner;
+#endif
+ u8 acquired;
+} local_trylock_t;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCAL_LOCK_DEBUG_INIT(lockname) \
.dep_map = { \
.name = #lockname, \
.wait_type_inner = LD_WAIT_CONFIG, \
- .lock_type = LD_LOCK_PERCPU, \
- }
-#else
-# define LL_DEP_MAP_INIT(lockname)
-#endif
-
-#define INIT_LOCAL_LOCK(lockname) { LL_DEP_MAP_INIT(lockname) }
+ .lock_type = LD_LOCK_PERCPU, \
+ }, \
+ .owner = NULL,
-#define __local_lock_init(lock) \
-do { \
- static struct lock_class_key __key; \
- \
- debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
- lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \
- LD_WAIT_CONFIG, LD_WAIT_INV, \
- LD_LOCK_PERCPU); \
-} while (0)
+# define LOCAL_TRYLOCK_DEBUG_INIT(lockname) \
+ LOCAL_LOCK_DEBUG_INIT(lockname)
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void local_lock_acquire(local_lock_t *l)
{
lock_map_acquire(&l->dep_map);
@@ -44,6 +43,13 @@ static inline void local_lock_acquire(local_lock_t *l)
l->owner = current;
}
+static inline void local_trylock_acquire(local_lock_t *l)
+{
+ lock_map_acquire_try(&l->dep_map);
+ DEBUG_LOCKS_WARN_ON(l->owner);
+ l->owner = current;
+}
+
static inline void local_lock_release(local_lock_t *l)
{
DEBUG_LOCKS_WARN_ON(l->owner != current);
@@ -51,43 +57,239 @@ static inline void local_lock_release(local_lock_t *l)
lock_map_release(&l->dep_map);
}
+static inline void local_lock_debug_init(local_lock_t *l)
+{
+ l->owner = NULL;
+}
#else /* CONFIG_DEBUG_LOCK_ALLOC */
+# define LOCAL_LOCK_DEBUG_INIT(lockname)
+# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
+static inline void local_trylock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
+static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
+#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
+#define INIT_LOCAL_TRYLOCK(lockname) { LOCAL_TRYLOCK_DEBUG_INIT(lockname) }
+
+#define __local_lock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
+ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
+ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
+ LD_LOCK_PERCPU); \
+ local_lock_debug_init(lock); \
+} while (0)
+
+#define __local_trylock_init(lock) __local_lock_init((local_lock_t *)lock)
+
+#define __spinlock_nested_bh_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
+ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
+ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
+ LD_LOCK_NORMAL); \
+ local_lock_debug_init(lock); \
+} while (0)
+
+#define __local_lock_acquire(lock) \
+ do { \
+ local_trylock_t *__tl; \
+ local_lock_t *__l; \
+ \
+ __l = (local_lock_t *)(lock); \
+ __tl = (local_trylock_t *)__l; \
+ _Generic((lock), \
+ local_trylock_t *: ({ \
+ lockdep_assert(__tl->acquired == 0); \
+ WRITE_ONCE(__tl->acquired, 1); \
+ }), \
+ local_lock_t *: (void)0); \
+ local_lock_acquire(__l); \
+ } while (0)
+
#define __local_lock(lock) \
do { \
preempt_disable(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ __local_lock_acquire(lock); \
} while (0)
#define __local_lock_irq(lock) \
do { \
local_irq_disable(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ __local_lock_acquire(lock); \
} while (0)
#define __local_lock_irqsave(lock, flags) \
do { \
local_irq_save(flags); \
- local_lock_acquire(this_cpu_ptr(lock)); \
+ __local_lock_acquire(lock); \
+ } while (0)
+
+#define __local_trylock(lock) \
+ ({ \
+ local_trylock_t *__tl; \
+ \
+ preempt_disable(); \
+ __tl = (lock); \
+ if (READ_ONCE(__tl->acquired)) { \
+ preempt_enable(); \
+ __tl = NULL; \
+ } else { \
+ WRITE_ONCE(__tl->acquired, 1); \
+ local_trylock_acquire( \
+ (local_lock_t *)__tl); \
+ } \
+ !!__tl; \
+ })
+
+#define __local_trylock_irqsave(lock, flags) \
+ ({ \
+ local_trylock_t *__tl; \
+ \
+ local_irq_save(flags); \
+ __tl = (lock); \
+ if (READ_ONCE(__tl->acquired)) { \
+ local_irq_restore(flags); \
+ __tl = NULL; \
+ } else { \
+ WRITE_ONCE(__tl->acquired, 1); \
+ local_trylock_acquire( \
+ (local_lock_t *)__tl); \
+ } \
+ !!__tl; \
+ })
+
+/* preemption or migration must be disabled before calling __local_lock_is_locked */
+#define __local_lock_is_locked(lock) READ_ONCE(this_cpu_ptr(lock)->acquired)
+
+#define __local_lock_release(lock) \
+ do { \
+ local_trylock_t *__tl; \
+ local_lock_t *__l; \
+ \
+ __l = (local_lock_t *)(lock); \
+ __tl = (local_trylock_t *)__l; \
+ local_lock_release(__l); \
+ _Generic((lock), \
+ local_trylock_t *: ({ \
+ lockdep_assert(__tl->acquired == 1); \
+ WRITE_ONCE(__tl->acquired, 0); \
+ }), \
+ local_lock_t *: (void)0); \
} while (0)
#define __local_unlock(lock) \
do { \
- local_lock_release(this_cpu_ptr(lock)); \
+ __local_lock_release(lock); \
preempt_enable(); \
} while (0)
#define __local_unlock_irq(lock) \
do { \
- local_lock_release(this_cpu_ptr(lock)); \
+ __local_lock_release(lock); \
local_irq_enable(); \
} while (0)
#define __local_unlock_irqrestore(lock, flags) \
do { \
- local_lock_release(this_cpu_ptr(lock)); \
+ __local_lock_release(lock); \
local_irq_restore(flags); \
} while (0)
+
+#define __local_lock_nested_bh(lock) \
+ do { \
+ lockdep_assert_in_softirq(); \
+ local_lock_acquire((lock)); \
+ } while (0)
+
+#define __local_unlock_nested_bh(lock) \
+ local_lock_release((lock))
+
+#else /* !CONFIG_PREEMPT_RT */
+
+/*
+ * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
+ * critical section while staying preemptible.
+ */
+typedef spinlock_t local_lock_t;
+typedef spinlock_t local_trylock_t;
+
+#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+#define INIT_LOCAL_TRYLOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+
+#define __local_lock_init(__l) \
+ do { \
+ local_spin_lock_init((__l)); \
+ } while (0)
+
+#define __local_trylock_init(__l) __local_lock_init(__l)
+
+#define __local_lock(__lock) \
+ do { \
+ migrate_disable(); \
+ spin_lock((__lock)); \
+ } while (0)
+
+#define __local_lock_irq(lock) __local_lock(lock)
+
+#define __local_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __local_lock(lock); \
+ } while (0)
+
+#define __local_unlock(__lock) \
+ do { \
+ spin_unlock((__lock)); \
+ migrate_enable(); \
+ } while (0)
+
+#define __local_unlock_irq(lock) __local_unlock(lock)
+
+#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock)
+
+#define __local_lock_nested_bh(lock) \
+do { \
+ lockdep_assert_in_softirq_func(); \
+ spin_lock((lock)); \
+} while (0)
+
+#define __local_unlock_nested_bh(lock) \
+do { \
+ spin_unlock((lock)); \
+} while (0)
+
+#define __local_trylock(lock) \
+ ({ \
+ int __locked; \
+ \
+ if (in_nmi() | in_hardirq()) { \
+ __locked = 0; \
+ } else { \
+ migrate_disable(); \
+ __locked = spin_trylock((lock)); \
+ if (!__locked) \
+ migrate_enable(); \
+ } \
+ __locked; \
+ })
+
+#define __local_trylock_irqsave(lock, flags) \
+ ({ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __local_trylock(lock); \
+ })
+
+/* migration must be disabled before calling __local_lock_is_locked */
+#define __local_lock_is_locked(__lock) \
+ (rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
+
+#endif /* CONFIG_PREEMPT_RT */
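The _Generic dispatch used by __local_lock_acquire()/__local_lock_release() is plain C11; the cast to local_lock_t * is safe because local_trylock_t begins with the same members and merely appends ->acquired. A self-contained userspace sketch of the same compile-time dispatch:

#include <stdio.h>

typedef struct { int dep; } local_lock_t;
typedef struct { int dep; unsigned char acquired; } local_trylock_t;

#define lock_flavor(lock)					\
	_Generic((lock),					\
		local_trylock_t *: "trylock: toggles ->acquired",\
		local_lock_t *:    "plain: no extra state")

int main(void)
{
	local_lock_t l;
	local_trylock_t tl;

	puts(lock_flavor(&l));	/* plain: no extra state */
	puts(lock_flavor(&tl));	/* trylock: toggles ->acquired */
	return 0;
}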
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 0520c0cd73f4..c53c81242e72 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -20,6 +20,7 @@
/* Dummy declarations */
struct svc_rqst;
struct rpc_task;
+struct rpc_clnt;
/*
* This is the set of functions for lockd->nfsd communication
@@ -27,7 +28,8 @@ struct rpc_task;
struct nlmsvc_binding {
__be32 (*fopen)(struct svc_rqst *,
struct nfs_fh *,
- struct file **);
+ struct file **,
+ int mode);
void (*fclose)(struct file *);
};
@@ -55,6 +57,7 @@ struct nlmclnt_initdata {
extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init);
extern void nlmclnt_done(struct nlm_host *host);
+extern struct rpc_clnt *nlmclnt_rpc_clnt(struct nlm_host *host);
/*
* NLM client operations provide a means to modify RPC processing of NLM
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 666f5f310a04..330e38776bb2 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -10,6 +10,9 @@
#ifndef LINUX_LOCKD_LOCKD_H
#define LINUX_LOCKD_LOCKD_H
+/* XXX: a lot of this should really be under fs/lockd. */
+
+#include <linux/exportfs.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/ipv6.h>
@@ -97,21 +100,11 @@ struct nsm_handle {
/*
* Rigorous type checking on sockaddr type conversions
*/
-static inline struct sockaddr_in *nlm_addr_in(const struct nlm_host *host)
-{
- return (struct sockaddr_in *)&host->h_addr;
-}
-
static inline struct sockaddr *nlm_addr(const struct nlm_host *host)
{
return (struct sockaddr *)&host->h_addr;
}
-static inline struct sockaddr_in *nlm_srcaddr_in(const struct nlm_host *host)
-{
- return (struct sockaddr_in *)&host->h_srcaddr;
-}
-
static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host)
{
return (struct sockaddr *)&host->h_srcaddr;
@@ -129,7 +122,16 @@ struct nlm_lockowner {
uint32_t pid;
};
-struct nlm_wait;
+/*
+ * This is the representation of a blocked client lock.
+ */
+struct nlm_wait {
+ struct list_head b_list; /* linked list */
+ wait_queue_head_t b_wait; /* where to wait on */
+ struct nlm_host *b_host;
+ struct file_lock *b_lock; /* local file lock */
+ __be32 b_status; /* grant callback status */
+};
/*
* Memory chunk for NLM client RPC request.
@@ -154,7 +156,8 @@ struct nlm_rqst {
struct nlm_file {
struct hlist_node f_list; /* linked list */
struct nfs_fh f_handle; /* NFS file handle */
- struct file * f_file; /* VFS file pointer */
+ struct file * f_file[2]; /* VFS file pointers,
+ indexed by O_ flags */
struct nlm_share * f_shares; /* DOS shares */
struct list_head f_blocks; /* blocked locks */
unsigned int f_locks; /* guesstimate # of locks */
@@ -193,15 +196,17 @@ struct nlm_block {
* Global variables
*/
extern const struct rpc_program nlm_program;
-extern const struct svc_procedure nlmsvc_procedures[];
+extern const struct svc_procedure nlmsvc_procedures[24];
#ifdef CONFIG_LOCKD_V4
-extern const struct svc_procedure nlmsvc_procedures4[];
+extern const struct svc_procedure nlmsvc_procedures4[24];
#endif
extern int nlmsvc_grace_period;
-extern unsigned long nlmsvc_timeout;
+extern unsigned long nlm_timeout;
extern bool nsm_use_hostnames;
extern u32 nsm_local_state;
+extern struct timer_list nlmsvc_retry;
+
/*
* Lockd client functions
*/
@@ -209,9 +214,11 @@ struct nlm_rqst * nlm_alloc_call(struct nlm_host *host);
int nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
int nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *);
void nlmclnt_release_call(struct nlm_rqst *);
-struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl);
-void nlmclnt_finish_block(struct nlm_wait *block);
-int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
+void nlmclnt_prepare_block(struct nlm_wait *block, struct nlm_host *host,
+ struct file_lock *fl);
+void nlmclnt_queue_block(struct nlm_wait *block);
+__be32 nlmclnt_dequeue_block(struct nlm_wait *block);
+int nlmclnt_wait(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
__be32 nlmclnt_grant(const struct sockaddr *addr,
const struct nlm_lock *lock);
void nlmclnt_recovery(struct nlm_host *);
@@ -267,15 +274,16 @@ typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref);
/*
* Server-side lock handling
*/
+int lock_to_openmode(struct file_lock *);
__be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *,
struct nlm_host *, struct nlm_lock *, int,
struct nlm_cookie *, int);
__be32 nlmsvc_unlock(struct net *net, struct nlm_file *, struct nlm_lock *);
-__be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *,
- struct nlm_host *, struct nlm_lock *,
- struct nlm_lock *, struct nlm_cookie *);
+__be32 nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
+ struct nlm_host *host, struct nlm_lock *lock,
+ struct nlm_lock *conflock);
__be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *);
-unsigned long nlmsvc_retry_blocked(void);
+void nlmsvc_retry_blocked(struct svc_rqst *rqstp);
void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
nlm_host_match_fn_t match);
void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
@@ -286,8 +294,9 @@ void nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t);
* File handling for the server personality
*/
__be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **,
- struct nfs_fh *);
+ struct nlm_lock *);
void nlm_release_file(struct nlm_file *);
+void nlmsvc_put_lockowner(struct nlm_lockowner *);
void nlmsvc_release_lockowner(struct nlm_lock *);
void nlmsvc_mark_resources(struct net *);
void nlmsvc_free_host_resources(struct nlm_host *);
@@ -299,9 +308,21 @@ void nlmsvc_invalidate_all(void);
int nlmsvc_unlock_all_by_sb(struct super_block *sb);
int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
+static inline struct file *nlmsvc_file_file(const struct nlm_file *file)
+{
+ return file->f_file[O_RDONLY] ?
+ file->f_file[O_RDONLY] : file->f_file[O_WRONLY];
+}
+
static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
{
- return locks_inode(file->f_file);
+ return file_inode(nlmsvc_file_file(file));
+}
+
+static inline bool
+nlmsvc_file_cannot_lock(const struct nlm_file *file)
+{
+ return exportfs_cannot_lock(nlmsvc_file_file(file)->f_path.dentry->d_sb->s_export_op);
}
static inline int __nlm_privileged_request4(const struct sockaddr *sap)
@@ -361,12 +382,12 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp)
static inline int nlm_compare_locks(const struct file_lock *fl1,
const struct file_lock *fl2)
{
- return locks_inode(fl1->fl_file) == locks_inode(fl2->fl_file)
- && fl1->fl_pid == fl2->fl_pid
- && fl1->fl_owner == fl2->fl_owner
+ return file_inode(fl1->c.flc_file) == file_inode(fl2->c.flc_file)
+ && fl1->c.flc_pid == fl2->c.flc_pid
+ && fl1->c.flc_owner == fl2->c.flc_owner
&& fl1->fl_start == fl2->fl_start
&& fl1->fl_end == fl2->fl_end
- &&(fl1->fl_type == fl2->fl_type || fl2->fl_type == F_UNLCK);
+ &&(fl1->c.flc_type == fl2->c.flc_type || fl2->c.flc_type == F_UNLCK);
}
extern const struct lock_manager_operations nlmsvc_lock_operations;
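The two-slot f_file[] works because O_RDONLY is 0 and O_WRONLY is 1, so an open mode doubles as an array index. A sketch of a lock_to_openmode() consistent with that layout (the actual implementation lives in fs/lockd and may differ in detail):

int lock_to_openmode(struct file_lock *fl)
{
	/* write locks need a file opened for writing; everything
	 * else can be serviced through the read-only open */
	return fl->c.flc_type == F_WRLCK ? O_WRONLY : O_RDONLY;
}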
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index a98309c0121c..17d53165d9f2 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -11,6 +11,7 @@
#define LOCKD_XDR_H
#include <linux/fs.h>
+#include <linux/filelock.h>
#include <linux/nfs.h>
#include <linux/sunrpc/xdr.h>
@@ -41,6 +42,8 @@ struct nlm_lock {
struct nfs_fh fh;
struct xdr_netobj oh;
u32 svid;
+ u64 lock_start;
+ u64 lock_len;
struct file_lock fl;
};
@@ -49,7 +52,7 @@ struct nlm_lock {
* FreeBSD uses 16, Apple Mac OS X 10.3 uses 20. Therefore we set it to
* 32 bytes.
*/
-
+
struct nlm_cookie
{
unsigned char data[NLM_MAXCOOKIELEN];
@@ -70,8 +73,6 @@ struct nlm_args {
u32 fsm_mode;
};
-typedef struct nlm_args nlm_args;
-
/*
* Generic lockd result
*/
@@ -96,18 +97,19 @@ struct nlm_reboot {
*/
#define NLMSVC_XDRSIZE sizeof(struct nlm_args)
-int nlmsvc_decode_testargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_testres(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_res(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_res(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_void(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_void(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *);
-int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_notify(struct svc_rqst *, __be32 *);
-int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *);
+bool nlmsvc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+
+bool nlmsvc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlmsvc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
#endif /* LOCKD_XDR_H */
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
index 5ae766f26e04..72831e35dca3 100644
--- a/include/linux/lockd/xdr4.h
+++ b/include/linux/lockd/xdr4.h
@@ -22,21 +22,21 @@
#define nlm4_fbig cpu_to_be32(NLM_FBIG)
#define nlm4_failed cpu_to_be32(NLM_FAILED)
-
-
-int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_testres(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_unlockargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_res(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_res(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_void(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_void(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *);
-int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_notify(struct svc_rqst *, __be32 *);
-int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *);
+void nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len);
+bool nlm4svc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+
+bool nlm4svc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr);
+bool nlm4svc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
extern const struct rpc_version nlm_version4;
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 5cf387813754..dd634103b014 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -16,10 +16,6 @@
struct task_struct;
-/* for sysctl */
-extern int prove_locking;
-extern int lock_stat;
-
#ifdef CONFIG_LOCKDEP
#include <linux/linkage.h>
@@ -86,62 +82,6 @@ struct lock_chain {
u64 chain_key;
};
-#define MAX_LOCKDEP_KEYS_BITS 13
-#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
-#define INITIAL_CHAIN_KEY -1
-
-struct held_lock {
- /*
- * One-way hash of the dependency chain up to this point. We
- * hash the hashes step by step as the dependency chain grows.
- *
- * We use it for dependency-caching and we skip detection
- * passes and dependency-updates if there is a cache-hit, so
- * it is absolutely critical for 100% coverage of the validator
- * to have a unique key value for every unique dependency path
- * that can occur in the system, to make a unique hash value
- * as likely as possible - hence the 64-bit width.
- *
- * The task struct holds the current hash value (initialized
- * with zero), here we store the previous hash value:
- */
- u64 prev_chain_key;
- unsigned long acquire_ip;
- struct lockdep_map *instance;
- struct lockdep_map *nest_lock;
-#ifdef CONFIG_LOCK_STAT
- u64 waittime_stamp;
- u64 holdtime_stamp;
-#endif
- /*
- * class_idx is zero-indexed; it points to the element in
- * lock_classes this held lock instance belongs to. class_idx is in
- * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
- */
- unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
- /*
- * The lock-stack is unified in that the lock chains of interrupt
- * contexts nest ontop of process context chains, but we 'separate'
- * the hashes by starting with 0 if we cross into an interrupt
- * context, and we also keep do not add cross-context lock
- * dependencies - the lock usage graph walking covers that area
- * anyway, and we'd just unnecessarily increase the number of
- * dependencies otherwise. [Note: hardirq and softirq contexts
- * are separated from each other too.]
- *
- * The following field is used to detect when we cross into an
- * interrupt context:
- */
- unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
- unsigned int trylock:1; /* 16 bits */
-
- unsigned int read:2; /* see lock_acquire() comment */
- unsigned int check:1; /* see lock_acquire() comment */
- unsigned int hardirqs_off:1;
- unsigned int references:12; /* 32 bits */
- unsigned int pin_count;
-};
-
/*
* Initialization, self-test and debugging-output methods:
*/
@@ -192,7 +132,7 @@ static inline void
lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
- lockdep_init_map_type(lock, name, key, subclass, inner, LD_WAIT_INV, LD_LOCK_NORMAL);
+ lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}
static inline void
@@ -215,28 +155,50 @@ static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
* or they are too narrow (they suffer from a false class-split):
*/
#define lockdep_set_class(lock, key) \
- lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0, \
- (lock)->dep_map.wait_type_inner, \
- (lock)->dep_map.wait_type_outer)
+ lockdep_init_map_type(&(lock)->dep_map, #key, key, 0, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
#define lockdep_set_class_and_name(lock, key, name) \
- lockdep_init_map_waits(&(lock)->dep_map, name, key, 0, \
- (lock)->dep_map.wait_type_inner, \
- (lock)->dep_map.wait_type_outer)
+ lockdep_init_map_type(&(lock)->dep_map, name, key, 0, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
#define lockdep_set_class_and_subclass(lock, key, sub) \
- lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
- (lock)->dep_map.wait_type_inner, \
- (lock)->dep_map.wait_type_outer)
+ lockdep_init_map_type(&(lock)->dep_map, #key, key, sub, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
#define lockdep_set_subclass(lock, sub) \
- lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
- (lock)->dep_map.wait_type_inner, \
- (lock)->dep_map.wait_type_outer)
+ lockdep_init_map_type(&(lock)->dep_map, (lock)->dep_map.name, (lock)->dep_map.key, sub,\
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer, \
+ (lock)->dep_map.lock_type)
+/**
+ * lockdep_set_novalidate_class: disable checking of lock ordering on a given
+ * lock
+ * @lock: Lock to mark
+ *
+ * Lockdep will still record that this lock has been taken, and print held
+ * instances when dumping locks
+ */
#define lockdep_set_novalidate_class(lock) \
lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
+/**
+ * lockdep_set_notrack_class: disable lockdep tracking of a given lock entirely
+ * @lock: Lock to mark
+ *
+ * Bigger hammer than lockdep_set_novalidate_class: so far just for bcachefs,
+ * which takes more locks than lockdep is able to track (48).
+ */
+#define lockdep_set_notrack_class(lock) \
+ lockdep_set_class_and_name(lock, &__lockdep_no_track__, #lock)
+
/*
* Compare locking classes
*/
@@ -268,6 +230,10 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
extern void lock_release(struct lockdep_map *lock, unsigned long ip);
+extern void lock_sync(struct lockdep_map *lock, unsigned int subclass,
+ int read, int check, struct lockdep_map *nest_lock,
+ unsigned long ip);
+
/* lock_is_held_type() returns */
#define LOCK_STATE_UNKNOWN -1
#define LOCK_STATE_NOT_HELD 0
@@ -290,6 +256,9 @@ extern void lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
unsigned long ip);
+#define lock_set_novalidate_class(l, n, i) \
+ lock_set_class(l, n, &__lockdep_no_validate__, 0, i)
+
static inline void lock_set_subclass(struct lockdep_map *lock,
unsigned int subclass, unsigned long ip)
{
@@ -306,31 +275,29 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
-#define lockdep_assert_held(l) do { \
- WARN_ON(debug_locks && \
- lockdep_is_held(l) == LOCK_STATE_NOT_HELD); \
- } while (0)
+#define lockdep_assert(cond) \
+ do { WARN_ON(debug_locks && !(cond)); } while (0)
+
+#define lockdep_assert_once(cond) \
+ do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0)
-#define lockdep_assert_not_held(l) do { \
- WARN_ON(debug_locks && \
- lockdep_is_held(l) == LOCK_STATE_HELD); \
- } while (0)
+#define lockdep_assert_held(l) \
+ lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
-#define lockdep_assert_held_write(l) do { \
- WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \
- } while (0)
+#define lockdep_assert_not_held(l) \
+ lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD)
-#define lockdep_assert_held_read(l) do { \
- WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \
- } while (0)
+#define lockdep_assert_held_write(l) \
+ lockdep_assert(lockdep_is_held_type(l, 0))
-#define lockdep_assert_held_once(l) do { \
- WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
- } while (0)
+#define lockdep_assert_held_read(l) \
+ lockdep_assert(lockdep_is_held_type(l, 1))
-#define lockdep_assert_none_held_once() do { \
- WARN_ON_ONCE(debug_locks && current->lockdep_depth); \
- } while (0)
+#define lockdep_assert_held_once(l) \
+ lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD)
+
+#define lockdep_assert_none_held_once() \
+ lockdep_assert_once(!current->lockdep_depth)
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
@@ -338,6 +305,16 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
#define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c))
+/*
+ * Must use lock_map_acquire_try() with override maps to avoid
+ * lockdep thinking they participate in the block chain.
+ */
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \
+ struct lockdep_map _name = { \
+ .name = #_name "-wait-type-override", \
+ .wait_type_inner = _wait_type, \
+ .lock_type = LD_LOCK_WAIT_OVERRIDE, }
+
#else /* !CONFIG_LOCKDEP */
static inline void lockdep_init_task(struct task_struct *task)
@@ -359,7 +336,8 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lock_release(l, i) do { } while (0)
# define lock_downgrade(l, i) do { } while (0)
-# define lock_set_class(l, n, k, s, i) do { } while (0)
+# define lock_set_class(l, n, key, s, i) do { (void)(key); } while (0)
+# define lock_set_novalidate_class(l, n, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_init() do { } while (0)
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
@@ -378,6 +356,7 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
#define lockdep_set_subclass(lock, sub) do { } while (0)
#define lockdep_set_novalidate_class(lock) do { } while (0)
+#define lockdep_set_notrack_class(lock) do { } while (0)
/*
* We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
@@ -407,6 +386,9 @@ extern int lock_is_held(const void *);
extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r) (1)
+#define lockdep_assert(c) do { } while (0)
+#define lockdep_assert_once(c) do { } while (0)
+
#define lockdep_assert_held(l) do { (void)(l); } while (0)
#define lockdep_assert_not_held(l) do { (void)(l); } while (0)
#define lockdep_assert_held_write(l) do { (void)(l); } while (0)
@@ -422,15 +404,25 @@ extern int lockdep_is_held(const void *);
#define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0)
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \
+ struct lockdep_map __maybe_unused _name = {}
+
#endif /* !LOCKDEP */
+#ifdef CONFIG_PROVE_LOCKING
+void lockdep_set_lock_cmp_fn(struct lockdep_map *, lock_cmp_fn, lock_print_fn);
+
+#define lock_set_cmp_fn(lock, ...) lockdep_set_lock_cmp_fn(&(lock)->dep_map, __VA_ARGS__)
+#else
+#define lock_set_cmp_fn(lock, ...) do { } while (0)
+#endif
+
enum xhlock_context_t {
XHLOCK_HARD,
XHLOCK_SOFT,
XHLOCK_CTX_NR,
};
-#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
* To initialize a lockdep_map statically use this macro.
* Note that _name must not be NULL.
@@ -480,23 +472,6 @@ do { \
#endif /* CONFIG_LOCK_STAT */
-#ifdef CONFIG_LOCKDEP
-
-/*
- * On lockdep we dont want the hand-coded irq-enable of
- * _raw_*_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
- LOCK_CONTENDED((_lock), (try), (lock))
-
-#else /* CONFIG_LOCKDEP */
-
-#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
- lockfl((_lock), (flags))
-
-#endif /* CONFIG_LOCKDEP */
-
#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
@@ -564,9 +539,11 @@ do { \
#define rwsem_release(l, i) lock_release(l, i)
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_try(l) lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l) lock_release(l, _THIS_IP_)
+#define lock_map_sync(l) lock_sync(l, 0, 0, 1, NULL, _THIS_IP_)
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) \
@@ -610,6 +587,12 @@ do { \
WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)
+#define lockdep_assert_no_hardirq() \
+do { \
+ WARN_ON_ONCE(__lockdep_enabled && (this_cpu_read(hardirq_context) || \
+ !this_cpu_read(hardirqs_enabled))); \
+} while (0)
+
#define lockdep_assert_preemption_enabled() \
do { \
WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
@@ -633,9 +616,11 @@ do { \
#define lockdep_assert_in_softirq() \
do { \
WARN_ON_ONCE(__lockdep_enabled && \
- (!in_softirq() || in_irq() || in_nmi())); \
+ (!in_softirq() || in_hardirq() || in_nmi())); \
} while (0)
+extern void lockdep_assert_in_softirq_func(void);
+
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
@@ -644,10 +629,12 @@ do { \
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
+# define lockdep_assert_no_hardirq() do { } while (0)
# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)
# define lockdep_assert_in_softirq() do { } while (0)
+# define lockdep_assert_in_softirq_func() do { } while (0)
#endif
#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
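A sketch of the override-map pattern described above, modelled on its intended use around a wait that legitimately violates the caller's wait type (function and map names are illustrative):

static void refill_object_pool(void)
{
	DEFINE_WAIT_OVERRIDE_MAP(refill_map, LD_WAIT_SLEEP);

	/*
	 * Tell lockdep the wait below is intentionally sleeping;
	 * acquire_try keeps the map out of the dependency chains.
	 */
	lock_map_acquire_try(&refill_map);
	/* ... allocate/sleep here ... */
	lock_map_release(&refill_map);
}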
diff --git a/include/linux/lockdep_api.h b/include/linux/lockdep_api.h
new file mode 100644
index 000000000000..907e66979ab2
--- /dev/null
+++ b/include/linux/lockdep_api.h
@@ -0,0 +1 @@
+#include <linux/lockdep.h>
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index 2ec9ff5a7fff..eae115a26488 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -21,7 +21,7 @@ enum lockdep_wait_type {
LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */
#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
- LD_WAIT_CONFIG, /* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */
+ LD_WAIT_CONFIG, /* preemptible in PREEMPT_RT, spinlock_t etc.. */
#else
LD_WAIT_CONFIG = LD_WAIT_SPIN,
#endif
@@ -33,6 +33,7 @@ enum lockdep_wait_type {
enum lockdep_lock_type {
LD_LOCK_NORMAL = 0, /* normal, catch all */
LD_LOCK_PERCPU, /* percpu */
+ LD_LOCK_WAIT_OVERRIDE, /* annotation */
LD_LOCK_MAX,
};
@@ -52,7 +53,7 @@ enum lockdep_lock_type {
* NR_LOCKDEP_CACHING_CLASSES ... Number of classes
* cached in the instance of lockdep_map
*
- * Currently main class (subclass == 0) and signle depth subclass
+ * Currently main class (subclass == 0) and single depth subclass
* are cached in lockdep_map. This optimization is mainly targeting
* on rq->lock. double_rq_lock() acquires this highly competitive with
* single depth.
@@ -79,11 +80,17 @@ struct lock_class_key {
};
extern struct lock_class_key __lockdep_no_validate__;
+extern struct lock_class_key __lockdep_no_track__;
struct lock_trace;
#define LOCKSTAT_POINTS 4
+struct lockdep_map;
+typedef int (*lock_cmp_fn)(const struct lockdep_map *a,
+ const struct lockdep_map *b);
+typedef void (*lock_print_fn)(const struct lockdep_map *map);
+
/*
* The lock-class itself. The order of the structure members matters.
* reinit_class() zeroes the key member and all subsequent members.
@@ -109,6 +116,9 @@ struct lock_class {
struct list_head locks_after, locks_before;
const struct lockdep_subclass_key *key;
+ lock_cmp_fn cmp_fn;
+ lock_print_fn print_fn;
+
unsigned int subclass;
unsigned int dep_gen_id;
@@ -118,12 +128,12 @@ struct lock_class {
unsigned long usage_mask;
const struct lock_trace *usage_traces[LOCK_TRACE_STATES];
+ const char *name;
/*
* Generation counter, when doing certain classes of graph walking,
* to ensure that we check one node only once:
*/
int name_version;
- const char *name;
u8 wait_type_inner;
u8 wait_type_outer;
@@ -165,7 +175,7 @@ struct lock_class_stats {
unsigned long bounces[nr_bounce_types];
};
-struct lock_class_stats lock_stats(struct lock_class *class);
+void lock_stats(struct lock_class *class, struct lock_class_stats *stats);
void clear_lock_stats(struct lock_class *class);
#endif
@@ -189,6 +199,63 @@ struct lockdep_map {
struct pin_cookie { unsigned int val; };
+#define MAX_LOCKDEP_KEYS_BITS 13
+#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
+#define INITIAL_CHAIN_KEY -1
+
+struct held_lock {
+ /*
+ * One-way hash of the dependency chain up to this point. We
+ * hash the hashes step by step as the dependency chain grows.
+ *
+ * We use it for dependency-caching and we skip detection
+ * passes and dependency-updates if there is a cache-hit, so
+ * it is absolutely critical for 100% coverage of the validator
+ * to have a unique key value for every unique dependency path
+ * that can occur in the system, to make a unique hash value
+ * as likely as possible - hence the 64-bit width.
+ *
+ * The task struct holds the current hash value (initialized
+ * with zero), here we store the previous hash value:
+ */
+ u64 prev_chain_key;
+ unsigned long acquire_ip;
+ struct lockdep_map *instance;
+ struct lockdep_map *nest_lock;
+#ifdef CONFIG_LOCK_STAT
+ u64 waittime_stamp;
+ u64 holdtime_stamp;
+#endif
+ /*
+ * class_idx is zero-indexed; it points to the element in
+ * lock_classes this held lock instance belongs to. class_idx is in
+ * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
+ */
+ unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
+ /*
+ * The lock-stack is unified in that the lock chains of interrupt
+ * contexts nest ontop of process context chains, but we 'separate'
+ * the hashes by starting with 0 if we cross into an interrupt
+ * context, and we also keep do not add cross-context lock
+ * dependencies - the lock usage graph walking covers that area
+ * anyway, and we'd just unnecessarily increase the number of
+ * dependencies otherwise. [Note: hardirq and softirq contexts
+ * are separated from each other too.]
+ *
+ * The following field is used to detect when we cross into an
+ * interrupt context:
+ */
+ unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
+ unsigned int trylock:1; /* 16 bits */
+
+ unsigned int read:2; /* see lock_acquire() comment */
+ unsigned int check:1; /* see lock_acquire() comment */
+ unsigned int hardirqs_off:1;
+ unsigned int sync:1;
+ unsigned int references:11; /* 32 bits */
+ unsigned int pin_count;
+};
+
#else /* !CONFIG_LOCKDEP */
/*
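A sketch of a comparator/printer pair matching the new typedefs, registered through the lock_set_cmp_fn() helper added in <linux/lockdep.h>. This one permits same-class nesting only in ascending address order; a negative return is taken to mean the (a, b) acquisition order is valid, per the cmp_fn convention:

static int addr_order_cmp_fn(const struct lockdep_map *a,
			     const struct lockdep_map *b)
{
	return a < b ? -1 : (a > b ? 1 : 0);
}

static void addr_order_print_fn(const struct lockdep_map *map)
{
	printk(KERN_DEBUG "lockdep map at %p\n", map);
}

/* registration, e.g. right after initialising obj->lock:
 *	lock_set_cmp_fn(&obj->lock, addr_order_cmp_fn, addr_order_print_fn);
 */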
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 99f17cc8e163..815d871fadfc 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -34,15 +34,27 @@ struct lockref {
};
};
-extern void lockref_get(struct lockref *);
-extern int lockref_put_return(struct lockref *);
-extern int lockref_get_not_zero(struct lockref *);
-extern int lockref_put_not_zero(struct lockref *);
-extern int lockref_get_or_lock(struct lockref *);
-extern int lockref_put_or_lock(struct lockref *);
-
-extern void lockref_mark_dead(struct lockref *);
-extern int lockref_get_not_dead(struct lockref *);
+/**
+ * lockref_init - Initialize a lockref
+ * @lockref: pointer to lockref structure
+ *
+ * Initializes @lockref->count to 1.
+ */
+static inline void lockref_init(struct lockref *lockref)
+{
+ spin_lock_init(&lockref->lock);
+ lockref->count = 1;
+}
+
+void lockref_get(struct lockref *lockref);
+int lockref_put_return(struct lockref *lockref);
+bool lockref_get_not_zero(struct lockref *lockref);
+bool lockref_put_or_lock(struct lockref *lockref);
+#define lockref_put_or_lock(_lockref) \
+ (!__cond_lock((_lockref)->lock, !lockref_put_or_lock(_lockref)))
+
+void lockref_mark_dead(struct lockref *lockref);
+bool lockref_get_not_dead(struct lockref *lockref);
/* Must be called under spinlock for reliable results */
static inline bool __lockref_is_dead(const struct lockref *l)
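A sketch of the new initialiser together with the lockless get path (entry type and helper names are illustrative):

struct cache_entry {
	struct lockref	ref;
	/* ... payload ... */
};

static void cache_entry_setup(struct cache_entry *e)
{
	lockref_init(&e->ref);		/* spinlock ready, count = 1 */
}

static struct cache_entry *cache_entry_tryget(struct cache_entry *e)
{
	/* cmpxchg fast path; fails once the teardown side has
	 * called lockref_mark_dead() under the lock */
	return lockref_get_not_dead(&e->ref) ? e : NULL;
}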
diff --git a/include/linux/log2.h b/include/linux/log2.h
index df0b155c2141..2eac3fc9303d 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -18,7 +18,7 @@
* - the arch is not required to handle n==0 if implementing the fallback
*/
#ifndef CONFIG_ARCH_HAS_ILOG2_U32
-static inline __attribute__((const))
+static __always_inline __attribute__((const))
int __ilog2_u32(u32 n)
{
return fls(n) - 1;
@@ -26,7 +26,7 @@ int __ilog2_u32(u32 n)
#endif
#ifndef CONFIG_ARCH_HAS_ILOG2_U64
-static inline __attribute__((const))
+static __always_inline __attribute__((const))
int __ilog2_u64(u64 n)
{
return fls64(n) - 1;
@@ -41,7 +41,7 @@ int __ilog2_u64(u64 n)
* *not* considered a power of two.
* Return: true if @n is a power of 2, otherwise false.
*/
-static inline __attribute__((const))
+static __always_inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
@@ -255,4 +255,18 @@ int __bits_per(unsigned long n)
) : \
__bits_per(n) \
)
+
+/**
+ * max_pow_of_two_factor - return highest power-of-2 factor
+ * @n: parameter
+ *
+ * Find the highest power of 2 that evenly divides @n.
+ * 0 is returned only for n == 0; for odd @n (including 1) the result is 1.
+ */
+static inline __attribute__((const))
+unsigned int max_pow_of_two_factor(unsigned int n)
+{
+ return n & -n;
+}
+
#endif /* _LINUX_LOG2_H */
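The n & -n trick isolates the lowest set bit of n, which in two's complement is exactly the largest power of 2 dividing n. A self-contained userspace check of the boundary cases:

#include <assert.h>

static unsigned int max_pow_of_two_factor(unsigned int n)
{
	return n & -n;
}

int main(void)
{
	/* 24 = 0b11000, -24 = ...01000 (two's complement),
	 * so 24 & -24 = 0b01000 = 8, and 24 = 8 * 3 */
	assert(max_pow_of_two_factor(24) == 8);
	assert(max_pow_of_two_factor(1) == 1);	/* 2^0 divides 1 */
	assert(max_pow_of_two_factor(0) == 0);
	return 0;
}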
diff --git a/include/linux/logic_iomem.h b/include/linux/logic_iomem.h
new file mode 100644
index 000000000000..3fa65c964379
--- /dev/null
+++ b/include/linux/logic_iomem.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Intel Corporation
+ * Author: johannes@sipsolutions.net
+ */
+#ifndef __LOGIC_IOMEM_H
+#define __LOGIC_IOMEM_H
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+/**
+ * struct logic_iomem_ops - emulated IO memory ops
+ * @read: read an 8, 16, 32 or 64 bit quantity from the given offset,
+ * size is given in bytes (1, 2, 4 or 8)
+ * (64-bit only necessary if CONFIG_64BIT is set)
+ * @write: write an 8, 16, 32 or 64 bit quantity to the given offset,
+ * size is given in bytes (1, 2, 4 or 8)
+ * (64-bit only necessary if CONFIG_64BIT is set)
+ * @set: optional, for memset_io()
+ * @copy_from: optional, for memcpy_fromio()
+ * @copy_to: optional, for memcpy_toio()
+ * @unmap: optional, this region is getting unmapped
+ */
+struct logic_iomem_ops {
+ unsigned long (*read)(void *priv, unsigned int offset, int size);
+ void (*write)(void *priv, unsigned int offset, int size,
+ unsigned long val);
+
+ void (*set)(void *priv, unsigned int offset, u8 value, int size);
+ void (*copy_from)(void *priv, void *buffer, unsigned int offset,
+ int size);
+ void (*copy_to)(void *priv, unsigned int offset, const void *buffer,
+ int size);
+
+ void (*unmap)(void *priv);
+};
+
+/**
+ * struct logic_iomem_region_ops - ops for an IO memory handler
+ * @map: map a range in the registered IO memory region, must
+ * fill *ops with the ops and may fill *priv to be passed
+ * to the ops. The offset is given as the offset into the
+ * registered resource region.
+ * The return value is negative for errors, or >= 0 for
+ * success. On success, the return value is added to the
+ * offset for later ops, to allow for partial mappings.
+ */
+struct logic_iomem_region_ops {
+ long (*map)(unsigned long offset, size_t size,
+ const struct logic_iomem_ops **ops,
+ void **priv);
+};
+
+/**
+ * logic_iomem_add_region - register an IO memory region
+ * @resource: the resource description for this region
+ * @ops: the IO memory mapping ops for this resource
+ */
+int logic_iomem_add_region(struct resource *resource,
+ const struct logic_iomem_region_ops *ops);
+
+#endif /* __LOGIC_IOMEM_H */
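A toy backend showing the shape of the two required ops plus a map hook; the region is backed by plain memory, and the partial-size memcpy assumes a little-endian host (all names are illustrative):

static u8 demo_buf[4096];

static unsigned long demo_read(void *priv, unsigned int offset, int size)
{
	unsigned long val = 0;

	memcpy(&val, demo_buf + offset, size);	/* size is 1, 2, 4 or 8 */
	return val;
}

static void demo_write(void *priv, unsigned int offset, int size,
		       unsigned long val)
{
	memcpy(demo_buf + offset, &val, size);
}

static const struct logic_iomem_ops demo_io_ops = {
	.read	= demo_read,
	.write	= demo_write,
};

static long demo_map(unsigned long offset, size_t size,
		     const struct logic_iomem_ops **ops, void **priv)
{
	*ops = &demo_io_ops;
	*priv = NULL;
	return 0;	/* no extra offset added for later ops */
}

static const struct logic_iomem_region_ops demo_region_ops = {
	.map = demo_map,
};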
diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
index 54945aa824b4..8f1a9408302f 100644
--- a/include/linux/logic_pio.h
+++ b/include/linux/logic_pio.h
@@ -17,7 +17,7 @@ enum {
struct logic_pio_hwaddr {
struct list_head list;
- struct fwnode_handle *fwnode;
+ const struct fwnode_handle *fwnode;
resource_size_t hw_start;
resource_size_t io_start;
resource_size_t size; /* range size populated */
@@ -39,9 +39,6 @@ struct logic_pio_host_ops {
#ifdef CONFIG_INDIRECT_PIO
u8 logic_inb(unsigned long addr);
-void logic_outb(u8 value, unsigned long addr);
-void logic_outw(u16 value, unsigned long addr);
-void logic_outl(u32 value, unsigned long addr);
u16 logic_inw(unsigned long addr);
u32 logic_inl(unsigned long addr);
void logic_outb(u8 value, unsigned long addr);
@@ -113,8 +110,8 @@ void logic_outsl(unsigned long addr, const void *buffer, unsigned int count);
#endif /* CONFIG_INDIRECT_PIO */
#define MMIO_UPPER_LIMIT (IO_SPACE_LIMIT - PIO_INDIRECT_SIZE)
-struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
-unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
+struct logic_pio_hwaddr *find_io_range_by_fwnode(const struct fwnode_handle *fwnode);
+unsigned long logic_pio_trans_hwaddr(const struct fwnode_handle *fwnode,
resource_size_t hw_addr, resource_size_t size);
int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 429d67d815ce..ff82ef85a084 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -32,7 +32,7 @@ This header file (and its .c file; kernel-doc of functions see there)
Because of this later property, it is called "lru_cache".
As it actually Tracks Objects in an Active SeT, we could also call it
toast (incidentally that is what may happen to the data on the
- backend storage uppon next resync, if we don't get it right).
+ backend storage upon next resync, if we don't get it right).
What for?
@@ -119,7 +119,7 @@ write intent log information, three of which are mentioned here.
*/
/* this defines an element in a tracked set
- * .colision is for hash table lookup.
+ * .collision is for hash table lookup.
* When we process a new IO request, we know its sector, thus can deduce the
* region number (label) easily. To do the label -> object lookup without a
* full list walk, we use a simple hash table.
@@ -145,14 +145,14 @@ write intent log information, three of which are mentioned here.
* But it avoids high order page allocations in kmalloc.
*/
struct lc_element {
- struct hlist_node colision;
+ struct hlist_node collision;
struct list_head list; /* LRU list or free list */
unsigned refcnt;
/* back "pointer" into lc_cache->element[index],
* for paranoia, and for "lc_element_to_index" */
unsigned lc_index;
/* if we want to track a larger set of objects,
- * it needs to become arch independend u64 */
+ * it needs to become an architecture independent u64 */
unsigned lc_number;
/* special label when on free list */
#define LC_FREE (~0U)
@@ -199,7 +199,6 @@ struct lru_cache {
unsigned long flags;
- void *lc_private;
const char *name;
/* nr_elements there */
@@ -241,7 +240,6 @@ extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
unsigned e_count, size_t e_size, size_t e_off);
extern void lc_reset(struct lru_cache *lc);
extern void lc_destroy(struct lru_cache *lc);
-extern void lc_set(struct lru_cache *lc, unsigned int enr, int index);
extern void lc_del(struct lru_cache *lc, struct lc_element *element);
extern struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr);
@@ -263,7 +261,7 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char
*
* Allows (expects) the set to be "dirty". Note that the reference counts and
* order on the active and lru lists may still change. Used to serialize
- * changing transactions. Returns true if we aquired the lock.
+ * changing transactions. Returns true if we acquired the lock.
*/
static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
{
@@ -275,7 +273,7 @@ static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
* @lc: the lru cache to operate on
*
* Note that the reference counts and order on the active and lru lists may
- * still change. Only works on a "clean" set. Returns true if we aquired the
+ * still change. Only works on a "clean" set. Returns true if we acquired the
* lock, which means there are no pending changes, and any further attempt to
* change the set will not succeed until the next lc_unlock().
*/
@@ -297,6 +295,5 @@ extern bool lc_is_used(struct lru_cache *lc, unsigned int enr);
container_of(ptr, type, member)
extern struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i);
-extern unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e);
#endif
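As context for the .collision rename above: the header's comment describes resolving a region number (label) to its tracked object through a small hash table instead of a full list walk. A minimal hedged sketch of that lookup (the slot array, slot count, and function name are hypothetical; the real code lives in lib/lru_cache.c):

#include <linux/list.h>
#include <linux/lru_cache.h>

/* Walk one hash slot via the renamed .collision node and match on
 * the element number (the region label). */
static struct lc_element *lc_find_sketch(struct hlist_head *slots,
					 unsigned int nr_slots,
					 unsigned int enr)
{
	struct lc_element *e;

	hlist_for_each_entry(e, &slots[enr % nr_slots], collision)
		if (e->lc_number == enr)
			return e;
	return NULL;
}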
diff --git a/include/linux/lsm/apparmor.h b/include/linux/lsm/apparmor.h
new file mode 100644
index 000000000000..612cbfacb072
--- /dev/null
+++ b/include/linux/lsm/apparmor.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module interface to other subsystems.
+ * AppArmor presents a single pointer to an aa_label structure.
+ */
+#ifndef __LINUX_LSM_APPARMOR_H
+#define __LINUX_LSM_APPARMOR_H
+
+struct aa_label;
+
+struct lsm_prop_apparmor {
+#ifdef CONFIG_SECURITY_APPARMOR
+ struct aa_label *label;
+#endif
+};
+
+#endif /* ! __LINUX_LSM_APPARMOR_H */
diff --git a/include/linux/lsm/bpf.h b/include/linux/lsm/bpf.h
new file mode 100644
index 000000000000..8106e206fcef
--- /dev/null
+++ b/include/linux/lsm/bpf.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module interface to other subsystems.
+ * BPF may present a single u32 value.
+ */
+#ifndef __LINUX_LSM_BPF_H
+#define __LINUX_LSM_BPF_H
+#include <linux/types.h>
+
+struct lsm_prop_bpf {
+#ifdef CONFIG_BPF_LSM
+ u32 secid;
+#endif
+};
+
+#endif /* ! __LINUX_LSM_BPF_H */
diff --git a/include/linux/lsm/selinux.h b/include/linux/lsm/selinux.h
new file mode 100644
index 000000000000..9455a6b5b910
--- /dev/null
+++ b/include/linux/lsm/selinux.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module interface to other subsystems.
+ * SELinux presents a single u32 value which is known as a secid.
+ */
+#ifndef __LINUX_LSM_SELINUX_H
+#define __LINUX_LSM_SELINUX_H
+#include <linux/types.h>
+
+struct lsm_prop_selinux {
+#ifdef CONFIG_SECURITY_SELINUX
+ u32 secid;
+#endif
+};
+
+#endif /* ! __LINUX_LSM_SELINUX_H */
diff --git a/include/linux/lsm/smack.h b/include/linux/lsm/smack.h
new file mode 100644
index 000000000000..ff730dd7a734
--- /dev/null
+++ b/include/linux/lsm/smack.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Security Module interface to other subsystems.
+ * Smack presents a pointer into the global Smack label list.
+ */
+#ifndef __LINUX_LSM_SMACK_H
+#define __LINUX_LSM_SMACK_H
+
+struct smack_known;
+
+struct lsm_prop_smack {
+#ifdef CONFIG_SECURITY_SMACK
+ struct smack_known *skp;
+#endif
+};
+
+#endif /* ! __LINUX_LSM_SMACK_H */
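The four new lsm/*.h headers above share one pattern: each struct has a single member that vanishes when its LSM is not configured, so an aggregate of all of them only occupies space for the LSMs actually built in. A hedged illustration (the aggregate name below is made up; the real struct lsm_prop is defined elsewhere in the tree):

#include <linux/lsm/apparmor.h>
#include <linux/lsm/bpf.h>
#include <linux/lsm/selinux.h>
#include <linux/lsm/smack.h>

/* Illustrative only: members for unconfigured LSMs are empty structs
 * and contribute no storage to the aggregate. */
struct lsm_prop_sketch {
	struct lsm_prop_apparmor apparmor;
	struct lsm_prop_bpf bpf;
	struct lsm_prop_selinux selinux;
	struct lsm_prop_smack smack;
};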
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index cd23355d2271..382c56a97bba 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -5,7 +5,7 @@
*
* Author : Etienne BASSET <etienne.basset@ensta.org>
*
- * All credits to : Stephen Smalley, <sds@tycho.nsa.gov>
+ * All credits to : Stephen Smalley
* All BUGS to : Etienne BASSET <etienne.basset@ensta.org>
*/
#ifndef _LSM_COMMON_LOGGING_
@@ -48,13 +48,13 @@ struct lsm_ioctlop_audit {
};
struct lsm_ibpkey_audit {
- u64 subnet_prefix;
- u16 pkey;
+ u64 subnet_prefix;
+ u16 pkey;
};
struct lsm_ibendport_audit {
- char dev_name[IB_DEVICE_NAME_MAX];
- u8 port;
+ const char *dev_name;
+ u8 port;
};
/* Auxiliary data to use in generating the audit record. */
@@ -76,6 +76,8 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_IBENDPORT 14
#define LSM_AUDIT_DATA_LOCKDOWN 15
#define LSM_AUDIT_DATA_NOTIFICATION 16
+#define LSM_AUDIT_DATA_ANONINODE 17
+#define LSM_AUDIT_DATA_NLMSGTYPE 18
union {
struct path path;
struct dentry *dentry;
@@ -96,6 +98,8 @@ struct common_audit_data {
struct lsm_ibpkey_audit *ibpkey;
struct lsm_ibendport_audit *ibendport;
int reason;
+ const char *anonclass;
+ u16 nlmsg_type;
} u;
/* this union contains LSM specific data */
union {
@@ -114,14 +118,36 @@ struct common_audit_data {
#define v4info fam.v4
#define v6info fam.v6
+#ifdef CONFIG_AUDIT
+
int ipv4_skb_to_auditdata(struct sk_buff *skb,
struct common_audit_data *ad, u8 *proto);
+#if IS_ENABLED(CONFIG_IPV6)
int ipv6_skb_to_auditdata(struct sk_buff *skb,
struct common_audit_data *ad, u8 *proto);
+#endif /* IS_ENABLED(CONFIG_IPV6) */
void common_lsm_audit(struct common_audit_data *a,
void (*pre_audit)(struct audit_buffer *, void *),
void (*post_audit)(struct audit_buffer *, void *));
+void audit_log_lsm_data(struct audit_buffer *ab,
+ const struct common_audit_data *a);
+
+#else /* CONFIG_AUDIT */
+
+static inline void common_lsm_audit(struct common_audit_data *a,
+ void (*pre_audit)(struct audit_buffer *, void *),
+ void (*post_audit)(struct audit_buffer *, void *))
+{
+}
+
+static inline void audit_log_lsm_data(struct audit_buffer *ab,
+ const struct common_audit_data *a)
+{
+}
+
+#endif /* CONFIG_AUDIT */
+
#endif
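The lsm_audit.h hunks above add two audit data types (anonymous-inode class and netlink message type) and, for the !CONFIG_AUDIT case, empty stubs so callers need no #ifdef. A hedged usage sketch (the function is hypothetical, and the NULL pre/post callbacks assume the implementation tolerates them):

#include <linux/lsm_audit.h>

/* Hypothetical caller: attach a netlink message type to an audit
 * record. With CONFIG_AUDIT disabled this compiles to a no-op. */
static void demo_audit_nlmsg(u16 nlmsg_type)
{
	struct common_audit_data ad;

	ad.type = LSM_AUDIT_DATA_NLMSGTYPE;
	ad.u.nlmsg_type = nlmsg_type;
	common_lsm_audit(&ad, NULL, NULL);
}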
diff --git a/include/linux/lsm_count.h b/include/linux/lsm_count.h
new file mode 100644
index 000000000000..16eb49761b25
--- /dev/null
+++ b/include/linux/lsm_count.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2023 Google LLC.
+ */
+
+#ifndef __LINUX_LSM_COUNT_H
+#define __LINUX_LSM_COUNT_H
+
+#include <linux/args.h>
+
+#ifdef CONFIG_SECURITY
+
+/*
+ * Macros to count the number of LSMs enabled in the kernel at compile time.
+ */
+
+/*
+ * Capabilities is enabled when CONFIG_SECURITY is enabled.
+ */
+#if IS_ENABLED(CONFIG_SECURITY)
+#define CAPABILITIES_ENABLED 1,
+#else
+#define CAPABILITIES_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SELINUX)
+#define SELINUX_ENABLED 1,
+#else
+#define SELINUX_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SMACK)
+#define SMACK_ENABLED 1,
+#else
+#define SMACK_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_APPARMOR)
+#define APPARMOR_ENABLED 1,
+#else
+#define APPARMOR_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_TOMOYO)
+#define TOMOYO_ENABLED 1,
+#else
+#define TOMOYO_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_YAMA)
+#define YAMA_ENABLED 1,
+#else
+#define YAMA_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LOADPIN)
+#define LOADPIN_ENABLED 1,
+#else
+#define LOADPIN_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LOCKDOWN_LSM)
+#define LOCKDOWN_ENABLED 1,
+#else
+#define LOCKDOWN_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_SAFESETID)
+#define SAFESETID_ENABLED 1,
+#else
+#define SAFESETID_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_BPF_LSM)
+#define BPF_LSM_ENABLED 1,
+#else
+#define BPF_LSM_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_LANDLOCK)
+#define LANDLOCK_ENABLED 1,
+#else
+#define LANDLOCK_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_IMA)
+#define IMA_ENABLED 1,
+#else
+#define IMA_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_EVM)
+#define EVM_ENABLED 1,
+#else
+#define EVM_ENABLED
+#endif
+
+#if IS_ENABLED(CONFIG_SECURITY_IPE)
+#define IPE_ENABLED 1,
+#else
+#define IPE_ENABLED
+#endif
+
+/*
+ * There is a trailing comma that needs to be accounted for. This is done by
+ * using a skipped argument in __COUNT_LSMS.
+ */
+#define __COUNT_LSMS(skipped_arg, args...) COUNT_ARGS(args...)
+#define COUNT_LSMS(args...) __COUNT_LSMS(args)
+
+#define MAX_LSM_COUNT \
+ COUNT_LSMS( \
+ CAPABILITIES_ENABLED \
+ SELINUX_ENABLED \
+ SMACK_ENABLED \
+ APPARMOR_ENABLED \
+ TOMOYO_ENABLED \
+ YAMA_ENABLED \
+ LOADPIN_ENABLED \
+ LOCKDOWN_ENABLED \
+ SAFESETID_ENABLED \
+ BPF_LSM_ENABLED \
+ LANDLOCK_ENABLED \
+ IMA_ENABLED \
+ EVM_ENABLED \
+ IPE_ENABLED)
+
+#else
+
+#define MAX_LSM_COUNT 0
+
+#endif /* CONFIG_SECURITY */
+
+#endif /* __LINUX_LSM_COUNT_H */
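Why the skipped argument is needed: every *_ENABLED macro expands either to "1," or to nothing, so the pasted list always ends with a comma, and that trailing comma becomes one extra empty argument once COUNT_LSMS re-splits the list. A self-contained hedged re-creation of the trick, with COUNT_ARGS_SKETCH standing in for COUNT_ARGS() from <linux/args.h> (capped at four arguments):

/* Pick the 5th argument; the shifted 4,3,2,1,0 tail turns it into a
 * counter for up to four real arguments. */
#define PICK_5TH(a1, a2, a3, a4, a5, ...) a5
#define COUNT_ARGS_SKETCH(args...) PICK_5TH(args, 4, 3, 2, 1, 0)

#define __COUNT_SKETCH(skipped_arg, args...) COUNT_ARGS_SKETCH(args)
#define COUNT_SKETCH(args...) __COUNT_SKETCH(args)

#define FOO_ENABLED 1,	/* "enabled": expands to "1," */
#define BAR_ENABLED	/* "disabled": expands to nothing */
#define BAZ_ENABLED 1,

/*
 * FOO_ENABLED BAR_ENABLED BAZ_ENABLED pastes to "1, 1,": after the
 * rescan that is three arguments -- 1, 1, and a trailing empty one.
 * __COUNT_SKETCH drops the first, and the empty argument takes the
 * dropped one's place, so the count comes out right.
 */
_Static_assert(COUNT_SKETCH(FOO_ENABLED BAR_ENABLED BAZ_ENABLED) == 2,
	       "two of the three sketch LSMs are enabled");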
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 04c01794de83..8c42b4bde09c 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -26,34 +26,35 @@
* #undef LSM_HOOK
* };
*/
-LSM_HOOK(int, 0, binder_set_context_mgr, struct task_struct *mgr)
-LSM_HOOK(int, 0, binder_transaction, struct task_struct *from,
- struct task_struct *to)
-LSM_HOOK(int, 0, binder_transfer_binder, struct task_struct *from,
- struct task_struct *to)
-LSM_HOOK(int, 0, binder_transfer_file, struct task_struct *from,
- struct task_struct *to, struct file *file)
+LSM_HOOK(int, 0, binder_set_context_mgr, const struct cred *mgr)
+LSM_HOOK(int, 0, binder_transaction, const struct cred *from,
+ const struct cred *to)
+LSM_HOOK(int, 0, binder_transfer_binder, const struct cred *from,
+ const struct cred *to)
+LSM_HOOK(int, 0, binder_transfer_file, const struct cred *from,
+ const struct cred *to, const struct file *file)
LSM_HOOK(int, 0, ptrace_access_check, struct task_struct *child,
unsigned int mode)
LSM_HOOK(int, 0, ptrace_traceme, struct task_struct *parent)
-LSM_HOOK(int, 0, capget, struct task_struct *target, kernel_cap_t *effective,
+LSM_HOOK(int, 0, capget, const struct task_struct *target, kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted)
LSM_HOOK(int, 0, capset, struct cred *new, const struct cred *old,
const kernel_cap_t *effective, const kernel_cap_t *inheritable,
const kernel_cap_t *permitted)
LSM_HOOK(int, 0, capable, const struct cred *cred, struct user_namespace *ns,
int cap, unsigned int opts)
-LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, struct super_block *sb)
+LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, const struct super_block *sb)
LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
LSM_HOOK(int, 0, syslog, int type)
LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
const struct timezone *tz)
LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
-LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file)
+LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, const struct file *file)
LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
-LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm)
-LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm)
+LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, const struct linux_binprm *bprm)
+LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, const struct linux_binprm *bprm)
+LSM_HOOK(int, 0, fs_context_submount, struct fs_context *fc, struct super_block *reference)
LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc,
struct fs_context *src_sc)
LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc,
@@ -65,7 +66,7 @@ LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts)
LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts)
LSM_HOOK(int, 0, sb_mnt_opts_compat, struct super_block *sb, void *mnt_opts)
LSM_HOOK(int, 0, sb_remount, struct super_block *sb, void *mnt_opts)
-LSM_HOOK(int, 0, sb_kern_mount, struct super_block *sb)
+LSM_HOOK(int, 0, sb_kern_mount, const struct super_block *sb)
LSM_HOOK(int, 0, sb_show_options, struct seq_file *m, struct super_block *sb)
LSM_HOOK(int, 0, sb_statfs, struct dentry *dentry)
LSM_HOOK(int, 0, sb_mount, const char *dev_name, const struct path *path,
@@ -78,14 +79,13 @@ LSM_HOOK(int, 0, sb_set_mnt_opts, struct super_block *sb, void *mnt_opts,
LSM_HOOK(int, 0, sb_clone_mnt_opts, const struct super_block *oldsb,
struct super_block *newsb, unsigned long kern_flags,
unsigned long *set_kern_flags)
-LSM_HOOK(int, 0, sb_add_mnt_opt, const char *option, const char *val,
- int len, void **mnt_opts)
LSM_HOOK(int, 0, move_mount, const struct path *from_path,
const struct path *to_path)
-LSM_HOOK(int, 0, dentry_init_security, struct dentry *dentry,
- int mode, const struct qstr *name, void **ctx, u32 *ctxlen)
+LSM_HOOK(int, -EOPNOTSUPP, dentry_init_security, struct dentry *dentry,
+ int mode, const struct qstr *name, const char **xattr_name,
+ struct lsm_context *cp)
LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode,
- struct qstr *name, const struct cred *old, struct cred *new)
+ const struct qstr *name, const struct cred *old, struct cred *new)
#ifdef CONFIG_SECURITY_PATH
LSM_HOOK(int, 0, path_unlink, const struct path *dir, struct dentry *dentry)
@@ -94,6 +94,8 @@ LSM_HOOK(int, 0, path_mkdir, const struct path *dir, struct dentry *dentry,
LSM_HOOK(int, 0, path_rmdir, const struct path *dir, struct dentry *dentry)
LSM_HOOK(int, 0, path_mknod, const struct path *dir, struct dentry *dentry,
umode_t mode, unsigned int dev)
+LSM_HOOK(void, LSM_RET_VOID, path_post_mknod, struct mnt_idmap *idmap,
+ struct dentry *dentry)
LSM_HOOK(int, 0, path_truncate, const struct path *path)
LSM_HOOK(int, 0, path_symlink, const struct path *dir, struct dentry *dentry,
const char *old_name)
@@ -101,7 +103,7 @@ LSM_HOOK(int, 0, path_link, struct dentry *old_dentry,
const struct path *new_dir, struct dentry *new_dentry)
LSM_HOOK(int, 0, path_rename, const struct path *old_dir,
struct dentry *old_dentry, const struct path *new_dir,
- struct dentry *new_dentry)
+ struct dentry *new_dentry, unsigned int flags)
LSM_HOOK(int, 0, path_chmod, const struct path *path, umode_t mode)
LSM_HOOK(int, 0, path_chown, const struct path *path, kuid_t uid, kgid_t gid)
LSM_HOOK(int, 0, path_chroot, const struct path *path)
@@ -112,13 +114,16 @@ LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask,
unsigned int obj_type)
LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode)
LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode)
-LSM_HOOK(int, 0, inode_init_security, struct inode *inode,
- struct inode *dir, const struct qstr *qstr, const char **name,
- void **value, size_t *len)
+LSM_HOOK(void, LSM_RET_VOID, inode_free_security_rcu, void *inode_security)
+LSM_HOOK(int, -EOPNOTSUPP, inode_init_security, struct inode *inode,
+ struct inode *dir, const struct qstr *qstr, struct xattr *xattrs,
+ int *xattr_count)
LSM_HOOK(int, 0, inode_init_security_anon, struct inode *inode,
const struct qstr *name, const struct inode *context_inode)
LSM_HOOK(int, 0, inode_create, struct inode *dir, struct dentry *dentry,
umode_t mode)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_create_tmpfile, struct mnt_idmap *idmap,
+ struct inode *inode)
LSM_HOOK(int, 0, inode_link, struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
LSM_HOOK(int, 0, inode_unlink, struct inode *dir, struct dentry *dentry)
@@ -135,36 +140,61 @@ LSM_HOOK(int, 0, inode_readlink, struct dentry *dentry)
LSM_HOOK(int, 0, inode_follow_link, struct dentry *dentry, struct inode *inode,
bool rcu)
LSM_HOOK(int, 0, inode_permission, struct inode *inode, int mask)
-LSM_HOOK(int, 0, inode_setattr, struct dentry *dentry, struct iattr *attr)
+LSM_HOOK(int, 0, inode_setattr, struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *attr)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_setattr, struct mnt_idmap *idmap,
+ struct dentry *dentry, int ia_valid)
LSM_HOOK(int, 0, inode_getattr, const struct path *path)
-LSM_HOOK(int, 0, inode_setxattr, struct user_namespace *mnt_userns,
+LSM_HOOK(int, 0, inode_xattr_skipcap, const char *name)
+LSM_HOOK(int, 0, inode_setxattr, struct mnt_idmap *idmap,
struct dentry *dentry, const char *name, const void *value,
size_t size, int flags)
LSM_HOOK(void, LSM_RET_VOID, inode_post_setxattr, struct dentry *dentry,
const char *name, const void *value, size_t size, int flags)
LSM_HOOK(int, 0, inode_getxattr, struct dentry *dentry, const char *name)
LSM_HOOK(int, 0, inode_listxattr, struct dentry *dentry)
-LSM_HOOK(int, 0, inode_removexattr, struct user_namespace *mnt_userns,
+LSM_HOOK(int, 0, inode_removexattr, struct mnt_idmap *idmap,
struct dentry *dentry, const char *name)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_removexattr, struct dentry *dentry,
+ const char *name)
+LSM_HOOK(int, 0, inode_file_setattr, struct dentry *dentry, struct file_kattr *fa)
+LSM_HOOK(int, 0, inode_file_getattr, struct dentry *dentry, struct file_kattr *fa)
+LSM_HOOK(int, 0, inode_set_acl, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name, struct posix_acl *kacl)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_set_acl, struct dentry *dentry,
+ const char *acl_name, struct posix_acl *kacl)
+LSM_HOOK(int, 0, inode_get_acl, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name)
+LSM_HOOK(int, 0, inode_remove_acl, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name)
+LSM_HOOK(void, LSM_RET_VOID, inode_post_remove_acl, struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name)
LSM_HOOK(int, 0, inode_need_killpriv, struct dentry *dentry)
-LSM_HOOK(int, 0, inode_killpriv, struct user_namespace *mnt_userns,
+LSM_HOOK(int, 0, inode_killpriv, struct mnt_idmap *idmap,
struct dentry *dentry)
-LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, struct user_namespace *mnt_userns,
+LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, struct mnt_idmap *idmap,
struct inode *inode, const char *name, void **buffer, bool alloc)
LSM_HOOK(int, -EOPNOTSUPP, inode_setsecurity, struct inode *inode,
const char *name, const void *value, size_t size, int flags)
LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer,
size_t buffer_size)
-LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, inode_getlsmprop, struct inode *inode,
+ struct lsm_prop *prop)
LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new)
-LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, const char *name)
+LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, struct dentry *src,
+ const char *name)
+LSM_HOOK(int, 0, inode_setintegrity, const struct inode *inode,
+ enum lsm_integrity_type type, const void *value, size_t size)
LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir,
struct kernfs_node *kn)
LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
LSM_HOOK(int, 0, file_alloc_security, struct file *file)
+LSM_HOOK(void, LSM_RET_VOID, file_release, struct file *file)
LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file)
LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd,
unsigned long arg)
+LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd,
+ unsigned long arg)
LSM_HOOK(int, 0, mmap_addr, unsigned long addr)
LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags)
@@ -178,8 +208,10 @@ LSM_HOOK(int, 0, file_send_sigiotask, struct task_struct *tsk,
struct fown_struct *fown, int sig)
LSM_HOOK(int, 0, file_receive, struct file *file)
LSM_HOOK(int, 0, file_open, struct file *file)
+LSM_HOOK(int, 0, file_post_open, struct file *file, int mask)
+LSM_HOOK(int, 0, file_truncate, struct file *file)
LSM_HOOK(int, 0, task_alloc, struct task_struct *task,
- unsigned long clone_flags)
+ u64 clone_flags)
LSM_HOOK(void, LSM_RET_VOID, task_free, struct task_struct *task)
LSM_HOOK(int, 0, cred_alloc_blank, struct cred *cred, gfp_t gfp)
LSM_HOOK(void, LSM_RET_VOID, cred_free, struct cred *cred)
@@ -188,6 +220,8 @@ LSM_HOOK(int, 0, cred_prepare, struct cred *new, const struct cred *old,
LSM_HOOK(void, LSM_RET_VOID, cred_transfer, struct cred *new,
const struct cred *old)
LSM_HOOK(void, LSM_RET_VOID, cred_getsecid, const struct cred *c, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, cred_getlsmprop, const struct cred *c,
+ struct lsm_prop *prop)
LSM_HOOK(int, 0, kernel_act_as, struct cred *new, u32 secid)
LSM_HOOK(int, 0, kernel_create_files_as, struct cred *new, struct inode *inode)
LSM_HOOK(int, 0, kernel_module_request, char *kmod_name)
@@ -202,13 +236,13 @@ LSM_HOOK(int, 0, task_fix_setuid, struct cred *new, const struct cred *old,
int flags)
LSM_HOOK(int, 0, task_fix_setgid, struct cred *new, const struct cred * old,
int flags)
+LSM_HOOK(int, 0, task_fix_setgroups, struct cred *new, const struct cred * old)
LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid)
LSM_HOOK(int, 0, task_getpgid, struct task_struct *p)
LSM_HOOK(int, 0, task_getsid, struct task_struct *p)
-LSM_HOOK(void, LSM_RET_VOID, task_getsecid_subj,
- struct task_struct *p, u32 *secid)
-LSM_HOOK(void, LSM_RET_VOID, task_getsecid_obj,
- struct task_struct *p, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, current_getlsmprop_subj, struct lsm_prop *prop)
+LSM_HOOK(void, LSM_RET_VOID, task_getlsmprop_obj,
+ struct task_struct *p, struct lsm_prop *prop)
LSM_HOOK(int, 0, task_setnice, struct task_struct *p, int nice)
LSM_HOOK(int, 0, task_setioprio, struct task_struct *p, int ioprio)
LSM_HOOK(int, 0, task_getioprio, struct task_struct *p)
@@ -225,9 +259,10 @@ LSM_HOOK(int, -ENOSYS, task_prctl, int option, unsigned long arg2,
unsigned long arg3, unsigned long arg4, unsigned long arg5)
LSM_HOOK(void, LSM_RET_VOID, task_to_inode, struct task_struct *p,
struct inode *inode)
+LSM_HOOK(int, 0, userns_create, const struct cred *cred)
LSM_HOOK(int, 0, ipc_permission, struct kern_ipc_perm *ipcp, short flag)
-LSM_HOOK(void, LSM_RET_VOID, ipc_getsecid, struct kern_ipc_perm *ipcp,
- u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, ipc_getlsmprop, struct kern_ipc_perm *ipcp,
+ struct lsm_prop *prop)
LSM_HOOK(int, 0, msg_msg_alloc_security, struct msg_msg *msg)
LSM_HOOK(void, LSM_RET_VOID, msg_msg_free_security, struct msg_msg *msg)
LSM_HOOK(int, 0, msg_queue_alloc_security, struct kern_ipc_perm *perm)
@@ -254,19 +289,24 @@ LSM_HOOK(int, 0, sem_semop, struct kern_ipc_perm *perm, struct sembuf *sops,
LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb)
LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry,
struct inode *inode)
-LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, char *name,
+LSM_HOOK(int, -EOPNOTSUPP, getselfattr, unsigned int attr,
+ struct lsm_ctx __user *ctx, u32 *size, u32 flags)
+LSM_HOOK(int, -EOPNOTSUPP, setselfattr, unsigned int attr,
+ struct lsm_ctx *ctx, u32 size, u32 flags)
+LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, const char *name,
char **value)
LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size)
LSM_HOOK(int, 0, ismaclabel, const char *name)
-LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, char **secdata,
- u32 *seclen)
+LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, struct lsm_context *cp)
+LSM_HOOK(int, -EOPNOTSUPP, lsmprop_to_secctx, struct lsm_prop *prop,
+ struct lsm_context *cp)
LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid)
-LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
+LSM_HOOK(void, LSM_RET_VOID, release_secctx, struct lsm_context *cp)
LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen)
LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
-LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx,
- u32 *ctxlen)
+LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode,
+ struct lsm_context *cp)
#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
LSM_HOOK(int, 0, post_notification, const struct cred *w_cred,
@@ -301,15 +341,15 @@ LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname)
LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname)
LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how)
LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb)
-LSM_HOOK(int, 0, socket_getpeersec_stream, struct socket *sock,
- char __user *optval, int __user *optlen, unsigned len)
-LSM_HOOK(int, 0, socket_getpeersec_dgram, struct socket *sock,
+LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_stream, struct socket *sock,
+ sockptr_t optval, sockptr_t optlen, unsigned int len)
+LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_dgram, struct socket *sock,
struct sk_buff *skb, u32 *secid)
LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority)
LSM_HOOK(void, LSM_RET_VOID, sk_free_security, struct sock *sk)
LSM_HOOK(void, LSM_RET_VOID, sk_clone_security, const struct sock *sk,
struct sock *newsk)
-LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, struct sock *sk, u32 *secid)
+LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, const struct sock *sk, u32 *secid)
LSM_HOOK(void, LSM_RET_VOID, sock_graft, struct sock *sk, struct socket *parent)
LSM_HOOK(int, 0, inet_conn_request, const struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
@@ -322,26 +362,27 @@ LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void)
LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void)
LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req,
struct flowi_common *flic)
-LSM_HOOK(int, 0, tun_dev_alloc_security, void **security)
-LSM_HOOK(void, LSM_RET_VOID, tun_dev_free_security, void *security)
+LSM_HOOK(int, 0, tun_dev_alloc_security, void *security)
LSM_HOOK(int, 0, tun_dev_create, void)
LSM_HOOK(int, 0, tun_dev_attach_queue, void *security)
LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security)
LSM_HOOK(int, 0, tun_dev_open, void *security)
-LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_endpoint *ep,
+LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_association *asoc,
struct sk_buff *skb)
LSM_HOOK(int, 0, sctp_bind_connect, struct sock *sk, int optname,
struct sockaddr *address, int addrlen)
-LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_endpoint *ep,
+LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_association *asoc,
struct sock *sk, struct sock *newsk)
+LSM_HOOK(int, 0, sctp_assoc_established, struct sctp_association *asoc,
+ struct sk_buff *skb)
+LSM_HOOK(int, 0, mptcp_add_subflow, struct sock *sk, struct sock *ssk)
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_INFINIBAND
LSM_HOOK(int, 0, ib_pkey_access, void *sec, u64 subnet_prefix, u16 pkey)
LSM_HOOK(int, 0, ib_endport_manage_subnet, void *sec, const char *dev_name,
u8 port_num)
-LSM_HOOK(int, 0, ib_alloc_security, void **sec)
-LSM_HOOK(void, LSM_RET_VOID, ib_free_security, void *sec)
+LSM_HOOK(int, 0, ib_alloc_security, void *sec)
#endif /* CONFIG_SECURITY_INFINIBAND */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -358,8 +399,7 @@ LSM_HOOK(int, 0, xfrm_state_alloc_acquire, struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid)
LSM_HOOK(void, LSM_RET_VOID, xfrm_state_free_security, struct xfrm_state *x)
LSM_HOOK(int, 0, xfrm_state_delete_security, struct xfrm_state *x)
-LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid,
- u8 dir)
+LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid)
LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x,
struct xfrm_policy *xp, const struct flowi_common *flic)
LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid,
@@ -370,36 +410,59 @@ LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid,
#ifdef CONFIG_KEYS
LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred,
unsigned long flags)
-LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key)
LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred,
enum key_need_perm need_perm)
-LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **_buffer)
+LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **buffer)
+LSM_HOOK(void, LSM_RET_VOID, key_post_create_or_update, struct key *keyring,
+ struct key *key, const void *payload, size_t payload_len,
+ unsigned long flags, bool create)
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr,
- void **lsmrule)
+ void **lsmrule, gfp_t gfp)
LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule)
-LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule)
+LSM_HOOK(int, 0, audit_rule_match, struct lsm_prop *prop, u32 field, u32 op,
+ void *lsmrule)
LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
#endif /* CONFIG_AUDIT */
#ifdef CONFIG_BPF_SYSCALL
-LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size)
+LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size, bool kernel)
LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode)
LSM_HOOK(int, 0, bpf_prog, struct bpf_prog *prog)
-LSM_HOOK(int, 0, bpf_map_alloc_security, struct bpf_map *map)
-LSM_HOOK(void, LSM_RET_VOID, bpf_map_free_security, struct bpf_map *map)
-LSM_HOOK(int, 0, bpf_prog_alloc_security, struct bpf_prog_aux *aux)
-LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free_security, struct bpf_prog_aux *aux)
+LSM_HOOK(int, 0, bpf_map_create, struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token, bool kernel)
+LSM_HOOK(void, LSM_RET_VOID, bpf_map_free, struct bpf_map *map)
+LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token, bool kernel)
+LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog)
+LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr,
+ const struct path *path)
+LSM_HOOK(void, LSM_RET_VOID, bpf_token_free, struct bpf_token *token)
+LSM_HOOK(int, 0, bpf_token_cmd, const struct bpf_token *token, enum bpf_cmd cmd)
+LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap)
#endif /* CONFIG_BPF_SYSCALL */
LSM_HOOK(int, 0, locked_down, enum lockdown_reason what)
#ifdef CONFIG_PERF_EVENTS
-LSM_HOOK(int, 0, perf_event_open, struct perf_event_attr *attr, int type)
+LSM_HOOK(int, 0, perf_event_open, int type)
LSM_HOOK(int, 0, perf_event_alloc, struct perf_event *event)
-LSM_HOOK(void, LSM_RET_VOID, perf_event_free, struct perf_event *event)
LSM_HOOK(int, 0, perf_event_read, struct perf_event *event)
LSM_HOOK(int, 0, perf_event_write, struct perf_event *event)
#endif /* CONFIG_PERF_EVENTS */
+
+#ifdef CONFIG_IO_URING
+LSM_HOOK(int, 0, uring_override_creds, const struct cred *new)
+LSM_HOOK(int, 0, uring_sqpoll, void)
+LSM_HOOK(int, 0, uring_cmd, struct io_uring_cmd *ioucmd)
+LSM_HOOK(int, 0, uring_allowed, void)
+#endif /* CONFIG_IO_URING */
+
+LSM_HOOK(void, LSM_RET_VOID, initramfs_populated, void)
+
+LSM_HOOK(int, 0, bdev_alloc_security, struct block_device *bdev)
+LSM_HOOK(void, LSM_RET_VOID, bdev_free_security, struct block_device *bdev)
+LSM_HOOK(int, 0, bdev_setintegrity, struct block_device *bdev,
+ enum lsm_integrity_type type, const void *value, size_t size)
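The first hunk of lsm_hook_defs.h above preserves the header's usage comment: consumers define LSM_HOOK, include the header, then #undef it, so every hook listed here is stamped out once per consumer. A minimal hedged sketch of one such consumer, mirroring the union named in that comment:

/*
 * Each LSM_HOOK(RET, DEFAULT, NAME, ...) line becomes one function
 * pointer; the DEFAULT return value is ignored by this consumer.
 */
union security_list_options {
	#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
	#include <linux/lsm_hook_defs.h>
	#undef LSM_HOOK
};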
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 5c4c5c0602cb..b92008641242 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -25,1573 +25,100 @@
#ifndef __LINUX_LSM_HOOKS_H
#define __LINUX_LSM_HOOKS_H
+#include <uapi/linux/lsm.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/rculist.h>
+#include <linux/xattr.h>
+#include <linux/static_call.h>
+#include <linux/unroll.h>
+#include <linux/jump_label.h>
+#include <linux/lsm_count.h>
-/**
- * union security_list_options - Linux Security Module hook function list
- *
- * Security hooks for program execution operations.
- *
- * @bprm_creds_for_exec:
- * If the setup in prepare_exec_creds did not setup @bprm->cred->security
- * properly for executing @bprm->file, update the LSM's portion of
- * @bprm->cred->security to be what commit_creds needs to install for the
- * new program. This hook may also optionally check permissions
- * (e.g. for transitions between security domains).
- * The hook must set @bprm->secureexec to 1 if AT_SECURE should be set to
- * request libc enable secure mode.
- * @bprm contains the linux_binprm structure.
- * Return 0 if the hook is successful and permission is granted.
- * @bprm_creds_from_file:
- * If @file is setpcap, suid, sgid or otherwise marked to change
- * privilege upon exec, update @bprm->cred to reflect that change.
- * This is called after finding the binary that will be executed.
- * without an interpreter. This ensures that the credentials will not
- * be derived from a script that the binary will need to reopen, which
- * when reopend may end up being a completely different file. This
- * hook may also optionally check permissions (e.g. for transitions
- * between security domains).
- * The hook must set @bprm->secureexec to 1 if AT_SECURE should be set to
- * request libc enable secure mode.
- * The hook must add to @bprm->per_clear any personality flags that
- * should be cleared from current->personality.
- * @bprm contains the linux_binprm structure.
- * Return 0 if the hook is successful and permission is granted.
- * @bprm_check_security:
- * This hook mediates the point when a search for a binary handler will
- * begin. It allows a check against the @bprm->cred->security value
- * which was set in the preceding creds_for_exec call. The argv list and
- * envp list are reliably available in @bprm. This hook may be called
- * multiple times during a single execve.
- * @bprm contains the linux_binprm structure.
- * Return 0 if the hook is successful and permission is granted.
- * @bprm_committing_creds:
- * Prepare to install the new security attributes of a process being
- * transformed by an execve operation, based on the old credentials
- * pointed to by @current->cred and the information set in @bprm->cred by
- * the bprm_creds_for_exec hook. @bprm points to the linux_binprm
- * structure. This hook is a good place to perform state changes on the
- * process such as closing open file descriptors to which access will no
- * longer be granted when the attributes are changed. This is called
- * immediately before commit_creds().
- * @bprm_committed_creds:
- * Tidy up after the installation of the new security attributes of a
- * process being transformed by an execve operation. The new credentials
- * have, by this point, been set to @current->cred. @bprm points to the
- * linux_binprm structure. This hook is a good place to perform state
- * changes on the process such as clearing out non-inheritable signal
- * state. This is called immediately after commit_creds().
- *
- * Security hooks for mount using fs_context.
- * [See also Documentation/filesystems/mount_api.rst]
- *
- * @fs_context_dup:
- * Allocate and attach a security structure to sc->security. This pointer
- * is initialised to NULL by the caller.
- * @fc indicates the new filesystem context.
- * @src_fc indicates the original filesystem context.
- * @fs_context_parse_param:
- * Userspace provided a parameter to configure a superblock. The LSM may
- * reject it with an error and may use it for itself, in which case it
- * should return 0; otherwise it should return -ENOPARAM to pass it on to
- * the filesystem.
- * @fc indicates the filesystem context.
- * @param The parameter
- *
- * Security hooks for filesystem operations.
- *
- * @sb_alloc_security:
- * Allocate and attach a security structure to the sb->s_security field.
- * The s_security field is initialized to NULL when the structure is
- * allocated.
- * @sb contains the super_block structure to be modified.
- * Return 0 if operation was successful.
- * @sb_delete:
- * Release objects tied to a superblock (e.g. inodes).
- * @sb contains the super_block structure being released.
- * @sb_free_security:
- * Deallocate and clear the sb->s_security field.
- * @sb contains the super_block structure to be modified.
- * @sb_free_mnt_opts:
- * Free memory associated with @mnt_ops.
- * @sb_eat_lsm_opts:
- * Eat (scan @orig options) and save them in @mnt_opts.
- * @sb_statfs:
- * Check permission before obtaining filesystem statistics for the @mnt
- * mountpoint.
- * @dentry is a handle on the superblock for the filesystem.
- * Return 0 if permission is granted.
- * @sb_mount:
- * Check permission before an object specified by @dev_name is mounted on
- * the mount point named by @nd. For an ordinary mount, @dev_name
- * identifies a device if the file system type requires a device. For a
- * remount (@flags & MS_REMOUNT), @dev_name is irrelevant. For a
- * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the
- * pathname of the object being mounted.
- * @dev_name contains the name for object being mounted.
- * @path contains the path for mount point object.
- * @type contains the filesystem type.
- * @flags contains the mount flags.
- * @data contains the filesystem-specific data.
- * Return 0 if permission is granted.
- * @sb_copy_data:
- * Allow mount option data to be copied prior to parsing by the filesystem,
- * so that the security module can extract security-specific mount
- * options cleanly (a filesystem may modify the data e.g. with strsep()).
- * This also allows the original mount data to be stripped of security-
- * specific options to avoid having to make filesystems aware of them.
- * @orig the original mount data copied from userspace.
- * @copy copied data which will be passed to the security module.
- * Returns 0 if the copy was successful.
- * @sb_mnt_opts_compat:
- * Determine if the new mount options in @mnt_opts are allowed given
- * the existing mounted filesystem at @sb.
- * @sb superblock being compared
- * @mnt_opts new mount options
- * Return 0 if options are compatible.
- * @sb_remount:
- * Extracts security system specific mount options and verifies no changes
- * are being made to those options.
- * @sb superblock being remounted
- * @data contains the filesystem-specific data.
- * Return 0 if permission is granted.
- * @sb_kern_mount:
- * Mount this @sb if allowed by permissions.
- * @sb_show_options:
- * Show (print on @m) mount options for this @sb.
- * @sb_umount:
- * Check permission before the @mnt file system is unmounted.
- * @mnt contains the mounted file system.
- * @flags contains the unmount flags, e.g. MNT_FORCE.
- * Return 0 if permission is granted.
- * @sb_pivotroot:
- * Check permission before pivoting the root filesystem.
- * @old_path contains the path for the new location of the
- * current root (put_old).
- * @new_path contains the path for the new root (new_root).
- * Return 0 if permission is granted.
- * @sb_set_mnt_opts:
- * Set the security relevant mount options used for a superblock
- * @sb the superblock to set security mount options for
- * @opts binary data structure containing all lsm mount data
- * @sb_clone_mnt_opts:
- * Copy all security options from a given superblock to another
- * @oldsb old superblock which contain information to clone
- * @newsb new superblock which needs filled in
- * @sb_add_mnt_opt:
- * Add one mount @option to @mnt_opts.
- * @sb_parse_opts_str:
- * Parse a string of security data filling in the opts structure
- * @options string containing all mount options known by the LSM
- * @opts binary data structure usable by the LSM
- * @move_mount:
- * Check permission before a mount is moved.
- * @from_path indicates the mount that is going to be moved.
- * @to_path indicates the mountpoint that will be mounted upon.
- * @dentry_init_security:
- * Compute a context for a dentry as the inode is not yet available
- * since NFSv4 has no label backed by an EA anyway.
- * @dentry dentry to use in calculating the context.
- * @mode mode used to determine resource type.
- * @name name of the last path component used to create file
- * @ctx pointer to place the pointer to the resulting context in.
- * @ctxlen point to place the length of the resulting context.
- * @dentry_create_files_as:
- * Compute a context for a dentry as the inode is not yet available
- * and set that context in passed in creds so that new files are
- * created using that context. Context is calculated using the
- * passed in creds and not the creds of the caller.
- * @dentry dentry to use in calculating the context.
- * @mode mode used to determine resource type.
- * @name name of the last path component used to create file
- * @old creds which should be used for context calculation
- * @new creds to modify
- *
- *
- * Security hooks for inode operations.
- *
- * @inode_alloc_security:
- * Allocate and attach a security structure to @inode->i_security. The
- * i_security field is initialized to NULL when the inode structure is
- * allocated.
- * @inode contains the inode structure.
- * Return 0 if operation was successful.
- * @inode_free_security:
- * @inode contains the inode structure.
- * Deallocate the inode security structure and set @inode->i_security to
- * NULL.
- * @inode_init_security:
- * Obtain the security attribute name suffix and value to set on a newly
- * created inode and set up the incore security field for the new inode.
- * This hook is called by the fs code as part of the inode creation
- * transaction and provides for atomic labeling of the inode, unlike
- * the post_create/mkdir/... hooks called by the VFS. The hook function
- * is expected to allocate the name and value via kmalloc, with the caller
- * being responsible for calling kfree after using them.
- * If the security module does not use security attributes or does
- * not wish to put a security attribute on this particular inode,
- * then it should return -EOPNOTSUPP to skip this processing.
- * @inode contains the inode structure of the newly created inode.
- * @dir contains the inode structure of the parent directory.
- * @qstr contains the last path component of the new object
- * @name will be set to the allocated name suffix (e.g. selinux).
- * @value will be set to the allocated attribute value.
- * @len will be set to the length of the value.
- * Returns 0 if @name and @value have been successfully set,
- * -EOPNOTSUPP if no security attribute is needed, or
- * -ENOMEM on memory allocation failure.
- * @inode_init_security_anon:
- * Set up the incore security field for the new anonymous inode
- * and return whether the inode creation is permitted by the security
- * module or not.
- * @inode contains the inode structure
- * @name name of the anonymous inode class
- * @context_inode optional related inode
- * Returns 0 on success, -EACCES if the security module denies the
- * creation of this inode, or another -errno upon other errors.
- * @inode_create:
- * Check permission to create a regular file.
- * @dir contains inode structure of the parent of the new file.
- * @dentry contains the dentry structure for the file to be created.
- * @mode contains the file mode of the file to be created.
- * Return 0 if permission is granted.
- * @inode_link:
- * Check permission before creating a new hard link to a file.
- * @old_dentry contains the dentry structure for an existing
- * link to the file.
- * @dir contains the inode structure of the parent directory
- * of the new link.
- * @new_dentry contains the dentry structure for the new link.
- * Return 0 if permission is granted.
- * @path_link:
- * Check permission before creating a new hard link to a file.
- * @old_dentry contains the dentry structure for an existing link
- * to the file.
- * @new_dir contains the path structure of the parent directory of
- * the new link.
- * @new_dentry contains the dentry structure for the new link.
- * Return 0 if permission is granted.
- * @inode_unlink:
- * Check the permission to remove a hard link to a file.
- * @dir contains the inode structure of parent directory of the file.
- * @dentry contains the dentry structure for file to be unlinked.
- * Return 0 if permission is granted.
- * @path_unlink:
- * Check the permission to remove a hard link to a file.
- * @dir contains the path structure of parent directory of the file.
- * @dentry contains the dentry structure for file to be unlinked.
- * Return 0 if permission is granted.
- * @inode_symlink:
- * Check the permission to create a symbolic link to a file.
- * @dir contains the inode structure of parent directory of
- * the symbolic link.
- * @dentry contains the dentry structure of the symbolic link.
- * @old_name contains the pathname of file.
- * Return 0 if permission is granted.
- * @path_symlink:
- * Check the permission to create a symbolic link to a file.
- * @dir contains the path structure of parent directory of
- * the symbolic link.
- * @dentry contains the dentry structure of the symbolic link.
- * @old_name contains the pathname of file.
- * Return 0 if permission is granted.
- * @inode_mkdir:
- * Check permissions to create a new directory in the existing directory
- * associated with inode structure @dir.
- * @dir contains the inode structure of parent of the directory
- * to be created.
- * @dentry contains the dentry structure of new directory.
- * @mode contains the mode of new directory.
- * Return 0 if permission is granted.
- * @path_mkdir:
- * Check permissions to create a new directory in the existing directory
- * associated with path structure @path.
- * @dir contains the path structure of parent of the directory
- * to be created.
- * @dentry contains the dentry structure of new directory.
- * @mode contains the mode of new directory.
- * Return 0 if permission is granted.
- * @inode_rmdir:
- * Check the permission to remove a directory.
- * @dir contains the inode structure of parent of the directory
- * to be removed.
- * @dentry contains the dentry structure of directory to be removed.
- * Return 0 if permission is granted.
- * @path_rmdir:
- * Check the permission to remove a directory.
- * @dir contains the path structure of parent of the directory to be
- * removed.
- * @dentry contains the dentry structure of directory to be removed.
- * Return 0 if permission is granted.
- * @inode_mknod:
- * Check permissions when creating a special file (or a socket or a fifo
- * file created via the mknod system call). Note that if mknod operation
- * is being done for a regular file, then the create hook will be called
- * and not this hook.
- * @dir contains the inode structure of parent of the new file.
- * @dentry contains the dentry structure of the new file.
- * @mode contains the mode of the new file.
- * @dev contains the device number.
- * Return 0 if permission is granted.
- * @path_mknod:
- * Check permissions when creating a file. Note that this hook is called
- * even if mknod operation is being done for a regular file.
- * @dir contains the path structure of parent of the new file.
- * @dentry contains the dentry structure of the new file.
- * @mode contains the mode of the new file.
- * @dev contains the undecoded device number. Use new_decode_dev() to get
- * the decoded device number.
- * Return 0 if permission is granted.
- * @inode_rename:
- * Check for permission to rename a file or directory.
- * @old_dir contains the inode structure for parent of the old link.
- * @old_dentry contains the dentry structure of the old link.
- * @new_dir contains the inode structure for parent of the new link.
- * @new_dentry contains the dentry structure of the new link.
- * Return 0 if permission is granted.
- * @path_rename:
- * Check for permission to rename a file or directory.
- * @old_dir contains the path structure for parent of the old link.
- * @old_dentry contains the dentry structure of the old link.
- * @new_dir contains the path structure for parent of the new link.
- * @new_dentry contains the dentry structure of the new link.
- * Return 0 if permission is granted.
- * @path_chmod:
- * Check for permission to change a mode of the file @path. The new
- * mode is specified in @mode.
- * @path contains the path structure of the file to change the mode.
- * @mode contains the new DAC's permission, which is a bitmask of
- * constants from <include/uapi/linux/stat.h>
- * Return 0 if permission is granted.
- * @path_chown:
- * Check for permission to change owner/group of a file or directory.
- * @path contains the path structure.
- * @uid contains new owner's ID.
- * @gid contains new group's ID.
- * Return 0 if permission is granted.
- * @path_chroot:
- * Check for permission to change root directory.
- * @path contains the path structure.
- * Return 0 if permission is granted.
- * @path_notify:
- * Check permissions before setting a watch on events as defined by @mask,
- * on an object at @path, whose type is defined by @obj_type.
- * @inode_readlink:
- * Check the permission to read the symbolic link.
- * @dentry contains the dentry structure for the file link.
- * Return 0 if permission is granted.
- * @inode_follow_link:
- * Check permission to follow a symbolic link when looking up a pathname.
- * @dentry contains the dentry structure for the link.
- * @inode contains the inode, which itself is not stable in RCU-walk
- * @rcu indicates whether we are in RCU-walk mode.
- * Return 0 if permission is granted.
- * @inode_permission:
- * Check permission before accessing an inode. This hook is called by the
- * existing Linux permission function, so a security module can use it to
- * provide additional checking for existing Linux permission checks.
- * Notice that this hook is called when a file is opened (as well as many
- * other operations), whereas the file_security_ops permission hook is
- * called when the actual read/write operations are performed.
- * @inode contains the inode structure to check.
- * @mask contains the permission mask.
- * Return 0 if permission is granted.
- * @inode_setattr:
- * Check permission before setting file attributes. Note that the kernel
- * call to notify_change is performed from several locations, whenever
- * file attributes change (such as when a file is truncated, chown/chmod
- * operations, transferring disk quotas, etc).
- * @dentry contains the dentry structure for the file.
- * @attr is the iattr structure containing the new file attributes.
- * Return 0 if permission is granted.
- * @path_truncate:
- * Check permission before truncating a file.
- * @path contains the path structure for the file.
- * Return 0 if permission is granted.
- * @inode_getattr:
- * Check permission before obtaining file attributes.
- * @path contains the path structure for the file.
- * Return 0 if permission is granted.
- * @inode_setxattr:
- * Check permission before setting the extended attributes
- * @value identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_post_setxattr:
- * Update inode security field after successful setxattr operation.
- * @value identified by @name for @dentry.
- * @inode_getxattr:
- * Check permission before obtaining the extended attributes
- * identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_listxattr:
- * Check permission before obtaining the list of extended attribute
- * names for @dentry.
- * Return 0 if permission is granted.
- * @inode_removexattr:
- * Check permission before removing the extended attribute
- * identified by @name for @dentry.
- * Return 0 if permission is granted.
- * @inode_getsecurity:
- * Retrieve a copy of the extended attribute representation of the
- * security label associated with @name for @inode via @buffer. Note that
- * @name is the remainder of the attribute name after the security prefix
- * has been removed. @alloc is used to specify of the call should return a
- * value via the buffer or just the value length Return size of buffer on
- * success.
- * @inode_setsecurity:
- * Set the security label associated with @name for @inode from the
- * extended attribute value @value. @size indicates the size of the
- * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0.
- * Note that @name is the remainder of the attribute name after the
- * security. prefix has been removed.
- * Return 0 on success.
- * @inode_listsecurity:
- * Copy the extended attribute names for the security labels
- * associated with @inode into @buffer. The maximum size of @buffer
- * is specified by @buffer_size. @buffer may be NULL to request
- * the size of the buffer required.
- * Returns number of bytes used/required on success.
- * @inode_need_killpriv:
- * Called when an inode has been changed.
- * @dentry is the dentry being changed.
- * Return <0 on error to abort the inode change operation.
- * Return 0 if inode_killpriv does not need to be called.
- * Return >0 if inode_killpriv does need to be called.
- * @inode_killpriv:
- * The setuid bit is being removed. Remove similar security labels.
- * Called with the dentry->d_inode->i_mutex held.
- * @mnt_userns: user namespace of the mount
- * @dentry is the dentry being changed.
- * Return 0 on success. If error is returned, then the operation
- * causing setuid bit removal is failed.
- * @inode_getsecid:
- * Get the secid associated with the node.
- * @inode contains a pointer to the inode.
- * @secid contains a pointer to the location where result will be saved.
- * In case of failure, @secid will be set to zero.
- * @inode_copy_up:
- * A file is about to be copied up from lower layer to upper layer of
- * overlay filesystem. Security module can prepare a set of new creds
- * and modify as need be and return new creds. Caller will switch to
- * new creds temporarily to create new file and release newly allocated
- * creds.
- * @src indicates the union dentry of file that is being copied up.
- * @new pointer to pointer to return newly allocated creds.
- * Returns 0 on success or a negative error code on error.
- * @inode_copy_up_xattr:
- * Filter the xattrs being copied up when a unioned file is copied
- * up from a lower layer to the union/overlay layer.
- * @name indicates the name of the xattr.
- * Returns 0 to accept the xattr, 1 to discard the xattr, -EOPNOTSUPP if
- * security module does not know about attribute or a negative error code
- * to abort the copy up. Note that the caller is responsible for reading
- * and writing the xattrs as this hook is merely a filter.
- * @d_instantiate:
- * Fill in @inode security information for a @dentry if allowed.
- * @getprocattr:
- * Read attribute @name for process @p and store it into @value if allowed.
- * @setprocattr:
- * Write (set) attribute @name to @value, size @size if allowed.
- *
- * Security hooks for kernfs node operations
- *
- * @kernfs_init_security:
- * Initialize the security context of a newly created kernfs node based
- * on its own and its parent's attributes.
- *
- * @kn_dir the parent kernfs node
- * @kn the new child kernfs node
- *
- * Security hooks for file operations
- *
- * @file_permission:
- * Check file permissions before accessing an open file. This hook is
- * called by various operations that read or write files. A security
- * module can use this hook to perform additional checking on these
- * operations, e.g. to revalidate permissions on use to support privilege
- * bracketing or policy changes. Notice that this hook is used when the
- * actual read/write operations are performed, whereas the
- * inode_security_ops hook is called when a file is opened (as well as
- * many other operations).
- * Caveat: Although this hook can be used to revalidate permissions for
- * various system call operations that read or write files, it does not
- * address the revalidation of permissions for memory-mapped files.
- * Security modules must handle this separately if they need such
- * revalidation.
- * @file contains the file structure being accessed.
- * @mask contains the requested permissions.
- * Return 0 if permission is granted.
- * @file_alloc_security:
- * Allocate and attach a security structure to the file->f_security field.
- * The security field is initialized to NULL when the structure is first
- * created.
- * @file contains the file structure to secure.
- * Return 0 if the hook is successful and permission is granted.
- * @file_free_security:
- * Deallocate and free any security structures stored in file->f_security.
- * @file contains the file structure being modified.
- * @file_ioctl:
- * @file contains the file structure.
- * @cmd contains the operation to perform.
- * @arg contains the operational arguments.
- * Check permission for an ioctl operation on @file. Note that @arg
- * sometimes represents a user space pointer; in other cases, it may be a
- * simple integer value. When @arg represents a user space pointer, it
- * should never be used by the security module.
- * Return 0 if permission is granted.
- * @mmap_addr :
- * Check permissions for a mmap operation at @addr.
- * @addr contains virtual address that will be used for the operation.
- * Return 0 if permission is granted.
- * @mmap_file :
- * Check permissions for a mmap operation. The @file may be NULL, e.g.
- * if mapping anonymous memory.
- * @file contains the file structure for file to map (may be NULL).
- * @reqprot contains the protection requested by the application.
- * @prot contains the protection that will be applied by the kernel.
- * @flags contains the operational flags.
- * Return 0 if permission is granted.
- * @file_mprotect:
- * Check permissions before changing memory access permissions.
- * @vma contains the memory region to modify.
- * @reqprot contains the protection requested by the application.
- * @prot contains the protection that will be applied by the kernel.
- * Return 0 if permission is granted.
- * @file_lock:
- * Check permission before performing file locking operations.
- * Note the hook mediates both flock and fcntl style locks.
- * @file contains the file structure.
- * @cmd contains the posix-translated lock operation to perform
- * (e.g. F_RDLCK, F_WRLCK).
- * Return 0 if permission is granted.
- * @file_fcntl:
- * Check permission before allowing the file operation specified by @cmd
- * from being performed on the file @file. Note that @arg sometimes
- * represents a user space pointer; in other cases, it may be a simple
- * integer value. When @arg represents a user space pointer, it should
- * never be used by the security module.
- * @file contains the file structure.
- * @cmd contains the operation to be performed.
- * @arg contains the operational arguments.
- * Return 0 if permission is granted.
- * @file_set_fowner:
- * Save owner security information (typically from current->security) in
- * file->f_security for later use by the send_sigiotask hook.
- * @file contains the file structure to update.
- * Return 0 on success.
- * @file_send_sigiotask:
- * Check permission for the file owner @fown to send SIGIO or SIGURG to the
- * process @tsk. Note that this hook is sometimes called from interrupt
- * context.
- * Note that the fown_struct, @fown, is never outside the context of a
- * struct file, so the file structure (and associated security information)
- * can always be obtained: container_of(fown, struct file, f_owner)
- * @tsk contains the structure of task receiving signal.
- * @fown contains the file owner information.
- * @sig is the signal that will be sent. When 0, kernel sends SIGIO.
- * Return 0 if permission is granted.
- * @file_receive:
- * This hook allows security modules to control the ability of a process
- * to receive an open file descriptor via socket IPC.
- * @file contains the file structure being received.
- * Return 0 if permission is granted.
- * @file_open:
- * Save open-time permission checking state for later use upon
- * file_permission, and recheck access if anything has changed
- * since inode_permission.
- *
- * Security hooks for task operations.
- *
- * @task_alloc:
- * @task task being allocated.
- * @clone_flags contains the flags indicating what should be shared.
- * Handle allocation of task-related resources.
- * Returns a zero on success, negative values on failure.
- * @task_free:
- * @task task about to be freed.
- * Handle release of task-related resources. (Note that this can be called
- * from interrupt context.)
- * @cred_alloc_blank:
- * @cred points to the credentials.
- * @gfp indicates the atomicity of any memory allocations.
- * Only allocate sufficient memory and attach to @cred such that
- * cred_transfer() will not get ENOMEM.
- * @cred_free:
- * @cred points to the credentials.
- * Deallocate and clear the cred->security field in a set of credentials.
- * @cred_prepare:
- * @new points to the new credentials.
- * @old points to the original credentials.
- * @gfp indicates the atomicity of any memory allocations.
- * Prepare a new set of credentials by copying the data from the old set.
- * @cred_transfer:
- * @new points to the new credentials.
- * @old points to the original credentials.
- * Transfer data from original creds to new creds
- * @cred_getsecid:
- * Retrieve the security identifier of the cred structure @c
- * @c contains the credentials, secid will be placed into @secid.
- * In case of failure, @secid will be set to zero.
- * @kernel_act_as:
- * Set the credentials for a kernel service to act as (subjective context).
- * @new points to the credentials to be modified.
- * @secid specifies the security ID to be set
- * The current task must be the one that nominated @secid.
- * Return 0 if successful.
- * @kernel_create_files_as:
- * Set the file creation context in a set of credentials to be the same as
- * the objective context of the specified inode.
- * @new points to the credentials to be modified.
- * @inode points to the inode to use as a reference.
- * The current task must be the one that nominated @inode.
- * Return 0 if successful.
- * @kernel_module_request:
- * Ability to trigger the kernel to automatically upcall to userspace for
- * userspace to load a kernel module with the given name.
- * @kmod_name name of the module requested by the kernel
- * Return 0 if successful.
- * @kernel_load_data:
- * Load data provided by userspace.
- * @id kernel load data identifier
- * @contents indicates whether a subsequent @kernel_post_load_data
- * will be called.
- * Return 0 if permission is granted.
- * @kernel_post_load_data:
- * Load data provided by a non-file source (usually userspace buffer).
- * @buf pointer to buffer containing the data contents.
- * @size length of the data contents.
- * @id kernel load data identifier
- * @description a text description of what was loaded, @id-specific
- * Return 0 if permission is granted.
- * This must be paired with a prior @kernel_load_data call that had
- * @contents set to true.
- * @kernel_read_file:
- * Read a file specified by userspace.
- * @file contains the file structure pointing to the file being read
- * by the kernel.
- * @id kernel read file identifier
- * @contents indicates whether a subsequent @kernel_post_read_file
- * will be called.
- * Return 0 if permission is granted.
- * @kernel_post_read_file:
- * Read a file specified by userspace.
- * @file contains the file structure pointing to the file being read
- * by the kernel.
- * @buf pointer to buffer containing the file contents.
- * @size length of the file contents.
- * @id kernel read file identifier
- * This must be paired with a prior @kernel_read_file call that had
- * @contents set to true.
- * Return 0 if permission is granted.
- * @task_fix_setuid:
- * Update the module's state after setting one or more of the user
- * identity attributes of the current process. The @flags parameter
- * indicates which of the set*uid system calls invoked this hook.
- * @new is the set of credentials that will be installed. Modifications
- * should be made to this rather than to @current->cred.
- * @old is the set of credentials that are being replaced.
- * @flags contains one of the LSM_SETID_* values.
- * Return 0 on success.
- * @task_fix_setgid:
- * Update the module's state after setting one or more of the group
- * identity attributes of the current process. The @flags parameter
- * indicates which of the set*gid system calls invoked this hook.
- * @new is the set of credentials that will be installed. Modifications
- * should be made to this rather than to @current->cred.
- * @old is the set of credentials that are being replaced.
- * @flags contains one of the LSM_SETID_* values.
- * Return 0 on success.
- * @task_setpgid:
- * Check permission before setting the process group identifier of the
- * process @p to @pgid.
- * @p contains the task_struct for process being modified.
- * @pgid contains the new pgid.
- * Return 0 if permission is granted.
- * @task_getpgid:
- * Check permission before getting the process group identifier of the
- * process @p.
- * @p contains the task_struct for the process.
- * Return 0 if permission is granted.
- * @task_getsid:
- * Check permission before getting the session identifier of the process
- * @p.
- * @p contains the task_struct for the process.
- * Return 0 if permission is granted.
- * @task_getsecid_subj:
- * Retrieve the subjective security identifier of the task_struct in @p
- * and return it in @secid. Special care must be taken to ensure that @p
- * is either the "current" task, or the caller has exclusive access
- * to @p.
- * In case of failure, @secid will be set to zero.
- * @task_getsecid_obj:
- * Retrieve the objective security identifier of the task_struct in @p
- * and return it in @secid.
- * In case of failure, @secid will be set to zero.
- *
- * @task_setnice:
- * Check permission before setting the nice value of @p to @nice.
- * @p contains the task_struct of process.
- * @nice contains the new nice value.
- * Return 0 if permission is granted.
- * @task_setioprio:
- * Check permission before setting the ioprio value of @p to @ioprio.
- * @p contains the task_struct of process.
- * @ioprio contains the new ioprio value
- * Return 0 if permission is granted.
- * @task_getioprio:
- * Check permission before getting the ioprio value of @p.
- * @p contains the task_struct of process.
- * Return 0 if permission is granted.
- * @task_prlimit:
- * Check permission before getting and/or setting the resource limits of
- * another task.
- * @cred points to the cred structure for the current task.
- * @tcred points to the cred structure for the target task.
- * @flags contains the LSM_PRLIMIT_* flag bits indicating whether the
- * resource limits are being read, modified, or both.
- * Return 0 if permission is granted.
- * @task_setrlimit:
- * Check permission before setting the resource limits of process @p
- * for @resource to @new_rlim. The old resource limit values can
- * be examined by dereferencing (p->signal->rlim + resource).
- * @p points to the task_struct for the target task's group leader.
- * @resource contains the resource whose limit is being set.
- * @new_rlim contains the new limits for @resource.
- * Return 0 if permission is granted.
- * @task_setscheduler:
- * Check permission before setting scheduling policy and/or parameters of
- * process @p.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
- * @task_getscheduler:
- * Check permission before obtaining scheduling information for process
- * @p.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
- * @task_movememory:
- * Check permission before moving memory owned by process @p.
- * @p contains the task_struct for process.
- * Return 0 if permission is granted.
- * @task_kill:
- * Check permission before sending signal @sig to @p. @info can be NULL,
- * the constant 1, or a pointer to a kernel_siginfo structure. If @info is 1 or
- * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming
- * from the kernel and should typically be permitted.
- * SIGIO signals are handled separately by the send_sigiotask hook in
- * file_security_ops.
- * @p contains the task_struct for process.
- * @info contains the signal information.
- * @sig contains the signal value.
- * @cred contains the cred of the process where the signal originated, or
- * NULL if the current task is the originator.
- * Return 0 if permission is granted.
- * @task_prctl:
- * Check permission before performing a process control operation on the
- * current process.
- * @option contains the operation.
- * @arg2 contains an argument.
- * @arg3 contains an argument.
- * @arg4 contains an argument.
- * @arg5 contains an argument.
- * Return -ENOSYS if no-one wanted to handle this op, any other value to
- * cause prctl() to return immediately with that value.
- * @task_to_inode:
- * Set the security attributes for an inode based on an associated task's
- * security attributes, e.g. for /proc/pid inodes.
- * @p contains the task_struct for the task.
- * @inode contains the inode structure for the inode.
- *
- * Security hooks for Netlink messaging.
- *
- * @netlink_send:
- * Save security information for a netlink message so that permission
- * checking can be performed when the message is processed. The security
- * information can be saved using the eff_cap field of the
- * netlink_skb_parms structure. Also may be used to provide fine
- * grained control over message transmission.
- * @sk associated sock of task sending the message.
- * @skb contains the sk_buff structure for the netlink message.
- * Return 0 if the information was successfully saved and message
- * is allowed to be transmitted.
- *
- * Security hooks for Unix domain networking.
- *
- * @unix_stream_connect:
- * Check permissions before establishing a Unix domain stream connection
- * between @sock and @other.
- * @sock contains the sock structure.
- * @other contains the peer sock structure.
- * @newsk contains the new sock structure.
- * Return 0 if permission is granted.
- * @unix_may_send:
- * Check permissions before connecting or sending datagrams from @sock to
- * @other.
- * @sock contains the socket structure.
- * @other contains the peer socket structure.
- * Return 0 if permission is granted.
- *
- * The @unix_stream_connect and @unix_may_send hooks were necessary because
- * Linux provides an alternative to the conventional file name space for Unix
- * domain sockets. Whereas binding and connecting to sockets in the file name
- * space is mediated by the typical file permissions (and caught by the mknod
- * and permission hooks in inode_security_ops), binding and connecting to
- * sockets in the abstract name space is completely unmediated. Sufficient
- * control of Unix domain sockets in the abstract name space isn't possible
- * using only the socket layer hooks, since we need to know the actual target
- * socket, which is not looked up until we are inside the af_unix code.
- *
- * Security hooks for socket operations.
- *
- * @socket_create:
- * Check permissions prior to creating a new socket.
- * @family contains the requested protocol family.
- * @type contains the requested communications type.
- * @protocol contains the requested protocol.
- * @kern set to 1 if a kernel socket.
- * Return 0 if permission is granted.
- * @socket_post_create:
- * This hook allows a module to update or allocate a per-socket security
- * structure. Note that the security field was not added directly to the
- * socket structure, but rather, the socket security information is stored
- * in the associated inode. Typically, the inode alloc_security hook will
- * allocate and attach security information to
- * SOCK_INODE(sock)->i_security. This hook may be used to update the
- * SOCK_INODE(sock)->i_security field with additional information that
- * wasn't available when the inode was allocated.
- * @sock contains the newly created socket structure.
- * @family contains the requested protocol family.
- * @type contains the requested communications type.
- * @protocol contains the requested protocol.
- * @kern set to 1 if a kernel socket.
- * @socket_socketpair:
- * Check permissions before creating a fresh pair of sockets.
- * @socka contains the first socket structure.
- * @sockb contains the second socket structure.
- * Return 0 if permission is granted and the connection was established.
- * @socket_bind:
- * Check permission before socket protocol layer bind operation is
- * performed and the socket @sock is bound to the address specified in the
- * @address parameter.
- * @sock contains the socket structure.
- * @address contains the address to bind to.
- * @addrlen contains the length of address.
- * Return 0 if permission is granted.
- * @socket_connect:
- * Check permission before socket protocol layer connect operation
- * attempts to connect socket @sock to a remote address, @address.
- * @sock contains the socket structure.
- * @address contains the address of remote endpoint.
- * @addrlen contains the length of address.
- * Return 0 if permission is granted.
- * @socket_listen:
- * Check permission before socket protocol layer listen operation.
- * @sock contains the socket structure.
- * @backlog contains the maximum length for the pending connection queue.
- * Return 0 if permission is granted.
- * @socket_accept:
- * Check permission before accepting a new connection. Note that the new
- * socket, @newsock, has been created and some information copied to it,
- * but the accept operation has not actually been performed.
- * @sock contains the listening socket structure.
- * @newsock contains the newly created server socket for connection.
- * Return 0 if permission is granted.
- * @socket_sendmsg:
- * Check permission before transmitting a message to another socket.
- * @sock contains the socket structure.
- * @msg contains the message to be transmitted.
- * @size contains the size of message.
- * Return 0 if permission is granted.
- * @socket_recvmsg:
- * Check permission before receiving a message from a socket.
- * @sock contains the socket structure.
- * @msg contains the message structure.
- * @size contains the size of message structure.
- * @flags contains the operational flags.
- * Return 0 if permission is granted.
- * @socket_getsockname:
- * Check permission before the local address (name) of the socket object
- * @sock is retrieved.
- * @sock contains the socket structure.
- * Return 0 if permission is granted.
- * @socket_getpeername:
- * Check permission before the remote address (name) of a socket object
- * @sock is retrieved.
- * @sock contains the socket structure.
- * Return 0 if permission is granted.
- * @socket_getsockopt:
- * Check permissions before retrieving the options associated with socket
- * @sock.
- * @sock contains the socket structure.
- * @level contains the protocol level to retrieve option from.
- * @optname contains the name of option to retrieve.
- * Return 0 if permission is granted.
- * @socket_setsockopt:
- * Check permissions before setting the options associated with socket
- * @sock.
- * @sock contains the socket structure.
- * @level contains the protocol level to set options for.
- * @optname contains the name of the option to set.
- * Return 0 if permission is granted.
- * @socket_shutdown:
- * Checks permission before all or part of a connection on the socket
- * @sock is shut down.
- * @sock contains the socket structure.
- * @how contains the flag indicating how future sends and receives
- * are handled.
- * Return 0 if permission is granted.
- * @socket_sock_rcv_skb:
- * Check permissions on incoming network packets. This hook is distinct
- * from Netfilter's IP input hooks since it is the first time that the
- * incoming sk_buff @skb has been associated with a particular socket, @sk.
- * Must not sleep inside this hook because some callers hold spinlocks.
- * @sk contains the sock (not socket) associated with the incoming sk_buff.
- * @skb contains the incoming network data.
- * @socket_getpeersec_stream:
- * This hook allows the security module to provide peer socket security
- * state for unix or connected tcp sockets to userspace via getsockopt
- * SO_PEERSEC. For tcp sockets this can be meaningful if the
- * socket is associated with an ipsec SA.
- * @sock is the local socket.
- * @optval userspace memory where the security state is to be copied.
- * @optlen userspace int where the module should copy the actual length
- * of the security state.
- * @len as input is the maximum length to copy to userspace provided
- * by the caller.
- * Return 0 if all is well, otherwise, typical getsockopt return
- * values.
- * @socket_getpeersec_dgram:
- * This hook allows the security module to provide peer socket security
- * state for udp sockets on a per-packet basis to userspace via
- * getsockopt SO_PEERSEC. The application must first have indicated
- * the IP_PASSSEC option via setsockopt. It can then retrieve the
- * security state returned by this hook for a packet via the SCM_SECURITY
- * ancillary message type.
- * @sock contains the peer socket. May be NULL.
- * @skb is the sk_buff for the packet being queried. May be NULL.
- * @secid pointer to store the secid of the packet.
- * Return 0 on success, error on failure.
- * @sk_alloc_security:
- * Allocate and attach a security structure to the sk->sk_security field,
- * which is used to copy security attributes between local stream sockets.
- * @sk_free_security:
- * Deallocate security structure.
- * @sk_clone_security:
- * Clone/copy security structure.
- * @sk_getsecid:
- * Retrieve the LSM-specific secid for the sock to enable caching
- * of network authorizations.
- * @sock_graft:
- * Sets the socket's isec sid to the sock's sid.
- * @inet_conn_request:
- * Sets the openreq's sid to socket's sid with MLS portion taken
- * from peer sid.
- * @inet_csk_clone:
- * Sets the new child socket's sid to the openreq sid.
- * @inet_conn_established:
- * Sets the connection's peersid to the secmark on skb.
- * @secmark_relabel_packet:
- * check if the process should be allowed to relabel packets to
- * the given secid
- * @secmark_refcount_inc:
- * tells the LSM to increment the number of secmark labeling rules loaded
- * @secmark_refcount_dec:
- * tells the LSM to decrement the number of secmark labeling rules loaded
- * @req_classify_flow:
- * Sets the flow's sid to the openreq sid.
- * @tun_dev_alloc_security:
- * This hook allows a module to allocate a security structure for a TUN
- * device.
- * @security pointer to a security structure pointer.
- * Returns a zero on success, negative values on failure.
- * @tun_dev_free_security:
- * This hook allows a module to free the security structure for a TUN
- * device.
- * @security pointer to the TUN device's security structure
- * @tun_dev_create:
- * Check permissions prior to creating a new TUN device.
- * @tun_dev_attach_queue:
- * Check permissions prior to attaching to a TUN device queue.
- * @security pointer to the TUN device's security structure.
- * @tun_dev_attach:
- * This hook can be used by the module to update any security state
- * associated with the TUN device's sock structure.
- * @sk contains the existing sock structure.
- * @security pointer to the TUN device's security structure.
- * @tun_dev_open:
- * This hook can be used by the module to update any security state
- * associated with the TUN device's security structure.
- * @security pointer to the TUN device's security structure.
- *
- * Security hooks for SCTP
- *
- * @sctp_assoc_request:
- * Passes the @ep and @chunk->skb of the association INIT packet to
- * the security module.
- * @ep pointer to sctp endpoint structure.
- * @skb pointer to skbuff of association packet.
- * Return 0 on success, error on failure.
- * @sctp_bind_connect:
- * Validate permissions required for each address associated with sock
- * @sk. Depending on @optname, the addresses will be treated as either
- * for a connect or bind service. The @addrlen is calculated on each
- * ipv4 and ipv6 address using sizeof(struct sockaddr_in) or
- * sizeof(struct sockaddr_in6).
- * @sk pointer to sock structure.
- * @optname name of the option to validate.
- * @address list containing one or more ipv4/ipv6 addresses.
- * @addrlen total length of address(es).
- * Return 0 on success, error on failure.
- * @sctp_sk_clone:
- * Called whenever a new socket is created by accept(2) (i.e. a TCP
- * style socket) or when a socket is 'peeled off', e.g. userspace
- * calls sctp_peeloff(3).
- * @ep pointer to current sctp endpoint structure.
- * @sk pointer to current sock structure.
- * @newsk pointer to new sock structure.
- *
- * Security hooks for Infiniband
- *
- * @ib_pkey_access:
- * Check permission to access a pkey when modifying a QP.
- * @subnet_prefix the subnet prefix of the port being used.
- * @pkey the pkey to be accessed.
- * @sec pointer to a security structure.
- * @ib_endport_manage_subnet:
- * Check permissions to send and receive SMPs on an end port.
- * @dev_name the IB device name (i.e. mlx4_0).
- * @port_num the port number.
- * @sec pointer to a security structure.
- * @ib_alloc_security:
- * Allocate a security structure for Infiniband objects.
- * @sec pointer to a security structure pointer.
- * Returns 0 on success, non-zero on failure
- * @ib_free_security:
- * Deallocate an Infiniband security structure.
- * @sec contains the security structure to be freed.
- *
- * Security hooks for XFRM operations.
- *
- * @xfrm_policy_alloc_security:
- * @ctxp is a pointer to the xfrm_sec_ctx being added to Security Policy
- * Database used by the XFRM system.
- * @sec_ctx contains the security context information being provided by
- * the user-level policy update program (e.g., setkey).
- * Allocate a security structure to the xp->security field; the security
- * field is initialized to NULL when the xfrm_policy is allocated.
- * @gfp is to specify the context for the allocation.
- * Return 0 if operation was successful (memory to allocate, legal context).
- * @xfrm_policy_clone_security:
- * @old_ctx contains an existing xfrm_sec_ctx.
- * @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
- * Allocate a security structure in new_ctxp that contains the
- * information from the old_ctx structure.
- * Return 0 if operation was successful (memory to allocate).
- * @xfrm_policy_free_security:
- * @ctx contains the xfrm_sec_ctx
- * Deallocate xp->security.
- * @xfrm_policy_delete_security:
- * @ctx contains the xfrm_sec_ctx.
- * Authorize deletion of xp->security.
- * @xfrm_state_alloc:
- * @x contains the xfrm_state being added to the Security Association
- * Database by the XFRM system.
- * @sec_ctx contains the security context information being provided by
- * the user-level SA generation program (e.g., setkey or racoon).
- * Allocate a security structure to the x->security field; the security
- * field is initialized to NULL when the xfrm_state is allocated. Set the
- * context to correspond to sec_ctx. Return 0 if operation was successful
- * (memory to allocate, legal context).
- * @xfrm_state_alloc_acquire:
- * @x contains the xfrm_state being added to the Security Association
- * Database by the XFRM system.
- * @polsec contains the policy's security context.
- * @secid contains the secid from which to take the mls portion of the
- * context.
- * Allocate a security structure to the x->security field; the security
- * field is initialized to NULL when the xfrm_state is allocated. Set the
- * context to correspond to secid. Return 0 if operation was successful
- * (memory to allocate, legal context).
- * @xfrm_state_free_security:
- * @x contains the xfrm_state.
- * Deallocate x->security.
- * @xfrm_state_delete_security:
- * @x contains the xfrm_state.
- * Authorize deletion of x->security.
- * @xfrm_policy_lookup:
- * @ctx contains the xfrm_sec_ctx for which the access control is being
- * checked.
- * @fl_secid contains the flow security label that is used to authorize
- * access to the policy xp.
- * @dir contains the direction of the flow (input or output).
- * Check permission when a flow selects a xfrm_policy for processing
- * XFRMs on a packet. The hook is called when selecting either a
- * per-socket policy or a generic xfrm policy.
- * Return 0 if permission is granted, -ESRCH otherwise, or -errno
- * on other errors.
- * @xfrm_state_pol_flow_match:
- * @x contains the state to match.
- * @xp contains the policy to check for a match.
- * @flic contains the flowi_common struct to check for a match.
- * Return 1 if there is a match.
- * @xfrm_decode_session:
- * @skb points to skb to decode.
- * @secid points to the flow key secid to set.
- * @ckall says if all xfrms used should be checked for same secid.
- * Return 0 if ckall is zero or all xfrms used have the same secid.
- *
- * Security hooks affecting all Key Management operations
- *
- * @key_alloc:
- * Permit allocation of a key and assign security data. Note that the key
- * does not have a serial number assigned at this point.
- * @key points to the key.
- * @flags is the allocation flags
- * Return 0 if permission is granted, -ve error otherwise.
- * @key_free:
- * Notification of destruction; free security data.
- * @key points to the key.
- * No return value.
- * @key_permission:
- * See whether a specific operational right is granted to a process on a
- * key.
- * @key_ref refers to the key (key pointer + possession attribute bit).
- * @cred points to the credentials to provide the context against which to
- * evaluate the security data on the key.
- * @perm describes the combination of permissions required of this key.
- * Return 0 if permission is granted, -ve error otherwise.
- * @key_getsecurity:
- * Get a textual representation of the security context attached to a key
- * for the purposes of honouring KEYCTL_GETSECURITY. This function
- * allocates the storage for the NUL-terminated string and the caller
- * should free it.
- * @key points to the key to be queried.
- * @_buffer points to a pointer that should be set to point to the
- * resulting string (NULL if there is no label or an error occurs).
- * Return the length of the string (including terminating NUL) or -ve if
- * an error.
- * May also return 0 (and a NULL buffer pointer) if there is no label.
- *
- * Security hooks affecting all System V IPC operations.
- *
- * @ipc_permission:
- * Check permissions for access to IPC
- * @ipcp contains the kernel IPC permission structure
- * @flag contains the desired (requested) permission set
- * Return 0 if permission is granted.
- * @ipc_getsecid:
- * Get the secid associated with the ipc object.
- * @ipcp contains the kernel IPC permission structure.
- * @secid contains a pointer to the location where result will be saved.
- * In case of failure, @secid will be set to zero.
- *
- * Security hooks for individual messages held in System V IPC message queues
- *
- * @msg_msg_alloc_security:
- * Allocate and attach a security structure to the msg->security field.
- * The security field is initialized to NULL when the structure is first
- * created.
- * @msg contains the message structure to be modified.
- * Return 0 if operation was successful and permission is granted.
- * @msg_msg_free_security:
- * Deallocate the security structure for this message.
- * @msg contains the message structure to be modified.
- *
- * Security hooks for System V IPC Message Queues
- *
- * @msg_queue_alloc_security:
- * Allocate and attach a security structure to the
- * @perm->security field. The security field is initialized to
- * NULL when the structure is first created.
- * @perm contains the IPC permissions of the message queue.
- * Return 0 if operation was successful and permission is granted.
- * @msg_queue_free_security:
- * Deallocate security field @perm->security for the message queue.
- * @perm contains the IPC permissions of the message queue.
- * @msg_queue_associate:
- * Check permission when a message queue is requested through the
- * msgget system call. This hook is only called when returning the
- * message queue identifier for an existing message queue, not when a
- * new message queue is created.
- * @perm contains the IPC permissions of the message queue.
- * @msqflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @msg_queue_msgctl:
- * Check permission when a message control operation specified by @cmd
- * is to be performed on the message queue with permissions @perm.
- * The @perm may be NULL, e.g. for IPC_INFO or MSG_INFO.
- * @perm contains the IPC permissions of the msg queue. May be NULL.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @msg_queue_msgsnd:
- * Check permission before a message, @msg, is enqueued on the message
- * queue with permissions @perm.
- * @perm contains the IPC permissions of the message queue.
- * @msg contains the message to be enqueued.
- * @msqflg contains operational flags.
- * Return 0 if permission is granted.
- * @msg_queue_msgrcv:
- * Check permission before a message, @msg, is removed from the message
- * queue. The @target task structure contains a pointer to the
- * process that will be receiving the message (not equal to the current
- * process when inline receives are being performed).
- * @perm contains the IPC permissions of the message queue.
- * @msg contains the message destination.
- * @target contains the task structure for recipient process.
- * @type contains the type of message requested.
- * @mode contains the operational flags.
- * Return 0 if permission is granted.
- *
- * Security hooks for System V Shared Memory Segments
- *
- * @shm_alloc_security:
- * Allocate and attach a security structure to the @perm->security
- * field. The security field is initialized to NULL when the structure is
- * first created.
- * @perm contains the IPC permissions of the shared memory structure.
- * Return 0 if operation was successful and permission is granted.
- * @shm_free_security:
- * Deallocate the security structure @perm->security for the memory segment.
- * @perm contains the IPC permissions of the shared memory structure.
- * @shm_associate:
- * Check permission when a shared memory region is requested through the
- * shmget system call. This hook is only called when returning the shared
- * memory region identifier for an existing region, not when a new shared
- * memory region is created.
- * @perm contains the IPC permissions of the shared memory structure.
- * @shmflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @shm_shmctl:
- * Check permission when a shared memory control operation specified by
- * @cmd is to be performed on the shared memory region with permissions @perm.
- * The @perm may be NULL, e.g. for IPC_INFO or SHM_INFO.
- * @perm contains the IPC permissions of the shared memory structure.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @shm_shmat:
- * Check permissions prior to allowing the shmat system call to attach the
- * shared memory segment with permissions @perm to the data segment of the
- * calling process. The attaching address is specified by @shmaddr.
- * @perm contains the IPC permissions of the shared memory structure.
- * @shmaddr contains the address to attach memory region to.
- * @shmflg contains the operational flags.
- * Return 0 if permission is granted.
- *
- * Security hooks for System V Semaphores
- *
- * @sem_alloc_security:
- * Allocate and attach a security structure to the @perm->security
- * field. The security field is initialized to NULL when the structure is
- * first created.
- * @perm contains the IPC permissions of the semaphore.
- * Return 0 if operation was successful and permission is granted.
- * @sem_free_security:
- * Deallocate security structure @perm->security for the semaphore.
- * @perm contains the IPC permissions of the semaphore.
- * @sem_associate:
- * Check permission when a semaphore is requested through the semget
- * system call. This hook is only called when returning the semaphore
- * identifier for an existing semaphore, not when a new one must be
- * created.
- * @perm contains the IPC permissions of the semaphore.
- * @semflg contains the operation control flags.
- * Return 0 if permission is granted.
- * @sem_semctl:
- * Check permission when a semaphore operation specified by @cmd is to be
- * performed on the semaphore. The @perm may be NULL, e.g. for
- * IPC_INFO or SEM_INFO.
- * @perm contains the IPC permissions of the semaphore. May be NULL.
- * @cmd contains the operation to be performed.
- * Return 0 if permission is granted.
- * @sem_semop:
- * Check permissions before performing operations on members of the
- * semaphore set. If the @alter flag is nonzero, the semaphore set
- * may be modified.
- * @perm contains the IPC permissions of the semaphore.
- * @sops contains the operations to perform.
- * @nsops contains the number of operations to perform.
- * @alter contains the flag indicating whether changes are to be made.
- * Return 0 if permission is granted.
- *
- * @binder_set_context_mgr:
- * Check whether @mgr is allowed to be the binder context manager.
- * @mgr contains the task_struct for the task being registered.
- * Return 0 if permission is granted.
- * @binder_transaction:
- * Check whether @from is allowed to invoke a binder transaction call
- * to @to.
- * @from contains the task_struct for the sending task.
- * @to contains the task_struct for the receiving task.
- * @binder_transfer_binder:
- * Check whether @from is allowed to transfer a binder reference to @to.
- * @from contains the task_struct for the sending task.
- * @to contains the task_struct for the receiving task.
- * @binder_transfer_file:
- * Check whether @from is allowed to transfer @file to @to.
- * @from contains the task_struct for the sending task.
- * @file contains the struct file being transferred.
- * @to contains the task_struct for the receiving task.
- *
- * @ptrace_access_check:
- * Check permission before allowing the current process to trace the
- * @child process.
- * Security modules may also want to perform a process tracing check
- * during an execve in the bprm_set_creds hook of binprm_security_ops
- * if the process is being traced and its security attributes would be
- * changed by the execve.
- * @child contains the task_struct structure for the target process.
- * @mode contains the PTRACE_MODE flags indicating the form of access.
- * Return 0 if permission is granted.
- * @ptrace_traceme:
- * Check that the @parent process has sufficient permission to trace the
- * current process before allowing the current process to present itself
- * to the @parent process for tracing.
- * @parent contains the task_struct structure for debugger process.
- * Return 0 if permission is granted.
- * @capget:
- * Get the @effective, @inheritable, and @permitted capability sets for
- * the @target process. The hook may also perform permission checking to
- * determine if the current process is allowed to see the capability sets
- * of the @target process.
- * @target contains the task_struct structure for target process.
- * @effective contains the effective capability set.
- * @inheritable contains the inheritable capability set.
- * @permitted contains the permitted capability set.
- * Return 0 if the capability sets were successfully obtained.
- * @capset:
- * Set the @effective, @inheritable, and @permitted capability sets for
- * the current process.
- * @new contains the new credentials structure for target process.
- * @old contains the current credentials structure for target process.
- * @effective contains the effective capability set.
- * @inheritable contains the inheritable capability set.
- * @permitted contains the permitted capability set.
- * Return 0 and update @new if permission is granted.
- * @capable:
- * Check whether the process has the @cap capability in the indicated
- * credentials.
- * @cred contains the credentials to use.
- * @ns contains the user namespace we want the capability in
- * @cap contains the capability <include/linux/capability.h>.
- * @opts contains options for the capable check <include/linux/security.h>
- * Return 0 if the capability is granted.
- * @quotactl:
- * Check whether the quotactl syscall is allowed for this @sb.
- * @quota_on:
- * Check whether QUOTAON is allowed for this @dentry.
- * @syslog:
- * Check permission before accessing the kernel message ring or changing
- * logging to the console.
- * See the syslog(2) manual page for an explanation of the @type values.
- * @type contains the SYSLOG_ACTION_* constant from <include/linux/syslog.h>
- * Return 0 if permission is granted.
- * @settime:
- * Check permission to change the system time.
- * struct timespec64 is defined in <include/linux/time64.h> and timezone
- * is defined in <include/linux/time.h>
- * @ts contains new time
- * @tz contains new timezone
- * Return 0 if permission is granted.
- * @vm_enough_memory:
- * Check permissions for allocating a new virtual mapping.
- * @mm contains the mm struct it is being added to.
- * @pages contains the number of pages.
- * Return 0 if permission is granted.
- *
- * @ismaclabel:
- * Check if the extended attribute specified by @name
- * represents a MAC label. Returns 1 if @name is a MAC
- * attribute, otherwise returns 0.
- * @name full extended attribute name to check against
- * LSM as a MAC label.
- *
- * @secid_to_secctx:
- * Convert secid to security context. If secdata is NULL the length of
- * the result will be returned in seclen, but no secdata will be returned.
- * This does mean that the length could change between calls to check the
- * length and the next call which actually allocates and returns the
- * secdata.
- * @secid contains the security ID.
- * @secdata contains the pointer that stores the converted security
- * context.
- * @seclen pointer which contains the length of the data
- * @secctx_to_secid:
- * Convert security context to secid.
- * @secid contains the pointer to the generated security ID.
- * @secdata contains the security context.
- *
- * @release_secctx:
- * Release the security context.
- * @secdata contains the security context.
- * @seclen contains the length of the security context.
- *
- * Security hooks for Audit
- *
- * @audit_rule_init:
- * Allocate and initialize an LSM audit rule structure.
- * @field contains the required Audit action.
- * Field flags are defined in <include/linux/audit.h>.
- * @op contains the operator the rule uses.
- * @rulestr contains the context where the rule will be applied to.
- * @lsmrule contains a pointer to receive the result.
- * Return 0 if @lsmrule has been successfully set,
- * -EINVAL in case of an invalid rule.
- *
- * @audit_rule_known:
- * Specifies whether given @krule contains any fields related to
- * current LSM.
- * @krule contains the audit rule of interest.
- * Return 1 in case of relation found, 0 otherwise.
- *
- * @audit_rule_match:
- * Determine if given @secid matches a rule previously approved
- * by @audit_rule_known.
- * @secid contains the security id in question.
- * @field contains the field which relates to current LSM.
- * @op contains the operator that will be used for matching.
- * @lrule points to the audit rule that will be checked against.
- * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure.
- *
- * @audit_rule_free:
- * Deallocate the LSM audit rule structure previously allocated by
- * audit_rule_init.
- * @lsmrule contains the allocated rule
- *
- * @inode_invalidate_secctx:
- * Notify the security module that it must revalidate the security context
- * of an inode.
- *
- * @inode_notifysecctx:
- * Notify the security module of what the security context of an inode
- * should be. Initializes the incore security context managed by the
- * security module for this inode. Example usage: NFS client invokes
- * this hook to initialize the security context in its incore inode to the
- * value provided by the server for the file when the server returned the
- * file's attributes to the client.
- * Must be called with inode->i_mutex locked.
- * @inode we wish to set the security context of.
- * @ctx contains the string which we wish to set in the inode.
- * @ctxlen contains the length of @ctx.
- *
- * @inode_setsecctx:
- * Change the security context of an inode. Updates the
- * incore security context managed by the security module and invokes the
- * fs code as needed (via __vfs_setxattr_noperm) to update any backing
- * xattrs that represent the context. Example usage: NFS server invokes
- * this hook to change the security context in its incore inode and on the
- * backing filesystem to a value provided by the client on a SETATTR
- * operation.
- * Must be called with inode->i_mutex locked.
- * @dentry contains the inode we wish to set the security context of.
- * @ctx contains the string which we wish to set in the inode.
- * @ctxlen contains the length of @ctx.
- *
- * @inode_getsecctx:
- * On success, returns 0 and fills out @ctx and @ctxlen with the security
- * context for the given @inode.
- * @inode we wish to get the security context of.
- * @ctx is a pointer in which to place the allocated security context.
- * @ctxlen points to the place to put the length of @ctx.
- *
- * Security hooks for the general notification queue:
- *
- * @post_notification:
- * Check to see if a watch notification can be posted to a particular
- * queue.
- * @w_cred: The credentials of whoever set the watch.
- * @cred: The event-triggerer's credentials
- * @n: The notification being posted
- *
- * @watch_key:
- * Check to see if a process is allowed to watch for event notifications
- * from a key or keyring.
- * @key: The key to watch.
- *
- * Security hooks for using the eBPF maps and programs functionalities through
- * eBPF syscalls.
- *
- * @bpf:
- * Do an initial check for all bpf syscalls after the attribute is copied
- * into the kernel. The security module can implement its own
- * rules to check the specific cmd it needs.
- *
- * @bpf_map:
- * Do a check when the kernel generates and returns a file descriptor for
- * eBPF maps.
- *
- * @map: bpf map that we want to access
- * @mask: the access flags
- *
- * @bpf_prog:
- * Do a check when the kernel generates and returns a file descriptor for
- * eBPF programs.
- *
- * @prog: bpf prog that userspace wants to use.
- *
- * @bpf_map_alloc_security:
- * Initialize the security field inside bpf map.
- *
- * @bpf_map_free_security:
- * Clean up the security information stored inside bpf map.
- *
- * @bpf_prog_alloc_security:
- * Initialize the security field inside bpf program.
- *
- * @bpf_prog_free_security:
- * Clean up the security information stored inside bpf prog.
- *
- * @locked_down:
- * Determine whether a kernel feature that potentially enables arbitrary
- * code execution in kernel space should be permitted.
- *
- * @what: kernel feature being accessed
- *
- * Security hooks for perf events
- *
- * @perf_event_open:
- * Check whether the @type of perf_event_open syscall is allowed.
- * @perf_event_alloc:
- * Allocate and save perf_event security info.
- * @perf_event_free:
- * Release (free) perf_event security info.
- * @perf_event_read:
- * Read perf_event security info if allowed.
- * @perf_event_write:
- * Write perf_event security info if allowed.
- */
union security_list_options {
#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
#include "lsm_hook_defs.h"
#undef LSM_HOOK
+ void *lsm_func_addr;
};
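For orientation, a sketch of what the LSM_HOOK X-macro expands to for a single entry of lsm_hook_defs.h (the file_permission entry is real; the rest of the union is elided):

/* lsm_hook_defs.h contains entries such as:
 *   LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
 * so the union gains one function pointer per hook:
 */
union security_list_options {
	int (*file_permission)(struct file *file, int mask);
	/* ... one member per LSM_HOOK() entry ... */
	void *lsm_func_addr;
};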
-struct security_hook_heads {
- #define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME;
- #include "lsm_hook_defs.h"
- #undef LSM_HOOK
+/*
+ * @key: static call key as defined by STATIC_CALL_KEY
+ * @trampoline: static call trampoline as defined by STATIC_CALL_TRAMP
+ * @hl: The security_hook_list as initialized by the owning LSM.
+ * @active: Enabled when the static call has an LSM hook associated.
+ */
+struct lsm_static_call {
+ struct static_call_key *key;
+ void *trampoline;
+ struct security_hook_list *hl;
+ /* this needs to be true or false based on what the key defaults to */
+ struct static_key_false *active;
} __randomize_layout;
/*
+ * Table of the static calls for each LSM hook.
+ * Once the LSMs are initialized, their callbacks will be copied to these
+ * tables such that the calls are filled backwards (from last to first).
+ * This way, we can jump directly to the first used static call and execute
+ * all of them after it. This essentially makes the entry point dynamic,
+ * adapting the number of static calls executed to the number of callbacks.
+ */
+struct lsm_static_calls_table {
+ #define LSM_HOOK(RET, DEFAULT, NAME, ...) \
+ struct lsm_static_call NAME[MAX_LSM_COUNT];
+ #include <linux/lsm_hook_defs.h>
+ #undef LSM_HOOK
+} __packed __randomize_layout;
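To make the backward filling concrete, a hypothetical layout for one hook (a MAX_LSM_COUNT of 4 and the two LSM callbacks are illustrative, not part of the patch):

/*
 * static_calls_table.file_permission[0]  ->  inactive (skipped)
 * static_calls_table.file_permission[1]  ->  inactive (skipped)
 * static_calls_table.file_permission[2]  ->  lsm_a_file_permission()
 * static_calls_table.file_permission[3]  ->  lsm_b_file_permission()
 *
 * Registered callbacks occupy the tail of the array, so a caller can
 * branch once to the first active slot and fall through the rest.
 */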
+
+/**
+ * struct lsm_id - Identify a Linux Security Module.
+ * @name: name of the LSM, must be approved by the LSM maintainers
+ * @id: LSM ID number from uapi/linux/lsm.h
+ *
+ * Contains the information that identifies the LSM.
+ */
+struct lsm_id {
+ const char *name;
+ u64 id;
+};
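A minimal sketch of an LSM defining its identity (not part of the patch; the "example" name and the LSM_ID_EXAMPLE constant are hypothetical, real IDs live in uapi/linux/lsm.h):

static const struct lsm_id example_lsmid = {
	.name	= "example",
	.id	= LSM_ID_EXAMPLE,	/* hypothetical uapi constant */
};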
+
+/*
* Security module hook list structure.
* For use with generic list macros for common operations.
+ *
+ * struct security_hook_list - Contents of a cacheable, mappable object.
+ * @scalls: The beginning of the array of static calls assigned to this hook.
+ * @hook: The callback for the hook.
+ * @lsmid: The identity of the LSM that owns this hook.
*/
struct security_hook_list {
- struct hlist_node list;
- struct hlist_head *head;
- union security_list_options hook;
- char *lsm;
+ struct lsm_static_call *scalls;
+ union security_list_options hook;
+ const struct lsm_id *lsmid;
} __randomize_layout;
/*
* Security blob size or offset data.
*/
struct lsm_blob_sizes {
- int lbs_cred;
- int lbs_file;
- int lbs_inode;
- int lbs_superblock;
- int lbs_ipc;
- int lbs_msg_msg;
- int lbs_task;
+ unsigned int lbs_cred;
+ unsigned int lbs_file;
+ unsigned int lbs_ib;
+ unsigned int lbs_inode;
+ unsigned int lbs_sock;
+ unsigned int lbs_superblock;
+ unsigned int lbs_ipc;
+ unsigned int lbs_key;
+ unsigned int lbs_msg_msg;
+ unsigned int lbs_perf_event;
+ unsigned int lbs_task;
+ unsigned int lbs_xattr_count; /* num xattr slots in new_xattrs array */
+ unsigned int lbs_tun_dev;
+ unsigned int lbs_bdev;
+ unsigned int lbs_bpf_map;
+ unsigned int lbs_bpf_prog;
+ unsigned int lbs_bpf_token;
};
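A hedged sketch of how an LSM might declare its per-object blob needs (not part of the patch; the example_* structures are hypothetical, and the framework turns these sizes into offsets into the shared blobs):

struct example_cred_sec  { u32 sid; };	/* hypothetical per-cred state */
struct example_inode_sec { u32 sid; };	/* hypothetical per-inode state */

static struct lsm_blob_sizes example_blob_sizes __ro_after_init = {
	.lbs_cred  = sizeof(struct example_cred_sec),
	.lbs_inode = sizeof(struct example_inode_sec),
};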
/*
@@ -1606,14 +133,14 @@ struct lsm_blob_sizes {
* care of the common case and reduces the amount of
* text involved.
*/
-#define LSM_HOOK_INIT(HEAD, HOOK) \
- { .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } }
-
-extern struct security_hook_heads security_hook_heads;
-extern char *lsm_names;
+#define LSM_HOOK_INIT(NAME, HOOK) \
+ { \
+ .scalls = static_calls_table.NAME, \
+ .hook = { .NAME = HOOK } \
+ }
extern void security_add_hooks(struct security_hook_list *hooks, int count,
- char *lsm);
+ const struct lsm_id *lsmid);
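Putting LSM_HOOK_INIT() and security_add_hooks() together, a minimal registration sketch (not part of the patch; all example_* symbols are hypothetical, while the hook name and signatures are the real ones):

static int example_file_permission(struct file *file, int mask)
{
	return 0;			/* allow everything */
}

static struct security_hook_list example_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(file_permission, example_file_permission),
};

static int __init example_init(void)
{
	security_add_hooks(example_hooks, ARRAY_SIZE(example_hooks),
			   &example_lsmid);
	return 0;
}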
#define LSM_FLAG_LEGACY_MAJOR BIT(0)
#define LSM_FLAG_EXCLUSIVE BIT(1)
@@ -1621,20 +148,41 @@ extern void security_add_hooks(struct security_hook_list *hooks, int count,
enum lsm_order {
LSM_ORDER_FIRST = -1, /* This is only for capabilities. */
LSM_ORDER_MUTABLE = 0,
+ LSM_ORDER_LAST = 1, /* This is only for integrity. */
};
+/**
+ * struct lsm_info - Define an individual LSM for the LSM framework.
+ * @id: LSM name/ID info
+ * @order: ordering with respect to other LSMs, optional
+ * @flags: descriptive flags, optional
+ * @blobs: LSM blob sharing, optional
+ * @enabled: controlled by CONFIG_LSM, optional
+ * @init: LSM specific initialization routine
+ * @initcall_pure: LSM callback for initcall_pure() setup, optional
+ * @initcall_early: LSM callback for early_initcall() setup, optional
+ * @initcall_core: LSM callback for core_initcall() setup, optional
+ * @initcall_subsys: LSM callback for subsys_initcall() setup, optional
+ * @initcall_fs: LSM callback for fs_initcall() setup, optional
+ * @initcall_device: LSM callback for device_initcall() setup, optional
+ * @initcall_late: LSM callback for late_initcall() setup, optional
+ */
struct lsm_info {
- const char *name; /* Required. */
- enum lsm_order order; /* Optional: default is LSM_ORDER_MUTABLE */
- unsigned long flags; /* Optional: flags describing LSM */
- int *enabled; /* Optional: controlled by CONFIG_LSM */
- int (*init)(void); /* Required. */
- struct lsm_blob_sizes *blobs; /* Optional: for blob sharing. */
+ const struct lsm_id *id;
+ enum lsm_order order;
+ unsigned long flags;
+ struct lsm_blob_sizes *blobs;
+ int *enabled;
+ int (*init)(void);
+ int (*initcall_pure)(void);
+ int (*initcall_early)(void);
+ int (*initcall_core)(void);
+ int (*initcall_subsys)(void);
+ int (*initcall_fs)(void);
+ int (*initcall_device)(void);
+ int (*initcall_late)(void);
};
-extern struct lsm_info __start_lsm_info[], __end_lsm_info[];
-extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];
-
#define DEFINE_LSM(lsm) \
static struct lsm_info __lsm_##lsm \
__used __section(".lsm_info.init") \
@@ -1645,36 +193,26 @@ extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];
__used __section(".early_lsm_info.init") \
__aligned(sizeof(unsigned long))
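A hedged sketch of wiring the pieces above into the framework (not part of the patch; the example_* symbols are the hypothetical ones from the earlier sketches):

DEFINE_LSM(example) = {
	.id	= &example_lsmid,
	.blobs	= &example_blob_sizes,
	.init	= example_init,
};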
-#ifdef CONFIG_SECURITY_SELINUX_DISABLE
-/*
- * Assuring the safety of deleting a security module is up to
- * the security module involved. This may entail ordering the
- * module's hook list in a particular way, refusing to disable
- * the module once a policy is loaded or any number of other
- * actions better imagined than described.
+
+/* DO NOT tamper with these variables outside of the LSM framework */
+extern struct lsm_static_calls_table static_calls_table __ro_after_init;
+
+/**
+ * lsm_get_xattr_slot - Return the next available slot and increment the index
+ * @xattrs: array storing LSM-provided xattrs
+ * @xattr_count: number of already stored xattrs (updated)
+ *
+ * Retrieve the first available slot in the @xattrs array to fill with an xattr,
+ * and increment @xattr_count.
*
- * The name of the configuration option reflects the only module
- * that currently uses the mechanism. Any developer who thinks
- * disabling their module is a good idea needs to be at least as
- * careful as the SELinux team.
+ * Return: The slot to fill in @xattrs if non-NULL, NULL otherwise.
*/
-static inline void security_delete_hooks(struct security_hook_list *hooks,
- int count)
+static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs,
+ int *xattr_count)
{
- int i;
-
- for (i = 0; i < count; i++)
- hlist_del_rcu(&hooks[i].list);
+ if (unlikely(!xattrs))
+ return NULL;
+ return &xattrs[(*xattr_count)++];
}
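A minimal usage sketch inside a hypothetical inode_init_security implementation (not part of the patch; the function, suffix, and label payload are illustrative):

static int example_inode_init_security(struct inode *inode, struct inode *dir,
				       const struct qstr *qstr,
				       struct xattr *xattrs, int *xattr_count)
{
	struct xattr *xattr = lsm_get_xattr_slot(xattrs, xattr_count);

	if (!xattr)		/* the caller did not supply an array */
		return 0;
	xattr->value = kstrdup("example-label", GFP_NOFS);
	if (!xattr->value)
		return -ENOMEM;
	xattr->value_len = strlen("example-label") + 1;
	xattr->name = XATTR_EXAMPLE_SUFFIX;	/* hypothetical suffix */
	return 0;
}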
-#endif /* CONFIG_SECURITY_SELINUX_DISABLE */
-
-/* Currently required to handle SELinux runtime hook disable. */
-#ifdef CONFIG_SECURITY_WRITABLE_HOOKS
-#define __lsm_ro_after_init
-#else
-#define __lsm_ro_after_init __ro_after_init
-#endif /* CONFIG_SECURITY_WRITABLE_HOOKS */
-
-extern int lsm_inode_alloc(struct inode *inode);
#endif /* ! __LINUX_LSM_HOOKS_H */
diff --git a/include/linux/lwq.h b/include/linux/lwq.h
new file mode 100644
index 000000000000..d081d5cf8e33
--- /dev/null
+++ b/include/linux/lwq.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef LWQ_H
+#define LWQ_H
+/*
+ * Light-weight single-linked queue built from llist
+ *
+ * Entries can be enqueued from any context with no locking.
+ * Entries can be dequeued from process context with integrated locking.
+ *
+ * This is particularly suitable when work items are queued in
+ * BH or IRQ context, and where work items are handled one at a time
+ * by dedicated threads.
+ */
+#include <linux/container_of.h>
+#include <linux/spinlock.h>
+#include <linux/llist.h>
+
+struct lwq_node {
+ struct llist_node node;
+};
+
+struct lwq {
+ spinlock_t lock;
+ struct llist_node *ready; /* entries to be dequeued */
+ struct llist_head new; /* entries being enqueued */
+};
+
+/**
+ * lwq_init - initialise a lwq
+ * @q: the lwq object
+ */
+static inline void lwq_init(struct lwq *q)
+{
+ spin_lock_init(&q->lock);
+ q->ready = NULL;
+ init_llist_head(&q->new);
+}
+
+/**
+ * lwq_empty - test if lwq contains any entry
+ * @q: the lwq object
+ *
+ * This empty test contains an acquire barrier so that if a wakeup
+ * is sent when lwq_enqueue() returns true, it is safe to go to sleep after
+ * a test on lwq_empty().
+ */
+static inline bool lwq_empty(struct lwq *q)
+{
+ /* acquire ensures ordering wrt lwq_enqueue() */
+ return smp_load_acquire(&q->ready) == NULL && llist_empty(&q->new);
+}
+
+struct llist_node *__lwq_dequeue(struct lwq *q);
+/**
+ * lwq_dequeue - dequeue first (oldest) entry from lwq
+ * @q: the queue to dequeue from
+ * @type: the type of object to return
+ * @member: the member in the returned object which is an lwq_node.
+ *
+ * Remove a single object from the lwq and return it. This will take
+ * a spinlock and so must always be called in the same context, typically
+ * process context.
+ */
+#define lwq_dequeue(q, type, member) \
+ ({ struct llist_node *_n = __lwq_dequeue(q); \
+ _n ? container_of(_n, type, member.node) : NULL; })
+
+struct llist_node *lwq_dequeue_all(struct lwq *q);
+
+/**
+ * lwq_for_each_safe - iterate over detached queue allowing deletion
+ * @_n: iterator variable
+ * @_t1: temporary struct llist_node **
+ * @_t2: temporary struct llist_node *
+ * @_l: address of llist_node pointer from lwq_dequeue_all()
+ * @_member: member in _n where lwq_node is found.
+ *
+ * Iterate over members in a dequeued list. If the iterator variable
+ * is set to NULL, the iterator removes that entry from the queue.
+ */
+#define lwq_for_each_safe(_n, _t1, _t2, _l, _member) \
+ for (_t1 = (_l); \
+ *(_t1) ? (_n = container_of(*(_t1), typeof(*(_n)), _member.node),\
+ _t2 = ((*_t1)->next), \
+ true) \
+ : false; \
+ (_n) ? (_t1 = &(_n)->_member.node.next, 0) \
+ : ((*(_t1) = (_t2)), 0))
+
+/**
+ * lwq_enqueue - add a new item to the end of the queue
+ * @n: the lwq_node embedded in the item to be added
+ * @q: the lwq to append to.
+ *
+ * No locking is needed to append to the queue so this can
+ * be called from any context.
+ * Return %true if the list may previously have been empty.
+ */
+static inline bool lwq_enqueue(struct lwq_node *n, struct lwq *q)
+{
+	/* acquire ensures ordering wrt lwq_dequeue */
+ return llist_add(&n->node, &q->new) &&
+ smp_load_acquire(&q->ready) == NULL;
+}
+
+/**
+ * lwq_enqueue_batch - add a list of new items to the end of the queue
+ * @n: the lwq_node embedded in the first item to be added
+ * @q: the lwq to append to.
+ *
+ * No locking is needed to append to the queue so this can
+ * be called from any context.
+ * Return %true if the list may previously have been empty.
+ */
+static inline bool lwq_enqueue_batch(struct llist_node *n, struct lwq *q)
+{
+ struct llist_node *e = n;
+
+ /* acquire enqures ordering wrt lwq_dequeue */
+ return llist_add_batch(llist_reverse_order(n), e, &q->new) &&
+ smp_load_acquire(&q->ready) == NULL;
+}
+#endif /* LWQ_H */
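A minimal usage sketch of the queue as a whole; the work-item type, the wait
queue, and process_item() below are illustrative placeholders, not part of
this header:

	struct my_work {
		int payload;
		struct lwq_node node;
	};

	static struct lwq work_queue;		/* lwq_init() called during setup */
	static DECLARE_WAIT_QUEUE_HEAD(worker_wait);

	/* Producer side: safe from BH or IRQ context. */
	static void submit(struct my_work *w)
	{
		/* %true means the queue may have been empty, so kick the worker. */
		if (lwq_enqueue(&w->node, &work_queue))
			wake_up(&worker_wait);
	}

	/* Consumer side: process context only, __lwq_dequeue() takes a spinlock. */
	static void worker(void)
	{
		struct my_work *w;

		for (;;) {
			while ((w = lwq_dequeue(&work_queue, struct my_work, node)))
				process_item(w);
			wait_event(worker_wait, !lwq_empty(&work_queue));
		}
	}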
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
index b16e15b9587a..ad6042a718b5 100644
--- a/include/linux/lz4.h
+++ b/include/linux/lz4.h
@@ -645,4 +645,10 @@ int LZ4_decompress_safe_usingDict(const char *source, char *dest,
int LZ4_decompress_fast_usingDict(const char *source, char *dest,
int originalSize, const char *dictStart, int dictSize);
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32)
+
+#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */
+#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
+#endif
+
#endif
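The new margin follows upstream LZ4's in-place decompression scheme: if the
compressed block sits at the tail of the destination buffer, the
decompressor's writes can never overtake its reads. A sizing sketch under
that assumption (the src/dst names are illustrative):

	/* Decompress in place: compressed input parked at the buffer's tail. */
	size_t buf_size = dst_size + LZ4_DECOMPRESS_INPLACE_MARGIN(src_size);
	char *buf = kmalloc(buf_size, GFP_KERNEL);
	char *src_in_buf;
	int ret;

	if (!buf)
		return -ENOMEM;
	src_in_buf = buf + buf_size - src_size;
	memcpy(src_in_buf, src, src_size);
	ret = LZ4_decompress_safe(src_in_buf, buf, src_size, dst_size);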
diff --git a/include/linux/lzo.h b/include/linux/lzo.h
index e95c7d1092b2..4d30e3624acd 100644
--- a/include/linux/lzo.h
+++ b/include/linux/lzo.h
@@ -24,10 +24,18 @@
int lzo1x_1_compress(const unsigned char *src, size_t src_len,
unsigned char *dst, size_t *dst_len, void *wrkmem);
+/* Same as above but does not write more than dst_len bytes to dst. */
+int lzo1x_1_compress_safe(const unsigned char *src, size_t src_len,
+ unsigned char *dst, size_t *dst_len, void *wrkmem);
+
/* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */
int lzorle1x_1_compress(const unsigned char *src, size_t src_len,
unsigned char *dst, size_t *dst_len, void *wrkmem);
+/* Same as above but does not write more than dst_len bytes to dst. */
+int lzorle1x_1_compress_safe(const unsigned char *src, size_t src_len,
+ unsigned char *dst, size_t *dst_len, void *wrkmem);
+
/* safe decompression with overrun testing */
int lzo1x_decompress_safe(const unsigned char *src, size_t src_len,
unsigned char *dst, size_t *dst_len);
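A sketch of calling the bounded-output variant; dst_len is in/out (capacity
on entry, bytes produced on success), and the _safe version is expected to
fail rather than overrun dst:

	size_t dst_len = dst_capacity;
	void *wrkmem = kvmalloc(LZO1X_1_MEM_COMPRESS, GFP_KERNEL);
	int ret;

	if (!wrkmem)
		return -ENOMEM;
	ret = lzo1x_1_compress_safe(src, src_len, dst, &dst_len, wrkmem);
	if (ret != LZO_E_OK)
		pr_warn("lzo: compression failed or output exceeded %zu bytes\n",
			dst_capacity);
	kvfree(wrkmem);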
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index a7330eb3ec64..7aab4a769736 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -18,7 +18,6 @@
#ifndef mISDNIF_H
#define mISDNIF_H
-#include <stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/socket.h>
@@ -587,7 +586,7 @@ extern struct mISDNclock *mISDN_register_clock(char *, int, clockctl_func_t *,
void *);
extern void mISDN_unregister_clock(struct mISDNclock *);
-static inline struct mISDNdevice *dev_to_mISDN(struct device *dev)
+static inline struct mISDNdevice *dev_to_mISDN(const struct device *dev)
{
if (dev)
return dev_get_drvdata(dev);
diff --git a/include/linux/mailbox/exynos-message.h b/include/linux/mailbox/exynos-message.h
new file mode 100644
index 000000000000..5a9ed5ce2046
--- /dev/null
+++ b/include/linux/mailbox/exynos-message.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Exynos mailbox message.
+ *
+ * Copyright 2024 Linaro Ltd.
+ */
+
+#ifndef _LINUX_EXYNOS_MESSAGE_H_
+#define _LINUX_EXYNOS_MESSAGE_H_
+
+#define EXYNOS_MBOX_CHAN_TYPE_DOORBELL 0
+#define EXYNOS_MBOX_CHAN_TYPE_DATA 1
+
+struct exynos_mbox_msg {
+ unsigned int chan_id;
+ unsigned int chan_type;
+};
+
+#endif /* _LINUX_EXYNOS_MESSAGE_H_ */
diff --git a/include/linux/mailbox/mchp-ipc.h b/include/linux/mailbox/mchp-ipc.h
new file mode 100644
index 000000000000..f084ac9e291b
--- /dev/null
+++ b/include/linux/mailbox/mchp-ipc.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Microchip Technology Inc. All rights reserved.
+ */
+
+#ifndef _LINUX_MCHP_IPC_H_
+#define _LINUX_MCHP_IPC_H_
+
+#include <linux/mailbox_controller.h>
+#include <linux/types.h>
+
+struct mchp_ipc_msg {
+ u32 *buf;
+ u16 size;
+};
+
+struct mchp_ipc_sbi_chan {
+ void *buf_base_tx;
+ void *buf_base_rx;
+ void *msg_buf_tx;
+ void *msg_buf_rx;
+ phys_addr_t buf_base_tx_addr;
+ phys_addr_t buf_base_rx_addr;
+ phys_addr_t msg_buf_tx_addr;
+ phys_addr_t msg_buf_rx_addr;
+ int chan_aggregated_irq;
+ int mp_irq;
+ int mc_irq;
+ u32 id;
+ u32 max_msg_size;
+};
+
+#endif /* _LINUX_MCHP_IPC_H_ */
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index d5a983d65f05..e1555e06e7e5 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -65,21 +65,9 @@ enum cmdq_code {
CMDQ_CODE_LOGIC = 0xa0,
};
-enum cmdq_cb_status {
- CMDQ_CB_NORMAL = 0,
- CMDQ_CB_ERROR
-};
-
struct cmdq_cb_data {
- enum cmdq_cb_status sta;
- void *data;
-};
-
-typedef void (*cmdq_async_flush_cb)(struct cmdq_cb_data data);
-
-struct cmdq_task_cb {
- cmdq_async_flush_cb cb;
- void *data;
+ int sta;
+ struct cmdq_pkt *pkt;
};
struct cmdq_pkt {
@@ -87,11 +75,18 @@ struct cmdq_pkt {
dma_addr_t pa_base;
size_t cmd_buf_size; /* command occupied size */
size_t buf_size; /* real buffer size */
- struct cmdq_task_cb cb;
- struct cmdq_task_cb async_cb;
- void *cl;
};
+/**
+ * cmdq_get_shift_pa() - get the shift bits of physical address
+ * @chan: mailbox channel
+ *
+ * GCE can only fetch the command buffer address from a 32-bit register.
+ * Some SoCs support command buffer addresses wider than 32 bits for GCE, which
+ * requires some shift bits to make the address fit into the 32-bit register.
+ *
+ * Return: the shift bits of physical address
+ */
u8 cmdq_get_shift_pa(struct mbox_chan *chan);
#endif /* __MTK_CMDQ_MAILBOX_H__ */
diff --git a/include/linux/mailbox/riscv-rpmi-message.h b/include/linux/mailbox/riscv-rpmi-message.h
new file mode 100644
index 000000000000..e135c6564d0c
--- /dev/null
+++ b/include/linux/mailbox/riscv-rpmi-message.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Ventana Micro Systems Inc. */
+
+#ifndef _LINUX_RISCV_RPMI_MESSAGE_H_
+#define _LINUX_RISCV_RPMI_MESSAGE_H_
+
+#include <linux/errno.h>
+#include <linux/mailbox_client.h>
+#include <linux/types.h>
+#include <linux/wordpart.h>
+
+/* RPMI version encode/decode macros */
+#define RPMI_VER_MAJOR(__ver) upper_16_bits(__ver)
+#define RPMI_VER_MINOR(__ver) lower_16_bits(__ver)
+#define RPMI_MKVER(__maj, __min) (((u32)(__maj) << 16) | (u16)(__min))
+
+/* RPMI message header */
+struct rpmi_message_header {
+ __le16 servicegroup_id;
+ u8 service_id;
+ u8 flags;
+ __le16 datalen;
+ __le16 token;
+};
+
+/* RPMI message */
+struct rpmi_message {
+ struct rpmi_message_header header;
+ u8 data[];
+};
+
+/* RPMI notification event */
+struct rpmi_notification_event {
+ __le16 event_datalen;
+ u8 event_id;
+ u8 reserved;
+ u8 event_data[];
+};
+
+/* RPMI error codes */
+enum rpmi_error_codes {
+ RPMI_SUCCESS = 0,
+ RPMI_ERR_FAILED = -1,
+ RPMI_ERR_NOTSUPP = -2,
+ RPMI_ERR_INVALID_PARAM = -3,
+ RPMI_ERR_DENIED = -4,
+ RPMI_ERR_INVALID_ADDR = -5,
+ RPMI_ERR_ALREADY = -6,
+ RPMI_ERR_EXTENSION = -7,
+ RPMI_ERR_HW_FAULT = -8,
+ RPMI_ERR_BUSY = -9,
+ RPMI_ERR_INVALID_STATE = -10,
+ RPMI_ERR_BAD_RANGE = -11,
+ RPMI_ERR_TIMEOUT = -12,
+ RPMI_ERR_IO = -13,
+ RPMI_ERR_NO_DATA = -14,
+ RPMI_ERR_RESERVED_START = -15,
+ RPMI_ERR_RESERVED_END = -127,
+ RPMI_ERR_VENDOR_START = -128,
+};
+
+static inline int rpmi_to_linux_error(int rpmi_error)
+{
+ switch (rpmi_error) {
+ case RPMI_SUCCESS:
+ return 0;
+ case RPMI_ERR_INVALID_PARAM:
+ case RPMI_ERR_BAD_RANGE:
+ case RPMI_ERR_INVALID_STATE:
+ return -EINVAL;
+ case RPMI_ERR_DENIED:
+ return -EPERM;
+ case RPMI_ERR_INVALID_ADDR:
+ case RPMI_ERR_HW_FAULT:
+ return -EFAULT;
+ case RPMI_ERR_ALREADY:
+ return -EALREADY;
+ case RPMI_ERR_BUSY:
+ return -EBUSY;
+ case RPMI_ERR_TIMEOUT:
+ return -ETIMEDOUT;
+ case RPMI_ERR_IO:
+ return -ECOMM;
+ case RPMI_ERR_FAILED:
+ case RPMI_ERR_NOTSUPP:
+ case RPMI_ERR_NO_DATA:
+ case RPMI_ERR_EXTENSION:
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* RPMI service group IDs */
+#define RPMI_SRVGRP_SYSTEM_MSI 0x00002
+#define RPMI_SRVGRP_CLOCK 0x00008
+
+/* RPMI clock service IDs */
+enum rpmi_clock_service_id {
+ RPMI_CLK_SRV_ENABLE_NOTIFICATION = 0x01,
+ RPMI_CLK_SRV_GET_NUM_CLOCKS = 0x02,
+ RPMI_CLK_SRV_GET_ATTRIBUTES = 0x03,
+ RPMI_CLK_SRV_GET_SUPPORTED_RATES = 0x04,
+ RPMI_CLK_SRV_SET_CONFIG = 0x05,
+ RPMI_CLK_SRV_GET_CONFIG = 0x06,
+ RPMI_CLK_SRV_SET_RATE = 0x07,
+ RPMI_CLK_SRV_GET_RATE = 0x08,
+ RPMI_CLK_SRV_ID_MAX_COUNT
+};
+
+/* RPMI system MSI service IDs */
+enum rpmi_sysmsi_service_id {
+ RPMI_SYSMSI_SRV_ENABLE_NOTIFICATION = 0x01,
+ RPMI_SYSMSI_SRV_GET_ATTRIBUTES = 0x02,
+ RPMI_SYSMSI_SRV_GET_MSI_ATTRIBUTES = 0x03,
+ RPMI_SYSMSI_SRV_SET_MSI_STATE = 0x04,
+ RPMI_SYSMSI_SRV_GET_MSI_STATE = 0x05,
+ RPMI_SYSMSI_SRV_SET_MSI_TARGET = 0x06,
+ RPMI_SYSMSI_SRV_GET_MSI_TARGET = 0x07,
+ RPMI_SYSMSI_SRV_ID_MAX_COUNT
+};
+
+/* RPMI Linux mailbox attribute IDs */
+enum rpmi_mbox_attribute_id {
+ RPMI_MBOX_ATTR_SPEC_VERSION,
+ RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE,
+ RPMI_MBOX_ATTR_SERVICEGROUP_ID,
+ RPMI_MBOX_ATTR_SERVICEGROUP_VERSION,
+ RPMI_MBOX_ATTR_IMPL_ID,
+ RPMI_MBOX_ATTR_IMPL_VERSION,
+ RPMI_MBOX_ATTR_MAX_ID
+};
+
+/* RPMI Linux mailbox message types */
+enum rpmi_mbox_message_type {
+ RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE,
+ RPMI_MBOX_MSG_TYPE_SET_ATTRIBUTE,
+ RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE,
+ RPMI_MBOX_MSG_TYPE_SEND_WITHOUT_RESPONSE,
+ RPMI_MBOX_MSG_TYPE_NOTIFICATION_EVENT,
+ RPMI_MBOX_MSG_MAX_TYPE
+};
+
+/* RPMI Linux mailbox message instance */
+struct rpmi_mbox_message {
+ enum rpmi_mbox_message_type type;
+ union {
+ struct {
+ enum rpmi_mbox_attribute_id id;
+ u32 value;
+ } attr;
+
+ struct {
+ u32 service_id;
+ void *request;
+ unsigned long request_len;
+ void *response;
+ unsigned long max_response_len;
+ unsigned long out_response_len;
+ } data;
+
+ struct {
+ u16 event_datalen;
+ u8 event_id;
+ u8 *event_data;
+ } notif;
+ };
+ int error;
+};
+
+/* RPMI Linux mailbox message helper routines */
+static inline void rpmi_mbox_init_get_attribute(struct rpmi_mbox_message *msg,
+ enum rpmi_mbox_attribute_id id)
+{
+ msg->type = RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE;
+ msg->attr.id = id;
+ msg->attr.value = 0;
+ msg->error = 0;
+}
+
+static inline void rpmi_mbox_init_set_attribute(struct rpmi_mbox_message *msg,
+ enum rpmi_mbox_attribute_id id,
+ u32 value)
+{
+ msg->type = RPMI_MBOX_MSG_TYPE_SET_ATTRIBUTE;
+ msg->attr.id = id;
+ msg->attr.value = value;
+ msg->error = 0;
+}
+
+static inline void rpmi_mbox_init_send_with_response(struct rpmi_mbox_message *msg,
+ u32 service_id,
+ void *request,
+ unsigned long request_len,
+ void *response,
+ unsigned long max_response_len)
+{
+ msg->type = RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE;
+ msg->data.service_id = service_id;
+ msg->data.request = request;
+ msg->data.request_len = request_len;
+ msg->data.response = response;
+ msg->data.max_response_len = max_response_len;
+ msg->data.out_response_len = 0;
+ msg->error = 0;
+}
+
+static inline void rpmi_mbox_init_send_without_response(struct rpmi_mbox_message *msg,
+ u32 service_id,
+ void *request,
+ unsigned long request_len)
+{
+ msg->type = RPMI_MBOX_MSG_TYPE_SEND_WITHOUT_RESPONSE;
+ msg->data.service_id = service_id;
+ msg->data.request = request;
+ msg->data.request_len = request_len;
+ msg->data.response = NULL;
+ msg->data.max_response_len = 0;
+ msg->data.out_response_len = 0;
+ msg->error = 0;
+}
+
+static inline void *rpmi_mbox_get_msg_response(struct rpmi_mbox_message *msg)
+{
+ return msg ? msg->data.response : NULL;
+}
+
+static inline int rpmi_mbox_send_message(struct mbox_chan *chan,
+ struct rpmi_mbox_message *msg)
+{
+ int ret;
+
+	/* Send message over the underlying mailbox channel */
+ ret = mbox_send_message(chan, msg);
+ if (ret < 0)
+ return ret;
+
+ /* Explicitly signal txdone for mailbox channel */
+ ret = msg->error;
+ mbox_client_txdone(chan, ret);
+ return ret;
+}
+
+#endif /* _LINUX_RISCV_RPMI_MESSAGE_H_ */
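A minimal client sketch tying the helpers together; the channel is assumed
to come from mbox_request_channel(), and the attribute queried is just an
example:

	struct rpmi_mbox_message msg;
	int ret;

	rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SPEC_VERSION);
	ret = rpmi_mbox_send_message(chan, &msg);
	if (ret)
		return ret;
	dev_info(dev, "RPMI spec v%u.%u\n",
		 RPMI_VER_MAJOR(msg.attr.value),
		 RPMI_VER_MINOR(msg.attr.value));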
diff --git a/include/linux/mailbox/zynqmp-ipi-message.h b/include/linux/mailbox/zynqmp-ipi-message.h
index 35ce84c8ca02..31d8046d945e 100644
--- a/include/linux/mailbox/zynqmp-ipi-message.h
+++ b/include/linux/mailbox/zynqmp-ipi-message.h
@@ -9,7 +9,7 @@
* @data: message payload
*
* This is the structure for data used in mbox_send_message
- * the maximum length of data buffer is fixed to 12 bytes.
+ * where the maximum length of the data buffer is fixed to 32 bytes.
* Client is supposed to be aware of this.
*/
struct zynqmp_ipi_message {
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
index 65229a45590f..c6eea9afb943 100644
--- a/include/linux/mailbox_client.h
+++ b/include/linux/mailbox_client.h
@@ -7,8 +7,8 @@
#ifndef __MAILBOX_CLIENT_H
#define __MAILBOX_CLIENT_H
-#include <linux/of.h>
#include <linux/device.h>
+#include <linux/of.h>
struct mbox_chan;
@@ -37,6 +37,7 @@ struct mbox_client {
void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
};
+int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl);
struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
const char *name);
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index 36d6ce673503..80a427c7ca29 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -3,11 +3,11 @@
#ifndef __MAILBOX_CONTROLLER_H
#define __MAILBOX_CONTROLLER_H
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/hrtimer.h>
#include <linux/of.h>
#include <linux/types.h>
-#include <linux/hrtimer.h>
-#include <linux/device.h>
-#include <linux/completion.h>
struct mbox_chan;
@@ -66,6 +66,7 @@ struct mbox_chan_ops {
* no interrupt rises. Ignored if 'txdone_irq' is set.
* @txpoll_period: If 'txdone_poll' is in effect, the API polls for
* last TX's status after these many millisecs
+ * @fw_xlate: Controller driver specific mapping of channel via fwnode
* @of_xlate: Controller driver specific mapping of channel via DT
* @poll_hrt: API private. hrtimer used to poll for TXDONE on all
* channels.
@@ -79,10 +80,13 @@ struct mbox_controller {
bool txdone_irq;
bool txdone_poll;
unsigned txpoll_period;
+ struct mbox_chan *(*fw_xlate)(struct mbox_controller *mbox,
+ const struct fwnode_reference_args *sp);
struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
const struct of_phandle_args *sp);
/* Internal to API */
struct hrtimer poll_hrt;
+ spinlock_t poll_hrt_lock;
struct list_head node;
};
@@ -133,7 +137,4 @@ void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */
int devm_mbox_controller_register(struct device *dev,
struct mbox_controller *mbox);
-void devm_mbox_controller_unregister(struct device *dev,
- struct mbox_controller *mbox);
-
#endif /* __MAILBOX_CONTROLLER_H */
diff --git a/include/linux/maple.h b/include/linux/maple.h
index 9b140272ee16..3be4e567473c 100644
--- a/include/linux/maple.h
+++ b/include/linux/maple.h
@@ -5,7 +5,6 @@
#include <mach/maple.h>
struct device;
-extern struct bus_type maple_bus_type;
/* Maple Bus command and response codes */
enum maple_code {
@@ -98,7 +97,7 @@ int maple_add_packet(struct maple_device *mdev, u32 function,
void maple_clear_dev(struct maple_device *mdev);
#define to_maple_dev(n) container_of(n, struct maple_device, dev)
-#define to_maple_driver(n) container_of(n, struct maple_driver, drv)
+#define to_maple_driver(n) container_of_const(n, struct maple_driver, drv)
#define maple_get_drvdata(d) dev_get_drvdata(&(d)->dev)
#define maple_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p))
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
new file mode 100644
index 000000000000..66f98a3da8d8
--- /dev/null
+++ b/include/linux/maple_tree.h
@@ -0,0 +1,903 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _LINUX_MAPLE_TREE_H
+#define _LINUX_MAPLE_TREE_H
+/*
+ * Maple Tree - An RCU-safe adaptive tree for storing ranges
+ * Copyright (c) 2018-2022 Oracle
+ * Authors: Liam R. Howlett <Liam.Howlett@Oracle.com>
+ * Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+/* #define CONFIG_MAPLE_RCU_DISABLED */
+
+/*
+ * Allocated nodes are mutable until they have been inserted into the tree,
+ * at which time they cannot change their type until they have been removed
+ * from the tree and an RCU grace period has passed.
+ *
+ * Removed nodes have their ->parent set to point to themselves. RCU readers
+ * check ->parent before relying on the value that they loaded from the
+ * slots array. This lets us reuse the slots array for the RCU head.
+ *
+ * Nodes in the tree point to their parent unless bit 0 is set.
+ */
+#if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
+/* 64bit sizes */
+#define MAPLE_NODE_SLOTS 31 /* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS 16 /* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS 10 /* 240 bytes */
+#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 1)
+#else
+/* 32bit sizes */
+#define MAPLE_NODE_SLOTS 63 /* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS 32 /* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS 21 /* 240 bytes */
+#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 2)
+#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */
+
+#define MAPLE_NODE_MASK 255UL
+
+/*
+ * The node->parent of the root node has bit 0 set and the rest of the pointer
+ * is a pointer to the tree itself. No more bits are available in this pointer
+ * (on m68k, the data structure may only be 2-byte aligned).
+ *
+ * Internal non-root nodes can only have maple_range_* nodes as parents. The
+ * parent pointer is 256B aligned like all other tree nodes. When storing a 32
+ * or 64 bit values, the offset can fit into 4 bits. The 16 bit values need an
+ * extra bit to store the offset. This extra bit comes from a reuse of the last
+ * bit in the node type. This is possible by using bit 1 to indicate if bit 2
+ * is part of the type or the slot.
+ *
+ * Once the type is decided, the decision of an allocation range type or a
+ * range type is done by examining the immutable tree flag for the
+ * MT_FLAGS_ALLOC_RANGE flag.
+ *
+ * Node types:
+ * 0b??1 = Root
+ * 0b?00 = 16 bit nodes
+ * 0b010 = 32 bit nodes
+ * 0b110 = 64 bit nodes
+ *
+ * Slot size and location in the parent pointer:
+ * type : slot location
+ * 0b??1 : Root
+ * 0b?00 : 16 bit values, type in 0-1, slot in 2-6
+ * 0b010 : 32 bit values, type in 0-2, slot in 3-6
+ * 0b110 : 64 bit values, type in 0-2, slot in 3-6
+ */
+
+/*
+ * This metadata is used to optimize the gap updating code and in reverse
+ * searching for gaps or any other code that needs to find the end of the data.
+ */
+struct maple_metadata {
+ unsigned char end; /* end of data */
+ unsigned char gap; /* offset of largest gap */
+};
+
+/*
+ * Leaf nodes do not store pointers to nodes, they store user data. Users may
+ * store almost any bit pattern. As noted above, the optimisation of storing an
+ * entry at 0 in the root pointer cannot be done for data which have the bottom
+ * two bits set to '10'. We also reserve values with the bottom two bits set to
+ * '10' which are below 4096 (ie 2, 6, 10 .. 4094) for internal use. Some APIs
+ * return errnos as a negative errno shifted left by two bits and the bottom
+ * two bits set to '10', and while choosing to store these values in the array
+ * is not an error, it may lead to confusion if you're testing for an error with
+ * mas_is_err().
+ *
+ * Non-leaf nodes store the type of the node pointed to (enum maple_type in bits
+ * 3-6), bit 2 is reserved. That leaves bits 0-1 unused for now.
+ *
+ * In regular B-Tree terms, pivots are called keys. The term pivot is used to
+ * indicate that the tree is specifying ranges. Pivots may appear in the
+ * subtree with an entry attached to the value whereas keys are unique to a
+ * specific position of a B-tree. Pivot values are inclusive of the slot with
+ * the same index.
+ */
+
+struct maple_range_64 {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
+ union {
+ void __rcu *slot[MAPLE_RANGE64_SLOTS];
+ struct {
+ void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
+ struct maple_metadata meta;
+ };
+ };
+};
+
+/*
+ * At tree creation time, the user can specify that they're willing to trade off
+ * storing fewer entries in a tree in return for storing more information in
+ * each node.
+ *
+ * The maple tree supports recording the largest range of NULL entries available
+ * in this node, also called gaps. This optimises the tree for allocating a
+ * range.
+ */
+struct maple_arange_64 {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
+ void __rcu *slot[MAPLE_ARANGE64_SLOTS];
+ unsigned long gap[MAPLE_ARANGE64_SLOTS];
+ struct maple_metadata meta;
+};
+
+struct maple_alloc {
+ unsigned long total;
+ unsigned char node_count;
+ unsigned int request_count;
+ struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
+};
+
+struct maple_topiary {
+ struct maple_pnode *parent;
+ struct maple_enode *next; /* Overlaps the pivot */
+};
+
+enum maple_type {
+ maple_dense,
+ maple_leaf_64,
+ maple_range_64,
+ maple_arange_64,
+};
+
+enum store_type {
+ wr_invalid,
+ wr_new_root,
+ wr_store_root,
+ wr_exact_fit,
+ wr_spanning_store,
+ wr_split_store,
+ wr_rebalance,
+ wr_append,
+ wr_node_store,
+ wr_slot_store,
+};
+
+/**
+ * DOC: Maple tree flags
+ *
+ * * MT_FLAGS_ALLOC_RANGE - Track gaps in this tree
+ * * MT_FLAGS_USE_RCU - Operate in RCU mode
+ * * MT_FLAGS_HEIGHT_OFFSET - The position of the tree height in the flags
+ * * MT_FLAGS_HEIGHT_MASK - The mask for the maple tree height value
+ * * MT_FLAGS_LOCK_MASK - How the mt_lock is used
+ * * MT_FLAGS_LOCK_IRQ - Acquired irq-safe
+ * * MT_FLAGS_LOCK_BH - Acquired bh-safe
+ * * MT_FLAGS_LOCK_EXTERN - mt_lock is not used
+ *
+ * * MAPLE_HEIGHT_MAX - The largest height that can be stored
+ */
+#define MT_FLAGS_ALLOC_RANGE 0x01
+#define MT_FLAGS_USE_RCU 0x02
+#define MT_FLAGS_HEIGHT_OFFSET 0x02
+#define MT_FLAGS_HEIGHT_MASK 0x7C
+#define MT_FLAGS_LOCK_MASK 0x300
+#define MT_FLAGS_LOCK_IRQ 0x100
+#define MT_FLAGS_LOCK_BH 0x200
+#define MT_FLAGS_LOCK_EXTERN 0x300
+#define MT_FLAGS_ALLOC_WRAPPED 0x0800
+
+#define MAPLE_HEIGHT_MAX 31
+
+
+#define MAPLE_NODE_TYPE_MASK 0x0F
+#define MAPLE_NODE_TYPE_SHIFT 0x03
+
+#define MAPLE_RESERVED_RANGE 4096
+
+#ifdef CONFIG_LOCKDEP
+#define mt_lock_is_held(mt) \
+ (!(mt)->ma_external_lock || lock_is_held((mt)->ma_external_lock))
+
+#define mt_write_lock_is_held(mt) \
+ (!(mt)->ma_external_lock || \
+ lock_is_held_type((mt)->ma_external_lock, 0))
+
+#define mt_set_external_lock(mt, lock) \
+ (mt)->ma_external_lock = &(lock)->dep_map
+
+#define mt_on_stack(mt) (mt).ma_external_lock = NULL
+#else
+#define mt_lock_is_held(mt) 1
+#define mt_write_lock_is_held(mt) 1
+#define mt_set_external_lock(mt, lock) do { } while (0)
+#define mt_on_stack(mt) do { } while (0)
+#endif
+
+/*
+ * If the tree contains a single entry at index 0, it is usually stored in
+ * tree->ma_root. To optimise for the page cache, an entry which ends in '00',
+ * '01' or '11' is stored in the root, but an entry which ends in '10' will be
+ * stored in a node. Bits 3-6 are used to store enum maple_type.
+ *
+ * The flags are used both to store some immutable information about this tree
+ * (set at tree creation time) and dynamic information set under the spinlock.
+ *
+ * Another use of flags is to indicate global states of the tree. This is the
+ * case with the MT_FLAGS_USE_RCU flag, which indicates the tree is currently in
+ * RCU mode. This mode was added to allow the tree to reuse nodes instead of
+ * re-allocating and RCU freeing nodes when there is a single user.
+ */
+struct maple_tree {
+ union {
+ spinlock_t ma_lock;
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map *ma_external_lock;
+#endif
+ };
+ unsigned int ma_flags;
+ void __rcu *ma_root;
+};
+
+/**
+ * MTREE_INIT() - Initialize a maple tree
+ * @name: The maple tree name
+ * @__flags: The maple tree flags
+ */
+#define MTREE_INIT(name, __flags) { \
+ .ma_lock = __SPIN_LOCK_UNLOCKED((name).ma_lock), \
+ .ma_flags = __flags, \
+ .ma_root = NULL, \
+}
+
+/**
+ * MTREE_INIT_EXT() - Initialize a maple tree with an external lock.
+ * @name: The tree name
+ * @__flags: The maple tree flags
+ * @__lock: The external lock
+ */
+#ifdef CONFIG_LOCKDEP
+#define MTREE_INIT_EXT(name, __flags, __lock) { \
+ .ma_external_lock = &(__lock).dep_map, \
+ .ma_flags = (__flags), \
+ .ma_root = NULL, \
+}
+#else
+#define MTREE_INIT_EXT(name, __flags, __lock) MTREE_INIT(name, __flags)
+#endif
+
+#define DEFINE_MTREE(name) \
+ struct maple_tree name = MTREE_INIT(name, 0)
+
+#define mtree_lock(mt) spin_lock((&(mt)->ma_lock))
+#define mtree_lock_nested(mt, subclass) \
+		spin_lock_nested((&(mt)->ma_lock), subclass)
+#define mtree_unlock(mt) spin_unlock((&(mt)->ma_lock))
+
+/*
+ * The Maple Tree squeezes various bits in at various points which aren't
+ * necessarily obvious. Usually, this is done by observing that pointers are
+ * N-byte aligned and thus the bottom log_2(N) bits are available for use. We
+ * don't use the high bits of pointers to store additional information because
+ * we don't know what bits are unused on any given architecture.
+ *
+ * Nodes are 256 bytes in size and are also aligned to 256 bytes, giving us 8
+ * low bits for our own purposes. Nodes are currently of 4 types:
+ * 1. Single pointer (Range is 0-0)
+ * 2. Non-leaf Allocation Range nodes
+ * 3. Non-leaf Range nodes
+ * 4. Leaf Range nodes
+ *
+ * All nodes consist of a number of node slots, pivots, and a parent pointer.
+ */
+
+struct maple_node {
+ union {
+ struct {
+ struct maple_pnode *parent;
+ void __rcu *slot[MAPLE_NODE_SLOTS];
+ };
+ struct {
+ void *pad;
+ struct rcu_head rcu;
+ struct maple_enode *piv_parent;
+ unsigned char parent_slot;
+ enum maple_type type;
+ unsigned char slot_len;
+ unsigned int ma_flags;
+ };
+ struct maple_range_64 mr64;
+ struct maple_arange_64 ma64;
+ struct maple_alloc alloc;
+ };
+};
+
+/*
+ * More complicated stores can cause two nodes to become one or three and
+ * potentially alter the height of the tree. Either half of the tree may need
+ * to be rebalanced against the other. The ma_topiary struct is used to track
+ * which nodes have been 'cut' from the tree so that the change can be done
+ * safely at a later date. This is done to support RCU.
+ */
+struct ma_topiary {
+ struct maple_enode *head;
+ struct maple_enode *tail;
+ struct maple_tree *mtree;
+};
+
+void *mtree_load(struct maple_tree *mt, unsigned long index);
+
+int mtree_insert(struct maple_tree *mt, unsigned long index,
+ void *entry, gfp_t gfp);
+int mtree_insert_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp);
+int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp);
+int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long range_lo, unsigned long range_hi,
+ unsigned long *next, gfp_t gfp);
+int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp);
+
+int mtree_store_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp);
+int mtree_store(struct maple_tree *mt, unsigned long index,
+ void *entry, gfp_t gfp);
+void *mtree_erase(struct maple_tree *mt, unsigned long index);
+
+int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
+int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp);
+
+void mtree_destroy(struct maple_tree *mt);
+void __mt_destroy(struct maple_tree *mt);
+
+/**
+ * mtree_empty() - Determine if a tree has any present entries.
+ * @mt: Maple Tree.
+ *
+ * Context: Any context.
+ * Return: %true if the tree contains only NULL pointers.
+ */
+static inline bool mtree_empty(const struct maple_tree *mt)
+{
+ return mt->ma_root == NULL;
+}
+
+/* Advanced API */
+
+/*
+ * Maple State Status
+ * ma_active means the maple state is pointing to a node and offset and can
+ * continue operating on the tree.
+ * ma_start means we have not searched the tree.
+ * ma_root means we have searched the tree and the entry we found lives in
+ * the root of the tree (ie it has index 0, length 1 and is the only entry in
+ * the tree).
+ * ma_none means we have searched the tree and there is no node in the
+ * tree for this entry. For example, we searched for index 1 in an empty
+ * tree. Or we have a tree which points to a full leaf node and we
+ * searched for an entry which is larger than can be contained in that
+ * leaf node.
+ * ma_pause means the data within the maple state may be stale, restart the
+ * operation
+ * ma_overflow means the search has reached the upper limit of the search
+ * ma_underflow means the search has reached the lower limit of the search
+ * ma_error means there was an error, check the node for the error number.
+ */
+enum maple_status {
+ ma_active,
+ ma_start,
+ ma_root,
+ ma_none,
+ ma_pause,
+ ma_overflow,
+ ma_underflow,
+ ma_error,
+};
+
+/*
+ * The maple state is defined in the struct ma_state and is used to keep track
+ * of information during operations, and even between operations when using the
+ * advanced API.
+ *
+ * If state->node has bit 0 set then it references a tree location which is not
+ * a node (eg the root). If bit 1 is set, the rest of the bits are a negative
+ * errno. Bit 2 (the 'unallocated slots' bit) is clear. Bits 3-6 indicate the
+ * node type.
+ *
+ * state->alloc either has a requested number of nodes or an allocated node. If
+ * state->alloc has a requested number of nodes, the first bit will be set (0x1)
+ * and the remaining bits are the value. If state->alloc is a node, then the
+ * node will be of type maple_alloc. maple_alloc has MAPLE_NODE_SLOTS - 1 for
+ * storing more allocated nodes, a total number of nodes allocated, and the
+ * node_count in this node. node_count is the number of allocated nodes in this
+ * node. The scaling beyond MAPLE_NODE_SLOTS - 1 is handled by storing further
+ * nodes into state->alloc->slot[0]'s node. Nodes are taken from state->alloc
+ * by removing a node from the state->alloc node until state->alloc->node_count
+ * is 1, when state->alloc is returned and the state->alloc->slot[0] is promoted
+ * to state->alloc. Nodes are pushed onto state->alloc by putting the current
+ * state->alloc into the pushed node's slot[0].
+ *
+ * The state also contains the implied min/max of the state->node, the depth of
+ * this search, and the offset. The implied min/max are either from the parent
+ * node or are 0-oo for the root node. The depth is incremented or decremented
+ * every time a node is walked down or up. The offset is the slot/pivot of
+ * interest in the node - either for reading or writing.
+ *
+ * When returning a value the maple state index and last respectively contain
+ * the start and end of the range for the entry. Ranges are inclusive in the
+ * Maple Tree.
+ *
+ * The status of the state is used to determine how the next action should treat
+ * the state. For instance, if the status is ma_start then the next action
+ * should start at the root of the tree and walk down. If the status is
+ * ma_pause then the node may be stale data and should be discarded. If the
+ * status is ma_overflow, then the last action hit the upper limit.
+ */
+struct ma_state {
+ struct maple_tree *tree; /* The tree we're operating in */
+ unsigned long index; /* The index we're operating on - range start */
+ unsigned long last; /* The last index we're operating on - range end */
+ struct maple_enode *node; /* The node containing this entry */
+ unsigned long min; /* The minimum index of this node - implied pivot min */
+ unsigned long max; /* The maximum index of this node - implied pivot max */
+ struct slab_sheaf *sheaf; /* Allocated nodes for this operation */
+ struct maple_node *alloc; /* A single allocated node for fast path writes */
+ unsigned long node_request; /* The number of nodes to allocate for this operation */
+ enum maple_status status; /* The status of the state (active, start, none, etc) */
+ unsigned char depth; /* depth of tree descent during write */
+ unsigned char offset;
+ unsigned char mas_flags;
+ unsigned char end; /* The end of the node */
+ enum store_type store_type; /* The type of store needed for this operation */
+};
+
+struct ma_wr_state {
+ struct ma_state *mas;
+ struct maple_node *node; /* Decoded mas->node */
+ unsigned long r_min; /* range min */
+ unsigned long r_max; /* range max */
+ enum maple_type type; /* mas->node type */
+ unsigned char offset_end; /* The offset where the write ends */
+ unsigned long *pivots; /* mas->node->pivots pointer */
+ unsigned long end_piv; /* The pivot at the offset end */
+ void __rcu **slots; /* mas->node->slots pointer */
+ void *entry; /* The entry to write */
+ void *content; /* The existing entry that is being overwritten */
+ unsigned char vacant_height; /* Height of lowest node with free space */
+ unsigned char sufficient_height;/* Height of lowest node with min sufficiency + 1 nodes */
+};
+
+#define mas_lock(mas) spin_lock(&((mas)->tree->ma_lock))
+#define mas_lock_nested(mas, subclass) \
+ spin_lock_nested(&((mas)->tree->ma_lock), subclass)
+#define mas_unlock(mas) spin_unlock(&((mas)->tree->ma_lock))
+
+/*
+ * Special values for ma_state.node.
+ * MA_ERROR represents an errno. After dropping the lock and attempting
+ * to resolve the error, the walk would have to be restarted from the
+ * top of the tree as the tree may have been modified.
+ */
+#define MA_ERROR(err) \
+ ((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
+
+/*
+ * When changing MA_STATE, remember to also change rust/kernel/maple_tree.rs
+ */
+#define MA_STATE(name, mt, first, end) \
+ struct ma_state name = { \
+ .tree = mt, \
+ .index = first, \
+ .last = end, \
+ .node = NULL, \
+ .status = ma_start, \
+ .min = 0, \
+ .max = ULONG_MAX, \
+ .sheaf = NULL, \
+ .alloc = NULL, \
+ .node_request = 0, \
+ .mas_flags = 0, \
+ .store_type = wr_invalid, \
+ }
+
+#define MA_WR_STATE(name, ma_state, wr_entry) \
+ struct ma_wr_state name = { \
+ .mas = ma_state, \
+ .content = NULL, \
+ .entry = wr_entry, \
+ .vacant_height = 0, \
+ .sufficient_height = 0 \
+ }
+
+#define MA_TOPIARY(name, tree) \
+ struct ma_topiary name = { \
+ .head = NULL, \
+ .tail = NULL, \
+ .mtree = tree, \
+ }
+
+void *mas_walk(struct ma_state *mas);
+void *mas_store(struct ma_state *mas, void *entry);
+void *mas_erase(struct ma_state *mas);
+int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
+void mas_store_prealloc(struct ma_state *mas, void *entry);
+void *mas_find(struct ma_state *mas, unsigned long max);
+void *mas_find_range(struct ma_state *mas, unsigned long max);
+void *mas_find_rev(struct ma_state *mas, unsigned long min);
+void *mas_find_range_rev(struct ma_state *mas, unsigned long max);
+int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
+int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
+ void *entry, unsigned long range_lo, unsigned long range_hi,
+ unsigned long *next, gfp_t gfp);
+
+bool mas_nomem(struct ma_state *mas, gfp_t gfp);
+void mas_pause(struct ma_state *mas);
+void maple_tree_init(void);
+void mas_destroy(struct ma_state *mas);
+int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);
+
+void *mas_prev(struct ma_state *mas, unsigned long min);
+void *mas_prev_range(struct ma_state *mas, unsigned long max);
+void *mas_next(struct ma_state *mas, unsigned long max);
+void *mas_next_range(struct ma_state *mas, unsigned long max);
+
+int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
+ unsigned long size);
+/*
+ * This finds an empty area from the highest address to the lowest.
+ * AKA the "topdown" version.
+ */
+int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size);
+
+static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
+ unsigned long addr)
+{
+ memset(mas, 0, sizeof(struct ma_state));
+ mas->tree = tree;
+ mas->index = mas->last = addr;
+ mas->max = ULONG_MAX;
+ mas->status = ma_start;
+ mas->node = NULL;
+}
+
+static inline bool mas_is_active(struct ma_state *mas)
+{
+ return mas->status == ma_active;
+}
+
+static inline bool mas_is_err(struct ma_state *mas)
+{
+ return mas->status == ma_error;
+}
+
+/**
+ * mas_reset() - Reset a Maple Tree operation state.
+ * @mas: Maple Tree operation state.
+ *
+ * Resets the error or walk state of the @mas so future walks of the
+ * array will start from the root. Use this if you have dropped the
+ * lock and want to reuse the ma_state.
+ *
+ * Context: Any context.
+ */
+static __always_inline void mas_reset(struct ma_state *mas)
+{
+ mas->status = ma_start;
+ mas->node = NULL;
+}
+
+/**
+ * mas_for_each() - Iterate over a range of the maple tree.
+ * @__mas: Maple Tree operation state (maple_state)
+ * @__entry: Entry retrieved from the tree
+ * @__max: maximum index to retrieve from the tree
+ *
+ * When returned, mas->index and mas->last will hold the entire range for the
+ * entry.
+ *
+ * Note: may return the zero entry.
+ */
+#define mas_for_each(__mas, __entry, __max) \
+ while (((__entry) = mas_find((__mas), (__max))) != NULL)
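+
+/*
+ * Illustrative use (a real caller holds mas_lock() or rcu_read_lock()):
+ *
+ *	MA_STATE(mas, &tree, 0, 0);
+ *	void *entry;
+ *
+ *	rcu_read_lock();
+ *	mas_for_each(&mas, entry, ULONG_MAX)
+ *		pr_info("[%lx, %lx] -> %p\n", mas.index, mas.last, entry);
+ *	rcu_read_unlock();
+ */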
+
+/**
+ * mas_for_each_rev() - Iterate over a range of the maple tree in reverse order.
+ * @__mas: Maple Tree operation state (maple_state)
+ * @__entry: Entry retrieved from the tree
+ * @__min: minimum index to retrieve from the tree
+ *
+ * When returned, mas->index and mas->last will hold the entire range for the
+ * entry.
+ *
+ * Note: may return the zero entry.
+ */
+#define mas_for_each_rev(__mas, __entry, __min) \
+ while (((__entry) = mas_find_rev((__mas), (__min))) != NULL)
+
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+enum mt_dump_format {
+ mt_dump_dec,
+ mt_dump_hex,
+};
+
+extern atomic_t maple_tree_tests_run;
+extern atomic_t maple_tree_tests_passed;
+
+void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
+void mas_dump(const struct ma_state *mas);
+void mas_wr_dump(const struct ma_wr_state *wr_mas);
+void mt_validate(struct maple_tree *mt);
+void mt_cache_shrink(void);
+#define MT_BUG_ON(__tree, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MAS_BUG_ON(__mas, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_dump(__mas); \
+ mt_dump((__mas)->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MAS_WR_BUG_ON(__wrmas, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_wr_dump(__wrmas); \
+ mas_dump((__wrmas)->mas); \
+ mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+
+#define MT_WARN_ON(__tree, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+
+#define MAS_WARN_ON(__mas, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_dump(__mas); \
+ mt_dump((__mas)->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+
+#define MAS_WR_WARN_ON(__wrmas, __x) ({ \
+ int ret = !!(__x); \
+ atomic_inc(&maple_tree_tests_run); \
+ if (ret) { \
+ pr_info("WARN at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mas_wr_dump(__wrmas); \
+ mas_dump((__wrmas)->mas); \
+ mt_dump((__wrmas)->mas->tree, mt_dump_hex); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+ unlikely(ret); \
+})
+#else
+#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
+#define MAS_BUG_ON(__mas, __x) BUG_ON(__x)
+#define MAS_WR_BUG_ON(__mas, __x) BUG_ON(__x)
+#define MT_WARN_ON(__tree, __x) WARN_ON(__x)
+#define MAS_WARN_ON(__mas, __x) WARN_ON(__x)
+#define MAS_WR_WARN_ON(__mas, __x) WARN_ON(__x)
+#endif /* CONFIG_DEBUG_MAPLE_TREE */
+
+/**
+ * __mas_set_range() - Set up Maple Tree operation state to a sub-range of the
+ * current location.
+ * @mas: Maple Tree operation state.
+ * @start: New start of range in the Maple Tree.
+ * @last: New end of range in the Maple Tree.
+ *
+ * Set the internal maple state values to a sub-range.
+ * Please use mas_set_range() if you do not know where you are in the tree.
+ */
+static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
+ unsigned long last)
+{
+ /* Ensure the range starts within the current slot */
+ MAS_WARN_ON(mas, mas_is_active(mas) &&
+ (mas->index > start || mas->last < start));
+ mas->index = start;
+ mas->last = last;
+}
+
+/**
+ * mas_set_range() - Set up Maple Tree operation state for a different index.
+ * @mas: Maple Tree operation state.
+ * @start: New start of range in the Maple Tree.
+ * @last: New end of range in the Maple Tree.
+ *
+ * Move the operation state to refer to a different range. This will
+ * have the effect of starting a walk from the top; see mas_next()
+ * to move to an adjacent index.
+ */
+static inline
+void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
+{
+ mas_reset(mas);
+ __mas_set_range(mas, start, last);
+}
+
+/**
+ * mas_set() - Set up Maple Tree operation state for a different index.
+ * @mas: Maple Tree operation state.
+ * @index: New index into the Maple Tree.
+ *
+ * Move the operation state to refer to a different index. This will
+ * have the effect of starting a walk from the top; see mas_next()
+ * to move to an adjacent index.
+ */
+static inline void mas_set(struct ma_state *mas, unsigned long index)
+{
+ mas_set_range(mas, index, index);
+}
+
+static inline bool mt_external_lock(const struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN;
+}
+
+/**
+ * mt_init_flags() - Initialise an empty maple tree with flags.
+ * @mt: Maple Tree
+ * @flags: maple tree flags.
+ *
+ * If you need to initialise a Maple Tree with special flags (eg, an
+ * allocation tree), use this function.
+ *
+ * Context: Any context.
+ */
+static inline void mt_init_flags(struct maple_tree *mt, unsigned int flags)
+{
+ mt->ma_flags = flags;
+ if (!mt_external_lock(mt))
+ spin_lock_init(&mt->ma_lock);
+ rcu_assign_pointer(mt->ma_root, NULL);
+}
+
+/**
+ * mt_init() - Initialise an empty maple tree.
+ * @mt: Maple Tree
+ *
+ * Initialise @mt as an empty Maple Tree with no special flags.
+ *
+ * Context: Any context.
+ */
+static inline void mt_init(struct maple_tree *mt)
+{
+ mt_init_flags(mt, 0);
+}
+
+static inline bool mt_in_rcu(struct maple_tree *mt)
+{
+#ifdef CONFIG_MAPLE_RCU_DISABLED
+ return false;
+#endif
+ return mt->ma_flags & MT_FLAGS_USE_RCU;
+}
+
+/**
+ * mt_clear_in_rcu() - Switch the tree to non-RCU mode.
+ * @mt: The Maple Tree
+ */
+static inline void mt_clear_in_rcu(struct maple_tree *mt)
+{
+ if (!mt_in_rcu(mt))
+ return;
+
+ if (mt_external_lock(mt)) {
+ WARN_ON(!mt_lock_is_held(mt));
+ mt->ma_flags &= ~MT_FLAGS_USE_RCU;
+ } else {
+ mtree_lock(mt);
+ mt->ma_flags &= ~MT_FLAGS_USE_RCU;
+ mtree_unlock(mt);
+ }
+}
+
+/**
+ * mt_set_in_rcu() - Switch the tree to RCU safe mode.
+ * @mt: The Maple Tree
+ */
+static inline void mt_set_in_rcu(struct maple_tree *mt)
+{
+ if (mt_in_rcu(mt))
+ return;
+
+ if (mt_external_lock(mt)) {
+ WARN_ON(!mt_lock_is_held(mt));
+ mt->ma_flags |= MT_FLAGS_USE_RCU;
+ } else {
+ mtree_lock(mt);
+ mt->ma_flags |= MT_FLAGS_USE_RCU;
+ mtree_unlock(mt);
+ }
+}
+
+static inline unsigned int mt_height(const struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
+}
+
+void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
+void *mt_find_after(struct maple_tree *mt, unsigned long *index,
+ unsigned long max);
+void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min);
+void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);
+
+/**
+ * mt_for_each - Iterate over each entry starting at index until max.
+ * @__tree: The Maple Tree
+ * @__entry: The current entry
+ * @__index: The index to start the search from. Subsequently used as iterator.
+ * @__max: The maximum limit for @index
+ *
+ * This iterator skips all entries that resolve to a NULL pointer,
+ * e.g. entries which have been reserved with XA_ZERO_ENTRY.
+ */
+#define mt_for_each(__tree, __entry, __index, __max) \
+ for (__entry = mt_find(__tree, &(__index), __max); \
+ __entry; __entry = mt_find_after(__tree, &(__index), __max))
+
+#endif /* _LINUX_MAPLE_TREE_H */
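Taken together, the normal API reduces to a short pattern. A hedged sketch
(xa_mk_value() from <linux/xarray.h> is used only to fabricate a legal
non-NULL entry):

	static int example(void)
	{
		DEFINE_MTREE(tree);
		unsigned long index = 0;
		void *entry;
		int ret;

		/* One entry covering the inclusive range [16, 31]. */
		ret = mtree_store_range(&tree, 16, 31, xa_mk_value(1), GFP_KERNEL);
		if (ret)
			return ret;

		entry = mtree_load(&tree, 20);	/* same entry as at index 16 */

		mt_for_each(&tree, entry, index, ULONG_MAX)
			pr_info("entry %p, iterator now at %lu\n", entry, index);

		mtree_destroy(&tree);
		return 0;
	}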
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index acee44b9db26..b1fbe4118414 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -7,6 +7,7 @@
/* Known PHY IDs */
#define MARVELL_PHY_ID_88E1101 0x01410c60
+#define MARVELL_PHY_ID_88E3082 0x01410c80
#define MARVELL_PHY_ID_88E1112 0x01410c90
#define MARVELL_PHY_ID_88E1111 0x01410cc0
#define MARVELL_PHY_ID_88E1118 0x01410e10
@@ -22,17 +23,17 @@
#define MARVELL_PHY_ID_88E1545 0x01410ea0
#define MARVELL_PHY_ID_88E1548P 0x01410ec0
#define MARVELL_PHY_ID_88E3016 0x01410e60
+#define MARVELL_PHY_ID_88X3310 0x002b09a0
#define MARVELL_PHY_ID_88E2110 0x002b09b0
#define MARVELL_PHY_ID_88X2222 0x01410f10
-
-/* PHY IDs and mask for Alaska 10G PHYs */
-#define MARVELL_PHY_ID_88X33X0_MASK 0xfffffff8
-#define MARVELL_PHY_ID_88X3310 0x002b09a0
-#define MARVELL_PHY_ID_88X3340 0x002b09a8
+#define MARVELL_PHY_ID_88Q2110 0x002b0980
+#define MARVELL_PHY_ID_88Q2220 0x002b0b20
 /* Marvell 88E1111 in Finisar SFP module with modified PHY ID */
#define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
+/* ID from 88E6020, assumed to be the same for the whole 6250 family */
+#define MARVELL_PHY_ID_88E6250_FAMILY 0x01410db0
/* These Ethernet switch families contain embedded PHYs, but they do
* not have a model ID. So the switch driver traps reads to the ID2
* register and returns the switch family ID
diff --git a/include/linux/math.h b/include/linux/math.h
index 53674a327e39..6dc1d1d32fbc 100644
--- a/include/linux/math.h
+++ b/include/linux/math.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_MATH_H
#define _LINUX_MATH_H
+#include <linux/types.h>
#include <asm/div64.h>
#include <uapi/linux/kernel.h>
@@ -33,6 +34,18 @@
*/
#define round_down(x, y) ((x) & ~__round_mask(x, y))
+/**
+ * DIV_ROUND_UP_POW2 - divide and round up
+ * @n: numerator
+ * @d: denominator (must be a power of 2)
+ *
+ * Divides @n by @d and rounds the quotient up to the next integer (@d must be
+ * a power of 2). Avoids the integer overflow that __KERNEL_DIV_ROUND_UP() may
+ * hit when computing @n + @d - 1.
+ * Performance is roughly equivalent to __KERNEL_DIV_ROUND_UP().
+ */
+#define DIV_ROUND_UP_POW2(n, d) \
+ ((n) / (d) + !!((n) & ((d) - 1)))
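+
+/*
+ * Example: DIV_ROUND_UP_POW2(4097, 4096) == 4097/4096 + !!(4097 & 4095) == 2,
+ * with no (n + d - 1) addition that could overflow near the type's maximum.
+ */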
+
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define DIV_ROUND_DOWN_ULL(ll, d) \
@@ -106,27 +119,45 @@
} \
)
-/*
- * Multiplies an integer by a fraction, while avoiding unnecessary
- * overflow or loss of precision.
- */
-#define mult_frac(x, numer, denom)( \
-{ \
- typeof(x) quot = (x) / (denom); \
- typeof(x) rem = (x) % (denom); \
- (quot * (numer)) + ((rem * (numer)) / (denom)); \
-} \
-)
+#define __STRUCT_FRACT(type) \
+struct type##_fract { \
+ __##type numerator; \
+ __##type denominator; \
+};
+__STRUCT_FRACT(s8)
+__STRUCT_FRACT(u8)
+__STRUCT_FRACT(s16)
+__STRUCT_FRACT(u16)
+__STRUCT_FRACT(s32)
+__STRUCT_FRACT(u32)
+#undef __STRUCT_FRACT
+
+/* Calculate "x * n / d" without unnecessary overflow or loss of precision. */
+#define mult_frac(x, n, d) \
+({ \
+ typeof(x) x_ = (x); \
+ typeof(n) n_ = (n); \
+ typeof(d) d_ = (d); \
+ \
+ typeof(x_) q = x_ / d_; \
+ typeof(x_) r = x_ % d_; \
+ q * n_ + r * n_ / d_; \
+})
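+
+/*
+ * Example: for a u32 x, mult_frac(x, 1000, 1024) fits in u32 for every x
+ * because the division happens first:
+ *	x / 1024 * 1000 + x % 1024 * 1000 / 1024
+ * while the naive x * 1000 / 1024 overflows once x exceeds ~4.29e6.
+ */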
#define sector_div(a, b) do_div(a, b)
/**
* abs - return absolute value of an argument
- * @x: the value. If it is unsigned type, it is converted to signed type first.
- * char is treated as if it was signed (regardless of whether it really is)
- * but the macro's return type is preserved as char.
+ * @x: the value.
+ *
+ * If it is unsigned type, @x is converted to signed type first.
+ * char is treated as if it was signed (regardless of whether it really is)
+ * but the macro's return type is preserved as char.
*
- * Return: an absolute value of x.
+ * NOTE: for a signed type, if @x is the minimum value the result is undefined,
+ * as there are not enough bits to represent it as a positive number.
+ *
+ * Return: an absolute value of @x.
*/
#define abs(x) __abs_choose_expr(x, long long, \
__abs_choose_expr(x, long, \
@@ -144,6 +175,25 @@
({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
/**
+ * abs_diff - return absolute value of the difference between the arguments
+ * @a: the first argument
+ * @b: the second argument
+ *
+ * @a and @b have to be of the same type. With this restriction we compare
+ * signed to signed and unsigned to unsigned. The result is the subtraction of
+ * the smaller of the two from the bigger, hence the result is always a
+ * positive value.
+ *
+ * Return: the absolute value of the difference between @a and @b.
+ */
+#define abs_diff(a, b) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ (void)(&__a == &__b); \
+ __a > __b ? (__a - __b) : (__b - __a); \
+})
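+
+/*
+ * Example: abs_diff(3U, 7U) == 4U. Mixing types, e.g. abs_diff(3, 7U),
+ * trips the (void)(&__a == &__b) statement with a "comparison of distinct
+ * pointer types" warning, which is exactly its purpose.
+ */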
+
+/**
* reciprocal_scale - "scale" a value into range [0, ep_ro)
* @val: value
* @ep_ro: right open interval endpoint
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 66deb1fdc2ef..cc305206d89f 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -3,8 +3,9 @@
#define _LINUX_MATH64_H
#include <linux/types.h>
-#include <vdso/math64.h>
+#include <linux/math.h>
#include <asm/div64.h>
+#include <vdso/math64.h>
#if BITS_PER_LONG == 64
@@ -28,7 +29,7 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
return dividend / divisor;
}
-/*
+/**
* div_s64_rem - signed 64bit divide with 32bit divisor with remainder
* @dividend: signed 64bit dividend
* @divisor: signed 32bit divisor
@@ -42,7 +43,7 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
return dividend / divisor;
}
-/*
+/**
* div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
* @dividend: unsigned 64bit dividend
* @divisor: unsigned 64bit divisor
@@ -56,7 +57,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
return dividend / divisor;
}
-/*
+/**
* div64_u64 - unsigned 64bit divide with 64bit divisor
* @dividend: unsigned 64bit dividend
* @divisor: unsigned 64bit divisor
@@ -68,7 +69,7 @@ static inline u64 div64_u64(u64 dividend, u64 divisor)
return dividend / divisor;
}
-/*
+/**
* div64_s64 - signed 64bit divide with 64bit divisor
* @dividend: signed 64bit dividend
* @divisor: signed 64bit divisor
@@ -119,6 +120,8 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
* This is the most common 64bit divide and should be used if possible,
* as many 32bit archs can optimize this variant better than a full 64bit
* divide.
+ *
+ * Return: dividend / divisor
*/
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
@@ -132,6 +135,8 @@ static inline u64 div_u64(u64 dividend, u32 divisor)
* div_s64 - signed 64bit divide with 32bit divisor
* @dividend: signed 64bit dividend
* @divisor: signed 32bit divisor
+ *
+ * Return: dividend / divisor
*/
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
@@ -153,17 +158,28 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
}
#endif
+#ifndef add_u64_u32
+/*
+ * Many a GCC version also messes this up, zero-extending 'b' and then
+ * spilling everything to the stack.
+ */
+static inline u64 add_u64_u32(u64 a, u32 b)
+{
+ return a + b;
+}
+#endif
+
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
#ifndef mul_u64_u32_shr
-static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */
#ifndef mul_u64_u64_shr
-static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
@@ -172,18 +188,14 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
#else
#ifndef mul_u64_u32_shr
-static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
- u32 ah, al;
+ u32 ah = a >> 32, al = a;
u64 ret;
- al = a;
- ah = a >> 32;
-
ret = mul_u32_u32(al, mul) >> shift;
if (ah)
ret += mul_u32_u32(ah, mul) << (32 - shift);
-
return ret;
}
#endif /* mul_u64_u32_shr */
@@ -234,6 +246,24 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
#endif
+#ifndef mul_s64_u64_shr
+static inline u64 mul_s64_u64_shr(s64 a, u64 b, unsigned int shift)
+{
+ u64 ret;
+
+ /*
+ * Extract the sign before the multiplication and put it back
+ * afterwards if needed.
+ */
+ ret = mul_u64_u64_shr(abs(a), b, shift);
+
+ if (a < 0)
+ ret = -((s64) ret);
+
+ return ret;
+}
+#endif /* mul_s64_u64_shr */
+
#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
@@ -263,12 +293,81 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
}
#endif /* mul_u64_u32_div */
-u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
+/**
+ * mul_u64_add_u64_div_u64 - unsigned 64bit multiply, add, and divide
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @c: unsigned 64bit addend
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * add a third value and then divide by a fourth.
+ * The Generic code divides by 0 if @d is zero and returns ~0 on overflow.
+ * Architecture specific code may trap on zero or overflow.
+ *
+ * Return: (@a * @b + @c) / @d
+ */
+u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d);
+/**
+ * mul_u64_u64_div_u64 - unsigned 64bit multiply and divide
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * and then divide by a third value.
+ * The Generic code divides by 0 if @d is zero and returns ~0 on overflow.
+ * Architecture specific code may trap on zero or overflow.
+ *
+ * Return: @a * @b / @d
+ */
+#define mul_u64_u64_div_u64(a, b, d) mul_u64_add_u64_div_u64(a, b, 0, d)
+
+/**
+ * mul_u64_u64_div_u64_roundup - unsigned 64bit multiply and divide rounded up
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * and then divide and round up.
+ * The Generic code divides by 0 if @d is zero and returns ~0 on overflow.
+ * Architecture specific code may trap on zero or overflow.
+ *
+ * Return: (@a * @b + @d - 1) / @d
+ */
+#define mul_u64_u64_div_u64_roundup(a, b, d) \
+ ({ u64 _tmp = (d); mul_u64_add_u64_div_u64(a, b, _tmp - 1, _tmp); })
+
+/**
+ * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
+ * @ll: unsigned 64bit dividend
+ * @d: unsigned 64bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 64bit divisor
+ * and round up.
+ *
+ * Return: dividend / divisor rounded up
+ */
#define DIV64_U64_ROUND_UP(ll, d) \
({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
/**
+ * DIV_U64_ROUND_UP - unsigned 64bit divide with 32bit divisor rounded up
+ * @ll: unsigned 64bit dividend
+ * @d: unsigned 32bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 32bit divisor
+ * and round up.
+ *
+ * Return: dividend / divisor rounded up
+ */
+#define DIV_U64_ROUND_UP(ll, d) \
+ ({ u32 _tmp = (d); div_u64((ll) + _tmp - 1, _tmp); })
+
+/**
* DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
* @dividend: unsigned 64bit dividend
* @divisor: unsigned 64bit divisor
@@ -281,7 +380,20 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
#define DIV64_U64_ROUND_CLOSEST(dividend, divisor) \
({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })
-/*
+/**
+ * DIV_U64_ROUND_CLOSEST - unsigned 64bit divide with 32bit divisor rounded to nearest integer
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 32bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 32bit divisor
+ * and round to closest integer.
+ *
+ * Return: dividend / divisor rounded to nearest integer
+ */
+#define DIV_U64_ROUND_CLOSEST(dividend, divisor) \
+ ({ u32 _tmp = (divisor); div_u64((u64)(dividend) + _tmp / 2, _tmp); })
+
+/**
* DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer
* @dividend: signed 64bit dividend
* @divisor: signed 32bit divisor
@@ -300,4 +412,19 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
div_s64((__x - (__d / 2)), __d); \
} \
)
+
+/**
+ * roundup_u64 - Round up a 64bit value to the next specified 32bit multiple
+ * @x: the value to round up
+ * @y: 32bit multiple to round up to
+ *
+ * Rounds @x to the next multiple of @y. For 32bit @x values, see roundup() and
+ * the faster round_up() for powers of 2.
+ *
+ * Return: rounded up value.
+ */
+static inline u64 roundup_u64(u64 x, u32 y)
+{
+ return DIV_U64_ROUND_UP(x, y) * y;
+}
#endif /* _LINUX_MATH64_H */
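A short illustration of the helpers added above, assuming only the declarations in this hunk (the cycles-to-nanoseconds use case and the example_* names are illustrative):

/* ns = cycles * NSEC_PER_SEC / rate, rounded up; the 128bit
 * intermediate product avoids overflow for large cycle counts.
 */
static u64 cycles_to_ns_roundup(u64 cycles, u32 rate_hz)
{
        return mul_u64_u64_div_u64_roundup(cycles, NSEC_PER_SEC, rate_hz);
}

/* round a 64bit length up to the next multiple of a 32bit granule */
static u64 align_len(u64 len, u32 granule)
{
        return roundup_u64(len, granule);
}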
diff --git a/include/linux/max17040_battery.h b/include/linux/max17040_battery.h
deleted file mode 100644
index 593602fc9317..000000000000
--- a/include/linux/max17040_battery.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2009 Samsung Electronics
- * Minkyu Kang <mk7.kang@samsung.com>
- */
-
-#ifndef __MAX17040_BATTERY_H_
-#define __MAX17040_BATTERY_H_
-
-struct max17040_platform_data {
- int (*battery_online)(void);
- int (*charger_online)(void);
- int (*charger_enable)(void);
-};
-
-#endif
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 20f1e3ff6013..97e64184767d 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -10,16 +10,29 @@
struct mb_cache;
+/* Cache entry flags */
+enum {
+ MBE_REFERENCED_B = 0,
+ MBE_REUSABLE_B
+};
+
struct mb_cache_entry {
/* List of entries in cache - protected by cache->c_list_lock */
struct list_head e_list;
- /* Hash table list - protected by hash chain bitlock */
+ /*
+ * Hash table list - protected by hash chain bitlock. The entry is
+ * guaranteed to be hashed while e_refcnt > 0.
+ */
struct hlist_bl_node e_hash_list;
+ /*
+ * Entry refcount. Once it reaches zero, the entry is unhashed and freed.
+ * While refcount > 0, the entry is guaranteed to stay in the hash and
+ * e.g. mb_cache_entry_delete_or_get() will fail.
+ */
atomic_t e_refcnt;
/* Key in hash - stable during lifetime of the entry */
u32 e_key;
- u32 e_referenced:1;
- u32 e_reusable:1;
+ unsigned long e_flags;
/* User provided value - stable during lifetime of the entry */
u64 e_value;
};
@@ -29,17 +42,24 @@ void mb_cache_destroy(struct mb_cache *cache);
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
u64 value, bool reusable);
-void __mb_cache_entry_free(struct mb_cache_entry *entry);
-static inline int mb_cache_entry_put(struct mb_cache *cache,
- struct mb_cache_entry *entry)
+void __mb_cache_entry_free(struct mb_cache *cache,
+ struct mb_cache_entry *entry);
+void mb_cache_entry_wait_unused(struct mb_cache_entry *entry);
+static inline void mb_cache_entry_put(struct mb_cache *cache,
+ struct mb_cache_entry *entry)
{
- if (!atomic_dec_and_test(&entry->e_refcnt))
- return 0;
- __mb_cache_entry_free(entry);
- return 1;
+ unsigned int cnt = atomic_dec_return(&entry->e_refcnt);
+
+ if (cnt > 0) {
+ if (cnt <= 2)
+ wake_up_var(&entry->e_refcnt);
+ return;
+ }
+ __mb_cache_entry_free(cache, entry);
}
-void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value);
+struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
+ u32 key, u64 value);
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
u64 value);
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
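A hedged sketch of the reworked reference counting from a caller's point of view (key and value are illustrative; real users are filesystems such as ext4):

static void example_lookup(struct mb_cache *cache, u32 key, u64 value)
{
        struct mb_cache_entry *entry;

        entry = mb_cache_entry_get(cache, key, value);
        if (!entry)
                return;

        /* ... use entry->e_value while holding the reference ... */

        /*
         * The final put unhashes and frees the entry; as the count
         * drops to 2 or below, waiters in mb_cache_entry_wait_unused()
         * are woken via wake_up_var().
         */
        mb_cache_entry_put(cache, entry);
}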
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index 0661af17a758..34dfcc77f505 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -86,6 +86,8 @@ struct cmos_rtc_board_info {
/* 2 values for divider stage reset, others for "testing purposes only" */
# define RTC_DIV_RESET1 0x60
# define RTC_DIV_RESET2 0x70
+ /* In the AMD BKDG, bits 5 and 6 are reserved; bit 4 selects the dv0 bank */
+# define RTC_AMD_BANK_SELECT 0x10
/* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
# define RTC_RATE_SELECT 0x0F
@@ -123,7 +125,12 @@ struct cmos_rtc_board_info {
#define RTC_IO_EXTENT_USED RTC_IO_EXTENT
#endif /* ARCH_RTC_LOCATION */
-unsigned int mc146818_get_time(struct rtc_time *time);
+bool mc146818_does_rtc_work(void);
+int mc146818_get_time(struct rtc_time *time, int timeout);
int mc146818_set_time(struct rtc_time *time);
+bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+ int timeout,
+ void *param);
+
#endif /* _MC146818RTC_H */
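A sketch of the updated read path; it assumes mc146818_get_time() returns 0 on success and a negative error code, and that @timeout bounds the wait for the update-in-progress (UIP) window:

static int example_read_rtc(struct rtc_time *tm)
{
        if (!mc146818_does_rtc_work())
                return -ENODEV;

        return mc146818_get_time(tm, 1000 /* timeout, assumed ms */);
}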
diff --git a/include/linux/mc33xs2410.h b/include/linux/mc33xs2410.h
new file mode 100644
index 000000000000..31c0edf10dd7
--- /dev/null
+++ b/include/linux/mc33xs2410.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Liebherr-Electronics and Drives GmbH
+ */
+#ifndef _MC33XS2410_H
+#define _MC33XS2410_H
+
+#include <linux/spi/spi.h>
+
+MODULE_IMPORT_NS("PWM_MC33XS2410");
+
+int mc33xs2410_read_reg_ctrl(struct spi_device *spi, u8 reg, u16 *val);
+int mc33xs2410_read_reg_diag(struct spi_device *spi, u8 reg, u16 *val);
+int mc33xs2410_modify_reg(struct spi_device *spi, u8 reg, u8 mask, u8 val);
+
+#endif /* _MC33XS2410_H */
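A hedged usage sketch of the exported register accessors (the register number 0x06 is purely illustrative):

static int example_read_diag(struct spi_device *spi)
{
        u16 val;
        int ret;

        ret = mc33xs2410_read_reg_diag(spi, 0x06, &val);
        if (ret)
                return ret;

        dev_info(&spi->dev, "diag reg 0x06 = 0x%04x\n", val);
        return 0;
}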
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index 71dd10a3d928..4ab2691f51a6 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -63,7 +63,6 @@ static inline struct mcb_bus *to_mcb_bus(struct device *dev)
struct mcb_device {
struct device dev;
struct mcb_bus *bus;
- bool is_added;
struct mcb_driver *driver;
u16 id;
int inst;
@@ -76,10 +75,7 @@ struct mcb_device {
struct device *dma_dev;
};
-static inline struct mcb_device *to_mcb_device(struct device *dev)
-{
- return container_of(dev, struct mcb_device, dev);
-}
+#define to_mcb_device(__dev) container_of_const(__dev, struct mcb_device, dev)
/**
* struct mcb_driver - MEN Chameleon Bus device driver
@@ -98,10 +94,7 @@ struct mcb_driver {
void (*shutdown)(struct mcb_device *mdev);
};
-static inline struct mcb_driver *to_mcb_driver(struct device_driver *drv)
-{
- return container_of(drv, struct mcb_driver, driver);
-}
+#define to_mcb_driver(__drv) container_of_const(__drv, struct mcb_driver, driver)
static inline void *mcb_get_drvdata(struct mcb_device *dev)
{
@@ -120,7 +113,7 @@ extern int __must_check __mcb_register_driver(struct mcb_driver *drv,
__mcb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
extern void mcb_unregister_driver(struct mcb_driver *driver);
#define module_mcb_driver(__mcb_driver) \
- module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver);
+ module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver)
extern void mcb_bus_add_devices(const struct mcb_bus *bus);
extern int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev);
extern struct mcb_bus *mcb_alloc_bus(struct device *carrier);
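A minimal driver skeleton using the corrected module_mcb_driver() macro (the probe/remove signatures and the id_table field are assumed from the rest of mcb.h, which this hunk does not show):

static int example_probe(struct mcb_device *mdev,
                         const struct mcb_device_id *id)
{
        /* map memory and request the IRQ via the mcb helpers */
        return 0;
}

static void example_remove(struct mcb_device *mdev)
{
}

static const struct mcb_device_id example_ids[] = {
        { .device = 0x123 },    /* illustrative device ID */
        { }
};
MODULE_DEVICE_TABLE(mcb, example_ids);

static struct mcb_driver example_driver = {
        .driver = { .name = "example-mcb" },
        .probe = example_probe,
        .remove = example_remove,
        .id_table = example_ids,
};
module_mcb_driver(example_driver);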
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
index 1fb34ea394ad..139d05b26f82 100644
--- a/include/linux/mdev.h
+++ b/include/linux/mdev.h
@@ -10,179 +10,80 @@
#ifndef MDEV_H
#define MDEV_H
+#include <linux/device.h>
+#include <linux/uuid.h>
+
struct mdev_type;
struct mdev_device {
struct device dev;
guid_t uuid;
- void *driver_data;
struct list_head next;
struct mdev_type *type;
- struct device *iommu_device;
bool active;
};
-static inline struct mdev_device *to_mdev_device(struct device *dev)
-{
- return container_of(dev, struct mdev_device, dev);
-}
+struct mdev_type {
+ /* set by the driver before calling mdev_register_parent(): */
+ const char *sysfs_name;
+ const char *pretty_name;
-/*
- * Called by the parent device driver to set the device which represents
- * this mdev in iommu protection scope. By default, the iommu device is
- * NULL, that indicates using vendor defined isolation.
- *
- * @dev: the mediated device that iommu will isolate.
- * @iommu_device: a pci device which represents the iommu for @dev.
- */
-static inline void mdev_set_iommu_device(struct mdev_device *mdev,
- struct device *iommu_device)
-{
- mdev->iommu_device = iommu_device;
-}
-
-static inline struct device *mdev_get_iommu_device(struct mdev_device *mdev)
-{
- return mdev->iommu_device;
-}
+ /* set by the core, can be used by drivers */
+ struct mdev_parent *parent;
-unsigned int mdev_get_type_group_id(struct mdev_device *mdev);
-unsigned int mtype_get_type_group_id(struct mdev_type *mtype);
-struct device *mtype_get_parent_dev(struct mdev_type *mtype);
-
-/**
- * struct mdev_parent_ops - Structure to be registered for each parent device to
- * register the device to mdev module.
- *
- * @owner: The module owner.
- * @dev_attr_groups: Attributes of the parent device.
- * @mdev_attr_groups: Attributes of the mediated device.
- * @supported_type_groups: Attributes to define supported types. It is mandatory
- * to provide supported types.
- * @create: Called to allocate basic resources in parent device's
- * driver for a particular mediated device. It is
- * mandatory to provide create ops.
- * @mdev: mdev_device structure on of mediated device
- * that is being created
- * Returns integer: success (0) or error (< 0)
- * @remove: Called to free resources in parent device's driver for
- * a mediated device. It is mandatory to provide 'remove'
- * ops.
- * @mdev: mdev_device device structure which is being
- * destroyed
- * Returns integer: success (0) or error (< 0)
- * @open: Open mediated device.
- * @mdev: mediated device.
- * Returns integer: success (0) or error (< 0)
- * @release: release mediated device
- * @mdev: mediated device.
- * @read: Read emulation callback
- * @mdev: mediated device structure
- * @buf: read buffer
- * @count: number of bytes to read
- * @ppos: address.
- * Retuns number on bytes read on success or error.
- * @write: Write emulation callback
- * @mdev: mediated device structure
- * @buf: write buffer
- * @count: number of bytes to be written
- * @ppos: address.
- * Retuns number on bytes written on success or error.
- * @ioctl: IOCTL callback
- * @mdev: mediated device structure
- * @cmd: ioctl command
- * @arg: arguments to ioctl
- * @mmap: mmap callback
- * @mdev: mediated device structure
- * @vma: vma structure
- * @request: request callback to release device
- * @mdev: mediated device structure
- * @count: request sequence number
- * Parent device that support mediated device should be registered with mdev
- * module with mdev_parent_ops structure.
- **/
-struct mdev_parent_ops {
- struct module *owner;
- const struct attribute_group **dev_attr_groups;
- const struct attribute_group **mdev_attr_groups;
- struct attribute_group **supported_type_groups;
-
- int (*create)(struct mdev_device *mdev);
- int (*remove)(struct mdev_device *mdev);
- int (*open)(struct mdev_device *mdev);
- void (*release)(struct mdev_device *mdev);
- ssize_t (*read)(struct mdev_device *mdev, char __user *buf,
- size_t count, loff_t *ppos);
- ssize_t (*write)(struct mdev_device *mdev, const char __user *buf,
- size_t count, loff_t *ppos);
- long (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
- unsigned long arg);
- int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
- void (*request)(struct mdev_device *mdev, unsigned int count);
+ /* internal only */
+ struct kobject kobj;
+ struct kobject *devices_kobj;
};
-/* interface for exporting mdev supported type attributes */
-struct mdev_type_attribute {
- struct attribute attr;
- ssize_t (*show)(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf);
- ssize_t (*store)(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, const char *buf,
- size_t count);
+/* embedded into the struct device that the mdev devices hang off */
+struct mdev_parent {
+ struct device *dev;
+ struct mdev_driver *mdev_driver;
+ struct kset *mdev_types_kset;
+ /* Synchronize device creation/removal with parent unregistration */
+ struct rw_semaphore unreg_sem;
+ struct mdev_type **types;
+ unsigned int nr_types;
+ atomic_t available_instances;
};
-#define MDEV_TYPE_ATTR(_name, _mode, _show, _store) \
-struct mdev_type_attribute mdev_type_attr_##_name = \
- __ATTR(_name, _mode, _show, _store)
-#define MDEV_TYPE_ATTR_RW(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RW(_name)
-#define MDEV_TYPE_ATTR_RO(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_RO(_name)
-#define MDEV_TYPE_ATTR_WO(_name) \
- struct mdev_type_attribute mdev_type_attr_##_name = __ATTR_WO(_name)
+static inline struct mdev_device *to_mdev_device(struct device *dev)
+{
+ return container_of(dev, struct mdev_device, dev);
+}
/**
* struct mdev_driver - Mediated device driver
+ * @device_api: string to return for the device_api sysfs
+ * @max_instances: maximum number of instances supported (optional)
* @probe: called when new device created
* @remove: called when device removed
+ * @get_available: Return the max number of instances that can be created
+ * @show_description: Print a description of the mtype
* @driver: device driver structure
- *
**/
struct mdev_driver {
+ const char *device_api;
+ unsigned int max_instances;
int (*probe)(struct mdev_device *dev);
void (*remove)(struct mdev_device *dev);
+ unsigned int (*get_available)(struct mdev_type *mtype);
+ ssize_t (*show_description)(struct mdev_type *mtype, char *buf);
struct device_driver driver;
};
-static inline void *mdev_get_drvdata(struct mdev_device *mdev)
-{
- return mdev->driver_data;
-}
-static inline void mdev_set_drvdata(struct mdev_device *mdev, void *data)
-{
- mdev->driver_data = data;
-}
-static inline const guid_t *mdev_uuid(struct mdev_device *mdev)
-{
- return &mdev->uuid;
-}
-
-extern struct bus_type mdev_bus_type;
-
-int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops);
-void mdev_unregister_device(struct device *dev);
+int mdev_register_parent(struct mdev_parent *parent, struct device *dev,
+ struct mdev_driver *mdev_driver, struct mdev_type **types,
+ unsigned int nr_types);
+void mdev_unregister_parent(struct mdev_parent *parent);
int mdev_register_driver(struct mdev_driver *drv);
void mdev_unregister_driver(struct mdev_driver *drv);
-struct device *mdev_parent_dev(struct mdev_device *mdev);
static inline struct device *mdev_dev(struct mdev_device *mdev)
{
return &mdev->dev;
}
-static inline struct mdev_device *mdev_from_dev(struct device *dev)
-{
- return dev->bus == &mdev_bus_type ? to_mdev_device(dev) : NULL;
-}
#endif /* MDEV_H */
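A hedged sketch of the new registration flow for a parent driver (all names are illustrative; a real driver would also fill in struct mdev_driver and handle errors):

static struct mdev_type example_type = {
        .sysfs_name = "example-1",
        .pretty_name = "Example mediated device",
};
static struct mdev_type *example_types[] = { &example_type };
static struct mdev_parent example_parent;

static int example_setup(struct device *dev, struct mdev_driver *drv)
{
        return mdev_register_parent(&example_parent, dev, drv,
                                    example_types, ARRAY_SIZE(example_types));
}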
diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h
index 373630fe5c28..cffabdbce075 100644
--- a/include/linux/mdio-bitbang.h
+++ b/include/linux/mdio-bitbang.h
@@ -38,8 +38,10 @@ struct mdiobb_ctrl {
u8 op_c22_write;
};
-int mdiobb_read(struct mii_bus *bus, int phy, int reg);
-int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val);
+int mdiobb_read_c22(struct mii_bus *bus, int phy, int reg);
+int mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, u16 val);
+int mdiobb_read_c45(struct mii_bus *bus, int devad, int phy, int reg);
+int mdiobb_write_c45(struct mii_bus *bus, int devad, int phy, int reg, u16 val);
/* The returned bus is not yet registered with the phy layer. */
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl);
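Note the devad-first argument order of the new Clause 45 helpers, which differs from the usual (bus, addr, devad, regnum) convention; a wrapper forwarding from the common order might look like this (illustrative sketch):

static int example_read_c45(struct mii_bus *bus, int phy, int devad, int reg)
{
        /* the helper takes devad before the PHY address */
        return mdiobb_read_c45(bus, devad, phy, reg);
}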
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index ffb787d5ebde..42d6d47e445b 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -7,15 +7,9 @@
#define __LINUX_MDIO_H__
#include <uapi/linux/mdio.h>
+#include <linux/bitfield.h>
#include <linux/mod_devicetable.h>
-/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
- * IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips.
- */
-#define MII_ADDR_C45 (1<<30)
-#define MII_DEVADDR_C45_SHIFT 16
-#define MII_REGADDR_C45_MASK GENMASK(15, 0)
-
struct gpio_desc;
struct mii_bus;
struct reset_control;
@@ -37,23 +31,21 @@ struct mdio_device {
struct mii_bus *bus;
char modalias[MDIO_NAME_SIZE];
- int (*bus_match)(struct device *dev, struct device_driver *drv);
+ int (*bus_match)(struct device *dev, const struct device_driver *drv);
void (*device_free)(struct mdio_device *mdiodev);
void (*device_remove)(struct mdio_device *mdiodev);
/* Bus address of the MDIO device (0-31) */
int addr;
int flags;
+ int reset_state;
struct gpio_desc *reset_gpio;
struct reset_control *reset_ctrl;
unsigned int reset_assert_delay;
unsigned int reset_deassert_delay;
};
-static inline struct mdio_device *to_mdio_device(const struct device *dev)
-{
- return container_of(dev, struct mdio_device, dev);
-}
+#define to_mdio_device(__dev) container_of_const(__dev, struct mdio_device, dev)
/* struct mdio_driver_common: Common to all MDIO drivers */
struct mdio_driver_common {
@@ -62,11 +54,8 @@ struct mdio_driver_common {
};
#define MDIO_DEVICE_FLAG_PHY 1
-static inline struct mdio_driver_common *
-to_mdio_common_driver(const struct device_driver *driver)
-{
- return container_of(driver, struct mdio_driver_common, driver);
-}
+#define to_mdio_common_driver(__drv_c) container_of_const(__drv_c, struct mdio_driver_common, \
+ driver)
/* struct mdio_driver: Generic MDIO driver */
struct mdio_driver {
@@ -80,14 +69,13 @@ struct mdio_driver {
/* Clears up any memory if needed */
void (*remove)(struct mdio_device *mdiodev);
+
+ /* Quiesces the device on system shutdown, turns off interrupts, etc. */
+ void (*shutdown)(struct mdio_device *mdiodev);
};
-static inline struct mdio_driver *
-to_mdio_driver(const struct device_driver *driver)
-{
- return container_of(to_mdio_common_driver(driver), struct mdio_driver,
- mdiodrv);
-}
+#define to_mdio_driver(__drv_m) container_of_const(to_mdio_common_driver(__drv_m), \
+ struct mdio_driver, mdiodrv)
/* device driver data */
static inline void mdiodev_set_drvdata(struct mdio_device *mdio, void *data)
@@ -107,7 +95,16 @@ void mdio_device_remove(struct mdio_device *mdiodev);
void mdio_device_reset(struct mdio_device *mdiodev, int value);
int mdio_driver_register(struct mdio_driver *drv);
void mdio_driver_unregister(struct mdio_driver *drv);
-int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
+
+static inline void mdio_device_get(struct mdio_device *mdiodev)
+{
+ get_device(&mdiodev->dev);
+}
+
+static inline void mdio_device_put(struct mdio_device *mdiodev)
+{
+ mdio_device_free(mdiodev);
+}
static inline bool mdio_phy_id_is_c45(int phy_id)
{
@@ -164,31 +161,12 @@ extern int mdio_set_flag(const struct mdio_if_info *mdio,
bool sense);
extern int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmds);
extern int mdio45_nway_restart(const struct mdio_if_info *mdio);
-extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
- struct ethtool_cmd *ecmd,
- u32 npage_adv, u32 npage_lpa);
extern void
mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio,
struct ethtool_link_ksettings *cmd,
u32 npage_adv, u32 npage_lpa);
/**
- * mdio45_ethtool_gset - get settings for ETHTOOL_GSET
- * @mdio: MDIO interface
- * @ecmd: Ethtool request structure
- *
- * Since the CSRs for auto-negotiation using next pages are not fully
- * standardised, this function does not attempt to decode them. Use
- * mdio45_ethtool_gset_npage() to specify advertisement bits from next
- * pages.
- */
-static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
- struct ethtool_cmd *ecmd)
-{
- mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0);
-}
-
-/**
* mdio45_ethtool_ksettings_get - get settings for ETHTOOL_GLINKSETTINGS
* @mdio: MDIO interface
* @cmd: Ethtool request structure
@@ -335,8 +313,266 @@ static inline void mii_10gbt_stat_mod_linkmode_lpa_t(unsigned long *advertising,
advertising, lpa & MDIO_AN_10GBT_STAT_LP10G);
}
+/**
+ * mii_t1_adv_l_mod_linkmode_t
+ * @advertising: target the linkmode advertisement settings
+ * @lpa: value of the BASE-T1 Autonegotiation Advertisement [15:0] Register
+ *
+ * A small helper function that translates BASE-T1 Autonegotiation
+ * Advertisement [15:0] Register bits to linkmode advertisement settings.
+ * Other bits in advertising aren't changed.
+ */
+static inline void mii_t1_adv_l_mod_linkmode_t(unsigned long *advertising, u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising,
+ lpa & MDIO_AN_T1_ADV_L_PAUSE_CAP);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising,
+ lpa & MDIO_AN_T1_ADV_L_PAUSE_ASYM);
+}
+
+/**
+ * mii_t1_adv_m_mod_linkmode_t
+ * @advertising: target the linkmode advertisement settings
+ * @lpa: value of the BASE-T1 Autonegotiation Advertisement [31:16] Register
+ *
+ * A small helper function that translates BASE-T1 Autonegotiation
+ * Advertisement [31:16] Register bits to linkmode advertisement settings.
+ * Other bits in advertising aren't changed.
+ */
+static inline void mii_t1_adv_m_mod_linkmode_t(unsigned long *advertising, u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ advertising, lpa & MDIO_AN_T1_ADV_M_B10L);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
+ advertising, lpa & MDIO_AN_T1_ADV_M_100BT1);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT,
+ advertising, lpa & MDIO_AN_T1_ADV_M_1000BT1);
+}
+
+/**
+ * linkmode_adv_to_mii_t1_adv_l_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * BASE-T1 Autonegotiation Advertisement [15:0] Register.
+ */
+static inline u32 linkmode_adv_to_mii_t1_adv_l_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_L_PAUSE_CAP;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_L_PAUSE_ASYM;
+
+ return result;
+}
+
+/**
+ * linkmode_adv_to_mii_t1_adv_m_t
+ * @advertising: the linkmode advertisement settings
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the
+ * BASE-T1 Autonegotiation Advertisement [31:16] Register.
+ */
+static inline u32 linkmode_adv_to_mii_t1_adv_m_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_M_B10L;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_M_100BT1;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT, advertising))
+ result |= MDIO_AN_T1_ADV_M_1000BT1;
+
+ return result;
+}
+
+/**
+ * mii_eee_cap1_mod_linkmode_t()
+ * @adv: target the linkmode advertisement settings
+ * @val: register value
+ *
+ * A function that translates the value of the following registers to the linkmode:
+ * IEEE 802.3-2018 45.2.3.10 "EEE control and capability 1" register (3.20)
+ * IEEE 802.3-2018 45.2.7.13 "EEE advertisement 1" register (7.60)
+ * IEEE 802.3-2018 45.2.7.14 "EEE link partner ability 1" register (7.61)
+ */
+static inline void mii_eee_cap1_mod_linkmode_t(unsigned long *adv, u32 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ adv, val & MDIO_EEE_100TX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ adv, val & MDIO_EEE_1000T);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ adv, val & MDIO_EEE_10GT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ adv, val & MDIO_EEE_1000KX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ adv, val & MDIO_EEE_10GKX4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ adv, val & MDIO_EEE_10GKR);
+}
+
+/**
+ * mii_eee_cap2_mod_linkmode_sup_t()
+ * @adv: target the linkmode settings
+ * @val: register value
+ *
+ * A function that translates the value of the following registers to the linkmode:
+ * IEEE 802.3-2022 45.2.3.11 "EEE control and capability 2" register (3.21)
+ */
+static inline void mii_eee_cap2_mod_linkmode_sup_t(unsigned long *adv, u32 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ adv, val & MDIO_EEE_2_5GT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ adv, val & MDIO_EEE_5GT);
+}
+
+/**
+ * mii_eee_cap2_mod_linkmode_adv_t()
+ * @adv: target the linkmode advertisement settings
+ * @val: register value
+ *
+ * A function that translates the value of the following registers to the linkmode:
+ * IEEE 802.3-2022 45.2.7.16 "EEE advertisement 2" register (7.62)
+ * IEEE 802.3-2022 45.2.7.17 "EEE link partner ability 2" register (7.63)
+ * Note: Currently this function is identical to mii_eee_cap2_mod_linkmode_sup_t().
+ * However, the bits differ for certain modes that are not yet supported, so
+ * separate functions are provided from the start.
+ */
+static inline void mii_eee_cap2_mod_linkmode_adv_t(unsigned long *adv, u32 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ adv, val & MDIO_EEE_2_5GT);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ adv, val & MDIO_EEE_5GT);
+}
+
+/**
+ * linkmode_to_mii_eee_cap1_t()
+ * @adv: the linkmode advertisement settings
+ *
+ * A function that translates the linkmode to a value for IEEE 802.3-2018 45.2.7.13
+ * "EEE advertisement 1" register (7.60)
+ */
+static inline u32 linkmode_to_mii_eee_cap1_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, adv))
+ result |= MDIO_EEE_100TX;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_1000T;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_10GT;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, adv))
+ result |= MDIO_EEE_1000KX;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, adv))
+ result |= MDIO_EEE_10GKX4;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, adv))
+ result |= MDIO_EEE_10GKR;
+
+ return result;
+}
+
+/**
+ * linkmode_to_mii_eee_cap2_t()
+ * @adv: the linkmode advertisement settings
+ *
+ * A function that translates the linkmode to a value for IEEE 802.3-2022 45.2.7.16
+ * "EEE advertisement 2" register (7.62)
+ */
+static inline u32 linkmode_to_mii_eee_cap2_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, adv))
+ result |= MDIO_EEE_2_5GT;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, adv))
+ result |= MDIO_EEE_5GT;
+
+ return result;
+}
+
+/**
+ * mii_10base_t1_adv_mod_linkmode_t()
+ * @adv: linkmode advertisement settings
+ * @val: register value
+ *
+ * A function that translates the IEEE 802.3cg-2019 45.2.7.26 "10BASE-T1 AN status"
+ * register (7.527) value to the linkmode.
+ */
+static inline void mii_10base_t1_adv_mod_linkmode_t(unsigned long *adv, u16 val)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ adv, val & MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L);
+}
+
+/**
+ * linkmode_adv_to_mii_10base_t1_t()
+ * @adv: linkmode advertisement settings
+ *
+ * A function that translates the linkmode to IEEE 802.3cg-2019 45.2.7.25
+ * "10BASE-T1 AN control" register (7.526) value.
+ */
+static inline u32 linkmode_adv_to_mii_10base_t1_t(unsigned long *adv)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, adv))
+ result |= MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L;
+
+ return result;
+}
+
+/**
+ * mii_c73_mod_linkmode - convert a Clause 73 advertisement to linkmodes
+ * @adv: linkmode advertisement setting
+ * @lpa: array of three u16s containing the advertisement
+ *
+ * Convert an IEEE 802.3 Clause 73 advertisement to ethtool link modes.
+ */
+static inline void mii_c73_mod_linkmode(unsigned long *adv, u16 *lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ adv, lpa[0] & MDIO_AN_C73_0_PAUSE);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ adv, lpa[0] & MDIO_AN_C73_0_ASM_DIR);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_1000BASE_KX);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_10GBASE_KX4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_40GBASE_KR4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_40GBASE_CR4);
+ /* 100GBASE_CR10 and 100GBASE_KP4 not implemented */
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_100GBASE_KR4);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_100GBASE_CR4);
+ /* 25GBASE_R_S not implemented */
+ /* The 25GBASE_R bit can be used for 25Gbase KR or CR modes */
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_25GBASE_R);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_25GBASE_R);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ adv, lpa[1] & MDIO_AN_C73_1_10GBASE_KR);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ adv, lpa[2] & MDIO_AN_C73_2_2500BASE_KX);
+ /* 5GBASE_KR not implemented */
+}
+
int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+int __mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
+ u16 set);
int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
u16 mask, u16 set);
@@ -346,35 +582,98 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val);
int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask,
u16 set);
+int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
+ u16 mask, u16 set);
+int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum);
+int mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum);
+int mdiobus_c45_read_nested(struct mii_bus *bus, int addr, int devad,
+ u32 regnum);
+int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 val);
+int mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 val);
+int mdiobus_c45_write_nested(struct mii_bus *bus, int addr, int devad,
+ u32 regnum, u16 val);
+int mdiobus_c45_modify(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 mask, u16 set);
+
+int mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, int devad,
+ u32 regnum, u16 mask, u16 set);
+
+static inline int __mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
+{
+ return __mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
+}
+
+static inline int __mdiodev_write(struct mdio_device *mdiodev, u32 regnum,
+ u16 val)
+{
+ return __mdiobus_write(mdiodev->bus, mdiodev->addr, regnum, val);
+}
+
+static inline int __mdiodev_modify(struct mdio_device *mdiodev, u32 regnum,
+ u16 mask, u16 set)
+{
+ return __mdiobus_modify(mdiodev->bus, mdiodev->addr, regnum, mask, set);
+}
+
+static inline int __mdiodev_modify_changed(struct mdio_device *mdiodev,
+ u32 regnum, u16 mask, u16 set)
+{
+ return __mdiobus_modify_changed(mdiodev->bus, mdiodev->addr, regnum,
+ mask, set);
+}
+
+static inline int mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
+{
+ return mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
+}
+
+static inline int mdiodev_write(struct mdio_device *mdiodev, u32 regnum,
+ u16 val)
+{
+ return mdiobus_write(mdiodev->bus, mdiodev->addr, regnum, val);
+}
+
+static inline int mdiodev_modify(struct mdio_device *mdiodev, u32 regnum,
+ u16 mask, u16 set)
+{
+ return mdiobus_modify(mdiodev->bus, mdiodev->addr, regnum, mask, set);
+}
-static inline u32 mdiobus_c45_addr(int devad, u16 regnum)
+static inline int mdiodev_modify_changed(struct mdio_device *mdiodev,
+ u32 regnum, u16 mask, u16 set)
{
- return MII_ADDR_C45 | devad << MII_DEVADDR_C45_SHIFT | regnum;
+ return mdiobus_modify_changed(mdiodev->bus, mdiodev->addr, regnum,
+ mask, set);
}
-static inline int __mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad,
- u16 regnum)
+static inline int mdiodev_c45_modify(struct mdio_device *mdiodev, int devad,
+ u32 regnum, u16 mask, u16 set)
{
- return __mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum));
+ return mdiobus_c45_modify(mdiodev->bus, mdiodev->addr, devad, regnum,
+ mask, set);
}
-static inline int __mdiobus_c45_write(struct mii_bus *bus, int prtad, int devad,
- u16 regnum, u16 val)
+static inline int mdiodev_c45_modify_changed(struct mdio_device *mdiodev,
+ int devad, u32 regnum, u16 mask,
+ u16 set)
{
- return __mdiobus_write(bus, prtad, mdiobus_c45_addr(devad, regnum),
- val);
+ return mdiobus_c45_modify_changed(mdiodev->bus, mdiodev->addr, devad,
+ regnum, mask, set);
}
-static inline int mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad,
+static inline int mdiodev_c45_read(struct mdio_device *mdiodev, int devad,
u16 regnum)
{
- return mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum));
+ return mdiobus_c45_read(mdiodev->bus, mdiodev->addr, devad, regnum);
}
-static inline int mdiobus_c45_write(struct mii_bus *bus, int prtad, int devad,
+static inline int mdiodev_c45_write(struct mdio_device *mdiodev, u32 devad,
u16 regnum, u16 val)
{
- return mdiobus_write(bus, prtad, mdiobus_c45_addr(devad, regnum), val);
+ return mdiobus_c45_write(mdiodev->bus, mdiodev->addr, devad, regnum,
+ val);
}
int mdiobus_register_device(struct mdio_device *mdiodev);
@@ -390,16 +689,7 @@ struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr);
* init/exit. Each module may only use this macro once, and calling it
* replaces module_init() and module_exit().
*/
-#define mdio_module_driver(_mdio_driver) \
-static int __init mdio_module_init(void) \
-{ \
- return mdio_driver_register(&_mdio_driver); \
-} \
-module_init(mdio_module_init); \
-static void __exit mdio_module_exit(void) \
-{ \
- mdio_driver_unregister(&_mdio_driver); \
-} \
-module_exit(mdio_module_exit)
+#define mdio_module_driver(_mdio_driver) \
+ module_driver(_mdio_driver, mdio_driver_register, mdio_driver_unregister)
#endif /* __LINUX_MDIO_H__ */
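Tying the pieces together, a hedged sketch that reads the Clause 45 "EEE advertisement 1" register (7.60) through the new mdiodev accessor and converts it with the helper above (MDIO_MMD_AN and MDIO_AN_EEE_ADV are the existing uapi constants):

static int example_get_eee_adv(struct mdio_device *mdiodev,
                               unsigned long *adv)
{
        int val;

        val = mdiodev_c45_read(mdiodev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
        if (val < 0)
                return val;

        mii_eee_cap1_mod_linkmode_t(adv, val);
        return 0;
}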
diff --git a/include/linux/mdio/mdio-i2c.h b/include/linux/mdio/mdio-i2c.h
index b1d27f7cd23f..65b550a6fc32 100644
--- a/include/linux/mdio/mdio-i2c.h
+++ b/include/linux/mdio/mdio-i2c.h
@@ -11,6 +11,14 @@ struct device;
struct i2c_adapter;
struct mii_bus;
-struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c);
+enum mdio_i2c_proto {
+ MDIO_I2C_NONE,
+ MDIO_I2C_MARVELL_C22,
+ MDIO_I2C_C45,
+ MDIO_I2C_ROLLBALL,
+};
+
+struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
+ enum mdio_i2c_proto protocol);
#endif
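A short sketch of allocating a bus with the new protocol argument, here for a RollBall SFP module (illustrative):

static struct mii_bus *example_sfp_mdio(struct device *dev,
                                        struct i2c_adapter *i2c)
{
        return mdio_i2c_alloc(dev, i2c, MDIO_I2C_ROLLBALL);
}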
diff --git a/include/linux/mdio/mdio-mscc-miim.h b/include/linux/mdio/mdio-mscc-miim.h
new file mode 100644
index 000000000000..1ce699740af6
--- /dev/null
+++ b/include/linux/mdio/mdio-mscc-miim.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Driver for the MDIO interface of Microsemi network switches.
+ *
+ * Author: Colin Foster <colin.foster@in-advantage.com>
+ * Copyright (C) 2021 Innovative Advantage
+ */
+#ifndef MDIO_MSCC_MIIM_H
+#define MDIO_MSCC_MIIM_H
+
+#include <linux/device.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+
+int mscc_miim_setup(struct device *device, struct mii_bus **bus,
+ const char *name, struct regmap *mii_regmap,
+ int status_offset, bool ignore_read_errors);
+
+#endif
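A hedged sketch of the setup helper; it assumes mscc_miim_setup() only prepares the bus and leaves registration to the caller:

static int example_miim(struct device *dev, struct regmap *mii_regmap)
{
        struct mii_bus *bus;
        int ret;

        ret = mscc_miim_setup(dev, &bus, "example-miim", mii_regmap,
                              0 /* status_offset */, false);
        if (ret)
                return ret;

        return devm_mdiobus_register(dev, bus);
}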
diff --git a/include/linux/mdio/mdio-regmap.h b/include/linux/mdio/mdio-regmap.h
new file mode 100644
index 000000000000..679d9069846b
--- /dev/null
+++ b/include/linux/mdio/mdio-regmap.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Driver for MMIO-Mapped MDIO devices. Some IPs expose internal PHYs or PCS
+ * within the MMIO-mapped area.
+ *
+ * Copyright (C) 2023 Maxime Chevallier <maxime.chevallier@bootlin.com>
+ */
+#ifndef MDIO_REGMAP_H
+#define MDIO_REGMAP_H
+
+#include <linux/phy.h>
+
+struct device;
+struct regmap;
+
+struct mdio_regmap_config {
+ struct device *parent;
+ struct regmap *regmap;
+ char name[MII_BUS_ID_SIZE];
+ u8 valid_addr;
+ bool autoscan;
+};
+
+struct mii_bus *devm_mdio_regmap_register(struct device *dev,
+ const struct mdio_regmap_config *config);
+
+#endif
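A usage sketch built only from the declarations above (the regmap is assumed to expose standard MII registers):

static int example_register(struct device *dev, struct regmap *regmap)
{
        struct mdio_regmap_config cfg = {
                .parent = dev,
                .regmap = regmap,
                .valid_addr = 0,        /* single PHY at address 0 */
                .autoscan = false,
        };
        struct mii_bus *bus;

        snprintf(cfg.name, sizeof(cfg.name), "%s-mii", dev_name(dev));

        bus = devm_mdio_regmap_register(dev, &cfg);
        return PTR_ERR_OR_ZERO(bus);
}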
diff --git a/include/linux/mdio/mdio-xgene.h b/include/linux/mdio/mdio-xgene.h
index 8af93ada8b64..9e588965dc83 100644
--- a/include/linux/mdio/mdio-xgene.h
+++ b/include/linux/mdio/mdio-xgene.h
@@ -8,6 +8,10 @@
#ifndef __MDIO_XGENE_H__
#define __MDIO_XGENE_H__
+#include <linux/bits.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
#define BLOCK_XG_MDIO_CSR_OFFSET 0x5000
#define BLOCK_DIAG_CSR_OFFSET 0xd000
#define XGENET_CONFIG_REG_ADDR 0x20
diff --git a/include/linux/mei_aux.h b/include/linux/mei_aux.h
new file mode 100644
index 000000000000..506912ad363b
--- /dev/null
+++ b/include/linux/mei_aux.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Intel Corporation. All rights reserved.
+ */
+#ifndef _LINUX_MEI_AUX_H
+#define _LINUX_MEI_AUX_H
+
+#include <linux/auxiliary_bus.h>
+
+/**
+ * struct mei_aux_device - mei auxiliary device
+ * @aux_dev: - auxiliary device object
+ * @irq: interrupt driving the mei auxiliary device
+ * @bar: mmio resource bar reserved to mei auxiliary device
+ * @ext_op_mem: resource for extended operational memory
+ * used in graphics PXP mode.
+ * @slow_firmware: The device has slow underlying firmware.
+ * Such firmware requires larger operation timeouts.
+ */
+struct mei_aux_device {
+ struct auxiliary_device aux_dev;
+ int irq;
+ struct resource bar;
+ struct resource ext_op_mem;
+ bool slow_firmware;
+};
+
+#define auxiliary_dev_to_mei_aux_dev(auxiliary_dev) \
+ container_of(auxiliary_dev, struct mei_aux_device, aux_dev)
+
+#endif /* _LINUX_MEI_AUX_H */
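A sketch of how an auxiliary driver's probe might use the accessor macro (illustrative):

static int example_probe(struct auxiliary_device *aux_dev,
                         const struct auxiliary_device_id *id)
{
        struct mei_aux_device *adev = auxiliary_dev_to_mei_aux_dev(aux_dev);

        dev_info(&aux_dev->dev, "irq %d, bar %pR\n", adev->irq, &adev->bar);
        if (adev->slow_firmware) {
                /* pick larger operation timeouts for this device */
        }
        return 0;
}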
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 07f5ef8fc456..a82755e1fc40 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -11,6 +11,7 @@
struct mei_cl_device;
struct mei_device;
+struct scatterlist;
typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev);
@@ -30,11 +31,11 @@ typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev);
* @rx_work: async work to execute Rx event callback
* @rx_cb: Drivers register this callback to get asynchronous ME
* Rx buffer pending notifications.
- * @notif_work: async work to execute FW notif event callback
+ * @notif_work: async work to execute FW notify event callback
* @notif_cb: Drivers register this callback to get asynchronous ME
* FW notification pending notifications.
*
- * @do_match: wheather device can be matched with a driver
+ * @do_match: whether the device can be matched with a driver
* @is_added: device is already scanned
* @priv_data: client private data
*/
@@ -91,29 +92,42 @@ void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv);
mei_cldev_driver_register,\
mei_cldev_driver_unregister)
-ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length);
+ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length);
+ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, unsigned long timeout);
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
-ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
- size_t length);
-ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
- u8 vtag);
+ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ unsigned long timeout);
+ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, u8 vtag);
+ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, u8 vtag, unsigned long timeout);
ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
u8 *vtag);
-ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
- size_t length, u8 *vtag);
+ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ u8 *vtag, unsigned long timeout);
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb);
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
mei_cldev_cb_t notif_cb);
-const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev);
u8 mei_cldev_ver(const struct mei_cl_device *cldev);
+size_t mei_cldev_mtu(const struct mei_cl_device *cldev);
void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev);
void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data);
int mei_cldev_enable(struct mei_cl_device *cldev);
int mei_cldev_disable(struct mei_cl_device *cldev);
-bool mei_cldev_enabled(struct mei_cl_device *cldev);
+bool mei_cldev_enabled(const struct mei_cl_device *cldev);
+ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev,
+ u8 client_id, u32 fence_id,
+ struct scatterlist *sg_in,
+ size_t total_in_len,
+ struct scatterlist *sg_out);
+
+void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size);
+int mei_cldev_dma_unmap(struct mei_cl_device *cldev);
#endif /* _LINUX_MEI_CL_BUS_H */
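A hedged sketch of the new bounded-send variant (the timeout unit is an assumption here; check the mei implementation for the exact semantics):

static ssize_t example_send(struct mei_cl_device *cldev,
                            const u8 *buf, size_t len)
{
        return mei_cldev_send_timeout(cldev, buf, len,
                                      2000 /* timeout, assumed ms */);
}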
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 5c4a18a91f89..07584c5e36fb 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -16,10 +16,6 @@
#include <asm/mem_encrypt.h>
-#else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
-
-static inline bool mem_encrypt_active(void) { return false; }
-
#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
#ifdef CONFIG_AMD_MEM_ENCRYPT
@@ -30,11 +26,34 @@ static inline bool mem_encrypt_active(void) { return false; }
*/
#define __sme_set(x) ((x) | sme_me_mask)
#define __sme_clr(x) ((x) & ~sme_me_mask)
+
+#define dma_addr_encrypted(x) __sme_set(x)
+#define dma_addr_canonical(x) __sme_clr(x)
+
#else
#define __sme_set(x) (x)
#define __sme_clr(x) (x)
#endif
+/*
+ * dma_addr_encrypted() and dma_addr_unencrypted() convert a given DMA
+ * address to the respective type of addressing.
+ *
+ * dma_addr_canonical() reverses either conversion, returning the
+ * canonical address.
+ */
+#ifndef dma_addr_encrypted
+#define dma_addr_encrypted(x) (x)
+#endif
+
+#ifndef dma_addr_unencrypted
+#define dma_addr_unencrypted(x) (x)
+#endif
+
+#ifndef dma_addr_canonical
+#define dma_addr_canonical(x) (x)
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __MEM_ENCRYPT_H__ */
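An illustrative round trip with the new macros; on configurations without AMD memory encryption all three are identity operations:

static dma_addr_t example_roundtrip(dma_addr_t addr)
{
        dma_addr_t enc = dma_addr_encrypted(addr);    /* set sme_me_mask */

        return dma_addr_canonical(enc);               /* strip it again */
}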
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 5984fff3f175..221118b5a16e 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
-#ifdef __KERNEL__
/*
* Logical memory blocks.
@@ -28,15 +27,40 @@ extern unsigned long long max_possible_pfn;
/**
* enum memblock_flags - definition of memory region attributes
* @MEMBLOCK_NONE: no special request
- * @MEMBLOCK_HOTPLUG: hotpluggable region
+ * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory
+ * map during early boot as hot(un)pluggable system RAM (e.g., memory range
+ * that might get hotunplugged later). With "movable_node" set on the kernel
+ * commandline, try keeping this memory region hotunpluggable. Does not apply
+ * to memblocks added ("hotplugged") after early boot.
* @MEMBLOCK_MIRROR: mirrored region
- * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
+ * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
+ * reserved in the memory map; refer to memblock_mark_nomap() description
+ * for further details
+ * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added
+ * via a driver, and never indicated in the firmware-provided memory map as
+ * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
+ * kernel resource tree.
+ * @MEMBLOCK_RSRV_NOINIT: reserved memory region for which struct pages are not
+ * fully initialized. Users of this flag are responsible to properly initialize
+ * struct pages of this region
+ * @MEMBLOCK_RSRV_KERN: memory region that is reserved for kernel use,
+ * either explicitly with memblock_reserve_kern() or via memblock
+ * allocation APIs. All memblock allocations set this flag.
+ * @MEMBLOCK_KHO_SCRATCH: memory region that kexec can pass to the next
+ * kernel in handover mode. During early boot, we do not know about all
+ * memory reservations yet, so we get scratch memory from the previous
+ * kernel that we know is good to use. It is the only memory that
+ * allocations may happen from in this phase.
*/
enum memblock_flags {
MEMBLOCK_NONE = 0x0, /* No special request */
MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
MEMBLOCK_MIRROR = 0x2, /* mirrored region */
MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */
+ MEMBLOCK_DRIVER_MANAGED = 0x8, /* always detected via a driver */
+ MEMBLOCK_RSRV_NOINIT = 0x10, /* don't initialize struct pages */
+ MEMBLOCK_RSRV_KERN = 0x20, /* memory reserved for kernel use */
+ MEMBLOCK_KHO_SCRATCH = 0x40, /* scratch memory for kexec handover */
};
/**
@@ -50,7 +74,7 @@ struct memblock_region {
phys_addr_t base;
phys_addr_t size;
enum memblock_flags flags;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int nid;
#endif
};
@@ -97,28 +121,44 @@ void memblock_discard(void);
static inline void memblock_discard(void) {}
#endif
-phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
- phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
-int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
+ enum memblock_flags flags);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
-int memblock_free(phys_addr_t base, phys_addr_t size);
-int memblock_reserve(phys_addr_t base, phys_addr_t size);
+int memblock_phys_free(phys_addr_t base, phys_addr_t size);
+int __memblock_reserve(phys_addr_t base, phys_addr_t size, int nid,
+ enum memblock_flags flags);
+
+static __always_inline int memblock_reserve(phys_addr_t base, phys_addr_t size)
+{
+ return __memblock_reserve(base, size, NUMA_NO_NODE, 0);
+}
+
+static __always_inline int memblock_reserve_kern(phys_addr_t base, phys_addr_t size)
+{
+ return __memblock_reserve(base, size, NUMA_NO_NODE, MEMBLOCK_RSRV_KERN);
+}
+
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
+unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
+ phys_addr_t base2, phys_addr_t size2);
bool memblock_overlaps_region(struct memblock_type *type,
phys_addr_t base, phys_addr_t size);
+bool memblock_validate_numa_coverage(unsigned long threshold_bytes);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
+int memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size);
+int memblock_mark_kho_scratch(phys_addr_t base, phys_addr_t size);
+int memblock_clear_kho_scratch(phys_addr_t base, phys_addr_t size);
-void memblock_free_all(void);
-void reset_node_managed_pages(pg_data_t *pgdat);
+void memblock_free(void *ptr, size_t size);
void reset_all_zones_managed_pages(void);
/* Low level functions */
@@ -132,7 +172,7 @@ void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
-void __memblock_free_late(phys_addr_t base, phys_addr_t size);
+void memblock_free_late(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
@@ -207,7 +247,8 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
*/
#define for_each_mem_range(i, p_start, p_end) \
__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \
- MEMBLOCK_NONE, p_start, p_end, NULL)
+ MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \
+ p_start, p_end, NULL)
/**
* for_each_mem_range_rev - reverse iterate through memblock areas from
@@ -218,7 +259,8 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
*/
#define for_each_mem_range_rev(i, p_start, p_end) \
__for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
- MEMBLOCK_NONE, p_start, p_end, NULL)
+ MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\
+ p_start, p_end, NULL)
/**
* for_each_reserved_mem_range - iterate over all reserved memblock areas
@@ -248,6 +290,21 @@ static inline bool memblock_is_nomap(struct memblock_region *m)
return m->flags & MEMBLOCK_NOMAP;
}
+static inline bool memblock_is_reserved_noinit(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_RSRV_NOINIT;
+}
+
+static inline bool memblock_is_driver_managed(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_DRIVER_MANAGED;
+}
+
+static inline bool memblock_is_kho_scratch(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_KHO_SCRATCH;
+}
+
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
@@ -267,49 +324,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
-#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
- unsigned long *out_spfn,
- unsigned long *out_epfn);
-/**
- * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
- * memblock areas
- * @i: u64 used as loop variable
- * @zone: zone in which all of the memory blocks reside
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- *
- * Walks over free (memory && !reserved) areas of memblock in a specific
- * zone. Available once memblock and an empty zone is initialized. The main
- * assumption is that the zone start, end, and pgdat have been associated.
- * This way we can use the zone to determine NUMA node, and if a given part
- * of the memblock is valid for the zone.
- */
-#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end) \
- for (i = 0, \
- __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end); \
- i != U64_MAX; \
- __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
-
-/**
- * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
- * free memblock areas from a given point
- * @i: u64 used as loop variable
- * @zone: zone in which all of the memory blocks reside
- * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
- * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
- *
- * Walks over free (memory && !reserved) areas of memblock in a specific
- * zone, continuing from current position. Available as soon as memblock is
- * initialized.
- */
-#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
- for (; i != U64_MAX; \
- __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
-
-int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
-
-#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/**
* for_each_free_mem_range - iterate through free memblock areas
@@ -347,7 +361,7 @@ int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
int memblock_set_node(phys_addr_t base, phys_addr_t size,
struct memblock_type *type, int nid);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
r->nid = nid;
@@ -366,12 +380,16 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
{
return 0;
}
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
+#endif /* CONFIG_NUMA */
/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
-#define MEMBLOCK_ALLOC_KASAN 1
+/*
+ * MEMBLOCK_ALLOC_NOLEAKTRACE avoids kmemleak tracing. It implies
+ * MEMBLOCK_ALLOC_ACCESSIBLE
+ */
+#define MEMBLOCK_ALLOC_NOLEAKTRACE 1
/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0
@@ -387,8 +405,8 @@ phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
-static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
- phys_addr_t align)
+static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
+ phys_addr_t align)
{
return memblock_phys_alloc_range(size, align, 0,
MEMBLOCK_ALLOC_ACCESSIBLE);
@@ -410,6 +428,12 @@ static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
+void *__memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
+ const char *func);
+
+#define memblock_alloc_or_panic(size, align) \
+ __memblock_alloc_or_panic(size, align, __func__)
+
static inline void *memblock_alloc_raw(phys_addr_t size,
phys_addr_t align)
{
@@ -418,7 +442,7 @@ static inline void *memblock_alloc_raw(phys_addr_t size,
NUMA_NO_NODE);
}
-static inline void *memblock_alloc_from(phys_addr_t size,
+static __always_inline void *memblock_alloc_from(phys_addr_t size,
phys_addr_t align,
phys_addr_t min_addr)
{
@@ -440,23 +464,6 @@ static inline void *memblock_alloc_node(phys_addr_t size,
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
-static inline void memblock_free_early(phys_addr_t base,
- phys_addr_t size)
-{
- memblock_free(base, size);
-}
-
-static inline void memblock_free_early_nid(phys_addr_t base,
- phys_addr_t size, int nid)
-{
- memblock_free(base, size);
-}
-
-static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
-{
- __memblock_free_late(base, size);
-}
-
/*
* Set the allocation direction to bottom-up or top-down.
*/
@@ -477,6 +484,8 @@ static inline __init_memblock bool memblock_bottom_up(void)
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
+phys_addr_t memblock_reserved_kern_size(phys_addr_t limit, int nid);
+unsigned long memblock_estimated_nr_free_pages(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
@@ -554,7 +563,7 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
}
/**
- * for_each_mem_region - itereate over memory regions
+ * for_each_mem_region - iterate over memory regions
* @region: loop variable
*/
#define for_each_mem_region(region) \
@@ -582,9 +591,7 @@ extern void *alloc_large_system_hash(const char *tablename,
unsigned long high_limit);
#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
-#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
- * shift passed via *_hash_shift */
-#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
+#define HASH_ZERO 0x00000002 /* Zero allocated hash table */
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
* sufficient vmalloc space.
@@ -597,13 +604,21 @@ extern int hashdist; /* Distribute hashes across NUMA nodes? */
#endif
#ifdef CONFIG_MEMTEST
-extern void early_memtest(phys_addr_t start, phys_addr_t end);
+void early_memtest(phys_addr_t start, phys_addr_t end);
+void memtest_report_meminfo(struct seq_file *m);
#else
-static inline void early_memtest(phys_addr_t start, phys_addr_t end)
-{
-}
+static inline void early_memtest(phys_addr_t start, phys_addr_t end) { }
+static inline void memtest_report_meminfo(struct seq_file *m) { }
#endif
-#endif /* __KERNEL__ */
+#ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
+void memblock_set_kho_scratch_only(void);
+void memblock_clear_kho_scratch_only(void);
+void memmap_init_kho_scratch_pages(void);
+#else
+static inline void memblock_set_kho_scratch_only(void) { }
+static inline void memblock_clear_kho_scratch_only(void) { }
+static inline void memmap_init_kho_scratch_pages(void) {}
+#endif
#endif /* _LINUX_MEMBLOCK_H */
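A sketch of the new reservation and allocation entry points (the physical range is illustrative; both calls are early-boot only):

static void __init example_early_setup(void)
{
        void *buf;

        /* reservation accounted as kernel memory (MEMBLOCK_RSRV_KERN) */
        memblock_reserve_kern(0x100000, SZ_1M);

        /* allocate, panicking on failure instead of returning NULL */
        buf = memblock_alloc_or_panic(PAGE_SIZE, SMP_CACHE_BYTES);
        (void)buf;
}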
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index c193be760709..0651865a4564 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -14,6 +14,7 @@
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
+#include <linux/kernel.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
@@ -21,6 +22,7 @@
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
+#include <linux/shrinker.h>
struct mem_cgroup;
struct obj_cgroup;
@@ -33,6 +35,10 @@ enum memcg_stat_item {
MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
MEMCG_SOCK,
MEMCG_PERCPU_B,
+ MEMCG_VMALLOC,
+ MEMCG_KMEM,
+ MEMCG_ZSWAP_B,
+ MEMCG_ZSWAPPED,
MEMCG_NR_STAT,
};
@@ -42,118 +48,78 @@ enum memcg_memory_event {
MEMCG_MAX,
MEMCG_OOM,
MEMCG_OOM_KILL,
+ MEMCG_OOM_GROUP_KILL,
MEMCG_SWAP_HIGH,
MEMCG_SWAP_MAX,
MEMCG_SWAP_FAIL,
+ MEMCG_SOCK_THROTTLED,
MEMCG_NR_MEMORY_EVENTS,
};
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
- unsigned int generation;
+ int generation;
};
#ifdef CONFIG_MEMCG
#define MEM_CGROUP_ID_SHIFT 16
-#define MEM_CGROUP_ID_MAX USHRT_MAX
struct mem_cgroup_id {
int id;
refcount_t ref;
};
-/*
- * Per memcg event counter is incremented at every pagein/pageout. With THP,
- * it will be incremented by the number of pages. This counter is used
- * to trigger some periodic events. This is straightforward and better
- * than using jiffies etc. to handle periodic memcg event.
- */
-enum mem_cgroup_events_target {
- MEM_CGROUP_TARGET_THRESH,
- MEM_CGROUP_TARGET_SOFTLIMIT,
- MEM_CGROUP_NTARGETS,
-};
-
-struct memcg_vmstats_percpu {
- /* Local (CPU and cgroup) page state & events */
- long state[MEMCG_NR_STAT];
- unsigned long events[NR_VM_EVENT_ITEMS];
-
- /* Delta calculation for lockless upward propagation */
- long state_prev[MEMCG_NR_STAT];
- unsigned long events_prev[NR_VM_EVENT_ITEMS];
-
- /* Cgroup1: threshold notifications & softlimit tree updates */
- unsigned long nr_page_events;
- unsigned long targets[MEM_CGROUP_NTARGETS];
-};
-
-struct memcg_vmstats {
- /* Aggregated (CPU and subtree) page state & events */
- long state[MEMCG_NR_STAT];
- unsigned long events[NR_VM_EVENT_ITEMS];
-
- /* Pending child counts during tree propagation */
- long state_pending[MEMCG_NR_STAT];
- unsigned long events_pending[NR_VM_EVENT_ITEMS];
-};
+struct memcg_vmstats_percpu;
+struct memcg1_events_percpu;
+struct memcg_vmstats;
+struct lruvec_stats_percpu;
+struct lruvec_stats;
struct mem_cgroup_reclaim_iter {
struct mem_cgroup *position;
/* scan generation, increased every round-trip */
- unsigned int generation;
-};
-
-struct lruvec_stat {
- long count[NR_VM_NODE_STAT_ITEMS];
-};
-
-struct batched_lruvec_stat {
- s32 count[NR_VM_NODE_STAT_ITEMS];
-};
-
-/*
- * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
- * shrinkers, which have elements charged to this memcg.
- */
-struct shrinker_info {
- struct rcu_head rcu;
- atomic_long_t *nr_deferred;
- unsigned long *map;
+ atomic_t generation;
};
/*
* per-node information in memory controller.
*/
struct mem_cgroup_per_node {
- struct lruvec lruvec;
+ /* Keep the read-only fields at the start */
+ struct mem_cgroup *memcg; /* Back pointer, we cannot */
+ /* use container_of */
+ struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
+ struct lruvec_stats *lruvec_stats;
+ struct shrinker_info __rcu *shrinker_info;
+
+#ifdef CONFIG_MEMCG_V1
/*
- * Legacy local VM stats. This should be struct lruvec_stat and
- * cannot be optimized to struct batched_lruvec_stat. Because
- * the threshold of the lruvec_stat_cpu can be as big as
- * MEMCG_CHARGE_BATCH * PAGE_SIZE. It can fit into s32. But this
- * filed has no upper limit.
+	 * Memcg-v1-only fields sit in the middle as a buffer between the
+	 * read-mostly fields and the often-updated fields to avoid false
+	 * sharing. If the v1 fields are not present, explicit padding is
+	 * needed instead.
*/
- struct lruvec_stat __percpu *lruvec_stat_local;
-
- /* Subtree VM stats (batched updates) */
- struct batched_lruvec_stat __percpu *lruvec_stat_cpu;
- atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];
-
- unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
-
- struct mem_cgroup_reclaim_iter iter;
-
- struct shrinker_info __rcu *shrinker_info;
struct rb_node tree_node; /* RB tree node */
unsigned long usage_in_excess;/* Set to the value by which */
/* the soft limit is exceeded*/
bool on_tree;
- struct mem_cgroup *memcg; /* Back pointer, we cannot */
- /* use container_of */
+#else
+ CACHELINE_PADDING(_pad1_);
+#endif
+
+ /* Fields which get updated often at the end. */
+ struct lruvec lruvec;
+ CACHELINE_PADDING(_pad2_);
+ unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
+ struct mem_cgroup_reclaim_iter iter;
+
+#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
+ /* slab stats for nmi context */
+ atomic_t slab_reclaimable;
+ atomic_t slab_unreclaimable;
+#endif
};
struct mem_cgroup_threshold {
@@ -168,7 +134,7 @@ struct mem_cgroup_threshold_ary {
/* Size of entries[] */
unsigned int size;
/* Array of thresholds */
- struct mem_cgroup_threshold entries[];
+ struct mem_cgroup_threshold entries[] __counted_by(size);
};
struct mem_cgroup_thresholds {
@@ -182,21 +148,6 @@ struct mem_cgroup_thresholds {
struct mem_cgroup_threshold_ary *spare;
};
-enum memcg_kmem_state {
- KMEM_NONE,
- KMEM_ALLOCATED,
- KMEM_ONLINE,
-};
-
-#if defined(CONFIG_SMP)
-struct memcg_padding {
- char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define MEMCG_PADDING(name) struct memcg_padding name;
-#else
-#define MEMCG_PADDING(name)
-#endif
-
/*
* Remember four most recent foreign writebacks with dirty pages in this
* cgroup. Inode sharing is expected to be uncommon and, even if we miss
@@ -225,7 +176,7 @@ struct obj_cgroup {
struct mem_cgroup *memcg;
atomic_t nr_charged_bytes;
union {
- struct list_head list;
+ struct list_head list; /* protected by objcg_lock */
struct rcu_head rcu;
};
};
@@ -250,14 +201,23 @@ struct mem_cgroup {
struct page_counter memsw; /* v1 only */
};
- /* Legacy consumer-oriented counters */
- struct page_counter kmem; /* v1 only */
- struct page_counter tcpmem; /* v1 only */
+ /* registered local peak watchers */
+ struct list_head memory_peaks;
+ struct list_head swap_peaks;
+ spinlock_t peaks_lock;
/* Range enforcement for interrupt charges */
struct work_struct high_work;
- unsigned long soft_limit;
+#ifdef CONFIG_ZSWAP
+ unsigned long zswap_max;
+
+ /*
+ * Prevent pages from this memcg from being written back from zswap to
+ * swap, and from being swapped out on zswap store failures.
+ */
+ bool zswap_writeback;
+#endif
/* vmpressure notifications */
struct vmpressure vmpressure;
@@ -267,13 +227,7 @@ struct mem_cgroup {
*/
bool oom_group;
- /* protected by memcg_oom_lock */
- bool oom_lock;
- int under_oom;
-
- int swappiness;
- /* OOM-Killer disable */
- int oom_kill_disable;
+ int swappiness;
/* memory.events and memory.events.local */
struct cgroup_file events_file;
@@ -282,56 +236,36 @@ struct mem_cgroup {
/* handle for "memory.swap.events" */
struct cgroup_file swap_events_file;
- /* protect arrays of thresholds */
- struct mutex thresholds_lock;
-
- /* thresholds for memory usage. RCU-protected */
- struct mem_cgroup_thresholds thresholds;
-
- /* thresholds for mem+swap usage. RCU-protected */
- struct mem_cgroup_thresholds memsw_thresholds;
-
- /* For oom notifier event fd */
- struct list_head oom_notify;
-
- /*
- * Should we move charges of a task when a task is moved into this
- * mem_cgroup ? And what type of charges should we move ?
- */
- unsigned long move_charge_at_immigrate;
- /* taken only while moving_account > 0 */
- spinlock_t move_lock;
- unsigned long move_lock_flags;
-
- MEMCG_PADDING(_pad1_);
-
/* memory.stat */
- struct memcg_vmstats vmstats;
+ struct memcg_vmstats *vmstats;
/* memory.events */
atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
- unsigned long socket_pressure;
-
- /* Legacy tcp memory accounting */
- bool tcpmem_active;
- int tcpmem_pressure;
-
-#ifdef CONFIG_MEMCG_KMEM
- int kmemcg_id;
- enum memcg_kmem_state kmem_state;
- struct obj_cgroup __rcu *objcg;
- struct list_head objcg_list; /* list of inherited objcgs */
+#ifdef CONFIG_MEMCG_NMI_SAFETY_REQUIRES_ATOMIC
+ /* MEMCG_KMEM for nmi context */
+ atomic_t kmem_stat;
#endif
-
- MEMCG_PADDING(_pad2_);
-
/*
- * set > 0 if pages under this cgroup are moving to other cgroup.
+	 * Hint of reclaim pressure for socket memory management. Note
+ * that this indicator should NOT be used in legacy cgroup mode
+ * where socket memory is accounted/charged separately.
+ */
+ u64 socket_pressure;
+#if BITS_PER_LONG < 64
+ seqlock_t socket_pressure_seqlock;
+#endif
+ int kmemcg_id;
+ /*
+	 * memcg->objcg is wiped out as a part of the objcg reparenting
+	 * process. memcg->orig_objcg preserves a pointer (and a reference)
+	 * to the original objcg until the end of life of the memcg.
*/
- atomic_t moving_account;
- struct task_struct *move_lock_task;
+ struct obj_cgroup __rcu *objcg;
+ struct obj_cgroup *orig_objcg;
+ /* list of inherited objcgs, protected by objcg_lock */
+ struct list_head objcg_list;
struct memcg_vmstats_percpu __percpu *vmstats_percpu;
@@ -341,266 +275,273 @@ struct mem_cgroup {
struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif
- /* List of events which userspace want to receive */
- struct list_head event_list;
- spinlock_t event_list_lock;
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split deferred_split_queue;
#endif
- struct mem_cgroup_per_node *nodeinfo[0];
- /* WARNING: nodeinfo must be the last member here */
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
+ /* per-memcg mm_struct list */
+ struct lru_gen_mm_list mm_list;
+#endif
+
+#ifdef CONFIG_MEMCG_V1
+ /* Legacy consumer-oriented counters */
+ struct page_counter kmem; /* v1 only */
+ struct page_counter tcpmem; /* v1 only */
+
+ struct memcg1_events_percpu __percpu *events_percpu;
+
+ unsigned long soft_limit;
+
+ /* protected by memcg_oom_lock */
+ bool oom_lock;
+ int under_oom;
+
+ /* OOM-Killer disable */
+ int oom_kill_disable;
+
+ /* protect arrays of thresholds */
+ struct mutex thresholds_lock;
+
+ /* thresholds for memory usage. RCU-protected */
+ struct mem_cgroup_thresholds thresholds;
+
+ /* thresholds for mem+swap usage. RCU-protected */
+ struct mem_cgroup_thresholds memsw_thresholds;
+
+ /* For oom notifier event fd */
+ struct list_head oom_notify;
+
+ /* Legacy tcp memory accounting */
+ bool tcpmem_active;
+ int tcpmem_pressure;
+
+ /* List of events which userspace want to receive */
+ struct list_head event_list;
+ spinlock_t event_list_lock;
+#endif /* CONFIG_MEMCG_V1 */
+
+ struct mem_cgroup_per_node *nodeinfo[];
};
/*
- * size of first charge trial. "32" comes from vmscan.c's magic value.
- * TODO: maybe necessary to use big numbers in big irons.
+ * Size of the first charge trial.
+ * TODO: maybe necessary to use big numbers on big irons, or to size this
+ * dynamically based on the workload.
*/
-#define MEMCG_CHARGE_BATCH 32U
+#define MEMCG_CHARGE_BATCH 64U
extern struct mem_cgroup *root_mem_cgroup;
enum page_memcg_data_flags {
- /* page->memcg_data is a pointer to an objcgs vector */
- MEMCG_DATA_OBJCGS = (1UL << 0),
+	/* page->memcg_data is a pointer to a slabobj_ext vector */
+ MEMCG_DATA_OBJEXTS = (1UL << 0),
/* page has been accounted as a non-slab kernel page */
MEMCG_DATA_KMEM = (1UL << 1),
/* the next bit after the last actual flag */
__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};
-#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)
+#define __OBJEXTS_ALLOC_FAIL MEMCG_DATA_OBJEXTS
+#define __FIRST_OBJEXT_FLAG __NR_MEMCG_DATA_FLAGS
+
+#else /* CONFIG_MEMCG */
+
+#define __OBJEXTS_ALLOC_FAIL (1UL << 0)
+#define __FIRST_OBJEXT_FLAG (1UL << 0)
-static inline bool PageMemcgKmem(struct page *page);
+#endif /* CONFIG_MEMCG */
+
+enum objext_flags {
+ /*
+ * Use bit 0 with zero other bits to signal that slabobj_ext vector
+ * failed to allocate. The same bit 0 with valid upper bits means
+ * MEMCG_DATA_OBJEXTS.
+ */
+ OBJEXTS_ALLOC_FAIL = __OBJEXTS_ALLOC_FAIL,
+ /* slabobj_ext vector allocated with kmalloc_nolock() */
+ OBJEXTS_NOSPIN_ALLOC = __FIRST_OBJEXT_FLAG,
+ /* the next bit after the last actual flag */
+ __NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1),
+};
+
+#define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1)
+
+#ifdef CONFIG_MEMCG
+
+static inline bool folio_memcg_kmem(struct folio *folio);
/*
* After the initialization objcg->memcg is always pointing at
* a valid memcg, but can be atomically swapped to the parent memcg.
*
- * The caller must ensure that the returned memcg won't be released:
- * e.g. acquire the rcu_read_lock or css_set_lock.
+ * The caller must ensure that the returned memcg won't be released.
*/
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
+ lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex));
return READ_ONCE(objcg->memcg);
}
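An illustrative caller for the locking rule the new lockdep_assert_once() enforces; example_objcg_memcg_id() is hypothetical. Since an objcg can be reparented concurrently, the objcg-to-memcg lookup must sit inside an RCU read section (or run under cgroup_mutex):

	static unsigned short example_objcg_memcg_id(struct obj_cgroup *objcg)
	{
		unsigned short id;

		rcu_read_lock();
		id = mem_cgroup_id(obj_cgroup_memcg(objcg));
		rcu_read_unlock();
		return id;
	}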
/*
- * __page_memcg - get the memory cgroup associated with a non-kmem page
- * @page: a pointer to the page struct
+ * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
+ * @folio: Pointer to the folio.
*
- * Returns a pointer to the memory cgroup associated with the page,
- * or NULL. This function assumes that the page is known to have a
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
* proper memory cgroup pointer. It's not safe to call this function
- * against some type of pages, e.g. slab pages or ex-slab pages or
- * kmem pages.
+ * against some type of folios, e.g. slab folios or ex-slab folios or
+ * kmem folios.
*/
-static inline struct mem_cgroup *__page_memcg(struct page *page)
+static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
- unsigned long memcg_data = page->memcg_data;
+ unsigned long memcg_data = folio->memcg_data;
- VM_BUG_ON_PAGE(PageSlab(page), page);
- VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);
- VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);
+ VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);
- return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+ return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}
/*
- * __page_objcg - get the object cgroup associated with a kmem page
- * @page: a pointer to the page struct
+ * __folio_objcg - get the object cgroup associated with a kmem folio.
+ * @folio: Pointer to the folio.
*
- * Returns a pointer to the object cgroup associated with the page,
- * or NULL. This function assumes that the page is known to have a
+ * Returns a pointer to the object cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
* proper object cgroup pointer. It's not safe to call this function
- * against some type of pages, e.g. slab pages or ex-slab pages or
- * LRU pages.
+ * against some type of folios, e.g. slab folios or ex-slab folios or
+ * LRU folios.
*/
-static inline struct obj_cgroup *__page_objcg(struct page *page)
+static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
- unsigned long memcg_data = page->memcg_data;
+ unsigned long memcg_data = folio->memcg_data;
- VM_BUG_ON_PAGE(PageSlab(page), page);
- VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);
- VM_BUG_ON_PAGE(!(memcg_data & MEMCG_DATA_KMEM), page);
+ VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
+ VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio);
+ VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);
- return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+ return (struct obj_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}
/*
- * page_memcg - get the memory cgroup associated with a page
- * @page: a pointer to the page struct
+ * folio_memcg - Get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
*
- * Returns a pointer to the memory cgroup associated with the page,
- * or NULL. This function assumes that the page is known to have a
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function assumes that the folio is known to have a
* proper memory cgroup pointer. It's not safe to call this function
- * against some type of pages, e.g. slab pages or ex-slab pages.
+ * against some type of folios, e.g. slab folios or ex-slab folios.
*
- * For a non-kmem page any of the following ensures page and memcg binding
+ * For a non-kmem folio any of the following ensures folio and memcg binding
* stability:
*
- * - the page lock
+ * - the folio lock
* - LRU isolation
- * - lock_page_memcg()
* - exclusive reference
*
- * For a kmem page a caller should hold an rcu read lock to protect memcg
- * associated with a kmem page from being released.
+ * For a kmem folio a caller should hold an rcu read lock to protect memcg
+ * associated with a kmem folio from being released.
*/
-static inline struct mem_cgroup *page_memcg(struct page *page)
+static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
- if (PageMemcgKmem(page))
- return obj_cgroup_memcg(__page_objcg(page));
- else
- return __page_memcg(page);
+ if (folio_memcg_kmem(folio))
+ return obj_cgroup_memcg(__folio_objcg(folio));
+ return __folio_memcg(folio);
}
/*
- * page_memcg_rcu - locklessly get the memory cgroup associated with a page
- * @page: a pointer to the page struct
+ * folio_memcg_charged - Check if a folio is charged to a memory cgroup.
+ * @folio: Pointer to the folio.
*
- * Returns a pointer to the memory cgroup associated with the page,
- * or NULL. This function assumes that the page is known to have a
- * proper memory cgroup pointer. It's not safe to call this function
- * against some type of pages, e.g. slab pages or ex-slab pages.
+ * Returns true if the folio is charged to a memory cgroup, otherwise false.
*/
-static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+static inline bool folio_memcg_charged(struct folio *folio)
{
- unsigned long memcg_data = READ_ONCE(page->memcg_data);
-
- VM_BUG_ON_PAGE(PageSlab(page), page);
- WARN_ON_ONCE(!rcu_read_lock_held());
-
- if (memcg_data & MEMCG_DATA_KMEM) {
- struct obj_cgroup *objcg;
-
- objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
- return obj_cgroup_memcg(objcg);
- }
-
- return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+ return folio->memcg_data != 0;
}
/*
- * page_memcg_check - get the memory cgroup associated with a page
- * @page: a pointer to the page struct
+ * folio_memcg_check - Get the memory cgroup associated with a folio.
+ * @folio: Pointer to the folio.
*
- * Returns a pointer to the memory cgroup associated with the page,
- * or NULL. This function unlike page_memcg() can take any page
- * as an argument. It has to be used in cases when it's not known if a page
+ * Returns a pointer to the memory cgroup associated with the folio,
+ * or NULL. This function unlike folio_memcg() can take any folio
+ * as an argument. It has to be used in cases when it's not known if a folio
* has an associated memory cgroup pointer or an object cgroups vector or
* an object cgroup.
*
- * For a non-kmem page any of the following ensures page and memcg binding
+ * For a non-kmem folio any of the following ensures folio and memcg binding
* stability:
*
- * - the page lock
+ * - the folio lock
* - LRU isolation
- * - lock_page_memcg()
* - exclusive reference
*
- * For a kmem page a caller should hold an rcu read lock to protect memcg
- * associated with a kmem page from being released.
+ * For a kmem folio a caller should hold an rcu read lock to protect memcg
+ * associated with a kmem folio from being released.
*/
-static inline struct mem_cgroup *page_memcg_check(struct page *page)
+static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
/*
- * Because page->memcg_data might be changed asynchronously
- * for slab pages, READ_ONCE() should be used here.
+ * Because folio->memcg_data might be changed asynchronously
+ * for slabs, READ_ONCE() should be used here.
*/
- unsigned long memcg_data = READ_ONCE(page->memcg_data);
+ unsigned long memcg_data = READ_ONCE(folio->memcg_data);
- if (memcg_data & MEMCG_DATA_OBJCGS)
+ if (memcg_data & MEMCG_DATA_OBJEXTS)
return NULL;
if (memcg_data & MEMCG_DATA_KMEM) {
struct obj_cgroup *objcg;
- objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+ objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
return obj_cgroup_memcg(objcg);
}
- return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+ return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
}
-#ifdef CONFIG_MEMCG_KMEM
-/*
- * PageMemcgKmem - check if the page has MemcgKmem flag set
- * @page: a pointer to the page struct
- *
- * Checks if the page has MemcgKmem flag set. The caller must ensure that
- * the page has an associated memory cgroup. It's not safe to call this function
- * against some types of pages, e.g. slab pages.
- */
-static inline bool PageMemcgKmem(struct page *page)
+static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
- VM_BUG_ON_PAGE(page->memcg_data & MEMCG_DATA_OBJCGS, page);
- return page->memcg_data & MEMCG_DATA_KMEM;
+ if (PageTail(page))
+ return NULL;
+ return folio_memcg_check((struct folio *)page);
}
-/*
- * page_objcgs - get the object cgroups vector associated with a page
- * @page: a pointer to the page struct
- *
- * Returns a pointer to the object cgroups vector associated with the page,
- * or NULL. This function assumes that the page is known to have an
- * associated object cgroups vector. It's not safe to call this function
- * against pages, which might have an associated memory cgroup: e.g.
- * kernel stack pages.
- */
-static inline struct obj_cgroup **page_objcgs(struct page *page)
+static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
{
- unsigned long memcg_data = READ_ONCE(page->memcg_data);
+ struct mem_cgroup *memcg;
- VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS), page);
- VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);
+ rcu_read_lock();
+retry:
+ memcg = obj_cgroup_memcg(objcg);
+ if (unlikely(!css_tryget(&memcg->css)))
+ goto retry;
+ rcu_read_unlock();
- return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+ return memcg;
}
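The retry loop above re-reads objcg->memcg whenever css_tryget() fails, which can happen when the memcg dies and the objcg is reparented between the read and the tryget. On the caller side, the reference obtained here outlives the RCU section and must be dropped; a hypothetical sketch:

	static void example_use_objcg_memcg(struct obj_cgroup *objcg)
	{
		struct mem_cgroup *memcg = get_mem_cgroup_from_objcg(objcg);

		/* ... memcg is safe to use outside rcu_read_lock() here ... */
		mem_cgroup_put(memcg);
	}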
/*
- * page_objcgs_check - get the object cgroups vector associated with a page
- * @page: a pointer to the page struct
+ * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
+ * @folio: Pointer to the folio.
*
- * Returns a pointer to the object cgroups vector associated with the page,
- * or NULL. This function is safe to use if the page can be directly associated
- * with a memory cgroup.
+ * Checks if the folio has the memcg_kmem flag set. The caller must ensure
+ * that the folio has an associated memory cgroup. It's not safe to call
+ * this function against some types of folios, e.g. slab folios.
*/
-static inline struct obj_cgroup **page_objcgs_check(struct page *page)
+static inline bool folio_memcg_kmem(struct folio *folio)
{
- unsigned long memcg_data = READ_ONCE(page->memcg_data);
-
- if (!memcg_data || !(memcg_data & MEMCG_DATA_OBJCGS))
- return NULL;
-
- VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);
-
- return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
+ VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
+ VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJEXTS, folio);
+ return folio->memcg_data & MEMCG_DATA_KMEM;
}
-#else
static inline bool PageMemcgKmem(struct page *page)
{
- return false;
-}
-
-static inline struct obj_cgroup **page_objcgs(struct page *page)
-{
- return NULL;
-}
-
-static inline struct obj_cgroup **page_objcgs_check(struct page *page)
-{
- return NULL;
-}
-#endif
-
-static __always_inline bool memcg_stat_item_in_bytes(int idx)
-{
- if (idx == MEMCG_PERCPU_B)
- return true;
- return vmstat_item_in_bytes(idx);
+ return folio_memcg_kmem(page_folio(page));
}
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
@@ -613,17 +554,20 @@ static inline bool mem_cgroup_disabled(void)
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
- struct mem_cgroup *memcg,
- bool in_low_reclaim)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg,
+ unsigned long *min,
+ unsigned long *low)
{
+ *min = *low = 0;
+
if (mem_cgroup_disabled())
- return 0;
+ return;
/*
* There is no reclaim protection applied to a targeted reclaim.
* We are special casing this specific case here because
- * mem_cgroup_protected calculation is not robust enough to keep
+ * mem_cgroup_calculate_protection is not robust enough to keep
* the protection invariant for calculated effective values for
* parallel reclaimers with different reclaim target. This is
* especially a problem for tail memcgs (as they have pages on LRU)
@@ -654,55 +598,101 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
*
*/
if (root == memcg)
- return 0;
-
- if (in_low_reclaim)
- return READ_ONCE(memcg->memory.emin);
+ return;
- return max(READ_ONCE(memcg->memory.emin),
- READ_ONCE(memcg->memory.elow));
+ *min = READ_ONCE(memcg->memory.emin);
+ *low = READ_ONCE(memcg->memory.elow);
}
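An illustrative caller for the new out-parameter form; example_below_protection() is hypothetical. Instead of passing in_low_reclaim and getting a single threshold back, reclaim code now fetches both effective thresholds at once and does its own comparison, mirroring mem_cgroup_below_low()/mem_cgroup_below_min() below:

	static bool example_below_protection(struct mem_cgroup *root,
					     struct mem_cgroup *memcg)
	{
		unsigned long min, low;

		mem_cgroup_protection(root, memcg, &min, &low);
		return page_counter_read(&memcg->memory) <= max(min, low);
	}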
void mem_cgroup_calculate_protection(struct mem_cgroup *root,
struct mem_cgroup *memcg);
-static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
+static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
{
/*
* The root memcg doesn't account charges, and doesn't support
- * protection.
+ * protection. The target memcg's protection is ignored, see
+ * mem_cgroup_calculate_protection() and mem_cgroup_protection()
*/
- return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
-
+ return mem_cgroup_disabled() || mem_cgroup_is_root(memcg) ||
+ memcg == target;
}
-static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
+static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
{
- if (!mem_cgroup_supports_protection(memcg))
+ if (mem_cgroup_unprotected(target, memcg))
return false;
return READ_ONCE(memcg->memory.elow) >=
page_counter_read(&memcg->memory);
}
-static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
+static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
{
- if (!mem_cgroup_supports_protection(memcg))
+ if (mem_cgroup_unprotected(target, memcg))
return false;
return READ_ONCE(memcg->memory.emin) >=
page_counter_read(&memcg->memory);
}
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
-int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
+int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
+
+/**
+ * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
+ * @folio: Folio to charge.
+ * @mm: mm context of the allocating task.
+ * @gfp: Reclaim mode.
+ *
+ * Try to charge @folio to the memcg that @mm belongs to, reclaiming
+ * pages according to @gfp if necessary. If @mm is NULL, try to
+ * charge to the active memcg.
+ *
+ * Do not use this for folios allocated for swapin.
+ *
+ * Return: 0 on success. Otherwise, an error code is returned.
+ */
+static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
+ gfp_t gfp)
+{
+ if (mem_cgroup_disabled())
+ return 0;
+ return __mem_cgroup_charge(folio, mm, gfp);
+}
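A usage sketch for the charging path; example_charge_new_folio() and its failure flag are hypothetical. The inline wrapper makes the memcg-disabled case cost only an inline check, and any error path after a successful charge must pair it with mem_cgroup_uncharge():

	static int example_charge_new_folio(struct folio *folio,
					    struct mm_struct *mm,
					    bool later_step_fails)
	{
		int err = mem_cgroup_charge(folio, mm, GFP_KERNEL);

		if (err)
			return err;

		if (later_step_fails) {
			/* Undo the charge on any later failure. */
			mem_cgroup_uncharge(folio);
			return -ENOMEM;
		}
		return 0;
	}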
+
+int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp);
+
+int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry);
-void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
-void mem_cgroup_uncharge(struct page *page);
-void mem_cgroup_uncharge_list(struct list_head *page_list);
+void __mem_cgroup_uncharge(struct folio *folio);
-void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
+/**
+ * mem_cgroup_uncharge - Uncharge a folio.
+ * @folio: Folio to uncharge.
+ *
+ * Uncharge a folio previously charged with mem_cgroup_charge().
+ */
+static inline void mem_cgroup_uncharge(struct folio *folio)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __mem_cgroup_uncharge(folio);
+}
+
+void __mem_cgroup_uncharge_folios(struct folio_batch *folios);
+static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __mem_cgroup_uncharge_folios(folios);
+}
+
+void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
+void mem_cgroup_migrate(struct folio *old, struct folio *new);
/**
* mem_cgroup_lruvec - get the lru list vector for a memcg & node
@@ -741,50 +731,37 @@ out:
}
/**
- * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
- * @page: the page
- * @pgdat: pgdat of the page
+ * folio_lruvec - return lruvec for isolating/putting an LRU folio
+ * @folio: Pointer to the folio.
*
- * This function relies on page->mem_cgroup being stable.
+ * This function relies on folio->mem_cgroup being stable.
*/
-static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
- struct pglist_data *pgdat)
+static inline struct lruvec *folio_lruvec(struct folio *folio)
{
- struct mem_cgroup *memcg = page_memcg(page);
+ struct mem_cgroup *memcg = folio_memcg(folio);
- VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
- return mem_cgroup_lruvec(memcg, pgdat);
-}
-
-static inline bool lruvec_holds_page_lru_lock(struct page *page,
- struct lruvec *lruvec)
-{
- pg_data_t *pgdat = page_pgdat(page);
- const struct mem_cgroup *memcg;
- struct mem_cgroup_per_node *mz;
-
- if (mem_cgroup_disabled())
- return lruvec == &pgdat->__lruvec;
-
- mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- memcg = page_memcg(page) ? : root_mem_cgroup;
-
- return lruvec->pgdat == pgdat && mz->memcg == memcg;
+ VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
+ return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
-struct lruvec *lock_page_lruvec(struct page *page);
-struct lruvec *lock_page_lruvec_irq(struct page *page);
-struct lruvec *lock_page_lruvec_irqsave(struct page *page,
+struct mem_cgroup *get_mem_cgroup_from_current(void);
+
+struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio);
+
+struct lruvec *folio_lruvec_lock(struct folio *folio);
+struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
+struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
unsigned long *flags);
#ifdef CONFIG_DEBUG_VM
-void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page);
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
-static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
+static inline
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif
@@ -812,7 +789,18 @@ static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
- percpu_ref_put(&objcg->refcnt);
+ if (objcg)
+ percpu_ref_put(&objcg->refcnt);
+}
+
+static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
+{
+ return !memcg || css_tryget(&memcg->css);
+}
+
+static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
+{
+ return !memcg || css_tryget_online(&memcg->css);
}
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
@@ -828,8 +816,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
struct mem_cgroup *,
struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
-int mem_cgroup_scan_tasks(struct mem_cgroup *,
- int (*)(struct task_struct *, void *), void *);
+void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+ int (*)(struct task_struct *, void *), void *arg);
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
@@ -840,6 +828,15 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
+#ifdef CONFIG_SHRINKER_DEBUG
+static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
+{
+ return memcg ? cgroup_ino(memcg->css.cgroup) : 0;
+}
+
+struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino);
+#endif
+
static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
return mem_cgroup_from_css(seq_css(m));
@@ -860,14 +857,11 @@ static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
* parent_mem_cgroup - find the accounting parent of a memcg
* @memcg: memcg whose parent to find
*
- * Returns the parent memcg, or NULL if this is the root or the memory
- * controller is in legacy no-hierarchy mode.
+ * Returns the parent memcg, or NULL if this is the root.
*/
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
- if (!memcg->memory.parent)
- return NULL;
- return mem_cgroup_from_counter(memcg->memory.parent, memory);
+ return mem_cgroup_from_css(memcg->css.parent);
}
static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
@@ -892,7 +886,7 @@ static inline bool mm_match_cgroup(struct mm_struct *mm,
return match;
}
-struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
+struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio);
ino_t page_cgroup_ino(struct page *page);
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
@@ -902,11 +896,6 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
return !!(memcg->css.flags & CSS_ONLINE);
}
-/*
- * For memory reclaim.
- */
-int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
-
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
int zid, int nr_pages);
@@ -920,7 +909,13 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}
-void mem_cgroup_handle_over_high(void);
+void __mem_cgroup_handle_over_high(gfp_t gfp_mask);
+
+static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
+{
+ if (unlikely(current->memcg_nr_pages_over_high))
+ __mem_cgroup_handle_over_high(gfp_mask);
+}
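The split keeps the common case cheap: the inline test of current->memcg_nr_pages_over_high is one predictable branch, and only the rare over-high case pays for the out-of-line call. A hypothetical call-site sketch (the function name is illustrative):

	static void example_return_to_user_work(void)
	{
		/* Reclaims current's excess over memory.high, if any. */
		mem_cgroup_handle_over_high(GFP_KERNEL);
	}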
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
@@ -931,135 +926,53 @@ void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
-static inline void mem_cgroup_enter_user_fault(void)
-{
- WARN_ON(current->in_user_fault);
- current->in_user_fault = 1;
-}
-
-static inline void mem_cgroup_exit_user_fault(void)
-{
- WARN_ON(!current->in_user_fault);
- current->in_user_fault = 0;
-}
-
-static inline bool task_in_memcg_oom(struct task_struct *p)
-{
- return p->memcg_in_oom;
-}
-
-bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
-#ifdef CONFIG_MEMCG_SWAP
-extern bool cgroup_memory_noswap;
-#endif
-
-void lock_page_memcg(struct page *page);
-void unlock_page_memcg(struct page *page);
-
-void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
-
/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void mod_memcg_state(struct mem_cgroup *memcg,
- int idx, int val)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __mod_memcg_state(memcg, idx, val);
- local_irq_restore(flags);
-}
+void mod_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx, int val);
-static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
- enum node_stat_item idx)
+static inline void mod_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx, int val)
{
- struct mem_cgroup_per_node *pn;
- long x;
-
- if (mem_cgroup_disabled())
- return node_page_state(lruvec_pgdat(lruvec), idx);
-
- pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- x = atomic_long_read(&pn->lruvec_stat[idx]);
-#ifdef CONFIG_SMP
- if (x < 0)
- x = 0;
-#endif
- return x;
-}
-
-static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
- enum node_stat_item idx)
-{
- struct mem_cgroup_per_node *pn;
- long x = 0;
- int cpu;
+ struct mem_cgroup *memcg;
if (mem_cgroup_disabled())
- return node_page_state(lruvec_pgdat(lruvec), idx);
-
- pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- for_each_possible_cpu(cpu)
- x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
-#ifdef CONFIG_SMP
- if (x < 0)
- x = 0;
-#endif
- return x;
-}
-
-void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
- int val);
-void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
-
-static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
- int val)
-{
- unsigned long flags;
+ return;
- local_irq_save(flags);
- __mod_lruvec_kmem_state(p, idx, val);
- local_irq_restore(flags);
+ rcu_read_lock();
+ memcg = folio_memcg(page_folio(page));
+ if (memcg)
+ mod_memcg_state(memcg, idx, val);
+ rcu_read_unlock();
}
-static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __mod_memcg_lruvec_state(lruvec, idx, val);
- local_irq_restore(flags);
-}
+unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
+unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx);
+unsigned long lruvec_page_state_local(struct lruvec *lruvec,
+ enum node_stat_item idx);
-void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
- unsigned long count);
+void mem_cgroup_flush_stats(struct mem_cgroup *memcg);
+void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg);
-static inline void count_memcg_events(struct mem_cgroup *memcg,
- enum vm_event_item idx,
- unsigned long count)
-{
- unsigned long flags;
+void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);
- local_irq_save(flags);
- __count_memcg_events(memcg, idx, count);
- local_irq_restore(flags);
-}
+void count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
+ unsigned long count);
-static inline void count_memcg_page_event(struct page *page,
- enum vm_event_item idx)
+static inline void count_memcg_folio_events(struct folio *folio,
+ enum vm_event_item idx, unsigned long nr)
{
- struct mem_cgroup *memcg = page_memcg(page);
+ struct mem_cgroup *memcg = folio_memcg(folio);
if (memcg)
- count_memcg_events(memcg, idx, 1);
+ count_memcg_events(memcg, idx, nr);
}
-static inline void count_memcg_event_mm(struct mm_struct *mm,
- enum vm_event_item idx)
+static inline void count_memcg_events_mm(struct mm_struct *mm,
+ enum vm_event_item idx, unsigned long count)
{
struct mem_cgroup *memcg;
@@ -1069,33 +982,23 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
if (likely(memcg))
- count_memcg_events(memcg, idx, 1);
+ count_memcg_events(memcg, idx, count);
rcu_read_unlock();
}
-static inline void memcg_memory_event(struct mem_cgroup *memcg,
- enum memcg_memory_event event)
+static inline void count_memcg_event_mm(struct mm_struct *mm,
+ enum vm_event_item idx)
{
- bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
- event == MEMCG_SWAP_FAIL;
-
- atomic_long_inc(&memcg->memory_events_local[event]);
- if (!swap_event)
- cgroup_file_notify(&memcg->events_local_file);
+ count_memcg_events_mm(mm, idx, 1);
+}
- do {
- atomic_long_inc(&memcg->memory_events[event]);
- if (swap_event)
- cgroup_file_notify(&memcg->swap_events_file);
- else
- cgroup_file_notify(&memcg->events_file);
+void __memcg_memory_event(struct mem_cgroup *memcg,
+ enum memcg_memory_event event, bool allow_spinning);
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
- break;
- if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
- break;
- } while ((memcg = parent_mem_cgroup(memcg)) &&
- !mem_cgroup_is_root(memcg));
+static inline void memcg_memory_event(struct mem_cgroup *memcg,
+ enum memcg_memory_event event)
+{
+ __memcg_memory_event(memcg, event, true);
}
static inline void memcg_memory_event_mm(struct mm_struct *mm,
@@ -1113,25 +1016,46 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
rcu_read_unlock();
}
-void split_page_memcg(struct page *head, unsigned int nr);
+void split_page_memcg(struct page *first, unsigned order);
+void folio_split_memcg_refs(struct folio *folio, unsigned old_order,
+ unsigned new_order);
-unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned);
+static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
+{
+ struct mem_cgroup *memcg;
+ u64 id;
+
+ if (mem_cgroup_disabled())
+ return 0;
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (!memcg)
+ memcg = root_mem_cgroup;
+ id = cgroup_id(memcg->css.cgroup);
+ rcu_read_unlock();
+ return id;
+}
+
+extern int mem_cgroup_init(void);
#else /* CONFIG_MEMCG */
#define MEM_CGROUP_ID_SHIFT 0
-#define MEM_CGROUP_ID_MAX 0
-static inline struct mem_cgroup *page_memcg(struct page *page)
+#define root_mem_cgroup (NULL)
+
+static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
return NULL;
}
-static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+static inline bool folio_memcg_charged(struct folio *folio)
+{
+ return false;
+}
+
+static inline struct mem_cgroup *folio_memcg_check(struct folio *folio)
{
- WARN_ON_ONCE(!rcu_read_lock_held());
return NULL;
}
@@ -1140,6 +1064,16 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page)
return NULL;
}
+static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
+{
+ return NULL;
+}
+
+static inline bool folio_memcg_kmem(struct folio *folio)
+{
+ return false;
+}
+
static inline bool PageMemcgKmem(struct page *page)
{
return false;
@@ -1165,11 +1099,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
{
}
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
- struct mem_cgroup *memcg,
- bool in_low_reclaim)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+ struct mem_cgroup *memcg,
+ unsigned long *min,
+ unsigned long *low)
{
- return 0;
+ *min = *low = 0;
}
static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
@@ -1177,41 +1112,54 @@ static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
{
}
-static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
+static inline bool mem_cgroup_unprotected(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
+{
+ return true;
+}
+static inline bool mem_cgroup_below_low(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
{
return false;
}
-static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
+static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
+ struct mem_cgroup *memcg)
{
return false;
}
-static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask)
+static inline int mem_cgroup_charge(struct folio *folio,
+ struct mm_struct *mm, gfp_t gfp)
{
return 0;
}
-static inline int mem_cgroup_swapin_charge_page(struct page *page,
+static inline int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
+{
+ return 0;
+}
+
+static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
return 0;
}
-static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
+static inline void mem_cgroup_uncharge(struct folio *folio)
{
}
-static inline void mem_cgroup_uncharge(struct page *page)
+static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios)
{
}
-static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
+static inline void mem_cgroup_replace_folio(struct folio *old,
+ struct folio *new)
{
}
-static inline void mem_cgroup_migrate(struct page *old, struct page *new)
+static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}
@@ -1221,21 +1169,14 @@ static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
return &pgdat->__lruvec;
}
-static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
- struct pglist_data *pgdat)
+static inline struct lruvec *folio_lruvec(struct folio *folio)
{
+ struct pglist_data *pgdat = folio_pgdat(folio);
return &pgdat->__lruvec;
}
-static inline bool lruvec_holds_page_lru_lock(struct page *page,
- struct lruvec *lruvec)
-{
- pg_data_t *pgdat = page_pgdat(page);
-
- return lruvec == &pgdat->__lruvec;
-}
-
-static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
+static inline
+void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
@@ -1255,30 +1196,64 @@ static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
return NULL;
}
+static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
+{
+ return NULL;
+}
+
+static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline
+struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
+{
+ return NULL;
+}
+
+static inline void obj_cgroup_get(struct obj_cgroup *objcg)
+{
+}
+
+static inline void obj_cgroup_put(struct obj_cgroup *objcg)
+{
+}
+
+static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg)
+{
+ return true;
+}
+
+static inline bool mem_cgroup_tryget_online(struct mem_cgroup *memcg)
+{
+ return true;
+}
+
static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}
-static inline struct lruvec *lock_page_lruvec(struct page *page)
+static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
- struct pglist_data *pgdat = page_pgdat(page);
+ struct pglist_data *pgdat = folio_pgdat(folio);
spin_lock(&pgdat->__lruvec.lru_lock);
return &pgdat->__lruvec;
}
-static inline struct lruvec *lock_page_lruvec_irq(struct page *page)
+static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
- struct pglist_data *pgdat = page_pgdat(page);
+ struct pglist_data *pgdat = folio_pgdat(folio);
spin_lock_irq(&pgdat->__lruvec.lru_lock);
return &pgdat->__lruvec;
}
-static inline struct lruvec *lock_page_lruvec_irqsave(struct page *page,
+static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
unsigned long *flagsp)
{
- struct pglist_data *pgdat = page_pgdat(page);
+ struct pglist_data *pgdat = folio_pgdat(folio);
spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
return &pgdat->__lruvec;
@@ -1297,10 +1272,9 @@ static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
{
}
-static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
+static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
int (*fn)(struct task_struct *, void *), void *arg)
{
- return 0;
}
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
@@ -1315,6 +1289,18 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
return NULL;
}
+#ifdef CONFIG_SHRINKER_DEBUG
+static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg)
+{
+ return 0;
+}
+
+static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
+{
+ return NULL;
+}
+#endif
+
static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
return NULL;
@@ -1357,34 +1343,8 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}
-static inline void lock_page_memcg(struct page *page)
-{
-}
-
-static inline void unlock_page_memcg(struct page *page)
-{
-}
-
-static inline void mem_cgroup_handle_over_high(void)
-{
-}
-
-static inline void mem_cgroup_enter_user_fault(void)
-{
-}
-
-static inline void mem_cgroup_exit_user_fault(void)
-{
-}
-
-static inline bool task_in_memcg_oom(struct task_struct *p)
+static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
- return false;
-}
-
-static inline bool mem_cgroup_oom_synchronize(bool wait)
-{
- return false;
}
static inline struct mem_cgroup *mem_cgroup_get_oom_group(
@@ -1397,18 +1357,22 @@ static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}
-static inline void __mod_memcg_state(struct mem_cgroup *memcg,
- int idx,
- int nr)
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx,
+ int nr)
{
}
-static inline void mod_memcg_state(struct mem_cgroup *memcg,
- int idx,
- int nr)
+static inline void mod_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx, int val)
{
}
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
+{
+ return 0;
+}
+
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
{
@@ -1421,17 +1385,12 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
return node_page_state(lruvec_pgdat(lruvec), idx);
}
-static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
- enum node_stat_item idx, int val)
+static inline void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
{
}
-static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
- int val)
+static inline void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
{
- struct page *page = virt_to_head_page(p);
-
- __mod_node_page_state(page_pgdat(page), idx, val);
}
static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
@@ -1443,19 +1402,18 @@ static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
}
static inline void count_memcg_events(struct mem_cgroup *memcg,
- enum vm_event_item idx,
- unsigned long count)
+ enum vm_event_item idx,
+ unsigned long count)
{
}
-static inline void __count_memcg_events(struct mem_cgroup *memcg,
- enum vm_event_item idx,
- unsigned long count)
+static inline void count_memcg_folio_events(struct folio *folio,
+ enum vm_event_item idx, unsigned long nr)
{
}
-static inline void count_memcg_page_event(struct page *page,
- int idx)
+static inline void count_memcg_events_mm(struct mm_struct *mm,
+ enum vm_event_item idx, unsigned long count)
{
}
@@ -1464,28 +1422,35 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
-static inline void split_page_memcg(struct page *head, unsigned int nr)
+static inline void split_page_memcg(struct page *first, unsigned order)
{
}
-static inline
-unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned)
+static inline void folio_split_memcg_refs(struct folio *folio,
+ unsigned old_order, unsigned new_order)
{
- return 0;
}
-#endif /* CONFIG_MEMCG */
-static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
+static inline u64 cgroup_id_from_mm(struct mm_struct *mm)
{
- __mod_lruvec_kmem_state(p, idx, 1);
+ return 0;
}
-static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
-{
- __mod_lruvec_kmem_state(p, idx, -1);
-}
+static inline int mem_cgroup_init(void) { return 0; }
+#endif /* CONFIG_MEMCG */
+
+/*
+ * Extended information for slab objects stored as an array in page->memcg_data
+ * if MEMCG_DATA_OBJEXTS is set.
+ */
+struct slabobj_ext {
+#ifdef CONFIG_MEMCG
+ struct obj_cgroup *objcg;
+#endif
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+ union codetag_ref ref;
+#endif
+} __aligned(8);
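An illustrative decoder for the tagged-pointer scheme defined above; example_obj_exts() is hypothetical (the real accessors live in the slab code) and assumes a configuration where page->memcg_data exists. The low OBJEXTS_FLAGS_MASK bits carry enum objext_flags, the remaining bits are the slabobj_ext vector pointer, and a value of exactly OBJEXTS_ALLOC_FAIL marks a failed vector allocation:

	static struct slabobj_ext *example_obj_exts(struct page *page)
	{
		unsigned long memcg_data = READ_ONCE(page->memcg_data);

		if (memcg_data == OBJEXTS_ALLOC_FAIL)
			return NULL;	/* vector allocation failed earlier */

		return (struct slabobj_ext *)(memcg_data & ~OBJEXTS_FLAGS_MASK);
	}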
static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
@@ -1516,32 +1481,40 @@ static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}
+/* Test requires a stable folio->memcg binding, see folio_memcg() */
+static inline bool folio_matches_lruvec(struct folio *folio,
+ struct lruvec *lruvec)
+{
+ return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
+ lruvec_memcg(lruvec) == folio_memcg(folio);
+}
+
/* Don't lock again iff page's lruvec locked */
-static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
+static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
struct lruvec *locked_lruvec)
{
if (locked_lruvec) {
- if (lruvec_holds_page_lru_lock(page, locked_lruvec))
+ if (folio_matches_lruvec(folio, locked_lruvec))
return locked_lruvec;
unlock_page_lruvec_irq(locked_lruvec);
}
- return lock_page_lruvec_irq(page);
+ return folio_lruvec_lock_irq(folio);
}
-/* Don't lock again iff page's lruvec locked */
-static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
- struct lruvec *locked_lruvec, unsigned long *flags)
+/* Don't lock again iff folio's lruvec locked */
+static inline void folio_lruvec_relock_irqsave(struct folio *folio,
+ struct lruvec **lruvecp, unsigned long *flags)
{
- if (locked_lruvec) {
- if (lruvec_holds_page_lru_lock(page, locked_lruvec))
- return locked_lruvec;
+ if (*lruvecp) {
+ if (folio_matches_lruvec(folio, *lruvecp))
+ return;
- unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
+ unlock_page_lruvec_irqrestore(*lruvecp, *flags);
}
- return lock_page_lruvec_irqsave(page, flags);
+ *lruvecp = folio_lruvec_lock_irqsave(folio, flags);
}
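Switching from returning the lruvec to updating *lruvecp enables the batched pattern below; example_process_batch() is hypothetical. One lock is kept across consecutive folios that resolve to the same lruvec and is only dropped and retaken on a mismatch:

	static void example_process_batch(struct folio_batch *fbatch)
	{
		struct lruvec *lruvec = NULL;
		unsigned long flags;
		unsigned int i;

		for (i = 0; i < folio_batch_count(fbatch); i++) {
			struct folio *folio = fbatch->folios[i];

			folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
			/* ... work on folio under lruvec->lru_lock ... */
		}
		if (lruvec)
			unlock_page_lruvec_irqrestore(lruvec, flags);
	}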
#ifdef CONFIG_CGROUP_WRITEBACK
@@ -1551,17 +1524,20 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
unsigned long *pheadroom, unsigned long *pdirty,
unsigned long *pwriteback);
-void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
+void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
struct bdi_writeback *wb);
-static inline void mem_cgroup_track_foreign_dirty(struct page *page,
+static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
struct bdi_writeback *wb)
{
+ struct mem_cgroup *memcg;
+
if (mem_cgroup_disabled())
return;
- if (unlikely(&page_memcg(page)->css != wb->memcg_css))
- mem_cgroup_track_foreign_dirty_slowpath(page, wb);
+ memcg = folio_memcg(folio);
+ if (unlikely(memcg && &memcg->css != wb->memcg_css))
+ mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}
void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
@@ -1581,7 +1557,7 @@ static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
{
}
-static inline void mem_cgroup_track_foreign_dirty(struct page *page,
+static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
struct bdi_writeback *wb)
{
}
@@ -1593,82 +1569,152 @@ static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
#endif /* CONFIG_CGROUP_WRITEBACK */
struct sock;
-bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
-void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
+
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
-static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
+void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk);
+bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages,
+ gfp_t gfp_mask);
+void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages);
+
+#if BITS_PER_LONG < 64
+static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
{
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
- return true;
+ u64 val = get_jiffies_64() + HZ;
+ unsigned long flags;
+
+ write_seqlock_irqsave(&memcg->socket_pressure_seqlock, flags);
+ memcg->socket_pressure = val;
+ write_sequnlock_irqrestore(&memcg->socket_pressure_seqlock, flags);
+}
+
+static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
+{
+ unsigned int seq;
+ u64 val;
+
do {
- if (time_before(jiffies, memcg->socket_pressure))
- return true;
- } while ((memcg = parent_mem_cgroup(memcg)));
- return false;
+ seq = read_seqbegin(&memcg->socket_pressure_seqlock);
+ val = memcg->socket_pressure;
+ } while (read_seqretry(&memcg->socket_pressure_seqlock, seq));
+
+ return val;
+}
+#else
+static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
+{
+ WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);
}
+static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
+{
+ return READ_ONCE(memcg->socket_pressure);
+}
+#endif
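The BITS_PER_LONG split exists because socket_pressure is now a u64 jiffies deadline: a 64-bit load/store is atomic on 64-bit systems, but on 32-bit a torn read could combine halves of two different writes, hence the seqlock. A hypothetical reader, following the time_before() test the removed code used:

	static bool example_socket_under_pressure(struct mem_cgroup *memcg)
	{
		return time_before64(get_jiffies_64(),
				     mem_cgroup_get_socket_pressure(memcg));
	}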
+
int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
+
+static inline int shrinker_id(struct shrinker *shrinker)
+{
+ return shrinker->id;
+}
#else
#define mem_cgroup_sockets_enabled 0
-static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
-static inline void mem_cgroup_sk_free(struct sock *sk) { };
-static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
+
+static inline void mem_cgroup_sk_alloc(struct sock *sk)
+{
+}
+
+static inline void mem_cgroup_sk_free(struct sock *sk)
+{
+}
+
+static inline void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk)
+{
+}
+
+static inline bool mem_cgroup_sk_charge(const struct sock *sk,
+ unsigned int nr_pages,
+ gfp_t gfp_mask)
{
return false;
}
+static inline void mem_cgroup_sk_uncharge(const struct sock *sk,
+ unsigned int nr_pages)
+{
+}
+
static inline void set_shrinker_bit(struct mem_cgroup *memcg,
int nid, int shrinker_id)
{
}
+
+static inline int shrinker_id(struct shrinker *shrinker)
+{
+ return -1;
+}
#endif
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
+bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);
-struct obj_cgroup *get_obj_cgroup_from_current(void);
+/*
+ * The returned objcg pointer is safe to use without additional
+ * protection within a scope. The scope is defined either by
+ * the current task (similar to the "current" global variable)
+ * or by a set_active_memcg() pair.
+ * Please use obj_cgroup_get() to get a reference if the pointer
+ * needs to be used outside of the local scope.
+ */
+struct obj_cgroup *current_obj_cgroup(void);
+struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);
+
+static inline struct obj_cgroup *get_obj_cgroup_from_current(void)
+{
+ struct obj_cgroup *objcg = current_obj_cgroup();
+
+ if (objcg)
+ obj_cgroup_get(objcg);
+
+ return objcg;
+}
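Editorial sketch of the scope rule described in the comment above: within a
single task context the raw pointer from current_obj_cgroup() needs no extra
reference; the surrounding function is hypothetical.

	static int example_charge(size_t size)
	{
		struct obj_cgroup *objcg = current_obj_cgroup();

		/* Safe to use without a reference inside this scope. */
		if (objcg)
			return obj_cgroup_charge(objcg, GFP_KERNEL, size);
		return 0;
	}
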
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
-extern struct static_key_false memcg_kmem_enabled_key;
-
-extern int memcg_nr_cache_ids;
-void memcg_get_cache_ids(void);
-void memcg_put_cache_ids(void);
+extern struct static_key_false memcg_bpf_enabled_key;
+static inline bool memcg_bpf_enabled(void)
+{
+ return static_branch_likely(&memcg_bpf_enabled_key);
+}
-/*
- * Helper macro to loop through all memcg-specific caches. Callers must still
- * check if the cache is valid (it is either valid or NULL).
- * the slab_mutex must be held when looping through those caches
- */
-#define for_each_memcg_cache_index(_idx) \
- for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
+extern struct static_key_false memcg_kmem_online_key;
-static inline bool memcg_kmem_enabled(void)
+static inline bool memcg_kmem_online(void)
{
- return static_branch_likely(&memcg_kmem_enabled_key);
+ return static_branch_likely(&memcg_kmem_online_key);
}
static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
int order)
{
- if (memcg_kmem_enabled())
+ if (memcg_kmem_online())
return __memcg_kmem_charge_page(page, gfp, order);
return 0;
}
static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
- if (memcg_kmem_enabled())
+ if (memcg_kmem_online())
__memcg_kmem_uncharge_page(page, order);
}
@@ -1676,14 +1722,42 @@ static inline void memcg_kmem_uncharge_page(struct page *page, int order)
* A helper for accessing memcg's kmem_id, used for getting
* corresponding LRU lists.
*/
-static inline int memcg_cache_id(struct mem_cgroup *memcg)
+static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
return memcg ? memcg->kmemcg_id : -1;
}
-struct mem_cgroup *mem_cgroup_from_obj(void *p);
+struct mem_cgroup *mem_cgroup_from_slab_obj(void *p);
+
+static inline void count_objcg_events(struct obj_cgroup *objcg,
+ enum vm_event_item idx,
+ unsigned long count)
+{
+ struct mem_cgroup *memcg;
+
+ if (!memcg_kmem_online())
+ return;
+
+ rcu_read_lock();
+ memcg = obj_cgroup_memcg(objcg);
+ count_memcg_events(memcg, idx, count);
+ rcu_read_unlock();
+}
+
+bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid);
+
+void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg);
+
+static inline bool memcg_is_dying(struct mem_cgroup *memcg)
+{
+ return memcg ? css_is_dying(&memcg->css) : false;
+}
#else
+static inline bool mem_cgroup_kmem_disabled(void)
+{
+ return true;
+}
static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
int order)
@@ -1705,32 +1779,147 @@ static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}
-#define for_each_memcg_cache_index(_idx) \
- for (; NULL; )
+static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
+{
+ return NULL;
+}
+
+static inline bool memcg_bpf_enabled(void)
+{
+ return false;
+}
-static inline bool memcg_kmem_enabled(void)
+static inline bool memcg_kmem_online(void)
{
return false;
}
-static inline int memcg_cache_id(struct mem_cgroup *memcg)
+static inline int memcg_kmem_id(struct mem_cgroup *memcg)
{
return -1;
}
-static inline void memcg_get_cache_ids(void)
+static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
+{
+ return NULL;
+}
+
+static inline void count_objcg_events(struct obj_cgroup *objcg,
+ enum vm_event_item idx,
+ unsigned long count)
+{
+}
+
+static inline ino_t page_cgroup_ino(struct page *page)
+{
+ return 0;
+}
+
+static inline bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid)
+{
+ return true;
+}
+
+static inline void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
+{
+}
+
+static inline bool memcg_is_dying(struct mem_cgroup *memcg)
+{
+ return false;
+}
+#endif /* CONFIG_MEMCG */
+
+#if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
+bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
+void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
+void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
+bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg);
+#else
+static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
+{
+ return true;
+}
+static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
+ size_t size)
+{
+}
+static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
+ size_t size)
+{
+}
+static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
+{
+ /* if zswap is disabled, do not block pages going to the swapping device */
+ return true;
+}
+#endif
+
+/* Cgroup v1-related declarations */
+
+#ifdef CONFIG_MEMCG_V1
+unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned);
+
+bool mem_cgroup_oom_synchronize(bool wait);
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+ return p->memcg_in_oom;
+}
+
+static inline void mem_cgroup_enter_user_fault(void)
+{
+ WARN_ON(current->in_user_fault);
+ current->in_user_fault = 1;
+}
+
+static inline void mem_cgroup_exit_user_fault(void)
+{
+ WARN_ON(!current->in_user_fault);
+ current->in_user_fault = 0;
+}
+
+void memcg1_swapout(struct folio *folio, swp_entry_t entry);
+void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages);
+
+#else /* CONFIG_MEMCG_V1 */
+static inline
+unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned)
+{
+ return 0;
+}
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+ return false;
+}
+
+static inline bool mem_cgroup_oom_synchronize(bool wait)
+{
+ return false;
+}
+
+static inline void mem_cgroup_enter_user_fault(void)
+{
+}
+
+static inline void mem_cgroup_exit_user_fault(void)
{
}
-static inline void memcg_put_cache_ids(void)
+static inline void memcg1_swapout(struct folio *folio, swp_entry_t entry)
{
}
-static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
+static inline void memcg1_swapin(swp_entry_t entry, unsigned int nr_pages)
{
- return NULL;
}
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG_V1 */
#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
index 4f1600413f91..cc74de3dbcfe 100644
--- a/include/linux/memfd.h
+++ b/include/linux/memfd.h
@@ -4,13 +4,33 @@
#include <linux/file.h>
+#define MEMFD_ANON_NAME "[memfd]"
+
#ifdef CONFIG_MEMFD_CREATE
-extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
+extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg);
+struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
+/*
+ * Check for any existing seals on mmap, return an error if access is denied due
+ * to sealing, or 0 otherwise.
+ *
+ * We also update VMA flags if appropriate by manipulating the VMA flags pointed
+ * to by vm_flags_ptr.
+ */
+int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr);
#else
-static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned long a)
+static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
{
return -EINVAL;
}
+static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline int memfd_check_seals_mmap(struct file *file,
+ vm_flags_t *vm_flags_ptr)
+{
+ return 0;
+}
#endif
#endif /* __LINUX_MEMFD_H */
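
Editorial sketch of how the new hook might be consumed by an mmap path; only
memfd_check_seals_mmap() comes from the header above, the surrounding
function is hypothetical.

	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		vm_flags_t vm_flags = vma->vm_flags;
		int err;

		/* Fail the mapping if it conflicts with existing seals. */
		err = memfd_check_seals_mmap(file, &vm_flags);
		if (err)
			return err;
		/* The helper may have adjusted bits such as VM_MAYWRITE. */
		vm_flags_reset(vma, vm_flags);
		return 0;
	}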
diff --git a/include/linux/memory-failure.h b/include/linux/memory-failure.h
new file mode 100644
index 000000000000..bc326503d2d2
--- /dev/null
+++ b/include/linux/memory-failure.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MEMORY_FAILURE_H
+#define _LINUX_MEMORY_FAILURE_H
+
+#include <linux/interval_tree.h>
+
+struct pfn_address_space;
+
+struct pfn_address_space {
+ struct interval_tree_node node;
+ struct address_space *mapping;
+};
+
+int register_pfn_address_space(struct pfn_address_space *pfn_space);
+void unregister_pfn_address_space(struct pfn_address_space *pfn_space);
+
+#endif /* _LINUX_MEMORY_FAILURE_H */
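
Since this header is new, a hedged registration sketch may help; the driver
structure and PFN range are made up, only the interval_tree_node fields and
register_pfn_address_space() come from the declarations above.

	static struct pfn_address_space example_space;

	static int example_register(struct address_space *mapping,
				    unsigned long start_pfn, unsigned long nr)
	{
		/* Cover the driver-owned PFN range [start_pfn, start_pfn + nr). */
		example_space.node.start = start_pfn;
		example_space.node.last = start_pfn + nr - 1;
		example_space.mapping = mapping;
		return register_pfn_address_space(&example_space);
	}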
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
new file mode 100644
index 000000000000..7a805796fcfd
--- /dev/null
+++ b/include/linux/memory-tiers.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MEMORY_TIERS_H
+#define _LINUX_MEMORY_TIERS_H
+
+#include <linux/types.h>
+#include <linux/nodemask.h>
+#include <linux/kref.h>
+#include <linux/mmzone.h>
+#include <linux/notifier.h>
+/*
+ * Each tier covers an abstract distance chunk size of 128.
+ */
+#define MEMTIER_CHUNK_BITS 7
+#define MEMTIER_CHUNK_SIZE (1 << MEMTIER_CHUNK_BITS)
+/*
+ * Smaller abstract distance values imply faster (higher) memory tiers. Offset
+ * the DRAM adistance so that we can accommodate devices with a slightly lower
+ * adistance value (slightly faster) than default DRAM adistance to be part of
+ * the same memory tier.
+ */
+#define MEMTIER_ADISTANCE_DRAM ((4L * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
+
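Worked example (editorial note, not part of the patch): with
MEMTIER_CHUNK_SIZE == 128, MEMTIER_ADISTANCE_DRAM evaluates to
(4 * 128) + (128 >> 1) = 512 + 64 = 576. Tiers group memory types by
abstract-distance chunk (the adistance rounded down to a multiple of
MEMTIER_CHUNK_SIZE), so default DRAM lands in the chunk starting at 512,
leaving half a chunk of headroom for slightly faster device types to share
its tier.
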
+struct memory_tier;
+struct memory_dev_type {
+ /* list of memory types that are part of same tier as this type */
+ struct list_head tier_sibling;
+ /* list of memory types that are managed by one driver */
+ struct list_head list;
+ /* abstract distance for this specific memory type */
+ int adistance;
+ /* Nodes of same abstract distance */
+ nodemask_t nodes;
+ struct kref kref;
+};
+
+struct access_coordinate;
+
+#ifdef CONFIG_NUMA
+extern bool numa_demotion_enabled;
+extern struct memory_dev_type *default_dram_type;
+extern nodemask_t default_dram_nodes;
+struct memory_dev_type *alloc_memory_type(int adistance);
+void put_memory_type(struct memory_dev_type *memtype);
+void init_node_memory_type(int node, struct memory_dev_type *default_type);
+void clear_node_memory_type(int node, struct memory_dev_type *memtype);
+int register_mt_adistance_algorithm(struct notifier_block *nb);
+int unregister_mt_adistance_algorithm(struct notifier_block *nb);
+int mt_calc_adistance(int node, int *adist);
+int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
+ const char *source);
+int mt_perf_to_adistance(struct access_coordinate *perf, int *adist);
+struct memory_dev_type *mt_find_alloc_memory_type(int adist,
+ struct list_head *memory_types);
+void mt_put_memory_types(struct list_head *memory_types);
+#ifdef CONFIG_MIGRATION
+int next_demotion_node(int node);
+void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
+bool node_is_toptier(int node);
+#else
+static inline int next_demotion_node(int node)
+{
+ return NUMA_NO_NODE;
+}
+
+static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+ *targets = NODE_MASK_NONE;
+}
+
+static inline bool node_is_toptier(int node)
+{
+ return true;
+}
+#endif
+
+#else
+
+#define numa_demotion_enabled false
+#define default_dram_type NULL
+#define default_dram_nodes NODE_MASK_NONE
+/*
+ * The CONFIG_NUMA implementation of alloc_memory_type() returns a valid
+ * pointer or an ERR_PTR() on failure; this stub returns NULL instead.
+ */
+static inline struct memory_dev_type *alloc_memory_type(int adistance)
+{
+ return NULL;
+}
+
+static inline void put_memory_type(struct memory_dev_type *memtype)
+{
+}
+
+static inline void init_node_memory_type(int node, struct memory_dev_type *default_type)
+{
+}
+
+static inline void clear_node_memory_type(int node, struct memory_dev_type *memtype)
+{
+}
+
+static inline int next_demotion_node(int node)
+{
+ return NUMA_NO_NODE;
+}
+
+static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+ *targets = NODE_MASK_NONE;
+}
+
+static inline bool node_is_toptier(int node)
+{
+ return true;
+}
+
+static inline int register_mt_adistance_algorithm(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int unregister_mt_adistance_algorithm(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int mt_calc_adistance(int node, int *adist)
+{
+ return NOTIFY_DONE;
+}
+
+static inline int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
+ const char *source)
+{
+ return -EIO;
+}
+
+static inline int mt_perf_to_adistance(struct access_coordinate *perf, int *adist)
+{
+ return -EIO;
+}
+
+static inline struct memory_dev_type *mt_find_alloc_memory_type(int adist,
+ struct list_head *memory_types)
+{
+ return NULL;
+}
+
+static inline void mt_put_memory_types(struct list_head *memory_types)
+{
+}
+#endif /* CONFIG_NUMA */
+#endif /* _LINUX_MEMORY_TIERS_H */
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 97e92e8b556a..faeaa921e55b 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -19,54 +19,108 @@
#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/mutex.h>
-#include <linux/notifier.h>
#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)
+/**
+ * struct memory_group - a logical group of memory blocks
+ * @nid: The node id for all memory blocks inside the memory group.
+ * @memory_blocks: List of all memory blocks belonging to this memory group.
+ * @present_kernel_pages: Present (online) memory outside ZONE_MOVABLE of this
+ * memory group.
+ * @present_movable_pages: Present (online) memory in ZONE_MOVABLE of this
+ * memory group.
+ * @is_dynamic: The memory group type: static vs. dynamic
+ * @s.max_pages: Valid with &memory_group.is_dynamic == false. The maximum
+ * number of pages we'll have in this static memory group.
+ * @d.unit_pages: Valid with &memory_group.is_dynamic == true. Unit in pages
+ * in which memory is added/removed in this dynamic memory group.
+ * This granularity defines the alignment of a unit in physical
+ * address space; it has to be at least as big as a single
+ * memory block.
+ *
+ * A memory group logically groups memory blocks; each memory block
+ * belongs to at most one memory group. A memory group corresponds to
+ * a memory device, such as a DIMM or a NUMA node, which spans multiple
+ * memory blocks and might even span multiple non-contiguous physical memory
+ * ranges.
+ *
+ * Modification of members after registration is serialized by memory
+ * hot(un)plug code.
+ */
+struct memory_group {
+ int nid;
+ struct list_head memory_blocks;
+ unsigned long present_kernel_pages;
+ unsigned long present_movable_pages;
+ bool is_dynamic;
+ union {
+ struct {
+ unsigned long max_pages;
+ } s;
+ struct {
+ unsigned long unit_pages;
+ } d;
+ };
+};
+
+enum memory_block_state {
+ /* These states are exposed to userspace as text strings in sysfs */
+ MEM_ONLINE, /* exposed to userspace */
+ MEM_GOING_OFFLINE, /* exposed to userspace */
+ MEM_OFFLINE, /* exposed to userspace */
+ MEM_GOING_ONLINE,
+ MEM_CANCEL_ONLINE,
+ MEM_CANCEL_OFFLINE,
+};
+
struct memory_block {
unsigned long start_section_nr;
- unsigned long state; /* serialized by the dev->lock */
+ enum memory_block_state state; /* serialized by the dev->lock */
int online_type; /* for passing data to online routine */
int nid; /* NID for this memory block */
- struct device dev;
/*
- * Number of vmemmap pages. These pages
- * lay at the beginning of the memory block.
+ * The single zone of this memory block if all PFNs of this memory block
+ * that are System RAM (not a memory hole, not ZONE_DEVICE ranges) are
+ * managed by a single zone. NULL if multiple zones (including nodes)
+ * apply.
*/
- unsigned long nr_vmemmap_pages;
+ struct zone *zone;
+ struct device dev;
+ struct vmem_altmap *altmap;
+ struct memory_group *group; /* group (if any) for this block */
+ struct list_head group_next; /* next block inside memory group */
+#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
+ atomic_long_t nr_hwpoison;
+#endif
};
int arch_get_memory_phys_device(unsigned long start_pfn);
unsigned long memory_block_size_bytes(void);
int set_memory_block_size_order(unsigned int order);
-/* These states are exposed to userspace as text strings in sysfs */
-#define MEM_ONLINE (1<<0) /* exposed to userspace */
-#define MEM_GOING_OFFLINE (1<<1) /* exposed to userspace */
-#define MEM_OFFLINE (1<<2) /* exposed to userspace */
-#define MEM_GOING_ONLINE (1<<3)
-#define MEM_CANCEL_ONLINE (1<<4)
-#define MEM_CANCEL_OFFLINE (1<<5)
-
struct memory_notify {
unsigned long start_pfn;
unsigned long nr_pages;
- int status_change_nid_normal;
- int status_change_nid_high;
- int status_change_nid;
};
struct notifier_block;
struct mem_section;
/*
- * Priorities for the hotplug memory callback routines (stored in decreasing
- * order in the callback chain)
+ * Priorities for the hotplug memory callback routines. Invoked from
+ * high to low. Higher priorities correspond to higher numbers.
*/
-#define SLAB_CALLBACK_PRI 1
-#define IPC_CALLBACK_PRI 10
+#define DEFAULT_CALLBACK_PRI 0
+#define SLAB_CALLBACK_PRI 1
+#define CXL_CALLBACK_PRI 5
+#define HMAT_CALLBACK_PRI 6
+#define MM_COMPUTE_BATCH_PRI 10
+#define CPUSET_CALLBACK_PRI 10
+#define MEMTIER_HOTPLUG_PRI 100
+#define KSM_CALLBACK_PRI 100
-#ifndef CONFIG_MEMORY_HOTPLUG_SPARSE
+#ifndef CONFIG_MEMORY_HOTPLUG
static inline void memory_dev_init(void)
{
return;
@@ -78,40 +132,74 @@ static inline int register_memory_notifier(struct notifier_block *nb)
static inline void unregister_memory_notifier(struct notifier_block *nb)
{
}
-static inline int memory_notify(unsigned long val, void *v)
+static inline int memory_notify(enum memory_block_state state, void *v)
+{
+ return 0;
+}
+static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
{
return 0;
}
-#else
+static inline int memory_block_advise_max_size(unsigned long size)
+{
+ return -ENODEV;
+}
+static inline unsigned long memory_block_advised_max_size(void)
+{
+ return 0;
+}
+#else /* CONFIG_MEMORY_HOTPLUG */
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
int create_memory_block_devices(unsigned long start, unsigned long size,
- unsigned long vmemmap_pages);
+ int nid, struct vmem_altmap *altmap,
+ struct memory_group *group);
void remove_memory_block_devices(unsigned long start, unsigned long size);
extern void memory_dev_init(void);
-extern int memory_notify(unsigned long val, void *v);
-extern struct memory_block *find_memory_block(struct mem_section *);
+extern int memory_notify(enum memory_block_state state, void *v);
+extern struct memory_block *find_memory_block(unsigned long section_nr);
typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
extern int walk_memory_blocks(unsigned long start, unsigned long size,
void *arg, walk_memory_blocks_func_t func);
extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func);
-#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
-#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
-#ifdef CONFIG_MEMORY_HOTPLUG
+extern int memory_group_register_static(int nid, unsigned long max_pages);
+extern int memory_group_register_dynamic(int nid, unsigned long unit_pages);
+extern int memory_group_unregister(int mgid);
+struct memory_group *memory_group_find_by_id(int mgid);
+typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *);
+int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
+ struct memory_group *excluded, void *arg);
+struct memory_block *find_memory_block_by_id(unsigned long block_id);
#define hotplug_memory_notifier(fn, pri) ({ \
static __meminitdata struct notifier_block fn##_mem_nb =\
{ .notifier_call = fn, .priority = pri };\
register_memory_notifier(&fn##_mem_nb); \
})
-#define register_hotmemory_notifier(nb) register_memory_notifier(nb)
-#define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb)
-#else
-#define hotplug_memory_notifier(fn, pri) ({ 0; })
-/* These aren't inline functions due to a GCC bug. */
-#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
-#define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
-#endif
+
+extern int sections_per_block;
+
+static inline unsigned long memory_block_id(unsigned long section_nr)
+{
+ return section_nr / sections_per_block;
+}
+
+static inline unsigned long pfn_to_block_id(unsigned long pfn)
+{
+ return memory_block_id(pfn_to_section_nr(pfn));
+}
+
+static inline unsigned long phys_to_block_id(unsigned long phys)
+{
+ return pfn_to_block_id(PFN_DOWN(phys));
+}
+
+#ifdef CONFIG_NUMA
+void memory_block_add_nid_early(struct memory_block *mem, int nid);
+#endif /* CONFIG_NUMA */
+int memory_block_advise_max_size(unsigned long size);
+unsigned long memory_block_advised_max_size(void);
+#endif /* CONFIG_MEMORY_HOTPLUG */
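
Editorial usage sketch for the notifier macro above; the callback body is
hypothetical, but the enum states and the hotplug_memory_notifier()
signature match the declarations in this header.

	static int example_mem_callback(struct notifier_block *nb,
					unsigned long action, void *arg)
	{
		struct memory_notify *mn = arg;

		switch (action) {
		case MEM_GOING_ONLINE:
			pr_debug("onlining %lu pages at PFN %lu\n",
				 mn->nr_pages, mn->start_pfn);
			break;
		case MEM_OFFLINE:
			/* React to memory that has gone away. */
			break;
		}
		return NOTIFY_OK;
	}

	static int __init example_init(void)
	{
		return hotplug_memory_notifier(example_mem_callback,
					       DEFAULT_CALLBACK_PRI);
	}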
/*
* Kernel text modification mutex, used for code patching. Users of this lock
diff --git a/include/linux/memory/ti-aemif.h b/include/linux/memory/ti-aemif.h
new file mode 100644
index 000000000000..da94a9d985e7
--- /dev/null
+++ b/include/linux/memory/ti-aemif.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __MEMORY_TI_AEMIF_H
+#define __MEMORY_TI_AEMIF_H
+
+/**
+ * struct aemif_cs_timings: structure to hold CS timing configuration
+ * values are expressed in number of clock cycles - 1
+ * @ta: minimum turn around time
+ * @rhold: read hold width
+ * @rstrobe: read strobe width
+ * @rsetup: read setup width
+ * @whold: write hold width
+ * @wstrobe: write strobe width
+ * @wsetup: write setup width
+ */
+struct aemif_cs_timings {
+ u32 ta;
+ u32 rhold;
+ u32 rstrobe;
+ u32 rsetup;
+ u32 whold;
+ u32 wstrobe;
+ u32 wsetup;
+};
+
+struct aemif_device;
+
+int aemif_set_cs_timings(struct aemif_device *aemif, u8 cs, struct aemif_cs_timings *timings);
+int aemif_check_cs_timings(struct aemif_cs_timings *timings);
+
+#endif // __MEMORY_TI_AEMIF_H
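
Editorial sketch of how a consumer might program these timings; the cycle
values are made up, but note the "cycles - 1" encoding called out in the
kerneldoc above.

	static int example_set_timings(struct aemif_device *aemif, u8 cs)
	{
		struct aemif_cs_timings t = {
			.ta	 = 1,	/* 2 turnaround cycles, encoded as 2 - 1 */
			.rsetup	 = 0,	/* 1 read setup cycle */
			.rstrobe = 5,	/* 6 read strobe cycles */
			.rhold	 = 0,
			.wsetup	 = 0,
			.wstrobe = 5,
			.whold	 = 0,
		};
		int err;

		err = aemif_check_cs_timings(&t);
		if (err)
			return err;
		return aemif_set_cs_timings(aemif, cs, &t);
	}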
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 28f32fd00fe9..f2f16cdd73ee 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -11,25 +11,14 @@ struct page;
struct zone;
struct pglist_data;
struct mem_section;
-struct memory_block;
+struct memory_group;
struct resource;
struct vmem_altmap;
+struct dev_pagemap;
#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);
-/*
- * Types for free bootmem stored in page->lru.next. These have to be in
- * some random range in unsigned long space for debugging purposes.
- */
-enum {
- MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
- SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
- MIX_SECTION_INFO,
- NODE_INFO,
- MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
-};
-
/* Types for control the zone type of onlined and offlined memory */
enum {
/* Offline the memory. */
@@ -60,8 +49,15 @@ typedef int __bitwise mhp_t;
* To do so, we will use the beginning of the hot-added range to build
* the page tables for the memmap array that describes the entire range.
* Only selected architectures support it with SPARSE_VMEMMAP.
+ * This is only a hint; the core kernel may decide not to honor it based
+ * on various alignment checks.
*/
#define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1))
+/*
+ * The nid field specifies a memory group id (mgid) instead. The memory group
+ * implies the node id (nid).
+ */
+#define MHP_NID_IS_MGID ((__force mhp_t)BIT(2))
/*
* Extended parameters for memory hotplug:
@@ -72,10 +68,12 @@ typedef int __bitwise mhp_t;
struct mhp_params {
struct vmem_altmap *altmap;
pgprot_t pgprot;
+ struct dev_pagemap *pgmap;
};
bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);
+bool mhp_supports_memmap_on_memory(void);
/*
* Zone resizing functions
@@ -104,20 +102,17 @@ static inline void zone_seqlock_init(struct zone *zone)
{
seqlock_init(&zone->span_seqlock);
}
-extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
-extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
-extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
-extern void adjust_present_page_count(struct zone *zone, long nr_pages);
+extern void adjust_present_page_count(struct page *page,
+ struct memory_group *group,
+ long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
- struct zone *zone);
-extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
- unsigned long end_pfn);
-extern void __offline_isolated_pages(unsigned long start_pfn,
- unsigned long end_pfn);
+ struct zone *zone, struct memory_group *group);
+extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
@@ -133,8 +128,6 @@ extern u64 max_mem_size;
extern int mhp_online_type_from_str(const char *str);
-/* Default online_type (MMOP_*) when new memory blocks are added. */
-extern int mhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
@@ -142,8 +135,7 @@ static inline bool movable_node_is_enabled(void)
return movable_node_enabled;
}
-extern void arch_remove_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap);
+extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
struct vmem_altmap *altmap);
@@ -162,82 +154,27 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */
-#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
-/*
- * For supporting node-hotadd, we have to allocate a new pgdat.
- *
- * If an arch has generic style NODE_DATA(),
- * node_data[nid] = kzalloc() works well. But it depends on the architecture.
- *
- * In general, generic_alloc_nodedata() is used.
- * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
- *
- */
-extern pg_data_t *arch_alloc_nodedata(int nid);
-extern void arch_free_nodedata(pg_data_t *pgdat);
-extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
-
-#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-
-#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
-#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat)
+void get_online_mems(void);
+void put_online_mems(void);
-#ifdef CONFIG_NUMA
-/*
- * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
- * XXX: kmalloc_node() can't work well to get new node's memory at this time.
- * Because, pgdat for the new node is not allocated/initialized yet itself.
- * To use new node's memory, more consideration will be necessary.
- */
-#define generic_alloc_nodedata(nid) \
-({ \
- kzalloc(sizeof(pg_data_t), GFP_KERNEL); \
-})
-/*
- * This definition is just for error path in node hotadd.
- * For node hotremove, we have to replace this.
- */
-#define generic_free_nodedata(pgdat) kfree(pgdat)
+void mem_hotplug_begin(void);
+void mem_hotplug_done(void);
-extern pg_data_t *node_data[];
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+/* See kswapd_is_running() */
+static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
{
- node_data[nid] = pgdat;
+ mutex_lock(&pgdat->kswapd_lock);
}
-#else /* !CONFIG_NUMA */
-
-/* never called */
-static inline pg_data_t *generic_alloc_nodedata(int nid)
-{
- BUG();
- return NULL;
-}
-static inline void generic_free_nodedata(pg_data_t *pgdat)
+static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
{
+ mutex_unlock(&pgdat->kswapd_lock);
}
-static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
-{
-}
-#endif /* CONFIG_NUMA */
-#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
-#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
-extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
-#else
-static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
{
+ mutex_init(&pgdat->kswapd_lock);
}
-#endif
-extern void put_page_bootmem(struct page *page);
-extern void get_page_bootmem(unsigned long ingo, struct page *page,
- unsigned long type);
-
-void get_online_mems(void);
-void put_online_mems(void);
-
-void mem_hotplug_begin(void);
-void mem_hotplug_done(void);
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn) \
@@ -260,10 +197,6 @@ static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}
-static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
-{
-}
-
static inline int try_online_node(int nid)
{
return 0;
@@ -279,6 +212,15 @@ static inline bool movable_node_is_enabled(void)
{
return false;
}
+
+static inline bool mhp_supports_memmap_on_memory(void)
+{
+ return false;
+}
+
+static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
+static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
+static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
/*
@@ -319,32 +261,34 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#ifdef CONFIG_MEMORY_HOTREMOVE
extern void try_offline_node(int nid);
-extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
-extern int remove_memory(int nid, u64 start, u64 size);
-extern void __remove_memory(int nid, u64 start, u64 size);
-extern int offline_and_remove_memory(int nid, u64 start, u64 size);
+extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group);
+extern int remove_memory(u64 start, u64 size);
+extern void __remove_memory(u64 start, u64 size);
+extern int offline_and_remove_memory(u64 start, u64 size);
#else
static inline void try_offline_node(int nid) {}
-static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
+static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct zone *zone, struct memory_group *group)
{
return -EINVAL;
}
-static inline int remove_memory(int nid, u64 start, u64 size)
+static inline int remove_memory(u64 start, u64 size)
{
return -EBUSY;
}
-static inline void __remove_memory(int nid, u64 start, u64 size) {}
+static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
-extern void set_zone_contiguous(struct zone *zone);
-extern void clear_zone_contiguous(struct zone *zone);
-
#ifdef CONFIG_MEMORY_HOTPLUG
-extern void __ref free_area_init_core_hotplug(int nid);
+/* Default online_type (MMOP_*) when new memory blocks are added. */
+extern int mhp_get_default_online_type(void);
+extern void mhp_set_default_online_type(int online_type);
+extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
@@ -354,24 +298,24 @@ extern int add_memory_driver_managed(int nid, u64 start, u64 size,
mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages,
- struct vmem_altmap *altmap, int migratetype);
+ struct vmem_altmap *altmap, int migratetype,
+ bool isolate_pageblock);
extern void remove_pfn_range_from_zone(struct zone *zone,
unsigned long start_pfn,
unsigned long nr_pages);
-extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
- unsigned long nr_pages, struct vmem_altmap *altmap);
-extern void sparse_remove_section(struct mem_section *ms,
- unsigned long pfn, unsigned long nr_pages,
- unsigned long map_offset, struct vmem_altmap *altmap);
+ unsigned long nr_pages, struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap);
+extern void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
-extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
+extern struct zone *zone_for_pfn_range(int online_type, int nid,
+ struct memory_group *group, unsigned long start_pfn,
unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
-extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */
#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5f1c74df264d..0fe96f3ab3ef 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -8,16 +8,18 @@
#include <linux/sched.h>
#include <linux/mmzone.h>
-#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
+#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>
struct mm_struct;
+#define NO_INTERLEAVE_INDEX (-1UL) /* use task il_prev for interleaving */
+
#ifdef CONFIG_NUMA
/*
@@ -46,11 +48,9 @@ struct mempolicy {
atomic_t refcnt;
unsigned short mode; /* See MPOL_* above */
unsigned short flags; /* See set_mempolicy() MPOL_F_* above */
- union {
- short preferred_node; /* preferred */
- nodemask_t nodes; /* interleave/bind */
- /* undefined for default */
- } v;
+ nodemask_t nodes; /* interleave/bind/preferred/etc */
+ int home_node; /* Home node to use for MPOL_BIND and MPOL_PREFERRED_MANY */
+
union {
nodemask_t cpuset_mems_allowed; /* relative to these nodes */
nodemask_t user_nodemask; /* nodemask passed by user */
@@ -92,8 +92,6 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
return pol;
}
-#define vma_policy(vma) ((vma)->vm_policy)
-
static inline void mpol_get(struct mempolicy *pol)
{
if (pol)
@@ -110,35 +108,30 @@ static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
/*
* Tree of shared policies for a shared memory region.
- * Maintain the policies in a pseudo mm that contains vmas. The vmas
- * carry the policy. As a special twist the pseudo mm is indexed in pages, not
- * bytes, so that we can work with shared memory segments bigger than
- * unsigned long.
*/
-
-struct sp_node {
- struct rb_node nd;
- unsigned long start, end;
- struct mempolicy *policy;
-};
-
struct shared_policy {
struct rb_root root;
rwlock_t lock;
};
+struct sp_node {
+ struct rb_node nd;
+ pgoff_t start, end;
+ struct mempolicy *policy;
+};
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
-int mpol_set_shared_policy(struct shared_policy *info,
- struct vm_area_struct *vma,
- struct mempolicy *new);
-void mpol_free_shared_policy(struct shared_policy *p);
+int mpol_set_shared_policy(struct shared_policy *sp,
+ struct vm_area_struct *vma, struct mempolicy *mpol);
+void mpol_free_shared_policy(struct shared_policy *sp);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
- unsigned long idx);
+ pgoff_t idx);
struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
- unsigned long addr);
+ unsigned long addr, pgoff_t *ilx);
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+ unsigned long addr, int order, pgoff_t *ilx);
bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
@@ -150,17 +143,8 @@ extern int huge_node(struct vm_area_struct *vma,
unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
-extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
+extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
const nodemask_t *mask);
-extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);
-
-static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
-{
- struct mempolicy *mpol = get_task_policy(current);
-
- return policy_nodemask(gfp, mpol);
-}
-
extern unsigned int mempolicy_slab_node(void);
extern enum zone_type policy_zone;
@@ -184,19 +168,35 @@ extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
/* Check if a vma is migratable */
extern bool vma_migratable(struct vm_area_struct *vma);
-extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
+ unsigned long addr);
extern void mpol_put_task_policy(struct task_struct *);
+static inline bool mpol_is_preferred_many(struct mempolicy *pol)
+{
+ return (pol->mode == MPOL_PREFERRED_MANY);
+}
+
+extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone);
+
+extern int mempolicy_set_node_perf(unsigned int node,
+ struct access_coordinate *coords);
+
#else
struct mempolicy {};
+static inline struct mempolicy *get_task_policy(struct task_struct *p)
+{
+ return NULL;
+}
+
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
return true;
}
-static inline void mpol_put(struct mempolicy *p)
+static inline void mpol_put(struct mempolicy *pol)
{
}
@@ -215,17 +215,22 @@ static inline void mpol_shared_policy_init(struct shared_policy *sp,
{
}
-static inline void mpol_free_shared_policy(struct shared_policy *p)
+static inline void mpol_free_shared_policy(struct shared_policy *sp)
{
}
static inline struct mempolicy *
-mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
+mpol_shared_policy_lookup(struct shared_policy *sp, pgoff_t idx)
{
return NULL;
}
-#define vma_policy(vma) NULL
+static inline struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+ unsigned long addr, int order, pgoff_t *ilx)
+{
+ *ilx = 0;
+ return NULL;
+}
static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
@@ -281,7 +286,8 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
}
#endif
-static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
+static inline int mpol_misplaced(struct folio *folio,
+ struct vm_fault *vmf,
unsigned long address)
{
return -1; /* no node preference */
@@ -291,9 +297,10 @@ static inline void mpol_put_task_policy(struct task_struct *task)
{
}
-static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
+static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
- return NULL;
+ return false;
}
+
#endif /* CONFIG_NUMA */
#endif
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 0c964ac107c2..e8e440e04a06 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -5,6 +5,8 @@
#ifndef _LINUX_MEMPOOL_H
#define _LINUX_MEMPOOL_H
+#include <linux/sched.h>
+#include <linux/alloc_tag.h>
#include <linux/wait.h>
#include <linux/compiler.h>
@@ -13,7 +15,7 @@ struct kmem_cache;
typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
typedef void (mempool_free_t)(void *element, void *pool_data);
-typedef struct mempool_s {
+typedef struct mempool {
spinlock_t lock;
int min_nr; /* nr of elements at *elements */
int curr_nr; /* Current nr of elements at *elements */
@@ -25,28 +27,53 @@ typedef struct mempool_s {
wait_queue_head_t wait;
} mempool_t;
-static inline bool mempool_initialized(mempool_t *pool)
+static inline bool mempool_initialized(struct mempool *pool)
{
return pool->elements != NULL;
}
-void mempool_exit(mempool_t *pool);
-int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data,
- gfp_t gfp_mask, int node_id);
-int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data);
-
-extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data);
-extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data,
- gfp_t gfp_mask, int nid);
+static inline bool mempool_is_saturated(struct mempool *pool)
+{
+ return READ_ONCE(pool->curr_nr) >= pool->min_nr;
+}
-extern int mempool_resize(mempool_t *pool, int new_min_nr);
-extern void mempool_destroy(mempool_t *pool);
-extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
-extern void mempool_free(void *element, mempool_t *pool);
+void mempool_exit(struct mempool *pool);
+int mempool_init_node(struct mempool *pool, int min_nr,
+ mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+ void *pool_data, gfp_t gfp_mask, int node_id);
+int mempool_init_noprof(struct mempool *pool, int min_nr,
+ mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+ void *pool_data);
+#define mempool_init(...) \
+ alloc_hooks(mempool_init_noprof(__VA_ARGS__))
+
+struct mempool *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
+ mempool_free_t *free_fn, void *pool_data);
+struct mempool *mempool_create_node_noprof(int min_nr,
+ mempool_alloc_t *alloc_fn, mempool_free_t *free_fn,
+ void *pool_data, gfp_t gfp_mask, int nid);
+#define mempool_create_node(...) \
+ alloc_hooks(mempool_create_node_noprof(__VA_ARGS__))
+
+#define mempool_create(_min_nr, _alloc_fn, _free_fn, _pool_data) \
+ mempool_create_node(_min_nr, _alloc_fn, _free_fn, _pool_data, \
+ GFP_KERNEL, NUMA_NO_NODE)
+
+int mempool_resize(struct mempool *pool, int new_min_nr);
+void mempool_destroy(struct mempool *pool);
+
+void *mempool_alloc_noprof(struct mempool *pool, gfp_t gfp_mask) __malloc;
+#define mempool_alloc(...) \
+ alloc_hooks(mempool_alloc_noprof(__VA_ARGS__))
+int mempool_alloc_bulk_noprof(struct mempool *pool, void **elem,
+ unsigned int count, unsigned int allocated);
+#define mempool_alloc_bulk(...) \
+ alloc_hooks(mempool_alloc_bulk_noprof(__VA_ARGS__))
+
+void *mempool_alloc_preallocated(struct mempool *pool) __malloc;
+void mempool_free(void *element, struct mempool *pool);
+unsigned int mempool_free_bulk(struct mempool *pool, void **elem,
+ unsigned int count);
/*
* A mempool_alloc_t and mempool_free_t that get the memory from
@@ -56,19 +83,10 @@ extern void mempool_free(void *element, mempool_t *pool);
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
void mempool_free_slab(void *element, void *pool_data);
-static inline int
-mempool_init_slab_pool(mempool_t *pool, int min_nr, struct kmem_cache *kc)
-{
- return mempool_init(pool, min_nr, mempool_alloc_slab,
- mempool_free_slab, (void *) kc);
-}
-
-static inline mempool_t *
-mempool_create_slab_pool(int min_nr, struct kmem_cache *kc)
-{
- return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab,
- (void *) kc);
-}
+#define mempool_init_slab_pool(_pool, _min_nr, _kc) \
+ mempool_init(_pool, (_min_nr), mempool_alloc_slab, mempool_free_slab, (void *)(_kc))
+#define mempool_create_slab_pool(_min_nr, _kc) \
+ mempool_create((_min_nr), mempool_alloc_slab, mempool_free_slab, (void *)(_kc))
/*
* a mempool_alloc_t and a mempool_free_t to kmalloc and kfree the
@@ -77,17 +95,12 @@ mempool_create_slab_pool(int min_nr, struct kmem_cache *kc)
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
void mempool_kfree(void *element, void *pool_data);
-static inline int mempool_init_kmalloc_pool(mempool_t *pool, int min_nr, size_t size)
-{
- return mempool_init(pool, min_nr, mempool_kmalloc,
- mempool_kfree, (void *) size);
-}
-
-static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
-{
- return mempool_create(min_nr, mempool_kmalloc, mempool_kfree,
- (void *) size);
-}
+#define mempool_init_kmalloc_pool(_pool, _min_nr, _size) \
+ mempool_init(_pool, (_min_nr), mempool_kmalloc, mempool_kfree, \
+ (void *)(unsigned long)(_size))
+#define mempool_create_kmalloc_pool(_min_nr, _size) \
+ mempool_create((_min_nr), mempool_kmalloc, mempool_kfree, \
+ (void *)(unsigned long)(_size))
/*
* A mempool_alloc_t and mempool_free_t for a simple page allocator that
@@ -96,16 +109,11 @@ static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
void mempool_free_pages(void *element, void *pool_data);
-static inline int mempool_init_page_pool(mempool_t *pool, int min_nr, int order)
-{
- return mempool_init(pool, min_nr, mempool_alloc_pages,
- mempool_free_pages, (void *)(long)order);
-}
-
-static inline mempool_t *mempool_create_page_pool(int min_nr, int order)
-{
- return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages,
- (void *)(long)order);
-}
+#define mempool_init_page_pool(_pool, _min_nr, _order) \
+ mempool_init(_pool, (_min_nr), mempool_alloc_pages, \
+ mempool_free_pages, (void *)(long)(_order))
+#define mempool_create_page_pool(_min_nr, _order) \
+ mempool_create((_min_nr), mempool_alloc_pages, \
+ mempool_free_pages, (void *)(long)(_order))
#endif /* _LINUX_MEMPOOL_H */
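
Editorial end-to-end sketch of the kmalloc-backed convenience wrappers; the
element size and pool depth are arbitrary.

	static int example_pool_demo(void)
	{
		/* Guarantee at least 16 in-flight 256-byte buffers. */
		struct mempool *pool = mempool_create_kmalloc_pool(16, 256);
		void *buf;

		if (!pool)
			return -ENOMEM;
		/* May sleep, but will not fail for a GFP_KERNEL mask. */
		buf = mempool_alloc(pool, GFP_KERNEL);
		mempool_free(buf, pool);
		mempool_destroy(pool);
		return 0;
	}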
diff --git a/include/linux/memregion.h b/include/linux/memregion.h
index e11595256cac..a55f62cc5266 100644
--- a/include/linux/memregion.h
+++ b/include/linux/memregion.h
@@ -3,9 +3,12 @@
#define _MEMREGION_H_
#include <linux/types.h>
#include <linux/errno.h>
+#include <linux/range.h>
+#include <linux/bug.h>
struct memregion_info {
int target_node;
+ struct range range;
};
#ifdef CONFIG_MEMREGION
@@ -16,8 +19,53 @@ static inline int memregion_alloc(gfp_t gfp)
{
return -ENOMEM;
}
-void memregion_free(int id)
+static inline void memregion_free(int id)
{
}
#endif
+
+/**
+ * cpu_cache_invalidate_memregion - drop any CPU cached data for
+ * memregion
+ * @start: start physical address of the target memory region.
+ * @len: length of the target memory region. -1 for all the regions of
+ * the target type.
+ *
+ * Perform cache maintenance after a memory event / operation that
+ * changes the contents of physical memory in a cache-incoherent manner.
+ * For example, device memory technologies like NVDIMM and CXL have
+ * device secure erase, and dynamic region provision that can replace
+ * the memory mapped to a given physical address.
+ *
+ * Limit the functionality to architectures that have an efficient way
+ * to writeback and invalidate potentially terabytes of address space at
+ * once. Note that this routine may or may not write back any dirty
+ * contents while performing the invalidation. It is only exported for
+ * the explicit usage of the NVDIMM and CXL modules in the 'DEVMEM'
+ * symbol namespace on bare metal platforms.
+ *
+ * Returns 0 on success or negative error code on a failure to perform
+ * the cache maintenance.
+ */
+#ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
+int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len);
+bool cpu_cache_has_invalidate_memregion(void);
+#else
+static inline bool cpu_cache_has_invalidate_memregion(void)
+{
+ return false;
+}
+
+static inline int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len)
+{
+ WARN_ON_ONCE("CPU cache invalidation required");
+ return -ENXIO;
+}
+#endif
+
+static inline int cpu_cache_invalidate_all(void)
+{
+ return cpu_cache_invalidate_memregion(0, -1);
+}
+
#endif /* _MEMREGION_H_ */
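
Editorial caller-side sketch; per the kerneldoc above, callers are expected
to cope with architectures that cannot provide the invalidation. The
surrounding function is hypothetical.

	static int example_secure_erase_finish(phys_addr_t base, size_t len)
	{
		if (!cpu_cache_has_invalidate_memregion())
			return -ENXIO;
		/* Drop any stale cached contents for the replaced region. */
		return cpu_cache_invalidate_memregion(base, len);
	}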
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 45a79da89c5f..713ec0435b48 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
+
+#include <linux/mmzone.h>
#include <linux/range.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>
@@ -26,7 +28,7 @@ struct vmem_altmap {
};
/*
- * Specialize ZONE_DEVICE memory into multiple types each having differents
+ * Specialize ZONE_DEVICE memory into multiple types, each having a different
* usage.
*
* MEMORY_DEVICE_PRIVATE:
@@ -37,7 +39,14 @@ struct vmem_altmap {
* must be treated as an opaque object, rather than a "normal" struct page.
*
* A more complete discussion of unaddressable memory may be found in
- * include/linux/hmm.h and Documentation/vm/hmm.rst.
+ * include/linux/hmm.h and Documentation/mm/hmm.rst.
+ *
+ * MEMORY_DEVICE_COHERENT:
+ * Device memory that is cache coherent from device and CPU point of view. This
+ * is used on platforms that have an advanced system bus (like CAPI or CXL). A
+ * driver can hotplug the device memory using ZONE_DEVICE and with that memory
+ * type. Any page of a process can be migrated to such memory. However no one
+ * should be allowed to pin such memory so that it can always be evicted.
*
* MEMORY_DEVICE_FS_DAX:
* Host memory that has similar access semantics as System RAM i.e. DMA
@@ -59,6 +68,7 @@ struct vmem_altmap {
enum memory_type {
/* 0 is reserved to catch uninitialized type fields */
MEMORY_DEVICE_PRIVATE = 1,
+ MEMORY_DEVICE_COHERENT,
MEMORY_DEVICE_FS_DAX,
MEMORY_DEVICE_GENERIC,
MEMORY_DEVICE_PCI_P2PDMA,
@@ -66,27 +76,36 @@ enum memory_type {
struct dev_pagemap_ops {
/*
- * Called once the page refcount reaches 1. (ZONE_DEVICE pages never
- * reach 0 refcount unless there is a refcount bug. This allows the
- * device driver to implement its own memory management.)
+ * Called once the folio refcount reaches 0. The reference count will be
+ * reset to one by the core code after the method is called to prepare
+ * for handing out the folio again.
*/
- void (*page_free)(struct page *page);
+ void (*folio_free)(struct folio *folio);
/*
- * Transition the refcount in struct dev_pagemap to the dead state.
+ * Used for private (un-addressable) device memory only. Must migrate
+ * the page back to a CPU accessible page.
*/
- void (*kill)(struct dev_pagemap *pgmap);
+ vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
/*
- * Wait for refcount in struct dev_pagemap to be idle and reap it.
+ * Handle the memory failure happens on a range of pfns. Notify the
+ * processes who are using these pfns, and try to recover the data on
+ * them if necessary. The mf_flags value is ultimately passed to the
+ * recovery function through the whole notify routine.
+ *
+ * When this is not implemented, or it returns -EOPNOTSUPP, the caller
+ * will fall back to a common handler called mf_generic_kill_procs().
*/
- void (*cleanup)(struct dev_pagemap *pgmap);
+ int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
+ unsigned long nr_pages, int mf_flags);
/*
- * Used for private (un-addressable) device memory only. Must migrate
- * the page back to a CPU accessible page.
+ * Used for private (un-addressable) device memory only.
+ * This callback is invoked when a folio is split into
+ * smaller folios.
*/
- vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
+ void (*folio_split)(struct folio *head, struct folio *tail);
};
#define PGMAP_ALTMAP_VALID (1 << 0)
@@ -95,10 +114,14 @@ struct dev_pagemap_ops {
* struct dev_pagemap - metadata for ZONE_DEVICE mappings
* @altmap: pre-allocated/reserved memory for vmemmap allocations
* @ref: reference count that pins the devm_memremap_pages() mapping
- * @internal_ref: internal reference if @ref is not provided by the caller
- * @done: completion for @internal_ref
- * @type: memory type: see MEMORY_* in memory_hotplug.h
+ * @done: completion for @ref
+ * @type: memory type: see MEMORY_* above in memremap.h
* @flags: PGMAP_* flags to specify detailed behavior
+ * @vmemmap_shift: structural definition of how the vmemmap page metadata
+ * is populated, specifically the metadata page order.
+ * A zero value (default) uses base pages as the vmemmap metadata
+ * representation. A bigger value will set up compound struct pages
+ * of the requested order value.
* @ops: method table
* @owner: an opaque pointer identifying the entity that manages this
* instance. Used by various helpers to make sure that no
@@ -109,20 +132,25 @@ struct dev_pagemap_ops {
*/
struct dev_pagemap {
struct vmem_altmap altmap;
- struct percpu_ref *ref;
- struct percpu_ref internal_ref;
+ struct percpu_ref ref;
struct completion done;
enum memory_type type;
unsigned int flags;
+ unsigned long vmemmap_shift;
const struct dev_pagemap_ops *ops;
void *owner;
int nr_range;
union {
struct range range;
- struct range ranges[0];
+ DECLARE_FLEX_ARRAY(struct range, ranges);
};
};
+static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap)
+{
+ return pgmap->ops && pgmap->ops->memory_failure;
+}
+
static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
{
if (pgmap->flags & PGMAP_ALTMAP_VALID)
@@ -130,18 +158,106 @@ static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
return NULL;
}
+static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap)
+{
+ return 1 << pgmap->vmemmap_shift;
+}
+
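Worked example (editorial note): a driver that sets pgmap->vmemmap_shift to 9
describes its memory with compound pages of 1 << 9 == 512 base pages, so
pgmap_vmemmap_nr() returns 512; the default shift of 0 keeps base pages and
yields 1.
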
+static inline bool folio_is_device_private(const struct folio *folio)
+{
+ return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
+ folio_is_zone_device(folio) &&
+ folio->pgmap->type == MEMORY_DEVICE_PRIVATE;
+}
+
+static inline bool is_device_private_page(const struct page *page)
+{
+ return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
+ folio_is_device_private(page_folio(page));
+}
+
+static inline bool folio_is_pci_p2pdma(const struct folio *folio)
+{
+ return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
+ folio_is_zone_device(folio) &&
+ folio->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
+}
+
+static inline void *folio_zone_device_data(const struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_is_device_private(folio), folio);
+ return folio->page.zone_device_data;
+}
+
+static inline void folio_set_zone_device_data(struct folio *folio, void *data)
+{
+ VM_WARN_ON_FOLIO(!folio_is_device_private(folio), folio);
+ folio->page.zone_device_data = data;
+}
+
+static inline bool is_pci_p2pdma_page(const struct page *page)
+{
+ return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
+ folio_is_pci_p2pdma(page_folio(page));
+}
+
+static inline bool folio_is_device_coherent(const struct folio *folio)
+{
+ return folio_is_zone_device(folio) &&
+ folio->pgmap->type == MEMORY_DEVICE_COHERENT;
+}
+
+static inline bool is_device_coherent_page(const struct page *page)
+{
+ return folio_is_device_coherent(page_folio(page));
+}
+
+static inline bool folio_is_fsdax(const struct folio *folio)
+{
+ return folio_is_zone_device(folio) &&
+ folio->pgmap->type == MEMORY_DEVICE_FS_DAX;
+}
+
+static inline bool is_fsdax_page(const struct page *page)
+{
+ return folio_is_fsdax(page_folio(page));
+}
+
#ifdef CONFIG_ZONE_DEVICE
+void zone_device_page_init(struct page *page, unsigned int order);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
-struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
- struct dev_pagemap *pgmap);
+struct dev_pagemap *get_dev_pagemap(unsigned long pfn);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
-unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
-void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
unsigned long memremap_compat_align(void);
+
+static inline void zone_device_folio_init(struct folio *folio, unsigned int order)
+{
+ zone_device_page_init(&folio->page, order);
+ if (order)
+ folio_set_large_rmappable(folio);
+}
+
+static inline void zone_device_private_split_cb(struct folio *original_folio,
+ struct folio *new_folio)
+{
+ if (folio_is_device_private(original_folio)) {
+ if (!original_folio->pgmap->ops->folio_split) {
+ if (new_folio) {
+ new_folio->pgmap = original_folio->pgmap;
+ new_folio->page.mapping =
+ original_folio->page.mapping;
+ }
+ } else {
+ original_folio->pgmap->ops->folio_split(original_folio,
+ new_folio);
+ }
+ }
+}
+
#else
static inline void *devm_memremap_pages(struct device *dev,
struct dev_pagemap *pgmap)
@@ -160,8 +276,7 @@ static inline void devm_memunmap_pages(struct device *dev,
{
}
-static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
- struct dev_pagemap *pgmap)
+static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
{
return NULL;
}
@@ -171,27 +286,22 @@ static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
return false;
}
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
-{
- return 0;
-}
-
-static inline void vmem_altmap_free(struct vmem_altmap *altmap,
- unsigned long nr_pfns)
-{
-}
-
/* when memremap_pages() is disabled all archs can remap a single page */
static inline unsigned long memremap_compat_align(void)
{
return PAGE_SIZE;
}
+
+static inline void zone_device_private_split_cb(struct folio *original_folio,
+ struct folio *new_folio)
+{
+}
#endif /* CONFIG_ZONE_DEVICE */
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
if (pgmap)
- percpu_ref_put(pgmap->ref);
+ percpu_ref_put(&pgmap->ref);
}
#endif /* _LINUX_MEMREMAP_H_ */
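For orientation on the interface changes above: get_dev_pagemap() no longer takes a cached pgmap argument, the percpu ref is embedded in the pagemap (hence &pgmap->ref), and ranges becomes a proper flexible array. A minimal sketch of a single-range mapping against the new layout, assuming a hypothetical driver that uses MEMORY_DEVICE_PRIVATE (that type requires at least a page_free callback and an owner, both elided here):

#include <linux/err.h>
#include <linux/memremap.h>

static const struct dev_pagemap_ops example_pgmap_ops = {
	/* .page_free = ..., mandatory for MEMORY_DEVICE_PRIVATE (elided) */
};

static int example_map_device_memory(struct device *dev,
				     struct dev_pagemap *pgmap,
				     const struct range *range)
{
	void *addr;

	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->nr_range = 1;
	pgmap->range = *range;
	pgmap->ops = &example_pgmap_ops;

	/* returns the mapped address, or an ERR_PTR() on failure */
	addr = devm_memremap_pages(dev, pgmap);
	return PTR_ERR_OR_ZERO(addr);
}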
diff --git a/include/linux/memstick.h b/include/linux/memstick.h
index ebf73d4ee969..107bdcbedf79 100644
--- a/include/linux/memstick.h
+++ b/include/linux/memstick.h
@@ -293,7 +293,7 @@ struct memstick_host {
};
struct memstick_driver {
- struct memstick_device_id *id_table;
+ const struct memstick_device_id *id_table;
int (*probe)(struct memstick_dev *card);
void (*remove)(struct memstick_dev *card);
int (*suspend)(struct memstick_dev *card,
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index def5df6e74bf..551ef1c367d6 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -294,7 +294,7 @@ struct pm80x_chip {
struct i2c_client *client;
struct i2c_client *companion;
struct regmap *regmap;
- struct regmap_irq_chip *regmap_irq_chip;
+ const struct regmap_irq_chip *regmap_irq_chip;
struct regmap_irq_chip_data *irq_data;
int type;
int irq;
diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h
index 473545a2c425..6fa21791fc85 100644
--- a/include/linux/mfd/88pm860x.h
+++ b/include/linux/mfd/88pm860x.h
@@ -472,13 +472,7 @@ extern int pm860x_bulk_read(struct i2c_client *, int, int, unsigned char *);
extern int pm860x_bulk_write(struct i2c_client *, int, int, unsigned char *);
extern int pm860x_set_bits(struct i2c_client *, int, unsigned char,
unsigned char);
-extern int pm860x_page_reg_read(struct i2c_client *, int);
extern int pm860x_page_reg_write(struct i2c_client *, int, unsigned char);
extern int pm860x_page_bulk_read(struct i2c_client *, int, int,
unsigned char *);
-extern int pm860x_page_bulk_write(struct i2c_client *, int, int,
- unsigned char *);
-extern int pm860x_page_set_bits(struct i2c_client *, int, unsigned char,
- unsigned char);
-
#endif /* __LINUX_MFD_88PM860X_H */
diff --git a/include/linux/mfd/88pm886.h b/include/linux/mfd/88pm886.h
new file mode 100644
index 000000000000..38892ba7b8a4
--- /dev/null
+++ b/include/linux/mfd/88pm886.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __MFD_88PM886_H
+#define __MFD_88PM886_H
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#define PM886_A1_CHIP_ID 0xa1
+
+#define PM886_IRQ_ONKEY 0
+
+#define PM886_PAGE_OFFSET_REGULATORS 1
+#define PM886_PAGE_OFFSET_GPADC 2
+
+#define PM886_REG_ID 0x00
+
+#define PM886_REG_STATUS1 0x01
+#define PM886_ONKEY_STS1 BIT(0)
+
+#define PM886_REG_INT_STATUS1 0x05
+
+#define PM886_REG_INT_ENA_1 0x0a
+#define PM886_INT_ENA1_ONKEY BIT(0)
+
+#define PM886_REG_MISC_CONFIG1 0x14
+#define PM886_SW_PDOWN BIT(5)
+
+#define PM886_REG_MISC_CONFIG2 0x15
+#define PM886_INT_INV BIT(0)
+#define PM886_INT_CLEAR BIT(1)
+#define PM886_INT_RC 0x00
+#define PM886_INT_WC BIT(1)
+#define PM886_INT_MASK_MODE BIT(2)
+
+#define PM886_REG_RTC_CNT1 0xd1
+#define PM886_REG_RTC_CNT2 0xd2
+#define PM886_REG_RTC_CNT3 0xd3
+#define PM886_REG_RTC_CNT4 0xd4
+#define PM886_REG_RTC_SPARE1 0xea
+#define PM886_REG_RTC_SPARE2 0xeb
+#define PM886_REG_RTC_SPARE3 0xec
+#define PM886_REG_RTC_SPARE4 0xed
+#define PM886_REG_RTC_SPARE5 0xee
+#define PM886_REG_RTC_SPARE6 0xef
+
+#define PM886_REG_BUCK_EN 0x08
+#define PM886_REG_LDO_EN1 0x09
+#define PM886_REG_LDO_EN2 0x0a
+#define PM886_REG_LDO1_VOUT 0x20
+#define PM886_REG_LDO2_VOUT 0x26
+#define PM886_REG_LDO3_VOUT 0x2c
+#define PM886_REG_LDO4_VOUT 0x32
+#define PM886_REG_LDO5_VOUT 0x38
+#define PM886_REG_LDO6_VOUT 0x3e
+#define PM886_REG_LDO7_VOUT 0x44
+#define PM886_REG_LDO8_VOUT 0x4a
+#define PM886_REG_LDO9_VOUT 0x50
+#define PM886_REG_LDO10_VOUT 0x56
+#define PM886_REG_LDO11_VOUT 0x5c
+#define PM886_REG_LDO12_VOUT 0x62
+#define PM886_REG_LDO13_VOUT 0x68
+#define PM886_REG_LDO14_VOUT 0x6e
+#define PM886_REG_LDO15_VOUT 0x74
+#define PM886_REG_LDO16_VOUT 0x7a
+#define PM886_REG_BUCK1_VOUT 0xa5
+#define PM886_REG_BUCK2_VOUT 0xb3
+#define PM886_REG_BUCK3_VOUT 0xc1
+#define PM886_REG_BUCK4_VOUT 0xcf
+#define PM886_REG_BUCK5_VOUT 0xdd
+
+#define PM886_LDO_VSEL_MASK 0x0f
+#define PM886_BUCK_VSEL_MASK 0x7f
+
+/* GPADC enable/disable registers */
+#define PM886_REG_GPADC_CONFIG(n) (n)
+
+#define PM886_GPADC_VSC_EN BIT(0)
+#define PM886_GPADC_VBAT_EN BIT(1)
+#define PM886_GPADC_GNDDET1_EN BIT(3)
+#define PM886_GPADC_VBUS_EN BIT(4)
+#define PM886_GPADC_VCHG_PWR_EN BIT(5)
+#define PM886_GPADC_VCF_OUT_EN BIT(6)
+#define PM886_GPADC_CONFIG1_EN_ALL \
+ (PM886_GPADC_VSC_EN | \
+ PM886_GPADC_VBAT_EN | \
+ PM886_GPADC_GNDDET1_EN | \
+ PM886_GPADC_VBUS_EN | \
+ PM886_GPADC_VCHG_PWR_EN | \
+ PM886_GPADC_VCF_OUT_EN)
+
+#define PM886_GPADC_TINT_EN BIT(0)
+#define PM886_GPADC_PMODE_EN BIT(1)
+#define PM886_GPADC_GPADC0_EN BIT(2)
+#define PM886_GPADC_GPADC1_EN BIT(3)
+#define PM886_GPADC_GPADC2_EN BIT(4)
+#define PM886_GPADC_GPADC3_EN BIT(5)
+#define PM886_GPADC_MIC_DET_EN BIT(6)
+#define PM886_GPADC_CONFIG2_EN_ALL \
+ (PM886_GPADC_TINT_EN | \
+ PM886_GPADC_GPADC0_EN | \
+ PM886_GPADC_GPADC1_EN | \
+ PM886_GPADC_GPADC2_EN | \
+ PM886_GPADC_GPADC3_EN | \
+ PM886_GPADC_MIC_DET_EN)
+
+/* No CONFIG3_EN_ALL because this is the only bit there. */
+#define PM886_GPADC_GND_DET2_EN BIT(0)
+
+/* GPADC channel registers */
+#define PM886_REG_GPADC_VSC 0x40
+#define PM886_REG_GPADC_VCHG_PWR 0x4c
+#define PM886_REG_GPADC_VCF_OUT 0x4e
+#define PM886_REG_GPADC_TINT 0x50
+#define PM886_REG_GPADC_GPADC0 0x54
+#define PM886_REG_GPADC_GPADC1 0x56
+#define PM886_REG_GPADC_GPADC2 0x58
+#define PM886_REG_GPADC_VBAT 0xa0
+#define PM886_REG_GPADC_GND_DET1 0xa4
+#define PM886_REG_GPADC_GND_DET2 0xa6
+#define PM886_REG_GPADC_VBUS 0xa8
+#define PM886_REG_GPADC_GPADC3 0xaa
+#define PM886_REG_GPADC_MIC_DET 0xac
+#define PM886_REG_GPADC_VBAT_SLP 0xb0
+
+/* VBAT_SLP is the last register and is 2 bytes wide like other channels. */
+#define PM886_GPADC_MAX_REGISTER (PM886_REG_GPADC_VBAT_SLP + 1)
+
+#define PM886_GPADC_BIAS_LEVELS 16
+#define PM886_GPADC_INDEX_TO_BIAS_uA(i) (1 + (i) * 5)
+
+struct pm886_chip {
+ struct i2c_client *client;
+ unsigned int chip_id;
+ struct regmap *regmap;
+};
+#endif /* __MFD_88PM886_H */
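The PM886_GPADC_INDEX_TO_BIAS_uA() macro above is a plain linear map: sixteen bias levels starting at 1 uA and stepping by 5 uA, so level 15 yields 76 uA. A throwaway user-space check of that arithmetic, with the two macros copied from the header:

#include <stdio.h>

#define PM886_GPADC_BIAS_LEVELS		16
#define PM886_GPADC_INDEX_TO_BIAS_uA(i)	(1 + (i) * 5)

int main(void)
{
	for (int i = 0; i < PM886_GPADC_BIAS_LEVELS; i++)
		printf("level %2d -> %2d uA\n", i,
		       PM886_GPADC_INDEX_TO_BIAS_uA(i));
	return 0;
}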
diff --git a/include/linux/mfd/aat2870.h b/include/linux/mfd/aat2870.h
index 2445842d482d..c7a3c53eba68 100644
--- a/include/linux/mfd/aat2870.h
+++ b/include/linux/mfd/aat2870.h
@@ -133,9 +133,6 @@ struct aat2870_data {
int (*read)(struct aat2870_data *aat2870, u8 addr, u8 *val);
int (*write)(struct aat2870_data *aat2870, u8 addr, u8 val);
int (*update)(struct aat2870_data *aat2870, u8 addr, u8 mask, u8 val);
-
- /* for debugfs */
- struct dentry *dentry_root;
};
struct aat2870_subdev_info {
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index 302a330c5c84..76d326ea8eba 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -382,10 +382,6 @@ struct ab8500_platform_data {
struct ab8500_sysctrl_platform_data *sysctrl;
};
-extern int ab8500_init(struct ab8500 *ab8500,
- enum ab8500_version version);
-extern int ab8500_exit(struct ab8500 *ab8500);
-
extern int ab8500_suspend(struct ab8500 *ab8500);
static inline int is_ab8500(struct ab8500 *ab)
@@ -503,13 +499,7 @@ static inline int is_ab9540_2p0_or_earlier(struct ab8500 *ab)
void ab8500_override_turn_on_stat(u8 mask, u8 set);
-#ifdef CONFIG_AB8500_DEBUG
-extern int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
-void ab8500_dump_all_banks(struct device *dev);
-void ab8500_debug_register_interrupt(int line);
-#else
static inline void ab8500_dump_all_banks(struct device *dev) {}
static inline void ab8500_debug_register_interrupt(int line) {}
-#endif
#endif /* MFD_AB8500_H */
diff --git a/include/linux/mfd/adp5585.h b/include/linux/mfd/adp5585.h
new file mode 100644
index 000000000000..5237da6b4a9f
--- /dev/null
+++ b/include/linux/mfd/adp5585.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Analog Devices ADP5585 I/O expander, PWM controller and keypad controller
+ *
+ * Copyright 2022 NXP
+ * Copyright 2024 Ideas on Board Oy
+ */
+
+#ifndef __MFD_ADP5585_H_
+#define __MFD_ADP5585_H_
+
+#include <linux/bits.h>
+#include <linux/notifier.h>
+
+#define ADP5585_ID 0x00
+#define ADP5585_MAN_ID_VALUE 0x20
+#define ADP5585_MAN_ID_MASK GENMASK(7, 4)
+#define ADP5585_REV_ID_MASK GENMASK(3, 0)
+#define ADP5585_INT_STATUS 0x01
+#define ADP5585_OVRFLOW_INT BIT(2)
+#define ADP5585_EVENT_INT BIT(0)
+#define ADP5585_STATUS 0x02
+#define ADP5585_EC_MASK GENMASK(4, 0)
+#define ADP5585_FIFO_1 0x03
+#define ADP5585_KEV_EV_PRESS_MASK BIT(7)
+#define ADP5585_KEY_EVENT_MASK GENMASK(6, 0)
+#define ADP5585_FIFO_2 0x04
+#define ADP5585_FIFO_3 0x05
+#define ADP5585_FIFO_4 0x06
+#define ADP5585_FIFO_5 0x07
+#define ADP5585_FIFO_6 0x08
+#define ADP5585_FIFO_7 0x09
+#define ADP5585_FIFO_8 0x0a
+#define ADP5585_FIFO_9 0x0b
+#define ADP5585_FIFO_10 0x0c
+#define ADP5585_FIFO_11 0x0d
+#define ADP5585_FIFO_12 0x0e
+#define ADP5585_FIFO_13 0x0f
+#define ADP5585_FIFO_14 0x10
+#define ADP5585_FIFO_15 0x11
+#define ADP5585_FIFO_16 0x12
+#define ADP5585_EV_MAX (ADP5585_FIFO_16 - ADP5585_FIFO_1 + 1)
+#define ADP5585_GPI_INT_STAT_A 0x13
+#define ADP5585_GPI_INT_STAT_B 0x14
+#define ADP5585_GPI_STATUS_A 0x15
+#define ADP5585_GPI_STATUS_B 0x16
+#define ADP5585_RPULL_CONFIG_A 0x17
+#define ADP5585_RPULL_CONFIG_B 0x18
+#define ADP5585_RPULL_CONFIG_C 0x19
+#define ADP5585_RPULL_CONFIG_D 0x1a
+#define ADP5585_Rx_PULL_CFG_PU_300K 0
+#define ADP5585_Rx_PULL_CFG_PD_300K 1
+#define ADP5585_Rx_PULL_CFG_PU_100K 2
+#define ADP5585_Rx_PULL_CFG_DISABLE 3
+#define ADP5585_Rx_PULL_CFG_MASK 3
+#define ADP5585_GPI_INT_LEVEL_A 0x1b
+#define ADP5585_GPI_INT_LEVEL_B 0x1c
+#define ADP5585_GPI_EVENT_EN_A 0x1d
+#define ADP5585_GPI_EVENT_EN_B 0x1e
+#define ADP5585_GPI_INTERRUPT_EN_A 0x1f
+#define ADP5585_GPI_INTERRUPT_EN_B 0x20
+#define ADP5585_DEBOUNCE_DIS_A 0x21
+#define ADP5585_DEBOUNCE_DIS_B 0x22
+#define ADP5585_GPO_DATA_OUT_A 0x23
+#define ADP5585_GPO_DATA_OUT_B 0x24
+#define ADP5585_GPO_OUT_MODE_A 0x25
+#define ADP5585_GPO_OUT_MODE_B 0x26
+#define ADP5585_GPIO_DIRECTION_A 0x27
+#define ADP5585_GPIO_DIRECTION_B 0x28
+#define ADP5585_RESET1_EVENT_A 0x29
+#define ADP5585_RESET_EV_PRESS BIT(7)
+#define ADP5585_RESET1_EVENT_B 0x2a
+#define ADP5585_RESET1_EVENT_C 0x2b
+#define ADP5585_RESET2_EVENT_A 0x2c
+#define ADP5585_RESET2_EVENT_B 0x2d
+#define ADP5585_RESET_CFG 0x2e
+#define ADP5585_PWM_OFFT_LOW 0x2f
+#define ADP5585_PWM_OFFT_HIGH 0x30
+#define ADP5585_PWM_ONT_LOW 0x31
+#define ADP5585_PWM_ONT_HIGH 0x32
+#define ADP5585_PWM_CFG 0x33
+#define ADP5585_PWM_IN_AND BIT(2)
+#define ADP5585_PWM_MODE BIT(1)
+#define ADP5585_PWM_EN BIT(0)
+#define ADP5585_LOGIC_CFG 0x34
+#define ADP5585_LOGIC_FF_CFG 0x35
+#define ADP5585_LOGIC_INT_EVENT_EN 0x36
+#define ADP5585_POLL_PTIME_CFG 0x37
+#define ADP5585_PIN_CONFIG_A 0x38
+#define ADP5585_PIN_CONFIG_B 0x39
+#define ADP5585_PIN_CONFIG_C 0x3a
+#define ADP5585_PULL_SELECT BIT(7)
+#define ADP5585_C4_EXTEND_CFG_GPIO11 (0U << 6)
+#define ADP5585_C4_EXTEND_CFG_RESET2 (1U << 6)
+#define ADP5585_C4_EXTEND_CFG_MASK GENMASK(6, 6)
+#define ADP5585_R4_EXTEND_CFG_GPIO5 (0U << 5)
+#define ADP5585_R4_EXTEND_CFG_RESET1 (1U << 5)
+#define ADP5585_R4_EXTEND_CFG_MASK GENMASK(5, 5)
+#define ADP5585_R3_EXTEND_CFG_GPIO4 (0U << 2)
+#define ADP5585_R3_EXTEND_CFG_LC (1U << 2)
+#define ADP5585_R3_EXTEND_CFG_PWM_OUT (2U << 2)
+#define ADP5585_R3_EXTEND_CFG_MASK GENMASK(3, 2)
+#define ADP5585_R0_EXTEND_CFG_GPIO1 (0U << 0)
+#define ADP5585_R0_EXTEND_CFG_LY (1U << 0)
+#define ADP5585_R0_EXTEND_CFG_MASK GENMASK(0, 0)
+#define ADP5585_GENERAL_CFG 0x3b
+#define ADP5585_OSC_EN BIT(7)
+#define ADP5585_OSC_FREQ_50KHZ (0U << 5)
+#define ADP5585_OSC_FREQ_100KHZ (1U << 5)
+#define ADP5585_OSC_FREQ_200KHZ (2U << 5)
+#define ADP5585_OSC_FREQ_500KHZ (3U << 5)
+#define ADP5585_OSC_FREQ_MASK GENMASK(6, 5)
+#define ADP5585_INT_CFG BIT(1)
+#define ADP5585_RST_CFG BIT(0)
+#define ADP5585_INT_EN 0x3c
+#define ADP5585_OVRFLOW_IEN BIT(2)
+#define ADP5585_EVENT_IEN BIT(0)
+
+#define ADP5585_MAX_REG ADP5585_INT_EN
+
+#define ADP5585_PIN_MAX 11
+#define ADP5585_MAX_UNLOCK_TIME_SEC 7
+#define ADP5585_KEY_EVENT_START 1
+#define ADP5585_KEY_EVENT_END 25
+#define ADP5585_GPI_EVENT_START 37
+#define ADP5585_GPI_EVENT_END 47
+#define ADP5585_ROW5_KEY_EVENT_START 1
+#define ADP5585_ROW5_KEY_EVENT_END 30
+#define ADP5585_PWM_OUT 3
+#define ADP5585_RESET1_OUT 4
+#define ADP5585_RESET2_OUT 9
+#define ADP5585_ROW5 5
+
+/* ADP5589 */
+#define ADP5589_MAN_ID_VALUE 0x10
+#define ADP5589_GPI_STATUS_A 0x16
+#define ADP5589_GPI_STATUS_C 0x18
+#define ADP5589_RPULL_CONFIG_A 0x19
+#define ADP5589_GPI_INT_LEVEL_A 0x1e
+#define ADP5589_GPI_EVENT_EN_A 0x21
+#define ADP5589_DEBOUNCE_DIS_A 0x27
+#define ADP5589_GPO_DATA_OUT_A 0x2a
+#define ADP5589_GPO_OUT_MODE_A 0x2d
+#define ADP5589_GPIO_DIRECTION_A 0x30
+#define ADP5589_UNLOCK1 0x33
+#define ADP5589_UNLOCK_EV_PRESS BIT(7)
+#define ADP5589_UNLOCK_TIMERS 0x36
+#define ADP5589_UNLOCK_TIMER GENMASK(2, 0)
+#define ADP5589_LOCK_CFG 0x37
+#define ADP5589_LOCK_EN BIT(0)
+#define ADP5589_RESET1_EVENT_A 0x38
+#define ADP5589_RESET2_EVENT_A 0x3B
+#define ADP5589_RESET_CFG 0x3D
+#define ADP5585_RESET2_POL BIT(7)
+#define ADP5585_RESET1_POL BIT(6)
+#define ADP5585_RST_PASSTHRU_EN BIT(5)
+#define ADP5585_RESET_TRIG_TIME GENMASK(4, 2)
+#define ADP5585_PULSE_WIDTH GENMASK(1, 0)
+#define ADP5589_PWM_OFFT_LOW 0x3e
+#define ADP5589_PWM_ONT_LOW 0x40
+#define ADP5589_PWM_CFG 0x42
+#define ADP5589_POLL_PTIME_CFG 0x48
+#define ADP5589_PIN_CONFIG_A 0x49
+#define ADP5589_PIN_CONFIG_D 0x4C
+#define ADP5589_GENERAL_CFG 0x4d
+#define ADP5589_INT_EN 0x4e
+#define ADP5589_MAX_REG ADP5589_INT_EN
+
+#define ADP5589_PIN_MAX 19
+#define ADP5589_KEY_EVENT_START 1
+#define ADP5589_KEY_EVENT_END 88
+#define ADP5589_GPI_EVENT_START 97
+#define ADP5589_GPI_EVENT_END 115
+#define ADP5589_UNLOCK_WILDCARD 127
+#define ADP5589_RESET2_OUT 12
+
+struct regmap;
+
+enum adp5585_variant {
+ ADP5585_00 = 1,
+ ADP5585_01,
+ ADP5585_02,
+ ADP5585_03,
+ ADP5585_04,
+ ADP5589_00,
+ ADP5589_01,
+ ADP5589_02,
+ ADP5585_MAX
+};
+
+struct adp5585_regs {
+ unsigned int gen_cfg;
+ unsigned int ext_cfg;
+ unsigned int int_en;
+ unsigned int poll_ptime_cfg;
+ unsigned int reset_cfg;
+ unsigned int reset1_event_a;
+ unsigned int reset2_event_a;
+ unsigned int pin_cfg_a;
+};
+
+struct adp5585_dev {
+ struct device *dev;
+ struct regmap *regmap;
+ const struct adp5585_regs *regs;
+ struct blocking_notifier_head event_notifier;
+ unsigned long *pin_usage;
+ unsigned int n_pins;
+ unsigned int reset2_out;
+ enum adp5585_variant variant;
+ unsigned int id;
+ bool has_unlock;
+ bool has_pin6;
+ int irq;
+ unsigned int ev_poll_time;
+ unsigned int unlock_time;
+ unsigned int unlock_keys[2];
+ unsigned int nkeys_unlock;
+ unsigned int reset1_keys[3];
+ unsigned int nkeys_reset1;
+ unsigned int reset2_keys[2];
+ unsigned int nkeys_reset2;
+ u8 reset_cfg;
+};
+
+#endif
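Each FIFO byte above carries the press/release flag in bit 7 and the event number in bits 6:0, with key events and GPI events in disjoint number ranges. A small user-space sketch decoding one such byte (masks and ranges copied from the header; the 0x85 sample value is made up):

#include <stdbool.h>
#include <stdio.h>

#define ADP5585_KEV_EV_PRESS_MASK	0x80	/* BIT(7) */
#define ADP5585_KEY_EVENT_MASK		0x7f	/* GENMASK(6, 0) */
#define ADP5585_KEY_EVENT_START		1
#define ADP5585_KEY_EVENT_END		25
#define ADP5585_GPI_EVENT_START		37
#define ADP5585_GPI_EVENT_END		47

int main(void)
{
	unsigned char ev = 0x85;	/* hypothetical FIFO byte */
	unsigned int id = ev & ADP5585_KEY_EVENT_MASK;
	bool pressed = ev & ADP5585_KEV_EV_PRESS_MASK;

	if (id >= ADP5585_KEY_EVENT_START && id <= ADP5585_KEY_EVENT_END)
		printf("key %u %s\n", id, pressed ? "pressed" : "released");
	else if (id >= ADP5585_GPI_EVENT_START && id <= ADP5585_GPI_EVENT_END)
		printf("GPI %u %s\n", id - ADP5585_GPI_EVENT_START,
		       pressed ? "asserted" : "deasserted");
	return 0;
}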
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 2d13bbea4f3a..f72e6d4b14a7 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -117,8 +117,10 @@ struct arizona_pdata {
/** Check for line output with HPDET method */
bool hpdet_acc_id_line;
+#ifdef CONFIG_GPIOLIB_LEGACY
/** GPIO used for mic isolation with HPDET */
int hpdet_id_gpio;
+#endif
/** Channel to use for headphone detection */
unsigned int hpdet_channel;
@@ -129,8 +131,10 @@ struct arizona_pdata {
/** Extra debounce timeout used during initial mic detection (ms) */
unsigned int micd_detect_debounce;
+#ifdef CONFIG_GPIOLIB_LEGACY
/** GPIO for mic detection polarity */
int micd_pol_gpio;
+#endif
/** Mic detect ramp rate */
unsigned int micd_bias_start_time;
@@ -184,8 +188,10 @@ struct arizona_pdata {
/** Haptic actuator type */
unsigned int hap_act;
+#ifdef CONFIG_GPIOLIB_LEGACY
/** GPIO for primary IRQ (used for edge triggered emulation) */
int irq_gpio;
+#endif
/** General purpose switch control */
unsigned int gpsw;
diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h
deleted file mode 100644
index 61e686dbaa74..000000000000
--- a/include/linux/mfd/asic3.h
+++ /dev/null
@@ -1,313 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * include/linux/mfd/asic3.h
- *
- * Compaq ASIC3 headers.
- *
- * Copyright 2001 Compaq Computer Corporation.
- * Copyright 2007-2008 OpenedHand Ltd.
- */
-
-#ifndef __ASIC3_H__
-#define __ASIC3_H__
-
-#include <linux/types.h>
-
-struct led_classdev;
-struct asic3_led {
- const char *name;
- const char *default_trigger;
- struct led_classdev *cdev;
-};
-
-struct asic3_platform_data {
- u16 *gpio_config;
- unsigned int gpio_config_num;
-
- unsigned int irq_base;
-
- unsigned int gpio_base;
-
- unsigned int clock_rate;
-
- struct asic3_led *leds;
-};
-
-#define ASIC3_NUM_GPIO_BANKS 4
-#define ASIC3_GPIOS_PER_BANK 16
-#define ASIC3_NUM_GPIOS 64
-#define ASIC3_NR_IRQS ASIC3_NUM_GPIOS + 6
-
-#define ASIC3_IRQ_LED0 64
-#define ASIC3_IRQ_LED1 65
-#define ASIC3_IRQ_LED2 66
-#define ASIC3_IRQ_SPI 67
-#define ASIC3_IRQ_SMBUS 68
-#define ASIC3_IRQ_OWM 69
-
-#define ASIC3_TO_GPIO(gpio) (NR_BUILTIN_GPIO + (gpio))
-
-#define ASIC3_GPIO_BANK_A 0
-#define ASIC3_GPIO_BANK_B 1
-#define ASIC3_GPIO_BANK_C 2
-#define ASIC3_GPIO_BANK_D 3
-
-#define ASIC3_GPIO(bank, gpio) \
- ((ASIC3_GPIOS_PER_BANK * ASIC3_GPIO_BANK_##bank) + (gpio))
-#define ASIC3_GPIO_bit(gpio) (1 << (gpio & 0xf))
-/* All offsets below are specified with this address bus shift */
-#define ASIC3_DEFAULT_ADDR_SHIFT 2
-
-#define ASIC3_OFFSET(base, reg) (ASIC3_##base##_BASE + ASIC3_##base##_##reg)
-#define ASIC3_GPIO_OFFSET(base, reg) \
- (ASIC3_GPIO_##base##_BASE + ASIC3_GPIO_##reg)
-
-#define ASIC3_GPIO_A_BASE 0x0000
-#define ASIC3_GPIO_B_BASE 0x0100
-#define ASIC3_GPIO_C_BASE 0x0200
-#define ASIC3_GPIO_D_BASE 0x0300
-
-#define ASIC3_GPIO_TO_BANK(gpio) ((gpio) >> 4)
-#define ASIC3_GPIO_TO_BIT(gpio) ((gpio) - \
- (ASIC3_GPIOS_PER_BANK * ((gpio) >> 4)))
-#define ASIC3_GPIO_TO_MASK(gpio) (1 << ASIC3_GPIO_TO_BIT(gpio))
-#define ASIC3_GPIO_TO_BASE(gpio) (ASIC3_GPIO_A_BASE + (((gpio) >> 4) * 0x0100))
-#define ASIC3_BANK_TO_BASE(bank) (ASIC3_GPIO_A_BASE + ((bank) * 0x100))
-
-#define ASIC3_GPIO_MASK 0x00 /* R/W 0:don't mask */
-#define ASIC3_GPIO_DIRECTION 0x04 /* R/W 0:input */
-#define ASIC3_GPIO_OUT 0x08 /* R/W 0:output low */
-#define ASIC3_GPIO_TRIGGER_TYPE 0x0c /* R/W 0:level */
-#define ASIC3_GPIO_EDGE_TRIGGER 0x10 /* R/W 0:falling */
-#define ASIC3_GPIO_LEVEL_TRIGGER 0x14 /* R/W 0:low level detect */
-#define ASIC3_GPIO_SLEEP_MASK 0x18 /* R/W 0:don't mask in sleep mode */
-#define ASIC3_GPIO_SLEEP_OUT 0x1c /* R/W level 0:low in sleep mode */
-#define ASIC3_GPIO_BAT_FAULT_OUT 0x20 /* R/W level 0:low in batt_fault */
-#define ASIC3_GPIO_INT_STATUS 0x24 /* R/W 0:none, 1:detect */
-#define ASIC3_GPIO_ALT_FUNCTION 0x28 /* R/W 1:LED register control */
-#define ASIC3_GPIO_SLEEP_CONF 0x2c /*
- * R/W bit 1: autosleep
- * 0: disable gposlpout in normal mode,
- * enable gposlpout in sleep mode.
- */
-#define ASIC3_GPIO_STATUS 0x30 /* R Pin status */
-
-/*
- * ASIC3 GPIO config
- *
- * Bits 0..6 gpio number
- * Bits 7..13 Alternate function
- * Bit 14 Direction
- * Bit 15 Initial value
- *
- */
-#define ASIC3_CONFIG_GPIO_PIN(config) ((config) & 0x7f)
-#define ASIC3_CONFIG_GPIO_ALT(config) (((config) & (0x7f << 7)) >> 7)
-#define ASIC3_CONFIG_GPIO_DIR(config) ((config & (1 << 14)) >> 14)
-#define ASIC3_CONFIG_GPIO_INIT(config) ((config & (1 << 15)) >> 15)
-#define ASIC3_CONFIG_GPIO(gpio, alt, dir, init) (((gpio) & 0x7f) \
- | (((alt) & 0x7f) << 7) | (((dir) & 0x1) << 14) \
- | (((init) & 0x1) << 15))
-#define ASIC3_CONFIG_GPIO_DEFAULT(gpio, dir, init) \
- ASIC3_CONFIG_GPIO((gpio), 0, (dir), (init))
-#define ASIC3_CONFIG_GPIO_DEFAULT_OUT(gpio, init) \
- ASIC3_CONFIG_GPIO((gpio), 0, 1, (init))
-
-/*
- * Alternate functions
- */
-#define ASIC3_GPIOA11_PWM0 ASIC3_CONFIG_GPIO(11, 1, 1, 0)
-#define ASIC3_GPIOA12_PWM1 ASIC3_CONFIG_GPIO(12, 1, 1, 0)
-#define ASIC3_GPIOA15_CONTROL_CX ASIC3_CONFIG_GPIO(15, 1, 1, 0)
-#define ASIC3_GPIOC0_LED0 ASIC3_CONFIG_GPIO(32, 1, 0, 0)
-#define ASIC3_GPIOC1_LED1 ASIC3_CONFIG_GPIO(33, 1, 0, 0)
-#define ASIC3_GPIOC2_LED2 ASIC3_CONFIG_GPIO(34, 1, 0, 0)
-#define ASIC3_GPIOC3_SPI_RXD ASIC3_CONFIG_GPIO(35, 1, 0, 0)
-#define ASIC3_GPIOC4_CF_nCD ASIC3_CONFIG_GPIO(36, 1, 0, 0)
-#define ASIC3_GPIOC4_SPI_TXD ASIC3_CONFIG_GPIO(36, 1, 1, 0)
-#define ASIC3_GPIOC5_SPI_CLK ASIC3_CONFIG_GPIO(37, 1, 1, 0)
-#define ASIC3_GPIOC5_nCIOW ASIC3_CONFIG_GPIO(37, 1, 1, 0)
-#define ASIC3_GPIOC6_nCIOR ASIC3_CONFIG_GPIO(38, 1, 1, 0)
-#define ASIC3_GPIOC7_nPCE_1 ASIC3_CONFIG_GPIO(39, 1, 0, 0)
-#define ASIC3_GPIOC8_nPCE_2 ASIC3_CONFIG_GPIO(40, 1, 0, 0)
-#define ASIC3_GPIOC9_nPOE ASIC3_CONFIG_GPIO(41, 1, 0, 0)
-#define ASIC3_GPIOC10_nPWE ASIC3_CONFIG_GPIO(42, 1, 0, 0)
-#define ASIC3_GPIOC11_PSKTSEL ASIC3_CONFIG_GPIO(43, 1, 0, 0)
-#define ASIC3_GPIOC12_nPREG ASIC3_CONFIG_GPIO(44, 1, 0, 0)
-#define ASIC3_GPIOC13_nPWAIT ASIC3_CONFIG_GPIO(45, 1, 1, 0)
-#define ASIC3_GPIOC14_nPIOIS16 ASIC3_CONFIG_GPIO(46, 1, 1, 0)
-#define ASIC3_GPIOC15_nPIOR ASIC3_CONFIG_GPIO(47, 1, 0, 0)
-#define ASIC3_GPIOD4_CF_nCD ASIC3_CONFIG_GPIO(52, 1, 0, 0)
-#define ASIC3_GPIOD11_nCIOIS16 ASIC3_CONFIG_GPIO(59, 1, 0, 0)
-#define ASIC3_GPIOD12_nCWAIT ASIC3_CONFIG_GPIO(60, 1, 0, 0)
-#define ASIC3_GPIOD15_nPIOW ASIC3_CONFIG_GPIO(63, 1, 0, 0)
-
-
-#define ASIC3_SPI_Base 0x0400
-#define ASIC3_SPI_Control 0x0000
-#define ASIC3_SPI_TxData 0x0004
-#define ASIC3_SPI_RxData 0x0008
-#define ASIC3_SPI_Int 0x000c
-#define ASIC3_SPI_Status 0x0010
-
-#define SPI_CONTROL_SPR(clk) ((clk) & 0x0f) /* Clock rate */
-
-#define ASIC3_PWM_0_Base 0x0500
-#define ASIC3_PWM_1_Base 0x0600
-#define ASIC3_PWM_TimeBase 0x0000
-#define ASIC3_PWM_PeriodTime 0x0004
-#define ASIC3_PWM_DutyTime 0x0008
-
-#define PWM_TIMEBASE_VALUE(x) ((x)&0xf) /* Low 4 bits sets time base */
-#define PWM_TIMEBASE_ENABLE (1 << 4) /* Enable clock */
-
-#define ASIC3_NUM_LEDS 3
-#define ASIC3_LED_0_Base 0x0700
-#define ASIC3_LED_1_Base 0x0800
-#define ASIC3_LED_2_Base 0x0900
-#define ASIC3_LED_TimeBase 0x0000 /* R/W 7 bits */
-#define ASIC3_LED_PeriodTime 0x0004 /* R/W 12 bits */
-#define ASIC3_LED_DutyTime 0x0008 /* R/W 12 bits */
-#define ASIC3_LED_AutoStopCount 0x000c /* R/W 16 bits */
-
-/* LED TimeBase bits - match ASIC2 */
-#define LED_TBS 0x0f /* Low 4 bits sets time base, max = 13 */
- /* Note: max = 5 on hx4700 */
- /* 0: maximum time base */
- /* 1: maximum time base / 2 */
- /* n: maximum time base / 2^n */
-
-#define LED_EN (1 << 4) /* LED ON/OFF 0:off, 1:on */
-#define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop 0:disable, 1:enable */
-#define LED_ALWAYS (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask */
-
-#define ASIC3_CLOCK_BASE 0x0A00
-#define ASIC3_CLOCK_CDEX 0x00
-#define ASIC3_CLOCK_SEL 0x04
-
-#define CLOCK_CDEX_SOURCE (1 << 0) /* 2 bits */
-#define CLOCK_CDEX_SOURCE0 (1 << 0)
-#define CLOCK_CDEX_SOURCE1 (1 << 1)
-#define CLOCK_CDEX_SPI (1 << 2)
-#define CLOCK_CDEX_OWM (1 << 3)
-#define CLOCK_CDEX_PWM0 (1 << 4)
-#define CLOCK_CDEX_PWM1 (1 << 5)
-#define CLOCK_CDEX_LED0 (1 << 6)
-#define CLOCK_CDEX_LED1 (1 << 7)
-#define CLOCK_CDEX_LED2 (1 << 8)
-
-/* Clocks settings: 1 for 24.576 MHz, 0 for 12.288Mhz */
-#define CLOCK_CDEX_SD_HOST (1 << 9) /* R/W: SD host clock source */
-#define CLOCK_CDEX_SD_BUS (1 << 10) /* R/W: SD bus clock source ctrl */
-#define CLOCK_CDEX_SMBUS (1 << 11)
-#define CLOCK_CDEX_CONTROL_CX (1 << 12)
-
-#define CLOCK_CDEX_EX0 (1 << 13) /* R/W: 32.768 kHz crystal */
-#define CLOCK_CDEX_EX1 (1 << 14) /* R/W: 24.576 MHz crystal */
-
-#define CLOCK_SEL_SD_HCLK_SEL (1 << 0) /* R/W: SDIO host clock select */
-#define CLOCK_SEL_SD_BCLK_SEL (1 << 1) /* R/W: SDIO bus clock select */
-
-/* R/W: INT clock source control (32.768 kHz) */
-#define CLOCK_SEL_CX (1 << 2)
-
-
-#define ASIC3_INTR_BASE 0x0B00
-
-#define ASIC3_INTR_INT_MASK 0x00 /* Interrupt mask control */
-#define ASIC3_INTR_P_INT_STAT 0x04 /* Peripheral interrupt status */
-#define ASIC3_INTR_INT_CPS 0x08 /* Interrupt timer clock pre-scale */
-#define ASIC3_INTR_INT_TBS 0x0c /* Interrupt timer set */
-
-#define ASIC3_INTMASK_GINTMASK (1 << 0) /* Global INTs mask 1:enable */
-#define ASIC3_INTMASK_GINTEL (1 << 1) /* 1: rising edge, 0: hi level */
-#define ASIC3_INTMASK_MASK0 (1 << 2)
-#define ASIC3_INTMASK_MASK1 (1 << 3)
-#define ASIC3_INTMASK_MASK2 (1 << 4)
-#define ASIC3_INTMASK_MASK3 (1 << 5)
-#define ASIC3_INTMASK_MASK4 (1 << 6)
-#define ASIC3_INTMASK_MASK5 (1 << 7)
-
-#define ASIC3_INTR_PERIPHERAL_A (1 << 0)
-#define ASIC3_INTR_PERIPHERAL_B (1 << 1)
-#define ASIC3_INTR_PERIPHERAL_C (1 << 2)
-#define ASIC3_INTR_PERIPHERAL_D (1 << 3)
-#define ASIC3_INTR_LED0 (1 << 4)
-#define ASIC3_INTR_LED1 (1 << 5)
-#define ASIC3_INTR_LED2 (1 << 6)
-#define ASIC3_INTR_SPI (1 << 7)
-#define ASIC3_INTR_SMBUS (1 << 8)
-#define ASIC3_INTR_OWM (1 << 9)
-
-#define ASIC3_INTR_CPS(x) ((x)&0x0f) /* 4 bits, max 14 */
-#define ASIC3_INTR_CPS_SET (1 << 4) /* Time base enable */
-
-
-/* Basic control of the SD ASIC */
-#define ASIC3_SDHWCTRL_BASE 0x0E00
-#define ASIC3_SDHWCTRL_SDCONF 0x00
-
-#define ASIC3_SDHWCTRL_SUSPEND (1 << 0) /* 1=suspend all SD operations */
-#define ASIC3_SDHWCTRL_CLKSEL (1 << 1) /* 1=SDICK, 0=HCLK */
-#define ASIC3_SDHWCTRL_PCLR (1 << 2) /* All registers of SDIO cleared */
-#define ASIC3_SDHWCTRL_LEVCD (1 << 3) /* SD card detection: 0:low */
-
-/* SD card write protection: 0=high */
-#define ASIC3_SDHWCTRL_LEVWP (1 << 4)
-#define ASIC3_SDHWCTRL_SDLED (1 << 5) /* SD card LED signal 0=disable */
-
-/* SD card power supply ctrl 1=enable */
-#define ASIC3_SDHWCTRL_SDPWR (1 << 6)
-
-#define ASIC3_EXTCF_BASE 0x1100
-
-#define ASIC3_EXTCF_SELECT 0x00
-#define ASIC3_EXTCF_RESET 0x04
-
-#define ASIC3_EXTCF_SMOD0 (1 << 0) /* slot number of mode 0 */
-#define ASIC3_EXTCF_SMOD1 (1 << 1) /* slot number of mode 1 */
-#define ASIC3_EXTCF_SMOD2 (1 << 2) /* slot number of mode 2 */
-#define ASIC3_EXTCF_OWM_EN (1 << 4) /* enable onewire module */
-#define ASIC3_EXTCF_OWM_SMB (1 << 5) /* OWM bus selection */
-#define ASIC3_EXTCF_OWM_RESET (1 << 6) /* ?? used by OWM and CF */
-#define ASIC3_EXTCF_CF0_SLEEP_MODE (1 << 7) /* CF0 sleep state */
-#define ASIC3_EXTCF_CF1_SLEEP_MODE (1 << 8) /* CF1 sleep state */
-#define ASIC3_EXTCF_CF0_PWAIT_EN (1 << 10) /* CF0 PWAIT_n control */
-#define ASIC3_EXTCF_CF1_PWAIT_EN (1 << 11) /* CF1 PWAIT_n control */
-#define ASIC3_EXTCF_CF0_BUF_EN (1 << 12) /* CF0 buffer control */
-#define ASIC3_EXTCF_CF1_BUF_EN (1 << 13) /* CF1 buffer control */
-#define ASIC3_EXTCF_SD_MEM_ENABLE (1 << 14)
-#define ASIC3_EXTCF_CF_SLEEP (1 << 15) /* CF sleep mode control */
-
-/*********************************************
- * The Onewire interface (DS1WM) is handled
- * by the ds1wm driver.
- *
- *********************************************/
-
-#define ASIC3_OWM_BASE 0xC00
-
-/*****************************************************************************
- * The SD configuration registers are at a completely different location
- * in memory. They are divided into three sets of registers:
- *
- * SD_CONFIG Core configuration register
- * SD_CTRL Control registers for SD operations
- * SDIO_CTRL Control registers for SDIO operations
- *
- *****************************************************************************/
-#define ASIC3_SD_CONFIG_BASE 0x0400 /* Assumes 32 bit addressing */
-#define ASIC3_SD_CONFIG_SIZE 0x0200 /* Assumes 32 bit addressing */
-#define ASIC3_SD_CTRL_BASE 0x1000
-#define ASIC3_SDIO_CTRL_BASE 0x1200
-
-#define ASIC3_MAP_SIZE_32BIT 0x2000
-#define ASIC3_MAP_SIZE_16BIT 0x1000
-
-/* Functions needed by leds-asic3 */
-
-struct asic3;
-extern void asic3_write_register(struct asic3 *asic, unsigned int reg, u32 val);
-extern u32 asic3_read_register(struct asic3 *asic, unsigned int reg);
-
-#endif /* __ASIC3_H__ */
diff --git a/include/linux/mfd/atmel-hlcdc.h b/include/linux/mfd/atmel-hlcdc.h
index a186119a49b5..80d675a03b39 100644
--- a/include/linux/mfd/atmel-hlcdc.h
+++ b/include/linux/mfd/atmel-hlcdc.h
@@ -22,6 +22,8 @@
#define ATMEL_HLCDC_DITHER BIT(6)
#define ATMEL_HLCDC_DISPDLY BIT(7)
#define ATMEL_HLCDC_MODE_MASK GENMASK(9, 8)
+#define ATMEL_XLCDC_MODE_MASK GENMASK(10, 8)
+#define ATMEL_XLCDC_DPI BIT(11)
#define ATMEL_HLCDC_PP BIT(10)
#define ATMEL_HLCDC_VSPSU BIT(12)
#define ATMEL_HLCDC_VSPHO BIT(13)
@@ -34,6 +36,12 @@
#define ATMEL_HLCDC_IDR 0x30
#define ATMEL_HLCDC_IMR 0x34
#define ATMEL_HLCDC_ISR 0x38
+#define ATMEL_XLCDC_ATTRE 0x3c
+
+#define ATMEL_XLCDC_BASE_UPDATE BIT(0)
+#define ATMEL_XLCDC_OVR1_UPDATE BIT(1)
+#define ATMEL_XLCDC_OVR3_UPDATE BIT(2)
+#define ATMEL_XLCDC_HEO_UPDATE BIT(3)
#define ATMEL_HLCDC_CLKPOL BIT(0)
#define ATMEL_HLCDC_CLKSEL BIT(2)
@@ -48,6 +56,8 @@
#define ATMEL_HLCDC_DISP BIT(2)
#define ATMEL_HLCDC_PWM BIT(3)
#define ATMEL_HLCDC_SIP BIT(4)
+#define ATMEL_XLCDC_SD BIT(5)
+#define ATMEL_XLCDC_CM BIT(6)
#define ATMEL_HLCDC_SOF BIT(0)
#define ATMEL_HLCDC_SYNCDIS BIT(1)
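The new ATMEL_XLCDC_ATTRE register looks like a write-to-latch strobe for pending per-plane attribute updates. A hedged sketch, assuming a plain regmap write is all that is needed (the function itself is hypothetical):

#include <linux/mfd/atmel-hlcdc.h>
#include <linux/regmap.h>

/* Latch pending base-layer and HEO attribute updates (assumption). */
static int example_xlcdc_latch(struct regmap *regmap)
{
	return regmap_write(regmap, ATMEL_XLCDC_ATTRE,
			    ATMEL_XLCDC_BASE_UPDATE | ATMEL_XLCDC_HEO_UPDATE);
}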
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 9ab0e2fca7ea..3c5aecf1d4b5 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -12,18 +12,24 @@
enum axp20x_variants {
AXP152_ID = 0,
+ AXP192_ID,
AXP202_ID,
AXP209_ID,
AXP221_ID,
AXP223_ID,
AXP288_ID,
+ AXP313A_ID,
+ AXP323_ID,
+ AXP717_ID,
AXP803_ID,
AXP806_ID,
AXP809_ID,
AXP813_ID,
+ AXP15060_ID,
NR_AXP20X_VARIANTS,
};
+#define AXP192_DATACACHE(m) (0x06 + (m))
#define AXP20X_DATACACHE(m) (0x04 + (m))
/* Power supply */
@@ -45,6 +51,13 @@ enum axp20x_variants {
#define AXP152_DCDC_FREQ 0x37
#define AXP152_DCDC_MODE 0x80
+#define AXP192_USB_OTG_STATUS 0x04
+#define AXP192_PWR_OUT_CTRL 0x12
+#define AXP192_DCDC2_V_OUT 0x23
+#define AXP192_DCDC1_V_OUT 0x26
+#define AXP192_DCDC3_V_OUT 0x27
+#define AXP192_LDO2_3_V_OUT 0x28
+
#define AXP20X_PWR_INPUT_STATUS 0x00
#define AXP20X_PWR_OP_MODE 0x01
#define AXP20X_USB_OTG_STATUS 0x02
@@ -91,6 +104,77 @@ enum axp20x_variants {
#define AXP22X_ALDO3_V_OUT 0x2a
#define AXP22X_CHRG_CTRL3 0x35
+#define AXP313A_ON_INDICATE 0x00
+#define AXP313A_OUTPUT_CONTROL 0x10
+#define AXP313A_DCDC1_CONTROL 0x13
+#define AXP313A_DCDC2_CONTROL 0x14
+#define AXP313A_DCDC3_CONTROL 0x15
+#define AXP313A_ALDO1_CONTROL 0x16
+#define AXP313A_DLDO1_CONTROL 0x17
+#define AXP313A_SHUTDOWN_CTRL 0x1a
+#define AXP313A_IRQ_EN 0x20
+#define AXP313A_IRQ_STATE 0x21
+#define AXP323_DCDC_MODE_CTRL2 0x22
+
+#define AXP717_ON_INDICATE 0x00
+#define AXP717_PMU_STATUS_2 0x01
+#define AXP717_BC_DETECT 0x05
+#define AXP717_PMU_FAULT 0x08
+#define AXP717_MODULE_EN_CONTROL_1 0x0b
+#define AXP717_MIN_SYS_V_CONTROL 0x15
+#define AXP717_INPUT_VOL_LIMIT_CTRL 0x16
+#define AXP717_INPUT_CUR_LIMIT_CTRL 0x17
+#define AXP717_MODULE_EN_CONTROL_2 0x19
+#define AXP717_BOOST_CONTROL 0x1e
+#define AXP717_VSYS_V_POWEROFF 0x24
+#define AXP717_IRQ0_EN 0x40
+#define AXP717_IRQ1_EN 0x41
+#define AXP717_IRQ2_EN 0x42
+#define AXP717_IRQ3_EN 0x43
+#define AXP717_IRQ4_EN 0x44
+#define AXP717_IRQ0_STATE 0x48
+#define AXP717_IRQ1_STATE 0x49
+#define AXP717_IRQ2_STATE 0x4a
+#define AXP717_IRQ3_STATE 0x4b
+#define AXP717_IRQ4_STATE 0x4c
+#define AXP717_TS_PIN_CFG 0x50
+#define AXP717_ICC_CHG_SET 0x62
+#define AXP717_ITERM_CHG_SET 0x63
+#define AXP717_CV_CHG_SET 0x64
+#define AXP717_DCDC_OUTPUT_CONTROL 0x80
+#define AXP717_DCDC1_CONTROL 0x83
+#define AXP717_DCDC2_CONTROL 0x84
+#define AXP717_DCDC3_CONTROL 0x85
+#define AXP717_DCDC4_CONTROL 0x86
+#define AXP717_LDO0_OUTPUT_CONTROL 0x90
+#define AXP717_LDO1_OUTPUT_CONTROL 0x91
+#define AXP717_ALDO1_CONTROL 0x93
+#define AXP717_ALDO2_CONTROL 0x94
+#define AXP717_ALDO3_CONTROL 0x95
+#define AXP717_ALDO4_CONTROL 0x96
+#define AXP717_BLDO1_CONTROL 0x97
+#define AXP717_BLDO2_CONTROL 0x98
+#define AXP717_BLDO3_CONTROL 0x99
+#define AXP717_BLDO4_CONTROL 0x9a
+#define AXP717_CLDO1_CONTROL 0x9b
+#define AXP717_CLDO2_CONTROL 0x9c
+#define AXP717_CLDO3_CONTROL 0x9d
+#define AXP717_CLDO4_CONTROL 0x9e
+#define AXP717_CPUSLDO_CONTROL 0x9f
+#define AXP717_BATT_PERCENT_DATA 0xa4
+#define AXP717_ADC_CH_EN_CONTROL 0xc0
+#define AXP717_BATT_V_H 0xc4
+#define AXP717_BATT_V_L 0xc5
+#define AXP717_VBUS_V_H 0xc6
+#define AXP717_VBUS_V_L 0xc7
+#define AXP717_VSYS_V_H 0xc8
+#define AXP717_VSYS_V_L 0xc9
+#define AXP717_BATT_CHRG_I_H 0xca
+#define AXP717_BATT_CHRG_I_L 0xcb
+#define AXP717_ADC_DATA_SEL 0xcd
+#define AXP717_ADC_DATA_H 0xce
+#define AXP717_ADC_DATA_L 0xcf
+
#define AXP806_STARTUP_SRC 0x00
#define AXP806_CHIP_ID 0x03
#define AXP806_PWR_OUT_CTRL1 0x10
@@ -131,6 +215,39 @@ enum axp20x_variants {
/* Other DCDC regulator control registers are the same as AXP803 */
#define AXP813_DCDC7_V_OUT 0x26
+#define AXP15060_STARTUP_SRC 0x00
+#define AXP15060_PWR_OUT_CTRL1 0x10
+#define AXP15060_PWR_OUT_CTRL2 0x11
+#define AXP15060_PWR_OUT_CTRL3 0x12
+#define AXP15060_DCDC1_V_CTRL 0x13
+#define AXP15060_DCDC2_V_CTRL 0x14
+#define AXP15060_DCDC3_V_CTRL 0x15
+#define AXP15060_DCDC4_V_CTRL 0x16
+#define AXP15060_DCDC5_V_CTRL 0x17
+#define AXP15060_DCDC6_V_CTRL 0x18
+#define AXP15060_ALDO1_V_CTRL 0x19
+#define AXP15060_DCDC_MODE_CTRL1 0x1a
+#define AXP15060_DCDC_MODE_CTRL2 0x1b
+#define AXP15060_OUTPUT_MONITOR_DISCHARGE 0x1e
+#define AXP15060_IRQ_PWROK_VOFF 0x1f
+#define AXP15060_ALDO2_V_CTRL 0x20
+#define AXP15060_ALDO3_V_CTRL 0x21
+#define AXP15060_ALDO4_V_CTRL 0x22
+#define AXP15060_ALDO5_V_CTRL 0x23
+#define AXP15060_BLDO1_V_CTRL 0x24
+#define AXP15060_BLDO2_V_CTRL 0x25
+#define AXP15060_BLDO3_V_CTRL 0x26
+#define AXP15060_BLDO4_V_CTRL 0x27
+#define AXP15060_BLDO5_V_CTRL 0x28
+#define AXP15060_CLDO1_V_CTRL 0x29
+#define AXP15060_CLDO2_V_CTRL 0x2a
+#define AXP15060_CLDO3_V_CTRL 0x2b
+#define AXP15060_CLDO4_V_CTRL 0x2d
+#define AXP15060_CPUSLDO_V_CTRL 0x2e
+#define AXP15060_PWR_WAKEUP_CTRL 0x31
+#define AXP15060_PWR_DISABLE_DOWN_SEQ 0x32
+#define AXP15060_PEK_KEY 0x36
+
/* Interrupt */
#define AXP152_IRQ1_EN 0x40
#define AXP152_IRQ2_EN 0x41
@@ -139,6 +256,17 @@ enum axp20x_variants {
#define AXP152_IRQ2_STATE 0x49
#define AXP152_IRQ3_STATE 0x4a
+#define AXP192_IRQ1_EN 0x40
+#define AXP192_IRQ2_EN 0x41
+#define AXP192_IRQ3_EN 0x42
+#define AXP192_IRQ4_EN 0x43
+#define AXP192_IRQ1_STATE 0x44
+#define AXP192_IRQ2_STATE 0x45
+#define AXP192_IRQ3_STATE 0x46
+#define AXP192_IRQ4_STATE 0x47
+#define AXP192_IRQ5_EN 0x4a
+#define AXP192_IRQ5_STATE 0x4d
+
#define AXP20X_IRQ1_EN 0x40
#define AXP20X_IRQ2_EN 0x41
#define AXP20X_IRQ3_EN 0x42
@@ -152,7 +280,17 @@ enum axp20x_variants {
#define AXP20X_IRQ5_STATE 0x4c
#define AXP20X_IRQ6_STATE 0x4d
+#define AXP15060_IRQ1_EN 0x40
+#define AXP15060_IRQ2_EN 0x41
+#define AXP15060_IRQ1_STATE 0x48
+#define AXP15060_IRQ2_STATE 0x49
+
/* ADC */
+#define AXP192_GPIO2_V_ADC_H 0x68
+#define AXP192_GPIO2_V_ADC_L 0x69
+#define AXP192_GPIO3_V_ADC_H 0x6a
+#define AXP192_GPIO3_V_ADC_L 0x6b
+
#define AXP20X_ACIN_V_ADC_H 0x56
#define AXP20X_ACIN_V_ADC_L 0x57
#define AXP20X_ACIN_I_ADC_H 0x58
@@ -182,6 +320,8 @@ enum axp20x_variants {
#define AXP20X_IPSOUT_V_HIGH_L 0x7f
/* Power supply */
+#define AXP192_GPIO30_IN_RANGE 0x85
+
#define AXP20X_DCDC_MODE 0x80
#define AXP20X_ADC_EN1 0x82
#define AXP20X_ADC_EN2 0x83
@@ -210,6 +350,16 @@ enum axp20x_variants {
#define AXP152_PWM1_FREQ_Y 0x9c
#define AXP152_PWM1_DUTY_CYCLE 0x9d
+#define AXP192_GPIO0_CTRL 0x90
+#define AXP192_LDO_IO0_V_OUT 0x91
+#define AXP192_GPIO1_CTRL 0x92
+#define AXP192_GPIO2_CTRL 0x93
+#define AXP192_GPIO2_0_STATE 0x94
+#define AXP192_GPIO4_3_CTRL 0x95
+#define AXP192_GPIO4_3_STATE 0x96
+#define AXP192_GPIO2_0_PULL 0x97
+#define AXP192_N_RSTO_CTRL 0x9e
+
#define AXP20X_GPIO0_CTRL 0x90
#define AXP20X_LDO5_V_OUT 0x91
#define AXP20X_GPIO1_CTRL 0x92
@@ -222,6 +372,8 @@ enum axp20x_variants {
#define AXP22X_GPIO_STATE 0x94
#define AXP22X_GPIO_PULL_DOWN 0x95
+#define AXP15060_CLDO4_GPIO2_MODESET 0x2c
+
/* Battery */
#define AXP20X_CHRG_CC_31_24 0xb0
#define AXP20X_CHRG_CC_23_16 0xb1
@@ -288,6 +440,17 @@ enum axp20x_variants {
/* Regulators IDs */
enum {
+ AXP192_DCDC1 = 0,
+ AXP192_DCDC2,
+ AXP192_DCDC3,
+ AXP192_LDO1,
+ AXP192_LDO2,
+ AXP192_LDO3,
+ AXP192_LDO_IO0,
+ AXP192_REG_ID_MAX
+};
+
+enum {
AXP20X_LDO1 = 0,
AXP20X_LDO2,
AXP20X_LDO3,
@@ -323,6 +486,38 @@ enum {
};
enum {
+ AXP313A_DCDC1 = 0,
+ AXP313A_DCDC2,
+ AXP313A_DCDC3,
+ AXP313A_ALDO1,
+ AXP313A_DLDO1,
+ AXP313A_RTC_LDO,
+ AXP313A_REG_ID_MAX,
+};
+
+enum {
+ AXP717_DCDC1 = 0,
+ AXP717_DCDC2,
+ AXP717_DCDC3,
+ AXP717_DCDC4,
+ AXP717_ALDO1,
+ AXP717_ALDO2,
+ AXP717_ALDO3,
+ AXP717_ALDO4,
+ AXP717_BLDO1,
+ AXP717_BLDO2,
+ AXP717_BLDO3,
+ AXP717_BLDO4,
+ AXP717_CLDO1,
+ AXP717_CLDO2,
+ AXP717_CLDO3,
+ AXP717_CLDO4,
+ AXP717_CPUSLDO,
+ AXP717_BOOST,
+ AXP717_REG_ID_MAX,
+};
+
+enum {
AXP806_DCDCA = 0,
AXP806_DCDCB,
AXP806_DCDCC,
@@ -419,6 +614,33 @@ enum {
AXP813_REG_ID_MAX,
};
+enum {
+ AXP15060_DCDC1 = 0,
+ AXP15060_DCDC2,
+ AXP15060_DCDC3,
+ AXP15060_DCDC4,
+ AXP15060_DCDC5,
+ AXP15060_DCDC6,
+ AXP15060_ALDO1,
+ AXP15060_ALDO2,
+ AXP15060_ALDO3,
+ AXP15060_ALDO4,
+ AXP15060_ALDO5,
+ AXP15060_BLDO1,
+ AXP15060_BLDO2,
+ AXP15060_BLDO3,
+ AXP15060_BLDO4,
+ AXP15060_BLDO5,
+ AXP15060_CLDO1,
+ AXP15060_CLDO2,
+ AXP15060_CLDO3,
+ AXP15060_CLDO4,
+ AXP15060_CPUSLDO,
+ AXP15060_SW,
+ AXP15060_RTC_LDO,
+ AXP15060_REG_ID_MAX,
+};
+
/* IRQs */
enum {
AXP152_IRQ_LDO0IN_CONNECT = 1,
@@ -432,14 +654,51 @@ enum {
AXP152_IRQ_PEK_SHORT,
AXP152_IRQ_PEK_LONG,
AXP152_IRQ_TIMER,
- AXP152_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP152_IRQ_PEK_FAL_EDGE,
+ AXP152_IRQ_PEK_RIS_EDGE,
AXP152_IRQ_GPIO3_INPUT,
AXP152_IRQ_GPIO2_INPUT,
AXP152_IRQ_GPIO1_INPUT,
AXP152_IRQ_GPIO0_INPUT,
};
+enum axp192_irqs {
+ AXP192_IRQ_ACIN_OVER_V = 1,
+ AXP192_IRQ_ACIN_PLUGIN,
+ AXP192_IRQ_ACIN_REMOVAL,
+ AXP192_IRQ_VBUS_OVER_V,
+ AXP192_IRQ_VBUS_PLUGIN,
+ AXP192_IRQ_VBUS_REMOVAL,
+ AXP192_IRQ_VBUS_V_LOW,
+ AXP192_IRQ_BATT_PLUGIN,
+ AXP192_IRQ_BATT_REMOVAL,
+ AXP192_IRQ_BATT_ENT_ACT_MODE,
+ AXP192_IRQ_BATT_EXIT_ACT_MODE,
+ AXP192_IRQ_CHARG,
+ AXP192_IRQ_CHARG_DONE,
+ AXP192_IRQ_BATT_TEMP_HIGH,
+ AXP192_IRQ_BATT_TEMP_LOW,
+ AXP192_IRQ_DIE_TEMP_HIGH,
+ AXP192_IRQ_CHARG_I_LOW,
+ AXP192_IRQ_DCDC1_V_LONG,
+ AXP192_IRQ_DCDC2_V_LONG,
+ AXP192_IRQ_DCDC3_V_LONG,
+ AXP192_IRQ_PEK_SHORT = 22,
+ AXP192_IRQ_PEK_LONG,
+ AXP192_IRQ_N_OE_PWR_ON,
+ AXP192_IRQ_N_OE_PWR_OFF,
+ AXP192_IRQ_VBUS_VALID,
+ AXP192_IRQ_VBUS_NOT_VALID,
+ AXP192_IRQ_VBUS_SESS_VALID,
+ AXP192_IRQ_VBUS_SESS_END,
+ AXP192_IRQ_LOW_PWR_LVL = 31,
+ AXP192_IRQ_TIMER,
+ AXP192_IRQ_GPIO2_INPUT = 37,
+ AXP192_IRQ_GPIO1_INPUT,
+ AXP192_IRQ_GPIO0_INPUT,
+};
+
enum {
AXP20X_IRQ_ACIN_OVER_V = 1,
AXP20X_IRQ_ACIN_PLUGIN,
@@ -472,8 +731,9 @@ enum {
AXP20X_IRQ_LOW_PWR_LVL1,
AXP20X_IRQ_LOW_PWR_LVL2,
AXP20X_IRQ_TIMER,
- AXP20X_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP20X_IRQ_PEK_FAL_EDGE,
+ AXP20X_IRQ_PEK_RIS_EDGE,
AXP20X_IRQ_GPIO3_INPUT,
AXP20X_IRQ_GPIO2_INPUT,
AXP20X_IRQ_GPIO1_INPUT,
@@ -502,8 +762,9 @@ enum axp22x_irqs {
AXP22X_IRQ_LOW_PWR_LVL1,
AXP22X_IRQ_LOW_PWR_LVL2,
AXP22X_IRQ_TIMER,
- AXP22X_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP22X_IRQ_PEK_FAL_EDGE,
+ AXP22X_IRQ_PEK_RIS_EDGE,
AXP22X_IRQ_GPIO1_INPUT,
AXP22X_IRQ_GPIO0_INPUT,
};
@@ -545,6 +806,50 @@ enum axp288_irqs {
AXP288_IRQ_BC_USB_CHNG,
};
+enum axp313a_irqs {
+ AXP313A_IRQ_DIE_TEMP_HIGH,
+ AXP313A_IRQ_DCDC2_V_LOW = 2,
+ AXP313A_IRQ_DCDC3_V_LOW,
+ AXP313A_IRQ_PEK_LONG,
+ AXP313A_IRQ_PEK_SHORT,
+ AXP313A_IRQ_PEK_FAL_EDGE,
+ AXP313A_IRQ_PEK_RIS_EDGE,
+};
+
+enum axp717_irqs {
+ AXP717_IRQ_VBUS_FAULT,
+ AXP717_IRQ_VBUS_OVER_V,
+ AXP717_IRQ_BOOST_OVER_V,
+ AXP717_IRQ_GAUGE_NEW_SOC = 4,
+ AXP717_IRQ_SOC_DROP_LVL1 = 6,
+ AXP717_IRQ_SOC_DROP_LVL2,
+ AXP717_IRQ_PEK_RIS_EDGE,
+ AXP717_IRQ_PEK_FAL_EDGE,
+ AXP717_IRQ_PEK_LONG,
+ AXP717_IRQ_PEK_SHORT,
+ AXP717_IRQ_BATT_REMOVAL,
+ AXP717_IRQ_BATT_PLUGIN,
+ AXP717_IRQ_VBUS_REMOVAL,
+ AXP717_IRQ_VBUS_PLUGIN,
+ AXP717_IRQ_BATT_OVER_V,
+ AXP717_IRQ_CHARG_TIMER,
+ AXP717_IRQ_DIE_TEMP_HIGH,
+ AXP717_IRQ_CHARG,
+ AXP717_IRQ_CHARG_DONE,
+ AXP717_IRQ_BATT_OVER_CURR,
+ AXP717_IRQ_LDO_OVER_CURR,
+ AXP717_IRQ_WDOG_EXPIRE,
+ AXP717_IRQ_BATT_ACT_TEMP_LOW,
+ AXP717_IRQ_BATT_ACT_TEMP_HIGH,
+ AXP717_IRQ_BATT_CHG_TEMP_LOW,
+ AXP717_IRQ_BATT_CHG_TEMP_HIGH,
+ AXP717_IRQ_BATT_QUIT_TEMP_HIGH,
+ AXP717_IRQ_BC_USB_CHNG = 30,
+ AXP717_IRQ_BC_USB_DONE,
+ AXP717_IRQ_TYPEC_PLUGIN = 37,
+ AXP717_IRQ_TYPEC_REMOVE,
+};
+
enum axp803_irqs {
AXP803_IRQ_ACIN_OVER_V = 1,
AXP803_IRQ_ACIN_PLUGIN,
@@ -571,8 +876,9 @@ enum axp803_irqs {
AXP803_IRQ_LOW_PWR_LVL1,
AXP803_IRQ_LOW_PWR_LVL2,
AXP803_IRQ_TIMER,
- AXP803_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP803_IRQ_PEK_FAL_EDGE,
+ AXP803_IRQ_PEK_RIS_EDGE,
AXP803_IRQ_PEK_SHORT,
AXP803_IRQ_PEK_LONG,
AXP803_IRQ_PEK_OVER_OFF,
@@ -623,8 +929,9 @@ enum axp809_irqs {
AXP809_IRQ_LOW_PWR_LVL1,
AXP809_IRQ_LOW_PWR_LVL2,
AXP809_IRQ_TIMER,
- AXP809_IRQ_PEK_RIS_EDGE,
+ /* out of bit order to make sure the press event is handled first */
AXP809_IRQ_PEK_FAL_EDGE,
+ AXP809_IRQ_PEK_RIS_EDGE,
AXP809_IRQ_PEK_SHORT,
AXP809_IRQ_PEK_LONG,
AXP809_IRQ_PEK_OVER_OFF,
@@ -632,13 +939,30 @@ enum axp809_irqs {
AXP809_IRQ_GPIO0_INPUT,
};
+enum axp15060_irqs {
+ AXP15060_IRQ_DIE_TEMP_HIGH_LV1 = 1,
+ AXP15060_IRQ_DIE_TEMP_HIGH_LV2,
+ AXP15060_IRQ_DCDC1_V_LOW,
+ AXP15060_IRQ_DCDC2_V_LOW,
+ AXP15060_IRQ_DCDC3_V_LOW,
+ AXP15060_IRQ_DCDC4_V_LOW,
+ AXP15060_IRQ_DCDC5_V_LOW,
+ AXP15060_IRQ_DCDC6_V_LOW,
+ AXP15060_IRQ_PEK_LONG,
+ AXP15060_IRQ_PEK_SHORT,
+ AXP15060_IRQ_GPIO1_INPUT,
+ AXP15060_IRQ_PEK_FAL_EDGE,
+ AXP15060_IRQ_PEK_RIS_EDGE,
+ AXP15060_IRQ_GPIO2_INPUT,
+};
+
struct axp20x_dev {
struct device *dev;
int irq;
unsigned long irq_flags;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
- long variant;
+ enum axp20x_variants variant;
int nr_cells;
const struct mfd_cell *cells;
const struct regmap_config *regmap_cfg;
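Retyping axp20x_dev::variant from long to enum axp20x_variants lets the compiler check switch coverage (-Wswitch). A hypothetical consumer:

#include <linux/mfd/axp20x.h>

static const char *example_axp20x_name(const struct axp20x_dev *axp20x)
{
	switch (axp20x->variant) {
	case AXP192_ID:
		return "AXP192";
	case AXP717_ID:
		return "AXP717";
	case AXP15060_ID:
		return "AXP15060";
	default:
		return "unknown";
	}
}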
diff --git a/include/linux/mfd/bcm2835-pm.h b/include/linux/mfd/bcm2835-pm.h
index ed37dc40e82a..f70a810c55f7 100644
--- a/include/linux/mfd/bcm2835-pm.h
+++ b/include/linux/mfd/bcm2835-pm.h
@@ -9,6 +9,7 @@ struct bcm2835_pm {
struct device *dev;
void __iomem *base;
void __iomem *asb;
+ void __iomem *rpivid_asb;
};
#endif /* BCM2835_MFD_PM_H */
diff --git a/include/linux/mfd/bcm590xx.h b/include/linux/mfd/bcm590xx.h
index 6b8791da6119..5a5783abd47b 100644
--- a/include/linux/mfd/bcm590xx.h
+++ b/include/linux/mfd/bcm590xx.h
@@ -13,6 +13,26 @@
#include <linux/i2c.h>
#include <linux/regmap.h>
+/* PMU ID register values; also used as device type */
+#define BCM590XX_PMUID_BCM59054 0x54
+#define BCM590XX_PMUID_BCM59056 0x56
+
+/* Known chip revision IDs */
+#define BCM59054_REV_DIGITAL_A1 1
+#define BCM59054_REV_ANALOG_A1 2
+
+#define BCM59056_REV_DIGITAL_A0 1
+#define BCM59056_REV_ANALOG_A0 1
+
+#define BCM59056_REV_DIGITAL_B0 2
+#define BCM59056_REV_ANALOG_B0 2
+
+/* regmap types */
+enum bcm590xx_regmap_type {
+ BCM590XX_REGMAP_PRI,
+ BCM590XX_REGMAP_SEC,
+};
+
/* max register address */
#define BCM590XX_MAX_REGISTER_PRI 0xe7
#define BCM590XX_MAX_REGISTER_SEC 0xf0
@@ -23,7 +43,13 @@ struct bcm590xx {
struct i2c_client *i2c_sec;
struct regmap *regmap_pri;
struct regmap *regmap_sec;
- unsigned int id;
+
+ /* PMU ID value; also used as device type */
+ u8 pmu_id;
+
+ /* Chip revision, read from PMUREV reg */
+ u8 rev_digital;
+ u8 rev_analog;
};
#endif /* __LINUX_MFD_BCM590XX_H */
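With the single id field replaced by pmu_id plus the digital/analog revision bytes, quirks can now be keyed on an exact silicon revision. A sketch; the workaround being gated is hypothetical:

#include <linux/mfd/bcm590xx.h>

static bool example_needs_a1_workaround(const struct bcm590xx *bcm590xx)
{
	return bcm590xx->pmu_id == BCM590XX_PMUID_BCM59054 &&
	       bcm590xx->rev_digital == BCM59054_REV_DIGITAL_A1 &&
	       bcm590xx->rev_analog == BCM59054_REV_ANALOG_A1;
}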
diff --git a/include/linux/mfd/bq257xx.h b/include/linux/mfd/bq257xx.h
new file mode 100644
index 000000000000..1d6ddc7fb09f
--- /dev/null
+++ b/include/linux/mfd/bq257xx.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Register definitions for TI BQ257XX
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef __MFD_BQ257XX_H__
+#define __MFD_BQ257XX_H__
+
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#define BQ25703_CHARGE_OPTION_0 0x00
+#define BQ25703_CHARGE_CURRENT 0x02
+#define BQ25703_MAX_CHARGE_VOLT 0x04
+#define BQ25703_OTG_VOLT 0x06
+#define BQ25703_OTG_CURRENT 0x08
+#define BQ25703_INPUT_VOLTAGE 0x0a
+#define BQ25703_MIN_VSYS 0x0c
+#define BQ25703_IIN_HOST 0x0e
+#define BQ25703_CHARGER_STATUS 0x20
+#define BQ25703_PROCHOT_STATUS 0x22
+#define BQ25703_IIN_DPM 0x24
+#define BQ25703_ADCIBAT_CHG 0x28
+#define BQ25703_ADCIINCMPIN 0x2a
+#define BQ25703_ADCVSYSVBAT 0x2c
+#define BQ25703_MANUFACT_DEV_ID 0x2e
+#define BQ25703_CHARGE_OPTION_1 0x30
+#define BQ25703_CHARGE_OPTION_2 0x32
+#define BQ25703_CHARGE_OPTION_3 0x34
+#define BQ25703_ADC_OPTION 0x3a
+
+#define BQ25703_EN_LWPWR BIT(15)
+#define BQ25703_WDTMR_ADJ_MASK GENMASK(14, 13)
+#define BQ25703_WDTMR_DISABLE 0
+#define BQ25703_WDTMR_5_SEC 1
+#define BQ25703_WDTMR_88_SEC 2
+#define BQ25703_WDTMR_175_SEC 3
+
+#define BQ25703_ICHG_MASK GENMASK(12, 6)
+#define BQ25703_ICHG_STEP_UA 64000
+#define BQ25703_ICHG_MIN_UA 64000
+#define BQ25703_ICHG_MAX_UA 8128000
+
+#define BQ25703_MAX_CHARGE_VOLT_MASK GENMASK(15, 4)
+#define BQ25703_VBATREG_STEP_UV 16000
+#define BQ25703_VBATREG_MIN_UV 1024000
+#define BQ25703_VBATREG_MAX_UV 19200000
+
+#define BQ25703_OTG_VOLT_MASK GENMASK(13, 6)
+#define BQ25703_OTG_VOLT_STEP_UV 64000
+#define BQ25703_OTG_VOLT_MIN_UV 4480000
+#define BQ25703_OTG_VOLT_MAX_UV 20800000
+#define BQ25703_OTG_VOLT_NUM_VOLT 256
+
+#define BQ25703_OTG_CUR_MASK GENMASK(14, 8)
+#define BQ25703_OTG_CUR_STEP_UA 50000
+#define BQ25703_OTG_CUR_MAX_UA 6350000
+
+#define BQ25703_MINVSYS_MASK GENMASK(13, 8)
+#define BQ25703_MINVSYS_STEP_UV 256000
+#define BQ25703_MINVSYS_MIN_UV 1024000
+#define BQ25703_MINVSYS_MAX_UV 16128000
+
+#define BQ25703_STS_AC_STAT BIT(15)
+#define BQ25703_STS_IN_FCHRG BIT(10)
+#define BQ25703_STS_IN_PCHRG BIT(9)
+#define BQ25703_STS_FAULT_ACOV BIT(7)
+#define BQ25703_STS_FAULT_BATOC BIT(6)
+#define BQ25703_STS_FAULT_ACOC BIT(5)
+
+#define BQ25703_IINDPM_MASK GENMASK(14, 8)
+#define BQ25703_IINDPM_STEP_UA 50000
+#define BQ25703_IINDPM_MIN_UA 50000
+#define BQ25703_IINDPM_MAX_UA 6400000
+#define BQ25703_IINDPM_DEFAULT_UA 3300000
+#define BQ25703_IINDPM_OFFSET_UA 50000
+
+#define BQ25703_ADCIBAT_DISCHG_MASK GENMASK(6, 0)
+#define BQ25703_ADCIBAT_CHG_MASK GENMASK(14, 8)
+#define BQ25703_ADCIBAT_CHG_STEP_UA 64000
+#define BQ25703_ADCIBAT_DIS_STEP_UA 256000
+
+#define BQ25703_ADCIIN GENMASK(15, 8)
+#define BQ25703_ADCIINCMPIN_STEP 50000
+
+#define BQ25703_ADCVSYS_MASK GENMASK(15, 8)
+#define BQ25703_ADCVBAT_MASK GENMASK(7, 0)
+#define BQ25703_ADCVSYSVBAT_OFFSET_UV 2880000
+#define BQ25703_ADCVSYSVBAT_STEP 64000
+
+#define BQ25703_ADC_CH_MASK GENMASK(7, 0)
+#define BQ25703_ADC_CONV_EN BIT(15)
+#define BQ25703_ADC_START BIT(14)
+#define BQ25703_ADC_FULL_SCALE BIT(13)
+#define BQ25703_ADC_CMPIN_EN BIT(7)
+#define BQ25703_ADC_VBUS_EN BIT(6)
+#define BQ25703_ADC_PSYS_EN BIT(5)
+#define BQ25703_ADC_IIN_EN BIT(4)
+#define BQ25703_ADC_IDCHG_EN BIT(3)
+#define BQ25703_ADC_ICHG_EN BIT(2)
+#define BQ25703_ADC_VSYS_EN BIT(1)
+#define BQ25703_ADC_VBAT_EN BIT(0)
+
+#define BQ25703_EN_OTG_MASK BIT(12)
+
+struct bq257xx_device {
+ struct i2c_client *client;
+ struct regmap *regmap;
+};
+
+#endif /* __MFD_BQ257XX_H__ */
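The ADCVSYSVBAT register packs two 8-bit readings, VSYS in bits 15:8 and VBAT in bits 7:0, each scaled by BQ25703_ADCVSYSVBAT_STEP on top of the 2.88 V offset (that the offset applies to both fields is inferred from the shared macros). A user-space check of the conversion, with the macros copied from the header and a made-up register value:

#include <stdio.h>

#define BQ25703_ADCVSYS_MASK		0xff00U	/* GENMASK(15, 8) */
#define BQ25703_ADCVBAT_MASK		0x00ffU	/* GENMASK(7, 0) */
#define BQ25703_ADCVSYSVBAT_OFFSET_UV	2880000U
#define BQ25703_ADCVSYSVBAT_STEP	64000U

int main(void)
{
	unsigned int reg = 0x2a2c;	/* hypothetical ADCVSYSVBAT readout */
	unsigned int vsys = (reg & BQ25703_ADCVSYS_MASK) >> 8;
	unsigned int vbat = reg & BQ25703_ADCVBAT_MASK;

	printf("VSYS = %u uV\n",
	       BQ25703_ADCVSYSVBAT_OFFSET_UV + vsys * BQ25703_ADCVSYSVBAT_STEP);
	printf("VBAT = %u uV\n",
	       BQ25703_ADCVSYSVBAT_OFFSET_UV + vbat * BQ25703_ADCVSYSVBAT_STEP);
	return 0;
}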
diff --git a/include/linux/mfd/cgbc.h b/include/linux/mfd/cgbc.h
new file mode 100644
index 000000000000..badbec4c7033
--- /dev/null
+++ b/include/linux/mfd/cgbc.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Congatec Board Controller driver definitions
+ *
+ * Copyright (C) 2024 Bootlin
+ * Author: Thomas Richard <thomas.richard@bootlin.com>
+ */
+
+#ifndef _LINUX_MFD_CGBC_H_
+#define _LINUX_MFD_CGBC_H_
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/**
+ * struct cgbc_version - Board Controller device version structure
+ * @feature: Board Controller feature number
+ * @major: Board Controller major revision
+ * @minor: Board Controller minor revision
+ */
+struct cgbc_version {
+ unsigned char feature;
+ unsigned char major;
+ unsigned char minor;
+};
+
+/**
+ * struct cgbc_device_data - Internal representation of the Board Controller device
+ * @io_session: Pointer to the session IO memory
+ * @io_cmd: Pointer to the command IO memory
+ * @session: Session id returned by the Board Controller
+ * @dev: Pointer to kernel device structure
+ * @version: Board Controller version structure
+ * @lock: Board Controller mutex
+ */
+struct cgbc_device_data {
+ void __iomem *io_session;
+ void __iomem *io_cmd;
+ u8 session;
+ struct device *dev;
+ struct cgbc_version version;
+ struct mutex lock;
+};
+
+int cgbc_command(struct cgbc_device_data *cgbc, void *cmd, unsigned int cmd_size,
+ void *data, unsigned int data_size, u8 *status);
+
+#endif /* _LINUX_MFD_CGBC_H_ */
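cgbc_command() above takes a raw command buffer, an optional response buffer, and an out-parameter for the controller status byte. A hedged sketch of a caller; the opcode, buffer lengths, and status handling are all hypothetical:

#include <linux/dev_printk.h>
#include <linux/mfd/cgbc.h>

static int example_cgbc_query(struct cgbc_device_data *cgbc)
{
	u8 cmd[2] = { 0x01, 0x00 };	/* hypothetical opcode + argument */
	u8 data[4] = {};
	u8 status;
	int ret;

	ret = cgbc_command(cgbc, cmd, sizeof(cmd), data, sizeof(data), &status);
	if (ret)
		return ret;

	dev_dbg(cgbc->dev, "board controller status: %#x\n", status);
	return 0;
}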
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index 0bc7cba798a3..faeea7abd688 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -68,16 +68,16 @@ struct mfd_cell {
int id;
int level;
- int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
-
int (*suspend)(struct platform_device *dev);
int (*resume)(struct platform_device *dev);
/* platform data passed to the sub devices drivers */
- void *platform_data;
+ const void *platform_data;
size_t pdata_size;
+ /* Matches ACPI */
+ const struct mfd_cell_acpi_match *acpi_match;
+
/* Software node for the device. */
const struct software_node *swnode;
@@ -88,18 +88,15 @@ struct mfd_cell {
const char *of_compatible;
/*
- * Address as defined in Device Tree. Used to compement 'of_compatible'
+ * Address as defined in Device Tree. Used to complement 'of_compatible'
* (above) when matching OF nodes with devices that have identical
* compatible strings
*/
- const u64 of_reg;
+ u64 of_reg;
/* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */
bool use_of_reg;
- /* Matches ACPI */
- const struct mfd_cell_acpi_match *acpi_match;
-
/*
* These resources can be specified relative to the parent device.
* For accessing hardware you should use resources from the platform dev
@@ -119,20 +116,11 @@ struct mfd_cell {
/* A list of regulator supplies that should be mapped to the MFD
* device rather than the child device when requested
*/
- const char * const *parent_supplies;
int num_parent_supplies;
+ const char * const *parent_supplies;
};
/*
- * Convenience functions for clients using shared cells. Refcounting
- * happens automatically, with the cell's enable/disable callbacks
- * being called only when a device is first being enabled or no other
- * clients are making use of it.
- */
-extern int mfd_cell_enable(struct platform_device *pdev);
-extern int mfd_cell_disable(struct platform_device *pdev);
-
-/*
* Given a platform device that's been created by mfd_add_devices(), fetch
* the mfd_cell that created it.
*/
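Since platform_data is now const void *, per-cell platform data can live in rodata. A minimal sketch; the cell name and pdata struct are hypothetical:

#include <linux/mfd/core.h>

struct my_child_pdata {
	unsigned int mode;
};

static const struct my_child_pdata example_pdata = {
	.mode = 1,
};

static const struct mfd_cell example_cells[] = {
	{
		.name		= "example-child",
		.platform_data	= &example_pdata,	/* const now accepted */
		.pdata_size	= sizeof(example_pdata),
	},
};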
diff --git a/include/linux/mfd/cs40l50.h b/include/linux/mfd/cs40l50.h
new file mode 100644
index 000000000000..e5dc49860944
--- /dev/null
+++ b/include/linux/mfd/cs40l50.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * CS40L50 Advanced Haptic Driver with waveform memory,
+ * integrated DSP, and closed-loop algorithms
+ *
+ * Copyright 2024 Cirrus Logic, Inc.
+ *
+ * Author: James Ogletree <james.ogletree@cirrus.com>
+ */
+
+#ifndef __MFD_CS40L50_H__
+#define __MFD_CS40L50_H__
+
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/gpio/consumer.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+/* Power Supply Configuration */
+#define CS40L50_BLOCK_ENABLES2 0x201C
+#define CS40L50_ERR_RLS 0x2034
+#define CS40L50_BST_LPMODE_SEL 0x3810
+#define CS40L50_DCM_LOW_POWER 0x1
+#define CS40L50_OVERTEMP_WARN 0x4000010
+
+/* Interrupts */
+#define CS40L50_IRQ1_INT_1 0xE010
+#define CS40L50_IRQ1_BASE CS40L50_IRQ1_INT_1
+#define CS40L50_IRQ1_INT_2 0xE014
+#define CS40L50_IRQ1_INT_8 0xE02C
+#define CS40L50_IRQ1_INT_9 0xE030
+#define CS40L50_IRQ1_INT_10 0xE034
+#define CS40L50_IRQ1_INT_18 0xE054
+#define CS40L50_IRQ1_MASK_1 0xE090
+#define CS40L50_IRQ1_MASK_2 0xE094
+#define CS40L50_IRQ1_MASK_20 0xE0DC
+#define CS40L50_IRQ1_INT_1_OFFSET (CS40L50_IRQ1_INT_1 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_2_OFFSET (CS40L50_IRQ1_INT_2 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_8_OFFSET (CS40L50_IRQ1_INT_8 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_9_OFFSET (CS40L50_IRQ1_INT_9 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_10_OFFSET (CS40L50_IRQ1_INT_10 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ1_INT_18_OFFSET (CS40L50_IRQ1_INT_18 - CS40L50_IRQ1_BASE)
+#define CS40L50_IRQ_MASK_2_OVERRIDE 0xFFDF7FFF
+#define CS40L50_IRQ_MASK_20_OVERRIDE 0x15C01000
+#define CS40L50_AMP_SHORT_MASK BIT(31)
+#define CS40L50_DSP_QUEUE_MASK BIT(21)
+#define CS40L50_TEMP_ERR_MASK BIT(31)
+#define CS40L50_BST_UVP_MASK BIT(6)
+#define CS40L50_BST_SHORT_MASK BIT(7)
+#define CS40L50_BST_ILIMIT_MASK BIT(18)
+#define CS40L50_UVLO_VDDBATT_MASK BIT(16)
+#define CS40L50_GLOBAL_ERROR_MASK BIT(15)
+
+enum cs40l50_irq_list {
+ CS40L50_DSP_QUEUE_IRQ,
+ CS40L50_GLOBAL_ERROR_IRQ,
+ CS40L50_UVLO_VDDBATT_IRQ,
+ CS40L50_BST_ILIMIT_IRQ,
+ CS40L50_BST_SHORT_IRQ,
+ CS40L50_BST_UVP_IRQ,
+ CS40L50_TEMP_ERR_IRQ,
+ CS40L50_AMP_SHORT_IRQ,
+};
+
+/* DSP */
+#define CS40L50_XMEM_PACKED_0 0x2000000
+#define CS40L50_XMEM_UNPACKED24_0 0x2800000
+#define CS40L50_SYS_INFO_ID 0x25E0000
+#define CS40L50_DSP_QUEUE_WT 0x28042C8
+#define CS40L50_DSP_QUEUE_RD 0x28042CC
+#define CS40L50_NUM_WAVES 0x2805C18
+#define CS40L50_CORE_BASE 0x2B80000
+#define CS40L50_YMEM_PACKED_0 0x2C00000
+#define CS40L50_YMEM_UNPACKED24_0 0x3400000
+#define CS40L50_PMEM_0 0x3800000
+#define CS40L50_DSP_POLL_US 1000
+#define CS40L50_DSP_TIMEOUT_COUNT 100
+#define CS40L50_RESET_PULSE_US 2200
+#define CS40L50_CP_READY_US 3100
+#define CS40L50_AUTOSUSPEND_MS 2000
+#define CS40L50_PM_ALGO 0x9F206
+#define CS40L50_GLOBAL_ERR_RLS_SET BIT(11)
+#define CS40L50_GLOBAL_ERR_RLS_CLEAR 0
+
+enum cs40l50_wseqs {
+ CS40L50_PWR_ON,
+ CS40L50_STANDBY,
+ CS40L50_ACTIVE,
+ CS40L50_NUM_WSEQS,
+};
+
+/* DSP Queue */
+#define CS40L50_DSP_QUEUE_BASE 0x11004
+#define CS40L50_DSP_QUEUE_END 0x1101C
+#define CS40L50_DSP_QUEUE 0x11020
+#define CS40L50_PREVENT_HIBER 0x2000003
+#define CS40L50_ALLOW_HIBER 0x2000004
+#define CS40L50_SHUTDOWN 0x2000005
+#define CS40L50_SYSTEM_RESET 0x2000007
+#define CS40L50_START_I2S 0x3000002
+#define CS40L50_OWT_PUSH 0x3000008
+#define CS40L50_STOP_PLAYBACK 0x5000000
+#define CS40L50_OWT_DELETE 0xD000000
+
+/* Firmware files */
+#define CS40L50_FW "cs40l50.wmfw"
+#define CS40L50_WT "cs40l50.bin"
+
+/* Device */
+#define CS40L50_DEVID 0x0
+#define CS40L50_REVID 0x4
+#define CS40L50_DEVID_A 0x40A50
+#define CS40L50_REVID_B0 0xB0
+
+struct cs40l50 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock;
+ struct cs_dsp dsp;
+ struct gpio_desc *reset_gpio;
+ struct regmap_irq_chip_data *irq_data;
+ const struct firmware *fw;
+ const struct firmware *bin;
+ struct cs_dsp_wseq wseqs[CS40L50_NUM_WSEQS];
+ int irq;
+ u32 devid;
+ u32 revid;
+};
+
+int cs40l50_dsp_write(struct device *dev, struct regmap *regmap, u32 val);
+int cs40l50_probe(struct cs40l50 *cs40l50);
+int cs40l50_remove(struct cs40l50 *cs40l50);
+
+extern const struct regmap_config cs40l50_regmap;
+extern const struct dev_pm_ops cs40l50_pm_ops;
+
+#endif /* __MFD_CS40L50_H__ */
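cs40l50_dsp_write() above pushes a value to the firmware, presumably through the DSP queue registers; which mailbox words are valid to send this way is an assumption based on the "DSP Queue" block. A hedged sketch:

#include <linux/mfd/cs40l50.h>

/* Queue a system-reset command to the haptics firmware (assumption). */
static int example_cs40l50_reset_fw(struct cs40l50 *cs40l50)
{
	return cs40l50_dsp_write(cs40l50->dev, cs40l50->regmap,
				 CS40L50_SYSTEM_RESET);
}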
diff --git a/include/linux/mfd/cs42l43-regs.h b/include/linux/mfd/cs42l43-regs.h
new file mode 100644
index 000000000000..c39a49269cb7
--- /dev/null
+++ b/include/linux/mfd/cs42l43-regs.h
@@ -0,0 +1,1184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * cs42l43 register definitions
+ *
+ * Copyright (c) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef CS42L43_CORE_REGS_H
+#define CS42L43_CORE_REGS_H
+
+/* Registers */
+#define CS42L43_GEN_INT_STAT_1 0x000000C0
+#define CS42L43_GEN_INT_MASK_1 0x000000C1
+#define CS42L43_DEVID 0x00003000
+#define CS42L43_REVID 0x00003004
+#define CS42L43_RELID 0x0000300C
+#define CS42L43_SFT_RESET 0x00003020
+#define CS42L43_DRV_CTRL1 0x00006004
+#define CS42L43_DRV_CTRL3 0x0000600C
+#define CS42L43_DRV_CTRL4 0x00006010
+#define CS42L43_DRV_CTRL_5 0x00006014
+#define CS42L43_GPIO_CTRL1 0x00006034
+#define CS42L43_GPIO_CTRL2 0x00006038
+#define CS42L43_GPIO_STS 0x0000603C
+#define CS42L43_GPIO_FN_SEL 0x00006040
+#define CS42L43_MCLK_SRC_SEL 0x00007004
+#define CS42L43_CCM_BLK_CLK_CONTROL 0x00007010
+#define CS42L43_SAMPLE_RATE1 0x00007014
+#define CS42L43_SAMPLE_RATE2 0x00007018
+#define CS42L43_SAMPLE_RATE3 0x0000701C
+#define CS42L43_SAMPLE_RATE4 0x00007020
+#define CS42L43_PLL_CONTROL 0x00007034
+#define CS42L43_FS_SELECT1 0x00007038
+#define CS42L43_FS_SELECT2 0x0000703C
+#define CS42L43_FS_SELECT3 0x00007040
+#define CS42L43_FS_SELECT4 0x00007044
+#define CS42L43_PDM_CONTROL 0x0000704C
+#define CS42L43_ASP_CLK_CONFIG1 0x00007058
+#define CS42L43_ASP_CLK_CONFIG2 0x0000705C
+#define CS42L43_OSC_DIV_SEL 0x00007068
+#define CS42L43_ADC_B_CTRL1 0x00008000
+#define CS42L43_ADC_B_CTRL2 0x00008004
+#define CS42L43_DECIM_HPF_WNF_CTRL1 0x0000803C
+#define CS42L43_DECIM_HPF_WNF_CTRL2 0x00008040
+#define CS42L43_DECIM_HPF_WNF_CTRL3 0x00008044
+#define CS42L43_DECIM_HPF_WNF_CTRL4 0x00008048
+#define CS42L43_DMIC_PDM_CTRL 0x0000804C
+#define CS42L43_DECIM_VOL_CTRL_CH1_CH2 0x00008050
+#define CS42L43_DECIM_VOL_CTRL_CH3_CH4 0x00008054
+#define CS42L43_DECIM_VOL_CTRL_UPDATE 0x00008058
+#define CS42L43_INTP_VOLUME_CTRL1 0x00009008
+#define CS42L43_INTP_VOLUME_CTRL2 0x0000900C
+#define CS42L43_AMP1_2_VOL_RAMP 0x00009010
+#define CS42L43_ASP_CTRL 0x0000A000
+#define CS42L43_ASP_FSYNC_CTRL1 0x0000A004
+#define CS42L43_ASP_FSYNC_CTRL2 0x0000A008
+#define CS42L43_ASP_FSYNC_CTRL3 0x0000A00C
+#define CS42L43_ASP_FSYNC_CTRL4 0x0000A010
+#define CS42L43_ASP_DATA_CTRL 0x0000A018
+#define CS42L43_ASP_RX_EN 0x0000A020
+#define CS42L43_ASP_TX_EN 0x0000A024
+#define CS42L43_ASP_RX_CH1_CTRL 0x0000A028
+#define CS42L43_ASP_RX_CH2_CTRL 0x0000A02C
+#define CS42L43_ASP_RX_CH3_CTRL 0x0000A030
+#define CS42L43_ASP_RX_CH4_CTRL 0x0000A034
+#define CS42L43_ASP_RX_CH5_CTRL 0x0000A038
+#define CS42L43_ASP_RX_CH6_CTRL 0x0000A03C
+#define CS42L43_ASP_TX_CH1_CTRL 0x0000A068
+#define CS42L43_ASP_TX_CH2_CTRL 0x0000A06C
+#define CS42L43_ASP_TX_CH3_CTRL 0x0000A070
+#define CS42L43_ASP_TX_CH4_CTRL 0x0000A074
+#define CS42L43_ASP_TX_CH5_CTRL 0x0000A078
+#define CS42L43_ASP_TX_CH6_CTRL 0x0000A07C
+#define CS42L43_OTP_REVISION_ID 0x0000B02C
+#define CS42L43_ASPTX1_INPUT 0x0000C200
+#define CS42L43_ASPTX2_INPUT 0x0000C210
+#define CS42L43_ASPTX3_INPUT 0x0000C220
+#define CS42L43_ASPTX4_INPUT 0x0000C230
+#define CS42L43_ASPTX5_INPUT 0x0000C240
+#define CS42L43_ASPTX6_INPUT 0x0000C250
+#define CS42L43_SWIRE_DP1_CH1_INPUT 0x0000C280
+#define CS42L43_SWIRE_DP1_CH2_INPUT 0x0000C290
+#define CS42L43_SWIRE_DP1_CH3_INPUT 0x0000C2A0
+#define CS42L43_SWIRE_DP1_CH4_INPUT 0x0000C2B0
+#define CS42L43_SWIRE_DP2_CH1_INPUT 0x0000C2C0
+#define CS42L43_SWIRE_DP2_CH2_INPUT 0x0000C2D0
+#define CS42L43_SWIRE_DP3_CH1_INPUT 0x0000C2E0
+#define CS42L43_SWIRE_DP3_CH2_INPUT 0x0000C2F0
+#define CS42L43_SWIRE_DP4_CH1_INPUT 0x0000C300
+#define CS42L43_SWIRE_DP4_CH2_INPUT 0x0000C310
+#define CS42L43_ASRC_INT1_INPUT1 0x0000C400
+#define CS42L43_ASRC_INT2_INPUT1 0x0000C410
+#define CS42L43_ASRC_INT3_INPUT1 0x0000C420
+#define CS42L43_ASRC_INT4_INPUT1 0x0000C430
+#define CS42L43_ASRC_DEC1_INPUT1 0x0000C440
+#define CS42L43_ASRC_DEC2_INPUT1 0x0000C450
+#define CS42L43_ASRC_DEC3_INPUT1 0x0000C460
+#define CS42L43_ASRC_DEC4_INPUT1 0x0000C470
+#define CS42L43_ISRC1INT1_INPUT1 0x0000C500
+#define CS42L43_ISRC1INT2_INPUT1 0x0000C510
+#define CS42L43_ISRC1DEC1_INPUT1 0x0000C520
+#define CS42L43_ISRC1DEC2_INPUT1 0x0000C530
+#define CS42L43_ISRC2INT1_INPUT1 0x0000C540
+#define CS42L43_ISRC2INT2_INPUT1 0x0000C550
+#define CS42L43_ISRC2DEC1_INPUT1 0x0000C560
+#define CS42L43_ISRC2DEC2_INPUT1 0x0000C570
+#define CS42L43_EQ1MIX_INPUT1 0x0000C580
+#define CS42L43_EQ1MIX_INPUT2 0x0000C584
+#define CS42L43_EQ1MIX_INPUT3 0x0000C588
+#define CS42L43_EQ1MIX_INPUT4 0x0000C58C
+#define CS42L43_EQ2MIX_INPUT1 0x0000C590
+#define CS42L43_EQ2MIX_INPUT2 0x0000C594
+#define CS42L43_EQ2MIX_INPUT3 0x0000C598
+#define CS42L43_EQ2MIX_INPUT4 0x0000C59C
+#define CS42L43_SPDIF1_INPUT1 0x0000C600
+#define CS42L43_SPDIF2_INPUT1 0x0000C610
+#define CS42L43_AMP1MIX_INPUT1 0x0000C620
+#define CS42L43_AMP1MIX_INPUT2 0x0000C624
+#define CS42L43_AMP1MIX_INPUT3 0x0000C628
+#define CS42L43_AMP1MIX_INPUT4 0x0000C62C
+#define CS42L43_AMP2MIX_INPUT1 0x0000C630
+#define CS42L43_AMP2MIX_INPUT2 0x0000C634
+#define CS42L43_AMP2MIX_INPUT3 0x0000C638
+#define CS42L43_AMP2MIX_INPUT4 0x0000C63C
+#define CS42L43_AMP3MIX_INPUT1 0x0000C640
+#define CS42L43_AMP3MIX_INPUT2 0x0000C644
+#define CS42L43_AMP3MIX_INPUT3 0x0000C648
+#define CS42L43_AMP3MIX_INPUT4 0x0000C64C
+#define CS42L43_AMP4MIX_INPUT1 0x0000C650
+#define CS42L43_AMP4MIX_INPUT2 0x0000C654
+#define CS42L43_AMP4MIX_INPUT3 0x0000C658
+#define CS42L43_AMP4MIX_INPUT4 0x0000C65C
+#define CS42L43_ASRC_INT_ENABLES 0x0000E000
+#define CS42L43_ASRC_DEC_ENABLES 0x0000E004
+#define CS42L43_PDNCNTL 0x00010000
+#define CS42L43_RINGSENSE_DEB_CTRL 0x0001001C
+#define CS42L43_TIPSENSE_DEB_CTRL 0x00010020
+#define CS42L43_TIP_RING_SENSE_INTERRUPT_STATUS 0x00010028
+#define CS42L43_HS2 0x00010040
+#define CS42L43_HS_STAT 0x00010048
+#define CS42L43_MCU_SW_INTERRUPT 0x00010094
+#define CS42L43_STEREO_MIC_CTRL 0x000100A4
+#define CS42L43_STEREO_MIC_CLAMP_CTRL 0x000100C4
+#define CS42L43_BLOCK_EN2 0x00010104
+#define CS42L43_BLOCK_EN3 0x00010108
+#define CS42L43_BLOCK_EN4 0x0001010C
+#define CS42L43_BLOCK_EN5 0x00010110
+#define CS42L43_BLOCK_EN6 0x00010114
+#define CS42L43_BLOCK_EN7 0x00010118
+#define CS42L43_BLOCK_EN8 0x0001011C
+#define CS42L43_BLOCK_EN9 0x00010120
+#define CS42L43_BLOCK_EN10 0x00010124
+#define CS42L43_BLOCK_EN11 0x00010128
+#define CS42L43_TONE_CH1_CTRL 0x00010134
+#define CS42L43_TONE_CH2_CTRL 0x00010138
+#define CS42L43_MIC_DETECT_CONTROL_1 0x00011074
+#define CS42L43_DETECT_STATUS_1 0x0001107C
+#define CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL 0x00011090
+#define CS42L43_MIC_DETECT_CONTROL_ANDROID 0x000110B0
+#define CS42L43_ISRC1_CTRL 0x00012004
+#define CS42L43_ISRC2_CTRL 0x00013004
+#define CS42L43_CTRL_REG 0x00014000
+#define CS42L43_FDIV_FRAC 0x00014004
+#define CS42L43_CAL_RATIO 0x00014008
+#define CS42L43_SPI_CLK_CONFIG1 0x00016004
+#define CS42L43_SPI_CONFIG1 0x00016010
+#define CS42L43_SPI_CONFIG2 0x00016014
+#define CS42L43_SPI_CONFIG3 0x00016018
+#define CS42L43_SPI_CONFIG4 0x00016024
+#define CS42L43_SPI_STATUS1 0x00016100
+#define CS42L43_SPI_STATUS2 0x00016104
+#define CS42L43_TRAN_CONFIG1 0x00016200
+#define CS42L43_TRAN_CONFIG2 0x00016204
+#define CS42L43_TRAN_CONFIG3 0x00016208
+#define CS42L43_TRAN_CONFIG4 0x0001620C
+#define CS42L43_TRAN_CONFIG5 0x00016220
+#define CS42L43_TRAN_CONFIG6 0x00016224
+#define CS42L43_TRAN_CONFIG7 0x00016228
+#define CS42L43_TRAN_CONFIG8 0x0001622C
+#define CS42L43_TRAN_STATUS1 0x00016300
+#define CS42L43_TRAN_STATUS2 0x00016304
+#define CS42L43_TRAN_STATUS3 0x00016308
+#define CS42L43_TX_DATA 0x00016400
+#define CS42L43_RX_DATA 0x00016600
+#define CS42L43_DACCNFG1 0x00017000
+#define CS42L43_DACCNFG2 0x00017004
+#define CS42L43_HPPATHVOL 0x0001700C
+#define CS42L43_PGAVOL 0x00017014
+#define CS42L43_LOADDETRESULTS 0x00017018
+#define CS42L43_LOADDETENA 0x00017024
+#define CS42L43_CTRL 0x00017028
+#define CS42L43_COEFF_DATA_IN0 0x00018000
+#define CS42L43_COEFF_RD_WR0 0x00018008
+#define CS42L43_INIT_DONE0 0x00018010
+#define CS42L43_START_EQZ0 0x00018014
+#define CS42L43_MUTE_EQ_IN0 0x0001801C
+#define CS42L43_DECIM_INT 0x0001B000
+#define CS42L43_EQ_INT 0x0001B004
+#define CS42L43_ASP_INT 0x0001B008
+#define CS42L43_PLL_INT 0x0001B00C
+#define CS42L43_SOFT_INT 0x0001B010
+#define CS42L43_SWIRE_INT 0x0001B014
+#define CS42L43_MSM_INT 0x0001B018
+#define CS42L43_ACC_DET_INT 0x0001B01C
+#define CS42L43_I2C_TGT_INT 0x0001B020
+#define CS42L43_SPI_MSTR_INT 0x0001B024
+#define CS42L43_SW_TO_SPI_BRIDGE_INT 0x0001B028
+#define CS42L43_OTP_INT 0x0001B02C
+#define CS42L43_CLASS_D_AMP_INT 0x0001B030
+#define CS42L43_GPIO_INT 0x0001B034
+#define CS42L43_ASRC_INT 0x0001B038
+#define CS42L43_HPOUT_INT 0x0001B03C
+#define CS42L43_DECIM_MASK 0x0001B0A0
+#define CS42L43_EQ_MIX_MASK 0x0001B0A4
+#define CS42L43_ASP_MASK 0x0001B0A8
+#define CS42L43_PLL_MASK 0x0001B0AC
+#define CS42L43_SOFT_MASK 0x0001B0B0
+#define CS42L43_SWIRE_MASK 0x0001B0B4
+#define CS42L43_MSM_MASK 0x0001B0B8
+#define CS42L43_ACC_DET_MASK 0x0001B0BC
+#define CS42L43_I2C_TGT_MASK 0x0001B0C0
+#define CS42L43_SPI_MSTR_MASK 0x0001B0C4
+#define CS42L43_SW_TO_SPI_BRIDGE_MASK 0x0001B0C8
+#define CS42L43_OTP_MASK 0x0001B0CC
+#define CS42L43_CLASS_D_AMP_MASK 0x0001B0D0
+#define CS42L43_GPIO_INT_MASK 0x0001B0D4
+#define CS42L43_ASRC_MASK 0x0001B0D8
+#define CS42L43_HPOUT_MASK 0x0001B0DC
+#define CS42L43_DECIM_INT_SHADOW 0x0001B300
+#define CS42L43_EQ_MIX_INT_SHADOW 0x0001B304
+#define CS42L43_ASP_INT_SHADOW 0x0001B308
+#define CS42L43_PLL_INT_SHADOW 0x0001B30C
+#define CS42L43_SOFT_INT_SHADOW 0x0001B310
+#define CS42L43_SWIRE_INT_SHADOW 0x0001B314
+#define CS42L43_MSM_INT_SHADOW 0x0001B318
+#define CS42L43_ACC_DET_INT_SHADOW 0x0001B31C
+#define CS42L43_I2C_TGT_INT_SHADOW 0x0001B320
+#define CS42L43_SPI_MSTR_INT_SHADOW 0x0001B324
+#define CS42L43_SW_TO_SPI_BRIDGE_SHADOW 0x0001B328
+#define CS42L43_OTP_INT_SHADOW 0x0001B32C
+#define CS42L43_CLASS_D_AMP_INT_SHADOW 0x0001B330
+#define CS42L43_GPIO_SHADOW 0x0001B334
+#define CS42L43_ASRC_SHADOW 0x0001B338
+#define CS42L43_HP_OUT_SHADOW 0x0001B33C
+#define CS42L43_BOOT_CONTROL 0x00101000
+#define CS42L43_BLOCK_EN 0x00101008
+#define CS42L43_SHUTTER_CONTROL 0x0010100C
+#define CS42L43_MCU_SW_REV 0x00114000
+#define CS42L43_PATCH_START_ADDR 0x00114004
+#define CS42L43_NEED_CONFIGS 0x0011400C
+#define CS42L43_BOOT_STATUS 0x0011401C
+#define CS42L43_FW_SH_BOOT_CFG_NEED_CONFIGS 0x0011F8F8
+#define CS42L43_FW_MISSION_CTRL_NEED_CONFIGS 0x0011FE00
+#define CS42L43_FW_MISSION_CTRL_HAVE_CONFIGS 0x0011FE04
+#define CS42L43_FW_MISSION_CTRL_MM_CTRL_SELECTION 0x0011FE0C
+#define CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_REG 0x0011FE10
+#define CS42L43_MCU_RAM_MAX 0x0011FFFF
+
+/* CS42L43_DEVID */
+#define CS42L43_DEVID_VAL 0x00042A43
+
+/* CS42L43_GEN_INT_STAT_1 */
+#define CS42L43_INT_STAT_GEN1_MASK 0x00000001
+#define CS42L43_INT_STAT_GEN1_SHIFT 0
+
+/* CS42L43_SFT_RESET */
+#define CS42L43_SFT_RESET_MASK 0xFF000000
+#define CS42L43_SFT_RESET_SHIFT 24
+
+#define CS42L43_SFT_RESET_VAL 0x5A000000
+
+/* CS42L43_DRV_CTRL1 */
+#define CS42L43_ASP_DOUT_DRV_MASK 0x00038000
+#define CS42L43_ASP_DOUT_DRV_SHIFT 15
+#define CS42L43_ASP_FSYNC_DRV_MASK 0x00000E00
+#define CS42L43_ASP_FSYNC_DRV_SHIFT 9
+#define CS42L43_ASP_BCLK_DRV_MASK 0x000001C0
+#define CS42L43_ASP_BCLK_DRV_SHIFT 6
+
+/* CS42L43_DRV_CTRL3 */
+#define CS42L43_I2C_ADDR_DRV_MASK 0x30000000
+#define CS42L43_I2C_ADDR_DRV_SHIFT 28
+#define CS42L43_I2C_SDA_DRV_MASK 0x0C000000
+#define CS42L43_I2C_SDA_DRV_SHIFT 26
+#define CS42L43_PDMOUT2_CLK_DRV_MASK 0x00E00000
+#define CS42L43_PDMOUT2_CLK_DRV_SHIFT 21
+#define CS42L43_PDMOUT2_DATA_DRV_MASK 0x001C0000
+#define CS42L43_PDMOUT2_DATA_DRV_SHIFT 18
+#define CS42L43_PDMOUT1_CLK_DRV_MASK 0x00038000
+#define CS42L43_PDMOUT1_CLK_DRV_SHIFT 15
+#define CS42L43_PDMOUT1_DATA_DRV_MASK 0x00007000
+#define CS42L43_PDMOUT1_DATA_DRV_SHIFT 12
+#define CS42L43_SPI_MISO_DRV_MASK 0x00000038
+#define CS42L43_SPI_MISO_DRV_SHIFT 3
+
+/* CS42L43_DRV_CTRL4 */
+#define CS42L43_GPIO3_DRV_MASK 0x00000E00
+#define CS42L43_GPIO3_DRV_SHIFT 9
+#define CS42L43_GPIO2_DRV_MASK 0x000001C0
+#define CS42L43_GPIO2_DRV_SHIFT 6
+#define CS42L43_GPIO1_DRV_MASK 0x00000038
+#define CS42L43_GPIO1_DRV_SHIFT 3
+
+/* CS42L43_DRV_CTRL_5 */
+#define CS42L43_I2C_SCL_DRV_MASK 0x18000000
+#define CS42L43_I2C_SCL_DRV_SHIFT 27
+#define CS42L43_SPI_SCK_DRV_MASK 0x07000000
+#define CS42L43_SPI_SCK_DRV_SHIFT 24
+#define CS42L43_SPI_MOSI_DRV_MASK 0x00E00000
+#define CS42L43_SPI_MOSI_DRV_SHIFT 21
+#define CS42L43_SPI_SSB_DRV_MASK 0x001C0000
+#define CS42L43_SPI_SSB_DRV_SHIFT 18
+#define CS42L43_ASP_DIN_DRV_MASK 0x000001C0
+#define CS42L43_ASP_DIN_DRV_SHIFT 6
+
+/* CS42L43_GPIO_CTRL1 */
+#define CS42L43_GPIO3_POL_MASK 0x00040000
+#define CS42L43_GPIO3_POL_SHIFT 18
+#define CS42L43_GPIO2_POL_MASK 0x00020000
+#define CS42L43_GPIO2_POL_SHIFT 17
+#define CS42L43_GPIO1_POL_MASK 0x00010000
+#define CS42L43_GPIO1_POL_SHIFT 16
+#define CS42L43_GPIO3_LVL_MASK 0x00000400
+#define CS42L43_GPIO3_LVL_SHIFT 10
+#define CS42L43_GPIO2_LVL_MASK 0x00000200
+#define CS42L43_GPIO2_LVL_SHIFT 9
+#define CS42L43_GPIO1_LVL_MASK 0x00000100
+#define CS42L43_GPIO1_LVL_SHIFT 8
+#define CS42L43_GPIO3_DIR_MASK 0x00000004
+#define CS42L43_GPIO3_DIR_SHIFT 2
+#define CS42L43_GPIO2_DIR_MASK 0x00000002
+#define CS42L43_GPIO2_DIR_SHIFT 1
+#define CS42L43_GPIO1_DIR_MASK 0x00000001
+#define CS42L43_GPIO1_DIR_SHIFT 0
+
+/* CS42L43_GPIO_CTRL2 */
+#define CS42L43_GPIO3_DEGLITCH_BYP_MASK 0x00000004
+#define CS42L43_GPIO3_DEGLITCH_BYP_SHIFT 2
+#define CS42L43_GPIO2_DEGLITCH_BYP_MASK 0x00000002
+#define CS42L43_GPIO2_DEGLITCH_BYP_SHIFT 1
+#define CS42L43_GPIO1_DEGLITCH_BYP_MASK 0x00000001
+#define CS42L43_GPIO1_DEGLITCH_BYP_SHIFT 0
+
+/* CS42L43_GPIO_STS */
+#define CS42L43_GPIO3_STS_MASK 0x00000004
+#define CS42L43_GPIO3_STS_SHIFT 2
+#define CS42L43_GPIO2_STS_MASK 0x00000002
+#define CS42L43_GPIO2_STS_SHIFT 1
+#define CS42L43_GPIO1_STS_MASK 0x00000001
+#define CS42L43_GPIO1_STS_SHIFT 0
+
+/* CS42L43_GPIO_FN_SEL */
+#define CS42L43_GPIO3_FN_SEL_MASK 0x00000004
+#define CS42L43_GPIO3_FN_SEL_SHIFT 2
+#define CS42L43_GPIO1_FN_SEL_MASK 0x00000001
+#define CS42L43_GPIO1_FN_SEL_SHIFT 0
+
+/* CS42L43_MCLK_SRC_SEL */
+#define CS42L43_OSC_PLL_MCLK_SEL_MASK 0x00000001
+#define CS42L43_OSC_PLL_MCLK_SEL_SHIFT 0
+
+/* CS42L43_SAMPLE_RATE1..CS42L43_SAMPLE_RATE4 */
+#define CS42L43_SAMPLE_RATE_MASK 0x0000001F
+#define CS42L43_SAMPLE_RATE_SHIFT 0
+
+/* CS42L43_PLL_CONTROL */
+#define CS42L43_PLL_REFCLK_EN_MASK 0x00000008
+#define CS42L43_PLL_REFCLK_EN_SHIFT 3
+#define CS42L43_PLL_REFCLK_DIV_MASK 0x00000006
+#define CS42L43_PLL_REFCLK_DIV_SHIFT 1
+#define CS42L43_PLL_REFCLK_SRC_MASK 0x00000001
+#define CS42L43_PLL_REFCLK_SRC_SHIFT 0
+
+/* CS42L43_FS_SELECT1 */
+#define CS42L43_ASP_RATE_MASK 0x00000003
+#define CS42L43_ASP_RATE_SHIFT 0
+
+/* CS42L43_FS_SELECT2 */
+#define CS42L43_ASRC_DEC_OUT_RATE_MASK 0x000000C0
+#define CS42L43_ASRC_DEC_OUT_RATE_SHIFT 6
+#define CS42L43_ASRC_INT_OUT_RATE_MASK 0x00000030
+#define CS42L43_ASRC_INT_OUT_RATE_SHIFT 4
+#define CS42L43_ASRC_DEC_IN_RATE_MASK 0x0000000C
+#define CS42L43_ASRC_DEC_IN_RATE_SHIFT 2
+#define CS42L43_ASRC_INT_IN_RATE_MASK 0x00000003
+#define CS42L43_ASRC_INT_IN_RATE_SHIFT 0
+
+/* CS42L43_FS_SELECT3 */
+#define CS42L43_HPOUT_RATE_MASK 0x0000C000
+#define CS42L43_HPOUT_RATE_SHIFT 14
+#define CS42L43_EQZ_RATE_MASK 0x00003000
+#define CS42L43_EQZ_RATE_SHIFT 12
+#define CS42L43_DIAGGEN_RATE_MASK 0x00000C00
+#define CS42L43_DIAGGEN_RATE_SHIFT 10
+#define CS42L43_DECIM_CH4_RATE_MASK 0x00000300
+#define CS42L43_DECIM_CH4_RATE_SHIFT 8
+#define CS42L43_DECIM_CH3_RATE_MASK 0x000000C0
+#define CS42L43_DECIM_CH3_RATE_SHIFT 6
+#define CS42L43_DECIM_CH2_RATE_MASK 0x00000030
+#define CS42L43_DECIM_CH2_RATE_SHIFT 4
+#define CS42L43_DECIM_CH1_RATE_MASK 0x0000000C
+#define CS42L43_DECIM_CH1_RATE_SHIFT 2
+#define CS42L43_AMP1_2_RATE_MASK 0x00000003
+#define CS42L43_AMP1_2_RATE_SHIFT 0
+
+/* CS42L43_FS_SELECT4 */
+#define CS42L43_SW_DP7_RATE_MASK 0x00C00000
+#define CS42L43_SW_DP7_RATE_SHIFT 22
+#define CS42L43_SW_DP6_RATE_MASK 0x00300000
+#define CS42L43_SW_DP6_RATE_SHIFT 20
+#define CS42L43_SPDIF_RATE_MASK 0x000C0000
+#define CS42L43_SPDIF_RATE_SHIFT 18
+#define CS42L43_SW_DP5_RATE_MASK 0x00030000
+#define CS42L43_SW_DP5_RATE_SHIFT 16
+#define CS42L43_SW_DP4_RATE_MASK 0x0000C000
+#define CS42L43_SW_DP4_RATE_SHIFT 14
+#define CS42L43_SW_DP3_RATE_MASK 0x00003000
+#define CS42L43_SW_DP3_RATE_SHIFT 12
+#define CS42L43_SW_DP2_RATE_MASK 0x00000C00
+#define CS42L43_SW_DP2_RATE_SHIFT 10
+#define CS42L43_SW_DP1_RATE_MASK 0x00000300
+#define CS42L43_SW_DP1_RATE_SHIFT 8
+#define CS42L43_ISRC2_LOW_RATE_MASK 0x000000C0
+#define CS42L43_ISRC2_LOW_RATE_SHIFT 6
+#define CS42L43_ISRC2_HIGH_RATE_MASK 0x00000030
+#define CS42L43_ISRC2_HIGH_RATE_SHIFT 4
+#define CS42L43_ISRC1_LOW_RATE_MASK 0x0000000C
+#define CS42L43_ISRC1_LOW_RATE_SHIFT 2
+#define CS42L43_ISRC1_HIGH_RATE_MASK 0x00000003
+#define CS42L43_ISRC1_HIGH_RATE_SHIFT 0
+
+/* CS42L43_PDM_CONTROL */
+#define CS42L43_PDM2_CLK_DIV_MASK 0x0000000C
+#define CS42L43_PDM2_CLK_DIV_SHIFT 2
+#define CS42L43_PDM1_CLK_DIV_MASK 0x00000003
+#define CS42L43_PDM1_CLK_DIV_SHIFT 0
+
+/* CS42L43_ASP_CLK_CONFIG1 */
+#define CS42L43_ASP_BCLK_N_MASK 0x03FF0000
+#define CS42L43_ASP_BCLK_N_SHIFT 16
+#define CS42L43_ASP_BCLK_M_MASK 0x000003FF
+#define CS42L43_ASP_BCLK_M_SHIFT 0
+
+/* CS42L43_ASP_CLK_CONFIG2 */
+#define CS42L43_ASP_MASTER_MODE_MASK 0x00000002
+#define CS42L43_ASP_MASTER_MODE_SHIFT 1
+#define CS42L43_ASP_BCLK_INV_MASK 0x00000001
+#define CS42L43_ASP_BCLK_INV_SHIFT 0
+
+/* CS42L43_OSC_DIV_SEL */
+#define CS42L43_OSC_DIV2_EN_MASK 0x00000001
+#define CS42L43_OSC_DIV2_EN_SHIFT 0
+
+/* CS42L43_ADC_B_CTRL1..CS42L43_ADC_B_CTRL2 */
+#define CS42L43_PGA_WIDESWING_MODE_EN_MASK 0x00000080
+#define CS42L43_PGA_WIDESWING_MODE_EN_SHIFT 7
+#define CS42L43_ADC_AIN_SEL_MASK 0x00000010
+#define CS42L43_ADC_AIN_SEL_SHIFT 4
+#define CS42L43_ADC_PGA_GAIN_MASK 0x0000000F
+#define CS42L43_ADC_PGA_GAIN_SHIFT 0
+
+/* CS42L43_DECIM_HPF_WNF_CTRL1..CS42L43_DECIM_HPF_WNF_CTRL4 */
+#define CS42L43_DECIM_WNF_CF_MASK 0x00000070
+#define CS42L43_DECIM_WNF_CF_SHIFT 4
+#define CS42L43_DECIM_WNF_EN_MASK 0x00000008
+#define CS42L43_DECIM_WNF_EN_SHIFT 3
+#define CS42L43_DECIM_HPF_CF_MASK 0x00000006
+#define CS42L43_DECIM_HPF_CF_SHIFT 1
+#define CS42L43_DECIM_HPF_EN_MASK 0x00000001
+#define CS42L43_DECIM_HPF_EN_SHIFT 0
+
+/* CS42L43_DMIC_PDM_CTRL */
+#define CS42L43_PDM2R_INV_MASK 0x00000020
+#define CS42L43_PDM2R_INV_SHIFT 5
+#define CS42L43_PDM2L_INV_MASK 0x00000010
+#define CS42L43_PDM2L_INV_SHIFT 4
+#define CS42L43_PDM1R_INV_MASK 0x00000008
+#define CS42L43_PDM1R_INV_SHIFT 3
+#define CS42L43_PDM1L_INV_MASK 0x00000004
+#define CS42L43_PDM1L_INV_SHIFT 2
+
+/* CS42L43_DECIM_VOL_CTRL_CH1_CH2 */
+#define CS42L43_DECIM2_MUTE_MASK 0x80000000
+#define CS42L43_DECIM2_MUTE_SHIFT 31
+#define CS42L43_DECIM2_VOL_MASK 0x3FC00000
+#define CS42L43_DECIM2_VOL_SHIFT 22
+#define CS42L43_DECIM2_VD_RAMP_MASK 0x00380000
+#define CS42L43_DECIM2_VD_RAMP_SHIFT 19
+#define CS42L43_DECIM2_VI_RAMP_MASK 0x00070000
+#define CS42L43_DECIM2_VI_RAMP_SHIFT 16
+#define CS42L43_DECIM1_MUTE_MASK 0x00008000
+#define CS42L43_DECIM1_MUTE_SHIFT 15
+#define CS42L43_DECIM1_VOL_MASK 0x00003FC0
+#define CS42L43_DECIM1_VOL_SHIFT 6
+#define CS42L43_DECIM1_VD_RAMP_MASK 0x00000038
+#define CS42L43_DECIM1_VD_RAMP_SHIFT 3
+#define CS42L43_DECIM1_VI_RAMP_MASK 0x00000007
+#define CS42L43_DECIM1_VI_RAMP_SHIFT 0
+
+/* CS42L43_DECIM_VOL_CTRL_CH3_CH4 */
+#define CS42L43_DECIM4_MUTE_MASK 0x80000000
+#define CS42L43_DECIM4_MUTE_SHIFT 31
+#define CS42L43_DECIM4_VOL_MASK 0x3FC00000
+#define CS42L43_DECIM4_VOL_SHIFT 22
+#define CS42L43_DECIM4_VD_RAMP_MASK 0x00380000
+#define CS42L43_DECIM4_VD_RAMP_SHIFT 19
+#define CS42L43_DECIM4_VI_RAMP_MASK 0x00070000
+#define CS42L43_DECIM4_VI_RAMP_SHIFT 16
+#define CS42L43_DECIM3_MUTE_MASK 0x00008000
+#define CS42L43_DECIM3_MUTE_SHIFT 15
+#define CS42L43_DECIM3_VOL_MASK 0x00003FC0
+#define CS42L43_DECIM3_VOL_SHIFT 6
+#define CS42L43_DECIM3_VD_RAMP_MASK 0x00000038
+#define CS42L43_DECIM3_VD_RAMP_SHIFT 3
+#define CS42L43_DECIM3_VI_RAMP_MASK 0x00000007
+#define CS42L43_DECIM3_VI_RAMP_SHIFT 0
+
+/* CS42L43_DECIM_VOL_CTRL_UPDATE */
+#define CS42L43_DECIM4_VOL_UPDATE_MASK 0x00000008
+#define CS42L43_DECIM4_VOL_UPDATE_SHIFT 3
+#define CS42L43_DECIM3_VOL_UPDATE_MASK 0x00000004
+#define CS42L43_DECIM3_VOL_UPDATE_SHIFT 2
+#define CS42L43_DECIM2_VOL_UPDATE_MASK 0x00000002
+#define CS42L43_DECIM2_VOL_UPDATE_SHIFT 1
+#define CS42L43_DECIM1_VOL_UPDATE_MASK 0x00000001
+#define CS42L43_DECIM1_VOL_UPDATE_SHIFT 0
+
+/* CS42L43_INTP_VOLUME_CTRL1..CS42L43_INTP_VOLUME_CTRL2 */
+#define CS42L43_AMP1_2_VU_MASK 0x00000200
+#define CS42L43_AMP1_2_VU_SHIFT 9
+#define CS42L43_AMP_MUTE_MASK 0x00000100
+#define CS42L43_AMP_MUTE_SHIFT 8
+#define CS42L43_AMP_VOL_MASK 0x000000FF
+#define CS42L43_AMP_VOL_SHIFT 0
+
+/* CS42L43_AMP1_2_VOL_RAMP */
+#define CS42L43_AMP1_2_VD_RAMP_MASK 0x00000070
+#define CS42L43_AMP1_2_VD_RAMP_SHIFT 4
+#define CS42L43_AMP1_2_VI_RAMP_MASK 0x00000007
+#define CS42L43_AMP1_2_VI_RAMP_SHIFT 0
+
+/* CS42L43_ASP_CTRL */
+#define CS42L43_ASP_FSYNC_MODE_MASK 0x00000004
+#define CS42L43_ASP_FSYNC_MODE_SHIFT 2
+#define CS42L43_ASP_BCLK_EN_MASK 0x00000002
+#define CS42L43_ASP_BCLK_EN_SHIFT 1
+#define CS42L43_ASP_FSYNC_EN_MASK 0x00000001
+#define CS42L43_ASP_FSYNC_EN_SHIFT 0
+
+/* CS42L43_ASP_FSYNC_CTRL1 */
+#define CS42L43_ASP_FSYNC_M_MASK 0x0007FFFF
+#define CS42L43_ASP_FSYNC_M_SHIFT 0
+
+/* CS42L43_ASP_FSYNC_CTRL3 */
+#define CS42L43_ASP_FSYNC_IN_INV_MASK 0x00000002
+#define CS42L43_ASP_FSYNC_IN_INV_SHIFT 1
+#define CS42L43_ASP_FSYNC_OUT_INV_MASK 0x00000001
+#define CS42L43_ASP_FSYNC_OUT_INV_SHIFT 0
+
+/* CS42L43_ASP_FSYNC_CTRL4 */
+#define CS42L43_ASP_NUM_BCLKS_PER_FSYNC_MASK 0x00001FFE
+#define CS42L43_ASP_NUM_BCLKS_PER_FSYNC_SHIFT 1
+
+/* CS42L43_ASP_DATA_CTRL */
+#define CS42L43_ASP_FSYNC_FRAME_START_PHASE_MASK 0x00000008
+#define CS42L43_ASP_FSYNC_FRAME_START_PHASE_SHIFT 3
+#define CS42L43_ASP_FSYNC_FRAME_START_DLY_MASK 0x00000007
+#define CS42L43_ASP_FSYNC_FRAME_START_DLY_SHIFT 0
+
+/* CS42L43_ASP_RX_EN */
+#define CS42L43_ASP_RX_CH6_EN_MASK 0x00000020
+#define CS42L43_ASP_RX_CH6_EN_SHIFT 5
+#define CS42L43_ASP_RX_CH5_EN_MASK 0x00000010
+#define CS42L43_ASP_RX_CH5_EN_SHIFT 4
+#define CS42L43_ASP_RX_CH4_EN_MASK 0x00000008
+#define CS42L43_ASP_RX_CH4_EN_SHIFT 3
+#define CS42L43_ASP_RX_CH3_EN_MASK 0x00000004
+#define CS42L43_ASP_RX_CH3_EN_SHIFT 2
+#define CS42L43_ASP_RX_CH2_EN_MASK 0x00000002
+#define CS42L43_ASP_RX_CH2_EN_SHIFT 1
+#define CS42L43_ASP_RX_CH1_EN_MASK 0x00000001
+#define CS42L43_ASP_RX_CH1_EN_SHIFT 0
+
+/* CS42L43_ASP_TX_EN */
+#define CS42L43_ASP_TX_CH6_EN_MASK 0x00000020
+#define CS42L43_ASP_TX_CH6_EN_SHIFT 5
+#define CS42L43_ASP_TX_CH5_EN_MASK 0x00000010
+#define CS42L43_ASP_TX_CH5_EN_SHIFT 4
+#define CS42L43_ASP_TX_CH4_EN_MASK 0x00000008
+#define CS42L43_ASP_TX_CH4_EN_SHIFT 3
+#define CS42L43_ASP_TX_CH3_EN_MASK 0x00000004
+#define CS42L43_ASP_TX_CH3_EN_SHIFT 2
+#define CS42L43_ASP_TX_CH2_EN_MASK 0x00000002
+#define CS42L43_ASP_TX_CH2_EN_SHIFT 1
+#define CS42L43_ASP_TX_CH1_EN_MASK 0x00000001
+#define CS42L43_ASP_TX_CH1_EN_SHIFT 0
+
+/* CS42L43_ASP_RX_CH1_CTRL..CS42L43_ASP_TX_CH6_CTRL */
+#define CS42L43_ASP_CH_WIDTH_MASK 0x001F0000
+#define CS42L43_ASP_CH_WIDTH_SHIFT 16
+#define CS42L43_ASP_CH_SLOT_MASK 0x00001FFE
+#define CS42L43_ASP_CH_SLOT_SHIFT 1
+#define CS42L43_ASP_CH_SLOT_PHASE_MASK 0x00000001
+#define CS42L43_ASP_CH_SLOT_PHASE_SHIFT 0
+
+/* CS42L43_ASPTX1_INPUT..CS42L43_AMP4MIX_INPUT4 */
+#define CS42L43_MIXER_VOL_MASK 0x00FE0000
+#define CS42L43_MIXER_VOL_SHIFT 17
+#define CS42L43_MIXER_SRC_MASK 0x000001FF
+#define CS42L43_MIXER_SRC_SHIFT 0
+
+/* CS42L43_ASRC_INT_ENABLES */
+#define CS42L43_ASRC_INT4_EN_MASK 0x00000008
+#define CS42L43_ASRC_INT4_EN_SHIFT 3
+#define CS42L43_ASRC_INT3_EN_MASK 0x00000004
+#define CS42L43_ASRC_INT3_EN_SHIFT 2
+#define CS42L43_ASRC_INT2_EN_MASK 0x00000002
+#define CS42L43_ASRC_INT2_EN_SHIFT 1
+#define CS42L43_ASRC_INT1_EN_MASK 0x00000001
+#define CS42L43_ASRC_INT1_EN_SHIFT 0
+
+/* CS42L43_ASRC_DEC_ENABLES */
+#define CS42L43_ASRC_DEC4_EN_MASK 0x00000008
+#define CS42L43_ASRC_DEC4_EN_SHIFT 3
+#define CS42L43_ASRC_DEC3_EN_MASK 0x00000004
+#define CS42L43_ASRC_DEC3_EN_SHIFT 2
+#define CS42L43_ASRC_DEC2_EN_MASK 0x00000002
+#define CS42L43_ASRC_DEC2_EN_SHIFT 1
+#define CS42L43_ASRC_DEC1_EN_MASK 0x00000001
+#define CS42L43_ASRC_DEC1_EN_SHIFT 0
+
+/* CS42L43_PDNCNTL */
+#define CS42L43_RING_SENSE_EN_MASK 0x00000002
+#define CS42L43_RING_SENSE_EN_SHIFT 1
+
+/* CS42L43_RINGSENSE_DEB_CTRL */
+#define CS42L43_RINGSENSE_INV_MASK 0x00000080
+#define CS42L43_RINGSENSE_INV_SHIFT 7
+#define CS42L43_RINGSENSE_PULLUP_PDNB_MASK 0x00000040
+#define CS42L43_RINGSENSE_PULLUP_PDNB_SHIFT 6
+#define CS42L43_RINGSENSE_FALLING_DB_TIME_MASK 0x00000038
+#define CS42L43_RINGSENSE_FALLING_DB_TIME_SHIFT 3
+#define CS42L43_RINGSENSE_RISING_DB_TIME_MASK 0x00000007
+#define CS42L43_RINGSENSE_RISING_DB_TIME_SHIFT 0
+
+/* CS42L43_TIPSENSE_DEB_CTRL */
+#define CS42L43_TIPSENSE_INV_MASK 0x00000080
+#define CS42L43_TIPSENSE_INV_SHIFT 7
+#define CS42L43_TIPSENSE_FALLING_DB_TIME_MASK 0x00000038
+#define CS42L43_TIPSENSE_FALLING_DB_TIME_SHIFT 3
+#define CS42L43_TIPSENSE_RISING_DB_TIME_MASK 0x00000007
+#define CS42L43_TIPSENSE_RISING_DB_TIME_SHIFT 0
+
+/* CS42L43_TIP_RING_SENSE_INTERRUPT_STATUS */
+#define CS42L43_TIPSENSE_UNPLUG_DB_STS_MASK 0x00000008
+#define CS42L43_TIPSENSE_UNPLUG_DB_STS_SHIFT 3
+#define CS42L43_TIPSENSE_PLUG_DB_STS_MASK 0x00000004
+#define CS42L43_TIPSENSE_PLUG_DB_STS_SHIFT 2
+#define CS42L43_RINGSENSE_UNPLUG_DB_STS_MASK 0x00000002
+#define CS42L43_RINGSENSE_UNPLUG_DB_STS_SHIFT 1
+#define CS42L43_RINGSENSE_PLUG_DB_STS_MASK 0x00000001
+#define CS42L43_RINGSENSE_PLUG_DB_STS_SHIFT 0
+
+/* CS42L43_HS2 */
+#define CS42L43_HS_CLAMP_DISABLE_MASK 0x10000000
+#define CS42L43_HS_CLAMP_DISABLE_SHIFT 28
+#define CS42L43_HSBIAS_RAMP_MASK 0x0C000000
+#define CS42L43_HSBIAS_RAMP_SHIFT 26
+#define CS42L43_HSDET_MODE_MASK 0x00018000
+#define CS42L43_HSDET_MODE_SHIFT 15
+#define CS42L43_HSDET_MANUAL_MODE_MASK 0x00006000
+#define CS42L43_HSDET_MANUAL_MODE_SHIFT 13
+#define CS42L43_AUTO_HSDET_TIME_MASK 0x00000700
+#define CS42L43_AUTO_HSDET_TIME_SHIFT 8
+#define CS42L43_AMP3_4_GNDREF_HS3_SEL_MASK 0x00000080
+#define CS42L43_AMP3_4_GNDREF_HS3_SEL_SHIFT 7
+#define CS42L43_AMP3_4_GNDREF_HS4_SEL_MASK 0x00000040
+#define CS42L43_AMP3_4_GNDREF_HS4_SEL_SHIFT 6
+#define CS42L43_HSBIAS_GNDREF_HS3_SEL_MASK 0x00000020
+#define CS42L43_HSBIAS_GNDREF_HS3_SEL_SHIFT 5
+#define CS42L43_HSBIAS_GNDREF_HS4_SEL_MASK 0x00000010
+#define CS42L43_HSBIAS_GNDREF_HS4_SEL_SHIFT 4
+#define CS42L43_HSBIAS_OUT_HS3_SEL_MASK 0x00000008
+#define CS42L43_HSBIAS_OUT_HS3_SEL_SHIFT 3
+#define CS42L43_HSBIAS_OUT_HS4_SEL_MASK 0x00000004
+#define CS42L43_HSBIAS_OUT_HS4_SEL_SHIFT 2
+#define CS42L43_HSGND_HS3_SEL_MASK 0x00000002
+#define CS42L43_HSGND_HS3_SEL_SHIFT 1
+#define CS42L43_HSGND_HS4_SEL_MASK 0x00000001
+#define CS42L43_HSGND_HS4_SEL_SHIFT 0
+
+/* CS42L43_HS_STAT */
+#define CS42L43_HSDET_TYPE_STS_MASK 0x00000007
+#define CS42L43_HSDET_TYPE_STS_SHIFT 0
+
+/* CS42L43_MCU_SW_INTERRUPT */
+#define CS42L43_CONTROL_IND_MASK 0x00000004
+#define CS42L43_CONTROL_IND_SHIFT 2
+#define CS42L43_CONFIGS_IND_MASK 0x00000002
+#define CS42L43_CONFIGS_IND_SHIFT 1
+#define CS42L43_PATCH_IND_MASK 0x00000001
+#define CS42L43_PATCH_IND_SHIFT 0
+
+/* CS42L43_STEREO_MIC_CTRL */
+#define CS42L43_HS2_BIAS_SENSE_EN_MASK 0x00000020
+#define CS42L43_HS2_BIAS_SENSE_EN_SHIFT 5
+#define CS42L43_HS1_BIAS_SENSE_EN_MASK 0x00000010
+#define CS42L43_HS1_BIAS_SENSE_EN_SHIFT 4
+#define CS42L43_HS2_BIAS_EN_MASK 0x00000008
+#define CS42L43_HS2_BIAS_EN_SHIFT 3
+#define CS42L43_HS1_BIAS_EN_MASK 0x00000004
+#define CS42L43_HS1_BIAS_EN_SHIFT 2
+#define CS42L43_JACK_STEREO_CONFIG_MASK 0x00000003
+#define CS42L43_JACK_STEREO_CONFIG_SHIFT 0
+
+/* CS42L43_STEREO_MIC_CLAMP_CTRL */
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_MASK 0x00000002
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_VAL_SHIFT 1
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_MASK 0x00000001
+#define CS42L43_SMIC_HPAMP_CLAMP_DIS_FRC_SHIFT 0
+
+/* CS42L43_BLOCK_EN2 */
+#define CS42L43_SPI_MSTR_EN_MASK 0x00000001
+#define CS42L43_SPI_MSTR_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN3 */
+#define CS42L43_PDM2_DIN_R_EN_MASK 0x00000020
+#define CS42L43_PDM2_DIN_R_EN_SHIFT 5
+#define CS42L43_PDM2_DIN_L_EN_MASK 0x00000010
+#define CS42L43_PDM2_DIN_L_EN_SHIFT 4
+#define CS42L43_PDM1_DIN_R_EN_MASK 0x00000008
+#define CS42L43_PDM1_DIN_R_EN_SHIFT 3
+#define CS42L43_PDM1_DIN_L_EN_MASK 0x00000004
+#define CS42L43_PDM1_DIN_L_EN_SHIFT 2
+#define CS42L43_ADC2_EN_MASK 0x00000002
+#define CS42L43_ADC2_EN_SHIFT 1
+#define CS42L43_ADC1_EN_MASK 0x00000001
+#define CS42L43_ADC1_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN4 */
+#define CS42L43_ASRC_DEC_BANK_EN_MASK 0x00000002
+#define CS42L43_ASRC_DEC_BANK_EN_SHIFT 1
+#define CS42L43_ASRC_INT_BANK_EN_MASK 0x00000001
+#define CS42L43_ASRC_INT_BANK_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN5 */
+#define CS42L43_ISRC2_BANK_EN_MASK 0x00000002
+#define CS42L43_ISRC2_BANK_EN_SHIFT 1
+#define CS42L43_ISRC1_BANK_EN_MASK 0x00000001
+#define CS42L43_ISRC1_BANK_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN6 */
+#define CS42L43_MIXER_EN_MASK 0x00000001
+#define CS42L43_MIXER_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN7 */
+#define CS42L43_EQ_EN_MASK 0x00000001
+#define CS42L43_EQ_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN8 */
+#define CS42L43_HP_EN_MASK 0x00000001
+#define CS42L43_HP_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN9 */
+#define CS42L43_TONE_EN_MASK 0x00000001
+#define CS42L43_TONE_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN10 */
+#define CS42L43_AMP2_EN_MASK 0x00000002
+#define CS42L43_AMP2_EN_SHIFT 1
+#define CS42L43_AMP1_EN_MASK 0x00000001
+#define CS42L43_AMP1_EN_SHIFT 0
+
+/* CS42L43_BLOCK_EN11 */
+#define CS42L43_SPDIF_EN_MASK 0x00000001
+#define CS42L43_SPDIF_EN_SHIFT 0
+
+/* CS42L43_TONE_CH1_CTRL..CS42L43_TONE_CH2_CTRL */
+#define CS42L43_TONE_FREQ_MASK 0x00000070
+#define CS42L43_TONE_FREQ_SHIFT 4
+#define CS42L43_TONE_SEL_MASK 0x0000000F
+#define CS42L43_TONE_SEL_SHIFT 0
+
+/* CS42L43_MIC_DETECT_CONTROL_1 */
+#define CS42L43_BUTTON_DETECT_MODE_MASK 0x00000018
+#define CS42L43_BUTTON_DETECT_MODE_SHIFT 3
+#define CS42L43_HSBIAS_MODE_MASK 0x00000006
+#define CS42L43_HSBIAS_MODE_SHIFT 1
+#define CS42L43_MIC_LVL_DET_DISABLE_MASK 0x00000001
+#define CS42L43_MIC_LVL_DET_DISABLE_SHIFT 0
+
+/* CS42L43_DETECT_STATUS_1 */
+#define CS42L43_HSDET_DC_STS_MASK 0x01FF0000
+#define CS42L43_HSDET_DC_STS_SHIFT 16
+#define CS42L43_JACKDET_STS_MASK 0x00000080
+#define CS42L43_JACKDET_STS_SHIFT 7
+#define CS42L43_HSBIAS_CLAMP_STS_MASK 0x00000040
+#define CS42L43_HSBIAS_CLAMP_STS_SHIFT 6
+
+/* CS42L43_HS_BIAS_SENSE_AND_CLAMP_AUTOCONTROL */
+#define CS42L43_JACKDET_MODE_MASK 0xC0000000
+#define CS42L43_JACKDET_MODE_SHIFT 30
+#define CS42L43_JACKDET_INV_MASK 0x20000000
+#define CS42L43_JACKDET_INV_SHIFT 29
+#define CS42L43_JACKDET_DB_TIME_MASK 0x03000000
+#define CS42L43_JACKDET_DB_TIME_SHIFT 24
+#define CS42L43_S0_AUTO_ADCMUTE_DISABLE_MASK 0x00800000
+#define CS42L43_S0_AUTO_ADCMUTE_DISABLE_SHIFT 23
+#define CS42L43_HSBIAS_SENSE_EN_MASK 0x00000080
+#define CS42L43_HSBIAS_SENSE_EN_SHIFT 7
+#define CS42L43_AUTO_HSBIAS_CLAMP_EN_MASK 0x00000040
+#define CS42L43_AUTO_HSBIAS_CLAMP_EN_SHIFT 6
+#define CS42L43_JACKDET_SENSE_EN_MASK 0x00000020
+#define CS42L43_JACKDET_SENSE_EN_SHIFT 5
+#define CS42L43_HSBIAS_SENSE_TRIP_MASK 0x00000007
+#define CS42L43_HSBIAS_SENSE_TRIP_SHIFT 0
+
+/* CS42L43_MIC_DETECT_CONTROL_ANDROID */
+#define CS42L43_HSDET_LVL_COMBWIDTH_MASK 0xC0000000
+#define CS42L43_HSDET_LVL_COMBWIDTH_SHIFT 30
+#define CS42L43_HSDET_LVL2_THRESH_MASK 0x01FF0000
+#define CS42L43_HSDET_LVL2_THRESH_SHIFT 16
+#define CS42L43_HSDET_LVL1_THRESH_MASK 0x000001FF
+#define CS42L43_HSDET_LVL1_THRESH_SHIFT 0
+
+/* CS42L43_ISRC1_CTRL..CS42L43_ISRC2_CTRL */
+#define CS42L43_ISRC_INT2_EN_MASK 0x00000200
+#define CS42L43_ISRC_INT2_EN_SHIFT 9
+#define CS42L43_ISRC_INT1_EN_MASK 0x00000100
+#define CS42L43_ISRC_INT1_EN_SHIFT 8
+#define CS42L43_ISRC_DEC2_EN_MASK 0x00000002
+#define CS42L43_ISRC_DEC2_EN_SHIFT 1
+#define CS42L43_ISRC_DEC1_EN_MASK 0x00000001
+#define CS42L43_ISRC_DEC1_EN_SHIFT 0
+
+/* CS42L43_CTRL_REG */
+#define CS42L43_PLL_MODE_BYPASS_500_MASK 0x00000004
+#define CS42L43_PLL_MODE_BYPASS_500_SHIFT 2
+#define CS42L43_PLL_MODE_BYPASS_1029_MASK 0x00000002
+#define CS42L43_PLL_MODE_BYPASS_1029_SHIFT 1
+#define CS42L43_PLL_EN_MASK 0x00000001
+#define CS42L43_PLL_EN_SHIFT 0
+
+/* CS42L43_FDIV_FRAC */
+#define CS42L43_PLL_DIV_INT_MASK 0xFF000000
+#define CS42L43_PLL_DIV_INT_SHIFT 24
+#define CS42L43_PLL_DIV_FRAC_BYTE2_MASK 0x00FF0000
+#define CS42L43_PLL_DIV_FRAC_BYTE2_SHIFT 16
+#define CS42L43_PLL_DIV_FRAC_BYTE1_MASK 0x0000FF00
+#define CS42L43_PLL_DIV_FRAC_BYTE1_SHIFT 8
+#define CS42L43_PLL_DIV_FRAC_BYTE0_MASK 0x000000FF
+#define CS42L43_PLL_DIV_FRAC_BYTE0_SHIFT 0
+
+/* CS42L43_CAL_RATIO */
+#define CS42L43_PLL_CAL_RATIO_MASK 0x000000FF
+#define CS42L43_PLL_CAL_RATIO_SHIFT 0
+
+/* CS42L43_SPI_CLK_CONFIG1 */
+#define CS42L43_SCLK_DIV_MASK 0x0000000F
+#define CS42L43_SCLK_DIV_SHIFT 0
+
+/* CS42L43_SPI_CONFIG1 */
+#define CS42L43_SPI_SS_IDLE_DUR_MASK 0x0F000000
+#define CS42L43_SPI_SS_IDLE_DUR_SHIFT 24
+#define CS42L43_SPI_SS_DELAY_DUR_MASK 0x000F0000
+#define CS42L43_SPI_SS_DELAY_DUR_SHIFT 16
+#define CS42L43_SPI_THREE_WIRE_MASK 0x00000100
+#define CS42L43_SPI_THREE_WIRE_SHIFT 8
+#define CS42L43_SPI_DPHA_MASK 0x00000040
+#define CS42L43_SPI_DPHA_SHIFT 6
+#define CS42L43_SPI_CPHA_MASK 0x00000020
+#define CS42L43_SPI_CPHA_SHIFT 5
+#define CS42L43_SPI_CPOL_MASK 0x00000010
+#define CS42L43_SPI_CPOL_SHIFT 4
+#define CS42L43_SPI_SS_SEL_MASK 0x00000007
+#define CS42L43_SPI_SS_SEL_SHIFT 0
+
+/* CS42L43_SPI_CONFIG2 */
+#define CS42L43_SPI_SS_FRC_MASK 0x00000001
+#define CS42L43_SPI_SS_FRC_SHIFT 0
+
+/* CS42L43_SPI_CONFIG3 */
+#define CS42L43_SPI_WDT_ENA_MASK 0x00000001
+#define CS42L43_SPI_WDT_ENA_SHIFT 0
+
+/* CS42L43_SPI_CONFIG4 */
+#define CS42L43_SPI_STALL_ENA_MASK 0x00010000
+#define CS42L43_SPI_STALL_ENA_SHIFT 16
+
+/* CS42L43_SPI_STATUS1 */
+#define CS42L43_SPI_ABORT_STS_MASK 0x00000002
+#define CS42L43_SPI_ABORT_STS_SHIFT 1
+#define CS42L43_SPI_DONE_STS_MASK 0x00000001
+#define CS42L43_SPI_DONE_STS_SHIFT 0
+
+/* CS42L43_SPI_STATUS2 */
+#define CS42L43_SPI_RX_DONE_STS_MASK 0x00000010
+#define CS42L43_SPI_RX_DONE_STS_SHIFT 4
+#define CS42L43_SPI_TX_DONE_STS_MASK 0x00000001
+#define CS42L43_SPI_TX_DONE_STS_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG1 */
+#define CS42L43_SPI_START_MASK 0x00000001
+#define CS42L43_SPI_START_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG2 */
+#define CS42L43_SPI_ABORT_MASK 0x00000001
+#define CS42L43_SPI_ABORT_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG3 */
+#define CS42L43_SPI_WORD_SIZE_MASK 0x00070000
+#define CS42L43_SPI_WORD_SIZE_SHIFT 16
+#define CS42L43_SPI_CMD_MASK 0x00000003
+#define CS42L43_SPI_CMD_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG4 */
+#define CS42L43_SPI_TX_LENGTH_MASK 0x0000FFFF
+#define CS42L43_SPI_TX_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG5 */
+#define CS42L43_SPI_RX_LENGTH_MASK 0x0000FFFF
+#define CS42L43_SPI_RX_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG6 */
+#define CS42L43_SPI_TX_BLOCK_LENGTH_MASK 0x0000000F
+#define CS42L43_SPI_TX_BLOCK_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG7 */
+#define CS42L43_SPI_RX_BLOCK_LENGTH_MASK 0x0000000F
+#define CS42L43_SPI_RX_BLOCK_LENGTH_SHIFT 0
+
+/* CS42L43_TRAN_CONFIG8 */
+#define CS42L43_SPI_RX_DONE_MASK 0x00000010
+#define CS42L43_SPI_RX_DONE_SHIFT 4
+#define CS42L43_SPI_TX_DONE_MASK 0x00000001
+#define CS42L43_SPI_TX_DONE_SHIFT 0
+
+/* CS42L43_TRAN_STATUS1 */
+#define CS42L43_SPI_BUSY_STS_MASK 0x00000100
+#define CS42L43_SPI_BUSY_STS_SHIFT 8
+#define CS42L43_SPI_RX_REQUEST_MASK 0x00000010
+#define CS42L43_SPI_RX_REQUEST_SHIFT 4
+#define CS42L43_SPI_TX_REQUEST_MASK 0x00000001
+#define CS42L43_SPI_TX_REQUEST_SHIFT 0
+
+/* CS42L43_TRAN_STATUS2 */
+#define CS42L43_SPI_TX_BYTE_COUNT_MASK 0x0000FFFF
+#define CS42L43_SPI_TX_BYTE_COUNT_SHIFT 0
+
+/* CS42L43_TRAN_STATUS3 */
+#define CS42L43_SPI_RX_BYTE_COUNT_MASK 0x0000FFFF
+#define CS42L43_SPI_RX_BYTE_COUNT_SHIFT 0
+
+/* CS42L43_TX_DATA */
+#define CS42L43_SPI_TX_DATA_MASK 0xFFFFFFFF
+#define CS42L43_SPI_TX_DATA_SHIFT 0
+
+/* CS42L43_RX_DATA */
+#define CS42L43_SPI_RX_DATA_MASK 0xFFFFFFFF
+#define CS42L43_SPI_RX_DATA_SHIFT 0
+
+/* CS42L43_DACCNFG1 */
+#define CS42L43_HP_MSTR_VOL_CTRL_EN_MASK 0x00000008
+#define CS42L43_HP_MSTR_VOL_CTRL_EN_SHIFT 3
+#define CS42L43_AMP4_INV_MASK 0x00000002
+#define CS42L43_AMP4_INV_SHIFT 1
+#define CS42L43_AMP3_INV_MASK 0x00000001
+#define CS42L43_AMP3_INV_SHIFT 0
+
+/* CS42L43_DACCNFG2 */
+#define CS42L43_HP_AUTO_CLAMP_DISABLE_MASK 0x00000002
+#define CS42L43_HP_AUTO_CLAMP_DISABLE_SHIFT 1
+#define CS42L43_HP_HPF_EN_MASK 0x00000001
+#define CS42L43_HP_HPF_EN_SHIFT 0
+
+/* CS42L43_HPPATHVOL */
+#define CS42L43_AMP4_PATH_VOL_MASK 0x01FF0000
+#define CS42L43_AMP4_PATH_VOL_SHIFT 16
+#define CS42L43_AMP3_PATH_VOL_MASK 0x000001FF
+#define CS42L43_AMP3_PATH_VOL_SHIFT 0
+
+/* CS42L43_PGAVOL */
+#define CS42L43_HP_PATH_VOL_RAMP_MASK 0x0003C000
+#define CS42L43_HP_PATH_VOL_RAMP_SHIFT 14
+#define CS42L43_HP_PATH_VOL_ZC_MASK 0x00002000
+#define CS42L43_HP_PATH_VOL_ZC_SHIFT 13
+#define CS42L43_HP_PATH_VOL_SFT_MASK 0x00001000
+#define CS42L43_HP_PATH_VOL_SFT_SHIFT 12
+#define CS42L43_HP_DIG_VOL_RAMP_MASK 0x00000F00
+#define CS42L43_HP_DIG_VOL_RAMP_SHIFT 8
+#define CS42L43_HP_ANA_VOL_RAMP_MASK 0x0000000F
+#define CS42L43_HP_ANA_VOL_RAMP_SHIFT 0
+
+/* CS42L43_LOADDETRESULTS */
+#define CS42L43_AMP3_RES_DET_MASK 0x00000003
+#define CS42L43_AMP3_RES_DET_SHIFT 0
+
+/* CS42L43_LOADDETENA */
+#define CS42L43_HPLOAD_DET_EN_MASK 0x00000001
+#define CS42L43_HPLOAD_DET_EN_SHIFT 0
+
+/* CS42L43_CTRL */
+#define CS42L43_ADPTPWR_MODE_MASK 0x00000007
+#define CS42L43_ADPTPWR_MODE_SHIFT 0
+
+/* CS42L43_COEFF_RD_WR0 */
+#define CS42L43_WRITE_MODE_MASK 0x00000002
+#define CS42L43_WRITE_MODE_SHIFT 1
+
+/* CS42L43_INIT_DONE0 */
+#define CS42L43_INITIALIZE_DONE_MASK 0x00000001
+#define CS42L43_INITIALIZE_DONE_SHIFT 0
+
+/* CS42L43_START_EQZ0 */
+#define CS42L43_START_FILTER_MASK 0x00000001
+#define CS42L43_START_FILTER_SHIFT 0
+
+/* CS42L43_MUTE_EQ_IN0 */
+#define CS42L43_MUTE_EQ_CH2_MASK 0x00000002
+#define CS42L43_MUTE_EQ_CH2_SHIFT 1
+#define CS42L43_MUTE_EQ_CH1_MASK 0x00000001
+#define CS42L43_MUTE_EQ_CH1_SHIFT 0
+
+/* CS42L43_PLL_INT */
+#define CS42L43_PLL_LOST_LOCK_INT_MASK 0x00000002
+#define CS42L43_PLL_LOST_LOCK_INT_SHIFT 1
+#define CS42L43_PLL_READY_INT_MASK 0x00000001
+#define CS42L43_PLL_READY_INT_SHIFT 0
+
+/* CS42L43_SOFT_INT */
+#define CS42L43_CONTROL_APPLIED_INT_MASK 0x00000010
+#define CS42L43_CONTROL_APPLIED_INT_SHIFT 4
+#define CS42L43_CONTROL_WARN_INT_MASK 0x00000008
+#define CS42L43_CONTROL_WARN_INT_SHIFT 3
+#define CS42L43_PATCH_WARN_INT_MASK 0x00000002
+#define CS42L43_PATCH_WARN_INT_SHIFT 1
+#define CS42L43_PATCH_APPLIED_INT_MASK 0x00000001
+#define CS42L43_PATCH_APPLIED_INT_SHIFT 0
+
+/* CS42L43_MSM_INT */
+#define CS42L43_HP_STARTUP_DONE_INT_MASK 0x00000800
+#define CS42L43_HP_STARTUP_DONE_INT_SHIFT 11
+#define CS42L43_HP_SHUTDOWN_DONE_INT_MASK 0x00000400
+#define CS42L43_HP_SHUTDOWN_DONE_INT_SHIFT 10
+#define CS42L43_HSDET_DONE_INT_MASK 0x00000200
+#define CS42L43_HSDET_DONE_INT_SHIFT 9
+#define CS42L43_TIPSENSE_UNPLUG_DB_INT_MASK 0x00000080
+#define CS42L43_TIPSENSE_UNPLUG_DB_INT_SHIFT 7
+#define CS42L43_TIPSENSE_PLUG_DB_INT_MASK 0x00000040
+#define CS42L43_TIPSENSE_PLUG_DB_INT_SHIFT 6
+#define CS42L43_RINGSENSE_UNPLUG_DB_INT_MASK 0x00000020
+#define CS42L43_RINGSENSE_UNPLUG_DB_INT_SHIFT 5
+#define CS42L43_RINGSENSE_PLUG_DB_INT_MASK 0x00000010
+#define CS42L43_RINGSENSE_PLUG_DB_INT_SHIFT 4
+#define CS42L43_TIPSENSE_UNPLUG_PDET_INT_MASK 0x00000008
+#define CS42L43_TIPSENSE_UNPLUG_PDET_INT_SHIFT 3
+#define CS42L43_TIPSENSE_PLUG_PDET_INT_MASK 0x00000004
+#define CS42L43_TIPSENSE_PLUG_PDET_INT_SHIFT 2
+#define CS42L43_RINGSENSE_UNPLUG_PDET_INT_MASK 0x00000002
+#define CS42L43_RINGSENSE_UNPLUG_PDET_INT_SHIFT 1
+#define CS42L43_RINGSENSE_PLUG_PDET_INT_MASK 0x00000001
+#define CS42L43_RINGSENSE_PLUG_PDET_INT_SHIFT 0
+
+/* CS42L43_ACC_DET_INT */
+#define CS42L43_HS2_BIAS_SENSE_INT_MASK 0x00000800
+#define CS42L43_HS2_BIAS_SENSE_INT_SHIFT 11
+#define CS42L43_HS1_BIAS_SENSE_INT_MASK 0x00000400
+#define CS42L43_HS1_BIAS_SENSE_INT_SHIFT 10
+#define CS42L43_DC_DETECT1_FALSE_INT_MASK 0x00000080
+#define CS42L43_DC_DETECT1_FALSE_INT_SHIFT 7
+#define CS42L43_DC_DETECT1_TRUE_INT_MASK 0x00000040
+#define CS42L43_DC_DETECT1_TRUE_INT_SHIFT 6
+#define CS42L43_HSBIAS_CLAMPED_INT_MASK 0x00000008
+#define CS42L43_HSBIAS_CLAMPED_INT_SHIFT 3
+#define CS42L43_HS3_4_BIAS_SENSE_INT_MASK 0x00000001
+#define CS42L43_HS3_4_BIAS_SENSE_INT_SHIFT 0
+
+/* CS42L43_SPI_MSTR_INT */
+#define CS42L43_IRQ_SPI_STALLING_INT_MASK 0x00000004
+#define CS42L43_IRQ_SPI_STALLING_INT_SHIFT 2
+#define CS42L43_IRQ_SPI_STS_INT_MASK 0x00000002
+#define CS42L43_IRQ_SPI_STS_INT_SHIFT 1
+#define CS42L43_IRQ_SPI_BLOCK_INT_MASK 0x00000001
+#define CS42L43_IRQ_SPI_BLOCK_INT_SHIFT 0
+
+/* CS42L43_SW_TO_SPI_BRIDGE_INT */
+#define CS42L43_SW2SPI_BUF_OVF_UDF_INT_MASK 0x00000001
+#define CS42L43_SW2SPI_BUF_OVF_UDF_INT_SHIFT 0
+
+/* CS42L43_CLASS_D_AMP_INT */
+#define CS42L43_AMP2_CLK_STOP_FAULT_INT_MASK 0x00002000
+#define CS42L43_AMP2_CLK_STOP_FAULT_INT_SHIFT 13
+#define CS42L43_AMP1_CLK_STOP_FAULT_INT_MASK 0x00001000
+#define CS42L43_AMP1_CLK_STOP_FAULT_INT_SHIFT 12
+#define CS42L43_AMP2_VDDSPK_FAULT_INT_MASK 0x00000800
+#define CS42L43_AMP2_VDDSPK_FAULT_INT_SHIFT 11
+#define CS42L43_AMP1_VDDSPK_FAULT_INT_MASK 0x00000400
+#define CS42L43_AMP1_VDDSPK_FAULT_INT_SHIFT 10
+#define CS42L43_AMP2_SHUTDOWN_DONE_INT_MASK 0x00000200
+#define CS42L43_AMP2_SHUTDOWN_DONE_INT_SHIFT 9
+#define CS42L43_AMP1_SHUTDOWN_DONE_INT_MASK 0x00000100
+#define CS42L43_AMP1_SHUTDOWN_DONE_INT_SHIFT 8
+#define CS42L43_AMP2_STARTUP_DONE_INT_MASK 0x00000080
+#define CS42L43_AMP2_STARTUP_DONE_INT_SHIFT 7
+#define CS42L43_AMP1_STARTUP_DONE_INT_MASK 0x00000040
+#define CS42L43_AMP1_STARTUP_DONE_INT_SHIFT 6
+#define CS42L43_AMP2_THERM_SHDN_INT_MASK 0x00000020
+#define CS42L43_AMP2_THERM_SHDN_INT_SHIFT 5
+#define CS42L43_AMP1_THERM_SHDN_INT_MASK 0x00000010
+#define CS42L43_AMP1_THERM_SHDN_INT_SHIFT 4
+#define CS42L43_AMP2_THERM_WARN_INT_MASK 0x00000008
+#define CS42L43_AMP2_THERM_WARN_INT_SHIFT 3
+#define CS42L43_AMP1_THERM_WARN_INT_MASK 0x00000004
+#define CS42L43_AMP1_THERM_WARN_INT_SHIFT 2
+#define CS42L43_AMP2_SCDET_INT_MASK 0x00000002
+#define CS42L43_AMP2_SCDET_INT_SHIFT 1
+#define CS42L43_AMP1_SCDET_INT_MASK 0x00000001
+#define CS42L43_AMP1_SCDET_INT_SHIFT 0
+
+/* CS42L43_GPIO_INT */
+#define CS42L43_GPIO3_FALL_INT_MASK 0x00000020
+#define CS42L43_GPIO3_FALL_INT_SHIFT 5
+#define CS42L43_GPIO3_RISE_INT_MASK 0x00000010
+#define CS42L43_GPIO3_RISE_INT_SHIFT 4
+#define CS42L43_GPIO2_FALL_INT_MASK 0x00000008
+#define CS42L43_GPIO2_FALL_INT_SHIFT 3
+#define CS42L43_GPIO2_RISE_INT_MASK 0x00000004
+#define CS42L43_GPIO2_RISE_INT_SHIFT 2
+#define CS42L43_GPIO1_FALL_INT_MASK 0x00000002
+#define CS42L43_GPIO1_FALL_INT_SHIFT 1
+#define CS42L43_GPIO1_RISE_INT_MASK 0x00000001
+#define CS42L43_GPIO1_RISE_INT_SHIFT 0
+
+/* CS42L43_HPOUT_INT */
+#define CS42L43_HP_ILIMIT_INT_MASK 0x00000002
+#define CS42L43_HP_ILIMIT_INT_SHIFT 1
+#define CS42L43_HP_LOADDET_DONE_INT_MASK 0x00000001
+#define CS42L43_HP_LOADDET_DONE_INT_SHIFT 0
+
+/* CS42L43_BOOT_CONTROL */
+#define CS42L43_LOCK_HW_STS_MASK 0x00000002
+#define CS42L43_LOCK_HW_STS_SHIFT 1
+
+/* CS42L43_BLOCK_EN */
+#define CS42L43_MCU_EN_MASK 0x00000001
+#define CS42L43_MCU_EN_SHIFT 0
+
+/* CS42L43_SHUTTER_CONTROL */
+#define CS42L43_STATUS_SPK_SHUTTER_MUTE_MASK 0x00008000
+#define CS42L43_STATUS_SPK_SHUTTER_MUTE_SHIFT 15
+#define CS42L43_SPK_SHUTTER_CFG_MASK 0x00000F00
+#define CS42L43_SPK_SHUTTER_CFG_SHIFT 8
+#define CS42L43_STATUS_MIC_SHUTTER_MUTE_MASK 0x00000080
+#define CS42L43_STATUS_MIC_SHUTTER_MUTE_SHIFT 7
+#define CS42L43_MIC_SHUTTER_CFG_MASK 0x0000000F
+#define CS42L43_MIC_SHUTTER_CFG_SHIFT 0
+
+/* CS42L43_MCU_SW_REV */
+#define CS42L43_BIOS_SUBMINOR_REV_MASK 0xFF000000
+#define CS42L43_BIOS_SUBMINOR_REV_SHIFT 24
+#define CS42L43_BIOS_MINOR_REV_MASK 0x00F00000
+#define CS42L43_BIOS_MINOR_REV_SHIFT 20
+#define CS42L43_BIOS_MAJOR_REV_MASK 0x000F0000
+#define CS42L43_BIOS_MAJOR_REV_SHIFT 16
+#define CS42L43_FW_SUBMINOR_REV_MASK 0x0000FF00
+#define CS42L43_FW_SUBMINOR_REV_SHIFT 8
+#define CS42L43_FW_MINOR_REV_MASK 0x000000F0
+#define CS42L43_FW_MINOR_REV_SHIFT 4
+#define CS42L43_FW_MAJOR_REV_MASK 0x0000000F
+#define CS42L43_FW_MAJOR_REV_SHIFT 0
+
+/* CS42L43_NEED_CONFIGS */
+#define CS42L43_FW_PATCH_NEED_CFG_MASK 0x80000000
+#define CS42L43_FW_PATCH_NEED_CFG_SHIFT 31
+
+/* CS42L43_FW_MISSION_CTRL_MM_CTRL_SELECTION */
+#define CS42L43_FW_MM_CTRL_MCU_SEL_MASK 0x00000001
+#define CS42L43_FW_MM_CTRL_MCU_SEL_SHIFT 0
+
+/* CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_REG */
+#define CS42L43_FW_MISSION_CTRL_MM_MCU_CFG_DISABLE_VAL 0xF05AA50F
+
+#endif /* CS42L43_CORE_REGS_H */
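
Every field definition above follows the same _MASK/_SHIFT pairing, so programming a field reduces to one regmap_update_bits() call. A hedged sketch, assuming a live regmap for the part; the helper name and the caller-supplied rate encoding are illustrative:

#include <linux/mfd/cs42l43-regs.h>
#include <linux/regmap.h>

/* Program the ASP sample-rate field without disturbing neighbouring bits. */
static int cs42l43_set_asp_rate(struct regmap *regmap, unsigned int rate)
{
	return regmap_update_bits(regmap, CS42L43_FS_SELECT1,
				  CS42L43_ASP_RATE_MASK,
				  rate << CS42L43_ASP_RATE_SHIFT);
}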
diff --git a/include/linux/mfd/cs42l43.h b/include/linux/mfd/cs42l43.h
new file mode 100644
index 000000000000..2239d8585e78
--- /dev/null
+++ b/include/linux/mfd/cs42l43.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CS42L43 core driver external data
+ *
+ * Copyright (C) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef CS42L43_CORE_EXT_H
+#define CS42L43_CORE_EXT_H
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+
+#define CS42L43_N_SUPPLIES 3
+
+struct device;
+struct gpio_desc;
+struct sdw_slave;
+
+enum cs42l43_irq_numbers {
+ CS42L43_PLL_LOST_LOCK,
+ CS42L43_PLL_READY,
+
+ CS42L43_HP_STARTUP_DONE,
+ CS42L43_HP_SHUTDOWN_DONE,
+ CS42L43_HSDET_DONE,
+ CS42L43_TIPSENSE_UNPLUG_DB,
+ CS42L43_TIPSENSE_PLUG_DB,
+ CS42L43_RINGSENSE_UNPLUG_DB,
+ CS42L43_RINGSENSE_PLUG_DB,
+ CS42L43_TIPSENSE_UNPLUG_PDET,
+ CS42L43_TIPSENSE_PLUG_PDET,
+ CS42L43_RINGSENSE_UNPLUG_PDET,
+ CS42L43_RINGSENSE_PLUG_PDET,
+
+ CS42L43_HS2_BIAS_SENSE,
+ CS42L43_HS1_BIAS_SENSE,
+ CS42L43_DC_DETECT1_FALSE,
+ CS42L43_DC_DETECT1_TRUE,
+ CS42L43_HSBIAS_CLAMPED,
+ CS42L43_HS3_4_BIAS_SENSE,
+
+ CS42L43_AMP2_CLK_STOP_FAULT,
+ CS42L43_AMP1_CLK_STOP_FAULT,
+ CS42L43_AMP2_VDDSPK_FAULT,
+ CS42L43_AMP1_VDDSPK_FAULT,
+ CS42L43_AMP2_SHUTDOWN_DONE,
+ CS42L43_AMP1_SHUTDOWN_DONE,
+ CS42L43_AMP2_STARTUP_DONE,
+ CS42L43_AMP1_STARTUP_DONE,
+ CS42L43_AMP2_THERM_SHDN,
+ CS42L43_AMP1_THERM_SHDN,
+ CS42L43_AMP2_THERM_WARN,
+ CS42L43_AMP1_THERM_WARN,
+ CS42L43_AMP2_SCDET,
+ CS42L43_AMP1_SCDET,
+
+ CS42L43_GPIO3_FALL,
+ CS42L43_GPIO3_RISE,
+ CS42L43_GPIO2_FALL,
+ CS42L43_GPIO2_RISE,
+ CS42L43_GPIO1_FALL,
+ CS42L43_GPIO1_RISE,
+
+ CS42L43_HP_ILIMIT,
+ CS42L43_HP_LOADDET_DONE,
+};
+
+struct cs42l43 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct sdw_slave *sdw;
+
+ struct regulator *vdd_p;
+ struct regulator *vdd_d;
+ struct regulator_bulk_data core_supplies[CS42L43_N_SUPPLIES];
+
+ struct gpio_desc *reset;
+
+ int irq;
+ struct regmap_irq_chip irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+
+ struct work_struct boot_work;
+ struct completion device_attach;
+ struct completion device_detach;
+ struct completion firmware_download;
+ int firmware_error;
+
+ unsigned int sdw_freq;
+ /* Lock to gate control of the PLL and its sources. */
+ struct mutex pll_lock;
+
+ bool sdw_pll_active;
+ bool attached;
+ bool hw_lock;
+};
+
+#endif /* CS42L43_CORE_EXT_H */
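
The enum above gives function drivers stable offsets into the core's regmap-irq chip. A sketch of claiming one of those interrupts, assuming the parent has registered the chip and populated irq_data; the handler wiring and name string are hypothetical:

#include <linux/interrupt.h>
#include <linux/mfd/cs42l43.h>
#include <linux/regmap.h>

static int cs42l43_request_irq(struct cs42l43 *cs42l43,
			       enum cs42l43_irq_numbers hwirq,
			       irq_handler_t handler, void *data)
{
	/* Map the chip-local IRQ number to a Linux virtual IRQ */
	int irq = regmap_irq_get_virq(cs42l43->irq_data, hwirq);

	if (irq < 0)
		return irq;

	return devm_request_threaded_irq(cs42l43->dev, irq, NULL, handler,
					 IRQF_ONESHOT, "cs42l43", data);
}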
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index 76feb3a7066d..9cb2fc2938ce 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -93,6 +93,8 @@ struct da9052 {
int chip_irq;
+ int fault_log;
+
/* SOC I/O transfer related fixes for DA9052/53 */
int (*fix_io) (struct da9052 *da9052, unsigned char reg);
};
diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h
index d3f126990ad0..137a2b067512 100644
--- a/include/linux/mfd/da9055/pdata.h
+++ b/include/linux/mfd/da9055/pdata.h
@@ -7,7 +7,6 @@
#define DA9055_MAX_REGULATORS 8
struct da9055;
-struct gpio_desc;
enum gpio_select {
NO_GPIO = 0,
@@ -24,16 +23,6 @@ struct da9055_pdata {
/* Enable RTC in RESET Mode */
bool reset_enable;
/*
- * GPI muxed pin to control
- * regulator state A/B, 0 if not available.
- */
- int *gpio_ren;
- /*
- * GPI muxed pin to control
- * regulator set, 0 if not available.
- */
- int *gpio_rsel;
- /*
* Regulator mode control bits value (GPI offset) that
* controls the regulator state, 0 if not available.
*/
@@ -43,7 +32,5 @@ struct da9055_pdata {
* controls the regulator set A/B, 0 if not available.
*/
enum gpio_select *reg_rsel;
- /* GPIO descriptors to enable regulator, NULL if not available */
- struct gpio_desc **ena_gpiods;
};
#endif /* __DA9055_PDATA_H */
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index fa7a43f02f27..eae82f421414 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -36,6 +36,7 @@ enum da9063_variant_codes {
PMIC_DA9063_BB = 0x5,
PMIC_DA9063_CA = 0x6,
PMIC_DA9063_DA = 0x7,
+ PMIC_DA9063_EA = 0x8,
};
/* Interrupts */
@@ -77,6 +78,7 @@ struct da9063 {
enum da9063_type type;
unsigned char variant_code;
unsigned int flags;
+ bool use_sw_pm;
/* Control interface */
struct regmap *regmap;
diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
index 6e0f66a2e727..7b8364bd08a0 100644
--- a/include/linux/mfd/da9063/registers.h
+++ b/include/linux/mfd/da9063/registers.h
@@ -1040,6 +1040,29 @@
/* DA9063_REG_CONFIG_J (addr=0x10F) */
#define DA9063_TWOWIRE_TO 0x40
+/* DA9063_REG_MON_REG_2 (addr=0x115) */
+#define DA9063_LDO1_MON_EN 0x01
+#define DA9063_LDO2_MON_EN 0x02
+#define DA9063_LDO3_MON_EN 0x04
+#define DA9063_LDO4_MON_EN 0x08
+#define DA9063_LDO5_MON_EN 0x10
+#define DA9063_LDO6_MON_EN 0x20
+#define DA9063_LDO7_MON_EN 0x40
+#define DA9063_LDO8_MON_EN 0x80
+
+/* DA9063_REG_MON_REG_3 (addr=0x116) */
+#define DA9063_LDO9_MON_EN 0x01
+#define DA9063_LDO10_MON_EN 0x02
+#define DA9063_LDO11_MON_EN 0x04
+
+/* DA9063_REG_MON_REG_4 (addr=0x117) */
+#define DA9063_BCORE1_MON_EN 0x04
+#define DA9063_BCORE2_MON_EN 0x08
+#define DA9063_BPRO_MON_EN 0x10
+#define DA9063_BIO_MON_EN 0x20
+#define DA9063_BMEM_MON_EN 0x40
+#define DA9063_BPERI_MON_EN 0x80
+
/* DA9063_REG_MON_REG_5 (addr=0x118) */
#define DA9063_MON_A8_IDX_MASK 0x07
#define DA9063_MON_A8_IDX_NONE 0x00
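
The new monitor-enable bits are single-bit flags, so enabling a supply monitor is one regmap_update_bits() call. A sketch only, assuming the matching DA9063_REG_MON_REG_2 address define lands alongside these masks:

/* Enable the LDO4 voltage monitor; the register define is assumed present. */
static int da9063_enable_ldo4_monitor(struct da9063 *da9063)
{
	return regmap_update_bits(da9063->regmap, DA9063_REG_MON_REG_2,
				  DA9063_LDO4_MON_EN, DA9063_LDO4_MON_EN);
}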
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h
index 556375b91316..9acd703dd5ca 100644
--- a/include/linux/mfd/davinci_voicecodec.h
+++ b/include/linux/mfd/davinci_voicecodec.h
@@ -10,11 +10,13 @@
#ifndef __LINUX_MFD_DAVINCI_VOICECODEC_H_
#define __LINUX_MFD_DAVINCI_VOICECODEC_H_
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
+#include <linux/bits.h>
#include <linux/mfd/core.h>
-#include <linux/platform_data/edma.h>
+#include <linux/types.h>
+struct clk;
+struct device;
+struct platform_device;
struct regmap;
/*
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index e6ee2ec35de9..828362b7860c 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -186,10 +186,11 @@ enum ddr_pwrst {
#define PRCMU_FW_PROJECT_U8500_C3 8
#define PRCMU_FW_PROJECT_U8500_C4 9
#define PRCMU_FW_PROJECT_U9500_MBL 10
-#define PRCMU_FW_PROJECT_U8500_MBL 11 /* Customer specific */
+#define PRCMU_FW_PROJECT_U8500_SSG1 11 /* Samsung specific */
#define PRCMU_FW_PROJECT_U8500_MBL2 12 /* Customer specific */
#define PRCMU_FW_PROJECT_U8520 13
#define PRCMU_FW_PROJECT_U8420 14
+#define PRCMU_FW_PROJECT_U8500_SSG2 15 /* Samsung specific */
#define PRCMU_FW_PROJECT_U8420_SYSCLK 17
#define PRCMU_FW_PROJECT_A9420 20
/* [32..63] 9540 and derivatives */
@@ -212,9 +213,9 @@ struct prcmu_fw_version {
#if defined(CONFIG_UX500_SOC_DB8500)
-static inline void prcmu_early_init(void)
+static inline void __init prcmu_early_init(void)
{
- return db8500_prcmu_early_init();
+ db8500_prcmu_early_init();
}
static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
@@ -301,7 +302,7 @@ static inline int prcmu_request_ape_opp_100_voltage(bool enable)
static inline void prcmu_system_reset(u16 reset_code)
{
- return db8500_prcmu_system_reset(reset_code);
+ db8500_prcmu_system_reset(reset_code);
}
static inline u16 prcmu_get_reset_code(void)
@@ -313,7 +314,7 @@ int prcmu_ac_wake_req(void);
void prcmu_ac_sleep_req(void);
static inline void prcmu_modem_reset(void)
{
- return db8500_prcmu_modem_reset();
+ db8500_prcmu_modem_reset();
}
static inline bool prcmu_is_ac_wake_requested(void)
@@ -555,36 +556,6 @@ static inline void prcmu_clear(unsigned int reg, u32 bits)
#define PRCMU_QOS_ARM_OPP 3
#define PRCMU_QOS_DEFAULT_VALUE -1
-#ifdef CONFIG_DBX500_PRCMU_QOS_POWER
-
-unsigned long prcmu_qos_get_cpufreq_opp_delay(void);
-void prcmu_qos_set_cpufreq_opp_delay(unsigned long);
-void prcmu_qos_force_opp(int, s32);
-int prcmu_qos_requirement(int pm_qos_class);
-int prcmu_qos_add_requirement(int pm_qos_class, char *name, s32 value);
-int prcmu_qos_update_requirement(int pm_qos_class, char *name, s32 new_value);
-void prcmu_qos_remove_requirement(int pm_qos_class, char *name);
-int prcmu_qos_add_notifier(int prcmu_qos_class,
- struct notifier_block *notifier);
-int prcmu_qos_remove_notifier(int prcmu_qos_class,
- struct notifier_block *notifier);
-
-#else
-
-static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void)
-{
- return 0;
-}
-
-static inline void prcmu_qos_set_cpufreq_opp_delay(unsigned long n) {}
-
-static inline void prcmu_qos_force_opp(int prcmu_qos_class, s32 i) {}
-
-static inline int prcmu_qos_requirement(int prcmu_qos_class)
-{
- return 0;
-}
-
static inline int prcmu_qos_add_requirement(int prcmu_qos_class,
char *name, s32 value)
{
@@ -601,17 +572,4 @@ static inline void prcmu_qos_remove_requirement(int prcmu_qos_class, char *name)
{
}
-static inline int prcmu_qos_add_notifier(int prcmu_qos_class,
- struct notifier_block *notifier)
-{
- return 0;
-}
-static inline int prcmu_qos_remove_notifier(int prcmu_qos_class,
- struct notifier_block *notifier)
-{
- return 0;
-}
-
-#endif
-
#endif /* __MACH_PRCMU_H */
diff --git a/include/linux/mfd/dm355evm_msp.h b/include/linux/mfd/dm355evm_msp.h
deleted file mode 100644
index 372470350fab..000000000000
--- a/include/linux/mfd/dm355evm_msp.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * dm355evm_msp.h - support MSP430 microcontroller on DM355EVM board
- */
-#ifndef __LINUX_I2C_DM355EVM_MSP
-#define __LINUX_I2C_DM355EVM_MSP
-
-/*
- * Written against Spectrum's writeup for the A4 firmware revision,
- * and tweaked to match source and rev D2 schematics by removing CPLD
- * and NOR flash hooks (which were last appropriate in rev B boards).
- *
- * Note that the firmware supports a flavor of write posting ... to be
- * sure a write completes, issue another read or write.
- */
-
-/* utilities to access "registers" emulated by msp430 firmware */
-extern int dm355evm_msp_write(u8 value, u8 reg);
-extern int dm355evm_msp_read(u8 reg);
-
-
-/* command/control registers */
-#define DM355EVM_MSP_COMMAND 0x00
-# define MSP_COMMAND_NULL 0
-# define MSP_COMMAND_RESET_COLD 1
-# define MSP_COMMAND_RESET_WARM 2
-# define MSP_COMMAND_RESET_WARM_I 3
-# define MSP_COMMAND_POWEROFF 4
-# define MSP_COMMAND_IR_REINIT 5
-#define DM355EVM_MSP_STATUS 0x01
-# define MSP_STATUS_BAD_OFFSET BIT(0)
-# define MSP_STATUS_BAD_COMMAND BIT(1)
-# define MSP_STATUS_POWER_ERROR BIT(2)
-# define MSP_STATUS_RXBUF_OVERRUN BIT(3)
-#define DM355EVM_MSP_RESET 0x02 /* 0 bits == in reset */
-# define MSP_RESET_DC5 BIT(0)
-# define MSP_RESET_TVP5154 BIT(2)
-# define MSP_RESET_IMAGER BIT(3)
-# define MSP_RESET_ETHERNET BIT(4)
-# define MSP_RESET_SYS BIT(5)
-# define MSP_RESET_AIC33 BIT(7)
-
-/* GPIO registers ... bit patterns mostly match the source MSP ports */
-#define DM355EVM_MSP_LED 0x03 /* active low (MSP P4) */
-#define DM355EVM_MSP_SWITCH1 0x04 /* (MSP P5, masked) */
-# define MSP_SWITCH1_SW6_1 BIT(0)
-# define MSP_SWITCH1_SW6_2 BIT(1)
-# define MSP_SWITCH1_SW6_3 BIT(2)
-# define MSP_SWITCH1_SW6_4 BIT(3)
-# define MSP_SWITCH1_J1 BIT(4) /* NTSC/PAL */
-# define MSP_SWITCH1_MSP_INT BIT(5) /* active low */
-#define DM355EVM_MSP_SWITCH2 0x05 /* (MSP P6, masked) */
-# define MSP_SWITCH2_SW10 BIT(3)
-# define MSP_SWITCH2_SW11 BIT(4)
-# define MSP_SWITCH2_SW12 BIT(5)
-# define MSP_SWITCH2_SW13 BIT(6)
-# define MSP_SWITCH2_SW14 BIT(7)
-#define DM355EVM_MSP_SDMMC 0x06 /* (MSP P2, masked) */
-# define MSP_SDMMC_0_WP BIT(1)
-# define MSP_SDMMC_0_CD BIT(2) /* active low */
-# define MSP_SDMMC_1_WP BIT(3)
-# define MSP_SDMMC_1_CD BIT(4) /* active low */
-#define DM355EVM_MSP_FIRMREV 0x07 /* not a GPIO (out of order) */
-#define DM355EVM_MSP_VIDEO_IN 0x08 /* (MSP P3, masked) */
-# define MSP_VIDEO_IMAGER BIT(7) /* low == tvp5146 */
-
-/* power supply registers are currently omitted */
-
-/* RTC registers */
-#define DM355EVM_MSP_RTC_0 0x12 /* LSB */
-#define DM355EVM_MSP_RTC_1 0x13
-#define DM355EVM_MSP_RTC_2 0x14
-#define DM355EVM_MSP_RTC_3 0x15 /* MSB */
-
-/* input event queue registers; code == ((HIGH << 8) | LOW) */
-#define DM355EVM_MSP_INPUT_COUNT 0x16 /* decrement by reading LOW */
-#define DM355EVM_MSP_INPUT_HIGH 0x17
-#define DM355EVM_MSP_INPUT_LOW 0x18
-
-#endif /* __LINUX_I2C_DM355EVM_MSP */
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h
deleted file mode 100644
index 43dfca1c9702..000000000000
--- a/include/linux/mfd/ds1wm.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* MFD cell driver data for the DS1WM driver
- *
- * to be defined in the MFD device that is
- * using this driver for one of its sub-devices
- */
-
-struct ds1wm_driver_data {
- int active_high;
- int clock_rate;
- /* in milliseconds, the amount of time to
- * sleep following a reset pulse. Zero
- * should work if your bus devices' recovery
- * time respects the 1-wire spec since the
- * ds1wm implements the precise timings of
- * a reset pulse/presence detect sequence.
- */
- unsigned int reset_recover_delay;
-
- /* Say 1 here for big-endian hardware
- * (only relevant with bus_shift > 0)
- */
- bool is_hw_big_endian;
-
- /* left shift of register number to get register address offset.
- * Only 0, 1 or 2 allowed for 8, 16 or 32 bit bus width respectively
- */
- unsigned int bus_shift;
-};
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
index ffde195e12b7..ea51b1cdca5a 100644
--- a/include/linux/mfd/ezx-pcap.h
+++ b/include/linux/mfd/ezx-pcap.h
@@ -31,7 +31,6 @@ int ezx_pcap_set_bits(struct pcap_chip *, u8, u32, u32);
int pcap_to_irq(struct pcap_chip *, int);
int irq_to_pcap(struct pcap_chip *, int);
int pcap_adc_async(struct pcap_chip *, u8, u32, u8[], void *, void *);
-int pcap_adc_sync(struct pcap_chip *, u8, u32, u8[], u16[]);
void pcap_set_ts_bits(struct pcap_chip *, u32);
#define PCAP_SECOND_PORT 1
diff --git a/include/linux/mfd/hi6421-spmi-pmic.h b/include/linux/mfd/hi6421-spmi-pmic.h
deleted file mode 100644
index 2660226138b8..000000000000
--- a/include/linux/mfd/hi6421-spmi-pmic.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Header file for device driver Hi6421 PMIC
- *
- * Copyright (c) 2013 Linaro Ltd.
- * Copyright (C) 2011 Hisilicon.
- * Copyright (c) 2020-2021 Huawei Technologies Co., Ltd
- *
- * Guodong Xu <guodong.xu@linaro.org>
- */
-
-#ifndef __HISI_PMIC_H
-#define __HISI_PMIC_H
-
-#include <linux/irqdomain.h>
-#include <linux/regmap.h>
-
-struct hi6421_spmi_pmic {
- struct resource *res;
- struct device *dev;
- void __iomem *regs;
- spinlock_t lock;
- struct irq_domain *domain;
- int irq;
- int gpio;
- unsigned int *irqs;
- struct regmap *regmap;
-};
-
-#endif /* __HISI_PMIC_H */
diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h
index b06171322178..194556851ccf 100644
--- a/include/linux/mfd/hi655x-pmic.h
+++ b/include/linux/mfd/hi655x-pmic.h
@@ -2,7 +2,7 @@
/*
* Device driver for regulators in hi655x IC
*
- * Copyright (c) 2016 Hisilicon.
+ * Copyright (c) 2016 HiSilicon Ltd.
*
* Authors:
* Chen Feng <puck.chen@hisilicon.com>
@@ -12,6 +12,8 @@
#ifndef __HI655X_PMIC_H
#define __HI655X_PMIC_H
+#include <linux/gpio/consumer.h>
+
/* Hi655x registers are mapped to memory bus in 4 bytes stride */
#define HI655X_STRIDE 4
#define HI655X_BUS_ADDR(x) ((x) << 2)
@@ -50,10 +52,9 @@
#define OTMP_D1R_INT_MASK BIT(OTMP_D1R_INT)
struct hi655x_pmic {
- struct resource *res;
struct device *dev;
struct regmap *regmap;
- int gpio;
+ struct gpio_desc *gpio;
unsigned int ver;
struct regmap_irq_chip_data *irq_data;
};
diff --git a/include/linux/mfd/htc-pasic3.h b/include/linux/mfd/htc-pasic3.h
deleted file mode 100644
index 3d3ed67bd969..000000000000
--- a/include/linux/mfd/htc-pasic3.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * HTC PASIC3 driver - LEDs and DS1WM
- *
- * Copyright (c) 2007 Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- */
-
-#ifndef __PASIC3_H
-#define __PASIC3_H
-
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-
-extern void pasic3_write_register(struct device *dev, u32 reg, u8 val);
-extern u8 pasic3_read_register(struct device *dev, u32 reg);
-
-/*
- * mask for registers 0x20,0x21,0x22
- */
-#define PASIC3_MASK_LED0 0x04
-#define PASIC3_MASK_LED1 0x08
-#define PASIC3_MASK_LED2 0x40
-
-/*
- * bits in register 0x06
- */
-#define PASIC3_BIT2_LED0 0x08
-#define PASIC3_BIT2_LED1 0x10
-#define PASIC3_BIT2_LED2 0x20
-
-struct pasic3_led {
- struct led_classdev led;
- unsigned int hw_num;
- unsigned int bit2;
- unsigned int mask;
- struct pasic3_leds_machinfo *pdata;
-};
-
-struct pasic3_leds_machinfo {
- unsigned int num_leds;
- unsigned int power_gpio;
- struct pasic3_led *leds;
-};
-
-struct pasic3_platform_data {
- struct pasic3_leds_machinfo *led_pdata;
- unsigned int clock_rate;
-};
-
-#endif
diff --git a/include/linux/mfd/idt82p33_reg.h b/include/linux/mfd/idt82p33_reg.h
new file mode 100644
index 000000000000..1db532feeb91
--- /dev/null
+++ b/include/linux/mfd/idt82p33_reg.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Register Map - Based on AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef HAVE_IDT82P33_REG
+#define HAVE_IDT82P33_REG
+
+#define REG_ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f))
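+/*
+ * For example, REG_ADDR(0x2, 0x34) yields 0x134, i.e. the DPLL1_TOD_CNFG
+ * register below (page 2, offset 0x34).
+ */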
+
+/* Register address */
+#define DPLL1_TOD_CNFG 0x134
+#define DPLL2_TOD_CNFG 0x1B4
+
+#define DPLL1_TOD_STS 0x10B
+#define DPLL2_TOD_STS 0x18B
+
+#define DPLL1_TOD_TRIGGER 0x115
+#define DPLL2_TOD_TRIGGER 0x195
+
+#define DPLL1_OPERATING_MODE_CNFG 0x120
+#define DPLL2_OPERATING_MODE_CNFG 0x1A0
+
+#define DPLL1_HOLDOVER_FREQ_CNFG 0x12C
+#define DPLL2_HOLDOVER_FREQ_CNFG 0x1AC
+
+#define DPLL1_PHASE_OFFSET_CNFG 0x143
+#define DPLL2_PHASE_OFFSET_CNFG 0x1C3
+
+#define DPLL1_SYNC_EDGE_CNFG 0x140
+#define DPLL2_SYNC_EDGE_CNFG 0x1C0
+
+#define DPLL1_INPUT_MODE_CNFG 0x116
+#define DPLL2_INPUT_MODE_CNFG 0x196
+
+#define DPLL1_OPERATING_STS 0x102
+#define DPLL2_OPERATING_STS 0x182
+
+#define DPLL1_CURRENT_FREQ_STS 0x103
+#define DPLL2_CURRENT_FREQ_STS 0x183
+
+#define REG_SOFT_RESET 0x381
+
+#define OUT_MUX_CNFG(outn) REG_ADDR(0x6, (0xC * (outn)))
+#define TOD_TRIGGER(wr_trig, rd_trig) ((((wr_trig) & 0xf) << 4) | ((rd_trig) & 0xf))
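+/*
+ * For example, TOD_TRIGGER(HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ * HW_TOD_RD_TRIG_SEL_LSB_TOD_STS) packs the write trigger into the high
+ * nibble and the read trigger into the low nibble, giving 0x99 with the
+ * enum hw_tod_trig_sel values below.
+ */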
+
+/* Register bit definitions */
+#define SYNC_TOD BIT(1)
+#define PH_OFFSET_EN BIT(7)
+#define SQUELCH_ENABLE BIT(5)
+
+/* Bit definitions for the DPLL_MODE register */
+#define PLL_MODE_SHIFT (0)
+#define PLL_MODE_MASK (0x1F)
+#define COMBO_MODE_EN BIT(5)
+#define COMBO_MODE_SHIFT (6)
+#define COMBO_MODE_MASK (0x3)
+
+/* Bit definitions for DPLL_OPERATING_STS register */
+#define OPERATING_STS_MASK (0x7)
+#define OPERATING_STS_SHIFT (0x0)
+
+/* Bit definitions for DPLL_TOD_TRIGGER register */
+#define READ_TRIGGER_MASK (0xF)
+#define READ_TRIGGER_SHIFT (0x0)
+#define WRITE_TRIGGER_MASK (0xF0)
+#define WRITE_TRIGGER_SHIFT (0x4)
+
+/* Bit definitions for REG_SOFT_RESET register */
+#define SOFT_RESET_EN BIT(7)
+
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_AUTOMATIC = PLL_MODE_MIN,
+ PLL_MODE_FORCE_FREERUN = 1,
+ PLL_MODE_FORCE_HOLDOVER = 2,
+ PLL_MODE_FORCE_LOCKED = 4,
+ PLL_MODE_FORCE_PRE_LOCKED2 = 5,
+ PLL_MODE_FORCE_PRE_LOCKED = 6,
+ PLL_MODE_FORCE_LOST_PHASE = 7,
+ PLL_MODE_DCO = 10,
+ PLL_MODE_WPH = 18,
+ PLL_MODE_MAX = PLL_MODE_WPH,
+};
+
+enum hw_tod_trig_sel {
+ HW_TOD_TRIG_SEL_MIN = 0,
+ HW_TOD_TRIG_SEL_NO_WRITE = HW_TOD_TRIG_SEL_MIN,
+ HW_TOD_TRIG_SEL_NO_READ = HW_TOD_TRIG_SEL_MIN,
+ HW_TOD_TRIG_SEL_SYNC_SEL = 1,
+ HW_TOD_TRIG_SEL_IN12 = 2,
+ HW_TOD_TRIG_SEL_IN13 = 3,
+ HW_TOD_TRIG_SEL_IN14 = 4,
+ HW_TOD_TRIG_SEL_TOD_PPS = 5,
+ HW_TOD_TRIG_SEL_TIMER_INTERVAL = 6,
+ HW_TOD_TRIG_SEL_MSB_PHASE_OFFSET_CNFG = 7,
+ HW_TOD_TRIG_SEL_MSB_HOLDOVER_FREQ_CNFG = 8,
+ HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG = 9,
+ HW_TOD_RD_TRIG_SEL_LSB_TOD_STS = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG,
+};
+
+/** @brief Enumerated type listing DPLL operational states */
+enum dpll_state {
+ DPLL_STATE_FREERUN = 1,
+ DPLL_STATE_HOLDOVER = 2,
+ DPLL_STATE_LOCKED = 4,
+ DPLL_STATE_PRELOCKED2 = 5,
+ DPLL_STATE_PRELOCKED = 6,
+ DPLL_STATE_LOSTPHASE = 7,
+ DPLL_STATE_MAX
+};
+
+#endif
diff --git a/include/linux/mfd/idt8a340_reg.h b/include/linux/mfd/idt8a340_reg.h
new file mode 100644
index 000000000000..53a222605526
--- /dev/null
+++ b/include/linux/mfd/idt8a340_reg.h
@@ -0,0 +1,768 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Based on 5.2.0, Family Programming Guide (Sept 30, 2020)
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef HAVE_IDT8A340_REG
+#define HAVE_IDT8A340_REG
+
+#define PAGE_ADDR_BASE 0x0000
+#define PAGE_ADDR 0x00fc
+
+#define HW_REVISION 0x8180
+#define REV_ID 0x007a
+
+#define HW_DPLL_0 (0x8a00)
+#define HW_DPLL_1 (0x8b00)
+#define HW_DPLL_2 (0x8c00)
+#define HW_DPLL_3 (0x8d00)
+#define HW_DPLL_4 (0x8e00)
+#define HW_DPLL_5 (0x8f00)
+#define HW_DPLL_6 (0x9000)
+#define HW_DPLL_7 (0x9100)
+
+#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080)
+#define HW_DPLL_TOD_CTRL_1 (0x089)
+#define HW_DPLL_TOD_CTRL_2 (0x08A)
+#define HW_DPLL_TOD_OVR__0 (0x098)
+#define HW_DPLL_TOD_OUT_0__0 (0x0B0)
+
+#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740)
+#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741)
+#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742)
+#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743)
+#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744)
+#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745)
+#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746)
+#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747)
+#define HW_Q8_CH_SYNC_CTRL_0 (0xa748)
+#define HW_Q8_CH_SYNC_CTRL_1 (0xa749)
+#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a)
+#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b)
+#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c)
+#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d)
+#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e)
+#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f)
+
+#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14
+#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15
+#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16
+#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17
+
+#define SYNCTRL1_MASTER_SYNC_RST BIT(7)
+#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5)
+#define SYNCTRL1_TOD_SYNC_TRIG BIT(4)
+#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3)
+#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2)
+#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1)
+#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0)
+
+#define HW_Q8_CTRL_SPARE (0xa7d4)
+#define HW_Q11_CTRL_SPARE (0xa7ec)
+
+/*
+ * Select FOD5 as sync_trigger for Q8 divider.
+ * Transition from logic zero to one
+ * sets trigger to sync Q8 divider.
+ *
+ * Unused when FOD4 is driving Q8 divider (normal operation).
+ */
+#define Q9_TO_Q8_SYNC_TRIG BIT(1)
+
+/*
+ * Enable FOD5 as driver for clock and sync for Q8 divider.
+ * Enable fanout buffer for FOD5.
+ *
+ * Unused when FOD4 is driving Q8 divider (normal operation).
+ */
+#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
+
+/*
+ * Select FOD6 as sync_trigger for Q11 divider.
+ * Transition from logic zero to one
+ * sets trigger to sync Q11 divider.
+ *
+ * Unused when FOD7 is driving Q11 divider (normal operation).
+ */
+#define Q10_TO_Q11_SYNC_TRIG BIT(1)
+
+/*
+ * Enable FOD6 as driver for clock and sync for Q11 divider.
+ * Enable fanout buffer for FOD6.
+ *
+ * Unused when FOD7 is driving Q11 divider (normal operation).
+ */
+#define Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
+
+#define RESET_CTRL 0xc000
+#define SM_RESET 0x0012
+#define SM_RESET_V520 0x0013
+#define SM_RESET_CMD 0x5A
+
+#define GENERAL_STATUS 0xc014
+#define BOOT_STATUS 0x0000
+#define HW_REV_ID 0x000A
+#define BOND_ID 0x000B
+#define HW_CSR_ID 0x000C
+#define HW_IRQ_ID 0x000E
+#define MAJ_REL 0x0010
+#define MIN_REL 0x0011
+#define HOTFIX_REL 0x0012
+#define PIPELINE_ID 0x0014
+#define BUILD_ID 0x0018
+#define JTAG_DEVICE_ID 0x001c
+#define PRODUCT_ID 0x001e
+#define OTP_SCSR_CONFIG_SELECT 0x0022
+
+#define STATUS 0xc03c
+#define DPLL0_STATUS 0x0018
+#define DPLL1_STATUS 0x0019
+#define DPLL2_STATUS 0x001a
+#define DPLL3_STATUS 0x001b
+#define DPLL4_STATUS 0x001c
+#define DPLL5_STATUS 0x001d
+#define DPLL6_STATUS 0x001e
+#define DPLL7_STATUS 0x001f
+#define DPLL_SYS_STATUS 0x0020
+#define DPLL_SYS_APLL_STATUS 0x0021
+#define DPLL0_FILTER_STATUS 0x0044
+#define DPLL1_FILTER_STATUS 0x004c
+#define DPLL2_FILTER_STATUS 0x0054
+#define DPLL3_FILTER_STATUS 0x005c
+#define DPLL4_FILTER_STATUS 0x0064
+#define DPLL5_FILTER_STATUS 0x006c
+#define DPLL6_FILTER_STATUS 0x0074
+#define DPLL7_FILTER_STATUS 0x007c
+#define DPLLSYS_FILTER_STATUS 0x0084
+#define USER_GPIO0_TO_7_STATUS 0x008a
+#define USER_GPIO8_TO_15_STATUS 0x008b
+
+#define GPIO_USER_CONTROL 0xc160
+#define GPIO0_TO_7_OUT 0x0000
+#define GPIO8_TO_15_OUT 0x0001
+#define GPIO0_TO_7_OUT_V520 0x0002
+#define GPIO8_TO_15_OUT_V520 0x0003
+
+#define STICKY_STATUS_CLEAR 0xc164
+
+#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c
+
+#define ALERT_CFG 0xc188
+
+#define SYS_DPLL_XO 0xc194
+
+#define SYS_APLL 0xc19c
+
+#define INPUT_0 0xc1b0
+#define INPUT_1 0xc1c0
+#define INPUT_2 0xc1d0
+#define INPUT_3 0xc200
+#define INPUT_4 0xc210
+#define INPUT_5 0xc220
+#define INPUT_6 0xc230
+#define INPUT_7 0xc240
+#define INPUT_8 0xc250
+#define INPUT_9 0xc260
+#define INPUT_10 0xc280
+#define INPUT_11 0xc290
+#define INPUT_12 0xc2a0
+#define INPUT_13 0xc2b0
+#define INPUT_14 0xc2c0
+#define INPUT_15 0xc2d0
+
+#define REF_MON_0 0xc2e0
+#define REF_MON_1 0xc2ec
+#define REF_MON_2 0xc300
+#define REF_MON_3 0xc30c
+#define REF_MON_4 0xc318
+#define REF_MON_5 0xc324
+#define REF_MON_6 0xc330
+#define REF_MON_7 0xc33c
+#define REF_MON_8 0xc348
+#define REF_MON_9 0xc354
+#define REF_MON_10 0xc360
+#define REF_MON_11 0xc36c
+#define REF_MON_12 0xc380
+#define REF_MON_13 0xc38c
+#define REF_MON_14 0xc398
+#define REF_MON_15 0xc3a4
+
+#define DPLL_0 0xc3b0
+#define DPLL_CTRL_REG_0 0x0002
+#define DPLL_CTRL_REG_1 0x0003
+#define DPLL_CTRL_REG_2 0x0004
+#define DPLL_TOD_SYNC_CFG 0x0031
+#define DPLL_COMBO_SLAVE_CFG_0 0x0032
+#define DPLL_COMBO_SLAVE_CFG_1 0x0033
+#define DPLL_SLAVE_REF_CFG 0x0034
+#define DPLL_REF_MODE 0x0035
+#define DPLL_PHASE_MEASUREMENT_CFG 0x0036
+#define DPLL_MODE 0x0037
+#define DPLL_MODE_V520 0x003B
+#define DPLL_1 0xc400
+#define DPLL_2 0xc438
+#define DPLL_2_V520 0xc43c
+#define DPLL_3 0xc480
+#define DPLL_4 0xc4b8
+#define DPLL_4_V520 0xc4bc
+#define DPLL_5 0xc500
+#define DPLL_6 0xc538
+#define DPLL_6_V520 0xc53c
+#define DPLL_7 0xc580
+#define SYS_DPLL 0xc5b8
+#define SYS_DPLL_V520 0xc5bc
+
+#define DPLL_CTRL_0 0xc600
+#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001
+#define DPLL_CTRL_DPLL_FOD_FREQ 0x001c
+#define DPLL_CTRL_COMBO_MASTER_CFG 0x003a
+#define DPLL_CTRL_1 0xc63c
+#define DPLL_CTRL_2 0xc680
+#define DPLL_CTRL_3 0xc6bc
+#define DPLL_CTRL_4 0xc700
+#define DPLL_CTRL_5 0xc73c
+#define DPLL_CTRL_6 0xc780
+#define DPLL_CTRL_7 0xc7bc
+#define SYS_DPLL_CTRL 0xc800
+
+#define DPLL_PHASE_0 0xc818
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_PHASE 0x0000
+#define DPLL_PHASE_1 0xc81c
+#define DPLL_PHASE_2 0xc820
+#define DPLL_PHASE_3 0xc824
+#define DPLL_PHASE_4 0xc828
+#define DPLL_PHASE_5 0xc82c
+#define DPLL_PHASE_6 0xc830
+#define DPLL_PHASE_7 0xc834
+
+#define DPLL_FREQ_0 0xc838
+/* Signed 42-bit FFO in units of 2^(-53) */
+#define DPLL_WR_FREQ 0x0000
+#define DPLL_FREQ_1 0xc840
+#define DPLL_FREQ_2 0xc848
+#define DPLL_FREQ_3 0xc850
+#define DPLL_FREQ_4 0xc858
+#define DPLL_FREQ_5 0xc860
+#define DPLL_FREQ_6 0xc868
+#define DPLL_FREQ_7 0xc870
+
+#define DPLL_PHASE_PULL_IN_0 0xc880
+#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */
+#define PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */
+#define PULL_IN_CTRL 0x0007
+#define DPLL_PHASE_PULL_IN_1 0xc888
+#define DPLL_PHASE_PULL_IN_2 0xc890
+#define DPLL_PHASE_PULL_IN_3 0xc898
+#define DPLL_PHASE_PULL_IN_4 0xc8a0
+#define DPLL_PHASE_PULL_IN_5 0xc8a8
+#define DPLL_PHASE_PULL_IN_6 0xc8b0
+#define DPLL_PHASE_PULL_IN_7 0xc8b8
+
+#define GPIO_CFG 0xc8c0
+#define GPIO_CFG_GBL 0x0000
+#define GPIO_0 0xc8c2
+#define GPIO_DCO_INC_DEC 0x0000
+#define GPIO_OUT_CTRL_0 0x0001
+#define GPIO_OUT_CTRL_1 0x0002
+#define GPIO_TOD_TRIG 0x0003
+#define GPIO_DPLL_INDICATOR 0x0004
+#define GPIO_LOS_INDICATOR 0x0005
+#define GPIO_REF_INPUT_DSQ_0 0x0006
+#define GPIO_REF_INPUT_DSQ_1 0x0007
+#define GPIO_REF_INPUT_DSQ_2 0x0008
+#define GPIO_REF_INPUT_DSQ_3 0x0009
+#define GPIO_MAN_CLK_SEL_0 0x000a
+#define GPIO_MAN_CLK_SEL_1 0x000b
+#define GPIO_MAN_CLK_SEL_2 0x000c
+#define GPIO_SLAVE 0x000d
+#define GPIO_ALERT_OUT_CFG 0x000e
+#define GPIO_TOD_NOTIFICATION_CFG 0x000f
+#define GPIO_CTRL 0x0010
+#define GPIO_CTRL_V520 0x0011
+#define GPIO_1 0xc8d4
+#define GPIO_2 0xc8e6
+#define GPIO_3 0xc900
+#define GPIO_4 0xc912
+#define GPIO_5 0xc924
+#define GPIO_6 0xc936
+#define GPIO_7 0xc948
+#define GPIO_8 0xc95a
+#define GPIO_9 0xc980
+#define GPIO_10 0xc992
+#define GPIO_11 0xc9a4
+#define GPIO_12 0xc9b6
+#define GPIO_13 0xc9c8
+#define GPIO_14 0xc9da
+#define GPIO_15 0xca00
+
+#define OUT_DIV_MUX 0xca12
+#define OUTPUT_0 0xca14
+#define OUTPUT_0_V520 0xca20
+/* FOD frequency output divider value */
+#define OUT_DIV 0x0000
+#define OUT_DUTY_CYCLE_HIGH 0x0004
+#define OUT_CTRL_0 0x0008
+#define OUT_CTRL_1 0x0009
+/* Phase adjustment in FOD cycles */
+#define OUT_PHASE_ADJ 0x000c
+#define OUTPUT_1 0xca24
+#define OUTPUT_1_V520 0xca30
+#define OUTPUT_2 0xca34
+#define OUTPUT_2_V520 0xca40
+#define OUTPUT_3 0xca44
+#define OUTPUT_3_V520 0xca50
+#define OUTPUT_4 0xca54
+#define OUTPUT_4_V520 0xca60
+#define OUTPUT_5 0xca64
+#define OUTPUT_5_V520 0xca80
+#define OUTPUT_6 0xca80
+#define OUTPUT_6_V520 0xca90
+#define OUTPUT_7 0xca90
+#define OUTPUT_7_V520 0xcaa0
+#define OUTPUT_8 0xcaa0
+#define OUTPUT_8_V520 0xcab0
+#define OUTPUT_9 0xcab0
+#define OUTPUT_9_V520 0xcac0
+#define OUTPUT_10 0xcac0
+#define OUTPUT_10_V520 0xcad0
+#define OUTPUT_11 0xcad0
+#define OUTPUT_11_V520 0xcae0
+
+#define SERIAL 0xcae0
+#define SERIAL_V520 0xcaf0
+
+#define PWM_ENCODER_0 0xcb00
+#define PWM_ENCODER_1 0xcb08
+#define PWM_ENCODER_2 0xcb10
+#define PWM_ENCODER_3 0xcb18
+#define PWM_ENCODER_4 0xcb20
+#define PWM_ENCODER_5 0xcb28
+#define PWM_ENCODER_6 0xcb30
+#define PWM_ENCODER_7 0xcb38
+#define PWM_DECODER_0 0xcb40
+#define PWM_DECODER_1 0xcb48
+#define PWM_DECODER_1_V520 0xcb4a
+#define PWM_DECODER_2 0xcb50
+#define PWM_DECODER_2_V520 0xcb54
+#define PWM_DECODER_3 0xcb58
+#define PWM_DECODER_3_V520 0xcb5e
+#define PWM_DECODER_4 0xcb60
+#define PWM_DECODER_4_V520 0xcb68
+#define PWM_DECODER_5 0xcb68
+#define PWM_DECODER_5_V520 0xcb80
+#define PWM_DECODER_6 0xcb70
+#define PWM_DECODER_6_V520 0xcb8a
+#define PWM_DECODER_7 0xcb80
+#define PWM_DECODER_7_V520 0xcb94
+#define PWM_DECODER_8 0xcb88
+#define PWM_DECODER_8_V520 0xcb9e
+#define PWM_DECODER_9 0xcb90
+#define PWM_DECODER_9_V520 0xcba8
+#define PWM_DECODER_10 0xcb98
+#define PWM_DECODER_10_V520 0xcbb2
+#define PWM_DECODER_11 0xcba0
+#define PWM_DECODER_11_V520 0xcbbc
+#define PWM_DECODER_12 0xcba8
+#define PWM_DECODER_12_V520 0xcbc6
+#define PWM_DECODER_13 0xcbb0
+#define PWM_DECODER_13_V520 0xcbd0
+#define PWM_DECODER_14 0xcbb8
+#define PWM_DECODER_14_V520 0xcbda
+#define PWM_DECODER_15 0xcbc0
+#define PWM_DECODER_15_V520 0xcbe4
+#define PWM_USER_DATA 0xcbc8
+#define PWM_USER_DATA_V520 0xcbf0
+
+#define TOD_0 0xcbcc
+#define TOD_0_V520 0xcc00
+/* Enable TOD counter, output channel sync and even-PPS mode */
+#define TOD_CFG 0x0000
+#define TOD_CFG_V520 0x0001
+#define TOD_1 0xcbce
+#define TOD_1_V520 0xcc02
+#define TOD_2 0xcbd0
+#define TOD_2_V520 0xcc04
+#define TOD_3 0xcbd2
+#define TOD_3_V520 0xcc06
+
+#define TOD_WRITE_0 0xcc00
+#define TOD_WRITE_0_V520 0xcc10
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_WRITE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_WRITE_COUNTER 0x000c
+/* TOD write trigger configuration */
+#define TOD_WRITE_SELECT_CFG_0 0x000d
+/* TOD write trigger selection */
+#define TOD_WRITE_CMD 0x000f
+#define TOD_WRITE_1 0xcc10
+#define TOD_WRITE_1_V520 0xcc20
+#define TOD_WRITE_2 0xcc20
+#define TOD_WRITE_2_V520 0xcc30
+#define TOD_WRITE_3 0xcc30
+#define TOD_WRITE_3_V520 0xcc40
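+
+/*
+ * Illustrative write sequence, as suggested by the comments above: store
+ * the 11-byte TOD value at TOD_WRITE, configure the trigger through
+ * TOD_WRITE_SELECT_CFG_0, select it via TOD_WRITE_CMD, then poll
+ * TOD_WRITE_COUNTER until it increments to confirm completion.
+ */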
+
+#define TOD_READ_PRIMARY_0 0xcc40
+#define TOD_READ_PRIMARY_0_V520 0xcc50
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_PRIMARY_BASE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_READ_PRIMARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_PRIMARY_CMD 0x000e
+#define TOD_READ_PRIMARY_CMD_V520 0x000f
+#define TOD_READ_PRIMARY_1 0xcc50
+#define TOD_READ_PRIMARY_1_V520 0xcc60
+#define TOD_READ_PRIMARY_2 0xcc60
+#define TOD_READ_PRIMARY_2_V520 0xcc80
+#define TOD_READ_PRIMARY_3 0xcc80
+#define TOD_READ_PRIMARY_3_V520 0xcc90
+
+#define TOD_READ_SECONDARY_0 0xcc90
+#define TOD_READ_SECONDARY_0_V520 0xcca0
+/* 8-bit subns, 32-bit ns, 48-bit seconds */
+#define TOD_READ_SECONDARY_BASE 0x0000
+/* Counter increments after TOD write is completed */
+#define TOD_READ_SECONDARY_COUNTER 0x000b
+/* Read trigger configuration */
+#define TOD_READ_SECONDARY_SEL_CFG_0 0x000c
+/* Read trigger selection */
+#define TOD_READ_SECONDARY_CMD 0x000e
+#define TOD_READ_SECONDARY_CMD_V520 0x000f
+
+#define TOD_READ_SECONDARY_1 0xcca0
+#define TOD_READ_SECONDARY_1_V520 0xccb0
+#define TOD_READ_SECONDARY_2 0xccb0
+#define TOD_READ_SECONDARY_2_V520 0xccc0
+#define TOD_READ_SECONDARY_3 0xccc0
+#define TOD_READ_SECONDARY_3_V520 0xccd0
+
+#define OUTPUT_TDC_CFG 0xccd0
+#define OUTPUT_TDC_CFG_V520 0xcce0
+#define OUTPUT_TDC_0 0xcd00
+#define OUTPUT_TDC_1 0xcd08
+#define OUTPUT_TDC_2 0xcd10
+#define OUTPUT_TDC_3 0xcd18
+#define INPUT_TDC 0xcd20
+
+#define SCRATCH 0xcf50
+#define SCRATCH_V520 0xcf4c
+
+#define EEPROM 0xcf68
+#define EEPROM_V520 0xcf64
+
+#define OTP 0xcf70
+
+#define BYTE 0xcf80
+
+/* Bit definitions for the MAJ_REL register */
+#define MAJOR_SHIFT (1)
+#define MAJOR_MASK (0x7f)
+#define PR_BUILD BIT(0)
+
+/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */
+#define GPIO0_LEVEL BIT(0)
+#define GPIO1_LEVEL BIT(1)
+#define GPIO2_LEVEL BIT(2)
+#define GPIO3_LEVEL BIT(3)
+#define GPIO4_LEVEL BIT(4)
+#define GPIO5_LEVEL BIT(5)
+#define GPIO6_LEVEL BIT(6)
+#define GPIO7_LEVEL BIT(7)
+
+/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */
+#define GPIO8_LEVEL BIT(0)
+#define GPIO9_LEVEL BIT(1)
+#define GPIO10_LEVEL BIT(2)
+#define GPIO11_LEVEL BIT(3)
+#define GPIO12_LEVEL BIT(4)
+#define GPIO13_LEVEL BIT(5)
+#define GPIO14_LEVEL BIT(6)
+#define GPIO15_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO0_TO_7_OUT register */
+#define GPIO0_DRIVE_LEVEL BIT(0)
+#define GPIO1_DRIVE_LEVEL BIT(1)
+#define GPIO2_DRIVE_LEVEL BIT(2)
+#define GPIO3_DRIVE_LEVEL BIT(3)
+#define GPIO4_DRIVE_LEVEL BIT(4)
+#define GPIO5_DRIVE_LEVEL BIT(5)
+#define GPIO6_DRIVE_LEVEL BIT(6)
+#define GPIO7_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the GPIO8_TO_15_OUT register */
+#define GPIO8_DRIVE_LEVEL BIT(0)
+#define GPIO9_DRIVE_LEVEL BIT(1)
+#define GPIO10_DRIVE_LEVEL BIT(2)
+#define GPIO11_DRIVE_LEVEL BIT(3)
+#define GPIO12_DRIVE_LEVEL BIT(4)
+#define GPIO13_DRIVE_LEVEL BIT(5)
+#define GPIO14_DRIVE_LEVEL BIT(6)
+#define GPIO15_DRIVE_LEVEL BIT(7)
+
+/* Bit definitions for the DPLL_TOD_SYNC_CFG register */
+#define TOD_SYNC_SOURCE_SHIFT (1)
+#define TOD_SYNC_SOURCE_MASK (0x3)
+#define TOD_SYNC_EN BIT(0)
+
+/* Bit definitions for the DPLL_MODE register */
+#define WRITE_TIMER_MODE BIT(6)
+#define PLL_MODE_SHIFT (3)
+#define PLL_MODE_MASK (0x7)
+#define STATE_MODE_SHIFT (0)
+#define STATE_MODE_MASK (0x7)
+
+/* Bit definitions for the DPLL_MANU_REF_CFG register */
+#define MANUAL_REFERENCE_SHIFT (0)
+#define MANUAL_REFERENCE_MASK (0x1f)
+
+/* Bit definitions for the GPIO_CFG_GBL register */
+#define SUPPLY_MODE_SHIFT (0)
+#define SUPPLY_MODE_MASK (0x3)
+
+/* Bit definitions for the GPIO_DCO_INC_DEC register */
+#define INCDEC_DPLL_INDEX_SHIFT (0)
+#define INCDEC_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_0 register */
+#define CTRL_OUT_0 BIT(0)
+#define CTRL_OUT_1 BIT(1)
+#define CTRL_OUT_2 BIT(2)
+#define CTRL_OUT_3 BIT(3)
+#define CTRL_OUT_4 BIT(4)
+#define CTRL_OUT_5 BIT(5)
+#define CTRL_OUT_6 BIT(6)
+#define CTRL_OUT_7 BIT(7)
+
+/* Bit definitions for the GPIO_OUT_CTRL_1 register */
+#define CTRL_OUT_8 BIT(0)
+#define CTRL_OUT_9 BIT(1)
+#define CTRL_OUT_10 BIT(2)
+#define CTRL_OUT_11 BIT(3)
+#define CTRL_OUT_12 BIT(4)
+#define CTRL_OUT_13 BIT(5)
+#define CTRL_OUT_14 BIT(6)
+#define CTRL_OUT_15 BIT(7)
+
+/* Bit definitions for the GPIO_TOD_TRIG register */
+#define TOD_TRIG_0 BIT(0)
+#define TOD_TRIG_1 BIT(1)
+#define TOD_TRIG_2 BIT(2)
+#define TOD_TRIG_3 BIT(3)
+
+/* Bit definitions for the GPIO_DPLL_INDICATOR register */
+#define IND_DPLL_INDEX_SHIFT (0)
+#define IND_DPLL_INDEX_MASK (0x7)
+
+/* Bit definitions for the GPIO_LOS_INDICATOR register */
+#define REFMON_INDEX_SHIFT (0)
+#define REFMON_INDEX_MASK (0xf)
+/* Active level of LOS indicator, 0=low 1=high */
+#define ACTIVE_LEVEL BIT(4)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */
+#define DSQ_INP_0 BIT(0)
+#define DSQ_INP_1 BIT(1)
+#define DSQ_INP_2 BIT(2)
+#define DSQ_INP_3 BIT(3)
+#define DSQ_INP_4 BIT(4)
+#define DSQ_INP_5 BIT(5)
+#define DSQ_INP_6 BIT(6)
+#define DSQ_INP_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */
+#define DSQ_INP_8 BIT(0)
+#define DSQ_INP_9 BIT(1)
+#define DSQ_INP_10 BIT(2)
+#define DSQ_INP_11 BIT(3)
+#define DSQ_INP_12 BIT(4)
+#define DSQ_INP_13 BIT(5)
+#define DSQ_INP_14 BIT(6)
+#define DSQ_INP_15 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */
+#define DSQ_DPLL_0 BIT(0)
+#define DSQ_DPLL_1 BIT(1)
+#define DSQ_DPLL_2 BIT(2)
+#define DSQ_DPLL_3 BIT(3)
+#define DSQ_DPLL_4 BIT(4)
+#define DSQ_DPLL_5 BIT(5)
+#define DSQ_DPLL_6 BIT(6)
+#define DSQ_DPLL_7 BIT(7)
+
+/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */
+#define DSQ_DPLL_SYS BIT(0)
+#define GPIO_DSQ_LEVEL BIT(1)
+
+/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */
+#define DPLL_TOD_SHIFT (0)
+#define DPLL_TOD_MASK (0x3)
+#define TOD_READ_SECONDARY BIT(2)
+#define GPIO_ASSERT_LEVEL BIT(3)
+
+/* Bit definitions for the GPIO_CTRL register */
+#define GPIO_FUNCTION_EN BIT(0)
+#define GPIO_CMOS_OD_MODE BIT(1)
+#define GPIO_CONTROL_DIR BIT(2)
+#define GPIO_PU_PD_MODE BIT(3)
+#define GPIO_FUNCTION_SHIFT (4)
+#define GPIO_FUNCTION_MASK (0xf)
+
+/* Bit definitions for the OUT_CTRL_1 register */
+#define OUT_SYNC_DISABLE BIT(7)
+#define SQUELCH_VALUE BIT(6)
+#define SQUELCH_DISABLE BIT(5)
+#define PAD_VDDO_SHIFT (2)
+#define PAD_VDDO_MASK (0x7)
+#define PAD_CMOSDRV_SHIFT (0)
+#define PAD_CMOSDRV_MASK (0x3)
+
+/* Bit definitions for the TOD_CFG register */
+#define TOD_EVEN_PPS_MODE BIT(2)
+#define TOD_OUT_SYNC_ENABLE BIT(1)
+#define TOD_ENABLE BIT(0)
+
+/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */
+#define WR_PWM_DECODER_INDEX_SHIFT (4)
+#define WR_PWM_DECODER_INDEX_MASK (0xf)
+#define WR_REF_INDEX_SHIFT (0)
+#define WR_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_WRITE_CMD register */
+#define TOD_WRITE_SELECTION_SHIFT (0)
+#define TOD_WRITE_SELECTION_MASK (0xf)
+/* 4.8.7 only */
+#define TOD_WRITE_TYPE_SHIFT (4)
+#define TOD_WRITE_TYPE_MASK (0x3)
+
+/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */
+#define RD_PWM_DECODER_INDEX_SHIFT (4)
+#define RD_PWM_DECODER_INDEX_MASK (0xf)
+#define RD_REF_INDEX_SHIFT (0)
+#define RD_REF_INDEX_MASK (0xf)
+
+/* Bit definitions for the TOD_READ_PRIMARY_CMD register */
+#define TOD_READ_TRIGGER_MODE BIT(4)
+#define TOD_READ_TRIGGER_SHIFT (0)
+#define TOD_READ_TRIGGER_MASK (0xf)
+
+/* Bit definitions for the DPLL_CTRL_COMBO_MASTER_CFG register */
+#define COMBO_MASTER_HOLD BIT(0)
+
+/* Bit definitions for DPLL_SYS_STATUS register */
+#define DPLL_SYS_STATE_MASK (0xf)
+
+/* Bit definitions for SYS_APLL_STATUS register */
+#define SYS_APLL_LOSS_LOCK_LIVE_MASK BIT(0)
+#define SYS_APLL_LOSS_LOCK_LIVE_LOCKED 0
+#define SYS_APLL_LOSS_LOCK_LIVE_UNLOCKED 1
+
+/* Bit definitions for the DPLL0_STATUS register */
+#define DPLL_STATE_MASK (0xf)
+#define DPLL_STATE_SHIFT (0x0)
+
+/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
+enum pll_mode {
+ PLL_MODE_MIN = 0,
+ PLL_MODE_PLL = PLL_MODE_MIN,
+ PLL_MODE_WRITE_PHASE = 1,
+ PLL_MODE_WRITE_FREQUENCY = 2,
+ PLL_MODE_GPIO_INC_DEC = 3,
+ PLL_MODE_SYNTHESIS = 4,
+ PLL_MODE_PHASE_MEASUREMENT = 5,
+ PLL_MODE_DISABLED = 6,
+ PLL_MODE_MAX = PLL_MODE_DISABLED,
+};
+
+/* Values of DPLL_CTRL_n.DPLL_MANU_REF_CFG.MANUAL_REFERENCE */
+enum manual_reference {
+ MANU_REF_MIN = 0,
+ MANU_REF_CLK0 = MANU_REF_MIN,
+ MANU_REF_CLK1,
+ MANU_REF_CLK2,
+ MANU_REF_CLK3,
+ MANU_REF_CLK4,
+ MANU_REF_CLK5,
+ MANU_REF_CLK6,
+ MANU_REF_CLK7,
+ MANU_REF_CLK8,
+ MANU_REF_CLK9,
+ MANU_REF_CLK10,
+ MANU_REF_CLK11,
+ MANU_REF_CLK12,
+ MANU_REF_CLK13,
+ MANU_REF_CLK14,
+ MANU_REF_CLK15,
+ MANU_REF_WRITE_PHASE,
+ MANU_REF_WRITE_FREQUENCY,
+ MANU_REF_XO_DPLL,
+ MANU_REF_MAX = MANU_REF_XO_DPLL,
+};
+
+enum hw_tod_write_trig_sel {
+ HW_TOD_WR_TRIG_SEL_MIN = 0,
+ HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
+ HW_TOD_WR_TRIG_SEL_RESERVED = 1,
+ HW_TOD_WR_TRIG_SEL_TOD_PPS = 2,
+ HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3,
+ HW_TOD_WR_TRIG_SEL_PWM_PPS = 4,
+ HW_TOD_WR_TRIG_SEL_GPIO = 5,
+ HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6,
+ WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC,
+};
+
+enum scsr_read_trig_sel {
+ /* CANCEL CURRENT TOD READ; MODULE BECOMES IDLE - NO TRIGGER OCCURS */
+ SCSR_TOD_READ_TRIG_SEL_DISABLE = 0,
+ /* TRIGGER IMMEDIATELY */
+ SCSR_TOD_READ_TRIG_SEL_IMMEDIATE = 1,
+ /* TRIGGER ON RISING EDGE OF INTERNAL TOD PPS SIGNAL */
+ SCSR_TOD_READ_TRIG_SEL_TODPPS = 2,
+ /* TRIGGER ON RISING EDGE OF SELECTED REFERENCE INPUT */
+ SCSR_TOD_READ_TRIG_SEL_REFCLK = 3,
+ /* TRIGGER ON RISING EDGE OF SELECTED PWM DECODER 1PPS OUTPUT */
+ SCSR_TOD_READ_TRIG_SEL_PWMPPS = 4,
+ SCSR_TOD_READ_TRIG_SEL_RESERVED = 5,
+ /* TRIGGER WHEN WRITE FREQUENCY EVENT OCCURS */
+ SCSR_TOD_READ_TRIG_SEL_WRITEFREQUENCYEVENT = 6,
+ /* TRIGGER ON SELECTED GPIO */
+ SCSR_TOD_READ_TRIG_SEL_GPIO = 7,
+ SCSR_TOD_READ_TRIG_SEL_MAX = SCSR_TOD_READ_TRIG_SEL_GPIO,
+};
+
+/* Values STATUS.DPLL_SYS_STATUS.DPLL_SYS_STATE */
+enum dpll_state {
+ DPLL_STATE_MIN = 0,
+ DPLL_STATE_FREERUN = DPLL_STATE_MIN,
+ DPLL_STATE_LOCKACQ = 1,
+ DPLL_STATE_LOCKREC = 2,
+ DPLL_STATE_LOCKED = 3,
+ DPLL_STATE_HOLDOVER = 4,
+ DPLL_STATE_OPEN_LOOP = 5,
+ DPLL_STATE_MAX = DPLL_STATE_OPEN_LOOP,
+};
+
+/* 4.8.7 only */
+enum scsr_tod_write_trig_sel {
+ SCSR_TOD_WR_TRIG_SEL_DISABLE = 0,
+ SCSR_TOD_WR_TRIG_SEL_IMMEDIATE = 1,
+ SCSR_TOD_WR_TRIG_SEL_REFCLK = 2,
+ SCSR_TOD_WR_TRIG_SEL_PWMPPS = 3,
+ SCSR_TOD_WR_TRIG_SEL_TODPPS = 4,
+ SCSR_TOD_WR_TRIG_SEL_SYNCFOD = 5,
+ SCSR_TOD_WR_TRIG_SEL_GPIO = 6,
+ SCSR_TOD_WR_TRIG_SEL_MAX = SCSR_TOD_WR_TRIG_SEL_GPIO,
+};
+
+/* 4.8.7 only */
+enum scsr_tod_write_type_sel {
+ SCSR_TOD_WR_TYPE_SEL_ABSOLUTE = 0,
+ SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS = 1,
+ SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS = 2,
+ SCSR_TOD_WR_TYPE_SEL_MAX = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS,
+};
+#endif
diff --git a/include/linux/mfd/idtRC38xxx_reg.h b/include/linux/mfd/idtRC38xxx_reg.h
new file mode 100644
index 000000000000..ec11872f51ad
--- /dev/null
+++ b/include/linux/mfd/idtRC38xxx_reg.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Register Map - Based on PolarBear_CSRs.RevA.xlsx (2023-04-21)
+ *
+ * Copyright (C) 2023 Integrated Device Technology, Inc., a Renesas Company.
+ */
+#ifndef MFD_IDTRC38XXX_REG
+#define MFD_IDTRC38XXX_REG
+
+/* GLOBAL */
+#define SOFT_RESET_CTRL (0x15) /* Specific to FC3W */
+#define MISC_CTRL (0x14) /* Specific to FC3A */
+#define APLL_REINIT BIT(1)
+#define APLL_REINIT_VFC3A BIT(2)
+
+#define DEVICE_ID (0x2)
+#define DEVICE_ID_MASK (0x1000) /* Bit 12 is 1 if FC3W and 0 if FC3A */
+#define DEVICE_ID_SHIFT (12)
+
+/* FOD */
+#define FOD_0 (0x300)
+#define FOD_0_VFC3A (0x400)
+#define FOD_1 (0x340)
+#define FOD_1_VFC3A (0x440)
+#define FOD_2 (0x380)
+#define FOD_2_VFC3A (0x480)
+
+/* TDCAPLL */
+#define TDC_CTRL (0x44a) /* Specific to FC3W */
+#define TDC_ENABLE_CTRL (0x169) /* Specific to FC3A */
+#define TDC_DAC_CAL_CTRL (0x16a) /* Specific to FC3A */
+#define TDC_EN BIT(0)
+#define TDC_DAC_RECAL_REQ BIT(1)
+#define TDC_DAC_RECAL_REQ_VFC3A BIT(0)
+
+#define TDC_FB_DIV_INT_CNFG (0x442)
+#define TDC_FB_DIV_INT_CNFG_VFC3A (0x162)
+#define TDC_FB_DIV_INT_MASK GENMASK(7, 0)
+#define TDC_REF_DIV_CNFG (0x443)
+#define TDC_REF_DIV_CNFG_VFC3A (0x163)
+#define TDC_REF_DIV_CONFIG_MASK GENMASK(2, 0)
+
+/* TIME SYNC CHANNEL */
+#define TIME_CLOCK_SRC (0xa01) /* Specific to FC3W */
+#define TIME_CLOCK_COUNT (0xa00) /* Specific to FC3W */
+#define TIME_CLOCK_COUNT_MASK GENMASK(5, 0)
+
+#define SUB_SYNC_GEN_CNFG (0xa04)
+
+#define TOD_COUNTER_READ_REQ (0xa5f)
+#define TOD_COUNTER_READ_REQ_VFC3A (0x6df)
+#define TOD_SYNC_LOAD_VAL_CTRL (0xa10)
+#define TOD_SYNC_LOAD_VAL_CTRL_VFC3A (0x690)
+#define SYNC_COUNTER_MASK GENMASK_ULL(51, 0)
+#define SUB_SYNC_COUNTER_MASK GENMASK(30, 0)
+#define TOD_SYNC_LOAD_REQ_CTRL (0xa21)
+#define TOD_SYNC_LOAD_REQ_CTRL_VFC3A (0x6a1)
+#define SYNC_LOAD_ENABLE BIT(1)
+#define SUB_SYNC_LOAD_ENABLE BIT(0)
+#define SYNC_LOAD_REQ BIT(0)
+
+#define LPF_MODE_CNFG (0xa80)
+#define LPF_MODE_CNFG_VFC3A (0x700)
+enum lpf_mode {
+ LPF_DISABLED = 0,
+ LPF_WP = 1,
+ LPF_HOLDOVER = 2,
+ LPF_WF = 3,
+ LPF_INVALID = 4
+};
+#define LPF_CTRL (0xa98)
+#define LPF_CTRL_VFC3A (0x718)
+#define LPF_EN BIT(0)
+
+#define LPF_BW_CNFG (0xa81)
+#define LPF_BW_SHIFT GENMASK(7, 3)
+#define LPF_BW_MULT GENMASK(2, 0)
+#define LPF_BW_SHIFT_DEFAULT (0xb)
+#define LPF_BW_MULT_DEFAULT (0x0)
+#define LPF_BW_SHIFT_1PPS (0x5)
+
+#define LPF_WR_PHASE_CTRL (0xaa8)
+#define LPF_WR_PHASE_CTRL_VFC3A (0x728)
+#define LPF_WR_FREQ_CTRL (0xab0)
+#define LPF_WR_FREQ_CTRL_VFC3A (0x730)
+
+#define TIME_CLOCK_TDC_FANOUT_CNFG (0xB00)
+#define TIME_SYNC_TO_TDC_EN BIT(0)
+#define SIG1_MUX_SEL_MASK GENMASK(7, 4)
+#define SIG2_MUX_SEL_MASK GENMASK(11, 8)
+enum tdc_mux_sel {
+ REF0 = 0,
+ REF1 = 1,
+ REF2 = 2,
+ REF3 = 3,
+ REF_CLK5 = 4,
+ REF_CLK6 = 5,
+ DPLL_FB_TO_TDC = 6,
+ DPLL_FB_DIVIDED_TO_TDC = 7,
+ TIME_CLK_DIVIDED = 8,
+ TIME_SYNC = 9,
+};
+
+#define TIME_CLOCK_MEAS_CNFG (0xB04)
+#define TDC_MEAS_MODE BIT(0)
+enum tdc_meas_mode {
+ CONTINUOUS = 0,
+ ONE_SHOT = 1,
+ MEAS_MODE_INVALID = 2,
+};
+
+#define TIME_CLOCK_MEAS_DIV_CNFG (0xB08)
+#define TIME_REF_DIV_MASK GENMASK(29, 24)
+
+#define TIME_CLOCK_MEAS_CTRL (0xB10)
+#define TDC_MEAS_EN BIT(0)
+#define TDC_MEAS_START BIT(1)
+
+#define TDC_FIFO_READ_REQ (0xB2F)
+#define TDC_FIFO_READ (0xB30)
+#define COARSE_MEAS_MASK GENMASK_ULL(39, 13)
+#define FINE_MEAS_MASK GENMASK(12, 0)
+
+#define TDC_FIFO_CTRL (0xB12)
+#define FIFO_CLEAR BIT(0)
+#define TDC_FIFO_STS (0xB38)
+#define FIFO_FULL BIT(1)
+#define FIFO_EMPTY BIT(0)
+#define TDC_FIFO_EVENT (0xB39)
+#define FIFO_OVERRUN BIT(1)
+
+/* DPLL */
+#define MAX_REFERENCE_INDEX (3)
+#define MAX_NUM_REF_PRIORITY (4)
+
+#define MAX_DPLL_INDEX (2)
+
+#define DPLL_STS (0x580)
+#define DPLL_STS_VFC3A (0x571)
+#define DPLL_STATE_STS_MASK (0x70)
+#define DPLL_STATE_STS_SHIFT (4)
+#define DPLL_REF_SEL_STS_MASK (0x6)
+#define DPLL_REF_SEL_STS_SHIFT (1)
+
+#define DPLL_REF_PRIORITY_CNFG (0x502)
+#define DPLL_REFX_PRIORITY_DISABLE_MASK (0xf)
+#define DPLL_REF0_PRIORITY_ENABLE_AND_SET_MASK (0x31)
+#define DPLL_REF1_PRIORITY_ENABLE_AND_SET_MASK (0xc2)
+#define DPLL_REF2_PRIORITY_ENABLE_AND_SET_MASK (0x304)
+#define DPLL_REF3_PRIORITY_ENABLE_AND_SET_MASK (0xc08)
+#define DPLL_REF0_PRIORITY_SHIFT (4)
+#define DPLL_REF1_PRIORITY_SHIFT (6)
+#define DPLL_REF2_PRIORITY_SHIFT (8)
+#define DPLL_REF3_PRIORITY_SHIFT (10)
+
+enum dpll_state {
+ DPLL_STATE_MIN = 0,
+ DPLL_STATE_FREERUN = DPLL_STATE_MIN,
+ DPLL_STATE_LOCKED = 1,
+ DPLL_STATE_HOLDOVER = 2,
+ DPLL_STATE_WRITE_FREQUENCY = 3,
+ DPLL_STATE_ACQUIRE = 4,
+ DPLL_STATE_HITLESS_SWITCH = 5,
+ DPLL_STATE_MAX = DPLL_STATE_HITLESS_SWITCH
+};
+
+/* REFMON */
+#define LOSMON_STS_0 (0x81e)
+#define LOSMON_STS_0_VFC3A (0x18e)
+#define LOSMON_STS_1 (0x82e)
+#define LOSMON_STS_1_VFC3A (0x19e)
+#define LOSMON_STS_2 (0x83e)
+#define LOSMON_STS_2_VFC3A (0x1ae)
+#define LOSMON_STS_3 (0x84e)
+#define LOSMON_STS_3_VFC3A (0x1be)
+#define LOS_STS_MASK (0x1)
+
+#define FREQMON_STS_0 (0x874)
+#define FREQMON_STS_0_VFC3A (0x1d4)
+#define FREQMON_STS_1 (0x894)
+#define FREQMON_STS_1_VFC3A (0x1f4)
+#define FREQMON_STS_2 (0x8b4)
+#define FREQMON_STS_2_VFC3A (0x214)
+#define FREQMON_STS_3 (0x8d4)
+#define FREQMON_STS_3_VFC3A (0x234)
+#define FREQ_FAIL_STS_SHIFT (31)
+
+/* Firmware interface */
+#define TIME_CLK_FREQ_ADDR (0xffa0)
+#define XTAL_FREQ_ADDR (0xffa1)
+
+/*
+ * Return register address and field mask based on the passed-in firmware version
+ */
+#define IDTFC3_FW_REG(FW, VER, REG) (((FW) < (VER)) ? (REG) : (REG##_##VER))
+#define IDTFC3_FW_FIELD(FW, VER, FIELD) (((FW) < (VER)) ? (FIELD) : (FIELD##_##VER))
+enum fw_version {
+ V_DEFAULT = 0,
+ VFC3W = 1,
+ VFC3A = 2
+};
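+
+/*
+ * For example, with enum fw_version fw = V_DEFAULT,
+ * IDTFC3_FW_REG(fw, VFC3A, LPF_CTRL) resolves to LPF_CTRL (0xa98); with
+ * fw = VFC3A it resolves to LPF_CTRL_VFC3A (0x718). IDTFC3_FW_FIELD does
+ * the same for field masks such as APLL_REINIT / APLL_REINIT_VFC3A.
+ */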
+
+/* XTAL_FREQ_ADDR/TIME_CLK_FREQ_ADDR */
+enum {
+ FREQ_MIN = 0,
+ FREQ_25M = 1,
+ FREQ_49_152M = 2,
+ FREQ_50M = 3,
+ FREQ_100M = 4,
+ FREQ_125M = 5,
+ FREQ_250M = 6,
+ FREQ_MAX
+};
+
+struct idtfc3_hw_param {
+ u32 xtal_freq;
+ u32 time_clk_freq;
+};
+
+struct idtfc3_fwrc {
+ u8 hiaddr;
+ u8 loaddr;
+ u8 value;
+ u8 reserved;
+} __packed;
+
+static inline void idtfc3_default_hw_param(struct idtfc3_hw_param *hw_param)
+{
+ hw_param->xtal_freq = 49152000;
+ hw_param->time_clk_freq = 25000000;
+}
+
+static inline int idtfc3_set_hw_param(struct idtfc3_hw_param *hw_param,
+ u16 addr, u8 val)
+{
+ if (addr == XTAL_FREQ_ADDR)
+ switch (val) {
+ case FREQ_49_152M:
+ hw_param->xtal_freq = 49152000;
+ break;
+ case FREQ_50M:
+ hw_param->xtal_freq = 50000000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else if (addr == TIME_CLK_FREQ_ADDR)
+ switch (val) {
+ case FREQ_25M:
+ hw_param->time_clk_freq = 25000000;
+ break;
+ case FREQ_50M:
+ hw_param->time_clk_freq = 50000000;
+ break;
+ case FREQ_100M:
+ hw_param->time_clk_freq = 100000000;
+ break;
+ case FREQ_125M:
+ hw_param->time_clk_freq = 125000000;
+ break;
+ case FREQ_250M:
+ hw_param->time_clk_freq = 250000000;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else
+ return -EFAULT;
+
+ return 0;
+}
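+
+/*
+ * Typical usage (an illustrative sketch, not mandated by this header):
+ * initialize the defaults first, then apply firmware-provided overrides
+ * and treat a negative return as an unsupported frequency code.
+ *
+ *	struct idtfc3_hw_param param;
+ *
+ *	idtfc3_default_hw_param(&param);
+ *	err = idtfc3_set_hw_param(&param, XTAL_FREQ_ADDR, FREQ_50M);
+ */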
+
+#endif
diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h
index f0044b14136e..988f1cd90032 100644
--- a/include/linux/mfd/intel-m10-bmc.h
+++ b/include/linux/mfd/intel-m10-bmc.h
@@ -7,40 +7,49 @@
#ifndef __MFD_INTEL_M10_BMC_H
#define __MFD_INTEL_M10_BMC_H
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
#include <linux/regmap.h>
+#include <linux/rwsem.h>
-#define M10BMC_LEGACY_BUILD_VER 0x300468
-#define M10BMC_SYS_BASE 0x300800
-#define M10BMC_SYS_END 0x300fff
-#define M10BMC_FLASH_BASE 0x10000000
-#define M10BMC_FLASH_END 0x1fffffff
-#define M10BMC_MEM_END M10BMC_FLASH_END
+#define M10BMC_N3000_LEGACY_BUILD_VER 0x300468
+#define M10BMC_N3000_SYS_BASE 0x300800
+#define M10BMC_N3000_SYS_END 0x300fff
+#define M10BMC_N3000_FLASH_BASE 0x10000000
+#define M10BMC_N3000_FLASH_END 0x1fffffff
+#define M10BMC_N3000_MEM_END M10BMC_N3000_FLASH_END
#define M10BMC_STAGING_BASE 0x18000000
#define M10BMC_STAGING_SIZE 0x3800000
/* Register offset of system registers */
-#define NIOS2_FW_VERSION 0x0
-#define M10BMC_MAC_LOW 0x10
-#define M10BMC_MAC_BYTE4 GENMASK(7, 0)
-#define M10BMC_MAC_BYTE3 GENMASK(15, 8)
-#define M10BMC_MAC_BYTE2 GENMASK(23, 16)
-#define M10BMC_MAC_BYTE1 GENMASK(31, 24)
-#define M10BMC_MAC_HIGH 0x14
-#define M10BMC_MAC_BYTE6 GENMASK(7, 0)
-#define M10BMC_MAC_BYTE5 GENMASK(15, 8)
-#define M10BMC_MAC_COUNT GENMASK(23, 16)
-#define M10BMC_TEST_REG 0x3c
-#define M10BMC_BUILD_VER 0x68
-#define M10BMC_VER_MAJOR_MSK GENMASK(23, 16)
-#define M10BMC_VER_PCB_INFO_MSK GENMASK(31, 24)
-#define M10BMC_VER_LEGACY_INVALID 0xffffffff
+#define NIOS2_N3000_FW_VERSION 0x0
+#define M10BMC_N3000_MAC_LOW 0x10
+#define M10BMC_N3000_MAC_BYTE4 GENMASK(7, 0)
+#define M10BMC_N3000_MAC_BYTE3 GENMASK(15, 8)
+#define M10BMC_N3000_MAC_BYTE2 GENMASK(23, 16)
+#define M10BMC_N3000_MAC_BYTE1 GENMASK(31, 24)
+#define M10BMC_N3000_MAC_HIGH 0x14
+#define M10BMC_N3000_MAC_BYTE6 GENMASK(7, 0)
+#define M10BMC_N3000_MAC_BYTE5 GENMASK(15, 8)
+#define M10BMC_N3000_MAC_COUNT GENMASK(23, 16)
+#define M10BMC_N3000_TEST_REG 0x3c
+#define M10BMC_N3000_BUILD_VER 0x68
+#define M10BMC_N3000_VER_MAJOR_MSK GENMASK(23, 16)
+#define M10BMC_N3000_VER_PCB_INFO_MSK GENMASK(31, 24)
+#define M10BMC_N3000_VER_LEGACY_INVALID 0xffffffff
+
+/* Telemetry registers */
+#define M10BMC_N3000_TELEM_START 0x100
+#define M10BMC_N3000_TELEM_END 0x250
+#define M10BMC_D5005_TELEM_END 0x300
/* Secure update doorbell register, in system register region */
-#define M10BMC_DOORBELL 0x400
+#define M10BMC_N3000_DOORBELL 0x400
/* Authorization Result register, in system register region */
-#define M10BMC_AUTH_RESULT 0x404
+#define M10BMC_N3000_AUTH_RESULT 0x404
/* Doorbell register fields */
#define DRBL_RSU_REQUEST BIT(0)
@@ -88,7 +97,6 @@
#define HOST_STATUS_ABORT_RSU 0x2
#define rsu_prog(doorbell) FIELD_GET(DRBL_RSU_PROGRESS, doorbell)
-#define rsu_stat(doorbell) FIELD_GET(DRBL_RSU_STATUS, doorbell)
/* interval 100ms and timeout 5s */
#define NIOS_HANDSHAKE_INTERVAL_US (100 * 1000)
@@ -103,29 +111,162 @@
#define RSU_COMPLETE_TIMEOUT_MS (40 * 60 * 1000)
/* Addresses for security related data in FLASH */
-#define BMC_REH_ADDR 0x17ffc004
-#define BMC_PROG_ADDR 0x17ffc000
-#define BMC_PROG_MAGIC 0x5746
+#define M10BMC_N3000_BMC_REH_ADDR 0x17ffc004
+#define M10BMC_N3000_BMC_PROG_ADDR 0x17ffc000
+#define M10BMC_N3000_BMC_PROG_MAGIC 0x5746
-#define SR_REH_ADDR 0x17ffd004
-#define SR_PROG_ADDR 0x17ffd000
-#define SR_PROG_MAGIC 0x5253
+#define M10BMC_N3000_SR_REH_ADDR 0x17ffd004
+#define M10BMC_N3000_SR_PROG_ADDR 0x17ffd000
+#define M10BMC_N3000_SR_PROG_MAGIC 0x5253
-#define PR_REH_ADDR 0x17ffe004
-#define PR_PROG_ADDR 0x17ffe000
-#define PR_PROG_MAGIC 0x5250
+#define M10BMC_N3000_PR_REH_ADDR 0x17ffe004
+#define M10BMC_N3000_PR_PROG_ADDR 0x17ffe000
+#define M10BMC_N3000_PR_PROG_MAGIC 0x5250
/* Address of 4KB inverted bit vector containing staging area FLASH count */
-#define STAGING_FLASH_COUNT 0x17ffb000
+#define M10BMC_N3000_STAGING_FLASH_COUNT 0x17ffb000
+
+#define M10BMC_N6000_INDIRECT_BASE 0x400
+
+#define M10BMC_N6000_SYS_BASE 0x0
+#define M10BMC_N6000_SYS_END 0xfff
+
+#define M10BMC_N6000_DOORBELL 0x1c0
+#define M10BMC_N6000_AUTH_RESULT 0x1c4
+#define AUTH_RESULT_RSU_STATUS GENMASK(23, 16)
+
+#define M10BMC_N6000_BUILD_VER 0x0
+#define NIOS2_N6000_FW_VERSION 0x4
+#define M10BMC_N6000_MAC_LOW 0x20
+#define M10BMC_N6000_MAC_HIGH (M10BMC_N6000_MAC_LOW + 4)
+
+/* Addresses for security related data in FLASH */
+#define M10BMC_N6000_BMC_REH_ADDR 0x7ffc004
+#define M10BMC_N6000_BMC_PROG_ADDR 0x7ffc000
+#define M10BMC_N6000_BMC_PROG_MAGIC 0x5746
+
+#define M10BMC_N6000_SR_REH_ADDR 0x7ffd004
+#define M10BMC_N6000_SR_PROG_ADDR 0x7ffd000
+#define M10BMC_N6000_SR_PROG_MAGIC 0x5253
+
+#define M10BMC_N6000_PR_REH_ADDR 0x7ffe004
+#define M10BMC_N6000_PR_PROG_ADDR 0x7ffe000
+#define M10BMC_N6000_PR_PROG_MAGIC 0x5250
+
+#define M10BMC_N6000_STAGING_FLASH_COUNT 0x7ff5000
+
+#define M10BMC_N6000_FLASH_MUX_CTRL 0x1d0
+#define M10BMC_N6000_FLASH_MUX_SELECTION GENMASK(2, 0)
+#define M10BMC_N6000_FLASH_MUX_IDLE 0
+#define M10BMC_N6000_FLASH_MUX_NIOS 1
+#define M10BMC_N6000_FLASH_MUX_HOST 2
+#define M10BMC_N6000_FLASH_MUX_PFL 4
+#define get_flash_mux(mux) FIELD_GET(M10BMC_N6000_FLASH_MUX_SELECTION, mux)
+
+#define M10BMC_N6000_FLASH_NIOS_REQUEST BIT(4)
+#define M10BMC_N6000_FLASH_HOST_REQUEST BIT(5)
+
+#define M10BMC_N6000_FLASH_CTRL 0x40
+#define M10BMC_N6000_FLASH_WR_MODE BIT(0)
+#define M10BMC_N6000_FLASH_RD_MODE BIT(1)
+#define M10BMC_N6000_FLASH_BUSY BIT(2)
+#define M10BMC_N6000_FLASH_FIFO_SPACE GENMASK(13, 4)
+#define M10BMC_N6000_FLASH_READ_COUNT GENMASK(25, 16)
+
+#define M10BMC_N6000_FLASH_ADDR 0x44
+#define M10BMC_N6000_FLASH_FIFO 0x800
+#define M10BMC_N6000_READ_BLOCK_SIZE 0x800
+#define M10BMC_N6000_FIFO_MAX_BYTES 0x800
+#define M10BMC_N6000_FIFO_WORD_SIZE 4
+#define M10BMC_N6000_FIFO_MAX_WORDS (M10BMC_N6000_FIFO_MAX_BYTES / \
+ M10BMC_N6000_FIFO_WORD_SIZE)
+
+#define M10BMC_FLASH_INT_US 1
+#define M10BMC_FLASH_TIMEOUT_US 10000
+
+/**
+ * struct m10bmc_csr_map - Intel MAX 10 BMC CSR register map
+ */
+struct m10bmc_csr_map {
+ unsigned int base;
+ unsigned int build_version;
+ unsigned int fw_version;
+ unsigned int mac_low;
+ unsigned int mac_high;
+ unsigned int doorbell;
+ unsigned int auth_result;
+ unsigned int bmc_prog_addr;
+ unsigned int bmc_reh_addr;
+ unsigned int bmc_magic;
+ unsigned int sr_prog_addr;
+ unsigned int sr_reh_addr;
+ unsigned int sr_magic;
+ unsigned int pr_prog_addr;
+ unsigned int pr_reh_addr;
+ unsigned int pr_magic;
+ unsigned int rsu_update_counter;
+ unsigned int staging_size;
+};
+
+/**
+ * struct intel_m10bmc_platform_info - Intel MAX 10 BMC platform specific information
+ * @cells: MFD cells
+ * @n_cells: number of MFD cells (ARRAY_SIZE() of @cells)
+ * @handshake_sys_reg_ranges: array of register ranges for fw handshake regs
+ * @handshake_sys_reg_nranges: number of register ranges for fw handshake regs
+ * @csr_map: the mappings for register definition of MAX10 BMC
+ */
+struct intel_m10bmc_platform_info {
+ struct mfd_cell *cells;
+ int n_cells;
+ const struct regmap_range *handshake_sys_reg_ranges;
+ unsigned int handshake_sys_reg_nranges;
+ const struct m10bmc_csr_map *csr_map;
+};
+
+struct intel_m10bmc;
+
+/**
+ * struct intel_m10bmc_flash_bulk_ops - device specific operations for flash R/W
+ * @read: read a block of data from flash
+ * @write: write a block of data to flash
+ * @lock_write: locks flash access for erase+write
+ * @unlock_write: unlock flash access
+ *
+ * Write must be protected with @lock_write and @unlock_write. While the flash
+ * is locked, @read returns -EBUSY.
+ */
+struct intel_m10bmc_flash_bulk_ops {
+ int (*read)(struct intel_m10bmc *m10bmc, u8 *buf, u32 addr, u32 size);
+ int (*write)(struct intel_m10bmc *m10bmc, const u8 *buf, u32 offset, u32 size);
+ int (*lock_write)(struct intel_m10bmc *m10bmc);
+ void (*unlock_write)(struct intel_m10bmc *m10bmc);
+};
+
+enum m10bmc_fw_state {
+ M10BMC_FW_STATE_NORMAL,
+ M10BMC_FW_STATE_SEC_UPDATE_PREPARE,
+ M10BMC_FW_STATE_SEC_UPDATE_WRITE,
+ M10BMC_FW_STATE_SEC_UPDATE_PROGRAM,
+};
/**
* struct intel_m10bmc - Intel MAX 10 BMC parent driver data structure
* @dev: this device
* @regmap: the regmap used to access registers by m10bmc itself
+ * @info: the platform information for MAX10 BMC
+ * @flash_bulk_ops: optional device specific operations for flash R/W
+ * @bmcfw_lock: read/write semaphore to BMC firmware running state
+ * @bmcfw_state: BMC firmware running state. Available only when
+ * handshake_sys_reg_nranges > 0.
*/
struct intel_m10bmc {
struct device *dev;
struct regmap *regmap;
+ const struct intel_m10bmc_platform_info *info;
+ const struct intel_m10bmc_flash_bulk_ops *flash_bulk_ops;
+ struct rw_semaphore bmcfw_lock; /* Protects bmcfw_state */
+ enum m10bmc_fw_state bmcfw_state;
};
/*
@@ -133,6 +274,7 @@ struct intel_m10bmc {
*
* m10bmc_raw_read - read m10bmc register per addr
* m10bmc_sys_read - read m10bmc system register per offset
+ * m10bmc_sys_update_bits - update m10bmc system register per offset
*/
static inline int
m10bmc_raw_read(struct intel_m10bmc *m10bmc, unsigned int addr,
@@ -148,15 +290,20 @@ m10bmc_raw_read(struct intel_m10bmc *m10bmc, unsigned int addr,
return ret;
}
+int m10bmc_sys_read(struct intel_m10bmc *m10bmc, unsigned int offset, unsigned int *val);
+int m10bmc_sys_update_bits(struct intel_m10bmc *m10bmc, unsigned int offset,
+ unsigned int msk, unsigned int val);
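+
+/*
+ * Example (illustrative): read the doorbell register through the
+ * per-platform CSR map:
+ *
+ *	const struct m10bmc_csr_map *csr_map = m10bmc->info->csr_map;
+ *	unsigned int doorbell;
+ *	int ret;
+ *
+ *	ret = m10bmc_sys_read(m10bmc, csr_map->doorbell, &doorbell);
+ */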
+
/*
- * The base of the system registers could be configured by HW developers, and
- * in HW SPEC, the base is not added to the addresses of the system registers.
- *
- * This macro helps to simplify the accessing of the system registers. And if
- * the base is reconfigured in HW, SW developers could simply change the
- * M10BMC_SYS_BASE accordingly.
+ * Track the state of the firmware, as it is not available for register
+ * handshakes during secure updates on some MAX 10 cards.
+ */
+void m10bmc_fw_state_set(struct intel_m10bmc *m10bmc, enum m10bmc_fw_state new_state);
+
+/*
+ * MAX10 BMC Core support
*/
-#define m10bmc_sys_read(m10bmc, offset, val) \
- m10bmc_raw_read(m10bmc, M10BMC_SYS_BASE + (offset), val)
+int m10bmc_dev_init(struct intel_m10bmc *m10bmc, const struct intel_m10bmc_platform_info *info);
+extern const struct attribute_group *m10bmc_dev_groups[];
#endif /* __MFD_INTEL_M10_BMC_H */
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h
index 6a88e34cb955..9ba2c1a8d836 100644
--- a/include/linux/mfd/intel_soc_pmic.h
+++ b/include/linux/mfd/intel_soc_pmic.h
@@ -13,6 +13,14 @@
#include <linux/regmap.h>
+enum intel_cht_wc_models {
+ INTEL_CHT_WC_UNKNOWN,
+ INTEL_CHT_WC_GPD_WIN_POCKET,
+ INTEL_CHT_WC_XIAOMI_MIPAD2,
+ INTEL_CHT_WC_LENOVO_YOGABOOK1,
+ INTEL_CHT_WC_LENOVO_YT3_X90,
+};
+
/**
* struct intel_soc_pmic - Intel SoC PMIC data
* @irq: Master interrupt number of the parent PMIC device
@@ -39,6 +47,7 @@ struct intel_soc_pmic {
struct regmap_irq_chip_data *irq_chip_data_crit;
struct device *dev;
struct intel_scu_ipc_dev *scu;
+ enum intel_cht_wc_models cht_wc_model;
};
int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address,
diff --git a/include/linux/mfd/ipaq-micro.h b/include/linux/mfd/ipaq-micro.h
index ee48a4321c57..d5caa4c86ecc 100644
--- a/include/linux/mfd/ipaq-micro.h
+++ b/include/linux/mfd/ipaq-micro.h
@@ -75,8 +75,8 @@ struct ipaq_micro_rxdev {
* @id: 4-bit ID of the message
* @tx_len: length of TX data
* @tx_data: TX data to send
- * @rx_len: length of receieved RX data
- * @rx_data: RX data to recieve
+ * @rx_len: length of received RX data
+ * @rx_data: RX data to receive
* @ack: a completion that will be completed when RX is complete
* @node: list node if message gets queued
*/
diff --git a/include/linux/mfd/iqs62x.h b/include/linux/mfd/iqs62x.h
index 5ced55eae11b..ffc86010af74 100644
--- a/include/linux/mfd/iqs62x.h
+++ b/include/linux/mfd/iqs62x.h
@@ -14,6 +14,11 @@
#define IQS624_PROD_NUM 0x43
#define IQS625_PROD_NUM 0x4E
+#define IQS620_HW_NUM_V0 0x82
+#define IQS620_HW_NUM_V1 IQS620_HW_NUM_V0
+#define IQS620_HW_NUM_V2 IQS620_HW_NUM_V0
+#define IQS620_HW_NUM_V3 0x92
+
#define IQS621_ALS_FLAGS 0x16
#define IQS622_ALS_FLAGS 0x14
@@ -129,6 +134,8 @@ struct iqs62x_core {
struct completion fw_done;
enum iqs62x_ui_sel ui_sel;
unsigned long event_cache;
+ u8 sw_num;
+ u8 hw_num;
};
extern const struct iqs62x_event_desc iqs62x_events[IQS62X_NUM_EVENTS];
diff --git a/include/linux/mfd/lm3533.h b/include/linux/mfd/lm3533.h
index 77092f6363ad..69059a7a2ce5 100644
--- a/include/linux/mfd/lm3533.h
+++ b/include/linux/mfd/lm3533.h
@@ -16,6 +16,7 @@
DEVICE_ATTR(_name, S_IRUGO | S_IWUSR , show_##_name, store_##_name)
struct device;
+struct gpio_desc;
struct regmap;
struct lm3533 {
@@ -23,7 +24,7 @@ struct lm3533 {
struct regmap *regmap;
- int gpio_hwen;
+ struct gpio_desc *hwen;
int irq;
unsigned have_als:1;
@@ -69,8 +70,6 @@ enum lm3533_boost_ovp {
};
struct lm3533_platform_data {
- int gpio_hwen;
-
enum lm3533_boost_ovp boost_ovp;
enum lm3533_boost_freq boost_freq;
diff --git a/include/linux/mfd/loongson-se.h b/include/linux/mfd/loongson-se.h
new file mode 100644
index 000000000000..07afa0c2524d
--- /dev/null
+++ b/include/linux/mfd/loongson-se.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (C) 2025 Loongson Technology Corporation Limited */
+
+#ifndef __MFD_LOONGSON_SE_H__
+#define __MFD_LOONGSON_SE_H__
+
+#define LOONGSON_ENGINE_CMD_TIMEOUT_US 10000
+#define SE_SEND_CMD_REG 0x0
+#define SE_SEND_CMD_REG_LEN 0x8
+/* Controller command ID */
+#define SE_CMD_START 0x0
+#define SE_CMD_SET_DMA 0x3
+#define SE_CMD_SET_ENGINE_CMDBUF 0x4
+
+#define SE_S2LINT_STAT 0x88
+#define SE_S2LINT_EN 0x8c
+#define SE_S2LINT_CL 0x94
+#define SE_L2SINT_STAT 0x98
+#define SE_L2SINT_SET 0xa0
+
+#define SE_INT_ALL 0xffffffff
+#define SE_INT_CONTROLLER BIT(0)
+
+#define SE_ENGINE_MAX 16
+#define SE_ENGINE_RNG 1
+#define SE_CMD_RNG 0x100
+
+#define SE_ENGINE_TPM 5
+#define SE_CMD_TPM 0x500
+
+#define SE_ENGINE_CMD_SIZE 32
+
+struct loongson_se_engine {
+ struct loongson_se *se;
+ int id;
+
+ /* Command buffer */
+ void *command;
+ void *command_ret;
+
+ void *data_buffer;
+ uint buffer_size;
+ /* Data buffer offset to DMA base */
+ uint buffer_off;
+
+ struct completion completion;
+};
+
+struct loongson_se_engine *loongson_se_init_engine(struct device *dev, int id);
+int loongson_se_send_engine_cmd(struct loongson_se_engine *engine);
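+
+/*
+ * Illustrative flow for a client driver (assumed, not mandated here):
+ *
+ *	engine = loongson_se_init_engine(dev, SE_ENGINE_RNG);
+ *	(fill engine->command, up to SE_ENGINE_CMD_SIZE bytes)
+ *	err = loongson_se_send_engine_cmd(engine);
+ *	(on success, read the reply from engine->command_ret)
+ */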
+
+#endif
diff --git a/include/linux/mfd/lp3943.h b/include/linux/mfd/lp3943.h
index 020a339f96e8..402f01078fcc 100644
--- a/include/linux/mfd/lp3943.h
+++ b/include/linux/mfd/lp3943.h
@@ -11,7 +11,6 @@
#define __MFD_LP3943_H__
#include <linux/gpio.h>
-#include <linux/pwm.h>
#include <linux/regmap.h>
/* Registers */
diff --git a/include/linux/mfd/lp873x.h b/include/linux/mfd/lp873x.h
index 5546688c7da7..fe8174cc8637 100644
--- a/include/linux/mfd/lp873x.h
+++ b/include/linux/mfd/lp873x.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Functions to access LP873X power management chip.
*
* Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_MFD_LP873X_H
diff --git a/include/linux/mfd/lp87565.h b/include/linux/mfd/lp87565.h
index 5640e6088fe6..4c895072d91b 100644
--- a/include/linux/mfd/lp87565.h
+++ b/include/linux/mfd/lp87565.h
@@ -222,31 +222,20 @@ enum lp87565_device_type {
#define LP87565_GPIO2_SEL BIT(1)
#define LP87565_GPIO1_SEL BIT(0)
-#define LP87565_GOIO3_OD BIT(6)
-#define LP87565_GOIO2_OD BIT(5)
-#define LP87565_GOIO1_OD BIT(4)
-#define LP87565_GOIO3_DIR BIT(2)
-#define LP87565_GOIO2_DIR BIT(1)
-#define LP87565_GOIO1_DIR BIT(0)
-
-#define LP87565_GOIO3_IN BIT(2)
-#define LP87565_GOIO2_IN BIT(1)
-#define LP87565_GOIO1_IN BIT(0)
-
-#define LP87565_GOIO3_OUT BIT(2)
-#define LP87565_GOIO2_OUT BIT(1)
-#define LP87565_GOIO1_OUT BIT(0)
-
-enum LP87565_regulator_id {
- /* BUCK's */
- LP87565_BUCK_0,
- LP87565_BUCK_1,
- LP87565_BUCK_2,
- LP87565_BUCK_3,
- LP87565_BUCK_10,
- LP87565_BUCK_23,
- LP87565_BUCK_3210,
-};
+#define LP87565_GPIO3_OD BIT(6)
+#define LP87565_GPIO2_OD BIT(5)
+#define LP87565_GPIO1_OD BIT(4)
+#define LP87565_GPIO3_DIR BIT(2)
+#define LP87565_GPIO2_DIR BIT(1)
+#define LP87565_GPIO1_DIR BIT(0)
+
+#define LP87565_GPIO3_IN BIT(2)
+#define LP87565_GPIO2_IN BIT(1)
+#define LP87565_GPIO1_IN BIT(0)
+
+#define LP87565_GPIO3_OUT BIT(2)
+#define LP87565_GPIO2_OUT BIT(1)
+#define LP87565_GPIO1_OUT BIT(0)
/**
 * struct lp87565 - state holder for the LP87565 driver
@@ -263,5 +252,6 @@ struct lp87565 {
u8 rev;
u8 dev_type;
struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
};
#endif /* __LINUX_MFD_LP87565_H */
diff --git a/include/linux/mfd/lp8788.h b/include/linux/mfd/lp8788.h
index 3d5c480d58ea..fd17bec2a33e 100644
--- a/include/linux/mfd/lp8788.h
+++ b/include/linux/mfd/lp8788.h
@@ -10,9 +10,7 @@
#ifndef __MFD_LP8788_H__
#define __MFD_LP8788_H__
-#include <linux/gpio.h>
#include <linux/irqdomain.h>
-#include <linux/pwm.h>
#include <linux/regmap.h>
#define LP8788_DEV_BUCK "lp8788-buck"
@@ -88,12 +86,6 @@ enum lp8788_charger_event {
CHARGER_DETECTED,
};
-enum lp8788_bl_ctrl_mode {
- LP8788_BL_REGISTER_ONLY,
- LP8788_BL_COMB_PWM_BASED, /* PWM + I2C, changed by PWM input */
- LP8788_BL_COMB_REGISTER_BASED, /* PWM + I2C, changed by I2C */
-};
-
enum lp8788_bl_dim_mode {
LP8788_DIM_EXPONENTIAL,
LP8788_DIM_LINEAR,
@@ -159,21 +151,17 @@ struct lp8788;
/*
* lp8788_buck1_dvs
- * @gpio : gpio pin number for dvs control
* @vsel : dvs selector for buck v1 register
*/
struct lp8788_buck1_dvs {
- int gpio;
enum lp8788_dvs_sel vsel;
};
/*
* lp8788_buck2_dvs
- * @gpio : two gpio pin numbers are used for dvs
* @vsel : dvs selector for buck v2 register
*/
struct lp8788_buck2_dvs {
- int gpio[LP8788_NUM_BUCK2_DVS];
enum lp8788_dvs_sel vsel;
};
@@ -207,31 +195,6 @@ struct lp8788_charger_platform_data {
};
/*
- * struct lp8788_backlight_platform_data
- * @name : backlight driver name. (default: "lcd-backlight")
- * @initial_brightness : initial value of backlight brightness
- * @bl_mode : brightness control by pwm or lp8788 register
- * @dim_mode : dimming mode selection
- * @full_scale : full scale current setting
- * @rise_time : brightness ramp up step time
- * @fall_time : brightness ramp down step time
- * @pwm_pol : pwm polarity setting when bl_mode is pwm based
- * @period_ns : platform specific pwm period value. unit is nano.
- Only valid when bl_mode is LP8788_BL_COMB_PWM_BASED
- */
-struct lp8788_backlight_platform_data {
- char *name;
- int initial_brightness;
- enum lp8788_bl_ctrl_mode bl_mode;
- enum lp8788_bl_dim_mode dim_mode;
- enum lp8788_bl_full_scale_current full_scale;
- enum lp8788_bl_ramp_step rise_time;
- enum lp8788_bl_ramp_step fall_time;
- enum pwm_polarity pwm_pol;
- unsigned int period_ns;
-};
-
-/*
* struct lp8788_led_platform_data
* @name : led driver name. (default: "keyboard-backlight")
* @scale : current scale
@@ -268,11 +231,10 @@ struct lp8788_vib_platform_data {
* @buck_data : regulator initial data for buck
* @dldo_data : regulator initial data for digital ldo
* @aldo_data : regulator initial data for analog ldo
- * @buck1_dvs : gpio configurations for buck1 dvs
- * @buck2_dvs : gpio configurations for buck2 dvs
+ * @buck1_dvs : configurations for buck1 dvs
+ * @buck2_dvs : configurations for buck2 dvs
* @chg_pdata : platform data for charger driver
* @alarm_sel : rtc alarm selection (1 or 2)
- * @bl_pdata : configurable data for backlight driver
* @led_pdata : configurable data for led driver
* @vib_pdata : configurable data for vibrator driver
* @adc_pdata : iio map data for adc driver
@@ -294,9 +256,6 @@ struct lp8788_platform_data {
/* rtc alarm */
enum lp8788_alarm_sel alarm_sel;
- /* backlight */
- struct lp8788_backlight_platform_data *bl_pdata;
-
/* current sinks */
struct lp8788_led_platform_data *led_pdata;
struct lp8788_vib_platform_data *vib_pdata;
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index 39967a5eca6d..1fbda1f8967d 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -8,14 +8,14 @@
#ifndef LPC_ICH_H
#define LPC_ICH_H
-#include <linux/platform_data/x86/intel-spi.h>
+#include <linux/platform_data/x86/spi-intel.h>
/* GPIO resources */
#define ICH_RES_GPIO 0
#define ICH_RES_GPE0 1
/* GPIO compatibility */
-enum {
+enum lpc_gpio_versions {
ICH_I3100_GPIO,
ICH_V5_GPIO,
ICH_V6_GPIO,
@@ -26,11 +26,14 @@ enum {
AVOTON_GPIO,
};
+struct lpc_ich_gpio_info;
+
struct lpc_ich_info {
char name[32];
unsigned int iTCO_version;
- unsigned int gpio_version;
+ enum lpc_gpio_versions gpio_version;
enum intel_spi_type spi_type;
+ const struct lpc_ich_gpio_info *gpio_info;
u8 use_gpio;
};
diff --git a/include/linux/mfd/macsmc.h b/include/linux/mfd/macsmc.h
new file mode 100644
index 000000000000..cc09ecce0df7
--- /dev/null
+++ b/include/linux/mfd/macsmc.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple SMC (System Management Controller) core definitions
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#ifndef _LINUX_MFD_MACSMC_H
+#define _LINUX_MFD_MACSMC_H
+
+#include <linux/soc/apple/rtkit.h>
+
+/**
+ * typedef smc_key - Alias for u32 to be used for SMC keys
+ *
+ * SMC keys are 32bit integers containing packed ASCII characters in natural
+ * integer order, i.e. 0xAABBCCDD, which represents the FourCC ABCD.
+ * The SMC driver is designed with this assumption and ensures the right
+ * endianness is used when these are stored to memory and sent to or received
+ * from the actual SMC firmware (which can be done in either shared memory or
+ * as 64bit mailbox message on Apple Silicon).
+ * Internally, SMC stores these keys in a table sorted lexicographically and
+ * allows resolving an index into this table to the corresponding SMC key.
+ * Thus, storing keys as u32 is very convenient as it allows, e.g., the use
+ * of normal comparison operators, which directly map to the natural order
+ * used by SMC firmware.
+ *
+ * This simple type alias is introduced to allow easy recognition of SMC key
+ * variables and arguments.
+ */
+typedef u32 smc_key;
+
+/**
+ * SMC_KEY - Convert FourCC SMC keys in source code to smc_key
+ *
+ * This macro can be used to easily define FourCC SMC keys in source code
+ * and convert these to u32 / smc_key, e.g. SMC_KEY(NTAP) will expand to
+ * 0x4e544150.
+ *
+ * @s: FourCC SMC key to be converted
+ */
+#define SMC_KEY(s) (smc_key)(_SMC_KEY(#s))
+#define _SMC_KEY(s) (((s)[0] << 24) | ((s)[1] << 16) | ((s)[2] << 8) | (s)[3])
+#define __SMC_KEY(a, b, c, d) (((u32)(a) << 24) | ((u32)(b) << 16) | ((u32)(c) << 8) | ((u32)(d)))
+
+#define APPLE_SMC_READABLE BIT(7)
+#define APPLE_SMC_WRITABLE BIT(6)
+#define APPLE_SMC_FUNCTION BIT(4)
+
+/**
+ * struct apple_smc_key_info - Information for an SMC key as returned by SMC
+ * @type_code: FourCC code indicating the type for this key.
+ * Known types:
+ * ch8*: ASCII string
+ * flag: Boolean, 1 or 0
+ * flt: 32-bit single-precision IEEE 754 float
+ * hex: Binary data
+ *             ioft: 64-bit unsigned fixed-point integer (48.16)
+ * {si,ui}{8,16,32,64}: Signed/Unsigned 8-/16-/32-/64-bit integer
+ * @size: Size of the buffer associated with this key
+ * @flags: Bitfield encoding flags (APPLE_SMC_{READABLE,WRITABLE,FUNCTION})
+ */
+struct apple_smc_key_info {
+ u32 type_code;
+ u8 size;
+ u8 flags;
+};
+
+/**
+ * enum apple_smc_boot_stage - SMC boot stage
+ * @APPLE_SMC_BOOTING: SMC is booting
+ * @APPLE_SMC_INITIALIZED: SMC is initialized and ready to use
+ * @APPLE_SMC_ERROR_NO_SHMEM: Shared memory could not be initialized during boot
+ * @APPLE_SMC_ERROR_CRASHED: SMC has crashed
+ */
+enum apple_smc_boot_stage {
+ APPLE_SMC_BOOTING,
+ APPLE_SMC_INITIALIZED,
+ APPLE_SMC_ERROR_NO_SHMEM,
+ APPLE_SMC_ERROR_CRASHED
+};
+
+/**
+ * struct apple_smc
+ * @dev: Underlying device struct for the physical backend device
+ * @key_count: Number of available SMC keys
+ * @first_key: First valid SMC key
+ * @last_key: Last valid SMC key
+ * @event_handlers: Notifier call chain for events received from SMC
+ * @rtk: Pointer to Apple RTKit instance
+ * @init_done: Completion for initialization
+ * @boot_stage: Current boot stage of SMC
+ * @sram: Pointer to SRAM resource
+ * @sram_base: SRAM base address
+ * @shmem: RTKit shared memory structure for SRAM
+ * @msg_id: Current message ID for commands; incremented for each command
+ * @atomic_mode: Flag set when atomic mode is entered
+ * @atomic_pending: Flag indicating pending atomic command
+ * @cmd_done: Completion for command execution in non-atomic mode
+ * @cmd_ret: Return value from SMC for last command
+ * @mutex: Mutex for non-atomic mode
+ * @lock: Spinlock for atomic mode
+ */
+struct apple_smc {
+ struct device *dev;
+
+ u32 key_count;
+ smc_key first_key;
+ smc_key last_key;
+
+ struct blocking_notifier_head event_handlers;
+
+ struct apple_rtkit *rtk;
+
+ struct completion init_done;
+ enum apple_smc_boot_stage boot_stage;
+
+ struct resource *sram;
+ void __iomem *sram_base;
+ struct apple_rtkit_shmem shmem;
+
+ unsigned int msg_id;
+
+ bool atomic_mode;
+ bool atomic_pending;
+ struct completion cmd_done;
+ u64 cmd_ret;
+
+ struct mutex mutex;
+ spinlock_t lock;
+};
+
+/**
+ * apple_smc_read - Read size bytes from given SMC key into buf
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key to be read
+ * @buf: Buffer into which size bytes of data will be read from SMC
+ * @size: Number of bytes to be read into buf
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_read(struct apple_smc *smc, smc_key key, void *buf, size_t size);
+
+/**
+ * apple_smc_write - Write size bytes into given SMC key from buf
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key data will be written to
+ * @buf: Buffer from which size bytes of data will be written to SMC
+ * @size: Number of bytes to be written
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_write(struct apple_smc *smc, smc_key key, const void *buf, size_t size);
+
+/**
+ * apple_smc_enter_atomic - Enter atomic mode to be able to use apple_smc_write_atomic
+ * @smc: Pointer to apple_smc struct
+ *
+ * This function switches the SMC backend to atomic mode which allows the
+ * use of apple_smc_write_atomic while disabling *all* other functions.
+ * This is only used for shutdown/reboot, which requires writing to an SMC
+ * key from atomic context.
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_enter_atomic(struct apple_smc *smc);
+
+/**
+ * apple_smc_write_atomic - Write size bytes into given SMC key from buf without sleeping
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key data will be written to
+ * @buf: Buffer from which size bytes of data will be written to SMC
+ * @size: Number of bytes to be written
+ *
+ * Note that this function will fail if apple_smc_enter_atomic hasn't been
+ * called before.
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_write_atomic(struct apple_smc *smc, smc_key key, const void *buf, size_t size);
+
+/**
+ * apple_smc_rw - Write and then read using the given SMC key
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key data will be written to
+ * @wbuf: Buffer from which size bytes of data will be written to SMC
+ * @wsize: Number of bytes to be written
+ * @rbuf: Buffer to which size bytes of data will be read from SMC
+ * @rsize: Number of bytes to be read
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_rw(struct apple_smc *smc, smc_key key, const void *wbuf, size_t wsize,
+ void *rbuf, size_t rsize);
+
+/**
+ * apple_smc_get_key_by_index - Given an index, return the corresponding SMC key
+ * @smc: Pointer to apple_smc struct
+ * @index: Index to be resolved
+ * @key: Buffer for SMC key to be returned
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_get_key_by_index(struct apple_smc *smc, int index, smc_key *key);
+
+/**
+ * apple_smc_get_key_info - Get key information from SMC
+ * @smc: Pointer to apple_smc struct
+ * @key: Key to acquire information for
+ * @info: Pointer to struct apple_smc_key_info which will be filled
+ *
+ * Return: Zero on success, negative errno on error
+ */
+int apple_smc_get_key_info(struct apple_smc *smc, smc_key key, struct apple_smc_key_info *info);
+
+/**
+ * apple_smc_key_exists - Check if the given SMC key exists
+ * @smc: Pointer to apple_smc struct
+ * @key: smc_key to be checked
+ *
+ * Return: True if the key exists, false otherwise
+ */
+static inline bool apple_smc_key_exists(struct apple_smc *smc, smc_key key)
+{
+ return apple_smc_get_key_info(smc, key, NULL) >= 0;
+}
+
+#define APPLE_SMC_TYPE_OPS(type) \
+ static inline int apple_smc_read_##type(struct apple_smc *smc, smc_key key, type *p) \
+ { \
+ int ret = apple_smc_read(smc, key, p, sizeof(*p)); \
+ return (ret < 0) ? ret : ((ret != sizeof(*p)) ? -EINVAL : 0); \
+ } \
+ static inline int apple_smc_write_##type(struct apple_smc *smc, smc_key key, type p) \
+ { \
+ return apple_smc_write(smc, key, &p, sizeof(p)); \
+ } \
+ static inline int apple_smc_write_##type##_atomic(struct apple_smc *smc, smc_key key, type p) \
+ { \
+ return apple_smc_write_atomic(smc, key, &p, sizeof(p)); \
+ } \
+ static inline int apple_smc_rw_##type(struct apple_smc *smc, smc_key key, \
+ type w, type *r) \
+ { \
+ int ret = apple_smc_rw(smc, key, &w, sizeof(w), r, sizeof(*r)); \
+ return (ret < 0) ? ret : ((ret != sizeof(*r)) ? -EINVAL : 0); \
+ }
+
+APPLE_SMC_TYPE_OPS(u64)
+APPLE_SMC_TYPE_OPS(u32)
+APPLE_SMC_TYPE_OPS(u16)
+APPLE_SMC_TYPE_OPS(u8)
+APPLE_SMC_TYPE_OPS(s64)
+APPLE_SMC_TYPE_OPS(s32)
+APPLE_SMC_TYPE_OPS(s16)
+APPLE_SMC_TYPE_OPS(s8)
+
+static inline int apple_smc_read_flag(struct apple_smc *smc, smc_key key, bool *flag)
+{
+ u8 val;
+ int ret = apple_smc_read_u8(smc, key, &val);
+
+ if (ret < 0)
+ return ret;
+
+ *flag = val ? true : false;
+ return ret;
+}
+
+static inline int apple_smc_write_flag(struct apple_smc *smc, smc_key key, bool state)
+{
+ return apple_smc_write_u8(smc, key, state ? 1 : 0);
+}
+
+static inline int apple_smc_write_flag_atomic(struct apple_smc *smc, smc_key key, bool state)
+{
+ return apple_smc_write_u8_atomic(smc, key, state ? 1 : 0);
+}
+
+#endif
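
For illustration, a minimal consumer sketch of the API above. Everything here is hypothetical: the function name, the use of the NTAP key (taken only from the SMC_KEY() kerneldoc example), and the assumption that the smc handle comes from the parent MFD device. apple_smc_read_u32() is one of the accessors generated by APPLE_SMC_TYPE_OPS(u32).

static int macsmc_example_read(struct apple_smc *smc)
{
	u32 val;
	int ret;

	/* Bail out early if the firmware does not expose the key */
	if (!apple_smc_key_exists(smc, SMC_KEY(NTAP)))
		return -ENODEV;

	/* SMC_KEY(NTAP) packs the FourCC NTAP into 0x4e544150 */
	ret = apple_smc_read_u32(smc, SMC_KEY(NTAP), &val);
	if (ret)
		return ret;

	return val != 0;
}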
diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h
index 601cbbc10370..7e84738cbb20 100644
--- a/include/linux/mfd/madera/pdata.h
+++ b/include/linux/mfd/madera/pdata.h
@@ -8,10 +8,11 @@
#ifndef MADERA_PDATA_H
#define MADERA_PDATA_H
-#include <linux/kernel.h>
#include <linux/regulator/arizona-ldo1.h>
#include <linux/regulator/arizona-micsupp.h>
#include <linux/regulator/machine.h>
+#include <linux/types.h>
+
#include <sound/madera-pdata.h>
#define MADERA_MAX_MICBIAS 4
@@ -31,7 +32,7 @@ struct pinctrl_map;
* @irq_flags: Mode for primary IRQ (defaults to active low)
* @gpio_base: Base GPIO number
* @gpio_configs: Array of GPIO configurations (See
- * Documentation/driver-api/pinctl.rst)
+ * Documentation/driver-api/pin-control.rst)
* @n_gpio_configs: Number of entries in gpio_configs
* @gpsw: General purpose switch mode setting. Depends on the external
* hardware connected to the switch. (See the SW1_MODE field
diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h
index a21374f8ad26..dd51a37fa37f 100644
--- a/include/linux/mfd/max14577-private.h
+++ b/include/linux/mfd/max14577-private.h
@@ -2,7 +2,7 @@
/*
* max14577-private.h - Common API for the Maxim 14577/77836 internal sub chip
*
- * Copyright (C) 2014 Samsung Electrnoics
+ * Copyright (C) 2014 Samsung Electronics
* Chanwoo Choi <cw00.choi@samsung.com>
* Krzysztof Kozlowski <krzk@kernel.org>
*/
diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h
index 8b3ef891ba42..0fda5c2e745a 100644
--- a/include/linux/mfd/max14577.h
+++ b/include/linux/mfd/max14577.h
@@ -2,7 +2,7 @@
/*
* max14577.h - Driver for the Maxim 14577/77836
*
- * Copyright (C) 2014 Samsung Electrnoics
+ * Copyright (C) 2014 Samsung Electronics
* Chanwoo Choi <cw00.choi@samsung.com>
* Krzysztof Kozlowski <krzk@kernel.org>
*
diff --git a/include/linux/mfd/max5970.h b/include/linux/mfd/max5970.h
new file mode 100644
index 000000000000..fc50e89edfaa
--- /dev/null
+++ b/include/linux/mfd/max5970.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device driver for regulators in MAX5970 and MAX5978 ICs
+ *
+ * Copyright (c) 2022 9elements GmbH
+ *
+ * Author: Patrick Rudolph <patrick.rudolph@9elements.com>
+ */
+
+#ifndef _MFD_MAX5970_H
+#define _MFD_MAX5970_H
+
+#include <linux/regmap.h>
+
+#define MAX5970_NUM_SWITCHES 2
+#define MAX5978_NUM_SWITCHES 1
+#define MAX5970_NUM_LEDS 4
+
+#define MAX5970_REG_CURRENT_L(ch) (0x01 + (ch) * 4)
+#define MAX5970_REG_CURRENT_H(ch) (0x00 + (ch) * 4)
+#define MAX5970_REG_VOLTAGE_L(ch) (0x03 + (ch) * 4)
+#define MAX5970_REG_VOLTAGE_H(ch) (0x02 + (ch) * 4)
+#define MAX5970_REG_MON_RANGE 0x18
+#define MAX5970_MON_MASK 0x3
+#define MAX5970_MON(reg, ch) (((reg) >> ((ch) * 2)) & MAX5970_MON_MASK)
+#define MAX5970_MON_MAX_RANGE_UV 16000000
+
+#define MAX5970_REG_CH_UV_WARN_H(ch) (0x1A + (ch) * 10)
+#define MAX5970_REG_CH_UV_WARN_L(ch) (0x1B + (ch) * 10)
+#define MAX5970_REG_CH_UV_CRIT_H(ch) (0x1C + (ch) * 10)
+#define MAX5970_REG_CH_UV_CRIT_L(ch) (0x1D + (ch) * 10)
+#define MAX5970_REG_CH_OV_WARN_H(ch) (0x1E + (ch) * 10)
+#define MAX5970_REG_CH_OV_WARN_L(ch) (0x1F + (ch) * 10)
+#define MAX5970_REG_CH_OV_CRIT_H(ch) (0x20 + (ch) * 10)
+#define MAX5970_REG_CH_OV_CRIT_L(ch) (0x21 + (ch) * 10)
+
+#define MAX5970_VAL2REG_H(x) (((x) >> 2) & 0xFF)
+#define MAX5970_VAL2REG_L(x) ((x) & 0x3)
+
+#define MAX5970_REG_DAC_FAST(ch) (0x2E + (ch))
+
+#define MAX5970_FAST2SLOW_RATIO 200
+
+#define MAX5970_REG_STATUS0 0x31
+#define MAX5970_CB_IFAULTF(ch) (1 << (ch))
+#define MAX5970_CB_IFAULTS(ch) (1 << ((ch) + 4))
+
+#define MAX5970_REG_STATUS1 0x32
+#define STATUS1_PROT_MASK 0x3
+#define STATUS1_PROT(reg) \
+ (((reg) >> 6) & STATUS1_PROT_MASK)
+#define STATUS1_PROT_SHUTDOWN 0
+#define STATUS1_PROT_CLEAR_PG 1
+#define STATUS1_PROT_ALERT_ONLY 2
+
+#define MAX5970_REG_STATUS2 0x33
+#define MAX5970_IRNG_MASK 0x3
+#define MAX5970_IRNG(reg, ch) \
+ (((reg) >> ((ch) * 2)) & MAX5970_IRNG_MASK)
+
+#define MAX5970_REG_STATUS3 0x34
+#define MAX5970_STATUS3_ALERT BIT(4)
+#define MAX5970_STATUS3_PG(ch) BIT(ch)
+
+#define MAX5970_REG_FAULT0 0x35
+#define UV_STATUS_WARN(ch) (1 << (ch))
+#define UV_STATUS_CRIT(ch) (1 << ((ch) + 4))
+
+#define MAX5970_REG_FAULT1 0x36
+#define OV_STATUS_WARN(ch) (1 << (ch))
+#define OV_STATUS_CRIT(ch) (1 << ((ch) + 4))
+
+#define MAX5970_REG_FAULT2 0x37
+#define OC_STATUS_WARN(ch) (1 << (ch))
+
+#define MAX5970_REG_CHXEN 0x3b
+#define CHXEN(ch) (3 << ((ch) * 2))
+
+#define MAX5970_REG_LED_FLASH 0x43
+
+#define MAX_REGISTERS 0x49
+#define ADC_MASK 0x3FF
+
+#endif /* _MFD_MAX5970_H */
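
The split H/L sample registers pair with the MAX5970_VAL2REG_H/_L helpers above, which suggest that a 10-bit ADC sample is stored with bits 9..2 in the H register and bits 1..0 in the L register. A hedged sketch under that assumption (the function name is hypothetical; the regmap is assumed to come from the MAX5970 MFD core):

static int max5970_example_read_voltage_raw(struct regmap *regmap, int ch)
{
	unsigned int hi, lo;
	int ret;

	ret = regmap_read(regmap, MAX5970_REG_VOLTAGE_H(ch), &hi);
	if (ret)
		return ret;

	ret = regmap_read(regmap, MAX5970_REG_VOLTAGE_L(ch), &lo);
	if (ret)
		return ret;

	/* Reassemble the 10-bit sample, mirroring MAX5970_VAL2REG_H/_L */
	return (int)(((hi << 2) | (lo & 0x3)) & ADC_MASK);
}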
diff --git a/include/linux/mfd/max7360.h b/include/linux/mfd/max7360.h
new file mode 100644
index 000000000000..44cf2bf651a2
--- /dev/null
+++ b/include/linux/mfd/max7360.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __LINUX_MFD_MAX7360_H
+#define __LINUX_MFD_MAX7360_H
+
+#include <linux/bits.h>
+
+#define MAX7360_MAX_KEY_ROWS 8
+#define MAX7360_MAX_KEY_COLS 8
+#define MAX7360_MAX_KEY_NUM (MAX7360_MAX_KEY_ROWS * MAX7360_MAX_KEY_COLS)
+#define MAX7360_ROW_SHIFT 3
+
+#define MAX7360_MAX_GPIO 8
+#define MAX7360_MAX_GPO 6
+#define MAX7360_PORT_PWM_COUNT 8
+#define MAX7360_PORT_RTR_PIN (MAX7360_PORT_PWM_COUNT - 1)
+
+/*
+ * MAX7360 registers
+ */
+#define MAX7360_REG_KEYFIFO 0x00
+#define MAX7360_REG_CONFIG 0x01
+#define MAX7360_REG_DEBOUNCE 0x02
+#define MAX7360_REG_INTERRUPT 0x03
+#define MAX7360_REG_PORTS 0x04
+#define MAX7360_REG_KEYREP 0x05
+#define MAX7360_REG_SLEEP 0x06
+
+/*
+ * MAX7360 GPIO registers
+ *
+ * All these registers are reset together when writing bit 3 of
+ * MAX7360_REG_GPIOCFG.
+ */
+#define MAX7360_REG_GPIOCFG 0x40
+#define MAX7360_REG_GPIOCTRL 0x41
+#define MAX7360_REG_GPIODEB 0x42
+#define MAX7360_REG_GPIOCURR 0x43
+#define MAX7360_REG_GPIOOUTM 0x44
+#define MAX7360_REG_PWMCOM 0x45
+#define MAX7360_REG_RTRCFG 0x46
+#define MAX7360_REG_I2C_TIMEOUT 0x48
+#define MAX7360_REG_GPIOIN 0x49
+#define MAX7360_REG_RTR_CNT 0x4A
+#define MAX7360_REG_PWMBASE 0x50
+#define MAX7360_REG_PWMCFGBASE 0x58
+
+#define MAX7360_REG_GPIO_LAST 0x5F
+
+#define MAX7360_REG_PWM(x) (MAX7360_REG_PWMBASE + (x))
+#define MAX7360_REG_PWMCFG(x) (MAX7360_REG_PWMCFGBASE + (x))
+
+/*
+ * Configuration register bits
+ */
+#define MAX7360_FIFO_EMPTY 0x3F
+#define MAX7360_FIFO_OVERFLOW 0x7F
+#define MAX7360_FIFO_RELEASE BIT(6)
+#define MAX7360_FIFO_COL GENMASK(5, 3)
+#define MAX7360_FIFO_ROW GENMASK(2, 0)
+
+#define MAX7360_CFG_SLEEP BIT(7)
+#define MAX7360_CFG_INTERRUPT BIT(5)
+#define MAX7360_CFG_KEY_RELEASE BIT(3)
+#define MAX7360_CFG_WAKEUP BIT(1)
+#define MAX7360_CFG_TIMEOUT BIT(0)
+
+#define MAX7360_DEBOUNCE GENMASK(4, 0)
+#define MAX7360_DEBOUNCE_MIN 9
+#define MAX7360_DEBOUNCE_MAX 40
+#define MAX7360_PORTS GENMASK(8, 5)
+
+#define MAX7360_INTERRUPT_TIME_MASK GENMASK(4, 0)
+#define MAX7360_INTERRUPT_FIFO_MASK GENMASK(7, 5)
+
+#define MAX7360_PORT_CFG_INTERRUPT_MASK BIT(7)
+#define MAX7360_PORT_CFG_INTERRUPT_EDGES BIT(6)
+#define MAX7360_PORT_CFG_COMMON_PWM BIT(5)
+
+/*
+ * Autosleep register values
+ */
+#define MAX7360_AUTOSLEEP_8192MS 0x01
+#define MAX7360_AUTOSLEEP_4096MS 0x02
+#define MAX7360_AUTOSLEEP_2048MS 0x03
+#define MAX7360_AUTOSLEEP_1024MS 0x04
+#define MAX7360_AUTOSLEEP_512MS 0x05
+#define MAX7360_AUTOSLEEP_256MS 0x06
+
+#define MAX7360_GPIO_CFG_RTR_EN BIT(7)
+#define MAX7360_GPIO_CFG_GPIO_EN BIT(4)
+#define MAX7360_GPIO_CFG_GPIO_RST BIT(3)
+
+#define MAX7360_ROT_DEBOUNCE GENMASK(3, 0)
+#define MAX7360_ROT_DEBOUNCE_MIN 0
+#define MAX7360_ROT_DEBOUNCE_MAX 15
+#define MAX7360_ROT_INTCNT GENMASK(6, 4)
+#define MAX7360_ROT_INTCNT_DLY BIT(7)
+
+#define MAX7360_INT_INTI 0
+#define MAX7360_INT_INTK 1
+
+#define MAX7360_INT_GPIO 0
+#define MAX7360_INT_KEYPAD 1
+#define MAX7360_INT_ROTARY 2
+
+#define MAX7360_NR_INTERNAL_IRQS 3
+
+#endif
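
The FIFO field masks above decode a key event register. A sketch under the assumption, suggested by MAX7360_ROW_SHIFT but not stated by the header, that the scan code is row | (col << MAX7360_ROW_SHIFT); the function name is hypothetical, and FIELD_GET() comes from <linux/bitfield.h>:

static void max7360_example_decode_fifo(unsigned int val)
{
	unsigned int row, col;
	bool release;

	/* Sentinel values: nothing queued, or events were lost */
	if (val == MAX7360_FIFO_EMPTY || val == MAX7360_FIFO_OVERFLOW)
		return;

	row = FIELD_GET(MAX7360_FIFO_ROW, val);
	col = FIELD_GET(MAX7360_FIFO_COL, val);
	release = val & MAX7360_FIFO_RELEASE;

	pr_info("key %u %s\n", row | (col << MAX7360_ROW_SHIFT),
		release ? "released" : "pressed");
}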
diff --git a/include/linux/mfd/max77541.h b/include/linux/mfd/max77541.h
new file mode 100644
index 000000000000..fe5c0a3dc637
--- /dev/null
+++ b/include/linux/mfd/max77541.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __MFD_MAX77541_H
+#define __MFD_MAX77541_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+/* REGISTERS */
+#define MAX77541_REG_INT_SRC 0x00
+#define MAX77541_REG_INT_SRC_M 0x01
+
+#define MAX77541_BIT_INT_SRC_TOPSYS BIT(0)
+#define MAX77541_BIT_INT_SRC_BUCK BIT(1)
+
+#define MAX77541_REG_TOPSYS_INT 0x02
+#define MAX77541_REG_TOPSYS_INT_M 0x03
+
+#define MAX77541_BIT_TOPSYS_INT_TJ_120C BIT(0)
+#define MAX77541_BIT_TOPSYS_INT_TJ_140C BIT(1)
+#define MAX77541_BIT_TOPSYS_INT_TSHDN BIT(2)
+#define MAX77541_BIT_TOPSYS_INT_UVLO BIT(3)
+#define MAX77541_BIT_TOPSYS_INT_ALT_SWO BIT(4)
+#define MAX77541_BIT_TOPSYS_INT_EXT_FREQ_DET BIT(5)
+
+/* REGULATORS */
+#define MAX77541_REG_BUCK_INT 0x20
+#define MAX77541_REG_BUCK_INT_M 0x21
+
+#define MAX77541_BIT_BUCK_INT_M1_POK_FLT BIT(0)
+#define MAX77541_BIT_BUCK_INT_M2_POK_FLT BIT(1)
+#define MAX77541_BIT_BUCK_INT_M1_SCFLT BIT(4)
+#define MAX77541_BIT_BUCK_INT_M2_SCFLT BIT(5)
+
+#define MAX77541_REG_EN_CTRL 0x0B
+
+#define MAX77541_BIT_M1_EN BIT(0)
+#define MAX77541_BIT_M2_EN BIT(1)
+
+#define MAX77541_REG_M1_VOUT 0x23
+#define MAX77541_REG_M2_VOUT 0x33
+
+#define MAX77541_BITS_MX_VOUT GENMASK(7, 0)
+
+#define MAX77541_REG_M1_CFG1 0x25
+#define MAX77541_REG_M2_CFG1 0x35
+
+#define MAX77541_BITS_MX_CFG1_RNG GENMASK(7, 6)
+
+/* ADC */
+#define MAX77541_REG_ADC_INT 0x70
+#define MAX77541_REG_ADC_INT_M 0x71
+
+#define MAX77541_BIT_ADC_INT_CH1_I BIT(0)
+#define MAX77541_BIT_ADC_INT_CH2_I BIT(1)
+#define MAX77541_BIT_ADC_INT_CH3_I BIT(2)
+#define MAX77541_BIT_ADC_INT_CH6_I BIT(5)
+
+#define MAX77541_REG_ADC_DATA_CH1 0x72
+#define MAX77541_REG_ADC_DATA_CH2 0x73
+#define MAX77541_REG_ADC_DATA_CH3 0x74
+#define MAX77541_REG_ADC_DATA_CH6 0x77
+
+/* INTERRUPT MASKS */
+#define MAX77541_REG_INT_SRC_MASK 0x00
+#define MAX77541_REG_TOPSYS_INT_MASK 0x00
+#define MAX77541_REG_BUCK_INT_MASK 0x00
+
+#define MAX77541_MAX_REGULATORS 2
+
+enum max7754x_ids {
+ MAX77540 = 1,
+ MAX77541,
+};
+
+struct regmap;
+struct regmap_irq_chip_data;
+struct i2c_client;
+
+struct max77541 {
+ struct i2c_client *i2c;
+ struct regmap *regmap;
+ enum max7754x_ids id;
+
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap_irq_chip_data *irq_buck;
+ struct regmap_irq_chip_data *irq_topsys;
+ struct regmap_irq_chip_data *irq_adc;
+};
+
+#endif /* __MFD_MAX77541_H */
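
The two-level interrupt layout above (a source register fanning out to TOPSYS and BUCK banks) can be demultiplexed as sketched below; the function name is hypothetical, and a real driver would typically let regmap-irq do this instead:

static int max77541_example_irq_source(struct max77541 *max77541)
{
	unsigned int src;
	int ret;

	ret = regmap_read(max77541->regmap, MAX77541_REG_INT_SRC, &src);
	if (ret)
		return ret;

	if (src & MAX77541_BIT_INT_SRC_TOPSYS)
		pr_debug("TOPSYS interrupt pending\n");	/* see TOPSYS_INT bits */
	if (src & MAX77541_BIT_INT_SRC_BUCK)
		pr_debug("buck interrupt pending\n");	/* see BUCK_INT bits */

	return 0;
}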
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index 833e578e051e..e6b8b4014dc0 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -2,7 +2,7 @@
/*
* max77686-private.h - Voltage regulator driver for the Maxim 77686/802
*
- * Copyright (C) 2012 Samsung Electrnoics
+ * Copyright (C) 2012 Samsung Electronics
* Chiwoong Byun <woong.byun@samsung.com>
*/
@@ -133,35 +133,35 @@ enum max77686_pmic_reg {
/* Reserved: 0x7A-0x7D */
MAX77686_REG_BBAT_CHG = 0x7E,
- MAX77686_REG_32KHZ = 0x7F,
+ MAX77686_REG_32KHZ = 0x7F,
MAX77686_REG_PMIC_END = 0x80,
};
enum max77686_rtc_reg {
- MAX77686_RTC_INT = 0x00,
- MAX77686_RTC_INTM = 0x01,
+ MAX77686_RTC_INT = 0x00,
+ MAX77686_RTC_INTM = 0x01,
MAX77686_RTC_CONTROLM = 0x02,
MAX77686_RTC_CONTROL = 0x03,
MAX77686_RTC_UPDATE0 = 0x04,
/* Reserved: 0x5 */
MAX77686_WTSR_SMPL_CNTL = 0x06,
- MAX77686_RTC_SEC = 0x07,
- MAX77686_RTC_MIN = 0x08,
- MAX77686_RTC_HOUR = 0x09,
+ MAX77686_RTC_SEC = 0x07,
+ MAX77686_RTC_MIN = 0x08,
+ MAX77686_RTC_HOUR = 0x09,
MAX77686_RTC_WEEKDAY = 0x0A,
- MAX77686_RTC_MONTH = 0x0B,
- MAX77686_RTC_YEAR = 0x0C,
- MAX77686_RTC_DATE = 0x0D,
- MAX77686_ALARM1_SEC = 0x0E,
- MAX77686_ALARM1_MIN = 0x0F,
+ MAX77686_RTC_MONTH = 0x0B,
+ MAX77686_RTC_YEAR = 0x0C,
+ MAX77686_RTC_MONTHDAY = 0x0D,
+ MAX77686_ALARM1_SEC = 0x0E,
+ MAX77686_ALARM1_MIN = 0x0F,
MAX77686_ALARM1_HOUR = 0x10,
MAX77686_ALARM1_WEEKDAY = 0x11,
MAX77686_ALARM1_MONTH = 0x12,
MAX77686_ALARM1_YEAR = 0x13,
MAX77686_ALARM1_DATE = 0x14,
- MAX77686_ALARM2_SEC = 0x15,
- MAX77686_ALARM2_MIN = 0x16,
+ MAX77686_ALARM2_SEC = 0x15,
+ MAX77686_ALARM2_MIN = 0x16,
MAX77686_ALARM2_HOUR = 0x17,
MAX77686_ALARM2_WEEKDAY = 0x18,
MAX77686_ALARM2_MONTH = 0x19,
@@ -352,7 +352,7 @@ enum max77802_rtc_reg {
MAX77802_RTC_WEEKDAY = 0xCA,
MAX77802_RTC_MONTH = 0xCB,
MAX77802_RTC_YEAR = 0xCC,
- MAX77802_RTC_DATE = 0xCD,
+ MAX77802_RTC_MONTHDAY = 0xCD,
MAX77802_RTC_AE1 = 0xCE,
MAX77802_ALARM1_SEC = 0xCF,
MAX77802_ALARM1_MIN = 0xD0,
@@ -441,8 +441,4 @@ enum max77686_types {
TYPE_MAX77802,
};
-extern int max77686_irq_init(struct max77686_dev *max77686);
-extern void max77686_irq_exit(struct max77686_dev *max77686);
-extern int max77686_irq_resume(struct max77686_dev *max77686);
-
#endif /* __LINUX_MFD_MAX77686_PRIV_H */
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index d0fb510875e6..7c4624acd1db 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -2,7 +2,7 @@
/*
* max77686.h - Driver for the Maxim 77686/802
*
- * Copyright (C) 2012 Samsung Electrnoics
+ * Copyright (C) 2012 Samsung Electronics
* Chiwoong Byun <woong.byun@samsung.com>
*
* This driver is based on max8997.h
diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h
index a5bce099f1ed..ec2e1b2dceb8 100644
--- a/include/linux/mfd/max77693-common.h
+++ b/include/linux/mfd/max77693-common.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * Common data shared between Maxim 77693 and 77843 drivers
+ * Common data shared between Maxim 77693, 77705 and 77843 drivers
*
* Copyright (C) 2015 Samsung Electronics
*/
@@ -11,6 +11,7 @@
enum max77693_types {
TYPE_MAX77693_UNKNOWN,
TYPE_MAX77693,
+ TYPE_MAX77705,
TYPE_MAX77843,
TYPE_MAX77693_NUM,
@@ -32,6 +33,7 @@ struct max77693_dev {
struct regmap *regmap_muic;
struct regmap *regmap_haptic; /* Only MAX77693 */
struct regmap *regmap_chg; /* Only MAX77843 */
+ struct regmap *regmap_leds; /* Only MAX77705 */
struct regmap_irq_chip_data *irq_data_led;
struct regmap_irq_chip_data *irq_data_topsys;
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 311f7d3d2323..8e7c35b5ea1c 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -2,7 +2,7 @@
/*
* max77693-private.h - Voltage regulator driver for the Maxim 77693
*
- * Copyright (C) 2012 Samsung Electrnoics
+ * Copyright (C) 2012 Samsung Electronics
* SangYoung Son <hello.son@samsung.com>
*
* This program is not provided / owned by Maxim Integrated Products.
@@ -217,6 +217,10 @@ enum max77693_charger_battery_state {
#define CHG_CNFG_01_CHGRSTRT_MASK (0x3 << CHG_CNFG_01_CHGRSTRT_SHIFT)
#define CHG_CNFG_01_PQEN_MAKS BIT(CHG_CNFG_01_PQEN_SHIFT)
+/* MAX77693_CHG_REG_CHG_CNFG_02 register */
+#define CHG_CNFG_02_CC_SHIFT 0
+#define CHG_CNFG_02_CC_MASK 0x3F
+
/* MAX77693_CHG_REG_CHG_CNFG_03 register */
#define CHG_CNFG_03_TOITH_SHIFT 0
#define CHG_CNFG_03_TOTIME_SHIFT 3
@@ -244,6 +248,7 @@ enum max77693_charger_battery_state {
#define CHG_CNFG_12_VCHGINREG_MASK (0x3 << CHG_CNFG_12_VCHGINREG_SHIFT)
/* MAX77693 CHG_CNFG_09 Register */
+#define CHG_CNFG_09_CHGIN_ILIM_SHIFT 0
#define CHG_CNFG_09_CHGIN_ILIM_MASK 0x7F
/* MAX77693 CHG_CTRL Register */
@@ -405,7 +410,7 @@ enum max77693_haptic_reg {
MAX77693_HAPTIC_REG_END,
};
-/* max77693-pmic LSCNFG configuraton register */
+/* max77693-pmic LSCNFG configuration register */
#define MAX77693_PMIC_LOW_SYS_MASK 0x80
#define MAX77693_PMIC_LOW_SYS_SHIFT 7
@@ -414,17 +419,6 @@ enum max77693_haptic_reg {
#define MAX77693_CONFIG2_MEN 6
#define MAX77693_CONFIG2_HTYP 5
-enum max77693_irq_source {
- LED_INT = 0,
- TOPSYS_INT,
- CHG_INT,
- MUIC_INT1,
- MUIC_INT2,
- MUIC_INT3,
-
- MAX77693_IRQ_GROUP_NR,
-};
-
#define SRC_IRQ_CHARGER BIT(0)
#define SRC_IRQ_TOP BIT(1)
#define SRC_IRQ_FLASH BIT(2)
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index c67c16ba8649..8e77ebeb7cf1 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -2,7 +2,7 @@
/*
* max77693.h - Driver for the Maxim 77693
*
- * Copyright (C) 2012 Samsung Electrnoics
+ * Copyright (C) 2012 Samsung Electronics
* SangYoung Son <hello.son@samsung.com>
*
* This program is not provided / owned by Maxim Integrated Products.
diff --git a/include/linux/mfd/max77705-private.h b/include/linux/mfd/max77705-private.h
new file mode 100644
index 000000000000..214de7feeb8c
--- /dev/null
+++ b/include/linux/mfd/max77705-private.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Maxim MAX77705 definitions.
+ *
+ * Copyright (C) 2015 Samsung Electronics, Inc.
+ * Copyright (C) 2025 Dzmitry Sankouski <dsankouski@gmail.com>
+ */
+
+#ifndef __LINUX_MFD_MAX77705_PRIV_H
+#define __LINUX_MFD_MAX77705_PRIV_H
+
+#define MAX77705_SRC_IRQ_CHG BIT(0)
+#define MAX77705_SRC_IRQ_TOP BIT(1)
+#define MAX77705_SRC_IRQ_FG BIT(2)
+#define MAX77705_SRC_IRQ_USBC BIT(3)
+#define MAX77705_SRC_IRQ_ALL (MAX77705_SRC_IRQ_CHG | MAX77705_SRC_IRQ_TOP | \
+ MAX77705_SRC_IRQ_FG | MAX77705_SRC_IRQ_USBC)
+
+/* MAX77705_PMIC_REG_PMICREV register */
+#define MAX77705_VERSION_SHIFT 3
+#define MAX77705_REVISION_MASK GENMASK(2, 0)
+#define MAX77705_VERSION_MASK GENMASK(7, MAX77705_VERSION_SHIFT)
+/* MAX77705_PMIC_REG_MAINCTRL1 register */
+#define MAX77705_MAINCTRL1_BIASEN_SHIFT 7
+#define MAX77705_MAINCTRL1_BIASEN_MASK BIT(MAX77705_MAINCTRL1_BIASEN_SHIFT)
+/* MAX77705_PMIC_REG_MCONFIG2 (haptics) register */
+#define MAX77705_CONFIG2_MEN_SHIFT 6
+#define MAX77705_CONFIG2_MODE_SHIFT 7
+#define MAX77705_CONFIG2_HTYP_SHIFT 5
+/* MAX77705_PMIC_REG_SYSTEM_INT_MASK register */
+#define MAX77705_SYSTEM_IRQ_BSTEN_INT BIT(3)
+#define MAX77705_SYSTEM_IRQ_SYSUVLO_INT BIT(4)
+#define MAX77705_SYSTEM_IRQ_SYSOVLO_INT BIT(5)
+#define MAX77705_SYSTEM_IRQ_TSHDN_INT BIT(6)
+#define MAX77705_SYSTEM_IRQ_TM_INT BIT(7)
+/* MAX77705_RGBLED_REG_LEDEN register */
+#define MAX77705_RGBLED_EN_WIDTH 2
+/* MAX77705_RGBLED_REG_LEDBLNK register */
+#define MAX77705_RGB_DELAY_100_STEP_LIM 500
+#define MAX77705_RGB_DELAY_100_STEP_COUNT 4
+#define MAX77705_RGB_DELAY_100_STEP 100
+#define MAX77705_RGB_DELAY_250_STEP_LIM 3250
+#define MAX77705_RGB_DELAY_250_STEP 250
+#define MAX77705_RGB_DELAY_500_STEP 500
+#define MAX77705_RGB_DELAY_500_STEP_COUNT 10
+#define MAX77705_RGB_DELAY_500_STEP_LIM 5000
+#define MAX77705_RGB_DELAY_1000_STEP_LIM 8000
+#define MAX77705_RGB_DELAY_1000_STEP_COUNT 13
+#define MAX77705_RGB_DELAY_1000_STEP 1000
+#define MAX77705_RGB_DELAY_2000_STEP 2000
+#define MAX77705_RGB_DELAY_2000_STEP_COUNT 13
+#define MAX77705_RGB_DELAY_2000_STEP_LIM 12000
+
+enum max77705_hw_rev {
+ MAX77705_PASS1 = 1,
+ MAX77705_PASS2,
+ MAX77705_PASS3
+};
+
+enum max77705_reg {
+ MAX77705_PMIC_REG_PMICID1 = 0x00,
+ MAX77705_PMIC_REG_PMICREV = 0x01,
+ MAX77705_PMIC_REG_MAINCTRL1 = 0x02,
+ MAX77705_PMIC_REG_BSTOUT_MASK = 0x03,
+ MAX77705_PMIC_REG_FORCE_EN_MASK = 0x08,
+ MAX77705_PMIC_REG_MCONFIG = 0x10,
+ MAX77705_PMIC_REG_MCONFIG2 = 0x11,
+ MAX77705_PMIC_REG_INTSRC = 0x22,
+ MAX77705_PMIC_REG_INTSRC_MASK = 0x23,
+ MAX77705_PMIC_REG_SYSTEM_INT = 0x24,
+ MAX77705_PMIC_REG_RESERVED_25 = 0x25,
+ MAX77705_PMIC_REG_SYSTEM_INT_MASK = 0x26,
+ MAX77705_PMIC_REG_RESERVED_27 = 0x27,
+ MAX77705_PMIC_REG_RESERVED_28 = 0x28,
+ MAX77705_PMIC_REG_RESERVED_29 = 0x29,
+ MAX77705_PMIC_REG_BOOSTCONTROL1 = 0x4C,
+ MAX77705_PMIC_REG_BOOSTCONTROL2 = 0x4F,
+ MAX77705_PMIC_REG_SW_RESET = 0x50,
+ MAX77705_PMIC_REG_USBC_RESET = 0x51,
+
+ MAX77705_PMIC_REG_END
+};
+
+enum max77705_chg_reg {
+ MAX77705_CHG_REG_BASE = 0xB0,
+ MAX77705_CHG_REG_INT = 0,
+ MAX77705_CHG_REG_INT_MASK,
+ MAX77705_CHG_REG_INT_OK,
+ MAX77705_CHG_REG_DETAILS_00,
+ MAX77705_CHG_REG_DETAILS_01,
+ MAX77705_CHG_REG_DETAILS_02,
+ MAX77705_CHG_REG_DTLS_03,
+ MAX77705_CHG_REG_CNFG_00,
+ MAX77705_CHG_REG_CNFG_01,
+ MAX77705_CHG_REG_CNFG_02,
+ MAX77705_CHG_REG_CNFG_03,
+ MAX77705_CHG_REG_CNFG_04,
+ MAX77705_CHG_REG_CNFG_05,
+ MAX77705_CHG_REG_CNFG_06,
+ MAX77705_CHG_REG_CNFG_07,
+ MAX77705_CHG_REG_CNFG_08,
+ MAX77705_CHG_REG_CNFG_09,
+ MAX77705_CHG_REG_CNFG_10,
+ MAX77705_CHG_REG_CNFG_11,
+
+ MAX77705_CHG_REG_CNFG_12,
+ MAX77705_CHG_REG_CNFG_13,
+ MAX77705_CHG_REG_CNFG_14,
+ MAX77705_CHG_REG_SAFEOUT_CTRL
+};
+
+enum max77705_fuelgauge_reg {
+ STATUS_REG = 0x00,
+ VALRT_THRESHOLD_REG = 0x01,
+ TALRT_THRESHOLD_REG = 0x02,
+ SALRT_THRESHOLD_REG = 0x03,
+ REMCAP_REP_REG = 0x05,
+ SOCREP_REG = 0x06,
+ TEMPERATURE_REG = 0x08,
+ VCELL_REG = 0x09,
+ TIME_TO_EMPTY_REG = 0x11,
+ FULLSOCTHR_REG = 0x13,
+ CURRENT_REG = 0x0A,
+ AVG_CURRENT_REG = 0x0B,
+ SOCMIX_REG = 0x0D,
+ SOCAV_REG = 0x0E,
+ REMCAP_MIX_REG = 0x0F,
+ FULLCAP_REG = 0x10,
+ RFAST_REG = 0x15,
+ AVR_TEMPERATURE_REG = 0x16,
+ CYCLES_REG = 0x17,
+ DESIGNCAP_REG = 0x18,
+ AVR_VCELL_REG = 0x19,
+ TIME_TO_FULL_REG = 0x20,
+ CONFIG_REG = 0x1D,
+ ICHGTERM_REG = 0x1E,
+ REMCAP_AV_REG = 0x1F,
+ FULLCAP_NOM_REG = 0x23,
+ LEARN_CFG_REG = 0x28,
+ FILTER_CFG_REG = 0x29,
+ MISCCFG_REG = 0x2B,
+ QRTABLE20_REG = 0x32,
+ FULLCAP_REP_REG = 0x35,
+ RCOMP_REG = 0x38,
+ VEMPTY_REG = 0x3A,
+ FSTAT_REG = 0x3D,
+ DISCHARGE_THRESHOLD_REG = 0x40,
+ QRTABLE30_REG = 0x42,
+ ISYS_REG = 0x43,
+ DQACC_REG = 0x45,
+ DPACC_REG = 0x46,
+ AVGISYS_REG = 0x4B,
+ QH_REG = 0x4D,
+ VSYS_REG = 0xB1,
+ TALRTTH2_REG = 0xB2,
+ VBYP_REG = 0xB3,
+ CONFIG2_REG = 0xBB,
+ IIN_REG = 0xD0,
+ OCV_REG = 0xEE,
+ VFOCV_REG = 0xFB,
+ VFSOC_REG = 0xFF,
+
+ MAX77705_FG_END
+};
+
+enum max77705_led_reg {
+ MAX77705_RGBLED_REG_BASE = 0x30,
+ MAX77705_RGBLED_REG_LEDEN = 0,
+ MAX77705_RGBLED_REG_LED0BRT,
+ MAX77705_RGBLED_REG_LED1BRT,
+ MAX77705_RGBLED_REG_LED2BRT,
+ MAX77705_RGBLED_REG_LED3BRT,
+ MAX77705_RGBLED_REG_LEDRMP,
+ MAX77705_RGBLED_REG_LEDBLNK,
+ MAX77705_LED_REG_END
+};
+
+enum max77705_charger_battery_state {
+ MAX77705_BATTERY_NOBAT,
+ MAX77705_BATTERY_PREQUALIFICATION,
+ MAX77705_BATTERY_DEAD,
+ MAX77705_BATTERY_GOOD,
+ MAX77705_BATTERY_LOWVOLTAGE,
+ MAX77705_BATTERY_OVERVOLTAGE,
+ MAX77705_BATTERY_RESERVED
+};
+
+enum max77705_charger_charge_type {
+ MAX77705_CHARGER_CONSTANT_CURRENT = 1,
+ MAX77705_CHARGER_CONSTANT_VOLTAGE,
+ MAX77705_CHARGER_END_OF_CHARGE,
+ MAX77705_CHARGER_DONE
+};
+
+#endif /* __LINUX_MFD_MAX77705_PRIV_H */
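
The PMICREV masks above split the register into a 3-bit revision field and a 5-bit version field. A hedged decode sketch (hypothetical function name; the register value is assumed to have been read via regmap already; FIELD_GET() is from <linux/bitfield.h>):

static bool max77705_example_is_pass3_or_later(unsigned int pmicrev)
{
	unsigned int rev = pmicrev & MAX77705_REVISION_MASK;
	unsigned int ver = FIELD_GET(MAX77705_VERSION_MASK, pmicrev);

	pr_debug("MAX77705 version %u, hw revision %u\n", ver, rev);

	/* Revisions map onto enum max77705_hw_rev (PASS1..PASS3) */
	return rev >= MAX77705_PASS3;
}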
diff --git a/include/linux/mfd/max77714.h b/include/linux/mfd/max77714.h
new file mode 100644
index 000000000000..7947e0d697a5
--- /dev/null
+++ b/include/linux/mfd/max77714.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Maxim MAX77714 register and data structure definitions.
+ *
+ * Copyright (C) 2022 Luca Ceresoli
+ * Author: Luca Ceresoli <luca.ceresoli@bootlin.com>
+ */
+
+#ifndef __LINUX_MFD_MAX77714_H_
+#define __LINUX_MFD_MAX77714_H_
+
+#include <linux/bits.h>
+
+#define MAX77714_INT_TOP 0x00
+#define MAX77714_INT_TOPM 0x07 /* Datasheet says "read only", but it is RW */
+
+#define MAX77714_INT_TOP_ONOFF BIT(1)
+#define MAX77714_INT_TOP_RTC BIT(3)
+#define MAX77714_INT_TOP_GPIO BIT(4)
+#define MAX77714_INT_TOP_LDO BIT(5)
+#define MAX77714_INT_TOP_SD BIT(6)
+#define MAX77714_INT_TOP_GLBL BIT(7)
+
+#define MAX77714_32K_STATUS 0x30
+#define MAX77714_32K_STATUS_SIOSCOK BIT(5)
+#define MAX77714_32K_STATUS_XOSCOK BIT(4)
+#define MAX77714_32K_STATUS_32KSOURCE BIT(3)
+#define MAX77714_32K_STATUS_32KLOAD_MSK 0x3
+#define MAX77714_32K_STATUS_32KLOAD_SHF 1
+#define MAX77714_32K_STATUS_CRYSTAL_CFG BIT(0)
+
+#define MAX77714_32K_CONFIG 0x31
+#define MAX77714_32K_CONFIG_XOSC_RETRY BIT(4)
+
+#define MAX77714_CNFG_GLBL2 0x91
+#define MAX77714_WDTEN BIT(2)
+#define MAX77714_WDTSLPC BIT(3)
+#define MAX77714_TWD_MASK 0x3
+#define MAX77714_TWD_2s 0x0
+#define MAX77714_TWD_16s 0x1
+#define MAX77714_TWD_64s 0x2
+#define MAX77714_TWD_128s 0x3
+
+#define MAX77714_CNFG_GLBL3 0x92
+#define MAX77714_WDTC BIT(0)
+
+#define MAX77714_CNFG2_ONOFF 0x94
+#define MAX77714_WD_RST_WK BIT(5)
+
+/* Interrupts */
+enum {
+ MAX77714_IRQ_TOP_ONOFF,
+ MAX77714_IRQ_TOP_RTC, /* Real-time clock */
+ MAX77714_IRQ_TOP_GPIO, /* GPIOs */
+ MAX77714_IRQ_TOP_LDO, /* Low-dropout regulators */
+ MAX77714_IRQ_TOP_SD, /* Step-down regulators */
+ MAX77714_IRQ_TOP_GLBL, /* "Global resources": Low-Battery, overtemp... */
+};
+
+#endif /* __LINUX_MFD_MAX77714_H_ */
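
The CNFG_GLBL2 bits above configure the watchdog. A sketch assuming the TWD field sits in the register's low two bits (the header defines MAX77714_TWD_MASK without a shift); the function name is hypothetical and regmap_update_bits() is the standard regmap call:

static int max77714_example_wdt_start(struct regmap *regmap)
{
	int ret;

	/* Select the 128 s timeout, assuming TWD occupies bits 1:0 */
	ret = regmap_update_bits(regmap, MAX77714_CNFG_GLBL2,
				 MAX77714_TWD_MASK, MAX77714_TWD_128s);
	if (ret)
		return ret;

	/* Then enable the watchdog */
	return regmap_update_bits(regmap, MAX77714_CNFG_GLBL2,
				  MAX77714_WDTEN, MAX77714_WDTEN);
}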
diff --git a/include/linux/mfd/max77759.h b/include/linux/mfd/max77759.h
new file mode 100644
index 000000000000..c6face34e385
--- /dev/null
+++ b/include/linux/mfd/max77759.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Google Inc.
+ * Copyright 2025 Linaro Ltd.
+ *
+ * Maxim MAX77759 core driver
+ */
+
+#ifndef __LINUX_MFD_MAX77759_H
+#define __LINUX_MFD_MAX77759_H
+
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+
+#define MAX77759_PMIC_REG_PMIC_ID 0x00
+#define MAX77759_PMIC_REG_PMIC_REVISION 0x01
+#define MAX77759_PMIC_REG_OTP_REVISION 0x02
+#define MAX77759_PMIC_REG_INTSRC 0x22
+#define MAX77759_PMIC_REG_INTSRCMASK 0x23
+#define MAX77759_PMIC_REG_INTSRC_MAXQ BIT(3)
+#define MAX77759_PMIC_REG_INTSRC_TOPSYS BIT(1)
+#define MAX77759_PMIC_REG_INTSRC_CHGR BIT(0)
+#define MAX77759_PMIC_REG_TOPSYS_INT 0x24
+#define MAX77759_PMIC_REG_TOPSYS_INT_MASK 0x26
+#define MAX77759_PMIC_REG_TOPSYS_INT_TSHDN BIT(6)
+#define MAX77759_PMIC_REG_TOPSYS_INT_SYSOVLO BIT(5)
+#define MAX77759_PMIC_REG_TOPSYS_INT_SYSUVLO BIT(4)
+#define MAX77759_PMIC_REG_TOPSYS_INT_FSHIP BIT(0)
+#define MAX77759_PMIC_REG_I2C_CNFG 0x40
+#define MAX77759_PMIC_REG_SWRESET 0x50
+#define MAX77759_PMIC_REG_CONTROL_FG 0x51
+
+#define MAX77759_MAXQ_REG_UIC_INT1 0x64
+#define MAX77759_MAXQ_REG_UIC_INT1_APCMDRESI BIT(7)
+#define MAX77759_MAXQ_REG_UIC_INT1_SYSMSGI BIT(6)
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIO6I BIT(1)
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIO5I BIT(0)
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(offs, en) (((en) & 1) << (offs))
+#define MAX77759_MAXQ_REG_UIC_INT1_GPIOxI_MASK(offs) \
+ MAX77759_MAXQ_REG_UIC_INT1_GPIOxI(offs, ~0)
+#define MAX77759_MAXQ_REG_UIC_INT2 0x65
+#define MAX77759_MAXQ_REG_UIC_INT3 0x66
+#define MAX77759_MAXQ_REG_UIC_INT4 0x67
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS1 0x68
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS2 0x69
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS3 0x6a
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS4 0x6b
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS5 0x6c
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS6 0x6d
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS7 0x6f
+#define MAX77759_MAXQ_REG_UIC_UIC_STATUS8 0x6f
+#define MAX77759_MAXQ_REG_UIC_INT1_M 0x70
+#define MAX77759_MAXQ_REG_UIC_INT2_M 0x71
+#define MAX77759_MAXQ_REG_UIC_INT3_M 0x72
+#define MAX77759_MAXQ_REG_UIC_INT4_M 0x73
+#define MAX77759_MAXQ_REG_AP_DATAOUT0 0x81
+#define MAX77759_MAXQ_REG_AP_DATAOUT32 0xa1
+#define MAX77759_MAXQ_REG_AP_DATAIN0 0xb1
+#define MAX77759_MAXQ_REG_UIC_SWRST 0xe0
+
+#define MAX77759_CHGR_REG_CHG_INT 0xb0
+#define MAX77759_CHGR_REG_CHG_INT2 0xb1
+#define MAX77759_CHGR_REG_CHG_INT_MASK 0xb2
+#define MAX77759_CHGR_REG_CHG_INT2_MASK 0xb3
+#define MAX77759_CHGR_REG_CHG_INT_OK 0xb4
+#define MAX77759_CHGR_REG_CHG_DETAILS_00 0xb5
+#define MAX77759_CHGR_REG_CHG_DETAILS_01 0xb6
+#define MAX77759_CHGR_REG_CHG_DETAILS_02 0xb7
+#define MAX77759_CHGR_REG_CHG_DETAILS_03 0xb8
+#define MAX77759_CHGR_REG_CHG_CNFG_00 0xb9
+#define MAX77759_CHGR_REG_CHG_CNFG_01 0xba
+#define MAX77759_CHGR_REG_CHG_CNFG_02 0xbb
+#define MAX77759_CHGR_REG_CHG_CNFG_03 0xbc
+#define MAX77759_CHGR_REG_CHG_CNFG_04 0xbd
+#define MAX77759_CHGR_REG_CHG_CNFG_05 0xbe
+#define MAX77759_CHGR_REG_CHG_CNFG_06 0xbf
+#define MAX77759_CHGR_REG_CHG_CNFG_07 0xc0
+#define MAX77759_CHGR_REG_CHG_CNFG_08 0xc1
+#define MAX77759_CHGR_REG_CHG_CNFG_09 0xc2
+#define MAX77759_CHGR_REG_CHG_CNFG_10 0xc3
+#define MAX77759_CHGR_REG_CHG_CNFG_11 0xc4
+#define MAX77759_CHGR_REG_CHG_CNFG_12 0xc5
+#define MAX77759_CHGR_REG_CHG_CNFG_13 0xc6
+#define MAX77759_CHGR_REG_CHG_CNFG_14 0xc7
+#define MAX77759_CHGR_REG_CHG_CNFG_15 0xc8
+#define MAX77759_CHGR_REG_CHG_CNFG_16 0xc9
+#define MAX77759_CHGR_REG_CHG_CNFG_17 0xca
+#define MAX77759_CHGR_REG_CHG_CNFG_18 0xcb
+#define MAX77759_CHGR_REG_CHG_CNFG_19 0xcc
+
+/* MaxQ opcodes for max77759_maxq_command() */
+#define MAX77759_MAXQ_OPCODE_MAXLENGTH (MAX77759_MAXQ_REG_AP_DATAOUT32 - \
+ MAX77759_MAXQ_REG_AP_DATAOUT0 + \
+ 1)
+
+#define MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_READ 0x21
+#define MAX77759_MAXQ_OPCODE_GPIO_TRIGGER_WRITE 0x22
+#define MAX77759_MAXQ_OPCODE_GPIO_CONTROL_READ 0x23
+#define MAX77759_MAXQ_OPCODE_GPIO_CONTROL_WRITE 0x24
+#define MAX77759_MAXQ_OPCODE_USER_SPACE_READ 0x81
+#define MAX77759_MAXQ_OPCODE_USER_SPACE_WRITE 0x82
+
+/**
+ * struct max77759 - core max77759 internal data structure
+ *
+ * @regmap_top: Regmap for accessing TOP registers
+ * @maxq_lock: Lock for serializing access to MaxQ
+ * @regmap_maxq: Regmap for accessing MaxQ registers
+ * @cmd_done: Used to signal completion of a MaxQ command
+ * @regmap_charger: Regmap for accessing charger registers
+ *
+ * The MAX77759 comprises several sub-blocks, namely TOP, MaxQ, Charger,
+ * Fuel Gauge, and TCPCI.
+ */
+struct max77759 {
+ struct regmap *regmap_top;
+
+ /* This protects MaxQ commands - only one can be active */
+ struct mutex maxq_lock;
+ struct regmap *regmap_maxq;
+ struct completion cmd_done;
+
+ struct regmap *regmap_charger;
+};
+
+/**
+ * struct max77759_maxq_command - structure containing the MaxQ command to
+ * send
+ *
+ * @length: The number of bytes to send.
+ * @cmd: The data to send.
+ */
+struct max77759_maxq_command {
+ u8 length;
+ u8 cmd[] __counted_by(length);
+};
+
+/**
+ * struct max77759_maxq_response - structure containing the MaxQ response
+ *
+ * @length: The number of bytes to receive.
+ * @rsp: The data received. Must have at least @length bytes space.
+ */
+struct max77759_maxq_response {
+ u8 length;
+ u8 rsp[] __counted_by(length);
+};
+
+/**
+ * max77759_maxq_command() - issue a MaxQ command and wait for the response
+ * and associated data
+ *
+ * @max77759: The core max77759 device handle.
+ * @cmd: The command to be sent.
+ * @rsp: Any response data associated with the command will be copied here;
+ * can be %NULL if the command has no response (other than ACK).
+ *
+ * Return: 0 on success, a negative error number otherwise.
+ */
+int max77759_maxq_command(struct max77759 *max77759,
+ const struct max77759_maxq_command *cmd,
+ struct max77759_maxq_response *rsp);
+
+#endif /* __LINUX_MFD_MAX77759_H */
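
Calling max77759_maxq_command() means building the two flexible-array structures above. A sketch under stated assumptions: the opcode-only request and the two-byte response layout (byte 0 echoing the opcode) are guesses, and the function name is hypothetical; struct_size() is from <linux/overflow.h>:

static int max77759_example_gpio_control_read(struct max77759 *max77759,
					      u8 *out)
{
	struct max77759_maxq_command *cmd;
	struct max77759_maxq_response *rsp;
	int ret;

	cmd = kzalloc(struct_size(cmd, cmd, 1), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;
	cmd->length = 1;
	cmd->cmd[0] = MAX77759_MAXQ_OPCODE_GPIO_CONTROL_READ;

	rsp = kzalloc(struct_size(rsp, rsp, 2), GFP_KERNEL);
	if (!rsp) {
		kfree(cmd);
		return -ENOMEM;
	}
	rsp->length = 2;

	ret = max77759_maxq_command(max77759, cmd, rsp);
	if (!ret)
		*out = rsp->rsp[1];	/* byte 0 assumed to echo the opcode */

	kfree(rsp);
	kfree(cmd);
	return ret;
}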
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index 0bc7454c4dbe..2fb4db67f110 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -198,7 +198,7 @@ enum max77843_irq_muic {
#define MAX77843_MCONFIG_MEN_MASK BIT(MCONFIG_MEN_SHIFT)
#define MAX77843_MCONFIG_PDIV_MASK (0x3 << MCONFIG_PDIV_SHIFT)
-/* Max77843 charger insterrupts */
+/* Max77843 charger interrupts */
#define MAX77843_CHG_BYP_I BIT(0)
#define MAX77843_CHG_BATP_I BIT(2)
#define MAX77843_CHG_BAT_I BIT(3)
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index a10cd6945232..261c0aae7d00 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -2,7 +2,7 @@
/*
* max8997-private.h - Voltage regulator driver for the Maxim 8997
*
- * Copyright (C) 2010 Samsung Electrnoics
+ * Copyright (C) 2010 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*/
@@ -397,7 +397,6 @@ enum max8997_types {
};
extern int max8997_irq_init(struct max8997_dev *max8997);
-extern void max8997_irq_exit(struct max8997_dev *max8997);
extern int max8997_irq_resume(struct max8997_dev *max8997);
extern int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest);
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index 6c98edcf4b0b..fb36e1386069 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -2,7 +2,7 @@
/*
* max8997.h - Driver for the Maxim 8997/8966
*
- * Copyright (C) 2009-2010 Samsung Electrnoics
+ * Copyright (C) 2009-2010 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*
* This driver is based on max8998.h
@@ -110,8 +110,6 @@ enum max8997_haptic_pwm_divisor {
/**
* max8997_haptic_platform_data
- * @pwm_channel_id: channel number of PWM device
- * valid for MAX8997_EXTERNAL_MODE
* @pwm_period: period in nano second for PWM device
* valid for MAX8997_EXTERNAL_MODE
* @type: motor type
@@ -128,7 +126,6 @@ enum max8997_haptic_pwm_divisor {
* [0 - 255]: available period
*/
struct max8997_haptic_platform_data {
- unsigned int pwm_channel_id;
unsigned int pwm_period;
enum max8997_haptic_motor_type type;
@@ -181,7 +178,6 @@ struct max8997_platform_data {
*
*/
bool ignore_gpiodvs_side_effect;
- int buck125_gpios[3]; /* GPIO of [0]SET1, [1]SET2, [2]SET3 */
int buck125_default_idx; /* Default value of SET1, 2, 3 */
unsigned int buck1_voltage[8]; /* buckx_voltage in uV */
bool buck1_gpiodvs;
diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h
index 6deb5f577602..d77dc18db6eb 100644
--- a/include/linux/mfd/max8998-private.h
+++ b/include/linux/mfd/max8998-private.h
@@ -2,7 +2,7 @@
/*
* max8998-private.h - Voltage regulator driver for the Maxim 8998
*
- * Copyright (C) 2009-2010 Samsung Electrnoics
+ * Copyright (C) 2009-2010 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
*/
diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h
index 79c020bd0c70..5473f1983e31 100644
--- a/include/linux/mfd/max8998.h
+++ b/include/linux/mfd/max8998.h
@@ -2,7 +2,7 @@
/*
* max8998.h - Voltage regulator driver for the Maxim 8998
*
- * Copyright (C) 2009-2010 Samsung Electrnoics
+ * Copyright (C) 2009-2010 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
*/
@@ -65,10 +65,7 @@ struct max8998_regulator_data {
* be other than the preset values.
* @buck1_voltage: BUCK1 DVS mode 1 voltage registers
* @buck2_voltage: BUCK2 DVS mode 2 voltage registers
- * @buck1_set1: BUCK1 gpio pin 1 to set output voltage
- * @buck1_set2: BUCK1 gpio pin 2 to set output voltage
* @buck1_default_idx: Default for BUCK1 gpio pin 1, 2
- * @buck2_set3: BUCK2 gpio pin to set output voltage
* @buck2_default_idx: Default for BUCK2 gpio pin.
* @wakeup: Allow to wake up from suspend
* @rtc_delay: LP3974 RTC chip bug that requires delay after a register
@@ -91,10 +88,7 @@ struct max8998_platform_data {
bool buck_voltage_lock;
int buck1_voltage[4];
int buck2_voltage[2];
- int buck1_set1;
- int buck1_set2;
int buck1_default_idx;
- int buck2_set3;
int buck2_default_idx;
bool wakeup;
bool rtc_delay;
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index f372926d5894..dd46fe424a80 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -31,12 +31,6 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx,
unsigned int mode, unsigned int channel,
u8 ato, bool atox, unsigned int *sample);
-/* Deprecated calls */
-static inline int mc13xxx_irq_ack(struct mc13xxx *mc13xxx, int irq)
-{
- return 0;
-}
-
static inline int mc13xxx_irq_request_nounmask(struct mc13xxx *mc13xxx, int irq,
irq_handler_t handler,
const char *name, void *dev)
diff --git a/include/linux/mfd/mt6328/core.h b/include/linux/mfd/mt6328/core.h
new file mode 100644
index 000000000000..9a08aed72b9f
--- /dev/null
+++ b/include/linux/mfd/mt6328/core.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#ifndef __MFD_MT6328_CORE_H__
+#define __MFD_MT6328_CORE_H__
+
+enum mt6328_irq_status_numbers {
+ MT6328_IRQ_STATUS_PWRKEY = 0,
+ MT6328_IRQ_STATUS_HOMEKEY,
+ MT6328_IRQ_STATUS_PWRKEY_R,
+ MT6328_IRQ_STATUS_HOMEKEY_R,
+ MT6328_IRQ_STATUS_THR_H,
+ MT6328_IRQ_STATUS_THR_L,
+ MT6328_IRQ_STATUS_BAT_H,
+ MT6328_IRQ_STATUS_BAT_L,
+ MT6328_IRQ_STATUS_RTC,
+ MT6328_IRQ_STATUS_AUDIO,
+ MT6328_IRQ_STATUS_ACCDET,
+ MT6328_IRQ_STATUS_ACCDET_EINT,
+ MT6328_IRQ_STATUS_ACCDET_NEGV,
+ MT6328_IRQ_STATUS_NI_LBAT_INT,
+ MT6328_IRQ_STATUS_VPROC_OC = 16,
+ MT6328_IRQ_STATUS_VSYS_OC,
+ MT6328_IRQ_STATUS_VLTE_OC,
+ MT6328_IRQ_STATUS_VCORE_OC,
+ MT6328_IRQ_STATUS_VPA_OC,
+ MT6328_IRQ_STATUS_LDO_OC,
+ MT6328_IRQ_STATUS_BAT2_H,
+ MT6328_IRQ_STATUS_BAT2_L,
+ MT6328_IRQ_STATUS_VISMPS0_H,
+ MT6328_IRQ_STATUS_VISMPS0_L,
+ MT6328_IRQ_STATUS_AUXADC_IMP,
+ MT6328_IRQ_STATUS_OV = 32,
+ MT6328_IRQ_STATUS_BVALID_DET,
+ MT6328_IRQ_STATUS_VBATON_HV,
+ MT6328_IRQ_STATUS_VBATON_UNDET,
+ MT6328_IRQ_STATUS_WATCHDOG,
+ MT6328_IRQ_STATUS_PCHR_CM_VDEC,
+ MT6328_IRQ_STATUS_CHRDET,
+ MT6328_IRQ_STATUS_PCHR_CM_VINC,
+ MT6328_IRQ_STATUS_FG_BAT_H,
+ MT6328_IRQ_STATUS_FG_BAT_L,
+ MT6328_IRQ_STATUS_FG_CUR_H,
+ MT6328_IRQ_STATUS_FG_CUR_L,
+ MT6328_IRQ_STATUS_FG_ZCV,
+ MT6328_IRQ_STATUS_SPKL_D,
+ MT6328_IRQ_STATUS_SPKL_AB,
+};
+
+#endif /* __MFD_MT6328_CORE_H__ */
diff --git a/include/linux/mfd/mt6328/registers.h b/include/linux/mfd/mt6328/registers.h
new file mode 100644
index 000000000000..8199aaea27b9
--- /dev/null
+++ b/include/linux/mfd/mt6328/registers.h
@@ -0,0 +1,822 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#ifndef __MFD_MT6328_REGISTERS_H__
+#define __MFD_MT6328_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6328_STRUP_CON0 0x0000
+#define MT6328_STRUP_CON2 0x0002
+#define MT6328_STRUP_CON3 0x0004
+#define MT6328_STRUP_CON4 0x0006
+#define MT6328_STRUP_CON5 0x0008
+#define MT6328_STRUP_CON6 0x000a
+#define MT6328_STRUP_CON7 0x000c
+#define MT6328_STRUP_CON8 0x000e
+#define MT6328_STRUP_CON9 0x0010
+#define MT6328_STRUP_CON10 0x0012
+#define MT6328_STRUP_CON11 0x0014
+#define MT6328_STRUP_CON12 0x0016
+#define MT6328_STRUP_CON13 0x0018
+#define MT6328_STRUP_CON14 0x001a
+#define MT6328_STRUP_CON15 0x001c
+#define MT6328_STRUP_CON16 0x001e
+#define MT6328_STRUP_CON17 0x0020
+#define MT6328_STRUP_CON18 0x0022
+#define MT6328_STRUP_CON19 0x0024
+#define MT6328_STRUP_CON20 0x0026
+#define MT6328_STRUP_CON21 0x0028
+#define MT6328_STRUP_CON22 0x002a
+#define MT6328_STRUP_CON23 0x002c
+#define MT6328_STRUP_CON24 0x002e
+#define MT6328_STRUP_CON25 0x0030
+#define MT6328_STRUP_CON26 0x0032
+#define MT6328_STRUP_CON27 0x0034
+#define MT6328_STRUP_CON28 0x0036
+#define MT6328_STRUP_CON29 0x0038
+#define MT6328_STRUP_CON30 0x003a
+#define MT6328_STRUP_CON31 0x003c
+#define MT6328_STRUP_CON32 0x003e
+#define MT6328_STRUP_ANA_CON0 0x0040
+#define MT6328_HWCID 0x0200
+#define MT6328_SWCID 0x0202
+#define MT6328_TOP_CON 0x0204
+#define MT6328_TEST_OUT 0x0206
+#define MT6328_TEST_CON0 0x0208
+#define MT6328_TEST_CON1 0x020a
+#define MT6328_TESTMODE_SW 0x020c
+#define MT6328_EN_STATUS0 0x020e
+#define MT6328_EN_STATUS1 0x0210
+#define MT6328_EN_STATUS2 0x0212
+#define MT6328_OCSTATUS0 0x0214
+#define MT6328_OCSTATUS1 0x0216
+#define MT6328_OCSTATUS2 0x0218
+#define MT6328_PGDEBSTATUS 0x021a
+#define MT6328_PGSTATUS 0x021c
+#define MT6328_THERMALSTATUS 0x021e
+#define MT6328_TOPSTATUS 0x0220
+#define MT6328_TDSEL_CON 0x0222
+#define MT6328_RDSEL_CON 0x0224
+#define MT6328_SMT_CON0 0x0226
+#define MT6328_SMT_CON1 0x0228
+#define MT6328_SMT_CON2 0x022a
+#define MT6328_DRV_CON0 0x022c
+#define MT6328_DRV_CON1 0x022e
+#define MT6328_DRV_CON2 0x0230
+#define MT6328_DRV_CON3 0x0232
+#define MT6328_TOP_STATUS 0x0234
+#define MT6328_TOP_STATUS_SET 0x0236
+#define MT6328_TOP_STATUS_CLR 0x0238
+#define MT6328_RGS_ANA_MON 0x023a
+#define MT6328_TOP_CKPDN_CON0 0x023c
+#define MT6328_TOP_CKPDN_CON0_SET 0x023e
+#define MT6328_TOP_CKPDN_CON0_CLR 0x0240
+#define MT6328_TOP_CKPDN_CON1 0x0242
+#define MT6328_TOP_CKPDN_CON1_SET 0x0244
+#define MT6328_TOP_CKPDN_CON1_CLR 0x0246
+#define MT6328_TOP_CKPDN_CON2 0x0248
+#define MT6328_TOP_CKPDN_CON2_SET 0x024a
+#define MT6328_TOP_CKPDN_CON2_CLR 0x024c
+#define MT6328_TOP_CKPDN_CON3 0x024e
+#define MT6328_TOP_CKPDN_CON3_SET 0x0250
+#define MT6328_TOP_CKPDN_CON3_CLR 0x0252
+#define MT6328_TOP_CKPDN_CON4 0x0254
+#define MT6328_TOP_CKPDN_CON4_SET 0x0256
+#define MT6328_TOP_CKPDN_CON4_CLR 0x0258
+#define MT6328_TOP_CKSEL_CON0 0x025a
+#define MT6328_TOP_CKSEL_CON0_SET 0x025c
+#define MT6328_TOP_CKSEL_CON0_CLR 0x025e
+#define MT6328_TOP_CKSEL_CON1 0x0260
+#define MT6328_TOP_CKSEL_CON1_SET 0x0262
+#define MT6328_TOP_CKSEL_CON1_CLR 0x0264
+#define MT6328_TOP_CKSEL_CON2 0x0266
+#define MT6328_TOP_CKSEL_CON2_SET 0x0268
+#define MT6328_TOP_CKSEL_CON2_CLR 0x026a
+#define MT6328_TOP_CKDIVSEL_CON0 0x026c
+#define MT6328_TOP_CKDIVSEL_CON0_SET 0x026e
+#define MT6328_TOP_CKDIVSEL_CON0_CLR 0x0270
+#define MT6328_TOP_CKDIVSEL_CON1 0x0272
+#define MT6328_TOP_CKDIVSEL_CON1_SET 0x0274
+#define MT6328_TOP_CKDIVSEL_CON1_CLR 0x0276
+#define MT6328_TOP_CKHWEN_CON0 0x0278
+#define MT6328_TOP_CKHWEN_CON0_SET 0x027a
+#define MT6328_TOP_CKHWEN_CON0_CLR 0x027c
+#define MT6328_TOP_CKHWEN_CON1 0x027e
+#define MT6328_TOP_CKHWEN_CON1_SET 0x0280
+#define MT6328_TOP_CKHWEN_CON1_CLR 0x0282
+#define MT6328_TOP_CKTST_CON0 0x0284
+#define MT6328_TOP_CKTST_CON1 0x0286
+#define MT6328_TOP_CKTST_CON2 0x0288
+#define MT6328_TOP_CLKSQ 0x028a
+#define MT6328_TOP_CLKSQ_SET 0x028c
+#define MT6328_TOP_CLKSQ_CLR 0x028e
+#define MT6328_TOP_CLKSQ_RTC 0x0290
+#define MT6328_TOP_CLKSQ_RTC_SET 0x0292
+#define MT6328_TOP_CLKSQ_RTC_CLR 0x0294
+#define MT6328_TOP_CLK_TRIM 0x0296
+#define MT6328_TOP_RST_CON0 0x0298
+#define MT6328_TOP_RST_CON0_SET 0x029a
+#define MT6328_TOP_RST_CON0_CLR 0x029c
+#define MT6328_TOP_RST_CON1 0x029e
+#define MT6328_TOP_RST_MISC 0x02a0
+#define MT6328_TOP_RST_MISC_SET 0x02a2
+#define MT6328_TOP_RST_MISC_CLR 0x02a4
+#define MT6328_TOP_RST_STATUS 0x02a6
+#define MT6328_TOP_RST_STATUS_SET 0x02a8
+#define MT6328_TOP_RST_STATUS_CLR 0x02aa
+#define MT6328_INT_CON0 0x02ac
+#define MT6328_INT_CON0_SET 0x02ae
+#define MT6328_INT_CON0_CLR 0x02b0
+#define MT6328_INT_CON1 0x02b2
+#define MT6328_INT_CON1_SET 0x02b4
+#define MT6328_INT_CON1_CLR 0x02b6
+#define MT6328_INT_CON2 0x02b8
+#define MT6328_INT_CON2_SET 0x02ba
+#define MT6328_INT_CON2_CLR 0x02bc
+#define MT6328_INT_MISC_CON 0x02be
+#define MT6328_INT_MISC_CON_SET 0x02c0
+#define MT6328_INT_MISC_CON_CLR 0x02c2
+#define MT6328_INT_STATUS0 0x02c4
+#define MT6328_INT_STATUS1 0x02c6
+#define MT6328_INT_STATUS2 0x02c8
+#define MT6328_OC_GEAR_0 0x02ca
+#define MT6328_FQMTR_CON0 0x02cc
+#define MT6328_FQMTR_CON1 0x02ce
+#define MT6328_FQMTR_CON2 0x02d0
+#define MT6328_RG_SPI_CON 0x02d2
+#define MT6328_DEW_DIO_EN 0x02d4
+#define MT6328_DEW_READ_TEST 0x02d6
+#define MT6328_DEW_WRITE_TEST 0x02d8
+#define MT6328_DEW_CRC_SWRST 0x02da
+#define MT6328_DEW_CRC_EN 0x02dc
+#define MT6328_DEW_CRC_VAL 0x02de
+#define MT6328_DEW_DBG_MON_SEL 0x02e0
+#define MT6328_DEW_CIPHER_KEY_SEL 0x02e2
+#define MT6328_DEW_CIPHER_IV_SEL 0x02e4
+#define MT6328_DEW_CIPHER_EN 0x02e6
+#define MT6328_DEW_CIPHER_RDY 0x02e8
+#define MT6328_DEW_CIPHER_MODE 0x02ea
+#define MT6328_DEW_CIPHER_SWRST 0x02ec
+#define MT6328_DEW_RDDMY_NO 0x02ee
+#define MT6328_INT_TYPE_CON0 0x02f0
+#define MT6328_INT_TYPE_CON0_SET 0x02f2
+#define MT6328_INT_TYPE_CON0_CLR 0x02f4
+#define MT6328_INT_TYPE_CON1 0x02f6
+#define MT6328_INT_TYPE_CON1_SET 0x02f8
+#define MT6328_INT_TYPE_CON1_CLR 0x02fa
+#define MT6328_INT_TYPE_CON2 0x02fc
+#define MT6328_INT_TYPE_CON2_SET 0x02fe
+#define MT6328_INT_TYPE_CON2_CLR 0x0300
+#define MT6328_INT_STA 0x0302
+#define MT6328_BUCK_ALL_CON0 0x0400
+#define MT6328_BUCK_ALL_CON1 0x0402
+#define MT6328_BUCK_ALL_CON2 0x0404
+#define MT6328_BUCK_ALL_CON3 0x0406
+#define MT6328_BUCK_ALL_CON4 0x0408
+#define MT6328_BUCK_ALL_CON5 0x040a
+#define MT6328_BUCK_ALL_CON6 0x040c
+#define MT6328_BUCK_ALL_CON9 0x040e
+#define MT6328_BUCK_ALL_CON12 0x0410
+#define MT6328_BUCK_ALL_CON13 0x0412
+#define MT6328_BUCK_ALL_CON14 0x0414
+#define MT6328_BUCK_ALL_CON16 0x0416
+#define MT6328_BUCK_ALL_CON18 0x0418
+#define MT6328_BUCK_ALL_CON19 0x041a
+#define MT6328_BUCK_ALL_CON20 0x041c
+#define MT6328_BUCK_ALL_CON21 0x041e
+#define MT6328_BUCK_ALL_CON22 0x0420
+#define MT6328_BUCK_ALL_CON23 0x0422
+#define MT6328_BUCK_ALL_CON24 0x0424
+#define MT6328_BUCK_ALL_CON25 0x0426
+#define MT6328_BUCK_ALL_CON26 0x0428
+#define MT6328_BUCK_ALL_CON27 0x042a
+#define MT6328_BUCK_ALL_CON28 0x042c
+#define MT6328_SMPS_TOP_ANA_CON0 0x042e
+#define MT6328_SMPS_TOP_ANA_CON1 0x0430
+#define MT6328_SMPS_TOP_ANA_CON2 0x0432
+#define MT6328_SMPS_TOP_ANA_CON3 0x0434
+#define MT6328_SMPS_TOP_ANA_CON4 0x0436
+#define MT6328_SMPS_TOP_ANA_CON5 0x0438
+#define MT6328_SMPS_TOP_ANA_CON6 0x043a
+#define MT6328_SMPS_TOP_ANA_CON7 0x043c
+#define MT6328_SMPS_TOP_ANA_CON8 0x043e
+#define MT6328_VCORE_ANA_CON0 0x0440
+#define MT6328_VCORE_ANA_CON1 0x0442
+#define MT6328_VCORE_ANA_CON2 0x0444
+#define MT6328_VCORE_ANA_CON3 0x0446
+#define MT6328_VCORE_ANA_CON4 0x0448
+#define MT6328_VSYS22_ANA_CON0 0x044a
+#define MT6328_VSYS22_ANA_CON1 0x044c
+#define MT6328_VSYS22_ANA_CON2 0x044e
+#define MT6328_VSYS22_ANA_CON3 0x0450
+#define MT6328_VSYS22_ANA_CON4 0x0452
+#define MT6328_VPROC_ANA_CON0 0x0454
+#define MT6328_VPROC_ANA_CON1 0x0456
+#define MT6328_VPROC_ANA_CON2 0x0458
+#define MT6328_VPROC_ANA_CON3 0x045a
+#define MT6328_VPROC_ANA_CON4 0x045c
+#define MT6328_OSC32_ANA_CON0 0x045e
+#define MT6328_OSC32_ANA_CON1 0x0460
+#define MT6328_VPA_ANA_CON0 0x0462
+#define MT6328_VPA_ANA_CON1 0x0464
+#define MT6328_VPA_ANA_CON2 0x0466
+#define MT6328_VPA_ANA_CON3 0x0468
+#define MT6328_VLTE_ANA_CON0 0x046a
+#define MT6328_VLTE_ANA_CON1 0x046c
+#define MT6328_VLTE_ANA_CON2 0x046e
+#define MT6328_VLTE_ANA_CON3 0x0470
+#define MT6328_VLTE_ANA_CON4 0x0472
+#define MT6328_VPROC_CON0 0x0474
+#define MT6328_VPROC_CON1 0x0476
+#define MT6328_VPROC_CON2 0x0478
+#define MT6328_VPROC_CON3 0x047a
+#define MT6328_VPROC_CON4 0x047c
+#define MT6328_VPROC_CON5 0x047e
+#define MT6328_VPROC_CON6 0x0480
+#define MT6328_VPROC_CON7 0x0482
+#define MT6328_VPROC_CON8 0x0484
+#define MT6328_VPROC_CON9 0x0486
+#define MT6328_VPROC_CON10 0x0488
+#define MT6328_VPROC_CON11 0x048a
+#define MT6328_VPROC_CON12 0x048c
+#define MT6328_VPROC_CON13 0x048e
+#define MT6328_VPROC_CON14 0x0490
+#define MT6328_VPROC_CON15 0x0492
+#define MT6328_VPROC_CON16 0x0494
+#define MT6328_VPROC_CON17 0x0496
+#define MT6328_VPROC_CON18 0x0498
+#define MT6328_VPROC_CON19 0x049a
+#define MT6328_VSRAM_CON0 0x049c
+#define MT6328_VSRAM_CON1 0x049e
+#define MT6328_VSRAM_CON2 0x04a0
+#define MT6328_VSRAM_CON3 0x04a2
+#define MT6328_VSRAM_CON4 0x04a4
+#define MT6328_VSRAM_CON5 0x04a6
+#define MT6328_VSRAM_CON6 0x04a8
+#define MT6328_VSRAM_CON7 0x04aa
+#define MT6328_VSRAM_CON8 0x04ac
+#define MT6328_VSRAM_CON9 0x04ae
+#define MT6328_VSRAM_CON10 0x04b0
+#define MT6328_VSRAM_CON11 0x04b2
+#define MT6328_VSRAM_CON12 0x04b4
+#define MT6328_VSRAM_CON13 0x04b6
+#define MT6328_VSRAM_CON14 0x04b8
+#define MT6328_VSRAM_CON15 0x04ba
+#define MT6328_VSRAM_CON16 0x04bc
+#define MT6328_VSRAM_CON17 0x04be
+#define MT6328_VSRAM_CON18 0x04c0
+#define MT6328_VSRAM_CON19 0x04c2
+#define MT6328_VLTE_CON0 0x04c4
+#define MT6328_VLTE_CON1 0x04c6
+#define MT6328_VLTE_CON2 0x04c8
+#define MT6328_VLTE_CON3 0x04ca
+#define MT6328_VLTE_CON4 0x04cc
+#define MT6328_VLTE_CON5 0x04ce
+#define MT6328_VLTE_CON6 0x04d0
+#define MT6328_VLTE_CON7 0x04d2
+#define MT6328_VLTE_CON8 0x04d4
+#define MT6328_VLTE_CON9 0x04d6
+#define MT6328_VLTE_CON10 0x04d8
+#define MT6328_VLTE_CON11 0x04da
+#define MT6328_VLTE_CON12 0x04dc
+#define MT6328_VLTE_CON13 0x04de
+#define MT6328_VLTE_CON14 0x04e0
+#define MT6328_VLTE_CON15 0x04e2
+#define MT6328_VLTE_CON16 0x04e4
+#define MT6328_VLTE_CON17 0x04e6
+#define MT6328_VLTE_CON18 0x04e8
+#define MT6328_VLTE_CON19 0x04ea
+#define MT6328_VCORE1_CON0 0x0600
+#define MT6328_VCORE1_CON1 0x0602
+#define MT6328_VCORE1_CON2 0x0604
+#define MT6328_VCORE1_CON3 0x0606
+#define MT6328_VCORE1_CON4 0x0608
+#define MT6328_VCORE1_CON5 0x060a
+#define MT6328_VCORE1_CON6 0x060c
+#define MT6328_VCORE1_CON7 0x060e
+#define MT6328_VCORE1_CON8 0x0610
+#define MT6328_VCORE1_CON9 0x0612
+#define MT6328_VCORE1_CON10 0x0614
+#define MT6328_VCORE1_CON11 0x0616
+#define MT6328_VCORE1_CON12 0x0618
+#define MT6328_VCORE1_CON13 0x061a
+#define MT6328_VCORE1_CON14 0x061c
+#define MT6328_VCORE1_CON15 0x061e
+#define MT6328_VCORE1_CON16 0x0620
+#define MT6328_VCORE1_CON17 0x0622
+#define MT6328_VCORE1_CON18 0x0624
+#define MT6328_VCORE1_CON19 0x0626
+#define MT6328_VSYS22_CON0 0x0628
+#define MT6328_VSYS22_CON1 0x062a
+#define MT6328_VSYS22_CON2 0x062c
+#define MT6328_VSYS22_CON3 0x062e
+#define MT6328_VSYS22_CON4 0x0630
+#define MT6328_VSYS22_CON5 0x0632
+#define MT6328_VSYS22_CON6 0x0634
+#define MT6328_VSYS22_CON7 0x0636
+#define MT6328_VSYS22_CON8 0x0638
+#define MT6328_VSYS22_CON9 0x063a
+#define MT6328_VSYS22_CON10 0x063c
+#define MT6328_VSYS22_CON11 0x063e
+#define MT6328_VSYS22_CON12 0x0640
+#define MT6328_VSYS22_CON13 0x0642
+#define MT6328_VSYS22_CON14 0x0644
+#define MT6328_VSYS22_CON15 0x0646
+#define MT6328_VSYS22_CON16 0x0648
+#define MT6328_VSYS22_CON17 0x064a
+#define MT6328_VSYS22_CON18 0x064c
+#define MT6328_VSYS22_CON19 0x064e
+#define MT6328_VPA_CON0 0x0650
+#define MT6328_VPA_CON1 0x0652
+#define MT6328_VPA_CON2 0x0654
+#define MT6328_VPA_CON3 0x0656
+#define MT6328_VPA_CON4 0x0658
+#define MT6328_VPA_CON5 0x065a
+#define MT6328_VPA_CON6 0x065c
+#define MT6328_VPA_CON7 0x065e
+#define MT6328_VPA_CON8 0x0660
+#define MT6328_VPA_CON9 0x0662
+#define MT6328_VPA_CON10 0x0664
+#define MT6328_VPA_CON11 0x0666
+#define MT6328_VPA_CON12 0x0668
+#define MT6328_VPA_CON13 0x066a
+#define MT6328_VPA_CON14 0x066c
+#define MT6328_VPA_CON15 0x066e
+#define MT6328_VPA_CON16 0x0670
+#define MT6328_VPA_CON17 0x0672
+#define MT6328_VPA_CON18 0x0674
+#define MT6328_VPA_CON19 0x0676
+#define MT6328_VPA_CON20 0x0678
+#define MT6328_VPA_CON21 0x067a
+#define MT6328_VPA_CON22 0x067c
+#define MT6328_VPA_CON23 0x067e
+#define MT6328_VPA_CON24 0x0680
+#define MT6328_BUCK_K_CON0 0x0682
+#define MT6328_BUCK_K_CON1 0x0684
+#define MT6328_BUCK_K_CON2 0x0686
+#define MT6328_BUCK_K_CON3 0x0688
+#define MT6328_ZCD_CON0 0x0800
+#define MT6328_ZCD_CON1 0x0802
+#define MT6328_ZCD_CON2 0x0804
+#define MT6328_ZCD_CON3 0x0806
+#define MT6328_ZCD_CON4 0x0808
+#define MT6328_ZCD_CON5 0x080a
+#define MT6328_ISINK0_CON0 0x080c
+#define MT6328_ISINK0_CON1 0x080e
+#define MT6328_ISINK0_CON2 0x0810
+#define MT6328_ISINK0_CON3 0x0812
+#define MT6328_ISINK1_CON0 0x0814
+#define MT6328_ISINK1_CON1 0x0816
+#define MT6328_ISINK1_CON2 0x0818
+#define MT6328_ISINK1_CON3 0x081a
+#define MT6328_ISINK2_CON1 0x081c
+#define MT6328_ISINK3_CON1 0x081e
+#define MT6328_ISINK_ANA0 0x0820
+#define MT6328_ISINK_ANA1 0x0822
+#define MT6328_ISINK_PHASE_DLY 0x0824
+#define MT6328_ISINK_SFSTR 0x0826
+#define MT6328_ISINK_EN_CTRL 0x0828
+#define MT6328_ISINK_MODE_CTRL 0x082a
+#define MT6328_VTCXO_0_CON0 0x0a00
+#define MT6328_VTCXO_1_CON0 0x0a02
+#define MT6328_VAUD28_CON0 0x0a04
+#define MT6328_VAUX18_CON0 0x0a06
+#define MT6328_VRF18_0_CON0 0x0a08
+#define MT6328_VRF18_0_CON1 0x0a0a
+#define MT6328_VCAMA_CON0 0x0a0c
+#define MT6328_VCN28_CON0 0x0a0e
+#define MT6328_VCN33_CON0 0x0a10
+#define MT6328_VCN33_CON1 0x0a12
+#define MT6328_VCN33_CON2 0x0a14
+#define MT6328_VRF18_1_CON0 0x0a16
+#define MT6328_VRF18_1_CON1 0x0a18
+#define MT6328_VUSB33_CON0 0x0a1a
+#define MT6328_VMCH_CON0 0x0a1c
+#define MT6328_VMCH_CON1 0x0a1e
+#define MT6328_VMC_CON0 0x0a20
+#define MT6328_VMC_CON1 0x0a22
+#define MT6328_VEMC_3V3_CON0 0x0a24
+#define MT6328_VEMC_3V3_CON1 0x0a26
+#define MT6328_VIO28_CON0 0x0a28
+#define MT6328_VCAMAF_CON0 0x0a2a
+#define MT6328_VGP1_CON0 0x0a2c
+#define MT6328_VGP1_CON1 0x0a2e
+#define MT6328_VEFUSE_CON0 0x0a30
+#define MT6328_VSIM1_CON0 0x0a32
+#define MT6328_VSIM2_CON0 0x0a34
+#define MT6328_VIO18_CON0 0x0a36
+#define MT6328_VIBR_CON0 0x0a38
+#define MT6328_VCN18_CON0 0x0a3a
+#define MT6328_VCAM_CON0 0x0a3c
+#define MT6328_VCAMIO_CON0 0x0a3e
+#define MT6328_LDO_VSRAM_CON0 0x0a40
+#define MT6328_LDO_VSRAM_CON1 0x0a42
+#define MT6328_VTREF_CON0 0x0a44
+#define MT6328_VM_CON0 0x0a46
+#define MT6328_VM_CON1 0x0a48
+#define MT6328_VRTC_CON0 0x0a4a
+#define MT6328_LDO_OCFB0 0x0a4c
+#define MT6328_ALDO_ANA_CON0 0x0a4e
+#define MT6328_ADLDO_ANA_CON1 0x0a50
+#define MT6328_ADLDO_ANA_CON2 0x0a52
+#define MT6328_ADLDO_ANA_CON3 0x0a54
+#define MT6328_ADLDO_ANA_CON4 0x0a56
+#define MT6328_ADLDO_ANA_CON5 0x0a58
+#define MT6328_ADLDO_ANA_CON6 0x0a5a
+#define MT6328_ADLDO_ANA_CON7 0x0a5c
+#define MT6328_ADLDO_ANA_CON8 0x0a5e
+#define MT6328_ADLDO_ANA_CON9 0x0a60
+#define MT6328_ADLDO_ANA_CON10 0x0a62
+#define MT6328_ADLDO_ANA_CON11 0x0a64
+#define MT6328_ADLDO_ANA_CON12 0x0a66
+#define MT6328_ADLDO_ANA_CON13 0x0a68
+#define MT6328_DLDO_ANA_CON0 0x0a6a
+#define MT6328_DLDO_ANA_CON1 0x0a6c
+#define MT6328_DLDO_ANA_CON2 0x0a6e
+#define MT6328_DLDO_ANA_CON3 0x0a70
+#define MT6328_DLDO_ANA_CON4 0x0a72
+#define MT6328_DLDO_ANA_CON5 0x0a74
+#define MT6328_SLDO_ANA_CON0 0x0a76
+#define MT6328_SLDO_ANA_CON1 0x0a78
+#define MT6328_SLDO_ANA_CON2 0x0a7a
+#define MT6328_SLDO_ANA_CON3 0x0a7c
+#define MT6328_SLDO_ANA_CON4 0x0a7e
+#define MT6328_SLDO_ANA_CON5 0x0a80
+#define MT6328_SLDO_ANA_CON6 0x0a82
+#define MT6328_SLDO_ANA_CON7 0x0a84
+#define MT6328_SLDO_ANA_CON8 0x0a86
+#define MT6328_SLDO_ANA_CON9 0x0a88
+#define MT6328_SLDO_ANA_CON10 0x0a8a
+#define MT6328_LDO_RSV_CON0 0x0a8c
+#define MT6328_LDO_RSV_CON1 0x0a8e
+#define MT6328_SPK_CON0 0x0a90
+#define MT6328_SPK_CON1 0x0a92
+#define MT6328_SPK_CON2 0x0a94
+#define MT6328_SPK_CON3 0x0a96
+#define MT6328_SPK_CON4 0x0a98
+#define MT6328_SPK_CON5 0x0a9a
+#define MT6328_SPK_CON6 0x0a9c
+#define MT6328_SPK_CON7 0x0a9e
+#define MT6328_SPK_CON8 0x0aa0
+#define MT6328_SPK_CON9 0x0aa2
+#define MT6328_SPK_CON10 0x0aa4
+#define MT6328_SPK_CON11 0x0aa6
+#define MT6328_SPK_CON12 0x0aa8
+#define MT6328_SPK_CON13 0x0aaa
+#define MT6328_SPK_CON14 0x0aac
+#define MT6328_SPK_CON15 0x0aae
+#define MT6328_SPK_CON16 0x0ab0
+#define MT6328_SPK_ANA_CON0 0x0ab2
+#define MT6328_SPK_ANA_CON1 0x0ab4
+#define MT6328_SPK_ANA_CON3 0x0ab6
+#define MT6328_OTP_CON0 0x0c00
+#define MT6328_OTP_CON1 0x0c02
+#define MT6328_OTP_CON2 0x0c04
+#define MT6328_OTP_CON3 0x0c06
+#define MT6328_OTP_CON4 0x0c08
+#define MT6328_OTP_CON5 0x0c0a
+#define MT6328_OTP_CON6 0x0c0c
+#define MT6328_OTP_CON7 0x0c0e
+#define MT6328_OTP_CON8 0x0c10
+#define MT6328_OTP_CON9 0x0c12
+#define MT6328_OTP_CON10 0x0c14
+#define MT6328_OTP_CON11 0x0c16
+#define MT6328_OTP_CON12 0x0c18
+#define MT6328_OTP_CON13 0x0c1a
+#define MT6328_OTP_CON14 0x0c1c
+#define MT6328_OTP_DOUT_0_15 0x0c1e
+#define MT6328_OTP_DOUT_16_31 0x0c20
+#define MT6328_OTP_DOUT_32_47 0x0c22
+#define MT6328_OTP_DOUT_48_63 0x0c24
+#define MT6328_OTP_DOUT_64_79 0x0c26
+#define MT6328_OTP_DOUT_80_95 0x0c28
+#define MT6328_OTP_DOUT_96_111 0x0c2a
+#define MT6328_OTP_DOUT_112_127 0x0c2c
+#define MT6328_OTP_DOUT_128_143 0x0c2e
+#define MT6328_OTP_DOUT_144_159 0x0c30
+#define MT6328_OTP_DOUT_160_175 0x0c32
+#define MT6328_OTP_DOUT_176_191 0x0c34
+#define MT6328_OTP_DOUT_192_207 0x0c36
+#define MT6328_OTP_DOUT_208_223 0x0c38
+#define MT6328_OTP_DOUT_224_239 0x0c3a
+#define MT6328_OTP_DOUT_240_255 0x0c3c
+#define MT6328_OTP_DOUT_256_271 0x0c3e
+#define MT6328_OTP_DOUT_272_287 0x0c40
+#define MT6328_OTP_DOUT_288_303 0x0c42
+#define MT6328_OTP_DOUT_304_319 0x0c44
+#define MT6328_OTP_DOUT_320_335 0x0c46
+#define MT6328_OTP_DOUT_336_351 0x0c48
+#define MT6328_OTP_DOUT_352_367 0x0c4a
+#define MT6328_OTP_DOUT_368_383 0x0c4c
+#define MT6328_OTP_DOUT_384_399 0x0c4e
+#define MT6328_OTP_DOUT_400_415 0x0c50
+#define MT6328_OTP_DOUT_416_431 0x0c52
+#define MT6328_OTP_DOUT_432_447 0x0c54
+#define MT6328_OTP_DOUT_448_463 0x0c56
+#define MT6328_OTP_DOUT_464_479 0x0c58
+#define MT6328_OTP_DOUT_480_495 0x0c5a
+#define MT6328_OTP_DOUT_496_511 0x0c5c
+#define MT6328_OTP_VAL_0_15 0x0c5e
+#define MT6328_OTP_VAL_16_31 0x0c60
+#define MT6328_OTP_VAL_32_47 0x0c62
+#define MT6328_OTP_VAL_48_63 0x0c64
+#define MT6328_OTP_VAL_64_79 0x0c66
+#define MT6328_OTP_VAL_80_95 0x0c68
+#define MT6328_OTP_VAL_96_111 0x0c6a
+#define MT6328_OTP_VAL_112_127 0x0c6c
+#define MT6328_OTP_VAL_128_143 0x0c6e
+#define MT6328_OTP_VAL_144_159 0x0c70
+#define MT6328_OTP_VAL_160_175 0x0c72
+#define MT6328_OTP_VAL_176_191 0x0c74
+#define MT6328_OTP_VAL_192_207 0x0c76
+#define MT6328_OTP_VAL_208_223 0x0c78
+#define MT6328_OTP_VAL_224_239 0x0c7a
+#define MT6328_OTP_VAL_240_255 0x0c7c
+#define MT6328_OTP_VAL_256_271 0x0c7e
+#define MT6328_OTP_VAL_272_287 0x0c80
+#define MT6328_OTP_VAL_288_303 0x0c82
+#define MT6328_OTP_VAL_304_319 0x0c84
+#define MT6328_OTP_VAL_320_335 0x0c86
+#define MT6328_OTP_VAL_336_351 0x0c88
+#define MT6328_OTP_VAL_352_367 0x0c8a
+#define MT6328_OTP_VAL_368_383 0x0c8c
+#define MT6328_OTP_VAL_384_399 0x0c8e
+#define MT6328_OTP_VAL_400_415 0x0c90
+#define MT6328_OTP_VAL_416_431 0x0c92
+#define MT6328_OTP_VAL_432_447 0x0c94
+#define MT6328_OTP_VAL_448_463 0x0c96
+#define MT6328_OTP_VAL_464_479 0x0c98
+#define MT6328_OTP_VAL_480_495 0x0c9a
+#define MT6328_OTP_VAL_496_511 0x0c9c
+#define MT6328_RTC_MIX_CON0 0x0c9e
+#define MT6328_RTC_MIX_CON1 0x0ca0
+#define MT6328_RTC_MIX_CON2 0x0ca2
+#define MT6328_FGADC_CON0 0x0ca4
+#define MT6328_FGADC_CON1 0x0ca6
+#define MT6328_FGADC_CON2 0x0ca8
+#define MT6328_FGADC_CON3 0x0caa
+#define MT6328_FGADC_CON4 0x0cac
+#define MT6328_FGADC_CON5 0x0cae
+#define MT6328_FGADC_CON6 0x0cb0
+#define MT6328_FGADC_CON7 0x0cb2
+#define MT6328_FGADC_CON8 0x0cb4
+#define MT6328_FGADC_CON9 0x0cb6
+#define MT6328_FGADC_CON10 0x0cb8
+#define MT6328_FGADC_CON11 0x0cba
+#define MT6328_FGADC_CON12 0x0cbc
+#define MT6328_FGADC_CON13 0x0cbe
+#define MT6328_FGADC_CON14 0x0cc0
+#define MT6328_FGADC_CON15 0x0cc2
+#define MT6328_FGADC_CON16 0x0cc4
+#define MT6328_FGADC_CON17 0x0cc6
+#define MT6328_FGADC_CON18 0x0cc8
+#define MT6328_FGADC_CON19 0x0cca
+#define MT6328_FGADC_CON20 0x0ccc
+#define MT6328_FGADC_CON21 0x0cce
+#define MT6328_FGADC_CON22 0x0cd0
+#define MT6328_FGADC_CON23 0x0cd2
+#define MT6328_FGADC_CON24 0x0cd4
+#define MT6328_FGADC_CON25 0x0cd6
+#define MT6328_FGADC_CON26 0x0cd8
+#define MT6328_FGADC_CON27 0x0cda
+#define MT6328_AUDDEC_ANA_CON0 0x0cdc
+#define MT6328_AUDDEC_ANA_CON1 0x0cde
+#define MT6328_AUDDEC_ANA_CON2 0x0ce0
+#define MT6328_AUDDEC_ANA_CON3 0x0ce2
+#define MT6328_AUDDEC_ANA_CON4 0x0ce4
+#define MT6328_AUDDEC_ANA_CON5 0x0ce6
+#define MT6328_AUDDEC_ANA_CON6 0x0ce8
+#define MT6328_AUDDEC_ANA_CON7 0x0cea
+#define MT6328_AUDDEC_ANA_CON8 0x0cec
+#define MT6328_AUDENC_ANA_CON0 0x0cee
+#define MT6328_AUDENC_ANA_CON1 0x0cf0
+#define MT6328_AUDENC_ANA_CON2 0x0cf2
+#define MT6328_AUDENC_ANA_CON3 0x0cf4
+#define MT6328_AUDENC_ANA_CON4 0x0cf6
+#define MT6328_AUDENC_ANA_CON5 0x0cf8
+#define MT6328_AUDENC_ANA_CON6 0x0cfa
+#define MT6328_AUDENC_ANA_CON7 0x0cfc
+#define MT6328_AUDENC_ANA_CON8 0x0cfe
+#define MT6328_AUDENC_ANA_CON9 0x0d00
+#define MT6328_AUDENC_ANA_CON10 0x0d02
+#define MT6328_AUDNCP_CLKDIV_CON0 0x0d04
+#define MT6328_AUDNCP_CLKDIV_CON1 0x0d06
+#define MT6328_AUDNCP_CLKDIV_CON2 0x0d08
+#define MT6328_AUDNCP_CLKDIV_CON3 0x0d0a
+#define MT6328_AUDNCP_CLKDIV_CON4 0x0d0c
+#define MT6328_AUXADC_ADC0 0x0e00
+#define MT6328_AUXADC_ADC1 0x0e02
+#define MT6328_AUXADC_ADC2 0x0e04
+#define MT6328_AUXADC_ADC3 0x0e06
+#define MT6328_AUXADC_ADC4 0x0e08
+#define MT6328_AUXADC_ADC5 0x0e0a
+#define MT6328_AUXADC_ADC6 0x0e0c
+#define MT6328_AUXADC_ADC7 0x0e0e
+#define MT6328_AUXADC_ADC8 0x0e10
+#define MT6328_AUXADC_ADC9 0x0e12
+#define MT6328_AUXADC_ADC10 0x0e14
+#define MT6328_AUXADC_ADC11 0x0e16
+#define MT6328_AUXADC_ADC12 0x0e18
+#define MT6328_AUXADC_ADC13 0x0e1a
+#define MT6328_AUXADC_ADC14 0x0e1c
+#define MT6328_AUXADC_ADC15 0x0e1e
+#define MT6328_AUXADC_ADC16 0x0e20
+#define MT6328_AUXADC_ADC17 0x0e22
+#define MT6328_AUXADC_ADC18 0x0e24
+#define MT6328_AUXADC_ADC19 0x0e26
+#define MT6328_AUXADC_ADC20 0x0e28
+#define MT6328_AUXADC_ADC21 0x0e2a
+#define MT6328_AUXADC_ADC22 0x0e2c
+#define MT6328_AUXADC_ADC23 0x0e2e
+#define MT6328_AUXADC_ADC24 0x0e30
+#define MT6328_AUXADC_ADC25 0x0e32
+#define MT6328_AUXADC_ADC26 0x0e34
+#define MT6328_AUXADC_ADC27 0x0e36
+#define MT6328_AUXADC_ADC28 0x0e38
+#define MT6328_AUXADC_ADC29 0x0e3a
+#define MT6328_AUXADC_ADC30 0x0e3c
+#define MT6328_AUXADC_ADC31 0x0e3e
+#define MT6328_AUXADC_ADC32 0x0e40
+#define MT6328_AUXADC_ADC33 0x0e42
+#define MT6328_AUXADC_BUF0 0x0e44
+#define MT6328_AUXADC_BUF1 0x0e46
+#define MT6328_AUXADC_BUF2 0x0e48
+#define MT6328_AUXADC_BUF3 0x0e4a
+#define MT6328_AUXADC_BUF4 0x0e4c
+#define MT6328_AUXADC_BUF5 0x0e4e
+#define MT6328_AUXADC_BUF6 0x0e50
+#define MT6328_AUXADC_BUF7 0x0e52
+#define MT6328_AUXADC_BUF8 0x0e54
+#define MT6328_AUXADC_BUF9 0x0e56
+#define MT6328_AUXADC_BUF10 0x0e58
+#define MT6328_AUXADC_BUF11 0x0e5a
+#define MT6328_AUXADC_BUF12 0x0e5c
+#define MT6328_AUXADC_BUF13 0x0e5e
+#define MT6328_AUXADC_BUF14 0x0e60
+#define MT6328_AUXADC_BUF15 0x0e62
+#define MT6328_AUXADC_BUF16 0x0e64
+#define MT6328_AUXADC_BUF17 0x0e66
+#define MT6328_AUXADC_BUF18 0x0e68
+#define MT6328_AUXADC_BUF19 0x0e6a
+#define MT6328_AUXADC_BUF20 0x0e6c
+#define MT6328_AUXADC_BUF21 0x0e6e
+#define MT6328_AUXADC_BUF22 0x0e70
+#define MT6328_AUXADC_BUF23 0x0e72
+#define MT6328_AUXADC_BUF24 0x0e74
+#define MT6328_AUXADC_BUF25 0x0e76
+#define MT6328_AUXADC_BUF26 0x0e78
+#define MT6328_AUXADC_BUF27 0x0e7a
+#define MT6328_AUXADC_BUF28 0x0e7c
+#define MT6328_AUXADC_BUF29 0x0e7e
+#define MT6328_AUXADC_BUF30 0x0e80
+#define MT6328_AUXADC_BUF31 0x0e82
+#define MT6328_AUXADC_STA0 0x0e84
+#define MT6328_AUXADC_STA1 0x0e86
+#define MT6328_AUXADC_RQST0 0x0e88
+#define MT6328_AUXADC_RQST0_SET 0x0e8a
+#define MT6328_AUXADC_RQST0_CLR 0x0e8c
+#define MT6328_AUXADC_RQST1 0x0e8e
+#define MT6328_AUXADC_RQST1_SET 0x0e90
+#define MT6328_AUXADC_RQST1_CLR 0x0e92
+#define MT6328_AUXADC_CON0 0x0e94
+#define MT6328_AUXADC_CON0_SET 0x0e96
+#define MT6328_AUXADC_CON0_CLR 0x0e98
+#define MT6328_AUXADC_CON1 0x0e9a
+#define MT6328_AUXADC_CON2 0x0e9c
+#define MT6328_AUXADC_CON3 0x0e9e
+#define MT6328_AUXADC_CON4 0x0ea0
+#define MT6328_AUXADC_CON5 0x0ea2
+#define MT6328_AUXADC_CON6 0x0ea4
+#define MT6328_AUXADC_CON7 0x0ea6
+#define MT6328_AUXADC_CON8 0x0ea8
+#define MT6328_AUXADC_CON9 0x0eaa
+#define MT6328_AUXADC_CON10 0x0eac
+#define MT6328_AUXADC_CON11 0x0eae
+#define MT6328_AUXADC_CON12 0x0eb0
+#define MT6328_AUXADC_CON13 0x0eb2
+#define MT6328_AUXADC_CON14 0x0eb4
+#define MT6328_AUXADC_CON15 0x0eb6
+#define MT6328_AUXADC_CON16 0x0eb8
+#define MT6328_AUXADC_AUTORPT0 0x0eba
+#define MT6328_AUXADC_LBAT0 0x0ebc
+#define MT6328_AUXADC_LBAT1 0x0ebe
+#define MT6328_AUXADC_LBAT2 0x0ec0
+#define MT6328_AUXADC_LBAT3 0x0ec2
+#define MT6328_AUXADC_LBAT4 0x0ec4
+#define MT6328_AUXADC_LBAT5 0x0ec6
+#define MT6328_AUXADC_LBAT6 0x0ec8
+#define MT6328_AUXADC_ACCDET 0x0eca
+#define MT6328_AUXADC_THR0 0x0ecc
+#define MT6328_AUXADC_THR1 0x0ece
+#define MT6328_AUXADC_THR2 0x0ed0
+#define MT6328_AUXADC_THR3 0x0ed2
+#define MT6328_AUXADC_THR4 0x0ed4
+#define MT6328_AUXADC_THR5 0x0ed6
+#define MT6328_AUXADC_THR6 0x0ed8
+#define MT6328_AUXADC_EFUSE0 0x0eda
+#define MT6328_AUXADC_EFUSE1 0x0edc
+#define MT6328_AUXADC_EFUSE2 0x0ede
+#define MT6328_AUXADC_EFUSE3 0x0ee0
+#define MT6328_AUXADC_EFUSE4 0x0ee2
+#define MT6328_AUXADC_EFUSE5 0x0ee4
+#define MT6328_AUXADC_DBG0 0x0ee6
+#define MT6328_AUXADC_IMP0 0x0ee8
+#define MT6328_AUXADC_IMP1 0x0eea
+#define MT6328_AUXADC_VISMPS0_1 0x0eec
+#define MT6328_AUXADC_VISMPS0_2 0x0eee
+#define MT6328_AUXADC_VISMPS0_3 0x0ef0
+#define MT6328_AUXADC_VISMPS0_4 0x0ef2
+#define MT6328_AUXADC_VISMPS0_5 0x0ef4
+#define MT6328_AUXADC_VISMPS0_6 0x0ef6
+#define MT6328_AUXADC_VISMPS0_7 0x0ef8
+#define MT6328_AUXADC_LBAT2_1 0x0efa
+#define MT6328_AUXADC_LBAT2_2 0x0efc
+#define MT6328_AUXADC_LBAT2_3 0x0efe
+#define MT6328_AUXADC_LBAT2_4 0x0f00
+#define MT6328_AUXADC_LBAT2_5 0x0f02
+#define MT6328_AUXADC_LBAT2_6 0x0f04
+#define MT6328_AUXADC_LBAT2_7 0x0f06
+#define MT6328_AUXADC_MDBG_0 0x0f08
+#define MT6328_AUXADC_MDBG_1 0x0f0a
+#define MT6328_AUXADC_MDBG_2 0x0f0c
+#define MT6328_AUXADC_MDRT_0 0x0f0e
+#define MT6328_AUXADC_MDRT_1 0x0f10
+#define MT6328_AUXADC_MDRT_2 0x0f12
+#define MT6328_ACCDET_CON0 0x0f14
+#define MT6328_ACCDET_CON1 0x0f16
+#define MT6328_ACCDET_CON2 0x0f18
+#define MT6328_ACCDET_CON3 0x0f1a
+#define MT6328_ACCDET_CON4 0x0f1c
+#define MT6328_ACCDET_CON5 0x0f1e
+#define MT6328_ACCDET_CON6 0x0f20
+#define MT6328_ACCDET_CON7 0x0f22
+#define MT6328_ACCDET_CON8 0x0f24
+#define MT6328_ACCDET_CON9 0x0f26
+#define MT6328_ACCDET_CON10 0x0f28
+#define MT6328_ACCDET_CON11 0x0f2a
+#define MT6328_ACCDET_CON12 0x0f2c
+#define MT6328_ACCDET_CON13 0x0f2e
+#define MT6328_ACCDET_CON14 0x0f30
+#define MT6328_ACCDET_CON15 0x0f32
+#define MT6328_ACCDET_CON16 0x0f34
+#define MT6328_ACCDET_CON17 0x0f36
+#define MT6328_ACCDET_CON18 0x0f38
+#define MT6328_ACCDET_CON19 0x0f3a
+#define MT6328_ACCDET_CON20 0x0f3c
+#define MT6328_ACCDET_CON21 0x0f3e
+#define MT6328_ACCDET_CON22 0x0f40
+#define MT6328_ACCDET_CON23 0x0f42
+#define MT6328_ACCDET_CON24 0x0f44
+#define MT6328_ACCDET_CON25 0x0f46
+#define MT6328_CHR_CON0 0x0f48
+#define MT6328_CHR_CON1 0x0f4a
+#define MT6328_CHR_CON2 0x0f4c
+#define MT6328_CHR_CON3 0x0f4e
+#define MT6328_CHR_CON4 0x0f50
+#define MT6328_CHR_CON5 0x0f52
+#define MT6328_CHR_CON6 0x0f54
+#define MT6328_CHR_CON7 0x0f56
+#define MT6328_CHR_CON8 0x0f58
+#define MT6328_CHR_CON9 0x0f5a
+#define MT6328_CHR_CON10 0x0f5c
+#define MT6328_CHR_CON11 0x0f5e
+#define MT6328_CHR_CON12 0x0f60
+#define MT6328_CHR_CON13 0x0f62
+#define MT6328_CHR_CON14 0x0f64
+#define MT6328_CHR_CON15 0x0f66
+#define MT6328_CHR_CON16 0x0f68
+#define MT6328_CHR_CON17 0x0f6a
+#define MT6328_CHR_CON18 0x0f6c
+#define MT6328_CHR_CON19 0x0f6e
+#define MT6328_CHR_CON20 0x0f70
+#define MT6328_CHR_CON21 0x0f72
+#define MT6328_CHR_CON22 0x0f74
+#define MT6328_CHR_CON23 0x0f76
+#define MT6328_CHR_CON24 0x0f78
+#define MT6328_CHR_CON25 0x0f7a
+#define MT6328_CHR_CON26 0x0f7c
+#define MT6328_CHR_CON27 0x0f7e
+#define MT6328_CHR_CON28 0x0f80
+#define MT6328_CHR_CON29 0x0f82
+#define MT6328_CHR_CON30 0x0f84
+#define MT6328_CHR_CON31 0x0f86
+#define MT6328_CHR_CON32 0x0f88
+#define MT6328_CHR_CON33 0x0f8a
+#define MT6328_CHR_CON34 0x0f8c
+#define MT6328_CHR_CON35 0x0f8e
+#define MT6328_CHR_CON36 0x0f90
+#define MT6328_CHR_CON37 0x0f92
+#define MT6328_CHR_CON38 0x0f94
+#define MT6328_CHR_CON39 0x0f96
+#define MT6328_CHR_CON40 0x0f98
+#define MT6328_CHR_CON41 0x0f9a
+#define MT6328_CHR_CON42 0x0f9c
+#define MT6328_BATON_CON0 0x0f9e
+#define MT6328_CHR_CON43 0x0fa0
+#define MT6328_EOSC_CALI_CON0 0x0faa
+#define MT6328_EOSC_CALI_CON1 0x0fac
+#define MT6328_VRTC_PWM_CON0 0x0fae
+
+#endif /* __MFD_MT6328_REGISTERS_H__ */
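
The MT6328 map above follows the usual MediaTek PMIC layout in which many control registers come as CON/CON_SET/CON_CLR triplets, so a single bit can be set or cleared with one write instead of a read-modify-write over the PMIC wrapper. A minimal sketch of that idiom, assuming a regmap handle obtained from the PMIC core driver; the helper names are illustrative and not part of this patch:

/*
 * Sketch only: set or clear one bit in MT6328_INT_CON2 through its
 * _SET/_CLR aliases. The regmap handle and both helpers are
 * assumptions made for illustration; they are not part of this series.
 */
#include <linux/bits.h>
#include <linux/regmap.h>
#include <linux/mfd/mt6328/registers.h>

static int mt6328_int_con2_set_bit(struct regmap *regmap, unsigned int bit)
{
	/* One write to the _SET alias sets the bit atomically */
	return regmap_write(regmap, MT6328_INT_CON2_SET, BIT(bit));
}

static int mt6328_int_con2_clr_bit(struct regmap *regmap, unsigned int bit)
{
	/* One write to the _CLR alias clears it, no read-modify-write */
	return regmap_write(regmap, MT6328_INT_CON2_CLR, BIT(bit));
}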
diff --git a/include/linux/mfd/mt6331/core.h b/include/linux/mfd/mt6331/core.h
new file mode 100644
index 000000000000..df8e6b1e4bc1
--- /dev/null
+++ b/include/linux/mfd/mt6331/core.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6331_CORE_H__
+#define __MFD_MT6331_CORE_H__
+
+enum mt6331_irq_status_numbers {
+ MT6331_IRQ_STATUS_PWRKEY = 0,
+ MT6331_IRQ_STATUS_HOMEKEY,
+ MT6331_IRQ_STATUS_CHRDET,
+ MT6331_IRQ_STATUS_THR_H,
+ MT6331_IRQ_STATUS_THR_L,
+ MT6331_IRQ_STATUS_BAT_H,
+ MT6331_IRQ_STATUS_BAT_L,
+ MT6331_IRQ_STATUS_RTC,
+ MT6331_IRQ_STATUS_AUDIO,
+ MT6331_IRQ_STATUS_MAD,
+ MT6331_IRQ_STATUS_ACCDET,
+ MT6331_IRQ_STATUS_ACCDET_EINT,
+ MT6331_IRQ_STATUS_ACCDET_NEGV = 12,
+ MT6331_IRQ_STATUS_VDVFS11_OC = 16,
+ MT6331_IRQ_STATUS_VDVFS12_OC,
+ MT6331_IRQ_STATUS_VDVFS13_OC,
+ MT6331_IRQ_STATUS_VDVFS14_OC,
+ MT6331_IRQ_STATUS_GPU_OC,
+ MT6331_IRQ_STATUS_VCORE1_OC,
+ MT6331_IRQ_STATUS_VCORE2_OC,
+ MT6331_IRQ_STATUS_VIO18_OC,
+ MT6331_IRQ_STATUS_LDO_OC,
+ MT6331_IRQ_STATUS_NR,
+};
+
+#define MT6331_IRQ_CON0_BASE MT6331_IRQ_STATUS_PWRKEY
+#define MT6331_IRQ_CON0_BITS (MT6331_IRQ_STATUS_ACCDET_NEGV + 1)
+#define MT6331_IRQ_CON1_BASE MT6331_IRQ_STATUS_VDVFS11_OC
+#define MT6331_IRQ_CON1_BITS (MT6331_IRQ_STATUS_LDO_OC - MT6331_IRQ_STATUS_VDVFS11_OC + 1)
+
+#endif /* __MFD_MT6331_CORE_H__ */
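
The BASE/BITS macros above encode where each interrupt status number lands in the two MT6331 interrupt-enable banks: status numbers 0-12 live in CON0, 16-24 in CON1, and 13-15 are unused. A hedged sketch of that translation, using the MT6331_INT_CON0/MT6331_INT_CON1 offsets from the register header added below; the helper itself is illustrative, not part of this patch:

/*
 * Sketch only: map an mt6331_irq_status_numbers value to its
 * interrupt-enable register and bit. Not part of this series.
 */
#include <linux/mfd/mt6331/core.h>
#include <linux/mfd/mt6331/registers.h>

static void mt6331_irq_to_reg(unsigned int hwirq,
			      unsigned int *con_reg, unsigned int *bit)
{
	if (hwirq >= MT6331_IRQ_CON1_BASE) {
		*con_reg = MT6331_INT_CON1;
		*bit = hwirq - MT6331_IRQ_CON1_BASE;
	} else {
		/* CON0 starts at status number 0; bits 13-15 are reserved */
		*con_reg = MT6331_INT_CON0;
		*bit = hwirq - MT6331_IRQ_CON0_BASE;
	}
}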
diff --git a/include/linux/mfd/mt6331/registers.h b/include/linux/mfd/mt6331/registers.h
new file mode 100644
index 000000000000..e2be6bccd1a7
--- /dev/null
+++ b/include/linux/mfd/mt6331/registers.h
@@ -0,0 +1,584 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6331_REGISTERS_H__
+#define __MFD_MT6331_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6331_STRUP_CON0 0x0
+#define MT6331_STRUP_CON2 0x2
+#define MT6331_STRUP_CON3 0x4
+#define MT6331_STRUP_CON4 0x6
+#define MT6331_STRUP_CON5 0x8
+#define MT6331_STRUP_CON6 0xA
+#define MT6331_STRUP_CON7 0xC
+#define MT6331_STRUP_CON8 0xE
+#define MT6331_STRUP_CON9 0x10
+#define MT6331_STRUP_CON10 0x12
+#define MT6331_STRUP_CON11 0x14
+#define MT6331_STRUP_CON12 0x16
+#define MT6331_STRUP_CON13 0x18
+#define MT6331_STRUP_CON14 0x1A
+#define MT6331_STRUP_CON15 0x1C
+#define MT6331_STRUP_CON16 0x1E
+#define MT6331_STRUP_CON17 0x20
+#define MT6331_STRUP_CON18 0x22
+#define MT6331_HWCID 0x100
+#define MT6331_SWCID 0x102
+#define MT6331_EXT_PMIC_STATUS 0x104
+#define MT6331_TOP_CON 0x106
+#define MT6331_TEST_OUT 0x108
+#define MT6331_TEST_CON0 0x10A
+#define MT6331_TEST_CON1 0x10C
+#define MT6331_TESTMODE_SW 0x10E
+#define MT6331_EN_STATUS0 0x110
+#define MT6331_EN_STATUS1 0x112
+#define MT6331_EN_STATUS2 0x114
+#define MT6331_OCSTATUS0 0x116
+#define MT6331_OCSTATUS1 0x118
+#define MT6331_OCSTATUS2 0x11A
+#define MT6331_PGSTATUS 0x11C
+#define MT6331_TOPSTATUS 0x11E
+#define MT6331_TDSEL_CON 0x120
+#define MT6331_RDSEL_CON 0x122
+#define MT6331_SMT_CON0 0x124
+#define MT6331_SMT_CON1 0x126
+#define MT6331_SMT_CON2 0x128
+#define MT6331_DRV_CON0 0x12A
+#define MT6331_DRV_CON1 0x12C
+#define MT6331_DRV_CON2 0x12E
+#define MT6331_DRV_CON3 0x130
+#define MT6331_TOP_STATUS 0x132
+#define MT6331_TOP_STATUS_SET 0x134
+#define MT6331_TOP_STATUS_CLR 0x136
+#define MT6331_TOP_CKPDN_CON0 0x138
+#define MT6331_TOP_CKPDN_CON0_SET 0x13A
+#define MT6331_TOP_CKPDN_CON0_CLR 0x13C
+#define MT6331_TOP_CKPDN_CON1 0x13E
+#define MT6331_TOP_CKPDN_CON1_SET 0x140
+#define MT6331_TOP_CKPDN_CON1_CLR 0x142
+#define MT6331_TOP_CKPDN_CON2 0x144
+#define MT6331_TOP_CKPDN_CON2_SET 0x146
+#define MT6331_TOP_CKPDN_CON2_CLR 0x148
+#define MT6331_TOP_CKSEL_CON 0x14A
+#define MT6331_TOP_CKSEL_CON_SET 0x14C
+#define MT6331_TOP_CKSEL_CON_CLR 0x14E
+#define MT6331_TOP_CKHWEN_CON 0x150
+#define MT6331_TOP_CKHWEN_CON_SET 0x152
+#define MT6331_TOP_CKHWEN_CON_CLR 0x154
+#define MT6331_TOP_CKTST_CON0 0x156
+#define MT6331_TOP_CKTST_CON1 0x158
+#define MT6331_TOP_CLKSQ 0x15A
+#define MT6331_TOP_CLKSQ_SET 0x15C
+#define MT6331_TOP_CLKSQ_CLR 0x15E
+#define MT6331_TOP_RST_CON 0x160
+#define MT6331_TOP_RST_CON_SET 0x162
+#define MT6331_TOP_RST_CON_CLR 0x164
+#define MT6331_TOP_RST_MISC 0x166
+#define MT6331_TOP_RST_MISC_SET 0x168
+#define MT6331_TOP_RST_MISC_CLR 0x16A
+#define MT6331_INT_CON0 0x16C
+#define MT6331_INT_CON0_SET 0x16E
+#define MT6331_INT_CON0_CLR 0x170
+#define MT6331_INT_CON1 0x172
+#define MT6331_INT_CON1_SET 0x174
+#define MT6331_INT_CON1_CLR 0x176
+#define MT6331_INT_MISC_CON 0x178
+#define MT6331_INT_MISC_CON_SET 0x17A
+#define MT6331_INT_MISC_CON_CLR 0x17C
+#define MT6331_INT_STATUS_CON0 0x17E
+#define MT6331_INT_STATUS_CON1 0x180
+#define MT6331_OC_GEAR_0 0x182
+#define MT6331_FQMTR_CON0 0x184
+#define MT6331_FQMTR_CON1 0x186
+#define MT6331_FQMTR_CON2 0x188
+#define MT6331_RG_SPI_CON 0x18A
+#define MT6331_DEW_DIO_EN 0x18C
+#define MT6331_DEW_READ_TEST 0x18E
+#define MT6331_DEW_WRITE_TEST 0x190
+#define MT6331_DEW_CRC_SWRST 0x192
+#define MT6331_DEW_CRC_EN 0x194
+#define MT6331_DEW_CRC_VAL 0x196
+#define MT6331_DEW_DBG_MON_SEL 0x198
+#define MT6331_DEW_CIPHER_KEY_SEL 0x19A
+#define MT6331_DEW_CIPHER_IV_SEL 0x19C
+#define MT6331_DEW_CIPHER_EN 0x19E
+#define MT6331_DEW_CIPHER_RDY 0x1A0
+#define MT6331_DEW_CIPHER_MODE 0x1A2
+#define MT6331_DEW_CIPHER_SWRST 0x1A4
+#define MT6331_DEW_RDDMY_NO 0x1A6
+#define MT6331_INT_TYPE_CON0 0x1A8
+#define MT6331_INT_TYPE_CON0_SET 0x1AA
+#define MT6331_INT_TYPE_CON0_CLR 0x1AC
+#define MT6331_INT_TYPE_CON1 0x1AE
+#define MT6331_INT_TYPE_CON1_SET 0x1B0
+#define MT6331_INT_TYPE_CON1_CLR 0x1B2
+#define MT6331_INT_STA 0x1B4
+#define MT6331_BUCK_ALL_CON0 0x200
+#define MT6331_BUCK_ALL_CON1 0x202
+#define MT6331_BUCK_ALL_CON2 0x204
+#define MT6331_BUCK_ALL_CON3 0x206
+#define MT6331_BUCK_ALL_CON4 0x208
+#define MT6331_BUCK_ALL_CON5 0x20A
+#define MT6331_BUCK_ALL_CON6 0x20C
+#define MT6331_BUCK_ALL_CON7 0x20E
+#define MT6331_BUCK_ALL_CON8 0x210
+#define MT6331_BUCK_ALL_CON9 0x212
+#define MT6331_BUCK_ALL_CON10 0x214
+#define MT6331_BUCK_ALL_CON11 0x216
+#define MT6331_BUCK_ALL_CON12 0x218
+#define MT6331_BUCK_ALL_CON13 0x21A
+#define MT6331_BUCK_ALL_CON14 0x21C
+#define MT6331_BUCK_ALL_CON15 0x21E
+#define MT6331_BUCK_ALL_CON16 0x220
+#define MT6331_BUCK_ALL_CON17 0x222
+#define MT6331_BUCK_ALL_CON18 0x224
+#define MT6331_BUCK_ALL_CON19 0x226
+#define MT6331_BUCK_ALL_CON20 0x228
+#define MT6331_BUCK_ALL_CON21 0x22A
+#define MT6331_BUCK_ALL_CON22 0x22C
+#define MT6331_BUCK_ALL_CON23 0x22E
+#define MT6331_BUCK_ALL_CON24 0x230
+#define MT6331_BUCK_ALL_CON25 0x232
+#define MT6331_BUCK_ALL_CON26 0x234
+#define MT6331_VDVFS11_CON0 0x236
+#define MT6331_VDVFS11_CON1 0x238
+#define MT6331_VDVFS11_CON2 0x23A
+#define MT6331_VDVFS11_CON3 0x23C
+#define MT6331_VDVFS11_CON4 0x23E
+#define MT6331_VDVFS11_CON5 0x240
+#define MT6331_VDVFS11_CON6 0x242
+#define MT6331_VDVFS11_CON7 0x244
+#define MT6331_VDVFS11_CON8 0x246
+#define MT6331_VDVFS11_CON9 0x248
+#define MT6331_VDVFS11_CON10 0x24A
+#define MT6331_VDVFS11_CON11 0x24C
+#define MT6331_VDVFS11_CON12 0x24E
+#define MT6331_VDVFS11_CON13 0x250
+#define MT6331_VDVFS11_CON14 0x252
+#define MT6331_VDVFS11_CON18 0x25A
+#define MT6331_VDVFS11_CON19 0x25C
+#define MT6331_VDVFS11_CON20 0x25E
+#define MT6331_VDVFS11_CON21 0x260
+#define MT6331_VDVFS11_CON22 0x262
+#define MT6331_VDVFS11_CON23 0x264
+#define MT6331_VDVFS11_CON24 0x266
+#define MT6331_VDVFS11_CON25 0x268
+#define MT6331_VDVFS11_CON26 0x26A
+#define MT6331_VDVFS11_CON27 0x26C
+#define MT6331_VDVFS12_CON0 0x26E
+#define MT6331_VDVFS12_CON1 0x270
+#define MT6331_VDVFS12_CON2 0x272
+#define MT6331_VDVFS12_CON3 0x274
+#define MT6331_VDVFS12_CON4 0x276
+#define MT6331_VDVFS12_CON5 0x278
+#define MT6331_VDVFS12_CON6 0x27A
+#define MT6331_VDVFS12_CON7 0x27C
+#define MT6331_VDVFS12_CON8 0x27E
+#define MT6331_VDVFS12_CON9 0x280
+#define MT6331_VDVFS12_CON10 0x282
+#define MT6331_VDVFS12_CON11 0x284
+#define MT6331_VDVFS12_CON12 0x286
+#define MT6331_VDVFS12_CON13 0x288
+#define MT6331_VDVFS12_CON14 0x28A
+#define MT6331_VDVFS12_CON18 0x292
+#define MT6331_VDVFS12_CON19 0x294
+#define MT6331_VDVFS12_CON20 0x296
+#define MT6331_VDVFS13_CON0 0x298
+#define MT6331_VDVFS13_CON1 0x29A
+#define MT6331_VDVFS13_CON2 0x29C
+#define MT6331_VDVFS13_CON3 0x29E
+#define MT6331_VDVFS13_CON4 0x2A0
+#define MT6331_VDVFS13_CON5 0x2A2
+#define MT6331_VDVFS13_CON6 0x2A4
+#define MT6331_VDVFS13_CON7 0x2A6
+#define MT6331_VDVFS13_CON8 0x2A8
+#define MT6331_VDVFS13_CON9 0x2AA
+#define MT6331_VDVFS13_CON10 0x2AC
+#define MT6331_VDVFS13_CON11 0x2AE
+#define MT6331_VDVFS13_CON12 0x2B0
+#define MT6331_VDVFS13_CON13 0x2B2
+#define MT6331_VDVFS13_CON14 0x2B4
+#define MT6331_VDVFS13_CON18 0x2BC
+#define MT6331_VDVFS13_CON19 0x2BE
+#define MT6331_VDVFS13_CON20 0x2C0
+#define MT6331_VDVFS14_CON0 0x2C2
+#define MT6331_VDVFS14_CON1 0x2C4
+#define MT6331_VDVFS14_CON2 0x2C6
+#define MT6331_VDVFS14_CON3 0x2C8
+#define MT6331_VDVFS14_CON4 0x2CA
+#define MT6331_VDVFS14_CON5 0x2CC
+#define MT6331_VDVFS14_CON6 0x2CE
+#define MT6331_VDVFS14_CON7 0x2D0
+#define MT6331_VDVFS14_CON8 0x2D2
+#define MT6331_VDVFS14_CON9 0x2D4
+#define MT6331_VDVFS14_CON10 0x2D6
+#define MT6331_VDVFS14_CON11 0x2D8
+#define MT6331_VDVFS14_CON12 0x2DA
+#define MT6331_VDVFS14_CON13 0x2DC
+#define MT6331_VDVFS14_CON14 0x2DE
+#define MT6331_VDVFS14_CON18 0x2E6
+#define MT6331_VDVFS14_CON19 0x2E8
+#define MT6331_VDVFS14_CON20 0x2EA
+#define MT6331_VGPU_CON0 0x300
+#define MT6331_VGPU_CON1 0x302
+#define MT6331_VGPU_CON2 0x304
+#define MT6331_VGPU_CON3 0x306
+#define MT6331_VGPU_CON4 0x308
+#define MT6331_VGPU_CON5 0x30A
+#define MT6331_VGPU_CON6 0x30C
+#define MT6331_VGPU_CON7 0x30E
+#define MT6331_VGPU_CON8 0x310
+#define MT6331_VGPU_CON9 0x312
+#define MT6331_VGPU_CON10 0x314
+#define MT6331_VGPU_CON11 0x316
+#define MT6331_VGPU_CON12 0x318
+#define MT6331_VGPU_CON13 0x31A
+#define MT6331_VGPU_CON14 0x31C
+#define MT6331_VGPU_CON15 0x31E
+#define MT6331_VGPU_CON16 0x320
+#define MT6331_VGPU_CON17 0x322
+#define MT6331_VGPU_CON18 0x324
+#define MT6331_VGPU_CON19 0x326
+#define MT6331_VGPU_CON20 0x328
+#define MT6331_VCORE1_CON0 0x32A
+#define MT6331_VCORE1_CON1 0x32C
+#define MT6331_VCORE1_CON2 0x32E
+#define MT6331_VCORE1_CON3 0x330
+#define MT6331_VCORE1_CON4 0x332
+#define MT6331_VCORE1_CON5 0x334
+#define MT6331_VCORE1_CON6 0x336
+#define MT6331_VCORE1_CON7 0x338
+#define MT6331_VCORE1_CON8 0x33A
+#define MT6331_VCORE1_CON9 0x33C
+#define MT6331_VCORE1_CON10 0x33E
+#define MT6331_VCORE1_CON11 0x340
+#define MT6331_VCORE1_CON12 0x342
+#define MT6331_VCORE1_CON13 0x344
+#define MT6331_VCORE1_CON14 0x346
+#define MT6331_VCORE1_CON15 0x348
+#define MT6331_VCORE1_CON16 0x34A
+#define MT6331_VCORE1_CON17 0x34C
+#define MT6331_VCORE1_CON18 0x34E
+#define MT6331_VCORE1_CON19 0x350
+#define MT6331_VCORE1_CON20 0x352
+#define MT6331_VCORE2_CON0 0x354
+#define MT6331_VCORE2_CON1 0x356
+#define MT6331_VCORE2_CON2 0x358
+#define MT6331_VCORE2_CON3 0x35A
+#define MT6331_VCORE2_CON4 0x35C
+#define MT6331_VCORE2_CON5 0x35E
+#define MT6331_VCORE2_CON6 0x360
+#define MT6331_VCORE2_CON7 0x362
+#define MT6331_VCORE2_CON8 0x364
+#define MT6331_VCORE2_CON9 0x366
+#define MT6331_VCORE2_CON10 0x368
+#define MT6331_VCORE2_CON11 0x36A
+#define MT6331_VCORE2_CON12 0x36C
+#define MT6331_VCORE2_CON13 0x36E
+#define MT6331_VCORE2_CON14 0x370
+#define MT6331_VCORE2_CON15 0x372
+#define MT6331_VCORE2_CON16 0x374
+#define MT6331_VCORE2_CON17 0x376
+#define MT6331_VCORE2_CON18 0x378
+#define MT6331_VCORE2_CON19 0x37A
+#define MT6331_VCORE2_CON20 0x37C
+#define MT6331_VCORE2_CON21 0x37E
+#define MT6331_VIO18_CON0 0x380
+#define MT6331_VIO18_CON1 0x382
+#define MT6331_VIO18_CON2 0x384
+#define MT6331_VIO18_CON3 0x386
+#define MT6331_VIO18_CON4 0x388
+#define MT6331_VIO18_CON5 0x38A
+#define MT6331_VIO18_CON6 0x38C
+#define MT6331_VIO18_CON7 0x38E
+#define MT6331_VIO18_CON8 0x390
+#define MT6331_VIO18_CON9 0x392
+#define MT6331_VIO18_CON10 0x394
+#define MT6331_VIO18_CON11 0x396
+#define MT6331_VIO18_CON12 0x398
+#define MT6331_VIO18_CON13 0x39A
+#define MT6331_VIO18_CON14 0x39C
+#define MT6331_VIO18_CON15 0x39E
+#define MT6331_VIO18_CON16 0x3A0
+#define MT6331_VIO18_CON17 0x3A2
+#define MT6331_VIO18_CON18 0x3A4
+#define MT6331_VIO18_CON19 0x3A6
+#define MT6331_VIO18_CON20 0x3A8
+#define MT6331_BUCK_K_CON0 0x3AA
+#define MT6331_BUCK_K_CON1 0x3AC
+#define MT6331_BUCK_K_CON2 0x3AE
+#define MT6331_BUCK_K_CON3 0x3B0
+#define MT6331_ZCD_CON0 0x400
+#define MT6331_ZCD_CON1 0x402
+#define MT6331_ZCD_CON2 0x404
+#define MT6331_ZCD_CON3 0x406
+#define MT6331_ZCD_CON4 0x408
+#define MT6331_ZCD_CON5 0x40A
+#define MT6331_ISINK0_CON0 0x40C
+#define MT6331_ISINK0_CON1 0x40E
+#define MT6331_ISINK0_CON2 0x410
+#define MT6331_ISINK0_CON3 0x412
+#define MT6331_ISINK0_CON4 0x414
+#define MT6331_ISINK1_CON0 0x416
+#define MT6331_ISINK1_CON1 0x418
+#define MT6331_ISINK1_CON2 0x41A
+#define MT6331_ISINK1_CON3 0x41C
+#define MT6331_ISINK1_CON4 0x41E
+#define MT6331_ISINK2_CON0 0x420
+#define MT6331_ISINK2_CON1 0x422
+#define MT6331_ISINK2_CON2 0x424
+#define MT6331_ISINK2_CON3 0x426
+#define MT6331_ISINK2_CON4 0x428
+#define MT6331_ISINK3_CON0 0x42A
+#define MT6331_ISINK3_CON1 0x42C
+#define MT6331_ISINK3_CON2 0x42E
+#define MT6331_ISINK3_CON3 0x430
+#define MT6331_ISINK3_CON4 0x432
+#define MT6331_ISINK_ANA0 0x434
+#define MT6331_ISINK_ANA1 0x436
+#define MT6331_ISINK_PHASE_DLY 0x438
+#define MT6331_ISINK_EN_CTRL 0x43A
+#define MT6331_ANALDO_CON0 0x500
+#define MT6331_ANALDO_CON1 0x502
+#define MT6331_ANALDO_CON2 0x504
+#define MT6331_ANALDO_CON3 0x506
+#define MT6331_ANALDO_CON4 0x508
+#define MT6331_ANALDO_CON5 0x50A
+#define MT6331_ANALDO_CON6 0x50C
+#define MT6331_ANALDO_CON7 0x50E
+#define MT6331_ANALDO_CON8 0x510
+#define MT6331_ANALDO_CON9 0x512
+#define MT6331_ANALDO_CON10 0x514
+#define MT6331_ANALDO_CON11 0x516
+#define MT6331_ANALDO_CON12 0x518
+#define MT6331_ANALDO_CON13 0x51A
+#define MT6331_SYSLDO_CON0 0x51C
+#define MT6331_SYSLDO_CON1 0x51E
+#define MT6331_SYSLDO_CON2 0x520
+#define MT6331_SYSLDO_CON3 0x522
+#define MT6331_SYSLDO_CON4 0x524
+#define MT6331_SYSLDO_CON5 0x526
+#define MT6331_SYSLDO_CON6 0x528
+#define MT6331_SYSLDO_CON7 0x52A
+#define MT6331_SYSLDO_CON8 0x52C
+#define MT6331_SYSLDO_CON9 0x52E
+#define MT6331_SYSLDO_CON10 0x530
+#define MT6331_SYSLDO_CON11 0x532
+#define MT6331_SYSLDO_CON12 0x534
+#define MT6331_SYSLDO_CON13 0x536
+#define MT6331_SYSLDO_CON14 0x538
+#define MT6331_SYSLDO_CON15 0x53A
+#define MT6331_SYSLDO_CON16 0x53C
+#define MT6331_SYSLDO_CON17 0x53E
+#define MT6331_SYSLDO_CON18 0x540
+#define MT6331_SYSLDO_CON19 0x542
+#define MT6331_SYSLDO_CON20 0x544
+#define MT6331_SYSLDO_CON21 0x546
+#define MT6331_DIGLDO_CON0 0x548
+#define MT6331_DIGLDO_CON1 0x54A
+#define MT6331_DIGLDO_CON2 0x54C
+#define MT6331_DIGLDO_CON3 0x54E
+#define MT6331_DIGLDO_CON4 0x550
+#define MT6331_DIGLDO_CON5 0x552
+#define MT6331_DIGLDO_CON6 0x554
+#define MT6331_DIGLDO_CON7 0x556
+#define MT6331_DIGLDO_CON8 0x558
+#define MT6331_DIGLDO_CON9 0x55A
+#define MT6331_DIGLDO_CON10 0x55C
+#define MT6331_DIGLDO_CON11 0x55E
+#define MT6331_DIGLDO_CON12 0x560
+#define MT6331_DIGLDO_CON13 0x562
+#define MT6331_DIGLDO_CON14 0x564
+#define MT6331_DIGLDO_CON15 0x566
+#define MT6331_DIGLDO_CON16 0x568
+#define MT6331_DIGLDO_CON17 0x56A
+#define MT6331_DIGLDO_CON18 0x56C
+#define MT6331_DIGLDO_CON19 0x56E
+#define MT6331_DIGLDO_CON20 0x570
+#define MT6331_DIGLDO_CON21 0x572
+#define MT6331_DIGLDO_CON22 0x574
+#define MT6331_DIGLDO_CON23 0x576
+#define MT6331_DIGLDO_CON24 0x578
+#define MT6331_DIGLDO_CON25 0x57A
+#define MT6331_DIGLDO_CON26 0x57C
+#define MT6331_DIGLDO_CON27 0x57E
+#define MT6331_DIGLDO_CON28 0x580
+#define MT6331_OTP_CON0 0x600
+#define MT6331_OTP_CON1 0x602
+#define MT6331_OTP_CON2 0x604
+#define MT6331_OTP_CON3 0x606
+#define MT6331_OTP_CON4 0x608
+#define MT6331_OTP_CON5 0x60A
+#define MT6331_OTP_CON6 0x60C
+#define MT6331_OTP_CON7 0x60E
+#define MT6331_OTP_CON8 0x610
+#define MT6331_OTP_CON9 0x612
+#define MT6331_OTP_CON10 0x614
+#define MT6331_OTP_CON11 0x616
+#define MT6331_OTP_CON12 0x618
+#define MT6331_OTP_CON13 0x61A
+#define MT6331_OTP_CON14 0x61C
+#define MT6331_OTP_DOUT_0_15 0x61E
+#define MT6331_OTP_DOUT_16_31 0x620
+#define MT6331_OTP_DOUT_32_47 0x622
+#define MT6331_OTP_DOUT_48_63 0x624
+#define MT6331_OTP_DOUT_64_79 0x626
+#define MT6331_OTP_DOUT_80_95 0x628
+#define MT6331_OTP_DOUT_96_111 0x62A
+#define MT6331_OTP_DOUT_112_127 0x62C
+#define MT6331_OTP_DOUT_128_143 0x62E
+#define MT6331_OTP_DOUT_144_159 0x630
+#define MT6331_OTP_DOUT_160_175 0x632
+#define MT6331_OTP_DOUT_176_191 0x634
+#define MT6331_OTP_DOUT_192_207 0x636
+#define MT6331_OTP_DOUT_208_223 0x638
+#define MT6331_OTP_DOUT_224_239 0x63A
+#define MT6331_OTP_DOUT_240_255 0x63C
+#define MT6331_OTP_VAL_0_15 0x63E
+#define MT6331_OTP_VAL_16_31 0x640
+#define MT6331_OTP_VAL_32_47 0x642
+#define MT6331_OTP_VAL_48_63 0x644
+#define MT6331_OTP_VAL_64_79 0x646
+#define MT6331_OTP_VAL_80_95 0x648
+#define MT6331_OTP_VAL_96_111 0x64A
+#define MT6331_OTP_VAL_112_127 0x64C
+#define MT6331_OTP_VAL_128_143 0x64E
+#define MT6331_OTP_VAL_144_159 0x650
+#define MT6331_OTP_VAL_160_175 0x652
+#define MT6331_OTP_VAL_176_191 0x654
+#define MT6331_OTP_VAL_192_207 0x656
+#define MT6331_OTP_VAL_208_223 0x658
+#define MT6331_OTP_VAL_224_239 0x65A
+#define MT6331_OTP_VAL_240_255 0x65C
+#define MT6331_RTC_MIX_CON0 0x65E
+#define MT6331_RTC_MIX_CON1 0x660
+#define MT6331_AUDDAC_CFG0 0x662
+#define MT6331_AUDBUF_CFG0 0x664
+#define MT6331_AUDBUF_CFG1 0x666
+#define MT6331_AUDBUF_CFG2 0x668
+#define MT6331_AUDBUF_CFG3 0x66A
+#define MT6331_AUDBUF_CFG4 0x66C
+#define MT6331_AUDBUF_CFG5 0x66E
+#define MT6331_AUDBUF_CFG6 0x670
+#define MT6331_AUDBUF_CFG7 0x672
+#define MT6331_AUDBUF_CFG8 0x674
+#define MT6331_IBIASDIST_CFG0 0x676
+#define MT6331_AUDCLKGEN_CFG0 0x678
+#define MT6331_AUDLDO_CFG0 0x67A
+#define MT6331_AUDDCDC_CFG0 0x67C
+#define MT6331_AUDDCDC_CFG1 0x67E
+#define MT6331_AUDNVREGGLB_CFG0 0x680
+#define MT6331_AUD_NCP0 0x682
+#define MT6331_AUD_ZCD_CFG0 0x684
+#define MT6331_AUDPREAMP_CFG0 0x686
+#define MT6331_AUDPREAMP_CFG1 0x688
+#define MT6331_AUDPREAMP_CFG2 0x68A
+#define MT6331_AUDADC_CFG0 0x68C
+#define MT6331_AUDADC_CFG1 0x68E
+#define MT6331_AUDADC_CFG2 0x690
+#define MT6331_AUDADC_CFG3 0x692
+#define MT6331_AUDADC_CFG4 0x694
+#define MT6331_AUDADC_CFG5 0x696
+#define MT6331_AUDDIGMI_CFG0 0x698
+#define MT6331_AUDDIGMI_CFG1 0x69A
+#define MT6331_AUDMICBIAS_CFG0 0x69C
+#define MT6331_AUDMICBIAS_CFG1 0x69E
+#define MT6331_AUDENCSPARE_CFG0 0x6A0
+#define MT6331_AUDPREAMPGAIN_CFG0 0x6A2
+#define MT6331_AUDMADPLL_CFG0 0x6A4
+#define MT6331_AUDMADPLL_CFG1 0x6A6
+#define MT6331_AUDMADPLL_CFG2 0x6A8
+#define MT6331_AUDLDO_NVREG_CFG0 0x6AA
+#define MT6331_AUDLDO_NVREG_CFG1 0x6AC
+#define MT6331_AUDLDO_NVREG_CFG2 0x6AE
+#define MT6331_AUXADC_ADC0 0x700
+#define MT6331_AUXADC_ADC1 0x702
+#define MT6331_AUXADC_ADC2 0x704
+#define MT6331_AUXADC_ADC3 0x706
+#define MT6331_AUXADC_ADC4 0x708
+#define MT6331_AUXADC_ADC5 0x70A
+#define MT6331_AUXADC_ADC6 0x70C
+#define MT6331_AUXADC_ADC7 0x70E
+#define MT6331_AUXADC_ADC8 0x710
+#define MT6331_AUXADC_ADC9 0x712
+#define MT6331_AUXADC_ADC10 0x714
+#define MT6331_AUXADC_ADC11 0x716
+#define MT6331_AUXADC_ADC12 0x718
+#define MT6331_AUXADC_ADC13 0x71A
+#define MT6331_AUXADC_ADC14 0x71C
+#define MT6331_AUXADC_ADC15 0x71E
+#define MT6331_AUXADC_ADC16 0x720
+#define MT6331_AUXADC_ADC17 0x722
+#define MT6331_AUXADC_ADC18 0x724
+#define MT6331_AUXADC_ADC19 0x726
+#define MT6331_AUXADC_STA0 0x728
+#define MT6331_AUXADC_STA1 0x72A
+#define MT6331_AUXADC_RQST0 0x72C
+#define MT6331_AUXADC_RQST0_SET 0x72E
+#define MT6331_AUXADC_RQST0_CLR 0x730
+#define MT6331_AUXADC_RQST1 0x732
+#define MT6331_AUXADC_RQST1_SET 0x734
+#define MT6331_AUXADC_RQST1_CLR 0x736
+#define MT6331_AUXADC_CON0 0x738
+#define MT6331_AUXADC_CON1 0x73A
+#define MT6331_AUXADC_CON2 0x73C
+#define MT6331_AUXADC_CON3 0x73E
+#define MT6331_AUXADC_CON4 0x740
+#define MT6331_AUXADC_CON5 0x742
+#define MT6331_AUXADC_CON6 0x744
+#define MT6331_AUXADC_CON7 0x746
+#define MT6331_AUXADC_CON8 0x748
+#define MT6331_AUXADC_CON9 0x74A
+#define MT6331_AUXADC_CON10 0x74C
+#define MT6331_AUXADC_CON11 0x74E
+#define MT6331_AUXADC_CON12 0x750
+#define MT6331_AUXADC_CON13 0x752
+#define MT6331_AUXADC_CON14 0x754
+#define MT6331_AUXADC_CON15 0x756
+#define MT6331_AUXADC_CON16 0x758
+#define MT6331_AUXADC_CON17 0x75A
+#define MT6331_AUXADC_CON18 0x75C
+#define MT6331_AUXADC_CON19 0x75E
+#define MT6331_AUXADC_CON20 0x760
+#define MT6331_AUXADC_CON21 0x762
+#define MT6331_AUXADC_CON22 0x764
+#define MT6331_AUXADC_CON23 0x766
+#define MT6331_AUXADC_CON24 0x768
+#define MT6331_AUXADC_CON25 0x76A
+#define MT6331_AUXADC_CON26 0x76C
+#define MT6331_AUXADC_CON27 0x76E
+#define MT6331_AUXADC_CON28 0x770
+#define MT6331_AUXADC_CON29 0x772
+#define MT6331_AUXADC_CON30 0x774
+#define MT6331_AUXADC_CON31 0x776
+#define MT6331_AUXADC_CON32 0x778
+#define MT6331_ACCDET_CON0 0x77A
+#define MT6331_ACCDET_CON1 0x77C
+#define MT6331_ACCDET_CON2 0x77E
+#define MT6331_ACCDET_CON3 0x780
+#define MT6331_ACCDET_CON4 0x782
+#define MT6331_ACCDET_CON5 0x784
+#define MT6331_ACCDET_CON6 0x786
+#define MT6331_ACCDET_CON7 0x788
+#define MT6331_ACCDET_CON8 0x78A
+#define MT6331_ACCDET_CON9 0x78C
+#define MT6331_ACCDET_CON10 0x78E
+#define MT6331_ACCDET_CON11 0x790
+#define MT6331_ACCDET_CON12 0x792
+#define MT6331_ACCDET_CON13 0x794
+#define MT6331_ACCDET_CON14 0x796
+#define MT6331_ACCDET_CON15 0x798
+#define MT6331_ACCDET_CON16 0x79A
+#define MT6331_ACCDET_CON17 0x79C
+#define MT6331_ACCDET_CON18 0x79E
+#define MT6331_ACCDET_CON19 0x7A0
+#define MT6331_ACCDET_CON20 0x7A2
+#define MT6331_ACCDET_CON21 0x7A4
+#define MT6331_ACCDET_CON22 0x7A6
+#define MT6331_ACCDET_CON23 0x7A8
+#define MT6331_ACCDET_CON24 0x7AA
+
+#endif /* __MFD_MT6331_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6332/core.h b/include/linux/mfd/mt6332/core.h
new file mode 100644
index 000000000000..cd6013eb82d9
--- /dev/null
+++ b/include/linux/mfd/mt6332/core.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6332_CORE_H__
+#define __MFD_MT6332_CORE_H__
+
+enum mt6332_irq_status_numbers {
+ MT6332_IRQ_STATUS_CHR_COMPLETE = 0,
+ MT6332_IRQ_STATUS_THERMAL_SD,
+ MT6332_IRQ_STATUS_THERMAL_REG_IN,
+ MT6332_IRQ_STATUS_THERMAL_REG_OUT,
+ MT6332_IRQ_STATUS_OTG_OC,
+ MT6332_IRQ_STATUS_CHR_OC,
+ MT6332_IRQ_STATUS_OTG_THERMAL,
+ MT6332_IRQ_STATUS_CHRIN_SHORT,
+ MT6332_IRQ_STATUS_DRVCDT_SHORT,
+ MT6332_IRQ_STATUS_PLUG_IN_FLASH,
+ MT6332_IRQ_STATUS_CHRWDT_FLAG,
+ MT6332_IRQ_STATUS_FLASH_EN_TIMEOUT,
+ MT6332_IRQ_STATUS_FLASH_VLED1_SHORT,
+ MT6332_IRQ_STATUS_FLASH_VLED1_OPEN = 13,
+ MT6332_IRQ_STATUS_OV = 16,
+ MT6332_IRQ_STATUS_BVALID_DET,
+ MT6332_IRQ_STATUS_VBATON_UNDET,
+ MT6332_IRQ_STATUS_CHR_PLUG_IN,
+ MT6332_IRQ_STATUS_CHR_PLUG_OUT,
+ MT6332_IRQ_STATUS_BC11_TIMEOUT,
+ MT6332_IRQ_STATUS_FLASH_VLED2_SHORT,
+ MT6332_IRQ_STATUS_FLASH_VLED2_OPEN = 23,
+ MT6332_IRQ_STATUS_THR_H = 32,
+ MT6332_IRQ_STATUS_THR_L,
+ MT6332_IRQ_STATUS_BAT_H,
+ MT6332_IRQ_STATUS_BAT_L,
+ MT6332_IRQ_STATUS_M3_H,
+ MT6332_IRQ_STATUS_M3_L,
+ MT6332_IRQ_STATUS_FG_BAT_H,
+ MT6332_IRQ_STATUS_FG_BAT_L,
+ MT6332_IRQ_STATUS_FG_CUR_H,
+ MT6332_IRQ_STATUS_FG_CUR_L,
+ MT6332_IRQ_STATUS_SPKL_D,
+ MT6332_IRQ_STATUS_SPKL_AB,
+ MT6332_IRQ_STATUS_BIF,
+ MT6332_IRQ_STATUS_VWLED_OC = 45,
+ MT6332_IRQ_STATUS_VDRAM_OC = 48,
+ MT6332_IRQ_STATUS_VDVFS2_OC,
+ MT6332_IRQ_STATUS_VRF1_OC,
+ MT6332_IRQ_STATUS_VRF2_OC,
+ MT6332_IRQ_STATUS_VPA_OC,
+ MT6332_IRQ_STATUS_VSBST_OC,
+ MT6332_IRQ_STATUS_LDO_OC,
+ MT6332_IRQ_STATUS_NR,
+};
+
+#define MT6332_IRQ_CON0_BASE MT6332_IRQ_STATUS_CHR_COMPLETE
+#define MT6332_IRQ_CON0_BITS (MT6332_IRQ_STATUS_FLASH_VLED1_OPEN + 1)
+#define MT6332_IRQ_CON1_BASE MT6332_IRQ_STATUS_OV
+#define MT6332_IRQ_CON1_BITS (MT6332_IRQ_STATUS_FLASH_VLED2_OPEN - MT6332_IRQ_STATUS_OV + 1)
+#define MT6332_IRQ_CON2_BASE MT6332_IRQ_STATUS_THR_H
+#define MT6332_IRQ_CON2_BITS (MT6332_IRQ_STATUS_VWLED_OC - MT6332_IRQ_STATUS_THR_H + 1)
+#define MT6332_IRQ_CON3_BASE MT6332_IRQ_STATUS_VDRAM_OC
+#define MT6332_IRQ_CON3_BITS (MT6332_IRQ_STATUS_LDO_OC - MT6332_IRQ_STATUS_VDRAM_OC + 1)
+
+#endif /* __MFD_MT6332_CORE_H__ */
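
MT6332 spreads its status numbers over four banks with bases 0, 16, 32 and 48, described by the four BASE/BITS pairs above. Because the banks sit on 16-bit boundaries, a small table plus one division recovers the status register and bit for any status number. The sketch below assumes the MT6332_INT_STATUS0..3 offsets from the register header added below and a regmap handle; it is illustrative only, not part of this series:

/*
 * Sketch only: table-driven lookup of a pending MT6332 interrupt.
 * Not part of this series.
 */
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/regmap.h>
#include <linux/mfd/mt6332/core.h>
#include <linux/mfd/mt6332/registers.h>

static const struct {
	unsigned int sta_reg;	/* status register offset */
	unsigned int base;	/* first status number in the bank */
	unsigned int bits;	/* bits the bank actually uses */
} mt6332_irq_banks[] = {
	{ MT6332_INT_STATUS0, MT6332_IRQ_CON0_BASE, MT6332_IRQ_CON0_BITS },
	{ MT6332_INT_STATUS1, MT6332_IRQ_CON1_BASE, MT6332_IRQ_CON1_BITS },
	{ MT6332_INT_STATUS2, MT6332_IRQ_CON2_BASE, MT6332_IRQ_CON2_BITS },
	{ MT6332_INT_STATUS3, MT6332_IRQ_CON3_BASE, MT6332_IRQ_CON3_BITS },
};

static bool mt6332_irq_pending(struct regmap *regmap, unsigned int hwirq)
{
	unsigned int bank = hwirq / 16;	/* banks sit on 16-bit boundaries */
	unsigned int val;

	if (bank >= ARRAY_SIZE(mt6332_irq_banks))
		return false;
	/* Status numbers past the bank's used bits are reserved */
	if (hwirq - mt6332_irq_banks[bank].base >= mt6332_irq_banks[bank].bits)
		return false;
	if (regmap_read(regmap, mt6332_irq_banks[bank].sta_reg, &val))
		return false;

	return val & BIT(hwirq - mt6332_irq_banks[bank].base);
}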
diff --git a/include/linux/mfd/mt6332/registers.h b/include/linux/mfd/mt6332/registers.h
new file mode 100644
index 000000000000..65e0b86fceac
--- /dev/null
+++ b/include/linux/mfd/mt6332/registers.h
@@ -0,0 +1,642 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MFD_MT6332_REGISTERS_H__
+#define __MFD_MT6332_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6332_HWCID 0x8000
+#define MT6332_SWCID 0x8002
+#define MT6332_TOP_CON 0x8004
+#define MT6332_DDR_VREF_AP_CON 0x8006
+#define MT6332_DDR_VREF_DQ_CON 0x8008
+#define MT6332_DDR_VREF_CA_CON 0x800A
+#define MT6332_TEST_OUT 0x800C
+#define MT6332_TEST_CON0 0x800E
+#define MT6332_TEST_CON1 0x8010
+#define MT6332_TESTMODE_SW 0x8012
+#define MT6332_TESTMODE_ANA 0x8014
+#define MT6332_TDSEL_CON 0x8016
+#define MT6332_RDSEL_CON 0x8018
+#define MT6332_SMT_CON0 0x801A
+#define MT6332_SMT_CON1 0x801C
+#define MT6332_DRV_CON0 0x801E
+#define MT6332_DRV_CON1 0x8020
+#define MT6332_DRV_CON2 0x8022
+#define MT6332_EN_STATUS0 0x8024
+#define MT6332_OCSTATUS0 0x8026
+#define MT6332_TOP_STATUS 0x8028
+#define MT6332_TOP_STATUS_SET 0x802A
+#define MT6332_TOP_STATUS_CLR 0x802C
+#define MT6332_FLASH_CON0 0x802E
+#define MT6332_FLASH_CON1 0x8030
+#define MT6332_FLASH_CON2 0x8032
+#define MT6332_CORE_CON0 0x8034
+#define MT6332_CORE_CON1 0x8036
+#define MT6332_CORE_CON2 0x8038
+#define MT6332_CORE_CON3 0x803A
+#define MT6332_CORE_CON4 0x803C
+#define MT6332_CORE_CON5 0x803E
+#define MT6332_CORE_CON6 0x8040
+#define MT6332_CORE_CON7 0x8042
+#define MT6332_CORE_CON8 0x8044
+#define MT6332_CORE_CON9 0x8046
+#define MT6332_CORE_CON10 0x8048
+#define MT6332_CORE_CON11 0x804A
+#define MT6332_CORE_CON12 0x804C
+#define MT6332_CORE_CON13 0x804E
+#define MT6332_CORE_CON14 0x8050
+#define MT6332_CORE_CON15 0x8052
+#define MT6332_STA_CON0 0x8054
+#define MT6332_STA_CON1 0x8056
+#define MT6332_STA_CON2 0x8058
+#define MT6332_STA_CON3 0x805A
+#define MT6332_STA_CON4 0x805C
+#define MT6332_STA_CON5 0x805E
+#define MT6332_STA_CON6 0x8060
+#define MT6332_STA_CON7 0x8062
+#define MT6332_CHR_CON0 0x8064
+#define MT6332_CHR_CON1 0x8066
+#define MT6332_CHR_CON2 0x8068
+#define MT6332_CHR_CON3 0x806A
+#define MT6332_CHR_CON4 0x806C
+#define MT6332_CHR_CON5 0x806E
+#define MT6332_CHR_CON6 0x8070
+#define MT6332_CHR_CON7 0x8072
+#define MT6332_CHR_CON8 0x8074
+#define MT6332_CHR_CON9 0x8076
+#define MT6332_CHR_CON10 0x8078
+#define MT6332_CHR_CON11 0x807A
+#define MT6332_CHR_CON12 0x807C
+#define MT6332_CHR_CON13 0x807E
+#define MT6332_CHR_CON14 0x8080
+#define MT6332_CHR_CON15 0x8082
+#define MT6332_BOOST_CON0 0x8084
+#define MT6332_BOOST_CON1 0x8086
+#define MT6332_BOOST_CON2 0x8088
+#define MT6332_BOOST_CON3 0x808A
+#define MT6332_BOOST_CON4 0x808C
+#define MT6332_BOOST_CON5 0x808E
+#define MT6332_BOOST_CON6 0x8090
+#define MT6332_BOOST_CON7 0x8092
+#define MT6332_TOP_CKPDN_CON0 0x8094
+#define MT6332_TOP_CKPDN_CON0_SET 0x8096
+#define MT6332_TOP_CKPDN_CON0_CLR 0x8098
+#define MT6332_TOP_CKPDN_CON1 0x809A
+#define MT6332_TOP_CKPDN_CON1_SET 0x809C
+#define MT6332_TOP_CKPDN_CON1_CLR 0x809E
+#define MT6332_TOP_CKPDN_CON2 0x80A0
+#define MT6332_TOP_CKPDN_CON2_SET 0x80A2
+#define MT6332_TOP_CKPDN_CON2_CLR 0x80A4
+#define MT6332_TOP_CKSEL_CON0 0x80A6
+#define MT6332_TOP_CKSEL_CON0_SET 0x80A8
+#define MT6332_TOP_CKSEL_CON0_CLR 0x80AA
+#define MT6332_TOP_CKSEL_CON1 0x80AC
+#define MT6332_TOP_CKSEL_CON1_SET 0x80AE
+#define MT6332_TOP_CKSEL_CON1_CLR 0x80B0
+#define MT6332_TOP_CKHWEN_CON 0x80B2
+#define MT6332_TOP_CKHWEN_CON_SET 0x80B4
+#define MT6332_TOP_CKHWEN_CON_CLR 0x80B6
+#define MT6332_TOP_CKTST_CON0 0x80B8
+#define MT6332_TOP_CKTST_CON1 0x80BA
+#define MT6332_TOP_RST_CON 0x80BC
+#define MT6332_TOP_RST_CON_SET 0x80BE
+#define MT6332_TOP_RST_CON_CLR 0x80C0
+#define MT6332_TOP_RST_MISC 0x80C2
+#define MT6332_TOP_RST_MISC_SET 0x80C4
+#define MT6332_TOP_RST_MISC_CLR 0x80C6
+#define MT6332_INT_CON0 0x80C8
+#define MT6332_INT_CON0_SET 0x80CA
+#define MT6332_INT_CON0_CLR 0x80CC
+#define MT6332_INT_CON1 0x80CE
+#define MT6332_INT_CON1_SET 0x80D0
+#define MT6332_INT_CON1_CLR 0x80D2
+#define MT6332_INT_CON2 0x80D4
+#define MT6332_INT_CON2_SET 0x80D6
+#define MT6332_INT_CON2_CLR 0x80D8
+#define MT6332_INT_CON3 0x80DA
+#define MT6332_INT_CON3_SET 0x80DC
+#define MT6332_INT_CON3_CLR 0x80DE
+#define MT6332_CHRWDT_CON0 0x80E0
+#define MT6332_CHRWDT_STATUS0 0x80E2
+#define MT6332_INT_STATUS0 0x80E4
+#define MT6332_INT_STATUS1 0x80E6
+#define MT6332_INT_STATUS2 0x80E8
+#define MT6332_INT_STATUS3 0x80EA
+#define MT6332_OC_GEAR_0 0x80EC
+#define MT6332_OC_GEAR_1 0x80EE
+#define MT6332_OC_GEAR_2 0x80F0
+#define MT6332_INT_MISC_CON 0x80F2
+#define MT6332_RG_SPI_CON 0x80F4
+#define MT6332_DEW_DIO_EN 0x80F6
+#define MT6332_DEW_READ_TEST 0x80F8
+#define MT6332_DEW_WRITE_TEST 0x80FA
+#define MT6332_DEW_CRC_SWRST 0x80FC
+#define MT6332_DEW_CRC_EN 0x80FE
+#define MT6332_DEW_CRC_VAL 0x8100
+#define MT6332_DEW_DBG_MON_SEL 0x8102
+#define MT6332_DEW_CIPHER_KEY_SEL 0x8104
+#define MT6332_DEW_CIPHER_IV_SEL 0x8106
+#define MT6332_DEW_CIPHER_EN 0x8108
+#define MT6332_DEW_CIPHER_RDY 0x810A
+#define MT6332_DEW_CIPHER_MODE 0x810C
+#define MT6332_DEW_CIPHER_SWRST 0x810E
+#define MT6332_DEW_RDDMY_NO 0x8110
+#define MT6332_INT_STA 0x8112
+#define MT6332_BIF_CON0 0x8114
+#define MT6332_BIF_CON1 0x8116
+#define MT6332_BIF_CON2 0x8118
+#define MT6332_BIF_CON3 0x811A
+#define MT6332_BIF_CON4 0x811C
+#define MT6332_BIF_CON5 0x811E
+#define MT6332_BIF_CON6 0x8120
+#define MT6332_BIF_CON7 0x8122
+#define MT6332_BIF_CON8 0x8124
+#define MT6332_BIF_CON9 0x8126
+#define MT6332_BIF_CON10 0x8128
+#define MT6332_BIF_CON11 0x812A
+#define MT6332_BIF_CON12 0x812C
+#define MT6332_BIF_CON13 0x812E
+#define MT6332_BIF_CON14 0x8130
+#define MT6332_BIF_CON15 0x8132
+#define MT6332_BIF_CON16 0x8134
+#define MT6332_BIF_CON17 0x8136
+#define MT6332_BIF_CON18 0x8138
+#define MT6332_BIF_CON19 0x813A
+#define MT6332_BIF_CON20 0x813C
+#define MT6332_BIF_CON21 0x813E
+#define MT6332_BIF_CON22 0x8140
+#define MT6332_BIF_CON23 0x8142
+#define MT6332_BIF_CON24 0x8144
+#define MT6332_BIF_CON25 0x8146
+#define MT6332_BIF_CON26 0x8148
+#define MT6332_BIF_CON27 0x814A
+#define MT6332_BIF_CON28 0x814C
+#define MT6332_BIF_CON29 0x814E
+#define MT6332_BIF_CON30 0x8150
+#define MT6332_BIF_CON31 0x8152
+#define MT6332_BIF_CON32 0x8154
+#define MT6332_BIF_CON33 0x8156
+#define MT6332_BIF_CON34 0x8158
+#define MT6332_BIF_CON35 0x815A
+#define MT6332_BIF_CON36 0x815C
+#define MT6332_BATON_CON0 0x815E
+#define MT6332_BIF_CON37 0x8160
+#define MT6332_BIF_CON38 0x8162
+#define MT6332_CHR_CON16 0x8164
+#define MT6332_CHR_CON17 0x8166
+#define MT6332_CHR_CON18 0x8168
+#define MT6332_CHR_CON19 0x816A
+#define MT6332_CHR_CON20 0x816C
+#define MT6332_CHR_CON21 0x816E
+#define MT6332_CHR_CON22 0x8170
+#define MT6332_CHR_CON23 0x8172
+#define MT6332_CHR_CON24 0x8174
+#define MT6332_CHR_CON25 0x8176
+#define MT6332_STA_CON8 0x8178
+#define MT6332_BUCK_ALL_CON0 0x8400
+#define MT6332_BUCK_ALL_CON1 0x8402
+#define MT6332_BUCK_ALL_CON2 0x8404
+#define MT6332_BUCK_ALL_CON3 0x8406
+#define MT6332_BUCK_ALL_CON4 0x8408
+#define MT6332_BUCK_ALL_CON5 0x840A
+#define MT6332_BUCK_ALL_CON6 0x840C
+#define MT6332_BUCK_ALL_CON7 0x840E
+#define MT6332_BUCK_ALL_CON8 0x8410
+#define MT6332_BUCK_ALL_CON9 0x8412
+#define MT6332_BUCK_ALL_CON10 0x8414
+#define MT6332_BUCK_ALL_CON11 0x8416
+#define MT6332_BUCK_ALL_CON12 0x8418
+#define MT6332_BUCK_ALL_CON13 0x841A
+#define MT6332_BUCK_ALL_CON14 0x841C
+#define MT6332_BUCK_ALL_CON15 0x841E
+#define MT6332_BUCK_ALL_CON16 0x8420
+#define MT6332_BUCK_ALL_CON17 0x8422
+#define MT6332_BUCK_ALL_CON18 0x8424
+#define MT6332_BUCK_ALL_CON19 0x8426
+#define MT6332_BUCK_ALL_CON20 0x8428
+#define MT6332_BUCK_ALL_CON21 0x842A
+#define MT6332_BUCK_ALL_CON22 0x842C
+#define MT6332_BUCK_ALL_CON23 0x842E
+#define MT6332_BUCK_ALL_CON24 0x8430
+#define MT6332_BUCK_ALL_CON25 0x8432
+#define MT6332_BUCK_ALL_CON26 0x8434
+#define MT6332_BUCK_ALL_CON27 0x8436
+#define MT6332_VDRAM_CON0 0x8438
+#define MT6332_VDRAM_CON1 0x843A
+#define MT6332_VDRAM_CON2 0x843C
+#define MT6332_VDRAM_CON3 0x843E
+#define MT6332_VDRAM_CON4 0x8440
+#define MT6332_VDRAM_CON5 0x8442
+#define MT6332_VDRAM_CON6 0x8444
+#define MT6332_VDRAM_CON7 0x8446
+#define MT6332_VDRAM_CON8 0x8448
+#define MT6332_VDRAM_CON9 0x844A
+#define MT6332_VDRAM_CON10 0x844C
+#define MT6332_VDRAM_CON11 0x844E
+#define MT6332_VDRAM_CON12 0x8450
+#define MT6332_VDRAM_CON13 0x8452
+#define MT6332_VDRAM_CON14 0x8454
+#define MT6332_VDRAM_CON15 0x8456
+#define MT6332_VDRAM_CON16 0x8458
+#define MT6332_VDRAM_CON17 0x845A
+#define MT6332_VDRAM_CON18 0x845C
+#define MT6332_VDRAM_CON19 0x845E
+#define MT6332_VDRAM_CON20 0x8460
+#define MT6332_VDRAM_CON21 0x8462
+#define MT6332_VDVFS2_CON0 0x8464
+#define MT6332_VDVFS2_CON1 0x8466
+#define MT6332_VDVFS2_CON2 0x8468
+#define MT6332_VDVFS2_CON3 0x846A
+#define MT6332_VDVFS2_CON4 0x846C
+#define MT6332_VDVFS2_CON5 0x846E
+#define MT6332_VDVFS2_CON6 0x8470
+#define MT6332_VDVFS2_CON7 0x8472
+#define MT6332_VDVFS2_CON8 0x8474
+#define MT6332_VDVFS2_CON9 0x8476
+#define MT6332_VDVFS2_CON10 0x8478
+#define MT6332_VDVFS2_CON11 0x847A
+#define MT6332_VDVFS2_CON12 0x847C
+#define MT6332_VDVFS2_CON13 0x847E
+#define MT6332_VDVFS2_CON14 0x8480
+#define MT6332_VDVFS2_CON15 0x8482
+#define MT6332_VDVFS2_CON16 0x8484
+#define MT6332_VDVFS2_CON17 0x8486
+#define MT6332_VDVFS2_CON18 0x8488
+#define MT6332_VDVFS2_CON19 0x848A
+#define MT6332_VDVFS2_CON20 0x848C
+#define MT6332_VDVFS2_CON21 0x848E
+#define MT6332_VDVFS2_CON22 0x8490
+#define MT6332_VDVFS2_CON23 0x8492
+#define MT6332_VDVFS2_CON24 0x8494
+#define MT6332_VDVFS2_CON25 0x8496
+#define MT6332_VDVFS2_CON26 0x8498
+#define MT6332_VDVFS2_CON27 0x849A
+#define MT6332_VRF1_CON0 0x849C
+#define MT6332_VRF1_CON1 0x849E
+#define MT6332_VRF1_CON2 0x84A0
+#define MT6332_VRF1_CON3 0x84A2
+#define MT6332_VRF1_CON4 0x84A4
+#define MT6332_VRF1_CON5 0x84A6
+#define MT6332_VRF1_CON6 0x84A8
+#define MT6332_VRF1_CON7 0x84AA
+#define MT6332_VRF1_CON8 0x84AC
+#define MT6332_VRF1_CON9 0x84AE
+#define MT6332_VRF1_CON10 0x84B0
+#define MT6332_VRF1_CON11 0x84B2
+#define MT6332_VRF1_CON12 0x84B4
+#define MT6332_VRF1_CON13 0x84B6
+#define MT6332_VRF1_CON14 0x84B8
+#define MT6332_VRF1_CON15 0x84BA
+#define MT6332_VRF1_CON16 0x84BC
+#define MT6332_VRF1_CON17 0x84BE
+#define MT6332_VRF1_CON18 0x84C0
+#define MT6332_VRF1_CON19 0x84C2
+#define MT6332_VRF1_CON20 0x84C4
+#define MT6332_VRF1_CON21 0x84C6
+#define MT6332_VRF2_CON0 0x84C8
+#define MT6332_VRF2_CON1 0x84CA
+#define MT6332_VRF2_CON2 0x84CC
+#define MT6332_VRF2_CON3 0x84CE
+#define MT6332_VRF2_CON4 0x84D0
+#define MT6332_VRF2_CON5 0x84D2
+#define MT6332_VRF2_CON6 0x84D4
+#define MT6332_VRF2_CON7 0x84D6
+#define MT6332_VRF2_CON8 0x84D8
+#define MT6332_VRF2_CON9 0x84DA
+#define MT6332_VRF2_CON10 0x84DC
+#define MT6332_VRF2_CON11 0x84DE
+#define MT6332_VRF2_CON12 0x84E0
+#define MT6332_VRF2_CON13 0x84E2
+#define MT6332_VRF2_CON14 0x84E4
+#define MT6332_VRF2_CON15 0x84E6
+#define MT6332_VRF2_CON16 0x84E8
+#define MT6332_VRF2_CON17 0x84EA
+#define MT6332_VRF2_CON18 0x84EC
+#define MT6332_VRF2_CON19 0x84EE
+#define MT6332_VRF2_CON20 0x84F0
+#define MT6332_VRF2_CON21 0x84F2
+#define MT6332_VPA_CON0 0x84F4
+#define MT6332_VPA_CON1 0x84F6
+#define MT6332_VPA_CON2 0x84F8
+#define MT6332_VPA_CON3 0x84FC
+#define MT6332_VPA_CON4 0x84FE
+#define MT6332_VPA_CON5 0x8500
+#define MT6332_VPA_CON6 0x8502
+#define MT6332_VPA_CON7 0x8504
+#define MT6332_VPA_CON8 0x8506
+#define MT6332_VPA_CON9 0x8508
+#define MT6332_VPA_CON10 0x850A
+#define MT6332_VPA_CON11 0x850C
+#define MT6332_VPA_CON12 0x850E
+#define MT6332_VPA_CON13 0x8510
+#define MT6332_VPA_CON14 0x8512
+#define MT6332_VPA_CON15 0x8514
+#define MT6332_VPA_CON16 0x8516
+#define MT6332_VPA_CON17 0x8518
+#define MT6332_VPA_CON18 0x851A
+#define MT6332_VPA_CON19 0x851C
+#define MT6332_VPA_CON20 0x851E
+#define MT6332_VPA_CON21 0x8520
+#define MT6332_VPA_CON22 0x8522
+#define MT6332_VPA_CON23 0x8524
+#define MT6332_VPA_CON24 0x8526
+#define MT6332_VPA_CON25 0x8528
+#define MT6332_VSBST_CON0 0x852A
+#define MT6332_VSBST_CON1 0x852C
+#define MT6332_VSBST_CON2 0x852E
+#define MT6332_VSBST_CON3 0x8530
+#define MT6332_VSBST_CON4 0x8532
+#define MT6332_VSBST_CON5 0x8534
+#define MT6332_VSBST_CON6 0x8536
+#define MT6332_VSBST_CON7 0x8538
+#define MT6332_VSBST_CON8 0x853A
+#define MT6332_VSBST_CON9 0x853C
+#define MT6332_VSBST_CON10 0x853E
+#define MT6332_VSBST_CON11 0x8540
+#define MT6332_VSBST_CON12 0x8542
+#define MT6332_VSBST_CON13 0x8544
+#define MT6332_VSBST_CON14 0x8546
+#define MT6332_VSBST_CON15 0x8548
+#define MT6332_VSBST_CON16 0x854A
+#define MT6332_VSBST_CON17 0x854C
+#define MT6332_VSBST_CON18 0x854E
+#define MT6332_VSBST_CON19 0x8550
+#define MT6332_VSBST_CON20 0x8552
+#define MT6332_VSBST_CON21 0x8554
+#define MT6332_BUCK_K_CON0 0x8556
+#define MT6332_BUCK_K_CON1 0x8558
+#define MT6332_BUCK_K_CON2 0x855A
+#define MT6332_BUCK_K_CON3 0x855C
+#define MT6332_BUCK_K_CON4 0x855E
+#define MT6332_BUCK_K_CON5 0x8560
+#define MT6332_AUXADC_ADC0 0x8800
+#define MT6332_AUXADC_ADC1 0x8802
+#define MT6332_AUXADC_ADC2 0x8804
+#define MT6332_AUXADC_ADC3 0x8806
+#define MT6332_AUXADC_ADC4 0x8808
+#define MT6332_AUXADC_ADC5 0x880A
+#define MT6332_AUXADC_ADC6 0x880C
+#define MT6332_AUXADC_ADC7 0x880E
+#define MT6332_AUXADC_ADC8 0x8810
+#define MT6332_AUXADC_ADC9 0x8812
+#define MT6332_AUXADC_ADC10 0x8814
+#define MT6332_AUXADC_ADC11 0x8816
+#define MT6332_AUXADC_ADC12 0x8818
+#define MT6332_AUXADC_ADC13 0x881A
+#define MT6332_AUXADC_ADC14 0x881C
+#define MT6332_AUXADC_ADC15 0x881E
+#define MT6332_AUXADC_ADC16 0x8820
+#define MT6332_AUXADC_ADC17 0x8822
+#define MT6332_AUXADC_ADC18 0x8824
+#define MT6332_AUXADC_ADC19 0x8826
+#define MT6332_AUXADC_ADC20 0x8828
+#define MT6332_AUXADC_ADC21 0x882A
+#define MT6332_AUXADC_ADC22 0x882C
+#define MT6332_AUXADC_ADC23 0x882E
+#define MT6332_AUXADC_ADC24 0x8830
+#define MT6332_AUXADC_ADC25 0x8832
+#define MT6332_AUXADC_ADC26 0x8834
+#define MT6332_AUXADC_ADC27 0x8836
+#define MT6332_AUXADC_ADC28 0x8838
+#define MT6332_AUXADC_ADC29 0x883A
+#define MT6332_AUXADC_ADC30 0x883C
+#define MT6332_AUXADC_ADC31 0x883E
+#define MT6332_AUXADC_ADC32 0x8840
+#define MT6332_AUXADC_ADC33 0x8842
+#define MT6332_AUXADC_ADC34 0x8844
+#define MT6332_AUXADC_ADC35 0x8846
+#define MT6332_AUXADC_ADC36 0x8848
+#define MT6332_AUXADC_ADC37 0x884A
+#define MT6332_AUXADC_ADC38 0x884C
+#define MT6332_AUXADC_ADC39 0x884E
+#define MT6332_AUXADC_ADC40 0x8850
+#define MT6332_AUXADC_ADC41 0x8852
+#define MT6332_AUXADC_ADC42 0x8854
+#define MT6332_AUXADC_ADC43 0x8856
+#define MT6332_AUXADC_STA0 0x8858
+#define MT6332_AUXADC_STA1 0x885A
+#define MT6332_AUXADC_RQST0 0x885C
+#define MT6332_AUXADC_RQST0_SET 0x885E
+#define MT6332_AUXADC_RQST0_CLR 0x8860
+#define MT6332_AUXADC_RQST1 0x8862
+#define MT6332_AUXADC_RQST1_SET 0x8864
+#define MT6332_AUXADC_RQST1_CLR 0x8866
+#define MT6332_AUXADC_CON0 0x8868
+#define MT6332_AUXADC_CON1 0x886A
+#define MT6332_AUXADC_CON2 0x886C
+#define MT6332_AUXADC_CON3 0x886E
+#define MT6332_AUXADC_CON4 0x8870
+#define MT6332_AUXADC_CON5 0x8872
+#define MT6332_AUXADC_CON6 0x8874
+#define MT6332_AUXADC_CON7 0x8876
+#define MT6332_AUXADC_CON8 0x8878
+#define MT6332_AUXADC_CON9 0x887A
+#define MT6332_AUXADC_CON10 0x887C
+#define MT6332_AUXADC_CON11 0x887E
+#define MT6332_AUXADC_CON12 0x8880
+#define MT6332_AUXADC_CON13 0x8882
+#define MT6332_AUXADC_CON14 0x8884
+#define MT6332_AUXADC_CON15 0x8886
+#define MT6332_AUXADC_CON16 0x8888
+#define MT6332_AUXADC_CON17 0x888A
+#define MT6332_AUXADC_CON18 0x888C
+#define MT6332_AUXADC_CON19 0x888E
+#define MT6332_AUXADC_CON20 0x8890
+#define MT6332_AUXADC_CON21 0x8892
+#define MT6332_AUXADC_CON22 0x8894
+#define MT6332_AUXADC_CON23 0x8896
+#define MT6332_AUXADC_CON24 0x8898
+#define MT6332_AUXADC_CON25 0x889A
+#define MT6332_AUXADC_CON26 0x889C
+#define MT6332_AUXADC_CON27 0x889E
+#define MT6332_AUXADC_CON28 0x88A0
+#define MT6332_AUXADC_CON29 0x88A2
+#define MT6332_AUXADC_CON30 0x88A4
+#define MT6332_AUXADC_CON31 0x88A6
+#define MT6332_AUXADC_CON32 0x88A8
+#define MT6332_AUXADC_CON33 0x88AA
+#define MT6332_AUXADC_CON34 0x88AC
+#define MT6332_AUXADC_CON35 0x88AE
+#define MT6332_AUXADC_CON36 0x88B0
+#define MT6332_AUXADC_CON37 0x88B2
+#define MT6332_AUXADC_CON38 0x88B4
+#define MT6332_AUXADC_CON39 0x88B6
+#define MT6332_AUXADC_CON40 0x88B8
+#define MT6332_AUXADC_CON41 0x88BA
+#define MT6332_AUXADC_CON42 0x88BC
+#define MT6332_AUXADC_CON43 0x88BE
+#define MT6332_AUXADC_CON44 0x88C0
+#define MT6332_AUXADC_CON45 0x88C2
+#define MT6332_AUXADC_CON46 0x88C4
+#define MT6332_AUXADC_CON47 0x88C6
+#define MT6332_STRUP_CONA0 0x8C00
+#define MT6332_STRUP_CONA1 0x8C02
+#define MT6332_STRUP_CONA2 0x8C04
+#define MT6332_STRUP_CON0 0x8C06
+#define MT6332_STRUP_CON2 0x8C08
+#define MT6332_STRUP_CON3 0x8C0A
+#define MT6332_STRUP_CON4 0x8C0C
+#define MT6332_STRUP_CON5 0x8C0E
+#define MT6332_STRUP_CON6 0x8C10
+#define MT6332_STRUP_CON7 0x8C12
+#define MT6332_STRUP_CON8 0x8C14
+#define MT6332_STRUP_CON9 0x8C16
+#define MT6332_STRUP_CON10 0x8C18
+#define MT6332_STRUP_CON11 0x8C1A
+#define MT6332_STRUP_CON12 0x8C1C
+#define MT6332_STRUP_CON13 0x8C1E
+#define MT6332_STRUP_CON14 0x8C20
+#define MT6332_STRUP_CON15 0x8C22
+#define MT6332_STRUP_CON16 0x8C24
+#define MT6332_STRUP_CON17 0x8C26
+#define MT6332_FGADC_CON0 0x8C28
+#define MT6332_FGADC_CON1 0x8C2A
+#define MT6332_FGADC_CON2 0x8C2C
+#define MT6332_FGADC_CON3 0x8C2E
+#define MT6332_FGADC_CON4 0x8C30
+#define MT6332_FGADC_CON5 0x8C32
+#define MT6332_FGADC_CON6 0x8C34
+#define MT6332_FGADC_CON7 0x8C36
+#define MT6332_FGADC_CON8 0x8C38
+#define MT6332_FGADC_CON9 0x8C3A
+#define MT6332_FGADC_CON10 0x8C3C
+#define MT6332_FGADC_CON11 0x8C3E
+#define MT6332_FGADC_CON12 0x8C40
+#define MT6332_FGADC_CON13 0x8C42
+#define MT6332_FGADC_CON14 0x8C44
+#define MT6332_FGADC_CON15 0x8C46
+#define MT6332_FGADC_CON16 0x8C48
+#define MT6332_FGADC_CON17 0x8C4A
+#define MT6332_FGADC_CON18 0x8C4C
+#define MT6332_FGADC_CON19 0x8C4E
+#define MT6332_FGADC_CON20 0x8C50
+#define MT6332_FGADC_CON21 0x8C52
+#define MT6332_FGADC_CON22 0x8C54
+#define MT6332_OTP_CON0 0x8C56
+#define MT6332_OTP_CON1 0x8C58
+#define MT6332_OTP_CON2 0x8C5A
+#define MT6332_OTP_CON3 0x8C5C
+#define MT6332_OTP_CON4 0x8C5E
+#define MT6332_OTP_CON5 0x8C60
+#define MT6332_OTP_CON6 0x8C62
+#define MT6332_OTP_CON7 0x8C64
+#define MT6332_OTP_CON8 0x8C66
+#define MT6332_OTP_CON9 0x8C68
+#define MT6332_OTP_CON10 0x8C6A
+#define MT6332_OTP_CON11 0x8C6C
+#define MT6332_OTP_CON12 0x8C6E
+#define MT6332_OTP_CON13 0x8C70
+#define MT6332_OTP_CON14 0x8C72
+#define MT6332_OTP_DOUT_0_15 0x8C74
+#define MT6332_OTP_DOUT_16_31 0x8C76
+#define MT6332_OTP_DOUT_32_47 0x8C78
+#define MT6332_OTP_DOUT_48_63 0x8C7A
+#define MT6332_OTP_DOUT_64_79 0x8C7C
+#define MT6332_OTP_DOUT_80_95 0x8C7E
+#define MT6332_OTP_DOUT_96_111 0x8C80
+#define MT6332_OTP_DOUT_112_127 0x8C82
+#define MT6332_OTP_DOUT_128_143 0x8C84
+#define MT6332_OTP_DOUT_144_159 0x8C86
+#define MT6332_OTP_DOUT_160_175 0x8C88
+#define MT6332_OTP_DOUT_176_191 0x8C8A
+#define MT6332_OTP_DOUT_192_207 0x8C8C
+#define MT6332_OTP_DOUT_208_223 0x8C8E
+#define MT6332_OTP_DOUT_224_239 0x8C90
+#define MT6332_OTP_DOUT_240_255 0x8C92
+#define MT6332_OTP_VAL_0_15 0x8C94
+#define MT6332_OTP_VAL_16_31 0x8C96
+#define MT6332_OTP_VAL_32_47 0x8C98
+#define MT6332_OTP_VAL_48_63 0x8C9A
+#define MT6332_OTP_VAL_64_79 0x8C9C
+#define MT6332_OTP_VAL_80_95 0x8C9E
+#define MT6332_OTP_VAL_96_111 0x8CA0
+#define MT6332_OTP_VAL_112_127 0x8CA2
+#define MT6332_OTP_VAL_128_143 0x8CA4
+#define MT6332_OTP_VAL_144_159 0x8CA6
+#define MT6332_OTP_VAL_160_175 0x8CA8
+#define MT6332_OTP_VAL_176_191 0x8CAA
+#define MT6332_OTP_VAL_192_207 0x8CAC
+#define MT6332_OTP_VAL_208_223 0x8CAE
+#define MT6332_OTP_VAL_224_239 0x8CB0
+#define MT6332_OTP_VAL_240_255 0x8CB2
+#define MT6332_LDO_CON0 0x8CB4
+#define MT6332_LDO_CON1 0x8CB6
+#define MT6332_LDO_CON2 0x8CB8
+#define MT6332_LDO_CON3 0x8CBA
+#define MT6332_LDO_CON5 0x8CBC
+#define MT6332_LDO_CON6 0x8CBE
+#define MT6332_LDO_CON7 0x8CC0
+#define MT6332_LDO_CON8 0x8CC2
+#define MT6332_LDO_CON9 0x8CC4
+#define MT6332_LDO_CON10 0x8CC6
+#define MT6332_LDO_CON11 0x8CC8
+#define MT6332_LDO_CON12 0x8CCA
+#define MT6332_LDO_CON13 0x8CCC
+#define MT6332_FQMTR_CON0 0x8CCE
+#define MT6332_FQMTR_CON1 0x8CD0
+#define MT6332_FQMTR_CON2 0x8CD2
+#define MT6332_IWLED_CON0 0x8CD4
+#define MT6332_IWLED_DEG 0x8CD6
+#define MT6332_IWLED_STATUS 0x8CD8
+#define MT6332_IWLED_EN_CTRL 0x8CDA
+#define MT6332_IWLED_CON1 0x8CDC
+#define MT6332_IWLED_CON2 0x8CDE
+#define MT6332_IWLED_TRIM0 0x8CE0
+#define MT6332_IWLED_TRIM1 0x8CE2
+#define MT6332_IWLED_CON3 0x8CE4
+#define MT6332_IWLED_CON4 0x8CE6
+#define MT6332_IWLED_CON5 0x8CE8
+#define MT6332_IWLED_CON6 0x8CEA
+#define MT6332_IWLED_CON7 0x8CEC
+#define MT6332_IWLED_CON8 0x8CEE
+#define MT6332_IWLED_CON9 0x8CF0
+#define MT6332_SPK_CON0 0x8CF2
+#define MT6332_SPK_CON1 0x8CF4
+#define MT6332_SPK_CON2 0x8CF6
+#define MT6332_SPK_CON3 0x8CF8
+#define MT6332_SPK_CON4 0x8CFA
+#define MT6332_SPK_CON5 0x8CFC
+#define MT6332_SPK_CON6 0x8CFE
+#define MT6332_SPK_CON7 0x8D00
+#define MT6332_SPK_CON8 0x8D02
+#define MT6332_SPK_CON9 0x8D04
+#define MT6332_SPK_CON10 0x8D06
+#define MT6332_SPK_CON11 0x8D08
+#define MT6332_SPK_CON12 0x8D0A
+#define MT6332_SPK_CON13 0x8D0C
+#define MT6332_SPK_CON14 0x8D0E
+#define MT6332_SPK_CON15 0x8D10
+#define MT6332_SPK_CON16 0x8D12
+#define MT6332_TESTI_CON0 0x8D14
+#define MT6332_TESTI_CON1 0x8D16
+#define MT6332_TESTI_CON2 0x8D18
+#define MT6332_TESTI_CON3 0x8D1A
+#define MT6332_TESTI_CON4 0x8D1C
+#define MT6332_TESTI_CON5 0x8D1E
+#define MT6332_TESTI_CON6 0x8D20
+#define MT6332_TESTI_MUX_CON0 0x8D22
+#define MT6332_TESTI_MUX_CON1 0x8D24
+#define MT6332_TESTI_MUX_CON2 0x8D26
+#define MT6332_TESTI_MUX_CON3 0x8D28
+#define MT6332_TESTI_MUX_CON4 0x8D2A
+#define MT6332_TESTI_MUX_CON5 0x8D2C
+#define MT6332_TESTI_MUX_CON6 0x8D2E
+#define MT6332_TESTO_CON0 0x8D30
+#define MT6332_TESTO_CON1 0x8D32
+#define MT6332_TEST_OMUX_CON0 0x8D34
+#define MT6332_TEST_OMUX_CON1 0x8D36
+#define MT6332_DEBUG_CON0 0x8D38
+#define MT6332_DEBUG_CON1 0x8D3A
+#define MT6332_DEBUG_CON2 0x8D3C
+#define MT6332_FGADC_CON23 0x8D3E
+#define MT6332_FGADC_CON24 0x8D40
+#define MT6332_FGADC_CON25 0x8D42
+#define MT6332_TOP_RST_STATUS 0x8D44
+#define MT6332_TOP_RST_STATUS_SET 0x8D46
+#define MT6332_TOP_RST_STATUS_CLR 0x8D48
+#define MT6332_VDVFS2_CON28 0x8D4A
+
+#endif /* __MFD_MT6332_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6357/core.h b/include/linux/mfd/mt6357/core.h
new file mode 100644
index 000000000000..2441611264fd
--- /dev/null
+++ b/include/linux/mfd/mt6357/core.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 BayLibre, SAS
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+#ifndef __MFD_MT6357_CORE_H__
+#define __MFD_MT6357_CORE_H__
+
+enum mt6357_irq_top_status_shift {
+ MT6357_BUCK_TOP = 0,
+ MT6357_LDO_TOP,
+ MT6357_PSC_TOP,
+ MT6357_SCK_TOP,
+ MT6357_BM_TOP,
+ MT6357_HK_TOP,
+ MT6357_XPP_TOP,
+ MT6357_AUD_TOP,
+ MT6357_MISC_TOP,
+};
+
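+/*
+ * Informative note (not from the register map): the absolute IRQ
+ * numbers below are grouped by top-level interrupt domain.  Every
+ * MT6357_IRQ_*_BASE defined further down (0, 16, 48, 64, 80, 112,
+ * 128, 144) is a multiple of 16, so each domain starts on a fresh
+ * 16-bit status-register boundary and the gaps between groups are
+ * simply unused numbers.
+ */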
+enum mt6357_irq_numbers {
+ MT6357_IRQ_VPROC_OC = 0,
+ MT6357_IRQ_VCORE_OC,
+ MT6357_IRQ_VMODEM_OC,
+ MT6357_IRQ_VS1_OC,
+ MT6357_IRQ_VPA_OC,
+ MT6357_IRQ_VCORE_PREOC,
+ MT6357_IRQ_VFE28_OC = 16,
+ MT6357_IRQ_VXO22_OC,
+ MT6357_IRQ_VRF18_OC,
+ MT6357_IRQ_VRF12_OC,
+ MT6357_IRQ_VEFUSE_OC,
+ MT6357_IRQ_VCN33_OC,
+ MT6357_IRQ_VCN28_OC,
+ MT6357_IRQ_VCN18_OC,
+ MT6357_IRQ_VCAMA_OC,
+ MT6357_IRQ_VCAMD_OC,
+ MT6357_IRQ_VCAMIO_OC,
+ MT6357_IRQ_VLDO28_OC,
+ MT6357_IRQ_VUSB33_OC,
+ MT6357_IRQ_VAUX18_OC,
+ MT6357_IRQ_VAUD28_OC,
+ MT6357_IRQ_VIO28_OC,
+ MT6357_IRQ_VIO18_OC,
+ MT6357_IRQ_VSRAM_PROC_OC,
+ MT6357_IRQ_VSRAM_OTHERS_OC,
+ MT6357_IRQ_VIBR_OC,
+ MT6357_IRQ_VDRAM_OC,
+ MT6357_IRQ_VMC_OC,
+ MT6357_IRQ_VMCH_OC,
+ MT6357_IRQ_VEMC_OC,
+ MT6357_IRQ_VSIM1_OC,
+ MT6357_IRQ_VSIM2_OC,
+ MT6357_IRQ_PWRKEY = 48,
+ MT6357_IRQ_HOMEKEY,
+ MT6357_IRQ_PWRKEY_R,
+ MT6357_IRQ_HOMEKEY_R,
+ MT6357_IRQ_NI_LBAT_INT,
+ MT6357_IRQ_CHRDET,
+ MT6357_IRQ_CHRDET_EDGE,
+ MT6357_IRQ_VCDT_HV_DET,
+ MT6357_IRQ_WATCHDOG,
+ MT6357_IRQ_VBATON_UNDET,
+ MT6357_IRQ_BVALID_DET,
+ MT6357_IRQ_OV,
+ MT6357_IRQ_RTC = 64,
+ MT6357_IRQ_FG_BAT0_H = 80,
+ MT6357_IRQ_FG_BAT0_L,
+ MT6357_IRQ_FG_CUR_H,
+ MT6357_IRQ_FG_CUR_L,
+ MT6357_IRQ_FG_ZCV,
+ MT6357_IRQ_BATON_LV = 96,
+ MT6357_IRQ_BATON_HT,
+ MT6357_IRQ_BAT_H = 112,
+ MT6357_IRQ_BAT_L,
+ MT6357_IRQ_AUXADC_IMP,
+ MT6357_IRQ_NAG_C_DLTV,
+ MT6357_IRQ_AUDIO = 128,
+ MT6357_IRQ_ACCDET = 133,
+ MT6357_IRQ_ACCDET_EINT0,
+ MT6357_IRQ_ACCDET_EINT1,
+ MT6357_IRQ_SPI_CMD_ALERT = 144,
+ MT6357_IRQ_NR,
+};
+
+#define MT6357_IRQ_BUCK_BASE MT6357_IRQ_VPROC_OC
+#define MT6357_IRQ_LDO_BASE MT6357_IRQ_VFE28_OC
+#define MT6357_IRQ_PSC_BASE MT6357_IRQ_PWRKEY
+#define MT6357_IRQ_SCK_BASE MT6357_IRQ_RTC
+#define MT6357_IRQ_BM_BASE MT6357_IRQ_FG_BAT0_H
+#define MT6357_IRQ_HK_BASE MT6357_IRQ_BAT_H
+#define MT6357_IRQ_AUD_BASE MT6357_IRQ_AUDIO
+#define MT6357_IRQ_MISC_BASE MT6357_IRQ_SPI_CMD_ALERT
+
+#define MT6357_IRQ_BUCK_BITS (MT6357_IRQ_VCORE_PREOC - MT6357_IRQ_BUCK_BASE + 1)
+#define MT6357_IRQ_LDO_BITS (MT6357_IRQ_VSIM2_OC - MT6357_IRQ_LDO_BASE + 1)
+#define MT6357_IRQ_PSC_BITS (MT6357_IRQ_VCDT_HV_DET - MT6357_IRQ_PSC_BASE + 1)
+#define MT6357_IRQ_SCK_BITS (MT6357_IRQ_RTC - MT6357_IRQ_SCK_BASE + 1)
+#define MT6357_IRQ_BM_BITS (MT6357_IRQ_BATON_HT - MT6357_IRQ_BM_BASE + 1)
+#define MT6357_IRQ_HK_BITS (MT6357_IRQ_NAG_C_DLTV - MT6357_IRQ_HK_BASE + 1)
+#define MT6357_IRQ_AUD_BITS (MT6357_IRQ_ACCDET_EINT1 - MT6357_IRQ_AUD_BASE + 1)
+#define MT6357_IRQ_MISC_BITS \
+ (MT6357_IRQ_SPI_CMD_ALERT - MT6357_IRQ_MISC_BASE + 1)
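+
+/*
+ * Each *_BITS value above is just "last IRQ in the domain - domain
+ * base + 1", i.e. how many hardware IRQ lines the domain spans;
+ * MT6357_TOP_GEN() below divides it by MTK_PMIC_REG_WIDTH to work
+ * out how many 16-bit interrupt registers the domain needs.
+ */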
+
+#define MT6357_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6357_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6357_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
+ .en_reg = MT6357_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6357_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6357_##sp##_TOP, \
+}
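+
+/*
+ * Illustrative expansion (a sketch, not generated code): with
+ * MTK_PMIC_REG_WIDTH assumed to be 16 as in the other MediaTek PMIC
+ * headers, MT6357_TOP_GEN(BUCK) pastes "BUCK" into the identifiers
+ * above and initializes the mt6397 IRQ code's per-domain descriptor
+ * (struct irq_top_t, an assumption here) as:
+ *
+ *	.hwirq_base	= MT6357_IRQ_BUCK_BASE,		(= 0)
+ *	.num_int_regs	= ((6 - 1) / 16) + 1,		(= 1 register)
+ *	.en_reg		= MT6357_BUCK_TOP_INT_CON0,
+ *	.en_reg_shift	= 0x6,
+ *	.sta_reg	= MT6357_BUCK_TOP_INT_STATUS0,
+ *	.sta_reg_shift	= 0x2,
+ *	.top_offset	= MT6357_BUCK_TOP,
+ */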
+
+#endif /* __MFD_MT6357_CORE_H__ */
diff --git a/include/linux/mfd/mt6357/registers.h b/include/linux/mfd/mt6357/registers.h
new file mode 100644
index 000000000000..e24af83b618d
--- /dev/null
+++ b/include/linux/mfd/mt6357/registers.h
@@ -0,0 +1,1574 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6357_REGISTERS_H__
+#define __MFD_MT6357_REGISTERS_H__
+
+/* PMIC Registers */
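+/*
+ * Naming convention (an editorial assumption, not taken from the
+ * register map): many controls below come in FOO/FOO_SET/FOO_CLR
+ * triplets at consecutive offsets; on MediaTek PMICs the _SET/_CLR
+ * aliases conventionally set or clear whichever bits are written as
+ * 1, avoiding read-modify-write sequences.
+ */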
+#define MT6357_TOP0_ID 0x0
+#define MT6357_TOP0_REV0 0x2
+#define MT6357_TOP0_DSN_DBI 0x4
+#define MT6357_TOP0_DSN_DXI 0x6
+#define MT6357_HWCID 0x8
+#define MT6357_SWCID 0xa
+#define MT6357_PONSTS 0xc
+#define MT6357_POFFSTS 0xe
+#define MT6357_PSTSCTL 0x10
+#define MT6357_PG_DEB_STS0 0x12
+#define MT6357_PG_SDN_STS0 0x14
+#define MT6357_OC_SDN_STS0 0x16
+#define MT6357_THERMALSTATUS 0x18
+#define MT6357_TOP_CON 0x1a
+#define MT6357_TEST_OUT 0x1c
+#define MT6357_TEST_CON0 0x1e
+#define MT6357_TEST_CON1 0x20
+#define MT6357_TESTMODE_SW 0x22
+#define MT6357_TOPSTATUS 0x24
+#define MT6357_TDSEL_CON 0x26
+#define MT6357_RDSEL_CON 0x28
+#define MT6357_SMT_CON0 0x2a
+#define MT6357_SMT_CON1 0x2c
+#define MT6357_TOP_RSV0 0x2e
+#define MT6357_TOP_RSV1 0x30
+#define MT6357_DRV_CON0 0x32
+#define MT6357_DRV_CON1 0x34
+#define MT6357_DRV_CON2 0x36
+#define MT6357_DRV_CON3 0x38
+#define MT6357_FILTER_CON0 0x3a
+#define MT6357_FILTER_CON1 0x3c
+#define MT6357_FILTER_CON2 0x3e
+#define MT6357_FILTER_CON3 0x40
+#define MT6357_TOP_STATUS 0x42
+#define MT6357_TOP_STATUS_SET 0x44
+#define MT6357_TOP_STATUS_CLR 0x46
+#define MT6357_TOP_TRAP 0x48
+#define MT6357_TOP1_ID 0x80
+#define MT6357_TOP1_REV0 0x82
+#define MT6357_TOP1_DSN_DBI 0x84
+#define MT6357_TOP1_DSN_DXI 0x86
+#define MT6357_GPIO_DIR0 0x88
+#define MT6357_GPIO_DIR0_SET 0x8a
+#define MT6357_GPIO_DIR0_CLR 0x8c
+#define MT6357_GPIO_PULLEN0 0x8e
+#define MT6357_GPIO_PULLEN0_SET 0x90
+#define MT6357_GPIO_PULLEN0_CLR 0x92
+#define MT6357_GPIO_PULLSEL0 0x94
+#define MT6357_GPIO_PULLSEL0_SET 0x96
+#define MT6357_GPIO_PULLSEL0_CLR 0x98
+#define MT6357_GPIO_DINV0 0x9a
+#define MT6357_GPIO_DINV0_SET 0x9c
+#define MT6357_GPIO_DINV0_CLR 0x9e
+#define MT6357_GPIO_DOUT0 0xa0
+#define MT6357_GPIO_DOUT0_SET 0xa2
+#define MT6357_GPIO_DOUT0_CLR 0xa4
+#define MT6357_GPIO_PI0 0xa6
+#define MT6357_GPIO_POE0 0xa8
+#define MT6357_GPIO_MODE0 0xaa
+#define MT6357_GPIO_MODE0_SET 0xac
+#define MT6357_GPIO_MODE0_CLR 0xae
+#define MT6357_GPIO_MODE1 0xb0
+#define MT6357_GPIO_MODE1_SET 0xb2
+#define MT6357_GPIO_MODE1_CLR 0xb4
+#define MT6357_GPIO_MODE2 0xb6
+#define MT6357_GPIO_MODE2_SET 0xb8
+#define MT6357_GPIO_MODE2_CLR 0xba
+#define MT6357_GPIO_MODE3 0xbc
+#define MT6357_GPIO_MODE3_SET 0xbe
+#define MT6357_GPIO_MODE3_CLR 0xc0
+#define MT6357_GPIO_RSV 0xc2
+#define MT6357_TOP2_ID 0x100
+#define MT6357_TOP2_REV0 0x102
+#define MT6357_TOP2_DSN_DBI 0x104
+#define MT6357_TOP2_DSN_DXI 0x106
+#define MT6357_TOP_PAM0 0x108
+#define MT6357_TOP_PAM1 0x10a
+#define MT6357_TOP_CKPDN_CON0 0x10c
+#define MT6357_TOP_CKPDN_CON0_SET 0x10e
+#define MT6357_TOP_CKPDN_CON0_CLR 0x110
+#define MT6357_TOP_CKPDN_CON1 0x112
+#define MT6357_TOP_CKPDN_CON1_SET 0x114
+#define MT6357_TOP_CKPDN_CON1_CLR 0x116
+#define MT6357_TOP_CKSEL_CON0 0x118
+#define MT6357_TOP_CKSEL_CON0_SET 0x11a
+#define MT6357_TOP_CKSEL_CON0_CLR 0x11c
+#define MT6357_TOP_CKSEL_CON1 0x11e
+#define MT6357_TOP_CKSEL_CON1_SET 0x120
+#define MT6357_TOP_CKSEL_CON1_CLR 0x122
+#define MT6357_TOP_CKDIVSEL_CON0 0x124
+#define MT6357_TOP_CKDIVSEL_CON0_SET 0x126
+#define MT6357_TOP_CKDIVSEL_CON0_CLR 0x128
+#define MT6357_TOP_CKHWEN_CON0 0x12a
+#define MT6357_TOP_CKHWEN_CON0_SET 0x12c
+#define MT6357_TOP_CKHWEN_CON0_CLR 0x12e
+#define MT6357_TOP_CKTST_CON0 0x130
+#define MT6357_TOP_CKTST_CON1 0x132
+#define MT6357_TOP_CLK_CON0 0x134
+#define MT6357_TOP_CLK_CON0_SET 0x136
+#define MT6357_TOP_CLK_CON0_CLR 0x138
+#define MT6357_TOP_DCM_CON0 0x13a
+#define MT6357_TOP_HANDOVER_DEBUG0 0x13c
+#define MT6357_TOP_RST_CON0 0x13e
+#define MT6357_TOP_RST_CON0_SET 0x140
+#define MT6357_TOP_RST_CON0_CLR 0x142
+#define MT6357_TOP_RST_CON1 0x144
+#define MT6357_TOP_RST_CON1_SET 0x146
+#define MT6357_TOP_RST_CON1_CLR 0x148
+#define MT6357_TOP_RST_CON2 0x14a
+#define MT6357_TOP_RST_MISC 0x14c
+#define MT6357_TOP_RST_MISC_SET 0x14e
+#define MT6357_TOP_RST_MISC_CLR 0x150
+#define MT6357_TOP_RST_STATUS 0x152
+#define MT6357_TOP_RST_STATUS_SET 0x154
+#define MT6357_TOP_RST_STATUS_CLR 0x156
+#define MT6357_TOP2_ELR_NUM 0x158
+#define MT6357_TOP2_ELR0 0x15a
+#define MT6357_TOP2_ELR1 0x15c
+#define MT6357_TOP3_ID 0x180
+#define MT6357_TOP3_REV0 0x182
+#define MT6357_TOP3_DSN_DBI 0x184
+#define MT6357_TOP3_DSN_DXI 0x186
+#define MT6357_MISC_TOP_INT_CON0 0x188
+#define MT6357_MISC_TOP_INT_CON0_SET 0x18a
+#define MT6357_MISC_TOP_INT_CON0_CLR 0x18c
+#define MT6357_MISC_TOP_INT_MASK_CON0 0x18e
+#define MT6357_MISC_TOP_INT_MASK_CON0_SET 0x190
+#define MT6357_MISC_TOP_INT_MASK_CON0_CLR 0x192
+#define MT6357_MISC_TOP_INT_STATUS0 0x194
+#define MT6357_MISC_TOP_INT_RAW_STATUS0 0x196
+#define MT6357_TOP_INT_MASK_CON0 0x198
+#define MT6357_TOP_INT_MASK_CON0_SET 0x19a
+#define MT6357_TOP_INT_MASK_CON0_CLR 0x19c
+#define MT6357_TOP_INT_STATUS0 0x19e
+#define MT6357_TOP_INT_RAW_STATUS0 0x1a0
+#define MT6357_TOP_INT_CON0 0x1a2
+#define MT6357_PLT0_ID 0x380
+#define MT6357_PLT0_REV0 0x382
+#define MT6357_PLT0_REV1 0x384
+#define MT6357_PLT0_DSN_DXI 0x386
+#define MT6357_FQMTR_CON0 0x388
+#define MT6357_FQMTR_CON1 0x38a
+#define MT6357_FQMTR_CON2 0x38c
+#define MT6357_TOP_CLK_TRIM 0x38e
+#define MT6357_OTP_CON0 0x390
+#define MT6357_OTP_CON1 0x392
+#define MT6357_OTP_CON2 0x394
+#define MT6357_OTP_CON3 0x396
+#define MT6357_OTP_CON4 0x398
+#define MT6357_OTP_CON5 0x39a
+#define MT6357_OTP_CON6 0x39c
+#define MT6357_OTP_CON7 0x39e
+#define MT6357_OTP_CON8 0x3a0
+#define MT6357_OTP_CON9 0x3a2
+#define MT6357_OTP_CON10 0x3a4
+#define MT6357_OTP_CON11 0x3a6
+#define MT6357_OTP_CON12 0x3a8
+#define MT6357_OTP_CON13 0x3aa
+#define MT6357_OTP_CON14 0x3ac
+#define MT6357_TOP_TMA_KEY 0x3ae
+#define MT6357_TOP_MDB_CONF0 0x3b0
+#define MT6357_TOP_MDB_CONF1 0x3b2
+#define MT6357_TOP_MDB_CONF2 0x3b4
+#define MT6357_PLT0_ELR_NUM 0x3b6
+#define MT6357_PLT0_ELR0 0x3b8
+#define MT6357_PLT0_ELR1 0x3ba
+#define MT6357_SPISLV_ID 0x400
+#define MT6357_SPISLV_REV0 0x402
+#define MT6357_SPISLV_REV1 0x404
+#define MT6357_SPISLV_DSN_DXI 0x406
+#define MT6357_RG_SPI_CON0 0x408
+#define MT6357_DEW_DIO_EN 0x40a
+#define MT6357_DEW_READ_TEST 0x40c
+#define MT6357_DEW_WRITE_TEST 0x40e
+#define MT6357_DEW_CRC_SWRST 0x410
+#define MT6357_DEW_CRC_EN 0x412
+#define MT6357_DEW_CRC_VAL 0x414
+#define MT6357_DEW_DBG_MON_SEL 0x416
+#define MT6357_DEW_CIPHER_KEY_SEL 0x418
+#define MT6357_DEW_CIPHER_IV_SEL 0x41a
+#define MT6357_DEW_CIPHER_EN 0x41c
+#define MT6357_DEW_CIPHER_RDY 0x41e
+#define MT6357_DEW_CIPHER_MODE 0x420
+#define MT6357_DEW_CIPHER_SWRST 0x422
+#define MT6357_DEW_RDDMY_NO 0x424
+#define MT6357_INT_TYPE_CON0 0x426
+#define MT6357_INT_TYPE_CON0_SET 0x428
+#define MT6357_INT_TYPE_CON0_CLR 0x42a
+#define MT6357_INT_STA 0x42c
+#define MT6357_RG_SPI_CON1 0x42e
+#define MT6357_RG_SPI_CON2 0x430
+#define MT6357_RG_SPI_CON3 0x432
+#define MT6357_RG_SPI_CON4 0x434
+#define MT6357_RG_SPI_CON5 0x436
+#define MT6357_RG_SPI_CON6 0x438
+#define MT6357_RG_SPI_CON7 0x43a
+#define MT6357_RG_SPI_CON8 0x43c
+#define MT6357_RG_SPI_CON9 0x43e
+#define MT6357_RG_SPI_CON10 0x440
+#define MT6357_RG_SPI_CON11 0x442
+#define MT6357_RG_SPI_CON12 0x444
+#define MT6357_RG_SPI_CON13 0x446
+#define MT6357_TOP_SPI_CON0 0x448
+#define MT6357_TOP_SPI_CON1 0x44a
+#define MT6357_SCK_TOP_DSN_ID 0x500
+#define MT6357_SCK_TOP_DSN_REV0 0x502
+#define MT6357_SCK_TOP_DBI 0x504
+#define MT6357_SCK_TOP_DXI 0x506
+#define MT6357_SCK_TOP_TPM0 0x508
+#define MT6357_SCK_TOP_TPM1 0x50a
+#define MT6357_SCK_TOP_CON0 0x50c
+#define MT6357_SCK_TOP_CON1 0x50e
+#define MT6357_SCK_TOP_TEST_OUT 0x510
+#define MT6357_SCK_TOP_TEST_CON0 0x512
+#define MT6357_SCK_TOP_CKPDN_CON0 0x514
+#define MT6357_SCK_TOP_CKPDN_CON0_SET 0x516
+#define MT6357_SCK_TOP_CKPDN_CON0_CLR 0x518
+#define MT6357_SCK_TOP_CKHWEN_CON0 0x51a
+#define MT6357_SCK_TOP_CKHWEN_CON0_SET 0x51c
+#define MT6357_SCK_TOP_CKHWEN_CON0_CLR 0x51e
+#define MT6357_SCK_TOP_CKTST_CON 0x520
+#define MT6357_SCK_TOP_RST_CON0 0x522
+#define MT6357_SCK_TOP_RST_CON0_SET 0x524
+#define MT6357_SCK_TOP_RST_CON0_CLR 0x526
+#define MT6357_SCK_TOP_INT_CON0 0x528
+#define MT6357_SCK_TOP_INT_CON0_SET 0x52a
+#define MT6357_SCK_TOP_INT_CON0_CLR 0x52c
+#define MT6357_SCK_TOP_INT_MASK_CON0 0x52e
+#define MT6357_SCK_TOP_INT_MASK_CON0_SET 0x530
+#define MT6357_SCK_TOP_INT_MASK_CON0_CLR 0x532
+#define MT6357_SCK_TOP_INT_STATUS0 0x534
+#define MT6357_SCK_TOP_INT_RAW_STATUS0 0x536
+#define MT6357_SCK_TOP_INT_MISC_CON 0x538
+#define MT6357_EOSC_CALI_CON0 0x53a
+#define MT6357_EOSC_CALI_CON1 0x53c
+#define MT6357_RTC_MIX_CON0 0x53e
+#define MT6357_RTC_MIX_CON1 0x540
+#define MT6357_RTC_MIX_CON2 0x542
+#define MT6357_RTC_DSN_ID 0x580
+#define MT6357_RTC_DSN_REV0 0x582
+#define MT6357_RTC_DBI 0x584
+#define MT6357_RTC_DXI 0x586
+#define MT6357_RTC_BBPU 0x588
+#define MT6357_RTC_IRQ_STA 0x58a
+#define MT6357_RTC_IRQ_EN 0x58c
+#define MT6357_RTC_CII_EN 0x58e
+#define MT6357_RTC_AL_MASK 0x590
+#define MT6357_RTC_TC_SEC 0x592
+#define MT6357_RTC_TC_MIN 0x594
+#define MT6357_RTC_TC_HOU 0x596
+#define MT6357_RTC_TC_DOM 0x598
+#define MT6357_RTC_TC_DOW 0x59a
+#define MT6357_RTC_TC_MTH 0x59c
+#define MT6357_RTC_TC_YEA 0x59e
+#define MT6357_RTC_AL_SEC 0x5a0
+#define MT6357_RTC_AL_MIN 0x5a2
+#define MT6357_RTC_AL_HOU 0x5a4
+#define MT6357_RTC_AL_DOM 0x5a6
+#define MT6357_RTC_AL_DOW 0x5a8
+#define MT6357_RTC_AL_MTH 0x5aa
+#define MT6357_RTC_AL_YEA 0x5ac
+#define MT6357_RTC_OSC32CON 0x5ae
+#define MT6357_RTC_POWERKEY1 0x5b0
+#define MT6357_RTC_POWERKEY2 0x5b2
+#define MT6357_RTC_PDN1 0x5b4
+#define MT6357_RTC_PDN2 0x5b6
+#define MT6357_RTC_SPAR0 0x5b8
+#define MT6357_RTC_SPAR1 0x5ba
+#define MT6357_RTC_PROT 0x5bc
+#define MT6357_RTC_DIFF 0x5be
+#define MT6357_RTC_CALI 0x5c0
+#define MT6357_RTC_WRTGR 0x5c2
+#define MT6357_RTC_CON 0x5c4
+#define MT6357_RTC_SEC_CTRL 0x5c6
+#define MT6357_RTC_INT_CNT 0x5c8
+#define MT6357_RTC_SEC_DAT0 0x5ca
+#define MT6357_RTC_SEC_DAT1 0x5cc
+#define MT6357_RTC_SEC_DAT2 0x5ce
+#define MT6357_RTC_SEC_DSN_ID 0x600
+#define MT6357_RTC_SEC_DSN_REV0 0x602
+#define MT6357_RTC_SEC_DBI 0x604
+#define MT6357_RTC_SEC_DXI 0x606
+#define MT6357_RTC_TC_SEC_SEC 0x608
+#define MT6357_RTC_TC_MIN_SEC 0x60a
+#define MT6357_RTC_TC_HOU_SEC 0x60c
+#define MT6357_RTC_TC_DOM_SEC 0x60e
+#define MT6357_RTC_TC_DOW_SEC 0x610
+#define MT6357_RTC_TC_MTH_SEC 0x612
+#define MT6357_RTC_TC_YEA_SEC 0x614
+#define MT6357_RTC_SEC_CK_PDN 0x616
+#define MT6357_RTC_SEC_WRTGR 0x618
+#define MT6357_DCXO_DSN_ID 0x780
+#define MT6357_DCXO_DSN_REV0 0x782
+#define MT6357_DCXO_DSN_DBI 0x784
+#define MT6357_DCXO_DSN_DXI 0x786
+#define MT6357_DCXO_CW00 0x788
+#define MT6357_DCXO_CW00_SET 0x78a
+#define MT6357_DCXO_CW00_CLR 0x78c
+#define MT6357_DCXO_CW01 0x78e
+#define MT6357_DCXO_CW02 0x790
+#define MT6357_DCXO_CW03 0x792
+#define MT6357_DCXO_CW04 0x794
+#define MT6357_DCXO_CW05 0x796
+#define MT6357_DCXO_CW06 0x798
+#define MT6357_DCXO_CW07 0x79a
+#define MT6357_DCXO_CW08 0x79c
+#define MT6357_DCXO_CW09 0x79e
+#define MT6357_DCXO_CW10 0x7a0
+#define MT6357_DCXO_CW11 0x7a2
+#define MT6357_DCXO_CW11_SET 0x7a4
+#define MT6357_DCXO_CW11_CLR 0x7a6
+#define MT6357_DCXO_CW12 0x7a8
+#define MT6357_DCXO_CW13 0x7aa
+#define MT6357_DCXO_CW14 0x7ac
+#define MT6357_DCXO_CW15 0x7ae
+#define MT6357_DCXO_CW16 0x7b0
+#define MT6357_DCXO_CW17 0x7b2
+#define MT6357_DCXO_CW18 0x7b4
+#define MT6357_DCXO_CW19 0x7b6
+#define MT6357_DCXO_CW20 0x7b8
+#define MT6357_DCXO_CW21 0x7ba
+#define MT6357_DCXO_CW22 0x7bc
+#define MT6357_DCXO_ELR_NUM 0x7be
+#define MT6357_DCXO_ELR0 0x7c0
+#define MT6357_PSC_TOP_ID 0x900
+#define MT6357_PSC_TOP_REV0 0x902
+#define MT6357_PSC_TOP_DBI 0x904
+#define MT6357_PSC_TOP_DXI 0x906
+#define MT6357_PSC_TPM0 0x908
+#define MT6357_PSC_TPM1 0x90a
+#define MT6357_PSC_TOP_RSTCTL_0 0x90c
+#define MT6357_PSC_TOP_INT_CON0 0x90e
+#define MT6357_PSC_TOP_INT_CON0_SET 0x910
+#define MT6357_PSC_TOP_INT_CON0_CLR 0x912
+#define MT6357_PSC_TOP_INT_MASK_CON0 0x914
+#define MT6357_PSC_TOP_INT_MASK_CON0_SET 0x916
+#define MT6357_PSC_TOP_INT_MASK_CON0_CLR 0x918
+#define MT6357_PSC_TOP_INT_STATUS0 0x91a
+#define MT6357_PSC_TOP_INT_RAW_STATUS0 0x91c
+#define MT6357_PSC_TOP_INT_MISC_CON 0x91e
+#define MT6357_PSC_TOP_INT_MISC_CON_SET 0x920
+#define MT6357_PSC_TOP_INT_MISC_CON_CLR 0x922
+#define MT6357_PSC_TOP_MON_CTL 0x924
+#define MT6357_STRUP_ID 0x980
+#define MT6357_STRUP_REV0 0x982
+#define MT6357_STRUP_DBI 0x984
+#define MT6357_STRUP_DXI 0x986
+#define MT6357_STRUP_ANA_CON0 0x988
+#define MT6357_STRUP_ANA_CON1 0x98a
+#define MT6357_STRUP_ANA_CON2 0x98c
+#define MT6357_STRUP_ELR_NUM 0x98e
+#define MT6357_STRUP_ELR_0 0x990
+#define MT6357_PSEQ_ID 0xa00
+#define MT6357_PSEQ_REV0 0xa02
+#define MT6357_PSEQ_DBI 0xa04
+#define MT6357_PSEQ_DXI 0xa06
+#define MT6357_PPCCTL0 0xa08
+#define MT6357_PPCCTL1 0xa0a
+#define MT6357_PPCCTL2 0xa0c
+#define MT6357_PPCCFG0 0xa0e
+#define MT6357_PPCTST0 0xa10
+#define MT6357_PORFLAG 0xa12
+#define MT6357_STRUP_CON0 0xa14
+#define MT6357_STRUP_CON1 0xa16
+#define MT6357_STRUP_CON2 0xa18
+#define MT6357_STRUP_CON3 0xa1a
+#define MT6357_STRUP_CON4 0xa1c
+#define MT6357_STRUP_CON5 0xa1e
+#define MT6357_STRUP_CON6 0xa20
+#define MT6357_STRUP_CON7 0xa22
+#define MT6357_CPSCFG0 0xa24
+#define MT6357_STRUP_CON9 0xa26
+#define MT6357_STRUP_CON10 0xa28
+#define MT6357_STRUP_CON11 0xa2a
+#define MT6357_STRUP_CON12 0xa2c
+#define MT6357_STRUP_CON13 0xa2e
+#define MT6357_STRUP_CON14 0xa30
+#define MT6357_STRUP_CON15 0xa32
+#define MT6357_STRUP_CON16 0xa34
+#define MT6357_STRUP_CON19 0xa36
+#define MT6357_PSEQ_ELR_NUM 0xa38
+#define MT6357_PSEQ_ELR7 0xa3a
+#define MT6357_PSEQ_ELR8 0xa3c
+#define MT6357_PCHR_DIG_DSN_ID 0xa80
+#define MT6357_PCHR_DIG_DSN_REV0 0xa82
+#define MT6357_PCHR_DIG_DSN_DBI 0xa84
+#define MT6357_PCHR_DIG_DSN_DXI 0xa86
+#define MT6357_CHR_TOP_CON0 0xa88
+#define MT6357_CHR_TOP_CON1 0xa8a
+#define MT6357_CHR_TOP_CON2 0xa8c
+#define MT6357_CHR_TOP_CON3 0xa8e
+#define MT6357_CHR_TOP_CON4 0xa90
+#define MT6357_CHR_TOP_CON5 0xa92
+#define MT6357_CHR_TOP_CON6 0xa94
+#define MT6357_PCHR_DIG_ELR_NUM 0xa96
+#define MT6357_PCHR_ELR0 0xa98
+#define MT6357_PCHR_ELR1 0xa9a
+#define MT6357_PCHR_MACRO_DSN_ID 0xb80
+#define MT6357_PCHR_MACRO_DSN_REV0 0xb82
+#define MT6357_PCHR_MACRO_DSN_DBI 0xb84
+#define MT6357_PCHR_MACRO_DSN_DXI 0xb86
+#define MT6357_CHR_CON0 0xb88
+#define MT6357_CHR_CON1 0xb8a
+#define MT6357_CHR_CON2 0xb8c
+#define MT6357_CHR_CON3 0xb8e
+#define MT6357_CHR_CON4 0xb90
+#define MT6357_CHR_CON5 0xb92
+#define MT6357_CHR_CON6 0xb94
+#define MT6357_CHR_CON7 0xb96
+#define MT6357_CHR_CON8 0xb98
+#define MT6357_CHR_CON9 0xb9a
+#define MT6357_BM_TOP_DSN_ID 0xc00
+#define MT6357_BM_TOP_DSN_REV0 0xc02
+#define MT6357_BM_TOP_DBI 0xc04
+#define MT6357_BM_TOP_DXI 0xc06
+#define MT6357_BM_TPM0 0xc08
+#define MT6357_BM_TPM1 0xc0a
+#define MT6357_BM_TOP_CKPDN_CON0 0xc0c
+#define MT6357_BM_TOP_CKPDN_CON0_SET 0xc0e
+#define MT6357_BM_TOP_CKPDN_CON0_CLR 0xc10
+#define MT6357_BM_TOP_CKSEL_CON0 0xc12
+#define MT6357_BM_TOP_CKSEL_CON0_SET 0xc14
+#define MT6357_BM_TOP_CKSEL_CON0_CLR 0xc16
+#define MT6357_BM_TOP_CKTST_CON0 0xc18
+#define MT6357_BM_TOP_RST_CON0 0xc1a
+#define MT6357_BM_TOP_RST_CON0_SET 0xc1c
+#define MT6357_BM_TOP_RST_CON0_CLR 0xc1e
+#define MT6357_BM_TOP_INT_CON0 0xc20
+#define MT6357_BM_TOP_INT_CON0_SET 0xc22
+#define MT6357_BM_TOP_INT_CON0_CLR 0xc24
+#define MT6357_BM_TOP_INT_CON1 0xc26
+#define MT6357_BM_TOP_INT_CON1_SET 0xc28
+#define MT6357_BM_TOP_INT_CON1_CLR 0xc2a
+#define MT6357_BM_TOP_INT_MASK_CON0 0xc2c
+#define MT6357_BM_TOP_INT_MASK_CON0_SET 0xc2e
+#define MT6357_BM_TOP_INT_MASK_CON0_CLR 0xc30
+#define MT6357_BM_TOP_INT_MASK_CON1 0xc32
+#define MT6357_BM_TOP_INT_MASK_CON1_SET 0xc34
+#define MT6357_BM_TOP_INT_MASK_CON1_CLR 0xc36
+#define MT6357_BM_TOP_INT_STATUS0 0xc38
+#define MT6357_BM_TOP_INT_STATUS1 0xc3a
+#define MT6357_BM_TOP_INT_RAW_STATUS0 0xc3c
+#define MT6357_BM_TOP_INT_RAW_STATUS1 0xc3e
+#define MT6357_BM_TOP_INT_MISC_CON 0xc40
+#define MT6357_BM_TOP_DBG_CON 0xc42
+#define MT6357_BM_TOP_RSV0 0xc44
+#define MT6357_FGADC_ANA_DSN_ID 0xc80
+#define MT6357_FGADC_ANA_DSN_REV0 0xc82
+#define MT6357_FGADC_ANA_DSN_DBI 0xc84
+#define MT6357_FGADC_ANA_DSN_DXI 0xc86
+#define MT6357_FGADC_ANA_CON0 0xc88
+#define MT6357_FGADC_ANA_TEST_CON0 0xc8a
+#define MT6357_FGADC_ANA_ELR_NUM 0xc8c
+#define MT6357_FGADC_ANA_ELR0 0xc8e
+#define MT6357_FGADC_ANA_ELR1 0xc90
+#define MT6357_FGADC0_DSN_ID 0xd00
+#define MT6357_FGADC0_DSN_REV0 0xd02
+#define MT6357_FGADC0_DSN_DBI 0xd04
+#define MT6357_FGADC0_DSN_DXI 0xd06
+#define MT6357_FGADC_CON0 0xd08
+#define MT6357_FGADC_CON1 0xd0a
+#define MT6357_FGADC_CON2 0xd0c
+#define MT6357_FGADC_CON3 0xd0e
+#define MT6357_FGADC_CON4 0xd10
+#define MT6357_FGADC_CAR_CON0 0xd12
+#define MT6357_FGADC_CAR_CON1 0xd14
+#define MT6357_FGADC_CAR_CON2 0xd16
+#define MT6357_FGADC_CARTH_CON0 0xd18
+#define MT6357_FGADC_CARTH_CON1 0xd1a
+#define MT6357_FGADC_CARTH_CON2 0xd1c
+#define MT6357_FGADC_CARTH_CON3 0xd1e
+#define MT6357_FGADC_NTER_CON0 0xd20
+#define MT6357_FGADC_NTER_CON1 0xd22
+#define MT6357_FGADC_NTER_CON2 0xd24
+#define MT6357_FGADC_SON_CON0 0xd26
+#define MT6357_FGADC_SON_CON1 0xd28
+#define MT6357_FGADC_SON_CON2 0xd2a
+#define MT6357_FGADC_SON_CON3 0xd2c
+#define MT6357_FGADC_ZCV_CON0 0xd2e
+#define MT6357_FGADC_ZCV_CON1 0xd30
+#define MT6357_FGADC_ZCV_CON2 0xd32
+#define MT6357_FGADC_ZCV_CON3 0xd34
+#define MT6357_FGADC_ZCV_CON4 0xd36
+#define MT6357_FGADC_ZCVTH_CON0 0xd38
+#define MT6357_FGADC_ZCVTH_CON1 0xd3a
+#define MT6357_FGADC_ZCVTH_CON2 0xd3c
+#define MT6357_FGADC1_DSN_ID 0xd80
+#define MT6357_FGADC1_DSN_REV0 0xd82
+#define MT6357_FGADC1_DSN_DBI 0xd84
+#define MT6357_FGADC1_DSN_DXI 0xd86
+#define MT6357_FGADC_R_CON0 0xd88
+#define MT6357_FGADC_CUR_CON0 0xd8a
+#define MT6357_FGADC_CUR_CON1 0xd8c
+#define MT6357_FGADC_CUR_CON2 0xd8e
+#define MT6357_FGADC_CUR_CON3 0xd90
+#define MT6357_FGADC_OFFSET_CON0 0xd92
+#define MT6357_FGADC_OFFSET_CON1 0xd94
+#define MT6357_FGADC_GAIN_CON0 0xd96
+#define MT6357_FGADC_TEST_CON0 0xd98
+#define MT6357_SYSTEM_INFO_CON0 0xd9a
+#define MT6357_SYSTEM_INFO_CON1 0xd9c
+#define MT6357_SYSTEM_INFO_CON2 0xd9e
+#define MT6357_SYSTEM_INFO_CON3 0xda0
+#define MT6357_SYSTEM_INFO_CON4 0xda2
+#define MT6357_BATON_ANA_DSN_ID 0xe00
+#define MT6357_BATON_ANA_DSN_REV0 0xe02
+#define MT6357_BATON_ANA_DSN_DBI 0xe04
+#define MT6357_BATON_ANA_DSN_DXI 0xe06
+#define MT6357_BATON_ANA_CON0 0xe08
+#define MT6357_BATON_ANA_ELR_NUM 0xe0a
+#define MT6357_BATON_ANA_ELR0 0xe0c
+#define MT6357_HK_TOP_ID 0xf80
+#define MT6357_HK_TOP_REV0 0xf82
+#define MT6357_HK_TOP_DBI 0xf84
+#define MT6357_HK_TOP_DXI 0xf86
+#define MT6357_HK_TPM0 0xf88
+#define MT6357_HK_TPM1 0xf8a
+#define MT6357_HK_TOP_CLK_CON0 0xf8c
+#define MT6357_HK_TOP_CLK_CON1 0xf8e
+#define MT6357_HK_TOP_RST_CON0 0xf90
+#define MT6357_HK_TOP_INT_CON0 0xf92
+#define MT6357_HK_TOP_INT_CON0_SET 0xf94
+#define MT6357_HK_TOP_INT_CON0_CLR 0xf96
+#define MT6357_HK_TOP_INT_MASK_CON0 0xf98
+#define MT6357_HK_TOP_INT_MASK_CON0_SET 0xf9a
+#define MT6357_HK_TOP_INT_MASK_CON0_CLR 0xf9c
+#define MT6357_HK_TOP_INT_STATUS0 0xf9e
+#define MT6357_HK_TOP_INT_RAW_STATUS0 0xfa0
+#define MT6357_HK_TOP_MON_CON0 0xfa2
+#define MT6357_HK_TOP_MON_CON1 0xfa4
+#define MT6357_HK_TOP_MON_CON2 0xfa6
+#define MT6357_AUXADC_DSN_ID 0x1000
+#define MT6357_AUXADC_DSN_REV0 0x1002
+#define MT6357_AUXADC_DSN_DBI 0x1004
+#define MT6357_AUXADC_DSN_DXI 0x1006
+#define MT6357_AUXADC_ANA_CON0 0x1008
+#define MT6357_AUXADC_DIG_1_DSN_ID 0x1080
+#define MT6357_AUXADC_DIG_1_DSN_REV0 0x1082
+#define MT6357_AUXADC_DIG_1_DSN_DBI 0x1084
+#define MT6357_AUXADC_DIG_1_DSN_DXI 0x1086
+#define MT6357_AUXADC_ADC0 0x1088
+#define MT6357_AUXADC_ADC1 0x108a
+#define MT6357_AUXADC_ADC2 0x108c
+#define MT6357_AUXADC_ADC3 0x108e
+#define MT6357_AUXADC_ADC4 0x1090
+#define MT6357_AUXADC_ADC5 0x1092
+#define MT6357_AUXADC_ADC6 0x1094
+#define MT6357_AUXADC_ADC7 0x1096
+#define MT6357_AUXADC_ADC8 0x1098
+#define MT6357_AUXADC_ADC9 0x109a
+#define MT6357_AUXADC_ADC10 0x109c
+#define MT6357_AUXADC_ADC11 0x109e
+#define MT6357_AUXADC_ADC12 0x10a0
+#define MT6357_AUXADC_ADC14 0x10a2
+#define MT6357_AUXADC_ADC16 0x10a4
+#define MT6357_AUXADC_ADC17 0x10a6
+#define MT6357_AUXADC_ADC18 0x10a8
+#define MT6357_AUXADC_ADC19 0x10aa
+#define MT6357_AUXADC_ADC20 0x10ac
+#define MT6357_AUXADC_ADC21 0x10ae
+#define MT6357_AUXADC_ADC22 0x10b0
+#define MT6357_AUXADC_ADC23 0x10b2
+#define MT6357_AUXADC_ADC24 0x10b4
+#define MT6357_AUXADC_ADC25 0x10b6
+#define MT6357_AUXADC_ADC26 0x10b8
+#define MT6357_AUXADC_ADC27 0x10ba
+#define MT6357_AUXADC_ADC29 0x10bc
+#define MT6357_AUXADC_ADC30 0x10be
+#define MT6357_AUXADC_ADC31 0x10c0
+#define MT6357_AUXADC_ADC32 0x10c2
+#define MT6357_AUXADC_ADC33 0x10c4
+#define MT6357_AUXADC_ADC34 0x10c6
+#define MT6357_AUXADC_ADC35 0x10c8
+#define MT6357_AUXADC_ADC36 0x10ca
+#define MT6357_AUXADC_ADC38 0x10cc
+#define MT6357_AUXADC_ADC39 0x10ce
+#define MT6357_AUXADC_ADC40 0x10d0
+#define MT6357_AUXADC_ADC41 0x10d2
+#define MT6357_AUXADC_ADC42 0x10d4
+#define MT6357_AUXADC_ADC43 0x10d6
+#define MT6357_AUXADC_ADC46 0x10d8
+#define MT6357_AUXADC_ADC47 0x10da
+#define MT6357_AUXADC_DIG_1_ELR_NUM 0x10dc
+#define MT6357_AUXADC_DIG_1_ELR0 0x10de
+#define MT6357_AUXADC_DIG_1_ELR1 0x10e0
+#define MT6357_AUXADC_DIG_2_DSN_ID 0x1100
+#define MT6357_AUXADC_DIG_2_DSN_REV0 0x1102
+#define MT6357_AUXADC_DIG_2_DSN_DBI 0x1104
+#define MT6357_AUXADC_DIG_2_DSN_DXI 0x1106
+#define MT6357_AUXADC_STA0 0x1108
+#define MT6357_AUXADC_STA1 0x110a
+#define MT6357_AUXADC_STA2 0x110c
+#define MT6357_AUXADC_RQST0 0x110e
+#define MT6357_AUXADC_RQST0_SET 0x1110
+#define MT6357_AUXADC_RQST0_CLR 0x1112
+#define MT6357_AUXADC_RQST2 0x1114
+#define MT6357_AUXADC_RQST2_SET 0x1116
+#define MT6357_AUXADC_RQST2_CLR 0x1118
+#define MT6357_AUXADC_RQST1 0x111a
+#define MT6357_AUXADC_RQST1_SET 0x111c
+#define MT6357_AUXADC_RQST1_CLR 0x111e
+#define MT6357_AUXADC_CON0 0x1120
+#define MT6357_AUXADC_CON0_SET 0x1122
+#define MT6357_AUXADC_CON0_CLR 0x1124
+#define MT6357_AUXADC_CON1 0x1126
+#define MT6357_AUXADC_CON2 0x1128
+#define MT6357_AUXADC_CON3 0x112a
+#define MT6357_AUXADC_CON4 0x112c
+#define MT6357_AUXADC_CON5 0x112e
+#define MT6357_AUXADC_CON6 0x1130
+#define MT6357_AUXADC_CON7 0x1132
+#define MT6357_AUXADC_CON8 0x1134
+#define MT6357_AUXADC_CON9 0x1136
+#define MT6357_AUXADC_CON10 0x1138
+#define MT6357_AUXADC_CON11 0x113a
+#define MT6357_AUXADC_CON12 0x113c
+#define MT6357_AUXADC_CON13 0x113e
+#define MT6357_AUXADC_CON14 0x1140
+#define MT6357_AUXADC_CON15 0x1142
+#define MT6357_AUXADC_CON16 0x1144
+#define MT6357_AUXADC_CON17 0x1146
+#define MT6357_AUXADC_CON18 0x1148
+#define MT6357_AUXADC_CON19 0x114a
+#define MT6357_AUXADC_CON20 0x114c
+#define MT6357_AUXADC_DIG_3_DSN_ID 0x1180
+#define MT6357_AUXADC_DIG_3_DSN_REV0 0x1182
+#define MT6357_AUXADC_DIG_3_DSN_DBI 0x1184
+#define MT6357_AUXADC_DIG_3_DSN_DXI 0x1186
+#define MT6357_AUXADC_AUTORPT0 0x1188
+#define MT6357_AUXADC_LBAT0 0x118a
+#define MT6357_AUXADC_LBAT1 0x118c
+#define MT6357_AUXADC_LBAT2 0x118e
+#define MT6357_AUXADC_LBAT3 0x1190
+#define MT6357_AUXADC_LBAT4 0x1192
+#define MT6357_AUXADC_LBAT5 0x1194
+#define MT6357_AUXADC_LBAT6 0x1196
+#define MT6357_AUXADC_ACCDET 0x1198
+#define MT6357_AUXADC_DBG0 0x119a
+#define MT6357_AUXADC_IMP0 0x119c
+#define MT6357_AUXADC_IMP1 0x119e
+#define MT6357_AUXADC_DIG_3_ELR_NUM 0x11a0
+#define MT6357_AUXADC_DIG_3_ELR0 0x11a2
+#define MT6357_AUXADC_DIG_3_ELR1 0x11a4
+#define MT6357_AUXADC_DIG_3_ELR2 0x11a6
+#define MT6357_AUXADC_DIG_3_ELR3 0x11a8
+#define MT6357_AUXADC_DIG_3_ELR4 0x11aa
+#define MT6357_AUXADC_DIG_3_ELR5 0x11ac
+#define MT6357_AUXADC_DIG_3_ELR6 0x11ae
+#define MT6357_AUXADC_DIG_3_ELR7 0x11b0
+#define MT6357_AUXADC_DIG_3_ELR8 0x11b2
+#define MT6357_AUXADC_DIG_3_ELR9 0x11b4
+#define MT6357_AUXADC_DIG_3_ELR10 0x11b6
+#define MT6357_AUXADC_DIG_3_ELR11 0x11b8
+#define MT6357_AUXADC_DIG_4_DSN_ID 0x1200
+#define MT6357_AUXADC_DIG_4_DSN_REV0 0x1202
+#define MT6357_AUXADC_DIG_4_DSN_DBI 0x1204
+#define MT6357_AUXADC_DIG_4_DSN_DXI 0x1206
+#define MT6357_AUXADC_MDRT_0 0x1208
+#define MT6357_AUXADC_MDRT_1 0x120a
+#define MT6357_AUXADC_MDRT_2 0x120c
+#define MT6357_AUXADC_MDRT_3 0x120e
+#define MT6357_AUXADC_MDRT_4 0x1210
+#define MT6357_AUXADC_DCXO_MDRT_0 0x1212
+#define MT6357_AUXADC_DCXO_MDRT_1 0x1214
+#define MT6357_AUXADC_DCXO_MDRT_2 0x1216
+#define MT6357_AUXADC_NAG_0 0x1218
+#define MT6357_AUXADC_NAG_1 0x121a
+#define MT6357_AUXADC_NAG_2 0x121c
+#define MT6357_AUXADC_NAG_3 0x121e
+#define MT6357_AUXADC_NAG_4 0x1220
+#define MT6357_AUXADC_NAG_5 0x1222
+#define MT6357_AUXADC_NAG_6 0x1224
+#define MT6357_AUXADC_NAG_7 0x1226
+#define MT6357_AUXADC_NAG_8 0x1228
+#define MT6357_AUXADC_RSV_1 0x122a
+#define MT6357_AUXADC_ANA_0 0x122c
+#define MT6357_AUXADC_IMP_CG0 0x122e
+#define MT6357_AUXADC_LBAT_CG0 0x1230
+#define MT6357_AUXADC_NAG_CG0 0x1232
+#define MT6357_AUXADC_PRI_NEW 0x1234
+#define MT6357_AUXADC_CHR_TOP_CON2 0x1236
+#define MT6357_BUCK_TOP_DSN_ID 0x1400
+#define MT6357_BUCK_TOP_DSN_REV0 0x1402
+#define MT6357_BUCK_TOP_DBI 0x1404
+#define MT6357_BUCK_TOP_DXI 0x1406
+#define MT6357_BUCK_TOP_PAM0 0x1408
+#define MT6357_BUCK_TOP_PAM1 0x140a
+#define MT6357_BUCK_TOP_CLK_CON0 0x140c
+#define MT6357_BUCK_TOP_CLK_CON0_SET 0x140e
+#define MT6357_BUCK_TOP_CLK_CON0_CLR 0x1410
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0 0x1412
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0_SET 0x1414
+#define MT6357_BUCK_TOP_CLK_HWEN_CON0_CLR 0x1416
+#define MT6357_BUCK_TOP_CLK_MISC_CON0 0x1418
+#define MT6357_BUCK_TOP_INT_CON0 0x141a
+#define MT6357_BUCK_TOP_INT_CON0_SET 0x141c
+#define MT6357_BUCK_TOP_INT_CON0_CLR 0x141e
+#define MT6357_BUCK_TOP_INT_MASK_CON0 0x1420
+#define MT6357_BUCK_TOP_INT_MASK_CON0_SET 0x1422
+#define MT6357_BUCK_TOP_INT_MASK_CON0_CLR 0x1424
+#define MT6357_BUCK_TOP_INT_STATUS0 0x1426
+#define MT6357_BUCK_TOP_INT_RAW_STATUS0 0x1428
+#define MT6357_BUCK_TOP_STB_CON 0x142a
+#define MT6357_BUCK_TOP_SLP_CON0 0x142c
+#define MT6357_BUCK_TOP_SLP_CON1 0x142e
+#define MT6357_BUCK_TOP_SLP_CON2 0x1430
+#define MT6357_BUCK_TOP_MINFREQ_CON 0x1432
+#define MT6357_BUCK_TOP_OC_CON0 0x1434
+#define MT6357_BUCK_TOP_K_CON0 0x1436
+#define MT6357_BUCK_TOP_K_CON1 0x1438
+#define MT6357_BUCK_TOP_K_CON2 0x143a
+#define MT6357_BUCK_TOP_WDTDBG0 0x143c
+#define MT6357_BUCK_TOP_WDTDBG1 0x143e
+#define MT6357_BUCK_TOP_WDTDBG2 0x1440
+#define MT6357_BUCK_TOP_ELR_NUM 0x1442
+#define MT6357_BUCK_TOP_ELR0 0x1444
+#define MT6357_BUCK_TOP_ELR1 0x1446
+#define MT6357_BUCK_VPROC_DSN_ID 0x1480
+#define MT6357_BUCK_VPROC_DSN_REV0 0x1482
+#define MT6357_BUCK_VPROC_DSN_DBI 0x1484
+#define MT6357_BUCK_VPROC_DSN_DXI 0x1486
+#define MT6357_BUCK_VPROC_CON0 0x1488
+#define MT6357_BUCK_VPROC_CON1 0x148a
+#define MT6357_BUCK_VPROC_CFG0 0x148c
+#define MT6357_BUCK_VPROC_CFG1 0x148e
+#define MT6357_BUCK_VPROC_OP_EN 0x1490
+#define MT6357_BUCK_VPROC_OP_EN_SET 0x1492
+#define MT6357_BUCK_VPROC_OP_EN_CLR 0x1494
+#define MT6357_BUCK_VPROC_OP_CFG 0x1496
+#define MT6357_BUCK_VPROC_OP_CFG_SET 0x1498
+#define MT6357_BUCK_VPROC_OP_CFG_CLR 0x149a
+#define MT6357_BUCK_VPROC_SP_CON 0x149c
+#define MT6357_BUCK_VPROC_SP_CFG 0x149e
+#define MT6357_BUCK_VPROC_OC_CFG 0x14a0
+#define MT6357_BUCK_VPROC_DBG0 0x14a2
+#define MT6357_BUCK_VPROC_DBG1 0x14a4
+#define MT6357_BUCK_VPROC_DBG2 0x14a6
+#define MT6357_BUCK_VPROC_ELR_NUM 0x14a8
+#define MT6357_BUCK_VPROC_ELR0 0x14aa
+#define MT6357_BUCK_VCORE_DSN_ID 0x1500
+#define MT6357_BUCK_VCORE_DSN_REV0 0x1502
+#define MT6357_BUCK_VCORE_DSN_DBI 0x1504
+#define MT6357_BUCK_VCORE_DSN_DXI 0x1506
+#define MT6357_BUCK_VCORE_CON0 0x1508
+#define MT6357_BUCK_VCORE_CON1 0x150a
+#define MT6357_BUCK_VCORE_CFG0 0x150c
+#define MT6357_BUCK_VCORE_CFG1 0x150e
+#define MT6357_BUCK_VCORE_OP_EN 0x1510
+#define MT6357_BUCK_VCORE_OP_EN_SET 0x1512
+#define MT6357_BUCK_VCORE_OP_EN_CLR 0x1514
+#define MT6357_BUCK_VCORE_OP_CFG 0x1516
+#define MT6357_BUCK_VCORE_OP_CFG_SET 0x1518
+#define MT6357_BUCK_VCORE_OP_CFG_CLR 0x151a
+#define MT6357_BUCK_VCORE_SP_CON 0x151c
+#define MT6357_BUCK_VCORE_SP_CFG 0x151e
+#define MT6357_BUCK_VCORE_OC_CFG 0x1520
+#define MT6357_BUCK_VCORE_DBG0 0x1522
+#define MT6357_BUCK_VCORE_DBG1 0x1524
+#define MT6357_BUCK_VCORE_DBG2 0x1526
+#define MT6357_BUCK_VCORE_ELR_NUM 0x1528
+#define MT6357_BUCK_VCORE_ELR0 0x152a
+#define MT6357_BUCK_VMODEM_DSN_ID 0x1580
+#define MT6357_BUCK_VMODEM_DSN_REV0 0x1582
+#define MT6357_BUCK_VMODEM_DSN_DBI 0x1584
+#define MT6357_BUCK_VMODEM_DSN_DXI 0x1586
+#define MT6357_BUCK_VMODEM_CON0 0x1588
+#define MT6357_BUCK_VMODEM_CON1 0x158a
+#define MT6357_BUCK_VMODEM_CFG0 0x158c
+#define MT6357_BUCK_VMODEM_CFG1 0x158e
+#define MT6357_BUCK_VMODEM_OP_EN 0x1590
+#define MT6357_BUCK_VMODEM_OP_EN_SET 0x1592
+#define MT6357_BUCK_VMODEM_OP_EN_CLR 0x1594
+#define MT6357_BUCK_VMODEM_OP_CFG 0x1596
+#define MT6357_BUCK_VMODEM_OP_CFG_SET 0x1598
+#define MT6357_BUCK_VMODEM_OP_CFG_CLR 0x159a
+#define MT6357_BUCK_VMODEM_SP_CON 0x159c
+#define MT6357_BUCK_VMODEM_SP_CFG 0x159e
+#define MT6357_BUCK_VMODEM_OC_CFG 0x15a0
+#define MT6357_BUCK_VMODEM_DBG0 0x15a2
+#define MT6357_BUCK_VMODEM_DBG1 0x15a4
+#define MT6357_BUCK_VMODEM_DBG2 0x15a6
+#define MT6357_BUCK_VMODEM_ELR_NUM 0x15a8
+#define MT6357_BUCK_VMODEM_ELR0 0x15aa
+#define MT6357_BUCK_VS1_DSN_ID 0x1600
+#define MT6357_BUCK_VS1_DSN_REV0 0x1602
+#define MT6357_BUCK_VS1_DSN_DBI 0x1604
+#define MT6357_BUCK_VS1_DSN_DXI 0x1606
+#define MT6357_BUCK_VS1_CON0 0x1608
+#define MT6357_BUCK_VS1_CON1 0x160a
+#define MT6357_BUCK_VS1_CFG0 0x160c
+#define MT6357_BUCK_VS1_CFG1 0x160e
+#define MT6357_BUCK_VS1_OP_EN 0x1610
+#define MT6357_BUCK_VS1_OP_EN_SET 0x1612
+#define MT6357_BUCK_VS1_OP_EN_CLR 0x1614
+#define MT6357_BUCK_VS1_OP_CFG 0x1616
+#define MT6357_BUCK_VS1_OP_CFG_SET 0x1618
+#define MT6357_BUCK_VS1_OP_CFG_CLR 0x161a
+#define MT6357_BUCK_VS1_SP_CON 0x161c
+#define MT6357_BUCK_VS1_SP_CFG 0x161e
+#define MT6357_BUCK_VS1_OC_CFG 0x1620
+#define MT6357_BUCK_VS1_DBG0 0x1622
+#define MT6357_BUCK_VS1_DBG1 0x1624
+#define MT6357_BUCK_VS1_DBG2 0x1626
+#define MT6357_BUCK_VS1_VOTER 0x1628
+#define MT6357_BUCK_VS1_VOTER_SET 0x162a
+#define MT6357_BUCK_VS1_VOTER_CLR 0x162c
+#define MT6357_BUCK_VS1_VOTER_CFG 0x162e
+#define MT6357_BUCK_VS1_ELR_NUM 0x1630
+#define MT6357_BUCK_VS1_ELR0 0x1632
+#define MT6357_BUCK_VPA_DSN_ID 0x1680
+#define MT6357_BUCK_VPA_DSN_REV0 0x1682
+#define MT6357_BUCK_VPA_DSN_DBI 0x1684
+#define MT6357_BUCK_VPA_DSN_DXI 0x1686
+#define MT6357_BUCK_VPA_CON0 0x1688
+#define MT6357_BUCK_VPA_CON1 0x168a
+#define MT6357_BUCK_VPA_CFG0 0x168c
+#define MT6357_BUCK_VPA_CFG1 0x168e
+#define MT6357_BUCK_VPA_OC_CFG 0x1690
+#define MT6357_BUCK_VPA_DBG0 0x1692
+#define MT6357_BUCK_VPA_DBG1 0x1694
+#define MT6357_BUCK_VPA_DBG2 0x1696
+#define MT6357_BUCK_VPA_DLC_CON0 0x1698
+#define MT6357_BUCK_VPA_DLC_CON1 0x169a
+#define MT6357_BUCK_VPA_DLC_CON2 0x169c
+#define MT6357_BUCK_VPA_MSFG_CON0 0x169e
+#define MT6357_BUCK_VPA_MSFG_CON1 0x16a0
+#define MT6357_BUCK_VPA_MSFG_RRATE0 0x16a2
+#define MT6357_BUCK_VPA_MSFG_RRATE1 0x16a4
+#define MT6357_BUCK_VPA_MSFG_RRATE2 0x16a6
+#define MT6357_BUCK_VPA_MSFG_RTHD0 0x16a8
+#define MT6357_BUCK_VPA_MSFG_RTHD1 0x16aa
+#define MT6357_BUCK_VPA_MSFG_RTHD2 0x16ac
+#define MT6357_BUCK_VPA_MSFG_FRATE0 0x16ae
+#define MT6357_BUCK_VPA_MSFG_FRATE1 0x16b0
+#define MT6357_BUCK_VPA_MSFG_FRATE2 0x16b2
+#define MT6357_BUCK_VPA_MSFG_FTHD0 0x16b4
+#define MT6357_BUCK_VPA_MSFG_FTHD1 0x16b6
+#define MT6357_BUCK_VPA_MSFG_FTHD2 0x16b8
+#define MT6357_BUCK_ANA_DSN_ID 0x1700
+#define MT6357_BUCK_ANA_DSN_REV0 0x1702
+#define MT6357_BUCK_ANA_DSN_DBI 0x1704
+#define MT6357_BUCK_ANA_DSN_FPI 0x1706
+#define MT6357_SMPS_ANA_CON0 0x1708
+#define MT6357_SMPS_ANA_CON1 0x170a
+#define MT6357_SMPS_ANA_CON2 0x170c
+#define MT6357_VCORE_VPROC_ANA_CON0 0x170e
+#define MT6357_VCORE_VPROC_ANA_CON1 0x1710
+#define MT6357_VCORE_VPROC_ANA_CON2 0x1712
+#define MT6357_VCORE_VPROC_ANA_CON3 0x1714
+#define MT6357_VCORE_VPROC_ANA_CON4 0x1716
+#define MT6357_VCORE_VPROC_ANA_CON5 0x1718
+#define MT6357_VCORE_VPROC_ANA_CON6 0x171a
+#define MT6357_VCORE_VPROC_ANA_CON7 0x171c
+#define MT6357_VCORE_VPROC_ANA_CON8 0x171e
+#define MT6357_VCORE_VPROC_ANA_CON9 0x1720
+#define MT6357_VCORE_VPROC_ANA_CON10 0x1722
+#define MT6357_VCORE_VPROC_ANA_CON11 0x1724
+#define MT6357_VMODEM_ANA_CON0 0x1726
+#define MT6357_VMODEM_ANA_CON1 0x1728
+#define MT6357_VMODEM_ANA_CON2 0x172a
+#define MT6357_VMODEM_ANA_CON3 0x172c
+#define MT6357_VMODEM_ANA_CON4 0x172e
+#define MT6357_VMODEM_ANA_CON5 0x1730
+#define MT6357_VS1_ANA_CON0 0x1732
+#define MT6357_VS1_ANA_CON1 0x1734
+#define MT6357_VS1_ANA_CON2 0x1736
+#define MT6357_VS1_ANA_CON3 0x1738
+#define MT6357_VS1_ANA_CON4 0x173a
+#define MT6357_VS1_ANA_CON5 0x173c
+#define MT6357_VPA_ANA_CON0 0x173e
+#define MT6357_VPA_ANA_CON1 0x1740
+#define MT6357_VPA_ANA_CON2 0x1742
+#define MT6357_VPA_ANA_CON3 0x1744
+#define MT6357_VPA_ANA_CON4 0x1746
+#define MT6357_VPA_ANA_CON5 0x1748
+#define MT6357_BUCK_ANA_ELR_NUM 0x174a
+#define MT6357_SMPS_ELR_0 0x174c
+#define MT6357_SMPS_ELR_1 0x174e
+#define MT6357_SMPS_ELR_2 0x1750
+#define MT6357_SMPS_ELR_3 0x1752
+#define MT6357_SMPS_ELR_4 0x1754
+#define MT6357_SMPS_ELR_5 0x1756
+#define MT6357_VCORE_VPROC_ELR_0 0x1758
+#define MT6357_VCORE_VPROC_ELR_1 0x175a
+#define MT6357_VCORE_VPROC_ELR_2 0x175c
+#define MT6357_VCORE_VPROC_ELR_3 0x175e
+#define MT6357_VCORE_VPROC_ELR_4 0x1760
+#define MT6357_VMODEM_ELR_0 0x1762
+#define MT6357_VMODEM_ELR_1 0x1764
+#define MT6357_VMODEM_ELR_2 0x1766
+#define MT6357_VS1_ELR_0 0x1768
+#define MT6357_VS1_ELR_1 0x176a
+#define MT6357_VPA_ELR_0 0x176c
+#define MT6357_LDO_TOP_ID 0x1880
+#define MT6357_LDO_TOP_REV0 0x1882
+#define MT6357_LDO_TOP_DBI 0x1884
+#define MT6357_LDO_TOP_DXI 0x1886
+#define MT6357_LDO_TPM0 0x1888
+#define MT6357_LDO_TPM1 0x188a
+#define MT6357_LDO_TOP_CLK_DCM_CON0 0x188c
+#define MT6357_LDO_TOP_CLK_VIO28_CON0 0x188e
+#define MT6357_LDO_TOP_CLK_VIO18_CON0 0x1890
+#define MT6357_LDO_TOP_CLK_VAUD28_CON0 0x1892
+#define MT6357_LDO_TOP_CLK_VDRAM_CON0 0x1894
+#define MT6357_LDO_TOP_CLK_VSRAM_PROC_CON0 0x1896
+#define MT6357_LDO_TOP_CLK_VSRAM_OTHERS_CON0 0x1898
+#define MT6357_LDO_TOP_CLK_VAUX18_CON0 0x189a
+#define MT6357_LDO_TOP_CLK_VUSB33_CON0 0x189c
+#define MT6357_LDO_TOP_CLK_VEMC_CON0 0x189e
+#define MT6357_LDO_TOP_CLK_VXO22_CON0 0x18a0
+#define MT6357_LDO_TOP_CLK_VSIM1_CON0 0x18a2
+#define MT6357_LDO_TOP_CLK_VSIM2_CON0 0x18a4
+#define MT6357_LDO_TOP_CLK_VCAMD_CON0 0x18a6
+#define MT6357_LDO_TOP_CLK_VCAMIO_CON0 0x18a8
+#define MT6357_LDO_TOP_CLK_VEFUSE_CON0 0x18aa
+#define MT6357_LDO_TOP_CLK_VCN33_CON0 0x18ac
+#define MT6357_LDO_TOP_CLK_VCN18_CON0 0x18ae
+#define MT6357_LDO_TOP_CLK_VCN28_CON0 0x18b0
+#define MT6357_LDO_TOP_CLK_VIBR_CON0 0x18b2
+#define MT6357_LDO_TOP_CLK_VFE28_CON0 0x18b4
+#define MT6357_LDO_TOP_CLK_VMCH_CON0 0x18b6
+#define MT6357_LDO_TOP_CLK_VMC_CON0 0x18b8
+#define MT6357_LDO_TOP_CLK_VRF18_CON0 0x18ba
+#define MT6357_LDO_TOP_CLK_VLDO28_CON0 0x18bc
+#define MT6357_LDO_TOP_CLK_VRF12_CON0 0x18be
+#define MT6357_LDO_TOP_CLK_VCAMA_CON0 0x18c0
+#define MT6357_LDO_TOP_CLK_TREF_CON0 0x18c2
+#define MT6357_LDO_TOP_INT_CON0 0x18c4
+#define MT6357_LDO_TOP_INT_CON0_SET 0x18c6
+#define MT6357_LDO_TOP_INT_CON0_CLR 0x18c8
+#define MT6357_LDO_TOP_INT_CON1 0x18ca
+#define MT6357_LDO_TOP_INT_CON1_SET 0x18cc
+#define MT6357_LDO_TOP_INT_CON1_CLR 0x18ce
+#define MT6357_LDO_TOP_INT_MASK_CON0 0x18d0
+#define MT6357_LDO_TOP_INT_MASK_CON0_SET 0x18d2
+#define MT6357_LDO_TOP_INT_MASK_CON0_CLR 0x18d4
+#define MT6357_LDO_TOP_INT_MASK_CON1 0x18d6
+#define MT6357_LDO_TOP_INT_MASK_CON1_SET 0x18d8
+#define MT6357_LDO_TOP_INT_MASK_CON1_CLR 0x18da
+#define MT6357_LDO_TOP_INT_STATUS0 0x18dc
+#define MT6357_LDO_TOP_INT_STATUS1 0x18de
+#define MT6357_LDO_TOP_INT_RAW_STATUS0 0x18e0
+#define MT6357_LDO_TOP_INT_RAW_STATUS1 0x18e2
+#define MT6357_LDO_TEST_CON0 0x18e4
+#define MT6357_LDO_TOP_WDT_CON0 0x18e6
+#define MT6357_LDO_TOP_RSV_CON0 0x18e8
+#define MT6357_LDO_TOP_RSV_CON1 0x18ea
+#define MT6357_LDO_OCFB0 0x18ec
+#define MT6357_LDO_LP_PROTECTION 0x18ee
+#define MT6357_LDO_DUMMY_LOAD_GATED 0x18f0
+#define MT6357_LDO_GON0_DSN_ID 0x1900
+#define MT6357_LDO_GON0_DSN_REV0 0x1902
+#define MT6357_LDO_GON0_DSN_DBI 0x1904
+#define MT6357_LDO_GON0_DSN_DXI 0x1906
+#define MT6357_LDO_VXO22_CON0 0x1908
+#define MT6357_LDO_VXO22_OP_EN 0x190a
+#define MT6357_LDO_VXO22_OP_EN_SET 0x190c
+#define MT6357_LDO_VXO22_OP_EN_CLR 0x190e
+#define MT6357_LDO_VXO22_OP_CFG 0x1910
+#define MT6357_LDO_VXO22_OP_CFG_SET 0x1912
+#define MT6357_LDO_VXO22_OP_CFG_CLR 0x1914
+#define MT6357_LDO_VXO22_CON1 0x1916
+#define MT6357_LDO_VXO22_CON2 0x1918
+#define MT6357_LDO_VXO22_CON3 0x191a
+#define MT6357_LDO_VAUX18_CON0 0x191c
+#define MT6357_LDO_VAUX18_OP_EN 0x191e
+#define MT6357_LDO_VAUX18_OP_EN_SET 0x1920
+#define MT6357_LDO_VAUX18_OP_EN_CLR 0x1922
+#define MT6357_LDO_VAUX18_OP_CFG 0x1924
+#define MT6357_LDO_VAUX18_OP_CFG_SET 0x1926
+#define MT6357_LDO_VAUX18_OP_CFG_CLR 0x1928
+#define MT6357_LDO_VAUX18_CON1 0x192a
+#define MT6357_LDO_VAUX18_CON2 0x192c
+#define MT6357_LDO_VAUX18_CON3 0x192e
+#define MT6357_LDO_VAUD28_CON0 0x1930
+#define MT6357_LDO_VAUD28_OP_EN 0x1932
+#define MT6357_LDO_VAUD28_OP_EN_SET 0x1934
+#define MT6357_LDO_VAUD28_OP_EN_CLR 0x1936
+#define MT6357_LDO_VAUD28_OP_CFG 0x1938
+#define MT6357_LDO_VAUD28_OP_CFG_SET 0x193a
+#define MT6357_LDO_VAUD28_OP_CFG_CLR 0x193c
+#define MT6357_LDO_VAUD28_CON1 0x193e
+#define MT6357_LDO_VAUD28_CON2 0x1940
+#define MT6357_LDO_VAUD28_CON3 0x1942
+#define MT6357_LDO_VIO28_CON0 0x1944
+#define MT6357_LDO_VIO28_OP_EN 0x1946
+#define MT6357_LDO_VIO28_OP_EN_SET 0x1948
+#define MT6357_LDO_VIO28_OP_EN_CLR 0x194a
+#define MT6357_LDO_VIO28_OP_CFG 0x194c
+#define MT6357_LDO_VIO28_OP_CFG_SET 0x194e
+#define MT6357_LDO_VIO28_OP_CFG_CLR 0x1950
+#define MT6357_LDO_VIO28_CON1 0x1952
+#define MT6357_LDO_VIO28_CON2 0x1954
+#define MT6357_LDO_VIO28_CON3 0x1956
+#define MT6357_LDO_VIO18_CON0 0x1958
+#define MT6357_LDO_VIO18_OP_EN 0x195a
+#define MT6357_LDO_VIO18_OP_EN_SET 0x195c
+#define MT6357_LDO_VIO18_OP_EN_CLR 0x195e
+#define MT6357_LDO_VIO18_OP_CFG 0x1960
+#define MT6357_LDO_VIO18_OP_CFG_SET 0x1962
+#define MT6357_LDO_VIO18_OP_CFG_CLR 0x1964
+#define MT6357_LDO_VIO18_CON1 0x1966
+#define MT6357_LDO_VIO18_CON2 0x1968
+#define MT6357_LDO_VIO18_CON3 0x196a
+#define MT6357_LDO_VDRAM_CON0 0x196c
+#define MT6357_LDO_VDRAM_OP_EN 0x196e
+#define MT6357_LDO_VDRAM_OP_EN_SET 0x1970
+#define MT6357_LDO_VDRAM_OP_EN_CLR 0x1972
+#define MT6357_LDO_VDRAM_OP_CFG 0x1974
+#define MT6357_LDO_VDRAM_OP_CFG_SET 0x1976
+#define MT6357_LDO_VDRAM_OP_CFG_CLR 0x1978
+#define MT6357_LDO_VDRAM_CON1 0x197a
+#define MT6357_LDO_VDRAM_CON2 0x197c
+#define MT6357_LDO_VDRAM_CON3 0x197e
+#define MT6357_LDO_GON1_DSN_ID 0x1980
+#define MT6357_LDO_GON1_DSN_REV0 0x1982
+#define MT6357_LDO_GON1_DSN_DBI 0x1984
+#define MT6357_LDO_GON1_DSN_DXI 0x1986
+#define MT6357_LDO_VEMC_CON0 0x1988
+#define MT6357_LDO_VEMC_OP_EN 0x198a
+#define MT6357_LDO_VEMC_OP_EN_SET 0x198c
+#define MT6357_LDO_VEMC_OP_EN_CLR 0x198e
+#define MT6357_LDO_VEMC_OP_CFG 0x1990
+#define MT6357_LDO_VEMC_OP_CFG_SET 0x1992
+#define MT6357_LDO_VEMC_OP_CFG_CLR 0x1994
+#define MT6357_LDO_VEMC_CON1 0x1996
+#define MT6357_LDO_VEMC_CON2 0x1998
+#define MT6357_LDO_VEMC_CON3 0x199a
+#define MT6357_LDO_VUSB33_CON0_0 0x199c
+#define MT6357_LDO_VUSB33_OP_EN 0x199e
+#define MT6357_LDO_VUSB33_OP_EN_SET 0x19a0
+#define MT6357_LDO_VUSB33_OP_EN_CLR 0x19a2
+#define MT6357_LDO_VUSB33_OP_CFG 0x19a4
+#define MT6357_LDO_VUSB33_OP_CFG_SET 0x19a6
+#define MT6357_LDO_VUSB33_OP_CFG_CLR 0x19a8
+#define MT6357_LDO_VUSB33_CON0_1 0x19aa
+#define MT6357_LDO_VUSB33_CON1 0x19ac
+#define MT6357_LDO_VUSB33_CON2 0x19ae
+#define MT6357_LDO_VUSB33_CON3 0x19b0
+#define MT6357_LDO_VSRAM_PROC_CON0 0x19b2
+#define MT6357_LDO_VSRAM_PROC_CON2 0x19b4
+#define MT6357_LDO_VSRAM_PROC_CFG0 0x19b6
+#define MT6357_LDO_VSRAM_PROC_CFG1 0x19b8
+#define MT6357_LDO_VSRAM_PROC_OP_EN 0x19ba
+#define MT6357_LDO_VSRAM_PROC_OP_EN_SET 0x19bc
+#define MT6357_LDO_VSRAM_PROC_OP_EN_CLR 0x19be
+#define MT6357_LDO_VSRAM_PROC_OP_CFG 0x19c0
+#define MT6357_LDO_VSRAM_PROC_OP_CFG_SET 0x19c2
+#define MT6357_LDO_VSRAM_PROC_OP_CFG_CLR 0x19c4
+#define MT6357_LDO_VSRAM_PROC_CON3 0x19c6
+#define MT6357_LDO_VSRAM_PROC_CON4 0x19c8
+#define MT6357_LDO_VSRAM_PROC_CON5 0x19ca
+#define MT6357_LDO_VSRAM_PROC_DBG0 0x19cc
+#define MT6357_LDO_VSRAM_PROC_DBG1 0x19ce
+#define MT6357_LDO_VSRAM_OTHERS_CON0 0x19d0
+#define MT6357_LDO_VSRAM_OTHERS_CON2 0x19d2
+#define MT6357_LDO_VSRAM_OTHERS_CFG0 0x19d4
+#define MT6357_LDO_VSRAM_OTHERS_CFG1 0x19d6
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN 0x19d8
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN_SET 0x19da
+#define MT6357_LDO_VSRAM_OTHERS_OP_EN_CLR 0x19dc
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG 0x19de
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG_SET 0x19e0
+#define MT6357_LDO_VSRAM_OTHERS_OP_CFG_CLR 0x19e2
+#define MT6357_LDO_VSRAM_OTHERS_CON3 0x19e4
+#define MT6357_LDO_VSRAM_OTHERS_CON4 0x19e6
+#define MT6357_LDO_VSRAM_OTHERS_CON5 0x19e8
+#define MT6357_LDO_VSRAM_OTHERS_DBG0 0x19ea
+#define MT6357_LDO_VSRAM_OTHERS_DBG1 0x19ec
+#define MT6357_LDO_VSRAM_PROC_SP 0x19ee
+#define MT6357_LDO_VSRAM_OTHERS_SP 0x19f0
+#define MT6357_LDO_VSRAM_PROC_R2R_PDN_DIS 0x19f2
+#define MT6357_LDO_VSRAM_OTHERS_R2R_PDN_DIS 0x19f4
+#define MT6357_LDO_VSRAM_WDT_DBG0 0x19f6
+#define MT6357_LDO_GON1_ELR_NUM 0x19f8
+#define MT6357_LDO_VSRAM_CON0 0x19fa
+#define MT6357_LDO_VSRAM_CON1 0x19fc
+#define MT6357_LDO_VSRAM_CON2 0x19fe
+#define MT6357_LDO_GOFF0_DSN_ID 0x1a00
+#define MT6357_LDO_GOFF0_DSN_REV0 0x1a02
+#define MT6357_LDO_GOFF0_DSN_DBI 0x1a04
+#define MT6357_LDO_GOFF0_DSN_DXI 0x1a06
+#define MT6357_LDO_VFE28_CON0 0x1a08
+#define MT6357_LDO_VFE28_OP_EN 0x1a0a
+#define MT6357_LDO_VFE28_OP_EN_SET 0x1a0c
+#define MT6357_LDO_VFE28_OP_EN_CLR 0x1a0e
+#define MT6357_LDO_VFE28_OP_CFG 0x1a10
+#define MT6357_LDO_VFE28_OP_CFG_SET 0x1a12
+#define MT6357_LDO_VFE28_OP_CFG_CLR 0x1a14
+#define MT6357_LDO_VFE28_CON1 0x1a16
+#define MT6357_LDO_VFE28_CON2 0x1a18
+#define MT6357_LDO_VFE28_CON3 0x1a1a
+#define MT6357_LDO_VRF18_CON0 0x1a1c
+#define MT6357_LDO_VRF18_OP_EN 0x1a1e
+#define MT6357_LDO_VRF18_OP_EN_SET 0x1a20
+#define MT6357_LDO_VRF18_OP_EN_CLR 0x1a22
+#define MT6357_LDO_VRF18_OP_CFG 0x1a24
+#define MT6357_LDO_VRF18_OP_CFG_SET 0x1a26
+#define MT6357_LDO_VRF18_OP_CFG_CLR 0x1a28
+#define MT6357_LDO_VRF18_CON1 0x1a2a
+#define MT6357_LDO_VRF18_CON2 0x1a2c
+#define MT6357_LDO_VRF18_CON3 0x1a2e
+#define MT6357_LDO_VRF12_CON0 0x1a30
+#define MT6357_LDO_VRF12_OP_EN 0x1a32
+#define MT6357_LDO_VRF12_OP_EN_SET 0x1a34
+#define MT6357_LDO_VRF12_OP_EN_CLR 0x1a36
+#define MT6357_LDO_VRF12_OP_CFG 0x1a38
+#define MT6357_LDO_VRF12_OP_CFG_SET 0x1a3a
+#define MT6357_LDO_VRF12_OP_CFG_CLR 0x1a3c
+#define MT6357_LDO_VRF12_CON1 0x1a3e
+#define MT6357_LDO_VRF12_CON2 0x1a40
+#define MT6357_LDO_VRF12_CON3 0x1a42
+#define MT6357_LDO_VEFUSE_CON0 0x1a44
+#define MT6357_LDO_VEFUSE_OP_EN 0x1a46
+#define MT6357_LDO_VEFUSE_OP_EN_SET 0x1a48
+#define MT6357_LDO_VEFUSE_OP_EN_CLR 0x1a4a
+#define MT6357_LDO_VEFUSE_OP_CFG 0x1a4c
+#define MT6357_LDO_VEFUSE_OP_CFG_SET 0x1a4e
+#define MT6357_LDO_VEFUSE_OP_CFG_CLR 0x1a50
+#define MT6357_LDO_VEFUSE_CON1 0x1a52
+#define MT6357_LDO_VEFUSE_CON2 0x1a54
+#define MT6357_LDO_VEFUSE_CON3 0x1a56
+#define MT6357_LDO_VCN18_CON0 0x1a58
+#define MT6357_LDO_VCN18_OP_EN 0x1a5a
+#define MT6357_LDO_VCN18_OP_EN_SET 0x1a5c
+#define MT6357_LDO_VCN18_OP_EN_CLR 0x1a5e
+#define MT6357_LDO_VCN18_OP_CFG 0x1a60
+#define MT6357_LDO_VCN18_OP_CFG_SET 0x1a62
+#define MT6357_LDO_VCN18_OP_CFG_CLR 0x1a64
+#define MT6357_LDO_VCN18_CON1 0x1a66
+#define MT6357_LDO_VCN18_CON2 0x1a68
+#define MT6357_LDO_VCN18_CON3 0x1a6a
+#define MT6357_LDO_VCAMA_CON0 0x1a6c
+#define MT6357_LDO_VCAMA_OP_EN 0x1a6e
+#define MT6357_LDO_VCAMA_OP_EN_SET 0x1a70
+#define MT6357_LDO_VCAMA_OP_EN_CLR 0x1a72
+#define MT6357_LDO_VCAMA_OP_CFG 0x1a74
+#define MT6357_LDO_VCAMA_OP_CFG_SET 0x1a76
+#define MT6357_LDO_VCAMA_OP_CFG_CLR 0x1a78
+#define MT6357_LDO_VCAMA_CON1 0x1a7a
+#define MT6357_LDO_VCAMA_CON2 0x1a7c
+#define MT6357_LDO_VCAMA_CON3 0x1a7e
+#define MT6357_LDO_GOFF1_DSN_ID 0x1a80
+#define MT6357_LDO_GOFF1_DSN_REV0 0x1a82
+#define MT6357_LDO_GOFF1_DSN_DBI 0x1a84
+#define MT6357_LDO_GOFF1_DSN_DXI 0x1a86
+#define MT6357_LDO_VCAMD_CON0 0x1a88
+#define MT6357_LDO_VCAMD_OP_EN 0x1a8a
+#define MT6357_LDO_VCAMD_OP_EN_SET 0x1a8c
+#define MT6357_LDO_VCAMD_OP_EN_CLR 0x1a8e
+#define MT6357_LDO_VCAMD_OP_CFG 0x1a90
+#define MT6357_LDO_VCAMD_OP_CFG_SET 0x1a92
+#define MT6357_LDO_VCAMD_OP_CFG_CLR 0x1a94
+#define MT6357_LDO_VCAMD_CON1 0x1a96
+#define MT6357_LDO_VCAMD_CON2 0x1a98
+#define MT6357_LDO_VCAMD_CON3 0x1a9a
+#define MT6357_LDO_VCAMIO_CON0 0x1a9c
+#define MT6357_LDO_VCAMIO_OP_EN 0x1a9e
+#define MT6357_LDO_VCAMIO_OP_EN_SET 0x1aa0
+#define MT6357_LDO_VCAMIO_OP_EN_CLR 0x1aa2
+#define MT6357_LDO_VCAMIO_OP_CFG 0x1aa4
+#define MT6357_LDO_VCAMIO_OP_CFG_SET 0x1aa6
+#define MT6357_LDO_VCAMIO_OP_CFG_CLR 0x1aa8
+#define MT6357_LDO_VCAMIO_CON1 0x1aaa
+#define MT6357_LDO_VCAMIO_CON2 0x1aac
+#define MT6357_LDO_VCAMIO_CON3 0x1aae
+#define MT6357_LDO_VMC_CON0 0x1ab0
+#define MT6357_LDO_VMC_OP_EN 0x1ab2
+#define MT6357_LDO_VMC_OP_EN_SET 0x1ab4
+#define MT6357_LDO_VMC_OP_EN_CLR 0x1ab6
+#define MT6357_LDO_VMC_OP_CFG 0x1ab8
+#define MT6357_LDO_VMC_OP_CFG_SET 0x1aba
+#define MT6357_LDO_VMC_OP_CFG_CLR 0x1abc
+#define MT6357_LDO_VMC_CON1 0x1abe
+#define MT6357_LDO_VMC_CON2 0x1ac0
+#define MT6357_LDO_VMC_CON3 0x1ac2
+#define MT6357_LDO_VMCH_CON0 0x1ac4
+#define MT6357_LDO_VMCH_OP_EN 0x1ac6
+#define MT6357_LDO_VMCH_OP_EN_SET 0x1ac8
+#define MT6357_LDO_VMCH_OP_EN_CLR 0x1aca
+#define MT6357_LDO_VMCH_OP_CFG 0x1acc
+#define MT6357_LDO_VMCH_OP_CFG_SET 0x1ace
+#define MT6357_LDO_VMCH_OP_CFG_CLR 0x1ad0
+#define MT6357_LDO_VMCH_CON1 0x1ad2
+#define MT6357_LDO_VMCH_CON2 0x1ad4
+#define MT6357_LDO_VMCH_CON3 0x1ad6
+#define MT6357_LDO_VSIM1_CON0 0x1ad8
+#define MT6357_LDO_VSIM1_OP_EN 0x1ada
+#define MT6357_LDO_VSIM1_OP_EN_SET 0x1adc
+#define MT6357_LDO_VSIM1_OP_EN_CLR 0x1ade
+#define MT6357_LDO_VSIM1_OP_CFG 0x1ae0
+#define MT6357_LDO_VSIM1_OP_CFG_SET 0x1ae2
+#define MT6357_LDO_VSIM1_OP_CFG_CLR 0x1ae4
+#define MT6357_LDO_VSIM1_CON1 0x1ae6
+#define MT6357_LDO_VSIM1_CON2 0x1ae8
+#define MT6357_LDO_VSIM1_CON3 0x1aea
+#define MT6357_LDO_VSIM2_CON0 0x1aec
+#define MT6357_LDO_VSIM2_OP_EN 0x1aee
+#define MT6357_LDO_VSIM2_OP_EN_SET 0x1af0
+#define MT6357_LDO_VSIM2_OP_EN_CLR 0x1af2
+#define MT6357_LDO_VSIM2_OP_CFG 0x1af4
+#define MT6357_LDO_VSIM2_OP_CFG_SET 0x1af6
+#define MT6357_LDO_VSIM2_OP_CFG_CLR 0x1af8
+#define MT6357_LDO_VSIM2_CON1 0x1afa
+#define MT6357_LDO_VSIM2_CON2 0x1afc
+#define MT6357_LDO_VSIM2_CON3 0x1afe
+#define MT6357_LDO_GOFF2_DSN_ID 0x1b00
+#define MT6357_LDO_GOFF2_DSN_REV0 0x1b02
+#define MT6357_LDO_GOFF2_DSN_DBI 0x1b04
+#define MT6357_LDO_GOFF2_DSN_DXI 0x1b06
+#define MT6357_LDO_VIBR_CON0 0x1b08
+#define MT6357_LDO_VIBR_OP_EN 0x1b0a
+#define MT6357_LDO_VIBR_OP_EN_SET 0x1b0c
+#define MT6357_LDO_VIBR_OP_EN_CLR 0x1b0e
+#define MT6357_LDO_VIBR_OP_CFG 0x1b10
+#define MT6357_LDO_VIBR_OP_CFG_SET 0x1b12
+#define MT6357_LDO_VIBR_OP_CFG_CLR 0x1b14
+#define MT6357_LDO_VIBR_CON1 0x1b16
+#define MT6357_LDO_VIBR_CON2 0x1b18
+#define MT6357_LDO_VIBR_CON3 0x1b1a
+#define MT6357_LDO_VCN33_CON0_0 0x1b1c
+#define MT6357_LDO_VCN33_OP_EN 0x1b1e
+#define MT6357_LDO_VCN33_OP_EN_SET 0x1b20
+#define MT6357_LDO_VCN33_OP_EN_CLR 0x1b22
+#define MT6357_LDO_VCN33_OP_CFG 0x1b24
+#define MT6357_LDO_VCN33_OP_CFG_SET 0x1b26
+#define MT6357_LDO_VCN33_OP_CFG_CLR 0x1b28
+#define MT6357_LDO_VCN33_CON0_1 0x1b2a
+#define MT6357_LDO_VCN33_CON1 0x1b2c
+#define MT6357_LDO_VCN33_CON2 0x1b2e
+#define MT6357_LDO_VCN33_CON3 0x1b30
+#define MT6357_LDO_VLDO28_CON0_0 0x1b32
+#define MT6357_LDO_VLDO28_OP_EN 0x1b34
+#define MT6357_LDO_VLDO28_OP_EN_SET 0x1b36
+#define MT6357_LDO_VLDO28_OP_EN_CLR 0x1b38
+#define MT6357_LDO_VLDO28_OP_CFG 0x1b3a
+#define MT6357_LDO_VLDO28_OP_CFG_SET 0x1b3c
+#define MT6357_LDO_VLDO28_OP_CFG_CLR 0x1b3e
+#define MT6357_LDO_VLDO28_CON0_1 0x1b40
+#define MT6357_LDO_VLDO28_CON1 0x1b42
+#define MT6357_LDO_VLDO28_CON2 0x1b44
+#define MT6357_LDO_VLDO28_CON3 0x1b46
+#define MT6357_LDO_GOFF2_RSV_CON0 0x1b48
+#define MT6357_LDO_GOFF2_RSV_CON1 0x1b4a
+#define MT6357_LDO_GOFF3_DSN_ID 0x1b80
+#define MT6357_LDO_GOFF3_DSN_REV0 0x1b82
+#define MT6357_LDO_GOFF3_DSN_DBI 0x1b84
+#define MT6357_LDO_GOFF3_DSN_DXI 0x1b86
+#define MT6357_LDO_VCN28_CON0 0x1b88
+#define MT6357_LDO_VCN28_OP_EN 0x1b8a
+#define MT6357_LDO_VCN28_OP_EN_SET 0x1b8c
+#define MT6357_LDO_VCN28_OP_EN_CLR 0x1b8e
+#define MT6357_LDO_VCN28_OP_CFG 0x1b90
+#define MT6357_LDO_VCN28_OP_CFG_SET 0x1b92
+#define MT6357_LDO_VCN28_OP_CFG_CLR 0x1b94
+#define MT6357_LDO_VCN28_CON1 0x1b96
+#define MT6357_LDO_VCN28_CON2 0x1b98
+#define MT6357_LDO_VCN28_CON3 0x1b9a
+#define MT6357_VRTC_CON0 0x1b9c
+#define MT6357_LDO_TREF_CON0 0x1b9e
+#define MT6357_LDO_TREF_OP_EN 0x1ba0
+#define MT6357_LDO_TREF_OP_EN_SET 0x1ba2
+#define MT6357_LDO_TREF_OP_EN_CLR 0x1ba4
+#define MT6357_LDO_TREF_OP_CFG 0x1ba6
+#define MT6357_LDO_TREF_OP_CFG_SET 0x1ba8
+#define MT6357_LDO_TREF_OP_CFG_CLR 0x1baa
+#define MT6357_LDO_TREF_CON1 0x1bac
+#define MT6357_LDO_GOFF3_RSV_CON0 0x1bae
+#define MT6357_LDO_GOFF3_RSV_CON1 0x1bb0
+#define MT6357_LDO_ANA0_DSN_ID 0x1c00
+#define MT6357_LDO_ANA0_DSN_REV0 0x1c02
+#define MT6357_LDO_ANA0_DSN_DBI 0x1c04
+#define MT6357_LDO_ANA0_DSN_DXI 0x1c06
+#define MT6357_VFE28_ANA_CON0 0x1c08
+#define MT6357_VFE28_ANA_CON1 0x1c0a
+#define MT6357_VCN28_ANA_CON0 0x1c0c
+#define MT6357_VCN28_ANA_CON1 0x1c0e
+#define MT6357_VAUD28_ANA_CON0 0x1c10
+#define MT6357_VAUD28_ANA_CON1 0x1c12
+#define MT6357_VAUX18_ANA_CON0 0x1c14
+#define MT6357_VAUX18_ANA_CON1 0x1c16
+#define MT6357_VXO22_ANA_CON0 0x1c18
+#define MT6357_VXO22_ANA_CON1 0x1c1a
+#define MT6357_VCN33_ANA_CON0 0x1c1c
+#define MT6357_VCN33_ANA_CON1 0x1c1e
+#define MT6357_VEMC_ANA_CON0 0x1c20
+#define MT6357_VEMC_ANA_CON1 0x1c22
+#define MT6357_VLDO28_ANA_CON0 0x1c24
+#define MT6357_VLDO28_ANA_CON1 0x1c26
+#define MT6357_VIO28_ANA_CON0 0x1c28
+#define MT6357_VIO28_ANA_CON1 0x1c2a
+#define MT6357_VIBR_ANA_CON0 0x1c2c
+#define MT6357_VIBR_ANA_CON1 0x1c2e
+#define MT6357_VSIM1_ANA_CON0 0x1c30
+#define MT6357_VSIM1_ANA_CON1 0x1c32
+#define MT6357_VSIM2_ANA_CON0 0x1c34
+#define MT6357_VSIM2_ANA_CON1 0x1c36
+#define MT6357_VMCH_ANA_CON0 0x1c38
+#define MT6357_VMCH_ANA_CON1 0x1c3a
+#define MT6357_VMC_ANA_CON0 0x1c3c
+#define MT6357_VMC_ANA_CON1 0x1c3e
+#define MT6357_VCAMIO_ANA_CON0 0x1c40
+#define MT6357_VCAMIO_ANA_CON1 0x1c42
+#define MT6357_VCN18_ANA_CON0 0x1c44
+#define MT6357_VCN18_ANA_CON1 0x1c46
+#define MT6357_VRF18_ANA_CON0 0x1c48
+#define MT6357_VRF18_ANA_CON1 0x1c4a
+#define MT6357_VIO18_ANA_CON0 0x1c4c
+#define MT6357_VIO18_ANA_CON1 0x1c4e
+#define MT6357_VDRAM_ANA_CON1 0x1c50
+#define MT6357_VRF12_ANA_CON0 0x1c52
+#define MT6357_VRF12_ANA_CON1 0x1c54
+#define MT6357_VSRAM_PROC_ANA_CON0 0x1c56
+#define MT6357_VSRAM_OTHERS_ANA_CON0 0x1c58
+#define MT6357_LDO_ANA0_ELR_NUM 0x1c5a
+#define MT6357_VFE28_ELR_0 0x1c5c
+#define MT6357_VCN28_ELR_0 0x1c5e
+#define MT6357_VAUD28_ELR_0 0x1c60
+#define MT6357_VAUX18_ELR_0 0x1c62
+#define MT6357_VXO22_ELR_0 0x1c64
+#define MT6357_VCN33_ELR_0 0x1c66
+#define MT6357_VEMC_ELR_0 0x1c68
+#define MT6357_VLDO28_ELR_0 0x1c6a
+#define MT6357_VIO28_ELR_0 0x1c6c
+#define MT6357_VIBR_ELR_0 0x1c6e
+#define MT6357_VSIM1_ELR_0 0x1c70
+#define MT6357_VSIM2_ELR_0 0x1c72
+#define MT6357_VMCH_ELR_0 0x1c74
+#define MT6357_VMC_ELR_0 0x1c76
+#define MT6357_VCAMIO_ELR_0 0x1c78
+#define MT6357_VCN18_ELR_0 0x1c7a
+#define MT6357_VRF18_ELR_0 0x1c7c
+#define MT6357_LDO_ANA1_DSN_ID 0x1c80
+#define MT6357_LDO_ANA1_DSN_REV0 0x1c82
+#define MT6357_LDO_ANA1_DSN_DBI 0x1c84
+#define MT6357_LDO_ANA1_DSN_DXI 0x1c86
+#define MT6357_VUSB33_ANA_CON0 0x1c88
+#define MT6357_VUSB33_ANA_CON1 0x1c8a
+#define MT6357_VCAMA_ANA_CON0 0x1c8c
+#define MT6357_VCAMA_ANA_CON1 0x1c8e
+#define MT6357_VEFUSE_ANA_CON0 0x1c90
+#define MT6357_VEFUSE_ANA_CON1 0x1c92
+#define MT6357_VCAMD_ANA_CON0 0x1c94
+#define MT6357_VCAMD_ANA_CON1 0x1c96
+#define MT6357_LDO_ANA1_ELR_NUM 0x1c98
+#define MT6357_VUSB33_ELR_0 0x1c9a
+#define MT6357_VCAMA_ELR_0 0x1c9c
+#define MT6357_VEFUSE_ELR_0 0x1c9e
+#define MT6357_VCAMD_ELR_0 0x1ca0
+#define MT6357_VIO18_ELR_0 0x1ca2
+#define MT6357_VDRAM_ELR_0 0x1ca4
+#define MT6357_VRF12_ELR_0 0x1ca6
+#define MT6357_VRTC_ELR_0 0x1ca8
+#define MT6357_VDRAM_ELR_1 0x1caa
+#define MT6357_VDRAM_ELR_2 0x1cac
+#define MT6357_XPP_TOP_ID 0x1e00
+#define MT6357_XPP_TOP_REV0 0x1e02
+#define MT6357_XPP_TOP_DBI 0x1e04
+#define MT6357_XPP_TOP_DXI 0x1e06
+#define MT6357_XPP_TPM0 0x1e08
+#define MT6357_XPP_TPM1 0x1e0a
+#define MT6357_XPP_TOP_TEST_OUT 0x1e0c
+#define MT6357_XPP_TOP_TEST_CON0 0x1e0e
+#define MT6357_XPP_TOP_CKPDN_CON0 0x1e10
+#define MT6357_XPP_TOP_CKPDN_CON0_SET 0x1e12
+#define MT6357_XPP_TOP_CKPDN_CON0_CLR 0x1e14
+#define MT6357_XPP_TOP_CKSEL_CON0 0x1e16
+#define MT6357_XPP_TOP_CKSEL_CON0_SET 0x1e18
+#define MT6357_XPP_TOP_CKSEL_CON0_CLR 0x1e1a
+#define MT6357_XPP_TOP_RST_CON0 0x1e1c
+#define MT6357_XPP_TOP_RST_CON0_SET 0x1e1e
+#define MT6357_XPP_TOP_RST_CON0_CLR 0x1e20
+#define MT6357_XPP_TOP_RST_BANK_CON0 0x1e22
+#define MT6357_XPP_TOP_RST_BANK_CON0_SET 0x1e24
+#define MT6357_XPP_TOP_RST_BANK_CON0_CLR 0x1e26
+#define MT6357_DRIVER_BL_DSN_ID 0x1e80
+#define MT6357_DRIVER_BL_DSN_REV0 0x1e82
+#define MT6357_DRIVER_BL_DSN_DBI 0x1e84
+#define MT6357_DRIVER_BL_DSN_DXI 0x1e86
+#define MT6357_ISINK1_CON0 0x1e88
+#define MT6357_ISINK1_CON1 0x1e8a
+#define MT6357_ISINK1_CON2 0x1e8c
+#define MT6357_ISINK1_CON3 0x1e8e
+#define MT6357_ISINK_ANA1 0x1e90
+#define MT6357_ISINK_PHASE_DLY 0x1e92
+#define MT6357_ISINK_SFSTR 0x1e94
+#define MT6357_ISINK_EN_CTRL 0x1e96
+#define MT6357_ISINK_MODE_CTRL 0x1e98
+#define MT6357_DRIVER_ANA_CON0 0x1e9a
+#define MT6357_ISINK_ANA_CON0 0x1e9c
+#define MT6357_ISINK_ANA_CON1 0x1e9e
+#define MT6357_DRIVER_BL_ELR_NUM 0x1ea0
+#define MT6357_DRIVER_BL_ELR_0 0x1ea2
+#define MT6357_DRIVER_CI_DSN_ID 0x1f00
+#define MT6357_DRIVER_CI_DSN_REV0 0x1f02
+#define MT6357_DRIVER_CI_DSN_DBI 0x1f04
+#define MT6357_DRIVER_CI_DSN_DXI 0x1f06
+#define MT6357_CHRIND_CON0 0x1f08
+#define MT6357_CHRIND_CON1 0x1f0a
+#define MT6357_CHRIND_CON2 0x1f0c
+#define MT6357_CHRIND_CON3 0x1f0e
+#define MT6357_CHRIND_CON4 0x1f10
+#define MT6357_CHRIND_EN_CTRL 0x1f12
+#define MT6357_CHRIND_ANA_CON0 0x1f14
+#define MT6357_DRIVER_DL_DSN_ID 0x1f80
+#define MT6357_DRIVER_DL_DSN_REV0 0x1f82
+#define MT6357_DRIVER_DL_DSN_DBI 0x1f84
+#define MT6357_DRIVER_DL_DSN_DXI 0x1f86
+#define MT6357_ISINK2_CON0 0x1f88
+#define MT6357_ISINK3_CON0 0x1f8a
+#define MT6357_ISINK_EN_CTRL_SMPL 0x1f8c
+#define MT6357_AUD_TOP_ID 0x2080
+#define MT6357_AUD_TOP_REV0 0x2082
+#define MT6357_AUD_TOP_DBI 0x2084
+#define MT6357_AUD_TOP_DXI 0x2086
+#define MT6357_AUD_TOP_CKPDN_TPM0 0x2088
+#define MT6357_AUD_TOP_CKPDN_TPM1 0x208a
+#define MT6357_AUD_TOP_CKPDN_CON0 0x208c
+#define MT6357_AUD_TOP_CKPDN_CON0_SET 0x208e
+#define MT6357_AUD_TOP_CKPDN_CON0_CLR 0x2090
+#define MT6357_AUD_TOP_CKSEL_CON0 0x2092
+#define MT6357_AUD_TOP_CKSEL_CON0_SET 0x2094
+#define MT6357_AUD_TOP_CKSEL_CON0_CLR 0x2096
+#define MT6357_AUD_TOP_CKTST_CON0 0x2098
+#define MT6357_AUD_TOP_RST_CON0 0x209a
+#define MT6357_AUD_TOP_RST_CON0_SET 0x209c
+#define MT6357_AUD_TOP_RST_CON0_CLR 0x209e
+#define MT6357_AUD_TOP_RST_BANK_CON0 0x20a0
+#define MT6357_AUD_TOP_INT_CON0 0x20a2
+#define MT6357_AUD_TOP_INT_CON0_SET 0x20a4
+#define MT6357_AUD_TOP_INT_CON0_CLR 0x20a6
+#define MT6357_AUD_TOP_INT_MASK_CON0 0x20a8
+#define MT6357_AUD_TOP_INT_MASK_CON0_SET 0x20aa
+#define MT6357_AUD_TOP_INT_MASK_CON0_CLR 0x20ac
+#define MT6357_AUD_TOP_INT_STATUS0 0x20ae
+#define MT6357_AUD_TOP_INT_RAW_STATUS0 0x20b0
+#define MT6357_AUD_TOP_INT_MISC_CON0 0x20b2
+#define MT6357_AUDNCP_CLKDIV_CON0 0x20b4
+#define MT6357_AUDNCP_CLKDIV_CON1 0x20b6
+#define MT6357_AUDNCP_CLKDIV_CON2 0x20b8
+#define MT6357_AUDNCP_CLKDIV_CON3 0x20ba
+#define MT6357_AUDNCP_CLKDIV_CON4 0x20bc
+#define MT6357_AUD_TOP_MON_CON0 0x20be
+#define MT6357_AUDIO_DIG_DSN_ID 0x2100
+#define MT6357_AUDIO_DIG_DSN_REV0 0x2102
+#define MT6357_AUDIO_DIG_DSN_DBI 0x2104
+#define MT6357_AUDIO_DIG_DSN_DXI 0x2106
+#define MT6357_AFE_UL_DL_CON0 0x2108
+#define MT6357_AFE_DL_SRC2_CON0_L 0x210a
+#define MT6357_AFE_UL_SRC_CON0_H 0x210c
+#define MT6357_AFE_UL_SRC_CON0_L 0x210e
+#define MT6357_AFE_TOP_CON0 0x2110
+#define MT6357_AUDIO_TOP_CON0 0x2112
+#define MT6357_AFE_MON_DEBUG0 0x2114
+#define MT6357_AFUNC_AUD_CON0 0x2116
+#define MT6357_AFUNC_AUD_CON1 0x2118
+#define MT6357_AFUNC_AUD_CON2 0x211a
+#define MT6357_AFUNC_AUD_CON3 0x211c
+#define MT6357_AFUNC_AUD_CON4 0x211e
+#define MT6357_AFUNC_AUD_CON5 0x2120
+#define MT6357_AFUNC_AUD_CON6 0x2122
+#define MT6357_AFUNC_AUD_MON0 0x2124
+#define MT6357_AUDRC_TUNE_MON0 0x2126
+#define MT6357_AFE_ADDA_MTKAIF_FIFO_CFG0 0x2128
+#define MT6357_AFE_ADDA_MTKAIF_FIFO_LOG_MON1 0x212a
+#define MT6357_AFE_ADDA_MTKAIF_MON0 0x212c
+#define MT6357_AFE_ADDA_MTKAIF_MON1 0x212e
+#define MT6357_AFE_ADDA_MTKAIF_MON2 0x2130
+#define MT6357_AFE_ADDA_MTKAIF_MON3 0x2132
+#define MT6357_AFE_ADDA_MTKAIF_CFG0 0x2134
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG0 0x2136
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG1 0x2138
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG2 0x213a
+#define MT6357_AFE_ADDA_MTKAIF_RX_CFG3 0x213c
+#define MT6357_AFE_ADDA_MTKAIF_TX_CFG1 0x213e
+#define MT6357_AFE_SGEN_CFG0 0x2140
+#define MT6357_AFE_SGEN_CFG1 0x2142
+#define MT6357_AFE_ADC_ASYNC_FIFO_CFG 0x2144
+#define MT6357_AFE_DCCLK_CFG0 0x2146
+#define MT6357_AFE_DCCLK_CFG1 0x2148
+#define MT6357_AUDIO_DIG_CFG 0x214a
+#define MT6357_AFE_AUD_PAD_TOP 0x214c
+#define MT6357_AFE_AUD_PAD_TOP_MON 0x214e
+#define MT6357_AFE_AUD_PAD_TOP_MON1 0x2150
+#define MT6357_AUDENC_DSN_ID 0x2180
+#define MT6357_AUDENC_DSN_REV0 0x2182
+#define MT6357_AUDENC_DSN_DBI 0x2184
+#define MT6357_AUDENC_DSN_FPI 0x2186
+#define MT6357_AUDENC_ANA_CON0 0x2188
+#define MT6357_AUDENC_ANA_CON1 0x218a
+#define MT6357_AUDENC_ANA_CON2 0x218c
+#define MT6357_AUDENC_ANA_CON3 0x218e
+#define MT6357_AUDENC_ANA_CON4 0x2190
+#define MT6357_AUDENC_ANA_CON5 0x2192
+#define MT6357_AUDENC_ANA_CON6 0x2194
+#define MT6357_AUDENC_ANA_CON7 0x2196
+#define MT6357_AUDENC_ANA_CON8 0x2198
+#define MT6357_AUDENC_ANA_CON9 0x219a
+#define MT6357_AUDENC_ANA_CON10 0x219c
+#define MT6357_AUDENC_ANA_CON11 0x219e
+#define MT6357_AUDDEC_DSN_ID 0x2200
+#define MT6357_AUDDEC_DSN_REV0 0x2202
+#define MT6357_AUDDEC_DSN_DBI 0x2204
+#define MT6357_AUDDEC_DSN_FPI 0x2206
+#define MT6357_AUDDEC_ANA_CON0 0x2208
+#define MT6357_AUDDEC_ANA_CON1 0x220a
+#define MT6357_AUDDEC_ANA_CON2 0x220c
+#define MT6357_AUDDEC_ANA_CON3 0x220e
+#define MT6357_AUDDEC_ANA_CON4 0x2210
+#define MT6357_AUDDEC_ANA_CON5 0x2212
+#define MT6357_AUDDEC_ANA_CON6 0x2214
+#define MT6357_AUDDEC_ANA_CON7 0x2216
+#define MT6357_AUDDEC_ANA_CON8 0x2218
+#define MT6357_AUDDEC_ANA_CON9 0x221a
+#define MT6357_AUDDEC_ANA_CON10 0x221c
+#define MT6357_AUDDEC_ANA_CON11 0x221e
+#define MT6357_AUDDEC_ANA_CON12 0x2220
+#define MT6357_AUDDEC_ANA_CON13 0x2222
+#define MT6357_AUDDEC_ELR_NUM 0x2224
+#define MT6357_AUDDEC_ELR_0 0x2226
+#define MT6357_AUDZCD_DSN_ID 0x2280
+#define MT6357_AUDZCD_DSN_REV0 0x2282
+#define MT6357_AUDZCD_DSN_DBI 0x2284
+#define MT6357_AUDZCD_DSN_FPI 0x2286
+#define MT6357_ZCD_CON0 0x2288
+#define MT6357_ZCD_CON1 0x228a
+#define MT6357_ZCD_CON2 0x228c
+#define MT6357_ZCD_CON3 0x228e
+#define MT6357_ZCD_CON4 0x2290
+#define MT6357_ZCD_CON5 0x2292
+#define MT6357_ACCDET_DSN_DIG_ID 0x2300
+#define MT6357_ACCDET_DSN_DIG_REV0 0x2302
+#define MT6357_ACCDET_DSN_DBI 0x2304
+#define MT6357_ACCDET_DSN_FPI 0x2306
+#define MT6357_ACCDET_CON0 0x2308
+#define MT6357_ACCDET_CON1 0x230a
+#define MT6357_ACCDET_CON2 0x230c
+#define MT6357_ACCDET_CON3 0x230e
+#define MT6357_ACCDET_CON4 0x2310
+#define MT6357_ACCDET_CON5 0x2312
+#define MT6357_ACCDET_CON6 0x2314
+#define MT6357_ACCDET_CON7 0x2316
+#define MT6357_ACCDET_CON8 0x2318
+#define MT6357_ACCDET_CON9 0x231a
+#define MT6357_ACCDET_CON10 0x231c
+#define MT6357_ACCDET_CON11 0x231e
+#define MT6357_ACCDET_CON12 0x2320
+#define MT6357_ACCDET_CON13 0x2322
+#define MT6357_ACCDET_CON14 0x2324
+#define MT6357_ACCDET_CON15 0x2326
+#define MT6357_ACCDET_CON16 0x2328
+#define MT6357_ACCDET_CON17 0x232a
+#define MT6357_ACCDET_CON18 0x232c
+#define MT6357_ACCDET_CON19 0x232e
+#define MT6357_ACCDET_CON20 0x2330
+#define MT6357_ACCDET_CON21 0x2332
+#define MT6357_ACCDET_CON22 0x2334
+#define MT6357_ACCDET_CON23 0x2336
+#define MT6357_ACCDET_CON24 0x2338
+#define MT6357_ACCDET_CON25 0x233a
+#define MT6357_ACCDET_CON26 0x233c
+#define MT6357_ACCDET_CON27 0x233e
+#define MT6357_ACCDET_CON28 0x2340
+
+#endif /* __MFD_MT6357_REGISTERS_H__ */
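The MT6357 LDO banks above repeat a CON0 / OP_EN / OP_EN_SET / OP_EN_CLR /
OP_CFG layout: each operating-mode register is paired with write-one-to-set
and write-one-to-clear aliases at the next two 16-bit offsets. A minimal
sketch of how a driver might use a _SET alias, assuming a 16-bit regmap; the
helper name is hypothetical:

    #include <linux/bits.h>
    #include <linux/regmap.h>
    #include <linux/mfd/mt6357/registers.h>

    /* Hypothetical sketch: writing the _SET alias sets the chosen bit
     * without a read-modify-write cycle on the base OP_EN register. */
    static int mt6357_vemc_op_en_set(struct regmap *map, unsigned int bit)
    {
            return regmap_write(map, MT6357_LDO_VEMC_OP_EN_SET, BIT(bit));
    }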
diff --git a/include/linux/mfd/mt6358/core.h b/include/linux/mfd/mt6358/core.h
index c5a11b7458d4..68578e2019b0 100644
--- a/include/linux/mfd/mt6358/core.h
+++ b/include/linux/mfd/mt6358/core.h
@@ -6,12 +6,9 @@
#ifndef __MFD_MT6358_CORE_H__
#define __MFD_MT6358_CORE_H__
-#define MT6358_REG_WIDTH 16
-
struct irq_top_t {
int hwirq_base;
unsigned int num_int_regs;
- unsigned int num_int_bits;
unsigned int en_reg;
unsigned int en_reg_shift;
unsigned int sta_reg;
@@ -25,6 +22,7 @@ struct pmic_irq_data {
unsigned short top_int_status_reg;
bool *enable_hwirq;
bool *cache_hwirq;
+ const struct irq_top_t *pmic_ints;
};
enum mt6358_irq_top_status_shift {
@@ -146,8 +144,8 @@ enum mt6358_irq_numbers {
{ \
.hwirq_base = MT6358_IRQ_##sp##_BASE, \
.num_int_regs = \
- ((MT6358_IRQ_##sp##_BITS - 1) / MT6358_REG_WIDTH) + 1, \
- .num_int_bits = MT6358_IRQ_##sp##_BITS, \
+ ((MT6358_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
.en_reg = MT6358_##sp##_TOP_INT_CON0, \
.en_reg_shift = 0x6, \
.sta_reg = MT6358_##sp##_TOP_INT_STATUS0, \
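The rewritten .num_int_regs initializer is a ceiling division: it computes
how many interrupt registers a top domain's bit count occupies, now against
the shared MTK_PMIC_REG_WIDTH rather than the removed per-chip
MT6358_REG_WIDTH. A worked check, assuming MTK_PMIC_REG_WIDTH is 16 like the
constant it replaces (the NUM_INT_REGS name is illustrative only):

    /* Ceiling division of interrupt bits into 16-bit registers. */
    #define NUM_INT_REGS(bits)  (((bits) - 1) / MTK_PMIC_REG_WIDTH + 1)

    /* NUM_INT_REGS(16) == 1, NUM_INT_REGS(17) == 2, NUM_INT_REGS(32) == 2 */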
diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h
index 2ad0b312aa28..d83e87298ac4 100644
--- a/include/linux/mfd/mt6358/registers.h
+++ b/include/linux/mfd/mt6358/registers.h
@@ -8,6 +8,8 @@
/* PMIC Registers */
#define MT6358_SWCID 0xa
+#define MT6358_TOPSTATUS 0x28
+#define MT6358_TOP_RST_MISC 0x14c
#define MT6358_MISC_TOP_INT_CON0 0x188
#define MT6358_MISC_TOP_INT_STATUS0 0x194
#define MT6358_TOP_INT_STATUS0 0x19e
@@ -92,6 +94,10 @@
#define MT6358_BUCK_VCORE_CON0 0x1488
#define MT6358_BUCK_VCORE_DBG0 0x149e
#define MT6358_BUCK_VCORE_DBG1 0x14a0
+#define MT6358_BUCK_VCORE_SSHUB_CON0 0x14a4
+#define MT6358_BUCK_VCORE_SSHUB_CON1 0x14a6
+#define MT6358_BUCK_VCORE_SSHUB_ELR0 MT6358_BUCK_VCORE_SSHUB_CON1
+#define MT6358_BUCK_VCORE_SSHUB_DBG1 MT6358_BUCK_VCORE_DBG1
#define MT6358_BUCK_VCORE_ELR0 0x14aa
#define MT6358_BUCK_VGPU_CON0 0x1508
#define MT6358_BUCK_VGPU_DBG0 0x151e
@@ -167,6 +173,9 @@
#define MT6358_LDO_VSRAM_OTHERS_CON0 0x1ba6
#define MT6358_LDO_VSRAM_OTHERS_DBG0 0x1bc0
#define MT6358_LDO_VSRAM_OTHERS_DBG1 0x1bc2
+#define MT6358_LDO_VSRAM_OTHERS_SSHUB_CON0 0x1bc4
+#define MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1 0x1bc6
+#define MT6358_LDO_VSRAM_OTHERS_SSHUB_DBG1 MT6358_LDO_VSRAM_OTHERS_DBG1
#define MT6358_LDO_VSRAM_GPU_CON0 0x1bc8
#define MT6358_LDO_VSRAM_GPU_DBG0 0x1be2
#define MT6358_LDO_VSRAM_GPU_DBG1 0x1be4
@@ -253,6 +262,12 @@
#define MT6358_LDO_VBIF28_CON3 0x1db0
#define MT6358_VCAMA1_ANA_CON0 0x1e08
#define MT6358_VCAMA2_ANA_CON0 0x1e0c
+#define MT6358_VFE28_ANA_CON0 0x1e10
+#define MT6358_VCN28_ANA_CON0 0x1e14
+#define MT6358_VBIF28_ANA_CON0 0x1e18
+#define MT6358_VAUD28_ANA_CON0 0x1e1c
+#define MT6358_VAUX18_ANA_CON0 0x1e20
+#define MT6358_VXO22_ANA_CON0 0x1e24
#define MT6358_VCN33_ANA_CON0 0x1e28
#define MT6358_VSIM1_ANA_CON0 0x1e2c
#define MT6358_VSIM2_ANA_CON0 0x1e30
@@ -279,4 +294,21 @@
#define MT6358_AUD_TOP_INT_CON0 0x2228
#define MT6358_AUD_TOP_INT_STATUS0 0x2234
+/*
+ * MT6366 has no VCAM*, but has other regulators in their place. The names

+ * keep the MT6358 prefix for ease of use in the regulator driver.
+ */
+#define MT6358_LDO_VSRAM_CON5 0x1bf8
+#define MT6358_LDO_VM18_CON0 MT6358_LDO_VCAMA1_CON0
+#define MT6358_LDO_VM18_CON1 MT6358_LDO_VCAMA1_CON1
+#define MT6358_LDO_VM18_CON2 MT6358_LDO_VCAMA1_CON2
+#define MT6358_LDO_VMDDR_CON0 MT6358_LDO_VCAMA2_CON0
+#define MT6358_LDO_VMDDR_CON1 MT6358_LDO_VCAMA2_CON1
+#define MT6358_LDO_VMDDR_CON2 MT6358_LDO_VCAMA2_CON2
+#define MT6358_LDO_VSRAM_CORE_CON0 MT6358_LDO_VCAMD_CON0
+#define MT6358_LDO_VSRAM_CORE_DBG0 0x1cb6
+#define MT6358_LDO_VSRAM_CORE_DBG1 0x1cb8
+#define MT6358_VM18_ANA_CON0 MT6358_VCAMA1_ANA_CON0
+#define MT6358_VMDDR_ANA_CON0 MT6358_VCAMD_ANA_CON0
+
#endif /* __MFD_MT6358_REGISTERS_H__ */
diff --git a/include/linux/mfd/mt6359/core.h b/include/linux/mfd/mt6359/core.h
new file mode 100644
index 000000000000..8d298868126d
--- /dev/null
+++ b/include/linux/mfd/mt6359/core.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359_CORE_H__
+#define __MFD_MT6359_CORE_H__
+
+enum mt6359_irq_top_status_shift {
+ MT6359_BUCK_TOP = 0,
+ MT6359_LDO_TOP,
+ MT6359_PSC_TOP,
+ MT6359_SCK_TOP,
+ MT6359_BM_TOP,
+ MT6359_HK_TOP,
+ MT6359_AUD_TOP = 7,
+ MT6359_MISC_TOP,
+};
+
+enum mt6359_irq_numbers {
+ MT6359_IRQ_VCORE_OC = 1,
+ MT6359_IRQ_VGPU11_OC,
+ MT6359_IRQ_VGPU12_OC,
+ MT6359_IRQ_VMODEM_OC,
+ MT6359_IRQ_VPROC1_OC,
+ MT6359_IRQ_VPROC2_OC,
+ MT6359_IRQ_VS1_OC,
+ MT6359_IRQ_VS2_OC,
+ MT6359_IRQ_VPA_OC = 9,
+ MT6359_IRQ_VFE28_OC = 16,
+ MT6359_IRQ_VXO22_OC,
+ MT6359_IRQ_VRF18_OC,
+ MT6359_IRQ_VRF12_OC,
+ MT6359_IRQ_VEFUSE_OC,
+ MT6359_IRQ_VCN33_1_OC,
+ MT6359_IRQ_VCN33_2_OC,
+ MT6359_IRQ_VCN13_OC,
+ MT6359_IRQ_VCN18_OC,
+ MT6359_IRQ_VA09_OC,
+ MT6359_IRQ_VCAMIO_OC,
+ MT6359_IRQ_VA12_OC,
+ MT6359_IRQ_VAUX18_OC,
+ MT6359_IRQ_VAUD18_OC,
+ MT6359_IRQ_VIO18_OC,
+ MT6359_IRQ_VSRAM_PROC1_OC,
+ MT6359_IRQ_VSRAM_PROC2_OC,
+ MT6359_IRQ_VSRAM_OTHERS_OC,
+ MT6359_IRQ_VSRAM_MD_OC,
+ MT6359_IRQ_VEMC_OC,
+ MT6359_IRQ_VSIM1_OC,
+ MT6359_IRQ_VSIM2_OC,
+ MT6359_IRQ_VUSB_OC,
+ MT6359_IRQ_VRFCK_OC,
+ MT6359_IRQ_VBBCK_OC,
+ MT6359_IRQ_VBIF28_OC,
+ MT6359_IRQ_VIBR_OC,
+ MT6359_IRQ_VIO28_OC,
+ MT6359_IRQ_VM18_OC,
+ MT6359_IRQ_VUFS_OC = 45,
+ MT6359_IRQ_PWRKEY = 48,
+ MT6359_IRQ_HOMEKEY,
+ MT6359_IRQ_PWRKEY_R,
+ MT6359_IRQ_HOMEKEY_R,
+ MT6359_IRQ_NI_LBAT_INT,
+ MT6359_IRQ_CHRDET_EDGE = 53,
+ MT6359_IRQ_RTC = 64,
+ MT6359_IRQ_FG_BAT_H = 80,
+ MT6359_IRQ_FG_BAT_L,
+ MT6359_IRQ_FG_CUR_H,
+ MT6359_IRQ_FG_CUR_L,
+ MT6359_IRQ_FG_ZCV = 84,
+ MT6359_IRQ_FG_N_CHARGE_L = 87,
+ MT6359_IRQ_FG_IAVG_H,
+ MT6359_IRQ_FG_IAVG_L = 89,
+ MT6359_IRQ_FG_DISCHARGE = 91,
+ MT6359_IRQ_FG_CHARGE,
+ MT6359_IRQ_BATON_LV = 96,
+ MT6359_IRQ_BATON_BAT_IN = 98,
+ MT6359_IRQ_BATON_BAT_OU,
+ MT6359_IRQ_BIF = 100,
+ MT6359_IRQ_BAT_H = 112,
+ MT6359_IRQ_BAT_L,
+ MT6359_IRQ_BAT2_H,
+ MT6359_IRQ_BAT2_L,
+ MT6359_IRQ_BAT_TEMP_H,
+ MT6359_IRQ_BAT_TEMP_L,
+ MT6359_IRQ_THR_H,
+ MT6359_IRQ_THR_L,
+ MT6359_IRQ_AUXADC_IMP,
+ MT6359_IRQ_NAG_C_DLTV = 121,
+ MT6359_IRQ_AUDIO = 128,
+ MT6359_IRQ_ACCDET = 133,
+ MT6359_IRQ_ACCDET_EINT0,
+ MT6359_IRQ_ACCDET_EINT1,
+ MT6359_IRQ_SPI_CMD_ALERT = 144,
+ MT6359_IRQ_NR,
+};
+
+#define MT6359_IRQ_BUCK_BASE MT6359_IRQ_VCORE_OC
+#define MT6359_IRQ_LDO_BASE MT6359_IRQ_VFE28_OC
+#define MT6359_IRQ_PSC_BASE MT6359_IRQ_PWRKEY
+#define MT6359_IRQ_SCK_BASE MT6359_IRQ_RTC
+#define MT6359_IRQ_BM_BASE MT6359_IRQ_FG_BAT_H
+#define MT6359_IRQ_HK_BASE MT6359_IRQ_BAT_H
+#define MT6359_IRQ_AUD_BASE MT6359_IRQ_AUDIO
+#define MT6359_IRQ_MISC_BASE MT6359_IRQ_SPI_CMD_ALERT
+
+#define MT6359_IRQ_BUCK_BITS (MT6359_IRQ_VPA_OC - MT6359_IRQ_BUCK_BASE + 1)
+#define MT6359_IRQ_LDO_BITS (MT6359_IRQ_VUFS_OC - MT6359_IRQ_LDO_BASE + 1)
+#define MT6359_IRQ_PSC_BITS \
+ (MT6359_IRQ_CHRDET_EDGE - MT6359_IRQ_PSC_BASE + 1)
+#define MT6359_IRQ_SCK_BITS (MT6359_IRQ_RTC - MT6359_IRQ_SCK_BASE + 1)
+#define MT6359_IRQ_BM_BITS (MT6359_IRQ_BIF - MT6359_IRQ_BM_BASE + 1)
+#define MT6359_IRQ_HK_BITS (MT6359_IRQ_NAG_C_DLTV - MT6359_IRQ_HK_BASE + 1)
+#define MT6359_IRQ_AUD_BITS \
+ (MT6359_IRQ_ACCDET_EINT1 - MT6359_IRQ_AUD_BASE + 1)
+#define MT6359_IRQ_MISC_BITS \
+ (MT6359_IRQ_SPI_CMD_ALERT - MT6359_IRQ_MISC_BASE + 1)
+
+#define MT6359_TOP_GEN(sp) \
+{ \
+ .hwirq_base = MT6359_IRQ_##sp##_BASE, \
+ .num_int_regs = \
+ ((MT6359_IRQ_##sp##_BITS - 1) / \
+ MTK_PMIC_REG_WIDTH) + 1, \
+ .en_reg = MT6359_##sp##_TOP_INT_CON0, \
+ .en_reg_shift = 0x6, \
+ .sta_reg = MT6359_##sp##_TOP_INT_STATUS0, \
+ .sta_reg_shift = 0x2, \
+ .top_offset = MT6359_##sp##_TOP, \
+}
+
+#endif /* __MFD_MT6359_CORE_H__ */
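Together, the *_BASE and *_BITS macros above let an interrupt handler locate
a hardware IRQ inside its top domain. A minimal sketch, assuming 16-bit
status registers; the helper name is hypothetical:

    /* Hypothetical: decode which status register, and which bit within
     * it, carry a given hwirq relative to its domain base
     * (e.g. MT6359_IRQ_LDO_BASE for the LDO domain). */
    static void mt6359_irq_locate(unsigned int hwirq, unsigned int base,
                                  unsigned int *reg_idx, unsigned int *bit)
    {
            unsigned int off = hwirq - base;  /* offset inside the domain */

            *reg_idx = off / 16;              /* which TOP_INT_STATUSn register */
            *bit = off % 16;                  /* which bit in that register */
    }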
diff --git a/include/linux/mfd/mt6359/registers.h b/include/linux/mfd/mt6359/registers.h
new file mode 100644
index 000000000000..2a4394a27b1c
--- /dev/null
+++ b/include/linux/mfd/mt6359/registers.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359_REGISTERS_H__
+#define __MFD_MT6359_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6359_SWCID 0xa
+#define MT6359_TOPSTATUS 0x2a
+#define MT6359_TOP_RST_MISC 0x14c
+#define MT6359_MISC_TOP_INT_CON0 0x188
+#define MT6359_MISC_TOP_INT_STATUS0 0x194
+#define MT6359_TOP_INT_STATUS0 0x19e
+#define MT6359_SCK_TOP_INT_CON0 0x528
+#define MT6359_SCK_TOP_INT_STATUS0 0x534
+#define MT6359_EOSC_CALI_CON0 0x53a
+#define MT6359_EOSC_CALI_CON1 0x53c
+#define MT6359_RTC_MIX_CON0 0x53e
+#define MT6359_RTC_MIX_CON1 0x540
+#define MT6359_RTC_MIX_CON2 0x542
+#define MT6359_RTC_DSN_ID 0x580
+#define MT6359_RTC_DSN_REV0 0x582
+#define MT6359_RTC_DBI 0x584
+#define MT6359_RTC_DXI 0x586
+#define MT6359_RTC_BBPU 0x588
+#define MT6359_RTC_IRQ_STA 0x58a
+#define MT6359_RTC_IRQ_EN 0x58c
+#define MT6359_RTC_CII_EN 0x58e
+#define MT6359_RTC_AL_MASK 0x590
+#define MT6359_RTC_TC_SEC 0x592
+#define MT6359_RTC_TC_MIN 0x594
+#define MT6359_RTC_TC_HOU 0x596
+#define MT6359_RTC_TC_DOM 0x598
+#define MT6359_RTC_TC_DOW 0x59a
+#define MT6359_RTC_TC_MTH 0x59c
+#define MT6359_RTC_TC_YEA 0x59e
+#define MT6359_RTC_AL_SEC 0x5a0
+#define MT6359_RTC_AL_MIN 0x5a2
+#define MT6359_RTC_AL_HOU 0x5a4
+#define MT6359_RTC_AL_DOM 0x5a6
+#define MT6359_RTC_AL_DOW 0x5a8
+#define MT6359_RTC_AL_MTH 0x5aa
+#define MT6359_RTC_AL_YEA 0x5ac
+#define MT6359_RTC_OSC32CON 0x5ae
+#define MT6359_RTC_POWERKEY1 0x5b0
+#define MT6359_RTC_POWERKEY2 0x5b2
+#define MT6359_RTC_PDN1 0x5b4
+#define MT6359_RTC_PDN2 0x5b6
+#define MT6359_RTC_SPAR0 0x5b8
+#define MT6359_RTC_SPAR1 0x5ba
+#define MT6359_RTC_PROT 0x5bc
+#define MT6359_RTC_DIFF 0x5be
+#define MT6359_RTC_CALI 0x5c0
+#define MT6359_RTC_WRTGR 0x5c2
+#define MT6359_RTC_CON 0x5c4
+#define MT6359_RTC_SEC_CTRL 0x5c6
+#define MT6359_RTC_INT_CNT 0x5c8
+#define MT6359_RTC_SEC_DAT0 0x5ca
+#define MT6359_RTC_SEC_DAT1 0x5cc
+#define MT6359_RTC_SEC_DAT2 0x5ce
+#define MT6359_RTC_SEC_DSN_ID 0x600
+#define MT6359_RTC_SEC_DSN_REV0 0x602
+#define MT6359_RTC_SEC_DBI 0x604
+#define MT6359_RTC_SEC_DXI 0x606
+#define MT6359_RTC_TC_SEC_SEC 0x608
+#define MT6359_RTC_TC_MIN_SEC 0x60a
+#define MT6359_RTC_TC_HOU_SEC 0x60c
+#define MT6359_RTC_TC_DOM_SEC 0x60e
+#define MT6359_RTC_TC_DOW_SEC 0x610
+#define MT6359_RTC_TC_MTH_SEC 0x612
+#define MT6359_RTC_TC_YEA_SEC 0x614
+#define MT6359_RTC_SEC_CK_PDN 0x616
+#define MT6359_RTC_SEC_WRTGR 0x618
+#define MT6359_PSC_TOP_INT_CON0 0x910
+#define MT6359_PSC_TOP_INT_STATUS0 0x91c
+#define MT6359_BM_TOP_INT_CON0 0xc32
+#define MT6359_BM_TOP_INT_CON1 0xc38
+#define MT6359_BM_TOP_INT_STATUS0 0xc4a
+#define MT6359_BM_TOP_INT_STATUS1 0xc4c
+#define MT6359_HK_TOP_INT_CON0 0xf92
+#define MT6359_HK_TOP_INT_STATUS0 0xf9e
+#define MT6359_BUCK_TOP_INT_CON0 0x1418
+#define MT6359_BUCK_TOP_INT_STATUS0 0x1424
+#define MT6359_BUCK_VPU_CON0 0x1488
+#define MT6359_BUCK_VPU_DBG0 0x14a6
+#define MT6359_BUCK_VPU_DBG1 0x14a8
+#define MT6359_BUCK_VPU_ELR0 0x14ac
+#define MT6359_BUCK_VCORE_CON0 0x1508
+#define MT6359_BUCK_VCORE_DBG0 0x1526
+#define MT6359_BUCK_VCORE_DBG1 0x1528
+#define MT6359_BUCK_VCORE_SSHUB_CON0 0x152a
+#define MT6359_BUCK_VCORE_ELR0 0x1534
+#define MT6359_BUCK_VGPU11_CON0 0x1588
+#define MT6359_BUCK_VGPU11_DBG0 0x15a6
+#define MT6359_BUCK_VGPU11_DBG1 0x15a8
+#define MT6359_BUCK_VGPU11_ELR0 0x15ac
+#define MT6359_BUCK_VMODEM_CON0 0x1688
+#define MT6359_BUCK_VMODEM_DBG0 0x16a6
+#define MT6359_BUCK_VMODEM_DBG1 0x16a8
+#define MT6359_BUCK_VMODEM_ELR0 0x16ae
+#define MT6359_BUCK_VPROC1_CON0 0x1708
+#define MT6359_BUCK_VPROC1_DBG0 0x1726
+#define MT6359_BUCK_VPROC1_DBG1 0x1728
+#define MT6359_BUCK_VPROC1_ELR0 0x172e
+#define MT6359_BUCK_VPROC2_CON0 0x1788
+#define MT6359_BUCK_VPROC2_DBG0 0x17a6
+#define MT6359_BUCK_VPROC2_DBG1 0x17a8
+#define MT6359_BUCK_VPROC2_ELR0 0x17b2
+#define MT6359_BUCK_VS1_CON0 0x1808
+#define MT6359_BUCK_VS1_DBG0 0x1826
+#define MT6359_BUCK_VS1_DBG1 0x1828
+#define MT6359_BUCK_VS1_ELR0 0x1834
+#define MT6359_BUCK_VS2_CON0 0x1888
+#define MT6359_BUCK_VS2_DBG0 0x18a6
+#define MT6359_BUCK_VS2_DBG1 0x18a8
+#define MT6359_BUCK_VS2_ELR0 0x18b4
+#define MT6359_BUCK_VPA_CON0 0x1908
+#define MT6359_BUCK_VPA_CON1 0x190e
+#define MT6359_BUCK_VPA_CFG0 0x1910
+#define MT6359_BUCK_VPA_CFG1 0x1912
+#define MT6359_BUCK_VPA_DBG0 0x1914
+#define MT6359_BUCK_VPA_DBG1 0x1916
+#define MT6359_VGPUVCORE_ANA_CON2 0x198e
+#define MT6359_VGPUVCORE_ANA_CON13 0x19a4
+#define MT6359_VPROC1_ANA_CON3 0x19b2
+#define MT6359_VPROC2_ANA_CON3 0x1a0e
+#define MT6359_VMODEM_ANA_CON3 0x1a1a
+#define MT6359_VPU_ANA_CON3 0x1a26
+#define MT6359_VS1_ANA_CON0 0x1a2c
+#define MT6359_VS2_ANA_CON0 0x1a34
+#define MT6359_VPA_ANA_CON0 0x1a3c
+#define MT6359_LDO_TOP_INT_CON0 0x1b14
+#define MT6359_LDO_TOP_INT_CON1 0x1b1a
+#define MT6359_LDO_TOP_INT_STATUS0 0x1b28
+#define MT6359_LDO_TOP_INT_STATUS1 0x1b2a
+#define MT6359_LDO_VSRAM_PROC1_ELR 0x1b40
+#define MT6359_LDO_VSRAM_PROC2_ELR 0x1b42
+#define MT6359_LDO_VSRAM_OTHERS_ELR 0x1b44
+#define MT6359_LDO_VSRAM_MD_ELR 0x1b46
+#define MT6359_LDO_VFE28_CON0 0x1b88
+#define MT6359_LDO_VFE28_MON 0x1b8a
+#define MT6359_LDO_VXO22_CON0 0x1b98
+#define MT6359_LDO_VXO22_MON 0x1b9a
+#define MT6359_LDO_VRF18_CON0 0x1ba8
+#define MT6359_LDO_VRF18_MON 0x1baa
+#define MT6359_LDO_VRF12_CON0 0x1bb8
+#define MT6359_LDO_VRF12_MON 0x1bba
+#define MT6359_LDO_VEFUSE_CON0 0x1bc8
+#define MT6359_LDO_VEFUSE_MON 0x1bca
+#define MT6359_LDO_VCN33_1_CON0 0x1bd8
+#define MT6359_LDO_VCN33_1_MON 0x1bda
+#define MT6359_LDO_VCN33_1_MULTI_SW 0x1be8
+#define MT6359_LDO_VCN33_2_CON0 0x1c08
+#define MT6359_LDO_VCN33_2_MON 0x1c0a
+#define MT6359_LDO_VCN33_2_MULTI_SW 0x1c18
+#define MT6359_LDO_VCN13_CON0 0x1c1a
+#define MT6359_LDO_VCN13_MON 0x1c1c
+#define MT6359_LDO_VCN18_CON0 0x1c2a
+#define MT6359_LDO_VCN18_MON 0x1c2c
+#define MT6359_LDO_VA09_CON0 0x1c3a
+#define MT6359_LDO_VA09_MON 0x1c3c
+#define MT6359_LDO_VCAMIO_CON0 0x1c4a
+#define MT6359_LDO_VCAMIO_MON 0x1c4c
+#define MT6359_LDO_VA12_CON0 0x1c5a
+#define MT6359_LDO_VA12_MON 0x1c5c
+#define MT6359_LDO_VAUX18_CON0 0x1c88
+#define MT6359_LDO_VAUX18_MON 0x1c8a
+#define MT6359_LDO_VAUD18_CON0 0x1c98
+#define MT6359_LDO_VAUD18_MON 0x1c9a
+#define MT6359_LDO_VIO18_CON0 0x1ca8
+#define MT6359_LDO_VIO18_MON 0x1caa
+#define MT6359_LDO_VEMC_CON0 0x1cb8
+#define MT6359_LDO_VEMC_MON 0x1cba
+#define MT6359_LDO_VSIM1_CON0 0x1cc8
+#define MT6359_LDO_VSIM1_MON 0x1cca
+#define MT6359_LDO_VSIM2_CON0 0x1cd8
+#define MT6359_LDO_VSIM2_MON 0x1cda
+#define MT6359_LDO_VUSB_CON0 0x1d08
+#define MT6359_LDO_VUSB_MON 0x1d0a
+#define MT6359_LDO_VUSB_MULTI_SW 0x1d18
+#define MT6359_LDO_VRFCK_CON0 0x1d1a
+#define MT6359_LDO_VRFCK_MON 0x1d1c
+#define MT6359_LDO_VBBCK_CON0 0x1d2a
+#define MT6359_LDO_VBBCK_MON 0x1d2c
+#define MT6359_LDO_VBIF28_CON0 0x1d3a
+#define MT6359_LDO_VBIF28_MON 0x1d3c
+#define MT6359_LDO_VIBR_CON0 0x1d4a
+#define MT6359_LDO_VIBR_MON 0x1d4c
+#define MT6359_LDO_VIO28_CON0 0x1d5a
+#define MT6359_LDO_VIO28_MON 0x1d5c
+#define MT6359_LDO_VM18_CON0 0x1d88
+#define MT6359_LDO_VM18_MON 0x1d8a
+#define MT6359_LDO_VUFS_CON0 0x1d98
+#define MT6359_LDO_VUFS_MON 0x1d9a
+#define MT6359_LDO_VSRAM_PROC1_CON0 0x1e88
+#define MT6359_LDO_VSRAM_PROC1_MON 0x1e8a
+#define MT6359_LDO_VSRAM_PROC1_VOSEL1 0x1e8e
+#define MT6359_LDO_VSRAM_PROC2_CON0 0x1ea6
+#define MT6359_LDO_VSRAM_PROC2_MON 0x1ea8
+#define MT6359_LDO_VSRAM_PROC2_VOSEL1 0x1eac
+#define MT6359_LDO_VSRAM_OTHERS_CON0 0x1f08
+#define MT6359_LDO_VSRAM_OTHERS_MON 0x1f0a
+#define MT6359_LDO_VSRAM_OTHERS_VOSEL1 0x1f0e
+#define MT6359_LDO_VSRAM_OTHERS_SSHUB 0x1f26
+#define MT6359_LDO_VSRAM_MD_CON0 0x1f2c
+#define MT6359_LDO_VSRAM_MD_MON 0x1f2e
+#define MT6359_LDO_VSRAM_MD_VOSEL1 0x1f32
+#define MT6359_VFE28_ANA_CON0 0x1f88
+#define MT6359_VAUX18_ANA_CON0 0x1f8c
+#define MT6359_VUSB_ANA_CON0 0x1f90
+#define MT6359_VBIF28_ANA_CON0 0x1f94
+#define MT6359_VCN33_1_ANA_CON0 0x1f98
+#define MT6359_VCN33_2_ANA_CON0 0x1f9c
+#define MT6359_VEMC_ANA_CON0 0x1fa0
+#define MT6359_VSIM1_ANA_CON0 0x1fa4
+#define MT6359_VSIM2_ANA_CON0 0x1fa8
+#define MT6359_VIO28_ANA_CON0 0x1fac
+#define MT6359_VIBR_ANA_CON0 0x1fb0
+#define MT6359_VRF18_ANA_CON0 0x2008
+#define MT6359_VEFUSE_ANA_CON0 0x200c
+#define MT6359_VCN18_ANA_CON0 0x2010
+#define MT6359_VCAMIO_ANA_CON0 0x2014
+#define MT6359_VAUD18_ANA_CON0 0x2018
+#define MT6359_VIO18_ANA_CON0 0x201c
+#define MT6359_VM18_ANA_CON0 0x2020
+#define MT6359_VUFS_ANA_CON0 0x2024
+#define MT6359_VRF12_ANA_CON0 0x202a
+#define MT6359_VCN13_ANA_CON0 0x202e
+#define MT6359_VA09_ANA_CON0 0x2032
+#define MT6359_VA12_ANA_CON0 0x2036
+#define MT6359_VXO22_ANA_CON0 0x2088
+#define MT6359_VRFCK_ANA_CON0 0x208c
+#define MT6359_VBBCK_ANA_CON0 0x2094
+#define MT6359_AUD_TOP_INT_CON0 0x2328
+#define MT6359_AUD_TOP_INT_STATUS0 0x2334
+
+#define MT6359_RG_BUCK_VPU_EN_ADDR MT6359_BUCK_VPU_CON0
+#define MT6359_RG_BUCK_VPU_LP_ADDR MT6359_BUCK_VPU_CON0
+#define MT6359_RG_BUCK_VPU_LP_SHIFT 1
+#define MT6359_DA_VPU_VOSEL_ADDR MT6359_BUCK_VPU_DBG0
+#define MT6359_DA_VPU_VOSEL_MASK 0x7F
+#define MT6359_DA_VPU_VOSEL_SHIFT 0
+#define MT6359_DA_VPU_EN_ADDR MT6359_BUCK_VPU_DBG1
+#define MT6359_RG_BUCK_VPU_VOSEL_ADDR MT6359_BUCK_VPU_ELR0
+#define MT6359_RG_BUCK_VPU_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPU_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VCORE_EN_ADDR MT6359_BUCK_VCORE_CON0
+#define MT6359_RG_BUCK_VCORE_LP_ADDR MT6359_BUCK_VCORE_CON0
+#define MT6359_RG_BUCK_VCORE_LP_SHIFT 1
+#define MT6359_DA_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_DBG0
+#define MT6359_DA_VCORE_VOSEL_MASK 0x7F
+#define MT6359_DA_VCORE_VOSEL_SHIFT 0
+#define MT6359_DA_VCORE_EN_ADDR MT6359_BUCK_VCORE_DBG1
+#define MT6359_RG_BUCK_VCORE_SSHUB_EN_ADDR MT6359_BUCK_VCORE_SSHUB_CON0
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_ADDR MT6359_BUCK_VCORE_SSHUB_CON0
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VCORE_SSHUB_VOSEL_SHIFT 4
+#define MT6359_RG_BUCK_VCORE_VOSEL_ADDR MT6359_BUCK_VCORE_ELR0
+#define MT6359_RG_BUCK_VCORE_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VCORE_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_CON0
+#define MT6359_RG_BUCK_VGPU11_LP_ADDR MT6359_BUCK_VGPU11_CON0
+#define MT6359_RG_BUCK_VGPU11_LP_SHIFT 1
+#define MT6359_DA_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_DBG0
+#define MT6359_DA_VGPU11_VOSEL_MASK 0x7F
+#define MT6359_DA_VGPU11_VOSEL_SHIFT 0
+#define MT6359_DA_VGPU11_EN_ADDR MT6359_BUCK_VGPU11_DBG1
+#define MT6359_RG_BUCK_VGPU11_VOSEL_ADDR MT6359_BUCK_VGPU11_ELR0
+#define MT6359_RG_BUCK_VGPU11_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VGPU11_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_CON0
+#define MT6359_RG_BUCK_VMODEM_LP_ADDR MT6359_BUCK_VMODEM_CON0
+#define MT6359_RG_BUCK_VMODEM_LP_SHIFT 1
+#define MT6359_DA_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_DBG0
+#define MT6359_DA_VMODEM_VOSEL_MASK 0x7F
+#define MT6359_DA_VMODEM_VOSEL_SHIFT 0
+#define MT6359_DA_VMODEM_EN_ADDR MT6359_BUCK_VMODEM_DBG1
+#define MT6359_RG_BUCK_VMODEM_VOSEL_ADDR MT6359_BUCK_VMODEM_ELR0
+#define MT6359_RG_BUCK_VMODEM_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VMODEM_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_CON0
+#define MT6359_RG_BUCK_VPROC1_LP_ADDR MT6359_BUCK_VPROC1_CON0
+#define MT6359_RG_BUCK_VPROC1_LP_SHIFT 1
+#define MT6359_DA_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_DBG0
+#define MT6359_DA_VPROC1_VOSEL_MASK 0x7F
+#define MT6359_DA_VPROC1_VOSEL_SHIFT 0
+#define MT6359_DA_VPROC1_EN_ADDR MT6359_BUCK_VPROC1_DBG1
+#define MT6359_RG_BUCK_VPROC1_VOSEL_ADDR MT6359_BUCK_VPROC1_ELR0
+#define MT6359_RG_BUCK_VPROC1_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPROC1_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_CON0
+#define MT6359_RG_BUCK_VPROC2_LP_ADDR MT6359_BUCK_VPROC2_CON0
+#define MT6359_RG_BUCK_VPROC2_LP_SHIFT 1
+#define MT6359_DA_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_DBG0
+#define MT6359_DA_VPROC2_VOSEL_MASK 0x7F
+#define MT6359_DA_VPROC2_VOSEL_SHIFT 0
+#define MT6359_DA_VPROC2_EN_ADDR MT6359_BUCK_VPROC2_DBG1
+#define MT6359_RG_BUCK_VPROC2_VOSEL_ADDR MT6359_BUCK_VPROC2_ELR0
+#define MT6359_RG_BUCK_VPROC2_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VPROC2_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VS1_EN_ADDR MT6359_BUCK_VS1_CON0
+#define MT6359_RG_BUCK_VS1_LP_ADDR MT6359_BUCK_VS1_CON0
+#define MT6359_RG_BUCK_VS1_LP_SHIFT 1
+#define MT6359_DA_VS1_VOSEL_ADDR MT6359_BUCK_VS1_DBG0
+#define MT6359_DA_VS1_VOSEL_MASK 0x7F
+#define MT6359_DA_VS1_VOSEL_SHIFT 0
+#define MT6359_DA_VS1_EN_ADDR MT6359_BUCK_VS1_DBG1
+#define MT6359_RG_BUCK_VS1_VOSEL_ADDR MT6359_BUCK_VS1_ELR0
+#define MT6359_RG_BUCK_VS1_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VS1_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VS2_EN_ADDR MT6359_BUCK_VS2_CON0
+#define MT6359_RG_BUCK_VS2_LP_ADDR MT6359_BUCK_VS2_CON0
+#define MT6359_RG_BUCK_VS2_LP_SHIFT 1
+#define MT6359_DA_VS2_VOSEL_ADDR MT6359_BUCK_VS2_DBG0
+#define MT6359_DA_VS2_VOSEL_MASK 0x7F
+#define MT6359_DA_VS2_VOSEL_SHIFT 0
+#define MT6359_DA_VS2_EN_ADDR MT6359_BUCK_VS2_DBG1
+#define MT6359_RG_BUCK_VS2_VOSEL_ADDR MT6359_BUCK_VS2_ELR0
+#define MT6359_RG_BUCK_VS2_VOSEL_MASK 0x7F
+#define MT6359_RG_BUCK_VS2_VOSEL_SHIFT 0
+#define MT6359_RG_BUCK_VPA_EN_ADDR MT6359_BUCK_VPA_CON0
+#define MT6359_RG_BUCK_VPA_LP_ADDR MT6359_BUCK_VPA_CON0
+#define MT6359_RG_BUCK_VPA_LP_SHIFT 1
+#define MT6359_RG_BUCK_VPA_VOSEL_ADDR MT6359_BUCK_VPA_CON1
+#define MT6359_RG_BUCK_VPA_VOSEL_MASK 0x3F
+#define MT6359_RG_BUCK_VPA_VOSEL_SHIFT 0
+#define MT6359_DA_VPA_VOSEL_ADDR MT6359_BUCK_VPA_DBG0
+#define MT6359_DA_VPA_VOSEL_MASK 0x3F
+#define MT6359_DA_VPA_VOSEL_SHIFT 0
+#define MT6359_DA_VPA_EN_ADDR MT6359_BUCK_VPA_DBG1
+#define MT6359_RG_VGPU11_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON2
+#define MT6359_RG_VGPU11_FCCM_SHIFT 9
+#define MT6359_RG_VCORE_FCCM_ADDR MT6359_VGPUVCORE_ANA_CON13
+#define MT6359_RG_VCORE_FCCM_SHIFT 5
+#define MT6359_RG_VPROC1_FCCM_ADDR MT6359_VPROC1_ANA_CON3
+#define MT6359_RG_VPROC1_FCCM_SHIFT 1
+#define MT6359_RG_VPROC2_FCCM_ADDR MT6359_VPROC2_ANA_CON3
+#define MT6359_RG_VPROC2_FCCM_SHIFT 1
+#define MT6359_RG_VMODEM_FCCM_ADDR MT6359_VMODEM_ANA_CON3
+#define MT6359_RG_VMODEM_FCCM_SHIFT 1
+#define MT6359_RG_VPU_FCCM_ADDR MT6359_VPU_ANA_CON3
+#define MT6359_RG_VPU_FCCM_SHIFT 1
+#define MT6359_RG_VS1_FPWM_ADDR MT6359_VS1_ANA_CON0
+#define MT6359_RG_VS1_FPWM_SHIFT 3
+#define MT6359_RG_VS2_FPWM_ADDR MT6359_VS2_ANA_CON0
+#define MT6359_RG_VS2_FPWM_SHIFT 3
+#define MT6359_RG_VPA_MODESET_ADDR MT6359_VPA_ANA_CON0
+#define MT6359_RG_VPA_MODESET_SHIFT 1
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_ELR
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_PROC1_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_ELR
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_PROC2_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_ELR
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_OTHERS_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_ELR
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_MD_VOSEL_SHIFT 0
+#define MT6359_RG_LDO_VFE28_EN_ADDR MT6359_LDO_VFE28_CON0
+#define MT6359_DA_VFE28_B_EN_ADDR MT6359_LDO_VFE28_MON
+#define MT6359_RG_LDO_VXO22_EN_ADDR MT6359_LDO_VXO22_CON0
+#define MT6359_RG_LDO_VXO22_EN_SHIFT 0
+#define MT6359_DA_VXO22_B_EN_ADDR MT6359_LDO_VXO22_MON
+#define MT6359_RG_LDO_VRF18_EN_ADDR MT6359_LDO_VRF18_CON0
+#define MT6359_RG_LDO_VRF18_EN_SHIFT 0
+#define MT6359_DA_VRF18_B_EN_ADDR MT6359_LDO_VRF18_MON
+#define MT6359_RG_LDO_VRF12_EN_ADDR MT6359_LDO_VRF12_CON0
+#define MT6359_RG_LDO_VRF12_EN_SHIFT 0
+#define MT6359_DA_VRF12_B_EN_ADDR MT6359_LDO_VRF12_MON
+#define MT6359_RG_LDO_VEFUSE_EN_ADDR MT6359_LDO_VEFUSE_CON0
+#define MT6359_RG_LDO_VEFUSE_EN_SHIFT 0
+#define MT6359_DA_VEFUSE_B_EN_ADDR MT6359_LDO_VEFUSE_MON
+#define MT6359_RG_LDO_VCN33_1_EN_0_ADDR MT6359_LDO_VCN33_1_CON0
+#define MT6359_RG_LDO_VCN33_1_EN_0_MASK 0x1
+#define MT6359_RG_LDO_VCN33_1_EN_0_SHIFT 0
+#define MT6359_DA_VCN33_1_B_EN_ADDR MT6359_LDO_VCN33_1_MON
+#define MT6359_RG_LDO_VCN33_1_EN_1_ADDR MT6359_LDO_VCN33_1_MULTI_SW
+#define MT6359_RG_LDO_VCN33_1_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VCN33_2_EN_0_ADDR MT6359_LDO_VCN33_2_CON0
+#define MT6359_RG_LDO_VCN33_2_EN_0_SHIFT 0
+#define MT6359_DA_VCN33_2_B_EN_ADDR MT6359_LDO_VCN33_2_MON
+#define MT6359_RG_LDO_VCN33_2_EN_1_ADDR MT6359_LDO_VCN33_2_MULTI_SW
+#define MT6359_RG_LDO_VCN33_2_EN_1_MASK 0x1
+#define MT6359_RG_LDO_VCN33_2_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VCN13_EN_ADDR MT6359_LDO_VCN13_CON0
+#define MT6359_RG_LDO_VCN13_EN_SHIFT 0
+#define MT6359_DA_VCN13_B_EN_ADDR MT6359_LDO_VCN13_MON
+#define MT6359_RG_LDO_VCN18_EN_ADDR MT6359_LDO_VCN18_CON0
+#define MT6359_DA_VCN18_B_EN_ADDR MT6359_LDO_VCN18_MON
+#define MT6359_RG_LDO_VA09_EN_ADDR MT6359_LDO_VA09_CON0
+#define MT6359_RG_LDO_VA09_EN_SHIFT 0
+#define MT6359_DA_VA09_B_EN_ADDR MT6359_LDO_VA09_MON
+#define MT6359_RG_LDO_VCAMIO_EN_ADDR MT6359_LDO_VCAMIO_CON0
+#define MT6359_RG_LDO_VCAMIO_EN_SHIFT 0
+#define MT6359_DA_VCAMIO_B_EN_ADDR MT6359_LDO_VCAMIO_MON
+#define MT6359_RG_LDO_VA12_EN_ADDR MT6359_LDO_VA12_CON0
+#define MT6359_RG_LDO_VA12_EN_SHIFT 0
+#define MT6359_DA_VA12_B_EN_ADDR MT6359_LDO_VA12_MON
+#define MT6359_RG_LDO_VAUX18_EN_ADDR MT6359_LDO_VAUX18_CON0
+#define MT6359_DA_VAUX18_B_EN_ADDR MT6359_LDO_VAUX18_MON
+#define MT6359_RG_LDO_VAUD18_EN_ADDR MT6359_LDO_VAUD18_CON0
+#define MT6359_DA_VAUD18_B_EN_ADDR MT6359_LDO_VAUD18_MON
+#define MT6359_RG_LDO_VIO18_EN_ADDR MT6359_LDO_VIO18_CON0
+#define MT6359_RG_LDO_VIO18_EN_SHIFT 0
+#define MT6359_DA_VIO18_B_EN_ADDR MT6359_LDO_VIO18_MON
+#define MT6359_RG_LDO_VEMC_EN_ADDR MT6359_LDO_VEMC_CON0
+#define MT6359_RG_LDO_VEMC_EN_SHIFT 0
+#define MT6359_DA_VEMC_B_EN_ADDR MT6359_LDO_VEMC_MON
+#define MT6359_RG_LDO_VSIM1_EN_ADDR MT6359_LDO_VSIM1_CON0
+#define MT6359_RG_LDO_VSIM1_EN_SHIFT 0
+#define MT6359_DA_VSIM1_B_EN_ADDR MT6359_LDO_VSIM1_MON
+#define MT6359_RG_LDO_VSIM2_EN_ADDR MT6359_LDO_VSIM2_CON0
+#define MT6359_RG_LDO_VSIM2_EN_SHIFT 0
+#define MT6359_DA_VSIM2_B_EN_ADDR MT6359_LDO_VSIM2_MON
+#define MT6359_RG_LDO_VUSB_EN_0_ADDR MT6359_LDO_VUSB_CON0
+#define MT6359_RG_LDO_VUSB_EN_0_MASK 0x1
+#define MT6359_RG_LDO_VUSB_EN_0_SHIFT 0
+#define MT6359_DA_VUSB_B_EN_ADDR MT6359_LDO_VUSB_MON
+#define MT6359_RG_LDO_VUSB_EN_1_ADDR MT6359_LDO_VUSB_MULTI_SW
+#define MT6359_RG_LDO_VUSB_EN_1_MASK 0x1
+#define MT6359_RG_LDO_VUSB_EN_1_SHIFT 15
+#define MT6359_RG_LDO_VRFCK_EN_ADDR MT6359_LDO_VRFCK_CON0
+#define MT6359_RG_LDO_VRFCK_EN_SHIFT 0
+#define MT6359_DA_VRFCK_B_EN_ADDR MT6359_LDO_VRFCK_MON
+#define MT6359_RG_LDO_VBBCK_EN_ADDR MT6359_LDO_VBBCK_CON0
+#define MT6359_RG_LDO_VBBCK_EN_SHIFT 0
+#define MT6359_DA_VBBCK_B_EN_ADDR MT6359_LDO_VBBCK_MON
+#define MT6359_RG_LDO_VBIF28_EN_ADDR MT6359_LDO_VBIF28_CON0
+#define MT6359_DA_VBIF28_B_EN_ADDR MT6359_LDO_VBIF28_MON
+#define MT6359_RG_LDO_VIBR_EN_ADDR MT6359_LDO_VIBR_CON0
+#define MT6359_RG_LDO_VIBR_EN_SHIFT 0
+#define MT6359_DA_VIBR_B_EN_ADDR MT6359_LDO_VIBR_MON
+#define MT6359_RG_LDO_VIO28_EN_ADDR MT6359_LDO_VIO28_CON0
+#define MT6359_RG_LDO_VIO28_EN_SHIFT 0
+#define MT6359_DA_VIO28_B_EN_ADDR MT6359_LDO_VIO28_MON
+#define MT6359_RG_LDO_VM18_EN_ADDR MT6359_LDO_VM18_CON0
+#define MT6359_RG_LDO_VM18_EN_SHIFT 0
+#define MT6359_DA_VM18_B_EN_ADDR MT6359_LDO_VM18_MON
+#define MT6359_RG_LDO_VUFS_EN_ADDR MT6359_LDO_VUFS_CON0
+#define MT6359_RG_LDO_VUFS_EN_SHIFT 0
+#define MT6359_DA_VUFS_B_EN_ADDR MT6359_LDO_VUFS_MON
+#define MT6359_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359_LDO_VSRAM_PROC1_CON0
+#define MT6359_DA_VSRAM_PROC1_B_EN_ADDR MT6359_LDO_VSRAM_PROC1_MON
+#define MT6359_DA_VSRAM_PROC1_VOSEL_ADDR MT6359_LDO_VSRAM_PROC1_VOSEL1
+#define MT6359_DA_VSRAM_PROC1_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_PROC1_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359_LDO_VSRAM_PROC2_CON0
+#define MT6359_DA_VSRAM_PROC2_B_EN_ADDR MT6359_LDO_VSRAM_PROC2_MON
+#define MT6359_DA_VSRAM_PROC2_VOSEL_ADDR MT6359_LDO_VSRAM_PROC2_VOSEL1
+#define MT6359_DA_VSRAM_PROC2_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_PROC2_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359_LDO_VSRAM_OTHERS_CON0
+#define MT6359_DA_VSRAM_OTHERS_B_EN_ADDR MT6359_LDO_VSRAM_OTHERS_MON
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_VOSEL1
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_OTHERS_VOSEL_SHIFT 8
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_MASK 0x7F
+#define MT6359_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_SHIFT 1
+#define MT6359_RG_LDO_VSRAM_MD_EN_ADDR MT6359_LDO_VSRAM_MD_CON0
+#define MT6359_DA_VSRAM_MD_B_EN_ADDR MT6359_LDO_VSRAM_MD_MON
+#define MT6359_DA_VSRAM_MD_VOSEL_ADDR MT6359_LDO_VSRAM_MD_VOSEL1
+#define MT6359_DA_VSRAM_MD_VOSEL_MASK 0x7F
+#define MT6359_DA_VSRAM_MD_VOSEL_SHIFT 8
+#define MT6359_RG_VCN33_1_VOSEL_ADDR MT6359_VCN33_1_ANA_CON0
+#define MT6359_RG_VCN33_1_VOSEL_MASK 0xF
+#define MT6359_RG_VCN33_1_VOSEL_SHIFT 8
+#define MT6359_RG_VCN33_2_VOSEL_ADDR MT6359_VCN33_2_ANA_CON0
+#define MT6359_RG_VCN33_2_VOSEL_MASK 0xF
+#define MT6359_RG_VCN33_2_VOSEL_SHIFT 8
+#define MT6359_RG_VEMC_VOSEL_ADDR MT6359_VEMC_ANA_CON0
+#define MT6359_RG_VEMC_VOSEL_MASK 0xF
+#define MT6359_RG_VEMC_VOSEL_SHIFT 8
+#define MT6359_RG_VSIM1_VOSEL_ADDR MT6359_VSIM1_ANA_CON0
+#define MT6359_RG_VSIM1_VOSEL_MASK 0xF
+#define MT6359_RG_VSIM1_VOSEL_SHIFT 8
+#define MT6359_RG_VSIM2_VOSEL_ADDR MT6359_VSIM2_ANA_CON0
+#define MT6359_RG_VSIM2_VOSEL_MASK 0xF
+#define MT6359_RG_VSIM2_VOSEL_SHIFT 8
+#define MT6359_RG_VIO28_VOSEL_ADDR MT6359_VIO28_ANA_CON0
+#define MT6359_RG_VIO28_VOSEL_MASK 0xF
+#define MT6359_RG_VIO28_VOSEL_SHIFT 8
+#define MT6359_RG_VIBR_VOSEL_ADDR MT6359_VIBR_ANA_CON0
+#define MT6359_RG_VIBR_VOSEL_MASK 0xF
+#define MT6359_RG_VIBR_VOSEL_SHIFT 8
+#define MT6359_RG_VRF18_VOSEL_ADDR MT6359_VRF18_ANA_CON0
+#define MT6359_RG_VRF18_VOSEL_MASK 0xF
+#define MT6359_RG_VRF18_VOSEL_SHIFT 8
+#define MT6359_RG_VEFUSE_VOSEL_ADDR MT6359_VEFUSE_ANA_CON0
+#define MT6359_RG_VEFUSE_VOSEL_MASK 0xF
+#define MT6359_RG_VEFUSE_VOSEL_SHIFT 8
+#define MT6359_RG_VCAMIO_VOSEL_ADDR MT6359_VCAMIO_ANA_CON0
+#define MT6359_RG_VCAMIO_VOSEL_MASK 0xF
+#define MT6359_RG_VCAMIO_VOSEL_SHIFT 8
+#define MT6359_RG_VIO18_VOSEL_ADDR MT6359_VIO18_ANA_CON0
+#define MT6359_RG_VIO18_VOSEL_MASK 0xF
+#define MT6359_RG_VIO18_VOSEL_SHIFT 8
+#define MT6359_RG_VM18_VOSEL_ADDR MT6359_VM18_ANA_CON0
+#define MT6359_RG_VM18_VOSEL_MASK 0xF
+#define MT6359_RG_VM18_VOSEL_SHIFT 8
+#define MT6359_RG_VUFS_VOSEL_ADDR MT6359_VUFS_ANA_CON0
+#define MT6359_RG_VUFS_VOSEL_MASK 0xF
+#define MT6359_RG_VUFS_VOSEL_SHIFT 8
+#define MT6359_RG_VRF12_VOSEL_ADDR MT6359_VRF12_ANA_CON0
+#define MT6359_RG_VRF12_VOSEL_MASK 0xF
+#define MT6359_RG_VRF12_VOSEL_SHIFT 8
+#define MT6359_RG_VCN13_VOSEL_ADDR MT6359_VCN13_ANA_CON0
+#define MT6359_RG_VCN13_VOSEL_MASK 0xF
+#define MT6359_RG_VCN13_VOSEL_SHIFT 8
+#define MT6359_RG_VA09_VOSEL_ADDR MT6359_VA09_ANA_CON0
+#define MT6359_RG_VA09_VOSEL_MASK 0xF
+#define MT6359_RG_VA09_VOSEL_SHIFT 8
+#define MT6359_RG_VA12_VOSEL_ADDR MT6359_VA12_ANA_CON0
+#define MT6359_RG_VA12_VOSEL_MASK 0xF
+#define MT6359_RG_VA12_VOSEL_SHIFT 8
+#define MT6359_RG_VXO22_VOSEL_ADDR MT6359_VXO22_ANA_CON0
+#define MT6359_RG_VXO22_VOSEL_MASK 0xF
+#define MT6359_RG_VXO22_VOSEL_SHIFT 8
+#define MT6359_RG_VRFCK_VOSEL_ADDR MT6359_VRFCK_ANA_CON0
+#define MT6359_RG_VRFCK_VOSEL_MASK 0xF
+#define MT6359_RG_VRFCK_VOSEL_SHIFT 8
+#define MT6359_RG_VBBCK_VOSEL_ADDR MT6359_VBBCK_ANA_CON0
+#define MT6359_RG_VBBCK_VOSEL_MASK 0xF
+#define MT6359_RG_VBBCK_VOSEL_SHIFT 8
+
+#endif /* __MFD_MT6359_REGISTERS_H__ */
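The _ADDR/_MASK/_SHIFT triplets defined above follow the usual
read-shift-mask pattern for bitfield access. A minimal sketch reading the
7-bit VCORE output-voltage selector, assuming a 16-bit regmap; the function
name is illustrative:

    #include <linux/regmap.h>
    #include <linux/mfd/mt6359/registers.h>

    /* Hypothetical sketch: extract RG_BUCK_VCORE_VOSEL from its register. */
    static int mt6359_read_vcore_vosel(struct regmap *map, unsigned int *sel)
    {
            unsigned int val;
            int ret;

            ret = regmap_read(map, MT6359_RG_BUCK_VCORE_VOSEL_ADDR, &val);
            if (ret)
                    return ret;

            *sel = (val >> MT6359_RG_BUCK_VCORE_VOSEL_SHIFT) &
                   MT6359_RG_BUCK_VCORE_VOSEL_MASK;
            return 0;
    }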
diff --git a/include/linux/mfd/mt6359p/registers.h b/include/linux/mfd/mt6359p/registers.h
new file mode 100644
index 000000000000..3d97c1885171
--- /dev/null
+++ b/include/linux/mfd/mt6359p/registers.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __MFD_MT6359P_REGISTERS_H__
+#define __MFD_MT6359P_REGISTERS_H__
+
+#define MT6359P_CHIP_VER 0x5930
+
+/* PMIC Registers */
+#define MT6359P_HWCID 0x8
+#define MT6359P_TOP_TRAP 0x50
+#define MT6359P_TOP_TMA_KEY 0x3a8
+#define MT6359P_BUCK_VCORE_ELR_NUM 0x152a
+#define MT6359P_BUCK_VCORE_ELR0 0x152c
+#define MT6359P_BUCK_VGPU11_SSHUB_CON0 0x15aa
+#define MT6359P_BUCK_VGPU11_ELR0 0x15b4
+#define MT6359P_LDO_VSRAM_PROC1_ELR 0x1b44
+#define MT6359P_LDO_VSRAM_PROC2_ELR 0x1b46
+#define MT6359P_LDO_VSRAM_OTHERS_ELR 0x1b48
+#define MT6359P_LDO_VSRAM_MD_ELR 0x1b4a
+#define MT6359P_LDO_VEMC_ELR_0 0x1b4c
+#define MT6359P_LDO_VFE28_CON0 0x1b88
+#define MT6359P_LDO_VFE28_MON 0x1b8c
+#define MT6359P_LDO_VXO22_CON0 0x1b9a
+#define MT6359P_LDO_VXO22_MON 0x1b9e
+#define MT6359P_LDO_VRF18_CON0 0x1bac
+#define MT6359P_LDO_VRF18_MON 0x1bb0
+#define MT6359P_LDO_VRF12_CON0 0x1bbe
+#define MT6359P_LDO_VRF12_MON 0x1bc2
+#define MT6359P_LDO_VEFUSE_CON0 0x1bd0
+#define MT6359P_LDO_VEFUSE_MON 0x1bd4
+#define MT6359P_LDO_VCN33_1_CON0 0x1be2
+#define MT6359P_LDO_VCN33_1_MON 0x1be6
+#define MT6359P_LDO_VCN33_1_MULTI_SW 0x1bf4
+#define MT6359P_LDO_VCN33_2_CON0 0x1c08
+#define MT6359P_LDO_VCN33_2_MON 0x1c0c
+#define MT6359P_LDO_VCN33_2_MULTI_SW 0x1c1a
+#define MT6359P_LDO_VCN13_CON0 0x1c1c
+#define MT6359P_LDO_VCN13_MON 0x1c20
+#define MT6359P_LDO_VCN18_CON0 0x1c2e
+#define MT6359P_LDO_VCN18_MON 0x1c32
+#define MT6359P_LDO_VA09_CON0 0x1c40
+#define MT6359P_LDO_VA09_MON 0x1c44
+#define MT6359P_LDO_VCAMIO_CON0 0x1c52
+#define MT6359P_LDO_VCAMIO_MON 0x1c56
+#define MT6359P_LDO_VA12_CON0 0x1c64
+#define MT6359P_LDO_VA12_MON 0x1c68
+#define MT6359P_LDO_VAUX18_CON0 0x1c88
+#define MT6359P_LDO_VAUX18_MON 0x1c8c
+#define MT6359P_LDO_VAUD18_CON0 0x1c9a
+#define MT6359P_LDO_VAUD18_MON 0x1c9e
+#define MT6359P_LDO_VIO18_CON0 0x1cac
+#define MT6359P_LDO_VIO18_MON 0x1cb0
+#define MT6359P_LDO_VEMC_CON0 0x1cbe
+#define MT6359P_LDO_VEMC_MON 0x1cc2
+#define MT6359P_LDO_VSIM1_CON0 0x1cd0
+#define MT6359P_LDO_VSIM1_MON 0x1cd4
+#define MT6359P_LDO_VSIM2_CON0 0x1ce2
+#define MT6359P_LDO_VSIM2_MON 0x1ce6
+#define MT6359P_LDO_VUSB_CON0 0x1d08
+#define MT6359P_LDO_VUSB_MON 0x1d0c
+#define MT6359P_LDO_VUSB_MULTI_SW 0x1d1a
+#define MT6359P_LDO_VRFCK_CON0 0x1d1c
+#define MT6359P_LDO_VRFCK_MON 0x1d20
+#define MT6359P_LDO_VBBCK_CON0 0x1d2e
+#define MT6359P_LDO_VBBCK_MON 0x1d32
+#define MT6359P_LDO_VBIF28_CON0 0x1d40
+#define MT6359P_LDO_VBIF28_MON 0x1d44
+#define MT6359P_LDO_VIBR_CON0 0x1d52
+#define MT6359P_LDO_VIBR_MON 0x1d56
+#define MT6359P_LDO_VIO28_CON0 0x1d64
+#define MT6359P_LDO_VIO28_MON 0x1d68
+#define MT6359P_LDO_VM18_CON0 0x1d88
+#define MT6359P_LDO_VM18_MON 0x1d8c
+#define MT6359P_LDO_VUFS_CON0 0x1d9a
+#define MT6359P_LDO_VUFS_MON 0x1d9e
+#define MT6359P_LDO_VSRAM_PROC1_CON0 0x1e88
+#define MT6359P_LDO_VSRAM_PROC1_MON 0x1e8c
+#define MT6359P_LDO_VSRAM_PROC1_VOSEL1 0x1e90
+#define MT6359P_LDO_VSRAM_PROC2_CON0 0x1ea8
+#define MT6359P_LDO_VSRAM_PROC2_MON 0x1eac
+#define MT6359P_LDO_VSRAM_PROC2_VOSEL1 0x1eb0
+#define MT6359P_LDO_VSRAM_OTHERS_CON0 0x1f08
+#define MT6359P_LDO_VSRAM_OTHERS_MON 0x1f0c
+#define MT6359P_LDO_VSRAM_OTHERS_VOSEL1 0x1f10
+#define MT6359P_LDO_VSRAM_OTHERS_SSHUB 0x1f28
+#define MT6359P_LDO_VSRAM_MD_CON0 0x1f2e
+#define MT6359P_LDO_VSRAM_MD_MON 0x1f32
+#define MT6359P_LDO_VSRAM_MD_VOSEL1 0x1f36
+#define MT6359P_VFE28_ANA_CON0 0x1f88
+#define MT6359P_VAUX18_ANA_CON0 0x1f8c
+#define MT6359P_VUSB_ANA_CON0 0x1f90
+#define MT6359P_VBIF28_ANA_CON0 0x1f94
+#define MT6359P_VCN33_1_ANA_CON0 0x1f98
+#define MT6359P_VCN33_2_ANA_CON0 0x1f9c
+#define MT6359P_VEMC_ANA_CON0 0x1fa0
+#define MT6359P_VSIM1_ANA_CON0 0x1fa2
+#define MT6359P_VSIM2_ANA_CON0 0x1fa6
+#define MT6359P_VIO28_ANA_CON0 0x1faa
+#define MT6359P_VIBR_ANA_CON0 0x1fae
+#define MT6359P_VFE28_ELR_4 0x1fc0
+#define MT6359P_VRF18_ANA_CON0 0x2008
+#define MT6359P_VEFUSE_ANA_CON0 0x200c
+#define MT6359P_VCN18_ANA_CON0 0x2010
+#define MT6359P_VCAMIO_ANA_CON0 0x2014
+#define MT6359P_VAUD18_ANA_CON0 0x2018
+#define MT6359P_VIO18_ANA_CON0 0x201c
+#define MT6359P_VM18_ANA_CON0 0x2020
+#define MT6359P_VUFS_ANA_CON0 0x2024
+#define MT6359P_VRF12_ANA_CON0 0x202a
+#define MT6359P_VCN13_ANA_CON0 0x202e
+#define MT6359P_VA09_ANA_CON0 0x2032
+#define MT6359P_VRF18_ELR_3 0x204e
+#define MT6359P_VXO22_ANA_CON0 0x2088
+#define MT6359P_VRFCK_ANA_CON0 0x208c
+#define MT6359P_VBBCK_ANA_CON0 0x2096
+
+#define MT6359P_RG_BUCK_VCORE_VOSEL_ADDR MT6359P_BUCK_VCORE_ELR0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_EN_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0
+#define MT6359P_RG_BUCK_VGPU11_VOSEL_ADDR MT6359P_BUCK_VGPU11_ELR0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_ADDR MT6359P_BUCK_VGPU11_SSHUB_CON0
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_MASK 0x7F
+#define MT6359P_RG_BUCK_VGPU11_SSHUB_VOSEL_SHIFT 4
+#define MT6359P_RG_LDO_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_ELR
+#define MT6359P_RG_LDO_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_ELR
+#define MT6359P_RG_LDO_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_ELR
+#define MT6359P_RG_LDO_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_ELR
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_ADDR MT6359P_LDO_VEMC_ELR_0
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_MASK 0xF
+#define MT6359P_RG_LDO_VEMC_VOSEL_0_SHIFT 0
+#define MT6359P_RG_LDO_VFE28_EN_ADDR MT6359P_LDO_VFE28_CON0
+#define MT6359P_DA_VFE28_B_EN_ADDR MT6359P_LDO_VFE28_MON
+#define MT6359P_RG_LDO_VXO22_EN_ADDR MT6359P_LDO_VXO22_CON0
+#define MT6359P_RG_LDO_VXO22_EN_SHIFT 0
+#define MT6359P_DA_VXO22_B_EN_ADDR MT6359P_LDO_VXO22_MON
+#define MT6359P_RG_LDO_VRF18_EN_ADDR MT6359P_LDO_VRF18_CON0
+#define MT6359P_RG_LDO_VRF18_EN_SHIFT 0
+#define MT6359P_DA_VRF18_B_EN_ADDR MT6359P_LDO_VRF18_MON
+#define MT6359P_RG_LDO_VRF12_EN_ADDR MT6359P_LDO_VRF12_CON0
+#define MT6359P_RG_LDO_VRF12_EN_SHIFT 0
+#define MT6359P_DA_VRF12_B_EN_ADDR MT6359P_LDO_VRF12_MON
+#define MT6359P_RG_LDO_VEFUSE_EN_ADDR MT6359P_LDO_VEFUSE_CON0
+#define MT6359P_RG_LDO_VEFUSE_EN_SHIFT 0
+#define MT6359P_DA_VEFUSE_B_EN_ADDR MT6359P_LDO_VEFUSE_MON
+#define MT6359P_RG_LDO_VCN33_1_EN_0_ADDR MT6359P_LDO_VCN33_1_CON0
+#define MT6359P_DA_VCN33_1_B_EN_ADDR MT6359P_LDO_VCN33_1_MON
+#define MT6359P_RG_LDO_VCN33_1_EN_1_ADDR MT6359P_LDO_VCN33_1_MULTI_SW
+#define MT6359P_RG_LDO_VCN33_1_EN_1_SHIFT 15
+#define MT6359P_RG_LDO_VCN33_2_EN_0_ADDR MT6359P_LDO_VCN33_2_CON0
+#define MT6359P_RG_LDO_VCN33_2_EN_0_SHIFT 0
+#define MT6359P_DA_VCN33_2_B_EN_ADDR MT6359P_LDO_VCN33_2_MON
+#define MT6359P_RG_LDO_VCN33_2_EN_1_ADDR MT6359P_LDO_VCN33_2_MULTI_SW
+#define MT6359P_RG_LDO_VCN13_EN_ADDR MT6359P_LDO_VCN13_CON0
+#define MT6359P_RG_LDO_VCN13_EN_SHIFT 0
+#define MT6359P_DA_VCN13_B_EN_ADDR MT6359P_LDO_VCN13_MON
+#define MT6359P_RG_LDO_VCN18_EN_ADDR MT6359P_LDO_VCN18_CON0
+#define MT6359P_DA_VCN18_B_EN_ADDR MT6359P_LDO_VCN18_MON
+#define MT6359P_RG_LDO_VA09_EN_ADDR MT6359P_LDO_VA09_CON0
+#define MT6359P_RG_LDO_VA09_EN_SHIFT 0
+#define MT6359P_DA_VA09_B_EN_ADDR MT6359P_LDO_VA09_MON
+#define MT6359P_RG_LDO_VCAMIO_EN_ADDR MT6359P_LDO_VCAMIO_CON0
+#define MT6359P_RG_LDO_VCAMIO_EN_SHIFT 0
+#define MT6359P_DA_VCAMIO_B_EN_ADDR MT6359P_LDO_VCAMIO_MON
+#define MT6359P_RG_LDO_VA12_EN_ADDR MT6359P_LDO_VA12_CON0
+#define MT6359P_RG_LDO_VA12_EN_SHIFT 0
+#define MT6359P_DA_VA12_B_EN_ADDR MT6359P_LDO_VA12_MON
+#define MT6359P_RG_LDO_VAUX18_EN_ADDR MT6359P_LDO_VAUX18_CON0
+#define MT6359P_DA_VAUX18_B_EN_ADDR MT6359P_LDO_VAUX18_MON
+#define MT6359P_RG_LDO_VAUD18_EN_ADDR MT6359P_LDO_VAUD18_CON0
+#define MT6359P_DA_VAUD18_B_EN_ADDR MT6359P_LDO_VAUD18_MON
+#define MT6359P_RG_LDO_VIO18_EN_ADDR MT6359P_LDO_VIO18_CON0
+#define MT6359P_RG_LDO_VIO18_EN_SHIFT 0
+#define MT6359P_DA_VIO18_B_EN_ADDR MT6359P_LDO_VIO18_MON
+#define MT6359P_RG_LDO_VEMC_EN_ADDR MT6359P_LDO_VEMC_CON0
+#define MT6359P_RG_LDO_VEMC_EN_SHIFT 0
+#define MT6359P_DA_VEMC_B_EN_ADDR MT6359P_LDO_VEMC_MON
+#define MT6359P_RG_LDO_VSIM1_EN_ADDR MT6359P_LDO_VSIM1_CON0
+#define MT6359P_RG_LDO_VSIM1_EN_SHIFT 0
+#define MT6359P_DA_VSIM1_B_EN_ADDR MT6359P_LDO_VSIM1_MON
+#define MT6359P_RG_LDO_VSIM2_EN_ADDR MT6359P_LDO_VSIM2_CON0
+#define MT6359P_RG_LDO_VSIM2_EN_SHIFT 0
+#define MT6359P_DA_VSIM2_B_EN_ADDR MT6359P_LDO_VSIM2_MON
+#define MT6359P_RG_LDO_VUSB_EN_0_ADDR MT6359P_LDO_VUSB_CON0
+#define MT6359P_DA_VUSB_B_EN_ADDR MT6359P_LDO_VUSB_MON
+#define MT6359P_RG_LDO_VUSB_EN_1_ADDR MT6359P_LDO_VUSB_MULTI_SW
+#define MT6359P_RG_LDO_VRFCK_EN_ADDR MT6359P_LDO_VRFCK_CON0
+#define MT6359P_RG_LDO_VRFCK_EN_SHIFT 0
+#define MT6359P_DA_VRFCK_B_EN_ADDR MT6359P_LDO_VRFCK_MON
+#define MT6359P_RG_LDO_VBBCK_EN_ADDR MT6359P_LDO_VBBCK_CON0
+#define MT6359P_RG_LDO_VBBCK_EN_SHIFT 0
+#define MT6359P_DA_VBBCK_B_EN_ADDR MT6359P_LDO_VBBCK_MON
+#define MT6359P_RG_LDO_VBIF28_EN_ADDR MT6359P_LDO_VBIF28_CON0
+#define MT6359P_DA_VBIF28_B_EN_ADDR MT6359P_LDO_VBIF28_MON
+#define MT6359P_RG_LDO_VIBR_EN_ADDR MT6359P_LDO_VIBR_CON0
+#define MT6359P_RG_LDO_VIBR_EN_SHIFT 0
+#define MT6359P_DA_VIBR_B_EN_ADDR MT6359P_LDO_VIBR_MON
+#define MT6359P_RG_LDO_VIO28_EN_ADDR MT6359P_LDO_VIO28_CON0
+#define MT6359P_RG_LDO_VIO28_EN_SHIFT 0
+#define MT6359P_DA_VIO28_B_EN_ADDR MT6359P_LDO_VIO28_MON
+#define MT6359P_RG_LDO_VM18_EN_ADDR MT6359P_LDO_VM18_CON0
+#define MT6359P_RG_LDO_VM18_EN_SHIFT 0
+#define MT6359P_DA_VM18_B_EN_ADDR MT6359P_LDO_VM18_MON
+#define MT6359P_RG_LDO_VUFS_EN_ADDR MT6359P_LDO_VUFS_CON0
+#define MT6359P_RG_LDO_VUFS_EN_SHIFT 0
+#define MT6359P_DA_VUFS_B_EN_ADDR MT6359P_LDO_VUFS_MON
+#define MT6359P_RG_LDO_VSRAM_PROC1_EN_ADDR MT6359P_LDO_VSRAM_PROC1_CON0
+#define MT6359P_DA_VSRAM_PROC1_B_EN_ADDR MT6359P_LDO_VSRAM_PROC1_MON
+#define MT6359P_DA_VSRAM_PROC1_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC1_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_PROC2_EN_ADDR MT6359P_LDO_VSRAM_PROC2_CON0
+#define MT6359P_DA_VSRAM_PROC2_B_EN_ADDR MT6359P_LDO_VSRAM_PROC2_MON
+#define MT6359P_DA_VSRAM_PROC2_VOSEL_ADDR MT6359P_LDO_VSRAM_PROC2_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_OTHERS_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_CON0
+#define MT6359P_DA_VSRAM_OTHERS_B_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_MON
+#define MT6359P_DA_VSRAM_OTHERS_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_VOSEL1
+#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_EN_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359P_RG_LDO_VSRAM_OTHERS_SSHUB_VOSEL_ADDR MT6359P_LDO_VSRAM_OTHERS_SSHUB
+#define MT6359P_RG_LDO_VSRAM_MD_EN_ADDR MT6359P_LDO_VSRAM_MD_CON0
+#define MT6359P_DA_VSRAM_MD_B_EN_ADDR MT6359P_LDO_VSRAM_MD_MON
+#define MT6359P_DA_VSRAM_MD_VOSEL_ADDR MT6359P_LDO_VSRAM_MD_VOSEL1
+#define MT6359P_RG_VCN33_1_VOSEL_ADDR MT6359P_VCN33_1_ANA_CON0
+#define MT6359P_RG_VCN33_2_VOSEL_ADDR MT6359P_VCN33_2_ANA_CON0
+#define MT6359P_RG_VEMC_VOSEL_ADDR MT6359P_VEMC_ANA_CON0
+#define MT6359P_RG_VSIM1_VOSEL_ADDR MT6359P_VSIM1_ANA_CON0
+#define MT6359P_RG_VSIM2_VOSEL_ADDR MT6359P_VSIM2_ANA_CON0
+#define MT6359P_RG_VIO28_VOSEL_ADDR MT6359P_VIO28_ANA_CON0
+#define MT6359P_RG_VIBR_VOSEL_ADDR MT6359P_VIBR_ANA_CON0
+#define MT6359P_RG_VRF18_VOSEL_ADDR MT6359P_VRF18_ANA_CON0
+#define MT6359P_RG_VEFUSE_VOSEL_ADDR MT6359P_VEFUSE_ANA_CON0
+#define MT6359P_RG_VCAMIO_VOSEL_ADDR MT6359P_VCAMIO_ANA_CON0
+#define MT6359P_RG_VIO18_VOSEL_ADDR MT6359P_VIO18_ANA_CON0
+#define MT6359P_RG_VM18_VOSEL_ADDR MT6359P_VM18_ANA_CON0
+#define MT6359P_RG_VUFS_VOSEL_ADDR MT6359P_VUFS_ANA_CON0
+#define MT6359P_RG_VRF12_VOSEL_ADDR MT6359P_VRF12_ANA_CON0
+#define MT6359P_RG_VCN13_VOSEL_ADDR MT6359P_VCN13_ANA_CON0
+#define MT6359P_RG_VA09_VOSEL_ADDR MT6359P_VRF18_ELR_3
+#define MT6359P_RG_VA12_VOSEL_ADDR MT6359P_VFE28_ELR_4
+#define MT6359P_RG_VXO22_VOSEL_ADDR MT6359P_VXO22_ANA_CON0
+#define MT6359P_RG_VRFCK_VOSEL_ADDR MT6359P_VRFCK_ANA_CON0
+#define MT6359P_RG_VBBCK_VOSEL_ADDR MT6359P_VBBCK_ANA_CON0
+#define MT6359P_RG_VBBCK_VOSEL_MASK 0xF
+#define MT6359P_RG_VBBCK_VOSEL_SHIFT 4
+#define MT6359P_VM_MODE_ADDR MT6359P_TOP_TRAP
+#define MT6359P_TMA_KEY_ADDR MT6359P_TOP_TMA_KEY
+
+#define TMA_KEY 0x9CA6
+
+#endif /* __MFD_MT6359P_REGISTERS_H__ */
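The ADDR/MASK/SHIFT macros above follow the usual MediaTek PMIC convention: each *_ADDR macro aliases one of the register offsets defined earlier, and drivers program the field through the parent chip's regmap. A minimal sketch, assuming the caller already holds the PMIC regmap; the function name is illustrative, not part of this header:

  #include <linux/regmap.h>

  /* Sketch: program the 4-bit VBBCK output selector, bits [7:4]. */
  static int mt6359p_set_vbbck_vosel(struct regmap *regmap, unsigned int sel)
  {
          return regmap_update_bits(regmap, MT6359P_RG_VBBCK_VOSEL_ADDR,
                                    MT6359P_RG_VBBCK_VOSEL_MASK <<
                                    MT6359P_RG_VBBCK_VOSEL_SHIFT,
                                    sel << MT6359P_RG_VBBCK_VOSEL_SHIFT);
  }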
diff --git a/include/linux/mfd/mt6360.h b/include/linux/mfd/mt6360.h
deleted file mode 100644
index ea1304035d4d..000000000000
--- a/include/linux/mfd/mt6360.h
+++ /dev/null
@@ -1,240 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2020 MediaTek Inc.
- */
-
-#ifndef __MT6360_H__
-#define __MT6360_H__
-
-#include <linux/regmap.h>
-
-enum {
- MT6360_SLAVE_PMU = 0,
- MT6360_SLAVE_PMIC,
- MT6360_SLAVE_LDO,
- MT6360_SLAVE_TCPC,
- MT6360_SLAVE_MAX,
-};
-
-#define MT6360_PMU_SLAVEID (0x34)
-#define MT6360_PMIC_SLAVEID (0x1A)
-#define MT6360_LDO_SLAVEID (0x64)
-#define MT6360_TCPC_SLAVEID (0x4E)
-
-struct mt6360_pmu_data {
- struct i2c_client *i2c[MT6360_SLAVE_MAX];
- struct device *dev;
- struct regmap *regmap;
- struct regmap_irq_chip_data *irq_data;
- unsigned int chip_rev;
-};
-
-/* PMU register definition */
-#define MT6360_PMU_DEV_INFO (0x00)
-#define MT6360_PMU_CORE_CTRL1 (0x01)
-#define MT6360_PMU_RST1 (0x02)
-#define MT6360_PMU_CRCEN (0x03)
-#define MT6360_PMU_RST_PAS_CODE1 (0x04)
-#define MT6360_PMU_RST_PAS_CODE2 (0x05)
-#define MT6360_PMU_CORE_CTRL2 (0x06)
-#define MT6360_PMU_TM_PAS_CODE1 (0x07)
-#define MT6360_PMU_TM_PAS_CODE2 (0x08)
-#define MT6360_PMU_TM_PAS_CODE3 (0x09)
-#define MT6360_PMU_TM_PAS_CODE4 (0x0A)
-#define MT6360_PMU_IRQ_IND (0x0B)
-#define MT6360_PMU_IRQ_MASK (0x0C)
-#define MT6360_PMU_IRQ_SET (0x0D)
-#define MT6360_PMU_SHDN_CTRL (0x0E)
-#define MT6360_PMU_TM_INF (0x0F)
-#define MT6360_PMU_I2C_CTRL (0x10)
-#define MT6360_PMU_CHG_CTRL1 (0x11)
-#define MT6360_PMU_CHG_CTRL2 (0x12)
-#define MT6360_PMU_CHG_CTRL3 (0x13)
-#define MT6360_PMU_CHG_CTRL4 (0x14)
-#define MT6360_PMU_CHG_CTRL5 (0x15)
-#define MT6360_PMU_CHG_CTRL6 (0x16)
-#define MT6360_PMU_CHG_CTRL7 (0x17)
-#define MT6360_PMU_CHG_CTRL8 (0x18)
-#define MT6360_PMU_CHG_CTRL9 (0x19)
-#define MT6360_PMU_CHG_CTRL10 (0x1A)
-#define MT6360_PMU_CHG_CTRL11 (0x1B)
-#define MT6360_PMU_CHG_CTRL12 (0x1C)
-#define MT6360_PMU_CHG_CTRL13 (0x1D)
-#define MT6360_PMU_CHG_CTRL14 (0x1E)
-#define MT6360_PMU_CHG_CTRL15 (0x1F)
-#define MT6360_PMU_CHG_CTRL16 (0x20)
-#define MT6360_PMU_CHG_AICC_RESULT (0x21)
-#define MT6360_PMU_DEVICE_TYPE (0x22)
-#define MT6360_PMU_QC_CONTROL1 (0x23)
-#define MT6360_PMU_QC_CONTROL2 (0x24)
-#define MT6360_PMU_QC30_CONTROL1 (0x25)
-#define MT6360_PMU_QC30_CONTROL2 (0x26)
-#define MT6360_PMU_USB_STATUS1 (0x27)
-#define MT6360_PMU_QC_STATUS1 (0x28)
-#define MT6360_PMU_QC_STATUS2 (0x29)
-#define MT6360_PMU_CHG_PUMP (0x2A)
-#define MT6360_PMU_CHG_CTRL17 (0x2B)
-#define MT6360_PMU_CHG_CTRL18 (0x2C)
-#define MT6360_PMU_CHRDET_CTRL1 (0x2D)
-#define MT6360_PMU_CHRDET_CTRL2 (0x2E)
-#define MT6360_PMU_DPDN_CTRL (0x2F)
-#define MT6360_PMU_CHG_HIDDEN_CTRL1 (0x30)
-#define MT6360_PMU_CHG_HIDDEN_CTRL2 (0x31)
-#define MT6360_PMU_CHG_HIDDEN_CTRL3 (0x32)
-#define MT6360_PMU_CHG_HIDDEN_CTRL4 (0x33)
-#define MT6360_PMU_CHG_HIDDEN_CTRL5 (0x34)
-#define MT6360_PMU_CHG_HIDDEN_CTRL6 (0x35)
-#define MT6360_PMU_CHG_HIDDEN_CTRL7 (0x36)
-#define MT6360_PMU_CHG_HIDDEN_CTRL8 (0x37)
-#define MT6360_PMU_CHG_HIDDEN_CTRL9 (0x38)
-#define MT6360_PMU_CHG_HIDDEN_CTRL10 (0x39)
-#define MT6360_PMU_CHG_HIDDEN_CTRL11 (0x3A)
-#define MT6360_PMU_CHG_HIDDEN_CTRL12 (0x3B)
-#define MT6360_PMU_CHG_HIDDEN_CTRL13 (0x3C)
-#define MT6360_PMU_CHG_HIDDEN_CTRL14 (0x3D)
-#define MT6360_PMU_CHG_HIDDEN_CTRL15 (0x3E)
-#define MT6360_PMU_CHG_HIDDEN_CTRL16 (0x3F)
-#define MT6360_PMU_CHG_HIDDEN_CTRL17 (0x40)
-#define MT6360_PMU_CHG_HIDDEN_CTRL18 (0x41)
-#define MT6360_PMU_CHG_HIDDEN_CTRL19 (0x42)
-#define MT6360_PMU_CHG_HIDDEN_CTRL20 (0x43)
-#define MT6360_PMU_CHG_HIDDEN_CTRL21 (0x44)
-#define MT6360_PMU_CHG_HIDDEN_CTRL22 (0x45)
-#define MT6360_PMU_CHG_HIDDEN_CTRL23 (0x46)
-#define MT6360_PMU_CHG_HIDDEN_CTRL24 (0x47)
-#define MT6360_PMU_CHG_HIDDEN_CTRL25 (0x48)
-#define MT6360_PMU_BC12_CTRL (0x49)
-#define MT6360_PMU_CHG_STAT (0x4A)
-#define MT6360_PMU_RESV1 (0x4B)
-#define MT6360_PMU_TYPEC_OTP_TH_SEL_CODEH (0x4E)
-#define MT6360_PMU_TYPEC_OTP_TH_SEL_CODEL (0x4F)
-#define MT6360_PMU_TYPEC_OTP_HYST_TH (0x50)
-#define MT6360_PMU_TYPEC_OTP_CTRL (0x51)
-#define MT6360_PMU_ADC_BAT_DATA_H (0x52)
-#define MT6360_PMU_ADC_BAT_DATA_L (0x53)
-#define MT6360_PMU_IMID_BACKBST_ON (0x54)
-#define MT6360_PMU_IMID_BACKBST_OFF (0x55)
-#define MT6360_PMU_ADC_CONFIG (0x56)
-#define MT6360_PMU_ADC_EN2 (0x57)
-#define MT6360_PMU_ADC_IDLE_T (0x58)
-#define MT6360_PMU_ADC_RPT_1 (0x5A)
-#define MT6360_PMU_ADC_RPT_2 (0x5B)
-#define MT6360_PMU_ADC_RPT_3 (0x5C)
-#define MT6360_PMU_ADC_RPT_ORG1 (0x5D)
-#define MT6360_PMU_ADC_RPT_ORG2 (0x5E)
-#define MT6360_PMU_BAT_OVP_TH_SEL_CODEH (0x5F)
-#define MT6360_PMU_BAT_OVP_TH_SEL_CODEL (0x60)
-#define MT6360_PMU_CHG_CTRL19 (0x61)
-#define MT6360_PMU_VDDASUPPLY (0x62)
-#define MT6360_PMU_BC12_MANUAL (0x63)
-#define MT6360_PMU_CHGDET_FUNC (0x64)
-#define MT6360_PMU_FOD_CTRL (0x65)
-#define MT6360_PMU_CHG_CTRL20 (0x66)
-#define MT6360_PMU_CHG_HIDDEN_CTRL26 (0x67)
-#define MT6360_PMU_CHG_HIDDEN_CTRL27 (0x68)
-#define MT6360_PMU_RESV2 (0x69)
-#define MT6360_PMU_USBID_CTRL1 (0x6D)
-#define MT6360_PMU_USBID_CTRL2 (0x6E)
-#define MT6360_PMU_USBID_CTRL3 (0x6F)
-#define MT6360_PMU_FLED_CFG (0x70)
-#define MT6360_PMU_RESV3 (0x71)
-#define MT6360_PMU_FLED1_CTRL (0x72)
-#define MT6360_PMU_FLED_STRB_CTRL (0x73)
-#define MT6360_PMU_FLED1_STRB_CTRL2 (0x74)
-#define MT6360_PMU_FLED1_TOR_CTRL (0x75)
-#define MT6360_PMU_FLED2_CTRL (0x76)
-#define MT6360_PMU_RESV4 (0x77)
-#define MT6360_PMU_FLED2_STRB_CTRL2 (0x78)
-#define MT6360_PMU_FLED2_TOR_CTRL (0x79)
-#define MT6360_PMU_FLED_VMIDTRK_CTRL1 (0x7A)
-#define MT6360_PMU_FLED_VMID_RTM (0x7B)
-#define MT6360_PMU_FLED_VMIDTRK_CTRL2 (0x7C)
-#define MT6360_PMU_FLED_PWSEL (0x7D)
-#define MT6360_PMU_FLED_EN (0x7E)
-#define MT6360_PMU_FLED_Hidden1 (0x7F)
-#define MT6360_PMU_RGB_EN (0x80)
-#define MT6360_PMU_RGB1_ISNK (0x81)
-#define MT6360_PMU_RGB2_ISNK (0x82)
-#define MT6360_PMU_RGB3_ISNK (0x83)
-#define MT6360_PMU_RGB_ML_ISNK (0x84)
-#define MT6360_PMU_RGB1_DIM (0x85)
-#define MT6360_PMU_RGB2_DIM (0x86)
-#define MT6360_PMU_RGB3_DIM (0x87)
-#define MT6360_PMU_RESV5 (0x88)
-#define MT6360_PMU_RGB12_Freq (0x89)
-#define MT6360_PMU_RGB34_Freq (0x8A)
-#define MT6360_PMU_RGB1_Tr (0x8B)
-#define MT6360_PMU_RGB1_Tf (0x8C)
-#define MT6360_PMU_RGB1_TON_TOFF (0x8D)
-#define MT6360_PMU_RGB2_Tr (0x8E)
-#define MT6360_PMU_RGB2_Tf (0x8F)
-#define MT6360_PMU_RGB2_TON_TOFF (0x90)
-#define MT6360_PMU_RGB3_Tr (0x91)
-#define MT6360_PMU_RGB3_Tf (0x92)
-#define MT6360_PMU_RGB3_TON_TOFF (0x93)
-#define MT6360_PMU_RGB_Hidden_CTRL1 (0x94)
-#define MT6360_PMU_RGB_Hidden_CTRL2 (0x95)
-#define MT6360_PMU_RESV6 (0x97)
-#define MT6360_PMU_SPARE1 (0x9A)
-#define MT6360_PMU_SPARE2 (0xA0)
-#define MT6360_PMU_SPARE3 (0xB0)
-#define MT6360_PMU_SPARE4 (0xC0)
-#define MT6360_PMU_CHG_IRQ1 (0xD0)
-#define MT6360_PMU_CHG_IRQ2 (0xD1)
-#define MT6360_PMU_CHG_IRQ3 (0xD2)
-#define MT6360_PMU_CHG_IRQ4 (0xD3)
-#define MT6360_PMU_CHG_IRQ5 (0xD4)
-#define MT6360_PMU_CHG_IRQ6 (0xD5)
-#define MT6360_PMU_QC_IRQ (0xD6)
-#define MT6360_PMU_FOD_IRQ (0xD7)
-#define MT6360_PMU_BASE_IRQ (0xD8)
-#define MT6360_PMU_FLED_IRQ1 (0xD9)
-#define MT6360_PMU_FLED_IRQ2 (0xDA)
-#define MT6360_PMU_RGB_IRQ (0xDB)
-#define MT6360_PMU_BUCK1_IRQ (0xDC)
-#define MT6360_PMU_BUCK2_IRQ (0xDD)
-#define MT6360_PMU_LDO_IRQ1 (0xDE)
-#define MT6360_PMU_LDO_IRQ2 (0xDF)
-#define MT6360_PMU_CHG_STAT1 (0xE0)
-#define MT6360_PMU_CHG_STAT2 (0xE1)
-#define MT6360_PMU_CHG_STAT3 (0xE2)
-#define MT6360_PMU_CHG_STAT4 (0xE3)
-#define MT6360_PMU_CHG_STAT5 (0xE4)
-#define MT6360_PMU_CHG_STAT6 (0xE5)
-#define MT6360_PMU_QC_STAT (0xE6)
-#define MT6360_PMU_FOD_STAT (0xE7)
-#define MT6360_PMU_BASE_STAT (0xE8)
-#define MT6360_PMU_FLED_STAT1 (0xE9)
-#define MT6360_PMU_FLED_STAT2 (0xEA)
-#define MT6360_PMU_RGB_STAT (0xEB)
-#define MT6360_PMU_BUCK1_STAT (0xEC)
-#define MT6360_PMU_BUCK2_STAT (0xED)
-#define MT6360_PMU_LDO_STAT1 (0xEE)
-#define MT6360_PMU_LDO_STAT2 (0xEF)
-#define MT6360_PMU_CHG_MASK1 (0xF0)
-#define MT6360_PMU_CHG_MASK2 (0xF1)
-#define MT6360_PMU_CHG_MASK3 (0xF2)
-#define MT6360_PMU_CHG_MASK4 (0xF3)
-#define MT6360_PMU_CHG_MASK5 (0xF4)
-#define MT6360_PMU_CHG_MASK6 (0xF5)
-#define MT6360_PMU_QC_MASK (0xF6)
-#define MT6360_PMU_FOD_MASK (0xF7)
-#define MT6360_PMU_BASE_MASK (0xF8)
-#define MT6360_PMU_FLED_MASK1 (0xF9)
-#define MT6360_PMU_FLED_MASK2 (0xFA)
-#define MT6360_PMU_FAULTB_MASK (0xFB)
-#define MT6360_PMU_BUCK1_MASK (0xFC)
-#define MT6360_PMU_BUCK2_MASK (0xFD)
-#define MT6360_PMU_LDO_MASK1 (0xFE)
-#define MT6360_PMU_LDO_MASK2 (0xFF)
-#define MT6360_PMU_MAXREG (MT6360_PMU_LDO_MASK2)
-
-/* MT6360_PMU_IRQ_SET */
-#define MT6360_PMU_IRQ_REGNUM (MT6360_PMU_LDO_IRQ2 - MT6360_PMU_CHG_IRQ1 + 1)
-#define MT6360_IRQ_RETRIG BIT(2)
-
-#define CHIP_VEN_MASK (0xF0)
-#define CHIP_VEN_MT6360 (0x50)
-#define CHIP_REV_MASK (0x0F)
-
-#endif /* __MT6360_H__ */
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index 949268581b36..b774c3a4bb62 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -12,7 +12,13 @@
enum chip_id {
MT6323_CHIP_ID = 0x23,
+ MT6328_CHIP_ID = 0x30,
+ MT6331_CHIP_ID = 0x20,
+ MT6332_CHIP_ID = 0x20,
+ MT6357_CHIP_ID = 0x57,
MT6358_CHIP_ID = 0x58,
+ MT6359_CHIP_ID = 0x59,
+ MT6366_CHIP_ID = 0x66,
MT6391_CHIP_ID = 0x91,
MT6397_CHIP_ID = 0x97,
};
@@ -60,11 +66,11 @@ struct mt6397_chip {
int irq;
struct irq_domain *irq_domain;
struct mutex irqlock;
- u16 wake_mask[2];
- u16 irq_masks_cur[2];
- u16 irq_masks_cache[2];
- u16 int_con[2];
- u16 int_status[2];
+ u16 wake_mask[3];
+ u16 irq_masks_cur[3];
+ u16 irq_masks_cache[3];
+ u16 int_con[3];
+ u16 int_status[3];
u16 chip_id;
void *irq_data;
};
diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h
index c3748b53bf7d..27883af44f87 100644
--- a/include/linux/mfd/mt6397/rtc.h
+++ b/include/linux/mfd/mt6397/rtc.h
@@ -36,6 +36,7 @@
#define RTC_AL_MASK_DOW BIT(4)
#define RTC_TC_SEC 0x000a
+#define RTC_TC_MTH_MASK 0x000f
/* Min, Hour, Dom... register offset to RTC_TC_SEC */
#define RTC_OFFSET_SEC 0
#define RTC_OFFSET_MIN 1
@@ -59,11 +60,6 @@
#define RTC_PDN2 0x002e
#define RTC_PDN2_PWRON_ALARM BIT(4)
-#define RTC_MIN_YEAR 1968
-#define RTC_BASE_YEAR 1900
-#define RTC_NUM_YEARS 128
-#define RTC_MIN_YEAR_OFFSET (RTC_MIN_YEAR - RTC_BASE_YEAR)
-
#define MTK_RTC_POLL_DELAY_US 10
#define MTK_RTC_POLL_TIMEOUT (jiffies_to_usecs(HZ))
diff --git a/include/linux/mfd/nct6694.h b/include/linux/mfd/nct6694.h
new file mode 100644
index 000000000000..6eb9be2cd4a0
--- /dev/null
+++ b/include/linux/mfd/nct6694.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ *
+ * Nuvoton NCT6694 USB transaction and data structure.
+ */
+
+#ifndef __MFD_NCT6694_H
+#define __MFD_NCT6694_H
+
+#define NCT6694_VENDOR_ID 0x0416
+#define NCT6694_PRODUCT_ID 0x200B
+#define NCT6694_INT_IN_EP 0x81
+#define NCT6694_BULK_IN_EP 0x02
+#define NCT6694_BULK_OUT_EP 0x03
+
+#define NCT6694_HCTRL_SET 0x40
+#define NCT6694_HCTRL_GET 0x80
+
+#define NCT6694_URB_TIMEOUT 1000
+
+enum nct6694_irq_id {
+ NCT6694_IRQ_GPIO0 = 0,
+ NCT6694_IRQ_GPIO1,
+ NCT6694_IRQ_GPIO2,
+ NCT6694_IRQ_GPIO3,
+ NCT6694_IRQ_GPIO4,
+ NCT6694_IRQ_GPIO5,
+ NCT6694_IRQ_GPIO6,
+ NCT6694_IRQ_GPIO7,
+ NCT6694_IRQ_GPIO8,
+ NCT6694_IRQ_GPIO9,
+ NCT6694_IRQ_GPIOA,
+ NCT6694_IRQ_GPIOB,
+ NCT6694_IRQ_GPIOC,
+ NCT6694_IRQ_GPIOD,
+ NCT6694_IRQ_GPIOE,
+ NCT6694_IRQ_GPIOF,
+ NCT6694_IRQ_CAN0,
+ NCT6694_IRQ_CAN1,
+ NCT6694_IRQ_RTC,
+ NCT6694_NR_IRQS,
+};
+
+enum nct6694_response_err_status {
+ NCT6694_NO_ERROR = 0,
+ NCT6694_FORMAT_ERROR,
+ NCT6694_RESERVED1,
+ NCT6694_RESERVED2,
+ NCT6694_NOT_SUPPORT_ERROR,
+ NCT6694_NO_RESPONSE_ERROR,
+ NCT6694_TIMEOUT_ERROR,
+ NCT6694_PENDING,
+};
+
+struct __packed nct6694_cmd_header {
+ u8 rsv1;
+ u8 mod;
+ union __packed {
+ __le16 offset;
+ struct __packed {
+ u8 cmd;
+ u8 sel;
+ };
+ };
+ u8 hctrl;
+ u8 rsv2;
+ __le16 len;
+};
+
+struct __packed nct6694_response_header {
+ u8 sequence_id;
+ u8 sts;
+ u8 reserved[4];
+ __le16 len;
+};
+
+union __packed nct6694_usb_msg {
+ struct nct6694_cmd_header cmd_header;
+ struct nct6694_response_header response_header;
+};
+
+struct nct6694 {
+ struct device *dev;
+ struct ida gpio_ida;
+ struct ida i2c_ida;
+ struct ida canfd_ida;
+ struct ida wdt_ida;
+ struct irq_domain *domain;
+ struct mutex access_lock;
+ spinlock_t irq_lock;
+ struct urb *int_in_urb;
+ struct usb_device *udev;
+ union nct6694_usb_msg *usb_msg;
+ __le32 *int_buffer;
+ unsigned int irq_enable;
+};
+
+int nct6694_read_msg(struct nct6694 *nct6694, const struct nct6694_cmd_header *cmd_hd, void *buf);
+int nct6694_write_msg(struct nct6694 *nct6694, const struct nct6694_cmd_header *cmd_hd, void *buf);
+
+#endif
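The command header above is the host-to-MCU request format: mod selects the feature module, the cmd/sel pair (or a raw 16-bit offset) addresses a function within it, hctrl carries the transfer direction, and len is the little-endian payload length. A minimal sketch of a read through nct6694_read_msg(), assuming NCT6694_HCTRL_GET marks a read request; the module and command numbers passed in are placeholders:

  /* Sketch: fetch 'len' bytes from a child-device function. */
  static int example_read(struct nct6694 *chip, u8 mod, u8 cmd, u8 sel,
                          void *buf, u16 len)
  {
          struct nct6694_cmd_header hdr = {
                  .mod    = mod,
                  .cmd    = cmd,          /* placeholder command number */
                  .sel    = sel,
                  .hctrl  = NCT6694_HCTRL_GET,
                  .len    = cpu_to_le16(len),
          };

          return nct6694_read_msg(chip, &hdr, buf);
  }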
diff --git a/include/linux/mfd/ntxec.h b/include/linux/mfd/ntxec.h
index 26ab3b8eb612..e5880c346da9 100644
--- a/include/linux/mfd/ntxec.h
+++ b/include/linux/mfd/ntxec.h
@@ -26,7 +26,7 @@ struct ntxec {
* This convenience function converts an 8-bit value to 16-bit for use in the
* second kind of register.
*/
-static inline __be16 ntxec_reg8(u8 value)
+static inline u16 ntxec_reg8(u8 value)
{
return value << 8;
}
@@ -34,5 +34,5 @@ static inline __be16 ntxec_reg8(u8 value)
/* Known firmware versions */
#define NTXEC_VERSION_KOBO_AURA 0xd726 /* found in Kobo Aura */
#define NTXEC_VERSION_TOLINO_SHINE2 0xf110 /* found in Tolino Shine 2 HD */
-
+#define NTXEC_VERSION_TOLINO_VISION 0xe135 /* found in Tolino Vision, contains RTC, ADC, PWM, home pad */
#endif
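Since ntxec_reg8() now returns a plain u16 (byte ordering is handled by the regmap configuration rather than by the caller), a write of an 8-bit value reduces to the sketch below, assuming struct ntxec keeps its regmap pointer; the register name is a placeholder:

  /* ntxec_reg8(0x2a) == 0x2a00: the value lands in the high byte. */
  regmap_write(ntxec->regmap, EXAMPLE_REG, ntxec_reg8(0x2a));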
diff --git a/include/linux/mfd/ocelot.h b/include/linux/mfd/ocelot.h
new file mode 100644
index 000000000000..dd72073d2d4f
--- /dev/null
+++ b/include/linux/mfd/ocelot.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2022 Innovative Advantage Inc. */
+
+#ifndef _LINUX_MFD_OCELOT_H
+#define _LINUX_MFD_OCELOT_H
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+struct resource;
+
+static inline struct regmap *
+ocelot_regmap_from_resource_optional(struct platform_device *pdev,
+ unsigned int index,
+ const struct regmap_config *config)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *regs;
+
+ /*
+ * Don't use _get_and_ioremap_resource() here, since that will invoke
+ * prints of "invalid resource" which will simply add confusion.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ if (res) {
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return ERR_CAST(regs);
+ return devm_regmap_init_mmio(dev, regs, config);
+ }
+
+ /*
+ * Fall back to using REG and getting the resource from the parent
+ * device, which is possible in an MFD configuration
+ */
+ if (dev->parent) {
+ res = platform_get_resource(pdev, IORESOURCE_REG, index);
+ if (!res)
+ return NULL;
+
+ return dev_get_regmap(dev->parent, res->name);
+ }
+
+ return NULL;
+}
+
+static inline struct regmap *
+ocelot_regmap_from_resource(struct platform_device *pdev, unsigned int index,
+ const struct regmap_config *config)
+{
+ struct regmap *map;
+
+ map = ocelot_regmap_from_resource_optional(pdev, index, config);
+ return map ?: ERR_PTR(-ENOENT);
+}
+
+#endif
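A child driver would typically resolve its regmap in probe with the non-optional variant, which turns an absent resource into -ENOENT. A minimal sketch, with placeholder regmap_config contents:

  static const struct regmap_config example_config = {
          .reg_bits       = 32,
          .val_bits       = 32,
          .reg_stride     = 4,
  };

  static int example_probe(struct platform_device *pdev)
  {
          struct regmap *map;

          map = ocelot_regmap_from_resource(pdev, 0, &example_config);
          if (IS_ERR(map))
                  return PTR_ERR(map);

          /* ... regmap_read()/regmap_write() against the target ... */
          return 0;
  }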
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 1e61c7e9f50d..dabcc0dea802 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -16,7 +16,6 @@
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/extcon-provider.h>
-#include <linux/of_gpio.h>
#include <linux/usb/phy_companion.h>
#define PALMAS_NUM_CLIENTS 3
@@ -99,8 +98,8 @@ struct palmas_sleep_requestor_info {
};
struct palmas_regs_info {
- char *name;
- char *sname;
+ const char *name;
+ const char *sname;
u8 vsel_addr;
u8 ctrl_addr;
u8 tstep_addr;
@@ -129,12 +128,6 @@ struct palmas_pmic_driver_data {
struct regulator_config config);
};
-struct palmas_adc_wakeup_property {
- int adc_channel_number;
- int adc_high_threshold;
- int adc_low_threshold;
-};
-
struct palmas_gpadc_platform_data {
/* Channel 3 current source is only enabled during conversion */
int ch3_current; /* 0: off; 1: 10uA; 2: 400uA; 3: 800 uA */
@@ -153,8 +146,6 @@ struct palmas_gpadc_platform_data {
int start_polarity;
int auto_conversion_period_ms;
- struct palmas_adc_wakeup_property *adc_wakeup1_data;
- struct palmas_adc_wakeup_property *adc_wakeup2_data;
};
struct palmas_reg_init {
diff --git a/include/linux/mfd/pcf50633/adc.h b/include/linux/mfd/pcf50633/adc.h
deleted file mode 100644
index 6a81896d4889..000000000000
--- a/include/linux/mfd/pcf50633/adc.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * adc.h -- Driver for NXP PCF50633 ADC
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * All rights reserved.
- */
-
-#ifndef __LINUX_MFD_PCF50633_ADC_H
-#define __LINUX_MFD_PCF50633_ADC_H
-
-#include <linux/mfd/pcf50633/core.h>
-#include <linux/platform_device.h>
-
-/* ADC Registers */
-#define PCF50633_REG_ADCC3 0x52
-#define PCF50633_REG_ADCC2 0x53
-#define PCF50633_REG_ADCC1 0x54
-#define PCF50633_REG_ADCS1 0x55
-#define PCF50633_REG_ADCS2 0x56
-#define PCF50633_REG_ADCS3 0x57
-
-#define PCF50633_ADCC1_ADCSTART 0x01
-#define PCF50633_ADCC1_RES_8BIT 0x02
-#define PCF50633_ADCC1_RES_10BIT 0x00
-#define PCF50633_ADCC1_AVERAGE_NO 0x00
-#define PCF50633_ADCC1_AVERAGE_4 0x04
-#define PCF50633_ADCC1_AVERAGE_8 0x08
-#define PCF50633_ADCC1_AVERAGE_16 0x0c
-#define PCF50633_ADCC1_MUX_BATSNS_RES 0x00
-#define PCF50633_ADCC1_MUX_BATSNS_SUBTR 0x10
-#define PCF50633_ADCC1_MUX_ADCIN2_RES 0x20
-#define PCF50633_ADCC1_MUX_ADCIN2_SUBTR 0x30
-#define PCF50633_ADCC1_MUX_BATTEMP 0x60
-#define PCF50633_ADCC1_MUX_ADCIN1 0x70
-#define PCF50633_ADCC1_AVERAGE_MASK 0x0c
-#define PCF50633_ADCC1_ADCMUX_MASK 0xf0
-
-#define PCF50633_ADCC2_RATIO_NONE 0x00
-#define PCF50633_ADCC2_RATIO_BATTEMP 0x01
-#define PCF50633_ADCC2_RATIO_ADCIN1 0x02
-#define PCF50633_ADCC2_RATIO_BOTH 0x03
-#define PCF50633_ADCC2_RATIOSETTL_100US 0x04
-
-#define PCF50633_ADCC3_ACCSW_EN 0x01
-#define PCF50633_ADCC3_NTCSW_EN 0x04
-#define PCF50633_ADCC3_RES_DIV_TWO 0x10
-#define PCF50633_ADCC3_RES_DIV_THREE 0x00
-
-#define PCF50633_ADCS3_REF_NTCSW 0x00
-#define PCF50633_ADCS3_REF_ACCSW 0x10
-#define PCF50633_ADCS3_REF_2V0 0x20
-#define PCF50633_ADCS3_REF_VISA 0x30
-#define PCF50633_ADCS3_REF_2V0_2 0x70
-#define PCF50633_ADCS3_ADCRDY 0x80
-
-#define PCF50633_ADCS3_ADCDAT1L_MASK 0x03
-#define PCF50633_ADCS3_ADCDAT2L_MASK 0x0c
-#define PCF50633_ADCS3_ADCDAT2L_SHIFT 2
-#define PCF50633_ASCS3_REF_MASK 0x70
-
-extern int
-pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
- void (*callback)(struct pcf50633 *, void *, int),
- void *callback_param);
-extern int
-pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg);
-
-#endif /* __LINUX_PCF50633_ADC_H */
diff --git a/include/linux/mfd/pcf50633/backlight.h b/include/linux/mfd/pcf50633/backlight.h
deleted file mode 100644
index fd4a4f8d6c13..000000000000
--- a/include/linux/mfd/pcf50633/backlight.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
- * PCF50633 backlight device driver
- */
-
-#ifndef __LINUX_MFD_PCF50633_BACKLIGHT
-#define __LINUX_MFD_PCF50633_BACKLIGHT
-
-/*
-* @default_brightness: Backlight brightness is initialized to this value
-*
-* Brightness to be used after the driver has been probed.
-* Valid range 0-63.
-*
-* @default_brightness_limit: The actual brightness is limited by this value
-*
-* Brightness limit to be used after the driver has been probed. This is useful
-* when it is not known how much power is available for the backlight during
-* probe.
-* Valid range 0-63. Can be changed later with pcf50633_bl_set_brightness_limit.
-*
-* @ramp_time: Display ramp time when changing brightness
-*
-* When changing the backlight's brightness the change is not instant;
-* instead it fades smoothly from one state to another. This value specifies
-* how long the fade should take. The lower the value, the longer the fade.
-* Valid range 0-255
-*/
-struct pcf50633_bl_platform_data {
- unsigned int default_brightness;
- unsigned int default_brightness_limit;
- uint8_t ramp_time;
-};
-
-
-struct pcf50633;
-
-int pcf50633_bl_set_brightness_limit(struct pcf50633 *pcf, unsigned int limit);
-
-#endif
-
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
deleted file mode 100644
index 3f752dc62a6c..000000000000
--- a/include/linux/mfd/pcf50633/core.h
+++ /dev/null
@@ -1,234 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * core.h -- Core driver for NXP PCF50633
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * All rights reserved.
- */
-
-#ifndef __LINUX_MFD_PCF50633_CORE_H
-#define __LINUX_MFD_PCF50633_CORE_H
-
-#include <linux/i2c.h>
-#include <linux/workqueue.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/power_supply.h>
-#include <linux/mfd/pcf50633/backlight.h>
-
-struct pcf50633;
-struct regmap;
-
-#define PCF50633_NUM_REGULATORS 11
-
-struct pcf50633_platform_data {
- struct regulator_init_data reg_init_data[PCF50633_NUM_REGULATORS];
-
- char **batteries;
- int num_batteries;
-
- /*
- * Should be set accordingly to the reference resistor used, see
- * I_{ch(ref)} charger reference current in the pcf50633 User
- * Manual.
- */
- int charger_reference_current_ma;
-
- /* Callbacks */
- void (*probe_done)(struct pcf50633 *);
- void (*mbc_event_callback)(struct pcf50633 *, int);
- void (*regulator_registered)(struct pcf50633 *, int);
- void (*force_shutdown)(struct pcf50633 *);
-
- u8 resumers[5];
-
- struct pcf50633_bl_platform_data *backlight_data;
-};
-
-struct pcf50633_irq {
- void (*handler) (int, void *);
- void *data;
-};
-
-int pcf50633_register_irq(struct pcf50633 *pcf, int irq,
- void (*handler) (int, void *), void *data);
-int pcf50633_free_irq(struct pcf50633 *pcf, int irq);
-
-int pcf50633_irq_mask(struct pcf50633 *pcf, int irq);
-int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq);
-int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq);
-
-int pcf50633_read_block(struct pcf50633 *, u8 reg,
- int nr_regs, u8 *data);
-int pcf50633_write_block(struct pcf50633 *pcf, u8 reg,
- int nr_regs, u8 *data);
-u8 pcf50633_reg_read(struct pcf50633 *, u8 reg);
-int pcf50633_reg_write(struct pcf50633 *pcf, u8 reg, u8 val);
-
-int pcf50633_reg_set_bit_mask(struct pcf50633 *pcf, u8 reg, u8 mask, u8 val);
-int pcf50633_reg_clear_bits(struct pcf50633 *pcf, u8 reg, u8 bits);
-
-/* Interrupt registers */
-
-#define PCF50633_REG_INT1 0x02
-#define PCF50633_REG_INT2 0x03
-#define PCF50633_REG_INT3 0x04
-#define PCF50633_REG_INT4 0x05
-#define PCF50633_REG_INT5 0x06
-
-#define PCF50633_REG_INT1M 0x07
-#define PCF50633_REG_INT2M 0x08
-#define PCF50633_REG_INT3M 0x09
-#define PCF50633_REG_INT4M 0x0a
-#define PCF50633_REG_INT5M 0x0b
-
-enum {
- /* Chip IRQs */
- PCF50633_IRQ_ADPINS,
- PCF50633_IRQ_ADPREM,
- PCF50633_IRQ_USBINS,
- PCF50633_IRQ_USBREM,
- PCF50633_IRQ_RESERVED1,
- PCF50633_IRQ_RESERVED2,
- PCF50633_IRQ_ALARM,
- PCF50633_IRQ_SECOND,
- PCF50633_IRQ_ONKEYR,
- PCF50633_IRQ_ONKEYF,
- PCF50633_IRQ_EXTON1R,
- PCF50633_IRQ_EXTON1F,
- PCF50633_IRQ_EXTON2R,
- PCF50633_IRQ_EXTON2F,
- PCF50633_IRQ_EXTON3R,
- PCF50633_IRQ_EXTON3F,
- PCF50633_IRQ_BATFULL,
- PCF50633_IRQ_CHGHALT,
- PCF50633_IRQ_THLIMON,
- PCF50633_IRQ_THLIMOFF,
- PCF50633_IRQ_USBLIMON,
- PCF50633_IRQ_USBLIMOFF,
- PCF50633_IRQ_ADCRDY,
- PCF50633_IRQ_ONKEY1S,
- PCF50633_IRQ_LOWSYS,
- PCF50633_IRQ_LOWBAT,
- PCF50633_IRQ_HIGHTMP,
- PCF50633_IRQ_AUTOPWRFAIL,
- PCF50633_IRQ_DWN1PWRFAIL,
- PCF50633_IRQ_DWN2PWRFAIL,
- PCF50633_IRQ_LEDPWRFAIL,
- PCF50633_IRQ_LEDOVP,
- PCF50633_IRQ_LDO1PWRFAIL,
- PCF50633_IRQ_LDO2PWRFAIL,
- PCF50633_IRQ_LDO3PWRFAIL,
- PCF50633_IRQ_LDO4PWRFAIL,
- PCF50633_IRQ_LDO5PWRFAIL,
- PCF50633_IRQ_LDO6PWRFAIL,
- PCF50633_IRQ_HCLDOPWRFAIL,
- PCF50633_IRQ_HCLDOOVL,
-
- /* Always last */
- PCF50633_NUM_IRQ,
-};
-
-struct pcf50633 {
- struct device *dev;
- struct regmap *regmap;
-
- struct pcf50633_platform_data *pdata;
- int irq;
- struct pcf50633_irq irq_handler[PCF50633_NUM_IRQ];
- struct work_struct irq_work;
- struct workqueue_struct *work_queue;
- struct mutex lock;
-
- u8 mask_regs[5];
-
- u8 suspend_irq_masks[5];
- u8 resume_reason[5];
- int is_suspended;
-
- int onkey1s_held;
-
- struct platform_device *rtc_pdev;
- struct platform_device *mbc_pdev;
- struct platform_device *adc_pdev;
- struct platform_device *input_pdev;
- struct platform_device *bl_pdev;
- struct platform_device *regulator_pdev[PCF50633_NUM_REGULATORS];
-};
-
-enum pcf50633_reg_int1 {
- PCF50633_INT1_ADPINS = 0x01, /* Adapter inserted */
- PCF50633_INT1_ADPREM = 0x02, /* Adapter removed */
- PCF50633_INT1_USBINS = 0x04, /* USB inserted */
- PCF50633_INT1_USBREM = 0x08, /* USB removed */
- /* reserved */
- PCF50633_INT1_ALARM = 0x40, /* RTC alarm time is reached */
- PCF50633_INT1_SECOND = 0x80, /* RTC periodic second interrupt */
-};
-
-enum pcf50633_reg_int2 {
- PCF50633_INT2_ONKEYR = 0x01, /* ONKEY rising edge */
- PCF50633_INT2_ONKEYF = 0x02, /* ONKEY falling edge */
- PCF50633_INT2_EXTON1R = 0x04, /* EXTON1 rising edge */
- PCF50633_INT2_EXTON1F = 0x08, /* EXTON1 falling edge */
- PCF50633_INT2_EXTON2R = 0x10, /* EXTON2 rising edge */
- PCF50633_INT2_EXTON2F = 0x20, /* EXTON2 falling edge */
- PCF50633_INT2_EXTON3R = 0x40, /* EXTON3 rising edge */
- PCF50633_INT2_EXTON3F = 0x80, /* EXTON3 falling edge */
-};
-
-enum pcf50633_reg_int3 {
- PCF50633_INT3_BATFULL = 0x01, /* Battery full */
- PCF50633_INT3_CHGHALT = 0x02, /* Charger halt */
- PCF50633_INT3_THLIMON = 0x04,
- PCF50633_INT3_THLIMOFF = 0x08,
- PCF50633_INT3_USBLIMON = 0x10,
- PCF50633_INT3_USBLIMOFF = 0x20,
- PCF50633_INT3_ADCRDY = 0x40, /* ADC result ready */
- PCF50633_INT3_ONKEY1S = 0x80, /* ONKEY pressed 1 second */
-};
-
-enum pcf50633_reg_int4 {
- PCF50633_INT4_LOWSYS = 0x01,
- PCF50633_INT4_LOWBAT = 0x02,
- PCF50633_INT4_HIGHTMP = 0x04,
- PCF50633_INT4_AUTOPWRFAIL = 0x08,
- PCF50633_INT4_DWN1PWRFAIL = 0x10,
- PCF50633_INT4_DWN2PWRFAIL = 0x20,
- PCF50633_INT4_LEDPWRFAIL = 0x40,
- PCF50633_INT4_LEDOVP = 0x80,
-};
-
-enum pcf50633_reg_int5 {
- PCF50633_INT5_LDO1PWRFAIL = 0x01,
- PCF50633_INT5_LDO2PWRFAIL = 0x02,
- PCF50633_INT5_LDO3PWRFAIL = 0x04,
- PCF50633_INT5_LDO4PWRFAIL = 0x08,
- PCF50633_INT5_LDO5PWRFAIL = 0x10,
- PCF50633_INT5_LDO6PWRFAIL = 0x20,
- PCF50633_INT5_HCLDOPWRFAIL = 0x40,
- PCF50633_INT5_HCLDOOVL = 0x80,
-};
-
-/* misc. registers */
-#define PCF50633_REG_OOCSHDWN 0x0c
-
-/* LED registers */
-#define PCF50633_REG_LEDOUT 0x28
-#define PCF50633_REG_LEDENA 0x29
-#define PCF50633_REG_LEDCTL 0x2a
-#define PCF50633_REG_LEDDIM 0x2b
-
-static inline struct pcf50633 *dev_to_pcf50633(struct device *dev)
-{
- return dev_get_drvdata(dev);
-}
-
-int pcf50633_irq_init(struct pcf50633 *pcf, int irq);
-void pcf50633_irq_free(struct pcf50633 *pcf);
-#ifdef CONFIG_PM
-int pcf50633_irq_suspend(struct pcf50633 *pcf);
-int pcf50633_irq_resume(struct pcf50633 *pcf);
-#endif
-
-#endif
diff --git a/include/linux/mfd/pcf50633/gpio.h b/include/linux/mfd/pcf50633/gpio.h
deleted file mode 100644
index f589e35795f1..000000000000
--- a/include/linux/mfd/pcf50633/gpio.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * gpio.h -- GPIO driver for NXP PCF50633
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * All rights reserved.
- */
-
-#ifndef __LINUX_MFD_PCF50633_GPIO_H
-#define __LINUX_MFD_PCF50633_GPIO_H
-
-#include <linux/mfd/pcf50633/core.h>
-
-#define PCF50633_GPIO1 1
-#define PCF50633_GPIO2 2
-#define PCF50633_GPIO3 3
-#define PCF50633_GPO 4
-
-#define PCF50633_REG_GPIO1CFG 0x14
-#define PCF50633_REG_GPIO2CFG 0x15
-#define PCF50633_REG_GPIO3CFG 0x16
-#define PCF50633_REG_GPOCFG 0x17
-
-#define PCF50633_GPOCFG_GPOSEL_MASK 0x07
-
-enum pcf50633_reg_gpocfg {
- PCF50633_GPOCFG_GPOSEL_0 = 0x00,
- PCF50633_GPOCFG_GPOSEL_LED_NFET = 0x01,
- PCF50633_GPOCFG_GPOSEL_SYSxOK = 0x02,
- PCF50633_GPOCFG_GPOSEL_CLK32K = 0x03,
- PCF50633_GPOCFG_GPOSEL_ADAPUSB = 0x04,
- PCF50633_GPOCFG_GPOSEL_USBxOK = 0x05,
- PCF50633_GPOCFG_GPOSEL_ACTPH4 = 0x06,
- PCF50633_GPOCFG_GPOSEL_1 = 0x07,
- PCF50633_GPOCFG_GPOSEL_INVERSE = 0x08,
-};
-
-int pcf50633_gpio_set(struct pcf50633 *pcf, int gpio, u8 val);
-u8 pcf50633_gpio_get(struct pcf50633 *pcf, int gpio);
-
-int pcf50633_gpio_invert_set(struct pcf50633 *, int gpio, int invert);
-int pcf50633_gpio_invert_get(struct pcf50633 *pcf, int gpio);
-
-int pcf50633_gpio_power_supply_set(struct pcf50633 *,
- int gpio, int regulator, int on);
-#endif /* __LINUX_MFD_PCF50633_GPIO_H */
-
-
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h
deleted file mode 100644
index fa5cb9256d99..000000000000
--- a/include/linux/mfd/pcf50633/mbc.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * mbc.h -- Driver for NXP PCF50633 Main Battery Charger
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * All rights reserved.
- */
-
-#ifndef __LINUX_MFD_PCF50633_MBC_H
-#define __LINUX_MFD_PCF50633_MBC_H
-
-#include <linux/mfd/pcf50633/core.h>
-#include <linux/platform_device.h>
-
-#define PCF50633_REG_MBCC1 0x43
-#define PCF50633_REG_MBCC2 0x44
-#define PCF50633_REG_MBCC3 0x45
-#define PCF50633_REG_MBCC4 0x46
-#define PCF50633_REG_MBCC5 0x47
-#define PCF50633_REG_MBCC6 0x48
-#define PCF50633_REG_MBCC7 0x49
-#define PCF50633_REG_MBCC8 0x4a
-#define PCF50633_REG_MBCS1 0x4b
-#define PCF50633_REG_MBCS2 0x4c
-#define PCF50633_REG_MBCS3 0x4d
-
-enum pcf50633_reg_mbcc1 {
- PCF50633_MBCC1_CHGENA = 0x01, /* Charger enable */
- PCF50633_MBCC1_AUTOSTOP = 0x02,
- PCF50633_MBCC1_AUTORES = 0x04, /* automatic resume */
- PCF50633_MBCC1_RESUME = 0x08, /* explicit resume cmd */
- PCF50633_MBCC1_RESTART = 0x10, /* restart charging */
- PCF50633_MBCC1_PREWDTIME_60M = 0x20, /* max. precharging time */
- PCF50633_MBCC1_WDTIME_1H = 0x00,
- PCF50633_MBCC1_WDTIME_2H = 0x40,
- PCF50633_MBCC1_WDTIME_4H = 0x80,
- PCF50633_MBCC1_WDTIME_6H = 0xc0,
-};
-#define PCF50633_MBCC1_WDTIME_MASK 0xc0
-
-enum pcf50633_reg_mbcc2 {
- PCF50633_MBCC2_VBATCOND_2V7 = 0x00,
- PCF50633_MBCC2_VBATCOND_2V85 = 0x01,
- PCF50633_MBCC2_VBATCOND_3V0 = 0x02,
- PCF50633_MBCC2_VBATCOND_3V15 = 0x03,
- PCF50633_MBCC2_VMAX_4V = 0x00,
- PCF50633_MBCC2_VMAX_4V20 = 0x28,
- PCF50633_MBCC2_VRESDEBTIME_64S = 0x80, /* debounce time (32/64sec) */
-};
-
-enum pcf50633_reg_mbcc7 {
- PCF50633_MBCC7_USB_100mA = 0x00,
- PCF50633_MBCC7_USB_500mA = 0x01,
- PCF50633_MBCC7_USB_1000mA = 0x02,
- PCF50633_MBCC7_USB_SUSPEND = 0x03,
- PCF50633_MBCC7_BATTEMP_EN = 0x04,
- PCF50633_MBCC7_BATSYSIMAX_1A6 = 0x00,
- PCF50633_MBCC7_BATSYSIMAX_1A8 = 0x40,
- PCF50633_MBCC7_BATSYSIMAX_2A0 = 0x80,
- PCF50633_MBCC7_BATSYSIMAX_2A2 = 0xc0,
-};
-#define PCF50633_MBCC7_USB_MASK 0x03
-
-enum pcf50633_reg_mbcc8 {
- PCF50633_MBCC8_USBENASUS = 0x10,
-};
-
-enum pcf50633_reg_mbcs1 {
- PCF50633_MBCS1_USBPRES = 0x01,
- PCF50633_MBCS1_USBOK = 0x02,
- PCF50633_MBCS1_ADAPTPRES = 0x04,
- PCF50633_MBCS1_ADAPTOK = 0x08,
- PCF50633_MBCS1_TBAT_OK = 0x00,
- PCF50633_MBCS1_TBAT_ABOVE = 0x10,
- PCF50633_MBCS1_TBAT_BELOW = 0x20,
- PCF50633_MBCS1_TBAT_UNDEF = 0x30,
- PCF50633_MBCS1_PREWDTEXP = 0x40,
- PCF50633_MBCS1_WDTEXP = 0x80,
-};
-
-enum pcf50633_reg_mbcs2_mbcmod {
- PCF50633_MBCS2_MBC_PLAY = 0x00,
- PCF50633_MBCS2_MBC_USB_PRE = 0x01,
- PCF50633_MBCS2_MBC_USB_PRE_WAIT = 0x02,
- PCF50633_MBCS2_MBC_USB_FAST = 0x03,
- PCF50633_MBCS2_MBC_USB_FAST_WAIT = 0x04,
- PCF50633_MBCS2_MBC_USB_SUSPEND = 0x05,
- PCF50633_MBCS2_MBC_ADP_PRE = 0x06,
- PCF50633_MBCS2_MBC_ADP_PRE_WAIT = 0x07,
- PCF50633_MBCS2_MBC_ADP_FAST = 0x08,
- PCF50633_MBCS2_MBC_ADP_FAST_WAIT = 0x09,
- PCF50633_MBCS2_MBC_BAT_FULL = 0x0a,
- PCF50633_MBCS2_MBC_HALT = 0x0b,
-};
-#define PCF50633_MBCS2_MBC_MASK 0x0f
-enum pcf50633_reg_mbcs2_chgstat {
- PCF50633_MBCS2_CHGS_NONE = 0x00,
- PCF50633_MBCS2_CHGS_ADAPTER = 0x10,
- PCF50633_MBCS2_CHGS_USB = 0x20,
- PCF50633_MBCS2_CHGS_BOTH = 0x30,
-};
-#define PCF50633_MBCS2_RESSTAT_AUTO 0x40
-
-enum pcf50633_reg_mbcs3 {
- PCF50633_MBCS3_USBLIM_PLAY = 0x01,
- PCF50633_MBCS3_USBLIM_CGH = 0x02,
- PCF50633_MBCS3_TLIM_PLAY = 0x04,
- PCF50633_MBCS3_TLIM_CHG = 0x08,
- PCF50633_MBCS3_ILIM = 0x10, /* 1: Ibat > Icutoff */
- PCF50633_MBCS3_VLIM = 0x20, /* 1: Vbat == Vmax */
- PCF50633_MBCS3_VBATSTAT = 0x40, /* 1: Vbat > Vbatcond */
- PCF50633_MBCS3_VRES = 0x80, /* 1: Vbat > Vth(RES) */
-};
-
-#define PCF50633_MBCC2_VBATCOND_MASK 0x03
-#define PCF50633_MBCC2_VMAX_MASK 0x3c
-
-/* Charger status */
-#define PCF50633_MBC_USB_ONLINE 0x01
-#define PCF50633_MBC_USB_ACTIVE 0x02
-#define PCF50633_MBC_ADAPTER_ONLINE 0x04
-#define PCF50633_MBC_ADAPTER_ACTIVE 0x08
-
-int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma);
-
-int pcf50633_mbc_get_status(struct pcf50633 *);
-int pcf50633_mbc_get_usb_online_status(struct pcf50633 *);
-
-#endif
-
diff --git a/include/linux/mfd/pcf50633/pmic.h b/include/linux/mfd/pcf50633/pmic.h
deleted file mode 100644
index eac0c3d8e984..000000000000
--- a/include/linux/mfd/pcf50633/pmic.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_MFD_PCF50633_PMIC_H
-#define __LINUX_MFD_PCF50633_PMIC_H
-
-#include <linux/mfd/pcf50633/core.h>
-#include <linux/platform_device.h>
-
-#define PCF50633_REG_AUTOOUT 0x1a
-#define PCF50633_REG_AUTOENA 0x1b
-#define PCF50633_REG_AUTOCTL 0x1c
-#define PCF50633_REG_AUTOMXC 0x1d
-#define PCF50633_REG_DOWN1OUT 0x1e
-#define PCF50633_REG_DOWN1ENA 0x1f
-#define PCF50633_REG_DOWN1CTL 0x20
-#define PCF50633_REG_DOWN1MXC 0x21
-#define PCF50633_REG_DOWN2OUT 0x22
-#define PCF50633_REG_DOWN2ENA 0x23
-#define PCF50633_REG_DOWN2CTL 0x24
-#define PCF50633_REG_DOWN2MXC 0x25
-#define PCF50633_REG_MEMLDOOUT 0x26
-#define PCF50633_REG_MEMLDOENA 0x27
-#define PCF50633_REG_LDO1OUT 0x2d
-#define PCF50633_REG_LDO1ENA 0x2e
-#define PCF50633_REG_LDO2OUT 0x2f
-#define PCF50633_REG_LDO2ENA 0x30
-#define PCF50633_REG_LDO3OUT 0x31
-#define PCF50633_REG_LDO3ENA 0x32
-#define PCF50633_REG_LDO4OUT 0x33
-#define PCF50633_REG_LDO4ENA 0x34
-#define PCF50633_REG_LDO5OUT 0x35
-#define PCF50633_REG_LDO5ENA 0x36
-#define PCF50633_REG_LDO6OUT 0x37
-#define PCF50633_REG_LDO6ENA 0x38
-#define PCF50633_REG_HCLDOOUT 0x39
-#define PCF50633_REG_HCLDOENA 0x3a
-#define PCF50633_REG_HCLDOOVL 0x40
-
-enum pcf50633_regulator_enable {
- PCF50633_REGULATOR_ON = 0x01,
- PCF50633_REGULATOR_ON_GPIO1 = 0x02,
- PCF50633_REGULATOR_ON_GPIO2 = 0x04,
- PCF50633_REGULATOR_ON_GPIO3 = 0x08,
-};
-#define PCF50633_REGULATOR_ON_MASK 0x0f
-
-enum pcf50633_regulator_phase {
- PCF50633_REGULATOR_ACTPH1 = 0x00,
- PCF50633_REGULATOR_ACTPH2 = 0x10,
- PCF50633_REGULATOR_ACTPH3 = 0x20,
- PCF50633_REGULATOR_ACTPH4 = 0x30,
-};
-#define PCF50633_REGULATOR_ACTPH_MASK 0x30
-
-enum pcf50633_regulator_id {
- PCF50633_REGULATOR_AUTO,
- PCF50633_REGULATOR_DOWN1,
- PCF50633_REGULATOR_DOWN2,
- PCF50633_REGULATOR_LDO1,
- PCF50633_REGULATOR_LDO2,
- PCF50633_REGULATOR_LDO3,
- PCF50633_REGULATOR_LDO4,
- PCF50633_REGULATOR_LDO5,
- PCF50633_REGULATOR_LDO6,
- PCF50633_REGULATOR_HCLDO,
- PCF50633_REGULATOR_MEMLDO,
-};
-#endif
-
diff --git a/include/linux/mfd/pf1550.h b/include/linux/mfd/pf1550.h
new file mode 100644
index 000000000000..7cb2340ff2bd
--- /dev/null
+++ b/include/linux/mfd/pf1550.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Declarations for the PF1550 PMIC
+ *
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Robin Gong <yibin.gong@freescale.com>
+ *
+ * Portions Copyright (c) 2025 Savoir-faire Linux Inc.
+ * Samuel Kayode <samuel.kayode@savoirfairelinux.com>
+ */
+
+#ifndef __LINUX_MFD_PF1550_H
+#define __LINUX_MFD_PF1550_H
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+enum pf1550_pmic_reg {
+ /* PMIC regulator part */
+ PF1550_PMIC_REG_DEVICE_ID = 0x00,
+ PF1550_PMIC_REG_OTP_FLAVOR = 0x01,
+ PF1550_PMIC_REG_SILICON_REV = 0x02,
+
+ PF1550_PMIC_REG_INT_CATEGORY = 0x06,
+ PF1550_PMIC_REG_SW_INT_STAT0 = 0x08,
+ PF1550_PMIC_REG_SW_INT_MASK0 = 0x09,
+ PF1550_PMIC_REG_SW_INT_SENSE0 = 0x0a,
+ PF1550_PMIC_REG_SW_INT_STAT1 = 0x0b,
+ PF1550_PMIC_REG_SW_INT_MASK1 = 0x0c,
+ PF1550_PMIC_REG_SW_INT_SENSE1 = 0x0d,
+ PF1550_PMIC_REG_SW_INT_STAT2 = 0x0e,
+ PF1550_PMIC_REG_SW_INT_MASK2 = 0x0f,
+ PF1550_PMIC_REG_SW_INT_SENSE2 = 0x10,
+ PF1550_PMIC_REG_LDO_INT_STAT0 = 0x18,
+ PF1550_PMIC_REG_LDO_INT_MASK0 = 0x19,
+ PF1550_PMIC_REG_LDO_INT_SENSE0 = 0x1a,
+ PF1550_PMIC_REG_TEMP_INT_STAT0 = 0x20,
+ PF1550_PMIC_REG_TEMP_INT_MASK0 = 0x21,
+ PF1550_PMIC_REG_TEMP_INT_SENSE0 = 0x22,
+ PF1550_PMIC_REG_ONKEY_INT_STAT0 = 0x24,
+ PF1550_PMIC_REG_ONKEY_INT_MASK0 = 0x25,
+ PF1550_PMIC_REG_ONKEY_INT_SENSE0 = 0x26,
+ PF1550_PMIC_REG_MISC_INT_STAT0 = 0x28,
+ PF1550_PMIC_REG_MISC_INT_MASK0 = 0x29,
+ PF1550_PMIC_REG_MISC_INT_SENSE0 = 0x2a,
+
+ PF1550_PMIC_REG_COINCELL_CONTROL = 0x30,
+
+ PF1550_PMIC_REG_SW1_VOLT = 0x32,
+ PF1550_PMIC_REG_SW1_STBY_VOLT = 0x33,
+ PF1550_PMIC_REG_SW1_SLP_VOLT = 0x34,
+ PF1550_PMIC_REG_SW1_CTRL = 0x35,
+ PF1550_PMIC_REG_SW1_CTRL1 = 0x36,
+ PF1550_PMIC_REG_SW2_VOLT = 0x38,
+ PF1550_PMIC_REG_SW2_STBY_VOLT = 0x39,
+ PF1550_PMIC_REG_SW2_SLP_VOLT = 0x3a,
+ PF1550_PMIC_REG_SW2_CTRL = 0x3b,
+ PF1550_PMIC_REG_SW2_CTRL1 = 0x3c,
+ PF1550_PMIC_REG_SW3_VOLT = 0x3e,
+ PF1550_PMIC_REG_SW3_STBY_VOLT = 0x3f,
+ PF1550_PMIC_REG_SW3_SLP_VOLT = 0x40,
+ PF1550_PMIC_REG_SW3_CTRL = 0x41,
+ PF1550_PMIC_REG_SW3_CTRL1 = 0x42,
+ PF1550_PMIC_REG_VSNVS_CTRL = 0x48,
+ PF1550_PMIC_REG_VREFDDR_CTRL = 0x4a,
+ PF1550_PMIC_REG_LDO1_VOLT = 0x4c,
+ PF1550_PMIC_REG_LDO1_CTRL = 0x4d,
+ PF1550_PMIC_REG_LDO2_VOLT = 0x4f,
+ PF1550_PMIC_REG_LDO2_CTRL = 0x50,
+ PF1550_PMIC_REG_LDO3_VOLT = 0x52,
+ PF1550_PMIC_REG_LDO3_CTRL = 0x53,
+ PF1550_PMIC_REG_PWRCTRL0 = 0x58,
+ PF1550_PMIC_REG_PWRCTRL1 = 0x59,
+ PF1550_PMIC_REG_PWRCTRL2 = 0x5a,
+ PF1550_PMIC_REG_PWRCTRL3 = 0x5b,
+ PF1550_PMIC_REG_SW1_PWRDN_SEQ = 0x5f,
+ PF1550_PMIC_REG_SW2_PWRDN_SEQ = 0x60,
+ PF1550_PMIC_REG_SW3_PWRDN_SEQ = 0x61,
+ PF1550_PMIC_REG_LDO1_PWRDN_SEQ = 0x62,
+ PF1550_PMIC_REG_LDO2_PWRDN_SEQ = 0x63,
+ PF1550_PMIC_REG_LDO3_PWRDN_SEQ = 0x64,
+ PF1550_PMIC_REG_VREFDDR_PWRDN_SEQ = 0x65,
+
+ PF1550_PMIC_REG_STATE_INFO = 0x67,
+ PF1550_PMIC_REG_I2C_ADDR = 0x68,
+ PF1550_PMIC_REG_IO_DRV0 = 0x69,
+ PF1550_PMIC_REG_IO_DRV1 = 0x6a,
+ PF1550_PMIC_REG_RC_16MHZ = 0x6b,
+ PF1550_PMIC_REG_KEY = 0x6f,
+
+ /* Charger part */
+ PF1550_CHARG_REG_CHG_INT = 0x80,
+ PF1550_CHARG_REG_CHG_INT_MASK = 0x82,
+ PF1550_CHARG_REG_CHG_INT_OK = 0x84,
+ PF1550_CHARG_REG_VBUS_SNS = 0x86,
+ PF1550_CHARG_REG_CHG_SNS = 0x87,
+ PF1550_CHARG_REG_BATT_SNS = 0x88,
+ PF1550_CHARG_REG_CHG_OPER = 0x89,
+ PF1550_CHARG_REG_CHG_TMR = 0x8a,
+ PF1550_CHARG_REG_CHG_EOC_CNFG = 0x8d,
+ PF1550_CHARG_REG_CHG_CURR_CNFG = 0x8e,
+ PF1550_CHARG_REG_BATT_REG = 0x8f,
+ PF1550_CHARG_REG_BATFET_CNFG = 0x91,
+ PF1550_CHARG_REG_THM_REG_CNFG = 0x92,
+ PF1550_CHARG_REG_VBUS_INLIM_CNFG = 0x94,
+ PF1550_CHARG_REG_VBUS_LIN_DPM = 0x95,
+ PF1550_CHARG_REG_USB_PHY_LDO_CNFG = 0x96,
+ PF1550_CHARG_REG_DBNC_DELAY_TIME = 0x98,
+ PF1550_CHARG_REG_CHG_INT_CNFG = 0x99,
+ PF1550_CHARG_REG_THM_ADJ_SETTING = 0x9a,
+ PF1550_CHARG_REG_VBUS2SYS_CNFG = 0x9b,
+ PF1550_CHARG_REG_LED_PWM = 0x9c,
+ PF1550_CHARG_REG_FAULT_BATFET_CNFG = 0x9d,
+ PF1550_CHARG_REG_LED_CNFG = 0x9e,
+ PF1550_CHARG_REG_CHGR_KEY2 = 0x9f,
+
+ PF1550_TEST_REG_FMRADDR = 0xc4,
+ PF1550_TEST_REG_FMRDATA = 0xc5,
+ PF1550_TEST_REG_KEY3 = 0xdf,
+
+ PF1550_PMIC_REG_END = 0xff,
+};
+
+/* One-Time Programmable (OTP) memory */
+enum pf1550_otp_reg {
+ PF1550_OTP_SW1_SW2 = 0x1e,
+ PF1550_OTP_SW2_SW3 = 0x1f,
+};
+
+#define PF1550_DEVICE_ID 0x7c
+
+/* Keys for reading OTP */
+#define PF1550_OTP_PMIC_KEY 0x15
+#define PF1550_OTP_CHGR_KEY 0x50
+#define PF1550_OTP_TEST_KEY 0xab
+
+/* Supported charger modes */
+#define PF1550_CHG_BAT_OFF 1
+#define PF1550_CHG_BAT_ON 2
+
+#define PF1550_CHG_PRECHARGE 0
+#define PF1550_CHG_CONSTANT_CURRENT 1
+#define PF1550_CHG_CONSTANT_VOL 2
+#define PF1550_CHG_EOC 3
+#define PF1550_CHG_DONE 4
+#define PF1550_CHG_TIMER_FAULT 6
+#define PF1550_CHG_SUSPEND 7
+#define PF1550_CHG_OFF_INV 8
+#define PF1550_CHG_BAT_OVER 9
+#define PF1550_CHG_OFF_TEMP 10
+#define PF1550_CHG_LINEAR_ONLY 12
+#define PF1550_CHG_SNS_MASK 0xf
+#define PF1550_CHG_INT_MASK 0x51
+
+#define PF1550_BAT_NO_VBUS 0
+#define PF1550_BAT_LOW_THAN_PRECHARG 1
+#define PF1550_BAT_CHARG_FAIL 2
+#define PF1550_BAT_HIGH_THAN_PRECHARG 4
+#define PF1550_BAT_OVER_VOL 5
+#define PF1550_BAT_NO_DETECT 6
+#define PF1550_BAT_SNS_MASK 0x7
+
+#define PF1550_VBUS_UVLO BIT(2)
+#define PF1550_VBUS_IN2SYS BIT(3)
+#define PF1550_VBUS_OVLO BIT(4)
+#define PF1550_VBUS_VALID BIT(5)
+
+#define PF1550_CHARG_REG_BATT_REG_CHGCV_MASK 0x3f
+#define PF1550_CHARG_REG_BATT_REG_VMINSYS_SHIFT 6
+#define PF1550_CHARG_REG_BATT_REG_VMINSYS_MASK GENMASK(7, 6)
+#define PF1550_CHARG_REG_THM_REG_CNFG_REGTEMP_SHIFT 2
+#define PF1550_CHARG_REG_THM_REG_CNFG_REGTEMP_MASK GENMASK(3, 2)
+
+#define PF1550_ONKEY_RST_EN BIT(7)
+
+/* DVS enable masks */
+#define OTP_SW1_DVS_ENB BIT(1)
+#define OTP_SW2_DVS_ENB BIT(3)
+
+/* Top level interrupt masks */
+#define IRQ_REGULATOR (BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(6))
+#define IRQ_ONKEY BIT(5)
+#define IRQ_CHG BIT(0)
+
+/* Regulator interrupt masks */
+#define PMIC_IRQ_SW1_LS BIT(0)
+#define PMIC_IRQ_SW2_LS BIT(1)
+#define PMIC_IRQ_SW3_LS BIT(2)
+#define PMIC_IRQ_SW1_HS BIT(0)
+#define PMIC_IRQ_SW2_HS BIT(1)
+#define PMIC_IRQ_SW3_HS BIT(2)
+#define PMIC_IRQ_LDO1_FAULT BIT(0)
+#define PMIC_IRQ_LDO2_FAULT BIT(1)
+#define PMIC_IRQ_LDO3_FAULT BIT(2)
+#define PMIC_IRQ_TEMP_110 BIT(0)
+#define PMIC_IRQ_TEMP_125 BIT(1)
+
+/* Onkey interrupt masks */
+#define ONKEY_IRQ_PUSHI BIT(0)
+#define ONKEY_IRQ_1SI BIT(1)
+#define ONKEY_IRQ_2SI BIT(2)
+#define ONKEY_IRQ_3SI BIT(3)
+#define ONKEY_IRQ_4SI BIT(4)
+#define ONKEY_IRQ_8SI BIT(5)
+
+/* Charger interrupt masks */
+#define CHARG_IRQ_BAT2SOCI BIT(1)
+#define CHARG_IRQ_BATI BIT(2)
+#define CHARG_IRQ_CHGI BIT(3)
+#define CHARG_IRQ_VBUSI BIT(5)
+#define CHARG_IRQ_DPMI BIT(6)
+#define CHARG_IRQ_THMI BIT(7)
+
+enum pf1550_irq {
+ PF1550_IRQ_CHG,
+ PF1550_IRQ_REGULATOR,
+ PF1550_IRQ_ONKEY,
+};
+
+enum pf1550_pmic_irq {
+ PF1550_PMIC_IRQ_SW1_LS,
+ PF1550_PMIC_IRQ_SW2_LS,
+ PF1550_PMIC_IRQ_SW3_LS,
+ PF1550_PMIC_IRQ_SW1_HS,
+ PF1550_PMIC_IRQ_SW2_HS,
+ PF1550_PMIC_IRQ_SW3_HS,
+ PF1550_PMIC_IRQ_LDO1_FAULT,
+ PF1550_PMIC_IRQ_LDO2_FAULT,
+ PF1550_PMIC_IRQ_LDO3_FAULT,
+ PF1550_PMIC_IRQ_TEMP_110,
+ PF1550_PMIC_IRQ_TEMP_125,
+};
+
+enum pf1550_onkey_irq {
+ PF1550_ONKEY_IRQ_PUSHI,
+ PF1550_ONKEY_IRQ_1SI,
+ PF1550_ONKEY_IRQ_2SI,
+ PF1550_ONKEY_IRQ_3SI,
+ PF1550_ONKEY_IRQ_4SI,
+ PF1550_ONKEY_IRQ_8SI,
+};
+
+enum pf1550_charg_irq {
+ PF1550_CHARG_IRQ_BAT2SOCI,
+ PF1550_CHARG_IRQ_BATI,
+ PF1550_CHARG_IRQ_CHGI,
+ PF1550_CHARG_IRQ_VBUSI,
+ PF1550_CHARG_IRQ_THMI,
+};
+
+enum pf1550_regulators {
+ PF1550_SW1,
+ PF1550_SW2,
+ PF1550_SW3,
+ PF1550_VREFDDR,
+ PF1550_LDO1,
+ PF1550_LDO2,
+ PF1550_LDO3,
+};
+
+struct pf1550_ddata {
+ struct regmap_irq_chip_data *irq_data_regulator;
+ struct regmap_irq_chip_data *irq_data_charger;
+ struct regmap_irq_chip_data *irq_data_onkey;
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap *regmap;
+ struct device *dev;
+ bool dvs1_enable;
+ bool dvs2_enable;
+ int irq;
+};
+
+#endif /* __LINUX_MFD_PF1550_H */
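Sub-drivers receive struct pf1550_ddata from their parent and talk to the device through the shared regmap; the *_SNS registers expose live status. A minimal sketch, assuming the charger cell reads its state this way; the helper name is illustrative:

  /* Sketch: return the charger state field from CHG_SNS. */
  static int example_charger_state(struct pf1550_ddata *pf1550)
  {
          unsigned int val;
          int ret;

          ret = regmap_read(pf1550->regmap, PF1550_CHARG_REG_CHG_SNS, &val);
          if (ret)
                  return ret;

          return val & PF1550_CHG_SNS_MASK;       /* e.g. PF1550_CHG_DONE */
  }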
diff --git a/include/linux/mfd/qnap-mcu.h b/include/linux/mfd/qnap-mcu.h
new file mode 100644
index 000000000000..42bf523f9a5b
--- /dev/null
+++ b/include/linux/mfd/qnap-mcu.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Core definitions for QNAP MCU MFD driver.
+ * Copyright (C) 2024 Heiko Stuebner <heiko@sntech.de>
+ */
+
+#ifndef _LINUX_QNAP_MCU_H_
+#define _LINUX_QNAP_MCU_H_
+
+#include <linux/types.h>
+
+struct qnap_mcu;
+
+struct qnap_mcu_variant {
+ u32 baud_rate;
+ int num_drives;
+ int fan_pwm_min;
+ int fan_pwm_max;
+ bool usb_led;
+};
+
+int qnap_mcu_exec(struct qnap_mcu *mcu,
+ const u8 *cmd_data, size_t cmd_data_size,
+ u8 *reply_data, size_t reply_data_size);
+int qnap_mcu_exec_with_ack(struct qnap_mcu *mcu,
+ const u8 *cmd_data, size_t cmd_data_size);
+
+#endif /* _LINUX_QNAP_MCU_H_ */
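Both helpers take a raw command buffer: qnap_mcu_exec() also collects a reply of known size, while qnap_mcu_exec_with_ack() only checks the MCU's acknowledgement. A minimal sketch; the opcode bytes are placeholders, not documented MCU commands:

  /* Sketch: send a two-byte command and read a fixed-size reply. */
  static int example_query(struct qnap_mcu *mcu, u8 *reply, size_t len)
  {
          static const u8 cmd[] = { 0x40, 0x41 }; /* placeholder opcode */

          return qnap_mcu_exec(mcu, cmd, sizeof(cmd), reply, len);
  }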
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index e07f6e61cd38..28170ee08898 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -113,6 +113,148 @@ enum rk808_reg {
#define RK808_INT_STS_MSK_REG2 0x4f
#define RK808_IO_POL_REG 0x50
+/* RK816 */
+enum rk816_reg {
+ RK816_ID_DCDC1,
+ RK816_ID_DCDC2,
+ RK816_ID_DCDC3,
+ RK816_ID_DCDC4,
+ RK816_ID_LDO1,
+ RK816_ID_LDO2,
+ RK816_ID_LDO3,
+ RK816_ID_LDO4,
+ RK816_ID_LDO5,
+ RK816_ID_LDO6,
+ RK816_ID_BOOST,
+ RK816_ID_OTG_SW,
+};
+
+enum rk816_irqs {
+ /* INT_STS_REG1 */
+ RK816_IRQ_PWRON_FALL,
+ RK816_IRQ_PWRON_RISE,
+
+ /* INT_STS_REG2 */
+ RK816_IRQ_VB_LOW,
+ RK816_IRQ_PWRON,
+ RK816_IRQ_PWRON_LP,
+ RK816_IRQ_HOTDIE,
+ RK816_IRQ_RTC_ALARM,
+ RK816_IRQ_RTC_PERIOD,
+ RK816_IRQ_USB_OV,
+
+ /* INT_STS_REG3 */
+ RK816_IRQ_PLUG_IN,
+ RK816_IRQ_PLUG_OUT,
+ RK816_IRQ_CHG_OK,
+ RK816_IRQ_CHG_TE,
+ RK816_IRQ_CHG_TS,
+ RK816_IRQ_CHG_CVTLIM,
+ RK816_IRQ_DISCHG_ILIM,
+};
+
+/* power channel registers */
+#define RK816_DCDC_EN_REG1 0x23
+
+#define RK816_DCDC_EN_REG2 0x24
+#define RK816_BOOST_EN BIT(1)
+#define RK816_OTG_EN BIT(2)
+#define RK816_BOOST_EN_MSK BIT(5)
+#define RK816_OTG_EN_MSK BIT(6)
+#define RK816_BUCK_DVS_CONFIRM BIT(7)
+
+#define RK816_LDO_EN_REG1 0x27
+
+#define RK816_LDO_EN_REG2 0x28
+
+/* interrupt registers and irq definitions */
+#define RK816_INT_STS_REG1 0x49
+#define RK816_INT_STS_MSK_REG1 0x4a
+#define RK816_INT_STS_PWRON_FALL BIT(5)
+#define RK816_INT_STS_PWRON_RISE BIT(6)
+
+#define RK816_INT_STS_REG2 0x4c
+#define RK816_INT_STS_MSK_REG2 0x4d
+#define RK816_INT_STS_VB_LOW BIT(1)
+#define RK816_INT_STS_PWRON BIT(2)
+#define RK816_INT_STS_PWRON_LP BIT(3)
+#define RK816_INT_STS_HOTDIE BIT(4)
+#define RK816_INT_STS_RTC_ALARM BIT(5)
+#define RK816_INT_STS_RTC_PERIOD BIT(6)
+#define RK816_INT_STS_USB_OV BIT(7)
+
+#define RK816_INT_STS_REG3 0x4e
+#define RK816_INT_STS_MSK_REG3 0x4f
+#define RK816_INT_STS_PLUG_IN BIT(0)
+#define RK816_INT_STS_PLUG_OUT BIT(1)
+#define RK816_INT_STS_CHG_OK BIT(2)
+#define RK816_INT_STS_CHG_TE BIT(3)
+#define RK816_INT_STS_CHG_TS BIT(4)
+#define RK816_INT_STS_CHG_CVTLIM BIT(6)
+#define RK816_INT_STS_DISCHG_ILIM BIT(7)
+
+#define RK816_IRQ_STS_OFFSET(x) ((x) - RK816_INT_STS_REG1)
+#define RK816_IRQ_MSK_OFFSET(x) ((x) - RK816_INT_STS_MSK_REG1)
+
+/* charger, boost and OTG registers */
+#define RK816_OTG_BUCK_LDO_CONFIG_REG 0x2a
+#define RK816_CHRG_CONFIG_REG 0x2b
+#define RK816_BOOST_ON_VSEL_REG		0x54
+#define RK816_BOOST_SLP_VSEL_REG 0x55
+#define RK816_CHRG_BOOST_CONFIG_REG 0x9a
+#define RK816_SUP_STS_REG 0xa0
+#define RK816_USB_CTRL_REG 0xa1
+#define RK816_CHRG_CTRL(x) (0xa3 + (x))
+#define RK816_BAT_CTRL_REG 0xa6
+#define RK816_BAT_HTS_TS_REG 0xa8
+#define RK816_BAT_LTS_TS_REG 0xa9
+
+/* adc and fuel gauge registers */
+#define RK816_TS_CTRL_REG 0xac
+#define RK816_ADC_CTRL_REG 0xad
+#define RK816_GGCON_REG 0xb0
+#define RK816_GGSTS_REG 0xb1
+#define RK816_ZERO_CUR_ADC_REGH 0xb2
+#define RK816_ZERO_CUR_ADC_REGL 0xb3
+#define RK816_GASCNT_CAL_REG(x) (0xb7 - (x))
+#define RK816_GASCNT_REG(x) (0xbb - (x))
+#define RK816_BAT_CUR_AVG_REGH 0xbc
+#define RK816_BAT_CUR_AVG_REGL 0xbd
+#define RK816_TS_ADC_REGH 0xbe
+#define RK816_TS_ADC_REGL 0xbf
+#define RK816_USB_ADC_REGH 0xc0
+#define RK816_USB_ADC_REGL 0xc1
+#define RK816_BAT_OCV_REGH 0xc2
+#define RK816_BAT_OCV_REGL 0xc3
+#define RK816_BAT_VOL_REGH 0xc4
+#define RK816_BAT_VOL_REGL 0xc5
+#define RK816_RELAX_ENTRY_THRES_REGH 0xc6
+#define RK816_RELAX_ENTRY_THRES_REGL 0xc7
+#define RK816_RELAX_EXIT_THRES_REGH 0xc8
+#define RK816_RELAX_EXIT_THRES_REGL 0xc9
+#define RK816_RELAX_VOL1_REGH 0xca
+#define RK816_RELAX_VOL1_REGL 0xcb
+#define RK816_RELAX_VOL2_REGH 0xcc
+#define RK816_RELAX_VOL2_REGL 0xcd
+#define RK816_RELAX_CUR1_REGH 0xce
+#define RK816_RELAX_CUR1_REGL 0xcf
+#define RK816_RELAX_CUR2_REGH 0xd0
+#define RK816_RELAX_CUR2_REGL 0xd1
+#define RK816_CAL_OFFSET_REGH 0xd2
+#define RK816_CAL_OFFSET_REGL 0xd3
+#define RK816_NON_ACT_TIMER_CNT_REG 0xd4
+#define RK816_VCALIB0_REGH 0xd5
+#define RK816_VCALIB0_REGL 0xd6
+#define RK816_VCALIB1_REGH 0xd7
+#define RK816_VCALIB1_REGL 0xd8
+#define RK816_FCC_GASCNT_REG(x) (0xdc - (x))
+#define RK816_IOFFSET_REGH 0xdd
+#define RK816_IOFFSET_REGL 0xde
+#define RK816_SLEEP_CON_SAMP_CUR_REG 0xdf
+
+/* general purpose data registers 0xe0 ~ 0xf2 */
+#define RK816_DATA_REG(x) (0xe0 + (x))
+
/* RK818 */
#define RK818_DCDC1 0
#define RK818_LDO1 4
@@ -289,6 +431,416 @@ enum rk805_reg {
#define RK805_INT_ALARM_EN (1 << 3)
#define RK805_INT_TIMER_EN (1 << 2)
+/* RK806 */
+#define RK806_POWER_EN0 0x0
+#define RK806_POWER_EN1 0x1
+#define RK806_POWER_EN2 0x2
+#define RK806_POWER_EN3 0x3
+#define RK806_POWER_EN4 0x4
+#define RK806_POWER_EN5 0x5
+#define RK806_POWER_SLP_EN0 0x6
+#define RK806_POWER_SLP_EN1 0x7
+#define RK806_POWER_SLP_EN2 0x8
+#define RK806_POWER_DISCHRG_EN0 0x9
+#define RK806_POWER_DISCHRG_EN1 0xA
+#define RK806_POWER_DISCHRG_EN2 0xB
+#define RK806_BUCK_FB_CONFIG 0xC
+#define RK806_SLP_LP_CONFIG 0xD
+#define RK806_POWER_FPWM_EN0 0xE
+#define RK806_POWER_FPWM_EN1 0xF
+#define RK806_BUCK1_CONFIG 0x10
+#define RK806_BUCK2_CONFIG 0x11
+#define RK806_BUCK3_CONFIG 0x12
+#define RK806_BUCK4_CONFIG 0x13
+#define RK806_BUCK5_CONFIG 0x14
+#define RK806_BUCK6_CONFIG 0x15
+#define RK806_BUCK7_CONFIG 0x16
+#define RK806_BUCK8_CONFIG 0x17
+#define RK806_BUCK9_CONFIG 0x18
+#define RK806_BUCK10_CONFIG 0x19
+#define RK806_BUCK1_ON_VSEL 0x1A
+#define RK806_BUCK2_ON_VSEL 0x1B
+#define RK806_BUCK3_ON_VSEL 0x1C
+#define RK806_BUCK4_ON_VSEL 0x1D
+#define RK806_BUCK5_ON_VSEL 0x1E
+#define RK806_BUCK6_ON_VSEL 0x1F
+#define RK806_BUCK7_ON_VSEL 0x20
+#define RK806_BUCK8_ON_VSEL 0x21
+#define RK806_BUCK9_ON_VSEL 0x22
+#define RK806_BUCK10_ON_VSEL 0x23
+#define RK806_BUCK1_SLP_VSEL 0x24
+#define RK806_BUCK2_SLP_VSEL 0x25
+#define RK806_BUCK3_SLP_VSEL 0x26
+#define RK806_BUCK4_SLP_VSEL 0x27
+#define RK806_BUCK5_SLP_VSEL 0x28
+#define RK806_BUCK6_SLP_VSEL 0x29
+#define RK806_BUCK7_SLP_VSEL 0x2A
+#define RK806_BUCK8_SLP_VSEL 0x2B
+#define RK806_BUCK9_SLP_VSEL 0x2D
+#define RK806_BUCK10_SLP_VSEL 0x2E
+#define RK806_BUCK_DEBUG1 0x30
+#define RK806_BUCK_DEBUG2 0x31
+#define RK806_BUCK_DEBUG3 0x32
+#define RK806_BUCK_DEBUG4 0x33
+#define RK806_BUCK_DEBUG5 0x34
+#define RK806_BUCK_DEBUG6 0x35
+#define RK806_BUCK_DEBUG7 0x36
+#define RK806_BUCK_DEBUG8 0x37
+#define RK806_BUCK_DEBUG9 0x38
+#define RK806_BUCK_DEBUG10 0x39
+#define RK806_BUCK_DEBUG11 0x3A
+#define RK806_BUCK_DEBUG12 0x3B
+#define RK806_BUCK_DEBUG13 0x3C
+#define RK806_BUCK_DEBUG14 0x3D
+#define RK806_BUCK_DEBUG15 0x3E
+#define RK806_BUCK_DEBUG16 0x3F
+#define RK806_BUCK_DEBUG17 0x40
+#define RK806_BUCK_DEBUG18 0x41
+#define RK806_NLDO_IMAX 0x42
+#define RK806_NLDO1_ON_VSEL 0x43
+#define RK806_NLDO2_ON_VSEL 0x44
+#define RK806_NLDO3_ON_VSEL 0x45
+#define RK806_NLDO4_ON_VSEL 0x46
+#define RK806_NLDO5_ON_VSEL 0x47
+#define RK806_NLDO1_SLP_VSEL 0x48
+#define RK806_NLDO2_SLP_VSEL 0x49
+#define RK806_NLDO3_SLP_VSEL 0x4A
+#define RK806_NLDO4_SLP_VSEL 0x4B
+#define RK806_NLDO5_SLP_VSEL 0x4C
+#define RK806_PLDO_IMAX 0x4D
+#define RK806_PLDO1_ON_VSEL 0x4E
+#define RK806_PLDO2_ON_VSEL 0x4F
+#define RK806_PLDO3_ON_VSEL 0x50
+#define RK806_PLDO4_ON_VSEL 0x51
+#define RK806_PLDO5_ON_VSEL 0x52
+#define RK806_PLDO6_ON_VSEL 0x53
+#define RK806_PLDO1_SLP_VSEL 0x54
+#define RK806_PLDO2_SLP_VSEL 0x55
+#define RK806_PLDO3_SLP_VSEL 0x56
+#define RK806_PLDO4_SLP_VSEL 0x57
+#define RK806_PLDO5_SLP_VSEL 0x58
+#define RK806_PLDO6_SLP_VSEL 0x59
+#define RK806_CHIP_NAME 0x5A
+#define RK806_CHIP_VER 0x5B
+#define RK806_OTP_VER 0x5C
+#define RK806_SYS_STS 0x5D
+#define RK806_SYS_CFG0 0x5E
+#define RK806_SYS_CFG1 0x5F
+#define RK806_SYS_OPTION 0x61
+#define RK806_SLEEP_CONFIG0 0x62
+#define RK806_SLEEP_CONFIG1 0x63
+#define RK806_SLEEP_CTR_SEL0 0x64
+#define RK806_SLEEP_CTR_SEL1 0x65
+#define RK806_SLEEP_CTR_SEL2 0x66
+#define RK806_SLEEP_CTR_SEL3 0x67
+#define RK806_SLEEP_CTR_SEL4 0x68
+#define RK806_SLEEP_CTR_SEL5 0x69
+#define RK806_DVS_CTRL_SEL0 0x6A
+#define RK806_DVS_CTRL_SEL1 0x6B
+#define RK806_DVS_CTRL_SEL2 0x6C
+#define RK806_DVS_CTRL_SEL3 0x6D
+#define RK806_DVS_CTRL_SEL4 0x6E
+#define RK806_DVS_CTRL_SEL5 0x6F
+#define RK806_DVS_START_CTRL 0x70
+#define RK806_SLEEP_GPIO 0x71
+#define RK806_SYS_CFG3 0x72
+#define RK806_ON_SOURCE 0x74
+#define RK806_OFF_SOURCE 0x75
+#define RK806_PWRON_KEY 0x76
+#define RK806_INT_STS0 0x77
+#define RK806_INT_MSK0 0x78
+#define RK806_INT_STS1 0x79
+#define RK806_INT_MSK1 0x7A
+#define RK806_GPIO_INT_CONFIG 0x7B
+#define RK806_DATA_REG0 0x7C
+#define RK806_DATA_REG1 0x7D
+#define RK806_DATA_REG2 0x7E
+#define RK806_DATA_REG3 0x7F
+#define RK806_DATA_REG4 0x80
+#define RK806_DATA_REG5 0x81
+#define RK806_DATA_REG6 0x82
+#define RK806_DATA_REG7 0x83
+#define RK806_DATA_REG8 0x84
+#define RK806_DATA_REG9 0x85
+#define RK806_DATA_REG10 0x86
+#define RK806_DATA_REG11 0x87
+#define RK806_DATA_REG12 0x88
+#define RK806_DATA_REG13 0x89
+#define RK806_DATA_REG14 0x8A
+#define RK806_DATA_REG15 0x8B
+#define RK806_TM_REG 0x8C
+#define RK806_OTP_EN_REG 0x8D
+#define RK806_FUNC_OTP_EN_REG 0x8E
+#define RK806_TEST_REG1 0x8F
+#define RK806_TEST_REG2 0x90
+#define RK806_TEST_REG3 0x91
+#define RK806_TEST_REG4 0x92
+#define RK806_TEST_REG5 0x93
+#define RK806_BUCK_VSEL_OTP_REG0 0x94
+#define RK806_BUCK_VSEL_OTP_REG1 0x95
+#define RK806_BUCK_VSEL_OTP_REG2 0x96
+#define RK806_BUCK_VSEL_OTP_REG3 0x97
+#define RK806_BUCK_VSEL_OTP_REG4 0x98
+#define RK806_BUCK_VSEL_OTP_REG5 0x99
+#define RK806_BUCK_VSEL_OTP_REG6 0x9A
+#define RK806_BUCK_VSEL_OTP_REG7 0x9B
+#define RK806_BUCK_VSEL_OTP_REG8 0x9C
+#define RK806_BUCK_VSEL_OTP_REG9 0x9D
+#define RK806_NLDO1_VSEL_OTP_REG0 0x9E
+#define RK806_NLDO1_VSEL_OTP_REG1 0x9F
+#define RK806_NLDO1_VSEL_OTP_REG2 0xA0
+#define RK806_NLDO1_VSEL_OTP_REG3 0xA1
+#define RK806_NLDO1_VSEL_OTP_REG4 0xA2
+#define RK806_PLDO_VSEL_OTP_REG0 0xA3
+#define RK806_PLDO_VSEL_OTP_REG1 0xA4
+#define RK806_PLDO_VSEL_OTP_REG2 0xA5
+#define RK806_PLDO_VSEL_OTP_REG3 0xA6
+#define RK806_PLDO_VSEL_OTP_REG4 0xA7
+#define RK806_PLDO_VSEL_OTP_REG5 0xA8
+#define RK806_BUCK_EN_OTP_REG1 0xA9
+#define RK806_NLDO_EN_OTP_REG1 0xAA
+#define RK806_PLDO_EN_OTP_REG1 0xAB
+#define RK806_BUCK_FB_RES_OTP_REG1 0xAC
+#define RK806_OTP_RESEV_REG0 0xAD
+#define RK806_OTP_RESEV_REG1 0xAE
+#define RK806_OTP_RESEV_REG2 0xAF
+#define RK806_OTP_RESEV_REG3 0xB0
+#define RK806_OTP_RESEV_REG4 0xB1
+#define RK806_BUCK_SEQ_REG0 0xB2
+#define RK806_BUCK_SEQ_REG1 0xB3
+#define RK806_BUCK_SEQ_REG2 0xB4
+#define RK806_BUCK_SEQ_REG3 0xB5
+#define RK806_BUCK_SEQ_REG4 0xB6
+#define RK806_BUCK_SEQ_REG5 0xB7
+#define RK806_BUCK_SEQ_REG6 0xB8
+#define RK806_BUCK_SEQ_REG7 0xB9
+#define RK806_BUCK_SEQ_REG8 0xBA
+#define RK806_BUCK_SEQ_REG9 0xBB
+#define RK806_BUCK_SEQ_REG10 0xBC
+#define RK806_BUCK_SEQ_REG11 0xBD
+#define RK806_BUCK_SEQ_REG12 0xBE
+#define RK806_BUCK_SEQ_REG13 0xBF
+#define RK806_BUCK_SEQ_REG14 0xC0
+#define RK806_BUCK_SEQ_REG15 0xC1
+#define RK806_BUCK_SEQ_REG16 0xC2
+#define RK806_BUCK_SEQ_REG17 0xC3
+#define RK806_HK_TRIM_REG1 0xC4
+#define RK806_HK_TRIM_REG2 0xC5
+#define RK806_BUCK_REF_TRIM_REG1 0xC6
+#define RK806_BUCK_REF_TRIM_REG2 0xC7
+#define RK806_BUCK_REF_TRIM_REG3 0xC8
+#define RK806_BUCK_REF_TRIM_REG4 0xC9
+#define RK806_BUCK_REF_TRIM_REG5 0xCA
+#define RK806_BUCK_OSC_TRIM_REG1 0xCB
+#define RK806_BUCK_OSC_TRIM_REG2 0xCC
+#define RK806_BUCK_OSC_TRIM_REG3 0xCD
+#define RK806_BUCK_OSC_TRIM_REG4 0xCE
+#define RK806_BUCK_OSC_TRIM_REG5 0xCF
+#define RK806_BUCK_TRIM_ZCDIOS_REG1 0xD0
+#define RK806_BUCK_TRIM_ZCDIOS_REG2 0xD1
+#define RK806_NLDO_TRIM_REG1 0xD2
+#define RK806_NLDO_TRIM_REG2 0xD3
+#define RK806_NLDO_TRIM_REG3 0xD4
+#define RK806_PLDO_TRIM_REG1 0xD5
+#define RK806_PLDO_TRIM_REG2 0xD6
+#define RK806_PLDO_TRIM_REG3 0xD7
+#define RK806_TRIM_ICOMP_REG1 0xD8
+#define RK806_TRIM_ICOMP_REG2 0xD9
+#define RK806_EFUSE_CONTROL_REGH 0xDA
+#define RK806_FUSE_PROG_REG 0xDB
+#define RK806_MAIN_FSM_STS_REG 0xDD
+#define RK806_FSM_REG 0xDE
+#define RK806_TOP_RESEV_OFFR 0xEC
+#define RK806_TOP_RESEV_POR 0xED
+#define RK806_BUCK_VRSN_REG1 0xEE
+#define RK806_BUCK_VRSN_REG2 0xEF
+#define RK806_NLDO_RLOAD_SEL_REG1 0xF0
+#define RK806_PLDO_RLOAD_SEL_REG1 0xF1
+#define RK806_PLDO_RLOAD_SEL_REG2 0xF2
+#define RK806_BUCK_CMIN_MX_REG1 0xF3
+#define RK806_BUCK_CMIN_MX_REG2 0xF4
+#define RK806_BUCK_FREQ_SET_REG1 0xF5
+#define RK806_BUCK_FREQ_SET_REG2 0xF6
+#define RK806_BUCK_RS_MEABS_REG1 0xF7
+#define RK806_BUCK_RS_MEABS_REG2 0xF8
+#define RK806_BUCK_RS_ZDLEB_REG1 0xF9
+#define RK806_BUCK_RS_ZDLEB_REG2 0xFA
+#define RK806_BUCK_RSERVE_REG1 0xFB
+#define RK806_BUCK_RSERVE_REG2 0xFC
+#define RK806_BUCK_RSERVE_REG3 0xFD
+#define RK806_BUCK_RSERVE_REG4 0xFE
+#define RK806_BUCK_RSERVE_REG5 0xFF
+
+/* INT_STS Register field definitions */
+#define RK806_INT_STS_PWRON_FALL BIT(0)
+#define RK806_INT_STS_PWRON_RISE BIT(1)
+#define RK806_INT_STS_PWRON BIT(2)
+#define RK806_INT_STS_PWRON_LP BIT(3)
+#define RK806_INT_STS_HOTDIE BIT(4)
+#define RK806_INT_STS_VDC_RISE BIT(5)
+#define RK806_INT_STS_VDC_FALL BIT(6)
+#define RK806_INT_STS_VB_LO BIT(7)
+#define RK806_INT_STS_REV0 BIT(0)
+#define RK806_INT_STS_REV1 BIT(1)
+#define RK806_INT_STS_REV2 BIT(2)
+#define RK806_INT_STS_CRC_ERROR BIT(3)
+#define RK806_INT_STS_SLP3_GPIO BIT(4)
+#define RK806_INT_STS_SLP2_GPIO BIT(5)
+#define RK806_INT_STS_SLP1_GPIO BIT(6)
+#define RK806_INT_STS_WDT BIT(7)
+
+/* SPI command */
+#define RK806_CMD_READ 0
+#define RK806_CMD_WRITE BIT(7)
+#define RK806_CMD_CRC_EN BIT(6)
+#define RK806_CMD_CRC_DIS 0
+#define RK806_CMD_LEN_MSK 0x0f
+#define RK806_REG_H 0x00
+
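Taken together, the command defines above describe the first byte of an RK806 SPI frame: a read/write flag, an optional CRC enable, and a length field encoding the number of payload bytes minus one; the register address follows as a low byte plus the fixed high byte RK806_REG_H. A minimal sketch of the three-byte preamble for a single-register write (the framing is inferred from the defines, not spelled out in the patch):

	u8 preamble[3];

	preamble[0] = RK806_CMD_WRITE | RK806_CMD_CRC_DIS |
		      ((1 - 1) & RK806_CMD_LEN_MSK);	/* one payload byte */
	preamble[1] = RK806_BUCK1_ON_VSEL;	/* register address, low byte */
	preamble[2] = RK806_REG_H;		/* address high byte, always 0x00 */
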
+#define VERSION_AB 0x01
+
+enum rk806_reg_id {
+ RK806_ID_DCDC1 = 0,
+ RK806_ID_DCDC2,
+ RK806_ID_DCDC3,
+ RK806_ID_DCDC4,
+ RK806_ID_DCDC5,
+ RK806_ID_DCDC6,
+ RK806_ID_DCDC7,
+ RK806_ID_DCDC8,
+ RK806_ID_DCDC9,
+ RK806_ID_DCDC10,
+
+ RK806_ID_NLDO1,
+ RK806_ID_NLDO2,
+ RK806_ID_NLDO3,
+ RK806_ID_NLDO4,
+ RK806_ID_NLDO5,
+
+ RK806_ID_PLDO1,
+ RK806_ID_PLDO2,
+ RK806_ID_PLDO3,
+ RK806_ID_PLDO4,
+ RK806_ID_PLDO5,
+ RK806_ID_PLDO6,
+ RK806_ID_END,
+};
+
+/* Define the RK806 IRQ numbers */
+enum rk806_irqs {
+ /* INT_STS0 registers */
+ RK806_IRQ_PWRON_FALL,
+ RK806_IRQ_PWRON_RISE,
+ RK806_IRQ_PWRON,
+ RK806_IRQ_PWRON_LP,
+ RK806_IRQ_HOTDIE,
+ RK806_IRQ_VDC_RISE,
+ RK806_IRQ_VDC_FALL,
+ RK806_IRQ_VB_LO,
+
+	/* INT_STS1 registers */
+ RK806_IRQ_REV0,
+ RK806_IRQ_REV1,
+ RK806_IRQ_REV2,
+ RK806_IRQ_CRC_ERROR,
+ RK806_IRQ_SLP3_GPIO,
+ RK806_IRQ_SLP2_GPIO,
+ RK806_IRQ_SLP1_GPIO,
+ RK806_IRQ_WDT,
+};
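
The enum packs eight IRQs per status register, INT_STS0 first, so the source register and bit position follow arithmetically from the IRQ number. Hypothetical helpers (not part of this header) that make the mapping explicit:

	static inline unsigned int rk806_irq_reg(unsigned int irq)
	{
		return RK806_INT_STS0 + (irq / 8) * 2;	/* 0x77 or 0x79 */
	}

	static inline unsigned int rk806_irq_mask(unsigned int irq)
	{
		return BIT(irq % 8);	/* e.g. RK806_IRQ_WDT -> BIT(7) */
	}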
+
+/* VCC1 Low Voltage Threshold */
+enum rk806_lv_sel {
+ VB_LO_SEL_2800,
+ VB_LO_SEL_2900,
+ VB_LO_SEL_3000,
+ VB_LO_SEL_3100,
+ VB_LO_SEL_3200,
+ VB_LO_SEL_3300,
+ VB_LO_SEL_3400,
+ VB_LO_SEL_3500,
+};
+
+/* System Shutdown Voltage Select */
+enum rk806_uv_sel {
+ VB_UV_SEL_2700,
+ VB_UV_SEL_2800,
+ VB_UV_SEL_2900,
+ VB_UV_SEL_3000,
+ VB_UV_SEL_3100,
+ VB_UV_SEL_3200,
+ VB_UV_SEL_3300,
+ VB_UV_SEL_3400,
+};
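
Both selector enums step in fixed 100 mV increments from their lowest value, so an encoded threshold converts back to millivolts with simple arithmetic. Hypothetical conversion helpers, assuming the linear encoding implied by the enum names:

	static inline int rk806_lv_sel_to_mv(enum rk806_lv_sel sel)
	{
		return 2800 + 100 * sel;	/* VB_LO_SEL_2800 -> 2800 mV */
	}

	static inline int rk806_uv_sel_to_mv(enum rk806_uv_sel sel)
	{
		return 2700 + 100 * sel;	/* VB_UV_SEL_2700 -> 2700 mV */
	}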
+
+/* Pin Function */
+enum rk806_pwrctrl_fun {
+ PWRCTRL_NULL_FUN,
+ PWRCTRL_SLP_FUN,
+ PWRCTRL_POWOFF_FUN,
+ PWRCTRL_RST_FUN,
+ PWRCTRL_DVS_FUN,
+ PWRCTRL_GPIO_FUN,
+};
+
+/* Pin Polarity */
+enum rk806_pin_level {
+ POL_LOW,
+ POL_HIGH,
+};
+
+enum rk806_vsel_ctr_sel {
+ CTR_BY_NO_EFFECT,
+ CTR_BY_PWRCTRL1,
+ CTR_BY_PWRCTRL2,
+ CTR_BY_PWRCTRL3,
+};
+
+enum rk806_dvs_ctr_sel {
+ CTR_SEL_NO_EFFECT,
+ CTR_SEL_DVS_START1,
+ CTR_SEL_DVS_START2,
+ CTR_SEL_DVS_START3,
+};
+
+enum rk806_pin_dr_sel {
+ RK806_PIN_INPUT,
+ RK806_PIN_OUTPUT,
+};
+
+#define RK806_INT_POL_MSK BIT(1)
+#define RK806_INT_POL_H BIT(1)
+#define RK806_INT_POL_L 0
+
+/* SYS_CFG3 */
+#define RK806_RST_FUN_MSK GENMASK(7, 6)
+#define RK806_SLAVE_RESTART_FUN_MSK BIT(1)
+#define RK806_SLAVE_RESTART_FUN_EN BIT(1)
+#define RK806_SLAVE_RESTART_FUN_OFF 0
+
+#define RK806_SYS_ENB2_2M_MSK BIT(1)
+#define RK806_SYS_ENB2_2M_EN BIT(1)
+#define RK806_SYS_ENB2_2M_OFF 0
+
+enum rk806_int_fun {
+ RK806_INT_ONLY,
+ RK806_INT_ADN_WKUP,
+};
+
+enum rk806_dvs_mode {
+ RK806_DVS_NOT_SUPPORT,
+ RK806_DVS_START1,
+ RK806_DVS_START2,
+ RK806_DVS_START3,
+ RK806_DVS_PWRCTRL1,
+ RK806_DVS_PWRCTRL2,
+ RK806_DVS_PWRCTRL3,
+ RK806_DVS_START_PWRCTR1,
+ RK806_DVS_START_PWRCTR2,
+ RK806_DVS_START_PWRCTR3,
+ RK806_DVS_END,
+};
+
/* RK808 IRQ Definitions */
#define RK808_IRQ_VOUT_LO 0
#define RK808_IRQ_VB_LO 1
@@ -373,6 +925,7 @@ enum rk805_reg {
#define SWITCH2_EN BIT(6)
#define SWITCH1_EN BIT(5)
#define DEV_OFF_RST BIT(3)
+#define DEV_RST BIT(2)
#define DEV_OFF BIT(0)
#define RTC_STOP BIT(0)
@@ -382,6 +935,7 @@ enum rk805_reg {
#define VOUT_LO_INT BIT(0)
#define CLK32KOUT2_EN BIT(0)
+#define TEMP105C 0x08
#define TEMP115C 0x0c
#define TEMP_HOTDIE_MSK 0x0c
#define SLP_SD_MSK (0x3 << 2)
@@ -437,6 +991,158 @@ enum rk809_reg_id {
#define RK817_RTC_COMP_LSB_REG 0x10
#define RK817_RTC_COMP_MSB_REG 0x11
+/* RK817 Codec Registers */
+#define RK817_CODEC_DTOP_VUCTL 0x12
+#define RK817_CODEC_DTOP_VUCTIME 0x13
+#define RK817_CODEC_DTOP_LPT_SRST 0x14
+#define RK817_CODEC_DTOP_DIGEN_CLKE 0x15
+#define RK817_CODEC_AREF_RTCFG0 0x16
+#define RK817_CODEC_AREF_RTCFG1 0x17
+#define RK817_CODEC_AADC_CFG0 0x18
+#define RK817_CODEC_AADC_CFG1 0x19
+#define RK817_CODEC_DADC_VOLL 0x1a
+#define RK817_CODEC_DADC_VOLR 0x1b
+#define RK817_CODEC_DADC_SR_ACL0 0x1e
+#define RK817_CODEC_DADC_ALC1 0x1f
+#define RK817_CODEC_DADC_ALC2 0x20
+#define RK817_CODEC_DADC_NG 0x21
+#define RK817_CODEC_DADC_HPF 0x22
+#define RK817_CODEC_DADC_RVOLL 0x23
+#define RK817_CODEC_DADC_RVOLR 0x24
+#define RK817_CODEC_AMIC_CFG0 0x27
+#define RK817_CODEC_AMIC_CFG1 0x28
+#define RK817_CODEC_DMIC_PGA_GAIN 0x29
+#define RK817_CODEC_DMIC_LMT1 0x2a
+#define RK817_CODEC_DMIC_LMT2 0x2b
+#define RK817_CODEC_DMIC_NG1 0x2c
+#define RK817_CODEC_DMIC_NG2 0x2d
+#define RK817_CODEC_ADAC_CFG0 0x2e
+#define RK817_CODEC_ADAC_CFG1 0x2f
+#define RK817_CODEC_DDAC_POPD_DACST 0x30
+#define RK817_CODEC_DDAC_VOLL 0x31
+#define RK817_CODEC_DDAC_VOLR 0x32
+#define RK817_CODEC_DDAC_SR_LMT0 0x35
+#define RK817_CODEC_DDAC_LMT1 0x36
+#define RK817_CODEC_DDAC_LMT2 0x37
+#define RK817_CODEC_DDAC_MUTE_MIXCTL 0x38
+#define RK817_CODEC_DDAC_RVOLL 0x39
+#define RK817_CODEC_DDAC_RVOLR 0x3a
+#define RK817_CODEC_AHP_ANTI0 0x3b
+#define RK817_CODEC_AHP_ANTI1 0x3c
+#define RK817_CODEC_AHP_CFG0 0x3d
+#define RK817_CODEC_AHP_CFG1 0x3e
+#define RK817_CODEC_AHP_CP 0x3f
+#define RK817_CODEC_ACLASSD_CFG1 0x40
+#define RK817_CODEC_ACLASSD_CFG2 0x41
+#define RK817_CODEC_APLL_CFG0 0x42
+#define RK817_CODEC_APLL_CFG1 0x43
+#define RK817_CODEC_APLL_CFG2 0x44
+#define RK817_CODEC_APLL_CFG3 0x45
+#define RK817_CODEC_APLL_CFG4 0x46
+#define RK817_CODEC_APLL_CFG5 0x47
+#define RK817_CODEC_DI2S_CKM 0x48
+#define RK817_CODEC_DI2S_RSD 0x49
+#define RK817_CODEC_DI2S_RXCR1 0x4a
+#define RK817_CODEC_DI2S_RXCR2 0x4b
+#define RK817_CODEC_DI2S_RXCMD_TSD 0x4c
+#define RK817_CODEC_DI2S_TXCR1 0x4d
+#define RK817_CODEC_DI2S_TXCR2 0x4e
+#define RK817_CODEC_DI2S_TXCR3_TXCMD 0x4f
+
+/* RK817_CODEC_DI2S_CKM */
+#define RK817_I2S_MODE_MASK (0x1 << 0)
+#define RK817_I2S_MODE_MST (0x1 << 0)
+#define RK817_I2S_MODE_SLV (0x0 << 0)
+
+/* RK817_CODEC_DDAC_MUTE_MIXCTL */
+#define DACMT_MASK (0x1 << 0)
+#define DACMT_ENABLE (0x1 << 0)
+#define DACMT_DISABLE (0x0 << 0)
+
+/* RK817_CODEC_DI2S_RXCR2 */
+#define VDW_RX_24BITS (0x17)
+#define VDW_RX_16BITS (0x0f)
+
+/* RK817_CODEC_DI2S_TXCR2 */
+#define VDW_TX_24BITS (0x17)
+#define VDW_TX_16BITS (0x0f)
+
+/* RK817_CODEC_AMIC_CFG0 */
+#define MIC_DIFF_MASK (0x1 << 7)
+#define MIC_DIFF_DIS (0x0 << 7)
+#define MIC_DIFF_EN (0x1 << 7)
+
+/* RK817 Battery Registers */
+#define RK817_GAS_GAUGE_ADC_CONFIG0 0x50
+#define RK817_GG_EN (0x1 << 7)
+#define RK817_SYS_VOL_ADC_EN (0x1 << 6)
+#define RK817_TS_ADC_EN (0x1 << 5)
+#define RK817_USB_VOL_ADC_EN (0x1 << 4)
+#define RK817_BAT_VOL_ADC_EN (0x1 << 3)
+#define RK817_BAT_CUR_ADC_EN (0x1 << 2)
+
+#define RK817_GAS_GAUGE_ADC_CONFIG1 0x55
+
+#define RK817_VOL_CUR_CALIB_UPD BIT(7)
+
+#define RK817_GAS_GAUGE_GG_CON 0x56
+#define RK817_GAS_GAUGE_GG_STS 0x57
+
+#define RK817_BAT_CON (0x1 << 4)
+#define RK817_RELAX_VOL_UPD (0x3 << 2)
+#define RK817_RELAX_STS (0x1 << 1)
+
+#define RK817_GAS_GAUGE_RELAX_THRE_H 0x58
+#define RK817_GAS_GAUGE_RELAX_THRE_L 0x59
+#define RK817_GAS_GAUGE_OCV_THRE_VOL 0x62
+#define RK817_GAS_GAUGE_OCV_VOL_H 0x63
+#define RK817_GAS_GAUGE_OCV_VOL_L 0x64
+#define RK817_GAS_GAUGE_PWRON_VOL_H 0x6b
+#define RK817_GAS_GAUGE_PWRON_VOL_L 0x6c
+#define RK817_GAS_GAUGE_PWRON_CUR_H 0x6d
+#define RK817_GAS_GAUGE_PWRON_CUR_L 0x6e
+#define RK817_GAS_GAUGE_OFF_CNT 0x6f
+#define RK817_GAS_GAUGE_Q_INIT_H3 0x70
+#define RK817_GAS_GAUGE_Q_INIT_H2 0x71
+#define RK817_GAS_GAUGE_Q_INIT_L1 0x72
+#define RK817_GAS_GAUGE_Q_INIT_L0 0x73
+#define RK817_GAS_GAUGE_Q_PRES_H3 0x74
+#define RK817_GAS_GAUGE_Q_PRES_H2 0x75
+#define RK817_GAS_GAUGE_Q_PRES_L1 0x76
+#define RK817_GAS_GAUGE_Q_PRES_L0 0x77
+#define RK817_GAS_GAUGE_BAT_VOL_H 0x78
+#define RK817_GAS_GAUGE_BAT_VOL_L 0x79
+#define RK817_GAS_GAUGE_BAT_CUR_H 0x7a
+#define RK817_GAS_GAUGE_BAT_CUR_L 0x7b
+#define RK817_GAS_GAUGE_USB_VOL_H 0x7e
+#define RK817_GAS_GAUGE_USB_VOL_L 0x7f
+#define RK817_GAS_GAUGE_SYS_VOL_H 0x80
+#define RK817_GAS_GAUGE_SYS_VOL_L 0x81
+#define RK817_GAS_GAUGE_Q_MAX_H3 0x82
+#define RK817_GAS_GAUGE_Q_MAX_H2 0x83
+#define RK817_GAS_GAUGE_Q_MAX_L1 0x84
+#define RK817_GAS_GAUGE_Q_MAX_L0 0x85
+#define RK817_GAS_GAUGE_SLEEP_CON_SAMP_CUR_H 0x8f
+#define RK817_GAS_GAUGE_SLEEP_CON_SAMP_CUR_L 0x90
+#define RK817_GAS_GAUGE_CAL_OFFSET_H 0x91
+#define RK817_GAS_GAUGE_CAL_OFFSET_L 0x92
+#define RK817_GAS_GAUGE_VCALIB0_H 0x93
+#define RK817_GAS_GAUGE_VCALIB0_L 0x94
+#define RK817_GAS_GAUGE_VCALIB1_H 0x95
+#define RK817_GAS_GAUGE_VCALIB1_L 0x96
+#define RK817_GAS_GAUGE_IOFFSET_H 0x97
+#define RK817_GAS_GAUGE_IOFFSET_L 0x98
+#define RK817_GAS_GAUGE_BAT_R1 0x9a
+#define RK817_GAS_GAUGE_BAT_R2 0x9b
+#define RK817_GAS_GAUGE_BAT_R3 0x9c
+#define RK817_GAS_GAUGE_DATA0 0x9d
+#define RK817_GAS_GAUGE_DATA1 0x9e
+#define RK817_GAS_GAUGE_DATA2 0x9f
+#define RK817_GAS_GAUGE_DATA3 0xa0
+#define RK817_GAS_GAUGE_DATA4 0xa1
+#define RK817_GAS_GAUGE_DATA5 0xa2
+#define RK817_GAS_GAUGE_CUR_ADC_K0 0xb0
+
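The gas gauge exposes its 16-bit quantities as _H/_L register pairs, high byte first, so a bulk read of two bytes starting at the _H offset yields the whole value. A sketch, assuming a regmap handle from the MFD core (not part of the patch):

	static int rk817_gg_read_u16(struct regmap *regmap, unsigned int reg_h,
				     u16 *val)
	{
		u8 buf[2];
		int ret;

		ret = regmap_bulk_read(regmap, reg_h, buf, sizeof(buf));
		if (ret)
			return ret;

		*val = (buf[0] << 8) | buf[1];	/* e.g. RK817_GAS_GAUGE_BAT_VOL_H */
		return 0;
	}
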
#define RK817_POWER_EN_REG(i) (0xb1 + (i))
#define RK817_POWER_SLP_EN_REG(i) (0xb5 + (i))
@@ -462,10 +1168,30 @@ enum rk809_reg_id {
#define RK817_LDO_ON_VSEL_REG(idx) (0xcc + (idx) * 2)
#define RK817_BOOST_OTG_CFG (0xde)
+#define RK817_PMIC_CHRG_OUT 0xe4
+#define RK817_CHRG_VOL_SEL (0x07 << 4)
+#define RK817_CHRG_CUR_SEL (0x07 << 0)
+
+#define RK817_PMIC_CHRG_IN 0xe5
+#define RK817_USB_VLIM_EN (0x01 << 7)
+#define RK817_USB_VLIM_SEL (0x07 << 4)
+#define RK817_USB_ILIM_EN (0x01 << 3)
+#define RK817_USB_ILIM_SEL (0x07 << 0)
+#define RK817_PMIC_CHRG_TERM 0xe6
+#define RK817_CHRG_TERM_ANA_DIG (0x01 << 2)
+#define RK817_CHRG_TERM_ANA_SEL (0x03 << 0)
+#define RK817_CHRG_EN (0x01 << 6)
+
+#define RK817_PMIC_CHRG_STS 0xeb
+#define RK817_BAT_EXS BIT(7)
+#define RK817_CHG_STS (0x07 << 4)
+
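Decoding the charger status register combines the battery-presence bit with the 3-bit state field above. An illustrative helper (regmap handle assumed):

	static int rk817_charge_state(struct regmap *regmap, unsigned int *state)
	{
		unsigned int val;
		int ret;

		ret = regmap_read(regmap, RK817_PMIC_CHRG_STS, &val);
		if (ret)
			return ret;

		if (!(val & RK817_BAT_EXS))
			return -ENODEV;		/* no battery attached */

		*state = (val & RK817_CHG_STS) >> 4;
		return 0;
	}
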
#define RK817_ID_MSB 0xed
#define RK817_ID_LSB 0xee
#define RK817_SYS_STS 0xf0
+#define RK817_PLUG_IN_STS (0x1 << 6)
+
#define RK817_SYS_CFG(i) (0xf1 + (i))
#define RK817_ON_SOURCE_REG 0xf5
@@ -607,18 +1333,26 @@ enum {
enum {
RK805_ID = 0x8050,
+ RK806_ID = 0x8060,
RK808_ID = 0x0000,
RK809_ID = 0x8090,
+ RK816_ID = 0x8160,
RK817_ID = 0x8170,
RK818_ID = 0x8180,
};
struct rk808 {
- struct i2c_client *i2c;
+ struct device *dev;
struct regmap_irq_chip_data *irq_data;
struct regmap *regmap;
long variant;
const struct regmap_config *regmap_cfg;
const struct regmap_irq_chip *regmap_irq_chip;
};
+
+void rk8xx_shutdown(struct device *dev);
+int rk8xx_probe(struct device *dev, int variant, unsigned int irq, struct regmap *regmap);
+int rk8xx_suspend(struct device *dev);
+int rk8xx_resume(struct device *dev);
+
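The new prototypes let bus-specific glue drivers hand a regmap and IRQ to the shared core. A sketch of an I2C front end calling in (the regmap config name below is a stand-in, not from this patch):

	static int rk8xx_i2c_probe_sketch(struct i2c_client *client)
	{
		struct regmap *regmap;

		/* rk817_regmap_config is illustrative; use the real bus config */
		regmap = devm_regmap_init_i2c(client, &rk817_regmap_config);
		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		return rk8xx_probe(&client->dev, RK817_ID, client->irq, regmap);
	}
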
#endif /* __LINUX_REGULATOR_RK808_H */
diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h
index 8aa0bda1af4f..aacb6d51e99c 100644
--- a/include/linux/mfd/rn5t618.h
+++ b/include/linux/mfd/rn5t618.h
@@ -227,6 +227,15 @@
#define RN5T618_WATCHDOG_WDOGTIM_S 0
#define RN5T618_PWRIRQ_IR_WDOG BIT(6)
+#define RN5T618_POFFHIS_PWRON BIT(0)
+#define RN5T618_POFFHIS_TSHUT BIT(1)
+#define RN5T618_POFFHIS_VINDET BIT(2)
+#define RN5T618_POFFHIS_IODET BIT(3)
+#define RN5T618_POFFHIS_CPU BIT(4)
+#define RN5T618_POFFHIS_WDG BIT(5)
+#define RN5T618_POFFHIS_DCLIM BIT(6)
+#define RN5T618_POFFHIS_N_OE BIT(7)
+
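The new POFFHIS bits record why the PMIC last powered off. An illustrative decode, assuming the RN5T618_POFFHIS register offset defined earlier in this header and a regmap/dev pair from the MFD core:

	unsigned int his;

	if (!regmap_read(regmap, RN5T618_POFFHIS, &his)) {
		if (his & RN5T618_POFFHIS_WDG)
			dev_info(dev, "last power-off: watchdog\n");
		else if (his & RN5T618_POFFHIS_TSHUT)
			dev_info(dev, "last power-off: thermal shutdown\n");
	}
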
enum {
RN5T618_DCDC1,
RN5T618_DCDC2,
diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h
deleted file mode 100644
index a57af878fd0c..000000000000
--- a/include/linux/mfd/rohm-bd70528.h
+++ /dev/null
@@ -1,391 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* Copyright (C) 2018 ROHM Semiconductors */
-
-#ifndef __LINUX_MFD_BD70528_H__
-#define __LINUX_MFD_BD70528_H__
-
-#include <linux/bits.h>
-#include <linux/device.h>
-#include <linux/mfd/rohm-generic.h>
-#include <linux/mfd/rohm-shared.h>
-#include <linux/regmap.h>
-
-enum {
- BD70528_BUCK1,
- BD70528_BUCK2,
- BD70528_BUCK3,
- BD70528_LDO1,
- BD70528_LDO2,
- BD70528_LDO3,
- BD70528_LED1,
- BD70528_LED2,
-};
-
-struct bd70528_data {
- struct rohm_regmap_dev chip;
- struct mutex rtc_timer_lock;
-};
-
-#define BD70528_BUCK_VOLTS 17
-#define BD70528_LDO_VOLTS 0x20
-
-#define BD70528_REG_BUCK1_EN 0x0F
-#define BD70528_REG_BUCK1_VOLT 0x15
-#define BD70528_REG_BUCK2_EN 0x10
-#define BD70528_REG_BUCK2_VOLT 0x16
-#define BD70528_REG_BUCK3_EN 0x11
-#define BD70528_REG_BUCK3_VOLT 0x17
-#define BD70528_REG_LDO1_EN 0x1b
-#define BD70528_REG_LDO1_VOLT 0x1e
-#define BD70528_REG_LDO2_EN 0x1c
-#define BD70528_REG_LDO2_VOLT 0x1f
-#define BD70528_REG_LDO3_EN 0x1d
-#define BD70528_REG_LDO3_VOLT 0x20
-#define BD70528_REG_LED_CTRL 0x2b
-#define BD70528_REG_LED_VOLT 0x29
-#define BD70528_REG_LED_EN 0x2a
-
-/* main irq registers */
-#define BD70528_REG_INT_MAIN 0x7E
-#define BD70528_REG_INT_MAIN_MASK 0x74
-
-/* 'sub irq' registers */
-#define BD70528_REG_INT_SHDN 0x7F
-#define BD70528_REG_INT_PWR_FLT 0x80
-#define BD70528_REG_INT_VR_FLT 0x81
-#define BD70528_REG_INT_MISC 0x82
-#define BD70528_REG_INT_BAT1 0x83
-#define BD70528_REG_INT_BAT2 0x84
-#define BD70528_REG_INT_RTC 0x85
-#define BD70528_REG_INT_GPIO 0x86
-#define BD70528_REG_INT_OP_FAIL 0x87
-
-#define BD70528_REG_INT_SHDN_MASK 0x75
-#define BD70528_REG_INT_PWR_FLT_MASK 0x76
-#define BD70528_REG_INT_VR_FLT_MASK 0x77
-#define BD70528_REG_INT_MISC_MASK 0x78
-#define BD70528_REG_INT_BAT1_MASK 0x79
-#define BD70528_REG_INT_BAT2_MASK 0x7a
-#define BD70528_REG_INT_RTC_MASK 0x7b
-#define BD70528_REG_INT_GPIO_MASK 0x7c
-#define BD70528_REG_INT_OP_FAIL_MASK 0x7d
-
-/* Reset related 'magic' registers */
-#define BD70528_REG_SHIPMODE 0x03
-#define BD70528_REG_HWRESET 0x04
-#define BD70528_REG_WARMRESET 0x05
-#define BD70528_REG_STANDBY 0x06
-
-/* GPIO registers */
-#define BD70528_REG_GPIO_STATE 0x8F
-
-#define BD70528_REG_GPIO1_IN 0x4d
-#define BD70528_REG_GPIO2_IN 0x4f
-#define BD70528_REG_GPIO3_IN 0x51
-#define BD70528_REG_GPIO4_IN 0x53
-#define BD70528_REG_GPIO1_OUT 0x4e
-#define BD70528_REG_GPIO2_OUT 0x50
-#define BD70528_REG_GPIO3_OUT 0x52
-#define BD70528_REG_GPIO4_OUT 0x54
-
-/* RTC */
-
-#define BD70528_REG_RTC_COUNT_H 0x2d
-#define BD70528_REG_RTC_COUNT_L 0x2e
-#define BD70528_REG_RTC_SEC 0x2f
-#define BD70528_REG_RTC_MINUTE 0x30
-#define BD70528_REG_RTC_HOUR 0x31
-#define BD70528_REG_RTC_WEEK 0x32
-#define BD70528_REG_RTC_DAY 0x33
-#define BD70528_REG_RTC_MONTH 0x34
-#define BD70528_REG_RTC_YEAR 0x35
-
-#define BD70528_REG_RTC_ALM_SEC 0x36
-#define BD70528_REG_RTC_ALM_START BD70528_REG_RTC_ALM_SEC
-#define BD70528_REG_RTC_ALM_MINUTE 0x37
-#define BD70528_REG_RTC_ALM_HOUR 0x38
-#define BD70528_REG_RTC_ALM_WEEK 0x39
-#define BD70528_REG_RTC_ALM_DAY 0x3a
-#define BD70528_REG_RTC_ALM_MONTH 0x3b
-#define BD70528_REG_RTC_ALM_YEAR 0x3c
-#define BD70528_REG_RTC_ALM_MASK 0x3d
-#define BD70528_REG_RTC_ALM_REPEAT 0x3e
-#define BD70528_REG_RTC_START BD70528_REG_RTC_SEC
-
-#define BD70528_REG_RTC_WAKE_SEC 0x43
-#define BD70528_REG_RTC_WAKE_START BD70528_REG_RTC_WAKE_SEC
-#define BD70528_REG_RTC_WAKE_MIN 0x44
-#define BD70528_REG_RTC_WAKE_HOUR 0x45
-#define BD70528_REG_RTC_WAKE_CTRL 0x46
-
-#define BD70528_REG_ELAPSED_TIMER_EN 0x42
-#define BD70528_REG_WAKE_EN 0x46
-
-/* WDT registers */
-#define BD70528_REG_WDT_CTRL 0x4A
-#define BD70528_REG_WDT_HOUR 0x49
-#define BD70528_REG_WDT_MINUTE 0x48
-#define BD70528_REG_WDT_SEC 0x47
-
-/* Charger / Battery */
-#define BD70528_REG_CHG_CURR_STAT 0x59
-#define BD70528_REG_CHG_BAT_STAT 0x57
-#define BD70528_REG_CHG_BAT_TEMP 0x58
-#define BD70528_REG_CHG_IN_STAT 0x56
-#define BD70528_REG_CHG_DCIN_ILIM 0x5d
-#define BD70528_REG_CHG_CHG_CURR_WARM 0x61
-#define BD70528_REG_CHG_CHG_CURR_COLD 0x62
-
-/* Masks for main IRQ register bits */
-enum {
- BD70528_INT_SHDN,
-#define BD70528_INT_SHDN_MASK BIT(BD70528_INT_SHDN)
- BD70528_INT_PWR_FLT,
-#define BD70528_INT_PWR_FLT_MASK BIT(BD70528_INT_PWR_FLT)
- BD70528_INT_VR_FLT,
-#define BD70528_INT_VR_FLT_MASK BIT(BD70528_INT_VR_FLT)
- BD70528_INT_MISC,
-#define BD70528_INT_MISC_MASK BIT(BD70528_INT_MISC)
- BD70528_INT_BAT1,
-#define BD70528_INT_BAT1_MASK BIT(BD70528_INT_BAT1)
- BD70528_INT_RTC,
-#define BD70528_INT_RTC_MASK BIT(BD70528_INT_RTC)
- BD70528_INT_GPIO,
-#define BD70528_INT_GPIO_MASK BIT(BD70528_INT_GPIO)
- BD70528_INT_OP_FAIL,
-#define BD70528_INT_OP_FAIL_MASK BIT(BD70528_INT_OP_FAIL)
-};
-
-/* IRQs */
-enum {
- /* Shutdown register IRQs */
- BD70528_INT_LONGPUSH,
- BD70528_INT_WDT,
- BD70528_INT_HWRESET,
- BD70528_INT_RSTB_FAULT,
- BD70528_INT_VBAT_UVLO,
- BD70528_INT_TSD,
- BD70528_INT_RSTIN,
- /* Power failure register IRQs */
- BD70528_INT_BUCK1_FAULT,
- BD70528_INT_BUCK2_FAULT,
- BD70528_INT_BUCK3_FAULT,
- BD70528_INT_LDO1_FAULT,
- BD70528_INT_LDO2_FAULT,
- BD70528_INT_LDO3_FAULT,
- BD70528_INT_LED1_FAULT,
- BD70528_INT_LED2_FAULT,
- /* VR FAULT register IRQs */
- BD70528_INT_BUCK1_OCP,
- BD70528_INT_BUCK2_OCP,
- BD70528_INT_BUCK3_OCP,
- BD70528_INT_LED1_OCP,
- BD70528_INT_LED2_OCP,
- BD70528_INT_BUCK1_FULLON,
- BD70528_INT_BUCK2_FULLON,
- /* PMU register interrupts */
- BD70528_INT_SHORTPUSH,
- BD70528_INT_AUTO_WAKEUP,
- BD70528_INT_STATE_CHANGE,
- /* Charger 1 register IRQs */
- BD70528_INT_BAT_OV_RES,
- BD70528_INT_BAT_OV_DET,
- BD70528_INT_DBAT_DET,
- BD70528_INT_BATTSD_COLD_RES,
- BD70528_INT_BATTSD_COLD_DET,
- BD70528_INT_BATTSD_HOT_RES,
- BD70528_INT_BATTSD_HOT_DET,
- BD70528_INT_CHG_TSD,
- /* Charger 2 register IRQs */
- BD70528_INT_BAT_RMV,
- BD70528_INT_BAT_DET,
- BD70528_INT_DCIN2_OV_RES,
- BD70528_INT_DCIN2_OV_DET,
- BD70528_INT_DCIN2_RMV,
- BD70528_INT_DCIN2_DET,
- BD70528_INT_DCIN1_RMV,
- BD70528_INT_DCIN1_DET,
- /* RTC register IRQs */
- BD70528_INT_RTC_ALARM,
- BD70528_INT_ELPS_TIM,
- /* GPIO register IRQs */
- BD70528_INT_GPIO0,
- BD70528_INT_GPIO1,
- BD70528_INT_GPIO2,
- BD70528_INT_GPIO3,
- /* Invalid operation register IRQs */
- BD70528_INT_BUCK1_DVS_OPFAIL,
- BD70528_INT_BUCK2_DVS_OPFAIL,
- BD70528_INT_BUCK3_DVS_OPFAIL,
- BD70528_INT_LED1_VOLT_OPFAIL,
- BD70528_INT_LED2_VOLT_OPFAIL,
-};
-
-/* Masks */
-#define BD70528_INT_LONGPUSH_MASK 0x1
-#define BD70528_INT_WDT_MASK 0x2
-#define BD70528_INT_HWRESET_MASK 0x4
-#define BD70528_INT_RSTB_FAULT_MASK 0x8
-#define BD70528_INT_VBAT_UVLO_MASK 0x10
-#define BD70528_INT_TSD_MASK 0x20
-#define BD70528_INT_RSTIN_MASK 0x40
-
-#define BD70528_INT_BUCK1_FAULT_MASK 0x1
-#define BD70528_INT_BUCK2_FAULT_MASK 0x2
-#define BD70528_INT_BUCK3_FAULT_MASK 0x4
-#define BD70528_INT_LDO1_FAULT_MASK 0x8
-#define BD70528_INT_LDO2_FAULT_MASK 0x10
-#define BD70528_INT_LDO3_FAULT_MASK 0x20
-#define BD70528_INT_LED1_FAULT_MASK 0x40
-#define BD70528_INT_LED2_FAULT_MASK 0x80
-
-#define BD70528_INT_BUCK1_OCP_MASK 0x1
-#define BD70528_INT_BUCK2_OCP_MASK 0x2
-#define BD70528_INT_BUCK3_OCP_MASK 0x4
-#define BD70528_INT_LED1_OCP_MASK 0x8
-#define BD70528_INT_LED2_OCP_MASK 0x10
-#define BD70528_INT_BUCK1_FULLON_MASK 0x20
-#define BD70528_INT_BUCK2_FULLON_MASK 0x40
-
-#define BD70528_INT_SHORTPUSH_MASK 0x1
-#define BD70528_INT_AUTO_WAKEUP_MASK 0x2
-#define BD70528_INT_STATE_CHANGE_MASK 0x10
-
-#define BD70528_INT_BAT_OV_RES_MASK 0x1
-#define BD70528_INT_BAT_OV_DET_MASK 0x2
-#define BD70528_INT_DBAT_DET_MASK 0x4
-#define BD70528_INT_BATTSD_COLD_RES_MASK 0x8
-#define BD70528_INT_BATTSD_COLD_DET_MASK 0x10
-#define BD70528_INT_BATTSD_HOT_RES_MASK 0x20
-#define BD70528_INT_BATTSD_HOT_DET_MASK 0x40
-#define BD70528_INT_CHG_TSD_MASK 0x80
-
-#define BD70528_INT_BAT_RMV_MASK 0x1
-#define BD70528_INT_BAT_DET_MASK 0x2
-#define BD70528_INT_DCIN2_OV_RES_MASK 0x4
-#define BD70528_INT_DCIN2_OV_DET_MASK 0x8
-#define BD70528_INT_DCIN2_RMV_MASK 0x10
-#define BD70528_INT_DCIN2_DET_MASK 0x20
-#define BD70528_INT_DCIN1_RMV_MASK 0x40
-#define BD70528_INT_DCIN1_DET_MASK 0x80
-
-#define BD70528_INT_RTC_ALARM_MASK 0x1
-#define BD70528_INT_ELPS_TIM_MASK 0x2
-
-#define BD70528_INT_GPIO0_MASK 0x1
-#define BD70528_INT_GPIO1_MASK 0x2
-#define BD70528_INT_GPIO2_MASK 0x4
-#define BD70528_INT_GPIO3_MASK 0x8
-
-#define BD70528_INT_BUCK1_DVS_OPFAIL_MASK 0x1
-#define BD70528_INT_BUCK2_DVS_OPFAIL_MASK 0x2
-#define BD70528_INT_BUCK3_DVS_OPFAIL_MASK 0x4
-#define BD70528_INT_LED1_VOLT_OPFAIL_MASK 0x10
-#define BD70528_INT_LED2_VOLT_OPFAIL_MASK 0x20
-
-#define BD70528_DEBOUNCE_MASK 0x3
-
-#define BD70528_DEBOUNCE_DISABLE 0
-#define BD70528_DEBOUNCE_15MS 1
-#define BD70528_DEBOUNCE_30MS 2
-#define BD70528_DEBOUNCE_50MS 3
-
-#define BD70528_GPIO_DRIVE_MASK 0x2
-#define BD70528_GPIO_PUSH_PULL 0x0
-#define BD70528_GPIO_OPEN_DRAIN 0x2
-
-#define BD70528_GPIO_OUT_EN_MASK 0x80
-#define BD70528_GPIO_OUT_ENABLE 0x80
-#define BD70528_GPIO_OUT_DISABLE 0x0
-
-#define BD70528_GPIO_OUT_HI 0x1
-#define BD70528_GPIO_OUT_LO 0x0
-#define BD70528_GPIO_OUT_MASK 0x1
-
-#define BD70528_GPIO_IN_STATE_BASE 1
-
-/* RTC masks to mask out reserved bits */
-
-#define BD70528_MASK_ELAPSED_TIMER_EN 0x1
-/* Mask second, min and hour fields
- * HW would support ALM irq for over 24h
- * (by setting day, month and year too)
- * but as we wish to keep this same as for
- * wake-up we limit ALM to 24H and only
- * unmask sec, min and hour
- */
-#define BD70528_MASK_WAKE_EN 0x1
-
-/* WDT masks */
-#define BD70528_MASK_WDT_EN 0x1
-#define BD70528_MASK_WDT_HOUR 0x1
-#define BD70528_MASK_WDT_MINUTE 0x7f
-#define BD70528_MASK_WDT_SEC 0x7f
-
-#define BD70528_WDT_STATE_BIT 0x1
-#define BD70528_ELAPSED_STATE_BIT 0x2
-#define BD70528_WAKE_STATE_BIT 0x4
-
-/* Charger masks */
-#define BD70528_MASK_CHG_STAT 0x7f
-#define BD70528_MASK_CHG_BAT_TIMER 0x20
-#define BD70528_MASK_CHG_BAT_OVERVOLT 0x10
-#define BD70528_MASK_CHG_BAT_DETECT 0x1
-#define BD70528_MASK_CHG_DCIN1_UVLO 0x1
-#define BD70528_MASK_CHG_DCIN_ILIM 0x3f
-#define BD70528_MASK_CHG_CHG_CURR 0x1f
-#define BD70528_MASK_CHG_TRICKLE_CURR 0x10
-
-/*
- * Note, external battery register is the lonely rider at
- * address 0xc5. See how to stuff that in the regmap
- */
-#define BD70528_MAX_REGISTER 0x94
-
-/* Buck control masks */
-#define BD70528_MASK_RUN_EN 0x4
-#define BD70528_MASK_STBY_EN 0x2
-#define BD70528_MASK_IDLE_EN 0x1
-#define BD70528_MASK_LED1_EN 0x1
-#define BD70528_MASK_LED2_EN 0x10
-
-#define BD70528_MASK_BUCK_VOLT 0xf
-#define BD70528_MASK_LDO_VOLT 0x1f
-#define BD70528_MASK_LED1_VOLT 0x1
-#define BD70528_MASK_LED2_VOLT 0x10
-
-/* Misc irq masks */
-#define BD70528_INT_MASK_SHORT_PUSH 1
-#define BD70528_INT_MASK_AUTO_WAKE 2
-#define BD70528_INT_MASK_POWER_STATE 4
-
-#define BD70528_MASK_BUCK_RAMP 0x10
-#define BD70528_SIFT_BUCK_RAMP 4
-
-#if IS_ENABLED(CONFIG_BD70528_WATCHDOG)
-
-int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable, int *old_state);
-void bd70528_wdt_lock(struct rohm_regmap_dev *data);
-void bd70528_wdt_unlock(struct rohm_regmap_dev *data);
-
-#else /* CONFIG_BD70528_WATCHDOG */
-
-static inline int bd70528_wdt_set(struct rohm_regmap_dev *data, int enable,
- int *old_state)
-{
- return 0;
-}
-
-static inline void bd70528_wdt_lock(struct rohm_regmap_dev *data)
-{
-}
-
-static inline void bd70528_wdt_unlock(struct rohm_regmap_dev *data)
-{
-}
-
-#endif /* CONFIG_BD70528_WATCHDOG */
-
-#endif /* __LINUX_MFD_BD70528_H__ */
diff --git a/include/linux/mfd/rohm-bd71828.h b/include/linux/mfd/rohm-bd71828.h
index c7ab69c87ee8..73a71ef69152 100644
--- a/include/linux/mfd/rohm-bd71828.h
+++ b/include/linux/mfd/rohm-bd71828.h
@@ -4,6 +4,7 @@
#ifndef __LINUX_MFD_BD71828_H__
#define __LINUX_MFD_BD71828_H__
+#include <linux/bits.h>
#include <linux/mfd/rohm-generic.h>
#include <linux/mfd/rohm-shared.h>
@@ -26,11 +27,11 @@ enum {
BD71828_REGULATOR_AMOUNT,
};
-#define BD71828_BUCK1267_VOLTS 0xEF
-#define BD71828_BUCK3_VOLTS 0x10
-#define BD71828_BUCK4_VOLTS 0x20
-#define BD71828_BUCK5_VOLTS 0x10
-#define BD71828_LDO_VOLTS 0x32
+#define BD71828_BUCK1267_VOLTS 0x100
+#define BD71828_BUCK3_VOLTS 0x20
+#define BD71828_BUCK4_VOLTS 0x40
+#define BD71828_BUCK5_VOLTS 0x20
+#define BD71828_LDO_VOLTS 0x40
/* LDO6 is fixed 1.8V voltage */
#define BD71828_LDO_6_VOLTAGE 1800000
@@ -41,7 +42,8 @@ enum {
#define BD71828_REG_PS_CTRL_2 0x05
#define BD71828_REG_PS_CTRL_3 0x06
-//#define BD71828_REG_SWRESET 0x06
+#define BD71828_MASK_STATE_HBNT BIT(1)
+
#define BD71828_MASK_RUN_LVL_CTRL 0x30
/* Regulator control masks */
@@ -133,7 +135,6 @@ enum {
#define BD71828_REG_LDO5_VOLT 0x43
#define BD71828_REG_LDO5_VOLT_OPT 0x42
#define BD71828_REG_LDO6_EN 0x44
-//#define BD71828_REG_LDO6_VOLT 0x4
#define BD71828_REG_LDO7_EN 0x45
#define BD71828_REG_LDO7_VOLT 0x46
@@ -188,6 +189,69 @@ enum {
/* Charger/Battery */
#define BD71828_REG_CHG_STATE 0x65
#define BD71828_REG_CHG_FULL 0xd2
+#define BD71828_REG_CHG_EN 0x6F
+#define BD71828_REG_DCIN_STAT 0x68
+#define BD71828_MASK_DCIN_DET 0x01
+#define BD71828_REG_VDCIN_U 0x9c
+#define BD71828_MASK_CHG_EN 0x01
+#define BD71828_CHG_MASK_DCIN_U 0x0f
+#define BD71828_REG_BAT_STAT 0x67
+#define BD71828_REG_BAT_TEMP 0x6c
+#define BD71828_MASK_BAT_TEMP 0x07
+#define BD71828_BAT_TEMP_OPEN 0x07
+#define BD71828_MASK_BAT_DET 0x20
+#define BD71828_MASK_BAT_DET_DONE 0x10
+#define BD71828_REG_CHG_STATE 0x65
+#define BD71828_REG_VBAT_U 0x8c
+#define BD71828_MASK_VBAT_U 0x0f
+#define BD71828_REG_VBAT_REX_AVG_U 0x92
+
+#define BD71828_REG_OCV_PWRON_U 0x8A
+
+#define BD71828_REG_VBAT_MIN_AVG_U 0x8e
+#define BD71828_REG_VBAT_MIN_AVG_L 0x8f
+
+#define BD71828_REG_CC_CNT3 0xb5
+#define BD71828_REG_CC_CNT2 0xb6
+#define BD71828_REG_CC_CNT1 0xb7
+#define BD71828_REG_CC_CNT0 0xb8
+#define BD71828_REG_CC_CURCD_AVG_U 0xb2
+#define BD71828_MASK_CC_CURCD_AVG_U 0x3f
+#define BD71828_MASK_CC_CUR_DIR 0x80
+#define BD71828_REG_VM_BTMP_U 0xa1
+#define BD71828_REG_VM_BTMP_L 0xa2
+#define BD71828_MASK_VM_BTMP_U 0x0f
+#define BD71828_REG_COULOMB_CTRL 0xc4
+#define BD71828_REG_COULOMB_CTRL2 0xd2
+#define BD71828_MASK_REX_CC_CLR 0x01
+#define BD71828_MASK_FULL_CC_CLR 0x10
+#define BD71828_REG_CC_CNT_FULL3 0xbd
+#define BD71828_REG_CC_CNT_CHG3 0xc1
+
+#define BD71828_REG_VBAT_INITIAL1_U 0x86
+#define BD71828_REG_VBAT_INITIAL1_L 0x87
+
+#define BD71828_REG_VBAT_INITIAL2_U 0x88
+#define BD71828_REG_VBAT_INITIAL2_L 0x89
+
+#define BD71828_REG_IBAT_U 0xb0
+#define BD71828_REG_IBAT_L 0xb1
+
+#define BD71828_REG_IBAT_AVG_U 0xb2
+#define BD71828_REG_IBAT_AVG_L 0xb3
+
+#define BD71828_REG_VSYS_AVG_U 0x96
+#define BD71828_REG_VSYS_AVG_L 0x97
+#define BD71828_REG_VSYS_MIN_AVG_U 0x98
+#define BD71828_REG_VSYS_MIN_AVG_L 0x99
+#define BD71828_REG_CHG_SET1 0x75
+#define BD71828_REG_ALM_VBAT_LIMIT_U 0xaa
+#define BD71828_REG_BATCAP_MON_LIMIT_U 0xcc
+#define BD71828_REG_CONF 0x64
+
+#define BD71828_REG_DCIN_CLPS 0x71
+
+#define BD71828_REG_MEAS_CLEAR 0xaf
/* LEDs */
#define BD71828_REG_LED_CTRL 0x4A
diff --git a/include/linux/mfd/rohm-bd96801.h b/include/linux/mfd/rohm-bd96801.h
new file mode 100644
index 000000000000..68c8ac8ad409
--- /dev/null
+++ b/include/linux/mfd/rohm-bd96801.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2024 ROHM Semiconductors */
+
+#ifndef __MFD_BD96801_H__
+#define __MFD_BD96801_H__
+
+#define BD96801_REG_SSCG_CTRL 0x09
+#define BD96801_REG_SHD_INTB 0x20
+#define BD96801_LDO5_VOL_LVL_REG 0x2c
+#define BD96801_LDO6_VOL_LVL_REG 0x2d
+#define BD96801_LDO7_VOL_LVL_REG 0x2e
+#define BD96801_REG_BUCK_OVP 0x30
+#define BD96801_REG_BUCK_OVD 0x35
+#define BD96801_REG_LDO_OVP 0x31
+#define BD96801_REG_LDO_OVD 0x36
+#define BD96801_REG_BOOT_OVERTIME 0x3a
+#define BD96801_REG_WD_TMO 0x40
+#define BD96801_REG_WD_CONF 0x41
+#define BD96801_REG_WD_FEED 0x42
+#define BD96801_REG_WD_FAILCOUNT 0x43
+#define BD96801_REG_WD_ASK 0x46
+#define BD96801_REG_WD_STATUS 0x4a
+#define BD96801_REG_PMIC_STATE 0x4f
+#define BD96801_REG_EXT_STATE 0x50
+
+#define BD96801_STATE_STBY 0x09
+
+#define BD96801_LOCK_REG 0x04
+#define BD96801_UNLOCK 0x9d
+#define BD96801_LOCK 0x00
+
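Configuration access is gated by the lock register: write the magic unlock value before touching protected registers and restore the lock afterwards. A minimal sketch, assuming a regmap handle:

	ret = regmap_write(regmap, BD96801_LOCK_REG, BD96801_UNLOCK);
	if (ret)
		return ret;

	/* ... update protected configuration registers ... */

	ret = regmap_write(regmap, BD96801_LOCK_REG, BD96801_LOCK);
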
+/* IRQ register area */
+#define BD96801_REG_INT_MAIN 0x51
+
+/*
+ * The BD96801 has two physical IRQ lines, INTB and ERRB.
+ *
+ * The 'main status register' is located at 0x51.
+ * The ERRB status registers are located at 0x52 ... 0x5B
+ * and the INTB status registers at 0x5c ... 0x63.
+ */
+#define BD96801_REG_INT_SYS_ERRB1 0x52
+#define BD96801_REG_INT_BUCK2_ERRB 0x56
+#define BD96801_REG_INT_SYS_INTB 0x5c
+#define BD96801_REG_INT_BUCK2_INTB 0x5e
+#define BD96801_REG_INT_LDO7_INTB 0x63
+
+/* MASK registers */
+#define BD96801_REG_MASK_SYS_INTB 0x73
+#define BD96801_REG_MASK_SYS_ERRB 0x69
+
+#define BD96801_MAX_REGISTER 0x7a
+
+#define BD96801_OTP_ERR_MASK BIT(0)
+#define BD96801_DBIST_ERR_MASK BIT(1)
+#define BD96801_EEP_ERR_MASK BIT(2)
+#define BD96801_ABIST_ERR_MASK BIT(3)
+#define BD96801_PRSTB_ERR_MASK BIT(4)
+#define BD96801_DRMOS1_ERR_MASK BIT(5)
+#define BD96801_DRMOS2_ERR_MASK BIT(6)
+#define BD96801_SLAVE_ERR_MASK BIT(7)
+#define BD96801_VREF_ERR_MASK BIT(0)
+#define BD96801_TSD_ERR_MASK BIT(1)
+#define BD96801_UVLO_ERR_MASK BIT(2)
+#define BD96801_OVLO_ERR_MASK BIT(3)
+#define BD96801_OSC_ERR_MASK BIT(4)
+#define BD96801_PON_ERR_MASK BIT(5)
+#define BD96801_POFF_ERR_MASK BIT(6)
+#define BD96801_CMD_SHDN_ERR_MASK BIT(7)
+#define BD96801_INT_PRSTB_WDT_ERR_MASK BIT(0)
+#define BD96801_INT_CHIP_IF_ERR_MASK BIT(3)
+#define BD96801_INT_SHDN_ERR_MASK BIT(7)
+#define BD96801_OUT_PVIN_ERR_MASK BIT(0)
+#define BD96801_OUT_OVP_ERR_MASK BIT(1)
+#define BD96801_OUT_UVP_ERR_MASK BIT(2)
+#define BD96801_OUT_SHDN_ERR_MASK BIT(7)
+
+/* ERRB IRQs */
+enum {
+ /* Reg 0x52, 0x53, 0x54 - ERRB system IRQs */
+ BD96801_OTP_ERR_STAT,
+ BD96801_DBIST_ERR_STAT,
+ BD96801_EEP_ERR_STAT,
+ BD96801_ABIST_ERR_STAT,
+ BD96801_PRSTB_ERR_STAT,
+ BD96801_DRMOS1_ERR_STAT,
+ BD96801_DRMOS2_ERR_STAT,
+ BD96801_SLAVE_ERR_STAT,
+ BD96801_VREF_ERR_STAT,
+ BD96801_TSD_ERR_STAT,
+ BD96801_UVLO_ERR_STAT,
+ BD96801_OVLO_ERR_STAT,
+ BD96801_OSC_ERR_STAT,
+ BD96801_PON_ERR_STAT,
+ BD96801_POFF_ERR_STAT,
+ BD96801_CMD_SHDN_ERR_STAT,
+ BD96801_INT_PRSTB_WDT_ERR,
+ BD96801_INT_CHIP_IF_ERR,
+ BD96801_INT_SHDN_ERR_STAT,
+
+ /* Reg 0x55 BUCK1 ERR IRQs */
+ BD96801_BUCK1_PVIN_ERR_STAT,
+ BD96801_BUCK1_OVP_ERR_STAT,
+ BD96801_BUCK1_UVP_ERR_STAT,
+ BD96801_BUCK1_SHDN_ERR_STAT,
+
+ /* Reg 0x56 BUCK2 ERR IRQs */
+ BD96801_BUCK2_PVIN_ERR_STAT,
+ BD96801_BUCK2_OVP_ERR_STAT,
+ BD96801_BUCK2_UVP_ERR_STAT,
+ BD96801_BUCK2_SHDN_ERR_STAT,
+
+ /* Reg 0x57 BUCK3 ERR IRQs */
+ BD96801_BUCK3_PVIN_ERR_STAT,
+ BD96801_BUCK3_OVP_ERR_STAT,
+ BD96801_BUCK3_UVP_ERR_STAT,
+ BD96801_BUCK3_SHDN_ERR_STAT,
+
+ /* Reg 0x58 BUCK4 ERR IRQs */
+ BD96801_BUCK4_PVIN_ERR_STAT,
+ BD96801_BUCK4_OVP_ERR_STAT,
+ BD96801_BUCK4_UVP_ERR_STAT,
+ BD96801_BUCK4_SHDN_ERR_STAT,
+
+ /* Reg 0x59 LDO5 ERR IRQs */
+ BD96801_LDO5_PVIN_ERR_STAT,
+ BD96801_LDO5_OVP_ERR_STAT,
+ BD96801_LDO5_UVP_ERR_STAT,
+ BD96801_LDO5_SHDN_ERR_STAT,
+
+ /* Reg 0x5a LDO6 ERR IRQs */
+ BD96801_LDO6_PVIN_ERR_STAT,
+ BD96801_LDO6_OVP_ERR_STAT,
+ BD96801_LDO6_UVP_ERR_STAT,
+ BD96801_LDO6_SHDN_ERR_STAT,
+
+ /* Reg 0x5b LDO7 ERR IRQs */
+ BD96801_LDO7_PVIN_ERR_STAT,
+ BD96801_LDO7_OVP_ERR_STAT,
+ BD96801_LDO7_UVP_ERR_STAT,
+ BD96801_LDO7_SHDN_ERR_STAT,
+};
+
+/* INTB IRQs */
+enum {
+ /* Reg 0x5c (System INTB) */
+ BD96801_TW_STAT,
+ BD96801_WDT_ERR_STAT,
+ BD96801_I2C_ERR_STAT,
+ BD96801_CHIP_IF_ERR_STAT,
+
+ /* Reg 0x5d (BUCK1 INTB) */
+ BD96801_BUCK1_OCPH_STAT,
+ BD96801_BUCK1_OCPL_STAT,
+ BD96801_BUCK1_OCPN_STAT,
+ BD96801_BUCK1_OVD_STAT,
+ BD96801_BUCK1_UVD_STAT,
+ BD96801_BUCK1_TW_CH_STAT,
+
+ /* Reg 0x5e (BUCK2 INTB) */
+ BD96801_BUCK2_OCPH_STAT,
+ BD96801_BUCK2_OCPL_STAT,
+ BD96801_BUCK2_OCPN_STAT,
+ BD96801_BUCK2_OVD_STAT,
+ BD96801_BUCK2_UVD_STAT,
+ BD96801_BUCK2_TW_CH_STAT,
+
+ /* Reg 0x5f (BUCK3 INTB)*/
+ BD96801_BUCK3_OCPH_STAT,
+ BD96801_BUCK3_OCPL_STAT,
+ BD96801_BUCK3_OCPN_STAT,
+ BD96801_BUCK3_OVD_STAT,
+ BD96801_BUCK3_UVD_STAT,
+ BD96801_BUCK3_TW_CH_STAT,
+
+ /* Reg 0x60 (BUCK4 INTB)*/
+ BD96801_BUCK4_OCPH_STAT,
+ BD96801_BUCK4_OCPL_STAT,
+ BD96801_BUCK4_OCPN_STAT,
+ BD96801_BUCK4_OVD_STAT,
+ BD96801_BUCK4_UVD_STAT,
+ BD96801_BUCK4_TW_CH_STAT,
+
+ /* Reg 0x61 (LDO5 INTB) */
+ BD96801_LDO5_OCPH_STAT, /* bit [0] */
+ BD96801_LDO5_OVD_STAT, /* bit [3] */
+ BD96801_LDO5_UVD_STAT, /* bit [4] */
+
+ /* Reg 0x62 (LDO6 INTB) */
+ BD96801_LDO6_OCPH_STAT, /* bit [0] */
+ BD96801_LDO6_OVD_STAT, /* bit [3] */
+ BD96801_LDO6_UVD_STAT, /* bit [4] */
+
+ /* Reg 0x63 (LDO7 INTB) */
+ BD96801_LDO7_OCPH_STAT, /* bit [0] */
+ BD96801_LDO7_OVD_STAT, /* bit [3] */
+ BD96801_LDO7_UVD_STAT, /* bit [4] */
+};
+
+/* IRQ MASKs */
+#define BD96801_TW_STAT_MASK BIT(0)
+#define BD96801_WDT_ERR_STAT_MASK BIT(1)
+#define BD96801_I2C_ERR_STAT_MASK BIT(2)
+#define BD96801_CHIP_IF_ERR_STAT_MASK BIT(3)
+
+#define BD96801_BUCK_OCPH_STAT_MASK BIT(0)
+#define BD96801_BUCK_OCPL_STAT_MASK BIT(1)
+#define BD96801_BUCK_OCPN_STAT_MASK BIT(2)
+#define BD96801_BUCK_OVD_STAT_MASK BIT(3)
+#define BD96801_BUCK_UVD_STAT_MASK BIT(4)
+#define BD96801_BUCK_TW_CH_STAT_MASK BIT(5)
+
+#define BD96801_LDO_OCPH_STAT_MASK BIT(0)
+#define BD96801_LDO_OVD_STAT_MASK BIT(3)
+#define BD96801_LDO_UVD_STAT_MASK BIT(4)
+
+#endif
diff --git a/include/linux/mfd/rohm-bd96802.h b/include/linux/mfd/rohm-bd96802.h
new file mode 100644
index 000000000000..bf4b77944edf
--- /dev/null
+++ b/include/linux/mfd/rohm-bd96802.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2025 ROHM Semiconductors
+ *
+ * The digital interface of the BD96802 PMIC is a reduced version of the
+ * BD96801. Hence the BD96801 definitions are used for registers and masks
+ * while this header only holds the IRQ definitions - mainly to avoid gaps in
+ * IRQ numbers caused by the lack of some BUCKs / LDOs and their respective
+ * IRQs.
+ */
+
+#ifndef __LINUX_MFD_BD96802_H__
+#define __LINUX_MFD_BD96802_H__
+
+/* ERRB IRQs */
+enum {
+ /* Reg 0x52, 0x53, 0x54 - ERRB system IRQs */
+ BD96802_OTP_ERR_STAT,
+ BD96802_DBIST_ERR_STAT,
+ BD96802_EEP_ERR_STAT,
+ BD96802_ABIST_ERR_STAT,
+ BD96802_PRSTB_ERR_STAT,
+ BD96802_DRMOS1_ERR_STAT,
+ BD96802_DRMOS2_ERR_STAT,
+ BD96802_SLAVE_ERR_STAT,
+ BD96802_VREF_ERR_STAT,
+ BD96802_TSD_ERR_STAT,
+ BD96802_UVLO_ERR_STAT,
+ BD96802_OVLO_ERR_STAT,
+ BD96802_OSC_ERR_STAT,
+ BD96802_PON_ERR_STAT,
+ BD96802_POFF_ERR_STAT,
+ BD96802_CMD_SHDN_ERR_STAT,
+ BD96802_INT_SHDN_ERR_STAT,
+
+ /* Reg 0x55 BUCK1 ERR IRQs */
+ BD96802_BUCK1_PVIN_ERR_STAT,
+ BD96802_BUCK1_OVP_ERR_STAT,
+ BD96802_BUCK1_UVP_ERR_STAT,
+ BD96802_BUCK1_SHDN_ERR_STAT,
+
+ /* Reg 0x56 BUCK2 ERR IRQs */
+ BD96802_BUCK2_PVIN_ERR_STAT,
+ BD96802_BUCK2_OVP_ERR_STAT,
+ BD96802_BUCK2_UVP_ERR_STAT,
+ BD96802_BUCK2_SHDN_ERR_STAT,
+};
+
+/* INTB IRQs */
+enum {
+ /* Reg 0x5c (System INTB) */
+ BD96802_TW_STAT,
+ BD96802_WDT_ERR_STAT,
+ BD96802_I2C_ERR_STAT,
+ BD96802_CHIP_IF_ERR_STAT,
+
+ /* Reg 0x5d (BUCK1 INTB) */
+ BD96802_BUCK1_OCPH_STAT,
+ BD96802_BUCK1_OCPL_STAT,
+ BD96802_BUCK1_OCPN_STAT,
+ BD96802_BUCK1_OVD_STAT,
+ BD96802_BUCK1_UVD_STAT,
+ BD96802_BUCK1_TW_CH_STAT,
+
+ /* Reg 0x5e (BUCK2 INTB) */
+ BD96802_BUCK2_OCPH_STAT,
+ BD96802_BUCK2_OCPL_STAT,
+ BD96802_BUCK2_OCPN_STAT,
+ BD96802_BUCK2_OVD_STAT,
+ BD96802_BUCK2_UVD_STAT,
+ BD96802_BUCK2_TW_CH_STAT,
+};
+
+#endif
diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h
index 35b392a0d73a..579e8dcfcca4 100644
--- a/include/linux/mfd/rohm-generic.h
+++ b/include/linux/mfd/rohm-generic.h
@@ -12,11 +12,14 @@ enum rohm_chip_type {
ROHM_CHIP_TYPE_BD9573,
ROHM_CHIP_TYPE_BD9574,
ROHM_CHIP_TYPE_BD9576,
- ROHM_CHIP_TYPE_BD70528,
ROHM_CHIP_TYPE_BD71815,
ROHM_CHIP_TYPE_BD71828,
ROHM_CHIP_TYPE_BD71837,
ROHM_CHIP_TYPE_BD71847,
+ ROHM_CHIP_TYPE_BD96801,
+ ROHM_CHIP_TYPE_BD96802,
+ ROHM_CHIP_TYPE_BD96805,
+ ROHM_CHIP_TYPE_BD96806,
ROHM_CHIP_TYPE_AMOUNT
};
@@ -80,14 +83,8 @@ int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
const struct regulator_desc *desc,
struct regmap *regmap);
-#else
-static inline int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
- struct device_node *np,
- const struct regulator_desc *desc,
- struct regmap *regmap)
-{
- return 0;
-}
+int rohm_regulator_set_voltage_sel_restricted(struct regulator_dev *rdev,
+ unsigned int sel);
#endif
#endif
diff --git a/include/linux/mfd/rsmu.h b/include/linux/mfd/rsmu.h
new file mode 100644
index 000000000000..0379aa207428
--- /dev/null
+++ b/include/linux/mfd/rsmu.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Core interface for Renesas Synchronization Management Unit (SMU) devices.
+ *
+ * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company.
+ */
+
+#ifndef __LINUX_MFD_RSMU_H
+#define __LINUX_MFD_RSMU_H
+
+#define RSMU_MAX_WRITE_COUNT (255)
+#define RSMU_MAX_READ_COUNT (255)
+
+/* The supported devices are ClockMatrix, Sabre and SnowLotus */
+enum rsmu_type {
+ RSMU_CM = 0x34000,
+ RSMU_SABRE = 0x33810,
+ RSMU_SL = 0x19850,
+};
+
+/**
+ * struct rsmu_ddata - device data structure for sub devices.
+ *
+ * @dev: i2c/spi device.
+ * @regmap: i2c/spi bus access.
+ * @lock: mutex used by sub devices to make sure a series of
+ * bus access requests are not interrupted.
+ * @type: RSMU device type.
+ * @page: i2c/spi bus driver internal use only.
+ */
+struct rsmu_ddata {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock;
+ enum rsmu_type type;
+ u32 page;
+};
+#endif /* __LINUX_MFD_RSMU_H */
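
A sub-device is expected to take @lock around any multi-step bus sequence, so that a page-select write and the access that depends on it stay atomic with respect to the other sub-devices. A usage sketch (the bus helper name is hypothetical):

	mutex_lock(&rsmu->lock);
	err = rsmu_bus_write(rsmu, reg, buf, len);	/* hypothetical helper */
	mutex_unlock(&rsmu->lock);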
diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h
index 2d1895c3efbf..0221f806d139 100644
--- a/include/linux/mfd/rt5033-private.h
+++ b/include/linux/mfd/rt5033-private.h
@@ -55,21 +55,28 @@ enum rt5033_reg {
};
/* RT5033 Charger state register */
-#define RT5033_CHG_STAT_MASK 0x20
+#define RT5033_CHG_STAT_TYPE_MASK 0x60
+#define RT5033_CHG_STAT_TYPE_PRE 0x20
+#define RT5033_CHG_STAT_TYPE_FAST 0x60
+#define RT5033_CHG_STAT_MASK 0x30
#define RT5033_CHG_STAT_DISCHARGING 0x00
#define RT5033_CHG_STAT_FULL 0x10
#define RT5033_CHG_STAT_CHARGING 0x20
#define RT5033_CHG_STAT_NOT_CHARGING 0x30
-#define RT5033_CHG_STAT_TYPE_MASK 0x60
-#define RT5033_CHG_STAT_TYPE_PRE 0x20
-#define RT5033_CHG_STAT_TYPE_FAST 0x60
/* RT5033 CHGCTRL1 register */
#define RT5033_CHGCTRL1_IAICR_MASK 0xe0
+#define RT5033_CHGCTRL1_TE_EN_MASK 0x08
+#define RT5033_CHGCTRL1_HZ_MASK 0x02
#define RT5033_CHGCTRL1_MODE_MASK 0x01
/* RT5033 CHGCTRL2 register */
#define RT5033_CHGCTRL2_CV_MASK 0xfc
+#define RT5033_CHGCTRL2_CV_SHIFT 0x02
+
+/* RT5033 DEVICE_ID register */
+#define RT5033_VENDOR_ID_MASK 0xf0
+#define RT5033_CHIP_REV_MASK 0x0f
/* RT5033 CHGCTRL3 register */
#define RT5033_CHGCTRL3_CFO_EN_MASK 0x40
@@ -77,18 +84,18 @@ enum rt5033_reg {
#define RT5033_CHGCTRL3_TIMER_EN_MASK 0x01
/* RT5033 CHGCTRL4 register */
-#define RT5033_CHGCTRL4_EOC_MASK 0x07
+#define RT5033_CHGCTRL4_MIVR_MASK 0xe0
#define RT5033_CHGCTRL4_IPREC_MASK 0x18
+#define RT5033_CHGCTRL4_IPREC_SHIFT 0x03
+#define RT5033_CHGCTRL4_EOC_MASK 0x07
/* RT5033 CHGCTRL5 register */
-#define RT5033_CHGCTRL5_VPREC_MASK 0x0f
#define RT5033_CHGCTRL5_ICHG_MASK 0xf0
#define RT5033_CHGCTRL5_ICHG_SHIFT 0x04
-#define RT5033_CHG_MAX_CURRENT 0x0d
+#define RT5033_CHGCTRL5_VPREC_MASK 0x0f
/* RT5033 RT CTRL1 register */
#define RT5033_RT_CTRL1_UUG_MASK 0x02
-#define RT5033_RT_HZ_MASK 0x01
/* RT5033 control register */
#define RT5033_CTRL_FCCM_BUCK_MASK BIT(0)
@@ -107,65 +114,77 @@ enum rt5033_reg {
#define RT5033_LDO_CTRL_MASK 0x1f
/* RT5033 charger property - model, manufacturer */
-
#define RT5033_CHARGER_MODEL "RT5033WSC Charger"
#define RT5033_MANUFACTURER "Richtek Technology Corporation"
/*
- * RT5033 charger fast-charge current lmits (as in CHGCTRL1 register),
- * AICR mode limits the input current for example,
- * the AIRC 100 mode limits the input current to 100 mA.
+ * While the RT5033 charger can limit the fast-charge current (the ICHG
+ * field in the CHGCTRL5 register), AICR mode (CHGCTRL1 register) limits
+ * the input current. For example, the AICR 100 mode limits the input
+ * current to 100 mA.
*/
+#define RT5033_AICR_DISABLE 0x00
#define RT5033_AICR_100_MODE 0x20
#define RT5033_AICR_500_MODE 0x40
#define RT5033_AICR_700_MODE 0x60
#define RT5033_AICR_900_MODE 0x80
+#define RT5033_AICR_1000_MODE 0xa0
#define RT5033_AICR_1500_MODE 0xc0
#define RT5033_AICR_2000_MODE 0xe0
-#define RT5033_AICR_MODE_MASK 0xe0
-/* RT5033 use internal timer need to set time */
-#define RT5033_FAST_CHARGE_TIMER4 0x00
-#define RT5033_FAST_CHARGE_TIMER6 0x01
-#define RT5033_FAST_CHARGE_TIMER8 0x02
-#define RT5033_FAST_CHARGE_TIMER9 0x03
-#define RT5033_FAST_CHARGE_TIMER12 0x04
-#define RT5033_FAST_CHARGE_TIMER14 0x05
-#define RT5033_FAST_CHARGE_TIMER16 0x06
+/* RT5033 charger minimum input voltage regulation */
+#define RT5033_CHARGER_MIVR_DISABLE 0x00
+#define RT5033_CHARGER_MIVR_4200MV 0x20
+#define RT5033_CHARGER_MIVR_4300MV 0x40
+#define RT5033_CHARGER_MIVR_4400MV 0x60
+#define RT5033_CHARGER_MIVR_4500MV 0x80
+#define RT5033_CHARGER_MIVR_4600MV 0xa0
+#define RT5033_CHARGER_MIVR_4700MV 0xc0
+#define RT5033_CHARGER_MIVR_4800MV 0xe0
+/* RT5033 uses an internal fast-charge timer; the timeout must be set */
+#define RT5033_FAST_CHARGE_TIMER4 0x00 /* 4 hrs */
+#define RT5033_FAST_CHARGE_TIMER6 0x08 /* 6 hrs */
+#define RT5033_FAST_CHARGE_TIMER8 0x10 /* 8 hrs */
+#define RT5033_FAST_CHARGE_TIMER10 0x18 /* 10 hrs */
+#define RT5033_FAST_CHARGE_TIMER12 0x20 /* 12 hrs */
+#define RT5033_FAST_CHARGE_TIMER14 0x28 /* 14 hrs */
+#define RT5033_FAST_CHARGE_TIMER16 0x30 /* 16 hrs */
+
+#define RT5033_INT_TIMER_DISABLE 0x00
#define RT5033_INT_TIMER_ENABLE 0x01
-/* RT5033 charger termination enable mask */
-#define RT5033_TE_ENABLE_MASK 0x08
-
/*
- * RT5033 charger opa mode. RT50300 have two opa mode charger mode
- * and boost mode for OTG
+ * RT5033 charger opa mode. RT5033 has two opa modes for OTG: charger mode
+ * and boost mode.
*/
-
#define RT5033_CHARGER_MODE 0x00
#define RT5033_BOOST_MODE 0x01
/* RT5033 charger termination enable */
+#define RT5033_TE_DISABLE 0x00
#define RT5033_TE_ENABLE 0x08
/* RT5033 charger CFO enable */
+#define RT5033_CFO_DISABLE 0x00
#define RT5033_CFO_ENABLE 0x40
/* RT5033 charger constant charge voltage (as in CHGCTRL2 register), uV */
#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN 3650000U
#define RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM 25000U
#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX 4400000U
+#define RT5033_CV_MAX_VOLTAGE 0x1e
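
The CV field encodes the constant-charge voltage linearly in 25 mV steps between the limits above; the maximum selector 0x1e corresponds to 4.4 V. A hedged helper mapping microvolts to the selector (the result would then be shifted by RT5033_CHGCTRL2_CV_SHIFT when written; not part of the patch):

	static u8 rt5033_cv_sel(unsigned int uvolt)
	{
		if (uvolt < RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN)
			uvolt = RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN;
		if (uvolt > RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX)
			uvolt = RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX;

		return (uvolt - RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN) /
		       RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM;	/* 25 mV per step */
	}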
/* RT5033 charger pre-charge current limits (as in CHGCTRL4 register), uA */
#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MIN 350000U
#define RT5033_CHARGER_PRE_CURRENT_STEP_NUM 100000U
#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MAX 650000U
+#define RT5033_CHG_MAX_PRE_CURRENT 0x03
/* RT5033 charger fast-charge current (as in CHGCTRL5 register), uA */
#define RT5033_CHARGER_FAST_CURRENT_MIN 700000U
#define RT5033_CHARGER_FAST_CURRENT_STEP_NUM 100000U
#define RT5033_CHARGER_FAST_CURRENT_MAX 2000000U
+#define RT5033_CHG_MAX_CURRENT 0x0d
/*
* RT5033 charger const-charge end of charger current (
@@ -181,20 +200,20 @@ enum rt5033_reg {
* RT5033 charger pre-charge threshold volt limits
* (as in CHGCTRL5 register), uV
*/
-
#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MIN 2300000U
#define RT5033_CHARGER_PRE_THRESHOLD_STEP_NUM 100000U
#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MAX 3800000U
/*
- * RT5033 charger enable UUG, If UUG enable MOS auto control by H/W charger
+ * RT5033 charger UUG. It enables MOS auto control by H/W charger
* circuit.
*/
+#define RT5033_CHARGER_UUG_DISABLE 0x00
#define RT5033_CHARGER_UUG_ENABLE 0x02
-/* RT5033 charger High impedance mode */
+/* RT5033 charger high impedance mode */
#define RT5033_CHARGER_HZ_DISABLE 0x00
-#define RT5033_CHARGER_HZ_ENABLE 0x01
+#define RT5033_CHARGER_HZ_ENABLE 0x02
/* RT5033 regulator BUCK output voltage uV */
#define RT5033_REGULATOR_BUCK_VOLTAGE_MIN 1000000U
diff --git a/include/linux/mfd/rt5033.h b/include/linux/mfd/rt5033.h
index 3c23b6220c04..bb3d18945d21 100644
--- a/include/linux/mfd/rt5033.h
+++ b/include/linux/mfd/rt5033.h
@@ -12,7 +12,6 @@
#include <linux/regulator/consumer.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
-#include <linux/power_supply.h>
/* RT5033 regulator IDs */
enum rt5033_regulators {
@@ -32,28 +31,4 @@ struct rt5033_dev {
bool wakeup;
};
-struct rt5033_battery {
- struct i2c_client *client;
- struct rt5033_dev *rt5033;
- struct regmap *regmap;
- struct power_supply *psy;
-};
-
-/* RT5033 charger platform data */
-struct rt5033_charger_data {
- unsigned int pre_uamp;
- unsigned int pre_uvolt;
- unsigned int const_uvolt;
- unsigned int eoc_uamp;
- unsigned int fast_uamp;
-};
-
-struct rt5033_charger {
- struct device *dev;
- struct rt5033_dev *rt5033;
- struct power_supply psy;
-
- struct rt5033_charger_data *chg;
-};
-
#endif /* __RT5033_H__ */
diff --git a/include/linux/mfd/rz-mtu3.h b/include/linux/mfd/rz-mtu3.h
new file mode 100644
index 000000000000..8421d49500bf
--- /dev/null
+++ b/include/linux/mfd/rz-mtu3.h
@@ -0,0 +1,191 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+#ifndef __MFD_RZ_MTU3_H__
+#define __MFD_RZ_MTU3_H__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+
+/* 8-bit shared register offsets macros */
+#define RZ_MTU3_TSTRA 0x080 /* Timer start register A */
+#define RZ_MTU3_TSTRB 0x880 /* Timer start register B */
+
+/* 16-bit shared register offset macros */
+#define RZ_MTU3_TDDRA 0x016 /* Timer dead time data register A */
+#define RZ_MTU3_TDDRB 0x816 /* Timer dead time data register B */
+#define RZ_MTU3_TCDRA 0x014 /* Timer cycle data register A */
+#define RZ_MTU3_TCDRB 0x814 /* Timer cycle data register B */
+#define RZ_MTU3_TCBRA 0x022 /* Timer cycle buffer register A */
+#define RZ_MTU3_TCBRB 0x822 /* Timer cycle buffer register B */
+#define RZ_MTU3_TCNTSA 0x020 /* Timer subcounter A */
+#define RZ_MTU3_TCNTSB 0x820 /* Timer subcounter B */
+
+/*
+ * MTU5 contains 3 timer counter registers and is totally different
+ * from the other channels, so its register offsets are kept separate.
+ */
+
+/* 8-bit register offset macros of MTU3 channels except MTU5 */
+#define RZ_MTU3_TIER 0 /* Timer interrupt register */
+#define RZ_MTU3_NFCR 1 /* Noise filter control register */
+#define RZ_MTU3_TSR 2 /* Timer status register */
+#define RZ_MTU3_TCR 3 /* Timer control register */
+#define RZ_MTU3_TCR2 4 /* Timer control register 2 */
+
+/* Timer mode register 1 */
+#define RZ_MTU3_TMDR1 5
+#define RZ_MTU3_TMDR1_MD GENMASK(3, 0)
+#define RZ_MTU3_TMDR1_MD_NORMAL FIELD_PREP(RZ_MTU3_TMDR1_MD, 0)
+#define RZ_MTU3_TMDR1_MD_PWMMODE1 FIELD_PREP(RZ_MTU3_TMDR1_MD, 2)
+
+#define RZ_MTU3_TIOR 6 /* Timer I/O control register */
+#define RZ_MTU3_TIORH 6 /* Timer I/O control register H */
+#define RZ_MTU3_TIORL 7 /* Timer I/O control register L */
+/* Only MTU3/4/6/7 have TBTM registers */
+#define RZ_MTU3_TBTM 8 /* Timer buffer operation transfer mode register */
+
+/* 8-bit MTU5 register offset macros */
+#define RZ_MTU3_TSTR 2 /* MTU5 Timer start register */
+#define RZ_MTU3_TCNTCMPCLR 3 /* MTU5 Timer compare match clear register */
+#define RZ_MTU3_TCRU 4 /* Timer control register U */
+#define RZ_MTU3_TCR2U 5 /* Timer control register 2U */
+#define RZ_MTU3_TIORU 6 /* Timer I/O control register U */
+#define RZ_MTU3_TCRV 7 /* Timer control register V */
+#define RZ_MTU3_TCR2V 8 /* Timer control register 2V */
+#define RZ_MTU3_TIORV 9 /* Timer I/O control register V */
+#define RZ_MTU3_TCRW 10 /* Timer control register W */
+#define RZ_MTU3_TCR2W 11 /* Timer control register 2W */
+#define RZ_MTU3_TIORW 12 /* Timer I/O control register W */
+
+/* 16-bit register offset macros of MTU3 channels except MTU5 */
+#define RZ_MTU3_TCNT 0 /* Timer counter */
+#define RZ_MTU3_TGRA 1 /* Timer general register A */
+#define RZ_MTU3_TGRB 2 /* Timer general register B */
+#define RZ_MTU3_TGRC 3 /* Timer general register C */
+#define RZ_MTU3_TGRD 4 /* Timer general register D */
+#define RZ_MTU3_TGRE 5 /* Timer general register E */
+#define RZ_MTU3_TGRF 6 /* Timer general register F */
+/* Timer A/D converter start request registers */
+#define RZ_MTU3_TADCR 7 /* control register */
+#define RZ_MTU3_TADCORA 8 /* cycle set register A */
+#define RZ_MTU3_TADCORB 9 /* cycle set register B */
+#define RZ_MTU3_TADCOBRA 10 /* cycle set buffer register A */
+#define RZ_MTU3_TADCOBRB 11 /* cycle set buffer register B */
+
+/* 16-bit MTU5 register offset macros */
+#define RZ_MTU3_TCNTU 0 /* MTU5 Timer counter U */
+#define RZ_MTU3_TGRU 1 /* MTU5 Timer general register U */
+#define RZ_MTU3_TCNTV 2 /* MTU5 Timer counter V */
+#define RZ_MTU3_TGRV 3 /* MTU5 Timer general register V */
+#define RZ_MTU3_TCNTW 4 /* MTU5 Timer counter W */
+#define RZ_MTU3_TGRW 5 /* MTU5 Timer general register W */
+
+/* 32-bit register offset */
+#define RZ_MTU3_TCNTLW 0 /* Timer longword counter */
+#define RZ_MTU3_TGRALW 1 /* Timer longword general register A */
+#define RZ_MTU3_TGRBLW 2 /* Timer longword general register B */
+
+#define RZ_MTU3_TMDR3 0x191 /* MTU1 Timer Mode Register 3 */
+
+/* Macros for setting registers */
+#define RZ_MTU3_TCR_CCLR GENMASK(7, 5)
+#define RZ_MTU3_TCR_CKEG GENMASK(4, 3)
+#define RZ_MTU3_TCR_TPCS GENMASK(2, 0)
+#define RZ_MTU3_TCR_CCLR_TGRA BIT(5)
+#define RZ_MTU3_TCR_CCLR_TGRC FIELD_PREP(RZ_MTU3_TCR_CCLR, 5)
+#define RZ_MTU3_TCR_CKEG_RISING FIELD_PREP(RZ_MTU3_TCR_CKEG, 0)
+
+#define RZ_MTU3_TIOR_IOB GENMASK(7, 4)
+#define RZ_MTU3_TIOR_IOA GENMASK(3, 0)
+#define RZ_MTU3_TIOR_OC_RETAIN 0
+#define RZ_MTU3_TIOR_OC_INIT_OUT_LO_HI_OUT 2
+#define RZ_MTU3_TIOR_OC_INIT_OUT_HI_TOGGLE_OUT 7
+
+#define RZ_MTU3_TIOR_OC_IOA_H_COMP_MATCH \
+ FIELD_PREP(RZ_MTU3_TIOR_IOA, RZ_MTU3_TIOR_OC_INIT_OUT_LO_HI_OUT)
+#define RZ_MTU3_TIOR_OC_IOB_TOGGLE \
+ FIELD_PREP(RZ_MTU3_TIOR_IOB, RZ_MTU3_TIOR_OC_INIT_OUT_HI_TOGGLE_OUT)
+
+enum rz_mtu3_channels {
+ RZ_MTU3_CHAN_0,
+ RZ_MTU3_CHAN_1,
+ RZ_MTU3_CHAN_2,
+ RZ_MTU3_CHAN_3,
+ RZ_MTU3_CHAN_4,
+ RZ_MTU3_CHAN_5,
+ RZ_MTU3_CHAN_6,
+ RZ_MTU3_CHAN_7,
+ RZ_MTU3_CHAN_8,
+ RZ_MTU_NUM_CHANNELS
+};
+
+/**
+ * struct rz_mtu3_channel - MTU3 channel private data
+ *
+ * @dev: device handle
+ * @channel_number: channel number
+ * @lock: Lock to protect channel state
+ * @is_busy: channel state
+ */
+struct rz_mtu3_channel {
+ struct device *dev;
+ unsigned int channel_number;
+ struct mutex lock;
+ bool is_busy;
+};
+
+/**
+ * struct rz_mtu3 - MTU3 core private data
+ *
+ * @clk: MTU3 module clock
+ * @rz_mtu3_channel: HW channels
+ * @priv_data: MTU3 core driver private data
+ */
+struct rz_mtu3 {
+ struct clk *clk;
+ struct rz_mtu3_channel channels[RZ_MTU_NUM_CHANNELS];
+
+ void *priv_data;
+};
+
+static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch)
+{
+ mutex_lock(&ch->lock);
+ if (ch->is_busy) {
+ mutex_unlock(&ch->lock);
+ return false;
+ }
+
+ ch->is_busy = true;
+ mutex_unlock(&ch->lock);
+
+ return true;
+}
+
+static inline void rz_mtu3_release_channel(struct rz_mtu3_channel *ch)
+{
+ mutex_lock(&ch->lock);
+ ch->is_busy = false;
+ mutex_unlock(&ch->lock);
+}
+
+bool rz_mtu3_is_enabled(struct rz_mtu3_channel *ch);
+void rz_mtu3_disable(struct rz_mtu3_channel *ch);
+int rz_mtu3_enable(struct rz_mtu3_channel *ch);
+
+u8 rz_mtu3_8bit_ch_read(struct rz_mtu3_channel *ch, u16 off);
+u16 rz_mtu3_16bit_ch_read(struct rz_mtu3_channel *ch, u16 off);
+u32 rz_mtu3_32bit_ch_read(struct rz_mtu3_channel *ch, u16 off);
+u16 rz_mtu3_shared_reg_read(struct rz_mtu3_channel *ch, u16 off);
+
+void rz_mtu3_8bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u8 val);
+void rz_mtu3_16bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u16 val);
+void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val);
+void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val);
+void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch, u16 off,
+ u16 pos, u8 val);
+
+#endif /* __MFD_RZ_MTU3_H__ */
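For orientation, the accessors above pair a mutex-protected busy flag with
width-specific register helpers. A minimal usage sketch, not part of the
patch: the helper name my_mtu3_start(), the choice of channel 0 and the
RZ_MTU3_TCR offset (defined earlier in this header) are illustrative only.

static int my_mtu3_start(struct rz_mtu3 *mtu)
{
	struct rz_mtu3_channel *ch = &mtu->channels[RZ_MTU3_CHAN_0];
	int ret;

	if (!rz_mtu3_request_channel(ch))	/* false if already busy */
		return -EBUSY;

	ret = rz_mtu3_enable(ch);
	if (ret) {
		rz_mtu3_release_channel(ch);
		return ret;
	}

	/* clear counter on TGRA compare match, count rising edges */
	rz_mtu3_8bit_ch_write(ch, RZ_MTU3_TCR,
			      RZ_MTU3_TCR_CCLR_TGRA | RZ_MTU3_TCR_CKEG_RISING);
	return 0;
}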
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index f1631a39acfc..d785e101fe79 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -36,15 +36,16 @@
struct gpio_desc;
enum sec_device_type {
- S5M8751X,
- S5M8763X,
S5M8767X,
+ S2DOS05,
S2MPA01,
+ S2MPG10,
S2MPS11X,
S2MPS13X,
S2MPS14X,
S2MPS15X,
S2MPU02,
+ S2MPU05,
};
/**
@@ -66,30 +67,16 @@ struct sec_pmic_dev {
struct regmap *regmap_pmic;
struct i2c_client *i2c;
- unsigned long device_type;
- int irq_base;
+ int device_type;
int irq;
struct regmap_irq_chip_data *irq_data;
-
- bool wakeup;
};
-int sec_irq_init(struct sec_pmic_dev *sec_pmic);
-void sec_irq_exit(struct sec_pmic_dev *sec_pmic);
-int sec_irq_resume(struct sec_pmic_dev *sec_pmic);
-
struct sec_platform_data {
struct sec_regulator_data *regulators;
struct sec_opmode_data *opmode;
- int device_type;
int num_regulators;
- int irq_base;
- int (*cfg_pmic_irq)(void);
-
- bool wakeup;
- bool buck_voltage_lock;
-
int buck_gpios[3];
int buck_ds[3];
unsigned int buck2_voltage[8];
@@ -99,35 +86,12 @@ struct sec_platform_data {
unsigned int buck4_voltage[8];
bool buck4_gpiodvs;
- int buck_set1;
- int buck_set2;
- int buck_set3;
- int buck2_enable;
- int buck3_enable;
- int buck4_enable;
int buck_default_idx;
- int buck2_default_idx;
- int buck3_default_idx;
- int buck4_default_idx;
-
int buck_ramp_delay;
- int buck2_ramp_delay;
- int buck34_ramp_delay;
- int buck5_ramp_delay;
- int buck16_ramp_delay;
- int buck7810_ramp_delay;
- int buck9_ramp_delay;
- int buck24_ramp_delay;
- int buck3_ramp_delay;
- int buck7_ramp_delay;
- int buck8910_ramp_delay;
-
- bool buck1_ramp_enable;
bool buck2_ramp_enable;
bool buck3_ramp_enable;
bool buck4_ramp_enable;
- bool buck6_ramp_enable;
int buck2_init;
int buck3_init;
diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h
index 6cfe4201a106..8402a5f8e18a 100644
--- a/include/linux/mfd/samsung/irq.h
+++ b/include/linux/mfd/samsung/irq.h
@@ -57,6 +57,115 @@ enum s2mpa01_irq {
#define S2MPA01_IRQ_B24_TSD_MASK (1 << 4)
#define S2MPA01_IRQ_B35_TSD_MASK (1 << 5)
+enum s2mpg10_common_irq {
+ /* Top-level (common) block */
+ S2MPG10_COMMON_IRQ_PMIC,
+ S2MPG10_COMMON_IRQ_UNUSED,
+};
+
+enum s2mpg10_irq {
+ /* PMIC */
+ S2MPG10_IRQ_PWRONF,
+ S2MPG10_IRQ_PWRONR,
+ S2MPG10_IRQ_JIGONBF,
+ S2MPG10_IRQ_JIGONBR,
+ S2MPG10_IRQ_ACOKBF,
+ S2MPG10_IRQ_ACOKBR,
+ S2MPG10_IRQ_PWRON1S,
+ S2MPG10_IRQ_MRB,
+#define S2MPG10_IRQ_PWRONF_MASK BIT(0)
+#define S2MPG10_IRQ_PWRONR_MASK BIT(1)
+#define S2MPG10_IRQ_JIGONBF_MASK BIT(2)
+#define S2MPG10_IRQ_JIGONBR_MASK BIT(3)
+#define S2MPG10_IRQ_ACOKBF_MASK BIT(4)
+#define S2MPG10_IRQ_ACOKBR_MASK BIT(5)
+#define S2MPG10_IRQ_PWRON1S_MASK BIT(6)
+#define S2MPG10_IRQ_MRB_MASK BIT(7)
+
+ S2MPG10_IRQ_RTC60S,
+ S2MPG10_IRQ_RTCA1,
+ S2MPG10_IRQ_RTCA0,
+ S2MPG10_IRQ_RTC1S,
+ S2MPG10_IRQ_WTSR_COLDRST,
+ S2MPG10_IRQ_WTSR,
+ S2MPG10_IRQ_WRST,
+ S2MPG10_IRQ_SMPL,
+#define S2MPG10_IRQ_RTC60S_MASK BIT(0)
+#define S2MPG10_IRQ_RTCA1_MASK BIT(1)
+#define S2MPG10_IRQ_RTCA0_MASK BIT(2)
+#define S2MPG10_IRQ_RTC1S_MASK BIT(3)
+#define S2MPG10_IRQ_WTSR_COLDRST_MASK BIT(4)
+#define S2MPG10_IRQ_WTSR_MASK BIT(5)
+#define S2MPG10_IRQ_WRST_MASK BIT(6)
+#define S2MPG10_IRQ_SMPL_MASK BIT(7)
+
+ S2MPG10_IRQ_120C,
+ S2MPG10_IRQ_140C,
+ S2MPG10_IRQ_TSD,
+ S2MPG10_IRQ_PIF_TIMEOUT1,
+ S2MPG10_IRQ_PIF_TIMEOUT2,
+ S2MPG10_IRQ_SPD_PARITY_ERR,
+ S2MPG10_IRQ_SPD_ABNORMAL_STOP,
+ S2MPG10_IRQ_PMETER_OVERF,
+#define S2MPG10_IRQ_INT120C_MASK BIT(0)
+#define S2MPG10_IRQ_INT140C_MASK BIT(1)
+#define S2MPG10_IRQ_TSD_MASK BIT(2)
+#define S2MPG10_IRQ_PIF_TIMEOUT1_MASK BIT(3)
+#define S2MPG10_IRQ_PIF_TIMEOUT2_MASK BIT(4)
+#define S2MPG10_IRQ_SPD_PARITY_ERR_MASK BIT(5)
+#define S2MPG10_IRQ_SPD_ABNORMAL_STOP_MASK BIT(6)
+#define S2MPG10_IRQ_PMETER_OVERF_MASK BIT(7)
+
+ S2MPG10_IRQ_OCP_B1M,
+ S2MPG10_IRQ_OCP_B2M,
+ S2MPG10_IRQ_OCP_B3M,
+ S2MPG10_IRQ_OCP_B4M,
+ S2MPG10_IRQ_OCP_B5M,
+ S2MPG10_IRQ_OCP_B6M,
+ S2MPG10_IRQ_OCP_B7M,
+ S2MPG10_IRQ_OCP_B8M,
+#define S2MPG10_IRQ_OCP_B1M_MASK BIT(0)
+#define S2MPG10_IRQ_OCP_B2M_MASK BIT(1)
+#define S2MPG10_IRQ_OCP_B3M_MASK BIT(2)
+#define S2MPG10_IRQ_OCP_B4M_MASK BIT(3)
+#define S2MPG10_IRQ_OCP_B5M_MASK BIT(4)
+#define S2MPG10_IRQ_OCP_B6M_MASK BIT(5)
+#define S2MPG10_IRQ_OCP_B7M_MASK BIT(6)
+#define S2MPG10_IRQ_OCP_B8M_MASK BIT(7)
+
+ S2MPG10_IRQ_OCP_B9M,
+ S2MPG10_IRQ_OCP_B10M,
+ S2MPG10_IRQ_WLWP_ACC,
+ S2MPG10_IRQ_SMPL_TIMEOUT,
+ S2MPG10_IRQ_WTSR_TIMEOUT,
+ S2MPG10_IRQ_SPD_SRP_PKT_RST,
+#define S2MPG10_IRQ_OCP_B9M_MASK BIT(0)
+#define S2MPG10_IRQ_OCP_B10M_MASK BIT(1)
+#define S2MPG10_IRQ_WLWP_ACC_MASK BIT(2)
+#define S2MPG10_IRQ_SMPL_TIMEOUT_MASK BIT(5)
+#define S2MPG10_IRQ_WTSR_TIMEOUT_MASK BIT(6)
+#define S2MPG10_IRQ_SPD_SRP_PKT_RST_MASK BIT(7)
+
+ S2MPG10_IRQ_PWR_WARN_CH0,
+ S2MPG10_IRQ_PWR_WARN_CH1,
+ S2MPG10_IRQ_PWR_WARN_CH2,
+ S2MPG10_IRQ_PWR_WARN_CH3,
+ S2MPG10_IRQ_PWR_WARN_CH4,
+ S2MPG10_IRQ_PWR_WARN_CH5,
+ S2MPG10_IRQ_PWR_WARN_CH6,
+ S2MPG10_IRQ_PWR_WARN_CH7,
+#define S2MPG10_IRQ_PWR_WARN_CH0_MASK BIT(0)
+#define S2MPG10_IRQ_PWR_WARN_CH1_MASK BIT(1)
+#define S2MPG10_IRQ_PWR_WARN_CH2_MASK BIT(2)
+#define S2MPG10_IRQ_PWR_WARN_CH3_MASK BIT(3)
+#define S2MPG10_IRQ_PWR_WARN_CH4_MASK BIT(4)
+#define S2MPG10_IRQ_PWR_WARN_CH5_MASK BIT(5)
+#define S2MPG10_IRQ_PWR_WARN_CH6_MASK BIT(6)
+#define S2MPG10_IRQ_PWR_WARN_CH7_MASK BIT(7)
+
+ S2MPG10_IRQ_NR,
+};
+
enum s2mps11_irq {
S2MPS11_IRQ_PWRONF,
S2MPS11_IRQ_PWRONR,
@@ -150,6 +259,50 @@ enum s2mpu02_irq {
/* Masks for interrupts are the same as in s2mps11 */
#define S2MPS14_IRQ_TSD_MASK (1 << 2)
+enum s2mpu05_irq {
+ S2MPU05_IRQ_PWRONF,
+ S2MPU05_IRQ_PWRONR,
+ S2MPU05_IRQ_JIGONBF,
+ S2MPU05_IRQ_JIGONBR,
+ S2MPU05_IRQ_ACOKF,
+ S2MPU05_IRQ_ACOKR,
+ S2MPU05_IRQ_PWRON1S,
+ S2MPU05_IRQ_MRB,
+
+ S2MPU05_IRQ_RTC60S,
+ S2MPU05_IRQ_RTCA1,
+ S2MPU05_IRQ_RTCA0,
+ S2MPU05_IRQ_SMPL,
+ S2MPU05_IRQ_RTC1S,
+ S2MPU05_IRQ_WTSR,
+
+ S2MPU05_IRQ_INT120C,
+ S2MPU05_IRQ_INT140C,
+ S2MPU05_IRQ_TSD,
+
+ S2MPU05_IRQ_NR,
+};
+
+#define S2MPU05_IRQ_PWRONF_MASK BIT(0)
+#define S2MPU05_IRQ_PWRONR_MASK BIT(1)
+#define S2MPU05_IRQ_JIGONBF_MASK BIT(2)
+#define S2MPU05_IRQ_JIGONBR_MASK BIT(3)
+#define S2MPU05_IRQ_ACOKF_MASK BIT(4)
+#define S2MPU05_IRQ_ACOKR_MASK BIT(5)
+#define S2MPU05_IRQ_PWRON1S_MASK BIT(6)
+#define S2MPU05_IRQ_MRB_MASK BIT(7)
+
+#define S2MPU05_IRQ_RTC60S_MASK BIT(0)
+#define S2MPU05_IRQ_RTCA1_MASK BIT(1)
+#define S2MPU05_IRQ_RTCA0_MASK BIT(2)
+#define S2MPU05_IRQ_SMPL_MASK BIT(3)
+#define S2MPU05_IRQ_RTC1S_MASK BIT(4)
+#define S2MPU05_IRQ_WTSR_MASK BIT(5)
+
+#define S2MPU05_IRQ_INT120C_MASK BIT(0)
+#define S2MPU05_IRQ_INT140C_MASK BIT(1)
+#define S2MPU05_IRQ_TSD_MASK BIT(2)
+
enum s5m8767_irq {
S5M8767_IRQ_PWRR,
S5M8767_IRQ_PWRF,
@@ -194,54 +347,4 @@ enum s5m8767_irq {
#define S5M8767_IRQ_RTC1S_MASK (1 << 4)
#define S5M8767_IRQ_WTSR_MASK (1 << 5)
-enum s5m8763_irq {
- S5M8763_IRQ_DCINF,
- S5M8763_IRQ_DCINR,
- S5M8763_IRQ_JIGF,
- S5M8763_IRQ_JIGR,
- S5M8763_IRQ_PWRONF,
- S5M8763_IRQ_PWRONR,
-
- S5M8763_IRQ_WTSREVNT,
- S5M8763_IRQ_SMPLEVNT,
- S5M8763_IRQ_ALARM1,
- S5M8763_IRQ_ALARM0,
-
- S5M8763_IRQ_ONKEY1S,
- S5M8763_IRQ_TOPOFFR,
- S5M8763_IRQ_DCINOVPR,
- S5M8763_IRQ_CHGRSTF,
- S5M8763_IRQ_DONER,
- S5M8763_IRQ_CHGFAULT,
-
- S5M8763_IRQ_LOBAT1,
- S5M8763_IRQ_LOBAT2,
-
- S5M8763_IRQ_NR,
-};
-
-#define S5M8763_IRQ_DCINF_MASK (1 << 2)
-#define S5M8763_IRQ_DCINR_MASK (1 << 3)
-#define S5M8763_IRQ_JIGF_MASK (1 << 4)
-#define S5M8763_IRQ_JIGR_MASK (1 << 5)
-#define S5M8763_IRQ_PWRONF_MASK (1 << 6)
-#define S5M8763_IRQ_PWRONR_MASK (1 << 7)
-
-#define S5M8763_IRQ_WTSREVNT_MASK (1 << 0)
-#define S5M8763_IRQ_SMPLEVNT_MASK (1 << 1)
-#define S5M8763_IRQ_ALARM1_MASK (1 << 2)
-#define S5M8763_IRQ_ALARM0_MASK (1 << 3)
-
-#define S5M8763_IRQ_ONKEY1S_MASK (1 << 0)
-#define S5M8763_IRQ_TOPOFFR_MASK (1 << 2)
-#define S5M8763_IRQ_DCINOVPR_MASK (1 << 3)
-#define S5M8763_IRQ_CHGRSTF_MASK (1 << 4)
-#define S5M8763_IRQ_DONER_MASK (1 << 5)
-#define S5M8763_IRQ_CHGFAULT_MASK (1 << 7)
-
-#define S5M8763_IRQ_LOBAT1_MASK (1 << 0)
-#define S5M8763_IRQ_LOBAT2_MASK (1 << 1)
-
-#define S5M8763_ENRAMP (1 << 4)
-
#endif /* __LINUX_MFD_SEC_IRQ_H */
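A brief aside on how these definitions are typically consumed: the enum
values index a regmap-irq table, while the *_MASK macros give each bit's
position within its 8-bit status register. A hedged sketch, assuming the
standard REGMAP_IRQ_REG() helper from <linux/regmap.h>; the table below is
illustrative and deliberately incomplete.

static const struct regmap_irq s2mpu05_irqs[] = {
	/* reg_offset 0/1/2 selects the INT1/INT2/INT3 status register */
	REGMAP_IRQ_REG(S2MPU05_IRQ_PWRONF, 0, S2MPU05_IRQ_PWRONF_MASK),
	REGMAP_IRQ_REG(S2MPU05_IRQ_RTC60S, 1, S2MPU05_IRQ_RTC60S_MASK),
	REGMAP_IRQ_REG(S2MPU05_IRQ_TSD,    2, S2MPU05_IRQ_TSD_MASK),
	/* ...remaining S2MPU05_IRQ_* entries follow the same pattern */
};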
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h
index 0204decfc9aa..51c4239a1fa6 100644
--- a/include/linux/mfd/samsung/rtc.h
+++ b/include/linux/mfd/samsung/rtc.h
@@ -72,6 +72,37 @@ enum s2mps_rtc_reg {
S2MPS_RTC_REG_MAX,
};
+enum s2mpg10_rtc_reg {
+ S2MPG10_RTC_CTRL,
+ S2MPG10_RTC_UPDATE,
+ S2MPG10_RTC_SMPL,
+ S2MPG10_RTC_WTSR,
+ S2MPG10_RTC_CAP_SEL,
+ S2MPG10_RTC_MSEC,
+ S2MPG10_RTC_SEC,
+ S2MPG10_RTC_MIN,
+ S2MPG10_RTC_HOUR,
+ S2MPG10_RTC_WEEK,
+ S2MPG10_RTC_DAY,
+ S2MPG10_RTC_MON,
+ S2MPG10_RTC_YEAR,
+ S2MPG10_RTC_A0SEC,
+ S2MPG10_RTC_A0MIN,
+ S2MPG10_RTC_A0HOUR,
+ S2MPG10_RTC_A0WEEK,
+ S2MPG10_RTC_A0DAY,
+ S2MPG10_RTC_A0MON,
+ S2MPG10_RTC_A0YEAR,
+ S2MPG10_RTC_A1SEC,
+ S2MPG10_RTC_A1MIN,
+ S2MPG10_RTC_A1HOUR,
+ S2MPG10_RTC_A1WEEK,
+ S2MPG10_RTC_A1DAY,
+ S2MPG10_RTC_A1MON,
+ S2MPG10_RTC_A1YEAR,
+ S2MPG10_RTC_OSC_CTRL,
+};
+
#define RTC_I2C_ADDR (0x0C >> 1)
#define HOUR_12 (1 << 7)
@@ -124,10 +155,16 @@ enum s2mps_rtc_reg {
#define ALARM_ENABLE_SHIFT 7
#define ALARM_ENABLE_MASK (1 << ALARM_ENABLE_SHIFT)
+/* WTSR & SMPL registers */
#define SMPL_ENABLE_SHIFT 7
#define SMPL_ENABLE_MASK (1 << SMPL_ENABLE_SHIFT)
#define WTSR_ENABLE_SHIFT 6
#define WTSR_ENABLE_MASK (1 << WTSR_ENABLE_SHIFT)
+#define S2MPG10_WTSR_COLDTIMER GENMASK(6, 5)
+#define S2MPG10_WTSR_COLDRST BIT(4)
+#define S2MPG10_WTSR_WTSRT GENMASK(3, 1)
+#define S2MPG10_WTSR_WTSR_EN BIT(0)
+
#endif /* __LINUX_MFD_SEC_RTC_H */
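The new S2MPG10_WTSR_* fields are declared with GENMASK()/BIT(), so they
compose naturally with the <linux/bitfield.h> helpers. A hedged sketch, not
from the patch; the timeout selector value 3 is an arbitrary illustration.

	/* enable WTSR with an illustrative watchdog timeout selector */
	u8 wtsr = FIELD_PREP(S2MPG10_WTSR_WTSRT, 3) | S2MPG10_WTSR_WTSR_EN;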
diff --git a/include/linux/mfd/samsung/s2mpg10.h b/include/linux/mfd/samsung/s2mpg10.h
new file mode 100644
index 000000000000..9f5919b89a3c
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mpg10.h
@@ -0,0 +1,454 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015 Samsung Electronics
+ * Copyright 2020 Google Inc
+ * Copyright 2025 Linaro Ltd.
+ */
+
+#ifndef __LINUX_MFD_S2MPG10_H
+#define __LINUX_MFD_S2MPG10_H
+
+/* Common registers (type 0x000) */
+enum s2mpg10_common_reg {
+ S2MPG10_COMMON_CHIPID,
+ S2MPG10_COMMON_INT,
+ S2MPG10_COMMON_INT_MASK,
+ S2MPG10_COMMON_SPD_CTRL1 = 0x0a,
+ S2MPG10_COMMON_SPD_CTRL2,
+ S2MPG10_COMMON_SPD_CTRL3,
+ S2MPG10_COMMON_MON1SEL = 0x1a,
+ S2MPG10_COMMON_MON2SEL,
+ S2MPG10_COMMON_MONR,
+ S2MPG10_COMMON_DEBUG_CTRL1,
+ S2MPG10_COMMON_DEBUG_CTRL2,
+ S2MPG10_COMMON_DEBUG_CTRL3,
+ S2MPG10_COMMON_DEBUG_CTRL4,
+ S2MPG10_COMMON_DEBUG_CTRL5,
+ S2MPG10_COMMON_DEBUG_CTRL6,
+ S2MPG10_COMMON_DEBUG_CTRL7,
+ S2MPG10_COMMON_DEBUG_CTRL8,
+ S2MPG10_COMMON_TEST_MODE1,
+ S2MPG10_COMMON_TEST_MODE2,
+ S2MPG10_COMMON_SPD_DEBUG1,
+ S2MPG10_COMMON_SPD_DEBUG2,
+ S2MPG10_COMMON_SPD_DEBUG3,
+ S2MPG10_COMMON_SPD_DEBUG4,
+};
+
+/* For S2MPG10_COMMON_INT and S2MPG10_COMMON_INT_MASK */
+#define S2MPG10_COMMON_INT_SRC GENMASK(7, 0)
+#define S2MPG10_COMMON_INT_SRC_PMIC BIT(0)
+
+/* PMIC registers (type 0x100) */
+enum s2mpg10_pmic_reg {
+ S2MPG10_PMIC_INT1,
+ S2MPG10_PMIC_INT2,
+ S2MPG10_PMIC_INT3,
+ S2MPG10_PMIC_INT4,
+ S2MPG10_PMIC_INT5,
+ S2MPG10_PMIC_INT6,
+ S2MPG10_PMIC_INT1M,
+ S2MPG10_PMIC_INT2M,
+ S2MPG10_PMIC_INT3M,
+ S2MPG10_PMIC_INT4M,
+ S2MPG10_PMIC_INT5M,
+ S2MPG10_PMIC_INT6M,
+ S2MPG10_PMIC_STATUS1,
+ S2MPG10_PMIC_STATUS2,
+ S2MPG10_PMIC_PWRONSRC,
+ S2MPG10_PMIC_OFFSRC,
+ S2MPG10_PMIC_BU_CHG,
+ S2MPG10_PMIC_RTCBUF,
+ S2MPG10_PMIC_COMMON_CTRL1,
+ S2MPG10_PMIC_COMMON_CTRL2,
+ S2MPG10_PMIC_COMMON_CTRL3,
+ S2MPG10_PMIC_COMMON_CTRL4,
+ S2MPG10_PMIC_SMPL_WARN_CTRL,
+ S2MPG10_PMIC_MIMICKING_CTRL,
+ S2MPG10_PMIC_B1M_CTRL,
+ S2MPG10_PMIC_B1M_OUT1,
+ S2MPG10_PMIC_B1M_OUT2,
+ S2MPG10_PMIC_B2M_CTRL,
+ S2MPG10_PMIC_B2M_OUT1,
+ S2MPG10_PMIC_B2M_OUT2,
+ S2MPG10_PMIC_B3M_CTRL,
+ S2MPG10_PMIC_B3M_OUT1,
+ S2MPG10_PMIC_B3M_OUT2,
+ S2MPG10_PMIC_B4M_CTRL,
+ S2MPG10_PMIC_B4M_OUT1,
+ S2MPG10_PMIC_B4M_OUT2,
+ S2MPG10_PMIC_B5M_CTRL,
+ S2MPG10_PMIC_B5M_OUT1,
+ S2MPG10_PMIC_B5M_OUT2,
+ S2MPG10_PMIC_B6M_CTRL,
+ S2MPG10_PMIC_B6M_OUT1,
+ S2MPG10_PMIC_B6M_OUT2,
+ S2MPG10_PMIC_B7M_CTRL,
+ S2MPG10_PMIC_B7M_OUT1,
+ S2MPG10_PMIC_B7M_OUT2,
+ S2MPG10_PMIC_B8M_CTRL,
+ S2MPG10_PMIC_B8M_OUT1,
+ S2MPG10_PMIC_B8M_OUT2,
+ S2MPG10_PMIC_B9M_CTRL,
+ S2MPG10_PMIC_B9M_OUT1,
+ S2MPG10_PMIC_B9M_OUT2,
+ S2MPG10_PMIC_B10M_CTRL,
+ S2MPG10_PMIC_B10M_OUT1,
+ S2MPG10_PMIC_B10M_OUT2,
+ S2MPG10_PMIC_BUCK1M_USONIC,
+ S2MPG10_PMIC_BUCK2M_USONIC,
+ S2MPG10_PMIC_BUCK3M_USONIC,
+ S2MPG10_PMIC_BUCK4M_USONIC,
+ S2MPG10_PMIC_BUCK5M_USONIC,
+ S2MPG10_PMIC_BUCK6M_USONIC,
+ S2MPG10_PMIC_BUCK7M_USONIC,
+ S2MPG10_PMIC_BUCK8M_USONIC,
+ S2MPG10_PMIC_BUCK9M_USONIC,
+ S2MPG10_PMIC_BUCK10M_USONIC,
+ S2MPG10_PMIC_L1M_CTRL,
+ S2MPG10_PMIC_L2M_CTRL,
+ S2MPG10_PMIC_L3M_CTRL,
+ S2MPG10_PMIC_L4M_CTRL,
+ S2MPG10_PMIC_L5M_CTRL,
+ S2MPG10_PMIC_L6M_CTRL,
+ S2MPG10_PMIC_L7M_CTRL,
+ S2MPG10_PMIC_L8M_CTRL,
+ S2MPG10_PMIC_L9M_CTRL,
+ S2MPG10_PMIC_L10M_CTRL,
+ S2MPG10_PMIC_L11M_CTRL1,
+ S2MPG10_PMIC_L11M_CTRL2,
+ S2MPG10_PMIC_L12M_CTRL1,
+ S2MPG10_PMIC_L12M_CTRL2,
+ S2MPG10_PMIC_L13M_CTRL1,
+ S2MPG10_PMIC_L13M_CTRL2,
+ S2MPG10_PMIC_L14M_CTRL,
+ S2MPG10_PMIC_L15M_CTRL1,
+ S2MPG10_PMIC_L15M_CTRL2,
+ S2MPG10_PMIC_L16M_CTRL,
+ S2MPG10_PMIC_L17M_CTRL,
+ S2MPG10_PMIC_L18M_CTRL,
+ S2MPG10_PMIC_L19M_CTRL,
+ S2MPG10_PMIC_L20M_CTRL,
+ S2MPG10_PMIC_L21M_CTRL,
+ S2MPG10_PMIC_L22M_CTRL,
+ S2MPG10_PMIC_L23M_CTRL,
+ S2MPG10_PMIC_L24M_CTRL,
+ S2MPG10_PMIC_L25M_CTRL,
+ S2MPG10_PMIC_L26M_CTRL,
+ S2MPG10_PMIC_L27M_CTRL,
+ S2MPG10_PMIC_L28M_CTRL,
+ S2MPG10_PMIC_L29M_CTRL,
+ S2MPG10_PMIC_L30M_CTRL,
+ S2MPG10_PMIC_L31M_CTRL,
+ S2MPG10_PMIC_LDO_CTRL1,
+ S2MPG10_PMIC_LDO_CTRL2,
+ S2MPG10_PMIC_LDO_DSCH1,
+ S2MPG10_PMIC_LDO_DSCH2,
+ S2MPG10_PMIC_LDO_DSCH3,
+ S2MPG10_PMIC_LDO_DSCH4,
+ S2MPG10_PMIC_LDO_BUCK7M_HLIMIT,
+ S2MPG10_PMIC_LDO_BUCK7M_LLIMIT,
+ S2MPG10_PMIC_LDO_LDO21M_HLIMIT,
+ S2MPG10_PMIC_LDO_LDO21M_LLIMIT,
+ S2MPG10_PMIC_LDO_LDO11M_HLIMIT,
+ S2MPG10_PMIC_DVS_RAMP1,
+ S2MPG10_PMIC_DVS_RAMP2,
+ S2MPG10_PMIC_DVS_RAMP3,
+ S2MPG10_PMIC_DVS_RAMP4,
+ S2MPG10_PMIC_DVS_RAMP5,
+ S2MPG10_PMIC_DVS_RAMP6,
+ S2MPG10_PMIC_DVS_SYNC_CTRL1,
+ S2MPG10_PMIC_DVS_SYNC_CTRL2,
+ S2MPG10_PMIC_DVS_SYNC_CTRL3,
+ S2MPG10_PMIC_DVS_SYNC_CTRL4,
+ S2MPG10_PMIC_DVS_SYNC_CTRL5,
+ S2MPG10_PMIC_DVS_SYNC_CTRL6,
+ S2MPG10_PMIC_OFF_CTRL1,
+ S2MPG10_PMIC_OFF_CTRL2,
+ S2MPG10_PMIC_OFF_CTRL3,
+ S2MPG10_PMIC_OFF_CTRL4,
+ S2MPG10_PMIC_SEQ_CTRL1,
+ S2MPG10_PMIC_SEQ_CTRL2,
+ S2MPG10_PMIC_SEQ_CTRL3,
+ S2MPG10_PMIC_SEQ_CTRL4,
+ S2MPG10_PMIC_SEQ_CTRL5,
+ S2MPG10_PMIC_SEQ_CTRL6,
+ S2MPG10_PMIC_SEQ_CTRL7,
+ S2MPG10_PMIC_SEQ_CTRL8,
+ S2MPG10_PMIC_SEQ_CTRL9,
+ S2MPG10_PMIC_SEQ_CTRL10,
+ S2MPG10_PMIC_SEQ_CTRL11,
+ S2MPG10_PMIC_SEQ_CTRL12,
+ S2MPG10_PMIC_SEQ_CTRL13,
+ S2MPG10_PMIC_SEQ_CTRL14,
+ S2MPG10_PMIC_SEQ_CTRL15,
+ S2MPG10_PMIC_SEQ_CTRL16,
+ S2MPG10_PMIC_SEQ_CTRL17,
+ S2MPG10_PMIC_SEQ_CTRL18,
+ S2MPG10_PMIC_SEQ_CTRL19,
+ S2MPG10_PMIC_SEQ_CTRL20,
+ S2MPG10_PMIC_SEQ_CTRL21,
+ S2MPG10_PMIC_SEQ_CTRL22,
+ S2MPG10_PMIC_SEQ_CTRL23,
+ S2MPG10_PMIC_SEQ_CTRL24,
+ S2MPG10_PMIC_SEQ_CTRL25,
+ S2MPG10_PMIC_SEQ_CTRL26,
+ S2MPG10_PMIC_SEQ_CTRL27,
+ S2MPG10_PMIC_SEQ_CTRL28,
+ S2MPG10_PMIC_SEQ_CTRL29,
+ S2MPG10_PMIC_SEQ_CTRL30,
+ S2MPG10_PMIC_SEQ_CTRL31,
+ S2MPG10_PMIC_SEQ_CTRL32,
+ S2MPG10_PMIC_SEQ_CTRL33,
+ S2MPG10_PMIC_SEQ_CTRL34,
+ S2MPG10_PMIC_SEQ_CTRL35,
+ S2MPG10_PMIC_OFF_SEQ_CTRL1,
+ S2MPG10_PMIC_OFF_SEQ_CTRL2,
+ S2MPG10_PMIC_OFF_SEQ_CTRL3,
+ S2MPG10_PMIC_OFF_SEQ_CTRL4,
+ S2MPG10_PMIC_OFF_SEQ_CTRL5,
+ S2MPG10_PMIC_OFF_SEQ_CTRL6,
+ S2MPG10_PMIC_OFF_SEQ_CTRL7,
+ S2MPG10_PMIC_OFF_SEQ_CTRL8,
+ S2MPG10_PMIC_OFF_SEQ_CTRL9,
+ S2MPG10_PMIC_OFF_SEQ_CTRL10,
+ S2MPG10_PMIC_OFF_SEQ_CTRL11,
+ S2MPG10_PMIC_OFF_SEQ_CTRL12,
+ S2MPG10_PMIC_OFF_SEQ_CTRL13,
+ S2MPG10_PMIC_OFF_SEQ_CTRL14,
+ S2MPG10_PMIC_OFF_SEQ_CTRL15,
+ S2MPG10_PMIC_OFF_SEQ_CTRL16,
+ S2MPG10_PMIC_OFF_SEQ_CTRL17,
+ S2MPG10_PMIC_OFF_SEQ_CTRL18,
+ S2MPG10_PMIC_PCTRLSEL1,
+ S2MPG10_PMIC_PCTRLSEL2,
+ S2MPG10_PMIC_PCTRLSEL3,
+ S2MPG10_PMIC_PCTRLSEL4,
+ S2MPG10_PMIC_PCTRLSEL5,
+ S2MPG10_PMIC_PCTRLSEL6,
+ S2MPG10_PMIC_PCTRLSEL7,
+ S2MPG10_PMIC_PCTRLSEL8,
+ S2MPG10_PMIC_PCTRLSEL9,
+ S2MPG10_PMIC_PCTRLSEL10,
+ S2MPG10_PMIC_PCTRLSEL11,
+ S2MPG10_PMIC_PCTRLSEL12,
+ S2MPG10_PMIC_PCTRLSEL13,
+ S2MPG10_PMIC_DCTRLSEL1,
+ S2MPG10_PMIC_DCTRLSEL2,
+ S2MPG10_PMIC_DCTRLSEL3,
+ S2MPG10_PMIC_DCTRLSEL4,
+ S2MPG10_PMIC_DCTRLSEL5,
+ S2MPG10_PMIC_DCTRLSEL6,
+ S2MPG10_PMIC_DCTRLSEL7,
+ S2MPG10_PMIC_GPIO_CTRL1,
+ S2MPG10_PMIC_GPIO_CTRL2,
+ S2MPG10_PMIC_GPIO_CTRL3,
+ S2MPG10_PMIC_GPIO_CTRL4,
+ S2MPG10_PMIC_GPIO_CTRL5,
+ S2MPG10_PMIC_GPIO_CTRL6,
+ S2MPG10_PMIC_GPIO_CTRL7,
+ S2MPG10_PMIC_B2M_OCP_WARN,
+ S2MPG10_PMIC_B2M_OCP_WARN_X,
+ S2MPG10_PMIC_B2M_OCP_WARN_Y,
+ S2MPG10_PMIC_B2M_OCP_WARN_Z,
+ S2MPG10_PMIC_B3M_OCP_WARN,
+ S2MPG10_PMIC_B3M_OCP_WARN_X,
+ S2MPG10_PMIC_B3M_OCP_WARN_Y,
+ S2MPG10_PMIC_B3M_OCP_WARN_Z,
+ S2MPG10_PMIC_B10M_OCP_WARN,
+ S2MPG10_PMIC_B10M_OCP_WARN_X,
+ S2MPG10_PMIC_B10M_OCP_WARN_Y,
+ S2MPG10_PMIC_B10M_OCP_WARN_Z,
+ S2MPG10_PMIC_B2M_SOFT_OCP_WARN,
+ S2MPG10_PMIC_B2M_SOFT_OCP_WARN_X,
+ S2MPG10_PMIC_B2M_SOFT_OCP_WARN_Y,
+ S2MPG10_PMIC_B2M_SOFT_OCP_WARN_Z,
+ S2MPG10_PMIC_B3M_SOFT_OCP_WARN,
+ S2MPG10_PMIC_B3M_SOFT_OCP_WARN_X,
+ S2MPG10_PMIC_B3M_SOFT_OCP_WARN_Y,
+ S2MPG10_PMIC_B3M_SOFT_OCP_WARN_Z,
+ S2MPG10_PMIC_B10M_SOFT_OCP_WARN,
+ S2MPG10_PMIC_B10M_SOFT_OCP_WARN_X,
+ S2MPG10_PMIC_B10M_SOFT_OCP_WARN_Y,
+ S2MPG10_PMIC_B10M_SOFT_OCP_WARN_Z,
+ S2MPG10_PMIC_BUCK_OCP_EN1,
+ S2MPG10_PMIC_BUCK_OCP_EN2,
+ S2MPG10_PMIC_BUCK_OCP_PD_EN1,
+ S2MPG10_PMIC_BUCK_OCP_PD_EN2,
+ S2MPG10_PMIC_BUCK_OCP_CTRL1,
+ S2MPG10_PMIC_BUCK_OCP_CTRL2,
+ S2MPG10_PMIC_BUCK_OCP_CTRL3,
+ S2MPG10_PMIC_BUCK_OCP_CTRL4,
+ S2MPG10_PMIC_BUCK_OCP_CTRL5,
+ S2MPG10_PMIC_PIF_CTRL,
+ S2MPG10_PMIC_BUCK_HR_MODE1,
+ S2MPG10_PMIC_BUCK_HR_MODE2,
+ S2MPG10_PMIC_FAULTOUT_CTRL,
+ S2MPG10_PMIC_LDO_SENSE1,
+ S2MPG10_PMIC_LDO_SENSE2,
+ S2MPG10_PMIC_LDO_SENSE3,
+ S2MPG10_PMIC_LDO_SENSE4,
+};
+
+/* Meter registers (type 0xa00) */
+enum s2mpg10_meter_reg {
+ S2MPG10_METER_CTRL1,
+ S2MPG10_METER_CTRL2,
+ S2MPG10_METER_CTRL3,
+ S2MPG10_METER_CTRL4,
+ S2MPG10_METER_BUCKEN1,
+ S2MPG10_METER_BUCKEN2,
+ S2MPG10_METER_MUXSEL0,
+ S2MPG10_METER_MUXSEL1,
+ S2MPG10_METER_MUXSEL2,
+ S2MPG10_METER_MUXSEL3,
+ S2MPG10_METER_MUXSEL4,
+ S2MPG10_METER_MUXSEL5,
+ S2MPG10_METER_MUXSEL6,
+ S2MPG10_METER_MUXSEL7,
+ S2MPG10_METER_LPF_C0_0,
+ S2MPG10_METER_LPF_C0_1,
+ S2MPG10_METER_LPF_C0_2,
+ S2MPG10_METER_LPF_C0_3,
+ S2MPG10_METER_LPF_C0_4,
+ S2MPG10_METER_LPF_C0_5,
+ S2MPG10_METER_LPF_C0_6,
+ S2MPG10_METER_LPF_C0_7,
+ S2MPG10_METER_PWR_WARN0,
+ S2MPG10_METER_PWR_WARN1,
+ S2MPG10_METER_PWR_WARN2,
+ S2MPG10_METER_PWR_WARN3,
+ S2MPG10_METER_PWR_WARN4,
+ S2MPG10_METER_PWR_WARN5,
+ S2MPG10_METER_PWR_WARN6,
+ S2MPG10_METER_PWR_WARN7,
+ S2MPG10_METER_PWR_HYS1,
+ S2MPG10_METER_PWR_HYS2,
+ S2MPG10_METER_PWR_HYS3,
+ S2MPG10_METER_PWR_HYS4,
+ S2MPG10_METER_ACC_DATA_CH0_1 = 0x40,
+ S2MPG10_METER_ACC_DATA_CH0_2,
+ S2MPG10_METER_ACC_DATA_CH0_3,
+ S2MPG10_METER_ACC_DATA_CH0_4,
+ S2MPG10_METER_ACC_DATA_CH0_5,
+ S2MPG10_METER_ACC_DATA_CH0_6,
+ S2MPG10_METER_ACC_DATA_CH1_1,
+ S2MPG10_METER_ACC_DATA_CH1_2,
+ S2MPG10_METER_ACC_DATA_CH1_3,
+ S2MPG10_METER_ACC_DATA_CH1_4,
+ S2MPG10_METER_ACC_DATA_CH1_5,
+ S2MPG10_METER_ACC_DATA_CH1_6,
+ S2MPG10_METER_ACC_DATA_CH2_1,
+ S2MPG10_METER_ACC_DATA_CH2_2,
+ S2MPG10_METER_ACC_DATA_CH2_3,
+ S2MPG10_METER_ACC_DATA_CH2_4,
+ S2MPG10_METER_ACC_DATA_CH2_5,
+ S2MPG10_METER_ACC_DATA_CH2_6,
+ S2MPG10_METER_ACC_DATA_CH3_1,
+ S2MPG10_METER_ACC_DATA_CH3_2,
+ S2MPG10_METER_ACC_DATA_CH3_3,
+ S2MPG10_METER_ACC_DATA_CH3_4,
+ S2MPG10_METER_ACC_DATA_CH3_5,
+ S2MPG10_METER_ACC_DATA_CH3_6,
+ S2MPG10_METER_ACC_DATA_CH4_1,
+ S2MPG10_METER_ACC_DATA_CH4_2,
+ S2MPG10_METER_ACC_DATA_CH4_3,
+ S2MPG10_METER_ACC_DATA_CH4_4,
+ S2MPG10_METER_ACC_DATA_CH4_5,
+ S2MPG10_METER_ACC_DATA_CH4_6,
+ S2MPG10_METER_ACC_DATA_CH5_1,
+ S2MPG10_METER_ACC_DATA_CH5_2,
+ S2MPG10_METER_ACC_DATA_CH5_3,
+ S2MPG10_METER_ACC_DATA_CH5_4,
+ S2MPG10_METER_ACC_DATA_CH5_5,
+ S2MPG10_METER_ACC_DATA_CH5_6,
+ S2MPG10_METER_ACC_DATA_CH6_1,
+ S2MPG10_METER_ACC_DATA_CH6_2,
+ S2MPG10_METER_ACC_DATA_CH6_3,
+ S2MPG10_METER_ACC_DATA_CH6_4,
+ S2MPG10_METER_ACC_DATA_CH6_5,
+ S2MPG10_METER_ACC_DATA_CH6_6,
+ S2MPG10_METER_ACC_DATA_CH7_1,
+ S2MPG10_METER_ACC_DATA_CH7_2,
+ S2MPG10_METER_ACC_DATA_CH7_3,
+ S2MPG10_METER_ACC_DATA_CH7_4,
+ S2MPG10_METER_ACC_DATA_CH7_5,
+ S2MPG10_METER_ACC_DATA_CH7_6,
+ S2MPG10_METER_ACC_COUNT_1,
+ S2MPG10_METER_ACC_COUNT_2,
+ S2MPG10_METER_ACC_COUNT_3,
+ S2MPG10_METER_LPF_DATA_CH0_1,
+ S2MPG10_METER_LPF_DATA_CH0_2,
+ S2MPG10_METER_LPF_DATA_CH0_3,
+ S2MPG10_METER_LPF_DATA_CH1_1,
+ S2MPG10_METER_LPF_DATA_CH1_2,
+ S2MPG10_METER_LPF_DATA_CH1_3,
+ S2MPG10_METER_LPF_DATA_CH2_1,
+ S2MPG10_METER_LPF_DATA_CH2_2,
+ S2MPG10_METER_LPF_DATA_CH2_3,
+ S2MPG10_METER_LPF_DATA_CH3_1,
+ S2MPG10_METER_LPF_DATA_CH3_2,
+ S2MPG10_METER_LPF_DATA_CH3_3,
+ S2MPG10_METER_LPF_DATA_CH4_1,
+ S2MPG10_METER_LPF_DATA_CH4_2,
+ S2MPG10_METER_LPF_DATA_CH4_3,
+ S2MPG10_METER_LPF_DATA_CH5_1,
+ S2MPG10_METER_LPF_DATA_CH5_2,
+ S2MPG10_METER_LPF_DATA_CH5_3,
+ S2MPG10_METER_LPF_DATA_CH6_1,
+ S2MPG10_METER_LPF_DATA_CH6_2,
+ S2MPG10_METER_LPF_DATA_CH6_3,
+ S2MPG10_METER_LPF_DATA_CH7_1,
+ S2MPG10_METER_LPF_DATA_CH7_2,
+ S2MPG10_METER_LPF_DATA_CH7_3,
+ S2MPG10_METER_DSM_TRIM_OFFSET = 0xee,
+ S2MPG10_METER_BUCK_METER_TRIM3 = 0xf1,
+};
+
+/* S2MPG10 regulator IDs */
+enum s2mpg10_regulators {
+ S2MPG10_LDO1,
+ S2MPG10_LDO2,
+ S2MPG10_LDO3,
+ S2MPG10_LDO4,
+ S2MPG10_LDO5,
+ S2MPG10_LDO6,
+ S2MPG10_LDO7,
+ S2MPG10_LDO8,
+ S2MPG10_LDO9,
+ S2MPG10_LDO10,
+ S2MPG10_LDO11,
+ S2MPG10_LDO12,
+ S2MPG10_LDO13,
+ S2MPG10_LDO14,
+ S2MPG10_LDO15,
+ S2MPG10_LDO16,
+ S2MPG10_LDO17,
+ S2MPG10_LDO18,
+ S2MPG10_LDO19,
+ S2MPG10_LDO20,
+ S2MPG10_LDO21,
+ S2MPG10_LDO22,
+ S2MPG10_LDO23,
+ S2MPG10_LDO24,
+ S2MPG10_LDO25,
+ S2MPG10_LDO26,
+ S2MPG10_LDO27,
+ S2MPG10_LDO28,
+ S2MPG10_LDO29,
+ S2MPG10_LDO30,
+ S2MPG10_LDO31,
+ S2MPG10_BUCK1,
+ S2MPG10_BUCK2,
+ S2MPG10_BUCK3,
+ S2MPG10_BUCK4,
+ S2MPG10_BUCK5,
+ S2MPG10_BUCK6,
+ S2MPG10_BUCK7,
+ S2MPG10_BUCK8,
+ S2MPG10_BUCK9,
+ S2MPG10_BUCK10,
+ S2MPG10_REGULATOR_MAX,
+};
+
+#endif /* __LINUX_MFD_S2MPG10_H */
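The "(type 0xNNN)" comments above hint that each register block sits behind
its own base. One plausible reading, sketched under that assumption (the
real driver may instead hang each block off a separate regmap); the macro
and helper names are illustrative, not part of the patch.

#define S2MPG10_PMIC_BASE	0x100	/* per the "type 0x100" comment */

static inline unsigned int s2mpg10_pmic_addr(enum s2mpg10_pmic_reg reg)
{
	return S2MPG10_PMIC_BASE + reg;
}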
diff --git a/include/linux/mfd/samsung/s2mpu05.h b/include/linux/mfd/samsung/s2mpu05.h
new file mode 100644
index 000000000000..fcdb6c8adb03
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mpu05.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd
+ * Copyright (c) 2025 Kaustabh Chakraborty <kauschluss@disroot.org>
+ */
+
+#ifndef __LINUX_MFD_S2MPU05_H
+#define __LINUX_MFD_S2MPU05_H
+
+/* S2MPU05 registers */
+enum S2MPU05_reg {
+ S2MPU05_REG_ID,
+ S2MPU05_REG_INT1,
+ S2MPU05_REG_INT2,
+ S2MPU05_REG_INT3,
+ S2MPU05_REG_INT1M,
+ S2MPU05_REG_INT2M,
+ S2MPU05_REG_INT3M,
+ S2MPU05_REG_ST1,
+ S2MPU05_REG_ST2,
+ S2MPU05_REG_PWRONSRC,
+ S2MPU05_REG_OFFSRC,
+ S2MPU05_REG_BU_CHG,
+ S2MPU05_REG_RTC_BUF,
+ S2MPU05_REG_CTRL1,
+ S2MPU05_REG_CTRL2,
+ S2MPU05_REG_ETC_TEST,
+ S2MPU05_REG_OTP_ADRL,
+ S2MPU05_REG_OTP_ADRH,
+ S2MPU05_REG_OTP_DATA,
+ S2MPU05_REG_MON1SEL,
+ S2MPU05_REG_MON2SEL,
+ S2MPU05_REG_CTRL3,
+ S2MPU05_REG_ETC_OTP,
+ S2MPU05_REG_UVLO,
+ S2MPU05_REG_TIME_CTRL1,
+ S2MPU05_REG_TIME_CTRL2,
+ S2MPU05_REG_B1CTRL1,
+ S2MPU05_REG_B1CTRL2,
+ S2MPU05_REG_B2CTRL1,
+ S2MPU05_REG_B2CTRL2,
+ S2MPU05_REG_B2CTRL3,
+ S2MPU05_REG_B2CTRL4,
+ S2MPU05_REG_B3CTRL1,
+ S2MPU05_REG_B3CTRL2,
+ S2MPU05_REG_B3CTRL3,
+ S2MPU05_REG_B4CTRL1,
+ S2MPU05_REG_B4CTRL2,
+ S2MPU05_REG_B5CTRL1,
+ S2MPU05_REG_B5CTRL2,
+ S2MPU05_REG_BUCK_RAMP,
+ S2MPU05_REG_LDO_DVS1,
+ S2MPU05_REG_LDO_DVS9,
+ S2MPU05_REG_LDO_DVS10,
+ S2MPU05_REG_L1CTRL,
+ S2MPU05_REG_L2CTRL,
+ S2MPU05_REG_L3CTRL,
+ S2MPU05_REG_L4CTRL,
+ S2MPU05_REG_L5CTRL,
+ S2MPU05_REG_L6CTRL,
+ S2MPU05_REG_L7CTRL,
+ S2MPU05_REG_L8CTRL,
+ S2MPU05_REG_L9CTRL1,
+ S2MPU05_REG_L9CTRL2,
+ S2MPU05_REG_L10CTRL,
+ S2MPU05_REG_L11CTRL1,
+ S2MPU05_REG_L11CTRL2,
+ S2MPU05_REG_L12CTRL,
+ S2MPU05_REG_L13CTRL,
+ S2MPU05_REG_L14CTRL,
+ S2MPU05_REG_L15CTRL,
+ S2MPU05_REG_L16CTRL,
+ S2MPU05_REG_L17CTRL1,
+ S2MPU05_REG_L17CTRL2,
+ S2MPU05_REG_L18CTRL1,
+ S2MPU05_REG_L18CTRL2,
+ S2MPU05_REG_L19CTRL,
+ S2MPU05_REG_L20CTRL,
+ S2MPU05_REG_L21CTRL,
+ S2MPU05_REG_L22CTRL,
+ S2MPU05_REG_L23CTRL,
+ S2MPU05_REG_L24CTRL,
+ S2MPU05_REG_L25CTRL,
+ S2MPU05_REG_L26CTRL,
+ S2MPU05_REG_L27CTRL,
+ S2MPU05_REG_L28CTRL,
+ S2MPU05_REG_L29CTRL,
+ S2MPU05_REG_L30CTRL,
+ S2MPU05_REG_L31CTRL,
+ S2MPU05_REG_L32CTRL,
+ S2MPU05_REG_L33CTRL,
+ S2MPU05_REG_L34CTRL,
+ S2MPU05_REG_L35CTRL,
+ S2MPU05_REG_LDO_DSCH1,
+ S2MPU05_REG_LDO_DSCH2,
+ S2MPU05_REG_LDO_DSCH3,
+ S2MPU05_REG_LDO_DSCH4,
+ S2MPU05_REG_LDO_DSCH5,
+ S2MPU05_REG_LDO_CTRL1,
+ S2MPU05_REG_LDO_CTRL2,
+ S2MPU05_REG_TCXO_CTRL,
+ S2MPU05_REG_SELMIF,
+};
+
+/* S2MPU05 regulator ids */
+enum S2MPU05_regulators {
+ S2MPU05_LDO1,
+ S2MPU05_LDO2,
+ S2MPU05_LDO3,
+ S2MPU05_LDO4,
+ S2MPU05_LDO5,
+ S2MPU05_LDO6,
+ S2MPU05_LDO7,
+ S2MPU05_LDO8,
+ S2MPU05_LDO9,
+ S2MPU05_LDO10,
+ S2MPU05_LDO11,
+ S2MPU05_LDO12,
+ S2MPU05_LDO13,
+ S2MPU05_LDO14,
+ S2MPU05_LDO15,
+ S2MPU05_LDO16,
+ S2MPU05_LDO17,
+ S2MPU05_LDO18,
+ S2MPU05_LDO19,
+ S2MPU05_LDO20,
+ S2MPU05_LDO21,
+ S2MPU05_LDO22,
+ S2MPU05_LDO23,
+ S2MPU05_LDO24,
+ S2MPU05_LDO25,
+ S2MPU05_LDO26,
+ S2MPU05_LDO27,
+ S2MPU05_LDO28,
+ S2MPU05_LDO29,
+ S2MPU05_LDO30,
+ S2MPU05_LDO31,
+ S2MPU05_LDO32,
+ S2MPU05_LDO33,
+ S2MPU05_LDO34,
+ S2MPU05_LDO35,
+ S2MPU05_BUCK1,
+ S2MPU05_BUCK2,
+ S2MPU05_BUCK3,
+ S2MPU05_BUCK4,
+ S2MPU05_BUCK5,
+
+ S2MPU05_REGULATOR_MAX,
+};
+
+#define S2MPU05_SW_ENABLE_MASK 0x03
+
+#define S2MPU05_ENABLE_TIME_LDO 128
+#define S2MPU05_ENABLE_TIME_BUCK1 110
+#define S2MPU05_ENABLE_TIME_BUCK2 110
+#define S2MPU05_ENABLE_TIME_BUCK3 110
+#define S2MPU05_ENABLE_TIME_BUCK4 150
+#define S2MPU05_ENABLE_TIME_BUCK5 150
+
+#define S2MPU05_LDO_MIN1 800000
+#define S2MPU05_LDO_MIN2 1800000
+#define S2MPU05_LDO_MIN3 400000
+#define S2MPU05_LDO_STEP1 12500
+#define S2MPU05_LDO_STEP2 25000
+
+#define S2MPU05_BUCK_MIN1 400000
+#define S2MPU05_BUCK_MIN2 600000
+#define S2MPU05_BUCK_STEP1 6250
+#define S2MPU05_BUCK_STEP2 12500
+
+#define S2MPU05_RAMP_DELAY 12000 /* uV/us */
+
+#define S2MPU05_ENABLE_SHIFT 6
+#define S2MPU05_ENABLE_MASK (0x03 << S2MPU05_ENABLE_SHIFT)
+
+#define S2MPU05_LDO_VSEL_MASK 0x3F
+#define S2MPU05_BUCK_VSEL_MASK 0xFF
+#define S2MPU05_LDO_N_VOLTAGES (S2MPU05_LDO_VSEL_MASK + 1)
+#define S2MPU05_BUCK_N_VOLTAGES (S2MPU05_BUCK_VSEL_MASK + 1)
+
+#define S2MPU05_PMIC_EN_SHIFT 6
+
+#endif /* __LINUX_MFD_S2MPU05_H */
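The MIN/STEP pairs describe linear voltage ranges, so a selector converts to
microvolts as min + sel * step; e.g. a range-1 LDO with selector 32 gives
800000 + 32 * 12500 = 1200000 uV (1.2 V). A hedged sketch of that
arithmetic; the helper name is illustrative only.

static inline int s2mpu05_ldo_range1_uV(unsigned int sel)
{
	if (sel > S2MPU05_LDO_VSEL_MASK)
		return -EINVAL;
	return S2MPU05_LDO_MIN1 + sel * S2MPU05_LDO_STEP1;
}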
diff --git a/include/linux/mfd/samsung/s5m8763.h b/include/linux/mfd/samsung/s5m8763.h
deleted file mode 100644
index c534f086ca16..000000000000
--- a/include/linux/mfd/samsung/s5m8763.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (c) 2011 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- */
-
-#ifndef __LINUX_MFD_S5M8763_H
-#define __LINUX_MFD_S5M8763_H
-
-/* S5M8763 registers */
-enum s5m8763_reg {
- S5M8763_REG_IRQ1,
- S5M8763_REG_IRQ2,
- S5M8763_REG_IRQ3,
- S5M8763_REG_IRQ4,
- S5M8763_REG_IRQM1,
- S5M8763_REG_IRQM2,
- S5M8763_REG_IRQM3,
- S5M8763_REG_IRQM4,
- S5M8763_REG_STATUS1,
- S5M8763_REG_STATUS2,
- S5M8763_REG_STATUSM1,
- S5M8763_REG_STATUSM2,
- S5M8763_REG_CHGR1,
- S5M8763_REG_CHGR2,
- S5M8763_REG_LDO_ACTIVE_DISCHARGE1,
- S5M8763_REG_LDO_ACTIVE_DISCHARGE2,
- S5M8763_REG_BUCK_ACTIVE_DISCHARGE3,
- S5M8763_REG_ONOFF1,
- S5M8763_REG_ONOFF2,
- S5M8763_REG_ONOFF3,
- S5M8763_REG_ONOFF4,
- S5M8763_REG_BUCK1_VOLTAGE1,
- S5M8763_REG_BUCK1_VOLTAGE2,
- S5M8763_REG_BUCK1_VOLTAGE3,
- S5M8763_REG_BUCK1_VOLTAGE4,
- S5M8763_REG_BUCK2_VOLTAGE1,
- S5M8763_REG_BUCK2_VOLTAGE2,
- S5M8763_REG_BUCK3,
- S5M8763_REG_BUCK4,
- S5M8763_REG_LDO1_LDO2,
- S5M8763_REG_LDO3,
- S5M8763_REG_LDO4,
- S5M8763_REG_LDO5,
- S5M8763_REG_LDO6,
- S5M8763_REG_LDO7,
- S5M8763_REG_LDO7_LDO8,
- S5M8763_REG_LDO9_LDO10,
- S5M8763_REG_LDO11,
- S5M8763_REG_LDO12,
- S5M8763_REG_LDO13,
- S5M8763_REG_LDO14,
- S5M8763_REG_LDO15,
- S5M8763_REG_LDO16,
- S5M8763_REG_BKCHR,
- S5M8763_REG_LBCNFG1,
- S5M8763_REG_LBCNFG2,
-};
-
-/* S5M8763 regulator ids */
-enum s5m8763_regulators {
- S5M8763_LDO1,
- S5M8763_LDO2,
- S5M8763_LDO3,
- S5M8763_LDO4,
- S5M8763_LDO5,
- S5M8763_LDO6,
- S5M8763_LDO7,
- S5M8763_LDO8,
- S5M8763_LDO9,
- S5M8763_LDO10,
- S5M8763_LDO11,
- S5M8763_LDO12,
- S5M8763_LDO13,
- S5M8763_LDO14,
- S5M8763_LDO15,
- S5M8763_LDO16,
- S5M8763_BUCK1,
- S5M8763_BUCK2,
- S5M8763_BUCK3,
- S5M8763_BUCK4,
- S5M8763_AP_EN32KHZ,
- S5M8763_CP_EN32KHZ,
- S5M8763_ENCHGVI,
- S5M8763_ESAFEUSB1,
- S5M8763_ESAFEUSB2,
-};
-
-#define S5M8763_ENRAMP (1 << 4)
-#endif /* __LINUX_MFD_S5M8763_H */
diff --git a/include/linux/mfd/si476x-platform.h b/include/linux/mfd/si476x-platform.h
index 18363b773d07..cb99e16ca947 100644
--- a/include/linux/mfd/si476x-platform.h
+++ b/include/linux/mfd/si476x-platform.h
@@ -10,7 +10,7 @@
#ifndef __SI476X_PLATFORM_H__
#define __SI476X_PLATFORM_H__
-/* It is possible to select one of the four adresses using pins A0
+/* It is possible to select one of the four addresses using pins A0
* and A1 on SI476x */
#define SI476X_I2C_ADDR_1 0x60
#define SI476X_I2C_ADDR_2 0x61
diff --git a/include/linux/mfd/sta2x11-mfd.h b/include/linux/mfd/sta2x11-mfd.h
deleted file mode 100644
index 2001ca5c44a9..000000000000
--- a/include/linux/mfd/sta2x11-mfd.h
+++ /dev/null
@@ -1,506 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2009-2011 Wind River Systems, Inc.
- * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
- *
- * The STMicroelectronics ConneXt (STA2X11) chip has several unrelated
- * functions in one PCI endpoint functions. This driver simply
- * registers the platform devices in this iomemregion and exports a few
- * functions to access common registers
- */
-
-#ifndef __STA2X11_MFD_H
-#define __STA2X11_MFD_H
-#include <linux/types.h>
-#include <linux/pci.h>
-
-enum sta2x11_mfd_plat_dev {
- sta2x11_sctl = 0,
- sta2x11_gpio,
- sta2x11_scr,
- sta2x11_time,
- sta2x11_apbreg,
- sta2x11_apb_soc_regs,
- sta2x11_vic,
- sta2x11_n_mfd_plat_devs,
-};
-
-#define STA2X11_MFD_SCTL_NAME "sta2x11-sctl"
-#define STA2X11_MFD_GPIO_NAME "sta2x11-gpio"
-#define STA2X11_MFD_SCR_NAME "sta2x11-scr"
-#define STA2X11_MFD_TIME_NAME "sta2x11-time"
-#define STA2X11_MFD_APBREG_NAME "sta2x11-apbreg"
-#define STA2X11_MFD_APB_SOC_REGS_NAME "sta2x11-apb-soc-regs"
-#define STA2X11_MFD_VIC_NAME "sta2x11-vic"
-
-extern u32
-__sta2x11_mfd_mask(struct pci_dev *, u32, u32, u32, enum sta2x11_mfd_plat_dev);
-
-/*
- * The MFD PCI block includes the GPIO peripherals and other register blocks.
- * For GPIO, we have 32*4 bits (I use "gsta" for "gpio sta2x11".)
- */
-#define GSTA_GPIO_PER_BLOCK 32
-#define GSTA_NR_BLOCKS 4
-#define GSTA_NR_GPIO (GSTA_GPIO_PER_BLOCK * GSTA_NR_BLOCKS)
-
-/* Pinconfig is set by the board definition: altfunc, pull-up, pull-down */
-struct sta2x11_gpio_pdata {
- unsigned pinconfig[GSTA_NR_GPIO];
-};
-
-/* Macros below lifted from sh_pfc.h, with minor differences */
-#define PINMUX_TYPE_NONE 0
-#define PINMUX_TYPE_FUNCTION 1
-#define PINMUX_TYPE_OUTPUT_LOW 2
-#define PINMUX_TYPE_OUTPUT_HIGH 3
-#define PINMUX_TYPE_INPUT 4
-#define PINMUX_TYPE_INPUT_PULLUP 5
-#define PINMUX_TYPE_INPUT_PULLDOWN 6
-
-/* Give names to GPIO pins, like PXA does, taken from the manual */
-#define STA2X11_GPIO0 0
-#define STA2X11_GPIO1 1
-#define STA2X11_GPIO2 2
-#define STA2X11_GPIO3 3
-#define STA2X11_GPIO4 4
-#define STA2X11_GPIO5 5
-#define STA2X11_GPIO6 6
-#define STA2X11_GPIO7 7
-#define STA2X11_GPIO8_RGBOUT_RED7 8
-#define STA2X11_GPIO9_RGBOUT_RED6 9
-#define STA2X11_GPIO10_RGBOUT_RED5 10
-#define STA2X11_GPIO11_RGBOUT_RED4 11
-#define STA2X11_GPIO12_RGBOUT_RED3 12
-#define STA2X11_GPIO13_RGBOUT_RED2 13
-#define STA2X11_GPIO14_RGBOUT_RED1 14
-#define STA2X11_GPIO15_RGBOUT_RED0 15
-#define STA2X11_GPIO16_RGBOUT_GREEN7 16
-#define STA2X11_GPIO17_RGBOUT_GREEN6 17
-#define STA2X11_GPIO18_RGBOUT_GREEN5 18
-#define STA2X11_GPIO19_RGBOUT_GREEN4 19
-#define STA2X11_GPIO20_RGBOUT_GREEN3 20
-#define STA2X11_GPIO21_RGBOUT_GREEN2 21
-#define STA2X11_GPIO22_RGBOUT_GREEN1 22
-#define STA2X11_GPIO23_RGBOUT_GREEN0 23
-#define STA2X11_GPIO24_RGBOUT_BLUE7 24
-#define STA2X11_GPIO25_RGBOUT_BLUE6 25
-#define STA2X11_GPIO26_RGBOUT_BLUE5 26
-#define STA2X11_GPIO27_RGBOUT_BLUE4 27
-#define STA2X11_GPIO28_RGBOUT_BLUE3 28
-#define STA2X11_GPIO29_RGBOUT_BLUE2 29
-#define STA2X11_GPIO30_RGBOUT_BLUE1 30
-#define STA2X11_GPIO31_RGBOUT_BLUE0 31
-#define STA2X11_GPIO32_RGBOUT_VSYNCH 32
-#define STA2X11_GPIO33_RGBOUT_HSYNCH 33
-#define STA2X11_GPIO34_RGBOUT_DEN 34
-#define STA2X11_GPIO35_ETH_CRS_DV 35
-#define STA2X11_GPIO36_ETH_TXD1 36
-#define STA2X11_GPIO37_ETH_TXD0 37
-#define STA2X11_GPIO38_ETH_TX_EN 38
-#define STA2X11_GPIO39_MDIO 39
-#define STA2X11_GPIO40_ETH_REF_CLK 40
-#define STA2X11_GPIO41_ETH_RXD1 41
-#define STA2X11_GPIO42_ETH_RXD0 42
-#define STA2X11_GPIO43_MDC 43
-#define STA2X11_GPIO44_CAN_TX 44
-#define STA2X11_GPIO45_CAN_RX 45
-#define STA2X11_GPIO46_MLB_DAT 46
-#define STA2X11_GPIO47_MLB_SIG 47
-#define STA2X11_GPIO48_SPI0_CLK 48
-#define STA2X11_GPIO49_SPI0_TXD 49
-#define STA2X11_GPIO50_SPI0_RXD 50
-#define STA2X11_GPIO51_SPI0_FRM 51
-#define STA2X11_GPIO52_SPI1_CLK 52
-#define STA2X11_GPIO53_SPI1_TXD 53
-#define STA2X11_GPIO54_SPI1_RXD 54
-#define STA2X11_GPIO55_SPI1_FRM 55
-#define STA2X11_GPIO56_SPI2_CLK 56
-#define STA2X11_GPIO57_SPI2_TXD 57
-#define STA2X11_GPIO58_SPI2_RXD 58
-#define STA2X11_GPIO59_SPI2_FRM 59
-#define STA2X11_GPIO60_I2C0_SCL 60
-#define STA2X11_GPIO61_I2C0_SDA 61
-#define STA2X11_GPIO62_I2C1_SCL 62
-#define STA2X11_GPIO63_I2C1_SDA 63
-#define STA2X11_GPIO64_I2C2_SCL 64
-#define STA2X11_GPIO65_I2C2_SDA 65
-#define STA2X11_GPIO66_I2C3_SCL 66
-#define STA2X11_GPIO67_I2C3_SDA 67
-#define STA2X11_GPIO68_MSP0_RCK 68
-#define STA2X11_GPIO69_MSP0_RXD 69
-#define STA2X11_GPIO70_MSP0_RFS 70
-#define STA2X11_GPIO71_MSP0_TCK 71
-#define STA2X11_GPIO72_MSP0_TXD 72
-#define STA2X11_GPIO73_MSP0_TFS 73
-#define STA2X11_GPIO74_MSP0_SCK 74
-#define STA2X11_GPIO75_MSP1_CK 75
-#define STA2X11_GPIO76_MSP1_RXD 76
-#define STA2X11_GPIO77_MSP1_FS 77
-#define STA2X11_GPIO78_MSP1_TXD 78
-#define STA2X11_GPIO79_MSP2_CK 79
-#define STA2X11_GPIO80_MSP2_RXD 80
-#define STA2X11_GPIO81_MSP2_FS 81
-#define STA2X11_GPIO82_MSP2_TXD 82
-#define STA2X11_GPIO83_MSP3_CK 83
-#define STA2X11_GPIO84_MSP3_RXD 84
-#define STA2X11_GPIO85_MSP3_FS 85
-#define STA2X11_GPIO86_MSP3_TXD 86
-#define STA2X11_GPIO87_MSP4_CK 87
-#define STA2X11_GPIO88_MSP4_RXD 88
-#define STA2X11_GPIO89_MSP4_FS 89
-#define STA2X11_GPIO90_MSP4_TXD 90
-#define STA2X11_GPIO91_MSP5_CK 91
-#define STA2X11_GPIO92_MSP5_RXD 92
-#define STA2X11_GPIO93_MSP5_FS 93
-#define STA2X11_GPIO94_MSP5_TXD 94
-#define STA2X11_GPIO95_SDIO3_DAT3 95
-#define STA2X11_GPIO96_SDIO3_DAT2 96
-#define STA2X11_GPIO97_SDIO3_DAT1 97
-#define STA2X11_GPIO98_SDIO3_DAT0 98
-#define STA2X11_GPIO99_SDIO3_CLK 99
-#define STA2X11_GPIO100_SDIO3_CMD 100
-#define STA2X11_GPIO101 101
-#define STA2X11_GPIO102 102
-#define STA2X11_GPIO103 103
-#define STA2X11_GPIO104 104
-#define STA2X11_GPIO105_SDIO2_DAT3 105
-#define STA2X11_GPIO106_SDIO2_DAT2 106
-#define STA2X11_GPIO107_SDIO2_DAT1 107
-#define STA2X11_GPIO108_SDIO2_DAT0 108
-#define STA2X11_GPIO109_SDIO2_CLK 109
-#define STA2X11_GPIO110_SDIO2_CMD 110
-#define STA2X11_GPIO111 111
-#define STA2X11_GPIO112 112
-#define STA2X11_GPIO113 113
-#define STA2X11_GPIO114 114
-#define STA2X11_GPIO115_SDIO1_DAT3 115
-#define STA2X11_GPIO116_SDIO1_DAT2 116
-#define STA2X11_GPIO117_SDIO1_DAT1 117
-#define STA2X11_GPIO118_SDIO1_DAT0 118
-#define STA2X11_GPIO119_SDIO1_CLK 119
-#define STA2X11_GPIO120_SDIO1_CMD 120
-#define STA2X11_GPIO121 121
-#define STA2X11_GPIO122 122
-#define STA2X11_GPIO123 123
-#define STA2X11_GPIO124 124
-#define STA2X11_GPIO125_UART2_TXD 125
-#define STA2X11_GPIO126_UART2_RXD 126
-#define STA2X11_GPIO127_UART3_TXD 127
-
-/*
- * The APB bridge has its own registers, needed by our users as well.
- * They are accessed with the following read/mask/write function.
- */
-static inline u32
-sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
-{
- return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_apbreg);
-}
-
-/* CAN and MLB */
-#define APBREG_BSR 0x00 /* Bridge Status Reg */
-#define APBREG_PAER 0x08 /* Peripherals Address Error Reg */
-#define APBREG_PWAC 0x20 /* Peripheral Write Access Control reg */
-#define APBREG_PRAC 0x40 /* Peripheral Read Access Control reg */
-#define APBREG_PCG 0x60 /* Peripheral Clock Gating Reg */
-#define APBREG_PUR 0x80 /* Peripheral Under Reset Reg */
-#define APBREG_EMU_PCG 0xA0 /* Emulator Peripheral Clock Gating Reg */
-
-#define APBREG_CAN (1 << 1)
-#define APBREG_MLB (1 << 3)
-
-/* SARAC */
-#define APBREG_BSR_SARAC 0x100 /* Bridge Status Reg */
-#define APBREG_PAER_SARAC 0x108 /* Peripherals Address Error Reg */
-#define APBREG_PWAC_SARAC 0x120 /* Peripheral Write Access Control reg */
-#define APBREG_PRAC_SARAC 0x140 /* Peripheral Read Access Control reg */
-#define APBREG_PCG_SARAC 0x160 /* Peripheral Clock Gating Reg */
-#define APBREG_PUR_SARAC 0x180 /* Peripheral Under Reset Reg */
-#define APBREG_EMU_PCG_SARAC 0x1A0 /* Emulator Peripheral Clock Gating Reg */
-
-#define APBREG_SARAC (1 << 2)
-
-/*
- * The system controller has its own registers. Some of these are accessed
- * by out users as well, using the following read/mask/write/function
- */
-static inline
-u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
-{
- return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_sctl);
-}
-
-#define SCTL_SCCTL 0x00 /* System controller control register */
-#define SCTL_ARMCFG 0x04 /* ARM configuration register */
-#define SCTL_SCPLLCTL 0x08 /* PLL control status register */
-
-#define SCTL_SCPLLCTL_AUDIO_PLL_PD BIT(1)
-#define SCTL_SCPLLCTL_FRAC_CONTROL BIT(3)
-#define SCTL_SCPLLCTL_STRB_BYPASS BIT(6)
-#define SCTL_SCPLLCTL_STRB_INPUT BIT(8)
-
-#define SCTL_SCPLLFCTRL 0x0c /* PLL frequency control register */
-
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_NDIV_MASK 0xff
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_NDIV_SHIFT 10
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_IDF_MASK 7
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_IDF_SHIFT 21
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_ODF_MASK 7
-#define SCTL_SCPLLFCTRL_AUDIO_PLL_ODF_SHIFT 18
-#define SCTL_SCPLLFCTRL_DITHER_DISABLE_MASK 0x03
-#define SCTL_SCPLLFCTRL_DITHER_DISABLE_SHIFT 4
-
-
-#define SCTL_SCRESFRACT 0x10 /* PLL fractional input register */
-
-#define SCTL_SCRESFRACT_MASK 0x0000ffff
-
-
-#define SCTL_SCRESCTRL1 0x14 /* Peripheral reset control 1 */
-#define SCTL_SCRESXTRL2 0x18 /* Peripheral reset control 2 */
-#define SCTL_SCPEREN0 0x1c /* Peripheral clock enable register 0 */
-#define SCTL_SCPEREN1 0x20 /* Peripheral clock enable register 1 */
-#define SCTL_SCPEREN2 0x24 /* Peripheral clock enable register 2 */
-#define SCTL_SCGRST 0x28 /* Peripheral global reset */
-#define SCTL_SCPCIECSBRST 0x2c /* PCIe PAB CSB reset status register */
-#define SCTL_SCPCIPMCR1 0x30 /* PCI power management control 1 */
-#define SCTL_SCPCIPMCR2 0x34 /* PCI power management control 2 */
-#define SCTL_SCPCIPMSR1 0x38 /* PCI power management status 1 */
-#define SCTL_SCPCIPMSR2 0x3c /* PCI power management status 2 */
-#define SCTL_SCPCIPMSR3 0x40 /* PCI power management status 3 */
-#define SCTL_SCINTREN 0x44 /* Interrupt enable */
-#define SCTL_SCRISR 0x48 /* RAW interrupt status */
-#define SCTL_SCCLKSTAT0 0x4c /* Peripheral clocks status 0 */
-#define SCTL_SCCLKSTAT1 0x50 /* Peripheral clocks status 1 */
-#define SCTL_SCCLKSTAT2 0x54 /* Peripheral clocks status 2 */
-#define SCTL_SCRSTSTA 0x58 /* Reset status register */
-
-#define SCTL_SCRESCTRL1_USB_PHY_POR (1 << 0)
-#define SCTL_SCRESCTRL1_USB_OTG (1 << 1)
-#define SCTL_SCRESCTRL1_USB_HRST (1 << 2)
-#define SCTL_SCRESCTRL1_USB_PHY_HOST (1 << 3)
-#define SCTL_SCRESCTRL1_SATAII (1 << 4)
-#define SCTL_SCRESCTRL1_VIP (1 << 5)
-#define SCTL_SCRESCTRL1_PER_MMC0 (1 << 6)
-#define SCTL_SCRESCTRL1_PER_MMC1 (1 << 7)
-#define SCTL_SCRESCTRL1_PER_GPIO0 (1 << 8)
-#define SCTL_SCRESCTRL1_PER_GPIO1 (1 << 9)
-#define SCTL_SCRESCTRL1_PER_GPIO2 (1 << 10)
-#define SCTL_SCRESCTRL1_PER_GPIO3 (1 << 11)
-#define SCTL_SCRESCTRL1_PER_MTU0 (1 << 12)
-#define SCTL_SCRESCTRL1_KER_SPI0 (1 << 13)
-#define SCTL_SCRESCTRL1_KER_SPI1 (1 << 14)
-#define SCTL_SCRESCTRL1_KER_SPI2 (1 << 15)
-#define SCTL_SCRESCTRL1_KER_MCI0 (1 << 16)
-#define SCTL_SCRESCTRL1_KER_MCI1 (1 << 17)
-#define SCTL_SCRESCTRL1_PRE_HSI2C0 (1 << 18)
-#define SCTL_SCRESCTRL1_PER_HSI2C1 (1 << 19)
-#define SCTL_SCRESCTRL1_PER_HSI2C2 (1 << 20)
-#define SCTL_SCRESCTRL1_PER_HSI2C3 (1 << 21)
-#define SCTL_SCRESCTRL1_PER_MSP0 (1 << 22)
-#define SCTL_SCRESCTRL1_PER_MSP1 (1 << 23)
-#define SCTL_SCRESCTRL1_PER_MSP2 (1 << 24)
-#define SCTL_SCRESCTRL1_PER_MSP3 (1 << 25)
-#define SCTL_SCRESCTRL1_PER_MSP4 (1 << 26)
-#define SCTL_SCRESCTRL1_PER_MSP5 (1 << 27)
-#define SCTL_SCRESCTRL1_PER_MMC (1 << 28)
-#define SCTL_SCRESCTRL1_KER_MSP0 (1 << 29)
-#define SCTL_SCRESCTRL1_KER_MSP1 (1 << 30)
-#define SCTL_SCRESCTRL1_KER_MSP2 (1 << 31)
-
-#define SCTL_SCPEREN0_UART0 (1 << 0)
-#define SCTL_SCPEREN0_UART1 (1 << 1)
-#define SCTL_SCPEREN0_UART2 (1 << 2)
-#define SCTL_SCPEREN0_UART3 (1 << 3)
-#define SCTL_SCPEREN0_MSP0 (1 << 4)
-#define SCTL_SCPEREN0_MSP1 (1 << 5)
-#define SCTL_SCPEREN0_MSP2 (1 << 6)
-#define SCTL_SCPEREN0_MSP3 (1 << 7)
-#define SCTL_SCPEREN0_MSP4 (1 << 8)
-#define SCTL_SCPEREN0_MSP5 (1 << 9)
-#define SCTL_SCPEREN0_SPI0 (1 << 10)
-#define SCTL_SCPEREN0_SPI1 (1 << 11)
-#define SCTL_SCPEREN0_SPI2 (1 << 12)
-#define SCTL_SCPEREN0_I2C0 (1 << 13)
-#define SCTL_SCPEREN0_I2C1 (1 << 14)
-#define SCTL_SCPEREN0_I2C2 (1 << 15)
-#define SCTL_SCPEREN0_I2C3 (1 << 16)
-#define SCTL_SCPEREN0_SVDO_LVDS (1 << 17)
-#define SCTL_SCPEREN0_USB_HOST (1 << 18)
-#define SCTL_SCPEREN0_USB_OTG (1 << 19)
-#define SCTL_SCPEREN0_MCI0 (1 << 20)
-#define SCTL_SCPEREN0_MCI1 (1 << 21)
-#define SCTL_SCPEREN0_MCI2 (1 << 22)
-#define SCTL_SCPEREN0_MCI3 (1 << 23)
-#define SCTL_SCPEREN0_SATA (1 << 24)
-#define SCTL_SCPEREN0_ETHERNET (1 << 25)
-#define SCTL_SCPEREN0_VIC (1 << 26)
-#define SCTL_SCPEREN0_DMA_AUDIO (1 << 27)
-#define SCTL_SCPEREN0_DMA_SOC (1 << 28)
-#define SCTL_SCPEREN0_RAM (1 << 29)
-#define SCTL_SCPEREN0_VIP (1 << 30)
-#define SCTL_SCPEREN0_ARM (1 << 31)
-
-#define SCTL_SCPEREN1_UART0 (1 << 0)
-#define SCTL_SCPEREN1_UART1 (1 << 1)
-#define SCTL_SCPEREN1_UART2 (1 << 2)
-#define SCTL_SCPEREN1_UART3 (1 << 3)
-#define SCTL_SCPEREN1_MSP0 (1 << 4)
-#define SCTL_SCPEREN1_MSP1 (1 << 5)
-#define SCTL_SCPEREN1_MSP2 (1 << 6)
-#define SCTL_SCPEREN1_MSP3 (1 << 7)
-#define SCTL_SCPEREN1_MSP4 (1 << 8)
-#define SCTL_SCPEREN1_MSP5 (1 << 9)
-#define SCTL_SCPEREN1_SPI0 (1 << 10)
-#define SCTL_SCPEREN1_SPI1 (1 << 11)
-#define SCTL_SCPEREN1_SPI2 (1 << 12)
-#define SCTL_SCPEREN1_I2C0 (1 << 13)
-#define SCTL_SCPEREN1_I2C1 (1 << 14)
-#define SCTL_SCPEREN1_I2C2 (1 << 15)
-#define SCTL_SCPEREN1_I2C3 (1 << 16)
-#define SCTL_SCPEREN1_USB_PHY (1 << 17)
-
-/*
- * APB-SOC registers
- */
-static inline
-u32 sta2x11_apb_soc_regs_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
-{
- return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_apb_soc_regs);
-}
-
-#define PCIE_EP1_FUNC3_0_INTR_REG 0x000
-#define PCIE_EP1_FUNC7_4_INTR_REG 0x004
-#define PCIE_EP2_FUNC3_0_INTR_REG 0x008
-#define PCIE_EP2_FUNC7_4_INTR_REG 0x00c
-#define PCIE_EP3_FUNC3_0_INTR_REG 0x010
-#define PCIE_EP3_FUNC7_4_INTR_REG 0x014
-#define PCIE_EP4_FUNC3_0_INTR_REG 0x018
-#define PCIE_EP4_FUNC7_4_INTR_REG 0x01c
-#define PCIE_INTR_ENABLE0_REG 0x020
-#define PCIE_INTR_ENABLE1_REG 0x024
-#define PCIE_EP1_FUNC_TC_REG 0x028
-#define PCIE_EP2_FUNC_TC_REG 0x02c
-#define PCIE_EP3_FUNC_TC_REG 0x030
-#define PCIE_EP4_FUNC_TC_REG 0x034
-#define PCIE_EP1_FUNC_F_REG 0x038
-#define PCIE_EP2_FUNC_F_REG 0x03c
-#define PCIE_EP3_FUNC_F_REG 0x040
-#define PCIE_EP4_FUNC_F_REG 0x044
-#define PCIE_PAB_AMBA_SW_RST_REG 0x048
-#define PCIE_PM_STATUS_0_PORT_0_4 0x04c
-#define PCIE_PM_STATUS_7_0_EP1 0x050
-#define PCIE_PM_STATUS_7_0_EP2 0x054
-#define PCIE_PM_STATUS_7_0_EP3 0x058
-#define PCIE_PM_STATUS_7_0_EP4 0x05c
-#define PCIE_DEV_ID_0_EP1_REG 0x060
-#define PCIE_CC_REV_ID_0_EP1_REG 0x064
-#define PCIE_DEV_ID_1_EP1_REG 0x068
-#define PCIE_CC_REV_ID_1_EP1_REG 0x06c
-#define PCIE_DEV_ID_2_EP1_REG 0x070
-#define PCIE_CC_REV_ID_2_EP1_REG 0x074
-#define PCIE_DEV_ID_3_EP1_REG 0x078
-#define PCIE_CC_REV_ID_3_EP1_REG 0x07c
-#define PCIE_DEV_ID_4_EP1_REG 0x080
-#define PCIE_CC_REV_ID_4_EP1_REG 0x084
-#define PCIE_DEV_ID_5_EP1_REG 0x088
-#define PCIE_CC_REV_ID_5_EP1_REG 0x08c
-#define PCIE_DEV_ID_6_EP1_REG 0x090
-#define PCIE_CC_REV_ID_6_EP1_REG 0x094
-#define PCIE_DEV_ID_7_EP1_REG 0x098
-#define PCIE_CC_REV_ID_7_EP1_REG 0x09c
-#define PCIE_DEV_ID_0_EP2_REG 0x0a0
-#define PCIE_CC_REV_ID_0_EP2_REG 0x0a4
-#define PCIE_DEV_ID_1_EP2_REG 0x0a8
-#define PCIE_CC_REV_ID_1_EP2_REG 0x0ac
-#define PCIE_DEV_ID_2_EP2_REG 0x0b0
-#define PCIE_CC_REV_ID_2_EP2_REG 0x0b4
-#define PCIE_DEV_ID_3_EP2_REG 0x0b8
-#define PCIE_CC_REV_ID_3_EP2_REG 0x0bc
-#define PCIE_DEV_ID_4_EP2_REG 0x0c0
-#define PCIE_CC_REV_ID_4_EP2_REG 0x0c4
-#define PCIE_DEV_ID_5_EP2_REG 0x0c8
-#define PCIE_CC_REV_ID_5_EP2_REG 0x0cc
-#define PCIE_DEV_ID_6_EP2_REG 0x0d0
-#define PCIE_CC_REV_ID_6_EP2_REG 0x0d4
-#define PCIE_DEV_ID_7_EP2_REG 0x0d8
-#define PCIE_CC_REV_ID_7_EP2_REG 0x0dC
-#define PCIE_DEV_ID_0_EP3_REG 0x0e0
-#define PCIE_CC_REV_ID_0_EP3_REG 0x0e4
-#define PCIE_DEV_ID_1_EP3_REG 0x0e8
-#define PCIE_CC_REV_ID_1_EP3_REG 0x0ec
-#define PCIE_DEV_ID_2_EP3_REG 0x0f0
-#define PCIE_CC_REV_ID_2_EP3_REG 0x0f4
-#define PCIE_DEV_ID_3_EP3_REG 0x0f8
-#define PCIE_CC_REV_ID_3_EP3_REG 0x0fc
-#define PCIE_DEV_ID_4_EP3_REG 0x100
-#define PCIE_CC_REV_ID_4_EP3_REG 0x104
-#define PCIE_DEV_ID_5_EP3_REG 0x108
-#define PCIE_CC_REV_ID_5_EP3_REG 0x10c
-#define PCIE_DEV_ID_6_EP3_REG 0x110
-#define PCIE_CC_REV_ID_6_EP3_REG 0x114
-#define PCIE_DEV_ID_7_EP3_REG 0x118
-#define PCIE_CC_REV_ID_7_EP3_REG 0x11c
-#define PCIE_DEV_ID_0_EP4_REG 0x120
-#define PCIE_CC_REV_ID_0_EP4_REG 0x124
-#define PCIE_DEV_ID_1_EP4_REG 0x128
-#define PCIE_CC_REV_ID_1_EP4_REG 0x12c
-#define PCIE_DEV_ID_2_EP4_REG 0x130
-#define PCIE_CC_REV_ID_2_EP4_REG 0x134
-#define PCIE_DEV_ID_3_EP4_REG 0x138
-#define PCIE_CC_REV_ID_3_EP4_REG 0x13c
-#define PCIE_DEV_ID_4_EP4_REG 0x140
-#define PCIE_CC_REV_ID_4_EP4_REG 0x144
-#define PCIE_DEV_ID_5_EP4_REG 0x148
-#define PCIE_CC_REV_ID_5_EP4_REG 0x14c
-#define PCIE_DEV_ID_6_EP4_REG 0x150
-#define PCIE_CC_REV_ID_6_EP4_REG 0x154
-#define PCIE_DEV_ID_7_EP4_REG 0x158
-#define PCIE_CC_REV_ID_7_EP4_REG 0x15c
-#define PCIE_SUBSYS_VEN_ID_REG 0x160
-#define PCIE_COMMON_CLOCK_CONFIG_0_4_0 0x164
-#define PCIE_MIPHYP_SSC_EN_REG 0x168
-#define PCIE_MIPHYP_ADDR_REG 0x16c
-#define PCIE_L1_ASPM_READY_REG 0x170
-#define PCIE_EXT_CFG_RDY_REG 0x174
-#define PCIE_SoC_INT_ROUTER_STATUS0_REG 0x178
-#define PCIE_SoC_INT_ROUTER_STATUS1_REG 0x17c
-#define PCIE_SoC_INT_ROUTER_STATUS2_REG 0x180
-#define PCIE_SoC_INT_ROUTER_STATUS3_REG 0x184
-#define DMA_IP_CTRL_REG 0x324
-#define DISP_BRIDGE_PU_PD_CTRL_REG 0x328
-#define VIP_PU_PD_CTRL_REG 0x32c
-#define USB_MLB_PU_PD_CTRL_REG 0x330
-#define SDIO_PU_PD_MISCFUNC_CTRL_REG1 0x334
-#define SDIO_PU_PD_MISCFUNC_CTRL_REG2 0x338
-#define UART_PU_PD_CTRL_REG 0x33c
-#define ARM_Lock 0x340
-#define SYS_IO_CHAR_REG1 0x344
-#define SYS_IO_CHAR_REG2 0x348
-#define SATA_CORE_ID_REG 0x34c
-#define SATA_CTRL_REG 0x350
-#define I2C_HSFIX_MISC_REG 0x354
-#define SPARE2_RESERVED 0x358
-#define SPARE3_RESERVED 0x35c
-#define MASTER_LOCK_REG 0x368
-#define SYSTEM_CONFIG_STATUS_REG 0x36c
-#define MSP_CLK_CTRL_REG 0x39c
-#define COMPENSATION_REG1 0x3c4
-#define COMPENSATION_REG2 0x3c8
-#define COMPENSATION_REG3 0x3cc
-#define TEST_CTL_REG 0x3d0
-
-/*
- * SECR (OTP) registers
- */
-#define STA2X11_SECR_CR 0x00
-#define STA2X11_SECR_FVR0 0x10
-#define STA2X11_SECR_FVR1 0x14
-
-extern int sta2x11_mfd_get_regs_data(struct platform_device *pdev,
- enum sta2x11_mfd_plat_dev index,
- void __iomem **regs,
- spinlock_t **lock);
-
-#endif /* __STA2X11_MFD_H */
diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h
index 90b20550c1c8..a592c8dc716d 100644
--- a/include/linux/mfd/stm32-lptimer.h
+++ b/include/linux/mfd/stm32-lptimer.h
@@ -17,20 +17,30 @@
#define STM32_LPTIM_IER 0x08 /* Interrupt Enable Reg */
#define STM32_LPTIM_CFGR 0x0C /* Configuration Reg */
#define STM32_LPTIM_CR 0x10 /* Control Reg */
-#define STM32_LPTIM_CMP 0x14 /* Compare Reg */
+#define STM32_LPTIM_CMP 0x14 /* Compare Reg (MP25 CCR1) */
#define STM32_LPTIM_ARR 0x18 /* Autoreload Reg */
#define STM32_LPTIM_CNT 0x1C /* Counter Reg */
+#define STM32_LPTIM_CCMR1 0x2C /* Capture/Compare Mode MP25 */
+#define STM32_LPTIM_CCR2 0x34 /* Compare Reg2 MP25 */
+
+#define STM32_LPTIM_HWCFGR2 0x3EC /* Hardware configuration register 2 - MP25 */
+#define STM32_LPTIM_HWCFGR1 0x3F0 /* Hardware configuration register 1 - MP15 */
+#define STM32_LPTIM_VERR 0x3F4 /* Version identification register - MP15 */
/* STM32_LPTIM_ISR - bit fields */
+#define STM32_LPTIM_DIEROK_ARROK (BIT(24) | BIT(4)) /* MP25 */
+#define STM32_LPTIM_CMP2_ARROK (BIT(19) | BIT(4))
#define STM32_LPTIM_CMPOK_ARROK GENMASK(4, 3)
#define STM32_LPTIM_ARROK BIT(4)
#define STM32_LPTIM_CMPOK BIT(3)
/* STM32_LPTIM_ICR - bit fields */
-#define STM32_LPTIM_ARRMCF BIT(1)
+#define STM32_LPTIM_DIEROKCF_ARROKCF (BIT(24) | BIT(4)) /* MP25 */
+#define STM32_LPTIM_CMP2OKCF_ARROKCF (BIT(19) | BIT(4))
#define STM32_LPTIM_CMPOKCF_ARROKCF GENMASK(4, 3)
+#define STM32_LPTIM_ARRMCF BIT(1)
-/* STM32_LPTIM_IER - bit flieds */
+/* STM32_LPTIM_IER - bit fields */
#define STM32_LPTIM_ARRMIE BIT(1)
/* STM32_LPTIM_CR - bit fields */
@@ -45,19 +55,45 @@
#define STM32_LPTIM_PRESC GENMASK(11, 9)
#define STM32_LPTIM_CKPOL GENMASK(2, 1)
+/* STM32_LPTIM_CKPOL */
+#define STM32_LPTIM_CKPOL_RISING_EDGE 0
+#define STM32_LPTIM_CKPOL_FALLING_EDGE 1
+#define STM32_LPTIM_CKPOL_BOTH_EDGES 2
+
/* STM32_LPTIM_ARR */
#define STM32_LPTIM_MAX_ARR 0xFFFF
+/* STM32_LPTIM_CCMR1 */
+#define STM32_LPTIM_CC2P GENMASK(19, 18)
+#define STM32_LPTIM_CC2E BIT(17)
+#define STM32_LPTIM_CC2SEL BIT(16)
+#define STM32_LPTIM_CC1P GENMASK(3, 2)
+#define STM32_LPTIM_CC1E BIT(1)
+#define STM32_LPTIM_CC1SEL BIT(0)
+
+/* STM32_LPTIM_HWCFGR1 */
+#define STM32_LPTIM_HWCFGR1_ENCODER BIT(16)
+
+/* STM32_LPTIM_HWCFGR2 */
+#define STM32_LPTIM_HWCFGR2_CHAN_NUM GENMASK(3, 0)
+
+/* STM32_LPTIM_VERR */
+#define STM32_LPTIM_VERR_23 0x23 /* STM32MP25 */
+
/**
* struct stm32_lptimer - STM32 Low-Power Timer data assigned by parent device
* @clk: clock reference for this instance
* @regmap: register map reference for this instance
* @has_encoder: indicates this Low-Power Timer supports encoder mode
+ * @num_cc_chans: indicates the number of capture/compare channels
+ * @version: indicates the major and minor revision of the controller
*/
struct stm32_lptimer {
struct clk *clk;
struct regmap *regmap;
bool has_encoder;
+ unsigned int num_cc_chans;
+ u32 version;
};
#endif
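A hedged sketch of how the parent MFD driver might populate the new struct
fields from the hardware configuration registers; the probe flow is
illustrative, error handling is elided, and FIELD_GET() comes from
<linux/bitfield.h>.

static void stm32_lptimer_detect(struct stm32_lptimer *ddata)
{
	u32 val;

	regmap_read(ddata->regmap, STM32_LPTIM_VERR, &ddata->version);
	regmap_read(ddata->regmap, STM32_LPTIM_HWCFGR1, &val);
	ddata->has_encoder = FIELD_GET(STM32_LPTIM_HWCFGR1_ENCODER, val);
	regmap_read(ddata->regmap, STM32_LPTIM_HWCFGR2, &val);
	ddata->num_cc_chans = FIELD_GET(STM32_LPTIM_HWCFGR2_CHAN_NUM, val);
}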
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index f8db83aedb2b..23b0cae4a9f8 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -12,79 +12,114 @@
#include <linux/dma-mapping.h>
#include <linux/regmap.h>
-#define TIM_CR1 0x00 /* Control Register 1 */
-#define TIM_CR2 0x04 /* Control Register 2 */
-#define TIM_SMCR 0x08 /* Slave mode control reg */
-#define TIM_DIER 0x0C /* DMA/interrupt register */
-#define TIM_SR 0x10 /* Status register */
-#define TIM_EGR 0x14 /* Event Generation Reg */
-#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */
-#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */
-#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */
-#define TIM_CNT 0x24 /* Counter */
-#define TIM_PSC 0x28 /* Prescaler */
-#define TIM_ARR 0x2c /* Auto-Reload Register */
-#define TIM_CCR1 0x34 /* Capt/Comp Register 1 */
-#define TIM_CCR2 0x38 /* Capt/Comp Register 2 */
-#define TIM_CCR3 0x3C /* Capt/Comp Register 3 */
-#define TIM_CCR4 0x40 /* Capt/Comp Register 4 */
-#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
-#define TIM_DCR 0x48 /* DMA control register */
-#define TIM_DMAR 0x4C /* DMA register for transfer */
+#define TIM_CR1 0x00 /* Control Register 1 */
+#define TIM_CR2 0x04 /* Control Register 2 */
+#define TIM_SMCR 0x08 /* Slave mode control reg */
+#define TIM_DIER 0x0C /* DMA/interrupt register */
+#define TIM_SR 0x10 /* Status register */
+#define TIM_EGR 0x14 /* Event Generation Reg */
+#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */
+#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */
+#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */
+#define TIM_CNT 0x24 /* Counter */
+#define TIM_PSC 0x28 /* Prescaler */
+#define TIM_ARR 0x2c /* Auto-Reload Register */
+#define TIM_CCRx(x) (0x34 + 4 * ((x) - 1)) /* Capt/Comp Register x (x ∈ {1, .. 4}) */
+#define TIM_CCR1 TIM_CCRx(1) /* Capt/Comp Register 1 */
+#define TIM_CCR2 TIM_CCRx(2) /* Capt/Comp Register 2 */
+#define TIM_CCR3 TIM_CCRx(3) /* Capt/Comp Register 3 */
+#define TIM_CCR4 TIM_CCRx(4) /* Capt/Comp Register 4 */
+#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
+#define TIM_DCR 0x48 /* DMA control register */
+#define TIM_DMAR 0x4C /* DMA register for transfer */
+#define TIM_TISEL 0x68 /* Input Selection */
+#define TIM_HWCFGR2 0x3EC /* hardware configuration 2 Reg (MP25) */
+#define TIM_HWCFGR1 0x3F0 /* hardware configuration 1 Reg (MP25) */
+#define TIM_IPIDR 0x3F8 /* IP identification Reg (MP25) */
-#define TIM_CR1_CEN BIT(0) /* Counter Enable */
-#define TIM_CR1_DIR BIT(4) /* Counter Direction */
-#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */
-#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */
-#define TIM_CR2_MMS2 GENMASK(23, 20) /* Master mode selection 2 */
-#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
-#define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */
-#define TIM_DIER_UIE BIT(0) /* Update interrupt */
-#define TIM_DIER_UDE BIT(8) /* Update DMA request Enable */
-#define TIM_DIER_CC1DE BIT(9) /* CC1 DMA request Enable */
-#define TIM_DIER_CC2DE BIT(10) /* CC2 DMA request Enable */
-#define TIM_DIER_CC3DE BIT(11) /* CC3 DMA request Enable */
-#define TIM_DIER_CC4DE BIT(12) /* CC4 DMA request Enable */
-#define TIM_DIER_COMDE BIT(13) /* COM DMA request Enable */
-#define TIM_DIER_TDE BIT(14) /* Trigger DMA request Enable */
-#define TIM_SR_UIF BIT(0) /* Update interrupt flag */
-#define TIM_EGR_UG BIT(0) /* Update Generation */
-#define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */
-#define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */
-#define TIM_CCMR_CC1S (BIT(0) | BIT(1)) /* Capture/compare 1 sel */
-#define TIM_CCMR_IC1PSC GENMASK(3, 2) /* Input capture 1 prescaler */
-#define TIM_CCMR_CC2S (BIT(8) | BIT(9)) /* Capture/compare 2 sel */
-#define TIM_CCMR_IC2PSC GENMASK(11, 10) /* Input capture 2 prescaler */
-#define TIM_CCMR_CC1S_TI1 BIT(0) /* IC1/IC3 selects TI1/TI3 */
-#define TIM_CCMR_CC1S_TI2 BIT(1) /* IC1/IC3 selects TI2/TI4 */
-#define TIM_CCMR_CC2S_TI2 BIT(8) /* IC2/IC4 selects TI2/TI4 */
-#define TIM_CCMR_CC2S_TI1 BIT(9) /* IC2/IC4 selects TI1/TI3 */
-#define TIM_CCER_CC1E BIT(0) /* Capt/Comp 1 out Ena */
-#define TIM_CCER_CC1P BIT(1) /* Capt/Comp 1 Polarity */
-#define TIM_CCER_CC1NE BIT(2) /* Capt/Comp 1N out Ena */
-#define TIM_CCER_CC1NP BIT(3) /* Capt/Comp 1N Polarity */
-#define TIM_CCER_CC2E BIT(4) /* Capt/Comp 2 out Ena */
-#define TIM_CCER_CC2P BIT(5) /* Capt/Comp 2 Polarity */
-#define TIM_CCER_CC3E BIT(8) /* Capt/Comp 3 out Ena */
-#define TIM_CCER_CC3P BIT(9) /* Capt/Comp 3 Polarity */
-#define TIM_CCER_CC4E BIT(12) /* Capt/Comp 4 out Ena */
-#define TIM_CCER_CC4P BIT(13) /* Capt/Comp 4 Polarity */
-#define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12))
-#define TIM_BDTR_BKE(x) BIT(12 + (x) * 12) /* Break input enable */
-#define TIM_BDTR_BKP(x) BIT(13 + (x) * 12) /* Break input polarity */
-#define TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */
-#define TIM_BDTR_MOE BIT(15) /* Main Output Enable */
-#define TIM_BDTR_BKF(x) (0xf << (16 + (x) * 4))
-#define TIM_DCR_DBA GENMASK(4, 0) /* DMA base addr */
-#define TIM_DCR_DBL GENMASK(12, 8) /* DMA burst len */
+#define TIM_CR1_CEN BIT(0) /* Counter Enable */
+#define TIM_CR1_DIR BIT(4) /* Counter Direction */
+#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */
+#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */
+#define TIM_CR2_MMS2 GENMASK(23, 20) /* Master mode selection 2 */
+#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
+#define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */
+#define TIM_DIER_UIE BIT(0) /* Update interrupt */
+#define TIM_DIER_CCxIE(x) BIT(1 + ((x) - 1)) /* CCx Interrupt Enable (x ∈ {1, .. 4}) */
+#define TIM_DIER_CC1IE TIM_DIER_CCxIE(1) /* CC1 Interrupt Enable */
+#define TIM_DIER_CC2IE TIM_DIER_CCxIE(2) /* CC2 Interrupt Enable */
+#define TIM_DIER_CC3IE TIM_DIER_CCxIE(3) /* CC3 Interrupt Enable */
+#define TIM_DIER_CC4IE TIM_DIER_CCxIE(4) /* CC4 Interrupt Enable */
+#define TIM_DIER_UDE BIT(8) /* Update DMA request Enable */
+#define TIM_DIER_CCxDE(x) BIT(9 + ((x) - 1)) /* CCx DMA request Enable (x ∈ {1, .. 4}) */
+#define TIM_DIER_CC1DE TIM_DIER_CCxDE(1) /* CC1 DMA request Enable */
+#define TIM_DIER_CC2DE TIM_DIER_CCxDE(2) /* CC2 DMA request Enable */
+#define TIM_DIER_CC3DE TIM_DIER_CCxDE(3) /* CC3 DMA request Enable */
+#define TIM_DIER_CC4DE TIM_DIER_CCxDE(4) /* CC4 DMA request Enable */
+#define TIM_DIER_COMDE BIT(13) /* COM DMA request Enable */
+#define TIM_DIER_TDE BIT(14) /* Trigger DMA request Enable */
+#define TIM_SR_UIF BIT(0) /* Update interrupt flag */
+#define TIM_SR_CC_IF(x) BIT((x) + 1) /* CC1, CC2, CC3, CC4 interrupt flag */
+#define TIM_EGR_UG BIT(0) /* Update Generation */
+#define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */
+#define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */
+#define TIM_CCMR_CC1S (BIT(0) | BIT(1)) /* Capture/compare 1 sel */
+#define TIM_CCMR_IC1PSC GENMASK(3, 2) /* Input capture 1 prescaler */
+#define TIM_CCMR_CC2S (BIT(8) | BIT(9)) /* Capture/compare 2 sel */
+#define TIM_CCMR_IC2PSC GENMASK(11, 10) /* Input capture 2 prescaler */
+#define TIM_CCMR_CC1S_TI1 BIT(0) /* IC1/IC3 selects TI1/TI3 */
+#define TIM_CCMR_CC1S_TI2 BIT(1) /* IC1/IC3 selects TI2/TI4 */
+#define TIM_CCMR_CC2S_TI2 BIT(8) /* IC2/IC4 selects TI2/TI4 */
+#define TIM_CCMR_CC2S_TI1 BIT(9) /* IC2/IC4 selects TI1/TI3 */
+#define TIM_CCMR_CC3S (BIT(0) | BIT(1)) /* Capture/compare 3 sel */
+#define TIM_CCMR_CC4S (BIT(8) | BIT(9)) /* Capture/compare 4 sel */
+#define TIM_CCMR_CC3S_TI3 BIT(0) /* IC3 selects TI3 */
+#define TIM_CCMR_CC4S_TI4 BIT(8) /* IC4 selects TI4 */
+#define TIM_CCER_CCxE(x) BIT(0 + 4 * ((x) - 1)) /* Capt/Comp x out Ena (x ∈ {1, .. 4}) */
+#define TIM_CCER_CCxP(x) BIT(1 + 4 * ((x) - 1)) /* Capt/Comp x Polarity (x ∈ {1, .. 4}) */
+#define TIM_CCER_CCxNE(x) BIT(2 + 4 * ((x) - 1)) /* Capt/Comp xN out Ena (x ∈ {1, .. 4}) */
+#define TIM_CCER_CCxNP(x) BIT(3 + 4 * ((x) - 1)) /* Capt/Comp xN Polarity (x ∈ {1, .. 4}) */
+#define TIM_CCER_CC1E TIM_CCER_CCxE(1) /* Capt/Comp 1 out Ena */
+#define TIM_CCER_CC1P TIM_CCER_CCxP(1) /* Capt/Comp 1 Polarity */
+#define TIM_CCER_CC1NE TIM_CCER_CCxNE(1) /* Capt/Comp 1N out Ena */
+#define TIM_CCER_CC1NP TIM_CCER_CCxNP(1) /* Capt/Comp 1N Polarity */
+#define TIM_CCER_CC2E TIM_CCER_CCxE(2) /* Capt/Comp 2 out Ena */
+#define TIM_CCER_CC2P TIM_CCER_CCxP(2) /* Capt/Comp 2 Polarity */
+#define TIM_CCER_CC2NE TIM_CCER_CCxNE(2) /* Capt/Comp 2N out Ena */
+#define TIM_CCER_CC2NP TIM_CCER_CCxNP(2) /* Capt/Comp 2N Polarity */
+#define TIM_CCER_CC3E TIM_CCER_CCxE(3) /* Capt/Comp 3 out Ena */
+#define TIM_CCER_CC3P TIM_CCER_CCxP(3) /* Capt/Comp 3 Polarity */
+#define TIM_CCER_CC3NE TIM_CCER_CCxNE(3) /* Capt/Comp 3N out Ena */
+#define TIM_CCER_CC3NP TIM_CCER_CCxNP(3) /* Capt/Comp 3N Polarity */
+#define TIM_CCER_CC4E TIM_CCER_CCxE(4) /* Capt/Comp 4 out Ena */
+#define TIM_CCER_CC4P TIM_CCER_CCxP(4) /* Capt/Comp 4 Polarity */
+#define TIM_CCER_CC4NE TIM_CCER_CCxNE(4) /* Capt/Comp 4N out Ena */
+#define TIM_CCER_CC4NP TIM_CCER_CCxNP(4) /* Capt/Comp 4N Polarity */
+#define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12))
+#define TIM_BDTR_BKE(x) BIT(12 + (x) * 12) /* Break input enable */
+#define TIM_BDTR_BKP(x) BIT(13 + (x) * 12) /* Break input polarity */
+#define TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */
+#define TIM_BDTR_MOE BIT(15) /* Main Output Enable */
+#define TIM_BDTR_BKF(x) (0xf << (16 + (x) * 4))
+#define TIM_DCR_DBA GENMASK(4, 0) /* DMA base addr */
+#define TIM_DCR_DBL GENMASK(12, 8) /* DMA burst len */
+#define TIM_HWCFGR1_NB_OF_CC GENMASK(3, 0) /* Capture/compare channels */
+#define TIM_HWCFGR1_NB_OF_DT GENMASK(7, 4) /* Complementary outputs & dead-time generators */
+#define TIM_HWCFGR2_CNT_WIDTH GENMASK(15, 8) /* Counter width */
-#define MAX_TIM_PSC 0xFFFF
-#define MAX_TIM_ICPSC 0x3
-#define TIM_CR2_MMS_SHIFT 4
-#define TIM_CR2_MMS2_SHIFT 20
-#define TIM_SMCR_TS_SHIFT 4
-#define TIM_BDTR_BKF_MASK 0xF
-#define TIM_BDTR_BKF_SHIFT(x) (16 + (x) * 4)
+#define MAX_TIM_PSC 0xFFFF
+#define MAX_TIM_ICPSC 0x3
+#define TIM_CR2_MMS_SHIFT 4
+#define TIM_CR2_MMS2_SHIFT 20
+#define TIM_SMCR_SMS_SLAVE_MODE_DISABLED 0 /* counts on internal clock when CEN=1 */
+#define TIM_SMCR_SMS_ENCODER_MODE_1 1 /* counts TI1FP1 edges, depending on TI2FP2 level */
+#define TIM_SMCR_SMS_ENCODER_MODE_2 2 /* counts TI2FP2 edges, depending on TI1FP1 level */
+#define TIM_SMCR_SMS_ENCODER_MODE_3 3 /* counts on both TI1FP1 and TI2FP2 edges */
+#define TIM_SMCR_TS_SHIFT 4
+#define TIM_BDTR_BKF_MASK 0xF
+#define TIM_BDTR_BKF_SHIFT(x) (16 + (x) * 4)
+
+#define STM32MP25_TIM_IPIDR 0x00120002
enum stm32_timers_dmas {
STM32_TIMERS_DMA_CH1,
@@ -97,6 +132,15 @@ enum stm32_timers_dmas {
STM32_TIMERS_MAX_DMAS,
};
+/* STM32 Timers may have either a single global interrupt or 4 interrupt lines */
+enum stm32_timers_irqs {
+ STM32_TIMERS_IRQ_GLOBAL_BRK, /* global or brk IRQ */
+ STM32_TIMERS_IRQ_UP,
+ STM32_TIMERS_IRQ_TRG_COM,
+ STM32_TIMERS_IRQ_CC,
+ STM32_TIMERS_MAX_IRQS,
+};
+
/**
* struct stm32_timers_dma - STM32 timer DMA handling.
* @completion: end of DMA transfer completion
@@ -115,9 +159,12 @@ struct stm32_timers_dma {
struct stm32_timers {
struct clk *clk;
+ u32 ipidr;
struct regmap *regmap;
u32 max_arr;
struct stm32_timers_dma dma; /* Only to be used by the parent */
+ unsigned int nr_irqs;
+ int irq[STM32_TIMERS_MAX_IRQS];
};
#if IS_REACHABLE(CONFIG_MFD_STM32_TIMERS)
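
The per-channel DIER and CCER defines are now generated from helpers parameterized on the channel number x. A minimal compile-time sketch (assuming the header path include/linux/mfd/stm32-timers.h and BIT() from <linux/bits.h>) confirming the expansions match the old hard-coded values:

/*
 * Sketch: build-time checks only, no runtime effect. Verifies that the
 * parameterized macros reproduce the per-channel bits they replace.
 */
#include <linux/bits.h>
#include <linux/build_bug.h>
#include <linux/mfd/stm32-timers.h>

static inline void stm32_tim_macro_checks(void)
{
	/* TIM_DIER_CCxDE(x) = BIT(9 + (x - 1)): CC2 DMA enable is bit 10 */
	BUILD_BUG_ON(TIM_DIER_CCxDE(2) != BIT(10));
	/* TIM_CCER_CCxE(x) = BIT(4 * (x - 1)): channel 3 output enable is bit 8 */
	BUILD_BUG_ON(TIM_CCER_CCxE(3) != BIT(8));
}
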
diff --git a/include/linux/mfd/stmfx.h b/include/linux/mfd/stmfx.h
index 744dce63946e..967a2e486800 100644
--- a/include/linux/mfd/stmfx.h
+++ b/include/linux/mfd/stmfx.h
@@ -113,10 +113,8 @@ struct stmfx {
struct irq_domain *irq_domain;
struct mutex lock; /* IRQ bus lock */
u8 irq_src;
-#ifdef CONFIG_PM
u8 bkp_sysctrl;
u8 bkp_irqoutpin;
-#endif
};
int stmfx_function_enable(struct stmfx *stmfx, u32 func);
diff --git a/include/linux/mfd/stpmic1.h b/include/linux/mfd/stpmic1.h
index fa3f99f7e9a1..dc00bac24f5a 100644
--- a/include/linux/mfd/stpmic1.h
+++ b/include/linux/mfd/stpmic1.h
@@ -15,7 +15,7 @@
#define RREQ_STATE_SR 0x5
#define VERSION_SR 0x6
-#define SWOFF_PWRCTRL_CR 0x10
+#define MAIN_CR 0x10
#define PADS_PULL_CR 0x11
#define BUCKS_PD_CR 0x12
#define LDO14_PD_CR 0x13
@@ -148,14 +148,14 @@
#define LDO_BYPASS_MASK BIT(7)
/* Main PMIC Control Register
- * SWOFF_PWRCTRL_CR
+ * MAIN_CR
* Address : 0x10
*/
-#define ICC_EVENT_ENABLED BIT(4)
+#define OCP_OFF_DBG BIT(4)
#define PWRCTRL_POLARITY_HIGH BIT(3)
-#define PWRCTRL_PIN_VALID BIT(2)
-#define RESTART_REQUEST_ENABLED BIT(1)
-#define SOFTWARE_SWITCH_OFF_ENABLED BIT(0)
+#define PWRCTRL_ENABLE BIT(2)
+#define RESTART_REQUEST_ENABLE BIT(1)
+#define SOFTWARE_SWITCH_OFF BIT(0)
/* Main PMIC PADS Control Register
* PADS_PULL_CR
diff --git a/include/linux/mfd/sun4i-gpadc.h b/include/linux/mfd/sun4i-gpadc.h
index ea0ccf33a459..021f820f9d52 100644
--- a/include/linux/mfd/sun4i-gpadc.h
+++ b/include/linux/mfd/sun4i-gpadc.h
@@ -81,8 +81,8 @@
#define SUN4I_GPADC_TEMP_DATA 0x20
#define SUN4I_GPADC_DATA 0x24
-#define SUN4I_GPADC_IRQ_FIFO_DATA 0
-#define SUN4I_GPADC_IRQ_TEMP_DATA 1
+#define SUN4I_GPADC_IRQ_FIFO_DATA 1
+#define SUN4I_GPADC_IRQ_TEMP_DATA 2
/* 10s delay before suspending the IP */
#define SUN4I_GPADC_AUTOSUSPEND_DELAY 10000
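
The FIFO and temperature IRQ indices move from 0/1 to 1/2 — presumably so that index 0, which much of the IRQ infrastructure treats as "no interrupt", is never handed out; that rationale is an assumption here. Consumers that stick to the symbolic names are unaffected, as in this hypothetical resource table:

#include <linux/ioport.h>
#include <linux/mfd/sun4i-gpadc.h>

/* Hypothetical MFD cell resources: using the symbolic indices keeps
 * this table correct across the renumbering above. */
static const struct resource sun4i_gpadc_adc_res[] = {
	DEFINE_RES_IRQ_NAMED(SUN4I_GPADC_IRQ_FIFO_DATA, "FIFO_DATA"),
	DEFINE_RES_IRQ_NAMED(SUN4I_GPADC_IRQ_TEMP_DATA, "TEMP_DATA"),
};
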
diff --git a/include/linux/mfd/sy7636a.h b/include/linux/mfd/sy7636a.h
new file mode 100644
index 000000000000..22f03b2f851e
--- /dev/null
+++ b/include/linux/mfd/sy7636a.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Functions to access SY7636A power management chip.
+ *
+ * Copyright (C) 2021 reMarkable AS - http://www.remarkable.com/
+ */
+
+#ifndef __MFD_SY7636A_H
+#define __MFD_SY7636A_H
+
+#define SY7636A_REG_OPERATION_MODE_CRL 0x00
+/* It is set if a gpio is used to control the regulator */
+#define SY7636A_OPERATION_MODE_CRL_VCOMCTL BIT(6)
+#define SY7636A_OPERATION_MODE_CRL_ONOFF BIT(7)
+#define SY7636A_REG_VCOM_ADJUST_CTRL_L 0x01
+#define SY7636A_REG_VCOM_ADJUST_CTRL_H 0x02
+#define SY7636A_REG_VCOM_ADJUST_CTRL_MASK 0x01ff
+#define SY7636A_REG_VLDO_VOLTAGE_ADJULST_CTRL 0x03
+#define SY7636A_REG_POWER_ON_DELAY_TIME 0x06
+#define SY7636A_REG_FAULT_FLAG 0x07
+#define SY7636A_FAULT_FLAG_PG BIT(0)
+#define SY7636A_REG_TERMISTOR_READOUT 0x08
+
+#define SY7636A_REG_MAX 0x08
+
+#define VCOM_ADJUST_CTRL_MASK 0x1ff
+// Used to shift the high byte
+#define VCOM_ADJUST_CTRL_SHIFT 8
+// Used to scale from VCOM_ADJUST_CTRL to mV
+#define VCOM_ADJUST_CTRL_SCAL 10000
+
+#define FAULT_FLAG_SHIFT 1
+
+#endif /* __MFD_SY7636A_H */
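
VCOM adjustment spans two registers: eight bits in CTRL_L plus a ninth bit in CTRL_H, combined via VCOM_ADJUST_CTRL_SHIFT. A minimal read sketch, assuming a valid struct regmap handle and eliding the SCAL conversion (whose target unit the comment above leaves ambiguous):

#include <linux/mfd/sy7636a.h>
#include <linux/regmap.h>

/* Sketch: reconstruct the 9-bit VCOM field from its split registers. */
static int sy7636a_read_vcom(struct regmap *rm, unsigned int *vcom)
{
	unsigned int lo, hi;
	int ret;

	ret = regmap_read(rm, SY7636A_REG_VCOM_ADJUST_CTRL_L, &lo);
	if (ret)
		return ret;
	ret = regmap_read(rm, SY7636A_REG_VCOM_ADJUST_CTRL_H, &hi);
	if (ret)
		return ret;

	/* the high byte contributes bit 8 of the 9-bit field */
	*vcom = (lo | (hi << VCOM_ADJUST_CTRL_SHIFT)) & VCOM_ADJUST_CTRL_MASK;
	return 0;
}
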
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index fecc2fa2a364..aad9c6b50463 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -17,20 +17,19 @@
struct device_node;
#ifdef CONFIG_MFD_SYSCON
-extern struct regmap *device_node_to_regmap(struct device_node *np);
-extern struct regmap *syscon_node_to_regmap(struct device_node *np);
-extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s);
-extern struct regmap *syscon_regmap_lookup_by_phandle(
- struct device_node *np,
- const char *property);
-extern struct regmap *syscon_regmap_lookup_by_phandle_args(
- struct device_node *np,
- const char *property,
- int arg_count,
- unsigned int *out_args);
-extern struct regmap *syscon_regmap_lookup_by_phandle_optional(
- struct device_node *np,
- const char *property);
+struct regmap *device_node_to_regmap(struct device_node *np);
+struct regmap *syscon_node_to_regmap(struct device_node *np);
+struct regmap *syscon_regmap_lookup_by_compatible(const char *s);
+struct regmap *syscon_regmap_lookup_by_phandle(struct device_node *np,
+ const char *property);
+struct regmap *syscon_regmap_lookup_by_phandle_args(struct device_node *np,
+ const char *property,
+ int arg_count,
+ unsigned int *out_args);
+struct regmap *syscon_regmap_lookup_by_phandle_optional(struct device_node *np,
+ const char *property);
+int of_syscon_register_regmap(struct device_node *np,
+ struct regmap *regmap);
#else
static inline struct regmap *device_node_to_regmap(struct device_node *np)
{
@@ -70,6 +69,12 @@ static inline struct regmap *syscon_regmap_lookup_by_phandle_optional(
return NULL;
}
+static inline int of_syscon_register_regmap(struct device_node *np,
+ struct regmap *regmap)
+{
+ return -EOPNOTSUPP;
+}
+
#endif
#endif /* __LINUX_MFD_SYSCON_H__ */
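
The prototypes drop the redundant extern keyword and gain of_syscon_register_regmap(), with a -EOPNOTSUPP stub when syscon is compiled out. A minimal sketch of the lookup API in use; the compatible string and register offset are hypothetical:

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

/* Sketch: grab a syscon regmap and poke one bit. */
static int example_syscon_poke(void)
{
	struct regmap *map;

	map = syscon_regmap_lookup_by_compatible("vendor,example-syscon");
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* set bit 0 of the (hypothetical) register at offset 0x10 */
	return regmap_update_bits(map, 0x10, BIT(0), BIT(0));
}
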
diff --git a/include/linux/mfd/syscon/atmel-smc.h b/include/linux/mfd/syscon/atmel-smc.h
index e9e24f4c4578..9b9119c742a2 100644
--- a/include/linux/mfd/syscon/atmel-smc.h
+++ b/include/linux/mfd/syscon/atmel-smc.h
@@ -11,9 +11,11 @@
#ifndef _LINUX_MFD_SYSCON_ATMEL_SMC_H_
#define _LINUX_MFD_SYSCON_ATMEL_SMC_H_
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/regmap.h>
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct device_node;
+struct regmap;
#define ATMEL_SMC_SETUP(cs) (((cs) * 0x10))
#define ATMEL_HSMC_SETUP(layout, cs) \
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index d4b5e527a7a3..09c6b3184bb0 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -451,8 +451,10 @@
#define IMX6SX_GPR12_PCIE_RX_EQ_2 (0x2 << 0)
/* For imx6ul iomux gpr register field define */
-#define IMX6UL_GPR1_ENET1_CLK_DIR (0x1 << 17)
-#define IMX6UL_GPR1_ENET2_CLK_DIR (0x1 << 18)
+#define IMX6UL_GPR1_ENET2_TX_CLK_DIR BIT(18)
+#define IMX6UL_GPR1_ENET1_TX_CLK_DIR BIT(17)
+#define IMX6UL_GPR1_ENET2_CLK_SEL BIT(14)
+#define IMX6UL_GPR1_ENET1_CLK_SEL BIT(13)
#define IMX6UL_GPR1_ENET1_CLK_OUTPUT (0x1 << 17)
#define IMX6UL_GPR1_ENET2_CLK_OUTPUT (0x1 << 18)
#define IMX6UL_GPR1_ENET_CLK_DIR (0x3 << 17)
diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h
deleted file mode 100644
index 69632c1b07bd..000000000000
--- a/include/linux/mfd/t7l66xb.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * This file contains the definitions for the T7L66XB
- *
- * (C) Copyright 2005 Ian Molton <spyro@f2s.com>
- */
-#ifndef MFD_T7L66XB_H
-#define MFD_T7L66XB_H
-
-#include <linux/mfd/core.h>
-#include <linux/mfd/tmio.h>
-
-struct t7l66xb_platform_data {
- int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
- int (*suspend)(struct platform_device *dev);
- int (*resume)(struct platform_device *dev);
-
- int irq_base; /* The base for subdevice irqs */
-
- struct tmio_nand_data *nand_data;
-};
-
-
-#define IRQ_T7L66XB_MMC (1)
-#define IRQ_T7L66XB_NAND (3)
-
-#define T7L66XB_NR_IRQS 8
-
-#endif
diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h
deleted file mode 100644
index b4888209494a..000000000000
--- a/include/linux/mfd/tc6387xb.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * This file contains the definitions for the TC6387XB
- *
- * (C) Copyright 2005 Ian Molton <spyro@f2s.com>
- *
- * May be copied or modified under the terms of the GNU General Public
- * License. See linux/COPYING for more information.
- *
- */
-#ifndef MFD_TC6387XB_H
-#define MFD_TC6387XB_H
-
-struct tc6387xb_platform_data {
- int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
- int (*suspend)(struct platform_device *dev);
- int (*resume)(struct platform_device *dev);
-};
-
-#endif
diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h
deleted file mode 100644
index fcc8e74f0e8d..000000000000
--- a/include/linux/mfd/tc6393xb.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Toshiba TC6393XB SoC support
- *
- * Copyright(c) 2005-2006 Chris Humbert
- * Copyright(c) 2005 Dirk Opfer
- * Copyright(c) 2005 Ian Molton <spyro@f2s.com>
- * Copyright(c) 2007 Dmitry Baryshkov
- *
- * Based on code written by Sharp/Lineo for 2.4 kernels
- * Based on locomo.c
- */
-
-#ifndef MFD_TC6393XB_H
-#define MFD_TC6393XB_H
-
-#include <linux/fb.h>
-
-/* Also one should provide the CK3P6MI clock */
-struct tc6393xb_platform_data {
- u16 scr_pll2cr; /* PLL2 Control */
- u16 scr_gper; /* GP Enable */
-
- int (*enable)(struct platform_device *dev);
- int (*disable)(struct platform_device *dev);
- int (*suspend)(struct platform_device *dev);
- int (*resume)(struct platform_device *dev);
-
- int irq_base; /* base for subdevice irqs */
- int gpio_base;
- int (*setup)(struct platform_device *dev);
- void (*teardown)(struct platform_device *dev);
-
- struct tmio_nand_data *nand_data;
- struct tmio_fb_data *fb_data;
-
- unsigned resume_restore : 1; /* make special actions
- to preserve the state
- on suspend/resume */
-};
-
-extern int tc6393xb_lcd_mode(struct platform_device *fb,
- const struct fb_videomode *mode);
-extern int tc6393xb_lcd_set_power(struct platform_device *fb, bool on);
-
-/*
- * Relative to irq_base
- */
-#define IRQ_TC6393_NAND 0
-#define IRQ_TC6393_MMC 1
-#define IRQ_TC6393_OHCI 2
-#define IRQ_TC6393_FB 4
-
-#define TC6393XB_NR_IRQS 8
-
-#endif
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index ffc091b77633..4063b0614d90 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -1,22 +1,16 @@
-#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H
-#define __LINUX_TI_AM335X_TSCADC_MFD_H
-
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI Touch Screen / ADC MFD driver
*
* Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
+#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H
+#define __LINUX_TI_AM335X_TSCADC_MFD_H
+
+#include <linux/bitfield.h>
#include <linux/mfd/core.h>
+#include <linux/units.h>
#define REG_RAWIRQSTATUS 0x024
#define REG_IRQSTATUS 0x028
@@ -46,13 +40,6 @@
/* IRQ wakeup enable */
#define IRQWKUP_ENB BIT(0)
-/* Step Enable */
-#define STEPENB_MASK (0x1FFFF << 0)
-#define STEPENB(val) ((val) << 0)
-#define ENB(val) (1 << (val))
-#define STPENB_STEPENB STEPENB(0x1FFFF)
-#define STPENB_STEPENB_TC STEPENB(0x1FFF)
-
/* IRQ enable */
#define IRQENB_HW_PEN BIT(0)
#define IRQENB_EOS BIT(1)
@@ -65,12 +52,10 @@
#define IRQENB_PENUP BIT(9)
/* Step Configuration */
-#define STEPCONFIG_MODE_MASK (3 << 0)
-#define STEPCONFIG_MODE(val) ((val) << 0)
+#define STEPCONFIG_MODE(val) FIELD_PREP(GENMASK(1, 0), (val))
#define STEPCONFIG_MODE_SWCNT STEPCONFIG_MODE(1)
#define STEPCONFIG_MODE_HWSYNC STEPCONFIG_MODE(2)
-#define STEPCONFIG_AVG_MASK (7 << 2)
-#define STEPCONFIG_AVG(val) ((val) << 2)
+#define STEPCONFIG_AVG(val) FIELD_PREP(GENMASK(4, 2), (val))
#define STEPCONFIG_AVG_16 STEPCONFIG_AVG(4)
#define STEPCONFIG_XPP BIT(5)
#define STEPCONFIG_XNN BIT(6)
@@ -78,70 +63,67 @@
#define STEPCONFIG_YNN BIT(8)
#define STEPCONFIG_XNP BIT(9)
#define STEPCONFIG_YPN BIT(10)
-#define STEPCONFIG_RFP(val) ((val) << 12)
-#define STEPCONFIG_RFP_VREFP (0x3 << 12)
-#define STEPCONFIG_INM_MASK (0xF << 15)
-#define STEPCONFIG_INM(val) ((val) << 15)
+#define STEPCONFIG_RFP(val) FIELD_PREP(GENMASK(13, 12), (val))
+#define STEPCONFIG_RFP_VREFP STEPCONFIG_RFP(3)
+#define STEPCONFIG_INM(val) FIELD_PREP(GENMASK(18, 15), (val))
#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8)
-#define STEPCONFIG_INP_MASK (0xF << 19)
-#define STEPCONFIG_INP(val) ((val) << 19)
+#define STEPCONFIG_INP(val) FIELD_PREP(GENMASK(22, 19), (val))
#define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4)
#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8)
#define STEPCONFIG_FIFO1 BIT(26)
-#define STEPCONFIG_RFM(val) ((val) << 23)
-#define STEPCONFIG_RFM_VREFN (0x3 << 23)
+#define STEPCONFIG_RFM(val) FIELD_PREP(GENMASK(24, 23), (val))
+#define STEPCONFIG_RFM_VREFN STEPCONFIG_RFM(3)
/* Delay register */
-#define STEPDELAY_OPEN_MASK (0x3FFFF << 0)
-#define STEPDELAY_OPEN(val) ((val) << 0)
+#define STEPDELAY_OPEN(val) FIELD_PREP(GENMASK(17, 0), (val))
#define STEPCONFIG_OPENDLY STEPDELAY_OPEN(0x098)
-#define STEPDELAY_SAMPLE_MASK (0xFF << 24)
-#define STEPDELAY_SAMPLE(val) ((val) << 24)
+#define STEPCONFIG_MAX_OPENDLY GENMASK(17, 0)
+#define STEPDELAY_SAMPLE(val) FIELD_PREP(GENMASK(31, 24), (val))
#define STEPCONFIG_SAMPLEDLY STEPDELAY_SAMPLE(0)
+#define STEPCONFIG_MAX_SAMPLE GENMASK(7, 0)
/* Charge Config */
-#define STEPCHARGE_RFP_MASK (7 << 12)
-#define STEPCHARGE_RFP(val) ((val) << 12)
+#define STEPCHARGE_RFP(val) FIELD_PREP(GENMASK(14, 12), (val))
#define STEPCHARGE_RFP_XPUL STEPCHARGE_RFP(1)
-#define STEPCHARGE_INM_MASK (0xF << 15)
-#define STEPCHARGE_INM(val) ((val) << 15)
+#define STEPCHARGE_INM(val) FIELD_PREP(GENMASK(18, 15), (val))
#define STEPCHARGE_INM_AN1 STEPCHARGE_INM(1)
-#define STEPCHARGE_INP_MASK (0xF << 19)
-#define STEPCHARGE_INP(val) ((val) << 19)
-#define STEPCHARGE_RFM_MASK (3 << 23)
-#define STEPCHARGE_RFM(val) ((val) << 23)
+#define STEPCHARGE_INP(val) FIELD_PREP(GENMASK(22, 19), (val))
+#define STEPCHARGE_RFM(val) FIELD_PREP(GENMASK(24, 23), (val))
#define STEPCHARGE_RFM_XNUR STEPCHARGE_RFM(1)
/* Charge delay */
-#define CHARGEDLY_OPEN_MASK (0x3FFFF << 0)
-#define CHARGEDLY_OPEN(val) ((val) << 0)
+#define CHARGEDLY_OPEN(val) FIELD_PREP(GENMASK(17, 0), (val))
#define CHARGEDLY_OPENDLY CHARGEDLY_OPEN(0x400)
/* Control register */
-#define CNTRLREG_TSCSSENB BIT(0)
+#define CNTRLREG_SSENB BIT(0)
#define CNTRLREG_STEPID BIT(1)
-#define CNTRLREG_STEPCONFIGWRT BIT(2)
+#define CNTRLREG_TSC_STEPCONFIGWRT BIT(2)
#define CNTRLREG_POWERDOWN BIT(4)
-#define CNTRLREG_AFE_CTRL_MASK (3 << 5)
-#define CNTRLREG_AFE_CTRL(val) ((val) << 5)
-#define CNTRLREG_4WIRE CNTRLREG_AFE_CTRL(1)
-#define CNTRLREG_5WIRE CNTRLREG_AFE_CTRL(2)
-#define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3)
-#define CNTRLREG_TSCENB BIT(7)
+#define CNTRLREG_TSC_AFE_CTRL(val) FIELD_PREP(GENMASK(6, 5), (val))
+#define CNTRLREG_TSC_4WIRE CNTRLREG_TSC_AFE_CTRL(1)
+#define CNTRLREG_TSC_5WIRE CNTRLREG_TSC_AFE_CTRL(2)
+#define CNTRLREG_TSC_ENB BIT(7)
+
+/* Control register bitfields for MAGADC IP */
+#define CNTRLREG_MAGADCENB BIT(0)
+#define CNTRLREG_MAG_PREAMP_PWRDOWN BIT(5)
+#define CNTRLREG_MAG_PREAMP_BYPASS BIT(6)
/* FIFO READ Register */
-#define FIFOREAD_DATA_MASK (0xfff << 0)
-#define FIFOREAD_CHNLID_MASK (0xf << 16)
+#define FIFOREAD_DATA_MASK GENMASK(11, 0)
+#define FIFOREAD_CHNLID_MASK GENMASK(19, 16)
/* DMA ENABLE/CLEAR Register */
#define DMA_FIFO0 BIT(0)
#define DMA_FIFO1 BIT(1)
/* Sequencer Status */
-#define SEQ_STATUS BIT(5)
+#define SEQ_STATUS BIT(5)
#define CHARGE_STEP 0x11
-#define ADC_CLK 3000000
+#define TSC_ADC_CLK (3 * HZ_PER_MHZ)
+#define MAG_ADC_CLK (13 * HZ_PER_MHZ)
#define TOTAL_STEPS 16
#define TOTAL_CHANNELS 8
#define FIFO1_THRESHOLD 19
@@ -158,21 +140,27 @@
*
* max processing time: 266431 * 308ns = 83ms(approx)
*/
-#define IDLE_TIMEOUT 83 /* milliseconds */
+#define IDLE_TIMEOUT_MS 83 /* milliseconds */
#define TSCADC_CELLS 2
+struct ti_tscadc_data {
+ char *adc_feature_name;
+ char *adc_feature_compatible;
+ char *secondary_feature_name;
+ char *secondary_feature_compatible;
+ unsigned int target_clk_rate;
+};
+
struct ti_tscadc_dev {
struct device *dev;
struct regmap *regmap;
void __iomem *tscadc_base;
phys_addr_t tscadc_phys_base;
+ const struct ti_tscadc_data *data;
int irq;
- int used_cells; /* 1-2 */
- int tsc_wires;
- int tsc_cell; /* -1 if not used */
- int adc_cell; /* -1 if not used */
struct mfd_cell cells[TSCADC_CELLS];
+ u32 ctrl;
u32 reg_se_cache;
bool adc_waiting;
bool adc_in_use;
@@ -194,6 +182,12 @@ static inline struct ti_tscadc_dev *ti_tscadc_dev_get(struct platform_device *p)
return *tscadc_dev;
}
+static inline bool ti_adc_with_touchscreen(struct ti_tscadc_dev *tscadc)
+{
+ return of_device_is_compatible(tscadc->dev->of_node,
+ "ti,am3359-tscadc");
+}
+
void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val);
void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val);
void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val);
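
The step, charge, and control fields switch from open-coded shift-and-mask pairs to FIELD_PREP() over GENMASK(), which drops the separate *_MASK defines and lets the compiler reject out-of-range constants. A sketch of the equivalence for one field:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/build_bug.h>

/* Sketch: FIELD_PREP(GENMASK(4, 2), val) places val at bit 2, exactly
 * like the old (val) << 2, but with a built-in range check on constant
 * values. Compile-time confirmation for one sample value: */
static inline void tscadc_fieldprep_check(void)
{
	BUILD_BUG_ON(FIELD_PREP(GENMASK(4, 2), 4) != (4 << 2));
}
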
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
deleted file mode 100644
index 27264fe4b3b9..000000000000
--- a/include/linux/mfd/tmio.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef MFD_TMIO_H
-#define MFD_TMIO_H
-
-#include <linux/device.h>
-#include <linux/fb.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/mmc/card.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-
-#define tmio_ioread8(addr) readb(addr)
-#define tmio_ioread16(addr) readw(addr)
-#define tmio_ioread16_rep(r, b, l) readsw(r, b, l)
-#define tmio_ioread32(addr) \
- (((u32)readw((addr))) | (((u32)readw((addr) + 2)) << 16))
-
-#define tmio_iowrite8(val, addr) writeb((val), (addr))
-#define tmio_iowrite16(val, addr) writew((val), (addr))
-#define tmio_iowrite16_rep(r, b, l) writesw(r, b, l)
-#define tmio_iowrite32(val, addr) \
- do { \
- writew((val), (addr)); \
- writew((val) >> 16, (addr) + 2); \
- } while (0)
-
-#define sd_config_write8(base, shift, reg, val) \
- tmio_iowrite8((val), (base) + ((reg) << (shift)))
-#define sd_config_write16(base, shift, reg, val) \
- tmio_iowrite16((val), (base) + ((reg) << (shift)))
-#define sd_config_write32(base, shift, reg, val) \
- do { \
- tmio_iowrite16((val), (base) + ((reg) << (shift))); \
- tmio_iowrite16((val) >> 16, (base) + ((reg + 2) << (shift))); \
- } while (0)
-
-/* tmio MMC platform flags */
-/*
- * Some controllers can support a 2-byte block size when the bus width
- * is configured in 4-bit mode.
- */
-#define TMIO_MMC_BLKSZ_2BYTES BIT(1)
-/*
- * Some controllers can support SDIO IRQ signalling.
- */
-#define TMIO_MMC_SDIO_IRQ BIT(2)
-
-/* Some features are only available or tested on R-Car Gen2 or later */
-#define TMIO_MMC_MIN_RCAR2 BIT(3)
-
-/*
- * Some controllers require waiting for the SD bus to become
- * idle before writing to some registers.
- */
-#define TMIO_MMC_HAS_IDLE_WAIT BIT(4)
-
-/*
- * Use the busy timeout feature. Probably all TMIO versions support it. Yet,
- * we don't have documentation for old variants, so we enable only known good
- * variants with this flag. Can be removed once all variants are known good.
- */
-#define TMIO_MMC_USE_BUSY_TIMEOUT BIT(5)
-
-/*
- * Some controllers have CMD12 automatically
- * issue/non-issue register
- */
-#define TMIO_MMC_HAVE_CMD12_CTRL BIT(7)
-
-/* Controller has some SDIO status bits which must be 1 */
-#define TMIO_MMC_SDIO_STATUS_SETBITS BIT(8)
-
-/*
- * Some controllers have a 32-bit wide data port register
- */
-#define TMIO_MMC_32BIT_DATA_PORT BIT(9)
-
-/*
- * Some controllers allows to set SDx actual clock
- */
-#define TMIO_MMC_CLK_ACTUAL BIT(10)
-
-/* Some controllers have a CBSY bit */
-#define TMIO_MMC_HAVE_CBSY BIT(11)
-
-int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
-int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
-void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
-void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state);
-
-struct dma_chan;
-
-/*
- * data for the MMC controller
- */
-struct tmio_mmc_data {
- void *chan_priv_tx;
- void *chan_priv_rx;
- unsigned int hclk;
- unsigned long capabilities;
- unsigned long capabilities2;
- unsigned long flags;
- u32 ocr_mask; /* available voltages */
- int alignment_shift;
- dma_addr_t dma_rx_offset;
- unsigned int max_blk_count;
- unsigned short max_segs;
- void (*set_pwr)(struct platform_device *host, int state);
- void (*set_clk_div)(struct platform_device *host, int state);
-};
-
-/*
- * data for the NAND controller
- */
-struct tmio_nand_data {
- struct nand_bbt_descr *badblock_pattern;
- struct mtd_partition *partition;
- unsigned int num_partitions;
- const char *const *part_parsers;
-};
-
-#define FBIO_TMIO_ACC_WRITE 0x7C639300
-#define FBIO_TMIO_ACC_SYNC 0x7C639301
-
-struct tmio_fb_data {
- int (*lcd_set_power)(struct platform_device *fb_dev,
- bool on);
- int (*lcd_mode)(struct platform_device *fb_dev,
- const struct fb_videomode *mode);
- int num_modes;
- struct fb_videomode *modes;
-
- /* in mm: size of screen */
- int height;
- int width;
-};
-
-#endif
diff --git a/include/linux/mfd/tps65010.h b/include/linux/mfd/tps65010.h
index a1fb9bc5311d..5edf1aef1118 100644
--- a/include/linux/mfd/tps65010.h
+++ b/include/linux/mfd/tps65010.h
@@ -28,6 +28,8 @@
#ifndef __LINUX_I2C_TPS65010_H
#define __LINUX_I2C_TPS65010_H
+struct gpio_chip;
+
/*
* ----------------------------------------------------------------------------
* Registers, all 8 bits
@@ -176,12 +178,10 @@ struct i2c_client;
/**
* struct tps65010_board - packages GPIO and LED lines
- * @base: the GPIO number to assign to GPIO-1
* @outmask: bit (N-1) is set to allow GPIO-N to be used as an
* (open drain) output
* @setup: optional callback issued once the GPIOs are valid
* @teardown: optional callback issued before the GPIOs are invalidated
- * @context: optional parameter passed to setup() and teardown()
*
* Board data may be used to package the GPIO (and LED) lines for use
* in by the generic GPIO and LED frameworks. The first four GPIOs
@@ -193,12 +193,9 @@ struct i2c_client;
* devices in their initial states using these GPIOs.
*/
struct tps65010_board {
- int base;
unsigned outmask;
-
- int (*setup)(struct i2c_client *client, void *context);
- int (*teardown)(struct i2c_client *client, void *context);
- void *context;
+ int (*setup)(struct i2c_client *client, struct gpio_chip *gc);
+ void (*teardown)(struct i2c_client *client, struct gpio_chip *gc);
};
#endif /* __LINUX_I2C_TPS65010_H */
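
The opaque context pointer gives way to passing the gpio_chip itself to the board hooks, and teardown no longer returns a value. A hypothetical board definition against the new signatures:

#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/mfd/tps65010.h>

/* Hypothetical board hook: board code now receives the gpio_chip
 * directly and can derive GPIO numbers from it. */
static int example_board_setup(struct i2c_client *client, struct gpio_chip *gc)
{
	dev_info(&client->dev, "TPS65010 GPIOs at base %d\n", gc->base);
	return 0;
}

static struct tps65010_board example_board = {
	.outmask = 0x0f,	/* GPIO1..4 usable as outputs */
	.setup = example_board_setup,
};
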
diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h
index e0a417e53766..9185b5cd8371 100644
--- a/include/linux/mfd/tps65086.h
+++ b/include/linux/mfd/tps65086.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65912 driver
*/
@@ -21,8 +13,9 @@
#include <linux/regmap.h>
/* List of registers for TPS65086 */
-#define TPS65086_DEVICEID 0x01
-#define TPS65086_IRQ 0x02
+#define TPS65086_DEVICEID1 0x00
+#define TPS65086_DEVICEID2 0x01
+#define TPS65086_IRQ 0x02
#define TPS65086_IRQ_MASK 0x03
#define TPS65086_PMICSTAT 0x04
#define TPS65086_SHUTDNSRC 0x05
@@ -83,10 +76,16 @@
#define TPS65086_IRQ_SHUTDN_MASK BIT(3)
#define TPS65086_IRQ_FAULT_MASK BIT(7)
-/* DEVICEID Register field definitions */
-#define TPS65086_DEVICEID_PART_MASK GENMASK(3, 0)
-#define TPS65086_DEVICEID_OTP_MASK GENMASK(5, 4)
-#define TPS65086_DEVICEID_REV_MASK GENMASK(7, 6)
+/* DEVICEID1 Register field definitions */
+#define TPS6508640_ID 0x00
+#define TPS65086401_ID 0x01
+#define TPS6508641_ID 0x10
+#define TPS65086470_ID 0x70
+
+/* DEVICEID2 Register field definitions */
+#define TPS65086_DEVICEID2_PART_MASK GENMASK(3, 0)
+#define TPS65086_DEVICEID2_OTP_MASK GENMASK(5, 4)
+#define TPS65086_DEVICEID2_REV_MASK GENMASK(7, 6)
/* VID Masks */
#define BUCK_VID_MASK GENMASK(7, 1)
@@ -100,6 +99,8 @@ enum tps65086_irqs {
TPS65086_IRQ_FAULT,
};
+struct tps65086_regulator_config;
+
/**
* struct tps65086 - state holder for the tps65086 driver
*
@@ -108,6 +109,8 @@ enum tps65086_irqs {
struct tps65086 {
struct device *dev;
struct regmap *regmap;
+ unsigned int chip_id;
+ const struct tps65086_regulator_config *reg_config;
/* IRQ Data */
int irq;
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index db7091824ed0..877d9c41c53d 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/mfd/tps65217.h
*
* Functions to access TPS65217 power management chip.
*
* Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_MFD_TPS65217_H
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h
index f4ca367e3473..2946be2f15f3 100644
--- a/include/linux/mfd/tps65218.h
+++ b/include/linux/mfd/tps65218.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/mfd/tps65218.h
*
- * Functions to access TPS65219 power management chip.
+ * Functions to access TPS65218 power management chip.
*
* Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
*/
#ifndef __LINUX_MFD_TPS65218_H
diff --git a/include/linux/mfd/tps65219.h b/include/linux/mfd/tps65219.h
new file mode 100644
index 000000000000..55234e771ba7
--- /dev/null
+++ b/include/linux/mfd/tps65219.h
@@ -0,0 +1,449 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Functions to access TPS65215/TPS65219 Power Management Integrated Chips
+ *
+ * Copyright (C) 2022 BayLibre Incorporated - https://www.baylibre.com/
+ * Copyright (C) 2024 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#ifndef MFD_TPS65219_H
+#define MFD_TPS65219_H
+
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+/* Chip id list */
+enum pmic_id {
+ TPS65214,
+ TPS65215,
+ TPS65219,
+};
+
+/* I2C ID for TPS65219 part */
+#define TPS65219_I2C_ID 0x24
+
+/* All register addresses */
+#define TPS65219_REG_TI_DEV_ID 0x00
+#define TPS65219_REG_NVM_ID 0x01
+#define TPS65219_REG_ENABLE_CTRL 0x02
+#define TPS65219_REG_BUCKS_CONFIG 0x03
+#define TPS65214_REG_LOCK 0x03
+#define TPS65219_REG_LDO4_VOUT 0x04
+#define TPS65214_REG_LDO1_VOUT_STBY 0x04
+#define TPS65219_REG_LDO3_VOUT 0x05
+#define TPS65215_REG_LDO2_VOUT 0x05
+#define TPS65214_REG_LDO1_VOUT 0x05
+#define TPS65219_REG_LDO2_VOUT 0x06
+#define TPS65214_REG_LDO2_VOUT 0x06
+#define TPS65219_REG_LDO1_VOUT 0x07
+#define TPS65214_REG_LDO2_VOUT_STBY 0x07
+#define TPS65219_REG_BUCK3_VOUT 0x8
+#define TPS65219_REG_BUCK2_VOUT 0x9
+#define TPS65219_REG_BUCK1_VOUT 0xA
+#define TPS65219_REG_LDO4_SEQUENCE_SLOT 0xB
+#define TPS65219_REG_LDO3_SEQUENCE_SLOT 0xC
+#define TPS65215_REG_LDO2_SEQUENCE_SLOT 0xC
+#define TPS65214_REG_LDO1_SEQUENCE_SLOT 0xC
+#define TPS65219_REG_LDO2_SEQUENCE_SLOT 0xD
+#define TPS65219_REG_LDO1_SEQUENCE_SLOT 0xE
+#define TPS65219_REG_BUCK3_SEQUENCE_SLOT 0xF
+#define TPS65219_REG_BUCK2_SEQUENCE_SLOT 0x10
+#define TPS65219_REG_BUCK1_SEQUENCE_SLOT 0x11
+#define TPS65219_REG_nRST_SEQUENCE_SLOT 0x12
+#define TPS65219_REG_GPIO_SEQUENCE_SLOT 0x13
+#define TPS65219_REG_GPO2_SEQUENCE_SLOT 0x14
+#define TPS65214_REG_GPIO_GPI_SEQUENCE_SLOT 0x14
+#define TPS65219_REG_GPO1_SEQUENCE_SLOT 0x15
+#define TPS65214_REG_GPO_SEQUENCE_SLOT 0x15
+#define TPS65219_REG_POWER_UP_SLOT_DURATION_1 0x16
+#define TPS65219_REG_POWER_UP_SLOT_DURATION_2 0x17
+/* _SLOT_DURATION_3 doesn't apply to TPS65215 */
+#define TPS65219_REG_POWER_UP_SLOT_DURATION_3 0x18
+#define TPS65219_REG_POWER_UP_SLOT_DURATION_4 0x19
+#define TPS65214_REG_BUCK3_VOUT_STBY 0x19
+#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_1 0x1A
+#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_2 0x1B
+#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_3 0x1C
+#define TPS65214_REG_BUCK2_VOUT_STBY 0x1C
+#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_4 0x1D
+#define TPS65214_REG_BUCK1_VOUT_STBY 0x1D
+#define TPS65219_REG_GENERAL_CONFIG 0x1E
+#define TPS65219_REG_MFP_1_CONFIG 0x1F
+#define TPS65219_REG_MFP_2_CONFIG 0x20
+#define TPS65219_REG_STBY_1_CONFIG 0x21
+#define TPS65219_REG_STBY_2_CONFIG 0x22
+#define TPS65219_REG_OC_DEGL_CONFIG 0x23
+/* 'sub irq' MASK registers */
+#define TPS65219_REG_INT_MASK_UV 0x24
+#define TPS65219_REG_MASK_CONFIG 0x25
+
+#define TPS65219_REG_I2C_ADDRESS_REG 0x26
+#define TPS65219_REG_USER_GENERAL_NVM_STORAGE 0x27
+#define TPS65219_REG_MANUFACTURING_VER 0x28
+#define TPS65219_REG_MFP_CTRL 0x29
+#define TPS65219_REG_DISCHARGE_CONFIG 0x2A
+/* main irq registers */
+#define TPS65219_REG_INT_SOURCE 0x2B
+
+/* TPS65219 'sub irq' registers */
+#define TPS65219_REG_INT_LDO_3_4 0x2C
+#define TPS65219_REG_INT_LDO_1_2 0x2D
+
+/* TPS65215 specific 'sub irq' registers */
+#define TPS65215_REG_INT_LDO_2 0x2C
+#define TPS65215_REG_INT_LDO_1 0x2D
+
+/* TPS65214 specific 'sub irq' register */
+#define TPS65214_REG_INT_LDO_1_2 0x2D
+
+/* Common TPS65215 & TPS65219 'sub irq' registers */
+#define TPS65219_REG_INT_BUCK_3 0x2E
+#define TPS65219_REG_INT_BUCK_1_2 0x2F
+#define TPS65219_REG_INT_SYSTEM 0x30
+#define TPS65219_REG_INT_RV 0x31
+#define TPS65219_REG_INT_TIMEOUT_RV_SD 0x32
+#define TPS65219_REG_INT_PB 0x33
+
+#define TPS65219_REG_INT_LDO_3_4_POS 0
+#define TPS65219_REG_INT_LDO_1_2_POS 1
+#define TPS65219_REG_INT_BUCK_3_POS 2
+#define TPS65219_REG_INT_BUCK_1_2_POS 3
+#define TPS65219_REG_INT_SYS_POS 4
+#define TPS65219_REG_INT_RV_POS 5
+#define TPS65219_REG_INT_TO_RV_POS 6
+#define TPS65219_REG_INT_PB_POS 7
+
+#define TPS65215_REG_INT_LDO_2_POS 0
+#define TPS65215_REG_INT_LDO_1_POS 1
+
+#define TPS65214_REG_INT_LDO_1_2_POS 0
+#define TPS65214_REG_INT_BUCK_3_POS 1
+#define TPS65214_REG_INT_BUCK_1_2_POS 2
+#define TPS65214_REG_INT_SYS_POS 3
+#define TPS65214_REG_INT_RV_POS 4
+#define TPS65214_REG_INT_TO_RV_POS 5
+#define TPS65214_REG_INT_PB_POS 6
+
+#define TPS65219_REG_USER_NVM_CMD 0x34
+#define TPS65219_REG_POWER_UP_STATUS 0x35
+#define TPS65219_REG_SPARE_2 0x36
+#define TPS65219_REG_SPARE_3 0x37
+#define TPS65219_REG_FACTORY_CONFIG_2 0x41
+
+/* Register field definitions */
+#define TPS65219_DEVID_REV_MASK GENMASK(7, 0)
+#define TPS65219_BUCKS_LDOS_VOUT_VSET_MASK GENMASK(5, 0)
+#define TPS65219_BUCKS_UV_THR_SEL_MASK BIT(6)
+#define TPS65219_BUCKS_BW_SEL_MASK BIT(7)
+#define LDO_BYP_SHIFT 6
+#define TPS65219_LDOS_BYP_CONFIG_MASK BIT(LDO_BYP_SHIFT)
+#define TPS65219_LDOS_LSW_CONFIG_MASK BIT(7)
+/* Regulators enable control */
+#define TPS65219_ENABLE_BUCK1_EN_MASK BIT(0)
+#define TPS65219_ENABLE_BUCK2_EN_MASK BIT(1)
+#define TPS65219_ENABLE_BUCK3_EN_MASK BIT(2)
+#define TPS65219_ENABLE_LDO1_EN_MASK BIT(3)
+#define TPS65219_ENABLE_LDO2_EN_MASK BIT(4)
+#define TPS65219_ENABLE_LDO3_EN_MASK BIT(5)
+#define TPS65215_ENABLE_LDO2_EN_MASK BIT(5)
+#define TPS65214_ENABLE_LDO1_EN_MASK BIT(5)
+#define TPS65219_ENABLE_LDO4_EN_MASK BIT(6)
+/* power ON-OFF sequence slot */
+#define TPS65219_BUCKS_LDOS_SEQUENCE_OFF_SLOT_MASK GENMASK(3, 0)
+#define TPS65219_BUCKS_LDOS_SEQUENCE_ON_SLOT_MASK GENMASK(7, 4)
+/* TODO: Not needed, same mapping as TPS65219_ENABLE_REGNAME_EN, factorize */
+#define TPS65219_STBY1_BUCK1_STBY_EN_MASK BIT(0)
+#define TPS65219_STBY1_BUCK2_STBY_EN_MASK BIT(1)
+#define TPS65219_STBY1_BUCK3_STBY_EN_MASK BIT(2)
+#define TPS65219_STBY1_LDO1_STBY_EN_MASK BIT(3)
+#define TPS65219_STBY1_LDO2_STBY_EN_MASK BIT(4)
+#define TPS65219_STBY1_LDO3_STBY_EN_MASK BIT(5)
+#define TPS65219_STBY1_LDO4_STBY_EN_MASK BIT(6)
+/* STBY_2 config */
+#define TPS65219_STBY2_GPO1_STBY_EN_MASK BIT(0)
+#define TPS65219_STBY2_GPO2_STBY_EN_MASK BIT(1)
+#define TPS65219_STBY2_GPIO_STBY_EN_MASK BIT(2)
+/* MFP Control */
+#define TPS65219_MFP_I2C_OFF_REQ_MASK BIT(0)
+#define TPS65219_MFP_STBY_I2C_CTRL_MASK BIT(1)
+#define TPS65219_MFP_COLD_RESET_I2C_CTRL_MASK BIT(2)
+#define TPS65219_MFP_WARM_RESET_I2C_CTRL_MASK BIT(3)
+#define TPS65219_MFP_GPIO_STATUS_MASK BIT(4)
+/* MFP_1 Config */
+#define TPS65219_MFP_1_VSEL_DDR_SEL_MASK BIT(0)
+#define TPS65219_MFP_1_VSEL_SD_POL_MASK BIT(1)
+#define TPS65219_MFP_1_VSEL_RAIL_MASK BIT(2)
+/* MFP_2 Config */
+#define TPS65219_MFP_2_MODE_STBY_MASK GENMASK(1, 0)
+#define TPS65219_MFP_2_MODE_RESET_MASK BIT(2)
+#define TPS65219_MFP_2_EN_PB_VSENSE_DEGL_MASK BIT(3)
+#define TPS65219_MFP_2_EN_PB_VSENSE_MASK GENMASK(5, 4)
+#define TPS65219_MFP_2_WARM_COLD_RESET_MASK BIT(6)
+#define TPS65219_MFP_2_PU_ON_FSD_MASK BIT(7)
+#define TPS65219_MFP_2_EN 0
+#define TPS65219_MFP_2_PB BIT(4)
+#define TPS65219_MFP_2_VSENSE BIT(5)
+/* MASK_UV Config */
+#define TPS65219_REG_MASK_UV_LDO1_UV_MASK BIT(0)
+#define TPS65219_REG_MASK_UV_LDO2_UV_MASK BIT(1)
+#define TPS65219_REG_MASK_UV_LDO3_UV_MASK BIT(2)
+#define TPS65219_REG_MASK_UV_LDO4_UV_MASK BIT(3)
+#define TPS65219_REG_MASK_UV_BUCK1_UV_MASK BIT(4)
+#define TPS65219_REG_MASK_UV_BUCK2_UV_MASK BIT(5)
+#define TPS65219_REG_MASK_UV_BUCK3_UV_MASK BIT(6)
+#define TPS65219_REG_MASK_UV_RETRY_MASK BIT(7)
+/* MASK Config */
+// SENSOR_N_WARM_MASK already defined in Thermal
+#define TPS65219_REG_MASK_INT_FOR_RV_MASK BIT(4)
+#define TPS65219_REG_MASK_EFFECT_MASK GENMASK(2, 1)
+#define TPS65219_REG_MASK_INT_FOR_PB_MASK BIT(7)
+/* UnderVoltage - Short to GND - OverCurrent */
+/* LDO3-4: only for TPS65219 */
+#define TPS65219_INT_LDO3_SCG_MASK BIT(0)
+#define TPS65219_INT_LDO3_OC_MASK BIT(1)
+#define TPS65219_INT_LDO3_UV_MASK BIT(2)
+#define TPS65219_INT_LDO4_SCG_MASK BIT(3)
+#define TPS65219_INT_LDO4_OC_MASK BIT(4)
+#define TPS65219_INT_LDO4_UV_MASK BIT(5)
+/* LDO1-2: TPS65214 & TPS65219 */
+#define TPS65219_INT_LDO1_SCG_MASK BIT(0)
+#define TPS65219_INT_LDO1_OC_MASK BIT(1)
+#define TPS65219_INT_LDO1_UV_MASK BIT(2)
+#define TPS65219_INT_LDO2_SCG_MASK BIT(3)
+#define TPS65219_INT_LDO2_OC_MASK BIT(4)
+#define TPS65219_INT_LDO2_UV_MASK BIT(5)
+/* TPS65215 LDO1-2 */
+#define TPS65215_INT_LDO1_SCG_MASK BIT(0)
+#define TPS65215_INT_LDO1_OC_MASK BIT(1)
+#define TPS65215_INT_LDO1_UV_MASK BIT(2)
+#define TPS65215_INT_LDO2_SCG_MASK BIT(0)
+#define TPS65215_INT_LDO2_OC_MASK BIT(1)
+#define TPS65215_INT_LDO2_UV_MASK BIT(2)
+/* BUCK3 */
+#define TPS65219_INT_BUCK3_SCG_MASK BIT(0)
+#define TPS65219_INT_BUCK3_OC_MASK BIT(1)
+#define TPS65219_INT_BUCK3_NEG_OC_MASK BIT(2)
+#define TPS65219_INT_BUCK3_UV_MASK BIT(3)
+/* BUCK1-2 */
+#define TPS65219_INT_BUCK1_SCG_MASK BIT(0)
+#define TPS65219_INT_BUCK1_OC_MASK BIT(1)
+#define TPS65219_INT_BUCK1_NEG_OC_MASK BIT(2)
+#define TPS65219_INT_BUCK1_UV_MASK BIT(3)
+#define TPS65219_INT_BUCK2_SCG_MASK BIT(4)
+#define TPS65219_INT_BUCK2_OC_MASK BIT(5)
+#define TPS65219_INT_BUCK2_NEG_OC_MASK BIT(6)
+#define TPS65219_INT_BUCK2_UV_MASK BIT(7)
+/* Thermal Sensor: TPS65219/TPS65215 */
+#define TPS65219_INT_SENSOR_3_WARM_MASK BIT(0)
+#define TPS65219_INT_SENSOR_3_HOT_MASK BIT(4)
+/* Thermal Sensor: TPS65219/TPS65215/TPS65214 */
+#define TPS65219_INT_SENSOR_2_WARM_MASK BIT(1)
+#define TPS65219_INT_SENSOR_1_WARM_MASK BIT(2)
+#define TPS65219_INT_SENSOR_0_WARM_MASK BIT(3)
+#define TPS65219_INT_SENSOR_2_HOT_MASK BIT(5)
+#define TPS65219_INT_SENSOR_1_HOT_MASK BIT(6)
+#define TPS65219_INT_SENSOR_0_HOT_MASK BIT(7)
+/* Residual Voltage */
+#define TPS65219_INT_BUCK1_RV_MASK BIT(0)
+#define TPS65219_INT_BUCK2_RV_MASK BIT(1)
+#define TPS65219_INT_BUCK3_RV_MASK BIT(2)
+#define TPS65219_INT_LDO1_RV_MASK BIT(3)
+#define TPS65219_INT_LDO2_RV_MASK BIT(4)
+#define TPS65219_INT_LDO3_RV_MASK BIT(5)
+#define TPS65215_INT_LDO2_RV_MASK BIT(5)
+#define TPS65214_INT_LDO2_RV_MASK BIT(5)
+#define TPS65219_INT_LDO4_RV_MASK BIT(6)
+/* Residual Voltage ShutDown */
+#define TPS65219_INT_BUCK1_RV_SD_MASK BIT(0)
+#define TPS65219_INT_BUCK2_RV_SD_MASK BIT(1)
+#define TPS65219_INT_BUCK3_RV_SD_MASK BIT(2)
+#define TPS65219_INT_LDO1_RV_SD_MASK BIT(3)
+#define TPS65219_INT_LDO2_RV_SD_MASK BIT(4)
+#define TPS65219_INT_LDO3_RV_SD_MASK BIT(5)
+#define TPS65215_INT_LDO2_RV_SD_MASK BIT(5)
+#define TPS65214_INT_LDO1_RV_SD_MASK BIT(5)
+#define TPS65219_INT_LDO4_RV_SD_MASK BIT(6)
+#define TPS65219_INT_TIMEOUT_MASK BIT(7)
+/* Power Button */
+#define TPS65219_INT_PB_FALLING_EDGE_DETECT_MASK BIT(0)
+#define TPS65219_INT_PB_RISING_EDGE_DETECT_MASK BIT(1)
+#define TPS65219_INT_PB_REAL_TIME_STATUS_MASK BIT(2)
+
+#define TPS65219_PB_POS 7
+#define TPS65219_TO_RV_POS 6
+#define TPS65219_RV_POS 5
+#define TPS65219_SYS_POS 4
+#define TPS65219_BUCK_1_2_POS 3
+#define TPS65219_BUCK_3_POS 2
+#define TPS65219_LDO_1_2_POS 1
+#define TPS65219_LDO_3_4_POS 0
+
+/* IRQs */
+enum {
+ /* LDO3-4 register IRQs */
+ TPS65219_INT_LDO3_SCG,
+ TPS65219_INT_LDO3_OC,
+ TPS65219_INT_LDO3_UV,
+ TPS65219_INT_LDO4_SCG,
+ TPS65219_INT_LDO4_OC,
+ TPS65219_INT_LDO4_UV,
+ /* TPS65215 LDO1 */
+ TPS65215_INT_LDO1_SCG,
+ TPS65215_INT_LDO1_OC,
+ TPS65215_INT_LDO1_UV,
+ /* TPS65215 LDO2 */
+ TPS65215_INT_LDO2_SCG,
+ TPS65215_INT_LDO2_OC,
+ TPS65215_INT_LDO2_UV,
+ /* LDO1-2: TPS65219/TPS65214 */
+ TPS65219_INT_LDO1_SCG,
+ TPS65219_INT_LDO1_OC,
+ TPS65219_INT_LDO1_UV,
+ TPS65219_INT_LDO2_SCG,
+ TPS65219_INT_LDO2_OC,
+ TPS65219_INT_LDO2_UV,
+ /* BUCK3 */
+ TPS65219_INT_BUCK3_SCG,
+ TPS65219_INT_BUCK3_OC,
+ TPS65219_INT_BUCK3_NEG_OC,
+ TPS65219_INT_BUCK3_UV,
+ /* BUCK1-2 */
+ TPS65219_INT_BUCK1_SCG,
+ TPS65219_INT_BUCK1_OC,
+ TPS65219_INT_BUCK1_NEG_OC,
+ TPS65219_INT_BUCK1_UV,
+ TPS65219_INT_BUCK2_SCG,
+ TPS65219_INT_BUCK2_OC,
+ TPS65219_INT_BUCK2_NEG_OC,
+ TPS65219_INT_BUCK2_UV,
+ /* Thermal Sensor */
+ TPS65219_INT_SENSOR_3_WARM,
+ TPS65219_INT_SENSOR_2_WARM,
+ TPS65219_INT_SENSOR_1_WARM,
+ TPS65219_INT_SENSOR_0_WARM,
+ TPS65219_INT_SENSOR_3_HOT,
+ TPS65219_INT_SENSOR_2_HOT,
+ TPS65219_INT_SENSOR_1_HOT,
+ TPS65219_INT_SENSOR_0_HOT,
+ /* Residual Voltage */
+ TPS65219_INT_BUCK1_RV,
+ TPS65219_INT_BUCK2_RV,
+ TPS65219_INT_BUCK3_RV,
+ TPS65219_INT_LDO1_RV,
+ TPS65219_INT_LDO2_RV,
+ TPS65215_INT_LDO2_RV,
+ TPS65214_INT_LDO2_RV,
+ TPS65219_INT_LDO3_RV,
+ TPS65219_INT_LDO4_RV,
+ /* Residual Voltage ShutDown */
+ TPS65219_INT_BUCK1_RV_SD,
+ TPS65219_INT_BUCK2_RV_SD,
+ TPS65219_INT_BUCK3_RV_SD,
+ TPS65219_INT_LDO1_RV_SD,
+ TPS65214_INT_LDO1_RV_SD,
+ TPS65215_INT_LDO2_RV_SD,
+ TPS65219_INT_LDO2_RV_SD,
+ TPS65219_INT_LDO3_RV_SD,
+ TPS65219_INT_LDO4_RV_SD,
+ TPS65219_INT_TIMEOUT,
+ /* Power Button */
+ TPS65219_INT_PB_FALLING_EDGE_DETECT,
+ TPS65219_INT_PB_RISING_EDGE_DETECT,
+};
+
+enum tps65214_regulator_id {
+ /*
+ * DCDCs same as TPS65219
+ * LDO1 maps to TPS65219's LDO3
+ * LDO2 is the same as TPS65219
+ */
+ TPS65214_LDO_1 = 3,
+ TPS65214_LDO_2 = 4,
+};
+
+enum tps65215_regulator_id {
+ /* DCDCs same as TPS65219 */
+ /* LDO1 is the same as TPS65219 */
+ TPS65215_LDO_2 = 4,
+};
+
+enum tps65219_regulator_id {
+ /* DCDC's */
+ TPS65219_BUCK_1,
+ TPS65219_BUCK_2,
+ TPS65219_BUCK_3,
+ /* LDOs */
+ TPS65219_LDO_1,
+ TPS65219_LDO_2,
+ TPS65219_LDO_3,
+ TPS65219_LDO_4,
+};
+
+/* Number of step-down converters available */
+#define TPS6521X_NUM_BUCKS 3
+/* Number of LDO voltage regulators available */
+#define TPS65219_NUM_LDO 4
+#define TPS65215_NUM_LDO 2
+#define TPS65214_NUM_LDO 2
+/* Number of total regulators available */
+#define TPS65219_NUM_REGULATOR (TPS6521X_NUM_BUCKS + TPS65219_NUM_LDO)
+#define TPS65215_NUM_REGULATOR (TPS6521X_NUM_BUCKS + TPS65215_NUM_LDO)
+#define TPS65214_NUM_REGULATOR (TPS6521X_NUM_BUCKS + TPS65214_NUM_LDO)
+
+/* Define the TPS65214 IRQ numbers */
+enum tps65214_irqs {
+ /* INT source registers */
+ TPS65214_TO_RV_SD_SET_IRQ,
+ TPS65214_RV_SET_IRQ,
+ TPS65214_SYS_SET_IRQ,
+ TPS65214_BUCK_1_2_SET_IRQ,
+ TPS65214_BUCK_3_SET_IRQ,
+ TPS65214_LDO_1_2_SET_IRQ,
+ TPS65214_PB_SET_IRQ = 7,
+};
+
+/* Define the TPS65215 IRQ numbers */
+enum tps65215_irqs {
+ /* INT source registers */
+ TPS65215_TO_RV_SD_SET_IRQ,
+ TPS65215_RV_SET_IRQ,
+ TPS65215_SYS_SET_IRQ,
+ TPS65215_BUCK_1_2_SET_IRQ,
+ TPS65215_BUCK_3_SET_IRQ,
+ TPS65215_LDO_1_SET_IRQ,
+ TPS65215_LDO_2_SET_IRQ,
+ TPS65215_PB_SET_IRQ,
+};
+
+/* Define the TPS65219 IRQ numbers */
+enum tps65219_irqs {
+ /* INT source registers */
+ TPS65219_TO_RV_SD_SET_IRQ,
+ TPS65219_RV_SET_IRQ,
+ TPS65219_SYS_SET_IRQ,
+ TPS65219_BUCK_1_2_SET_IRQ,
+ TPS65219_BUCK_3_SET_IRQ,
+ TPS65219_LDO_1_2_SET_IRQ,
+ TPS65219_LDO_3_4_SET_IRQ,
+ TPS65219_PB_SET_IRQ,
+};
+
+/**
+ * struct tps65219 - tps65219 sub-driver chip access routines
+ *
+ * Device data may be used to access the TPS65219 chip
+ *
+ * @dev: MFD device
+ * @regmap: Regmap for accessing the device registers
+ * @irq_data: Regmap irq data used for the irq chip
+ */
+struct tps65219 {
+ struct device *dev;
+ struct regmap *regmap;
+
+ struct regmap_irq_chip_data *irq_data;
+};
+
+#endif /* MFD_TPS65219_H */
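
The IRQ enum above is meant to be translated to Linux virqs through the parent's regmap-irq data. A sketch of how a child driver might request the power-button falling-edge interrupt; the threaded, devm-managed request reflects typical usage and is an assumption, not something the header mandates:

#include <linux/interrupt.h>
#include <linux/mfd/tps65219.h>
#include <linux/regmap.h>

/* Sketch: map an enum entry to a virq and request it. Handler body
 * is supplied by the caller; error handling kept minimal. */
static int example_request_pb_irq(struct device *dev, struct tps65219 *tps,
				  irq_handler_t thread_fn)
{
	int virq = regmap_irq_get_virq(tps->irq_data,
				       TPS65219_INT_PB_FALLING_EDGE_DETECT);

	if (virq < 0)
		return virq;

	return devm_request_threaded_irq(dev, virq, NULL, thread_fn,
					 IRQF_ONESHOT, "tps65219-pb", tps);
}
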
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index 701925db75b3..f67ef0a4e041 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -749,7 +749,7 @@
#define VDDCTRL_ST_SHIFT 0
-/*Register VDDCTRL_OP (0x28) bit definitios */
+/*Register VDDCTRL_OP (0x28) bit definitions */
#define VDDCTRL_OP_CMD_MASK 0x80
#define VDDCTRL_OP_CMD_SHIFT 7
#define VDDCTRL_OP_SEL_MASK 0x7F
diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h
index 7943e413deae..e5373c302722 100644
--- a/include/linux/mfd/tps65912.h
+++ b/include/linux/mfd/tps65912.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/
* Andrew F. Davis <afd@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65218 driver and the previous TPS65912 driver by
* Margarita Olaya Cabrera <magi@slimlogic.co.uk>
*/
@@ -322,6 +314,5 @@ struct tps65912 {
extern const struct regmap_config tps65912_regmap_config;
int tps65912_device_init(struct tps65912 *tps);
-int tps65912_device_exit(struct tps65912 *tps);
#endif /* __LINUX_MFD_TPS65912_H */
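
tps65912_device_exit() drops out of the interface, which ordinarily means teardown moved to device-managed resources; that motivation is an assumption here. A hypothetical probe excerpt showing the devm pattern that makes an explicit exit hook unnecessary:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/mfd/tps65912.h>
#include <linux/regmap.h>

static int example_probe(struct i2c_client *client)
{
	struct tps65912 *tps;

	/* devm allocations are released automatically on driver detach */
	tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
	if (!tps)
		return -ENOMEM;

	tps->dev = &client->dev;
	tps->regmap = devm_regmap_init_i2c(client, &tps65912_regmap_config);
	if (IS_ERR(tps->regmap))
		return PTR_ERR(tps->regmap);

	return tps65912_device_init(tps);
}
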
diff --git a/include/linux/mfd/tps6594.h b/include/linux/mfd/tps6594.h
new file mode 100644
index 000000000000..021db8875963
--- /dev/null
+++ b/include/linux/mfd/tps6594.h
@@ -0,0 +1,1346 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Functions to access TPS6594 Power Management IC
+ *
+ * Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
+ */
+
+#ifndef __LINUX_MFD_TPS6594_H
+#define __LINUX_MFD_TPS6594_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+struct regmap_irq_chip_data;
+
+/* Chip id list */
+enum pmic_id {
+ TPS6594,
+ TPS6593,
+ LP8764,
+ TPS65224,
+ TPS652G1,
+};
+
+/* Macro to get page index from register address */
+#define TPS6594_REG_TO_PAGE(reg) ((reg) >> 8)
+
+/* Registers for page 0 */
+#define TPS6594_REG_DEV_REV 0x01
+
+#define TPS6594_REG_NVM_CODE_1 0x02
+#define TPS6594_REG_NVM_CODE_2 0x03
+
+#define TPS6594_REG_BUCKX_CTRL(buck_inst) (0x04 + ((buck_inst) << 1))
+#define TPS6594_REG_BUCKX_CONF(buck_inst) (0x05 + ((buck_inst) << 1))
+#define TPS6594_REG_BUCKX_VOUT_1(buck_inst) (0x0e + ((buck_inst) << 1))
+#define TPS6594_REG_BUCKX_VOUT_2(buck_inst) (0x0f + ((buck_inst) << 1))
+#define TPS6594_REG_BUCKX_PG_WINDOW(buck_inst) (0x18 + (buck_inst))
+
+#define TPS6594_REG_LDOX_CTRL(ldo_inst) (0x1d + (ldo_inst))
+#define TPS6594_REG_LDORTC_CTRL 0x22
+#define TPS6594_REG_LDOX_VOUT(ldo_inst) (0x23 + (ldo_inst))
+#define TPS6594_REG_LDOX_PG_WINDOW(ldo_inst) (0x27 + (ldo_inst))
+
+#define TPS6594_REG_VCCA_VMON_CTRL 0x2b
+#define TPS6594_REG_VCCA_PG_WINDOW 0x2c
+#define TPS6594_REG_VMON1_PG_WINDOW 0x2d
+#define TPS6594_REG_VMON1_PG_LEVEL 0x2e
+#define TPS6594_REG_VMON2_PG_WINDOW 0x2f
+#define TPS6594_REG_VMON2_PG_LEVEL 0x30
+
+#define TPS6594_REG_GPIOX_CONF(gpio_inst) (0x31 + (gpio_inst))
+#define TPS6594_REG_NPWRON_CONF 0x3c
+#define TPS6594_REG_GPIO_OUT_1 0x3d
+#define TPS6594_REG_GPIO_OUT_2 0x3e
+#define TPS6594_REG_GPIO_IN_1 0x3f
+#define TPS6594_REG_GPIO_IN_2 0x40
+#define TPS6594_REG_GPIOX_OUT(gpio_inst) (TPS6594_REG_GPIO_OUT_1 + (gpio_inst) / 8)
+#define TPS6594_REG_GPIOX_IN(gpio_inst) (TPS6594_REG_GPIO_IN_1 + (gpio_inst) / 8)
+
+#define TPS6594_REG_RAIL_SEL_1 0x41
+#define TPS6594_REG_RAIL_SEL_2 0x42
+#define TPS6594_REG_RAIL_SEL_3 0x43
+
+#define TPS6594_REG_FSM_TRIG_SEL_1 0x44
+#define TPS6594_REG_FSM_TRIG_SEL_2 0x45
+#define TPS6594_REG_FSM_TRIG_MASK_1 0x46
+#define TPS6594_REG_FSM_TRIG_MASK_2 0x47
+#define TPS6594_REG_FSM_TRIG_MASK_3 0x48
+
+#define TPS6594_REG_MASK_BUCK1_2 0x49
+#define TPS65224_REG_MASK_BUCKS 0x49
+#define TPS6594_REG_MASK_BUCK3_4 0x4a
+#define TPS6594_REG_MASK_BUCK5 0x4b
+#define TPS6594_REG_MASK_LDO1_2 0x4c
+#define TPS65224_REG_MASK_LDOS 0x4c
+#define TPS6594_REG_MASK_LDO3_4 0x4d
+#define TPS6594_REG_MASK_VMON 0x4e
+#define TPS6594_REG_MASK_GPIO_FALL 0x4f
+#define TPS6594_REG_MASK_GPIO_RISE 0x50
+#define TPS6594_REG_MASK_GPIO9_11 0x51
+#define TPS6594_REG_MASK_STARTUP 0x52
+#define TPS6594_REG_MASK_MISC 0x53
+#define TPS6594_REG_MASK_MODERATE_ERR 0x54
+#define TPS6594_REG_MASK_FSM_ERR 0x56
+#define TPS6594_REG_MASK_COMM_ERR 0x57
+#define TPS6594_REG_MASK_READBACK_ERR 0x58
+#define TPS6594_REG_MASK_ESM 0x59
+
+#define TPS6594_REG_INT_TOP 0x5a
+#define TPS6594_REG_INT_BUCK 0x5b
+#define TPS6594_REG_INT_BUCK1_2 0x5c
+#define TPS6594_REG_INT_BUCK3_4 0x5d
+#define TPS6594_REG_INT_BUCK5 0x5e
+#define TPS6594_REG_INT_LDO_VMON 0x5f
+#define TPS6594_REG_INT_LDO1_2 0x60
+#define TPS6594_REG_INT_LDO3_4 0x61
+#define TPS6594_REG_INT_VMON 0x62
+#define TPS6594_REG_INT_GPIO 0x63
+#define TPS6594_REG_INT_GPIO1_8 0x64
+#define TPS6594_REG_INT_STARTUP 0x65
+#define TPS6594_REG_INT_MISC 0x66
+#define TPS6594_REG_INT_MODERATE_ERR 0x67
+#define TPS6594_REG_INT_SEVERE_ERR 0x68
+#define TPS6594_REG_INT_FSM_ERR 0x69
+#define TPS6594_REG_INT_COMM_ERR 0x6a
+#define TPS6594_REG_INT_READBACK_ERR 0x6b
+#define TPS6594_REG_INT_ESM 0x6c
+
+#define TPS6594_REG_STAT_BUCK1_2 0x6d
+#define TPS6594_REG_STAT_BUCK3_4 0x6e
+#define TPS6594_REG_STAT_BUCK5 0x6f
+#define TPS6594_REG_STAT_LDO1_2 0x70
+#define TPS6594_REG_STAT_LDO3_4 0x71
+#define TPS6594_REG_STAT_VMON 0x72
+#define TPS6594_REG_STAT_STARTUP 0x73
+#define TPS6594_REG_STAT_MISC 0x74
+#define TPS6594_REG_STAT_MODERATE_ERR 0x75
+#define TPS6594_REG_STAT_SEVERE_ERR 0x76
+#define TPS6594_REG_STAT_READBACK_ERR 0x77
+
+#define TPS6594_REG_PGOOD_SEL_1 0x78
+#define TPS6594_REG_PGOOD_SEL_2 0x79
+#define TPS6594_REG_PGOOD_SEL_3 0x7a
+#define TPS6594_REG_PGOOD_SEL_4 0x7b
+
+#define TPS6594_REG_PLL_CTRL 0x7c
+
+#define TPS6594_REG_CONFIG_1 0x7d
+#define TPS6594_REG_CONFIG_2 0x7e
+
+#define TPS6594_REG_ENABLE_DRV_REG 0x80
+
+#define TPS6594_REG_MISC_CTRL 0x81
+
+#define TPS6594_REG_ENABLE_DRV_STAT 0x82
+
+#define TPS6594_REG_RECOV_CNT_REG_1 0x83
+#define TPS6594_REG_RECOV_CNT_REG_2 0x84
+
+#define TPS6594_REG_FSM_I2C_TRIGGERS 0x85
+#define TPS6594_REG_FSM_NSLEEP_TRIGGERS 0x86
+
+#define TPS6594_REG_BUCK_RESET_REG 0x87
+
+#define TPS6594_REG_SPREAD_SPECTRUM_1 0x88
+
+#define TPS6594_REG_FREQ_SEL 0x8a
+
+#define TPS6594_REG_FSM_STEP_SIZE 0x8b
+
+#define TPS6594_REG_LDO_RV_TIMEOUT_REG_1 0x8c
+#define TPS6594_REG_LDO_RV_TIMEOUT_REG_2 0x8d
+
+#define TPS6594_REG_USER_SPARE_REGS 0x8e
+
+#define TPS6594_REG_ESM_MCU_START_REG 0x8f
+#define TPS6594_REG_ESM_MCU_DELAY1_REG 0x90
+#define TPS6594_REG_ESM_MCU_DELAY2_REG 0x91
+#define TPS6594_REG_ESM_MCU_MODE_CFG 0x92
+#define TPS6594_REG_ESM_MCU_HMAX_REG 0x93
+#define TPS6594_REG_ESM_MCU_HMIN_REG 0x94
+#define TPS6594_REG_ESM_MCU_LMAX_REG 0x95
+#define TPS6594_REG_ESM_MCU_LMIN_REG 0x96
+#define TPS6594_REG_ESM_MCU_ERR_CNT_REG 0x97
+#define TPS6594_REG_ESM_SOC_START_REG 0x98
+#define TPS6594_REG_ESM_SOC_DELAY1_REG 0x99
+#define TPS6594_REG_ESM_SOC_DELAY2_REG 0x9a
+#define TPS6594_REG_ESM_SOC_MODE_CFG 0x9b
+#define TPS6594_REG_ESM_SOC_HMAX_REG 0x9c
+#define TPS6594_REG_ESM_SOC_HMIN_REG 0x9d
+#define TPS6594_REG_ESM_SOC_LMAX_REG 0x9e
+#define TPS6594_REG_ESM_SOC_LMIN_REG 0x9f
+#define TPS6594_REG_ESM_SOC_ERR_CNT_REG 0xa0
+
+#define TPS6594_REG_REGISTER_LOCK 0xa1
+
+#define TPS65224_REG_SRAM_ACCESS_1 0xa2
+#define TPS65224_REG_SRAM_ACCESS_2 0xa3
+#define TPS65224_REG_SRAM_ADDR_CTRL 0xa4
+#define TPS65224_REG_RECOV_CNT_PFSM_INCR 0xa5
+#define TPS6594_REG_MANUFACTURING_VER 0xa6
+
+#define TPS6594_REG_CUSTOMER_NVM_ID_REG 0xa7
+
+#define TPS6594_REG_VMON_CONF_REG 0xa8
+
+#define TPS6594_REG_SOFT_REBOOT_REG 0xab
+
+#define TPS65224_REG_ADC_CTRL 0xac
+#define TPS65224_REG_ADC_RESULT_REG_1 0xad
+#define TPS65224_REG_ADC_RESULT_REG_2 0xae
+#define TPS6594_REG_RTC_SECONDS 0xb5
+#define TPS6594_REG_RTC_MINUTES 0xb6
+#define TPS6594_REG_RTC_HOURS 0xb7
+#define TPS6594_REG_RTC_DAYS 0xb8
+#define TPS6594_REG_RTC_MONTHS 0xb9
+#define TPS6594_REG_RTC_YEARS 0xba
+#define TPS6594_REG_RTC_WEEKS 0xbb
+
+#define TPS6594_REG_ALARM_SECONDS 0xbc
+#define TPS6594_REG_ALARM_MINUTES 0xbd
+#define TPS6594_REG_ALARM_HOURS 0xbe
+#define TPS6594_REG_ALARM_DAYS 0xbf
+#define TPS6594_REG_ALARM_MONTHS 0xc0
+#define TPS6594_REG_ALARM_YEARS 0xc1
+
+#define TPS6594_REG_RTC_CTRL_1 0xc2
+#define TPS6594_REG_RTC_CTRL_2 0xc3
+#define TPS65224_REG_STARTUP_CTRL 0xc3
+#define TPS6594_REG_RTC_STATUS 0xc4
+#define TPS6594_REG_RTC_INTERRUPTS 0xc5
+#define TPS6594_REG_RTC_COMP_LSB 0xc6
+#define TPS6594_REG_RTC_COMP_MSB 0xc7
+#define TPS6594_REG_RTC_RESET_STATUS 0xc8
+
+#define TPS6594_REG_SCRATCH_PAD_REG_1 0xc9
+#define TPS6594_REG_SCRATCH_PAD_REG_2 0xca
+#define TPS6594_REG_SCRATCH_PAD_REG_3 0xcb
+#define TPS6594_REG_SCRATCH_PAD_REG_4 0xcc
+
+#define TPS6594_REG_PFSM_DELAY_REG_1 0xcd
+#define TPS6594_REG_PFSM_DELAY_REG_2 0xce
+#define TPS6594_REG_PFSM_DELAY_REG_3 0xcf
+#define TPS6594_REG_PFSM_DELAY_REG_4 0xd0
+#define TPS65224_REG_ADC_GAIN_COMP_REG 0xd0
+#define TPS65224_REG_CRC_CALC_CONTROL 0xef
+#define TPS65224_REG_REGMAP_USER_CRC_LOW 0xf0
+#define TPS65224_REG_REGMAP_USER_CRC_HIGH 0xf1
+
+/* Registers for page 1 */
+#define TPS6594_REG_SERIAL_IF_CONFIG 0x11a
+#define TPS6594_REG_I2C1_ID 0x122
+#define TPS6594_REG_I2C2_ID 0x123
+
+/* Registers for page 4 */
+#define TPS6594_REG_WD_ANSWER_REG 0x401
+#define TPS6594_REG_WD_QUESTION_ANSW_CNT 0x402
+#define TPS6594_REG_WD_WIN1_CFG 0x403
+#define TPS6594_REG_WD_WIN2_CFG 0x404
+#define TPS6594_REG_WD_LONGWIN_CFG 0x405
+#define TPS6594_REG_WD_MODE_REG 0x406
+#define TPS6594_REG_WD_QA_CFG 0x407
+#define TPS6594_REG_WD_ERR_STATUS 0x408
+#define TPS6594_REG_WD_THR_CFG 0x409
+#define TPS6594_REG_WD_FAIL_CNT_REG 0x40a
+
+/* BUCKX_CTRL register field definition */
+#define TPS6594_BIT_BUCK_EN BIT(0)
+#define TPS6594_BIT_BUCK_FPWM BIT(1)
+#define TPS6594_BIT_BUCK_FPWM_MP BIT(2)
+#define TPS6594_BIT_BUCK_VSEL BIT(3)
+#define TPS6594_BIT_BUCK_VMON_EN BIT(4)
+#define TPS6594_BIT_BUCK_PLDN BIT(5)
+#define TPS6594_BIT_BUCK_RV_SEL BIT(7)
+
+/* TPS6594 BUCKX_CONF register field definition */
+#define TPS6594_MASK_BUCK_SLEW_RATE GENMASK(2, 0)
+#define TPS6594_MASK_BUCK_ILIM GENMASK(5, 3)
+
+/* TPS65224 BUCKX_CONF register field definition */
+#define TPS65224_MASK_BUCK_SLEW_RATE GENMASK(1, 0)
+
+/* TPS6594 BUCKX_PG_WINDOW register field definition */
+#define TPS6594_MASK_BUCK_OV_THR GENMASK(2, 0)
+#define TPS6594_MASK_BUCK_UV_THR GENMASK(5, 3)
+
+/* TPS65224 BUCKX_PG_WINDOW register field definition */
+#define TPS65224_MASK_BUCK_VMON_THR GENMASK(1, 0)
+
+/* TPS6594 BUCKX_VOUT register field definition */
+#define TPS6594_MASK_BUCKS_VSET GENMASK(7, 0)
+
+/* TPS65224 BUCKX_VOUT register field definition */
+#define TPS65224_MASK_BUCK1_VSET GENMASK(7, 0)
+#define TPS65224_MASK_BUCKS_VSET GENMASK(6, 0)
+
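A note on usage: the BIT()/GENMASK() field definitions in this header are meant to pair with the regmap API and the FIELD_PREP()/FIELD_GET() helpers from <linux/bitfield.h>. As a hedged sketch (TPS6594_REG_BUCKX_VOUT_1() lives in the earlier part of this header, and the function name is illustrative, not part of this patch), programming a buck VSET code could look like:

#include <linux/bitfield.h>
#include <linux/regmap.h>

/* Illustrative only: shift a VSET code into the BUCKS_VSET field of
 * a buck VOUT register and write it through regmap.
 */
static int example_set_buck1_vset(struct regmap *map, unsigned int vset)
{
	return regmap_update_bits(map, TPS6594_REG_BUCKX_VOUT_1(0),
				  TPS6594_MASK_BUCKS_VSET,
				  FIELD_PREP(TPS6594_MASK_BUCKS_VSET, vset));
}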
+/* LDOX_CTRL register field definition */
+#define TPS6594_BIT_LDO_EN BIT(0)
+#define TPS6594_BIT_LDO_SLOW_RAMP BIT(1)
+#define TPS6594_BIT_LDO_VMON_EN BIT(4)
+#define TPS6594_MASK_LDO_PLDN GENMASK(6, 5)
+#define TPS6594_BIT_LDO_RV_SEL BIT(7)
+#define TPS65224_BIT_LDO_DISCHARGE_EN BIT(5)
+
+/* LDORTC_CTRL register field definition */
+#define TPS6594_BIT_LDORTC_DIS BIT(0)
+
+/* LDOX_VOUT register field definition */
+#define TPS6594_MASK_LDO123_VSET GENMASK(6, 1)
+#define TPS6594_MASK_LDO4_VSET GENMASK(6, 0)
+#define TPS6594_BIT_LDO_BYPASS BIT(7)
+
+/* TPS6594 LDOX_PG_WINDOW register field definition */
+#define TPS6594_MASK_LDO_OV_THR GENMASK(2, 0)
+#define TPS6594_MASK_LDO_UV_THR GENMASK(5, 3)
+
+/* TPS65224 LDOX_PG_WINDOW register field definition */
+#define TPS65224_MASK_LDO_VMON_THR GENMASK(1, 0)
+
+/* VCCA_VMON_CTRL register field definition */
+#define TPS6594_BIT_VMON_EN BIT(0)
+#define TPS6594_BIT_VMON1_EN BIT(1)
+#define TPS6594_BIT_VMON1_RV_SEL BIT(2)
+#define TPS6594_BIT_VMON2_EN BIT(3)
+#define TPS6594_BIT_VMON2_RV_SEL BIT(4)
+#define TPS6594_BIT_VMON_DEGLITCH_SEL BIT(5)
+#define TPS65224_BIT_VMON_DEGLITCH_SEL GENMASK(7, 5)
+
+/* VCCA_PG_WINDOW register field definition */
+#define TPS6594_MASK_VCCA_OV_THR GENMASK(2, 0)
+#define TPS6594_MASK_VCCA_UV_THR GENMASK(5, 3)
+#define TPS65224_MASK_VCCA_VMON_THR GENMASK(1, 0)
+#define TPS6594_BIT_VCCA_PG_SET BIT(6)
+
+/* TPS6594 VMONX_PG_WINDOW register field definition */
+#define TPS6594_MASK_VMONX_OV_THR GENMASK(2, 0)
+#define TPS6594_MASK_VMONX_UV_THR GENMASK(5, 3)
+#define TPS6594_BIT_VMONX_RANGE BIT(6)
+
+/* TPS65224 VMONX_PG_WINDOW register field definition */
+#define TPS65224_MASK_VMONX_THR GENMASK(1, 0)
+
+/* GPIOX_CONF register field definition */
+#define TPS6594_BIT_GPIO_DIR BIT(0)
+#define TPS6594_BIT_GPIO_OD BIT(1)
+#define TPS6594_BIT_GPIO_PU_SEL BIT(2)
+#define TPS6594_BIT_GPIO_PU_PD_EN BIT(3)
+#define TPS6594_BIT_GPIO_DEGLITCH_EN BIT(4)
+#define TPS6594_MASK_GPIO_SEL GENMASK(7, 5)
+#define TPS65224_MASK_GPIO_SEL GENMASK(6, 5)
+#define TPS65224_MASK_GPIO_SEL_GPIO6 GENMASK(7, 5)
+
+/* NPWRON_CONF register field definition */
+#define TPS6594_BIT_NRSTOUT_OD BIT(0)
+#define TPS6594_BIT_ENABLE_PU_SEL BIT(2)
+#define TPS6594_BIT_ENABLE_PU_PD_EN BIT(3)
+#define TPS6594_BIT_ENABLE_DEGLITCH_EN BIT(4)
+#define TPS6594_BIT_ENABLE_POL BIT(5)
+#define TPS6594_MASK_NPWRON_SEL GENMASK(7, 6)
+
+/* POWER_ON_CONFIG register field definition */
+#define TPS65224_BIT_NINT_ENDRV_PU_SEL BIT(0)
+#define TPS65224_BIT_NINT_ENDRV_SEL BIT(1)
+#define TPS65224_BIT_EN_PB_DEGL BIT(5)
+#define TPS65224_MASK_EN_PB_VSENSE_CONFIG GENMASK(7, 6)
+
+/* TPS6594 GPIO_OUT_X register field definition */
+#define TPS6594_BIT_GPIOX_OUT(gpio_inst) BIT((gpio_inst) % 8)
+
+/* TPS6594 GPIO_IN_X register field definition */
+#define TPS6594_BIT_GPIOX_IN(gpio_inst) BIT((gpio_inst) % 8)
+#define TPS6594_BIT_NPWRON_IN BIT(3)
+
+/* TPS65224 GPIO_OUT_X register field definition */
+#define TPS65224_BIT_GPIOX_OUT(gpio_inst) BIT((gpio_inst))
+
+/* TPS65224 GPIO_IN_X register field definition */
+#define TPS65224_BIT_GPIOX_IN(gpio_inst) BIT((gpio_inst))
+
+/* RAIL_SEL_1 register field definition */
+#define TPS6594_MASK_BUCK1_GRP_SEL GENMASK(1, 0)
+#define TPS6594_MASK_BUCK2_GRP_SEL GENMASK(3, 2)
+#define TPS6594_MASK_BUCK3_GRP_SEL GENMASK(5, 4)
+#define TPS6594_MASK_BUCK4_GRP_SEL GENMASK(7, 6)
+
+/* RAIL_SEL_2 register field definition */
+#define TPS6594_MASK_BUCK5_GRP_SEL GENMASK(1, 0)
+#define TPS6594_MASK_LDO1_GRP_SEL GENMASK(3, 2)
+#define TPS6594_MASK_LDO2_GRP_SEL GENMASK(5, 4)
+#define TPS6594_MASK_LDO3_GRP_SEL GENMASK(7, 6)
+
+/* RAIL_SEL_3 register field definition */
+#define TPS6594_MASK_LDO4_GRP_SEL GENMASK(1, 0)
+#define TPS6594_MASK_VCCA_GRP_SEL GENMASK(3, 2)
+#define TPS6594_MASK_VMON1_GRP_SEL GENMASK(5, 4)
+#define TPS6594_MASK_VMON2_GRP_SEL GENMASK(7, 6)
+
+/* FSM_TRIG_SEL_1 register field definition */
+#define TPS6594_MASK_MCU_RAIL_TRIG GENMASK(1, 0)
+#define TPS6594_MASK_SOC_RAIL_TRIG GENMASK(3, 2)
+#define TPS6594_MASK_OTHER_RAIL_TRIG GENMASK(5, 4)
+#define TPS6594_MASK_SEVERE_ERR_TRIG GENMASK(7, 6)
+
+/* FSM_TRIG_SEL_2 register field definition */
+#define TPS6594_MASK_MODERATE_ERR_TRIG GENMASK(1, 0)
+
+/* FSM_TRIG_MASK_X register field definition */
+#define TPS6594_BIT_GPIOX_FSM_MASK(gpio_inst) BIT(((gpio_inst) << 1) % 8)
+#define TPS6594_BIT_GPIOX_FSM_MASK_POL(gpio_inst) BIT(((gpio_inst) << 1) % 8 + 1)
+
+#define TPS65224_BIT_GPIOX_FSM_MASK(gpio_inst) BIT(((gpio_inst) << 1) % 6)
+#define TPS65224_BIT_GPIOX_FSM_MASK_POL(gpio_inst) BIT(((gpio_inst) << 1) % 6 + 1)
+
+/* MASK_BUCKX register field definition */
+#define TPS6594_BIT_BUCKX_OV_MASK(buck_inst) BIT(((buck_inst) << 2) % 8)
+#define TPS6594_BIT_BUCKX_UV_MASK(buck_inst) BIT(((buck_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_BUCKX_ILIM_MASK(buck_inst) BIT(((buck_inst) << 2) % 8 + 3)
+
+/* MASK_LDOX register field definition */
+#define TPS6594_BIT_LDOX_OV_MASK(ldo_inst) BIT(((ldo_inst) << 2) % 8)
+#define TPS6594_BIT_LDOX_UV_MASK(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_LDOX_ILIM_MASK(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 3)
+
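The instance-indexed mask macros above fold several regulators into shared 8-bit registers: each buck or LDO takes a four-bit lane, so ((inst) << 2) % 8 places even instances in the low nibble and odd instances in the high nibble of the corresponding MASK_BUCKX/MASK_LDOX register. A self-contained sketch that just prints the resulting bit values (plain C, illustration only):

#include <stdio.h>

/* Userspace stand-in for the kernel's BIT() macro */
#define BIT(n) (1U << (n))
#define TPS6594_BIT_BUCKX_UV_MASK(buck_inst)	BIT(((buck_inst) << 2) % 8 + 1)

int main(void)
{
	/* Bucks alternate between the low and high nibble of their register */
	for (int inst = 0; inst < 5; inst++)
		printf("buck%d UV mask bit: 0x%02x\n", inst + 1,
		       TPS6594_BIT_BUCKX_UV_MASK(inst));
	return 0;
}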
+/* MASK_VMON register field definition */
+#define TPS6594_BIT_VCCA_OV_MASK BIT(0)
+#define TPS6594_BIT_VCCA_UV_MASK BIT(1)
+#define TPS6594_BIT_VMON1_OV_MASK BIT(2)
+#define TPS6594_BIT_VMON1_UV_MASK BIT(3)
+#define TPS6594_BIT_VMON2_OV_MASK BIT(5)
+#define TPS6594_BIT_VMON2_UV_MASK BIT(6)
+
+/* MASK_BUCK Register field definition */
+#define TPS65224_BIT_BUCK1_UVOV_MASK BIT(0)
+#define TPS65224_BIT_BUCK2_UVOV_MASK BIT(1)
+#define TPS65224_BIT_BUCK3_UVOV_MASK BIT(2)
+#define TPS65224_BIT_BUCK4_UVOV_MASK BIT(4)
+
+/* MASK_LDO_VMON register field definition */
+#define TPS65224_BIT_LDO1_UVOV_MASK BIT(0)
+#define TPS65224_BIT_LDO2_UVOV_MASK BIT(1)
+#define TPS65224_BIT_LDO3_UVOV_MASK BIT(2)
+#define TPS65224_BIT_VCCA_UVOV_MASK BIT(4)
+#define TPS65224_BIT_VMON1_UVOV_MASK BIT(5)
+#define TPS65224_BIT_VMON2_UVOV_MASK BIT(6)
+
+/* TPS6594 MASK_GPIOX register field definition */
+#define TPS6594_BIT_GPIOX_FALL_MASK(gpio_inst) BIT((gpio_inst) < 8 ? \
+ (gpio_inst) : (gpio_inst) % 8)
+#define TPS6594_BIT_GPIOX_RISE_MASK(gpio_inst) BIT((gpio_inst) < 8 ? \
+ (gpio_inst) : (gpio_inst) % 8 + 3)
+/* TPS65224 MASK_GPIOX register field definition */
+#define TPS65224_BIT_GPIOX_FALL_MASK(gpio_inst) BIT((gpio_inst))
+#define TPS65224_BIT_GPIOX_RISE_MASK(gpio_inst) BIT((gpio_inst))
+
+/* MASK_STARTUP register field definition */
+#define TPS6594_BIT_NPWRON_START_MASK BIT(0)
+#define TPS6594_BIT_ENABLE_MASK BIT(1)
+#define TPS6594_BIT_FSD_MASK BIT(4)
+#define TPS6594_BIT_SOFT_REBOOT_MASK BIT(5)
+#define TPS65224_BIT_VSENSE_MASK BIT(0)
+#define TPS65224_BIT_PB_SHORT_MASK BIT(2)
+
+/* MASK_MISC register field definition */
+#define TPS6594_BIT_BIST_PASS_MASK BIT(0)
+#define TPS6594_BIT_EXT_CLK_MASK BIT(1)
+#define TPS65224_BIT_REG_UNLOCK_MASK BIT(2)
+#define TPS6594_BIT_TWARN_MASK BIT(3)
+#define TPS65224_BIT_PB_LONG_MASK BIT(4)
+#define TPS65224_BIT_PB_FALL_MASK BIT(5)
+#define TPS65224_BIT_PB_RISE_MASK BIT(6)
+#define TPS65224_BIT_ADC_CONV_READY_MASK BIT(7)
+
+/* MASK_MODERATE_ERR register field definition */
+#define TPS6594_BIT_BIST_FAIL_MASK BIT(1)
+#define TPS6594_BIT_REG_CRC_ERR_MASK BIT(2)
+#define TPS6594_BIT_SPMI_ERR_MASK BIT(4)
+#define TPS6594_BIT_NPWRON_LONG_MASK BIT(5)
+#define TPS6594_BIT_NINT_READBACK_MASK BIT(6)
+#define TPS6594_BIT_NRSTOUT_READBACK_MASK BIT(7)
+
+/* MASK_FSM_ERR register field definition */
+#define TPS6594_BIT_IMM_SHUTDOWN_MASK BIT(0)
+#define TPS6594_BIT_ORD_SHUTDOWN_MASK BIT(1)
+#define TPS6594_BIT_MCU_PWR_ERR_MASK BIT(2)
+#define TPS6594_BIT_SOC_PWR_ERR_MASK BIT(3)
+#define TPS65224_BIT_COMM_ERR_MASK BIT(4)
+#define TPS65224_BIT_I2C2_ERR_MASK BIT(5)
+
+/* MASK_COMM_ERR register field definition */
+#define TPS6594_BIT_COMM_FRM_ERR_MASK BIT(0)
+#define TPS6594_BIT_COMM_CRC_ERR_MASK BIT(1)
+#define TPS6594_BIT_COMM_ADR_ERR_MASK BIT(3)
+#define TPS6594_BIT_I2C2_CRC_ERR_MASK BIT(5)
+#define TPS6594_BIT_I2C2_ADR_ERR_MASK BIT(7)
+
+/* MASK_READBACK_ERR register field definition */
+#define TPS6594_BIT_EN_DRV_READBACK_MASK BIT(0)
+#define TPS6594_BIT_NRSTOUT_SOC_READBACK_MASK BIT(3)
+
+/* MASK_ESM register field definition */
+#define TPS6594_BIT_ESM_SOC_PIN_MASK BIT(0)
+#define TPS6594_BIT_ESM_SOC_FAIL_MASK BIT(1)
+#define TPS6594_BIT_ESM_SOC_RST_MASK BIT(2)
+#define TPS6594_BIT_ESM_MCU_PIN_MASK BIT(3)
+#define TPS6594_BIT_ESM_MCU_FAIL_MASK BIT(4)
+#define TPS6594_BIT_ESM_MCU_RST_MASK BIT(5)
+
+/* INT_TOP register field definition */
+#define TPS6594_BIT_BUCK_INT BIT(0)
+#define TPS6594_BIT_LDO_VMON_INT BIT(1)
+#define TPS6594_BIT_GPIO_INT BIT(2)
+#define TPS6594_BIT_STARTUP_INT BIT(3)
+#define TPS6594_BIT_MISC_INT BIT(4)
+#define TPS6594_BIT_MODERATE_ERR_INT BIT(5)
+#define TPS6594_BIT_SEVERE_ERR_INT BIT(6)
+#define TPS6594_BIT_FSM_ERR_INT BIT(7)
+
+/* TPS6594 INT_BUCK register field definition */
+#define TPS6594_BIT_BUCK1_2_INT BIT(0)
+#define TPS6594_BIT_BUCK3_4_INT BIT(1)
+#define TPS6594_BIT_BUCK5_INT BIT(2)
+
+/* TPS65224 INT_BUCK register field definition */
+#define TPS65224_BIT_BUCK1_UVOV_INT BIT(0)
+#define TPS65224_BIT_BUCK2_UVOV_INT BIT(1)
+#define TPS65224_BIT_BUCK3_UVOV_INT BIT(2)
+#define TPS65224_BIT_BUCK4_UVOV_INT BIT(3)
+
+/* INT_BUCKX register field definition */
+#define TPS6594_BIT_BUCKX_OV_INT(buck_inst) BIT(((buck_inst) << 2) % 8)
+#define TPS6594_BIT_BUCKX_UV_INT(buck_inst) BIT(((buck_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_BUCKX_SC_INT(buck_inst) BIT(((buck_inst) << 2) % 8 + 2)
+#define TPS6594_BIT_BUCKX_ILIM_INT(buck_inst) BIT(((buck_inst) << 2) % 8 + 3)
+
+/* TPS6594 INT_LDO_VMON register field definition */
+#define TPS6594_BIT_LDO1_2_INT BIT(0)
+#define TPS6594_BIT_LDO3_4_INT BIT(1)
+#define TPS6594_BIT_VCCA_INT BIT(4)
+
+/* TPS65224 INT_LDO_VMON register field definition */
+#define TPS65224_BIT_LDO1_UVOV_INT BIT(0)
+#define TPS65224_BIT_LDO2_UVOV_INT BIT(1)
+#define TPS65224_BIT_LDO3_UVOV_INT BIT(2)
+#define TPS65224_BIT_VCCA_UVOV_INT BIT(4)
+#define TPS65224_BIT_VMON1_UVOV_INT BIT(5)
+#define TPS65224_BIT_VMON2_UVOV_INT BIT(6)
+
+/* INT_LDOX register field definition */
+#define TPS6594_BIT_LDOX_OV_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8)
+#define TPS6594_BIT_LDOX_UV_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_LDOX_SC_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 2)
+#define TPS6594_BIT_LDOX_ILIM_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 3)
+
+/* INT_VMON register field definition */
+#define TPS6594_BIT_VCCA_OV_INT BIT(0)
+#define TPS6594_BIT_VCCA_UV_INT BIT(1)
+#define TPS6594_BIT_VMON1_OV_INT BIT(2)
+#define TPS6594_BIT_VMON1_UV_INT BIT(3)
+#define TPS6594_BIT_VMON1_RV_INT BIT(4)
+#define TPS6594_BIT_VMON2_OV_INT BIT(5)
+#define TPS6594_BIT_VMON2_UV_INT BIT(6)
+#define TPS6594_BIT_VMON2_RV_INT BIT(7)
+
+/* TPS6594 INT_GPIO register field definition */
+#define TPS6594_BIT_GPIO9_INT BIT(0)
+#define TPS6594_BIT_GPIO10_INT BIT(1)
+#define TPS6594_BIT_GPIO11_INT BIT(2)
+#define TPS6594_BIT_GPIO1_8_INT BIT(3)
+
+/* INT_GPIOX register field definition */
+#define TPS6594_BIT_GPIOX_INT(gpio_inst) BIT(gpio_inst)
+
+/* TPS65224 INT_GPIO register field definition */
+#define TPS65224_BIT_GPIO1_INT BIT(0)
+#define TPS65224_BIT_GPIO2_INT BIT(1)
+#define TPS65224_BIT_GPIO3_INT BIT(2)
+#define TPS65224_BIT_GPIO4_INT BIT(3)
+#define TPS65224_BIT_GPIO5_INT BIT(4)
+#define TPS65224_BIT_GPIO6_INT BIT(5)
+
+/* INT_STARTUP register field definition */
+#define TPS6594_BIT_NPWRON_START_INT BIT(0)
+#define TPS65224_BIT_VSENSE_INT BIT(0)
+#define TPS6594_BIT_ENABLE_INT BIT(1)
+#define TPS6594_BIT_RTC_INT BIT(2)
+#define TPS65224_BIT_PB_SHORT_INT BIT(2)
+#define TPS6594_BIT_FSD_INT BIT(4)
+#define TPS6594_BIT_SOFT_REBOOT_INT BIT(5)
+
+/* INT_MISC register field definition */
+#define TPS6594_BIT_BIST_PASS_INT BIT(0)
+#define TPS6594_BIT_EXT_CLK_INT BIT(1)
+#define TPS65224_BIT_REG_UNLOCK_INT BIT(2)
+#define TPS6594_BIT_TWARN_INT BIT(3)
+#define TPS65224_BIT_PB_LONG_INT BIT(4)
+#define TPS65224_BIT_PB_FALL_INT BIT(5)
+#define TPS65224_BIT_PB_RISE_INT BIT(6)
+#define TPS65224_BIT_ADC_CONV_READY_INT BIT(7)
+
+/* INT_MODERATE_ERR register field definition */
+#define TPS6594_BIT_TSD_ORD_INT BIT(0)
+#define TPS6594_BIT_BIST_FAIL_INT BIT(1)
+#define TPS6594_BIT_REG_CRC_ERR_INT BIT(2)
+#define TPS6594_BIT_RECOV_CNT_INT BIT(3)
+#define TPS6594_BIT_SPMI_ERR_INT BIT(4)
+#define TPS6594_BIT_NPWRON_LONG_INT BIT(5)
+#define TPS6594_BIT_NINT_READBACK_INT BIT(6)
+#define TPS6594_BIT_NRSTOUT_READBACK_INT BIT(7)
+
+/* INT_SEVERE_ERR register field definition */
+#define TPS6594_BIT_TSD_IMM_INT BIT(0)
+#define TPS6594_BIT_VCCA_OVP_INT BIT(1)
+#define TPS6594_BIT_PFSM_ERR_INT BIT(2)
+#define TPS65224_BIT_BG_XMON_INT BIT(3)
+
+/* INT_FSM_ERR register field definition */
+#define TPS6594_BIT_IMM_SHUTDOWN_INT BIT(0)
+#define TPS6594_BIT_ORD_SHUTDOWN_INT BIT(1)
+#define TPS6594_BIT_MCU_PWR_ERR_INT BIT(2)
+#define TPS6594_BIT_SOC_PWR_ERR_INT BIT(3)
+#define TPS6594_BIT_COMM_ERR_INT BIT(4)
+#define TPS6594_BIT_READBACK_ERR_INT BIT(5)
+#define TPS65224_BIT_I2C2_ERR_INT BIT(5)
+#define TPS6594_BIT_ESM_INT BIT(6)
+#define TPS6594_BIT_WD_INT BIT(7)
+
+/* INT_COMM_ERR register field definition */
+#define TPS6594_BIT_COMM_FRM_ERR_INT BIT(0)
+#define TPS6594_BIT_COMM_CRC_ERR_INT BIT(1)
+#define TPS6594_BIT_COMM_ADR_ERR_INT BIT(3)
+#define TPS6594_BIT_I2C2_CRC_ERR_INT BIT(5)
+#define TPS6594_BIT_I2C2_ADR_ERR_INT BIT(7)
+
+/* INT_READBACK_ERR register field definition */
+#define TPS6594_BIT_EN_DRV_READBACK_INT BIT(0)
+#define TPS6594_BIT_NRSTOUT_SOC_READBACK_INT BIT(3)
+
+/* INT_ESM register field definition */
+#define TPS6594_BIT_ESM_SOC_PIN_INT BIT(0)
+#define TPS6594_BIT_ESM_SOC_FAIL_INT BIT(1)
+#define TPS6594_BIT_ESM_SOC_RST_INT BIT(2)
+#define TPS6594_BIT_ESM_MCU_PIN_INT BIT(3)
+#define TPS6594_BIT_ESM_MCU_FAIL_INT BIT(4)
+#define TPS6594_BIT_ESM_MCU_RST_INT BIT(5)
+
+/* STAT_BUCKX register field definition */
+#define TPS6594_BIT_BUCKX_OV_STAT(buck_inst) BIT(((buck_inst) << 2) % 8)
+#define TPS6594_BIT_BUCKX_UV_STAT(buck_inst) BIT(((buck_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_BUCKX_ILIM_STAT(buck_inst) BIT(((buck_inst) << 2) % 8 + 3)
+
+/* STAT_LDOX register field definition */
+#define TPS6594_BIT_LDOX_OV_STAT(ldo_inst) BIT(((ldo_inst) << 2) % 8)
+#define TPS6594_BIT_LDOX_UV_STAT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 1)
+#define TPS6594_BIT_LDOX_ILIM_STAT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 3)
+
+/* STAT_VMON register field definition */
+#define TPS6594_BIT_VCCA_OV_STAT BIT(0)
+#define TPS6594_BIT_VCCA_UV_STAT BIT(1)
+#define TPS6594_BIT_VMON1_OV_STAT BIT(2)
+#define TPS6594_BIT_VMON1_UV_STAT BIT(3)
+#define TPS6594_BIT_VMON2_OV_STAT BIT(5)
+#define TPS6594_BIT_VMON2_UV_STAT BIT(6)
+
+/* STAT_LDO_VMON register field definition */
+#define TPS65224_BIT_LDO1_UVOV_STAT BIT(0)
+#define TPS65224_BIT_LDO2_UVOV_STAT BIT(1)
+#define TPS65224_BIT_LDO3_UVOV_STAT BIT(2)
+#define TPS65224_BIT_VCCA_UVOV_STAT BIT(4)
+#define TPS65224_BIT_VMON1_UVOV_STAT BIT(5)
+#define TPS65224_BIT_VMON2_UVOV_STAT BIT(6)
+
+/* STAT_STARTUP register field definition */
+#define TPS65224_BIT_VSENSE_STAT BIT(0)
+#define TPS6594_BIT_ENABLE_STAT BIT(1)
+#define TPS65224_BIT_PB_LEVEL_STAT BIT(2)
+
+/* STAT_MISC register field definition */
+#define TPS6594_BIT_EXT_CLK_STAT BIT(1)
+#define TPS6594_BIT_TWARN_STAT BIT(3)
+
+/* STAT_MODERATE_ERR register field definition */
+#define TPS6594_BIT_TSD_ORD_STAT BIT(0)
+
+/* STAT_SEVERE_ERR register field definition */
+#define TPS6594_BIT_TSD_IMM_STAT BIT(0)
+#define TPS6594_BIT_VCCA_OVP_STAT BIT(1)
+#define TPS65224_BIT_BG_XMON_STAT BIT(3)
+
+/* STAT_READBACK_ERR register field definition */
+#define TPS6594_BIT_EN_DRV_READBACK_STAT BIT(0)
+#define TPS6594_BIT_NINT_READBACK_STAT BIT(1)
+#define TPS6594_BIT_NRSTOUT_READBACK_STAT BIT(2)
+#define TPS6594_BIT_NRSTOUT_SOC_READBACK_STAT BIT(3)
+
+/* PGOOD_SEL_1 register field definition */
+#define TPS6594_MASK_PGOOD_SEL_BUCK1 GENMASK(1, 0)
+#define TPS6594_MASK_PGOOD_SEL_BUCK2 GENMASK(3, 2)
+#define TPS6594_MASK_PGOOD_SEL_BUCK3 GENMASK(5, 4)
+#define TPS6594_MASK_PGOOD_SEL_BUCK4 GENMASK(7, 6)
+
+/* PGOOD_SEL_2 register field definition */
+#define TPS6594_MASK_PGOOD_SEL_BUCK5 GENMASK(1, 0)
+
+/* PGOOD_SEL_3 register field definition */
+#define TPS6594_MASK_PGOOD_SEL_LDO1 GENMASK(1, 0)
+#define TPS6594_MASK_PGOOD_SEL_LDO2 GENMASK(3, 2)
+#define TPS6594_MASK_PGOOD_SEL_LDO3 GENMASK(5, 4)
+#define TPS6594_MASK_PGOOD_SEL_LDO4 GENMASK(7, 6)
+
+/* PGOOD_SEL_4 register field definition */
+#define TPS6594_BIT_PGOOD_SEL_VCCA BIT(0)
+#define TPS6594_BIT_PGOOD_SEL_VMON1 BIT(1)
+#define TPS6594_BIT_PGOOD_SEL_VMON2 BIT(2)
+#define TPS6594_BIT_PGOOD_SEL_TDIE_WARN BIT(3)
+#define TPS6594_BIT_PGOOD_SEL_NRSTOUT BIT(4)
+#define TPS6594_BIT_PGOOD_SEL_NRSTOUT_SOC BIT(5)
+#define TPS6594_BIT_PGOOD_POL BIT(6)
+#define TPS6594_BIT_PGOOD_WINDOW BIT(7)
+
+/* PLL_CTRL register field definition */
+#define TPS6594_MASK_EXT_CLK_FREQ GENMASK(1, 0)
+
+/* CONFIG_1 register field definition */
+#define TPS6594_BIT_TWARN_LEVEL BIT(0)
+#define TPS6594_BIT_TSD_ORD_LEVEL BIT(1)
+#define TPS6594_BIT_I2C1_HS BIT(3)
+#define TPS6594_BIT_I2C2_HS BIT(4)
+#define TPS6594_BIT_EN_ILIM_FSM_CTRL BIT(5)
+#define TPS6594_BIT_NSLEEP1_MASK BIT(6)
+#define TPS6594_BIT_NSLEEP2_MASK BIT(7)
+
+/* CONFIG_2 register field definition */
+#define TPS6594_BIT_BB_CHARGER_EN BIT(0)
+#define TPS6594_BIT_BB_ICHR BIT(1)
+#define TPS6594_MASK_BB_VEOC GENMASK(3, 2)
+#define TPS65224_BIT_I2C1_SPI_CRC_EN BIT(4)
+#define TPS65224_BIT_I2C2_CRC_EN BIT(5)
+#define TPS6594_BB_EOC_RDY BIT(7)
+
+/* ENABLE_DRV_REG register field definition */
+#define TPS6594_BIT_ENABLE_DRV BIT(0)
+
+/* MISC_CTRL register field definition */
+#define TPS6594_BIT_NRSTOUT BIT(0)
+#define TPS6594_BIT_NRSTOUT_SOC BIT(1)
+#define TPS6594_BIT_LPM_EN BIT(2)
+#define TPS6594_BIT_CLKMON_EN BIT(3)
+#define TPS6594_BIT_AMUXOUT_EN BIT(4)
+#define TPS6594_BIT_SEL_EXT_CLK BIT(5)
+#define TPS6594_MASK_SYNCCLKOUT_FREQ_SEL GENMASK(7, 6)
+
+/* ENABLE_DRV_STAT register field definition */
+#define TPS6594_BIT_EN_DRV_IN BIT(0)
+#define TPS6594_BIT_NRSTOUT_IN BIT(1)
+#define TPS6594_BIT_NRSTOUT_SOC_IN BIT(2)
+#define TPS6594_BIT_FORCE_EN_DRV_LOW BIT(3)
+#define TPS6594_BIT_SPMI_LPM_EN BIT(4)
+#define TPS65224_BIT_TSD_DISABLE BIT(5)
+
+/* RECOV_CNT_REG_1 register field definition */
+#define TPS6594_MASK_RECOV_CNT GENMASK(3, 0)
+
+/* RECOV_CNT_REG_2 register field definition */
+#define TPS6594_MASK_RECOV_CNT_THR GENMASK(3, 0)
+#define TPS6594_BIT_RECOV_CNT_CLR BIT(4)
+
+/* FSM_I2C_TRIGGERS register field definition */
+#define TPS6594_BIT_TRIGGER_I2C(bit) BIT(bit)
+
+/* FSM_NSLEEP_TRIGGERS register field definition */
+#define TPS6594_BIT_NSLEEP1B BIT(0)
+#define TPS6594_BIT_NSLEEP2B BIT(1)
+
+/* BUCK_RESET_REG register field definition */
+#define TPS6594_BIT_BUCKX_RESET(buck_inst) BIT(buck_inst)
+
+/* SPREAD_SPECTRUM_1 register field definition */
+#define TPS6594_MASK_SS_DEPTH GENMASK(1, 0)
+#define TPS6594_BIT_SS_EN BIT(2)
+
+/* FREQ_SEL register field definition */
+#define TPS6594_BIT_BUCKX_FREQ_SEL(buck_inst) BIT(buck_inst)
+
+/* FSM_STEP_SIZE register field definition */
+#define TPS6594_MASK_PFSM_DELAY_STEP GENMASK(4, 0)
+
+/* LDO_RV_TIMEOUT_REG_1 register field definition */
+#define TPS6594_MASK_LDO1_RV_TIMEOUT GENMASK(3, 0)
+#define TPS6594_MASK_LDO2_RV_TIMEOUT GENMASK(7, 4)
+
+/* LDO_RV_TIMEOUT_REG_2 register field definition */
+#define TPS6594_MASK_LDO3_RV_TIMEOUT GENMASK(3, 0)
+#define TPS6594_MASK_LDO4_RV_TIMEOUT GENMASK(7, 4)
+
+/* USER_SPARE_REGS register field definition */
+#define TPS6594_BIT_USER_SPARE(bit) BIT(bit)
+
+/* TPS6594 ESM_MCU_START_REG register field definition */
+#define TPS6594_BIT_ESM_MCU_START BIT(0)
+
+/* TPS6594 ESM_MCU_MODE_CFG register field definition */
+#define TPS6594_MASK_ESM_MCU_ERR_CNT_TH GENMASK(3, 0)
+#define TPS6594_BIT_ESM_MCU_ENDRV BIT(5)
+#define TPS6594_BIT_ESM_MCU_EN BIT(6)
+#define TPS6594_BIT_ESM_MCU_MODE BIT(7)
+
+/* ESM_MCU_ERR_CNT_REG register field definition */
+#define TPS6594_MASK_ESM_MCU_ERR_CNT GENMASK(4, 0)
+
+/* ESM_SOC_START_REG register field definition */
+#define TPS6594_BIT_ESM_SOC_START BIT(0)
+
+/* TPS65224 ESM_MCU_START_REG register field definition */
+#define TPS65224_BIT_ESM_MCU_START BIT(0)
+
+/* ESM_SOC_MODE_CFG register field definition */
+#define TPS6594_MASK_ESM_SOC_ERR_CNT_TH GENMASK(3, 0)
+#define TPS6594_BIT_ESM_SOC_ENDRV BIT(5)
+#define TPS6594_BIT_ESM_SOC_EN BIT(6)
+#define TPS6594_BIT_ESM_SOC_MODE BIT(7)
+
+/* TPS65224 ESM_MCU_MODE_CFG register field definition */
+#define TPS65224_MASK_ESM_MCU_ERR_CNT_TH GENMASK(3, 0)
+#define TPS65224_BIT_ESM_MCU_ENDRV BIT(5)
+#define TPS65224_BIT_ESM_MCU_EN BIT(6)
+#define TPS65224_BIT_ESM_MCU_MODE BIT(7)
+
+/* ESM_SOC_ERR_CNT_REG register field definition */
+#define TPS6594_MASK_ESM_SOC_ERR_CNT GENMASK(4, 0)
+
+/* REGISTER_LOCK register field definition */
+#define TPS6594_BIT_REGISTER_LOCK_STATUS BIT(0)
+
+/* VMON_CONF register field definition */
+#define TPS6594_MASK_VMON1_SLEW_RATE GENMASK(2, 0)
+#define TPS6594_MASK_VMON2_SLEW_RATE GENMASK(5, 3)
+
+/* SRAM_ACCESS_1 Register field definition */
+#define TPS65224_MASK_SRAM_UNLOCK_SEQ GENMASK(7, 0)
+
+/* SRAM_ACCESS_2 Register field definition */
+#define TPS65224_BIT_SRAM_WRITE_MODE BIT(0)
+#define TPS65224_BIT_OTP_PROG_USER BIT(1)
+#define TPS65224_BIT_OTP_PROG_PFSM BIT(2)
+#define TPS65224_BIT_OTP_PROG_STATUS BIT(3)
+#define TPS65224_BIT_SRAM_UNLOCKED BIT(6)
+#define TPS65224_USER_PROG_ALLOWED BIT(7)
+
+/* SRAM_ADDR_CTRL Register field definition */
+#define TPS65224_MASK_SRAM_SEL GENMASK(1, 0)
+
+/* RECOV_CNT_PFSM_INCR Register field definition */
+#define TPS65224_BIT_INCREMENT_RECOV_CNT BIT(0)
+
+/* MANUFACTURING_VER Register field definition */
+#define TPS65224_MASK_SILICON_REV GENMASK(7, 0)
+
+/* CUSTOMER_NVM_ID_REG Register field definition */
+#define TPS65224_MASK_CUSTOMER_NVM_ID GENMASK(7, 0)
+
+/* SOFT_REBOOT_REG register field definition */
+#define TPS6594_BIT_SOFT_REBOOT BIT(0)
+
+/* RTC_SECONDS & ALARM_SECONDS register field definition */
+#define TPS6594_MASK_SECOND_0 GENMASK(3, 0)
+#define TPS6594_MASK_SECOND_1 GENMASK(6, 4)
+
+/* RTC_MINUTES & ALARM_MINUTES register field definition */
+#define TPS6594_MASK_MINUTE_0 GENMASK(3, 0)
+#define TPS6594_MASK_MINUTE_1 GENMASK(6, 4)
+
+/* RTC_HOURS & ALARM_HOURS register field definition */
+#define TPS6594_MASK_HOUR_0 GENMASK(3, 0)
+#define TPS6594_MASK_HOUR_1 GENMASK(5, 4)
+#define TPS6594_BIT_PM_NAM BIT(7)
+
+/* RTC_DAYS & ALARM_DAYS register field definition */
+#define TPS6594_MASK_DAY_0 GENMASK(3, 0)
+#define TPS6594_MASK_DAY_1 GENMASK(5, 4)
+
+/* RTC_MONTHS & ALARM_MONTHS register field definition */
+#define TPS6594_MASK_MONTH_0 GENMASK(3, 0)
+#define TPS6594_BIT_MONTH_1 BIT(4)
+
+/* RTC_YEARS & ALARM_YEARS register field definition */
+#define TPS6594_MASK_YEAR_0 GENMASK(3, 0)
+#define TPS6594_MASK_YEAR_1 GENMASK(7, 4)
+
+/* RTC_WEEKS register field definition */
+#define TPS6594_MASK_WEEK GENMASK(2, 0)
+
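The RTC time and alarm fields above are BCD-coded: the _0 masks hold the units digit and the _1 masks the tens digit of each value. An in-tree driver would normally pass the raw byte through bcd2bin() from <linux/bcd.h>; an explicit decode using FIELD_GET() would look like this sketch (the function name is illustrative):

#include <linux/bitfield.h>

/* Illustrative only: raw RTC_SECONDS register value to binary seconds */
static inline unsigned int example_rtc_seconds_to_bin(unsigned int regval)
{
	return FIELD_GET(TPS6594_MASK_SECOND_1, regval) * 10 +
	       FIELD_GET(TPS6594_MASK_SECOND_0, regval);
}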
+/* RTC_CTRL_1 register field definition */
+#define TPS6594_BIT_STOP_RTC BIT(0)
+#define TPS6594_BIT_ROUND_30S BIT(1)
+#define TPS6594_BIT_AUTO_COMP BIT(2)
+#define TPS6594_BIT_MODE_12_24 BIT(3)
+#define TPS6594_BIT_SET_32_COUNTER BIT(5)
+#define TPS6594_BIT_GET_TIME BIT(6)
+#define TPS6594_BIT_RTC_V_OPT BIT(7)
+
+/* RTC_CTRL_2 register field definition */
+#define TPS6594_BIT_XTAL_EN BIT(0)
+#define TPS6594_MASK_XTAL_SEL GENMASK(2, 1)
+#define TPS6594_BIT_LP_STANDBY_SEL BIT(3)
+#define TPS6594_BIT_FAST_BIST BIT(4)
+#define TPS6594_MASK_STARTUP_DEST GENMASK(6, 5)
+#define TPS6594_BIT_FIRST_STARTUP_DONE BIT(7)
+
+/* RTC_STATUS register field definition */
+#define TPS6594_BIT_RUN BIT(1)
+#define TPS6594_BIT_TIMER BIT(5)
+#define TPS6594_BIT_ALARM BIT(6)
+#define TPS6594_BIT_POWER_UP BIT(7)
+
+/* RTC_INTERRUPTS register field definition */
+#define TPS6594_MASK_EVERY GENMASK(1, 0)
+#define TPS6594_BIT_IT_TIMER BIT(2)
+#define TPS6594_BIT_IT_ALARM BIT(3)
+
+/* RTC_RESET_STATUS register field definition */
+#define TPS6594_BIT_RESET_STATUS_RTC BIT(0)
+
+/* SERIAL_IF_CONFIG register field definition */
+#define TPS6594_BIT_I2C_SPI_SEL BIT(0)
+#define TPS6594_BIT_I2C1_SPI_CRC_EN BIT(1)
+#define TPS6594_BIT_I2C2_CRC_EN BIT(2)
+#define TPS6594_MASK_T_CRC GENMASK(7, 3)
+
+/* ADC_CTRL Register field definition */
+#define TPS65224_BIT_ADC_START BIT(0)
+#define TPS65224_BIT_ADC_CONT_CONV BIT(1)
+#define TPS65224_BIT_ADC_THERMAL_SEL BIT(2)
+#define TPS65224_BIT_ADC_RDIV_EN BIT(3)
+#define TPS65224_BIT_ADC_STATUS BIT(7)
+
+/* ADC_RESULT_REG_1 Register field definition */
+#define TPS65224_MASK_ADC_RESULT_11_4 GENMASK(7, 0)
+
+/* ADC_RESULT_REG_2 Register field definition */
+#define TPS65224_MASK_ADC_RESULT_3_0 GENMASK(7, 4)
+
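Per the two masks above, ADC_RESULT_REG_1 carries bits 11..4 of the conversion and the top nibble of ADC_RESULT_REG_2 carries bits 3..0, so the full 12-bit sample is reassembled by shifting the first register up by four. A hedged sketch (the struct tps6594 handle is declared near the end of this header; the function name is illustrative):

#include <linux/bitfield.h>
#include <linux/regmap.h>

/* Illustrative only: reassemble the 12-bit TPS65224 ADC result */
static int example_read_adc_result(struct tps6594 *tps, unsigned int *result)
{
	unsigned int hi, lo;
	int ret;

	ret = regmap_read(tps->regmap, TPS65224_REG_ADC_RESULT_REG_1, &hi);
	if (ret)
		return ret;
	ret = regmap_read(tps->regmap, TPS65224_REG_ADC_RESULT_REG_2, &lo);
	if (ret)
		return ret;

	/* hi holds bits 11..4, the top nibble of lo holds bits 3..0 */
	*result = (hi << 4) | FIELD_GET(TPS65224_MASK_ADC_RESULT_3_0, lo);
	return 0;
}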
+/* STARTUP_CTRL Register field definition */
+#define TPS65224_MASK_STARTUP_DEST GENMASK(6, 5)
+#define TPS65224_BIT_FIRST_STARTUP_DONE BIT(7)
+
+/* SCRATCH_PAD_REG_1 Register field definition */
+#define TPS6594_MASK_SCRATCH_PAD_1 GENMASK(7, 0)
+
+/* SCRATCH_PAD_REG_2 Register field definition */
+#define TPS6594_MASK_SCRATCH_PAD_2 GENMASK(7, 0)
+
+/* SCRATCH_PAD_REG_3 Register field definition */
+#define TPS6594_MASK_SCRATCH_PAD_3 GENMASK(7, 0)
+
+/* SCRATCH_PAD_REG_4 Register field definition */
+#define TPS6594_MASK_SCRATCH_PAD_4 GENMASK(7, 0)
+
+/* PFSM_DELAY_REG_1 Register field definition */
+#define TPS6594_MASK_PFSM_DELAY1 GENMASK(7, 0)
+
+/* PFSM_DELAY_REG_2 Register field definition */
+#define TPS6594_MASK_PFSM_DELAY2 GENMASK(7, 0)
+
+/* PFSM_DELAY_REG_3 Register field definition */
+#define TPS6594_MASK_PFSM_DELAY3 GENMASK(7, 0)
+
+/* PFSM_DELAY_REG_4 Register field definition */
+#define TPS6594_MASK_PFSM_DELAY4 GENMASK(7, 0)
+
+/* CRC_CALC_CONTROL Register field definition */
+#define TPS65224_BIT_RUN_CRC_BIST BIT(0)
+#define TPS65224_BIT_RUN_CRC_UPDATE BIT(1)
+
+/* ADC_GAIN_COMP_REG Register field definition */
+#define TPS65224_MASK_ADC_GAIN_COMP GENMASK(7, 0)
+
+/* REGMAP_USER_CRC_LOW Register field definition */
+#define TPS65224_MASK_REGMAP_USER_CRC16_LOW GENMASK(7, 0)
+
+/* REGMAP_USER_CRC_HIGH Register field definition */
+#define TPS65224_MASK_REGMAP_USER_CRC16_HIGH GENMASK(7, 0)
+
+/* WD_ANSWER_REG Register field definition */
+#define TPS6594_MASK_WD_ANSWER GENMASK(7, 0)
+
+/* WD_QUESTION_ANSW_CNT register field definition */
+#define TPS6594_MASK_WD_QUESTION GENMASK(3, 0)
+#define TPS6594_MASK_WD_ANSW_CNT GENMASK(5, 4)
+#define TPS65224_BIT_INT_TOP_STATUS BIT(7)
+
+/* WD_WIN1_CFG register field definition */
+#define TPS6594_MASK_WD_WIN1_CFG GENMASK(6, 0)
+
+/* WD_WIN2_CFG register field definition */
+#define TPS6594_MASK_WD_WIN2_CFG GENMASK(6, 0)
+
+/* WD_LONGWIN_CFG register field definition */
+#define TPS6594_MASK_WD_LONGWIN_CFG GENMASK(7, 0)
+
+/* WD_MODE_REG register field definition */
+#define TPS6594_BIT_WD_RETURN_LONGWIN BIT(0)
+#define TPS6594_BIT_WD_MODE_SELECT BIT(1)
+#define TPS6594_BIT_WD_PWRHOLD BIT(2)
+#define TPS65224_BIT_WD_ENDRV_SEL BIT(6)
+#define TPS65224_BIT_WD_CNT_SEL BIT(7)
+
+/* WD_QA_CFG register field definition */
+#define TPS6594_MASK_WD_QUESTION_SEED GENMASK(3, 0)
+#define TPS6594_MASK_WD_QA_LFSR GENMASK(5, 4)
+#define TPS6594_MASK_WD_QA_FDBK GENMASK(7, 6)
+
+/* WD_ERR_STATUS register field definition */
+#define TPS6594_BIT_WD_LONGWIN_TIMEOUT_INT BIT(0)
+#define TPS6594_BIT_WD_TIMEOUT BIT(1)
+#define TPS6594_BIT_WD_TRIG_EARLY BIT(2)
+#define TPS6594_BIT_WD_ANSW_EARLY BIT(3)
+#define TPS6594_BIT_WD_SEQ_ERR BIT(4)
+#define TPS6594_BIT_WD_ANSW_ERR BIT(5)
+#define TPS6594_BIT_WD_FAIL_INT BIT(6)
+#define TPS6594_BIT_WD_RST_INT BIT(7)
+
+/* WD_THR_CFG register field definition */
+#define TPS6594_MASK_WD_RST_TH GENMASK(2, 0)
+#define TPS6594_MASK_WD_FAIL_TH GENMASK(5, 3)
+#define TPS6594_BIT_WD_EN BIT(6)
+#define TPS6594_BIT_WD_RST_EN BIT(7)
+
+/* WD_FAIL_CNT_REG register field definition */
+#define TPS6594_MASK_WD_FAIL_CNT GENMASK(3, 0)
+#define TPS6594_BIT_WD_FIRST_OK BIT(5)
+#define TPS6594_BIT_WD_BAD_EVENT BIT(6)
+
+/* CRC8 polynomial for I2C & SPI protocols */
+#define TPS6594_CRC8_POLYNOMIAL 0x07
+
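TPS6594_CRC8_POLYNOMIAL is the classic CRC-8 polynomial x^8 + x^2 + x + 1. Kernel code would typically build a lookup table once with crc8_populate_msb() from <linux/crc8.h> and then call crc8(); a self-contained bitwise equivalent follows, with the initial value left as a parameter since the seed the device expects is not spelled out in this header:

#include <stddef.h>
#include <stdint.h>

/* Bitwise MSB-first CRC-8 over polynomial 0x07; illustration only */
static uint8_t crc8_msb(uint8_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ 0x07)
					   : (uint8_t)(crc << 1);
	}
	return crc;
}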
+/* IRQs */
+enum tps6594_irqs {
+ /* INT_BUCK1_2 register */
+ TPS6594_IRQ_BUCK1_OV,
+ TPS6594_IRQ_BUCK1_UV,
+ TPS6594_IRQ_BUCK1_SC,
+ TPS6594_IRQ_BUCK1_ILIM,
+ TPS6594_IRQ_BUCK2_OV,
+ TPS6594_IRQ_BUCK2_UV,
+ TPS6594_IRQ_BUCK2_SC,
+ TPS6594_IRQ_BUCK2_ILIM,
+ /* INT_BUCK3_4 register */
+ TPS6594_IRQ_BUCK3_OV,
+ TPS6594_IRQ_BUCK3_UV,
+ TPS6594_IRQ_BUCK3_SC,
+ TPS6594_IRQ_BUCK3_ILIM,
+ TPS6594_IRQ_BUCK4_OV,
+ TPS6594_IRQ_BUCK4_UV,
+ TPS6594_IRQ_BUCK4_SC,
+ TPS6594_IRQ_BUCK4_ILIM,
+ /* INT_BUCK5 register */
+ TPS6594_IRQ_BUCK5_OV,
+ TPS6594_IRQ_BUCK5_UV,
+ TPS6594_IRQ_BUCK5_SC,
+ TPS6594_IRQ_BUCK5_ILIM,
+ /* INT_LDO1_2 register */
+ TPS6594_IRQ_LDO1_OV,
+ TPS6594_IRQ_LDO1_UV,
+ TPS6594_IRQ_LDO1_SC,
+ TPS6594_IRQ_LDO1_ILIM,
+ TPS6594_IRQ_LDO2_OV,
+ TPS6594_IRQ_LDO2_UV,
+ TPS6594_IRQ_LDO2_SC,
+ TPS6594_IRQ_LDO2_ILIM,
+ /* INT_LDO3_4 register */
+ TPS6594_IRQ_LDO3_OV,
+ TPS6594_IRQ_LDO3_UV,
+ TPS6594_IRQ_LDO3_SC,
+ TPS6594_IRQ_LDO3_ILIM,
+ TPS6594_IRQ_LDO4_OV,
+ TPS6594_IRQ_LDO4_UV,
+ TPS6594_IRQ_LDO4_SC,
+ TPS6594_IRQ_LDO4_ILIM,
+ /* INT_VMON register */
+ TPS6594_IRQ_VCCA_OV,
+ TPS6594_IRQ_VCCA_UV,
+ TPS6594_IRQ_VMON1_OV,
+ TPS6594_IRQ_VMON1_UV,
+ TPS6594_IRQ_VMON1_RV,
+ TPS6594_IRQ_VMON2_OV,
+ TPS6594_IRQ_VMON2_UV,
+ TPS6594_IRQ_VMON2_RV,
+ /* INT_GPIO register */
+ TPS6594_IRQ_GPIO9,
+ TPS6594_IRQ_GPIO10,
+ TPS6594_IRQ_GPIO11,
+ /* INT_GPIO1_8 register */
+ TPS6594_IRQ_GPIO1,
+ TPS6594_IRQ_GPIO2,
+ TPS6594_IRQ_GPIO3,
+ TPS6594_IRQ_GPIO4,
+ TPS6594_IRQ_GPIO5,
+ TPS6594_IRQ_GPIO6,
+ TPS6594_IRQ_GPIO7,
+ TPS6594_IRQ_GPIO8,
+ /* INT_STARTUP register */
+ TPS6594_IRQ_NPWRON_START,
+ TPS6594_IRQ_ENABLE,
+ TPS6594_IRQ_FSD,
+ TPS6594_IRQ_SOFT_REBOOT,
+ /* INT_MISC register */
+ TPS6594_IRQ_BIST_PASS,
+ TPS6594_IRQ_EXT_CLK,
+ TPS6594_IRQ_TWARN,
+ /* INT_MODERATE_ERR register */
+ TPS6594_IRQ_TSD_ORD,
+ TPS6594_IRQ_BIST_FAIL,
+ TPS6594_IRQ_REG_CRC_ERR,
+ TPS6594_IRQ_RECOV_CNT,
+ TPS6594_IRQ_SPMI_ERR,
+ TPS6594_IRQ_NPWRON_LONG,
+ TPS6594_IRQ_NINT_READBACK,
+ TPS6594_IRQ_NRSTOUT_READBACK,
+ /* INT_SEVERE_ERR register */
+ TPS6594_IRQ_TSD_IMM,
+ TPS6594_IRQ_VCCA_OVP,
+ TPS6594_IRQ_PFSM_ERR,
+ /* INT_FSM_ERR register */
+ TPS6594_IRQ_IMM_SHUTDOWN,
+ TPS6594_IRQ_ORD_SHUTDOWN,
+ TPS6594_IRQ_MCU_PWR_ERR,
+ TPS6594_IRQ_SOC_PWR_ERR,
+ /* INT_COMM_ERR register */
+ TPS6594_IRQ_COMM_FRM_ERR,
+ TPS6594_IRQ_COMM_CRC_ERR,
+ TPS6594_IRQ_COMM_ADR_ERR,
+ TPS6594_IRQ_I2C2_CRC_ERR,
+ TPS6594_IRQ_I2C2_ADR_ERR,
+ /* INT_READBACK_ERR register */
+ TPS6594_IRQ_EN_DRV_READBACK,
+ TPS6594_IRQ_NRSTOUT_SOC_READBACK,
+ /* INT_ESM register */
+ TPS6594_IRQ_ESM_SOC_PIN,
+ TPS6594_IRQ_ESM_SOC_FAIL,
+ TPS6594_IRQ_ESM_SOC_RST,
+ /* RTC_STATUS register */
+ TPS6594_IRQ_TIMER,
+ TPS6594_IRQ_ALARM,
+ TPS6594_IRQ_POWER_UP,
+};
+
+#define TPS6594_IRQ_NAME_BUCK1_OV "buck1_ov"
+#define TPS6594_IRQ_NAME_BUCK1_UV "buck1_uv"
+#define TPS6594_IRQ_NAME_BUCK1_SC "buck1_sc"
+#define TPS6594_IRQ_NAME_BUCK1_ILIM "buck1_ilim"
+#define TPS6594_IRQ_NAME_BUCK2_OV "buck2_ov"
+#define TPS6594_IRQ_NAME_BUCK2_UV "buck2_uv"
+#define TPS6594_IRQ_NAME_BUCK2_SC "buck2_sc"
+#define TPS6594_IRQ_NAME_BUCK2_ILIM "buck2_ilim"
+#define TPS6594_IRQ_NAME_BUCK3_OV "buck3_ov"
+#define TPS6594_IRQ_NAME_BUCK3_UV "buck3_uv"
+#define TPS6594_IRQ_NAME_BUCK3_SC "buck3_sc"
+#define TPS6594_IRQ_NAME_BUCK3_ILIM "buck3_ilim"
+#define TPS6594_IRQ_NAME_BUCK4_OV "buck4_ov"
+#define TPS6594_IRQ_NAME_BUCK4_UV "buck4_uv"
+#define TPS6594_IRQ_NAME_BUCK4_SC "buck4_sc"
+#define TPS6594_IRQ_NAME_BUCK4_ILIM "buck4_ilim"
+#define TPS6594_IRQ_NAME_BUCK5_OV "buck5_ov"
+#define TPS6594_IRQ_NAME_BUCK5_UV "buck5_uv"
+#define TPS6594_IRQ_NAME_BUCK5_SC "buck5_sc"
+#define TPS6594_IRQ_NAME_BUCK5_ILIM "buck5_ilim"
+#define TPS6594_IRQ_NAME_LDO1_OV "ldo1_ov"
+#define TPS6594_IRQ_NAME_LDO1_UV "ldo1_uv"
+#define TPS6594_IRQ_NAME_LDO1_SC "ldo1_sc"
+#define TPS6594_IRQ_NAME_LDO1_ILIM "ldo1_ilim"
+#define TPS6594_IRQ_NAME_LDO2_OV "ldo2_ov"
+#define TPS6594_IRQ_NAME_LDO2_UV "ldo2_uv"
+#define TPS6594_IRQ_NAME_LDO2_SC "ldo2_sc"
+#define TPS6594_IRQ_NAME_LDO2_ILIM "ldo2_ilim"
+#define TPS6594_IRQ_NAME_LDO3_OV "ldo3_ov"
+#define TPS6594_IRQ_NAME_LDO3_UV "ldo3_uv"
+#define TPS6594_IRQ_NAME_LDO3_SC "ldo3_sc"
+#define TPS6594_IRQ_NAME_LDO3_ILIM "ldo3_ilim"
+#define TPS6594_IRQ_NAME_LDO4_OV "ldo4_ov"
+#define TPS6594_IRQ_NAME_LDO4_UV "ldo4_uv"
+#define TPS6594_IRQ_NAME_LDO4_SC "ldo4_sc"
+#define TPS6594_IRQ_NAME_LDO4_ILIM "ldo4_ilim"
+#define TPS6594_IRQ_NAME_VCCA_OV "vcca_ov"
+#define TPS6594_IRQ_NAME_VCCA_UV "vcca_uv"
+#define TPS6594_IRQ_NAME_VMON1_OV "vmon1_ov"
+#define TPS6594_IRQ_NAME_VMON1_UV "vmon1_uv"
+#define TPS6594_IRQ_NAME_VMON1_RV "vmon1_rv"
+#define TPS6594_IRQ_NAME_VMON2_OV "vmon2_ov"
+#define TPS6594_IRQ_NAME_VMON2_UV "vmon2_uv"
+#define TPS6594_IRQ_NAME_VMON2_RV "vmon2_rv"
+#define TPS6594_IRQ_NAME_GPIO9 "gpio9"
+#define TPS6594_IRQ_NAME_GPIO10 "gpio10"
+#define TPS6594_IRQ_NAME_GPIO11 "gpio11"
+#define TPS6594_IRQ_NAME_GPIO1 "gpio1"
+#define TPS6594_IRQ_NAME_GPIO2 "gpio2"
+#define TPS6594_IRQ_NAME_GPIO3 "gpio3"
+#define TPS6594_IRQ_NAME_GPIO4 "gpio4"
+#define TPS6594_IRQ_NAME_GPIO5 "gpio5"
+#define TPS6594_IRQ_NAME_GPIO6 "gpio6"
+#define TPS6594_IRQ_NAME_GPIO7 "gpio7"
+#define TPS6594_IRQ_NAME_GPIO8 "gpio8"
+#define TPS6594_IRQ_NAME_NPWRON_START "npwron_start"
+#define TPS6594_IRQ_NAME_ENABLE "enable"
+#define TPS6594_IRQ_NAME_FSD "fsd"
+#define TPS6594_IRQ_NAME_SOFT_REBOOT "soft_reboot"
+#define TPS6594_IRQ_NAME_BIST_PASS "bist_pass"
+#define TPS6594_IRQ_NAME_EXT_CLK "ext_clk"
+#define TPS6594_IRQ_NAME_TWARN "twarn"
+#define TPS6594_IRQ_NAME_TSD_ORD "tsd_ord"
+#define TPS6594_IRQ_NAME_BIST_FAIL "bist_fail"
+#define TPS6594_IRQ_NAME_REG_CRC_ERR "reg_crc_err"
+#define TPS6594_IRQ_NAME_RECOV_CNT "recov_cnt"
+#define TPS6594_IRQ_NAME_SPMI_ERR "spmi_err"
+#define TPS6594_IRQ_NAME_NPWRON_LONG "npwron_long"
+#define TPS6594_IRQ_NAME_NINT_READBACK "nint_readback"
+#define TPS6594_IRQ_NAME_NRSTOUT_READBACK "nrstout_readback"
+#define TPS6594_IRQ_NAME_TSD_IMM "tsd_imm"
+#define TPS6594_IRQ_NAME_VCCA_OVP "vcca_ovp"
+#define TPS6594_IRQ_NAME_PFSM_ERR "pfsm_err"
+#define TPS6594_IRQ_NAME_IMM_SHUTDOWN "imm_shutdown"
+#define TPS6594_IRQ_NAME_ORD_SHUTDOWN "ord_shutdown"
+#define TPS6594_IRQ_NAME_MCU_PWR_ERR "mcu_pwr_err"
+#define TPS6594_IRQ_NAME_SOC_PWR_ERR "soc_pwr_err"
+#define TPS6594_IRQ_NAME_COMM_FRM_ERR "comm_frm_err"
+#define TPS6594_IRQ_NAME_COMM_CRC_ERR "comm_crc_err"
+#define TPS6594_IRQ_NAME_COMM_ADR_ERR "comm_adr_err"
+#define TPS6594_IRQ_NAME_EN_DRV_READBACK "en_drv_readback"
+#define TPS6594_IRQ_NAME_NRSTOUT_SOC_READBACK "nrstout_soc_readback"
+#define TPS6594_IRQ_NAME_ESM_SOC_PIN "esm_soc_pin"
+#define TPS6594_IRQ_NAME_ESM_SOC_FAIL "esm_soc_fail"
+#define TPS6594_IRQ_NAME_ESM_SOC_RST "esm_soc_rst"
+#define TPS6594_IRQ_NAME_TIMER "timer"
+#define TPS6594_IRQ_NAME_ALARM "alarm"
+#define TPS6594_IRQ_NAME_POWERUP "powerup"
+
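These string names are what tie the regmap-irq domain built by the core driver to its MFD children: each sub-device resolves its interrupt at probe time by name rather than by a fixed number. A hedged sketch of the lookup (the platform device and function name are illustrative):

#include <linux/platform_device.h>

/* Illustrative only: a child driver fetching the RTC alarm interrupt */
static int example_get_alarm_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq_byname(pdev, TPS6594_IRQ_NAME_ALARM);

	/* Negative values are errnos; a real driver would go on to
	 * request the IRQ with a threaded handler here.
	 */
	return irq;
}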
+/* IRQs */
+enum tps65224_irqs {
+ /* INT_BUCK register */
+ TPS65224_IRQ_BUCK1_UVOV,
+ TPS65224_IRQ_BUCK2_UVOV,
+ TPS65224_IRQ_BUCK3_UVOV,
+ TPS65224_IRQ_BUCK4_UVOV,
+ /* INT_LDO_VMON register */
+ TPS65224_IRQ_LDO1_UVOV,
+ TPS65224_IRQ_LDO2_UVOV,
+ TPS65224_IRQ_LDO3_UVOV,
+ TPS65224_IRQ_VCCA_UVOV,
+ TPS65224_IRQ_VMON1_UVOV,
+ TPS65224_IRQ_VMON2_UVOV,
+ /* INT_GPIO register */
+ TPS65224_IRQ_GPIO1,
+ TPS65224_IRQ_GPIO2,
+ TPS65224_IRQ_GPIO3,
+ TPS65224_IRQ_GPIO4,
+ TPS65224_IRQ_GPIO5,
+ TPS65224_IRQ_GPIO6,
+ /* INT_STARTUP register */
+ TPS65224_IRQ_VSENSE,
+ TPS65224_IRQ_ENABLE,
+ TPS65224_IRQ_PB_SHORT,
+ TPS65224_IRQ_FSD,
+ TPS65224_IRQ_SOFT_REBOOT,
+ /* INT_MISC register */
+ TPS65224_IRQ_BIST_PASS,
+ TPS65224_IRQ_EXT_CLK,
+ TPS65224_IRQ_REG_UNLOCK,
+ TPS65224_IRQ_TWARN,
+ TPS65224_IRQ_PB_LONG,
+ TPS65224_IRQ_PB_FALL,
+ TPS65224_IRQ_PB_RISE,
+ TPS65224_IRQ_ADC_CONV_READY,
+ /* INT_MODERATE_ERR register */
+ TPS65224_IRQ_TSD_ORD,
+ TPS65224_IRQ_BIST_FAIL,
+ TPS65224_IRQ_REG_CRC_ERR,
+ TPS65224_IRQ_RECOV_CNT,
+ /* INT_SEVERE_ERR register */
+ TPS65224_IRQ_TSD_IMM,
+ TPS65224_IRQ_VCCA_OVP,
+ TPS65224_IRQ_PFSM_ERR,
+ TPS65224_IRQ_BG_XMON,
+ /* INT_FSM_ERR register */
+ TPS65224_IRQ_IMM_SHUTDOWN,
+ TPS65224_IRQ_ORD_SHUTDOWN,
+ TPS65224_IRQ_MCU_PWR_ERR,
+ TPS65224_IRQ_SOC_PWR_ERR,
+ TPS65224_IRQ_COMM_ERR,
+ TPS65224_IRQ_I2C2_ERR,
+};
+
+#define TPS65224_IRQ_NAME_BUCK1_UVOV "buck1_uvov"
+#define TPS65224_IRQ_NAME_BUCK2_UVOV "buck2_uvov"
+#define TPS65224_IRQ_NAME_BUCK3_UVOV "buck3_uvov"
+#define TPS65224_IRQ_NAME_BUCK4_UVOV "buck4_uvov"
+#define TPS65224_IRQ_NAME_LDO1_UVOV "ldo1_uvov"
+#define TPS65224_IRQ_NAME_LDO2_UVOV "ldo2_uvov"
+#define TPS65224_IRQ_NAME_LDO3_UVOV "ldo3_uvov"
+#define TPS65224_IRQ_NAME_VCCA_UVOV "vcca_uvov"
+#define TPS65224_IRQ_NAME_VMON1_UVOV "vmon1_uvov"
+#define TPS65224_IRQ_NAME_VMON2_UVOV "vmon2_uvov"
+#define TPS65224_IRQ_NAME_GPIO1 "gpio1"
+#define TPS65224_IRQ_NAME_GPIO2 "gpio2"
+#define TPS65224_IRQ_NAME_GPIO3 "gpio3"
+#define TPS65224_IRQ_NAME_GPIO4 "gpio4"
+#define TPS65224_IRQ_NAME_GPIO5 "gpio5"
+#define TPS65224_IRQ_NAME_GPIO6 "gpio6"
+#define TPS65224_IRQ_NAME_VSENSE "vsense"
+#define TPS65224_IRQ_NAME_ENABLE "enable"
+#define TPS65224_IRQ_NAME_PB_SHORT "pb_short"
+#define TPS65224_IRQ_NAME_FSD "fsd"
+#define TPS65224_IRQ_NAME_SOFT_REBOOT "soft_reboot"
+#define TPS65224_IRQ_NAME_BIST_PASS "bist_pass"
+#define TPS65224_IRQ_NAME_EXT_CLK "ext_clk"
+#define TPS65224_IRQ_NAME_REG_UNLOCK "reg_unlock"
+#define TPS65224_IRQ_NAME_TWARN "twarn"
+#define TPS65224_IRQ_NAME_PB_LONG "pb_long"
+#define TPS65224_IRQ_NAME_PB_FALL "pb_fall"
+#define TPS65224_IRQ_NAME_PB_RISE "pb_rise"
+#define TPS65224_IRQ_NAME_ADC_CONV_READY "adc_conv_ready"
+#define TPS65224_IRQ_NAME_TSD_ORD "tsd_ord"
+#define TPS65224_IRQ_NAME_BIST_FAIL "bist_fail"
+#define TPS65224_IRQ_NAME_REG_CRC_ERR "reg_crc_err"
+#define TPS65224_IRQ_NAME_RECOV_CNT "recov_cnt"
+#define TPS65224_IRQ_NAME_TSD_IMM "tsd_imm"
+#define TPS65224_IRQ_NAME_VCCA_OVP "vcca_ovp"
+#define TPS65224_IRQ_NAME_PFSM_ERR "pfsm_err"
+#define TPS65224_IRQ_NAME_BG_XMON "bg_xmon"
+#define TPS65224_IRQ_NAME_IMM_SHUTDOWN "imm_shutdown"
+#define TPS65224_IRQ_NAME_ORD_SHUTDOWN "ord_shutdown"
+#define TPS65224_IRQ_NAME_MCU_PWR_ERR "mcu_pwr_err"
+#define TPS65224_IRQ_NAME_SOC_PWR_ERR "soc_pwr_err"
+#define TPS65224_IRQ_NAME_COMM_ERR "comm_err"
+#define TPS65224_IRQ_NAME_I2C2_ERR "i2c2_err"
+#define TPS65224_IRQ_NAME_POWERUP "powerup"
+
+/**
+ * struct tps6594 - device private data structure
+ *
+ * @dev: MFD parent device
+ * @chip_id: chip ID
+ * @reg: I2C slave address or SPI chip select number
+ * @use_crc: if true, use CRC for I2C and SPI interface protocols
+ * @regmap: regmap for accessing the device registers
+ * @irq: irq generated by the device
+ * @irq_data: regmap irq data used for the irq chip
+ */
+struct tps6594 {
+ struct device *dev;
+ unsigned long chip_id;
+ unsigned short reg;
+ bool use_crc;
+ struct regmap *regmap;
+ int irq;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+extern const struct regmap_access_table tps6594_volatile_table;
+extern const struct regmap_access_table tps65224_volatile_table;
+
+int tps6594_device_init(struct tps6594 *tps, bool enable_crc);
+
+#endif /* __LINUX_MFD_TPS6594_H */
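To tie the header together: a bus driver is expected to allocate struct tps6594, fill in dev, chip_id, reg, regmap and irq, then call tps6594_device_init() to set up irq_data and register the MFD children. A compressed, hedged sketch of the I2C side (regmap configuration and error paths trimmed; everything not declared in the header above is an assumption, not the in-tree driver verbatim):

#include <linux/i2c.h>
#include <linux/mfd/tps6594.h>

/* Illustrative probe skeleton only */
static int example_tps6594_i2c_probe(struct i2c_client *client)
{
	struct tps6594 *tps;

	tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
	if (!tps)
		return -ENOMEM;

	i2c_set_clientdata(client, tps);
	tps->dev = &client->dev;
	tps->reg = client->addr;	/* I2C slave address */
	tps->irq = client->irq;
	/* tps->regmap would come from devm_regmap_init_i2c(client, &config) */

	return tps6594_device_init(tps, false);
}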
diff --git a/include/linux/mfd/tps68470.h b/include/linux/mfd/tps68470.h
index ffe81127d91c..7807fa329db0 100644
--- a/include/linux/mfd/tps68470.h
+++ b/include/linux/mfd/tps68470.h
@@ -75,6 +75,17 @@
#define TPS68470_CLKCFG1_MODE_A_MASK GENMASK(1, 0)
#define TPS68470_CLKCFG1_MODE_B_MASK GENMASK(3, 2)
+#define TPS68470_CLKCFG2_DRV_STR_2MA 0x05
+#define TPS68470_PLL_OUTPUT_ENABLE 0x02
+#define TPS68470_CLK_SRC_XTAL BIT(0)
+#define TPS68470_PLLSWR_DEFAULT GENMASK(1, 0)
+#define TPS68470_OSC_EXT_CAP_DEFAULT 0x05
+
+#define TPS68470_OUTPUT_A_SHIFT 0x00
+#define TPS68470_OUTPUT_B_SHIFT 0x02
+#define TPS68470_CLK_SRC_SHIFT GENMASK(2, 0)
+#define TPS68470_OSC_EXT_CAP_SHIFT BIT(2)
+
#define TPS68470_GPIO_CTL_REG_A(x) (TPS68470_REG_GPCTL0A + (x) * 2)
#define TPS68470_GPIO_CTL_REG_B(x) (TPS68470_REG_GPCTL0B + (x) * 2)
#define TPS68470_GPIO_MODE_MASK GENMASK(1, 0)
diff --git a/include/linux/mfd/tps80031.h b/include/linux/mfd/tps80031.h
deleted file mode 100644
index 2c75c9c9318f..000000000000
--- a/include/linux/mfd/tps80031.h
+++ /dev/null
@@ -1,637 +0,0 @@
-/*
- * tps80031.h -- TI TPS80031 and TI TPS80032 PMIC driver.
- *
- * Copyright (c) 2012, NVIDIA Corporation.
- *
- * Author: Laxman Dewangan <ldewangan@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA
- */
-
-#ifndef __LINUX_MFD_TPS80031_H
-#define __LINUX_MFD_TPS80031_H
-
-#include <linux/device.h>
-#include <linux/regmap.h>
-
-/* Pull-ups/Pull-downs */
-#define TPS80031_CFG_INPUT_PUPD1 0xF0
-#define TPS80031_CFG_INPUT_PUPD2 0xF1
-#define TPS80031_CFG_INPUT_PUPD3 0xF2
-#define TPS80031_CFG_INPUT_PUPD4 0xF3
-#define TPS80031_CFG_LDO_PD1 0xF4
-#define TPS80031_CFG_LDO_PD2 0xF5
-#define TPS80031_CFG_SMPS_PD 0xF6
-
-/* Real Time Clock */
-#define TPS80031_SECONDS_REG 0x00
-#define TPS80031_MINUTES_REG 0x01
-#define TPS80031_HOURS_REG 0x02
-#define TPS80031_DAYS_REG 0x03
-#define TPS80031_MONTHS_REG 0x04
-#define TPS80031_YEARS_REG 0x05
-#define TPS80031_WEEKS_REG 0x06
-#define TPS80031_ALARM_SECONDS_REG 0x08
-#define TPS80031_ALARM_MINUTES_REG 0x09
-#define TPS80031_ALARM_HOURS_REG 0x0A
-#define TPS80031_ALARM_DAYS_REG 0x0B
-#define TPS80031_ALARM_MONTHS_REG 0x0C
-#define TPS80031_ALARM_YEARS_REG 0x0D
-#define TPS80031_RTC_CTRL_REG 0x10
-#define TPS80031_RTC_STATUS_REG 0x11
-#define TPS80031_RTC_INTERRUPTS_REG 0x12
-#define TPS80031_RTC_COMP_LSB_REG 0x13
-#define TPS80031_RTC_COMP_MSB_REG 0x14
-#define TPS80031_RTC_RESET_STATUS_REG 0x16
-
-/*PMC Master Module */
-#define TPS80031_PHOENIX_START_CONDITION 0x1F
-#define TPS80031_PHOENIX_MSK_TRANSITION 0x20
-#define TPS80031_STS_HW_CONDITIONS 0x21
-#define TPS80031_PHOENIX_LAST_TURNOFF_STS 0x22
-#define TPS80031_VSYSMIN_LO_THRESHOLD 0x23
-#define TPS80031_VSYSMIN_HI_THRESHOLD 0x24
-#define TPS80031_PHOENIX_DEV_ON 0x25
-#define TPS80031_STS_PWR_GRP_STATE 0x27
-#define TPS80031_PH_CFG_VSYSLOW 0x28
-#define TPS80031_PH_STS_BOOT 0x29
-#define TPS80031_PHOENIX_SENS_TRANSITION 0x2A
-#define TPS80031_PHOENIX_SEQ_CFG 0x2B
-#define TPS80031_PRIMARY_WATCHDOG_CFG 0X2C
-#define TPS80031_KEY_PRESS_DUR_CFG 0X2D
-#define TPS80031_SMPS_LDO_SHORT_STS 0x2E
-
-/* PMC Slave Module - Broadcast */
-#define TPS80031_BROADCAST_ADDR_ALL 0x31
-#define TPS80031_BROADCAST_ADDR_REF 0x32
-#define TPS80031_BROADCAST_ADDR_PROV 0x33
-#define TPS80031_BROADCAST_ADDR_CLK_RST 0x34
-
-/* PMC Slave Module SMPS Regulators */
-#define TPS80031_SMPS4_CFG_TRANS 0x41
-#define TPS80031_SMPS4_CFG_STATE 0x42
-#define TPS80031_SMPS4_CFG_VOLTAGE 0x44
-#define TPS80031_VIO_CFG_TRANS 0x47
-#define TPS80031_VIO_CFG_STATE 0x48
-#define TPS80031_VIO_CFG_FORCE 0x49
-#define TPS80031_VIO_CFG_VOLTAGE 0x4A
-#define TPS80031_VIO_CFG_STEP 0x48
-#define TPS80031_SMPS1_CFG_TRANS 0x53
-#define TPS80031_SMPS1_CFG_STATE 0x54
-#define TPS80031_SMPS1_CFG_FORCE 0x55
-#define TPS80031_SMPS1_CFG_VOLTAGE 0x56
-#define TPS80031_SMPS1_CFG_STEP 0x57
-#define TPS80031_SMPS2_CFG_TRANS 0x59
-#define TPS80031_SMPS2_CFG_STATE 0x5A
-#define TPS80031_SMPS2_CFG_FORCE 0x5B
-#define TPS80031_SMPS2_CFG_VOLTAGE 0x5C
-#define TPS80031_SMPS2_CFG_STEP 0x5D
-#define TPS80031_SMPS3_CFG_TRANS 0x65
-#define TPS80031_SMPS3_CFG_STATE 0x66
-#define TPS80031_SMPS3_CFG_VOLTAGE 0x68
-
-/* PMC Slave Module LDO Regulators */
-#define TPS80031_VANA_CFG_TRANS 0x81
-#define TPS80031_VANA_CFG_STATE 0x82
-#define TPS80031_VANA_CFG_VOLTAGE 0x83
-#define TPS80031_LDO2_CFG_TRANS 0x85
-#define TPS80031_LDO2_CFG_STATE 0x86
-#define TPS80031_LDO2_CFG_VOLTAGE 0x87
-#define TPS80031_LDO4_CFG_TRANS 0x89
-#define TPS80031_LDO4_CFG_STATE 0x8A
-#define TPS80031_LDO4_CFG_VOLTAGE 0x8B
-#define TPS80031_LDO3_CFG_TRANS 0x8D
-#define TPS80031_LDO3_CFG_STATE 0x8E
-#define TPS80031_LDO3_CFG_VOLTAGE 0x8F
-#define TPS80031_LDO6_CFG_TRANS 0x91
-#define TPS80031_LDO6_CFG_STATE 0x92
-#define TPS80031_LDO6_CFG_VOLTAGE 0x93
-#define TPS80031_LDOLN_CFG_TRANS 0x95
-#define TPS80031_LDOLN_CFG_STATE 0x96
-#define TPS80031_LDOLN_CFG_VOLTAGE 0x97
-#define TPS80031_LDO5_CFG_TRANS 0x99
-#define TPS80031_LDO5_CFG_STATE 0x9A
-#define TPS80031_LDO5_CFG_VOLTAGE 0x9B
-#define TPS80031_LDO1_CFG_TRANS 0x9D
-#define TPS80031_LDO1_CFG_STATE 0x9E
-#define TPS80031_LDO1_CFG_VOLTAGE 0x9F
-#define TPS80031_LDOUSB_CFG_TRANS 0xA1
-#define TPS80031_LDOUSB_CFG_STATE 0xA2
-#define TPS80031_LDOUSB_CFG_VOLTAGE 0xA3
-#define TPS80031_LDO7_CFG_TRANS 0xA5
-#define TPS80031_LDO7_CFG_STATE 0xA6
-#define TPS80031_LDO7_CFG_VOLTAGE 0xA7
-
-/* PMC Slave Module External Control */
-#define TPS80031_REGEN1_CFG_TRANS 0xAE
-#define TPS80031_REGEN1_CFG_STATE 0xAF
-#define TPS80031_REGEN2_CFG_TRANS 0xB1
-#define TPS80031_REGEN2_CFG_STATE 0xB2
-#define TPS80031_SYSEN_CFG_TRANS 0xB4
-#define TPS80031_SYSEN_CFG_STATE 0xB5
-
-/* PMC Slave Module Internal Control */
-#define TPS80031_NRESPWRON_CFG_TRANS 0xB7
-#define TPS80031_NRESPWRON_CFG_STATE 0xB8
-#define TPS80031_CLK32KAO_CFG_TRANS 0xBA
-#define TPS80031_CLK32KAO_CFG_STATE 0xBB
-#define TPS80031_CLK32KG_CFG_TRANS 0xBD
-#define TPS80031_CLK32KG_CFG_STATE 0xBE
-#define TPS80031_CLK32KAUDIO_CFG_TRANS 0xC0
-#define TPS80031_CLK32KAUDIO_CFG_STATE 0xC1
-#define TPS80031_VRTC_CFG_TRANS 0xC3
-#define TPS80031_VRTC_CFG_STATE 0xC4
-#define TPS80031_BIAS_CFG_TRANS 0xC6
-#define TPS80031_BIAS_CFG_STATE 0xC7
-#define TPS80031_VSYSMIN_HI_CFG_TRANS 0xC9
-#define TPS80031_VSYSMIN_HI_CFG_STATE 0xCA
-#define TPS80031_RC6MHZ_CFG_TRANS 0xCC
-#define TPS80031_RC6MHZ_CFG_STATE 0xCD
-#define TPS80031_TMP_CFG_TRANS 0xCF
-#define TPS80031_TMP_CFG_STATE 0xD0
-
-/* PMC Slave Module resources assignment */
-#define TPS80031_PREQ1_RES_ASS_A 0xD7
-#define TPS80031_PREQ1_RES_ASS_B 0xD8
-#define TPS80031_PREQ1_RES_ASS_C 0xD9
-#define TPS80031_PREQ2_RES_ASS_A 0xDA
-#define TPS80031_PREQ2_RES_ASS_B 0xDB
-#define TPS80031_PREQ2_RES_ASS_C 0xDC
-#define TPS80031_PREQ3_RES_ASS_A 0xDD
-#define TPS80031_PREQ3_RES_ASS_B 0xDE
-#define TPS80031_PREQ3_RES_ASS_C 0xDF
-
-/* PMC Slave Module Miscellaneous */
-#define TPS80031_SMPS_OFFSET 0xE0
-#define TPS80031_SMPS_MULT 0xE3
-#define TPS80031_MISC1 0xE4
-#define TPS80031_MISC2 0xE5
-#define TPS80031_BBSPOR_CFG 0xE6
-#define TPS80031_TMP_CFG 0xE7
-
-/* Battery Charging Controller and Indicator LED */
-#define TPS80031_CONTROLLER_CTRL2 0xDA
-#define TPS80031_CONTROLLER_VSEL_COMP 0xDB
-#define TPS80031_CHARGERUSB_VSYSREG 0xDC
-#define TPS80031_CHARGERUSB_VICHRG_PC 0xDD
-#define TPS80031_LINEAR_CHRG_STS 0xDE
-#define TPS80031_CONTROLLER_INT_MASK 0xE0
-#define TPS80031_CONTROLLER_CTRL1 0xE1
-#define TPS80031_CONTROLLER_WDG 0xE2
-#define TPS80031_CONTROLLER_STAT1 0xE3
-#define TPS80031_CHARGERUSB_INT_STATUS 0xE4
-#define TPS80031_CHARGERUSB_INT_MASK 0xE5
-#define TPS80031_CHARGERUSB_STATUS_INT1 0xE6
-#define TPS80031_CHARGERUSB_STATUS_INT2 0xE7
-#define TPS80031_CHARGERUSB_CTRL1 0xE8
-#define TPS80031_CHARGERUSB_CTRL2 0xE9
-#define TPS80031_CHARGERUSB_CTRL3 0xEA
-#define TPS80031_CHARGERUSB_STAT1 0xEB
-#define TPS80031_CHARGERUSB_VOREG 0xEC
-#define TPS80031_CHARGERUSB_VICHRG 0xED
-#define TPS80031_CHARGERUSB_CINLIMIT 0xEE
-#define TPS80031_CHARGERUSB_CTRLLIMIT1 0xEF
-#define TPS80031_CHARGERUSB_CTRLLIMIT2 0xF0
-#define TPS80031_LED_PWM_CTRL1 0xF4
-#define TPS80031_LED_PWM_CTRL2 0xF5
-
-/* USB On-The-Go */
-#define TPS80031_BACKUP_REG 0xFA
-#define TPS80031_USB_VENDOR_ID_LSB 0x00
-#define TPS80031_USB_VENDOR_ID_MSB 0x01
-#define TPS80031_USB_PRODUCT_ID_LSB 0x02
-#define TPS80031_USB_PRODUCT_ID_MSB 0x03
-#define TPS80031_USB_VBUS_CTRL_SET 0x04
-#define TPS80031_USB_VBUS_CTRL_CLR 0x05
-#define TPS80031_USB_ID_CTRL_SET 0x06
-#define TPS80031_USB_ID_CTRL_CLR 0x07
-#define TPS80031_USB_VBUS_INT_SRC 0x08
-#define TPS80031_USB_VBUS_INT_LATCH_SET 0x09
-#define TPS80031_USB_VBUS_INT_LATCH_CLR 0x0A
-#define TPS80031_USB_VBUS_INT_EN_LO_SET 0x0B
-#define TPS80031_USB_VBUS_INT_EN_LO_CLR 0x0C
-#define TPS80031_USB_VBUS_INT_EN_HI_SET 0x0D
-#define TPS80031_USB_VBUS_INT_EN_HI_CLR 0x0E
-#define TPS80031_USB_ID_INT_SRC 0x0F
-#define TPS80031_USB_ID_INT_LATCH_SET 0x10
-#define TPS80031_USB_ID_INT_LATCH_CLR 0x11
-#define TPS80031_USB_ID_INT_EN_LO_SET 0x12
-#define TPS80031_USB_ID_INT_EN_LO_CLR 0x13
-#define TPS80031_USB_ID_INT_EN_HI_SET 0x14
-#define TPS80031_USB_ID_INT_EN_HI_CLR 0x15
-#define TPS80031_USB_OTG_ADP_CTRL 0x16
-#define TPS80031_USB_OTG_ADP_HIGH 0x17
-#define TPS80031_USB_OTG_ADP_LOW 0x18
-#define TPS80031_USB_OTG_ADP_RISE 0x19
-#define TPS80031_USB_OTG_REVISION 0x1A
-
-/* Gas Gauge */
-#define TPS80031_FG_REG_00 0xC0
-#define TPS80031_FG_REG_01 0xC1
-#define TPS80031_FG_REG_02 0xC2
-#define TPS80031_FG_REG_03 0xC3
-#define TPS80031_FG_REG_04 0xC4
-#define TPS80031_FG_REG_05 0xC5
-#define TPS80031_FG_REG_06 0xC6
-#define TPS80031_FG_REG_07 0xC7
-#define TPS80031_FG_REG_08 0xC8
-#define TPS80031_FG_REG_09 0xC9
-#define TPS80031_FG_REG_10 0xCA
-#define TPS80031_FG_REG_11 0xCB
-
-/* General Purpose ADC */
-#define TPS80031_GPADC_CTRL 0x2E
-#define TPS80031_GPADC_CTRL2 0x2F
-#define TPS80031_RTSELECT_LSB 0x32
-#define TPS80031_RTSELECT_ISB 0x33
-#define TPS80031_RTSELECT_MSB 0x34
-#define TPS80031_GPSELECT_ISB 0x35
-#define TPS80031_CTRL_P1 0x36
-#define TPS80031_RTCH0_LSB 0x37
-#define TPS80031_RTCH0_MSB 0x38
-#define TPS80031_RTCH1_LSB 0x39
-#define TPS80031_RTCH1_MSB 0x3A
-#define TPS80031_GPCH0_LSB 0x3B
-#define TPS80031_GPCH0_MSB 0x3C
-
-/* SIM, MMC and Battery Detection */
-#define TPS80031_SIMDEBOUNCING 0xEB
-#define TPS80031_SIMCTRL 0xEC
-#define TPS80031_MMCDEBOUNCING 0xED
-#define TPS80031_MMCCTRL 0xEE
-#define TPS80031_BATDEBOUNCING 0xEF
-
-/* Vibrator Driver and PWMs */
-#define TPS80031_VIBCTRL 0x9B
-#define TPS80031_VIBMODE 0x9C
-#define TPS80031_PWM1ON 0xBA
-#define TPS80031_PWM1OFF 0xBB
-#define TPS80031_PWM2ON 0xBD
-#define TPS80031_PWM2OFF 0xBE
-
-/* Control Interface */
-#define TPS80031_INT_STS_A 0xD0
-#define TPS80031_INT_STS_B 0xD1
-#define TPS80031_INT_STS_C 0xD2
-#define TPS80031_INT_MSK_LINE_A 0xD3
-#define TPS80031_INT_MSK_LINE_B 0xD4
-#define TPS80031_INT_MSK_LINE_C 0xD5
-#define TPS80031_INT_MSK_STS_A 0xD6
-#define TPS80031_INT_MSK_STS_B 0xD7
-#define TPS80031_INT_MSK_STS_C 0xD8
-#define TPS80031_TOGGLE1 0x90
-#define TPS80031_TOGGLE2 0x91
-#define TPS80031_TOGGLE3 0x92
-#define TPS80031_PWDNSTATUS1 0x93
-#define TPS80031_PWDNSTATUS2 0x94
-#define TPS80031_VALIDITY0 0x17
-#define TPS80031_VALIDITY1 0x18
-#define TPS80031_VALIDITY2 0x19
-#define TPS80031_VALIDITY3 0x1A
-#define TPS80031_VALIDITY4 0x1B
-#define TPS80031_VALIDITY5 0x1C
-#define TPS80031_VALIDITY6 0x1D
-#define TPS80031_VALIDITY7 0x1E
-
-/* Version number related register */
-#define TPS80031_JTAGVERNUM 0x87
-#define TPS80031_EPROM_REV 0xDF
-
-/* GPADC Trimming Bits. */
-#define TPS80031_GPADC_TRIM0 0xCC
-#define TPS80031_GPADC_TRIM1 0xCD
-#define TPS80031_GPADC_TRIM2 0xCE
-#define TPS80031_GPADC_TRIM3 0xCF
-#define TPS80031_GPADC_TRIM4 0xD0
-#define TPS80031_GPADC_TRIM5 0xD1
-#define TPS80031_GPADC_TRIM6 0xD2
-#define TPS80031_GPADC_TRIM7 0xD3
-#define TPS80031_GPADC_TRIM8 0xD4
-#define TPS80031_GPADC_TRIM9 0xD5
-#define TPS80031_GPADC_TRIM10 0xD6
-#define TPS80031_GPADC_TRIM11 0xD7
-#define TPS80031_GPADC_TRIM12 0xD8
-#define TPS80031_GPADC_TRIM13 0xD9
-#define TPS80031_GPADC_TRIM14 0xDA
-#define TPS80031_GPADC_TRIM15 0xDB
-#define TPS80031_GPADC_TRIM16 0xDC
-#define TPS80031_GPADC_TRIM17 0xDD
-#define TPS80031_GPADC_TRIM18 0xDE
-
-/* TPS80031_CONTROLLER_STAT1 bit fields */
-#define TPS80031_CONTROLLER_STAT1_BAT_TEMP 0
-#define TPS80031_CONTROLLER_STAT1_BAT_REMOVED 1
-#define TPS80031_CONTROLLER_STAT1_VBUS_DET 2
-#define TPS80031_CONTROLLER_STAT1_VAC_DET 3
-#define TPS80031_CONTROLLER_STAT1_FAULT_WDG 4
-#define TPS80031_CONTROLLER_STAT1_LINCH_GATED 6
-/* TPS80031_CONTROLLER_INT_MASK bit filed */
-#define TPS80031_CONTROLLER_INT_MASK_MVAC_DET 0
-#define TPS80031_CONTROLLER_INT_MASK_MVBUS_DET 1
-#define TPS80031_CONTROLLER_INT_MASK_MBAT_TEMP 2
-#define TPS80031_CONTROLLER_INT_MASK_MFAULT_WDG 3
-#define TPS80031_CONTROLLER_INT_MASK_MBAT_REMOVED 4
-#define TPS80031_CONTROLLER_INT_MASK_MLINCH_GATED 5
-
-#define TPS80031_CHARGE_CONTROL_SUB_INT_MASK 0x3F
-
-/* TPS80031_PHOENIX_DEV_ON bit field */
-#define TPS80031_DEVOFF 0x1
-
-#define TPS80031_EXT_CONTROL_CFG_TRANS 0
-#define TPS80031_EXT_CONTROL_CFG_STATE 1
-
-/* State register field */
-#define TPS80031_STATE_OFF 0x00
-#define TPS80031_STATE_ON 0x01
-#define TPS80031_STATE_MASK 0x03
-
-/* Trans register field */
-#define TPS80031_TRANS_ACTIVE_OFF 0x00
-#define TPS80031_TRANS_ACTIVE_ON 0x01
-#define TPS80031_TRANS_ACTIVE_MASK 0x03
-#define TPS80031_TRANS_SLEEP_OFF 0x00
-#define TPS80031_TRANS_SLEEP_ON 0x04
-#define TPS80031_TRANS_SLEEP_MASK 0x0C
-#define TPS80031_TRANS_OFF_OFF 0x00
-#define TPS80031_TRANS_OFF_ACTIVE 0x10
-#define TPS80031_TRANS_OFF_MASK 0x30
-
-#define TPS80031_EXT_PWR_REQ (TPS80031_PWR_REQ_INPUT_PREQ1 | \
- TPS80031_PWR_REQ_INPUT_PREQ2 | \
- TPS80031_PWR_REQ_INPUT_PREQ3)
-
-/* TPS80031_BBSPOR_CFG bit field */
-#define TPS80031_BBSPOR_CHG_EN 0x8
-#define TPS80031_MAX_REGISTER 0xFF
-
-struct i2c_client;
-
-/* Supported chips */
-enum chips {
- TPS80031 = 0x00000001,
- TPS80032 = 0x00000002,
-};
-
-enum {
- TPS80031_INT_PWRON,
- TPS80031_INT_RPWRON,
- TPS80031_INT_SYS_VLOW,
- TPS80031_INT_RTC_ALARM,
- TPS80031_INT_RTC_PERIOD,
- TPS80031_INT_HOT_DIE,
- TPS80031_INT_VXX_SHORT,
- TPS80031_INT_SPDURATION,
- TPS80031_INT_WATCHDOG,
- TPS80031_INT_BAT,
- TPS80031_INT_SIM,
- TPS80031_INT_MMC,
- TPS80031_INT_RES,
- TPS80031_INT_GPADC_RT,
- TPS80031_INT_GPADC_SW2_EOC,
- TPS80031_INT_CC_AUTOCAL,
- TPS80031_INT_ID_WKUP,
- TPS80031_INT_VBUSS_WKUP,
- TPS80031_INT_ID,
- TPS80031_INT_VBUS,
- TPS80031_INT_CHRG_CTRL,
- TPS80031_INT_EXT_CHRG,
- TPS80031_INT_INT_CHRG,
- TPS80031_INT_RES2,
- TPS80031_INT_BAT_TEMP_OVRANGE,
- TPS80031_INT_BAT_REMOVED,
- TPS80031_INT_VBUS_DET,
- TPS80031_INT_VAC_DET,
- TPS80031_INT_FAULT_WDG,
- TPS80031_INT_LINCH_GATED,
-
- /* Last interrupt id to get the end number */
- TPS80031_INT_NR,
-};
-
-/* TPS80031 Slave IDs */
-#define TPS80031_NUM_SLAVES 4
-#define TPS80031_SLAVE_ID0 0
-#define TPS80031_SLAVE_ID1 1
-#define TPS80031_SLAVE_ID2 2
-#define TPS80031_SLAVE_ID3 3
-
-/* TPS80031 I2C addresses */
-#define TPS80031_I2C_ID0_ADDR 0x12
-#define TPS80031_I2C_ID1_ADDR 0x48
-#define TPS80031_I2C_ID2_ADDR 0x49
-#define TPS80031_I2C_ID3_ADDR 0x4A
-
-enum {
- TPS80031_REGULATOR_VIO,
- TPS80031_REGULATOR_SMPS1,
- TPS80031_REGULATOR_SMPS2,
- TPS80031_REGULATOR_SMPS3,
- TPS80031_REGULATOR_SMPS4,
- TPS80031_REGULATOR_VANA,
- TPS80031_REGULATOR_LDO1,
- TPS80031_REGULATOR_LDO2,
- TPS80031_REGULATOR_LDO3,
- TPS80031_REGULATOR_LDO4,
- TPS80031_REGULATOR_LDO5,
- TPS80031_REGULATOR_LDO6,
- TPS80031_REGULATOR_LDO7,
- TPS80031_REGULATOR_LDOLN,
- TPS80031_REGULATOR_LDOUSB,
- TPS80031_REGULATOR_VBUS,
- TPS80031_REGULATOR_REGEN1,
- TPS80031_REGULATOR_REGEN2,
- TPS80031_REGULATOR_SYSEN,
- TPS80031_REGULATOR_MAX,
-};
-
-/* Different configurations for the rails */
-enum {
- /* USBLDO input selection */
- TPS80031_USBLDO_INPUT_VSYS = 0x00000001,
- TPS80031_USBLDO_INPUT_PMID = 0x00000002,
-
- /* LDO3 output mode */
- TPS80031_LDO3_OUTPUT_VIB = 0x00000004,
-
- /* VBUS configuration */
- TPS80031_VBUS_DISCHRG_EN_PDN = 0x00000004,
- TPS80031_VBUS_SW_ONLY = 0x00000008,
- TPS80031_VBUS_SW_N_ID = 0x00000010,
-};
-
-/* External controls requests */
-enum tps80031_ext_control {
- TPS80031_PWR_REQ_INPUT_NONE = 0x00000000,
- TPS80031_PWR_REQ_INPUT_PREQ1 = 0x00000001,
- TPS80031_PWR_REQ_INPUT_PREQ2 = 0x00000002,
- TPS80031_PWR_REQ_INPUT_PREQ3 = 0x00000004,
- TPS80031_PWR_OFF_ON_SLEEP = 0x00000008,
- TPS80031_PWR_ON_ON_SLEEP = 0x00000010,
-};
-
-enum tps80031_pupd_pins {
- TPS80031_PREQ1 = 0,
- TPS80031_PREQ2A,
- TPS80031_PREQ2B,
- TPS80031_PREQ2C,
- TPS80031_PREQ3,
- TPS80031_NRES_WARM,
- TPS80031_PWM_FORCE,
- TPS80031_CHRG_EXT_CHRG_STATZ,
- TPS80031_SIM,
- TPS80031_MMC,
- TPS80031_GPADC_START,
- TPS80031_DVSI2C_SCL,
- TPS80031_DVSI2C_SDA,
- TPS80031_CTLI2C_SCL,
- TPS80031_CTLI2C_SDA,
-};
-
-enum tps80031_pupd_settings {
- TPS80031_PUPD_NORMAL,
- TPS80031_PUPD_PULLDOWN,
- TPS80031_PUPD_PULLUP,
-};
-
-struct tps80031 {
- struct device *dev;
- unsigned long chip_info;
- int es_version;
- struct i2c_client *clients[TPS80031_NUM_SLAVES];
- struct regmap *regmap[TPS80031_NUM_SLAVES];
- struct regmap_irq_chip_data *irq_data;
-};
-
-struct tps80031_pupd_init_data {
- int input_pin;
- int setting;
-};
-
-/*
- * struct tps80031_regulator_platform_data - tps80031 regulator platform data.
- *
- * @reg_init_data: The regulator init data.
- * @ext_ctrl_flag: External control flag for sleep/power request control.
- * @config_flags: Configuration flag to configure the rails.
- * It should be ORed of config enums.
- */
-
-struct tps80031_regulator_platform_data {
- struct regulator_init_data *reg_init_data;
- unsigned int ext_ctrl_flag;
- unsigned int config_flags;
-};
-
-struct tps80031_platform_data {
- int irq_base;
- bool use_power_off;
- struct tps80031_pupd_init_data *pupd_init_data;
- int pupd_init_data_size;
- struct tps80031_regulator_platform_data
- *regulator_pdata[TPS80031_REGULATOR_MAX];
-};
-
-static inline int tps80031_write(struct device *dev, int sid,
- int reg, uint8_t val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_write(tps80031->regmap[sid], reg, val);
-}
-
-static inline int tps80031_writes(struct device *dev, int sid, int reg,
- int len, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_bulk_write(tps80031->regmap[sid], reg, val, len);
-}
-
-static inline int tps80031_read(struct device *dev, int sid,
- int reg, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
- unsigned int ival;
- int ret;
-
- ret = regmap_read(tps80031->regmap[sid], reg, &ival);
- if (ret < 0) {
- dev_err(dev, "failed reading from reg 0x%02x\n", reg);
- return ret;
- }
-
- *val = ival;
- return ret;
-}
-
-static inline int tps80031_reads(struct device *dev, int sid,
- int reg, int len, uint8_t *val)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_bulk_read(tps80031->regmap[sid], reg, val, len);
-}
-
-static inline int tps80031_set_bits(struct device *dev, int sid,
- int reg, uint8_t bit_mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg,
- bit_mask, bit_mask);
-}
-
-static inline int tps80031_clr_bits(struct device *dev, int sid,
- int reg, uint8_t bit_mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg, bit_mask, 0);
-}
-
-static inline int tps80031_update(struct device *dev, int sid,
- int reg, uint8_t val, uint8_t mask)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_update_bits(tps80031->regmap[sid], reg, mask, val);
-}
-
-static inline unsigned long tps80031_get_chip_info(struct device *dev)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return tps80031->chip_info;
-}
-
-static inline int tps80031_get_pmu_version(struct device *dev)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return tps80031->es_version;
-}
-
-static inline int tps80031_irq_get_virq(struct device *dev, int irq)
-{
- struct tps80031 *tps80031 = dev_get_drvdata(dev);
-
- return regmap_irq_get_virq(tps80031->irq_data, irq);
-}
-
-extern int tps80031_ext_power_req_config(struct device *dev,
- unsigned long ext_ctrl_flag, int preq_bit,
- int state_reg_add, int trans_reg_add);
-#endif /*__LINUX_MFD_TPS80031_H */
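The inline wrappers above were thin veneers over regmap. A sub-driver call such as tps80031_set_bits(parent, TPS80031_SLAVE_ID2, reg, mask) expanded to roughly the following (a sketch, with the drvdata lookup written out explicitly):

	struct tps80031 *tps80031 = dev_get_drvdata(parent);

	regmap_update_bits(tps80031->regmap[TPS80031_SLAVE_ID2], reg, mask, mask);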
diff --git a/include/linux/mfd/twl.h b/include/linux/mfd/twl.h
index 8871cc5188a0..b31e07fa4d51 100644
--- a/include/linux/mfd/twl.h
+++ b/include/linux/mfd/twl.h
@@ -69,6 +69,8 @@ enum twl6030_module_ids {
TWL6030_MODULE_GPADC,
TWL6030_MODULE_GASGAUGE,
+ /* A few extra registers before the registers shared with the 6030 */
+ TWL6032_MODULE_CHARGE,
TWL6030_MODULE_LAST,
};
@@ -203,27 +205,6 @@ int twl_get_hfclk_rate(void);
int twl6030_interrupt_unmask(u8 bit_mask, u8 offset);
int twl6030_interrupt_mask(u8 bit_mask, u8 offset);
-/* Card detect Configuration for MMC1 Controller on OMAP4 */
-#ifdef CONFIG_TWL4030_CORE
-int twl6030_mmc_card_detect_config(void);
-#else
-static inline int twl6030_mmc_card_detect_config(void)
-{
- pr_debug("twl6030_mmc_card_detect_config not supported\n");
- return 0;
-}
-#endif
-
-/* MMC1 Controller on OMAP4 uses Phoenix irq for Card detect */
-#ifdef CONFIG_TWL4030_CORE
-int twl6030_mmc_card_detect(struct device *dev, int slot);
-#else
-static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
-{
- pr_debug("Call back twl6030_mmc_card_detect not supported\n");
- return -EIO;
-}
-#endif
/*----------------------------------------------------------------------*/
/*
@@ -459,6 +440,7 @@ static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
#define TWL4030_PM_MASTER_GLOBAL_TST 0xb6
+#define TWL6030_PHOENIX_DEV_ON 0x06
/*----------------------------------------------------------------------*/
/* Power bus message definitions */
@@ -591,11 +573,6 @@ struct twl4030_gpio_platform_data {
*/
u32 pullups;
u32 pulldowns;
-
- int (*setup)(struct device *dev,
- unsigned gpio, unsigned ngpio);
- int (*teardown)(struct device *dev,
- unsigned gpio, unsigned ngpio);
};
struct twl4030_madc_platform_data {
@@ -694,61 +671,6 @@ struct twl4030_audio_data {
unsigned int irq_base;
};
-struct twl4030_platform_data {
- struct twl4030_clock_init_data *clock;
- struct twl4030_bci_platform_data *bci;
- struct twl4030_gpio_platform_data *gpio;
- struct twl4030_madc_platform_data *madc;
- struct twl4030_keypad_data *keypad;
- struct twl4030_usb_data *usb;
- struct twl4030_power_data *power;
- struct twl4030_audio_data *audio;
-
- /* Common LDO regulators for TWL4030/TWL6030 */
- struct regulator_init_data *vdac;
- struct regulator_init_data *vaux1;
- struct regulator_init_data *vaux2;
- struct regulator_init_data *vaux3;
- struct regulator_init_data *vdd1;
- struct regulator_init_data *vdd2;
- struct regulator_init_data *vdd3;
- /* TWL4030 LDO regulators */
- struct regulator_init_data *vpll1;
- struct regulator_init_data *vpll2;
- struct regulator_init_data *vmmc1;
- struct regulator_init_data *vmmc2;
- struct regulator_init_data *vsim;
- struct regulator_init_data *vaux4;
- struct regulator_init_data *vio;
- struct regulator_init_data *vintana1;
- struct regulator_init_data *vintana2;
- struct regulator_init_data *vintdig;
- /* TWL6030 LDO regulators */
- struct regulator_init_data *vmmc;
- struct regulator_init_data *vpp;
- struct regulator_init_data *vusim;
- struct regulator_init_data *vana;
- struct regulator_init_data *vcxio;
- struct regulator_init_data *vusb;
- struct regulator_init_data *clk32kg;
- struct regulator_init_data *v1v8;
- struct regulator_init_data *v2v1;
- /* TWL6032 LDO regulators */
- struct regulator_init_data *ldo1;
- struct regulator_init_data *ldo2;
- struct regulator_init_data *ldo3;
- struct regulator_init_data *ldo4;
- struct regulator_init_data *ldo5;
- struct regulator_init_data *ldo6;
- struct regulator_init_data *ldo7;
- struct regulator_init_data *ldoln;
- struct regulator_init_data *ldousb;
- /* TWL6032 DCDC regulators */
- struct regulator_init_data *smps3;
- struct regulator_init_data *smps4;
- struct regulator_init_data *vio6025;
-};
-
struct twl_regulator_driver_data {
int (*set_voltage)(void *data, int target_uV);
int (*get_voltage)(void *data);
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index 1fc7450bd8ab..286a724e379a 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -174,35 +174,7 @@
#define TWL6040_GPO_MAX 3
-/* TODO: All platform data struct can be removed */
-struct twl6040_codec_data {
- u16 hs_left_step;
- u16 hs_right_step;
- u16 hf_left_step;
- u16 hf_right_step;
-};
-
-struct twl6040_vibra_data {
- unsigned int vibldrv_res; /* left driver resistance */
- unsigned int vibrdrv_res; /* right driver resistance */
- unsigned int viblmotor_res; /* left motor resistance */
- unsigned int vibrmotor_res; /* right motor resistance */
- int vddvibl_uV; /* VDDVIBL volt, set 0 for fixed reg */
- int vddvibr_uV; /* VDDVIBR volt, set 0 for fixed reg */
-};
-
-struct twl6040_gpo_data {
- int gpio_base;
-};
-
-struct twl6040_platform_data {
- int audpwron_gpio; /* audio power-on gpio */
-
- struct twl6040_codec_data *codec;
- struct twl6040_vibra_data *vibra;
- struct twl6040_gpo_data *gpo;
-};
-
+struct gpio_desc;
struct regmap;
struct regmap_irq_chips_data;
@@ -218,7 +190,7 @@ struct twl6040 {
struct mfd_cell cells[TWL6040_CELLS];
struct completion ready;
- int audpwron;
+ struct gpio_desc *audpwron;
int power_count;
int rev;
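With audpwron now a gpio_desc, the core driver is expected to request the line by function and drive it through the gpiod API. A minimal sketch, assuming an "audpwron" con_id and devm lifetime (both illustrative, not taken from this diff):

static int twl6040_example_get_audpwron(struct device *dev,
					struct twl6040 *twl6040)
{
	/* Optional line: a NULL descriptor is returned if it is not wired */
	twl6040->audpwron = devm_gpiod_get_optional(dev, "audpwron",
						    GPIOD_OUT_LOW);
	if (IS_ERR(twl6040->audpwron))
		return PTR_ERR(twl6040->audpwron);

	return 0;
}

static void twl6040_example_power_on(struct twl6040 *twl6040)
{
	/* gpiod_* calls are no-ops on a NULL (absent) descriptor */
	gpiod_set_value_cansleep(twl6040->audpwron, 1);
}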
diff --git a/include/linux/mfd/ucb1x00.h b/include/linux/mfd/ucb1x00.h
index 43bcf35afe27..ede237384723 100644
--- a/include/linux/mfd/ucb1x00.h
+++ b/include/linux/mfd/ucb1x00.h
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/mfd/mcp.h>
#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mutex.h>
#define UCB_IO_DATA 0x00
diff --git a/include/linux/mfd/upboard-fpga.h b/include/linux/mfd/upboard-fpga.h
new file mode 100644
index 000000000000..12231e40f5da
--- /dev/null
+++ b/include/linux/mfd/upboard-fpga.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UP Board CPLD/FPGA driver
+ *
+ * Copyright (c) AAEON. All rights reserved.
+ * Copyright (C) 2024 Bootlin
+ *
+ * Author: Gary Wang <garywang@aaeon.com.tw>
+ * Author: Thomas Richard <thomas.richard@bootlin.com>
+ *
+ */
+
+#ifndef __LINUX_MFD_UPBOARD_FPGA_H
+#define __LINUX_MFD_UPBOARD_FPGA_H
+
+#define UPBOARD_REGISTER_SIZE 16
+
+enum upboard_fpgareg {
+ UPBOARD_REG_PLATFORM_ID = 0x10,
+ UPBOARD_REG_FIRMWARE_ID = 0x11,
+ UPBOARD_REG_FUNC_EN0 = 0x20,
+ UPBOARD_REG_FUNC_EN1 = 0x21,
+ UPBOARD_REG_GPIO_EN0 = 0x30,
+ UPBOARD_REG_GPIO_EN1 = 0x31,
+ UPBOARD_REG_GPIO_EN2 = 0x32,
+ UPBOARD_REG_GPIO_DIR0 = 0x40,
+ UPBOARD_REG_GPIO_DIR1 = 0x41,
+ UPBOARD_REG_GPIO_DIR2 = 0x42,
+ UPBOARD_REG_MAX,
+};
+
+enum upboard_fpga_type {
+ UPBOARD_UP_FPGA,
+ UPBOARD_UP2_FPGA,
+};
+
+struct upboard_fpga_data {
+ enum upboard_fpga_type type;
+ const struct regmap_config *regmap_config;
+};
+
+struct upboard_fpga {
+ struct device *dev;
+ struct regmap *regmap;
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *clear_gpio;
+ struct gpio_desc *strobe_gpio;
+ struct gpio_desc *datain_gpio;
+ struct gpio_desc *dataout_gpio;
+ unsigned int firmware_version;
+ const struct upboard_fpga_data *fpga_data;
+};
+
+#endif /* __LINUX_MFD_UPBOARD_FPGA_H */
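A cell driver sitting on this MFD reaches the CPLD/FPGA registers through the shared regmap. A minimal sketch of reading back the firmware ID; the helper name and error-handling style are illustrative:

static int upboard_example_read_firmware_id(struct upboard_fpga *fpga)
{
	unsigned int val;
	int ret;

	ret = regmap_read(fpga->regmap, UPBOARD_REG_FIRMWARE_ID, &val);
	if (ret)
		return ret;

	fpga->firmware_version = val;
	return 0;
}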
diff --git a/include/linux/mfd/wcd934x/registers.h b/include/linux/mfd/wcd934x/registers.h
index bb8d2e276668..76a943c83c63 100644
--- a/include/linux/mfd/wcd934x/registers.h
+++ b/include/linux/mfd/wcd934x/registers.h
@@ -18,6 +18,8 @@
#define WCD934X_EFUSE_SENSE_STATE_DEF 0x10
#define WCD934X_EFUSE_SENSE_EN_MASK BIT(0)
#define WCD934X_EFUSE_SENSE_ENABLE BIT(0)
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT1 0x002a
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT2 0x002b
#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT14 0x0037
#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT15 0x0038
#define WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS 0x0039
@@ -103,21 +105,58 @@
#define WCD934X_ANA_AMIC3 0x0610
#define WCD934X_ANA_AMIC4 0x0611
#define WCD934X_ANA_MBHC_MECH 0x0614
+#define WCD934X_MBHC_L_DET_EN_MASK BIT(7)
+#define WCD934X_MBHC_L_DET_EN BIT(7)
+#define WCD934X_MBHC_GND_DET_EN_MASK BIT(6)
+#define WCD934X_MBHC_MECH_DETECT_TYPE_MASK BIT(5)
+#define WCD934X_MBHC_MECH_DETECT_TYPE_INS 1
+#define WCD934X_MBHC_HPHL_PLUG_TYPE_MASK BIT(4)
+#define WCD934X_MBHC_HPHL_PLUG_TYPE_NO 1
+#define WCD934X_MBHC_GND_PLUG_TYPE_MASK BIT(3)
+#define WCD934X_MBHC_GND_PLUG_TYPE_NO 1
+#define WCD934X_MBHC_HSL_PULLUP_COMP_EN BIT(2)
+#define WCD934X_MBHC_HSG_PULLUP_COMP_EN BIT(1)
+#define WCD934X_MBHC_HPHL_100K_TO_GND_EN BIT(0)
#define WCD934X_ANA_MBHC_ELECT 0x0615
+#define WCD934X_ANA_MBHC_BIAS_EN_MASK BIT(0)
+#define WCD934X_ANA_MBHC_BIAS_EN BIT(0)
#define WCD934X_ANA_MBHC_ZDET 0x0616
#define WCD934X_ANA_MBHC_RESULT_1 0x0617
#define WCD934X_ANA_MBHC_RESULT_2 0x0618
#define WCD934X_ANA_MBHC_RESULT_3 0x0619
+#define WCD934X_ANA_MBHC_BTN0 0x061a
+#define WCD934X_VTH_MASK GENMASK(7, 2)
+#define WCD934X_ANA_MBHC_BTN1 0x061b
+#define WCD934X_ANA_MBHC_BTN2 0x061c
+#define WCD934X_ANA_MBHC_BTN3 0x061d
+#define WCD934X_ANA_MBHC_BTN4 0x061e
+#define WCD934X_ANA_MBHC_BTN5 0x061f
+#define WCD934X_ANA_MBHC_BTN6 0x0620
+#define WCD934X_ANA_MBHC_BTN7 0x0621
+#define WCD934X_MBHC_BTN_VTH_MASK GENMASK(7, 2)
#define WCD934X_ANA_MICB1 0x0622
#define WCD934X_MICB_VAL_MASK GENMASK(5, 0)
#define WCD934X_ANA_MICB_EN_MASK GENMASK(7, 6)
+#define WCD934X_MICB_DISABLE 0
+#define WCD934X_MICB_ENABLE 1
+#define WCD934X_MICB_PULL_UP 2
+#define WCD934X_MICB_PULL_DOWN 3
#define WCD934X_ANA_MICB_PULL_UP 0x80
#define WCD934X_ANA_MICB_ENABLE 0x40
#define WCD934X_ANA_MICB_DISABLE 0x0
#define WCD934X_ANA_MICB2 0x0623
+#define WCD934X_ANA_MICB2_ENABLE BIT(6)
+#define WCD934X_ANA_MICB2_ENABLE_MASK GENMASK(7, 6)
+#define WCD934X_ANA_MICB2_VOUT_MASK GENMASK(5, 0)
+#define WCD934X_ANA_MICB2_RAMP 0x0624
+#define WCD934X_RAMP_EN_MASK BIT(7)
+#define WCD934X_RAMP_SHIFT_CTRL_MASK GENMASK(4, 2)
#define WCD934X_ANA_MICB3 0x0625
#define WCD934X_ANA_MICB4 0x0626
#define WCD934X_BIAS_VBG_FINE_ADJ 0x0629
+#define WCD934X_MBHC_CTL_CLK 0x0656
+#define WCD934X_MBHC_CTL_BCS 0x065a
+#define WCD934X_MBHC_STATUS_SPARE_1 0x065b
#define WCD934X_MICB1_TEST_CTL_1 0x066b
#define WCD934X_MICB1_TEST_CTL_2 0x066c
#define WCD934X_MICB2_TEST_CTL_1 0x066e
@@ -141,7 +180,11 @@
#define WCD934X_HPH_CNP_WG_CTL 0x06cc
#define WCD934X_HPH_GM3_BOOST_EN_MASK BIT(7)
#define WCD934X_HPH_GM3_BOOST_ENABLE BIT(7)
+#define WCD934X_HPH_CNP_WG_TIME 0x06cd
#define WCD934X_HPH_OCP_CTL 0x06ce
+#define WCD934X_HPH_PA_CTL2 0x06d2
+#define WCD934X_HPHPA_GND_R_MASK BIT(6)
+#define WCD934X_HPHPA_GND_L_MASK BIT(4)
#define WCD934X_HPH_L_EN 0x06d3
#define WCD934X_HPH_GAIN_SRC_SEL_MASK BIT(5)
#define WCD934X_HPH_GAIN_SRC_SEL_COMPANDER 0
@@ -152,6 +195,8 @@
#define WCD934X_HPH_OCP_DET_MASK BIT(0)
#define WCD934X_HPH_OCP_DET_ENABLE BIT(0)
#define WCD934X_HPH_OCP_DET_DISABLE 0
+#define WCD934X_HPH_R_ATEST 0x06d8
+#define WCD934X_HPHPA_GND_OVR_MASK BIT(1)
#define WCD934X_DIFF_LO_LO2_COMPANDER 0x06ea
#define WCD934X_DIFF_LO_LO1_COMPANDER 0x06eb
#define WCD934X_CLK_SYS_MCLK_PRG 0x0711
@@ -172,7 +217,19 @@
#define WCD934X_SIDO_NEW_VOUT_D_FREQ2 0x071e
#define WCD934X_SIDO_RIPPLE_FREQ_EN_MASK BIT(0)
#define WCD934X_SIDO_RIPPLE_FREQ_ENABLE BIT(0)
+#define WCD934X_MBHC_NEW_CTL_1 0x0720
+#define WCD934X_MBHC_CTL_RCO_EN_MASK BIT(7)
+#define WCD934X_MBHC_CTL_RCO_EN BIT(7)
#define WCD934X_MBHC_NEW_CTL_2 0x0721
+#define WCD934X_M_RTH_CTL_MASK GENMASK(3, 2)
+#define WCD934X_MBHC_NEW_PLUG_DETECT_CTL 0x0722
+#define WCD934X_HSDET_PULLUP_C_MASK GENMASK(7, 6)
+#define WCD934X_MBHC_NEW_ZDET_ANA_CTL 0x0723
+#define WCD934X_ZDET_RANGE_CTL_MASK GENMASK(3, 0)
+#define WCD934X_ZDET_MAXV_CTL_MASK GENMASK(6, 4)
+#define WCD934X_MBHC_NEW_ZDET_RAMP_CTL 0x0724
+#define WCD934X_MBHC_NEW_FSM_STATUS 0x0725
+#define WCD934X_MBHC_NEW_ADC_RESULT 0x0726
#define WCD934X_TX_NEW_AMIC_4_5_SEL 0x0727
#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L 0x0733
#define WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL 0x0735
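The new MBHC button registers carry a 6-bit threshold code in bits 7:2 (WCD934X_VTH_MASK / WCD934X_MBHC_BTN_VTH_MASK). A hedged sketch of programming button 0 through the codec regmap; the helper name and the raw-code parameter are illustrative:

#include <linux/bitfield.h>
#include <linux/regmap.h>

static int wcd934x_example_set_btn0_vth(struct regmap *regmap, u8 vth_code)
{
	/* Place the 6-bit code into bits 7:2 of the BTN0 register */
	return regmap_update_bits(regmap, WCD934X_ANA_MBHC_BTN0,
				  WCD934X_VTH_MASK,
				  FIELD_PREP(WCD934X_VTH_MASK, vth_code));
}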
diff --git a/include/linux/mfd/wl1273-core.h b/include/linux/mfd/wl1273-core.h
deleted file mode 100644
index c28cf76d5c31..000000000000
--- a/include/linux/mfd/wl1273-core.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * include/linux/mfd/wl1273-core.h
- *
- * Some definitions for the wl1273 radio receiver/transmitter chip.
- *
- * Copyright (C) 2010 Nokia Corporation
- * Author: Matti J. Aaltonen <matti.j.aaltonen@nokia.com>
- */
-
-#ifndef WL1273_CORE_H
-#define WL1273_CORE_H
-
-#include <linux/i2c.h>
-#include <linux/mfd/core.h>
-
-#define WL1273_FM_DRIVER_NAME "wl1273-fm"
-#define RX71_FM_I2C_ADDR 0x22
-
-#define WL1273_STEREO_GET 0
-#define WL1273_RSSI_LVL_GET 1
-#define WL1273_IF_COUNT_GET 2
-#define WL1273_FLAG_GET 3
-#define WL1273_RDS_SYNC_GET 4
-#define WL1273_RDS_DATA_GET 5
-#define WL1273_FREQ_SET 10
-#define WL1273_AF_FREQ_SET 11
-#define WL1273_MOST_MODE_SET 12
-#define WL1273_MOST_BLEND_SET 13
-#define WL1273_DEMPH_MODE_SET 14
-#define WL1273_SEARCH_LVL_SET 15
-#define WL1273_BAND_SET 16
-#define WL1273_MUTE_STATUS_SET 17
-#define WL1273_RDS_PAUSE_LVL_SET 18
-#define WL1273_RDS_PAUSE_DUR_SET 19
-#define WL1273_RDS_MEM_SET 20
-#define WL1273_RDS_BLK_B_SET 21
-#define WL1273_RDS_MSK_B_SET 22
-#define WL1273_RDS_PI_MASK_SET 23
-#define WL1273_RDS_PI_SET 24
-#define WL1273_RDS_SYSTEM_SET 25
-#define WL1273_INT_MASK_SET 26
-#define WL1273_SEARCH_DIR_SET 27
-#define WL1273_VOLUME_SET 28
-#define WL1273_AUDIO_ENABLE 29
-#define WL1273_PCM_MODE_SET 30
-#define WL1273_I2S_MODE_CONFIG_SET 31
-#define WL1273_POWER_SET 32
-#define WL1273_INTX_CONFIG_SET 33
-#define WL1273_PULL_EN_SET 34
-#define WL1273_HILO_SET 35
-#define WL1273_SWITCH2FREF 36
-#define WL1273_FREQ_DRIFT_REPORT 37
-
-#define WL1273_PCE_GET 40
-#define WL1273_FIRM_VER_GET 41
-#define WL1273_ASIC_VER_GET 42
-#define WL1273_ASIC_ID_GET 43
-#define WL1273_MAN_ID_GET 44
-#define WL1273_TUNER_MODE_SET 45
-#define WL1273_STOP_SEARCH 46
-#define WL1273_RDS_CNTRL_SET 47
-
-#define WL1273_WRITE_HARDWARE_REG 100
-#define WL1273_CODE_DOWNLOAD 101
-#define WL1273_RESET 102
-
-#define WL1273_FM_POWER_MODE 254
-#define WL1273_FM_INTERRUPT 255
-
-/* Transmitter API */
-
-#define WL1273_CHANL_SET 55
-#define WL1273_SCAN_SPACING_SET 56
-#define WL1273_REF_SET 57
-#define WL1273_POWER_ENB_SET 90
-#define WL1273_POWER_ATT_SET 58
-#define WL1273_POWER_LEV_SET 59
-#define WL1273_AUDIO_DEV_SET 60
-#define WL1273_PILOT_DEV_SET 61
-#define WL1273_RDS_DEV_SET 62
-#define WL1273_PUPD_SET 91
-#define WL1273_AUDIO_IO_SET 63
-#define WL1273_PREMPH_SET 64
-#define WL1273_MONO_SET 66
-#define WL1273_MUTE 92
-#define WL1273_MPX_LMT_ENABLE 67
-#define WL1273_PI_SET 93
-#define WL1273_ECC_SET 69
-#define WL1273_PTY 70
-#define WL1273_AF 71
-#define WL1273_DISPLAY_MODE 74
-#define WL1273_RDS_REP_SET 77
-#define WL1273_RDS_CONFIG_DATA_SET 98
-#define WL1273_RDS_DATA_SET 99
-#define WL1273_RDS_DATA_ENB 94
-#define WL1273_TA_SET 78
-#define WL1273_TP_SET 79
-#define WL1273_DI_SET 80
-#define WL1273_MS_SET 81
-#define WL1273_PS_SCROLL_SPEED 82
-#define WL1273_TX_AUDIO_LEVEL_TEST 96
-#define WL1273_TX_AUDIO_LEVEL_TEST_THRESHOLD 73
-#define WL1273_TX_AUDIO_INPUT_LEVEL_RANGE_SET 54
-#define WL1273_RX_ANTENNA_SELECT 87
-#define WL1273_I2C_DEV_ADDR_SET 86
-#define WL1273_REF_ERR_CALIB_PARAM_SET 88
-#define WL1273_REF_ERR_CALIB_PERIODICITY_SET 89
-#define WL1273_SOC_INT_TRIGGER 52
-#define WL1273_SOC_AUDIO_PATH_SET 83
-#define WL1273_SOC_PCMI_OVERRIDE 84
-#define WL1273_SOC_I2S_OVERRIDE 85
-#define WL1273_RSSI_BLOCK_SCAN_FREQ_SET 95
-#define WL1273_RSSI_BLOCK_SCAN_START 97
-#define WL1273_RSSI_BLOCK_SCAN_DATA_GET 5
-#define WL1273_READ_FMANT_TUNE_VALUE 104
-
-#define WL1273_RDS_OFF 0
-#define WL1273_RDS_ON 1
-#define WL1273_RDS_RESET 2
-
-#define WL1273_AUDIO_DIGITAL 0
-#define WL1273_AUDIO_ANALOG 1
-
-#define WL1273_MODE_RX BIT(0)
-#define WL1273_MODE_TX BIT(1)
-#define WL1273_MODE_OFF BIT(2)
-#define WL1273_MODE_SUSPENDED BIT(3)
-
-#define WL1273_RADIO_CHILD BIT(0)
-#define WL1273_CODEC_CHILD BIT(1)
-
-#define WL1273_RX_MONO 1
-#define WL1273_RX_STEREO 0
-#define WL1273_TX_MONO 0
-#define WL1273_TX_STEREO 1
-
-#define WL1273_MAX_VOLUME 0xffff
-#define WL1273_DEFAULT_VOLUME 0x78b8
-
-/* I2S protocol, left channel first, data width 16 bits */
-#define WL1273_PCM_DEF_MODE 0x00
-
-/* Rx */
-#define WL1273_AUDIO_ENABLE_I2S BIT(0)
-#define WL1273_AUDIO_ENABLE_ANALOG BIT(1)
-
-/* Tx */
-#define WL1273_AUDIO_IO_SET_ANALOG 0
-#define WL1273_AUDIO_IO_SET_I2S 1
-
-#define WL1273_PUPD_SET_OFF 0x00
-#define WL1273_PUPD_SET_ON 0x01
-#define WL1273_PUPD_SET_RETENTION 0x10
-
-/* I2S mode */
-#define WL1273_IS2_WIDTH_32 0x0
-#define WL1273_IS2_WIDTH_40 0x1
-#define WL1273_IS2_WIDTH_22_23 0x2
-#define WL1273_IS2_WIDTH_23_22 0x3
-#define WL1273_IS2_WIDTH_48 0x4
-#define WL1273_IS2_WIDTH_50 0x5
-#define WL1273_IS2_WIDTH_60 0x6
-#define WL1273_IS2_WIDTH_64 0x7
-#define WL1273_IS2_WIDTH_80 0x8
-#define WL1273_IS2_WIDTH_96 0x9
-#define WL1273_IS2_WIDTH_128 0xa
-#define WL1273_IS2_WIDTH 0xf
-
-#define WL1273_IS2_FORMAT_STD (0x0 << 4)
-#define WL1273_IS2_FORMAT_LEFT (0x1 << 4)
-#define WL1273_IS2_FORMAT_RIGHT (0x2 << 4)
-#define WL1273_IS2_FORMAT_USER (0x3 << 4)
-
-#define WL1273_IS2_MASTER (0x0 << 6)
-#define WL1273_IS2_SLAVEW (0x1 << 6)
-
-#define WL1273_IS2_TRI_AFTER_SENDING (0x0 << 7)
-#define WL1273_IS2_TRI_ALWAYS_ACTIVE (0x1 << 7)
-
-#define WL1273_IS2_SDOWS_RR (0x0 << 8)
-#define WL1273_IS2_SDOWS_RF (0x1 << 8)
-#define WL1273_IS2_SDOWS_FR (0x2 << 8)
-#define WL1273_IS2_SDOWS_FF (0x3 << 8)
-
-#define WL1273_IS2_TRI_OPT (0x0 << 10)
-#define WL1273_IS2_TRI_ALWAYS (0x1 << 10)
-
-#define WL1273_IS2_RATE_48K (0x0 << 12)
-#define WL1273_IS2_RATE_44_1K (0x1 << 12)
-#define WL1273_IS2_RATE_32K (0x2 << 12)
-#define WL1273_IS2_RATE_22_05K (0x4 << 12)
-#define WL1273_IS2_RATE_16K (0x5 << 12)
-#define WL1273_IS2_RATE_12K (0x8 << 12)
-#define WL1273_IS2_RATE_11_025 (0x9 << 12)
-#define WL1273_IS2_RATE_8K (0xa << 12)
-#define WL1273_IS2_RATE (0xf << 12)
-
-#define WL1273_I2S_DEF_MODE (WL1273_IS2_WIDTH_32 | \
- WL1273_IS2_FORMAT_STD | \
- WL1273_IS2_MASTER | \
- WL1273_IS2_TRI_AFTER_SENDING | \
- WL1273_IS2_SDOWS_RR | \
- WL1273_IS2_TRI_OPT | \
- WL1273_IS2_RATE_48K)
-
-#define SCHAR_MIN (-128)
-#define SCHAR_MAX 127
-
-#define WL1273_FR_EVENT BIT(0)
-#define WL1273_BL_EVENT BIT(1)
-#define WL1273_RDS_EVENT BIT(2)
-#define WL1273_BBLK_EVENT BIT(3)
-#define WL1273_LSYNC_EVENT BIT(4)
-#define WL1273_LEV_EVENT BIT(5)
-#define WL1273_IFFR_EVENT BIT(6)
-#define WL1273_PI_EVENT BIT(7)
-#define WL1273_PD_EVENT BIT(8)
-#define WL1273_STIC_EVENT BIT(9)
-#define WL1273_MAL_EVENT BIT(10)
-#define WL1273_POW_ENB_EVENT BIT(11)
-#define WL1273_SCAN_OVER_EVENT BIT(12)
-#define WL1273_ERROR_EVENT BIT(13)
-
-#define TUNER_MODE_STOP_SEARCH 0
-#define TUNER_MODE_PRESET 1
-#define TUNER_MODE_AUTO_SEEK 2
-#define TUNER_MODE_AF 3
-#define TUNER_MODE_AUTO_SEEK_PI 4
-#define TUNER_MODE_AUTO_SEEK_BULK 5
-
-#define RDS_BLOCK_SIZE 3
-
-struct wl1273_fm_platform_data {
- int (*request_resources) (struct i2c_client *client);
- void (*free_resources) (void);
- void (*enable) (void);
- void (*disable) (void);
-
- u8 forbidden_modes;
- unsigned int children;
-};
-
-#define WL1273_FM_CORE_CELLS 2
-
-#define WL1273_BAND_OTHER 0
-#define WL1273_BAND_JAPAN 1
-
-#define WL1273_BAND_JAPAN_LOW 76000
-#define WL1273_BAND_JAPAN_HIGH 90000
-#define WL1273_BAND_OTHER_LOW 87500
-#define WL1273_BAND_OTHER_HIGH 108000
-
-#define WL1273_BAND_TX_LOW 76000
-#define WL1273_BAND_TX_HIGH 108000
-
-struct wl1273_core {
- struct mfd_cell cells[WL1273_FM_CORE_CELLS];
- struct wl1273_fm_platform_data *pdata;
-
- unsigned int mode;
- unsigned int i2s_mode;
- unsigned int volume;
- unsigned int audio_mode;
- unsigned int channel_number;
- struct mutex lock; /* for serializing fm radio operations */
-
- struct i2c_client *client;
-
- int (*read)(struct wl1273_core *core, u8, u16 *);
- int (*write)(struct wl1273_core *core, u8, u16);
- int (*write_data)(struct wl1273_core *core, u8 *, u16);
- int (*set_audio)(struct wl1273_core *core, unsigned int);
- int (*set_volume)(struct wl1273_core *core, unsigned int);
-};
-
-#endif /* ifndef WL1273_CORE_H */
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index a3241e4d7548..5f70d3b5d1b1 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -8,11 +8,12 @@
#ifndef __LINUX_MFD_WM8350_CORE_H_
#define __LINUX_MFD_WM8350_CORE_H_
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/interrupt.h>
#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
#include <linux/regmap.h>
+#include <linux/types.h>
#include <linux/mfd/wm8350/audio.h>
#include <linux/mfd/wm8350/gpio.h>
@@ -21,6 +22,9 @@
#include <linux/mfd/wm8350/supply.h>
#include <linux/mfd/wm8350/wdt.h>
+struct device;
+struct platform_device;
+
/*
* Register values.
*/
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 944aa3aa3035..dd372b0123a6 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -266,6 +266,7 @@ struct mhi_event_config {
* struct mhi_controller_config - Root MHI controller configuration
* @max_channels: Maximum number of channels supported
* @timeout_ms: Timeout value for operations. 0 means use default
+ * @ready_timeout_ms: Timeout value for waiting for the device to be ready (optional)
* @buf_len: Size of automatically allocated buffers. 0 means use default
* @num_channels: Number of channels defined in @ch_cfg
* @ch_cfg: Array of defined channels
@@ -277,6 +278,7 @@ struct mhi_event_config {
struct mhi_controller_config {
u32 max_channels;
u32 timeout_ms;
+ u32 ready_timeout_ms;
u32 buf_len;
u32 num_channels;
const struct mhi_channel_config *ch_cfg;
@@ -288,6 +290,7 @@ struct mhi_controller_config {
/**
* struct mhi_controller - Master MHI controller structure
+ * @name: Device name of the MHI controller
* @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
* controller (required)
* @mhi_dev: MHI device instance for the controller
@@ -299,10 +302,15 @@ struct mhi_controller_config {
* @iova_start: IOMMU starting address for data (required)
* @iova_stop: IOMMU stop address for data (required)
* @fw_image: Firmware image name for normal booting (optional)
+ * @fw_data: Firmware image data content for normal booting, used only
+ * if fw_image is NULL and fbc_download is true (optional)
+ * @fw_sz: Firmware image data size for normal booting, used only if fw_image
+ * is NULL and fbc_download is true (optional)
* @edl_image: Firmware image name for emergency download mode (optional)
* @rddm_size: RAM dump size that host should allocate for debugging purpose
* @sbl_size: SBL image size downloaded through BHIe (optional)
* @seg_len: BHIe vector size (optional)
+ * @reg_len: Length of the MHI MMIO region (required)
* @fbc_image: Points to firmware image buffer
* @rddm_image: Points to RAM dump buffer
* @mhi_chan: Points to the channel configuration table
@@ -313,18 +321,14 @@ struct mhi_controller_config {
* @hw_ev_rings: Number of hardware event rings
* @sw_ev_rings: Number of software event rings
* @nr_irqs: Number of IRQ allocated by bus master (required)
- * @family_number: MHI controller family number
- * @device_number: MHI controller device number
- * @major_version: MHI controller major revision number
- * @minor_version: MHI controller minor revision number
* @serial_number: MHI controller serial number obtained from BHI
- * @oem_pk_hash: MHI controller OEM PK Hash obtained from BHI
* @mhi_event: MHI event ring configurations table
* @mhi_cmd: MHI command ring configurations table
* @mhi_ctxt: MHI device context, shared memory between host and device
* @pm_mutex: Mutex for suspend/resume operation
* @pm_lock: Lock for protecting MHI power management state
* @timeout_ms: Timeout in ms for state transitions
+ * @ready_timeout_ms: Timeout in ms for waiting for the device to be ready (optional)
* @pm_state: MHI power management state
* @db_access: DB access states
* @ee: MHI device execution environment
@@ -350,27 +354,21 @@ struct mhi_controller_config {
* @read_reg: Read a MHI register via the physical link (required)
* @write_reg: Write a MHI register via the physical link (required)
* @reset: Controller specific reset function (optional)
+ * @edl_trigger: CB function to trigger EDL mode (optional)
* @buffer_len: Bounce buffer length
* @index: Index of the MHI controller instance
* @bounce_buf: Use of bounce buffer
* @fbc_download: MHI host needs to do complete image transfer (optional)
* @wake_set: Device wakeup set flag
* @irq_flags: irq flags passed to request_irq (optional)
+ * @mru: The default MRU for the MHI device
*
* Fields marked as (required) need to be populated by the controller driver
* before calling mhi_register_controller(). For the fields marked as (optional)
* they can be populated depending on the usecase.
- *
- * The following fields are present for the purpose of implementing any device
- * specific quirks or customizations for specific MHI revisions used in device
- * by the controller drivers. The MHI stack will just populate these fields
- * during mhi_register_controller():
- * family_number
- * device_number
- * major_version
- * minor_version
*/
struct mhi_controller {
+ const char *name;
struct device *cntrl_dev;
struct mhi_device *mhi_dev;
struct dentry *debugfs_dentry;
@@ -382,10 +380,13 @@ struct mhi_controller {
dma_addr_t iova_start;
dma_addr_t iova_stop;
const char *fw_image;
+ const u8 *fw_data;
+ size_t fw_sz;
const char *edl_image;
size_t rddm_size;
size_t sbl_size;
size_t seg_len;
+ size_t reg_len;
struct image_info *fbc_image;
struct image_info *rddm_image;
struct mhi_chan *mhi_chan;
@@ -396,12 +397,7 @@ struct mhi_controller {
u32 hw_ev_rings;
u32 sw_ev_rings;
u32 nr_irqs;
- u32 family_number;
- u32 device_number;
- u32 major_version;
- u32 minor_version;
u32 serial_number;
- u32 oem_pk_hash[MHI_MAX_OEM_PK_HASH_SEGMENTS];
struct mhi_event *mhi_event;
struct mhi_cmd *mhi_cmd;
@@ -410,6 +406,7 @@ struct mhi_controller {
struct mutex pm_mutex;
rwlock_t pm_lock;
u32 timeout_ms;
+ u32 ready_timeout_ms;
u32 pm_state;
u32 db_access;
enum mhi_ee_type ee;
@@ -441,6 +438,7 @@ struct mhi_controller {
void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
u32 val);
void (*reset)(struct mhi_controller *mhi_cntrl);
+ int (*edl_trigger)(struct mhi_controller *mhi_cntrl);
size_t buffer_len;
int index;
@@ -448,6 +446,7 @@ struct mhi_controller {
bool fbc_download;
bool wake_set;
unsigned long irq_flags;
+ u32 mru;
};
/**
@@ -529,7 +528,7 @@ struct mhi_driver {
struct device_driver driver;
};
-#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
+#define to_mhi_driver(drv) container_of_const(drv, struct mhi_driver, driver)
#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
/**
@@ -635,13 +634,29 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
/**
- * mhi_power_down - Start MHI power down sequence
+ * mhi_power_down - Power down the MHI device and also destroy the
+ * 'struct device' for the channels associated with it.
+ * See also mhi_power_down_keep_dev() which is a variant
+ * of this API that keeps the 'struct device' for channels
+ * (useful during suspend/hibernation).
* @mhi_cntrl: MHI controller
* @graceful: Link is still accessible, so do a graceful shutdown process
*/
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
/**
+ * mhi_power_down_keep_dev - Power down the MHI device but keep the 'struct
+ * device' for the channels associated with it.
+ * This is a variant of 'mhi_power_down()' and
+ * useful in scenarios such as suspend/hibernation
+ * where destroying the 'struct device' is not
+ * needed.
+ * @mhi_cntrl: MHI controller
+ * @graceful: Link is still accessible, so do a graceful shutdown process
+ */
+void mhi_power_down_keep_dev(struct mhi_controller *mhi_cntrl, bool graceful);
+
+/**
* mhi_unprepare_after_power_down - Free any allocated memory after power down
* @mhi_cntrl: MHI controller
*/
@@ -660,6 +675,19 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
/**
+ * mhi_pm_resume_force - Force resume MHI from suspended state
+ * @mhi_cntrl: MHI controller
+ *
+ * Resume the device irrespective of its MHI state. As per the MHI spec, a
+ * device has to be in M3 state during resume, but some devices end up in a
+ * different MHI state yet continue to work fine if resumed anyway. This API
+ * is intended to be used for such devices.
+ *
+ * Return: 0 if the resume succeeds, a negative error code otherwise
+ */
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl);
+
+/**
* mhi_download_rddm_image - Download ramdump image from device for
* debugging purpose.
* @mhi_cntrl: MHI controller
@@ -693,12 +721,6 @@ enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
void mhi_soc_reset(struct mhi_controller *mhi_cntrl);
/**
- * mhi_device_get - Disable device low power mode
- * @mhi_dev: Device associated with the channel
- */
-void mhi_device_get(struct mhi_device *mhi_dev);
-
-/**
* mhi_device_get_sync - Disable device low power mode. Synchronously
* take the controller out of suspended state
* @mhi_dev: Device associated with the channel
@@ -713,16 +735,27 @@ void mhi_device_put(struct mhi_device *mhi_dev);
/**
* mhi_prepare_for_transfer - Setup UL and DL channels for data transfer.
- * Allocate and initialize the channel context and
- * also issue the START channel command to both
- * channels. Channels can be started only if both
- * host and device execution environments match and
- * channels are in a DISABLED state.
* @mhi_dev: Device associated with the channels
+ *
+ * Allocate and initialize the channel context and also issue the START channel
+ * command to both channels. Channels can be started only if both host and
+ * device execution environments match and channels are in a DISABLED state.
*/
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
/**
+ * mhi_prepare_for_transfer_autoqueue - Setup UL and DL channels with auto queue
+ * buffers for DL traffic
+ * @mhi_dev: Device associated with the channels
+ *
+ * Allocate and initialize the channel context and also issue the START channel
+ * command to both channels. Channels can be started only if both host and
+ * device execution environments match and channels are in a DISABLED state.
+ * The MHI core will automatically allocate and queue buffers for the DL traffic.
+ */
+int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev);
+
+/**
* mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
* Issue the RESET channel command and let the
* device clean-up the context so no incoming
@@ -738,25 +771,6 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
/**
- * mhi_poll - Poll for any available data in DL direction
- * @mhi_dev: Device associated with the channels
- * @budget: # of events to process
- */
-int mhi_poll(struct mhi_device *mhi_dev, u32 budget);
-
-/**
- * mhi_queue_dma - Send or receive DMA mapped buffers from client device
- * over MHI channel
- * @mhi_dev: Device associated with the channels
- * @dir: DMA direction for the channel
- * @mhi_buf: Buffer for holding the DMA mapped data
- * @len: Buffer length
- * @mflags: MHI transfer flags used for the transfer
- */
-int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
- struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags);
-
-/**
* mhi_queue_buf - Send or receive raw buffers from client device over MHI
* channel
* @mhi_dev: Device associated with the channels
@@ -786,4 +800,13 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
*/
bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir);
+/**
+ * mhi_get_channel_doorbell_offset - Get the channel doorbell offset
+ * @mhi_cntrl: MHI controller
+ * @chdb_offset: Read channel doorbell offset
+ *
+ * Return: 0 if the read succeeds, a negative error code otherwise
+ */
+int mhi_get_channel_doorbell_offset(struct mhi_controller *mhi_cntrl, u32 *chdb_offset);
+
#endif /* _MHI_H_ */
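A sketch of the suspend/hibernation sequence the mhi_power_down_keep_dev() comment alludes to, for a hypothetical controller driver; pairing it with mhi_unprepare_after_power_down() and restoring synchronously are assumptions, not a mandated sequence:

static void example_cntrl_hibernate(struct mhi_controller *mhi_cntrl)
{
	/* Power down but keep the channel 'struct device' instances */
	mhi_power_down_keep_dev(mhi_cntrl, true);
	mhi_unprepare_after_power_down(mhi_cntrl);
}

static int example_cntrl_restore(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Re-allocate device context, then bring MHI back up synchronously */
	ret = mhi_prepare_for_power_up(mhi_cntrl);
	if (ret)
		return ret;

	return mhi_sync_power_up(mhi_cntrl);
}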
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
new file mode 100644
index 000000000000..7b40fc8cbe77
--- /dev/null
+++ b/include/linux/mhi_ep.h
@@ -0,0 +1,305 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, Linaro Ltd.
+ *
+ */
+#ifndef _MHI_EP_H_
+#define _MHI_EP_H_
+
+#include <linux/dma-direction.h>
+#include <linux/mhi.h>
+
+#define MHI_EP_DEFAULT_MTU 0x8000
+
+/**
+ * struct mhi_ep_channel_config - Channel configuration structure for controller
+ * @name: The name of this channel
+ * @num: The number assigned to this channel
+ * @num_elements: The number of elements that can be queued to this channel
+ * @dir: Direction that data may flow on this channel
+ */
+struct mhi_ep_channel_config {
+ char *name;
+ u32 num;
+ u32 num_elements;
+ enum dma_data_direction dir;
+};
+
+/**
+ * struct mhi_ep_cntrl_config - MHI Endpoint controller configuration
+ * @mhi_version: MHI spec version supported by the controller
+ * @max_channels: Maximum number of channels supported
+ * @num_channels: Number of channels defined in @ch_cfg
+ * @ch_cfg: Array of defined channels
+ */
+struct mhi_ep_cntrl_config {
+ u32 mhi_version;
+ u32 max_channels;
+ u32 num_channels;
+ const struct mhi_ep_channel_config *ch_cfg;
+};
+
+/**
+ * struct mhi_ep_db_info - MHI Endpoint doorbell info
+ * @mask: Mask of the doorbell interrupt
+ * @status: Status of the doorbell interrupt
+ */
+struct mhi_ep_db_info {
+ u32 mask;
+ u32 status;
+};
+
+/**
+ * struct mhi_ep_buf_info - MHI Endpoint transfer buffer info
+ * @mhi_dev: MHI device associated with this buffer
+ * @dev_addr: Address of the buffer in endpoint
+ * @host_addr: Address of the buffer in the host
+ * @size: Size of the buffer
+ * @code: Transfer completion code
+ * @cb: Callback to be executed by controller drivers after transfer completion (async)
+ * @cb_buf: Opaque buffer to be passed to the callback
+ */
+struct mhi_ep_buf_info {
+ struct mhi_ep_device *mhi_dev;
+ void *dev_addr;
+ u64 host_addr;
+ size_t size;
+ int code;
+
+ void (*cb)(struct mhi_ep_buf_info *buf_info);
+ void *cb_buf;
+};
+
+/**
+ * struct mhi_ep_cntrl - MHI Endpoint controller structure
+ * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
+ * Endpoint controller
+ * @mhi_dev: MHI Endpoint device instance for the controller
+ * @mmio: MMIO region containing the MHI registers
+ * @mhi_chan: Points to the channel configuration table
+ * @mhi_event: Points to the event ring configurations table
+ * @mhi_cmd: Points to the command ring configurations table
+ * @sm: MHI Endpoint state machine
+ * @ch_ctx_cache: Cache of host channel context data structure
+ * @ev_ctx_cache: Cache of host event context data structure
+ * @cmd_ctx_cache: Cache of host command context data structure
+ * @ch_ctx_host_pa: Physical address of host channel context data structure
+ * @ev_ctx_host_pa: Physical address of host event context data structure
+ * @cmd_ctx_host_pa: Physical address of host command context data structure
+ * @ch_ctx_cache_phys: Physical address of the host channel context cache
+ * @ev_ctx_cache_phys: Physical address of the host event context cache
+ * @cmd_ctx_cache_phys: Physical address of the host command context cache
+ * @chdb: Array of channel doorbell interrupt info
+ * @event_lock: Lock for protecting event rings
+ * @state_lock: Lock for protecting state transitions
+ * @list_lock: Lock for protecting state transition and channel doorbell lists
+ * @st_transition_list: List of state transitions
+ * @ch_db_list: List of queued channel doorbells
+ * @wq: Dedicated workqueue for handling rings and state changes
+ * @state_work: State transition worker
+ * @reset_work: Worker for MHI Endpoint reset
+ * @cmd_ring_work: Worker for processing command rings
+ * @ch_ring_work: Worker for processing channel rings
+ * @raise_irq: CB function for raising IRQ to the host
+ * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it
+ * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context
+ * @read_sync: CB function for reading from host memory synchronously
+ * @write_sync: CB function for writing to host memory synchronously
+ * @read_async: CB function for reading from host memory asynchronously
+ * @write_async: CB function for writing to host memory asynchronously
+ * @mhi_state: MHI Endpoint state
+ * @max_chan: Maximum channels supported by the endpoint controller
+ * @mru: MRU (Maximum Receive Unit) value of the endpoint controller
+ * @event_rings: Number of event rings supported by the endpoint controller
+ * @hw_event_rings: Number of hardware event rings supported by the endpoint controller
+ * @chdb_offset: Channel doorbell offset set by the host
+ * @erdb_offset: Event ring doorbell offset set by the host
+ * @index: MHI Endpoint controller index
+ * @irq: IRQ used by the endpoint controller
+ * @enabled: Whether the endpoint controller is enabled
+ */
+struct mhi_ep_cntrl {
+ struct device *cntrl_dev;
+ struct mhi_ep_device *mhi_dev;
+ void __iomem *mmio;
+
+ struct mhi_ep_chan *mhi_chan;
+ struct mhi_ep_event *mhi_event;
+ struct mhi_ep_cmd *mhi_cmd;
+ struct mhi_ep_sm *sm;
+
+ struct mhi_chan_ctxt *ch_ctx_cache;
+ struct mhi_event_ctxt *ev_ctx_cache;
+ struct mhi_cmd_ctxt *cmd_ctx_cache;
+ u64 ch_ctx_host_pa;
+ u64 ev_ctx_host_pa;
+ u64 cmd_ctx_host_pa;
+ phys_addr_t ch_ctx_cache_phys;
+ phys_addr_t ev_ctx_cache_phys;
+ phys_addr_t cmd_ctx_cache_phys;
+
+ struct mhi_ep_db_info chdb[4];
+ struct mutex event_lock;
+ struct mutex state_lock;
+ spinlock_t list_lock;
+
+ struct list_head st_transition_list;
+ struct list_head ch_db_list;
+
+ struct workqueue_struct *wq;
+ struct work_struct state_work;
+ struct work_struct reset_work;
+ struct work_struct cmd_ring_work;
+ struct work_struct ch_ring_work;
+ struct kmem_cache *ring_item_cache;
+ struct kmem_cache *ev_ring_el_cache;
+ struct kmem_cache *tre_buf_cache;
+
+ void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector);
+ int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr,
+ void __iomem **virt, size_t size);
+ void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys,
+ void __iomem *virt, size_t size);
+ int (*read_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*write_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*read_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+ int (*write_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info);
+
+ enum mhi_state mhi_state;
+
+ u32 max_chan;
+ u32 mru;
+ u32 event_rings;
+ u32 hw_event_rings;
+ u32 chdb_offset;
+ u32 erdb_offset;
+ u32 index;
+ int irq;
+ bool enabled;
+};
+
+/**
+ * struct mhi_ep_device - Structure representing an MHI Endpoint device that binds
+ * to channels or is associated with controllers
+ * @dev: Driver model device node for the MHI Endpoint device
+ * @mhi_cntrl: Controller the device belongs to
+ * @id: Pointer to MHI Endpoint device ID struct
+ * @name: Name of the associated MHI Endpoint device
+ * @ul_chan: UL (from host to endpoint) channel for the device
+ * @dl_chan: DL (from endpoint to host) channel for the device
+ * @dev_type: MHI device type
+ */
+struct mhi_ep_device {
+ struct device dev;
+ struct mhi_ep_cntrl *mhi_cntrl;
+ const struct mhi_device_id *id;
+ const char *name;
+ struct mhi_ep_chan *ul_chan;
+ struct mhi_ep_chan *dl_chan;
+ enum mhi_device_type dev_type;
+};
+
+/**
+ * struct mhi_ep_driver - Structure representing an MHI Endpoint client driver
+ * @id_table: Pointer to MHI Endpoint device ID table
+ * @driver: Device driver model driver
+ * @probe: CB function for client driver probe function
+ * @remove: CB function for client driver remove function
+ * @ul_xfer_cb: CB function for UL (from host to endpoint) data transfer
+ * @dl_xfer_cb: CB function for DL (from endpoint to host) data transfer
+ */
+struct mhi_ep_driver {
+ const struct mhi_device_id *id_table;
+ struct device_driver driver;
+ int (*probe)(struct mhi_ep_device *mhi_ep,
+ const struct mhi_device_id *id);
+ void (*remove)(struct mhi_ep_device *mhi_ep);
+ void (*ul_xfer_cb)(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *result);
+ void (*dl_xfer_cb)(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *result);
+};
+
+#define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev)
+#define to_mhi_ep_driver(drv) container_of_const(drv, struct mhi_ep_driver, driver)
+
+/*
+ * module_mhi_ep_driver() - Helper macro for drivers that don't do
+ * anything special other than using default mhi_ep_driver_register() and
+ * mhi_ep_driver_unregister(). This eliminates a lot of boilerplate.
+ * Each module may only use this macro once.
+ */
+#define module_mhi_ep_driver(mhi_drv) \
+ module_driver(mhi_drv, mhi_ep_driver_register, \
+ mhi_ep_driver_unregister)
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define mhi_ep_driver_register(mhi_drv) \
+ __mhi_ep_driver_register(mhi_drv, THIS_MODULE)
+
+/**
+ * __mhi_ep_driver_register - Register a driver with MHI Endpoint bus
+ * @mhi_drv: Driver to be associated with the device
+ * @owner: The module owner
+ *
+ * Return: 0 if driver registration succeeds, a negative error code otherwise.
+ */
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner);
+
+/**
+ * mhi_ep_driver_unregister - Unregister a driver from MHI Endpoint bus
+ * @mhi_drv: Driver associated with the device
+ */
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv);
+
+/**
+ * mhi_ep_register_controller - Register MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to register
+ * @config: Configuration to use for the controller
+ *
+ * Return: 0 if controller registration succeeds, a negative error code otherwise.
+ */
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config);
+
+/**
+ * mhi_ep_unregister_controller - Unregister MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to unregister
+ */
+void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_up - Power up the MHI endpoint stack
+ * @mhi_cntrl: MHI Endpoint controller
+ *
+ * Return: 0 if power up succeeds, a negative error code otherwise.
+ */
+int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_down - Power down the MHI endpoint stack
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_queue_is_empty - Determine whether the transfer queue is empty
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ *
+ * Return: true if the queue is empty, false otherwise.
+ */
+bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir);
+
+/**
+ * mhi_ep_queue_skb - Send an SKB to the host over MHI Endpoint
+ * @mhi_dev: Device associated with the DL channel
+ * @skb: SKB to be queued
+ *
+ * Return: 0 if the SKB has been sent successfully, a negative error code otherwise.
+ */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb);
+
+#endif
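A minimal client-driver skeleton for the new endpoint bus, mirroring the host-side pattern; the "LOOPBACK" channel name and all function names are illustrative:

#include <linux/mhi_ep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static int example_ep_probe(struct mhi_ep_device *mhi_dev,
			    const struct mhi_device_id *id)
{
	return 0;
}

static void example_ep_remove(struct mhi_ep_device *mhi_dev)
{
}

static void example_ep_ul_cb(struct mhi_ep_device *mhi_dev,
			     struct mhi_result *result)
{
	/* Data arriving from the host (UL) is handled here */
}

static void example_ep_dl_cb(struct mhi_ep_device *mhi_dev,
			     struct mhi_result *result)
{
	/* Completion of endpoint-to-host (DL) transfers is handled here */
}

static const struct mhi_device_id example_ep_ids[] = {
	{ .chan = "LOOPBACK" },
	{}
};

static struct mhi_ep_driver example_ep_driver = {
	.id_table = example_ep_ids,
	.probe = example_ep_probe,
	.remove = example_ep_remove,
	.ul_xfer_cb = example_ep_ul_cb,
	.dl_xfer_cb = example_ep_dl_cb,
	.driver = {
		.name = "example_mhi_ep_client",
	},
};
module_mhi_ep_driver(example_ep_driver);

MODULE_LICENSE("GPL");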
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 416ee6dd2574..ca691641788b 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -8,6 +8,8 @@
#ifndef _MICREL_PHY_H
#define _MICREL_PHY_H
+#define MICREL_OUI 0x0885
+
#define MICREL_PHY_ID_MASK 0x00fffff0
#define PHY_ID_KSZ8873MLL 0x000e7237
@@ -28,6 +30,9 @@
#define PHY_ID_KSZ9031 0x00221620
#define PHY_ID_KSZ9131 0x00221640
#define PHY_ID_LAN8814 0x00221660
+#define PHY_ID_LAN8804 0x00221670
+#define PHY_ID_LAN8841 0x00221650
+#define PHY_ID_LAN8842 0x002216C0
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
@@ -37,12 +42,32 @@
#define PHY_ID_KSZ9477 0x00221631
/* struct phy_device dev_flags definitions */
-#define MICREL_PHY_50MHZ_CLK 0x00000001
-#define MICREL_PHY_FXEN 0x00000002
+#define MICREL_PHY_50MHZ_CLK BIT(0)
+#define MICREL_PHY_FXEN BIT(1)
+#define MICREL_KSZ8_P1_ERRATA BIT(2)
#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
#define MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW 0x104
#define MICREL_KSZ9021_RGMII_RX_DATA_PAD_SCEW 0x105
+/* Device specific MII_BMCR (Reg 0) bits */
+/* 1 = HP Auto MDI/MDI-X mode, 0 = Microchip Auto MDI/MDI-X mode */
+#define KSZ886X_BMCR_HP_MDIX BIT(5)
+/* 1 = Force MDI (transmit on RXP/RXM pins), 0 = Normal operation
+ * (transmit on TXP/TXM pins)
+ */
+#define KSZ886X_BMCR_FORCE_MDI BIT(4)
+/* 1 = Disable auto MDI-X */
+#define KSZ886X_BMCR_DISABLE_AUTO_MDIX BIT(3)
+#define KSZ886X_BMCR_DISABLE_FAR_END_FAULT BIT(2)
+#define KSZ886X_BMCR_DISABLE_TRANSMIT BIT(1)
+#define KSZ886X_BMCR_DISABLE_LED BIT(0)
+
+/* PHY Special Control/Status Register (Reg 31) */
+#define KSZ886X_CTRL_MDIX_STAT BIT(4)
+#define KSZ886X_CTRL_FORCE_LINK BIT(3)
+#define KSZ886X_CTRL_PWRSAVE BIT(2)
+#define KSZ886X_CTRL_REMOTE_LOOPBACK BIT(1)
+
#endif /* _MICREL_PHY_H */
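A sketch of using the new device-specific BMCR bits from a PHY driver; the helper is hypothetical and simply mirrors the bit descriptions above:

#include <linux/mii.h>
#include <linux/phy.h>

static int ksz886x_example_force_mdi(struct phy_device *phydev)
{
	/* Disable auto MDI-X and force transmission onto the RXP/RXM pins */
	return phy_set_bits(phydev, MII_BMCR,
			    KSZ886X_BMCR_DISABLE_AUTO_MDIX |
			    KSZ886X_BMCR_FORCE_MDI);
}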
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 4bb4e519e3f5..26ca00c325d9 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -7,127 +7,114 @@
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>
-typedef struct page *new_page_t(struct page *page, unsigned long private);
-typedef void free_page_t(struct page *page, unsigned long private);
+typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
+typedef void free_folio_t(struct folio *folio, unsigned long private);
struct migration_target_control;
-/*
- * Return values from addresss_space_operations.migratepage():
- * - negative errno on page migration failure;
- * - zero on page migration success;
+/**
+ * struct movable_operations - Driver page migration
+ * @isolate_page:
+ * The VM calls this function to prepare the page to be moved. The page
+ * is locked and the driver should not unlock it. The driver should
+ * return ``true`` if the page is movable and ``false`` if it is not
+ * currently movable. After this function returns, the VM uses the
+ * page->lru field, so the driver must preserve any information which
+ * is usually stored here.
+ *
+ * @migrate_page:
+ * After isolation, the VM calls this function with the isolated
+ * @src page. The driver should copy the contents of the
+ * @src page to the @dst page and set up the fields of @dst page.
+ * Both pages are locked.
+ * If page migration is successful, the driver should return 0.
+ * If the driver cannot migrate the page at the moment, it can return
+ * -EAGAIN. The VM interprets this as a temporary migration failure and
+ * will retry it later. Any other error value is a permanent migration
+ * failure and migration will not be retried.
+ * The driver shouldn't touch the @src->lru field while in the
+ * migrate_page() function. It may write to @dst->lru.
+ *
+ * @putback_page:
+ * If migration fails on the isolated page, the VM informs the driver
+ * that the page is no longer a candidate for migration by calling
+ * this function. The driver should put the isolated page back into
+ * its own data structure.
*/
-#define MIGRATEPAGE_SUCCESS 0
-
-enum migrate_reason {
- MR_COMPACTION,
- MR_MEMORY_FAILURE,
- MR_MEMORY_HOTPLUG,
- MR_SYSCALL, /* also applies to cpusets */
- MR_MEMPOLICY_MBIND,
- MR_NUMA_MISPLACED,
- MR_CONTIG_RANGE,
- MR_LONGTERM_PIN,
- MR_TYPES
+struct movable_operations {
+ bool (*isolate_page)(struct page *, isolate_mode_t);
+ int (*migrate_page)(struct page *dst, struct page *src,
+ enum migrate_mode);
+ void (*putback_page)(struct page *);
};
-/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
+/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION
-extern void putback_movable_pages(struct list_head *l);
-extern int migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page,
- enum migrate_mode mode);
-extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
- unsigned long private, enum migrate_mode mode, int reason);
-extern struct page *alloc_migration_target(struct page *page, unsigned long private);
-extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
-
-extern void migrate_page_states(struct page *newpage, struct page *page);
-extern void migrate_page_copy(struct page *newpage, struct page *page);
-extern int migrate_huge_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page);
-extern int migrate_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page, int extra_count);
+void putback_movable_pages(struct list_head *l);
+int migrate_folio(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode);
+int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
+ unsigned long private, enum migrate_mode mode, int reason,
+ unsigned int *ret_succeeded);
+struct folio *alloc_migration_target(struct folio *src, unsigned long private);
+bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode);
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
+
+int migrate_huge_page_move_mapping(struct address_space *mapping,
+ struct folio *dst, struct folio *src);
+void migration_entry_wait_on_locked(softleaf_t entry, spinlock_t *ptl)
+ __releases(ptl);
+void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
+int folio_migrate_mapping(struct address_space *mapping,
+ struct folio *newfolio, struct folio *folio, int extra_count);
+int set_movable_ops(const struct movable_operations *ops, enum pagetype type);
+
#else
static inline void putback_movable_pages(struct list_head *l) {}
-static inline int migrate_pages(struct list_head *l, new_page_t new,
- free_page_t free, unsigned long private, enum migrate_mode mode,
- int reason)
+static inline int migrate_pages(struct list_head *l, new_folio_t new,
+ free_folio_t free, unsigned long private,
+ enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{ return -ENOSYS; }
-static inline struct page *alloc_migration_target(struct page *page,
+static inline struct folio *alloc_migration_target(struct folio *src,
unsigned long private)
{ return NULL; }
-static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
- { return -EBUSY; }
+static inline bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
+ { return false; }
+static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+ { return false; }
-static inline void migrate_page_states(struct page *newpage, struct page *page)
+static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
+ struct folio *dst, struct folio *src)
{
+ return -ENOSYS;
}
-
-static inline void migrate_page_copy(struct page *newpage,
- struct page *page) {}
-
-static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page)
+static inline int set_movable_ops(const struct movable_operations *ops, enum pagetype type)
{
return -ENOSYS;
}
#endif /* CONFIG_MIGRATION */
-#ifdef CONFIG_COMPACTION
-extern int PageMovable(struct page *page);
-extern void __SetPageMovable(struct page *page, struct address_space *mapping);
-extern void __ClearPageMovable(struct page *page);
-#else
-static inline int PageMovable(struct page *page) { return 0; }
-static inline void __SetPageMovable(struct page *page,
- struct address_space *mapping)
-{
-}
-static inline void __ClearPageMovable(struct page *page)
-{
-}
-#endif
-
#ifdef CONFIG_NUMA_BALANCING
-extern bool pmd_trans_migrating(pmd_t pmd);
-extern int migrate_misplaced_page(struct page *page,
- struct vm_area_struct *vma, int node);
+int migrate_misplaced_folio_prepare(struct folio *folio,
+ struct vm_area_struct *vma, int node);
+int migrate_misplaced_folio(struct folio *folio, int node);
#else
-static inline bool pmd_trans_migrating(pmd_t pmd)
+static inline int migrate_misplaced_folio_prepare(struct folio *folio,
+ struct vm_area_struct *vma, int node)
{
- return false;
+ return -EAGAIN; /* can't migrate now */
}
-static inline int migrate_misplaced_page(struct page *page,
- struct vm_area_struct *vma, int node)
+static inline int migrate_misplaced_folio(struct folio *folio, int node)
{
return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */
-#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
-extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- pmd_t *pmd, pmd_t entry,
- unsigned long address,
- struct page *page, int node);
-#else
-static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- pmd_t *pmd, pmd_t entry,
- unsigned long address,
- struct page *page, int node)
-{
- return -EAGAIN;
-}
-#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/
-
-
#ifdef CONFIG_MIGRATION
/*
@@ -137,8 +124,8 @@ static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
*/
#define MIGRATE_PFN_VALID (1UL << 0)
#define MIGRATE_PFN_MIGRATE (1UL << 1)
-#define MIGRATE_PFN_LOCKED (1UL << 2)
#define MIGRATE_PFN_WRITE (1UL << 3)
+#define MIGRATE_PFN_COMPOUND (1UL << 4)
#define MIGRATE_PFN_SHIFT 6
static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
@@ -156,6 +143,8 @@ static inline unsigned long migrate_pfn(unsigned long pfn)
enum migrate_vma_direction {
MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
+ MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
+ MIGRATE_VMA_SELECT_COMPOUND = 1 << 3,
};
struct migrate_vma {
@@ -176,8 +165,8 @@ struct migrate_vma {
unsigned long end;
/*
- * Set to the owner value also stored in page->pgmap->owner for
- * migrating out of device private memory. The flags also need to
+ * Set to the owner value also stored in page_pgmap(page)->owner
+ * for migrating out of device private memory. The flags also need to
* be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
* The caller should always set this field when using mmu notifier
* callbacks to avoid device MMU invalidations for device private
@@ -185,11 +174,24 @@ struct migrate_vma {
*/
void *pgmap_owner;
unsigned long flags;
+
+ /*
+ * Set to vmf->page if this is being called to migrate a page as part of
+ * a migrate_to_ram() callback.
+ */
+ struct page *fault_page;
};
int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
+int migrate_device_range(unsigned long *src_pfns, unsigned long start,
+ unsigned long npages);
+int migrate_device_pfns(unsigned long *src_pfns, unsigned long npages);
+void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
+ unsigned long npages);
+void migrate_device_finalize(unsigned long *src_pfns,
+ unsigned long *dst_pfns, unsigned long npages);
#endif /* CONFIG_MIGRATION */
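
The migrate_vma_*() entry points above form a three-phase collect/copy/commit
protocol for device drivers. A minimal sketch of migrating a small range into
device memory, assuming the range covers at most 64 pages and that
example_alloc_and_copy() is a hypothetical driver helper (not part of this
header) that fills dst[] with migrate_pfn() values:

	static int example_migrate_to_device(struct vm_area_struct *vma,
					     unsigned long start,
					     unsigned long end,
					     void *pgmap_owner)
	{
		unsigned long src_pfns[64] = {};
		unsigned long dst_pfns[64] = {};
		struct migrate_vma args = {
			.vma		= vma,
			.src		= src_pfns,
			.dst		= dst_pfns,
			.start		= start,
			.end		= end,
			.flags		= MIGRATE_VMA_SELECT_SYSTEM,
			.pgmap_owner	= pgmap_owner,
		};
		int ret;

		/* Phase 1: unmap and isolate the source pages into src[]. */
		ret = migrate_vma_setup(&args);
		if (ret)
			return ret;

		/*
		 * Phase 2: for each src[] entry with MIGRATE_PFN_MIGRATE set,
		 * allocate a device page, copy the data (typically by DMA)
		 * and store migrate_pfn(device_pfn) in dst[].
		 */
		example_alloc_and_copy(&args);

		/* Phase 3: install the new pages and release the old ones. */
		migrate_vma_pages(&args);
		migrate_vma_finalize(&args);
		return 0;
	}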
diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
index 883c99249033..265c4328b36a 100644
--- a/include/linux/migrate_mode.h
+++ b/include/linux/migrate_mode.h
@@ -7,16 +7,25 @@
* on most operations but not ->writepage as the potential stall time
* is too significant
* MIGRATE_SYNC will block when migrating pages
- * MIGRATE_SYNC_NO_COPY will block when migrating pages but will not copy pages
- * with the CPU. Instead, page copy happens outside the migratepage()
- * callback and is likely using a DMA engine. See migrate_vma() and HMM
- * (mm/hmm.c) for users of this mode.
*/
enum migrate_mode {
MIGRATE_ASYNC,
MIGRATE_SYNC_LIGHT,
MIGRATE_SYNC,
- MIGRATE_SYNC_NO_COPY,
+};
+
+enum migrate_reason {
+ MR_COMPACTION,
+ MR_MEMORY_FAILURE,
+ MR_MEMORY_HOTPLUG,
+ MR_SYSCALL, /* also applies to cpusets */
+ MR_MEMPOLICY_MBIND,
+ MR_NUMA_MISPLACED,
+ MR_CONTIG_RANGE,
+ MR_LONGTERM_PIN,
+ MR_DEMOTION,
+ MR_DAMON,
+ MR_TYPES
};
#endif /* MIGRATE_MODE_H_INCLUDED */
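
With MIGRATE_SYNC_NO_COPY gone, callers pick one of the three remaining modes
and pass a migrate_reason for tracing and statistics. A rough sketch of the
memory-hotplug style call, assuming folios have already been isolated onto
pagelist (struct migration_target_control is private to mm/internal.h, so the
&mtc argument here stands in for whatever the allocation callback expects):

	LIST_HEAD(pagelist);
	unsigned int succeeded = 0;
	int ret;

	/* ...isolate the folios to migrate onto &pagelist... */

	ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC,
			    MR_MEMORY_HOTPLUG, &succeeded);
	if (ret)
		/* Put any folios that could not be migrated back on the LRU. */
		putback_movable_pages(&pagelist);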
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 219b93cad1dd..b8f26d4513c3 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -32,7 +32,7 @@ struct mii_if_info {
extern int mii_link_ok (struct mii_if_info *mii);
extern int mii_nway_restart (struct mii_if_info *mii);
-extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern void mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
extern void mii_ethtool_get_link_ksettings(
struct mii_if_info *mii, struct ethtool_link_ksettings *cmd);
extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
@@ -140,7 +140,7 @@ static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
* settings to phy autonegotiation advertisements for the
* MII_ADVERTISE register.
*/
-static inline u32 linkmode_adv_to_mii_adv_t(unsigned long *advertising)
+static inline u32 linkmode_adv_to_mii_adv_t(const unsigned long *advertising)
{
u32 result = 0;
@@ -215,7 +215,8 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
* settings to phy autonegotiation advertisements for the
* MII_CTRL1000 register when in 1000T mode.
*/
-static inline u32 linkmode_adv_to_mii_ctrl1000_t(unsigned long *advertising)
+static inline u32
+linkmode_adv_to_mii_ctrl1000_t(const unsigned long *advertising)
{
u32 result = 0;
@@ -355,56 +356,6 @@ static inline u32 mii_adv_to_ethtool_adv_x(u32 adv)
}
/**
- * mii_lpa_mod_linkmode_adv_sgmii
- * @lp_advertising: pointer to destination link mode.
- * @lpa: value of the MII_LPA register
- *
- * A small helper function that translates MII_LPA bits to
- * linkmode advertisement settings for SGMII.
- * Leaves other bits unchanged.
- */
-static inline void
-mii_lpa_mod_linkmode_lpa_sgmii(unsigned long *lp_advertising, u32 lpa)
-{
- u32 speed_duplex = lpa & LPA_SGMII_DPX_SPD_MASK;
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_1000HALF);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_1000FULL);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_100HALF);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_100FULL);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_10HALF);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, lp_advertising,
- speed_duplex == LPA_SGMII_10FULL);
-}
-
-/**
- * mii_lpa_to_linkmode_adv_sgmii
- * @advertising: pointer to destination link mode.
- * @lpa: value of the MII_LPA register
- *
- * A small helper function that translates MII_ADVERTISE bits
- * to linkmode advertisement settings when in SGMII mode.
- * Clears the old value of advertising.
- */
-static inline void mii_lpa_to_linkmode_lpa_sgmii(unsigned long *lp_advertising,
- u32 lpa)
-{
- linkmode_zero(lp_advertising);
-
- mii_lpa_mod_linkmode_lpa_sgmii(lp_advertising, lpa);
-}
-
-/**
* mii_adv_mod_linkmode_adv_t
* @advertising:pointer to destination link mode.
* @adv: value of the MII_ADVERTISE register
@@ -503,7 +454,7 @@ static inline void mii_ctrl1000_mod_linkmode_adv_t(unsigned long *advertising,
 * A small helper function that translates linkmode advertising to local
* pause capabilities.
*/
-static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
+static inline u32 linkmode_adv_to_lcl_adv_t(const unsigned long *advertising)
{
u32 lcl_adv = 0;
@@ -595,4 +546,39 @@ static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv)
return cap;
}
+/**
+ * mii_bmcr_encode_fixed - encode fixed speed/duplex settings to a BMCR value
+ * @speed: a SPEED_* value
+ * @duplex: a DUPLEX_* value
+ *
+ * Encode the speed and duplex to a BMCR value. 2500, 1000, 100 and 10 Mbps are
+ * supported. 2500Mbps is encoded to 1000Mbps. Other speeds are encoded as 10
+ * Mbps. Unknown duplex values are encoded to half-duplex.
+ */
+static inline u16 mii_bmcr_encode_fixed(int speed, int duplex)
+{
+ u16 bmcr;
+
+ switch (speed) {
+ case SPEED_2500:
+ case SPEED_1000:
+ bmcr = BMCR_SPEED1000;
+ break;
+
+ case SPEED_100:
+ bmcr = BMCR_SPEED100;
+ break;
+
+ case SPEED_10:
+ default:
+ bmcr = BMCR_SPEED10;
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ bmcr |= BMCR_FULLDPLX;
+
+ return bmcr;
+}
+
#endif /* __LINUX_MII_H__ */
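
As a quick illustration of the new helper, forcing a PHY to a fixed
100 Mbps/full-duplex link (bus and addr are assumed to be a valid mii_bus
handle and PHY address):

	u16 bmcr = mii_bmcr_encode_fixed(SPEED_100, DUPLEX_FULL);

	/* bmcr == (BMCR_SPEED100 | BMCR_FULLDPLX); autoneg stays disabled. */
	mdiobus_write(bus, addr, MII_BMCR, bmcr);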
diff --git a/include/linux/mii_timestamper.h b/include/linux/mii_timestamper.h
index fa940bbaf8ae..3102c425c8e0 100644
--- a/include/linux/mii_timestamper.h
+++ b/include/linux/mii_timestamper.h
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
+#include <linux/net_tstamp.h>
struct phy_device;
@@ -26,7 +27,9 @@ struct phy_device;
* as soon as a timestamp becomes available. One of the PTP_CLASS_
* values is passed in 'type'.
*
- * @hwtstamp: Handles SIOCSHWTSTAMP ioctl for hardware time stamping.
+ * @hwtstamp_set: Handles SIOCSHWTSTAMP ioctl for hardware time stamping.
+ *
+ * @hwtstamp_get: Handles SIOCGHWTSTAMP ioctl for hardware time stamping.
*
* @link_state: Allows the device to respond to changes in the link
* state. The caller invokes this function while holding
@@ -50,14 +53,18 @@ struct mii_timestamper {
void (*txtstamp)(struct mii_timestamper *mii_ts,
struct sk_buff *skb, int type);
- int (*hwtstamp)(struct mii_timestamper *mii_ts,
- struct ifreq *ifreq);
+ int (*hwtstamp_set)(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *kernel_config,
+ struct netlink_ext_ack *extack);
+
+ int (*hwtstamp_get)(struct mii_timestamper *mii_ts,
+ struct kernel_hwtstamp_config *kernel_config);
void (*link_state)(struct mii_timestamper *mii_ts,
struct phy_device *phydev);
int (*ts_info)(struct mii_timestamper *mii_ts,
- struct ethtool_ts_info *ts_info);
+ struct kernel_ethtool_ts_info *ts_info);
struct device *device;
};
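
A sketch of what a PHY timestamping driver's new callback might look like,
assuming the driver caches the accepted configuration for hwtstamp_get()
(the example_* name is illustrative, not part of this header):

	static int example_hwtstamp_set(struct mii_timestamper *mii_ts,
					struct kernel_hwtstamp_config *cfg,
					struct netlink_ext_ack *extack)
	{
		if (cfg->tx_type != HWTSTAMP_TX_OFF &&
		    cfg->tx_type != HWTSTAMP_TX_ON) {
			NL_SET_ERR_MSG(extack, "unsupported tx_type");
			return -ERANGE;
		}

		/* ...program the PHY, then cache cfg for hwtstamp_get()... */
		return 0;
	}

Unlike the old ioctl-based hook, the config arrives already unpacked and the
extack lets the driver report precise errors back over netlink.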
diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h
index 44077837385f..79ddc0adbf2b 100644
--- a/include/linux/min_heap.h
+++ b/include/linux/min_heap.h
@@ -6,129 +6,472 @@
#include <linux/string.h>
#include <linux/types.h>
+/*
+ * The Min Heap API provides utilities for managing min-heaps, a binary tree
+ * structure where each node's value is less than or equal to its children's
+ * values, ensuring the smallest element is at the root.
+ *
+ * Users should avoid directly calling functions prefixed with __min_heap_*().
+ * Instead, use the provided macro wrappers.
+ *
+ * For further details and examples, refer to Documentation/core-api/min_heap.rst.
+ */
+
/**
- * struct min_heap - Data structure to hold a min-heap.
- * @data: Start of array holding the heap elements.
+ * Data structure to hold a min-heap.
* @nr: Number of elements currently in the heap.
* @size: Maximum number of elements that can be held in current storage.
+ * @data: Pointer to the start of array holding the heap elements.
+ * @preallocated: Start of the static preallocated array holding the heap elements.
*/
-struct min_heap {
- void *data;
- int nr;
- int size;
-};
+#define MIN_HEAP_PREALLOCATED(_type, _name, _nr) \
+struct _name { \
+ size_t nr; \
+ size_t size; \
+ _type *data; \
+ _type preallocated[_nr]; \
+}
+
+#define DEFINE_MIN_HEAP(_type, _name) MIN_HEAP_PREALLOCATED(_type, _name, 0)
+
+typedef DEFINE_MIN_HEAP(char, min_heap_char) min_heap_char;
+
+#define __minheap_cast(_heap) (typeof((_heap)->data[0]) *)
+#define __minheap_obj_size(_heap) sizeof((_heap)->data[0])
/**
* struct min_heap_callbacks - Data/functions to customise the min_heap.
- * @elem_size: The nr of each element in bytes.
* @less: Partial order function for this heap.
* @swp: Swap elements function.
*/
struct min_heap_callbacks {
- int elem_size;
- bool (*less)(const void *lhs, const void *rhs);
- void (*swp)(void *lhs, void *rhs);
+ bool (*less)(const void *lhs, const void *rhs, void *args);
+ void (*swp)(void *lhs, void *rhs, void *args);
};
+/**
+ * is_aligned - is this pointer & size okay for word-wide copying?
+ * @base: pointer to data
+ * @size: size of each element
+ * @align: required alignment (typically 4 or 8)
+ *
+ * Returns true if elements can be copied using word loads and stores.
+ * The size must be a multiple of the alignment, and the base address must
+ * be aligned as well unless CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set.
+ *
+ * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
+ * to "if ((a | b) & mask)", so we do that by hand.
+ */
+__attribute_const__ __always_inline
+static bool is_aligned(const void *base, size_t size, unsigned char align)
+{
+ unsigned char lsbits = (unsigned char)size;
+
+ (void)base;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ lsbits |= (unsigned char)(uintptr_t)base;
+#endif
+ return (lsbits & (align - 1)) == 0;
+}
+
+/**
+ * swap_words_32 - swap two elements in 32-bit chunks
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 4)
+ *
+ * Exchange the two objects in memory. This exploits base+index addressing,
+ * which basically all CPUs have, to minimize loop overhead computations.
+ *
+ * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
+ * bottom of the loop, even though the zero flag is still valid from the
+ * subtract (since the intervening mov instructions don't alter the flags).
+ * Gcc 8.1.0 doesn't have that problem.
+ */
+static __always_inline
+void swap_words_32(void *a, void *b, size_t n)
+{
+ do {
+ u32 t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+ } while (n);
+}
+
+/**
+ * swap_words_64 - swap two elements in 64-bit chunks
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 8)
+ *
+ * Exchange the two objects in memory. This exploits base+index
+ * addressing, which basically all CPUs have, to minimize loop overhead
+ * computations.
+ *
+ * We'd like to use 64-bit loads if possible. If they're not, emulating
+ * one requires base+index+4 addressing which x86 has but most other
+ * processors do not. If CONFIG_64BIT, we definitely have 64-bit loads,
+ * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
+ * x32 ABI). Are there any cases the kernel needs to worry about?
+ */
+static __always_inline
+void swap_words_64(void *a, void *b, size_t n)
+{
+ do {
+#ifdef CONFIG_64BIT
+ u64 t = *(u64 *)(a + (n -= 8));
+ *(u64 *)(a + n) = *(u64 *)(b + n);
+ *(u64 *)(b + n) = t;
+#else
+ /* Use two 32-bit transfers to avoid base+index+4 addressing */
+ u32 t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+
+ t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+#endif
+ } while (n);
+}
+
+/**
+ * swap_bytes - swap two elements a byte at a time
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size
+ *
+ * This is the fallback if alignment doesn't allow using larger chunks.
+ */
+static __always_inline
+void swap_bytes(void *a, void *b, size_t n)
+{
+ do {
+ char t = ((char *)a)[--n];
+ ((char *)a)[n] = ((char *)b)[n];
+ ((char *)b)[n] = t;
+ } while (n);
+}
+
+/*
+ * The values are arbitrary as long as they can't be confused with
+ * a pointer, but small integers make for the smallest compare
+ * instructions.
+ */
+#define SWAP_WORDS_64 ((void (*)(void *, void *, void *))0)
+#define SWAP_WORDS_32 ((void (*)(void *, void *, void *))1)
+#define SWAP_BYTES ((void (*)(void *, void *, void *))2)
+
+/*
+ * Selects the appropriate swap function based on the element size.
+ */
+static __always_inline
+void *select_swap_func(const void *base, size_t size)
+{
+ if (is_aligned(base, size, 8))
+ return SWAP_WORDS_64;
+ else if (is_aligned(base, size, 4))
+ return SWAP_WORDS_32;
+ else
+ return SWAP_BYTES;
+}
+
+static __always_inline
+void do_swap(void *a, void *b, size_t size, void (*swap_func)(void *lhs, void *rhs, void *args),
+ void *priv)
+{
+ if (swap_func == SWAP_WORDS_64)
+ swap_words_64(a, b, size);
+ else if (swap_func == SWAP_WORDS_32)
+ swap_words_32(a, b, size);
+ else if (swap_func == SWAP_BYTES)
+ swap_bytes(a, b, size);
+ else
+ swap_func(a, b, priv);
+}
+
+/**
+ * parent - given the offset of the child, find the offset of the parent.
+ * @i: the offset of the heap element whose parent is sought. Non-zero.
+ * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
+ * @size: size of each element
+ *
+ * In terms of array indexes, the parent of element j = @i/@size is simply
+ * (j-1)/2. But when working in byte offsets, we can't use implicit
+ * truncation of integer divides.
+ *
+ * Fortunately, we only need one bit of the quotient, not the full divide.
+ * @size has a least significant bit. That bit will be clear if @i is
+ * an even multiple of @size, and set if it's an odd multiple.
+ *
+ * Logically, we're doing "if (i & lsbit) i -= size;", but since the
+ * branch is unpredictable, it's done with a bit of clever branch-free
+ * code instead.
+ */
+__attribute_const__ __always_inline
+static size_t parent(size_t i, unsigned int lsbit, size_t size)
+{
+ i -= size;
+ i -= size & -(i & lsbit);
+ return i / 2;
+}
+
+/* Initialize a min-heap. */
+static __always_inline
+void __min_heap_init_inline(min_heap_char *heap, void *data, size_t size)
+{
+ heap->nr = 0;
+ heap->size = size;
+ if (data)
+ heap->data = data;
+ else
+ heap->data = heap->preallocated;
+}
+
+#define min_heap_init_inline(_heap, _data, _size) \
+ __min_heap_init_inline(container_of(&(_heap)->nr, min_heap_char, nr), _data, _size)
+
+/* Get the minimum element from the heap. */
+static __always_inline
+void *__min_heap_peek_inline(struct min_heap_char *heap)
+{
+ return heap->nr ? heap->data : NULL;
+}
+
+#define min_heap_peek_inline(_heap) \
+ (__minheap_cast(_heap) \
+ __min_heap_peek_inline(container_of(&(_heap)->nr, min_heap_char, nr)))
+
+/* Check if the heap is full. */
+static __always_inline
+bool __min_heap_full_inline(min_heap_char *heap)
+{
+ return heap->nr == heap->size;
+}
+
+#define min_heap_full_inline(_heap) \
+ __min_heap_full_inline(container_of(&(_heap)->nr, min_heap_char, nr))
+
/* Sift the element at pos down the heap. */
static __always_inline
-void min_heapify(struct min_heap *heap, int pos,
- const struct min_heap_callbacks *func)
+void __min_heap_sift_down_inline(min_heap_char *heap, size_t pos, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
{
- void *left, *right, *parent, *smallest;
+ const unsigned long lsbit = elem_size & -elem_size;
void *data = heap->data;
+ void (*swp)(void *lhs, void *rhs, void *args) = func->swp;
+ /* pre-scale counters for performance */
+ size_t a = pos * elem_size;
+ size_t b, c, d;
+ size_t n = heap->nr * elem_size;
- for (;;) {
- if (pos * 2 + 1 >= heap->nr)
- break;
+ if (!swp)
+ swp = select_swap_func(data, elem_size);
+
+ /* Find the sift-down path all the way to the leaves. */
+ for (b = a; c = 2 * b + elem_size, (d = c + elem_size) < n;)
+ b = func->less(data + c, data + d, args) ? c : d;
+
+ /* Special case for the last leaf with no sibling. */
+ if (d == n)
+ b = c;
+
+ /* Backtrack to the correct location. */
+ while (b != a && func->less(data + a, data + b, args))
+ b = parent(b, lsbit, elem_size);
+
+ /* Shift the element into its correct place. */
+ c = b;
+ while (b != a) {
+ b = parent(b, lsbit, elem_size);
+ do_swap(data + b, data + c, elem_size, swp, args);
+ }
+}
+
+#define min_heap_sift_down_inline(_heap, _pos, _func, _args) \
+ __min_heap_sift_down_inline(container_of(&(_heap)->nr, min_heap_char, nr), _pos, \
+ __minheap_obj_size(_heap), _func, _args)
+
+/* Sift up ith element from the heap, O(log2(nr)). */
+static __always_inline
+void __min_heap_sift_up_inline(min_heap_char *heap, size_t elem_size, size_t idx,
+ const struct min_heap_callbacks *func, void *args)
+{
+ const unsigned long lsbit = elem_size & -elem_size;
+ void *data = heap->data;
+ void (*swp)(void *lhs, void *rhs, void *args) = func->swp;
+ /* pre-scale counters for performance */
+ size_t a = idx * elem_size, b;
- left = data + ((pos * 2 + 1) * func->elem_size);
- parent = data + (pos * func->elem_size);
- smallest = parent;
- if (func->less(left, smallest))
- smallest = left;
-
- if (pos * 2 + 2 < heap->nr) {
- right = data + ((pos * 2 + 2) * func->elem_size);
- if (func->less(right, smallest))
- smallest = right;
- }
- if (smallest == parent)
+ if (!swp)
+ swp = select_swap_func(data, elem_size);
+
+ while (a) {
+ b = parent(a, lsbit, elem_size);
+ if (func->less(data + b, data + a, args))
break;
- func->swp(smallest, parent);
- if (smallest == left)
- pos = (pos * 2) + 1;
- else
- pos = (pos * 2) + 2;
+ do_swap(data + a, data + b, elem_size, swp, args);
+ a = b;
}
}
+#define min_heap_sift_up_inline(_heap, _idx, _func, _args) \
+ __min_heap_sift_up_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _idx, _func, _args)
+
/* Floyd's approach to heapification that is O(nr). */
static __always_inline
-void min_heapify_all(struct min_heap *heap,
- const struct min_heap_callbacks *func)
+void __min_heapify_all_inline(min_heap_char *heap, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
{
- int i;
+ ssize_t i;
- for (i = heap->nr / 2; i >= 0; i--)
- min_heapify(heap, i, func);
+ for (i = heap->nr / 2 - 1; i >= 0; i--)
+ __min_heap_sift_down_inline(heap, i, elem_size, func, args);
}
+#define min_heapify_all_inline(_heap, _func, _args) \
+ __min_heapify_all_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _func, _args)
+
/* Remove minimum element from the heap, O(log2(nr)). */
static __always_inline
-void min_heap_pop(struct min_heap *heap,
- const struct min_heap_callbacks *func)
+bool __min_heap_pop_inline(min_heap_char *heap, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
{
void *data = heap->data;
if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap"))
- return;
+ return false;
/* Place last element at the root (position 0) and then sift down. */
heap->nr--;
- memcpy(data, data + (heap->nr * func->elem_size), func->elem_size);
- min_heapify(heap, 0, func);
+ memcpy(data, data + (heap->nr * elem_size), elem_size);
+ __min_heap_sift_down_inline(heap, 0, elem_size, func, args);
+
+ return true;
}
+#define min_heap_pop_inline(_heap, _func, _args) \
+ __min_heap_pop_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _func, _args)
+
/*
* Remove the minimum element and then push the given element. The
* implementation performs 1 sift (O(log2(nr))) and is therefore more
* efficient than a pop followed by a push that does 2.
*/
static __always_inline
-void min_heap_pop_push(struct min_heap *heap,
- const void *element,
- const struct min_heap_callbacks *func)
+void __min_heap_pop_push_inline(min_heap_char *heap, const void *element, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
{
- memcpy(heap->data, element, func->elem_size);
- min_heapify(heap, 0, func);
+ memcpy(heap->data, element, elem_size);
+ __min_heap_sift_down_inline(heap, 0, elem_size, func, args);
}
+#define min_heap_pop_push_inline(_heap, _element, _func, _args) \
+ __min_heap_pop_push_inline(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
+ __minheap_obj_size(_heap), _func, _args)
+
/* Push an element on to the heap, O(log2(nr)). */
static __always_inline
-void min_heap_push(struct min_heap *heap, const void *element,
- const struct min_heap_callbacks *func)
+bool __min_heap_push_inline(min_heap_char *heap, const void *element, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args)
{
void *data = heap->data;
- void *child, *parent;
- int pos;
+ size_t pos;
if (WARN_ONCE(heap->nr >= heap->size, "Pushing on a full heap"))
- return;
+ return false;
/* Place at the end of data. */
pos = heap->nr;
- memcpy(data + (pos * func->elem_size), element, func->elem_size);
+ memcpy(data + (pos * elem_size), element, elem_size);
heap->nr++;
/* Sift child at pos up. */
- for (; pos > 0; pos = (pos - 1) / 2) {
- child = data + (pos * func->elem_size);
- parent = data + ((pos - 1) / 2) * func->elem_size;
- if (func->less(parent, child))
- break;
- func->swp(parent, child);
- }
+ __min_heap_sift_up_inline(heap, elem_size, pos, func, args);
+
+ return true;
}
+#define min_heap_push_inline(_heap, _element, _func, _args) \
+ __min_heap_push_inline(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
+ __minheap_obj_size(_heap), _func, _args)
+
+/* Remove ith element from the heap, O(log2(nr)). */
+static __always_inline
+bool __min_heap_del_inline(min_heap_char *heap, size_t elem_size, size_t idx,
+ const struct min_heap_callbacks *func, void *args)
+{
+ void *data = heap->data;
+ void (*swp)(void *lhs, void *rhs, void *args) = func->swp;
+
+ if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap"))
+ return false;
+
+ if (!swp)
+ swp = select_swap_func(data, elem_size);
+
+ /* Place last element at the root (position 0) and then sift down. */
+ heap->nr--;
+ if (idx == heap->nr)
+ return true;
+ do_swap(data + (idx * elem_size), data + (heap->nr * elem_size), elem_size, swp, args);
+ __min_heap_sift_up_inline(heap, elem_size, idx, func, args);
+ __min_heap_sift_down_inline(heap, idx, elem_size, func, args);
+
+ return true;
+}
+
+#define min_heap_del_inline(_heap, _idx, _func, _args) \
+ __min_heap_del_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _idx, _func, _args)
+
+void __min_heap_init(min_heap_char *heap, void *data, size_t size);
+void *__min_heap_peek(struct min_heap_char *heap);
+bool __min_heap_full(min_heap_char *heap);
+void __min_heap_sift_down(min_heap_char *heap, size_t pos, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args);
+void __min_heap_sift_up(min_heap_char *heap, size_t elem_size, size_t idx,
+ const struct min_heap_callbacks *func, void *args);
+void __min_heapify_all(min_heap_char *heap, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args);
+bool __min_heap_pop(min_heap_char *heap, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args);
+void __min_heap_pop_push(min_heap_char *heap, const void *element, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args);
+bool __min_heap_push(min_heap_char *heap, const void *element, size_t elem_size,
+ const struct min_heap_callbacks *func, void *args);
+bool __min_heap_del(min_heap_char *heap, size_t elem_size, size_t idx,
+ const struct min_heap_callbacks *func, void *args);
+
+#define min_heap_init(_heap, _data, _size) \
+ __min_heap_init(container_of(&(_heap)->nr, min_heap_char, nr), _data, _size)
+#define min_heap_peek(_heap) \
+ (__minheap_cast(_heap) __min_heap_peek(container_of(&(_heap)->nr, min_heap_char, nr)))
+#define min_heap_full(_heap) \
+ __min_heap_full(container_of(&(_heap)->nr, min_heap_char, nr))
+#define min_heap_sift_down(_heap, _pos, _func, _args) \
+ __min_heap_sift_down(container_of(&(_heap)->nr, min_heap_char, nr), _pos, \
+ __minheap_obj_size(_heap), _func, _args)
+#define min_heap_sift_up(_heap, _idx, _func, _args) \
+ __min_heap_sift_up(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _idx, _func, _args)
+#define min_heapify_all(_heap, _func, _args) \
+ __min_heapify_all(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _func, _args)
+#define min_heap_pop(_heap, _func, _args) \
+ __min_heap_pop(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _func, _args)
+#define min_heap_pop_push(_heap, _element, _func, _args) \
+ __min_heap_pop_push(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
+ __minheap_obj_size(_heap), _func, _args)
+#define min_heap_push(_heap, _element, _func, _args) \
+ __min_heap_push(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
+ __minheap_obj_size(_heap), _func, _args)
+#define min_heap_del(_heap, _idx, _func, _args) \
+ __min_heap_del(container_of(&(_heap)->nr, min_heap_char, nr), \
+ __minheap_obj_size(_heap), _idx, _func, _args)
+
#endif /* _LINUX_MIN_HEAP_H */
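
Pulling the new API together, a minimal sketch of a typed heap of ints using
the out-of-line wrappers (a NULL swp callback selects the built-in
word-at-a-time swap):

	DEFINE_MIN_HEAP(int, min_heap_int);

	static bool int_less(const void *lhs, const void *rhs, void *args)
	{
		return *(const int *)lhs < *(const int *)rhs;
	}

	static const struct min_heap_callbacks cb = {
		.less = int_less,
		.swp  = NULL,
	};

	static void example(void)
	{
		int backing[8];
		struct min_heap_int heap;
		int v = 3;

		min_heap_init(&heap, backing, ARRAY_SIZE(backing));
		min_heap_push(&heap, &v, &cb, NULL);
		/* *min_heap_peek(&heap) == 3 */
		min_heap_pop(&heap, &cb, NULL);
	}

The _inline variants generate the same logic at the call site, for
performance-critical users that cannot afford an out-of-line call.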
diff --git a/include/linux/minmax.h b/include/linux/minmax.h
index c0f57b0c64d9..a0158db54a04 100644
--- a/include/linux/minmax.h
+++ b/include/linux/minmax.h
@@ -2,60 +2,137 @@
#ifndef _LINUX_MINMAX_H
#define _LINUX_MINMAX_H
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <linux/types.h>
+
/*
- * min()/max()/clamp() macros must accomplish three things:
+ * min()/max()/clamp() macros must accomplish several things:
*
- * - avoid multiple evaluations of the arguments (so side-effects like
+ * - Avoid multiple evaluations of the arguments (so side-effects like
* "x++" happen only once) when non-constant.
- * - perform strict type-checking (to generate warnings instead of
- * nasty runtime surprises). See the "unnecessary" pointer comparison
- * in __typecheck().
- * - retain result as a constant expressions when called with only
- * constant expressions (to avoid tripping VLA warnings in stack
- * allocation usage).
+ * - Perform signed v unsigned type-checking (to generate compile
+ * errors instead of nasty runtime surprises).
+ * - Unsigned char/short are always promoted to signed int and can be
+ * compared against signed or unsigned arguments.
+ * - Unsigned arguments can be compared against non-negative signed constants.
+ * - Comparison of a signed argument against an unsigned constant fails
+ * even if the constant is below __INT_MAX__ and could be cast to int.
*/
#define __typecheck(x, y) \
(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
/*
- * This returns a constant expression while determining if an argument is
- * a constant expression, most importantly without evaluating the argument.
- * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
+ * __sign_use for integer expressions:
+ * bit #0 set if ok for unsigned comparisons
+ * bit #1 set if ok for signed comparisons
+ *
+ * In particular, statically non-negative signed integer expressions
+ * are ok for both.
+ *
+ * NOTE! Unsigned types smaller than 'int' are implicitly converted to 'int'
+ * in expressions, and are accepted for signed conversions for now.
+ * This is debatable.
+ *
+ * Note that 'x' is the original expression, and 'ux' is the unique variable
+ * that contains the value.
+ *
+ * We use 'ux' for pure type checking, and 'x' for when we need to look at the
+ * value (but without evaluating it for side effects! Be careful to only ever
+ * evaluate it with sizeof() or __builtin_constant_p() and the like).
+ *
+ * Pointers end up being checked by the normal C type rules at the actual
+ * comparison, and these expressions only need to be careful to not cause
+ * warnings for pointer use.
*/
-#define __is_constexpr(x) \
- (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+#define __sign_use(ux) (is_signed_type(typeof(ux)) ? \
+ (2 + __is_nonneg(ux)) : (1 + 2 * (sizeof(ux) < 4)))
-#define __no_side_effects(x, y) \
- (__is_constexpr(x) && __is_constexpr(y))
+/*
+ * Check whether a signed value is always non-negative.
+ *
+ * A cast is needed to avoid any warnings from values that aren't signed
+ * integer types (in which case the result doesn't matter).
+ *
+ * On 64-bit any integer or pointer type can safely be cast to 'long long'.
+ * But on 32-bit we need to avoid warnings about casting pointers to integers
+ * of different sizes without truncating 64-bit values so 'long' or 'long long'
+ * must be used depending on the size of the value.
+ *
+ * This does not work for 128-bit signed integers since the cast would truncate
+ * them, but we do not use s128 types in the kernel (we do use 'u128',
+ * but they are handled by the !is_signed_type() case).
+ */
+#if __SIZEOF_POINTER__ == __SIZEOF_LONG_LONG__
+#define __is_nonneg(ux) statically_true((long long)(ux) >= 0)
+#else
+#define __is_nonneg(ux) statically_true( \
+ (typeof(__builtin_choose_expr(sizeof(ux) > 4, 1LL, 1L)))(ux) >= 0)
+#endif
-#define __safe_cmp(x, y) \
- (__typecheck(x, y) && __no_side_effects(x, y))
+#define __types_ok(ux, uy) \
+ (__sign_use(ux) & __sign_use(uy))
-#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))
+#define __types_ok3(ux, uy, uz) \
+ (__sign_use(ux) & __sign_use(uy) & __sign_use(uz))
-#define __cmp_once(x, y, unique_x, unique_y, op) ({ \
- typeof(x) unique_x = (x); \
- typeof(y) unique_y = (y); \
- __cmp(unique_x, unique_y, op); })
+#define __cmp_op_min <
+#define __cmp_op_max >
-#define __careful_cmp(x, y, op) \
- __builtin_choose_expr(__safe_cmp(x, y), \
- __cmp(x, y, op), \
- __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
+#define __cmp(op, x, y) ((x) __cmp_op_##op (y) ? (x) : (y))
+
+#define __cmp_once_unique(op, type, x, y, ux, uy) \
+ ({ type ux = (x); type uy = (y); __cmp(op, ux, uy); })
+
+#define __cmp_once(op, type, x, y) \
+ __cmp_once_unique(op, type, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_))
+
+#define __careful_cmp_once(op, x, y, ux, uy) ({ \
+ auto ux = (x); auto uy = (y); \
+ BUILD_BUG_ON_MSG(!__types_ok(ux, uy), \
+ #op"("#x", "#y") signedness error"); \
+ __cmp(op, ux, uy); })
+
+#define __careful_cmp(op, x, y) \
+ __careful_cmp_once(op, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_))
/**
* min - return minimum of two values of the same or compatible types
* @x: first value
* @y: second value
*/
-#define min(x, y) __careful_cmp(x, y, <)
+#define min(x, y) __careful_cmp(min, x, y)
/**
* max - return maximum of two values of the same or compatible types
* @x: first value
* @y: second value
*/
-#define max(x, y) __careful_cmp(x, y, >)
+#define max(x, y) __careful_cmp(max, x, y)
+
+/**
+ * umin - return minimum of two non-negative values
+ * Signed types are zero extended to match a larger unsigned type.
+ * @x: first value
+ * @y: second value
+ */
+#define umin(x, y) \
+ __careful_cmp(min, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
+
+/**
+ * umax - return maximum of two non-negative values
+ * @x: first value
+ * @y: second value
+ */
+#define umax(x, y) \
+ __careful_cmp(max, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
+
+#define __careful_op3(op, x, y, z, ux, uy, uz) ({ \
+ auto ux = (x); auto uy = (y); auto uz = (z); \
+ BUILD_BUG_ON_MSG(!__types_ok3(ux, uy, uz), \
+ #op"3("#x", "#y", "#z") signedness error"); \
+ __cmp(op, ux, __cmp(op, uy, uz)); })
/**
* min3 - return minimum of three values
@@ -63,7 +140,8 @@
* @y: second value
* @z: third value
*/
-#define min3(x, y, z) min((typeof(x))min(x, y), z)
+#define min3(x, y, z) \
+ __careful_op3(min, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_))
/**
* max3 - return maximum of three values
@@ -71,7 +149,24 @@
* @y: second value
* @z: third value
*/
-#define max3(x, y, z) max((typeof(x))max(x, y), z)
+#define max3(x, y, z) \
+ __careful_op3(max, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_))
+
+/**
+ * min_t - return minimum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
+#define min_t(type, x, y) __cmp_once(min, type, x, y)
+
+/**
+ * max_t - return maximum of two values, using the specified type
+ * @type: data type to use
+ * @x: first value
+ * @y: second value
+ */
+#define max_t(type, x, y) __cmp_once(max, type, x, y)
/**
* min_not_zero - return the minimum that is _not_ zero, unless both are zero
@@ -83,39 +178,32 @@
typeof(y) __y = (y); \
__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
+#define __clamp(val, lo, hi) \
+ ((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
+
+#define __clamp_once(type, val, lo, hi, uval, ulo, uhi) ({ \
+ type uval = (val); \
+ type ulo = (lo); \
+ type uhi = (hi); \
+ BUILD_BUG_ON_MSG(statically_true(ulo > uhi), \
+ "clamp() low limit " #lo " greater than high limit " #hi); \
+ BUILD_BUG_ON_MSG(!__types_ok3(uval, ulo, uhi), \
+ "clamp("#val", "#lo", "#hi") signedness error"); \
+ __clamp(uval, ulo, uhi); })
+
+#define __careful_clamp(type, val, lo, hi) \
+ __clamp_once(type, val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_))
+
/**
- * clamp - return a value clamped to a given range with strict typechecking
+ * clamp - return a value clamped to a given range with typechecking
* @val: current value
* @lo: lowest allowable value
* @hi: highest allowable value
*
- * This macro does strict typechecking of @lo/@hi to make sure they are of the
- * same type as @val. See the unnecessary pointer comparisons.
- */
-#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
-
-/*
- * ..and if you can't take the strict
- * types, you can specify one yourself.
- *
- * Or not use min/max/clamp at all, of course.
+ * This macro checks @val/@lo/@hi to make sure they have compatible
+ * signedness.
*/
-
-/**
- * min_t - return minimum of two values, using the specified type
- * @type: data type to use
- * @x: first value
- * @y: second value
- */
-#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)
-
-/**
- * max_t - return maximum of two values, using the specified type
- * @type: data type to use
- * @x: first value
- * @y: second value
- */
-#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)
+#define clamp(val, lo, hi) __careful_clamp(auto, val, lo, hi)
/**
* clamp_t - return a value clamped to a given range using a given type
@@ -127,7 +215,7 @@
* This macro does no typechecking and uses temporary variables of type
* @type to make all the comparisons.
*/
-#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
+#define clamp_t(type, val, lo, hi) __careful_clamp(type, val, lo, hi)
/**
* clamp_val - return a value clamped to a given range using val's type
@@ -140,7 +228,76 @@
* type and @lo and @hi are literals that will otherwise be assigned a signed
* integer type.
*/
-#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
+#define clamp_val(val, lo, hi) __careful_clamp(typeof(val), val, lo, hi)
+
+/*
+ * Do not check the array parameter using __must_be_array().
+ * In the following legitimate use-case, where the "array" passed is a simple
+ * pointer, __must_be_array() would fail the build.
+ * --- 8< ---
+ * int *buff
+ * ...
+ * min = min_array(buff, nb_items);
+ * --- 8< ---
+ *
+ * The first typeof(&(array)[0]) is needed in order to support arrays of both
+ * 'int *buff' and 'int buff[N]' types.
+ *
+ * The array can be an array of const items.
+ * typeof() keeps the const qualifier. Use __unqual_scalar_typeof() in order
+ * to discard the const qualifier for the __element variable.
+ */
+#define __minmax_array(op, array, len) ({ \
+ typeof(&(array)[0]) __array = (array); \
+ typeof(len) __len = (len); \
+ __unqual_scalar_typeof(__array[0]) __element = __array[--__len];\
+ while (__len--) \
+ __element = op(__element, __array[__len]); \
+ __element; })
+
+/**
+ * min_array - return minimum of values present in an array
+ * @array: array
+ * @len: array length
+ *
+ * Note that @len must not be zero (empty array).
+ */
+#define min_array(array, len) __minmax_array(min, array, len)
+
+/**
+ * max_array - return maximum of values present in an array
+ * @array: array
+ * @len: array length
+ *
+ * Note that @len must not be zero (empty array).
+ */
+#define max_array(array, len) __minmax_array(max, array, len)
+
+static inline bool in_range64(u64 val, u64 start, u64 len)
+{
+ return (val - start) < len;
+}
+
+static inline bool in_range32(u32 val, u32 start, u32 len)
+{
+ return (val - start) < len;
+}
+
+/**
+ * in_range - Determine if a value lies within a range.
+ * @val: Value to test.
+ * @start: First value in range.
+ * @len: Number of values in range.
+ *
+ * This is more efficient than "if (start <= val && val < (start + len))".
+ * It also gives a different answer if @start + @len overflows the size of
+ * the type by a sufficient amount to encompass @val. Decide for yourself
+ * which behaviour you want, or prove that start + len never overflows.
+ * Do not blindly replace one form with the other.
+ */
+#define in_range(val, start, len) \
+ ((sizeof(start) | sizeof(len) | sizeof(val)) <= sizeof(u32) ? \
+ in_range32(val, start, len) : in_range64(val, start, len))
/**
* swap - swap values of @a and @b
@@ -150,4 +307,13 @@
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+/*
+ * Use these carefully: no type checking, and uses the arguments
+ * multiple times. Use for obvious constants only.
+ */
+#define MIN(a, b) __cmp(min, a, b)
+#define MAX(a, b) __cmp(max, a, b)
+#define MIN_T(type, a, b) __cmp(min, (type)(a), (type)(b))
+#define MAX_T(type, a, b) __cmp(max, (type)(a), (type)(b))
+
#endif /* _LINUX_MINMAX_H */
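
The practical effect of the stricter checks, in brief (assuming these lines
sit in a function body):

	int nr = 4;
	unsigned long limit = 8;

	min(nr, limit);		/* build error: signed v unsigned */
	min(limit, 16);		/* ok: 16 is a non-negative constant */
	min_t(unsigned long, nr, limit);	/* ok: explicit common type */
	umin(nr, limit);	/* ok, but nr must be known non-negative */
	clamp(nr, 0, 10);	/* ok: all three signed */

Note that min(nr, limit) previously compiled with only a pointer-comparison
warning; the BUILD_BUG_ON_MSG() turns the signedness mismatch into a hard
error.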
diff --git a/include/linux/misc/keba.h b/include/linux/misc/keba.h
new file mode 100644
index 000000000000..a81d6fa70851
--- /dev/null
+++ b/include/linux/misc/keba.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2024, KEBA Industrial Automation Gmbh */
+
+#ifndef _LINUX_MISC_KEBA_H
+#define _LINUX_MISC_KEBA_H
+
+#include <linux/auxiliary_bus.h>
+
+struct i2c_board_info;
+struct spi_board_info;
+
+/**
+ * struct keba_i2c_auxdev - KEBA I2C auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of I2C controller IO memory
+ * @info_size: number of I2C devices to be probed
+ * @info: I2C devices to be probed
+ */
+struct keba_i2c_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+ int info_size;
+ struct i2c_board_info *info;
+};
+
+/**
+ * struct keba_spi_auxdev - KEBA SPI auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of SPI controller IO memory
+ * @info_size: number of SPI devices to be probed
+ * @info: SPI devices to be probed
+ */
+struct keba_spi_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+ int info_size;
+ struct spi_board_info *info;
+};
+
+/**
+ * struct keba_fan_auxdev - KEBA fan auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of fan controller IO memory
+ */
+struct keba_fan_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+};
+
+/**
+ * struct keba_batt_auxdev - KEBA battery auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of battery controller IO memory
+ */
+struct keba_batt_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+};
+
+/**
+ * struct keba_uart_auxdev - KEBA UART auxiliary device
+ * @auxdev: auxiliary device object
+ * @io: address range of UART controller IO memory
+ * @irq: number of UART controller interrupt
+ */
+struct keba_uart_auxdev {
+ struct auxiliary_device auxdev;
+ struct resource io;
+ unsigned int irq;
+};
+
+#endif /* _LINUX_MISC_KEBA_H */
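
These structs are filled in by the KEBA CP controller driver and handed to the
matching sub-drivers via the auxiliary bus. A rough sketch for one I2C
controller (the register offsets, the lm75 sensor and the device id are made
up for the example):

	static struct i2c_board_info example_i2c_info[] = {
		{ I2C_BOARD_INFO("lm75", 0x48) },
	};

	static struct keba_i2c_auxdev example_i2c = {
		.auxdev = {
			.name = "i2c",
			.id   = 0,
		},
		.io	   = DEFINE_RES_MEM(0x0100, 0x40),
		.info_size = ARRAY_SIZE(example_i2c_info),
		.info	   = example_i2c_info,
	};

The parent driver then runs the usual auxiliary_device_init() /
auxiliary_device_add() pair on &example_i2c.auxdev.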
diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h
index da2367e2ac1e..0cb36a3ffc47 100644
--- a/include/linux/misc_cgroup.h
+++ b/include/linux/misc_cgroup.h
@@ -9,15 +9,20 @@
#define _MISC_CGROUP_H_
/**
- * Types of misc cgroup entries supported by the host.
+ * enum misc_res_type - Types of misc cgroup entries supported by the host.
*/
enum misc_res_type {
#ifdef CONFIG_KVM_AMD_SEV
- /* AMD SEV ASIDs resource */
+ /** @MISC_CG_RES_SEV: AMD SEV ASIDs resource */
MISC_CG_RES_SEV,
- /* AMD SEV-ES ASIDs resource */
+ /** @MISC_CG_RES_SEV_ES: AMD SEV-ES ASIDs resource */
MISC_CG_RES_SEV_ES,
#endif
+#ifdef CONFIG_INTEL_TDX_HOST
+ /** @MISC_CG_RES_TDX: Intel TDX HKIDs resource */
+ MISC_CG_RES_TDX,
+#endif
+ /** @MISC_CG_RES_TYPES: count of enum misc_res_type constants */
MISC_CG_RES_TYPES
};
@@ -30,31 +35,38 @@ struct misc_cg;
/**
* struct misc_res: Per cgroup per misc type resource
* @max: Maximum limit on the resource.
+ * @watermark: Historical maximum usage of the resource.
* @usage: Current usage of the resource.
- * @failed: True if charged failed for the resource in a cgroup.
+ * @events: Number of times the resource limit was exceeded.
+ * @events_local: Number of times the limit was exceeded in this cgroup alone,
+ * not counting its descendants.
*/
struct misc_res {
- unsigned long max;
- atomic_long_t usage;
- bool failed;
+ u64 max;
+ atomic64_t watermark;
+ atomic64_t usage;
+ atomic64_t events;
+ atomic64_t events_local;
};
/**
* struct misc_cg - Miscellaneous controller's cgroup structure.
* @css: cgroup subsys state object.
+ * @events_file: Handle for the misc resources events file.
* @res: Array of misc resources usage in the cgroup.
*/
struct misc_cg {
struct cgroup_subsys_state css;
+
+ /* misc.events */
+ struct cgroup_file events_file;
+ /* misc.events.local */
+ struct cgroup_file events_local_file;
+
struct misc_res res[MISC_CG_RES_TYPES];
};
-unsigned long misc_cg_res_total_usage(enum misc_res_type type);
-int misc_cg_set_capacity(enum misc_res_type type, unsigned long capacity);
-int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg,
- unsigned long amount);
-void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg,
- unsigned long amount);
+int misc_cg_set_capacity(enum misc_res_type type, u64 capacity);
+int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
+void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
/**
* css_misc() - Get misc cgroup from the css.
@@ -95,27 +107,21 @@ static inline void put_misc_cg(struct misc_cg *cg)
#else /* !CONFIG_CGROUP_MISC */
-static inline unsigned long misc_cg_res_total_usage(enum misc_res_type type)
-{
- return 0;
-}
-
-static inline int misc_cg_set_capacity(enum misc_res_type type,
- unsigned long capacity)
+static inline int misc_cg_set_capacity(enum misc_res_type type, u64 capacity)
{
return 0;
}
static inline int misc_cg_try_charge(enum misc_res_type type,
struct misc_cg *cg,
- unsigned long amount)
+ u64 amount)
{
return 0;
}
static inline void misc_cg_uncharge(enum misc_res_type type,
struct misc_cg *cg,
- unsigned long amount)
+ u64 amount)
{
}
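
The charging pattern itself is unchanged; only the amounts are now u64 and
failed charges bump the new events counters. Roughly what a user such as the
SEV ASID allocator does (a simplified sketch, error paths trimmed):

	struct misc_cg *cg = get_current_misc_cg();
	int ret = misc_cg_try_charge(MISC_CG_RES_SEV, cg, 1);

	if (ret) {
		/* Over the limit: misc.events has been incremented. */
		put_misc_cg(cg);
		return ret;
	}

	/* ...use the resource; on teardown: */
	misc_cg_uncharge(MISC_CG_RES_SEV, cg, 1);
	put_misc_cg(cg);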
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 0676f18093f9..7d0aa718499c 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -44,7 +44,7 @@
#define AGPGART_MINOR 175
#define TOSH_MINOR_DEV 181
#define HWRNG_MINOR 183
-#define MICROCODE_MINOR 184
+/*#define MICROCODE_MINOR 184 unused */
#define KEYPAD_MINOR 185
#define IRNET_MINOR 187
#define D7S_MINOR 193
@@ -70,13 +70,19 @@
#define UHID_MINOR 239
#define USERIO_MINOR 240
#define VHOST_VSOCK_MINOR 241
+#define EISA_EEPROM_MINOR 241
#define RFKILL_MINOR 242
-#define MISC_DYNAMIC_MINOR 255
-struct device;
-struct attribute_group;
+/*
+ * How the misc char device minor code space divides around the macro below:
+ *
+ * < 255 : a fixed, statically assigned minor code
+ * == 255 : request a dynamically allocated minor code
+ * > 255 : a dynamically allocated minor code (1048320 codes in total)
+ */
+#define MISC_DYNAMIC_MINOR 255
-struct miscdevice {
+struct miscdevice {
int minor;
const char *name;
const struct file_operations *fops;
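
For reference, the dynamic-minor path the new comment describes looks like
this from a driver (a minimal sketch; the fops are deliberately empty):

	static const struct file_operations example_fops = {
		.owner = THIS_MODULE,
	};

	static struct miscdevice example_misc = {
		.minor = MISC_DYNAMIC_MINOR,	/* == 255: request a dynamic minor */
		.name  = "example",
		.fops  = &example_fops,
	};

	/* On success, misc_register() rewrites .minor to a value > 255. */
	ret = misc_register(&example_misc);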
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 236a7d04f891..f016263e1fcf 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -33,6 +33,7 @@
#ifndef MLX4_DEVICE_H
#define MLX4_DEVICE_H
+#include <linux/auxiliary_bus.h>
#include <linux/if_ether.h>
#include <linux/pci.h>
#include <linux/completion.h>
@@ -630,6 +631,7 @@ struct mlx4_caps {
bool wol_port[MLX4_MAX_PORTS + 1];
struct mlx4_rate_limit_caps rl_caps;
u32 health_buffer_addrs;
+ bool map_clock_to_user;
};
struct mlx4_buf_list {
@@ -888,6 +890,12 @@ struct mlx4_dev {
u8 uar_page_shift;
};
+struct mlx4_adev {
+ struct auxiliary_device adev;
+ struct mlx4_dev *mdev;
+ int idx;
+};
+
struct mlx4_clock_params {
u64 offset;
u8 bar;
@@ -1086,6 +1094,19 @@ static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
(offset & (PAGE_SIZE - 1));
}
+static inline int mlx4_is_bonded(struct mlx4_dev *dev)
+{
+ return !!(dev->flags & MLX4_FLAG_BONDED);
+}
+
+static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev)
+{
+ return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev));
+}
+
+int mlx4_queue_bond_work(struct mlx4_dev *dev, int is_bonded, u8 v2p_p1,
+ u8 v2p_p2);
+
int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
@@ -1114,7 +1135,7 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
struct mlx4_buf *buf);
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
@@ -1394,7 +1415,6 @@ int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
bool *vlan_offload_disabled);
void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
struct _rule_hw *eth_header);
-int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
@@ -1435,7 +1455,7 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
enum mlx4_net_trans_rule_id id);
int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
-int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, const unsigned char *addr,
int port, int qpn, u16 prio, u64 *reg_id);
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index a858bcb6220b..69825223081f 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -34,8 +34,12 @@
#define MLX4_DRIVER_H
#include <net/devlink.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/notifier.h>
#include <linux/mlx4/device.h>
+#define MLX4_ADEV_NAME "mlx4_core"
+
struct mlx4_dev;
#define MLX4_MAC_MASK 0xffffffffffffULL
@@ -54,64 +58,20 @@ enum {
MLX4_INTFF_BONDING = 1 << 0
};
-struct mlx4_interface {
- void * (*add) (struct mlx4_dev *dev);
- void (*remove)(struct mlx4_dev *dev, void *context);
- void (*event) (struct mlx4_dev *dev, void *context,
- enum mlx4_dev_event event, unsigned long param);
- void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
- void (*activate)(struct mlx4_dev *dev, void *context);
- struct list_head list;
+struct mlx4_adrv {
+ struct auxiliary_driver adrv;
enum mlx4_protocol protocol;
int flags;
};
-int mlx4_register_interface(struct mlx4_interface *intf);
-void mlx4_unregister_interface(struct mlx4_interface *intf);
-
-int mlx4_bond(struct mlx4_dev *dev);
-int mlx4_unbond(struct mlx4_dev *dev);
-static inline int mlx4_is_bonded(struct mlx4_dev *dev)
-{
- return !!(dev->flags & MLX4_FLAG_BONDED);
-}
-
-static inline int mlx4_is_mf_bonded(struct mlx4_dev *dev)
-{
- return (mlx4_is_bonded(dev) && mlx4_is_mfunc(dev));
-}
-
-struct mlx4_port_map {
- u8 port1;
- u8 port2;
-};
-
-int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
+int mlx4_register_auxiliary_driver(struct mlx4_adrv *madrv);
+void mlx4_unregister_auxiliary_driver(struct mlx4_adrv *madrv);
-void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
+int mlx4_register_event_notifier(struct mlx4_dev *dev,
+ struct notifier_block *nb);
+int mlx4_unregister_event_notifier(struct mlx4_dev *dev,
+ struct notifier_block *nb);
struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port);
-static inline u64 mlx4_mac_to_u64(u8 *addr)
-{
- u64 mac = 0;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++) {
- mac <<= 8;
- mac |= addr[i];
- }
- return mac;
-}
-
-static inline void mlx4_u64_to_mac(u8 *addr, u64 mac)
-{
- int i;
-
- for (i = ETH_ALEN; i > 0; i--) {
- addr[i - 1] = mac & 0xFF;
- mac >>= 8;
- }
-}
-
#endif /* MLX4_DRIVER_H */
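
The old mlx4_interface add/remove/event hooks map onto standard auxiliary bus
plumbing plus a notifier chain. A condensed sketch of what a consumer such as
mlx4_en might now register (probe/remove bodies omitted; the ".eth" id suffix
is an assumption of this example):

	static int example_probe(struct auxiliary_device *adev,
				 const struct auxiliary_device_id *id);
	static void example_remove(struct auxiliary_device *adev);

	static const struct auxiliary_device_id example_id_table[] = {
		{ .name = MLX4_ADEV_NAME ".eth" },
		{},
	};

	static struct mlx4_adrv example_adrv = {
		.adrv = {
			.name	  = "eth",
			.probe	  = example_probe,
			.remove	  = example_remove,
			.id_table = example_id_table,
		},
		.protocol = MLX4_PROT_ETH,
		.flags	  = MLX4_INTFF_BONDING,
	};

	/* In module init/exit: */
	mlx4_register_auxiliary_driver(&example_adrv);
	mlx4_unregister_auxiliary_driver(&example_adrv);

Device events that used to arrive through the ->event() callback are now
delivered to a notifier_block passed to mlx4_register_event_notifier().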
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 9db93e487496..b9a7b1319f5d 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -446,6 +446,7 @@ enum {
struct mlx4_wqe_inline_seg {
__be32 byte_count;
+ __u8 data[];
};
enum mlx4_update_qp_attr {
@@ -503,4 +504,5 @@ static inline u16 folded_qp(u32 q)
u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn);
+void mlx4_put_qp(struct mlx4_qp *qp);
#endif /* MLX4_QP_H */
diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h
deleted file mode 100644
index dacf69516002..000000000000
--- a/include/linux/mlx5/accel.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_ACCEL_H__
-#define __MLX5_ACCEL_H__
-
-#include <linux/mlx5/driver.h>
-
-enum mlx5_accel_esp_aes_gcm_keymat_iv_algo {
- MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ,
-};
-
-enum mlx5_accel_esp_flags {
- MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */
- MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0,
- MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1,
- MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
-};
-
-enum mlx5_accel_esp_action {
- MLX5_ACCEL_ESP_ACTION_DECRYPT,
- MLX5_ACCEL_ESP_ACTION_ENCRYPT,
-};
-
-enum mlx5_accel_esp_keymats {
- MLX5_ACCEL_ESP_KEYMAT_AES_NONE,
- MLX5_ACCEL_ESP_KEYMAT_AES_GCM,
-};
-
-enum mlx5_accel_esp_replay {
- MLX5_ACCEL_ESP_REPLAY_NONE,
- MLX5_ACCEL_ESP_REPLAY_BMP,
-};
-
-struct aes_gcm_keymat {
- u64 seq_iv;
- enum mlx5_accel_esp_aes_gcm_keymat_iv_algo iv_algo;
-
- u32 salt;
- u32 icv_len;
-
- u32 key_len;
- u32 aes_key[256 / 32];
-};
-
-struct mlx5_accel_esp_xfrm_attrs {
- enum mlx5_accel_esp_action action;
- u32 esn;
- __be32 spi;
- u32 seq;
- u32 tfc_pad;
- u32 flags;
- u32 sa_handle;
- enum mlx5_accel_esp_replay replay_type;
- union {
- struct {
- u32 size;
-
- } bmp;
- } replay;
- enum mlx5_accel_esp_keymats keymat_type;
- union {
- struct aes_gcm_keymat aes_gcm;
- } keymat;
-
- union {
- __be32 a4;
- __be32 a6[4];
- } saddr;
-
- union {
- __be32 a4;
- __be32 a6[4];
- } daddr;
-
- u8 is_ipv6;
-};
-
-struct mlx5_accel_esp_xfrm {
- struct mlx5_core_dev *mdev;
- struct mlx5_accel_esp_xfrm_attrs attrs;
-};
-
-enum {
- MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA = 1UL << 0,
-};
-
-enum mlx5_accel_ipsec_cap {
- MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0,
- MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA = 1 << 1,
- MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 2,
- MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 3,
- MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 4,
- MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER = 1 << 5,
- MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 6,
- MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7,
-};
-
-#ifdef CONFIG_MLX5_ACCEL
-
-u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
-
-struct mlx5_accel_esp_xfrm *
-mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags);
-void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm);
-int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs);
-
-#else
-
-static inline u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
-
-static inline struct mlx5_accel_esp_xfrm *
-mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags) { return ERR_PTR(-EOPNOTSUPP); }
-static inline void
-mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {}
-static inline int
-mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; }
-
-#endif /* CONFIG_MLX5_ACCEL */
-#endif /* __MLX5_ACCEL_H__ */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 7bfb67363434..9d47cdc727ad 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -41,7 +41,6 @@ struct mlx5_core_cq {
int cqe_sz;
__be32 *set_ci_db;
__be32 *arm_db;
- struct mlx5_uars_page *uar;
refcount_t refcount;
struct completion free;
unsigned vector;
@@ -95,9 +94,10 @@ enum {
};
enum {
- MLX5_CQ_MODIFY_PERIOD = 1 << 0,
- MLX5_CQ_MODIFY_COUNT = 1 << 1,
- MLX5_CQ_MODIFY_OVERRUN = 1 << 2,
+ MLX5_CQ_MODIFY_PERIOD = BIT(0),
+ MLX5_CQ_MODIFY_COUNT = BIT(1),
+ MLX5_CQ_MODIFY_OVERRUN = BIT(2),
+ MLX5_CQ_MODIFY_PERIOD_MODE = BIT(4),
};
enum {
@@ -183,6 +183,9 @@ static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
complete(&cq->free);
}
+void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
+int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 578c4ccae91c..d7f46a8fbfa1 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -36,6 +36,7 @@
#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/bitfield.h>
#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0
@@ -67,7 +68,7 @@
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
-#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))
+#define MLX5_ADDR_OF(typ, p, fld) ((void *)((u8 *)(p) + MLX5_BYTE_OFF(typ, fld)))
/* insert a value to a struct */
#define MLX5_SET(typ, p, fld, v) do { \
@@ -210,6 +211,7 @@ enum {
enum {
MLX5_PFAULT_SUBTYPE_WQE = 0,
MLX5_PFAULT_SUBTYPE_RDMA = 1,
+ MLX5_PFAULT_SUBTYPE_MEMORY = 2,
};
enum wqe_page_fault_type {
@@ -278,6 +280,7 @@ enum {
MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE = 1ull << 25,
MLX5_MKEY_MASK_FREE = 1ull << 29,
+ MLX5_MKEY_MASK_PAGE_SIZE_5 = 1ull << 42,
MLX5_MKEY_MASK_RELAXED_ORDERING_READ = 1ull << 47,
};
@@ -290,9 +293,10 @@ enum {
MLX5_UMR_INLINE = (1 << 7),
};
-#define MLX5_UMR_MTT_ALIGNMENT 0x40
-#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
-#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
+#define MLX5_UMR_FLEX_ALIGNMENT 0x40
+#define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt))
+#define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm))
+#define MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_ksm))
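
/* Illustrative sketch, not part of this patch: a caller would round an MTT
 * entry count up to the UMR alignment defined above roughly like this
 * (helper name is hypothetical; ALIGN() comes from <linux/align.h>).
 */
static inline size_t mlx5_umr_mtt_aligned_nentries(size_t nentries)
{
	return ALIGN(nentries, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT);
}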
#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
@@ -324,6 +328,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
+ MLX5_EVENT_TYPE_OBJECT_CHANGE = 0x27,
MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
@@ -361,11 +366,19 @@ enum mlx5_event {
enum mlx5_driver_event {
MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
+ MLX5_DRIVER_EVENT_UPLINK_NETDEV,
+ MLX5_DRIVER_EVENT_MACSEC_SA_ADDED,
+ MLX5_DRIVER_EVENT_MACSEC_SA_DELETED,
+ MLX5_DRIVER_EVENT_SF_PEER_DEVLINK,
+ MLX5_DRIVER_EVENT_AFFILIATION_DONE,
+ MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
+ MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE,
};
enum {
MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
+ MLX5_TRACER_SUBTYPE_STRINGS_DB_UPDATE = 0x2,
};
enum {
@@ -386,21 +399,6 @@ enum {
};
enum {
- MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
- MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
- MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
- MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
- MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
- MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23,
- MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
- MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
- MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
- MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
- MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
- MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
-};
-
-enum {
MLX5_ROCE_VERSION_1 = 0,
MLX5_ROCE_VERSION_2 = 2,
};
@@ -454,6 +452,9 @@ enum {
MLX5_OPCODE_UMR = 0x25,
+ MLX5_OPCODE_FLOW_TBL_ACCESS = 0x2c,
+
+ MLX5_OPCODE_ACCESS_ASO = 0x2d,
};
enum {
@@ -495,10 +496,6 @@ enum {
};
enum {
- MLX5_CAP_OFF_CMDIF_CSUM = 46,
-};
-
-enum {
/*
* Max wqe size for rdma read is 512 bytes, so this
* limits our max_sge_rd as the wqe needs to fit:
@@ -541,19 +538,22 @@ struct mlx5_cmd_layout {
u8 status_own;
};
-enum mlx5_fatal_assert_bit_offsets {
- MLX5_RFR_OFFSET = 31,
+enum mlx5_rfr_severity_bit_offsets {
+ MLX5_CRR_BIT_OFFSET = 0x6,
+ MLX5_RFR_BIT_OFFSET = 0x7,
};
struct health_buffer {
- __be32 assert_var[5];
- __be32 rsvd0[3];
+ __be32 assert_var[6];
+ __be32 rsvd0[2];
__be32 assert_exit_ptr;
__be32 assert_callra;
- __be32 rsvd1[2];
+ __be32 rsvd1[1];
+ __be32 time;
__be32 fw_ver;
__be32 hw_id;
- __be32 rfr;
+ u8 rfr_severity;
+ u8 rsvd2[3];
u8 irisc_index;
u8 synd;
__be16 ext_synd;
@@ -577,7 +577,9 @@ struct mlx5_init_seg {
__be32 rsvd1[120];
__be32 initializing;
struct health_buffer health;
- __be32 rsvd2[880];
+ __be32 rsvd2[878];
+ __be32 cmd_exec_to;
+ __be32 cmd_q_init_to;
__be32 internal_timer_h;
__be32 internal_timer_l;
__be32 rsvd3[2];
@@ -648,10 +650,11 @@ struct mlx5_eqe_page_req {
__be32 rsvd1[5];
};
+#define MEMORY_SCHEME_PAGE_FAULT_GRANULARITY 4096
struct mlx5_eqe_page_fault {
- __be32 bytes_committed;
union {
struct {
+ __be32 bytes_committed;
u16 reserved1;
__be16 wqe_index;
u16 reserved2;
@@ -661,6 +664,7 @@ struct mlx5_eqe_page_fault {
__be32 pftype_wq;
} __packed wqe;
struct {
+ __be32 bytes_committed;
__be32 r_key;
u16 reserved1;
__be16 packet_length;
@@ -668,6 +672,23 @@ struct mlx5_eqe_page_fault {
__be64 rdma_va;
__be32 pftype_token;
} __packed rdma;
+ struct {
+ u8 flags;
+ u8 reserved1;
+ __be16 post_demand_fault_pages;
+ __be16 pre_demand_fault_pages;
+ __be16 token47_32;
+ __be32 token31_0;
+ /*
+ * FW changed from specifying the fault size as a byte
+ * count to a granularity of 4K pages. The size in
+ * pages occupies bits 31:12, preserving backward
+ * compatibility.
+ */
+ __be32 demand_fault_pages;
+ __be32 mkey;
+ __be64 va;
+ } __packed memory;
} __packed;
} __packed;
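
/* Illustrative sketch, not part of this patch: per the comment above, the
 * memory-scheme fault size sits in bits 31:12 of demand_fault_pages and is
 * counted in 4K pages; a hypothetical decode helper could look like this.
 */
static inline u64 mlx5_memory_pfault_bytes(const struct mlx5_eqe_page_fault *pf)
{
	u32 npages = be32_to_cpu(pf->memory.demand_fault_pages) >> 12;

	return (u64)npages * MEMORY_SCHEME_PAGE_FAULT_GRANULARITY;
}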
@@ -712,12 +733,19 @@ struct mlx5_eqe_temp_warning {
__be64 sensor_warning_lsb;
} __packed;
+struct mlx5_eqe_obj_change {
+ u8 rsvd0[2];
+ __be16 obj_type;
+ __be32 obj_id;
+} __packed;
+
#define SYNC_RST_STATE_MASK 0xf
enum sync_rst_state_type {
MLX5_SYNC_RST_STATE_RESET_REQUEST = 0x0,
MLX5_SYNC_RST_STATE_RESET_NOW = 0x1,
MLX5_SYNC_RST_STATE_RESET_ABORT = 0x2,
+ MLX5_SYNC_RST_STATE_RESET_UNLOAD = 0x3,
};
struct mlx5_eqe_sync_fw_update {
@@ -750,6 +778,7 @@ union ev_data {
struct mlx5_eqe_xrq_err xrq_err;
struct mlx5_eqe_sync_fw_update sync_fw_update;
struct mlx5_eqe_vhca_state vhca_state;
+ struct mlx5_eqe_obj_change obj_change;
} __packed;
struct mlx5_eqe {
@@ -795,10 +824,23 @@ struct mlx5_cqe64 {
u8 tls_outer_l3_tunneled;
u8 rsvd0;
__be16 wqe_id;
- u8 lro_tcppsh_abort_dupack;
- u8 lro_min_ttl;
- __be16 lro_tcp_win;
- __be32 lro_ack_seq_num;
+ union {
+ struct {
+ u8 tcppsh_abort_dupack;
+ u8 min_ttl;
+ __be16 tcp_win;
+ __be32 ack_seq_num;
+ } lro;
+ struct {
+ u8 reserved0:1;
+ u8 match:1;
+ u8 flush:1;
+ u8 reserved3:5;
+ u8 header_size;
+ __be16 header_entry_index;
+ __be32 data_offset;
+ } shampo;
+ };
__be32 rss_hash_result;
u8 rss_hash_type;
u8 ml_path;
@@ -822,7 +864,10 @@ struct mlx5_cqe64 {
__be32 timestamp_l;
__be32 sop_drop_qpn;
__be16 wqe_counter;
- u8 signature;
+ union {
+ u8 signature;
+ u8 validity_iteration_count;
+ };
u8 op_own;
};
@@ -854,6 +899,11 @@ enum {
MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,
};
+enum {
+ MLX5_CQE_COMPRESS_LAYOUT_BASIC = 0,
+ MLX5_CQE_COMPRESS_LAYOUT_ENHANCED = 1,
+};
+
#define MLX5_MINI_CQE_ARRAY_SIZE 8
static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
@@ -866,19 +916,20 @@ static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
return cqe->op_own >> 4;
}
-static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+static inline u8 get_cqe_enhanced_num_mini_cqes(struct mlx5_cqe64 *cqe)
{
- return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+ /* num_of_mini_cqes is zero based */
+ return get_cqe_opcode(cqe) + 1;
}
-static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
+static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
- return (cqe->l4_l3_hdr_type >> 4) & 0x7;
+ return (cqe->lro.tcppsh_abort_dupack >> 6) & 1;
}
-static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
+static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
- return (cqe->l4_l3_hdr_type >> 2) & 0x3;
+ return (cqe->l4_l3_hdr_type >> 4) & 0x7;
}
static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
@@ -891,7 +942,7 @@ static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
}
-static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
+static inline bool cqe_has_vlan(const struct mlx5_cqe64 *cqe)
{
return cqe->l4_l3_hdr_type & 0x1;
}
@@ -962,14 +1013,23 @@ enum {
};
enum {
- CQE_RSS_HTYPE_IP = 0x3 << 2,
+ CQE_RSS_HTYPE_IP = GENMASK(3, 2),
/* cqe->rss_hash_type[3:2] - IP destination selected for hash
* (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
*/
- CQE_RSS_HTYPE_L4 = 0x3 << 6,
+ CQE_RSS_IP_NONE = 0x0,
+ CQE_RSS_IPV4 = 0x1,
+ CQE_RSS_IPV6 = 0x2,
+ CQE_RSS_RESERVED = 0x3,
+
+ CQE_RSS_HTYPE_L4 = GENMASK(7, 6),
/* cqe->rss_hash_type[7:6] - L4 destination selected for hash
 * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
*/
+ CQE_RSS_L4_NONE = 0x0,
+ CQE_RSS_L4_TCP = 0x1,
+ CQE_RSS_L4_UDP = 0x2,
+ CQE_RSS_L4_IPSEC = 0x3,
};
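
/* Illustrative sketch, not part of this patch: with the GENMASK-based
 * definitions above, the RSS hash-type subfields can be extracted with
 * FIELD_GET() from <linux/bitfield.h>, which this patch starts including.
 */
static inline u8 mlx5_cqe_rss_l4_htype(const struct mlx5_cqe64 *cqe)
{
	return FIELD_GET(CQE_RSS_HTYPE_L4, cqe->rss_hash_type);
}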
enum {
@@ -1038,7 +1098,7 @@ enum {
struct mlx5_mkey_seg {
/* This is a two bit field occupying bits 31-30.
* bit 31 is always 0,
- * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation
+ * bit 30 is zero for regular MRs and 1 (e.g. free) for UMRs that do not have translation
*/
u8 status;
u8 pcie_control;
@@ -1075,6 +1135,11 @@ enum {
};
enum {
+ MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN = 0x1,
+ MLX5_VPORT_CVLAN_INSERT_ALWAYS = 0x3,
+};
+
+enum {
MLX5_L3_PROT_TYPE_IPV4 = 0,
MLX5_L3_PROT_TYPE_IPV6 = 1,
};
@@ -1099,6 +1164,7 @@ enum {
MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4,
MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5,
+ MLX5_MATCH_MISC_PARAMETERS_5 = 1 << 6,
};
enum {
@@ -1157,6 +1223,9 @@ enum mlx5_cap_mode {
HCA_CAP_OPMOD_GET_CUR = 1,
};
+/* Any new cap addition must update mlx5_hca_caps_alloc() to allocate
+ * capability memory.
+ */
enum mlx5_cap_type {
MLX5_CAP_GENERAL = 0,
MLX5_CAP_ETHERNET_OFFLOADS,
@@ -1168,9 +1237,7 @@ enum mlx5_cap_type {
MLX5_CAP_FLOW_TABLE,
MLX5_CAP_ESWITCH_FLOW_TABLE,
MLX5_CAP_ESWITCH,
- MLX5_CAP_RESERVED,
- MLX5_CAP_VECTOR_CALC,
- MLX5_CAP_QOS,
+ MLX5_CAP_QOS = 0xc,
MLX5_CAP_DEBUG,
MLX5_CAP_RESERVED_14,
MLX5_CAP_DEV_MEM,
@@ -1179,6 +1246,14 @@ enum mlx5_cap_type {
MLX5_CAP_VDPA_EMULATION = 0x13,
MLX5_CAP_DEV_EVENT = 0x14,
MLX5_CAP_IPSEC,
+ MLX5_CAP_CRYPTO = 0x1a,
+ MLX5_CAP_SHAMPO = 0x1d,
+ MLX5_CAP_PSP = 0x1e,
+ MLX5_CAP_MACSEC = 0x1f,
+ MLX5_CAP_GENERAL_2 = 0x20,
+ MLX5_CAP_PORT_SELECTION = 0x25,
+ MLX5_CAP_ADV_VIRTUALIZATION = 0x26,
+ MLX5_CAP_ADV_RDMA = 0x28,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1193,9 +1268,9 @@ enum mlx5_pcam_feature_groups {
enum mlx5_mcam_reg_groups {
MLX5_MCAM_REGS_FIRST_128 = 0x0,
- MLX5_MCAM_REGS_0x9080_0x90FF = 0x1,
MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
- MLX5_MCAM_REGS_NUM = 0x3,
+ MLX5_MCAM_REGS_0x9180_0x91FF = 0x3,
+ MLX5_MCAM_REGS_NUM = 0x4,
};
enum mlx5_mcam_feature_groups {
@@ -1212,136 +1287,141 @@ enum mlx5_qcam_feature_groups {
/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
#define MLX5_CAP_GEN_64(mdev, cap) \
- MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
+ MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)
#define MLX5_CAP_GEN_MAX(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap)
-#define MLX5_CAP_ETH(mdev, cap) \
- MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+#define MLX5_CAP_GEN_2(mdev, cap) \
+ MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
+
+#define MLX5_CAP_GEN_2_64(mdev, cap) \
+ MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)
+
+#define MLX5_CAP_GEN_2_MAX(mdev, cap) \
+ MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap)
-#define MLX5_CAP_ETH_MAX(mdev, cap) \
+#define MLX5_CAP_ETH(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap)
#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap)
+ mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap)
#define MLX5_CAP_ROCE(mdev, cap) \
- MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap)
#define MLX5_CAP_ROCE_MAX(mdev, cap) \
- MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->max, cap)
#define MLX5_CAP_ATOMIC(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap)
#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->max, cap)
#define MLX5_CAP_FLOWTABLE(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
- MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
-
-#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)
#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
-#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
-
#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
-#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)
-
#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
-#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
-
#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
-#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
-
#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)
-#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)
-
#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)
-#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
- MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)
+#define MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(mdev, cap) \
+ MLX5_CAP_ADV_RDMA(mdev, rdma_transport_rx_flow_table_properties.cap)
-#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
- MLX5_GET(flow_table_eswitch_cap, \
- mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+#define MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(mdev, cap) \
+ MLX5_CAP_ADV_RDMA(mdev, rdma_transport_tx_flow_table_properties.cap)
-#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
+#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
-#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
-
#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
-#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
-
#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
-#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
- MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
+#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap)
+
+#define MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, ft_field_support_2_nic_receive.cap)
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)
#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET64(flow_table_eswitch_cap, \
- (mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)
-#define MLX5_CAP_ESW_MAX(mdev, cap) \
- MLX5_GET(e_switch_cap, \
- mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
+#define MLX5_CAP_PORT_SELECTION(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)
+
+#define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap)
+
+#define MLX5_CAP_ADV_VIRTUALIZATION(mdev, cap) \
+ MLX5_GET(adv_virtualization_cap, \
+ mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap)
+
+#define MLX5_CAP_ADV_RDMA(mdev, cap) \
+ MLX5_GET(adv_rdma_cap, \
+ mdev->caps.hca[MLX5_CAP_ADV_RDMA]->cur, cap)
+
+#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
+
+#define MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION(mdev, ft_field_support_2_port_selection.cap)
#define MLX5_CAP_ODP(mdev, cap)\
- MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
-#define MLX5_CAP_ODP_MAX(mdev, cap)\
- MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap)
+#define MLX5_CAP_ODP_SCHEME(mdev, cap) \
+ (MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ mem_page_fault) ? \
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ memory_page_fault_scheme_cap.cap) : \
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \
+ transport_page_fault_scheme_cap.cap))
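
/* Illustrative sketch, not part of this patch: MLX5_CAP_ODP_SCHEME() selects
 * the memory- or transport-scheme capability block based on mem_page_fault;
 * the queried field below is an assumption made for the example.
 */
static inline bool mlx5_odp_srq_recv_supported(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_ODP_SCHEME(mdev, rc_odp_caps.srq_receive);
}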
-#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
- MLX5_GET(vector_calc_cap, \
- mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
+#define MLX5_CAP_ODP_MAX(mdev, cap)\
+ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)
#define MLX5_CAP_QOS(mdev, cap)\
- MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
+ MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap)
#define MLX5_CAP_DEBUG(mdev, cap)\
- MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap)
+ MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap)
#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
@@ -1353,14 +1433,14 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
mng_access_reg_cap_mask.access_regs.reg)
-#define MLX5_CAP_MCAM_REG1(mdev, reg) \
- MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
- mng_access_reg_cap_mask.access_regs1.reg)
-
#define MLX5_CAP_MCAM_REG2(mdev, reg) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
mng_access_reg_cap_mask.access_regs2.reg)
+#define MLX5_CAP_MCAM_REG3(mdev, reg) \
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9180_0x91FF], \
+ mng_access_reg_cap_mask.access_regs3.reg)
+
#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
@@ -1377,27 +1457,39 @@ enum mlx5_qcam_feature_groups {
MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)
#define MLX5_CAP_DEV_MEM(mdev, cap)\
- MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
+ MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
#define MLX5_CAP64_DEV_MEM(mdev, cap)\
- MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)
+ MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)
#define MLX5_CAP_TLS(mdev, cap) \
- MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap)
+ MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap)
#define MLX5_CAP_DEV_EVENT(mdev, cap)\
- MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
+ MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap)
#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
MLX5_GET(virtio_emulation_cap, \
- (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
+ (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
MLX5_GET64(virtio_emulation_cap, \
- (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
+ (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)
#define MLX5_CAP_IPSEC(mdev, cap)\
- MLX5_GET(ipsec_cap, (mdev)->caps.hca_cur[MLX5_CAP_IPSEC], cap)
+ MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)
+
+#define MLX5_CAP_CRYPTO(mdev, cap)\
+ MLX5_GET(crypto_cap, (mdev)->caps.hca[MLX5_CAP_CRYPTO]->cur, cap)
+
+#define MLX5_CAP_MACSEC(mdev, cap)\
+ MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap)
+
+#define MLX5_CAP_SHAMPO(mdev, cap) \
+ MLX5_GET(shampo_cap, mdev->caps.hca[MLX5_CAP_SHAMPO]->cur, cap)
+
+#define MLX5_CAP_PSP(mdev, cap)\
+ MLX5_GET(psp_cap, (mdev)->caps.hca[MLX5_CAP_PSP]->cur, cap)
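
/* Illustrative sketch, not part of this patch: each accessor above reads
 * either the current ("cur") or the maximum ("max") copy of a capability,
 * so current and ceiling values can be compared, e.g.:
 */
static inline bool mlx5_qp_log_size_capped(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, log_max_qp) <
	       MLX5_CAP_GEN_MAX(mdev, log_max_qp);
}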
enum {
MLX5_CMD_STAT_OK = 0x0,
@@ -1407,6 +1499,7 @@ enum {
MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
MLX5_CMD_STAT_RES_BUSY = 0x6,
+ MLX5_CMD_STAT_NOT_READY = 0x7,
MLX5_CMD_STAT_LIM_ERR = 0x8,
MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
MLX5_CMD_STAT_IX_ERR = 0xa,
@@ -1429,7 +1522,10 @@ enum {
MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13,
MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
+ MLX5_PHYSICAL_LAYER_RECOVERY_GROUP = 0x1a,
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
+ MLX5_INFINIBAND_EXTENDED_PORT_COUNTERS_GROUP = 0x21,
+ MLX5_RS_FEC_HISTOGRAM_GROUP = 0x23,
};
enum {
@@ -1443,6 +1539,8 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
+#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 6
+#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 4
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index f8e8d7e90616..1c54aa6f74fb 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -36,6 +36,7 @@
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
+#include <linux/pci-tph.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
@@ -45,29 +46,26 @@
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
-#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
#include <linux/auxiliary_bus.h>
+#include <linux/mutex.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
-#include <linux/ptp_clock_kernel.h>
#include <net/devlink.h>
#define MLX5_ADEV_NAME "mlx5_core"
+#define MLX5_IRQ_EQ_CTRL (U8_MAX)
+
enum {
MLX5_BOARD_ID_LEN = 64,
};
enum {
- /* one minute for the sake of bringup. Generally, commands must always
- * complete and we may need to increase this timeout value
- */
- MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
MLX5_CMD_WQ_MAX_NAME = 32,
};
@@ -86,7 +84,7 @@ enum mlx5_sqp_t {
};
enum {
- MLX5_MAX_PORTS = 2,
+ MLX5_MAX_PORTS = 8,
};
enum {
@@ -102,6 +100,8 @@ enum {
};
enum {
+ MLX5_REG_SBPR = 0xb001,
+ MLX5_REG_SBCM = 0xb002,
MLX5_REG_QPTS = 0x4002,
MLX5_REG_QETCR = 0x4005,
MLX5_REG_QTCT = 0x400a,
@@ -130,12 +130,17 @@ enum {
MLX5_REG_PDDR = 0x5031,
MLX5_REG_PMLP = 0x5002,
MLX5_REG_PPLM = 0x5023,
+ MLX5_REG_PPHCR = 0x503E,
MLX5_REG_PCAM = 0x507f,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
+ MLX5_REG_MTCAP = 0x9009,
+ MLX5_REG_MTMP = 0x900A,
MLX5_REG_MCIA = 0x9014,
+ MLX5_REG_MNVDA = 0x9024,
MLX5_REG_MFRL = 0x9028,
MLX5_REG_MLCR = 0x902b,
+ MLX5_REG_MRTC = 0x902d,
MLX5_REG_MTRC_CAP = 0x9040,
MLX5_REG_MTRC_CONF = 0x9041,
MLX5_REG_MTRC_STDB = 0x9042,
@@ -146,14 +151,23 @@ enum {
MLX5_REG_MTPPSE = 0x9054,
MLX5_REG_MTUTC = 0x9055,
MLX5_REG_MPEGC = 0x9056,
+ MLX5_REG_MPIR = 0x9059,
MLX5_REG_MCQS = 0x9060,
MLX5_REG_MCQI = 0x9061,
MLX5_REG_MCC = 0x9062,
MLX5_REG_MCDA = 0x9063,
MLX5_REG_MCAM = 0x907f,
+ MLX5_REG_MSECQ = 0x9155,
+ MLX5_REG_MSEES = 0x9156,
MLX5_REG_MIRC = 0x9162,
+ MLX5_REG_MTPTM = 0x9180,
+ MLX5_REG_MTCTR = 0x9181,
+ MLX5_REG_MRTCQ = 0x9182,
MLX5_REG_SBCAM = 0xB01F,
MLX5_REG_RESOURCE_DUMP = 0xC000,
+ MLX5_REG_NIC_CAP = 0xC00D,
+ MLX5_REG_DTOR = 0xC00E,
+ MLX5_REG_VHCA_ICM_CTRL = 0xC010,
};
enum mlx5_qpts_trust_state {
@@ -214,6 +228,7 @@ struct mlx5_rsc_debug {
enum mlx5_dev_event {
MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
MLX5_DEV_EVENT_PORT_AFFINITY = 129,
+ MLX5_DEV_EVENT_MULTIPORT_ESW = 130,
};
enum mlx5_port_status {
@@ -264,6 +279,16 @@ enum {
struct mlx5_cmd_stats {
u64 sum;
u64 n;
+ /* number of times command failed */
+ u64 failed;
+ /* number of times command failed on bad status returned by FW */
+ u64 failed_mbox_status;
+ /* errno returned by the last failed command */
+ u32 last_failed_errno;
+ /* last bad status returned by FW */
+ u8 last_failed_mbox_status;
+ /* FW syndrome returned by the last failed command */
+ u32 last_failed_syndrome;
struct dentry *root;
/* protect command average calculations */
spinlock_t lock;
@@ -272,18 +297,25 @@ struct mlx5_cmd_stats {
struct mlx5_cmd {
struct mlx5_nb nb;
+ /* members which need to be queried or reinitialized on each reload */
+ struct {
+ u16 cmdif_rev;
+ u8 log_sz;
+ u8 log_stride;
+ int max_reg_cmds;
+ unsigned long bitmask;
+ struct semaphore sem;
+ struct semaphore pages_sem;
+ struct semaphore throttle_sem;
+ struct semaphore unprivileged_sem;
+ struct xarray privileged_uids;
+ } vars;
enum mlx5_cmdif_state state;
void *cmd_alloc_buf;
dma_addr_t alloc_dma;
int alloc_size;
void *cmd_buf;
dma_addr_t dma;
- u16 cmdif_rev;
- u8 log_sz;
- u8 log_stride;
- int max_reg_cmds;
- int events;
- u32 __iomem *vector;
/* protect command queue allocations
*/
@@ -293,11 +325,8 @@ struct mlx5_cmd {
*/
spinlock_t token_lock;
u8 token;
- unsigned long bitmask;
char wq_name[MLX5_CMD_WQ_MAX_NAME];
struct workqueue_struct *wq;
- struct semaphore sem;
- struct semaphore pages_sem;
int mode;
u16 allowed_opcode;
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
@@ -305,7 +334,7 @@ struct mlx5_cmd {
struct mlx5_cmd_debug dbg;
struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
int checksum_disabled;
- struct mlx5_cmd_stats *stats;
+ struct xarray stats;
};
struct mlx5_cmd_mailbox {
@@ -357,22 +386,6 @@ struct mlx5_core_sig_ctx {
u32 sigerr_count;
};
-enum {
- MLX5_MKEY_MR = 1,
- MLX5_MKEY_MW,
- MLX5_MKEY_INDIRECT_DEVX,
-};
-
-struct mlx5_core_mkey {
- u64 iova;
- u64 size;
- u32 key;
- u32 pd;
- u32 type;
- struct wait_queue_head wait;
- refcount_t usecount;
-};
-
#define MLX5_24BIT_MASK ((1 << 24) - 1)
enum mlx5_res_type {
@@ -382,13 +395,13 @@ enum mlx5_res_type {
MLX5_RES_SRQ = 3,
MLX5_RES_XSRQ = 4,
MLX5_RES_XRQ = 5,
- MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT,
};
struct mlx5_core_rsc_common {
enum mlx5_res_type res;
refcount_t refcount;
struct completion free;
+ bool invalid;
};
struct mlx5_uars_page {
@@ -421,7 +434,6 @@ struct mlx5_sq_bfreg {
struct mlx5_uars_page *up;
bool wc;
u32 index;
- unsigned int offset;
};
struct mlx5_core_health {
@@ -433,23 +445,19 @@ struct mlx5_core_health {
u8 synd;
u32 fatal_error;
u32 crdump_size;
- /* wq spinlock to synchronize draining */
- spinlock_t wq_lock;
struct workqueue_struct *wq;
unsigned long flags;
struct work_struct fatal_report_work;
struct work_struct report_work;
struct devlink_health_reporter *fw_reporter;
struct devlink_health_reporter *fw_fatal_reporter;
+ struct devlink_health_reporter *vnic_reporter;
+ struct delayed_work update_fw_log_ts_work;
};
-struct mlx5_qp_table {
- struct notifier_block nb;
-
- /* protect radix tree
- */
- spinlock_t lock;
- struct radix_tree_root tree;
+enum {
+ MLX5_PF_NOTIFY_DISABLE_VF,
+ MLX5_PF_NOTIFY_ENABLE_VF,
};
struct mlx5_vf_context {
@@ -462,52 +470,28 @@ struct mlx5_vf_context {
u8 port_guid_valid:1;
u8 node_guid_valid:1;
enum port_state_policy policy;
+ struct blocking_notifier_head notifier;
};
struct mlx5_core_sriov {
struct mlx5_vf_context *vfs_ctx;
int num_vfs;
u16 max_vfs;
-};
-
-struct mlx5_fc_pool {
- struct mlx5_core_dev *dev;
- struct mutex pool_lock; /* protects pool lists */
- struct list_head fully_used;
- struct list_head partially_used;
- struct list_head unused;
- int available_fcs;
- int used_fcs;
- int threshold;
-};
-
-struct mlx5_fc_stats {
- spinlock_t counters_idr_lock; /* protects counters_idr */
- struct idr counters_idr;
- struct list_head counters;
- struct llist_head addlist;
- struct llist_head dellist;
-
- struct workqueue_struct *wq;
- struct delayed_work work;
- unsigned long next_query;
- unsigned long sampling_interval; /* jiffies */
- u32 *bulk_query_out;
- struct mlx5_fc_pool fc_pool;
+ u16 max_ec_vfs;
};
struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
-struct mlx5_devcom;
+struct mlx5_devcom_dev;
struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
-struct mlx5_vhca_state_notifier;
struct mlx5_sf_dev_table;
struct mlx5_sf_hw_table;
struct mlx5_sf_table;
+struct mlx5_crypto_dek_priv;
struct mlx5_rate_limit {
u32 rate;
@@ -542,6 +526,11 @@ struct mlx5_core_roce {
enum {
MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
+ /* Set during device detach to block any further device
+ * creation/deletion on driver rescan. Unset during device attach.
+ */
+ MLX5_PRIV_FLAGS_DETACH = 1 << 2,
+ MLX5_PRIV_FLAGS_SWITCH_LEGACY = 1 << 3,
};
struct mlx5_adev {
@@ -550,6 +539,26 @@ struct mlx5_adev {
int idx;
};
+struct mlx5_debugfs_entries {
+ struct dentry *dbg_root;
+ struct dentry *qp_debugfs;
+ struct dentry *eq_debugfs;
+ struct dentry *cq_debugfs;
+ struct dentry *cmdif_debugfs;
+ struct dentry *pages_debugfs;
+ struct dentry *lag_debugfs;
+};
+
+enum mlx5_func_type {
+ MLX5_PF,
+ MLX5_VF,
+ MLX5_SF,
+ MLX5_HOST_PF,
+ MLX5_EC_VF,
+ MLX5_FUNC_TYPE_NUM,
+};
+
+struct mlx5_ft_pool;
struct mlx5_priv {
/* IRQ table valid only for real pci devices PF or VF */
struct mlx5_irq_table *irq_table;
@@ -559,71 +568,77 @@ struct mlx5_priv {
struct mlx5_nb pg_nb;
struct workqueue_struct *pg_wq;
struct xarray page_root_xa;
- int fw_pages;
atomic_t reg_pages;
struct list_head free_list;
- int vfs_pages;
- int host_pf_pages;
+ u32 fw_pages;
+ u32 page_counters[MLX5_FUNC_TYPE_NUM];
+ u32 fw_pages_alloc_failed;
+ u32 give_pages_dropped;
+ u32 reclaim_pages_discard;
struct mlx5_core_health health;
struct list_head traps;
- /* start: qp staff */
- struct dentry *qp_debugfs;
- struct dentry *eq_debugfs;
- struct dentry *cq_debugfs;
- struct dentry *cmdif_debugfs;
- /* end: qp staff */
+ struct mlx5_debugfs_entries dbg;
 /* start: alloc stuff */
- /* protect buffer alocation according to numa node */
+ /* protect buffer allocation according to numa node */
struct mutex alloc_mutex;
int numa_node;
struct mutex pgdir_mutex;
struct list_head pgdir_list;
 /* end: alloc stuff */
- struct dentry *dbg_root;
- struct list_head ctx_list;
- spinlock_t ctx_lock;
struct mlx5_adev **adev;
int adev_idx;
+ int sw_vhca_id;
struct mlx5_events *events;
+ struct mlx5_vhca_events *vhca_events;
struct mlx5_flow_steering *steering;
struct mlx5_mpfs *mpfs;
+ struct blocking_notifier_head esw_n_head;
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
u32 flags;
- struct mlx5_devcom *devcom;
+ struct mlx5_devcom_dev *devc;
+ struct mlx5_devcom_comp_dev *hca_devcom_comp;
struct mlx5_fw_reset *fw_reset;
struct mlx5_core_roce roce;
- struct mlx5_fc_stats fc_stats;
+ struct mlx5_fc_stats *fc_stats;
struct mlx5_rl_table rl_table;
+ struct mlx5_ft_pool *ft_pool;
struct mlx5_bfreg_data bfregs;
- struct mlx5_uars_page *uar;
+ struct mlx5_sq_bfreg bfreg;
#ifdef CONFIG_MLX5_SF
- struct mlx5_vhca_state_notifier *vhca_state_notifier;
+ struct mlx5_nb vhca_state_nb;
+ struct blocking_notifier_head vhca_state_n_head;
+ struct notifier_block sf_dev_nb;
struct mlx5_sf_dev_table *sf_dev_table;
struct mlx5_core_dev *parent_mdev;
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
+ struct notifier_block sf_hw_table_vhca_nb;
struct mlx5_sf_hw_table *sf_hw_table;
+ struct notifier_block sf_table_esw_nb;
+ struct notifier_block sf_table_vhca_nb;
+ struct notifier_block sf_table_mdev_nb;
struct mlx5_sf_table *sf_table;
#endif
+ struct blocking_notifier_head lag_nh;
};
enum mlx5_device_state {
- MLX5_DEVICE_STATE_UNINITIALIZED,
- MLX5_DEVICE_STATE_UP,
+ MLX5_DEVICE_STATE_UP = 1,
MLX5_DEVICE_STATE_INTERNAL_ERROR,
};
enum mlx5_interface_state {
MLX5_INTERFACE_STATE_UP = BIT(0),
+ MLX5_BREAK_FW_WAIT = BIT(1),
};
enum mlx5_pci_status {
@@ -648,16 +663,24 @@ struct mlx5e_resources {
struct mlx5e_hw_objs {
u32 pdn;
struct mlx5_td td;
- struct mlx5_core_mkey mkey;
- struct mlx5_sq_bfreg bfreg;
+ u32 mkey;
+ struct mlx5_sq_bfreg *bfregs;
+ unsigned int num_bfregs;
+#define MLX5_MAX_NUM_TC 8
+ u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC];
+ bool tisn_valid;
} hw_objs;
- struct devlink_port dl_port;
struct net_device *uplink_netdev;
+ netdevice_tracker tracker;
+ struct mutex uplink_netdev_lock;
+ struct mlx5_crypto_dek_priv *dek_priv;
};
enum mlx5_sw_icm_type {
MLX5_SW_ICM_TYPE_STEERING,
MLX5_SW_ICM_TYPE_HEADER_MODIFY,
+ MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN,
+ MLX5_SW_ICM_TYPE_SW_ENCAP,
};
#define MLX5_MAX_RESERVED_GIDS 8
@@ -668,41 +691,50 @@ struct mlx5_rsvd_gids {
struct ida ida;
};
-#define MAX_PIN_NUM 8
-struct mlx5_pps {
- u8 pin_caps[MAX_PIN_NUM];
- struct work_struct out_work;
- u64 start[MAX_PIN_NUM];
- u8 enabled;
-};
-
-struct mlx5_timer {
- struct cyclecounter cycles;
- struct timecounter tc;
- u32 nominal_c_mult;
- unsigned long overflow_period;
- struct delayed_work overflow_work;
-};
-
-struct mlx5_clock {
- struct mlx5_nb pps_nb;
- seqlock_t lock;
- struct hwtstamp_config hwtstamp_config;
- struct ptp_clock *ptp;
- struct ptp_clock_info ptp_info;
- struct mlx5_pps pps_info;
- struct mlx5_timer timer;
-};
-
+struct mlx5_clock;
+struct mlx5_clock_dev_state;
struct mlx5_dm;
struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_geneve;
struct mlx5_hv_vhca;
+struct mlx5_st;
#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+enum {
+ MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
+ MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
+};
+
+enum {
+ MKEY_CACHE_LAST_STD_ENTRY = 20,
+ MLX5_IMR_KSM_CACHE_ENTRY,
+ MAX_MKEY_CACHE_ENTRIES
+};
+
+struct mlx5_profile {
+ u64 mask;
+ u8 log_max_qp;
+ u8 num_cmd_caches;
+ struct {
+ int size;
+ int limit;
+ } mr_cache[MAX_MKEY_CACHE_ENTRIES];
+};
+
+struct mlx5_hca_cap {
+ u32 cur[MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 max[MLX5_UN_SZ_DW(hca_cap_union)];
+};
+
+enum mlx5_wc_state {
+ MLX5_WC_STATE_UNINITIALIZED,
+ MLX5_WC_STATE_UNSUPPORTED,
+ MLX5_WC_STATE_SUPPORTED,
+};
+
struct mlx5_core_dev {
struct device *device;
enum mlx5_coredev_type coredev_type;
@@ -714,14 +746,14 @@ struct mlx5_core_dev {
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
struct {
- u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
- u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ struct mlx5_hca_cap *hca[MLX5_CAP_NUM];
u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
u8 embedded_cpu;
} caps;
+ struct mlx5_timeouts *timeouts;
u64 sys_image_guid;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
@@ -729,12 +761,14 @@ struct mlx5_core_dev {
enum mlx5_device_state state;
/* sync interface state */
struct mutex intf_state_mutex;
+ struct lock_class_key lock_key;
unsigned long intf_state;
struct mlx5_priv priv;
- struct mlx5_profile *profile;
+ struct mlx5_profile profile;
u32 issi;
struct mlx5e_resources mlx5e_res;
struct mlx5_dm *dm;
+ struct mlx5_st *st;
struct mlx5_vxlan *vxlan;
struct mlx5_geneve *geneve;
struct {
@@ -744,15 +778,26 @@ struct mlx5_core_dev {
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
#endif
-#ifdef CONFIG_MLX5_ACCEL
- const struct mlx5_accel_ipsec_ops *ipsec_ops;
-#endif
- struct mlx5_clock clock;
+ struct mlx5_clock *clock;
+ struct mlx5_clock_dev_state *clock_state;
struct mlx5_ib_clock_info *clock_info;
struct mlx5_fw_tracer *tracer;
struct mlx5_rsc_dump *rsc_dump;
u32 vsc_addr;
struct mlx5_hv_vhca *hv_vhca;
+ struct mlx5_hwmon *hwmon;
+ u64 num_block_tc;
+ u64 num_block_ipsec;
+#ifdef CONFIG_MLX5_MACSEC
+ struct mlx5_macsec_fs *macsec_fs;
+ /* MACsec notifier chain to sync MACsec core and IB database */
+ struct blocking_notifier_head macsec_nh;
+#endif
+ u64 num_ipsec_offloads;
+ struct mlx5_sd *sd;
+ enum mlx5_wc_state wc_state;
+ /* sync write combining state */
+ struct mutex wc_state_lock;
};
struct mlx5_db {
@@ -765,6 +810,8 @@ struct mlx5_db {
int index;
};
+#define MLX5_DEFAULT_NUM_DOORBELLS 8
+
enum {
MLX5_COMP_EQ_SIZE = 1024,
};
@@ -778,6 +825,7 @@ typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
enum {
MLX5_CMD_ENT_STATE_PENDING_COMP,
+ MLX5_CMD_ENT_STATE_TIMEDOUT,
};
struct mlx5_cmd_work_ent {
@@ -791,6 +839,7 @@ struct mlx5_cmd_work_ent {
void *context;
int idx;
struct completion handling;
+ struct completion slotted;
struct completion done;
struct mlx5_cmd *cmd;
struct work_struct work;
@@ -807,11 +856,6 @@ struct mlx5_cmd_work_ent {
refcount_t refcnt;
};
-struct mlx5_pas {
- u64 pa;
- u8 log_sz;
-};
-
enum phy_port_state {
MLX5_AAA_111
};
@@ -841,22 +885,13 @@ struct mlx5_hca_vport_context {
u16 qkey_violation_counter;
u16 pkey_violation_counter;
bool grh_required;
+ u8 num_plane;
};
-static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
-{
- return buf->frags->buf + offset;
-}
-
#define STRUCT_FIELD(header, field) \
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
-static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
-{
- return pci_get_drvdata(pdev);
-}
-
extern struct dentry *mlx5_debugfs_root;
static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
@@ -935,7 +970,7 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
struct mlx5_async_ctx {
struct mlx5_core_dev *dev;
atomic_t num_inflight;
- struct wait_queue_head wait;
+ struct completion inflight_done;
};
struct mlx5_async_work;
@@ -945,6 +980,11 @@ typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);
struct mlx5_async_work {
struct mlx5_async_ctx *ctx;
mlx5_async_cbk_t user_callback;
+ u16 opcode; /* cmd opcode */
+ u16 op_mod; /* cmd op_mod */
+ u8 throttle_locked:1;
+ u8 unpriv_locked:1;
+ void *out; /* pointer to the cmd output buffer */
};
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
@@ -953,7 +993,9 @@ void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
void *out, int out_size, mlx5_async_cbk_t callback,
struct mlx5_async_work *work);
-
+void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out);
+int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size);
+int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out);
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size);
@@ -971,66 +1013,67 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size);
-void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
+int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid);
+void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid);
+
+void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev);
+void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev);
+
+void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
-int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
-int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
-void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
+void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev,
- int size, struct mlx5_frag_buf *buf);
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
-struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
- gfp_t flags, int npages);
-void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
- struct mlx5_cmd_mailbox *head);
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- u32 *in, int inlen);
-int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey);
-int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *out, int outlen);
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
+ int inlen);
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey);
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out,
+ int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
-void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s32 npages, bool ec_function);
+void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
-void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
- unsigned int *irqn);
+int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
+struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in,
+ void *data_out, int size_out, u16 reg_id, int arg,
+ int write, bool verbose);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
int size_in, void *data_out, int size_out,
u16 reg_num, int arg, int write);
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
int node);
+
+static inline int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+ return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
+
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
const char *mlx5_command_str(int command);
@@ -1039,11 +1082,8 @@ void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
+__be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
-int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
- struct mlx5_odp_caps *odp_caps);
-int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
- u8 port_num, void *out, size_t sz);
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
@@ -1060,9 +1100,8 @@ int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
-unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
-struct cpumask *
-mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
+unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev);
+int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
@@ -1083,20 +1122,8 @@ static inline u8 mlx5_mkey_variant(u32 mkey)
return mkey & 0xff;
}
-enum {
- MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
- MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
-};
-
-enum {
- MR_CACHE_LAST_STD_ENTRY = 20,
- MLX5_IMR_MTT_CACHE_ENTRY,
- MLX5_IMR_KSM_CACHE_ENTRY,
- MAX_MR_CACHE_ENTRIES
-};
-
/* Async-atomic event notifier used by mlx5 core to forward FW
- * evetns recived from event queue to mlx5 consumers.
+ * events received from event queue to mlx5 consumers.
 * Optimise event queue dispatching.
*/
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
@@ -1121,15 +1148,25 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
-bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
-struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
struct net_device *slave);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
u64 *values,
int num_counters,
size_t *offsets);
+struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int *i);
+
+#define mlx5_lag_for_each_peer_mdev(dev, peer, i) \
+ for (i = 0, peer = mlx5_lag_get_next_peer_mdev(dev, &i); \
+ peer; \
+ peer = mlx5_lag_get_next_peer_mdev(dev, &i))
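
/* Illustrative sketch, not part of this patch: hypothetical use of the
 * peer-iteration helper defined above.
 */
static inline int mlx5_lag_count_peers(struct mlx5_core_dev *dev)
{
	struct mlx5_core_dev *peer;
	int i, n = 0;

	mlx5_lag_for_each_peer_mdev(dev, peer, i)
		n++;

	return n;
}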
+
+u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
@@ -1138,25 +1175,36 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
u64 length, u16 uid, phys_addr_t addr, u32 obj_id);
-#ifdef CONFIG_MLX5_CORE_IPOIB
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
- struct ib_device *ibdev,
- const char *name,
- void (*setup)(struct net_device *));
-#endif /* CONFIG_MLX5_CORE_IPOIB */
+#ifdef CONFIG_PCIE_TPH
+int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *st_index);
+int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index);
+#else
+static inline int mlx5_st_alloc_index(struct mlx5_core_dev *dev,
+ enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *st_index)
+{
+ return -EOPNOTSUPP;
+}
+static inline int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev);
+void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev);
+
+int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
+ int vf_id,
+ struct notifier_block *nb);
+void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
+ int vf_id,
+ struct notifier_block *nb);
int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
struct ib_device *device,
struct rdma_netdev_alloc_params *params);
-struct mlx5_profile {
- u64 mask;
- u8 log_max_qp;
- struct {
- int size;
- int limit;
- } mr_cache[MAX_MR_CACHE_ENTRIES];
-};
-
enum {
MLX5_PCI_DEV_IS_VF = 1 << 0,
};
@@ -1171,6 +1219,12 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
return dev->coredev_type == MLX5_COREDEV_VF;
}
+static inline bool mlx5_core_same_coredev_type(const struct mlx5_core_dev *dev1,
+ const struct mlx5_core_dev *dev2)
+{
+ return dev1->coredev_type == dev2->coredev_type;
+}
+
static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
{
return dev->caps.embedded_cpu;
@@ -1192,6 +1246,23 @@ static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
return dev->priv.sriov.max_vfs;
}
+static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
+{
+ /* LACP owner conditions:
+ * 1) Function is physical.
+ * 2) LAG is supported by FW.
+ * 3) LAG is managed by driver (currently the only option).
+ */
+ return MLX5_CAP_GEN(dev, vport_group_manager) &&
+ (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
+ MLX5_CAP_GEN(dev, lag_master);
+}
+
+static inline u16 mlx5_core_max_ec_vfs(const struct mlx5_core_dev *dev)
+{
+ return dev->priv.sriov.max_ec_vfs;
+}
+
static inline int mlx5_get_gid_table_len(u16 param)
{
if (param > 4) {
@@ -1232,19 +1303,90 @@ static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
return MLX5_CAP_GEN(dev, native_port_num);
}
+static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev)
+{
+ int idx = MLX5_CAP_GEN(dev, native_port_num);
+
+ if (idx >= 1 && idx <= MLX5_MAX_PORTS)
+ return idx - 1;
+ else
+ return PCI_FUNC(dev->pdev->devfn);
+}
+
enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
-static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
+bool mlx5_is_roce_on(struct mlx5_core_dev *dev);
+
+static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- union devlink_param_value val;
+ if (MLX5_CAP_GEN(dev, roce_rw_supported))
+ return MLX5_CAP_GEN(dev, roce);
- devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- &val);
- return val.vbool;
+ /* If RoCE cap is read-only in FW, get RoCE state from devlink
+ * in order to support RoCE enable/disable feature
+ */
+ return mlx5_is_roce_on(dev);
}
+#ifdef CONFIG_MLX5_MACSEC
+static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
+{
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
+ return false;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))
+ return false;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))
+ return false;
+
+ if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
+ !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
+ return false;
+
+ return true;
+}
+
+#define NIC_RDMA_BOTH_DIRS_CAPS (MLX5_FT_NIC_RX_2_NIC_RX_RDMA | MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
+
+static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev)
+{
+ if (((MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) &
+ NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS) ||
+ !MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, max_modify_header_actions) ||
+ !mlx5e_is_macsec_device(mdev) || !mdev->macsec_fs)
+ return false;
+
+ return true;
+}
+#endif
+
+enum {
+ MLX5_OCTWORD = 16,
+};
+
+bool mlx5_wc_support_get(struct mlx5_core_dev *mdev);
+
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+ return devlink_net(priv_to_devlink(dev));
+}
+
+#define MLX5_SW_IMAGE_GUID_MAX_BYTES 9
+
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h
index e49d8c0d4f26..3705a382276b 100644
--- a/include/linux/mlx5/eq.h
+++ b/include/linux/mlx5/eq.h
@@ -4,18 +4,18 @@
#ifndef MLX5_CORE_EQ_H
#define MLX5_CORE_EQ_H
-#define MLX5_IRQ_VEC_COMP_BASE 1
#define MLX5_NUM_CMD_EQE (32)
#define MLX5_NUM_ASYNC_EQE (0x1000)
#define MLX5_NUM_SPARE_EQE (0x80)
struct mlx5_eq;
+struct mlx5_irq;
struct mlx5_core_dev;
struct mlx5_eq_param {
- u8 irq_index;
int nent;
u64 mask[4];
+ struct mlx5_irq *irq;
};
struct mlx5_eq *
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 17109b65c1ac..67256e776566 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -7,12 +7,12 @@
#define _MLX5_ESWITCH_
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
#include <net/devlink.h>
#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
enum {
- MLX5_ESWITCH_NONE,
MLX5_ESWITCH_LEGACY,
MLX5_ESWITCH_OFFLOADS
};
@@ -29,11 +29,20 @@ enum {
REP_LOADED,
};
+enum mlx5_switchdev_event {
+ MLX5_SWITCHDEV_EVENT_PAIR,
+ MLX5_SWITCHDEV_EVENT_UNPAIR,
+};
+
struct mlx5_eswitch_rep;
struct mlx5_eswitch_rep_ops {
int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep);
void (*unload)(struct mlx5_eswitch_rep *rep);
void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
+ int (*event)(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ enum mlx5_switchdev_event event,
+ void *data);
};
struct mlx5_eswitch_rep_data {
@@ -63,6 +72,7 @@ struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type);
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
+ struct mlx5_eswitch *from_esw,
struct mlx5_eswitch_rep *rep, u32 sqn);
#ifdef CONFIG_MLX5_ESWITCH
@@ -98,10 +108,11 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
u16 vport_num);
/* Reg C1 usage:
- * Reg C1 = < ESW_TUN_ID(12) | ESW_TUN_OPTS(12) | ESW_ZONE_ID(8) >
+ * Reg C1 = < Reserved(1) | ESW_TUN_ID(12) | ESW_TUN_OPTS(11) | ESW_ZONE_ID(8) >
*
- * Highest 12 bits of reg c1 is the encapsulation tunnel id, next 12 bits is
- * encapsulation tunnel options, and the lowest 8 bits are used for zone id.
+ * The highest bit is reserved as a marker bit for other offloads, the next 12 bits
+ * of reg c1 are the encapsulation tunnel id, the next 11 bits are the encapsulation
+ * tunnel options,
+ * and the lowest 8 bits are used for zone id.
*
* Zone id is used to restore CT flow when packet misses on chain.
*
@@ -109,28 +120,45 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
* on miss and to support inner header rewrite by means of implicit chain 0
* flows.
*/
+#define ESW_RESERVED_BITS 1
#define ESW_ZONE_ID_BITS 8
-#define ESW_TUN_OPTS_BITS 12
+#define ESW_TUN_OPTS_BITS 11
#define ESW_TUN_ID_BITS 12
#define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS
#define ESW_TUN_OFFSET ESW_TUN_OPTS_OFFSET
#define ESW_ZONE_ID_MASK GENMASK(ESW_ZONE_ID_BITS - 1, 0)
-#define ESW_TUN_OPTS_MASK GENMASK(32 - ESW_TUN_ID_BITS - 1, ESW_TUN_OPTS_OFFSET)
-#define ESW_TUN_MASK GENMASK(31, ESW_TUN_OFFSET)
+#define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET)
+#define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET)
#define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
-#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT 0xFFF /* 0xFFF is a reserved mapping */
+#define ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT
+/* 0x7FF is a reserved mapping */
+#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
-
-u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev);
+/* 0x7FE is a reserved mapping for bridge ingress push vlan mark */
+#define ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN (ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT - 1)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN ((ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN << \
+ ESW_TUN_OPTS_BITS) | \
+ ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN)
+#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK \
+ GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
+ ESW_TUN_OPTS_OFFSET + 1)
+
+/* reuse tun_opts for the mapped ipsec obj id when tun_id is 0 (invalid) */
+#define ESW_IPSEC_RX_MAPPED_ID_MASK GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
+#define ESW_IPSEC_RX_MAPPED_ID_MATCH_MASK \
+ GENMASK(31 - ESW_RESERVED_BITS, ESW_ZONE_ID_BITS)
+
+u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev);
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
+struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);
#else /* CONFIG_MLX5_ESWITCH */
-static inline u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
+static inline u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
{
- return MLX5_ESWITCH_NONE;
+ return MLX5_ESWITCH_LEGACY;
}
static inline enum devlink_eswitch_encap_mode
@@ -168,11 +196,28 @@ static inline u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
return 0;
}
+static inline struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
+{
+ return NULL;
+}
+
#endif /* CONFIG_MLX5_ESWITCH */
+static inline bool is_mdev_legacy_mode(struct mlx5_core_dev *dev)
+{
+ return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_LEGACY;
+}
+
static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
{
return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS;
}
+/* The returned number is valid only when the dev is eswitch manager. */
+static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_ecpf_esw_manager(dev) ?
+ MLX5_VPORT_ECPF : MLX5_VPORT_PF;
+}
+
#endif
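
To make the reworked reg C1 split concrete: the reserved marker bit, 12 tunnel-id bits, 11 tunnel-option bits and 8 zone-id bits account for exactly 1 + 12 + 11 + 8 = 32 bits, and the GENMASK()-based macros above decompose a register value as follows (a worked sketch, with reg_c1 standing for the raw 32-bit value):

/* bit  31    : reserved marker
 * bits 30..19: ESW_TUN_ID   (12 bits)
 * bits 18..8 : ESW_TUN_OPTS (11 bits)
 * bits  7..0 : ESW_ZONE_ID  (8 bits)
 */
u32 zone_id = reg_c1 & ESW_ZONE_ID_MASK;
u32 tun_opts = (reg_c1 & ESW_TUN_OPTS_MASK) >> ESW_TUN_OPTS_OFFSET;
u32 tun_id = (reg_c1 & ESW_TUN_MASK) >>
	     (ESW_TUN_OPTS_OFFSET + ESW_TUN_OPTS_BITS);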
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 1f51f4c3b1af..9cadb1d5e6df 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -38,6 +38,25 @@
#define MLX5_FS_DEFAULT_FLOW_TAG 0x0
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+
+#define MLX5_RDMA_TRANSPORT_BYPASS_PRIO 16
+#define MLX5_FS_MAX_POOL_SIZE BIT(30)
+
+enum mlx5_flow_destination_type {
+ MLX5_FLOW_DESTINATION_TYPE_NONE,
+ MLX5_FLOW_DESTINATION_TYPE_VPORT,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ MLX5_FLOW_DESTINATION_TYPE_TIR,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER,
+ MLX5_FLOW_DESTINATION_TYPE_UPLINK,
+ MLX5_FLOW_DESTINATION_TYPE_PORT,
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
+ MLX5_FLOW_DESTINATION_TYPE_RANGE,
+ MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE,
+};
+
enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17,
@@ -51,6 +70,8 @@ enum {
MLX5_FLOW_TABLE_TERMINATION = BIT(2),
MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
+ MLX5_FLOW_TABLE_UPLINK_VPORT = BIT(5),
+ MLX5_FLOW_TABLE_OTHER_ESWITCH = BIT(6),
};
#define LEFTOVERS_RULE_NUM 2
@@ -65,34 +86,70 @@ static inline void build_leftovers_ft_param(int *priority,
enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_BYPASS,
+ MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
MLX5_FLOW_NAMESPACE_LAG,
MLX5_FLOW_NAMESPACE_OFFLOADS,
MLX5_FLOW_NAMESPACE_ETHTOOL,
MLX5_FLOW_NAMESPACE_KERNEL,
MLX5_FLOW_NAMESPACE_LEFTOVERS,
MLX5_FLOW_NAMESPACE_ANCHOR,
+ MLX5_FLOW_NAMESPACE_FDB_BYPASS,
MLX5_FLOW_NAMESPACE_FDB,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
MLX5_FLOW_NAMESPACE_SNIFFER_RX,
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
MLX5_FLOW_NAMESPACE_EGRESS,
- MLX5_FLOW_NAMESPACE_EGRESS_KERNEL,
+ MLX5_FLOW_NAMESPACE_EGRESS_IPSEC,
+ MLX5_FLOW_NAMESPACE_EGRESS_MACSEC,
MLX5_FLOW_NAMESPACE_RDMA_RX,
MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
MLX5_FLOW_NAMESPACE_RDMA_TX,
+ MLX5_FLOW_NAMESPACE_PORT_SEL,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX,
+ MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX,
};
enum {
+ FDB_DROP_ROOT,
FDB_BYPASS_PATH,
+ FDB_CRYPTO_INGRESS,
FDB_TC_OFFLOAD,
FDB_FT_OFFLOAD,
+ FDB_TC_MISS,
+ FDB_BR_OFFLOAD,
FDB_SLOW_PATH,
+ FDB_CRYPTO_EGRESS,
FDB_PER_VPORT,
};
+enum fs_flow_table_type {
+ FS_FT_NIC_RX = 0x0,
+ FS_FT_NIC_TX = 0x1,
+ FS_FT_ESW_EGRESS_ACL = 0x2,
+ FS_FT_ESW_INGRESS_ACL = 0x3,
+ FS_FT_FDB = 0X4,
+ FS_FT_SNIFFER_RX = 0X5,
+ FS_FT_SNIFFER_TX = 0X6,
+ FS_FT_RDMA_RX = 0X7,
+ FS_FT_RDMA_TX = 0X8,
+ FS_FT_PORT_SEL = 0X9,
+ FS_FT_FDB_RX = 0xa,
+ FS_FT_FDB_TX = 0xb,
+ FS_FT_RDMA_TRANSPORT_RX = 0xd,
+ FS_FT_RDMA_TRANSPORT_TX = 0xe,
+ FS_FT_MAX_TYPE = FS_FT_RDMA_TRANSPORT_TX,
+};
+
struct mlx5_pkt_reformat;
struct mlx5_modify_hdr;
+struct mlx5_flow_definer;
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_namespace;
@@ -100,6 +157,7 @@ struct mlx5_flow_handle;
enum {
FLOW_CONTEXT_HAS_TAG = BIT(0),
+ FLOW_CONTEXT_UPLINK_HAIRPIN_EN = BIT(1),
};
struct mlx5_flow_context {
@@ -120,19 +178,30 @@ enum {
MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1),
};
+enum mlx5_flow_dest_range_field {
+ MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN = 0,
+};
+
struct mlx5_flow_destination {
enum mlx5_flow_destination_type type;
union {
u32 tir_num;
u32 ft_num;
struct mlx5_flow_table *ft;
- u32 counter_id;
+ struct mlx5_fc *counter;
struct {
u16 num;
u16 vhca_id;
struct mlx5_pkt_reformat *pkt_reformat;
u8 flags;
} vport;
+ struct {
+ struct mlx5_flow_table *hit_ft;
+ struct mlx5_flow_table *miss_ft;
+ enum mlx5_flow_dest_range_field field;
+ u32 min;
+ u32 max;
+ } range;
u32 sampler_id;
};
};
@@ -148,15 +217,18 @@ struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type);
struct mlx5_flow_namespace *
-mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
- enum mlx5_flow_namespace_type type,
- int vport);
+mlx5_get_flow_vport_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type,
+ int vport_idx);
struct mlx5_flow_table_attr {
int prio;
int max_fte;
u32 level;
u32 flags;
+ u16 uid;
+ u16 vport;
+ u16 esw_owner_vhca_id;
struct mlx5_flow_table *next_ft;
struct {
@@ -191,6 +263,20 @@ struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+struct mlx5_exe_aso {
+ u32 object_id;
+ int base_id;
+ u8 type;
+ u8 return_reg_id;
+ union {
+ u32 ctrl_data;
+ struct {
+ u8 meter_idx;
+ u8 init_color;
+ } flow_meter;
+ };
+};
+
struct mlx5_fs_vlan {
u16 ethtype;
u16 vid;
@@ -208,13 +294,15 @@ struct mlx5_flow_act {
u32 action;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
- union {
- u32 ipsec_obj_id;
- uintptr_t esp_id;
- };
+ struct mlx5_flow_act_crypto_params {
+ u8 type;
+ u32 obj_id;
+ } crypto;
u32 flags;
struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
struct ib_counters *counters;
+ struct mlx5_flow_group *fg;
+ struct mlx5_exe_aso exe_aso;
};
#define MLX5_DECLARE_FLOW_ACT(name) \
@@ -237,10 +325,17 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_flow_destination *old_dest);
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
+
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+struct mlx5_fc *mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size);
+void mlx5_fc_local_destroy(struct mlx5_fc *counter);
+void mlx5_fc_local_get(struct mlx5_fc *counter);
+void mlx5_fc_local_put(struct mlx5_fc *counter);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
+void mlx5_fc_query_cached_raw(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse);
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
u64 *packets, u64 *bytes);
u32 mlx5_fc_id(struct mlx5_fc *counter);
@@ -253,13 +348,34 @@ struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
void *modify_actions);
void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
struct mlx5_modify_hdr *modify_hdr);
+struct mlx5_flow_definer *
+mlx5_create_match_definer(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type ns_type, u16 format_id,
+ u32 *match_mask);
+void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
+ struct mlx5_flow_definer *definer);
+int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer);
+
+struct mlx5_pkt_reformat_params {
+ int type;
+ u8 param_0;
+ u8 param_1;
+ size_t size;
+ void *data;
+};
struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
- int reformat_type,
- size_t size,
- void *reformat_data,
+ struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type ns_type);
void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
struct mlx5_pkt_reformat *reformat);
+u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
+
+struct mlx5_flow_root_namespace *
+mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type);
+
+int mlx5_fs_set_root_dev(struct mlx5_core_dev *dev,
+ struct mlx5_core_dev *new_dev,
+ enum fs_flow_table_type table_type);
#endif
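
The fs.h changes above also rework mlx5_packet_reformat_alloc() from positional arguments to a parameter struct, leaving room for the new param_0/param_1 fields. A hedged sketch of the new call shape (reformat_type, data and data_len are placeholders):

struct mlx5_pkt_reformat_params params = {
	.type = reformat_type,
	.size = data_len,
	.data = data,
};
struct mlx5_pkt_reformat *pr;

pr = mlx5_packet_reformat_alloc(dev, &params, MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(pr))
	return PTR_ERR(pr);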
diff --git a/include/linux/mlx5/fs_helpers.h b/include/linux/mlx5/fs_helpers.h
index 9db21cd0e92c..bc5125bc0561 100644
--- a/include/linux/mlx5/fs_helpers.h
+++ b/include/linux/mlx5/fs_helpers.h
@@ -38,46 +38,6 @@
#define MLX5_FS_IPV4_VERSION 4
#define MLX5_FS_IPV6_VERSION 6
-static inline bool mlx5_fs_is_ipsec_flow(const u32 *match_c)
-{
- void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
- misc_parameters);
-
- return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
-}
-
-static inline bool _mlx5_fs_is_outer_ipproto_flow(const u32 *match_c,
- const u32 *match_v, u8 match)
-{
- const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
- outer_headers);
- const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
- outer_headers);
-
- return MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_protocol) == 0xff &&
- MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol) == match;
-}
-
-static inline bool mlx5_fs_is_outer_tcp_flow(const u32 *match_c,
- const u32 *match_v)
-{
- return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_TCP);
-}
-
-static inline bool mlx5_fs_is_outer_udp_flow(const u32 *match_c,
- const u32 *match_v)
-{
- return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_UDP);
-}
-
-static inline bool mlx5_fs_is_vxlan_flow(const u32 *match_c)
-{
- void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
- misc_parameters);
-
- return MLX5_GET(fte_match_set_misc, misc_params_c, vxlan_vni);
-}
-
static inline bool _mlx5_fs_is_outer_ipv_flow(struct mlx5_core_dev *mdev,
const u32 *match_c,
const u32 *match_v, int version)
@@ -131,12 +91,4 @@ mlx5_fs_is_outer_ipv6_flow(struct mlx5_core_dev *mdev, const u32 *match_c,
MLX5_FS_IPV6_VERSION);
}
-static inline bool mlx5_fs_is_outer_ipsec_flow(const u32 *match_c)
-{
- void *misc_params_c =
- MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
-
- return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
-}
-
#endif
diff --git a/include/linux/mlx5/macsec.h b/include/linux/mlx5/macsec.h
new file mode 100644
index 000000000000..f7ff4c2a95d0
--- /dev/null
+++ b/include/linux/mlx5/macsec.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef MLX5_MACSEC_H
+#define MLX5_MACSEC_H
+
+#ifdef CONFIG_MLX5_MACSEC
+struct mlx5_macsec_event_data {
+ struct mlx5_macsec_fs *macsec_fs;
+ void *macdev;
+ u32 fs_id;
+ bool is_tx;
+};
+
+int mlx5_macsec_add_roce_rule(void *macdev, const struct sockaddr *addr, u16 gid_idx,
+ struct list_head *tx_rules_list, struct list_head *rx_rules_list,
+ struct mlx5_macsec_fs *macsec_fs);
+
+void mlx5_macsec_del_roce_rule(u16 gid_idx, struct mlx5_macsec_fs *macsec_fs,
+ struct list_head *tx_rules_list, struct list_head *rx_rules_list);
+
+void mlx5_macsec_add_roce_sa_rules(u32 fs_id, const struct sockaddr *addr, u16 gid_idx,
+ struct list_head *tx_rules_list,
+ struct list_head *rx_rules_list,
+ struct mlx5_macsec_fs *macsec_fs, bool is_tx);
+
+void mlx5_macsec_del_roce_sa_rules(u32 fs_id, struct mlx5_macsec_fs *macsec_fs,
+ struct list_head *tx_rules_list,
+ struct list_head *rx_rules_list, bool is_tx);
+
+#endif
+#endif /* MLX5_MACSEC_H */
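
The add/del entry points in the new macsec.h are symmetric per GID index; a hedged usage sketch (the list heads are caller-owned, every other name comes from the header above):

LIST_HEAD(tx_rules);
LIST_HEAD(rx_rules);
int err;

err = mlx5_macsec_add_roce_rule(macdev, addr, gid_idx,
				&tx_rules, &rx_rules, macsec_fs);
if (err)
	return err;

/* ... rules live here; tear down with the matching call ... */
mlx5_macsec_del_roce_rule(gid_idx, macsec_fs, &tx_rules, &rx_rules);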
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6d16eed6850e..e9dcd4bf355d 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -64,17 +64,14 @@ enum {
};
enum {
- MLX5_MODIFY_TIR_BITMASK_LRO = 0x0,
- MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1,
- MLX5_MODIFY_TIR_BITMASK_HASH = 0x2,
- MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3
-};
-
-enum {
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0,
+ MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS = 0x1,
MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2,
MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3,
MLX5_SET_HCA_CAP_OP_MOD_ROCE = 0x4,
+ MLX5_SET_HCA_CAP_OP_MOD_IPSEC = 0x15,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2 = 0x20,
+ MLX5_SET_HCA_CAP_OP_MOD_PORT_SELECTION = 0x25,
};
enum {
@@ -83,17 +80,16 @@ enum {
enum {
MLX5_OBJ_TYPE_SW_ICM = 0x0008,
-};
-
-enum {
- MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
- MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
- MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
-};
-
-enum {
MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
+ MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c,
+ MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018,
+ MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT = 0x23,
+ MLX5_OBJ_TYPE_STC = 0x0040,
+ MLX5_OBJ_TYPE_RTC = 0x0041,
+ MLX5_OBJ_TYPE_STE = 0x0042,
+ MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN = 0x0043,
+ MLX5_OBJ_TYPE_PAGE_TRACK = 0x46,
MLX5_OBJ_TYPE_MKEY = 0xff01,
MLX5_OBJ_TYPE_QP = 0xff02,
MLX5_OBJ_TYPE_PSV = 0xff03,
@@ -108,6 +104,16 @@ enum {
MLX5_OBJ_TYPE_RQT = 0xff0e,
MLX5_OBJ_TYPE_FLOW_COUNTER = 0xff0f,
MLX5_OBJ_TYPE_CQ = 0xff10,
+ MLX5_OBJ_TYPE_FT_ALIAS = 0xff15,
+};
+
+enum {
+ MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
+ MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
+ MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT =
+ (1ULL << MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT),
+ MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39),
};
enum {
@@ -126,6 +132,11 @@ enum {
MLX5_CMD_OP_QUERY_SF_PARTITION = 0x111,
MLX5_CMD_OP_ALLOC_SF = 0x113,
MLX5_CMD_OP_DEALLOC_SF = 0x114,
+ MLX5_CMD_OP_SUSPEND_VHCA = 0x115,
+ MLX5_CMD_OP_RESUME_VHCA = 0x116,
+ MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE = 0x117,
+ MLX5_CMD_OP_SAVE_VHCA_STATE = 0x118,
+ MLX5_CMD_OP_LOAD_VHCA_STATE = 0x119,
MLX5_CMD_OP_CREATE_MKEY = 0x200,
MLX5_CMD_OP_QUERY_MKEY = 0x201,
MLX5_CMD_OP_DESTROY_MKEY = 0x202,
@@ -178,6 +189,9 @@ enum {
MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727,
MLX5_CMD_OP_RELEASE_XRQ_ERROR = 0x729,
MLX5_CMD_OP_MODIFY_XRQ = 0x72a,
+ MLX5_CMD_OPCODE_QUERY_DELEGATED_VHCA = 0x732,
+ MLX5_CMD_OPCODE_CREATE_ESW_VPORT = 0x733,
+ MLX5_CMD_OPCODE_DESTROY_ESW_VPORT = 0x734,
MLX5_CMD_OP_QUERY_ESW_FUNCTIONS = 0x740,
MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
@@ -300,8 +314,14 @@ enum {
MLX5_CMD_OP_CREATE_UMEM = 0xa08,
MLX5_CMD_OP_DESTROY_UMEM = 0xa0a,
MLX5_CMD_OP_SYNC_STEERING = 0xb00,
+ MLX5_CMD_OP_PSP_GEN_SPI = 0xb10,
+ MLX5_CMD_OP_PSP_ROTATE_KEY = 0xb11,
MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d,
MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
+ MLX5_CMD_OP_SYNC_CRYPTO = 0xb12,
+ MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS = 0xb16,
+ MLX5_CMD_OP_GENERATE_WQE = 0xb17,
+ MLX5_CMD_OPCODE_QUERY_VUID = 0xb22,
MLX5_CMD_OP_MAX
};
@@ -311,6 +331,15 @@ enum {
MLX5_CMD_OP_GENERAL_END = 0xd00,
};
+enum {
+ MLX5_FT_NIC_RX_2_NIC_RX_RDMA = BIT(0),
+ MLX5_FT_NIC_TX_RDMA_2_NIC_TX = BIT(1),
+};
+
+enum {
+ MLX5_CMD_OP_MOD_UPDATE_HEADER_MODIFY_ARGUMENT = 0x1,
+};
+
struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_dmac[0x1];
u8 outer_smac[0x1];
@@ -342,7 +371,7 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_geneve_oam[0x1];
u8 outer_geneve_protocol_type[0x1];
u8 outer_geneve_opt_len[0x1];
- u8 reserved_at_1e[0x1];
+ u8 source_vhca_port[0x1];
u8 source_eswitch_port[0x1];
u8 inner_dmac[0x1];
@@ -371,7 +400,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 reserved_at_37[0x9];
u8 geneve_tlv_option_0_data[0x1];
- u8 reserved_at_41[0x4];
+ u8 geneve_tlv_option_0_exist[0x1];
+ u8 reserved_at_42[0x3];
u8 outer_first_mpls_over_udp[0x4];
u8 outer_first_mpls_over_gre[0x4];
u8 inner_first_mpls[0x4];
@@ -393,6 +423,25 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 metadata_reg_c_0[0x1];
};
+/* Table 2170 - Flow Table Fields Supported 2 Format */
+struct mlx5_ifc_flow_table_fields_supported_2_bits {
+ u8 inner_l4_type_ext[0x1];
+ u8 outer_l4_type_ext[0x1];
+ u8 inner_l4_type[0x1];
+ u8 outer_l4_type[0x1];
+ u8 reserved_at_4[0xa];
+ u8 bth_opcode[0x1];
+ u8 reserved_at_f[0x1];
+ u8 tunnel_header_0_1[0x1];
+ u8 reserved_at_11[0xf];
+
+ u8 reserved_at_20[0xf];
+ u8 ipsec_next_header[0x1];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_flow_table_prop_layout_bits {
u8 ft_support[0x1];
u8 reserved_at_1[0x1];
@@ -403,7 +452,7 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 flow_table_modify[0x1];
u8 reformat[0x1];
u8 decap[0x1];
- u8 reserved_at_9[0x1];
+ u8 reset_root_to_default[0x1];
u8 pop_vlan[0x1];
u8 push_vlan[0x1];
u8 reserved_at_c[0x1];
@@ -433,9 +482,32 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 max_modify_header_actions[0x8];
u8 max_ft_level[0x8];
- u8 reserved_at_40[0x20];
+ u8 reformat_add_esp_trasport[0x1];
+ u8 reformat_l2_to_l3_esp_tunnel[0x1];
+ u8 reformat_add_esp_transport_over_udp[0x1];
+ u8 reformat_del_esp_trasport[0x1];
+ u8 reformat_l3_esp_tunnel_to_l2[0x1];
+ u8 reformat_del_esp_transport_over_udp[0x1];
+ u8 execute_aso[0x1];
+ u8 reserved_at_47[0x19];
- u8 reserved_at_60[0x18];
+ u8 reformat_l2_to_l3_psp_tunnel[0x1];
+ u8 reformat_l3_psp_tunnel_to_l2[0x1];
+ u8 reformat_insert[0x1];
+ u8 reformat_remove[0x1];
+ u8 macsec_encrypt[0x1];
+ u8 macsec_decrypt[0x1];
+ u8 psp_encrypt[0x1];
+ u8 psp_decrypt[0x1];
+ u8 reformat_add_macsec[0x1];
+ u8 reformat_remove_macsec[0x1];
+ u8 reparse[0x1];
+ u8 reserved_at_6b[0x1];
+ u8 cross_vhca_object[0x1];
+ u8 reformat_l2_to_l3_audp_tunnel[0x1];
+ u8 reformat_l3_audp_tunnel_to_l2[0x1];
+ u8 ignore_flow_level_rtc_valid[0x1];
+ u8 reserved_at_70[0x8];
u8 log_max_ft_num[0x8];
u8 reserved_at_80[0x10];
@@ -462,6 +534,43 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits {
u8 reserved_at_6[0x1a];
};
+struct mlx5_ifc_ipv4_layout_bits {
+ u8 reserved_at_0[0x60];
+
+ u8 ipv4[0x20];
+};
+
+struct mlx5_ifc_ipv6_layout_bits {
+ u8 ipv6[16][0x8];
+};
+
+struct mlx5_ifc_ipv6_simple_layout_bits {
+ u8 ipv6_127_96[0x20];
+ u8 ipv6_95_64[0x20];
+ u8 ipv6_63_32[0x20];
+ u8 ipv6_31_0[0x20];
+};
+
+union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
+ struct mlx5_ifc_ipv6_simple_layout_bits ipv6_simple_layout;
+ struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
+ struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
+ u8 reserved_at_0[0x80];
+};
+
+enum {
+ MLX5_PACKET_L4_TYPE_NONE,
+ MLX5_PACKET_L4_TYPE_TCP,
+ MLX5_PACKET_L4_TYPE_UDP,
+};
+
+enum {
+ MLX5_PACKET_L4_TYPE_EXT_NONE,
+ MLX5_PACKET_L4_TYPE_EXT_TCP,
+ MLX5_PACKET_L4_TYPE_EXT_UDP,
+ MLX5_PACKET_L4_TYPE_EXT_ICMP,
+};
+
struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 smac_47_16[0x20];
@@ -487,7 +596,11 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 tcp_sport[0x10];
u8 tcp_dport[0x10];
- u8 reserved_at_c0[0x18];
+ u8 l4_type[0x2];
+ u8 l4_type_ext[0x4];
+ u8 reserved_at_c6[0xa];
+ u8 ipv4_ihl[0x4];
+ u8 reserved_at_d4[0x4];
u8 ttl_hoplimit[0x8];
u8 udp_sport[0x10];
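
A reading aid for the mlx5_ifc layouts in this file: the u8 name[0xN] members give field widths in bits, not byte arrays, and are only dereferenced through the MLX5_GET()/MLX5_SET()/MLX5_ADDR_OF() accessors that compute offsets from these *_bits structs. A sketch reading the newly added l4_type field out of a match-value buffer (match_v is an assumed fte_match_param buffer; the handler is a placeholder):

void *headers = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers);
u8 l4_type = MLX5_GET(fte_match_set_lyr_2_4, headers, l4_type);

if (l4_type == MLX5_PACKET_L4_TYPE_TCP)
	handle_tcp_flow();	/* placeholder */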
@@ -536,10 +649,11 @@ struct mlx5_ifc_fte_match_set_misc_bits {
union mlx5_ifc_gre_key_bits gre_key;
u8 vxlan_vni[0x18];
- u8 reserved_at_b8[0x8];
+ u8 bth_opcode[0x8];
u8 geneve_vni[0x18];
- u8 reserved_at_d8[0x7];
+ u8 reserved_at_d8[0x6];
+ u8 geneve_tlv_option_0_exist[0x1];
u8 geneve_oam[0x1];
u8 reserved_at_e0[0xc];
@@ -554,7 +668,7 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_140[0x8];
u8 bth_dst_qp[0x18];
- u8 reserved_at_160[0x20];
+ u8 inner_esp_spi[0x20];
u8 outer_esp_spi[0x20];
u8 reserved_at_1a0[0x60];
};
@@ -593,7 +707,12 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 metadata_reg_a[0x20];
- u8 reserved_at_1a0[0x60];
+ u8 psp_syndrome[0x8];
+ u8 macsec_syndrome[0x8];
+ u8 ipsec_syndrome[0x8];
+ u8 ipsec_next_header[0x8];
+
+ u8 reserved_at_1c0[0x40];
};
struct mlx5_ifc_fte_match_set_misc3_bits {
@@ -658,6 +777,26 @@ struct mlx5_ifc_fte_match_set_misc4_bits {
u8 reserved_at_100[0x100];
};
+struct mlx5_ifc_fte_match_set_misc5_bits {
+ u8 macsec_tag_0[0x20];
+
+ u8 macsec_tag_1[0x20];
+
+ u8 macsec_tag_2[0x20];
+
+ u8 macsec_tag_3[0x20];
+
+ u8 tunnel_header_0[0x20];
+
+ u8 tunnel_header_1[0x20];
+
+ u8 tunnel_header_2[0x20];
+
+ u8 tunnel_header_3[0x20];
+
+ u8 reserved_at_100[0x100];
+};
+
struct mlx5_ifc_cmd_pas_bits {
u8 pa_h[0x20];
@@ -690,7 +829,7 @@ struct mlx5_ifc_ads_bits {
u8 reserved_at_2[0xe];
u8 pkey_index[0x10];
- u8 reserved_at_20[0x8];
+ u8 plane_index[0x8];
u8 grh[0x1];
u8 mlid[0x7];
u8 rlid[0x10];
@@ -753,7 +892,19 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
- u8 reserved_at_e00[0x1200];
+ u8 reserved_at_e00[0x600];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive;
+
+ u8 reserved_at_1480[0x80];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive_rdma;
+
+ u8 reserved_at_1580[0x280];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_transmit_rdma;
+
+ u8 reserved_at_1880[0x780];
u8 sw_steering_nic_rx_action_drop_icm_address[0x40];
@@ -764,6 +915,22 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 reserved_at_20c0[0x5f40];
};
+struct mlx5_ifc_port_selection_cap_bits {
+ u8 reserved_at_0[0x10];
+ u8 port_select_flow_table[0x1];
+ u8 reserved_at_11[0x1];
+ u8 port_select_flow_table_bypass[0x1];
+ u8 reserved_at_13[0xd];
+
+ u8 reserved_at_20[0x1e0];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_port_selection;
+
+ u8 reserved_at_480[0x7b80];
+};
+
enum {
MLX5_FDB_TO_VPORT_REG_C_0 = 0x01,
MLX5_FDB_TO_VPORT_REG_C_1 = 0x02,
@@ -777,9 +944,16 @@ enum {
struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 fdb_to_vport_reg_c_id[0x8];
- u8 reserved_at_8[0xd];
+ u8 reserved_at_8[0x5];
+ u8 fdb_uplink_hairpin[0x1];
+ u8 fdb_multi_path_any_table_limit_regc[0x1];
+ u8 reserved_at_f[0x1];
+ u8 fdb_dynamic_tunnel[0x1];
+ u8 reserved_at_11[0x1];
+ u8 fdb_multi_path_any_table[0x1];
+ u8 reserved_at_13[0x2];
u8 fdb_modify_header_fwd_to_table[0x1];
- u8 reserved_at_16[0x1];
+ u8 fdb_ipv4_ttl_modify[0x1];
u8 flow_source[0x1];
u8 reserved_at_18[0x2];
u8 multi_fdb_encap[0x1];
@@ -795,7 +969,13 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
- u8 reserved_at_800[0x1000];
+ u8 reserved_at_800[0xC00];
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_esw_fdb;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_bitmask_support_2_esw_fdb;
+
+ u8 reserved_at_1500[0x300];
u8 sw_steering_fdb_action_drop_icm_address_rx[0x40];
@@ -808,6 +988,73 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 reserved_at_1900[0x6700];
};
+struct mlx5_ifc_wqe_based_flow_table_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 log_max_num_ste[0x5];
+ u8 reserved_at_8[0x3];
+ u8 log_max_num_stc[0x5];
+ u8 reserved_at_10[0x3];
+ u8 log_max_num_rtc[0x5];
+ u8 reserved_at_18[0x3];
+ u8 log_max_num_header_modify_pattern[0x5];
+
+ u8 rtc_hash_split_table[0x1];
+ u8 rtc_linear_lookup_table[0x1];
+ u8 reserved_at_22[0x1];
+ u8 stc_alloc_log_granularity[0x5];
+ u8 reserved_at_28[0x3];
+ u8 stc_alloc_log_max[0x5];
+ u8 reserved_at_30[0x3];
+ u8 ste_alloc_log_granularity[0x5];
+ u8 reserved_at_38[0x3];
+ u8 ste_alloc_log_max[0x5];
+
+ u8 reserved_at_40[0xb];
+ u8 rtc_reparse_mode[0x5];
+ u8 reserved_at_50[0x3];
+ u8 rtc_index_mode[0x5];
+ u8 reserved_at_58[0x3];
+ u8 rtc_log_depth_max[0x5];
+
+ u8 reserved_at_60[0x10];
+ u8 ste_format[0x10];
+
+ u8 stc_action_type[0x80];
+
+ u8 header_insert_type[0x10];
+ u8 header_remove_type[0x10];
+
+ u8 trivial_match_definer[0x20];
+
+ u8 reserved_at_140[0x1b];
+ u8 rtc_max_num_hash_definer_gen_wqe[0x5];
+
+ u8 reserved_at_160[0x18];
+ u8 access_index_mode[0x8];
+
+ u8 reserved_at_180[0x10];
+ u8 ste_format_gen_wqe[0x10];
+
+ u8 linear_match_definer_reg_c3[0x20];
+
+ u8 fdb_jump_to_tir_stc[0x1];
+ u8 reserved_at_1c1[0x1f];
+};
+
+struct mlx5_ifc_esw_cap_bits {
+ u8 reserved_at_0[0x1d];
+ u8 merged_eswitch[0x1];
+ u8 reserved_at_1e[0x2];
+
+ u8 reserved_at_20[0x40];
+
+ u8 esw_manager_vport_number_valid[0x1];
+ u8 reserved_at_61[0xf];
+ u8 esw_manager_vport_number[0x10];
+
+ u8 reserved_at_80[0x780];
+};
+
enum {
MLX5_COUNTER_SOURCE_ESWITCH = 0x0,
MLX5_COUNTER_FLOW_ESWITCH = 0x1,
@@ -819,7 +1066,8 @@ struct mlx5_ifc_e_switch_cap_bits {
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert_if_not_exist[0x1];
u8 vport_cvlan_insert_overwrite[0x1];
- u8 reserved_at_5[0x2];
+ u8 reserved_at_5[0x1];
+ u8 vport_cvlan_insert_always[0x1];
u8 esw_shared_ingress_acl[0x1];
u8 esw_uplink_ingress_acl[0x1];
u8 root_ft_on_other_esw[0x1];
@@ -862,9 +1110,12 @@ struct mlx5_ifc_qos_cap_bits {
u8 nic_bw_share[0x1];
u8 nic_rate_limit[0x1];
u8 packet_pacing_uid[0x1];
- u8 reserved_at_c[0x14];
+ u8 log_esw_max_sched_depth[0x4];
+ u8 reserved_at_10[0x10];
- u8 reserved_at_20[0xb];
+ u8 reserved_at_20[0x9];
+ u8 esw_cross_esw_sched[0x1];
+ u8 reserved_at_2a[0x1];
u8 log_max_qos_nic_queue_group[0x5];
u8 reserved_at_30[0x10];
@@ -872,7 +1123,8 @@ struct mlx5_ifc_qos_cap_bits {
u8 packet_pacing_min_rate[0x20];
- u8 reserved_at_80[0x10];
+ u8 reserved_at_80[0xb];
+ u8 log_esw_max_rate_limit[0x5];
u8 packet_pacing_rate_table_size[0x10];
u8 esw_element_type[0x10];
@@ -883,7 +1135,18 @@ struct mlx5_ifc_qos_cap_bits {
u8 max_tsar_bw_share[0x20];
- u8 reserved_at_100[0x700];
+ u8 nic_element_type[0x10];
+ u8 nic_tsar_type[0x10];
+
+ u8 reserved_at_120[0x3];
+ u8 log_meter_aso_granularity[0x5];
+ u8 reserved_at_128[0x3];
+ u8 log_meter_aso_max_alloc[0x5];
+ u8 reserved_at_130[0x3];
+ u8 log_max_num_meter_aso[0x5];
+ u8 reserved_at_138[0x8];
+
+ u8 reserved_at_140[0x6c0];
};
struct mlx5_ifc_debug_cap_bits {
@@ -918,7 +1181,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 scatter_fcs[0x1];
u8 enhanced_multi_pkt_send_wqe[0x1];
u8 tunnel_lso_const_out_ip_id[0x1];
- u8 reserved_at_1c[0x2];
+ u8 tunnel_lro_gre[0x1];
+ u8 tunnel_lro_vxlan[0x1];
u8 tunnel_stateless_gre[0x1];
u8 tunnel_stateless_vxlan[0x1];
@@ -938,7 +1202,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 tunnel_stateless_ip_over_ip_tx[0x1];
u8 reserved_at_2e[0x2];
u8 max_vxlan_udp_ports[0x8];
- u8 reserved_at_38[0x6];
+ u8 swp_csum_l4_partial[0x1];
+ u8 reserved_at_39[0x5];
u8 max_geneve_opt_len[0x1];
u8 tunnel_stateless_geneve_rx[0x1];
@@ -953,9 +1218,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
};
enum {
- MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
- MLX5_QP_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
- MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
+ MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
+ MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
+ MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
};
struct mlx5_ifc_roce_cap_bits {
@@ -964,7 +1229,9 @@ struct mlx5_ifc_roce_cap_bits {
u8 sw_r_roce_src_udp_port[0x1];
u8 fl_rc_qp_when_roce_disabled[0x1];
u8 fl_rc_qp_when_roce_enabled[0x1];
- u8 reserved_at_7[0x17];
+ u8 roce_cc_general[0x1];
+ u8 qp_ooo_transmit_default[0x1];
+ u8 reserved_at_9[0x15];
u8 qp_ts_format[0x2];
u8 reserved_at_20[0x60];
@@ -1005,6 +1272,30 @@ struct mlx5_ifc_sync_steering_out_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_sync_crypto_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 crypto_type[0x10];
+
+ u8 reserved_at_80[0x80];
+};
+
+struct mlx5_ifc_sync_crypto_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_device_mem_cap_bits {
u8 memic[0x1];
u8 reserved_at_1[0x1f];
@@ -1028,15 +1319,23 @@ struct mlx5_ifc_device_mem_cap_bits {
u8 log_sw_icm_alloc_granularity[0x6];
u8 log_steering_sw_icm_size[0x8];
- u8 reserved_at_120[0x20];
+ u8 log_indirect_encap_sw_icm_size[0x8];
+ u8 reserved_at_128[0x10];
+ u8 log_header_modify_pattern_sw_icm_size[0x8];
u8 header_modify_sw_icm_start_address[0x40];
- u8 reserved_at_180[0x80];
+ u8 reserved_at_180[0x40];
+
+ u8 header_modify_pattern_sw_icm_start_address[0x40];
u8 memic_operations[0x20];
- u8 reserved_at_220[0x5e0];
+ u8 reserved_at_220[0x20];
+
+ u8 indirect_encap_sw_icm_start_address[0x40];
+
+ u8 reserved_at_280[0x580];
};
struct mlx5_ifc_device_event_cap_bits {
@@ -1064,7 +1363,14 @@ struct mlx5_ifc_virtio_emulation_cap_bits {
u8 max_emulated_devices[0x8];
u8 max_num_virtio_queues[0x18];
- u8 reserved_at_a0[0x60];
+ u8 reserved_at_a0[0x20];
+
+ u8 reserved_at_c0[0x13];
+ u8 desc_group_mkey_supported[0x1];
+ u8 freeze_to_rdy_supported[0x1];
+ u8 reserved_at_d5[0xb];
+
+ u8 reserved_at_e0[0x20];
u8 umem_1_buffer_param_a[0x20];
@@ -1128,11 +1434,13 @@ struct mlx5_ifc_atomic_caps_bits {
u8 reserved_at_e0[0x720];
};
-struct mlx5_ifc_odp_cap_bits {
+struct mlx5_ifc_odp_scheme_cap_bits {
u8 reserved_at_0[0x40];
u8 sig[0x1];
- u8 reserved_at_41[0x1f];
+ u8 reserved_at_41[0x4];
+ u8 page_prefetch[0x1];
+ u8 reserved_at_46[0x1a];
u8 reserved_at_60[0x20];
@@ -1146,34 +1454,20 @@ struct mlx5_ifc_odp_cap_bits {
struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps;
- u8 reserved_at_120[0x6E0];
+ u8 reserved_at_120[0xe0];
};
-struct mlx5_ifc_calc_op {
- u8 reserved_at_0[0x10];
- u8 reserved_at_10[0x9];
- u8 op_swap_endianness[0x1];
- u8 op_min[0x1];
- u8 op_xor[0x1];
- u8 op_or[0x1];
- u8 op_and[0x1];
- u8 op_max[0x1];
- u8 op_add[0x1];
-};
+struct mlx5_ifc_odp_cap_bits {
+ struct mlx5_ifc_odp_scheme_cap_bits transport_page_fault_scheme_cap;
-struct mlx5_ifc_vector_calc_cap_bits {
- u8 calc_matrix[0x1];
- u8 reserved_at_1[0x1f];
- u8 reserved_at_20[0x8];
- u8 max_vec_count[0x8];
- u8 reserved_at_30[0xd];
- u8 max_chunk_size[0x3];
- struct mlx5_ifc_calc_op calc0;
- struct mlx5_ifc_calc_op calc1;
- struct mlx5_ifc_calc_op calc2;
- struct mlx5_ifc_calc_op calc3;
+ struct mlx5_ifc_odp_scheme_cap_bits memory_page_fault_scheme_cap;
+
+ u8 reserved_at_400[0x200];
- u8 reserved_at_c0[0x720];
+ u8 mem_page_fault[0x1];
+ u8 reserved_at_601[0x1f];
+
+ u8 reserved_at_620[0x1e0];
};
struct mlx5_ifc_tls_cap_bits {
@@ -1203,6 +1497,39 @@ struct mlx5_ifc_ipsec_cap_bits {
u8 reserved_at_30[0x7d0];
};
+struct mlx5_ifc_macsec_cap_bits {
+ u8 macsec_epn[0x1];
+ u8 reserved_at_1[0x2];
+ u8 macsec_crypto_esp_aes_gcm_256_encrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_128_encrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_256_decrypt[0x1];
+ u8 macsec_crypto_esp_aes_gcm_128_decrypt[0x1];
+ u8 reserved_at_7[0x4];
+ u8 log_max_macsec_offload[0x5];
+ u8 reserved_at_10[0x10];
+
+ u8 min_log_macsec_full_replay_window[0x8];
+ u8 max_log_macsec_full_replay_window[0x8];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x7c0];
+};
+
+struct mlx5_ifc_psp_cap_bits {
+ u8 reserved_at_0[0x1];
+ u8 psp_crypto_offload[0x1];
+ u8 reserved_at_2[0x1];
+ u8 psp_crypto_esp_aes_gcm_256_encrypt[0x1];
+ u8 psp_crypto_esp_aes_gcm_128_encrypt[0x1];
+ u8 psp_crypto_esp_aes_gcm_256_decrypt[0x1];
+ u8 psp_crypto_esp_aes_gcm_128_decrypt[0x1];
+ u8 reserved_at_7[0x4];
+ u8 log_max_num_of_psp_spi[0x5];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x7e0];
+};
+
enum {
MLX5_WQ_TYPE_LINKED_LIST = 0x0,
MLX5_WQ_TYPE_CYCLIC = 0x1,
@@ -1255,9 +1582,13 @@ enum {
};
enum {
+ MLX5_FLEX_IPV4_OVER_VXLAN_ENABLED = 1 << 0,
+ MLX5_FLEX_IPV6_OVER_VXLAN_ENABLED = 1 << 1,
+ MLX5_FLEX_IPV6_OVER_IP_ENABLED = 1 << 2,
MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3,
MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4,
- mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5,
+ MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5,
+ MLX5_FLEX_P_BIT_VXLAN_GPE_ENABLED = 1 << 6,
MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
@@ -1272,6 +1603,8 @@ enum {
enum {
MLX5_UCTX_CAP_RAW_TX = 1UL << 0,
MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
+ MLX5_UCTX_CAP_RDMA_CTRL = 1UL << 3,
+ MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA = 1UL << 4,
};
#define MLX5_FC_BULK_SIZE_FACTOR 128
@@ -1289,28 +1622,27 @@ enum mlx5_fc_bulk_alloc_bitmask {
#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+#define MLX5_FT_MAX_MULTIPATH_LEVEL 63
+
enum {
MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
-};
-
-enum {
- MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
- MLX5_SQ_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
- MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
-};
-
-enum {
- MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
- MLX5_RQ_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
- MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
+ MLX5_STEERING_FORMAT_CONNECTX_7 = 2,
+ MLX5_STEERING_FORMAT_CONNECTX_8 = 3,
};
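
An arithmetic note on the flow-counter bulk macro a few lines up: MLX5_FC_BULK_NUM_FCS() scales the firmware's bulk-alloc capability by the fixed 128-counter granularity, so for example a capability value of 4 advertises 128 * 4 = 512 counters per bulk. A compile-time sketch of that identity:

static_assert(MLX5_FC_BULK_NUM_FCS(4) == 512);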
struct mlx5_ifc_cmd_hca_cap_bits {
- u8 reserved_at_0[0x1f];
+ u8 reserved_at_0[0x6];
+ u8 page_request_disable[0x1];
+ u8 abs_native_port_num[0x1];
+ u8 reserved_at_8[0x8];
+ u8 shared_object_to_user_object_allowed[0x1];
+ u8 reserved_at_13[0xe];
u8 vhca_resource_manager[0x1];
- u8 reserved_at_20[0x3];
+ u8 hca_cap_2[0x1];
+ u8 create_lag_when_not_master_up[0x1];
+ u8 dtor[0x1];
u8 event_on_vhca_state_teardown_request[0x1];
u8 event_on_vhca_state_in_use[0x1];
u8 event_on_vhca_state_active[0x1];
@@ -1340,7 +1672,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_b0[0x1];
u8 uplink_follow[0x1];
u8 ts_cqe_to_dest_cqn[0x1];
- u8 reserved_at_b3[0xd];
+ u8 reserved_at_b3[0x6];
+ u8 go_back_n[0x1];
+ u8 reserved_at_ba[0x6];
u8 max_sgl_for_optimized_performance[0x8];
u8 log_max_cq_sz[0x8];
@@ -1353,9 +1687,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_eq_sz[0x8];
u8 relaxed_ordering_write[0x1];
- u8 relaxed_ordering_read[0x1];
+ u8 relaxed_ordering_read_pci_enabled[0x1];
u8 log_max_mkey[0x6];
- u8 reserved_at_f0[0x8];
+ u8 reserved_at_f0[0x6];
+ u8 terminate_scatter_list_mkey[0x1];
+ u8 repeated_mkey[0x1];
u8 dump_fill_mkey[0x1];
u8 reserved_at_f9[0x2];
u8 fast_teardown[0x1];
@@ -1371,13 +1707,19 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 null_mkey[0x1];
u8 log_max_klm_list_size[0x6];
- u8 reserved_at_120[0xa];
+ u8 reserved_at_120[0x2];
+ u8 qpc_extension[0x1];
+ u8 reserved_at_123[0x7];
u8 log_max_ra_req_dc[0x6];
- u8 reserved_at_130[0xa];
+ u8 reserved_at_130[0x2];
+ u8 eth_wqe_too_small[0x1];
+ u8 reserved_at_133[0x6];
+ u8 vnic_env_cq_overrun[0x1];
u8 log_max_ra_res_dc[0x6];
- u8 reserved_at_140[0x6];
+ u8 reserved_at_140[0x5];
u8 release_all_pages[0x1];
+ u8 must_not_use[0x1];
u8 reserved_at_147[0x2];
u8 roce_accl[0x1];
u8 log_max_ra_req_qp[0x6];
@@ -1455,7 +1797,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 pci_sync_for_fw_update_event[0x1];
u8 reserved_at_1f2[0x6];
u8 init2_lag_tx_port_affinity[0x1];
- u8 reserved_at_1fa[0x3];
+ u8 reserved_at_1fa[0x2];
+ u8 wqe_based_flow_table_update_cap[0x1];
u8 cqe_version[0x4];
u8 compact_address_vector[0x1];
@@ -1491,7 +1834,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cq_oi[0x1];
u8 cq_resize[0x1];
u8 cq_moderation[0x1];
- u8 reserved_at_223[0x3];
+ u8 cq_period_mode_modify[0x1];
+ u8 reserved_at_224[0x2];
u8 cq_eq_remap[0x1];
u8 pg[0x1];
u8 block_lb_mc[0x1];
@@ -1516,9 +1860,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 rc[0x1];
u8 uar_4k[0x1];
- u8 reserved_at_241[0x9];
+ u8 reserved_at_241[0x7];
+ u8 fl_rc_qp_when_roce_disabled[0x1];
+ u8 regexp_params[0x1];
u8 uar_sz[0x6];
- u8 reserved_at_250[0x8];
+ u8 port_selection_cap[0x1];
+ u8 nic_cap_reg[0x1];
+ u8 umem_uid_0[0x1];
+ u8 reserved_at_253[0x5];
u8 log_pg_sz[0x8];
u8 bf[0x1];
@@ -1530,7 +1879,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_bf_reg_size[0x5];
- u8 reserved_at_270[0x6];
+ u8 disciplined_fr_counter[0x1];
+ u8 reserved_at_271[0x2];
+ u8 qp_error_syndrome[0x1];
+ u8 reserved_at_274[0x2];
u8 lag_dct[0x2];
u8 lag_tx_port_affinity[0x1];
u8 lag_native_fdb_selection[0x1];
@@ -1541,7 +1893,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_280[0x10];
u8 max_wqe_sz_sq[0x10];
- u8 reserved_at_2a0[0x10];
+ u8 reserved_at_2a0[0x7];
+ u8 mkey_pcie_tph[0x1];
+ u8 reserved_at_2a8[0x1];
+ u8 tis_tir_td_order[0x1];
+
+ u8 psp[0x1];
+ u8 shampo[0x1];
+ u8 reserved_at_2ac[0x4];
u8 max_wqe_sz_rq[0x10];
u8 max_flow_counter_31_16[0x10];
@@ -1556,15 +1915,30 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_320[0x3];
u8 log_max_transport_domain[0x5];
- u8 reserved_at_328[0x3];
+ u8 reserved_at_328[0x2];
+ u8 relaxed_ordering_read[0x1];
u8 log_max_pd[0x5];
- u8 reserved_at_330[0xb];
+ u8 dp_ordering_ooo_all_ud[0x1];
+ u8 dp_ordering_ooo_all_uc[0x1];
+ u8 dp_ordering_ooo_all_xrc[0x1];
+ u8 dp_ordering_ooo_all_dc[0x1];
+ u8 dp_ordering_ooo_all_rc[0x1];
+ u8 pcie_reset_using_hotreset_method[0x1];
+ u8 pci_sync_for_fw_update_with_driver_unload[0x1];
+ u8 vnic_env_cnt_steering_fail[0x1];
+ u8 vport_counter_local_loopback[0x1];
+ u8 q_counter_aggregation[0x1];
+ u8 q_counter_other_vport[0x1];
u8 log_max_xrcd[0x5];
u8 nic_receive_steering_discard[0x1];
u8 receive_discard_vport_down[0x1];
u8 transmit_discard_vport_down[0x1];
- u8 reserved_at_343[0x5];
+ u8 eq_overrun_count[0x1];
+ u8 reserved_at_344[0x1];
+ u8 invalid_command_count[0x1];
+ u8 quota_exceeded_count[0x1];
+ u8 reserved_at_347[0x1];
u8 log_max_flow_counter_bulk[0x8];
u8 max_flow_counter_15_0[0x10];
@@ -1585,11 +1959,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_rqt[0x5];
u8 reserved_at_390[0x3];
u8 log_max_rqt_size[0x5];
- u8 reserved_at_398[0x3];
+ u8 reserved_at_398[0x1];
+ u8 vnic_env_cnt_bar_uar_access[0x1];
+ u8 vnic_env_cnt_odp_page_fault[0x1];
u8 log_max_tis_per_sq[0x5];
u8 ext_stride_num_range[0x1];
- u8 reserved_at_3a1[0x2];
+ u8 roce_rw_supported[0x1];
+ u8 log_max_current_uc_list_wr_supported[0x1];
u8 log_max_stride_sz_rq[0x5];
u8 reserved_at_3a8[0x3];
u8 log_min_stride_sz_rq[0x5];
@@ -1612,7 +1989,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 disable_local_lb_uc[0x1];
u8 disable_local_lb_mc[0x1];
u8 log_min_hairpin_wq_data_sz[0x5];
- u8 reserved_at_3e8[0x2];
+ u8 reserved_at_3e8[0x1];
+ u8 silent_mode[0x1];
u8 vhca_state[0x1];
u8 log_max_vlan_list[0x5];
u8 reserved_at_3f0[0x3];
@@ -1627,9 +2005,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 steering_format_version[0x4];
u8 create_qp_start_hint[0x18];
- u8 reserved_at_460[0x3];
+ u8 reserved_at_460[0x1];
+ u8 ats[0x1];
+ u8 cross_vhca_rqt[0x1];
u8 log_max_uctx[0x5];
- u8 reserved_at_468[0x2];
+ u8 reserved_at_468[0x1];
+ u8 crypto[0x1];
u8 ipsec_offload[0x1];
u8 log_max_umem[0x5];
u8 max_num_eqs[0x10];
@@ -1653,9 +2034,22 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 max_geneve_tlv_options[0x8];
u8 reserved_at_568[0x3];
u8 max_geneve_tlv_option_data_len[0x5];
- u8 reserved_at_570[0x10];
-
- u8 reserved_at_580[0x33];
+ u8 reserved_at_570[0x1];
+ u8 adv_rdma[0x1];
+ u8 reserved_at_572[0x7];
+ u8 adv_virtualization[0x1];
+ u8 reserved_at_57a[0x6];
+
+ u8 reserved_at_580[0xb];
+ u8 log_max_dci_stream_channels[0x5];
+ u8 reserved_at_590[0x3];
+ u8 log_max_dci_errored_streams[0x5];
+ u8 reserved_at_598[0x8];
+
+ u8 reserved_at_5a0[0x10];
+ u8 enhanced_cqe_compression[0x1];
+ u8 reserved_at_5b1[0x1];
+ u8 crossing_vhca_mkey[0x1];
u8 log_max_dek[0x5];
u8 reserved_at_5b8[0x4];
u8 mini_cqe_resp_stride_index[0x1];
@@ -1697,7 +2091,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_682[0x1];
u8 log_max_sf[0x5];
u8 apu[0x1];
- u8 reserved_at_689[0x7];
+ u8 reserved_at_689[0x4];
+ u8 migration[0x1];
+ u8 reserved_at_68e[0x2];
u8 log_min_sf_size[0x8];
u8 max_num_sf_partitions[0x8];
@@ -1712,7 +2108,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 flex_parser_id_outer_first_mpls_over_gre[0x4];
u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4];
- u8 reserved_at_6e0[0x10];
+ u8 max_num_match_definer[0x10];
u8 sf_base_id[0x10];
u8 flex_parser_id_gtpu_dw_2[0x4];
@@ -1722,23 +2118,148 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 dynamic_msix_table_size[0xc];
u8 reserved_at_740[0xc];
u8 min_dynamic_vf_msix_table_size[0x4];
- u8 reserved_at_750[0x4];
+ u8 reserved_at_750[0x2];
+ u8 data_direct[0x1];
+ u8 reserved_at_753[0x1];
u8 max_dynamic_vf_msix_table_size[0xc];
- u8 reserved_at_760[0x20];
+ u8 reserved_at_760[0x3];
+ u8 log_max_num_header_modify_argument[0x5];
+ u8 log_header_modify_argument_granularity_offset[0x4];
+ u8 log_header_modify_argument_granularity[0x4];
+ u8 reserved_at_770[0x3];
+ u8 log_header_modify_argument_max_alloc[0x5];
+ u8 reserved_at_778[0x8];
+
u8 vhca_tunnel_commands[0x40];
- u8 reserved_at_7c0[0x40];
+ u8 match_definer_format_supported[0x40];
};
-enum mlx5_flow_destination_type {
- MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
- MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
- MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
- MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
+enum {
+ MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS = 0x80000,
+ MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_ROOT_TO_REMOTE_FLOW_TABLE = (1ULL << 20),
+};
- MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99,
- MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
- MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM = 0x101,
+enum {
+ MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE = 0x200,
+};
+
+struct mlx5_ifc_cmd_hca_cap_2_bits {
+ u8 reserved_at_0[0x80];
+
+ u8 migratable[0x1];
+ u8 reserved_at_81[0x7];
+ u8 dp_ordering_force[0x1];
+ u8 reserved_at_89[0x9];
+ u8 query_vuid[0x1];
+ u8 reserved_at_93[0x5];
+ u8 umr_log_entity_size_5[0x1];
+ u8 reserved_at_99[0x7];
+
+ u8 max_reformat_insert_size[0x8];
+ u8 max_reformat_insert_offset[0x8];
+ u8 max_reformat_remove_size[0x8];
+ u8 max_reformat_remove_offset[0x8];
+
+ u8 reserved_at_c0[0x8];
+ u8 migration_multi_load[0x1];
+ u8 migration_tracking_state[0x1];
+ u8 multiplane_qp_ud[0x1];
+ u8 reserved_at_cb[0x5];
+ u8 migration_in_chunks[0x1];
+ u8 reserved_at_d1[0x1];
+ u8 sf_eq_usage[0x1];
+ u8 reserved_at_d3[0x5];
+ u8 multiplane[0x1];
+ u8 reserved_at_d9[0x7];
+
+ u8 cross_vhca_object_to_object_supported[0x20];
+
+ u8 allowed_object_for_other_vhca_access[0x40];
+
+ u8 reserved_at_140[0x60];
+
+ u8 flow_table_type_2_type[0x8];
+ u8 reserved_at_1a8[0x2];
+ u8 format_select_dw_8_6_ext[0x1];
+ u8 log_min_mkey_entity_size[0x5];
+ u8 reserved_at_1b0[0x10];
+
+ u8 general_obj_types_127_64[0x40];
+ u8 reserved_at_200[0x20];
+
+ u8 reserved_at_220[0x1];
+ u8 sw_vhca_id_valid[0x1];
+ u8 sw_vhca_id[0xe];
+ u8 reserved_at_230[0x10];
+
+ u8 reserved_at_240[0xb];
+ u8 ts_cqe_metadata_size2wqe_counter[0x5];
+ u8 reserved_at_250[0x10];
+
+ u8 reserved_at_260[0x20];
+
+ u8 format_select_dw_gtpu_dw_0[0x8];
+ u8 format_select_dw_gtpu_dw_1[0x8];
+ u8 format_select_dw_gtpu_dw_2[0x8];
+ u8 format_select_dw_gtpu_first_ext_dw_0[0x8];
+
+ u8 generate_wqe_type[0x20];
+
+ u8 reserved_at_2c0[0xc0];
+
+ u8 reserved_at_380[0xb];
+ u8 min_mkey_log_entity_size_fixed_buffer[0x5];
+ u8 ec_vf_vport_base[0x10];
+
+ u8 reserved_at_3a0[0x2];
+ u8 max_mkey_log_entity_size_fixed_buffer[0x6];
+ u8 reserved_at_3a8[0x2];
+ u8 max_mkey_log_entity_size_mtt[0x6];
+ u8 max_rqt_vhca_id[0x10];
+
+ u8 reserved_at_3c0[0x20];
+
+ u8 reserved_at_3e0[0x10];
+ u8 pcc_ifa2[0x1];
+ u8 reserved_at_3f1[0xf];
+
+ u8 reserved_at_400[0x1];
+ u8 min_mkey_log_entity_size_fixed_buffer_valid[0x1];
+ u8 reserved_at_402[0xe];
+ u8 return_reg_id[0x10];
+
+ u8 reserved_at_420[0x1c];
+ u8 flow_table_hash_type[0x4];
+
+ u8 reserved_at_440[0x8];
+ u8 max_num_eqs_24b[0x18];
+
+ u8 reserved_at_460[0x144];
+ u8 load_balance_id[0x4];
+ u8 reserved_at_5a8[0x18];
+
+ u8 query_adjacent_functions_id[0x1];
+ u8 ingress_egress_esw_vport_connect[0x1];
+ u8 function_id_type_vhca_id[0x1];
+ u8 reserved_at_5c3[0x1];
+ u8 lag_per_mp_group[0x1];
+ u8 reserved_at_5c5[0xb];
+ u8 delegate_vhca_management_profiles[0x10];
+
+ u8 delegated_vhca_max[0x10];
+ u8 delegate_vhca_max[0x10];
+
+ u8 reserved_at_600[0x200];
+};
+
+enum mlx5_ifc_flow_destination_type {
+ MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT = 0x0,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_TIR = 0x2,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE = 0xA,
};
enum mlx5_flow_table_miss_action {
@@ -1753,7 +2274,8 @@ struct mlx5_ifc_dest_format_struct_bits {
u8 destination_eswitch_owner_vhca_id_valid[0x1];
u8 packet_reformat[0x1];
- u8 reserved_at_22[0xe];
+ u8 reserved_at_22[0x6];
+ u8 destination_table_type[0x8];
u8 destination_eswitch_owner_vhca_id[0x10];
};
@@ -1771,7 +2293,7 @@ struct mlx5_ifc_extended_dest_format_bits {
u8 reserved_at_60[0x20];
};
-union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
+union mlx5_ifc_dest_format_flow_counter_list_auto_bits {
struct mlx5_ifc_extended_dest_format_bits extended_dest_format;
struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
};
@@ -1789,7 +2311,9 @@ struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4;
- u8 reserved_at_c00[0x400];
+ struct mlx5_ifc_fte_match_set_misc5_bits misc_parameters_5;
+
+ u8 reserved_at_e00[0x200];
};
enum {
@@ -1861,7 +2385,26 @@ struct mlx5_ifc_wq_bits {
u8 reserved_at_139[0x4];
u8 log_wqe_stride_size[0x3];
- u8 reserved_at_140[0x4c0];
+ u8 dbr_umem_id[0x20];
+ u8 wq_umem_id[0x20];
+
+ u8 wq_umem_offset[0x40];
+
+ u8 headers_mkey[0x20];
+
+ u8 shampo_enable[0x1];
+ u8 reserved_at_1e1[0x1];
+ u8 shampo_mode[0x2];
+ u8 reserved_at_1e4[0x1];
+ u8 log_reservation_size[0x3];
+ u8 reserved_at_1e8[0x5];
+ u8 log_max_num_of_packets_per_reservation[0x3];
+ u8 reserved_at_1f0[0x6];
+ u8 log_headers_entry_size[0x2];
+ u8 reserved_at_1f8[0x4];
+ u8 log_headers_buffer_entry_num[0x4];
+
+ u8 reserved_at_200[0x400];
struct mlx5_ifc_cmd_pas_bits pas[];
};
@@ -1871,6 +2414,13 @@ struct mlx5_ifc_rq_num_bits {
u8 rq_num[0x18];
};
+struct mlx5_ifc_rq_vhca_bits {
+ u8 reserved_at_0[0x8];
+ u8 rq_num[0x18];
+ u8 reserved_at_20[0x10];
+ u8 rq_vhca_id[0x10];
+};
+
struct mlx5_ifc_mac_address_layout_bits {
u8 reserved_at_0[0x10];
u8 mac_addr_47_32[0x10];
@@ -1945,6 +2495,17 @@ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
u8 reserved_at_360[0x4a0];
};
+struct mlx5_ifc_cong_control_r_roce_general_bits {
+ u8 reserved_at_0[0x80];
+
+ u8 reserved_at_80[0x10];
+ u8 rtt_resp_dscp_valid[0x1];
+ u8 reserved_at_91[0x9];
+ u8 rtt_resp_dscp[0x6];
+
+ u8 reserved_at_a0[0x760];
+};
+
struct mlx5_ifc_cong_control_802_1qau_rp_bits {
u8 reserved_at_0[0x80];
@@ -2143,6 +2704,12 @@ struct mlx5_ifc_field_select_802_1qau_rp_bits {
u8 field_select_8021qaurp[0x20];
};
+struct mlx5_ifc_phys_layer_recovery_cntrs_bits {
+ u8 total_successful_recovery_events[0x20];
+
+ u8 reserved_at_20[0x7a0];
+};
+
struct mlx5_ifc_phys_layer_cntrs_bits {
u8 time_since_last_clear_high[0x20];
@@ -2315,6 +2882,46 @@ struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
u8 port_xmit_wait[0x20];
};
+struct mlx5_ifc_ib_ext_port_cntrs_grp_data_layout_bits {
+ u8 reserved_at_0[0x300];
+
+ u8 port_xmit_data_high[0x20];
+
+ u8 port_xmit_data_low[0x20];
+
+ u8 port_rcv_data_high[0x20];
+
+ u8 port_rcv_data_low[0x20];
+
+ u8 port_xmit_pkts_high[0x20];
+
+ u8 port_xmit_pkts_low[0x20];
+
+ u8 port_rcv_pkts_high[0x20];
+
+ u8 port_rcv_pkts_low[0x20];
+
+ u8 reserved_at_400[0x80];
+
+ u8 port_unicast_xmit_pkts_high[0x20];
+
+ u8 port_unicast_xmit_pkts_low[0x20];
+
+ u8 port_multicast_xmit_pkts_high[0x20];
+
+ u8 port_multicast_xmit_pkts_low[0x20];
+
+ u8 port_unicast_rcv_pkts_high[0x20];
+
+ u8 port_unicast_rcv_pkts_low[0x20];
+
+ u8 port_multicast_rcv_pkts_high[0x20];
+
+ u8 port_multicast_rcv_pkts_low[0x20];
+
+ u8 reserved_at_580[0x240];
+};
+
struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits {
u8 transmit_queue_high[0x20];
@@ -2792,6 +3399,62 @@ struct mlx5_ifc_dropped_packet_logged_bits {
u8 reserved_at_0[0xe0];
};
+struct mlx5_ifc_nic_cap_reg_bits {
+ u8 reserved_at_0[0x1a];
+ u8 vhca_icm_ctrl[0x1];
+ u8 reserved_at_1b[0x5];
+
+ u8 reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_default_timeout_bits {
+ u8 to_multiplier[0x3];
+ u8 reserved_at_3[0x9];
+ u8 to_value[0x14];
+};
+
+struct mlx5_ifc_dtor_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ struct mlx5_ifc_default_timeout_bits pcie_toggle_to;
+
+ u8 reserved_at_40[0x60];
+
+ struct mlx5_ifc_default_timeout_bits health_poll_to;
+
+ struct mlx5_ifc_default_timeout_bits full_crdump_to;
+
+ struct mlx5_ifc_default_timeout_bits fw_reset_to;
+
+ struct mlx5_ifc_default_timeout_bits flush_on_err_to;
+
+ struct mlx5_ifc_default_timeout_bits pci_sync_update_to;
+
+ struct mlx5_ifc_default_timeout_bits tear_down_to;
+
+ struct mlx5_ifc_default_timeout_bits fsm_reactivate_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_pages_to;
+
+ struct mlx5_ifc_default_timeout_bits reclaim_vfs_pages_to;
+
+ struct mlx5_ifc_default_timeout_bits reset_unload_to;
+
+ u8 reserved_at_1c0[0x20];
+};
+
+struct mlx5_ifc_vhca_icm_ctrl_reg_bits {
+ u8 vhca_id_valid[0x1];
+ u8 reserved_at_1[0xf];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_20[0xa0];
+
+ u8 cur_alloc_icm[0x20];
+
+ u8 reserved_at_e0[0x120];
+};
+
enum {
MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
@@ -2942,9 +3605,9 @@ enum {
};
enum {
- MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
- MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT = 0x1,
- MLX5_QPC_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
+ MLX5_TIMESTAMP_FORMAT_DEFAULT = 0x1,
+ MLX5_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
};
struct mlx5_ifc_qpc_bits {
@@ -2966,7 +3629,8 @@ struct mlx5_ifc_qpc_bits {
u8 latency_sensitive[0x1];
u8 reserved_at_24[0x1];
u8 drain_sigerr[0x1];
- u8 reserved_at_26[0x2];
+ u8 reserved_at_26[0x1];
+ u8 dp_ordering_force[0x1];
u8 pd[0x18];
u8 mtu[0x3];
@@ -2976,7 +3640,8 @@ struct mlx5_ifc_qpc_bits {
u8 log_rq_stride[0x3];
u8 no_sq[0x1];
u8 log_sq_size[0x4];
- u8 reserved_at_55[0x3];
+ u8 reserved_at_55[0x1];
+ u8 retry_mode[0x2];
u8 ts_format[0x2];
u8 reserved_at_5a[0x1];
u8 rlky[0x1];
@@ -3013,10 +3678,12 @@ struct mlx5_ifc_qpc_bits {
u8 reserved_at_3c0[0x8];
u8 next_send_psn[0x18];
- u8 reserved_at_3e0[0x8];
+ u8 reserved_at_3e0[0x3];
+ u8 log_num_dci_stream_channels[0x5];
u8 cqn_snd[0x18];
- u8 reserved_at_400[0x8];
+ u8 reserved_at_400[0x3];
+ u8 log_num_dci_errored_streams[0x5];
u8 deth_sqpn[0x18];
u8 reserved_at_420[0x20];
@@ -3036,7 +3703,8 @@ struct mlx5_ifc_qpc_bits {
u8 rae[0x1];
u8 reserved_at_493[0x1];
u8 page_offset[0x6];
- u8 reserved_at_49a[0x3];
+ u8 reserved_at_49a[0x2];
+ u8 dp_ordering_1[0x1];
u8 cd_slave_receive[0x1];
u8 cd_slave_send[0x1];
u8 cd_master[0x1];
@@ -3101,22 +3769,69 @@ struct mlx5_ifc_roce_addr_layout_bits {
u8 reserved_at_e0[0x20];
};
+struct mlx5_ifc_crypto_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 synchronize_dek[0x1];
+ u8 int_kek_manual[0x1];
+ u8 int_kek_auto[0x1];
+ u8 reserved_at_6[0x1a];
+
+ u8 reserved_at_20[0x3];
+ u8 log_dek_max_alloc[0x5];
+ u8 reserved_at_28[0x3];
+ u8 log_max_num_deks[0x5];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x3];
+ u8 log_dek_granularity[0x5];
+ u8 reserved_at_68[0x3];
+ u8 log_max_num_int_kek[0x5];
+ u8 sw_wrapped_dek[0x10];
+
+ u8 reserved_at_80[0x780];
+};
+
+struct mlx5_ifc_shampo_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 shampo_log_max_reservation_size[0x5];
+ u8 reserved_at_8[0x3];
+ u8 shampo_log_min_reservation_size[0x5];
+ u8 shampo_min_mss_size[0x10];
+
+ u8 shampo_header_split[0x1];
+ u8 shampo_header_split_data_merge[0x1];
+ u8 reserved_at_22[0x1];
+ u8 shampo_log_max_headers_entry_size[0x5];
+ u8 reserved_at_28[0x18];
+
+ u8 reserved_at_40[0x7c0];
+};
+
union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+ struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2;
struct mlx5_ifc_odp_cap_bits odp_cap;
struct mlx5_ifc_atomic_caps_bits atomic_caps;
struct mlx5_ifc_roce_cap_bits roce_cap;
struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
+ struct mlx5_ifc_wqe_based_flow_table_cap_bits wqe_based_flow_table_cap;
+ struct mlx5_ifc_esw_cap_bits esw_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
- struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
+ struct mlx5_ifc_port_selection_cap_bits port_selection_cap;
struct mlx5_ifc_qos_cap_bits qos_cap;
struct mlx5_ifc_debug_cap_bits debug_cap;
struct mlx5_ifc_fpga_cap_bits fpga_cap;
struct mlx5_ifc_tls_cap_bits tls_cap;
struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
+ struct mlx5_ifc_macsec_cap_bits macsec_cap;
+ struct mlx5_ifc_crypto_cap_bits crypto_cap;
+ struct mlx5_ifc_ipsec_cap_bits ipsec_cap;
+ struct mlx5_ifc_psp_cap_bits psp_cap;
u8 reserved_at_0[0x8000];
};
@@ -3132,8 +3847,9 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400,
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
- MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT = 0x1000,
- MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT = 0x2000,
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT = 0x1000,
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT = 0x2000,
+ MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO = 0x4000,
};
enum {
@@ -3142,6 +3858,12 @@ enum {
MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT = 0x2,
};
+enum {
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC = 0x0,
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC = 0x1,
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP = 0x2,
+};
+
struct mlx5_ifc_vlan_bits {
u8 ethtype[0x10];
u8 prio[0x3];
@@ -3149,6 +3871,38 @@ struct mlx5_ifc_vlan_bits {
u8 vid[0xc];
};
+enum {
+ MLX5_FLOW_METER_COLOR_RED = 0x0,
+ MLX5_FLOW_METER_COLOR_YELLOW = 0x1,
+ MLX5_FLOW_METER_COLOR_GREEN = 0x2,
+ MLX5_FLOW_METER_COLOR_UNDEFINED = 0x3,
+};
+
+enum {
+ MLX5_EXE_ASO_FLOW_METER = 0x2,
+};
+
+struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits {
+ u8 return_reg_id[0x4];
+ u8 aso_type[0x4];
+ u8 reserved_at_8[0x14];
+ u8 action[0x1];
+ u8 init_color[0x2];
+ u8 meter_id[0x1];
+};
+
+union mlx5_ifc_exe_aso_ctrl {
+ struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits exe_aso_ctrl_flow_meter;
+};
+
+struct mlx5_ifc_execute_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_1[0x7];
+ u8 aso_object_id[0x18];
+
+ union mlx5_ifc_exe_aso_ctrl exe_aso_ctrl;
+};
+
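
Tying the execute_aso wire format above back to the software-side struct mlx5_exe_aso added in fs.h earlier: a hedged sketch of a flow-meter ASO action, where the object id and return-register choice are placeholders.

struct mlx5_flow_act flow_act = {
	.action = MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO |
		  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
	.exe_aso = {
		.type = MLX5_EXE_ASO_FLOW_METER,
		.object_id = meter_aso_obj_id,	/* placeholder */
		.return_reg_id = 5,		/* placeholder */
		.flow_meter = {
			.meter_idx = 0,
			.init_color = MLX5_FLOW_METER_COLOR_GREEN,
		},
	},
};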
struct mlx5_ifc_flow_context_bits {
struct mlx5_ifc_vlan_bits push_vlan;
@@ -3161,9 +3915,9 @@ struct mlx5_ifc_flow_context_bits {
u8 action[0x10];
u8 extended_destination[0x1];
- u8 reserved_at_81[0x1];
+ u8 uplink_hairpin_en[0x1];
u8 flow_source[0x2];
- u8 reserved_at_84[0x4];
+ u8 encrypt_decrypt_type[0x4];
u8 destination_list_size[0x18];
u8 reserved_at_a0[0x8];
@@ -3175,14 +3929,16 @@ struct mlx5_ifc_flow_context_bits {
struct mlx5_ifc_vlan_bits push_vlan_2;
- u8 ipsec_obj_id[0x20];
+ u8 encrypt_decrypt_obj_id[0x20];
u8 reserved_at_140[0xc0];
struct mlx5_ifc_fte_match_param_bits match_value;
- u8 reserved_at_1200[0x600];
+ struct mlx5_ifc_execute_aso_bits execute_aso[4];
+
+ u8 reserved_at_1300[0x500];
- union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[];
+ union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[];
};
enum {
@@ -3248,11 +4004,35 @@ struct mlx5_ifc_vnic_diagnostic_statistics_bits {
u8 transmit_discard_vport_down[0x40];
- u8 reserved_at_140[0xa0];
+ u8 async_eq_overrun[0x20];
+
+ u8 comp_eq_overrun[0x20];
+
+ u8 reserved_at_180[0x20];
+
+ u8 invalid_command[0x20];
+
+ u8 quota_exceeded_command[0x20];
u8 internal_rq_out_of_buffer[0x20];
- u8 reserved_at_200[0xe00];
+ u8 cq_overrun[0x20];
+
+ u8 eth_wqe_too_small[0x20];
+
+ u8 reserved_at_220[0xc0];
+
+ u8 generated_pkt_steering_fail[0x40];
+
+ u8 handled_pkt_steering_fail[0x40];
+
+ u8 bar_uar_access[0x20];
+
+ u8 odp_local_triggered_page_fault[0x20];
+
+ u8 odp_remote_triggered_page_fault[0x20];
+
+ u8 reserved_at_3c0[0xc20];
};
struct mlx5_ifc_traffic_counter_bits {
@@ -3291,8 +4071,8 @@ enum {
};
enum {
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO = BIT(0),
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO = BIT(1),
};
enum {
@@ -3317,7 +4097,7 @@ struct mlx5_ifc_tirc_bits {
u8 reserved_at_80[0x4];
u8 lro_timeout_period_usecs[0x10];
- u8 lro_enable_mask[0x4];
+ u8 packet_merge_mask[0x4];
u8 lro_max_ip_payload_size[0x8];
u8 reserved_at_a0[0x40];
@@ -3394,12 +4174,6 @@ enum {
MLX5_SQC_STATE_ERR = 0x3,
};
-enum {
- MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
- MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT = 0x1,
- MLX5_SQC_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
-};
-
struct mlx5_ifc_sqc_bits {
u8 rlky[0x1];
u8 cd_master[0x1];
@@ -3411,7 +4185,8 @@ struct mlx5_ifc_sqc_bits {
u8 reg_umr[0x1];
u8 allow_swp[0x1];
u8 hairpin[0x1];
- u8 reserved_at_f[0xb];
+ u8 non_wire[0x1];
+ u8 reserved_at_10[0xa];
u8 ts_format[0x2];
u8 reserved_at_1c[0x4];
@@ -3451,20 +4226,65 @@ enum {
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2,
SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP = 0x4,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_RATE_LIMIT = 0x5,
};
enum {
- ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0,
+ ELEMENT_TYPE_CAP_MASK_TSAR = 1 << 0,
ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1,
ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2,
ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
+ ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP = 1 << 4,
+ ELEMENT_TYPE_CAP_MASK_RATE_LIMIT = 1 << 5,
+};
+
+enum {
+ TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
+ TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
+ TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
+ TSAR_ELEMENT_TSAR_TYPE_TC_ARB = 0x3,
+};
+
+enum {
+ TSAR_TYPE_CAP_MASK_DWRR = 1 << 0,
+ TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1,
+ TSAR_TYPE_CAP_MASK_ETS = 1 << 2,
+ TSAR_TYPE_CAP_MASK_TC_ARB = 1 << 3,
+};
+
+struct mlx5_ifc_tsar_element_bits {
+ u8 traffic_class[0x4];
+ u8 reserved_at_4[0x4];
+ u8 tsar_type[0x8];
+ u8 reserved_at_10[0x10];
+};
+
+struct mlx5_ifc_vport_element_bits {
+ u8 reserved_at_0[0x4];
+ u8 eswitch_owner_vhca_id_valid[0x1];
+ u8 eswitch_owner_vhca_id[0xb];
+ u8 vport_number[0x10];
+};
+
+struct mlx5_ifc_vport_tc_element_bits {
+ u8 traffic_class[0x4];
+ u8 eswitch_owner_vhca_id_valid[0x1];
+ u8 eswitch_owner_vhca_id[0xb];
+ u8 vport_number[0x10];
+};
+
+union mlx5_ifc_element_attributes_bits {
+ struct mlx5_ifc_tsar_element_bits tsar;
+ struct mlx5_ifc_vport_element_bits vport;
+ struct mlx5_ifc_vport_tc_element_bits vport_tc;
+ u8 reserved_at_0[0x20];
};
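+/*
+ * Which union member applies is keyed by element_type in the scheduling
+ * context below (SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR selects tsar,
+ * _VPORT selects vport, _VPORT_TC selects vport_tc); the raw dword view
+ * remains available for the other element types.
+ */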
struct mlx5_ifc_scheduling_context_bits {
u8 element_type[0x8];
u8 reserved_at_8[0x18];
- u8 element_attributes[0x20];
+ union mlx5_ifc_element_attributes_bits element_attributes;
u8 parent_element_id[0x20];
@@ -3474,7 +4294,9 @@ struct mlx5_ifc_scheduling_context_bits {
u8 max_average_bw[0x20];
- u8 reserved_at_e0[0x120];
+ u8 max_bw_obj_id[0x20];
+
+ u8 reserved_at_100[0x100];
};
struct mlx5_ifc_rqtc_bits {
@@ -3491,7 +4313,10 @@ struct mlx5_ifc_rqtc_bits {
u8 reserved_at_e0[0x6a0];
- struct mlx5_ifc_rq_num_bits rq_num[];
+ union {
+ DECLARE_FLEX_ARRAY(struct mlx5_ifc_rq_num_bits, rq_num);
+ DECLARE_FLEX_ARRAY(struct mlx5_ifc_rq_vhca_bits, rq_vhca);
+ };
};
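+/*
+ * DECLARE_FLEX_ARRAY() (include/linux/stddef.h) exists precisely so a
+ * flexible array member can sit inside a union, which a bare rq_num[]
+ * cannot. Which member is meaningful presumably follows from how the RQT
+ * is created: plain RQ numbers vs. RQs addressed by VHCA id.
+ */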
enum {
@@ -3506,9 +4331,15 @@ enum {
};
enum {
- MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
- MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT = 0x1,
- MLX5_RQC_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_BYTE = 0x0,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE = 0x1,
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_PAGE = 0x2,
+};
+
+enum {
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_NO_MATCH = 0x0,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED = 0x1,
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_FIVE_TUPLE = 0x2,
};
struct mlx5_ifc_rqc_bits {
@@ -3543,7 +4374,13 @@ struct mlx5_ifc_rqc_bits {
u8 reserved_at_c0[0x10];
u8 hairpin_peer_vhca[0x10];
- u8 reserved_at_e0[0xa0];
+ u8 reserved_at_e0[0x46];
+ u8 shampo_no_match_alignment_granularity[0x2];
+ u8 reserved_at_128[0x6];
+ u8 shampo_match_criteria_type[0x2];
+ u8 reservation_timeout[0x10];
+
+ u8 reserved_at_140[0x40];
struct mlx5_ifc_wq_bits wq;
};
@@ -3566,6 +4403,11 @@ struct mlx5_ifc_rmpc_bits {
struct mlx5_ifc_wq_bits wq;
};
+enum {
+ VHCA_ID_TYPE_HW = 0,
+ VHCA_ID_TYPE_SW = 1,
+};
+
struct mlx5_ifc_nic_vport_context_bits {
u8 reserved_at_0[0x5];
u8 min_wqe_inline_mode[0x3];
@@ -3582,13 +4424,18 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 event_on_mc_address_change[0x1];
u8 event_on_uc_address_change[0x1];
- u8 reserved_at_40[0xc];
-
+ u8 vhca_id_type[0x1];
+ u8 reserved_at_41[0xb];
u8 affiliation_criteria[0x4];
u8 affiliated_vhca_id[0x10];
- u8 reserved_at_60[0xd0];
+ u8 reserved_at_60[0xa0];
+
+ u8 reserved_at_100[0x1];
+ u8 sd_group[0x3];
+ u8 reserved_at_104[0x1c];
+ u8 reserved_at_120[0x10];
u8 mtu[0x10];
u8 system_image_guid[0x40];
@@ -3621,6 +4468,11 @@ enum {
MLX5_MKC_ACCESS_MODE_KSM = 0x3,
MLX5_MKC_ACCESS_MODE_SW_ICM = 0x4,
MLX5_MKC_ACCESS_MODE_MEMIC = 0x5,
+ MLX5_MKC_ACCESS_MODE_CROSSING = 0x6,
+};
+
+enum {
+ MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX = 0,
};
struct mlx5_ifc_mkc_bits {
@@ -3639,7 +4491,9 @@ struct mlx5_ifc_mkc_bits {
u8 lw[0x1];
u8 lr[0x1];
u8 access_mode_1_0[0x2];
- u8 reserved_at_18[0x8];
+ u8 reserved_at_18[0x2];
+ u8 ma_translation_mode[0x2];
+ u8 reserved_at_1c[0x4];
u8 qpn[0x18];
u8 mkey_7_0[0x8];
@@ -3661,16 +4515,22 @@ struct mlx5_ifc_mkc_bits {
u8 bsf_octword_size[0x20];
- u8 reserved_at_120[0x80];
+ u8 reserved_at_120[0x60];
+
+ u8 crossing_target_vhca_id[0x10];
+ u8 reserved_at_190[0x10];
u8 translations_octword_size[0x20];
u8 reserved_at_1c0[0x19];
u8 relaxed_ordering_read[0x1];
- u8 reserved_at_1d9[0x1];
- u8 log_page_size[0x5];
+ u8 log_page_size[0x6];
- u8 reserved_at_1e0[0x20];
+ u8 reserved_at_1e0[0x5];
+ u8 pcie_tph_en[0x1];
+ u8 pcie_tph_ph[0x2];
+ u8 pcie_tph_steering_tag_index[0x8];
+ u8 reserved_at_1f0[0x10];
};
struct mlx5_ifc_pkey_bits {
@@ -3691,7 +4551,8 @@ struct mlx5_ifc_hca_vport_context_bits {
u8 has_smi[0x1];
u8 has_raw[0x1];
u8 grh_required[0x1];
- u8 reserved_at_104[0xc];
+ u8 reserved_at_104[0x4];
+ u8 num_port_plane[0x8];
u8 port_physical_state[0x4];
u8 vport_state_policy[0x4];
u8 port_state[0x4];
@@ -3788,8 +4649,8 @@ struct mlx5_ifc_eqc_bits {
u8 reserved_at_80[0x20];
- u8 reserved_at_a0[0x18];
- u8 intr[0x8];
+ u8 reserved_at_a0[0x14];
+ u8 intr[0xc];
u8 reserved_at_c0[0x3];
u8 log_page_size[0x5];
@@ -3831,7 +4692,8 @@ struct mlx5_ifc_dctc_bits {
u8 state[0x4];
u8 reserved_at_8[0x18];
- u8 reserved_at_20[0x8];
+ u8 reserved_at_20[0x7];
+ u8 dp_ordering_force[0x1];
u8 user_index[0x18];
u8 reserved_at_40[0x8];
@@ -3846,7 +4708,9 @@ struct mlx5_ifc_dctc_bits {
u8 latency_sensitive[0x1];
u8 rlky[0x1];
u8 free_ar[0x1];
- u8 reserved_at_73[0xd];
+ u8 reserved_at_73[0x1];
+ u8 dp_ordering_1[0x1];
+ u8 reserved_at_75[0xb];
u8 reserved_at_80[0x8];
u8 cs_res[0x8];
@@ -3905,17 +4769,17 @@ enum {
MLX5_CQC_ST_FIRED = 0xa,
};
-enum {
+enum mlx5_cq_period_mode {
MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
- MLX5_CQ_PERIOD_NUM_MODES
+ MLX5_CQ_PERIOD_NUM_MODES,
};
struct mlx5_ifc_cqc_bits {
u8 status[0x4];
u8 reserved_at_4[0x2];
u8 dbr_umem_valid[0x1];
- u8 apu_thread_cq[0x1];
+ u8 apu_cq[0x1];
u8 cqe_sz[0x3];
u8 cc[0x1];
u8 reserved_at_c[0x1];
@@ -3925,7 +4789,8 @@ struct mlx5_ifc_cqc_bits {
u8 cqe_comp_en[0x1];
u8 mini_cqe_res_format[0x2];
u8 st[0x4];
- u8 reserved_at_18[0x8];
+ u8 reserved_at_18[0x6];
+ u8 cqe_compression_layout[0x2];
u8 reserved_at_20[0x20];
@@ -3941,8 +4806,7 @@ struct mlx5_ifc_cqc_bits {
u8 cq_period[0xc];
u8 cq_max_count[0x10];
- u8 reserved_at_a0[0x18];
- u8 c_eqn[0x8];
+ u8 c_eqn_or_apu_element[0x20];
u8 reserved_at_c0[0x3];
u8 log_page_size[0x5];
@@ -3971,6 +4835,7 @@ union mlx5_ifc_cong_control_roce_ecn_auto_bits {
struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
+ struct mlx5_ifc_cong_control_r_roce_general_bits cong_control_r_roce_general;
u8 reserved_at_0[0x800];
};
@@ -4049,6 +4914,11 @@ union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
u8 reserved_at_0[0x20];
};
+struct mlx5_ifc_rs_histogram_cntrs_bits {
+ u8 hist[16][0x40];
+ u8 reserved_at_400[0x2c0];
+};
+
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
@@ -4059,8 +4929,11 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_eth_per_tc_prio_grp_data_layout_bits eth_per_tc_prio_grp_data_layout;
struct mlx5_ifc_eth_per_tc_congest_prio_grp_data_layout_bits eth_per_tc_congest_prio_grp_data_layout;
struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
+ struct mlx5_ifc_ib_ext_port_cntrs_grp_data_layout_bits ib_ext_port_cntrs_grp_data_layout;
struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs;
+ struct mlx5_ifc_phys_layer_recovery_cntrs_bits phys_layer_recovery_cntrs;
+ struct mlx5_ifc_rs_histogram_cntrs_bits rs_histogram_cntrs;
u8 reserved_at_0[0x7c0];
};
@@ -4092,13 +4965,19 @@ struct mlx5_ifc_health_buffer_bits {
u8 assert_callra[0x20];
- u8 reserved_at_140[0x40];
+ u8 reserved_at_140[0x20];
+
+ u8 time[0x20];
u8 fw_version[0x20];
u8 hw_id[0x20];
- u8 reserved_at_1c0[0x20];
+ u8 rfr[0x1];
+ u8 reserved_at_1c1[0x3];
+ u8 valid[0x1];
+ u8 severity[0x3];
+ u8 reserved_at_1c8[0x18];
u8 irisc_index[0x8];
u8 synd[0x8];
@@ -4114,29 +4993,6 @@ struct mlx5_ifc_register_loopback_control_bits {
u8 reserved_at_20[0x60];
};
-struct mlx5_ifc_vport_tc_element_bits {
- u8 traffic_class[0x4];
- u8 reserved_at_4[0xc];
- u8 vport_number[0x10];
-};
-
-struct mlx5_ifc_vport_element_bits {
- u8 reserved_at_0[0x10];
- u8 vport_number[0x10];
-};
-
-enum {
- TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
- TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
- TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
-};
-
-struct mlx5_ifc_tsar_element_bits {
- u8 reserved_at_0[0x8];
- u8 tsar_type[0x8];
- u8 reserved_at_10[0x10];
-};
-
enum {
MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0,
MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1,
@@ -4308,7 +5164,10 @@ struct mlx5_ifc_set_l2_table_entry_in_bits {
u8 reserved_at_c0[0x20];
- u8 reserved_at_e0[0x13];
+ u8 reserved_at_e0[0x10];
+ u8 silent_mode_valid[0x1];
+ u8 silent_mode[0x1];
+ u8 reserved_at_f2[0x1];
u8 vlan_valid[0x1];
u8 vlan[0xc];
@@ -4356,7 +5215,10 @@ struct mlx5_ifc_set_hca_cap_in_bits {
u8 op_mod[0x10];
u8 other_function[0x1];
- u8 reserved_at_41[0xf];
+ u8 ec_vf_function[0x1];
+ u8 reserved_at_42[0x1];
+ u8 function_id_type[0x1];
+ u8 reserved_at_44[0xc];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -4389,13 +5251,15 @@ struct mlx5_ifc_set_fte_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -4413,6 +5277,16 @@ struct mlx5_ifc_set_fte_in_bits {
struct mlx5_ifc_flow_context_bits flow_context;
};
+struct mlx5_ifc_dest_format_bits {
+ u8 destination_type[0x8];
+ u8 destination_id[0x18];
+
+ u8 destination_eswitch_owner_vhca_id_valid[0x1];
+ u8 packet_reformat[0x1];
+ u8 reserved_at_22[0xe];
+ u8 destination_eswitch_owner_vhca_id[0x10];
+};
+
struct mlx5_ifc_rts2rts_qp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4576,6 +5450,36 @@ struct mlx5_ifc_query_vport_state_out_bits {
u8 state[0x4];
};
+struct mlx5_ifc_array1024_auto_bits {
+ u8 array1024_auto[32][0x20];
+};
+
+struct mlx5_ifc_query_vuid_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x40];
+
+ u8 query_vfs_vuid[0x1];
+ u8 data_direct[0x1];
+ u8 reserved_at_62[0xe];
+ u8 vhca_id[0x10];
+};
+
+struct mlx5_ifc_query_vuid_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x1a0];
+
+ u8 reserved_at_1e0[0x10];
+ u8 num_of_entries[0x10];
+
+ struct mlx5_ifc_array1024_auto_bits vuid[];
+};
+
enum {
MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
@@ -4734,7 +5638,9 @@ struct mlx5_ifc_query_vport_counter_out_bits {
struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
- u8 reserved_at_680[0xa00];
+ struct mlx5_ifc_traffic_counter_bits local_loopback;
+
+ u8 reserved_at_700[0x980];
};
enum {
@@ -4873,7 +5779,11 @@ struct mlx5_ifc_query_special_contexts_out_bits {
u8 null_mkey[0x20];
- u8 reserved_at_a0[0x60];
+ u8 terminate_scatter_list_mkey[0x20];
+
+ u8 repeated_mkey[0x20];
+
+ u8 reserved_at_a0[0x20];
};
struct mlx5_ifc_query_special_contexts_in_bits {
@@ -5017,24 +5927,54 @@ struct mlx5_ifc_query_rmp_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_cqe_error_syndrome_bits {
+ u8 hw_error_syndrome[0x8];
+ u8 hw_syndrome_type[0x4];
+ u8 reserved_at_c[0x4];
+ u8 vendor_error_syndrome[0x8];
+ u8 syndrome[0x8];
+};
+
+struct mlx5_ifc_qp_context_extension_bits {
+ u8 reserved_at_0[0x60];
+
+ struct mlx5_ifc_cqe_error_syndrome_bits error_syndrome;
+
+ u8 reserved_at_80[0x580];
+};
+
+struct mlx5_ifc_qpc_extension_and_pas_list_in_bits {
+ struct mlx5_ifc_qp_context_extension_bits qpc_data_extension;
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_qp_pas_list_in_bits {
+ struct mlx5_ifc_cmd_pas_bits pas[0];
+};
+
+union mlx5_ifc_qp_pas_or_qpc_ext_and_pas_bits {
+ struct mlx5_ifc_qp_pas_list_in_bits qp_pas_list;
+ struct mlx5_ifc_qpc_extension_and_pas_list_in_bits qpc_ext_and_pas_list;
+};
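+/*
+ * The layout QUERY_QP returns is selected by the qpc_ext bit added to
+ * query_qp_in below: with it set, the QPC extension (carrying the CQE
+ * error syndrome) precedes the PA list. The pas[0] zero-length arrays
+ * are used here because a [] flexible array member is not allowed inside
+ * a union.
+ */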
+
struct mlx5_ifc_query_qp_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_at_40[0x20];
- u8 ece[0x20];
+ u8 reserved_at_40[0x40];
u8 opt_param_mask[0x20];
- u8 reserved_at_a0[0x20];
+ u8 ece[0x20];
struct mlx5_ifc_qpc_bits qpc;
u8 reserved_at_800[0x80];
- u8 pas[][0x40];
+ union mlx5_ifc_qp_pas_or_qpc_ext_and_pas_bits qp_pas_or_qpc_ext_and_pas;
};
struct mlx5_ifc_query_qp_in_bits {
@@ -5044,7 +5984,8 @@ struct mlx5_ifc_query_qp_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x8];
+ u8 qpc_ext[0x1];
+ u8 reserved_at_41[0x7];
u8 qpn[0x18];
u8 reserved_at_60[0x20];
@@ -5100,7 +6041,11 @@ struct mlx5_ifc_query_q_counter_out_bits {
u8 local_ack_timeout_err[0x20];
- u8 reserved_at_320[0xa0];
+ u8 reserved_at_320[0x60];
+
+ u8 req_rnr_retries_exceeded[0x20];
+
+ u8 reserved_at_3a0[0x20];
u8 resp_local_length_error[0x20];
@@ -5162,10 +6107,15 @@ struct mlx5_ifc_query_q_counter_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x80];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_at_60[0x60];
u8 clear[0x1];
- u8 reserved_at_c1[0x1f];
+ u8 aggregate[0x1];
+ u8 reserved_at_c2[0x1e];
u8 reserved_at_e0[0x18];
u8 counter_set_id[0x8];
@@ -5467,7 +6417,10 @@ struct mlx5_ifc_query_hca_cap_in_bits {
u8 op_mod[0x10];
u8 other_function[0x1];
- u8 reserved_at_41[0xf];
+ u8 ec_vf_function[0x1];
+ u8 reserved_at_42[0x1];
+ u8 function_id_type[0x1];
+ u8 reserved_at_44[0xc];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -5525,6 +6478,20 @@ struct mlx5_ifc_modify_other_hca_cap_in_bits {
struct mlx5_ifc_other_hca_cap_bits other_capability;
};
+struct mlx5_ifc_sw_owner_icm_root_params_bits {
+ u8 sw_owner_icm_root_1[0x40];
+
+ u8 sw_owner_icm_root_0[0x40];
+};
+
+struct mlx5_ifc_rtc_params_bits {
+ u8 rtc_id_0[0x20];
+
+ u8 rtc_id_1[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_flow_table_context_bits {
u8 reformat_en[0x1];
u8 decap_en[0x1];
@@ -5532,7 +6499,8 @@ struct mlx5_ifc_flow_table_context_bits {
u8 termination_table[0x1];
u8 table_miss_action[0x4];
u8 level[0x8];
- u8 reserved_at_10[0x8];
+ u8 rtc_valid[0x1];
+ u8 reserved_at_11[0x7];
u8 log_size[0x8];
u8 reserved_at_20[0x8];
@@ -5543,10 +6511,10 @@ struct mlx5_ifc_flow_table_context_bits {
u8 reserved_at_60[0x60];
- u8 sw_owner_icm_root_1[0x40];
-
- u8 sw_owner_icm_root_0[0x40];
-
+ union {
+ struct mlx5_ifc_sw_owner_icm_root_params_bits sws;
+ struct mlx5_ifc_rtc_params_bits hws;
+ };
};
struct mlx5_ifc_query_flow_table_out_bits {
@@ -5611,6 +6579,367 @@ struct mlx5_ifc_query_fte_in_bits {
u8 reserved_at_120[0xe0];
};
+struct mlx5_ifc_match_definer_format_0_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 metadata_reg_c_1[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_dmac_15_0[0x10];
+ u8 outer_ethertype[0x10];
+
+ u8 reserved_at_180[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 outer_l4_type_ext[0x4];
+ u8 reserved_at_1a4[0x2];
+ u8 outer_ipsec_layer[0x2];
+ u8 outer_l2_type[0x2];
+ u8 force_lb[0x1];
+ u8 outer_l2_ok[0x1];
+ u8 outer_l3_ok[0x1];
+ u8 outer_l4_ok[0x1];
+ u8 outer_second_vlan_type[0x2];
+ u8 outer_second_vlan_prio[0x3];
+ u8 outer_second_vlan_cfi[0x1];
+ u8 outer_second_vlan_vid[0xc];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 inner_ipv4_checksum_ok[0x1];
+ u8 inner_l4_checksum_ok[0x1];
+ u8 outer_ipv4_checksum_ok[0x1];
+ u8 outer_l4_checksum_ok[0x1];
+ u8 inner_l3_ok[0x1];
+ u8 inner_l4_ok[0x1];
+ u8 outer_l3_ok_duplicate[0x1];
+ u8 outer_l4_ok_duplicate[0x1];
+ u8 outer_tcp_cwr[0x1];
+ u8 outer_tcp_ece[0x1];
+ u8 outer_tcp_urg[0x1];
+ u8 outer_tcp_ack[0x1];
+ u8 outer_tcp_psh[0x1];
+ u8 outer_tcp_rst[0x1];
+ u8 outer_tcp_syn[0x1];
+ u8 outer_tcp_fin[0x1];
+};
+
+struct mlx5_ifc_match_definer_format_22_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 outer_ip_src_addr[0x20];
+
+ u8 outer_ip_dest_addr[0x20];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 outer_ip_frag[0x1];
+ u8 outer_qp_type[0x2];
+ u8 outer_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 outer_l3_type[0x2];
+ u8 outer_l4_type[0x2];
+ u8 outer_first_vlan_type[0x2];
+ u8 outer_first_vlan_prio[0x3];
+ u8 outer_first_vlan_cfi[0x1];
+ u8 outer_first_vlan_vid[0xc];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_23_bits {
+ u8 reserved_at_0[0x100];
+
+ u8 inner_ip_src_addr[0x20];
+
+ u8 inner_ip_dest_addr[0x20];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_160[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 inner_ip_frag[0x1];
+ u8 inner_qp_type[0x2];
+ u8 inner_encap_type[0x2];
+ u8 port_number[0x2];
+ u8 inner_l3_type[0x2];
+ u8 inner_l4_type[0x2];
+ u8 inner_first_vlan_type[0x2];
+ u8 inner_first_vlan_prio[0x3];
+ u8 inner_first_vlan_cfi[0x1];
+ u8 inner_first_vlan_vid[0xc];
+
+ u8 tunnel_header_0[0x20];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_29_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_l4_sport[0x10];
+ u8 outer_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_match_definer_format_30_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 outer_ip_dest_addr[0x80];
+
+ u8 outer_ip_src_addr[0x80];
+
+ u8 outer_dmac_47_16[0x20];
+
+ u8 outer_smac_47_16[0x20];
+
+ u8 outer_smac_15_0[0x10];
+ u8 outer_dmac_15_0[0x10];
+};
+
+struct mlx5_ifc_match_definer_format_31_bits {
+ u8 reserved_at_0[0xc0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_l4_sport[0x10];
+ u8 inner_l4_dport[0x10];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_match_definer_format_32_bits {
+ u8 reserved_at_0[0xa0];
+
+ u8 inner_ip_dest_addr[0x80];
+
+ u8 inner_ip_src_addr[0x80];
+
+ u8 inner_dmac_47_16[0x20];
+
+ u8 inner_smac_47_16[0x20];
+
+ u8 inner_smac_15_0[0x10];
+ u8 inner_dmac_15_0[0x10];
+};
+
+enum {
+ MLX5_IFC_DEFINER_FORMAT_ID_SELECT = 61,
+};
+
+#define MLX5_IFC_DEFINER_FORMAT_OFFSET_UNUSED 0x0
+#define MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN 0x48
+#define MLX5_IFC_DEFINER_DW_SELECTORS_NUM 9
+#define MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM 8
+
+struct mlx5_ifc_match_definer_match_mask_bits {
+ u8 reserved_at_1c0[5][0x20];
+ u8 match_dw_8[0x20];
+ u8 match_dw_7[0x20];
+ u8 match_dw_6[0x20];
+ u8 match_dw_5[0x20];
+ u8 match_dw_4[0x20];
+ u8 match_dw_3[0x20];
+ u8 match_dw_2[0x20];
+ u8 match_dw_1[0x20];
+ u8 match_dw_0[0x20];
+
+ u8 match_byte_7[0x8];
+ u8 match_byte_6[0x8];
+ u8 match_byte_5[0x8];
+ u8 match_byte_4[0x8];
+
+ u8 match_byte_3[0x8];
+ u8 match_byte_2[0x8];
+ u8 match_byte_1[0x8];
+ u8 match_byte_0[0x8];
+};
+
+struct mlx5_ifc_match_definer_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x10];
+ u8 format_id[0x10];
+
+ u8 reserved_at_a0[0x60];
+
+ u8 format_select_dw3[0x8];
+ u8 format_select_dw2[0x8];
+ u8 format_select_dw1[0x8];
+ u8 format_select_dw0[0x8];
+
+ u8 format_select_dw7[0x8];
+ u8 format_select_dw6[0x8];
+ u8 format_select_dw5[0x8];
+ u8 format_select_dw4[0x8];
+
+ u8 reserved_at_100[0x18];
+ u8 format_select_dw8[0x8];
+
+ u8 reserved_at_120[0x20];
+
+ u8 format_select_byte3[0x8];
+ u8 format_select_byte2[0x8];
+ u8 format_select_byte1[0x8];
+ u8 format_select_byte0[0x8];
+
+ u8 format_select_byte7[0x8];
+ u8 format_select_byte6[0x8];
+ u8 format_select_byte5[0x8];
+ u8 format_select_byte4[0x8];
+
+ u8 reserved_at_180[0x40];
+
+ union {
+ struct {
+ u8 match_mask[16][0x20];
+ };
+ struct mlx5_ifc_match_definer_match_mask_bits match_mask_format;
+ };
+};
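+/*
+ * A match definer picks which fields participate in matching:
+ * format_select_dw0..dw8 choose MLX5_IFC_DEFINER_DW_SELECTORS_NUM (9)
+ * dwords and format_select_byte0..byte7 choose
+ * MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM (8) bytes out of a format layout
+ * such as formats 0/22/23/29-32 above; match_mask then masks the
+ * selected fields.
+ */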
+
+struct mlx5_ifc_general_obj_create_param_bits {
+ u8 alias_object[0x1];
+ u8 reserved_at_1[0x2];
+ u8 log_obj_range[0x5];
+ u8 reserved_at_8[0x18];
+};
+
+struct mlx5_ifc_general_obj_query_param_bits {
+ u8 alias_object[0x1];
+ u8 obj_offset[0x1f];
+};
+
+struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 vhca_tunnel_id[0x10];
+ u8 obj_type[0x10];
+
+ u8 obj_id[0x20];
+
+ union {
+ struct mlx5_ifc_general_obj_create_param_bits create;
+ struct mlx5_ifc_general_obj_query_param_bits query;
+ } op_param;
+};
+
+struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
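+/*
+ * Illustrative sketch (usage code, not defined in this header): general
+ * objects are created by wrapping an object context with these headers,
+ * e.g. for a sampler:
+ *
+ *	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ *		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ *	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ *		 MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
+ *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ *	obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ */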
+
+struct mlx5_ifc_allow_other_vhca_access_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x50];
+ u8 object_type_to_be_accessed[0x10];
+ u8 object_id_to_be_accessed[0x20];
+ u8 reserved_at_c0[0x40];
+ union {
+ u8 access_key_raw[0x100];
+ u8 access_key[8][0x20];
+ };
+};
+
+struct mlx5_ifc_allow_other_vhca_access_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_header_arg_bits {
+ u8 reserved_at_0[0x80];
+
+ u8 reserved_at_80[0x8];
+ u8 access_pd[0x18];
+};
+
+struct mlx5_ifc_create_modify_header_arg_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_modify_header_arg_bits arg;
+};
+
+struct mlx5_ifc_create_match_definer_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+
+ struct mlx5_ifc_match_definer_bits obj_context;
+};
+
+struct mlx5_ifc_create_match_definer_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+};
+
+struct mlx5_ifc_alias_context_bits {
+ u8 vhca_id_to_be_accessed[0x10];
+ u8 reserved_at_10[0xd];
+ u8 status[0x3];
+ u8 object_id_to_be_accessed[0x20];
+ u8 reserved_at_40[0x40];
+ union {
+ u8 access_key_raw[0x100];
+ u8 access_key[8][0x20];
+ };
+ u8 metadata[0x80];
+};
+
+struct mlx5_ifc_create_alias_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_alias_context_bits alias_ctx;
+};
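+/*
+ * Alias objects pair with ALLOW_OTHER_VHCA_ACCESS above: the owning
+ * function first grants access to its object under an access_key, and
+ * the peer then creates the alias by presenting the same key in
+ * alias_ctx; the status field presumably reports whether the binding
+ * took effect.
+ */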
+
enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
@@ -5618,6 +6947,7 @@ enum {
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_4 = 0x5,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_5 = 0x6,
};
struct mlx5_ifc_query_flow_group_out_bits {
@@ -5716,6 +7046,28 @@ struct mlx5_ifc_query_esw_vport_context_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_destroy_esw_vport_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x20];
+};
+
+struct mlx5_ifc_destroy_esw_vport_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vport_num[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_modify_esw_vport_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -5783,12 +7135,14 @@ struct mlx5_ifc_query_eq_in_bits {
};
struct mlx5_ifc_packet_reformat_context_in_bits {
- u8 reserved_at_0[0x5];
- u8 reformat_type[0x3];
- u8 reserved_at_8[0xe];
+ u8 reformat_type[0x8];
+ u8 reserved_at_8[0x4];
+ u8 reformat_param_0[0x4];
+ u8 reserved_at_10[0x6];
u8 reformat_data_size[0xa];
- u8 reserved_at_20[0x10];
+ u8 reformat_param_1[0x8];
+ u8 reserved_at_28[0x8];
u8 reformat_data[2][0x8];
u8 more_reformat_data[][0x8];
@@ -5828,12 +7182,33 @@ struct mlx5_ifc_alloc_packet_reformat_context_out_bits {
u8 reserved_at_60[0x20];
};
+enum {
+ MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START = 0x1,
+ MLX5_REFORMAT_CONTEXT_ANCHOR_VLAN_START = 0x2,
+ MLX5_REFORMAT_CONTEXT_ANCHOR_IP_START = 0x7,
+ MLX5_REFORMAT_CONTEXT_ANCHOR_TCP_UDP_START = 0x9,
+};
+
enum mlx5_reformat_ctx_type {
MLX5_REFORMAT_TYPE_L2_TO_VXLAN = 0x0,
MLX5_REFORMAT_TYPE_L2_TO_NVGRE = 0x1,
MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
+ MLX5_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL = 0x6,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4 = 0x7,
+ MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
+ MLX5_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2 = 0x9,
+ MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP = 0xa,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xb,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6 = 0xc,
+ MLX5_REFORMAT_TYPE_ADD_PSP_TUNNEL = 0xd,
+ MLX5_REFORMAT_TYPE_DEL_PSP_TUNNEL = 0xe,
+ MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf,
+ MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10,
+ MLX5_REFORMAT_TYPE_ADD_MACSEC = 0x11,
+ MLX5_REFORMAT_TYPE_DEL_MACSEC = 0x12,
};
struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
@@ -5954,6 +7329,9 @@ enum {
MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59,
MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B,
MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D,
+ MLX5_ACTION_IN_FIELD_OUT_EMD_47_32 = 0x6F,
+ MLX5_ACTION_IN_FIELD_OUT_EMD_31_0 = 0x70,
+ MLX5_ACTION_IN_FIELD_PSP_SYNDROME = 0x71,
};
struct mlx5_ifc_alloc_modify_header_context_out_bits {
@@ -6194,6 +7572,85 @@ struct mlx5_ifc_query_adapter_in_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_function_vhca_rid_info_reg_bits {
+ u8 host_number[0x8];
+ u8 host_pci_device_function[0x8];
+ u8 host_pci_bus[0x8];
+ u8 reserved_at_18[0x3];
+ u8 pci_bus_assigned[0x1];
+ u8 function_type[0x4];
+
+ u8 parent_pci_device_function[0x8];
+ u8 parent_pci_bus[0x8];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_delegated_function_vhca_rid_info_bits {
+ struct mlx5_ifc_function_vhca_rid_info_reg_bits function_vhca_rid_info;
+
+ u8 reserved_at_80[0x18];
+ u8 manage_profile[0x8];
+
+ u8 reserved_at_a0[0x60];
+};
+
+struct mlx5_ifc_query_delegated_vhca_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 functions_count[0x10];
+
+ u8 reserved_at_80[0x80];
+
+ struct mlx5_ifc_delegated_function_vhca_rid_info_bits
+ delegated_function_vhca_rid_info[];
+};
+
+struct mlx5_ifc_query_delegated_vhca_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_esw_vport_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 vport_num[0x10];
+};
+
+struct mlx5_ifc_create_esw_vport_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 managed_vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_qp_2rst_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -6238,6 +7695,30 @@ struct mlx5_ifc_qp_2err_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_trans_page_fault_info_bits {
+ u8 error[0x1];
+ u8 reserved_at_1[0x4];
+ u8 page_fault_type[0x3];
+ u8 wq_number[0x18];
+
+ u8 reserved_at_20[0x8];
+ u8 fault_token[0x18];
+};
+
+struct mlx5_ifc_mem_page_fault_info_bits {
+ u8 error[0x1];
+ u8 reserved_at_1[0xf];
+ u8 fault_token_47_32[0x10];
+
+ u8 fault_token_31_0[0x20];
+};
+
+union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits {
+ struct mlx5_ifc_trans_page_fault_info_bits trans_page_fault_info;
+ struct mlx5_ifc_mem_page_fault_info_bits mem_page_fault_info;
+ u8 reserved_at_0[0x40];
+};
+
struct mlx5_ifc_page_fault_resume_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -6254,13 +7735,8 @@ struct mlx5_ifc_page_fault_resume_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 error[0x1];
- u8 reserved_at_41[0x4];
- u8 page_fault_type[0x3];
- u8 wq_number[0x18];
-
- u8 reserved_at_60[0x8];
- u8 token[0x18];
+ union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits
+ page_fault_info;
};
struct mlx5_ifc_nop_out_bits {
@@ -6302,7 +7778,12 @@ struct mlx5_ifc_modify_vport_state_in_bits {
u8 reserved_at_41[0xf];
u8 vport_number[0x10];
- u8 reserved_at_60[0x18];
+ u8 reserved_at_60[0x10];
+ u8 ingress_connect[0x1];
+ u8 egress_connect[0x1];
+ u8 ingress_connect_valid[0x1];
+ u8 egress_connect_valid[0x1];
+ u8 reserved_at_74[0x4];
u8 admin_state[0x4];
u8 reserved_at_7c[0x4];
};
@@ -6352,7 +7833,7 @@ struct mlx5_ifc_modify_tir_bitmask_bits {
u8 reserved_at_3c[0x1];
u8 hash[0x1];
u8 reserved_at_3e[0x1];
- u8 lro[0x1];
+ u8 packet_merge[0x1];
};
struct mlx5_ifc_modify_tir_out_bits {
@@ -6769,7 +8250,7 @@ struct mlx5_ifc_mad_ifc_in_bits {
u8 op_mod[0x10];
u8 remote_lid[0x10];
- u8 reserved_at_50[0x8];
+ u8 plane_index[0x8];
u8 port[0x8];
u8 reserved_at_60[0x20];
@@ -6793,7 +8274,12 @@ struct mlx5_ifc_init_hca_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x40];
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x2];
+ u8 sw_vhca_id[0xe];
+ u8 reserved_at_70[0x10];
+
u8 sw_owner_id[4][0x20];
};
@@ -7325,13 +8811,15 @@ struct mlx5_ifc_destroy_flow_table_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -7356,13 +8844,15 @@ struct mlx5_ifc_destroy_flow_group_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -7501,13 +8991,15 @@ struct mlx5_ifc_delete_fte_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -7552,7 +9044,7 @@ struct mlx5_ifc_dealloc_uar_out_bits {
struct mlx5_ifc_dealloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -7928,7 +9420,8 @@ struct mlx5_ifc_create_qp_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x8];
+ u8 qpc_ext[0x1];
+ u8 reserved_at_41[0x7];
u8 input_qpn[0x18];
u8 reserved_at_60[0x20];
@@ -7938,7 +9431,9 @@ struct mlx5_ifc_create_qp_in_bits {
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_at_800[0x60];
+ u8 wq_umem_offset[0x40];
+
+ u8 wq_umem_id[0x20];
u8 wq_umem_valid[0x1];
u8 reserved_at_861[0x1f];
@@ -8004,7 +9499,8 @@ struct mlx5_ifc_create_mkey_in_bits {
u8 pg_access[0x1];
u8 mkey_umem_valid[0x1];
- u8 reserved_at_62[0x1e];
+ u8 data_direct[0x1];
+ u8 reserved_at_63[0x1d];
struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
@@ -8041,19 +9537,21 @@ struct mlx5_ifc_create_flow_table_out_bits {
struct mlx5_ifc_create_flow_table_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x20];
@@ -8073,6 +9571,11 @@ struct mlx5_ifc_create_flow_group_out_bits {
};
enum {
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_TCAM_SUBTABLE = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT = 0x1,
+};
+
+enum {
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
@@ -8087,13 +9590,16 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x20];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x4];
+ u8 group_type[0x4];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -8108,7 +9614,10 @@ struct mlx5_ifc_create_flow_group_in_bits {
u8 end_flow_index[0x20];
- u8 reserved_at_140[0xa0];
+ u8 reserved_at_140[0x10];
+ u8 match_definer_id[0x10];
+
+ u8 reserved_at_160[0x80];
u8 reserved_at_1e0[0x18];
u8 match_criteria_enable[0x8];
@@ -8399,7 +9908,7 @@ struct mlx5_ifc_alloc_uar_out_bits {
struct mlx5_ifc_alloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
@@ -8491,7 +10000,8 @@ struct mlx5_ifc_alloc_flow_counter_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_at_40[0x38];
+ u8 reserved_at_40[0x33];
+ u8 flow_counter_bulk_log_size[0x5];
u8 flow_counter_bulk[0x8];
};
@@ -8675,13 +10185,19 @@ struct mlx5_ifc_pude_reg_bits {
u8 reserved_at_20[0x60];
};
+enum {
+ MLX5_PTYS_CONNECTOR_TYPE_PORT_DA = 0x7,
+};
+
struct mlx5_ifc_ptys_reg_bits {
u8 reserved_at_0[0x1];
u8 an_disable_admin[0x1];
u8 an_disable_cap[0x1];
u8 reserved_at_3[0x5];
u8 local_port[0x8];
- u8 reserved_at_10[0xd];
+ u8 reserved_at_10[0x8];
+ u8 plane_ind[0x4];
+ u8 reserved_at_1c[0x1];
u8 proto_mask[0x3];
u8 an_status[0x4];
@@ -8709,7 +10225,8 @@ struct mlx5_ifc_ptys_reg_bits {
u8 ib_link_width_oper[0x10];
u8 ib_proto_oper[0x10];
- u8 reserved_at_160[0x1c];
+ u8 reserved_at_160[0x8];
+ u8 lane_rate_oper[0x14];
u8 connector_type[0x4];
u8 eth_proto_lp_advertise[0x20];
@@ -8882,7 +10399,35 @@ struct mlx5_ifc_pplm_reg_bits {
u8 fec_override_admin_100g_2x[0x10];
u8 fec_override_admin_50g_1x[0x10];
- u8 reserved_at_140[0x140];
+ u8 fec_override_cap_800g_8x[0x10];
+ u8 fec_override_cap_400g_4x[0x10];
+
+ u8 fec_override_cap_200g_2x[0x10];
+ u8 fec_override_cap_100g_1x[0x10];
+
+ u8 reserved_at_180[0xa0];
+
+ u8 fec_override_admin_800g_8x[0x10];
+ u8 fec_override_admin_400g_4x[0x10];
+
+ u8 fec_override_admin_200g_2x[0x10];
+ u8 fec_override_admin_100g_1x[0x10];
+
+ u8 reserved_at_260[0x60];
+
+ u8 fec_override_cap_1600g_8x[0x10];
+ u8 fec_override_cap_800g_4x[0x10];
+
+ u8 fec_override_cap_400g_2x[0x10];
+ u8 fec_override_cap_200g_1x[0x10];
+
+ u8 fec_override_admin_1600g_8x[0x10];
+ u8 fec_override_admin_800g_4x[0x10];
+
+ u8 fec_override_admin_400g_2x[0x10];
+ u8 fec_override_admin_200g_1x[0x10];
+
+ u8 reserved_at_340[0x80];
};
struct mlx5_ifc_ppcnt_reg_bits {
@@ -8893,8 +10438,10 @@ struct mlx5_ifc_ppcnt_reg_bits {
u8 grp[0x6];
u8 clr[0x1];
- u8 reserved_at_21[0x1c];
- u8 prio_tc[0x3];
+ u8 reserved_at_21[0x13];
+ u8 plane_ind[0x4];
+ u8 reserved_at_38[0x3];
+ u8 prio_tc[0x5];
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
};
@@ -9123,10 +10670,19 @@ struct mlx5_ifc_pifr_reg_bits {
u8 port_filter_update_en[8][0x20];
};
+enum {
+ MLX5_BUF_OWNERSHIP_UNKNOWN = 0x0,
+ MLX5_BUF_OWNERSHIP_FW_OWNED = 0x1,
+ MLX5_BUF_OWNERSHIP_SW_OWNED = 0x2,
+};
+
struct mlx5_ifc_pfcc_reg_bits {
- u8 reserved_at_0[0x8];
+ u8 reserved_at_0[0x4];
+ u8 buf_ownership[0x2];
+ u8 reserved_at_6[0x2];
u8 local_port[0x8];
- u8 reserved_at_10[0xb];
+ u8 reserved_at_10[0xa];
+ u8 cable_length_mask[0x1];
u8 ppan_mask_n[0x1];
u8 minor_stall_mask[0x1];
u8 critical_stall_mask[0x1];
@@ -9155,7 +10711,10 @@ struct mlx5_ifc_pfcc_reg_bits {
u8 device_stall_minor_watermark[0x10];
u8 device_stall_critical_watermark[0x10];
- u8 reserved_at_a0[0x60];
+ u8 reserved_at_a0[0x18];
+ u8 cable_length[0x8];
+
+ u8 reserved_at_c0[0x40];
};
struct mlx5_ifc_pelc_reg_bits {
@@ -9208,6 +10767,24 @@ struct mlx5_ifc_mpegc_reg_bits {
u8 reserved_at_60[0x100];
};
+struct mlx5_ifc_mpir_reg_bits {
+ u8 sdm[0x1];
+ u8 reserved_at_1[0x1b];
+ u8 host_buses[0x4];
+
+ u8 reserved_at_20[0x20];
+
+ u8 local_port[0x8];
+ u8 reserved_at_28[0x18];
+
+ u8 reserved_at_60[0x20];
+};
+
+enum {
+ MLX5_MTUTC_FREQ_ADJ_UNITS_PPB = 0x0,
+ MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM = 0x1,
+};
+
enum {
MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 0x1,
MLX5_MTUTC_OPERATION_ADJUST_TIME = 0x2,
@@ -9215,7 +10792,12 @@ enum {
};
struct mlx5_ifc_mtutc_reg_bits {
- u8 reserved_at_0[0x1c];
+ u8 reserved_at_0[0x5];
+ u8 freq_adj_units[0x3];
+ u8 reserved_at_8[0x3];
+ u8 log_max_freq_adjustment[0x5];
+
+ u8 reserved_at_10[0xc];
u8 operation[0x4];
u8 freq_adjustment[0x20];
@@ -9231,7 +10813,17 @@ struct mlx5_ifc_mtutc_reg_bits {
};
struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x68];
+ u8 reserved_at_0[0x10];
+ u8 ppcnt_recovery_counters[0x1];
+ u8 reserved_at_11[0x7];
+ u8 cable_length[0x1];
+ u8 reserved_at_19[0x4];
+ u8 fec_200G_per_lane_in_pplm[0x1];
+ u8 reserved_at_1e[0x2a];
+ u8 fec_100G_per_lane_in_pplm[0x1];
+ u8 reserved_at_49[0xa];
+ u8 buffer_ownership[0x1];
+ u8 reserved_at_54[0x14];
u8 fec_50G_per_lane_in_pplm[0x1];
u8 reserved_at_69[0x4];
u8 rx_icrc_encapsulated_counter[0x1];
@@ -9252,7 +10844,9 @@ struct mlx5_ifc_pcam_regs_5000_to_507f_bits {
u8 port_access_reg_cap_mask_127_to_96[0x20];
u8 port_access_reg_cap_mask_95_to_64[0x20];
- u8 port_access_reg_cap_mask_63_to_36[0x1c];
+ u8 port_access_reg_cap_mask_63[0x1];
+ u8 pphcr[0x1];
+ u8 port_access_reg_cap_mask_61_to_36[0x1a];
u8 pplm[0x1];
u8 port_access_reg_cap_mask_34_to_32[0x3];
@@ -9288,7 +10882,15 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
- u8 reserved_at_0[0x6b];
+ u8 reserved_at_0[0x50];
+ u8 mtutc_freq_adj_units[0x1];
+ u8 mtutc_time_adjustment_extended_range[0x1];
+ u8 reserved_at_52[0xb];
+ u8 mcia_32dwords[0x1];
+ u8 out_pulse_duration_ns[0x1];
+ u8 npps_period[0x1];
+ u8 reserved_at_60[0xa];
+ u8 reset_state[0x1];
u8 ptpcyc2realtime_modify[0x1];
u8 reserved_at_6c[0x2];
u8 pci_status_and_power[0x1];
@@ -9311,14 +10913,23 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 mcqi[0x1];
u8 mcqs[0x1];
- u8 regs_95_to_87[0x9];
+ u8 regs_95_to_90[0x6];
+ u8 mpir[0x1];
+ u8 regs_88_to_87[0x2];
u8 mpegc[0x1];
u8 mtutc[0x1];
u8 regs_84_to_68[0x11];
u8 tracer_registers[0x4];
- u8 regs_63_to_32[0x20];
- u8 regs_31_to_0[0x20];
+ u8 regs_63_to_46[0x12];
+ u8 mrtc[0x1];
+ u8 regs_44_to_41[0x4];
+ u8 mfrl[0x1];
+ u8 regs_39_to_32[0x8];
+
+ u8 regs_31_to_11[0x15];
+ u8 mtmp[0x1];
+ u8 regs_9_to_0[0xa];
};
struct mlx5_ifc_mcam_access_reg_bits1 {
@@ -9336,13 +10947,28 @@ struct mlx5_ifc_mcam_access_reg_bits2 {
u8 mirc[0x1];
u8 regs_97_to_96[0x2];
- u8 regs_95_to_64[0x20];
+ u8 regs_95_to_87[0x9];
+ u8 synce_registers[0x2];
+ u8 regs_84_to_64[0x15];
u8 regs_63_to_32[0x20];
u8 regs_31_to_0[0x20];
};
+struct mlx5_ifc_mcam_access_reg_bits3 {
+ u8 regs_127_to_96[0x20];
+
+ u8 regs_95_to_64[0x20];
+
+ u8 regs_63_to_32[0x20];
+
+ u8 regs_31_to_3[0x1d];
+ u8 mrtcq[0x1];
+ u8 mtctr[0x1];
+ u8 mtptm[0x1];
+};
+
struct mlx5_ifc_mcam_reg_bits {
u8 reserved_at_0[0x8];
u8 feature_group[0x8];
@@ -9355,6 +10981,7 @@ struct mlx5_ifc_mcam_reg_bits {
struct mlx5_ifc_mcam_access_reg_bits access_regs;
struct mlx5_ifc_mcam_access_reg_bits1 access_regs1;
struct mlx5_ifc_mcam_access_reg_bits2 access_regs2;
+ struct mlx5_ifc_mcam_access_reg_bits3 access_regs3;
u8 reserved_at_0[0x80];
} mng_access_reg_cap_mask;
@@ -9458,25 +11085,31 @@ struct mlx5_ifc_pcmr_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
u8 reserved_at_10[0x10];
+
u8 entropy_force_cap[0x1];
u8 entropy_calc_cap[0x1];
u8 entropy_gre_calc_cap[0x1];
- u8 reserved_at_23[0x1b];
+ u8 reserved_at_23[0xf];
+ u8 rx_ts_over_crc_cap[0x1];
+ u8 reserved_at_33[0xb];
u8 fcs_cap[0x1];
u8 reserved_at_3f[0x1];
+
u8 entropy_force[0x1];
u8 entropy_calc[0x1];
u8 entropy_gre_calc[0x1];
- u8 reserved_at_43[0x1b];
+ u8 reserved_at_43[0xf];
+ u8 rx_ts_over_crc[0x1];
+ u8 reserved_at_53[0xb];
u8 fcs_chk[0x1];
u8 reserved_at_5f[0x1];
};
struct mlx5_ifc_lane_2_module_mapping_bits {
- u8 reserved_at_0[0x6];
- u8 rx_lane[0x2];
- u8 reserved_at_8[0x6];
- u8 tx_lane[0x2];
+ u8 reserved_at_0[0x4];
+ u8 rx_lane[0x4];
+ u8 reserved_at_8[0x4];
+ u8 tx_lane[0x4];
u8 reserved_at_10[0x8];
u8 module[0x8];
};
@@ -9485,8 +11118,8 @@ struct mlx5_ifc_bufferx_reg_bits {
u8 reserved_at_0[0x6];
u8 lossy[0x1];
u8 epsb[0x1];
- u8 reserved_at_8[0xc];
- u8 size[0xc];
+ u8 reserved_at_8[0x8];
+ u8 size[0x10];
u8 xoff_threshold[0x10];
u8 xon_threshold[0x10];
@@ -9706,6 +11339,7 @@ enum {
MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET = 0x7,
};
enum {
@@ -9726,6 +11360,8 @@ enum {
MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR = 0x12,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_TRUST_LOCKDOWN_ERR = 0x13,
};
struct mlx5_ifc_initial_seg_bits {
@@ -9778,7 +11414,12 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 reserved_at_18[0x4];
u8 cap_max_num_of_pps_out_pins[0x4];
- u8 reserved_at_20[0x24];
+ u8 reserved_at_20[0x13];
+ u8 cap_log_min_npps_period[0x5];
+ u8 reserved_at_38[0x3];
+ u8 cap_log_min_out_pulse_duration_ns[0x5];
+
+ u8 reserved_at_40[0x4];
u8 cap_pin_3_mode[0x4];
u8 reserved_at_48[0x4];
u8 cap_pin_2_mode[0x4];
@@ -9797,7 +11438,9 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 cap_pin_4_mode[0x4];
u8 field_select[0x20];
- u8 reserved_at_a0[0x60];
+ u8 reserved_at_a0[0x20];
+
+ u8 npps_period[0x40];
u8 enable[0x1];
u8 reserved_at_101[0xb];
@@ -9806,7 +11449,8 @@ struct mlx5_ifc_mtpps_reg_bits {
u8 pin_mode[0x4];
u8 pin[0x8];
- u8 reserved_at_120[0x20];
+ u8 reserved_at_120[0x2];
+ u8 out_pulse_duration_ns[0x1e];
u8 time_stamp[0x40];
@@ -9961,6 +11605,20 @@ struct mlx5_ifc_mcda_reg_bits {
};
enum {
+ MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE = 0,
+ MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET = 1,
+};
+
+enum {
+ MLX5_MFRL_REG_RESET_STATE_IDLE = 0,
+ MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1,
+ MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2,
+ MLX5_MFRL_REG_RESET_STATE_NEG_TIMEOUT = 3,
+ MLX5_MFRL_REG_RESET_STATE_NACK = 4,
+ MLX5_MFRL_REG_RESET_STATE_UNLOAD_TIMEOUT = 5,
+};
+
+enum {
MLX5_MFRL_REG_RESET_TYPE_FULL_CHIP = BIT(0),
MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE = BIT(1),
};
@@ -9978,7 +11636,9 @@ struct mlx5_ifc_mfrl_reg_bits {
u8 pci_sync_for_fw_update_start[0x1];
u8 pci_sync_for_fw_update_resp[0x2];
u8 rst_type_sel[0x3];
- u8 reserved_at_28[0x8];
+ u8 pci_reset_req_method[0x3];
+ u8 reserved_at_2b[0x1];
+ u8 reset_state[0x4];
u8 reset_type[0x8];
u8 reset_level[0x8];
};
@@ -10037,6 +11697,101 @@ struct mlx5_ifc_pddr_reg_bits {
union mlx5_ifc_pddr_reg_page_data_auto_bits page_data;
};
+struct mlx5_ifc_mrtc_reg_bits {
+ u8 time_synced[0x1];
+ u8 reserved_at_1[0x1f];
+
+ u8 reserved_at_20[0x20];
+
+ u8 time_h[0x20];
+
+ u8 time_l[0x20];
+};
+
+struct mlx5_ifc_mtcap_reg_bits {
+ u8 reserved_at_0[0x19];
+ u8 sensor_count[0x7];
+
+ u8 reserved_at_20[0x20];
+
+ u8 sensor_map[0x40];
+};
+
+struct mlx5_ifc_mtmp_reg_bits {
+ u8 reserved_at_0[0x14];
+ u8 sensor_index[0xc];
+
+ u8 reserved_at_20[0x10];
+ u8 temperature[0x10];
+
+ u8 mte[0x1];
+ u8 mtr[0x1];
+ u8 reserved_at_42[0xe];
+ u8 max_temperature[0x10];
+
+ u8 tee[0x2];
+ u8 reserved_at_62[0xe];
+ u8 temp_threshold_hi[0x10];
+
+ u8 reserved_at_80[0x10];
+ u8 temp_threshold_lo[0x10];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 sensor_name_hi[0x20];
+ u8 sensor_name_lo[0x20];
+};
+
+struct mlx5_ifc_mtptm_reg_bits {
+ u8 reserved_at_0[0x10];
+ u8 psta[0x1];
+ u8 reserved_at_11[0xf];
+
+ u8 reserved_at_20[0x60];
+};
+
+enum {
+ MLX5_MTCTR_REQUEST_NOP = 0x0,
+ MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK = 0x1,
+ MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER = 0x2,
+ MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK = 0x3,
+};
+
+struct mlx5_ifc_mtctr_reg_bits {
+ u8 first_clock_timestamp_request[0x8];
+ u8 second_clock_timestamp_request[0x8];
+ u8 reserved_at_10[0x10];
+
+ u8 first_clock_valid[0x1];
+ u8 second_clock_valid[0x1];
+ u8 reserved_at_22[0x1e];
+
+ u8 first_clock_timestamp[0x40];
+ u8 second_clock_timestamp[0x40];
+};
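+/*
+ * MTCTR reads two clocks back to back (e.g. the PCIe PTM root clock plus
+ * the device free-running or real-time clock, per the request codes
+ * above), which looks like the building block for getcrosststamp()-style
+ * PTP cross-timestamping.
+ */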
+
+struct mlx5_ifc_bin_range_layout_bits {
+ u8 reserved_at_0[0xa];
+ u8 high_val[0x6];
+ u8 reserved_at_10[0xa];
+ u8 low_val[0x6];
+};
+
+struct mlx5_ifc_pphcr_reg_bits {
+ u8 active_hist_type[0x4];
+ u8 reserved_at_4[0x4];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x8];
+ u8 num_of_bins[0x8];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_bin_range_layout_bits bin_range[16];
+};
+
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -10098,6 +11853,12 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_mirc_reg_bits mirc_reg;
struct mlx5_ifc_mfrl_reg_bits mfrl_reg;
struct mlx5_ifc_mtutc_reg_bits mtutc_reg;
+ struct mlx5_ifc_mrtc_reg_bits mrtc_reg;
+ struct mlx5_ifc_mtcap_reg_bits mtcap_reg;
+ struct mlx5_ifc_mtmp_reg_bits mtmp_reg;
+ struct mlx5_ifc_mtptm_reg_bits mtptm_reg;
+ struct mlx5_ifc_mtctr_reg_bits mtctr_reg;
+ struct mlx5_ifc_pphcr_reg_bits pphcr_reg;
u8 reserved_at_0[0x60e0];
};
@@ -10128,10 +11889,12 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
- u8 reserved_at_60[0x20];
+ u8 reserved_at_60[0x10];
+ u8 eswitch_owner_vhca_id[0x10];
u8 table_type[0x8];
u8 reserved_at_88[0x7];
@@ -10171,14 +11934,16 @@ struct mlx5_ifc_modify_flow_table_in_bits {
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
+ u8 other_eswitch[0x1];
+ u8 reserved_at_42[0xe];
u8 vport_number[0x10];
u8 reserved_at_60[0x10];
u8 modify_field_select[0x10];
u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 reserved_at_88[0x8];
+ u8 eswitch_owner_vhca_id[0x10];
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
@@ -10304,6 +12069,67 @@ struct mlx5_ifc_pbmc_reg_bits {
u8 reserved_at_2e0[0x80];
};
+struct mlx5_ifc_sbpr_reg_bits {
+ u8 desc[0x1];
+ u8 snap[0x1];
+ u8 reserved_at_2[0x4];
+ u8 dir[0x2];
+ u8 reserved_at_8[0x14];
+ u8 pool[0x4];
+
+ u8 infi_size[0x1];
+ u8 reserved_at_21[0x7];
+ u8 size[0x18];
+
+ u8 reserved_at_40[0x1c];
+ u8 mode[0x4];
+
+ u8 reserved_at_60[0x8];
+ u8 buff_occupancy[0x18];
+
+ u8 clr[0x1];
+ u8 reserved_at_81[0x7];
+ u8 max_buff_occupancy[0x18];
+
+ u8 reserved_at_a0[0x8];
+ u8 ext_buff_occupancy[0x18];
+};
+
+struct mlx5_ifc_sbcm_reg_bits {
+ u8 desc[0x1];
+ u8 snap[0x1];
+ u8 reserved_at_2[0x6];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 pg_buff[0x6];
+ u8 reserved_at_18[0x6];
+ u8 dir[0x2];
+
+ u8 reserved_at_20[0x1f];
+ u8 exc[0x1];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x8];
+ u8 buff_occupancy[0x18];
+
+ u8 clr[0x1];
+ u8 reserved_at_a1[0x7];
+ u8 max_buff_occupancy[0x18];
+
+ u8 reserved_at_c0[0x8];
+ u8 min_buff[0x18];
+
+ u8 infi_max[0x1];
+ u8 reserved_at_e1[0x7];
+ u8 max_buff[0x18];
+
+ u8 reserved_at_100[0x20];
+
+ u8 reserved_at_120[0x1c];
+ u8 pool[0x4];
+};
+
struct mlx5_ifc_qtct_reg_bits {
u8 reserved_at_0[0x8];
u8 port_number[0x8];
@@ -10375,12 +12201,22 @@ struct mlx5_ifc_dcbx_param_bits {
u8 reserved_at_a0[0x160];
};
+enum {
+ MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY = 0,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT = 1,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_MPESW = 2,
+};
+
struct mlx5_ifc_lagc_bits {
u8 fdb_selection_mode[0x1];
- u8 reserved_at_1[0x1c];
+ u8 reserved_at_1[0x14];
+ u8 port_select_mode[0x3];
+ u8 reserved_at_18[0x5];
u8 lag_state[0x3];
- u8 reserved_at_20[0x14];
+ u8 reserved_at_20[0xc];
+ u8 active_port[0x4];
+ u8 reserved_at_30[0x4];
u8 tx_remap_affinity_2[0x4];
u8 reserved_at_38[0x4];
u8 tx_remap_affinity_1[0x4];
@@ -10591,33 +12427,11 @@ struct mlx5_ifc_dealloc_memic_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
-
- u8 vhca_tunnel_id[0x10];
- u8 obj_type[0x10];
-
- u8 obj_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 obj_id[0x20];
-
- u8 reserved_at_60[0x20];
-};
-
struct mlx5_ifc_umem_bits {
u8 reserved_at_0[0x80];
- u8 reserved_at_80[0x1b];
+ u8 ats[0x1];
+ u8 reserved_at_81[0x1a];
u8 log_page_size[0x5];
u8 page_offset[0x20];
@@ -10821,7 +12635,9 @@ struct mlx5_ifc_mtrc_ctrl_bits {
struct mlx5_ifc_host_params_context_bits {
u8 host_number[0x8];
- u8 reserved_at_8[0x7];
+ u8 reserved_at_8[0x5];
+ u8 host_pf_not_exist[0x1];
+ u8 reserved_at_14[0x1];
u8 host_pf_disabled[0x1];
u8 host_num_of_vfs[0x10];
@@ -10943,21 +12759,79 @@ struct mlx5_ifc_affiliated_event_header_bits {
};
enum {
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT_ULL(0xc),
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT_ULL(0x13),
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT_ULL(0x20),
-};
-
-enum {
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc,
MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13,
MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24,
+ MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27,
+ MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47,
+ MLX5_GENERAL_OBJECT_TYPES_RDMA_CTRL = 0x53,
+ MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT = 0x58,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS = 0xff15,
+};
+
+enum {
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_IPSEC),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_SAMPLER),
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO),
+};
+
+enum {
+ MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_RDMA_CTRL - 0x40),
+ MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT =
+ BIT_ULL(MLX5_GENERAL_OBJECT_TYPES_PCIE_CONG_EVENT - 0x40),
};
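+/*
+ * Object types >= 0x40 are advertised in a second 64-bit general object
+ * type mask in HCA cap 2, hence the "- 0x40" bit-position adjustment;
+ * types below 0x40 keep using the original general_obj_types word.
+ */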
enum {
MLX5_IPSEC_OBJECT_ICV_LEN_16B,
- MLX5_IPSEC_OBJECT_ICV_LEN_12B,
- MLX5_IPSEC_OBJECT_ICV_LEN_8B,
+};
+
+enum {
+ MLX5_IPSEC_ASO_REG_C_0_1 = 0x0,
+ MLX5_IPSEC_ASO_REG_C_2_3 = 0x1,
+ MLX5_IPSEC_ASO_REG_C_4_5 = 0x2,
+ MLX5_IPSEC_ASO_REG_C_6_7 = 0x3,
+};
+
+enum {
+ MLX5_IPSEC_ASO_MODE = 0x0,
+ MLX5_IPSEC_ASO_REPLAY_PROTECTION = 0x1,
+ MLX5_IPSEC_ASO_INC_SN = 0x2,
+};
+
+enum {
+ MLX5_IPSEC_ASO_REPLAY_WIN_32BIT = 0x0,
+ MLX5_IPSEC_ASO_REPLAY_WIN_64BIT = 0x1,
+ MLX5_IPSEC_ASO_REPLAY_WIN_128BIT = 0x2,
+ MLX5_IPSEC_ASO_REPLAY_WIN_256BIT = 0x3,
+};
+
+struct mlx5_ifc_ipsec_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_201[0x1];
+ u8 mode[0x2];
+ u8 window_sz[0x2];
+ u8 soft_lft_arm[0x1];
+ u8 hard_lft_arm[0x1];
+ u8 remove_flow_enable[0x1];
+ u8 esn_event_arm[0x1];
+ u8 reserved_at_20a[0x16];
+
+ u8 remove_flow_pkt_cnt[0x20];
+
+ u8 remove_flow_soft_lft[0x20];
+
+ u8 reserved_at_260[0x80];
+
+ u8 mode_parameter[0x20];
+
+ u8 replay_protection_window[0x100];
};
struct mlx5_ifc_ipsec_obj_bits {
@@ -10981,7 +12855,11 @@ struct mlx5_ifc_ipsec_obj_bits {
u8 implicit_iv[0x40];
- u8 reserved_at_100[0x700];
+ u8 reserved_at_100[0x8];
+ u8 ipsec_aso_access_pd[0x18];
+ u8 reserved_at_120[0xe0];
+
+ struct mlx5_ifc_ipsec_aso_bits ipsec_aso;
};
struct mlx5_ifc_create_ipsec_obj_in_bits {
@@ -11004,21 +12882,152 @@ struct mlx5_ifc_modify_ipsec_obj_in_bits {
struct mlx5_ifc_ipsec_obj_bits ipsec_object;
};
+enum {
+ MLX5_MACSEC_ASO_REPLAY_PROTECTION = 0x1,
+};
+
+enum {
+ MLX5_MACSEC_ASO_REPLAY_WIN_32BIT = 0x0,
+ MLX5_MACSEC_ASO_REPLAY_WIN_64BIT = 0x1,
+ MLX5_MACSEC_ASO_REPLAY_WIN_128BIT = 0x2,
+ MLX5_MACSEC_ASO_REPLAY_WIN_256BIT = 0x3,
+};
+
+#define MLX5_MACSEC_ASO_INC_SN 0x2
+#define MLX5_MACSEC_ASO_REG_C_4_5 0x2
+
+struct mlx5_ifc_macsec_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_1[0x1];
+ u8 mode[0x2];
+ u8 window_size[0x2];
+ u8 soft_lifetime_arm[0x1];
+ u8 hard_lifetime_arm[0x1];
+ u8 remove_flow_enable[0x1];
+ u8 epn_event_arm[0x1];
+ u8 reserved_at_a[0x16];
+
+ u8 remove_flow_packet_count[0x20];
+
+ u8 remove_flow_soft_lifetime[0x20];
+
+ u8 reserved_at_60[0x80];
+
+ u8 mode_parameter[0x20];
+
+ u8 replay_protection_window[8][0x20];
+};
+
+struct mlx5_ifc_macsec_offload_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 confidentiality_en[0x1];
+ u8 reserved_at_41[0x1];
+ u8 epn_en[0x1];
+ u8 epn_overlap[0x1];
+ u8 reserved_at_44[0x2];
+ u8 confidentiality_offset[0x2];
+ u8 reserved_at_48[0x4];
+ u8 aso_return_reg[0x4];
+ u8 reserved_at_50[0x10];
+
+ u8 epn_msb[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 dekn[0x18];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 sci[0x40];
+
+ u8 reserved_at_100[0x8];
+ u8 macsec_aso_access_pd[0x18];
+
+ u8 reserved_at_120[0x60];
+
+ u8 salt[3][0x20];
+
+ u8 reserved_at_1e0[0x20];
+
+ struct mlx5_ifc_macsec_aso_bits macsec_aso;
+};
+
+struct mlx5_ifc_create_macsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
+struct mlx5_ifc_modify_macsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
+enum {
+ MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP = BIT(0),
+ MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB = BIT(1),
+};
+
+struct mlx5_ifc_query_macsec_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_macsec_offload_obj_bits macsec_object;
+};
+
+struct mlx5_ifc_wrapped_dek_bits {
+ u8 gcm_iv[0x60];
+
+ u8 reserved_at_60[0x20];
+
+ u8 const0[0x1];
+ u8 key_size[0x1];
+ u8 reserved_at_82[0x2];
+ u8 key2_invalid[0x1];
+ u8 reserved_at_85[0x3];
+ u8 pd[0x18];
+
+ u8 key_purpose[0x5];
+ u8 reserved_at_a5[0x13];
+ u8 kek_id[0x8];
+
+ u8 reserved_at_c0[0x40];
+
+ u8 key1[0x8][0x20];
+
+ u8 key2[0x8][0x20];
+
+ u8 reserved_at_300[0x40];
+
+ u8 const1[0x1];
+ u8 reserved_at_341[0x1f];
+
+ u8 reserved_at_360[0x20];
+
+ u8 auth_tag[0x80];
+};
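+/*
+ * Import layout for a software-wrapped DEK: per the field names, the key
+ * material arrives AES-GCM encrypted under the KEK named by kek_id
+ * (gcm_iv and auth_tag carry the GCM parameters), matching the new
+ * sw_wrapped bit in encryption_key_obj below.
+ */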
+
struct mlx5_ifc_encryption_key_obj_bits {
u8 modify_field_select[0x40];
- u8 reserved_at_40[0x14];
+ u8 state[0x8];
+ u8 sw_wrapped[0x1];
+ u8 reserved_at_49[0xb];
u8 key_size[0x4];
u8 reserved_at_58[0x4];
- u8 key_type[0x4];
+ u8 key_purpose[0x4];
u8 reserved_at_60[0x8];
u8 pd[0x18];
- u8 reserved_at_80[0x180];
- u8 key[8][0x20];
+ u8 reserved_at_80[0x100];
+
+ u8 opaque[0x40];
+
+ u8 reserved_at_1c0[0x40];
- u8 reserved_at_300[0x500];
+ u8 key[8][0x80];
+
+ u8 sw_wrapped_dek[8][0x80];
+
+ u8 reserved_at_a00[0x600];
};
struct mlx5_ifc_create_encryption_key_in_bits {
@@ -11026,6 +13035,94 @@ struct mlx5_ifc_create_encryption_key_in_bits {
struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
};
+struct mlx5_ifc_modify_encryption_key_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_encryption_key_obj_bits encryption_key_object;
+};
+
+enum {
+ MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH = 0x0,
+ MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2 = 0x1,
+ MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2_IPG = 0x2,
+ MLX5_FLOW_METER_MODE_NUM_PACKETS = 0x3,
+};
+
+struct mlx5_ifc_flow_meter_parameters_bits {
+ u8 valid[0x1];
+ u8 bucket_overflow[0x1];
+ u8 start_color[0x2];
+ u8 both_buckets_on_green[0x1];
+ u8 reserved_at_5[0x1];
+ u8 meter_mode[0x2];
+ u8 reserved_at_8[0x18];
+
+ u8 reserved_at_20[0x20];
+
+ u8 reserved_at_40[0x3];
+ u8 cbs_exponent[0x5];
+ u8 cbs_mantissa[0x8];
+ u8 reserved_at_50[0x3];
+ u8 cir_exponent[0x5];
+ u8 cir_mantissa[0x8];
+
+ u8 reserved_at_60[0x20];
+
+ u8 reserved_at_80[0x3];
+ u8 ebs_exponent[0x5];
+ u8 ebs_mantissa[0x8];
+ u8 reserved_at_90[0x3];
+ u8 eir_exponent[0x5];
+ u8 eir_mantissa[0x8];
+
+ u8 reserved_at_a0[0x60];
+};
+
+struct mlx5_ifc_flow_meter_aso_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x40];
+
+ u8 reserved_at_80[0x8];
+ u8 meter_aso_access_pd[0x18];
+
+ u8 reserved_at_a0[0x160];
+
+ struct mlx5_ifc_flow_meter_parameters_bits flow_meter_parameters[2];
+};
+
+struct mlx5_ifc_create_flow_meter_aso_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_flow_meter_aso_obj_bits flow_meter_aso_obj;
+};
+
+struct mlx5_ifc_int_kek_obj_bits {
+ u8 modify_field_select[0x40];
+
+ u8 state[0x8];
+ u8 auto_gen[0x1];
+ u8 reserved_at_49[0xb];
+ u8 key_size[0x4];
+ u8 reserved_at_58[0x8];
+
+ u8 reserved_at_60[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_at_80[0x180];
+ u8 key[8][0x80];
+
+ u8 reserved_at_600[0x200];
+};
+
+struct mlx5_ifc_create_int_kek_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_int_kek_obj_bits int_kek_object;
+};
+
+struct mlx5_ifc_create_int_kek_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_int_kek_obj_bits int_kek_object;
+};
+
struct mlx5_ifc_sampler_obj_bits {
u8 modify_field_select[0x40];
@@ -11053,14 +13150,21 @@ struct mlx5_ifc_create_sampler_obj_in_bits {
struct mlx5_ifc_sampler_obj_bits sampler_object;
};
+struct mlx5_ifc_query_sampler_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_sampler_obj_bits sampler_object;
+};
+
enum {
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128 = 0x0,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256 = 0x1,
};
enum {
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS = 0x1,
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC = 0x2,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC = 0x2,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC = 0x4,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_PSP = 0x6,
};
struct mlx5_ifc_tls_static_params_bits {
@@ -11103,4 +13207,439 @@ enum {
MLX5_MTT_PERM_RW = MLX5_MTT_PERM_READ | MLX5_MTT_PERM_WRITE,
};
+enum {
+ MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_INITIATOR = 0x0,
+ MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER = 0x1,
+};
+
+struct mlx5_ifc_suspend_vhca_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_suspend_vhca_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+enum {
+ MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_RESPONDER = 0x0,
+ MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_INITIATOR = 0x1,
+};
+
+struct mlx5_ifc_resume_vhca_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_resume_vhca_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_query_vhca_migration_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 incremental[0x1];
+ u8 chunk[0x1];
+ u8 reserved_at_42[0xe];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_vhca_migration_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 required_umem_size[0x20];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 remaining_total_size[0x40];
+
+ u8 reserved_at_100[0x100];
+};
+
+struct mlx5_ifc_save_vhca_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 incremental[0x1];
+ u8 set_track[0x1];
+ u8 reserved_at_42[0xe];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 va[0x40];
+
+ u8 mkey[0x20];
+
+ u8 size[0x20];
+};
+
+struct mlx5_ifc_save_vhca_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 actual_image_size[0x20];
+
+ u8 next_required_umem_size[0x20];
+};
+
+struct mlx5_ifc_load_vhca_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 va[0x40];
+
+ u8 mkey[0x20];
+
+ u8 size[0x20];
+};
+
+struct mlx5_ifc_load_vhca_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_adv_rdma_cap_bits {
+ u8 rdma_transport_manager[0x1];
+ u8 rdma_transport_manager_other_eswitch[0x1];
+ u8 reserved_at_2[0x1e];
+
+ u8 rcx_type[0x8];
+ u8 reserved_at_28[0x2];
+ u8 ps_entry_log_max_value[0x6];
+ u8 reserved_at_30[0x6];
+ u8 qp_max_ps_num_entry[0xa];
+
+ u8 mp_max_num_queues[0x8];
+ u8 ps_user_context_max_log_size[0x8];
+ u8 message_based_qp_and_striding_wq[0x8];
+ u8 reserved_at_58[0x8];
+
+ u8 max_receive_send_message_size_stride[0x10];
+ u8 reserved_at_70[0x10];
+
+ u8 max_receive_send_message_size_byte[0x20];
+
+ u8 reserved_at_a0[0x160];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits rdma_transport_rx_flow_table_properties;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits rdma_transport_tx_flow_table_properties;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_rx_ft_field_support_2;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_tx_ft_field_support_2;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_rx_ft_field_bitmask_support_2;
+
+ struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_tx_ft_field_bitmask_support_2;
+
+ u8 reserved_at_800[0x3800];
+};
+
+struct mlx5_ifc_adv_virtualization_cap_bits {
+ u8 reserved_at_0[0x3];
+ u8 pg_track_log_max_num[0x5];
+ u8 pg_track_max_num_range[0x8];
+ u8 pg_track_log_min_addr_space[0x8];
+ u8 pg_track_log_max_addr_space[0x8];
+
+ u8 reserved_at_20[0x3];
+ u8 pg_track_log_min_msg_size[0x5];
+ u8 reserved_at_28[0x3];
+ u8 pg_track_log_max_msg_size[0x5];
+ u8 reserved_at_30[0x3];
+ u8 pg_track_log_min_page_size[0x5];
+ u8 reserved_at_38[0x3];
+ u8 pg_track_log_max_page_size[0x5];
+
+ u8 reserved_at_40[0x7c0];
+};
+
+struct mlx5_ifc_page_track_report_entry_bits {
+ u8 dirty_address_high[0x20];
+
+ u8 dirty_address_low[0x20];
+};
+
+enum {
+ MLX5_PAGE_TRACK_STATE_TRACKING,
+ MLX5_PAGE_TRACK_STATE_REPORTING,
+ MLX5_PAGE_TRACK_STATE_ERROR,
+};
+
+struct mlx5_ifc_page_track_range_bits {
+ u8 start_address[0x40];
+
+ u8 length[0x40];
+};
+
+struct mlx5_ifc_page_track_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x10];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 state[0x4];
+ u8 track_type[0x4];
+ u8 log_addr_space_size[0x8];
+ u8 reserved_at_90[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_at_98[0x3];
+ u8 log_msg_size[0x5];
+
+ u8 reserved_at_a0[0x8];
+ u8 reporting_qpn[0x18];
+
+ u8 reserved_at_c0[0x18];
+ u8 num_ranges[0x8];
+
+ u8 reserved_at_e0[0x20];
+
+ u8 range_start_address[0x40];
+
+ u8 length[0x40];
+
+ struct mlx5_ifc_page_track_range_bits track_range[0];
+};
+
+struct mlx5_ifc_create_page_track_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_page_track_bits obj_context;
+};
+
+struct mlx5_ifc_modify_page_track_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_page_track_bits obj_context;
+};
+
+struct mlx5_ifc_query_page_track_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_page_track_bits obj_context;
+};
+
+struct mlx5_ifc_msecq_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ u8 reserved_at_20[0x12];
+ u8 network_option[0x2];
+ u8 local_ssm_code[0x4];
+ u8 local_enhanced_ssm_code[0x8];
+
+ u8 local_clock_identity[0x40];
+
+ u8 reserved_at_80[0x180];
+};
+
+enum {
+ MLX5_MSEES_FIELD_SELECT_ENABLE = BIT(0),
+ MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS = BIT(1),
+ MLX5_MSEES_FIELD_SELECT_ADMIN_FREQ_MEASURE = BIT(2),
+};
+
+enum mlx5_msees_admin_status {
+ MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING = 0x0,
+ MLX5_MSEES_ADMIN_STATUS_TRACK = 0x1,
+};
+
+enum mlx5_msees_oper_status {
+ MLX5_MSEES_OPER_STATUS_FREE_RUNNING = 0x0,
+ MLX5_MSEES_OPER_STATUS_SELF_TRACK = 0x1,
+ MLX5_MSEES_OPER_STATUS_OTHER_TRACK = 0x2,
+ MLX5_MSEES_OPER_STATUS_HOLDOVER = 0x3,
+ MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER = 0x4,
+ MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING = 0x5,
+};
+
+enum mlx5_msees_failure_reason {
+ MLX5_MSEES_FAILURE_REASON_UNDEFINED_ERROR = 0x0,
+ MLX5_MSEES_FAILURE_REASON_PORT_DOWN = 0x1,
+ MLX5_MSEES_FAILURE_REASON_TOO_HIGH_FREQUENCY_DIFF = 0x2,
+ MLX5_MSEES_FAILURE_REASON_NET_SYNCHRONIZER_DEVICE_ERROR = 0x3,
+ MLX5_MSEES_FAILURE_REASON_LACK_OF_RESOURCES = 0x4,
+};
+
+struct mlx5_ifc_msees_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 lp_msb[0x2];
+ u8 reserved_at_14[0xc];
+
+ u8 field_select[0x20];
+
+ u8 admin_status[0x4];
+ u8 oper_status[0x4];
+ u8 ho_acq[0x1];
+ u8 reserved_at_49[0xc];
+ u8 admin_freq_measure[0x1];
+ u8 oper_freq_measure[0x1];
+ u8 failure_reason[0x9];
+
+ u8 frequency_diff[0x20];
+
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_mrtcq_reg_bits {
+ u8 reserved_at_0[0x40];
+
+ u8 rt_clock_identity[0x40];
+
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_pcie_cong_event_obj_bits {
+ u8 modify_select_field[0x40];
+
+ u8 inbound_event_en[0x1];
+ u8 outbound_event_en[0x1];
+ u8 reserved_at_42[0x1e];
+
+ u8 reserved_at_60[0x1];
+ u8 inbound_cong_state[0x3];
+ u8 reserved_at_64[0x1];
+ u8 outbound_cong_state[0x3];
+ u8 reserved_at_68[0x18];
+
+ u8 inbound_cong_low_threshold[0x10];
+ u8 inbound_cong_high_threshold[0x10];
+
+ u8 outbound_cong_low_threshold[0x10];
+ u8 outbound_cong_high_threshold[0x10];
+
+ u8 reserved_at_e0[0x340];
+};
+
+struct mlx5_ifc_pcie_cong_event_cmd_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_pcie_cong_event_obj_bits cong_obj;
+};
+
+struct mlx5_ifc_pcie_cong_event_cmd_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits hdr;
+ struct mlx5_ifc_pcie_cong_event_obj_bits cong_obj;
+};
+
+enum mlx5e_pcie_cong_event_mod_field {
+ MLX5_PCIE_CONG_EVENT_MOD_EVENT_EN = BIT(0),
+ MLX5_PCIE_CONG_EVENT_MOD_THRESH = BIT(2),
+};
+
+struct mlx5_ifc_psp_rotate_key_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_psp_rotate_key_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+enum mlx5_psp_gen_spi_in_key_size {
+ MLX5_PSP_GEN_SPI_IN_KEY_SIZE_128 = 0x0,
+ MLX5_PSP_GEN_SPI_IN_KEY_SIZE_256 = 0x1,
+};
+
+struct mlx5_ifc_key_spi_bits {
+ u8 spi[0x20];
+
+ u8 reserved_at_20[0x60];
+
+ u8 key[8][0x20];
+};
+
+struct mlx5_ifc_psp_gen_spi_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 key_size[0x2];
+ u8 reserved_at_62[0xe];
+ u8 num_of_spi[0x10];
+};
+
+struct mlx5_ifc_psp_gen_spi_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 num_of_spi[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ struct mlx5_ifc_key_spi_bits key_spi[];
+};
+
#endif /* MLX5_IFC_H */
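All of the *_bits layouts above follow the mlx5_ifc convention: each u8 member names a field and gives its width in bits, and drivers access the fields through the MLX5_SET()/MLX5_GET() accessors from <linux/mlx5/device.h>, never by direct member access. A minimal sketch of driving PSP_GEN_SPI through the layout above — the MLX5_CMD_OP_PSP_GEN_SPI opcode name is an assumption, and the output buffer is sized for the single key_spi[] entry requested:

    /* Sketch only: generate one 256-bit PSP SPI/key pair. */
    static int example_psp_gen_spi(struct mlx5_core_dev *mdev)
    {
        u32 in[MLX5_ST_SZ_DW(psp_gen_spi_in)] = {};
        /* fixed part plus room for the one key_spi[] entry we ask for */
        u32 out[MLX5_ST_SZ_DW(psp_gen_spi_out) + MLX5_ST_SZ_DW(key_spi)] = {};
        int err;

        MLX5_SET(psp_gen_spi_in, in, opcode, MLX5_CMD_OP_PSP_GEN_SPI);
        MLX5_SET(psp_gen_spi_in, in, key_size, MLX5_PSP_GEN_SPI_IN_KEY_SIZE_256);
        MLX5_SET(psp_gen_spi_in, in, num_of_spi, 1);

        err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
        if (err)
            return err;
        return MLX5_GET(psp_gen_spi_out, out, num_of_spi) == 1 ? 0 : -EIO;
    }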
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index 07d77323f78a..0596472923ad 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -32,31 +32,6 @@
#ifndef MLX5_IFC_FPGA_H
#define MLX5_IFC_FPGA_H
-struct mlx5_ifc_ipv4_layout_bits {
- u8 reserved_at_0[0x60];
-
- u8 ipv4[0x20];
-};
-
-struct mlx5_ifc_ipv6_layout_bits {
- u8 ipv6[16][0x8];
-};
-
-union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
- struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
- struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
- u8 reserved_at_0[0x80];
-};
-
-enum {
- MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX = 0x2c9,
-};
-
-enum {
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2,
- MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS = 0x3,
-};
-
struct mlx5_ifc_fpga_shell_caps_bits {
u8 max_num_qps[0x10];
u8 reserved_at_10[0x8];
@@ -387,89 +362,6 @@ struct mlx5_ifc_fpga_destroy_qp_out_bits {
u8 reserved_at_40[0x40];
};
-struct mlx5_ifc_tls_extended_cap_bits {
- u8 aes_gcm_128[0x1];
- u8 aes_gcm_256[0x1];
- u8 reserved_at_2[0x1e];
- u8 reserved_at_20[0x20];
- u8 context_capacity_total[0x20];
- u8 context_capacity_rx[0x20];
- u8 context_capacity_tx[0x20];
- u8 reserved_at_a0[0x10];
- u8 tls_counter_size[0x10];
- u8 tls_counters_addr_low[0x20];
- u8 tls_counters_addr_high[0x20];
- u8 rx[0x1];
- u8 tx[0x1];
- u8 tls_v12[0x1];
- u8 tls_v13[0x1];
- u8 lro[0x1];
- u8 ipv6[0x1];
- u8 reserved_at_106[0x1a];
-};
-
-struct mlx5_ifc_ipsec_extended_cap_bits {
- u8 encapsulation[0x20];
-
- u8 reserved_0[0x12];
- u8 v2_command[0x1];
- u8 udp_encap[0x1];
- u8 rx_no_trailer[0x1];
- u8 ipv4_fragment[0x1];
- u8 ipv6[0x1];
- u8 esn[0x1];
- u8 lso[0x1];
- u8 transport_and_tunnel_mode[0x1];
- u8 tunnel_mode[0x1];
- u8 transport_mode[0x1];
- u8 ah_esp[0x1];
- u8 esp[0x1];
- u8 ah[0x1];
- u8 ipv4_options[0x1];
-
- u8 auth_alg[0x20];
-
- u8 enc_alg[0x20];
-
- u8 sa_cap[0x20];
-
- u8 reserved_1[0x10];
- u8 number_of_ipsec_counters[0x10];
-
- u8 ipsec_counters_addr_low[0x20];
- u8 ipsec_counters_addr_high[0x20];
-};
-
-struct mlx5_ifc_ipsec_counters_bits {
- u8 dec_in_packets[0x40];
-
- u8 dec_out_packets[0x40];
-
- u8 dec_bypass_packets[0x40];
-
- u8 enc_in_packets[0x40];
-
- u8 enc_out_packets[0x40];
-
- u8 enc_bypass_packets[0x40];
-
- u8 drop_dec_packets[0x40];
-
- u8 failed_auth_dec_packets[0x40];
-
- u8 drop_enc_packets[0x40];
-
- u8 success_add_sa[0x40];
-
- u8 fail_add_sa[0x40];
-
- u8 success_delete_sa[0x40];
-
- u8 fail_delete_sa[0x40];
-
- u8 dropped_cmd[0x40];
-};
-
enum {
MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RETRY_COUNTER_EXPIRED = 0x1,
MLX5_FPGA_QP_ERROR_EVENT_SYNDROME_RNR_EXPIRED = 0x2,
@@ -486,131 +378,4 @@ struct mlx5_ifc_fpga_qp_error_event_bits {
u8 reserved_at_c0[0x8];
u8 fpga_qpn[0x18];
};
-enum mlx5_ifc_fpga_ipsec_response_syndrome {
- MLX5_FPGA_IPSEC_RESPONSE_SUCCESS = 0,
- MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST = 1,
- MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE = 2,
- MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE = 3,
-};
-
-struct mlx5_ifc_fpga_ipsec_cmd_resp {
- __be32 syndrome;
- union {
- __be32 sw_sa_handle;
- __be32 flags;
- };
- u8 reserved[24];
-} __packed;
-
-enum mlx5_ifc_fpga_ipsec_cmd_opcode {
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA = 0,
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA = 1,
- MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 = 2,
- MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 = 3,
- MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2 = 4,
- MLX5_FPGA_IPSEC_CMD_OP_SET_CAP = 5,
-};
-
-enum mlx5_ifc_fpga_ipsec_cap {
- MLX5_FPGA_IPSEC_CAP_NO_TRAILER = BIT(0),
-};
-
-struct mlx5_ifc_fpga_ipsec_cmd_cap {
- __be32 cmd;
- __be32 flags;
- u8 reserved[24];
-} __packed;
-
-enum mlx5_ifc_fpga_ipsec_sa_flags {
- MLX5_FPGA_IPSEC_SA_ESN_EN = BIT(0),
- MLX5_FPGA_IPSEC_SA_ESN_OVERLAP = BIT(1),
- MLX5_FPGA_IPSEC_SA_IPV6 = BIT(2),
- MLX5_FPGA_IPSEC_SA_DIR_SX = BIT(3),
- MLX5_FPGA_IPSEC_SA_SPI_EN = BIT(4),
- MLX5_FPGA_IPSEC_SA_SA_VALID = BIT(5),
- MLX5_FPGA_IPSEC_SA_IP_ESP = BIT(6),
- MLX5_FPGA_IPSEC_SA_IP_AH = BIT(7),
-};
-
-enum mlx5_ifc_fpga_ipsec_sa_enc_mode {
- MLX5_FPGA_IPSEC_SA_ENC_MODE_NONE = 0,
- MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128 = 1,
- MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128 = 3,
-};
-
-struct mlx5_ifc_fpga_ipsec_sa_v1 {
- __be32 cmd;
- u8 key_enc[32];
- u8 key_auth[32];
- __be32 sip[4];
- __be32 dip[4];
- union {
- struct {
- __be32 reserved;
- u8 salt_iv[8];
- __be32 salt;
- } __packed gcm;
- struct {
- u8 salt[16];
- } __packed cbc;
- };
- __be32 spi;
- __be32 sw_sa_handle;
- __be16 tfclen;
- u8 enc_mode;
- u8 reserved1[2];
- u8 flags;
- u8 reserved2[2];
-};
-
-struct mlx5_ifc_fpga_ipsec_sa {
- struct mlx5_ifc_fpga_ipsec_sa_v1 ipsec_sa_v1;
- __be16 udp_sp;
- __be16 udp_dp;
- u8 reserved1[4];
- __be32 esn;
- __be16 vid; /* only 12 bits, rest is reserved */
- __be16 reserved2;
-} __packed;
-
-enum fpga_tls_cmds {
- CMD_SETUP_STREAM = 0x1001,
- CMD_TEARDOWN_STREAM = 0x1002,
- CMD_RESYNC_RX = 0x1003,
-};
-
-#define MLX5_TLS_1_2 (0)
-
-#define MLX5_TLS_ALG_AES_GCM_128 (0)
-#define MLX5_TLS_ALG_AES_GCM_256 (1)
-
-struct mlx5_ifc_tls_cmd_bits {
- u8 command_type[0x20];
- u8 ipv6[0x1];
- u8 direction_sx[0x1];
- u8 tls_version[0x2];
- u8 reserved[0x1c];
- u8 swid[0x20];
- u8 src_port[0x10];
- u8 dst_port[0x10];
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
- u8 tls_rcd_sn[0x40];
- u8 tcp_sn[0x20];
- u8 tls_implicit_iv[0x20];
- u8 tls_xor_iv[0x40];
- u8 encryption_key[0x100];
- u8 alg[4];
- u8 reserved2[0x1c];
- u8 reserved3[0x4a0];
-};
-
-struct mlx5_ifc_tls_resp_bits {
- u8 syndrome[0x20];
- u8 stream_id[0x20];
- u8 reserved[0x40];
-};
-
-#define MLX5_TLS_COMMAND_SIZE (0x100)
-
#endif /* MLX5_IFC_FPGA_H */
diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h
index 98b56b75c625..58dfa2ee7c83 100644
--- a/include/linux/mlx5/mlx5_ifc_vdpa.h
+++ b/include/linux/mlx5/mlx5_ifc_vdpa.h
@@ -11,13 +11,15 @@ enum {
};
enum {
- MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = 0x1, // do I check this caps?
- MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = 0x2,
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0,
+ MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1,
};
enum {
- MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0,
- MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1,
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT =
+ BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT),
+ MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED =
+ BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED),
};
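The reordering above makes the capability mask derivable from the queue-type value (cap == BIT(type)), so a support check needs no mapping table. A hedged sketch (the helper name is illustrative):

    /* Sketch: test whether the device's emulation caps cover a virtio queue type. */
    static inline bool example_vq_type_supported(u64 emulation_caps, int q_type)
    {
        /* q_type is ..._VIRTIO_QUEUE_TYPE_SPLIT or ..._VIRTIO_QUEUE_TYPE_PACKED */
        return emulation_caps & BIT(q_type);
    }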
struct mlx5_ifc_virtio_q_bits {
@@ -72,7 +74,11 @@ struct mlx5_ifc_virtio_q_bits {
u8 reserved_at_320[0x8];
u8 pd[0x18];
- u8 reserved_at_340[0xc0];
+ u8 reserved_at_340[0x20];
+
+ u8 desc_group_mkey[0x20];
+
+ u8 reserved_at_380[0x80];
};
struct mlx5_ifc_virtio_net_q_object_bits {
@@ -139,6 +145,13 @@ enum {
MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0,
MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3,
MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS = (u64)1 << 6,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX = (u64)1 << 7,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX = (u64)1 << 8,
+ MLX5_VIRTQ_MODIFY_MASK_QUEUE_VIRTIO_VERSION = (u64)1 << 10,
+ MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY = (u64)1 << 11,
+ MLX5_VIRTQ_MODIFY_MASK_QUEUE_FEATURES = (u64)1 << 12,
+ MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY = (u64)1 << 14,
};
enum {
@@ -148,6 +161,14 @@ enum {
MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR = 0x3,
};
+/* This indicates that the object was not created or has already
+ * been destroyed. It is safe to assume that this object will never
+ * have this many states.
+ */
+enum {
+ MLX5_VIRTIO_NET_Q_OBJECT_NONE = 0xffffffff
+};
+
enum {
MLX5_RQTC_LIST_Q_TYPE_RQ = 0x0,
MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q = 0x1,
@@ -163,4 +184,43 @@ struct mlx5_ifc_modify_virtio_net_q_out_bits {
struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
};
+struct mlx5_ifc_virtio_q_counters_bits {
+ u8 modify_field_select[0x40];
+ u8 reserved_at_40[0x40];
+ u8 received_desc[0x40];
+ u8 completed_desc[0x40];
+ u8 error_cqes[0x20];
+ u8 bad_desc_errors[0x20];
+ u8 exceed_max_chain[0x20];
+ u8 invalid_buffer[0x20];
+ u8 reserved_at_180[0x280];
+};
+
+struct mlx5_ifc_create_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
+};
+
+struct mlx5_ifc_create_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits virtio_q_counters;
+};
+
+struct mlx5_ifc_destroy_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_destroy_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_query_virtio_q_counters_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+};
+
+struct mlx5_ifc_query_virtio_q_counters_out_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_virtio_q_counters_bits counters;
+};
+
#endif /* __MLX5_IFC_VDPA_H_ */
diff --git a/include/linux/mlx5/mpfs.h b/include/linux/mlx5/mpfs.h
new file mode 100644
index 000000000000..bf700c8d5516
--- /dev/null
+++ b/include/linux/mlx5/mpfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+ * Copyright (c) 2021 Mellanox Technologies Ltd.
+ */
+
+#ifndef _MLX5_MPFS_
+#define _MLX5_MPFS_
+
+struct mlx5_core_dev;
+
+#ifdef CONFIG_MLX5_MPFS
+int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
+int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
+#else /* #ifndef CONFIG_MLX5_MPFS */
+static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
+#endif
+
+#endif
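The MPFS (Multi-Physical Function Switch) is the device's L2 steering table: each function registers the unicast MACs it owns so ingress traffic is forwarded to it. A hedged sketch of how a client driver might swap its station address with this API — the helper and its error policy are illustrative; eth_hw_addr_set() is the standard netdev setter:

    static int example_swap_mac(struct mlx5_core_dev *mdev,
                                struct net_device *netdev, const u8 *new_mac)
    {
        int err;

        /* register the new L2-table entry before dropping the old one */
        err = mlx5_mpfs_add_mac(mdev, (u8 *)new_mac);
        if (err)
            return err;

        mlx5_mpfs_del_mac(mdev, (u8 *)netdev->dev_addr); /* sketch ignores the del error */
        eth_hw_addr_set(netdev, new_mac);
        return 0;
    }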
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 77ea4f9c5265..1df9d9a57bbc 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -56,22 +56,11 @@ enum mlx5_an_status {
MLX5_AN_LINK_DOWN = 4,
};
-#define MLX5_EEPROM_MAX_BYTES 32
-#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
#define MLX5_I2C_ADDR_LOW 0x50
#define MLX5_I2C_ADDR_HIGH 0x51
#define MLX5_EEPROM_PAGE_LENGTH 256
#define MLX5_EEPROM_HIGH_PAGE_LENGTH 128
-struct mlx5_module_eeprom_query_params {
- u16 size;
- u16 offset;
- u16 i2c_address;
- u32 page;
- u32 bank;
- u32 module_number;
-};
-
enum mlx5e_link_mode {
MLX5E_1000BASE_CX_SGMII = 0,
MLX5E_1000BASE_KX = 1,
@@ -117,8 +106,13 @@ enum mlx5e_ext_link_mode {
MLX5E_100GAUI_1_100GBASE_CR_KR = 11,
MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12,
MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13,
- MLX5E_400GAUI_8 = 15,
+ MLX5E_200GAUI_1_200GBASE_CR1_KR1 = 14,
+ MLX5E_400GAUI_8_400GBASE_CR8 = 15,
MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16,
+ MLX5E_400GAUI_2_400GBASE_CR2_KR2 = 17,
+ MLX5E_800GAUI_8_800GBASE_CR8_KR8 = 19,
+ MLX5E_800GAUI_4_800GBASE_CR4_KR4 = 20,
+ MLX5E_1600TAUI_8_1600TBASE_CR8_KR8 = 23,
MLX5E_EXT_LINK_MODES_NUMBER,
};
@@ -143,25 +137,19 @@ enum mlx5_ptys_width {
MLX5_PTYS_WIDTH_12X = 1 << 4,
};
-#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+#define MLX5E_PROT_MASK(link_mode) (1U << link_mode)
#define MLX5_GET_ETH_PROTO(reg, out, ext, field) \
(ext ? MLX5_GET(reg, out, ext_##field) : \
MLX5_GET(reg, out, field))
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
- int ptys_size, int proto_mask, u8 local_port);
+ int ptys_size, int proto_mask,
+ u8 local_port, u8 plane_index);
int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper,
- u16 *proto_oper, u8 local_port);
-void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
-int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status status);
-int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
- enum mlx5_port_status *status);
-int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
-
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+ u16 *proto_oper, u8 local_port, u8 plane_index);
+
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
u8 port);
@@ -169,55 +157,4 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
u8 *vl_hw_cap, u8 local_port);
-int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
-int mlx5_query_port_pause(struct mlx5_core_dev *dev,
- u32 *rx_pause, u32 *tx_pause);
-
-int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
-int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
- u8 *pfc_en_rx);
-
-int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
- u16 stall_critical_watermark,
- u16 stall_minor_watermark);
-int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
- u16 *stall_critical_watermark, u16 *stall_minor_watermark);
-
-int mlx5_max_tc(struct mlx5_core_dev *mdev);
-
-int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
-int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
- u8 prio, u8 *tc);
-int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
-int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
- u8 tc, u8 *tc_group);
-int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
-int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
- u8 tc, u8 *bw_pct);
-int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
- u8 *max_bw_value,
- u8 *max_bw_unit);
-int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
- u8 *max_bw_value,
- u8 *max_bw_unit);
-int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
-int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
-
-int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
-int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
-int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
-void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
- bool *enabled);
-int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
- u16 offset, u16 size, u8 *data);
-int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
- struct mlx5_module_eeprom_query_params *params, u8 *data);
-
-int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
-int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
-
-int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
-int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
-int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
-int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
#endif /* __MLX5_PORT_H__ */
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index b7deb790f257..d67aedc6ea68 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -36,7 +36,7 @@
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>
-#define MLX5_INVALID_LKEY 0x100
+#define MLX5_TERMINATE_SCATTER_LIST_LKEY cpu_to_be32(0x100)
/* UMR (3 WQE_BB's) + SIG (3 WQE_BB's) + PSV (mem) + PSV (wire) */
#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 8)
#define MLX5_DIF_SIZE 8
@@ -149,6 +149,7 @@ enum {
MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
MLX5_WQE_CTRL_SOLICITED = 1 << 1,
+ MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE = 1 << 5,
};
enum {
@@ -162,6 +163,8 @@ enum {
MLX5_SEND_WQE_MAX_WQEBBS = 16,
};
+#define MLX5_SEND_WQE_MAX_SIZE (MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQE_BB)
+
enum {
MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
@@ -202,6 +205,9 @@ struct mlx5_wqe_fmr_seg {
struct mlx5_wqe_ctrl_seg {
__be32 opmod_idx_opcode;
__be32 qpn_ds;
+
+ struct_group(trailer,
+
u8 signature;
u8 rsvd[2];
u8 fm_ce_se;
@@ -211,6 +217,8 @@ struct mlx5_wqe_ctrl_seg {
__be32 umr_mkey;
__be32 tis_tir_num;
};
+
+ ); /* end of trailer group */
};
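Wrapping the tail of the control segment in struct_group() gives it an addressable name, so code can copy or zero everything after qpn_ds in one FORTIFY-friendly operation instead of a memcpy() that strays across member boundaries. A minimal sketch:

    /* Sketch: duplicate a ctrl segment's trailer (signature..tis_tir_num) in one copy. */
    static inline void example_copy_ctrl_trailer(struct mlx5_wqe_ctrl_seg *dst,
                                                 const struct mlx5_wqe_ctrl_seg *src)
    {
        memcpy(&dst->trailer, &src->trailer, sizeof(dst->trailer));
    }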
#define MLX5_WQE_CTRL_DS_MASK 0x3f
@@ -229,13 +237,11 @@ enum {
};
enum {
- MLX5_ETH_WQE_SVLAN = 1 << 0,
MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC = 1 << 26,
MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC = 1 << 27,
MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC = 3 << 26,
MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC = 1 << 28,
MLX5_ETH_WQE_INSERT_TRAILER = 1 << 30,
- MLX5_ETH_WQE_INSERT_VLAN = 1 << 15,
};
enum {
@@ -245,8 +251,15 @@ enum {
MLX5_ETH_WQE_SWP_OUTER_L4_UDP = 1 << 5,
};
+/* Metadata bits 0-7 are used by timestamping */
+/* Base shift for metadata bits used by IPsec and MACsec */
+#define MLX5_ETH_WQE_FT_META_SHIFT 8
+
enum {
- MLX5_ETH_WQE_FT_META_IPSEC = BIT(0),
+ MLX5_ETH_WQE_FT_META_IPSEC = BIT(0) << MLX5_ETH_WQE_FT_META_SHIFT,
+ MLX5_ETH_WQE_FT_META_MACSEC = BIT(1) << MLX5_ETH_WQE_FT_META_SHIFT,
+ MLX5_ETH_WQE_FT_META_MACSEC_FS_ID_MASK =
+ GENMASK(5, 2) << MLX5_ETH_WQE_FT_META_SHIFT,
};
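With timestamping owning metadata bits 0-7, the IPsec and MACsec markers now start at bit 8, and MACsec additionally packs a flow-steering id into the GENMASK(5, 2) window above the shift. A hedged sketch of composing the egress metadata for a MACsec SA — the helper and its fs_id parameter are illustrative:

    #include <linux/bitfield.h>

    /* Sketch: build tx flow-table metadata carrying the MACsec mark plus a 4-bit fs_id. */
    static inline u32 example_macsec_tx_meta(u32 fs_id)
    {
        return MLX5_ETH_WQE_FT_META_MACSEC |
               FIELD_PREP(MLX5_ETH_WQE_FT_META_MACSEC_FS_ID_MASK, fs_id);
    }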
struct mlx5_wqe_eth_seg {
@@ -261,12 +274,11 @@ struct mlx5_wqe_eth_seg {
union {
struct {
__be16 sz;
- u8 start[2];
+ union {
+ u8 start[2];
+ DECLARE_FLEX_ARRAY(u8, data);
+ };
} inline_hdr;
- struct {
- __be16 type;
- __be16 vlan_tci;
- } insert;
__be32 trailer;
};
};
@@ -470,6 +482,12 @@ struct mlx5_klm {
__be64 va;
};
+struct mlx5_ksm {
+ __be32 reserved;
+ __be32 key;
+ __be64 va;
+};
+
struct mlx5_stride_block_entry {
__be16 stride;
__be16 bcount;
@@ -485,6 +503,16 @@ struct mlx5_stride_block_ctrl_seg {
__be16 num_entries;
};
+struct mlx5_wqe_flow_update_ctrl_seg {
+ __be32 flow_idx_update;
+ __be32 dest_handle;
+ u8 reserved0[40];
+};
+
+struct mlx5_wqe_header_modify_argument_update_seg {
+ u8 argument_list[64];
+};
+
struct mlx5_core_qp {
struct mlx5_core_rsc_common common; /* must be first */
void (*event) (struct mlx5_core_qp *, int);
@@ -549,9 +577,12 @@ static inline const char *mlx5_qp_state_str(int state)
static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev)
{
- return !MLX5_CAP_ROCE(dev, qp_ts_format) ?
- MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING :
- MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
+ u8 supported_ts_cap = mlx5_get_roce_state(dev) ?
+ MLX5_CAP_ROCE(dev, qp_ts_format) :
+ MLX5_CAP_GEN(dev, sq_ts_format);
+
+ return supported_ts_cap ? MLX5_TIMESTAMP_FORMAT_DEFAULT :
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
}
#endif /* MLX5_QP_H */
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 028f442530cf..60ffeb6b67ae 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -85,4 +85,5 @@ mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
struct mlx5_hairpin_params *params);
void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
+void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp);
#endif /* __TRANSOBJ_H__ */
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index aad53cb72f17..f876bfc0669c 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -72,7 +72,9 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
-int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group);
+int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u16 vport, bool other_vport, u64 *node_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
u16 vport, u64 node_guid);
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
@@ -132,4 +134,8 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev);
+int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out,
+ u16 opmod);
+int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id);
+
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c274f75efcf9..7a1819c20643 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3,17 +3,16 @@
#define _LINUX_MM_H
#include <linux/errno.h>
-
-#ifdef __KERNEL__
-
#include <linux/mmdebug.h>
#include <linux/gfp.h>
+#include <linux/pgalloc_tag.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
+#include <linux/compiler.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>
#include <linux/range.h>
@@ -26,37 +25,29 @@
#include <linux/err.h>
#include <linux/page-flags.h>
#include <linux/page_ref.h>
-#include <linux/memremap.h>
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
+#include <linux/memremap.h>
+#include <linux/slab.h>
+#include <linux/cacheinfo.h>
+#include <linux/rcuwait.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
-struct file_ra_state;
struct user_struct;
-struct writeback_control;
-struct bdi_writeback;
struct pt_regs;
+struct folio_batch;
-extern int sysctl_page_lock_unfairness;
-
+void arch_mm_preinit(void);
+void mm_core_init(void);
void init_mm_internals(void);
-#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
-extern unsigned long max_mapnr;
-
-static inline void set_max_mapnr(unsigned long limit)
-{
- max_mapnr = limit;
-}
-#else
-static inline void set_max_mapnr(unsigned long limit) { }
-#endif
-
extern atomic_long_t _totalram_pages;
static inline unsigned long totalram_pages(void)
{
@@ -79,7 +70,15 @@ static inline void totalram_pages_add(long count)
}
extern void * high_memory;
-extern int page_cluster;
+
+/*
+ * Convert between pages and MB
+ * 20 is the shift for 1MB (2^20 = 1MB)
+ * PAGE_SHIFT is the shift for page size (e.g., 12 for 4KB pages)
+ * So (20 - PAGE_SHIFT) converts between pages and MB
+ */
+#define PAGES_TO_MB(pages) ((pages) >> (20 - PAGE_SHIFT))
+#define MB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
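With 4 KiB pages (PAGE_SHIFT == 12) the shift is 8, so PAGES_TO_MB(512) == 2 and MB_TO_PAGES(16) == 4096. A hedged sketch using the totalram_pages() accessor declared above (the helper name is illustrative):

    /* Sketch: report total RAM in MB, e.g. for a boot banner. */
    static inline unsigned long example_totalram_mb(void)
    {
        return PAGES_TO_MB(totalram_pages());
    }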
#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
@@ -89,7 +88,7 @@ extern int sysctl_legacy_va_layout;
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
-extern const int mmap_rnd_bits_max;
+extern int mmap_rnd_bits_max __ro_after_init;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
@@ -98,20 +97,19 @@ extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif
+#ifndef DIRECT_MAP_PHYSMEM_END
+# ifdef MAX_PHYSMEM_BITS
+# define DIRECT_MAP_PHYSMEM_END ((1ULL << MAX_PHYSMEM_BITS) - 1)
+# else
+# define DIRECT_MAP_PHYSMEM_END (((phys_addr_t)-1) & ~(1ULL << 63))
+# endif
+#endif
+
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
+
#include <asm/page.h>
#include <asm/processor.h>
-/*
- * Architectures that support memory tagging (assigning tags to memory regions,
- * embedding these tags into addresses that point to these memory regions, and
- * checking that the memory and the pointer tags match on memory accesses)
- * redefine this macro to strip tags from pointers.
- * It's defined as noop for architectures that don't support memory tagging.
- */
-#ifndef untagged_addr
-#define untagged_addr(addr) (addr)
-#endif
-
#ifndef __pa_symbol
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif
@@ -125,16 +123,6 @@ extern int mmap_rnd_compat_bits __read_mostly;
#endif
/*
- * With CONFIG_CFI_CLANG, the compiler replaces function addresses in
- * instrumented C code with jump table addresses. Architectures that
- * support CFI can define this macro to return the actual function address
- * when needed.
- */
-#ifndef function_nocfi
-#define function_nocfi(x) (x)
-#endif
-
-/*
* To prevent common memory management code establishing
* a zero page mapping on a read fault.
* This macro should be defined within <asm/pgtable.h>.
@@ -152,10 +140,10 @@ extern int mmap_rnd_compat_bits __read_mostly;
* define their own version of this macro in <asm/pgtable.h>
*/
#if BITS_PER_LONG == 64
-/* This function must be updated when the size of struct page grows above 80
+/* This function must be updated when the size of struct page grows above 96
 * or reduces below 56. The idea is that the compiler optimizes out the switch()
* statement, and only leaves move/store instructions. Also the compiler can
- * combine write statments if they are both assignments and can be reordered,
+ * combine write statements if they are both assignments and can be reordered,
* this can result in several of the writes here being dropped.
*/
#define mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
@@ -163,12 +151,18 @@ static inline void __mm_zero_struct_page(struct page *page)
{
unsigned long *_pp = (void *)page;
- /* Check that struct page is either 56, 64, 72, or 80 bytes */
+ /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
BUILD_BUG_ON(sizeof(struct page) & 7);
BUILD_BUG_ON(sizeof(struct page) < 56);
- BUILD_BUG_ON(sizeof(struct page) > 80);
+ BUILD_BUG_ON(sizeof(struct page) > 96);
switch (sizeof(struct page)) {
+ case 96:
+ _pp[11] = 0;
+ fallthrough;
+ case 88:
+ _pp[10] = 0;
+ fallthrough;
case 80:
_pp[9] = 0;
fallthrough;
@@ -216,33 +210,46 @@ extern int sysctl_max_map_count;
extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;
-extern int sysctl_overcommit_memory;
-extern int sysctl_overcommit_ratio;
-extern unsigned long sysctl_overcommit_kbytes;
-
-int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-/*
- * Any attempt to mark this function as static leads to build failure
- * when CONFIG_DEBUG_INFO_BTF is enabled because __add_to_page_cache_locked()
- * is referred to by BPF code. This must be visible for error injection.
- */
-int __add_to_page_cache_locked(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp, void **shadowp);
-
-#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+bool page_range_contiguous(const struct page *page, unsigned long nr_pages);
+#else
+static inline bool page_range_contiguous(const struct page *page,
+ unsigned long nr_pages)
+{
+ return true;
+}
+#endif
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+/* to align the pointer to the (prev) page boundary */
+#define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
+
/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
-#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
+/**
+ * folio_page_idx - Return the number of a page in a folio.
+ * @folio: The folio.
+ * @page: The folio page.
+ *
+ * This function expects that the page is actually part of the folio.
+ * The returned number is relative to the start of the folio.
+ */
+static inline unsigned long folio_page_idx(const struct folio *folio,
+ const struct page *page)
+{
+ return page - &folio->page;
+}
+
+static inline struct folio *lru_to_folio(struct list_head *head)
+{
+ return list_entry((head)->prev, struct folio, lru);
+}
+
+void setup_initial_init_mm(void *start_code, void *end_code,
+ void *end_data, void *brk);
/*
* Linux kernel virtual memory manager primitives.
@@ -268,119 +275,239 @@ extern unsigned int kobjsize(const void *objp);
* vm_flags in vm_area_struct, see mm_types.h.
* When changing, update also include/trace/events/mmflags.h
*/
+
#define VM_NONE 0x00000000
-#define VM_READ 0x00000001 /* currently active flags */
-#define VM_WRITE 0x00000002
-#define VM_EXEC 0x00000004
-#define VM_SHARED 0x00000008
-
-/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
-#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */
-#define VM_MAYWRITE 0x00000020
-#define VM_MAYEXEC 0x00000040
-#define VM_MAYSHARE 0x00000080
-
-#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
-#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */
-#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
-#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
-#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */
-
-#define VM_LOCKED 0x00002000
-#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
-
- /* Used by sys_madvise() */
-#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */
-#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */
-
-#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
-#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
-#define VM_LOCKONFAULT 0x00080000 /* Lock the pages covered when they are faulted in */
-#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
-#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
-#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
-#define VM_SYNC 0x00800000 /* Synchronous page faults */
-#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
-#define VM_WIPEONFORK 0x02000000 /* Wipe VMA contents in child. */
-#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
+/**
+ * typedef vma_flag_t - specifies an individual VMA flag by bit number.
+ *
+ * This value is made type safe by sparse to avoid passing invalid flag values
+ * around.
+ */
+typedef int __bitwise vma_flag_t;
+#define DECLARE_VMA_BIT(name, bitnum) \
+ VMA_ ## name ## _BIT = ((__force vma_flag_t)bitnum)
+#define DECLARE_VMA_BIT_ALIAS(name, aliased) \
+ VMA_ ## name ## _BIT = (VMA_ ## aliased ## _BIT)
+enum {
+ DECLARE_VMA_BIT(READ, 0),
+ DECLARE_VMA_BIT(WRITE, 1),
+ DECLARE_VMA_BIT(EXEC, 2),
+ DECLARE_VMA_BIT(SHARED, 3),
+ /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
+ DECLARE_VMA_BIT(MAYREAD, 4), /* limits for mprotect() etc. */
+ DECLARE_VMA_BIT(MAYWRITE, 5),
+ DECLARE_VMA_BIT(MAYEXEC, 6),
+ DECLARE_VMA_BIT(MAYSHARE, 7),
+ DECLARE_VMA_BIT(GROWSDOWN, 8), /* general info on the segment */
+#ifdef CONFIG_MMU
+ DECLARE_VMA_BIT(UFFD_MISSING, 9),/* missing pages tracking */
+#else
+ /* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
+ DECLARE_VMA_BIT(MAYOVERLAY, 9),
+#endif /* CONFIG_MMU */
+ /* Page-ranges managed without "struct page", just pure PFN */
+ DECLARE_VMA_BIT(PFNMAP, 10),
+ DECLARE_VMA_BIT(MAYBE_GUARD, 11),
+ DECLARE_VMA_BIT(UFFD_WP, 12), /* wrprotect pages tracking */
+ DECLARE_VMA_BIT(LOCKED, 13),
+ DECLARE_VMA_BIT(IO, 14), /* Memory mapped I/O or similar */
+ DECLARE_VMA_BIT(SEQ_READ, 15), /* App will access data sequentially */
+ DECLARE_VMA_BIT(RAND_READ, 16), /* App will not benefit from clustered reads */
+ DECLARE_VMA_BIT(DONTCOPY, 17), /* Do not copy this vma on fork */
+ DECLARE_VMA_BIT(DONTEXPAND, 18),/* Cannot expand with mremap() */
+ DECLARE_VMA_BIT(LOCKONFAULT, 19),/* Lock pages covered when faulted in */
+ DECLARE_VMA_BIT(ACCOUNT, 20), /* Is a VM accounted object */
+ DECLARE_VMA_BIT(NORESERVE, 21), /* should the VM suppress accounting */
+ DECLARE_VMA_BIT(HUGETLB, 22), /* Huge TLB Page VM */
+ DECLARE_VMA_BIT(SYNC, 23), /* Synchronous page faults */
+ DECLARE_VMA_BIT(ARCH_1, 24), /* Architecture-specific flag */
+ DECLARE_VMA_BIT(WIPEONFORK, 25),/* Wipe VMA contents in child. */
+ DECLARE_VMA_BIT(DONTDUMP, 26), /* Do not include in the core dump */
+ DECLARE_VMA_BIT(SOFTDIRTY, 27), /* NOT soft dirty clean area */
+ DECLARE_VMA_BIT(MIXEDMAP, 28), /* Can contain struct page and pure PFN pages */
+ DECLARE_VMA_BIT(HUGEPAGE, 29), /* MADV_HUGEPAGE marked this vma */
+ DECLARE_VMA_BIT(NOHUGEPAGE, 30),/* MADV_NOHUGEPAGE marked this vma */
+ DECLARE_VMA_BIT(MERGEABLE, 31), /* KSM may merge identical pages */
+ /* These bits are reused, we define specific uses below. */
+ DECLARE_VMA_BIT(HIGH_ARCH_0, 32),
+ DECLARE_VMA_BIT(HIGH_ARCH_1, 33),
+ DECLARE_VMA_BIT(HIGH_ARCH_2, 34),
+ DECLARE_VMA_BIT(HIGH_ARCH_3, 35),
+ DECLARE_VMA_BIT(HIGH_ARCH_4, 36),
+ DECLARE_VMA_BIT(HIGH_ARCH_5, 37),
+ DECLARE_VMA_BIT(HIGH_ARCH_6, 38),
+ /*
+	 * This flag is used to connect VFIO to arch-specific KVM code. It
+	 * indicates that the memory under this VMA is safe for use with any
+	 * non-cacheable memory type inside KVM. Some VFIO devices, on some
+ * platforms, are thought to be unsafe and can cause machine crashes
+ * if KVM does not lock down the memory type.
+ */
+ DECLARE_VMA_BIT(ALLOW_ANY_UNCACHED, 39),
+#ifdef CONFIG_PPC32
+ DECLARE_VMA_BIT_ALIAS(DROPPABLE, ARCH_1),
+#else
+ DECLARE_VMA_BIT(DROPPABLE, 40),
+#endif
+ DECLARE_VMA_BIT(UFFD_MINOR, 41),
+ DECLARE_VMA_BIT(SEALED, 42),
+ /* Flags that reuse flags above. */
+ DECLARE_VMA_BIT_ALIAS(PKEY_BIT0, HIGH_ARCH_0),
+ DECLARE_VMA_BIT_ALIAS(PKEY_BIT1, HIGH_ARCH_1),
+ DECLARE_VMA_BIT_ALIAS(PKEY_BIT2, HIGH_ARCH_2),
+ DECLARE_VMA_BIT_ALIAS(PKEY_BIT3, HIGH_ARCH_3),
+ DECLARE_VMA_BIT_ALIAS(PKEY_BIT4, HIGH_ARCH_4),
+#if defined(CONFIG_X86_USER_SHADOW_STACK)
+ /*
+ * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
+	 * support in core mm.
+ *
+ * These VMAs will get a single end guard page. This helps userspace
+ * protect itself from attacks. A single page is enough for current
+ * shadow stack archs (x86). See the comments near alloc_shstk() in
+ * arch/x86/kernel/shstk.c for more details on the guard size.
+ */
+ DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_5),
+#elif defined(CONFIG_ARM64_GCS)
+ /*
+ * arm64's Guarded Control Stack implements similar functionality and
+ * has similar constraints to shadow stacks.
+ */
+ DECLARE_VMA_BIT_ALIAS(SHADOW_STACK, HIGH_ARCH_6),
+#endif
+ DECLARE_VMA_BIT_ALIAS(SAO, ARCH_1), /* Strong Access Ordering (powerpc) */
+ DECLARE_VMA_BIT_ALIAS(GROWSUP, ARCH_1), /* parisc */
+ DECLARE_VMA_BIT_ALIAS(SPARC_ADI, ARCH_1), /* sparc64 */
+ DECLARE_VMA_BIT_ALIAS(ARM64_BTI, ARCH_1), /* arm64 */
+ DECLARE_VMA_BIT_ALIAS(ARCH_CLEAR, ARCH_1), /* sparc64, arm64 */
+ DECLARE_VMA_BIT_ALIAS(MAPPED_COPY, ARCH_1), /* !CONFIG_MMU */
+ DECLARE_VMA_BIT_ALIAS(MTE, HIGH_ARCH_4), /* arm64 */
+ DECLARE_VMA_BIT_ALIAS(MTE_ALLOWED, HIGH_ARCH_5),/* arm64 */
+#ifdef CONFIG_STACK_GROWSUP
+ DECLARE_VMA_BIT_ALIAS(STACK, GROWSUP),
+ DECLARE_VMA_BIT_ALIAS(STACK_EARLY, GROWSDOWN),
+#else
+ DECLARE_VMA_BIT_ALIAS(STACK, GROWSDOWN),
+#endif
+};
+#undef DECLARE_VMA_BIT
+#undef DECLARE_VMA_BIT_ALIAS
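The enum above only assigns bit numbers; the INIT_VM_FLAG() helper defined just below turns each one into a mask, so the legacy flag literals fall out mechanically. For example, DECLARE_VMA_BIT(READ, 0) yields VMA_READ_BIT == 0, and hence:

    /* INIT_VM_FLAG(READ) expands to BIT((__force int)VMA_READ_BIT) == 0x00000001,
     * matching the old '#define VM_READ 0x00000001' literal removed above. */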
+
+#define INIT_VM_FLAG(name) BIT((__force int) VMA_ ## name ## _BIT)
+#define VM_READ INIT_VM_FLAG(READ)
+#define VM_WRITE INIT_VM_FLAG(WRITE)
+#define VM_EXEC INIT_VM_FLAG(EXEC)
+#define VM_SHARED INIT_VM_FLAG(SHARED)
+#define VM_MAYREAD INIT_VM_FLAG(MAYREAD)
+#define VM_MAYWRITE INIT_VM_FLAG(MAYWRITE)
+#define VM_MAYEXEC INIT_VM_FLAG(MAYEXEC)
+#define VM_MAYSHARE INIT_VM_FLAG(MAYSHARE)
+#define VM_GROWSDOWN INIT_VM_FLAG(GROWSDOWN)
+#ifdef CONFIG_MMU
+#define VM_UFFD_MISSING INIT_VM_FLAG(UFFD_MISSING)
+#else
+#define VM_UFFD_MISSING VM_NONE
+#define VM_MAYOVERLAY INIT_VM_FLAG(MAYOVERLAY)
+#endif
+#define VM_PFNMAP INIT_VM_FLAG(PFNMAP)
+#define VM_MAYBE_GUARD INIT_VM_FLAG(MAYBE_GUARD)
+#define VM_UFFD_WP INIT_VM_FLAG(UFFD_WP)
+#define VM_LOCKED INIT_VM_FLAG(LOCKED)
+#define VM_IO INIT_VM_FLAG(IO)
+#define VM_SEQ_READ INIT_VM_FLAG(SEQ_READ)
+#define VM_RAND_READ INIT_VM_FLAG(RAND_READ)
+#define VM_DONTCOPY INIT_VM_FLAG(DONTCOPY)
+#define VM_DONTEXPAND INIT_VM_FLAG(DONTEXPAND)
+#define VM_LOCKONFAULT INIT_VM_FLAG(LOCKONFAULT)
+#define VM_ACCOUNT INIT_VM_FLAG(ACCOUNT)
+#define VM_NORESERVE INIT_VM_FLAG(NORESERVE)
+#define VM_HUGETLB INIT_VM_FLAG(HUGETLB)
+#define VM_SYNC INIT_VM_FLAG(SYNC)
+#define VM_ARCH_1 INIT_VM_FLAG(ARCH_1)
+#define VM_WIPEONFORK INIT_VM_FLAG(WIPEONFORK)
+#define VM_DONTDUMP INIT_VM_FLAG(DONTDUMP)
#ifdef CONFIG_MEM_SOFT_DIRTY
-# define VM_SOFTDIRTY 0x08000000 /* Not soft dirty clean area */
+#define VM_SOFTDIRTY INIT_VM_FLAG(SOFTDIRTY)
#else
-# define VM_SOFTDIRTY 0
-#endif
-
-#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
-#define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */
-#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
-#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
-
-#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
-#define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */
-#define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */
-#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
-#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
-#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */
-#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
-#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
-#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
-#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
-#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
-#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
-
-#ifdef CONFIG_ARCH_HAS_PKEYS
-# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
-# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */
-# define VM_PKEY_BIT1 VM_HIGH_ARCH_1 /* on x86 and 5-bit value on ppc64 */
-# define VM_PKEY_BIT2 VM_HIGH_ARCH_2
-# define VM_PKEY_BIT3 VM_HIGH_ARCH_3
-#ifdef CONFIG_PPC
-# define VM_PKEY_BIT4 VM_HIGH_ARCH_4
+#define VM_SOFTDIRTY VM_NONE
+#endif
+#define VM_MIXEDMAP INIT_VM_FLAG(MIXEDMAP)
+#define VM_HUGEPAGE INIT_VM_FLAG(HUGEPAGE)
+#define VM_NOHUGEPAGE INIT_VM_FLAG(NOHUGEPAGE)
+#define VM_MERGEABLE INIT_VM_FLAG(MERGEABLE)
+#define VM_STACK INIT_VM_FLAG(STACK)
+#ifdef CONFIG_STACK_GROWS_UP
+#define VM_STACK_EARLY INIT_VM_FLAG(STACK_EARLY)
#else
-# define VM_PKEY_BIT4 0
+#define VM_STACK_EARLY VM_NONE
#endif
+#ifdef CONFIG_ARCH_HAS_PKEYS
+#define VM_PKEY_SHIFT ((__force int)VMA_HIGH_ARCH_0_BIT)
+/* Despite the naming, these are FLAGS not bits. */
+#define VM_PKEY_BIT0 INIT_VM_FLAG(PKEY_BIT0)
+#define VM_PKEY_BIT1 INIT_VM_FLAG(PKEY_BIT1)
+#define VM_PKEY_BIT2 INIT_VM_FLAG(PKEY_BIT2)
+#if CONFIG_ARCH_PKEY_BITS > 3
+#define VM_PKEY_BIT3 INIT_VM_FLAG(PKEY_BIT3)
+#else
+#define VM_PKEY_BIT3 VM_NONE
+#endif /* CONFIG_ARCH_PKEY_BITS > 3 */
+#if CONFIG_ARCH_PKEY_BITS > 4
+#define VM_PKEY_BIT4 INIT_VM_FLAG(PKEY_BIT4)
+#else
+#define VM_PKEY_BIT4 VM_NONE
+#endif /* CONFIG_ARCH_PKEY_BITS > 4 */
#endif /* CONFIG_ARCH_HAS_PKEYS */
-
-#if defined(CONFIG_X86)
-# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
-#elif defined(CONFIG_PPC)
-# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
+#if defined(CONFIG_X86_USER_SHADOW_STACK) || defined(CONFIG_ARM64_GCS)
+#define VM_SHADOW_STACK INIT_VM_FLAG(SHADOW_STACK)
+#else
+#define VM_SHADOW_STACK VM_NONE
+#endif
+#if defined(CONFIG_PPC64)
+#define VM_SAO INIT_VM_FLAG(SAO)
#elif defined(CONFIG_PARISC)
-# define VM_GROWSUP VM_ARCH_1
-#elif defined(CONFIG_IA64)
-# define VM_GROWSUP VM_ARCH_1
+#define VM_GROWSUP INIT_VM_FLAG(GROWSUP)
#elif defined(CONFIG_SPARC64)
-# define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */
-# define VM_ARCH_CLEAR VM_SPARC_ADI
+#define VM_SPARC_ADI INIT_VM_FLAG(SPARC_ADI)
+#define VM_ARCH_CLEAR INIT_VM_FLAG(ARCH_CLEAR)
#elif defined(CONFIG_ARM64)
-# define VM_ARM64_BTI VM_ARCH_1 /* BTI guarded page, a.k.a. GP bit */
-# define VM_ARCH_CLEAR VM_ARM64_BTI
+#define VM_ARM64_BTI INIT_VM_FLAG(ARM64_BTI)
+#define VM_ARCH_CLEAR INIT_VM_FLAG(ARCH_CLEAR)
#elif !defined(CONFIG_MMU)
-# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
-#endif
-
-#if defined(CONFIG_ARM64_MTE)
-# define VM_MTE VM_HIGH_ARCH_0 /* Use Tagged memory for access control */
-# define VM_MTE_ALLOWED VM_HIGH_ARCH_1 /* Tagged memory permitted */
-#else
-# define VM_MTE VM_NONE
-# define VM_MTE_ALLOWED VM_NONE
+#define VM_MAPPED_COPY INIT_VM_FLAG(MAPPED_COPY)
#endif
-
#ifndef VM_GROWSUP
-# define VM_GROWSUP VM_NONE
+#define VM_GROWSUP VM_NONE
+#endif
+#ifdef CONFIG_ARM64_MTE
+#define VM_MTE INIT_VM_FLAG(MTE)
+#define VM_MTE_ALLOWED INIT_VM_FLAG(MTE_ALLOWED)
+#else
+#define VM_MTE VM_NONE
+#define VM_MTE_ALLOWED VM_NONE
#endif
-
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
-# define VM_UFFD_MINOR_BIT 37
-# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */
-#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
-# define VM_UFFD_MINOR VM_NONE
-#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
+#define VM_UFFD_MINOR INIT_VM_FLAG(UFFD_MINOR)
+#else
+#define VM_UFFD_MINOR VM_NONE
+#endif
+#ifdef CONFIG_64BIT
+#define VM_ALLOW_ANY_UNCACHED INIT_VM_FLAG(ALLOW_ANY_UNCACHED)
+#define VM_SEALED INIT_VM_FLAG(SEALED)
+#else
+#define VM_ALLOW_ANY_UNCACHED VM_NONE
+#define VM_SEALED VM_NONE
+#endif
+#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
+#define VM_DROPPABLE INIT_VM_FLAG(DROPPABLE)
+#else
+#define VM_DROPPABLE VM_NONE
+#endif
/* Bits set in the VMA until the stack is in its final location */
-#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
+#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
@@ -400,10 +527,12 @@ extern unsigned int kobjsize(const void *objp);
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif
-#ifdef CONFIG_STACK_GROWSUP
-#define VM_STACK VM_GROWSUP
+#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
+
+#ifdef CONFIG_MSEAL_SYSTEM_MAPPINGS
+#define VM_SEALED_SYSMAP VM_SEALED
#else
-#define VM_STACK VM_GROWSDOWN
+#define VM_SEALED_SYSMAP VM_NONE
#endif
#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
@@ -411,77 +540,101 @@ extern unsigned int kobjsize(const void *objp);
/* VMA basic access permission flags */
#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
-
/*
* Special vmas that are non-mergable, non-mlock()able.
*/
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
+/*
+ * Physically remapped pages are special. Tell the
+ * rest of the world about it:
+ * VM_IO tells people not to look at these pages
+ * (accesses can have side effects).
+ * VM_PFNMAP tells the core MM that the base pages are just
+ * raw PFN mappings, and do not have a "struct page" associated
+ * with them.
+ * VM_DONTEXPAND
+ * Disable vma merging and expanding with mremap().
+ * VM_DONTDUMP
+ * Omit vma from core dump, even when VM_IO turned off.
+ */
+#define VM_REMAP_FLAGS (VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+
/* This mask prevents VMA from being scanned with khugepaged */
#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
/* This mask defines which mm->def_flags a process can inherit its parent */
#define VM_INIT_DEF_MASK VM_NOHUGEPAGE
-/* This mask is used to clear all the VMA flags used by mlock */
-#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT))
+/* This mask represents all the VMA flag bits used by mlock */
+#define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT)
+
+/* These flags can be updated atomically via VMA/mmap read lock. */
+#define VM_ATOMIC_SET_ALLOWED VM_MAYBE_GUARD
/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
-# define VM_ARCH_CLEAR VM_NONE
+#define VM_ARCH_CLEAR VM_NONE
#endif
#define VM_FLAGS_CLEAR (ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
/*
+ * Flags which should be 'sticky' on merge - that is, flags which, when one VMA
+ * possesses it but the other does not, the merged VMA should nonetheless have
+ * it applied:
+ *
+ * VM_SOFTDIRTY - if a VMA is marked soft-dirty, that is, has not had its
+ * references cleared via /proc/$pid/clear_refs, any merged VMA
+ * should be considered soft-dirty also as it operates at a VMA
+ * granularity.
+ *
+ * VM_MAYBE_GUARD - If a VMA may have guard regions in place, its mapped page
+ * tables may contain metadata not described by the VMA, and
+ * any merged VMA may contain this metadata too, so the flag
+ * must be sticky.
+ */
+#define VM_STICKY (VM_SOFTDIRTY | VM_MAYBE_GUARD)
+
+/*
+ * VMA flags we ignore for the purposes of merge, i.e. one VMA possessing one
+ * of these flags and the other not does not preclude a merge.
+ *
+ * VM_STICKY - When merging VMAs, VMA flags must match, unless they are
+ * 'sticky'. If any sticky flags exist in either VMA, we simply
+ * set all of them on the merged VMA.
+ */
+#define VM_IGNORE_MERGE VM_STICKY
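
/*
 * Illustrative sketch (not part of this patch): how a merge compatibility
 * check might use these masks. The helper names are hypothetical; the real
 * logic lives in the VMA merge code.
 */
static inline bool sketch_may_merge_flags(vm_flags_t a, vm_flags_t b)
{
	/* Sticky flags are ignored when comparing the two VMAs... */
	return (a & ~VM_IGNORE_MERGE) == (b & ~VM_IGNORE_MERGE);
}

static inline vm_flags_t sketch_merged_flags(vm_flags_t a, vm_flags_t b)
{
	/* ...and the union of any sticky flags is applied to the result. */
	return a | (b & VM_STICKY);
}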
+
+/*
+ * Flags which should result in page tables being copied on fork. These are
+ * flags which indicate that the VMA maps page tables which cannot be
+ * reconstituted upon page fault, and so necessitate page table copying upon
+ * fork:
+ *
+ * VM_PFNMAP / VM_MIXEDMAP - These contain kernel-mapped data which cannot be
+ * reasonably reconstructed on page fault.
+ *
+ * VM_UFFD_WP - Encodes metadata about an installed uffd
+ * write protect handler, which cannot be
+ * reconstructed on page fault.
+ *
+ * We always copy pgtables when dst_vma has uffd-wp
+ * enabled even if it's file-backed
+ * (e.g. shmem), because when uffd-wp is enabled the
+ * pgtable contains uffd-wp protection information
+ * that cannot be retrieved from the page cache, so
+ * skipping the copy would lose it.
+ *
+ * VM_MAYBE_GUARD - Could contain page guard region markers which
+ * by design are a property of the page tables
+ * only and thus cannot be reconstructed on page
+ * fault.
+ */
+#define VM_COPY_ON_FORK (VM_PFNMAP | VM_MIXEDMAP | VM_UFFD_WP | VM_MAYBE_GUARD)
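
/*
 * Illustrative sketch (not part of this patch): the fork path can consult
 * this mask to decide whether page tables must be copied eagerly instead of
 * being refaulted in the child. Hypothetical helper; compare vma_needs_copy()
 * in mm/memory.c for the real decision.
 */
static inline bool sketch_vma_needs_copy(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_COPY_ON_FORK)
		return true;
	if (vma->anon_vma)	/* anon pages exist only in page tables */
		return true;
	return false;		/* file-backed: refault from page cache */
}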
+
+/*
* mapping from the currently active vm_flags protection bits (the
* low four bits) to a page protection mask..
*/
-extern pgprot_t protection_map[16];
-
-/**
- * enum fault_flag - Fault flag definitions.
- * @FAULT_FLAG_WRITE: Fault was a write fault.
- * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
- * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
- * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
- * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
- * @FAULT_FLAG_TRIED: The fault has been tried once.
- * @FAULT_FLAG_USER: The fault originated in userspace.
- * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
- * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
- * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
- *
- * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
- * whether we would allow page faults to retry by specifying these two
- * fault flags correctly. Currently there can be three legal combinations:
- *
- * (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and
- * this is the first try
- *
- * (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and
- * we've already tried at least once
- *
- * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
- *
- * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
- * be used. Note that page faults can be allowed to retry for multiple times,
- * in which case we'll have an initial fault with flags (a) then later on
- * continuous faults with flags (b). We should always try to detect pending
- * signals before a retry to make sure the continuous page faults can still be
- * interrupted if necessary.
- */
-enum fault_flag {
- FAULT_FLAG_WRITE = 1 << 0,
- FAULT_FLAG_MKWRITE = 1 << 1,
- FAULT_FLAG_ALLOW_RETRY = 1 << 2,
- FAULT_FLAG_RETRY_NOWAIT = 1 << 3,
- FAULT_FLAG_KILLABLE = 1 << 4,
- FAULT_FLAG_TRIED = 1 << 5,
- FAULT_FLAG_USER = 1 << 6,
- FAULT_FLAG_REMOTE = 1 << 7,
- FAULT_FLAG_INSTRUCTION = 1 << 8,
- FAULT_FLAG_INTERRUPTIBLE = 1 << 9,
-};
/*
* The default fault flags that should be used by most of the
@@ -520,7 +673,8 @@ static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
{ FAULT_FLAG_USER, "USER" }, \
{ FAULT_FLAG_REMOTE, "REMOTE" }, \
{ FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }, \
- { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" }
+ { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" }, \
+ { FAULT_FLAG_VMA_LOCK, "VMA_LOCK" }
/*
* vm_fault is filled by the pagefault handler and passed to the vma's
@@ -537,7 +691,8 @@ struct vm_fault {
struct vm_area_struct *vma; /* Target VMA */
gfp_t gfp_mask; /* gfp mask to be used for allocations */
pgoff_t pgoff; /* Logical page offset based on vma */
- unsigned long address; /* Faulting virtual address */
+ unsigned long address; /* Faulting virtual address - masked */
+ unsigned long real_address; /* Faulting virtual address - unmasked */
};
enum fault_flag flags; /* FAULT_FLAG_xxx flags
* XXX: should really be 'const' */
@@ -546,7 +701,12 @@ struct vm_fault {
pud_t *pud; /* Pointer to pud entry matching
* the 'address'
*/
- pte_t orig_pte; /* Value of PTE at the time of fault */
+ union {
+ pte_t orig_pte; /* Value of PTE at the time of fault */
+ pmd_t orig_pmd; /* Value of PMD at the time of fault,
+ * used by PMD fault only.
+ */
+ };
struct page *cow_page; /* Page handler may use for COW fault */
struct page *page; /* ->fault handlers should return a
@@ -572,13 +732,6 @@ struct vm_fault {
*/
};
-/* page entry size for vm->huge_fault() */
-enum page_entry_size {
- PE_SIZE_PTE = 0,
- PE_SIZE_PMD,
- PE_SIZE_PUD,
-};
-
/*
* These are the virtual MM functions - opening of an area, closing and
* unmapping it (needed to keep files on disk up-to-date etc), pointer
@@ -586,6 +739,10 @@ enum page_entry_size {
*/
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
+ /**
+ * @close: Called when the VMA is being removed from the MM.
+ * Context: User context. May sleep. Caller holds mmap_lock.
+ */
void (*close)(struct vm_area_struct * area);
/* Called any time before splitting to check if it's allowed */
int (*may_split)(struct vm_area_struct *area, unsigned long addr);
@@ -593,13 +750,12 @@ struct vm_operations_struct {
/*
* Called by mprotect() to make driver-specific permission
* checks before mprotect() is finalised. The VMA must not
- * be modified. Returns 0 if eprotect() can proceed.
+ * be modified. Returns 0 if mprotect() can proceed.
*/
int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
unsigned long end, unsigned long newflags);
vm_fault_t (*fault)(struct vm_fault *vmf);
- vm_fault_t (*huge_fault)(struct vm_fault *vmf,
- enum page_entry_size pe_size);
+ vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
vm_fault_t (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
unsigned long (*pagesize)(struct vm_area_struct * area);
@@ -644,25 +800,231 @@ struct vm_operations_struct {
* policy.
*/
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
- unsigned long addr);
+ unsigned long addr, pgoff_t *ilx);
#endif
+#ifdef CONFIG_FIND_NORMAL_PAGE
/*
- * Called by vm_normal_page() for special PTEs to find the
- * page for @addr. This is useful if the default behavior
- * (using pte_page()) would not find the correct page.
+ * Called by vm_normal_page() for special PTEs in @vma at @addr. This
+ * allows for returning a "normal" page from vm_normal_page() even
+ * though the PTE indicates that the "struct page" either does not exist
+ * or should not be touched: "special".
+ *
+ * Do not add new users: this really only works when a "normal" page
+ * was mapped, but then the PTE got changed to something weird (+
+ * marked special) that would not make pte_pfn() identify the originally
+ * inserted page.
*/
- struct page *(*find_special_page)(struct vm_area_struct *vma,
- unsigned long addr);
+ struct page *(*find_normal_page)(struct vm_area_struct *vma,
+ unsigned long addr);
+#endif /* CONFIG_FIND_NORMAL_PAGE */
};
-static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
+#ifdef CONFIG_NUMA_BALANCING
+static inline void vma_numab_state_init(struct vm_area_struct *vma)
+{
+ vma->numab_state = NULL;
+}
+static inline void vma_numab_state_free(struct vm_area_struct *vma)
+{
+ kfree(vma->numab_state);
+}
+#else
+static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
+static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
+#endif /* CONFIG_NUMA_BALANCING */
+
+/*
+ * These must be here rather than mmap_lock.h as dependent on vm_fault type,
+ * declared in this header.
+ */
+#ifdef CONFIG_PER_VMA_LOCK
+static inline void release_fault_lock(struct vm_fault *vmf)
+{
+ if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+ vma_end_read(vmf->vma);
+ else
+ mmap_read_unlock(vmf->vma->vm_mm);
+}
+
+static inline void assert_fault_locked(const struct vm_fault *vmf)
+{
+ if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+ vma_assert_locked(vmf->vma);
+ else
+ mmap_assert_locked(vmf->vma->vm_mm);
+}
+#else
+static inline void release_fault_lock(struct vm_fault *vmf)
+{
+ mmap_read_unlock(vmf->vma->vm_mm);
+}
+
+static inline void assert_fault_locked(const struct vm_fault *vmf)
+{
+ mmap_assert_locked(vmf->vma->vm_mm);
+}
+#endif /* CONFIG_PER_VMA_LOCK */
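
/*
 * Illustrative sketch (not part of this patch): fault handlers that bail
 * out and drop the fault lock themselves can stay agnostic about whether
 * they were entered under the per-VMA lock or the mmap lock.
 * sketch_should_retry() is a hypothetical predicate.
 */
static vm_fault_t sketch_fault(struct vm_fault *vmf)
{
	assert_fault_locked(vmf);	/* whichever lock applies */

	if (sketch_should_retry(vmf)) {
		release_fault_lock(vmf); /* VMA read lock or mmap read lock */
		return VM_FAULT_RETRY;
	}
	return VM_FAULT_NOPAGE;
}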
+
+static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
+{
+ return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
+}
+
+static inline bool mm_flags_test_and_set(int flag, struct mm_struct *mm)
+{
+ return test_and_set_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
+}
+
+static inline bool mm_flags_test_and_clear(int flag, struct mm_struct *mm)
+{
+ return test_and_clear_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
+}
+
+static inline void mm_flags_set(int flag, struct mm_struct *mm)
+{
+ set_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
+}
+
+static inline void mm_flags_clear(int flag, struct mm_struct *mm)
+{
+ clear_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
+}
+
+static inline void mm_flags_clear_all(struct mm_struct *mm)
{
- static const struct vm_operations_struct dummy_vm_ops = {};
+ bitmap_zero(ACCESS_PRIVATE(&mm->flags, __mm_flags), NUM_MM_FLAG_BITS);
+}
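
/*
 * Illustrative sketch (not part of this patch): MM flags are plain bitops
 * on the private __mm_flags bitmap; e.g. GUP records that an mm has ever
 * pinned pages roughly like this (see mm/gup.c for the real code):
 */
static inline void sketch_mark_mm_pinned(struct mm_struct *mm)
{
	if (!mm_flags_test(MMF_HAS_PINNED, mm))
		mm_flags_set(MMF_HAS_PINNED, mm);
}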
+
+extern const struct vm_operations_struct vma_dummy_vm_ops;
+static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
+{
memset(vma, 0, sizeof(*vma));
vma->vm_mm = mm;
- vma->vm_ops = &dummy_vm_ops;
+ vma->vm_ops = &vma_dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
+ vma_lock_init(vma, false);
+}
+
+/* Use when VMA is not part of the VMA tree and needs no locking */
+static inline void vm_flags_init(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY));
+ vma_flags_clear_all(&vma->flags);
+ vma_flags_overwrite_word(&vma->flags, flags);
+}
+
+/*
+ * Use when VMA is part of the VMA tree and modifications need coordination
+ * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and
+ * it should be locked explicitly beforehand.
+ */
+static inline void vm_flags_reset(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY));
+ vma_assert_write_locked(vma);
+ vm_flags_init(vma, flags);
+}
+
+static inline void vm_flags_reset_once(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ vma_assert_write_locked(vma);
+ /*
+ * If VMA flags exist beyond the first system word, also clear these. It
+ * is assumed the write once behaviour is required only for the first
+ * system word.
+ */
+ if (NUM_VMA_FLAG_BITS > BITS_PER_LONG) {
+ unsigned long *bitmap = ACCESS_PRIVATE(&vma->flags, __vma_flags);
+
+ bitmap_zero(&bitmap[1], NUM_VMA_FLAG_BITS - BITS_PER_LONG);
+ }
+
+ vma_flags_overwrite_word_once(&vma->flags, flags);
+}
+
+static inline void vm_flags_set(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ vma_start_write(vma);
+ vma_flags_set_word(&vma->flags, flags);
+}
+
+static inline void vm_flags_clear(struct vm_area_struct *vma,
+ vm_flags_t flags)
+{
+ VM_WARN_ON_ONCE(!pgtable_supports_soft_dirty() && (flags & VM_SOFTDIRTY));
+ vma_start_write(vma);
+ vma_flags_clear_word(&vma->flags, flags);
+}
+
+/*
+ * Use only if VMA is not part of the VMA tree or has no other users and
+ * therefore needs no locking.
+ */
+static inline void __vm_flags_mod(struct vm_area_struct *vma,
+ vm_flags_t set, vm_flags_t clear)
+{
+ vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
+}
+
+/*
+ * Use only when the order of set/clear operations is unimportant, otherwise
+ * use vm_flags_{set|clear} explicitly.
+ */
+static inline void vm_flags_mod(struct vm_area_struct *vma,
+ vm_flags_t set, vm_flags_t clear)
+{
+ vma_start_write(vma);
+ __vm_flags_mod(vma, set, clear);
+}
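
/*
 * Illustrative sketch (not part of this patch): a driver ->mmap() handler
 * updating flags on a VMA already in the tree must use the locked helpers
 * rather than assigning to vm_flags directly:
 */
static int sketch_driver_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* vm_flags_set() takes the VMA write lock via vma_start_write(). */
	vm_flags_set(vma, VM_REMAP_FLAGS);	/* VM_IO | VM_PFNMAP | ... */
	return 0;
}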
+
+static inline bool __vma_flag_atomic_valid(struct vm_area_struct *vma,
+ vma_flag_t bit)
+{
+ const vm_flags_t mask = BIT((__force int)bit);
+
+ /* Only specific flags are permitted */
+ if (WARN_ON_ONCE(!(mask & VM_ATOMIC_SET_ALLOWED)))
+ return false;
+
+ return true;
+}
+
+/*
+ * Set VMA flag atomically. Requires only VMA/mmap read lock. Only specific
+ * valid flags are allowed to do this.
+ */
+static inline void vma_flag_set_atomic(struct vm_area_struct *vma,
+ vma_flag_t bit)
+{
+ unsigned long *bitmap = ACCESS_PRIVATE(&vma->flags, __vma_flags);
+
+ /* mmap read lock/VMA read lock must be held. */
+ if (!rwsem_is_locked(&vma->vm_mm->mmap_lock))
+ vma_assert_locked(vma);
+
+ if (__vma_flag_atomic_valid(vma, bit))
+ set_bit((__force int)bit, bitmap);
+}
+
+/*
+ * Test for VMA flag atomically. Requires no locks. Only specific valid flags
+ * are allowed to do this.
+ *
+ * This is necessarily racy, so callers must ensure that serialisation is
+ * achieved through some other means, or that races are permissible.
+ */
+static inline bool vma_flag_test_atomic(struct vm_area_struct *vma,
+ vma_flag_t bit)
+{
+ if (__vma_flag_atomic_valid(vma, bit))
+ return test_bit((__force int)bit, &vma->vm_flags);
+
+ return false;
}
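
/*
 * Illustrative sketch (not part of this patch): only bits in
 * VM_ATOMIC_SET_ALLOWED (currently just VM_MAYBE_GUARD) may be set this
 * way, e.g. when installing guard regions under the read lock. The
 * enumerator name VM_MAYBE_GUARD_BIT is assumed here, following the
 * INIT_VM_FLAG() convention used earlier in this header.
 */
static inline void sketch_mark_vma_guarded(struct vm_area_struct *vma)
{
	vma_flag_set_atomic(vma, VM_MAYBE_GUARD_BIT);
}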
static inline void vma_set_anonymous(struct vm_area_struct *vma)
@@ -675,7 +1037,32 @@ static inline bool vma_is_anonymous(struct vm_area_struct *vma)
return !vma->vm_ops;
}
-static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
+/*
+ * Indicate if the VMA is a heap for the given task; for
+ * /proc/PID/maps that is the heap of the main task.
+ */
+static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
+{
+ return vma->vm_start < vma->vm_mm->brk &&
+ vma->vm_end > vma->vm_mm->start_brk;
+}
+
+/*
+ * Indicate if the VMA is a stack for the given task; for
+ * /proc/PID/maps that is the stack of the main task.
+ */
+static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
+{
+ /*
+ * We make no effort to guess what a given thread considers to be
+ * its "stack". It's not even well-defined for programs written
+ * languages like Go.
+ */
+ return vma->vm_start <= vma->vm_mm->start_stack &&
+ vma->vm_end >= vma->vm_mm->start_stack;
+}
+
+static inline bool vma_is_temporary_stack(const struct vm_area_struct *vma)
{
int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
@@ -689,7 +1076,7 @@ static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
return false;
}
-static inline bool vma_is_foreign(struct vm_area_struct *vma)
+static inline bool vma_is_foreign(const struct vm_area_struct *vma)
{
if (!current->mm)
return true;
@@ -700,22 +1087,109 @@ static inline bool vma_is_foreign(struct vm_area_struct *vma)
return false;
}
-static inline bool vma_is_accessible(struct vm_area_struct *vma)
+static inline bool vma_is_accessible(const struct vm_area_struct *vma)
{
return vma->vm_flags & VM_ACCESS_FLAGS;
}
+static inline bool is_shared_maywrite(vm_flags_t vm_flags)
+{
+ return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
+ (VM_SHARED | VM_MAYWRITE);
+}
+
+static inline bool vma_is_shared_maywrite(const struct vm_area_struct *vma)
+{
+ return is_shared_maywrite(vma->vm_flags);
+}
+
+static inline
+struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
+{
+ return mas_find(&vmi->mas, max - 1);
+}
+
+static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
+{
+ /*
+ * Uses mas_find() to get the first VMA when the iterator starts.
+ * Calling mas_next() could skip the first entry.
+ */
+ return mas_find(&vmi->mas, ULONG_MAX);
+}
+
+static inline
+struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
+{
+ return mas_next_range(&vmi->mas, ULONG_MAX);
+}
+
+static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
+{
+ return mas_prev(&vmi->mas, 0);
+}
+
+static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
+ unsigned long start, unsigned long end, gfp_t gfp)
+{
+ __mas_set_range(&vmi->mas, start, end - 1);
+ mas_store_gfp(&vmi->mas, NULL, gfp);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* Free any unused preallocations */
+static inline void vma_iter_free(struct vma_iterator *vmi)
+{
+ mas_destroy(&vmi->mas);
+}
+
+static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
+ struct vm_area_struct *vma)
+{
+ vmi->mas.index = vma->vm_start;
+ vmi->mas.last = vma->vm_end - 1;
+ mas_store(&vmi->mas, vma);
+ if (unlikely(mas_is_err(&vmi->mas)))
+ return -ENOMEM;
+
+ vma_mark_attached(vma);
+ return 0;
+}
+
+static inline void vma_iter_invalidate(struct vma_iterator *vmi)
+{
+ mas_pause(&vmi->mas);
+}
+
+static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
+{
+ mas_set(&vmi->mas, addr);
+}
+
+#define for_each_vma(__vmi, __vma) \
+ while (((__vma) = vma_next(&(__vmi))) != NULL)
+
+/* The MM code likes to work with exclusive end addresses */
+#define for_each_vma_range(__vmi, __vma, __end) \
+ while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
+
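
/*
 * Illustrative sketch (not part of this patch): walking every VMA in an mm
 * with the iterator; VMA_ITERATOR() declares and initialises a
 * struct vma_iterator. The mmap lock must be held across the walk.
 */
static void sketch_walk_vmas(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma)
		pr_info("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
	mmap_read_unlock(mm);
}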
#ifdef CONFIG_SHMEM
/*
* The vma_is_shmem is not inline because it is used only by slow
* paths in userfault.
*/
-bool vma_is_shmem(struct vm_area_struct *vma);
+bool vma_is_shmem(const struct vm_area_struct *vma);
+bool vma_is_anon_shmem(const struct vm_area_struct *vma);
#else
-static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
+static inline bool vma_is_shmem(const struct vm_area_struct *vma) { return false; }
+static inline bool vma_is_anon_shmem(const struct vm_area_struct *vma) { return false; }
#endif
-int vma_is_stack_for_current(struct vm_area_struct *vma);
+int vma_is_stack_for_current(const struct vm_area_struct *vma);
/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
@@ -723,6 +1197,74 @@ int vma_is_stack_for_current(struct vm_area_struct *vma);
struct mmu_gather;
struct inode;
+extern void prep_compound_page(struct page *page, unsigned int order);
+
+static inline unsigned int folio_large_order(const struct folio *folio)
+{
+ return folio->_flags_1 & 0xff;
+}
+
+#ifdef NR_PAGES_IN_LARGE_FOLIO
+static inline unsigned long folio_large_nr_pages(const struct folio *folio)
+{
+ return folio->_nr_pages;
+}
+#else
+static inline unsigned long folio_large_nr_pages(const struct folio *folio)
+{
+ return 1L << folio_large_order(folio);
+}
+#endif
+
+/*
+ * compound_order() can be called without holding a reference, which means
+ * that niceties like page_folio() don't work. These callers should be
+ * prepared to handle wild return values. For example, PG_head may be
+ * set before the order is initialised, or this may be a tail page.
+ * See compaction.c for some good examples.
+ */
+static inline unsigned int compound_order(const struct page *page)
+{
+ const struct folio *folio = (struct folio *)page;
+
+ if (!test_bit(PG_head, &folio->flags.f))
+ return 0;
+ return folio_large_order(folio);
+}
+
+/**
+ * folio_order - The allocation order of a folio.
+ * @folio: The folio.
+ *
+ * A folio is composed of 2^order pages. See get_order() for the definition
+ * of order.
+ *
+ * Return: The order of the folio.
+ */
+static inline unsigned int folio_order(const struct folio *folio)
+{
+ if (!folio_test_large(folio))
+ return 0;
+ return folio_large_order(folio);
+}
+
+/**
+ * folio_reset_order - Reset the folio order and derived _nr_pages
+ * @folio: The folio.
+ *
+ * Reset the order and derived _nr_pages to 0. Must only be used in the
+ * process of splitting large folios.
+ */
+static inline void folio_reset_order(struct folio *folio)
+{
+ if (WARN_ON_ONCE(!folio_test_large(folio)))
+ return;
+ folio->_flags_1 &= ~0xffUL;
+#ifdef NR_PAGES_IN_LARGE_FOLIO
+ folio->_nr_pages = 0;
+#endif
+}
+
#include <linux/huge_mm.h>
/*
@@ -747,17 +1289,29 @@ static inline int put_page_testzero(struct page *page)
return page_ref_dec_and_test(page);
}
+static inline int folio_put_testzero(struct folio *folio)
+{
+ return put_page_testzero(&folio->page);
+}
+
/*
* Try to grab a ref unless the page has a refcount of zero, return false if
* that is the case.
* This can be called when MMU is off so it must not access
* any of the virtual mappings.
*/
-static inline int get_page_unless_zero(struct page *page)
+static inline bool get_page_unless_zero(struct page *page)
{
return page_ref_add_unless(page, 1, 0);
}
+static inline struct folio *folio_get_nontail_page(struct page *page)
+{
+ if (unlikely(!get_page_unless_zero(page)))
+ return NULL;
+ return (struct folio *)page;
+}
+
extern int page_is_ram(unsigned long pfn);
enum {
@@ -779,11 +1333,6 @@ unsigned long vmalloc_to_pfn(const void *addr);
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
* is no special casing required.
*/
-
-#ifndef is_ioremap_addr
-#define is_ioremap_addr(x) is_vmalloc_addr(x)
-#endif
-
#ifdef CONFIG_MMU
extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
@@ -798,99 +1347,78 @@ static inline int is_vmalloc_or_module_addr(const void *x)
}
#endif
-extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
-static inline void *kvmalloc(size_t size, gfp_t flags)
-{
- return kvmalloc_node(size, flags, NUMA_NO_NODE);
-}
-static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
-{
- return kvmalloc_node(size, flags | __GFP_ZERO, node);
-}
-static inline void *kvzalloc(size_t size, gfp_t flags)
-{
- return kvmalloc(size, flags | __GFP_ZERO);
-}
-
-static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
-{
- size_t bytes;
-
- if (unlikely(check_mul_overflow(n, size, &bytes)))
- return NULL;
-
- return kvmalloc(bytes, flags);
-}
-
-static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
+/*
+ * How many times the entire folio is mapped as a single unit (eg by a
+ * PMD or PUD entry). This is probably not what you want, except for
+ * debugging purposes or implementation of other core folio_*() primitives.
+ */
+static inline int folio_entire_mapcount(const struct folio *folio)
{
- return kvmalloc_array(n, size, flags | __GFP_ZERO);
+ VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+ if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio_large_order(folio) == 1))
+ return 0;
+ return atomic_read(&folio->_entire_mapcount) + 1;
}
-extern void kvfree(const void *addr);
-extern void kvfree_sensitive(const void *addr, size_t len);
-
-static inline int head_compound_mapcount(struct page *head)
+static inline int folio_large_mapcount(const struct folio *folio)
{
- return atomic_read(compound_mapcount_ptr(head)) + 1;
+ VM_WARN_ON_FOLIO(!folio_test_large(folio), folio);
+ return atomic_read(&folio->_large_mapcount) + 1;
}
-/*
- * Mapcount of compound page as a whole, does not include mapped sub-pages.
+/**
+ * folio_mapcount() - Number of mappings of this folio.
+ * @folio: The folio.
+ *
+ * The folio mapcount corresponds to the number of present user page table
+ * entries that reference any part of a folio. Each such present user page
+ * table entry must be paired with exactly one folio reference.
+ *
+ * For ordinary folios, each user page table entry (PTE/PMD/PUD/...) counts
+ * exactly once.
+ *
+ * For hugetlb folios, each abstracted "hugetlb" user page table entry that
+ * references the entire folio counts exactly once, even when such special
+ * page table entries are comprised of multiple ordinary page table entries.
+ *
+ * Will report 0 for pages which cannot be mapped into userspace, such as
+ * slab, page tables and similar.
*
- * Must be called only for compound pages or any their tail sub-pages.
+ * Return: The number of times this folio is mapped.
*/
-static inline int compound_mapcount(struct page *page)
+static inline int folio_mapcount(const struct folio *folio)
{
- VM_BUG_ON_PAGE(!PageCompound(page), page);
- page = compound_head(page);
- return head_compound_mapcount(page);
-}
+ int mapcount;
-/*
- * The atomic page->_mapcount, starts from -1: so that transitions
- * both from it and to it can be tracked, using atomic_inc_and_test
- * and atomic_add_negative(-1).
- */
-static inline void page_mapcount_reset(struct page *page)
-{
- atomic_set(&(page)->_mapcount, -1);
+ if (likely(!folio_test_large(folio))) {
+ mapcount = atomic_read(&folio->_mapcount) + 1;
+ if (page_mapcount_is_type(mapcount))
+ mapcount = 0;
+ return mapcount;
+ }
+ return folio_large_mapcount(folio);
}
-int __page_mapcount(struct page *page);
-
-/*
- * Mapcount of 0-order page; when compound sub-page, includes
- * compound_mapcount().
+/**
+ * folio_mapped - Is this folio mapped into userspace?
+ * @folio: The folio.
*
- * Result is undefined for pages which cannot be mapped into userspace.
- * For example SLAB or special types of pages. See function page_has_type().
- * They use this place in struct page differently.
+ * Return: True if any page in this folio is referenced by user page tables.
*/
-static inline int page_mapcount(struct page *page)
+static inline bool folio_mapped(const struct folio *folio)
{
- if (unlikely(PageCompound(page)))
- return __page_mapcount(page);
- return atomic_read(&page->_mapcount) + 1;
+ return folio_mapcount(folio) >= 1;
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-int total_mapcount(struct page *page);
-int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
-#else
-static inline int total_mapcount(struct page *page)
-{
- return page_mapcount(page);
-}
-static inline int page_trans_huge_mapcount(struct page *page,
- int *total_mapcount)
+/*
+ * Return true if this page is mapped into pagetables.
+ * For compound page it returns true if any sub-page of compound page is mapped,
+ * even if this particular sub-page is not itself mapped by any PTE or PMD.
+ */
+static inline bool page_mapped(const struct page *page)
{
- int mapcount = page_mapcount(page);
- if (total_mapcount)
- *total_mapcount = mapcount;
- return mapcount;
+ return folio_mapped(page_folio(page));
}
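
/*
 * Illustrative sketch (not part of this patch): reclaim-style code checks
 * mappings at folio granularity; "idle" here hypothetically means no user
 * page table references and only the caller's reference remaining.
 */
static inline bool sketch_folio_idle(const struct folio *folio)
{
	return !folio_mapped(folio) && folio_ref_count(folio) == 1;
}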
-#endif
static inline struct page *virt_to_head_page(const void *x)
{
@@ -899,92 +1427,23 @@ static inline struct page *virt_to_head_page(const void *x)
return compound_head(page);
}
-void __put_page(struct page *page);
-
-void put_pages_list(struct list_head *pages);
-
-void split_page(struct page *page, unsigned int order);
-
-/*
- * Compound pages have a destructor function. Provide a
- * prototype for that function and accessor functions.
- * These are _only_ valid on the head of a compound page.
- */
-typedef void compound_page_dtor(struct page *);
-
-/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
-enum compound_dtor_id {
- NULL_COMPOUND_DTOR,
- COMPOUND_PAGE_DTOR,
-#ifdef CONFIG_HUGETLB_PAGE
- HUGETLB_PAGE_DTOR,
-#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- TRANSHUGE_PAGE_DTOR,
-#endif
- NR_COMPOUND_DTORS,
-};
-extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS];
-
-static inline void set_compound_page_dtor(struct page *page,
- enum compound_dtor_id compound_dtor)
-{
- VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
- page[1].compound_dtor = compound_dtor;
-}
-
-static inline void destroy_compound_page(struct page *page)
-{
- VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
- compound_page_dtors[page[1].compound_dtor](page);
-}
-
-static inline unsigned int compound_order(struct page *page)
-{
- if (!PageHead(page))
- return 0;
- return page[1].compound_order;
-}
-
-static inline bool hpage_pincount_available(struct page *page)
+static inline struct folio *virt_to_folio(const void *x)
{
- /*
- * Can the page->hpage_pinned_refcount field be used? That field is in
- * the 3rd page of the compound page, so the smallest (2-page) compound
- * pages cannot support it.
- */
- page = compound_head(page);
- return PageCompound(page) && compound_order(page) > 1;
-}
+ struct page *page = virt_to_page(x);
-static inline int head_compound_pincount(struct page *head)
-{
- return atomic_read(compound_pincount_ptr(head));
+ return page_folio(page);
}
-static inline int compound_pincount(struct page *page)
-{
- VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
- page = compound_head(page);
- return head_compound_pincount(page);
-}
+void __folio_put(struct folio *folio);
-static inline void set_compound_order(struct page *page, unsigned int order)
-{
- page[1].compound_order = order;
- page[1].compound_nr = 1U << order;
-}
+void split_page(struct page *page, unsigned int order);
+void folio_copy(struct folio *dst, struct folio *src);
+int folio_mc_copy(struct folio *dst, struct folio *src);
-/* Returns the number of pages in this potentially compound page. */
-static inline unsigned long compound_nr(struct page *page)
-{
- if (!PageHead(page))
- return 1;
- return page[1].compound_nr;
-}
+unsigned long nr_free_buffer_pages(void);
/* Returns the number of bytes in this potentially compound page. */
-static inline unsigned long page_size(struct page *page)
+static inline unsigned long page_size(const struct page *page)
{
return PAGE_SIZE << compound_order(page);
}
@@ -995,7 +1454,26 @@ static inline unsigned int page_shift(struct page *page)
return PAGE_SHIFT + compound_order(page);
}
-void free_compound_page(struct page *page);
+/**
+ * thp_order - Order of a transparent huge page.
+ * @page: Head page of a transparent huge page.
+ */
+static inline unsigned int thp_order(struct page *page)
+{
+ VM_BUG_ON_PGFLAGS(PageTail(page), page);
+ return compound_order(page);
+}
+
+/**
+ * thp_size - Size of a transparent huge page.
+ * @page: Head page of a transparent huge page.
+ *
+ * Return: Number of bytes in this page.
+ */
+static inline unsigned long thp_size(struct page *page)
+{
+ return PAGE_SIZE << thp_order(page);
+}
#ifdef CONFIG_MMU
/*
@@ -1007,15 +1485,15 @@ void free_compound_page(struct page *page);
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
- pte = pte_mkwrite(pte);
+ pte = pte_mkwrite(pte, vma);
return pte;
}
-vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
-void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
+vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *page);
+void set_pte_range(struct vm_fault *vmf, struct folio *folio,
+ struct page *page, unsigned int nr, unsigned long addr);
vm_fault_t finish_fault(struct vm_fault *vmf);
-vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif
/*
@@ -1052,9 +1530,9 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
* the page's disk buffers. PG_private must be set to tell the VM to call
* into the filesystem to release these pages.
*
- * A page may belong to an inode's memory mapping. In this case, page->mapping
- * is the pointer to the inode, and page->index is the file offset of the page,
- * in units of PAGE_SIZE.
+ * A folio may belong to an inode's memory mapping. In this case,
+ * folio->mapping points to the inode, and folio->index is the file
+ * offset of the folio, in units of PAGE_SIZE.
*
* If pagecache pages are not associated with an inode, they are said to be
* anonymous pages. These may become associated with the swapcache, and in that
@@ -1078,169 +1556,129 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
* back into memory.
*/
-/*
- * The zone field is never updated after free_area_init_core()
- * sets it, so none of the operations on it need to be atomic.
- */
-
-/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
-#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
-#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
-#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
-#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
-#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
+/* 127: arbitrary random number, small enough to assemble well */
+#define folio_ref_zero_or_close_to_overflow(folio) \
+ ((unsigned int) folio_ref_count(folio) + 127u <= 127u)
-/*
- * Define the bit shifts to access each section. For non-existent
- * sections we define the shift as 0; that plus a 0 mask ensures
- * the compiler will optimise away reference to them.
+/**
+ * folio_get - Increment the reference count on a folio.
+ * @folio: The folio.
+ *
+ * Context: May be called in any context, as long as you know that
+ * you have a refcount on the folio. If you do not already have one,
+ * folio_try_get() may be the right interface for you to use.
*/
-#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
-#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
-#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
-#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
-#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
-
-/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
-#ifdef NODE_NOT_IN_PAGE_FLAGS
-#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
-#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
- SECTIONS_PGOFF : ZONES_PGOFF)
-#else
-#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
-#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
- NODES_PGOFF : ZONES_PGOFF)
-#endif
-
-#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
-
-#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
-#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
-#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
-#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
-#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
-#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
-
-static inline enum zone_type page_zonenum(const struct page *page)
+static inline void folio_get(struct folio *folio)
{
- ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
- return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+ VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
+ folio_ref_inc(folio);
}
-#ifdef CONFIG_ZONE_DEVICE
-static inline bool is_zone_device_page(const struct page *page)
-{
- return page_zonenum(page) == ZONE_DEVICE;
-}
-extern void memmap_init_zone_device(struct zone *, unsigned long,
- unsigned long, struct dev_pagemap *);
-#else
-static inline bool is_zone_device_page(const struct page *page)
-{
- return false;
-}
-#endif
-
-static inline bool is_zone_movable_page(const struct page *page)
+static inline void get_page(struct page *page)
{
- return page_zonenum(page) == ZONE_MOVABLE;
+ struct folio *folio = page_folio(page);
+ if (WARN_ON_ONCE(folio_test_slab(folio)))
+ return;
+ if (WARN_ON_ONCE(folio_test_large_kmalloc(folio)))
+ return;
+ folio_get(folio);
}
-#ifdef CONFIG_DEV_PAGEMAP_OPS
-void free_devmap_managed_page(struct page *page);
-DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
-
-static inline bool page_is_devmap_managed(struct page *page)
+static inline __must_check bool try_get_page(struct page *page)
{
- if (!static_branch_unlikely(&devmap_managed_key))
- return false;
- if (!is_zone_device_page(page))
+ page = compound_head(page);
+ if (WARN_ON_ONCE(page_ref_count(page) <= 0))
return false;
- switch (page->pgmap->type) {
- case MEMORY_DEVICE_PRIVATE:
- case MEMORY_DEVICE_FS_DAX:
- return true;
- default:
- break;
- }
- return false;
-}
-
-void put_devmap_managed_page(struct page *page);
-
-#else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool page_is_devmap_managed(struct page *page)
-{
- return false;
-}
-
-static inline void put_devmap_managed_page(struct page *page)
-{
+ page_ref_inc(page);
+ return true;
}
-#endif /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool is_device_private_page(const struct page *page)
+/**
+ * folio_put - Decrement the reference count on a folio.
+ * @folio: The folio.
+ *
+ * If the folio's reference count reaches zero, the memory will be
+ * released back to the page allocator and may be used by another
+ * allocation immediately. Do not access the memory or the struct folio
+ * after calling folio_put() unless you can be sure that it wasn't the
+ * last reference.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folio_put(struct folio *folio)
{
- return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
- IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
- is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PRIVATE;
+ if (folio_put_testzero(folio))
+ __folio_put(folio);
}
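
/*
 * Illustrative sketch (not part of this patch): the canonical reference
 * pairing. After folio_put() the folio must not be touched unless another
 * reference is known to be held.
 */
static void sketch_use_folio(struct folio *folio)
{
	folio_get(folio);	/* caller must already hold a reference */
	/* ... operate on the folio ... */
	folio_put(folio);	/* may free the folio if it was the last ref */
}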
-static inline bool is_pci_p2pdma_page(const struct page *page)
+/**
+ * folio_put_refs - Reduce the reference count on a folio.
+ * @folio: The folio.
+ * @refs: The amount to subtract from the folio's reference count.
+ *
+ * If the folio's reference count reaches zero, the memory will be
+ * released back to the page allocator and may be used by another
+ * allocation immediately. Do not access the memory or the struct folio
+ * after calling folio_put_refs() unless you can be sure that these weren't
+ * the last references.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folio_put_refs(struct folio *folio, int refs)
{
- return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) &&
- IS_ENABLED(CONFIG_PCI_P2PDMA) &&
- is_zone_device_page(page) &&
- page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
+ if (folio_ref_sub_and_test(folio, refs))
+ __folio_put(folio);
}
-/* 127: arbitrary random number, small enough to assemble well */
-#define page_ref_zero_or_close_to_overflow(page) \
- ((unsigned int) page_ref_count(page) + 127u <= 127u)
+void folios_put_refs(struct folio_batch *folios, unsigned int *refs);
-static inline void get_page(struct page *page)
-{
- page = compound_head(page);
- /*
- * Getting a normal page or the head of a compound page
- * requires to already have an elevated page->_refcount.
- */
- VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
- page_ref_inc(page);
-}
-
-bool __must_check try_grab_page(struct page *page, unsigned int flags);
-__maybe_unused struct page *try_grab_compound_head(struct page *page, int refs,
- unsigned int flags);
+/*
+ * union release_pages_arg - an array of pages or folios
+ *
+ * release_pages() releases a simple array of multiple pages, and
+ * accepts various different forms of said page array: either
+ * a regular old boring array of pages, an array of folios, or
+ * an array of encoded page pointers.
+ *
+ * The transparent union syntax for this kind of "any of these
+ * argument types" is all kinds of ugly, so look away.
+ */
+typedef union {
+ struct page **pages;
+ struct folio **folios;
+ struct encoded_page **encoded_pages;
+} release_pages_arg __attribute__ ((__transparent_union__));
+void release_pages(release_pages_arg, int nr);
-static inline __must_check bool try_get_page(struct page *page)
+/**
+ * folios_put - Decrement the reference count on an array of folios.
+ * @folios: The folios.
+ *
+ * Like folio_put(), but for a batch of folios. This is more efficient
+ * than writing the loop yourself as it will optimise the locks which need
+ * to be taken if the folios are freed. The folios batch is returned
+ * empty and ready to be reused for another batch; there is no need to
+ * reinitialise it.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folios_put(struct folio_batch *folios)
{
- page = compound_head(page);
- if (WARN_ON_ONCE(page_ref_count(page) <= 0))
- return false;
- page_ref_inc(page);
- return true;
+ folios_put_refs(folios, NULL);
}
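
/*
 * Illustrative sketch (not part of this patch): batching puts through
 * struct folio_batch (linux/pagevec.h). folios_put() leaves the batch
 * empty and ready for reuse, so no reinitialisation is needed.
 */
static void sketch_put_folios(struct folio **folios, unsigned int nr)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	for (i = 0; i < nr; i++) {
		if (!folio_batch_add(&fbatch, folios[i]))
			folios_put(&fbatch);	/* batch full: flush it */
	}
	folios_put(&fbatch);
}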
static inline void put_page(struct page *page)
{
- page = compound_head(page);
+ struct folio *folio = page_folio(page);
- /*
- * For devmap managed pages we need to catch refcount transition from
- * 2 to 1, when refcount reach one it means the page is free and we
- * need to inform the device driver through callback. See
- * include/linux/memremap.h and HMM for details.
- */
- if (page_is_devmap_managed(page)) {
- put_devmap_managed_page(page);
+ if (folio_test_slab(folio) || folio_test_large_kmalloc(folio))
return;
- }
- if (put_page_testzero(page))
- __put_page(page);
+ folio_put(folio);
}
/*
@@ -1269,83 +1707,41 @@ static inline void put_page(struct page *page)
* applications that don't have huge page reference counts, this won't be an
* issue.
*
- * Locking: the lockless algorithm described in page_cache_get_speculative()
- * and page_cache_gup_pin_speculative() provides safe operation for
- * get_user_pages and page_mkclean and other calls that race to set up page
- * table entries.
+ * Locking: the lockless algorithm described in folio_try_get_rcu()
+ * provides safe operation for get_user_pages(), folio_mkclean() and
+ * other calls that race to set up page table entries.
*/
#define GUP_PIN_COUNTING_BIAS (1U << 10)
void unpin_user_page(struct page *page);
+void unpin_folio(struct folio *folio);
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
bool make_dirty);
void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
bool make_dirty);
void unpin_user_pages(struct page **pages, unsigned long npages);
-
-/**
- * page_maybe_dma_pinned - Report if a page is pinned for DMA.
- * @page: The page.
- *
- * This function checks if a page has been pinned via a call to
- * a function in the pin_user_pages() family.
- *
- * For non-huge pages, the return value is partially fuzzy: false is not fuzzy,
- * because it means "definitely not pinned for DMA", but true means "probably
- * pinned for DMA, but possibly a false positive due to having at least
- * GUP_PIN_COUNTING_BIAS worth of normal page references".
- *
- * False positives are OK, because: a) it's unlikely for a page to get that many
- * refcounts, and b) all the callers of this routine are expected to be able to
- * deal gracefully with a false positive.
- *
- * For huge pages, the result will be exactly correct. That's because we have
- * more tracking data available: the 3rd struct page in the compound page is
- * used to track the pincount (instead using of the GUP_PIN_COUNTING_BIAS
- * scheme).
- *
- * For more information, please see Documentation/core-api/pin_user_pages.rst.
- *
- * Return: True, if it is likely that the page has been "dma-pinned".
- * False, if the page is definitely not dma-pinned.
- */
-static inline bool page_maybe_dma_pinned(struct page *page)
-{
- if (hpage_pincount_available(page))
- return compound_pincount(page) > 0;
-
- /*
- * page_ref_count() is signed. If that refcount overflows, then
- * page_ref_count() returns a negative value, and callers will avoid
- * further incrementing the refcount.
- *
- * Here, for that overflow case, use the signed bit to count a little
- * bit higher via unsigned math, and thus still get an accurate result.
- */
- return ((unsigned int)page_ref_count(compound_head(page))) >=
- GUP_PIN_COUNTING_BIAS;
-}
+void unpin_user_folio(struct folio *folio, unsigned long npages);
+void unpin_folios(struct folio **folios, unsigned long nfolios);
static inline bool is_cow_mapping(vm_flags_t flags)
{
return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
-/*
- * This should most likely only be called during fork() to see whether we
- * should break the cow immediately for a page on the src mm.
- */
-static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
- struct page *page)
+#ifndef CONFIG_MMU
+static inline bool is_nommu_shared_mapping(vm_flags_t flags)
{
- if (!is_cow_mapping(vma->vm_flags))
- return false;
-
- if (!atomic_read(&vma->vm_mm->has_pinned))
- return false;
-
- return page_maybe_dma_pinned(page);
+ /*
+ * NOMMU shared mappings are ordinary MAP_SHARED mappings and selected
+ * R/O MAP_PRIVATE file mappings that are an effective R/O overlay of
+ * a file mapping. R/O MAP_PRIVATE mappings might still modify
+ * underlying memory if ptrace is active, so this is only possible if
+ * ptrace does not apply. Note that there is no mprotect() to upgrade
+ * write permissions later.
+ */
+ return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
}
+#endif
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
@@ -1361,21 +1757,41 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
*/
static inline int page_zone_id(struct page *page)
{
- return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
+ return (page->flags.f >> ZONEID_PGSHIFT) & ZONEID_MASK;
}
#ifdef NODE_NOT_IN_PAGE_FLAGS
-extern int page_to_nid(const struct page *page);
+int memdesc_nid(memdesc_flags_t mdf);
#else
+static inline int memdesc_nid(memdesc_flags_t mdf)
+{
+ return (mdf.f >> NODES_PGSHIFT) & NODES_MASK;
+}
+#endif
+
static inline int page_to_nid(const struct page *page)
{
- struct page *p = (struct page *)page;
+ return memdesc_nid(PF_POISONED_CHECK(page)->flags);
+}
- return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
+static inline int folio_nid(const struct folio *folio)
+{
+ return memdesc_nid(folio->flags);
}
-#endif
#ifdef CONFIG_NUMA_BALANCING
+/* page access time bits needs to hold at least 4 seconds */
+#define PAGE_ACCESS_TIME_MIN_BITS 12
+#if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
+#define PAGE_ACCESS_TIME_BUCKETS \
+ (PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
+#else
+#define PAGE_ACCESS_TIME_BUCKETS 0
+#endif
+
+#define PAGE_ACCESS_TIME_MASK \
+ (LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)
+
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
@@ -1413,41 +1829,67 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
-static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
+static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
- return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
+ return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}
-static inline int page_cpupid_last(struct page *page)
+static inline int folio_last_cpupid(struct folio *folio)
{
- return page->_last_cpupid;
+ return folio->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
-static inline int page_cpupid_last(struct page *page)
+static inline int folio_last_cpupid(struct folio *folio)
{
- return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
+ return (folio->flags.f >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}
-extern int page_cpupid_xchg_last(struct page *page, int cpupid);
+int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
static inline void page_cpupid_reset_last(struct page *page)
{
- page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
+ page->flags.f |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
+
+static inline int folio_xchg_access_time(struct folio *folio, int time)
+{
+ int last_time;
+
+ last_time = folio_xchg_last_cpupid(folio,
+ time >> PAGE_ACCESS_TIME_BUCKETS);
+ return last_time << PAGE_ACCESS_TIME_BUCKETS;
+}
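
/*
 * Illustrative numbers (not part of this patch): with LAST_CPUPID_SHIFT == 8,
 * PAGE_ACCESS_TIME_BUCKETS is 12 - 8 = 4, so the stored access time has a
 * granularity of 2^4 jiffies while still spanning at least 2^12 jiffies
 * (~4s at HZ=1000) before wrapping.
 */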
+
+static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
+{
+ unsigned int pid_bit;
+
+ pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
+ if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) {
+ __set_bit(pid_bit, &vma->numab_state->pids_active[1]);
+ }
+}
+
+bool folio_use_access_time(struct folio *folio);
#else /* !CONFIG_NUMA_BALANCING */
-static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
+static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
+{
+ return folio_nid(folio); /* XXX */
+}
+
+static inline int folio_xchg_access_time(struct folio *folio, int time)
{
- return page_to_nid(page); /* XXX */
+ return 0;
}
-static inline int page_cpupid_last(struct page *page)
+static inline int folio_last_cpupid(struct folio *folio)
{
- return page_to_nid(page); /* XXX */
+ return folio_nid(folio); /* XXX */
}
static inline int cpupid_to_nid(int cpupid)
@@ -1483,6 +1925,14 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
return false;
}
+
+static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
+{
+}
+static inline bool folio_use_access_time(struct folio *folio)
+{
+ return false;
+}
#endif /* CONFIG_NUMA_BALANCING */
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
@@ -1495,10 +1945,10 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
static inline u8 page_kasan_tag(const struct page *page)
{
- u8 tag = 0xff;
+ u8 tag = KASAN_TAG_KERNEL;
if (kasan_enabled()) {
- tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+ tag = (page->flags.f >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
tag ^= 0xff;
}
@@ -1507,17 +1957,24 @@ static inline u8 page_kasan_tag(const struct page *page)
static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
- if (kasan_enabled()) {
- tag ^= 0xff;
- page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
- page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
- }
+ unsigned long old_flags, flags;
+
+ if (!kasan_enabled())
+ return;
+
+ tag ^= 0xff;
+ old_flags = READ_ONCE(page->flags.f);
+ do {
+ flags = old_flags;
+ flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
+ flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+ } while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags)));
}
static inline void page_kasan_tag_reset(struct page *page)
{
if (kasan_enabled())
- page_kasan_tag_set(page, 0xff);
+ page_kasan_tag_set(page, KASAN_TAG_KERNEL);
}
#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
@@ -1542,28 +1999,228 @@ static inline pg_data_t *page_pgdat(const struct page *page)
return NODE_DATA(page_to_nid(page));
}
+static inline pg_data_t *folio_pgdat(const struct folio *folio)
+{
+ return NODE_DATA(folio_nid(folio));
+}
+
+static inline struct zone *folio_zone(const struct folio *folio)
+{
+ return &folio_pgdat(folio)->node_zones[folio_zonenum(folio)];
+}
+
#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
- page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
- page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
+ page->flags.f &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
+ page->flags.f |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}
-static inline unsigned long page_to_section(const struct page *page)
+static inline unsigned long memdesc_section(memdesc_flags_t mdf)
{
- return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
+ return (mdf.f >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
+}
+#else /* !SECTION_IN_PAGE_FLAGS */
+static inline unsigned long memdesc_section(memdesc_flags_t mdf)
+{
+ return 0;
+}
+#endif /* SECTION_IN_PAGE_FLAGS */
+
+/**
+ * folio_pfn - Return the Page Frame Number of a folio.
+ * @folio: The folio.
+ *
+ * A folio may contain multiple pages. The pages have consecutive
+ * Page Frame Numbers.
+ *
+ * Return: The Page Frame Number of the first page in the folio.
+ */
+static inline unsigned long folio_pfn(const struct folio *folio)
+{
+ return page_to_pfn(&folio->page);
+}
+
+static inline struct folio *pfn_folio(unsigned long pfn)
+{
+ return page_folio(pfn_to_page(pfn));
+}
+
+#ifdef CONFIG_MMU
+static inline pte_t mk_pte(const struct page *page, pgprot_t pgprot)
+{
+ return pfn_pte(page_to_pfn(page), pgprot);
+}
+
+/**
+ * folio_mk_pte - Create a PTE for this folio
+ * @folio: The folio to create a PTE for
+ * @pgprot: The page protection bits to use
+ *
+ * Create a page table entry for the first page of this folio.
+ * This is suitable for passing to set_ptes().
+ *
+ * Return: A page table entry suitable for mapping this folio.
+ */
+static inline pte_t folio_mk_pte(const struct folio *folio, pgprot_t pgprot)
+{
+ return pfn_pte(folio_pfn(folio), pgprot);
+}
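
/*
 * Illustrative sketch (not part of this patch): a fault path that has
 * allocated a folio can build the PTE for its first page and install it
 * across the folio with set_ptes(). Locking and accounting are omitted.
 */
static void sketch_map_folio(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, struct folio *folio)
{
	pte_t entry = folio_mk_pte(folio, vma->vm_page_prot);

	set_ptes(vma->vm_mm, addr, ptep, entry, folio_nr_pages(folio));
}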
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/**
+ * folio_mk_pmd - Create a PMD for this folio
+ * @folio: The folio to create a PMD for
+ * @pgprot: The page protection bits to use
+ *
+ * Create a page table entry for the first page of this folio.
+ * This is suitable for passing to set_pmd_at().
+ *
+ * Return: A page table entry suitable for mapping this folio.
+ */
+static inline pmd_t folio_mk_pmd(const struct folio *folio, pgprot_t pgprot)
+{
+ return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
+}
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+/**
+ * folio_mk_pud - Create a PUD for this folio
+ * @folio: The folio to create a PUD for
+ * @pgprot: The page protection bits to use
+ *
+ * Create a page table entry for the first page of this folio.
+ * This is suitable for passing to set_pud_at().
+ *
+ * Return: A page table entry suitable for mapping this folio.
+ */
+static inline pud_t folio_mk_pud(const struct folio *folio, pgprot_t pgprot)
+{
+ return pud_mkhuge(pfn_pud(folio_pfn(folio), pgprot));
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* CONFIG_MMU */
+
+static inline bool folio_has_pincount(const struct folio *folio)
+{
+ if (IS_ENABLED(CONFIG_64BIT))
+ return folio_test_large(folio);
+ return folio_order(folio) > 1;
+}
+
+/**
+ * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
+ * @folio: The folio.
+ *
+ * This function checks if a folio has been pinned via a call to
+ * a function in the pin_user_pages() family.
+ *
+ * For small folios, the return value is partially fuzzy: false is not fuzzy,
+ * because it means "definitely not pinned for DMA", but true means "probably
+ * pinned for DMA, but possibly a false positive due to having at least
+ * GUP_PIN_COUNTING_BIAS worth of normal folio references".
+ *
+ * False positives are OK, because: a) it's unlikely for a folio to
+ * get that many refcounts, and b) all the callers of this routine are
+ * expected to be able to deal gracefully with a false positive.
+ *
+ * For most large folios, the result will be exactly correct. That's because
+ * we have more tracking data available: the _pincount field is used
+ * instead of the GUP_PIN_COUNTING_BIAS scheme.
+ *
+ * For more information, please see Documentation/core-api/pin_user_pages.rst.
+ *
+ * Return: True, if it is likely that the folio has been "dma-pinned".
+ * False, if the folio is definitely not dma-pinned.
+ */
+static inline bool folio_maybe_dma_pinned(struct folio *folio)
+{
+ if (folio_has_pincount(folio))
+ return atomic_read(&folio->_pincount) > 0;
+
+ /*
+ * folio_ref_count() is signed. If that refcount overflows, then
+ * folio_ref_count() returns a negative value, and callers will avoid
+ * further incrementing the refcount.
+ *
+ * Here, for that overflow case, use the sign bit to count a little
+ * bit higher via unsigned math, and thus still get an accurate result.
+ */
+ return ((unsigned int)folio_ref_count(folio)) >=
+ GUP_PIN_COUNTING_BIAS;
}
-#endif
-/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */
+/*
+ * This should most likely only be called during fork() to see whether we
+ * should break the cow immediately for an anon page on the src mm.
+ *
+ * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
+ */
+static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
+ struct folio *folio)
+{
+ VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
+
+ if (!mm_flags_test(MMF_HAS_PINNED, vma->vm_mm))
+ return false;
+
+ return folio_maybe_dma_pinned(folio);
+}
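
/*
 * Illustrative sketch (not part of this patch): during fork(), a pinned
 * anon folio in a COW mapping must be copied immediately rather than shared
 * read-only, since the DMA pinner expects the physical pages to stay put.
 * Hypothetical helper mirroring the copy_present_page() logic in
 * mm/memory.c; the caller holds the PT lock and write_protect_seq as
 * required above.
 */
static inline bool sketch_must_copy_now(struct vm_area_struct *vma,
					struct folio *folio)
{
	return is_cow_mapping(vma->vm_flags) &&
	       folio_needs_cow_for_dma(vma, folio);
}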
+
+/**
+ * is_zero_page - Query if a page is a zero page
+ * @page: The page to query
+ *
+ * This returns true if @page is one of the permanent zero pages.
+ */
+static inline bool is_zero_page(const struct page *page)
+{
+ return is_zero_pfn(page_to_pfn(page));
+}
+
+/**
+ * is_zero_folio - Query if a folio is a zero page
+ * @folio: The folio to query
+ *
+ * This returns true if @folio is one of the permanent zero pages.
+ */
+static inline bool is_zero_folio(const struct folio *folio)
+{
+ return is_zero_page(&folio->page);
+}
+
+/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin folios */
#ifdef CONFIG_MIGRATION
-static inline bool is_pinnable_page(struct page *page)
+static inline bool folio_is_longterm_pinnable(struct folio *folio)
{
- return !(is_zone_movable_page(page) || is_migrate_cma_page(page)) ||
- is_zero_pfn(page_to_pfn(page));
+#ifdef CONFIG_CMA
+ int mt = folio_migratetype(folio);
+
+ if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
+ return false;
+#endif
+ /* The zero page can be "pinned" but gets special handling. */
+ if (is_zero_folio(folio))
+ return true;
+
+ /* Coherent device memory must always allow eviction. */
+ if (folio_is_device_coherent(folio))
+ return false;
+
+ /*
+ * Filesystems can only tolerate transient delays to truncate and
+ * hole-punch operations.
+ */
+ if (folio_is_fsdax(folio))
+ return false;
+
+ /* Otherwise, non-movable zone folios can be pinned. */
+ return !folio_is_zone_movable(folio);
}
#else
-static inline bool is_pinnable_page(struct page *page)
+static inline bool folio_is_longterm_pinnable(struct folio *folio)
{
return true;
}
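+
+/*
+ * A minimal sketch of how a longterm-pin path might consult this predicate
+ * (hypothetical helper; the real GUP code checks per batch and migrates
+ * offending folios rather than failing outright):
+ */
+static inline int example_check_longterm_pin(struct folio *folio)
+{
+ /* CMA, ZONE_MOVABLE and fsdax folios must not be pinned indefinitely. */
+ if (!folio_is_longterm_pinnable(folio))
+ return -EAGAIN; /* Caller would migrate the folio and retry. */
+ return 0;
+}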
@@ -1571,14 +2228,14 @@ static inline bool is_pinnable_page(struct page *page)
static inline void set_page_zone(struct page *page, enum zone_type zone)
{
- page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
- page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
+ page->flags.f &= ~(ZONES_MASK << ZONES_PGSHIFT);
+ page->flags.f |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}
static inline void set_page_node(struct page *page, unsigned long node)
{
- page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
- page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
+ page->flags.f &= ~(NODES_MASK << NODES_PGSHIFT);
+ page->flags.f |= (node & NODES_MASK) << NODES_PGSHIFT;
}
static inline void set_page_links(struct page *page, enum zone_type zone,
@@ -1591,16 +2248,243 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
#endif
}
+/**
+ * folio_nr_pages - The number of pages in the folio.
+ * @folio: The folio.
+ *
+ * Return: A positive power of two.
+ */
+static inline unsigned long folio_nr_pages(const struct folio *folio)
+{
+ if (!folio_test_large(folio))
+ return 1;
+ return folio_large_nr_pages(folio);
+}
+
+#if !defined(CONFIG_HAVE_GIGANTIC_FOLIOS)
/*
- * Some inline functions in vmstat.h depend on page_zone()
+ * We don't expect any folios that exceed buddy sizes (and consequently
+ * memory sections).
*/
-#include <linux/vmstat.h>
+#define MAX_FOLIO_ORDER MAX_PAGE_ORDER
+#elif defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+/*
+ * Only pages within a single memory section are guaranteed to be
+ * contiguous. By limiting folios to a single memory section, all folio
+ * pages are guaranteed to be contiguous.
+ */
+#define MAX_FOLIO_ORDER PFN_SECTION_SHIFT
+#elif defined(CONFIG_HUGETLB_PAGE)
+/*
+ * There is no real limit on the folio size. We limit them to the maximum we
+ * currently expect (see CONFIG_HAVE_GIGANTIC_FOLIOS): with hugetlb, we expect
+ * no folios larger than 16 GiB on 64-bit and 1 GiB on 32-bit.
+ */
+#define MAX_FOLIO_ORDER get_order(IS_ENABLED(CONFIG_64BIT) ? SZ_16G : SZ_1G)
+#else
+/*
+ * Without hugetlb, gigantic folios that are bigger than a single PUD are
+ * currently impossible.
+ */
+#define MAX_FOLIO_ORDER PUD_ORDER
+#endif
-static __always_inline void *lowmem_page_address(const struct page *page)
+#define MAX_FOLIO_NR_PAGES (1UL << MAX_FOLIO_ORDER)
+
+/*
+ * compound_nr() returns the number of pages in this potentially compound
+ * page. compound_nr() can be called on a tail page, and is defined to
+ * return 1 in that case.
+ */
+static inline unsigned long compound_nr(const struct page *page)
{
- return page_to_virt(page);
+ const struct folio *folio = (struct folio *)page;
+
+ if (!test_bit(PG_head, &folio->flags.f))
+ return 1;
+ return folio_large_nr_pages(folio);
+}
+
+/**
+ * folio_next - Move to the next physical folio.
+ * @folio: The folio we're currently operating on.
+ *
+ * If you have physically contiguous memory which may span more than
+ * one folio (eg a &struct bio_vec), use this function to move from one
+ * folio to the next. Do not use it if the memory is only virtually
+ * contiguous as the folios are almost certainly not adjacent to each
+ * other. This is the folio equivalent to writing ``page++``.
+ *
+ * Context: We assume that the folios are refcounted and/or locked at a
+ * higher level and do not adjust the reference counts.
+ * Return: The next struct folio.
+ */
+static inline struct folio *folio_next(struct folio *folio)
+{
+ return (struct folio *)folio_page(folio, folio_nr_pages(folio));
+}
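+
+/*
+ * A minimal sketch (hypothetical helper, name assumed) of walking physically
+ * contiguous memory folio by folio with folio_nr_pages()/folio_next():
+ */
+static inline unsigned long example_count_folios(struct folio *folio,
+ unsigned long nr_pages)
+{
+ unsigned long nr_folios = 0;
+
+ while (nr_pages) {
+ nr_pages -= min(nr_pages, folio_nr_pages(folio));
+ folio = folio_next(folio);
+ nr_folios++;
+ }
+ return nr_folios;
+}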
+
+/**
+ * folio_shift - The size of the memory described by this folio.
+ * @folio: The folio.
+ *
+ * A folio represents a number of bytes which is a power-of-two in size.
+ * This function tells you which power-of-two the folio is. See also
+ * folio_size() and folio_order().
+ *
+ * Context: The caller should have a reference on the folio to prevent
+ * it from being split. It is not necessary for the folio to be locked.
+ * Return: The base-2 logarithm of the size of this folio.
+ */
+static inline unsigned int folio_shift(const struct folio *folio)
+{
+ return PAGE_SHIFT + folio_order(folio);
}
+/**
+ * folio_size - The number of bytes in a folio.
+ * @folio: The folio.
+ *
+ * Context: The caller should have a reference on the folio to prevent
+ * it from being split. It is not necessary for the folio to be locked.
+ * Return: The number of bytes in this folio.
+ */
+static inline size_t folio_size(const struct folio *folio)
+{
+ return PAGE_SIZE << folio_order(folio);
+}
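+
+/*
+ * A minimal sketch (hypothetical helper): because folio sizes are powers of
+ * two, "bytes left in this folio after offset" is a mask-and-subtract.
+ * Assumes offset < folio_size(folio).
+ */
+static inline size_t example_folio_bytes_after(const struct folio *folio,
+ size_t offset)
+{
+ return folio_size(folio) - (offset & (folio_size(folio) - 1));
+}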
+
+/**
+ * folio_maybe_mapped_shared - Whether the folio is mapped into the page
+ * tables of more than one MM
+ * @folio: The folio.
+ *
+ * This function checks if the folio may currently be mapped into more than one
+ * MM ("maybe mapped shared"), or if the folio is certainly mapped into a single
+ * MM ("mapped exclusively").
+ *
+ * For KSM folios, this function also returns "mapped shared" when a folio is
+ * mapped multiple times into the same MM, because the individual page mappings
+ * are independent.
+ *
+ * For small anonymous folios and anonymous hugetlb folios, the return
+ * value will be exactly correct: non-KSM folios can only be mapped at most once
+ * into an MM, and they cannot be partially mapped. KSM folios are
+ * considered shared even if mapped multiple times into the same MM.
+ *
+ * For other folios, the result can be fuzzy:
+ * #. For partially-mappable large folios (THP), the return value can wrongly
+ * indicate "mapped shared" (false positive) if a folio was mapped by
+ * more than two MMs at one point in time.
+ * #. For pagecache folios (including hugetlb), the return value can wrongly
+ * indicate "mapped shared" (false positive) when two VMAs in the same MM
+ * cover the same file range.
+ *
+ * Further, this function only considers current page table mappings that
+ * are tracked using the folio mapcount(s).
+ *
+ * This function does not consider:
+ * #. If the folio might get mapped in the (near) future (e.g., swapcache,
+ * pagecache, temporary unmapping for migration).
+ * #. If the folio is mapped differently (VM_PFNMAP).
+ * #. If hugetlb page table sharing applies. Callers might want to check
+ * hugetlb_pmd_shared().
+ *
+ * Return: Whether the folio is estimated to be mapped into more than one MM.
+ */
+static inline bool folio_maybe_mapped_shared(struct folio *folio)
+{
+ int mapcount = folio_mapcount(folio);
+
+ /* Only partially-mappable folios require more care. */
+ if (!folio_test_large(folio) || unlikely(folio_test_hugetlb(folio)))
+ return mapcount > 1;
+
+ /*
+ * vm_insert_page() without CONFIG_TRANSPARENT_HUGEPAGE ...
+ * simply assume "mapped shared"; nobody should really care
+ * about this for arbitrary kernel allocations.
+ */
+ if (!IS_ENABLED(CONFIG_MM_ID))
+ return true;
+
+ /*
+ * A single mapping implies "mapped exclusively", even if the
+ * folio flag says something different: it's easier to handle this
+ * case here instead of on the RMAP hot path.
+ */
+ if (mapcount <= 1)
+ return false;
+ return test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids);
+}
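+
+/*
+ * A minimal usage sketch (hypothetical caller): the fuzzy direction is
+ * "maybe shared", so only a "mapped exclusively" result should unlock
+ * optimizations; a false positive then merely costs the optimization.
+ */
+static inline bool example_can_reuse_exclusively(struct folio *folio)
+{
+ return !folio_maybe_mapped_shared(folio);
+}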
+
+/**
+ * folio_expected_ref_count - calculate the expected folio refcount
+ * @folio: the folio
+ *
+ * Calculate the expected folio refcount, taking references from the pagecache,
+ * swapcache, PG_private and page table mappings into account. Useful in
+ * combination with folio_ref_count() to detect unexpected references (e.g.,
+ * GUP or other temporary references).
+ *
+ * This currently does not consider references from the LRU cache. If the
+ * folio was isolated from the LRU (which is the case during migration or
+ * split), the LRU cache does not apply.
+ *
+ * Calling this function on an unmapped folio -- !folio_mapped() -- that is
+ * locked will return a stable result.
+ *
+ * Calling this function on a mapped folio will not result in a stable result,
+ * because nothing stops additional page table mappings from coming (e.g.,
+ * fork()) or going (e.g., munmap()).
+ *
+ * Calling this function without the folio lock will also not result in a
+ * stable result: for example, the folio might get dropped from the swapcache
+ * concurrently.
+ *
+ * However, even when called without the folio lock or on a mapped folio,
+ * this function can be used to detect unexpected references early (for
+ * example, to decide whether it is even worth locking the folio and
+ * unmapping it).
+ *
+ * The caller must add any reference (e.g., from folio_try_get()) it might be
+ * holding itself to the result.
+ *
+ * Return: The expected folio refcount.
+ */
+static inline int folio_expected_ref_count(const struct folio *folio)
+{
+ const int order = folio_order(folio);
+ int ref_count = 0;
+
+ if (WARN_ON_ONCE(page_has_type(&folio->page) && !folio_test_hugetlb(folio)))
+ return 0;
+
+ if (folio_test_anon(folio)) {
+ /* One reference per page from the swapcache. */
+ ref_count += folio_test_swapcache(folio) << order;
+ } else {
+ /* One reference per page from the pagecache. */
+ ref_count += !!folio->mapping << order;
+ /* One reference from PG_private. */
+ ref_count += folio_test_private(folio);
+ }
+
+ /* One reference per page table mapping. */
+ return ref_count + folio_mapcount(folio);
+}
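+
+/*
+ * A minimal sketch (hypothetical helper) of the intended pairing with
+ * folio_ref_count(); the +1 is the reference the caller is assumed to hold
+ * itself, e.g. from folio_try_get().
+ */
+static inline bool example_folio_has_unexpected_refs(struct folio *folio)
+{
+ return folio_ref_count(folio) != folio_expected_ref_count(folio) + 1;
+}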
+
+#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
+static inline int arch_make_folio_accessible(struct folio *folio)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Some inline functions in vmstat.h depend on page_zone()
+ */
+#include <linux/vmstat.h>
+
#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif
@@ -1623,55 +2507,50 @@ void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif
+static __always_inline void *lowmem_page_address(const struct page *page)
+{
+ return page_to_virt(page);
+}
+
#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address) do { } while(0)
#define page_address_init() do { } while(0)
#endif
-extern void *page_rmapping(struct page *page);
-extern struct anon_vma *page_anon_vma(struct page *page);
-extern struct address_space *page_mapping(struct page *page);
-
-extern struct address_space *__page_file_mapping(struct page *);
-
-static inline
-struct address_space *page_file_mapping(struct page *page)
+static inline void *folio_address(const struct folio *folio)
{
- if (unlikely(PageSwapCache(page)))
- return __page_file_mapping(page);
-
- return page->mapping;
+ return page_address(&folio->page);
}
-extern pgoff_t __page_file_index(struct page *page);
-
/*
- * Return the pagecache index of the passed page. Regular pagecache pages
- * use ->index whereas swapcache pages use swp_offset(->private)
+ * Return true only if the page has been allocated with
+ * ALLOC_NO_WATERMARKS and the low watermark was not
+ * met implying that the system is under some pressure.
*/
-static inline pgoff_t page_index(struct page *page)
+static inline bool page_is_pfmemalloc(const struct page *page)
{
- if (unlikely(PageSwapCache(page)))
- return __page_file_index(page);
- return page->index;
+ /*
+ * lru.next has bit 1 set if the page is allocated from the
+ * pfmemalloc reserves. Callers may simply overwrite it if
+ * they do not need to preserve that information.
+ */
+ return (uintptr_t)page->lru.next & BIT(1);
}
-bool page_mapped(struct page *page);
-struct address_space *page_mapping(struct page *page);
-
/*
- * Return true only if the page has been allocated with
+ * Return true only if the folio has been allocated with
* ALLOC_NO_WATERMARKS and the low watermark was not
 * met, implying that the system is under some pressure.
*/
-static inline bool page_is_pfmemalloc(const struct page *page)
+static inline bool folio_is_pfmemalloc(const struct folio *folio)
{
/*
- * Page index cannot be this large so this must be
- * a pfmemalloc page.
+ * lru.next has bit 1 set if the page is allocated from the
+ * pfmemalloc reserves. Callers may simply overwrite it if
+ * they do not need to preserve that information.
*/
- return page->index == -1UL;
+ return (uintptr_t)folio->lru.next & BIT(1);
}
/*
@@ -1680,12 +2559,12 @@ static inline bool page_is_pfmemalloc(const struct page *page)
*/
static inline void set_page_pfmemalloc(struct page *page)
{
- page->index = -1UL;
+ page->lru.next = (void *)BIT(1);
}
static inline void clear_page_pfmemalloc(struct page *page)
{
- page->index = 0;
+ page->lru.next = NULL;
}
/*
@@ -1694,44 +2573,59 @@ static inline void clear_page_pfmemalloc(struct page *page)
extern void pagefault_out_of_memory(void);
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
-#define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1))
+#define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
/*
- * Flags passed to show_mem() and show_free_areas() to suppress output in
- * various contexts.
+ * Parameter block passed down to zap_pte_range in exceptional cases.
*/
-#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
+struct zap_details {
+ struct folio *single_folio; /* Locked folio to be unmapped */
+ bool even_cows; /* Zap COWed private pages too? */
+ bool reclaim_pt; /* Need reclaim page tables? */
+ zap_flags_t zap_flags; /* Extra flags for zapping */
+};
-extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
+/*
+ * Whether to drop the pte markers, for example, the uffd-wp information for
+ * file-backed memory. This should only be specified when we will completely
+ * drop the page in the mm, either by truncation or unmapping of the vma. By
+ * default, the flag is not set.
+ */
+#define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0))
+/* Set in unmap_vmas() to indicate a final unmap call. Only used by hugetlb */
+#define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1))
#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
static inline bool can_do_mlock(void) { return false; }
#endif
-extern int user_shm_lock(size_t, struct user_struct *);
-extern void user_shm_unlock(size_t, struct user_struct *);
-
-/*
- * Parameter block passed down to zap_pte_range in exceptional cases.
- */
-struct zap_details {
- struct address_space *check_mapping; /* Check page->mapping if set */
- pgoff_t first_index; /* Lowest page->index to unmap */
- pgoff_t last_index; /* Highest page->index to unmap */
-};
+extern int user_shm_lock(size_t, struct ucounts *);
+extern void user_shm_unlock(size_t, struct ucounts *);
+struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
+struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t pmd);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
+struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t pud);
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
- unsigned long size);
-void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
- unsigned long start, unsigned long end);
+void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size, struct zap_details *details);
+static inline void zap_vma_pages(struct vm_area_struct *vma)
+{
+ zap_page_range_single(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start, NULL);
+}
+void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
+ struct vm_area_struct *start_vma, unsigned long start,
+ unsigned long end, unsigned long tree_end);
struct mmu_notifier_range;
@@ -1739,25 +2633,51 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
-int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
- struct mmu_notifier_range *range, pte_t **ptepp,
- pmd_t **pmdpp, spinlock_t **ptlp);
-int follow_pte(struct mm_struct *mm, unsigned long address,
- pte_t **ptepp, spinlock_t **ptlp);
-int follow_pfn(struct vm_area_struct *vma, unsigned long address,
- unsigned long *pfn);
-int follow_phys(struct vm_area_struct *vma, unsigned long address,
- unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
+struct follow_pfnmap_args {
+ /**
+ * Inputs:
+ * @vma: Pointer to the &struct vm_area_struct being walked
+ * @address: the virtual address to walk
+ */
+ struct vm_area_struct *vma;
+ unsigned long address;
+ /**
+ * Internals:
+ *
+ * The caller shouldn't touch any of these.
+ */
+ spinlock_t *lock;
+ pte_t *ptep;
+ /**
+ * Outputs:
+ *
+ * @pfn: the PFN of the address
+ * @addr_mask: address mask covering pfn
+ * @pgprot: the pgprot_t of the mapping
+ * @writable: whether the mapping is writable
+ * @special: whether the mapping is a special mapping (real PFN maps)
+ */
+ unsigned long pfn;
+ unsigned long addr_mask;
+ pgprot_t pgprot;
+ bool writable;
+ bool special;
+};
+int follow_pfnmap_start(struct follow_pfnmap_args *args);
+void follow_pfnmap_end(struct follow_pfnmap_args *args);
+
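+/*
+ * A minimal sketch (hypothetical caller, name assumed) of the expected call
+ * pattern: a short, non-sleeping section bracketed by
+ * follow_pfnmap_start()/follow_pfnmap_end(), with the outputs consumed
+ * before the end call.
+ */
+static inline int example_lookup_pfn(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long *pfn)
+{
+ struct follow_pfnmap_args args = { .vma = vma, .address = addr };
+ int err = follow_pfnmap_start(&args);
+
+ if (err)
+ return err;
+ *pfn = args.pfn; /* Only stable until follow_pfnmap_end(). */
+ follow_pfnmap_end(&args);
+ return 0;
+}
+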
extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
-int truncate_inode_page(struct address_space *mapping, struct page *page);
-int generic_error_remove_page(struct address_space *mapping, struct page *page);
-int invalidate_inode_page(struct page *page);
+int generic_error_remove_folio(struct address_space *mapping,
+ struct folio *folio);
+
+struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
+ unsigned long address, struct pt_regs *regs);
#ifdef CONFIG_MMU
extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
@@ -1798,89 +2718,103 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
unmap_mapping_range(mapping, holebegin, holelen, 0);
}
+static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
+ unsigned long addr);
+
extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
-extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
+
+#ifdef CONFIG_BPF_SYSCALL
+extern int copy_remote_vm_str(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
+#endif
long get_user_pages_remote(struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas, int *locked);
+ unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ int *locked);
long pin_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas, int *locked);
+ int *locked);
+
+/*
+ * Retrieves a single page alongside its VMA. Does not support FOLL_NOWAIT.
+ */
+static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
+ unsigned long addr,
+ int gup_flags,
+ struct vm_area_struct **vmap)
+{
+ struct page *page;
+ struct vm_area_struct *vma;
+ int got;
+
+ if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT)))
+ return ERR_PTR(-EINVAL);
+
+ got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
+
+ if (got < 0)
+ return ERR_PTR(got);
+
+ vma = vma_lookup(mm, addr);
+ if (WARN_ON_ONCE(!vma)) {
+ put_page(page);
+ return ERR_PTR(-EINVAL);
+ }
+
+ *vmap = vma;
+ return page;
+}
+
long get_user_pages(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas);
+ unsigned int gup_flags, struct page **pages);
long pin_user_pages(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas);
-long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages, int *locked);
-long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages, int *locked);
+ unsigned int gup_flags, struct page **pages);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
+long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
+ struct folio **folios, unsigned int max_folios,
+ pgoff_t *offset);
+int folio_add_pins(struct folio *folio, unsigned int pins);
int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
int pin_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
+void folio_add_pin(struct folio *folio);
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
- struct task_struct *task, bool bypass_rlim);
+ const struct task_struct *task, bool bypass_rlim);
struct kvec;
-int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
- struct page **pages);
-int get_kernel_page(unsigned long start, int write, struct page **pages);
-struct page *get_dump_page(unsigned long addr);
-
-extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
-extern void do_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length);
-
-void __set_page_dirty(struct page *, struct address_space *, int warn);
-int __set_page_dirty_nobuffers(struct page *page);
-int __set_page_dirty_no_writeback(struct page *page);
-int redirty_page_for_writepage(struct writeback_control *wbc,
- struct page *page);
-void account_page_dirtied(struct page *page, struct address_space *mapping);
-void account_page_cleaned(struct page *page, struct address_space *mapping,
- struct bdi_writeback *wb);
-int set_page_dirty(struct page *page);
+struct page *get_dump_page(unsigned long addr, int *locked);
+
+bool folio_mark_dirty(struct folio *folio);
+bool folio_mark_dirty_lock(struct folio *folio);
+bool set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
-void __cancel_dirty_page(struct page *page);
-static inline void cancel_dirty_page(struct page *page)
-{
- /* Avoid atomic ops, locking, etc. when not actually needed. */
- if (PageDirty(page))
- __cancel_dirty_page(page);
-}
-int clear_page_dirty_for_io(struct page *page);
int get_cmdline(struct task_struct *task, char *buffer, int buflen);
-extern unsigned long move_page_tables(struct vm_area_struct *vma,
- unsigned long old_addr, struct vm_area_struct *new_vma,
- unsigned long new_addr, unsigned long len,
- bool need_rmap_locks);
-
/*
* Flags used by change_protection(). For now we make it a bitmap so
 * that we can pass in multiple flags just like parameters. However,
 * for now all the callers only use one of the flags at a time.
*/
-/* Whether we should allow dirty bit accounting */
-#define MM_CP_DIRTY_ACCT (1UL << 0)
+/*
+ * Whether we should manually check if we can map individual PTEs writable,
+ * because something (e.g., COW, uffd-wp) blocks that from happening for all
+ * PTEs automatically in a writable mapping.
+ */
+#define MM_CP_TRY_CHANGE_WRITABLE (1UL << 0)
/* Whether this protection change is for NUMA hints */
#define MM_CP_PROT_NUMA (1UL << 1)
/* Whether this change is for write protecting */
@@ -1889,20 +2823,20 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
MM_CP_UFFD_WP_RESOLVE)
-extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgprot_t newprot,
- unsigned long cp_flags);
-extern int mprotect_fixup(struct vm_area_struct *vma,
- struct vm_area_struct **pprev, unsigned long start,
- unsigned long end, unsigned long newflags);
+bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
+extern long change_protection(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, unsigned long cp_flags);
+extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
+ struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ unsigned long start, unsigned long end, vm_flags_t newflags);
/*
* doesn't attempt to fault and will return short.
*/
int get_user_pages_fast_only(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
-int pin_user_pages_fast_only(unsigned long start, int nr_pages,
- unsigned int gup_flags, struct page **pages);
static inline bool get_user_page_fast_only(unsigned long addr,
unsigned int gup_flags, struct page **pagep)
@@ -1914,55 +2848,50 @@ static inline bool get_user_page_fast_only(unsigned long addr,
*/
static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
- long val = atomic_long_read(&mm->rss_stat.count[member]);
+ return percpu_counter_read_positive(&mm->rss_stat[member]);
+}
-#ifdef SPLIT_RSS_COUNTING
- /*
- * counter is updated in asynchronous manner and may go to minus.
- * But it's never be expected number for users.
- */
- if (val < 0)
- val = 0;
-#endif
- return (unsigned long)val;
+static inline unsigned long get_mm_counter_sum(struct mm_struct *mm, int member)
+{
+ return percpu_counter_sum_positive(&mm->rss_stat[member]);
}
-void mm_trace_rss_stat(struct mm_struct *mm, int member, long count);
+void mm_trace_rss_stat(struct mm_struct *mm, int member);
static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
- long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
+ percpu_counter_add(&mm->rss_stat[member], value);
- mm_trace_rss_stat(mm, member, count);
+ mm_trace_rss_stat(mm, member);
}
static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
- long count = atomic_long_inc_return(&mm->rss_stat.count[member]);
+ percpu_counter_inc(&mm->rss_stat[member]);
- mm_trace_rss_stat(mm, member, count);
+ mm_trace_rss_stat(mm, member);
}
static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
- long count = atomic_long_dec_return(&mm->rss_stat.count[member]);
+ percpu_counter_dec(&mm->rss_stat[member]);
- mm_trace_rss_stat(mm, member, count);
+ mm_trace_rss_stat(mm, member);
}
-/* Optimized variant when page is already known not to be PageAnon */
-static inline int mm_counter_file(struct page *page)
+/* Optimized variant when folio is already known not to be anon */
+static inline int mm_counter_file(struct folio *folio)
{
- if (PageSwapBacked(page))
+ if (folio_test_swapbacked(folio))
return MM_SHMEMPAGES;
return MM_FILEPAGES;
}
-static inline int mm_counter(struct page *page)
+static inline int mm_counter(struct folio *folio)
{
- if (PageAnon(page))
+ if (folio_test_anon(folio))
return MM_ANONPAGES;
- return mm_counter_file(page);
+ return mm_counter_file(folio);
}
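+
+/*
+ * A minimal usage sketch (hypothetical helper): when unmapping, pick the
+ * RSS counter via mm_counter() and adjust it by the number of pages.
+ */
+static inline void example_account_unmap(struct mm_struct *mm,
+ struct folio *folio, long nr_pages)
+{
+ add_mm_counter(mm, mm_counter(folio), -nr_pages);
+}
+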
static inline unsigned long get_mm_rss(struct mm_struct *mm)
@@ -1986,8 +2915,8 @@ static inline void update_hiwater_rss(struct mm_struct *mm)
{
unsigned long _rss = get_mm_rss(mm);
- if ((mm)->hiwater_rss < _rss)
- (mm)->hiwater_rss = _rss;
+ if (data_race(mm->hiwater_rss) < _rss)
+ data_race(mm->hiwater_rss = _rss);
}
static inline void update_hiwater_vm(struct mm_struct *mm)
@@ -2010,14 +2939,6 @@ static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
*maxrss = hiwater_rss;
}
-#if defined(SPLIT_RSS_COUNTING)
-void sync_mm_rss(struct mm_struct *mm);
-#else
-static inline void sync_mm_rss(struct mm_struct *mm)
-{
-}
-#endif
-
#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
static inline int pte_special(pte_t pte)
{
@@ -2030,14 +2951,29 @@ static inline pte_t pte_mkspecial(pte_t pte)
}
#endif
-#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline int pte_devmap(pte_t pte)
+#ifndef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+static inline bool pmd_special(pmd_t pmd)
{
- return 0;
+ return false;
}
-#endif
-int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
+static inline pmd_t pmd_mkspecial(pmd_t pmd)
+{
+ return pmd;
+}
+#endif /* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */
+
+#ifndef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+static inline bool pud_special(pud_t pud)
+{
+ return false;
+}
+
+static inline pud_t pud_mkspecial(pud_t pud)
+{
+ return pud;
+}
+#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl);
@@ -2172,42 +3108,166 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
}
#endif /* CONFIG_MMU */
-#if USE_SPLIT_PTE_PTLOCKS
+enum pt_flags {
+ PT_kernel = PG_referenced,
+ PT_reserved = PG_reserved,
+ /* High bits are used for zone/node/section */
+};
+
+static inline struct ptdesc *virt_to_ptdesc(const void *x)
+{
+ return page_ptdesc(virt_to_page(x));
+}
+
+/**
+ * ptdesc_address - Virtual address of page table.
+ * @pt: Page table descriptor.
+ *
+ * Return: The first byte of the page table described by @pt.
+ */
+static inline void *ptdesc_address(const struct ptdesc *pt)
+{
+ return folio_address(ptdesc_folio(pt));
+}
+
+static inline bool pagetable_is_reserved(struct ptdesc *pt)
+{
+ return test_bit(PT_reserved, &pt->pt_flags.f);
+}
+
+/**
+ * ptdesc_set_kernel - Mark a ptdesc used to map the kernel
+ * @ptdesc: The ptdesc to be marked
+ *
+ * Kernel page tables often need special handling. Set a flag so that
+ * the handling code knows this ptdesc will not be used for userspace.
+ */
+static inline void ptdesc_set_kernel(struct ptdesc *ptdesc)
+{
+ set_bit(PT_kernel, &ptdesc->pt_flags.f);
+}
+
+/**
+ * ptdesc_clear_kernel - Mark a ptdesc as no longer used to map the kernel
+ * @ptdesc: The ptdesc to be unmarked
+ *
+ * Use when the ptdesc is no longer used to map the kernel and no longer
+ * needs special handling.
+ */
+static inline void ptdesc_clear_kernel(struct ptdesc *ptdesc)
+{
+ /*
+ * Note: the 'PG_referenced' bit does not strictly need to be
+ * cleared before freeing the page. But this is nice for
+ * symmetry.
+ */
+ clear_bit(PT_kernel, &ptdesc->pt_flags.f);
+}
+
+/**
+ * ptdesc_test_kernel - Check if a ptdesc is used to map the kernel
+ * @ptdesc: The ptdesc being tested
+ *
+ * Call to tell if the ptdesc is used to map the kernel.
+ */
+static inline bool ptdesc_test_kernel(const struct ptdesc *ptdesc)
+{
+ return test_bit(PT_kernel, &ptdesc->pt_flags.f);
+}
+
+/**
+ * pagetable_alloc - Allocate pagetables
+ * @gfp: GFP flags
+ * @order: desired pagetable order
+ *
+ * pagetable_alloc allocates memory for page tables as well as a page table
+ * descriptor to describe that memory.
+ *
+ * Return: The ptdesc describing the allocated page tables.
+ */
+static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int order)
+{
+ struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order);
+
+ return page_ptdesc(page);
+}
+#define pagetable_alloc(...) alloc_hooks(pagetable_alloc_noprof(__VA_ARGS__))
+
+static inline void __pagetable_free(struct ptdesc *pt)
+{
+ struct page *page = ptdesc_page(pt);
+
+ __free_pages(page, compound_order(page));
+}
+
+#ifdef CONFIG_ASYNC_KERNEL_PGTABLE_FREE
+void pagetable_free_kernel(struct ptdesc *pt);
+#else
+static inline void pagetable_free_kernel(struct ptdesc *pt)
+{
+ __pagetable_free(pt);
+}
+#endif
+/**
+ * pagetable_free - Free pagetables
+ * @pt: The page table descriptor
+ *
+ * pagetable_free frees the memory of all page tables described by a page
+ * table descriptor and the memory for the descriptor itself.
+ */
+static inline void pagetable_free(struct ptdesc *pt)
+{
+ if (ptdesc_test_kernel(pt)) {
+ ptdesc_clear_kernel(pt);
+ pagetable_free_kernel(pt);
+ } else {
+ __pagetable_free(pt);
+ }
+}
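+
+/*
+ * A minimal lifecycle sketch (hypothetical caller): allocation and freeing
+ * are paired through the ptdesc, and kernel page tables are flagged so that
+ * pagetable_free() can route them to pagetable_free_kernel().
+ */
+static inline struct ptdesc *example_alloc_kernel_pt(gfp_t gfp)
+{
+ struct ptdesc *pt = pagetable_alloc(gfp, 0);
+
+ if (pt)
+ ptdesc_set_kernel(pt); /* Later freed via pagetable_free(). */
+ return pt;
+}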
+
+#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
-extern bool ptlock_alloc(struct page *page);
-extern void ptlock_free(struct page *page);
+bool ptlock_alloc(struct ptdesc *ptdesc);
+void ptlock_free(struct ptdesc *ptdesc);
-static inline spinlock_t *ptlock_ptr(struct page *page)
+static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
{
- return page->ptl;
+ return ptdesc->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}
-static inline bool ptlock_alloc(struct page *page)
+static inline bool ptlock_alloc(struct ptdesc *ptdesc)
{
return true;
}
-static inline void ptlock_free(struct page *page)
+static inline void ptlock_free(struct ptdesc *ptdesc)
{
}
-static inline spinlock_t *ptlock_ptr(struct page *page)
+static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
{
- return &page->ptl;
+ return &ptdesc->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
- return ptlock_ptr(pmd_page(*pmd));
+ return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
+}
+
+static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
+{
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_HIGHPTE));
+ BUILD_BUG_ON(MAX_PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);
+ return ptlock_ptr(virt_to_ptdesc(pte));
}
-static inline bool ptlock_init(struct page *page)
+static inline bool ptlock_init(struct ptdesc *ptdesc)
{
/*
 * prep_new_page() initializes page->private (and therefore page->ptl)
@@ -2216,14 +3276,14 @@ static inline bool ptlock_init(struct page *page)
 * It can happen if an arch tries to use slab for page table allocation:
 * slab code uses page->slab_cache, which shares storage with page->ptl.
*/
- VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
- if (!ptlock_alloc(page))
+ VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc));
+ if (!ptlock_alloc(ptdesc))
return false;
- spin_lock_init(ptlock_ptr(page));
+ spin_lock_init(ptlock_ptr(ptdesc));
return true;
}
-#else /* !USE_SPLIT_PTE_PTLOCKS */
+#else /* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
/*
* We use mm->page_table_lock to guard all pagetable pages of the mm.
*/
@@ -2231,41 +3291,83 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
return &mm->page_table_lock;
}
+static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
+{
+ return &mm->page_table_lock;
+}
static inline void ptlock_cache_init(void) {}
-static inline bool ptlock_init(struct page *page) { return true; }
-static inline void ptlock_free(struct page *page) {}
-#endif /* USE_SPLIT_PTE_PTLOCKS */
+static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
+static inline void ptlock_free(struct ptdesc *ptdesc) {}
+#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
-static inline void pgtable_init(void)
+static inline unsigned long ptdesc_nr_pages(const struct ptdesc *ptdesc)
{
- ptlock_cache_init();
- pgtable_cache_init();
+ return compound_nr(ptdesc_page(ptdesc));
}
-static inline bool pgtable_pte_page_ctor(struct page *page)
+static inline void __pagetable_ctor(struct ptdesc *ptdesc)
{
- if (!ptlock_init(page))
+ pg_data_t *pgdat = NODE_DATA(memdesc_nid(ptdesc->pt_flags));
+
+ __SetPageTable(ptdesc_page(ptdesc));
+ mod_node_page_state(pgdat, NR_PAGETABLE, ptdesc_nr_pages(ptdesc));
+}
+
+static inline void pagetable_dtor(struct ptdesc *ptdesc)
+{
+ pg_data_t *pgdat = NODE_DATA(memdesc_nid(ptdesc->pt_flags));
+
+ ptlock_free(ptdesc);
+ __ClearPageTable(ptdesc_page(ptdesc));
+ mod_node_page_state(pgdat, NR_PAGETABLE, -ptdesc_nr_pages(ptdesc));
+}
+
+static inline void pagetable_dtor_free(struct ptdesc *ptdesc)
+{
+ pagetable_dtor(ptdesc);
+ pagetable_free(ptdesc);
+}
+
+static inline bool pagetable_pte_ctor(struct mm_struct *mm,
+ struct ptdesc *ptdesc)
+{
+ if (mm != &init_mm && !ptlock_init(ptdesc))
return false;
- __SetPageTable(page);
- inc_lruvec_page_state(page, NR_PAGETABLE);
+ __pagetable_ctor(ptdesc);
return true;
}
-static inline void pgtable_pte_page_dtor(struct page *page)
+pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
+static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr,
+ pmd_t *pmdvalp)
{
- ptlock_free(page);
- __ClearPageTable(page);
- dec_lruvec_page_state(page, NR_PAGETABLE);
+ pte_t *pte;
+
+ __cond_lock(RCU, pte = ___pte_offset_map(pmd, addr, pmdvalp));
+ return pte;
+}
+static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
+{
+ return __pte_offset_map(pmd, addr, NULL);
+}
+
+pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, spinlock_t **ptlp);
+static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, spinlock_t **ptlp)
+{
+ pte_t *pte;
+
+ __cond_lock(RCU, __cond_lock(*ptlp,
+ pte = __pte_offset_map_lock(mm, pmd, addr, ptlp)));
+ return pte;
}
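+
+/*
+ * A minimal sketch (hypothetical walker) of the expected locking pattern:
+ * map and lock, inspect the entry, then unmap and unlock in one step via
+ * pte_unmap_unlock(). A NULL return means the page table vanished.
+ */
+static inline bool example_pte_present(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr)
+{
+ spinlock_t *ptl;
+ pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ bool ret;
+
+ if (!pte)
+ return false;
+ ret = pte_present(ptep_get(pte));
+ pte_unmap_unlock(pte, ptl);
+ return ret;
+}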
-#define pte_offset_map_lock(mm, pmd, address, ptlp) \
-({ \
- spinlock_t *__ptl = pte_lockptr(mm, pmd); \
- pte_t *__pte = pte_offset_map(pmd, address); \
- *(ptlp) = __ptl; \
- spin_lock(__ptl); \
- __pte; \
-})
+pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, spinlock_t **ptlp);
+pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, pmd_t *pmdvalp,
+ spinlock_t **ptlp);
#define pte_unmap_unlock(pte, ptl) do { \
spin_unlock(ptl); \
@@ -2285,36 +3387,33 @@ static inline void pgtable_pte_page_dtor(struct page *page)
((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
NULL: pte_offset_kernel(pmd, address))
-#if USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_SPLIT_PMD_PTLOCKS)
-static struct page *pmd_to_page(pmd_t *pmd)
+static inline struct page *pmd_pgtable_page(pmd_t *pmd)
{
unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
return virt_to_page((void *)((unsigned long) pmd & mask));
}
-static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
+static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
{
- return ptlock_ptr(pmd_to_page(pmd));
+ return page_ptdesc(pmd_pgtable_page(pmd));
}
-static inline bool pmd_ptlock_init(struct page *page)
+static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- page->pmd_huge_pte = NULL;
-#endif
- return ptlock_init(page);
+ return ptlock_ptr(pmd_ptdesc(pmd));
}
-static inline void pmd_ptlock_free(struct page *page)
+static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
+ ptdesc->pmd_huge_pte = NULL;
#endif
- ptlock_free(page);
+ return ptlock_init(ptdesc);
}
-#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
+#define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)
#else
@@ -2323,8 +3422,7 @@ static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
return &mm->page_table_lock;
}
-static inline bool pmd_ptlock_init(struct page *page) { return true; }
-static inline void pmd_ptlock_free(struct page *page) {}
+static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
@@ -2337,22 +3435,16 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
return ptl;
}
-static inline bool pgtable_pmd_page_ctor(struct page *page)
+static inline bool pagetable_pmd_ctor(struct mm_struct *mm,
+ struct ptdesc *ptdesc)
{
- if (!pmd_ptlock_init(page))
+ if (mm != &init_mm && !pmd_ptlock_init(ptdesc))
return false;
- __SetPageTable(page);
- inc_lruvec_page_state(page, NR_PAGETABLE);
+ ptdesc_pmd_pts_init(ptdesc);
+ __pagetable_ctor(ptdesc);
return true;
}
-static inline void pgtable_pmd_page_dtor(struct page *page)
-{
- pmd_ptlock_free(page);
- __ClearPageTable(page);
- dec_lruvec_page_state(page, NR_PAGETABLE);
-}
-
/*
* No scalability reason to split PUD locks yet, but follow the same pattern
* as the PMD locks to make it easier if we decide to. The VM should not be
@@ -2372,8 +3464,22 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
return ptl;
}
+static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
+{
+ __pagetable_ctor(ptdesc);
+}
+
+static inline void pagetable_p4d_ctor(struct ptdesc *ptdesc)
+{
+ __pagetable_ctor(ptdesc);
+}
+
+static inline void pagetable_pgd_ctor(struct ptdesc *ptdesc)
+{
+ __pagetable_ctor(ptdesc);
+}
+
extern void __init pagecache_init(void);
-extern void __init free_area_init_memoryless_node(int nid);
extern void free_initmem(void);
/*
@@ -2386,19 +3492,12 @@ extern unsigned long free_reserved_area(void *start, void *end,
int poison, const char *s);
extern void adjust_managed_page_count(struct page *page, long count);
-extern void mem_init_print_info(void);
-extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
+extern void reserve_bootmem_region(phys_addr_t start,
+ phys_addr_t end, int nid);
/* Free the reserved page into the buddy system, so it gets managed. */
-static inline void free_reserved_page(struct page *page)
-{
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- adjust_managed_page_count(page, 1);
-}
-#define free_highmem_page(page) free_reserved_page(page)
+void free_reserved_page(struct page *page);
static inline void mark_page_reserved(struct page *page)
{
@@ -2406,6 +3505,11 @@ static inline void mark_page_reserved(struct page *page)
adjust_managed_page_count(page, -1);
}
+static inline void free_reserved_ptdesc(struct ptdesc *pt)
+{
+ free_reserved_page(ptdesc_page(pt));
+}
+
/*
* Default method to free all the __init memory into the buddy system.
* The freed pages will be poisoned with pattern "poison" if it's within
@@ -2417,7 +3521,7 @@ static inline unsigned long free_initmem_default(int poison)
extern char __init_begin[], __init_end[];
return free_reserved_area(&__init_begin, &__init_end,
- poison, "unused kernel");
+ poison, "unused kernel image (initmem)");
}
static inline unsigned long get_num_physpages(void)
@@ -2444,20 +3548,17 @@ static inline unsigned long get_num_physpages(void)
* unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
* max_highmem_pfn};
* for_each_valid_physical_page_range()
- * memblock_add_node(base, size, nid)
+ * memblock_add_node(base, size, nid, MEMBLOCK_NONE)
* free_area_init(max_zone_pfns);
*/
void free_area_init(unsigned long *max_zone_pfn);
unsigned long node_map_pfn_alignment(void);
-unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
- unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn);
-extern unsigned long find_min_pfn_with_active_regions(void);
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
static inline int early_pfn_to_nid(unsigned long pfn)
{
return 0;
@@ -2467,34 +3568,23 @@ static inline int early_pfn_to_nid(unsigned long pfn)
extern int __meminit early_pfn_to_nid(unsigned long pfn);
#endif
-extern void set_dma_reserve(unsigned long new_dma_reserve);
-extern void memmap_init_range(unsigned long, int, unsigned long,
- unsigned long, unsigned long, enum meminit_context,
- struct vmem_altmap *, int migratetype);
-extern void memmap_init_zone(struct zone *zone);
-extern void setup_per_zone_wmarks(void);
-extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
-extern void show_mem(unsigned int flags, nodemask_t *nodemask);
+
+extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
+static inline void show_mem(void)
+{
+ __show_mem(0, NULL, MAX_NR_ZONES - 1);
+}
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
-#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
-extern unsigned long arch_reserved_kernel_pages(void);
-#endif
extern __printf(3, 4)
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
-/* page_alloc.c */
-extern int min_free_kbytes;
-extern int watermark_boost_factor;
-extern int watermark_scale_factor;
-extern bool arch_has_descending_max_zone_pfns(void);
-
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
@@ -2507,6 +3597,8 @@ void vma_interval_tree_insert_after(struct vm_area_struct *node,
struct rb_root_cached *root);
void vma_interval_tree_remove(struct vm_area_struct *node,
struct rb_root_cached *root);
+struct vm_area_struct *vma_interval_tree_subtree_search(struct vm_area_struct *node,
+ unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start, unsigned long last);
struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
@@ -2534,32 +3626,11 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
/* mmap.c */
-extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
-extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
- struct vm_area_struct *expand);
-static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
-{
- return __vma_adjust(vma, start, end, pgoff, insert, NULL);
-}
-extern struct vm_area_struct *vma_merge(struct mm_struct *,
- struct vm_area_struct *prev, unsigned long addr, unsigned long end,
- unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
- struct mempolicy *, struct vm_userfaultfd_ctx);
-extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
-extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
- unsigned long addr, int new_below);
-extern int split_vma(struct mm_struct *, struct vm_area_struct *,
- unsigned long addr, int new_below);
+extern int __vm_enough_memory(const struct mm_struct *mm, long pages, int cap_sys_admin);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
-extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
- struct rb_node **, struct rb_node *);
-extern void unlink_file_vma(struct vm_area_struct *);
-extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
- unsigned long addr, unsigned long len, pgoff_t pgoff,
- bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
+bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, bool write);
static inline int check_data_rlimit(unsigned long rlim,
unsigned long new,
@@ -2578,7 +3649,8 @@ static inline int check_data_rlimit(unsigned long rlim,
extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);
-extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
+extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
+extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern struct file *get_task_exe_file(struct task_struct *task);
@@ -2587,27 +3659,35 @@ extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
const struct vm_special_mapping *sm);
-extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
+struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
- unsigned long flags,
+ vm_flags_t vm_flags,
const struct vm_special_mapping *spec);
-/* This is an obsolete alternative to _install_special_mapping. */
-extern int install_special_mapping(struct mm_struct *mm,
- unsigned long addr, unsigned long len,
- unsigned long flags, struct page **pages);
unsigned long randomize_stack_top(unsigned long stack_top);
+unsigned long randomize_page(unsigned long start, unsigned long range);
-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+unsigned long
+__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags);
+
+static inline unsigned long
+get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+ return __get_unmapped_area(file, addr, len, pgoff, flags, 0);
+}
-extern unsigned long mmap_region(struct file *file, unsigned long addr,
- unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
- struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
- unsigned long pgoff, unsigned long *populate, struct list_head *uf);
-extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
- struct list_head *uf, bool downgrade);
+ vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
+ struct list_head *uf);
+extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
+ unsigned long start, size_t len, struct list_head *uf,
+ bool unlock);
+int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct list_head *uf, bool unlock);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
@@ -2624,8 +3704,7 @@ static inline void mm_populate(unsigned long addr, unsigned long len)
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif
-/* These take the mm semaphore themselves */
-extern int __must_check vm_brk(unsigned long, unsigned long);
+/* This takes the mm semaphore itself */
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
@@ -2640,15 +3719,16 @@ struct vm_unmapped_area_info {
unsigned long high_limit;
unsigned long align_mask;
unsigned long align_offset;
+ unsigned long start_gap;
};
extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
/* truncate.c */
-extern void truncate_inode_pages(struct address_space *, loff_t);
-extern void truncate_inode_pages_range(struct address_space *,
- loff_t lstart, loff_t lend);
-extern void truncate_inode_pages_final(struct address_space *);
+void truncate_inode_pages(struct address_space *mapping, loff_t lstart);
+void truncate_inode_pages_range(struct address_space *mapping, loff_t lstart,
+ loff_t lend);
+void truncate_inode_pages_final(struct address_space *mapping);
/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
@@ -2656,52 +3736,60 @@ extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
-/* mm/page-writeback.c */
-int __must_check write_one_page(struct page *page);
-void task_dirty_inc(struct task_struct *tsk);
-
extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
-extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
-
-/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
-extern int expand_downwards(struct vm_area_struct *vma,
- unsigned long address);
-#if VM_GROWSUP
-extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
-#else
- #define expand_upwards(vma, address) (0)
-#endif
+int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
+struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr);
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
-/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
- NULL if none. Assume start_addr < end_addr. */
-static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+/*
+ * Look up the first VMA which intersects the interval [start_addr, end_addr)
+ * NULL if none. Assume start_addr < end_addr.
+ */
+struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
+ unsigned long start_addr, unsigned long end_addr);
+
+/**
+ * vma_lookup() - Find a VMA at a specific address
+ * @mm: The process address space.
+ * @addr: The user address.
+ *
+ * Return: The vm_area_struct at the given address, %NULL otherwise.
+ */
+static inline
+struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
- struct vm_area_struct * vma = find_vma(mm,start_addr);
+ return mtree_load(&mm->mm_mt, addr);
+}
- if (vma && end_addr <= vma->vm_start)
- vma = NULL;
- return vma;
+static inline unsigned long stack_guard_start_gap(const struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_GROWSDOWN)
+ return stack_guard_gap;
+
+ /* See reasoning around the VM_SHADOW_STACK definition */
+ if (vma->vm_flags & VM_SHADOW_STACK)
+ return PAGE_SIZE;
+
+ return 0;
}
-static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+static inline unsigned long vm_start_gap(const struct vm_area_struct *vma)
{
+ unsigned long gap = stack_guard_start_gap(vma);
unsigned long vm_start = vma->vm_start;
- if (vma->vm_flags & VM_GROWSDOWN) {
- vm_start -= stack_guard_gap;
- if (vm_start > vma->vm_start)
- vm_start = 0;
- }
+ vm_start -= gap;
+ if (vm_start > vma->vm_start)
+ vm_start = 0;
return vm_start;
}
-static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+static inline unsigned long vm_end_gap(const struct vm_area_struct *vma)
{
unsigned long vm_end = vma->vm_end;
@@ -2713,16 +3801,100 @@ static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
return vm_end;
}
-static inline unsigned long vma_pages(struct vm_area_struct *vma)
+static inline unsigned long vma_pages(const struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
+static inline unsigned long vma_desc_size(const struct vm_area_desc *desc)
+{
+ return desc->end - desc->start;
+}
+
+static inline unsigned long vma_desc_pages(const struct vm_area_desc *desc)
+{
+ return vma_desc_size(desc) >> PAGE_SHIFT;
+}
+
+/**
+ * mmap_action_remap - helper for mmap_prepare hook to specify that a pure PFN
+ * remap is required.
+ * @desc: The VMA descriptor for the VMA requiring remap.
+ * @start: The virtual address to start the remap from, must be within the VMA.
+ * @start_pfn: The first PFN in the range to remap.
+ * @size: The size of the range to remap, in bytes, at most spanning to the end
+ * of the VMA.
+ */
+static inline void mmap_action_remap(struct vm_area_desc *desc,
+ unsigned long start,
+ unsigned long start_pfn,
+ unsigned long size)
+{
+ struct mmap_action *action = &desc->action;
+
+ /* [start, start + size) must be within the VMA. */
+ WARN_ON_ONCE(start < desc->start || start >= desc->end);
+ WARN_ON_ONCE(start + size > desc->end);
+
+ action->type = MMAP_REMAP_PFN;
+ action->remap.start = start;
+ action->remap.start_pfn = start_pfn;
+ action->remap.size = size;
+ action->remap.pgprot = desc->page_prot;
+}
+
+/**
+ * mmap_action_remap_full - helper for mmap_prepare hook to specify that the
+ * entirety of a VMA should be PFN remapped.
+ * @desc: The VMA descriptor for the VMA requiring remap.
+ * @start_pfn: The first PFN in the range to remap.
+ */
+static inline void mmap_action_remap_full(struct vm_area_desc *desc,
+ unsigned long start_pfn)
+{
+ mmap_action_remap(desc, desc->start, start_pfn, vma_desc_size(desc));
+}
+
+/**
+ * mmap_action_ioremap - helper for mmap_prepare hook to specify that a pure PFN
+ * I/O remap is required.
+ * @desc: The VMA descriptor for the VMA requiring remap.
+ * @start: The virtual address to start the remap from, must be within the VMA.
+ * @start_pfn: The first PFN in the range to remap.
+ * @size: The size of the range to remap, in bytes, at most spanning to the end
+ * of the VMA.
+ */
+static inline void mmap_action_ioremap(struct vm_area_desc *desc,
+ unsigned long start,
+ unsigned long start_pfn,
+ unsigned long size)
+{
+ mmap_action_remap(desc, start, start_pfn, size);
+ desc->action.type = MMAP_IO_REMAP_PFN;
+}
+
+/**
+ * mmap_action_ioremap_full - helper for mmap_prepare hook to specify that the
+ * entirety of a VMA should be PFN I/O remapped.
+ * @desc: The VMA descriptor for the VMA requiring remap.
+ * @start_pfn: The first PFN in the range to remap.
+ */
+static inline void mmap_action_ioremap_full(struct vm_area_desc *desc,
+ unsigned long start_pfn)
+{
+ mmap_action_ioremap(desc, desc->start, start_pfn, vma_desc_size(desc));
+}
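
A hedged sketch of how a driver's ->mmap_prepare hook might use these helpers; the device structure and its fields are hypothetical, and this assumes the descriptor carries the backing file as desc->file:

	/* Sketch: I/O-remap a hypothetical device's MMIO window into the VMA. */
	static int mydev_mmap_prepare(struct vm_area_desc *desc)
	{
		struct mydev *dev = desc->file->private_data;	/* hypothetical */

		if (vma_desc_size(desc) > dev->mmio_size)
			return -EINVAL;

		/* Only record the action; the mmap machinery remaps later. */
		mmap_action_ioremap_full(desc, PHYS_PFN(dev->mmio_phys));
		return 0;
	}
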
+
+void mmap_action_prepare(struct mmap_action *action,
+ struct vm_area_desc *desc);
+int mmap_action_complete(struct mmap_action *action,
+ struct vm_area_struct *vma);
+
/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
unsigned long vm_start, unsigned long vm_end)
{
- struct vm_area_struct *vma = find_vma(mm, vm_start);
+ struct vm_area_struct *vma = vma_lookup(mm, vm_start);
if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
vma = NULL;
@@ -2730,17 +3902,17 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
return vma;
}
-static inline bool range_in_vma(struct vm_area_struct *vma,
+static inline bool range_in_vma(const struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
return (vma && vma->vm_start <= start && end <= vma->vm_end);
}
#ifdef CONFIG_MMU
-pgprot_t vm_get_page_prot(unsigned long vm_flags);
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
return __pgprot(0);
}
@@ -2757,11 +3929,11 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
#endif
-struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
-int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
- unsigned long pfn, unsigned long size, pgprot_t);
-int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
- unsigned long pfn, unsigned long size, pgprot_t prot);
+struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
+ unsigned long addr);
+int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t pgprot);
+
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
struct page **pages, unsigned long *num);
@@ -2769,16 +3941,16 @@ int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
unsigned long num);
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
unsigned long num);
+vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page,
+ bool write);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
- pfn_t pfn);
-vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
- pfn_t pfn, pgprot_t pgprot);
+ unsigned long pfn);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
- unsigned long addr, pfn_t pfn);
+ unsigned long addr, unsigned long pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
@@ -2794,101 +3966,50 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
return VM_FAULT_NOPAGE;
}
-#ifndef io_remap_pfn_range
-static inline int io_remap_pfn_range(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn,
- unsigned long size, pgprot_t prot)
+#ifndef io_remap_pfn_range_pfn
+static inline unsigned long io_remap_pfn_range_pfn(unsigned long pfn,
+ unsigned long size)
{
- return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
+ return pfn;
}
#endif
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long orig_pfn,
+ unsigned long size, pgprot_t orig_prot)
+{
+ const unsigned long pfn = io_remap_pfn_range_pfn(orig_pfn, size);
+ const pgprot_t prot = pgprot_decrypted(orig_prot);
+
+ return remap_pfn_range(vma, addr, pfn, size, prot);
+}
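
For comparison, a conventional ->mmap implementation built directly on this wrapper could look like the sketch below (the device fields are hypothetical):

	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct mydev *dev = file->private_data;		/* hypothetical */
		unsigned long size = vma->vm_end - vma->vm_start;

		if (size > dev->mmio_size)
			return -EINVAL;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		/* pgprot_decrypted() is applied inside io_remap_pfn_range(). */
		return io_remap_pfn_range(vma, vma->vm_start,
					  PHYS_PFN(dev->mmio_phys) + vma->vm_pgoff,
					  size, vma->vm_page_prot);
	}
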
+
static inline vm_fault_t vmf_error(int err)
{
if (err == -ENOMEM)
return VM_FAULT_OOM;
+ else if (err == -EHWPOISON)
+ return VM_FAULT_HWPOISON;
return VM_FAULT_SIGBUS;
}
-struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
- unsigned int foll_flags);
-
-#define FOLL_WRITE 0x01 /* check pte is writable */
-#define FOLL_TOUCH 0x02 /* mark page accessed */
-#define FOLL_GET 0x04 /* do get_page on page */
-#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */
-#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
-#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO
- * and return without waiting upon it */
-#define FOLL_POPULATE 0x40 /* fault in page */
-#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
-#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
-#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
-#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
-#define FOLL_MLOCK 0x1000 /* lock present pages */
-#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
-#define FOLL_COW 0x4000 /* internal GUP flag */
-#define FOLL_ANON 0x8000 /* don't do file mappings */
-#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */
-#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
-#define FOLL_PIN 0x40000 /* pages must be released via unpin_user_page */
-#define FOLL_FAST_ONLY 0x80000 /* gup_fast: prevent fall-back to slow gup */
-
-/*
- * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
- * other. Here is what they mean, and how to use them:
- *
- * FOLL_LONGTERM indicates that the page will be held for an indefinite time
- * period _often_ under userspace control. This is in contrast to
- * iov_iter_get_pages(), whose usages are transient.
- *
- * FIXME: For pages which are part of a filesystem, mappings are subject to the
- * lifetime enforced by the filesystem and we need guarantees that longterm
- * users like RDMA and V4L2 only establish mappings which coordinate usage with
- * the filesystem. Ideas for this coordination include revoking the longterm
- * pin, delaying writeback, bounce buffer page writeback, etc. As FS DAX was
- * added after the problem with filesystems was found FS DAX VMAs are
- * specifically failed. Filesystem pages are still subject to bugs and use of
- * FOLL_LONGTERM should be avoided on those pages.
- *
- * FIXME: Also NOTE that FOLL_LONGTERM is not supported in every GUP call.
- * Currently only get_user_pages() and get_user_pages_fast() support this flag
- * and calls to get_user_pages_[un]locked are specifically not allowed. This
- * is due to an incompatibility with the FS DAX check and
- * FAULT_FLAG_ALLOW_RETRY.
- *
- * In the CMA case: long term pins in a CMA region would unnecessarily fragment
- * that region. And so, CMA attempts to migrate the page before pinning, when
- * FOLL_LONGTERM is specified.
- *
- * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
- * but an additional pin counting system) will be invoked. This is intended for
- * anything that gets a page reference and then touches page data (for example,
- * Direct IO). This lets the filesystem know that some non-file-system entity is
- * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
- * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
- * a call to unpin_user_page().
- *
- * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
- * and separate refcounting mechanisms, however, and that means that each has
- * its own acquire and release mechanisms:
- *
- * FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
- *
- * FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages to release.
- *
- * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
- * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
- * calls applied to them, and that's perfectly OK. This is a constraint on the
- * callers, not on the pages.)
- *
- * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
- * directly by the caller. That's in order to help avoid mismatches when
- * releasing pages: get_user_pages*() pages must be released via put_page(),
- * while pin_user_pages*() pages must be released via unpin_user_page().
- *
- * Please see Documentation/core-api/pin_user_pages.rst for more information.
+/*
+ * Convert errno to return value for ->page_mkwrite() calls.
+ *
+ * This should eventually be merged with vmf_error() above, but will need a
+ * careful audit of all vmf_error() callers.
*/
+static inline vm_fault_t vmf_fs_error(int err)
+{
+ if (err == 0)
+ return VM_FAULT_LOCKED;
+ if (err == -EFAULT || err == -EAGAIN)
+ return VM_FAULT_NOPAGE;
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ /* -ENOSPC, -EDQUOT, -EIO ... */
+ return VM_FAULT_SIGBUS;
+}
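
A sketch of the intended call pattern in a filesystem's ->page_mkwrite handler; the write-preparation step is a hypothetical stand-in:

	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
	{
		struct folio *folio = page_folio(vmf->page);
		int err;

		folio_lock(folio);
		err = myfs_prepare_write(folio);	/* hypothetical, 0 on success */
		if (err)
			folio_unlock(folio);

		/* 0 maps to VM_FAULT_LOCKED; errnos map to the codes above. */
		return vmf_fs_error(err);
	}
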
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
@@ -2901,6 +4022,30 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
return 0;
}
+/*
+ * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
+ * a (NUMA hinting) fault is required.
+ */
+static inline bool gup_can_follow_protnone(const struct vm_area_struct *vma,
+ unsigned int flags)
+{
+ /*
+ * If callers don't want to honor NUMA hinting faults, no need to
+ * determine if we would actually have to trigger a NUMA hinting fault.
+ */
+ if (!(flags & FOLL_HONOR_NUMA_FAULT))
+ return true;
+
+ /*
+ * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs.
+ *
+ * Requiring a fault here even for inaccessible VMAs would mean that
+ * FOLL_FORCE cannot make any progress, because handle_mm_fault()
+ * refuses to process NUMA hinting faults in inaccessible VMAs.
+ */
+ return !vma_is_accessible(vma);
+}
+
typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
@@ -2908,7 +4053,6 @@ extern int apply_to_existing_page_range(struct mm_struct *mm,
unsigned long address, unsigned long size,
pte_fn_t fn, void *data);
-extern void init_mem_debugging_and_hardening(void);
#ifdef CONFIG_PAGE_POISONING
extern void __kernel_poison_pages(struct page *page, int numpages);
extern void __kernel_unpoison_pages(struct page *page, int numpages);
@@ -2970,8 +4114,8 @@ static inline bool debug_pagealloc_enabled(void)
}
/*
- * For use in fast paths after init_debug_pagealloc() has run, or when a
- * false negative result is not harmful when called too early.
+ * For use in fast paths after mem_debugging_and_hardening_init() has run,
+ * or when a false negative result is not harmful when called too early.
*/
static inline bool debug_pagealloc_enabled_static(void)
{
@@ -2981,13 +4125,12 @@ static inline bool debug_pagealloc_enabled_static(void)
return static_branch_unlikely(&_debug_pagealloc_enabled);
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
/*
* To support DEBUG_PAGEALLOC architecture must ensure that
* __kernel_map_pages() never fails
*/
extern void __kernel_map_pages(struct page *page, int numpages, int enable);
-
+#ifdef CONFIG_DEBUG_PAGEALLOC
static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
{
if (debug_pagealloc_enabled_static())
@@ -2999,9 +4142,56 @@ static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
if (debug_pagealloc_enabled_static())
__kernel_map_pages(page, numpages, 0);
}
+
+extern unsigned int _debug_guardpage_minorder;
+DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
+
+static inline unsigned int debug_guardpage_minorder(void)
+{
+ return _debug_guardpage_minorder;
+}
+
+static inline bool debug_guardpage_enabled(void)
+{
+ return static_branch_unlikely(&_debug_guardpage_enabled);
+}
+
+static inline bool page_is_guard(const struct page *page)
+{
+ if (!debug_guardpage_enabled())
+ return false;
+
+ return PageGuard(page);
+}
+
+bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order);
+static inline bool set_page_guard(struct zone *zone, struct page *page,
+ unsigned int order)
+{
+ if (!debug_guardpage_enabled())
+ return false;
+ return __set_page_guard(zone, page, order);
+}
+
+void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order);
+static inline void clear_page_guard(struct zone *zone, struct page *page,
+ unsigned int order)
+{
+ if (!debug_guardpage_enabled())
+ return;
+ __clear_page_guard(zone, page, order);
+}
+
#else /* CONFIG_DEBUG_PAGEALLOC */
static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
+static inline unsigned int debug_guardpage_minorder(void) { return 0; }
+static inline bool debug_guardpage_enabled(void) { return false; }
+static inline bool page_is_guard(const struct page *page) { return false; }
+static inline bool set_page_guard(struct zone *zone, struct page *page,
+ unsigned int order) { return false; }
+static inline void clear_page_guard(struct zone *zone, struct page *page,
+ unsigned int order) {}
#endif /* CONFIG_DEBUG_PAGEALLOC */
#ifdef __HAVE_ARCH_GATE_AREA
@@ -3020,16 +4210,9 @@ static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
}
#endif /* __HAVE_ARCH_GATE_AREA */
-extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
-
-#ifdef CONFIG_SYSCTL
-extern int sysctl_drop_caches;
-int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-#endif
+bool process_shares_mm(const struct task_struct *p, const struct mm_struct *mm);
void drop_slab(void);
-void drop_slab_node(int nid);
#ifndef CONFIG_MMU
#define randomize_va_space 0
@@ -3047,47 +4230,176 @@ static inline void print_vma_addr(char *prefix, unsigned long rip)
#endif
void *sparse_buffer_alloc(unsigned long size);
+unsigned long section_map_size(void);
struct page * __populate_section_memmap(unsigned long pfn,
- unsigned long nr_pages, int nid, struct vmem_altmap *altmap);
+ unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
- struct vmem_altmap *altmap);
+ struct vmem_altmap *altmap, unsigned long ptpfn,
+ unsigned long flags);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
void *vmemmap_alloc_block_buf(unsigned long size, int node,
struct vmem_altmap *altmap);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
+void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
+ unsigned long addr, unsigned long next);
+int vmemmap_check_pmd(pmd_t *pmd, int node,
+ unsigned long addr, unsigned long next);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
int node, struct vmem_altmap *altmap);
+int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
+ int node, struct vmem_altmap *altmap);
int vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap);
+int vmemmap_populate_hvo(unsigned long start, unsigned long end, int node,
+ unsigned long headsize);
+int vmemmap_undo_hvo(unsigned long start, unsigned long end, int node,
+ unsigned long headsize);
+void vmemmap_wrprotect_hvo(unsigned long start, unsigned long end, int node,
+ unsigned long headsize);
void vmemmap_populate_print_last(void);
#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap);
#endif
-void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
- unsigned long nr_pages);
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
+{
+ /* number of pfns from base where pfn_to_page() is valid */
+ if (altmap)
+ return altmap->reserve + altmap->free;
+ return 0;
+}
+
+static inline void vmem_altmap_free(struct vmem_altmap *altmap,
+ unsigned long nr_pfns)
+{
+ altmap->alloc -= nr_pfns;
+}
+#else
+static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
+{
+ return 0;
+}
+
+static inline void vmem_altmap_free(struct vmem_altmap *altmap,
+ unsigned long nr_pfns)
+{
+}
+#endif
+
+#define VMEMMAP_RESERVE_NR 2
+#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
+static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap)
+{
+ unsigned long nr_pages;
+ unsigned long nr_vmemmap_pages;
+
+ if (!pgmap || !is_power_of_2(sizeof(struct page)))
+ return false;
+
+ nr_pages = pgmap_vmemmap_nr(pgmap);
+ nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
+ /*
+	 * For vmemmap optimization with DAX we need a minimum of 2 vmemmap
+	 * pages. See the layout diagram in Documentation/mm/vmemmap_dedup.rst
+ */
+ return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR);
+}
+/*
+ * If we don't have an architecture override, use the generic rule
+ */
+#ifndef vmemmap_can_optimize
+#define vmemmap_can_optimize __vmemmap_can_optimize
+#endif
+
+#else
+static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap)
+{
+ return false;
+}
+#endif
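
A worked example of the check, under stated assumptions (sizeof(struct page) == 64, PAGE_SIZE == 4096, and a pgmap whose pgmap_vmemmap_nr() is 512, i.e. a 2MiB compound):

	nr_vmemmap_pages = (512 * 64) >> PAGE_SHIFT = 32768 >> 12 = 8

Since 8 > VMEMMAP_RESERVE_NR (2) and 64 is a power of two, such a pgmap qualifies for vmemmap optimization whenever no altmap is in use.
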
enum mf_flags {
MF_COUNT_INCREASED = 1 << 0,
MF_ACTION_REQUIRED = 1 << 1,
MF_MUST_KILL = 1 << 2,
MF_SOFT_OFFLINE = 1 << 3,
+ MF_UNPOISON = 1 << 4,
+ MF_SW_SIMULATED = 1 << 5,
+ MF_NO_RETRY = 1 << 6,
+ MF_MEM_PRE_REMOVE = 1 << 7,
};
+int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
+ unsigned long count, int mf_flags);
extern int memory_failure(unsigned long pfn, int flags);
-extern void memory_failure_queue(unsigned long pfn, int flags);
-extern void memory_failure_queue_kick(int cpu);
extern int unpoison_memory(unsigned long pfn);
-extern int sysctl_memory_failure_early_kill;
-extern int sysctl_memory_failure_recovery;
-extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Sysfs entries for memory failure handling statistics.
+ */
+extern const struct attribute_group memory_failure_attr_group;
+extern void memory_failure_queue(unsigned long pfn, int flags);
+extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
+ bool *migratable_cleared);
+void num_poisoned_pages_inc(unsigned long pfn);
+void num_poisoned_pages_sub(unsigned long pfn, long i);
+#else
+static inline void memory_failure_queue(unsigned long pfn, int flags)
+{
+}
+
+static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
+ bool *migratable_cleared)
+{
+ return 0;
+}
+
+static inline void num_poisoned_pages_inc(unsigned long pfn)
+{
+}
+
+static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
+{
+}
+#endif
+
+#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
+extern void memblk_nr_poison_inc(unsigned long pfn);
+extern void memblk_nr_poison_sub(unsigned long pfn, long i);
+#else
+static inline void memblk_nr_poison_inc(unsigned long pfn)
+{
+}
+
+static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
+{
+}
+#endif
+
+#ifndef arch_memory_failure
+static inline int arch_memory_failure(unsigned long pfn, int flags)
+{
+ return -ENXIO;
+}
+#endif
+#ifndef arch_is_platform_page
+static inline bool arch_is_platform_page(u64 paddr)
+{
+ return false;
+}
+#endif
/*
* Error handlers for various types of pages.
@@ -3102,12 +4414,10 @@ enum mf_result {
enum mf_action_page_type {
MF_MSG_KERNEL,
MF_MSG_KERNEL_HIGH_ORDER,
- MF_MSG_SLAB,
MF_MSG_DIFFERENT_COMPOUND,
- MF_MSG_POISONED_HUGE,
MF_MSG_HUGE,
MF_MSG_FREE_HUGE,
- MF_MSG_NON_PMD_HUGE,
+ MF_MSG_GET_HWPOISON,
MF_MSG_UNMAP_FAILED,
MF_MSG_DIRTY_SWAPCACHE,
MF_MSG_CLEAN_SWAPCACHE,
@@ -3119,24 +4429,21 @@ enum mf_action_page_type {
MF_MSG_CLEAN_LRU,
MF_MSG_TRUNCATED_LRU,
MF_MSG_BUDDY,
- MF_MSG_BUDDY_2ND,
MF_MSG_DAX,
MF_MSG_UNSPLIT_THP,
+ MF_MSG_ALREADY_POISONED,
+ MF_MSG_PFN_MAP,
MF_MSG_UNKNOWN,
};
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
-extern void clear_huge_page(struct page *page,
- unsigned long addr_hint,
- unsigned int pages_per_huge_page);
-extern void copy_user_huge_page(struct page *dst, struct page *src,
- unsigned long addr_hint,
- struct vm_area_struct *vma,
- unsigned int pages_per_huge_page);
-extern long copy_huge_page_from_user(struct page *dst_page,
- const void __user *usr_src,
- unsigned int pages_per_huge_page,
- bool allow_pagefault);
+void folio_zero_user(struct folio *folio, unsigned long addr_hint);
+int copy_user_large_folio(struct folio *dst, struct folio *src,
+ unsigned long addr_hint,
+ struct vm_area_struct *vma);
+long copy_folio_from_user(struct folio *dst_folio,
+ const void __user *usr_src,
+ bool allow_pagefault);
/**
* vma_is_special_huge - Are transhuge page-table entries considered special?
@@ -3156,33 +4463,6 @@ static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-extern unsigned int _debug_guardpage_minorder;
-DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
-
-static inline unsigned int debug_guardpage_minorder(void)
-{
- return _debug_guardpage_minorder;
-}
-
-static inline bool debug_guardpage_enabled(void)
-{
- return static_branch_unlikely(&_debug_guardpage_enabled);
-}
-
-static inline bool page_is_guard(struct page *page)
-{
- if (!debug_guardpage_enabled())
- return false;
-
- return PageGuard(page);
-}
-#else
-static inline unsigned int debug_guardpage_minorder(void) { return 0; }
-static inline bool debug_guardpage_enabled(void) { return false; }
-static inline bool page_is_guard(struct page *page) { return false; }
-#endif /* CONFIG_DEBUG_PAGEALLOC */
-
#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else
@@ -3208,45 +4488,158 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping,
pgoff_t first_index, pgoff_t nr);
#endif
-extern int sysctl_nr_trim_pages;
+#ifdef CONFIG_ANON_VMA_NAME
+int set_anon_vma_name(unsigned long addr, unsigned long size,
+ const char __user *uname);
+#else
+static inline
+int set_anon_vma_name(unsigned long addr, unsigned long size,
+ const char __user *uname)
+{
+ return -EINVAL;
+}
+#endif
+
+#ifdef CONFIG_UNACCEPTED_MEMORY
+
+bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size);
+void accept_memory(phys_addr_t start, unsigned long size);
-#ifdef CONFIG_PRINTK
-void mem_dump_obj(void *object);
#else
-static inline void mem_dump_obj(void *object) {}
+
+static inline bool range_contains_unaccepted_memory(phys_addr_t start,
+ unsigned long size)
+{
+ return false;
+}
+
+static inline void accept_memory(phys_addr_t start, unsigned long size)
+{
+}
+
#endif
-/**
- * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
- * @seals: the seals to check
- * @vma: the vma to operate on
- *
- * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
- * the vma flags. Return 0 if check pass, or <0 for errors.
- */
-static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
-{
- if (seals & F_SEAL_FUTURE_WRITE) {
- /*
- * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
- * "future write" seal active.
- */
- if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
- return -EPERM;
-
- /*
- * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
- * MAP_SHARED and read-only, take care to not allow mprotect to
- * revert protections on such mappings. Do this only for shared
- * mappings. For private mappings, don't need to mask
- * VM_MAYWRITE as we still want them to be COW-writable.
- */
- if (vma->vm_flags & VM_SHARED)
- vma->vm_flags &= ~(VM_MAYWRITE);
- }
+static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
+{
+ return range_contains_unaccepted_memory(pfn << PAGE_SHIFT, PAGE_SIZE);
+}
+void vma_pgtable_walk_begin(struct vm_area_struct *vma);
+void vma_pgtable_walk_end(struct vm_area_struct *vma);
+
+int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
+int reserve_mem_release_by_name(const char *name);
+
+#ifdef CONFIG_64BIT
+int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
+#else
+static inline int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
+{
+ /* noop on 32 bit */
return 0;
}
+#endif
+
+/*
+ * user_alloc_needs_zeroing() checks whether a user folio coming from the page
+ * allocator needs to be zeroed.
+ */
+static inline bool user_alloc_needs_zeroing(void)
+{
+ /*
+	 * For user folios, an arch with cache aliasing requires a cache flush,
+	 * and arc changes folio->flags to make the icache coherent with the
+	 * dcache, so always return false to make the caller use
+	 * clear_user_page()/clear_user_highpage().
+ */
+ return cpu_dcache_is_aliasing() || cpu_icache_is_aliasing() ||
+ !static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
+ &init_on_alloc);
+}
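
A minimal caller sketch, assuming a fault path that has just allocated a folio destined for userspace:

	/* Zero the new user page only if the allocator didn't already. */
	if (user_alloc_needs_zeroing())
		clear_user_highpage(folio_page(folio, 0), vmf->address);
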
+
+int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status);
+int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status);
+int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
+
+/*
+ * DMA mapping IDs for page_pool
+ *
+ * When DMA-mapping a page, page_pool allocates an ID (from an xarray) and
+ * stashes it in the upper bits of page->pp_magic. We always want to be able to
+ * unambiguously identify page pool pages (using page_pool_page_is_pp()). Non-PP
+ * pages can have arbitrary kernel pointers stored in the same field as pp_magic
+ * (since it overlaps with page->lru.next), so we must ensure that we cannot
+ * mistake a valid kernel pointer for any of the values we write into this
+ * field.
+ *
+ * On architectures that set POISON_POINTER_DELTA, this is already ensured,
+ * since this value becomes part of PP_SIGNATURE; meaning we can just use the
+ * space between the PP_SIGNATURE value (without POISON_POINTER_DELTA), and the
+ * lowest bits of POISON_POINTER_DELTA. On arches where POISON_POINTER_DELTA is
+ * 0, we use the lowest bit of PAGE_OFFSET as the boundary if that value is
+ * known at compile-time.
+ *
+ * If the value of PAGE_OFFSET is not known at compile time, or if it is too
+ * small to leave at least 8 bits available above PP_SIGNATURE, we define the
+ * number of bits to be 0, which turns off the DMA index tracking altogether
+ * (see page_pool_register_dma_index()).
+ */
+#define PP_DMA_INDEX_SHIFT (1 + __fls(PP_SIGNATURE - POISON_POINTER_DELTA))
+#if POISON_POINTER_DELTA > 0
+/* PP_SIGNATURE includes POISON_POINTER_DELTA, so limit the size of the DMA
+ * index to not overlap with that if set
+ */
+#define PP_DMA_INDEX_BITS MIN(32, __ffs(POISON_POINTER_DELTA) - PP_DMA_INDEX_SHIFT)
+#else
+/* Use the lowest bit of PAGE_OFFSET if there are at least 8 bits available; see above */
+#define PP_DMA_INDEX_MIN_OFFSET (1 << (PP_DMA_INDEX_SHIFT + 8))
+#define PP_DMA_INDEX_BITS ((__builtin_constant_p(PAGE_OFFSET) && \
+ PAGE_OFFSET >= PP_DMA_INDEX_MIN_OFFSET && \
+ !(PAGE_OFFSET & (PP_DMA_INDEX_MIN_OFFSET - 1))) ? \
+ MIN(32, __ffs(PAGE_OFFSET) - PP_DMA_INDEX_SHIFT) : 0)
+
+#endif
+
+#define PP_DMA_INDEX_MASK GENMASK(PP_DMA_INDEX_BITS + PP_DMA_INDEX_SHIFT - 1, \
+ PP_DMA_INDEX_SHIFT)
+
+/* Mask used for checking in page_pool_page_is_pp() below. page->pp_magic is
+ * OR'ed with PP_SIGNATURE after the allocation in order to preserve bit 0 for
+ * the head page of a compound page and bit 1 for a pfmemalloc page, as well as the
+ * bits used for the DMA index. page_is_pfmemalloc() is checked in
+ * __page_pool_put_page() to avoid recycling the pfmemalloc page.
+ */
+#define PP_MAGIC_MASK ~(PP_DMA_INDEX_MASK | 0x3UL)
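
A worked example of the resulting layout, assuming PP_SIGNATURE's base value is 0x40 and POISON_POINTER_DELTA is 0xdead000000000000 (an illustrative 64-bit configuration):

	PP_DMA_INDEX_SHIFT = 1 + __fls(0x40) = 1 + 6 = 7
	PP_DMA_INDEX_BITS  = MIN(32, __ffs(0xdead000000000000) - 7)
	                   = MIN(32, 48 - 7) = 32
	PP_DMA_INDEX_MASK  = GENMASK(38, 7)

Bits 0-1 thus stay free for the compound-head and pfmemalloc flags, the signature occupies bit 6 plus the poison bits above the index, and bits 7-38 hold the DMA index.
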
+
+#ifdef CONFIG_PAGE_POOL
+static inline bool page_pool_page_is_pp(const struct page *page)
+{
+ return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE;
+}
+#else
+static inline bool page_pool_page_is_pp(const struct page *page)
+{
+ return false;
+}
+#endif
+
+#define PAGE_SNAPSHOT_FAITHFUL (1 << 0)
+#define PAGE_SNAPSHOT_PG_BUDDY (1 << 1)
+#define PAGE_SNAPSHOT_PG_IDLE (1 << 2)
+
+struct page_snapshot {
+ struct folio folio_snapshot;
+ struct page page_snapshot;
+ unsigned long pfn;
+ unsigned long idx;
+ unsigned long flags;
+};
+
+static inline bool snapshot_page_is_faithful(const struct page_snapshot *ps)
+{
+ return ps->flags & PAGE_SNAPSHOT_FAITHFUL;
+}
+
+void snapshot_page(struct page_snapshot *ps, const struct page *page);
-#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_api.h b/include/linux/mm_api.h
new file mode 100644
index 000000000000..a5ace2b198b8
--- /dev/null
+++ b/include/linux/mm_api.h
@@ -0,0 +1 @@
+#include <linux/mm.h>
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 355ea1ee32bd..fa2d6ba811b5 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -2,106 +2,657 @@
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H
+#include <linux/atomic.h>
#include <linux/huge_mm.h>
+#include <linux/mm_types.h>
#include <linux/swap.h>
+#include <linux/string.h>
+#include <linux/userfaultfd_k.h>
+#include <linux/leafops.h>
/**
- * page_is_file_lru - should the page be on a file LRU or anon LRU?
- * @page: the page to test
- *
- * Returns 1 if @page is a regular filesystem backed page cache page or a lazily
- * freed anonymous page (e.g. via MADV_FREE). Returns 0 if @page is a normal
- * anonymous page, a tmpfs page or otherwise ram or swap backed page. Used by
- * functions that manipulate the LRU lists, to sort a page onto the right LRU
- * list.
+ * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
+ * @folio: The folio to test.
*
* We would like to get this info without a page flag, but the state
- * needs to survive until the page is last deleted from the LRU, which
+ * needs to survive until the folio is last deleted from the LRU, which
* could be as far down as __page_cache_release.
+ *
+ * Return: An integer (not a boolean!) used to sort a folio onto the
+ * right LRU list and to account folios correctly.
+ * 1 if @folio is a regular filesystem backed page cache folio
+ * or a lazily freed anonymous folio (e.g. via MADV_FREE).
+ * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
+ * ram or swap backed folio.
*/
+static inline int folio_is_file_lru(const struct folio *folio)
+{
+ return !folio_test_swapbacked(folio);
+}
+
static inline int page_is_file_lru(struct page *page)
{
- return !PageSwapBacked(page);
+ return folio_is_file_lru(page_folio(page));
}
-static __always_inline void update_lru_size(struct lruvec *lruvec,
+static __always_inline void __update_lru_size(struct lruvec *lruvec,
enum lru_list lru, enum zone_type zid,
- int nr_pages)
+ long nr_pages)
{
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
- __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
+ lockdep_assert_held(&lruvec->lru_lock);
+ WARN_ON_ONCE(nr_pages != (int)nr_pages);
+
+ mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
__mod_zone_page_state(&pgdat->node_zones[zid],
NR_ZONE_LRU_BASE + lru, nr_pages);
+}
+
+static __always_inline void update_lru_size(struct lruvec *lruvec,
+ enum lru_list lru, enum zone_type zid,
+ long nr_pages)
+{
+ __update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
/**
- * __clear_page_lru_flags - clear page lru flags before releasing a page
- * @page: the page that was on lru and now has a zero reference
+ * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
+ * @folio: The folio that was on lru and now has a zero reference.
*/
-static __always_inline void __clear_page_lru_flags(struct page *page)
+static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
- VM_BUG_ON_PAGE(!PageLRU(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);
- __ClearPageLRU(page);
+ __folio_clear_lru(folio);
/* this shouldn't happen, so leave the flags to bad_page() */
- if (PageActive(page) && PageUnevictable(page))
+ if (folio_test_active(folio) && folio_test_unevictable(folio))
return;
- __ClearPageActive(page);
- __ClearPageUnevictable(page);
+ __folio_clear_active(folio);
+ __folio_clear_unevictable(folio);
}
/**
- * page_lru - which LRU list should a page be on?
- * @page: the page to test
+ * folio_lru_list - Which LRU list should a folio be on?
+ * @folio: The folio to test.
*
- * Returns the LRU list a page should be on, as an index
+ * Return: The LRU list a folio should be on, as an index
* into the array of LRU lists.
*/
-static __always_inline enum lru_list page_lru(struct page *page)
+static __always_inline enum lru_list folio_lru_list(const struct folio *folio)
{
enum lru_list lru;
- VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
+ VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);
- if (PageUnevictable(page))
+ if (folio_test_unevictable(folio))
return LRU_UNEVICTABLE;
- lru = page_is_file_lru(page) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
- if (PageActive(page))
+ lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
+ if (folio_test_active(folio))
lru += LRU_ACTIVE;
return lru;
}
-static __always_inline void add_page_to_lru_list(struct page *page,
- struct lruvec *lruvec)
+#ifdef CONFIG_LRU_GEN
+
+#ifdef CONFIG_LRU_GEN_ENABLED
+static inline bool lru_gen_enabled(void)
+{
+ DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);
+
+ return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
+}
+#else
+static inline bool lru_gen_enabled(void)
+{
+ DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);
+
+ return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
+}
+#endif
+
+static inline bool lru_gen_in_fault(void)
+{
+ return current->in_lru_fault;
+}
+
+static inline int lru_gen_from_seq(unsigned long seq)
+{
+ return seq % MAX_NR_GENS;
+}
+
+static inline int lru_hist_from_seq(unsigned long seq)
+{
+ return seq % NR_HIST_GENS;
+}
+
+static inline int lru_tier_from_refs(int refs, bool workingset)
+{
+ VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));
+
+ /* see the comment on MAX_NR_TIERS */
+ return workingset ? MAX_NR_TIERS - 1 : order_base_2(refs);
+}
+
+static inline int folio_lru_refs(const struct folio *folio)
+{
+ unsigned long flags = READ_ONCE(folio->flags.f);
+
+ if (!(flags & BIT(PG_referenced)))
+ return 0;
+ /*
+ * Return the total number of accesses including PG_referenced. Also see
+ * the comment on LRU_REFS_FLAGS.
+ */
+ return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + 1;
+}
+
+static inline int folio_lru_gen(const struct folio *folio)
+{
+ unsigned long flags = READ_ONCE(folio->flags.f);
+
+ return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+}
+
+static inline bool lru_gen_is_active(const struct lruvec *lruvec, int gen)
+{
+ unsigned long max_seq = lruvec->lrugen.max_seq;
+
+ VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
+
+ /* see the comment on MIN_NR_GENS */
+ return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
+}
+
+static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
+ int old_gen, int new_gen)
+{
+ int type = folio_is_file_lru(folio);
+ int zone = folio_zonenum(folio);
+ int delta = folio_nr_pages(folio);
+ enum lru_list lru = type * LRU_INACTIVE_FILE;
+ struct lru_gen_folio *lrugen = &lruvec->lrugen;
+
+ VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
+ VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
+ VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);
+
+ if (old_gen >= 0)
+ WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
+ lrugen->nr_pages[old_gen][type][zone] - delta);
+ if (new_gen >= 0)
+ WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
+ lrugen->nr_pages[new_gen][type][zone] + delta);
+
+ /* addition */
+ if (old_gen < 0) {
+ if (lru_gen_is_active(lruvec, new_gen))
+ lru += LRU_ACTIVE;
+ __update_lru_size(lruvec, lru, zone, delta);
+ return;
+ }
+
+ /* deletion */
+ if (new_gen < 0) {
+ if (lru_gen_is_active(lruvec, old_gen))
+ lru += LRU_ACTIVE;
+ __update_lru_size(lruvec, lru, zone, -delta);
+ return;
+ }
+
+ /* promotion */
+ if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
+ __update_lru_size(lruvec, lru, zone, -delta);
+ __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
+ }
+
+ /* demotion requires isolation, e.g., lru_deactivate_fn() */
+ VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
+}
+
+static inline unsigned long lru_gen_folio_seq(const struct lruvec *lruvec,
+ const struct folio *folio,
+ bool reclaiming)
+{
+ int gen;
+ int type = folio_is_file_lru(folio);
+ const struct lru_gen_folio *lrugen = &lruvec->lrugen;
+
+ /*
+ * +-----------------------------------+-----------------------------------+
+ * | Accessed through page tables and | Accessed through file descriptors |
+ * | promoted by folio_update_gen() | and protected by folio_inc_gen() |
+ * +-----------------------------------+-----------------------------------+
+ * | PG_active (set while isolated) | |
+ * +-----------------+-----------------+-----------------+-----------------+
+ * | PG_workingset | PG_referenced | PG_workingset | LRU_REFS_FLAGS |
+ * +-----------------------------------+-----------------------------------+
+ * |<---------- MIN_NR_GENS ---------->| |
+ * |<---------------------------- MAX_NR_GENS ---------------------------->|
+ */
+ if (folio_test_active(folio))
+ gen = MIN_NR_GENS - folio_test_workingset(folio);
+ else if (reclaiming)
+ gen = MAX_NR_GENS;
+ else if ((!folio_is_file_lru(folio) && !folio_test_swapcache(folio)) ||
+ (folio_test_reclaim(folio) &&
+ (folio_test_dirty(folio) || folio_test_writeback(folio))))
+ gen = MIN_NR_GENS;
+ else
+ gen = MAX_NR_GENS - folio_test_workingset(folio);
+
+ return max(READ_ONCE(lrugen->max_seq) - gen + 1, READ_ONCE(lrugen->min_seq[type]));
+}
+
+static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ unsigned long seq;
+ unsigned long flags;
+ int gen = folio_lru_gen(folio);
+ int type = folio_is_file_lru(folio);
+ int zone = folio_zonenum(folio);
+ struct lru_gen_folio *lrugen = &lruvec->lrugen;
+
+ VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);
+
+ if (folio_test_unevictable(folio) || !lrugen->enabled)
+ return false;
+
+ seq = lru_gen_folio_seq(lruvec, folio, reclaiming);
+ gen = lru_gen_from_seq(seq);
+ flags = (gen + 1UL) << LRU_GEN_PGOFF;
+ /* see the comment on MIN_NR_GENS about PG_active */
+ set_mask_bits(&folio->flags.f, LRU_GEN_MASK | BIT(PG_active), flags);
+
+ lru_gen_update_size(lruvec, folio, -1, gen);
+ /* for folio_rotate_reclaimable() */
+ if (reclaiming)
+ list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
+ else
+ list_add(&folio->lru, &lrugen->folios[gen][type][zone]);
+
+ return true;
+}
+
+static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ unsigned long flags;
+ int gen = folio_lru_gen(folio);
+
+ if (gen < 0)
+ return false;
+
+ VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
+
+ /* for folio_migrate_flags() */
+ flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
+ flags = set_mask_bits(&folio->flags.f, LRU_GEN_MASK, flags);
+ gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+
+ lru_gen_update_size(lruvec, folio, gen, -1);
+ list_del(&folio->lru);
+
+ return true;
+}
+
+static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
+{
+ unsigned long refs = READ_ONCE(old->flags.f) & LRU_REFS_MASK;
+
+ set_mask_bits(&new->flags.f, LRU_REFS_MASK, refs);
+}
+#else /* !CONFIG_LRU_GEN */
+
+static inline bool lru_gen_enabled(void)
+{
+ return false;
+}
+
+static inline bool lru_gen_in_fault(void)
+{
+ return false;
+}
+
+static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ return false;
+}
+
+static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
- enum lru_list lru = page_lru(page);
+ return false;
+}
+
+static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
+{
+
+}
+#endif /* CONFIG_LRU_GEN */
+
+static __always_inline
+void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
+{
+ enum lru_list lru = folio_lru_list(folio);
+
+ if (lru_gen_add_folio(lruvec, folio, false))
+ return;
+
+ update_lru_size(lruvec, lru, folio_zonenum(folio),
+ folio_nr_pages(folio));
+ if (lru != LRU_UNEVICTABLE)
+ list_add(&folio->lru, &lruvec->lists[lru]);
+}
+
+static __always_inline
+void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
+{
+ enum lru_list lru = folio_lru_list(folio);
+
+ if (lru_gen_add_folio(lruvec, folio, true))
+ return;
+
+ update_lru_size(lruvec, lru, folio_zonenum(folio),
+ folio_nr_pages(folio));
+ /* This is not expected to be used on LRU_UNEVICTABLE */
+ list_add_tail(&folio->lru, &lruvec->lists[lru]);
+}
+
+static __always_inline
+void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
+{
+ enum lru_list lru = folio_lru_list(folio);
+
+ if (lru_gen_del_folio(lruvec, folio, false))
+ return;
+
+ if (lru != LRU_UNEVICTABLE)
+ list_del(&folio->lru);
+ update_lru_size(lruvec, lru, folio_zonenum(folio),
+ -folio_nr_pages(folio));
+}
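
A minimal sketch of moving a folio between lists with these helpers; the caller is assumed to hold lruvec->lru_lock with the folio on the LRU:

	/* Sketch: promote a folio to the active list in place. */
	static void activate_in_place(struct lruvec *lruvec, struct folio *folio)
	{
		lruvec_del_folio(lruvec, folio);
		folio_set_active(folio);
		lruvec_add_folio(lruvec, folio);
	}
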
+
+#ifdef CONFIG_ANON_VMA_NAME
+/* mmap_lock should be read-locked */
+static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
+{
+ if (anon_name)
+ kref_get(&anon_name->kref);
+}
+
+static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
+{
+ if (anon_name)
+ kref_put(&anon_name->kref, anon_vma_name_free);
+}
+
+static inline
+struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
+{
+ /* Prevent anon_name refcount saturation early on */
+ if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
+ anon_vma_name_get(anon_name);
+ return anon_name;
+	}
+ return anon_vma_name_alloc(anon_name->name);
+}
+
+static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
+ struct vm_area_struct *new_vma)
+{
+ struct anon_vma_name *anon_name = anon_vma_name(orig_vma);
+
+ if (anon_name)
+ new_vma->anon_name = anon_vma_name_reuse(anon_name);
+}
+
+static inline void free_anon_vma_name(struct vm_area_struct *vma)
+{
+ /*
+ * Not using anon_vma_name because it generates a warning if mmap_lock
+ * is not held, which might be the case here.
+ */
+ anon_vma_name_put(vma->anon_name);
+}
+
+static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
+ struct anon_vma_name *anon_name2)
+{
+ if (anon_name1 == anon_name2)
+ return true;
+
+ return anon_name1 && anon_name2 &&
+ !strcmp(anon_name1->name, anon_name2->name);
+}
+
+#else /* CONFIG_ANON_VMA_NAME */
+static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
+static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
+static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
+ struct vm_area_struct *new_vma) {}
+static inline void free_anon_vma_name(struct vm_area_struct *vma) {}
+
+static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
+ struct anon_vma_name *anon_name2)
+{
+ return true;
+}
+
+#endif /* CONFIG_ANON_VMA_NAME */
+
+void pfnmap_track_ctx_release(struct kref *ref);
+
+static inline void init_tlb_flush_pending(struct mm_struct *mm)
+{
+ atomic_set(&mm->tlb_flush_pending, 0);
+}
+
+static inline void inc_tlb_flush_pending(struct mm_struct *mm)
+{
+ atomic_inc(&mm->tlb_flush_pending);
+ /*
+ * The only time this value is relevant is when there are indeed pages
+ * to flush. And we'll only flush pages after changing them, which
+ * requires the PTL.
+ *
+ * So the ordering here is:
+ *
+ * atomic_inc(&mm->tlb_flush_pending);
+ * spin_lock(&ptl);
+ * ...
+ * set_pte_at();
+ * spin_unlock(&ptl);
+ *
+ * spin_lock(&ptl)
+ * mm_tlb_flush_pending();
+ * ....
+ * spin_unlock(&ptl);
+ *
+ * flush_tlb_range();
+ * atomic_dec(&mm->tlb_flush_pending);
+ *
+	 * Where the increment is constrained by the PTL unlock, it thus
+ * ensures that the increment is visible if the PTE modification is
+ * visible. After all, if there is no PTE modification, nobody cares
+ * about TLB flushes either.
+ *
+ * This very much relies on users (mm_tlb_flush_pending() and
+ * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
+ * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
+ * locks (PPC) the unlock of one doesn't order against the lock of
+ * another PTL.
+ *
+ * The decrement is ordered by the flush_tlb_range(), such that
+ * mm_tlb_flush_pending() will not return false unless all flushes have
+ * completed.
+ */
+}
+
+static inline void dec_tlb_flush_pending(struct mm_struct *mm)
+{
+ /*
+ * See inc_tlb_flush_pending().
+ *
+ * This cannot be smp_mb__before_atomic() because smp_mb() simply does
+ * not order against TLB invalidate completion, which is what we need.
+ *
+ * Therefore we must rely on tlb_flush_*() to guarantee order.
+ */
+ atomic_dec(&mm->tlb_flush_pending);
+}
+
+static inline bool mm_tlb_flush_pending(const struct mm_struct *mm)
+{
+ /*
+ * Must be called after having acquired the PTL; orders against that
+ * PTLs release and therefore ensures that if we observe the modified
+ * PTE we must also observe the increment from inc_tlb_flush_pending().
+ *
+ * That is, it only guarantees to return true if there is a flush
+ * pending for _this_ PTL.
+ */
+ return atomic_read(&mm->tlb_flush_pending);
+}
+
+static inline bool mm_tlb_flush_nested(const struct mm_struct *mm)
+{
+ /*
+ * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
+ * for which there is a TLB flush pending in order to guarantee
+ * we've seen both that PTE modification and the increment.
+ *
+ * (no requirement on actually still holding the PTL, that is irrelevant)
+ */
+ return atomic_read(&mm->tlb_flush_pending) > 1;
+}
+
+#ifdef CONFIG_MMU
+/*
+ * Computes the pte marker to copy from the given source entry into dst_vma.
+ * If no marker should be copied, returns 0.
+ * The caller should insert a new pte created with make_pte_marker().
+ */
+static inline pte_marker copy_pte_marker(
+ softleaf_t entry, struct vm_area_struct *dst_vma)
+{
+ const pte_marker srcm = softleaf_to_marker(entry);
+ /* Always copy error entries. */
+ pte_marker dstm = srcm & (PTE_MARKER_POISONED | PTE_MARKER_GUARD);
+
+ /* Only copy PTE markers if UFFD register matches. */
+ if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
+ dstm |= PTE_MARKER_UFFD_WP;
- update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
- list_add(&page->lru, &lruvec->lists[lru]);
+ return dstm;
}
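
A hedged sketch of the expected call site when duplicating a non-present PTE during fork; the softleaf helpers are assumed to come from <linux/leafops.h>:

	softleaf_t entry = softleaf_from_pte(orig_pte);	/* assumed helper */

	if (softleaf_is_marker(entry)) {		/* assumed helper */
		const pte_marker marker = copy_pte_marker(entry, dst_vma);

		if (marker)
			set_pte_at(dst_mm, addr, dst_pte,
				   make_pte_marker(marker));
	}
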
-static __always_inline void add_page_to_lru_list_tail(struct page *page,
- struct lruvec *lruvec)
+/*
+ * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
+ * replace a none pte. NOTE! This should only be called when *pte is already
+ * cleared so we will never accidentally replace something valuable. Meanwhile,
+ * a none pte also means we are not demoting the pte, so a TLB flush is not
+ * needed. E.g., when the pte is cleared, the caller should have taken care of
+ * the TLB flush.
+ *
+ * Must be called with pgtable lock held so that no thread will see the none
+ * pte, and if they see it, they'll fault and serialize at the pgtable lock.
+ *
+ * Returns true if an uffd-wp pte was installed, false otherwise.
+ */
+static inline bool
+pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *pte, pte_t pteval)
{
- enum lru_list lru = page_lru(page);
+ bool arm_uffd_pte = false;
+
+ if (!uffd_supports_wp_marker())
+ return false;
+
+ /* The current status of the pte should be "cleared" before calling */
+ WARN_ON_ONCE(!pte_none(ptep_get(pte)));
+
+ /*
+ * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole
+ * thing, because when zapping either it means it's dropping the
+	 * thing, because when zapping we are either dropping the page
+	 * outright, or in TTU, where the present pte will be quickly replaced
+ */
+ if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
+ return false;
- update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
- list_add_tail(&page->lru, &lruvec->lists[lru]);
+ /* A uffd-wp wr-protected normal pte */
+ if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
+ arm_uffd_pte = true;
+
+ /*
+ * A uffd-wp wr-protected swap pte. Note: this should even cover an
+ * existing pte marker with uffd-wp bit set.
+ */
+ if (unlikely(pte_swp_uffd_wp_any(pteval)))
+ arm_uffd_pte = true;
+
+ if (unlikely(arm_uffd_pte)) {
+ set_pte_at(vma->vm_mm, addr, pte,
+ make_pte_marker(PTE_MARKER_UFFD_WP));
+ return true;
+ }
+
+ return false;
}
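
A sketch of the intended zap-path usage; the surrounding locking and TLB handling are elided:

	/* pgtable lock held: clear the PTE first, then re-arm the marker. */
	pte_t pteval = ptep_get_and_clear(vma->vm_mm, addr, pte);

	pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
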
-static __always_inline void del_page_from_lru_list(struct page *page,
- struct lruvec *lruvec)
+static inline bool vma_has_recency(const struct vm_area_struct *vma)
{
- list_del(&page->lru);
- update_lru_size(lruvec, page_lru(page), page_zonenum(page),
- -thp_nr_pages(page));
+ if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
+ return false;
+
+ if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
+ return false;
+
+ return true;
}
#endif
+
+/**
+ * num_pages_contiguous() - determine the number of contiguous pages
+ * that represent contiguous PFNs
+ * @pages: an array of page pointers
+ * @nr_pages: length of the array, at least 1
+ *
+ * Determine the number of contiguous pages that represent contiguous PFNs
+ * in @pages, starting from the first page.
+ *
+ * In some kernel configs contiguous PFNs will not have contiguous struct
+ * pages. In these configurations num_pages_contiguous() will return a number
+ * smaller than the ideal one. The caller should continue to check for pfn
+ * contiguity after each call to num_pages_contiguous().
+ *
+ * Returns the number of contiguous pages.
+ */
+static inline size_t num_pages_contiguous(struct page **pages, size_t nr_pages)
+{
+ struct page *cur_page = pages[0];
+ unsigned long section = memdesc_section(cur_page->flags);
+ size_t i;
+
+ for (i = 1; i < nr_pages; i++) {
+ if (++cur_page != pages[i])
+ break;
+ /*
+ * In unproblematic kernel configs, page_to_section() == 0 and
+ * the whole check will get optimized out.
+ */
+ if (memdesc_section(cur_page->flags) != section)
+ break;
+ }
+
+ return i;
+}
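
A usage sketch that processes the array in physically contiguous batches, re-checking contiguity after each call as the comment above advises (process_run() is hypothetical):

	static void process_pages(struct page **pages, size_t nr_pages)
	{
		size_t done = 0;

		while (done < nr_pages) {
			size_t run = num_pages_contiguous(&pages[done],
							  nr_pages - done);

			process_run(page_to_pfn(pages[done]), run); /* hypothetical */
			done += run;
		}
	}
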
+
+#endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5aacc1c10a45..9f6de068295d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -5,16 +5,23 @@
#include <linux/mm_types_task.h>
#include <linux/auxvec.h>
+#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
+#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
+#include <linux/rcupdate.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
#include <linux/seqlock.h>
+#include <linux/percpu_counter.h>
+#include <linux/types.h>
+#include <linux/rseq_types.h>
+#include <linux/bitmap.h>
#include <asm/mmu.h>
@@ -23,11 +30,15 @@
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
-#define INIT_PASID 0
struct address_space;
+struct futex_private_hash;
struct mem_cgroup;
+typedef struct {
+ unsigned long f;
+} memdesc_flags_t;
+
/*
* Each physical page in the system has a struct page associated with
* it to keep track of whatever it is we are using the page for at the
@@ -42,9 +53,7 @@ struct mem_cgroup;
* which is guaranteed to be aligned. If you use the same storage as
* page->mapping, you must restore it to NULL before freeing the page.
*
- * If your page will not be mapped to userspace, you can also use the four
- * bytes in the mapcount union, but you must call page_mapcount_reset()
- * before freeing it.
+ * The mapcount field must not be used for your own purposes.
*
* If you want to use the refcount field, it must be used in such a way
* that other CPUs temporarily incrementing and then decrementing the
@@ -55,20 +64,20 @@ struct mem_cgroup;
* in each subpage, but you may need to restore some of their values
* afterwards.
*
- * SLUB uses cmpxchg_double() to atomically update its freelist and
- * counters. That requires that freelist & counters be adjacent and
- * double-word aligned. We align all struct pages to double-word
- * boundaries, and ensure that 'freelist' is aligned within the
- * struct.
+ * SLUB uses cmpxchg_double() to atomically update its freelist and counters.
+ * That requires that freelist & counters in struct slab be adjacent and
+ * double-word aligned. Because struct slab currently just reinterprets the
+ * bits of struct page, we align all struct pages to double-word boundaries,
+ * and ensure that 'freelist' is aligned within struct slab.
*/
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment __aligned(2 * sizeof(unsigned long))
#else
-#define _struct_page_alignment
+#define _struct_page_alignment __aligned(sizeof(unsigned long))
#endif
struct page {
- unsigned long flags; /* Atomic flags, some possibly
+ memdesc_flags_t flags; /* Atomic flags, some possibly
* updated asynchronously */
/*
* Five words (20/40 bytes) are available in this union.
@@ -83,84 +92,48 @@ struct page {
* lruvec->lru_lock. Sometimes used as a generic list
* by the page owner.
*/
- struct list_head lru;
- /* See page-flags.h for PAGE_MAPPING_FLAGS */
+ union {
+ struct list_head lru;
+
+ /* Or, free page */
+ struct list_head buddy_list;
+ struct list_head pcp_list;
+ struct llist_node pcp_llist;
+ };
struct address_space *mapping;
- pgoff_t index; /* Our offset within mapping. */
+ union {
+ pgoff_t __folio_index; /* Our offset within mapping. */
+ unsigned long share; /* share count for fsdax */
+ };
/**
* @private: Mapping-private opaque data.
* Usually used for buffer_heads if PagePrivate.
- * Used for swp_entry_t if PageSwapCache.
- * Indicates order in the buddy system if PageBuddy.
+ * Used for swp_entry_t if swapcache flag set.
+ * Indicates order in the buddy system if PageBuddy
+ * or on pcp_llist.
*/
unsigned long private;
};
struct { /* page_pool used by netstack */
/**
- * @dma_addr: might require a 64-bit value on
- * 32-bit architectures.
+ * @pp_magic: magic value to avoid recycling non
+ * page_pool allocated pages.
*/
- unsigned long dma_addr[2];
- };
- struct { /* slab, slob and slub */
- union {
- struct list_head slab_list;
- struct { /* Partial pages */
- struct page *next;
-#ifdef CONFIG_64BIT
- int pages; /* Nr of pages left */
- int pobjects; /* Approximate count */
-#else
- short int pages;
- short int pobjects;
-#endif
- };
- };
- struct kmem_cache *slab_cache; /* not slob */
- /* Double-word boundary */
- void *freelist; /* first free object */
- union {
- void *s_mem; /* slab: first object */
- unsigned long counters; /* SLUB */
- struct { /* SLUB */
- unsigned inuse:16;
- unsigned objects:15;
- unsigned frozen:1;
- };
- };
+ unsigned long pp_magic;
+ struct page_pool *pp;
+ unsigned long _pp_mapping_pad;
+ unsigned long dma_addr;
+ atomic_long_t pp_ref_count;
};
struct { /* Tail pages of compound page */
unsigned long compound_head; /* Bit zero is set */
-
- /* First tail page only */
- unsigned char compound_dtor;
- unsigned char compound_order;
- atomic_t compound_mapcount;
- unsigned int compound_nr; /* 1 << compound_order */
- };
- struct { /* Second tail page of compound page */
- unsigned long _compound_pad_1; /* compound_head */
- atomic_t hpage_pinned_refcount;
- /* For both global and memcg */
- struct list_head deferred_list;
- };
- struct { /* Page table pages */
- unsigned long _pt_pad_1; /* compound_head */
- pgtable_t pmd_huge_pte; /* protected by page->ptl */
- unsigned long _pt_pad_2; /* mapping */
- union {
- struct mm_struct *pt_mm; /* x86 pgds only */
- atomic_t pt_frag_refcount; /* powerpc */
- };
-#if ALLOC_SPLIT_PTLOCKS
- spinlock_t *ptl;
-#else
- spinlock_t ptl;
-#endif
};
struct { /* ZONE_DEVICE pages */
- /** @pgmap: Points to the hosting device page map. */
- struct dev_pagemap *pgmap;
+ /*
+		 * The first word is used for compound_head or the
+		 * folio's pgmap pointer.
+ */
+ void *_unused_pgmap_compound_head;
void *zone_device_data;
/*
* ZONE_DEVICE private pages are counted as being
@@ -180,21 +153,31 @@ struct page {
union { /* This union is 4 bytes in size. */
/*
- * If the page can be mapped to userspace, encodes the number
- * of times this page is referenced by a page table.
+ * For head pages of typed folios, the value stored here
+ * allows for determining what this page is used for. The
+ * tail pages of typed folios will not store a type
+ * (page_type == _mapcount == -1).
+ *
+ * See page-flags.h for a list of page types which are currently
+ * stored here.
+ *
+		 * Owners of typed folios may reuse the lower 16 bits of the
+		 * head page's page_type field after setting the page type,
+		 * but must reset these 16 bits to -1 before clearing the
+ * page type.
*/
- atomic_t _mapcount;
+ unsigned int page_type;
/*
- * If the page is neither PageSlab nor mappable to userspace,
- * the value stored here may help determine what this page
- * is used for. See page-flags.h for a list of page types
- * which are currently stored here.
+ * For pages that are part of non-typed folios for which mappings
+ * are tracked via the RMAP, encodes the number of times this page
+ * is directly referenced by a page table.
+ *
+ * Note that the mapcount is always initialized to -1, so that
+ * transitions both from it and to it can be tracked, using
+ * atomic_inc_and_test() and atomic_add_negative(-1).
*/
- unsigned int page_type;
-
- unsigned int active; /* SLAB */
- int units; /* SLOB */
+ atomic_t _mapcount;
};
/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
@@ -202,6 +185,8 @@ struct page {
#ifdef CONFIG_MEMCG
unsigned long memcg_data;
+#elif defined(CONFIG_SLAB_OBJ_EXT)
+ unsigned long _unused_slab_obj_exts;
#endif
/*
@@ -222,26 +207,475 @@ struct page {
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
int _last_cpupid;
#endif
+
+#ifdef CONFIG_KMSAN
+ /*
+ * KMSAN metadata for this page:
+ * - shadow page: every bit indicates whether the corresponding
+ * bit of the original page is initialized (0) or not (1);
+ * - origin page: every 4 bytes contain an id of the stack trace
+ * where the uninitialized value was created.
+ */
+ struct page *kmsan_shadow;
+ struct page *kmsan_origin;
+#endif
} _struct_page_alignment;
-static inline atomic_t *compound_mapcount_ptr(struct page *page)
+/*
+ * struct encoded_page - a nonexistent type marking this pointer
+ *
+ * An 'encoded_page' pointer is a pointer to a regular 'struct page', but
+ * with the low bits of the pointer indicating extra context-dependent
+ * information. Only used in mmu_gather handling, and this acts as a type
+ * system check on that use.
+ *
+ * We only really have two guaranteed bits in general, although you could
+ * play with 'struct page' alignment (see CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
+ * for more.
+ *
+ * Use the supplied helper functions to encode/decode the pointer and bits.
+ */
+struct encoded_page;
+
+#define ENCODED_PAGE_BITS 3ul
+
+/* Perform rmap removal after we have flushed the TLB. */
+#define ENCODED_PAGE_BIT_DELAY_RMAP 1ul
+
+/*
+ * When this bit is set, the next item in an encoded_page array is the
+ * "nr_pages" argument, specifying the number of consecutive pages, starting
+ * from this page, that all belong to the same folio; "nr_pages" corresponds,
+ * for example, to the number of folio references that must be dropped. If
+ * this bit is not set, "nr_pages" is implicitly 1.
+ */
+#define ENCODED_PAGE_BIT_NR_PAGES_NEXT 2ul
+
+static __always_inline struct encoded_page *encode_page(struct page *page, unsigned long flags)
+{
+ BUILD_BUG_ON(flags > ENCODED_PAGE_BITS);
+ return (struct encoded_page *)(flags | (unsigned long)page);
+}
+
+static inline unsigned long encoded_page_flags(struct encoded_page *page)
+{
+ return ENCODED_PAGE_BITS & (unsigned long)page;
+}
+
+static inline struct page *encoded_page_ptr(struct encoded_page *page)
+{
+ return (struct page *)(~ENCODED_PAGE_BITS & (unsigned long)page);
+}
+
+static __always_inline struct encoded_page *encode_nr_pages(unsigned long nr)
+{
+ VM_WARN_ON_ONCE((nr << 2) >> 2 != nr);
+ return (struct encoded_page *)(nr << 2);
+}
+
+static __always_inline unsigned long encoded_nr_pages(struct encoded_page *page)
+{
+ return ((unsigned long)page) >> 2;
+}
+
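/*
 * A minimal usage sketch, assuming only the helpers above; the loop mirrors
 * how an encoded_page array is meant to be walked, and release_page_refs()
 * is a hypothetical callback, not an existing API.
 */
static inline void example_walk_encoded(struct encoded_page **pages,
		unsigned int nr,
		void (*release_page_refs)(struct page *, unsigned long))
{
	for (unsigned int i = 0; i < nr; i++) {
		struct encoded_page *enc = pages[i];
		struct page *page = encoded_page_ptr(enc);
		unsigned long nr_pages = 1;

		/* If the bit is set, the *next* array slot holds "nr_pages". */
		if (encoded_page_flags(enc) & ENCODED_PAGE_BIT_NR_PAGES_NEXT)
			nr_pages = encoded_nr_pages(pages[++i]);

		release_page_refs(page, nr_pages);
	}
}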
+/*
+ * A swap entry has to fit into a "unsigned long", as the entry is hidden
+ * in the "index" field of the swapper address space.
+ */
+typedef struct {
+ unsigned long val;
+} swp_entry_t;
+
+/**
+ * typedef softleaf_t - Describes a page table software leaf entry, abstracted
+ * from its architecture-specific encoding.
+ *
+ * Page table leaf entries are those which do not reference any descendant page
+ * tables but rather either reference a data page, are empty (a 'none' entry),
+ * or contain a non-present entry.
+ *
+ * If the entry references another page table or a data page, it is pertinent
+ * to hardware - that is, it tells the hardware how to decode the page table
+ * entry.
+ *
+ * Otherwise it is a software-defined leaf page table entry, which this type
+ * describes. See leafops.h and specifically @softleaf_type for a list of all
+ * possible kinds of software leaf entry.
+ *
+ * A softleaf_t entry is abstracted from the hardware page table entry, so is
+ * not architecture-specific.
+ *
+ * NOTE: While we transition from the confusing swp_entry_t type used for this
+ * purpose, we simply alias this type. This will be removed once the
+ * transition is complete.
+ */
+typedef swp_entry_t softleaf_t;
+
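/*
 * A small sketch of the round trip, assuming the swp_entry(), swp_type()
 * and swp_offset() helpers from <linux/swapops.h>: the arch-independent
 * encoding must preserve both fields.
 */
static inline bool example_swp_roundtrip(unsigned long type, pgoff_t offset)
{
	swp_entry_t entry = swp_entry(type, offset);

	return swp_type(entry) == type && swp_offset(entry) == offset;
}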
+#if defined(CONFIG_MEMCG) || defined(CONFIG_SLAB_OBJ_EXT)
+/* We have some extra room after the refcount in tail pages. */
+#define NR_PAGES_IN_LARGE_FOLIO
+#endif
+
+/*
+ * On 32bit, we can cut the required metadata in half, because:
+ * (a) PID_MAX_LIMIT implicitly limits the number of MMs we could ever have,
+ *     so we can limit MM IDs to 15 bits (32767).
+ * (b) We don't expect folios where even a single complete PTE mapping by
+ * one MM would exceed 15 bits (order-15).
+ */
+#ifdef CONFIG_64BIT
+typedef int mm_id_mapcount_t;
+#define MM_ID_MAPCOUNT_MAX INT_MAX
+typedef unsigned int mm_id_t;
+#else /* !CONFIG_64BIT */
+typedef short mm_id_mapcount_t;
+#define MM_ID_MAPCOUNT_MAX SHRT_MAX
+typedef unsigned short mm_id_t;
+#endif /* CONFIG_64BIT */
+
+/* We implicitly use the dummy ID for init-mm etc. where we never rmap pages. */
+#define MM_ID_DUMMY 0
+#define MM_ID_MIN (MM_ID_DUMMY + 1)
+
+/*
+ * We leave the highest bit of each MM id unused, so we can store a flag
+ * in the highest bit of each folio->_mm_id[].
+ */
+#define MM_ID_BITS ((sizeof(mm_id_t) * BITS_PER_BYTE) - 1)
+#define MM_ID_MASK ((1U << MM_ID_BITS) - 1)
+#define MM_ID_MAX MM_ID_MASK
+
+/*
+ * In order to use bit_spin_lock(), which requires an unsigned long, we
+ * operate on folio->_mm_ids when working on flags.
+ */
+#define FOLIO_MM_IDS_LOCK_BITNUM MM_ID_BITS
+#define FOLIO_MM_IDS_LOCK_BIT BIT(FOLIO_MM_IDS_LOCK_BITNUM)
+#define FOLIO_MM_IDS_SHARED_BITNUM (2 * MM_ID_BITS + 1)
+#define FOLIO_MM_IDS_SHARED_BIT BIT(FOLIO_MM_IDS_SHARED_BITNUM)
+
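/*
 * A compile-time sanity sketch of the resulting layout: two MM_ID_BITS-wide
 * ids plus the lock and shared flag bits must all fit in the single unsigned
 * long backing folio->_mm_ids (31+1+31+1 bits on 64-bit, 15+1+15+1 on 32-bit).
 */
static_assert(FOLIO_MM_IDS_SHARED_BITNUM < BITS_PER_LONG);
static_assert(MM_ID_MAX <= MM_ID_MAPCOUNT_MAX);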
+/**
+ * struct folio - Represents a contiguous set of bytes.
+ * @flags: Identical to the page flags.
+ * @lru: Least Recently Used list; tracks how recently this folio was used.
+ * @mlock_count: Number of times this folio has been pinned by mlock().
+ * @mapping: The file this page belongs to, or refers to the anon_vma for
+ * anonymous memory.
+ * @index: Offset within the file, in units of pages. For anonymous memory,
+ * this is the index from the beginning of the mmap.
+ * @share: number of DAX mappings that reference this folio. See
+ * dax_associate_entry.
+ * @private: Filesystem per-folio data (see folio_attach_private()).
+ * @swap: Used for swp_entry_t if folio_test_swapcache().
+ * @_mapcount: Do not access this member directly. Use folio_mapcount() to
+ * find out how many times this folio is mapped by userspace.
+ * @_refcount: Do not access this member directly. Use folio_ref_count()
+ * to find how many references there are to this folio.
+ * @memcg_data: Memory Control Group data.
+ * @pgmap: Metadata for ZONE_DEVICE mappings
+ * @virtual: Virtual address in the kernel direct map.
+ * @_last_cpupid: IDs of last CPU and last process that accessed the folio.
+ * @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
+ * @_large_mapcount: Do not use directly, call folio_mapcount().
+ * @_nr_pages_mapped: Do not use outside of rmap and debug code.
+ * @_pincount: Do not use directly, call folio_maybe_dma_pinned().
+ * @_nr_pages: Do not use directly, call folio_nr_pages().
+ * @_mm_id: Do not use outside of rmap code.
+ * @_mm_ids: Do not use outside of rmap code.
+ * @_mm_id_mapcount: Do not use outside of rmap code.
+ * @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h.
+ * @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
+ * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
+ * @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
+ * @_deferred_list: Folios to be split under memory pressure.
+ * @_unused_slab_obj_exts: Placeholder to match obj_exts in struct slab.
+ *
+ * A folio is a physically, virtually and logically contiguous set
+ * of bytes. It is a power-of-two in size, and it is aligned to that
+ * same power-of-two. It is at least as large as %PAGE_SIZE. If it is
+ * in the page cache, it is at a file offset which is a multiple of that
+ * power-of-two. It may be mapped into userspace at an address which is
+ * at an arbitrary page offset, but its kernel virtual address is aligned
+ * to its size.
+ */
+struct folio {
+ /* private: don't document the anon union */
+ union {
+ struct {
+ /* public: */
+ memdesc_flags_t flags;
+ union {
+ struct list_head lru;
+ /* private: avoid cluttering the output */
+ /* For the Unevictable "LRU list" slot */
+ struct {
+ /* Avoid compound_head */
+ void *__filler;
+ /* public: */
+ unsigned int mlock_count;
+ /* private: */
+ };
+ /* public: */
+ struct dev_pagemap *pgmap;
+ };
+ struct address_space *mapping;
+ union {
+ pgoff_t index;
+ unsigned long share;
+ };
+ union {
+ void *private;
+ swp_entry_t swap;
+ };
+ atomic_t _mapcount;
+ atomic_t _refcount;
+#ifdef CONFIG_MEMCG
+ unsigned long memcg_data;
+#elif defined(CONFIG_SLAB_OBJ_EXT)
+ unsigned long _unused_slab_obj_exts;
+#endif
+#if defined(WANT_PAGE_VIRTUAL)
+ void *virtual;
+#endif
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+ int _last_cpupid;
+#endif
+ /* private: the union with struct page is transitional */
+ };
+ struct page page;
+ };
+ union {
+ struct {
+ unsigned long _flags_1;
+ unsigned long _head_1;
+ union {
+ struct {
+ /* public: */
+ atomic_t _large_mapcount;
+ atomic_t _nr_pages_mapped;
+#ifdef CONFIG_64BIT
+ atomic_t _entire_mapcount;
+ atomic_t _pincount;
+#endif /* CONFIG_64BIT */
+ mm_id_mapcount_t _mm_id_mapcount[2];
+ union {
+ mm_id_t _mm_id[2];
+ unsigned long _mm_ids;
+ };
+ /* private: the union with struct page is transitional */
+ };
+ unsigned long _usable_1[4];
+ };
+ atomic_t _mapcount_1;
+ atomic_t _refcount_1;
+ /* public: */
+#ifdef NR_PAGES_IN_LARGE_FOLIO
+ unsigned int _nr_pages;
+#endif /* NR_PAGES_IN_LARGE_FOLIO */
+ /* private: the union with struct page is transitional */
+ };
+ struct page __page_1;
+ };
+ union {
+ struct {
+ unsigned long _flags_2;
+ unsigned long _head_2;
+ /* public: */
+ struct list_head _deferred_list;
+#ifndef CONFIG_64BIT
+ atomic_t _entire_mapcount;
+ atomic_t _pincount;
+#endif /* !CONFIG_64BIT */
+ /* private: the union with struct page is transitional */
+ };
+ struct page __page_2;
+ };
+ union {
+ struct {
+ unsigned long _flags_3;
+ unsigned long _head_3;
+ /* public: */
+ void *_hugetlb_subpool;
+ void *_hugetlb_cgroup;
+ void *_hugetlb_cgroup_rsvd;
+ void *_hugetlb_hwpoison;
+ /* private: the union with struct page is transitional */
+ };
+ struct page __page_3;
+ };
+};
+
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
+FOLIO_MATCH(flags, flags);
+FOLIO_MATCH(lru, lru);
+FOLIO_MATCH(mapping, mapping);
+FOLIO_MATCH(compound_head, lru);
+FOLIO_MATCH(__folio_index, index);
+FOLIO_MATCH(private, private);
+FOLIO_MATCH(_mapcount, _mapcount);
+FOLIO_MATCH(_refcount, _refcount);
+#ifdef CONFIG_MEMCG
+FOLIO_MATCH(memcg_data, memcg_data);
+#endif
+#if defined(WANT_PAGE_VIRTUAL)
+FOLIO_MATCH(virtual, virtual);
+#endif
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+FOLIO_MATCH(_last_cpupid, _last_cpupid);
+#endif
+#undef FOLIO_MATCH
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct folio, fl) == \
+ offsetof(struct page, pg) + sizeof(struct page))
+FOLIO_MATCH(flags, _flags_1);
+FOLIO_MATCH(compound_head, _head_1);
+FOLIO_MATCH(_mapcount, _mapcount_1);
+FOLIO_MATCH(_refcount, _refcount_1);
+#undef FOLIO_MATCH
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct folio, fl) == \
+ offsetof(struct page, pg) + 2 * sizeof(struct page))
+FOLIO_MATCH(flags, _flags_2);
+FOLIO_MATCH(compound_head, _head_2);
+#undef FOLIO_MATCH
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct folio, fl) == \
+ offsetof(struct page, pg) + 3 * sizeof(struct page))
+FOLIO_MATCH(flags, _flags_3);
+FOLIO_MATCH(compound_head, _head_3);
+#undef FOLIO_MATCH
+
+/**
+ * struct ptdesc - Memory descriptor for page tables.
+ * @pt_flags: enum pt_flags plus zone/node/section.
+ * @pt_rcu_head: For freeing page table pages.
+ * @pt_list: List of used page tables. Used for s390 gmap shadow pages
+ * (which are not linked into the user page tables) and x86
+ * pgds.
+ * @_pt_pad_1: Padding that aliases with page's compound head.
+ * @pmd_huge_pte: Protected by ptdesc->ptl, used for THPs.
+ * @__page_mapping: Aliases with page->mapping. Unused for page tables.
+ * @pt_index: Used for s390 gmap.
+ * @pt_mm: Used for x86 pgds.
+ * @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
+ * @pt_share_count: Used for HugeTLB PMD page table share count.
+ * @_pt_pad_2: Padding to ensure proper alignment.
+ * @ptl: Lock for the page table.
+ * @__page_type: Same as page->page_type. Unused for page tables.
+ * @__page_refcount: Same as page refcount.
+ * @pt_memcg_data: Memcg data. Tracked for page tables here.
+ *
+ * This struct overlays struct page for now. Do not modify without a good
+ * understanding of the issues.
+ */
+struct ptdesc {
+ memdesc_flags_t pt_flags;
+
+ union {
+ struct rcu_head pt_rcu_head;
+ struct list_head pt_list;
+ struct {
+ unsigned long _pt_pad_1;
+ pgtable_t pmd_huge_pte;
+ };
+ };
+ unsigned long __page_mapping;
+
+ union {
+ pgoff_t pt_index;
+ struct mm_struct *pt_mm;
+ atomic_t pt_frag_refcount;
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+ atomic_t pt_share_count;
+#endif
+ };
+
+ union {
+ unsigned long _pt_pad_2;
+#if ALLOC_SPLIT_PTLOCKS
+ spinlock_t *ptl;
+#else
+ spinlock_t ptl;
+#endif
+ };
+ unsigned int __page_type;
+ atomic_t __page_refcount;
+#ifdef CONFIG_MEMCG
+ unsigned long pt_memcg_data;
+#endif
+};
+
+#define TABLE_MATCH(pg, pt) \
+ static_assert(offsetof(struct page, pg) == offsetof(struct ptdesc, pt))
+TABLE_MATCH(flags, pt_flags);
+TABLE_MATCH(compound_head, pt_list);
+TABLE_MATCH(compound_head, _pt_pad_1);
+TABLE_MATCH(mapping, __page_mapping);
+TABLE_MATCH(__folio_index, pt_index);
+TABLE_MATCH(rcu_head, pt_rcu_head);
+TABLE_MATCH(page_type, __page_type);
+TABLE_MATCH(_refcount, __page_refcount);
+#ifdef CONFIG_MEMCG
+TABLE_MATCH(memcg_data, pt_memcg_data);
+#endif
+#undef TABLE_MATCH
+static_assert(sizeof(struct ptdesc) <= sizeof(struct page));
+
+#define ptdesc_page(pt) (_Generic((pt), \
+ const struct ptdesc *: (const struct page *)(pt), \
+ struct ptdesc *: (struct page *)(pt)))
+
+#define ptdesc_folio(pt) (_Generic((pt), \
+ const struct ptdesc *: (const struct folio *)(pt), \
+ struct ptdesc *: (struct folio *)(pt)))
+
+#define page_ptdesc(p) (_Generic((p), \
+ const struct page *: (const struct ptdesc *)(p), \
+ struct page *: (struct ptdesc *)(p)))
+
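/*
 * A minimal usage sketch of the conversion macros above; the _Generic form
 * preserves constness across the cast, so both directions work on const and
 * non-const pointers alike. The example_* names are illustrative.
 */
static inline struct ptdesc *example_page_to_ptdesc(struct page *page)
{
	return page_ptdesc(page);
}

static inline const struct page *example_ptdesc_to_page(const struct ptdesc *pt)
{
	return ptdesc_page(pt);	/* resolves to the const variant */
}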
+#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
{
- return &page[1].compound_mapcount;
+ atomic_set(&ptdesc->pt_share_count, 0);
}
-static inline atomic_t *compound_pincount_ptr(struct page *page)
+static inline void ptdesc_pmd_pts_inc(struct ptdesc *ptdesc)
{
- return &page[2].hpage_pinned_refcount;
+ atomic_inc(&ptdesc->pt_share_count);
}
+static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc)
+{
+ atomic_dec(&ptdesc->pt_share_count);
+}
+
+static inline int ptdesc_pmd_pts_count(const struct ptdesc *ptdesc)
+{
+ return atomic_read(&ptdesc->pt_share_count);
+}
+
+static inline bool ptdesc_pmd_is_shared(struct ptdesc *ptdesc)
+{
+ return !!ptdesc_pmd_pts_count(ptdesc);
+}
+#else
+static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
+{
+}
+#endif
+
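/*
 * A hedged sketch of the intended lifecycle under
 * CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING; the example_* names are
 * illustrative, not an existing API.
 */
static inline void example_share_pmd_table(struct ptdesc *ptdesc)
{
	ptdesc_pmd_pts_inc(ptdesc);	/* one more mm maps this PMD table */
}

static inline bool example_may_free_pmd_table(struct ptdesc *ptdesc)
{
	/* Only tear the table down once no sharer remains. */
	return !ptdesc_pmd_is_shared(ptdesc);
}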
/*
* Used for sizing the vmemmap region on some architectures
*/
#define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page)))
-#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
-#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
-
+/*
+ * page_private can be used on tail pages. However, PagePrivate is only
+ * checked by the VM on the head page. So page_private on the tail pages
+ * should be used for data that's ancillary to the head page (eg attaching
+ * buffer heads to tail pages after attaching buffer heads to the head page)
+ */
#define page_private(page) ((page)->private)
static inline void set_page_private(struct page *page, unsigned long private)
@@ -249,24 +683,20 @@ static inline void set_page_private(struct page *page, unsigned long private)
page->private = private;
}
-struct page_frag_cache {
- void * va;
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
- __u16 offset;
- __u16 size;
-#else
- __u32 offset;
-#endif
- /* we maintain a pagecount bias, so that we dont dirty cache line
- * containing page->_refcount every time we allocate a fragment.
- */
- unsigned int pagecnt_bias;
- bool pfmemalloc;
-};
+static inline void *folio_get_private(const struct folio *folio)
+{
+ return folio->private;
+}
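/*
 * A minimal sketch of the accessors above: stash an opaque value in a page
 * and read it back. Filesystems typically go through folio_attach_private()
 * and folio_detach_private() instead, which also manage the private flag.
 */
static inline void example_stash_cookie(struct page *page, unsigned long cookie)
{
	set_page_private(page, cookie);
	WARN_ON_ONCE(page_private(page) != cookie);
}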
typedef unsigned long vm_flags_t;
/*
+ * freeptr_t represents a SLUB freelist pointer, which might be encoded
+ * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
+ */
+typedef struct { unsigned long v; } freeptr_t;
+
+/*
* A region containing a mapping of a non-memory backed file under NOMMU
* conditions. These are held in a global tree and are pinned by the VMAs that
* map parts of them.
@@ -295,52 +725,229 @@ struct vm_userfaultfd_ctx {
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */
+struct anon_vma_name {
+ struct kref kref;
+ /* The name needs to be at the end because it is dynamically sized. */
+ char name[];
+};
+
+#ifdef CONFIG_ANON_VMA_NAME
+/*
+ * mmap_lock should be read-locked when calling anon_vma_name(). Caller should
+ * either keep holding the lock while using the returned pointer or it should
+ * raise anon_vma_name refcount before releasing the lock.
+ */
+struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
+struct anon_vma_name *anon_vma_name_alloc(const char *name);
+void anon_vma_name_free(struct kref *kref);
+#else /* CONFIG_ANON_VMA_NAME */
+static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+
+static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
+{
+ return NULL;
+}
+#endif
+
+#define VMA_LOCK_OFFSET 0x40000000
+#define VMA_REF_LIMIT (VMA_LOCK_OFFSET - 1)
+
+struct vma_numab_state {
+ /*
+ * Initialised as time in 'jiffies' after which VMA
+ * should be scanned. Delays first scan of new VMA by at
+ * least sysctl_numa_balancing_scan_delay:
+ */
+ unsigned long next_scan;
+
+ /*
+ * Time in jiffies when pids_active[] is reset to
+ * detect phase change behaviour:
+ */
+ unsigned long pids_active_reset;
+
+ /*
+ * Approximate tracking of PIDs that trapped a NUMA hinting
+ * fault. May produce false positives due to hash collisions.
+ *
+ * [0] Previous PID tracking
+ * [1] Current PID tracking
+ *
+ * Window moves after next_pid_reset has expired approximately
+ * every VMA_PID_RESET_PERIOD jiffies:
+ */
+ unsigned long pids_active[2];
+
+ /* MM scan sequence ID when scan first started after VMA creation */
+ int start_scan_seq;
+
+ /*
+ * MM scan sequence ID when the VMA was last completely scanned.
+ * A VMA is not eligible for scanning if prev_scan_seq == numa_scan_seq
+ */
+ int prev_scan_seq;
+};
+
+#ifdef __HAVE_PFNMAP_TRACKING
+struct pfnmap_track_ctx {
+ struct kref kref;
+ unsigned long pfn;
+ unsigned long size; /* in bytes */
+};
+#endif
+
+/* What action should be taken after an .mmap_prepare call is complete? */
+enum mmap_action_type {
+ MMAP_NOTHING, /* Mapping is complete, no further action. */
+ MMAP_REMAP_PFN, /* Remap PFN range. */
+ MMAP_IO_REMAP_PFN, /* I/O remap PFN range. */
+};
+
+/*
+ * Describes an action that an mmap_prepare hook can request be taken to
+ * complete the mapping of a VMA. Specified in vm_area_desc.
+ */
+struct mmap_action {
+ union {
+ /* Remap range. */
+ struct {
+ unsigned long start;
+ unsigned long start_pfn;
+ unsigned long size;
+ pgprot_t pgprot;
+ } remap;
+ };
+ enum mmap_action_type type;
+
+ /*
+ * If specified, this hook is invoked after the selected action has been
+	 * successfully completed. Note that the VMA write lock is still held.
+ *
+ * The absolute minimum ought to be done here.
+ *
+ * Returns 0 on success, or an error code.
+ */
+ int (*success_hook)(const struct vm_area_struct *vma);
+
+ /*
+	 * If specified, this hook is invoked when an error occurs while
+	 * attempting the selected action.
+ *
+ * The hook can return an error code in order to filter the error, but
+ * it is not valid to clear the error here.
+ */
+ int (*error_hook)(int err);
+
+ /*
+	 * This should be set in rare instances where the operation
+	 * requires that rmap not be able to access the VMA until it is
+	 * completely set up.
+ */
+ bool hide_from_rmap_until_complete :1;
+};
+
+/*
+ * Opaque type representing current VMA (vm_area_struct) flag state. Must be
+ * accessed via vma_flags_xxx() helper functions.
+ */
+#define NUM_VMA_FLAG_BITS BITS_PER_LONG
+typedef struct {
+ DECLARE_BITMAP(__vma_flags, NUM_VMA_FLAG_BITS);
+} __private vma_flags_t;
+
+/*
+ * Describes a VMA that is about to be mmap()'ed. Drivers may choose to
+ * manipulate mutable fields which will cause those fields to be updated in the
+ * resultant VMA.
+ *
+ * Helper functions are not required for manipulating any field.
+ */
+struct vm_area_desc {
+ /* Immutable state. */
+ const struct mm_struct *const mm;
+ struct file *const file; /* May vary from vm_file in stacked callers. */
+ unsigned long start;
+ unsigned long end;
+
+ /* Mutable fields. Populated with initial state. */
+ pgoff_t pgoff;
+ struct file *vm_file;
+ union {
+ vm_flags_t vm_flags;
+ vma_flags_t vma_flags;
+ };
+ pgprot_t page_prot;
+
+ /* Write-only fields. */
+ const struct vm_operations_struct *vm_ops;
+ void *private_data;
+
+ /* Take further action? */
+ struct mmap_action action;
+};
+
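/*
 * A hedged sketch of how a driver's .mmap_prepare hook might request a PFN
 * remap through the action mechanism described above; example_dev_ops and
 * example_dev_start_pfn are illustrative, not an existing API.
 */
static const struct vm_operations_struct example_dev_ops;
static unsigned long example_dev_start_pfn;

static int example_mmap_prepare(struct vm_area_desc *desc)
{
	desc->vm_ops = &example_dev_ops;
	desc->action.type = MMAP_REMAP_PFN;
	desc->action.remap.start = desc->start;
	desc->action.remap.start_pfn = example_dev_start_pfn;
	desc->action.remap.size = desc->end - desc->start;
	desc->action.remap.pgprot = desc->page_prot;
	return 0;	/* the core performs the remap after we return */
}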
/*
* This struct describes a virtual memory area. There is one of these
* per VM-area/task. A VM area is any part of the process virtual memory
* space that has a special rule for the page-fault handlers (ie a shared
* library, the executable area etc).
+ *
+ * Only explicitly marked struct members may be accessed by RCU readers before
+ * getting a stable reference.
+ *
+ * WARNING: when adding new members, please update vm_area_init_from() to copy
+ * them during vm_area_struct content duplication.
*/
struct vm_area_struct {
/* The first cache line has the info for VMA tree walking. */
- unsigned long vm_start; /* Our start address within vm_mm. */
- unsigned long vm_end; /* The first byte after our end address
- within vm_mm. */
-
- /* linked list of VM areas per task, sorted by address */
- struct vm_area_struct *vm_next, *vm_prev;
-
- struct rb_node vm_rb;
+ union {
+ struct {
+ /* VMA covers [vm_start; vm_end) addresses within mm */
+ unsigned long vm_start;
+ unsigned long vm_end;
+ };
+ freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */
+ };
/*
- * Largest free memory gap in bytes to the left of this VMA.
- * Either between this VMA and vma->vm_prev, or between one of the
- * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
- * get_unmapped_area find a free area of the right size.
+ * The address space we belong to.
+ * Unstable RCU readers are allowed to read this.
*/
- unsigned long rb_subtree_gap;
-
- /* Second cache line starts here. */
-
- struct mm_struct *vm_mm; /* The address space we belong to. */
+ struct mm_struct *vm_mm;
+ pgprot_t vm_page_prot; /* Access permissions of this VMA. */
/*
- * Access permissions of this VMA.
- * See vmf_insert_mixed_prot() for discussion.
+ * Flags, see mm.h.
+ * To modify use vm_flags_{init|reset|set|clear|mod} functions.
+ * Preferably, use vma_flags_xxx() functions.
*/
- pgprot_t vm_page_prot;
- unsigned long vm_flags; /* Flags, see mm.h. */
+ union {
+ /* Temporary while VMA flags are being converted. */
+ const vm_flags_t vm_flags;
+ vma_flags_t flags;
+ };
+#ifdef CONFIG_PER_VMA_LOCK
/*
- * For areas with an address space and backing store,
- * linkage into the address_space->i_mmap interval tree.
+ * Can only be written (using WRITE_ONCE()) while holding both:
+ * - mmap_lock (in write mode)
+ * - vm_refcnt bit at VMA_LOCK_OFFSET is set
+ * Can be read reliably while holding one of:
+ * - mmap_lock (in read or write mode)
+ * - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1
+ * Can be read unreliably (using READ_ONCE()) for pessimistic bailout
+ * while holding nothing (except RCU to keep the VMA struct allocated).
+ *
+ * This sequence counter is explicitly allowed to overflow; sequence
+ * counter reuse can only lead to occasional unnecessary use of the
+ * slowpath.
*/
- struct {
- struct rb_node rb;
- unsigned long rb_subtree_last;
- } shared;
-
+ unsigned int vm_lock_seq;
+#endif
/*
* A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
* list, after a COW of one of the file pages. A MAP_SHARED vma
@@ -369,40 +976,129 @@ struct vm_area_struct {
#ifdef CONFIG_NUMA
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ struct vma_numab_state *numab_state; /* NUMA Balancing state */
+#endif
+#ifdef CONFIG_PER_VMA_LOCK
+ /* Unstable RCU readers are allowed to read this. */
+ refcount_t vm_refcnt ____cacheline_aligned_in_smp;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map vmlock_dep_map;
+#endif
+#endif
+ /*
+ * For areas with an address space and backing store,
+ * linkage into the address_space->i_mmap interval tree.
+	 */
+ struct {
+ struct rb_node rb;
+ unsigned long rb_subtree_last;
+ } shared;
+#ifdef CONFIG_ANON_VMA_NAME
+ /*
+ * For private and shared anonymous mappings, a pointer to a null
+ * terminated string containing the name given to the vma, or NULL if
+ * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
+ */
+ struct anon_vma_name *anon_name;
+#endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
+#ifdef __HAVE_PFNMAP_TRACKING
+ struct pfnmap_track_ctx *pfnmap_track_ctx;
+#endif
} __randomize_layout;
-struct core_thread {
- struct task_struct *task;
- struct core_thread *next;
-};
+/* Clears all bits in the VMA flags bitmap, non-atomically. */
+static inline void vma_flags_clear_all(vma_flags_t *flags)
+{
+ bitmap_zero(ACCESS_PRIVATE(flags, __vma_flags), NUM_VMA_FLAG_BITS);
+}
-struct core_state {
- atomic_t nr_threads;
- struct core_thread dumper;
- struct completion startup;
-};
+/*
+ * Copy value to the first system word of VMA flags, non-atomically.
+ *
+ * IMPORTANT: This does not overwrite bytes past the first system word. The
+ * caller must account for this.
+ */
+static inline void vma_flags_overwrite_word(vma_flags_t *flags, unsigned long value)
+{
+ *ACCESS_PRIVATE(flags, __vma_flags) = value;
+}
+
+/*
+ * Copy value to the first system word of VMA flags ONCE, non-atomically.
+ *
+ * IMPORTANT: This does not overwrite bytes past the first system word. The
+ * caller must account for this.
+ */
+static inline void vma_flags_overwrite_word_once(vma_flags_t *flags, unsigned long value)
+{
+ unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
+
+ WRITE_ONCE(*bitmap, value);
+}
+
+/* Update the first system word of VMA flags setting bits, non-atomically. */
+static inline void vma_flags_set_word(vma_flags_t *flags, unsigned long value)
+{
+ unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
+
+ *bitmap |= value;
+}
+
+/* Update the first system word of VMA flags clearing bits, non-atomically. */
+static inline void vma_flags_clear_word(vma_flags_t *flags, unsigned long value)
+{
+ unsigned long *bitmap = ACCESS_PRIVATE(flags, __vma_flags);
+
+ *bitmap &= ~value;
+}
+
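/*
 * A minimal sketch of the non-atomic helpers above, e.g. while initialising
 * a freshly allocated VMA that no other thread can observe yet;
 * example_init_flags is illustrative.
 */
static inline void example_init_flags(vma_flags_t *flags, unsigned long initial)
{
	vma_flags_clear_all(flags);		  /* zero the whole bitmap */
	vma_flags_overwrite_word(flags, initial); /* then seed word 0 */
}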
+#ifdef CONFIG_NUMA
+#define vma_policy(vma) ((vma)->vm_policy)
+#else
+#define vma_policy(vma) NULL
+#endif
+
+/*
+ * Opaque type representing current mm_struct flag state. Must be accessed via
+ * mm_flags_xxx() helper functions.
+ */
+#define NUM_MM_FLAG_BITS (64)
+typedef struct {
+ DECLARE_BITMAP(__mm_flags, NUM_MM_FLAG_BITS);
+} __private mm_flags_t;
struct kioctx_table;
+struct iommu_mm_data;
struct mm_struct {
struct {
- struct vm_area_struct *mmap; /* list of VMAs */
- struct rb_root mm_rb;
- u64 vmacache_seqnum; /* per-thread vmacache */
-#ifdef CONFIG_MMU
- unsigned long (*get_unmapped_area) (struct file *filp,
- unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags);
-#endif
+ /*
+ * Fields which are often written to are placed in a separate
+ * cache line.
+ */
+ struct {
+ /**
+ * @mm_count: The number of references to &struct
+ * mm_struct (@mm_users count as 1).
+ *
+ * Use mmgrab()/mmdrop() to modify. When this drops to
+ * 0, the &struct mm_struct is freed.
+ */
+ atomic_t mm_count;
+ } ____cacheline_aligned_in_smp;
+
+ struct maple_tree mm_mt;
+
unsigned long mmap_base; /* base of mmap area */
unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
- /* Base adresses for compatible mmap() */
+ /* Base addresses for compatible mmap() */
unsigned long mmap_compat_base;
unsigned long mmap_compat_legacy_base;
#endif
unsigned long task_size; /* size of task vm space */
- unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
#ifdef CONFIG_MEMBARRIER
@@ -426,40 +1122,29 @@ struct mm_struct {
*/
atomic_t mm_users;
- /**
- * @mm_count: The number of references to &struct mm_struct
- * (@mm_users count as 1).
- *
- * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
- * &struct mm_struct is freed.
- */
- atomic_t mm_count;
-
- /**
- * @has_pinned: Whether this mm has pinned any pages. This can
- * be either replaced in the future by @pinned_vm when it
- * becomes stable, or grow into a counter on its own. We're
- * aggresive on this bit now - even if the pinned pages were
- * unpinned later on, we'll still keep this bit set for the
- * lifecycle of this mm just for simplicity.
- */
- atomic_t has_pinned;
-
- /**
- * @write_protect_seq: Locked when any thread is write
- * protecting pages mapped by this mm to enforce a later COW,
- * for instance during page table copying for fork().
- */
- seqcount_t write_protect_seq;
+ /* MM CID related storage */
+ struct mm_mm_cid mm_cid;
#ifdef CONFIG_MMU
- atomic_long_t pgtables_bytes; /* PTE page table pages */
+ atomic_long_t pgtables_bytes; /* size of all page tables */
#endif
int map_count; /* number of VMAs */
spinlock_t page_table_lock; /* Protects page tables and some
* counters
*/
+ /*
+	 * Typically the current mmap_lock's offset is 56 bytes from
+	 * the last cacheline boundary, which is close to optimal: its
+	 * two hot fields 'count' and 'owner' sit in two different
+	 * cachelines, and when mmap_lock is highly contended, both
+	 * fields are accessed frequently, so the current layout helps
+	 * to reduce cache bouncing.
+ *
+ * So please be careful with adding new fields before
+ * mmap_lock, which can easily push the 2 fields into one
+ * cacheline.
+ */
struct rw_semaphore mmap_lock;
struct list_head mmlist; /* List of maybe swapped mm's. These
@@ -467,7 +1152,37 @@ struct mm_struct {
* init_mm.mmlist, and are protected
* by mmlist_lock
*/
-
+#ifdef CONFIG_PER_VMA_LOCK
+ struct rcuwait vma_writer_wait;
+ /*
+ * This field has lock-like semantics, meaning it is sometimes
+ * accessed with ACQUIRE/RELEASE semantics.
+ * Roughly speaking, incrementing the sequence number is
+ * equivalent to releasing locks on VMAs; reading the sequence
+ * number can be part of taking a read lock on a VMA.
+ * Incremented every time mmap_lock is write-locked/unlocked.
+ * Initialized to 0, therefore odd values indicate mmap_lock
+ * is write-locked and even values that it's released.
+ *
+ * Can be modified under write mmap_lock using RELEASE
+ * semantics.
+ * Can be read with no other protection when holding write
+ * mmap_lock.
+ * Can be read with ACQUIRE semantics if not holding write
+ * mmap_lock.
+ */
+ seqcount_t mm_lock_seq;
+#endif
+#ifdef CONFIG_FUTEX_PRIVATE_HASH
+ struct mutex futex_hash_lock;
+ struct futex_private_hash __rcu *futex_phash;
+ struct futex_private_hash *futex_phash_new;
+ /* futex-ref */
+ unsigned long futex_batches;
+ struct rcu_head futex_rcu;
+ atomic_long_t futex_atomic;
+ unsigned int __percpu *futex_ref;
+#endif
unsigned long hiwater_rss; /* High-watermark of RSS usage */
unsigned long hiwater_vm; /* High-water virtual memory usage */
@@ -478,29 +1193,36 @@ struct mm_struct {
unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
unsigned long stack_vm; /* VM_STACK */
- unsigned long def_flags;
+ vm_flags_t def_flags;
+
+ /**
+ * @write_protect_seq: Locked when any thread is write
+ * protecting pages mapped by this mm to enforce a later COW,
+ * for instance during page table copying for fork().
+ */
+ seqcount_t write_protect_seq;
spinlock_t arg_lock; /* protect the below fields */
+
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
- /*
- * Special counters, in some configurations protected by the
- * page_table_lock, in other configurations by being atomic.
- */
- struct mm_rss_stat rss_stat;
+#ifdef CONFIG_ARCH_HAS_ELF_CORE_EFLAGS
+ /* the ABI-related flags from the ELF header. Used for core dump */
+ unsigned long saved_e_flags;
+#endif
+
+ struct percpu_counter rss_stat[NR_MM_COUNTERS];
struct linux_binfmt *binfmt;
/* Architecture-specific MM context */
mm_context_t context;
- unsigned long flags; /* Must use atomic bitops to access */
-
- struct core_state *core_state; /* coredumping support */
+		mm_flags_t flags; /* Must use mm_flags_* helpers to access */
#ifdef CONFIG_AIO
spinlock_t ioctx_lock;
@@ -526,42 +1248,81 @@ struct mm_struct {
#ifdef CONFIG_MMU_NOTIFIER
struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS)
pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
/*
- * numa_next_scan is the next time that the PTEs will be marked
- * pte_numa. NUMA hinting faults will gather statistics and
- * migrate pages to new nodes if necessary.
+ * numa_next_scan is the next time that PTEs will be remapped
+ * PROT_NONE to trigger NUMA hinting faults; such faults gather
+ * statistics and migrate pages to new nodes if necessary.
*/
unsigned long numa_next_scan;
- /* Restart point for scanning and setting pte_numa */
+ /* Restart point for scanning and remapping PTEs. */
unsigned long numa_scan_offset;
- /* numa_scan_seq prevents two threads setting pte_numa */
+ /* numa_scan_seq prevents two threads remapping PTEs. */
int numa_scan_seq;
#endif
/*
* An operation with batched TLB flushing is going on. Anything
* that can move process memory needs to flush the TLB when
- * moving a PROT_NONE or PROT_NUMA mapped page.
+ * moving a PROT_NONE mapped page.
*/
atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/* See flush_tlb_batched_pending() */
- bool tlb_flush_batched;
+ atomic_t tlb_flush_batched;
#endif
struct uprobes_state uprobes_state;
+#ifdef CONFIG_PREEMPT_RT
+ struct rcu_head delayed_drop;
+#endif
#ifdef CONFIG_HUGETLB_PAGE
atomic_long_t hugetlb_usage;
#endif
struct work_struct async_put_work;
-#ifdef CONFIG_IOMMU_SUPPORT
- u32 pasid;
+#ifdef CONFIG_IOMMU_MM_DATA
+ struct iommu_mm_data *iommu_mm;
+#endif
+#ifdef CONFIG_KSM
+ /*
+ * Represent how many pages of this process are involved in KSM
+	 * Represents how many pages of this process are involved in KSM
+ */
+ unsigned long ksm_merging_pages;
+ /*
+	 * Represents how many pages are checked for KSM merging,
+ * including merged and not merged.
+ */
+ unsigned long ksm_rmap_items;
+ /*
+	 * Represents how many empty pages are merged with kernel zero
+ * pages when enabling KSM use_zero_pages.
+ */
+ atomic_long_t ksm_zero_pages;
+#endif /* CONFIG_KSM */
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
+ struct {
+ /* this mm_struct is on lru_gen_mm_list */
+ struct list_head list;
+ /*
+ * Set when switching to this mm_struct, as a hint of
+ * whether it has been used since the last time per-node
+ * page table walkers cleared the corresponding bits.
+ */
+ unsigned long bitmap;
+#ifdef CONFIG_MEMCG
+ /* points to the memcg of "owner" above */
+ struct mem_cgroup *memcg;
#endif
+ } lru_gen;
+#endif /* CONFIG_LRU_GEN_WALKS_MMU */
+#ifdef CONFIG_MM_ID
+ mm_id_t mm_id;
+#endif /* CONFIG_MM_ID */
} __randomize_layout;
/*
@@ -571,6 +1332,38 @@ struct mm_struct {
unsigned long cpu_bitmap[];
};
+/* Copy value to the first system word of mm flags, non-atomically. */
+static inline void __mm_flags_overwrite_word(struct mm_struct *mm, unsigned long value)
+{
+ *ACCESS_PRIVATE(&mm->flags, __mm_flags) = value;
+}
+
+/* Obtain a read-only view of the mm flags bitmap. */
+static inline const unsigned long *__mm_flags_get_bitmap(const struct mm_struct *mm)
+{
+ return (const unsigned long *)ACCESS_PRIVATE(&mm->flags, __mm_flags);
+}
+
+/* Read the first system word of mm flags, non-atomically. */
+static inline unsigned long __mm_flags_get_word(const struct mm_struct *mm)
+{
+ return *__mm_flags_get_bitmap(mm);
+}
+
+/*
+ * Update the first system word of mm flags ONLY, applying the specified mask to
+ * it, then setting all flags specified by bits.
+ */
+static inline void __mm_flags_set_mask_bits_word(struct mm_struct *mm,
+ unsigned long mask, unsigned long bits)
+{
+ unsigned long *bitmap = ACCESS_PRIVATE(&mm->flags, __mm_flags);
+
+ set_mask_bits(bitmap, mask, bits);
+}
+
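/*
 * A small sketch of the mask-and-set helper above, assuming the
 * MMF_DUMPABLE_* masks defined later in this header; it replaces only the
 * dumpability bits while leaving every other mm flag intact.
 */
static inline void example_set_dumpable(struct mm_struct *mm, unsigned long value)
{
	__mm_flags_set_mask_bits_word(mm, MMF_DUMPABLE_MASK,
				      value & MMF_DUMPABLE_MASK);
}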
+#define MM_MT_FLAGS (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | \
+ MT_FLAGS_USE_RCU)
extern struct mm_struct init_mm;
/* Pointer magic because the dynamic array size confuses some compilers. */
@@ -588,95 +1381,149 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
return (struct cpumask *)&mm->cpu_bitmap;
}
-struct mmu_gather;
-extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
-extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
-extern void tlb_finish_mmu(struct mmu_gather *tlb);
+#ifdef CONFIG_LRU_GEN
+
+struct lru_gen_mm_list {
+ /* mm_struct list for page table walkers */
+ struct list_head fifo;
+ /* protects the list above */
+ spinlock_t lock;
+};
+
+#endif /* CONFIG_LRU_GEN */
+
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
-static inline void init_tlb_flush_pending(struct mm_struct *mm)
+void lru_gen_add_mm(struct mm_struct *mm);
+void lru_gen_del_mm(struct mm_struct *mm);
+void lru_gen_migrate_mm(struct mm_struct *mm);
+
+static inline void lru_gen_init_mm(struct mm_struct *mm)
{
- atomic_set(&mm->tlb_flush_pending, 0);
+ INIT_LIST_HEAD(&mm->lru_gen.list);
+ mm->lru_gen.bitmap = 0;
+#ifdef CONFIG_MEMCG
+ mm->lru_gen.memcg = NULL;
+#endif
}
-static inline void inc_tlb_flush_pending(struct mm_struct *mm)
+static inline void lru_gen_use_mm(struct mm_struct *mm)
{
- atomic_inc(&mm->tlb_flush_pending);
/*
- * The only time this value is relevant is when there are indeed pages
- * to flush. And we'll only flush pages after changing them, which
- * requires the PTL.
- *
- * So the ordering here is:
- *
- * atomic_inc(&mm->tlb_flush_pending);
- * spin_lock(&ptl);
- * ...
- * set_pte_at();
- * spin_unlock(&ptl);
- *
- * spin_lock(&ptl)
- * mm_tlb_flush_pending();
- * ....
- * spin_unlock(&ptl);
- *
- * flush_tlb_range();
- * atomic_dec(&mm->tlb_flush_pending);
- *
- * Where the increment if constrained by the PTL unlock, it thus
- * ensures that the increment is visible if the PTE modification is
- * visible. After all, if there is no PTE modification, nobody cares
- * about TLB flushes either.
- *
- * This very much relies on users (mm_tlb_flush_pending() and
- * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
- * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
- * locks (PPC) the unlock of one doesn't order against the lock of
- * another PTL.
- *
- * The decrement is ordered by the flush_tlb_range(), such that
- * mm_tlb_flush_pending() will not return false unless all flushes have
- * completed.
+ * When the bitmap is set, page reclaim knows this mm_struct has been
+ * used since the last time it cleared the bitmap. So it might be worth
+ * walking the page tables of this mm_struct to clear the accessed bit.
*/
+ WRITE_ONCE(mm->lru_gen.bitmap, -1);
}
-static inline void dec_tlb_flush_pending(struct mm_struct *mm)
+#else /* !CONFIG_LRU_GEN_WALKS_MMU */
+
+static inline void lru_gen_add_mm(struct mm_struct *mm)
{
- /*
- * See inc_tlb_flush_pending().
- *
- * This cannot be smp_mb__before_atomic() because smp_mb() simply does
- * not order against TLB invalidate completion, which is what we need.
- *
- * Therefore we must rely on tlb_flush_*() to guarantee order.
- */
- atomic_dec(&mm->tlb_flush_pending);
}
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+static inline void lru_gen_del_mm(struct mm_struct *mm)
{
- /*
- * Must be called after having acquired the PTL; orders against that
- * PTLs release and therefore ensures that if we observe the modified
- * PTE we must also observe the increment from inc_tlb_flush_pending().
- *
- * That is, it only guarantees to return true if there is a flush
- * pending for _this_ PTL.
- */
- return atomic_read(&mm->tlb_flush_pending);
}
-static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+static inline void lru_gen_migrate_mm(struct mm_struct *mm)
{
- /*
- * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
- * for which there is a TLB flush pending in order to guarantee
- * we've seen both that PTE modification and the increment.
- *
- * (no requirement on actually still holding the PTL, that is irrelevant)
- */
- return atomic_read(&mm->tlb_flush_pending) > 1;
}
+static inline void lru_gen_init_mm(struct mm_struct *mm)
+{
+}
+
+static inline void lru_gen_use_mm(struct mm_struct *mm)
+{
+}
+
+#endif /* CONFIG_LRU_GEN_WALKS_MMU */
+
+struct vma_iterator {
+ struct ma_state mas;
+};
+
+#define VMA_ITERATOR(name, __mm, __addr) \
+ struct vma_iterator name = { \
+ .mas = { \
+ .tree = &(__mm)->mm_mt, \
+ .index = __addr, \
+ .node = NULL, \
+ .status = ma_start, \
+ }, \
+ }
+
+static inline void vma_iter_init(struct vma_iterator *vmi,
+ struct mm_struct *mm, unsigned long addr)
+{
+ mas_init(&vmi->mas, &mm->mm_mt, addr);
+}
+
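/*
 * A minimal sketch of walking every VMA with the iterator above, assuming
 * for_each_vma() from <linux/mm.h> and mmap_lock held for read by the caller.
 */
static inline unsigned long example_count_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long nr = 0;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma)
		nr++;
	return nr;
}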
+#ifdef CONFIG_SCHED_MM_CID
+/*
+ * mm_cpus_allowed: Union of all mm's threads allowed CPUs.
+ */
+static inline cpumask_t *mm_cpus_allowed(struct mm_struct *mm)
+{
+ unsigned long bitmap = (unsigned long)mm;
+
+ bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ /* Skip cpu_bitmap */
+ bitmap += cpumask_size();
+ return (struct cpumask *)bitmap;
+}
+
+/* Accessor for struct mm_struct's cidmask. */
+static inline unsigned long *mm_cidmask(struct mm_struct *mm)
+{
+ unsigned long cid_bitmap = (unsigned long)mm_cpus_allowed(mm);
+
+ /* Skip mm_cpus_allowed */
+ cid_bitmap += cpumask_size();
+ return (unsigned long *)cid_bitmap;
+}
+
+void mm_init_cid(struct mm_struct *mm, struct task_struct *p);
+
+static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
+{
+ mm->mm_cid.pcpu = alloc_percpu_noprof(struct mm_cid_pcpu);
+ if (!mm->mm_cid.pcpu)
+ return -ENOMEM;
+ mm_init_cid(mm, p);
+ return 0;
+}
+#define mm_alloc_cid(...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__))
+
+static inline void mm_destroy_cid(struct mm_struct *mm)
+{
+ free_percpu(mm->mm_cid.pcpu);
+ mm->mm_cid.pcpu = NULL;
+}
+
+static inline unsigned int mm_cid_size(void)
+{
+ /* mm_cpus_allowed(), mm_cidmask(). */
+ return cpumask_size() + bitmap_size(num_possible_cpus());
+}
+
+#else /* CONFIG_SCHED_MM_CID */
+static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { }
+static inline int mm_alloc_cid(struct mm_struct *mm, struct task_struct *p) { return 0; }
+static inline void mm_destroy_cid(struct mm_struct *mm) { }
+static inline unsigned int mm_cid_size(void)
+{
+ return 0;
+}
+#endif /* CONFIG_SCHED_MM_CID */
+
+struct mmu_gather;
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
+extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
+extern void tlb_finish_mmu(struct mmu_gather *tlb);
+
struct vm_fault;
/**
@@ -695,7 +1542,6 @@ typedef __bitwise unsigned int vm_fault_t;
* @VM_FAULT_OOM: Out Of Memory
* @VM_FAULT_SIGBUS: Bad access
* @VM_FAULT_MAJOR: Page read from storage
- * @VM_FAULT_WRITE: Special case for get_user_pages
* @VM_FAULT_HWPOISON: Hit poisoned small page
* @VM_FAULT_HWPOISON_LARGE: Hit poisoned large page. Index encoded
* in upper bits
@@ -708,6 +1554,7 @@ typedef __bitwise unsigned int vm_fault_t;
* @VM_FAULT_NEEDDSYNC: ->fault did not modify page tables and needs
* fsync() to complete (for synchronous page faults
* in DAX)
+ * @VM_FAULT_COMPLETED: ->fault completed, meanwhile mmap lock released
* @VM_FAULT_HINDEX_MASK: mask HINDEX value
*
*/
@@ -715,7 +1562,6 @@ enum vm_fault_reason {
VM_FAULT_OOM = (__force vm_fault_t)0x000001,
VM_FAULT_SIGBUS = (__force vm_fault_t)0x000002,
VM_FAULT_MAJOR = (__force vm_fault_t)0x000004,
- VM_FAULT_WRITE = (__force vm_fault_t)0x000008,
VM_FAULT_HWPOISON = (__force vm_fault_t)0x000010,
VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
VM_FAULT_SIGSEGV = (__force vm_fault_t)0x000040,
@@ -725,6 +1571,7 @@ enum vm_fault_reason {
VM_FAULT_FALLBACK = (__force vm_fault_t)0x000800,
VM_FAULT_DONE_COW = (__force vm_fault_t)0x001000,
VM_FAULT_NEEDDSYNC = (__force vm_fault_t)0x002000,
+ VM_FAULT_COMPLETED = (__force vm_fault_t)0x004000,
VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000,
};
@@ -740,7 +1587,6 @@ enum vm_fault_reason {
{ VM_FAULT_OOM, "OOM" }, \
{ VM_FAULT_SIGBUS, "SIGBUS" }, \
{ VM_FAULT_MAJOR, "MAJOR" }, \
- { VM_FAULT_WRITE, "WRITE" }, \
{ VM_FAULT_HWPOISON, "HWPOISON" }, \
{ VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \
{ VM_FAULT_SIGSEGV, "SIGSEGV" }, \
@@ -749,7 +1595,8 @@ enum vm_fault_reason {
{ VM_FAULT_RETRY, "RETRY" }, \
{ VM_FAULT_FALLBACK, "FALLBACK" }, \
{ VM_FAULT_DONE_COW, "DONE_COW" }, \
- { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" }
+ { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" }, \
+ { VM_FAULT_COMPLETED, "COMPLETED" }
struct vm_special_mapping {
const char *name; /* The name, e.g. "[vdso]". */
@@ -772,6 +1619,9 @@ struct vm_special_mapping {
int (*mremap)(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma);
+
+ void (*close)(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma);
};
enum tlb_flush_reason {
@@ -780,15 +1630,256 @@ enum tlb_flush_reason {
TLB_LOCAL_SHOOTDOWN,
TLB_LOCAL_MM_SHOOTDOWN,
TLB_REMOTE_SEND_IPI,
+ TLB_REMOTE_WRONG_CPU,
NR_TLB_FLUSH_REASONS,
};
- /*
- * A swap entry has to fit into a "unsigned long", as the entry is hidden
- * in the "index" field of the swapper address space.
- */
-typedef struct {
- unsigned long val;
-} swp_entry_t;
+/**
+ * enum fault_flag - Fault flag definitions.
+ * @FAULT_FLAG_WRITE: Fault was a write fault.
+ * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
+ * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
+ * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
+ * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
+ * @FAULT_FLAG_TRIED: The fault has been tried once.
+ * @FAULT_FLAG_USER: The fault originated in userspace.
+ * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
+ * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
+ * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
+ * @FAULT_FLAG_UNSHARE: The fault is an unsharing request to break COW in a
+ * COW mapping, making sure that an exclusive anon page is
+ * mapped after the fault.
+ * @FAULT_FLAG_ORIG_PTE_VALID: whether the fault has vmf->orig_pte cached.
+ *				We should only access orig_pte if this flag is set.
+ * @FAULT_FLAG_VMA_LOCK: The fault is handled under VMA lock.
+ *
+ * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
+ * whether we would allow page faults to retry by specifying these two
+ * fault flags correctly. Currently there can be three legal combinations:
+ *
+ * (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and
+ * this is the first try
+ *
+ * (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and
+ * we've already tried at least once
+ *
+ * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
+ *
+ * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
+ * be used. Note that page faults can be allowed to retry multiple times,
+ * in which case we'll have an initial fault with flags (a) and then later on
+ * continuous faults with flags (b). We should always try to detect pending
+ * signals before a retry to make sure the continuous page faults can still be
+ * interrupted if necessary.
+ *
+ * The combination FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE is illegal.
+ * FAULT_FLAG_UNSHARE is ignored and treated like an ordinary read fault when
+ * applied to mappings that are not COW mappings.
+ */
+enum fault_flag {
+ FAULT_FLAG_WRITE = 1 << 0,
+ FAULT_FLAG_MKWRITE = 1 << 1,
+ FAULT_FLAG_ALLOW_RETRY = 1 << 2,
+ FAULT_FLAG_RETRY_NOWAIT = 1 << 3,
+ FAULT_FLAG_KILLABLE = 1 << 4,
+ FAULT_FLAG_TRIED = 1 << 5,
+ FAULT_FLAG_USER = 1 << 6,
+ FAULT_FLAG_REMOTE = 1 << 7,
+ FAULT_FLAG_INSTRUCTION = 1 << 8,
+ FAULT_FLAG_INTERRUPTIBLE = 1 << 9,
+ FAULT_FLAG_UNSHARE = 1 << 10,
+ FAULT_FLAG_ORIG_PTE_VALID = 1 << 11,
+ FAULT_FLAG_VMA_LOCK = 1 << 12,
+};
+
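/*
 * A small sketch encoding the retry rules documented above: of the four
 * ALLOW_RETRY/TRIED combinations, only (!ALLOW_RETRY && TRIED) is illegal.
 */
static inline bool example_fault_flags_legal(enum fault_flag flags)
{
	return (flags & FAULT_FLAG_ALLOW_RETRY) || !(flags & FAULT_FLAG_TRIED);
}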
+typedef unsigned int __bitwise zap_flags_t;
+
+/* Flags for clear_young_dirty_ptes(). */
+typedef int __bitwise cydp_t;
+
+/* Clear the access bit */
+#define CYDP_CLEAR_YOUNG ((__force cydp_t)BIT(0))
+
+/* Clear the dirty bit */
+#define CYDP_CLEAR_DIRTY ((__force cydp_t)BIT(1))
+
+/*
+ * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
+ * other. Here is what they mean, and how to use them:
+ *
+ *
+ * FIXME: For pages which are part of a filesystem, mappings are subject to the
+ * lifetime enforced by the filesystem and we need guarantees that longterm
+ * users like RDMA and V4L2 only establish mappings which coordinate usage with
+ * the filesystem. Ideas for this coordination include revoking the longterm
+ * pin, delaying writeback, bounce buffer page writeback, etc. As FS DAX was
+ * added after the problem with filesystems was found, FS DAX VMAs are
+ * specifically failed. Filesystem pages are still subject to bugs and use of
+ * FOLL_LONGTERM should be avoided on those pages.
+ *
+ * In the CMA case: long term pins in a CMA region would unnecessarily fragment
+ * that region. And so, CMA attempts to migrate the page before pinning, when
+ * FOLL_LONGTERM is specified.
+ *
+ * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
+ * but an additional pin counting system) will be invoked. This is intended for
+ * anything that gets a page reference and then touches page data (for example,
+ * Direct IO). This lets the filesystem know that some non-file-system entity is
+ * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
+ * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
+ * a call to unpin_user_page().
+ *
+ * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
+ * and separate refcounting mechanisms, however, and that means that each has
+ * its own acquire and release mechanisms:
+ *
+ * FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
+ *
+ * FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_pages() to release.
+ *
+ * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
+ * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
+ * calls applied to them, and that's perfectly OK. This is a constraint on the
+ * callers, not on the pages.)
+ *
+ * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
+ * directly by the caller. That's in order to help avoid mismatches when
+ * releasing pages: get_user_pages*() pages must be released via put_page(),
+ * while pin_user_pages*() pages must be released via unpin_user_page().
+ *
+ * Please see Documentation/core-api/pin_user_pages.rst for more information.
+ */
+
+enum {
+ /* check pte is writable */
+ FOLL_WRITE = 1 << 0,
+ /* do get_page on page */
+ FOLL_GET = 1 << 1,
+ /* give error on hole if it would be zero */
+ FOLL_DUMP = 1 << 2,
+ /* get_user_pages read/write w/o permission */
+ FOLL_FORCE = 1 << 3,
+ /*
+ * if a disk transfer is needed, start the IO and return without waiting
+ * upon it
+ */
+ FOLL_NOWAIT = 1 << 4,
+ /* do not fault in pages */
+ FOLL_NOFAULT = 1 << 5,
+ /* check page is hwpoisoned */
+ FOLL_HWPOISON = 1 << 6,
+ /* don't do file mappings */
+ FOLL_ANON = 1 << 7,
+ /*
+ * FOLL_LONGTERM indicates that the page will be held for an indefinite
+ * time period _often_ under userspace control. This is in contrast to
+ * iov_iter_get_pages(), whose usages are transient.
+ */
+ FOLL_LONGTERM = 1 << 8,
+ /* split huge pmd before returning */
+ FOLL_SPLIT_PMD = 1 << 9,
+ /* allow returning PCI P2PDMA pages */
+ FOLL_PCI_P2PDMA = 1 << 10,
+ /* allow interrupts from generic signals */
+ FOLL_INTERRUPTIBLE = 1 << 11,
+ /*
+ * Always honor (trigger) NUMA hinting faults.
+ *
+ * FOLL_WRITE implicitly honors NUMA hinting faults because a
+ * PROT_NONE-mapped page is not writable (exceptions with FOLL_FORCE
+ * apply). get_user_pages_fast_only() always implicitly honors NUMA
+ * hinting faults.
+ */
+ FOLL_HONOR_NUMA_FAULT = 1 << 12,
+
+ /* See also internal only FOLL flags in mm/internal.h */
+};
+
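/*
 * A hedged sketch of the FOLL_PIN lifecycle described above, assuming the
 * pin_user_pages_fast()/unpin_user_pages() API from <linux/mm.h>; error
 * handling and the actual I/O are elided.
 */
static int example_pin_for_dio(unsigned long uaddr, int nr_pages,
			       struct page **pages)
{
	int pinned;

	/* FOLL_PIN itself is set internally by pin_user_pages*(). */
	pinned = pin_user_pages_fast(uaddr, nr_pages,
				     FOLL_WRITE | FOLL_LONGTERM, pages);
	if (pinned <= 0)
		return pinned;

	/* ... touch page data, e.g. Direct IO ... */

	unpin_user_pages(pages, pinned);
	return 0;
}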
+/* mm flags */
+
+/*
+ * The first two bits represent core dump modes for set-user-ID,
+ * the modes are SUID_DUMP_* defined in linux/sched/coredump.h
+ */
+#define MMF_DUMPABLE_BITS 2
+#define MMF_DUMPABLE_MASK (BIT(MMF_DUMPABLE_BITS) - 1)
+/* coredump filter bits */
+#define MMF_DUMP_ANON_PRIVATE 2
+#define MMF_DUMP_ANON_SHARED 3
+#define MMF_DUMP_MAPPED_PRIVATE 4
+#define MMF_DUMP_MAPPED_SHARED 5
+#define MMF_DUMP_ELF_HEADERS 6
+#define MMF_DUMP_HUGETLB_PRIVATE 7
+#define MMF_DUMP_HUGETLB_SHARED 8
+#define MMF_DUMP_DAX_PRIVATE 9
+#define MMF_DUMP_DAX_SHARED 10
+
+#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
+#define MMF_DUMP_FILTER_BITS 9
+#define MMF_DUMP_FILTER_MASK \
+ ((BIT(MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
+#define MMF_DUMP_FILTER_DEFAULT \
+ (BIT(MMF_DUMP_ANON_PRIVATE) | BIT(MMF_DUMP_ANON_SHARED) | \
+ BIT(MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
+
+#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
+# define MMF_DUMP_MASK_DEFAULT_ELF BIT(MMF_DUMP_ELF_HEADERS)
+#else
+# define MMF_DUMP_MASK_DEFAULT_ELF 0
+#endif
+ /* leave room for more dump flags */
+#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
+#define MMF_VM_HUGEPAGE 17 /* set when mm is available for khugepaged */
+
+#define MMF_HUGE_ZERO_FOLIO 18 /* mm has ever used the global huge zero folio */
+
+#define MMF_HAS_UPROBES 19 /* has uprobes */
+#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
+#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */
+#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
+#define MMF_DISABLE_THP_EXCEPT_ADVISED 23 /* no THP except when advised (e.g., VM_HUGEPAGE) */
+#define MMF_DISABLE_THP_COMPLETELY 24 /* no THP for all VMAs */
+#define MMF_DISABLE_THP_MASK (BIT(MMF_DISABLE_THP_COMPLETELY) | \
+ BIT(MMF_DISABLE_THP_EXCEPT_ADVISED))
+#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */
+#define MMF_MULTIPROCESS 26 /* mm is shared between processes */
+/*
+ * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either
+ * replaced in the future by mm.pinned_vm when it becomes stable, or grow into
+ * a counter on its own. We're aggressive on this bit for now: even if the
+ * pinned pages were unpinned later on, we'll still keep this bit set for the
+ * lifecycle of this mm, just for simplicity.
+ */
+#define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */
+
+#define MMF_HAS_MDWE 28
+#define MMF_HAS_MDWE_MASK BIT(MMF_HAS_MDWE)
+
+#define MMF_HAS_MDWE_NO_INHERIT 29
+
+#define MMF_VM_MERGE_ANY 30
+#define MMF_VM_MERGE_ANY_MASK BIT(MMF_VM_MERGE_ANY)
+
+#define MMF_TOPDOWN 31 /* mm searches top down by default */
+#define MMF_TOPDOWN_MASK BIT(MMF_TOPDOWN)
+
+#define MMF_INIT_LEGACY_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
+ MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\
+ MMF_VM_MERGE_ANY_MASK | MMF_TOPDOWN_MASK)
+
+/* Legacy flags must fit within 32 bits. */
+static_assert((u64)MMF_INIT_LEGACY_MASK <= (u64)UINT_MAX);
+
+/*
+ * Initialise legacy flags according to masks, propagating selected flags on
+ * fork. Further flag manipulation can be performed by the caller.
+ */
+static inline unsigned long mmf_init_legacy_flags(unsigned long flags)
+{
+ if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT))
+ flags &= ~((1UL << MMF_HAS_MDWE) |
+ (1UL << MMF_HAS_MDWE_NO_INHERIT));
+ return flags & MMF_INIT_LEGACY_MASK;
+}
#endif /* _LINUX_MM_TYPES_H */
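
Illustration of the no-inherit rule above (values hypothetical, not part of this patch):

static inline void mdwe_inherit_example(void)
{
	/* A parent that set MDWE with MDWE_NO_INHERIT forks a child whose
	 * MDWE state is dropped, while other legacy flags propagate. */
	unsigned long parent = BIT(MMF_HAS_MDWE) |
			       BIT(MMF_HAS_MDWE_NO_INHERIT) |
			       BIT(MMF_TOPDOWN);
	unsigned long child = mmf_init_legacy_flags(parent);

	/* child == BIT(MMF_TOPDOWN): both MDWE bits were cleared. */
	WARN_ON(child != BIT(MMF_TOPDOWN));
}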
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index c1bc6731125c..a82aa80c0ba4 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -8,10 +8,8 @@
* (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
*/
+#include <linux/align.h>
#include <linux/types.h>
-#include <linux/threads.h>
-#include <linux/atomic.h>
-#include <linux/cpumask.h>
#include <asm/page.h>
@@ -19,24 +17,9 @@
#include <asm/tlbbatch.h>
#endif
-#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
-#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
- IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
/*
- * The per task VMA cache array:
- */
-#define VMACACHE_BITS 2
-#define VMACACHE_SIZE (1U << VMACACHE_BITS)
-#define VMACACHE_MASK (VMACACHE_SIZE - 1)
-
-struct vmacache {
- u64 seqnum;
- struct vm_area_struct *vmas[VMACACHE_SIZE];
-};
-
-/*
* When updating this, please also update struct resident_page_types[] in
* kernel/fork.c
*/
@@ -48,18 +31,7 @@ enum {
NR_MM_COUNTERS
};
-#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
-#define SPLIT_RSS_COUNTING
-/* per-thread cached information, */
-struct task_rss_stat {
- int events; /* for synchronization threshold */
- int count[NR_MM_COUNTERS];
-};
-#endif /* USE_SPLIT_PTE_PTLOCKS */
-
-struct mm_rss_stat {
- atomic_long_t count[NR_MM_COUNTERS];
-};
+struct page;
struct page_frag {
struct page *page;
@@ -72,13 +44,33 @@ struct page_frag {
#endif
};
+#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
+#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+struct page_frag_cache {
+ /* encoded_page consists of the virtual address, pfmemalloc bit and
+ * order of a page.
+ */
+ unsigned long encoded_page;
+
+	/* we maintain a pagecount bias, so that we don't dirty the cache
+	 * line containing page->_refcount every time we allocate a fragment.
+ */
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32)
+ __u16 offset;
+ __u16 pagecnt_bias;
+#else
+ __u32 offset;
+ __u32 pagecnt_bias;
+#endif
+};
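
An illustrative consumer of this cache (not part of the patch); page_frag_alloc() and page_frag_free() are the existing mainline entry points, header location assumed:

#include <linux/gfp.h>

/* Carve 'len' bytes out of the cached page. Thanks to pagecnt_bias,
 * no atomic update of page->_refcount happens per fragment. */
static void *alloc_rx_frag(struct page_frag_cache *nc, unsigned int len)
{
	return page_frag_alloc(nc, len, GFP_ATOMIC);
}
/* Release later with page_frag_free(ptr). */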
+
/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
* The arch code makes the following promise: generic code can modify a
- * PTE, then call arch_tlbbatch_add_mm() (which internally provides all
- * needed barriers), then call arch_tlbbatch_flush(), and the entries
+ * PTE, then call arch_tlbbatch_add_pending() (which internally provides
+ * all needed barriers), then call arch_tlbbatch_flush(), and the entries
* will be flushed on all CPUs by the time that arch_tlbbatch_flush()
* returns.
*/
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 629cefc4ecba..0ba8a7e8b90a 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H
+#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu_counter.h>
@@ -15,6 +16,9 @@
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
+#ifndef MAP_ABOVE4G
+#define MAP_ABOVE4G 0
+#endif
#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB 0
#endif
@@ -31,6 +35,9 @@
/*
* The historical set of flags that all mmap implementations implicitly
* support when a ->mmap_validate() op is not provided in file_operations.
+ *
+ * MAP_EXECUTABLE and MAP_DENYWRITE are completely ignored throughout the
+ * kernel.
*/
#define LEGACY_MAP_MASK (MAP_SHARED \
| MAP_PRIVATE \
@@ -47,12 +54,11 @@
| MAP_STACK \
| MAP_HUGETLB \
| MAP_32BIT \
+ | MAP_ABOVE4G \
| MAP_HUGE_2MB \
| MAP_HUGE_1GB)
extern int sysctl_overcommit_memory;
-extern int sysctl_overcommit_ratio;
-extern unsigned long sysctl_overcommit_kbytes;
extern struct percpu_counter vm_committed_as;
#ifdef CONFIG_SMP
@@ -87,11 +93,7 @@ static inline void vm_unacct_memory(long pages)
#endif
#ifndef arch_calc_vm_flag_bits
-#define arch_calc_vm_flag_bits(flags) 0
-#endif
-
-#ifndef arch_vm_get_page_prot
-#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
+#define arch_calc_vm_flag_bits(file, flags) 0
#endif
#ifndef arch_validate_prot
@@ -135,7 +137,7 @@ static inline bool arch_validate_flags(unsigned long flags)
/*
* Combine the mmap "prot" argument into "vm_flags" used internally.
*/
-static inline unsigned long
+static inline vm_flags_t
calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
{
return _calc_vm_trans(prot, PROT_READ, VM_READ ) |
@@ -147,15 +149,74 @@ calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
/*
* Combine the mmap "flags" argument into "vm_flags" used internally.
*/
-static inline unsigned long
-calc_vm_flag_bits(unsigned long flags)
+static inline vm_flags_t
+calc_vm_flag_bits(struct file *file, unsigned long flags)
{
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
- _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) |
_calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) |
_calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) |
- arch_calc_vm_flag_bits(flags);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ _calc_vm_trans(flags, MAP_STACK, VM_NOHUGEPAGE) |
+#endif
+ arch_calc_vm_flag_bits(file, flags);
}
unsigned long vm_commit_limit(void);
+
+#ifndef arch_memory_deny_write_exec_supported
+static inline bool arch_memory_deny_write_exec_supported(void)
+{
+ return true;
+}
+#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
+#endif
+
+/*
+ * Denies creating a writable executable mapping or gaining executable permissions.
+ *
+ * This denies the following:
+ *
+ * a) mmap(PROT_WRITE | PROT_EXEC)
+ *
+ * b) mmap(PROT_WRITE)
+ * mprotect(PROT_EXEC)
+ *
+ * c) mmap(PROT_WRITE)
+ * mprotect(PROT_READ)
+ * mprotect(PROT_EXEC)
+ *
+ * But allows the following:
+ *
+ * d) mmap(PROT_READ | PROT_EXEC)
+ * mmap(PROT_READ | PROT_EXEC | PROT_BTI)
+ *
+ * This is only applicable if the user has set the Memory-Deny-Write-Execute
+ * (MDWE) protection mask for the current process.
+ *
+ * @old specifies the VMA flags the VMA originally possessed, and @new the ones
+ * we propose to set.
+ *
+ * Return: false if proposed change is OK, true if not ok and should be denied.
+ */
+static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
+{
+ /* If MDWE is disabled, we have nothing to deny. */
+ if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
+ return false;
+
+ /* If the new VMA is not executable, we have nothing to deny. */
+ if (!(new & VM_EXEC))
+ return false;
+
+ /* Under MDWE we do not accept newly writably executable VMAs... */
+ if (new & VM_WRITE)
+ return true;
+
+ /* ...nor previously non-executable VMAs becoming executable. */
+ if (!(old & VM_EXEC))
+ return true;
+
+ return false;
+}
+
#endif /* _LINUX_MMAN_H */
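
The cases (a)-(d) above can be exercised from userspace once MDWE is armed via prctl(); an illustrative, runnable sketch (not part of the patch):

#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_MDWE
#define PR_SET_MDWE			65
#define PR_MDWE_REFUSE_EXEC_GAIN	(1UL << 0)
#endif

int main(void)
{
	void *p;

	if (prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0L, 0L, 0L))
		perror("PR_SET_MDWE");

	/* Case (a): a W|X mapping is now refused (EACCES expected). */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		perror("mmap(W|X)");
	return 0;
}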
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 0540f0156f58..d53f72dba7fe 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -1,12 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMAP_LOCK_H
#define _LINUX_MMAP_LOCK_H
+/* Avoid a header dependency loop by declaring this here. */
+extern int rcuwait_wake_up(struct rcuwait *w);
+
#include <linux/lockdep.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/rwsem.h>
#include <linux/tracepoint-defs.h>
#include <linux/types.h>
+#include <linux/cleanup.h>
+#include <linux/sched/mm.h>
#define MMAP_LOCK_INITIALIZER(name) \
.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
@@ -60,15 +66,274 @@ static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
#endif /* CONFIG_TRACING */
-static inline void mmap_init_lock(struct mm_struct *mm)
+static inline void mmap_assert_locked(const struct mm_struct *mm)
+{
+ rwsem_assert_held(&mm->mmap_lock);
+}
+
+static inline void mmap_assert_write_locked(const struct mm_struct *mm)
+{
+ rwsem_assert_held_write(&mm->mmap_lock);
+}
+
+#ifdef CONFIG_PER_VMA_LOCK
+
+static inline void mm_lock_seqcount_init(struct mm_struct *mm)
{
- init_rwsem(&mm->mmap_lock);
+ seqcount_init(&mm->mm_lock_seq);
}
+static inline void mm_lock_seqcount_begin(struct mm_struct *mm)
+{
+ do_raw_write_seqcount_begin(&mm->mm_lock_seq);
+}
+
+static inline void mm_lock_seqcount_end(struct mm_struct *mm)
+{
+ ASSERT_EXCLUSIVE_WRITER(mm->mm_lock_seq);
+ do_raw_write_seqcount_end(&mm->mm_lock_seq);
+}
+
+static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
+{
+ /*
+ * Since mmap_lock is a sleeping lock, and waiting for it to become
+	 * unlocked is more or less equivalent to taking it ourselves, don't
+ * bother with the speculative path if mmap_lock is already write-locked
+ * and take the slow path, which takes the lock.
+ */
+ return raw_seqcount_try_begin(&mm->mm_lock_seq, *seq);
+}
+
+static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
+{
+ return read_seqcount_retry(&mm->mm_lock_seq, seq);
+}
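
Illustrative pattern these two helpers enable (not part of the patch): read mm state without taking mmap_lock, then validate that no writer intervened.

static bool read_mm_speculatively(struct mm_struct *mm)
{
	unsigned int seq;

	/* Fails immediately if a writer currently holds mmap_lock. */
	if (!mmap_lock_speculate_try_begin(mm, &seq))
		return false;

	/* ... lockless reads of mm state ... */

	/* True only if no write-lock cycle occurred in between. */
	return !mmap_lock_speculate_retry(mm, seq);
}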
+
+static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ static struct lock_class_key lockdep_key;
+
+ lockdep_init_map(&vma->vmlock_dep_map, "vm_lock", &lockdep_key, 0);
+#endif
+ if (reset_refcnt)
+ refcount_set(&vma->vm_refcnt, 0);
+ vma->vm_lock_seq = UINT_MAX;
+}
+
+static inline bool is_vma_writer_only(int refcnt)
+{
+ /*
+ * With a writer and no readers, refcnt is VMA_LOCK_OFFSET if the vma
+ * is detached and (VMA_LOCK_OFFSET + 1) if it is attached. Waiting on
+ * a detached vma happens only in vma_mark_detached() and is a rare
+	 * case, so most of the time there will be no unnecessary wakeup.
+ */
+ return (refcnt & VMA_LOCK_OFFSET) && refcnt <= VMA_LOCK_OFFSET + 1;
+}
+
+static inline void vma_refcount_put(struct vm_area_struct *vma)
+{
+ /* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */
+ struct mm_struct *mm = vma->vm_mm;
+ int oldcnt;
+
+ rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
+	if (!__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt)) {
+		if (is_vma_writer_only(oldcnt - 1))
+ rcuwait_wake_up(&mm->vma_writer_wait);
+ }
+}
+
+/*
+ * Use only while holding the mmap read lock, which guarantees that locking
+ * cannot fail (nobody can concurrently write-lock the vma). vma_start_read()
+ * should not be used in such cases because it might fail due to mm_lock_seq
+ * overflow. This is used to obtain a vma read lock before dropping the mmap
+ * read lock.
+ */
+static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass)
+{
+ int oldcnt;
+
+ mmap_assert_locked(vma->vm_mm);
+ if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
+ VMA_REF_LIMIT)))
+ return false;
+
+ rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
+ return true;
+}
+
+/*
+ * Use only while holding the mmap read lock, which guarantees that locking
+ * cannot fail (nobody can concurrently write-lock the vma). vma_start_read()
+ * should not be used in such cases because it might fail due to mm_lock_seq
+ * overflow. This is used to obtain a vma read lock before dropping the mmap
+ * read lock.
+ */
+static inline bool vma_start_read_locked(struct vm_area_struct *vma)
+{
+ return vma_start_read_locked_nested(vma, 0);
+}
+
+static inline void vma_end_read(struct vm_area_struct *vma)
+{
+ vma_refcount_put(vma);
+}
+
+/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
+static inline bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
+{
+ mmap_assert_write_locked(vma->vm_mm);
+
+ /*
+	 * The current task is holding mmap_write_lock, so neither
+	 * vma->vm_lock_seq nor mm->mm_lock_seq can be concurrently modified.
+ */
+ *mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence;
+ return (vma->vm_lock_seq == *mm_lock_seq);
+}
+
+int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
+ int state);
+
+/*
+ * Begin writing to a VMA.
+ * Exclude concurrent readers under the per-VMA lock until the currently
+ * write-locked mmap_lock is dropped or downgraded.
+ */
+static inline void vma_start_write(struct vm_area_struct *vma)
+{
+ unsigned int mm_lock_seq;
+
+ if (__is_vma_write_locked(vma, &mm_lock_seq))
+ return;
+
+ __vma_start_write(vma, mm_lock_seq, TASK_UNINTERRUPTIBLE);
+}
+
+/**
+ * vma_start_write_killable - Begin writing to a VMA.
+ * @vma: The VMA we are going to modify.
+ *
+ * Exclude concurrent readers under the per-VMA lock until the currently
+ * write-locked mmap_lock is dropped or downgraded.
+ *
+ * Context: May sleep while waiting for readers to drop the vma read lock.
+ * Caller must already hold the mmap_lock for write.
+ *
+ * Return: 0 for a successful acquisition. -EINTR if a fatal signal was
+ * received.
+ */
+static inline __must_check
+int vma_start_write_killable(struct vm_area_struct *vma)
+{
+ unsigned int mm_lock_seq;
+
+ if (__is_vma_write_locked(vma, &mm_lock_seq))
+ return 0;
+ return __vma_start_write(vma, mm_lock_seq, TASK_KILLABLE);
+}
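
Illustrative caller (not part of the patch), showing the intended error handling; the caller already holds mmap_write_lock:

static int modify_vma(struct vm_area_struct *vma)
{
	int err = vma_start_write_killable(vma);

	if (err)
		return err;	/* -EINTR: a fatal signal arrived */

	/* ... modify the VMA; per-VMA readers stay excluded until the
	 * write-locked mmap_lock is dropped or downgraded ... */
	return 0;
}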
+
+static inline void vma_assert_write_locked(struct vm_area_struct *vma)
+{
+ unsigned int mm_lock_seq;
+
+ VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
+}
+
+static inline void vma_assert_locked(struct vm_area_struct *vma)
+{
+ unsigned int mm_lock_seq;
+
+ VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
+ !__is_vma_write_locked(vma, &mm_lock_seq), vma);
+}
+
+/*
+ * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these
+ * assertions should be made either under mmap_write_lock or when the object
+ * has been isolated under mmap_write_lock, ensuring no competing writers.
+ */
+static inline void vma_assert_attached(struct vm_area_struct *vma)
+{
+ WARN_ON_ONCE(!refcount_read(&vma->vm_refcnt));
+}
+
+static inline void vma_assert_detached(struct vm_area_struct *vma)
+{
+ WARN_ON_ONCE(refcount_read(&vma->vm_refcnt));
+}
+
+static inline void vma_mark_attached(struct vm_area_struct *vma)
+{
+ vma_assert_write_locked(vma);
+ vma_assert_detached(vma);
+ refcount_set_release(&vma->vm_refcnt, 1);
+}
+
+void vma_mark_detached(struct vm_area_struct *vma);
+
+struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
+ unsigned long address);
+
+/*
+ * Locks the next vma pointed to by the iterator. Confirms the locked vma has
+ * not been modified and retries under mmap_lock protection if a modification
+ * was detected. Must be called from within an RCU read-side critical section.
+ * Returns a valid locked VMA, NULL if there are no more VMAs, or -EINTR if
+ * the process was interrupted.
+ */
+struct vm_area_struct *lock_next_vma(struct mm_struct *mm,
+ struct vma_iterator *iter,
+ unsigned long address);
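
Illustrative fast-path shape (not part of the patch), mirroring how page fault code uses these helpers:

static vm_fault_t fault_fast_path(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = lock_vma_under_rcu(mm, addr);

	if (!vma)
		return VM_FAULT_RETRY;	/* fall back to the mmap_read_lock() path */

	/* ... handle the fault under the per-VMA read lock ... */
	vma_end_read(vma);
	return 0;
}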
+
+#else /* CONFIG_PER_VMA_LOCK */
+
+static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
+static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {}
+static inline void mm_lock_seqcount_end(struct mm_struct *mm) {}
+
+static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
+{
+ return false;
+}
+
+static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
+{
+ return true;
+}
+static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) {}
+static inline void vma_end_read(struct vm_area_struct *vma) {}
+static inline void vma_start_write(struct vm_area_struct *vma) {}
+static inline __must_check
+int vma_start_write_killable(struct vm_area_struct *vma) { return 0; }
+static inline void vma_assert_write_locked(struct vm_area_struct *vma)
+ { mmap_assert_write_locked(vma->vm_mm); }
+static inline void vma_assert_attached(struct vm_area_struct *vma) {}
+static inline void vma_assert_detached(struct vm_area_struct *vma) {}
+static inline void vma_mark_attached(struct vm_area_struct *vma) {}
+static inline void vma_mark_detached(struct vm_area_struct *vma) {}
+
+static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
+ unsigned long address)
+{
+ return NULL;
+}
+
+static inline void vma_assert_locked(struct vm_area_struct *vma)
+{
+ mmap_assert_locked(vma->vm_mm);
+}
+
+#endif /* CONFIG_PER_VMA_LOCK */
+
static inline void mmap_write_lock(struct mm_struct *mm)
{
__mmap_lock_trace_start_locking(mm, true);
down_write(&mm->mmap_lock);
+ mm_lock_seqcount_begin(mm);
__mmap_lock_trace_acquire_returned(mm, true, true);
}
@@ -76,6 +341,7 @@ static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
__mmap_lock_trace_start_locking(mm, true);
down_write_nested(&mm->mmap_lock, subclass);
+ mm_lock_seqcount_begin(mm);
__mmap_lock_trace_acquire_returned(mm, true, true);
}
@@ -85,30 +351,38 @@ static inline int mmap_write_lock_killable(struct mm_struct *mm)
__mmap_lock_trace_start_locking(mm, true);
ret = down_write_killable(&mm->mmap_lock);
+ if (!ret)
+ mm_lock_seqcount_begin(mm);
__mmap_lock_trace_acquire_returned(mm, true, ret == 0);
return ret;
}
-static inline bool mmap_write_trylock(struct mm_struct *mm)
+/*
+ * Drop all currently-held per-VMA locks.
+ * This is called from the mmap_lock implementation directly before releasing
+ * a write-locked mmap_lock (or downgrading it to read-locked).
+ * This should normally NOT be called manually from other places.
+ * If you want to call this manually anyway, keep in mind that this will release
+ * *all* VMA write locks, including ones from further up the stack.
+ */
+static inline void vma_end_write_all(struct mm_struct *mm)
{
- bool ret;
-
- __mmap_lock_trace_start_locking(mm, true);
- ret = down_write_trylock(&mm->mmap_lock) != 0;
- __mmap_lock_trace_acquire_returned(mm, true, ret);
- return ret;
+ mmap_assert_write_locked(mm);
+ mm_lock_seqcount_end(mm);
}
static inline void mmap_write_unlock(struct mm_struct *mm)
{
- up_write(&mm->mmap_lock);
__mmap_lock_trace_released(mm, true);
+ vma_end_write_all(mm);
+ up_write(&mm->mmap_lock);
}
static inline void mmap_write_downgrade(struct mm_struct *mm)
{
- downgrade_write(&mm->mmap_lock);
__mmap_lock_trace_acquire_returned(mm, false, true);
+ vma_end_write_all(mm);
+ downgrade_write(&mm->mmap_lock);
}
static inline void mmap_read_lock(struct mm_struct *mm)
@@ -140,35 +414,17 @@ static inline bool mmap_read_trylock(struct mm_struct *mm)
static inline void mmap_read_unlock(struct mm_struct *mm)
{
- up_read(&mm->mmap_lock);
__mmap_lock_trace_released(mm, false);
+ up_read(&mm->mmap_lock);
}
-static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
-{
- if (mmap_read_trylock(mm)) {
- rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
- return true;
- }
- return false;
-}
+DEFINE_GUARD(mmap_read_lock, struct mm_struct *,
+ mmap_read_lock(_T), mmap_read_unlock(_T))
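
With the guard defined, scoped locking via linux/cleanup.h looks like this (illustrative, not part of the patch):

static void inspect_mm(struct mm_struct *mm)
{
	guard(mmap_read_lock)(mm);	/* released automatically at scope exit */

	/* ... walk VMAs under the read lock; early returns are safe ... */
}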
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
- up_read_non_owner(&mm->mmap_lock);
__mmap_lock_trace_released(mm, false);
-}
-
-static inline void mmap_assert_locked(struct mm_struct *mm)
-{
- lockdep_assert_held(&mm->mmap_lock);
- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
-}
-
-static inline void mmap_assert_write_locked(struct mm_struct *mm)
-{
- lockdep_assert_held_write(&mm->mmap_lock);
- VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
+ up_read_non_owner(&mm->mmap_lock);
}
static inline int mmap_lock_is_contended(struct mm_struct *mm)
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index f9ad35dd6012..e9e964c20e53 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -32,9 +32,10 @@ struct mmc_csd {
unsigned int r2w_factor;
unsigned int max_dtr;
unsigned int erase_size; /* In sectors */
+ unsigned int wp_grp_size;
unsigned int read_blkbits;
unsigned int write_blkbits;
- unsigned int capacity;
+ sector_t capacity;
unsigned int read_partial:1,
read_misalign:1,
write_partial:1,
@@ -52,9 +53,6 @@ struct mmc_ext_csd {
u8 part_config;
u8 cache_ctrl;
u8 rst_n_function;
- u8 max_packed_writes;
- u8 max_packed_reads;
- u8 packed_event_en;
unsigned int part_time; /* Units: ms */
unsigned int sa_timeout; /* Units: 100ns */
unsigned int generic_cmd6_time; /* Units: 10ms */
@@ -109,6 +107,7 @@ struct mmc_ext_csd {
u8 raw_hc_erase_gap_size; /* 221 */
u8 raw_erase_timeout_mult; /* 223 */
u8 raw_hc_erase_grp_size; /* 224 */
+ u8 raw_boot_mult; /* 226 */
u8 raw_sec_trim_mult; /* 229 */
u8 raw_sec_erase_mult; /* 230 */
u8 raw_sec_feature_support;/* 231 */
@@ -139,6 +138,8 @@ struct sd_scr {
unsigned char cmds;
#define SD_SCR_CMD20_SUPPORT (1<<0)
#define SD_SCR_CMD23_SUPPORT (1<<1)
+#define SD_SCR_CMD48_SUPPORT (1<<2)
+#define SD_SCR_CMD58_SUPPORT (1<<3)
};
struct sd_ssr {
@@ -181,12 +182,64 @@ struct sd_switch_caps {
#define SD_SET_CURRENT_LIMIT_400 1
#define SD_SET_CURRENT_LIMIT_600 2
#define SD_SET_CURRENT_LIMIT_800 3
-#define SD_SET_CURRENT_NO_CHANGE (-1)
#define SD_MAX_CURRENT_200 (1 << SD_SET_CURRENT_LIMIT_200)
#define SD_MAX_CURRENT_400 (1 << SD_SET_CURRENT_LIMIT_400)
#define SD_MAX_CURRENT_600 (1 << SD_SET_CURRENT_LIMIT_600)
#define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800)
+
+#define SD4_SET_POWER_LIMIT_0_72W 0
+#define SD4_SET_POWER_LIMIT_1_44W 1
+#define SD4_SET_POWER_LIMIT_2_16W 2
+#define SD4_SET_POWER_LIMIT_2_88W 3
+#define SD4_SET_POWER_LIMIT_1_80W 4
+};
+
+struct sd_ext_reg {
+ u8 fno;
+ u8 page;
+ u16 offset;
+ u8 rev;
+ u8 feature_enabled;
+ u8 feature_support;
+/* Power Management Function. */
+#define SD_EXT_POWER_OFF_NOTIFY (1<<0)
+#define SD_EXT_POWER_SUSTENANCE (1<<1)
+#define SD_EXT_POWER_DOWN_MODE (1<<2)
+/* Performance Enhancement Function. */
+#define SD_EXT_PERF_FX_EVENT (1<<0)
+#define SD_EXT_PERF_CARD_MAINT (1<<1)
+#define SD_EXT_PERF_HOST_MAINT (1<<2)
+#define SD_EXT_PERF_CACHE (1<<3)
+#define SD_EXT_PERF_CMD_QUEUE (1<<4)
+};
+
+struct sd_uhs2_config {
+ u32 node_id;
+
+ u32 n_fcu;
+ u32 maxblk_len;
+ u8 n_lanes;
+ u8 dadr_len;
+ u8 app_type;
+ u8 phy_minor_rev;
+ u8 phy_major_rev;
+ u8 can_hibernate;
+ u8 n_lss_sync;
+ u8 n_lss_dir;
+ u8 link_minor_rev;
+ u8 link_major_rev;
+ u8 dev_type;
+ u8 n_data_gap;
+
+ u32 n_fcu_set;
+ u32 maxblk_len_set;
+ u8 n_lanes_set;
+ u8 speed_range_set;
+ u8 n_lss_sync_set;
+ u8 n_lss_dir_set;
+ u8 n_data_gap_set;
+ u8 max_retry_set;
};
struct sdio_cccr {
@@ -197,7 +250,8 @@ struct sdio_cccr {
wide_bus:1,
high_power:1,
high_speed:1,
- disable_cd:1;
+ disable_cd:1,
+ enable_async_irq:1;
};
struct sdio_cis {
@@ -270,7 +324,13 @@ struct mmc_card {
#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */
#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
+#define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */
+#define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support */
+#define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */
+#define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY (1<<17) /* Disable broken SD poweroff notify support */
+#define MMC_QUIRK_NO_UHS_DDR50_TUNING (1<<18) /* Disable DDR50 tuning */
+ bool written_flag; /* Indicates eMMC has been written since power on */
bool reenable_cmdq; /* Re-enable Command Queue */
unsigned int erase_size; /* erase size in sectors */
@@ -279,6 +339,7 @@ struct mmc_card {
unsigned int eg_boundary; /* don't cross erase-group boundaries */
unsigned int erase_arg; /* erase / trim / discard */
u8 erased_byte; /* value of erased bytes */
+ unsigned int wp_grp_size; /* write group size in sectors */
u32 raw_cid[4]; /* raw card CID */
u32 raw_csd[4]; /* raw card CSD */
@@ -290,6 +351,10 @@ struct mmc_card {
struct sd_scr scr; /* extra SD information */
struct sd_ssr ssr; /* yet more SD information */
struct sd_switch_caps sw_caps; /* switch (CMD6) caps */
+ struct sd_ext_reg ext_power; /* SD extension reg for PM */
+ struct sd_ext_reg ext_perf; /* SD extension reg for PERF */
+
+ struct sd_uhs2_config uhs2_config; /* SD UHS-II config */
unsigned int sdio_funcs; /* number of SDIO functions */
atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */
@@ -319,10 +384,16 @@ static inline bool mmc_large_sector(struct mmc_card *card)
return card->ext_csd.data_sector_size == 4096;
}
+static inline int mmc_card_enable_async_irq(struct mmc_card *card)
+{
+ return card->cccr.enable_async_irq;
+}
+
bool mmc_card_is_blockaddr(struct mmc_card *card);
#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC)
#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD)
#define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO)
+#define mmc_card_sd_combo(c) ((c)->type == MMC_TYPE_SD_COMBO)
#endif /* LINUX_MMC_CARD_H */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index ab19245e9945..01e0f591a20b 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -11,23 +11,24 @@
struct mmc_data;
struct mmc_request;
-enum mmc_blk_status {
- MMC_BLK_SUCCESS = 0,
- MMC_BLK_PARTIAL,
- MMC_BLK_CMD_ERR,
- MMC_BLK_RETRY,
- MMC_BLK_ABORT,
- MMC_BLK_DATA_ERR,
- MMC_BLK_ECC_ERR,
- MMC_BLK_NOMEDIUM,
- MMC_BLK_NEW_REQUEST,
+#define UHS2_MAX_PAYLOAD_LEN 2
+#define UHS2_MAX_RESP_LEN 20
+
+struct uhs2_command {
+ u16 header;
+ u16 arg;
+ __be32 payload[UHS2_MAX_PAYLOAD_LEN];
+ u8 payload_len;
+ u8 packet_len;
+ u8 tmode_half_duplex;
+ u8 uhs2_resp[UHS2_MAX_RESP_LEN]; /* UHS2 native cmd resp */
+ u8 uhs2_resp_len; /* UHS2 native cmd resp len */
};
struct mmc_command {
u32 opcode;
u32 arg;
#define MMC_CMD23_ARG_REL_WR (1 << 31)
-#define MMC_CMD23_ARG_PACKED ((0 << 31) | (1 << 30))
#define MMC_CMD23_ARG_TAG_REQ (1 << 29)
u32 resp[4];
unsigned int flags; /* expected response type */
@@ -56,6 +57,7 @@ struct mmc_command {
#define MMC_RSP_NONE (0)
#define MMC_RSP_R1 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
#define MMC_RSP_R1B (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY)
+#define MMC_RSP_R1B_NO_CRC (MMC_RSP_PRESENT|MMC_RSP_OPCODE|MMC_RSP_BUSY)
#define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC)
#define MMC_RSP_R3 (MMC_RSP_PRESENT)
#define MMC_RSP_R4 (MMC_RSP_PRESENT)
@@ -63,9 +65,6 @@ struct mmc_command {
#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
#define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
-/* Can be used by core to poll after switch to MMC HS mode */
-#define MMC_RSP_R1_NO_CRC (MMC_RSP_PRESENT|MMC_RSP_OPCODE)
-
#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE))
/*
@@ -109,6 +108,12 @@ struct mmc_command {
unsigned int busy_timeout; /* busy detect timeout in ms */
struct mmc_data *data; /* data segment associated with cmd */
struct mmc_request *mrq; /* associated request */
+
+ struct uhs2_command *uhs2_cmd; /* UHS2 command */
+
+ /* for SDUC */
+ bool has_ext_addr;
+ u8 ext_addr;
};
struct mmc_data {
@@ -164,10 +169,10 @@ struct mmc_request {
int tag;
#ifdef CONFIG_MMC_CRYPTO
- bool crypto_enabled;
+ const struct bio_crypt_ctx *crypto_ctx;
int crypto_key_slot;
- u32 data_unit_num;
#endif
+ struct uhs2_command uhs2_cmd;
};
struct mmc_card;
@@ -176,8 +181,8 @@ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq);
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
int retries);
-int mmc_hw_reset(struct mmc_host *host);
-int mmc_sw_reset(struct mmc_host *host);
+int mmc_hw_reset(struct mmc_card *card);
+int mmc_sw_reset(struct mmc_card *card);
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card);
#endif /* LINUX_MMC_CORE_H */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index c7e7b43600e9..e0e2c265e5d1 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -10,12 +10,14 @@
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/fault-inject.h>
+#include <linux/debugfs.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/pm.h>
#include <linux/dma-direction.h>
-#include <linux/keyslot-manager.h>
+#include <linux/blk-crypto-profile.h>
+#include <linux/mmc/sd_uhs2.h>
struct mmc_ios {
unsigned int clock; /* clock rate */
@@ -63,6 +65,10 @@ struct mmc_ios {
#define MMC_TIMING_MMC_HS400 10
#define MMC_TIMING_SD_EXP 11
#define MMC_TIMING_SD_EXP_1_2V 12
+#define MMC_TIMING_UHS2_SPEED_A 13
+#define MMC_TIMING_UHS2_SPEED_A_HD 14
+#define MMC_TIMING_UHS2_SPEED_B 15
+#define MMC_TIMING_UHS2_SPEED_B_HD 16
unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */
@@ -70,6 +76,9 @@ struct mmc_ios {
#define MMC_SIGNAL_VOLTAGE_180 1
#define MMC_SIGNAL_VOLTAGE_120 2
+ unsigned char vqmmc2_voltage;
+#define MMC_VQMMC2_VOLTAGE_180 0
+
unsigned char drv_type; /* driver type (A, B, C, D) */
#define MMC_SET_DRIVER_TYPE_B 0
@@ -91,8 +100,64 @@ struct mmc_clk_phase_map {
struct mmc_clk_phase phase[MMC_NUM_CLK_PHASES];
};
+struct sd_uhs2_caps {
+ u32 dap;
+ u32 gap;
+ u32 group_desc;
+ u32 maxblk_len;
+ u32 n_fcu;
+ u8 n_lanes;
+ u8 addr64;
+ u8 card_type;
+ u8 phy_rev;
+ u8 speed_range;
+ u8 n_lss_sync;
+ u8 n_lss_dir;
+ u8 link_rev;
+ u8 host_type;
+ u8 n_data_gap;
+
+ u32 maxblk_len_set;
+ u32 n_fcu_set;
+ u8 n_lanes_set;
+ u8 n_lss_sync_set;
+ u8 n_lss_dir_set;
+ u8 n_data_gap_set;
+ u8 max_retry_set;
+};
+
+enum sd_uhs2_operation {
+ UHS2_PHY_INIT = 0,
+ UHS2_SET_CONFIG,
+ UHS2_ENABLE_INT,
+ UHS2_DISABLE_INT,
+ UHS2_ENABLE_CLK,
+ UHS2_DISABLE_CLK,
+ UHS2_CHECK_DORMANT,
+ UHS2_SET_IOS,
+};
+
struct mmc_host;
+enum mmc_err_stat {
+ MMC_ERR_CMD_TIMEOUT,
+ MMC_ERR_CMD_CRC,
+ MMC_ERR_DAT_TIMEOUT,
+ MMC_ERR_DAT_CRC,
+ MMC_ERR_AUTO_CMD,
+ MMC_ERR_ADMA,
+ MMC_ERR_TUNING,
+ MMC_ERR_CMDQ_RED,
+ MMC_ERR_CMDQ_GCE,
+ MMC_ERR_CMDQ_ICCE,
+ MMC_ERR_REQ_TIMEOUT,
+ MMC_ERR_CMDQ_REQ_TIMEOUT,
+ MMC_ERR_ICE_CFG,
+ MMC_ERR_CTRL_TIMEOUT,
+ MMC_ERR_UNEXPECTED_IRQ,
+ MMC_ERR_MAX,
+};
+
struct mmc_host_ops {
/*
* It is optional for the host to implement pre_req and post_req in
@@ -153,7 +218,7 @@ struct mmc_host_ops {
int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios);
- /* Check if the card is pulling dat[0:3] low */
+ /* Check if the card is pulling dat[0] low */
int (*card_busy)(struct mmc_host *host);
/* The tuning command opcode value is different for SD and eMMC cards */
@@ -162,6 +227,15 @@ struct mmc_host_ops {
/* Prepare HS400 target operating frequency depending host driver */
int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
+ /* Execute HS400 tuning depending host driver */
+ int (*execute_hs400_tuning)(struct mmc_host *host, struct mmc_card *card);
+
+ /* Optional callback to prepare for SD high-speed tuning */
+ int (*prepare_sd_hs_tuning)(struct mmc_host *host, struct mmc_card *card);
+
+ /* Optional callback to execute SD high-speed tuning */
+ int (*execute_sd_hs_tuning)(struct mmc_host *host, struct mmc_card *card);
+
/* Prepare switch to DDR during the HS400 init sequence */
int (*hs400_prepare_ddr)(struct mmc_host *host);
@@ -178,7 +252,7 @@ struct mmc_host_ops {
unsigned int max_dtr, int host_drv,
int card_drv, int *drv_type);
/* Reset the eMMC card via RST_n */
- void (*hw_reset)(struct mmc_host *host);
+ void (*card_hw_reset)(struct mmc_host *host);
void (*card_event)(struct mmc_host *host);
/*
@@ -190,6 +264,14 @@ struct mmc_host_ops {
/* Initialize an SD express card, mandatory for MMC_CAP2_SD_EXP. */
int (*init_sd_express)(struct mmc_host *host, struct mmc_ios *ios);
+
+ /*
+ * The uhs2_control callback is used to execute SD UHS-II specific
+ * operations. It's mandatory to implement for hosts that supports the
+ * SD UHS-II interface (MMC_CAP2_SD_UHS2). Expected return values are a
+ * negative errno in case of a failure or zero for success.
+ */
+ int (*uhs2_control)(struct mmc_host *host, enum sd_uhs2_operation op);
};
struct mmc_cqe_ops {
@@ -236,16 +318,6 @@ struct mmc_cqe_ops {
void (*cqe_recovery_finish)(struct mmc_host *host);
};
-struct mmc_async_req {
- /* active mmc request */
- struct mmc_request *mrq;
- /*
- * Check error status of completed mmc request.
- * Returns 0 if success otherwise non zero.
- */
- enum mmc_blk_status (*err_check)(struct mmc_card *, struct mmc_async_req *);
-};
-
/**
* struct mmc_slot - MMC slot functions
*
@@ -263,26 +335,17 @@ struct mmc_slot {
void *handler_priv;
};
-/**
- * mmc_context_info - synchronization details for mmc context
- * @is_done_rcv wake up reason was done request
- * @is_new_req wake up reason was new request
- * @is_waiting_last_req mmc context waiting for single running request
- * @wait wait queue
- */
-struct mmc_context_info {
- bool is_done_rcv;
- bool is_new_req;
- bool is_waiting_last_req;
- wait_queue_head_t wait;
-};
-
struct regulator;
struct mmc_pwrseq;
+struct notifier_block;
struct mmc_supply {
struct regulator *vmmc; /* Card power supply */
struct regulator *vqmmc; /* Optional Vccq supply */
+ struct regulator *vqmmc2; /* Optional supply for phy */
+
+ struct notifier_block vmmc_nb; /* Notifier for vmmc */
+ struct work_struct uv_work; /* Undervoltage work */
};
struct mmc_ctx {
@@ -374,6 +437,7 @@ struct mmc_host {
MMC_CAP2_HS200_1_2V_SDR)
#define MMC_CAP2_SD_EXP (1 << 7) /* SD express via PCIe */
#define MMC_CAP2_SD_EXP_1_2V (1 << 8) /* SD express 1.2V */
+#define MMC_CAP2_SD_UHS2 (1 << 9) /* SD UHS-II support */
#define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */
#define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */
#define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
@@ -398,14 +462,19 @@ struct mmc_host {
#else
#define MMC_CAP2_CRYPTO 0
#endif
+#define MMC_CAP2_ALT_GPT_TEGRA (1 << 28) /* Host with eMMC that has GPT entry at a non-standard location */
+
+ bool uhs2_sd_tran; /* UHS-II flag for SD_TRAN state */
+ bool uhs2_app_cmd; /* UHS-II flag for APP command */
+ struct sd_uhs2_caps uhs2_caps; /* Host UHS-II capabilities */
int fixed_drv_type; /* fixed driver type for non-removable media */
mmc_pm_flag_t pm_caps; /* supported pm features */
/* host specific block data */
- unsigned int max_seg_size; /* see blk_queue_max_segment_size */
- unsigned short max_segs; /* see blk_queue_max_segments */
+ unsigned int max_seg_size; /* lim->max_segment_size */
+ unsigned short max_segs; /* lim->max_segments */
unsigned short unused;
unsigned int max_req_size; /* maximum number of bytes in one req */
unsigned int max_blk_size; /* maximum size of one mmc block */
@@ -427,6 +496,14 @@ struct mmc_host {
unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */
unsigned int can_dma_map_merge:1; /* merging can be used */
+ unsigned int vqmmc_enabled:1; /* vqmmc regulator is enabled */
+
+ /*
+ * Indicates if an undervoltage event has already been handled.
+ * This prevents repeated regulator notifiers from triggering
+ * multiple REGULATOR_EVENT_UNDER_VOLTAGE events.
+ */
+ unsigned int undervoltage:1; /* Undervoltage state */
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
@@ -453,7 +530,7 @@ struct mmc_host {
unsigned int sdio_irqs;
struct task_struct *sdio_irq_thread;
- struct delayed_work sdio_irq_work;
+ struct work_struct sdio_irq_work;
bool sdio_irq_pending;
atomic_t sdio_irq_thread_abort;
@@ -491,22 +568,26 @@ struct mmc_host {
/* Inline encryption support */
#ifdef CONFIG_MMC_CRYPTO
- struct blk_keyslot_manager ksm;
+ struct blk_crypto_profile crypto_profile;
#endif
/* Host Software Queue support */
bool hsq_enabled;
+ int hsq_depth;
+ u32 err_stats[MMC_ERR_MAX];
+ u32 max_sd_hs_hz;
unsigned long private[] ____cacheline_aligned;
};
struct device_node;
struct mmc_host *mmc_alloc_host(int extra, struct device *);
+struct mmc_host *devm_mmc_alloc_host(struct device *dev, int extra);
int mmc_add_host(struct mmc_host *);
void mmc_remove_host(struct mmc_host *);
void mmc_free_host(struct mmc_host *);
-void mmc_of_parse_clk_phase(struct mmc_host *host,
+void mmc_of_parse_clk_phase(struct device *dev,
struct mmc_clk_phase_map *map);
int mmc_of_parse(struct mmc_host *host);
int mmc_of_parse_voltage(struct mmc_host *host, u32 *mask);
@@ -521,6 +602,14 @@ static inline struct mmc_host *mmc_from_priv(void *priv)
return container_of(priv, struct mmc_host, private);
}
+#ifdef CONFIG_MMC_CRYPTO
+static inline struct mmc_host *
+mmc_from_crypto_profile(struct blk_crypto_profile *profile)
+{
+ return container_of(profile, struct mmc_host, crypto_profile);
+}
+#endif
+
#define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI)
#define mmc_dev(x) ((x)->parent)
@@ -557,6 +646,7 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
struct regulator *supply,
unsigned short vdd_bit);
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios);
+int mmc_regulator_set_vqmmc2(struct mmc_host *mmc, struct mmc_ios *ios);
#else
static inline int mmc_regulator_set_ocr(struct mmc_host *mmc,
struct regulator *supply,
@@ -570,9 +660,17 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
{
return -EINVAL;
}
+
+static inline int mmc_regulator_set_vqmmc2(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ return -EINVAL;
+}
#endif
int mmc_regulator_get_supply(struct mmc_host *mmc);
+int mmc_regulator_enable_vqmmc(struct mmc_host *mmc);
+void mmc_regulator_disable_vqmmc(struct mmc_host *mmc);
static inline int mmc_card_is_removable(struct mmc_host *host)
{
@@ -603,6 +701,14 @@ static inline int mmc_card_uhs(struct mmc_card *card)
card->host->ios.timing <= MMC_TIMING_UHS_DDR50;
}
+static inline bool mmc_card_uhs2(struct mmc_host *host)
+{
+ return host->ios.timing == MMC_TIMING_UHS2_SPEED_A ||
+ host->ios.timing == MMC_TIMING_UHS2_SPEED_A_HD ||
+ host->ios.timing == MMC_TIMING_UHS2_SPEED_B ||
+ host->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD;
+}
+
void mmc_retune_timer_stop(struct mmc_host *host);
static inline void mmc_retune_needed(struct mmc_host *host)
@@ -631,7 +737,24 @@ static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
+static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host,
+ enum mmc_err_stat stat)
+{
+ host->err_stats[stat] += 1;
+}
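
Host drivers are expected to bump these counters from their error paths; an illustrative call site (driver name hypothetical, not part of the patch):

static void myhost_cmd_timeout(struct mmc_host *host)
{
	mmc_debugfs_err_stats_inc(host, MMC_ERR_CMD_TIMEOUT);
	/* ... reset the controller and fail the request with -ETIMEDOUT ... */
}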
+
+static inline int mmc_card_uhs2_hd_mode(struct mmc_host *host)
+{
+ return host->ios.timing == MMC_TIMING_UHS2_SPEED_A_HD ||
+ host->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD;
+}
+
+int mmc_sd_switch(struct mmc_card *card, bool mode, int group,
+ u8 value, u8 *resp);
+int mmc_send_status(struct mmc_card *card, u32 *status);
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
-int mmc_abort_tuning(struct mmc_host *host, u32 opcode);
+int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode);
+int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
+int mmc_read_tuning(struct mmc_host *host, unsigned int blksz, unsigned int blocks);
#endif /* LINUX_MMC_HOST_H */
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index d9a65c6a8816..cf2bcb5da30d 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -99,6 +99,12 @@ static inline bool mmc_op_multi(u32 opcode)
opcode == MMC_READ_MULTIPLE_BLOCK;
}
+static inline bool mmc_op_tuning(u32 opcode)
+{
+ return opcode == MMC_SEND_TUNING_BLOCK ||
+ opcode == MMC_SEND_TUNING_BLOCK_HS200;
+}
+
/*
* MMC_SWITCH argument format:
*
@@ -251,8 +257,6 @@ static inline bool mmc_ready_for_data(u32 status)
#define EXT_CSD_FLUSH_CACHE 32 /* W */
#define EXT_CSD_CACHE_CTRL 33 /* R/W */
#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */
-#define EXT_CSD_PACKED_FAILURE_INDEX 35 /* RO */
-#define EXT_CSD_PACKED_CMD_STATUS 36 /* RO */
#define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO, 2 bytes */
#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */
#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */
@@ -315,8 +319,6 @@ static inline bool mmc_ready_for_data(u32 status)
#define EXT_CSD_SUPPORTED_MODE 493 /* RO */
#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */
-#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */
-#define EXT_CSD_MAX_PACKED_READS 501 /* RO */
#define EXT_CSD_BKOPS_SUPPORT 502 /* RO */
#define EXT_CSD_HPI_FEATURES 503 /* RO */
@@ -396,18 +398,12 @@ static inline bool mmc_ready_for_data(u32 status)
#define EXT_CSD_PWR_CL_8BIT_SHIFT 4
#define EXT_CSD_PWR_CL_4BIT_SHIFT 0
-#define EXT_CSD_PACKED_EVENT_EN BIT(3)
-
/*
* EXCEPTION_EVENT_STATUS field
*/
#define EXT_CSD_URGENT_BKOPS BIT(0)
#define EXT_CSD_DYNCAP_NEEDED BIT(1)
#define EXT_CSD_SYSPOOL_EXHAUSTED BIT(2)
-#define EXT_CSD_PACKED_FAILURE BIT(3)
-
-#define EXT_CSD_PACKED_GENERIC_ERROR BIT(0)
-#define EXT_CSD_PACKED_INDEXED_ERROR BIT(1)
/*
* BKOPS status level
@@ -445,7 +441,7 @@ static inline bool mmc_ready_for_data(u32 status)
#define MMC_SECURE_TRIM1_ARG 0x80000001
#define MMC_SECURE_TRIM2_ARG 0x80008000
#define MMC_SECURE_ARGS 0x80000000
-#define MMC_TRIM_ARGS 0x00008001
+#define MMC_TRIM_OR_DISCARD_ARGS 0x00008003
#define mmc_driver_type_mask(n) (1 << (n))
diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h
index 2236aa540faa..af5fc70e09a2 100644
--- a/include/linux/mmc/sd.h
+++ b/include/linux/mmc/sd.h
@@ -15,6 +15,9 @@
#define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */
#define SD_SWITCH_VOLTAGE 11 /* ac R1 */
+/* class 2 */
+#define SD_ADDR_EXT 22 /* ac [5:0] R1 */
+
/* class 10 */
#define SD_SWITCH 6 /* adtc [31:0] See below R1 */
@@ -29,9 +32,14 @@
#define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */
#define SD_APP_SEND_SCR 51 /* adtc R1 */
+/* class 11 */
+#define SD_READ_EXTR_SINGLE 48 /* adtc [31:0] R1 */
+#define SD_WRITE_EXTR_SINGLE 49 /* adtc [31:0] R1 */
+
/* OCR bit definitions */
#define SD_OCR_S18R (1 << 24) /* 1.8V switching request */
#define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */
+#define SD_OCR_2T (1 << 27) /* HO2T/CO2T - SDUC support */
#define SD_OCR_XPC (1 << 28) /* SDXC power control */
#define SD_OCR_CCS (1 << 30) /* Card Capacity Status */
diff --git a/include/linux/mmc/sd_uhs2.h b/include/linux/mmc/sd_uhs2.h
new file mode 100644
index 000000000000..7abe9bd870c7
--- /dev/null
+++ b/include/linux/mmc/sd_uhs2.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Header file for UHS-II packets, Host Controller registers and I/O
+ * accessors.
+ *
+ * Copyright (C) 2014 Intel Corp, All Rights Reserved.
+ */
+#ifndef LINUX_MMC_UHS2_H
+#define LINUX_MMC_UHS2_H
+
+/* LINK Layer definition */
+/*
+ * UHS2 Header:
+ * Per UHS-II Addendum Version 1.02, Figure 5-2, the format of the CCMD Header is:
+ * bit [3:0] : DID(Destination ID = Node ID of UHS2 card)
+ * bit [6:4] : TYP(Packet Type)
+ * 000b: CCMD(Control command packet)
+ * 001b: DCMD(Data command packet)
+ * 010b: RES(Response packet)
+ * 011b: DATA(Data payload packet)
+ * 111b: MSG(Message packet)
+ * Others: Reserved
+ * bit [7] : NP(Native Packet)
+ * bit [10:8] : TID(Transaction ID)
+ * bit [11] : Reserved
+ * bit [15:12]: SID(Source ID 0: Node ID of Host)
+ *
+ * Broadcast CCMD issued by Host is represented as DID=SID=0.
+ */
+/*
+ * UHS2 Argument:
+ * Per UHS-II Addendum Version 1.02, Figure 6-5, the format of the CCMD Argument is:
+ * bit [3:0] : MSB of IOADR
+ * bit [5:4] : PLEN(Payload Length)
+ * 00b: 0 byte
+ * 01b: 4 bytes
+ * 10b: 8 bytes
+ * 11b: 16 bytes
+ * bit [6] : Reserved
+ * bit [7] : R/W(Read/Write)
+ * 0: Control read command
+ * 1: Control write command
+ * bit [15:8] : LSB of IOADR
+ *
+ * The I/O Address specifies the address of the register in UHS-II I/O space
+ * accessed by the CCMD. The unit of the I/O Address is 4 bytes. It is
+ * transmitted MSB first, LSB last.
+ */
+#define UHS2_NATIVE_PACKET_POS 7
+#define UHS2_NATIVE_PACKET (1 << UHS2_NATIVE_PACKET_POS)
+
+#define UHS2_PACKET_TYPE_POS 4
+#define UHS2_PACKET_TYPE_CCMD (0 << UHS2_PACKET_TYPE_POS)
+#define UHS2_PACKET_TYPE_DCMD (1 << UHS2_PACKET_TYPE_POS)
+#define UHS2_PACKET_TYPE_RES (2 << UHS2_PACKET_TYPE_POS)
+#define UHS2_PACKET_TYPE_DATA (3 << UHS2_PACKET_TYPE_POS)
+#define UHS2_PACKET_TYPE_MSG (7 << UHS2_PACKET_TYPE_POS)
+
+#define UHS2_DEST_ID_MASK 0x0F
+#define UHS2_DEST_ID 0x1
+
+#define UHS2_SRC_ID_POS 12
+#define UHS2_SRC_ID_MASK 0xF000
+
+#define UHS2_TRANS_ID_POS 8
+#define UHS2_TRANS_ID_MASK 0x0700
+
+/* UHS2 MSG */
+#define UHS2_MSG_CTG_POS 5
+#define UHS2_MSG_CTG_LMSG 0x00
+#define UHS2_MSG_CTG_INT 0x60
+#define UHS2_MSG_CTG_AMSG 0x80
+
+#define UHS2_MSG_CTG_FCREQ 0x00
+#define UHS2_MSG_CTG_FCRDY 0x01
+#define UHS2_MSG_CTG_STAT 0x02
+
+#define UHS2_MSG_CODE_POS 8
+#define UHS2_MSG_CODE_FC_UNRECOVER_ERR 0x8
+#define UHS2_MSG_CODE_STAT_UNRECOVER_ERR 0x8
+#define UHS2_MSG_CODE_STAT_RECOVER_ERR 0x1
+
+/* TRANS Layer definition */
+
+/* Native packets */
+#define UHS2_NATIVE_CMD_RW_POS 7
+#define UHS2_NATIVE_CMD_WRITE (1 << UHS2_NATIVE_CMD_RW_POS)
+#define UHS2_NATIVE_CMD_READ (0 << UHS2_NATIVE_CMD_RW_POS)
+
+#define UHS2_NATIVE_CMD_PLEN_POS 4
+#define UHS2_NATIVE_CMD_PLEN_4B (1 << UHS2_NATIVE_CMD_PLEN_POS)
+#define UHS2_NATIVE_CMD_PLEN_8B (2 << UHS2_NATIVE_CMD_PLEN_POS)
+#define UHS2_NATIVE_CMD_PLEN_16B (3 << UHS2_NATIVE_CMD_PLEN_POS)
+
+#define UHS2_NATIVE_CCMD_GET_MIOADR_MASK 0xF00
+#define UHS2_NATIVE_CCMD_MIOADR_MASK 0x0F
+
+#define UHS2_NATIVE_CCMD_LIOADR_POS 8
+#define UHS2_NATIVE_CCMD_GET_LIOADR_MASK 0x0FF
+
+#define UHS2_CCMD_DEV_INIT_COMPLETE_FLAG BIT(11)
+#define UHS2_DEV_INIT_PAYLOAD_LEN 1
+#define UHS2_DEV_INIT_RESP_LEN 6
+#define UHS2_DEV_ENUM_PAYLOAD_LEN 1
+#define UHS2_DEV_ENUM_RESP_LEN 8
+#define UHS2_CFG_WRITE_PAYLOAD_LEN 2
+#define UHS2_CFG_WRITE_PHY_SET_RESP_LEN 4
+#define UHS2_CFG_WRITE_GENERIC_SET_RESP_LEN 5
+#define UHS2_GO_DORMANT_PAYLOAD_LEN 1
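
Illustrative use of the definitions above (not part of the patch): packing a native read CCMD header and argument for I/O register 'ioadr', following the bit layouts documented at the top of this file.

#include <linux/types.h>

static void uhs2_build_read_ccmd(u16 *header, u16 *arg, u32 ioadr)
{
	/* DID = UHS2_DEST_ID, TYP = CCMD, NP = native packet */
	*header = UHS2_NATIVE_PACKET | UHS2_PACKET_TYPE_CCMD | UHS2_DEST_ID;

	/* bits [3:0] MSB of IOADR, [5:4] PLEN, [7] R/W, [15:8] LSB of IOADR */
	*arg = ((ioadr >> 8) & UHS2_NATIVE_CCMD_MIOADR_MASK) |
	       UHS2_NATIVE_CMD_PLEN_4B | UHS2_NATIVE_CMD_READ |
	       ((ioadr & UHS2_NATIVE_CCMD_GET_LIOADR_MASK) <<
		UHS2_NATIVE_CCMD_LIOADR_POS);
}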
+
+/*
+ * UHS2 Argument:
+ * Per UHS-II Addendum Version 1.02, Figure 6-8, the format of the DCMD Argument is:
+ * bit [2:0]  : Reserved
+ * bit [6:3] : TMODE(Transfer Mode)
+ * bit 3: DAM(Data Access Mode)
+ * bit 4: TLUM(TLEN Unit Mode)
+ * bit 5: LM(Length Mode)
+ * bit 6: DM(Duplex Mode)
+ * bit [7] : R/W(Read/Write)
+ *              0: read data command
+ *              1: write data command
+ * bit [15:8] : Reserved
+ */
+#define UHS2_DCMD_DM_POS 6
+#define UHS2_DCMD_2L_HD_MODE (1 << UHS2_DCMD_DM_POS)
+#define UHS2_DCMD_LM_POS 5
+#define UHS2_DCMD_LM_TLEN_EXIST (1 << UHS2_DCMD_LM_POS)
+#define UHS2_DCMD_TLUM_POS 4
+#define UHS2_DCMD_TLUM_BYTE_MODE (1 << UHS2_DCMD_TLUM_POS)
+#define UHS2_NATIVE_DCMD_DAM_POS 3
+#define UHS2_NATIVE_DCMD_DAM_IO (1 << UHS2_NATIVE_DCMD_DAM_POS)
+
+#define UHS2_RES_NACK_POS 7
+#define UHS2_RES_NACK_MASK (0x1 << UHS2_RES_NACK_POS)
+
+#define UHS2_RES_ECODE_POS 4
+#define UHS2_RES_ECODE_MASK 0x7
+#define UHS2_RES_ECODE_COND 1
+#define UHS2_RES_ECODE_ARG 2
+#define UHS2_RES_ECODE_GEN 3
+
+/* IOADR of device registers */
+#define UHS2_IOADR_GENERIC_CAPS 0x00
+#define UHS2_IOADR_PHY_CAPS 0x02
+#define UHS2_IOADR_LINK_CAPS 0x04
+#define UHS2_IOADR_RSV_CAPS 0x06
+#define UHS2_IOADR_GENERIC_SETTINGS 0x08
+#define UHS2_IOADR_PHY_SETTINGS 0x0A
+#define UHS2_IOADR_LINK_SETTINGS 0x0C
+#define UHS2_IOADR_PRESET 0x40
+
+/* SD application packets */
+#define UHS2_SD_CMD_INDEX_POS 8
+
+#define UHS2_SD_CMD_APP_POS 14
+#define UHS2_SD_CMD_APP (1 << UHS2_SD_CMD_APP_POS)
+
+/* UHS-II Device Registers */
+#define UHS2_DEV_CONFIG_REG 0x000
+
+/* General Caps and Settings registers */
+#define UHS2_DEV_CONFIG_GEN_CAPS (UHS2_DEV_CONFIG_REG + 0x000)
+#define UHS2_DEV_CONFIG_N_LANES_POS 8
+#define UHS2_DEV_CONFIG_N_LANES_MASK 0x3F
+#define UHS2_DEV_CONFIG_2L_HD_FD 0x1
+#define UHS2_DEV_CONFIG_2D1U_FD 0x2
+#define UHS2_DEV_CONFIG_1D2U_FD 0x4
+#define UHS2_DEV_CONFIG_2D2U_FD 0x8
+#define UHS2_DEV_CONFIG_DADR_POS 14
+#define UHS2_DEV_CONFIG_DADR_MASK 0x1
+#define UHS2_DEV_CONFIG_APP_POS 16
+#define UHS2_DEV_CONFIG_APP_MASK 0xFF
+#define UHS2_DEV_CONFIG_APP_SD_MEM 0x1
+
+#define UHS2_DEV_CONFIG_GEN_SET (UHS2_DEV_CONFIG_REG + 0x008)
+#define UHS2_DEV_CONFIG_GEN_SET_N_LANES_POS 8
+#define UHS2_DEV_CONFIG_GEN_SET_2L_FD_HD 0x0
+#define UHS2_DEV_CONFIG_GEN_SET_2D1U_FD 0x2
+#define UHS2_DEV_CONFIG_GEN_SET_1D2U_FD 0x3
+#define UHS2_DEV_CONFIG_GEN_SET_2D2U_FD 0x4
+#define UHS2_DEV_CONFIG_GEN_SET_CFG_COMPLETE BIT(31)
+
+/* PHY Caps and Settings registers */
+#define UHS2_DEV_CONFIG_PHY_CAPS (UHS2_DEV_CONFIG_REG + 0x002)
+#define UHS2_DEV_CONFIG_PHY_MINOR_MASK 0xF
+#define UHS2_DEV_CONFIG_PHY_MAJOR_POS 4
+#define UHS2_DEV_CONFIG_PHY_MAJOR_MASK 0x3
+#define UHS2_DEV_CONFIG_CAN_HIBER_POS 15
+#define UHS2_DEV_CONFIG_CAN_HIBER_MASK 0x1
+#define UHS2_DEV_CONFIG_PHY_CAPS1 (UHS2_DEV_CONFIG_REG + 0x003)
+#define UHS2_DEV_CONFIG_N_LSS_SYN_MASK 0xF
+#define UHS2_DEV_CONFIG_N_LSS_DIR_POS 4
+#define UHS2_DEV_CONFIG_N_LSS_DIR_MASK 0xF
+
+#define UHS2_DEV_CONFIG_PHY_SET (UHS2_DEV_CONFIG_REG + 0x00A)
+#define UHS2_DEV_CONFIG_PHY_SET_SPEED_POS 6
+#define UHS2_DEV_CONFIG_PHY_SET_SPEED_A 0x0
+#define UHS2_DEV_CONFIG_PHY_SET_SPEED_B 0x1
+
+/* LINK-TRAN Caps and Settings registers */
+#define UHS2_DEV_CONFIG_LINK_TRAN_CAPS (UHS2_DEV_CONFIG_REG + 0x004)
+#define UHS2_DEV_CONFIG_LT_MINOR_MASK 0xF
+#define UHS2_DEV_CONFIG_LT_MAJOR_POS 4
+#define UHS2_DEV_CONFIG_LT_MAJOR_MASK 0x3
+#define UHS2_DEV_CONFIG_N_FCU_POS 8
+#define UHS2_DEV_CONFIG_N_FCU_MASK 0xFF
+#define UHS2_DEV_CONFIG_DEV_TYPE_POS 16
+#define UHS2_DEV_CONFIG_DEV_TYPE_MASK 0x7
+#define UHS2_DEV_CONFIG_MAX_BLK_LEN_POS 20
+#define UHS2_DEV_CONFIG_MAX_BLK_LEN_MASK 0xFFF
+#define UHS2_DEV_CONFIG_LINK_TRAN_CAPS1 (UHS2_DEV_CONFIG_REG + 0x005)
+#define UHS2_DEV_CONFIG_N_DATA_GAP_MASK 0xFF
+
+#define UHS2_DEV_CONFIG_LINK_TRAN_SET (UHS2_DEV_CONFIG_REG + 0x00C)
+#define UHS2_DEV_CONFIG_LT_SET_MAX_BLK_LEN 0x200
+#define UHS2_DEV_CONFIG_LT_SET_MAX_RETRY_POS 16
+
+/* Preset register */
+#define UHS2_DEV_CONFIG_PRESET (UHS2_DEV_CONFIG_REG + 0x040)
+
+#define UHS2_DEV_INT_REG 0x100
+
+#define UHS2_DEV_STATUS_REG 0x180
+
+#define UHS2_DEV_CMD_REG 0x200
+#define UHS2_DEV_CMD_FULL_RESET (UHS2_DEV_CMD_REG + 0x000)
+#define UHS2_DEV_CMD_GO_DORMANT_STATE (UHS2_DEV_CMD_REG + 0x001)
+#define UHS2_DEV_CMD_DORMANT_HIBER BIT(7)
+#define UHS2_DEV_CMD_DEVICE_INIT (UHS2_DEV_CMD_REG + 0x002)
+#define UHS2_DEV_INIT_COMPLETE_FLAG BIT(11)
+#define UHS2_DEV_CMD_ENUMERATE (UHS2_DEV_CMD_REG + 0x003)
+#define UHS2_DEV_CMD_TRANS_ABORT (UHS2_DEV_CMD_REG + 0x004)
+
+#define UHS2_RCLK_MAX 52000000
+#define UHS2_RCLK_MIN 26000000
+
+#endif /* LINUX_MMC_UHS2_H */
diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h
deleted file mode 100644
index 1d42872d22f3..000000000000
--- a/include/linux/mmc/sdhci-pci-data.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LINUX_MMC_SDHCI_PCI_DATA_H
-#define LINUX_MMC_SDHCI_PCI_DATA_H
-
-struct pci_dev;
-
-struct sdhci_pci_data {
- struct pci_dev *pdev;
- int slotno;
- int rst_n_gpio; /* Set to -EINVAL if unused */
- int cd_gpio; /* Set to -EINVAL if unused */
- int (*setup)(struct sdhci_pci_data *data);
- void (*cleanup)(struct sdhci_pci_data *data);
-};
-
-extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev,
- int slotno);
-#endif
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index 2a05d1ac4f0e..1ef400f28642 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -159,6 +159,11 @@
#define SDIO_DTSx_SET_TYPE_A (1 << SDIO_DRIVE_DTSx_SHIFT)
#define SDIO_DTSx_SET_TYPE_C (2 << SDIO_DRIVE_DTSx_SHIFT)
#define SDIO_DTSx_SET_TYPE_D (3 << SDIO_DRIVE_DTSx_SHIFT)
+
+#define SDIO_CCCR_INTERRUPT_EXT 0x16
+#define SDIO_INTERRUPT_EXT_SAI (1 << 0)
+#define SDIO_INTERRUPT_EXT_EAI (1 << 1)
+
/*
* Function Basic Registers (FBR)
*/
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index 478855b8e406..fed1f5f4a8d3 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -106,7 +106,10 @@ struct sdio_driver {
.class = (dev_class), \
.vendor = SDIO_ANY_ID, .device = SDIO_ANY_ID
-extern int sdio_register_driver(struct sdio_driver *);
+/* use a macro to avoid include chaining to get THIS_MODULE */
+#define sdio_register_driver(drv) \
+ __sdio_register_driver(drv, THIS_MODULE)
+extern int __sdio_register_driver(struct sdio_driver *, struct module *);
extern void sdio_unregister_driver(struct sdio_driver *);
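
For context (illustrative, not part of the patch): a typical driver keeps using module_sdio_driver(), which now expands through the macro so that __sdio_register_driver() receives the correct THIS_MODULE. All "mydrv" names are hypothetical.

#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/module.h>

static const struct sdio_device_id mydrv_ids[] = {
	{ SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_43430) },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(sdio, mydrv_ids);

static int mydrv_probe(struct sdio_func *func,
		       const struct sdio_device_id *id)
{
	return 0;
}

static void mydrv_remove(struct sdio_func *func)
{
}

static struct sdio_driver mydrv = {
	.name		= "mydrv",
	.id_table	= mydrv_ids,
	.probe		= mydrv_probe,
	.remove		= mydrv_remove,
};
module_sdio_driver(mydrv);
MODULE_LICENSE("GPL");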
/**
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 12036619346c..673cbdf43453 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -74,7 +74,13 @@
#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
+#define SDIO_DEVICE_ID_BROADCOM_43439 0xa9af
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
+#define SDIO_DEVICE_ID_BROADCOM_43751 0xaae7
+#define SDIO_DEVICE_ID_BROADCOM_43752 0xaae8
+
+#define SDIO_VENDOR_ID_CYPRESS 0x04b4
+#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439 0xbd3d
#define SDIO_VENDOR_ID_MARVELL 0x02df
#define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103
@@ -100,14 +106,27 @@
#define SDIO_DEVICE_ID_MARVELL_8977_BT 0x9146
#define SDIO_DEVICE_ID_MARVELL_8987_WLAN 0x9149
#define SDIO_DEVICE_ID_MARVELL_8987_BT 0x914a
+#define SDIO_DEVICE_ID_MARVELL_8978_WLAN 0x9159
#define SDIO_VENDOR_ID_MEDIATEK 0x037a
#define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663
#define SDIO_DEVICE_ID_MEDIATEK_MT7668 0x7668
+#define SDIO_DEVICE_ID_MEDIATEK_MT7961 0x7961
#define SDIO_VENDOR_ID_MICROCHIP_WILC 0x0296
#define SDIO_DEVICE_ID_MICROCHIP_WILC1000 0x5347
+#define SDIO_VENDOR_ID_REALTEK 0x024c
+#define SDIO_DEVICE_ID_REALTEK_RTW8723BS 0xb723
+#define SDIO_DEVICE_ID_REALTEK_RTW8821BS 0xb821
+#define SDIO_DEVICE_ID_REALTEK_RTW8822BS 0xb822
+#define SDIO_DEVICE_ID_REALTEK_RTW8821CS 0xc821
+#define SDIO_DEVICE_ID_REALTEK_RTW8822CS 0xc822
+#define SDIO_DEVICE_ID_REALTEK_RTW8723DS_2ANT 0xd723
+#define SDIO_DEVICE_ID_REALTEK_RTW8723DS_1ANT 0xd724
+#define SDIO_DEVICE_ID_REALTEK_RTW8821DS 0xd821
+#define SDIO_DEVICE_ID_REALTEK_RTW8723CS 0xb703
+
#define SDIO_VENDOR_ID_SIANO 0x039a
#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201
#define SDIO_DEVICE_ID_SIANO_NICE 0x0202
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 4ae2f2908f99..23ac5696fa38 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -8,23 +8,23 @@
#ifndef MMC_SLOT_GPIO_H
#define MMC_SLOT_GPIO_H
+#include <linux/interrupt.h>
#include <linux/types.h>
-#include <linux/irqreturn.h>
struct mmc_host;
int mmc_gpio_get_ro(struct mmc_host *host);
int mmc_gpio_get_cd(struct mmc_host *host);
+void mmc_gpio_set_cd_irq(struct mmc_host *host, int irq);
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
unsigned int debounce);
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
unsigned int idx, unsigned int debounce);
-void mmc_gpio_set_cd_isr(struct mmc_host *host,
- irqreturn_t (*isr)(int irq, void *dev_id));
+int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config);
int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on);
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
-bool mmc_can_gpio_cd(struct mmc_host *host);
-bool mmc_can_gpio_ro(struct mmc_host *host);
+bool mmc_host_can_gpio_cd(struct mmc_host *host);
+bool mmc_host_can_gpio_ro(struct mmc_host *host);
#endif
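
A sketch of how a host driver uses this interface after the rename from mmc_can_gpio_cd() to mmc_host_can_gpio_cd(); the helper name and the 200 ms debounce value are assumptions for illustration:

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

static int my_host_setup_cd(struct mmc_host *mmc)
{
	int ret;

	/* index 0 "cd" GPIO from the firmware description, 200000 us debounce */
	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 200000);
	if (ret)
		return ret;

	if (mmc_host_can_gpio_cd(mmc))
		mmc_gpiod_request_cd_irq(mmc);

	return 0;
}
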
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 5d0767cb424a..14a45979cccc 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -8,11 +8,14 @@
struct page;
struct vm_area_struct;
struct mm_struct;
+struct vma_iterator;
+struct vma_merge_struct;
-extern void dump_page(struct page *page, const char *reason);
-extern void __dump_page(struct page *page, const char *reason);
+void dump_page(const struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
+void dump_vmg(const struct vma_merge_struct *vmg, const char *reason);
+void vma_iter_dump_tree(const struct vma_iterator *vmi);
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
@@ -23,6 +26,13 @@ void dump_mm(const struct mm_struct *mm);
BUG(); \
} \
} while (0)
+#define VM_BUG_ON_FOLIO(cond, folio) \
+ do { \
+ if (unlikely(cond)) { \
+ dump_page(&folio->page, "VM_BUG_ON_FOLIO(" __stringify(cond)")");\
+ BUG(); \
+ } \
+ } while (0)
#define VM_BUG_ON_VMA(cond, vma) \
do { \
if (unlikely(cond)) { \
@@ -38,7 +48,7 @@ void dump_mm(const struct mm_struct *mm);
} \
} while (0)
#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \
- static bool __section(".data.once") __warned; \
+ static bool __section(".data..once") __warned; \
int __ret_warn_once = !!(cond); \
\
if (unlikely(__ret_warn_once && !__warned)) { \
@@ -48,6 +58,57 @@ void dump_mm(const struct mm_struct *mm);
} \
unlikely(__ret_warn_once); \
})
+#define VM_WARN_ON_FOLIO(cond, folio) ({ \
+ int __ret_warn = !!(cond); \
+ \
+ if (unlikely(__ret_warn)) { \
+ dump_page(&folio->page, "VM_WARN_ON_FOLIO(" __stringify(cond)")");\
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn); \
+})
+#define VM_WARN_ON_ONCE_FOLIO(cond, folio) ({ \
+ static bool __section(".data..once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_page(&folio->page, "VM_WARN_ON_ONCE_FOLIO(" __stringify(cond)")");\
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+#define VM_WARN_ON_ONCE_MM(cond, mm) ({ \
+ static bool __section(".data..once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_mm(mm); \
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+#define VM_WARN_ON_ONCE_VMA(cond, vma) ({ \
+ static bool __section(".data..once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_vma(vma); \
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+#define VM_WARN_ON_VMG(cond, vmg) ({ \
+ int __ret_warn = !!(cond); \
+ \
+ if (unlikely(__ret_warn)) { \
+ dump_vmg(vmg, "VM_WARN_ON_VMG(" __stringify(cond)")"); \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn); \
+})
#define VM_WARN_ON(cond) (void)WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
@@ -56,13 +117,25 @@ void dump_mm(const struct mm_struct *mm);
#else
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
+#define VM_BUG_ON_FOLIO(cond, folio) VM_BUG_ON(cond)
#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond)
#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_MM(cond, mm) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_VMA(cond, vma) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_VMG(cond, vmg) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
+#endif /* CONFIG_DEBUG_VM */
+
+#ifdef CONFIG_DEBUG_VM_IRQSOFF
+#define VM_WARN_ON_IRQS_ENABLED() WARN_ON_ONCE(!irqs_disabled())
+#else
+#define VM_WARN_ON_IRQS_ENABLED() do { } while (0)
#endif
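
Usage-wise, the new folio variants mirror the page ones: under CONFIG_DEBUG_VM they dump the folio's head page before warning, and they compile away entirely otherwise. A hedged example (the checked predicate is made up for illustration):

#include <linux/mmdebug.h>
#include <linux/pagemap.h>

static void my_check_folio(struct folio *folio)
{
	/* fires at most once per boot thanks to the .data..once flag */
	VM_WARN_ON_ONCE_FOLIO(folio_test_dirty(folio) && !folio_mapping(folio),
			      folio);
}
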
#ifdef CONFIG_DEBUG_VIRTUAL
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index 03dee12d2b61..ac01dc4eb2ce 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -11,7 +11,36 @@
#endif
#ifndef leave_mm
-static inline void leave_mm(int cpu) { }
+static inline void leave_mm(void) { }
+#endif
+
+/*
+ * CPUs that are capable of running user task @p. Must contain at least one
+ * active CPU. It is assumed that the kernel can run on all CPUs, so calling
+ * this for a kernel thread is pointless.
+ *
+ * By default, we assume a sane, homogeneous system.
+ */
+#ifndef task_cpu_possible_mask
+# define task_cpu_possible_mask(p) cpu_possible_mask
+# define task_cpu_possible(cpu, p) true
+# define task_cpu_fallback_mask(p) housekeeping_cpumask(HK_TYPE_TICK)
+#else
+# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
+#endif
+
+#ifndef mm_untag_mask
+static inline unsigned long mm_untag_mask(struct mm_struct *mm)
+{
+ return -1UL;
+}
+#endif
+
+#ifndef arch_pgtable_dma_compat
+static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
+{
+ return true;
+}
#endif
#endif
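
A sketch of the intended use of task_cpu_possible_mask() (helper name hypothetical): generic code iterates the mask instead of assuming cpu_possible_mask, so asymmetric systems that override it are handled for free.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>

/* find an active CPU that @p is architecturally able to run on */
static int my_pick_runnable_cpu(struct task_struct *p)
{
	int cpu;

	for_each_cpu_and(cpu, task_cpu_possible_mask(p), cpu_active_mask)
		return cpu;

	return -ENODEV;
}
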
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 1a6a9eb6d3fa..d1094c2d5fb6 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -33,7 +33,7 @@ struct mmu_interval_notifier;
*
* @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still same page and same
* access flags). User should soft dirty the page in the end callback to make
- * sure that anyone relying on soft dirtyness catch pages that might be written
+ * sure that anyone relying on soft dirtiness catches pages that might be written
* through non CPU mappings.
*
* @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
@@ -41,7 +41,12 @@ struct mmu_interval_notifier;
*
* @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
* a device driver to possibly ignore the invalidation if the
- * migrate_pgmap_owner field matches the driver's device private pgmap owner.
+ * owner field matches the driver's device private pgmap owner.
+ *
+ * @MMU_NOTIFY_EXCLUSIVE: conversion of a page table entry to device-exclusive.
+ * The owner is initialized to the value provided by the caller of
+ * make_device_exclusive(), such that this caller can filter out these
+ * events.
*/
enum mmu_notifier_event {
MMU_NOTIFY_UNMAP = 0,
@@ -51,6 +56,7 @@ enum mmu_notifier_event {
MMU_NOTIFY_SOFT_DIRTY,
MMU_NOTIFY_RELEASE,
MMU_NOTIFY_MIGRATE,
+ MMU_NOTIFY_EXCLUSIVE,
};
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
@@ -117,15 +123,6 @@ struct mmu_notifier_ops {
unsigned long address);
/*
- * change_pte is called in cases that pte mapping to page is changed:
- * for example, when ksm remaps pte to point to a new shared page.
- */
- void (*change_pte)(struct mmu_notifier *subscription,
- struct mm_struct *mm,
- unsigned long address,
- pte_t pte);
-
- /*
* invalidate_range_start() and invalidate_range_end() must be
* paired and are called only when the mmap_lock and/or the
* locks protecting the reverse maps are held. If the subsystem
@@ -161,7 +158,7 @@ struct mmu_notifier_ops {
* decrease the refcount. If the refcount is decreased on
* invalidate_range_start() then the VM can free pages as page
* table entries are removed. If the refcount is only
- * droppped on invalidate_range_end() then the driver itself
+ * dropped on invalidate_range_end() then the driver itself
* will drop the last refcount but it must take care to flush
* any secondary tlb before doing the final free on the
* page. Pages will no longer be referenced by the linux
@@ -181,27 +178,27 @@ struct mmu_notifier_ops {
const struct mmu_notifier_range *range);
/*
- * invalidate_range() is either called between
- * invalidate_range_start() and invalidate_range_end() when the
- * VM has to free pages that where unmapped, but before the
- * pages are actually freed, or outside of _start()/_end() when
- * a (remote) TLB is necessary.
+ * arch_invalidate_secondary_tlbs() is used to manage a non-CPU TLB
+ * which shares page-tables with the CPU. The
+ * invalidate_range_start()/end() callbacks should not be implemented as
+ * invalidate_secondary_tlbs() already catches the points in time when
+ * an external TLB needs to be flushed.
*
- * If invalidate_range() is used to manage a non-CPU TLB with
- * shared page-tables, it not necessary to implement the
- * invalidate_range_start()/end() notifiers, as
- * invalidate_range() alread catches the points in time when an
- * external TLB range needs to be flushed. For more in depth
- * discussion on this see Documentation/vm/mmu_notifier.rst
+ * This requires arch_invalidate_secondary_tlbs() to be called while
+ * holding the ptl spin-lock and therefore this callback is not allowed
+ * to sleep.
*
- * Note that this function might be called with just a sub-range
- * of what was passed to invalidate_range_start()/end(), if
- * called between those functions.
+ * This is called by architecture code whenever invalidating a TLB
+ * entry. It is assumed that any secondary TLB has the same rules for
+ * when invalidations are required. If this is not the case architecture
+ * code will need to call this explicitly when required for secondary
+ * TLB invalidation.
*/
- void (*invalidate_range)(struct mmu_notifier *subscription,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end);
+ void (*arch_invalidate_secondary_tlbs)(
+ struct mmu_notifier *subscription,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
/*
* These callbacks are used with the get/put interface to manage the
@@ -263,13 +260,12 @@ extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif
struct mmu_notifier_range {
- struct vm_area_struct *vma;
struct mm_struct *mm;
unsigned long start;
unsigned long end;
unsigned flags;
enum mmu_notifier_event event;
- void *migrate_pgmap_owner;
+ void *owner;
};
static inline int mm_has_notifiers(struct mm_struct *mm)
@@ -363,7 +359,7 @@ mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
* mmu_interval_read_retry() will return true.
*
* False is not reliable and only suggests a collision may not have
- * occured. It can be called many times and does not have to hold the user
+ * occurred. It can be called many times and does not have to hold the user
* provided lock.
*
* This call can be used as part of loops and other expensive operations to
@@ -387,13 +383,10 @@ extern int __mmu_notifier_clear_young(struct mm_struct *mm,
unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address);
-extern void __mmu_notifier_change_pte(struct mm_struct *mm,
- unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
-extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
- bool only_end);
-extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
- unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
+extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
@@ -435,13 +428,6 @@ static inline int mmu_notifier_test_young(struct mm_struct *mm,
return 0;
}
-static inline void mmu_notifier_change_pte(struct mm_struct *mm,
- unsigned long address, pte_t pte)
-{
- if (mm_has_notifiers(mm))
- __mmu_notifier_change_pte(mm, address, pte);
-}
-
static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
@@ -455,7 +441,14 @@ mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
-static inline int
+/*
+ * This version of mmu_notifier_invalidate_range_start() avoids blocking, but it
+ * can return an error if a notifier can't proceed without blocking, in which
+ * case you're not allowed to modify PTEs in the specified range.
+ *
+ * This is mainly intended for OOM handling.
+ */
+static inline int __must_check
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
int ret = 0;
@@ -476,21 +469,14 @@ mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
might_sleep();
if (mm_has_notifiers(range->mm))
- __mmu_notifier_invalidate_range_end(range, false);
-}
-
-static inline void
-mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
-{
- if (mm_has_notifiers(range->mm))
- __mmu_notifier_invalidate_range_end(range, true);
+ __mmu_notifier_invalidate_range_end(range);
}
-static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_range(mm, start, end);
+ __mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}
static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
@@ -508,12 +494,10 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
enum mmu_notifier_event event,
unsigned flags,
- struct vm_area_struct *vma,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
- range->vma = vma;
range->event = event;
range->mm = mm;
range->start = start;
@@ -521,14 +505,14 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
range->flags = flags;
}
-static inline void mmu_notifier_range_init_migrate(
- struct mmu_notifier_range *range, unsigned int flags,
- struct vm_area_struct *vma, struct mm_struct *mm,
- unsigned long start, unsigned long end, void *pgmap)
+static inline void mmu_notifier_range_init_owner(
+ struct mmu_notifier_range *range,
+ enum mmu_notifier_event event, unsigned int flags,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end, void *owner)
{
- mmu_notifier_range_init(range, MMU_NOTIFY_MIGRATE, flags, vma, mm,
- start, end);
- range->migrate_pgmap_owner = pgmap;
+ mmu_notifier_range_init(range, event, flags, mm, start, end);
+ range->owner = owner;
}
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
@@ -579,65 +563,6 @@ static inline void mmu_notifier_range_init_migrate(
__young; \
})
-#define ptep_clear_flush_notify(__vma, __address, __ptep) \
-({ \
- unsigned long ___addr = __address & PAGE_MASK; \
- struct mm_struct *___mm = (__vma)->vm_mm; \
- pte_t ___pte; \
- \
- ___pte = ptep_clear_flush(__vma, __address, __ptep); \
- mmu_notifier_invalidate_range(___mm, ___addr, \
- ___addr + PAGE_SIZE); \
- \
- ___pte; \
-})
-
-#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd) \
-({ \
- unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
- struct mm_struct *___mm = (__vma)->vm_mm; \
- pmd_t ___pmd; \
- \
- ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd); \
- mmu_notifier_invalidate_range(___mm, ___haddr, \
- ___haddr + HPAGE_PMD_SIZE); \
- \
- ___pmd; \
-})
-
-#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \
-({ \
- unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \
- struct mm_struct *___mm = (__vma)->vm_mm; \
- pud_t ___pud; \
- \
- ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \
- mmu_notifier_invalidate_range(___mm, ___haddr, \
- ___haddr + HPAGE_PUD_SIZE); \
- \
- ___pud; \
-})
-
-/*
- * set_pte_at_notify() sets the pte _after_ running the notifier.
- * This is safe to start by updating the secondary MMUs, because the primary MMU
- * pte invalidate must have already happened with a ptep_clear_flush() before
- * set_pte_at_notify() has been invoked. Updating the secondary MMUs first is
- * required when we change both the protection of the mapping from read-only to
- * read-write and the pfn (like during copy on write page faults). Otherwise the
- * old page would remain mapped readonly in the secondary MMUs after the new
- * page is already writable by some CPU through the primary MMU.
- */
-#define set_pte_at_notify(__mm, __address, __ptep, __pte) \
-({ \
- struct mm_struct *___mm = __mm; \
- unsigned long ___address = __address; \
- pte_t ___pte = __pte; \
- \
- mmu_notifier_change_pte(___mm, ___address, ___pte); \
- set_pte_at(___mm, ___address, __ptep, ___pte); \
-})
-
#else /* CONFIG_MMU_NOTIFIER */
struct mmu_notifier_range {
@@ -653,10 +578,10 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
range->end = end;
}
-#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
+#define mmu_notifier_range_init(range,event,flags,mm,start,end) \
_mmu_notifier_range_init(range, start, end)
-#define mmu_notifier_range_init_migrate(range, flags, vma, mm, start, end, \
- pgmap) \
+#define mmu_notifier_range_init_owner(range, event, flags, mm, start, \
+ end, owner) \
_mmu_notifier_range_init(range, start, end)
static inline bool
@@ -681,15 +606,17 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
return 0;
}
-static inline int mmu_notifier_test_young(struct mm_struct *mm,
- unsigned long address)
+static inline int mmu_notifier_clear_young(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
{
return 0;
}
-static inline void mmu_notifier_change_pte(struct mm_struct *mm,
- unsigned long address, pte_t pte)
+static inline int mmu_notifier_test_young(struct mm_struct *mm,
+ unsigned long address)
{
+ return 0;
}
static inline void
@@ -708,12 +635,7 @@ void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}
-static inline void
-mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
-{
-}
-
-static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
}
@@ -732,10 +654,6 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
-#define ptep_clear_flush_notify ptep_clear_flush
-#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
-#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
-#define set_pte_at_notify set_pte_at
static inline void mmu_notifier_synchronize(void)
{
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0d53eba1c383..4398e027f450 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -7,6 +7,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/list_nulls.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
@@ -20,15 +21,37 @@
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
+#include <linux/local_lock.h>
+#include <linux/zswap.h>
#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
-#ifndef CONFIG_FORCE_MAX_ZONEORDER
-#define MAX_ORDER 11
+#ifndef CONFIG_ARCH_FORCE_MAX_ORDER
+#define MAX_PAGE_ORDER 10
#else
-#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
+#define MAX_PAGE_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
+#endif
+#define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER)
+
+#define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)
+
+#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)
+
+/* Defines the order for the number of pages that have a migrate type. */
+#ifndef CONFIG_PAGE_BLOCK_MAX_ORDER
+#define PAGE_BLOCK_MAX_ORDER MAX_PAGE_ORDER
+#else
+#define PAGE_BLOCK_MAX_ORDER CONFIG_PAGE_BLOCK_MAX_ORDER
+#endif /* CONFIG_PAGE_BLOCK_MAX_ORDER */
+
+/*
+ * MAX_PAGE_ORDER, which defines the maximum order of pages allocated by
+ * the buddy allocator, has to be larger than or equal to PAGE_BLOCK_MAX_ORDER,
+ * which defines the order for the number of pages that can have a migrate type.
+ */
+#if (PAGE_BLOCK_MAX_ORDER > MAX_PAGE_ORDER)
+#error MAX_PAGE_ORDER must be >= PAGE_BLOCK_MAX_ORDER
#endif
-#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
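
Since MAX_PAGE_ORDER is now the highest valid order rather than one past it, loops bound by NR_PAGE_ORDERS replace the old "order < MAX_ORDER" idiom. A hedged sketch (the walker is illustrative, not from the patch):

static unsigned long my_zone_free_pages(struct zone *zone)
{
	unsigned long nr = 0;
	unsigned int order;

	for (order = 0; order < NR_PAGE_ORDERS; order++)
		nr += zone->free_area[order].nr_free << order;

	return nr;
}
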
/*
* PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
@@ -53,12 +76,12 @@ enum migratetype {
*
* The way to use it is to change migratetype of a range of
* pageblocks to MIGRATE_CMA which can be done by
- * __free_pageblock_cma() function. What is important though
- * is that a range of pageblocks must be aligned to
- * MAX_ORDER_NR_PAGES should biggest page be bigger than
- * a single pageblock.
+ * __free_pageblock_cma() function.
*/
MIGRATE_CMA,
+ __MIGRATE_TYPE_END = MIGRATE_CMA,
+#else
+ __MIGRATE_TYPE_END = MIGRATE_HIGHATOMIC,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
@@ -72,9 +95,16 @@ extern const char * const migratetype_names[MIGRATE_TYPES];
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
+/*
+ * __dump_folio() in mm/debug.c passes a folio pointer to an on-stack struct folio,
+ * so folio_pfn() cannot be used and pfn is needed.
+ */
+# define is_migrate_cma_folio(folio, pfn) \
+ (get_pfnblock_migratetype(&folio->page, pfn) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
+# define is_migrate_cma_folio(folio, pfn) false
#endif
static inline bool is_migrate_movable(int mt)
@@ -82,50 +112,36 @@ static inline bool is_migrate_movable(int mt)
return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}
+/*
+ * Check whether a migratetype can be merged with another migratetype.
+ *
+ * It is only mergeable when it can fall back to other migratetypes for
+ * allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c.
+ */
+static inline bool migratetype_is_mergeable(int mt)
+{
+ return mt < MIGRATE_PCPTYPES;
+}
+
#define for_each_migratetype_order(order, type) \
- for (order = 0; order < MAX_ORDER; order++) \
+ for (order = 0; order < NR_PAGE_ORDERS; order++) \
for (type = 0; type < MIGRATE_TYPES; type++)
extern int page_group_by_mobility_disabled;
-#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
+#define get_pageblock_migratetype(page) \
+ get_pfnblock_migratetype(page, page_to_pfn(page))
-#define get_pageblock_migratetype(page) \
- get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
+#define folio_migratetype(folio) \
+ get_pageblock_migratetype(&folio->page)
struct free_area {
struct list_head free_list[MIGRATE_TYPES];
unsigned long nr_free;
};
-static inline struct page *get_page_from_free_area(struct free_area *area,
- int migratetype)
-{
- return list_first_entry_or_null(&area->free_list[migratetype],
- struct page, lru);
-}
-
-static inline bool free_area_empty(struct free_area *area, int migratetype)
-{
- return list_empty(&area->free_list[migratetype]);
-}
-
struct pglist_data;
-/*
- * Add a wild amount of padding here to ensure datas fall into separate
- * cachelines. There are very few zone structures in the machine, so space
- * consumption is not a concern here.
- */
-#if defined(CONFIG_SMP)
-struct zone_padding {
- char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define ZONE_PADDING(name) struct zone_padding name;
-#else
-#define ZONE_PADDING(name)
-#endif
-
#ifdef CONFIG_NUMA
enum numa_stat_item {
NUMA_HIT, /* allocated in intended node */
@@ -134,15 +150,16 @@ enum numa_stat_item {
NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
NUMA_LOCAL, /* allocation from local node */
NUMA_OTHER, /* allocation from other node */
- NR_VM_NUMA_STAT_ITEMS
+ NR_VM_NUMA_EVENT_ITEMS
};
#else
-#define NR_VM_NUMA_STAT_ITEMS 0
+#define NR_VM_NUMA_EVENT_ITEMS 0
#endif
enum zone_stat_item {
/* First 128 byte cacheline (assuming 64 bit words) */
NR_FREE_PAGES,
+ NR_FREE_PAGES_BLOCKS,
NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
NR_ZONE_ACTIVE_ANON,
@@ -152,11 +169,13 @@ enum zone_stat_item {
NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
/* Second 128 byte cacheline */
- NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
NR_ZSPAGES, /* allocated in zsmalloc */
#endif
NR_FREE_CMA_PAGES,
+#ifdef CONFIG_UNACCEPTED_MEMORY
+ NR_UNACCEPTED,
+#endif
NR_VM_ZONE_STAT_ITEMS };
enum node_stat_item {
@@ -187,7 +206,6 @@ enum node_stat_item {
NR_FILE_PAGES,
NR_FILE_DIRTY,
NR_WRITEBACK,
- NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */
NR_SHMEM_THPS,
NR_SHMEM_PMDMAPPED,
@@ -198,6 +216,7 @@ enum node_stat_item {
NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
NR_DIRTIED, /* page dirtyings since bootup */
NR_WRITTEN, /* page writings since bootup */
+ NR_THROTTLED_WRITTEN, /* NR_WRITTEN while reclaim throttled */
NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */
NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */
NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */
@@ -206,9 +225,41 @@ enum node_stat_item {
NR_KERNEL_SCS_KB, /* measured in KiB */
#endif
NR_PAGETABLE, /* used for pagetables */
+ NR_SECONDARY_PAGETABLE, /* secondary pagetables, KVM & IOMMU */
+#ifdef CONFIG_IOMMU_SUPPORT
+ NR_IOMMU_PAGES, /* # of pages allocated by IOMMU */
+#endif
#ifdef CONFIG_SWAP
NR_SWAPCACHE,
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ PGPROMOTE_SUCCESS, /* promote successfully */
+ /**
+ * Candidate pages for promotion based on hint fault latency. This
+ * counter is used to control the promotion rate and adjust the hot
+ * threshold.
+ */
+ PGPROMOTE_CANDIDATE,
+ /**
+ * Not rate-limited (NRL) candidate pages for those can be promoted
+ * without considering hot threshold because of enough free pages in
+ * fast-tier node. These promotions bypass the regular hotness checks
+ * and do NOT influence the promotion rate-limiter or
+ * threshold-adjustment logic.
+ * This is for statistics/monitoring purposes.
+ */
+ PGPROMOTE_CANDIDATE_NRL,
+#endif
+ /* PGDEMOTE_*: pages demoted */
+ PGDEMOTE_KSWAPD,
+ PGDEMOTE_DIRECT,
+ PGDEMOTE_KHUGEPAGED,
+ PGDEMOTE_PROACTIVE,
+#ifdef CONFIG_HUGETLB_PAGE
+ NR_HUGETLB,
+#endif
+ NR_BALLOON_PAGES,
+ NR_KERNEL_FILE_PAGES,
NR_VM_NODE_STAT_ITEMS
};
@@ -271,6 +322,14 @@ enum lru_list {
NR_LRU_LISTS
};
+enum vmscan_throttle_state {
+ VMSCAN_THROTTLE_WRITEBACK,
+ VMSCAN_THROTTLE_ISOLATED,
+ VMSCAN_THROTTLE_NOPROGRESS,
+ VMSCAN_THROTTLE_CONGESTED,
+ NR_VMSCAN_THROTTLE,
+};
+
#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
@@ -285,14 +344,328 @@ static inline bool is_active_lru(enum lru_list lru)
return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
+#define WORKINGSET_ANON 0
+#define WORKINGSET_FILE 1
#define ANON_AND_FILE 2
enum lruvec_flags {
- LRUVEC_CONGESTED, /* lruvec has many dirty pages
- * backed by a congested BDI
- */
+ /*
+ * An lruvec has many dirty pages backed by a congested BDI:
+ * 1. LRUVEC_CGROUP_CONGESTED is set by cgroup-level reclaim.
+ * It can be cleared by cgroup reclaim or kswapd.
+ * 2. LRUVEC_NODE_CONGESTED is set by kswapd node-level reclaim.
+ * It can only be cleared by kswapd.
+ *
+ * Essentially, kswapd can unthrottle an lruvec throttled by cgroup
+ * reclaim, but not vice versa. This only applies to the root cgroup.
+ * The goal is to prevent cgroup reclaim on the root cgroup (e.g.
+ * memory.reclaim) from unthrottling an unbalanced node (that was
+ * throttled by kswapd).
+ */
+ LRUVEC_CGROUP_CONGESTED,
+ LRUVEC_NODE_CONGESTED,
+};
+
+#endif /* !__GENERATING_BOUNDS_H */
+
+/*
+ * Evictable folios are divided into multiple generations. The youngest and the
+ * oldest generation numbers, max_seq and min_seq, are monotonically increasing.
+ * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
+ * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
+ * corresponding generation. The gen counter in folio->flags stores gen+1 while
+ * a folio is on one of lrugen->folios[]. Otherwise it stores 0.
+ *
+ * After a folio is faulted in, the aging needs to check the accessed bit at
+ * least twice before handing this folio over to the eviction. The first check
+ * clears the accessed bit from the initial fault; the second check makes sure
+ * this folio hasn't been used since then. This process, AKA second chance,
+ * requires a minimum of two generations, hence MIN_NR_GENS. And to maintain ABI
+ * compatibility with the active/inactive LRU, e.g., /proc/vmstat, these two
+ * generations are considered active; the rest of generations, if they exist,
+ * are considered inactive. See lru_gen_is_active().
+ *
+ * PG_active is always cleared while a folio is on one of lrugen->folios[] so
+ * that the sliding window needs not to worry about it. And it's set again when
+ * a folio considered active is isolated for non-reclaiming purposes, e.g.,
+ * migration. See lru_gen_add_folio() and lru_gen_del_folio().
+ *
+ * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the
+ * number of categories of the active/inactive LRU when keeping track of
+ * accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits
+ * in folio->flags, masked by LRU_GEN_MASK.
+ */
+#define MIN_NR_GENS 2U
+#define MAX_NR_GENS 4U
+
+/*
+ * Each generation is divided into multiple tiers. A folio accessed N times
+ * through file descriptors is in tier order_base_2(N). A folio in the first
+ * tier (N=0,1) is marked by PG_referenced unless it was faulted in through page
+ * tables or read ahead. A folio in the last tier (MAX_NR_TIERS-1) is marked by
+ * PG_workingset. A folio in any other tier (1<N<5) between the first and last
+ * is marked by additional bits of LRU_REFS_WIDTH in folio->flags.
+ *
+ * In contrast to moving across generations which requires the LRU lock, moving
+ * across tiers only involves atomic operations on folio->flags and therefore
+ * has a negligible cost in the buffered access path. In the eviction path,
+ * comparisons of refaulted/(evicted+protected) from the first tier and the rest
+ * infer whether folios accessed multiple times through file descriptors are
+ * statistically hot and thus worth protecting.
+ *
+ * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the
+ * number of categories of the active/inactive LRU when keeping track of
+ * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in
+ * folio->flags, masked by LRU_REFS_MASK.
+ */
+#define MAX_NR_TIERS 4U
+
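
Numerically, the tier of a folio accessed N times through file descriptors is order_base_2(N) capped to the last tier; an illustrative helper (not kernel code), assuming only the definitions above:

#include <linux/log2.h>
#include <linux/minmax.h>

static unsigned int my_refs_to_tier(unsigned int refs)
{
	/* N=0,1 -> tier 0; N=2 -> 1; N=3,4 -> 2; N>=5 saturates at 3 */
	return min_t(unsigned int, order_base_2(refs), MAX_NR_TIERS - 1);
}
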
+#ifndef __GENERATING_BOUNDS_H
+
+#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
+#define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
+
+/*
+ * For folios accessed multiple times through file descriptors,
+ * lru_gen_inc_refs() sets additional bits of LRU_REFS_WIDTH in folio->flags
+ * after PG_referenced, then PG_workingset after LRU_REFS_WIDTH. After all its
+ * bits are set, i.e., LRU_REFS_FLAGS|BIT(PG_workingset), a folio is lazily
+ * promoted into the second oldest generation in the eviction path. And when
+ * folio_inc_gen() does that, it clears LRU_REFS_FLAGS so that
+ * lru_gen_inc_refs() can start over. Note that for this case, LRU_REFS_MASK is
+ * only valid when PG_referenced is set.
+ *
+ * For folios accessed multiple times through page tables, folio_update_gen()
+ * from a page table walk or lru_gen_set_refs() from a rmap walk sets
+ * PG_referenced after the accessed bit is cleared for the first time.
+ * Thereafter, those two paths set PG_workingset and promote folios to the
+ * youngest generation. Like folio_inc_gen(), folio_update_gen() also clears
+ * PG_referenced. Note that for this case, LRU_REFS_MASK is not used.
+ *
+ * For both cases above, after PG_workingset is set on a folio, it remains until
+ * this folio is either reclaimed, or "deactivated" by lru_gen_clear_refs(). It
+ * can be set again if lru_gen_test_recent() returns true upon a refault.
+ */
+#define LRU_REFS_FLAGS (LRU_REFS_MASK | BIT(PG_referenced))
+
+struct lruvec;
+struct page_vma_mapped_walk;
+
+#ifdef CONFIG_LRU_GEN
+
+enum {
+ LRU_GEN_ANON,
+ LRU_GEN_FILE,
+};
+
+enum {
+ LRU_GEN_CORE,
+ LRU_GEN_MM_WALK,
+ LRU_GEN_NONLEAF_YOUNG,
+ NR_LRU_GEN_CAPS
};
+#define MIN_LRU_BATCH BITS_PER_LONG
+#define MAX_LRU_BATCH (MIN_LRU_BATCH * 64)
+
+/* whether to keep historical stats from evicted generations */
+#ifdef CONFIG_LRU_GEN_STATS
+#define NR_HIST_GENS MAX_NR_GENS
+#else
+#define NR_HIST_GENS 1U
+#endif
+
+/*
+ * The youngest generation number is stored in max_seq for both anon and file
+ * types as they are aged on an equal footing. The oldest generation numbers are
+ * stored in min_seq[] separately for anon and file types so that they can be
+ * incremented independently. Ideally min_seq[] are kept in sync when both anon
+ * and file types are evictable. However, to adapt to situations like extreme
+ * swappiness, they are allowed to be out of sync by at most
+ * MAX_NR_GENS-MIN_NR_GENS-1.
+ *
+ * The number of pages in each generation is eventually consistent and therefore
+ * can be transiently negative when reset_batch_size() is pending.
+ */
+struct lru_gen_folio {
+ /* the aging increments the youngest generation number */
+ unsigned long max_seq;
+ /* the eviction increments the oldest generation numbers */
+ unsigned long min_seq[ANON_AND_FILE];
+ /* the birth time of each generation in jiffies */
+ unsigned long timestamps[MAX_NR_GENS];
+ /* the multi-gen LRU lists, lazily sorted on eviction */
+ struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* the multi-gen LRU sizes, eventually consistent */
+ long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* the exponential moving average of refaulted */
+ unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
+ /* the exponential moving average of evicted+protected */
+ unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
+ /* can only be modified under the LRU lock */
+ unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
+ /* can be modified without holding the LRU lock */
+ atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
+ atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
+ /* whether the multi-gen LRU is enabled */
+ bool enabled;
+ /* the memcg generation this lru_gen_folio belongs to */
+ u8 gen;
+ /* the list segment this lru_gen_folio belongs to */
+ u8 seg;
+ /* per-node lru_gen_folio list for global reclaim */
+ struct hlist_nulls_node list;
+};
+
+enum {
+ MM_LEAF_TOTAL, /* total leaf entries */
+ MM_LEAF_YOUNG, /* young leaf entries */
+ MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */
+ MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */
+ NR_MM_STATS
+};
+
+/* double-buffering Bloom filters */
+#define NR_BLOOM_FILTERS 2
+
+struct lru_gen_mm_state {
+ /* synced with max_seq after each iteration */
+ unsigned long seq;
+ /* where the current iteration continues after */
+ struct list_head *head;
+ /* where the last iteration ended before */
+ struct list_head *tail;
+ /* Bloom filters flip after each iteration */
+ unsigned long *filters[NR_BLOOM_FILTERS];
+ /* the mm stats for debugging */
+ unsigned long stats[NR_HIST_GENS][NR_MM_STATS];
+};
+
+struct lru_gen_mm_walk {
+ /* the lruvec under reclaim */
+ struct lruvec *lruvec;
+ /* max_seq from lru_gen_folio: can be out of date */
+ unsigned long seq;
+ /* the next address within an mm to scan */
+ unsigned long next_addr;
+ /* to batch promoted pages */
+ int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* to batch the mm stats */
+ int mm_stats[NR_MM_STATS];
+ /* total batched items */
+ int batched;
+ int swappiness;
+ bool force_scan;
+};
+
+/*
+ * For each node, memcgs are divided into two generations: the old and the
+ * young. For each generation, memcgs are randomly sharded into multiple bins
+ * to improve scalability. For each bin, the hlist_nulls is virtually divided
+ * into three segments: the head, the tail and the default.
+ *
+ * An onlining memcg is added to the tail of a random bin in the old generation.
+ * The eviction starts at the head of a random bin in the old generation. The
+ * per-node memcg generation counter, whose remainder (mod MEMCG_NR_GENS) indexes
+ * the old generation, is incremented when all its bins become empty.
+ *
+ * There are four operations:
+ * 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its
+ * current generation (old or young) and updates its "seg" to "head";
+ * 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its
+ * current generation (old or young) and updates its "seg" to "tail";
+ * 3. MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old
+ * generation, updates its "gen" to "old" and resets its "seg" to "default";
+ * 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the
+ * young generation, updates its "gen" to "young" and resets its "seg" to
+ * "default".
+ *
+ * The events that trigger the above operations are:
+ * 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD;
+ * 2. The first attempt to reclaim a memcg below low, which triggers
+ * MEMCG_LRU_TAIL;
+ * 3. The first attempt to reclaim a memcg offlined or below reclaimable size
+ * threshold, which triggers MEMCG_LRU_TAIL;
+ * 4. The second attempt to reclaim a memcg offlined or below reclaimable size
+ * threshold, which triggers MEMCG_LRU_YOUNG;
+ * 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG;
+ * 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG;
+ * 7. Offlining a memcg, which triggers MEMCG_LRU_OLD.
+ *
+ * Notes:
+ * 1. Memcg LRU only applies to global reclaim, and the round-robin incrementing
+ * of their max_seq counters ensures the eventual fairness to all eligible
+ * memcgs. For memcg reclaim, it still relies on mem_cgroup_iter().
+ * 2. There are only two valid generations: old (seq) and young (seq+1).
+ * MEMCG_NR_GENS is set to three so that when reading the generation counter
+ * locklessly, a stale value (seq-1) does not wrap around to young.
+ */
+#define MEMCG_NR_GENS 3
+#define MEMCG_NR_BINS 8
+
+struct lru_gen_memcg {
+ /* the per-node memcg generation counter */
+ unsigned long seq;
+ /* each memcg has one lru_gen_folio per node */
+ unsigned long nr_memcgs[MEMCG_NR_GENS];
+ /* per-node lru_gen_folio list for global reclaim */
+ struct hlist_nulls_head fifo[MEMCG_NR_GENS][MEMCG_NR_BINS];
+ /* protects the above */
+ spinlock_t lock;
+};
+
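
Given MEMCG_NR_GENS above, the old generation's index is simply the counter modulo MEMCG_NR_GENS; a minimal illustrative read (helper name assumed):

static unsigned int my_memcg_old_gen(struct lru_gen_memcg *mlru)
{
	/* lockless read; the third generation absorbs stale (seq-1) values */
	return READ_ONCE(mlru->seq) % MEMCG_NR_GENS;
}
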
+void lru_gen_init_pgdat(struct pglist_data *pgdat);
+void lru_gen_init_lruvec(struct lruvec *lruvec);
+bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
+
+void lru_gen_init_memcg(struct mem_cgroup *memcg);
+void lru_gen_exit_memcg(struct mem_cgroup *memcg);
+void lru_gen_online_memcg(struct mem_cgroup *memcg);
+void lru_gen_offline_memcg(struct mem_cgroup *memcg);
+void lru_gen_release_memcg(struct mem_cgroup *memcg);
+void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
+
+#else /* !CONFIG_LRU_GEN */
+
+static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)
+{
+}
+
+static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
+{
+}
+
+static inline bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+{
+ return false;
+}
+
+static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_online_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_release_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid)
+{
+}
+
+#endif /* CONFIG_LRU_GEN */
+
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
/* per lruvec lru_lock for memcg */
@@ -310,13 +683,20 @@ struct lruvec {
unsigned long refaults[ANON_AND_FILE];
/* Various lruvec state flags (enum lruvec_flags) */
unsigned long flags;
+#ifdef CONFIG_LRU_GEN
+ /* evictable pages divided into generations */
+ struct lru_gen_folio lrugen;
+#ifdef CONFIG_LRU_GEN_WALKS_MMU
+ /* to concurrently iterate lru_gen_mm_list */
+ struct lru_gen_mm_state mm_state;
+#endif
+#endif /* CONFIG_LRU_GEN */
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
+ struct zswap_lruvec_state zswap_lruvec_state;
};
-/* Isolate unmapped pages */
-#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
@@ -329,32 +709,68 @@ enum zone_watermarks {
WMARK_MIN,
WMARK_LOW,
WMARK_HIGH,
+ WMARK_PROMO,
NR_WMARK
};
-#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
-#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
-#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
-#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
+/*
+ * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. Two additional lists
+ * are added for THP. One PCP list is used by GFP_MOVABLE, and the other PCP list
+ * is used by GFP_UNMOVABLE and GFP_RECLAIMABLE.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define NR_PCP_THP 2
+#else
+#define NR_PCP_THP 0
+#endif
+#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
+#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
+
+/*
+ * Flags used in pcp->flags field.
+ *
+ * PCPF_PREV_FREE_HIGH_ORDER: a high-order page was freed in the
+ * previous page freeing; used to avoid draining the PCP for a one-off
+ * high-order page freeing.
+ *
+ * PCPF_FREE_HIGH_BATCH: preserve "pcp->batch" pages in the PCP before
+ * draining it for consecutive high-order page freeing without
+ * allocation, if the CPU's data cache slice is large enough, to reduce
+ * zone lock contention and keep cache-hot pages available for reuse.
+ */
+#define PCPF_PREV_FREE_HIGH_ORDER BIT(0)
+#define PCPF_FREE_HIGH_BATCH BIT(1)
struct per_cpu_pages {
+ spinlock_t lock; /* Protects lists field */
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
+ int high_min; /* min high watermark */
+ int high_max; /* max high watermark */
int batch; /* chunk size for buddy add/remove */
+ u8 flags; /* protected by pcp->lock */
+ u8 alloc_factor; /* batch scaling factor during allocate */
+#ifdef CONFIG_NUMA
+ u8 expire; /* When 0, remote pagesets are drained */
+#endif
+ short free_count; /* consecutive free count */
/* Lists of pages, one per migrate type stored on the pcp-lists */
- struct list_head lists[MIGRATE_PCPTYPES];
-};
+ struct list_head lists[NR_PCP_LISTS];
+} ____cacheline_aligned_in_smp;
-struct per_cpu_pageset {
- struct per_cpu_pages pcp;
-#ifdef CONFIG_NUMA
- s8 expire;
- u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
-#endif
+struct per_cpu_zonestat {
#ifdef CONFIG_SMP
- s8 stat_threshold;
s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
+ s8 stat_threshold;
+#endif
+#ifdef CONFIG_NUMA
+ /*
+ * Low priority inaccurate counters that are only folded
+ * on demand. Use a large type to avoid the overhead of
+ * folding during refresh_cpu_vm_stats.
+ */
+ unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#endif
};
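
A sketch of the locking rule stated in the struct above: both the lists and pcp->flags are accessed under the new per-pcp spinlock (the helper is hypothetical):

static bool my_pcp_saw_high_order_free(struct per_cpu_pages *pcp)
{
	bool seen;

	spin_lock(&pcp->lock);
	seen = pcp->flags & PCPF_PREV_FREE_HIGH_ORDER;
	spin_unlock(&pcp->lock);

	return seen;
}
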
@@ -468,6 +884,7 @@ struct zone {
unsigned long watermark_boost;
unsigned long nr_reserved_highatomic;
+ unsigned long nr_free_highatomic;
/*
* We don't know if the memory that we're going to allocate will be
@@ -484,12 +901,14 @@ struct zone {
int node;
#endif
struct pglist_data *zone_pgdat;
- struct per_cpu_pageset __percpu *pageset;
+ struct per_cpu_pages __percpu *per_cpu_pageset;
+ struct per_cpu_zonestat __percpu *per_cpu_zonestats;
/*
* the high and batch values are copied to individual pagesets for
* faster access
*/
- int pageset_high;
+ int pageset_high_min;
+ int pageset_high_max;
int pageset_batch;
#ifndef CONFIG_SPARSEMEM
@@ -512,6 +931,10 @@ struct zone {
* is calculated as:
* present_pages = spanned_pages - absent_pages(pages in holes);
*
+ * present_early_pages is present pages existing within the zone
+ * located on memory available since early boot, excluding hotplugged
+ * memory.
+ *
* managed_pages is present pages managed by the buddy system, which
* is calculated as (reserved_pages includes pages allocated by the
* bootmem allocator):
@@ -538,12 +961,15 @@ struct zone {
* give them a chance of being in the same cacheline.
*
* Write access to present_pages at runtime should be protected by
- * mem_hotplug_begin/end(). Any reader who can't tolerant drift of
- * present_pages should get_online_mems() to get a stable value.
+ * mem_hotplug_begin/done(). Any reader who can't tolerate drift of
+ * present_pages should use get_online_mems() to get a stable value.
*/
atomic_long_t managed_pages;
unsigned long spanned_pages;
unsigned long present_pages;
+#if defined(CONFIG_MEMORY_HOTPLUG)
+ unsigned long present_early_pages;
+#endif
#ifdef CONFIG_CMA
unsigned long cma_pages;
#endif
@@ -567,10 +993,18 @@ struct zone {
int initialized;
/* Write-intensive fields used from the page allocator */
- ZONE_PADDING(_pad1_)
+ CACHELINE_PADDING(_pad1_);
/* free areas of different sizes */
- struct free_area free_area[MAX_ORDER];
+ struct free_area free_area[NR_PAGE_ORDERS];
+
+#ifdef CONFIG_UNACCEPTED_MEMORY
+ /* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */
+ struct list_head unaccepted_pages;
+
+ /* To be called once the last page in the zone is accepted */
+ struct work_struct unaccepted_cleanup;
+#endif
/* zone flags, see below */
unsigned long flags;
@@ -578,8 +1012,11 @@ struct zone {
/* Primarily protects free_area */
spinlock_t lock;
+ /* Pages to be freed when next trylock succeeds */
+ struct llist_head trylock_free_pages;
+
/* Write-intensive fields used by compaction and vmstats. */
- ZONE_PADDING(_pad2_)
+ CACHELINE_PADDING(_pad2_);
/*
* When free pages are below this point, additional steps are taken
@@ -616,17 +1053,13 @@ struct zone {
bool contiguous;
- ZONE_PADDING(_pad3_)
+ CACHELINE_PADDING(_pad3_);
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
- atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
+ atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
} ____cacheline_internodealigned_in_smp;
enum pgdat_flags {
- PGDAT_DIRTY, /* reclaim scanning has recently found
- * many dirty file pages at the tail
- * of the LRU.
- */
PGDAT_WRITEBACK, /* reclaim scanning has recently found
* many pages under writeback
*/
@@ -637,9 +1070,37 @@ enum zone_flags {
ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
* Cleared when kswapd is woken.
*/
+ ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */
+ ZONE_BELOW_HIGH, /* zone is below high watermark. */
};
-static inline unsigned long zone_managed_pages(struct zone *zone)
+static inline unsigned long wmark_pages(const struct zone *z,
+ enum zone_watermarks w)
+{
+ return z->_watermark[w] + z->watermark_boost;
+}
+
+static inline unsigned long min_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_MIN);
+}
+
+static inline unsigned long low_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_LOW);
+}
+
+static inline unsigned long high_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_HIGH);
+}
+
+static inline unsigned long promo_wmark_pages(const struct zone *z)
+{
+ return wmark_pages(z, WMARK_PROMO);
+}
+
+static inline unsigned long zone_managed_pages(const struct zone *zone)
{
return (unsigned long)atomic_long_read(&zone->managed_pages);
}
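
Because the watermark helpers are now typed inline functions rather than macros, callers compose them naturally; a hedged example (the predicate name is assumed):

#include <linux/vmstat.h>

static bool my_zone_below_high_wmark(struct zone *z)
{
	return zone_page_state(z, NR_FREE_PAGES) < high_wmark_pages(z);
}
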
@@ -663,21 +1124,152 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}
-static inline bool zone_is_initialized(struct zone *zone)
+static inline bool zone_is_initialized(const struct zone *zone)
{
return zone->initialized;
}
-static inline bool zone_is_empty(struct zone *zone)
+static inline bool zone_is_empty(const struct zone *zone)
{
return zone->spanned_pages == 0;
}
+#ifndef BUILD_VDSO32_64
+/*
+ * The zone field is never updated after free_area_init_core()
+ * sets it, so none of the operations on it need to be atomic.
+ */
+
+/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
+#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
+#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
+#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
+#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
+#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
+#define LRU_GEN_PGOFF (KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
+#define LRU_REFS_PGOFF (LRU_GEN_PGOFF - LRU_REFS_WIDTH)
+
+/*
+ * Define the bit shifts to access each section. For non-existent
+ * sections we define the shift as 0; that plus a 0 mask ensures
+ * the compiler will optimise away reference to them.
+ */
+#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
+#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
+#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
+#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
+#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
+
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF) ? \
+ SECTIONS_PGOFF : ZONES_PGOFF)
+#else
+#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF) ? \
+ NODES_PGOFF : ZONES_PGOFF)
+#endif
+
+#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
+
+#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
+#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
+#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
+#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
+#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
+#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
+
+static inline enum zone_type memdesc_zonenum(memdesc_flags_t flags)
+{
+ ASSERT_EXCLUSIVE_BITS(flags.f, ZONES_MASK << ZONES_PGSHIFT);
+ return (flags.f >> ZONES_PGSHIFT) & ZONES_MASK;
+}
+
+static inline enum zone_type page_zonenum(const struct page *page)
+{
+ return memdesc_zonenum(page->flags);
+}
+
+static inline enum zone_type folio_zonenum(const struct folio *folio)
+{
+ return memdesc_zonenum(folio->flags);
+}
+
+#ifdef CONFIG_ZONE_DEVICE
+static inline bool memdesc_is_zone_device(memdesc_flags_t mdf)
+{
+ return memdesc_zonenum(mdf) == ZONE_DEVICE;
+}
+
+static inline struct dev_pagemap *page_pgmap(const struct page *page)
+{
+ VM_WARN_ON_ONCE_PAGE(!memdesc_is_zone_device(page->flags), page);
+ return page_folio(page)->pgmap;
+}
+
+/*
+ * Consecutive zone device pages should not be merged into the same sgl
+ * or bvec segment with other types of pages or if they belong to different
+ * pgmaps. Otherwise getting the pgmap of a given segment is not possible
+ * without scanning the entire segment. This helper returns true either if
+ * both pages are not zone device pages or both pages are zone device pages
+ * with the same pgmap.
+ */
+static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
+ const struct page *b)
+{
+ if (memdesc_is_zone_device(a->flags) != memdesc_is_zone_device(b->flags))
+ return false;
+ if (!memdesc_is_zone_device(a->flags))
+ return true;
+ return page_pgmap(a) == page_pgmap(b);
+}
+
+extern void memmap_init_zone_device(struct zone *, unsigned long,
+ unsigned long, struct dev_pagemap *);
+#else
+static inline bool memdesc_is_zone_device(memdesc_flags_t mdf)
+{
+ return false;
+}
+static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
+ const struct page *b)
+{
+ return true;
+}
+static inline struct dev_pagemap *page_pgmap(const struct page *page)
+{
+ return NULL;
+}
+#endif
+
+static inline bool is_zone_device_page(const struct page *page)
+{
+ return memdesc_is_zone_device(page->flags);
+}
+
+static inline bool folio_is_zone_device(const struct folio *folio)
+{
+ return memdesc_is_zone_device(folio->flags);
+}
+
+static inline bool is_zone_movable_page(const struct page *page)
+{
+ return page_zonenum(page) == ZONE_MOVABLE;
+}
+
+static inline bool folio_is_zone_movable(const struct folio *folio)
+{
+ return folio_zonenum(folio) == ZONE_MOVABLE;
+}
+#endif
+
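
A sketch of the merge rule described above (the helper is illustrative): two pages may share a segment only if they agree on zone-device-ness and pgmap, and here additionally only if they are physically contiguous.

static bool my_pages_mergeable(const struct page *a, const struct page *b)
{
	if (!zone_device_pages_have_same_pgmap(a, b))
		return false;

	return page_to_pfn(a) + 1 == page_to_pfn(b);
}
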
/*
* Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
* intersection with the given zone
*/
-static inline bool zone_intersects(struct zone *zone,
+static inline bool zone_intersects(const struct zone *zone,
unsigned long start_pfn, unsigned long nr_pages)
{
if (zone_is_empty(zone))
@@ -738,10 +1330,12 @@ struct zonelist {
struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};
-#ifndef CONFIG_DISCONTIGMEM
-/* The array of struct pages - for discontigmem use pgdat->lmem_map */
+/*
+ * The array of struct pages for flatmem.
+ * It must be declared for SPARSEMEM as well because there are configurations
+ * that rely on that.
+ */
extern struct page *mem_map;
-#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split {
@@ -751,6 +1345,31 @@ struct deferred_split {
};
#endif
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Per NUMA node memory failure handling statistics.
+ */
+struct memory_failure_stats {
+ /*
+ * Number of raw pages poisoned.
+ * Cases not accounted: memory outside kernel control, offline page,
+ * arch-specific memory_failure (SGX), hwpoison_filter() filtered
+ * error events, and unpoison actions from hwpoison_unpoison.
+ */
+ unsigned long total;
+ /*
+ * Recovery results of poisoned raw pages handled by memory_failure,
+ * in sync with mf_result.
+ * total = ignored + failed + delayed + recovered.
+ * total * PAGE_SIZE * #nodes = /proc/meminfo/HardwareCorrupted.
+ */
+ unsigned long ignored;
+ unsigned long failed;
+ unsigned long delayed;
+ unsigned long recovered;
+};
+#endif
+
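
The comment above pins down an accounting identity; spelled out as an illustrative helper (name assumed):

static unsigned long my_mf_total_handled(const struct memory_failure_stats *mf)
{
	/* should equal mf->total per the documented invariant */
	return mf->ignored + mf->failed + mf->delayed + mf->recovered;
}
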
/*
* On NUMA machines, each NUMA node would have a pg_data_t to describe
* it's memory layout. On UMA machines there is a single pglist_data which
@@ -775,7 +1394,7 @@ typedef struct pglist_data {
struct zonelist node_zonelists[MAX_ZONELISTS];
int nr_zones; /* number of populated zones in this node */
-#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
+#ifdef CONFIG_FLATMEM /* means !SPARSEMEM */
struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
struct page_ext *node_page_ext;
@@ -803,18 +1422,28 @@ typedef struct pglist_data {
int node_id;
wait_queue_head_t kswapd_wait;
wait_queue_head_t pfmemalloc_wait;
- struct task_struct *kswapd; /* Protected by
- mem_hotplug_begin/end() */
+
+ /* workqueues for throttling reclaim for different reasons. */
+ wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE];
+
+ atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
+ unsigned long nr_reclaim_start; /* nr pages written while throttled
+ * when throttling started. */
+#ifdef CONFIG_MEMORY_HOTPLUG
+ struct mutex kswapd_lock;
+#endif
+ struct task_struct *kswapd; /* Protected by kswapd_lock */
int kswapd_order;
enum zone_type kswapd_highest_zoneidx;
- int kswapd_failures; /* Number of 'reclaimed == 0' runs */
+ atomic_t kswapd_failures; /* Number of 'reclaimed == 0' runs */
#ifdef CONFIG_COMPACTION
int kcompactd_max_order;
enum zone_type kcompactd_highest_zoneidx;
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
+ bool proactive_compact_trigger;
#endif
/*
* This is a per-node reserve of pages that are not available
@@ -831,7 +1460,7 @@ typedef struct pglist_data {
#endif /* CONFIG_NUMA */
/* Write-intensive fields used by page reclaim */
- ZONE_PADDING(_pad1_)
+ CACHELINE_PADDING(_pad1_);
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
@@ -845,6 +1474,21 @@ typedef struct pglist_data {
struct deferred_split deferred_split_queue;
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ /* start time in ms of current promote rate limit period */
+ unsigned int nbp_rl_start;
+ /* number of promote candidate pages at start time of current rate limit period */
+ unsigned long nbp_rl_nr_cand;
+ /* promote threshold in ms */
+ unsigned int nbp_threshold;
+ /* start time in ms of current promote threshold adjustment period */
+ unsigned int nbp_th_start;
+ /*
+ * number of promote candidate pages at start time of current promote
+ * threshold adjustment period
+ */
+ unsigned long nbp_th_nr_cand;
+#endif
/* Fields commonly accessed by the page reclaim scanner */
/*
@@ -856,21 +1500,28 @@ typedef struct pglist_data {
unsigned long flags;
- ZONE_PADDING(_pad2_)
+#ifdef CONFIG_LRU_GEN
+ /* kswap mm walk data */
+ struct lru_gen_mm_walk mm_walk;
+ /* lru_gen_folio list */
+ struct lru_gen_memcg memcg_lru;
+#endif
+
+ CACHELINE_PADDING(_pad2_);
/* Per-node vmstats */
struct per_cpu_nodestat __percpu *per_cpu_nodestats;
atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
+#ifdef CONFIG_NUMA
+ struct memory_tier __rcu *memtier;
+#endif
+#ifdef CONFIG_MEMORY_FAILURE
+ struct memory_failure_stats mf_stats;
+#endif
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
-#ifdef CONFIG_FLAT_NODE_MEM_MAP
-#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))
-#else
-#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr))
-#endif
-#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
@@ -880,11 +1531,6 @@ static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}
-static inline bool pgdat_is_empty(pg_data_t *pgdat)
-{
- return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
-}
-
#include <linux/memory_hotplug.h>
void build_all_zonelists(pg_data_t *pgdat);
@@ -896,8 +1542,6 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
bool zone_watermark_ok(struct zone *z, unsigned int order,
unsigned long mark, int highest_zoneidx,
unsigned int alloc_flags);
-bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
- unsigned long mark, int highest_zoneidx);
/*
 * Memory initialization context, used to differentiate memory added by
* the platform statically or via memory hotplug interface.
@@ -933,12 +1577,12 @@ static inline int local_memory_node(int node_id) { return node_id; };
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
#ifdef CONFIG_ZONE_DEVICE
-static inline bool zone_is_zone_device(struct zone *zone)
+static inline bool zone_is_zone_device(const struct zone *zone)
{
return zone_idx(zone) == ZONE_DEVICE;
}
#else
-static inline bool zone_is_zone_device(struct zone *zone)
+static inline bool zone_is_zone_device(const struct zone *zone)
{
return false;
}
@@ -950,19 +1594,19 @@ static inline bool zone_is_zone_device(struct zone *zone)
* populated_zone(). If the whole zone is reserved then we can easily
* end up with populated_zone() && !managed_zone().
*/
-static inline bool managed_zone(struct zone *zone)
+static inline bool managed_zone(const struct zone *zone)
{
return zone_managed_pages(zone);
}
/* Returns true if a zone has memory */
-static inline bool populated_zone(struct zone *zone)
+static inline bool populated_zone(const struct zone *zone)
{
return zone->present_pages;
}
#ifdef CONFIG_NUMA
-static inline int zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(const struct zone *zone)
{
return zone->node;
}
@@ -972,7 +1616,7 @@ static inline void zone_set_nid(struct zone *zone, int nid)
zone->node = nid;
}
#else
-static inline int zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(const struct zone *zone)
{
return 0;
}
@@ -982,22 +1626,11 @@ static inline void zone_set_nid(struct zone *zone, int nid) {}
extern int movable_zone;
-#ifdef CONFIG_HIGHMEM
-static inline int zone_movable_is_highmem(void)
-{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
- return movable_zone == ZONE_HIGHMEM;
-#else
- return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
-#endif
-}
-#endif
-
static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
return (idx == ZONE_HIGHMEM ||
- (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
+ (idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM));
#else
return 0;
#endif
@@ -1010,48 +1643,34 @@ static inline int is_highmem_idx(enum zone_type idx)
* @zone: pointer to struct zone variable
* Return: 1 for a highmem zone, 0 otherwise
*/
-static inline int is_highmem(struct zone *zone)
+static inline int is_highmem(const struct zone *zone)
{
-#ifdef CONFIG_HIGHMEM
return is_highmem_idx(zone_idx(zone));
-#else
- return 0;
-#endif
}
-/* These two functions are used to setup the per zone pages min values */
-struct ctl_table;
+#ifdef CONFIG_ZONE_DMA
+bool has_managed_dma(void);
+#else
+static inline bool has_managed_dma(void)
+{
+ return false;
+}
+#endif
-int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
- loff_t *);
-int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
- size_t *, loff_t *);
-extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
-int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
- size_t *, loff_t *);
-int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
- void *, size_t *, loff_t *);
-int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
- void *, size_t *, loff_t *);
-int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
- void *, size_t *, loff_t *);
-int numa_zonelist_order_handler(struct ctl_table *, int,
- void *, size_t *, loff_t *);
-extern int percpu_pagelist_fraction;
-extern char numa_zonelist_order[];
-#define NUMA_ZONELIST_ORDER_LEN 16
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
extern struct pglist_data contig_page_data;
-#define NODE_DATA(nid) (&contig_page_data)
-#define NODE_MEM_MAP(nid) mem_map
+static inline struct pglist_data *NODE_DATA(int nid)
+{
+ return &contig_page_data;
+}
-#else /* CONFIG_NEED_MULTIPLE_NODES */
+#else /* CONFIG_NUMA */
#include <asm/mmzone.h>
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+#endif /* !CONFIG_NUMA */
extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
@@ -1090,12 +1709,12 @@ static inline struct zone *zonelist_zone(struct zoneref *zoneref)
return zoneref->zone;
}
-static inline int zonelist_zone_idx(struct zoneref *zoneref)
+static inline int zonelist_zone_idx(const struct zoneref *zoneref)
{
return zoneref->zone_idx;
}
-static inline int zonelist_node_idx(struct zoneref *zoneref)
+static inline int zonelist_node_idx(const struct zoneref *zoneref)
{
return zone_to_nid(zoneref->zone);
}
@@ -1171,7 +1790,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
zone = zonelist_zone(z))
#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
- for (zone = z->zone; \
+ for (zone = zonelist_zone(z); \
zone; \
z = next_zones_zonelist(++z, highidx, nodemask), \
zone = zonelist_zone(z))
@@ -1189,6 +1808,28 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
+/* Whether the 'nodes' are all movable nodes */
+static inline bool movable_only_nodes(nodemask_t *nodes)
+{
+ struct zonelist *zonelist;
+ struct zoneref *z;
+ int nid;
+
+ if (nodes_empty(*nodes))
+ return false;
+
+ /*
+ * We can choose an arbitrary node from the nodemask to get a
+ * zonelist as they are interlinked. We just need to find
+ * at least one zone that can satisfy kernel allocations.
+ */
+ nid = first_node(*nodes);
+ zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
+ z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
+ return !zonelist_zone(z);
+}
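A brief usage sketch (the caller is hypothetical): hotplug-style code can use this to reject node sets that could not satisfy kernel allocations:

/* Hypothetical caller: fail early if 'mask' offers only ZONE_MOVABLE memory. */
static int check_target_nodes(nodemask_t *mask)
{
	if (movable_only_nodes(mask))
		return -EINVAL;	/* no zone at or below ZONE_NORMAL available */

	return 0;
}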
+
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif
@@ -1200,8 +1841,6 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
#ifdef CONFIG_SPARSEMEM
/*
- * SECTION_SHIFT #bits space required to store a section #
- *
* PA_SECTION_SHIFT physical address to/from section number
* PFN_SECTION_SHIFT pfn to/from section number
*/
@@ -1216,8 +1855,8 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
#define SECTION_BLOCKFLAGS_BITS \
((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
-#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
-#error Allocator MAX_ORDER exceeds SECTION_SIZE
+#if (MAX_PAGE_ORDER + PAGE_SHIFT) > SECTION_SIZE_BITS
+#error Allocator MAX_PAGE_ORDER exceeds SECTION_SIZE
#endif
static inline unsigned long pfn_to_section_nr(unsigned long pfn)
@@ -1249,6 +1888,7 @@ static inline unsigned long section_nr_to_pfn(unsigned long sec)
#define SUBSECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SUBSECTION_MASK)
struct mem_section_usage {
+ struct rcu_head rcu;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
#endif
@@ -1313,15 +1953,17 @@ static inline unsigned long *section_to_usemap(struct mem_section *ms)
static inline struct mem_section *__nr_to_section(unsigned long nr)
{
+ unsigned long root = SECTION_NR_TO_ROOT(nr);
+
+ if (unlikely(root >= NR_SECTION_ROOTS))
+ return NULL;
+
#ifdef CONFIG_SPARSEMEM_EXTREME
- if (!mem_section)
+ if (!mem_section || !mem_section[root])
return NULL;
#endif
- if (!mem_section[SECTION_NR_TO_ROOT(nr)])
- return NULL;
- return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
+ return &mem_section[root][nr & SECTION_ROOT_MASK];
}
-extern unsigned long __section_nr(struct mem_section *ms);
extern size_t mem_section_usage_size(void);
/*
@@ -1335,16 +1977,38 @@ extern size_t mem_section_usage_size(void);
* (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
* worst combination is powerpc with 256k pages,
* which results in PFN_SECTION_SHIFT equal 6.
- * To sum it up, at least 6 bits are available.
+ * To sum it up, at least 6 bits are available on all architectures.
+ * However, other architectures leave more than 6 bits free (e.g. 15 bits
+ * on x86_64, and 13 bits on arm64 even with the worst case of 64K pages),
+ * so a bit beyond the first 6 may be used as long as that bit is never
+ * needed on powerpc.
*/
-#define SECTION_MARKED_PRESENT (1UL<<0)
-#define SECTION_HAS_MEM_MAP (1UL<<1)
-#define SECTION_IS_ONLINE (1UL<<2)
-#define SECTION_IS_EARLY (1UL<<3)
-#define SECTION_TAINT_ZONE_DEVICE (1UL<<4)
-#define SECTION_MAP_LAST_BIT (1UL<<5)
-#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
-#define SECTION_NID_SHIFT 3
+enum {
+ SECTION_MARKED_PRESENT_BIT,
+ SECTION_HAS_MEM_MAP_BIT,
+ SECTION_IS_ONLINE_BIT,
+ SECTION_IS_EARLY_BIT,
+#ifdef CONFIG_ZONE_DEVICE
+ SECTION_TAINT_ZONE_DEVICE_BIT,
+#endif
+#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
+ SECTION_IS_VMEMMAP_PREINIT_BIT,
+#endif
+ SECTION_MAP_LAST_BIT,
+};
+
+#define SECTION_MARKED_PRESENT BIT(SECTION_MARKED_PRESENT_BIT)
+#define SECTION_HAS_MEM_MAP BIT(SECTION_HAS_MEM_MAP_BIT)
+#define SECTION_IS_ONLINE BIT(SECTION_IS_ONLINE_BIT)
+#define SECTION_IS_EARLY BIT(SECTION_IS_EARLY_BIT)
+#ifdef CONFIG_ZONE_DEVICE
+#define SECTION_TAINT_ZONE_DEVICE BIT(SECTION_TAINT_ZONE_DEVICE_BIT)
+#endif
+#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
+#define SECTION_IS_VMEMMAP_PREINIT BIT(SECTION_IS_VMEMMAP_PREINIT_BIT)
+#endif
+#define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1))
+#define SECTION_NID_SHIFT SECTION_MAP_LAST_BIT
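Before the memory map is attached, the node id shares section_mem_map with these flags, living above SECTION_NID_SHIFT. A sketch of the encoding, mirroring the helpers in mm/sparse.c (signatures simplified for illustration):

/* Sketch of the early-boot encoding: flags below, node id above. */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (unsigned long)nid << SECTION_NID_SHIFT;
}

static inline int sparse_early_nid(unsigned long section_mem_map)
{
	return (int)(section_mem_map >> SECTION_NID_SHIFT);
}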
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
@@ -1353,7 +2017,7 @@ static inline struct page *__section_mem_map_addr(struct mem_section *section)
return (struct page *)map;
}
-static inline int present_section(struct mem_section *section)
+static inline int present_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}
@@ -1363,12 +2027,12 @@ static inline int present_section_nr(unsigned long nr)
return present_section(__nr_to_section(nr));
}
-static inline int valid_section(struct mem_section *section)
+static inline int valid_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}
-static inline int early_section(struct mem_section *section)
+static inline int early_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_IS_EARLY));
}
@@ -1378,17 +2042,48 @@ static inline int valid_section_nr(unsigned long nr)
return valid_section(__nr_to_section(nr));
}
-static inline int online_section(struct mem_section *section)
+static inline int online_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}
-static inline int online_device_section(struct mem_section *section)
+#ifdef CONFIG_ZONE_DEVICE
+static inline int online_device_section(const struct mem_section *section)
{
unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
return section && ((section->section_mem_map & flags) == flags);
}
+#else
+static inline int online_device_section(const struct mem_section *section)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
+static inline int preinited_vmemmap_section(const struct mem_section *section)
+{
+ return (section &&
+ (section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT));
+}
+
+void sparse_vmemmap_init_nid_early(int nid);
+void sparse_vmemmap_init_nid_late(int nid);
+
+#else
+static inline int preinited_vmemmap_section(const struct mem_section *section)
+{
+ return 0;
+}
+static inline void sparse_vmemmap_init_nid_early(int nid)
+{
+}
+
+static inline void sparse_vmemmap_init_nid_late(int nid)
+{
+}
+#endif
static inline int online_section_nr(unsigned long nr)
{
@@ -1416,39 +2111,148 @@ static inline int subsection_map_index(unsigned long pfn)
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
int idx = subsection_map_index(pfn);
+ struct mem_section_usage *usage = READ_ONCE(ms->usage);
- return test_bit(idx, ms->usage->subsection_map);
+ return usage ? test_bit(idx, usage->subsection_map) : 0;
+}
+
+static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn)
+{
+ struct mem_section_usage *usage = READ_ONCE(ms->usage);
+ int idx = subsection_map_index(*pfn);
+ unsigned long bit;
+
+ if (!usage)
+ return false;
+
+ if (test_bit(idx, usage->subsection_map))
+ return true;
+
+ /* Find the next subsection that exists */
+ bit = find_next_bit(usage->subsection_map, SUBSECTIONS_PER_SECTION, idx);
+ if (bit == SUBSECTIONS_PER_SECTION)
+ return false;
+
+ *pfn = (*pfn & PAGE_SECTION_MASK) + (bit * PAGES_PER_SUBSECTION);
+ return true;
}
#else
static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
{
return 1;
}
+
+static inline bool pfn_section_first_valid(struct mem_section *ms, unsigned long *pfn)
+{
+ return true;
+}
#endif
+void sparse_init_early_section(int nid, struct page *map, unsigned long pnum,
+ unsigned long flags);
+
#ifndef CONFIG_HAVE_ARCH_PFN_VALID
+/**
+ * pfn_valid - check if there is a valid memory map entry for a PFN
+ * @pfn: the page frame number to check
+ *
+ * Check if there is a valid memory map entry, i.e. a struct page, for @pfn.
+ * Note that availability of the memory map entry does not imply that
+ * there is actual usable memory at that @pfn. The struct page may
+ * represent a hole or an unusable page frame.
+ *
+ * Return: 1 for PFNs that have memory map entries and 0 otherwise
+ */
static inline int pfn_valid(unsigned long pfn)
{
struct mem_section *ms;
+ int ret;
+
+ /*
+ * Ensure the upper PAGE_SHIFT bits are clear in the
+ * pfn. Else it might lead to false positives when
+ * some of the upper bits are set, but the lower bits
+ * match a valid pfn.
+ */
+ if (PHYS_PFN(PFN_PHYS(pfn)) != pfn)
+ return 0;
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- ms = __nr_to_section(pfn_to_section_nr(pfn));
- if (!valid_section(ms))
+ ms = __pfn_to_section(pfn);
+ rcu_read_lock_sched();
+ if (!valid_section(ms)) {
+ rcu_read_unlock_sched();
return 0;
+ }
/*
* Traditionally early sections always returned pfn_valid() for
* the entire section-sized span.
*/
- return early_section(ms) || pfn_section_valid(ms, pfn);
+ ret = early_section(ms) || pfn_section_valid(ms, pfn);
+ rcu_read_unlock_sched();
+
+ return ret;
}
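A hedged usage sketch: a valid pfn only guarantees the struct page exists, so a walker typically tests pfn_valid() before touching pfn_to_page() (the function below is illustrative):

/* Illustrative walker: count pfns in [start, end) that have a struct page. */
static unsigned long count_mapped_pfns(unsigned long start, unsigned long end)
{
	unsigned long pfn, nr = 0;

	for (pfn = start; pfn < end; pfn++)
		if (pfn_valid(pfn))
			nr++;	/* pfn_to_page(pfn) would now be safe */

	return nr;
}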
+
+/* Returns end_pfn or higher if no valid PFN remaining in range */
+static inline unsigned long first_valid_pfn(unsigned long pfn, unsigned long end_pfn)
+{
+ unsigned long nr = pfn_to_section_nr(pfn);
+
+ rcu_read_lock_sched();
+
+ while (nr <= __highest_present_section_nr && pfn < end_pfn) {
+ struct mem_section *ms = __pfn_to_section(pfn);
+
+ if (valid_section(ms) &&
+ (early_section(ms) || pfn_section_first_valid(ms, &pfn))) {
+ rcu_read_unlock_sched();
+ return pfn;
+ }
+
+ /* Nothing left in this section? Skip to next section */
+ nr++;
+ pfn = section_nr_to_pfn(nr);
+ }
+
+ rcu_read_unlock_sched();
+ return end_pfn;
+}
+
+static inline unsigned long next_valid_pfn(unsigned long pfn, unsigned long end_pfn)
+{
+ pfn++;
+
+ if (pfn >= end_pfn)
+ return end_pfn;
+
+ /*
+ * Either every PFN within the section (or subsection for VMEMMAP) is
+ * valid, or none of them are. So there's no point repeating the check
+ * for every PFN; only call first_valid_pfn() again when crossing a
+ * (sub)section boundary (i.e. !(pfn & ~PAGE_{SUB,}SECTION_MASK)).
+ */
+ if (pfn & ~(IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP) ?
+ PAGE_SUBSECTION_MASK : PAGE_SECTION_MASK))
+ return pfn;
+
+ return first_valid_pfn(pfn, end_pfn);
+}
+
+#define for_each_valid_pfn(_pfn, _start_pfn, _end_pfn) \
+ for ((_pfn) = first_valid_pfn((_start_pfn), (_end_pfn)); \
+ (_pfn) < (_end_pfn); \
+ (_pfn) = next_valid_pfn((_pfn), (_end_pfn)))
+
#endif
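With the macro, the same walk can skip invalid ranges a (sub)section at a time instead of testing every pfn; a sketch:

/* Same count as a per-pfn pfn_valid() loop, but skipping whole holes. */
static unsigned long count_mapped_pfns_fast(unsigned long start, unsigned long end)
{
	unsigned long pfn, nr = 0;

	for_each_valid_pfn(pfn, start, end)
		nr++;

	return nr;
}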
static inline int pfn_in_present_section(unsigned long pfn)
{
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
+ return present_section(__pfn_to_section(pfn));
}
static inline unsigned long next_present_section_nr(unsigned long section_nr)
@@ -1461,6 +2265,11 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr)
return -1;
}
+#define for_each_present_section_nr(start, section_nr) \
+ for (section_nr = next_present_section_nr(start - 1); \
+ section_nr != -1; \
+ section_nr = next_present_section_nr(section_nr))
+
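A minimal sketch of the iterator in use (the function is hypothetical):

/* Hypothetical: count present sections from 'first' upwards. */
static unsigned long nr_present_sections_from(unsigned long first)
{
	unsigned long section_nr, nr = 0;

	for_each_present_section_nr(first, section_nr)
		nr++;

	return nr;
}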
/*
* These are _only_ used during initialisation, therefore they
* can use __initdata ... They could have names to indicate
@@ -1480,20 +2289,20 @@ void sparse_init(void);
#else
#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
+#define sparse_vmemmap_init_nid_early(_nid) do {} while (0)
+#define sparse_vmemmap_init_nid_late(_nid) do {} while (0)
#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
/*
- * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
- * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
- * pfn_valid_within() should be used in this case; we optimise this away
- * when we have no holes within a MAX_ORDER_NR_PAGES block.
+ * Fallback case for when the architecture provides its own pfn_valid() but
+ * not a corresponding for_each_valid_pfn().
*/
-#ifdef CONFIG_HOLES_IN_ZONE
-#define pfn_valid_within(pfn) pfn_valid(pfn)
-#else
-#define pfn_valid_within(pfn) (1)
+#ifndef for_each_valid_pfn
+#define for_each_valid_pfn(_pfn, _start_pfn, _end_pfn) \
+ for ((_pfn) = (_start_pfn); (_pfn) < (_end_pfn); (_pfn)++) \
+ if (pfn_valid(_pfn))
#endif
#endif /* !__GENERATING_BOUNDS.H */
diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h
new file mode 100644
index 000000000000..e71a6070a8f8
--- /dev/null
+++ b/include/linux/mnt_idmapping.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MNT_IDMAPPING_H
+#define _LINUX_MNT_IDMAPPING_H
+
+#include <linux/types.h>
+#include <linux/uidgid.h>
+
+struct mnt_idmap;
+struct user_namespace;
+
+extern struct mnt_idmap nop_mnt_idmap;
+extern struct mnt_idmap invalid_mnt_idmap;
+extern struct user_namespace init_user_ns;
+
+typedef struct {
+ uid_t val;
+} vfsuid_t;
+
+typedef struct {
+ gid_t val;
+} vfsgid_t;
+
+static_assert(sizeof(vfsuid_t) == sizeof(kuid_t));
+static_assert(sizeof(vfsgid_t) == sizeof(kgid_t));
+static_assert(offsetof(vfsuid_t, val) == offsetof(kuid_t, val));
+static_assert(offsetof(vfsgid_t, val) == offsetof(kgid_t, val));
+
+static inline bool is_valid_mnt_idmap(const struct mnt_idmap *idmap)
+{
+ return idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap;
+}
+
+#ifdef CONFIG_MULTIUSER
+static inline uid_t __vfsuid_val(vfsuid_t uid)
+{
+ return uid.val;
+}
+
+static inline gid_t __vfsgid_val(vfsgid_t gid)
+{
+ return gid.val;
+}
+#else
+static inline uid_t __vfsuid_val(vfsuid_t uid)
+{
+ return 0;
+}
+
+static inline gid_t __vfsgid_val(vfsgid_t gid)
+{
+ return 0;
+}
+#endif
+
+static inline bool vfsuid_valid(vfsuid_t uid)
+{
+ return __vfsuid_val(uid) != (uid_t)-1;
+}
+
+static inline bool vfsgid_valid(vfsgid_t gid)
+{
+ return __vfsgid_val(gid) != (gid_t)-1;
+}
+
+static inline bool vfsuid_eq(vfsuid_t left, vfsuid_t right)
+{
+ return vfsuid_valid(left) && __vfsuid_val(left) == __vfsuid_val(right);
+}
+
+static inline bool vfsgid_eq(vfsgid_t left, vfsgid_t right)
+{
+ return vfsgid_valid(left) && __vfsgid_val(left) == __vfsgid_val(right);
+}
+
+/**
+ * vfsuid_eq_kuid - check whether kuid and vfsuid have the same value
+ * @vfsuid: the vfsuid to compare
+ * @kuid: the kuid to compare
+ *
+ * Check whether @vfsuid and @kuid have the same values.
+ *
+ * Return: true if @vfsuid and @kuid have the same value, false if not.
+ * Comparison between two invalid uids returns false.
+ */
+static inline bool vfsuid_eq_kuid(vfsuid_t vfsuid, kuid_t kuid)
+{
+ return vfsuid_valid(vfsuid) && __vfsuid_val(vfsuid) == __kuid_val(kuid);
+}
+
+/**
+ * vfsgid_eq_kgid - check whether kgid and vfsgid have the same value
+ * @vfsgid: the vfsgid to compare
+ * @kgid: the kgid to compare
+ *
+ * Check whether @vfsgid and @kgid have the same values.
+ *
+ * Return: true if @vfsgid and @kgid have the same value, false if not.
+ * Comparison between two invalid gids returns false.
+ */
+static inline bool vfsgid_eq_kgid(vfsgid_t vfsgid, kgid_t kgid)
+{
+ return vfsgid_valid(vfsgid) && __vfsgid_val(vfsgid) == __kgid_val(kgid);
+}
+
+/*
+ * vfs{g,u}ids are created from k{g,u}ids.
+ * We don't allow them to be created from regular {u,g}id.
+ */
+#define VFSUIDT_INIT(val) (vfsuid_t){ __kuid_val(val) }
+#define VFSGIDT_INIT(val) (vfsgid_t){ __kgid_val(val) }
+
+#define INVALID_VFSUID VFSUIDT_INIT(INVALID_UID)
+#define INVALID_VFSGID VFSGIDT_INIT(INVALID_GID)
+
+/*
+ * Allow a vfs{g,u}id to be used as a k{g,u}id where we want to compare
+ * whether the mapped value is identical to value of a k{g,u}id.
+ */
+#define AS_KUIDT(val) (kuid_t){ __vfsuid_val(val) }
+#define AS_KGIDT(val) (kgid_t){ __vfsgid_val(val) }
+
+int vfsgid_in_group_p(vfsgid_t vfsgid);
+
+struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap);
+void mnt_idmap_put(struct mnt_idmap *idmap);
+
+vfsuid_t make_vfsuid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns, kuid_t kuid);
+
+vfsgid_t make_vfsgid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns, kgid_t kgid);
+
+kuid_t from_vfsuid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns, vfsuid_t vfsuid);
+
+kgid_t from_vfsgid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns, vfsgid_t vfsgid);
+
+/**
+ * vfsuid_has_fsmapping - check whether a vfsuid maps into the filesystem
+ * @idmap: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @vfsuid: vfsuid to be mapped
+ *
+ * Check whether @vfsuid has a mapping in the filesystem idmapping. Use this
+ * function to check whether the filesystem idmapping has a mapping for
+ * @vfsuid.
+ *
+ * Return: true if @vfsuid has a mapping in the filesystem, false if not.
+ */
+static inline bool vfsuid_has_fsmapping(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns,
+ vfsuid_t vfsuid)
+{
+ return uid_valid(from_vfsuid(idmap, fs_userns, vfsuid));
+}
+
+static inline bool vfsuid_has_mapping(struct user_namespace *userns,
+ vfsuid_t vfsuid)
+{
+ return from_kuid(userns, AS_KUIDT(vfsuid)) != (uid_t)-1;
+}
+
+/**
+ * vfsuid_into_kuid - convert vfsuid into kuid
+ * @vfsuid: the vfsuid to convert
+ *
+ * This can be used when a vfsuid is committed as a kuid.
+ *
+ * Return: a kuid with the value of @vfsuid
+ */
+static inline kuid_t vfsuid_into_kuid(vfsuid_t vfsuid)
+{
+ return AS_KUIDT(vfsuid);
+}
+
+/**
+ * vfsgid_has_fsmapping - check whether a vfsgid maps into the filesystem
+ * @idmap: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ * @vfsgid: vfsgid to be mapped
+ *
+ * Check whether @vfsgid has a mapping in the filesystem idmapping. Use this
+ * function to check whether the filesystem idmapping has a mapping for
+ * @vfsgid.
+ *
+ * Return: true if @vfsgid has a mapping in the filesystem, false if not.
+ */
+static inline bool vfsgid_has_fsmapping(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns,
+ vfsgid_t vfsgid)
+{
+ return gid_valid(from_vfsgid(idmap, fs_userns, vfsgid));
+}
+
+static inline bool vfsgid_has_mapping(struct user_namespace *userns,
+ vfsgid_t vfsgid)
+{
+ return from_kgid(userns, AS_KGIDT(vfsgid)) != (gid_t)-1;
+}
+
+/**
+ * vfsgid_into_kgid - convert vfsgid into kgid
+ * @vfsgid: the vfsgid to convert
+ *
+ * This can be used when a vfsgid is committed as a kgid.
+ *
+ * Return: a kgid with the value of @vfsgid
+ */
+static inline kgid_t vfsgid_into_kgid(vfsgid_t vfsgid)
+{
+ return AS_KGIDT(vfsgid);
+}
+
+/**
+ * mapped_fsuid - return caller's fsuid mapped according to an idmapping
+ * @idmap: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ *
+ * Use this helper to initialize a new vfs or filesystem object based on
+ * the caller's fsuid. A common example is initializing the i_uid field of
+ * a newly allocated inode triggered by a creation event such as mkdir or
+ * O_CREAT. Other examples include the allocation of quotas for a specific
+ * user.
+ *
+ * Return: the caller's current fsuid mapped up according to @idmap.
+ */
+static inline kuid_t mapped_fsuid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns)
+{
+ return from_vfsuid(idmap, fs_userns, VFSUIDT_INIT(current_fsuid()));
+}
+
+/**
+ * mapped_fsgid - return caller's fsgid mapped according to an idmapping
+ * @idmap: the mount's idmapping
+ * @fs_userns: the filesystem's idmapping
+ *
+ * Use this helper to initialize a new vfs or filesystem object based on
+ * the caller's fsgid. A common example is initializing the i_gid field of
+ * a newly allocated inode triggered by a creation event such as mkdir or
+ * O_CREAT. Other examples include the allocation of quotas for a specific
+ * user.
+ *
+ * Return: the caller's current fsgid mapped up according to @idmap.
+ */
+static inline kgid_t mapped_fsgid(struct mnt_idmap *idmap,
+ struct user_namespace *fs_userns)
+{
+ return from_vfsgid(idmap, fs_userns, VFSGIDT_INIT(current_fsgid()));
+}
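A simplified sketch of the inode-initialization pattern both comments describe; real filesystems go through inode_init_owner(), so the helper below is purely illustrative:

/* Illustrative only: stamp a new inode with the caller's mapped ids. */
static void demo_init_inode_owner(struct inode *inode, struct mnt_idmap *idmap,
				  struct user_namespace *fs_userns)
{
	inode->i_uid = mapped_fsuid(idmap, fs_userns);
	inode->i_gid = mapped_fsgid(idmap, fs_userns);
}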
+
+#endif /* _LINUX_MNT_IDMAPPING_H */
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 8f882f5881e8..0acd1089d149 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -3,14 +3,20 @@
#define _NAMESPACE_H_
#ifdef __KERNEL__
+#include <linux/cleanup.h>
+#include <linux/err.h>
+
struct mnt_namespace;
struct fs_struct;
struct user_namespace;
struct ns_common;
-extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
+extern struct mnt_namespace init_mnt_ns;
+
+extern struct mnt_namespace *copy_mnt_ns(u64, struct mnt_namespace *,
struct user_namespace *, struct fs_struct *);
extern void put_mnt_ns(struct mnt_namespace *ns);
+DEFINE_FREE(put_mnt_ns, struct mnt_namespace *, if (!IS_ERR_OR_NULL(_T)) put_mnt_ns(_T))
extern struct ns_common *from_mnt_ns(struct mnt_namespace *);
extern const struct file_operations proc_mounts_operations;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 7d45b5f989b0..24eb5a88a5c5 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -9,6 +9,7 @@
#define LINUX_MOD_DEVICETABLE_H
#ifdef __KERNEL__
+#include <linux/mei.h>
#include <linux/types.h>
#include <linux/uuid.h>
typedef unsigned long kernel_ulong_t;
@@ -16,6 +17,10 @@ typedef unsigned long kernel_ulong_t;
#define PCI_ANY_ID (~0)
+enum {
+ PCI_ID_F_VFIO_DRIVER_OVERRIDE = 1,
+};
+
/**
* struct pci_device_id - PCI device ID structure
* @vendor: Vendor ID to match (or PCI_ANY_ID)
@@ -34,12 +39,14 @@ typedef unsigned long kernel_ulong_t;
* Best practice is to use driver_data as an index
* into a static list of equivalent device types,
* instead of using it as a pointer.
+ * @override_only: Match only when dev->driver_override is this driver.
*/
struct pci_device_id {
__u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
__u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
__u32 class, class_mask; /* (class,subclass,prog-if) triplet */
kernel_ulong_t driver_data; /* Data private to the driver */
+ __u32 override_only;
};
@@ -205,7 +212,7 @@ struct css_device_id {
kernel_ulong_t driver_data;
};
-#define ACPI_ID_LEN 9
+#define ACPI_ID_LEN 16
struct acpi_device_id {
__u8 id[ACPI_ID_LEN];
@@ -214,6 +221,19 @@ struct acpi_device_id {
__u32 cls_msk;
};
+/**
+ * ACPI_DEVICE_CLASS - macro used to describe an ACPI device with
+ * the PCI-defined class-code information
+ *
+ * @_cls : the class, subclass, prog-if triple for this device
+ * @_msk : the class mask for this device
+ *
+ * This macro is used to create a struct acpi_device_id that matches a
+ * specific PCI class. The .id and .driver_data fields will be left
+ * initialized with the default value.
+ */
+#define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk),
+
#define PNP_ID_LEN 8
#define PNP_MAX_DEVICES 8
@@ -320,7 +340,7 @@ struct pcmcia_device_id {
#define INPUT_DEVICE_ID_LED_MAX 0x0f
#define INPUT_DEVICE_ID_SND_MAX 0x07
#define INPUT_DEVICE_ID_FF_MAX 0x7f
-#define INPUT_DEVICE_ID_SW_MAX 0x10
+#define INPUT_DEVICE_ID_SW_MAX 0x11
#define INPUT_DEVICE_ID_PROP_MAX 0x1f
#define INPUT_DEVICE_ID_MATCH_BUS 1
@@ -447,6 +467,7 @@ struct hv_vmbus_device_id {
struct rpmsg_device_id {
char name[RPMSG_NAME_SIZE];
+ kernel_ulong_t driver_data;
};
/* i2c */
@@ -580,7 +601,7 @@ struct dmi_system_id {
#define DMI_MATCH(a, b) { .slot = a, .substr = b }
#define DMI_EXACT_MATCH(a, b) { .slot = a, .substr = b, .exact_match = 1 }
-#define PLATFORM_NAME_SIZE 20
+#define PLATFORM_NAME_SIZE 24
#define PLATFORM_MODULE_PREFIX "platform:"
struct platform_device_id {
@@ -669,6 +690,9 @@ struct x86_cpu_id {
__u16 model;
__u16 steppings;
__u16 feature; /* bit index */
+ /* Solely for kernel-internal use: DO NOT EXPORT to userspace! */
+ __u16 flags;
+ __u8 type;
kernel_ulong_t driver_data;
};
@@ -677,7 +701,10 @@ struct x86_cpu_id {
#define X86_FAMILY_ANY 0
#define X86_MODEL_ANY 0
#define X86_STEPPING_ANY 0
+#define X86_STEP_MIN 0
+#define X86_STEP_MAX 0xf
#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
+#define X86_CPU_TYPE_ANY 0
/*
* Generic table type for matching CPU features.
@@ -828,6 +855,8 @@ struct wmi_device_id {
#define MHI_DEVICE_MODALIAS_FMT "mhi:%s"
#define MHI_NAME_SIZE 32
+#define MHI_EP_DEVICE_MODALIAS_FMT "mhi_ep:%s"
+
/**
* struct mhi_device_id - MHI device identification
* @chan: MHI channel name
@@ -838,7 +867,7 @@ struct mhi_device_id {
kernel_ulong_t driver_data;
};
-#define AUXILIARY_NAME_SIZE 32
+#define AUXILIARY_NAME_SIZE 40
#define AUXILIARY_MODULE_PREFIX "auxiliary:"
struct auxiliary_device_id {
@@ -888,4 +917,63 @@ struct dfl_device_id {
kernel_ulong_t driver_data;
};
+/* ISHTP (Integrated Sensor Hub Transport Protocol) */
+
+#define ISHTP_MODULE_PREFIX "ishtp:"
+
+/**
+ * struct ishtp_device_id - ISHTP device identifier
+ * @guid: GUID of the device.
+ * @driver_data: pointer to driver specific data
+ */
+struct ishtp_device_id {
+ guid_t guid;
+ kernel_ulong_t driver_data;
+};
+
+#define CDX_ANY_ID (0xFFFF)
+
+enum {
+ CDX_ID_F_VFIO_DRIVER_OVERRIDE = 1,
+};
+
+/**
+ * struct cdx_device_id - CDX device identifier
+ * @vendor: Vendor ID
+ * @device: Device ID
+ * @subvendor: Subsystem vendor ID (or CDX_ANY_ID)
+ * @subdevice: Subsystem device ID (or CDX_ANY_ID)
+ * @class: Device class
+ * Most drivers do not need to specify class/class_mask
+ * as vendor/device is normally sufficient.
+ * @class_mask: Limit which sub-fields of the class field are compared.
+ * @override_only: Match only when dev->driver_override is this driver.
+ *
+ * Type of entries in the "device ID" table for CDX devices supported by
+ * a CDX device driver.
+ */
+struct cdx_device_id {
+ __u16 vendor;
+ __u16 device;
+ __u16 subvendor;
+ __u16 subdevice;
+ __u32 class;
+ __u32 class_mask;
+ __u32 override_only;
+};
+
+struct vchiq_device_id {
+ char name[32];
+};
+
+/**
+ * struct coreboot_device_id - Identifies a coreboot table entry
+ * @tag: tag ID
+ * @driver_data: driver specific data
+ */
+struct coreboot_device_id {
+ __u32 tag;
+ kernel_ulong_t driver_data;
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index 8100bb477d86..d80c3ea57472 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -11,8 +11,10 @@
#include <linux/list.h>
#include <linux/stat.h>
+#include <linux/buildid.h>
#include <linux/compiler.h>
#include <linux/cache.h>
+#include <linux/cleanup.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/elf.h>
@@ -26,12 +28,12 @@
#include <linux/tracepoint-defs.h>
#include <linux/srcu.h>
#include <linux/static_call_types.h>
-#include <linux/cfi.h>
+#include <linux/dynamic_debug.h>
#include <linux/percpu.h>
#include <asm/module.h>
-#define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN
+#define MODULE_NAME_LEN __MODULE_NAME_LEN
struct modversion_info {
unsigned long crc;
@@ -51,9 +53,9 @@ struct module_kobject {
struct module_attribute {
struct attribute attr;
- ssize_t (*show)(struct module_attribute *, struct module_kobject *,
+ ssize_t (*show)(const struct module_attribute *, struct module_kobject *,
char *);
- ssize_t (*store)(struct module_attribute *, struct module_kobject *,
+ ssize_t (*store)(const struct module_attribute *, struct module_kobject *,
const char *, size_t count);
void (*setup)(struct module *, const char *);
int (*test)(struct module *);
@@ -66,10 +68,10 @@ struct module_version_attribute {
const char *version;
};
-extern ssize_t __modver_version_show(struct module_attribute *,
+extern ssize_t __modver_version_show(const struct module_attribute *,
struct module_kobject *, char *);
-extern struct module_attribute module_uevent;
+extern const struct module_attribute module_uevent;
/* These are either module local, or the kernel's dummy ones. */
extern int init_module(void);
@@ -131,7 +133,7 @@ extern void cleanup_module(void);
{ return initfn; } \
int init_module(void) __copy(initfn) \
__attribute__((alias(#initfn))); \
- __CFI_ADDRESSABLE(init_module, __initdata);
+ ___ADDRESSABLE(init_module, __initdata);
/* This is only required if you want to be unloadable. */
#define module_exit(exitfn) \
@@ -139,7 +141,7 @@ extern void cleanup_module(void);
{ return exitfn; } \
void cleanup_module(void) __copy(exitfn) \
__attribute__((alias(#exitfn))); \
- __CFI_ADDRESSABLE(cleanup_module, __exitdata);
+ ___ADDRESSABLE(cleanup_module, __exitdata);
#endif
@@ -161,8 +163,7 @@ extern void cleanup_module(void);
#define __INITRODATA_OR_MODULE __INITRODATA
#endif /*CONFIG_MODULES*/
-/* Generic info of form tag = "info" */
-#define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info)
+struct module_kobject *lookup_or_create_module_kobject(const char *name);
/* For userspace: you can also call me... */
#define MODULE_ALIAS(_alias) MODULE_INFO(alias, _alias)
@@ -173,6 +174,12 @@ extern void cleanup_module(void);
#define MODULE_SOFTDEP(_softdep) MODULE_INFO(softdep, _softdep)
/*
+ * Weak module dependencies. See man modprobe.d for details.
+ * Example: MODULE_WEAKDEP("module-foo")
+ */
+#define MODULE_WEAKDEP(_weakdep) MODULE_INFO(weakdep, _weakdep)
+
+/*
* MODULE_FILE is used for generating modules.builtin
* So, make it no-op when this is being built as a module
*/
@@ -237,14 +244,23 @@ extern void cleanup_module(void);
/* What your module does. */
#define MODULE_DESCRIPTION(_description) MODULE_INFO(description, _description)
-#ifdef MODULE
+/*
+ * Format: __mod_device_table__kmod_<modname>__<type>__<name>
+ * Parts of the string `__kmod_` and `__` are used as delimiters when parsing
+ * a symbol in file2alias.c
+ */
+#define __mod_device_table(type, name) \
+ __PASTE(__mod_device_table__, \
+ __PASTE(kmod_, \
+ __PASTE(__KBUILD_MODNAME, \
+ __PASTE(__, \
+ __PASTE(type, \
+ __PASTE(__, name))))))
+
/* Creates an alias so file2alias.c can find device table. */
#define MODULE_DEVICE_TABLE(type, name) \
-extern typeof(name) __mod_##type##__##name##_device_table \
- __attribute__ ((unused, alias(__stringify(name))))
-#else /* !MODULE */
-#define MODULE_DEVICE_TABLE(type, name)
-#endif
+static typeof(name) __mod_device_table(type, name) \
+ __attribute__ ((used, alias(__stringify(name))))
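Driver usage is unchanged; a minimal sketch with made-up PCI IDs shows what modpost picks up through the alias:

/* Hypothetical table: modpost reads the alias and emits MODULE_ALIAS()s. */
static const struct pci_device_id demo_pci_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* made-up vendor/device pair */
	{ }				/* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(pci, demo_pci_ids);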
/* Version of form [<epoch>:]<version>[-<extra-version>].
* Or for CVS/RCS ID version, everything but the number is stripped.
@@ -268,7 +284,7 @@ extern typeof(name) __mod_##type##__##name##_device_table \
#else
#define MODULE_VERSION(_version) \
MODULE_INFO(version, _version); \
- static struct module_version_attribute __modver_attr \
+ static const struct module_version_attribute __modver_attr \
__used __section("__modver") \
__aligned(__alignof__(struct module_version_attribute)) \
= { \
@@ -289,25 +305,10 @@ extern typeof(name) __mod_##type##__##name##_device_table \
* files require multiple MODULE_FIRMWARE() specifiers */
#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware)
-#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, #ns)
+#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, ns)
struct notifier_block;
-#ifdef CONFIG_MODULES
-
-extern int modules_disabled; /* for sysctl */
-/* Get/put a kernel symbol (calls must be symmetric) */
-void *__symbol_get(const char *symbol);
-void *__symbol_get_gpl(const char *symbol);
-#define symbol_get(x) ((typeof(&x))(__symbol_get(__stringify(x))))
-
-/* modules using other modules: kdb wants to see this. */
-struct module_use {
- struct list_head source_list;
- struct list_head target_list;
- struct module *source, *target;
-};
-
enum module_state {
MODULE_STATE_LIVE, /* Normal state. */
MODULE_STATE_COMING, /* Full formed, running module_init. */
@@ -320,17 +321,48 @@ struct mod_tree_node {
struct latch_tree_node node;
};
-struct module_layout {
- /* The actual code + data. */
+enum mod_mem_type {
+ MOD_TEXT = 0,
+ MOD_DATA,
+ MOD_RODATA,
+ MOD_RO_AFTER_INIT,
+ MOD_INIT_TEXT,
+ MOD_INIT_DATA,
+ MOD_INIT_RODATA,
+
+ MOD_MEM_NUM_TYPES,
+ MOD_INVALID = -1,
+};
+
+#define mod_mem_type_is_init(type) \
+ ((type) == MOD_INIT_TEXT || \
+ (type) == MOD_INIT_DATA || \
+ (type) == MOD_INIT_RODATA)
+
+#define mod_mem_type_is_core(type) (!mod_mem_type_is_init(type))
+
+#define mod_mem_type_is_text(type) \
+ ((type) == MOD_TEXT || \
+ (type) == MOD_INIT_TEXT)
+
+#define mod_mem_type_is_data(type) (!mod_mem_type_is_text(type))
+
+#define mod_mem_type_is_core_data(type) \
+ (mod_mem_type_is_core(type) && \
+ mod_mem_type_is_data(type))
+
+#define for_each_mod_mem_type(type) \
+ for (enum mod_mem_type (type) = 0; \
+ (type) < MOD_MEM_NUM_TYPES; (type)++)
+
+#define for_class_mod_mem_type(type, class) \
+ for_each_mod_mem_type(type) \
+ if (mod_mem_type_is_##class(type))
+
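A sketch of how the per-type regions replace the old core/init layout totals (the helper name is hypothetical):

/* Hypothetical: total footprint across all of a module's memory regions. */
static unsigned int module_total_size(const struct module *mod)
{
	unsigned int size = 0;

	for_each_mod_mem_type(type)
		size += mod->mem[type].size;

	return size;
}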
+struct module_memory {
void *base;
- /* Total size. */
+ bool is_rox;
unsigned int size;
- /* The size of the executable code. */
- unsigned int text_size;
- /* Size of RO section of the module (text+rodata) */
- unsigned int ro_size;
- /* Size of RO after init section */
- unsigned int ro_after_init_size;
#ifdef CONFIG_MODULES_TREE_LOOKUP
struct mod_tree_node mtn;
@@ -339,9 +371,9 @@ struct module_layout {
#ifdef CONFIG_MODULES_TREE_LOOKUP
/* Only touch one cacheline for common rbtree-for-core-layout case. */
-#define __module_layout_align ____cacheline_aligned
+#define __module_memory_align ____cacheline_aligned
#else
-#define __module_layout_align
+#define __module_memory_align
#endif
struct mod_kallsyms {
@@ -352,6 +384,14 @@ struct mod_kallsyms {
};
#ifdef CONFIG_LIVEPATCH
+/**
+ * struct klp_modinfo - ELF information preserved from the livepatch module
+ *
+ * @hdr: ELF header
+ * @sechdrs: Section header table
+ * @secstrings: String table for the section headers
+ * @symndx: The symbol table section index
+ */
struct klp_modinfo {
Elf_Ehdr hdr;
Elf_Shdr *sechdrs;
@@ -369,6 +409,11 @@ struct module {
/* Unique handle for this module */
char name[MODULE_NAME_LEN];
+#ifdef CONFIG_STACKTRACE_BUILD_ID
+ /* Module build ID */
+ unsigned char build_id[BUILD_ID_SIZE_MAX];
+#endif
+
/* Sysfs stuff. */
struct module_kobject mkobj;
struct module_attribute *modinfo_attrs;
@@ -378,11 +423,12 @@ struct module {
/* Exported symbols */
const struct kernel_symbol *syms;
- const s32 *crcs;
+ const u32 *crcs;
unsigned int num_syms;
-#ifdef CONFIG_CFI_CLANG
- cfi_check_fn cfi_check;
+#ifdef CONFIG_ARCH_USES_CFI_TRAPS
+ s32 *kcfi_traps;
+ s32 *kcfi_traps_end;
#endif
/* Kernel parameters. */
@@ -395,7 +441,7 @@ struct module {
/* GPL-only exported symbols. */
unsigned int num_gpl_syms;
const struct kernel_symbol *gpl_syms;
- const s32 *gpl_crcs;
+ const u32 *gpl_crcs;
bool using_gplonly_symbols;
#ifdef CONFIG_MODULE_SIG
@@ -412,9 +458,7 @@ struct module {
/* Startup function. */
int (*init)(void);
- /* Core layout: rbtree is accessed frequently, so keep together. */
- struct module_layout core_layout __module_layout_align;
- struct module_layout init_layout;
+ struct module_memory mem[MOD_MEM_NUM_TYPES] __module_memory_align;
/* Arch-specific module values */
struct mod_arch_specific arch;
@@ -466,7 +510,9 @@ struct module {
#endif
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
unsigned int btf_data_size;
+ unsigned int btf_base_data_size;
void *btf_data;
+ void *btf_base_data;
#endif
#ifdef CONFIG_JUMP_LABEL
struct jump_entry *jump_entries;
@@ -482,7 +528,7 @@ struct module {
struct trace_eval_map **trace_evals;
unsigned int num_trace_evals;
#endif
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#ifdef CONFIG_DYNAMIC_FTRACE
unsigned int num_ftrace_callsites;
unsigned long *ftrace_callsites;
#endif
@@ -496,15 +542,27 @@ struct module {
int num_static_call_sites;
struct static_call_site *static_call_sites;
#endif
+#if IS_ENABLED(CONFIG_KUNIT)
+ int num_kunit_init_suites;
+ struct kunit_suite **kunit_init_suites;
+ int num_kunit_suites;
+ struct kunit_suite **kunit_suites;
+#endif
+
#ifdef CONFIG_LIVEPATCH
bool klp; /* Is this a livepatch module? */
bool klp_alive;
- /* Elf information */
+ /* ELF information */
struct klp_modinfo *klp_info;
#endif
+#ifdef CONFIG_PRINTK_INDEX
+ unsigned int printk_index_size;
+ struct pi_entry **printk_index_start;
+#endif
+
#ifdef CONFIG_MODULE_UNLOAD
/* What modules depend on me? */
struct list_head source_list;
@@ -527,11 +585,24 @@ struct module {
struct error_injection_entry *ei_funcs;
unsigned int num_ei_funcs;
#endif
+#ifdef CONFIG_DYNAMIC_DEBUG_CORE
+ struct _ddebug_info dyndbg_info;
+#endif
} ____cacheline_aligned __randomize_layout;
#ifndef MODULE_ARCH_INIT
#define MODULE_ARCH_INIT {}
#endif
+#ifdef CONFIG_MODULES
+
+/* Get/put a kernel symbol (calls must be symmetric) */
+void *__symbol_get(const char *symbol);
+void *__symbol_get_gpl(const char *symbol);
+#define symbol_get(x) ({ \
+ static const char __notrim[] \
+ __used __section(".no_trim_symbol") = __stringify(x); \
+ (typeof(&x))(__symbol_get(__stringify(x))); })
+
#ifndef HAVE_ARCH_KALLSYMS_SYMBOL_VALUE
static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym)
{
@@ -547,6 +618,11 @@ static inline bool module_is_live(struct module *mod)
return mod->state != MODULE_STATE_GOING;
}
+static inline bool module_is_coming(struct module *mod)
+{
+ return mod->state == MODULE_STATE_COMING;
+}
+
struct module *__module_text_address(unsigned long addr);
struct module *__module_address(unsigned long addr);
bool is_module_address(unsigned long addr);
@@ -554,18 +630,35 @@ bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
bool is_module_percpu_address(unsigned long addr);
bool is_module_text_address(unsigned long addr);
+static inline bool within_module_mem_type(unsigned long addr,
+ const struct module *mod,
+ enum mod_mem_type type)
+{
+ unsigned long base, size;
+
+ base = (unsigned long)mod->mem[type].base;
+ size = mod->mem[type].size;
+ return addr - base < size;
+}
+
static inline bool within_module_core(unsigned long addr,
const struct module *mod)
{
- return (unsigned long)mod->core_layout.base <= addr &&
- addr < (unsigned long)mod->core_layout.base + mod->core_layout.size;
+ for_class_mod_mem_type(type, core) {
+ if (within_module_mem_type(addr, mod, type))
+ return true;
+ }
+ return false;
}
static inline bool within_module_init(unsigned long addr,
const struct module *mod)
{
- return (unsigned long)mod->init_layout.base <= addr &&
- addr < (unsigned long)mod->init_layout.base + mod->init_layout.size;
+ for_class_mod_mem_type(type, init) {
+ if (within_module_mem_type(addr, mod, type))
+ return true;
+ }
+ return false;
}
static inline bool within_module(unsigned long addr, const struct module *mod)
@@ -573,20 +666,12 @@ static inline bool within_module(unsigned long addr, const struct module *mod)
return within_module_init(addr, mod) || within_module_core(addr, mod);
}
-/* Search for module by name: must be in a RCU-sched critical section. */
+/* Search for module by name: must be in a RCU critical section. */
struct module *find_module(const char *name);
-/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
- symnum out of range. */
-int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
- char *name, char *module_name, int *exported);
-
-/* Look for this name: can be of form module:name. */
-unsigned long module_kallsyms_lookup_name(const char *name);
-
-extern void __noreturn __module_put_and_exit(struct module *mod,
+extern void __noreturn __module_put_and_kthread_exit(struct module *mod,
long code);
-#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
+#define module_put_and_kthread_exit(code) __module_put_and_kthread_exit(THIS_MODULE, code)
#ifdef CONFIG_MODULE_UNLOAD
int module_refcount(struct module *mod);
@@ -598,10 +683,46 @@ void symbol_put_addr(void *addr);
to handle the error case (which only happens with rmmod --wait). */
extern void __module_get(struct module *module);
-/* This is the Right Way to get a module: if it fails, it's being removed,
- * so pretend it's not there. */
+/**
+ * try_module_get() - take module refcount unless module is being removed
+ * @module: the module we should check for
+ *
+ * Only try to get a module reference count if the module is not being removed.
+ * This call will fail if the module is in the process of being removed.
+ *
+ * Care must also be taken to ensure the module exists and is alive prior to
+ * usage of this call. This can be guaranteed through two means:
+ *
+ * 1) Direct protection: you know an earlier caller must have increased the
+ * module reference through __module_get(). This can typically be achieved
+ * by having another entity other than the module itself increment the
+ * module reference count.
+ *
+ * 2) Implied protection: there is an implied protection against module
+ * removal. An example of this is the implied protection used by kernfs /
+ * sysfs. The sysfs store / read file operations are guaranteed to exist
+ * through the use of kernfs's active reference (see kernfs_active()) and a
+ * sysfs / kernfs file removal cannot happen unless the same file is not
+ * active. Therefore, if a sysfs file is being read or written to the module
+ * which created it must still exist. It is therefore safe to use
+ * try_module_get() on module sysfs store / read ops.
+ *
+ * One of the real benefits of try_module_get() is the module_is_live() check
+ * which ensures that the caller of try_module_get() can yield to userspace
+ * module removal requests and gracefully fail if the module is on its way out.
+ *
+ * Returns true if the reference count was successfully incremented.
+ */
extern bool try_module_get(struct module *module);
+/**
+ * module_put() - release a reference count to a module
+ * @module: the module we should release a reference count for
+ *
+ * If you successfully bump a reference count to a module with try_module_get(),
+ * when you are finished you must call module_put() to release that reference
+ * count.
+ */
extern void module_put(struct module *module);
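The canonical pattern the comments above describe, as a short sketch (the function name is illustrative):

/* Illustrative: pin 'owner' for the duration of one operation. */
static int demo_use_module(struct module *owner)
{
	if (!try_module_get(owner))
		return -ENODEV;	/* module is on its way out */

	/* ... safely use state owned by 'owner' ... */

	module_put(owner);
	return 0;
}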
#else /*!CONFIG_MODULE_UNLOAD*/
@@ -630,17 +751,6 @@ static inline void __module_get(struct module *module)
/* Dereference module function descriptor */
void *dereference_module_function_descriptor(struct module *mod, void *ptr);
-/* For kallsyms to ask for address resolution. namebuf should be at
- * least KSYM_NAME_LEN long: a pointer to namebuf is returned if
- * found, otherwise NULL. */
-const char *module_address_lookup(unsigned long addr,
- unsigned long *symbolsize,
- unsigned long *offset,
- char **modname,
- char *namebuf);
-int lookup_module_symbol_name(unsigned long addr, char *symname);
-int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
-
int register_module_notifier(struct notifier_block *nb);
int unregister_module_notifier(struct notifier_block *nb);
@@ -651,21 +761,19 @@ static inline bool module_requested_async_probing(struct module *module)
return module && module->async_probe_requested;
}
-#ifdef CONFIG_LIVEPATCH
static inline bool is_livepatch_module(struct module *mod)
{
+#ifdef CONFIG_LIVEPATCH
return mod->klp;
-}
-#else /* !CONFIG_LIVEPATCH */
-static inline bool is_livepatch_module(struct module *mod)
-{
+#else
return false;
+#endif
}
-#endif /* CONFIG_LIVEPATCH */
-bool is_module_sig_enforced(void);
void set_module_sig_enforced(void);
+void module_for_each_mod(int(*func)(struct module *mod, void *data), void *data);
+
#else /* !CONFIG_MODULES... */
static inline struct module *__module_address(unsigned long addr)
@@ -735,38 +843,6 @@ static inline void module_put(struct module *module)
#define module_name(mod) "kernel"
-/* For kallsyms to ask for address resolution. NULL means not found. */
-static inline const char *module_address_lookup(unsigned long addr,
- unsigned long *symbolsize,
- unsigned long *offset,
- char **modname,
- char *namebuf)
-{
- return NULL;
-}
-
-static inline int lookup_module_symbol_name(unsigned long addr, char *symname)
-{
- return -ERANGE;
-}
-
-static inline int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name)
-{
- return -ERANGE;
-}
-
-static inline int module_get_kallsym(unsigned int symnum, unsigned long *value,
- char *type, char *name,
- char *module_name, int *exported)
-{
- return -ERANGE;
-}
-
-static inline unsigned long module_kallsyms_lookup_name(const char *name)
-{
- return 0;
-}
-
static inline int register_module_notifier(struct notifier_block *nb)
{
/* no events will happen anyway, so this can always succeed */
@@ -778,7 +854,7 @@ static inline int unregister_module_notifier(struct notifier_block *nb)
return 0;
}
-#define module_put_and_exit(code) do_exit(code)
+#define module_put_and_kthread_exit(code) kthread_exit(code)
static inline void print_modules(void)
{
@@ -789,10 +865,6 @@ static inline bool module_requested_async_probing(struct module *module)
return false;
}
-static inline bool is_module_sig_enforced(void)
-{
- return false;
-}
static inline void set_module_sig_enforced(void)
{
@@ -805,12 +877,19 @@ void *dereference_module_function_descriptor(struct module *mod, void *ptr)
return ptr;
}
+static inline bool module_is_coming(struct module *mod)
+{
+ return false;
+}
+
+static inline void module_for_each_mod(int(*func)(struct module *mod, void *data), void *data)
+{
+}
#endif /* CONFIG_MODULES */
#ifdef CONFIG_SYSFS
extern struct kset *module_kset;
-extern struct kobj_type module_ktype;
-extern int module_sysfs_initialized;
+extern const struct kobj_type module_ktype;
#endif /* CONFIG_SYSFS */
#define symbol_request(x) try_then_request_module(symbol_get(x), "symbol:" #x)
@@ -834,7 +913,7 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
static inline void module_bug_cleanup(struct module *mod) {}
#endif /* CONFIG_GENERIC_BUG */
-#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_MITIGATION_RETPOLINE
extern bool retpoline_module_ok(bool has_retpoline);
#else
static inline bool retpoline_module_ok(bool has_retpoline)
@@ -844,19 +923,102 @@ static inline bool retpoline_module_ok(bool has_retpoline)
#endif
#ifdef CONFIG_MODULE_SIG
+bool is_module_sig_enforced(void);
+
static inline bool module_sig_ok(struct module *module)
{
return module->sig_ok;
}
#else /* !CONFIG_MODULE_SIG */
+static inline bool is_module_sig_enforced(void)
+{
+ return false;
+}
+
static inline bool module_sig_ok(struct module *module)
{
return true;
}
#endif /* CONFIG_MODULE_SIG */
-int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
- struct module *, unsigned long),
+#if defined(CONFIG_MODULES) && defined(CONFIG_KALLSYMS)
+int module_kallsyms_on_each_symbol(const char *modname,
+ int (*fn)(void *, const char *, unsigned long),
void *data);
+/* For kallsyms to ask for address resolution. namebuf should be at
+ * least KSYM_NAME_LEN long: the length of the name copied into namebuf
+ * is returned if found, otherwise 0.
+ */
+int module_address_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char **modname, const unsigned char **modbuildid,
+ char *namebuf);
+int lookup_module_symbol_name(unsigned long addr, char *symname);
+int lookup_module_symbol_attrs(unsigned long addr,
+ unsigned long *size,
+ unsigned long *offset,
+ char *modname,
+ char *name);
+
+/* Returns 0 and fills in value, type, name, module_name and exported,
+ * or -ERANGE if symnum is out of range.
+ */
+int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ char *name, char *module_name, int *exported);
+
+/* Look for this name: can be of form module:name. */
+unsigned long module_kallsyms_lookup_name(const char *name);
+
+unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name);
+
+#else /* CONFIG_MODULES && CONFIG_KALLSYMS */
+
+static inline int module_kallsyms_on_each_symbol(const char *modname,
+ int (*fn)(void *, const char *, unsigned long),
+ void *data)
+{
+ return -EOPNOTSUPP;
+}
+
+/* For kallsyms to ask for address resolution. 0 means not found. */
+static inline int module_address_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char **modname,
+ const unsigned char **modbuildid,
+ char *namebuf)
+{
+ return 0;
+}
+
+static inline int lookup_module_symbol_name(unsigned long addr, char *symname)
+{
+ return -ERANGE;
+}
+
+static inline int module_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *name,
+ char *module_name, int *exported)
+{
+ return -ERANGE;
+}
+
+static inline unsigned long module_kallsyms_lookup_name(const char *name)
+{
+ return 0;
+}
+
+static inline unsigned long find_kallsyms_symbol_value(struct module *mod,
+ const char *name)
+{
+ return 0;
+}
+
+#endif /* CONFIG_MODULES && CONFIG_KALLSYMS */
+
+/* Define __free(module_put) macro for struct module *. */
+DEFINE_FREE(module_put, struct module *, if (_T) module_put(_T))
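A sketch of the scope-based cleanup this enables (see linux/cleanup.h): the reference is dropped automatically on every return path; the function below is hypothetical:

/* Hypothetical: auto-put via the cleanup attribute defined above. */
static int demo_with_autoput(struct module *owner)
{
	struct module *mod __free(module_put) =
			try_module_get(owner) ? owner : NULL;

	if (!mod)
		return -ENODEV;

	/* ... use 'mod'; module_put(mod) runs when it goes out of scope ... */
	return 0;
}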
+
#endif /* _LINUX_MODULE_H */
diff --git a/include/linux/module_symbol.h b/include/linux/module_symbol.h
new file mode 100644
index 000000000000..77c9895b9ddb
--- /dev/null
+++ b/include/linux/module_symbol.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_MODULE_SYMBOL_H
+#define _LINUX_MODULE_SYMBOL_H
+
+/* This ignores the intensely annoying "mapping symbols" found in ELF files. */
+static inline bool is_mapping_symbol(const char *str)
+{
+ if (str[0] == '.' && str[1] == 'L')
+ return true;
+ if (str[0] == 'L' && str[1] == '0')
+ return true;
+ return str[0] == '$';
+}
+
+#endif /* _LINUX_MODULE_SYMBOL_H */
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 9e09d11ffe5b..e395461d59e5 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -13,6 +13,9 @@
* must be implemented by each architecture.
*/
+/* arch may override to do additional checking of ELF header architecture */
+bool module_elf_check_arch(Elf_Ehdr *hdr);
+
/* Adjust arch-specific sections. Return 0 on success. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
@@ -22,13 +25,6 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
/* Additional bytes needed by arch in front of individual sections */
unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
-/* Allocator used for allocating struct module, core sections and init
- sections. Returns NULL on failure. */
-void *module_alloc(unsigned long size);
-
-/* Free memory returned from module_alloc. */
-void module_memfree(void *module_region);
-
/* Determines if the section name is an init section (that is only used during
* module loading).
*/
@@ -39,6 +35,11 @@ bool module_init_section(const char *name);
*/
bool module_exit_section(const char *name);
+/* Describes whether within_module_init() will consider this an init section
+ * or not. This behaviour changes with CONFIG_MODULE_UNLOAD.
+ */
+bool module_init_layout_section(const char *sname);
+
/*
* Apply the given relocation to the (simplified) ELF. Return -error
* or 0.
@@ -72,6 +73,23 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
unsigned int symindex,
unsigned int relsec,
struct module *mod);
+#ifdef CONFIG_LIVEPATCH
+/*
+ * Some architectures (namely x86_64 and ppc64) perform sanity checks when
+ * applying relocations. If a patched module gets unloaded and then later
+ * reloaded (and re-patched), klp re-applies relocations to the replacement
+ * function(s). Any leftover relocations from the previous loading of the
+ * patched module might trigger the sanity checks.
+ *
+ * To prevent that, when unloading a patched module, clear out any relocations
+ * that might trigger arch-specific sanity checks on a future module reload.
+ */
+void clear_relocate_add(Elf_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me);
+#endif
#else
static inline int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
@@ -90,18 +108,18 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *mod);
+#ifdef CONFIG_MODULES
+void flush_module_init_free_work(void);
+#else
+static inline void flush_module_init_free_work(void)
+{
+}
+#endif
+
/* Any cleanup needed when module leaves. */
void module_arch_cleanup(struct module *mod);
/* Any cleanup before freeing mod->module_init */
void module_arch_freeing_init(struct module *mod);
-#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
- !defined(CONFIG_KASAN_VMALLOC)
-#include <linux/kasan.h>
-#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
-#else
-#define MODULE_ALIGN PAGE_SIZE
-#endif
-
#endif
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index eed280fae433..915f32f7d888 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -6,6 +6,13 @@
#include <linux/stringify.h>
#include <linux/kernel.h>
+/*
+ * The maximum module name length, including the NUL byte.
+ * Chosen so that structs with an unsigned long line up, specifically
+ * modversion_info.
+ */
+#define __MODULE_NAME_LEN (64 - sizeof(unsigned long))
+
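For orientation: with an 8-byte unsigned long this works out to 56 bytes including the NUL. An illustrative mirror of the modversion_info layout the comment refers to:

struct modversion_info_example {	/* illustrative only */
	unsigned long crc;		/* 8 bytes on 64-bit */
	char name[64 - sizeof(unsigned long)];	/* 56 bytes: total is exactly
						   64, so arrays of these stay
						   unsigned-long aligned */
};
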
/* You can override this manually, but generally this should match the
module name. */
#ifdef MODULE
@@ -17,21 +24,22 @@
#define __MODULE_INFO_PREFIX KBUILD_MODNAME "."
#endif
-/* Chosen so that structs with an unsigned long line up. */
-#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
-
-#define __MODULE_INFO(tag, name, info) \
- static const char __UNIQUE_ID(name)[] \
+/* Generic info of form tag = "info" */
+#define MODULE_INFO(tag, info) \
+ static_assert( \
+ sizeof(info) - 1 == __builtin_strlen(info), \
+ "MODULE_INFO(" #tag ", ...) contains embedded NUL byte"); \
+ static const char __UNIQUE_ID(modinfo)[] \
__used __section(".modinfo") __aligned(1) \
= __MODULE_INFO_PREFIX __stringify(tag) "=" info
#define __MODULE_PARM_TYPE(name, _type) \
- __MODULE_INFO(parmtype, name##type, #name ":" _type)
+ MODULE_INFO(parmtype, #name ":" _type)
/* One for each parameter, describing how to use it. Some files do
multiple of these per line, so can't just use MODULE_INFO. */
#define MODULE_PARM_DESC(_parm, desc) \
- __MODULE_INFO(parm, _parm, #_parm ":" desc)
+ MODULE_INFO(parm, #_parm ":" desc)
struct kernel_param;
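
For illustration, the static_assert above rejects string literals with embedded NUL bytes at compile time:

MODULE_INFO(supported, "external");	/* emits "supported=external" into
					   .modinfo, prefixed with
					   KBUILD_MODNAME in module builds */

/* MODULE_INFO(supported, "a\0b") would fail the assertion:
 * sizeof("a\0b") - 1 == 3, but __builtin_strlen("a\0b") == 1.
 */
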
@@ -276,16 +284,15 @@ struct kparam_array
read-only sections (which is part of respective UNIX ABI on these
platforms). So 'const' makes no sense and even causes compile failures
with some compilers. */
-#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64)
+#if defined(CONFIG_ALPHA) || defined(CONFIG_PPC64)
#define __moduleparam_const
#else
#define __moduleparam_const const
#endif
-/* This is the fundamental function for registering boot/module
- parameters. */
+/* This is the fundamental function for registering boot/module parameters. */
#define __module_param_call(prefix, name, ops, arg, perm, level, flags) \
- /* Default value instead of permissions? */ \
+ static_assert(sizeof(""prefix) - 1 <= __MODULE_NAME_LEN); \
static const char __param_str_##name[] = prefix #name; \
static struct kernel_param __moduleparam_const __param_##name \
__used __section("__param") \
@@ -293,7 +300,11 @@ struct kparam_array
= { __param_str_##name, THIS_MODULE, ops, \
VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } }
-/* Obsolete - use module_param_cb() */
+/*
+ * Useful for describing a set/get pair used only once (i.e. for this
+ * parameter). For repeated set/get pairs (i.e. the same struct
+ * kernel_param_ops), use module_param_cb() instead.
+ */
#define module_param_call(name, _set, _get, arg, perm) \
static const struct kernel_param_ops __param_ops_##name = \
{ .flags = 0, .set = _set, .get = _get }; \
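
A hedged sketch of the single-use pattern described above, reusing the param_ops_uint helpers declared later in this header; all names are hypothetical.

static unsigned int example_level;

static int example_set_level(const char *val, const struct kernel_param *kp)
{
	/* one-off setter: parse the value, then apply any side effect */
	return param_set_uint(val, kp);
}

module_param_call(example_level, example_set_level, param_get_uint,
		  &example_level, 0644);
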
@@ -341,6 +352,19 @@ static inline void kernel_param_unlock(struct module *mod)
__module_param_call("", name, &param_ops_##type, &var, perm, \
-1, KERNEL_PARAM_FL_UNSAFE)
+/**
+ * __core_param_cb - similar to core_param, but with set/get ops instead of a type.
+ * @name: the name of the cmdline and sysfs parameter (often matches the variable)
+ * @ops: the set & get operations for this parameter
+ * @arg: pointer to the variable backing the parameter
+ * @perm: visibility in sysfs
+ *
+ * Ideally this would be called 'core_param_cb', but that name is already
+ * taken by the module core parameters, hence the '__' prefix.
+ */
+#define __core_param_cb(name, ops, arg, perm) \
+ __module_param_call("", name, ops, arg, perm, -1, 0)
+
#endif /* !MODULE */
/**
@@ -381,6 +405,8 @@ extern bool parameq(const char *name1, const char *name2);
*/
extern bool parameqn(const char *name1, const char *name2, size_t n);
+typedef int (*parse_unknown_fn)(char *param, char *val, const char *doing, void *arg);
+
/* Called on module insert or kernel boot */
extern char *parse_args(const char *name,
char *args,
@@ -388,9 +414,7 @@ extern char *parse_args(const char *name,
unsigned num,
s16 level_min,
s16 level_max,
- void *arg,
- int (*unknown)(char *param, char *val,
- const char *doing, void *arg));
+ void *arg, parse_unknown_fn unknown);
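
A minimal sketch of a handler matching parse_unknown_fn; the return convention assumed here is 0 to accept/ignore the parameter and a negative errno to reject it.

static int example_unknown(char *param, char *val, const char *doing, void *arg)
{
	pr_debug("%s: ignoring unknown parameter '%s'\n", doing, param);
	return 0;
}
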
/* Called by module remove. */
#ifdef CONFIG_SYSFS
@@ -431,6 +455,8 @@ extern int param_get_int(char *buffer, const struct kernel_param *kp);
extern const struct kernel_param_ops param_ops_uint;
extern int param_set_uint(const char *val, const struct kernel_param *kp);
extern int param_get_uint(char *buffer, const struct kernel_param *kp);
+int param_set_uint_minmax(const char *val, const struct kernel_param *kp,
+ unsigned int min, unsigned int max);
#define param_check_uint(name, p) __param_check(name, p, unsigned int)
extern const struct kernel_param_ops param_ops_long;
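
A hedged sketch of the new bounded setter used from custom ops (names hypothetical):

static int example_set_depth(const char *val, const struct kernel_param *kp)
{
	/* parse as unsigned int, rejecting anything outside 1..1024 */
	return param_set_uint_minmax(val, kp, 1, 1024);
}

static const struct kernel_param_ops example_depth_ops = {
	.set = example_set_depth,
	.get = param_get_uint,
};
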
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 5d92a7e1a742..acfe7ef86a1b 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -11,113 +11,102 @@
#define _LINUX_MOUNT_H
#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/nodemask.h>
-#include <linux/spinlock.h>
-#include <linux/seqlock.h>
-#include <linux/atomic.h>
+#include <asm/barrier.h>
struct super_block;
-struct vfsmount;
struct dentry;
-struct mnt_namespace;
+struct user_namespace;
+struct mnt_idmap;
+struct file_system_type;
struct fs_context;
+struct file;
+struct path;
-#define MNT_NOSUID 0x01
-#define MNT_NODEV 0x02
-#define MNT_NOEXEC 0x04
-#define MNT_NOATIME 0x08
-#define MNT_NODIRATIME 0x10
-#define MNT_RELATIME 0x20
-#define MNT_READONLY 0x40 /* does the user want this to be r/o? */
-#define MNT_NOSYMFOLLOW 0x80
-
-#define MNT_SHRINKABLE 0x100
-#define MNT_WRITE_HOLD 0x200
-
-#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */
-#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */
-/*
- * MNT_SHARED_MASK is the set of flags that should be cleared when a
- * mount becomes shared. Currently, this is only the flag that says a
- * mount cannot be bind mounted, since this is how we create a mount
- * that shares events with another mount. If you add a new MNT_*
- * flag, consider how it interacts with shared mounts.
- */
-#define MNT_SHARED_MASK (MNT_UNBINDABLE)
-#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
- | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
- | MNT_READONLY | MNT_NOSYMFOLLOW)
-#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
-
-#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
- MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | \
- MNT_CURSOR)
-
-#define MNT_INTERNAL 0x4000
-
-#define MNT_LOCK_ATIME 0x040000
-#define MNT_LOCK_NOEXEC 0x080000
-#define MNT_LOCK_NOSUID 0x100000
-#define MNT_LOCK_NODEV 0x200000
-#define MNT_LOCK_READONLY 0x400000
-#define MNT_LOCKED 0x800000
-#define MNT_DOOMED 0x1000000
-#define MNT_SYNC_UMOUNT 0x2000000
-#define MNT_MARKED 0x4000000
-#define MNT_UMOUNT 0x8000000
-#define MNT_CURSOR 0x10000000
+enum mount_flags {
+ MNT_NOSUID = 0x01,
+ MNT_NODEV = 0x02,
+ MNT_NOEXEC = 0x04,
+ MNT_NOATIME = 0x08,
+ MNT_NODIRATIME = 0x10,
+ MNT_RELATIME = 0x20,
+ MNT_READONLY = 0x40, /* does the user want this to be r/o? */
+ MNT_NOSYMFOLLOW = 0x80,
+
+ MNT_SHRINKABLE = 0x100,
+
+ MNT_INTERNAL = 0x4000,
+
+ MNT_LOCK_ATIME = 0x040000,
+ MNT_LOCK_NOEXEC = 0x080000,
+ MNT_LOCK_NOSUID = 0x100000,
+ MNT_LOCK_NODEV = 0x200000,
+ MNT_LOCK_READONLY = 0x400000,
+ MNT_LOCKED = 0x800000,
+ MNT_DOOMED = 0x1000000,
+ MNT_SYNC_UMOUNT = 0x2000000,
+ MNT_UMOUNT = 0x8000000,
+
+ MNT_USER_SETTABLE_MASK = MNT_NOSUID | MNT_NODEV | MNT_NOEXEC
+ | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME
+ | MNT_READONLY | MNT_NOSYMFOLLOW,
+ MNT_ATIME_MASK = MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME,
+
+ MNT_INTERNAL_FLAGS = MNT_INTERNAL | MNT_DOOMED |
+ MNT_SYNC_UMOUNT | MNT_LOCKED
+};
struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
struct super_block *mnt_sb; /* pointer to superblock */
int mnt_flags;
- struct user_namespace *mnt_userns;
+ struct mnt_idmap *mnt_idmap;
} __randomize_layout;
-static inline struct user_namespace *mnt_user_ns(const struct vfsmount *mnt)
+static inline struct mnt_idmap *mnt_idmap(const struct vfsmount *mnt)
{
/* Pairs with smp_store_release() in do_idmap_mount(). */
- return smp_load_acquire(&mnt->mnt_userns);
+ return READ_ONCE(mnt->mnt_idmap);
}
-struct file; /* forward dec */
-struct path;
-
extern int mnt_want_write(struct vfsmount *mnt);
extern int mnt_want_write_file(struct file *file);
extern void mnt_drop_write(struct vfsmount *mnt);
extern void mnt_drop_write_file(struct file *file);
extern void mntput(struct vfsmount *mnt);
extern struct vfsmount *mntget(struct vfsmount *mnt);
+extern void mnt_make_shortterm(struct vfsmount *mnt);
extern struct vfsmount *mnt_clone_internal(const struct path *path);
-extern bool __mnt_is_readonly(struct vfsmount *mnt);
+extern bool __mnt_is_readonly(const struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);
-struct path;
extern struct vfsmount *clone_private_mount(const struct path *path);
-extern int __mnt_want_write(struct vfsmount *);
-extern void __mnt_drop_write(struct vfsmount *);
+int mnt_get_write_access(struct vfsmount *mnt);
+void mnt_put_write_access(struct vfsmount *mnt);
-struct file_system_type;
extern struct vfsmount *fc_mount(struct fs_context *fc);
+extern struct vfsmount *fc_mount_longterm(struct fs_context *fc);
extern struct vfsmount *vfs_create_mount(struct fs_context *fc);
extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
int flags, const char *name,
void *data);
-extern struct vfsmount *vfs_submount(const struct dentry *mountpoint,
- struct file_system_type *type,
- const char *name, void *data);
extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
extern void mark_mounts_for_expiry(struct list_head *mounts);
-extern dev_t name_to_dev_t(const char *name);
-
-extern unsigned int sysctl_mount_max;
-
extern bool path_is_mountpoint(const struct path *path);
+extern bool our_mnt(struct vfsmount *mnt);
+
+extern struct vfsmount *kern_mount(struct file_system_type *);
+extern void kern_unmount(struct vfsmount *mnt);
+extern int may_umount_tree(struct vfsmount *);
+extern int may_umount(struct vfsmount *);
+int do_mount(const char *, const char __user *,
+ const char *, unsigned long, void *);
+extern const struct path *collect_paths(const struct path *, struct path *, unsigned);
+extern void drop_collected_paths(const struct path *, const struct path *);
extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);
+extern int cifs_root_data(char **dev, char **opts);
+
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/moxtet.h b/include/linux/moxtet.h
index 79184948fab4..dfa4800306ee 100644
--- a/include/linux/moxtet.h
+++ b/include/linux/moxtet.h
@@ -35,8 +35,6 @@ enum turris_mox_module_id {
#define MOXTET_NIRQS 16
-extern struct bus_type moxtet_type;
-
struct moxtet {
struct device *dev;
struct mutex lock;
@@ -63,13 +61,8 @@ struct moxtet_driver {
struct device_driver driver;
};
-static inline struct moxtet_driver *
-to_moxtet_driver(struct device_driver *drv)
-{
- if (!drv)
- return NULL;
- return container_of(drv, struct moxtet_driver, driver);
-}
+#define to_moxtet_driver(__drv) \
+ ( __drv ? container_of_const(__drv, struct moxtet_driver, driver) : NULL )
extern int __moxtet_register_driver(struct module *owner,
struct moxtet_driver *mdrv);
diff --git a/include/linux/mpage.h b/include/linux/mpage.h
index f4f5e90a6844..1bdc39daac0a 100644
--- a/include/linux/mpage.h
+++ b/include/linux/mpage.h
@@ -16,10 +16,8 @@ struct writeback_control;
struct readahead_control;
void mpage_readahead(struct readahead_control *, get_block_t get_block);
-int mpage_readpage(struct page *page, get_block_t get_block);
+int mpage_read_folio(struct folio *folio, get_block_t get_block);
int mpage_writepages(struct address_space *mapping,
struct writeback_control *wbc, get_block_t get_block);
-int mpage_writepage(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc);
#endif
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
index 3e5358f4de2f..47be46f36435 100644
--- a/include/linux/mpi.h
+++ b/include/linux/mpi.h
@@ -40,79 +40,26 @@ struct gcry_mpi {
typedef struct gcry_mpi *MPI;
#define mpi_get_nlimbs(a) ((a)->nlimbs)
-#define mpi_has_sign(a) ((a)->sign)
/*-- mpiutil.c --*/
MPI mpi_alloc(unsigned nlimbs);
-void mpi_clear(MPI a);
void mpi_free(MPI a);
int mpi_resize(MPI a, unsigned nlimbs);
-static inline MPI mpi_new(unsigned int nbits)
-{
- return mpi_alloc((nbits + BITS_PER_MPI_LIMB - 1) / BITS_PER_MPI_LIMB);
-}
-
MPI mpi_copy(MPI a);
-MPI mpi_alloc_like(MPI a);
-void mpi_snatch(MPI w, MPI u);
-MPI mpi_set(MPI w, MPI u);
-MPI mpi_set_ui(MPI w, unsigned long u);
-MPI mpi_alloc_set_ui(unsigned long u);
-void mpi_swap_cond(MPI a, MPI b, unsigned long swap);
-
-/* Constants used to return constant MPIs. See mpi_init if you
- * want to add more constants.
- */
-#define MPI_NUMBER_OF_CONSTANTS 6
-enum gcry_mpi_constants {
- MPI_C_ZERO,
- MPI_C_ONE,
- MPI_C_TWO,
- MPI_C_THREE,
- MPI_C_FOUR,
- MPI_C_EIGHT
-};
-
-MPI mpi_const(enum gcry_mpi_constants no);
/*-- mpicoder.c --*/
-
-/* Different formats of external big integer representation. */
-enum gcry_mpi_format {
- GCRYMPI_FMT_NONE = 0,
- GCRYMPI_FMT_STD = 1, /* Twos complement stored without length. */
- GCRYMPI_FMT_PGP = 2, /* As used by OpenPGP (unsigned only). */
- GCRYMPI_FMT_SSH = 3, /* As used by SSH (like STD but with length). */
- GCRYMPI_FMT_HEX = 4, /* Hex format. */
- GCRYMPI_FMT_USG = 5, /* Like STD but unsigned. */
- GCRYMPI_FMT_OPAQUE = 8 /* Opaque format (some functions only). */
-};
-
MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes);
MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
-int mpi_fromstr(MPI val, const char *str);
-MPI mpi_scanval(const char *string);
MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len);
void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
int *sign);
int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes,
int *sign);
-int mpi_print(enum gcry_mpi_format format, unsigned char *buffer,
- size_t buflen, size_t *nwritten, MPI a);
/*-- mpi-mod.c --*/
-void mpi_mod(MPI rem, MPI dividend, MPI divisor);
-
-/* Context used with Barrett reduction. */
-struct barrett_ctx_s;
-typedef struct barrett_ctx_s *mpi_barrett_t;
-
-mpi_barrett_t mpi_barrett_init(MPI m, int copy);
-void mpi_barrett_free(mpi_barrett_t ctx);
-void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx);
-void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx);
+int mpi_mod(MPI rem, MPI dividend, MPI divisor);
/*-- mpi-pow.c --*/
int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
@@ -120,7 +67,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
/*-- mpi-cmp.c --*/
int mpi_cmp_ui(MPI u, ulong v);
int mpi_cmp(MPI u, MPI v);
-int mpi_cmpabs(MPI u, MPI v);
/*-- mpi-sub-ui.c --*/
int mpi_sub_ui(MPI w, MPI u, unsigned long vval);
@@ -129,145 +75,29 @@ int mpi_sub_ui(MPI w, MPI u, unsigned long vval);
void mpi_normalize(MPI a);
unsigned mpi_get_nbits(MPI a);
int mpi_test_bit(MPI a, unsigned int n);
-void mpi_set_bit(MPI a, unsigned int n);
-void mpi_set_highbit(MPI a, unsigned int n);
-void mpi_clear_highbit(MPI a, unsigned int n);
-void mpi_clear_bit(MPI a, unsigned int n);
-void mpi_rshift_limbs(MPI a, unsigned int count);
-void mpi_rshift(MPI x, MPI a, unsigned int n);
-void mpi_lshift_limbs(MPI a, unsigned int count);
-void mpi_lshift(MPI x, MPI a, unsigned int n);
+int mpi_set_bit(MPI a, unsigned int n);
+int mpi_rshift(MPI x, MPI a, unsigned int n);
/*-- mpi-add.c --*/
-void mpi_add_ui(MPI w, MPI u, unsigned long v);
-void mpi_add(MPI w, MPI u, MPI v);
-void mpi_sub(MPI w, MPI u, MPI v);
-void mpi_addm(MPI w, MPI u, MPI v, MPI m);
-void mpi_subm(MPI w, MPI u, MPI v, MPI m);
+int mpi_add(MPI w, MPI u, MPI v);
+int mpi_sub(MPI w, MPI u, MPI v);
+int mpi_addm(MPI w, MPI u, MPI v, MPI m);
+int mpi_subm(MPI w, MPI u, MPI v, MPI m);
/*-- mpi-mul.c --*/
-void mpi_mul(MPI w, MPI u, MPI v);
-void mpi_mulm(MPI w, MPI u, MPI v, MPI m);
+int mpi_mul(MPI w, MPI u, MPI v);
+int mpi_mulm(MPI w, MPI u, MPI v, MPI m);
/*-- mpi-div.c --*/
-void mpi_tdiv_r(MPI rem, MPI num, MPI den);
-void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
-void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor);
-
-/*-- mpi-inv.c --*/
-int mpi_invm(MPI x, MPI a, MPI n);
-
-/*-- ec.c --*/
-
-/* Object to represent a point in projective coordinates */
-struct gcry_mpi_point {
- MPI x;
- MPI y;
- MPI z;
-};
-
-typedef struct gcry_mpi_point *MPI_POINT;
-
-/* Models describing an elliptic curve */
-enum gcry_mpi_ec_models {
- /* The Short Weierstrass equation is
- * y^2 = x^3 + ax + b
- */
- MPI_EC_WEIERSTRASS = 0,
- /* The Montgomery equation is
- * by^2 = x^3 + ax^2 + x
- */
- MPI_EC_MONTGOMERY,
- /* The Twisted Edwards equation is
- * ax^2 + y^2 = 1 + bx^2y^2
- * Note that we use 'b' instead of the commonly used 'd'.
- */
- MPI_EC_EDWARDS
-};
-
-/* Dialects used with elliptic curves */
-enum ecc_dialects {
- ECC_DIALECT_STANDARD = 0,
- ECC_DIALECT_ED25519,
- ECC_DIALECT_SAFECURVE
-};
-
-/* This context is used with all our EC functions. */
-struct mpi_ec_ctx {
- enum gcry_mpi_ec_models model; /* The model describing this curve. */
- enum ecc_dialects dialect; /* The ECC dialect used with the curve. */
- int flags; /* Public key flags (not always used). */
- unsigned int nbits; /* Number of bits. */
-
- /* Domain parameters. Note that they may not all be set and if set
- * the MPIs may be flaged as constant.
- */
- MPI p; /* Prime specifying the field GF(p). */
- MPI a; /* First coefficient of the Weierstrass equation. */
- MPI b; /* Second coefficient of the Weierstrass equation. */
- MPI_POINT G; /* Base point (generator). */
- MPI n; /* Order of G. */
- unsigned int h; /* Cofactor. */
-
- /* The actual key. May not be set. */
- MPI_POINT Q; /* Public key. */
- MPI d; /* Private key. */
-
- const char *name; /* Name of the curve. */
-
- /* This structure is private to mpi/ec.c! */
- struct {
- struct {
- unsigned int a_is_pminus3:1;
- unsigned int two_inv_p:1;
- } valid; /* Flags to help setting the helper vars below. */
-
- int a_is_pminus3; /* True if A = P - 3. */
-
- MPI two_inv_p;
-
- mpi_barrett_t p_barrett;
-
- /* Scratch variables. */
- MPI scratch[11];
-
- /* Helper for fast reduction. */
- /* int nist_nbits; /\* If this is a NIST curve, the # of bits. *\/ */
- /* MPI s[10]; */
- /* MPI c; */
- } t;
-
- /* Curve specific computation routines for the field. */
- void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
- void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec);
- void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx);
- void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx);
- void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx);
-};
-
-void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
- enum ecc_dialects dialect,
- int flags, MPI p, MPI a, MPI b);
-void mpi_ec_deinit(struct mpi_ec_ctx *ctx);
-MPI_POINT mpi_point_new(unsigned int nbits);
-void mpi_point_release(MPI_POINT p);
-void mpi_point_init(MPI_POINT p);
-void mpi_point_free_parts(MPI_POINT p);
-int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx);
-void mpi_ec_add_points(MPI_POINT result,
- MPI_POINT p1, MPI_POINT p2,
- struct mpi_ec_ctx *ctx);
-void mpi_ec_mul_point(MPI_POINT result,
- MPI scalar, MPI_POINT point,
- struct mpi_ec_ctx *ctx);
-int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx);
+int mpi_tdiv_r(MPI rem, MPI num, MPI den);
+int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
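
Since these helpers now return int rather than void, callers are expected to propagate failures; a minimal sketch (the exact error values are assumed, e.g. -ENOMEM on limb allocation failure):

	int ret;

	ret = mpi_addm(w, u, v, m);		/* w = (u + v) mod m */
	if (!ret)
		ret = mpi_mulm(w, w, w, m);	/* w = w*w mod m */
	if (ret)
		return ret;
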
/* inline functions */
/**
* mpi_get_size() - returns max size required to store the number
*
- * @a: A multi precision integer for which we want to allocate a bufer
+ * @a: A multi precision integer for which we want to allocate a buffer
*
* Return: size required to store the number
*/
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 6cbbfe94348c..4c5003afee6c 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -17,11 +17,12 @@ static inline int ip_mroute_opt(int opt)
}
int ip_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
-int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
-int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
+int ip_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t);
+int ipmr_ioctl(struct sock *sk, int cmd, void *arg);
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
int ip_mr_init(void);
bool ipmr_rule_default(const struct fib_rule *rule);
+int ipmr_sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
#else
static inline int ip_mroute_setsockopt(struct sock *sock, int optname,
sockptr_t optval, unsigned int optlen)
@@ -29,13 +30,13 @@ static inline int ip_mroute_setsockopt(struct sock *sock, int optname,
return -ENOPROTOOPT;
}
-static inline int ip_mroute_getsockopt(struct sock *sock, int optname,
- char __user *optval, int __user *optlen)
+static inline int ip_mroute_getsockopt(struct sock *sk, int optname,
+ sockptr_t optval, sockptr_t optlen)
{
return -ENOPROTOOPT;
}
-static inline int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
+static inline int ipmr_ioctl(struct sock *sk, int cmd, void *arg)
{
return -ENOIOCTLCMD;
}
@@ -54,6 +55,12 @@ static inline bool ipmr_rule_default(const struct fib_rule *rule)
{
return true;
}
+
+static inline int ipmr_sk_ioctl(struct sock *sk, unsigned int cmd,
+ void __user *arg)
+{
+ return 1;
+}
#endif
#define VIFF_STATIC 0x8000
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index bc351a85ce9b..fddafdc168f7 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -27,12 +27,13 @@ struct sock;
#ifdef CONFIG_IPV6_MROUTE
extern int ip6_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
-extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
+extern int ip6_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t);
extern int ip6_mr_input(struct sk_buff *skb);
-extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
extern int ip6_mr_init(void);
+extern int ip6_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb);
extern void ip6_mr_cleanup(void);
+int ip6mr_ioctl(struct sock *sk, int cmd, void *arg);
#else
static inline int ip6_mroute_setsockopt(struct sock *sock, int optname,
sockptr_t optval, unsigned int optlen)
@@ -42,13 +43,13 @@ static inline int ip6_mroute_setsockopt(struct sock *sock, int optname,
static inline
int ip6_mroute_getsockopt(struct sock *sock,
- int optname, char __user *optval, int __user *optlen)
+ int optname, sockptr_t optval, sockptr_t optlen)
{
return -ENOPROTOOPT;
}
static inline
-int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
+int ip6mr_ioctl(struct sock *sk, int cmd, void *arg)
{
return -ENOIOCTLCMD;
}
@@ -58,6 +59,12 @@ static inline int ip6_mr_init(void)
return 0;
}
+static inline int
+ip6_mr_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ return ip6_output(net, sk, skb);
+}
+
static inline void ip6_mr_cleanup(void)
{
return;
@@ -100,6 +107,27 @@ extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
#ifdef CONFIG_IPV6_MROUTE
bool mroute6_is_socket(struct net *net, struct sk_buff *skb);
extern int ip6mr_sk_done(struct sock *sk);
+static inline int ip6mr_sk_ioctl(struct sock *sk, unsigned int cmd,
+ void __user *arg)
+{
+ switch (cmd) {
+ /* These userspace buffers will be consumed by ip6mr_ioctl() */
+ case SIOCGETMIFCNT_IN6: {
+ struct sioc_mif_req6 buffer;
+
+ return sock_ioctl_inout(sk, cmd, arg, &buffer,
+ sizeof(buffer));
+ }
+ case SIOCGETSGCNT_IN6: {
+ struct sioc_sg_req6 buffer;
+
+ return sock_ioctl_inout(sk, cmd, arg, &buffer,
+ sizeof(buffer));
+ }
+ }
+
+ return 1;
+}
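
The two SIOCGET* cases bounce a fixed-size request through sock_ioctl_inout(); a hedged sketch of that contract, with sk_ioctl standing in for the assumed protocol dispatch (the stubs' return of 1 is taken to mean "not handled here"):

static int example_ioctl_inout(struct sock *sk, unsigned int cmd,
			       void __user *arg, void *karg, size_t size)
{
	int ret;

	if (copy_from_user(karg, arg, size))
		return -EFAULT;
	ret = sk_ioctl(sk, cmd, karg);	/* assumed protocol dispatch */
	if (ret)
		return ret;
	return copy_to_user(arg, karg, size) ? -EFAULT : 0;
}
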
#else
static inline bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
{
@@ -109,5 +137,11 @@ static inline int ip6mr_sk_done(struct sock *sk)
{
return 0;
}
+
+static inline int ip6mr_sk_ioctl(struct sock *sk, unsigned int cmd,
+ void __user *arg)
+{
+ return 1;
+}
#endif
#endif
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index 8071148f29a6..0075f6e5c3da 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -12,6 +12,7 @@
/**
* struct vif_device - interface representor for multicast routing
* @dev: network device being used
+ * @dev_tracker: refcount tracker for @dev reference
* @bytes_in: statistic; bytes ingressing
 * @bytes_out: statistic; bytes egressing
* @pkt_in: statistic; packets ingressing
@@ -25,7 +26,8 @@
* @remote: Remote address for tunnels
*/
struct vif_device {
- struct net_device *dev;
+ struct net_device __rcu *dev;
+ netdevice_tracker dev_tracker;
unsigned long bytes_in, bytes_out;
unsigned long pkt_in, pkt_out;
unsigned long rate_limit;
@@ -50,6 +52,7 @@ static inline int mr_call_vif_notifier(struct notifier_block *nb,
unsigned short family,
enum fib_event_type event_type,
struct vif_device *vif,
+ struct net_device *vif_dev,
unsigned short vif_index, u32 tb_id,
struct netlink_ext_ack *extack)
{
@@ -58,7 +61,7 @@ static inline int mr_call_vif_notifier(struct notifier_block *nb,
.family = family,
.extack = extack,
},
- .dev = vif->dev,
+ .dev = vif_dev,
.vif_index = vif_index,
.vif_flags = vif->flags,
.tb_id = tb_id,
@@ -71,6 +74,7 @@ static inline int mr_call_vif_notifiers(struct net *net,
unsigned short family,
enum fib_event_type event_type,
struct vif_device *vif,
+ struct net_device *vif_dev,
unsigned short vif_index, u32 tb_id,
unsigned int *ipmr_seq)
{
@@ -78,7 +82,7 @@ static inline int mr_call_vif_notifiers(struct net *net,
.info = {
.family = family,
},
- .dev = vif->dev,
+ .dev = vif_dev,
.vif_index = vif_index,
.vif_flags = vif->flags,
.tb_id = tb_id,
@@ -96,7 +100,8 @@ static inline int mr_call_vif_notifiers(struct net *net,
#define MAXVIFS 32
#endif
-#define VIF_EXISTS(_mrt, _idx) (!!((_mrt)->vif_table[_idx].dev))
+/* Note: This helper is deprecated. */
+#define VIF_EXISTS(_mrt, _idx) (!!rcu_access_pointer((_mrt)->vif_table[_idx].dev))
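
With @dev now annotated __rcu, readers must dereference it inside an RCU read-side section; a minimal sketch (mrt and vifi are hypothetical locals):

	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(mrt->vif_table[vifi].dev);
	if (dev)
		mtu = dev->mtu;	/* valid only inside the read-side section */
	rcu_read_unlock();
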
/* mfc_flags:
* MFC_STATIC - the entry was added statically (not by a routing daemon)
@@ -141,9 +146,9 @@ struct mr_mfc {
unsigned long last_assert;
int minvif;
int maxvif;
- unsigned long bytes;
- unsigned long pkt;
- unsigned long wrong_if;
+ atomic_long_t bytes;
+ atomic_long_t pkt;
+ atomic_long_t wrong_if;
unsigned long lastuse;
unsigned char ttls[MAXVIFS];
refcount_t refcount;
@@ -257,6 +262,11 @@ struct mr_table {
int mroute_reg_vif_num;
};
+static inline bool mr_can_free_table(struct net *net)
+{
+ return !check_net(net) || !net_initialized(net);
+}
+
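The mr_mfc counters converted to atomic_long_t in the hunk above can be updated locklessly; a hedged sketch:

	/* forwarding hot path */
	atomic_long_inc(&mfc->pkt);
	atomic_long_add(skb->len, &mfc->bytes);

	/* dumping statistics */
	unsigned long pkts = atomic_long_read(&mfc->pkt);
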
#ifdef CONFIG_IP_MROUTE_COMMON
void vif_device_init(struct vif_device *v,
struct net_device *dev,
@@ -303,7 +313,7 @@ int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
struct netlink_ext_ack *extack),
struct mr_table *(*mr_iter)(struct net *net,
struct mr_table *mrt),
- rwlock_t *mrt_lock, struct netlink_ext_ack *extack);
+ struct netlink_ext_ack *extack);
#else
static inline void vif_device_init(struct vif_device *v,
struct net_device *dev,
@@ -358,7 +368,7 @@ static inline int mr_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack),
struct mr_table *(*mr_iter)(struct net *net,
struct mr_table *mrt),
- rwlock_t *mrt_lock, struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack)
{
return -EINVAL;
}
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 6aff469e511d..8003e3218c46 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -2,8 +2,27 @@
#ifndef LINUX_MSI_H
#define LINUX_MSI_H
-#include <linux/kobject.h>
-#include <linux/list.h>
+/*
+ * This header file contains MSI data structures and functions which are
+ * only relevant for:
+ * - Interrupt core code
+ * - PCI/MSI core code
+ * - MSI interrupt domain implementations
+ * - IOMMU, low level VFIO, NTB and other justified exceptions
+ * dealing with low level MSI details.
+ *
+ * Regular device drivers have no business with any of these functions and
+ * especially storing MSI descriptor pointers in random code is considered
+ * abuse.
+ *
+ * Device driver relevant functions are available in <linux/msi_api.h>
+ */
+
+#include <linux/irqdomain_defs.h>
+#include <linux/cpumask_types.h>
+#include <linux/msi_api.h>
+#include <linux/irq.h>
+
#include <asm/msi.h>
/* Dummy shadow structures if an architecture does not define them */
@@ -25,6 +44,10 @@ typedef struct arch_msi_msg_data {
} __attribute__ ((packed)) arch_msi_msg_data_t;
#endif
+#ifndef arch_is_isolated_msi
+#define arch_is_isolated_msi() false
+#endif
+
/**
* msi_msg - Representation of a MSI message
* @address_lo: Low 32 bits of msi message address
@@ -50,192 +73,302 @@ struct msi_msg {
};
};
-extern int pci_msi_ignore_mask;
/* Helper functions */
-struct irq_data;
struct msi_desc;
struct pci_dev;
-struct platform_msi_priv_data;
+struct device_attribute;
+struct irq_domain;
+struct irq_affinity_desc;
+
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
-static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
-{
-}
+static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) { }
#endif
typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
struct msi_msg *msg);
/**
- * platform_msi_desc - Platform device specific msi descriptor data
- * @msi_priv_data: Pointer to platform private data
- * @msi_index: The index of the MSI descriptor for multi MSI
+ * pci_msi_desc - PCI/MSI specific MSI descriptor data
+ *
+ * @msi_mask: [PCI MSI] MSI cached mask bits
+ * @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits
+ * @is_msix: [PCI MSI/X] True if MSI-X
+ * @multiple: [PCI MSI/X] log2 num of messages allocated
+ * @multi_cap: [PCI MSI/X] log2 num of messages supported
+ * @can_mask: [PCI MSI/X] Masking supported?
+ * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
+ * @is_virtual: [PCI MSI-X] True if the vector has no hardware table entry
+ * backing it (virtual vector)
+ * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
+ * @mask_pos: [PCI MSI] Mask register position
+ * @mask_base: [PCI MSI-X] Mask register base address
*/
-struct platform_msi_desc {
- struct platform_msi_priv_data *msi_priv_data;
- u16 msi_index;
+struct pci_msi_desc {
+ union {
+ u32 msi_mask;
+ u32 msix_ctrl;
+ };
+ struct {
+ u8 is_msix : 1;
+ u8 multiple : 3;
+ u8 multi_cap : 3;
+ u8 can_mask : 1;
+ u8 is_64 : 1;
+ u8 is_virtual : 1;
+ unsigned default_irq;
+ } msi_attrib;
+ union {
+ u8 mask_pos;
+ void __iomem *mask_base;
+ };
};
/**
- * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
- * @msi_index: The index of the MSI descriptor
+ * union msi_domain_cookie - Opaque MSI domain specific data
+ * @value: u64 value store
+ * @ptr: Pointer to domain specific data
+ * @iobase: Domain specific IOmem pointer
+ *
+ * The content of this data is implementation defined and used by the MSI
+ * domain to store domain specific information which is required for
+ * interrupt chip callbacks.
*/
-struct fsl_mc_msi_desc {
- u16 msi_index;
+union msi_domain_cookie {
+ u64 value;
+ void *ptr;
+ void __iomem *iobase;
};
/**
- * ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data
- * @dev_index: TISCI device index
+ * struct msi_desc_data - Generic MSI descriptor data
+ * @dcookie: Cookie for MSI domain specific data which is required
+ * for irq_chip callbacks
+ * @icookie: Cookie for the MSI interrupt instance provided by
+ * the usage site to the allocation function
+ *
+ * The content of this data is implementation defined, e.g. PCI/IMS
+ * implementations define the meaning of the data. The MSI core ignores
+ * this data completely.
*/
-struct ti_sci_inta_msi_desc {
- u16 dev_index;
+struct msi_desc_data {
+ union msi_domain_cookie dcookie;
+ union msi_instance_cookie icookie;
};
+#define MSI_MAX_INDEX ((unsigned int)USHRT_MAX)
+
/**
* struct msi_desc - Descriptor structure for MSI based interrupts
- * @list: List head for management
* @irq: The base interrupt number
* @nvec_used: The number of vectors used
* @dev: Pointer to the device which uses this descriptor
* @msg: The last set MSI message cached for reuse
* @affinity: Optional pointer to a cpu affinity mask for this descriptor
+ * @iommu_msi_iova: Optional shifted IOVA from the IOMMU to override the msi_addr.
+ * Only used if iommu_msi_shift != 0
+ * @iommu_msi_shift: Indicates how many bits of the original address should be
+ * preserved when using iommu_msi_iova.
+ * @sysfs_attr: Pointer to sysfs device attribute
*
* @write_msi_msg: Callback that may be called when the MSI message
* address or data changes
* @write_msi_msg_data: Data parameter for the callback.
*
- * @masked: [PCI MSI/X] Mask bits
- * @is_msix: [PCI MSI/X] True if MSI-X
- * @multiple: [PCI MSI/X] log2 num of messages allocated
- * @multi_cap: [PCI MSI/X] log2 num of messages supported
- * @maskbit: [PCI MSI/X] Mask-Pending bit supported?
- * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
- * @entry_nr: [PCI MSI/X] Entry which is described by this descriptor
- * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
- * @mask_pos: [PCI MSI] Mask register position
- * @mask_base: [PCI MSI-X] Mask register base address
- * @platform: [platform] Platform device specific msi descriptor data
- * @fsl_mc: [fsl-mc] FSL MC device specific msi descriptor data
- * @inta: [INTA] TISCI based INTA specific msi descriptor data
+ * @msi_index: Index of the msi descriptor
+ * @pci: PCI specific msi descriptor data
+ * @data: Generic MSI descriptor data
*/
struct msi_desc {
/* Shared device/bus type independent data */
- struct list_head list;
unsigned int irq;
unsigned int nvec_used;
struct device *dev;
struct msi_msg msg;
struct irq_affinity_desc *affinity;
#ifdef CONFIG_IRQ_MSI_IOMMU
- const void *iommu_cookie;
+ u64 iommu_msi_iova : 58;
+ u64 iommu_msi_shift : 6;
+#endif
+#ifdef CONFIG_SYSFS
+ struct device_attribute *sysfs_attrs;
#endif
void (*write_msi_msg)(struct msi_desc *entry, void *data);
void *write_msi_msg_data;
+ u16 msi_index;
union {
- /* PCI MSI/X specific data */
- struct {
- u32 masked;
- struct {
- u8 is_msix : 1;
- u8 multiple : 3;
- u8 multi_cap : 3;
- u8 maskbit : 1;
- u8 is_64 : 1;
- u8 is_virtual : 1;
- u16 entry_nr;
- unsigned default_irq;
- } msi_attrib;
- union {
- u8 mask_pos;
- void __iomem *mask_base;
- };
- };
-
- /*
- * Non PCI variants add their data structure here. New
- * entries need to use a named structure. We want
- * proper name spaces for this. The PCI part is
- * anonymous for now as it would require an immediate
- * tree wide cleanup.
- */
- struct platform_msi_desc platform;
- struct fsl_mc_msi_desc fsl_mc;
- struct ti_sci_inta_msi_desc inta;
+ struct pci_msi_desc pci;
+ struct msi_desc_data data;
};
};
-/* Helpers to hide struct msi_desc implementation details */
-#define msi_desc_to_dev(desc) ((desc)->dev)
-#define dev_to_msi_list(dev) (&(dev)->msi_list)
-#define first_msi_entry(dev) \
- list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
-#define for_each_msi_entry(desc, dev) \
- list_for_each_entry((desc), dev_to_msi_list((dev)), list)
-#define for_each_msi_entry_safe(desc, tmp, dev) \
- list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
-#define for_each_msi_vector(desc, __irq, dev) \
- for_each_msi_entry((desc), (dev)) \
- if ((desc)->irq) \
- for (__irq = (desc)->irq; \
- __irq < ((desc)->irq + (desc)->nvec_used); \
- __irq++)
+/*
+ * Filter values for the MSI descriptor iterators and accessor functions.
+ */
+enum msi_desc_filter {
+ /* All descriptors */
+ MSI_DESC_ALL,
+ /* Descriptors which have no interrupt associated */
+ MSI_DESC_NOTASSOCIATED,
+ /* Descriptors which have an interrupt associated */
+ MSI_DESC_ASSOCIATED,
+};
-#ifdef CONFIG_IRQ_MSI_IOMMU
-static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
+
+/**
+ * struct msi_dev_domain - The internals of MSI domain info per device
+ * @store: Xarray for storing MSI descriptor pointers
+ * @domain: Pointer to a per device interrupt domain
+ */
+struct msi_dev_domain {
+ struct xarray store;
+ struct irq_domain *domain;
+};
+
+int msi_setup_device_data(struct device *dev);
+
+void __msi_lock_descs(struct device *dev);
+void __msi_unlock_descs(struct device *dev);
+
+DEFINE_LOCK_GUARD_1(msi_descs_lock, struct device, __msi_lock_descs(_T->lock),
+ __msi_unlock_descs(_T->lock));
+
+struct msi_desc *msi_domain_first_desc(struct device *dev, unsigned int domid,
+ enum msi_desc_filter filter);
+
+/**
+ * msi_first_desc - Get the first MSI descriptor of the default irqdomain
+ * @dev: Device to operate on
+ * @filter: Descriptor state filter
+ *
+ * Must be called with the MSI descriptor mutex held, e.g. by holding
+ * the msi_descs_lock guard defined above.
+ *
+ * Return: Pointer to the first MSI descriptor matching the search
+ * criteria, NULL if none found.
+ */
+static inline struct msi_desc *msi_first_desc(struct device *dev,
+ enum msi_desc_filter filter)
{
- return desc->iommu_cookie;
+ return msi_domain_first_desc(dev, MSI_DEFAULT_DOMAIN, filter);
}
-static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
- const void *iommu_cookie)
+struct msi_desc *msi_next_desc(struct device *dev, unsigned int domid,
+ enum msi_desc_filter filter);
+
+/**
+ * msi_domain_for_each_desc - Iterate the MSI descriptors in a specific domain
+ *
+ * @desc: struct msi_desc pointer used as iterator
+ * @dev: struct device pointer - device to iterate
+ * @domid: The id of the interrupt domain which should be walked.
+ * @filter: Filter for descriptor selection
+ *
+ * Notes:
+ * - The loop must be protected by the MSI descriptor mutex, e.g. via
+ * the msi_descs_lock guard.
+ * - It is safe to remove a retrieved MSI descriptor in the loop.
+ */
+#define msi_domain_for_each_desc(desc, dev, domid, filter) \
+ for ((desc) = msi_domain_first_desc((dev), (domid), (filter)); (desc); \
+ (desc) = msi_next_desc((dev), (domid), (filter)))
+
+/**
+ * msi_for_each_desc - Iterate the MSI descriptors in the default irqdomain
+ *
+ * @desc: struct msi_desc pointer used as iterator
+ * @dev: struct device pointer - device to iterate
+ * @filter: Filter for descriptor selection
+ *
+ * Notes:
+ * - The loop must be protected by the MSI descriptor mutex, e.g. via
+ * the msi_descs_lock guard.
+ * - It is safe to remove a retrieved MSI descriptor in the loop.
+ */
+#define msi_for_each_desc(desc, dev, filter) \
+ msi_domain_for_each_desc((desc), (dev), MSI_DEFAULT_DOMAIN, (filter))
+
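A minimal iteration sketch using the msi_descs_lock guard and filters defined above; the pr_debug body is illustrative.

	struct msi_desc *desc;

	scoped_guard(msi_descs_lock, dev) {
		msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
			pr_debug("index %u -> irq %u\n",
				 desc->msi_index, desc->irq);
	}
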
+#define msi_desc_to_dev(desc) ((desc)->dev)
+
+static inline void msi_desc_set_iommu_msi_iova(struct msi_desc *desc, u64 msi_iova,
+ unsigned int msi_shift)
{
- desc->iommu_cookie = iommu_cookie;
+#ifdef CONFIG_IRQ_MSI_IOMMU
+ desc->iommu_msi_iova = msi_iova >> msi_shift;
+ desc->iommu_msi_shift = msi_shift;
+#endif
}
-#else
-static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
+
+/**
+ * msi_msg_set_addr() - Set MSI address in an MSI message
+ *
+ * @desc: MSI descriptor that may carry an IOVA base address for MSI via @iommu_msi_iova/shift
+ * @msg: Target MSI message to set its address_hi and address_lo
+ * @msi_addr: Physical address to set the MSI message
+ *
+ * Notes:
+ * - Override @msi_addr using the IOVA base address in the @desc if @iommu_msi_shift is set
+ * - Otherwise, simply set @msi_addr to @msg
+ */
+static inline void msi_msg_set_addr(struct msi_desc *desc, struct msi_msg *msg,
+ phys_addr_t msi_addr)
{
- return NULL;
+#ifdef CONFIG_IRQ_MSI_IOMMU
+ if (desc->iommu_msi_shift) {
+ u64 msi_iova = desc->iommu_msi_iova << desc->iommu_msi_shift;
+
+ msg->address_hi = upper_32_bits(msi_iova);
+ msg->address_lo = lower_32_bits(msi_iova) |
+ (msi_addr & ((1 << desc->iommu_msi_shift) - 1));
+ return;
+ }
+#endif
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->address_lo = lower_32_bits(msi_addr);
}
-static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
- const void *iommu_cookie)
+int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
+ struct msi_desc *init_desc);
+/**
+ * msi_insert_msi_desc - Allocate and initialize a MSI descriptor in the
+ * default irqdomain and insert it at @init_desc->msi_index
+ * @dev: Pointer to the device for which the descriptor is allocated
+ * @init_desc: Pointer to an MSI descriptor to initialize the new descriptor
+ *
+ * Return: 0 on success or an appropriate failure code.
+ */
+static inline int msi_insert_msi_desc(struct device *dev, struct msi_desc *init_desc)
{
+ return msi_domain_insert_msi_desc(dev, MSI_DEFAULT_DOMAIN, init_desc);
}
-#endif
-#ifdef CONFIG_PCI_MSI
-#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
-#define for_each_pci_msi_entry(desc, pdev) \
- for_each_msi_entry((desc), &(pdev)->dev)
+void msi_domain_free_msi_descs_range(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
-struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
-void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
-void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
-#else /* CONFIG_PCI_MSI */
-static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
+/**
+ * msi_free_msi_descs_range - Free a range of MSI descriptors of a device
+ * in the default irqdomain
+ *
+ * @dev: Device for which to free the descriptors
+ * @first: Index to start freeing from (inclusive)
+ * @last: Last index to be freed (inclusive)
+ */
+static inline void msi_free_msi_descs_range(struct device *dev, unsigned int first,
+ unsigned int last)
{
- return NULL;
+ msi_domain_free_msi_descs_range(dev, MSI_DEFAULT_DOMAIN, first, last);
}
-static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
+
+/**
+ * msi_free_msi_descs - Free all MSI descriptors of a device in the default irqdomain
+ * @dev: Device to free the descriptors
+ */
+static inline void msi_free_msi_descs(struct device *dev)
{
+ msi_free_msi_descs_range(dev, 0, MSI_MAX_INDEX);
}
-#endif /* CONFIG_PCI_MSI */
-
-struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
- const struct irq_affinity_desc *affinity);
-void free_msi_entry(struct msi_desc *entry);
-void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
-
-u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
-u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
-void pci_msi_mask_irq(struct irq_data *data);
-void pci_msi_unmask_irq(struct irq_data *data);
/*
* The arch hooks to setup up msi irqs. Default functions are implemented
@@ -250,33 +383,36 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
-#else
-static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- WARN_ON_ONCE(1);
- return -ENODEV;
-}
+#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
-static inline void arch_teardown_msi_irqs(struct pci_dev *dev)
-{
- WARN_ON_ONCE(1);
-}
-#endif
+/*
+ * Xen uses non-default msi_domain_ops and hence needs a way to populate sysfs
+ * entries of MSI IRQs.
+ */
+#if defined(CONFIG_PCI_XEN) || defined(CONFIG_PCI_MSI_ARCH_FALLBACKS)
+#ifdef CONFIG_SYSFS
+int msi_device_populate_sysfs(struct device *dev);
+void msi_device_destroy_sysfs(struct device *dev);
+#else /* CONFIG_SYSFS */
+static inline int msi_device_populate_sysfs(struct device *dev) { return 0; }
+static inline void msi_device_destroy_sysfs(struct device *dev) { }
+#endif /* !CONFIG_SYSFS */
+#endif /* CONFIG_PCI_XEN || CONFIG_PCI_MSI_ARCH_FALLBACKS */
/*
- * The restore hooks are still available as they are useful even
- * for fully irq domain based setups. Courtesy to XEN/X86.
+ * The restore hook is still available even for fully irq domain based
+ * setups. Courtesy to XEN/X86.
*/
-void arch_restore_msi_irqs(struct pci_dev *dev);
-void default_restore_msi_irqs(struct pci_dev *dev);
+bool arch_restore_msi_irqs(struct pci_dev *dev);
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+#ifdef CONFIG_GENERIC_MSI_IRQ
#include <linux/irqhandler.h>
struct irq_domain;
struct irq_domain_ops;
struct irq_chip;
+struct irq_fwspec;
struct device_node;
struct fwnode_handle;
struct msi_domain_info;
@@ -286,22 +422,24 @@ struct msi_domain_info;
* @get_hwirq: Retrieve the resulting hw irq number
* @msi_init: Domain specific init function for MSI interrupts
* @msi_free: Domain specific function to free a MSI interrupts
- * @msi_check: Callback for verification of the domain/info/dev data
* @msi_prepare: Prepare the allocation of the interrupts in the domain
- * @msi_finish: Optional callback to finalize the allocation
+ * @msi_teardown: Reverse the effects of @msi_prepare
+ * @prepare_desc: Optional function to prepare the allocated MSI descriptor
+ * in the domain
* @set_desc: Set the msi descriptor for an interrupt
- * @handle_error: Optional error handler if the allocation fails
* @domain_alloc_irqs: Optional function to override the default allocation
* function.
* @domain_free_irqs: Optional function to override the default free
* function.
+ * @msi_translate: Optional translate callback to support the odd wire to
+ * MSI bridges, e.g. MBIGEN
*
- * @get_hwirq, @msi_init and @msi_free are callbacks used by
- * msi_create_irq_domain() and related interfaces
+ * @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying
+ * irqdomain.
*
- * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
- * are callbacks used by msi_domain_alloc_irqs() and related
- * interfaces which are based on msi_desc.
+ * @msi_prepare, @msi_teardown, @prepare_desc and @set_desc are
+ * callbacks used by the msi_domain_alloc/free_irqs*() variants.
*
* @domain_alloc_irqs, @domain_free_irqs can be used to override the
* default allocation/free functions (__msi_domain_alloc/free_irqs). This
@@ -309,15 +447,6 @@ struct msi_domain_info;
* be wrapped into the regular irq domains concepts by mere mortals. This
* allows to universally use msi_domain_alloc/free_irqs without having to
* special case XEN all over the place.
- *
- * Contrary to other operations @domain_alloc_irqs and @domain_free_irqs
- * are set to the default implementation if NULL and even when
- * MSI_FLAG_USE_DEF_DOM_OPS is not set to avoid breaking existing users and
- * because these callbacks are obviously mandatory.
- *
- * This is NOT meant to be abused, but it can be useful to build wrappers
- * for specialized MSI irq domains which need extra work before and after
- * calling __msi_domain_alloc_irqs()/__msi_domain_free_irqs().
*/
struct msi_domain_ops {
irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info,
@@ -329,46 +458,82 @@ struct msi_domain_ops {
void (*msi_free)(struct irq_domain *domain,
struct msi_domain_info *info,
unsigned int virq);
- int (*msi_check)(struct irq_domain *domain,
- struct msi_domain_info *info,
- struct device *dev);
int (*msi_prepare)(struct irq_domain *domain,
struct device *dev, int nvec,
msi_alloc_info_t *arg);
- void (*msi_finish)(msi_alloc_info_t *arg, int retval);
+ void (*msi_teardown)(struct irq_domain *domain,
+ msi_alloc_info_t *arg);
+ void (*prepare_desc)(struct irq_domain *domain, msi_alloc_info_t *arg,
+ struct msi_desc *desc);
void (*set_desc)(msi_alloc_info_t *arg,
struct msi_desc *desc);
- int (*handle_error)(struct irq_domain *domain,
- struct msi_desc *desc, int error);
int (*domain_alloc_irqs)(struct irq_domain *domain,
struct device *dev, int nvec);
void (*domain_free_irqs)(struct irq_domain *domain,
struct device *dev);
+ int (*msi_translate)(struct irq_domain *domain, struct irq_fwspec *fwspec,
+ irq_hw_number_t *hwirq, unsigned int *type);
};
/**
* struct msi_domain_info - MSI interrupt domain data
 * @flags: Flags to describe features and capabilities
+ * @bus_token: The domain bus token
+ * @hwsize: The hardware table size or the software index limit.
+ * If 0 then the size is considered unlimited and
+ * gets initialized to the maximum software index limit
+ * by the domain creation code.
* @ops: The callback data structure
+ * @dev: Device which creates the domain
* @chip: Optional: associated interrupt chip
* @chip_data: Optional: associated interrupt chip data
* @handler: Optional: associated interrupt flow handler
* @handler_data: Optional: associated interrupt flow handler data
* @handler_name: Optional: associated interrupt flow handler name
+ * @alloc_data: Optional: associated interrupt allocation data
* @data: Optional: domain specific data
*/
struct msi_domain_info {
- u32 flags;
- struct msi_domain_ops *ops;
- struct irq_chip *chip;
- void *chip_data;
- irq_flow_handler_t handler;
- void *handler_data;
- const char *handler_name;
- void *data;
+ u32 flags;
+ enum irq_domain_bus_token bus_token;
+ unsigned int hwsize;
+ struct msi_domain_ops *ops;
+ struct device *dev;
+ struct irq_chip *chip;
+ void *chip_data;
+ irq_flow_handler_t handler;
+ void *handler_data;
+ const char *handler_name;
+ msi_alloc_info_t *alloc_data;
+ void *data;
};
-/* Flags for msi_domain_info */
+/**
+ * struct msi_domain_template - Template for MSI device domains
+ * @name: Storage for the resulting name. Filled in by the core.
+ * @chip: Interrupt chip for this domain
+ * @ops: MSI domain ops
+ * @info: MSI domain info data
+ * @alloc_info: MSI domain allocation data (architecture specific)
+ */
+struct msi_domain_template {
+ char name[48];
+ struct irq_chip chip;
+ struct msi_domain_ops ops;
+ struct msi_domain_info info;
+ msi_alloc_info_t alloc_info;
+};
+
+/*
+ * Flags for msi_domain_info
+ *
+ * Bit 0-15: Generic MSI functionality which is not subject to restriction
+ * by parent domains
+ *
+ * Bit 16-31: Functionality which depends on the underlying parent domain and
+ * can be masked out by msi_parent_ops::init_dev_msi_info() when
+ * a device MSI domain is initialized.
+ */
enum {
/*
* Init non implemented ops callbacks with default MSI domain
@@ -380,82 +545,173 @@ enum {
* callbacks.
*/
MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1),
- /* Support multiple PCI MSI interrupts */
- MSI_FLAG_MULTI_PCI_MSI = (1 << 2),
- /* Support PCI MSIX interrupts */
- MSI_FLAG_PCI_MSIX = (1 << 3),
/* Needs early activate, required for PCI */
- MSI_FLAG_ACTIVATE_EARLY = (1 << 4),
+ MSI_FLAG_ACTIVATE_EARLY = (1 << 2),
/*
* Must reactivate when irq is started even when
* MSI_FLAG_ACTIVATE_EARLY has been set.
*/
- MSI_FLAG_MUST_REACTIVATE = (1 << 5),
+ MSI_FLAG_MUST_REACTIVATE = (1 << 3),
+ /* Populate sysfs on alloc() and destroy it on free() */
+ MSI_FLAG_DEV_SYSFS = (1 << 4),
+ /* Allocate simple MSI descriptors */
+ MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = (1 << 5),
+ /* Free MSI descriptors */
+ MSI_FLAG_FREE_MSI_DESCS = (1 << 6),
+ /* Use dev->fwnode for MSI device domain creation */
+ MSI_FLAG_USE_DEV_FWNODE = (1 << 7),
+ /* Set parent->dev into domain->pm_dev on device domain creation */
+ MSI_FLAG_PARENT_PM_DEV = (1 << 8),
+ /* Support for parent mask/unmask */
+ MSI_FLAG_PCI_MSI_MASK_PARENT = (1 << 9),
+ /* Support for parent startup/shutdown */
+ MSI_FLAG_PCI_MSI_STARTUP_PARENT = (1 << 10),
+
+ /* Mask for the generic functionality */
+ MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0),
+
+ /* Mask for the domain specific functionality */
+ MSI_DOMAIN_FLAGS_MASK = GENMASK(31, 16),
+
+ /* Support multiple PCI MSI interrupts */
+ MSI_FLAG_MULTI_PCI_MSI = (1 << 16),
+ /* Support PCI MSIX interrupts */
+ MSI_FLAG_PCI_MSIX = (1 << 17),
/* Is level-triggered capable, using two messages */
- MSI_FLAG_LEVEL_CAPABLE = (1 << 6),
+ MSI_FLAG_LEVEL_CAPABLE = (1 << 18),
+ /* MSI-X entries must be contiguous */
+ MSI_FLAG_MSIX_CONTIGUOUS = (1 << 19),
+ /* PCI/MSI-X vectors can be dynamically allocated/freed post MSI-X enable */
+ MSI_FLAG_PCI_MSIX_ALLOC_DYN = (1 << 20),
+ /* PCI MSIs cannot be steered separately to CPU cores */
+ MSI_FLAG_NO_AFFINITY = (1 << 21),
+ /* Inhibit usage of entry masking */
+ MSI_FLAG_NO_MASK = (1 << 22),
+};
+
+/*
+ * Flags for msi_parent_ops::chip_flags
+ */
+enum {
+ MSI_CHIP_FLAG_SET_EOI = (1 << 0),
+ MSI_CHIP_FLAG_SET_ACK = (1 << 1),
+};
+
+/**
+ * struct msi_parent_ops - MSI parent domain callbacks and configuration info
+ *
+ * @supported_flags: Required: The supported MSI flags of the parent domain
+ * @required_flags: Optional: The required MSI flags of the parent MSI domain
+ * @chip_flags: Optional: Select MSI chip callbacks to update with defaults
+ * in msi_lib_init_dev_msi_info().
+ * @bus_select_token: Optional: The bus token of the real parent domain for
+ * irq_domain::select()
+ * @bus_select_mask: Optional: A mask of supported BUS_DOMAINs for
+ * irq_domain::select()
+ * @prefix: Optional: Prefix for the domain and chip name
+ * @init_dev_msi_info: Required: Callback for MSI parent domains to setup parent
+ * domain specific domain flags, domain ops and interrupt chip
+ * callbacks when a per device domain is created.
+ */
+struct msi_parent_ops {
+ u32 supported_flags;
+ u32 required_flags;
+ u32 chip_flags;
+ u32 bus_select_token;
+ u32 bus_select_mask;
+ const char *prefix;
+ bool (*init_dev_msi_info)(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *msi_parent_domain,
+ struct msi_domain_info *msi_child_info);
};
+bool msi_parent_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *msi_parent_domain,
+ struct msi_domain_info *msi_child_info);
+
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force);
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
-int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
- int nvec);
-int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
- int nvec);
-void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
-void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
+
+struct irq_domain_info;
+struct irq_domain *msi_create_parent_irq_domain(struct irq_domain_info *info,
+ const struct msi_parent_ops *msi_parent_ops);
+
+bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
+ const struct msi_domain_template *template,
+ unsigned int hwsize, void *domain_data,
+ void *chip_data);
+void msi_remove_device_irq_domain(struct device *dev, unsigned int domid);
+
+bool msi_match_device_irq_domain(struct device *dev, unsigned int domid,
+ enum irq_domain_bus_token bus_token);
+
+int msi_domain_alloc_irqs_range_locked(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int nirqs);
+
+struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index,
+ const struct irq_affinity_desc *affdesc,
+ union msi_instance_cookie *cookie);
+
+void msi_domain_free_irqs_range_locked(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+void msi_domain_free_irqs_range(struct device *dev, unsigned int domid,
+ unsigned int first, unsigned int last);
+void msi_domain_free_irqs_all_locked(struct device *dev, unsigned int domid);
+void msi_domain_free_irqs_all(struct device *dev, unsigned int domid);
+
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
-struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
- struct msi_domain_info *info,
- struct irq_domain *parent);
-int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg);
-void platform_msi_domain_free_irqs(struct device *dev);
-
-/* When an MSI domain is used as an intermediate domain */
-int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
- int nvec, msi_alloc_info_t *args);
-int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
- int virq, int nvec, msi_alloc_info_t *args);
-struct irq_domain *
-__platform_msi_create_device_domain(struct device *dev,
- unsigned int nvec,
- bool is_tree,
- irq_write_msi_msg_t write_msi_msg,
- const struct irq_domain_ops *ops,
- void *host_data);
-
-#define platform_msi_create_device_domain(dev, nvec, write, ops, data) \
- __platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
-#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
- __platform_msi_create_device_domain(dev, nvec, true, write, ops, data)
-
-int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs);
-void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nvec);
-void *platform_msi_get_host_data(struct irq_domain *domain);
-#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
-
-#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
-void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
-struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
- struct msi_domain_info *info,
- struct irq_domain *parent);
-int pci_msi_domain_check_cap(struct irq_domain *domain,
- struct msi_domain_info *info, struct device *dev);
+/* Per device platform MSI */
+int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg);
+void platform_device_msi_free_irqs_all(struct device *dev);
+
+bool msi_device_has_isolated_msi(struct device *dev);
+
+static inline int msi_domain_alloc_irqs(struct device *dev, unsigned int domid, int nirqs)
+{
+ return msi_domain_alloc_irqs_range(dev, domid, 0, nirqs - 1);
+}
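
Editor's illustration (not part of the patch): a minimal sketch of how a driver might use the per-device allocation API declared above. The device pointer, the zeroed cookie and the error handling are assumptions of the example; MSI_DEFAULT_DOMAIN and MSI_ANY_INDEX come from msi_api.h further down.

/*
 * Hedged example: allocate one dynamic MSI at any free index on the
 * device's default MSI domain, then release it again.
 */
static int example_alloc_dyn_msi(struct device *dev)
{
	union msi_instance_cookie icookie = { .value = 0 };	/* made-up cookie */
	struct msi_map map;

	map = msi_domain_alloc_irq_at(dev, MSI_DEFAULT_DOMAIN, MSI_ANY_INDEX,
				      NULL, &icookie);
	if (map.index < 0)
		return map.index;	/* a negative index carries the error code */

	/* map.virq is now a Linux interrupt number, usable with request_irq() */

	msi_domain_free_irqs_range(dev, MSI_DEFAULT_DOMAIN, map.index, map.index);
	return 0;
}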
+
+#else /* CONFIG_GENERIC_MSI_IRQ */
+static inline bool msi_device_has_isolated_msi(struct device *dev)
+{
+ /*
+ * Arguably if the platform does not enable MSI support then it has
+ * "isolated MSI", as an interrupt controller that cannot receive MSIs
+ * is inherently isolated by our definition. The default definition for
+ * arch_is_isolated_msi() is conservative and returns false anyhow.
+ */
+ return arch_is_isolated_msi();
+}
+#endif /* CONFIG_GENERIC_MSI_IRQ */
+
+/* PCI specific interfaces */
+#ifdef CONFIG_PCI_MSI
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
+void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void pci_msi_mask_irq(struct irq_data *data);
+void pci_msi_unmask_irq(struct irq_data *data);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
+u32 pci_msi_map_rid_ctlr_node(struct pci_dev *pdev, struct device_node **node);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
-bool pci_dev_has_special_msi_domain(struct pci_dev *pdev);
-#else
+void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
+ struct msi_desc *desc);
+#else /* CONFIG_PCI_MSI */
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
return NULL;
}
-#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
+static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) { }
+#endif /* !CONFIG_PCI_MSI */
#endif /* LINUX_MSI_H */
diff --git a/include/linux/msi_api.h b/include/linux/msi_api.h
new file mode 100644
index 000000000000..5ae72d1912c4
--- /dev/null
+++ b/include/linux/msi_api.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_MSI_API_H
+#define LINUX_MSI_API_H
+
+/*
+ * APIs which are relevant for device driver code for allocating and
+ * freeing MSI interrupts and querying the associations between
+ * hardware/software MSI indices and the Linux interrupt number.
+ */
+
+struct device;
+
+/*
+ * Per device interrupt domain related constants.
+ */
+enum msi_domain_ids {
+ MSI_DEFAULT_DOMAIN,
+ MSI_MAX_DEVICE_IRQDOMAINS,
+};
+
+/**
+ * union msi_instance_cookie - MSI instance cookie
+ * @value: u64 value store
+ * @ptr: Pointer to usage site specific data
+ *
+ * This cookie is handed to the IMS allocation function and stored in the
+ * MSI descriptor for the interrupt chip callbacks.
+ *
+ * The content of this cookie is MSI domain implementation defined. For
+ * PCI/IMS implementations this could be a PASID or a pointer to queue
+ * memory.
+ */
+union msi_instance_cookie {
+ u64 value;
+ void *ptr;
+};
+
+/**
+ * msi_map - Mapping between MSI index and Linux interrupt number
+ * @index: The MSI index, e.g. slot in the MSI-X table or
+ * a software managed index if >= 0. If negative
+ * the allocation function failed and it contains
+ * the error code.
+ * @virq: The associated Linux interrupt number
+ */
+struct msi_map {
+ int index;
+ int virq;
+};
+
+/*
+ * Constant to be used for dynamic allocations when the allocation is any
+ * free MSI index, which is either an entry in a hardware table or a
+ * software managed index.
+ */
+#define MSI_ANY_INDEX UINT_MAX
+
+unsigned int msi_domain_get_virq(struct device *dev, unsigned int domid, unsigned int index);
+
+/**
+ * msi_get_virq - Lookup the Linux interrupt number for an MSI index on the default interrupt domain
+ * @dev: Device for which the lookup happens
+ * @index: The MSI index to lookup
+ *
+ * Return: The Linux interrupt number on success (> 0), 0 if not found
+ */
+static inline unsigned int msi_get_virq(struct device *dev, unsigned int index)
+{
+ return msi_domain_get_virq(dev, MSI_DEFAULT_DOMAIN, index);
+}
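
Editor's illustration: the lookup side of the API as a sketch; the index is assumed to have been stored by the driver at allocation time.

/* Hedged example: resolve a stored MSI index back to its Linux irq number. */
static int example_resolve_index(struct device *dev, unsigned int index)
{
	unsigned int virq = msi_get_virq(dev, index);

	return virq ? virq : -ENOENT;	/* 0 means nothing allocated there */
}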
+
+#endif
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index 15cc9b95e32b..6e471436bba5 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -34,7 +34,7 @@ struct mtd_blktrans_dev {
struct blk_mq_tag_set *tag_set;
spinlock_t queue_lock;
void *priv;
- fmode_t file_mode;
+ bool writable;
};
struct mtd_blktrans_ops {
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index fd1ecb821106..35ca19ae21ae 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -286,7 +286,8 @@ struct cfi_private {
map_word sector_erase_cmd;
unsigned long chipshift; /* Because they're of the same type */
const char *im_name; /* inter_module name for cmdset_setup */
- struct flchip chips[]; /* per-chip data structure for each chip */
+ unsigned long quirks;
+ struct flchip chips[] __counted_by(numchips); /* per-chip data structure for each chip */
};
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
@@ -307,32 +308,32 @@ static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
map_word val = map_read(map, addr);
- if (map_bankwidth_is_1(map)) {
+ if (map_bankwidth_is_1(map))
return val.x[0];
- } else if (map_bankwidth_is_2(map)) {
+ if (map_bankwidth_is_2(map))
return cfi16_to_cpu(map, val.x[0]);
- } else {
- /* No point in a 64-bit byteswap since that would just be
- swapping the responses from different chips, and we are
- only interested in one chip (a representative sample) */
- return cfi32_to_cpu(map, val.x[0]);
- }
+ /*
+ * No point in a 64-bit byteswap since that would just be
+ * swapping the responses from different chips, and we are
+ * only interested in one chip (a representative sample)
+ */
+ return cfi32_to_cpu(map, val.x[0]);
}
static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
{
map_word val = map_read(map, addr);
- if (map_bankwidth_is_1(map)) {
+ if (map_bankwidth_is_1(map))
return val.x[0] & 0xff;
- } else if (map_bankwidth_is_2(map)) {
+ if (map_bankwidth_is_2(map))
return cfi16_to_cpu(map, val.x[0]);
- } else {
- /* No point in a 64-bit byteswap since that would just be
- swapping the responses from different chips, and we are
- only interested in one chip (a representative sample) */
- return cfi32_to_cpu(map, val.x[0]);
- }
+ /*
+ * No point in a 64-bit byteswap since that would just be
+ * swapping the responses from different chips, and we are
+ * only interested in one chip (a representative sample)
+ */
+ return cfi32_to_cpu(map, val.x[0]);
}
void cfi_udelay(int us);
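
Editor's illustration: the accessors above hide the bankwidth-dependent byteswapping, so a CFI probe can compare query bytes directly. A sketch, assuming the full cfi_build_cmd_addr(cmd_ofs, map, cfi) signature declared earlier in this header:

/* Hedged example: verify the CFI 'QRY' signature at query offsets 0x10..0x12. */
static int example_check_qry(struct map_info *map, struct cfi_private *cfi)
{
	if (cfi_read_query(map, cfi_build_cmd_addr(0x10, map, cfi)) == 'Q' &&
	    cfi_read_query(map, cfi_build_cmd_addr(0x11, map, cfi)) == 'R' &&
	    cfi_read_query(map, cfi_build_cmd_addr(0x12, map, cfi)) == 'Y')
		return 0;

	return -ENODEV;
}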
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index c04f690871ca..9798c1a1d3b6 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -13,6 +13,7 @@
*/
#include <linux/sched.h>
#include <linux/mutex.h>
+#include <linux/wait.h>
typedef enum {
FL_READY,
diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h
index 0ce612428aea..bb6b7121a542 100644
--- a/include/linux/mtd/hyperbus.h
+++ b/include/linux/mtd/hyperbus.h
@@ -89,9 +89,7 @@ int hyperbus_register_device(struct hyperbus_device *hbdev);
/**
* hyperbus_unregister_device - deregister HyperBus slave memory device
* @hbdev: hyperbus_device to be unregistered
- *
- * Return: 0 for success, others for failure.
*/
-int hyperbus_unregister_device(struct hyperbus_device *hbdev);
+void hyperbus_unregister_device(struct hyperbus_device *hbdev);
#endif /* __LINUX_MTD_HYPERBUS_H__ */
diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h
index 0b6b59f7cfbd..56047a4e54c9 100644
--- a/include/linux/mtd/jedec.h
+++ b/include/linux/mtd/jedec.h
@@ -21,6 +21,9 @@ struct jedec_ecc_info {
/* JEDEC features */
#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
+/* JEDEC Optional Commands */
+#define JEDEC_OPT_CMD_READ_CACHE BIT(1)
+
struct nand_jedec_params {
/* rev info and features block */
/* 'J' 'E' 'S' 'D' */
diff --git a/include/linux/mtd/latch-addr-flash.h b/include/linux/mtd/latch-addr-flash.h
deleted file mode 100644
index e94b8e128074..000000000000
--- a/include/linux/mtd/latch-addr-flash.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Interface for NOR flash driver whose high address lines are latched
- *
- * Copyright © 2008 MontaVista Software, Inc. <source@mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-#ifndef __LATCH_ADDR_FLASH__
-#define __LATCH_ADDR_FLASH__
-
-struct map_info;
-struct mtd_partition;
-
-struct latch_addr_flash_data {
- unsigned int width;
- unsigned int size;
-
- int (*init)(void *data, int cs);
- void (*done)(void *data);
- void (*set_window)(unsigned long offset, void *data);
- void *data;
-
- unsigned int nr_parts;
- struct mtd_partition *parts;
-};
-
-#endif
diff --git a/include/linux/mtd/lpc32xx_mlc.h b/include/linux/mtd/lpc32xx_mlc.h
index d168c628c0d5..35e971be0950 100644
--- a/include/linux/mtd/lpc32xx_mlc.h
+++ b/include/linux/mtd/lpc32xx_mlc.h
@@ -11,7 +11,7 @@
#include <linux/dmaengine.h>
struct lpc32xx_mlc_platform_data {
- bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+ dma_filter_fn dma_filter;
};
#endif /* __LINUX_MTD_LPC32XX_MLC_H */
diff --git a/include/linux/mtd/lpc32xx_slc.h b/include/linux/mtd/lpc32xx_slc.h
index cf54a9f80460..a044b806566b 100644
--- a/include/linux/mtd/lpc32xx_slc.h
+++ b/include/linux/mtd/lpc32xx_slc.h
@@ -11,7 +11,7 @@
#include <linux/dmaengine.h>
struct lpc32xx_slc_platform_data {
- bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+ dma_filter_fn dma_filter;
};
#endif /* __LINUX_MTD_LPC32XX_SLC_H */
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index b4fa92a6e44b..75b0b2abc880 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -8,16 +8,17 @@
#ifndef __LINUX_MTD_MAP_H__
#define __LINUX_MTD_MAP_H__
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/string.h>
#include <linux/bug.h>
-#include <linux/kernel.h>
#include <linux/io.h>
-
-#include <asm/unaligned.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
#include <asm/barrier.h>
+struct device_node;
+struct module;
+
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
#define map_bankwidth(map) 1
#define map_bankwidth_is_1(map) (map_bankwidth(map) == 1)
@@ -188,6 +189,7 @@ typedef union {
of living.
*/
+struct mtd_chip_driver;
struct map_info {
const char *name;
unsigned long size;
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index a89955f3cbc8..8d10d9d2e830 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -40,6 +40,12 @@ struct mtd_erase_region_info {
unsigned long *lockmap; /* If keeping bitmap of locks */
};
+struct mtd_req_stats {
+ unsigned int uncorrectable_errors;
+ unsigned int corrected_bitflips;
+ unsigned int max_bitflips;
+};
+
/**
* struct mtd_oob_ops - oob operation operands
* @mode: operation mode
@@ -70,10 +76,9 @@ struct mtd_oob_ops {
uint32_t ooboffs;
uint8_t *datbuf;
uint8_t *oobbuf;
+ struct mtd_req_stats *stats;
};
-#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
-#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
/**
* struct mtd_oob_region - oob region definition
* @offset: region offset
@@ -190,9 +195,6 @@ struct module; /* only needed for owner field in mtd_info */
*/
struct mtd_debug_info {
struct dentry *dfs_dir;
-
- const char *partname;
- const char *partid;
};
/**
@@ -221,7 +223,7 @@ struct mtd_part {
* @partitions_lock: lock protecting accesses to the partition list. Protects
* not only the master partition list, but also all
* sub-partitions.
- * @suspended: et to 1 when the device is suspended, 0 otherwise
+ * @suspended: set to 1 when the device is suspended, 0 otherwise
*
* This struct is embedded in mtd_info and contains master-specific
* properties/fields. The master is the root MTD device from the MTD partition
@@ -377,9 +379,11 @@ struct mtd_info {
struct module *owner;
struct device dev;
- int usecount;
+ struct kref refcnt;
struct mtd_debug_info dbg;
struct nvmem_device *nvmem;
+ struct nvmem_device *otp_user_nvmem;
+ struct nvmem_device *otp_factory_nvmem;
/*
* Parent device from the MTD partition point of view.
@@ -392,10 +396,8 @@ struct mtd_info {
/* List of partitions attached to this MTD device */
struct list_head partitions;
- union {
- struct mtd_part part;
- struct mtd_master master;
- };
+ struct mtd_part part;
+ struct mtd_master master;
};
static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd)
@@ -682,6 +684,7 @@ extern int mtd_device_unregister(struct mtd_info *master);
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
+extern struct mtd_info *of_get_mtd_device_by_node(struct device_node *np);
extern struct mtd_info *get_mtd_device_nm(const char *name);
extern void put_mtd_device(struct mtd_info *mtd);
@@ -711,4 +714,11 @@ static inline int mtd_is_bitflip_or_eccerr(int err) {
unsigned mtd_mmap_capabilities(struct mtd_info *mtd);
+#ifdef CONFIG_DEBUG_FS
+bool mtd_check_expert_analysis_mode(void);
+#else
+static inline bool mtd_check_expert_analysis_mode(void) { return false; }
+#endif
+
+
#endif /* __MTD_MTD_H__ */
diff --git a/include/linux/mtd/nand-ecc-mtk.h b/include/linux/mtd/nand-ecc-mtk.h
new file mode 100644
index 000000000000..0e48c36e6ca0
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-mtk.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * MTK SDG1 ECC controller
+ *
+ * Copyright (c) 2016 Mediatek
+ * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
+ * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
+ */
+
+#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
+#define __DRIVERS_MTD_NAND_MTK_ECC_H__
+
+#include <linux/types.h>
+
+enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
+enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
+
+struct device_node;
+struct mtk_ecc;
+
+struct mtk_ecc_stats {
+ u32 corrected;
+ u32 bitflips;
+ u32 failed;
+};
+
+struct mtk_ecc_config {
+ enum mtk_ecc_operation op;
+ enum mtk_ecc_mode mode;
+ dma_addr_t addr;
+ u32 strength;
+ u32 sectors;
+ u32 len;
+};
+
+int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32);
+void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
+int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
+int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
+void mtk_ecc_disable(struct mtk_ecc *);
+void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc);
+
+struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
+void mtk_ecc_release(struct mtk_ecc *);
+
+#endif
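
Editor's illustration: a rough lifecycle sketch for the API above. The devicetree node, the ECC strength and the skipped mtk_ecc_wait_done() step are assumptions of the example.

/* Hedged example: one ECC_ENCODE round trip with the mtk_ecc engine. */
static int example_mtk_ecc_encode(struct device_node *np, u8 *data, u32 len)
{
	struct mtk_ecc_config config = {
		.op = ECC_ENCODE,
		.mode = ECC_DMA_MODE,
		.strength = 4,		/* illustrative strength, in bits */
		.len = len,
	};
	struct mtk_ecc *ecc;
	int ret;

	ecc = of_mtk_ecc_get(np);
	if (IS_ERR_OR_NULL(ecc))
		return ecc ? PTR_ERR(ecc) : -ENODEV;

	ret = mtk_ecc_enable(ecc, &config);
	if (!ret) {
		ret = mtk_ecc_encode(ecc, &config, data, len);
		mtk_ecc_disable(ecc);
	}

	mtk_ecc_release(ecc);
	return ret;
}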
diff --git a/include/linux/mtd/nand-ecc-mxic.h b/include/linux/mtd/nand-ecc-mxic.h
new file mode 100644
index 000000000000..0da4b2999576
--- /dev/null
+++ b/include/linux/mtd/nand-ecc-mxic.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2019 Macronix
+ * Author: Miquèl Raynal <miquel.raynal@bootlin.com>
+ *
+ * Header for the Macronix external ECC engine.
+ */
+
+#ifndef __MTD_NAND_ECC_MXIC_H__
+#define __MTD_NAND_ECC_MXIC_H__
+
+#include <linux/platform_device.h>
+#include <linux/device.h>
+
+struct mxic_ecc_engine;
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_MXIC) && IS_REACHABLE(CONFIG_MTD_NAND_CORE)
+
+const struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void);
+struct nand_ecc_engine *mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev);
+void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng);
+int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
+ unsigned int direction, dma_addr_t dirmap);
+
+#else /* !CONFIG_MTD_NAND_ECC_MXIC */
+
+static inline const struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
+{
+ return NULL;
+}
+
+static inline struct nand_ecc_engine *
+mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng) {}
+
+static inline int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
+ unsigned int direction,
+ dma_addr_t dirmap)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_MTD_NAND_ECC_MXIC */
+
+#endif /* __MTD_NAND_ECC_MXIC_H__ */
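
Editor's illustration: because the stubs above return ERR_PTR(-EOPNOTSUPP) when the engine is compiled out, callers can probe for it unconditionally; a sketch:

/* Hedged example: optionally pick up the Macronix pipelined ECC engine. */
static struct nand_ecc_engine *example_get_engine(struct platform_device *spi_pdev)
{
	struct nand_ecc_engine *eng = mxic_ecc_get_pipelined_engine(spi_pdev);

	/* Treat "engine not available" as a soft failure */
	return IS_ERR(eng) ? NULL : eng;
}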
diff --git a/include/linux/mtd/nand-qpic-common.h b/include/linux/mtd/nand-qpic-common.h
new file mode 100644
index 000000000000..e8201d1b7cf9
--- /dev/null
+++ b/include/linux/mtd/nand-qpic-common.h
@@ -0,0 +1,483 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * QCOM QPIC common APIs header file
+ *
+ * Copyright (c) 2023 Qualcomm Inc.
+ * Authors: Md sadre Alam <quic_mdalam@quicinc.com>
+ *
+ */
+#ifndef __MTD_NAND_QPIC_COMMON_H__
+#define __MTD_NAND_QPIC_COMMON_H__
+
+/* NANDc reg offsets */
+#define NAND_FLASH_CMD 0x00
+#define NAND_ADDR0 0x04
+#define NAND_ADDR1 0x08
+#define NAND_FLASH_CHIP_SELECT 0x0c
+#define NAND_EXEC_CMD 0x10
+#define NAND_FLASH_STATUS 0x14
+#define NAND_BUFFER_STATUS 0x18
+#define NAND_DEV0_CFG0 0x20
+#define NAND_DEV0_CFG1 0x24
+#define NAND_DEV0_ECC_CFG 0x28
+#define NAND_AUTO_STATUS_EN 0x2c
+#define NAND_DEV1_CFG0 0x30
+#define NAND_DEV1_CFG1 0x34
+#define NAND_READ_ID 0x40
+#define NAND_READ_STATUS 0x44
+#define NAND_DEV_CMD0 0xa0
+#define NAND_DEV_CMD1 0xa4
+#define NAND_DEV_CMD2 0xa8
+#define NAND_DEV_CMD_VLD 0xac
+#define SFLASHC_BURST_CFG 0xe0
+#define NAND_ERASED_CW_DETECT_CFG 0xe8
+#define NAND_ERASED_CW_DETECT_STATUS 0xec
+#define NAND_EBI2_ECC_BUF_CFG 0xf0
+#define FLASH_BUF_ACC 0x100
+
+#define NAND_CTRL 0xf00
+#define NAND_VERSION 0xf08
+#define NAND_READ_LOCATION_0 0xf20
+#define NAND_READ_LOCATION_1 0xf24
+#define NAND_READ_LOCATION_2 0xf28
+#define NAND_READ_LOCATION_3 0xf2c
+#define NAND_READ_LOCATION_LAST_CW_0 0xf40
+#define NAND_READ_LOCATION_LAST_CW_1 0xf44
+#define NAND_READ_LOCATION_LAST_CW_2 0xf48
+#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
+
+/* dummy register offsets, used by qcom_write_reg_dma */
+#define NAND_DEV_CMD1_RESTORE 0xdead
+#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
+
+/* NAND_FLASH_CMD bits */
+#define PAGE_ACC BIT(4)
+#define LAST_PAGE BIT(5)
+
+/* NAND_FLASH_CHIP_SELECT bits */
+#define NAND_DEV_SEL 0
+#define DM_EN BIT(2)
+
+/* NAND_FLASH_STATUS bits */
+#define FS_OP_ERR BIT(4)
+#define FS_READY_BSY_N BIT(5)
+#define FS_MPU_ERR BIT(8)
+#define FS_DEVICE_STS_ERR BIT(16)
+#define FS_DEVICE_WP BIT(23)
+
+/* NAND_BUFFER_STATUS bits */
+#define BS_UNCORRECTABLE_BIT BIT(8)
+#define BS_CORRECTABLE_ERR_MSK 0x1f
+
+/* NAND_DEVn_CFG0 bits */
+#define DISABLE_STATUS_AFTER_WRITE BIT(4)
+#define CW_PER_PAGE_MASK GENMASK(8, 6)
+#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
+#define ECC_PARITY_SIZE_BYTES_RS GENMASK(22, 19)
+#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
+#define NUM_ADDR_CYCLES_MASK GENMASK(29, 27)
+#define STATUS_BFR_READ BIT(30)
+#define SET_RD_MODE_AFTER_STATUS BIT(31)
+
+/* NAND_DEVn_CFG0 bits */
+#define DEV0_CFG1_ECC_DISABLE BIT(0)
+#define WIDE_FLASH BIT(1)
+#define NAND_RECOVERY_CYCLES_MASK GENMASK(4, 2)
+#define CS_ACTIVE_BSY BIT(5)
+#define BAD_BLOCK_BYTE_NUM_MASK GENMASK(15, 6)
+#define BAD_BLOCK_IN_SPARE_AREA BIT(16)
+#define WR_RD_BSY_GAP_MASK GENMASK(22, 17)
+#define ENABLE_BCH_ECC BIT(27)
+
+/* NAND_DEV0_ECC_CFG bits */
+#define ECC_CFG_ECC_DISABLE BIT(0)
+#define ECC_SW_RESET BIT(1)
+#define ECC_MODE_MASK GENMASK(5, 4)
+#define ECC_MODE_4BIT 0
+#define ECC_MODE_8BIT 1
+#define ECC_PARITY_SIZE_BYTES_BCH_MASK GENMASK(12, 8)
+#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
+#define ECC_FORCE_CLK_OPEN BIT(30)
+
+/* NAND_DEV_CMD1 bits */
+#define READ_ADDR_MASK GENMASK(7, 0)
+
+/* NAND_DEV_CMD_VLD bits */
+#define READ_START_VLD BIT(0)
+#define READ_STOP_VLD BIT(1)
+#define WRITE_START_VLD BIT(2)
+#define ERASE_START_VLD BIT(3)
+#define SEQ_READ_START_VLD BIT(4)
+
+/* NAND_EBI2_ECC_BUF_CFG bits */
+#define NUM_STEPS_MASK GENMASK(9, 0)
+
+/* NAND_ERASED_CW_DETECT_CFG bits */
+#define ERASED_CW_ECC_MASK 1
+#define AUTO_DETECT_RES 0
+#define MASK_ECC BIT(ERASED_CW_ECC_MASK)
+#define RESET_ERASED_DET BIT(AUTO_DETECT_RES)
+#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
+#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
+#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
+
+/* NAND_ERASED_CW_DETECT_STATUS bits */
+#define PAGE_ALL_ERASED BIT(7)
+#define CODEWORD_ALL_ERASED BIT(6)
+#define PAGE_ERASED BIT(5)
+#define CODEWORD_ERASED BIT(4)
+#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
+#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
+
+/* NAND_READ_LOCATION_n bits */
+#define READ_LOCATION_OFFSET_MASK GENMASK(9, 0)
+#define READ_LOCATION_SIZE_MASK GENMASK(25, 16)
+#define READ_LOCATION_LAST_MASK BIT(31)
+
+/* Version Mask */
+#define NAND_VERSION_MAJOR_MASK 0xf0000000
+#define NAND_VERSION_MAJOR_SHIFT 28
+#define NAND_VERSION_MINOR_MASK 0x0fff0000
+#define NAND_VERSION_MINOR_SHIFT 16
+
+/* NAND OP_CMDs */
+#define OP_PAGE_READ 0x2
+#define OP_PAGE_READ_WITH_ECC 0x3
+#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
+#define OP_PAGE_READ_ONFI_READ 0x5
+#define OP_PROGRAM_PAGE 0x6
+#define OP_PAGE_PROGRAM_WITH_ECC 0x7
+#define OP_PROGRAM_PAGE_SPARE 0x9
+#define OP_BLOCK_ERASE 0xa
+#define OP_CHECK_STATUS 0xc
+#define OP_FETCH_ID 0xb
+#define OP_RESET_DEVICE 0xd
+
+/* Default Value for NAND_DEV_CMD_VLD */
+#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
+ ERASE_START_VLD | SEQ_READ_START_VLD)
+
+/* NAND_CTRL bits */
+#define BAM_MODE_EN BIT(0)
+
+/*
+ * The NAND controller performs reads/writes with ECC in 516-byte chunks.
+ * The driver calls these chunks 'steps' or 'codewords' interchangeably.
+ */
+#define NANDC_STEP_SIZE 512
+
+/*
+ * The largest page size we support is 8K; such a page has 16 steps/codewords
+ * of 512 bytes each.
+ */
+#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
+
+/* we read at most 3 registers per codeword scan */
+#define MAX_REG_RD (3 * MAX_NUM_STEPS)
+
+/* ECC modes supported by the controller */
+#define ECC_NONE BIT(0)
+#define ECC_RS_4BIT BIT(1)
+#define ECC_BCH_4BIT BIT(2)
+#define ECC_BCH_8BIT BIT(3)
+
+/*
+ * Returns the actual register address for all NAND_DEV_ registers
+ * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
+ */
+#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
+
+/* Returns the dma address for reg read buffer */
+#define reg_buf_dma_addr(chip, vaddr) \
+ ((chip)->reg_read_dma + \
+ ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
+
+#define QPIC_PER_CW_CMD_ELEMENTS 32
+#define QPIC_PER_CW_CMD_SGL 32
+#define QPIC_PER_CW_DATA_SGL 8
+
+#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
+
+/*
+ * Flags used in DMA descriptor preparation helper functions
+ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
+ */
+/* Don't set the EOT in current tx BAM sgl */
+#define NAND_BAM_NO_EOT BIT(0)
+/* Set the NWD flag in current BAM sgl */
+#define NAND_BAM_NWD BIT(1)
+/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
+#define NAND_BAM_NEXT_SGL BIT(2)
+/*
+ * The erased codeword status is used twice in a single transfer, so this
+ * flag determines the current value of the erased codeword status register
+ */
+#define NAND_ERASED_CW_SET BIT(4)
+
+#define MAX_ADDRESS_CYCLE 5
+
+/*
+ * This data type corresponds to the BAM transaction which will be used for all
+ * NAND transfers.
+ * @bam_ce - the array of BAM command elements
+ * @cmd_sgl - sgl for NAND BAM command pipe
+ * @data_sgl - sgl for NAND BAM consumer/producer pipe
+ * @last_data_desc - last DMA desc in data channel (tx/rx).
+ * @last_cmd_desc - last DMA desc in command channel.
+ * @txn_done - completion for NAND transfer.
+ * @bam_ce_nitems - the number of elements in the @bam_ce array
+ * @cmd_sgl_nitems - the number of elements in the @cmd_sgl array
+ * @data_sgl_nitems - the number of elements in the @data_sgl array
+ * @bam_ce_pos - the index in bam_ce which is available for next sgl
+ * @bam_ce_start - the index in bam_ce which marks the start position
+ * for the current sgl. It will be used for the size calculation
+ * of the current sgl
+ * @cmd_sgl_pos - current index in command sgl.
+ * @cmd_sgl_start - start index in command sgl.
+ * @tx_sgl_pos - current index in data sgl for tx.
+ * @tx_sgl_start - start index in data sgl for tx.
+ * @rx_sgl_pos - current index in data sgl for rx.
+ * @rx_sgl_start - start index in data sgl for rx.
+ */
+struct bam_transaction {
+ struct bam_cmd_element *bam_ce;
+ struct scatterlist *cmd_sgl;
+ struct scatterlist *data_sgl;
+ struct dma_async_tx_descriptor *last_data_desc;
+ struct dma_async_tx_descriptor *last_cmd_desc;
+ struct completion txn_done;
+
+ unsigned int bam_ce_nitems;
+ unsigned int cmd_sgl_nitems;
+ unsigned int data_sgl_nitems;
+
+ struct_group(bam_positions,
+ u32 bam_ce_pos;
+ u32 bam_ce_start;
+ u32 cmd_sgl_pos;
+ u32 cmd_sgl_start;
+ u32 tx_sgl_pos;
+ u32 tx_sgl_start;
+ u32 rx_sgl_pos;
+ u32 rx_sgl_start;
+
+ );
+};
+
+/*
+ * This data type corresponds to the nand dma descriptor
+ * @dma_desc - low level DMA engine descriptor
+ * @node - list node for inclusion in the controller's desc_list
+ *
+ * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
+ * ADM
+ * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
+ * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
+ * @dir - DMA transfer direction
+ */
+struct desc_info {
+ struct dma_async_tx_descriptor *dma_desc;
+ struct list_head node;
+
+ union {
+ struct scatterlist adm_sgl;
+ struct {
+ struct scatterlist *bam_sgl;
+ int sgl_cnt;
+ };
+ };
+ enum dma_data_direction dir;
+};
+
+/*
+ * Holds the current register values that we want to write. Acts as a
+ * contiguous chunk of memory which we use to write the controller registers
+ * through DMA.
+ */
+struct nandc_regs {
+ __le32 cmd;
+ __le32 addr0;
+ __le32 addr1;
+ __le32 chip_sel;
+ __le32 exec;
+
+ __le32 cfg0;
+ __le32 cfg1;
+ __le32 ecc_bch_cfg;
+
+ __le32 clrflashstatus;
+ __le32 clrreadstatus;
+
+ __le32 cmd1;
+ __le32 vld;
+
+ __le32 orig_cmd1;
+ __le32 orig_vld;
+
+ __le32 ecc_buf_cfg;
+ __le32 read_location0;
+ __le32 read_location1;
+ __le32 read_location2;
+ __le32 read_location3;
+ __le32 read_location_last0;
+ __le32 read_location_last1;
+ __le32 read_location_last2;
+ __le32 read_location_last3;
+ __le32 spi_cfg;
+ __le32 num_addr_cycle;
+ __le32 busy_wait_cnt;
+ __le32 flash_feature;
+
+ __le32 erased_cw_detect_cfg_clr;
+ __le32 erased_cw_detect_cfg_set;
+};
+
+/*
+ * NAND controller data struct
+ *
+ * @dev: parent device
+ *
+ * @base: MMIO base
+ *
+ * @core_clk: controller clock
+ * @aon_clk: another controller clock
+ *
+ * @regs: a contiguous chunk of memory for DMA register
+ * writes; contains the register values to be
+ * written to the controller
+ *
+ * @props: properties of current NAND controller,
+ * initialized via DT match data
+ *
+ * @controller: base controller structure
+ * @qspi: qpic spi structure
+ * @host_list: list containing all the chips attached to the
+ * controller
+ *
+ * @chan: dma channel
+ * @cmd_crci: ADM DMA CRCI for command flow control
+ * @data_crci: ADM DMA CRCI for data flow control
+ *
+ * @desc_list: DMA descriptor list (list of desc_infos)
+ *
+ * @data_buffer: our local DMA buffer for page read/writes,
+ * used when we can't use the buffer provided
+ * by upper layers directly
+ * @reg_read_buf: local buffer for reading back registers via DMA
+ *
+ * @base_phys: physical base address of controller registers
+ * @base_dma: dma base address of controller registers
+ * @reg_read_dma: contains dma address for register read buffer
+ *
+ * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
+ * functions
+ * @max_cwperpage: maximum QPIC codewords required, calculated
+ * from the page sizes of all connected NAND devices
+ *
+ * @reg_read_pos: marker for data read in reg_read_buf
+ *
+ * @cmd1/vld: some fixed controller register values
+ *
+ * @exec_opwrite: flag to select the correct number of codewords
+ * while reading status
+ */
+struct qcom_nand_controller {
+ struct device *dev;
+
+ void __iomem *base;
+
+ struct clk *core_clk;
+ struct clk *aon_clk;
+
+ struct nandc_regs *regs;
+ struct bam_transaction *bam_txn;
+
+ const struct qcom_nandc_props *props;
+
+ struct nand_controller *controller;
+ struct qpic_spi_nand *qspi;
+ struct list_head host_list;
+
+ union {
+ /* will be used only by QPIC for BAM DMA */
+ struct {
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
+ struct dma_chan *cmd_chan;
+ };
+
+ /* will be used only by EBI2 for ADM DMA */
+ struct {
+ struct dma_chan *chan;
+ unsigned int cmd_crci;
+ unsigned int data_crci;
+ };
+ };
+
+ struct list_head desc_list;
+
+ u8 *data_buffer;
+ __le32 *reg_read_buf;
+
+ phys_addr_t base_phys;
+ dma_addr_t base_dma;
+ dma_addr_t reg_read_dma;
+
+ int buf_size;
+ int buf_count;
+ int buf_start;
+ unsigned int max_cwperpage;
+
+ int reg_read_pos;
+
+ u32 cmd1, vld;
+ bool exec_opwrite;
+};
+
+/*
+ * This data type corresponds to the NAND controller properties which vary
+ * among different NAND controllers.
+ * @ecc_modes - ecc mode for NAND
+ * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
+ * @bam_offset - BAM buffer descriptor offset
+ * @supports_bam - whether NAND controller is using BAM
+ * @nandc_part_of_qpic - whether NAND controller is part of qpic IP
+ * @qpic_version2 - flag to indicate QPIC IP version 2
+ * @use_codeword_fixup - whether NAND has different layout for boot partitions
+ */
+struct qcom_nandc_props {
+ u32 ecc_modes;
+ u32 dev_cmd_reg_start;
+ u32 bam_offset;
+ bool supports_bam;
+ bool nandc_part_of_qpic;
+ bool qpic_version2;
+ bool use_codeword_fixup;
+};
+
+void qcom_free_bam_transaction(struct qcom_nand_controller *nandc);
+struct bam_transaction *qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc);
+void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc);
+void qcom_qpic_bam_dma_done(void *data);
+void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu);
+int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+ struct dma_chan *chan, unsigned long flags);
+int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr, int size, unsigned int flags);
+int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+ const void *vaddr, int size, unsigned int flags);
+int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read, int reg_off,
+ const void *vaddr, int size, bool flow_control);
+int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first, int num_regs,
+ unsigned int flags);
+int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr, int first,
+ int num_regs, unsigned int flags);
+int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
+ int size, unsigned int flags);
+int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off, const u8 *vaddr,
+ int size, unsigned int flags);
+int qcom_submit_descs(struct qcom_nand_controller *nandc);
+void qcom_clear_read_regs(struct qcom_nand_controller *nandc);
+void qcom_nandc_unalloc(struct qcom_nand_controller *nandc);
+int qcom_nandc_alloc(struct qcom_nand_controller *nandc);
+#endif
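
Editor's illustration: how the DMA helpers and flags above combine in practice. The register choice and flag usage are illustrative only; a real driver also programs NAND_EXEC_CMD and waits for completion.

/* Hedged example: queue a command write plus a status read, then submit. */
static int example_read_status(struct qcom_nand_controller *nandc)
{
	int ret;

	nandc->regs->cmd = cpu_to_le32(OP_CHECK_STATUS);

	ret = qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1,
				 NAND_BAM_NEXT_SGL);
	if (ret)
		return ret;

	ret = qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
	if (ret)
		return ret;

	return qcom_submit_descs(nandc);
}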
+
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 32fc7edf65b3..09c8c93e4dba 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -21,7 +21,7 @@ struct nand_device;
* @oobsize: OOB area size
* @pages_per_eraseblock: number of pages per eraseblock
* @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
- * @max_bad_eraseblocks_per_lun: maximum number of eraseblocks per LUN
+ * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
* @planes_per_lun: number of planes per LUN
* @luns_per_target: number of LUN per target (target is a synonym for die)
* @ntargets: total number of targets exposed by the NAND device
@@ -103,6 +103,8 @@ enum nand_page_io_req_type {
* @ooblen: the number of OOB bytes to read from/write to this page
* @oobbuf: buffer to store OOB data in or get OOB data from
* @mode: one of the %MTD_OPS_XXX mode
+ * @continuous: the operation does not need to be restarted at the end of each
+ * page, the NAND device will automatically prepare the next one
*
* This object is used to pass per-page I/O requests to NAND sub-layers. This
* way all useful information is already formatted in a useful way and
@@ -125,6 +127,7 @@ struct nand_page_io_req {
void *in;
} oobbuf;
int mode;
+ bool continuous;
};
const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
@@ -264,11 +267,35 @@ struct nand_ecc_engine_ops {
};
/**
+ * enum nand_ecc_engine_integration - How the NAND ECC engine is integrated
+ * @NAND_ECC_ENGINE_INTEGRATION_INVALID: Invalid value
+ * @NAND_ECC_ENGINE_INTEGRATION_PIPELINED: Pipelined engine, performs on-the-fly
+ * correction, does not need to copy
+ * data around
+ * @NAND_ECC_ENGINE_INTEGRATION_EXTERNAL: External engine, needs to bring the
+ * data into its own area before use
+ */
+enum nand_ecc_engine_integration {
+ NAND_ECC_ENGINE_INTEGRATION_INVALID,
+ NAND_ECC_ENGINE_INTEGRATION_PIPELINED,
+ NAND_ECC_ENGINE_INTEGRATION_EXTERNAL,
+};
+
+/**
* struct nand_ecc_engine - ECC engine abstraction for NAND devices
+ * @dev: Host device
+ * @node: Private field for registration time
* @ops: ECC engine operations
+ * @integration: How the engine is integrated with the host
+ * (only relevant on %NAND_ECC_ENGINE_TYPE_ON_HOST engines)
+ * @priv: Private data
*/
struct nand_ecc_engine {
- struct nand_ecc_engine_ops *ops;
+ struct device *dev;
+ struct list_head node;
+ const struct nand_ecc_engine_ops *ops;
+ enum nand_ecc_engine_integration integration;
+ void *priv;
};
void of_get_nand_ecc_user_config(struct nand_device *nand);
@@ -279,8 +306,28 @@ int nand_ecc_prepare_io_req(struct nand_device *nand,
int nand_ecc_finish_io_req(struct nand_device *nand,
struct nand_page_io_req *req);
bool nand_ecc_is_strong_enough(struct nand_device *nand);
+
+#if IS_REACHABLE(CONFIG_MTD_NAND_CORE)
+int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine);
+int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine);
+#else
+static inline int
+nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine)
+{
+ return -ENOTSUPP;
+}
+static inline int
+nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine)
+{
+ return -ENOTSUPP;
+}
+#endif
+
struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand);
struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand);
+struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand);
+void nand_ecc_put_on_host_hw_engine(struct nand_device *nand);
+struct device *nand_ecc_get_engine_dev(struct device *host);
#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING)
struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void);
@@ -862,19 +909,19 @@ static inline void nanddev_pos_next_page(struct nand_device *nand,
}
/**
- * nand_io_iter_init - Initialize a NAND I/O iterator
+ * nanddev_io_page_iter_init - Initialize a NAND I/O iterator
* @nand: NAND device
* @offs: absolute offset
* @req: MTD request
* @iter: NAND I/O iterator
*
* Initializes a NAND iterator based on the information passed by the MTD
- * layer.
+ * layer for page jumps.
*/
-static inline void nanddev_io_iter_init(struct nand_device *nand,
- enum nand_page_io_req_type reqtype,
- loff_t offs, struct mtd_oob_ops *req,
- struct nand_io_iter *iter)
+static inline void nanddev_io_page_iter_init(struct nand_device *nand,
+ enum nand_page_io_req_type reqtype,
+ loff_t offs, struct mtd_oob_ops *req,
+ struct nand_io_iter *iter)
{
struct mtd_info *mtd = nanddev_to_mtd(nand);
@@ -893,6 +940,43 @@ static inline void nanddev_io_iter_init(struct nand_device *nand,
iter->req.ooblen = min_t(unsigned int,
iter->oobbytes_per_page - iter->req.ooboffs,
iter->oobleft);
+ iter->req.continuous = false;
+}
+
+/**
+ * nanddev_io_block_iter_init - Initialize a NAND I/O iterator
+ * @nand: NAND device
+ * @offs: absolute offset
+ * @req: MTD request
+ * @iter: NAND I/O iterator
+ *
+ * Initializes a NAND iterator based on the information passed by the MTD
+ * layer for block jumps (no OOB).
+ *
+ * In practice only reads may leverage this iterator.
+ */
+static inline void nanddev_io_block_iter_init(struct nand_device *nand,
+ enum nand_page_io_req_type reqtype,
+ loff_t offs, struct mtd_oob_ops *req,
+ struct nand_io_iter *iter)
+{
+ unsigned int offs_in_eb;
+
+ iter->req.type = reqtype;
+ iter->req.mode = req->mode;
+ iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
+ iter->req.ooboffs = 0;
+ iter->oobbytes_per_page = 0;
+ iter->dataleft = req->len;
+ iter->oobleft = 0;
+ iter->req.databuf.in = req->datbuf;
+ offs_in_eb = (nand->memorg.pagesize * iter->req.pos.page) + iter->req.dataoffs;
+ iter->req.datalen = min_t(unsigned int,
+ nanddev_eraseblock_size(nand) - offs_in_eb,
+ iter->dataleft);
+ iter->req.oobbuf.in = NULL;
+ iter->req.ooblen = 0;
+ iter->req.continuous = true;
}
/**
@@ -919,6 +1003,25 @@ static inline void nanddev_io_iter_next_page(struct nand_device *nand,
}
/**
+ * nanddev_io_iter_next_block - Move to the next block
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Updates the @iter to point to the next block.
+ * No OOB handling available.
+ */
+static inline void nanddev_io_iter_next_block(struct nand_device *nand,
+ struct nand_io_iter *iter)
+{
+ nanddev_pos_next_eraseblock(nand, &iter->req.pos);
+ iter->dataleft -= iter->req.datalen;
+ iter->req.databuf.in += iter->req.datalen;
+ iter->req.dataoffs = 0;
+ iter->req.datalen = min_t(unsigned int, nanddev_eraseblock_size(nand),
+ iter->dataleft);
+}
+
+/**
* nanddev_io_iter_end - Should end iteration or not
* @nand: NAND device
* @iter: NAND I/O iterator
@@ -946,22 +1049,41 @@ static inline bool nanddev_io_iter_end(struct nand_device *nand,
* @req: MTD I/O request
* @iter: NAND I/O iterator
*
- * Should be used for iterate over pages that are contained in an MTD request.
+ * Should be used for iterating over pages that are contained in an MTD request.
*/
#define nanddev_io_for_each_page(nand, type, start, req, iter) \
- for (nanddev_io_iter_init(nand, type, start, req, iter); \
+ for (nanddev_io_page_iter_init(nand, type, start, req, iter); \
!nanddev_io_iter_end(nand, iter); \
nanddev_io_iter_next_page(nand, iter))
+/**
+ * nanddev_io_for_each_block - Iterate over all NAND pages contained in an MTD I/O
+ * request, one block at a time
+ * @nand: NAND device
+ * @start: start address to read/write from
+ * @req: MTD I/O request
+ * @iter: NAND I/O iterator
+ *
+ * Should be used for iterating over blocks that are contained in an MTD request.
+ */
+#define nanddev_io_for_each_block(nand, type, start, req, iter) \
+ for (nanddev_io_block_iter_init(nand, type, start, req, iter); \
+ !nanddev_io_iter_end(nand, iter); \
+ nanddev_io_iter_next_block(nand, iter))
+
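Editor's illustration: a sketch of a read path driving the new block iterator; example_read_block() is a hypothetical per-chunk handler, and NAND_PAGE_READ is the request type assumed for continuous reads.

/* Hedged example: walk an MTD read request one eraseblock at a time. */
static int example_continuous_read(struct nand_device *nand, loff_t offs,
				   struct mtd_oob_ops *req)
{
	struct nand_io_iter iter;
	int ret;

	nanddev_io_for_each_block(nand, NAND_PAGE_READ, offs, req, &iter) {
		ret = example_read_block(nand, &iter.req);	/* hypothetical */
		if (ret)
			return ret;
	}

	return 0;
}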
bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
-int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
/* ECC related functions */
int nanddev_ecc_engine_init(struct nand_device *nand);
void nanddev_ecc_engine_cleanup(struct nand_device *nand);
+static inline void *nand_to_ecc_ctx(struct nand_device *nand)
+{
+ return nand->ecc.ctx.priv;
+}
+
/* BBT related functions */
enum nand_bbt_block_status {
NAND_BBT_BLOCK_STATUS_UNKNOWN,
@@ -1014,4 +1136,9 @@ static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len);
+int nand_check_erased_ecc_chunk(void *data, int datalen,
+ void *ecc, int ecclen,
+ void *extraoob, int extraooblen,
+ int threshold);
+
#endif /* __LINUX_MTD_NAND_H */
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
index 339ac798568e..55ab2e4d62f9 100644
--- a/include/linux/mtd/onfi.h
+++ b/include/linux/mtd/onfi.h
@@ -11,6 +11,7 @@
#define __LINUX_MTD_ONFI_H
#include <linux/types.h>
+#include <linux/bitfield.h>
/* ONFI version bits */
#define ONFI_VERSION_1_0 BIT(1)
@@ -24,17 +25,22 @@
#define ONFI_VERSION_4_0 BIT(9)
/* ONFI features */
-#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
-#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
+#define ONFI_FEATURE_16_BIT_BUS BIT(0)
+#define ONFI_FEATURE_NV_DDR BIT(5)
+#define ONFI_FEATURE_EXT_PARAM_PAGE BIT(7)
/* ONFI timing mode, used in both asynchronous and synchronous mode */
-#define ONFI_TIMING_MODE_0 (1 << 0)
-#define ONFI_TIMING_MODE_1 (1 << 1)
-#define ONFI_TIMING_MODE_2 (1 << 2)
-#define ONFI_TIMING_MODE_3 (1 << 3)
-#define ONFI_TIMING_MODE_4 (1 << 4)
-#define ONFI_TIMING_MODE_5 (1 << 5)
-#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
+#define ONFI_DATA_INTERFACE_SDR 0
+#define ONFI_DATA_INTERFACE_NVDDR BIT(4)
+#define ONFI_DATA_INTERFACE_NVDDR2 BIT(5)
+#define ONFI_TIMING_MODE_0 BIT(0)
+#define ONFI_TIMING_MODE_1 BIT(1)
+#define ONFI_TIMING_MODE_2 BIT(2)
+#define ONFI_TIMING_MODE_3 BIT(3)
+#define ONFI_TIMING_MODE_4 BIT(4)
+#define ONFI_TIMING_MODE_5 BIT(5)
+#define ONFI_TIMING_MODE_UNKNOWN BIT(6)
+#define ONFI_TIMING_MODE_PARAM(x) FIELD_GET(GENMASK(3, 0), (x))
/* ONFI feature number/address */
#define ONFI_FEATURE_NUMBER 256
@@ -49,7 +55,8 @@
#define ONFI_SUBFEATURE_PARAM_LEN 4
/* ONFI optional commands SET/GET FEATURES supported? */
-#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2)
+#define ONFI_OPT_CMD_READ_CACHE BIT(1)
+#define ONFI_OPT_CMD_SET_GET_FEATURES BIT(2)
struct nand_onfi_params {
/* rev info and features block */
@@ -93,14 +100,15 @@ struct nand_onfi_params {
/* electrical parameter block */
u8 io_pin_capacitance_max;
- __le16 async_timing_mode;
+ __le16 sdr_timing_modes;
__le16 program_cache_timing_mode;
__le16 t_prog;
__le16 t_bers;
__le16 t_r;
__le16 t_ccs;
- __le16 src_sync_timing_mode;
- u8 src_ssync_features;
+ u8 nvddr_timing_modes;
+ u8 nvddr2_timing_modes;
+ u8 nvddr_nvddr2_features;
__le16 clk_pin_capacitance_typ;
__le16 io_pin_capacitance_typ;
__le16 input_pin_capacitance_typ;
@@ -160,7 +168,9 @@ struct onfi_ext_param_page {
* @tBERS: Block erase time
* @tR: Page read time
* @tCCS: Change column setup time
- * @async_timing_mode: Supported asynchronous timing mode
+ * @fast_tCAD: Command/Address/Data slow or fast delay (NV-DDR only)
+ * @sdr_timing_modes: Supported asynchronous/SDR timing modes
+ * @nvddr_timing_modes: Supported source synchronous/NV-DDR timing modes
* @vendor_revision: Vendor specific revision number
* @vendor: Vendor specific data
*/
@@ -170,7 +180,9 @@ struct onfi_params {
u16 tBERS;
u16 tR;
u16 tCCS;
- u16 async_timing_mode;
+ bool fast_tCAD;
+ u16 sdr_timing_modes;
+ u16 nvddr_timing_modes;
u16 vendor_revision;
u8 vendor[88];
};
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
index 2e3f43788d48..0421f12156b5 100644
--- a/include/linux/mtd/qinfo.h
+++ b/include/linux/mtd/qinfo.h
@@ -24,7 +24,7 @@ struct lpddr_private {
struct qinfo_chip *qinfo;
int numchips;
unsigned long chipshift;
- struct flchip chips[];
+ struct flchip chips[] __counted_by(numchips);
};
/* qinfo_query_info structure contains request information for
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 29df2f43dcb5..d30bdc3fcfd7 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -24,6 +24,7 @@
#include <linux/types.h>
struct nand_chip;
+struct gpio_desc;
/* The maximum number of NAND chips in an array */
#define NAND_MAX_CHIPS 8
@@ -66,6 +67,8 @@ struct nand_chip;
/* Extended commands for large page devices */
#define NAND_CMD_READSTART 0x30
+#define NAND_CMD_READCACHESEQ 0x31
+#define NAND_CMD_READCACHEEND 0x3f
#define NAND_CMD_RNDOUTSTART 0xE0
#define NAND_CMD_CACHEDPROG 0x15
@@ -222,6 +225,7 @@ struct nand_chip;
* struct nand_parameters - NAND generic parameters from the parameter page
* @model: Model name
* @supports_set_get_features: The NAND chip supports setting/getting features
+ * @supports_read_cache: The NAND chip supports read cache operations
* @set_feature_list: Bitmap of features that can be set
* @get_feature_list: Bitmap of features that can be get
* @onfi: ONFI specific parameters
@@ -230,6 +234,7 @@ struct nand_parameters {
/* Generic parameters */
const char *model;
bool supports_set_get_features;
+ bool supports_read_cache;
DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
@@ -385,8 +390,8 @@ struct nand_ecc_ctrl {
* This struct defines the timing requirements of an SDR NAND chip.
* This information can be found in every NAND datasheet and the timing
* meanings are described in the ONFI specifications:
- * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing
- * Parameters)
+ * https://media-www.micron.com/-/media/client/onfi/specs/onfi_3_1_spec.pdf
+ * (chapter 4.15 Timing Parameters)
*
* All these timings are expressed in picoseconds.
*
@@ -472,11 +477,127 @@ struct nand_sdr_timings {
};
/**
+ * struct nand_nvddr_timings - NV-DDR NAND chip timings
+ *
+ * This struct defines the timing requirements of an NV-DDR NAND data interface.
+ * This information can be found in every NAND datasheet and the timing
+ * meanings are described in the ONFI specifications:
+ * https://media-www.micron.com/-/media/client/onfi/specs/onfi_4_1_gold.pdf
+ * (chapter 4.18.2 NV-DDR)
+ *
+ * All these timings are expressed in picoseconds.
+ *
+ * @tBERS_max: Block erase time
+ * @tCCS_min: Change column setup time
+ * @tPROG_max: Page program time
+ * @tR_max: Page read time
+ * @tAC_min: Access window of DQ[7:0] from CLK
+ * @tAC_max: Access window of DQ[7:0] from CLK
+ * @tADL_min: ALE to data loading time
+ * @tCAD_min: Command, Address, Data delay
+ * @tCAH_min: Command/Address DQ hold time
+ * @tCALH_min: W/R_n, CLE and ALE hold time
+ * @tCALS_min: W/R_n, CLE and ALE setup time
+ * @tCAS_min: Command/address DQ setup time
+ * @tCEH_min: CE# high hold time
+ * @tCH_min: CE# hold time
+ * @tCK_min: Average clock cycle time
+ * @tCS_min: CE# setup time
+ * @tDH_min: Data hold time
+ * @tDQSCK_min: Start of the access window of DQS from CLK
+ * @tDQSCK_max: End of the access window of DQS from CLK
+ * @tDQSD_min: Min W/R_n low to DQS/DQ driven by device
+ * @tDQSD_max: Max W/R_n low to DQS/DQ driven by device
+ * @tDQSHZ_max: W/R_n high to DQS/DQ tri-state by device
+ * @tDQSQ_max: DQS-DQ skew, DQS to last DQ valid, per access
+ * @tDS_min: Data setup time
+ * @tDSC_min: DQS cycle time
+ * @tFEAT_max: Busy time for Set Features and Get Features
+ * @tITC_max: Interface and Timing Mode Change time
+ * @tQHS_max: Data hold skew factor
+ * @tRHW_min: Data output cycle to command, address, or data input cycle
+ * @tRR_min: Ready to RE# low (data only)
+ * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
+ * rising edge of R/B#.
+ * @tWB_max: WE# high to SR[6] low
+ * @tWHR_min: WE# high to RE# low
+ * @tWRCK_min: W/R_n low to data output cycle
+ * @tWW_min: WP# transition to WE# low
+ */
+struct nand_nvddr_timings {
+ u64 tBERS_max;
+ u32 tCCS_min;
+ u64 tPROG_max;
+ u64 tR_max;
+ u32 tAC_min;
+ u32 tAC_max;
+ u32 tADL_min;
+ u32 tCAD_min;
+ u32 tCAH_min;
+ u32 tCALH_min;
+ u32 tCALS_min;
+ u32 tCAS_min;
+ u32 tCEH_min;
+ u32 tCH_min;
+ u32 tCK_min;
+ u32 tCS_min;
+ u32 tDH_min;
+ u32 tDQSCK_min;
+ u32 tDQSCK_max;
+ u32 tDQSD_min;
+ u32 tDQSD_max;
+ u32 tDQSHZ_max;
+ u32 tDQSQ_max;
+ u32 tDS_min;
+ u32 tDSC_min;
+ u32 tFEAT_max;
+ u32 tITC_max;
+ u32 tQHS_max;
+ u32 tRHW_min;
+ u32 tRR_min;
+ u32 tRST_max;
+ u32 tWB_max;
+ u32 tWHR_min;
+ u32 tWRCK_min;
+ u32 tWW_min;
+};
+
+/*
+ * While timings related to the data interface itself are mostly different
+ * between SDR and NV-DDR, timings related to the internal chip behavior are
+ * common. IOW, the following entries which describe the internal delays have
+ * the same definition and are shared in both SDR and NV-DDR timing structures:
+ * - tADL_min
+ * - tBERS_max
+ * - tCCS_min
+ * - tFEAT_max
+ * - tPROG_max
+ * - tR_max
+ * - tRR_min
+ * - tRST_max
+ * - tWB_max
+ *
+ * The macros below return the value of a given timing regardless of the interface.
+ */
+#define NAND_COMMON_TIMING_PS(conf, timing_name) \
+ nand_interface_is_sdr(conf) ? \
+ nand_get_sdr_timings(conf)->timing_name : \
+ nand_get_nvddr_timings(conf)->timing_name
+
+#define NAND_COMMON_TIMING_MS(conf, timing_name) \
+ PSEC_TO_MSEC(NAND_COMMON_TIMING_PS((conf), timing_name))
+
+#define NAND_COMMON_TIMING_NS(conf, timing_name) \
+ PSEC_TO_NSEC(NAND_COMMON_TIMING_PS((conf), timing_name))
+
+/**
* enum nand_interface_type - NAND interface type
* @NAND_SDR_IFACE: Single Data Rate interface
+ * @NAND_NVDDR_IFACE: Double Data Rate interface
*/
enum nand_interface_type {
NAND_SDR_IFACE,
+ NAND_NVDDR_IFACE,
};
/**
@@ -485,6 +606,7 @@ enum nand_interface_type {
* @timings: The timing information
* @timings.mode: Timing mode as defined in the specification
* @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
+ * @timings.nvddr: Use it when @type is %NAND_NVDDR_IFACE.
*/
struct nand_interface_config {
enum nand_interface_type type;
@@ -492,24 +614,56 @@ struct nand_interface_config {
unsigned int mode;
union {
struct nand_sdr_timings sdr;
+ struct nand_nvddr_timings nvddr;
};
} timings;
};
/**
+ * nand_interface_is_sdr - get the interface type
+ * @conf: The data interface
+ */
+static inline bool nand_interface_is_sdr(const struct nand_interface_config *conf)
+{
+ return conf->type == NAND_SDR_IFACE;
+}
+
+/**
+ * nand_interface_is_nvddr - get the interface type
+ * @conf: The data interface
+ */
+static inline bool nand_interface_is_nvddr(const struct nand_interface_config *conf)
+{
+ return conf->type == NAND_NVDDR_IFACE;
+}
+
+/**
* nand_get_sdr_timings - get SDR timing from data interface
* @conf: The data interface
*/
static inline const struct nand_sdr_timings *
nand_get_sdr_timings(const struct nand_interface_config *conf)
{
- if (conf->type != NAND_SDR_IFACE)
+ if (!nand_interface_is_sdr(conf))
return ERR_PTR(-EINVAL);
return &conf->timings.sdr;
}
/**
+ * nand_get_nvddr_timings - get NV-DDR timing from data interface
+ * @conf: The data interface
+ */
+static inline const struct nand_nvddr_timings *
+nand_get_nvddr_timings(const struct nand_interface_config *conf)
+{
+ if (!nand_interface_is_nvddr(conf))
+ return ERR_PTR(-EINVAL);
+
+ return &conf->timings.nvddr;
+}
+
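Editor's illustration: since tR_max is one of the shared entries listed above, a timeout derivation can stay interface-agnostic; a sketch:

/* Hedged example: derive a page-read timeout valid for both SDR and NV-DDR. */
static unsigned long example_read_timeout_ms(const struct nand_interface_config *conf)
{
	/* tR_max has the same meaning in both timing structures */
	return NAND_COMMON_TIMING_MS(conf, tR_max);
}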
+/**
* struct nand_op_cmd_instr - Definition of a command instruction
* @opcode: the command to issue in one cycle
*/
@@ -849,6 +1003,8 @@ struct nand_op_parser {
/**
* struct nand_operation - NAND operation descriptor
* @cs: the CS line to select for this NAND operation
+ * @deassert_wp: set to true when the operation requires the WP pin to be
+ * de-asserted (ERASE, PROG, ...)
* @instrs: array of instructions to execute
* @ninstrs: length of the @instrs array
*
@@ -856,6 +1012,7 @@ struct nand_op_parser {
*/
struct nand_operation {
unsigned int cs;
+ bool deassert_wp;
const struct nand_op_instr *instrs;
unsigned int ninstrs;
};
@@ -867,6 +1024,14 @@ struct nand_operation {
.ninstrs = ARRAY_SIZE(_instrs), \
}
+#define NAND_DESTRUCTIVE_OPERATION(_cs, _instrs) \
+ { \
+ .cs = _cs, \
+ .deassert_wp = true, \
+ .instrs = _instrs, \
+ .ninstrs = ARRAY_SIZE(_instrs), \
+ }
+
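Editor's illustration: what NAND_DESTRUCTIVE_OPERATION() expands to, written out by hand for an erase; the instruction array and the executor are placeholders, since the instruction helpers sit outside this excerpt.

/* Hedged example: a destructive operation carries deassert_wp = true. */
static int example_erase(struct nand_chip *chip, unsigned int cs,
			 const struct nand_op_instr *instrs, unsigned int n)
{
	struct nand_operation op = {
		.cs = cs,
		.deassert_wp = true,	/* what NAND_DESTRUCTIVE_OPERATION() sets */
		.instrs = instrs,
		.ninstrs = n,
	};

	return example_exec_op(chip, &op);	/* hypothetical executor */
}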
int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_op_parser *parser,
const struct nand_operation *op, bool check_only);
@@ -923,7 +1088,7 @@ static inline void nand_op_trace(const char *prefix,
* @exec_op: controller specific method to execute NAND operations.
* This method replaces chip->legacy.cmdfunc(),
* chip->legacy.{read,write}_{buf,byte,word}(),
- * chip->legacy.dev_ready() and chip->legacy.waifunc().
+ * chip->legacy.dev_ready() and chip->legacy.waitfunc().
* @setup_interface: setup the data interface and timing. If chipnr is set to
* %NAND_DATA_IFACE_CHECK_ONLY this means the configuration
* should not be applied but only checked.
@@ -944,10 +1109,22 @@ struct nand_controller_ops {
*
* @lock: lock used to serialize accesses to the NAND controller
* @ops: NAND controller operations.
+ * @supported_op: NAND controller known-to-be-supported operations,
+ * only writable by the core after initial checking.
+ * @supported_op.data_only_read: The controller supports reading more data from
+ * the bus without restarting an entire read operation nor
+ * changing the column.
+ * @supported_op.cont_read: The controller supports sequential cache reads.
+ * @controller_wp: the controller is in charge of handling the WP pin.
*/
struct nand_controller {
struct mutex lock;
const struct nand_controller_ops *ops;
+ struct {
+ unsigned int data_only_read: 1;
+ unsigned int cont_read: 1;
+ } supported_op;
+ bool controller_wp;
};
static inline void nand_controller_init(struct nand_controller *nfc)
@@ -1090,6 +1267,7 @@ struct nand_secure_region {
* @lock: Lock protecting the suspended field. Also used to serialize accesses
* to the NAND device
* @suspended: Set to 1 when the device is suspended, 0 when it's not
+ * @resume_wq: wait queue to sleep if rawnand is in suspended state.
* @cur_cs: Currently selected target. -1 means no target selected, otherwise we
* should always have cur_cs >= 0 && cur_cs < nanddev_ntargets().
* NAND Controller drivers should not modify this value, but they're
@@ -1097,6 +1275,11 @@ struct nand_secure_region {
* @read_retries: The number of read retry modes supported
* @secure_regions: Structure containing the secure regions info
* @nr_secure_regions: Number of secure regions
+ * @cont_read: Sequential page read internals
+ * @cont_read.ongoing: Whether a continuous read is ongoing or not
+ * @cont_read.first_page: Start of the continuous read operation
+ * @cont_read.pause_page: End of the current sequential cache read operation
+ * @cont_read.last_page: End of the continuous read operation
* @controller: The hardware controller structure which is shared among multiple
* independent devices
* @ecc: The ECC controller structure
@@ -1144,10 +1327,17 @@ struct nand_chip {
/* Internals */
struct mutex lock;
unsigned int suspended : 1;
+ wait_queue_head_t resume_wq;
int cur_cs;
int read_retries;
struct nand_secure_region *secure_regions;
u8 nr_secure_regions;
+ struct {
+ bool ongoing;
+ unsigned int first_page;
+ unsigned int pause_page;
+ unsigned int last_page;
+ } cont_read;
/* Externals */
struct nand_controller *controller;
@@ -1329,11 +1519,6 @@ int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
unsigned char *read_ecc, unsigned char *calc_ecc);
void rawnand_sw_bch_cleanup(struct nand_chip *chip);
-int nand_check_erased_ecc_chunk(void *data, int datalen,
- void *ecc, int ecclen,
- void *extraoob, int extraooblen,
- int threshold);
-
int nand_ecc_choose_conf(struct nand_chip *chip,
const struct nand_ecc_caps *caps, int oobavail);
@@ -1367,6 +1552,7 @@ int nand_reset_op(struct nand_chip *chip);
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
unsigned int len);
int nand_status_op(struct nand_chip *chip, u8 *status);
+int nand_exit_status_op(struct nand_chip *chip);
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf, unsigned int len);
@@ -1389,6 +1575,8 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
bool force_8bit, bool check_only);
int nand_write_data_op(struct nand_chip *chip, const void *buf,
unsigned int len, bool force_8bit);
+int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
/* Scan and identify a NAND device */
int nand_scan_with_ids(struct nand_chip *chip, unsigned int max_chips,
@@ -1413,7 +1601,6 @@ void nand_cleanup(struct nand_chip *chip);
* instruction and have no physical pin to check it.
*/
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
-struct gpio_desc;
int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
unsigned long timeout_ms);
@@ -1446,4 +1633,8 @@ static inline void *nand_get_data_buf(struct nand_chip *chip)
return chip->data_buf;
}
+/* Parse the gpio-cs property */
+int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
+ unsigned int *ncs_array);
+
#endif /* __LINUX_MTD_RAWNAND_H */
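
A platform driver could then hand the parsed descriptors to its chip-select logic. A minimal sketch, assuming a pdev with a gpio-cs property:

	struct gpio_desc **cs_array;
	unsigned int ncs;
	int ret;

	ret = rawnand_dt_parse_gpio_cs(&pdev->dev, &cs_array, &ncs);
	if (ret)
		return ret;
	/* cs_array[0..ncs-1] now hold the (possibly NULL) CS GPIO descriptors */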
diff --git a/include/linux/mtd/spear_smi.h b/include/linux/mtd/spear_smi.h
index 581603ac1277..871634862627 100644
--- a/include/linux/mtd/spear_smi.h
+++ b/include/linux/mtd/spear_smi.h
@@ -31,12 +31,12 @@
* struct spear_smi_flash_info - platform structure for passing flash
* information
*
- * name: name of the serial nor flash for identification
- * mem_base: the memory base on which the flash is mapped
- * size: size of the flash in bytes
- * partitions: parition details
- * nr_partitions: number of partitions
- * fast_mode: whether flash supports fast mode
+ * @name: name of the serial nor flash for identification
+ * @mem_base: the memory base on which the flash is mapped
+ * @size: size of the flash in bytes
 + * @partitions: partition details
+ * @nr_partitions: number of partitions
+ * @fast_mode: whether flash supports fast mode
*/
struct spear_smi_flash_info {
@@ -51,9 +51,10 @@ struct spear_smi_flash_info {
/**
* struct spear_smi_plat_data - platform structure for configuring smi
*
- * clk_rate: clk rate at which SMI must operate
- * num_flashes: number of flashes present on board
- * board_flash_info: specific details of each flash present on board
+ * @clk_rate: clk rate at which SMI must operate
+ * @num_flashes: number of flashes present on board
+ * @board_flash_info: specific details of each flash present on board
+ * @np: array of DT node pointers for all possible flash chip devices
*/
struct spear_smi_plat_data {
unsigned long clk_rate;
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 98ed91b529ea..cdcfe0fd2e7d 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -7,7 +7,6 @@
#define __LINUX_MTD_SPI_NOR_H
#include <linux/bitops.h>
-#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/spi/spi-mem.h>
@@ -47,10 +46,6 @@
#define SPINOR_OP_RDID 0x9f /* Read JEDEC ID */
#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */
#define SPINOR_OP_RDCR 0x35 /* Read configuration register */
-#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
-#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */
-#define SPINOR_OP_RDEAR 0xc8 /* Read Extended Address Register */
-#define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */
#define SPINOR_OP_SRSTEN 0x66 /* Software Reset Enable */
#define SPINOR_OP_SRST 0x99 /* Software Reset */
#define SPINOR_OP_GBULK 0x98 /* Global Block Unlock */
@@ -86,22 +81,12 @@
#define SPINOR_OP_BP 0x02 /* Byte program */
#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
-/* Used for S3AN flashes only */
-#define SPINOR_OP_XSE 0x50 /* Sector erase */
-#define SPINOR_OP_XPP 0x82 /* Page program */
-#define SPINOR_OP_XRDSR 0xd7 /* Read status register */
-
-#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */
-#define XSR_RDY BIT(7) /* Ready */
-
-
/* Used for Macronix and Winbond flashes. */
#define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */
#define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */
/* Used for Spansion flashes only. */
#define SPINOR_OP_BRWR 0x17 /* Bank register write */
-#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
/* Used for Micron flashes only. */
#define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */
@@ -135,12 +120,6 @@
/* Enhanced Volatile Configuration Register bits */
#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
-/* Flag Status Register bits */
-#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */
-#define FSR_E_ERR BIT(5) /* Erase operation status */
-#define FSR_P_ERR BIT(4) /* Program operation status */
-#define FSR_PT_ERR BIT(1) /* Protection error bit */
-
/* Status Register 2 bits. */
#define SR2_QUAD_EN_BIT1 BIT(1)
#define SR2_LB1 BIT(3) /* Security Register Lock Bit 1 */
@@ -364,15 +343,22 @@ struct spi_nor_flash_parameter;
* struct spi_nor - Structure for defining the SPI NOR layer
* @mtd: an mtd_info structure
* @lock: the lock for the read/write/erase/lock/unlock operations
+ * @rww: Read-While-Write (RWW) sync lock
+ * @rww.wait: wait queue for the RWW sync
+ * @rww.ongoing_io: the bus is busy
+ * @rww.ongoing_rd: a read is ongoing on the chip
+ * @rww.ongoing_pe: a program/erase is ongoing on the chip
+ * @rww.used_banks: bitmap of the banks in use
* @dev: pointer to an SPI device or an SPI NOR controller device
* @spimem: pointer to the SPI memory device
* @bouncebuf: bounce buffer used when the buffer passed by the MTD
* layer is not DMA-able
* @bouncebuf_size: size of the bounce buffer
+ * @id: The flash's ID bytes. Always contains
+ * SPI_NOR_MAX_ID_LEN bytes.
* @info: SPI NOR part JEDEC MFR ID and other info
* @manufacturer: SPI NOR manufacturer
- * @page_size: the page size of the SPI NOR
- * @addr_width: number of address bytes
+ * @addr_nbytes: number of address bytes
* @erase_opcode: the opcode for erasing a sector
* @read_opcode: the read opcode
* @read_dummy: the dummy needed by the read operation
@@ -383,6 +369,8 @@ struct spi_nor_flash_parameter;
* @read_proto: the SPI protocol for read operations
* @write_proto: the SPI protocol for write operations
* @reg_proto: the SPI protocol for read_reg/write_reg/erase operations
+ * @sfdp: the SFDP data of the flash
+ * @debugfs_root: pointer to the debugfs directory
* @controller_ops: SPI NOR controller driver specific operations.
* @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings.
* The structure includes legacy flash parameters and
@@ -394,14 +382,21 @@ struct spi_nor_flash_parameter;
struct spi_nor {
struct mtd_info mtd;
struct mutex lock;
+ struct spi_nor_rww {
+ wait_queue_head_t wait;
+ bool ongoing_io;
+ bool ongoing_rd;
+ bool ongoing_pe;
+ unsigned int used_banks;
+ } rww;
struct device *dev;
struct spi_mem *spimem;
u8 *bouncebuf;
size_t bouncebuf_size;
+ u8 *id;
const struct flash_info *info;
const struct spi_nor_manufacturer *manufacturer;
- u32 page_size;
- u8 addr_width;
+ u8 addr_nbytes;
u8 erase_opcode;
u8 read_opcode;
u8 read_dummy;
@@ -412,6 +407,8 @@ struct spi_nor {
bool sst_write_second;
u32 flags;
enum spi_nor_cmd_ext cmd_ext_type;
+ struct sfdp *sfdp;
+ struct dentry *debugfs_root;
const struct spi_nor_controller_ops *controller_ops;
@@ -453,10 +450,4 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps);
-/**
- * spi_nor_restore_addr_mode() - restore the status of SPI NOR
- * @nor: the spi_nor structure
- */
-void spi_nor_restore(struct spi_nor *nor);
-
#endif
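
For reference, a SPI NOR controller driver typically advertises its capabilities and lets the core probe the flash via spi_nor_scan(). A sketch, assuming nor->dev/nor->spimem were already set up by the caller:

	const struct spi_nor_hwcaps hwcaps = {
		.mask = SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST |
			SNOR_HWCAPS_PP,
	};
	int ret;

	ret = spi_nor_scan(nor, NULL, &hwcaps);
	if (ret)
		return ret;
	/* nor->mtd can now be registered with mtd_device_register() */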
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 6bb92f26833e..ce76f5c632e1 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -20,126 +20,218 @@
* Standard SPI NAND flash operations
*/
-#define SPINAND_RESET_OP \
+#define SPINAND_RESET_1S_0_0_OP \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_WR_EN_DIS_OP(enable) \
+#define SPINAND_WR_EN_DIS_1S_0_0_OP(enable) \
SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_READID_OP(naddr, ndummy, buf, len) \
+#define SPINAND_READID_1S_1S_1S_OP(naddr, ndummy, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
SPI_MEM_OP_ADDR(naddr, 0, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1))
-#define SPINAND_SET_FEATURE_OP(reg, valptr) \
+#define SPINAND_SET_FEATURE_1S_1S_1S_OP(reg, valptr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \
SPI_MEM_OP_ADDR(1, reg, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(1, valptr, 1))
-#define SPINAND_GET_FEATURE_OP(reg, valptr) \
+#define SPINAND_GET_FEATURE_1S_1S_1S_OP(reg, valptr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \
SPI_MEM_OP_ADDR(1, reg, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_IN(1, valptr, 1))
-#define SPINAND_BLK_ERASE_OP(addr) \
+#define SPINAND_BLK_ERASE_1S_1S_0_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_PAGE_READ_OP(addr) \
+#define SPINAND_PAGE_READ_1S_1S_0_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 1))
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
- SPI_MEM_OP_ADDR(3, addr, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 1))
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
- SPI_MEM_OP_ADDR(2, addr, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(addr, ndummy, buf, len) \
- SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
SPI_MEM_OP_ADDR(2, addr, 2), \
SPI_MEM_OP_DUMMY(ndummy, 2), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_2S_2S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
SPI_MEM_OP_ADDR(3, addr, 2), \
SPI_MEM_OP_DUMMY(ndummy, 2), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbd, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 2), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 2), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
SPI_MEM_OP_ADDR(2, addr, 4), \
SPI_MEM_OP_DUMMY(ndummy, 4), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_4S_4S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
SPI_MEM_OP_ADDR(3, addr, 4), \
SPI_MEM_OP_DUMMY(ndummy, 4), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
-
-#define SPINAND_PROG_EXEC_OP(addr) \
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xed, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 4), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 4), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x8b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 8), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xcb, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 8), \
+ SPI_MEM_OP_DUMMY(ndummy, 8), \
+ SPI_MEM_OP_DATA_IN(len, buf, 8), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(addr, ndummy, buf, len, freq) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x9d, 1), \
+ SPI_MEM_DTR_OP_ADDR(2, addr, 1), \
+ SPI_MEM_DTR_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_DTR_OP_DATA_IN(len, buf, 8), \
+ SPI_MEM_OP_MAX_FREQ(freq))
+
+#define SPINAND_PROG_EXEC_1S_1S_0_OP(addr) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_PROG_LOAD(reset, addr, buf, len) \
+#define SPINAND_PROG_LOAD_1S_1S_1S_OP(reset, addr, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(len, buf, 1))
-#define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \
+#define SPINAND_PROG_LOAD_1S_1S_4S_OP(reset, addr, buf, len) \
SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(len, buf, 4))
+#define SPINAND_PROG_LOAD_1S_1S_8S_OP(addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x82, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 8))
+
+#define SPINAND_PROG_LOAD_1S_8S_8S_OP(reset, addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0xc2 : 0xc4, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 8), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 8))
+
/**
* Standard SPI NAND flash commands
*/
@@ -169,7 +261,29 @@
struct spinand_op;
struct spinand_device;
-#define SPINAND_MAX_ID_LEN 4
+#define SPINAND_MAX_ID_LEN 5
+/*
+ * For erase, write and read operations, the typical timings are:
+ * tBERS (erase) 1ms to 4ms
+ * tPROG 300us to 400us
+ * tREAD 25us to 100us
+ * In order to minimize latency, the min value is divided by 4 for the
+ * initial delay, and by 20 for the poll delay.
+ * For reset, 5us/10us/500us if the device is respectively
+ * reading/programming/erasing when the RESET occurs. Since we always
+ * issue a RESET when the device is IDLE, 5us is selected for both initial
+ * and poll delay.
+ */
+#define SPINAND_READ_INITIAL_DELAY_US 6
+#define SPINAND_READ_POLL_DELAY_US 5
+#define SPINAND_RESET_INITIAL_DELAY_US 5
+#define SPINAND_RESET_POLL_DELAY_US 5
+#define SPINAND_WRITE_INITIAL_DELAY_US 75
+#define SPINAND_WRITE_POLL_DELAY_US 15
+#define SPINAND_ERASE_INITIAL_DELAY_US 250
+#define SPINAND_ERASE_POLL_DELAY_US 50
+
+#define SPINAND_WAITRDY_TIMEOUT_MS 400
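
These constants are meant to be fed to spinand_wait(), declared further down in this header, e.g. when polling the status register after a page read. A sketch of how the core pairs them:

	u8 status;
	int ret;

	ret = spinand_wait(spinand,
			   SPINAND_READ_INITIAL_DELAY_US,
			   SPINAND_READ_POLL_DELAY_US,
			   &status);
	if (ret)
		return ret;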
/**
* struct spinand_id - SPI NAND id structure
@@ -238,12 +352,20 @@ struct spinand_manufacturer {
};
/* SPI NAND manufacturers */
+extern const struct spinand_manufacturer alliancememory_spinand_manufacturer;
+extern const struct spinand_manufacturer ato_spinand_manufacturer;
+extern const struct spinand_manufacturer esmt_8c_spinand_manufacturer;
+extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer;
+extern const struct spinand_manufacturer fmsh_spinand_manufacturer;
+extern const struct spinand_manufacturer foresee_spinand_manufacturer;
extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
extern const struct spinand_manufacturer paragon_spinand_manufacturer;
+extern const struct spinand_manufacturer skyhigh_spinand_manufacturer;
extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
extern const struct spinand_manufacturer winbond_spinand_manufacturer;
+extern const struct spinand_manufacturer xtx_spinand_manufacturer;
/**
* struct spinand_op_variants - SPI NAND operation variants
@@ -285,6 +407,9 @@ struct spinand_ecc_info {
#define SPINAND_HAS_QE_BIT BIT(0)
#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
+#define SPINAND_HAS_PROG_PLANE_SELECT_BIT BIT(2)
+#define SPINAND_HAS_READ_PLANE_SELECT_BIT BIT(3)
+#define SPINAND_NO_RAW_ACCESS BIT(4)
/**
* struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure
@@ -296,6 +421,67 @@ struct spinand_ondie_ecc_conf {
};
/**
+ * struct spinand_otp_layout - structure to describe the SPI NAND OTP area
+ * @npages: number of pages in the OTP
+ * @start_page: start page of the user/factory OTP area.
+ */
+struct spinand_otp_layout {
+ unsigned int npages;
+ unsigned int start_page;
+};
+
+/**
+ * struct spinand_fact_otp_ops - SPI NAND OTP methods for factory area
+ * @info: get the OTP area information
+ * @read: read from the SPI NAND OTP area
+ */
+struct spinand_fact_otp_ops {
+ int (*info)(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen);
+ int (*read)(struct spinand_device *spinand, loff_t from, size_t len,
+ size_t *retlen, u8 *buf);
+};
+
+/**
+ * struct spinand_user_otp_ops - SPI NAND OTP methods for user area
+ * @info: get the OTP area information
+ * @lock: lock an OTP region
+ * @erase: erase an OTP region
+ * @read: read from the SPI NAND OTP area
+ * @write: write to the SPI NAND OTP area
+ */
+struct spinand_user_otp_ops {
+ int (*info)(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen);
+ int (*lock)(struct spinand_device *spinand, loff_t from, size_t len);
+ int (*erase)(struct spinand_device *spinand, loff_t from, size_t len);
+ int (*read)(struct spinand_device *spinand, loff_t from, size_t len,
+ size_t *retlen, u8 *buf);
+ int (*write)(struct spinand_device *spinand, loff_t from, size_t len,
+ size_t *retlen, const u8 *buf);
+};
+
+/**
+ * struct spinand_fact_otp - SPI NAND OTP grouping structure for factory area
+ * @layout: OTP region layout
+ * @ops: OTP access ops
+ */
+struct spinand_fact_otp {
+ const struct spinand_otp_layout layout;
+ const struct spinand_fact_otp_ops *ops;
+};
+
+/**
+ * struct spinand_user_otp - SPI NAND OTP grouping structure for user area
+ * @layout: OTP region layout
+ * @ops: OTP access ops
+ */
+struct spinand_user_otp {
+ const struct spinand_otp_layout layout;
+ const struct spinand_user_otp_ops *ops;
+};
+
+/**
* struct spinand_info - Structure used to describe SPI NAND chips
* @model: model name
* @devid: device ID
@@ -309,6 +495,12 @@ struct spinand_ondie_ecc_conf {
* @op_variants.update_cache: variants of the update-cache operation
* @select_target: function used to select a target/die. Required only for
* multi-die chips
+ * @configure_chip: Align the chip configuration with the core settings
+ * @set_cont_read: enable/disable continuous cached reads
+ * @fact_otp: SPI NAND factory OTP info.
+ * @user_otp: SPI NAND user OTP info.
+ * @read_retries: the number of read retry modes supported
+ * @set_read_retry: enable/disable read retry for data recovery
*
* Each SPI NAND manufacturer driver should have a spinand_info table
* describing all the chips supported by the driver.
@@ -327,6 +519,14 @@ struct spinand_info {
} op_variants;
int (*select_target)(struct spinand_device *spinand,
unsigned int target);
+ int (*configure_chip)(struct spinand_device *spinand);
+ int (*set_cont_read)(struct spinand_device *spinand,
+ bool enable);
+ struct spinand_fact_otp fact_otp;
+ struct spinand_user_otp user_otp;
+ unsigned int read_retries;
+ int (*set_read_retry)(struct spinand_device *spinand,
+ unsigned int read_retry);
};
#define SPINAND_ID(__method, ...) \
@@ -350,7 +550,35 @@ struct spinand_info {
}
#define SPINAND_SELECT_TARGET(__func) \
- .select_target = __func,
+ .select_target = __func
+
+#define SPINAND_CONFIGURE_CHIP(__configure_chip) \
+ .configure_chip = __configure_chip
+
+#define SPINAND_CONT_READ(__set_cont_read) \
+ .set_cont_read = __set_cont_read
+
+#define SPINAND_FACT_OTP_INFO(__npages, __start_page, __ops) \
+ .fact_otp = { \
+ .layout = { \
+ .npages = __npages, \
+ .start_page = __start_page, \
+ }, \
+ .ops = __ops, \
+ }
+
+#define SPINAND_USER_OTP_INFO(__npages, __start_page, __ops) \
+ .user_otp = { \
+ .layout = { \
+ .npages = __npages, \
+ .start_page = __start_page, \
+ }, \
+ .ops = __ops, \
+ }
+
+#define SPINAND_READ_RETRY(__read_retries, __set_read_retry) \
+ .read_retries = __read_retries, \
+ .set_read_retry = __set_read_retry
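
Put together, a manufacturer table entry can combine these helpers in the variadic part of SPINAND_INFO (defined just below). A hypothetical sketch; the op-variant tables and example_set_read_retry() are assumed to be defined elsewhere in the driver:

	SPINAND_INFO("example-chip",
		     SPINAND_ID(SPINAND_READID_METHOD_OPCODE, 0xaa, 0xbb),
		     NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
		     NAND_ECCREQ(4, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     0,
		     SPINAND_READ_RETRY(4, example_set_read_retry)),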
#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
__flags, ...) \
@@ -367,6 +595,8 @@ struct spinand_info {
struct spinand_dirmap {
struct spi_mem_dirmap_desc *wdesc;
struct spi_mem_dirmap_desc *rdesc;
+ struct spi_mem_dirmap_desc *wdesc_ecc;
+ struct spi_mem_dirmap_desc *rdesc_ecc;
};
/**
@@ -393,7 +623,18 @@ struct spinand_dirmap {
 * passed in spi_mem_op be DMA-able, so we can't base the bufs on
* the stack
* @manufacturer: SPI NAND manufacturer information
+ * @configure_chip: Align the chip configuration with the core settings
+ * @cont_read_possible: Field filled by the core once the whole system
 *		configuration is known, telling whether continuous reads
 *		are generally suitable with this chip/configuration.
 *		A per-transfer check is still required to decide whether
 *		enabling the feature is actually relevant.
+ * @set_cont_read: Enable/disable the continuous read feature
* @priv: manufacturer private data
+ * @fact_otp: SPI NAND factory OTP info.
+ * @user_otp: SPI NAND user OTP info.
+ * @read_retries: the number of read retry modes supported
+ * @set_read_retry: Enable/disable the read retry feature
*/
struct spinand_device {
struct nand_device base;
@@ -422,6 +663,18 @@ struct spinand_device {
u8 *scratchbuf;
const struct spinand_manufacturer *manufacturer;
void *priv;
+
+ int (*configure_chip)(struct spinand_device *spinand);
+ bool cont_read_possible;
+ int (*set_cont_read)(struct spinand_device *spinand,
+ bool enable);
+
+ const struct spinand_fact_otp *fact_otp;
+ const struct spinand_user_otp *user_otp;
+
+ unsigned int read_retries;
+ int (*set_read_retry)(struct spinand_device *spinand,
+ unsigned int retry_mode);
};
/**
@@ -488,6 +741,31 @@ int spinand_match_and_init(struct spinand_device *spinand,
enum spinand_readid_method rdid_method);
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
+int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val);
+int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val);
+int spinand_write_enable_op(struct spinand_device *spinand);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
+int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
+ unsigned long poll_delay_us, u8 *s);
+
+int spinand_read_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req);
+
+int spinand_write_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req);
+
+size_t spinand_otp_page_size(struct spinand_device *spinand);
+size_t spinand_fact_otp_size(struct spinand_device *spinand);
+size_t spinand_user_otp_size(struct spinand_device *spinand);
+
+int spinand_fact_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf);
+int spinand_user_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf);
+int spinand_user_otp_write(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, const u8 *buf);
+
+int spinand_set_mtd_otp_ops(struct spinand_device *spinand);
+
#endif /* __LINUX_MTD_SPINAND_H */
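
As a usage note, the user OTP accessors above follow the usual MTD OTP conventions. A minimal sketch of reading the first bytes of the user OTP area:

	u8 buf[16];
	size_t retlen;
	int ret;

	if (spinand_user_otp_size(spinand) < sizeof(buf))
		return -EINVAL;

	ret = spinand_user_otp_read(spinand, 0, sizeof(buf), &retlen, buf);
	if (ret)
		return ret;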
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index 7d48ea368c5e..c3f79c4be1cc 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -110,6 +110,7 @@ struct ubi_volume_info {
int name_len;
const char *name;
dev_t cdev;
+ struct device *dev;
};
/**
@@ -191,6 +192,7 @@ struct ubi_device_info {
* or a volume was removed)
* @UBI_VOLUME_RESIZED: a volume has been re-sized
* @UBI_VOLUME_RENAMED: a volume has been re-named
 + * @UBI_VOLUME_SHUTDOWN: a volume is going to be removed, shut down its users
* @UBI_VOLUME_UPDATED: data has been written to a volume
*
* These constants define which type of event has happened when a volume
@@ -201,6 +203,7 @@ enum {
UBI_VOLUME_REMOVED,
UBI_VOLUME_RESIZED,
UBI_VOLUME_RENAMED,
+ UBI_VOLUME_SHUTDOWN,
UBI_VOLUME_UPDATED,
};
@@ -247,7 +250,6 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum);
int ubi_leb_map(struct ubi_volume_desc *desc, int lnum);
int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum);
int ubi_sync(int ubi_num);
-int ubi_flush(int ubi_num, int vol_id, int lnum);
/*
* This function is the same as the 'ubi_leb_read()' function, but it does not
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index e19323521f9c..bf535f0118bb 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -19,77 +19,24 @@
#include <asm/processor.h>
#include <linux/osq_lock.h>
#include <linux/debug_locks.h>
+#include <linux/cleanup.h>
+#include <linux/mutex_types.h>
-struct ww_class;
-struct ww_acquire_ctx;
+struct device;
-/*
- * Simple, straightforward mutexes with strict semantics:
- *
- * - only one task can hold the mutex at a time
- * - only the owner can unlock the mutex
- * - multiple unlocks are not permitted
- * - recursive locking is not permitted
- * - a mutex object must be initialized via the API
- * - a mutex object must not be initialized via memset or copying
- * - task may not exit with mutex held
- * - memory areas where held locks reside must not be freed
- * - held mutexes must not be reinitialized
- * - mutexes may not be used in hardware or software interrupt
- * contexts such as tasklets and timers
- *
- * These semantics are fully enforced when DEBUG_MUTEXES is
- * enabled. Furthermore, besides enforcing the above rules, the mutex
- * debugging code also implements a number of additional features
- * that make lock debugging easier and faster:
- *
- * - uses symbolic names of mutexes, whenever they are printed in debug output
- * - point-of-acquire tracking, symbolic lookup of function names
- * - list of all locks held in the system, printout of them
- * - owner tracking
- * - detects self-recursing locks and prints out all relevant info
- * - detects multi-task circular deadlocks and prints out all affected
- * locks and tasks (and only those tasks)
- */
-struct mutex {
- atomic_long_t owner;
- spinlock_t wait_lock;
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
- struct optimistic_spin_queue osq; /* Spinner MCS lock */
-#endif
- struct list_head wait_list;
-#ifdef CONFIG_DEBUG_MUTEXES
- void *magic;
-#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-struct ww_mutex {
- struct mutex base;
- struct ww_acquire_ctx *ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
- struct ww_class *ww_class;
-#endif
-};
-
-/*
- * This is the control structure for tasks blocked on mutex,
- * which resides on the blocked task's kernel stack:
- */
-struct mutex_waiter {
- struct list_head list;
- struct task_struct *task;
- struct ww_acquire_ctx *ww_ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
- void *magic;
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+ , .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_SLEEP, \
+ }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif
-};
#ifdef CONFIG_DEBUG_MUTEXES
-#define __DEBUG_MUTEX_INITIALIZER(lockname) \
+# define __DEBUG_MUTEX_INITIALIZER(lockname) \
, .magic = &lockname
extern void mutex_destroy(struct mutex *lock);
@@ -117,19 +64,21 @@ do { \
__mutex_init((mutex), #mutex, &__key); \
} while (0)
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
- , .dep_map = { \
- .name = #lockname, \
- .wait_type_inner = LD_WAIT_SLEEP, \
- }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
+/**
+ * mutex_init_with_key - initialize a mutex with a given lockdep key
+ * @mutex: the mutex to be initialized
+ * @key: the lockdep key to be associated with the mutex
+ *
+ * Initialize the mutex to the unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
+#define mutex_init_with_key(mutex, key) __mutex_init((mutex), #mutex, (key))
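
For example, two mutexes that must share a single lockdep class can be initialized with the same key. A short sketch; shared_key is illustrative:

	static struct lock_class_key shared_key;
	struct mutex a, b;

	mutex_init_with_key(&a, &shared_key);
	mutex_init_with_key(&b, &shared_key);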
+#ifndef CONFIG_PREEMPT_RT
#define __MUTEX_INITIALIZER(lockname) \
{ .owner = ATOMIC_LONG_INIT(0) \
- , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+ , .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
__DEBUG_MUTEX_INITIALIZER(lockname) \
__DEP_MAP_MUTEX_INITIALIZER(lockname) }
@@ -137,8 +86,23 @@ do { \
#define DEFINE_MUTEX(mutexname) \
struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
-extern void __mutex_init(struct mutex *lock, const char *name,
- struct lock_class_key *key);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void mutex_init_lockdep(struct mutex *lock, const char *name, struct lock_class_key *key);
+
+static inline void __mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key)
+{
+	mutex_init_lockdep(lock, name, key);
+}
+#else
+extern void mutex_init_generic(struct mutex *lock);
+
+static inline void __mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key)
+{
+ mutex_init_generic(lock);
+}
+#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
/**
* mutex_is_locked - is the mutex locked
@@ -148,6 +112,71 @@ extern void __mutex_init(struct mutex *lock, const char *name,
*/
extern bool mutex_is_locked(struct mutex *lock);
+#else /* !CONFIG_PREEMPT_RT */
+/*
+ * Preempt-RT variant based on rtmutexes.
+ */
+
+#define __MUTEX_INITIALIZER(mutexname) \
+{ \
+ .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex) \
+ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
+}
+
+#define DEFINE_MUTEX(mutexname) \
+ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+#define mutex_is_locked(l) rt_mutex_base_is_locked(&(l)->rtmutex)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void mutex_rt_init_lockdep(struct mutex *mutex, const char *name,
+ struct lock_class_key *key);
+
+static inline void __mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key)
+{
+ mutex_rt_init_lockdep(lock, name, key);
+}
+
+#else
+extern void mutex_rt_init_generic(struct mutex *mutex);
+
+static inline void __mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key)
+{
+ mutex_rt_init_generic(lock);
+}
+#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* CONFIG_PREEMPT_RT */
+
+#ifdef CONFIG_DEBUG_MUTEXES
+
+int __must_check __devm_mutex_init(struct device *dev, struct mutex *lock);
+
+#else
+
+static inline int __must_check __devm_mutex_init(struct device *dev, struct mutex *lock)
+{
+ /*
 * When CONFIG_DEBUG_MUTEXES is off, mutex_destroy() is just a no-op,
 * so there is no real need to register it with the devm subsystem.
+ */
+ return 0;
+}
+
+#endif
+
+#define __mutex_init_ret(mutex) \
+({ \
+ typeof(mutex) mutex_ = (mutex); \
+ \
+ mutex_init(mutex_); \
+ mutex_; \
+})
+
+#define devm_mutex_init(dev, mutex) \
+ __devm_mutex_init(dev, __mutex_init_ret(mutex))
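
A driver would typically call devm_mutex_init() from probe so the (debug-only) mutex_destroy() runs automatically on unbind. A sketch with hypothetical example_* names:

	static int example_probe(struct platform_device *pdev)
	{
		struct example_priv *priv;
		int ret;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		ret = devm_mutex_init(&pdev->dev, &priv->lock);
		if (ret)
			return ret;

		return 0;
	}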
+
/*
* See kernel/locking/mutex.c for detailed documentation of these APIs.
* Also see Documentation/locking/mutex-design.rst.
@@ -155,16 +184,15 @@ extern bool mutex_is_locked(struct mutex *lock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
-
extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
unsigned int subclass);
-extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
- unsigned int subclass);
+extern int __must_check _mutex_lock_killable(struct mutex *lock,
+ unsigned int subclass, struct lockdep_map *nest_lock);
extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
-#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
+#define mutex_lock_killable(lock) _mutex_lock_killable(lock, 0, NULL)
#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)
#define mutex_lock_nest_lock(lock, nest_lock) \
@@ -173,6 +201,15 @@ do { \
_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
} while (0)
+#define mutex_lock_killable_nest_lock(lock, nest_lock) \
+( \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map), \
+ _mutex_lock_killable(lock, 0, &(nest_lock)->dep_map) \
+)
+
+#define mutex_lock_killable_nested(lock, subclass) \
+ _mutex_lock_killable(lock, subclass, NULL)
+
#else
extern void mutex_lock(struct mutex *lock);
extern int __must_check mutex_lock_interruptible(struct mutex *lock);
@@ -182,6 +219,7 @@ extern void mutex_lock_io(struct mutex *lock);
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
+# define mutex_lock_killable_nest_lock(lock, nest_lock) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
#endif
@@ -192,9 +230,30 @@ extern void mutex_lock_io(struct mutex *lock);
*
* Returns 1 if the mutex has been acquired successfully, and 0 on contention.
*/
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+
+#define mutex_trylock_nest_lock(lock, nest_lock) \
+( \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map), \
+ _mutex_trylock_nest_lock(lock, &(nest_lock)->dep_map) \
+)
+
+#define mutex_trylock(lock) _mutex_trylock_nest_lock(lock, NULL)
+#else
extern int mutex_trylock(struct mutex *lock);
+#define mutex_trylock_nest_lock(lock, nest_lock) mutex_trylock(lock)
+#endif
+
extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
+DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
+DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T), _RET == 0)
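
With the cleanup.h guards defined above, lock/unlock pairs can be scope-based. A sketch; struct example_data is hypothetical:

	static int example_increment(struct example_data *d)
	{
		guard(mutex)(&d->lock);	/* mutex_unlock() runs at scope exit */

		d->counter++;
		return 0;		/* no explicit unlock needed */
	}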
+
+extern unsigned long mutex_get_owner(struct mutex *lock);
+
#endif /* __LINUX_MUTEX_H */
diff --git a/include/linux/mutex_api.h b/include/linux/mutex_api.h
new file mode 100644
index 000000000000..85ab9491e13e
--- /dev/null
+++ b/include/linux/mutex_api.h
@@ -0,0 +1 @@
+#include <linux/mutex.h>
diff --git a/include/linux/mutex_types.h b/include/linux/mutex_types.h
new file mode 100644
index 000000000000..fdf7f515fde8
--- /dev/null
+++ b/include/linux/mutex_types.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_MUTEX_TYPES_H
+#define __LINUX_MUTEX_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/lockdep_types.h>
+#include <linux/osq_lock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+#ifndef CONFIG_PREEMPT_RT
+
+/*
+ * Simple, straightforward mutexes with strict semantics:
+ *
+ * - only one task can hold the mutex at a time
+ * - only the owner can unlock the mutex
+ * - multiple unlocks are not permitted
+ * - recursive locking is not permitted
+ * - a mutex object must be initialized via the API
+ * - a mutex object must not be initialized via memset or copying
+ * - task may not exit with mutex held
+ * - memory areas where held locks reside must not be freed
+ * - held mutexes must not be reinitialized
+ * - mutexes may not be used in hardware or software interrupt
+ * contexts such as tasklets and timers
+ *
+ * These semantics are fully enforced when DEBUG_MUTEXES is
+ * enabled. Furthermore, besides enforcing the above rules, the mutex
+ * debugging code also implements a number of additional features
+ * that make lock debugging easier and faster:
+ *
+ * - uses symbolic names of mutexes, whenever they are printed in debug output
+ * - point-of-acquire tracking, symbolic lookup of function names
+ * - list of all locks held in the system, printout of them
+ * - owner tracking
+ * - detects self-recursing locks and prints out all relevant info
+ * - detects multi-task circular deadlocks and prints out all affected
+ * locks and tasks (and only those tasks)
+ */
+struct mutex {
+ atomic_long_t owner;
+ raw_spinlock_t wait_lock;
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+ struct optimistic_spin_queue osq; /* Spinner MCS lock */
+#endif
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_MUTEXES
+ void *magic;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#else /* !CONFIG_PREEMPT_RT */
+/*
+ * Preempt-RT variant based on rtmutexes.
+ */
+#include <linux/rtmutex.h>
+
+struct mutex {
+ struct rt_mutex_base rtmutex;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#endif /* CONFIG_PREEMPT_RT */
+
+#endif /* __LINUX_MUTEX_TYPES_H */
diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h
index 5fc6bb2fefad..2e25c838f831 100644
--- a/include/linux/mux/consumer.h
+++ b/include/linux/mux/consumer.h
@@ -14,18 +14,51 @@
struct device;
struct mux_control;
+struct mux_state;
unsigned int mux_control_states(struct mux_control *mux);
-int __must_check mux_control_select(struct mux_control *mux,
- unsigned int state);
-int __must_check mux_control_try_select(struct mux_control *mux,
- unsigned int state);
+int __must_check mux_control_select_delay(struct mux_control *mux,
+ unsigned int state,
+ unsigned int delay_us);
+int __must_check mux_state_select_delay(struct mux_state *mstate,
+ unsigned int delay_us);
+int __must_check mux_control_try_select_delay(struct mux_control *mux,
+ unsigned int state,
+ unsigned int delay_us);
+int __must_check mux_state_try_select_delay(struct mux_state *mstate,
+ unsigned int delay_us);
+
+static inline int __must_check mux_control_select(struct mux_control *mux,
+ unsigned int state)
+{
+ return mux_control_select_delay(mux, state, 0);
+}
+
+static inline int __must_check mux_state_select(struct mux_state *mstate)
+{
+ return mux_state_select_delay(mstate, 0);
+}
+
+static inline int __must_check mux_control_try_select(struct mux_control *mux,
+ unsigned int state)
+{
+ return mux_control_try_select_delay(mux, state, 0);
+}
+
+static inline int __must_check mux_state_try_select(struct mux_state *mstate)
+{
+ return mux_state_try_select_delay(mstate, 0);
+}
+
int mux_control_deselect(struct mux_control *mux);
+int mux_state_deselect(struct mux_state *mstate);
struct mux_control *mux_control_get(struct device *dev, const char *mux_name);
void mux_control_put(struct mux_control *mux);
struct mux_control *devm_mux_control_get(struct device *dev,
const char *mux_name);
+struct mux_state *devm_mux_state_get(struct device *dev,
+ const char *mux_name);
#endif /* _LINUX_MUX_CONSUMER_H */
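
A consumer pairs the (try_)select and deselect calls around each access to the muxed resource; the new *_delay() variants additionally enforce a settling time after switching. A sketch with an illustrative "adc-mux" name:

	struct mux_control *mux;
	int ret;

	mux = devm_mux_control_get(dev, "adc-mux");
	if (IS_ERR(mux))
		return PTR_ERR(mux);

	ret = mux_control_select_delay(mux, 1, 10); /* settle for 10us */
	if (ret < 0)
		return ret;

	/* access the peripheral behind mux state 1 here */

	mux_control_deselect(mux);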
diff --git a/include/linux/mux/driver.h b/include/linux/mux/driver.h
index 627a2c6bc02d..e58e59354e23 100644
--- a/include/linux/mux/driver.h
+++ b/include/linux/mux/driver.h
@@ -12,6 +12,7 @@
#include <dt-bindings/mux/mux.h>
#include <linux/device.h>
+#include <linux/ktime.h>
#include <linux/semaphore.h>
struct mux_chip;
@@ -33,6 +34,7 @@ struct mux_control_ops {
* @states: The number of mux controller states.
* @idle_state: The mux controller state to use when inactive, or one
* of MUX_IDLE_AS_IS and MUX_IDLE_DISCONNECT.
+ * @last_change: Timestamp of last change
*
* Mux drivers may only change @states and @idle_state, and may only do so
* between allocation and registration of the mux controller. Specifically,
@@ -47,23 +49,25 @@ struct mux_control {
unsigned int states;
int idle_state;
+
+ ktime_t last_change;
};
/**
* struct mux_chip - Represents a chip holding mux controllers.
* @controllers: Number of mux controllers handled by the chip.
- * @mux: Array of mux controllers that are handled.
* @dev: Device structure.
* @id: Used to identify the device internally.
* @ops: Mux controller operations.
+ * @mux: Array of mux controllers that are handled.
*/
struct mux_chip {
unsigned int controllers;
- struct mux_control *mux;
struct device dev;
int id;
const struct mux_control_ops *ops;
+ struct mux_control mux[] __counted_by(controllers);
};
#define to_mux_chip(x) container_of((x), struct mux_chip, dev)
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h
deleted file mode 100644
index 47e5679b48e1..000000000000
--- a/include/linux/mv643xx.h
+++ /dev/null
@@ -1,929 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * mv643xx.h - MV-643XX Internal registers definition file.
- *
- * Copyright 2002 Momentum Computer, Inc.
- * Author: Matthew Dharm <mdharm@momenco.com>
- * Copyright 2002 GALILEO TECHNOLOGY, LTD.
- */
-#ifndef __ASM_MV643XX_H
-#define __ASM_MV643XX_H
-
-#include <asm/types.h>
-#include <linux/mv643xx_eth.h>
-#include <linux/mv643xx_i2c.h>
-
-/****************************************/
-/* Processor Address Space */
-/****************************************/
-
-/* DDR SDRAM BAR and size registers */
-
-#define MV64340_CS_0_BASE_ADDR 0x008
-#define MV64340_CS_0_SIZE 0x010
-#define MV64340_CS_1_BASE_ADDR 0x208
-#define MV64340_CS_1_SIZE 0x210
-#define MV64340_CS_2_BASE_ADDR 0x018
-#define MV64340_CS_2_SIZE 0x020
-#define MV64340_CS_3_BASE_ADDR 0x218
-#define MV64340_CS_3_SIZE 0x220
-
-/* Devices BAR and size registers */
-
-#define MV64340_DEV_CS0_BASE_ADDR 0x028
-#define MV64340_DEV_CS0_SIZE 0x030
-#define MV64340_DEV_CS1_BASE_ADDR 0x228
-#define MV64340_DEV_CS1_SIZE 0x230
-#define MV64340_DEV_CS2_BASE_ADDR 0x248
-#define MV64340_DEV_CS2_SIZE 0x250
-#define MV64340_DEV_CS3_BASE_ADDR 0x038
-#define MV64340_DEV_CS3_SIZE 0x040
-#define MV64340_BOOTCS_BASE_ADDR 0x238
-#define MV64340_BOOTCS_SIZE 0x240
-
-/* PCI 0 BAR and size registers */
-
-#define MV64340_PCI_0_IO_BASE_ADDR 0x048
-#define MV64340_PCI_0_IO_SIZE 0x050
-#define MV64340_PCI_0_MEMORY0_BASE_ADDR 0x058
-#define MV64340_PCI_0_MEMORY0_SIZE 0x060
-#define MV64340_PCI_0_MEMORY1_BASE_ADDR 0x080
-#define MV64340_PCI_0_MEMORY1_SIZE 0x088
-#define MV64340_PCI_0_MEMORY2_BASE_ADDR 0x258
-#define MV64340_PCI_0_MEMORY2_SIZE 0x260
-#define MV64340_PCI_0_MEMORY3_BASE_ADDR 0x280
-#define MV64340_PCI_0_MEMORY3_SIZE 0x288
-
-/* PCI 1 BAR and size registers */
-#define MV64340_PCI_1_IO_BASE_ADDR 0x090
-#define MV64340_PCI_1_IO_SIZE 0x098
-#define MV64340_PCI_1_MEMORY0_BASE_ADDR 0x0a0
-#define MV64340_PCI_1_MEMORY0_SIZE 0x0a8
-#define MV64340_PCI_1_MEMORY1_BASE_ADDR 0x0b0
-#define MV64340_PCI_1_MEMORY1_SIZE 0x0b8
-#define MV64340_PCI_1_MEMORY2_BASE_ADDR 0x2a0
-#define MV64340_PCI_1_MEMORY2_SIZE 0x2a8
-#define MV64340_PCI_1_MEMORY3_BASE_ADDR 0x2b0
-#define MV64340_PCI_1_MEMORY3_SIZE 0x2b8
-
-/* SRAM base address */
-#define MV64340_INTEGRATED_SRAM_BASE_ADDR 0x268
-
-/* internal registers space base address */
-#define MV64340_INTERNAL_SPACE_BASE_ADDR 0x068
-
-/* Enables the CS , DEV_CS , PCI 0 and PCI 1
- windows above */
-#define MV64340_BASE_ADDR_ENABLE 0x278
-
-/****************************************/
-/* PCI remap registers */
-/****************************************/
- /* PCI 0 */
-#define MV64340_PCI_0_IO_ADDR_REMAP 0x0f0
-#define MV64340_PCI_0_MEMORY0_LOW_ADDR_REMAP 0x0f8
-#define MV64340_PCI_0_MEMORY0_HIGH_ADDR_REMAP 0x320
-#define MV64340_PCI_0_MEMORY1_LOW_ADDR_REMAP 0x100
-#define MV64340_PCI_0_MEMORY1_HIGH_ADDR_REMAP 0x328
-#define MV64340_PCI_0_MEMORY2_LOW_ADDR_REMAP 0x2f8
-#define MV64340_PCI_0_MEMORY2_HIGH_ADDR_REMAP 0x330
-#define MV64340_PCI_0_MEMORY3_LOW_ADDR_REMAP 0x300
-#define MV64340_PCI_0_MEMORY3_HIGH_ADDR_REMAP 0x338
- /* PCI 1 */
-#define MV64340_PCI_1_IO_ADDR_REMAP 0x108
-#define MV64340_PCI_1_MEMORY0_LOW_ADDR_REMAP 0x110
-#define MV64340_PCI_1_MEMORY0_HIGH_ADDR_REMAP 0x340
-#define MV64340_PCI_1_MEMORY1_LOW_ADDR_REMAP 0x118
-#define MV64340_PCI_1_MEMORY1_HIGH_ADDR_REMAP 0x348
-#define MV64340_PCI_1_MEMORY2_LOW_ADDR_REMAP 0x310
-#define MV64340_PCI_1_MEMORY2_HIGH_ADDR_REMAP 0x350
-#define MV64340_PCI_1_MEMORY3_LOW_ADDR_REMAP 0x318
-#define MV64340_PCI_1_MEMORY3_HIGH_ADDR_REMAP 0x358
-
-#define MV64340_CPU_PCI_0_HEADERS_RETARGET_CONTROL 0x3b0
-#define MV64340_CPU_PCI_0_HEADERS_RETARGET_BASE 0x3b8
-#define MV64340_CPU_PCI_1_HEADERS_RETARGET_CONTROL 0x3c0
-#define MV64340_CPU_PCI_1_HEADERS_RETARGET_BASE 0x3c8
-#define MV64340_CPU_GE_HEADERS_RETARGET_CONTROL 0x3d0
-#define MV64340_CPU_GE_HEADERS_RETARGET_BASE 0x3d8
-#define MV64340_CPU_IDMA_HEADERS_RETARGET_CONTROL 0x3e0
-#define MV64340_CPU_IDMA_HEADERS_RETARGET_BASE 0x3e8
-
-/****************************************/
-/* CPU Control Registers */
-/****************************************/
-
-#define MV64340_CPU_CONFIG 0x000
-#define MV64340_CPU_MODE 0x120
-#define MV64340_CPU_MASTER_CONTROL 0x160
-#define MV64340_CPU_CROSS_BAR_CONTROL_LOW 0x150
-#define MV64340_CPU_CROSS_BAR_CONTROL_HIGH 0x158
-#define MV64340_CPU_CROSS_BAR_TIMEOUT 0x168
-
-/****************************************/
-/* SMP RegisterS */
-/****************************************/
-
-#define MV64340_SMP_WHO_AM_I 0x200
-#define MV64340_SMP_CPU0_DOORBELL 0x214
-#define MV64340_SMP_CPU0_DOORBELL_CLEAR 0x21C
-#define MV64340_SMP_CPU1_DOORBELL 0x224
-#define MV64340_SMP_CPU1_DOORBELL_CLEAR 0x22C
-#define MV64340_SMP_CPU0_DOORBELL_MASK 0x234
-#define MV64340_SMP_CPU1_DOORBELL_MASK 0x23C
-#define MV64340_SMP_SEMAPHOR0 0x244
-#define MV64340_SMP_SEMAPHOR1 0x24c
-#define MV64340_SMP_SEMAPHOR2 0x254
-#define MV64340_SMP_SEMAPHOR3 0x25c
-#define MV64340_SMP_SEMAPHOR4 0x264
-#define MV64340_SMP_SEMAPHOR5 0x26c
-#define MV64340_SMP_SEMAPHOR6 0x274
-#define MV64340_SMP_SEMAPHOR7 0x27c
-
-/****************************************/
-/* CPU Sync Barrier Register */
-/****************************************/
-
-#define MV64340_CPU_0_SYNC_BARRIER_TRIGGER 0x0c0
-#define MV64340_CPU_0_SYNC_BARRIER_VIRTUAL 0x0c8
-#define MV64340_CPU_1_SYNC_BARRIER_TRIGGER 0x0d0
-#define MV64340_CPU_1_SYNC_BARRIER_VIRTUAL 0x0d8
-
-/****************************************/
-/* CPU Access Protect */
-/****************************************/
-
-#define MV64340_CPU_PROTECT_WINDOW_0_BASE_ADDR 0x180
-#define MV64340_CPU_PROTECT_WINDOW_0_SIZE 0x188
-#define MV64340_CPU_PROTECT_WINDOW_1_BASE_ADDR 0x190
-#define MV64340_CPU_PROTECT_WINDOW_1_SIZE 0x198
-#define MV64340_CPU_PROTECT_WINDOW_2_BASE_ADDR 0x1a0
-#define MV64340_CPU_PROTECT_WINDOW_2_SIZE 0x1a8
-#define MV64340_CPU_PROTECT_WINDOW_3_BASE_ADDR 0x1b0
-#define MV64340_CPU_PROTECT_WINDOW_3_SIZE 0x1b8
-
-
-/****************************************/
-/* CPU Error Report */
-/****************************************/
-
-#define MV64340_CPU_ERROR_ADDR_LOW 0x070
-#define MV64340_CPU_ERROR_ADDR_HIGH 0x078
-#define MV64340_CPU_ERROR_DATA_LOW 0x128
-#define MV64340_CPU_ERROR_DATA_HIGH 0x130
-#define MV64340_CPU_ERROR_PARITY 0x138
-#define MV64340_CPU_ERROR_CAUSE 0x140
-#define MV64340_CPU_ERROR_MASK 0x148
-
-/****************************************/
-/* CPU Interface Debug Registers */
-/****************************************/
-
-#define MV64340_PUNIT_SLAVE_DEBUG_LOW 0x360
-#define MV64340_PUNIT_SLAVE_DEBUG_HIGH 0x368
-#define MV64340_PUNIT_MASTER_DEBUG_LOW 0x370
-#define MV64340_PUNIT_MASTER_DEBUG_HIGH 0x378
-#define MV64340_PUNIT_MMASK 0x3e4
-
-/****************************************/
-/* Integrated SRAM Registers */
-/****************************************/
-
-#define MV64340_SRAM_CONFIG 0x380
-#define MV64340_SRAM_TEST_MODE 0X3F4
-#define MV64340_SRAM_ERROR_CAUSE 0x388
-#define MV64340_SRAM_ERROR_ADDR 0x390
-#define MV64340_SRAM_ERROR_ADDR_HIGH 0X3F8
-#define MV64340_SRAM_ERROR_DATA_LOW 0x398
-#define MV64340_SRAM_ERROR_DATA_HIGH 0x3a0
-#define MV64340_SRAM_ERROR_DATA_PARITY 0x3a8
-
-/****************************************/
-/* SDRAM Configuration */
-/****************************************/
-
-#define MV64340_SDRAM_CONFIG 0x1400
-#define MV64340_D_UNIT_CONTROL_LOW 0x1404
-#define MV64340_D_UNIT_CONTROL_HIGH 0x1424
-#define MV64340_SDRAM_TIMING_CONTROL_LOW 0x1408
-#define MV64340_SDRAM_TIMING_CONTROL_HIGH 0x140c
-#define MV64340_SDRAM_ADDR_CONTROL 0x1410
-#define MV64340_SDRAM_OPEN_PAGES_CONTROL 0x1414
-#define MV64340_SDRAM_OPERATION 0x1418
-#define MV64340_SDRAM_MODE 0x141c
-#define MV64340_EXTENDED_DRAM_MODE 0x1420
-#define MV64340_SDRAM_CROSS_BAR_CONTROL_LOW 0x1430
-#define MV64340_SDRAM_CROSS_BAR_CONTROL_HIGH 0x1434
-#define MV64340_SDRAM_CROSS_BAR_TIMEOUT 0x1438
-#define MV64340_SDRAM_ADDR_CTRL_PADS_CALIBRATION 0x14c0
-#define MV64340_SDRAM_DATA_PADS_CALIBRATION 0x14c4
-
-/****************************************/
-/* SDRAM Error Report */
-/****************************************/
-
-#define MV64340_SDRAM_ERROR_DATA_LOW 0x1444
-#define MV64340_SDRAM_ERROR_DATA_HIGH 0x1440
-#define MV64340_SDRAM_ERROR_ADDR 0x1450
-#define MV64340_SDRAM_RECEIVED_ECC 0x1448
-#define MV64340_SDRAM_CALCULATED_ECC 0x144c
-#define MV64340_SDRAM_ECC_CONTROL 0x1454
-#define MV64340_SDRAM_ECC_ERROR_COUNTER 0x1458
-
-/******************************************/
-/* Controlled Delay Line (CDL) Registers */
-/******************************************/
-
-#define MV64340_DFCDL_CONFIG0 0x1480
-#define MV64340_DFCDL_CONFIG1 0x1484
-#define MV64340_DLL_WRITE 0x1488
-#define MV64340_DLL_READ 0x148c
-#define MV64340_SRAM_ADDR 0x1490
-#define MV64340_SRAM_DATA0 0x1494
-#define MV64340_SRAM_DATA1 0x1498
-#define MV64340_SRAM_DATA2 0x149c
-#define MV64340_DFCL_PROBE 0x14a0
-
-/******************************************/
-/* Debug Registers */
-/******************************************/
-
-#define MV64340_DUNIT_DEBUG_LOW 0x1460
-#define MV64340_DUNIT_DEBUG_HIGH 0x1464
-#define MV64340_DUNIT_MMASK 0X1b40
-
-/****************************************/
-/* Device Parameters */
-/****************************************/
-
-#define MV64340_DEVICE_BANK0_PARAMETERS 0x45c
-#define MV64340_DEVICE_BANK1_PARAMETERS 0x460
-#define MV64340_DEVICE_BANK2_PARAMETERS 0x464
-#define MV64340_DEVICE_BANK3_PARAMETERS 0x468
-#define MV64340_DEVICE_BOOT_BANK_PARAMETERS 0x46c
-#define MV64340_DEVICE_INTERFACE_CONTROL 0x4c0
-#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_LOW 0x4c8
-#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_HIGH 0x4cc
-#define MV64340_DEVICE_INTERFACE_CROSS_BAR_TIMEOUT 0x4c4
-
-/****************************************/
-/* Device interrupt registers */
-/****************************************/
-
-#define MV64340_DEVICE_INTERRUPT_CAUSE 0x4d0
-#define MV64340_DEVICE_INTERRUPT_MASK 0x4d4
-#define MV64340_DEVICE_ERROR_ADDR 0x4d8
-#define MV64340_DEVICE_ERROR_DATA 0x4dc
-#define MV64340_DEVICE_ERROR_PARITY 0x4e0
-
-/****************************************/
-/* Device debug registers */
-/****************************************/
-
-#define MV64340_DEVICE_DEBUG_LOW 0x4e4
-#define MV64340_DEVICE_DEBUG_HIGH 0x4e8
-#define MV64340_RUNIT_MMASK 0x4f0
-
-/****************************************/
-/* PCI Slave Address Decoding registers */
-/****************************************/
-
-#define MV64340_PCI_0_CS_0_BANK_SIZE 0xc08
-#define MV64340_PCI_1_CS_0_BANK_SIZE 0xc88
-#define MV64340_PCI_0_CS_1_BANK_SIZE 0xd08
-#define MV64340_PCI_1_CS_1_BANK_SIZE 0xd88
-#define MV64340_PCI_0_CS_2_BANK_SIZE 0xc0c
-#define MV64340_PCI_1_CS_2_BANK_SIZE 0xc8c
-#define MV64340_PCI_0_CS_3_BANK_SIZE 0xd0c
-#define MV64340_PCI_1_CS_3_BANK_SIZE 0xd8c
-#define MV64340_PCI_0_DEVCS_0_BANK_SIZE 0xc10
-#define MV64340_PCI_1_DEVCS_0_BANK_SIZE 0xc90
-#define MV64340_PCI_0_DEVCS_1_BANK_SIZE 0xd10
-#define MV64340_PCI_1_DEVCS_1_BANK_SIZE 0xd90
-#define MV64340_PCI_0_DEVCS_2_BANK_SIZE 0xd18
-#define MV64340_PCI_1_DEVCS_2_BANK_SIZE 0xd98
-#define MV64340_PCI_0_DEVCS_3_BANK_SIZE 0xc14
-#define MV64340_PCI_1_DEVCS_3_BANK_SIZE 0xc94
-#define MV64340_PCI_0_DEVCS_BOOT_BANK_SIZE 0xd14
-#define MV64340_PCI_1_DEVCS_BOOT_BANK_SIZE 0xd94
-#define MV64340_PCI_0_P2P_MEM0_BAR_SIZE 0xd1c
-#define MV64340_PCI_1_P2P_MEM0_BAR_SIZE 0xd9c
-#define MV64340_PCI_0_P2P_MEM1_BAR_SIZE 0xd20
-#define MV64340_PCI_1_P2P_MEM1_BAR_SIZE 0xda0
-#define MV64340_PCI_0_P2P_I_O_BAR_SIZE 0xd24
-#define MV64340_PCI_1_P2P_I_O_BAR_SIZE 0xda4
-#define MV64340_PCI_0_CPU_BAR_SIZE 0xd28
-#define MV64340_PCI_1_CPU_BAR_SIZE 0xda8
-#define MV64340_PCI_0_INTERNAL_SRAM_BAR_SIZE 0xe00
-#define MV64340_PCI_1_INTERNAL_SRAM_BAR_SIZE 0xe80
-#define MV64340_PCI_0_EXPANSION_ROM_BAR_SIZE 0xd2c
-#define MV64340_PCI_1_EXPANSION_ROM_BAR_SIZE 0xd9c
-#define MV64340_PCI_0_BASE_ADDR_REG_ENABLE 0xc3c
-#define MV64340_PCI_1_BASE_ADDR_REG_ENABLE 0xcbc
-#define MV64340_PCI_0_CS_0_BASE_ADDR_REMAP 0xc48
-#define MV64340_PCI_1_CS_0_BASE_ADDR_REMAP 0xcc8
-#define MV64340_PCI_0_CS_1_BASE_ADDR_REMAP 0xd48
-#define MV64340_PCI_1_CS_1_BASE_ADDR_REMAP 0xdc8
-#define MV64340_PCI_0_CS_2_BASE_ADDR_REMAP 0xc4c
-#define MV64340_PCI_1_CS_2_BASE_ADDR_REMAP 0xccc
-#define MV64340_PCI_0_CS_3_BASE_ADDR_REMAP 0xd4c
-#define MV64340_PCI_1_CS_3_BASE_ADDR_REMAP 0xdcc
-#define MV64340_PCI_0_CS_0_BASE_HIGH_ADDR_REMAP 0xF04
-#define MV64340_PCI_1_CS_0_BASE_HIGH_ADDR_REMAP 0xF84
-#define MV64340_PCI_0_CS_1_BASE_HIGH_ADDR_REMAP 0xF08
-#define MV64340_PCI_1_CS_1_BASE_HIGH_ADDR_REMAP 0xF88
-#define MV64340_PCI_0_CS_2_BASE_HIGH_ADDR_REMAP 0xF0C
-#define MV64340_PCI_1_CS_2_BASE_HIGH_ADDR_REMAP 0xF8C
-#define MV64340_PCI_0_CS_3_BASE_HIGH_ADDR_REMAP 0xF10
-#define MV64340_PCI_1_CS_3_BASE_HIGH_ADDR_REMAP 0xF90
-#define MV64340_PCI_0_DEVCS_0_BASE_ADDR_REMAP 0xc50
-#define MV64340_PCI_1_DEVCS_0_BASE_ADDR_REMAP 0xcd0
-#define MV64340_PCI_0_DEVCS_1_BASE_ADDR_REMAP 0xd50
-#define MV64340_PCI_1_DEVCS_1_BASE_ADDR_REMAP 0xdd0
-#define MV64340_PCI_0_DEVCS_2_BASE_ADDR_REMAP 0xd58
-#define MV64340_PCI_1_DEVCS_2_BASE_ADDR_REMAP 0xdd8
-#define MV64340_PCI_0_DEVCS_3_BASE_ADDR_REMAP 0xc54
-#define MV64340_PCI_1_DEVCS_3_BASE_ADDR_REMAP 0xcd4
-#define MV64340_PCI_0_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xd54
-#define MV64340_PCI_1_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xdd4
-#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xd5c
-#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xddc
-#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xd60
-#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xde0
-#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xd64
-#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xde4
-#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xd68
-#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xde8
-#define MV64340_PCI_0_P2P_I_O_BASE_ADDR_REMAP 0xd6c
-#define MV64340_PCI_1_P2P_I_O_BASE_ADDR_REMAP 0xdec
-#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_LOW 0xd70
-#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_LOW 0xdf0
-#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_HIGH 0xd74
-#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_HIGH 0xdf4
-#define MV64340_PCI_0_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf00
-#define MV64340_PCI_1_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf80
-#define MV64340_PCI_0_EXPANSION_ROM_BASE_ADDR_REMAP 0xf38
-#define MV64340_PCI_1_EXPANSION_ROM_BASE_ADDR_REMAP 0xfb8
-#define MV64340_PCI_0_ADDR_DECODE_CONTROL 0xd3c
-#define MV64340_PCI_1_ADDR_DECODE_CONTROL 0xdbc
-#define MV64340_PCI_0_HEADERS_RETARGET_CONTROL 0xF40
-#define MV64340_PCI_1_HEADERS_RETARGET_CONTROL 0xFc0
-#define MV64340_PCI_0_HEADERS_RETARGET_BASE 0xF44
-#define MV64340_PCI_1_HEADERS_RETARGET_BASE 0xFc4
-#define MV64340_PCI_0_HEADERS_RETARGET_HIGH 0xF48
-#define MV64340_PCI_1_HEADERS_RETARGET_HIGH 0xFc8
-
-/***********************************/
-/* PCI Control Register Map */
-/***********************************/
-
-#define MV64340_PCI_0_DLL_STATUS_AND_COMMAND 0x1d20
-#define MV64340_PCI_1_DLL_STATUS_AND_COMMAND 0x1da0
-#define MV64340_PCI_0_MPP_PADS_DRIVE_CONTROL 0x1d1C
-#define MV64340_PCI_1_MPP_PADS_DRIVE_CONTROL 0x1d9C
-#define MV64340_PCI_0_COMMAND 0xc00
-#define MV64340_PCI_1_COMMAND 0xc80
-#define MV64340_PCI_0_MODE 0xd00
-#define MV64340_PCI_1_MODE 0xd80
-#define MV64340_PCI_0_RETRY 0xc04
-#define MV64340_PCI_1_RETRY 0xc84
-#define MV64340_PCI_0_READ_BUFFER_DISCARD_TIMER 0xd04
-#define MV64340_PCI_1_READ_BUFFER_DISCARD_TIMER 0xd84
-#define MV64340_PCI_0_MSI_TRIGGER_TIMER 0xc38
-#define MV64340_PCI_1_MSI_TRIGGER_TIMER 0xcb8
-#define MV64340_PCI_0_ARBITER_CONTROL 0x1d00
-#define MV64340_PCI_1_ARBITER_CONTROL 0x1d80
-#define MV64340_PCI_0_CROSS_BAR_CONTROL_LOW 0x1d08
-#define MV64340_PCI_1_CROSS_BAR_CONTROL_LOW 0x1d88
-#define MV64340_PCI_0_CROSS_BAR_CONTROL_HIGH 0x1d0c
-#define MV64340_PCI_1_CROSS_BAR_CONTROL_HIGH 0x1d8c
-#define MV64340_PCI_0_CROSS_BAR_TIMEOUT 0x1d04
-#define MV64340_PCI_1_CROSS_BAR_TIMEOUT 0x1d84
-#define MV64340_PCI_0_SYNC_BARRIER_TRIGGER_REG 0x1D18
-#define MV64340_PCI_1_SYNC_BARRIER_TRIGGER_REG 0x1D98
-#define MV64340_PCI_0_SYNC_BARRIER_VIRTUAL_REG 0x1d10
-#define MV64340_PCI_1_SYNC_BARRIER_VIRTUAL_REG 0x1d90
-#define MV64340_PCI_0_P2P_CONFIG 0x1d14
-#define MV64340_PCI_1_P2P_CONFIG 0x1d94
-
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_LOW 0x1e00
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_HIGH 0x1e04
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_0 0x1e08
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_LOW 0x1e10
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_HIGH 0x1e14
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_1 0x1e18
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_LOW 0x1e20
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_HIGH 0x1e24
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_2 0x1e28
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_LOW 0x1e30
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_HIGH 0x1e34
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_3 0x1e38
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_LOW 0x1e40
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_HIGH 0x1e44
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_4 0x1e48
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_LOW 0x1e50
-#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_HIGH 0x1e54
-#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_5 0x1e58
-
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_LOW 0x1e80
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_HIGH 0x1e84
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_0 0x1e88
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_LOW 0x1e90
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_HIGH 0x1e94
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_1 0x1e98
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_LOW 0x1ea0
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_HIGH 0x1ea4
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_2 0x1ea8
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_LOW 0x1eb0
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_HIGH 0x1eb4
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_3 0x1eb8
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_LOW 0x1ec0
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_HIGH 0x1ec4
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_4 0x1ec8
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_5_LOW 0x1ed0
-#define MV64340_PCI_1_ACCESS_CONTROL_BASE_5_HIGH 0x1ed4
-#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_5 0x1ed8
-
-/****************************************/
-/* PCI Configuration Access Registers */
-/****************************************/
-
-#define MV64340_PCI_0_CONFIG_ADDR 0xcf8
-#define MV64340_PCI_0_CONFIG_DATA_VIRTUAL_REG 0xcfc
-#define MV64340_PCI_1_CONFIG_ADDR 0xc78
-#define MV64340_PCI_1_CONFIG_DATA_VIRTUAL_REG 0xc7c
-#define MV64340_PCI_0_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xc34
-#define MV64340_PCI_1_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xcb4
-
-/****************************************/
-/* PCI Error Report Registers */
-/****************************************/
-
-#define MV64340_PCI_0_SERR_MASK 0xc28
-#define MV64340_PCI_1_SERR_MASK 0xca8
-#define MV64340_PCI_0_ERROR_ADDR_LOW 0x1d40
-#define MV64340_PCI_1_ERROR_ADDR_LOW 0x1dc0
-#define MV64340_PCI_0_ERROR_ADDR_HIGH 0x1d44
-#define MV64340_PCI_1_ERROR_ADDR_HIGH 0x1dc4
-#define MV64340_PCI_0_ERROR_ATTRIBUTE 0x1d48
-#define MV64340_PCI_1_ERROR_ATTRIBUTE 0x1dc8
-#define MV64340_PCI_0_ERROR_COMMAND 0x1d50
-#define MV64340_PCI_1_ERROR_COMMAND 0x1dd0
-#define MV64340_PCI_0_ERROR_CAUSE 0x1d58
-#define MV64340_PCI_1_ERROR_CAUSE 0x1dd8
-#define MV64340_PCI_0_ERROR_MASK 0x1d5c
-#define MV64340_PCI_1_ERROR_MASK 0x1ddc
-
-/****************************************/
-/* PCI Debug Registers */
-/****************************************/
-
-#define MV64340_PCI_0_MMASK 0X1D24
-#define MV64340_PCI_1_MMASK 0X1DA4
-
-/*********************************************/
-/* PCI Configuration, Function 0, Registers */
-/*********************************************/
-
-#define MV64340_PCI_DEVICE_AND_VENDOR_ID 0x000
-#define MV64340_PCI_STATUS_AND_COMMAND 0x004
-#define MV64340_PCI_CLASS_CODE_AND_REVISION_ID 0x008
-#define MV64340_PCI_BIST_HEADER_TYPE_LATENCY_TIMER_CACHE_LINE 0x00C
-
-#define MV64340_PCI_SCS_0_BASE_ADDR_LOW 0x010
-#define MV64340_PCI_SCS_0_BASE_ADDR_HIGH 0x014
-#define MV64340_PCI_SCS_1_BASE_ADDR_LOW 0x018
-#define MV64340_PCI_SCS_1_BASE_ADDR_HIGH 0x01C
-#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_LOW 0x020
-#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_HIGH 0x024
-#define MV64340_PCI_SUBSYSTEM_ID_AND_SUBSYSTEM_VENDOR_ID 0x02c
-#define MV64340_PCI_EXPANSION_ROM_BASE_ADDR_REG 0x030
-#define MV64340_PCI_CAPABILTY_LIST_POINTER 0x034
-#define MV64340_PCI_INTERRUPT_PIN_AND_LINE 0x03C
- /* capability list */
-#define MV64340_PCI_POWER_MANAGEMENT_CAPABILITY 0x040
-#define MV64340_PCI_POWER_MANAGEMENT_STATUS_AND_CONTROL 0x044
-#define MV64340_PCI_VPD_ADDR 0x048
-#define MV64340_PCI_VPD_DATA 0x04c
-#define MV64340_PCI_MSI_MESSAGE_CONTROL 0x050
-#define MV64340_PCI_MSI_MESSAGE_ADDR 0x054
-#define MV64340_PCI_MSI_MESSAGE_UPPER_ADDR 0x058
-#define MV64340_PCI_MSI_MESSAGE_DATA 0x05c
-#define MV64340_PCI_X_COMMAND 0x060
-#define MV64340_PCI_X_STATUS 0x064
-#define MV64340_PCI_COMPACT_PCI_HOT_SWAP 0x068
-
-/***********************************************/
-/* PCI Configuration, Function 1, Registers */
-/***********************************************/
-
-#define MV64340_PCI_SCS_2_BASE_ADDR_LOW 0x110
-#define MV64340_PCI_SCS_2_BASE_ADDR_HIGH 0x114
-#define MV64340_PCI_SCS_3_BASE_ADDR_LOW 0x118
-#define MV64340_PCI_SCS_3_BASE_ADDR_HIGH 0x11c
-#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_LOW 0x120
-#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_HIGH 0x124
-
-/***********************************************/
-/* PCI Configuration, Function 2, Registers */
-/***********************************************/
-
-#define MV64340_PCI_DEVCS_0_BASE_ADDR_LOW 0x210
-#define MV64340_PCI_DEVCS_0_BASE_ADDR_HIGH 0x214
-#define MV64340_PCI_DEVCS_1_BASE_ADDR_LOW 0x218
-#define MV64340_PCI_DEVCS_1_BASE_ADDR_HIGH 0x21c
-#define MV64340_PCI_DEVCS_2_BASE_ADDR_LOW 0x220
-#define MV64340_PCI_DEVCS_2_BASE_ADDR_HIGH 0x224
-
-/***********************************************/
-/* PCI Configuration, Function 3, Registers */
-/***********************************************/
-
-#define MV64340_PCI_DEVCS_3_BASE_ADDR_LOW 0x310
-#define MV64340_PCI_DEVCS_3_BASE_ADDR_HIGH 0x314
-#define MV64340_PCI_BOOT_CS_BASE_ADDR_LOW 0x318
-#define MV64340_PCI_BOOT_CS_BASE_ADDR_HIGH 0x31c
-#define MV64340_PCI_CPU_BASE_ADDR_LOW 0x220
-#define MV64340_PCI_CPU_BASE_ADDR_HIGH 0x224
-
-/***********************************************/
-/* PCI Configuration, Function 4, Registers */
-/***********************************************/
-
-#define MV64340_PCI_P2P_MEM0_BASE_ADDR_LOW 0x410
-#define MV64340_PCI_P2P_MEM0_BASE_ADDR_HIGH 0x414
-#define MV64340_PCI_P2P_MEM1_BASE_ADDR_LOW 0x418
-#define MV64340_PCI_P2P_MEM1_BASE_ADDR_HIGH 0x41c
-#define MV64340_PCI_P2P_I_O_BASE_ADDR 0x420
-#define MV64340_PCI_INTERNAL_REGS_I_O_MAPPED_BASE_ADDR 0x424
-
-/****************************************/
-/* Messaging Unit Registers (I2O) */
-/****************************************/
-
-#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_0_SIDE 0x010
-#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_0_SIDE 0x014
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_0_SIDE 0x018
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_0_SIDE 0x01C
-#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_0_SIDE 0x020
-#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x024
-#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x028
-#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_0_SIDE 0x02C
-#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x030
-#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x034
-#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x040
-#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x044
-#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_0_SIDE 0x050
-#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_0_SIDE 0x054
-#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x060
-#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x064
-#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x068
-#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x06C
-#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x070
-#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x074
-#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x0F8
-#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x0FC
-
-#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_1_SIDE 0x090
-#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_1_SIDE 0x094
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_1_SIDE 0x098
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_1_SIDE 0x09C
-#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_1_SIDE 0x0A0
-#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0A4
-#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0A8
-#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_1_SIDE 0x0AC
-#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0B0
-#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0B4
-#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C0
-#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C4
-#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_1_SIDE 0x0D0
-#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_1_SIDE 0x0D4
-#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0E0
-#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0E4
-#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x0E8
-#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x0EC
-#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0F0
-#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0F4
-#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x078
-#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x07C
-
-#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C10
-#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C14
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C18
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C1C
-#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU0_SIDE 0x1C20
-#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C24
-#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C28
-#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU0_SIDE 0x1C2C
-#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C30
-#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C34
-#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C40
-#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C44
-#define MV64340_I2O_QUEUE_CONTROL_REG_CPU0_SIDE 0x1C50
-#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU0_SIDE 0x1C54
-#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C60
-#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C64
-#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1C68
-#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1C6C
-#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C70
-#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C74
-#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1CF8
-#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1CFC
-#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C90
-#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C94
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C98
-#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C9C
-#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU1_SIDE 0x1CA0
-#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CA4
-#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CA8
-#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU1_SIDE 0x1CAC
-#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CB0
-#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CB4
-#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC0
-#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC4
-#define MV64340_I2O_QUEUE_CONTROL_REG_CPU1_SIDE 0x1CD0
-#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU1_SIDE 0x1CD4
-#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CE0
-#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CE4
-#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1CE8
-#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1CEC
-#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CF0
-#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CF4
-#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1C78
-#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1C7C
-
-/****************************************/
-/* Ethernet Unit Registers */
-/****************************************/
-
-/*******************************************/
-/* CUNIT Registers */
-/*******************************************/
-
- /* Address Decoding Register Map */
-
-#define MV64340_CUNIT_BASE_ADDR_REG0 0xf200
-#define MV64340_CUNIT_BASE_ADDR_REG1 0xf208
-#define MV64340_CUNIT_BASE_ADDR_REG2 0xf210
-#define MV64340_CUNIT_BASE_ADDR_REG3 0xf218
-#define MV64340_CUNIT_SIZE0 0xf204
-#define MV64340_CUNIT_SIZE1 0xf20c
-#define MV64340_CUNIT_SIZE2 0xf214
-#define MV64340_CUNIT_SIZE3 0xf21c
-#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG0 0xf240
-#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG1 0xf244
-#define MV64340_CUNIT_BASE_ADDR_ENABLE_REG 0xf250
-#define MV64340_MPSC0_ACCESS_PROTECTION_REG 0xf254
-#define MV64340_MPSC1_ACCESS_PROTECTION_REG 0xf258
-#define MV64340_CUNIT_INTERNAL_SPACE_BASE_ADDR_REG 0xf25C
-
- /* Error Report Registers */
-
-#define MV64340_CUNIT_INTERRUPT_CAUSE_REG 0xf310
-#define MV64340_CUNIT_INTERRUPT_MASK_REG 0xf314
-#define MV64340_CUNIT_ERROR_ADDR 0xf318
-
- /* Cunit Control Registers */
-
-#define MV64340_CUNIT_ARBITER_CONTROL_REG 0xf300
-#define MV64340_CUNIT_CONFIG_REG 0xb40c
-#define MV64340_CUNIT_CRROSBAR_TIMEOUT_REG 0xf304
-
- /* Cunit Debug Registers */
-
-#define MV64340_CUNIT_DEBUG_LOW 0xf340
-#define MV64340_CUNIT_DEBUG_HIGH 0xf344
-#define MV64340_CUNIT_MMASK 0xf380
-
- /* MPSCs Clocks Routing Registers */
-
-#define MV64340_MPSC_ROUTING_REG 0xb400
-#define MV64340_MPSC_RX_CLOCK_ROUTING_REG 0xb404
-#define MV64340_MPSC_TX_CLOCK_ROUTING_REG 0xb408
-
- /* MPSCs Interrupts Registers */
-
-#define MV64340_MPSC_CAUSE_REG(port) (0xb804 + (port<<3))
-#define MV64340_MPSC_MASK_REG(port) (0xb884 + (port<<3))
-
-#define MV64340_MPSC_MAIN_CONFIG_LOW(port) (0x8000 + (port<<12))
-#define MV64340_MPSC_MAIN_CONFIG_HIGH(port) (0x8004 + (port<<12))
-#define MV64340_MPSC_PROTOCOL_CONFIG(port) (0x8008 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG1(port) (0x800c + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG2(port) (0x8010 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG3(port) (0x8014 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG4(port) (0x8018 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG5(port) (0x801c + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG6(port) (0x8020 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG7(port) (0x8024 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG8(port) (0x8028 + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG9(port) (0x802c + (port<<12))
-#define MV64340_MPSC_CHANNEL_REG10(port) (0x8030 + (port<<12))
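Each MPSC port occupies its own 4 KiB window, which is what the (port<<12) term in the channel-register macros encodes; the interrupt macros use an 8-byte stride instead. A worked expansion, for illustration only:

/*
 * MV64340_MPSC_MAIN_CONFIG_LOW(0) == 0x8000 + (0 << 12) == 0x8000
 * MV64340_MPSC_MAIN_CONFIG_LOW(1) == 0x8000 + (1 << 12) == 0x9000
 * MV64340_MPSC_CAUSE_REG(1)       == 0xb804 + (1 << 3)  == 0xb80c
 */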
-
- /* MPSC0 Registers */
-
-
-/***************************************/
-/* SDMA Registers */
-/***************************************/
-
-#define MV64340_SDMA_CONFIG_REG(channel) (0x4000 + (channel<<13))
-#define MV64340_SDMA_COMMAND_REG(channel) (0x4008 + (channel<<13))
-#define MV64340_SDMA_CURRENT_RX_DESCRIPTOR_POINTER(channel) (0x4810 + (channel<<13))
-#define MV64340_SDMA_CURRENT_TX_DESCRIPTOR_POINTER(channel) (0x4c10 + (channel<<13))
-#define MV64340_SDMA_FIRST_TX_DESCRIPTOR_POINTER(channel) (0x4c14 + (channel<<13))
-
-#define MV64340_SDMA_CAUSE_REG 0xb800
-#define MV64340_SDMA_MASK_REG 0xb880
-
-/* BRG Interrupts */
-
-#define MV64340_BRG_CONFIG_REG(brg) (0xb200 + (brg<<3))
-#define MV64340_BRG_BAUDE_TUNING_REG(brg) (0xb208 + (brg<<3))
-#define MV64340_BRG_CAUSE_REG 0xb834
-#define MV64340_BRG_MASK_REG 0xb8b4
-
-/****************************************/
-/* DMA Channel Control */
-/****************************************/
-
-#define MV64340_DMA_CHANNEL0_CONTROL 0x840
-#define MV64340_DMA_CHANNEL0_CONTROL_HIGH 0x880
-#define MV64340_DMA_CHANNEL1_CONTROL 0x844
-#define MV64340_DMA_CHANNEL1_CONTROL_HIGH 0x884
-#define MV64340_DMA_CHANNEL2_CONTROL 0x848
-#define MV64340_DMA_CHANNEL2_CONTROL_HIGH 0x888
-#define MV64340_DMA_CHANNEL3_CONTROL 0x84C
-#define MV64340_DMA_CHANNEL3_CONTROL_HIGH 0x88C
-
-
-/****************************************/
-/* IDMA Registers */
-/****************************************/
-
-#define MV64340_DMA_CHANNEL0_BYTE_COUNT 0x800
-#define MV64340_DMA_CHANNEL1_BYTE_COUNT 0x804
-#define MV64340_DMA_CHANNEL2_BYTE_COUNT 0x808
-#define MV64340_DMA_CHANNEL3_BYTE_COUNT 0x80C
-#define MV64340_DMA_CHANNEL0_SOURCE_ADDR 0x810
-#define MV64340_DMA_CHANNEL1_SOURCE_ADDR 0x814
-#define MV64340_DMA_CHANNEL2_SOURCE_ADDR 0x818
-#define MV64340_DMA_CHANNEL3_SOURCE_ADDR 0x81c
-#define MV64340_DMA_CHANNEL0_DESTINATION_ADDR 0x820
-#define MV64340_DMA_CHANNEL1_DESTINATION_ADDR 0x824
-#define MV64340_DMA_CHANNEL2_DESTINATION_ADDR 0x828
-#define MV64340_DMA_CHANNEL3_DESTINATION_ADDR 0x82C
-#define MV64340_DMA_CHANNEL0_NEXT_DESCRIPTOR_POINTER 0x830
-#define MV64340_DMA_CHANNEL1_NEXT_DESCRIPTOR_POINTER 0x834
-#define MV64340_DMA_CHANNEL2_NEXT_DESCRIPTOR_POINTER 0x838
-#define MV64340_DMA_CHANNEL3_NEXT_DESCRIPTOR_POINTER 0x83C
-#define MV64340_DMA_CHANNEL0_CURRENT_DESCRIPTOR_POINTER 0x870
-#define MV64340_DMA_CHANNEL1_CURRENT_DESCRIPTOR_POINTER 0x874
-#define MV64340_DMA_CHANNEL2_CURRENT_DESCRIPTOR_POINTER 0x878
-#define MV64340_DMA_CHANNEL3_CURRENT_DESCRIPTOR_POINTER 0x87C
-
- /* IDMA Address Decoding Base Address Registers */
-
-#define MV64340_DMA_BASE_ADDR_REG0 0xa00
-#define MV64340_DMA_BASE_ADDR_REG1 0xa08
-#define MV64340_DMA_BASE_ADDR_REG2 0xa10
-#define MV64340_DMA_BASE_ADDR_REG3 0xa18
-#define MV64340_DMA_BASE_ADDR_REG4 0xa20
-#define MV64340_DMA_BASE_ADDR_REG5 0xa28
-#define MV64340_DMA_BASE_ADDR_REG6 0xa30
-#define MV64340_DMA_BASE_ADDR_REG7 0xa38
-
- /* IDMA Address Decoding Size Address Register */
-
-#define MV64340_DMA_SIZE_REG0 0xa04
-#define MV64340_DMA_SIZE_REG1 0xa0c
-#define MV64340_DMA_SIZE_REG2 0xa14
-#define MV64340_DMA_SIZE_REG3 0xa1c
-#define MV64340_DMA_SIZE_REG4 0xa24
-#define MV64340_DMA_SIZE_REG5 0xa2c
-#define MV64340_DMA_SIZE_REG6 0xa34
-#define MV64340_DMA_SIZE_REG7 0xa3C
-
- /* IDMA Address Decoding High Address Remap and Access
- Protection Registers */
-
-#define MV64340_DMA_HIGH_ADDR_REMAP_REG0 0xa60
-#define MV64340_DMA_HIGH_ADDR_REMAP_REG1 0xa64
-#define MV64340_DMA_HIGH_ADDR_REMAP_REG2 0xa68
-#define MV64340_DMA_HIGH_ADDR_REMAP_REG3 0xa6C
-#define MV64340_DMA_BASE_ADDR_ENABLE_REG 0xa80
-#define MV64340_DMA_CHANNEL0_ACCESS_PROTECTION_REG 0xa70
-#define MV64340_DMA_CHANNEL1_ACCESS_PROTECTION_REG 0xa74
-#define MV64340_DMA_CHANNEL2_ACCESS_PROTECTION_REG 0xa78
-#define MV64340_DMA_CHANNEL3_ACCESS_PROTECTION_REG 0xa7c
-#define MV64340_DMA_ARBITER_CONTROL 0x860
-#define MV64340_DMA_CROSS_BAR_TIMEOUT 0x8d0
-
- /* IDMA Headers Retarget Registers */
-
-#define MV64340_DMA_HEADERS_RETARGET_CONTROL 0xa84
-#define MV64340_DMA_HEADERS_RETARGET_BASE 0xa88
-
- /* IDMA Interrupt Register */
-
-#define MV64340_DMA_INTERRUPT_CAUSE_REG 0x8c0
-#define MV64340_DMA_INTERRUPT_CAUSE_MASK 0x8c4
-#define MV64340_DMA_ERROR_ADDR 0x8c8
-#define MV64340_DMA_ERROR_SELECT 0x8cc
-
- /* IDMA Debug Register ( for internal use ) */
-
-#define MV64340_DMA_DEBUG_LOW 0x8e0
-#define MV64340_DMA_DEBUG_HIGH 0x8e4
-#define MV64340_DMA_SPARE 0xA8C
-
-/****************************************/
-/* Timer_Counter */
-/****************************************/
-
-#define MV64340_TIMER_COUNTER0 0x850
-#define MV64340_TIMER_COUNTER1 0x854
-#define MV64340_TIMER_COUNTER2 0x858
-#define MV64340_TIMER_COUNTER3 0x85C
-#define MV64340_TIMER_COUNTER_0_3_CONTROL 0x864
-#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_CAUSE 0x868
-#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_MASK 0x86c
-
-/****************************************/
-/* Watchdog registers */
-/****************************************/
-
-#define MV64340_WATCHDOG_CONFIG_REG 0xb410
-#define MV64340_WATCHDOG_VALUE_REG 0xb414
-
-/****************************************/
-/* I2C Registers */
-/****************************************/
-
-#define MV64XXX_I2C_OFFSET 0xc000
-#define MV64XXX_I2C_REG_BLOCK_SIZE 0x0020
-
-/****************************************/
-/* GPP Interface Registers */
-/****************************************/
-
-#define MV64340_GPP_IO_CONTROL 0xf100
-#define MV64340_GPP_LEVEL_CONTROL 0xf110
-#define MV64340_GPP_VALUE 0xf104
-#define MV64340_GPP_INTERRUPT_CAUSE 0xf108
-#define MV64340_GPP_INTERRUPT_MASK0 0xf10c
-#define MV64340_GPP_INTERRUPT_MASK1 0xf114
-#define MV64340_GPP_VALUE_SET 0xf118
-#define MV64340_GPP_VALUE_CLEAR 0xf11c
-
-/****************************************/
-/* Interrupt Controller Registers */
-/****************************************/
-
-/****************************************/
-/* Interrupts */
-/****************************************/
-
-#define MV64340_MAIN_INTERRUPT_CAUSE_LOW 0x004
-#define MV64340_MAIN_INTERRUPT_CAUSE_HIGH 0x00c
-#define MV64340_CPU_INTERRUPT0_MASK_LOW 0x014
-#define MV64340_CPU_INTERRUPT0_MASK_HIGH 0x01c
-#define MV64340_CPU_INTERRUPT0_SELECT_CAUSE 0x024
-#define MV64340_CPU_INTERRUPT1_MASK_LOW 0x034
-#define MV64340_CPU_INTERRUPT1_MASK_HIGH 0x03c
-#define MV64340_CPU_INTERRUPT1_SELECT_CAUSE 0x044
-#define MV64340_INTERRUPT0_MASK_0_LOW 0x054
-#define MV64340_INTERRUPT0_MASK_0_HIGH 0x05c
-#define MV64340_INTERRUPT0_SELECT_CAUSE 0x064
-#define MV64340_INTERRUPT1_MASK_0_LOW 0x074
-#define MV64340_INTERRUPT1_MASK_0_HIGH 0x07c
-#define MV64340_INTERRUPT1_SELECT_CAUSE 0x084
-
-/****************************************/
-/* MPP Interface Registers */
-/****************************************/
-
-#define MV64340_MPP_CONTROL0 0xf000
-#define MV64340_MPP_CONTROL1 0xf004
-#define MV64340_MPP_CONTROL2 0xf008
-#define MV64340_MPP_CONTROL3 0xf00c
-
-/****************************************/
-/* Serial Initialization registers */
-/****************************************/
-
-#define MV64340_SERIAL_INIT_LAST_DATA 0xf324
-#define MV64340_SERIAL_INIT_CONTROL 0xf328
-#define MV64340_SERIAL_INIT_STATUS 0xf32c
-
-extern void mv64340_irq_init(unsigned int base);
-
-/* Watchdog Platform Device, Driver Data */
-#define MV64x60_WDT_NAME "mv64x60_wdt"
-
-struct mv64x60_wdt_pdata {
- int timeout; /* watchdog expiry in seconds, default 10 */
- int bus_clk; /* bus clock in MHz, default 133 */
-};
-
-#endif /* __ASM_MV643XX_H */
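The register map deleted above was consumed as plain offsets from an ioremap()ed bridge window. A minimal sketch of that access pattern, assuming a hypothetical mv_base mapping (the helper names are illustrative, not part of the original header):

#include <linux/io.h>
#include <linux/types.h>

static void __iomem *mv_base;	/* assumed: ioremap() of the bridge registers */

static u32 mv_read(u32 offset)
{
	return readl(mv_base + offset);
}

static void mv_write(u32 offset, u32 data)
{
	writel(data, mv_base + offset);
}

/* e.g. sampling the GPP pins via an offset defined above */
static u32 mv_gpp_value(void)
{
	return mv_read(MV64340_GPP_VALUE);
}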
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 3682ae75c7aa..145169be2ed8 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -8,6 +8,7 @@
#include <linux/mbus.h>
#include <linux/if_ether.h>
+#include <linux/phy.h>
#define MV643XX_ETH_SHARED_NAME "mv643xx_eth"
#define MV643XX_ETH_NAME "mv643xx_eth_port"
@@ -59,6 +60,7 @@ struct mv643xx_eth_platform_data {
*/
int speed;
int duplex;
+ phy_interface_t interface;
/*
* How many RX/TX queues to use.
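The new interface field lets board code pin the PHY connection type alongside speed and duplex. A hedged sketch of platform data using it (values illustrative):

#include <linux/ethtool.h>
#include <linux/mv643xx_eth.h>
#include <linux/phy.h>

static struct mv643xx_eth_platform_data example_eth_pdata = {
	.speed		= SPEED_1000,
	.duplex		= DUPLEX_FULL,
	.interface	= PHY_INTERFACE_MODE_RGMII_ID,
};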
diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h
deleted file mode 100644
index 90a803aa42e8..000000000000
--- a/include/linux/n_r3964.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/* r3964 linediscipline for linux
- *
- * -----------------------------------------------------------
- * Copyright by
- * Philips Automation Projects
- * Kassel (Germany)
- * -----------------------------------------------------------
- * This software may be used and distributed according to the terms of
- * the GNU General Public License, incorporated herein by reference.
- *
- * Author:
- * L. Haag
- *
- * $Log: r3964.h,v $
- * Revision 1.4 2005/12/21 19:54:24 Kurt Huwig <kurt huwig de>
- * Fixed HZ usage on 2.6 kernels
- * Removed unnecessary include
- *
- * Revision 1.3 2001/03/18 13:02:24 dwmw2
- * Fix timer usage, use spinlocks properly.
- *
- * Revision 1.2 2001/03/18 12:53:15 dwmw2
- * Merge changes in 2.4.2
- *
- * Revision 1.1.1.1 1998/10/13 16:43:14 dwmw2
- * This'll screw the version control
- *
- * Revision 1.6 1998/09/30 00:40:38 dwmw2
- * Updated to use kernel's N_R3964 if available
- *
- * Revision 1.4 1998/04/02 20:29:44 lhaag
- * select, blocking, ...
- *
- * Revision 1.3 1998/02/12 18:58:43 root
- * fixed some memory leaks
- * calculation of checksum characters
- *
- * Revision 1.2 1998/02/07 13:03:17 root
- * ioctl read_telegram
- *
- * Revision 1.1 1998/02/06 19:19:43 root
- * Initial revision
- *
- *
- */
-#ifndef __LINUX_N_R3964_H__
-#define __LINUX_N_R3964_H__
-
-
-#include <linux/param.h>
-#include <uapi/linux/n_r3964.h>
-
-/*
- * Common ascii handshake characters:
- */
-
-#define STX 0x02
-#define ETX 0x03
-#define DLE 0x10
-#define NAK 0x15
-
-/*
- * Timeouts (from milliseconds to jiffies)
- */
-
-#define R3964_TO_QVZ ((550)*HZ/1000)
-#define R3964_TO_ZVZ ((220)*HZ/1000)
-#define R3964_TO_NO_BUF ((400)*HZ/1000)
-#define R3964_NO_TX_ROOM ((100)*HZ/1000)
-#define R3964_TO_RX_PANIC ((4000)*HZ/1000)
-#define R3964_MAX_RETRIES 5
-
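These macros hand-roll the milliseconds-to-jiffies conversion so the values track HZ. In current kernel style the same constants would be spelled with msecs_to_jiffies(); a hedged equivalent (identifiers illustrative, and note msecs_to_jiffies() rounds up where the macros truncate):

#include <linux/jiffies.h>

#define R3964_TO_QVZ_J		msecs_to_jiffies(550)
#define R3964_TO_ZVZ_J		msecs_to_jiffies(220)
#define R3964_TO_NO_BUF_J	msecs_to_jiffies(400)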
-
-enum { R3964_IDLE,
- R3964_TX_REQUEST, R3964_TRANSMITTING,
- R3964_WAIT_ZVZ_BEFORE_TX_RETRY, R3964_WAIT_FOR_TX_ACK,
- R3964_WAIT_FOR_RX_BUF,
- R3964_RECEIVING, R3964_WAIT_FOR_BCC, R3964_WAIT_FOR_RX_REPEAT
- };
-
-/*
- * All open file-handles are 'clients' and are stored in a linked list:
- */
-
-struct r3964_message;
-
-struct r3964_client_info {
- spinlock_t lock;
- struct pid *pid;
- unsigned int sig_flags;
-
- struct r3964_client_info *next;
-
- struct r3964_message *first_msg;
- struct r3964_message *last_msg;
- struct r3964_block_header *next_block_to_read;
- int msg_count;
-};
-
-
-
-struct r3964_block_header;
-
-/* internal version of client_message: */
-struct r3964_message {
- int msg_id;
- int arg;
- int error_code;
- struct r3964_block_header *block;
- struct r3964_message *next;
-};
-
-/*
- * Header of received block in rx_buf/tx_buf:
- */
-
-struct r3964_block_header
-{
- unsigned int length; /* length in chars without header */
- unsigned char *data; /* usually data is located
- immediately behind this struct */
- unsigned int locks; /* only used in rx_buffer */
-
- struct r3964_block_header *next;
- struct r3964_client_info *owner; /* =NULL in rx_buffer */
-};
-
-/*
- * If rx_buf hasn't enough space to store R3964_MTU chars,
- * we will reject all incoming STX-requests by sending NAK.
- */
-
-#define RX_BUF_SIZE 4000
-#define TX_BUF_SIZE 4000
-#define R3964_MAX_BLOCKS_IN_RX_QUEUE 100
-
-#define R3964_PARITY 0x0001
-#define R3964_FRAME 0x0002
-#define R3964_OVERRUN 0x0004
-#define R3964_UNKNOWN 0x0008
-#define R3964_BREAK 0x0010
-#define R3964_CHECKSUM 0x0020
-#define R3964_ERROR 0x003f
-#define R3964_BCC 0x4000
-#define R3964_DEBUG 0x8000
-
-
-struct r3964_info {
- spinlock_t lock;
- struct tty_struct *tty;
- unsigned char priority;
- unsigned char *rx_buf; /* ring buffer */
- unsigned char *tx_buf;
-
- struct r3964_block_header *rx_first;
- struct r3964_block_header *rx_last;
- struct r3964_block_header *tx_first;
- struct r3964_block_header *tx_last;
- unsigned int tx_position;
- unsigned int rx_position;
- unsigned char last_rx;
- unsigned char bcc;
- unsigned int blocks_in_rx_queue;
-
- struct mutex read_lock; /* serialize r3964_read */
-
- struct r3964_client_info *firstClient;
- unsigned int state;
- unsigned int flags;
-
- struct timer_list tmr;
- int nRetry;
-};
-
-#endif
diff --git a/include/linux/namei.h b/include/linux/namei.h
index b9605b2b46e7..58600cf234bc 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -7,6 +7,7 @@
#include <linux/path.h>
#include <linux/fcntl.h>
#include <linux/errno.h>
+#include <linux/fs_struct.h>
enum { MAX_NESTED_LINKS = 8 };
@@ -18,68 +19,191 @@ enum { MAX_NESTED_LINKS = 8 };
enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT};
/* pathwalk mode */
-#define LOOKUP_FOLLOW 0x0001 /* follow links at the end */
-#define LOOKUP_DIRECTORY 0x0002 /* require a directory */
-#define LOOKUP_AUTOMOUNT 0x0004 /* force terminal automount */
-#define LOOKUP_EMPTY 0x4000 /* accept empty path [user_... only] */
-#define LOOKUP_DOWN 0x8000 /* follow mounts in the starting point */
-#define LOOKUP_MOUNTPOINT 0x0080 /* follow mounts in the end */
-
-#define LOOKUP_REVAL 0x0020 /* tell ->d_revalidate() to trust no cache */
-#define LOOKUP_RCU 0x0040 /* RCU pathwalk mode; semi-internal */
+#define LOOKUP_FOLLOW BIT(0) /* follow links at the end */
+#define LOOKUP_DIRECTORY BIT(1) /* require a directory */
+#define LOOKUP_AUTOMOUNT BIT(2) /* force terminal automount */
+#define LOOKUP_EMPTY BIT(3) /* accept empty path [user_... only] */
+#define LOOKUP_LINKAT_EMPTY BIT(4) /* Linkat request with empty path. */
+#define LOOKUP_DOWN BIT(5) /* follow mounts in the starting point */
+#define LOOKUP_MOUNTPOINT BIT(6) /* follow mounts in the end */
+#define LOOKUP_REVAL BIT(7) /* tell ->d_revalidate() to trust no cache */
+#define LOOKUP_RCU BIT(8) /* RCU pathwalk mode; semi-internal */
+#define LOOKUP_CACHED BIT(9) /* Only do cached lookup */
+#define LOOKUP_PARENT BIT(10) /* Looking up final parent in path */
+/* 5 spare bits for pathwalk */
/* These tell filesystem methods that we are dealing with the final component... */
-#define LOOKUP_OPEN 0x0100 /* ... in open */
-#define LOOKUP_CREATE 0x0200 /* ... in object creation */
-#define LOOKUP_EXCL 0x0400 /* ... in exclusive creation */
-#define LOOKUP_RENAME_TARGET 0x0800 /* ... in destination of rename() */
+#define LOOKUP_OPEN BIT(16) /* ... in open */
+#define LOOKUP_CREATE BIT(17) /* ... in object creation */
+#define LOOKUP_EXCL BIT(18) /* ... in target must not exist */
+#define LOOKUP_RENAME_TARGET BIT(19) /* ... in destination of rename() */
-/* internal use only */
-#define LOOKUP_PARENT 0x0010
-#define LOOKUP_JUMPED 0x1000
-#define LOOKUP_ROOT 0x2000
-#define LOOKUP_ROOT_GRABBED 0x0008
+/* 4 spare bits for intent */
/* Scoping flags for lookup. */
-#define LOOKUP_NO_SYMLINKS 0x010000 /* No symlink crossing. */
-#define LOOKUP_NO_MAGICLINKS 0x020000 /* No nd_jump_link() crossing. */
-#define LOOKUP_NO_XDEV 0x040000 /* No mountpoint crossing. */
-#define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */
-#define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */
-#define LOOKUP_CACHED 0x200000 /* Only do cached lookup */
+#define LOOKUP_NO_SYMLINKS BIT(24) /* No symlink crossing. */
+#define LOOKUP_NO_MAGICLINKS BIT(25) /* No nd_jump_link() crossing. */
+#define LOOKUP_NO_XDEV BIT(26) /* No mountpoint crossing. */
+#define LOOKUP_BENEATH BIT(27) /* No escaping from starting point. */
+#define LOOKUP_IN_ROOT BIT(28) /* Treat dirfd as fs root. */
/* LOOKUP_* flags which do scope-related checks based on the dirfd. */
#define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT)
+/* 3 spare bits for scoping */
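The switch from hex literals to BIT() only renumbers the flags: BIT(n) expands to (1UL << (n)), so each flag remains a single-bit mask that is combined and tested exactly as before. A minimal sketch (the function is illustrative):

#include <linux/bits.h>

static bool lookup_is_scoped(unsigned int flags)
{
	/* LOOKUP_IS_SCOPED == LOOKUP_BENEATH | LOOKUP_IN_ROOT */
	return flags & LOOKUP_IS_SCOPED;
}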
extern int path_pts(struct path *path);
-extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
+extern int user_path_at(int, const char __user *, unsigned, struct path *);
-static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
- struct path *path)
+struct dentry *lookup_one_qstr_excl(const struct qstr *name,
+ struct dentry *base,
+ unsigned int flags);
+extern int kern_path(const char *, unsigned, struct path *);
+struct dentry *kern_path_parent(const char *name, struct path *parent);
+
+extern struct dentry *start_creating_path(int, const char *, struct path *, unsigned int);
+extern struct dentry *start_creating_user_path(int, const char __user *, struct path *, unsigned int);
+extern void end_creating_path(const struct path *, struct dentry *);
+extern struct dentry *start_removing_path(const char *, struct path *);
+extern struct dentry *start_removing_user_path_at(int, const char __user *, struct path *);
+static inline void end_removing_path(const struct path *path, struct dentry *dentry)
{
- return user_path_at_empty(dfd, name, flags, path, NULL);
+ end_creating_path(path, dentry);
+}
+int vfs_path_parent_lookup(struct filename *filename, unsigned int flags,
+ struct path *parent, struct qstr *last, int *type,
+ const struct path *root);
+int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *,
+ unsigned int, struct path *);
+
+extern struct dentry *try_lookup_noperm(struct qstr *, struct dentry *);
+extern struct dentry *lookup_noperm(struct qstr *, struct dentry *);
+extern struct dentry *lookup_noperm_unlocked(struct qstr *, struct dentry *);
+extern struct dentry *lookup_noperm_positive_unlocked(struct qstr *, struct dentry *);
+struct dentry *lookup_one(struct mnt_idmap *, struct qstr *, struct dentry *);
+struct dentry *lookup_one_unlocked(struct mnt_idmap *idmap,
+ struct qstr *name, struct dentry *base);
+struct dentry *lookup_one_positive_unlocked(struct mnt_idmap *idmap,
+ struct qstr *name,
+ struct dentry *base);
+struct dentry *lookup_one_positive_killable(struct mnt_idmap *idmap,
+ struct qstr *name,
+ struct dentry *base);
+
+struct dentry *start_creating(struct mnt_idmap *idmap, struct dentry *parent,
+ struct qstr *name);
+struct dentry *start_removing(struct mnt_idmap *idmap, struct dentry *parent,
+ struct qstr *name);
+struct dentry *start_creating_killable(struct mnt_idmap *idmap,
+ struct dentry *parent,
+ struct qstr *name);
+struct dentry *start_removing_killable(struct mnt_idmap *idmap,
+ struct dentry *parent,
+ struct qstr *name);
+struct dentry *start_creating_noperm(struct dentry *parent, struct qstr *name);
+struct dentry *start_removing_noperm(struct dentry *parent, struct qstr *name);
+struct dentry *start_creating_dentry(struct dentry *parent,
+ struct dentry *child);
+struct dentry *start_removing_dentry(struct dentry *parent,
+ struct dentry *child);
+
+/**
+ * end_creating - finish action started with start_creating
+ * @child: dentry returned by start_creating() or vfs_mkdir()
+ *
+ * Unlock and release the child. This can be called after
+ * start_creating() whether that function succeeded or not,
+ * though it is not needed on failure.
+ *
+ * If vfs_mkdir() was called, the value returned from that function
+ * should be given for @child rather than the original dentry, as
+ * vfs_mkdir() may have provided a new dentry.
+ *
+ * If vfs_mkdir() was not called, @child is simply the dentry that
+ * start_creating() returned.
+ */
+static inline void end_creating(struct dentry *child)
+{
+ end_dirop(child);
}
-extern int kern_path(const char *, unsigned, struct path *);
-
-extern struct dentry *kern_path_create(int, const char *, struct path *, unsigned int);
-extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
-extern void done_path_create(struct path *, struct dentry *);
-extern struct dentry *kern_path_locked(const char *, struct path *);
+/**
+ * end_creating_keep - finish action started with start_creating() and return result
+ * @child: dentry returned by start_creating() or vfs_mkdir()
+ *
+ * Unlock and return the child. This can be called after
+ * start_creating() whether that function succeeded or not,
+ * though it is not needed on failure.
+ *
+ * If vfs_mkdir() was called, the value returned from that function
+ * should be given for @child rather than the original dentry, as
+ * vfs_mkdir() may have provided a new dentry.
+ *
+ * Returns: @child, which may be a dentry or an error.
+ */
+static inline struct dentry *end_creating_keep(struct dentry *child)
+{
+ if (!IS_ERR(child))
+ dget(child);
+ end_dirop(child);
+ return child;
+}
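Taken together, the comments above imply a creation sequence along these lines; this is a hedged sketch following the doc text here, with the vfs_mkdir() signature assumed rather than verified:

static int example_mkdir(struct mnt_idmap *idmap, struct dentry *parent,
			 struct qstr *name, umode_t mode)
{
	struct dentry *child = start_creating(idmap, parent, name);

	if (IS_ERR(child))
		return PTR_ERR(child);	/* end_creating() not needed on failure */

	/* vfs_mkdir() may return a different dentry; pass *that* on. */
	child = vfs_mkdir(idmap, d_inode(parent), child, mode);
	end_creating(child);
	return IS_ERR(child) ? PTR_ERR(child) : 0;
}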
-extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int);
-extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
-extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
-extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int);
+/**
+ * end_removing - finish action started with start_removing
+ * @child: dentry returned by start_removing()
+ *
+ * Unlock and release the child.
+ *
+ * This is identical to end_dirop(). It can be passed the result of
+ * start_removing() whether that was successful or not, but it is not
+ * needed if start_removing() failed.
+ */
+static inline void end_removing(struct dentry *child)
+{
+ end_dirop(child);
+}
extern int follow_down_one(struct path *);
-extern int follow_down(struct path *);
+extern int follow_down(struct path *path, unsigned int flags);
extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
+extern struct dentry *lock_rename_child(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
+int start_renaming(struct renamedata *rd, int lookup_flags,
+ struct qstr *old_last, struct qstr *new_last);
+int start_renaming_dentry(struct renamedata *rd, int lookup_flags,
+ struct dentry *old_dentry, struct qstr *new_last);
+int start_renaming_two_dentries(struct renamedata *rd,
+ struct dentry *old_dentry, struct dentry *new_dentry);
+void end_renaming(struct renamedata *rd);
+
+/**
+ * mode_strip_umask - handle vfs umask stripping
+ * @dir: parent directory of the new inode
+ * @mode: mode of the new inode to be created in @dir
+ *
+ * In most filesystems, umask stripping depends on whether or not the
+ * filesystem supports POSIX ACLs. If the filesystem doesn't support them,
+ * umask stripping is done directly here. If the filesystem does support
+ * POSIX ACLs, umask stripping is deferred until the filesystem calls
+ * posix_acl_create().
+ *
+ * Some filesystems (like NFSv4) also want to avoid umask stripping by the
+ * VFS, but don't support POSIX ACLs. Those filesystems can set SB_I_NOUMASK
+ * to get this effect without declaring that they support POSIX ACLs.
+ *
+ * Returns: mode
+ */
+static inline umode_t __must_check mode_strip_umask(const struct inode *dir, umode_t mode)
+{
+ if (!IS_POSIXACL(dir) && !(dir->i_sb->s_iflags & SB_I_NOUMASK))
+ mode &= ~current_umask();
+ return mode;
+}
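An illustrative caller, nothing more: a filesystem initializing a new inode would apply the helper before committing the mode.

static void example_init_mode(struct inode *inode, const struct inode *dir,
			      umode_t mode)
{
	inode->i_mode = mode_strip_umask(dir, mode);
}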
-extern int __must_check nd_jump_link(struct path *path);
+extern int __must_check nd_jump_link(const struct path *path);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
{
@@ -99,7 +223,7 @@ static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
static inline bool
retry_estale(const long error, const unsigned int flags)
{
- return error == -ESTALE && !(flags & LOOKUP_REVAL);
+ return unlikely(error == -ESTALE && !(flags & LOOKUP_REVAL));
}
#endif /* _LINUX_NAMEI_H */
diff --git a/include/linux/nd.h b/include/linux/nd.h
index ee9ad76afbba..fa099e295f78 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -8,6 +8,7 @@
#include <linux/ndctl.h>
#include <linux/device.h>
#include <linux/badblocks.h>
+#include <linux/perf_event.h>
enum nvdimm_event {
NVDIMM_REVALIDATE_POISON,
@@ -23,6 +24,57 @@ enum nvdimm_claim_class {
NVDIMM_CCLASS_UNKNOWN,
};
+#define NVDIMM_EVENT_VAR(_id) event_attr_##_id
+#define NVDIMM_EVENT_PTR(_id) (&event_attr_##_id.attr.attr)
+
+#define NVDIMM_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR(_name, NVDIMM_EVENT_VAR(_id), _id, \
+ nvdimm_events_sysfs_show)
+
+/* Event attribute array index */
+#define NVDIMM_PMU_FORMAT_ATTR 0
+#define NVDIMM_PMU_EVENT_ATTR 1
+#define NVDIMM_PMU_CPUMASK_ATTR 2
+#define NVDIMM_PMU_NULL_ATTR 3
+
+/**
+ * struct nvdimm_pmu - data structure for nvdimm perf driver
+ * @pmu: pmu data structure for nvdimm performance stats.
+ * @dev: nvdimm device pointer.
+ * @cpu: designated cpu for counter access.
+ * @node: node for cpu hotplug notifier link.
+ * @cpuhp_state: state for cpu hotplug notification.
+ * @arch_cpumask: cpumask to get designated cpu for counter access.
+ */
+struct nvdimm_pmu {
+ struct pmu pmu;
+ struct device *dev;
+ int cpu;
+ struct hlist_node node;
+ enum cpuhp_state cpuhp_state;
+ /* cpumask provided by arch/platform specific code */
+ struct cpumask arch_cpumask;
+};
+
+struct platform_device;
+
+#ifdef CONFIG_PERF_EVENTS
+extern ssize_t nvdimm_events_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page);
+
+int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev);
+void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu);
+
+#else
+static inline int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev)
+{
+ return -ENXIO;
+}
+
+static inline void unregister_nvdimm_pmu(struct nvdimm_pmu *nd_pmu) { }
+#endif
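A platform driver wiring up one of these PMUs fills in struct nvdimm_pmu and hands it to register_nvdimm_pmu(); a hedged sketch, with the perf callbacks assumed to be implemented elsewhere:

static int example_register(struct nvdimm_pmu *nd_pmu,
			    struct platform_device *pdev)
{
	nd_pmu->pmu.task_ctx_nr = perf_invalid_context;
	/* nd_pmu->pmu.event_init/add/del/read assumed set by the caller */
	return register_nvdimm_pmu(nd_pmu, pdev);
}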
+
struct nd_device_driver {
struct device_driver drv;
unsigned long type;
@@ -32,11 +84,7 @@ struct nd_device_driver {
void (*notify)(struct device *dev, enum nvdimm_event event);
};
-static inline struct nd_device_driver *to_nd_device_driver(
- struct device_driver *drv)
-{
- return container_of(drv, struct nd_device_driver, drv);
-};
+#define to_nd_device_driver(__drv) container_of_const(__drv, struct nd_device_driver, drv)
/**
* struct nd_namespace_common - core infrastructure of a namespace
@@ -88,31 +136,10 @@ struct nd_namespace_pmem {
struct nd_namespace_io nsio;
unsigned long lbasize;
char *alt_name;
- u8 *uuid;
+ uuid_t *uuid;
int id;
};
-/**
- * struct nd_namespace_blk - namespace for dimm-bounded persistent memory
- * @alt_name: namespace name supplied in the dimm label
- * @uuid: namespace name supplied in the dimm label
- * @id: ida allocated id
- * @lbasize: blk namespaces have a native sector size when btt not present
- * @size: sum of all the resource ranges allocated to this namespace
- * @num_resources: number of dpa extents to claim
- * @res: discontiguous dpa extents for given dimm
- */
-struct nd_namespace_blk {
- struct nd_namespace_common common;
- char *alt_name;
- u8 *uuid;
- int id;
- unsigned long lbasize;
- resource_size_t size;
- int num_resources;
- struct resource **res;
-};
-
static inline struct nd_namespace_io *to_nd_namespace_io(const struct device *dev)
{
return container_of(dev, struct nd_namespace_io, common.dev);
@@ -125,11 +152,6 @@ static inline struct nd_namespace_pmem *to_nd_namespace_pmem(const struct device
return container_of(nsio, struct nd_namespace_pmem, nsio);
}
-static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device *dev)
-{
- return container_of(dev, struct nd_namespace_blk, common.dev);
-}
-
/**
* nvdimm_read_bytes() - synchronously read bytes from an nvdimm namespace
* @ndns: device to read
diff --git a/include/linux/net.h b/include/linux/net.h
index ba736b457a06..f58b38ab37f8 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -36,11 +36,13 @@ struct net;
* in sock->flags, but moved into sk->sk_wq->flags to be RCU protected.
* Eventually all flags will be in sk->sk_wq->flags.
*/
-#define SOCKWQ_ASYNC_NOSPACE 0
-#define SOCKWQ_ASYNC_WAITDATA 1
-#define SOCK_NOSPACE 2
-#define SOCK_PASSCRED 3
-#define SOCK_PASSSEC 4
+enum socket_flags {
+ SOCKWQ_ASYNC_NOSPACE,
+ SOCKWQ_ASYNC_WAITDATA,
+ SOCK_NOSPACE,
+ SOCK_SUPPORT_ZC,
+ SOCK_CUSTOM_SOCKOPT,
+};
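As the comment block above notes, these are bit numbers rather than masks, so they are manipulated with the bitop helpers; a minimal sketch (function names illustrative):

static void example_mark_nospace(struct socket *sock)
{
	set_bit(SOCK_NOSPACE, &sock->flags);
}

static bool example_async_nospace(const struct socket *sock)
{
	return test_bit(SOCKWQ_ASYNC_NOSPACE, &sock->wq.flags);
}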
#ifndef ARCH_HAS_SOCKET_TYPES
/**
@@ -67,6 +69,7 @@ enum sock_type {
SOCK_DCCP = 6,
SOCK_PACKET = 10,
};
+#endif /* ARCH_HAS_SOCKET_TYPES */
#define SOCK_MAX (SOCK_PACKET + 1)
/* Mask which covers at least up to SOCK_MASK-1. The
@@ -78,8 +81,7 @@ enum sock_type {
#ifndef SOCK_NONBLOCK
#define SOCK_NONBLOCK O_NONBLOCK
#endif
-
-#endif /* ARCH_HAS_SOCKET_TYPES */
+#define SOCK_COREDUMP O_NOCTTY
/**
* enum sock_shutdown_cmd - Shutdown types
@@ -120,34 +122,56 @@ struct socket {
struct file *file;
struct sock *sk;
- const struct proto_ops *ops;
+ const struct proto_ops *ops; /* Might change with IPV6_ADDRFORM or MPTCP. */
struct socket_wq wq;
};
+/*
+ * "descriptor" for what we're up to with a read.
+ * This allows us to use the same read code yet
+ * have multiple different users of the data that
+ * we read from a file.
+ *
+ * The simplest case just copies the data to user
+ * mode.
+ */
+typedef struct {
+ size_t written;
+ size_t count;
+ union {
+ char __user *buf;
+ void *data;
+ } arg;
+ int error;
+} read_descriptor_t;
+
struct vm_area_struct;
struct page;
-struct sockaddr;
struct msghdr;
struct module;
struct sk_buff;
+struct proto_accept_arg;
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
unsigned int, size_t);
+typedef int (*skb_read_actor_t)(struct sock *, struct sk_buff *);
+
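An sk_read_actor_t implementation consumes the descriptor defined above, returning how much of the skb it used; a hedged sketch that merely accounts bytes (names illustrative):

static int example_actor(read_descriptor_t *desc, struct sk_buff *skb,
			 unsigned int offset, size_t len)
{
	size_t n = min(len, desc->count);

	desc->written += n;
	desc->count -= n;
	return n;
}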
struct proto_ops {
int family;
struct module *owner;
int (*release) (struct socket *sock);
int (*bind) (struct socket *sock,
- struct sockaddr *myaddr,
+ struct sockaddr_unsized *myaddr,
int sockaddr_len);
int (*connect) (struct socket *sock,
- struct sockaddr *vaddr,
+ struct sockaddr_unsized *vaddr,
int sockaddr_len, int flags);
int (*socketpair)(struct socket *sock1,
struct socket *sock2);
int (*accept) (struct socket *sock,
- struct socket *newsock, int flags, bool kern);
+ struct socket *newsock,
+ struct proto_accept_arg *arg);
int (*getname) (struct socket *sock,
struct sockaddr *addr,
int peer);
@@ -183,10 +207,9 @@ struct proto_ops {
size_t total_len, int flags);
int (*mmap) (struct file *file, struct socket *sock,
struct vm_area_struct * vma);
- ssize_t (*sendpage) (struct socket *sock, struct page *page,
- int offset, size_t size, int flags);
ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
+ void (*splice_eof)(struct socket *sock);
int (*set_peek_off)(struct sock *sk, int val);
int (*peek_len)(struct socket *sock);
@@ -195,8 +218,8 @@ struct proto_ops {
*/
int (*read_sock)(struct sock *sk, read_descriptor_t *desc,
sk_read_actor_t recv_actor);
- int (*sendpage_locked)(struct sock *sk, struct page *page,
- int offset, size_t size, int flags);
+ /* This is different from read_sock(), it reads an entire skb at a time. */
+ int (*read_skb)(struct sock *sk, skb_read_actor_t recv_actor);
int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg,
size_t size);
int (*set_rcvlowat)(struct sock *sk, int val);
@@ -276,16 +299,11 @@ do { \
net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
#else
#define net_dbg_ratelimited(fmt, ...) \
- do { \
- if (0) \
- no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
- } while (0)
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
#define net_get_random_once(buf, nbytes) \
get_random_once((buf), (nbytes))
-#define net_get_random_once_wait(buf, nbytes) \
- get_random_once_wait((buf), (nbytes))
/*
* E.g. XFS meta- & log-data is in slab pages, or bcache meta
@@ -302,24 +320,37 @@ static inline bool sendpage_ok(struct page *page)
return !PageSlab(page) && page_count(page) >= 1;
}
+/*
+ * Check sendpage_ok on contiguous pages.
+ */
+static inline bool sendpages_ok(struct page *page, size_t len, size_t offset)
+{
+ struct page *p = page + (offset >> PAGE_SHIFT);
+ size_t count = 0;
+
+ while (count < len) {
+ if (!sendpage_ok(p))
+ return false;
+
+ p++;
+ count += PAGE_SIZE;
+ }
+
+ return true;
+}
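A caller validating a multi-page span before choosing a zero-copy send path might use it per segment; hedged sketch, with the bio_vec tie-in an assumption about typical use:

#include <linux/bvec.h>

static bool example_bvec_sendable(const struct bio_vec *bv)
{
	return sendpages_ok(bv->bv_page, bv->bv_len, bv->bv_offset);
}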
+
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len);
-int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
- struct kvec *vec, size_t num, size_t len);
int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len, int flags);
-int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen);
+int kernel_bind(struct socket *sock, struct sockaddr_unsized *addr, int addrlen);
int kernel_listen(struct socket *sock, int backlog);
int kernel_accept(struct socket *sock, struct socket **newsock, int flags);
-int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
+int kernel_connect(struct socket *sock, struct sockaddr_unsized *addr, int addrlen,
int flags);
int kernel_getsockname(struct socket *sock, struct sockaddr *addr);
int kernel_getpeername(struct socket *sock, struct sockaddr *addr);
-int kernel_sendpage(struct socket *sock, struct page *page, int offset,
- size_t size, int flags);
-int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
- size_t size, int flags);
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
/* Routine returns the IP overhead imposed by a (caller-protected) socket. */
diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h
index f41387a8969f..0aa4411528fc 100644
--- a/include/linux/net/intel/i40e_client.h
+++ b/include/linux/net/intel/i40e_client.h
@@ -4,6 +4,8 @@
#ifndef _I40E_CLIENT_H_
#define _I40E_CLIENT_H_
+#include <linux/auxiliary_bus.h>
+
#define I40E_CLIENT_STR_LENGTH 10
/* Client interface version should be updated anytime there is a change in the
@@ -24,11 +26,6 @@ struct i40e_client_version {
u8 rsvd;
};
-enum i40e_client_state {
- __I40E_CLIENT_NULL,
- __I40E_CLIENT_REGISTERED
-};
-
enum i40e_client_instance_state {
__I40E_CLIENT_INSTANCE_NONE,
__I40E_CLIENT_INSTANCE_OPENED,
@@ -48,7 +45,7 @@ struct i40e_qv_info {
struct i40e_qvlist_info {
u32 num_vectors;
- struct i40e_qv_info qv_info[1];
+ struct i40e_qv_info qv_info[] __counted_by(num_vectors);
};
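With the flexible array, callers size allocations with struct_size(), which accounts for the __counted_by(num_vectors) trailing array; a hedged sketch:

#include <linux/overflow.h>
#include <linux/slab.h>

static struct i40e_qvlist_info *example_alloc_qvlist(u32 n)
{
	struct i40e_qvlist_info *q;

	q = kzalloc(struct_size(q, qv_info, n), GFP_KERNEL);
	if (q)
		q->num_vectors = n;
	return q;
}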
@@ -78,6 +75,7 @@ struct i40e_info {
u8 lanmac[6];
struct net_device *netdev;
struct pci_dev *pcidev;
+ struct auxiliary_device *aux_dev;
u8 __iomem *hw_addr;
u8 fid; /* function id, PF id or VF id */
#define I40E_CLIENT_FTYPE_PF 0
@@ -100,6 +98,11 @@ struct i40e_info {
u32 fw_build; /* firmware build number */
};
+struct i40e_auxiliary_device {
+ struct auxiliary_device aux_dev;
+ struct i40e_info *ldev;
+};
+
#define I40E_CLIENT_RESET_LEVEL_PF 1
#define I40E_CLIENT_RESET_LEVEL_CORE 2
#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1)
@@ -182,13 +185,7 @@ struct i40e_client {
const struct i40e_client_ops *ops; /* client ops provided by the client */
};
-static inline bool i40e_client_is_registered(struct i40e_client *client)
-{
- return test_bit(__I40E_CLIENT_REGISTERED, &client->state);
-}
-
-/* used by clients */
-int i40e_register_client(struct i40e_client *client);
-int i40e_unregister_client(struct i40e_client *client);
+void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client);
+void i40e_client_device_unregister(struct i40e_info *ldev);
#endif /* _I40E_CLIENT_H_ */
diff --git a/include/linux/net/intel/iidc_rdma.h b/include/linux/net/intel/iidc_rdma.h
new file mode 100644
index 000000000000..8baad1082042
--- /dev/null
+++ b/include/linux/net/intel/iidc_rdma.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021-2025, Intel Corporation. */
+
+#ifndef _IIDC_RDMA_H_
+#define _IIDC_RDMA_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/device.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <net/dscp.h>
+
+enum iidc_rdma_event_type {
+ IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE,
+ IIDC_RDMA_EVENT_AFTER_MTU_CHANGE,
+ IIDC_RDMA_EVENT_BEFORE_TC_CHANGE,
+ IIDC_RDMA_EVENT_AFTER_TC_CHANGE,
+ IIDC_RDMA_EVENT_WARN_RESET,
+ IIDC_RDMA_EVENT_CRIT_ERR,
+ IIDC_RDMA_EVENT_NBITS /* must be last */
+};
+
+struct iidc_rdma_event {
+ DECLARE_BITMAP(type, IIDC_RDMA_EVENT_NBITS);
+ u32 reg;
+};
+
+enum iidc_rdma_reset_type {
+ IIDC_FUNC_RESET,
+ IIDC_DEV_RESET,
+};
+
+enum iidc_rdma_protocol {
+ IIDC_RDMA_PROTOCOL_IWARP = BIT(0),
+ IIDC_RDMA_PROTOCOL_ROCEV2 = BIT(1),
+};
+
+/* Structure to be populated by core LAN PCI driver */
+struct iidc_rdma_core_dev_info {
+ struct pci_dev *pdev; /* PCI device corresponding to the main function */
+ struct auxiliary_device *adev;
+ /* Current active RDMA protocol */
+ enum iidc_rdma_protocol rdma_protocol;
+ void *iidc_priv; /* elements unique to each driver */
+};
+
+/* Structure representing auxiliary driver tailored information about the core
+ * PCI dev, each auxiliary driver using the IIDC interface will have an
+ * instance of this struct dedicated to it.
+ */
+struct iidc_rdma_core_auxiliary_dev {
+ struct auxiliary_device adev;
+ struct iidc_rdma_core_dev_info *cdev_info;
+};
+
+/* structure representing the auxiliary driver. This struct is to be
+ * allocated and populated by the auxiliary driver's owner. The core PCI
+ * driver will access these ops by performing a container_of on the
+ * auxiliary_device->dev.driver.
+ */
+struct iidc_rdma_core_auxiliary_drv {
+ struct auxiliary_driver adrv;
+ void (*event_handler)(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_event *event);
+};
+
+#endif /* _IIDC_RDMA_H_*/
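An RDMA auxiliary driver consuming this interface embeds struct iidc_rdma_core_auxiliary_drv so the core LAN driver's container_of() can reach event_handler; a hedged sketch (the name and the omitted probe/remove plumbing are illustrative):

static void example_event_handler(struct iidc_rdma_core_dev_info *cdev,
				  struct iidc_rdma_event *event)
{
	if (test_bit(IIDC_RDMA_EVENT_WARN_RESET, event->type))
		dev_warn(&cdev->adev->dev, "core driver warned of reset\n");
}

static struct iidc_rdma_core_auxiliary_drv example_adrv = {
	.adrv = {
		.name = "example_rdma",
	},
	.event_handler = example_event_handler,
};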
diff --git a/include/linux/net/intel/iidc_rdma_ice.h b/include/linux/net/intel/iidc_rdma_ice.h
new file mode 100644
index 000000000000..b40eed0e13fe
--- /dev/null
+++ b/include/linux/net/intel/iidc_rdma_ice.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021-2025, Intel Corporation. */
+
+#ifndef _IIDC_RDMA_ICE_H_
+#define _IIDC_RDMA_ICE_H_
+
+#include <linux/dcbnl.h>
+
+#define IIDC_MAX_USER_PRIORITY 8
+#define IIDC_DSCP_PFC_MODE 0x1
+
+/**
+ * struct iidc_rdma_qset_params - Struct to hold per RDMA Qset info
+ * @teid: TEID of the Qset node
+ * @qs_handle: SW index of the Qset, RDMA provides this
+ * @vport_id: VSI index
+ * @tc: Traffic Class branch the Qset should belong to
+ */
+struct iidc_rdma_qset_params {
+ /* Qset TEID returned to the RDMA driver in
+ * ice_add_rdma_qset and used by RDMA driver
+ * for calls to ice_del_rdma_qset
+ */
+ u32 teid;
+ u16 qs_handle;
+ u16 vport_id;
+ u8 tc;
+};
+
+struct iidc_rdma_qos_info {
+ u64 tc_ctx;
+ u8 rel_bw;
+ u8 prio_type;
+ u8 egress_virt_up;
+ u8 ingress_virt_up;
+};
+
+/* Struct to pass QoS info */
+struct iidc_rdma_qos_params {
+ struct iidc_rdma_qos_info tc_info[IEEE_8021QAZ_MAX_TCS];
+ u8 up2tc[IIDC_MAX_USER_PRIORITY];
+ u8 vport_relative_bw;
+ u8 vport_priority_type;
+ u8 num_tc;
+ u8 pfc_mode;
+ u8 dscp_map[DSCP_MAX];
+};
+
+struct iidc_rdma_priv_dev_info {
+ u8 pf_id;
+ u16 vport_id;
+ struct net_device *netdev;
+ struct iidc_rdma_qos_params qos_info;
+ u8 __iomem *hw_addr;
+};
+
+int ice_add_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset);
+int ice_del_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset);
+int ice_rdma_request_reset(struct iidc_rdma_core_dev_info *cdev,
+ enum iidc_rdma_reset_type reset_type);
+int ice_rdma_update_vsi_filter(struct iidc_rdma_core_dev_info *cdev, u16 vsi_id,
+ bool enable);
+int ice_alloc_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry);
+void ice_free_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry);
+
+#endif /* _IIDC_RDMA_ICE_H_ */
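
The teid/qs_handle contract in the kdoc above implies a simple lifecycle: the RDMA driver picks qs_handle, the LAN driver fills teid on add, and the same structure is handed back on delete. A minimal sketch under those assumptions (my_setup_qset is illustrative):

static int my_setup_qset(struct iidc_rdma_core_dev_info *cdev)
{
	struct iidc_rdma_qset_params qset = {
		.qs_handle	= 0,	/* SW index chosen by the RDMA driver */
		.vport_id	= 0,
		.tc		= 0,
	};
	int err;

	err = ice_add_rdma_qset(cdev, &qset);
	if (err)
		return err;

	/* qset.teid was written by the LAN driver; reuse it on teardown */
	return ice_del_rdma_qset(cdev, &qset);
}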
diff --git a/include/linux/net/intel/iidc_rdma_idpf.h b/include/linux/net/intel/iidc_rdma_idpf.h
new file mode 100644
index 000000000000..bab697e18fd6
--- /dev/null
+++ b/include/linux/net/intel/iidc_rdma_idpf.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2025 Intel Corporation. */
+
+#ifndef _IIDC_RDMA_IDPF_H_
+#define _IIDC_RDMA_IDPF_H_
+
+#include <linux/auxiliary_bus.h>
+
+/* struct to be populated by core LAN PCI driver */
+struct iidc_rdma_vport_dev_info {
+ struct auxiliary_device *adev;
+ struct auxiliary_device *core_adev;
+ struct net_device *netdev;
+ u16 vport_id;
+};
+
+struct iidc_rdma_vport_auxiliary_dev {
+ struct auxiliary_device adev;
+ struct iidc_rdma_vport_dev_info *vdev_info;
+};
+
+struct iidc_rdma_vport_auxiliary_drv {
+ struct auxiliary_driver adrv;
+ void (*event_handler)(struct iidc_rdma_vport_dev_info *vdev,
+ struct iidc_rdma_event *event);
+};
+
+/* function type of the core LAN PCI device */
+enum iidc_function_type {
+ IIDC_FUNCTION_TYPE_PF,
+ IIDC_FUNCTION_TYPE_VF,
+};
+
+struct iidc_rdma_lan_mapped_mem_region {
+ u8 __iomem *region_addr;
+ __le64 size;
+ __le64 start_offset;
+};
+
+struct iidc_rdma_priv_dev_info {
+ struct msix_entry *msix_entries;
+ u16 msix_count; /* How many vectors are reserved for this device */
+ enum iidc_function_type ftype;
+ __le16 num_memory_regions;
+ struct iidc_rdma_lan_mapped_mem_region *mapped_mem_regions;
+};
+
+int idpf_idc_vport_dev_ctrl(struct iidc_rdma_core_dev_info *cdev_info, bool up);
+int idpf_idc_request_reset(struct iidc_rdma_core_dev_info *cdev_info,
+ enum iidc_rdma_reset_type __always_unused reset_type);
+int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
+ u8 *send_msg, u16 msg_size,
+ u8 *recv_msg, u16 *recv_len);
+
+#endif /* _IIDC_RDMA_IDPF_H_ */
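
idpf_idc_rdma_vc_send_sync() is a synchronous mailbox exchange: the caller supplies both buffers and recv_len is in/out. A hedged usage sketch (buffer sizes and contents are illustrative):

static int my_vc_roundtrip(struct iidc_rdma_core_dev_info *cdev_info)
{
	u8 send[16] = {};
	u8 recv[64];
	u16 recv_len = sizeof(recv);	/* in: capacity, out: bytes used */

	return idpf_idc_rdma_vc_send_sync(cdev_info, send, sizeof(send),
					  recv, &recv_len);
}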
diff --git a/include/linux/net/intel/libie/adminq.h b/include/linux/net/intel/libie/adminq.h
new file mode 100644
index 000000000000..ab13bd777a28
--- /dev/null
+++ b/include/linux/net/intel/libie/adminq.h
@@ -0,0 +1,399 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBIE_ADMINQ_H
+#define __LIBIE_ADMINQ_H
+
+#include <linux/build_bug.h>
+#include <linux/types.h>
+
+#define LIBIE_CHECK_STRUCT_LEN(n, X) \
+ static_assert((n) == sizeof(struct X))
+#define LIBIE_AQ_MAX_BUF_LEN 4096
+
+/**
+ * struct libie_aqc_generic - Generic structure used in adminq communication
+ * @param0: generic parameter high 32bit
+ * @param1: generic parameter lower 32bit
+ * @addr_high: generic address high 32bit
+ * @addr_low: generic address lower 32bit
+ */
+struct libie_aqc_generic {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_generic);
+
+/**
+ * struct libie_aqc_get_ver - Used in command get version (direct 0x0001)
+ * @rom_ver: rom version
+ * @fw_build: number corresponding to firmware build
+ * @fw_branch: branch identifier of firmware version
+ * @fw_major: major number of firmware version
+ * @fw_minor: minor number of firmware version
+ * @fw_patch: patch of firmware version
+ * @api_branch: branch identifier of API version
+ * @api_major: major number of API version
+ * @api_minor: minor number of API version
+ * @api_patch: patch of API version
+ */
+struct libie_aqc_get_ver {
+ __le32 rom_ver;
+ __le32 fw_build;
+ u8 fw_branch;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_patch;
+ u8 api_branch;
+ u8 api_major;
+ u8 api_minor;
+ u8 api_patch;
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_get_ver);
+
+/**
+ * struct libie_aqc_driver_ver - Used in command send driver version
+ * (indirect 0x0002)
+ * @major_ver: driver major version
+ * @minor_ver: driver minor version
+ * @build_ver: driver build version
+ * @subbuild_ver: driver subbuild version
+ * @reserved: reserved for future use
+ * @addr_high: high part of response address buff
+ * @addr_low: low part of response address buff
+ */
+struct libie_aqc_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_driver_ver);
+
+enum libie_aq_res_id {
+ LIBIE_AQC_RES_ID_NVM = 1,
+ LIBIE_AQC_RES_ID_SDP = 2,
+ LIBIE_AQC_RES_ID_CHNG_LOCK = 3,
+ LIBIE_AQC_RES_ID_GLBL_LOCK = 4,
+};
+
+enum libie_aq_res_access_type {
+ LIBIE_AQC_RES_ACCESS_READ = 1,
+ LIBIE_AQC_RES_ACCESS_WRITE = 2,
+};
+
+#define LIBIE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
+#define LIBIE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
+#define LIBIE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
+#define LIBIE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
+
+#define LIBIE_AQ_RES_GLBL_SUCCESS 0
+#define LIBIE_AQ_RES_GLBL_IN_PROG 1
+#define LIBIE_AQ_RES_GLBL_DONE 2
+
+/**
+ * struct libie_aqc_req_res - Request resource ownership
+ * @res_id: resource ID (look at enum definition above)
+ * @access_type: read or write (enum definition above)
+ * @timeout: Upon successful completion, FW writes this value and the driver
+ * is expected to release the resource before the timeout expires. This value
+ * is provided in milliseconds.
+ * @res_number: for SDP, this is the pin ID of the SDP
+ * @status: status only used for LIBIE_AQC_RES_ID_GLBL_LOCK, for others reserved
+ * @reserved: reserved for future use
+ *
+ * Used in commands:
+ * request resource ownership (direct 0x0008)
+ * release resource ownership (direct 0x0009)
+ */
+struct libie_aqc_req_res {
+ __le16 res_id;
+ __le16 access_type;
+
+ __le32 timeout;
+ __le32 res_number;
+ __le16 status;
+ u8 reserved[2];
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_req_res);
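
Putting the fields together, a request for read ownership of the NVM resource with its default timeout could be filled like this (a sketch; the surrounding descriptor setup and send path are omitted):

static void my_fill_req_res(struct libie_aqc_req_res *cmd)
{
	cmd->res_id	 = cpu_to_le16(LIBIE_AQC_RES_ID_NVM);
	cmd->access_type = cpu_to_le16(LIBIE_AQC_RES_ACCESS_READ);
	cmd->timeout	 = cpu_to_le32(LIBIE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS);
}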
+
+/**
+ * struct libie_aqc_list_caps - Getting capabilities
+ * @cmd_flags: command flags
+ * @pf_index: index of PF to get caps from
+ * @reserved: reserved for future use
+ * @count: number of capabilities records
+ * @addr_high: high part of response address buff
+ * @addr_low: low part of response address buff
+ *
+ * Used in commands:
+ * get function capabilities (indirect 0x000A)
+ * get device capabilities (indirect 0x000B)
+ */
+struct libie_aqc_list_caps {
+ u8 cmd_flags;
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_list_caps);
+
+/* Device/Function buffer entry, repeated per reported capability */
+#define LIBIE_AQC_CAPS_SWITCH_MODE 0x0001
+#define LIBIE_AQC_CAPS_MNG_MODE 0x0002
+#define LIBIE_AQC_CAPS_NPAR_ACTIVE 0x0003
+#define LIBIE_AQC_CAPS_OS2BMC_CAP 0x0004
+#define LIBIE_AQC_CAPS_VALID_FUNCTIONS 0x0005
+#define LIBIE_AQC_MAX_VALID_FUNCTIONS 0x8
+#define LIBIE_AQC_CAPS_SRIOV 0x0012
+#define LIBIE_AQC_CAPS_VF 0x0013
+#define LIBIE_AQC_CAPS_VMDQ 0x0014
+#define LIBIE_AQC_CAPS_8021QBG 0x0015
+#define LIBIE_AQC_CAPS_8021QBR 0x0016
+#define LIBIE_AQC_CAPS_VSI 0x0017
+#define LIBIE_AQC_CAPS_DCB 0x0018
+#define LIBIE_AQC_CAPS_FCOE 0x0021
+#define LIBIE_AQC_CAPS_ISCSI 0x0022
+#define LIBIE_AQC_CAPS_RSS 0x0040
+#define LIBIE_AQC_CAPS_RXQS 0x0041
+#define LIBIE_AQC_CAPS_TXQS 0x0042
+#define LIBIE_AQC_CAPS_MSIX 0x0043
+#define LIBIE_AQC_CAPS_VF_MSIX 0x0044
+#define LIBIE_AQC_CAPS_FD 0x0045
+#define LIBIE_AQC_CAPS_1588 0x0046
+#define LIBIE_AQC_CAPS_MAX_MTU 0x0047
+#define LIBIE_AQC_CAPS_NVM_VER 0x0048
+#define LIBIE_AQC_CAPS_PENDING_NVM_VER 0x0049
+#define LIBIE_AQC_CAPS_OROM_VER 0x004A
+#define LIBIE_AQC_CAPS_PENDING_OROM_VER 0x004B
+#define LIBIE_AQC_CAPS_NET_VER 0x004C
+#define LIBIE_AQC_CAPS_PENDING_NET_VER 0x004D
+#define LIBIE_AQC_CAPS_RDMA 0x0051
+#define LIBIE_AQC_CAPS_LED 0x0061
+#define LIBIE_AQC_CAPS_SDP 0x0062
+#define LIBIE_AQC_CAPS_MDIO 0x0063
+#define LIBIE_AQC_CAPS_WSR_PROT 0x0064
+#define LIBIE_AQC_CAPS_SENSOR_READING 0x0067
+#define LIBIE_AQC_INLINE_IPSEC 0x0070
+#define LIBIE_AQC_CAPS_NUM_ENABLED_PORTS 0x0072
+#define LIBIE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
+#define LIBIE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
+#define LIBIE_AQC_CAPS_NVM_MGMT 0x0080
+#define LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG0 0x0081
+#define LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG1 0x0082
+#define LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG2 0x0083
+#define LIBIE_AQC_CAPS_EXT_TOPO_DEV_IMG3 0x0084
+#define LIBIE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
+#define LIBIE_AQC_CAPS_NAC_TOPOLOGY 0x0087
+#define LIBIE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
+#define LIBIE_AQC_BIT_ROCEV2_LAG BIT(0)
+#define LIBIE_AQC_BIT_SRIOV_LAG BIT(1)
+#define LIBIE_AQC_BIT_SRIOV_AA_LAG BIT(2)
+#define LIBIE_AQC_CAPS_FLEX10 0x00F1
+#define LIBIE_AQC_CAPS_CEM 0x00F2
+
+/**
+ * struct libie_aqc_list_caps_elem - Getting list of caps elements
+ * @cap: one from the defines list above
+ * @major_ver: major version
+ * @minor_ver: minor version
+ * @number: number of resources described by this capability
+ * @logical_id: logical ID, only meaningful for some types of resources
+ * @phys_id: physical ID, only meaningful for some types of resources
+ * @rsvd1: reserved for future use
+ * @rsvd2: reserved for future use
+ */
+struct libie_aqc_list_caps_elem {
+ __le16 cap;
+
+ u8 major_ver;
+ u8 minor_ver;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+LIBIE_CHECK_STRUCT_LEN(32, libie_aqc_list_caps_elem);
+
+/* Admin Queue command opcodes */
+enum libie_adminq_opc {
+ /* FW Logging Commands */
+ libie_aqc_opc_fw_logs_config = 0xFF30,
+ libie_aqc_opc_fw_logs_register = 0xFF31,
+ libie_aqc_opc_fw_logs_query = 0xFF32,
+ libie_aqc_opc_fw_logs_event = 0xFF33,
+};
+
+enum libie_aqc_fw_logging_mod {
+ LIBIE_AQC_FW_LOG_ID_GENERAL = 0,
+ LIBIE_AQC_FW_LOG_ID_CTRL,
+ LIBIE_AQC_FW_LOG_ID_LINK,
+ LIBIE_AQC_FW_LOG_ID_LINK_TOPO,
+ LIBIE_AQC_FW_LOG_ID_DNL,
+ LIBIE_AQC_FW_LOG_ID_I2C,
+ LIBIE_AQC_FW_LOG_ID_SDP,
+ LIBIE_AQC_FW_LOG_ID_MDIO,
+ LIBIE_AQC_FW_LOG_ID_ADMINQ,
+ LIBIE_AQC_FW_LOG_ID_HDMA,
+ LIBIE_AQC_FW_LOG_ID_LLDP,
+ LIBIE_AQC_FW_LOG_ID_DCBX,
+ LIBIE_AQC_FW_LOG_ID_DCB,
+ LIBIE_AQC_FW_LOG_ID_XLR,
+ LIBIE_AQC_FW_LOG_ID_NVM,
+ LIBIE_AQC_FW_LOG_ID_AUTH,
+ LIBIE_AQC_FW_LOG_ID_VPD,
+ LIBIE_AQC_FW_LOG_ID_IOSF,
+ LIBIE_AQC_FW_LOG_ID_PARSER,
+ LIBIE_AQC_FW_LOG_ID_SW,
+ LIBIE_AQC_FW_LOG_ID_SCHEDULER,
+ LIBIE_AQC_FW_LOG_ID_TXQ,
+ LIBIE_AQC_FW_LOG_ID_RSVD,
+ LIBIE_AQC_FW_LOG_ID_POST,
+ LIBIE_AQC_FW_LOG_ID_WATCHDOG,
+ LIBIE_AQC_FW_LOG_ID_TASK_DISPATCH,
+ LIBIE_AQC_FW_LOG_ID_MNG,
+ LIBIE_AQC_FW_LOG_ID_SYNCE,
+ LIBIE_AQC_FW_LOG_ID_HEALTH,
+ LIBIE_AQC_FW_LOG_ID_TSDRV,
+ LIBIE_AQC_FW_LOG_ID_PFREG,
+ LIBIE_AQC_FW_LOG_ID_MDLVER,
+ LIBIE_AQC_FW_LOG_ID_MAX
+};
+
+/* Set FW Logging configuration (indirect 0xFF30)
+ * Register for FW Logging (indirect 0xFF31)
+ * Query FW Logging (indirect 0xFF32)
+ * FW Log Event (indirect 0xFF33)
+ */
+#define LIBIE_AQC_FW_LOG_CONF_UART_EN BIT(0)
+#define LIBIE_AQC_FW_LOG_CONF_AQ_EN BIT(1)
+#define LIBIE_AQC_FW_LOG_QUERY_REGISTERED BIT(2)
+#define LIBIE_AQC_FW_LOG_CONF_SET_VALID BIT(3)
+#define LIBIE_AQC_FW_LOG_AQ_REGISTER BIT(0)
+#define LIBIE_AQC_FW_LOG_AQ_QUERY BIT(2)
+
+#define LIBIE_AQC_FW_LOG_MIN_RESOLUTION 1
+#define LIBIE_AQC_FW_LOG_MAX_RESOLUTION 128
+
+struct libie_aqc_fw_log {
+ u8 cmd_flags;
+
+ u8 rsp_flag;
+ __le16 fw_rt_msb;
+ union {
+ struct {
+ __le32 fw_rt_lsb;
+ } sync;
+ struct {
+ __le16 log_resolution;
+ __le16 mdl_cnt;
+ } cfg;
+ } ops;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Response Buffer for:
+ * Set Firmware Logging Configuration (0xFF30)
+ * Query FW Logging (0xFF32)
+ */
+struct libie_aqc_fw_log_cfg_resp {
+ __le16 module_identifier;
+ u8 log_level;
+ u8 rsvd0;
+};
+
+/**
+ * struct libie_aq_desc - Admin Queue (AQ) descriptor
+ * @flags: LIBIE_AQ_FLAG_* flags
+ * @opcode: AQ command opcode
+ * @datalen: length in bytes of indirect/external data buffer
+ * @retval: return value from firmware
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
+ * @params: command-specific parameters
+ *
+ * Descriptor format for commands the driver posts on the Admin Transmit Queue
+ * (ATQ). The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
+ * result of the command are written to the Admin Receive Queue (ARQ) using
+ * the same descriptor format. Descriptors are in little-endian notation with
+ * 32-bit words.
+ */
+struct libie_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ u8 raw[16];
+ struct libie_aqc_generic generic;
+ struct libie_aqc_get_ver get_ver;
+ struct libie_aqc_driver_ver driver_ver;
+ struct libie_aqc_req_res res_owner;
+ struct libie_aqc_list_caps get_cap;
+ struct libie_aqc_fw_log fw_log;
+ } params;
+};
+LIBIE_CHECK_STRUCT_LEN(32, libie_aq_desc);
+
+/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
+#define LIBIE_AQ_LG_BUF 512
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+#define LIBIE_AQ_FLAG_DD BIT(0) /* 0x1 */
+#define LIBIE_AQ_FLAG_CMP BIT(1) /* 0x2 */
+#define LIBIE_AQ_FLAG_ERR BIT(2) /* 0x4 */
+#define LIBIE_AQ_FLAG_VFE BIT(3) /* 0x8 */
+#define LIBIE_AQ_FLAG_LB BIT(9) /* 0x200 */
+#define LIBIE_AQ_FLAG_RD BIT(10) /* 0x400 */
+#define LIBIE_AQ_FLAG_VFC BIT(11) /* 0x800 */
+#define LIBIE_AQ_FLAG_BUF BIT(12) /* 0x1000 */
+#define LIBIE_AQ_FLAG_SI BIT(13) /* 0x2000 */
+#define LIBIE_AQ_FLAG_EI BIT(14) /* 0x4000 */
+#define LIBIE_AQ_FLAG_FE BIT(15) /* 0x8000 */
+
+/* error codes */
+enum libie_aq_err {
+ LIBIE_AQ_RC_OK = 0, /* Success */
+ LIBIE_AQ_RC_EPERM = 1, /* Operation not permitted */
+ LIBIE_AQ_RC_ENOENT = 2, /* No such element */
+ LIBIE_AQ_RC_ESRCH = 3, /* Bad opcode */
+ LIBIE_AQ_RC_EIO = 5, /* I/O error */
+ LIBIE_AQ_RC_EAGAIN = 8, /* Try again */
+ LIBIE_AQ_RC_ENOMEM = 9, /* Out of memory */
+ LIBIE_AQ_RC_EACCES = 10, /* Permission denied */
+ LIBIE_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ LIBIE_AQ_RC_EEXIST = 13, /* Object already exists */
+ LIBIE_AQ_RC_EINVAL = 14, /* Invalid argument */
+ LIBIE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
+ LIBIE_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ LIBIE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ LIBIE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
+ LIBIE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */
+ LIBIE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */
+ LIBIE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */
+ LIBIE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+};
+
+static inline void *libie_aq_raw(struct libie_aq_desc *desc)
+{
+ return &desc->params.raw;
+}
+
+const char *libie_aq_str(enum libie_aq_err err);
+
+#endif /* __LIBIE_ADMINQ_H */
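
To make the descriptor contract above concrete, here is a hedged sketch of filling a direct get-version command (opcode 0x0001 per the libie_aqc_get_ver kdoc) and decoding the firmware's completion status; the send path itself is driver-specific and omitted:

static void my_fill_get_ver(struct libie_aq_desc *desc)
{
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(0x0001);		/* get version, direct */
	desc->flags  = cpu_to_le16(LIBIE_AQ_FLAG_SI);	/* solicit interrupt */
}

static int my_check_retval(const struct libie_aq_desc *desc)
{
	enum libie_aq_err err = le16_to_cpu(desc->retval);

	if (err != LIBIE_AQ_RC_OK) {
		pr_err("adminq command failed: %s\n", libie_aq_str(err));
		return -EIO;
	}
	return 0;
}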
diff --git a/include/linux/net/intel/libie/fwlog.h b/include/linux/net/intel/libie/fwlog.h
new file mode 100644
index 000000000000..7273c78c826b
--- /dev/null
+++ b/include/linux/net/intel/libie/fwlog.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _LIBIE_FWLOG_H_
+#define _LIBIE_FWLOG_H_
+
+#include <linux/net/intel/libie/adminq.h>
+
+/* Only a single log level should be set and all log levels under the set value
+ * are enabled, e.g. if the log level is set to LIBIE_FWLOG_LEVEL_VERBOSE, then
+ * all other log levels are included (except LIBIE_FWLOG_LEVEL_NONE)
+ */
+enum libie_fwlog_level {
+ LIBIE_FWLOG_LEVEL_NONE = 0,
+ LIBIE_FWLOG_LEVEL_ERROR = 1,
+ LIBIE_FWLOG_LEVEL_WARNING = 2,
+ LIBIE_FWLOG_LEVEL_NORMAL = 3,
+ LIBIE_FWLOG_LEVEL_VERBOSE = 4,
+ LIBIE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
+};
+
+struct libie_fwlog_module_entry {
+ /* module ID for the corresponding firmware logging event */
+ u16 module_id;
+ /* verbosity level for the module_id */
+ u8 log_level;
+};
+
+struct libie_fwlog_cfg {
+ /* list of modules for configuring log level */
+ struct libie_fwlog_module_entry module_entries[LIBIE_AQC_FW_LOG_ID_MAX];
+ /* options used to configure firmware logging */
+ u16 options;
+#define LIBIE_FWLOG_OPTION_ARQ_ENA BIT(0)
+#define LIBIE_FWLOG_OPTION_UART_ENA BIT(1)
+ /* set before calling libie_fwlog_init() so the PF registers for
+ * firmware logging on initialization
+ */
+#define LIBIE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
+ /* set in the libie_aq_fwlog_get() response if the PF is registered for
+ * FW logging events over ARQ
+ */
+#define LIBIE_FWLOG_OPTION_IS_REGISTERED BIT(3)
+
+ /* minimum number of log events sent per Admin Receive Queue event */
+ u16 log_resolution;
+};
+
+struct libie_fwlog_data {
+ u16 data_size;
+ u8 *data;
+};
+
+struct libie_fwlog_ring {
+ struct libie_fwlog_data *rings;
+ u16 index;
+ u16 size;
+ u16 head;
+ u16 tail;
+};
+
+#define LIBIE_FWLOG_RING_SIZE_INDEX_DFLT 3
+#define LIBIE_FWLOG_RING_SIZE_DFLT 256
+#define LIBIE_FWLOG_RING_SIZE_MAX 512
+
+struct libie_fwlog {
+ struct libie_fwlog_cfg cfg;
+ bool supported; /* does hardware support FW logging? */
+ struct libie_fwlog_ring ring;
+ struct dentry *debugfs;
+ /* keep track of all the dentrys for FW log modules */
+ struct dentry **debugfs_modules;
+ struct_group_tagged(libie_fwlog_api, api,
+ struct pci_dev *pdev;
+ int (*send_cmd)(void *, struct libie_aq_desc *, void *, u16);
+ void *priv;
+ struct dentry *debugfs_root;
+ );
+};
+
+#if IS_ENABLED(CONFIG_LIBIE_FWLOG)
+int libie_fwlog_init(struct libie_fwlog *fwlog, struct libie_fwlog_api *api);
+void libie_fwlog_deinit(struct libie_fwlog *fwlog);
+void libie_fwlog_reregister(struct libie_fwlog *fwlog);
+void libie_get_fwlog_data(struct libie_fwlog *fwlog, u8 *buf, u16 len);
+#else
+static inline int libie_fwlog_init(struct libie_fwlog *fwlog,
+ struct libie_fwlog_api *api)
+{
+ return -EOPNOTSUPP;
+}
+static inline void libie_fwlog_deinit(struct libie_fwlog *fwlog) { }
+static inline void libie_fwlog_reregister(struct libie_fwlog *fwlog) { }
+static inline void libie_get_fwlog_data(struct libie_fwlog *fwlog, u8 *buf,
+ u16 len) { }
+#endif /* CONFIG_LIBIE_FWLOG */
+#endif /* _LIBIE_FWLOG_H_ */
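
The struct_group_tagged() above exposes the API portion as its own struct libie_fwlog_api type, which is what libie_fwlog_init() consumes. A hedged wiring sketch, assuming a hypothetical PF structure (my_pf and my_send_cmd are illustrative, not real identifiers):

struct my_pf {
	struct pci_dev *pdev;
	struct dentry *debugfs_root;
	struct libie_fwlog fwlog;
};

static int my_send_cmd(void *priv, struct libie_aq_desc *desc,
		       void *buf, u16 buf_len)
{
	/* forward to the driver's real AdminQ send routine here */
	return 0;
}

static int my_fwlog_setup(struct my_pf *pf)
{
	struct libie_fwlog_api api = {
		.pdev		= pf->pdev,
		.send_cmd	= my_send_cmd,
		.priv		= pf,
		.debugfs_root	= pf->debugfs_root,
	};

	return libie_fwlog_init(&pf->fwlog, &api);
}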
diff --git a/include/linux/net/intel/libie/pctype.h b/include/linux/net/intel/libie/pctype.h
new file mode 100644
index 000000000000..d783417fbf36
--- /dev/null
+++ b/include/linux/net/intel/libie/pctype.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2025 Intel Corporation */
+
+#ifndef __LIBIE_PCTYPE_H
+#define __LIBIE_PCTYPE_H
+
+/* Packet Classifier Type indexes, used to set the xxQF_HENA registers. Also
+ * communicated over the virtchnl API as part of struct virtchnl_rss_hashena.
+ */
+enum libie_filter_pctype {
+ /* Note: Values 0-28 are reserved for future use.
+ * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ LIBIE_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Values 37-38 are reserved for future use.
+ * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ LIBIE_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ LIBIE_FILTER_PCTYPE_FCOE_OX = 48,
+ LIBIE_FILTER_PCTYPE_FCOE_RX = 49,
+ LIBIE_FILTER_PCTYPE_FCOE_OTHER = 50,
+ /* Note: Values 51-62 are reserved for future use */
+ LIBIE_FILTER_PCTYPE_L2_PAYLOAD = 63
+};
+
+#endif /* __LIBIE_PCTYPE_H */
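
Since these indexes are bit positions in the xxQF_HENA registers, drivers typically OR together BIT_ULL() of the wanted types. A small sketch of a plausible hash-enable mask (the particular selection is illustrative):

static u64 my_default_hena(void)
{
	return BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) |
	       BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) |
	       BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) |
	       BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP);
}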
diff --git a/include/linux/net/intel/libie/rx.h b/include/linux/net/intel/libie/rx.h
new file mode 100644
index 000000000000..8e97775f1d66
--- /dev/null
+++ b/include/linux/net/intel/libie/rx.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef __LIBIE_RX_H
+#define __LIBIE_RX_H
+
+#include <net/libeth/rx.h>
+
+/* Rx buffer management */
+
+/* The largest size for a single descriptor as per HW */
+#define LIBIE_MAX_RX_BUF_LEN 9728U
+/* "True" HW-writeable space: minimum from SW and HW values */
+#define LIBIE_RX_BUF_LEN(hr) min_t(u32, LIBETH_RX_PAGE_LEN(hr), \
+ LIBIE_MAX_RX_BUF_LEN)
+
+/* The maximum frame size as per HW (S/G) */
+#define __LIBIE_MAX_RX_FRM_LEN 16382U
+/* ATST, HW can chain up to 5 Rx descriptors */
+#define LIBIE_MAX_RX_FRM_LEN(hr) \
+ min_t(u32, __LIBIE_MAX_RX_FRM_LEN, LIBIE_RX_BUF_LEN(hr) * 5)
+/* Maximum frame size minus LL overhead */
+#define LIBIE_MAX_MTU \
+ (LIBIE_MAX_RX_FRM_LEN(LIBETH_MAX_HEADROOM) - LIBETH_RX_LL_LEN)
+
+/* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed
+ * bitfield struct.
+ */
+
+#define LIBIE_RX_PT_NUM 154
+
+extern const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM];
+
+/**
+ * libie_rx_pt_parse - convert HW packet type to software bitfield structure
+ * @pt: 10-bit hardware packet type value from the descriptor
+ *
+ * The lookup table libie_rx_pt_lut must be accessed only via this wrapper.
+ *
+ * Return: parsed bitfield struct corresponding to the provided ptype.
+ */
+static inline struct libeth_rx_pt libie_rx_pt_parse(u32 pt)
+{
+ if (unlikely(pt >= LIBIE_RX_PT_NUM))
+ pt = 0;
+
+ return libie_rx_pt_lut[pt];
+}
+
+#endif /* __LIBIE_RX_H */
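
A short sketch of how the sizing macros above compose for a given headroom (the helper name is illustrative): LIBIE_RX_BUF_LEN() clamps the page-derived length to the 9728-byte HW cap, and LIBIE_MAX_RX_FRM_LEN() further limits a 5-descriptor chain to 16382 bytes.

static void my_rx_limits(u32 headroom, u32 *buf_len, u32 *frm_len)
{
	*buf_len = LIBIE_RX_BUF_LEN(headroom);	   /* per-descriptor buffer */
	*frm_len = LIBIE_MAX_RX_FRM_LEN(headroom); /* whole S/G frame cap */
}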
diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h
new file mode 100644
index 000000000000..f4936d9c2b3c
--- /dev/null
+++ b/include/linux/net_tstamp.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_NET_TIMESTAMPING_H_
+#define _LINUX_NET_TIMESTAMPING_H_
+
+#include <uapi/linux/net_tstamp.h>
+#include <uapi/linux/ethtool_netlink_generated.h>
+
+#define SOF_TIMESTAMPING_SOFTWARE_MASK (SOF_TIMESTAMPING_RX_SOFTWARE | \
+ SOF_TIMESTAMPING_TX_SOFTWARE | \
+ SOF_TIMESTAMPING_SOFTWARE)
+
+#define SOF_TIMESTAMPING_HARDWARE_MASK (SOF_TIMESTAMPING_RX_HARDWARE | \
+ SOF_TIMESTAMPING_TX_HARDWARE | \
+ SOF_TIMESTAMPING_RAW_HARDWARE)
+
+/**
+ * struct hwtstamp_provider_desc - hwtstamp provider description
+ *
+ * @index: index of the hwtstamp provider.
+ * @qualifier: hwtstamp provider qualifier.
+ */
+struct hwtstamp_provider_desc {
+ int index;
+ enum hwtstamp_provider_qualifier qualifier;
+};
+
+/**
+ * struct hwtstamp_provider - hwtstamp provider object
+ *
+ * @rcu_head: RCU callback used to free the struct.
+ * @source: source of the hwtstamp provider.
+ * @phydev: pointer to the phydev source in case the PTP comes from phylib
+ * @desc: hwtstamp provider description.
+ */
+struct hwtstamp_provider {
+ struct rcu_head rcu_head;
+ enum hwtstamp_source source;
+ struct phy_device *phydev;
+ struct hwtstamp_provider_desc desc;
+};
+
+/**
+ * struct kernel_hwtstamp_config - Kernel copy of struct hwtstamp_config
+ *
+ * @flags: see struct hwtstamp_config
+ * @tx_type: see struct hwtstamp_config
+ * @rx_filter: see struct hwtstamp_config
+ * @ifr: pointer to ifreq structure from the original ioctl request, to pass to
+ * a legacy implementation of a lower driver
+ * @copied_to_user: request was passed to a legacy implementation which already
+ * copied the ioctl request back to user space
+ * @source: indication whether timestamps should come from the netdev or from
+ * an attached phylib PHY
+ * @qualifier: qualifier of the hwtstamp provider
+ *
+ * Prefer using this structure for in-kernel processing of hardware
+ * timestamping configuration, over the inextensible struct hwtstamp_config
+ * exposed to the %SIOCGHWTSTAMP and %SIOCSHWTSTAMP ioctl UAPI.
+ */
+struct kernel_hwtstamp_config {
+ int flags;
+ int tx_type;
+ int rx_filter;
+ struct ifreq *ifr;
+ bool copied_to_user;
+ enum hwtstamp_source source;
+ enum hwtstamp_provider_qualifier qualifier;
+};
+
+static inline void hwtstamp_config_to_kernel(struct kernel_hwtstamp_config *kernel_cfg,
+ const struct hwtstamp_config *cfg)
+{
+ kernel_cfg->flags = cfg->flags;
+ kernel_cfg->tx_type = cfg->tx_type;
+ kernel_cfg->rx_filter = cfg->rx_filter;
+}
+
+static inline void hwtstamp_config_from_kernel(struct hwtstamp_config *cfg,
+ const struct kernel_hwtstamp_config *kernel_cfg)
+{
+ cfg->flags = kernel_cfg->flags;
+ cfg->tx_type = kernel_cfg->tx_type;
+ cfg->rx_filter = kernel_cfg->rx_filter;
+}
+
+static inline bool kernel_hwtstamp_config_changed(const struct kernel_hwtstamp_config *a,
+ const struct kernel_hwtstamp_config *b)
+{
+ return a->flags != b->flags ||
+ a->tx_type != b->tx_type ||
+ a->rx_filter != b->rx_filter;
+}
+
+#endif /* _LINUX_NET_TIMESTAMPING_H_ */
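
The three helpers above are meant to compose: convert the UAPI struct in, compare against the current state, and convert back out for the user copy. A hedged sketch (my_apply_cfg is illustrative):

static bool my_apply_cfg(struct kernel_hwtstamp_config *cur,
			 struct hwtstamp_config *ucfg)
{
	struct kernel_hwtstamp_config next = {};

	hwtstamp_config_to_kernel(&next, ucfg);
	if (!kernel_hwtstamp_config_changed(cur, &next))
		return false;		/* nothing to reprogram */

	*cur = next;
	hwtstamp_config_from_kernel(ucfg, cur);	/* echo back to the user copy */
	return true;
}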
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 3de38d6a0aea..93e4da7046a1 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -24,9 +24,8 @@ enum {
NETIF_F_HW_VLAN_CTAG_FILTER_BIT,/* Receive filtering on VLAN CTAGs */
NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot handle VLAN packets */
NETIF_F_GSO_BIT, /* Enable software GSO. */
- NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */
- /* do not use LLTX in new drivers */
- NETIF_F_NETNS_LOCAL_BIT, /* Does not change network namespaces */
+ __UNUSED_NETIF_F_12,
+ __UNUSED_NETIF_F_13,
NETIF_F_GRO_BIT, /* Generic receive offload */
NETIF_F_LRO_BIT, /* large receive offload */
@@ -54,12 +53,12 @@ enum {
NETIF_F_GSO_UDP_BIT, /* ... UFO, deprecated except tuntap */
NETIF_F_GSO_UDP_L4_BIT, /* ... UDP payload GSO (not UFO) */
NETIF_F_GSO_FRAGLIST_BIT, /* ... Fraglist GSO */
+ NETIF_F_GSO_ACCECN_BIT, /* TCP AccECN w/ TSO (no clear CWR) */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
- NETIF_F_GSO_FRAGLIST_BIT,
+ NETIF_F_GSO_ACCECN_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
- NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/
NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */
NETIF_F_RXHASH_BIT, /* Receive hashing offload */
NETIF_F_RXCSUM_BIT, /* Receive checksumming offload */
@@ -93,7 +92,7 @@ enum {
/*
* Add your fresh new feature above and remember to update
- * netdev_features_strings[] in net/core/ethtool.c and maybe
+ * netdev_features_strings[] in net/ethtool/common.c and maybe
* some feature mask #defines below. Please also describe it
* in Documentation/networking/netdev-features.rst.
*/
@@ -106,7 +105,6 @@ enum {
#define __NETIF_F(name) __NETIF_F_BIT(NETIF_F_##name##_BIT)
#define NETIF_F_FCOE_CRC __NETIF_F(FCOE_CRC)
-#define NETIF_F_FCOE_MTU __NETIF_F(FCOE_MTU)
#define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST)
#define NETIF_F_FSO __NETIF_F(FSO)
#define NETIF_F_GRO __NETIF_F(GRO)
@@ -120,10 +118,8 @@ enum {
#define NETIF_F_HW_VLAN_CTAG_TX __NETIF_F(HW_VLAN_CTAG_TX)
#define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM)
#define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM)
-#define NETIF_F_LLTX __NETIF_F(LLTX)
#define NETIF_F_LOOPBACK __NETIF_F(LOOPBACK)
#define NETIF_F_LRO __NETIF_F(LRO)
-#define NETIF_F_NETNS_LOCAL __NETIF_F(NETNS_LOCAL)
#define NETIF_F_NOCACHE_COPY __NETIF_F(NOCACHE_COPY)
#define NETIF_F_NTUPLE __NETIF_F(NTUPLE)
#define NETIF_F_RXCSUM __NETIF_F(RXCSUM)
@@ -132,6 +128,7 @@ enum {
#define NETIF_F_SG __NETIF_F(SG)
#define NETIF_F_TSO6 __NETIF_F(TSO6)
#define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN)
+#define NETIF_F_GSO_ACCECN __NETIF_F(GSO_ACCECN)
#define NETIF_F_TSO __NETIF_F(TSO)
#define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED)
#define NETIF_F_RXFCS __NETIF_F(RXFCS)
@@ -169,7 +166,7 @@ enum {
#define NETIF_F_HW_HSR_FWD __NETIF_F(HW_HSR_FWD)
#define NETIF_F_HW_HSR_DUP __NETIF_F(HW_HSR_DUP)
-/* Finds the next feature with the highest number of the range of start till 0.
+/* Finds the next feature with the highest bit number in the range start-1 down to 0.
*/
static inline int find_next_netdev_feature(u64 feature, unsigned long start)
{
@@ -188,12 +185,11 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
for ((bit) = find_next_netdev_feature((mask_addr), \
NETDEV_FEATURE_COUNT); \
(bit) >= 0; \
- (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
+ (bit) = find_next_netdev_feature((mask_addr), (bit)))
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
-#define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \
- NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
+#define NETIF_F_NEVER_CHANGE NETIF_F_VLAN_CHALLENGED
/* remember that ((t)1 << t_BITS) is undefined in C99 */
#define NETIF_F_ETHTOOL_BITS ((__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) | \
@@ -214,11 +210,9 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | \
NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID)
-#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
- NETIF_F_FSO)
-
/* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_GSO_SCTP | \
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | \
+ NETIF_F_GSO_ACCECN | NETIF_F_GSO_SCTP | \
NETIF_F_GSO_UDP_L4 | NETIF_F_GSO_FRAGLIST)
/*
@@ -261,4 +255,29 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
NETIF_F_GSO_UDP_TUNNEL | \
NETIF_F_GSO_UDP_TUNNEL_CSUM)
+/* virtual device features */
+#define MASTER_UPPER_DEV_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
+ NETIF_F_GSO_ENCAP_ALL | \
+ NETIF_F_HIGHDMA | NETIF_F_LRO)
+
+#define MASTER_UPPER_DEV_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE | \
+ NETIF_F_GSO_PARTIAL)
+
+#define MASTER_UPPER_DEV_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_GSO_SOFTWARE)
+
+#define MASTER_UPPER_DEV_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
+ NETIF_F_GSO_ESP)
+
+#define MASTER_UPPER_DEV_GSO_PARTIAL_FEATURES (NETIF_F_GSO_ESP)
+
+static inline netdev_features_t netdev_base_features(netdev_features_t features)
+{
+ features &= ~NETIF_F_ONE_FOR_ALL;
+ features |= NETIF_F_ALL_FOR_ALL;
+ return features;
+}
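
For stacked devices, netdev_base_features() is the usual starting point before masking with one of the MASTER_UPPER_DEV_* sets above. A sketch for a VLAN-style upper device (illustrative helper name):

static netdev_features_t my_upper_features(netdev_features_t lower)
{
	/* normalize one-for-all/all-for-all bits, then restrict */
	return netdev_base_features(lower) & MASTER_UPPER_DEV_VLAN_FEATURES;
}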
+
#endif /* _LINUX_NETDEV_FEATURES_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5cbc950b34df..5870a9e514a5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -28,6 +28,7 @@
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
+#include <asm/local.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
@@ -39,24 +40,32 @@
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
-#include <net/xdp.h>
-
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
+#include <linux/netdevice_xmit.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
+#include <uapi/linux/netdev.h>
#include <linux/hashtable.h>
+#include <linux/rbtree.h>
+#include <net/net_trackers.h>
+#include <net/net_debug.h>
+#include <net/dropreason-core.h>
+#include <net/neighbour_tables.h>
struct netpoll_info;
struct device;
struct ethtool_ops;
+struct kernel_hwtstamp_config;
struct phy_device;
struct dsa_port;
-struct ip_tunnel_parm;
+struct ip_tunnel_parm_kern;
struct macsec_context;
struct macsec_ops;
-
+struct netdev_config;
+struct netdev_name_node;
+struct sd_flow_limit;
struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
@@ -69,10 +78,19 @@ struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;
+struct xdp_frame;
+struct xdp_metadata_ops;
+struct xdp_md;
+struct ethtool_netdev_state;
+struct phy_link_topology;
+struct hwtstamp_provider;
+
+typedef u32 xdp_features_t;
void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
+void netdev_sw_irq_coalesce_default_on(struct net_device *dev);
/* Backlog congestion levels */
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
@@ -166,48 +184,59 @@ static inline bool dev_xmit_complete(int rc)
* (unsigned long) so they can be read and written atomically.
*/
+#define NET_DEV_STAT(FIELD) \
+ union { \
+ unsigned long FIELD; \
+ atomic_long_t __##FIELD; \
+ }
+
struct net_device_stats {
- unsigned long rx_packets;
- unsigned long tx_packets;
- unsigned long rx_bytes;
- unsigned long tx_bytes;
- unsigned long rx_errors;
- unsigned long tx_errors;
- unsigned long rx_dropped;
- unsigned long tx_dropped;
- unsigned long multicast;
- unsigned long collisions;
- unsigned long rx_length_errors;
- unsigned long rx_over_errors;
- unsigned long rx_crc_errors;
- unsigned long rx_frame_errors;
- unsigned long rx_fifo_errors;
- unsigned long rx_missed_errors;
- unsigned long tx_aborted_errors;
- unsigned long tx_carrier_errors;
- unsigned long tx_fifo_errors;
- unsigned long tx_heartbeat_errors;
- unsigned long tx_window_errors;
- unsigned long rx_compressed;
- unsigned long tx_compressed;
+ NET_DEV_STAT(rx_packets);
+ NET_DEV_STAT(tx_packets);
+ NET_DEV_STAT(rx_bytes);
+ NET_DEV_STAT(tx_bytes);
+ NET_DEV_STAT(rx_errors);
+ NET_DEV_STAT(tx_errors);
+ NET_DEV_STAT(rx_dropped);
+ NET_DEV_STAT(tx_dropped);
+ NET_DEV_STAT(multicast);
+ NET_DEV_STAT(collisions);
+ NET_DEV_STAT(rx_length_errors);
+ NET_DEV_STAT(rx_over_errors);
+ NET_DEV_STAT(rx_crc_errors);
+ NET_DEV_STAT(rx_frame_errors);
+ NET_DEV_STAT(rx_fifo_errors);
+ NET_DEV_STAT(rx_missed_errors);
+ NET_DEV_STAT(tx_aborted_errors);
+ NET_DEV_STAT(tx_carrier_errors);
+ NET_DEV_STAT(tx_fifo_errors);
+ NET_DEV_STAT(tx_heartbeat_errors);
+ NET_DEV_STAT(tx_window_errors);
+ NET_DEV_STAT(rx_compressed);
+ NET_DEV_STAT(tx_compressed);
};
+#undef NET_DEV_STAT
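
What the NET_DEV_STAT() union buys: lockless paths can bump the atomic_long_t view of a counter while legacy readers keep reading the plain unsigned long view of the same storage. A minimal sketch of both sides (helper names are illustrative; the kernel's own accessors may differ):

static void my_count_rx_drop(struct net_device_stats *stats)
{
	atomic_long_inc(&stats->__rx_dropped);	/* writer: atomic view */
}

static unsigned long my_read_rx_drop(const struct net_device_stats *stats)
{
	return stats->rx_dropped;		/* reader: plain view */
}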
+/* per-cpu stats, allocated on demand.
+ * Try to fit them in a single cache line, for dev_get_stats() sake.
+ */
+struct net_device_core_stats {
+ unsigned long rx_dropped;
+ unsigned long tx_dropped;
+ unsigned long rx_nohandler;
+ unsigned long rx_otherhost_dropped;
+} __aligned(4 * sizeof(unsigned long));
#include <linux/cache.h>
#include <linux/skbuff.h>
-#ifdef CONFIG_RPS
-#include <linux/static_key.h>
-extern struct static_key_false rps_needed;
-extern struct static_key_false rfs_needed;
-#endif
-
struct neighbour;
struct neigh_parms;
struct sk_buff;
struct netdev_hw_addr {
struct list_head list;
+ struct rb_node node;
unsigned char addr[MAX_ADDR_LEN];
unsigned char type;
#define NETDEV_HW_ADDR_T_LAN 1
@@ -224,6 +253,9 @@ struct netdev_hw_addr {
struct netdev_hw_addr_list {
struct list_head list;
int count;
+
+ /* Auxiliary tree for faster lookup on addition and deletion */
+ struct rb_root tree;
};
#define netdev_hw_addr_list_count(l) ((l)->count)
@@ -235,11 +267,17 @@ struct netdev_hw_addr_list {
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
netdev_hw_addr_list_for_each(ha, &(dev)->uc)
+#define netdev_for_each_synced_uc_addr(_ha, _dev) \
+ netdev_for_each_uc_addr((_ha), (_dev)) \
+ if ((_ha)->sync_cnt)
#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
netdev_hw_addr_list_for_each(ha, &(dev)->mc)
+#define netdev_for_each_synced_mc_addr(_ha, _dev) \
+ netdev_for_each_mc_addr((_ha), (_dev)) \
+ if ((_ha)->sync_cnt)
struct hh_cache {
unsigned int hh_len;
@@ -263,9 +301,11 @@ struct hh_cache {
* relationship HH alignment <= LL alignment.
*/
#define LL_RESERVED_SPACE(dev) \
- ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+ ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \
+ & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
- ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+ ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
+ & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
struct header_ops {
int (*create) (struct sk_buff *skb, struct net_device *dev,
@@ -294,34 +334,51 @@ enum netdev_state_t {
__LINK_STATE_TESTING,
};
-
-/*
- * This structure holds boot-time configured netdevice settings. They
- * are then used in the device probing.
- */
-struct netdev_boot_setup {
- char name[IFNAMSIZ];
- struct ifmap map;
-};
-#define NETDEV_BOOT_SETUP_MAX 8
-
-int __init netdev_boot_setup(char *str);
-
struct gro_list {
struct list_head list;
int count;
};
/*
- * size of gro hash buckets, must less than bit number of
- * napi_struct::gro_bitmask
+ * size of gro hash buckets, must be <= the number of bits in
+ * gro_node::bitmask
*/
#define GRO_HASH_BUCKETS 8
+/**
+ * struct gro_node - structure to support Generic Receive Offload
+ * @bitmask: bitmask to indicate used buckets in @hash
+ * @hash: hashtable of pending aggregated skbs, separated by flows
+ * @rx_list: list of pending ``GRO_NORMAL`` skbs
+ * @rx_count: cached current length of @rx_list
+ * @cached_napi_id: napi_struct::napi_id cached for hotpath, 0 for standalone
+ */
+struct gro_node {
+ unsigned long bitmask;
+ struct gro_list hash[GRO_HASH_BUCKETS];
+ struct list_head rx_list;
+ u32 rx_count;
+ u32 cached_napi_id;
+};
+
+/*
+ * Structure for per-NAPI config
+ */
+struct napi_config {
+ u64 gro_flush_timeout;
+ u64 irq_suspend_timeout;
+ u32 defer_hard_irqs;
+ cpumask_t affinity_mask;
+ u8 threaded;
+ unsigned int napi_id;
+};
+
/*
* Structure for NAPI scheduling similar to tasklet but with weighting
*/
struct napi_struct {
+ /* This field should be first or softnet_data.backlog needs tweaks. */
+ unsigned long state;
/* The poll_list must only be managed by the entity which
* changes the state of the NAPI_STATE_SCHED bit. This means
* whoever atomically sets that bit can add this napi_struct
@@ -330,24 +387,33 @@ struct napi_struct {
*/
struct list_head poll_list;
- unsigned long state;
int weight;
- int defer_hard_irqs_count;
- unsigned long gro_bitmask;
+ u32 defer_hard_irqs_count;
int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
+ /* CPU actively polling if netpoll is configured */
int poll_owner;
#endif
+ /* CPU on which NAPI has been scheduled for processing */
+ int list_owner;
struct net_device *dev;
- struct gro_list gro_hash[GRO_HASH_BUCKETS];
struct sk_buff *skb;
- struct list_head rx_list; /* Pending GRO_NORMAL skbs */
- int rx_count; /* length of rx_list */
+ struct gro_node gro;
struct hrtimer timer;
+ /* all fields past this point are write-protected by netdev_lock */
+ struct task_struct *thread;
+ unsigned long gro_flush_timeout;
+ unsigned long irq_suspend_timeout;
+ u32 defer_hard_irqs;
+ /* control-path-only fields follow */
+ u32 napi_id;
struct list_head dev_list;
struct hlist_node napi_hash_node;
- unsigned int napi_id;
- struct task_struct *thread;
+ int irq;
+ struct irq_affinity_notify notify;
+ int napi_rmap_idx;
+ int index;
+ struct napi_config *config;
};
enum {
@@ -357,10 +423,12 @@ enum {
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
NAPI_STATE_LISTED, /* NAPI added to system lists */
NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */
- NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */
+ NAPI_STATE_IN_BUSY_POLL, /* Do not rearm NAPI interrupt */
NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/
NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/
NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */
+ NAPI_STATE_HAS_NOTIFIER, /* Napi has an IRQ notifier */
+ NAPI_STATE_THREADED_BUSY_POLL, /* The threaded NAPI poller will busy poll */
};
enum {
@@ -374,6 +442,8 @@ enum {
NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL),
NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED),
NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED),
+ NAPIF_STATE_HAS_NOTIFIER = BIT(NAPI_STATE_HAS_NOTIFIER),
+ NAPIF_STATE_THREADED_BUSY_POLL = BIT(NAPI_STATE_THREADED_BUSY_POLL),
};
enum gro_result {
@@ -448,6 +518,29 @@ static inline bool napi_prefer_busy_poll(struct napi_struct *n)
return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
}
+/**
+ * napi_is_scheduled - test if NAPI is scheduled
+ * @n: NAPI context
+ *
+ * This check is "best-effort". With no locking implemented,
+ * a NAPI can be scheduled or terminated right after this check,
+ * so the result may not be precise.
+ *
+ * NAPI_STATE_SCHED is an internal state, napi_is_scheduled
+ * should not be used normally and napi_schedule should be
+ * used instead.
+ *
+ * Use only if the driver really needs to check if a NAPI
+ * is scheduled for example in the context of delayed timer
+ * that can be skipped if a NAPI is already scheduled.
+ *
+ * Return: True if NAPI is scheduled, False otherwise.
+ */
+static inline bool napi_is_scheduled(struct napi_struct *n)
+{
+ return test_bit(NAPI_STATE_SCHED, &n->state);
+}
+
bool napi_schedule_prep(struct napi_struct *n);
/**
@@ -456,11 +549,18 @@ bool napi_schedule_prep(struct napi_struct *n);
*
* Schedule NAPI poll routine to be called if it is not already
* running.
+ * Return: true if we schedule a NAPI or false if not.
+ * Refer to napi_schedule_prep() for additional reason on why
+ * a NAPI might not be scheduled.
*/
-static inline void napi_schedule(struct napi_struct *n)
+static inline bool napi_schedule(struct napi_struct *n)
{
- if (napi_schedule_prep(n))
+ if (napi_schedule_prep(n)) {
__napi_schedule(n);
+ return true;
+ }
+
+ return false;
}
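
The new boolean return lets an interrupt handler mask the device IRQ only when it actually took ownership of the NAPI. A hedged sketch, assuming a hypothetical per-queue structure:

struct my_queue {
	struct napi_struct napi;
	/* device-specific IRQ-mask state would live here */
};

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_queue *q = data;

	if (napi_schedule(&q->napi)) {
		/* we now own the poll; mask the queue IRQ here so the
		 * poll routine can re-arm it via napi_complete_done()
		 */
	}

	return IRQ_HANDLED;
}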
/**
@@ -475,42 +575,32 @@ static inline void napi_schedule_irqoff(struct napi_struct *n)
__napi_schedule_irqoff(n);
}
-/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
-static inline bool napi_reschedule(struct napi_struct *napi)
-{
- if (napi_schedule_prep(napi)) {
- __napi_schedule(napi);
- return true;
- }
- return false;
-}
-
-bool napi_complete_done(struct napi_struct *n, int work_done);
/**
- * napi_complete - NAPI processing complete
- * @n: NAPI context
- *
- * Mark NAPI processing as complete.
- * Consider using napi_complete_done() instead.
- * Return false if device should avoid rearming interrupts.
+ * napi_complete_done - NAPI processing complete
+ * @n: NAPI context
+ * @work_done: number of packets processed
+ *
+ * Mark NAPI processing as complete. Should only be called if poll budget
+ * has not been completely consumed.
+ * Prefer over napi_complete().
+ * Return: false if device should avoid rearming interrupts.
*/
+bool napi_complete_done(struct napi_struct *n, int work_done);
+
static inline bool napi_complete(struct napi_struct *n)
{
return napi_complete_done(n, 0);
}
-int dev_set_threaded(struct net_device *dev, bool threaded);
+void netif_threaded_enable(struct net_device *dev);
+int dev_set_threaded(struct net_device *dev,
+ enum netdev_napi_threaded threaded);
-/**
- * napi_disable - prevent NAPI from scheduling
- * @n: NAPI context
- *
- * Stop NAPI from being scheduled on this context.
- * Waits till any outstanding processing completes.
- */
void napi_disable(struct napi_struct *n);
+void napi_disable_locked(struct napi_struct *n);
void napi_enable(struct napi_struct *n);
+void napi_enable_locked(struct napi_struct *n);
/**
* napi_synchronize - wait until NAPI is not running
@@ -541,8 +631,8 @@ static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
unsigned long val, new;
+ val = READ_ONCE(n->state);
do {
- val = READ_ONCE(n->state);
if (val & NAPIF_STATE_DISABLE)
return true;
@@ -550,7 +640,7 @@ static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
return false;
new = val | NAPIF_STATE_MISSED;
- } while (cmpxchg(&n->state, val, new) != val);
+ } while (!try_cmpxchg(&n->state, &val, new));
return true;
}
@@ -586,29 +676,34 @@ struct netdev_queue {
* read-mostly part
*/
struct net_device *dev;
+ netdevice_tracker dev_tracker;
+
struct Qdisc __rcu *qdisc;
- struct Qdisc *qdisc_sleeping;
+ struct Qdisc __rcu *qdisc_sleeping;
#ifdef CONFIG_SYSFS
struct kobject kobj;
-#endif
-#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
- int numa_node;
+ const struct attribute_group **groups;
#endif
unsigned long tx_maxrate;
/*
* Number of TX timeouts for this queue
* (/sys/class/net/DEV/Q/trans_timeout)
*/
- unsigned long trans_timeout;
+ atomic_long_t trans_timeout;
/* Subordinate device that the queue has been assigned to */
struct net_device *sb_dev;
#ifdef CONFIG_XDP_SOCKETS
+ /* "ops protected", see comment about net_device::lock */
struct xsk_buff_pool *pool;
#endif
+
/*
* write-mostly part
*/
+#ifdef CONFIG_BQL
+ struct dql dql;
+#endif
spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
int xmit_lock_owner;
/*
@@ -618,8 +713,16 @@ struct netdev_queue {
unsigned long state;
-#ifdef CONFIG_BQL
- struct dql dql;
+/*
+ * slow- / control-path part
+ */
+ /* NAPI instance for the queue
+ * "ops protected", see comment about net_device::lock
+ */
+ struct napi_struct *napi;
+
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+ int numa_node;
#endif
} ____cacheline_aligned_in_smp;
@@ -633,9 +736,23 @@ extern int sysctl_devconf_inherit_init_net;
*/
static inline bool net_has_fallback_tunnels(const struct net *net)
{
- return !IS_ENABLED(CONFIG_SYSCTL) ||
- !sysctl_fb_tunnels_only_for_init_net ||
- (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1);
+#if IS_ENABLED(CONFIG_SYSCTL)
+ int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);
+
+ return !fb_tunnels_only_for_init_net ||
+ (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
+#else
+ return true;
+#endif
+}
+
+static inline int net_inherit_devconf(void)
+{
+#if IS_ENABLED(CONFIG_SYSCTL)
+ return READ_ONCE(sysctl_devconf_inherit_init_net);
+#else
+ return 0;
+#endif
}
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -654,107 +771,10 @@ static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node
#endif
}
-#ifdef CONFIG_RPS
-/*
- * This structure holds an RPS map which can be of variable length. The
- * map is an array of CPUs.
- */
-struct rps_map {
- unsigned int len;
- struct rcu_head rcu;
- u16 cpus[];
-};
-#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
-
-/*
- * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
- * tail pointer for that CPU's input queue at the time of last enqueue, and
- * a hardware filter index.
- */
-struct rps_dev_flow {
- u16 cpu;
- u16 filter;
- unsigned int last_qtail;
-};
-#define RPS_NO_FILTER 0xffff
-
-/*
- * The rps_dev_flow_table structure contains a table of flow mappings.
- */
-struct rps_dev_flow_table {
- unsigned int mask;
- struct rcu_head rcu;
- struct rps_dev_flow flows[];
-};
-#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
- ((_num) * sizeof(struct rps_dev_flow)))
-
-/*
- * The rps_sock_flow_table contains mappings of flows to the last CPU
- * on which they were processed by the application (set in recvmsg).
- * Each entry is a 32bit value. Upper part is the high-order bits
- * of flow hash, lower part is CPU number.
- * rps_cpu_mask is used to partition the space, depending on number of
- * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
- * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
- * meaning we use 32-6=26 bits for the hash.
- */
-struct rps_sock_flow_table {
- u32 mask;
-
- u32 ents[] ____cacheline_aligned_in_smp;
-};
-#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
-
-#define RPS_NO_CPU 0xffff
-
-extern u32 rps_cpu_mask;
-extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
-
-static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
- u32 hash)
-{
- if (table && hash) {
- unsigned int index = hash & table->mask;
- u32 val = hash & ~rps_cpu_mask;
-
- /* We only give a hint, preemption can change CPU under us */
- val |= raw_smp_processor_id();
-
- if (table->ents[index] != val)
- table->ents[index] = val;
- }
-}
-
#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
u16 filter_id);
#endif
-#endif /* CONFIG_RPS */
-
-/* This structure contains an instance of an RX queue. */
-struct netdev_rx_queue {
-#ifdef CONFIG_RPS
- struct rps_map __rcu *rps_map;
- struct rps_dev_flow_table __rcu *rps_flow_table;
-#endif
- struct kobject kobj;
- struct net_device *dev;
- struct xdp_rxq_info xdp_rxq;
-#ifdef CONFIG_XDP_SOCKETS
- struct xsk_buff_pool *pool;
-#endif
-} ____cacheline_aligned_in_smp;
-
-/*
- * RX queue sysfs structures and functions.
- */
-struct rx_queue_attribute {
- struct attribute attr;
- ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
- ssize_t (*store)(struct netdev_rx_queue *queue,
- const char *buf, size_t len);
-};
/* XPS map type and offset of the xps map within net_device->xps_maps[]. */
enum xps_map_type {
@@ -856,6 +876,8 @@ enum net_device_path_type {
DEV_PATH_BRIDGE,
DEV_PATH_PPPOE,
DEV_PATH_DSA,
+ DEV_PATH_MTK_WDMA,
+ DEV_PATH_TUN,
};
struct net_device_path {
@@ -868,6 +890,18 @@ struct net_device_path {
u8 h_dest[ETH_ALEN];
} encap;
struct {
+ union {
+ struct in_addr src_v4;
+ struct in6_addr src_v6;
+ };
+ union {
+ struct in_addr dst_v4;
+ struct in6_addr dst_v6;
+ };
+
+ u8 l3_proto;
+ } tun;
+ struct {
enum {
DEV_PATH_BR_VLAN_KEEP,
DEV_PATH_BR_VLAN_TAG,
@@ -881,6 +915,13 @@ struct net_device_path {
int port;
u16 proto;
} dsa;
+ struct {
+ u8 wdma_idx;
+ u8 queue;
+ u16 wcid;
+ u8 bss;
+ u8 amsdu;
+ } mtk_wdma;
};
};
@@ -894,7 +935,7 @@ struct net_device_path_stack {
struct net_device_path_ctx {
const struct net_device *dev;
- const u8 *daddr;
+ u8 daddr[ETH_ALEN];
int num_vlans;
struct {
@@ -904,6 +945,7 @@ struct net_device_path_ctx {
};
enum tc_setup_type {
+ TC_QUERY_CAPS,
TC_SETUP_QDISC_MQPRIO,
TC_SETUP_CLSU32,
TC_SETUP_CLSFLOWER,
@@ -923,6 +965,7 @@ enum tc_setup_type {
TC_SETUP_QDISC_TBF,
TC_SETUP_QDISC_FIFO,
TC_SETUP_QDISC_HTB,
+ TC_SETUP_ACT,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -989,12 +1032,20 @@ struct netdev_bpf {
#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
- int (*xdo_dev_state_add) (struct xfrm_state *x);
- void (*xdo_dev_state_delete) (struct xfrm_state *x);
- void (*xdo_dev_state_free) (struct xfrm_state *x);
+ int (*xdo_dev_state_add)(struct net_device *dev,
+ struct xfrm_state *x,
+ struct netlink_ext_ack *extack);
+ void (*xdo_dev_state_delete)(struct net_device *dev,
+ struct xfrm_state *x);
+ void (*xdo_dev_state_free)(struct net_device *dev,
+ struct xfrm_state *x);
bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
struct xfrm_state *x);
void (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
+ void (*xdo_dev_state_update_stats) (struct xfrm_state *x);
+ int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
+ void (*xdo_dev_policy_delete) (struct xfrm_policy *x);
+ void (*xdo_dev_policy_free) (struct xfrm_policy *x);
};
#endif
@@ -1006,16 +1057,6 @@ struct dev_ifalias {
struct devlink;
struct tlsdev_ops;
-struct netdev_name_node {
- struct hlist_node hlist;
- struct list_head list;
- struct net_device *dev;
- const char *name;
-};
-
-int netdev_name_node_alt_create(struct net_device *dev, const char *name);
-int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
-
struct netdev_net_notifier {
struct list_head list;
struct notifier_block *nb;
@@ -1086,9 +1127,18 @@ struct netdev_net_notifier {
* Test if Media Access Control address is valid for the device.
*
* int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
- * Called when a user requests an ioctl which can't be handled by
- * the generic interface code. If not defined ioctls return
- * not supported error code.
+ * Old-style ioctl entry point. This is used internally by the
+ * ieee802154 subsystem but is no longer called by the device
+ * ioctl handler.
+ *
+ * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd);
+ * Used by the bonding driver for its device specific ioctls:
+ * SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE,
+ * SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY
+ *
+ * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
+ * Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG,
+ * SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP.
*
* int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
* Used to set network devices bus interface parameters. This interface
@@ -1239,18 +1289,39 @@ struct netdev_net_notifier {
* int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
* const unsigned char *addr, u16 vid, u16 flags,
- * struct netlink_ext_ack *extack);
+ * bool *notified, struct netlink_ext_ack *extack);
* Adds an FDB entry to dev for addr.
+ * Callee shall set *notified to true if it sent any appropriate
+ * notification(s). Otherwise core will send a generic one.
* int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
* struct net_device *dev,
- * const unsigned char *addr, u16 vid)
- * Deletes the FDB entry from dev coresponding to addr.
+ * const unsigned char *addr, u16 vid,
+ * bool *notified, struct netlink_ext_ack *extack);
+ * Deletes the FDB entry from dev corresponding to addr.
+ * Callee shall set *notified to true if it sent any appropriate
+ * notification(s). Otherwise core will send a generic one.
+ * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev,
+ * struct netlink_ext_ack *extack);
* int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
* struct net_device *dev, struct net_device *filter_dev,
* int *idx)
* Used to add FDB entries to dump requests. Implementers should add
* entries to skb and update idx with the number of entries.
*
+ * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[],
+ * u16 nlmsg_flags, struct netlink_ext_ack *extack);
+ * Adds an MDB entry to dev.
+ * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
+ * struct netlink_ext_ack *extack);
+ * Deletes the MDB entry from dev.
+ * int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[],
+ * struct netlink_ext_ack *extack);
+ * Bulk deletes MDB entries from dev.
+ * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
+ * struct netlink_callback *cb);
+ * Dumps MDB entries from dev. The first argument (marker) in the netlink
+ * callback is used by core rtnetlink code.
+ *
* int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
* u16 flags, struct netlink_ext_ack *extack)
* int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
@@ -1295,11 +1366,6 @@ struct netdev_net_notifier {
* TX queue.
* int (*ndo_get_iflink)(const struct net_device *dev);
* Called to get the iflink value of this device.
- * void (*ndo_change_proto_down)(struct net_device *dev,
- * bool proto_down);
- * This function is used to pass protocol port error state information
- * to the switch driver. The switch driver can react to the proto_down
- * by doing a phys down on the associated switch port.
* int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
* This function is used to get egress tunnel information for given skb.
* This is useful for retrieving outer tunnel header parameters while
@@ -1321,17 +1387,16 @@ struct netdev_net_notifier {
* that got dropped are freed/returned via xdp_return_frame().
 * A negative return value means a general error invoking the ndo:
 * no frames were transmitted and the core caller will free all frames.
+ * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev,
+ * struct xdp_buff *xdp);
+ * Get the xmit slave of the master device based on the xdp_buff.
* int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
* This function is used to wake up the softirq, ksoftirqd or kthread
* responsible for sending and/or receiving packets on a specific
* queue id bound to an AF_XDP socket. The flags field specifies if
* only RX, only Tx, or both should be woken up using the flags
* XDP_WAKEUP_RX and XDP_WAKEUP_TX.
- * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev);
- * Get devlink port instance associated with a given netdev.
- * Called with a reference on the netdevice and devlink locks only,
- * rtnl_lock is not held.
- * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p,
+ * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm_kern *p,
* int cmd);
* Add, change, delete or get information on an IPv4 tunnel.
* struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
@@ -1339,6 +1404,22 @@ struct netdev_net_notifier {
* The caller must be under RCU read context.
* int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
* Get the forwarding path to reach the real device from the HW destination address
+ * ktime_t (*ndo_get_tstamp)(struct net_device *dev,
+ * const struct skb_shared_hwtstamps *hwtstamps,
+ * bool cycles);
+ * Get a hardware timestamp based on the normal/adjustable time or the
+ * free-running cycle counter. This function is required if the physical
+ * clock supports a free-running cycle counter.
+ *
+ * int (*ndo_hwtstamp_get)(struct net_device *dev,
+ * struct kernel_hwtstamp_config *kernel_config);
+ * Get the currently configured hardware timestamping parameters for the
+ * NIC device.
+ *
+ * int (*ndo_hwtstamp_set)(struct net_device *dev,
+ * struct kernel_hwtstamp_config *kernel_config,
+ * struct netlink_ext_ack *extack);
+ * Change the hardware timestamping parameters for the NIC device.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1361,6 +1442,15 @@ struct net_device_ops {
int (*ndo_validate_addr)(struct net_device *dev);
int (*ndo_do_ioctl)(struct net_device *dev,
struct ifreq *ifr, int cmd);
+ int (*ndo_eth_ioctl)(struct net_device *dev,
+ struct ifreq *ifr, int cmd);
+ int (*ndo_siocbond)(struct net_device *dev,
+ struct ifreq *ifr, int cmd);
+ int (*ndo_siocwandev)(struct net_device *dev,
+ struct if_settings *ifs);
+ int (*ndo_siocdevprivate)(struct net_device *dev,
+ struct ifreq *ifr,
+ void __user *data, int cmd);
int (*ndo_set_config)(struct net_device *dev,
struct ifmap *map);
int (*ndo_change_mtu)(struct net_device *dev,
@@ -1384,8 +1474,7 @@ struct net_device_ops {
__be16 proto, u16 vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
void (*ndo_poll_controller)(struct net_device *dev);
- int (*ndo_netpoll_setup)(struct net_device *dev,
- struct netpoll_info *info);
+ int (*ndo_netpoll_setup)(struct net_device *dev);
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
@@ -1482,12 +1571,18 @@ struct net_device_ops {
const unsigned char *addr,
u16 vid,
u16 flags,
+ bool *notified,
struct netlink_ext_ack *extack);
int (*ndo_fdb_del)(struct ndmsg *ndm,
struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
- u16 vid);
+ u16 vid,
+ bool *notified,
+ struct netlink_ext_ack *extack);
+ int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh,
+ struct net_device *dev,
+ struct netlink_ext_ack *extack);
int (*ndo_fdb_dump)(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
@@ -1499,6 +1594,23 @@ struct net_device_ops {
const unsigned char *addr,
u16 vid, u32 portid, u32 seq,
struct netlink_ext_ack *extack);
+ int (*ndo_mdb_add)(struct net_device *dev,
+ struct nlattr *tb[],
+ u16 nlmsg_flags,
+ struct netlink_ext_ack *extack);
+ int (*ndo_mdb_del)(struct net_device *dev,
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack);
+ int (*ndo_mdb_del_bulk)(struct net_device *dev,
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack);
+ int (*ndo_mdb_dump)(struct net_device *dev,
+ struct sk_buff *skb,
+ struct netlink_callback *cb);
+ int (*ndo_mdb_get)(struct net_device *dev,
+ struct nlattr *tb[], u32 portid,
+ u32 seq,
+ struct netlink_ext_ack *extack);
int (*ndo_bridge_setlink)(struct net_device *dev,
struct nlmsghdr *nlh,
u16 flags,
@@ -1528,8 +1640,6 @@ struct net_device_ops {
int queue_index,
u32 maxrate);
int (*ndo_get_iflink)(const struct net_device *dev);
- int (*ndo_change_proto_down)(struct net_device *dev,
- bool proto_down);
int (*ndo_fill_metadata_dst)(struct net_device *dev,
struct sk_buff *skb);
void (*ndo_set_rx_headroom)(struct net_device *dev,
@@ -1539,14 +1649,32 @@ struct net_device_ops {
int (*ndo_xdp_xmit)(struct net_device *dev, int n,
struct xdp_frame **xdp,
u32 flags);
+ struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev,
+ struct xdp_buff *xdp);
int (*ndo_xsk_wakeup)(struct net_device *dev,
u32 queue_id, u32 flags);
- struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
int (*ndo_tunnel_ctl)(struct net_device *dev,
- struct ip_tunnel_parm *p, int cmd);
+ struct ip_tunnel_parm_kern *p,
+ int cmd);
struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);
int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
struct net_device_path *path);
+ ktime_t (*ndo_get_tstamp)(struct net_device *dev,
+ const struct skb_shared_hwtstamps *hwtstamps,
+ bool cycles);
+ int (*ndo_hwtstamp_get)(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_config);
+ int (*ndo_hwtstamp_set)(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_config,
+ struct netlink_ext_ack *extack);
+
+#if IS_ENABLED(CONFIG_NET_SHAPER)
+ /**
+ * @net_shaper_ops: Device shaping offload operations
+ * see include/net/net_shapers.h
+ */
+ const struct net_shaper_ops *net_shaper_ops;
+#endif
};
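
To make the new timestamping entry points concrete, here is a minimal, hedged sketch of a driver wiring up ndo_hwtstamp_get()/ndo_hwtstamp_set(). struct my_priv, my_hwtstamp_get() and my_hwtstamp_set() are illustrative names, not part of any in-tree driver:

#include <linux/netdevice.h>
#include <linux/net_tstamp.h>

struct my_priv {				/* hypothetical driver state */
	struct kernel_hwtstamp_config hwts;
};

static int my_hwtstamp_get(struct net_device *dev,
			   struct kernel_hwtstamp_config *cfg)
{
	struct my_priv *p = netdev_priv(dev);

	*cfg = p->hwts;		/* report the currently programmed config */
	return 0;
}

static int my_hwtstamp_set(struct net_device *dev,
			   struct kernel_hwtstamp_config *cfg,
			   struct netlink_ext_ack *extack)
{
	struct my_priv *p = netdev_priv(dev);

	if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON) {
		NL_SET_ERR_MSG(extack, "unsupported tx_type");
		return -ERANGE;
	}
	/* ... program the hardware here ... */
	p->hwts = *cfg;
	return 0;
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_hwtstamp_get	= my_hwtstamp_get,
	.ndo_hwtstamp_set	= my_hwtstamp_set,
};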
/**
@@ -1557,7 +1685,8 @@ struct net_device_ops {
* userspace; this means that the order of these flags can change
* during any kernel release.
*
- * You should have a pretty good reason to be extending these flags.
+ * You should add bitfield booleans after either net_device::priv_flags
+ * (hotpath) or ::threaded (slowpath) instead of extending these flags.
*
* @IFF_802_1Q_VLAN: 802.1Q VLAN device
* @IFF_EBRIDGE: Ethernet bridging device
@@ -1593,7 +1722,7 @@ struct net_device_ops {
* @IFF_FAILOVER: device is a failover master device
* @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
* @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
- * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
+ * @IFF_NO_ADDRCONF: prevent IPv6 addrconf
* @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
* skb_headlen(skb) == 0 (data starts from frag0)
*/
@@ -1628,49 +1757,32 @@ enum netdev_priv_flags {
IFF_FAILOVER = 1<<27,
IFF_FAILOVER_SLAVE = 1<<28,
IFF_L3MDEV_RX_HANDLER = 1<<29,
- IFF_LIVE_RENAME_OK = 1<<30,
- IFF_TX_SKB_NO_LINEAR = 1<<31,
+ IFF_NO_ADDRCONF = BIT_ULL(30),
+ IFF_TX_SKB_NO_LINEAR = BIT_ULL(31),
};
-#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
-#define IFF_EBRIDGE IFF_EBRIDGE
-#define IFF_BONDING IFF_BONDING
-#define IFF_ISATAP IFF_ISATAP
-#define IFF_WAN_HDLC IFF_WAN_HDLC
-#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
-#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
-#define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
-#define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
-#define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
-#define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
-#define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
-#define IFF_UNICAST_FLT IFF_UNICAST_FLT
-#define IFF_TEAM_PORT IFF_TEAM_PORT
-#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
-#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
-#define IFF_MACVLAN IFF_MACVLAN
-#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
-#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
-#define IFF_NO_QUEUE IFF_NO_QUEUE
-#define IFF_OPENVSWITCH IFF_OPENVSWITCH
-#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
-#define IFF_TEAM IFF_TEAM
-#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
-#define IFF_PHONY_HEADROOM IFF_PHONY_HEADROOM
-#define IFF_MACSEC IFF_MACSEC
-#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
-#define IFF_FAILOVER IFF_FAILOVER
-#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
-#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
-#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
-#define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR
-
/* Specifies the type of the struct net_device::ml_priv pointer */
enum netdev_ml_priv_type {
ML_PRIV_NONE,
ML_PRIV_CAN,
};
+enum netdev_stat_type {
+ NETDEV_PCPU_STAT_NONE,
+ NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */
+ NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */
+ NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */
+};
+
+enum netdev_reg_state {
+ NETREG_UNINITIALIZED = 0,
+ NETREG_REGISTERED, /* completed register_netdevice */
+ NETREG_UNREGISTERING, /* called unregister_netdevice */
+ NETREG_UNREGISTERED, /* completed unregister todo */
+ NETREG_RELEASED, /* called free_netdev */
+ NETREG_DUMMY, /* dummy device for NAPI poll */
+};
+
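
A short sketch of how a driver opts into core-managed per-CPU statistics using the enum above; my_setup() is a hypothetical setup callback:

static void my_setup(struct net_device *dev)
{
	/* With this set, the core allocates dev->tstats during
	 * register_netdevice() and frees it on unregistration;
	 * the driver must not manage the allocation itself.
	 */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}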
/**
* struct net_device - The DEVICE structure.
*
@@ -1678,6 +1790,13 @@ enum netdev_ml_priv_type {
* data with strictly "high-level" data, and it has to know about
* almost every data structure used in the INET module.
*
+ * @priv_flags: flags invisible to userspace defined as bits, see
+ * enum netdev_priv_flags for the definitions
+ * @lltx: device supports lockless Tx. Deprecated for real HW
+ * drivers. Mainly used by logical interfaces, such as
+ * bonding and tunnels
+ * @netmem_tx: device supports netmem Tx.
+ *
* @name: This is the first field of the "visible" part of this structure
* (i.e. as seen by users in the "Space.c" file). It is the name
* of the interface.
@@ -1719,22 +1838,19 @@ enum netdev_ml_priv_type {
* @stats: Statistics struct, which was left as a legacy, use
* rtnl_link_stats64 instead
*
- * @rx_dropped: Dropped packets by core network,
+ * @core_stats: core networking counters,
* do not use this in drivers
- * @tx_dropped: Dropped packets by core network,
- * do not use this in drivers
- * @rx_nohandler: nohandler dropped packets by core network on
- * inactive devices, do not use this in drivers
* @carrier_up_count: Number of times the carrier has been up
* @carrier_down_count: Number of times the carrier has been down
*
* @wireless_handlers: List of functions to handle Wireless Extensions,
* instead of ioctl,
* see <net/iw_handler.h> for details.
- * @wireless_data: Instance data managed by the core of wireless extensions
*
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
+ * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks.
+ * @xsk_tx_metadata_ops: Includes pointers to AF_XDP TX metadata callbacks.
* @ethtool_ops: Management operations
* @l3mdev_ops: Layer 3 master device operations
* @ndisc_ops: Includes callbacks for different IPv6 neighbour
@@ -1745,10 +1861,10 @@ enum netdev_ml_priv_type {
* of Layer 2 headers.
*
* @flags: Interface flags (a la BSD)
- * @priv_flags: Like 'flags' but invisible to userspace,
- * see if.h for the definitions
+ * @xdp_features: XDP capability supported by the device
* @gflags: Global flags ( kept as legacy )
- * @padded: How much padding added by alloc_netdev()
+ * @priv_len: Size of the ->priv flexible array
+ * @priv: Flexible array containing private data
* @operstate: RFC2863 operstate
* @link_mode: Mapping policy to operstate
* @if_port: Selectable AUI, TP, ...
@@ -1773,6 +1889,7 @@ enum netdev_ml_priv_type {
* @addr_len: Hardware address length
* @upper_level: Maximum depth level of upper devices.
* @lower_level: Maximum depth level of lower devices.
+ * @threaded: napi threaded state.
* @neigh_priv_len: Used in neigh_alloc()
* @dev_id: Used to differentiate devices that share
* the same link layer address
@@ -1798,13 +1915,14 @@ enum netdev_ml_priv_type {
* @tipc_ptr: TIPC specific data
* @atalk_ptr: AppleTalk link
* @ip_ptr: IPv4 specific data
- * @dn_ptr: DECnet specific data
* @ip6_ptr: IPv6 specific data
* @ax25_ptr: AX.25 specific data
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
* @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
* device struct
* @mpls_ptr: mpls_dev struct pointer
+ * @mctp_ptr: MCTP specific data
+ * @psp_dev: PSP crypto device registered for this netdev
*
* @dev_addr: Hw address (before bcast,
* because most packets are unicast)
@@ -1814,14 +1932,10 @@ enum netdev_ml_priv_type {
* allocated at register_netdev() time
* @real_num_rx_queues: Number of RX queues currently active in device
* @xdp_prog: XDP sockets filter program pointer
- * @gro_flush_timeout: timeout for GRO layer in NAPI
- * @napi_defer_hard_irqs: If not zero, provides a counter that would
- * allow to avoid NIC hard IRQ, on busy queues.
*
* @rx_handler: handler for received packets
* @rx_handler_data: XXX: need comments on this one
- * @miniq_ingress: ingress/clsact qdisc specific data for
- * ingress processing
+ * @tcx_ingress: BPF & clsact qdisc specific data for ingress processing
* @ingress_queue: XXX: need comments on this one
* @nf_hooks_ingress: netfilter hooks executed for ingress packets
* @broadcast: hw bcast address
@@ -1842,8 +1956,8 @@ enum netdev_ml_priv_type {
* @xps_maps: all CPUs/RXQs maps for XPS device
*
* @xps_maps: XXX: need comments on this one
- * @miniq_egress: clsact qdisc specific data for
- * egress processing
+ * @tcx_egress: BPF & clsact qdisc specific data for egress processing
+ * @nf_hooks_egress: netfilter hooks executed for egress packets
* @qdisc_hash: qdisc hash table
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog (see dev_watchdog())
@@ -1852,39 +1966,50 @@ enum netdev_ml_priv_type {
* @proto_down_reason: reason a netdev interface is held down
* @pcpu_refcnt: Number of references to this device
* @dev_refcnt: Number of references to this device
+ * @refcnt_tracker: Tracker directory for tracked references to this device
* @todo_list: Delayed register/unregister
* @link_watch_list: XXX: need comments on this one
*
* @reg_state: Register/unregister state machine
* @dismantle: Device is going to be freed
- * @rtnl_link_state: This enum represents the phases of creating
- * a new link
- *
* @needs_free_netdev: Should unregister perform free_netdev?
* @priv_destructor: Called from unregister
* @npinfo: XXX: need comments on this one
* @nd_net: Network namespace this network device is inside
+ * protected by @lock
*
* @ml_priv: Mid-layer private
* @ml_priv_type: Mid-layer private type
- * @lstats: Loopback statistics
- * @tstats: Tunnel statistics
- * @dstats: Dummy statistics
- * @vstats: Virtual ethernet statistics
+ *
+ * @pcpu_stat_type: Type of device statistics which the core should
+ * allocate/free: none, lstats, tstats or dstats. none
+ * means the driver handles statistics allocation and
+ * freeing internally.
+ * @lstats: Loopback statistics: packets, bytes
+ * @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes
+ * @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes
*
* @garp_port: GARP
* @mrp_port: MRP
*
+ * @dm_private: Drop monitor private
+ *
* @dev: Class/net/name entry
* @sysfs_groups: Space for optional device, statistics and wireless
* sysfs groups
*
* @sysfs_rx_queue_group: Space for optional per-rx queue attributes
* @rtnl_link_ops: Rtnl_link_ops
+ * @stat_ops: Optional ops for queue-aware statistics
+ * @queue_mgmt_ops: Optional ops for queue management
*
* @gso_max_size: Maximum size of generic segmentation offload
+ * @tso_max_size: Device (as in HW) limit on the max TSO request size
* @gso_max_segs: Maximum number of segments that can be passed to the
* NIC for GSO
+ * @tso_max_segs: Device (as in HW) limit on the max TSO segment count
+ * @gso_ipv4_max_size: Maximum size of generic segmentation offload,
+ * for IPv4.
*
* @dcbnl_ops: Data Center Bridging netlink ops
* @num_tc: Number of traffic classes in the net device
@@ -1894,20 +2019,33 @@ enum netdev_ml_priv_type {
* @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
*
* @priomap: XXX: need comments on this one
+ * @link_topo: Physical link topology tracking attached PHYs
* @phydev: Physical device may attach itself
* for hardware timestamping
* @sfp_bus: attached &struct sfp_bus structure.
*
* @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
- * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
*
* @proto_down: protocol port state information can be sent to the
* switch driver and used to set the phys state of the
* switch port.
*
- * @wol_enabled: Wake-on-LAN is enabled
+ * @irq_affinity_auto: driver wants the core to store and re-assign the IRQ
+ * affinity. Set by netif_set_affinity_auto(), then
+ * the driver must create a persistent napi with
+ * netif_napi_add_config() and finally bind the napi to
+ * the IRQ (via netif_napi_set_irq()).
*
- * @threaded: napi threaded mode is enabled
+ * @rx_cpu_rmap_auto: driver wants the core to manage the ARFS rmap.
+ * Set by calling netif_enable_cpu_rmap().
+ *
+ * @see_all_hwtstamp_requests: device wants to see calls to
+ * ndo_hwtstamp_set() for all timestamp requests
+ * regardless of source, even if those aren't
+ * HWTSTAMP_SOURCE_NETDEV
+ * @change_proto_down: device supports setting carrier via IFLA_PROTO_DOWN
+ * @netns_immutable: interface can't change network namespaces
+ * @fcoe_mtu: device supports maximum FCoE MTU, 2158 bytes
*
* @net_notifier_list: List of per-net netdev notifier block
* that follow this device when it is moved
@@ -1918,18 +2056,127 @@ enum netdev_ml_priv_type {
* @udp_tunnel_nic_info: static structure describing the UDP tunnel
* offload capabilities of the device
* @udp_tunnel_nic: UDP tunnel offload state
+ * @ethtool: ethtool related state
* @xdp_state: stores info on attached XDP BPF programs
*
- * @nested_level: Used as as a parameter of spin_lock_nested() of
+ * @nested_level: Used as a parameter of spin_lock_nested() of
* dev->addr_list_lock.
* @unlink_list: As netif_addr_lock() can be called recursively,
* keep a list of interfaces to be deleted.
+ * @gro_max_size: Maximum size of aggregated packet in generic
+ * receive offload (GRO)
+ * @gro_ipv4_max_size: Maximum size of aggregated packet in generic
+ * receive offload (GRO), for IPv4.
+ * @xdp_zc_max_segs: Maximum number of segments supported by AF_XDP
+ * zero copy driver
+ *
+ * @dev_addr_shadow: Copy of @dev_addr to catch direct writes.
+ * @linkwatch_dev_tracker: refcount tracker used by linkwatch.
+ * @watchdog_dev_tracker: refcount tracker used by watchdog.
+ * @dev_registered_tracker: tracker for reference held while
+ * registered
+ * @offload_xstats_l3: L3 HW stats for this netdevice.
+ *
+ * @devlink_port: Pointer to related devlink port structure.
+ * Assigned by a driver before netdev registration using
+ * SET_NETDEV_DEVLINK_PORT macro. This pointer is static
+ * during the time netdevice is registered.
+ *
+ * @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem,
+ * where the clock is recovered.
+ *
+ * @max_pacing_offload_horizon: max EDT offload horizon in nsec.
+ * @napi_config: An array of napi_config structures containing per-NAPI
+ * settings.
+ * @num_napi_configs: number of allocated NAPI config structs,
+ * always >= max(num_rx_queues, num_tx_queues).
+ * @gro_flush_timeout: timeout for GRO layer in NAPI
+ * @napi_defer_hard_irqs: If not zero, provides a counter that allows
+ * avoiding NIC hard IRQs on busy queues.
+ *
+ * @neighbours: List heads pointing to this device's neighbours'
+ * dev_list, one per address-family.
+ * @hwprov: Tracks which PTP performs hardware packet time stamping.
*
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
struct net_device {
+ /* Cacheline organization can be found documented in
+ * Documentation/networking/net_cachelines/net_device.rst.
+ * Please update the document when adding new fields.
+ */
+
+ /* TX read-mostly hotpath */
+ __cacheline_group_begin(net_device_read_tx);
+ struct_group(priv_flags_fast,
+ unsigned long priv_flags:32;
+ unsigned long lltx:1;
+ unsigned long netmem_tx:1;
+ );
+ const struct net_device_ops *netdev_ops;
+ const struct header_ops *header_ops;
+ struct netdev_queue *_tx;
+ netdev_features_t gso_partial_features;
+ unsigned int real_num_tx_queues;
+ unsigned int gso_max_size;
+ unsigned int gso_ipv4_max_size;
+ u16 gso_max_segs;
+ s16 num_tc;
+ /* Note : dev->mtu is often read without holding a lock.
+ * Writers usually hold RTNL.
+ * It is recommended to use READ_ONCE() to annotate the reads,
+ * and to use WRITE_ONCE() to annotate the writes.
+ */
+ unsigned int mtu;
+ unsigned short needed_headroom;
+ struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+#ifdef CONFIG_XPS
+ struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
+#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ struct nf_hook_entries __rcu *nf_hooks_egress;
+#endif
+#ifdef CONFIG_NET_XGRESS
+ struct bpf_mprog_entry __rcu *tcx_egress;
+#endif
+ __cacheline_group_end(net_device_read_tx);
+
+ /* TXRX read-mostly hotpath */
+ __cacheline_group_begin(net_device_read_txrx);
+ union {
+ struct pcpu_lstats __percpu *lstats;
+ struct pcpu_sw_netstats __percpu *tstats;
+ struct pcpu_dstats __percpu *dstats;
+ };
+ unsigned long state;
+ unsigned int flags;
+ unsigned short hard_header_len;
+ netdev_features_t features;
+ struct inet6_dev __rcu *ip6_ptr;
+ __cacheline_group_end(net_device_read_txrx);
+
+ /* RX read-mostly hotpath */
+ __cacheline_group_begin(net_device_read_rx);
+ struct bpf_prog __rcu *xdp_prog;
+ struct list_head ptype_specific;
+ int ifindex;
+ unsigned int real_num_rx_queues;
+ struct netdev_rx_queue *_rx;
+ unsigned int gro_max_size;
+ unsigned int gro_ipv4_max_size;
+ rx_handler_func_t __rcu *rx_handler;
+ void __rcu *rx_handler_data;
+ possible_net_t nd_net;
+#ifdef CONFIG_NETPOLL
+ struct netpoll_info __rcu *npinfo;
+#endif
+#ifdef CONFIG_NET_XGRESS
+ struct bpf_mprog_entry __rcu *tcx_ingress;
+#endif
+ __cacheline_group_end(net_device_read_rx);
+
char name[IFNAMSIZ];
struct netdev_name_node *name_node;
struct dev_ifalias __rcu *ifalias;
@@ -1947,14 +2194,12 @@ struct net_device {
* part of the usual set specified in Space.c.
*/
- unsigned long state;
struct list_head dev_list;
struct list_head napi_list;
struct list_head unreg_list;
struct list_head close_list;
struct list_head ptype_all;
- struct list_head ptype_specific;
struct {
struct list_head upper;
@@ -1962,29 +2207,18 @@ struct net_device {
} adj_list;
/* Read-mostly cache-line for fast-path access */
- unsigned int flags;
- unsigned int priv_flags;
- const struct net_device_ops *netdev_ops;
- int ifindex;
+ xdp_features_t xdp_features;
+ const struct xdp_metadata_ops *xdp_metadata_ops;
+ const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops;
unsigned short gflags;
- unsigned short hard_header_len;
- /* Note : dev->mtu is often read without holding a lock.
- * Writers usually hold RTNL.
- * It is recommended to use READ_ONCE() to annotate the reads,
- * and to use WRITE_ONCE() to annotate the writes.
- */
- unsigned int mtu;
- unsigned short needed_headroom;
unsigned short needed_tailroom;
- netdev_features_t features;
netdev_features_t hw_features;
netdev_features_t wanted_features;
netdev_features_t vlan_features;
netdev_features_t hw_enc_features;
netdev_features_t mpls_features;
- netdev_features_t gso_partial_features;
unsigned int min_mtu;
unsigned int max_mtu;
@@ -1996,9 +2230,7 @@ struct net_device {
struct net_device_stats stats; /* not used by modern drivers */
- atomic_long_t rx_dropped;
- atomic_long_t tx_dropped;
- atomic_long_t rx_nohandler;
+ struct net_device_core_stats __percpu *core_stats;
/* Stats to monitor link on/off, flapping */
atomic_t carrier_up_count;
@@ -2006,7 +2238,6 @@ struct net_device {
#ifdef CONFIG_WIRELESS_EXT
const struct iw_handler_def *wireless_handlers;
- struct iw_public_data *wireless_data;
#endif
const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_L3_MASTER_DEV
@@ -2024,9 +2255,7 @@ struct net_device {
const struct tlsdev_ops *tlsdev_ops;
#endif
- const struct header_ops *header_ops;
-
- unsigned char operstate;
+ unsigned int operstate;
unsigned char link_mode;
unsigned char if_port;
@@ -2038,14 +2267,15 @@ struct net_device {
unsigned char addr_len;
unsigned char upper_level;
unsigned char lower_level;
+ u8 threaded;
unsigned short neigh_priv_len;
unsigned short dev_id;
unsigned short dev_port;
- unsigned short padded;
+ int irq;
+ u32 priv_len;
spinlock_t addr_list_lock;
- int irq;
struct netdev_hw_addr_list uc;
struct netdev_hw_addr_list mc;
@@ -2066,6 +2296,9 @@ struct net_device {
/* Protocol-specific pointers */
+ struct in_device __rcu *ip_ptr;
+ /** @fib_nh_head: nexthops associated with this netdev */
+ struct hlist_head fib_nh_head;
#if IS_ENABLED(CONFIG_VLAN_8021Q)
struct vlan_info __rcu *vlan_info;
@@ -2076,42 +2309,41 @@ struct net_device {
#if IS_ENABLED(CONFIG_TIPC)
struct tipc_bearer __rcu *tipc_ptr;
#endif
-#if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
+#if IS_ENABLED(CONFIG_ATALK)
void *atalk_ptr;
#endif
- struct in_device __rcu *ip_ptr;
-#if IS_ENABLED(CONFIG_DECNET)
- struct dn_dev __rcu *dn_ptr;
-#endif
- struct inet6_dev __rcu *ip6_ptr;
#if IS_ENABLED(CONFIG_AX25)
- void *ax25_ptr;
+ struct ax25_dev __rcu *ax25_ptr;
#endif
+#if IS_ENABLED(CONFIG_CFG80211)
struct wireless_dev *ieee80211_ptr;
+#endif
+#if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
struct wpan_dev *ieee802154_ptr;
+#endif
#if IS_ENABLED(CONFIG_MPLS_ROUTING)
struct mpls_dev __rcu *mpls_ptr;
#endif
+#if IS_ENABLED(CONFIG_MCTP)
+ struct mctp_dev __rcu *mctp_ptr;
+#endif
+#if IS_ENABLED(CONFIG_INET_PSP)
+ struct psp_dev __rcu *psp_dev;
+#endif
/*
* Cache lines mostly used on receive path (including eth_type_trans())
*/
/* Interface address info used in eth_type_trans() */
- unsigned char *dev_addr;
+ const unsigned char *dev_addr;
- struct netdev_rx_queue *_rx;
unsigned int num_rx_queues;
- unsigned int real_num_rx_queues;
-
- struct bpf_prog __rcu *xdp_prog;
- unsigned long gro_flush_timeout;
- int napi_defer_hard_irqs;
- rx_handler_func_t __rcu *rx_handler;
- void __rcu *rx_handler_data;
-
-#ifdef CONFIG_NET_CLS_ACT
- struct mini_Qdisc __rcu *miniq_ingress;
-#endif
+#define GRO_LEGACY_MAX_SIZE 65536u
+/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
+ * and shinfo->gso_segs is a 16bit field.
+ */
+#define GRO_MAX_SIZE (8 * 65535u)
+ unsigned int xdp_zc_max_segs;
struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
struct nf_hook_entries __rcu *nf_hooks_ingress;
@@ -2126,22 +2358,13 @@ struct net_device {
/*
* Cache lines mostly used on transmit path
*/
- struct netdev_queue *_tx ____cacheline_aligned_in_smp;
unsigned int num_tx_queues;
- unsigned int real_num_tx_queues;
- struct Qdisc *qdisc;
+ struct Qdisc __rcu *qdisc;
unsigned int tx_queue_len;
spinlock_t tx_global_lock;
struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
-#ifdef CONFIG_XPS
- struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
-#endif
-#ifdef CONFIG_NET_CLS_ACT
- struct mini_Qdisc __rcu *miniq_egress;
-#endif
-
#ifdef CONFIG_NET_SCHED
DECLARE_HASHTABLE (qdisc_hash, 4);
#endif
@@ -2158,42 +2381,27 @@ struct net_device {
#else
refcount_t dev_refcnt;
#endif
+ struct ref_tracker_dir refcnt_tracker;
struct list_head link_watch_list;
- enum { NETREG_UNINITIALIZED=0,
- NETREG_REGISTERED, /* completed register_netdevice */
- NETREG_UNREGISTERING, /* called unregister_netdevice */
- NETREG_UNREGISTERED, /* completed unregister todo */
- NETREG_RELEASED, /* called free_netdev */
- NETREG_DUMMY, /* dummy device for NAPI poll */
- } reg_state:8;
+ u8 reg_state;
bool dismantle;
- enum {
- RTNL_LINK_INITIALIZED,
- RTNL_LINK_INITIALIZING,
- } rtnl_link_state:16;
+ /** @moving_ns: device is changing netns, protected by @lock */
+ bool moving_ns;
+ /** @rtnl_link_initializing: Device being created, suppress events */
+ bool rtnl_link_initializing;
bool needs_free_netdev;
void (*priv_destructor)(struct net_device *dev);
-#ifdef CONFIG_NETPOLL
- struct netpoll_info __rcu *npinfo;
-#endif
-
- possible_net_t nd_net;
-
/* mid-layer private */
void *ml_priv;
enum netdev_ml_priv_type ml_priv_type;
- union {
- struct pcpu_lstats __percpu *lstats;
- struct pcpu_sw_netstats __percpu *tstats;
- struct pcpu_dstats __percpu *dstats;
- };
+ enum netdev_stat_type pcpu_stat_type:8;
#if IS_ENABLED(CONFIG_GARP)
struct garp_port __rcu *garp_port;
@@ -2201,24 +2409,36 @@ struct net_device {
#if IS_ENABLED(CONFIG_MRP)
struct mrp_port __rcu *mrp_port;
#endif
-
+#if IS_ENABLED(CONFIG_NET_DROP_MONITOR)
+ struct dm_hw_stat_delta __rcu *dm_private;
+#endif
struct device dev;
- const struct attribute_group *sysfs_groups[4];
+ const struct attribute_group *sysfs_groups[5];
const struct attribute_group *sysfs_rx_queue_group;
const struct rtnl_link_ops *rtnl_link_ops;
+ const struct netdev_stat_ops *stat_ops;
+
+ const struct netdev_queue_mgmt_ops *queue_mgmt_ops;
+
/* for setting kernel sock attribute on TCP connection setup */
-#define GSO_MAX_SIZE 65536
- unsigned int gso_max_size;
-#define GSO_MAX_SEGS 65535
- u16 gso_max_segs;
+#define GSO_MAX_SEGS 65535u
+#define GSO_LEGACY_MAX_SIZE 65536u
+/* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
+ * and shinfo->gso_segs is a 16bit field.
+ */
+#define GSO_MAX_SIZE (8 * GSO_MAX_SEGS)
+
+#define TSO_LEGACY_MAX_SIZE 65536
+#define TSO_MAX_SIZE UINT_MAX
+ unsigned int tso_max_size;
+#define TSO_MAX_SEGS U16_MAX
+ u16 tso_max_segs;
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
- s16 num_tc;
- struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
u8 prio_tc_map[TC_BITMASK + 1];
#if IS_ENABLED(CONFIG_FCOE)
@@ -2227,13 +2447,19 @@ struct net_device {
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
struct netprio_map __rcu *priomap;
#endif
+ struct phy_link_topology *link_topo;
struct phy_device *phydev;
struct sfp_bus *sfp_bus;
struct lock_class_key *qdisc_tx_busylock;
- struct lock_class_key *qdisc_running_key;
bool proto_down;
- unsigned wol_enabled:1;
- unsigned threaded:1;
+ bool irq_affinity_auto;
+ bool rx_cpu_rmap_auto;
+
+ /* priv_flags_slow, ungrouped to save space */
+ unsigned long see_all_hwtstamp_requests:1;
+ unsigned long change_proto_down:1;
+ unsigned long netns_immutable:1;
+ unsigned long fcoe_mtu:1;
struct list_head net_notifier_list;
@@ -2244,11 +2470,127 @@ struct net_device {
const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
struct udp_tunnel_nic *udp_tunnel_nic;
+ /** @cfg: net_device queue-related configuration */
+ struct netdev_config *cfg;
+ /**
+ * @cfg_pending: same as @cfg but, while the device is being actively
+ * reconfigured, also includes any changes to the configuration
+ * requested by the user which may or may not be rejected.
+ */
+ struct netdev_config *cfg_pending;
+ struct ethtool_netdev_state *ethtool;
+
/* protected by rtnl_lock */
struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
-};
+
+ u8 dev_addr_shadow[MAX_ADDR_LEN];
+ netdevice_tracker linkwatch_dev_tracker;
+ netdevice_tracker watchdog_dev_tracker;
+ netdevice_tracker dev_registered_tracker;
+ struct rtnl_hw_stats64 *offload_xstats_l3;
+
+ struct devlink_port *devlink_port;
+
+#if IS_ENABLED(CONFIG_DPLL)
+ struct dpll_pin __rcu *dpll_pin;
+#endif
+#if IS_ENABLED(CONFIG_PAGE_POOL)
+ /** @page_pools: page pools created for this netdevice */
+ struct hlist_head page_pools;
+#endif
+
+ /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */
+ struct dim_irq_moder *irq_moder;
+
+ u64 max_pacing_offload_horizon;
+ struct napi_config *napi_config;
+ u32 num_napi_configs;
+ u32 napi_defer_hard_irqs;
+ unsigned long gro_flush_timeout;
+
+ /**
+ * @up: copy of @state's IFF_UP, but safe to read with just @lock.
+ * May report false negatives while the device is being opened
+ * or closed (@lock does not protect .ndo_open or .ndo_close).
+ */
+ bool up;
+
+ /**
+ * @request_ops_lock: request the core to run all @netdev_ops and
+ * @ethtool_ops under the @lock.
+ */
+ bool request_ops_lock;
+
+ /**
+ * @lock: netdev-scope lock, protects a small selection of fields.
+ * Should always be taken using netdev_lock() / netdev_unlock() helpers.
+ * Drivers are free to use it for other protection.
+ *
+ * For the drivers that implement shaper or queue API, the scope
+ * of this lock is expanded to cover most ndo/queue/ethtool/sysfs
+ * operations. Drivers may opt-in to this behavior by setting
+ * @request_ops_lock.
+ *
+ * @lock protection mixes with rtnl_lock in multiple ways, fields are
+ * either:
+ *
+ * - simply protected by the instance @lock;
+ *
+ * - double protected - writers hold both locks, readers hold either;
+ *
+ * - ops protected - protected by the lock held around the NDOs
+ * and other callbacks, that is the instance lock on devices for
+ * which netdev_need_ops_lock() returns true, otherwise by rtnl_lock;
+ *
+ * - double ops protected - always protected by rtnl_lock but for
+ * devices for which netdev_need_ops_lock() returns true - also
+ * the instance lock.
+ *
+ * Simply protects:
+ * @gro_flush_timeout, @napi_defer_hard_irqs, @napi_list,
+ * @net_shaper_hierarchy, @reg_state, @threaded
+ *
+ * Double protects:
+ * @up, @moving_ns, @nd_net, @xdp_features
+ *
+ * Double ops protects:
+ * @real_num_rx_queues, @real_num_tx_queues
+ *
+ * Also protects some fields in:
+ * struct napi_struct, struct netdev_queue, struct netdev_rx_queue
+ *
+ * Ordering: take after rtnl_lock.
+ */
+ struct mutex lock;
+
+#if IS_ENABLED(CONFIG_NET_SHAPER)
+ /**
+ * @net_shaper_hierarchy: data tracking the current shaper status
+ * see include/net/net_shapers.h
+ */
+ struct net_shaper_hierarchy *net_shaper_hierarchy;
+#endif
+
+ struct hlist_head neighbours[NEIGH_NR_TABLES];
+
+ struct hwtstamp_provider __rcu *hwprov;
+
+ u8 priv[] ____cacheline_aligned
+ __counted_by(priv_len);
+} ____cacheline_aligned;
#define to_net_dev(d) container_of(d, struct net_device, dev)
+/*
+ * A driver should use this to assign a devlink port instance to a netdevice
+ * before registering the netdevice. The devlink_port pointer then remains
+ * static for the netdev's lifetime once it is registered.
+ */
+#define SET_NETDEV_DEVLINK_PORT(dev, port) \
+({ \
+ WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED); \
+ ((dev)->devlink_port = (port)); \
+})
+
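
Illustrative usage of the macro, assuming a hypothetical my_hw structure that owns both the netdev and its devlink port:

#include <net/devlink.h>

struct my_hw {					/* hypothetical */
	struct net_device *netdev;
	struct devlink_port devlink_port;
};

static int my_register(struct my_hw *hw)
{
	/* Must run while reg_state is still NETREG_UNINITIALIZED */
	SET_NETDEV_DEVLINK_PORT(hw->netdev, &hw->devlink_port);
	return register_netdev(hw->netdev);
}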
static inline bool netif_elide_gro(const struct net_device *dev)
{
if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
@@ -2316,6 +2658,7 @@ static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
unsigned int index)
{
+ DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues);
return &dev->_tx[index];
}
@@ -2337,23 +2680,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
f(dev, &dev->_tx[i], arg);
}
-#define netdev_lockdep_set_classes(dev) \
-{ \
- static struct lock_class_key qdisc_tx_busylock_key; \
- static struct lock_class_key qdisc_running_key; \
- static struct lock_class_key qdisc_xmit_lock_key; \
- static struct lock_class_key dev_addr_list_lock_key; \
- unsigned int i; \
- \
- (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
- (dev)->qdisc_running_key = &qdisc_running_key; \
- lockdep_set_class(&(dev)->addr_list_lock, \
- &dev_addr_list_lock_key); \
- for (i = 0; i < (dev)->num_tx_queues; i++) \
- lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
- &qdisc_xmit_lock_key); \
-}
-
u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
@@ -2413,6 +2739,12 @@ struct net *dev_net(const struct net_device *dev)
}
static inline
+struct net *dev_net_rcu(const struct net_device *dev)
+{
+ return read_pnet_rcu(&dev->nd_net);
+}
+
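
A minimal sketch of using dev_net_rcu() from an RCU reader when no reference on the netns needs to be taken; my_same_netns() is illustrative:

#include <net/net_namespace.h>

static bool my_same_netns(const struct net_device *a,
			  const struct net_device *b)
{
	bool ret;

	rcu_read_lock();
	ret = net_eq(dev_net_rcu(a), dev_net_rcu(b));
	rcu_read_unlock();
	return ret;
}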
+static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
write_pnet(&dev->nd_net, net);
@@ -2426,7 +2758,7 @@ void dev_net_set(struct net_device *dev, struct net *net)
*/
static inline void *netdev_priv(const struct net_device *dev)
{
- return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
+ return (void *)dev->priv;
}
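
Since netdev_priv() now simply points at the trailing ->priv flexible array, the driver's private area is carved out at allocation time. A hypothetical sketch (struct my_priv is illustrative):

#include <linux/etherdevice.h>

struct my_priv {				/* hypothetical private data */
	u32 msg_enable;
};

static struct net_device *my_alloc(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct my_priv));
	struct my_priv *p;

	if (!dev)
		return NULL;
	p = netdev_priv(dev);	/* points into dev->priv[], sized priv_len */
	p->msg_enable = 0;
	return dev;
}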
/* Set the sysfs physical device reference for the network logical device
@@ -2440,44 +2772,126 @@ static inline void *netdev_priv(const struct net_device *dev)
*/
#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
+void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
+ enum netdev_queue_type type,
+ struct napi_struct *napi);
+
+static inline void netdev_lock(struct net_device *dev)
+{
+ mutex_lock(&dev->lock);
+}
+
+static inline void netdev_unlock(struct net_device *dev)
+{
+ mutex_unlock(&dev->lock);
+}
+/* Additional netdev_lock()-related helpers are in net/netdev_lock.h */
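
Per the @lock documentation above, @gro_flush_timeout is in the "simply protects" set, so a write needs only the instance lock. A minimal sketch (my_set_gro_timeout() is illustrative):

static void my_set_gro_timeout(struct net_device *dev, unsigned long timeout)
{
	netdev_lock(dev);
	WRITE_ONCE(dev->gro_flush_timeout, timeout);
	netdev_unlock(dev);
}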
+
+void netif_napi_set_irq_locked(struct napi_struct *napi, int irq);
+
+static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
+{
+ netdev_lock(napi->dev);
+ netif_napi_set_irq_locked(napi, irq);
+ netdev_unlock(napi->dev);
+}
+
/* Default NAPI poll() weight
* Device drivers are strongly advised to not use bigger value
*/
#define NAPI_POLL_WEIGHT 64
+void netif_napi_add_weight_locked(struct net_device *dev,
+ struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int),
+ int weight);
+
+static inline void
+netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight)
+{
+ netdev_lock(dev);
+ netif_napi_add_weight_locked(dev, napi, poll, weight);
+ netdev_unlock(dev);
+}
+
/**
- * netif_napi_add - initialize a NAPI context
- * @dev: network device
- * @napi: NAPI context
- * @poll: polling function
- * @weight: default weight
+ * netif_napi_add() - initialize a NAPI context
+ * @dev: network device
+ * @napi: NAPI context
+ * @poll: polling function
*
* netif_napi_add() must be used to initialize a NAPI context prior to calling
* *any* of the other NAPI-related functions.
*/
-void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int), int weight);
+static inline void
+netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int))
+{
+ netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
+}
+
+static inline void
+netif_napi_add_locked(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int))
+{
+ netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
+}
+
+static inline void
+netif_napi_add_tx_weight(struct net_device *dev,
+ struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int),
+ int weight)
+{
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
+ netif_napi_add_weight(dev, napi, poll, weight);
+}
+
+static inline void
+netif_napi_add_config_locked(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int index)
+{
+ napi->index = index;
+ napi->config = &dev->napi_config[index];
+ netif_napi_add_weight_locked(dev, napi, poll, NAPI_POLL_WEIGHT);
+}
/**
- * netif_tx_napi_add - initialize a NAPI context
- * @dev: network device
- * @napi: NAPI context
- * @poll: polling function
- * @weight: default weight
+ * netif_napi_add_config - initialize a NAPI context with persistent config
+ * @dev: network device
+ * @napi: NAPI context
+ * @poll: polling function
+ * @index: the NAPI index
+ */
+static inline void
+netif_napi_add_config(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int index)
+{
+ netdev_lock(dev);
+ netif_napi_add_config_locked(dev, napi, poll, index);
+ netdev_unlock(dev);
+}
+
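
A hypothetical ndo_open() fragment tying the pieces together: a persistent NAPI is created with netif_napi_add_config() and bound to its IRQ, as the @irq_affinity_auto documentation above expects. struct my_priv, my_poll() and the single-queue layout are illustrative:

struct my_priv {				/* hypothetical */
	struct napi_struct napi;
	int irq;
};

static int my_poll(struct napi_struct *napi, int budget)
{
	/* ... process up to budget packets ... */
	napi_complete_done(napi, 0);
	return 0;
}

static int my_open(struct net_device *dev)
{
	struct my_priv *p = netdev_priv(dev);

	netif_napi_add_config(dev, &p->napi, my_poll, 0);
	netif_napi_set_irq(&p->napi, p->irq);
	napi_enable(&p->napi);
	return 0;
}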
+/**
+ * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only
+ * @dev: network device
+ * @napi: NAPI context
+ * @poll: polling function
*
* This variant of netif_napi_add() should be used from drivers using NAPI
* to exclusively poll a TX queue.
 * This avoids adding it to napi_hash[], and thus polluting that hash table.
*/
-static inline void netif_tx_napi_add(struct net_device *dev,
+static inline void netif_napi_add_tx(struct net_device *dev,
struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int),
- int weight)
+ int (*poll)(struct napi_struct *, int))
{
- set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
- netif_napi_add(dev, napi, poll, weight);
+ netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
}
+void __netif_napi_del_locked(struct napi_struct *napi);
+
/**
* __netif_napi_del - remove a NAPI context
* @napi: NAPI context
@@ -2486,7 +2900,18 @@ static inline void netif_tx_napi_add(struct net_device *dev,
* containing @napi. Drivers might want to call this helper to combine
* all the needed RCU grace periods into a single one.
*/
-void __netif_napi_del(struct napi_struct *napi);
+static inline void __netif_napi_del(struct napi_struct *napi)
+{
+ netdev_lock(napi->dev);
+ __netif_napi_del_locked(napi);
+ netdev_unlock(napi->dev);
+}
+
+static inline void netif_napi_del_locked(struct napi_struct *napi)
+{
+ __netif_napi_del_locked(napi);
+ synchronize_net();
+}
/**
* netif_napi_del - remove a NAPI context
@@ -2500,113 +2925,14 @@ static inline void netif_napi_del(struct napi_struct *napi)
synchronize_net();
}
-struct napi_gro_cb {
- /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
- void *frag0;
-
- /* Length of frag0. */
- unsigned int frag0_len;
-
- /* This indicates where we are processing relative to skb->data. */
- int data_offset;
-
- /* This is non-zero if the packet cannot be merged with the new skb. */
- u16 flush;
-
- /* Save the IP ID here and check when we get to the transport layer */
- u16 flush_id;
-
- /* Number of segments aggregated. */
- u16 count;
-
- /* Start offset for remote checksum offload */
- u16 gro_remcsum_start;
-
- /* jiffies when first packet was created/queued */
- unsigned long age;
-
- /* Used in ipv6_gro_receive() and foo-over-udp */
- u16 proto;
-
- /* This is non-zero if the packet may be of the same flow. */
- u8 same_flow:1;
-
- /* Used in tunnel GRO receive */
- u8 encap_mark:1;
-
- /* GRO checksum is valid */
- u8 csum_valid:1;
-
- /* Number of checksums via CHECKSUM_UNNECESSARY */
- u8 csum_cnt:3;
-
- /* Free the skb? */
- u8 free:2;
-#define NAPI_GRO_FREE 1
-#define NAPI_GRO_FREE_STOLEN_HEAD 2
-
- /* Used in foo-over-udp, set in udp[46]_gro_receive */
- u8 is_ipv6:1;
-
- /* Used in GRE, set in fou/gue_gro_receive */
- u8 is_fou:1;
-
- /* Used to determine if flush_id can be ignored */
- u8 is_atomic:1;
-
- /* Number of gro_receive callbacks this packet already went through */
- u8 recursion_counter:4;
-
- /* GRO is done by frag_list pointer chaining. */
- u8 is_flist:1;
-
- /* used to support CHECKSUM_COMPLETE for tunneling protocols */
- __wsum csum;
-
- /* used in skb_gro_receive() slow path */
- struct sk_buff *last;
-};
-
-#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
-
-#define GRO_RECURSION_LIMIT 15
-static inline int gro_recursion_inc_test(struct sk_buff *skb)
-{
- return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
-}
-
-typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
-static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
- struct list_head *head,
- struct sk_buff *skb)
-{
- if (unlikely(gro_recursion_inc_test(skb))) {
- NAPI_GRO_CB(skb)->flush |= 1;
- return NULL;
- }
-
- return cb(head, skb);
-}
-
-typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
- struct sk_buff *);
-static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
- struct sock *sk,
- struct list_head *head,
- struct sk_buff *skb)
-{
- if (unlikely(gro_recursion_inc_test(skb))) {
- NAPI_GRO_CB(skb)->flush |= 1;
- return NULL;
- }
-
- return cb(sk, head, skb);
-}
+int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs);
+void netif_set_affinity_auto(struct net_device *dev);
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
bool ignore_outgoing;
struct net_device *dev; /* NULL is wildcarded here */
+ netdevice_tracker dev_tracker;
int (*func) (struct sk_buff *,
struct net_device *,
struct packet_type *,
@@ -2616,6 +2942,7 @@ struct packet_type {
struct net_device *);
bool (*id_match)(struct packet_type *ptype,
struct sock *sk);
+ struct net *af_packet_net;
void *af_packet_priv;
struct list_head list;
};
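
For illustration, a hypothetical protocol tap built on struct packet_type; dev_add_pack()/dev_remove_pack() are the usual registration helpers:

#include <linux/if_ether.h>

static int my_rcv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *pt, struct net_device *orig_dev)
{
	/* A real handler would parse the frame; this one just drops it. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type my_ptype __read_mostly = {
	.type = cpu_to_be16(ETH_P_ALL),		/* tap every protocol */
	.func = my_rcv,
};

/* dev_add_pack(&my_ptype) to register, dev_remove_pack(&my_ptype) to undo */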
@@ -2637,13 +2964,23 @@ struct packet_offload {
/* often modified stats are per-CPU, other are shared (netdev->stats) */
struct pcpu_sw_netstats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
struct u64_stats_sync syncp;
} __aligned(4 * sizeof(u64));
+struct pcpu_dstats {
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ u64_stats_t rx_drops;
+ u64_stats_t tx_drops;
+ struct u64_stats_sync syncp;
+} __aligned(8 * sizeof(u64));
+
struct pcpu_lstats {
u64_stats_t packets;
u64_stats_t bytes;
@@ -2657,8 +2994,8 @@ static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int l
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&tstats->syncp);
- tstats->rx_bytes += len;
- tstats->rx_packets++;
+ u64_stats_add(&tstats->rx_bytes, len);
+ u64_stats_inc(&tstats->rx_packets);
u64_stats_update_end(&tstats->syncp);
}
@@ -2669,8 +3006,8 @@ static inline void dev_sw_netstats_tx_add(struct net_device *dev,
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&tstats->syncp);
- tstats->tx_bytes += len;
- tstats->tx_packets += packets;
+ u64_stats_add(&tstats->tx_bytes, len);
+ u64_stats_add(&tstats->tx_packets, packets);
u64_stats_update_end(&tstats->syncp);
}
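
Sketch of a virtual-device receive path using the helper above; it assumes dev->tstats is valid (e.g. the core allocated it for NETDEV_PCPU_STAT_TSTATS) and my_rx() is illustrative:

static void my_rx(struct net_device *dev, struct sk_buff *skb)
{
	dev_sw_netstats_rx_add(dev, skb->len);	/* account before handoff */
	netif_rx(skb);
}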
@@ -2684,6 +3021,56 @@ static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
u64_stats_update_end(&lstats->syncp);
}
+static inline void dev_dstats_rx_add(struct net_device *dev,
+ unsigned int len)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_inc(&dstats->rx_packets);
+ u64_stats_add(&dstats->rx_bytes, len);
+ u64_stats_update_end(&dstats->syncp);
+}
+
+static inline void dev_dstats_rx_dropped(struct net_device *dev)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_inc(&dstats->rx_drops);
+ u64_stats_update_end(&dstats->syncp);
+}
+
+static inline void dev_dstats_rx_dropped_add(struct net_device *dev,
+ unsigned int packets)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_add(&dstats->rx_drops, packets);
+ u64_stats_update_end(&dstats->syncp);
+}
+
+static inline void dev_dstats_tx_add(struct net_device *dev,
+ unsigned int len)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_inc(&dstats->tx_packets);
+ u64_stats_add(&dstats->tx_bytes, len);
+ u64_stats_update_end(&dstats->syncp);
+}
+
+static inline void dev_dstats_tx_dropped(struct net_device *dev)
+{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_inc(&dstats->tx_drops);
+ u64_stats_update_end(&dstats->syncp);
+}
+
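
A hedged sketch of a transmit path using the dstats helpers above, assuming pcpu_stat_type == NETDEV_PCPU_STAT_DSTATS so dev->dstats is valid; my_xmit() is illustrative:

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (unlikely(!netif_carrier_ok(dev))) {
		dev_dstats_tx_dropped(dev);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	dev_dstats_tx_add(dev, skb->len);	/* account, then release */
	consume_skb(skb);
	return NETDEV_TX_OK;
}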
#define __netdev_alloc_pcpu_stats(type, gfp) \
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
@@ -2772,6 +3159,7 @@ enum netdev_cmd {
NETDEV_PRE_TYPE_CHANGE,
NETDEV_POST_TYPE_CHANGE,
NETDEV_POST_INIT,
+ NETDEV_PRE_UNINIT,
NETDEV_RELEASE,
NETDEV_NOTIFY_PEERS,
NETDEV_JOIN,
@@ -2789,6 +3177,11 @@ enum netdev_cmd {
NETDEV_CVLAN_FILTER_DROP_INFO,
NETDEV_SVLAN_FILTER_PUSH_INFO,
NETDEV_SVLAN_FILTER_DROP_INFO,
+ NETDEV_OFFLOAD_XSTATS_ENABLE,
+ NETDEV_OFFLOAD_XSTATS_DISABLE,
+ NETDEV_OFFLOAD_XSTATS_REPORT_USED,
+ NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
+ NETDEV_XDP_FEAT_CHANGE,
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);
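
A minimal notifier sketch reacting to one of the new events above; my_netdev_event() is a hypothetical handler:

static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_XDP_FEAT_CHANGE)
		netdev_info(dev, "XDP features changed\n");
	return NOTIFY_DONE;
}

static struct notifier_block my_nb = {
	.notifier_call = my_netdev_event,
};
/* registered via register_netdevice_notifier(&my_nb) */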
@@ -2839,6 +3232,42 @@ struct netdev_notifier_pre_changeaddr_info {
const unsigned char *dev_addr;
};
+enum netdev_offload_xstats_type {
+ NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1,
+};
+
+struct netdev_notifier_offload_xstats_info {
+ struct netdev_notifier_info info; /* must be first */
+ enum netdev_offload_xstats_type type;
+
+ union {
+ /* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */
+ struct netdev_notifier_offload_xstats_rd *report_delta;
+ /* NETDEV_OFFLOAD_XSTATS_REPORT_USED */
+ struct netdev_notifier_offload_xstats_ru *report_used;
+ };
+};
+
+int netdev_offload_xstats_enable(struct net_device *dev,
+ enum netdev_offload_xstats_type type,
+ struct netlink_ext_ack *extack);
+int netdev_offload_xstats_disable(struct net_device *dev,
+ enum netdev_offload_xstats_type type);
+bool netdev_offload_xstats_enabled(const struct net_device *dev,
+ enum netdev_offload_xstats_type type);
+int netdev_offload_xstats_get(struct net_device *dev,
+ enum netdev_offload_xstats_type type,
+ struct rtnl_hw_stats64 *stats, bool *used,
+ struct netlink_ext_ack *extack);
+void
+netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd,
+ const struct rtnl_hw_stats64 *stats);
+void
+netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru);
+void netdev_offload_xstats_push_delta(struct net_device *dev,
+ enum netdev_offload_xstats_type type,
+ const struct rtnl_hw_stats64 *stats);
+
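
Illustrative caller of the new HW-stats API, with error handling trimmed; my_enable_l3_stats() is hypothetical:

static int my_enable_l3_stats(struct net_device *dev,
			      struct netlink_ext_ack *extack)
{
	struct rtnl_hw_stats64 stats;
	bool used;
	int err;

	err = netdev_offload_xstats_enable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
					   extack);
	if (err)
		return err;
	return netdev_offload_xstats_get(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
					 &stats, &used, extack);
}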
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
struct net_device *dev)
{
@@ -2859,9 +3288,8 @@ netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
}
int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
-
-
-extern rwlock_t dev_base_lock; /* Device list lock */
+int call_netdevice_notifiers_info(unsigned long val,
+ struct netdev_notifier_info *info);
#define for_each_netdev(net, d) \
list_for_each_entry(d, &(net)->dev_base_head, dev_list)
@@ -2879,10 +3307,14 @@ extern rwlock_t dev_base_lock; /* Device list lock */
#define for_each_netdev_continue_rcu(net, d) \
list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave) \
- for_each_netdev_rcu(&init_net, slave) \
+ for_each_netdev_rcu(dev_net_rcu(bond), slave) \
if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
+#define for_each_netdev_dump(net, d, ifindex) \
+ for (; (d = xa_find(&(net)->dev_by_index, &ifindex, \
+ ULONG_MAX, XA_PRESENT)); ifindex++)
+
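
Sketch of a netlink dump loop built on the new iterator; resuming from cb->args[0] mirrors what rtnetlink dumpers do, and my_dump() is illustrative:

#include <net/sock.h>

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned long ifindex = cb->args[0];
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_dump(net, dev, ifindex) {
		/* ... emit one message per device, break when skb fills ... */
	}
	rcu_read_unlock();
	cb->args[0] = ifindex;	/* resume point for the next invocation */
	return skb->len;
}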
static inline struct net_device *next_net_device(struct net_device *dev)
{
struct list_head *lh;
@@ -2909,15 +3341,9 @@ static inline struct net_device *first_net_device(struct net *net)
net_device_entry(net->dev_base_head.next);
}
-static inline struct net_device *first_net_device_rcu(struct net *net)
-{
- struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
-
- return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
-}
-
int netdev_boot_setup_check(struct net_device *dev);
-unsigned long netdev_boot_base(const char *prefix, int unit);
+struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type,
+ const char *hwaddr);
struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
const char *hwaddr);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
@@ -2931,26 +3357,36 @@ int dev_get_iflink(const struct net_device *dev);
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
struct net_device_path_stack *stack);
-struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
- unsigned short mask);
struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
+bool netdev_name_in_use(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
+int netif_open(struct net_device *dev, struct netlink_ext_ack *extack);
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
+void netif_close(struct net_device *dev);
void dev_close(struct net_device *dev);
-void dev_close_many(struct list_head *head, bool unlink);
+void netif_close_many(struct list_head *head, bool unlink);
+void netif_disable_lro(struct net_device *dev);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
-u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev);
-int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
+int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev);
int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
+static inline int dev_queue_xmit(struct sk_buff *skb)
+{
+ return __dev_queue_xmit(skb, NULL);
+}
+
+static inline int dev_queue_xmit_accel(struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ return __dev_queue_xmit(skb, sb_dev);
+}
+
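
Both wrappers now funnel into __dev_queue_xmit(); a trivial sketch of handing a fully built skb to the qdisc layer (my_xmit_one() is illustrative):

static int my_xmit_one(struct net_device *dev, struct sk_buff *skb)
{
	skb->dev = dev;			/* route the skb to this device */
	return dev_queue_xmit(skb);	/* == __dev_queue_xmit(skb, NULL) */
}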
static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
{
int ret;
@@ -2971,8 +3407,6 @@ static inline void unregister_netdevice(struct net_device *dev)
int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
-void netdev_freemem(struct net_device *dev);
-int init_dummy_netdev(struct net_device *dev);
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
struct sk_buff *skb,
@@ -2981,258 +3415,15 @@ struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
struct sock *sk);
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+struct net_device *netdev_get_by_index(struct net *net, int ifindex,
+ netdevice_tracker *tracker, gfp_t gfp);
+struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex);
+struct net_device *netdev_get_by_name(struct net *net, const char *name,
+ netdevice_tracker *tracker, gfp_t gfp);
+struct net_device *netdev_get_by_flags_rcu(struct net *net, netdevice_tracker *tracker,
+ unsigned short flags, unsigned short mask);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
-struct net_device *dev_get_by_napi_id(unsigned int napi_id);
-int netdev_get_name(struct net *net, char *name, int ifindex);
-int dev_restart(struct net_device *dev);
-int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
-int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
-
-static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
-{
- return NAPI_GRO_CB(skb)->data_offset;
-}
-
-static inline unsigned int skb_gro_len(const struct sk_buff *skb)
-{
- return skb->len - NAPI_GRO_CB(skb)->data_offset;
-}
-
-static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
-{
- NAPI_GRO_CB(skb)->data_offset += len;
-}
-
-static inline void *skb_gro_header_fast(struct sk_buff *skb,
- unsigned int offset)
-{
- return NAPI_GRO_CB(skb)->frag0 + offset;
-}
-
-static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
-{
- return NAPI_GRO_CB(skb)->frag0_len < hlen;
-}
-
-static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
-{
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
-}
-
-static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
- unsigned int offset)
-{
- if (!pskb_may_pull(skb, hlen))
- return NULL;
-
- skb_gro_frag0_invalidate(skb);
- return skb->data + offset;
-}
-
-static inline void *skb_gro_network_header(struct sk_buff *skb)
-{
- return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
- skb_network_offset(skb);
-}
-
-static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
- const void *start, unsigned int len)
-{
- if (NAPI_GRO_CB(skb)->csum_valid)
- NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
- csum_partial(start, len, 0));
-}
-
-/* GRO checksum functions. These are logical equivalents of the normal
- * checksum functions (in skbuff.h) except that they operate on the GRO
- * offsets and fields in sk_buff.
- */
-
-__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
-
-static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
-{
- return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
-}
-
-static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
- bool zero_okay,
- __sum16 check)
-{
- return ((skb->ip_summed != CHECKSUM_PARTIAL ||
- skb_checksum_start_offset(skb) <
- skb_gro_offset(skb)) &&
- !skb_at_gro_remcsum_start(skb) &&
- NAPI_GRO_CB(skb)->csum_cnt == 0 &&
- (!zero_okay || check));
-}
-
-static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
- __wsum psum)
-{
- if (NAPI_GRO_CB(skb)->csum_valid &&
- !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
- return 0;
-
- NAPI_GRO_CB(skb)->csum = psum;
-
- return __skb_gro_checksum_complete(skb);
-}
-
-static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
-{
- if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
- /* Consume a checksum from CHECKSUM_UNNECESSARY */
- NAPI_GRO_CB(skb)->csum_cnt--;
- } else {
- /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
- * verified a new top level checksum or an encapsulated one
- * during GRO. This saves work if we fallback to normal path.
- */
- __skb_incr_checksum_unnecessary(skb);
- }
-}
-
-#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
- compute_pseudo) \
-({ \
- __sum16 __ret = 0; \
- if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
- __ret = __skb_gro_checksum_validate_complete(skb, \
- compute_pseudo(skb, proto)); \
- if (!__ret) \
- skb_gro_incr_csum_unnecessary(skb); \
- __ret; \
-})
-
-#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
- __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
-
-#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
- compute_pseudo) \
- __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
-
-#define skb_gro_checksum_simple_validate(skb) \
- __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
-
-static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
-{
- return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
- !NAPI_GRO_CB(skb)->csum_valid);
-}
-
-static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
- __wsum pseudo)
-{
- NAPI_GRO_CB(skb)->csum = ~pseudo;
- NAPI_GRO_CB(skb)->csum_valid = 1;
-}
-
-#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
-do { \
- if (__skb_gro_checksum_convert_check(skb)) \
- __skb_gro_checksum_convert(skb, \
- compute_pseudo(skb, proto)); \
-} while (0)
-
-struct gro_remcsum {
- int offset;
- __wsum delta;
-};
-
-static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
-{
- grc->offset = 0;
- grc->delta = 0;
-}
-
-static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
- unsigned int off, size_t hdrlen,
- int start, int offset,
- struct gro_remcsum *grc,
- bool nopartial)
-{
- __wsum delta;
- size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
-
- BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
-
- if (!nopartial) {
- NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
- return ptr;
- }
-
- ptr = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, off + plen)) {
- ptr = skb_gro_header_slow(skb, off + plen, off);
- if (!ptr)
- return NULL;
- }
-
- delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
- start, offset);
-
- /* Adjust skb->csum since we changed the packet */
- NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
-
- grc->offset = off + hdrlen + offset;
- grc->delta = delta;
-
- return ptr;
-}
-
-static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
- struct gro_remcsum *grc)
-{
- void *ptr;
- size_t plen = grc->offset + sizeof(u16);
-
- if (!grc->delta)
- return;
-
- ptr = skb_gro_header_fast(skb, grc->offset);
- if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
- ptr = skb_gro_header_slow(skb, plen, grc->offset);
- if (!ptr)
- return;
- }
-
- remcsum_unadjust((__sum16 *)ptr, grc->delta);
-}
-
-#ifdef CONFIG_XFRM_OFFLOAD
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
-{
- if (PTR_ERR(pp) != -EINPROGRESS)
- NAPI_GRO_CB(skb)->flush |= flush;
-}
-static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
- struct sk_buff *pp,
- int flush,
- struct gro_remcsum *grc)
-{
- if (PTR_ERR(pp) != -EINPROGRESS) {
- NAPI_GRO_CB(skb)->flush |= flush;
- skb_gro_remcsum_cleanup(skb, grc);
- skb->remcsum_offload = 0;
- }
-}
-#else
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
-{
- NAPI_GRO_CB(skb)->flush |= flush;
-}
-static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
- struct sk_buff *pp,
- int flush,
- struct gro_remcsum *grc)
-{
- NAPI_GRO_CB(skb)->flush |= flush;
- skb_gro_remcsum_cleanup(skb, grc);
- skb->remcsum_offload = 0;
-}
-#endif
+void netdev_copy_name(struct net_device *dev, char *name);
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
@@ -3289,26 +3480,31 @@ static inline bool dev_has_header(const struct net_device *dev)
return dev->header_ops && dev->header_ops->create;
}
-typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
- int len, int size);
-int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
-static inline int unregister_gifconf(unsigned int family)
+struct numa_drop_counters {
+ atomic_t drops0 ____cacheline_aligned_in_smp;
+ atomic_t drops1 ____cacheline_aligned_in_smp;
+};
+
+static inline int numa_drop_read(const struct numa_drop_counters *ndc)
{
- return register_gifconf(family, NULL);
+ return atomic_read(&ndc->drops0) + atomic_read(&ndc->drops1);
}
-#ifdef CONFIG_NET_FLOW_LIMIT
-#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
-struct sd_flow_limit {
- u64 count;
- unsigned int num_buckets;
- unsigned int history_head;
- u16 history[FLOW_LIMIT_HISTORY];
- u8 buckets[];
-};
+static inline void numa_drop_add(struct numa_drop_counters *ndc, int val)
+{
+ int n = numa_node_id() % 2;
-extern int netdev_flow_limit_table_len;
-#endif /* CONFIG_NET_FLOW_LIMIT */
+ if (n)
+ atomic_add(val, &ndc->drops1);
+ else
+ atomic_add(val, &ndc->drops0);
+}
+
+static inline void numa_drop_reset(struct numa_drop_counters *ndc)
+{
+ atomic_set(&ndc->drops0, 0);
+ atomic_set(&ndc->drops1, 0);
+}
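/*
 * Editor's sketch (not part of the patch): how the two-bucket counters
 * above are meant to be used. numa_drop_add() maps the local NUMA node
 * to one of two cache-line-aligned buckets, so packages on different
 * nodes do not bounce a single counter line; a reader simply sums both.
 */
static void example_charge_drop(struct numa_drop_counters *ndc)
{
	numa_drop_add(ndc, 1);			/* NUMA-local bucket */
}

static int example_total_drops(const struct numa_drop_counters *ndc)
{
	return numa_drop_read(ndc);		/* drops0 + drops1 */
}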
/*
* Incoming packets are placed on per-CPU queues
@@ -3316,14 +3512,19 @@ extern int netdev_flow_limit_table_len;
struct softnet_data {
struct list_head poll_list;
struct sk_buff_head process_queue;
+ local_lock_t process_queue_bh_lock;
/* stats */
unsigned int processed;
unsigned int time_squeeze;
- unsigned int received_rps;
#ifdef CONFIG_RPS
struct softnet_data *rps_ipi_list;
#endif
+
+ unsigned int received_rps;
+ bool in_net_rx_action;
+ bool in_napi_threaded_poll;
+
#ifdef CONFIG_NET_FLOW_LIMIT
struct sd_flow_limit __rcu *flow_limit;
#endif
@@ -3334,10 +3535,7 @@ struct softnet_data {
struct sk_buff_head xfrm_backlog;
#endif
/* written and read only by owning cpu: */
- struct {
- u16 recursion;
- u8 more;
- } xmit;
+ struct netdev_xmit xmit;
#ifdef CONFIG_RPS
/* input_queue_head should be written by cpu owning this struct,
* and only read by other cpus. Worth using a cache line.
@@ -3348,52 +3546,45 @@ struct softnet_data {
call_single_data_t csd ____cacheline_aligned_in_smp;
struct softnet_data *rps_ipi_next;
unsigned int cpu;
+
+ /* We force a cacheline alignment from here, to hold together
+ * input_queue_tail, input_pkt_queue and backlog.state.
+ * We add holes so that backlog.state is the last field
+ * of this cache line.
+ */
+ long pad[3] ____cacheline_aligned_in_smp;
unsigned int input_queue_tail;
#endif
- unsigned int dropped;
struct sk_buff_head input_pkt_queue;
- struct napi_struct backlog;
-};
+ struct napi_struct backlog;
-static inline void input_queue_head_incr(struct softnet_data *sd)
-{
-#ifdef CONFIG_RPS
- sd->input_queue_head++;
-#endif
-}
+ struct numa_drop_counters drop_counters;
-static inline void input_queue_tail_incr_save(struct softnet_data *sd,
- unsigned int *qtail)
-{
-#ifdef CONFIG_RPS
- *qtail = ++sd->input_queue_tail;
-#endif
-}
+ int defer_ipi_scheduled ____cacheline_aligned_in_smp;
+ call_single_data_t defer_csd;
+};
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+struct page_pool_bh {
+ struct page_pool *pool;
+ local_lock_t bh_lock;
+};
+DECLARE_PER_CPU(struct page_pool_bh, system_page_pool);
+
+#ifndef CONFIG_PREEMPT_RT
static inline int dev_recursion_level(void)
{
return this_cpu_read(softnet_data.xmit.recursion);
}
-
-#define XMIT_RECURSION_LIMIT 8
-static inline bool dev_xmit_recursion(void)
-{
- return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
- XMIT_RECURSION_LIMIT);
-}
-
-static inline void dev_xmit_recursion_inc(void)
+#else
+static inline int dev_recursion_level(void)
{
- __this_cpu_inc(softnet_data.xmit.recursion);
+ return current->net_xmit.recursion;
}
-static inline void dev_xmit_recursion_dec(void)
-{
- __this_cpu_dec(softnet_data.xmit.recursion);
-}
+#endif
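/*
 * Editor's sketch of the guard dev_recursion_level() backs: transmit
 * paths bail out rather than nest indefinitely. The inc/dec helpers and
 * XMIT_RECURSION_LIMIT (8 in the lines removed above) are assumed to
 * move elsewhere, so the constant here is illustrative only.
 */
static bool example_xmit_nesting_ok(void)
{
	return dev_recursion_level() <= 8;	/* mirrors the removed check */
}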
void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);
@@ -3458,6 +3649,13 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
+ /* Paired with READ_ONCE() from dev_watchdog() */
+ WRITE_ONCE(dev_queue->trans_start, jiffies);
+
+ /* This barrier is paired with smp_mb() from dev_watchdog() */
+ smp_mb__before_atomic();
+
+ /* Must be an atomic op see netif_txq_try_stop() */
set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
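/*
 * Editor's sketch of the reader these barriers pair with: a watchdog
 * samples trans_start locklessly and only then checks the stopped bit.
 * Simplified; the real dev_watchdog() walks every queue and takes the
 * timeout from the device.
 */
static bool example_queue_timed_out(struct netdev_queue *txq,
				    unsigned long watchdog_timeo)
{
	unsigned long start = READ_ONCE(txq->trans_start);

	smp_mb();	/* pairs with smp_mb__before_atomic() above */
	return netif_tx_queue_stopped(txq) &&
	       time_after(jiffies, start + watchdog_timeo);
}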
@@ -3526,6 +3724,16 @@ static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue
#endif
}
+static inline int netdev_queue_dql_avail(const struct netdev_queue *txq)
+{
+#ifdef CONFIG_BQL
+ /* Non-BQL migrated drivers will return 0, too. */
+ return dql_avail(&txq->dql);
+#else
+ return 0;
+#endif
+}
+
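/*
 * Editor's sketch: a driver can consult the helper above before
 * refilling a ring; zero or more means BQL still has byte budget, and
 * builds without CONFIG_BQL always report room.
 */
static bool example_bql_has_room(const struct netdev_queue *txq)
{
	return netdev_queue_dql_avail(txq) >= 0;
}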
/**
* netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
* @dev_queue: pointer to transmit queue
@@ -3554,6 +3762,16 @@ static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_qu
#endif
}
+/**
+ * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue
+ * @dev_queue: network device queue
+ * @bytes: number of bytes queued to the device queue
+ *
+ * Report the number of bytes queued for sending/completion to the network
+ * device hardware queue. @bytes should be a good approximation and should
+ * exactly match netdev_completed_queue() @bytes.
+ * This is typically called once per packet, from ndo_start_xmit().
+ */
static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
unsigned int bytes)
{
@@ -3563,6 +3781,12 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
if (likely(dql_avail(&dev_queue->dql) >= 0))
return;
+ /* Paired with READ_ONCE() from dev_watchdog() */
+ WRITE_ONCE(dev_queue->trans_start, jiffies);
+
+ /* This barrier is paired with smp_mb() from dev_watchdog() */
+ smp_mb__before_atomic();
+
set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
/*
@@ -3570,7 +3794,7 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
* because in netdev_tx_completed_queue we update the dql_completed
* before checking the XOFF flag.
*/
- smp_mb();
+ smp_mb__after_atomic();
/* check again in case another CPU has just made room avail */
if (unlikely(dql_avail(&dev_queue->dql) >= 0))
@@ -3599,13 +3823,14 @@ static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
}
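/*
 * Editor's sketch of the producer side netdev_tx_sent_queue() describes
 * above: one call per packet from a hypothetical ndo_start_xmit(); DMA
 * mapping and ring programming are elided.
 */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct netdev_queue *txq;
	unsigned int len = skb->len;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	/* ... post skb to the hardware ring here ... */
	netdev_tx_sent_queue(txq, len);		/* account bytes for BQL */
	return NETDEV_TX_OK;
}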
/**
- * netdev_sent_queue - report the number of bytes queued to hardware
- * @dev: network device
- * @bytes: number of bytes queued to the hardware device queue
+ * netdev_sent_queue - report the number of bytes queued to hardware
+ * @dev: network device
+ * @bytes: number of bytes queued to the hardware device queue
*
- * Report the number of bytes queued for sending/completion to the network
- * device hardware queue. @bytes should be a good approximation and should
- * exactly match netdev_completed_queue() @bytes
+ * Report the number of bytes queued for sending/completion to the network
+ * device hardware queue #0. @bytes should be a good approximation and should
+ * exactly match netdev_completed_queue() @bytes.
+ * This is typically called once per packet, from ndo_start_xmit().
*/
static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
{
@@ -3620,6 +3845,15 @@ static inline bool __netdev_sent_queue(struct net_device *dev,
xmit_more);
}
+/**
+ * netdev_tx_completed_queue - report number of packets/bytes at TX completion.
+ * @dev_queue: network device queue
+ * @pkts: number of packets (currently ignored)
+ * @bytes: number of bytes dequeued from the device queue
+ *
+ * Must be called at most once per TX completion round (and not per
+ * individual packet), so that BQL can adjust its limits appropriately.
+ */
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
unsigned int pkts, unsigned int bytes)
{
@@ -3630,11 +3864,11 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
dql_completed(&dev_queue->dql, bytes);
/*
- * Without the memory barrier there is a small possiblity that
+ * Without the memory barrier there is a small possibility that
* netdev_tx_sent_queue will miss the update and cause the queue to
* be stopped forever
*/
- smp_mb();
+ smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */
if (unlikely(dql_avail(&dev_queue->dql) < 0))
return;
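/*
 * Editor's sketch of the completion side: sum everything reclaimed in
 * one TX-clean pass and report it once, as the kernel-doc above
 * requires, so BQL can re-tune its limit.
 */
static void example_tx_clean_done(struct net_device *dev,
				  unsigned int pkts, unsigned int bytes)
{
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}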
@@ -3669,6 +3903,17 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q)
}
/**
+ * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue
+ * @dev: network device
+ * @qid: stack index of the queue to reset
+ */
+static inline void netdev_tx_reset_subqueue(const struct net_device *dev,
+ u32 qid)
+{
+ netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid));
+}
+
+/**
* netdev_reset_queue - reset the packets and bytes count of a network device
* @dev_queue: network device
*
@@ -3677,7 +3922,7 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q)
*/
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
- netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
+ netdev_tx_reset_subqueue(dev_queue, 0);
}
/**
@@ -3815,7 +4060,7 @@ static inline bool netif_attr_test_mask(unsigned long j,
* @online_mask: bitmask for CPUs/Rx queues that are online
* @nr_bits: number of bits in the bitmask
*
- * Returns true if a CPU/Rx queue is online.
+ * Returns: true if a CPU/Rx queue is online.
*/
static inline bool netif_attr_test_online(unsigned long j,
const unsigned long *online_mask,
@@ -3835,7 +4080,8 @@ static inline bool netif_attr_test_online(unsigned long j,
* @srcp: the cpumask/Rx queue mask pointer
* @nr_bits: number of bits in the bitmask
*
- * Returns >= nr_bits if no further CPUs/Rx queues set.
+ * Returns: next (after n) CPU/Rx queue index in the mask;
+ * >= nr_bits if no further CPUs/Rx queues set.
*/
static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
unsigned int nr_bits)
@@ -3857,7 +4103,8 @@ static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
* @src2p: the second CPUs/Rx queues mask pointer
* @nr_bits: number of bits in the bitmask
*
- * Returns >= nr_bits if no further CPUs/Rx queues set in both.
+ * Returns: next (after n) CPU/Rx queue index set in both masks;
+ * >= nr_bits if no further CPUs/Rx queues set in both.
*/
static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
const unsigned long *src2p,
@@ -3904,51 +4151,19 @@ static inline bool netif_is_multiqueue(const struct net_device *dev)
}
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
-
-#ifdef CONFIG_SYSFS
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
-#else
-static inline int netif_set_real_num_rx_queues(struct net_device *dev,
- unsigned int rxqs)
-{
- dev->real_num_rx_queues = rxqs;
- return 0;
-}
-#endif
-
-static inline struct netdev_rx_queue *
-__netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
-{
- return dev->_rx + rxq;
-}
-
-#ifdef CONFIG_SYSFS
-static inline unsigned int get_netdev_rx_queue_index(
- struct netdev_rx_queue *queue)
-{
- struct net_device *dev = queue->dev;
- int index = queue - dev->_rx;
-
- BUG_ON(index >= dev->num_rx_queues);
- return index;
-}
-#endif
+int netif_set_real_num_queues(struct net_device *dev,
+ unsigned int txq, unsigned int rxq);
-#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
int netif_get_num_default_rss_queues(void);
-enum skb_free_reason {
- SKB_REASON_CONSUMED,
- SKB_REASON_DROPPED,
-};
-
-void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
-void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
+void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason);
+void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason);
/*
* It is not allowed to call kfree_skb() or consume_skb() from hardware
* interrupt context or with hardware interrupts being disabled.
- * (in_irq() || irqs_disabled())
+ * (in_hardirq() || irqs_disabled())
*
* We provide four helpers that can be used in the following contexts:
*
@@ -3966,38 +4181,45 @@ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
*/
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
- __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
+ dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}
static inline void dev_consume_skb_irq(struct sk_buff *skb)
{
- __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
+ dev_kfree_skb_irq_reason(skb, SKB_CONSUMED);
}
static inline void dev_kfree_skb_any(struct sk_buff *skb)
{
- __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
+ dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
}
static inline void dev_consume_skb_any(struct sk_buff *skb)
{
- __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
+ dev_kfree_skb_any_reason(skb, SKB_CONSUMED);
}
-void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
-int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
+u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
+ const struct bpf_prog *xdp_prog);
+void generic_xdp_tx(struct sk_buff *skb, const struct bpf_prog *xdp_prog);
+int do_xdp_generic(const struct bpf_prog *xdp_prog, struct sk_buff **pskb);
int netif_rx(struct sk_buff *skb);
-int netif_rx_ni(struct sk_buff *skb);
-int netif_rx_any_context(struct sk_buff *skb);
+int __netif_rx(struct sk_buff *skb);
+
int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
+void netif_receive_skb_list_internal(struct list_head *head);
void netif_receive_skb_list(struct list_head *head);
-gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
-void napi_gro_flush(struct napi_struct *napi, bool flush_old);
+gro_result_t gro_receive_skb(struct gro_node *gro, struct sk_buff *skb);
+
+static inline gro_result_t napi_gro_receive(struct napi_struct *napi,
+ struct sk_buff *skb)
+{
+ return gro_receive_skb(&napi->gro, skb);
+}
+
struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
-struct packet_offload *gro_find_receive_by_type(__be16 type);
-struct packet_offload *gro_find_complete_by_type(__be16 type);
static inline void napi_free_frags(struct napi_struct *napi)
{
@@ -4012,65 +4234,67 @@ int netdev_rx_handler_register(struct net_device *dev,
void netdev_rx_handler_unregister(struct net_device *dev);
bool dev_valid_name(const char *name);
+static inline bool is_socket_ioctl_cmd(unsigned int cmd)
+{
+ return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
+}
+int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg);
+int put_user_ifreq(struct ifreq *ifr, void __user *arg);
int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
- bool *need_copyout);
-int dev_ifconf(struct net *net, struct ifconf *, int);
-int dev_ethtool(struct net *net, struct ifreq *);
-unsigned int dev_get_flags(const struct net_device *);
+ void __user *data, bool *need_copyout);
+int dev_ifconf(struct net *net, struct ifconf __user *ifc);
+int dev_eth_ioctl(struct net_device *dev,
+ struct ifreq *ifr, unsigned int cmd);
+int generic_hwtstamp_get_lower(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_cfg);
+int generic_hwtstamp_set_lower(struct net_device *dev,
+ struct kernel_hwtstamp_config *kernel_cfg,
+ struct netlink_ext_ack *extack);
+int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata);
+unsigned int netif_get_flags(const struct net_device *dev);
int __dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack);
+int netif_change_flags(struct net_device *dev, unsigned int flags,
+ struct netlink_ext_ack *extack);
int dev_change_flags(struct net_device *dev, unsigned int flags,
struct netlink_ext_ack *extack);
-void __dev_notify_flags(struct net_device *, unsigned int old_flags,
- unsigned int gchanges);
-int dev_change_name(struct net_device *, const char *);
+int netif_set_alias(struct net_device *dev, const char *alias, size_t len);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_get_alias(const struct net_device *, char *, size_t);
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
- const char *pat, int new_ifindex);
-static inline
+ const char *pat, int new_ifindex,
+ struct netlink_ext_ack *extack);
int dev_change_net_namespace(struct net_device *dev, struct net *net,
- const char *pat)
-{
- return __dev_change_net_namespace(dev, net, pat, 0);
-}
-int __dev_set_mtu(struct net_device *, int);
-int dev_validate_mtu(struct net_device *dev, int mtu,
- struct netlink_ext_ack *extack);
-int dev_set_mtu_ext(struct net_device *dev, int mtu,
- struct netlink_ext_ack *extack);
+ const char *pat);
+int __netif_set_mtu(struct net_device *dev, int new_mtu);
+int netif_set_mtu(struct net_device *dev, int new_mtu);
int dev_set_mtu(struct net_device *, int);
-int dev_change_tx_queue_len(struct net_device *, unsigned long);
-void dev_set_group(struct net_device *, int);
-int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
- struct netlink_ext_ack *extack);
-int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
+int netif_pre_changeaddr_notify(struct net_device *dev, const char *addr,
+ struct netlink_ext_ack *extack);
+int netif_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
+ struct netlink_ext_ack *extack);
+int dev_set_mac_address(struct net_device *dev, struct sockaddr_storage *ss,
struct netlink_ext_ack *extack);
-int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
+int dev_set_mac_address_user(struct net_device *dev, struct sockaddr_storage *ss,
struct netlink_ext_ack *extack);
-int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
-int dev_change_carrier(struct net_device *, bool new_carrier);
-int dev_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid);
-int dev_get_phys_port_name(struct net_device *dev,
- char *name, size_t len);
-int dev_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid, bool recurse);
+int netif_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
+int netif_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
-int dev_change_proto_down(struct net_device *dev, bool proto_down);
-int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
-void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
- u32 value);
+
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
-typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
-int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
- int fd, int expected_fd, u32 flags);
int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+u8 dev_xdp_prog_count(struct net_device *dev);
+int netif_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
+int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf);
+u8 dev_xdp_sb_prog_count(struct net_device *dev);
u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
+u32 dev_get_min_mp_channel_count(const struct net_device *dev);
+
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb);
@@ -4103,59 +4327,157 @@ static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
return false;
}
+void netdev_core_stats_inc(struct net_device *dev, u32 offset);
+
+#define DEV_CORE_STATS_INC(FIELD) \
+static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \
+{ \
+ netdev_core_stats_inc(dev, \
+ offsetof(struct net_device_core_stats, FIELD)); \
+}
+DEV_CORE_STATS_INC(rx_dropped)
+DEV_CORE_STATS_INC(tx_dropped)
+DEV_CORE_STATS_INC(rx_nohandler)
+DEV_CORE_STATS_INC(rx_otherhost_dropped)
+#undef DEV_CORE_STATS_INC
+
static __always_inline int ____dev_forward_skb(struct net_device *dev,
struct sk_buff *skb,
const bool check_mtu)
{
if (skb_orphan_frags(skb, GFP_ATOMIC) ||
unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) {
- atomic_long_inc(&dev->rx_dropped);
+ dev_core_stats_rx_dropped_inc(dev);
kfree_skb(skb);
return NET_RX_DROP;
}
- skb_scrub_packet(skb, true);
+ skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
skb->priority = 0;
return 0;
}
-bool dev_nit_active(struct net_device *dev);
-void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
+bool dev_nit_active_rcu(const struct net_device *dev);
+static inline bool dev_nit_active(const struct net_device *dev)
+{
+ bool ret;
-extern int netdev_budget;
-extern unsigned int netdev_budget_usecs;
+ rcu_read_lock();
+ ret = dev_nit_active_rcu(dev);
+ rcu_read_unlock();
+ return ret;
+}
-/* Called by rtnetlink.c:rtnl_unlock() */
-void netdev_run_todo(void);
+void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
-/**
- * dev_put - release reference to device
- * @dev: network device
- *
- * Release reference to device to allow it to be freed.
- */
-static inline void dev_put(struct net_device *dev)
+static inline void __dev_put(struct net_device *dev)
+{
+ if (dev) {
+#ifdef CONFIG_PCPU_DEV_REFCNT
+ this_cpu_dec(*dev->pcpu_refcnt);
+#else
+ refcount_dec(&dev->dev_refcnt);
+#endif
+ }
+}
+
+static inline void __dev_hold(struct net_device *dev)
{
+ if (dev) {
#ifdef CONFIG_PCPU_DEV_REFCNT
- this_cpu_dec(*dev->pcpu_refcnt);
+ this_cpu_inc(*dev->pcpu_refcnt);
#else
- refcount_dec(&dev->dev_refcnt);
+ refcount_inc(&dev->dev_refcnt);
+#endif
+ }
+}
+
+static inline void __netdev_tracker_alloc(struct net_device *dev,
+ netdevice_tracker *tracker,
+ gfp_t gfp)
+{
+#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
+ ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp);
+#endif
+}
+
+/* netdev_tracker_alloc() can upgrade a prior untracked reference
+ * taken by dev_get_by_name()/dev_get_by_index() to a tracked one.
+ */
+static inline void netdev_tracker_alloc(struct net_device *dev,
+ netdevice_tracker *tracker, gfp_t gfp)
+{
+#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
+ refcount_dec(&dev->refcnt_tracker.no_tracker);
+ __netdev_tracker_alloc(dev, tracker, gfp);
+#endif
+}
+
+static inline void netdev_tracker_free(struct net_device *dev,
+ netdevice_tracker *tracker)
+{
+#ifdef CONFIG_NET_DEV_REFCNT_TRACKER
+ ref_tracker_free(&dev->refcnt_tracker, tracker);
#endif
}
+static inline void netdev_hold(struct net_device *dev,
+ netdevice_tracker *tracker, gfp_t gfp)
+{
+ if (dev) {
+ __dev_hold(dev);
+ __netdev_tracker_alloc(dev, tracker, gfp);
+ }
+}
+
+static inline void netdev_put(struct net_device *dev,
+ netdevice_tracker *tracker)
+{
+ if (dev) {
+ netdev_tracker_free(dev, tracker);
+ __dev_put(dev);
+ }
+}
+
/**
* dev_hold - get reference to device
* @dev: network device
*
* Hold reference to device to keep it from being freed.
+ * Try using netdev_hold() instead.
*/
static inline void dev_hold(struct net_device *dev)
{
-#ifdef CONFIG_PCPU_DEV_REFCNT
- this_cpu_inc(*dev->pcpu_refcnt);
-#else
- refcount_inc(&dev->dev_refcnt);
-#endif
+ netdev_hold(dev, NULL, GFP_ATOMIC);
+}
+
+/**
+ * dev_put - release reference to device
+ * @dev: network device
+ *
+ * Release reference to device to allow it to be freed.
+ * Try using netdev_put() instead.
+ */
+static inline void dev_put(struct net_device *dev)
+{
+ netdev_put(dev, NULL);
+}
+
+DEFINE_FREE(dev_put, struct net_device *, if (_T) dev_put(_T))
+
+static inline void netdev_ref_replace(struct net_device *odev,
+ struct net_device *ndev,
+ netdevice_tracker *tracker,
+ gfp_t gfp)
+{
+ if (odev)
+ netdev_tracker_free(odev, tracker);
+
+ __dev_hold(ndev);
+ __dev_put(odev);
+
+ if (ndev)
+ __netdev_tracker_alloc(ndev, tracker, gfp);
}
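/*
 * Editor's sketch of the tracked-reference pattern above: a hypothetical
 * consumer embeds a netdevice_tracker next to its net_device pointer so
 * CONFIG_NET_DEV_REFCNT_TRACKER can attribute a leaked reference to it.
 */
struct example_binding {
	struct net_device *dev;
	netdevice_tracker dev_tracker;
};

static void example_bind(struct example_binding *b, struct net_device *dev)
{
	b->dev = dev;
	netdev_hold(dev, &b->dev_tracker, GFP_KERNEL);
}

static void example_unbind(struct example_binding *b)
{
	netdev_put(b->dev, &b->dev_tracker);	/* releases tracker + ref */
	b->dev = NULL;
}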
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
@@ -4166,10 +4488,17 @@ static inline void dev_hold(struct net_device *dev)
* called netif_lowerlayer_*() because they represent the state of any
* kind of lower layer not just hardware media.
*/
-
-void linkwatch_init_dev(struct net_device *dev);
void linkwatch_fire_event(struct net_device *dev);
-void linkwatch_forget_dev(struct net_device *dev);
+
+/**
+ * linkwatch_sync_dev - sync linkwatch for the given device
+ * @dev: network device to sync linkwatch for
+ *
+ * Sync linkwatch for the given device, removing it from the
+ * pending work list (if queued).
+ */
+void linkwatch_sync_dev(struct net_device *dev);
+void __linkwatch_sync_dev(struct net_device *dev);
/**
* netif_carrier_ok - test if carrier present
@@ -4184,11 +4513,11 @@ static inline bool netif_carrier_ok(const struct net_device *dev)
unsigned long dev_trans_start(struct net_device *dev);
-void __netdev_watchdog_up(struct net_device *dev);
+void netdev_watchdog_up(struct net_device *dev);
void netif_carrier_on(struct net_device *dev);
-
void netif_carrier_off(struct net_device *dev);
+void netif_carrier_event(struct net_device *dev);
/**
* netif_dormant_on - mark device as dormant.
@@ -4280,8 +4609,10 @@ static inline bool netif_testing(const struct net_device *dev)
*/
static inline bool netif_oper_up(const struct net_device *dev)
{
- return (dev->operstate == IF_OPER_UP ||
- dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
+ unsigned int operstate = READ_ONCE(dev->operstate);
+
+ return operstate == IF_OPER_UP ||
+ operstate == IF_OPER_UNKNOWN /* backward compat */;
}
/**
@@ -4377,7 +4708,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
- txq->xmit_lock_owner = cpu;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, cpu);
}
static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -4394,33 +4726,51 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
- txq->xmit_lock_owner = smp_processor_id();
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}
static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
bool ok = spin_trylock(&txq->_xmit_lock);
- if (likely(ok))
- txq->xmit_lock_owner = smp_processor_id();
+
+ if (likely(ok)) {
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+ }
return ok;
}
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock(&txq->_xmit_lock);
}
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock_bh(&txq->_xmit_lock);
}
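/*
 * Editor's sketch of the lockless reader these WRITE_ONCE() stores pair
 * with: __dev_queue_xmit() peeks at the owner so a CPU can detect that
 * it already holds this queue's lock instead of deadlocking on it.
 */
static bool example_lock_recursion(const struct netdev_queue *txq, int cpu)
{
	return READ_ONCE(txq->xmit_lock_owner) == cpu;
}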
-static inline void txq_trans_update(struct netdev_queue *txq)
+/*
+ * txq->trans_start can be read locklessly from dev_watchdog()
+ */
+static inline void txq_trans_update(const struct net_device *dev,
+ struct netdev_queue *txq)
+{
+ if (!dev->lltx)
+ WRITE_ONCE(txq->trans_start, jiffies);
+}
+
+static inline void txq_trans_cond_update(struct netdev_queue *txq)
{
- if (txq->xmit_lock_owner != -1)
- txq->trans_start = jiffies;
+ unsigned long now = jiffies;
+
+ if (READ_ONCE(txq->trans_start) != now)
+ WRITE_ONCE(txq->trans_start, now);
}
/* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
@@ -4428,8 +4778,7 @@ static inline void netif_trans_update(struct net_device *dev)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
- if (txq->trans_start != jiffies)
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
}
/**
@@ -4438,27 +4787,7 @@ static inline void netif_trans_update(struct net_device *dev)
*
* Get network device transmit lock
*/
-static inline void netif_tx_lock(struct net_device *dev)
-{
- unsigned int i;
- int cpu;
-
- spin_lock(&dev->tx_global_lock);
- cpu = smp_processor_id();
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-
- /* We are the only thread of execution doing a
- * freeze, but we have to grab the _xmit_lock in
- * order to synchronize with threads which are in
- * the ->hard_start_xmit() handler and already
- * checked the frozen bit.
- */
- __netif_tx_lock(txq, cpu);
- set_bit(__QUEUE_STATE_FROZEN, &txq->state);
- __netif_tx_unlock(txq);
- }
-}
+void netif_tx_lock(struct net_device *dev);
static inline void netif_tx_lock_bh(struct net_device *dev)
{
@@ -4466,22 +4795,7 @@ static inline void netif_tx_lock_bh(struct net_device *dev)
netif_tx_lock(dev);
}
-static inline void netif_tx_unlock(struct net_device *dev)
-{
- unsigned int i;
-
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-
- /* No need to grab the _xmit_lock here. If the
- * queue is not stopped for another reason, we
- * force a schedule.
- */
- clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
- netif_schedule_queue(txq);
- }
- spin_unlock(&dev->tx_global_lock);
-}
+void netif_tx_unlock(struct net_device *dev);
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
@@ -4490,7 +4804,7 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
}
#define HARD_TX_LOCK(dev, txq, cpu) { \
- if ((dev->features & NETIF_F_LLTX) == 0) { \
+ if (!(dev)->lltx) { \
__netif_tx_lock(txq, cpu); \
} else { \
__netif_tx_acquire(txq); \
@@ -4498,12 +4812,12 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
}
#define HARD_TX_TRYLOCK(dev, txq) \
- (((dev->features & NETIF_F_LLTX) == 0) ? \
+ (!(dev)->lltx ? \
__netif_tx_trylock(txq) : \
__netif_tx_acquire(txq))
#define HARD_TX_UNLOCK(dev, txq) { \
- if ((dev->features & NETIF_F_LLTX) == 0) { \
+ if (!(dev)->lltx) { \
__netif_tx_unlock(txq); \
} else { \
__netif_tx_release(txq); \
@@ -4571,6 +4885,9 @@ static inline void netif_addr_unlock_bh(struct net_device *dev)
void ether_setup(struct net_device *dev);
+/* Allocate dummy net_device */
+struct net_device *alloc_netdev_dummy(int sizeof_priv);
+
/* Support for loadable net-drivers */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
unsigned char name_assign_type,
@@ -4591,6 +4908,9 @@ int devm_register_netdev(struct device *dev, struct net_device *ndev);
/* General hardware address lists handling functions */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list, int addr_len);
+int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list, int addr_len);
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
@@ -4615,12 +4935,24 @@ void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
+void dev_addr_mod(struct net_device *dev, unsigned int offset,
+ const void *addr, size_t len);
+
+static inline void
+__dev_addr_set(struct net_device *dev, const void *addr, size_t len)
+{
+ dev_addr_mod(dev, 0, addr, len);
+}
+
+static inline void dev_addr_set(struct net_device *dev, const u8 *addr)
+{
+ __dev_addr_set(dev, addr, dev->addr_len);
+}
+
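/*
 * Editor's sketch: with address writes funneled through dev_addr_mod(),
 * a driver sets its MAC via dev_addr_set() rather than writing
 * dev->dev_addr directly.
 */
static void example_assign_mac(struct net_device *dev, const u8 *mac)
{
	dev_addr_set(dev, mac);		/* copies dev->addr_len bytes */
}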
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
-void dev_addr_flush(struct net_device *dev);
-int dev_addr_init(struct net_device *dev);
/* Functions used for unicast addresses handling */
int dev_uc_add(struct net_device *dev, const unsigned char *addr);
@@ -4633,7 +4965,7 @@ void dev_uc_flush(struct net_device *dev);
void dev_uc_init(struct net_device *dev);
/**
- * __dev_uc_sync - Synchonize device's unicast list
+ * __dev_uc_sync - Synchronize device's unicast list
* @dev: device to sync
* @sync: function to call if address should be added
* @unsync: function to call if address should be removed
@@ -4677,7 +5009,7 @@ void dev_mc_flush(struct net_device *dev);
void dev_mc_init(struct net_device *dev);
/**
- * __dev_mc_sync - Synchonize device's multicast list
+ * __dev_mc_sync - Synchronize device's multicast list
* @dev: device to sync
* @sync: function to call if address should be added
* @unsync: function to call if address should be removed
@@ -4710,9 +5042,11 @@ static inline void __dev_mc_unsync(struct net_device *dev,
/* Functions used for secondary unicast and multicast support */
void dev_set_rx_mode(struct net_device *dev);
-void __dev_set_rx_mode(struct net_device *dev);
+int netif_set_promiscuity(struct net_device *dev, int inc);
int dev_set_promiscuity(struct net_device *dev, int inc);
+int netif_set_allmulti(struct net_device *dev, int inc, bool notify);
int dev_set_allmulti(struct net_device *dev, int inc);
+void netif_state_change(struct net_device *dev);
void netdev_state_change(struct net_device *dev);
void __netdev_notify_peers(struct net_device *dev);
void netdev_notify_peers(struct net_device *dev);
@@ -4727,16 +5061,6 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
const struct pcpu_sw_netstats __percpu *netstats);
void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
-extern int netdev_max_backlog;
-extern int netdev_tstamp_prequeue;
-extern int netdev_unregister_timeout_secs;
-extern int weight_p;
-extern int dev_weight_rx_bias;
-extern int dev_weight_tx_bias;
-extern int dev_rx_weight;
-extern int dev_tx_weight;
-extern int gro_normal_batch;
-
enum {
NESTED_SYNC_IMM_BIT,
NESTED_SYNC_TODO_BIT,
@@ -4756,18 +5080,6 @@ struct netdev_nested_priv {
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
struct list_head **iter);
-struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
- struct list_head **iter);
-
-#ifdef CONFIG_LOCKDEP
-static LIST_HEAD(net_unlink_list);
-
-static inline void net_unlink_todo(struct net_device *dev)
-{
- if (list_empty(&dev->unlink_list))
- list_add_tail(&dev->unlink_list, &net_unlink_list);
-}
-#endif
/* iterate through upper list, must be called under RCU read lock */
#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
@@ -4861,11 +5173,6 @@ int skb_crc32c_csum_help(struct sk_buff *skb);
int skb_csum_hwoffload_help(struct sk_buff *skb,
const netdev_features_t features);
-struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
- netdev_features_t features, bool tx_path);
-struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
- netdev_features_t features);
-
struct netdev_bonding_info {
ifslave slave;
ifbond master;
@@ -4880,19 +5187,13 @@ void netdev_bonding_info_change(struct net_device *dev,
struct netdev_bonding_info *bonding_info);
#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
-void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
+void ethtool_notify(struct net_device *dev, unsigned int cmd);
#else
-static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
- const void *data)
+static inline void ethtool_notify(struct net_device *dev, unsigned int cmd)
{
}
#endif
-static inline
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
-{
- return __skb_gso_segment(skb, features, true);
-}
__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
static inline bool can_checksum_protocol(netdev_features_t features,
@@ -4930,24 +5231,47 @@ static inline void netdev_rx_csum_fault(struct net_device *dev,
void net_enable_timestamp(void);
void net_disable_timestamp(void);
-#ifdef CONFIG_PROC_FS
-int __init dev_proc_init(void);
-#else
-#define dev_proc_init() 0
-#endif
+static inline ktime_t netdev_get_tstamp(struct net_device *dev,
+ const struct skb_shared_hwtstamps *hwtstamps,
+ bool cycles)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
-static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
- struct sk_buff *skb, struct net_device *dev,
- bool more)
+ if (ops->ndo_get_tstamp)
+ return ops->ndo_get_tstamp(dev, hwtstamps, cycles);
+
+ return hwtstamps->hwtstamp;
+}
+
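/*
 * Editor's sketch: devices without ->ndo_get_tstamp fall back to the
 * precomputed hwtstamp, so callers can use the helper above
 * unconditionally. skb_hwtstamps() is the usual source of @hwtstamps.
 */
static ktime_t example_rx_hwtstamp(struct net_device *dev,
				   struct sk_buff *skb)
{
	return netdev_get_tstamp(dev, skb_hwtstamps(skb), false);
}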
+#ifndef CONFIG_PREEMPT_RT
+static inline void netdev_xmit_set_more(bool more)
{
__this_cpu_write(softnet_data.xmit.more, more);
- return ops->ndo_start_xmit(skb, dev);
}
static inline bool netdev_xmit_more(void)
{
return __this_cpu_read(softnet_data.xmit.more);
}
+#else
+static inline void netdev_xmit_set_more(bool more)
+{
+ current->net_xmit.more = more;
+}
+
+static inline bool netdev_xmit_more(void)
+{
+ return current->net_xmit.more;
+}
+#endif
+
+static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
+ struct sk_buff *skb, struct net_device *dev,
+ bool more)
+{
+ netdev_xmit_set_more(more);
+ return ops->ndo_start_xmit(skb, dev);
+}
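/*
 * Editor's sketch of how a driver consumes the flag set above: defer
 * the (expensive) doorbell write until the stack signals the end of a
 * batch. example_ring_doorbell() is a hypothetical MMIO kick.
 */
static void example_ring_doorbell(struct net_device *dev)
{
	/* device-specific doorbell write elided */
}

static netdev_tx_t example_batched_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	/* ... enqueue skb on the TX ring ... */
	if (!netdev_xmit_more())
		example_ring_doorbell(dev);
	return NETDEV_TX_OK;
}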
static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, bool more)
@@ -4957,7 +5281,7 @@ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_devi
rc = __netdev_start_xmit(ops, skb, dev, more);
if (rc == NETDEV_TX_OK)
- txq_trans_update(txq);
+ txq_trans_update(dev, txq);
return rc;
}
@@ -4971,8 +5295,6 @@ extern const struct kobj_ns_type_operations net_ns_type_operations;
const char *netdev_drivername(const struct net_device *dev);
-void linkwatch_run_queue(void);
-
static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
netdev_features_t f2)
{
@@ -5007,6 +5329,7 @@ static inline netdev_features_t netdev_add_tso_features(netdev_features_t featur
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);
+void netdev_compute_master_upper_features(struct net_device *dev, bool update_header);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
@@ -5015,16 +5338,22 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
netdev_features_t netif_skb_features(struct sk_buff *skb);
+void skb_warn_bad_offload(const struct sk_buff *skb);
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
- netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
+ netdev_features_t feature;
+
+ if (gso_type & (SKB_GSO_TCP_FIXEDID | SKB_GSO_TCP_FIXEDID_INNER))
+ gso_type |= __SKB_GSO_TCP_FIXEDID;
+
+ feature = ((netdev_features_t)gso_type << NETIF_F_GSO_SHIFT) & NETIF_F_GSO_MASK;
/* check flags correspondence */
BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
- BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(__SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
@@ -5040,6 +5369,8 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_TCP_ACCECN !=
+ (NETIF_F_GSO_ACCECN >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}
@@ -5058,23 +5389,27 @@ static inline bool netif_needs_gso(struct sk_buff *skb,
(skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
-static inline void netif_set_gso_max_size(struct net_device *dev,
- unsigned int size)
+void netif_set_tso_max_size(struct net_device *dev, unsigned int size);
+void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs);
+void netif_inherit_tso_max(struct net_device *to,
+ const struct net_device *from);
+
+static inline unsigned int
+netif_get_gro_max_size(const struct net_device *dev, const struct sk_buff *skb)
{
- dev->gso_max_size = size;
+ /* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
+ return skb->protocol == htons(ETH_P_IPV6) ?
+ READ_ONCE(dev->gro_max_size) :
+ READ_ONCE(dev->gro_ipv4_max_size);
}
-static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
- int pulled_hlen, u16 mac_offset,
- int mac_len)
+static inline unsigned int
+netif_get_gso_max_size(const struct net_device *dev, const struct sk_buff *skb)
{
- skb->protocol = protocol;
- skb->encapsulation = 1;
- skb_push(skb, pulled_hlen);
- skb_reset_transport_header(skb);
- skb->mac_header = mac_offset;
- skb->network_header = skb->mac_header + mac_len;
- skb->mac_len = mac_len;
+ /* pairs with WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */
+ return skb->protocol == htons(ETH_P_IPV6) ?
+ READ_ONCE(dev->gso_max_size) :
+ READ_ONCE(dev->gso_ipv4_max_size);
}
static inline bool netif_is_macsec(const struct net_device *dev)
@@ -5122,6 +5457,15 @@ static inline bool netif_is_l3_slave(const struct net_device *dev)
return dev->priv_flags & IFF_L3MDEV_SLAVE;
}
+static inline int dev_sdif(const struct net_device *dev)
+{
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ if (netif_is_l3_slave(dev))
+ return dev->ifindex;
+#endif
+ return 0;
+}
+
static inline bool netif_is_bridge_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_EBRIDGE;
@@ -5142,6 +5486,11 @@ static inline bool netif_is_ovs_port(const struct net_device *dev)
return dev->priv_flags & IFF_OVS_DATAPATH;
}
+static inline bool netif_is_any_bridge_master(const struct net_device *dev)
+{
+ return netif_is_bridge_master(dev) || netif_is_ovs_master(dev);
+}
+
static inline bool netif_is_any_bridge_port(const struct net_device *dev)
{
return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
@@ -5192,7 +5541,7 @@ static inline void netif_keep_dst(struct net_device *dev)
static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
{
/* TODO: reserve and use an additional IFF bit, if we get more users */
- return dev->priv_flags & IFF_MACSEC;
+ return netif_is_macsec(dev);
}
extern struct pernet_operations __net_initdata loopback_net_ops;
@@ -5208,14 +5557,11 @@ static inline const char *netdev_name(const struct net_device *dev)
return dev->name;
}
-static inline bool netdev_unregistering(const struct net_device *dev)
-{
- return dev->reg_state == NETREG_UNREGISTERING;
-}
-
static inline const char *netdev_reg_state(const struct net_device *dev)
{
- switch (dev->reg_state) {
+ u8 reg_state = READ_ONCE(dev->reg_state);
+
+ switch (reg_state) {
case NETREG_UNINITIALIZED: return " (uninitialized)";
case NETREG_REGISTERED: return "";
case NETREG_UNREGISTERING: return " (unregistering)";
@@ -5224,85 +5570,13 @@ static inline const char *netdev_reg_state(const struct net_device *dev)
case NETREG_DUMMY: return " (dummy)";
}
- WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
+ WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, reg_state);
return " (unknown)";
}
-__printf(3, 4) __cold
-void netdev_printk(const char *level, const struct net_device *dev,
- const char *format, ...);
-__printf(2, 3) __cold
-void netdev_emerg(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_alert(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_crit(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_err(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_warn(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_notice(const struct net_device *dev, const char *format, ...);
-__printf(2, 3) __cold
-void netdev_info(const struct net_device *dev, const char *format, ...);
-
-#define netdev_level_once(level, dev, fmt, ...) \
-do { \
- static bool __print_once __read_mostly; \
- \
- if (!__print_once) { \
- __print_once = true; \
- netdev_printk(level, dev, fmt, ##__VA_ARGS__); \
- } \
-} while (0)
-
-#define netdev_emerg_once(dev, fmt, ...) \
- netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
-#define netdev_alert_once(dev, fmt, ...) \
- netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
-#define netdev_crit_once(dev, fmt, ...) \
- netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
-#define netdev_err_once(dev, fmt, ...) \
- netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
-#define netdev_warn_once(dev, fmt, ...) \
- netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
-#define netdev_notice_once(dev, fmt, ...) \
- netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
-#define netdev_info_once(dev, fmt, ...) \
- netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
-
#define MODULE_ALIAS_NETDEV(device) \
MODULE_ALIAS("netdev-" device)
-#if defined(CONFIG_DYNAMIC_DEBUG) || \
- (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
-#define netdev_dbg(__dev, format, args...) \
-do { \
- dynamic_netdev_dbg(__dev, format, ##args); \
-} while (0)
-#elif defined(DEBUG)
-#define netdev_dbg(__dev, format, args...) \
- netdev_printk(KERN_DEBUG, __dev, format, ##args)
-#else
-#define netdev_dbg(__dev, format, args...) \
-({ \
- if (0) \
- netdev_printk(KERN_DEBUG, __dev, format, ##args); \
-})
-#endif
-
-#if defined(VERBOSE_DEBUG)
-#define netdev_vdbg netdev_dbg
-#else
-
-#define netdev_vdbg(dev, format, args...) \
-({ \
- if (0) \
- netdev_printk(KERN_DEBUG, dev, format, ##args); \
- 0; \
-})
-#endif
-
/*
* netdev_WARN() acts like dev_printk(), but with the key difference
* of using a WARN/WARN_ON to get the message out, including the
@@ -5316,74 +5590,6 @@ do { \
WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \
netdev_reg_state(dev), ##args)
-/* netif printk helpers, similar to netdev_printk */
-
-#define netif_printk(priv, type, level, dev, fmt, args...) \
-do { \
- if (netif_msg_##type(priv)) \
- netdev_printk(level, (dev), fmt, ##args); \
-} while (0)
-
-#define netif_level(level, priv, type, dev, fmt, args...) \
-do { \
- if (netif_msg_##type(priv)) \
- netdev_##level(dev, fmt, ##args); \
-} while (0)
-
-#define netif_emerg(priv, type, dev, fmt, args...) \
- netif_level(emerg, priv, type, dev, fmt, ##args)
-#define netif_alert(priv, type, dev, fmt, args...) \
- netif_level(alert, priv, type, dev, fmt, ##args)
-#define netif_crit(priv, type, dev, fmt, args...) \
- netif_level(crit, priv, type, dev, fmt, ##args)
-#define netif_err(priv, type, dev, fmt, args...) \
- netif_level(err, priv, type, dev, fmt, ##args)
-#define netif_warn(priv, type, dev, fmt, args...) \
- netif_level(warn, priv, type, dev, fmt, ##args)
-#define netif_notice(priv, type, dev, fmt, args...) \
- netif_level(notice, priv, type, dev, fmt, ##args)
-#define netif_info(priv, type, dev, fmt, args...) \
- netif_level(info, priv, type, dev, fmt, ##args)
-
-#if defined(CONFIG_DYNAMIC_DEBUG) || \
- (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
-#define netif_dbg(priv, type, netdev, format, args...) \
-do { \
- if (netif_msg_##type(priv)) \
- dynamic_netdev_dbg(netdev, format, ##args); \
-} while (0)
-#elif defined(DEBUG)
-#define netif_dbg(priv, type, dev, format, args...) \
- netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
-#else
-#define netif_dbg(priv, type, dev, format, args...) \
-({ \
- if (0) \
- netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
- 0; \
-})
-#endif
-
-/* if @cond then downgrade to debug, else print at @level */
-#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \
- do { \
- if (cond) \
- netif_dbg(priv, type, netdev, fmt, ##args); \
- else \
- netif_ ## level(priv, type, netdev, fmt, ##args); \
- } while (0)
-
-#if defined(VERBOSE_DEBUG)
-#define netif_vdbg netif_dbg
-#else
-#define netif_vdbg(priv, type, dev, format, args...) \
-({ \
- if (0) \
- netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
- 0; \
-})
-#endif
-
/*
* The list of packet types we will receive (as opposed to discard)
* and the routines to invoke.
@@ -5406,9 +5612,14 @@ do { \
#define PTYPE_HASH_SIZE (16)
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
-extern struct list_head ptype_all __read_mostly;
extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
extern struct net_device *blackhole_netdev;
+/* Note: avoid these macros in the fast path; prefer per-cpu or per-queue counters. */
+#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
+#define DEV_STATS_ADD(DEV, FIELD, VAL) \
+ atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
+#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
+
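/*
 * Editor's sketch: the macros above expand to atomic_long ops on the
 * __-prefixed aliases inside dev->stats, e.g. for a slow-path error:
 */
static void example_count_tx_error(struct net_device *dev)
{
	DEV_STATS_INC(dev, tx_errors);	/* atomic_long_inc(&dev->stats.__tx_errors) */
}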
#endif /* _LINUX_NETDEVICE_H */
diff --git a/include/linux/netdevice_xmit.h b/include/linux/netdevice_xmit.h
new file mode 100644
index 000000000000..cc232508e695
--- /dev/null
+++ b/include/linux/netdevice_xmit.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_NETDEVICE_XMIT_H
+#define _LINUX_NETDEVICE_XMIT_H
+
+#if IS_ENABLED(CONFIG_NET_ACT_MIRRED)
+#define MIRRED_NEST_LIMIT 4
+#endif
+
+struct net_device;
+
+struct netdev_xmit {
+ u16 recursion;
+ u8 more;
+#ifdef CONFIG_NET_EGRESS
+ u8 skip_txqueue;
+#endif
+#if IS_ENABLED(CONFIG_NET_ACT_MIRRED)
+ u8 sched_mirred_nest;
+ struct net_device *sched_mirred_dev[MIRRED_NEST_LIMIT];
+#endif
+#if IS_ENABLED(CONFIG_NF_DUP_NETDEV)
+ u8 nf_dup_skb_recursion;
+#endif
+};
+
+#endif
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index f0f3a8354c3c..efbbfa770d66 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -11,6 +11,7 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
+#include <linux/module.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <linux/sockptr.h>
@@ -21,6 +22,16 @@ static inline int NF_DROP_GETERR(int verdict)
return -(verdict >> NF_VERDICT_QBITS);
}
+static __always_inline int
+NF_DROP_REASON(struct sk_buff *skb, enum skb_drop_reason reason, u32 err)
+{
+ BUILD_BUG_ON(err > 0xffff);
+
+ kfree_skb_reason(skb, reason);
+
+ return ((err << 16) | NF_STOLEN);
+}
+
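/*
 * Editor's sketch (not part of the patch): a hook can combine a precise
 * drop reason with a stolen verdict carrying an errno via the helper
 * above. The condition shown is illustrative only.
 */
static unsigned int example_hook(void *priv, struct sk_buff *skb,
				 const struct nf_hook_state *state)
{
	if (!state->in)
		return NF_DROP_REASON(skb, SKB_DROP_REASON_NETFILTER_DROP,
				      EPERM);
	return NF_ACCEPT;
}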
static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
const union nf_inet_addr *a2)
{
@@ -65,8 +76,8 @@ struct nf_hook_ops;
struct sock;
struct nf_hook_state {
- unsigned int hook;
- u_int8_t pf;
+ u8 hook;
+ u8 pf;
struct net_device *in;
struct net_device *out;
struct sock *sk;
@@ -77,12 +88,23 @@ struct nf_hook_state {
typedef unsigned int nf_hookfn(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state);
+enum nf_hook_ops_type {
+ NF_HOOK_OP_UNDEFINED,
+ NF_HOOK_OP_NF_TABLES,
+ NF_HOOK_OP_BPF,
+ NF_HOOK_OP_NFT_FT,
+};
+
struct nf_hook_ops {
+ struct list_head list;
+ struct rcu_head rcu;
+
/* User fills in from here down. */
nf_hookfn *hook;
struct net_device *dev;
void *priv;
- u_int8_t pf;
+ u8 pf;
+ enum nf_hook_ops_type hook_ops_type:8;
unsigned int hooknum;
/* Hooks are ordered in ascending priority. */
int priority;
@@ -237,11 +259,6 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
#endif
break;
-#if IS_ENABLED(CONFIG_DECNET)
- case NFPROTO_DECNET:
- hook_head = rcu_dereference(net->nf.hooks_decnet[hook]);
- break;
-#endif
default:
WARN_ON_ONCE(1);
break;
@@ -357,31 +374,27 @@ __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
u_int8_t protocol, unsigned short family);
int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict, unsigned short family);
-int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);
#include <net/flow.h>
struct nf_conn;
enum nf_nat_manip_type;
struct nlattr;
-enum ip_conntrack_dir;
struct nf_nat_hook {
int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip,
const struct nlattr *attr);
void (*decode_session)(struct sk_buff *skb, struct flowi *fl);
- unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct,
- enum nf_nat_manip_type mtype,
- enum ip_conntrack_dir dir);
+ void (*remove_nat_bysrc)(struct nf_conn *ct);
};
-extern struct nf_nat_hook __rcu *nf_nat_hook;
+extern const struct nf_nat_hook __rcu *nf_nat_hook;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#if IS_ENABLED(CONFIG_NF_NAT)
- struct nf_nat_hook *nat_hook;
+ const struct nf_nat_hook *nat_hook;
rcu_read_lock();
nat_hook = rcu_dereference(nf_nat_hook);
@@ -434,13 +447,14 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_zones_common.h>
-extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+void nf_ct_set_closing(struct nf_conntrack *nfct);
struct nf_conntrack_tuple;
bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
const struct sk_buff *skb);
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+static inline void nf_ct_set_closing(struct nf_conntrack *nfct) {}
struct nf_conntrack_tuple;
static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
const struct sk_buff *skb)
@@ -457,8 +471,12 @@ struct nf_ct_hook {
void (*destroy)(struct nf_conntrack *);
bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
const struct sk_buff *);
+ void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
+ void (*set_closing)(struct nf_conntrack *nfct);
+ int (*confirm)(struct sk_buff *skb);
+ u32 (*get_id)(const struct nf_conntrack *nfct);
};
-extern struct nf_ct_hook __rcu *nf_ct_hook;
+extern const struct nf_ct_hook __rcu *nf_ct_hook;
struct nlattr;
@@ -473,17 +491,20 @@ struct nfnl_ct_hook {
void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, s32 off);
};
-extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;
+extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook;
-/**
- * nf_skb_duplicated - TEE target has sent a packet
- *
- * When a xtables target sends a packet, the OUTPUT and POSTROUTING
- * hooks are traversed again, i.e. nft and xtables are invoked recursively.
- *
- * This is used by xtables TEE target to prevent the duplicated skb from
- * being duplicated again.
- */
-DECLARE_PER_CPU(bool, nf_skb_duplicated);
+struct nf_defrag_hook {
+ struct module *owner;
+ int (*enable)(struct net *net);
+ void (*disable)(struct net *net);
+};
+extern const struct nf_defrag_hook __rcu *nf_defrag_v4_hook;
+extern const struct nf_defrag_hook __rcu *nf_defrag_v6_hook;
+
+/*
+ * Contains bitmask of ctnetlink event subscribers, if any.
+ * Can't be pernet due to NETLINK_LISTEN_ALL_NSID setsockopt flag.
+ */
+extern u8 nf_ctnetlink_has_listener;
#endif /*__LINUX_NETFILTER_H*/
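The new defrag hooks let callers turn IPv4/IPv6 reassembly on and off without a hard module dependency; the owner field supports the usual pattern of pinning the provider module before calling out of the RCU section. A caller sketch under that assumption:

static int my_enable_v4_defrag(struct net *net)
{
	const struct nf_defrag_hook *hook;
	int err;

	rcu_read_lock();
	hook = rcu_dereference(nf_defrag_v4_hook);
	if (!hook || !try_module_get(hook->owner)) {
		rcu_read_unlock();
		return -EOPNOTSUPP;	/* provider not loaded */
	}
	rcu_read_unlock();

	err = hook->enable(net);	/* safe: module ref pins the hook */
	if (err)
		module_put(hook->owner);
	return err;
}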
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 10279c4830ac..e9f4f845d760 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -186,6 +186,8 @@ struct ip_set_type_variant {
/* Return true if "b" set is the same as "a"
* according to the create set parameters */
bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+ /* Cancel ongoing garbage collectors before destroying the set */
+ void (*cancel_gc)(struct ip_set *set);
/* Region-locking is used */
bool region_lock;
};
@@ -196,6 +198,9 @@ struct ip_set_region {
u32 elements; /* Number of elements vs timeout */
};
+/* Max range where every element is added/deleted in one step */
+#define IPSET_MAX_RANGE (1<<14)
+
/* The max revision number supported by any set type + 1 */
#define IPSET_REVISION_MAX 9
@@ -239,6 +244,8 @@ extern void ip_set_type_unregister(struct ip_set_type *set_type);
/* A generic IP set */
struct ip_set {
+ /* For call_rcu in destroy */
+ struct rcu_head rcu;
/* The name of the set */
char name[IPSET_MAXNAMELEN];
/* Lock protecting the set data */
@@ -512,6 +519,16 @@ ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
*skbinfo = ext->skbinfo;
}
+static inline void
+nf_inet_addr_mask_inplace(union nf_inet_addr *a1,
+ const union nf_inet_addr *mask)
+{
+ a1->all[0] &= mask->all[0];
+ a1->all[1] &= mask->all[1];
+ a1->all[2] &= mask->all[2];
+ a1->all[3] &= mask->all[3];
+}
+
#define IP_SET_INIT_KEXT(skb, opt, set) \
{ .bytes = (skb)->len, .packets = 1, .target = true,\
.timeout = ip_set_adt_opt_timeout(opt, set) }
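nf_inet_addr_mask_inplace() masks all four 32-bit words, so one helper serves both IPv4 keys (upper words zero) and IPv6 keys. Usage sketch, with elem hypothetical:

	union nf_inet_addr key = elem->addr;

	nf_inet_addr_mask_inplace(&key, &elem->mask);	/* key = addr & mask */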
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 0c7d8d1e945d..2770db2fa080 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -2,7 +2,7 @@
#ifndef _NF_CONNTRACK_COMMON_H
#define _NF_CONNTRACK_COMMON_H
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <uapi/linux/netfilter/nf_conntrack_common.h>
struct ip_conntrack_stat {
@@ -18,25 +18,28 @@ struct ip_conntrack_stat {
unsigned int expect_create;
unsigned int expect_delete;
unsigned int search_restart;
+ unsigned int chaintoolong;
};
#define NFCT_INFOMASK 7UL
#define NFCT_PTRMASK ~(NFCT_INFOMASK)
struct nf_conntrack {
- atomic_t use;
+ refcount_t use;
};
void nf_conntrack_destroy(struct nf_conntrack *nfct);
+
+/* like nf_ct_put, but without module dependency on nf_conntrack */
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
- if (nfct && atomic_dec_and_test(&nfct->use))
+ if (nfct && refcount_dec_and_test(&nfct->use))
nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
if (nfct)
- atomic_inc(&nfct->use);
+ refcount_inc(&nfct->use);
}
#endif /* _NF_CONNTRACK_COMMON_H */
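Switching nf_conntrack.use to refcount_t trades raw atomics for overflow/underflow checking: refcount_inc() WARNs on inc-from-zero and saturates rather than wrapping. The get/put pattern itself is unchanged (sketch; skb_nfct() is the usual accessor):

	struct nf_conntrack *nfct = skb_nfct(skb);	/* may be NULL */

	nf_conntrack_get(nfct);		/* no-op on NULL */
	/* ... use the entry from deferred context ... */
	nf_conntrack_put(nfct);		/* nf_conntrack_destroy() on last put */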
diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h
deleted file mode 100644
index c509ed76e714..000000000000
--- a/include/linux/netfilter/nf_conntrack_dccp.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NF_CONNTRACK_DCCP_H
-#define _NF_CONNTRACK_DCCP_H
-
-/* Exposed to userspace over nfnetlink */
-enum ct_dccp_states {
- CT_DCCP_NONE,
- CT_DCCP_REQUEST,
- CT_DCCP_RESPOND,
- CT_DCCP_PARTOPEN,
- CT_DCCP_OPEN,
- CT_DCCP_CLOSEREQ,
- CT_DCCP_CLOSING,
- CT_DCCP_TIMEWAIT,
- CT_DCCP_IGNORE,
- CT_DCCP_INVALID,
- __CT_DCCP_MAX
-};
-#define CT_DCCP_MAX (__CT_DCCP_MAX - 1)
-
-enum ct_dccp_roles {
- CT_DCCP_ROLE_CLIENT,
- CT_DCCP_ROLE_SERVER,
- __CT_DCCP_ROLE_MAX
-};
-#define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1)
-
-#include <linux/netfilter/nf_conntrack_tuple_common.h>
-
-struct nf_ct_dccp {
- u_int8_t role[IP_CT_DIR_MAX];
- u_int8_t state;
- u_int8_t last_pkt;
- u_int8_t last_dir;
- u_int64_t handshake_seq;
-};
-
-#endif /* _NF_CONNTRACK_DCCP_H */
diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h
index 4561ec0fcea4..81286c499325 100644
--- a/include/linux/netfilter/nf_conntrack_h323.h
+++ b/include/linux/netfilter/nf_conntrack_h323.h
@@ -34,64 +34,63 @@ struct nf_ct_h323_master {
int get_h225_addr(struct nf_conn *ct, unsigned char *data,
TransportAddress *taddr, union nf_inet_addr *addr,
__be16 *port);
-void nf_conntrack_h245_expect(struct nf_conn *new,
- struct nf_conntrack_expect *this);
-void nf_conntrack_q931_expect(struct nf_conn *new,
- struct nf_conntrack_expect *this);
-extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
- unsigned char **data, int dataoff,
- H245_TransportAddress *taddr,
- union nf_inet_addr *addr,
- __be16 port);
-extern int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff,
- unsigned char **data, int dataoff,
- TransportAddress *taddr,
- union nf_inet_addr *addr,
- __be16 port);
-extern int (*set_sig_addr_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff, unsigned char **data,
- TransportAddress *taddr, int count);
-extern int (*set_ras_addr_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff, unsigned char **data,
- TransportAddress *taddr, int count);
-extern int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff, unsigned char **data,
- int dataoff,
- H245_TransportAddress *taddr,
- __be16 port, __be16 rtp_port,
- struct nf_conntrack_expect *rtp_exp,
- struct nf_conntrack_expect *rtcp_exp);
-extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
+
+struct nfct_h323_nat_hooks {
+ int (*set_h245_addr)(struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
- H245_TransportAddress *taddr, __be16 port,
- struct nf_conntrack_expect *exp);
-extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
+ H245_TransportAddress *taddr,
+ union nf_inet_addr *addr, __be16 port);
+ int (*set_h225_addr)(struct sk_buff *skb, unsigned int protoff,
unsigned char **data, int dataoff,
- TransportAddress *taddr, __be16 port,
- struct nf_conntrack_expect *exp);
-extern int (*nat_callforwarding_hook) (struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned char **data, int dataoff,
- TransportAddress *taddr,
- __be16 port,
- struct nf_conntrack_expect *exp);
-extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned char **data, TransportAddress *taddr,
- int idx, __be16 port,
- struct nf_conntrack_expect *exp);
+ TransportAddress *taddr,
+ union nf_inet_addr *addr, __be16 port);
+ int (*set_sig_addr)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned char **data,
+ TransportAddress *taddr, int count);
+ int (*set_ras_addr)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned char **data,
+ TransportAddress *taddr, int count);
+ int (*nat_rtp_rtcp)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ H245_TransportAddress *taddr,
+ __be16 port, __be16 rtp_port,
+ struct nf_conntrack_expect *rtp_exp,
+ struct nf_conntrack_expect *rtcp_exp);
+ int (*nat_t120)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ H245_TransportAddress *taddr, __be16 port,
+ struct nf_conntrack_expect *exp);
+ int (*nat_h245)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ TransportAddress *taddr, __be16 port,
+ struct nf_conntrack_expect *exp);
+ int (*nat_callforwarding)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ TransportAddress *taddr, __be16 port,
+ struct nf_conntrack_expect *exp);
+ int (*nat_q931)(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, TransportAddress *taddr, int idx,
+ __be16 port, struct nf_conntrack_expect *exp);
+};
+extern const struct nfct_h323_nat_hooks __rcu *nfct_h323_nat_hook;
#endif
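Folding the nine individual hook pointers into one ops structure lets the NAT helper publish or withdraw everything with a single RCU pointer update. A registration sketch modeled on that pattern (function bodies elided):

static const struct nfct_h323_nat_hooks nathooks = {
	.set_h245_addr		= set_h245_addr,
	.set_h225_addr		= set_h225_addr,
	.set_sig_addr		= set_sig_addr,
	.set_ras_addr		= set_ras_addr,
	.nat_rtp_rtcp		= nat_rtp_rtcp,
	.nat_t120		= nat_t120,
	.nat_h245		= nat_h245,
	.nat_callforwarding	= nat_callforwarding,
	.nat_q931		= nat_q931,
};

/* module init */
RCU_INIT_POINTER(nfct_h323_nat_hook, &nathooks);

/* module exit */
RCU_INIT_POINTER(nfct_h323_nat_hook, NULL);
synchronize_rcu();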
diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h
index a28aa289afdc..c3bdb4370938 100644
--- a/include/linux/netfilter/nf_conntrack_pptp.h
+++ b/include/linux/netfilter/nf_conntrack_pptp.h
@@ -300,26 +300,22 @@ union pptp_ctrl_union {
struct PptpSetLinkInfo setlink;
};
-extern int
-(*nf_nat_pptp_hook_outbound)(struct sk_buff *skb,
- struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- struct PptpControlHeader *ctlh,
- union pptp_ctrl_union *pptpReq);
-
-extern int
-(*nf_nat_pptp_hook_inbound)(struct sk_buff *skb,
- struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- struct PptpControlHeader *ctlh,
- union pptp_ctrl_union *pptpReq);
-
-extern void
-(*nf_nat_pptp_hook_exp_gre)(struct nf_conntrack_expect *exp_orig,
- struct nf_conntrack_expect *exp_reply);
-
-extern void
-(*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct,
- struct nf_conntrack_expect *exp);
+struct nf_nat_pptp_hook {
+ int (*outbound)(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq);
+ int (*inbound)(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq);
+ void (*exp_gre)(struct nf_conntrack_expect *exp_orig,
+ struct nf_conntrack_expect *exp_reply);
+ void (*expectfn)(struct nf_conn *ct,
+ struct nf_conntrack_expect *exp);
+};
+extern const struct nf_nat_pptp_hook __rcu *nf_nat_pptp_hook;
#endif /* _NF_CONNTRACK_PPTP_H */
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index f33aa6021364..34ce5d2f37a2 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -25,7 +25,6 @@ struct nf_ct_gre_keymap {
int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t);
-void nf_ct_gre_keymap_flush(struct net *net);
/* delete keymap entries */
void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
index 625f491b95de..fb31312825ae 100644
--- a/include/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -9,6 +9,7 @@ struct ip_ct_sctp {
enum sctp_conntrack state;
__be32 vtag[IP_CT_DIR_MAX];
+ u8 init[IP_CT_DIR_MAX];
u8 last_dir;
u8 flags;
};
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
index c620521c42bc..dbc614dfe0d5 100644
--- a/include/linux/netfilter/nf_conntrack_sip.h
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -164,7 +164,7 @@ struct nf_nat_sip_hooks {
unsigned int medialen,
union nf_inet_addr *rtp_addr);
};
-extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
+extern const struct nf_nat_sip_hooks __rcu *nf_nat_sip_hooks;
int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr,
unsigned int datalen, unsigned int *matchoff,
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 515ce53aa20d..e9a9ab34a7cc 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -11,6 +11,7 @@ struct nfnl_info {
struct net *net;
struct sock *sk;
const struct nlmsghdr *nlh;
+ const struct nfgenmsg *nfmsg;
struct netlink_ext_ack *extack;
};
@@ -44,7 +45,6 @@ struct nfnetlink_subsystem {
int (*commit)(struct net *net, struct sk_buff *skb);
int (*abort)(struct net *net, struct sk_buff *skb,
enum nfnl_abort_action action);
- void (*cleanup)(struct net *net);
bool (*valid_genid)(struct net *net, u32 genid);
};
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 07c6ad8f2a02..77c778d84d4c 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -36,8 +36,8 @@ struct xt_action_param {
const void *matchinfo, *targinfo;
};
const struct nf_hook_state *state;
- int fragoff;
unsigned int thoff;
+ u16 fragoff;
bool hotdrop;
};
@@ -51,21 +51,11 @@ static inline struct net_device *xt_in(const struct xt_action_param *par)
return par->state->in;
}
-static inline const char *xt_inname(const struct xt_action_param *par)
-{
- return par->state->in->name;
-}
-
static inline struct net_device *xt_out(const struct xt_action_param *par)
{
return par->state->out;
}
-static inline const char *xt_outname(const struct xt_action_param *par)
-{
- return par->state->out->name;
-}
-
static inline unsigned int xt_hooknum(const struct xt_action_param *par)
{
return par->state->hook;
@@ -238,9 +228,6 @@ struct xt_table {
u_int8_t af; /* address/protocol family */
int priority; /* hook order */
- /* called when table is needed in the given netns */
- int (*table_init)(struct net *net);
-
/* A unique name... */
const char name[XT_TABLE_MAXNAMELEN];
};
@@ -360,7 +347,7 @@ extern struct static_key xt_tee_enabled;
* Begin packet processing : all readers must wait the end
* 1) Must be called with preemption disabled
* 2) softirqs must be disabled too (or we should use this_cpu_add())
- * Returns :
+ * Returns:
* 1 if no recursion on this cpu
* 0 if recursion detected
*/
@@ -452,6 +439,9 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
+int xt_register_template(const struct xt_table *t, int(*table_init)(struct net *net));
+void xt_unregister_template(const struct xt_table *t);
+
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
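xt_register_template() takes over the pernet-init role of the removed ->table_init member, so the same const table definition can be shared across netns setup. A sketch modeled on the iptables filter table, assuming a packet_filter struct xt_table is defined nearby:

static int __net_init my_table_init(struct net *net)
{
	/* allocate this netns's table instance and register its hooks */
	return 0;
}

static int __init my_tables_init(void)
{
	return xt_register_template(&packet_filter, my_table_init);
}

static void __exit my_tables_exit(void)
{
	xt_unregister_template(&packet_filter);
}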
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index 4f9a4b3c5892..a40aaf645fa4 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -54,9 +54,8 @@ int arpt_register_table(struct net *net, const struct xt_table *table,
const struct nf_hook_ops *ops);
void arpt_unregister_table(struct net *net, const char *name);
void arpt_unregister_table_pre_exit(struct net *net, const char *name);
-extern unsigned int arpt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
+extern unsigned int arpt_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index f980edfdd278..743475ca7e9d 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -42,7 +42,7 @@ static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
if (!nf_bridge)
return 0;
- return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
+ return nf_bridge->physinif;
}
static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
@@ -56,11 +56,11 @@ static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
}
static inline struct net_device *
-nf_bridge_get_physindev(const struct sk_buff *skb)
+nf_bridge_get_physindev(const struct sk_buff *skb, struct net *net)
{
const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
- return nf_bridge ? nf_bridge->physindev : NULL;
+ return nf_bridge ? dev_get_by_index_rcu(net, nf_bridge->physinif) : NULL;
}
static inline struct net_device *
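With the cached physindev pointer gone, the in-port device is looked up from physinif on demand; callers must hold rcu_read_lock() and may not cache the result past it. Caller sketch:

static int my_physin_ifindex(const struct sk_buff *skb, struct net *net)
{
	const struct net_device *physindev;
	int ifindex = 0;

	rcu_read_lock();
	physindev = nf_bridge_get_physindev(skb, net);
	if (physindev)
		ifindex = physindev->ifindex;
	rcu_read_unlock();
	return ifindex;
}

(When only the ifindex is needed, nf_bridge_get_physinif() above avoids the lookup entirely.)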
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index a8178253ce53..fd533552a062 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -94,10 +94,6 @@ struct ebt_table {
struct ebt_replace_kernel *table;
unsigned int valid_hooks;
rwlock_t lock;
- /* e.g. could be the table explicitly only allows certain
- * matches, targets, ... 0 == let it in */
- int (*check)(const struct ebt_table_info *info,
- unsigned int valid_hooks);
/* the data used by the kernel */
struct ebt_table_info *private;
struct nf_hook_ops *ops;
@@ -112,9 +108,8 @@ extern int ebt_register_table(struct net *net,
const struct nf_hook_ops *ops);
extern void ebt_unregister_table(struct net *net, const char *tablename);
void ebt_unregister_table_pre_exit(struct net *net, const char *tablename);
-extern unsigned int ebt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct ebt_table *table);
+extern unsigned int ebt_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
/* True if the hook mask denotes that the rule is in a base chain,
* used in the check() functions */
@@ -127,4 +122,6 @@ static inline bool ebt_invalid_target(int target)
return (target < -NUM_STANDARD_TARGETS || target >= 0);
}
+int ebt_register_template(const struct ebt_table *t, int(*table_init)(struct net *net));
+void ebt_unregister_template(const struct ebt_table *t);
#endif
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h
index 8dddfb151f00..a5f7bef1b3a4 100644
--- a/include/linux/netfilter_defs.h
+++ b/include/linux/netfilter_defs.h
@@ -7,14 +7,6 @@
/* in/out/forward only */
#define NF_ARP_NUMHOOKS 3
-/* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */
-#define NF_DN_NUMHOOKS 7
-
-#if IS_ENABLED(CONFIG_DECNET)
-/* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */
-#define NF_MAX_HOOKS NF_DN_NUMHOOKS
-#else
#define NF_MAX_HOOKS NF_INET_NUMHOOKS
-#endif
#endif
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
deleted file mode 100644
index a13774be2eb5..000000000000
--- a/include/linux/netfilter_ingress.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NETFILTER_INGRESS_H_
-#define _NETFILTER_INGRESS_H_
-
-#include <linux/netfilter.h>
-#include <linux/netdevice.h>
-
-#ifdef CONFIG_NETFILTER_INGRESS
-static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
-{
-#ifdef CONFIG_JUMP_LABEL
- if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
- return false;
-#endif
- return rcu_access_pointer(skb->dev->nf_hooks_ingress);
-}
-
-/* caller must hold rcu_read_lock */
-static inline int nf_hook_ingress(struct sk_buff *skb)
-{
- struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
- struct nf_hook_state state;
- int ret;
-
- /* Must recheck the ingress hook head, in the event it became NULL
- * after the check in nf_hook_ingress_active evaluated to true.
- */
- if (unlikely(!e))
- return 0;
-
- nf_hook_state_init(&state, NF_NETDEV_INGRESS,
- NFPROTO_NETDEV, skb->dev, NULL, NULL,
- dev_net(skb->dev), NULL);
- ret = nf_hook_slow(skb, &state, e, 0);
- if (ret == 0)
- return -1;
-
- return ret;
-}
-
-static inline void nf_hook_ingress_init(struct net_device *dev)
-{
- RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
-}
-#else /* CONFIG_NETFILTER_INGRESS */
-static inline int nf_hook_ingress_active(struct sk_buff *skb)
-{
- return 0;
-}
-
-static inline int nf_hook_ingress(struct sk_buff *skb)
-{
- return 0;
-}
-
-static inline void nf_hook_ingress_init(struct net_device *dev) {}
-#endif /* CONFIG_NETFILTER_INGRESS */
-#endif /* _NETFILTER_INGRESS_H_ */
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index 8d09bfe850dc..132b0e4a6d4d 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -63,9 +63,9 @@ struct ipt_error {
}
extern void *ipt_alloc_initial_table(const struct xt_table *);
-extern unsigned int ipt_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
+extern unsigned int ipt_do_table(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 48314ade1506..61aa48f46dd7 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -51,7 +51,7 @@ struct nf_ipv6_ops {
u32 (*cookie_init_sequence)(const struct ipv6hdr *iph,
const struct tcphdr *th, u16 *mssp);
int (*cookie_v6_check)(const struct ipv6hdr *iph,
- const struct tcphdr *th, __u32 cookie);
+ const struct tcphdr *th);
#endif
void (*route_input)(struct sk_buff *skb);
int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
@@ -179,16 +179,16 @@ static inline u32 nf_ipv6_cookie_init_sequence(const struct ipv6hdr *iph,
}
static inline int nf_cookie_v6_check(const struct ipv6hdr *iph,
- const struct tcphdr *th, __u32 cookie)
+ const struct tcphdr *th)
{
#if IS_ENABLED(CONFIG_SYN_COOKIES)
#if IS_MODULE(CONFIG_IPV6)
const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
if (v6_ops)
- return v6_ops->cookie_v6_check(iph, th, cookie);
+ return v6_ops->cookie_v6_check(iph, th);
#elif IS_BUILTIN(CONFIG_IPV6)
- return __cookie_v6_check(iph, th, cookie);
+ return __cookie_v6_check(iph, th);
#endif
#endif
return 0;
@@ -197,6 +197,8 @@ static inline int nf_cookie_v6_check(const struct ipv6hdr *iph,
__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
+int nf_ip6_check_hbh_len(struct sk_buff *skb, u32 *plen);
+
int ipv6_netfilter_init(void);
void ipv6_netfilter_fini(void);
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 79e73fd7d965..8b8885a73c76 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -29,9 +29,8 @@ int ip6t_register_table(struct net *net, const struct xt_table *table,
const struct nf_hook_ops *ops);
void ip6t_unregister_table_pre_exit(struct net *net, const char *name);
void ip6t_unregister_table_exit(struct net *net, const char *name);
-extern unsigned int ip6t_do_table(struct sk_buff *skb,
- const struct nf_hook_state *state,
- struct xt_table *table);
+extern unsigned int ip6t_do_table(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
#include <net/compat.h>
diff --git a/include/linux/netfilter_netdev.h b/include/linux/netfilter_netdev.h
new file mode 100644
index 000000000000..3175073a66ba
--- /dev/null
+++ b/include/linux/netfilter_netdev.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NETFILTER_NETDEV_H_
+#define _NETFILTER_NETDEV_H_
+
+#include <linux/netfilter.h>
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_NETFILTER_INGRESS
+static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
+{
+#ifdef CONFIG_JUMP_LABEL
+ if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
+ return false;
+#endif
+ return rcu_access_pointer(skb->dev->nf_hooks_ingress);
+}
+
+/* caller must hold rcu_read_lock */
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ struct nf_hook_entries *e = rcu_dereference(skb->dev->nf_hooks_ingress);
+ struct nf_hook_state state;
+ int ret;
+
+ /* Must recheck the ingress hook head, in the event it became NULL
+ * after the check in nf_hook_ingress_active evaluated to true.
+ */
+ if (unlikely(!e))
+ return 0;
+
+ nf_hook_state_init(&state, NF_NETDEV_INGRESS,
+ NFPROTO_NETDEV, skb->dev, NULL, NULL,
+ dev_net(skb->dev), NULL);
+ ret = nf_hook_slow(skb, &state, e, 0);
+ if (ret == 0)
+ return -1;
+
+ return ret;
+}
+
+#else /* CONFIG_NETFILTER_INGRESS */
+static inline int nf_hook_ingress_active(struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+ return 0;
+}
+#endif /* CONFIG_NETFILTER_INGRESS */
+
+#ifdef CONFIG_NETFILTER_EGRESS
+static inline bool nf_hook_egress_active(void)
+{
+#ifdef CONFIG_JUMP_LABEL
+ if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_EGRESS]))
+ return false;
+#endif
+ return true;
+}
+
+/**
+ * nf_hook_egress - classify packets before transmission
+ * @skb: packet to be classified
+ * @rc: result code which shall be returned by __dev_queue_xmit() on failure
+ * @dev: netdev whose egress hooks shall be applied to @skb
+ *
+ * Caller must hold rcu_read_lock.
+ *
+ * On ingress, packets are classified first by tc, then by netfilter.
+ * On egress, the order is reversed for symmetry. Conceptually, tc and
+ * netfilter can be thought of as layers, with netfilter layered above tc:
+ * When tc redirects a packet to another interface, netfilter is not applied
+ * because the packet is on the tc layer.
+ *
+ * The nf_skip_egress flag controls whether netfilter is applied on egress.
+ * It is updated by __netif_receive_skb_core() and __dev_queue_xmit() when the
+ * packet passes through tc and netfilter. Because __dev_queue_xmit() may be
+ * called recursively by tunnel drivers such as vxlan, the flag is reverted to
+ * false after sch_handle_egress(). This ensures that netfilter is applied
+ * both on the overlay and underlying network.
+ *
+ * Returns: @skb on success or %NULL if the packet was consumed or filtered.
+ */
+static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
+ struct net_device *dev)
+{
+ struct nf_hook_entries *e;
+ struct nf_hook_state state;
+ int ret;
+
+#ifdef CONFIG_NETFILTER_SKIP_EGRESS
+ if (skb->nf_skip_egress)
+ return skb;
+#endif
+
+ e = rcu_dereference_check(dev->nf_hooks_egress, rcu_read_lock_bh_held());
+ if (!e)
+ return skb;
+
+ nf_hook_state_init(&state, NF_NETDEV_EGRESS,
+ NFPROTO_NETDEV, NULL, dev, NULL,
+ dev_net(dev), NULL);
+
+ /* nf assumes rcu_read_lock, not just read_lock_bh */
+ rcu_read_lock();
+ ret = nf_hook_slow(skb, &state, e, 0);
+ rcu_read_unlock();
+
+ if (ret == 1) {
+ return skb;
+ } else if (ret < 0) {
+ *rc = NET_XMIT_DROP;
+ return NULL;
+ } else { /* ret == 0 */
+ *rc = NET_XMIT_SUCCESS;
+ return NULL;
+ }
+}
+#else /* CONFIG_NETFILTER_EGRESS */
+static inline bool nf_hook_egress_active(void)
+{
+ return false;
+}
+
+static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
+ struct net_device *dev)
+{
+ return skb;
+}
+#endif /* CONFIG_NETFILTER_EGRESS */
+
+static inline void nf_skip_egress(struct sk_buff *skb, bool skip)
+{
+#ifdef CONFIG_NETFILTER_SKIP_EGRESS
+ skb->nf_skip_egress = skip;
+#endif
+}
+
+static inline void nf_hook_netdev_init(struct net_device *dev)
+{
+#ifdef CONFIG_NETFILTER_INGRESS
+ RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
+#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+ RCU_INIT_POINTER(dev->nf_hooks_egress, NULL);
+#endif
+}
+
+#endif /* _NETFILTER_NETDEV_H_ */
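A transmit-path call site shaped like the one in __dev_queue_xmit(), simplified (my_driver_xmit is hypothetical):

static int my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int rc = NET_XMIT_SUCCESS;

	rcu_read_lock_bh();
	if (nf_hook_egress_active())
		skb = nf_hook_egress(skb, &rc, dev);
	if (skb)
		rc = my_driver_xmit(skb, dev);
	rcu_read_unlock_bh();
	return rc;	/* NULL skb: consumed (SUCCESS) or dropped (DROP) */
}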
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 9062adfa2fb9..72ee7d210a74 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -17,83 +17,141 @@
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
+#include <linux/uio.h>
+#include <linux/rolling_buffer.h>
-/*
- * Overload PG_private_2 to give us PG_fscache - this is used to indicate that
- * a page is currently backed by a local disk cache
- */
-#define PageFsCache(page) PagePrivate2((page))
-#define SetPageFsCache(page) SetPagePrivate2((page))
-#define ClearPageFsCache(page) ClearPagePrivate2((page))
-#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
-#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
+enum netfs_sreq_ref_trace;
+typedef struct mempool mempool_t;
+struct folio_queue;
/**
- * set_page_fscache - Set PG_fscache on a page and take a ref
- * @page: The page.
+ * folio_start_private_2 - Start an fscache write on a folio. [DEPRECATED]
+ * @folio: The folio.
*
- * Set the PG_fscache (PG_private_2) flag on a page and take the reference
- * needed for the VM to handle its lifetime correctly. This sets the flag and
- * takes the reference unconditionally, so care must be taken not to set the
- * flag again if it's already set.
+ * Call this function before writing a folio to a local cache. Starting a
+ * second write before the first one finishes is not allowed.
+ *
+ * Note that this should no longer be used.
*/
-static inline void set_page_fscache(struct page *page)
+static inline void folio_start_private_2(struct folio *folio)
{
- set_page_private_2(page);
+ VM_BUG_ON_FOLIO(folio_test_private_2(folio), folio);
+ folio_get(folio);
+ folio_set_private_2(folio);
}
-/**
- * end_page_fscache - Clear PG_fscache and release any waiters
- * @page: The page
- *
- * Clear the PG_fscache (PG_private_2) bit on a page and wake up any sleepers
- * waiting for this. The page ref held for PG_private_2 being set is released.
- *
- * This is, for example, used when a netfs page is being written to a local
- * disk cache, thereby allowing writes to the cache for the same page to be
- * serialised.
+enum netfs_io_source {
+ NETFS_SOURCE_UNKNOWN,
+ NETFS_FILL_WITH_ZEROES,
+ NETFS_DOWNLOAD_FROM_SERVER,
+ NETFS_READ_FROM_CACHE,
+ NETFS_INVALID_READ,
+ NETFS_UPLOAD_TO_SERVER,
+ NETFS_WRITE_TO_CACHE,
+} __mode(byte);
+
+typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error);
+
+/*
+ * Per-inode context. This wraps the VFS inode.
*/
-static inline void end_page_fscache(struct page *page)
+struct netfs_inode {
+ struct inode inode; /* The VFS inode */
+ const struct netfs_request_ops *ops;
+#if IS_ENABLED(CONFIG_FSCACHE)
+ struct fscache_cookie *cache;
+#endif
+ struct mutex wb_lock; /* Writeback serialisation */
+ loff_t remote_i_size; /* Size of the remote file */
+ loff_t zero_point; /* Size after which we assume there's no data
+ * on the server */
+ atomic_t io_count; /* Number of outstanding reqs */
+ unsigned long flags;
+#define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */
+#define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */
+#define NETFS_ICTX_MODIFIED_ATTR 3 /* Indicate change in mtime/ctime */
+#define NETFS_ICTX_SINGLE_NO_UPLOAD 4 /* Monolithic payload, cache but no upload */
+};
+
+/*
+ * A netfs group - for instance a ceph snap. This is marked on dirty pages and
+ * pages marked with a group must be flushed before they can be written under
+ * the domain of another group.
+ */
+struct netfs_group {
+ refcount_t ref;
+ void (*free)(struct netfs_group *netfs_group);
+};
+
+/*
+ * Information about a dirty page (attached only if necessary).
+ * folio->private
+ */
+struct netfs_folio {
+ struct netfs_group *netfs_group; /* Filesystem's grouping marker (or NULL). */
+ unsigned int dirty_offset; /* Write-streaming dirty data offset */
+ unsigned int dirty_len; /* Write-streaming dirty data length */
+};
+#define NETFS_FOLIO_INFO 0x1UL /* OR'd with folio->private. */
+#define NETFS_FOLIO_COPY_TO_CACHE ((struct netfs_group *)0x356UL) /* Write to the cache only */
+
+static inline bool netfs_is_folio_info(const void *priv)
{
- end_page_private_2(page);
+ return (unsigned long)priv & NETFS_FOLIO_INFO;
}
-/**
- * wait_on_page_fscache - Wait for PG_fscache to be cleared on a page
- * @page: The page to wait on
- *
- * Wait for PG_fscache (aka PG_private_2) to be cleared on a page.
- */
-static inline void wait_on_page_fscache(struct page *page)
+static inline struct netfs_folio *__netfs_folio_info(const void *priv)
{
- wait_on_page_private_2(page);
+ if (netfs_is_folio_info(priv))
+ return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO);
+ return NULL;
}
-/**
- * wait_on_page_fscache_killable - Wait for PG_fscache to be cleared on a page
- * @page: The page to wait on
- *
- * Wait for PG_fscache (aka PG_private_2) to be cleared on a page or until a
- * fatal signal is received by the calling task.
- *
- * Return:
- * - 0 if successful.
- * - -EINTR if a fatal signal was encountered.
- */
-static inline int wait_on_page_fscache_killable(struct page *page)
+static inline struct netfs_folio *netfs_folio_info(struct folio *folio)
{
- return wait_on_page_private_2_killable(page);
+ return __netfs_folio_info(folio_get_private(folio));
}
-enum netfs_read_source {
- NETFS_FILL_WITH_ZEROES,
- NETFS_DOWNLOAD_FROM_SERVER,
- NETFS_READ_FROM_CACHE,
- NETFS_INVALID_READ,
-} __mode(byte);
+static inline struct netfs_group *netfs_folio_group(struct folio *folio)
+{
+ struct netfs_folio *finfo;
+ void *priv = folio_get_private(folio);
+
+ finfo = netfs_folio_info(folio);
+ if (finfo)
+ return finfo->netfs_group;
+ return priv;
+}
-typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
- bool was_async);
+/*
+ * Stream of I/O subrequests going to a particular destination, such as the
+ * server or the local cache. This is mainly intended for writing where we may
+ * have to write to multiple destinations concurrently.
+ */
+struct netfs_io_stream {
+ /* Submission tracking */
+ struct netfs_io_subrequest *construct; /* Op being constructed */
+ size_t sreq_max_len; /* Maximum size of a subrequest */
+ unsigned int sreq_max_segs; /* 0 or max number of segments in an iterator */
+ unsigned int submit_off; /* Folio offset we're submitting from */
+ unsigned int submit_len; /* Amount of data left to submit */
+ unsigned int submit_extendable_to; /* Amount I/O can be rounded up to */
+ void (*prepare_write)(struct netfs_io_subrequest *subreq);
+ void (*issue_write)(struct netfs_io_subrequest *subreq);
+ /* Collection tracking */
+ struct list_head subrequests; /* Contributory I/O operations */
+ struct netfs_io_subrequest *front; /* Op being collected */
+ unsigned long long collected_to; /* Position we've collected results to */
+ size_t transferred; /* The amount transferred from this stream */
+ unsigned short error; /* Aggregate error for the stream */
+ enum netfs_io_source source; /* Where to read from/write to */
+ unsigned char stream_nr; /* Index of stream in parent table */
+ bool avail; /* T if stream is available */
+ bool active; /* T if stream is active */
+ bool need_retry; /* T if this stream needs retrying */
+ bool failed; /* T if this stream failed */
+ bool transferred_valid; /* T if ->transferred is valid */
+};
/*
* Resources required to do operations on a cache.
@@ -102,81 +160,169 @@ struct netfs_cache_resources {
const struct netfs_cache_ops *ops;
void *cache_priv;
void *cache_priv2;
+ unsigned int debug_id; /* Cookie debug ID */
+ unsigned int inval_counter; /* object->inval_counter at begin_op */
};
/*
- * Descriptor for a single component subrequest.
+ * Descriptor for a single component subrequest. Each operation represents an
+ * individual read/write from/to a server, a cache, a journal, etc..
+ *
+ * The buffer iterator is persistent for the life of the subrequest struct and
+ * the pages it points to can be relied on to exist for the duration.
*/
-struct netfs_read_subrequest {
- struct netfs_read_request *rreq; /* Supervising read request */
+struct netfs_io_subrequest {
+ struct netfs_io_request *rreq; /* Supervising I/O request */
+ struct work_struct work;
struct list_head rreq_link; /* Link in rreq->subrequests */
- loff_t start; /* Where to start the I/O */
+ struct iov_iter io_iter; /* Iterator for this subrequest */
+ unsigned long long start; /* Where to start the I/O */
size_t len; /* Size of the I/O */
size_t transferred; /* Amount of data transferred */
- refcount_t usage;
+ refcount_t ref;
short error; /* 0 or error that occurred */
unsigned short debug_index; /* Index in list (for debugging output) */
- enum netfs_read_source source; /* Where to read from */
+ unsigned int nr_segs; /* Number of segs in io_iter */
+ u8 retry_count; /* The number of retries (0 on initial pass) */
+ enum netfs_io_source source; /* Where to read from/write to */
+ unsigned char stream_nr; /* I/O stream this belongs to */
unsigned long flags;
-#define NETFS_SREQ_WRITE_TO_CACHE 0 /* Set if should write to cache */
+#define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */
#define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */
-#define NETFS_SREQ_SHORT_READ 2 /* Set if there was a short read from the cache */
-#define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */
-#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */
+#define NETFS_SREQ_MADE_PROGRESS 4 /* Set if we transferred at least some data */
+#define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */
+#define NETFS_SREQ_BOUNDARY 6 /* Set if ends on hard boundary (eg. ceph object) */
+#define NETFS_SREQ_HIT_EOF 7 /* Set if short due to EOF */
+#define NETFS_SREQ_IN_PROGRESS 8 /* Unlocked when the subrequest completes */
+#define NETFS_SREQ_NEED_RETRY 9 /* Set if the filesystem requests a retry */
+#define NETFS_SREQ_FAILED 10 /* Set if the subreq failed unretryably */
};
+enum netfs_io_origin {
+ NETFS_READAHEAD, /* This read was triggered by readahead */
+ NETFS_READPAGE, /* This read is a synchronous read */
+ NETFS_READ_GAPS, /* This read is a synchronous read to fill gaps */
+ NETFS_READ_SINGLE, /* This read should be treated as a single object */
+ NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
+ NETFS_UNBUFFERED_READ, /* This is an unbuffered read */
+ NETFS_DIO_READ, /* This is a direct I/O read */
+ NETFS_WRITEBACK, /* This write was triggered by writepages */
+ NETFS_WRITEBACK_SINGLE, /* This monolithic write was triggered by writepages */
+ NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */
+ NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */
+ NETFS_DIO_WRITE, /* This is a direct I/O write */
+ NETFS_PGPRIV2_COPY_TO_CACHE, /* [DEPRECATED] This is writing read data to the cache */
+ nr__netfs_io_origin
+} __mode(byte);
+
/*
- * Descriptor for a read helper request. This is used to make multiple I/O
- * requests on a variety of sources and then stitch the result together.
+ * Descriptor for an I/O helper request. This is used to make multiple I/O
+ * operations to a variety of data stores and then stitch the result together.
*/
-struct netfs_read_request {
- struct work_struct work;
+struct netfs_io_request {
+ union {
+ struct work_struct cleanup_work; /* Deferred cleanup work */
+ struct rcu_head rcu;
+ };
+ struct work_struct work; /* Result collector work */
struct inode *inode; /* The file being accessed */
struct address_space *mapping; /* The mapping being accessed */
+ struct kiocb *iocb; /* AIO completion vector */
struct netfs_cache_resources cache_resources;
- struct list_head subrequests; /* Requests to fetch I/O from disk or net */
+ struct netfs_io_request *copy_to_cache; /* Request to write just-read data to the cache */
+#ifdef CONFIG_PROC_FS
+ struct list_head proc_link; /* Link in netfs_iorequests */
+#endif
+ struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */
+#define NR_IO_STREAMS 2 //wreq->nr_io_streams
+ struct netfs_group *group; /* Writeback group being written back */
+ struct rolling_buffer buffer; /* Unencrypted buffer */
+#define NETFS_ROLLBUF_PUT_MARK ROLLBUF_MARK_1
+#define NETFS_ROLLBUF_PAGECACHE_MARK ROLLBUF_MARK_2
+ wait_queue_head_t waitq; /* Processor waiter */
void *netfs_priv; /* Private data for the netfs */
+ void *netfs_priv2; /* Private data for the netfs */
+ struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */
+ unsigned long long submitted; /* Amount submitted for I/O so far */
+ unsigned long long len; /* Length of the request */
+ size_t transferred; /* Amount to be indicated as transferred */
+ long error; /* 0 or error that occurred */
+ unsigned long long i_size; /* Size of the file */
+ unsigned long long start; /* Start position */
+ atomic64_t issued_to; /* Write issuer folio cursor */
+ unsigned long long collected_to; /* Point we've collected to */
+ unsigned long long cleaned_to; /* Position we've cleaned folios to */
+ unsigned long long abandon_to; /* Position to abandon folios to */
+ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
+ unsigned int direct_bv_count; /* Number of elements in direct_bv[] */
unsigned int debug_id;
- unsigned int cookie_debug_id;
- atomic_t nr_rd_ops; /* Number of read ops in progress */
- atomic_t nr_wr_ops; /* Number of write ops in progress */
- size_t submitted; /* Amount submitted for I/O so far */
- size_t len; /* Length of the request */
- short error; /* 0 or error that occurred */
- loff_t i_size; /* Size of the file */
- loff_t start; /* Start position */
- pgoff_t no_unlock_page; /* Don't unlock this page after read */
- refcount_t usage;
+ unsigned int rsize; /* Maximum read size (0 for none) */
+ unsigned int wsize; /* Maximum write size (0 for none) */
+ atomic_t subreq_counter; /* Next subreq->debug_index */
+ unsigned int nr_group_rel; /* Number of refs to release on ->group */
+ spinlock_t lock; /* Lock for queuing subreqs */
+ unsigned char front_folio_order; /* Order (size) of front folio */
+ enum netfs_io_origin origin; /* Origin of the request */
+ bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
+ refcount_t ref;
unsigned long flags;
-#define NETFS_RREQ_INCOMPLETE_IO 0 /* Some ioreqs terminated short or with error */
-#define NETFS_RREQ_WRITE_TO_CACHE 1 /* Need to write to the cache */
-#define NETFS_RREQ_NO_UNLOCK_PAGE 2 /* Don't unlock no_unlock_page on completion */
-#define NETFS_RREQ_DONT_UNLOCK_PAGES 3 /* Don't unlock the pages on completion */
-#define NETFS_RREQ_FAILED 4 /* The request failed */
-#define NETFS_RREQ_IN_PROGRESS 5 /* Unlocked when the request completes */
- const struct netfs_read_request_ops *netfs_ops;
+#define NETFS_RREQ_IN_PROGRESS 0 /* Unlocked when the request completes (has ref) */
+#define NETFS_RREQ_ALL_QUEUED 1 /* All subreqs are now queued */
+#define NETFS_RREQ_PAUSE 2 /* Pause subrequest generation */
+#define NETFS_RREQ_FAILED 3 /* The request failed */
+#define NETFS_RREQ_RETRYING 4 /* Set if we're in the retry path */
+#define NETFS_RREQ_SHORT_TRANSFER 5 /* Set if we have a short transfer */
+#define NETFS_RREQ_OFFLOAD_COLLECTION 8 /* Offload collection to workqueue */
+#define NETFS_RREQ_NO_UNLOCK_FOLIO 9 /* Don't unlock no_unlock_folio on completion */
+#define NETFS_RREQ_FOLIO_COPY_TO_CACHE 10 /* Copy current folio to cache from read */
+#define NETFS_RREQ_UPLOAD_TO_SERVER 11 /* Need to write to the server */
+#define NETFS_RREQ_USE_IO_ITER 12 /* Use ->io_iter rather than ->i_pages */
+#define NETFS_RREQ_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark
+ * write to cache on read */
+ const struct netfs_request_ops *netfs_ops;
};
/*
* Operations the network filesystem can/must provide to the helpers.
*/
-struct netfs_read_request_ops {
- bool (*is_cache_enabled)(struct inode *inode);
- void (*init_rreq)(struct netfs_read_request *rreq, struct file *file);
- int (*begin_cache_operation)(struct netfs_read_request *rreq);
- void (*expand_readahead)(struct netfs_read_request *rreq);
- bool (*clamp_length)(struct netfs_read_subrequest *subreq);
- void (*issue_op)(struct netfs_read_subrequest *subreq);
- bool (*is_still_valid)(struct netfs_read_request *rreq);
+struct netfs_request_ops {
+ mempool_t *request_pool;
+ mempool_t *subrequest_pool;
+ int (*init_request)(struct netfs_io_request *rreq, struct file *file);
+ void (*free_request)(struct netfs_io_request *rreq);
+ void (*free_subrequest)(struct netfs_io_subrequest *rreq);
+
+ /* Read request handling */
+ void (*expand_readahead)(struct netfs_io_request *rreq);
+ int (*prepare_read)(struct netfs_io_subrequest *subreq);
+ void (*issue_read)(struct netfs_io_subrequest *subreq);
+ bool (*is_still_valid)(struct netfs_io_request *rreq);
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
- struct page *page, void **_fsdata);
- void (*done)(struct netfs_read_request *rreq);
- void (*cleanup)(struct address_space *mapping, void *netfs_priv);
+ struct folio **foliop, void **_fsdata);
+ void (*done)(struct netfs_io_request *rreq);
+
+ /* Modification handling */
+ void (*update_i_size)(struct inode *inode, loff_t i_size);
+ void (*post_modify)(struct inode *inode);
+
+ /* Write request handling */
+ void (*begin_writeback)(struct netfs_io_request *wreq);
+ void (*prepare_write)(struct netfs_io_subrequest *subreq);
+ void (*issue_write)(struct netfs_io_subrequest *subreq);
+ void (*retry_request)(struct netfs_io_request *wreq, struct netfs_io_stream *stream);
+ void (*invalidate_cache)(struct netfs_io_request *wreq);
};
/*
- * Table of operations for access to a cache. This is obtained by
- * rreq->ops->begin_cache_operation().
+ * How to handle reading from a hole.
+ */
+enum netfs_read_from_hole {
+ NETFS_READ_HOLE_IGNORE,
+ NETFS_READ_HOLE_FAIL,
+};
+
+/*
+ * Table of operations for access to a cache.
*/
struct netfs_cache_ops {
/* End an operation */
@@ -186,7 +332,7 @@ struct netfs_cache_ops {
int (*read)(struct netfs_cache_resources *cres,
loff_t start_pos,
struct iov_iter *iter,
- bool seek_data,
+ enum netfs_read_from_hole read_hole,
netfs_io_terminated_t term_func,
void *term_func_priv);
@@ -197,38 +343,213 @@ struct netfs_cache_ops {
netfs_io_terminated_t term_func,
void *term_func_priv);
+ /* Write data to the cache from a netfs subrequest. */
+ void (*issue_write)(struct netfs_io_subrequest *subreq);
+
/* Expand readahead request */
void (*expand_readahead)(struct netfs_cache_resources *cres,
- loff_t *_start, size_t *_len, loff_t i_size);
+ unsigned long long *_start,
+ unsigned long long *_len,
+ unsigned long long i_size);
/* Prepare a read operation, shortening it to a cached/uncached
* boundary as appropriate.
*/
- enum netfs_read_source (*prepare_read)(struct netfs_read_subrequest *subreq,
- loff_t i_size);
+ enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq,
+ unsigned long long i_size);
+
+ /* Prepare a write subrequest, working out if we're allowed to do it
+ * and finding out the maximum amount of data to gather before
+ * attempting to submit. If we're not permitted to do it, the
+ * subrequest should be marked failed.
+ */
+ void (*prepare_write_subreq)(struct netfs_io_subrequest *subreq);
/* Prepare a write operation, working out what part of the write we can
* actually do.
*/
int (*prepare_write)(struct netfs_cache_resources *cres,
- loff_t *_start, size_t *_len, loff_t i_size);
+ loff_t *_start, size_t *_len, size_t upper_len,
+ loff_t i_size, bool no_space_allocated_yet);
+
+ /* Prepare an on-demand read operation, shortening it to a cached/uncached
+ * boundary as appropriate.
+ */
+ enum netfs_io_source (*prepare_ondemand_read)(struct netfs_cache_resources *cres,
+ loff_t start, size_t *_len,
+ loff_t i_size,
+ unsigned long *_flags, ino_t ino);
+
+ /* Query the occupancy of the cache in a region, returning where the
+ * next chunk of data starts and how long it is.
+ */
+ int (*query_occupancy)(struct netfs_cache_resources *cres,
+ loff_t start, size_t len, size_t granularity,
+ loff_t *_data_start, size_t *_data_len);
};
+/* High-level read API. */
+ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t netfs_unbuffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+
+/* High-level write API */
+ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
+ struct netfs_group *netfs_group);
+ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
+ struct netfs_group *netfs_group);
+ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from);
+ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter,
+ struct netfs_group *netfs_group);
+ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from);
+
+/* Single, monolithic object read/write API. */
+void netfs_single_mark_inode_dirty(struct inode *inode);
+ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_iter *iter);
+int netfs_writeback_single(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct iov_iter *iter);
+
+/* Address operations API */
struct readahead_control;
-extern void netfs_readahead(struct readahead_control *,
- const struct netfs_read_request_ops *,
- void *);
-extern int netfs_readpage(struct file *,
- struct page *,
- const struct netfs_read_request_ops *,
- void *);
-extern int netfs_write_begin(struct file *, struct address_space *,
- loff_t, unsigned int, unsigned int, struct page **,
- void **,
- const struct netfs_read_request_ops *,
- void *);
-
-extern void netfs_subreq_terminated(struct netfs_read_subrequest *, ssize_t, bool);
-extern void netfs_stats_show(struct seq_file *);
+void netfs_readahead(struct readahead_control *);
+int netfs_read_folio(struct file *, struct folio *);
+int netfs_write_begin(struct netfs_inode *, struct file *,
+ struct address_space *, loff_t pos, unsigned int len,
+ struct folio **, void **fsdata);
+int netfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc);
+bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
+int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
+void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
+void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
+bool netfs_release_folio(struct folio *folio, gfp_t gfp);
+
+/* VMA operations API. */
+vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
+
+/* (Sub)request management API. */
+void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq);
+void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq);
+void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+ enum netfs_sreq_ref_trace what);
+void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
+ enum netfs_sreq_ref_trace what);
+ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
+ struct iov_iter *new,
+ iov_iter_extraction_t extraction_flags);
+size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
+ size_t max_size, size_t max_segs);
+void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq);
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error);
+
+int netfs_start_io_read(struct inode *inode);
+void netfs_end_io_read(struct inode *inode);
+int netfs_start_io_write(struct inode *inode);
+void netfs_end_io_write(struct inode *inode);
+int netfs_start_io_direct(struct inode *inode);
+void netfs_end_io_direct(struct inode *inode);
+
+/* Miscellaneous APIs. */
+struct folio_queue *netfs_folioq_alloc(unsigned int rreq_id, gfp_t gfp,
+ unsigned int trace /*enum netfs_folioq_trace*/);
+void netfs_folioq_free(struct folio_queue *folioq,
+ unsigned int trace /*enum netfs_trace_folioq*/);
+
+/* Buffer wrangling helpers API. */
+int netfs_alloc_folioq_buffer(struct address_space *mapping,
+ struct folio_queue **_buffer,
+ size_t *_cur_size, ssize_t size, gfp_t gfp);
+void netfs_free_folioq_buffer(struct folio_queue *fq);
+
+/**
+ * netfs_inode - Get the netfs inode context from the inode
+ * @inode: The inode to query
+ *
+ * Get the netfs lib inode context from the network filesystem's inode. The
+ * context struct is expected to directly follow on from the VFS inode struct.
+ */
+static inline struct netfs_inode *netfs_inode(struct inode *inode)
+{
+ return container_of(inode, struct netfs_inode, inode);
+}
+
+/**
+ * netfs_inode_init - Initialise a netfslib inode context
+ * @ctx: The netfs inode to initialise
+ * @ops: The netfs's operations list
+ * @use_zero_point: True to use the zero_point read optimisation
+ *
+ * Initialise the netfs library context struct. This is expected to follow on
+ * directly from the VFS inode struct.
+ */
+static inline void netfs_inode_init(struct netfs_inode *ctx,
+ const struct netfs_request_ops *ops,
+ bool use_zero_point)
+{
+ ctx->ops = ops;
+ ctx->remote_i_size = i_size_read(&ctx->inode);
+ ctx->zero_point = LLONG_MAX;
+ ctx->flags = 0;
+ atomic_set(&ctx->io_count, 0);
+#if IS_ENABLED(CONFIG_FSCACHE)
+ ctx->cache = NULL;
+#endif
+ mutex_init(&ctx->wb_lock);
+ /* ->releasepage() drives zero_point */
+ if (use_zero_point) {
+ ctx->zero_point = ctx->remote_i_size;
+ mapping_set_release_always(ctx->inode.i_mapping);
+ }
+}
+
+/**
+ * netfs_resize_file - Note that a file got resized
+ * @ctx: The netfs inode being resized
+ * @new_i_size: The new file size
+ * @changed_on_server: The change was applied to the server
+ *
+ * Inform the netfs lib that a file got resized so that it can adjust its state.
+ */
+static inline void netfs_resize_file(struct netfs_inode *ctx, loff_t new_i_size,
+ bool changed_on_server)
+{
+ if (changed_on_server)
+ ctx->remote_i_size = new_i_size;
+ if (new_i_size < ctx->zero_point)
+ ctx->zero_point = new_i_size;
+}
+
+/**
+ * netfs_i_cookie - Get the cache cookie from the inode
+ * @ctx: The netfs inode to query
+ *
+ * Get the caching cookie (if enabled) from the network filesystem's inode.
+ */
+static inline struct fscache_cookie *netfs_i_cookie(struct netfs_inode *ctx)
+{
+#if IS_ENABLED(CONFIG_FSCACHE)
+ return ctx->cache;
+#else
+ return NULL;
+#endif
+}
+
+/**
+ * netfs_wait_for_outstanding_io - Wait for outstanding I/O to complete
+ * @inode: The netfs inode to wait on
+ *
+ * Wait for outstanding I/O requests of any type to complete. This is intended
+ * to be called from inode eviction routines. This makes sure that any
+ * resources held by those requests are cleaned up before we let the inode get
+ * cleaned up.
+ */
+static inline void netfs_wait_for_outstanding_io(struct inode *inode)
+{
+ struct netfs_inode *ictx = netfs_inode(inode);
+
+ wait_var_event(&ictx->io_count, atomic_read(&ictx->io_count) == 0);
+}
#endif /* _LINUX_NETFS_H */
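A minimal sketch of the embedding the netfs_inode() helper above relies on, using a hypothetical "myfs" filesystem: the container_of() only round-trips if the VFS inode sits inside struct netfs_inode, which the filesystem in turn places first in its own inode.

/* Minimal sketch, assuming a hypothetical "myfs" filesystem. The
 * netfs_inode must wrap the VFS inode so that netfs_inode() above
 * can recover the context, with private state following behind.
 */
struct myfs_inode {
	struct netfs_inode netfs;	/* must be first: embeds the VFS inode */
	unsigned long myfs_flags;	/* filesystem-private state follows */
};

static void myfs_init_netfs(struct myfs_inode *mi,
			    const struct netfs_request_ops *ops)
{
	/* Passing true enables the zero_point optimisation: reads past
	 * zero_point can be satisfied locally with zeroes.
	 */
	netfs_inode_init(&mi->netfs, ops, true);
}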
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 61b1c7fcc401..882e9c1b6c1d 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -34,6 +34,7 @@ struct netlink_skb_parms {
#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds)
+#define NETLINK_CTX_SIZE 48
void netlink_table_grab(void);
@@ -47,10 +48,9 @@ struct netlink_kernel_cfg {
unsigned int groups;
unsigned int flags;
void (*input)(struct sk_buff *skb);
- struct mutex *cb_mutex;
int (*bind)(struct net *net, int group);
void (*unbind)(struct net *net, int group);
- bool (*compare)(struct net *net, struct sock *sk);
+ void (*release) (struct sock *sk, unsigned long *groups);
};
struct sock *__netlink_kernel_create(struct net *net, int unit,
@@ -63,7 +63,8 @@ netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
}
/* this can be increased when necessary - don't expose to userland */
-#define NETLINK_MAX_COOKIE_LEN 20
+#define NETLINK_MAX_COOKIE_LEN 8
+#define NETLINK_MAX_FMTMSG_LEN 80
/**
* struct netlink_ext_ack - netlink extended ACK report struct
@@ -71,22 +72,28 @@ netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
* %NL_SET_ERR_MSG
* @bad_attr: attribute with error
* @policy: policy for a bad attribute
+ * @miss_type: attribute type which was missing
+ * @miss_nest: nest missing an attribute (%NULL if missing top level attr)
* @cookie: cookie data to return to userspace (for success)
* @cookie_len: actual cookie data length
+ * @_msg_buf: output buffer for formatted message strings - don't access
+ * directly, use %NL_SET_ERR_MSG_FMT
*/
struct netlink_ext_ack {
const char *_msg;
const struct nlattr *bad_attr;
const struct nla_policy *policy;
+ const struct nlattr *miss_nest;
+ u16 miss_type;
u8 cookie[NETLINK_MAX_COOKIE_LEN];
u8 cookie_len;
+ char _msg_buf[NETLINK_MAX_FMTMSG_LEN];
};
/* Always use this macro, this allows later putting the
* message into a separate section or such for things
* like translation or listing all possible messages.
- * Currently string formatting is not supported (due
- * to the lack of an output buffer.)
+ * If string formatting is needed use NL_SET_ERR_MSG_FMT.
*/
#define NL_SET_ERR_MSG(extack, msg) do { \
static const char __msg[] = msg; \
@@ -98,9 +105,41 @@ struct netlink_ext_ack {
__extack->_msg = __msg; \
} while (0)
+/* We splice fmt with %s at each end even in the snprintf so that both calls
+ * can use the same string constant, avoiding its duplication in .rodata
+ */
+#define NL_SET_ERR_MSG_FMT(extack, fmt, args...) do { \
+ struct netlink_ext_ack *__extack = (extack); \
+ \
+ if (!__extack) \
+ break; \
+ if (snprintf(__extack->_msg_buf, NETLINK_MAX_FMTMSG_LEN, \
+ "%s" fmt "%s", "", ##args, "") >= \
+ NETLINK_MAX_FMTMSG_LEN) \
+ net_warn_ratelimited("%s" fmt "%s", "truncated extack: ", \
+ ##args, "\n"); \
+ \
+ do_trace_netlink_extack(__extack->_msg_buf); \
+ \
+ __extack->_msg = __extack->_msg_buf; \
+} while (0)
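A usage sketch (function and names hypothetical): the macro records the formatted message in the extack buffer and emits a ratelimited warning if the 80-byte buffer truncated it, sharing one format string between both calls.

/* Illustrative only: validate a value and report failure through the
 * extack; the same format string feeds snprintf() and the warning.
 */
static int demo_check_limit(int value, int limit,
			    struct netlink_ext_ack *extack)
{
	if (value >= limit) {
		NL_SET_ERR_MSG_FMT(extack, "value %d must be below %d",
				   value, limit);
		return -EINVAL;
	}
	return 0;
}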
+
#define NL_SET_ERR_MSG_MOD(extack, msg) \
NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg)
+#define NL_SET_ERR_MSG_FMT_MOD(extack, fmt, args...) \
+ NL_SET_ERR_MSG_FMT((extack), KBUILD_MODNAME ": " fmt, ##args)
+
+#define NL_SET_ERR_MSG_WEAK(extack, msg) do { \
+ if ((extack) && !(extack)->_msg) \
+ NL_SET_ERR_MSG((extack), msg); \
+} while (0)
+
+#define NL_SET_ERR_MSG_WEAK_MOD(extack, msg) do { \
+ if ((extack) && !(extack)->_msg) \
+ NL_SET_ERR_MSG_MOD((extack), msg); \
+} while (0)
+
#define NL_SET_BAD_ATTR_POLICY(extack, attr, pol) do { \
if ((extack)) { \
(extack)->bad_attr = (attr); \
@@ -123,23 +162,57 @@ struct netlink_ext_ack {
} \
} while (0)
+#define NL_SET_ERR_MSG_ATTR_POL_FMT(extack, attr, pol, fmt, args...) do { \
+ struct netlink_ext_ack *__extack = (extack); \
+ \
+ if (!__extack) \
+ break; \
+ \
+ if (snprintf(__extack->_msg_buf, NETLINK_MAX_FMTMSG_LEN, \
+ "%s" fmt "%s", "", ##args, "") >= \
+ NETLINK_MAX_FMTMSG_LEN) \
+ net_warn_ratelimited("%s" fmt "%s", "truncated extack: ", \
+ ##args, "\n"); \
+ \
+ do_trace_netlink_extack(__extack->_msg_buf); \
+ \
+ __extack->_msg = __extack->_msg_buf; \
+ __extack->bad_attr = (attr); \
+ __extack->policy = (pol); \
+} while (0)
+
#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) \
NL_SET_ERR_MSG_ATTR_POL(extack, attr, NULL, msg)
+#define NL_SET_ERR_MSG_ATTR_FMT(extack, attr, msg, args...) \
+ NL_SET_ERR_MSG_ATTR_POL_FMT(extack, attr, NULL, msg, ##args)
+
+#define NL_SET_ERR_ATTR_MISS(extack, nest, type) do { \
+ struct netlink_ext_ack *__extack = (extack); \
+ \
+ if (__extack) { \
+ __extack->miss_nest = (nest); \
+ __extack->miss_type = (type); \
+ } \
+} while (0)
+
+#define NL_REQ_ATTR_CHECK(extack, nest, tb, type) ({ \
+ struct nlattr **__tb = (tb); \
+ u32 __attr = (type); \
+ int __retval; \
+ \
+ __retval = !__tb[__attr]; \
+ if (__retval) \
+ NL_SET_ERR_ATTR_MISS((extack), (nest), __attr); \
+ __retval; \
+})
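A sketch of the intended pattern for NL_REQ_ATTR_CHECK (the DEMO_* attribute space is hypothetical): a required attribute is tested and, if absent, the miss is recorded for userspace in a single step.

/* Illustration only: NULL as the nest argument means the attribute
 * was expected at the top level of the message.
 */
enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_NAME, __DEMO_ATTR_MAX };

static int demo_doit(struct nlattr **tb, struct netlink_ext_ack *extack)
{
	if (NL_REQ_ATTR_CHECK(extack, NULL, tb, DEMO_ATTR_NAME))
		return -EINVAL;	/* miss_type/miss_nest already recorded */
	return 0;
}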
+
static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
u64 cookie)
{
if (!extack)
return;
- memcpy(extack->cookie, &cookie, sizeof(cookie));
- extack->cookie_len = sizeof(cookie);
-}
-
-static inline void nl_set_extack_cookie_u32(struct netlink_ext_ack *extack,
- u32 cookie)
-{
- if (!extack)
- return;
+ BUILD_BUG_ON(sizeof(extack->cookie) < sizeof(cookie));
memcpy(extack->cookie, &cookie, sizeof(cookie));
extack->cookie_len = sizeof(cookie);
}
@@ -156,16 +229,19 @@ bool netlink_strict_get_check(struct sk_buff *skb);
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation);
+
+typedef int (*netlink_filter_fn)(struct sock *dsk, struct sk_buff *skb, void *data);
+
int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
__u32 portid, __u32 group, gfp_t allocation,
- int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
+ netlink_filter_fn filter,
void *filter_data);
int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
int netlink_register_notifier(struct notifier_block *nb);
int netlink_unregister_notifier(struct notifier_block *nb);
/* finegrained unicast helpers: */
-struct sock *netlink_getsockbyfilp(struct file *filp);
+struct sock *netlink_getsockbyfd(int fd);
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
long *timeo, struct sock *ssk);
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
@@ -216,9 +292,10 @@ struct netlink_callback {
u16 answer_flags;
u32 min_dump_alloc;
unsigned int prev_seq, seq;
+ int flags;
bool strict_check;
union {
- u8 ctx[48];
+ u8 ctx[NETLINK_CTX_SIZE];
/* args is deprecated. Cast a struct over ctx instead
* for proper type safety.
@@ -227,6 +304,10 @@ struct netlink_callback {
};
};
+#define NL_ASSERT_CTX_FITS(type_name) \
+ BUILD_BUG_ON(sizeof(type_name) > \
+ sizeof_field(struct netlink_callback, ctx))
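A sketch of the dump-context pattern that the "args is deprecated" comment above refers to (struct name illustrative): a private state struct is laid over cb->ctx, and NL_ASSERT_CTX_FITS() turns any future size overflow into a build failure rather than silent corruption.

struct demo_dump_ctx {
	unsigned long bucket;
	unsigned long entry_idx;
};

static int demo_dump_start(struct netlink_callback *cb)
{
	struct demo_dump_ctx *ctx = (struct demo_dump_ctx *)cb->ctx;

	NL_ASSERT_CTX_FITS(struct demo_dump_ctx);
	ctx->bucket = 0;
	ctx->entry_idx = 0;
	return 0;
}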
+
struct netlink_notify {
struct net *net;
u32 portid;
@@ -240,9 +321,11 @@ struct netlink_dump_control {
int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff *skb, struct netlink_callback *);
int (*done)(struct netlink_callback *);
+ struct netlink_ext_ack *extack;
void *data;
struct module *module;
u32 min_dump_alloc;
+ int flags;
};
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
@@ -273,5 +356,6 @@ bool netlink_ns_capable(const struct sk_buff *skb,
struct user_namespace *ns, int cap);
bool netlink_capable(const struct sk_buff *skb, int cap);
bool netlink_net_capable(const struct sk_buff *skb, int cap);
+struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast);
#endif /* __LINUX_NETLINK_H */
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index e6a2d72e0dc7..f22eec466040 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -15,24 +15,37 @@
#include <linux/refcount.h>
union inet_addr {
- __u32 all[4];
__be32 ip;
- __be32 ip6[4];
- struct in_addr in;
struct in6_addr in6;
};
struct netpoll {
struct net_device *dev;
+ netdevice_tracker dev_tracker;
+ /*
+ * Either dev_name or dev_mac can be used to specify the local
+ * interface - dev_name is used if it is a nonempty string, else
+ * dev_mac is used.
+ */
char dev_name[IFNAMSIZ];
+ u8 dev_mac[ETH_ALEN];
const char *name;
union inet_addr local_ip, remote_ip;
bool ipv6;
u16 local_port, remote_port;
u8 remote_mac[ETH_ALEN];
+ struct sk_buff_head skb_pool;
+ struct work_struct refill_wq;
};
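As the comment in the struct notes, either dev_name or dev_mac can select the local interface. An illustrative sketch (values hypothetical) of the two equivalent configurations:

/* Illustration only: an empty dev_name makes the setup code fall
 * back to matching dev_mac instead.
 */
static struct netpoll np_by_name = {
	.name		= "demo",
	.dev_name	= "eth0",
};

static struct netpoll np_by_mac = {
	.name		= "demo",
	.dev_name	= "",
	.dev_mac	= { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
};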
+#define np_info(np, fmt, ...) \
+ pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
+#define np_err(np, fmt, ...) \
+ pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
+#define np_notice(np, fmt, ...) \
+ pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
+
struct netpoll_info {
refcount_t refcnt;
@@ -42,7 +55,6 @@ struct netpoll_info {
struct delayed_work tx_work;
- struct netpoll *netpoll;
struct rcu_head rcu;
};
@@ -55,14 +67,12 @@ static inline void netpoll_poll_disable(struct net_device *dev) { return; }
static inline void netpoll_poll_enable(struct net_device *dev) { return; }
#endif
-void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
-void netpoll_print_options(struct netpoll *np);
-int netpoll_parse_options(struct netpoll *np, char *opt);
+int netpoll_send_udp(struct netpoll *np, const char *msg, int len);
int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
int netpoll_setup(struct netpoll *np);
-void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
+void do_netpoll_cleanup(struct netpoll *np);
netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
#ifdef CONFIG_NETPOLL
@@ -70,7 +80,7 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
struct net_device *dev = napi->dev;
- if (dev && dev->npinfo) {
+ if (dev && rcu_access_pointer(dev->npinfo)) {
int owner = smp_processor_id();
while (cmpxchg(&napi->poll_owner, -1, owner) != -1)
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index 0dc7ad38a0da..0906a0b40c6a 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -8,10 +8,20 @@
#ifndef _LINUX_NFS_H
#define _LINUX_NFS_H
+#include <linux/cred.h>
+#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/string.h>
+#include <linux/crc32.h>
#include <uapi/linux/nfs.h>
+/* The LOCALIO program is entirely private to Linux and is
+ * NOT part of the uapi.
+ */
+#define NFS_LOCALIO_PROGRAM 400122
+#define LOCALIOPROC_NULL 0
+#define LOCALIOPROC_UUID_IS_LOCAL 1
+
/*
* This is the kernel NFS client file handle representation
*/
@@ -36,14 +46,6 @@ static inline void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *sourc
memcpy(target->data, source->data, source->size);
}
-
-/*
- * This is really a general kernel constant, but since nothing like
- * this is defined in the kernel headers, I have to do it here.
- */
-#define NFS_OFFSET_MAX ((__s64)((~(__u64)0) >> 1))
-
-
enum nfs3_stable_how {
NFS_UNSTABLE = 0,
NFS_DATA_SYNC = 1,
@@ -52,4 +54,16 @@ enum nfs3_stable_how {
/* used by direct.c to mark verf as invalid */
NFS_INVALID_STABLE_HOW = -1
};
+
+/**
+ * nfs_fhandle_hash - calculate the crc32 hash for the filehandle
+ * @fh: pointer to filehandle
+ *
+ * Returns a crc32 hash for the filehandle that is compatible with
+ * the one displayed by "Wireshark".
+ */
+static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh)
+{
+ return ~crc32_le(0xFFFFFFFF, &fh->data[0], fh->size);
+}
#endif /* _LINUX_NFS_H */
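A hedged sketch of a typical consumer of nfs_fhandle_hash(): debug output or a tracepoint that wants a compact, Wireshark-compatible identifier for a file handle (the pr_debug() call here is illustrative, not from the source).

static void demo_log_fh(const struct nfs_fh *fh)
{
	pr_debug("fh hash=0x%08x\n", nfs_fhandle_hash(fh));
}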
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 15004c469807..e947af6a3684 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -17,6 +17,7 @@
#include <linux/uidgid.h>
#include <uapi/linux/nfs4.h>
#include <linux/sunrpc/msg_prot.h>
+#include <linux/sunrpc/xdrgen/nfs4_1.h>
enum nfs4_acl_whotype {
NFS4_ACL_WHO_NAMED = 0,
@@ -46,6 +47,7 @@ struct nfs4_acl {
struct nfs4_label {
uint32_t lfs;
uint32_t pi;
+ u32 lsmid;
u32 len;
char *label;
};
@@ -70,6 +72,7 @@ struct nfs4_stateid_struct {
NFS4_LAYOUT_STATEID_TYPE,
NFS4_PNFS_DS_STATEID_TYPE,
NFS4_REVOKED_STATEID_TYPE,
+ NFS4_FREED_STATEID_TYPE,
} type;
};
@@ -150,7 +153,7 @@ enum nfs_opnum4 {
OP_WRITE_SAME = 70,
OP_CLONE = 71,
- /* xattr support (RFC8726) */
+ /* xattr support (RFC8276) */
OP_GETXATTR = 72,
OP_SETXATTR = 73,
OP_LISTXATTRS = 74,
@@ -281,17 +284,25 @@ enum nfsstat4 {
/* nfs42 */
NFS4ERR_PARTNER_NOTSUPP = 10088,
NFS4ERR_PARTNER_NO_AUTH = 10089,
- NFS4ERR_UNION_NOTSUPP = 10090,
- NFS4ERR_OFFLOAD_DENIED = 10091,
- NFS4ERR_WRONG_LFS = 10092,
- NFS4ERR_BADLABEL = 10093,
- NFS4ERR_OFFLOAD_NO_REQS = 10094,
+ NFS4ERR_UNION_NOTSUPP = 10090,
+ NFS4ERR_OFFLOAD_DENIED = 10091,
+ NFS4ERR_WRONG_LFS = 10092,
+ NFS4ERR_BADLABEL = 10093,
+ NFS4ERR_OFFLOAD_NO_REQS = 10094,
/* xattr (RFC8276) */
- NFS4ERR_NOXATTR = 10095,
- NFS4ERR_XATTR2BIG = 10096,
+ NFS4ERR_NOXATTR = 10095,
+ NFS4ERR_XATTR2BIG = 10096,
+
+ /* can be used for internal errors */
+ NFS4ERR_FIRST_FREE
};
+/* error codes for internal client use */
+#define NFS4ERR_RESET_TO_MDS 12001
+#define NFS4ERR_RESET_TO_PNFS 12002
+#define NFS4ERR_FATAL_IOERROR 12003
+
static inline bool seqid_mutating_err(u32 err)
{
/* See RFC 7530, section 9.1.7 */
@@ -358,11 +369,13 @@ enum limit_by4 {
NFS4_LIMIT_BLOCKS = 2
};
-enum open_delegation_type4 {
+enum nfs4_open_delegation_type4 {
NFS4_OPEN_DELEGATE_NONE = 0,
NFS4_OPEN_DELEGATE_READ = 1,
NFS4_OPEN_DELEGATE_WRITE = 2,
NFS4_OPEN_DELEGATE_NONE_EXT = 3, /* 4.1 */
+ NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG = 4,
+ NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG = 5,
};
enum why_no_delegation4 { /* new to v4.1 */
@@ -385,77 +398,206 @@ enum lock_type4 {
NFS4_WRITEW_LT = 4
};
+/*
+ * Symbol names and values are from RFC 7531 Section 2.
+ * "XDR Description of NFSv4.0"
+ */
+enum {
+ FATTR4_SUPPORTED_ATTRS = 0,
+ FATTR4_TYPE = 1,
+ FATTR4_FH_EXPIRE_TYPE = 2,
+ FATTR4_CHANGE = 3,
+ FATTR4_SIZE = 4,
+ FATTR4_LINK_SUPPORT = 5,
+ FATTR4_SYMLINK_SUPPORT = 6,
+ FATTR4_NAMED_ATTR = 7,
+ FATTR4_FSID = 8,
+ FATTR4_UNIQUE_HANDLES = 9,
+ FATTR4_LEASE_TIME = 10,
+ FATTR4_RDATTR_ERROR = 11,
+ FATTR4_ACL = 12,
+ FATTR4_ACLSUPPORT = 13,
+ FATTR4_ARCHIVE = 14,
+ FATTR4_CANSETTIME = 15,
+ FATTR4_CASE_INSENSITIVE = 16,
+ FATTR4_CASE_PRESERVING = 17,
+ FATTR4_CHOWN_RESTRICTED = 18,
+ FATTR4_FILEHANDLE = 19,
+ FATTR4_FILEID = 20,
+ FATTR4_FILES_AVAIL = 21,
+ FATTR4_FILES_FREE = 22,
+ FATTR4_FILES_TOTAL = 23,
+ FATTR4_FS_LOCATIONS = 24,
+ FATTR4_HIDDEN = 25,
+ FATTR4_HOMOGENEOUS = 26,
+ FATTR4_MAXFILESIZE = 27,
+ FATTR4_MAXLINK = 28,
+ FATTR4_MAXNAME = 29,
+ FATTR4_MAXREAD = 30,
+ FATTR4_MAXWRITE = 31,
+ FATTR4_MIMETYPE = 32,
+ FATTR4_MODE = 33,
+ FATTR4_NO_TRUNC = 34,
+ FATTR4_NUMLINKS = 35,
+ FATTR4_OWNER = 36,
+ FATTR4_OWNER_GROUP = 37,
+ FATTR4_QUOTA_AVAIL_HARD = 38,
+ FATTR4_QUOTA_AVAIL_SOFT = 39,
+ FATTR4_QUOTA_USED = 40,
+ FATTR4_RAWDEV = 41,
+ FATTR4_SPACE_AVAIL = 42,
+ FATTR4_SPACE_FREE = 43,
+ FATTR4_SPACE_TOTAL = 44,
+ FATTR4_SPACE_USED = 45,
+ FATTR4_SYSTEM = 46,
+ FATTR4_TIME_ACCESS = 47,
+ FATTR4_TIME_ACCESS_SET = 48,
+ FATTR4_TIME_BACKUP = 49,
+ FATTR4_TIME_CREATE = 50,
+ FATTR4_TIME_DELTA = 51,
+ FATTR4_TIME_METADATA = 52,
+ FATTR4_TIME_MODIFY = 53,
+ FATTR4_TIME_MODIFY_SET = 54,
+ FATTR4_MOUNTED_ON_FILEID = 55,
+};
+
+/*
+ * Symbol names and values are from RFC 5662 Section 2.
+ * "XDR Description of NFSv4.1"
+ */
+enum {
+ FATTR4_DIR_NOTIF_DELAY = 56,
+ FATTR4_DIRENT_NOTIF_DELAY = 57,
+ FATTR4_DACL = 58,
+ FATTR4_SACL = 59,
+ FATTR4_CHANGE_POLICY = 60,
+ FATTR4_FS_STATUS = 61,
+ FATTR4_FS_LAYOUT_TYPES = 62,
+ FATTR4_LAYOUT_HINT = 63,
+ FATTR4_LAYOUT_TYPES = 64,
+ FATTR4_LAYOUT_BLKSIZE = 65,
+ FATTR4_LAYOUT_ALIGNMENT = 66,
+ FATTR4_FS_LOCATIONS_INFO = 67,
+ FATTR4_MDSTHRESHOLD = 68,
+ FATTR4_RETENTION_GET = 69,
+ FATTR4_RETENTION_SET = 70,
+ FATTR4_RETENTEVT_GET = 71,
+ FATTR4_RETENTEVT_SET = 72,
+ FATTR4_RETENTION_HOLD = 73,
+ FATTR4_MODE_SET_MASKED = 74,
+ FATTR4_SUPPATTR_EXCLCREAT = 75,
+ FATTR4_FS_CHARSET_CAP = 76,
+};
+
+/*
+ * Symbol names and values are from RFC 7863 Section 2.
+ * "XDR Description of NFSv4.2"
+ */
+enum {
+ FATTR4_CLONE_BLKSIZE = 77,
+ FATTR4_SPACE_FREED = 78,
+ FATTR4_CHANGE_ATTR_TYPE = 79,
+ FATTR4_SEC_LABEL = 80,
+};
+
+/*
+ * Symbol names and values are from RFC 8275 Section 5.
+ * "The mode_umask Attribute"
+ */
+enum {
+ FATTR4_MODE_UMASK = 81,
+};
+
+/*
+ * Symbol names and values are from RFC 8276 Section 8.6.
+ * "Numeric Values Assigned to Protocol Extensions"
+ */
+enum {
+ FATTR4_XATTR_SUPPORT = 82,
+};
+
+/*
+ * The following internal definitions enable processing the above
+ * attribute bits within 32-bit word boundaries.
+ */
/* Mandatory Attributes */
-#define FATTR4_WORD0_SUPPORTED_ATTRS (1UL << 0)
-#define FATTR4_WORD0_TYPE (1UL << 1)
-#define FATTR4_WORD0_FH_EXPIRE_TYPE (1UL << 2)
-#define FATTR4_WORD0_CHANGE (1UL << 3)
-#define FATTR4_WORD0_SIZE (1UL << 4)
-#define FATTR4_WORD0_LINK_SUPPORT (1UL << 5)
-#define FATTR4_WORD0_SYMLINK_SUPPORT (1UL << 6)
-#define FATTR4_WORD0_NAMED_ATTR (1UL << 7)
-#define FATTR4_WORD0_FSID (1UL << 8)
-#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9)
-#define FATTR4_WORD0_LEASE_TIME (1UL << 10)
-#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11)
+#define FATTR4_WORD0_SUPPORTED_ATTRS BIT(FATTR4_SUPPORTED_ATTRS)
+#define FATTR4_WORD0_TYPE BIT(FATTR4_TYPE)
+#define FATTR4_WORD0_FH_EXPIRE_TYPE BIT(FATTR4_FH_EXPIRE_TYPE)
+#define FATTR4_WORD0_CHANGE BIT(FATTR4_CHANGE)
+#define FATTR4_WORD0_SIZE BIT(FATTR4_SIZE)
+#define FATTR4_WORD0_LINK_SUPPORT BIT(FATTR4_LINK_SUPPORT)
+#define FATTR4_WORD0_SYMLINK_SUPPORT BIT(FATTR4_SYMLINK_SUPPORT)
+#define FATTR4_WORD0_NAMED_ATTR BIT(FATTR4_NAMED_ATTR)
+#define FATTR4_WORD0_FSID BIT(FATTR4_FSID)
+#define FATTR4_WORD0_UNIQUE_HANDLES BIT(FATTR4_UNIQUE_HANDLES)
+#define FATTR4_WORD0_LEASE_TIME BIT(FATTR4_LEASE_TIME)
+#define FATTR4_WORD0_RDATTR_ERROR BIT(FATTR4_RDATTR_ERROR)
/* Mandatory in NFSv4.1 */
-#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11)
+#define FATTR4_WORD2_SUPPATTR_EXCLCREAT BIT(FATTR4_SUPPATTR_EXCLCREAT - 64)
/* Recommended Attributes */
-#define FATTR4_WORD0_ACL (1UL << 12)
-#define FATTR4_WORD0_ACLSUPPORT (1UL << 13)
-#define FATTR4_WORD0_ARCHIVE (1UL << 14)
-#define FATTR4_WORD0_CANSETTIME (1UL << 15)
-#define FATTR4_WORD0_CASE_INSENSITIVE (1UL << 16)
-#define FATTR4_WORD0_CASE_PRESERVING (1UL << 17)
-#define FATTR4_WORD0_CHOWN_RESTRICTED (1UL << 18)
-#define FATTR4_WORD0_FILEHANDLE (1UL << 19)
-#define FATTR4_WORD0_FILEID (1UL << 20)
-#define FATTR4_WORD0_FILES_AVAIL (1UL << 21)
-#define FATTR4_WORD0_FILES_FREE (1UL << 22)
-#define FATTR4_WORD0_FILES_TOTAL (1UL << 23)
-#define FATTR4_WORD0_FS_LOCATIONS (1UL << 24)
-#define FATTR4_WORD0_HIDDEN (1UL << 25)
-#define FATTR4_WORD0_HOMOGENEOUS (1UL << 26)
-#define FATTR4_WORD0_MAXFILESIZE (1UL << 27)
-#define FATTR4_WORD0_MAXLINK (1UL << 28)
-#define FATTR4_WORD0_MAXNAME (1UL << 29)
-#define FATTR4_WORD0_MAXREAD (1UL << 30)
-#define FATTR4_WORD0_MAXWRITE (1UL << 31)
-#define FATTR4_WORD1_MIMETYPE (1UL << 0)
-#define FATTR4_WORD1_MODE (1UL << 1)
-#define FATTR4_WORD1_NO_TRUNC (1UL << 2)
-#define FATTR4_WORD1_NUMLINKS (1UL << 3)
-#define FATTR4_WORD1_OWNER (1UL << 4)
-#define FATTR4_WORD1_OWNER_GROUP (1UL << 5)
-#define FATTR4_WORD1_QUOTA_HARD (1UL << 6)
-#define FATTR4_WORD1_QUOTA_SOFT (1UL << 7)
-#define FATTR4_WORD1_QUOTA_USED (1UL << 8)
-#define FATTR4_WORD1_RAWDEV (1UL << 9)
-#define FATTR4_WORD1_SPACE_AVAIL (1UL << 10)
-#define FATTR4_WORD1_SPACE_FREE (1UL << 11)
-#define FATTR4_WORD1_SPACE_TOTAL (1UL << 12)
-#define FATTR4_WORD1_SPACE_USED (1UL << 13)
-#define FATTR4_WORD1_SYSTEM (1UL << 14)
-#define FATTR4_WORD1_TIME_ACCESS (1UL << 15)
-#define FATTR4_WORD1_TIME_ACCESS_SET (1UL << 16)
-#define FATTR4_WORD1_TIME_BACKUP (1UL << 17)
-#define FATTR4_WORD1_TIME_CREATE (1UL << 18)
-#define FATTR4_WORD1_TIME_DELTA (1UL << 19)
-#define FATTR4_WORD1_TIME_METADATA (1UL << 20)
-#define FATTR4_WORD1_TIME_MODIFY (1UL << 21)
-#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22)
-#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
-#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
-#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0)
-#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
-#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
-#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13)
-#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15)
-#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
-#define FATTR4_WORD2_MODE_UMASK (1UL << 17)
-#define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18)
+#define FATTR4_WORD0_ACL BIT(FATTR4_ACL)
+#define FATTR4_WORD0_ACLSUPPORT BIT(FATTR4_ACLSUPPORT)
+#define FATTR4_WORD0_ARCHIVE BIT(FATTR4_ARCHIVE)
+#define FATTR4_WORD0_CANSETTIME BIT(FATTR4_CANSETTIME)
+#define FATTR4_WORD0_CASE_INSENSITIVE BIT(FATTR4_CASE_INSENSITIVE)
+#define FATTR4_WORD0_CASE_PRESERVING BIT(FATTR4_CASE_PRESERVING)
+#define FATTR4_WORD0_CHOWN_RESTRICTED BIT(FATTR4_CHOWN_RESTRICTED)
+#define FATTR4_WORD0_FILEHANDLE BIT(FATTR4_FILEHANDLE)
+#define FATTR4_WORD0_FILEID BIT(FATTR4_FILEID)
+#define FATTR4_WORD0_FILES_AVAIL BIT(FATTR4_FILES_AVAIL)
+#define FATTR4_WORD0_FILES_FREE BIT(FATTR4_FILES_FREE)
+#define FATTR4_WORD0_FILES_TOTAL BIT(FATTR4_FILES_TOTAL)
+#define FATTR4_WORD0_FS_LOCATIONS BIT(FATTR4_FS_LOCATIONS)
+#define FATTR4_WORD0_HIDDEN BIT(FATTR4_HIDDEN)
+#define FATTR4_WORD0_HOMOGENEOUS BIT(FATTR4_HOMOGENEOUS)
+#define FATTR4_WORD0_MAXFILESIZE BIT(FATTR4_MAXFILESIZE)
+#define FATTR4_WORD0_MAXLINK BIT(FATTR4_MAXLINK)
+#define FATTR4_WORD0_MAXNAME BIT(FATTR4_MAXNAME)
+#define FATTR4_WORD0_MAXREAD BIT(FATTR4_MAXREAD)
+#define FATTR4_WORD0_MAXWRITE BIT(FATTR4_MAXWRITE)
+
+#define FATTR4_WORD1_MIMETYPE BIT(FATTR4_MIMETYPE - 32)
+#define FATTR4_WORD1_MODE BIT(FATTR4_MODE - 32)
+#define FATTR4_WORD1_NO_TRUNC BIT(FATTR4_NO_TRUNC - 32)
+#define FATTR4_WORD1_NUMLINKS BIT(FATTR4_NUMLINKS - 32)
+#define FATTR4_WORD1_OWNER BIT(FATTR4_OWNER - 32)
+#define FATTR4_WORD1_OWNER_GROUP BIT(FATTR4_OWNER_GROUP - 32)
+#define FATTR4_WORD1_QUOTA_HARD BIT(FATTR4_QUOTA_AVAIL_HARD - 32)
+#define FATTR4_WORD1_QUOTA_SOFT BIT(FATTR4_QUOTA_AVAIL_SOFT - 32)
+#define FATTR4_WORD1_QUOTA_USED BIT(FATTR4_QUOTA_USED - 32)
+#define FATTR4_WORD1_RAWDEV BIT(FATTR4_RAWDEV - 32)
+#define FATTR4_WORD1_SPACE_AVAIL BIT(FATTR4_SPACE_AVAIL - 32)
+#define FATTR4_WORD1_SPACE_FREE BIT(FATTR4_SPACE_FREE - 32)
+#define FATTR4_WORD1_SPACE_TOTAL BIT(FATTR4_SPACE_TOTAL - 32)
+#define FATTR4_WORD1_SPACE_USED BIT(FATTR4_SPACE_USED - 32)
+#define FATTR4_WORD1_SYSTEM BIT(FATTR4_SYSTEM - 32)
+#define FATTR4_WORD1_TIME_ACCESS BIT(FATTR4_TIME_ACCESS - 32)
+#define FATTR4_WORD1_TIME_ACCESS_SET BIT(FATTR4_TIME_ACCESS_SET - 32)
+#define FATTR4_WORD1_TIME_BACKUP BIT(FATTR4_TIME_BACKUP - 32)
+#define FATTR4_WORD1_TIME_CREATE BIT(FATTR4_TIME_CREATE - 32)
+#define FATTR4_WORD1_TIME_DELTA BIT(FATTR4_TIME_DELTA - 32)
+#define FATTR4_WORD1_TIME_METADATA BIT(FATTR4_TIME_METADATA - 32)
+#define FATTR4_WORD1_TIME_MODIFY BIT(FATTR4_TIME_MODIFY - 32)
+#define FATTR4_WORD1_TIME_MODIFY_SET BIT(FATTR4_TIME_MODIFY_SET - 32)
+#define FATTR4_WORD1_MOUNTED_ON_FILEID BIT(FATTR4_MOUNTED_ON_FILEID - 32)
+#define FATTR4_WORD1_DACL BIT(FATTR4_DACL - 32)
+#define FATTR4_WORD1_SACL BIT(FATTR4_SACL - 32)
+#define FATTR4_WORD1_FS_LAYOUT_TYPES BIT(FATTR4_FS_LAYOUT_TYPES - 32)
+
+#define FATTR4_WORD2_LAYOUT_TYPES BIT(FATTR4_LAYOUT_TYPES - 64)
+#define FATTR4_WORD2_LAYOUT_BLKSIZE BIT(FATTR4_LAYOUT_BLKSIZE - 64)
+#define FATTR4_WORD2_MDSTHRESHOLD BIT(FATTR4_MDSTHRESHOLD - 64)
+#define FATTR4_WORD2_CLONE_BLKSIZE BIT(FATTR4_CLONE_BLKSIZE - 64)
+#define FATTR4_WORD2_CHANGE_ATTR_TYPE BIT(FATTR4_CHANGE_ATTR_TYPE - 64)
+#define FATTR4_WORD2_SECURITY_LABEL BIT(FATTR4_SEC_LABEL - 64)
+#define FATTR4_WORD2_MODE_UMASK BIT(FATTR4_MODE_UMASK - 64)
+#define FATTR4_WORD2_XATTR_SUPPORT BIT(FATTR4_XATTR_SUPPORT - 64)
+#define FATTR4_WORD2_TIME_DELEG_ACCESS BIT(FATTR4_TIME_DELEG_ACCESS - 64)
+#define FATTR4_WORD2_TIME_DELEG_MODIFY BIT(FATTR4_TIME_DELEG_MODIFY - 64)
+#define FATTR4_WORD2_OPEN_ARGUMENTS BIT(FATTR4_OPEN_ARGUMENTS - 64)
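A worked example of the word/bit mapping above, expressed as a hypothetical helper: attribute number N lands in bitmap word N / 32 at bit N % 32, so FATTR4_SEC_LABEL (80) becomes word 2, BIT(80 - 64) == BIT(16).

/* Illustration only: test whether RFC attribute number @attr is set
 * in a bitmap of 32-bit words laid out as described above.
 */
static inline bool demo_fattr4_test(const u32 *bitmap, unsigned int attr)
{
	return bitmap[attr / 32] & (1U << (attr % 32));
}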
/* MDS threshold bitmap bits */
#define THRESHOLD_RD (1UL << 0)
@@ -537,6 +679,7 @@ enum {
NFSPROC4_CLNT_SEEK,
NFSPROC4_CLNT_ALLOCATE,
NFSPROC4_CLNT_DEALLOCATE,
+ NFSPROC4_CLNT_ZERO_RANGE,
NFSPROC4_CLNT_LAYOUTSTATS,
NFSPROC4_CLNT_CLONE,
NFSPROC4_CLNT_COPY,
@@ -551,6 +694,7 @@ enum {
NFSPROC4_CLNT_LISTXATTRS,
NFSPROC4_CLNT_REMOVEXATTR,
NFSPROC4_CLNT_READ_PLUS,
+ NFSPROC4_CLNT_OFFLOAD_STATUS,
};
/* nfs41 types */
@@ -571,6 +715,12 @@ enum state_protect_how4 {
SP4_SSV = 2
};
+/* GET_DIR_DELEGATION non-fatal status codes */
+enum gddrnf4_status {
+ GDD4_OK = 0,
+ GDD4_UNAVAIL = 1
+};
+
enum pnfs_layouttype {
LAYOUT_NFSV4_1_FILES = 1,
LAYOUT_OSD2_OBJECTS = 2,
@@ -726,4 +876,39 @@ enum nfs4_setxattr_options {
SETXATTR4_CREATE = 1,
SETXATTR4_REPLACE = 2,
};
+
+enum {
+ RCA4_TYPE_MASK_RDATA_DLG = 0,
+ RCA4_TYPE_MASK_WDATA_DLG = 1,
+ RCA4_TYPE_MASK_DIR_DLG = 2,
+ RCA4_TYPE_MASK_FILE_LAYOUT = 3,
+ RCA4_TYPE_MASK_BLK_LAYOUT = 4,
+ RCA4_TYPE_MASK_OBJ_LAYOUT_MIN = 8,
+ RCA4_TYPE_MASK_OBJ_LAYOUT_MAX = 9,
+ RCA4_TYPE_MASK_OTHER_LAYOUT_MIN = 12,
+ RCA4_TYPE_MASK_OTHER_LAYOUT_MAX = 15,
+};
+
+enum nfs_cb_opnum4 {
+ OP_CB_GETATTR = 3,
+ OP_CB_RECALL = 4,
+
+ /* Callback operations new to NFSv4.1 */
+ OP_CB_LAYOUTRECALL = 5,
+ OP_CB_NOTIFY = 6,
+ OP_CB_PUSH_DELEG = 7,
+ OP_CB_RECALL_ANY = 8,
+ OP_CB_RECALLABLE_OBJ_AVAIL = 9,
+ OP_CB_RECALL_SLOT = 10,
+ OP_CB_SEQUENCE = 11,
+ OP_CB_WANTS_CANCELLED = 12,
+ OP_CB_NOTIFY_LOCK = 13,
+ OP_CB_NOTIFY_DEVICEID = 14,
+
+ /* Callback operations new to NFSv4.2 */
+ OP_CB_OFFLOAD = 15,
+
+ OP_CB_ILLEGAL = 10044,
+};
+
#endif
diff --git a/include/linux/nfs_common.h b/include/linux/nfs_common.h
new file mode 100644
index 000000000000..a541c3a02887
--- /dev/null
+++ b/include/linux/nfs_common.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This file contains constants and methods used by both NFS client and server.
+ */
+#ifndef _LINUX_NFS_COMMON_H
+#define _LINUX_NFS_COMMON_H
+
+#include <linux/errno.h>
+#include <uapi/linux/nfs.h>
+
+/* Mapping from NFS error code to "errno" error code. */
+
+int nfs_stat_to_errno(enum nfs_stat status);
+int nfs4_stat_to_errno(int stat);
+
+__u32 nfs_localio_errno_to_nfs4_stat(int errno);
+
+#endif /* _LINUX_NFS_COMMON_H */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index ffba254d2098..c585939b6cd6 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -31,6 +31,10 @@
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/clnt.h>
+#ifdef CONFIG_NFS_FSCACHE
+#include <linux/netfs.h>
+#endif
+
#include <linux/nfs.h>
#include <linux/nfs2.h>
#include <linux/nfs3.h>
@@ -41,9 +45,9 @@
#include <linux/mempool.h>
/*
- * These are the default flags for swap requests
+ * These are the default for number of transports to different server IPs
*/
-#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS)
+#define NFS_MAX_TRANSPORTS 16
/*
* Size of the NFS directory verifier
@@ -56,7 +60,10 @@
struct nfs_access_entry {
struct rb_node rb_node;
struct list_head lru;
- const struct cred * cred;
+ kuid_t fsuid;
+ kgid_t fsgid;
+ struct group_info *group_info;
+ u64 timestamp;
__u32 mask;
struct rcu_head rcu_head;
};
@@ -70,34 +77,58 @@ struct nfs_lock_context {
struct rcu_head rcu_head;
};
+struct nfs_file_localio {
+ struct nfsd_file __rcu *ro_file;
+ struct nfsd_file __rcu *rw_file;
+ struct list_head list;
+ void __rcu *nfs_uuid; /* opaque pointer to 'nfs_uuid_t' */
+};
+
+static inline void nfs_localio_file_init(struct nfs_file_localio *nfl)
+{
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ nfl->ro_file = NULL;
+ nfl->rw_file = NULL;
+ INIT_LIST_HEAD(&nfl->list);
+ nfl->nfs_uuid = NULL;
+#endif
+}
+
struct nfs4_state;
struct nfs_open_context {
struct nfs_lock_context lock_context;
fl_owner_t flock_owner;
struct dentry *dentry;
const struct cred *cred;
- struct rpc_cred *ll_cred; /* low-level cred - use to check for expiry */
+ struct rpc_cred __rcu *ll_cred; /* low-level cred - use to check for expiry */
struct nfs4_state *state;
fmode_t mode;
+ int error;
unsigned long flags;
-#define NFS_CONTEXT_RESEND_WRITES (1)
#define NFS_CONTEXT_BAD (2)
#define NFS_CONTEXT_UNLOCK (3)
- int error;
+#define NFS_CONTEXT_FILE_OPEN (4)
- struct list_head list;
struct nfs4_threshold *mdsthreshold;
+ struct list_head list;
struct rcu_head rcu_head;
+ struct nfs_file_localio nfl;
};
struct nfs_open_dir_context {
struct list_head list;
+ atomic_t cache_hits;
+ atomic_t cache_misses;
unsigned long attr_gencount;
__be32 verf[NFS_DIR_VERIFIER_SIZE];
__u64 dir_cookie;
- __u64 dup_cookie;
- signed char duped;
+ __u64 last_cookie;
+ pgoff_t page_index;
+ unsigned int dtsize;
+ bool force_clear;
+ bool eof;
+ struct rcu_head rcu_head;
};
/*
@@ -130,6 +161,12 @@ struct nfs_inode {
unsigned long cache_validity; /* bit mask */
/*
+ * NFS Attributes not included in struct inode
+ */
+
+ struct timespec64 btime;
+
+ /*
* read_cache_jiffies is when we started read-caching this inode.
* attrtimeo is for how long the cached information is assumed
* to be valid. A successful attribute revalidation doubles
@@ -148,35 +185,72 @@ struct nfs_inode {
unsigned long attrtimeo_timestamp;
unsigned long attr_gencount;
- /* "Generation counter" for the attribute cache. This is
- * bumped whenever we update the metadata on the
- * server.
- */
- unsigned long cache_change_attribute;
struct rb_root access_cache;
struct list_head access_cache_entry_lru;
struct list_head access_cache_inode_lru;
- /*
- * This is the cookie verifier used for NFSv3 readdir
- * operations
- */
- __be32 cookieverf[NFS_DIR_VERIFIER_SIZE];
-
- atomic_long_t nrequests;
- struct nfs_mds_commit_info commit_info;
+ union {
+ /* Directory */
+ struct {
+ /* "Generation counter" for the attribute cache.
+ * This is bumped whenever we update the metadata
+ * on the server.
+ */
+ unsigned long cache_change_attribute;
+ /*
+ * This is the cookie verifier used for NFSv3 readdir
+ * operations
+ */
+ __be32 cookieverf[NFS_DIR_VERIFIER_SIZE];
+ /* Readers: in-flight sillydelete RPC calls */
+ /* Writers: rmdir */
+ struct rw_semaphore rmdir_sem;
+ };
+ /* Regular file */
+ struct {
+ atomic_long_t nrequests;
+ atomic_long_t redirtied_pages;
+ struct nfs_mds_commit_info commit_info;
+ struct mutex commit_mutex;
+ };
+ };
/* Open contexts for shared mmap writes */
struct list_head open_files;
- /* Readers: in-flight sillydelete RPC calls */
- /* Writers: rmdir */
- struct rw_semaphore rmdir_sem;
- struct mutex commit_mutex;
-
- /* track last access to cached pages */
- unsigned long page_index;
+ /* Keep track of out-of-order replies.
+ * The ooo array contains start/end pairs of
+ * numbers from the changeid sequence when
+ * the inode's iversion has been updated.
+ * It also contains end/start pair (i.e. reverse order)
+ * of sections of the changeid sequence that have
+ * been seen in replies from the server.
+ * Normally these should match and when both
+ * A:B and B:A are found in ooo, they are both removed.
+ * And if a reply with A:B causes an iversion update
+ * of A:B, then neither are added.
+ * When a reply has pre_change that doesn't match
+ * iversion, then the changeid pair and any consequent
+ * change in iversion ARE added. Later replies
+ * might fill in the gaps, or possibly a gap is caused
+ * by a change from another client.
+ * When a file or directory is opened, if the ooo table
+ * is not empty, then we assume the gaps were due to
+ * another client and we invalidate the cached data.
+ *
+ * We can only track a limited number of concurrent gaps.
+ * Currently that limit is 16.
+ * We allocate the table on demand. If there is insufficient
+ * memory, then we probably cannot cache the file anyway
+ * so there is no loss.
+ */
+ struct {
+ int cnt;
+ struct {
+ u64 start, end;
+ } gap[16];
+ } *ooo;
#if IS_ENABLED(CONFIG_NFS_V4)
struct nfs4_cached_acl *nfs4_acl;
@@ -191,14 +265,15 @@ struct nfs_inode {
/* how many bytes have been written/read and how many bytes queued up */
__u64 write_io;
__u64 read_io;
-#ifdef CONFIG_NFS_FSCACHE
- struct fscache_cookie *fscache;
-#endif
- struct inode vfs_inode;
-
#ifdef CONFIG_NFS_V4_2
struct nfs4_xattr_cache *xattr_cache;
#endif
+ union {
+ struct inode vfs_inode;
+#ifdef CONFIG_NFS_FSCACHE
+ struct netfs_inode netfs; /* netfs context and VFS inode */
+#endif
+ };
};
struct nfs4_copy_state {
@@ -234,7 +309,6 @@ struct nfs4_copy_state {
#define NFS_INO_INVALID_ATIME BIT(2) /* cached atime is invalid */
#define NFS_INO_INVALID_ACCESS BIT(3) /* cached access cred invalid */
#define NFS_INO_INVALID_ACL BIT(4) /* cached acls are invalid */
-#define NFS_INO_REVAL_PAGECACHE BIT(5) /* must revalidate pagecache */
#define NFS_INO_REVAL_FORCED BIT(6) /* force revalidation ignoring a delegation */
#define NFS_INO_INVALID_LABEL BIT(7) /* cached label is invalid */
#define NFS_INO_INVALID_CHANGE BIT(8) /* cached change is invalid */
@@ -248,10 +322,12 @@ struct nfs4_copy_state {
#define NFS_INO_INVALID_XATTR BIT(15) /* xattrs are invalid */
#define NFS_INO_INVALID_NLINK BIT(16) /* cached nlinks is invalid */
#define NFS_INO_INVALID_MODE BIT(17) /* cached mode is invalid */
+#define NFS_INO_INVALID_BTIME BIT(18) /* cached btime is invalid */
#define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \
| NFS_INO_INVALID_CTIME \
| NFS_INO_INVALID_MTIME \
+ | NFS_INO_INVALID_BTIME \
| NFS_INO_INVALID_SIZE \
| NFS_INO_INVALID_NLINK \
| NFS_INO_INVALID_MODE \
@@ -260,12 +336,10 @@ struct nfs4_copy_state {
/*
* Bit offsets in flags field
*/
-#define NFS_INO_ADVISE_RDPLUS (0) /* advise readdirplus */
#define NFS_INO_STALE (1) /* possible stale inode */
#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
#define NFS_INO_INVALIDATING (3) /* inode is being invalidated */
-#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
-#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
+#define NFS_INO_PRESERVE_UNLINKED (4) /* preserve file if removed while open */
#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
#define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */
@@ -318,15 +392,6 @@ static inline int NFS_STALE(const struct inode *inode)
return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
}
-static inline struct fscache_cookie *nfs_i_fscache(struct inode *inode)
-{
-#ifdef CONFIG_NFS_FSCACHE
- return NFS_I(inode)->fscache;
-#else
- return NULL;
-#endif
-}
-
static inline __u64 NFS_FILEID(const struct inode *inode)
{
return NFS_I(inode)->fileid;
@@ -342,17 +407,15 @@ static inline void nfs_mark_for_revalidate(struct inode *inode)
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
- nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE
- | NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL
- | NFS_INO_INVALID_CHANGE
- | NFS_INO_INVALID_CTIME;
+ nfsi->cache_validity |= NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
+ NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME |
+ NFS_INO_INVALID_SIZE;
if (S_ISDIR(inode->i_mode))
nfsi->cache_validity |= NFS_INO_INVALID_DATA;
spin_unlock(&inode->i_lock);
}
-static inline int nfs_server_capable(struct inode *inode, int cap)
+static inline int nfs_server_capable(const struct inode *inode, int cap)
{
return NFS_SERVER(inode)->caps & cap;
}
@@ -377,17 +440,17 @@ extern void nfs_zap_caches(struct inode *);
extern void nfs_set_inode_stale(struct inode *inode);
extern void nfs_invalidate_atime(struct inode *);
extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
- struct nfs_fattr *, struct nfs4_label *);
+ struct nfs_fattr *);
struct inode *nfs_ilookup(struct super_block *sb, struct nfs_fattr *, struct nfs_fh *);
extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr);
-extern int nfs_getattr(struct user_namespace *, const struct path *,
+extern int nfs_getattr(struct mnt_idmap *, const struct path *,
struct kstat *, u32, unsigned int);
-extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
+extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *, const struct cred *);
extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
-extern int nfs_permission(struct user_namespace *, struct inode *, int);
+extern int nfs_permission(struct mnt_idmap *, struct inode *, int);
extern int nfs_open(struct inode *, struct file *);
extern int nfs_attribute_cache_expired(struct inode *inode);
extern int nfs_revalidate_inode(struct inode *inode, unsigned long flags);
@@ -396,10 +459,9 @@ extern int nfs_clear_invalid_mapping(struct address_space *mapping);
extern bool nfs_mapping_need_revalidate_inode(struct inode *inode);
extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
extern int nfs_revalidate_mapping_rcu(struct inode *inode);
-extern int nfs_setattr(struct user_namespace *, struct dentry *, struct iattr *);
+extern int nfs_setattr(struct mnt_idmap *, struct dentry *, struct iattr *);
extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *);
-extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
- struct nfs4_label *label);
+extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr);
extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
extern void put_nfs_open_context(struct nfs_open_context *ctx);
extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct cred *cred, fmode_t mode);
@@ -415,9 +477,22 @@ extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr);
extern unsigned long nfs_inc_attr_generation_counter(void);
extern struct nfs_fattr *nfs_alloc_fattr(void);
+extern struct nfs_fattr *nfs_alloc_fattr_with_label(struct nfs_server *server);
+
+static inline void nfs4_label_free(struct nfs4_label *label)
+{
+#ifdef CONFIG_NFS_V4_SECURITY_LABEL
+ if (label) {
+ kfree(label->label);
+ kfree(label);
+ }
+#endif
+}
static inline void nfs_free_fattr(const struct nfs_fattr *fattr)
{
+ if (fattr)
+ nfs4_label_free(fattr->label);
kfree(fattr);
}
@@ -487,11 +562,11 @@ static inline const struct cred *nfs_file_cred(struct file *file)
/*
* linux/fs/nfs/direct.c
*/
-extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *);
-extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
- struct iov_iter *iter);
-extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
- struct iov_iter *iter);
+int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter);
+ssize_t nfs_file_direct_read(struct kiocb *iocb,
+ struct iov_iter *iter, bool swap);
+ssize_t nfs_file_direct_write(struct kiocb *iocb,
+ struct iov_iter *iter, bool swap);
/*
* linux/fs/nfs/dir.c
@@ -505,14 +580,16 @@ extern void nfs_set_verifier(struct dentry * dentry, unsigned long verf);
extern void nfs_clear_verifier_delegated(struct inode *inode);
#endif /* IS_ENABLED(CONFIG_NFS_V4) */
extern struct dentry *nfs_add_or_obtain(struct dentry *dentry,
- struct nfs_fh *fh, struct nfs_fattr *fattr,
- struct nfs4_label *label);
+ struct nfs_fh *fh, struct nfs_fattr *fattr);
extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh,
- struct nfs_fattr *fattr, struct nfs4_label *label);
+ struct nfs_fattr *fattr);
extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags);
extern void nfs_access_zap_cache(struct inode *inode);
-extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res,
- bool may_block);
+extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred,
+ u32 *mask, bool may_block);
+extern int nfs_atomic_open_v23(struct inode *dir, struct dentry *dentry,
+ struct file *file, unsigned int open_flags,
+ umode_t mode);
/*
* linux/fs/nfs/symlink.c
@@ -547,10 +624,10 @@ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
* linux/fs/nfs/write.c
*/
extern int nfs_congestion_kb;
-extern int nfs_writepage(struct page *page, struct writeback_control *wbc);
extern int nfs_writepages(struct address_space *, struct writeback_control *);
-extern int nfs_flush_incompatible(struct file *file, struct page *page);
-extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
+extern int nfs_flush_incompatible(struct file *file, struct folio *folio);
+extern int nfs_update_folio(struct file *file, struct folio *folio,
+ unsigned int offset, unsigned int count);
/*
* Try to write back everything synchronously (but check the
@@ -558,24 +635,26 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
*/
extern int nfs_sync_inode(struct inode *inode);
extern int nfs_wb_all(struct inode *inode);
-extern int nfs_wb_page(struct inode *inode, struct page *page);
-extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
+extern int nfs_wb_folio(struct inode *inode, struct folio *folio);
+int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
extern int nfs_commit_inode(struct inode *, int);
-extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail);
+extern struct nfs_commit_data *nfs_commitdata_alloc(void);
extern void nfs_commit_free(struct nfs_commit_data *data);
+void nfs_commit_begin(struct nfs_mds_commit_info *cinfo);
+bool nfs_commit_end(struct nfs_mds_commit_info *cinfo);
-static inline int
-nfs_have_writebacks(struct inode *inode)
+static inline bool nfs_have_writebacks(const struct inode *inode)
{
- return atomic_long_read(&NFS_I(inode)->nrequests) != 0;
+ if (S_ISREG(inode->i_mode))
+ return atomic_long_read(&NFS_I(inode)->nrequests) != 0;
+ return false;
}
/*
* linux/fs/nfs/read.c
*/
-extern int nfs_readpage(struct file *, struct page *);
-extern int nfs_readpages(struct file *, struct address_space *,
- struct list_head *, unsigned);
+int nfs_read_folio(struct file *, struct folio *);
+void nfs_readahead(struct readahead_control *);
/*
* inline functions
@@ -595,8 +674,31 @@ nfs_fileid_to_ino_t(u64 fileid)
return ino;
}
+static inline void nfs_ooo_clear(struct nfs_inode *nfsi)
+{
+ nfsi->cache_validity &= ~NFS_INO_DATA_INVAL_DEFER;
+ kfree(nfsi->ooo);
+ nfsi->ooo = NULL;
+}
+
+static inline bool nfs_ooo_test(struct nfs_inode *nfsi)
+{
+ return (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER) ||
+ (nfsi->ooo && nfsi->ooo->cnt > 0);
+}
+
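The gap table described in struct nfs_inode above can be modelled in isolation. This userspace sketch (not kernel code; names and structure simplified) shows how a forward pair and its reverse cancel, which is the property nfs_ooo_test() checks for:

/* Userspace model of the ooo gap table, illustration only. A reply
 * that bumps iversion from A to B records {A,B}; a late reply seen
 * with pre/post change A:B records the reverse pair {B,A}; matched
 * pairs cancel, leaving cnt == 0 when no real gap remains.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static struct {
	int cnt;
	struct { uint64_t start, end; } gap[16];
} ooo;

static bool ooo_record(uint64_t start, uint64_t end)
{
	for (int i = 0; i < ooo.cnt; i++) {
		if (ooo.gap[i].start == end && ooo.gap[i].end == start) {
			ooo.gap[i] = ooo.gap[--ooo.cnt];	/* pair cancels */
			return true;
		}
	}
	if (ooo.cnt >= 16)
		return false;	/* table full: caller must invalidate */
	ooo.gap[ooo.cnt].start = start;
	ooo.gap[ooo.cnt].end = end;
	ooo.cnt++;
	return true;
}

int main(void)
{
	ooo_record(5, 6);	/* iversion moved 5 -> 6, no matching reply yet */
	ooo_record(6, 5);	/* the reply arrives late, as the reverse pair */
	printf("outstanding gaps: %d\n", ooo.cnt);	/* prints 0 */
	return 0;
}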
#define NFS_JUKEBOX_RETRY_TIME (5 * HZ)
+/* We need to block new opens while a file is being unlinked.
+ * If it is opened *before* we decide to unlink, we will silly-rename
+ * instead. If it is opened *after*, the open must create a new file or it will fail.
+ * If we allow the two to race, we could end up with a file that is open
+ * but deleted on the server resulting in ESTALE.
+ * So use ->d_fsdata to record when the unlink is happening
+ * and block dentry revalidation while it is set.
+ */
+#define NFS_FSDATA_BLOCKED ((void*)1)
# undef ifdebug
# ifdef NFS_DEBUG
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index d71a0e90faeb..d30c0245031c 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -8,6 +8,7 @@
#include <linux/wait.h>
#include <linux/nfs_xdr.h>
#include <linux/sunrpc/xprt.h>
+#include <linux/nfslocalio.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
@@ -48,6 +49,8 @@ struct nfs_client {
#define NFS_CS_NOPING 6 /* - don't ping on connect */
#define NFS_CS_DS 7 /* - Server is a DS */
#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
+#define NFS_CS_PNFS 9 /* - Server used for pnfs */
+#define NFS_CS_NETUNREACH_FATAL 10 /* - ENETUNREACH errors are fatal */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -62,7 +65,9 @@ struct nfs_client {
u32 cl_minorversion;/* NFSv4 minorversion */
unsigned int cl_nconnect; /* Number of connections */
- const char * cl_principal; /* used for machine cred */
+ unsigned int cl_max_connect; /* max number of xprts allowed */
+ const char * cl_principal; /* used for machine cred */
+ struct xprtsec_parms cl_xprtsec; /* xprt security policy */
#if IS_ENABLED(CONFIG_NFS_V4)
struct list_head cl_ds_clients; /* auth flavor data servers */
@@ -119,13 +124,17 @@ struct nfs_client {
* This is used to generate the mv0 callback address.
*/
char cl_ipaddr[48];
-
-#ifdef CONFIG_NFS_FSCACHE
- struct fscache_cookie *fscache; /* client index cache cookie */
-#endif
-
struct net *cl_net;
+ netns_tracker cl_ns_tracker;
struct list_head pending_cb_stateids;
+ struct rcu_head rcu;
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+ struct timespec64 cl_nfssvc_boot;
+ seqlock_t cl_boot_lock;
+ nfs_uuid_t cl_uuid;
+ struct work_struct cl_local_probe_work;
+#endif /* CONFIG_NFS_LOCALIO */
};
/*
@@ -141,7 +150,9 @@ struct nfs_server {
struct rpc_clnt * client_acl; /* ACL RPC client handle */
struct nlm_host *nlm_host; /* NLM client handle */
struct nfs_iostats __percpu *io_stats; /* I/O statistics */
+ wait_queue_head_t write_congestion_wait; /* wait until write congestion eases */
atomic_long_t writeback; /* number of writeback pages */
+ unsigned int write_congested;/* flag set when writeback gets too high */
unsigned int flags; /* various flags */
/* The following are for internal use only. Also see uapi/linux/nfs_mount.h */
@@ -155,13 +166,17 @@ struct nfs_server {
#define NFS_MOUNT_SOFTREVAL 0x800000
#define NFS_MOUNT_WRITE_EAGER 0x01000000
#define NFS_MOUNT_WRITE_WAIT 0x02000000
+#define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000
+#define NFS_MOUNT_SHUTDOWN 0x08000000
+#define NFS_MOUNT_NO_ALIGNWRITE 0x10000000
+#define NFS_MOUNT_FORCE_RDIRPLUS 0x20000000
+#define NFS_MOUNT_NETUNREACH_FATAL 0x40000000
- unsigned int fattr_valid; /* Valid attributes */
unsigned int caps; /* server capabilities */
+ __u64 fattr_valid; /* Valid attributes */
unsigned int rsize; /* read size */
unsigned int rpages; /* read size (in pages) */
unsigned int wsize; /* write size */
- unsigned int wpages; /* write size (in pages) */
unsigned int wtmult; /* server disk block size */
unsigned int dtsize; /* readdir size */
unsigned short port; /* "port=" setting */
@@ -185,18 +200,27 @@ struct nfs_server {
change_attr_type;/* Description of change attribute */
struct nfs_fsid fsid;
+ int s_sysfs_id; /* sysfs dentry index */
__u64 maxfilesize; /* maximum file size */
- struct timespec64 time_delta; /* smallest time granularity */
unsigned long mount_time; /* when this fs was mounted */
struct super_block *super; /* VFS super block */
dev_t s_dev; /* superblock dev numbers */
struct nfs_auth_info auth_info; /* parsed auth flavors */
#ifdef CONFIG_NFS_FSCACHE
- struct nfs_fscache_key *fscache_key; /* unique key for superblock */
- struct fscache_cookie *fscache; /* superblock cookie */
+ struct fscache_volume *fscache; /* superblock cookie */
+ char *fscache_uniq; /* Uniquifier (or NULL) */
#endif
+ /* The following #defines numerically match the NFSv4 equivalents */
+#define NFS_FH_NOEXPIRE_WITH_OPEN (0x1)
+#define NFS_FH_VOLATILE_ANY (0x2)
+#define NFS_FH_VOL_MIGRATION (0x4)
+#define NFS_FH_VOL_RENAME (0x8)
+#define NFS_FH_RENAME_UNSAFE (NFS_FH_VOLATILE_ANY | NFS_FH_VOL_RENAME)
+ u32 fh_expire_type; /* V4 bitmask representing file
+ handle volatility type for
+ this filesystem */
u32 pnfs_blksize; /* layout_blksize attr */
#if IS_ENABLED(CONFIG_NFS_V4)
u32 attr_bitmask[3];/* V4 bitmask representing the set
@@ -220,23 +244,27 @@ struct nfs_server {
u32 acl_bitmask; /* V4 bitmask representing the ACEs
that are supported on this
filesystem */
- u32 fh_expire_type; /* V4 bitmask representing file
- handle volatility type for
- this filesystem */
struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */
struct rpc_wait_queue roc_rpcwaitq;
- void *pnfs_ld_data; /* per mount point data */
/* the following fields are protected by nfs_client->cl_lock */
struct rb_root state_owners;
#endif
- struct ida openowner_id;
- struct ida lockowner_id;
+ atomic64_t owner_ctr;
struct list_head state_owners_lru;
struct list_head layouts;
struct list_head delegations;
+ atomic_long_t nr_active_delegations;
+ unsigned int delegation_hash_mask;
+ struct hlist_head *delegation_hash_table;
struct list_head ss_copies;
+ struct list_head ss_src_copies;
+ unsigned long delegation_flags;
+#define NFS4SERV_DELEGRETURN (1)
+#define NFS4SERV_DELEGATION_EXPIRED (2)
+#define NFS4SERV_DELEGRETURN_DELAYED (3)
+ unsigned long delegation_gen;
unsigned long mig_gen;
unsigned long mig_status;
#define NFS_MIG_IN_TRANSITION (1)
@@ -261,6 +289,8 @@ struct nfs_server {
/* User namespace info */
const struct cred *cred;
bool has_sec_mnt_opts;
+ struct kobject kobj;
+ struct rcu_head rcu;
};
/* Server capabilities */
@@ -270,6 +300,13 @@ struct nfs_server {
#define NFS_CAP_ACLS (1U << 3)
#define NFS_CAP_ATOMIC_OPEN (1U << 4)
#define NFS_CAP_LGOPEN (1U << 5)
+#define NFS_CAP_CASE_INSENSITIVE (1U << 6)
+#define NFS_CAP_CASE_PRESERVING (1U << 7)
+#define NFS_CAP_REBOOT_LAYOUTRETURN (1U << 8)
+#define NFS_CAP_OFFLOAD_STATUS (1U << 9)
+#define NFS_CAP_ZERO_RANGE (1U << 10)
+#define NFS_CAP_OPEN_XOR (1U << 12)
+#define NFS_CAP_DELEGTIME (1U << 13)
#define NFS_CAP_POSIX_LOCK (1U << 14)
#define NFS_CAP_UIDGID_NOMAP (1U << 15)
#define NFS_CAP_STATEID_NFSV41 (1U << 16)
@@ -286,5 +323,6 @@ struct nfs_server {
#define NFS_CAP_COPY_NOTIFY (1U << 27)
#define NFS_CAP_XATTR (1U << 28)
#define NFS_CAP_READ_PLUS (1U << 29)
-
+#define NFS_CAP_FS_LOCATIONS (1U << 30)
+#define NFS_CAP_MOVEABLE (1U << 31)
#endif
diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h
index 027874c36c88..8d946089d151 100644
--- a/include/linux/nfs_iostat.h
+++ b/include/linux/nfs_iostat.h
@@ -119,16 +119,4 @@ enum nfs_stat_eventcounters {
__NFSIOS_COUNTSMAX,
};
-/*
- * NFS local caching servicing counters
- */
-enum nfs_stat_fscachecounters {
- NFSIOS_FSCACHE_PAGES_READ_OK,
- NFSIOS_FSCACHE_PAGES_READ_FAIL,
- NFSIOS_FSCACHE_PAGES_WRITTEN_OK,
- NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL,
- NFSIOS_FSCACHE_PAGES_UNCACHED,
- __NFSIOS_FSCACHEMAX,
-};
-
#endif /* _LINUX_NFS_IOSTAT */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index f0373a6cb5fb..afe1d8f09d89 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -25,6 +25,7 @@
enum {
PG_BUSY = 0, /* nfs_{un}lock_request */
PG_MAPPED, /* page private set for buffered io */
+ PG_FOLIO, /* Tracking a folio (unset for O_DIRECT) */
PG_CLEAN, /* write succeeded */
PG_COMMIT_TO_DS, /* used by pnfs layouts */
PG_INODE_REF, /* extra ref held by inode when in writeback */
@@ -41,7 +42,10 @@ enum {
struct nfs_inode;
struct nfs_page {
struct list_head wb_list; /* Defines state of page: */
- struct page *wb_page; /* page to read in/write out */
+ union {
+ struct page *wb_page; /* page to read in/write out */
+ struct folio *wb_folio;
+ };
struct nfs_lock_context *wb_lock_context; /* lock context info */
pgoff_t wb_index; /* Offset >> PAGE_SHIFT */
unsigned int wb_offset, /* Offset & ~PAGE_MASK */
@@ -101,6 +105,9 @@ struct nfs_pageio_descriptor {
struct pnfs_layout_segment *pg_lseg;
struct nfs_io_completion *pg_io_completion;
struct nfs_direct_req *pg_dreq;
+#ifdef CONFIG_NFS_FSCACHE
+ void *pg_netfs;
+#endif
unsigned int pg_bsize; /* default bsize for mirrors */
u32 pg_mirror_count;
@@ -115,12 +122,15 @@ struct nfs_pageio_descriptor {
/* arbitrarily selected limit to number of mirrors */
#define NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX 16
-#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
-
-extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx,
- struct page *page,
- unsigned int offset,
- unsigned int count);
+extern struct nfs_page *nfs_page_create_from_page(struct nfs_open_context *ctx,
+ struct page *page,
+ unsigned int pgbase,
+ loff_t offset,
+ unsigned int count);
+extern struct nfs_page *nfs_page_create_from_folio(struct nfs_open_context *ctx,
+ struct folio *folio,
+ unsigned int offset,
+ unsigned int count);
extern void nfs_release_request(struct nfs_page *);
@@ -140,19 +150,79 @@ extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t);
extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
struct nfs_page *prev,
struct nfs_page *req);
-extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
extern void nfs_unlock_and_release_request(struct nfs_page *);
-extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
-extern int nfs_page_group_lock_subrequests(struct nfs_page *head);
-extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
+extern void nfs_join_page_group(struct nfs_page *head,
+ struct nfs_commit_info *cinfo,
+ struct inode *inode);
extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
+extern bool nfs_page_group_sync_on_bit_locked(struct nfs_page *, unsigned int);
extern int nfs_page_set_headlock(struct nfs_page *req);
extern void nfs_page_clear_headlock(struct nfs_page *req);
extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
+/**
+ * nfs_page_to_folio - Retrieve a struct folio for the request
+ * @req: pointer to a struct nfs_page
+ *
+ * If a folio was assigned to @req, then return it, otherwise return NULL.
+ */
+static inline struct folio *nfs_page_to_folio(const struct nfs_page *req)
+{
+ if (test_bit(PG_FOLIO, &req->wb_flags))
+ return req->wb_folio;
+ return NULL;
+}
+
+/**
+ * nfs_page_to_page - Retrieve a struct page for the request
+ * @req: pointer to a struct nfs_page
+ * @pgbase: folio byte offset
+ *
+ * Return the page containing the byte that is at offset @pgbase relative
+ * to the start of the folio.
+ * Note: The request starts at offset @req->wb_pgbase.
+ */
+static inline struct page *nfs_page_to_page(const struct nfs_page *req,
+ size_t pgbase)
+{
+ struct folio *folio = nfs_page_to_folio(req);
+
+ if (folio == NULL)
+ return req->wb_page;
+ return folio_page(folio, pgbase >> PAGE_SHIFT);
+}
+
+/**
+ * nfs_page_to_inode - Retrieve an inode for the request
+ * @req: pointer to a struct nfs_page
+ */
+static inline struct inode *nfs_page_to_inode(const struct nfs_page *req)
+{
+ struct folio *folio = nfs_page_to_folio(req);
+
+ if (folio == NULL)
+ return req->wb_page->mapping->host;
+ return folio->mapping->host;
+}
+
+/**
+ * nfs_page_max_length - Retrieve the maximum possible length for a request
+ * @req: pointer to a struct nfs_page
+ *
+ * Returns the maximum possible length of a request
+ */
+static inline size_t nfs_page_max_length(const struct nfs_page *req)
+{
+ struct folio *folio = nfs_page_to_folio(req);
+
+ if (folio == NULL)
+ return PAGE_SIZE;
+ return folio_size(folio);
+}
+
/*
* Lock the page of an asynchronous request
*/
@@ -202,8 +272,7 @@ nfs_list_entry(struct list_head *head)
return list_entry(head, struct nfs_page, wb_list);
}
-static inline
-loff_t req_offset(struct nfs_page *req)
+static inline loff_t req_offset(const struct nfs_page *req)
{
return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
}
diff --git a/include/linux/nfs_ssc.h b/include/linux/nfs_ssc.h
index 222ae8883e85..22265b1ff080 100644
--- a/include/linux/nfs_ssc.h
+++ b/include/linux/nfs_ssc.h
@@ -53,6 +53,7 @@ static inline void nfs42_ssc_close(struct file *filep)
if (nfs_ssc_client_tbl.ssc_nfs4_ops)
(*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_close)(filep);
}
+#endif
struct nfsd4_ssc_umount_item {
struct list_head nsui_list;
@@ -64,9 +65,8 @@ struct nfsd4_ssc_umount_item {
refcount_t nsui_refcnt;
unsigned long nsui_expire;
struct vfsmount *nsui_vfsmount;
- char nsui_ipaddr[RPC_MAX_ADDRBUFLEN];
+ char nsui_ipaddr[RPC_MAX_ADDRBUFLEN + 1];
};
-#endif
/*
* NFS_FS
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 717ecc87c9e7..31463286402f 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -45,7 +45,7 @@ struct nfs4_threshold {
};
struct nfs_fattr {
- unsigned int valid; /* which fields are valid */
+ __u64 valid; /* which fields are valid */
umode_t mode;
__u32 nlink;
kuid_t uid;
@@ -67,6 +67,7 @@ struct nfs_fattr {
struct timespec64 atime;
struct timespec64 mtime;
struct timespec64 ctime;
+ struct timespec64 btime;
__u64 change_attr; /* NFSv4 change attribute */
__u64 pre_change_attr;/* pre-op NFSv4 change attribute */
__u64 pre_size; /* pre_op_attr.size */
@@ -80,32 +81,33 @@ struct nfs_fattr {
struct nfs4_label *label;
};
-#define NFS_ATTR_FATTR_TYPE (1U << 0)
-#define NFS_ATTR_FATTR_MODE (1U << 1)
-#define NFS_ATTR_FATTR_NLINK (1U << 2)
-#define NFS_ATTR_FATTR_OWNER (1U << 3)
-#define NFS_ATTR_FATTR_GROUP (1U << 4)
-#define NFS_ATTR_FATTR_RDEV (1U << 5)
-#define NFS_ATTR_FATTR_SIZE (1U << 6)
-#define NFS_ATTR_FATTR_PRESIZE (1U << 7)
-#define NFS_ATTR_FATTR_BLOCKS_USED (1U << 8)
-#define NFS_ATTR_FATTR_SPACE_USED (1U << 9)
-#define NFS_ATTR_FATTR_FSID (1U << 10)
-#define NFS_ATTR_FATTR_FILEID (1U << 11)
-#define NFS_ATTR_FATTR_ATIME (1U << 12)
-#define NFS_ATTR_FATTR_MTIME (1U << 13)
-#define NFS_ATTR_FATTR_CTIME (1U << 14)
-#define NFS_ATTR_FATTR_PREMTIME (1U << 15)
-#define NFS_ATTR_FATTR_PRECTIME (1U << 16)
-#define NFS_ATTR_FATTR_CHANGE (1U << 17)
-#define NFS_ATTR_FATTR_PRECHANGE (1U << 18)
-#define NFS_ATTR_FATTR_V4_LOCATIONS (1U << 19)
-#define NFS_ATTR_FATTR_V4_REFERRAL (1U << 20)
-#define NFS_ATTR_FATTR_MOUNTPOINT (1U << 21)
-#define NFS_ATTR_FATTR_MOUNTED_ON_FILEID (1U << 22)
-#define NFS_ATTR_FATTR_OWNER_NAME (1U << 23)
-#define NFS_ATTR_FATTR_GROUP_NAME (1U << 24)
-#define NFS_ATTR_FATTR_V4_SECURITY_LABEL (1U << 25)
+#define NFS_ATTR_FATTR_TYPE BIT_ULL(0)
+#define NFS_ATTR_FATTR_MODE BIT_ULL(1)
+#define NFS_ATTR_FATTR_NLINK BIT_ULL(2)
+#define NFS_ATTR_FATTR_OWNER BIT_ULL(3)
+#define NFS_ATTR_FATTR_GROUP BIT_ULL(4)
+#define NFS_ATTR_FATTR_RDEV BIT_ULL(5)
+#define NFS_ATTR_FATTR_SIZE BIT_ULL(6)
+#define NFS_ATTR_FATTR_PRESIZE BIT_ULL(7)
+#define NFS_ATTR_FATTR_BLOCKS_USED BIT_ULL(8)
+#define NFS_ATTR_FATTR_SPACE_USED BIT_ULL(9)
+#define NFS_ATTR_FATTR_FSID BIT_ULL(10)
+#define NFS_ATTR_FATTR_FILEID BIT_ULL(11)
+#define NFS_ATTR_FATTR_ATIME BIT_ULL(12)
+#define NFS_ATTR_FATTR_MTIME BIT_ULL(13)
+#define NFS_ATTR_FATTR_CTIME BIT_ULL(14)
+#define NFS_ATTR_FATTR_PREMTIME BIT_ULL(15)
+#define NFS_ATTR_FATTR_PRECTIME BIT_ULL(16)
+#define NFS_ATTR_FATTR_CHANGE BIT_ULL(17)
+#define NFS_ATTR_FATTR_PRECHANGE BIT_ULL(18)
+#define NFS_ATTR_FATTR_V4_LOCATIONS BIT_ULL(19)
+#define NFS_ATTR_FATTR_V4_REFERRAL BIT_ULL(20)
+#define NFS_ATTR_FATTR_MOUNTPOINT BIT_ULL(21)
+#define NFS_ATTR_FATTR_MOUNTED_ON_FILEID BIT_ULL(22)
+#define NFS_ATTR_FATTR_OWNER_NAME BIT_ULL(23)
+#define NFS_ATTR_FATTR_GROUP_NAME BIT_ULL(24)
+#define NFS_ATTR_FATTR_V4_SECURITY_LABEL BIT_ULL(25)
+#define NFS_ATTR_FATTR_BTIME BIT_ULL(26)
#define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \
| NFS_ATTR_FATTR_MODE \
@@ -126,6 +128,7 @@ struct nfs_fattr {
| NFS_ATTR_FATTR_SPACE_USED)
#define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \
| NFS_ATTR_FATTR_SPACE_USED \
+ | NFS_ATTR_FATTR_BTIME \
| NFS_ATTR_FATTR_V4_SECURITY_LABEL)
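Illustrative only: with the validity mask widened from unsigned int to __u64, callers test attribute bits exactly as before, but bits above 31 now fit; example_has_btime() is a hypothetical helper.

static bool example_has_btime(const struct nfs_fattr *fattr)
{
	/* fattr->valid is now __u64, so BIT_ULL(26) and future flags are safe */
	return (fattr->valid & NFS_ATTR_FATTR_BTIME) != 0;
}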
/*
@@ -277,6 +280,7 @@ struct nfs4_layoutget {
struct nfs4_layoutget_args args;
struct nfs4_layoutget_res res;
const struct cred *cred;
+ struct pnfs_layout_hdr *lo;
gfp_t gfp_flags;
};
@@ -445,7 +449,23 @@ struct nfs42_clone_res {
struct stateowner_id {
__u64 create_time;
- __u32 uniquifier;
+ __u64 uniquifier;
+};
+
+struct nfs4_open_delegation {
+ __u32 open_delegation_type;
+ union {
+ struct {
+ fmode_t type;
+ __u32 do_recall;
+ nfs4_stateid stateid;
+ unsigned long pagemod_limit;
+ };
+ struct {
+ __u32 why_no_delegation;
+ __u32 will_notify;
+ };
+ };
};
/*
@@ -467,7 +487,7 @@ struct nfs_openargs {
nfs4_verifier verifier; /* EXCLUSIVE */
};
nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */
- fmode_t delegation_type; /* CLAIM_PREVIOUS */
+ __u32 delegation_type; /* CLAIM_PREVIOUS */
} u;
const struct qstr * name;
const struct nfs_server *server; /* Needed for ID mapping */
@@ -487,16 +507,12 @@ struct nfs_openres {
struct nfs4_change_info cinfo;
__u32 rflags;
struct nfs_fattr * f_attr;
- struct nfs4_label *f_label;
struct nfs_seqid * seqid;
const struct nfs_server *server;
- fmode_t delegation_type;
- nfs4_stateid delegation;
- unsigned long pagemod_limit;
- __u32 do_recall;
__u32 attrset[NFS4_BITMAP_SIZE];
struct nfs4_string *owner;
struct nfs4_string *group_owner;
+ struct nfs4_open_delegation delegation;
__u32 access_request;
__u32 access_supported;
__u32 access_result;
@@ -609,6 +625,13 @@ struct nfs_release_lockowner_res {
struct nfs4_sequence_res seq_res;
};
+struct nfs4_delegattr {
+ struct timespec64 atime;
+ struct timespec64 mtime;
+ bool atime_set;
+ bool mtime_set;
+};
+
struct nfs4_delegreturnargs {
struct nfs4_sequence_args seq_args;
const struct nfs_fh *fhandle;
@@ -616,6 +639,7 @@ struct nfs4_delegreturnargs {
const u32 *bitmask;
u32 bitmask_store[NFS_BITMASK_SZ];
struct nfs4_layoutreturn_args *lr_args;
+ struct nfs4_delegattr *sattr_args;
};
struct nfs4_delegreturnres {
@@ -624,6 +648,8 @@ struct nfs4_delegreturnres {
struct nfs_server *server;
struct nfs4_layoutreturn_res *lr_res;
int lr_ret;
+ bool sattr_res;
+ int sattr_ret;
};
/*
@@ -670,6 +696,7 @@ struct nfs_pgio_res {
struct {
unsigned int replen; /* used by read */
int eof; /* used by read */
+ void * scratch; /* used by read */
};
struct {
struct nfs_writeverf * verf; /* used by write */
@@ -745,14 +772,12 @@ struct nfs_auth_info {
*/
struct nfs_entry {
__u64 ino;
- __u64 cookie,
- prev_cookie;
+ __u64 cookie;
const char * name;
unsigned int len;
int eof;
struct nfs_fh * fh;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
unsigned char d_type;
struct nfs_server * server;
};
@@ -802,9 +827,17 @@ struct nfs_setattrargs {
const struct nfs4_label *label;
};
+enum nfs4_acl_type {
+ NFS4ACL_NONE = 0,
+ NFS4ACL_ACL,
+ NFS4ACL_DACL,
+ NFS4ACL_SACL,
+};
+
struct nfs_setaclargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
+ enum nfs4_acl_type acl_type;
size_t acl_len;
struct page ** acl_pages;
};
@@ -816,6 +849,7 @@ struct nfs_setaclres {
struct nfs_getaclargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh * fh;
+ enum nfs4_acl_type acl_type;
size_t acl_len;
struct page ** acl_pages;
};
@@ -824,16 +858,16 @@ struct nfs_getaclargs {
#define NFS4_ACL_TRUNC 0x0001 /* ACL was truncated */
struct nfs_getaclres {
struct nfs4_sequence_res seq_res;
+ enum nfs4_acl_type acl_type;
size_t acl_len;
size_t acl_data_offset;
int acl_flags;
- struct page * acl_scratch;
+ struct folio * acl_scratch;
};
struct nfs_setattrres {
struct nfs4_sequence_res seq_res;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
const struct nfs_server * server;
};
@@ -1040,7 +1074,6 @@ struct nfs4_create_res {
const struct nfs_server * server;
struct nfs_fh * fh;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
struct nfs4_change_info dir_cinfo;
};
@@ -1065,7 +1098,6 @@ struct nfs4_getattr_res {
struct nfs4_sequence_res seq_res;
const struct nfs_server * server;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
};
struct nfs4_link_arg {
@@ -1080,7 +1112,6 @@ struct nfs4_link_res {
struct nfs4_sequence_res seq_res;
const struct nfs_server * server;
struct nfs_fattr * fattr;
- struct nfs4_label *label;
struct nfs4_change_info cinfo;
struct nfs_fattr * dir_attr;
};
@@ -1097,7 +1128,6 @@ struct nfs4_lookup_res {
const struct nfs_server * server;
struct nfs_fattr * fattr;
struct nfs_fh * fh;
- struct nfs4_label *label;
};
struct nfs4_lookupp_arg {
@@ -1111,7 +1141,6 @@ struct nfs4_lookupp_res {
const struct nfs_server *server;
struct nfs_fattr *fattr;
struct nfs_fh *fh;
- struct nfs4_label *label;
};
struct nfs4_lookup_root_arg {
@@ -1187,6 +1216,14 @@ struct nfs4_statfs_res {
struct nfs_fsstat *fsstat;
};
+struct nfs4_open_caps {
+ u32 oa_share_access[1];
+ u32 oa_share_deny[1];
+ u32 oa_share_access_want[1];
+ u32 oa_open_claim[1];
+ u32 oa_createmode[1];
+};
+
struct nfs4_server_caps_arg {
struct nfs4_sequence_args seq_args;
struct nfs_fh *fhandle;
@@ -1201,6 +1238,9 @@ struct nfs4_server_caps_res {
u32 has_links;
u32 has_symlinks;
u32 fh_expire_type;
+ u32 case_insensitive;
+ u32 case_preserving;
+ struct nfs4_open_caps open_caps;
};
#define NFS4_PATHNAME_MAXCOMPONENTS 512
@@ -1218,7 +1258,7 @@ struct nfs4_fs_location {
#define NFS4_FS_LOCATIONS_MAXENTRIES 10
struct nfs4_fs_locations {
- struct nfs_fattr fattr;
+ struct nfs_fattr *fattr;
const struct nfs_server *server;
struct nfs4_pathname fs_path;
int nlocations;
@@ -1278,11 +1318,6 @@ struct nfs4_fsid_present_res {
#endif /* CONFIG_NFS_V4 */
-struct nfstime4 {
- u64 seconds;
- u32 nseconds;
-};
-
#ifdef CONFIG_NFS_V4_1
struct pnfs_commit_bucket {
@@ -1299,7 +1334,7 @@ struct pnfs_commit_array {
struct rcu_head rcu;
refcount_t refcount;
unsigned int nbuckets;
- struct pnfs_commit_bucket buckets[];
+ struct pnfs_commit_bucket buckets[] __counted_by(nbuckets);
};
struct pnfs_ds_commit_info {
@@ -1401,7 +1436,7 @@ struct nfs41_secinfo_no_name_args {
struct nfs41_test_stateid_args {
struct nfs4_sequence_args seq_args;
- nfs4_stateid *stateid;
+ nfs4_stateid stateid;
};
struct nfs41_test_stateid_res {
@@ -1483,8 +1518,9 @@ struct nfs42_offload_status_args {
struct nfs42_offload_status_res {
struct nfs4_sequence_res osr_seq_res;
- uint64_t osr_count;
- int osr_status;
+ u64 osr_count;
+ int complete_count;
+ u32 osr_complete;
};
struct nfs42_copy_notify_args {
@@ -1523,6 +1559,7 @@ struct nfs42_seek_res {
struct nfs42_setxattrargs {
struct nfs4_sequence_args seq_args;
struct nfs_fh *fh;
+ const u32 *bitmask;
const char *xattr_name;
u32 xattr_flags;
size_t xattr_len;
@@ -1532,6 +1569,8 @@ struct nfs42_setxattrargs {
struct nfs42_setxattrres {
struct nfs4_sequence_res seq_res;
struct nfs4_change_info cinfo;
+ struct nfs_fattr *fattr;
+ const struct nfs_server *server;
};
struct nfs42_getxattrargs {
@@ -1557,7 +1596,7 @@ struct nfs42_listxattrsargs {
struct nfs42_listxattrsres {
struct nfs4_sequence_res seq_res;
- struct page *scratch;
+ struct folio *scratch;
void *xattr_buf;
size_t xattr_len;
u64 cookie;
@@ -1596,6 +1635,8 @@ enum {
NFS_IOHDR_STAT,
NFS_IOHDR_RESEND_PNFS,
NFS_IOHDR_RESEND_MDS,
+ NFS_IOHDR_UNSTABLE_WRITES,
+ NFS_IOHDR_ODIRECT,
};
struct nfs_io_completion;
@@ -1614,7 +1655,11 @@ struct nfs_pgio_header {
const struct nfs_rw_ops *rw_ops;
struct nfs_io_completion *io_completion;
struct nfs_direct_req *dreq;
+#ifdef CONFIG_NFS_FSCACHE
+ void *netfs;
+#endif
+ unsigned short retrans;
int pnfs_error;
int error; /* merge with pnfs_error */
unsigned int good_bytes; /* boundary of good data */
@@ -1700,6 +1745,7 @@ struct nfs_unlinkdata {
struct nfs_renamedata {
struct nfs_renameargs args;
struct nfs_renameres res;
+ struct rpc_task task;
const struct cred *cred;
struct inode *old_dir;
struct dentry *old_dentry;
@@ -1737,16 +1783,14 @@ struct nfs_rpc_ops {
int (*submount) (struct fs_context *, struct nfs_server *);
int (*try_get_tree) (struct fs_context *);
int (*getattr) (struct nfs_server *, struct nfs_fh *,
- struct nfs_fattr *, struct nfs4_label *,
- struct inode *);
+ struct nfs_fattr *, struct inode *);
int (*setattr) (struct dentry *, struct nfs_fattr *,
struct iattr *);
- int (*lookup) (struct inode *, struct dentry *,
- struct nfs_fh *, struct nfs_fattr *,
- struct nfs4_label *);
+ int (*lookup) (struct inode *, struct dentry *, const struct qstr *,
+ struct nfs_fh *, struct nfs_fattr *);
int (*lookupp) (struct inode *, struct nfs_fh *,
- struct nfs_fattr *, struct nfs4_label *);
- int (*access) (struct inode *, struct nfs_access_entry *);
+ struct nfs_fattr *);
+ int (*access) (struct inode *, struct nfs_access_entry *, const struct cred *);
int (*readlink)(struct inode *, struct page *, unsigned int,
unsigned int);
int (*create) (struct inode *, struct dentry *,
@@ -1761,9 +1805,9 @@ struct nfs_rpc_ops {
void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *);
int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir);
int (*link) (struct inode *, struct inode *, const struct qstr *);
- int (*symlink) (struct inode *, struct dentry *, struct page *,
+ int (*symlink) (struct inode *, struct dentry *, struct folio *,
unsigned int, struct iattr *);
- int (*mkdir) (struct inode *, struct dentry *, struct iattr *);
+ struct dentry *(*mkdir) (struct inode *, struct dentry *, struct iattr *);
int (*rmdir) (struct inode *, const struct qstr *);
int (*readdir) (struct nfs_readdir_arg *, struct nfs_readdir_res *);
int (*mknod) (struct inode *, struct dentry *, struct iattr *,
@@ -1796,7 +1840,8 @@ struct nfs_rpc_ops {
int open_flags,
struct iattr *iattr,
int *);
- int (*have_delegation)(struct inode *, fmode_t);
+ int (*have_delegation)(struct inode *, fmode_t, int);
+ int (*return_delegation)(struct inode *);
struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
struct nfs_client *(*init_client) (struct nfs_client *,
const struct nfs_client_initdata *);
@@ -1804,14 +1849,28 @@ struct nfs_rpc_ops {
struct nfs_server *(*create_server)(struct fs_context *);
struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *, rpc_authflavor_t);
+ int (*discover_trunking)(struct nfs_server *, struct nfs_fh *);
+ void (*enable_swap)(struct inode *inode);
+ void (*disable_swap)(struct inode *inode);
};
/*
- * NFS_CALL(getattr, inode, (fattr));
- * into
- * NFS_PROTO(inode)->getattr(fattr);
+ * Helper functions used by NFS client and/or server
*/
-#define NFS_CALL(op, inode, args) NFS_PROTO(inode)->op args
+static inline void encode_opaque_fixed(struct xdr_stream *xdr,
+ const void *buf, size_t len)
+{
+ WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
+}
+
+static inline int decode_opaque_fixed(struct xdr_stream *xdr,
+ void *buf, size_t len)
+{
+ ssize_t ret = xdr_stream_decode_opaque_fixed(xdr, buf, len);
+ if (unlikely(ret < 0))
+ return -EIO;
+ return 0;
+}
/*
* Function vectors etc. for the NFS client
@@ -1826,4 +1885,4 @@ extern const struct rpc_version nfs_version4;
extern const struct rpc_version nfsacl_version3;
extern const struct rpc_program nfsacl_program;
-#endif
+#endif /* _LINUX_NFS_XDR_H */
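A sketch of how the shared XDR helpers added above are typically used; example_decode_stateid() and example_encode_stateid() are hypothetical, and NFS4_STATEID_SIZE is assumed from <linux/nfs4.h>.

static int example_decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	/* decode_opaque_fixed() maps any short or failed read to -EIO */
	return decode_opaque_fixed(xdr, stateid, NFS4_STATEID_SIZE);
}

static void example_encode_stateid(struct xdr_stream *xdr,
				   const nfs4_stateid *stateid)
{
	/* the encode side only WARNs on failure; the stream carries error state */
	encode_opaque_fixed(xdr, stateid, NFS4_STATEID_SIZE);
}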
diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h
new file mode 100644
index 000000000000..3d91043254e6
--- /dev/null
+++ b/include/linux/nfslocalio.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com>
+ * Copyright (C) 2024 NeilBrown <neilb@suse.de>
+ */
+#ifndef __LINUX_NFSLOCALIO_H
+#define __LINUX_NFSLOCALIO_H
+
+#if IS_ENABLED(CONFIG_NFS_LOCALIO)
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/uuid.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svcauth.h>
+#include <linux/nfs.h>
+#include <net/net_namespace.h>
+
+struct nfs_client;
+struct nfs_file_localio;
+
+/*
+ * Used to allow a client to negotiate whether localio is
+ * possible with its server.
+ *
+ * See Documentation/filesystems/nfs/localio.rst for more detail.
+ */
+typedef struct {
+ uuid_t uuid;
+ unsigned nfs3_localio_probe_count;
+ /* this struct is over a cacheline, avoid bouncing */
+ spinlock_t ____cacheline_aligned lock;
+ struct list_head list;
+ spinlock_t *list_lock; /* nn->local_clients_lock */
+ struct net __rcu *net; /* nfsd's network namespace */
+ struct auth_domain *dom; /* auth_domain for localio */
+ /* Local files to close when net is shut down or exports change */
+ struct list_head files;
+} nfs_uuid_t;
+
+void nfs_uuid_init(nfs_uuid_t *);
+bool nfs_uuid_begin(nfs_uuid_t *);
+void nfs_uuid_end(nfs_uuid_t *);
+void nfs_uuid_is_local(const uuid_t *, struct list_head *, spinlock_t *,
+ struct net *, struct auth_domain *, struct module *);
+
+void nfs_localio_enable_client(struct nfs_client *clp);
+void nfs_localio_disable_client(struct nfs_client *clp);
+void nfs_localio_invalidate_clients(struct list_head *nn_local_clients,
+ spinlock_t *nn_local_clients_lock);
+
+/* localio needs to map filehandle -> struct nfsd_file */
+void nfs_close_local_fh(struct nfs_file_localio *);
+
+struct nfsd_localio_operations {
+ bool (*nfsd_net_try_get)(struct net *);
+ void (*nfsd_net_put)(struct net *);
+ struct nfsd_file *(*nfsd_open_local_fh)(struct net *,
+ struct auth_domain *,
+ struct rpc_clnt *,
+ const struct cred *,
+ const struct nfs_fh *,
+ struct nfsd_file __rcu **pnf,
+ const fmode_t);
+ struct net *(*nfsd_file_put_local)(struct nfsd_file __rcu **);
+ struct file *(*nfsd_file_file)(struct nfsd_file *);
+ void (*nfsd_file_dio_alignment)(struct nfsd_file *,
+ u32 *, u32 *, u32 *);
+} ____cacheline_aligned;
+
+extern void nfsd_localio_ops_init(void);
+extern const struct nfsd_localio_operations *nfs_to;
+
+struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *,
+ struct rpc_clnt *, const struct cred *,
+ const struct nfs_fh *, struct nfs_file_localio *,
+ struct nfsd_file __rcu **pnf,
+ const fmode_t);
+
+static inline void nfs_to_nfsd_net_put(struct net *net)
+{
+ /*
+	 * Once the reference to net (and associated nfsd_serv) is dropped, NFSD
+ * could be unloaded, so ensure safe return from nfsd_net_put() by
+ * always taking RCU.
+ */
+ rcu_read_lock();
+ nfs_to->nfsd_net_put(net);
+ rcu_read_unlock();
+}
+
+static inline void nfs_to_nfsd_file_put_local(struct nfsd_file __rcu **localio)
+{
+ /*
+ * Either *localio must be guaranteed to be non-NULL, or caller
+	 * must prevent nfsd shutdown from completing, as nfs_close_local_fh()
+	 * does, by blocking the nfs_uuid from being finally put.
+ */
+ struct net *net;
+
+ net = nfs_to->nfsd_file_put_local(localio);
+
+ if (net)
+ nfs_to_nfsd_net_put(net);
+}
+
+#else /* CONFIG_NFS_LOCALIO */
+
+struct nfs_file_localio;
+static inline void nfs_close_local_fh(struct nfs_file_localio *nfl)
+{
+}
+static inline void nfsd_localio_ops_init(void)
+{
+}
+struct nfs_client;
+static inline void nfs_localio_disable_client(struct nfs_client *clp)
+{
+}
+
+#endif /* CONFIG_NFS_LOCALIO */
+
+#endif /* __LINUX_NFSLOCALIO_H */
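A sketch, not from this header, of the RCU discipline a caller of the nfsd_localio_operations table might follow; example_try_get_net() is hypothetical, and the rcu_read_lock() bracketing is an assumption mirroring nfs_to_nfsd_net_put() above.

static bool example_try_get_net(struct net *net)
{
	bool ok;

	/* nfsd may be unloaded at any time, so call through nfs_to under RCU */
	rcu_read_lock();
	ok = nfs_to->nfsd_net_try_get(net);
	rcu_read_unlock();
	return ok;
}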
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index b22782225f27..cbe5fd1dd2e7 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -8,6 +8,8 @@
#ifndef NL802154_H
#define NL802154_H
+#include <net/netlink.h>
+
#define IEEE802154_NL_NAME "802.15.4 MAC"
#define IEEE802154_MCAST_COORD_NAME "coordinator"
#define IEEE802154_MCAST_BEACON_NAME "beacon"
diff --git a/include/linux/nls.h b/include/linux/nls.h
index 499e486b3722..e0bf8367b274 100644
--- a/include/linux/nls.h
+++ b/include/linux/nls.h
@@ -47,7 +47,7 @@ enum utf16_endian {
/* nls_base.c */
extern int __register_nls(struct nls_table *, struct module *);
extern int unregister_nls(struct nls_table *);
-extern struct nls_table *load_nls(char *);
+extern struct nls_table *load_nls(const char *charset);
extern void unload_nls(struct nls_table *);
extern struct nls_table *load_nls_default(void);
#define register_nls(nls) __register_nls((nls), THIS_MODULE)
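Illustrative caller of the constified prototype above; example_get_table() is a hypothetical helper.

static struct nls_table *example_get_table(void)
{
	/* a string literal no longer needs a cast now that the parameter is const;
	 * pair with unload_nls() when the table is no longer needed */
	return load_nls("utf8");
}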
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 750c7f395ca9..cf3c6ab408aa 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -7,19 +7,18 @@
#include <linux/sched.h>
#include <asm/irq.h>
-#if defined(CONFIG_HAVE_NMI_WATCHDOG)
+
+/* Arch-specific watchdogs might need to share extra watchdog-related APIs. */
+#if defined(CONFIG_HARDLOCKUP_DETECTOR_ARCH) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
#include <asm/nmi.h>
#endif
#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
+void lockup_detector_retry_init(void);
void lockup_detector_soft_poweroff(void);
-void lockup_detector_cleanup(void);
-bool is_hardlockup(void);
extern int watchdog_user_enabled;
-extern int nmi_watchdog_user_enabled;
-extern int soft_watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
@@ -35,8 +34,8 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
#else /* CONFIG_LOCKUP_DETECTOR */
static inline void lockup_detector_init(void) { }
+static inline void lockup_detector_retry_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
-static inline void lockup_detector_cleanup(void) { }
#endif /* !CONFIG_LOCKUP_DETECTOR */
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
@@ -69,17 +68,17 @@ static inline void reset_hung_task_detector(void) { }
* 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
* bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
*
- * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
- * 'soft_watchdog_user_enabled' are variables that are only used as an
+ * 'watchdog_user_enabled', 'watchdog_hardlockup_user_enabled' and
+ * 'watchdog_softlockup_user_enabled' are variables that are only used as an
* 'interface' between the parameters in /proc/sys/kernel and the internal
* state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
* handled differently because its value is not boolean, and the lockup
* detectors are 'suspended' while 'watchdog_thresh' is equal zero.
*/
-#define NMI_WATCHDOG_ENABLED_BIT 0
-#define SOFT_WATCHDOG_ENABLED_BIT 1
-#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT)
-#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT)
+#define WATCHDOG_HARDLOCKUP_ENABLED_BIT 0
+#define WATCHDOG_SOFTOCKUP_ENABLED_BIT 1
+#define WATCHDOG_HARDLOCKUP_ENABLED (1 << WATCHDOG_HARDLOCKUP_ENABLED_BIT)
+#define WATCHDOG_SOFTOCKUP_ENABLED (1 << WATCHDOG_SOFTOCKUP_ENABLED_BIT)
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
@@ -88,50 +87,65 @@ extern unsigned int hardlockup_panic;
static inline void hardlockup_detector_disable(void) {}
#endif
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
-# define NMI_WATCHDOG_SYSCTL_PERM 0644
+/* Sparc64 has a special implementation that is always enabled. */
+#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
+void arch_touch_nmi_watchdog(void);
#else
-# define NMI_WATCHDOG_SYSCTL_PERM 0444
+static inline void arch_touch_nmi_watchdog(void) { }
+#endif
+
+#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)
+void watchdog_hardlockup_touch_cpu(unsigned int cpu);
+void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs);
#endif
#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
-extern void arch_touch_nmi_watchdog(void);
extern void hardlockup_detector_perf_stop(void);
extern void hardlockup_detector_perf_restart(void);
-extern void hardlockup_detector_perf_disable(void);
-extern void hardlockup_detector_perf_enable(void);
-extern void hardlockup_detector_perf_cleanup(void);
-extern int hardlockup_detector_perf_init(void);
+extern void hardlockup_config_perf_event(const char *str);
+extern void hardlockup_detector_perf_adjust_period(u64 period);
#else
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
-static inline void hardlockup_detector_perf_disable(void) { }
-static inline void hardlockup_detector_perf_enable(void) { }
-static inline void hardlockup_detector_perf_cleanup(void) { }
-# if !defined(CONFIG_HAVE_NMI_WATCHDOG)
-static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
-static inline void arch_touch_nmi_watchdog(void) {}
-# else
-static inline int hardlockup_detector_perf_init(void) { return 0; }
-# endif
+static inline void hardlockup_config_perf_event(const char *str) { }
+static inline void hardlockup_detector_perf_adjust_period(u64 period) { }
#endif
-void watchdog_nmi_stop(void);
-void watchdog_nmi_start(void);
-int watchdog_nmi_probe(void);
-int watchdog_nmi_enable(unsigned int cpu);
-void watchdog_nmi_disable(unsigned int cpu);
+void watchdog_hardlockup_stop(void);
+void watchdog_hardlockup_start(void);
+int watchdog_hardlockup_probe(void);
+void watchdog_hardlockup_enable(unsigned int cpu);
+void watchdog_hardlockup_disable(unsigned int cpu);
+
+void lockup_detector_reconfigure(void);
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_BUDDY
+void watchdog_buddy_check_hardlockup(int hrtimer_interrupts);
+#else
+static inline void watchdog_buddy_check_hardlockup(int hrtimer_interrupts) {}
+#endif
/**
- * touch_nmi_watchdog - restart NMI watchdog timeout.
+ * touch_nmi_watchdog - manually reset the hardlockup watchdog timeout.
*
- * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
- * may be used to reset the timeout - for code which intentionally
- * disables interrupts for a long time. This call is stateless.
+ * If we support detecting hardlockups, touch_nmi_watchdog() may be
+ * used to pet the watchdog (reset the timeout) - for code which
+ * intentionally disables interrupts for a long time. This call is stateless.
+ *
+ * Though this function has "nmi" in the name, the hardlockup watchdog might
+ * not be backed by NMIs. This function will likely be renamed to
+ * touch_hardlockup_watchdog() in the future.
*/
static inline void touch_nmi_watchdog(void)
{
+ /*
+ * Pass on to the hardlockup detector selected via CONFIG_. Note that
+	 * the hardlockup detector may not be arch-specific or use NMIs,
+ * and the arch_touch_nmi_watchdog() function will likely be renamed
+ * in the future.
+ */
arch_touch_nmi_watchdog();
+
touch_softlockup_watchdog();
}
@@ -143,31 +157,31 @@ static inline void touch_nmi_watchdog(void)
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
- arch_trigger_cpumask_backtrace(cpu_online_mask, false);
+ arch_trigger_cpumask_backtrace(cpu_online_mask, -1);
return true;
}
-static inline bool trigger_allbutself_cpu_backtrace(void)
+static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
{
- arch_trigger_cpumask_backtrace(cpu_online_mask, true);
+ arch_trigger_cpumask_backtrace(cpu_online_mask, exclude_cpu);
return true;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
- arch_trigger_cpumask_backtrace(mask, false);
+ arch_trigger_cpumask_backtrace(mask, -1);
return true;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
- arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
+ arch_trigger_cpumask_backtrace(cpumask_of(cpu), -1);
return true;
}
/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
- bool exclude_self,
+ int exclude_cpu,
void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);
@@ -176,7 +190,7 @@ static inline bool trigger_all_cpu_backtrace(void)
{
return false;
}
-static inline bool trigger_allbutself_cpu_backtrace(void)
+static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
{
return false;
}
@@ -192,24 +206,26 @@ static inline bool trigger_single_cpu_backtrace(int cpu)
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh);
+bool arch_perf_nmi_is_available(void);
#endif
#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
- defined(CONFIG_HARDLOCKUP_DETECTOR)
+ defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
void watchdog_update_hrtimer_threshold(u64 period);
#else
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
#endif
-struct ctl_table;
-int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
-int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
-int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
-int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *);
-int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *);
-
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif
+#ifdef CONFIG_NMI_CHECK_CPU
+void nmi_backtrace_stall_snap(const struct cpumask *btp);
+void nmi_backtrace_stall_check(const struct cpumask *btp);
+#else
+static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) {}
+static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {}
+#endif
+
#endif
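A sketch of the intended use of touch_nmi_watchdog() described above; example_long_poll() and its done() callback are hypothetical.

static void example_long_poll(bool (*done)(void))
{
	/* pets both the hardlockup and softlockup detectors each iteration,
	 * for code that intentionally runs with interrupts disabled */
	while (!done()) {
		touch_nmi_watchdog();
		cpu_relax();
	}
}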
diff --git a/include/linux/node.h b/include/linux/node.h
index 8e5a29897936..0269b064ba65 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -2,39 +2,49 @@
/*
* include/linux/node.h - generic node definition
*
- * This is mainly for topological representation. We define the
- * basic 'struct node' here, which can be embedded in per-arch
+ * This is mainly for topological representation. We define the
+ * basic 'struct node' here, which can be embedded in per-arch
* definitions of processors.
*
* Basic handling of the devices is done in drivers/base/node.c
- * and system devices are handled in drivers/base/sys.c.
+ * and system devices are handled in drivers/base/sys.c.
*
* Nodes are exported via driverfs in the class/node/devices/
- * directory.
+ * directory.
*/
#ifndef _LINUX_NODE_H_
#define _LINUX_NODE_H_
#include <linux/device.h>
-#include <linux/cpumask.h>
#include <linux/list.h>
-#include <linux/workqueue.h>
/**
- * struct node_hmem_attrs - heterogeneous memory performance attributes
+ * struct access_coordinate - generic performance coordinates container
*
* @read_bandwidth: Read bandwidth in MB/s
* @write_bandwidth: Write bandwidth in MB/s
* @read_latency: Read latency in nanoseconds
* @write_latency: Write latency in nanoseconds
*/
-struct node_hmem_attrs {
+struct access_coordinate {
unsigned int read_bandwidth;
unsigned int write_bandwidth;
unsigned int read_latency;
unsigned int write_latency;
};
+/*
+ * ACCESS_COORDINATE_LOCAL correlates to ACCESS CLASS 0
+ * - access_coordinate between target node and nearest initiator node
+ * ACCESS_COORDINATE_CPU correlates to ACCESS CLASS 1
+ * - access_coordinate between target node and nearest CPU node
+ */
+enum access_coordinate_class {
+ ACCESS_COORDINATE_LOCAL,
+ ACCESS_COORDINATE_CPU,
+ ACCESS_COORDINATE_MAX
+};
+
enum cache_indexing {
NODE_CACHE_DIRECT_MAP,
NODE_CACHE_INDEXED,
@@ -47,6 +57,11 @@ enum cache_write_policy {
NODE_CACHE_WRITE_OTHER,
};
+enum cache_mode {
+ NODE_CACHE_ADDR_MODE_RESERVED,
+ NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR,
+};
+
/**
* struct node_cache_attrs - system memory caching attributes
*
@@ -55,6 +70,7 @@ enum cache_write_policy {
* @size: Total size of cache in bytes
* @line_size: Number of bytes fetched on a cache miss
* @level: The cache hierarchy level
+ * @address_mode: The address mode
*/
struct node_cache_attrs {
enum cache_indexing indexing;
@@ -62,12 +78,15 @@ struct node_cache_attrs {
u64 size;
u16 line_size;
u8 level;
+ u16 address_mode;
};
#ifdef CONFIG_HMEM_REPORTING
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs);
-void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
- unsigned access);
+void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
+ enum access_coordinate_class access);
+void node_update_perf_attrs(unsigned int nid, struct access_coordinate *coord,
+ enum access_coordinate_class access);
#else
static inline void node_add_cache(unsigned int nid,
struct node_cache_attrs *cache_attrs)
@@ -75,8 +94,14 @@ static inline void node_add_cache(unsigned int nid,
}
static inline void node_set_perf_attrs(unsigned int nid,
- struct node_hmem_attrs *hmem_attrs,
- unsigned access)
+ struct access_coordinate *coord,
+ enum access_coordinate_class access)
+{
+}
+
+static inline void node_update_perf_attrs(unsigned int nid,
+ struct access_coordinate *coord,
+ enum access_coordinate_class access)
{
}
#endif
@@ -84,10 +109,6 @@ static inline void node_set_perf_attrs(unsigned int nid,
struct node {
struct device dev;
struct list_head access_list;
-
-#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
- struct work_struct node_work;
-#endif
#ifdef CONFIG_HMEM_REPORTING
struct list_head cache_attrs;
struct device *cache_dev;
@@ -96,68 +117,81 @@ struct node {
struct memory_block;
extern struct node *node_devices[];
-typedef void (*node_registration_func_t)(struct node *);
-#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
-void link_mem_sections(int nid, unsigned long start_pfn,
- unsigned long end_pfn,
- enum meminit_context context);
+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA)
+void register_memory_blocks_under_node_hotplug(int nid, unsigned long start_pfn,
+ unsigned long end_pfn);
#else
-static inline void link_mem_sections(int nid, unsigned long start_pfn,
- unsigned long end_pfn,
- enum meminit_context context)
+static inline void register_memory_blocks_under_node_hotplug(int nid,
+ unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+}
+static inline void register_memory_blocks_under_nodes(void)
{
}
#endif
-extern void unregister_node(struct node *node);
-#ifdef CONFIG_NUMA
-/* Core of the node registration - only memory hotplug should use this */
-extern int __register_one_node(int nid);
+struct node_notify {
+ int nid;
+};
-/* Registers an online node */
-static inline int register_one_node(int nid)
+#define NODE_ADDING_FIRST_MEMORY (1<<0)
+#define NODE_ADDED_FIRST_MEMORY (1<<1)
+#define NODE_CANCEL_ADDING_FIRST_MEMORY (1<<2)
+#define NODE_REMOVING_LAST_MEMORY (1<<3)
+#define NODE_REMOVED_LAST_MEMORY (1<<4)
+#define NODE_CANCEL_REMOVING_LAST_MEMORY (1<<5)
+
+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA)
+extern int register_node_notifier(struct notifier_block *nb);
+extern void unregister_node_notifier(struct notifier_block *nb);
+extern int node_notify(unsigned long val, void *v);
+
+#define hotplug_node_notifier(fn, pri) ({ \
+ static __meminitdata struct notifier_block fn##_node_nb =\
+ { .notifier_call = fn, .priority = pri };\
+ register_node_notifier(&fn##_node_nb); \
+})
+#else
+static inline int register_node_notifier(struct notifier_block *nb)
{
- int error = 0;
-
- if (node_online(nid)) {
- struct pglist_data *pgdat = NODE_DATA(nid);
- unsigned long start_pfn = pgdat->node_start_pfn;
- unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
-
- error = __register_one_node(nid);
- if (error)
- return error;
- /* link memory sections under this node */
- link_mem_sections(nid, start_pfn, end_pfn, MEMINIT_EARLY);
- }
-
- return error;
+ return 0;
+}
+static inline void unregister_node_notifier(struct notifier_block *nb)
+{
+}
+static inline int node_notify(unsigned long val, void *v)
+{
+ return 0;
+}
+static inline int hotplug_node_notifier(notifier_fn_t fn, int pri)
+{
+ return 0;
}
+#endif
-extern void unregister_one_node(int nid);
+#ifdef CONFIG_NUMA
+extern void node_dev_init(void);
+/* Core of the node registration - only memory hotplug should use this */
+int register_node(int nid);
+void unregister_node(int nid);
extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk);
extern int register_memory_node_under_compute_node(unsigned int mem_nid,
unsigned int cpu_nid,
- unsigned access);
-
-#ifdef CONFIG_HUGETLBFS
-extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
- node_registration_func_t unregister);
-#endif
+ enum access_coordinate_class access);
#else
-static inline int __register_one_node(int nid)
+static inline void node_dev_init(void)
{
- return 0;
}
-static inline int register_one_node(int nid)
+static inline int register_node(int nid)
{
return 0;
}
-static inline int unregister_one_node(int nid)
+static inline int unregister_node(int nid)
{
return 0;
}
@@ -172,11 +206,6 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
static inline void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
}
-
-static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
- node_registration_func_t unreg)
-{
-}
#endif
#define to_node(device) container_of(device, struct node, dev)
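A sketch of a consumer of the new node notifier, assuming the event semantics documented above; example_node_callback() is hypothetical.

static int example_node_callback(struct notifier_block *nb,
				 unsigned long action, void *arg)
{
	struct node_notify *nn = arg;

	if (action == NODE_ADDED_FIRST_MEMORY)
		pr_info("node %d gained its first memory\n", nn->nid);
	return NOTIFY_OK;
}

/* From an __init path: hotplug_node_notifier(example_node_callback, 0); */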
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index ac398e143c9a..bd38648c998d 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -39,14 +39,11 @@
* int nodes_full(mask) Is mask full (all bits sets)?
* int nodes_weight(mask) Hamming weight - number of set bits
*
- * void nodes_shift_right(dst, src, n) Shift right
- * void nodes_shift_left(dst, src, n) Shift left
- *
- * int first_node(mask) Number lowest set bit, or MAX_NUMNODES
- * int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
- * int next_node_in(node, mask) Next node past 'node', or wrap to first,
+ * unsigned int first_node(mask) Number lowest set bit, or MAX_NUMNODES
+ * unsigned int next_node(node, mask)	Next node past 'node', or MAX_NUMNODES
+ * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first,
* or MAX_NUMNODES
- * int first_unset_node(mask) First node not set in mask, or
+ * unsigned int first_unset_node(mask) First node not set in mask, or
* MAX_NUMNODES
*
* nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set
@@ -93,9 +90,9 @@
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/minmax.h>
-#include <linux/numa.h>
+#include <linux/nodemask_types.h>
+#include <linux/random.h>
-typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;
/**
@@ -106,11 +103,11 @@ extern nodemask_t _unused_nodemask_arg_;
*/
#define nodemask_pr_args(maskp) __nodemask_pr_numnodes(maskp), \
__nodemask_pr_bits(maskp)
-static inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
+static __always_inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
{
return m ? MAX_NUMNODES : 0;
}
-static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
+static __always_inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
{
return m ? m->bits : NULL;
}
@@ -119,7 +116,7 @@ static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
* The inline keyword gives the compiler room to decide to inline, or
* not inline a function as it sees best. However, as these functions
* are called in both __init and non-__init functions, if they are not
- * inlined we will end up with a section mis-match error (of the type of
+ * inlined we will end up with a section mismatch error (of the type of
* freeable items not being freed). So we must use __always_inline here
* to fix the problem. If other functions in the future also end up in
* this situation they will also need to be annotated as __always_inline
@@ -131,19 +128,19 @@ static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
}
#define node_clear(node, dst) __node_clear((node), &(dst))
-static inline void __node_clear(int node, volatile nodemask_t *dstp)
+static __always_inline void __node_clear(int node, volatile nodemask_t *dstp)
{
clear_bit(node, dstp->bits);
}
#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
-static inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
+static __always_inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
-static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
+static __always_inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
@@ -153,14 +150,14 @@ static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
#define node_test_and_set(node, nodemask) \
__node_test_and_set((node), &(nodemask))
-static inline int __node_test_and_set(int node, nodemask_t *addr)
+static __always_inline bool __node_test_and_set(int node, nodemask_t *addr)
{
return test_and_set_bit(node, addr->bits);
}
#define nodes_and(dst, src1, src2) \
__nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -168,7 +165,7 @@ static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_or(dst, src1, src2) \
__nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -176,7 +173,7 @@ static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_xor(dst, src1, src2) \
__nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
@@ -184,15 +181,22 @@ static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
#define nodes_andnot(dst, src1, src2) \
__nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
-static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
+static __always_inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
+#define nodes_copy(dst, src) __nodes_copy(&(dst), &(src), MAX_NUMNODES)
+static __always_inline void __nodes_copy(nodemask_t *dstp,
+ const nodemask_t *srcp, unsigned int nbits)
+{
+ bitmap_copy(dstp->bits, srcp->bits, nbits);
+}
+
#define nodes_complement(dst, src) \
__nodes_complement(&(dst), &(src), MAX_NUMNODES)
-static inline void __nodes_complement(nodemask_t *dstp,
+static __always_inline void __nodes_complement(nodemask_t *dstp,
const nodemask_t *srcp, unsigned int nbits)
{
bitmap_complement(dstp->bits, srcp->bits, nbits);
@@ -200,7 +204,7 @@ static inline void __nodes_complement(nodemask_t *dstp,
#define nodes_equal(src1, src2) \
__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_equal(const nodemask_t *src1p,
+static __always_inline bool __nodes_equal(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
@@ -208,7 +212,7 @@ static inline int __nodes_equal(const nodemask_t *src1p,
#define nodes_intersects(src1, src2) \
__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_intersects(const nodemask_t *src1p,
+static __always_inline bool __nodes_intersects(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
@@ -216,59 +220,43 @@ static inline int __nodes_intersects(const nodemask_t *src1p,
#define nodes_subset(src1, src2) \
__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
-static inline int __nodes_subset(const nodemask_t *src1p,
+static __always_inline bool __nodes_subset(const nodemask_t *src1p,
const nodemask_t *src2p, unsigned int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
-static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
+static __always_inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
+static __always_inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
-static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
+static __always_inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
-#define nodes_shift_right(dst, src, n) \
- __nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
-static inline void __nodes_shift_right(nodemask_t *dstp,
- const nodemask_t *srcp, int n, int nbits)
-{
- bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
-}
-
-#define nodes_shift_left(dst, src, n) \
- __nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
-static inline void __nodes_shift_left(nodemask_t *dstp,
- const nodemask_t *srcp, int n, int nbits)
-{
- bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
-}
-
/* FIXME: better would be to fix all architectures to never return
- > MAX_NUMNODES, then the silly min_ts could be dropped. */
+ > MAX_NUMNODES, then the silly min()s could be dropped. */
#define first_node(src) __first_node(&(src))
-static inline int __first_node(const nodemask_t *srcp)
+static __always_inline unsigned int __first_node(const nodemask_t *srcp)
{
- return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
+ return min(MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
}
#define next_node(n, src) __next_node((n), &(src))
-static inline int __next_node(int n, const nodemask_t *srcp)
+static __always_inline unsigned int __next_node(int n, const nodemask_t *srcp)
{
- return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
+ return min(MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
/*
@@ -276,9 +264,16 @@ static inline int __next_node(int n, const nodemask_t *srcp)
* the first node in src if needed. Returns MAX_NUMNODES if src is empty.
*/
#define next_node_in(n, src) __next_node_in((n), &(src))
-int __next_node_in(int node, const nodemask_t *srcp);
+static __always_inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
+{
+ unsigned int ret = __next_node(node, srcp);
-static inline void init_nodemask_of_node(nodemask_t *mask, int node)
+ if (ret == MAX_NUMNODES)
+ ret = __first_node(srcp);
+ return ret;
+}
+
+static __always_inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
nodes_clear(*mask);
node_set(node, *mask);
@@ -296,10 +291,9 @@ static inline void init_nodemask_of_node(nodemask_t *mask, int node)
})
#define first_unset_node(mask) __first_unset_node(&(mask))
-static inline int __first_unset_node(const nodemask_t *maskp)
+static __always_inline unsigned int __first_unset_node(const nodemask_t *maskp)
{
- return min_t(int,MAX_NUMNODES,
- find_first_zero_bit(maskp->bits, MAX_NUMNODES));
+ return min(MAX_NUMNODES, find_first_zero_bit(maskp->bits, MAX_NUMNODES));
}
#define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES)
@@ -330,21 +324,21 @@ static inline int __first_unset_node(const nodemask_t *maskp)
#define nodemask_parse_user(ubuf, ulen, dst) \
__nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
-static inline int __nodemask_parse_user(const char __user *buf, int len,
+static __always_inline int __nodemask_parse_user(const char __user *buf, int len,
nodemask_t *dstp, int nbits)
{
return bitmap_parse_user(buf, len, dstp->bits, nbits);
}
#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES)
-static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
+static __always_inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
return bitmap_parselist(buf, dstp->bits, nbits);
}
#define node_remap(oldbit, old, new) \
__node_remap((oldbit), &(old), &(new), MAX_NUMNODES)
-static inline int __node_remap(int oldbit,
+static __always_inline int __node_remap(int oldbit,
const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
@@ -352,7 +346,7 @@ static inline int __node_remap(int oldbit,
#define nodes_remap(dst, src, old, new) \
__nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES)
-static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
+static __always_inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
@@ -360,7 +354,7 @@ static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
#define nodes_onto(dst, orig, relmap) \
__nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
-static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
+static __always_inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
const nodemask_t *relmapp, int nbits)
{
bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
@@ -368,21 +362,20 @@ static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
#define nodes_fold(dst, orig, sz) \
__nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
-static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
+static __always_inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
int sz, int nbits)
{
bitmap_fold(dstp->bits, origp->bits, sz, nbits);
}
#if MAX_NUMNODES > 1
-#define for_each_node_mask(node, mask) \
- for ((node) = first_node(mask); \
- (node) < MAX_NUMNODES; \
- (node) = next_node((node), (mask)))
+#define for_each_node_mask(node, mask) \
+ for ((node) = first_node(mask); \
+ (node) < MAX_NUMNODES; \
+ (node) = next_node((node), (mask)))
#else /* MAX_NUMNODES == 1 */
-#define for_each_node_mask(node, mask) \
- if (!nodes_empty(mask)) \
- for ((node) = 0; (node) < 1; (node)++)
+#define for_each_node_mask(node, mask) \
+ for ((node) = 0; (node) < 1 && !nodes_empty(mask); (node)++)
#endif /* MAX_NUMNODES */
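Illustrative use of the iteration and wrap-around helpers above; example_pick_next() is a hypothetical round-robin selector.

static int example_pick_next(int prev, const nodemask_t *allowed)
{
	/* next_node_in() wraps past the last set bit back to first_node(),
	 * and returns MAX_NUMNODES only when the mask is empty */
	unsigned int nid = next_node_in(prev, *allowed);

	return nid < MAX_NUMNODES ? (int)nid : NUMA_NO_NODE;
}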
/*
@@ -411,22 +404,22 @@ enum node_states {
extern nodemask_t node_states[NR_NODE_STATES];
#if MAX_NUMNODES > 1
-static inline int node_state(int node, enum node_states state)
+static __always_inline int node_state(int node, enum node_states state)
{
return node_isset(node, node_states[state]);
}
-static inline void node_set_state(int node, enum node_states state)
+static __always_inline void node_set_state(int node, enum node_states state)
{
__node_set(node, &node_states[state]);
}
-static inline void node_clear_state(int node, enum node_states state)
+static __always_inline void node_clear_state(int node, enum node_states state)
{
__node_clear(node, &node_states[state]);
}
-static inline int num_node_state(enum node_states state)
+static __always_inline int num_node_state(enum node_states state)
{
return nodes_weight(node_states[state]);
}
@@ -436,11 +429,11 @@ static inline int num_node_state(enum node_states state)
#define first_online_node first_node(node_states[N_ONLINE])
#define first_memory_node first_node(node_states[N_MEMORY])
-static inline int next_online_node(int nid)
+static __always_inline unsigned int next_online_node(int nid)
{
return next_node(nid, node_states[N_ONLINE]);
}
-static inline int next_memory_node(int nid)
+static __always_inline unsigned int next_memory_node(int nid)
{
return next_node(nid, node_states[N_MEMORY]);
}
@@ -448,13 +441,13 @@ static inline int next_memory_node(int nid)
extern unsigned int nr_node_ids;
extern unsigned int nr_online_nodes;
-static inline void node_set_online(int nid)
+static __always_inline void node_set_online(int nid)
{
node_set_state(nid, N_ONLINE);
nr_online_nodes = num_node_state(N_ONLINE);
}
-static inline void node_set_offline(int nid)
+static __always_inline void node_set_offline(int nid)
{
node_clear_state(nid, N_ONLINE);
nr_online_nodes = num_node_state(N_ONLINE);
@@ -462,20 +455,20 @@ static inline void node_set_offline(int nid)
#else
-static inline int node_state(int node, enum node_states state)
+static __always_inline int node_state(int node, enum node_states state)
{
return node == 0;
}
-static inline void node_set_state(int node, enum node_states state)
+static __always_inline void node_set_state(int node, enum node_states state)
{
}
-static inline void node_clear_state(int node, enum node_states state)
+static __always_inline void node_clear_state(int node, enum node_states state)
{
}
-static inline int num_node_state(enum node_states state)
+static __always_inline int num_node_state(enum node_states state)
{
return 1;
}
@@ -486,6 +479,7 @@ static inline int num_node_state(enum node_states state)
#define first_online_node 0
#define first_memory_node 0
#define next_online_node(nid) (MAX_NUMNODES)
+#define next_memory_node(nid) (MAX_NUMNODES)
#define nr_node_ids 1U
#define nr_online_nodes 1U
@@ -494,14 +488,16 @@ static inline int num_node_state(enum node_states state)
#endif
+static __always_inline int node_random(const nodemask_t *maskp)
+{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
-extern int node_random(const nodemask_t *maskp);
+ int node = find_random_bit(maskp->bits, MAX_NUMNODES);
+
+ return node < MAX_NUMNODES ? node : NUMA_NO_NODE;
#else
-static inline int node_random(const nodemask_t *mask)
-{
return 0;
-}
#endif
+}
#define node_online_map node_states[N_ONLINE]
#define node_possible_map node_states[N_POSSIBLE]
@@ -513,9 +509,10 @@ static inline int node_random(const nodemask_t *mask)
#define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
+#define for_each_node_with_cpus(node) for_each_node_state(node, N_CPU)
/*
- * For nodemask scrach area.
+ * For nodemask scratch area.
* NODEMASK_ALLOC(type, name) allocates an object with a specified type and
* name.
*/
@@ -528,7 +525,7 @@ static inline int node_random(const nodemask_t *mask)
#define NODEMASK_FREE(m) do {} while (0)
#endif
-/* A example struture for using NODEMASK_ALLOC, used in mempolicy. */
+/* Example structure for using NODEMASK_ALLOC, used in mempolicy. */
struct nodemask_scratch {
nodemask_t mask1;
nodemask_t mask2;
diff --git a/include/linux/nodemask_types.h b/include/linux/nodemask_types.h
new file mode 100644
index 000000000000..f850a48742f1
--- /dev/null
+++ b/include/linux/nodemask_types.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_NODEMASK_TYPES_H
+#define __LINUX_NODEMASK_TYPES_H
+
+#include <linux/bitops.h>
+
+#ifdef CONFIG_NODES_SHIFT
+#define NODES_SHIFT CONFIG_NODES_SHIFT
+#else
+#define NODES_SHIFT 0
+#endif
+
+#define MAX_NUMNODES (1 << NODES_SHIFT)
+
+#define NUMA_NO_NODE (-1)
+
+typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
+
+#endif /* __LINUX_NODEMASK_TYPES_H */
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index c1e79f72cd89..9f0af4f116d9 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -11,6 +11,10 @@
struct task_struct;
+#ifndef barrier_nospec
+# define barrier_nospec() do { } while (0)
+#endif
+
/**
* array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
* @index: array element index
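A sketch of the canonical bounds-check-then-clamp pattern this header supports; example_read_entry() is hypothetical and uses array_index_nospec() from the same header.

static int example_read_entry(const int *table, size_t nr, size_t idx)
{
	if (idx >= nr)
		return -EINVAL;
	/* clamp idx so a mispredicted branch cannot speculatively overrun */
	idx = array_index_nospec(idx, nr);
	return table[idx];
}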
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 2fb373a5c1ed..01b6c9d9956f 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -73,6 +73,7 @@ struct raw_notifier_head {
struct srcu_notifier_head {
struct mutex mutex;
+ struct srcu_usage srcuu;
struct srcu_struct srcu;
struct notifier_block __rcu *head;
};
@@ -107,7 +108,8 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
{ \
.mutex = __MUTEX_INITIALIZER(name.mutex), \
.head = NULL, \
- .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \
+ .srcuu = __SRCU_USAGE_INIT(name.srcuu), \
+ .srcu = __SRCU_STRUCT_INIT(name.srcu, name.srcuu, pcpu, 0), \
}
#define ATOMIC_NOTIFIER_HEAD(name) \
@@ -150,6 +152,11 @@ extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
struct notifier_block *nb);
+extern int atomic_notifier_chain_register_unique_prio(
+ struct atomic_notifier_head *nh, struct notifier_block *nb);
+extern int blocking_notifier_chain_register_unique_prio(
+ struct blocking_notifier_head *nh, struct notifier_block *nb);
+
extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
struct notifier_block *nb);
extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
@@ -168,13 +175,13 @@ extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v);
-extern int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh,
- unsigned long val_up, unsigned long val_down, void *v);
extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v);
extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v);
+extern bool atomic_notifier_call_chain_is_empty(struct atomic_notifier_head *nh);
+
#define NOTIFY_DONE 0x0000 /* Don't care */
#define NOTIFY_OK 0x0001 /* Suits me */
#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
@@ -230,7 +237,5 @@ static inline int notifier_to_errno(int ret)
#define KBD_KEYSYM 0x0004 /* Keyboard keysym */
#define KBD_POST_KEYSYM 0x0005 /* Called after keyboard keysym interpretation */
-extern struct blocking_notifier_head reboot_notifier_list;
-
#endif /* __KERNEL__ */
#endif /* _LINUX_NOTIFIER_H */
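A sketch of the new unique-priority registration, under the assumption (not stated in this header) that a duplicate priority is rejected; the chain and callback are hypothetical.

static BLOCKING_NOTIFIER_HEAD(example_chain);

static int example_event(struct notifier_block *nb, unsigned long val, void *v)
{
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call	= example_event,
	.priority	= 10,
};

/* blocking_notifier_chain_register_unique_prio(&example_chain, &example_nb)
 * is assumed to fail if another block already holds priority 10.
 */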
diff --git a/include/linux/ns/ns_common_types.h b/include/linux/ns/ns_common_types.h
new file mode 100644
index 000000000000..b332b019b29c
--- /dev/null
+++ b/include/linux/ns/ns_common_types.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_NS_COMMON_TYPES_H
+#define _LINUX_NS_COMMON_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/ns/nstree_types.h>
+#include <linux/rbtree.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
+
+struct cgroup_namespace;
+struct dentry;
+struct ipc_namespace;
+struct mnt_namespace;
+struct net;
+struct pid_namespace;
+struct proc_ns_operations;
+struct time_namespace;
+struct user_namespace;
+struct uts_namespace;
+
+extern struct cgroup_namespace init_cgroup_ns;
+extern struct ipc_namespace init_ipc_ns;
+extern struct mnt_namespace init_mnt_ns;
+extern struct net init_net;
+extern struct pid_namespace init_pid_ns;
+extern struct time_namespace init_time_ns;
+extern struct user_namespace init_user_ns;
+extern struct uts_namespace init_uts_ns;
+
+extern const struct proc_ns_operations cgroupns_operations;
+extern const struct proc_ns_operations ipcns_operations;
+extern const struct proc_ns_operations mntns_operations;
+extern const struct proc_ns_operations netns_operations;
+extern const struct proc_ns_operations pidns_operations;
+extern const struct proc_ns_operations pidns_for_children_operations;
+extern const struct proc_ns_operations timens_operations;
+extern const struct proc_ns_operations timens_for_children_operations;
+extern const struct proc_ns_operations userns_operations;
+extern const struct proc_ns_operations utsns_operations;
+
+/*
+ * Namespace lifetimes are managed via a two-tier reference counting model:
+ *
+ * (1) __ns_ref (refcount_t): Main reference count tracking memory
+ * lifetime. Controls when the namespace structure itself is freed.
+ * It also pins the namespace in the namespace trees, whereas (2)
+ * only regulates its visibility to userspace.
+ *
+ * (2) __ns_ref_active (atomic_t): Reference count tracking active users.
+ * Controls visibility of the namespace in the namespace trees.
+ * Any live task that uses the namespace (via nsproxy or cred) holds
+ * an active reference. Any open file descriptor or bind-mount of
+ * the namespace holds an active reference. Once all tasks have
+ * exited their namespaces and all file descriptors and
+ * bind-mounts have been released, the active reference count drops
+ * to zero and the namespace becomes inactive. IOW, the namespace
+ * cannot be listed or opened via file handles anymore.
+ *
+ * Note that it is valid to transition from active to inactive and
+ * back from inactive to active, e.g. when resurrecting an inactive
+ * namespace tree via the SIOCGSKNS ioctl().
+ *
+ * Relationship and lifecycle states:
+ *
+ * - Active (__ns_ref_active > 0):
+ * Namespace is actively used and visible to userspace. The namespace
+ * can be reopened via /proc/<pid>/ns/<ns_type>, via namespace file
+ * handles, or discovered via listns().
+ *
+ * - Inactive (__ns_ref_active == 0, __ns_ref > 0):
+ * No tasks are actively using the namespace and it isn't pinned by
+ * any bind-mounts or open file descriptors anymore. But the namespace
+ * is still kept alive by internal references. For example, the user
+ * namespace could be pinned by an open file through file->f_cred
+ * references when one of the now defunct tasks had opened a file and
+ * handed the file descriptor off to another process via a UNIX
+ * socket. Such references keep the namespace structure alive through
+ * __ns_ref but will not hold an active reference.
+ *
+ * - Destroyed (__ns_ref == 0):
+ * No references remain. The namespace is removed from the tree and freed.
+ *
+ * State transitions:
+ *
+ * Active -> Inactive:
+ * When the last task using the namespace exits it drops its active
+ * references to all namespaces. However, user and pid namespaces
+ * remain accessible until the task has been reaped.
+ *
+ * Inactive -> Active:
+ * An inactive namespace tree might be resurrected, e.g. by the
+ * SIOCGSKNS ioctl() on a socket.
+ *
+ * Inactive -> Destroyed:
+ * When __ns_ref drops to zero the namespace is removed from the
+ * namespace trees and the memory is freed (after an RCU grace period).
+ *
+ * Initial namespaces:
+ * Boot-time namespaces (init_net, init_pid_ns, etc.) start with
+ * __ns_ref_active = 1 and remain active forever.
+ *
+ * @ns_type: type of namespace (e.g., CLONE_NEWNET)
+ * @stashed: cached dentry to be used by the vfs
+ * @ops: namespace operations
+ * @inum: namespace inode number (quickly recycled for non-initial namespaces)
+ * @__ns_ref: main reference count (do not use directly)
+ * @ns_tree: namespace tree nodes and active reference count
+ */
+struct ns_common {
+ u32 ns_type;
+ struct dentry *stashed;
+ const struct proc_ns_operations *ops;
+ unsigned int inum;
+ refcount_t __ns_ref; /* do not use directly */
+ union {
+ struct ns_tree;
+ struct rcu_head ns_rcu;
+ };
+};
+
+#define to_ns_common(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: &(__ns)->ns, \
+ const struct cgroup_namespace *: &(__ns)->ns, \
+ struct ipc_namespace *: &(__ns)->ns, \
+ const struct ipc_namespace *: &(__ns)->ns, \
+ struct mnt_namespace *: &(__ns)->ns, \
+ const struct mnt_namespace *: &(__ns)->ns, \
+ struct net *: &(__ns)->ns, \
+ const struct net *: &(__ns)->ns, \
+ struct pid_namespace *: &(__ns)->ns, \
+ const struct pid_namespace *: &(__ns)->ns, \
+ struct time_namespace *: &(__ns)->ns, \
+ const struct time_namespace *: &(__ns)->ns, \
+ struct user_namespace *: &(__ns)->ns, \
+ const struct user_namespace *: &(__ns)->ns, \
+ struct uts_namespace *: &(__ns)->ns, \
+ const struct uts_namespace *: &(__ns)->ns)
+
+#define ns_init_inum(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: CGROUP_NS_INIT_INO, \
+ struct ipc_namespace *: IPC_NS_INIT_INO, \
+ struct mnt_namespace *: MNT_NS_INIT_INO, \
+ struct net *: NET_NS_INIT_INO, \
+ struct pid_namespace *: PID_NS_INIT_INO, \
+ struct time_namespace *: TIME_NS_INIT_INO, \
+ struct user_namespace *: USER_NS_INIT_INO, \
+ struct uts_namespace *: UTS_NS_INIT_INO)
+
+#define ns_init_ns(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: &init_cgroup_ns, \
+ struct ipc_namespace *: &init_ipc_ns, \
+ struct mnt_namespace *: &init_mnt_ns, \
+ struct net *: &init_net, \
+ struct pid_namespace *: &init_pid_ns, \
+ struct time_namespace *: &init_time_ns, \
+ struct user_namespace *: &init_user_ns, \
+ struct uts_namespace *: &init_uts_ns)
+
+#define ns_init_id(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: CGROUP_NS_INIT_ID, \
+ struct ipc_namespace *: IPC_NS_INIT_ID, \
+ struct mnt_namespace *: MNT_NS_INIT_ID, \
+ struct net *: NET_NS_INIT_ID, \
+ struct pid_namespace *: PID_NS_INIT_ID, \
+ struct time_namespace *: TIME_NS_INIT_ID, \
+ struct user_namespace *: USER_NS_INIT_ID, \
+ struct uts_namespace *: UTS_NS_INIT_ID)
+
+#define to_ns_operations(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: (IS_ENABLED(CONFIG_CGROUPS) ? &cgroupns_operations : NULL), \
+ struct ipc_namespace *: (IS_ENABLED(CONFIG_IPC_NS) ? &ipcns_operations : NULL), \
+ struct mnt_namespace *: &mntns_operations, \
+ struct net *: (IS_ENABLED(CONFIG_NET_NS) ? &netns_operations : NULL), \
+ struct pid_namespace *: (IS_ENABLED(CONFIG_PID_NS) ? &pidns_operations : NULL), \
+ struct time_namespace *: (IS_ENABLED(CONFIG_TIME_NS) ? &timens_operations : NULL), \
+ struct user_namespace *: (IS_ENABLED(CONFIG_USER_NS) ? &userns_operations : NULL), \
+ struct uts_namespace *: (IS_ENABLED(CONFIG_UTS_NS) ? &utsns_operations : NULL))
+
+#define ns_common_type(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: CLONE_NEWCGROUP, \
+ struct ipc_namespace *: CLONE_NEWIPC, \
+ struct mnt_namespace *: CLONE_NEWNS, \
+ struct net *: CLONE_NEWNET, \
+ struct pid_namespace *: CLONE_NEWPID, \
+ struct time_namespace *: CLONE_NEWTIME, \
+ struct user_namespace *: CLONE_NEWUSER, \
+ struct uts_namespace *: CLONE_NEWUTS)
+
+#endif /* _LINUX_NS_COMMON_TYPES_H */
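
The lifecycle comment above compresses into the following sketch, using the
accessor macros introduced later in this series (error handling elided,
free_my_ns() is a hypothetical free path):

    /* a new user shows up: hold both tiers */
    if (ns_ref_get(ns))             /* memory lifetime, __ns_ref    */
        ns_ref_active_get(ns);      /* userspace visibility, tier 2 */

    /* the user goes away: drop in the reverse order */
    ns_ref_active_put(ns);          /* Active -> Inactive at zero */
    if (ns_ref_put(ns))
        free_my_ns(ns);             /* Inactive -> Destroyed at zero */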
diff --git a/include/linux/ns/nstree_types.h b/include/linux/ns/nstree_types.h
new file mode 100644
index 000000000000..2fb28ee31efb
--- /dev/null
+++ b/include/linux/ns/nstree_types.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Christian Brauner <brauner@kernel.org> */
+#ifndef _LINUX_NSTREE_TYPES_H
+#define _LINUX_NSTREE_TYPES_H
+
+#include <linux/rbtree.h>
+#include <linux/list.h>
+
+/**
+ * struct ns_tree_root - Root of a namespace tree
+ * @ns_rb: Red-black tree root for efficient lookups
+ * @ns_list_head: List head for sequential iteration
+ *
+ * Each namespace tree maintains both an rbtree (for O(log n) lookups)
+ * and a list (for efficient sequential iteration). The list is kept in
+ * the same sorted order as the rbtree.
+ */
+struct ns_tree_root {
+ struct rb_root ns_rb;
+ struct list_head ns_list_head;
+};
+
+/**
+ * struct ns_tree_node - Node in a namespace tree
+ * @ns_node: Red-black tree node
+ * @ns_list_entry: List entry for sequential iteration
+ *
+ * Represents a namespace's position in a tree. Each namespace has
+ * multiple tree nodes for different trees (unified, per-type, owner).
+ */
+struct ns_tree_node {
+ struct rb_node ns_node;
+ struct list_head ns_list_entry;
+};
+
+/**
+ * struct ns_tree - Namespace tree nodes and active reference count
+ * @ns_id: Unique namespace identifier
+ * @__ns_ref_active: Active reference count (do not use directly)
+ * @ns_unified_node: Node in the global namespace tree
+ * @ns_tree_node: Node in the per-type namespace tree
+ * @ns_owner_node: Node in the owner namespace's tree of owned namespaces
+ * @ns_owner_root: Root of the tree of namespaces owned by this namespace
+ * (only used when this namespace is an owner)
+ */
+struct ns_tree {
+ u64 ns_id;
+ atomic_t __ns_ref_active;
+ struct ns_tree_node ns_unified_node;
+ struct ns_tree_node ns_tree_node;
+ struct ns_tree_node ns_owner_node;
+ struct ns_tree_root ns_owner_root;
+};
+
+#endif /* _LINUX_NSTREE_TYPES_H */
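
The dual rbtree/list layout buys two access patterns for one insertion: an
O(log n) lookup by id through ns_rb and an ordered O(n) walk through
ns_list_head, which the comment above guarantees stays list-sorted. The walk
side, sketched:

    struct ns_tree_node *cur;

    list_for_each_entry(cur, &root->ns_list_head, ns_list_entry) {
        /* visited in the same order as an in-order rbtree traversal */
    }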
diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h
index 0f1d024bd958..825f5865bfc5 100644
--- a/include/linux/ns_common.h
+++ b/include/linux/ns_common.h
@@ -2,15 +2,149 @@
#ifndef _LINUX_NS_COMMON_H
#define _LINUX_NS_COMMON_H
+#include <linux/ns/ns_common_types.h>
#include <linux/refcount.h>
+#include <linux/vfsdebug.h>
+#include <uapi/linux/sched.h>
+#include <uapi/linux/nsfs.h>
-struct proc_ns_operations;
+bool is_current_namespace(struct ns_common *ns);
+int __ns_common_init(struct ns_common *ns, u32 ns_type, const struct proc_ns_operations *ops, int inum);
+void __ns_common_free(struct ns_common *ns);
+struct ns_common *__must_check ns_owner(struct ns_common *ns);
-struct ns_common {
- atomic_long_t stashed;
- const struct proc_ns_operations *ops;
- unsigned int inum;
- refcount_t count;
-};
+static __always_inline bool is_ns_init_inum(const struct ns_common *ns)
+{
+ VFS_WARN_ON_ONCE(ns->inum == 0);
+ return unlikely(in_range(ns->inum, MNT_NS_INIT_INO,
+ IPC_NS_INIT_INO - MNT_NS_INIT_INO + 1));
+}
+
+static __always_inline bool is_ns_init_id(const struct ns_common *ns)
+{
+ VFS_WARN_ON_ONCE(ns->ns_id == 0);
+ return ns->ns_id <= NS_LAST_INIT_ID;
+}
+
+#define NS_COMMON_INIT(nsname) \
+{ \
+ .ns_type = ns_common_type(&nsname), \
+ .ns_id = ns_init_id(&nsname), \
+ .inum = ns_init_inum(&nsname), \
+ .ops = to_ns_operations(&nsname), \
+ .stashed = NULL, \
+ .__ns_ref = REFCOUNT_INIT(1), \
+ .__ns_ref_active = ATOMIC_INIT(1), \
+ .ns_unified_node.ns_list_entry = LIST_HEAD_INIT(nsname.ns.ns_unified_node.ns_list_entry), \
+ .ns_tree_node.ns_list_entry = LIST_HEAD_INIT(nsname.ns.ns_tree_node.ns_list_entry), \
+ .ns_owner_node.ns_list_entry = LIST_HEAD_INIT(nsname.ns.ns_owner_node.ns_list_entry), \
+ .ns_owner_root.ns_list_head = LIST_HEAD_INIT(nsname.ns.ns_owner_root.ns_list_head), \
+}
+
+#define ns_common_init(__ns) \
+ __ns_common_init(to_ns_common(__ns), \
+ ns_common_type(__ns), \
+ to_ns_operations(__ns), \
+ (((__ns) == ns_init_ns(__ns)) ? ns_init_inum(__ns) : 0))
+
+#define ns_common_init_inum(__ns, __inum) \
+ __ns_common_init(to_ns_common(__ns), \
+ ns_common_type(__ns), \
+ to_ns_operations(__ns), \
+ __inum)
+
+#define ns_common_free(__ns) __ns_common_free(to_ns_common((__ns)))
+
+static __always_inline __must_check int __ns_ref_active_read(const struct ns_common *ns)
+{
+ return atomic_read(&ns->__ns_ref_active);
+}
+
+static __always_inline __must_check int __ns_ref_read(const struct ns_common *ns)
+{
+ return refcount_read(&ns->__ns_ref);
+}
+
+static __always_inline __must_check bool __ns_ref_put(struct ns_common *ns)
+{
+ if (is_ns_init_id(ns)) {
+ VFS_WARN_ON_ONCE(__ns_ref_read(ns) != 1);
+ VFS_WARN_ON_ONCE(__ns_ref_active_read(ns) != 1);
+ return false;
+ }
+ if (refcount_dec_and_test(&ns->__ns_ref)) {
+ VFS_WARN_ON_ONCE(__ns_ref_active_read(ns));
+ return true;
+ }
+ return false;
+}
+
+static __always_inline __must_check bool __ns_ref_get(struct ns_common *ns)
+{
+ if (is_ns_init_id(ns)) {
+ VFS_WARN_ON_ONCE(__ns_ref_read(ns) != 1);
+ VFS_WARN_ON_ONCE(__ns_ref_active_read(ns) != 1);
+ return true;
+ }
+ if (refcount_inc_not_zero(&ns->__ns_ref))
+ return true;
+ VFS_WARN_ON_ONCE(__ns_ref_active_read(ns));
+ return false;
+}
+
+static __always_inline void __ns_ref_inc(struct ns_common *ns)
+{
+ if (is_ns_init_id(ns)) {
+ VFS_WARN_ON_ONCE(__ns_ref_read(ns) != 1);
+ VFS_WARN_ON_ONCE(__ns_ref_active_read(ns) != 1);
+ return;
+ }
+ refcount_inc(&ns->__ns_ref);
+}
+
+static __always_inline __must_check bool __ns_ref_dec_and_lock(struct ns_common *ns,
+ spinlock_t *ns_lock)
+{
+ if (is_ns_init_id(ns)) {
+ VFS_WARN_ON_ONCE(__ns_ref_read(ns) != 1);
+ VFS_WARN_ON_ONCE(__ns_ref_active_read(ns) != 1);
+ return false;
+ }
+ return refcount_dec_and_lock(&ns->__ns_ref, ns_lock);
+}
+
+#define ns_ref_read(__ns) __ns_ref_read(to_ns_common((__ns)))
+#define ns_ref_inc(__ns) \
+ do { if (__ns) __ns_ref_inc(to_ns_common((__ns))); } while (0)
+#define ns_ref_get(__ns) \
+ ((__ns) ? __ns_ref_get(to_ns_common((__ns))) : false)
+#define ns_ref_put(__ns) \
+ ((__ns) ? __ns_ref_put(to_ns_common((__ns))) : false)
+#define ns_ref_put_and_lock(__ns, __ns_lock) \
+ ((__ns) ? __ns_ref_dec_and_lock(to_ns_common((__ns)), __ns_lock) : false)
+
+#define ns_ref_active_read(__ns) \
+ ((__ns) ? __ns_ref_active_read(to_ns_common(__ns)) : 0)
+
+void __ns_ref_active_put(struct ns_common *ns);
+
+#define ns_ref_active_put(__ns) \
+ do { if (__ns) __ns_ref_active_put(to_ns_common(__ns)); } while (0)
+
+static __always_inline struct ns_common *__must_check ns_get_unless_inactive(struct ns_common *ns)
+{
+ if (!__ns_ref_active_read(ns)) {
+ VFS_WARN_ON_ONCE(is_ns_init_id(ns));
+ return NULL;
+ }
+ if (!__ns_ref_get(ns))
+ return NULL;
+ return ns;
+}
+
+void __ns_ref_active_get(struct ns_common *ns);
+
+#define ns_ref_active_get(__ns) \
+ do { if (__ns) __ns_ref_active_get(to_ns_common(__ns)); } while (0)
#endif
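
The intended acquire pattern under RCU, sketched with hedged lookup and free
paths (both live elsewhere in the series):

    rcu_read_lock();
    ns = ns_tree_lookup_rcu(ns_id, ns_type);
    if (ns)
        ns = ns_get_unless_inactive(ns);    /* NULL if invisible or dying */
    rcu_read_unlock();
    if (!ns)
        return -ENOENT;

    /* ... use the namespace ... */

    if (__ns_ref_put(ns))
        free_my_ns(ns);                     /* hypothetical free path */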
diff --git a/include/linux/nsfs.h b/include/linux/nsfs.h
new file mode 100644
index 000000000000..731b67fc2fec
--- /dev/null
+++ b/include/linux/nsfs.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Christian Brauner <brauner@kernel.org> */
+
+#ifndef _LINUX_NSFS_H
+#define _LINUX_NSFS_H
+
+#include <linux/ns_common.h>
+#include <linux/cred.h>
+#include <linux/pid_namespace.h>
+
+struct path;
+struct task_struct;
+struct proc_ns_operations;
+
+int ns_get_path(struct path *path, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops);
+typedef struct ns_common *ns_get_path_helper_t(void *);
+int ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
+ void *private_data);
+
+bool ns_match(const struct ns_common *ns, dev_t dev, ino_t ino);
+
+int ns_get_name(char *buf, size_t size, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops);
+void nsfs_init(void);
+
+#define __current_namespace_from_type(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: current->nsproxy->cgroup_ns, \
+ struct ipc_namespace *: current->nsproxy->ipc_ns, \
+ struct net *: current->nsproxy->net_ns, \
+ struct pid_namespace *: task_active_pid_ns(current), \
+ struct mnt_namespace *: current->nsproxy->mnt_ns, \
+ struct time_namespace *: current->nsproxy->time_ns, \
+ struct user_namespace *: current_user_ns(), \
+ struct uts_namespace *: current->nsproxy->uts_ns)
+
+#define current_in_namespace(__ns) (__current_namespace_from_type(__ns) == __ns)
+
+void nsproxy_ns_active_get(struct nsproxy *ns);
+void nsproxy_ns_active_put(struct nsproxy *ns);
+
+#endif /* _LINUX_NSFS_H */
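
Because __current_namespace_from_type() dispatches on the pointer type via
_Generic, current_in_namespace() needs no per-type wrappers; e.g. with a net
namespace pointer:

    struct net *net = sock_net(sk);     /* any typed namespace pointer */

    if (current_in_namespace(net)) {
        /* expands to: current->nsproxy->net_ns == net */
    }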
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index cdb171efc7cb..5a67648721c7 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_NSPROXY_H
#define _LINUX_NSPROXY_H
+#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
@@ -29,7 +30,7 @@ struct fs_struct;
* nsproxy is copied.
*/
struct nsproxy {
- atomic_t count;
+ refcount_t count;
struct uts_namespace *uts_ns;
struct ipc_namespace *ipc_ns;
struct mnt_namespace *mnt_ns;
@@ -91,24 +92,29 @@ static inline struct cred *nsset_cred(struct nsset *set)
*
*/
-int copy_namespaces(unsigned long flags, struct task_struct *tsk);
-void exit_task_namespaces(struct task_struct *tsk);
+int copy_namespaces(u64 flags, struct task_struct *tsk);
+void switch_cred_namespaces(const struct cred *old, const struct cred *new);
+void exit_nsproxy_namespaces(struct task_struct *tsk);
+void get_cred_namespaces(struct task_struct *tsk);
+void exit_cred_namespaces(struct task_struct *tsk);
void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
-void free_nsproxy(struct nsproxy *ns);
+int exec_task_namespaces(void);
+void deactivate_nsproxy(struct nsproxy *ns);
int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **,
struct cred *, struct fs_struct *);
int __init nsproxy_cache_init(void);
static inline void put_nsproxy(struct nsproxy *ns)
{
- if (atomic_dec_and_test(&ns->count)) {
- free_nsproxy(ns);
- }
+ if (refcount_dec_and_test(&ns->count))
+ deactivate_nsproxy(ns);
}
static inline void get_nsproxy(struct nsproxy *ns)
{
- atomic_inc(&ns->count);
+ refcount_inc(&ns->count);
}
+DEFINE_FREE(put_nsproxy, struct nsproxy *, if (_T) put_nsproxy(_T))
+
#endif
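
The DEFINE_FREE() hook enables scope-based cleanup via <linux/cleanup.h>; a
minimal sketch, assuming a hypothetical grab_nsproxy() helper that returns a
referenced nsproxy or NULL:

    struct nsproxy *ns __free(put_nsproxy) = grab_nsproxy(tsk);
    if (!ns)
        return -ESRCH;

    if (some_check_fails)       /* placeholder condition */
        return -EINVAL;         /* put_nsproxy(ns) runs automatically */

    *out = no_free_ptr(ns);     /* transfer the reference to the caller */
    return 0;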
diff --git a/include/linux/nstree.h b/include/linux/nstree.h
new file mode 100644
index 000000000000..175e4625bfa6
--- /dev/null
+++ b/include/linux/nstree.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025 Christian Brauner <brauner@kernel.org> */
+#ifndef _LINUX_NSTREE_H
+#define _LINUX_NSTREE_H
+
+#include <linux/ns/nstree_types.h>
+#include <linux/nsproxy.h>
+#include <linux/rbtree.h>
+#include <linux/seqlock.h>
+#include <linux/rculist.h>
+#include <linux/cookie.h>
+#include <uapi/linux/nsfs.h>
+
+struct ns_common;
+
+extern struct ns_tree_root cgroup_ns_tree;
+extern struct ns_tree_root ipc_ns_tree;
+extern struct ns_tree_root mnt_ns_tree;
+extern struct ns_tree_root net_ns_tree;
+extern struct ns_tree_root pid_ns_tree;
+extern struct ns_tree_root time_ns_tree;
+extern struct ns_tree_root user_ns_tree;
+extern struct ns_tree_root uts_ns_tree;
+
+void ns_tree_node_init(struct ns_tree_node *node);
+void ns_tree_root_init(struct ns_tree_root *root);
+bool ns_tree_node_empty(const struct ns_tree_node *node);
+struct rb_node *ns_tree_node_add(struct ns_tree_node *node,
+ struct ns_tree_root *root,
+ int (*cmp)(struct rb_node *, const struct rb_node *));
+void ns_tree_node_del(struct ns_tree_node *node, struct ns_tree_root *root);
+
+#define to_ns_tree(__ns) \
+ _Generic((__ns), \
+ struct cgroup_namespace *: &(cgroup_ns_tree), \
+ struct ipc_namespace *: &(ipc_ns_tree), \
+ struct net *: &(net_ns_tree), \
+ struct pid_namespace *: &(pid_ns_tree), \
+ struct mnt_namespace *: &(mnt_ns_tree), \
+ struct time_namespace *: &(time_ns_tree), \
+ struct user_namespace *: &(user_ns_tree), \
+ struct uts_namespace *: &(uts_ns_tree))
+
+#define ns_tree_gen_id(__ns) \
+ __ns_tree_gen_id(to_ns_common(__ns), \
+ (((__ns) == ns_init_ns(__ns)) ? ns_init_id(__ns) : 0))
+
+u64 __ns_tree_gen_id(struct ns_common *ns, u64 id);
+void __ns_tree_add_raw(struct ns_common *ns, struct ns_tree_root *ns_tree);
+void __ns_tree_remove(struct ns_common *ns, struct ns_tree_root *ns_tree);
+struct ns_common *ns_tree_lookup_rcu(u64 ns_id, int ns_type);
+struct ns_common *__ns_tree_adjoined_rcu(struct ns_common *ns,
+ struct ns_tree_root *ns_tree,
+ bool previous);
+
+static inline void __ns_tree_add(struct ns_common *ns, struct ns_tree_root *ns_tree, u64 id)
+{
+ __ns_tree_gen_id(ns, id);
+ __ns_tree_add_raw(ns, ns_tree);
+}
+
+/**
+ * ns_tree_add_raw - Add a namespace to a namespace tree
+ * @ns: Namespace to add
+ *
+ * This function adds a namespace to the appropriate namespace tree
+ * without assigning an id.
+ */
+#define ns_tree_add_raw(__ns) __ns_tree_add_raw(to_ns_common(__ns), to_ns_tree(__ns))
+
+/**
+ * ns_tree_add - Add a namespace to a namespace tree
+ * @ns: Namespace to add
+ *
+ * This function assigns a new id to the namespace and adds it to the
+ * appropriate namespace tree and list.
+ */
+#define ns_tree_add(__ns) \
+ __ns_tree_add(to_ns_common(__ns), to_ns_tree(__ns), \
+ (((__ns) == ns_init_ns(__ns)) ? ns_init_id(__ns) : 0))
+
+/**
+ * ns_tree_remove - Remove a namespace from a namespace tree
+ * @ns: Namespace to remove
+ *
+ * This function removes a namespace from the appropriate namespace
+ * tree and list.
+ */
+#define ns_tree_remove(__ns) __ns_tree_remove(to_ns_common(__ns), to_ns_tree(__ns))
+
+#define ns_tree_adjoined_rcu(__ns, __previous) \
+ __ns_tree_adjoined_rcu(to_ns_common(__ns), to_ns_tree(__ns), __previous)
+
+#define ns_tree_active(__ns) (!RB_EMPTY_NODE(&to_ns_common(__ns)->ns_tree_node.ns_node))
+
+#endif /* _LINUX_NSTREE_H */
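
Publication and teardown reduce to one call each; a hedged sketch of the
expected call sites:

    /* at creation: assign an id (the fixed init id for initial
     * namespaces, a fresh one otherwise) and publish the namespace */
    ns_tree_add(new_ns);

    /* at teardown, before __ns_ref drops to zero */
    ns_tree_remove(new_ns);

    /* readers resolve ids under RCU */
    rcu_read_lock();
    ns = ns_tree_lookup_rcu(ns_id, CLONE_NEWNET);
    rcu_read_unlock();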
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index eba50b057f6f..4d103ac8f5c7 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -86,13 +86,12 @@ extern struct list_head nubus_func_rsrcs;
struct nubus_driver {
struct device_driver driver;
int (*probe)(struct nubus_board *board);
- int (*remove)(struct nubus_board *board);
+ void (*remove)(struct nubus_board *board);
};
-extern struct bus_type nubus_bus_type;
-
/* Generic NuBus interface functions, modelled after the PCI interface */
#ifdef CONFIG_PROC_FS
+extern bool nubus_populate_procfs;
void nubus_proc_init(void);
struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board);
struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir,
diff --git a/include/linux/numa.h b/include/linux/numa.h
index cb44cfe2b725..e6baaf6051bc 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -1,17 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_NUMA_H
#define _LINUX_NUMA_H
+#include <linux/init.h>
#include <linux/types.h>
+#include <linux/nodemask.h>
-#ifdef CONFIG_NODES_SHIFT
-#define NODES_SHIFT CONFIG_NODES_SHIFT
-#else
-#define NODES_SHIFT 0
-#endif
+#define NUMA_NO_MEMBLK (-1)
-#define MAX_NUMNODES (1 << NODES_SHIFT)
-
-#define NUMA_NO_NODE (-1)
+static inline bool numa_valid_node(int nid)
+{
+ return nid >= 0 && nid < MAX_NUMNODES;
+}
/* optionally keep NUMA memory info available post init */
#ifdef CONFIG_NUMA_KEEP_MEMINFO
@@ -21,33 +20,40 @@
#endif
#ifdef CONFIG_NUMA
-#include <linux/printk.h>
#include <asm/sparsemem.h>
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid) (node_data[nid])
+
+void __init alloc_node_data(int nid);
+void __init alloc_offline_node_data(int nid);
+
/* Generic implementation available */
-int numa_map_to_online_node(int node);
+int numa_nearest_node(int node, unsigned int state);
+
+int nearest_node_nodemask(int node, nodemask_t *mask);
#ifndef memory_add_physaddr_to_nid
-static inline int memory_add_physaddr_to_nid(u64 start)
-{
- pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
- start);
- return 0;
-}
+int memory_add_physaddr_to_nid(u64 start);
#endif
+
#ifndef phys_to_target_node
-static inline int phys_to_target_node(u64 start)
-{
- pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
- start);
- return 0;
-}
+int phys_to_target_node(u64 start);
#endif
+
+int numa_fill_memblks(u64 start, u64 end);
+
#else /* !CONFIG_NUMA */
-static inline int numa_map_to_online_node(int node)
+static inline int numa_nearest_node(int node, unsigned int state)
+{
+ return NUMA_NO_NODE;
+}
+
+static inline int nearest_node_nodemask(int node, nodemask_t *mask)
{
return NUMA_NO_NODE;
}
+
static inline int memory_add_physaddr_to_nid(u64 start)
{
return 0;
@@ -56,6 +62,14 @@ static inline int phys_to_target_node(u64 start)
{
return 0;
}
+
+static inline void alloc_offline_node_data(int nid) {}
+#endif
+
+#define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE)
+
+#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
+extern const struct attribute_group arch_node_dev_group;
#endif
#endif /* _LINUX_NUMA_H */
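
numa_nearest_node() generalizes the old numa_map_to_online_node() by taking
the node state to search; a sketch of typical fallback handling (the node-0
fallback is an assumption, not policy from this header):

    int nid = numa_nearest_node(dev_to_node(dev), N_MEMORY);

    if (!numa_valid_node(nid))
        nid = 0;    /* e.g. !CONFIG_NUMA, where NUMA_NO_NODE is returned */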
diff --git a/include/linux/numa_memblks.h b/include/linux/numa_memblks.h
new file mode 100644
index 000000000000..991076cba7c5
--- /dev/null
+++ b/include/linux/numa_memblks.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NUMA_MEMBLKS_H
+#define __NUMA_MEMBLKS_H
+
+#ifdef CONFIG_NUMA_MEMBLKS
+#include <linux/types.h>
+
+#define NR_NODE_MEMBLKS (MAX_NUMNODES * 2)
+
+void __init numa_set_distance(int from, int to, int distance);
+void __init numa_reset_distance(void);
+
+struct numa_memblk {
+ u64 start;
+ u64 end;
+ int nid;
+};
+
+struct numa_meminfo {
+ int nr_blks;
+ struct numa_memblk blk[NR_NODE_MEMBLKS];
+};
+
+int __init numa_add_memblk(int nodeid, u64 start, u64 end);
+int __init numa_add_reserved_memblk(int nid, u64 start, u64 end);
+void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi);
+
+int __init numa_cleanup_meminfo(struct numa_meminfo *mi);
+
+int __init numa_memblks_init(int (*init_func)(void),
+ bool memblock_force_top_down);
+
+extern int numa_distance_cnt;
+
+#ifdef CONFIG_NUMA_EMU
+extern int emu_nid_to_phys[MAX_NUMNODES];
+int numa_emu_cmdline(char *str);
+void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
+ unsigned int nr_emu_nids);
+u64 __init numa_emu_dma_end(void);
+void __init numa_emulation(struct numa_meminfo *numa_meminfo,
+ int numa_dist_cnt);
+#else
+static inline void numa_emulation(struct numa_meminfo *numa_meminfo,
+ int numa_dist_cnt)
+{ }
+static inline int numa_emu_cmdline(char *str)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_NUMA_EMU */
+
+#ifdef CONFIG_NUMA_KEEP_MEMINFO
+extern int phys_to_target_node(u64 start);
+#define phys_to_target_node phys_to_target_node
+extern int memory_add_physaddr_to_nid(u64 start);
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+#endif /* CONFIG_NUMA_KEEP_MEMINFO */
+
+#endif /* CONFIG_NUMA_MEMBLKS */
+
+#endif /* __NUMA_MEMBLKS_H */
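
Architecture init code is expected to feed memblocks and distances to this
layer before handing control to numa_memblks_init(); a hedged sketch with
made-up ranges:

    static int __init my_numa_init(void)    /* hypothetical arch parser */
    {
        numa_add_memblk(0, 0x0,        0x80000000);   /* node 0: 0-2 GiB */
        numa_add_memblk(1, 0x80000000, 0x100000000);  /* node 1: 2-4 GiB */
        numa_set_distance(0, 1, 20);
        return 0;
    }

    numa_memblks_init(my_numa_init, true /* memblock_force_top_down */);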
diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h
new file mode 100644
index 000000000000..60e069a6757f
--- /dev/null
+++ b/include/linux/nvme-auth.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 Hannes Reinecke, SUSE Software Solutions
+ */
+
+#ifndef _NVME_AUTH_H
+#define _NVME_AUTH_H
+
+#include <crypto/kpp.h>
+
+struct nvme_dhchap_key {
+ size_t len;
+ u8 hash;
+ u8 key[];
+};
+
+u32 nvme_auth_get_seqnum(void);
+const char *nvme_auth_dhgroup_name(u8 dhgroup_id);
+const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id);
+u8 nvme_auth_dhgroup_id(const char *dhgroup_name);
+
+const char *nvme_auth_hmac_name(u8 hmac_id);
+const char *nvme_auth_digest_name(u8 hmac_id);
+size_t nvme_auth_hmac_hash_len(u8 hmac_id);
+u8 nvme_auth_hmac_id(const char *hmac_name);
+
+u32 nvme_auth_key_struct_size(u32 key_len);
+struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
+ u8 key_hash);
+void nvme_auth_free_key(struct nvme_dhchap_key *key);
+struct nvme_dhchap_key *nvme_auth_alloc_key(u32 len, u8 hash);
+struct nvme_dhchap_key *nvme_auth_transform_key(
+ struct nvme_dhchap_key *key, char *nqn);
+int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key);
+int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
+ u8 *challenge, u8 *aug, size_t hlen);
+int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid);
+int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
+ u8 *host_key, size_t host_key_len);
+int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
+ u8 *ctrl_key, size_t ctrl_key_len,
+ u8 *sess_key, size_t sess_key_len);
+int nvme_auth_generate_psk(u8 hmac_id, u8 *skey, size_t skey_len,
+ u8 *c1, u8 *c2, size_t hash_len,
+ u8 **ret_psk, size_t *ret_len);
+int nvme_auth_generate_digest(u8 hmac_id, u8 *psk, size_t psk_len,
+ char *subsysnqn, char *hostnqn, u8 **ret_digest);
+int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
+ u8 *psk_digest, u8 **ret_psk);
+
+#endif /* _NVME_AUTH_H */
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 2a38f2b477a5..9f6acadfe0c8 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -7,6 +7,7 @@
#define _NVME_FC_DRIVER_H 1
#include <linux/scatterlist.h>
+#include <linux/blk-mq.h>
/*
@@ -184,7 +185,6 @@ enum nvmefc_fcp_datadir {
* @first_sgl: memory for 1st scatter/gather list segment for payload data
* @sg_cnt: number of elements in the scatter/gather list
* @io_dir: direction of the FCP request (see NVMEFC_FCP_xxx)
- * @sqid: The nvme SQID the command is being issued on
* @done: The callback routine the LLDD is to invoke upon completion of
* the FCP operation. req argument is the pointer to the original
* FCP IO operation.
@@ -193,12 +193,13 @@ enum nvmefc_fcp_datadir {
* while processing the operation. The length of the buffer
* corresponds to the fcprqst_priv_sz value specified in the
* nvme_fc_port_template supplied by the LLDD.
+ * @sqid: The nvme SQID the command is being issued on
*
* Values set by the LLDD indicating completion status of the FCP operation.
* Must be set prior to calling the done() callback.
+ * @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
* @transferred_length: amount of payload data, in bytes, that were
* transferred. Should equal payload_length on success.
- * @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
* @status: Completion status of the FCP operation. must be 0 upon success,
* negative errno value upon failure (ex: -EIO). Note: this is
* NOT a reflection of the NVME CQE completion status. Only the
@@ -218,14 +219,14 @@ struct nvmefc_fcp_req {
int sg_cnt;
enum nvmefc_fcp_datadir io_dir;
- __le16 sqid;
-
void (*done)(struct nvmefc_fcp_req *req);
void *private;
- u32 transferred_length;
+ __le16 sqid;
+
u16 rcv_rsplen;
+ u32 transferred_length;
u32 status;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
@@ -497,6 +498,8 @@ struct nvme_fc_port_template {
int (*xmt_ls_rsp)(struct nvme_fc_local_port *localport,
struct nvme_fc_remote_port *rport,
struct nvmefc_ls_rsp *ls_rsp);
+ void (*map_queues)(struct nvme_fc_local_port *localport,
+ struct blk_mq_queue_map *map);
u32 max_hw_queues;
u16 max_sgl_segments;
@@ -561,6 +564,15 @@ int nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *remoteport,
void *lsreqbuf, u32 lsreqbuf_len);
+/*
+ * Routine called to get the appid field associated with request by the lldd
+ *
+ * If the return value is NULL : the user/libvirt has not set the appid to VM
+ * If the return value is non-zero: Returns the appid associated with VM
+ *
+ * @req: IO request from nvme fc to driver
+ */
+char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req);
/*
* *************** LLDD FC-NVME Target/Subsystem API ***************
@@ -608,7 +620,7 @@ enum {
*
* Structure used between LLDD and nvmet-fc layer to represent the exchange
* context for a FC-NVME FCP I/O operation (e.g. a nvme sqe, the sqe-related
- * memory transfers, and its assocated cqe transfer).
+ * memory transfers, and its associated cqe transfer).
*
* The structure is allocated by the LLDD whenever a FCP CMD IU is received
* from the FC link. The address of the structure is passed to the nvmet-fc
@@ -718,7 +730,7 @@ enum {
*
* Fields with static values for the port. Initialized by the
* port_info struct supplied to the registration call.
- * @port_num: NVME-FC transport subsytem port number
+ * @port_num: NVME-FC transport subsystem port number
* @node_name: FC WWNN for the port
* @port_name: FC WWPN for the port
* @private: pointer to memory allocated alongside the local port
@@ -779,6 +791,10 @@ struct nvmet_fc_target_port {
* LS received.
* Entrypoint is Mandatory.
*
+ * @map_queues: This function lets the driver expose the queue mapping
+ * to the block layer.
+ * Entrypoint is Optional.
+ *
* @fcp_op: Called to perform a data transfer or transmit a response.
* The nvmefc_tgt_fcp_req structure is the same LLDD-supplied
* exchange structure specified in the nvmet_fc_rcv_fcp_req() call
@@ -904,6 +920,9 @@ struct nvmet_fc_target_port {
* further references to hosthandle.
* Entrypoint is Mandatory if the lldd calls nvmet_fc_invalidate_host().
*
+ * @host_traddr: called by the transport to retrieve the node name and
+ * port name of the host port address.
+ *
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
@@ -959,6 +978,7 @@ struct nvmet_fc_target_template {
void (*ls_abort)(struct nvmet_fc_target_port *targetport,
void *hosthandle, struct nvmefc_ls_req *lsreq);
void (*host_release)(void *hosthandle);
+ int (*host_traddr)(void *hosthandle, u64 *wwnn, u64 *wwpn);
u32 max_hw_queues;
u16 max_sgl_segments;
@@ -1041,5 +1061,10 @@ int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport,
void nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq);
+/*
+ * add a define, visible to the compiler, that indicates support
+ * for feature. Allows for conditional compilation in LLDDs.
+ */
+#define NVME_FC_FEAT_UUID 0x0001
#endif /* _NVME_FC_DRIVER_H */
diff --git a/include/linux/nvme-keyring.h b/include/linux/nvme-keyring.h
new file mode 100644
index 000000000000..ab8971afa973
--- /dev/null
+++ b/include/linux/nvme-keyring.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Hannes Reinecke, SUSE Labs
+ */
+
+#ifndef _NVME_KEYRING_H
+#define _NVME_KEYRING_H
+
+#include <linux/key.h>
+
+#if IS_ENABLED(CONFIG_NVME_KEYRING)
+
+struct key *nvme_tls_psk_refresh(struct key *keyring,
+ const char *hostnqn, const char *subnqn, u8 hmac_id,
+ u8 *data, size_t data_len, const char *digest);
+key_serial_t nvme_tls_psk_default(struct key *keyring,
+ const char *hostnqn, const char *subnqn);
+
+key_serial_t nvme_keyring_id(void);
+struct key *nvme_tls_key_lookup(key_serial_t key_id);
+#else
+static inline struct key *nvme_tls_psk_refresh(struct key *keyring,
+ const char *hostnqn, char *subnqn, u8 hmac_id,
+ u8 *data, size_t data_len, const char *digest)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+static inline key_serial_t nvme_tls_psk_default(struct key *keyring,
+ const char *hostnqn, const char *subnqn)
+{
+ return 0;
+}
+static inline key_serial_t nvme_keyring_id(void)
+{
+ return 0;
+}
+static inline struct key *nvme_tls_key_lookup(key_serial_t key_id)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+#endif /* !CONFIG_NVME_KEYRING */
+#endif /* _NVME_KEYRING_H */
diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h
index 3ec8e50efa16..97c5f00b9aa3 100644
--- a/include/linux/nvme-rdma.h
+++ b/include/linux/nvme-rdma.h
@@ -6,6 +6,12 @@
#ifndef _LINUX_NVME_RDMA_H
#define _LINUX_NVME_RDMA_H
+#define NVME_RDMA_IP_PORT 4420
+
+#define NVME_RDMA_MAX_QUEUE_SIZE 256
+#define NVME_RDMA_MAX_METADATA_QUEUE_SIZE 128
+#define NVME_RDMA_DEFAULT_QUEUE_SIZE 128
+
enum nvme_rdma_cm_fmt {
NVME_RDMA_CM_FMT_1_0 = 0x0,
};
@@ -19,6 +25,7 @@ enum nvme_rdma_cm_status {
NVME_RDMA_CM_NO_RSC = 0x06,
NVME_RDMA_CM_INVALID_IRD = 0x07,
NVME_RDMA_CM_INVALID_ORD = 0x08,
+ NVME_RDMA_CM_INVALID_CNTLID = 0x09,
};
static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
@@ -40,6 +47,8 @@ static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
return "invalid IRD";
case NVME_RDMA_CM_INVALID_ORD:
return "Invalid ORD";
+ case NVME_RDMA_CM_INVALID_CNTLID:
+ return "invalid controller ID";
default:
return "unrecognized reason";
}
@@ -58,7 +67,8 @@ struct nvme_rdma_cm_req {
__le16 qid;
__le16 hrqsize;
__le16 hsqsize;
- u8 rsvd[24];
+ __le16 cntlid;
+ u8 rsvd[22];
};
/**
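
The new cntlid is carved out of the former 24-byte reserved area, so the CM
request private data keeps its size; a compile-time guard for that invariant
might look like (the 32-byte figure is taken from the NVMe-oF RDMA binding):

    static_assert(sizeof(struct nvme_rdma_cm_req) == 32,
                  "RDMA CM private data layout must not change size");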
diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
index 959e0bd9a913..e435250fcb4d 100644
--- a/include/linux/nvme-tcp.h
+++ b/include/linux/nvme-tcp.h
@@ -12,11 +12,20 @@
#define NVME_TCP_DISC_PORT 8009
#define NVME_TCP_ADMIN_CCSZ SZ_8K
#define NVME_TCP_DIGEST_LENGTH 4
+#define NVME_TCP_MIN_MAXH2CDATA 4096
+#define NVME_TCP_MIN_C2HTERM_PLEN 24
+#define NVME_TCP_MAX_C2HTERM_PLEN 152
enum nvme_tcp_pfv {
NVME_TCP_PFV_1_0 = 0x0,
};
+enum nvme_tcp_tls_cipher {
+ NVME_TCP_TLS_CIPHER_INVALID = 0,
+ NVME_TCP_TLS_CIPHER_SHA256 = 1,
+ NVME_TCP_TLS_CIPHER_SHA384 = 2,
+};
+
enum nvme_tcp_fatal_error_status {
NVME_TCP_FES_INVALID_PDU_HDR = 0x01,
NVME_TCP_FES_PDU_SEQ_ERR = 0x02,
@@ -114,8 +123,9 @@ struct nvme_tcp_icresp_pdu {
struct nvme_tcp_term_pdu {
struct nvme_tcp_hdr hdr;
__le16 fes;
- __le32 fei;
- __u8 rsvd[8];
+ __le16 feil;
+ __le16 feiu;
+ __u8 rsvd[10];
};
/**
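
The 32-bit FEI follows the 16-bit FES, leaving it only 2-byte aligned;
splitting it into __le16 halves avoids padding or packed attributes. A
consumer would reassemble it as:

    u32 fei = le16_to_cpu(pdu->feil) | ((u32)le16_to_cpu(pdu->feiu) << 16);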
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index edcbd60b88b9..655d194f8e72 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -7,6 +7,7 @@
#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H
+#include <linux/bits.h>
#include <linux/types.h>
#include <linux/uuid.h>
@@ -22,13 +23,32 @@
#define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery"
-#define NVME_RDMA_IP_PORT 4420
-
#define NVME_NSID_ALL 0xffffffff
+/* Special NSSR value, 'NVMe' */
+#define NVME_SUBSYS_RESET 0x4E564D65
+
enum nvme_subsys_type {
- NVME_NQN_DISC = 1, /* Discovery type target subsystem */
- NVME_NQN_NVME = 2, /* NVME type target subsystem */
+ /* Referral to another discovery type target subsystem */
+ NVME_NQN_DISC = 1,
+
+ /* NVME type target subsystem */
+ NVME_NQN_NVME = 2,
+
+ /* Current discovery type target subsystem */
+ NVME_NQN_CURR = 3,
+};
+
+enum nvme_ctrl_type {
+ NVME_CTRL_IO = 1, /* I/O controller */
+ NVME_CTRL_DISC = 2, /* Discovery controller */
+ NVME_CTRL_ADMIN = 3, /* Administrative controller */
+};
+
+enum nvme_dctype {
+ NVME_DCTYPE_NOT_REPORTED = 0,
+ NVME_DCTYPE_DDC = 1, /* Direct Discovery Controller */
+ NVME_DCTYPE_CDC = 2, /* Central Discovery Controller */
};
/* Address Family codes for Discovery Log Page entry ADRFAM field */
@@ -44,6 +64,7 @@ enum {
/* Transport Type codes for Discovery Log Page entry TRTYPE field */
enum {
+ NVMF_TRTYPE_PCI = 0, /* PCI */
NVMF_TRTYPE_RDMA = 1, /* RDMA */
NVMF_TRTYPE_FC = 2, /* Fibre Channel */
NVMF_TRTYPE_TCP = 3, /* TCP/IP */
@@ -68,10 +89,11 @@ enum {
enum {
NVMF_RDMA_QPTYPE_CONNECTED = 1, /* Reliable Connected */
NVMF_RDMA_QPTYPE_DATAGRAM = 2, /* Reliable Datagram */
+ NVMF_RDMA_QPTYPE_INVALID = 0xff,
};
-/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
- * RDMA_QPTYPE field
+/* RDMA Provider Type codes for Discovery Log Page entry TSAS
+ * RDMA_PRTYPE field
*/
enum {
NVMF_RDMA_PRTYPE_NOT_SPECIFIED = 1, /* No Provider Specified */
@@ -88,6 +110,14 @@ enum {
NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
};
+/* TSAS SECTYPE for TCP transport */
+enum {
+ NVMF_TCP_SECTYPE_NONE = 0, /* No Security */
+ NVMF_TCP_SECTYPE_TLS12 = 1, /* TLSv1.2, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
+ NVMF_TCP_SECTYPE_TLS13 = 2, /* TLSv1.3, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
+ NVMF_TCP_SECTYPE_INVALID = 0xff,
+};
+
#define NVME_AQ_DEPTH 32
#define NVME_NR_AEN_COMMANDS 1
#define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
@@ -119,6 +149,7 @@ enum {
NVME_REG_CMBMSC = 0x0050, /* Controller Memory Buffer Memory
* Space Control
*/
+ NVME_REG_CRTO = 0x0068, /* Controller Ready Timeouts */
NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */
NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */
NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */
@@ -143,6 +174,9 @@ enum {
#define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7)
#define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff)
+#define NVME_CRTO_CRIMT(crto) ((crto) >> 16)
+#define NVME_CRTO_CRWMT(crto) ((crto) & 0xffff)
+
enum {
NVME_CMBSZ_SQS = 1 << 0,
NVME_CMBSZ_CQS = 1 << 1,
@@ -165,29 +199,57 @@ enum {
#define NVME_NVM_IOSQES 6
#define NVME_NVM_IOCQES 4
+/*
+ * Controller Configuration (CC) register (Offset 14h)
+ */
enum {
+ /* Enable (EN): bit 0 */
NVME_CC_ENABLE = 1 << 0,
NVME_CC_EN_SHIFT = 0,
+
+ /* Bits 03:01 are reserved (NVMe Base Specification rev 2.1) */
+
+ /* I/O Command Set Selected (CSS): bits 06:04 */
NVME_CC_CSS_SHIFT = 4,
- NVME_CC_MPS_SHIFT = 7,
- NVME_CC_AMS_SHIFT = 11,
- NVME_CC_SHN_SHIFT = 14,
- NVME_CC_IOSQES_SHIFT = 16,
- NVME_CC_IOCQES_SHIFT = 20,
+ NVME_CC_CSS_MASK = 7 << NVME_CC_CSS_SHIFT,
NVME_CC_CSS_NVM = 0 << NVME_CC_CSS_SHIFT,
NVME_CC_CSS_CSI = 6 << NVME_CC_CSS_SHIFT,
- NVME_CC_CSS_MASK = 7 << NVME_CC_CSS_SHIFT,
+
+ /* Memory Page Size (MPS): bits 10:07 */
+ NVME_CC_MPS_SHIFT = 7,
+ NVME_CC_MPS_MASK = 0xf << NVME_CC_MPS_SHIFT,
+
+ /* Arbitration Mechanism Selected (AMS): bits 13:11 */
+ NVME_CC_AMS_SHIFT = 11,
+ NVME_CC_AMS_MASK = 7 << NVME_CC_AMS_SHIFT,
NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT,
NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT,
NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT,
+
+ /* Shutdown Notification (SHN): bits 15:14 */
+ NVME_CC_SHN_SHIFT = 14,
+ NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
NVME_CC_SHN_NONE = 0 << NVME_CC_SHN_SHIFT,
NVME_CC_SHN_NORMAL = 1 << NVME_CC_SHN_SHIFT,
NVME_CC_SHN_ABRUPT = 2 << NVME_CC_SHN_SHIFT,
- NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT,
+
+ /* I/O Submission Queue Entry Size (IOSQES): bits 19:16 */
+ NVME_CC_IOSQES_SHIFT = 16,
+ NVME_CC_IOSQES_MASK = 0xf << NVME_CC_IOSQES_SHIFT,
NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT,
+
+ /* I/O Completion Queue Entry Size (IOCQES): bits 23:20 */
+ NVME_CC_IOCQES_SHIFT = 20,
+ NVME_CC_IOCQES_MASK = 0xf << NVME_CC_IOCQES_SHIFT,
NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT,
- NVME_CAP_CSS_NVM = 1 << 0,
- NVME_CAP_CSS_CSI = 1 << 6,
+
+ /* Controller Ready Independent of Media Enable (CRIME): bit 24 */
+ NVME_CC_CRIME = 1 << 24,
+
+ /* Bits 25:31 are reserved (NVMe Base Specification rev 2.1) */
+};
+
+enum {
NVME_CSTS_RDY = 1 << 0,
NVME_CSTS_CFS = 1 << 1,
NVME_CSTS_NSSRO = 1 << 4,
@@ -196,10 +258,23 @@ enum {
NVME_CSTS_SHST_OCCUR = 1 << 2,
NVME_CSTS_SHST_CMPLT = 2 << 2,
NVME_CSTS_SHST_MASK = 3 << 2,
+};
+
+enum {
NVME_CMBMSC_CRE = 1 << 0,
NVME_CMBMSC_CMSE = 1 << 1,
};
+enum {
+ NVME_CAP_CSS_NVM = 1 << 0,
+ NVME_CAP_CSS_CSI = 1 << 6,
+};
+
+enum {
+ NVME_CAP_CRMS_CRWMS = 1ULL << 59,
+ NVME_CAP_CRMS_CRIMS = 1ULL << 60,
+};
+
struct nvme_id_power_state {
__le16 max_power; /* centiwatts */
__u8 rsvd2;
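
With a mask now defined beside every shift, CC field accesses follow the
usual read-modify-write pattern; e.g. for the Memory Page Size field:

    u32 mps = (cc & NVME_CC_MPS_MASK) >> NVME_CC_MPS_SHIFT;

    cc &= ~NVME_CC_MPS_MASK;
    cc |= new_mps << NVME_CC_MPS_SHIFT;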
@@ -226,6 +301,9 @@ enum {
enum nvme_ctrl_attr {
NVME_CTRL_ATTR_HID_128_BIT = (1 << 0),
NVME_CTRL_ATTR_TBKAS = (1 << 6),
+ NVME_CTRL_ATTR_ELBAS = (1 << 15),
+ NVME_CTRL_ATTR_RHII = (1 << 18),
+ NVME_CTRL_ATTR_FDPS = (1 << 19),
};
struct nvme_id_ctrl {
@@ -244,7 +322,9 @@ struct nvme_id_ctrl {
__le32 rtd3e;
__le32 oaes;
__le32 ctratt;
- __u8 rsvd100[28];
+ __u8 rsvd100[11];
+ __u8 cntrltype;
+ __u8 fguid[16];
__le16 crdt1;
__le16 crdt2;
__le16 crdt3;
@@ -276,7 +356,8 @@ struct nvme_id_ctrl {
__le32 sanicap;
__le32 hmminds;
__le16 hmmaxd;
- __u8 rsvd338[4];
+ __le16 nvmsetidmax;
+ __le16 endgidmax;
__u8 anatt;
__u8 anacap;
__le32 anagrpmax;
@@ -306,12 +387,15 @@ struct nvme_id_ctrl {
__le16 icdoff;
__u8 ctrattr;
__u8 msdbd;
- __u8 rsvd1804[244];
+ __u8 rsvd1804[2];
+ __u8 dctype;
+ __u8 rsvd1807[241];
struct nvme_id_power_state psd[32];
__u8 vs[1024];
};
enum {
+ NVME_CTRL_CMIC_MULTI_PORT = 1 << 0,
NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1,
NVME_CTRL_CMIC_ANA = 1 << 3,
NVME_CTRL_ONCS_COMPARE = 1 << 0,
@@ -322,6 +406,7 @@ enum {
NVME_CTRL_ONCS_TIMESTAMP = 1 << 6,
NVME_CTRL_VWC_PRESENT = 1 << 0,
NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
+ NVME_CTRL_OACS_NS_MNGT_SUPP = 1 << 3,
NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
@@ -333,6 +418,11 @@ enum {
NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5,
NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7,
NVME_CTRL_CTRATT_UUID_LIST = 1 << 9,
+ NVME_CTRL_SGLS_BYTE_ALIGNED = 1,
+ NVME_CTRL_SGLS_DWORD_ALIGNED = 2,
+ NVME_CTRL_SGLS_KSDBDS = 1 << 2,
+ NVME_CTRL_SGLS_MSDS = 1 << 19,
+ NVME_CTRL_SGLS_SAOS = 1 << 20,
};
struct nvme_lbaf {
@@ -376,11 +466,25 @@ struct nvme_id_ns {
__le16 endgid;
__u8 nguid[16];
__u8 eui64[8];
- struct nvme_lbaf lbaf[16];
- __u8 rsvd192[192];
+ struct nvme_lbaf lbaf[64];
__u8 vs[3712];
};
+/* I/O Command Set Independent Identify Namespace Data Structure */
+struct nvme_id_ns_cs_indep {
+ __u8 nsfeat;
+ __u8 nmic;
+ __u8 rescap;
+ __u8 fpi;
+ __le32 anagrpid;
+ __u8 nsattr;
+ __u8 rsvd9;
+ __le16 nvmsetid;
+ __le16 endgid;
+ __u8 nstat;
+ __u8 rsvd15[4081];
+};
+
struct nvme_zns_lbafe {
__le64 zsze;
__u8 zdes;
@@ -395,8 +499,7 @@ struct nvme_id_ns_zns {
__le32 rrl;
__le32 frl;
__u8 rsvd20[2796];
- struct nvme_zns_lbafe lbafe[16];
- __u8 rsvd3072[768];
+ struct nvme_zns_lbafe lbafe[64];
__u8 vs[256];
};
@@ -405,6 +508,38 @@ struct nvme_id_ctrl_zns {
__u8 rsvd1[4095];
};
+struct nvme_id_ns_nvm {
+ __le64 lbstm;
+ __u8 pic;
+ __u8 rsvd9[3];
+ __le32 elbaf[64];
+ __u8 rsvd268[3828];
+};
+
+enum {
+ NVME_ID_NS_NVM_STS_MASK = 0x7f,
+ NVME_ID_NS_NVM_GUARD_SHIFT = 7,
+ NVME_ID_NS_NVM_GUARD_MASK = 0x3,
+ NVME_ID_NS_NVM_QPIF_SHIFT = 9,
+ NVME_ID_NS_NVM_QPIF_MASK = 0xf,
+ NVME_ID_NS_NVM_QPIFS = 1 << 3,
+};
+
+static inline __u8 nvme_elbaf_sts(__u32 elbaf)
+{
+ return elbaf & NVME_ID_NS_NVM_STS_MASK;
+}
+
+static inline __u8 nvme_elbaf_guard_type(__u32 elbaf)
+{
+ return (elbaf >> NVME_ID_NS_NVM_GUARD_SHIFT) & NVME_ID_NS_NVM_GUARD_MASK;
+}
+
+static inline __u8 nvme_elbaf_qualified_guard_type(__u32 elbaf)
+{
+ return (elbaf >> NVME_ID_NS_NVM_QPIF_SHIFT) & NVME_ID_NS_NVM_QPIF_MASK;
+}
+
struct nvme_id_ctrl_nvm {
__u8 vsl;
__u8 wzsl;
@@ -422,6 +557,8 @@ enum {
NVME_ID_CNS_NS_DESC_LIST = 0x03,
NVME_ID_CNS_CS_NS = 0x05,
NVME_ID_CNS_CS_CTRL = 0x06,
+ NVME_ID_CNS_NS_ACTIVE_LIST_CS = 0x07,
+ NVME_ID_CNS_NS_CS_INDEP = 0x08,
NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
NVME_ID_CNS_NS_PRESENT = 0x11,
NVME_ID_CNS_CTRL_NS_LIST = 0x12,
@@ -429,6 +566,7 @@ enum {
NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15,
NVME_ID_CNS_NS_GRANULARITY = 0x16,
NVME_ID_CNS_UUID_LIST = 0x17,
+ NVME_ID_CNS_ENDGRP_LIST = 0x19,
};
enum {
@@ -455,8 +593,12 @@ enum {
NVME_NS_FEAT_IO_OPT = 1 << 4,
NVME_NS_ATTR_RO = 1 << 0,
NVME_NS_FLBAS_LBA_MASK = 0xf,
+ NVME_NS_FLBAS_LBA_UMASK = 0x60,
+ NVME_NS_FLBAS_LBA_SHIFT = 1,
NVME_NS_FLBAS_META_EXT = 0x10,
NVME_NS_NMIC_SHARED = 1 << 0,
+ NVME_NS_ROTATIONAL = 1 << 4,
+ NVME_NS_VWC_NOT_PRESENT = 1 << 5,
NVME_LBAF_RP_BEST = 0,
NVME_LBAF_RP_BETTER = 1,
NVME_LBAF_RP_GOOD = 2,
@@ -473,6 +615,23 @@ enum {
NVME_NS_DPS_PI_TYPE3 = 3,
};
+enum {
+ NVME_NSTAT_NRDY = 1 << 0,
+};
+
+enum {
+ NVME_NVM_NS_16B_GUARD = 0,
+ NVME_NVM_NS_32B_GUARD = 1,
+ NVME_NVM_NS_64B_GUARD = 2,
+ NVME_NVM_NS_QTYPE_GUARD = 3,
+};
+
+static inline __u8 nvme_lbaf_index(__u8 flbas)
+{
+ return (flbas & NVME_NS_FLBAS_LBA_MASK) |
+ ((flbas & NVME_NS_FLBAS_LBA_UMASK) >> NVME_NS_FLBAS_LBA_SHIFT);
+}
+
/* Identify Namespace Metadata Capabilities (MC): */
enum {
NVME_MC_EXTENDED_LBA = (1 << 0),
@@ -497,6 +656,78 @@ enum {
NVME_NIDT_CSI = 0x04,
};
+struct nvme_endurance_group_log {
+ __u8 egcw;
+ __u8 egfeat;
+ __u8 rsvd2;
+ __u8 avsp;
+ __u8 avspt;
+ __u8 pused;
+ __le16 did;
+ __u8 rsvd8[24];
+ __u8 ee[16];
+ __u8 dur[16];
+ __u8 duw[16];
+ __u8 muw[16];
+ __u8 hrc[16];
+ __u8 hwc[16];
+ __u8 mdie[16];
+ __u8 neile[16];
+ __u8 tegcap[16];
+ __u8 uegcap[16];
+ __u8 rsvd192[320];
+};
+
+struct nvme_rotational_media_log {
+ __le16 endgid;
+ __le16 numa;
+ __le16 nrs;
+ __u8 rsvd6[2];
+ __le32 spinc;
+ __le32 fspinc;
+ __le32 ldc;
+ __le32 fldc;
+ __u8 rsvd24[488];
+};
+
+struct nvme_fdp_config {
+ __u8 flags;
+#define FDPCFG_FDPE (1U << 0)
+ __u8 fdpcidx;
+ __le16 reserved;
+};
+
+struct nvme_fdp_ruh_desc {
+ __u8 ruht;
+ __u8 reserved[3];
+};
+
+struct nvme_fdp_config_desc {
+ __le16 dsze;
+ __u8 fdpa;
+ __u8 vss;
+ __le32 nrg;
+ __le16 nruh;
+ __le16 maxpids;
+ __le32 nns;
+ __le64 runs;
+ __le32 erutl;
+ __u8 rsvd28[36];
+ struct nvme_fdp_ruh_desc ruhs[];
+};
+
+struct nvme_fdp_config_log {
+ __le16 numfdpc;
+ __u8 ver;
+ __u8 rsvd3;
+ __le32 sze;
+ __u8 rsvd8[8];
+ /*
+ * This is followed by variable number of nvme_fdp_config_desc
+ * structures, but sparse doesn't like nested variable sized arrays.
+ */
+};
+
struct nvme_smart_log {
__u8 critical_warning;
__u8 temperature[2];
@@ -538,8 +769,10 @@ enum {
NVME_CMD_EFFECTS_NCC = 1 << 2,
NVME_CMD_EFFECTS_NIC = 1 << 3,
NVME_CMD_EFFECTS_CCC = 1 << 4,
- NVME_CMD_EFFECTS_CSE_MASK = 3 << 16,
+ NVME_CMD_EFFECTS_CSER_MASK = GENMASK(15, 14),
+ NVME_CMD_EFFECTS_CSE_MASK = GENMASK(18, 16),
NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
+ NVME_CMD_EFFECTS_SCOPE_MASK = GENMASK(31, 20),
};
struct nvme_effects_log {
@@ -612,6 +845,10 @@ enum {
};
enum {
+ NVME_AER_ERROR_PERSIST_INT_ERR = 0x03,
+};
+
+enum {
NVME_AER_NOTICE_NS_CHANGED = 0x00,
NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
NVME_AER_NOTICE_ANA = 0x03,
@@ -636,8 +873,8 @@ struct nvme_lba_range_type {
__u8 type;
__u8 attributes;
__u8 rsvd2[14];
- __u64 slba;
- __u64 nlb;
+ __le64 slba;
+ __le64 nlb;
__u8 guid[16];
__u8 rsvd48[16];
};
@@ -652,26 +889,55 @@ enum {
NVME_LBART_ATTRIB_HIDE = 1 << 1,
};
+enum nvme_pr_type {
+ NVME_PR_WRITE_EXCLUSIVE = 1,
+ NVME_PR_EXCLUSIVE_ACCESS = 2,
+ NVME_PR_WRITE_EXCLUSIVE_REG_ONLY = 3,
+ NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY = 4,
+ NVME_PR_WRITE_EXCLUSIVE_ALL_REGS = 5,
+ NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS = 6,
+};
+
+enum nvme_eds {
+ NVME_EXTENDED_DATA_STRUCT = 0x1,
+};
+
+struct nvme_registered_ctrl {
+ __le16 cntlid;
+ __u8 rcsts;
+ __u8 rsvd3[5];
+ __le64 hostid;
+ __le64 rkey;
+};
+
struct nvme_reservation_status {
__le32 gen;
__u8 rtype;
__u8 regctl[2];
__u8 resv5[2];
__u8 ptpls;
- __u8 resv10[13];
- struct {
- __le16 cntlid;
- __u8 rcsts;
- __u8 resv3[5];
- __le64 hostid;
- __le64 rkey;
- } regctl_ds[];
+ __u8 resv10[14];
+ struct nvme_registered_ctrl regctl_ds[];
};
-enum nvme_async_event_type {
- NVME_AER_TYPE_ERROR = 0,
- NVME_AER_TYPE_SMART = 1,
- NVME_AER_TYPE_NOTICE = 2,
+struct nvme_registered_ctrl_ext {
+ __le16 cntlid;
+ __u8 rcsts;
+ __u8 rsvd3[5];
+ __le64 rkey;
+ __u8 hostid[16];
+ __u8 rsvd32[32];
+};
+
+struct nvme_reservation_status_ext {
+ __le32 gen;
+ __u8 rtype;
+ __u8 regctl[2];
+ __u8 resv5[2];
+ __u8 ptpls;
+ __u8 resv10[14];
+ __u8 rsvd24[40];
+ struct nvme_registered_ctrl_ext regctl_eds[];
};
/* I/O commands */
@@ -688,10 +954,12 @@ enum nvme_opcode {
nvme_cmd_resv_register = 0x0d,
nvme_cmd_resv_report = 0x0e,
nvme_cmd_resv_acquire = 0x11,
+ nvme_cmd_io_mgmt_recv = 0x12,
nvme_cmd_resv_release = 0x15,
nvme_cmd_zone_mgmt_send = 0x79,
nvme_cmd_zone_mgmt_recv = 0x7a,
nvme_cmd_zone_append = 0x7d,
+ nvme_cmd_vendor_start = 0x80,
};
#define nvme_opcode_name(opcode) { opcode, #opcode }
@@ -704,9 +972,11 @@ enum nvme_opcode {
nvme_opcode_name(nvme_cmd_compare), \
nvme_opcode_name(nvme_cmd_write_zeroes), \
nvme_opcode_name(nvme_cmd_dsm), \
+ nvme_opcode_name(nvme_cmd_verify), \
nvme_opcode_name(nvme_cmd_resv_register), \
nvme_opcode_name(nvme_cmd_resv_report), \
nvme_opcode_name(nvme_cmd_resv_acquire), \
+ nvme_opcode_name(nvme_cmd_io_mgmt_recv), \
nvme_opcode_name(nvme_cmd_resv_release), \
nvme_opcode_name(nvme_cmd_zone_mgmt_send), \
nvme_opcode_name(nvme_cmd_zone_mgmt_recv), \
@@ -806,12 +1076,14 @@ struct nvme_common_command {
__le32 cdw2[2];
__le64 metadata;
union nvme_data_ptr dptr;
+ struct_group(cdws,
__le32 cdw10;
__le32 cdw11;
__le32 cdw12;
__le32 cdw13;
__le32 cdw14;
__le32 cdw15;
+ );
};
struct nvme_rw_command {
@@ -819,7 +1091,8 @@ struct nvme_rw_command {
__u8 flags;
__u16 command_id;
__le32 nsid;
- __u64 rsvd2;
+ __le32 cdw2;
+ __le32 cdw3;
__le64 metadata;
union nvme_data_ptr dptr;
__le64 slba;
@@ -827,8 +1100,8 @@ struct nvme_rw_command {
__le16 control;
__le32 dsmgmt;
__le32 reftag;
- __le16 apptag;
- __le16 appmask;
+ __le16 lbat;
+ __le16 lbatm;
};
enum {
@@ -855,6 +1128,8 @@ enum {
NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12,
NVME_RW_PRINFO_PRACT = 1 << 13,
NVME_RW_DTYPE_STREAMS = 1 << 4,
+ NVME_RW_DTYPE_DPLCMT = 2 << 4,
+ NVME_WZ_DEAC = 1 << 9,
};
struct nvme_dsm_cmd {
@@ -896,8 +1171,8 @@ struct nvme_write_zeroes_cmd {
__le16 control;
__le32 dsmgmt;
__le32 reftag;
- __le16 apptag;
- __le16 appmask;
+ __le16 lbat;
+ __le16 lbatm;
};
enum nvme_zone_mgmt_action {
@@ -941,9 +1216,48 @@ struct nvme_zone_mgmt_recv_cmd {
__le32 cdw14[2];
};
+struct nvme_io_mgmt_recv_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 nsid;
+ __le64 rsvd2[2];
+ union nvme_data_ptr dptr;
+ __u8 mo;
+ __u8 rsvd11;
+ __u16 mos;
+ __le32 numd;
+ __le32 cdw12[4];
+};
+
+enum {
+ NVME_IO_MGMT_RECV_MO_RUHS = 1,
+};
+
+struct nvme_fdp_ruh_status_desc {
+ __le16 pid;
+ __le16 ruhid;
+ __le32 earutr;
+ __le64 ruamw;
+ __u8 reserved[16];
+};
+
+struct nvme_fdp_ruh_status {
+ __u8 rsvd0[14];
+ __le16 nruhsd;
+ struct nvme_fdp_ruh_status_desc ruhsd[];
+};
+
enum {
NVME_ZRA_ZONE_REPORT = 0,
NVME_ZRASF_ZONE_REPORT_ALL = 0,
+ NVME_ZRASF_ZONE_STATE_EMPTY = 0x01,
+ NVME_ZRASF_ZONE_STATE_IMP_OPEN = 0x02,
+ NVME_ZRASF_ZONE_STATE_EXP_OPEN = 0x03,
+ NVME_ZRASF_ZONE_STATE_CLOSED = 0x04,
+ NVME_ZRASF_ZONE_STATE_READONLY = 0x05,
+ NVME_ZRASF_ZONE_STATE_FULL = 0x06,
+ NVME_ZRASF_ZONE_STATE_OFFLINE = 0x07,
NVME_REPORT_ZONE_PARTIAL = 1,
};
@@ -966,11 +1280,14 @@ enum {
struct nvme_feat_host_behavior {
__u8 acre;
- __u8 resv1[511];
+ __u8 etdas;
+ __u8 lbafee;
+ __u8 resv1[509];
};
enum {
NVME_ENABLE_ACRE = 1,
+ NVME_ENABLE_LBAFEE = 1,
};
/* Admin commands */
@@ -1022,10 +1339,14 @@ enum nvme_admin_opcode {
nvme_admin_opcode_name(nvme_admin_ns_mgmt), \
nvme_admin_opcode_name(nvme_admin_activate_fw), \
nvme_admin_opcode_name(nvme_admin_download_fw), \
+ nvme_admin_opcode_name(nvme_admin_dev_self_test), \
nvme_admin_opcode_name(nvme_admin_ns_attach), \
nvme_admin_opcode_name(nvme_admin_keep_alive), \
nvme_admin_opcode_name(nvme_admin_directive_send), \
nvme_admin_opcode_name(nvme_admin_directive_recv), \
+ nvme_admin_opcode_name(nvme_admin_virtual_mgmt), \
+ nvme_admin_opcode_name(nvme_admin_nvme_mi_send), \
+ nvme_admin_opcode_name(nvme_admin_nvme_mi_recv), \
nvme_admin_opcode_name(nvme_admin_dbbuf), \
nvme_admin_opcode_name(nvme_admin_format_nvm), \
nvme_admin_opcode_name(nvme_admin_security_send), \
@@ -1062,6 +1383,7 @@ enum {
NVME_FEAT_PLM_WINDOW = 0x14,
NVME_FEAT_HOST_BEHAVIOR = 0x16,
NVME_FEAT_SANITIZE = 0x17,
+ NVME_FEAT_FDP = 0x1d,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
@@ -1069,6 +1391,7 @@ enum {
NVME_FEAT_WRITE_PROTECT = 0x84,
NVME_FEAT_VENDOR_START = 0xC0,
NVME_FEAT_VENDOR_END = 0xFF,
+ NVME_LOG_SUPPORTED = 0x00,
NVME_LOG_ERROR = 0x01,
NVME_LOG_SMART = 0x02,
NVME_LOG_FW_SLOT = 0x03,
@@ -1079,6 +1402,9 @@ enum {
NVME_LOG_TELEMETRY_CTRL = 0x08,
NVME_LOG_ENDURANCE_GROUP = 0x09,
NVME_LOG_ANA = 0x0c,
+ NVME_LOG_FEATURES = 0x12,
+ NVME_LOG_RMI = 0x16,
+ NVME_LOG_FDP_CONFIGS = 0x20,
NVME_LOG_DISC = 0x70,
NVME_LOG_RESERVATION = 0x80,
NVME_FWACT_REPL = (0 << 3),
@@ -1086,6 +1412,24 @@ enum {
NVME_FWACT_ACTV = (2 << 3),
};
+struct nvme_supported_log {
+ __le32 lids[256];
+};
+
+enum {
+ NVME_LIDS_LSUPP = 1 << 0,
+};
+
+struct nvme_supported_features_log {
+ __le32 fis[256];
+};
+
+enum {
+ NVME_FIS_FSUPP = 1 << 0,
+ NVME_FIS_NSCPE = 1 << 20,
+ NVME_FIS_CSCPE = 1 << 21,
+};
+
/* NVMe Namespace Write Protect State */
enum {
NVME_NS_NO_WRITE_PROTECT = 0,
@@ -1106,7 +1450,8 @@ struct nvme_identify {
__u8 cns;
__u8 rsvd3;
__le16 ctrlid;
- __u8 rsvd11[3];
+ __le16 cnssid;
+ __u8 rsvd11;
__u8 csi;
__u32 rsvd12[4];
};
@@ -1214,7 +1559,7 @@ struct nvme_get_log_page_command {
__u8 lsp; /* upper 4 bits reserved */
__le16 numdl;
__le16 numdu;
- __u16 rsvd11;
+ __le16 lsi;
union {
struct {
__le32 lpol;
@@ -1256,6 +1601,8 @@ enum nvmf_capsule_command {
nvme_fabrics_type_property_set = 0x00,
nvme_fabrics_type_connect = 0x01,
nvme_fabrics_type_property_get = 0x04,
+ nvme_fabrics_type_auth_send = 0x05,
+ nvme_fabrics_type_auth_receive = 0x06,
};
#define nvme_fabrics_type_name(type) { type, #type }
@@ -1263,7 +1610,9 @@ enum nvmf_capsule_command {
__print_symbolic(type, \
nvme_fabrics_type_name(nvme_fabrics_type_property_set), \
nvme_fabrics_type_name(nvme_fabrics_type_connect), \
- nvme_fabrics_type_name(nvme_fabrics_type_property_get))
+ nvme_fabrics_type_name(nvme_fabrics_type_property_get), \
+ nvme_fabrics_type_name(nvme_fabrics_type_auth_send), \
+ nvme_fabrics_type_name(nvme_fabrics_type_auth_receive))
/*
* If not fabrics command, fctype will be ignored.
@@ -1296,6 +1645,12 @@ struct nvmf_common_command {
#define MAX_DISC_LOGS 255
+/* Discovery log page entry flags (EFLAGS): */
+enum {
+ NVME_DISC_EFLAGS_EPCSD = (1 << 1),
+ NVME_DISC_EFLAGS_DUPRETINFO = (1 << 0),
+};
+
/* Discovery log page entry */
struct nvmf_disc_rsp_page_entry {
__u8 trtype;
@@ -1305,7 +1660,8 @@ struct nvmf_disc_rsp_page_entry {
__le16 portid;
__le16 cntlid;
__le16 asqsz;
- __u8 resv8[22];
+ __le16 eflags;
+ __u8 resv10[20];
char trsvcid[NVMF_TRSVCID_SIZE];
__u8 resv64[192];
char subnqn[NVMF_NQN_FIELD_LEN];
@@ -1320,6 +1676,9 @@ struct nvmf_disc_rsp_page_entry {
__u16 pkey;
__u8 resv10[246];
} rdma;
+ struct tcp {
+ __u8 sectype;
+ } tcp;
} tsas;
};
@@ -1352,6 +1711,11 @@ struct nvmf_connect_command {
__u8 resv4[12];
};
+enum {
+ NVME_CONNECT_AUTHREQ_ASCR = (1U << 18),
+ NVME_CONNECT_AUTHREQ_ATR = (1U << 17),
+};
+
struct nvmf_connect_data {
uuid_t hostid;
__le16 cntlid;
@@ -1386,6 +1750,207 @@ struct nvmf_property_get_command {
__u8 resv4[16];
};
+struct nvmf_auth_common_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[19];
+ union nvme_data_ptr dptr;
+ __u8 resv3;
+ __u8 spsp0;
+ __u8 spsp1;
+ __u8 secp;
+ __le32 al_tl;
+ __u8 resv4[16];
+};
+
+struct nvmf_auth_send_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[19];
+ union nvme_data_ptr dptr;
+ __u8 resv3;
+ __u8 spsp0;
+ __u8 spsp1;
+ __u8 secp;
+ __le32 tl;
+ __u8 resv4[16];
+};
+
+struct nvmf_auth_receive_command {
+ __u8 opcode;
+ __u8 resv1;
+ __u16 command_id;
+ __u8 fctype;
+ __u8 resv2[19];
+ union nvme_data_ptr dptr;
+ __u8 resv3;
+ __u8 spsp0;
+ __u8 spsp1;
+ __u8 secp;
+ __le32 al;
+ __u8 resv4[16];
+};
+
+/* Value for secp */
+enum {
+ NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER = 0xe9,
+};
+
+/* Defined value for auth_type */
+enum {
+ NVME_AUTH_COMMON_MESSAGES = 0x00,
+ NVME_AUTH_DHCHAP_MESSAGES = 0x01,
+};
+
+/* Defined messages for auth_id */
+enum {
+ NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE = 0x00,
+ NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE = 0x01,
+ NVME_AUTH_DHCHAP_MESSAGE_REPLY = 0x02,
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1 = 0x03,
+ NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 = 0x04,
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE2 = 0xf0,
+ NVME_AUTH_DHCHAP_MESSAGE_FAILURE1 = 0xf1,
+};
+
+struct nvmf_auth_dhchap_protocol_descriptor {
+ __u8 authid;
+ __u8 rsvd;
+ __u8 halen;
+ __u8 dhlen;
+ __u8 idlist[60];
+};
+
+enum {
+ NVME_AUTH_DHCHAP_AUTH_ID = 0x01,
+};
+
+/* Defined hash functions for DH-HMAC-CHAP authentication */
+enum {
+ NVME_AUTH_HASH_SHA256 = 0x01,
+ NVME_AUTH_HASH_SHA384 = 0x02,
+ NVME_AUTH_HASH_SHA512 = 0x03,
+ NVME_AUTH_HASH_INVALID = 0xff,
+};
+
+/* Defined Diffie-Hellman group identifiers for DH-HMAC-CHAP authentication */
+enum {
+ NVME_AUTH_DHGROUP_NULL = 0x00,
+ NVME_AUTH_DHGROUP_2048 = 0x01,
+ NVME_AUTH_DHGROUP_3072 = 0x02,
+ NVME_AUTH_DHGROUP_4096 = 0x03,
+ NVME_AUTH_DHGROUP_6144 = 0x04,
+ NVME_AUTH_DHGROUP_8192 = 0x05,
+ NVME_AUTH_DHGROUP_INVALID = 0xff,
+};
+
+enum {
+ NVME_AUTH_SECP_NOSC = 0x00,
+ NVME_AUTH_SECP_SC = 0x01,
+ NVME_AUTH_SECP_NEWTLSPSK = 0x02,
+ NVME_AUTH_SECP_REPLACETLSPSK = 0x03,
+};
+
+union nvmf_auth_protocol {
+ struct nvmf_auth_dhchap_protocol_descriptor dhchap;
+};
+
+struct nvmf_auth_dhchap_negotiate_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd;
+ __le16 t_id;
+ __u8 sc_c;
+ __u8 napd;
+ union nvmf_auth_protocol auth_protocol[];
+};
+
+struct nvmf_auth_dhchap_challenge_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __u16 rsvd1;
+ __le16 t_id;
+ __u8 hl;
+ __u8 rsvd2;
+ __u8 hashid;
+ __u8 dhgid;
+ __le16 dhvlen;
+ __le32 seqnum;
+ /* 'hl' bytes of challenge value */
+ __u8 cval[];
+ /* followed by 'dhvlen' bytes of DH value */
+};
+
+struct nvmf_auth_dhchap_reply_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 hl;
+ __u8 rsvd2;
+ __u8 cvalid;
+ __u8 rsvd3;
+ __le16 dhvlen;
+ __le32 seqnum;
+ /* 'hl' bytes of response data */
+ __u8 rval[];
+ /* followed by 'hl' bytes of Challenge value */
+ /* followed by 'dhvlen' bytes of DH value */
+};
+
+enum {
+ NVME_AUTH_DHCHAP_RESPONSE_VALID = (1 << 0),
+};
+
+struct nvmf_auth_dhchap_success1_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 hl;
+ __u8 rsvd2;
+ __u8 rvalid;
+ __u8 rsvd3[7];
+ /* 'hl' bytes of response value */
+ __u8 rval[];
+};
+
+struct nvmf_auth_dhchap_success2_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 rsvd2[10];
+};
+
+struct nvmf_auth_dhchap_failure_data {
+ __u8 auth_type;
+ __u8 auth_id;
+ __le16 rsvd1;
+ __le16 t_id;
+ __u8 rescode;
+ __u8 rescode_exp;
+};
+
+enum {
+ NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED = 0x01,
+};
+
+enum {
+ NVME_AUTH_DHCHAP_FAILURE_FAILED = 0x01,
+ NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE = 0x02,
+ NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH = 0x03,
+ NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE = 0x04,
+ NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE = 0x05,
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD = 0x06,
+ NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE = 0x07,
+};
+
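The structs above define the DH-HMAC-CHAP handshake payloads end to end. As
an illustration only (not the driver's actual code), a negotiate payload
offering SHA-256 with the NULL DH group could be built like this; in this
sketch hash IDs sit at the start of idlist and DH group IDs at offset 30:

	/* Sketch, assuming kernel context (kzalloc); error handling trimmed. */
	static struct nvmf_auth_dhchap_negotiate_data *demo_build_negotiate(void)
	{
		struct nvmf_auth_dhchap_negotiate_data *data;

		data = kzalloc(sizeof(*data) + sizeof(union nvmf_auth_protocol),
			       GFP_KERNEL);
		if (!data)
			return NULL;

		data->auth_type = NVME_AUTH_COMMON_MESSAGES;
		data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
		data->t_id = cpu_to_le16(0);
		data->sc_c = 0;		/* no secure channel concatenation */
		data->napd = 1;
		data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
		data->auth_protocol[0].dhchap.halen = 1;
		data->auth_protocol[0].dhchap.dhlen = 1;
		data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
		data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
		return data;
	}
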
+
struct nvme_dbbuf {
__u8 opcode;
__u8 flags;
@@ -1429,16 +1994,60 @@ struct nvme_command {
struct nvmf_connect_command connect;
struct nvmf_property_set_command prop_set;
struct nvmf_property_get_command prop_get;
+ struct nvmf_auth_common_command auth_common;
+ struct nvmf_auth_send_command auth_send;
+ struct nvmf_auth_receive_command auth_receive;
struct nvme_dbbuf dbbuf;
struct nvme_directive_cmd directive;
+ struct nvme_io_mgmt_recv_cmd imr;
};
};
-static inline bool nvme_is_fabrics(struct nvme_command *cmd)
+static inline bool nvme_is_fabrics(const struct nvme_command *cmd)
{
return cmd->common.opcode == nvme_fabrics_command;
}
+#ifdef CONFIG_NVME_VERBOSE_ERRORS
+const char *nvme_get_error_status_str(u16 status);
+const char *nvme_get_opcode_str(u8 opcode);
+const char *nvme_get_admin_opcode_str(u8 opcode);
+const char *nvme_get_fabrics_opcode_str(u8 opcode);
+#else /* CONFIG_NVME_VERBOSE_ERRORS */
+static inline const char *nvme_get_error_status_str(u16 status)
+{
+ return "I/O Error";
+}
+static inline const char *nvme_get_opcode_str(u8 opcode)
+{
+ return "I/O Cmd";
+}
+static inline const char *nvme_get_admin_opcode_str(u8 opcode)
+{
+ return "Admin Cmd";
+}
+
+static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
+{
+ return "Fabrics Cmd";
+}
+#endif /* CONFIG_NVME_VERBOSE_ERRORS */
+
+static inline const char *nvme_opcode_str(int qid, u8 opcode)
+{
+ return qid ? nvme_get_opcode_str(opcode) :
+ nvme_get_admin_opcode_str(opcode);
+}
+
+static inline const char *nvme_fabrics_opcode_str(
+ int qid, const struct nvme_command *cmd)
+{
+ if (nvme_is_fabrics(cmd))
+ return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
+
+ return nvme_opcode_str(qid, cmd->common.opcode);
+}
+
struct nvme_error_slot {
__le64 error_count;
__le16 sqid;
@@ -1453,7 +2062,7 @@ struct nvme_error_slot {
__u8 resv2[24];
};
-static inline bool nvme_is_write(struct nvme_command *cmd)
+static inline bool nvme_is_write(const struct nvme_command *cmd)
{
/*
* What a mess...
@@ -1469,6 +2078,7 @@ enum {
/*
* Generic Command Status:
*/
+ NVME_SCT_GENERIC = 0x0,
NVME_SC_SUCCESS = 0x0,
NVME_SC_INVALID_OPCODE = 0x1,
NVME_SC_INVALID_FIELD = 0x2,
@@ -1504,6 +2114,8 @@ enum {
NVME_SC_NS_WRITE_PROTECTED = 0x20,
NVME_SC_CMD_INTERRUPTED = 0x21,
NVME_SC_TRANSIENT_TR_ERR = 0x22,
+ NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY = 0x24,
+ NVME_SC_INVALID_IO_CMD_SET = 0x2C,
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
@@ -1514,6 +2126,7 @@ enum {
/*
* Command Specific Status:
*/
+ NVME_SCT_COMMAND_SPECIFIC = 0x100,
NVME_SC_CQ_INVALID = 0x100,
NVME_SC_QID_INVALID = 0x101,
NVME_SC_QUEUE_SIZE = 0x102,
@@ -1542,7 +2155,7 @@ enum {
NVME_SC_NS_NOT_ATTACHED = 0x11a,
NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
NVME_SC_CTRL_LIST_INVALID = 0x11c,
- NVME_SC_SELT_TEST_IN_PROGRESS = 0x11d,
+ NVME_SC_SELF_TEST_IN_PROGRESS = 0x11d,
NVME_SC_BP_WRITE_PROHIBITED = 0x11e,
NVME_SC_CTRL_ID_INVALID = 0x11f,
NVME_SC_SEC_CTRL_STATE_INVALID = 0x120,
@@ -1558,7 +2171,7 @@ enum {
NVME_SC_BAD_ATTRIBUTES = 0x180,
NVME_SC_INVALID_PI = 0x181,
NVME_SC_READ_ONLY = 0x182,
- NVME_SC_ONCS_NOT_SUPPORTED = 0x183,
+ NVME_SC_CMD_SIZE_LIM_EXCEEDED = 0x183,
/*
* I/O Command Set Specific - Fabrics commands:
@@ -1587,6 +2200,7 @@ enum {
/*
* Media and Data Integrity Errors:
*/
+ NVME_SCT_MEDIA_ERROR = 0x200,
NVME_SC_WRITE_FAULT = 0x280,
NVME_SC_READ_ERROR = 0x281,
NVME_SC_GUARD_CHECK = 0x282,
@@ -1599,16 +2213,26 @@ enum {
/*
* Path-related Errors:
*/
+ NVME_SCT_PATH = 0x300,
+ NVME_SC_INTERNAL_PATH_ERROR = 0x300,
NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
NVME_SC_ANA_INACCESSIBLE = 0x302,
NVME_SC_ANA_TRANSITION = 0x303,
+ NVME_SC_CTRL_PATH_ERROR = 0x360,
NVME_SC_HOST_PATH_ERROR = 0x370,
NVME_SC_HOST_ABORTED_CMD = 0x371,
- NVME_SC_CRD = 0x1800,
- NVME_SC_DNR = 0x4000,
+ NVME_SC_MASK = 0x00ff, /* Status Code */
+ NVME_SCT_MASK = 0x0700, /* Status Code Type */
+ NVME_SCT_SC_MASK = NVME_SCT_MASK | NVME_SC_MASK,
+
+ NVME_STATUS_CRD = 0x1800, /* Command Retry Delayed */
+ NVME_STATUS_MORE = 0x2000,
+ NVME_STATUS_DNR = 0x4000, /* Do Not Retry */
};
+#define NVME_SCT(status) ((status) >> 8 & 7)
+
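With the masks above, a CQE status word splits into a status code type
(NVME_SCT_MASK, or NVME_SCT() for the bare 3-bit type), a status code
(NVME_SC_MASK), and retry-control bits. A hypothetical helper:

	/* Sketch: decide whether a failed command is worth retrying. */
	static bool demo_status_retryable(u16 status)
	{
		if (status & NVME_STATUS_DNR)	/* controller: do not retry */
			return false;
		/* path errors are candidates for failover to another path */
		return (status & NVME_SCT_MASK) == NVME_SCT_PATH;
	}
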
struct nvme_completion {
/*
* Used by Admin and Fabrics commands to return data:
@@ -1631,4 +2255,81 @@ struct nvme_completion {
#define NVME_MINOR(ver) (((ver) >> 8) & 0xff)
#define NVME_TERTIARY(ver) ((ver) & 0xff)
+enum {
+ NVME_AEN_RESV_LOG_PAGE_AVALIABLE = 0x00,
+};
+
+enum {
+ NVME_PR_LOG_EMPTY_LOG_PAGE = 0x00,
+ NVME_PR_LOG_REGISTRATION_PREEMPTED = 0x01,
+ NVME_PR_LOG_RESERVATION_RELEASED = 0x02,
+ NVME_PR_LOG_RESERVATOIN_PREEMPTED = 0x03,
+};
+
+enum {
+ NVME_PR_NOTIFY_BIT_REG_PREEMPTED = 1,
+ NVME_PR_NOTIFY_BIT_RESV_RELEASED = 2,
+ NVME_PR_NOTIFY_BIT_RESV_PREEMPTED = 3,
+};
+
+struct nvme_pr_log {
+ __le64 count;
+ __u8 type;
+ __u8 nr_pages;
+ __u8 rsvd1[2];
+ __le32 nsid;
+ __u8 rsvd2[48];
+};
+
+struct nvmet_pr_register_data {
+ __le64 crkey;
+ __le64 nrkey;
+};
+
+struct nvmet_pr_acquire_data {
+ __le64 crkey;
+ __le64 prkey;
+};
+
+struct nvmet_pr_release_data {
+ __le64 crkey;
+};
+
+enum nvme_pr_capabilities {
+ NVME_PR_SUPPORT_PTPL = 1,
+ NVME_PR_SUPPORT_WRITE_EXCLUSIVE = 1 << 1,
+ NVME_PR_SUPPORT_EXCLUSIVE_ACCESS = 1 << 2,
+ NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY = 1 << 3,
+ NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY = 1 << 4,
+ NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS = 1 << 5,
+ NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS = 1 << 6,
+ NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF = 1 << 7,
+};
+
+enum nvme_pr_register_action {
+ NVME_PR_REGISTER_ACT_REG = 0,
+ NVME_PR_REGISTER_ACT_UNREG = 1,
+ NVME_PR_REGISTER_ACT_REPLACE = 1 << 1,
+};
+
+enum nvme_pr_acquire_action {
+ NVME_PR_ACQUIRE_ACT_ACQUIRE = 0,
+ NVME_PR_ACQUIRE_ACT_PREEMPT = 1,
+ NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT = 1 << 1,
+};
+
+enum nvme_pr_release_action {
+ NVME_PR_RELEASE_ACT_RELEASE = 0,
+ NVME_PR_RELEASE_ACT_CLEAR = 1,
+};
+
+enum nvme_pr_change_ptpl {
+ NVME_PR_CPTPL_NO_CHANGE = 0,
+ NVME_PR_CPTPL_RESV = 1 << 30,
+ NVME_PR_CPTPL_CLEARED = 2 << 30,
+ NVME_PR_CPTPL_PERSIST = 3 << 30,
+};
+
+#define NVME_PR_IGNORE_KEY (1 << 3)
+
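These PR structures pair with the Reservation Register/Acquire/Release
commands; the action enums, NVME_PR_IGNORE_KEY, and the CPTPL values are
or'ed into cdw10. A sketch with illustrative key values:

	/* Sketch: register a new key, ignoring the current one, and ask the
	 * controller to persist the registration across power loss. */
	struct nvmet_pr_register_data reg = {
		.crkey = 0,				/* unused with IEKEY */
		.nrkey = cpu_to_le64(0x1234abcd),
	};
	u32 cdw10 = NVME_PR_REGISTER_ACT_REG | NVME_PR_IGNORE_KEY |
		    NVME_PR_CPTPL_PERSIST;
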
#endif /* _LINUX_NVME_H */
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 923dada24eb4..34c0e58dfa26 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -18,14 +18,7 @@ struct device_node;
/* consumer cookie */
struct nvmem_cell;
struct nvmem_device;
-
-struct nvmem_cell_info {
- const char *name;
- unsigned int offset;
- unsigned int bytes;
- unsigned int bit_offset;
- unsigned int nbits;
-};
+struct nvmem_cell_info;
/**
* struct nvmem_cell_lookup - cell lookup entry
@@ -50,6 +43,8 @@ enum {
NVMEM_REMOVE,
NVMEM_CELL_ADD,
NVMEM_CELL_REMOVE,
+ NVMEM_LAYOUT_ADD,
+ NVMEM_LAYOUT_REMOVE,
};
#if IS_ENABLED(CONFIG_NVMEM)
@@ -86,6 +81,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell_info *info, void *buf);
const char *nvmem_dev_name(struct nvmem_device *nvmem);
+size_t nvmem_dev_size(struct nvmem_device *nvmem);
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries,
size_t nentries);
@@ -132,6 +128,12 @@ static inline int nvmem_cell_write(struct nvmem_cell *cell,
return -EOPNOTSUPP;
}
+static inline int nvmem_cell_read_u8(struct device *dev,
+ const char *cell_id, u8 *val)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int nvmem_cell_read_u16(struct device *dev,
const char *cell_id, u16 *val)
{
@@ -150,6 +152,20 @@ static inline int nvmem_cell_read_u64(struct device *dev,
return -EOPNOTSUPP;
}
+static inline int nvmem_cell_read_variable_le_u32(struct device *dev,
+ const char *cell_id,
+ u32 *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int nvmem_cell_read_variable_le_u64(struct device *dev,
+ const char *cell_id,
+ u64 *val)
+{
+ return -EOPNOTSUPP;
+}
+
static inline struct nvmem_device *nvmem_device_get(struct device *dev,
const char *name)
{
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index e162b757b6d5..f3b13da78aac 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -9,22 +9,28 @@
#ifndef _LINUX_NVMEM_PROVIDER_H
#define _LINUX_NVMEM_PROVIDER_H
+#include <linux/device.h>
+#include <linux/device/driver.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
struct nvmem_device;
-struct nvmem_cell_info;
typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset,
void *val, size_t bytes);
typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset,
void *val, size_t bytes);
+/* used for vendor-specific post-processing of cell data */
+typedef int (*nvmem_cell_post_process_t)(void *priv, const char *id, int index,
+ unsigned int offset, void *buf,
+ size_t bytes);
enum nvmem_type {
NVMEM_TYPE_UNKNOWN = 0,
NVMEM_TYPE_EEPROM,
NVMEM_TYPE_OTP,
NVMEM_TYPE_BATTERY_BACKED,
+ NVMEM_TYPE_FRAM,
};
#define NVMEM_DEVID_NONE (-1)
@@ -44,6 +50,31 @@ struct nvmem_keepout {
};
/**
+ * struct nvmem_cell_info - NVMEM cell description
+ * @name: Name.
+ * @offset: Offset within the NVMEM device.
+ * @raw_len: Length of raw data (without post processing).
+ * @bytes: Length of the cell.
+ * @bit_offset: Bit offset if cell is smaller than a byte.
+ * @nbits: Number of bits.
+ * @np: Optional device_node pointer.
+ * @read_post_process: Callback for optional post processing of cell data
+ * on reads.
+ * @priv: Opaque data passed to the read_post_process hook.
+ */
+struct nvmem_cell_info {
+ const char *name;
+ unsigned int offset;
+ size_t raw_len;
+ unsigned int bytes;
+ unsigned int bit_offset;
+ unsigned int nbits;
+ struct device_node *np;
+ nvmem_cell_post_process_t read_post_process;
+ void *priv;
+};
+
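The read_post_process hook lets a provider massage raw cell bytes before
consumers see them. A purely illustrative cell (names and offsets made up):

	/* Sketch: a cell stored byte-reversed in the device, flipped on read. */
	static int demo_swap_bytes(void *priv, const char *id, int index,
				   unsigned int offset, void *buf, size_t bytes)
	{
		u8 *p = buf;
		size_t i;

		for (i = 0; i < bytes / 2; i++)
			swap(p[i], p[bytes - 1 - i]);
		return 0;
	}

	static const struct nvmem_cell_info demo_cell = {
		.name		= "serial-number",
		.offset		= 0x20,
		.bytes		= 8,
		.raw_len	= 8,
		.read_post_process = demo_swap_bytes,
	};
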
+/**
* struct nvmem_config - NVMEM device configuration
*
* @dev: Parent device.
@@ -52,23 +83,27 @@ struct nvmem_keepout {
* @owner: Pointer to exporter module. Used for refcounting.
* @cells: Optional array of pre-defined NVMEM cells.
* @ncells: Number of elements in cells.
+ * @add_legacy_fixed_of_cells: Read fixed NVMEM cells from old OF syntax.
+ * @fixup_dt_cell_info: Will be called before a cell is added. Can be
+ * used to modify the nvmem_cell_info.
* @keepout: Optional array of keepout ranges (sorted ascending by start).
* @nkeepout: Number of elements in the keepout array.
* @type: Type of the nvmem storage
* @read_only: Device is read-only.
 * @root_only:	Device is accessible to root only.
- * @no_of_node: Device should not use the parent's of_node even if it's !NULL.
- * @reg_read: Callback to read data.
- * @reg_write: Callback to write data.
+ * @of_node: If given, this will be used instead of the parent's of_node.
+ * @reg_read: Callback to read data; return zero if successful.
+ * @reg_write: Callback to write data; return zero if successful.
* @size: Device size.
* @word_size: Minimum read/write access granularity.
* @stride: Minimum read/write access stride.
* @priv: User context passed to read/write callbacks.
- * @wp-gpio: Write protect pin
+ * @ignore_wp: Write Protect pin is managed by the provider.
+ * @layout: Fixed layout associated with this nvmem device.
*
* Note: A default "nvmem<id>" name will be assigned to the device if
* no name is specified in its configuration. In such case "<id>" is
- * generated with ida_simple_get() and provided id field is ignored.
+ * generated with ida_alloc() and provided id field is ignored.
*
* Note: Specifying name and setting id to -1 implies a unique device
* whose name is provided as-is (kept unaltered).
@@ -78,15 +113,19 @@ struct nvmem_config {
const char *name;
int id;
struct module *owner;
- struct gpio_desc *wp_gpio;
const struct nvmem_cell_info *cells;
int ncells;
+ bool add_legacy_fixed_of_cells;
+ void (*fixup_dt_cell_info)(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *cell);
const struct nvmem_keepout *keepout;
unsigned int nkeepout;
enum nvmem_type type;
bool read_only;
bool root_only;
- bool no_of_node;
+ bool ignore_wp;
+ struct nvmem_layout *layout;
+ struct device_node *of_node;
nvmem_reg_read_t reg_read;
nvmem_reg_write_t reg_write;
int size;
@@ -99,22 +138,29 @@ struct nvmem_config {
};
/**
- * struct nvmem_cell_table - NVMEM cell definitions for given provider
+ * struct nvmem_layout - NVMEM layout definitions
*
- * @nvmem_name: Provider name.
- * @cells: Array of cell definitions.
- * @ncells: Number of cell definitions in the array.
- * @node: List node.
+ * @dev: Device-model layout device.
+ * @nvmem: The underlying NVMEM device
+ * @add_cells:		Will be called if an nvmem device is found which
+ *			has this layout. The function will add layout-
+ *			specific cells with nvmem_add_one_cell().
*
- * This structure together with related helper functions is provided for users
- * that don't can't access the nvmem provided structure but wish to register
- * cell definitions for it e.g. board files registering an EEPROM device.
+ * An nvmem device can hold a well-defined structure which can be
+ * evaluated at runtime, for example a TLV list or a list of "name=val"
+ * pairs. An nvmem layout can parse the nvmem device and add the
+ * appropriate cells.
*/
-struct nvmem_cell_table {
- const char *nvmem_name;
- const struct nvmem_cell_info *cells;
- size_t ncells;
- struct list_head node;
+struct nvmem_layout {
+ struct device dev;
+ struct nvmem_device *nvmem;
+ int (*add_cells)(struct nvmem_layout *layout);
+};
+
+struct nvmem_layout_driver {
+ struct device_driver driver;
+ int (*probe)(struct nvmem_layout *layout);
+ void (*remove)(struct nvmem_layout *layout);
};
#if IS_ENABLED(CONFIG_NVMEM)
@@ -125,10 +171,20 @@ void nvmem_unregister(struct nvmem_device *nvmem);
struct nvmem_device *devm_nvmem_register(struct device *dev,
const struct nvmem_config *cfg);
-int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem);
+int nvmem_add_one_cell(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info);
+
+int nvmem_layout_register(struct nvmem_layout *layout);
+void nvmem_layout_unregister(struct nvmem_layout *layout);
-void nvmem_add_cell_table(struct nvmem_cell_table *table);
-void nvmem_del_cell_table(struct nvmem_cell_table *table);
+#define nvmem_layout_driver_register(drv) \
+ __nvmem_layout_driver_register(drv, THIS_MODULE)
+int __nvmem_layout_driver_register(struct nvmem_layout_driver *drv,
+ struct module *owner);
+void nvmem_layout_driver_unregister(struct nvmem_layout_driver *drv);
+#define module_nvmem_layout_driver(__nvmem_layout_driver) \
+ module_driver(__nvmem_layout_driver, nvmem_layout_driver_register, \
+ nvmem_layout_driver_unregister)
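A layout driver binds to the layout device, sets add_cells(), and registers
the layout; add_cells() then feeds nvmem_add_one_cell(). A bare-bones sketch
(names hypothetical; a real layout would parse the device contents, e.g. a
TLV list, before adding cells):

	static int demo_add_cells(struct nvmem_layout *layout)
	{
		struct nvmem_cell_info info = {
			.name	= "demo-cell",
			.offset	= 0,
			.bytes	= 4,
		};

		return nvmem_add_one_cell(layout->nvmem, &info);
	}

	static int demo_layout_probe(struct nvmem_layout *layout)
	{
		layout->add_cells = demo_add_cells;
		return nvmem_layout_register(layout);
	}

	static void demo_layout_remove(struct nvmem_layout *layout)
	{
		nvmem_layout_unregister(layout);
	}

	static struct nvmem_layout_driver demo_layout_driver = {
		.driver	= { .name = "demo-layout" },
		.probe	= demo_layout_probe,
		.remove	= demo_layout_remove,
	};
	module_nvmem_layout_driver(demo_layout_driver);
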
#else
@@ -145,14 +201,40 @@ devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
return nvmem_register(c);
}
-static inline int
-devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
+static inline int nvmem_add_one_cell(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int nvmem_layout_register(struct nvmem_layout *layout)
{
return -EOPNOTSUPP;
}
-static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {}
-static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {}
+static inline void nvmem_layout_unregister(struct nvmem_layout *layout) {}
#endif /* CONFIG_NVMEM */
+
+#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+
+/**
+ * of_nvmem_layout_get_container() - Get OF node of layout container
+ *
+ * @nvmem: nvmem device
+ *
+ * Return: a node pointer with refcount incremented or NULL if no
+ * container exists. Use of_node_put() on it when done.
+ */
+struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem);
+
+#else /* CONFIG_NVMEM && CONFIG_OF */
+
+static inline struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_NVMEM && CONFIG_OF */
+
#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
diff --git a/include/linux/oa_tc6.h b/include/linux/oa_tc6.h
new file mode 100644
index 000000000000..15f58e3c56c7
--- /dev/null
+++ b/include/linux/oa_tc6.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * OPEN Alliance 10BASE-T1x MAC-PHY Serial Interface framework
+ *
+ * Link: https://opensig.org/download/document/OPEN_Alliance_10BASET1x_MAC-PHY_Serial_Interface_V1.1.pdf
+ *
+ * Author: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/spi/spi.h>
+
+struct oa_tc6;
+
+struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev);
+void oa_tc6_exit(struct oa_tc6 *tc6);
+int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, u32 value);
+int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length);
+int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, u32 *value);
+int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, u32 value[],
+ u8 length);
+netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb);
+int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6);
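Consumption follows the usual SPI netdev pattern: allocate a net_device,
hand it to oa_tc6_init() together with the spi_device, then use the
register accessors. A sketch with error paths trimmed (the register
address is illustrative, not taken from the OPEN Alliance spec):

	static int demo_macphy_probe(struct spi_device *spi)
	{
		struct net_device *netdev =
			devm_alloc_etherdev(&spi->dev, sizeof(void *));
		struct oa_tc6 *tc6;
		u32 val;

		if (!netdev)
			return -ENOMEM;

		tc6 = oa_tc6_init(spi, netdev);
		if (!tc6)
			return -ENODEV;

		return oa_tc6_read_register(tc6, 0x0008, &val);
	}
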
diff --git a/include/linux/objagg.h b/include/linux/objagg.h
index 78021777df46..6df5b887dc54 100644
--- a/include/linux/objagg.h
+++ b/include/linux/objagg.h
@@ -8,7 +8,6 @@ struct objagg_ops {
size_t obj_size;
bool (*delta_check)(void *priv, const void *parent_obj,
const void *obj);
- int (*hints_obj_cmp)(const void *obj1, const void *obj2);
void * (*delta_create)(void *priv, void *parent_obj, void *obj);
void (*delta_destroy)(void *priv, void *delta_priv);
void * (*root_create)(void *priv, void *obj, unsigned int root_id);
diff --git a/include/linux/objpool.h b/include/linux/objpool.h
new file mode 100644
index 000000000000..b713a1fe7521
--- /dev/null
+++ b/include/linux/objpool.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_OBJPOOL_H
+#define _LINUX_OBJPOOL_H
+
+#include <linux/types.h>
+#include <linux/refcount.h>
+#include <linux/atomic.h>
+#include <linux/cpumask.h>
+#include <linux/irqflags.h>
+#include <linux/smp.h>
+
+/*
+ * objpool: ring-array based lockless MPMC queue
+ *
+ * Copyright: wuqiang.matt@bytedance.com, mhiramat@kernel.org
+ *
+ * objpool is a scalable implementation of high performance queue for
+ * object allocation and reclamation, such as kretprobe instances.
+ *
+ * With leveraging percpu ring-array to mitigate hot spots of memory
+ * contention, it delivers near-linear scalability for high parallel
+ * scenarios. The objpool is best suited for the following cases:
+ * 1) Memory allocation or reclamation are prohibited or too expensive
+ * 2) Consumers are of different priorities, such as irqs and threads
+ *
+ * Limitations:
+ * 1) Maximum objects (capacity) is fixed after objpool creation
+ * 2) All pre-allocated objects are managed in percpu ring array,
+ * which consumes more memory than linked lists
+ */
+
+/**
+ * struct objpool_slot - percpu ring array of objpool
+ * @head: head sequence of the local ring array (to retrieve at)
+ * @tail: tail sequence of the local ring array (to append at)
+ * @last: the last sequence number marked as ready for retrieve
+ * @mask: bits mask for modulo capacity to compute array indexes
+ * @entries: object entries on this slot
+ *
+ * Represents a cpu-local array-based ring buffer; its size is specified
+ * during initialization of the object pool. On NUMA systems each percpu
+ * slot is allocated from local memory and kept compact in contiguous
+ * memory: the objects assigned to a CPU are stored just after the body
+ * of its objpool_slot.
+ *
+ * The real size of the ring array is far smaller than the value range of
+ * head and tail, which are typed as uint32_t: [0, 2^32), so only the lower
+ * bits (mask) of head and tail are used as the actual position in the
+ * ring array. In general the ring array acts like a small sliding window
+ * that keeps moving forward within [0, 2^32).
+ */
+struct objpool_slot {
+ uint32_t head;
+ uint32_t tail;
+ uint32_t last;
+ uint32_t mask;
+ void *entries[];
+} __packed;
+
+struct objpool_head;
+
+/*
+ * caller-specified callback for object initial setup, it's only called
+ * once for each object (just after the memory allocation of the object)
+ */
+typedef int (*objpool_init_obj_cb)(void *obj, void *context);
+
+/* caller-specified cleanup callback for objpool destruction */
+typedef int (*objpool_fini_cb)(struct objpool_head *head, void *context);
+
+/**
+ * struct objpool_head - object pooling metadata
+ * @obj_size: object size, aligned to sizeof(void *)
+ * @nr_objs: total objs (to be pre-allocated with objpool)
+ * @nr_possible_cpus: cached value of num_possible_cpus()
+ * @capacity: max objs can be managed by one objpool_slot
+ * @gfp: gfp flags for kmalloc & vmalloc
+ * @ref: refcount of objpool
+ * @flags: flags for objpool management
+ * @cpu_slots: pointer to the array of objpool_slot
+ * @release: resource cleanup callback
+ * @context: caller-provided context
+ */
+struct objpool_head {
+ int obj_size;
+ int nr_objs;
+ int nr_possible_cpus;
+ int capacity;
+ gfp_t gfp;
+ refcount_t ref;
+ unsigned long flags;
+ struct objpool_slot **cpu_slots;
+ objpool_fini_cb release;
+ void *context;
+};
+
+#define OBJPOOL_NR_OBJECT_MAX (1UL << 24) /* maximum numbers of total objects */
+#define OBJPOOL_OBJECT_SIZE_MAX (1UL << 16) /* maximum size of an object */
+
+/**
+ * objpool_init() - initialize objpool and pre-allocated objects
+ * @pool: the object pool to be initialized, declared by caller
+ * @nr_objs: total objects to be pre-allocated by this object pool
+ * @object_size: size of an object (should be > 0)
+ * @gfp: flags for memory allocation (via kmalloc or vmalloc)
+ * @context: user context for object initialization callback
+ * @objinit: object initialization callback for extra setup
+ * @release: cleanup callback for extra cleanup task
+ *
+ * return value: 0 for success, otherwise error code
+ *
+ * All pre-allocated objects are to be zeroed after memory allocation.
+ * Caller could do extra initialization in objinit callback. objinit()
+ * will be called just after slot allocation and called only once for
+ * each object. After that the objpool won't touch any content of the
+ * objects. It's caller's duty to perform reinitialization after each
+ * pop (object allocation) or do clearance before each push (object
+ * reclamation).
+ */
+int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
+ gfp_t gfp, void *context, objpool_init_obj_cb objinit,
+ objpool_fini_cb release);
+
+/* try to retrieve object from slot */
+static inline void *__objpool_try_get_slot(struct objpool_head *pool, int cpu)
+{
+ struct objpool_slot *slot = pool->cpu_slots[cpu];
+ /* load head snapshot, other cpus may change it */
+ uint32_t head = smp_load_acquire(&slot->head);
+
+ while (head != READ_ONCE(slot->last)) {
+ void *obj;
+
+ /*
+ * data visibility of 'last' and 'head' could be out of
+ * order since memory updating of 'last' and 'head' are
+ * performed in push() and pop() independently
+ *
+		 * before any retrieving attempts, pop() must guarantee
+		 * 'head' is behind 'last', that is to say, there must
+ * be available objects in slot, which could be ensured
+ * by condition 'last != head && last - head <= nr_objs'
+ * that is equivalent to 'last - head - 1 < nr_objs' as
+ * 'last' and 'head' are both unsigned int32
+ */
+ if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) {
+ head = READ_ONCE(slot->head);
+ continue;
+ }
+
+ /* obj must be retrieved before moving forward head */
+ obj = READ_ONCE(slot->entries[head & slot->mask]);
+
+ /* move head forward to mark it's consumption */
+ if (try_cmpxchg_release(&slot->head, &head, head + 1))
+ return obj;
+ }
+
+ return NULL;
+}
+
+/**
+ * objpool_pop() - allocate an object from objpool
+ * @pool: object pool
+ *
+ * return value: object ptr or NULL if failed
+ */
+static inline void *objpool_pop(struct objpool_head *pool)
+{
+ void *obj = NULL;
+ unsigned long flags;
+ int start, cpu;
+
+ /* disable local irq to avoid preemption & interruption */
+ raw_local_irq_save(flags);
+
+ start = raw_smp_processor_id();
+ for_each_possible_cpu_wrap(cpu, start) {
+ obj = __objpool_try_get_slot(pool, cpu);
+ if (obj)
+ break;
+ }
+ raw_local_irq_restore(flags);
+
+ return obj;
+}
+
+/* add the object to a slot; an over-full slot only triggers WARN_ON_ONCE */
+static inline int
+__objpool_try_add_slot(void *obj, struct objpool_head *pool, int cpu)
+{
+ struct objpool_slot *slot = pool->cpu_slots[cpu];
+ uint32_t head, tail;
+
+ /* loading tail and head as a local snapshot, tail first */
+ tail = READ_ONCE(slot->tail);
+
+ do {
+ head = READ_ONCE(slot->head);
+ /* fault caught: something must be wrong */
+ WARN_ON_ONCE(tail - head > pool->nr_objs);
+ } while (!try_cmpxchg_acquire(&slot->tail, &tail, tail + 1));
+
+ /* now the tail position is reserved for the given obj */
+ WRITE_ONCE(slot->entries[tail & slot->mask], obj);
+ /* update sequence to make this obj available for pop() */
+ smp_store_release(&slot->last, tail + 1);
+
+ return 0;
+}
+
+/**
+ * objpool_push() - reclaim the object and return back to objpool
+ * @obj: object ptr to be pushed to objpool
+ * @pool: object pool
+ *
+ * return: 0 or error code (it fails only when the user pushes the
+ * same object multiple times or pushes foreign "objects" into objpool)
+ */
+static inline int objpool_push(void *obj, struct objpool_head *pool)
+{
+ unsigned long flags;
+ int rc;
+
+ /* disable local irq to avoid preemption & interruption */
+ raw_local_irq_save(flags);
+ rc = __objpool_try_add_slot(obj, pool, raw_smp_processor_id());
+ raw_local_irq_restore(flags);
+
+ return rc;
+}
+
+
+/**
+ * objpool_drop() - discard the object and deref objpool
+ * @obj: object ptr to be discarded
+ * @pool: object pool
+ *
+ * return: 0 if objpool was released; -EAGAIN if there are still
+ * outstanding objects
+ *
+ * objpool_drop() is normally used to release outstanding objects
+ * after objpool cleanup (objpool_fini()). Consider this example:
+ * a kretprobe is unregistered and objpool_fini() is called to release
+ * all remaining objects, but some objects are still in use by
+ * unfinished kretprobes (e.g. blocked in sys_accept). Only when the
+ * last outstanding object is dropped via objpool_drop() can the
+ * whole objpool be released.
+ */
+int objpool_drop(void *obj, struct objpool_head *pool);
+
+/**
+ * objpool_free() - forcibly release the objpool (all objects to be freed)
+ * @pool: object pool to be released
+ */
+void objpool_free(struct objpool_head *pool);
+
+/**
+ * objpool_fini() - deref object pool (also releasing unused objects)
+ * @pool: object pool to be dereferenced
+ *
+ * objpool_fini() will try to release all remaining free objects and
+ * then drop an extra reference of the objpool. If all objects have
+ * already been returned to the objpool (the synchronous use case),
+ * the objpool itself is freed as well. If there are still
+ * outstanding objects (the asynchronous use case, such as a
+ * blockable kretprobe), the objpool won't be released until all
+ * the outstanding objects are dropped, and the caller must ensure
+ * there is no concurrent objpool_push() in flight. Normally RCU
+ * is required to make sure all ongoing objpool_push() calls have
+ * finished before objpool_fini() is called, as is done by
+ * test_objpool, kretprobe and rethook.
+ */
+void objpool_fini(struct objpool_head *pool);
+
+#endif /* _LINUX_OBJPOOL_H */
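Putting the API together: the pool is initialized once, objects cycle
through pop/push from any context, and objpool_fini()/objpool_drop() tear
it down. A sketch (object type and counts are illustrative):

	struct demo_obj {
		int id;
		char payload[64];
	};

	static int demo_objinit(void *obj, void *context)
	{
		static int next_id;

		((struct demo_obj *)obj)->id = next_id++;
		return 0;
	}

	static struct objpool_head demo_pool;

	static int demo_setup(void)
	{
		return objpool_init(&demo_pool, 128, sizeof(struct demo_obj),
				    GFP_KERNEL, NULL, demo_objinit, NULL);
	}

	static void demo_use(void)
	{
		struct demo_obj *d = objpool_pop(&demo_pool);

		if (!d)
			return;		/* pool transiently empty */
		/* ... use d ... */
		objpool_push(d, &demo_pool);
	}
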
diff --git a/include/linux/objtool.h b/include/linux/objtool.h
index 7e72d975cb76..9a00e701454c 100644
--- a/include/linux/objtool.h
+++ b/include/linux/objtool.h
@@ -2,55 +2,23 @@
#ifndef _LINUX_OBJTOOL_H
#define _LINUX_OBJTOOL_H
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-
-/*
- * This struct is used by asm and inline asm code to manually annotate the
- * location of registers on the stack.
- */
-struct unwind_hint {
- u32 ip;
- s16 sp_offset;
- u8 sp_reg;
- u8 type;
- u8 end;
-};
-#endif
-
-/*
- * UNWIND_HINT_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP
- * (the caller's SP right before it made the call). Used for all callable
- * functions, i.e. all C code and all callable asm functions.
- *
- * UNWIND_HINT_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset
- * points to a fully populated pt_regs from a syscall, interrupt, or exception.
- *
- * UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that
- * sp_reg+sp_offset points to the iret return frame.
- *
- * UNWIND_HINT_FUNC: Generate the unwind metadata of a callable function.
- * Useful for code which doesn't have an ELF function annotation.
- */
-#define UNWIND_HINT_TYPE_CALL 0
-#define UNWIND_HINT_TYPE_REGS 1
-#define UNWIND_HINT_TYPE_REGS_PARTIAL 2
-#define UNWIND_HINT_TYPE_FUNC 3
+#include <linux/objtool_types.h>
+#include <linux/annotate.h>
-#ifdef CONFIG_STACK_VALIDATION
+#ifdef CONFIG_OBJTOOL
#ifndef __ASSEMBLY__
-#define UNWIND_HINT(sp_reg, sp_offset, type, end) \
+#define UNWIND_HINT(type, sp_reg, sp_offset, signal) \
"987: \n\t" \
".pushsection .discard.unwind_hints\n\t" \
+ ANNOTATE_DATA_SPECIAL "\n\t" \
/* struct unwind_hint */ \
".long 987b - .\n\t" \
".short " __stringify(sp_offset) "\n\t" \
".byte " __stringify(sp_reg) "\n\t" \
".byte " __stringify(type) "\n\t" \
- ".byte " __stringify(end) "\n\t" \
+ ".byte " __stringify(signal) "\n\t" \
".balign 4 \n\t" \
".popsection\n\t"
@@ -60,29 +28,38 @@ struct unwind_hint {
* It should only be used in special cases where you're 100% sure it won't
* affect the reliability of frame pointers and kernel stack traces.
*
- * For more information, see tools/objtool/Documentation/stack-validation.txt.
+ * For more information, see tools/objtool/Documentation/objtool.txt.
*/
#define STACK_FRAME_NON_STANDARD(func) \
static void __used __section(".discard.func_stack_frame_non_standard") \
*__func_stack_frame_non_standard_##func = func
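The macro is applied at file scope, next to the function it exempts, e.g.
(function name hypothetical):

	void demo_trampoline(void);	/* implemented in asm elsewhere */
	STACK_FRAME_NON_STANDARD(demo_trampoline);
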
-#else /* __ASSEMBLY__ */
-
/*
- * This macro indicates that the following intra-function call is valid.
- * Any non-annotated intra-function call will cause objtool to issue a warning.
+ * STACK_FRAME_NON_STANDARD_FP() is a frame-pointer-specific function ignore
+ * for the case where a function is intentionally missing frame pointer setup,
+ * but otherwise needs objtool/ORC coverage when frame pointers are disabled.
*/
-#define ANNOTATE_INTRA_FUNCTION_CALL \
- 999: \
- .pushsection .discard.intra_function_calls; \
- .long 999b; \
- .popsection;
+#ifdef CONFIG_FRAME_POINTER
+#define STACK_FRAME_NON_STANDARD_FP(func) STACK_FRAME_NON_STANDARD(func)
+#else
+#define STACK_FRAME_NON_STANDARD_FP(func)
+#endif
+
+#define ASM_REACHABLE \
+ "998:\n\t" \
+ ".pushsection .discard.reachable\n\t" \
+ ".long 998b\n\t" \
+ ".popsection\n\t"
+
+#define __ASM_BREF(label) label ## b
+
+#else /* __ASSEMBLY__ */
/*
* In asm, there are two kinds of code: normal C-type callable functions and
* the rest. The normal callable functions can be called by other code, and
* don't do anything unusual with the stack. Such normal callable functions
- * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this
+ * are annotated with SYM_FUNC_{START,END}. Most asm code falls in this
* category. In this case, no special debugging annotations are needed because
* objtool can automatically generate the ORC data for the ORC unwinder to read
* at runtime.
@@ -99,42 +76,55 @@ struct unwind_hint {
* the debuginfo as necessary. It will also warn if it sees any
* inconsistencies.
*/
-.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0
-.Lunwind_hint_ip_\@:
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0
+.Lhere_\@:
.pushsection .discard.unwind_hints
+ ANNOTATE_DATA_SPECIAL
/* struct unwind_hint */
- .long .Lunwind_hint_ip_\@ - .
+ .long .Lhere_\@ - .
.short \sp_offset
.byte \sp_reg
.byte \type
- .byte \end
+ .byte \signal
.balign 4
.popsection
.endm
.macro STACK_FRAME_NON_STANDARD func:req
.pushsection .discard.func_stack_frame_non_standard, "aw"
- .long \func - .
+ .quad \func
.popsection
.endm
+.macro STACK_FRAME_NON_STANDARD_FP func:req
+#ifdef CONFIG_FRAME_POINTER
+ STACK_FRAME_NON_STANDARD \func
+#endif
+.endm
+
#endif /* __ASSEMBLY__ */
-#else /* !CONFIG_STACK_VALIDATION */
+#else /* !CONFIG_OBJTOOL */
#ifndef __ASSEMBLY__
-#define UNWIND_HINT(sp_reg, sp_offset, type, end) \
- "\n\t"
+#define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t"
#define STACK_FRAME_NON_STANDARD(func)
+#define STACK_FRAME_NON_STANDARD_FP(func)
#else
-#define ANNOTATE_INTRA_FUNCTION_CALL
-.macro UNWIND_HINT sp_reg:req sp_offset=0 type:req end=0
+.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0
.endm
.macro STACK_FRAME_NON_STANDARD func:req
.endm
#endif
-#endif /* CONFIG_STACK_VALIDATION */
+#endif /* CONFIG_OBJTOOL */
+
+#if defined(CONFIG_NOINSTR_VALIDATION) && \
+ (defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO))
+#define VALIDATE_UNRET_BEGIN ANNOTATE_UNRET_BEGIN
+#else
+#define VALIDATE_UNRET_BEGIN
+#endif
#endif /* _LINUX_OBJTOOL_H */
diff --git a/include/linux/objtool_types.h b/include/linux/objtool_types.h
new file mode 100644
index 000000000000..c6def4049b1a
--- /dev/null
+++ b/include/linux/objtool_types.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_OBJTOOL_TYPES_H
+#define _LINUX_OBJTOOL_TYPES_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/*
+ * This struct is used by asm and inline asm code to manually annotate the
+ * location of registers on the stack.
+ */
+struct unwind_hint {
+ u32 ip;
+ s16 sp_offset;
+ u8 sp_reg;
+ u8 type;
+ u8 signal;
+};
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * UNWIND_HINT_TYPE_UNDEFINED: A blind spot in ORC coverage which can result in
+ * a truncated and unreliable stack unwind.
+ *
+ * UNWIND_HINT_TYPE_END_OF_STACK: The end of the kernel stack unwind before
+ * hitting user entry, boot code, or fork entry (when there are no pt_regs
+ * available).
+ *
+ * UNWIND_HINT_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP
+ * (the caller's SP right before it made the call). Used for all callable
+ * functions, i.e. all C code and all callable asm functions.
+ *
+ * UNWIND_HINT_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset
+ * points to a fully populated pt_regs from a syscall, interrupt, or exception.
+ *
+ * UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that
+ * sp_reg+sp_offset points to the iret return frame.
+ *
+ * UNWIND_HINT_TYPE_FUNC: Generate the unwind metadata of a callable function.
+ * Useful for code which doesn't have an ELF function annotation.
+ *
+ * UNWIND_HINT_TYPE_{SAVE,RESTORE}: Save the unwind metadata at a certain
+ * location so that it can be restored later.
+ */
+#define UNWIND_HINT_TYPE_UNDEFINED 0
+#define UNWIND_HINT_TYPE_END_OF_STACK 1
+#define UNWIND_HINT_TYPE_CALL 2
+#define UNWIND_HINT_TYPE_REGS 3
+#define UNWIND_HINT_TYPE_REGS_PARTIAL 4
+/* The below hint types don't have corresponding ORC types */
+#define UNWIND_HINT_TYPE_FUNC 5
+#define UNWIND_HINT_TYPE_SAVE 6
+#define UNWIND_HINT_TYPE_RESTORE 7
+
+/*
+ * Annotate types
+ */
+#define ANNOTYPE_NOENDBR 1
+#define ANNOTYPE_RETPOLINE_SAFE 2
+#define ANNOTYPE_INSTR_BEGIN 3
+#define ANNOTYPE_INSTR_END 4
+#define ANNOTYPE_UNRET_BEGIN 5
+#define ANNOTYPE_IGNORE_ALTS 6
+#define ANNOTYPE_INTRA_FUNCTION_CALL 7
+#define ANNOTYPE_REACHABLE 8
+#define ANNOTYPE_NOCFI 9
+
+#define ANNOTYPE_DATA_SPECIAL 1
+
+#endif /* _LINUX_OBJTOOL_TYPES_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index d8db8d3592fd..01bb3affcd49 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -13,17 +13,14 @@
*/
#include <linux/types.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/mod_devicetable.h>
-#include <linux/spinlock.h>
-#include <linux/topology.h>
-#include <linux/notifier.h>
#include <linux/property.h>
#include <linux/list.h>
#include <asm/byteorder.h>
-#include <asm/errno.h>
typedef u32 phandle;
typedef u32 ihandle;
@@ -70,7 +67,7 @@ struct device_node {
#endif
};
-#define MAX_PHANDLE_ARGS 16
+#define MAX_PHANDLE_ARGS NR_FWNODE_REFERENCE_ARGS
struct of_phandle_args {
struct device_node *np;
int args_count;
@@ -100,9 +97,19 @@ struct of_reconfig_data {
struct property *old_prop;
};
-/* initialize a node */
-extern struct kobj_type of_node_ktype;
+extern const struct kobj_type of_node_ktype;
extern const struct fwnode_operations of_fwnode_ops;
+
+/**
+ * of_node_init - initialize a devicetree node
+ * @node: Pointer to device node that has been created by kzalloc()
+ *
+ * On return the device_node refcount is set to one. Use of_node_put()
+ * on @node when done to free the memory allocated for it. If the node
+ * is NOT a dynamic node the memory will not be freed. The decision of
+ * whether to free the memory is made by node->release(), which is
+ * of_node_release().
+ */
static inline void of_node_init(struct device_node *node)
{
#if defined(CONFIG_OF_KOBJ)
@@ -128,13 +135,13 @@ static inline struct device_node *of_node_get(struct device_node *node)
}
static inline void of_node_put(struct device_node *node) { }
#endif /* !CONFIG_OF_DYNAMIC */
+DEFINE_FREE(device_node, struct device_node *, if (_T) of_node_put(_T))
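The cleanup class above enables scope-based puts: a pointer declared with
__free(device_node) is dropped automatically when it goes out of scope. A
sketch (node and property names are made up):

	static int demo_read_reg(struct device_node *parent, u32 *val)
	{
		struct device_node *np __free(device_node) =
			of_get_child_by_name(parent, "demo");

		if (!np)
			return -ENODEV;

		/* np is put automatically on any return path */
		return of_property_read_u32(np, "reg", val);
	}
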
/* Pointer for first entry in chain of all nodes. */
extern struct device_node *of_root;
extern struct device_node *of_chosen;
extern struct device_node *of_aliases;
extern struct device_node *of_stdout;
-extern raw_spinlock_t devtree_lock;
/*
* struct device_node flag descriptions
@@ -175,17 +182,12 @@ static inline bool is_of_node(const struct fwnode_handle *fwnode)
&__of_fwnode_handle_node->fwnode : NULL; \
})
-static inline bool of_have_populated_dt(void)
-{
- return of_root != NULL;
-}
-
static inline bool of_node_is_root(const struct device_node *node)
{
return node && (node->parent == NULL);
}
-static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
+static inline int of_node_check_flag(const struct device_node *n, unsigned long flag)
{
return test_bit(flag, &n->_flags);
}
@@ -207,7 +209,7 @@ static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
}
#if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC)
-static inline int of_property_check_flag(struct property *p, unsigned long flag)
+static inline int of_property_check_flag(const struct property *p, unsigned long flag)
{
return test_bit(flag, &p->_flags);
}
@@ -287,13 +289,20 @@ extern struct device_node *of_get_parent(const struct device_node *node);
extern struct device_node *of_get_next_parent(struct device_node *node);
extern struct device_node *of_get_next_child(const struct device_node *node,
struct device_node *prev);
+extern struct device_node *of_get_next_child_with_prefix(const struct device_node *node,
+ struct device_node *prev,
+ const char *prefix);
extern struct device_node *of_get_next_available_child(
const struct device_node *node, struct device_node *prev);
+extern struct device_node *of_get_next_reserved_child(
+ const struct device_node *node, struct device_node *prev);
extern struct device_node *of_get_compatible_child(const struct device_node *parent,
const char *compatible);
extern struct device_node *of_get_child_by_name(const struct device_node *node,
const char *name);
+extern struct device_node *of_get_available_child_by_name(const struct device_node *node,
+ const char *name);
/* cache lookup */
extern struct device_node *of_find_next_cache_node(const struct device_node *);
@@ -304,8 +313,12 @@ extern struct device_node *of_find_node_with_property(
extern struct property *of_find_property(const struct device_node *np,
const char *name,
int *lenp);
+extern bool of_property_read_bool(const struct device_node *np, const char *propname);
extern int of_property_count_elems_of_size(const struct device_node *np,
const char *propname, int elem_size);
+extern int of_property_read_u16_index(const struct device_node *np,
+ const char *propname,
+ u32 index, u16 *out_value);
extern int of_property_read_u32_index(const struct device_node *np,
const char *propname,
u32 index, u32 *out_value);
@@ -342,7 +355,7 @@ extern int of_property_read_string_helper(const struct device_node *np,
const char **out_strs, size_t sz, int index);
extern int of_device_is_compatible(const struct device_node *device,
const char *);
-extern int of_device_compatible_match(struct device_node *device,
+extern int of_device_compatible_match(const struct device_node *device,
const char *const *compat);
extern bool of_device_is_available(const struct device_node *device);
extern bool of_device_is_big_endian(const struct device_node *device);
@@ -350,34 +363,34 @@ extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
+extern struct device_node *of_cpu_device_node_get(int cpu);
+extern int of_cpu_node_to_id(struct device_node *np);
extern struct device_node *of_get_next_cpu_node(struct device_node *prev);
-extern struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+extern struct device_node *of_get_cpu_state_node(const struct device_node *cpu_node,
int index);
-
-#define for_each_property_of_node(dn, pp) \
- for (pp = dn->properties; pp != NULL; pp = pp->next)
+extern u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread);
extern int of_n_addr_cells(struct device_node *np);
extern int of_n_size_cells(struct device_node *np);
extern const struct of_device_id *of_match_node(
const struct of_device_id *matches, const struct device_node *node);
-extern int of_modalias_node(struct device_node *node, char *modalias, int len);
+extern const void *of_device_get_match_data(const struct device *dev);
+extern int of_alias_from_compatible(const struct device_node *node, char *alias,
+ int len);
extern void of_print_phandle_args(const char *msg, const struct of_phandle_args *args);
-extern struct device_node *of_parse_phandle(const struct device_node *np,
- const char *phandle_name,
- int index);
-extern int of_parse_phandle_with_args(const struct device_node *np,
- const char *list_name, const char *cells_name, int index,
- struct of_phandle_args *out_args);
+extern int __of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name, const char *cells_name, int cell_count,
+ int index, struct of_phandle_args *out_args);
extern int of_parse_phandle_with_args_map(const struct device_node *np,
const char *list_name, const char *stem_name, int index,
struct of_phandle_args *out_args);
-extern int of_parse_phandle_with_fixed_args(const struct device_node *np,
- const char *list_name, int cells_count, int index,
- struct of_phandle_args *out_args);
extern int of_count_phandle_with_args(const struct device_node *np,
const char *list_name, const char *cells_name);
+/* module functions */
+extern ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len);
+extern int of_request_module(const struct device_node *np);
+
/* phandle iterator functions */
extern int of_phandle_iterator_init(struct of_phandle_iterator *it,
const struct device_node *np,
@@ -390,14 +403,25 @@ extern int of_phandle_iterator_args(struct of_phandle_iterator *it,
uint32_t *args,
int size);
-extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
-extern int of_alias_get_id(struct device_node *np, const char *stem);
+extern int of_alias_get_id(const struct device_node *np, const char *stem);
extern int of_alias_get_highest_id(const char *stem);
-extern int of_alias_get_alias_list(const struct of_device_id *matches,
- const char *stem, unsigned long *bitmap,
- unsigned int nbits);
-extern int of_machine_is_compatible(const char *compat);
+bool of_machine_compatible_match(const char *const *compats);
+bool of_machine_device_match(const struct of_device_id *matches);
+const void *of_machine_get_match_data(const struct of_device_id *matches);
+
+/**
+ * of_machine_is_compatible - Test root of device tree for a given compatible value
+ * @compat: compatible string to look for in root node's compatible property.
+ *
+ * Return: true if the root node has the given value in its compatible property.
+ */
+static inline bool of_machine_is_compatible(const char *compat)
+{
+ const char *compats[] = { compat, NULL };
+
+ return of_machine_compatible_match(compats);
+}
extern int of_add_property(struct device_node *np, struct property *prop);
extern int of_remove_property(struct device_node *np, struct property *prop);
@@ -415,139 +439,13 @@ extern int of_detach_node(struct device_node *);
#define of_match_ptr(_ptr) (_ptr)
-/**
- * of_property_read_u8_array - Find and read an array of u8 from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 8-bit value(s) from
- * it.
- *
- * dts entry of array should be like:
- * ``property = /bits/ 8 <0x50 0x60 0x70>;``
- *
- * Return: 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_values is modified only if a valid u8 value can be decoded.
- */
-static inline int of_property_read_u8_array(const struct device_node *np,
- const char *propname,
- u8 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u8_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
-/**
- * of_property_read_u16_array - Find and read an array of u16 from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 16-bit value(s) from
- * it.
- *
- * dts entry of array should be like:
- * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;``
- *
- * Return: 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_values is modified only if a valid u16 value can be decoded.
- */
-static inline int of_property_read_u16_array(const struct device_node *np,
- const char *propname,
- u16 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u16_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
-/**
- * of_property_read_u32_array - Find and read an array of 32 bit integers
- * from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 32-bit value(s) from
- * it.
- *
- * Return: 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_values is modified only if a valid u32 value can be decoded.
- */
-static inline int of_property_read_u32_array(const struct device_node *np,
- const char *propname,
- u32 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u32_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
-/**
- * of_property_read_u64_array - Find and read an array of 64 bit integers
- * from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 64-bit value(s) from
- * it.
- *
- * Return: 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_values is modified only if a valid u64 value can be decoded.
- */
-static inline int of_property_read_u64_array(const struct device_node *np,
- const char *propname,
- u64 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u64_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
/*
- * struct property *prop;
- * const __be32 *p;
* u32 u;
*
- * of_property_for_each_u32(np, "propname", prop, p, u)
+ * of_property_for_each_u32(np, "propname", u)
* printk("U32 value: %x\n", u);
*/
-const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
+const __be32 *of_prop_next_u32(const struct property *prop, const __be32 *cur,
u32 *pu);
/*
* struct property *prop;
@@ -556,13 +454,11 @@ const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
* of_property_for_each_string(np, "propname", prop, s)
* printk("String value: %s\n", s);
*/
-const char *of_prop_next_string(struct property *prop, const char *cur);
-
-bool of_console_check(struct device_node *dn, char *name, int index);
+const char *of_prop_next_string(const struct property *prop, const char *cur);
-extern int of_cpu_node_to_id(struct device_node *np);
+bool of_console_check(const struct device_node *dn, char *name, int index);
-int of_map_id(struct device_node *np, u32 id,
+int of_map_id(const struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out);
@@ -573,8 +469,6 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image,
unsigned long initrd_load_addr,
unsigned long initrd_len,
const char *cmdline, size_t extra_fdt_size);
-int ima_get_kexec_buffer(void **addr, size_t *size);
-int ima_free_kexec_buffer(void);
#else /* CONFIG_OF */
static inline void of_core_init(void)
@@ -658,12 +552,25 @@ static inline struct device_node *of_get_next_child(
return NULL;
}
+static inline struct device_node *of_get_next_child_with_prefix(
+ const struct device_node *node, struct device_node *prev,
+ const char *prefix)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_next_available_child(
const struct device_node *node, struct device_node *prev)
{
return NULL;
}
+static inline struct device_node *of_get_next_reserved_child(
+ const struct device_node *node, struct device_node *prev)
+{
+ return NULL;
+}
+
static inline struct device_node *of_find_node_with_property(
struct device_node *from, const char *prop_name)
{
@@ -672,11 +579,6 @@ static inline struct device_node *of_find_node_with_property(
#define of_fwnode_handle(node) NULL
-static inline bool of_have_populated_dt(void)
-{
- return false;
-}
-
static inline struct device_node *of_get_compatible_child(const struct device_node *parent,
const char *compatible)
{
@@ -690,13 +592,20 @@ static inline struct device_node *of_get_child_by_name(
return NULL;
}
+static inline struct device_node *of_get_available_child_by_name(
+ const struct device_node *node,
+ const char *name)
+{
+ return NULL;
+}
+
static inline int of_device_is_compatible(const struct device_node *device,
const char *name)
{
return 0;
}
-static inline int of_device_compatible_match(struct device_node *device,
+static inline int of_device_compatible_match(const struct device_node *device,
const char *const *compat)
{
return 0;
@@ -727,34 +636,20 @@ static inline struct device_node *of_find_compatible_node(
return NULL;
}
-static inline int of_property_count_elems_of_size(const struct device_node *np,
- const char *propname, int elem_size)
-{
- return -ENOSYS;
-}
-
-static inline int of_property_read_u8_array(const struct device_node *np,
- const char *propname, u8 *out_values, size_t sz)
-{
- return -ENOSYS;
-}
-
-static inline int of_property_read_u16_array(const struct device_node *np,
- const char *propname, u16 *out_values, size_t sz)
+static inline bool of_property_read_bool(const struct device_node *np,
+ const char *propname)
{
- return -ENOSYS;
+ return false;
}
-static inline int of_property_read_u32_array(const struct device_node *np,
- const char *propname,
- u32 *out_values, size_t sz)
+static inline int of_property_count_elems_of_size(const struct device_node *np,
+ const char *propname, int elem_size)
{
return -ENOSYS;
}
-static inline int of_property_read_u64_array(const struct device_node *np,
- const char *propname,
- u64 *out_values, size_t sz)
+static inline int of_property_read_u16_index(const struct device_node *np,
+ const char *propname, u32 index, u16 *out_value)
{
return -ENOSYS;
}
@@ -784,6 +679,16 @@ static inline struct device_node *of_get_cpu_node(int cpu,
return NULL;
}
+static inline struct device_node *of_cpu_device_node_get(int cpu)
+{
+ return NULL;
+}
+
+static inline int of_cpu_node_to_id(struct device_node *np)
+{
+ return -ENODEV;
+}
+
static inline struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
return NULL;
@@ -864,18 +769,12 @@ static inline int of_property_read_string_helper(const struct device_node *np,
return -ENOSYS;
}
-static inline struct device_node *of_parse_phandle(const struct device_node *np,
- const char *phandle_name,
- int index)
-{
- return NULL;
-}
-
-static inline int of_parse_phandle_with_args(const struct device_node *np,
- const char *list_name,
- const char *cells_name,
- int index,
- struct of_phandle_args *out_args)
+static inline int __of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int cell_count,
+ int index,
+ struct of_phandle_args *out_args)
{
return -ENOSYS;
}
@@ -889,18 +788,22 @@ static inline int of_parse_phandle_with_args_map(const struct device_node *np,
return -ENOSYS;
}
-static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
- const char *list_name, int cells_count, int index,
- struct of_phandle_args *out_args)
+static inline int of_count_phandle_with_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name)
{
return -ENOSYS;
}
-static inline int of_count_phandle_with_args(struct device_node *np,
- const char *list_name,
- const char *cells_name)
+static inline ssize_t of_modalias(const struct device_node *np, char *str,
+ ssize_t len)
{
- return -ENOSYS;
+ return -ENODEV;
+}
+
+static inline int of_request_module(const struct device_node *np)
+{
+ return -ENODEV;
}
static inline int of_phandle_iterator_init(struct of_phandle_iterator *it,
@@ -934,14 +837,12 @@ static inline int of_alias_get_highest_id(const char *stem)
return -ENOSYS;
}
-static inline int of_alias_get_alias_list(const struct of_device_id *matches,
- const char *stem, unsigned long *bitmap,
- unsigned int nbits)
+static inline int of_machine_is_compatible(const char *compat)
{
- return -ENOSYS;
+ return 0;
}
-static inline int of_machine_is_compatible(const char *compat)
+static inline int of_add_property(struct device_node *np, struct property *prop)
{
return 0;
}
@@ -951,18 +852,34 @@ static inline int of_remove_property(struct device_node *np, struct property *pr
return 0;
}
+static inline bool of_machine_compatible_match(const char *const *compats)
+{
+ return false;
+}
+
+static inline bool of_machine_device_match(const struct of_device_id *matches)
+{
+ return false;
+}
+
+static inline const void *
+of_machine_get_match_data(const struct of_device_id *matches)
+{
+ return NULL;
+}
+
static inline bool of_console_check(const struct device_node *dn, const char *name, int index)
{
return false;
}
-static inline const __be32 *of_prop_next_u32(struct property *prop,
+static inline const __be32 *of_prop_next_u32(const struct property *prop,
const __be32 *cur, u32 *pu)
{
return NULL;
}
-static inline const char *of_prop_next_string(struct property *prop,
+static inline const char *of_prop_next_string(const struct property *prop,
const char *cur)
{
return NULL;
@@ -987,7 +904,8 @@ static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
{
}
-static inline int of_property_check_flag(struct property *p, unsigned long flag)
+static inline int of_property_check_flag(const struct property *p,
+ unsigned long flag)
{
return 0;
}
@@ -1000,12 +918,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
{
}
-static inline int of_cpu_node_to_id(struct device_node *np)
-{
- return -ENODEV;
-}
-
-static inline int of_map_id(struct device_node *np, u32 id,
+static inline int of_map_id(const struct device_node *np, u32 id,
const char *map_name, const char *map_mask_name,
struct device_node **target, u32 *id_out)
{
@@ -1017,6 +930,11 @@ static inline phys_addr_t of_dma_get_max_cpu_address(struct device_node *np)
return PHYS_ADDR_MAX;
}
+static inline const void *of_device_get_match_data(const struct device *dev)
+{
+ return NULL;
+}
+
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
@@ -1028,11 +946,8 @@ static inline phys_addr_t of_dma_get_max_cpu_address(struct device_node *np)
#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
#endif
-static inline int of_prop_val_eq(struct property *p1, struct property *p2)
-{
- return p1->length == p2->length &&
- !memcmp(p1->value, p2->value, (size_t)p1->length);
-}
+#define for_each_property_of_node(dn, pp) \
+ for (pp = dn->properties; pp != NULL; pp = pp->next)
#if defined(CONFIG_OF) && defined(CONFIG_NUMA)
extern int of_node_to_nid(struct device_node *np);
@@ -1072,6 +987,158 @@ static inline bool of_node_is_type(const struct device_node *np, const char *typ
}
/**
+ * of_parse_phandle - Resolve a phandle property to a device_node pointer
+ * @np: Pointer to device node holding phandle property
+ * @phandle_name: Name of property holding a phandle value
+ * @index: For properties holding a table of phandles, this is the index into
+ * the table
+ *
+ * Return: The device_node pointer with refcount incremented. Use
+ * of_node_put() on it when done.
+ */
+static inline struct device_node *of_parse_phandle(const struct device_node *np,
+ const char *phandle_name,
+ int index)
+{
+ struct of_phandle_args args;
+
+ if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
+ index, &args))
+ return NULL;
+
+ return args.np;
+}
+
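A minimal usage sketch (not part of this patch; the "backlight" property name is illustrative). The returned node carries a reference that the caller must drop:

static int example_attach_backlight(struct device_node *np)
{
	struct device_node *bl;

	bl = of_parse_phandle(np, "backlight", 0);
	if (!bl)
		return -ENODEV;

	/* ... look up and wire up the backlight device here ... */

	of_node_put(bl);	/* balance the implicit of_node_get() */
	return 0;
}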
+/**
+ * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cells_name: property name that specifies phandles' arguments count
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful for parsing lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args; on error, returns an appropriate
+ * errno value.
+ *
+ * The caller is responsible for calling of_node_put() on the returned
+ * out_args->np pointer.
+ *
+ * Example::
+ *
+ * phandle1: node1 {
+ * #list-cells = <2>;
+ * };
+ *
+ * phandle2: node2 {
+ * #list-cells = <1>;
+ * };
+ *
+ * node3 {
+ * list = <&phandle1 1 2 &phandle2 3>;
+ * };
+ *
+ * To get a device_node of the ``node2`` node, you may call this:
+ * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
+ */
+static inline int of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ int cell_count = -1;
+
+ /* If cells_name is NULL we assume a cell count of 0 */
+ if (!cells_name)
+ cell_count = 0;
+
+ return __of_parse_phandle_with_args(np, list_name, cells_name,
+ cell_count, index, out_args);
+}
+
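A sketch of consuming the kerneldoc example above, assuming the node3/"list" layout shown there; index 1 resolves to node2, which carries one argument cell:

static int example_parse_second_entry(struct device_node *node3)
{
	struct of_phandle_args args;
	int ret;

	ret = of_parse_phandle_with_args(node3, "list", "#list-cells",
					 1, &args);
	if (ret)
		return ret;

	/* Here args.np is node2, args.args_count == 1, args.args[0] == 3. */

	of_node_put(args.np);
	return 0;
}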
+/**
+ * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cell_count: number of argument cells following the phandle
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful for parsing lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args; on error, returns an appropriate
+ * errno value.
+ *
+ * The caller is responsible for calling of_node_put() on the returned
+ * out_args->np pointer.
+ *
+ * Example::
+ *
+ * phandle1: node1 {
+ * };
+ *
+ * phandle2: node2 {
+ * };
+ *
+ * node3 {
+ * list = <&phandle1 0 2 &phandle2 2 3>;
+ * };
+ *
+ * To get a device_node of the ``node2`` node, you may call this:
+ * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
+ */
+static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
+ const char *list_name,
+ int cell_count,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
+ index, out_args);
+}
+
+/**
+ * of_parse_phandle_with_optional_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cells_name: property name that specifies phandles' arguments count
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * Same as of_parse_phandle_with_args() except that if the cells_name property
+ * is not found, cell_count of 0 is assumed.
+ *
+ * This is useful if you have a phandle which didn't have arguments before
+ * and thus doesn't have a '#*-cells' property, but is now migrated to having
+ * arguments while retaining backwards compatibility.
+ */
+static inline int of_parse_phandle_with_optional_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ return __of_parse_phandle_with_args(np, list_name, cells_name,
+ 0, index, out_args);
+}
+
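A sketch of the migration case described above; the "resets" and "#reset-cells" names are illustrative, not part of this patch:

static int example_parse_optional(struct device_node *np,
				  struct of_phandle_args *args)
{
	/*
	 * Entries whose provider predates "#reset-cells" parse as
	 * zero-argument phandles instead of failing outright.
	 */
	return of_parse_phandle_with_optional_args(np, "resets",
						   "#reset-cells", 0, args);
}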
+/**
+ * of_phandle_args_equal() - Compare two of_phandle_args
+ * @a1: First of_phandle_args to compare
+ * @a2: Second of_phandle_args to compare
+ *
+ * Return: True if a1 and a2 are the same (same node pointer, same phandle
+ * args), false otherwise.
+ */
+static inline bool of_phandle_args_equal(const struct of_phandle_args *a1,
+ const struct of_phandle_args *a2)
+{
+ return a1->np == a2->np &&
+ a1->args_count == a2->args_count &&
+ !memcmp(a1->args, a2->args, sizeof(a1->args[0]) * a1->args_count);
+}
+
+/**
* of_property_count_u8_elems - Count the number of u8 elements in a property
*
* @np: device node from which the property value is to be read.
@@ -1080,7 +1147,7 @@ static inline bool of_node_is_type(const struct device_node *np, const char *typ
* Search for a property in a device node and count the number of u8 elements
* in it.
*
- * Return: The number of elements on sucess, -EINVAL if the property does
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u8 and -ENODATA if the
* property does not have a value.
*/
@@ -1099,7 +1166,7 @@ static inline int of_property_count_u8_elems(const struct device_node *np,
* Search for a property in a device node and count the number of u16 elements
* in it.
*
- * Return: The number of elements on sucess, -EINVAL if the property does
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u16 and -ENODATA if the
* property does not have a value.
*/
@@ -1118,7 +1185,7 @@ static inline int of_property_count_u16_elems(const struct device_node *np,
* Search for a property in a device node and count the number of u32 elements
* in it.
*
- * Return: The number of elements on sucess, -EINVAL if the property does
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u32 and -ENODATA if the
* property does not have a value.
*/
@@ -1137,7 +1204,7 @@ static inline int of_property_count_u32_elems(const struct device_node *np,
* Search for a property in a device node and count the number of u64 elements
* in it.
*
- * Return: The number of elements on sucess, -EINVAL if the property does
+ * Return: The number of elements on success, -EINVAL if the property does
* not exist or its length does not match a multiple of u64 and -ENODATA if the
* property does not have a value.
*/
@@ -1214,22 +1281,145 @@ static inline int of_property_read_string_index(const struct device_node *np,
}
/**
- * of_property_read_bool - Find a property
- * @np: device node from which the property value is to be read.
+ * of_property_present - Test if a property is present in a node
+ * @np: device node to search for the property.
* @propname: name of the property to be searched.
*
- * Search for a property in a device node.
+ * Test whether a property is present in a device node.
*
* Return: true if the property exists, false otherwise.
*/
-static inline bool of_property_read_bool(const struct device_node *np,
- const char *propname)
+static inline bool of_property_present(const struct device_node *np, const char *propname)
{
struct property *prop = of_find_property(np, propname, NULL);
return prop ? true : false;
}
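A usage sketch with an illustrative property name: probe for an optional property and keep a default when it is absent:

static u32 example_get_rate(const struct device_node *np)
{
	u32 rate = 1000000;	/* default when the property is absent */

	if (of_property_present(np, "example-rate"))
		of_property_read_u32(np, "example-rate", &rate);

	return rate;
}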
+/**
+ * of_property_read_u8_array - Find and read an array of u8 from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 8-bit value(s) from
+ * it.
+ *
+ * dts entry of array should be like:
+ * ``property = /bits/ 8 <0x50 0x60 0x70>;``
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * out_values is modified only if valid u8 values can be decoded.
+ */
+static inline int of_property_read_u8_array(const struct device_node *np,
+ const char *propname,
+ u8 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u8_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u16_array - Find and read an array of u16 from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 16-bit value(s) from
+ * it.
+ *
+ * dts entry of array should be like:
+ * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;``
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * out_values is modified only if valid u16 values can be decoded.
+ */
+static inline int of_property_read_u16_array(const struct device_node *np,
+ const char *propname,
+ u16 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u16_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u32_array - Find and read an array of 32 bit integers
+ * from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * out_values is modified only if valid u32 values can be decoded.
+ */
+static inline int of_property_read_u32_array(const struct device_node *np,
+ const char *propname,
+ u32 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u32_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
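A sketch of a fixed-size read, assuming an illustrative three-cell "example-delays" property; the call fails rather than partially filling the buffer:

static int example_read_delays(const struct device_node *np)
{
	u32 delays[3];
	int ret;

	ret = of_property_read_u32_array(np, "example-delays", delays,
					 ARRAY_SIZE(delays));
	if (ret)
		return ret;	/* -EINVAL, -ENODATA or -EOVERFLOW */

	/* delays[0..2] now hold the decoded cells. */
	return 0;
}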
+/**
+ * of_property_read_u64_array - Find and read an array of 64 bit integers
+ * from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 64-bit value(s) from
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * out_values is modified only if valid u64 values can be decoded.
+ */
+static inline int of_property_read_u64_array(const struct device_node *np,
+ const char *propname,
+ u64 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u64_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
static inline int of_property_read_u8(const struct device_node *np,
const char *propname,
u8 *out_value)
@@ -1264,11 +1454,12 @@ static inline int of_property_read_s32(const struct device_node *np,
err == 0; \
err = of_phandle_iterator_next(it))
-#define of_property_for_each_u32(np, propname, prop, p, u) \
- for (prop = of_find_property(np, propname, NULL), \
- p = of_prop_next_u32(prop, NULL, &u); \
- p; \
- p = of_prop_next_u32(prop, p, &u))
+#define of_property_for_each_u32(np, propname, u) \
+ for (struct {const struct property *prop; const __be32 *item; } _it = \
+ {of_find_property(np, propname, NULL), \
+ of_prop_next_u32(_it.prop, NULL, &u)}; \
+ _it.item; \
+ _it.item = of_prop_next_u32(_it.prop, _it.item, &u))
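A sketch of the reworked iterator: the property pointer and cursor now live in an internal compound, so callers pass only the node, the property name, and a u32 lvalue (compare the removed prop/p parameters above); "example-values" is an illustrative name:

static u32 example_max_value(struct device_node *np)
{
	u32 v, max = 0;

	of_property_for_each_u32(np, "example-values", v)
		if (v > max)
			max = v;

	return max;
}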
#define of_property_for_each_string(np, propname, prop, s) \
for (prop = of_find_property(np, propname, NULL), \
@@ -1295,9 +1486,31 @@ static inline int of_property_read_s32(const struct device_node *np,
#define for_each_child_of_node(parent, child) \
for (child = of_get_next_child(parent, NULL); child != NULL; \
child = of_get_next_child(parent, child))
+
+#define for_each_child_of_node_scoped(parent, child) \
+ for (struct device_node *child __free(device_node) = \
+ of_get_next_child(parent, NULL); \
+ child != NULL; \
+ child = of_get_next_child(parent, child))
+
+#define for_each_child_of_node_with_prefix(parent, child, prefix) \
+ for (struct device_node *child __free(device_node) = \
+ of_get_next_child_with_prefix(parent, NULL, prefix); \
+ child != NULL; \
+ child = of_get_next_child_with_prefix(parent, child, prefix))
+
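A sketch of the scoped form: __free(device_node) drops the child reference automatically, so an early exit needs no of_node_put(); the compatible string is illustrative:

static struct device_node *example_find_child(struct device_node *parent)
{
	for_each_child_of_node_scoped(parent, child) {
		if (of_device_is_compatible(child, "vendor,example"))
			return no_free_ptr(child);	/* keep the reference */
	}

	return NULL;
}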
#define for_each_available_child_of_node(parent, child) \
for (child = of_get_next_available_child(parent, NULL); child != NULL; \
child = of_get_next_available_child(parent, child))
+#define for_each_reserved_child_of_node(parent, child) \
+ for (child = of_get_next_reserved_child(parent, NULL); child != NULL; \
+ child = of_get_next_reserved_child(parent, child))
+
+#define for_each_available_child_of_node_scoped(parent, child) \
+ for (struct device_node *child __free(device_node) = \
+ of_get_next_available_child(parent, NULL); \
+ child != NULL; \
+ child = of_get_next_available_child(parent, child))
#define for_each_of_cpu_node(cpu) \
for (cpu = of_get_next_cpu_node(NULL); cpu != NULL; \
@@ -1329,6 +1542,12 @@ static inline int of_get_available_child_count(const struct device_node *np)
return num;
}
+#define _OF_DECLARE_STUB(table, name, compat, fn, fn_type) \
+ static const struct of_device_id __of_table_##name \
+ __attribute__((unused)) \
+ = { .compatible = compat, \
+ .data = (fn == (fn_type)NULL) ? fn : fn }
+
#if defined(CONFIG_OF) && !defined(MODULE)
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
@@ -1338,10 +1557,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
.data = (fn == (fn_type)NULL) ? fn : fn }
#else
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
- static const struct of_device_id __of_table_##name \
- __attribute__((unused)) \
- = { .compatible = compat, \
- .data = (fn == (fn_type)NULL) ? fn : fn }
+ _OF_DECLARE_STUB(table, name, compat, fn, fn_type)
#endif
typedef int (*of_init_fn_2)(struct device_node *, struct device_node *);
@@ -1397,6 +1613,8 @@ enum of_reconfig_change {
OF_RECONFIG_CHANGE_REMOVE,
};
+struct notifier_block;
+
#ifdef CONFIG_OF_DYNAMIC
extern int of_reconfig_notifier_register(struct notifier_block *);
extern int of_reconfig_notifier_unregister(struct notifier_block *);
@@ -1441,6 +1659,36 @@ static inline int of_changeset_update_property(struct of_changeset *ocs,
{
return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop);
}
+
+struct device_node *of_changeset_create_node(struct of_changeset *ocs,
+ struct device_node *parent,
+ const char *full_name);
+int of_changeset_add_prop_string(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name, const char *str);
+int of_changeset_add_prop_string_array(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name,
+ const char * const *str_array, size_t sz);
+int of_changeset_add_prop_u32_array(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name,
+ const u32 *array, size_t sz);
+static inline int of_changeset_add_prop_u32(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name,
+ const u32 val)
+{
+ return of_changeset_add_prop_u32_array(ocs, np, prop_name, &val, 1);
+}
+
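A sketch that strings the changeset helpers together; the node and property names are illustrative, and of_changeset_init()/of_changeset_apply() are assumed from the existing changeset API:

static int example_stage_node(struct of_changeset *ocs,
			      struct device_node *parent)
{
	struct device_node *np;
	int ret;

	np = of_changeset_create_node(ocs, parent, "example-node");
	if (!np)
		return -ENOMEM;

	ret = of_changeset_add_prop_u32(ocs, np, "example-cells", 2);
	of_node_put(np);	/* the changeset holds its own reference */

	return ret ? ret : of_changeset_apply(ocs);
}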
+int of_changeset_update_prop_string(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name, const char *str);
+
+int of_changeset_add_prop_bool(struct of_changeset *ocs, struct device_node *np,
+ const char *prop_name);
+
#else /* CONFIG_OF_DYNAMIC */
static inline int of_reconfig_notifier_register(struct notifier_block *nb)
{
@@ -1473,17 +1721,46 @@ static inline bool of_device_is_system_power_controller(const struct device_node
return of_property_read_bool(np, "system-power-controller");
}
+/**
+ * of_have_populated_dt() - Has DT been populated by bootloader
+ *
+ * Return: True if a DTB has been populated by the bootloader and it isn't the
+ * empty builtin one. False otherwise.
+ */
+static inline bool of_have_populated_dt(void)
+{
+#ifdef CONFIG_OF
+ return of_property_present(of_root, "compatible");
+#else
+ return false;
+#endif
+}
+
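A usage sketch of the new guard; the function name is illustrative:

static int __init example_dt_init(void)
{
	/* Skip DT-driven setup when only the empty builtin tree exists. */
	if (!of_have_populated_dt())
		return 0;

	/* ... machine-specific device tree scanning ... */
	return 0;
}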
/*
* Overlay support
*/
enum of_overlay_notify_action {
- OF_OVERLAY_PRE_APPLY = 0,
+ OF_OVERLAY_INIT = 0, /* kzalloc() of ovcs sets this value */
+ OF_OVERLAY_PRE_APPLY,
OF_OVERLAY_POST_APPLY,
OF_OVERLAY_PRE_REMOVE,
OF_OVERLAY_POST_REMOVE,
};
+static inline const char *of_overlay_action_name(enum of_overlay_notify_action action)
+{
+ static const char *const of_overlay_action_name[] = {
+ "init",
+ "pre-apply",
+ "post-apply",
+ "pre-remove",
+ "post-remove",
+ };
+
+ return of_overlay_action_name[action];
+}
+
struct of_overlay_notify_data {
struct device_node *overlay;
struct device_node *target;
@@ -1492,7 +1769,7 @@ struct of_overlay_notify_data {
#ifdef CONFIG_OF_OVERLAY
int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
- int *ovcs_id);
+ int *ovcs_id, const struct device_node *target_base);
int of_overlay_remove(int *ovcs_id);
int of_overlay_remove_all(void);
@@ -1501,8 +1778,8 @@ int of_overlay_notifier_unregister(struct notifier_block *nb);
#else
-static inline int of_overlay_fdt_apply(void *overlay_fdt, u32 overlay_fdt_size,
- int *ovcs_id)
+static inline int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
+ int *ovcs_id, const struct device_node *target_base)
{
return -ENOTSUPP;
}
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 88bc943405cd..0cff90365391 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -10,7 +10,7 @@ struct of_bus;
struct of_pci_range_parser {
struct device_node *node;
- struct of_bus *bus;
+ const struct of_bus *bus;
const __be32 *range;
const __be32 *end;
int na;
@@ -26,6 +26,7 @@ struct of_pci_range {
u64 bus_addr;
};
u64 cpu_addr;
+ u64 parent_bus_addr;
u64 size;
u32 flags;
};
@@ -35,9 +36,27 @@ struct of_pci_range {
for (; of_pci_range_parser_one(parser, range);)
#define for_each_of_range for_each_of_pci_range
+/*
+ * of_range_count - Get the number of "ranges" or "dma-ranges" entries
+ * @parser: Parser state initialized by of_range_parser_init()
+ *
+ * Returns the number of entries or 0 if none.
+ *
+ * Note that calling this from within or after a for_each_of_range() loop is
+ * inaccurate: it returns the number of entries remaining, not the total.
+ */
+static inline int of_range_count(const struct of_range_parser *parser)
+{
+ if (!parser || !parser->node || !parser->range || parser->range == parser->end)
+ return 0;
+ return (parser->end - parser->range) / (parser->na + parser->pna + parser->ns);
+}
+
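A sketch of counting before iterating, per the caveat above; it assumes struct of_range_parser aliases struct of_pci_range_parser, as the of_range_parser_init define later in this header suggests:

static int example_count_ranges(struct device_node *np)
{
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	int n;

	if (of_pci_range_parser_init(&parser, np))
		return -EINVAL;

	n = of_range_count(&parser);	/* total, taken before iterating */

	for_each_of_pci_range(&parser, &range)
		;	/* consume range.cpu_addr / range.size here */

	return n;
}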
/* Translate a DMA address from device space to CPU space */
extern u64 of_translate_dma_address(struct device_node *dev,
const __be32 *in_addr);
+extern const __be32 *of_translate_dma_region(struct device_node *dev, const __be32 *addr,
+ phys_addr_t *start, size_t *length);
#ifdef CONFIG_OF_ADDRESS
extern u64 of_translate_address(struct device_node *np, const __be32 *addr);
@@ -51,8 +70,10 @@ void __iomem *of_io_request_and_map(struct device_node *device,
* the address space flags too. The PCI version uses a BAR number
* instead of an absolute index
*/
-extern const __be32 *of_get_address(struct device_node *dev, int index,
- u64 *size, unsigned int *flags);
+extern const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
+ u64 *size, unsigned int *flags);
+
+int of_property_read_reg(struct device_node *np, int idx, u64 *addr, u64 *size);
extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node);
@@ -61,6 +82,13 @@ extern int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
extern struct of_pci_range *of_pci_range_parser_one(
struct of_pci_range_parser *parser,
struct of_pci_range *range);
+extern int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r);
+extern int of_pci_range_to_resource(const struct of_pci_range *range,
+ const struct device_node *np,
+ struct resource *res);
+extern int of_range_to_resource(struct device_node *np, int index,
+ struct resource *res);
extern bool of_dma_is_coherent(struct device_node *np);
#else /* CONFIG_OF_ADDRESS */
static inline void __iomem *of_io_request_and_map(struct device_node *device,
@@ -75,12 +103,17 @@ static inline u64 of_translate_address(struct device_node *np,
return OF_BAD_ADDR;
}
-static inline const __be32 *of_get_address(struct device_node *dev, int index,
- u64 *size, unsigned int *flags)
+static inline const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
+ u64 *size, unsigned int *flags)
{
return NULL;
}
+static inline int of_property_read_reg(struct device_node *np, int idx, u64 *addr, u64 *size)
+{
+ return -ENOSYS;
+}
+
static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node)
{
@@ -100,6 +133,25 @@ static inline struct of_pci_range *of_pci_range_parser_one(
return NULL;
}
+static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r)
+{
+ return -ENOSYS;
+}
+
+static inline int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res)
+{
+ return -ENOSYS;
+}
+
+static inline int of_range_to_resource(struct device_node *np, int index,
+ struct resource *res)
+{
+ return -ENOSYS;
+}
+
static inline bool of_dma_is_coherent(struct device_node *np)
{
return false;
@@ -124,32 +176,27 @@ static inline void __iomem *of_iomap(struct device_node *device, int index)
#endif
#define of_range_parser_init of_pci_range_parser_init
-#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI)
-extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
- u64 *size, unsigned int *flags);
-extern int of_pci_address_to_resource(struct device_node *dev, int bar,
- struct resource *r);
-extern int of_pci_range_to_resource(struct of_pci_range *range,
- struct device_node *np,
- struct resource *res);
-#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */
-static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
- struct resource *r)
+static inline const __be32 *of_get_address(struct device_node *dev, int index,
+ u64 *size, unsigned int *flags)
{
- return -ENOSYS;
+ return __of_get_address(dev, index, -1, size, flags);
}
-static inline const __be32 *of_get_pci_address(struct device_node *dev,
- int bar_no, u64 *size, unsigned int *flags)
+static inline const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
+ u64 *size, unsigned int *flags)
{
- return NULL;
+ return __of_get_address(dev, -1, bar_no, size, flags);
}
-static inline int of_pci_range_to_resource(struct of_pci_range *range,
- struct device_node *np,
- struct resource *res)
+
+static inline int of_address_count(struct device_node *np)
{
- return -ENOSYS;
+ struct resource res;
+ int count = 0;
+
+ while (of_address_to_resource(np, count, &res) == 0)
+ count++;
+
+ return count;
}
-#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */
#endif /* __OF_ADDRESS_H */
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 1d7992a02e36..9042bca5bb84 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -2,14 +2,11 @@
#ifndef _LINUX_OF_DEVICE_H
#define _LINUX_OF_DEVICE_H
-#include <linux/cpu.h>
-#include <linux/platform_device.h>
-#include <linux/of_platform.h> /* temporary until merge */
-
-#include <linux/of.h>
-#include <linux/mod_devicetable.h>
+#include <linux/device/driver.h>
struct device;
+struct of_device_id;
+struct kobj_uevent_env;
#ifdef CONFIG_OF
extern const struct of_device_id *of_match_device(
@@ -26,26 +23,10 @@ static inline int of_driver_match_device(struct device *dev,
return of_match_device(drv->of_match_table, dev) != NULL;
}
-extern int of_device_add(struct platform_device *pdev);
-extern int of_device_register(struct platform_device *ofdev);
-extern void of_device_unregister(struct platform_device *ofdev);
-
-extern const void *of_device_get_match_data(const struct device *dev);
-
extern ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len);
-extern int of_device_request_module(struct device *dev);
-extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env);
-extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env);
-
-static inline struct device_node *of_cpu_device_node_get(int cpu)
-{
- struct device *cpu_dev;
- cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev)
- return of_get_cpu_node(cpu, NULL);
- return of_node_get(cpu_dev->of_node);
-}
+extern void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env);
+extern int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env);
int of_dma_configure_id(struct device *dev,
struct device_node *np,
@@ -56,6 +37,9 @@ static inline int of_dma_configure(struct device *dev,
{
return of_dma_configure_id(dev, np, force_dma, NULL);
}
+
+void of_device_make_bus_id(struct device *dev);
+
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -64,26 +48,16 @@ static inline int of_driver_match_device(struct device *dev,
return 0;
}
-static inline void of_device_uevent(struct device *dev,
+static inline void of_device_uevent(const struct device *dev,
struct kobj_uevent_env *env) { }
-static inline const void *of_device_get_match_data(const struct device *dev)
-{
- return NULL;
-}
-
static inline int of_device_modalias(struct device *dev,
char *str, ssize_t len)
{
return -ENODEV;
}
-static inline int of_device_request_module(struct device *dev)
-{
- return -ENODEV;
-}
-
-static inline int of_device_uevent_modalias(struct device *dev,
+static inline int of_device_uevent_modalias(const struct device *dev,
struct kobj_uevent_env *env)
{
return -ENODEV;
@@ -95,14 +69,10 @@ static inline const struct of_device_id *of_match_device(
return NULL;
}
-static inline struct device_node *of_cpu_device_node_get(int cpu)
-{
- return NULL;
-}
-
static inline int of_dma_configure_id(struct device *dev,
- struct device_node *np,
- bool force_dma)
+ struct device_node *np,
+ bool force_dma,
+ const u32 *id)
{
return 0;
}
@@ -112,6 +82,9 @@ static inline int of_dma_configure(struct device *dev,
{
return 0;
}
+
+static inline void of_device_make_bus_id(struct device *dev) {}
+
#endif /* CONFIG_OF */
#endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index acf820e88952..51dadbaa3d63 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -31,6 +31,7 @@ extern void *of_fdt_unflatten_tree(const unsigned long *blob,
extern int __initdata dt_root_addr_cells;
extern int __initdata dt_root_size_cells;
extern void *initial_boot_params;
+extern phys_addr_t initial_boot_params_pa;
extern char __dtb_start[];
extern char __dtb_end[];
@@ -54,30 +55,33 @@ extern int of_get_flat_dt_subnode_by_name(unsigned long node,
const char *uname);
extern const void *of_get_flat_dt_prop(unsigned long node, const char *name,
int *size);
+
+extern const __be32 *of_flat_dt_get_addr_size_prop(unsigned long node,
+ const char *name,
+ int *entries);
+extern bool of_flat_dt_get_addr_size(unsigned long node, const char *name,
+ u64 *addr, u64 *size);
+extern void of_flat_dt_read_addr_size(const __be32 *prop, int entry_index,
+ u64 *addr, u64 *size);
+
extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
extern unsigned long of_get_flat_dt_root(void);
extern uint32_t of_get_flat_dt_phandle(unsigned long node);
-extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
- int depth, void *data);
-extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
- int depth, void *data);
+extern int early_init_dt_scan_chosen(char *cmdline);
+extern int early_init_dt_scan_memory(void);
+extern void early_init_dt_check_for_usable_mem_range(void);
extern int early_init_dt_scan_chosen_stdout(void);
extern void early_init_fdt_scan_reserved_mem(void);
extern void early_init_fdt_reserve_self(void);
-extern void __init early_init_dt_scan_chosen_arch(unsigned long node);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
-extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size);
-extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
- bool no_map);
extern u64 dt_mem_next_cell(int s, const __be32 **cellp);
/* Early flat tree scan hooks */
-extern int early_init_dt_scan_root(unsigned long node, const char *uname,
- int depth, void *data);
+extern int early_init_dt_scan_root(void);
-extern bool early_init_dt_scan(void *params);
-extern bool early_init_dt_verify(void *params);
+extern bool early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys);
+extern bool early_init_dt_verify(void *dt_virt, phys_addr_t dt_phys);
extern void early_init_dt_scan_nodes(void);
extern const char *of_flat_dt_get_machine_name(void);
@@ -90,6 +94,7 @@ extern void unflatten_and_copy_device_tree(void);
extern void early_init_devtree(void *);
extern void early_get_first_memblock_info(void *, phys_addr_t *);
#else /* CONFIG_OF_EARLY_FLATTREE */
+static inline void early_init_dt_check_for_usable_mem_range(void) {}
static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; }
static inline void early_init_fdt_scan_reserved_mem(void) {}
static inline void early_init_fdt_reserve_self(void) {}
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index f821095218b0..d0f66a5e1b2a 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -17,135 +17,22 @@
struct device_node;
-/*
- * This is Linux-specific flags. By default controllers' and Linux' mapping
- * match, but GPIO controllers are free to translate their own flags to
- * Linux-specific in their .xlate callback. Though, 1:1 mapping is recommended.
- */
-enum of_gpio_flags {
- OF_GPIO_ACTIVE_LOW = 0x1,
- OF_GPIO_SINGLE_ENDED = 0x2,
- OF_GPIO_OPEN_DRAIN = 0x4,
- OF_GPIO_TRANSITORY = 0x8,
- OF_GPIO_PULL_UP = 0x10,
- OF_GPIO_PULL_DOWN = 0x20,
-};
-
#ifdef CONFIG_OF_GPIO
-#include <linux/kernel.h>
-
-/*
- * OF GPIO chip for memory mapped banks
- */
-struct of_mm_gpio_chip {
- struct gpio_chip gc;
- void (*save_regs)(struct of_mm_gpio_chip *mm_gc);
- void __iomem *regs;
-};
-
-static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
-{
- return container_of(gc, struct of_mm_gpio_chip, gc);
-}
-
-extern int of_get_named_gpio_flags(struct device_node *np,
- const char *list_name, int index, enum of_gpio_flags *flags);
-
-extern int of_mm_gpiochip_add_data(struct device_node *np,
- struct of_mm_gpio_chip *mm_gc,
- void *data);
-static inline int of_mm_gpiochip_add(struct device_node *np,
- struct of_mm_gpio_chip *mm_gc)
-{
- return of_mm_gpiochip_add_data(np, mm_gc, NULL);
-}
-extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
+extern int of_get_named_gpio(const struct device_node *np,
+ const char *list_name, int index);
#else /* CONFIG_OF_GPIO */
#include <linux/errno.h>
/* Drivers may not strictly depend on the GPIO support, so let them link. */
-static inline int of_get_named_gpio_flags(struct device_node *np,
- const char *list_name, int index, enum of_gpio_flags *flags)
+static inline int of_get_named_gpio(const struct device_node *np,
+ const char *propname, int index)
{
- if (flags)
- *flags = 0;
-
return -ENOSYS;
}
#endif /* CONFIG_OF_GPIO */
-/**
- * of_gpio_named_count() - Count GPIOs for a device
- * @np: device node to count GPIOs for
- * @propname: property name containing gpio specifier(s)
- *
- * The function returns the count of GPIOs specified for a node.
- * Note that the empty GPIO specifiers count too. Returns either
- * Number of gpios defined in property,
- * -EINVAL for an incorrectly formed gpios property, or
- * -ENOENT for a missing gpios property
- *
- * Example:
- * gpios = <0
- * &gpio1 1 2
- * 0
- * &gpio2 3 4>;
- *
- * The above example defines four GPIOs, two of which are not specified.
- * This function will return '4'
- */
-static inline int of_gpio_named_count(struct device_node *np, const char* propname)
-{
- return of_count_phandle_with_args(np, propname, "#gpio-cells");
-}
-
-/**
- * of_gpio_count() - Count GPIOs for a device
- * @np: device node to count GPIOs for
- *
- * Same as of_gpio_named_count, but hard coded to use the 'gpios' property
- */
-static inline int of_gpio_count(struct device_node *np)
-{
- return of_gpio_named_count(np, "gpios");
-}
-
-static inline int of_get_gpio_flags(struct device_node *np, int index,
- enum of_gpio_flags *flags)
-{
- return of_get_named_gpio_flags(np, "gpios", index, flags);
-}
-
-/**
- * of_get_named_gpio() - Get a GPIO number to use with GPIO API
- * @np: device node to get GPIO from
- * @propname: Name of property containing gpio specifier(s)
- * @index: index of the GPIO
- *
- * Returns GPIO number to use with Linux generic GPIO API, or one of the errno
- * value on the error condition.
- */
-static inline int of_get_named_gpio(struct device_node *np,
- const char *propname, int index)
-{
- return of_get_named_gpio_flags(np, propname, index, NULL);
-}
-
-/**
- * of_get_gpio() - Get a GPIO number to use with GPIO API
- * @np: device node to get GPIO from
- * @index: index of the GPIO
- *
- * Returns GPIO number to use with Linux generic GPIO API, or one of the errno
- * value on the error condition.
- */
-static inline int of_get_gpio(struct device_node *np, int index)
-{
- return of_get_gpio_flags(np, index, NULL);
-}
-
#endif /* __LINUX_OF_GPIO_H */
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index 4d7756087b6b..a692d9d979a6 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -11,6 +11,7 @@
#ifndef __LINUX_OF_GRAPH_H
#define __LINUX_OF_GRAPH_H
+#include <linux/cleanup.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -37,14 +38,43 @@ struct of_endpoint {
for (child = of_graph_get_next_endpoint(parent, NULL); child != NULL; \
child = of_graph_get_next_endpoint(parent, child))
+/**
+ * for_each_of_graph_port - iterate over every port in a device or ports node
+ * @parent: parent device or ports node containing port
+ * @child: loop variable pointing to the current port node
+ *
+ * When breaking out of the loop while continuing to use @child, call
+ * return_ptr(@child) or no_free_ptr(@child) so that __free() is not invoked
+ * on it.
+ */
+#define for_each_of_graph_port(parent, child) \
+ for (struct device_node *child __free(device_node) = of_graph_get_next_port(parent, NULL);\
+ child != NULL; child = of_graph_get_next_port(parent, child))
+
+/**
+ * for_each_of_graph_port_endpoint - iterate over every endpoint in a port node
+ * @parent: parent port node
+ * @child: loop variable pointing to the current endpoint node
+ *
+ * When breaking out of the loop while continuing to use @child, call
+ * return_ptr(@child) or no_free_ptr(@child) so that __free() is not invoked
+ * on it.
+ */
+#define for_each_of_graph_port_endpoint(parent, child) \
+ for (struct device_node *child __free(device_node) = of_graph_get_next_port_endpoint(parent, NULL);\
+ child != NULL; child = of_graph_get_next_port_endpoint(parent, child))
+
#ifdef CONFIG_OF
bool of_graph_is_present(const struct device_node *node);
int of_graph_parse_endpoint(const struct device_node *node,
struct of_endpoint *endpoint);
-int of_graph_get_endpoint_count(const struct device_node *np);
+unsigned int of_graph_get_endpoint_count(const struct device_node *np);
+unsigned int of_graph_get_port_count(struct device_node *np);
struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id);
struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
struct device_node *previous);
+struct device_node *of_graph_get_next_port(const struct device_node *parent,
+ struct device_node *port);
+struct device_node *of_graph_get_next_port_endpoint(const struct device_node *port,
+ struct device_node *prev);
struct device_node *of_graph_get_endpoint_by_regs(
const struct device_node *parent, int port_reg, int reg);
struct device_node *of_graph_get_remote_endpoint(
@@ -68,7 +98,12 @@ static inline int of_graph_parse_endpoint(const struct device_node *node,
return -ENOSYS;
}
-static inline int of_graph_get_endpoint_count(const struct device_node *np)
+static inline unsigned int of_graph_get_endpoint_count(const struct device_node *np)
+{
+ return 0;
+}
+
+static inline unsigned int of_graph_get_port_count(struct device_node *np)
{
return 0;
}
@@ -86,6 +121,20 @@ static inline struct device_node *of_graph_get_next_endpoint(
return NULL;
}
+static inline struct device_node *of_graph_get_next_port(
+ const struct device_node *parent,
+ struct device_node *previous)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_graph_get_next_port_endpoint(
+ const struct device_node *parent,
+ struct device_node *previous)
+{
+ return NULL;
+}
+
static inline struct device_node *of_graph_get_endpoint_by_regs(
const struct device_node *parent, int port_reg, int reg)
{
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 16f4b3e87f20..e61cbbe12dac 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -2,34 +2,30 @@
#ifndef __OF_IOMMU_H
#define __OF_IOMMU_H
-#include <linux/device.h>
-#include <linux/iommu.h>
-#include <linux/of.h>
+struct device;
+struct device_node;
+struct iommu_ops;
#ifdef CONFIG_OF_IOMMU
-extern int of_get_dma_window(struct device_node *dn, const char *prefix,
- int index, unsigned long *busno, dma_addr_t *addr,
- size_t *size);
+extern int of_iommu_configure(struct device *dev, struct device_node *master_np,
+ const u32 *id);
-extern const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np,
- const u32 *id);
+extern void of_iommu_get_resv_regions(struct device *dev,
+ struct list_head *list);
#else
-static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
- int index, unsigned long *busno, dma_addr_t *addr,
- size_t *size)
+static inline int of_iommu_configure(struct device *dev,
+ struct device_node *master_np,
+ const u32 *id)
{
- return -EINVAL;
+ return -ENODEV;
}
-static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np,
- const u32 *id)
+static inline void of_iommu_get_resv_regions(struct device *dev,
+ struct list_head *list)
{
- return NULL;
}
#endif /* CONFIG_OF_IOMMU */
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index aaf219bd0354..1c2bc0281807 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -20,12 +20,12 @@ typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
extern unsigned int of_irq_workarounds;
extern struct device_node *of_irq_dflt_pic;
-extern int of_irq_parse_oldworld(struct device_node *device, int index,
- struct of_phandle_args *out_irq);
+int of_irq_parse_oldworld(const struct device_node *device, int index,
+ struct of_phandle_args *out_irq);
#else /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
#define of_irq_workarounds (0)
#define of_irq_dflt_pic (NULL)
-static inline int of_irq_parse_oldworld(struct device_node *device, int index,
+static inline int of_irq_parse_oldworld(const struct device_node *device, int index,
struct of_phandle_args *out_irq)
{
return -EINVAL;
@@ -37,26 +37,30 @@ extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data);
extern int of_irq_to_resource(struct device_node *dev, int index,
struct resource *r);
-extern void of_irq_init(const struct of_device_id *matches);
-
#ifdef CONFIG_OF_IRQ
+extern void of_irq_init(const struct of_device_id *matches);
extern int of_irq_parse_one(struct device_node *device, int index,
struct of_phandle_args *out_irq);
extern int of_irq_count(struct device_node *dev);
extern int of_irq_get(struct device_node *dev, int index);
+extern const struct cpumask *of_irq_get_affinity(struct device_node *dev,
+ int index);
extern int of_irq_get_byname(struct device_node *dev, const char *name);
extern int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs);
extern struct device_node *of_irq_find_parent(struct device_node *child);
extern struct irq_domain *of_msi_get_domain(struct device *dev,
- struct device_node *np,
+ const struct device_node *np,
enum irq_domain_bus_token token);
extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
u32 id,
u32 bus_token);
-extern void of_msi_configure(struct device *dev, struct device_node *np);
-u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in);
+extern void of_msi_configure(struct device *dev, const struct device_node *np);
+extern u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in);
#else
+static inline void of_irq_init(const struct of_device_id *matches)
+{
+}
static inline int of_irq_parse_one(struct device_node *device, int index,
struct of_phandle_args *out_irq)
{
@@ -74,6 +78,11 @@ static inline int of_irq_get_byname(struct device_node *dev, const char *name)
{
return 0;
}
+static inline const struct cpumask *of_irq_get_affinity(struct device_node *dev,
+ int index)
+{
+ return NULL;
+}
static inline int of_irq_to_resource_table(struct device_node *dev,
struct resource *res, int nr_irqs)
{
@@ -98,8 +107,7 @@ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev
static inline void of_msi_configure(struct device *dev, struct device_node *np)
{
}
-static inline u32 of_msi_map_id(struct device *dev,
- struct device_node *msi_np, u32 id_in)
+static inline u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in)
{
return id_in;
}
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 2b05e7f7c238..8a52ef2e6fa6 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -14,9 +14,25 @@
#if IS_ENABLED(CONFIG_OF_MDIO)
bool of_mdiobus_child_is_phy(struct device_node *child);
-int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
-int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
- struct device_node *np);
+int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
+ struct module *owner);
+
+static inline int of_mdiobus_register(struct mii_bus *mdio,
+ struct device_node *np)
+{
+ return __of_mdiobus_register(mdio, np, THIS_MODULE);
+}
+
+int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+ struct device_node *np, struct module *owner);
+
+static inline int devm_of_mdiobus_register(struct device *dev,
+ struct mii_bus *mdio,
+ struct device_node *np)
+{
+ return __devm_of_mdiobus_register(dev, mdio, np, THIS_MODULE);
+}
+
struct mdio_device *of_mdio_find_device(struct device_node *np);
struct phy_device *of_phy_find_device(struct device_node *phy_np);
struct phy_device *
@@ -72,6 +88,13 @@ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *
return mdiobus_register(mdio);
}
+static inline int devm_of_mdiobus_register(struct device *dev,
+ struct mii_bus *mdio,
+ struct device_node *np)
+{
+ return devm_mdiobus_register(dev, mdio);
+}
+
static inline struct mdio_device *of_mdio_find_device(struct device_node *np)
{
return NULL;
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index daef3b0d9270..d88715a0b3a5 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -8,12 +8,14 @@
#include <linux/phy.h>
-#ifdef CONFIG_OF_NET
+#if defined(CONFIG_OF) && defined(CONFIG_NET)
#include <linux/of.h>
struct net_device;
extern int of_get_phy_mode(struct device_node *np, phy_interface_t *interface);
extern int of_get_mac_address(struct device_node *np, u8 *mac);
+extern int of_get_mac_address_nvmem(struct device_node *np, u8 *mac);
+int of_get_ethdev_address(struct device_node *np, struct net_device *dev);
extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
static inline int of_get_phy_mode(struct device_node *np,
@@ -27,6 +29,16 @@ static inline int of_get_mac_address(struct device_node *np, u8 *mac)
return -ENODEV;
}
+static inline int of_get_mac_address_nvmem(struct device_node *np, u8 *mac)
+{
+ return -ENODEV;
+}
+
+static inline int of_get_ethdev_address(struct device_node *np, struct net_device *dev)
+{
+ return -ENODEV;
+}
+
static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
{
return NULL;
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 84a966623e78..17471ef8e092 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -6,11 +6,12 @@
* <benh@kernel.crashing.org>
*/
-#include <linux/device.h>
#include <linux/mod_devicetable.h>
-#include <linux/pm.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
+
+struct device;
+struct device_node;
+struct of_device_id;
+struct platform_device;
/**
* struct of_dev_auxdata - lookup table entry for device names & platform_data
@@ -46,12 +47,15 @@ struct of_dev_auxdata {
{ .compatible = _compat, .phys_addr = _phys, .name = _name, \
.platform_data = _pdata }
-extern const struct of_device_id of_default_bus_match_table[];
-
/* Platform drivers register/unregister */
extern struct platform_device *of_device_alloc(struct device_node *np,
const char *bus_id,
struct device *parent);
+
+extern int of_device_add(struct platform_device *pdev);
+extern int of_device_register(struct platform_device *ofdev);
+extern void of_device_unregister(struct platform_device *ofdev);
+
#ifdef CONFIG_OF
extern struct platform_device *of_find_device_by_node(struct device_node *np);
#else
@@ -61,16 +65,18 @@ static inline struct platform_device *of_find_device_by_node(struct device_node
}
#endif
+extern int of_platform_bus_probe(struct device_node *root,
+ const struct of_device_id *matches,
+ struct device *parent);
+
+#ifdef CONFIG_OF_ADDRESS
/* Platform devices and busses creation */
extern struct platform_device *of_platform_device_create(struct device_node *np,
const char *bus_id,
struct device *parent);
extern int of_platform_device_destroy(struct device *dev, void *data);
-extern int of_platform_bus_probe(struct device_node *root,
- const struct of_device_id *matches,
- struct device *parent);
-#ifdef CONFIG_OF_ADDRESS
+
extern int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
const struct of_dev_auxdata *lookup,
@@ -84,6 +90,18 @@ extern int devm_of_platform_populate(struct device *dev);
extern void devm_of_platform_depopulate(struct device *dev);
#else
+/* Platform devices and busses creation */
+static inline struct platform_device *of_platform_device_create(struct device_node *np,
+ const char *bus_id,
+ struct device *parent)
+{
+ return NULL;
+}
+static inline int of_platform_device_destroy(struct device *dev, void *data)
+{
+ return -ENODEV;
+}
+
static inline int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
const struct of_dev_auxdata *lookup,
@@ -107,10 +125,4 @@ static inline int devm_of_platform_populate(struct device *dev)
static inline void devm_of_platform_depopulate(struct device *dev) { }
#endif
-#if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS)
-extern void of_platform_register_reconfig_notifier(void);
-#else
-static inline void of_platform_register_reconfig_notifier(void) { }
-#endif
-
#endif /* _LINUX_OF_PLATFORM_H */
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index 8216a4156263..f573423359f4 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -7,11 +7,11 @@
struct of_phandle_args;
struct reserved_mem_ops;
+struct resource;
struct reserved_mem {
const char *name;
unsigned long fdt_node;
- unsigned long phandle;
const struct reserved_mem_ops *ops;
phys_addr_t base;
phys_addr_t size;
@@ -27,11 +27,11 @@ struct reserved_mem_ops {
typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
+#ifdef CONFIG_OF_RESERVED_MEM
+
#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
_OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
-#ifdef CONFIG_OF_RESERVED_MEM
-
int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx);
int of_reserved_mem_device_init_by_name(struct device *dev,
@@ -39,11 +39,18 @@ int of_reserved_mem_device_init_by_name(struct device *dev,
const char *name);
void of_reserved_mem_device_release(struct device *dev);
-void fdt_init_reserved_mem(void);
-void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
- phys_addr_t base, phys_addr_t size);
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np);
+int of_reserved_mem_region_to_resource(const struct device_node *np,
+ unsigned int idx, struct resource *res);
+int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
+ const char *name, struct resource *res);
+int of_reserved_mem_region_count(const struct device_node *np);
+
#else
+
+#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
+ _OF_DECLARE_STUB(reservedmem, name, compat, init, reservedmem_of_init_fn)
+
static inline int of_reserved_mem_device_init_by_idx(struct device *dev,
struct device_node *np, int idx)
{
@@ -59,13 +66,29 @@ static inline int of_reserved_mem_device_init_by_name(struct device *dev,
static inline void of_reserved_mem_device_release(struct device *pdev) { }
-static inline void fdt_init_reserved_mem(void) { }
-static inline void fdt_reserved_mem_save_node(unsigned long node,
- const char *uname, phys_addr_t base, phys_addr_t size) { }
static inline struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
return NULL;
}
+
+static inline int of_reserved_mem_region_to_resource(const struct device_node *np,
+ unsigned int idx,
+ struct resource *res)
+{
+ return -ENOSYS;
+}
+
+static inline int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
+ const char *name,
+ struct resource *res)
+{
+ return -ENOSYS;
+}
+
+static inline int of_reserved_mem_region_count(const struct device_node *np)
+{
+ return 0;
+}
#endif
/**
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 461b7aa587ba..6de479ebbe5d 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -30,9 +30,6 @@ enum OID {
/* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */
OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */
- OID_md2WithRSAEncryption, /* 1.2.840.113549.1.1.2 */
- OID_md3WithRSAEncryption, /* 1.2.840.113549.1.1.3 */
- OID_md4WithRSAEncryption, /* 1.2.840.113549.1.1.4 */
OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */
OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */
OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */
@@ -49,10 +46,9 @@ enum OID {
OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */
OID_smimeAuthenticatedAttrs, /* 1.2.840.113549.1.9.16.2.11 */
- /* {iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2)} */
- OID_md2, /* 1.2.840.113549.2.2 */
- OID_md4, /* 1.2.840.113549.2.4 */
- OID_md5, /* 1.2.840.113549.2.5 */
+ OID_mskrb5, /* 1.2.840.48018.1.2.2 */
+ OID_krb5, /* 1.2.840.113554.1.2.2 */
+ OID_krb5u2u, /* 1.2.840.113554.1.2.2.3 */
/* Microsoft Authenticode & Software Publishing */
OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */
@@ -62,9 +58,18 @@ enum OID {
OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */
OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */
+ OID_ntlmssp, /* 1.3.6.1.4.1.311.2.2.10 */
+ OID_negoex, /* 1.3.6.1.4.1.311.2.2.30 */
+
+ OID_spnego, /* 1.3.6.1.5.5.2 */
+
+ OID_IAKerb, /* 1.3.6.1.5.2.5 */
+ OID_PKU2U, /* 1.3.6.1.5.2.7 */
+ OID_Scram, /* 1.3.6.1.5.5.14 */
OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
OID_sha1, /* 1.3.14.3.2.26 */
OID_id_ansip384r1, /* 1.3.132.0.34 */
+ OID_id_ansip521r1, /* 1.3.132.0.35 */
OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
OID_sha384, /* 2.16.840.1.101.3.4.2.2 */
OID_sha512, /* 2.16.840.1.101.3.4.2.3 */
@@ -96,6 +101,10 @@ enum OID {
OID_authorityKeyIdentifier, /* 2.5.29.35 */
OID_extKeyUsage, /* 2.5.29.37 */
+ /* Heimdal mechanisms */
+ OID_NetlogonMechanism, /* 1.2.752.43.14.2 */
+ OID_appleLocalKdcSupported, /* 1.2.752.43.14.3 */
+
/* EC-RDSA */
OID_gostCPSignA, /* 1.2.643.2.2.35.1 */
OID_gostCPSignB, /* 1.2.643.2.2.35.2 */
@@ -125,12 +134,22 @@ enum OID {
OID_TPMImportableKey, /* 2.23.133.10.1.4 */
OID_TPMSealedData, /* 2.23.133.10.1.5 */
+ /* CSOR FIPS-202 SHA-3 */
+ OID_sha3_256, /* 2.16.840.1.101.3.4.2.8 */
+ OID_sha3_384, /* 2.16.840.1.101.3.4.2.9 */
+ OID_sha3_512, /* 2.16.840.1.101.3.4.2.10 */
+ OID_id_ecdsa_with_sha3_256, /* 2.16.840.1.101.3.4.3.10 */
+ OID_id_ecdsa_with_sha3_384, /* 2.16.840.1.101.3.4.3.11 */
+ OID_id_ecdsa_with_sha3_512, /* 2.16.840.1.101.3.4.3.12 */
+ OID_id_rsassa_pkcs1_v1_5_with_sha3_256, /* 2.16.840.1.101.3.4.3.14 */
+ OID_id_rsassa_pkcs1_v1_5_with_sha3_384, /* 2.16.840.1.101.3.4.3.15 */
+ OID_id_rsassa_pkcs1_v1_5_with_sha3_512, /* 2.16.840.1.101.3.4.3.16 */
+
OID__NR
};
extern enum OID look_up_OID(const void *data, size_t datasize);
extern int parse_OID(const void *data, size_t datasize, enum OID *oid);
extern int sprint_oid(const void *, size_t, char *, size_t);
-extern int sprint_OID(enum OID, char *, size_t);
#endif /* _LINUX_OID_REGISTRY_H */
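Usage sketch for the lookup helpers retained above — the raw buffer is an illustrative DER encoding of 1.3.6.1.5.5.2 (SPNEGO), not data from the header:

	static const u8 raw_oid[] = { 0x2b, 0x06, 0x01, 0x05, 0x05, 0x02 };
	char buf[64];

	/* Map the encoded OID to its enum value ... */
	if (look_up_OID(raw_oid, sizeof(raw_oid)) == OID_spnego)
		pr_info("negotiation mechanism is SPNEGO\n");
	/* ... or render it as a dotted string. */
	sprint_oid(raw_oid, sizeof(raw_oid), buf, sizeof(buf));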
diff --git a/include/linux/olpc-ec.h b/include/linux/olpc-ec.h
index c4602364e909..3c2891d85c41 100644
--- a/include/linux/olpc-ec.h
+++ b/include/linux/olpc-ec.h
@@ -56,6 +56,8 @@ extern int olpc_ec_sci_query(u16 *sci_value);
extern bool olpc_ec_wakeup_available(void);
+asmlinkage int xo1_do_sleep(u8 sleep_state);
+
#else
static inline int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf,
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index 5c5c93ad6b50..6f6c31e3fb93 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -292,15 +292,22 @@ struct omap_system_dma_plat_info {
#define dma_omap15xx() __dma_omap15xx(d)
#define dma_omap16xx() __dma_omap16xx(d)
-#if defined(CONFIG_ARCH_OMAP)
extern struct omap_system_dma_plat_info *omap_get_plat_info(void);
+#if defined(CONFIG_ARCH_OMAP1)
extern void omap_set_dma_priority(int lch, int dst_port, int priority);
+#else
+static inline void omap_set_dma_priority(int lch, int dst_port, int priority)
+{
+}
+#endif
+
extern int omap_request_dma(int dev_id, const char *dev_name,
void (*callback)(int lch, u16 ch_status, void *data),
void *data, int *dma_ch);
-extern void omap_disable_dma_irq(int ch, u16 irq_bits);
extern void omap_free_dma(int ch);
+#if IS_ENABLED(CONFIG_USB_OMAP)
+extern void omap_disable_dma_irq(int ch, u16 irq_bits);
extern void omap_start_dma(int lch);
extern void omap_stop_dma(int lch);
extern void omap_set_dma_transfer_params(int lch, int data_type,
@@ -326,10 +333,12 @@ extern void omap_set_dma_dest_burst_mode(int lch,
extern dma_addr_t omap_get_dma_src_pos(int lch);
extern dma_addr_t omap_get_dma_dst_pos(int lch);
extern int omap_get_dma_active_status(int lch);
+#endif
+
extern int omap_dma_running(void);
-#if defined(CONFIG_ARCH_OMAP1) && IS_ENABLED(CONFIG_FB_OMAP)
-#include <mach/lcd_dma.h>
+#if IS_ENABLED(CONFIG_FB_OMAP)
+extern int omap_lcd_dma_running(void);
#else
static inline int omap_lcd_dma_running(void)
{
@@ -337,22 +346,4 @@ static inline int omap_lcd_dma_running(void)
}
#endif
-#else /* CONFIG_ARCH_OMAP */
-
-static inline struct omap_system_dma_plat_info *omap_get_plat_info(void)
-{
- return NULL;
-}
-
-static inline int omap_request_dma(int dev_id, const char *dev_name,
- void (*callback)(int lch, u16 ch_status, void *data),
- void *data, int *dma_ch)
-{
- return -ENODEV;
-}
-
-static inline void omap_free_dma(int ch) { }
-
-#endif /* CONFIG_ARCH_OMAP */
-
#endif /* __LINUX_OMAP_DMA_H */
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
index b7bf735960c2..263b915df1fb 100644
--- a/include/linux/omap-gpmc.h
+++ b/include/linux/omap-gpmc.h
@@ -66,10 +66,6 @@ extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
struct device_node;
-extern int gpmc_get_client_irq(unsigned irq_config);
-
-extern unsigned int gpmc_ticks_to_ns(unsigned int ticks);
-
extern void gpmc_cs_write_reg(int cs, int idx, u32 val);
extern int gpmc_calc_divider(unsigned int sync_clk);
extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
@@ -81,19 +77,6 @@ extern int gpmc_configure(int cmd, int wval);
extern void gpmc_read_settings_dt(struct device_node *np,
struct gpmc_settings *p);
-extern void omap3_gpmc_save_context(void);
-extern void omap3_gpmc_restore_context(void);
-
struct gpmc_timings;
struct omap_nand_platform_data;
struct omap_onenand_platform_data;
-
-#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
-extern int gpmc_onenand_init(struct omap_onenand_platform_data *d);
-#else
-#define board_onenand_data NULL
-static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d)
-{
- return 0;
-}
-#endif
diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h
index 8aa984ec1f38..3cc5c4ed7f5a 100644
--- a/include/linux/omap-mailbox.h
+++ b/include/linux/omap-mailbox.h
@@ -10,17 +10,4 @@ typedef uintptr_t mbox_msg_t;
#define omap_mbox_message(data) (u32)(mbox_msg_t)(data)
-typedef int __bitwise omap_mbox_irq_t;
-#define IRQ_TX ((__force omap_mbox_irq_t) 1)
-#define IRQ_RX ((__force omap_mbox_irq_t) 2)
-
-struct mbox_chan;
-struct mbox_client;
-
-struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
- const char *chan_name);
-
-void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq);
-void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq);
-
#endif /* OMAP_MAILBOX_H */
diff --git a/include/linux/once.h b/include/linux/once.h
index 9225ee6d96c7..449a0e34ad5a 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -5,9 +5,17 @@
#include <linux/types.h>
#include <linux/jump_label.h>
+/* Helpers used from arbitrary contexts.
+ * Hard irqs are blocked, be cautious.
+ */
bool __do_once_start(bool *done, unsigned long *flags);
void __do_once_done(bool *done, struct static_key_true *once_key,
- unsigned long *flags);
+ unsigned long *flags, struct module *mod);
+
+/* Variant for process contexts only. */
+bool __do_once_sleepable_start(bool *done);
+void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
+ struct module *mod);
/* Call a function exactly once. The idea of DO_ONCE() is to perform
* a function call such as initialization of random seeds, etc, only
@@ -16,7 +24,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
* out the condition into a nop. DO_ONCE() guarantees type safety of
* arguments!
*
- * Not that the following is not equivalent ...
+ * Note that the following is not equivalent ...
*
* DO_ONCE(func, arg);
* DO_ONCE(func, arg);
@@ -38,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
#define DO_ONCE(func, ...) \
({ \
bool ___ret = false; \
- static bool ___done = false; \
+ static bool __section(".data..do_once") ___done = false; \
static DEFINE_STATIC_KEY_TRUE(___once_key); \
if (static_branch_unlikely(&___once_key)) { \
unsigned long ___flags; \
@@ -46,15 +54,33 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
if (unlikely(___ret)) { \
func(__VA_ARGS__); \
__do_once_done(&___done, &___once_key, \
- &___flags); \
+ &___flags, THIS_MODULE); \
} \
} \
___ret; \
})
+/* Variant of DO_ONCE() for process/sleepable contexts. */
+#define DO_ONCE_SLEEPABLE(func, ...) \
+ ({ \
+ bool ___ret = false; \
+ static bool __section(".data..do_once") ___done = false; \
+ static DEFINE_STATIC_KEY_TRUE(___once_key); \
+ if (static_branch_unlikely(&___once_key)) { \
+ ___ret = __do_once_sleepable_start(&___done); \
+ if (unlikely(___ret)) { \
+ func(__VA_ARGS__); \
+ __do_once_sleepable_done(&___done, &___once_key,\
+ THIS_MODULE); \
+ } \
+ } \
+ ___ret; \
+ })
+
#define get_random_once(buf, nbytes) \
DO_ONCE(get_random_bytes, (buf), (nbytes))
-#define get_random_once_wait(buf, nbytes) \
- DO_ONCE(get_random_bytes_wait, (buf), (nbytes)) \
+
+#define get_random_sleepable_once(buf, nbytes) \
+ DO_ONCE_SLEEPABLE(get_random_bytes, (buf), (nbytes))
#endif /* _LINUX_ONCE_H */
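Usage sketch for the one-shot helpers — a hypothetical hash-seeding hot path (compute_hash and hash_seed are assumptions):

	#include <linux/jhash.h>
	#include <linux/once.h>

	static u32 hash_seed;

	static u32 compute_hash(const void *p, u32 len)
	{
		/* Seeds exactly once; afterwards the static key patches the
		 * check out of the fast path. Usable from hardirq context. */
		get_random_once(&hash_seed, sizeof(hash_seed));
		return jhash(p, len, hash_seed);
	}

From process context, DO_ONCE_SLEEPABLE()/get_random_sleepable_once() follow the same pattern without the hardirq-disabled section.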
diff --git a/include/linux/once_lite.h b/include/linux/once_lite.h
new file mode 100644
index 000000000000..236592c4eeb1
--- /dev/null
+++ b/include/linux/once_lite.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ONCE_LITE_H
+#define _LINUX_ONCE_LITE_H
+
+#include <linux/types.h>
+
+/* Call a function once. Similar to DO_ONCE(), but does not use jump label
+ * patching via static keys.
+ */
+#define DO_ONCE_LITE(func, ...) \
+ DO_ONCE_LITE_IF(true, func, ##__VA_ARGS__)
+
+#define __ONCE_LITE_IF(condition) \
+ ({ \
+ static bool __section(".data..once") __already_done; \
+ bool __ret_cond = !!(condition); \
+ bool __ret_once = false; \
+ \
+ if (unlikely(__ret_cond) && unlikely(!__already_done)) {\
+ __already_done = true; \
+ __ret_once = true; \
+ } \
+ unlikely(__ret_once); \
+ })
+
+#define DO_ONCE_LITE_IF(condition, func, ...) \
+ ({ \
+ bool __ret_do_once = !!(condition); \
+ \
+ if (__ONCE_LITE_IF(__ret_do_once)) \
+ func(__VA_ARGS__); \
+ \
+ unlikely(__ret_do_once); \
+ })
+
+#endif /* _LINUX_ONCE_LITE_H */
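Usage sketch — DO_ONCE_LITE_IF() is the pattern behind printk_once()-style macros; MAX_LEN and report_bad_len() are hypothetical:

	#define report_bad_len(len) \
		DO_ONCE_LITE_IF((len) > MAX_LEN, pr_warn, \
				"bad length %zu\n", (size_t)(len))

The first time the condition holds, pr_warn() fires; later hits are filtered by the __already_done flag rather than a static key.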
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 2db9a1432511..7b02bc1d0a7e 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -7,7 +7,6 @@
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
-#include <linux/sched/coredump.h> /* MMF_* */
#include <linux/mm.h> /* VM_FAULT* */
struct zonelist;
@@ -78,15 +77,6 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
}
/*
- * Use this helper if tsk->mm != mm and the victim mm needs a special
- * handling. This is guaranteed to stay true after once set.
- */
-static inline bool mm_is_oom_victim(struct mm_struct *mm)
-{
- return test_bit(MMF_OOM_VICTIM, &mm->flags);
-}
-
-/*
* Checks whether a page fault on the given mm is still reliable.
* This is no longer true if the oom reaper started to reap the
* address space which is reflected by MMF_UNSTABLE flag set in
@@ -101,13 +91,11 @@ static inline bool mm_is_oom_victim(struct mm_struct *mm)
*/
static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
{
- if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
+ if (unlikely(mm_flags_test(MMF_UNSTABLE, mm)))
return VM_FAULT_SIGBUS;
return 0;
}
-bool __oom_reap_task_mm(struct mm_struct *mm);
-
long oom_badness(struct task_struct *p,
unsigned long totalpages);
@@ -123,8 +111,4 @@ extern void oom_killer_enable(void);
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
-/* sysctls */
-extern int sysctl_oom_dump_tasks;
-extern int sysctl_oom_kill_allocating_task;
-extern int sysctl_panic_on_oom;
#endif /* _INCLUDE_LINUX_OOM_H */
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
index 5581dbd3bd34..ea8fb31379e3 100644
--- a/include/linux/osq_lock.h
+++ b/include/linux/osq_lock.h
@@ -6,11 +6,6 @@
* An MCS like lock especially tailored for optimistic spinning for sleeping
* lock implementations (mutex, rwsem, etc).
*/
-struct optimistic_spin_node {
- struct optimistic_spin_node *next, *prev;
- int locked; /* 1 if lock acquired */
- int cpu; /* encoded CPU # + 1 value */
-};
struct optimistic_spin_queue {
/*
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 0f12345c21fb..736f633b2d5f 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -4,14 +4,12 @@
#include <linux/compiler.h>
#include <linux/limits.h>
+#include <linux/const.h>
/*
- * In the fallback code below, we need to compute the minimum and
- * maximum values representable in a given type. These macros may also
- * be useful elsewhere, so we provide them outside the
- * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
- *
- * It would seem more obvious to do something like
+ * We need to compute the minimum and maximum values representable in a given
+ * type. These macros may also be useful elsewhere. It would seem more obvious
+ * to do something like:
*
* #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
* #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
@@ -32,10 +30,11 @@
* https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
* credit to Christian Biere.
*/
-#define is_signed_type(type) (((type)(-1)) < (type)1)
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
-#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
-#define type_min(T) ((T)((T)-type_max(T)-(T)1))
+#define __type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
+#define type_max(t) __type_max(typeof(t))
+#define __type_min(T) ((T)((T)-type_max(T)-(T)1))
+#define type_min(t) __type_min(typeof(t))
/*
* Avoids triggering -Wtype-limits compilation warning,
@@ -54,194 +53,153 @@ static inline bool __must_check __must_check_overflow(bool overflow)
return unlikely(overflow);
}
-#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
-/*
- * For simplicity and code hygiene, the fallback code below insists on
- * a, b and *d having the same type (similar to the min() and max()
- * macros), whereas gcc's type-generic overflow checkers accept
- * different types. Hence we don't just make check_add_overflow an
- * alias for __builtin_add_overflow, but add type checks similar to
- * below.
- */
-#define check_add_overflow(a, b, d) __must_check_overflow(({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- __builtin_add_overflow(__a, __b, __d); \
-}))
-
-#define check_sub_overflow(a, b, d) __must_check_overflow(({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- __builtin_sub_overflow(__a, __b, __d); \
-}))
-
-#define check_mul_overflow(a, b, d) __must_check_overflow(({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- __builtin_mul_overflow(__a, __b, __d); \
-}))
-
-#else
-
-
-/* Checking for unsigned overflow is relatively easy without causing UB. */
-#define __unsigned_add_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a + __b; \
- *__d < __a; \
-})
-#define __unsigned_sub_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a - __b; \
- __a < __b; \
-})
-/*
- * If one of a or b is a compile-time constant, this avoids a division.
- */
-#define __unsigned_mul_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a * __b; \
- __builtin_constant_p(__b) ? \
- __b > 0 && __a > type_max(typeof(__a)) / __b : \
- __a > 0 && __b > type_max(typeof(__b)) / __a; \
-})
-
-/*
- * For signed types, detecting overflow is much harder, especially if
- * we want to avoid UB. But the interface of these macros is such that
- * we must provide a result in *d, and in fact we must produce the
- * result promised by gcc's builtins, which is simply the possibly
- * wrapped-around value. Fortunately, we can just formally do the
- * operations in the widest relevant unsigned type (u64) and then
- * truncate the result - gcc is smart enough to generate the same code
- * with and without the (u64) casts.
+/**
+ * check_add_overflow() - Calculate addition with overflow checking
+ * @a: first addend
+ * @b: second addend
+ * @d: pointer to store sum
+ *
+ * Returns true on wrap-around, false otherwise.
+ *
+ * *@d holds the results of the attempted addition, regardless of whether
+ * wrap-around occurred.
*/
+#define check_add_overflow(a, b, d) \
+ __must_check_overflow(__builtin_add_overflow(a, b, d))
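A minimal usage sketch (hdr_len and payload_len are hypothetical):

	size_t total;

	if (check_add_overflow(hdr_len, payload_len, &total))
		return -EOVERFLOW;	/* total still holds the wrapped sum */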
-/*
- * Adding two signed integers can overflow only if they have the same
- * sign, and overflow has happened iff the result has the opposite
- * sign.
- */
-#define __signed_add_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a + (u64)__b; \
- (((~(__a ^ __b)) & (*__d ^ __a)) \
- & type_min(typeof(__a))) != 0; \
-})
-
-/*
- * Subtraction is similar, except that overflow can now happen only
- * when the signs are opposite. In this case, overflow has happened if
- * the result has the opposite sign of a.
- */
-#define __signed_sub_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a - (u64)__b; \
- ((((__a ^ __b)) & (*__d ^ __a)) \
- & type_min(typeof(__a))) != 0; \
-})
-
-/*
- * Signed multiplication is rather hard. gcc always follows C99, so
- * division is truncated towards 0. This means that we can write the
- * overflow check like this:
- *
- * (a > 0 && (b > MAX/a || b < MIN/a)) ||
- * (a < -1 && (b > MIN/a || b < MAX/a) ||
- * (a == -1 && b == MIN)
- *
- * The redundant casts of -1 are to silence an annoying -Wtype-limits
- * (included in -Wextra) warning: When the type is u8 or u16, the
- * __b_c_e in check_mul_overflow obviously selects
- * __unsigned_mul_overflow, but unfortunately gcc still parses this
- * code and warns about the limited range of __b.
- */
-
-#define __signed_mul_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- typeof(a) __tmax = type_max(typeof(a)); \
- typeof(a) __tmin = type_min(typeof(a)); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a * (u64)__b; \
- (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
- (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
- (__b == (typeof(__b))-1 && __a == __tmin); \
-})
+/**
+ * wrapping_add() - Intentionally perform a wrapping addition
+ * @type: type for result of calculation
+ * @a: first addend
+ * @b: second addend
+ *
+ * Return the potentially wrapped-around addition without
+ * tripping any wrap-around sanitizers that may be enabled.
+ */
+#define wrapping_add(type, a, b) \
+ ({ \
+ type __val; \
+ __builtin_add_overflow(a, b, &__val); \
+ __val; \
+ })
+/**
+ * wrapping_assign_add() - Intentionally perform a wrapping increment assignment
+ * @var: variable to be incremented
+ * @offset: amount to add
+ *
+ * Increments @var by @offset with wrap-around. Returns the resulting
+ * value of @var. Will not trip any wrap-around sanitizers.
+ *
+ * Returns the new value of @var.
+ */
+#define wrapping_assign_add(var, offset) \
+ ({ \
+ typeof(var) *__ptr = &(var); \
+ *__ptr = wrapping_add(typeof(var), *__ptr, offset); \
+ })
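A worked sketch of the wrapping helpers on a u16, with values chosen to cross the wrap point:

	u16 seq = 65535;

	seq = wrapping_add(u16, seq, 2);	/* wraps to 1 */
	wrapping_assign_add(seq, 9);		/* seq is now 10 */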
-#define check_add_overflow(a, b, d) __must_check_overflow( \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_add_overflow(a, b, d), \
- __unsigned_add_overflow(a, b, d)))
+/**
+ * check_sub_overflow() - Calculate subtraction with overflow checking
+ * @a: minuend; value to subtract from
+ * @b: subtrahend; value to subtract from @a
+ * @d: pointer to store difference
+ *
+ * Returns true on wrap-around, false otherwise.
+ *
+ * *@d holds the results of the attempted subtraction, regardless of whether
+ * wrap-around occurred.
+ */
+#define check_sub_overflow(a, b, d) \
+ __must_check_overflow(__builtin_sub_overflow(a, b, d))
-#define check_sub_overflow(a, b, d) __must_check_overflow( \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_sub_overflow(a, b, d), \
- __unsigned_sub_overflow(a, b, d)))
+/**
+ * wrapping_sub() - Intentionally perform a wrapping subtraction
+ * @type: type for result of calculation
+ * @a: minuend; value to subtract from
+ * @b: subtrahend; value to subtract from @a
+ *
+ * Return the potentially wrapped-around subtraction without
+ * tripping any wrap-around sanitizers that may be enabled.
+ */
+#define wrapping_sub(type, a, b) \
+ ({ \
+ type __val; \
+ __builtin_sub_overflow(a, b, &__val); \
+ __val; \
+ })
-#define check_mul_overflow(a, b, d) __must_check_overflow( \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_mul_overflow(a, b, d), \
- __unsigned_mul_overflow(a, b, d)))
+/**
+ * wrapping_assign_sub() - Intentionally perform a wrapping decrement assign
+ * @var: variable to be decremented
+ * @offset: amount to subtract
+ *
+ * Decrements @var by @offset with wrap-around. Returns the resulting
+ * value of @var. Will not trip any wrap-around sanitizers.
+ *
+ * Returns the new value of @var.
+ */
+#define wrapping_assign_sub(var, offset) \
+ ({ \
+ typeof(var) *__ptr = &(var); \
+ *__ptr = wrapping_sub(typeof(var), *__ptr, offset); \
+ })
-#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
+/**
+ * check_mul_overflow() - Calculate multiplication with overflow checking
+ * @a: first factor
+ * @b: second factor
+ * @d: pointer to store product
+ *
+ * Returns true on wrap-around, false otherwise.
+ *
+ * *@d holds the results of the attempted multiplication, regardless of whether
+ * wrap-around occurred.
+ */
+#define check_mul_overflow(a, b, d) \
+ __must_check_overflow(__builtin_mul_overflow(a, b, d))
-/** check_shl_overflow() - Calculate a left-shifted value and check overflow
+/**
+ * wrapping_mul() - Intentionally perform a wrapping multiplication
+ * @type: type for result of calculation
+ * @a: first factor
+ * @b: second factor
*
+ * Return the potentially wrapped-around multiplication without
+ * tripping any wrap-around sanitizers that may be enabled.
+ */
+#define wrapping_mul(type, a, b) \
+ ({ \
+ type __val; \
+ __builtin_mul_overflow(a, b, &__val); \
+ __val; \
+ })
+
+/**
+ * check_shl_overflow() - Calculate a left-shifted value and check overflow
* @a: Value to be shifted
* @s: How many bits left to shift
* @d: Pointer to where to store the result
*
* Computes *@d = (@a << @s)
*
- * Returns true if '*d' cannot hold the result or when 'a << s' doesn't
+ * Returns true if '*@d' cannot hold the result or when '@a << @s' doesn't
* make sense. Example conditions:
- * - 'a << s' causes bits to be lost when stored in *d.
- * - 's' is garbage (e.g. negative) or so large that the result of
- * 'a << s' is guaranteed to be 0.
- * - 'a' is negative.
- * - 'a << s' sets the sign bit, if any, in '*d'.
*
- * '*d' will hold the results of the attempted shift, but is not
+ * - '@a << @s' causes bits to be lost when stored in *@d.
+ * - '@s' is garbage (e.g. negative) or so large that the result of
+ * '@a << @s' is guaranteed to be 0.
+ * - '@a' is negative.
+ * - '@a << @s' sets the sign bit, if any, in '*@d'.
+ *
+ * '*@d' will hold the results of the attempted shift, but is not
* considered "safe for use" if true is returned.
*/
#define check_shl_overflow(a, s, d) __must_check_overflow(({ \
typeof(a) _a = a; \
typeof(s) _s = s; \
typeof(d) _d = d; \
- u64 _a_full = _a; \
+ unsigned long long _a_full = _a; \
unsigned int _to_shift = \
is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \
*_d = (_a_full << _to_shift); \
@@ -249,87 +207,211 @@ static inline bool __must_check __must_check_overflow(bool overflow)
(*_d >> _to_shift) != _a); \
}))
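A minimal usage sketch (nr_pages is hypothetical):

	unsigned long bytes;

	if (check_shl_overflow(nr_pages, PAGE_SHIFT, &bytes))
		return -EINVAL;	/* shift would lose bits */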
+#define __overflows_type_constexpr(x, T) ( \
+ is_unsigned_type(typeof(x)) ? \
+ (x) > type_max(T) : \
+ is_unsigned_type(typeof(T)) ? \
+ (x) < 0 || (x) > type_max(T) : \
+ (x) < type_min(T) || (x) > type_max(T))
+
+#define __overflows_type(x, T) ({ \
+ typeof(T) v = 0; \
+ check_add_overflow((x), v, &v); \
+})
+
/**
- * array_size() - Calculate size of 2-dimensional array.
+ * overflows_type - helper for checking the overflows between value, variables,
+ * or data type
*
- * @a: dimension one
- * @b: dimension two
+ * @n: source constant value or variable to be checked
+ * @T: destination variable or data type proposed to store @n
*
- * Calculates size of 2-dimensional array: @a * @b.
+ * Compares the @n expression for whether or not it can safely fit in
+ * the storage of the type in @T. @n and @T can have different types.
+ * If @n is a constant expression, this will also resolve to a constant
+ * expression.
*
- * Returns: number of bytes needed to represent the array or SIZE_MAX on
- * overflow.
+ * Returns: true if overflow can occur, false otherwise.
+ */
+#define overflows_type(n, T) \
+ __builtin_choose_expr(__is_constexpr(n), \
+ __overflows_type_constexpr(n, T), \
+ __overflows_type(n, T))
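A sketch of the constant-expression path — both asserts resolve at compile time:

	static_assert(!overflows_type(65535, u16));	/* fits exactly */
	static_assert(overflows_type(65536, u16));	/* one past type_max */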
+
+/**
+ * range_overflows() - Check if a range is out of bounds
+ * @start: Start of the range.
+ * @size: Size of the range.
+ * @max: Exclusive upper boundary.
+ *
+ * A strict check to determine if the range [@start, @start + @size) is
+ * invalid with respect to the allowable range [0, @max). Any range
+ * starting at or beyond @max is considered an overflow, even if @size is 0.
+ *
+ * Returns: true if the range is out of bounds.
+ */
+#define range_overflows(start, size, max) ({ \
+ typeof(start) start__ = (start); \
+ typeof(size) size__ = (size); \
+ typeof(max) max__ = (max); \
+ (void)(&start__ == &size__); \
+ (void)(&start__ == &max__); \
+ start__ >= max__ || size__ > max__ - start__; \
+})
+
+/**
+ * range_overflows_t() - Check if a range is out of bounds
+ * @type: Data type to use.
+ * @start: Start of the range.
+ * @size: Size of the range.
+ * @max: Exclusive upper boundary.
+ *
+ * Same as range_overflows() but forcing the parameters to @type.
+ *
+ * Returns: true if the range is out of bounds.
+ */
+#define range_overflows_t(type, start, size, max) \
+ range_overflows((type)(start), (type)(size), (type)(max))
+
+/**
+ * range_end_overflows() - Check if a range's endpoint is out of bounds
+ * @start: Start of the range.
+ * @size: Size of the range.
+ * @max: Exclusive upper boundary.
+ *
+ * Checks only if the endpoint of a range (@start + @size) exceeds @max.
+ * Unlike range_overflows(), a zero-sized range at the boundary (@start == @max)
+ * is not considered an overflow. Useful for iterator-style checks.
+ *
+ * Returns: true if the endpoint exceeds the boundary.
+ */
+#define range_end_overflows(start, size, max) ({ \
+ typeof(start) start__ = (start); \
+ typeof(size) size__ = (size); \
+ typeof(max) max__ = (max); \
+ (void)(&start__ == &size__); \
+ (void)(&start__ == &max__); \
+ start__ > max__ || size__ > max__ - start__; \
+})
+
+/**
+ * range_end_overflows_t() - Check if a range's endpoint is out of bounds
+ * @type: Data type to use.
+ * @start: Start of the range.
+ * @size: Size of the range.
+ * @max: Exclusive upper boundary.
+ *
+ * Same as range_end_overflows() but forcing the parameters to @type.
+ *
+ * Returns: true if the endpoint exceeds the boundary.
+ */
+#define range_end_overflows_t(type, start, size, max) \
+ range_end_overflows((type)(start), (type)(size), (type)(max))
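A sketch contrasting the two range checks; offset, len and buf_len are hypothetical and share the type size_t, as the identity checks require:

	if (range_overflows(offset, len, buf_len))	/* offset == buf_len rejected */
		return -EINVAL;
	if (range_end_overflows(offset, len, buf_len))	/* zero-sized tail at buf_len allowed */
		return -EINVAL;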
+
+/**
+ * castable_to_type - like __same_type(), but also allows for casted literals
+ *
+ * @n: variable or constant value
+ * @T: variable or data type
+ *
+ * Unlike the __same_type() macro, this allows a constant value as the
+ * first argument. If this value would not overflow into an assignment
+ * of the second argument's type, it returns true. Otherwise, this falls
+ * back to __same_type().
+ */
+#define castable_to_type(n, T) \
+ __builtin_choose_expr(__is_constexpr(n), \
+ !__overflows_type_constexpr(n, T), \
+ __same_type(n, T))
+
+/**
+ * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX
+ * @factor1: first factor
+ * @factor2: second factor
+ *
+ * Returns: calculate @factor1 * @factor2, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. The
+ * lvalue must be size_t to avoid implicit type conversion.
*/
-static inline __must_check size_t array_size(size_t a, size_t b)
+static inline size_t __must_check size_mul(size_t factor1, size_t factor2)
{
size_t bytes;
- if (check_mul_overflow(a, b, &bytes))
+ if (check_mul_overflow(factor1, factor2, &bytes))
return SIZE_MAX;
return bytes;
}
/**
- * array3_size() - Calculate size of 3-dimensional array.
- *
- * @a: dimension one
- * @b: dimension two
- * @c: dimension three
- *
- * Calculates size of 3-dimensional array: @a * @b * @c.
+ * size_add() - Calculate size_t addition with saturation at SIZE_MAX
+ * @addend1: first addend
+ * @addend2: second addend
*
- * Returns: number of bytes needed to represent the array or SIZE_MAX on
- * overflow.
+ * Returns: calculate @addend1 + @addend2, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. The
+ * lvalue must be size_t to avoid implicit type conversion.
*/
-static inline __must_check size_t array3_size(size_t a, size_t b, size_t c)
+static inline size_t __must_check size_add(size_t addend1, size_t addend2)
{
size_t bytes;
- if (check_mul_overflow(a, b, &bytes))
- return SIZE_MAX;
- if (check_mul_overflow(bytes, c, &bytes))
+ if (check_add_overflow(addend1, addend2, &bytes))
return SIZE_MAX;
return bytes;
}
-/*
- * Compute a*b+c, returning SIZE_MAX on overflow. Internal helper for
- * struct_size() below.
+/**
+ * size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX
+ * @minuend: value to subtract from
+ * @subtrahend: value to subtract from @minuend
+ *
+ * Returns: calculate @minuend - @subtrahend, both promoted to size_t,
+ * with any overflow causing the return value to be SIZE_MAX. For
+ * composition with the size_add() and size_mul() helpers, neither
+ * argument may be SIZE_MAX (or the result will be forced to SIZE_MAX).
+ * The lvalue must be size_t to avoid implicit type conversion.
*/
-static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c)
+static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
{
size_t bytes;
- if (check_mul_overflow(a, b, &bytes))
- return SIZE_MAX;
- if (check_add_overflow(bytes, c, &bytes))
+ if (minuend == SIZE_MAX || subtrahend == SIZE_MAX ||
+ check_sub_overflow(minuend, subtrahend, &bytes))
return SIZE_MAX;
return bytes;
}
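A sketch of composing the saturating helpers (struct hdr is hypothetical); saturation means a later allocation fails cleanly instead of under-allocating:

	size_t bytes = size_add(size_mul(nmemb, size), sizeof(struct hdr));
	void *p = kmalloc(bytes, GFP_KERNEL);	/* kmalloc(SIZE_MAX) returns NULL */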
/**
- * struct_size() - Calculate size of structure with trailing array.
- * @p: Pointer to the structure.
- * @member: Name of the array member.
- * @count: Number of elements in the array.
+ * array_size() - Calculate size of 2-dimensional array.
+ * @a: dimension one
+ * @b: dimension two
*
- * Calculates size of memory needed for structure @p followed by an
- * array of @count number of @member elements.
+ * Calculates size of 2-dimensional array: @a * @b.
*
- * Return: number of bytes needed or SIZE_MAX on overflow.
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
*/
-#define struct_size(p, member, count) \
- __ab_c_size(count, \
- sizeof(*(p)->member) + __must_be_array((p)->member),\
- sizeof(*(p)))
+#define array_size(a, b) size_mul(a, b)
+
+/**
+ * array3_size() - Calculate size of 3-dimensional array.
+ * @a: dimension one
+ * @b: dimension two
+ * @c: dimension three
+ *
+ * Calculates size of 3-dimensional array: @a * @b * @c.
+ *
+ * Returns: number of bytes needed to represent the array or SIZE_MAX on
+ * overflow.
+ */
+#define array3_size(a, b, c) size_mul(size_mul(a, b), c)
/**
* flex_array_size() - Calculate size of a flexible array member
* within an enclosing structure.
- *
* @p: Pointer to the structure.
* @member: Name of the flexible array member.
* @count: Number of elements in the array.
@@ -340,7 +422,134 @@ static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c)
* Return: number of bytes needed or SIZE_MAX on overflow.
*/
#define flex_array_size(p, member, count) \
- array_size(count, \
- sizeof(*(p)->member) + __must_be_array((p)->member))
+ __builtin_choose_expr(__is_constexpr(count), \
+ (count) * sizeof(*(p)->member) + __must_be_array((p)->member), \
+ size_mul(count, sizeof(*(p)->member) + __must_be_array((p)->member)))
+
+/**
+ * struct_size() - Calculate size of structure with trailing flexible array.
+ * @p: Pointer to the structure.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure of @p followed by an
+ * array of @count number of @member elements.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size(p, member, count) \
+ __builtin_choose_expr(__is_constexpr(count), \
+ sizeof(*(p)) + flex_array_size(p, member, count), \
+ size_add(sizeof(*(p)), flex_array_size(p, member, count)))
+
+/**
+ * struct_size_t() - Calculate size of structure with trailing flexible array
+ * @type: structure type name.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure @type followed by an
+ * array of @count number of @member elements. Prefer using struct_size()
+ * when possible instead, to keep calculations associated with a specific
+ * instance variable of type @type.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size_t(type, member, count) \
+ struct_size((type *)NULL, member, count)
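A sketch of the idiomatic allocation pattern (struct sample is hypothetical; struct_size() only takes sizeof(*s), so using s before assignment is fine):

	struct sample {
		size_t count;
		u32 items[];
	};

	struct sample *s = kzalloc(struct_size(s, items, n), GFP_KERNEL);
	if (s)
		s->count = n;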
+
+/**
+ * struct_offset() - Calculate the offset of a member within a struct
+ * @p: Pointer to the struct
+ * @member: Name of the member to get the offset of
+ *
+ * Calculates the offset of a particular @member of the structure pointed
+ * to by @p.
+ *
+ * Return: number of bytes to the location of @member.
+ */
+#define struct_offset(p, member) (offsetof(typeof(*(p)), member))
+
+/**
+ * __DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
+ * Enables caller macro to pass arbitrary trailing expressions
+ *
+ * @type: structure type name, including "struct" keyword.
+ * @name: Name for a variable to define.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array; must be compile-time const.
+ * @trailer: Trailing expressions for attributes and/or initializers.
+ */
+#define __DEFINE_FLEX(type, name, member, count, trailer...) \
+ _Static_assert(__builtin_constant_p(count), \
+ "onstack flex array members require compile-time const count"); \
+ union { \
+ u8 bytes[struct_size_t(type, member, count)]; \
+ type obj; \
+ } name##_u trailer; \
+ type *name = (type *)&name##_u
+
+/**
+ * _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
+ * Enables caller macro to pass (different) initializer.
+ *
+ * @type: structure type name, including "struct" keyword.
+ * @name: Name for a variable to define.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array; must be compile-time const.
+ * @initializer: Initializer expression (e.g., pass `= { }` at minimum).
+ */
+#define _DEFINE_FLEX(type, name, member, count, initializer...) \
+ __DEFINE_FLEX(type, name, member, count, = { .obj initializer })
+
+/**
+ * DEFINE_RAW_FLEX() - Define an on-stack instance of structure with a trailing
+ * flexible array member, when it does not have a __counted_by annotation.
+ *
+ * @type: structure type name, including "struct" keyword.
+ * @name: Name for a variable to define.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array; must be compile-time const.
+ *
+ * Define a zeroed, on-stack, instance of @type structure with a trailing
+ * flexible array member.
+ * Use __struct_size(@name) to get compile-time size of it afterwards.
+ * Use __member_size(@name->member) to get compile-time size of @name members.
+ * Use STACK_FLEX_ARRAY_SIZE(@name, @member) to get compile-time number of
+ * elements in array @member.
+ */
+#define DEFINE_RAW_FLEX(type, name, member, count) \
+ __DEFINE_FLEX(type, name, member, count, = { })
+
+/**
+ * DEFINE_FLEX() - Define an on-stack instance of structure with a trailing
+ * flexible array member.
+ *
+ * @TYPE: structure type name, including "struct" keyword.
+ * @NAME: Name for a variable to define.
+ * @MEMBER: Name of the array member.
+ * @COUNTER: Name of the __counted_by member.
+ * @COUNT: Number of elements in the array; must be compile-time const.
+ *
+ * Define a zeroed, on-stack, instance of @TYPE structure with a trailing
+ * flexible array member.
+ * Use __struct_size(@NAME) to get compile-time size of it afterwards.
+ * Use __member_size(@NAME->member) to get compile-time size of @NAME members.
+ * Use STACK_FLEX_ARRAY_SIZE(@NAME, @MEMBER) to get compile-time number of
+ * elements in array @MEMBER.
+ */
+#define DEFINE_FLEX(TYPE, NAME, MEMBER, COUNTER, COUNT) \
+ _DEFINE_FLEX(TYPE, NAME, MEMBER, COUNT, = { .COUNTER = COUNT, })
+
+/**
+ * STACK_FLEX_ARRAY_SIZE() - helper macro for DEFINE_FLEX() family.
+ * Returns the number of elements in @array.
+ *
+ * @name: Name for a variable defined in DEFINE_RAW_FLEX()/DEFINE_FLEX().
+ * @array: Name of the array member.
+ */
+#define STACK_FLEX_ARRAY_SIZE(name, array) \
+ (__member_size((name)->array) / sizeof(*(name)->array) + \
+ __must_be_array((name)->array))
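A sketch of an on-stack instance with a __counted_by() member (struct msg is hypothetical):

	struct msg {
		u8 len;
		u8 data[] __counted_by(len);
	};

	DEFINE_FLEX(struct msg, m, data, len, 8);
	/* m->len == 8, STACK_FLEX_ARRAY_SIZE(m, data) == 8 */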
#endif /* __LINUX_OVERFLOW_H */
diff --git a/include/linux/packing.h b/include/linux/packing.h
index 54667735cc67..20ae4d452c7b 100644
--- a/include/linux/packing.h
+++ b/include/linux/packing.h
@@ -1,12 +1,93 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _LINUX_PACKING_H
#define _LINUX_PACKING_H
-#include <linux/types.h>
+#include <linux/array_size.h>
#include <linux/bitops.h>
+#include <linux/build_bug.h>
+#include <linux/minmax.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+#define GEN_PACKED_FIELD_STRUCT(__type) \
+ struct packed_field_ ## __type { \
+ __type startbit; \
+ __type endbit; \
+ __type offset; \
+ __type size; \
+ }
+
+/* struct packed_field_u8. Use with bit offsets < 256, buffers < 32B and
+ * unpacked structures < 256B.
+ */
+GEN_PACKED_FIELD_STRUCT(u8);
+
+/* struct packed_field_u16. Use with bit offsets < 65536, buffers < 8KB and
+ * unpacked structures < 64KB.
+ */
+GEN_PACKED_FIELD_STRUCT(u16);
+
+#define PACKED_FIELD(start, end, struct_name, struct_field) \
+{ \
+ (start), \
+ (end), \
+ offsetof(struct_name, struct_field), \
+ sizeof_field(struct_name, struct_field), \
+}
+
+#define CHECK_PACKED_FIELD_OVERLAP(fields, index1, index2) ({ \
+ typeof(&(fields)[0]) __f = (fields); \
+ typeof(__f[0]) _f1 = __f[index1]; typeof(__f[0]) _f2 = __f[index2]; \
+ const bool _ascending = __f[0].startbit < __f[1].startbit; \
+ BUILD_BUG_ON_MSG(_ascending && _f1.startbit >= _f2.startbit, \
+ __stringify(fields) " field " __stringify(index2) \
+ " breaks ascending order"); \
+ BUILD_BUG_ON_MSG(!_ascending && _f1.startbit <= _f2.startbit, \
+ __stringify(fields) " field " __stringify(index2) \
+ " breaks descending order"); \
+ BUILD_BUG_ON_MSG(max(_f1.endbit, _f2.endbit) <= \
+ min(_f1.startbit, _f2.startbit), \
+ __stringify(fields) " field " __stringify(index2) \
+ " overlaps with previous field"); \
+})
+
+#define CHECK_PACKED_FIELD(fields, index) ({ \
+ typeof(&(fields)[0]) _f = (fields); \
+ typeof(_f[0]) __f = _f[index]; \
+ BUILD_BUG_ON_MSG(__f.startbit < __f.endbit, \
+ __stringify(fields) " field " __stringify(index) \
+ " start bit must not be smaller than end bit"); \
+ BUILD_BUG_ON_MSG(__f.size != 1 && __f.size != 2 && \
+ __f.size != 4 && __f.size != 8, \
+ __stringify(fields) " field " __stringify(index) \
+ " has unsupported unpacked storage size"); \
+ BUILD_BUG_ON_MSG(__f.startbit - __f.endbit >= BITS_PER_BYTE * __f.size, \
+ __stringify(fields) " field " __stringify(index) \
+ " exceeds unpacked storage size"); \
+ __builtin_choose_expr(index != 0, \
+ CHECK_PACKED_FIELD_OVERLAP(fields, index - 1, index), \
+ 1); \
+})
+
+/* Note that the packed fields may be either in ascending or descending order.
+ * Thus, we must check that both the first and last field fit within the
+ * packed buffer size.
+ */
+#define CHECK_PACKED_FIELDS_SIZE(fields, pbuflen) ({ \
+ typeof(&(fields)[0]) _f = (fields); \
+ typeof(pbuflen) _len = (pbuflen); \
+ const size_t num_fields = ARRAY_SIZE(fields); \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(_len), \
+ __stringify(fields) " pbuflen " __stringify(pbuflen) \
+ " must be a compile time constant"); \
+ BUILD_BUG_ON_MSG(_f[0].startbit >= BITS_PER_BYTE * _len, \
+ __stringify(fields) " first field exceeds packed buffer size"); \
+ BUILD_BUG_ON_MSG(_f[num_fields - 1].startbit >= BITS_PER_BYTE * _len, \
+ __stringify(fields) " last field exceeds packed buffer size"); \
+})
#define QUIRK_MSB_ON_THE_RIGHT BIT(0)
#define QUIRK_LITTLE_ENDIAN BIT(1)
@@ -17,33 +98,361 @@ enum packing_op {
UNPACK,
};
-/**
- * packing - Convert numbers (currently u64) between a packed and an unpacked
- * format. Unpacked means laid out in memory in the CPU's native
- * understanding of integers, while packed means anything else that
- * requires translation.
- *
- * @pbuf: Pointer to a buffer holding the packed value.
- * @uval: Pointer to an u64 holding the unpacked value.
- * @startbit: The index (in logical notation, compensated for quirks) where
- * the packed value starts within pbuf. Must be larger than, or
- * equal to, endbit.
- * @endbit: The index (in logical notation, compensated for quirks) where
- * the packed value ends within pbuf. Must be smaller than, or equal
- * to, startbit.
- * @op: If PACK, then uval will be treated as const pointer and copied (packed)
- * into pbuf, between startbit and endbit.
- * If UNPACK, then pbuf will be treated as const pointer and the logical
- * value between startbit and endbit will be copied (unpacked) to uval.
- * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
- * QUIRK_MSB_ON_THE_RIGHT.
- *
- * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming
- * correct usage, return code may be discarded.
- * If op is PACK, pbuf is modified.
- * If op is UNPACK, uval is modified.
- */
int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen,
enum packing_op op, u8 quirks);
+int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen,
+ u8 quirks);
+
+int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit,
+ size_t pbuflen, u8 quirks);
+
+void pack_fields_u8(void *pbuf, size_t pbuflen, const void *ustruct,
+ const struct packed_field_u8 *fields, size_t num_fields,
+ u8 quirks);
+
+void pack_fields_u16(void *pbuf, size_t pbuflen, const void *ustruct,
+ const struct packed_field_u16 *fields, size_t num_fields,
+ u8 quirks);
+
+void unpack_fields_u8(const void *pbuf, size_t pbuflen, void *ustruct,
+ const struct packed_field_u8 *fields, size_t num_fields,
+ u8 quirks);
+
+void unpack_fields_u16(const void *pbuf, size_t pbuflen, void *ustruct,
+ const struct packed_field_u16 *fields, size_t num_fields,
+ u8 quirks);
+
+/* Do not hand-edit the following packed field check macros!
+ *
+ * They are generated using scripts/gen_packed_field_checks.c, which may be
+ * built via "make scripts_gen_packed_field_checks". If larger macro sizes are
+ * needed in the future, please use this program to re-generate the macros and
+ * insert them here.
+ */
+
+#define CHECK_PACKED_FIELDS_1(fields) \
+ CHECK_PACKED_FIELD(fields, 0)
+
+#define CHECK_PACKED_FIELDS_2(fields) do { \
+ CHECK_PACKED_FIELDS_1(fields); \
+ CHECK_PACKED_FIELD(fields, 1); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_3(fields) do { \
+ CHECK_PACKED_FIELDS_2(fields); \
+ CHECK_PACKED_FIELD(fields, 2); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_4(fields) do { \
+ CHECK_PACKED_FIELDS_3(fields); \
+ CHECK_PACKED_FIELD(fields, 3); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_5(fields) do { \
+ CHECK_PACKED_FIELDS_4(fields); \
+ CHECK_PACKED_FIELD(fields, 4); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_6(fields) do { \
+ CHECK_PACKED_FIELDS_5(fields); \
+ CHECK_PACKED_FIELD(fields, 5); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_7(fields) do { \
+ CHECK_PACKED_FIELDS_6(fields); \
+ CHECK_PACKED_FIELD(fields, 6); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_8(fields) do { \
+ CHECK_PACKED_FIELDS_7(fields); \
+ CHECK_PACKED_FIELD(fields, 7); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_9(fields) do { \
+ CHECK_PACKED_FIELDS_8(fields); \
+ CHECK_PACKED_FIELD(fields, 8); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_10(fields) do { \
+ CHECK_PACKED_FIELDS_9(fields); \
+ CHECK_PACKED_FIELD(fields, 9); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_11(fields) do { \
+ CHECK_PACKED_FIELDS_10(fields); \
+ CHECK_PACKED_FIELD(fields, 10); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_12(fields) do { \
+ CHECK_PACKED_FIELDS_11(fields); \
+ CHECK_PACKED_FIELD(fields, 11); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_13(fields) do { \
+ CHECK_PACKED_FIELDS_12(fields); \
+ CHECK_PACKED_FIELD(fields, 12); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_14(fields) do { \
+ CHECK_PACKED_FIELDS_13(fields); \
+ CHECK_PACKED_FIELD(fields, 13); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_15(fields) do { \
+ CHECK_PACKED_FIELDS_14(fields); \
+ CHECK_PACKED_FIELD(fields, 14); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_16(fields) do { \
+ CHECK_PACKED_FIELDS_15(fields); \
+ CHECK_PACKED_FIELD(fields, 15); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_17(fields) do { \
+ CHECK_PACKED_FIELDS_16(fields); \
+ CHECK_PACKED_FIELD(fields, 16); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_18(fields) do { \
+ CHECK_PACKED_FIELDS_17(fields); \
+ CHECK_PACKED_FIELD(fields, 17); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_19(fields) do { \
+ CHECK_PACKED_FIELDS_18(fields); \
+ CHECK_PACKED_FIELD(fields, 18); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_20(fields) do { \
+ CHECK_PACKED_FIELDS_19(fields); \
+ CHECK_PACKED_FIELD(fields, 19); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_21(fields) do { \
+ CHECK_PACKED_FIELDS_20(fields); \
+ CHECK_PACKED_FIELD(fields, 20); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_22(fields) do { \
+ CHECK_PACKED_FIELDS_21(fields); \
+ CHECK_PACKED_FIELD(fields, 21); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_23(fields) do { \
+ CHECK_PACKED_FIELDS_22(fields); \
+ CHECK_PACKED_FIELD(fields, 22); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_24(fields) do { \
+ CHECK_PACKED_FIELDS_23(fields); \
+ CHECK_PACKED_FIELD(fields, 23); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_25(fields) do { \
+ CHECK_PACKED_FIELDS_24(fields); \
+ CHECK_PACKED_FIELD(fields, 24); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_26(fields) do { \
+ CHECK_PACKED_FIELDS_25(fields); \
+ CHECK_PACKED_FIELD(fields, 25); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_27(fields) do { \
+ CHECK_PACKED_FIELDS_26(fields); \
+ CHECK_PACKED_FIELD(fields, 26); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_28(fields) do { \
+ CHECK_PACKED_FIELDS_27(fields); \
+ CHECK_PACKED_FIELD(fields, 27); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_29(fields) do { \
+ CHECK_PACKED_FIELDS_28(fields); \
+ CHECK_PACKED_FIELD(fields, 28); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_30(fields) do { \
+ CHECK_PACKED_FIELDS_29(fields); \
+ CHECK_PACKED_FIELD(fields, 29); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_31(fields) do { \
+ CHECK_PACKED_FIELDS_30(fields); \
+ CHECK_PACKED_FIELD(fields, 30); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_32(fields) do { \
+ CHECK_PACKED_FIELDS_31(fields); \
+ CHECK_PACKED_FIELD(fields, 31); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_33(fields) do { \
+ CHECK_PACKED_FIELDS_32(fields); \
+ CHECK_PACKED_FIELD(fields, 32); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_34(fields) do { \
+ CHECK_PACKED_FIELDS_33(fields); \
+ CHECK_PACKED_FIELD(fields, 33); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_35(fields) do { \
+ CHECK_PACKED_FIELDS_34(fields); \
+ CHECK_PACKED_FIELD(fields, 34); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_36(fields) do { \
+ CHECK_PACKED_FIELDS_35(fields); \
+ CHECK_PACKED_FIELD(fields, 35); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_37(fields) do { \
+ CHECK_PACKED_FIELDS_36(fields); \
+ CHECK_PACKED_FIELD(fields, 36); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_38(fields) do { \
+ CHECK_PACKED_FIELDS_37(fields); \
+ CHECK_PACKED_FIELD(fields, 37); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_39(fields) do { \
+ CHECK_PACKED_FIELDS_38(fields); \
+ CHECK_PACKED_FIELD(fields, 38); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_40(fields) do { \
+ CHECK_PACKED_FIELDS_39(fields); \
+ CHECK_PACKED_FIELD(fields, 39); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_41(fields) do { \
+ CHECK_PACKED_FIELDS_40(fields); \
+ CHECK_PACKED_FIELD(fields, 40); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_42(fields) do { \
+ CHECK_PACKED_FIELDS_41(fields); \
+ CHECK_PACKED_FIELD(fields, 41); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_43(fields) do { \
+ CHECK_PACKED_FIELDS_42(fields); \
+ CHECK_PACKED_FIELD(fields, 42); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_44(fields) do { \
+ CHECK_PACKED_FIELDS_43(fields); \
+ CHECK_PACKED_FIELD(fields, 43); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_45(fields) do { \
+ CHECK_PACKED_FIELDS_44(fields); \
+ CHECK_PACKED_FIELD(fields, 44); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_46(fields) do { \
+ CHECK_PACKED_FIELDS_45(fields); \
+ CHECK_PACKED_FIELD(fields, 45); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_47(fields) do { \
+ CHECK_PACKED_FIELDS_46(fields); \
+ CHECK_PACKED_FIELD(fields, 46); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_48(fields) do { \
+ CHECK_PACKED_FIELDS_47(fields); \
+ CHECK_PACKED_FIELD(fields, 47); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_49(fields) do { \
+ CHECK_PACKED_FIELDS_48(fields); \
+ CHECK_PACKED_FIELD(fields, 48); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS_50(fields) do { \
+ CHECK_PACKED_FIELDS_49(fields); \
+ CHECK_PACKED_FIELD(fields, 49); \
+} while (0)
+
+#define CHECK_PACKED_FIELDS(fields) \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 1, ({ CHECK_PACKED_FIELDS_1(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 2, ({ CHECK_PACKED_FIELDS_2(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 3, ({ CHECK_PACKED_FIELDS_3(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 4, ({ CHECK_PACKED_FIELDS_4(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 5, ({ CHECK_PACKED_FIELDS_5(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 6, ({ CHECK_PACKED_FIELDS_6(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 7, ({ CHECK_PACKED_FIELDS_7(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 8, ({ CHECK_PACKED_FIELDS_8(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 9, ({ CHECK_PACKED_FIELDS_9(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 10, ({ CHECK_PACKED_FIELDS_10(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 11, ({ CHECK_PACKED_FIELDS_11(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 12, ({ CHECK_PACKED_FIELDS_12(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 13, ({ CHECK_PACKED_FIELDS_13(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 14, ({ CHECK_PACKED_FIELDS_14(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 15, ({ CHECK_PACKED_FIELDS_15(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 16, ({ CHECK_PACKED_FIELDS_16(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 17, ({ CHECK_PACKED_FIELDS_17(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 18, ({ CHECK_PACKED_FIELDS_18(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 19, ({ CHECK_PACKED_FIELDS_19(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 20, ({ CHECK_PACKED_FIELDS_20(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 21, ({ CHECK_PACKED_FIELDS_21(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 22, ({ CHECK_PACKED_FIELDS_22(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 23, ({ CHECK_PACKED_FIELDS_23(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 24, ({ CHECK_PACKED_FIELDS_24(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 25, ({ CHECK_PACKED_FIELDS_25(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 26, ({ CHECK_PACKED_FIELDS_26(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 27, ({ CHECK_PACKED_FIELDS_27(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 28, ({ CHECK_PACKED_FIELDS_28(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 29, ({ CHECK_PACKED_FIELDS_29(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 30, ({ CHECK_PACKED_FIELDS_30(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 31, ({ CHECK_PACKED_FIELDS_31(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 32, ({ CHECK_PACKED_FIELDS_32(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 33, ({ CHECK_PACKED_FIELDS_33(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 34, ({ CHECK_PACKED_FIELDS_34(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 35, ({ CHECK_PACKED_FIELDS_35(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 36, ({ CHECK_PACKED_FIELDS_36(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 37, ({ CHECK_PACKED_FIELDS_37(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 38, ({ CHECK_PACKED_FIELDS_38(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 39, ({ CHECK_PACKED_FIELDS_39(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 40, ({ CHECK_PACKED_FIELDS_40(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 41, ({ CHECK_PACKED_FIELDS_41(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 42, ({ CHECK_PACKED_FIELDS_42(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 43, ({ CHECK_PACKED_FIELDS_43(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 44, ({ CHECK_PACKED_FIELDS_44(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 45, ({ CHECK_PACKED_FIELDS_45(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 46, ({ CHECK_PACKED_FIELDS_46(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 47, ({ CHECK_PACKED_FIELDS_47(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 48, ({ CHECK_PACKED_FIELDS_48(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 49, ({ CHECK_PACKED_FIELDS_49(fields); }), \
+ __builtin_choose_expr(ARRAY_SIZE(fields) == 50, ({ CHECK_PACKED_FIELDS_50(fields); }), \
+ ({ BUILD_BUG_ON_MSG(1, "CHECK_PACKED_FIELDS() must be regenerated to support array sizes larger than 50."); }) \
+))))))))))))))))))))))))))))))))))))))))))))))))))
+
+/* End of generated content */
+
+#define pack_fields(pbuf, pbuflen, ustruct, fields, quirks) \
+ ({ \
+ CHECK_PACKED_FIELDS(fields); \
+ CHECK_PACKED_FIELDS_SIZE((fields), (pbuflen)); \
+ _Generic((fields), \
+ const struct packed_field_u8 * : pack_fields_u8, \
+ const struct packed_field_u16 * : pack_fields_u16 \
+ )((pbuf), (pbuflen), (ustruct), (fields), ARRAY_SIZE(fields), (quirks)); \
+ })
+
+#define unpack_fields(pbuf, pbuflen, ustruct, fields, quirks) \
+ ({ \
+ CHECK_PACKED_FIELDS(fields); \
+ CHECK_PACKED_FIELDS_SIZE((fields), (pbuflen)); \
+ _Generic((fields), \
+ const struct packed_field_u8 * : unpack_fields_u8, \
+ const struct packed_field_u16 * : unpack_fields_u16 \
+ )((pbuf), (pbuflen), (ustruct), (fields), ARRAY_SIZE(fields), (quirks)); \
+ })
+
#endif
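A sketch of the field-packing API (struct hdr and its bit layout are hypothetical). The fields array must be const so _Generic() selects the right implementation, and the layout checks all run at compile time:

	struct hdr {
		u8 dest;
		u8 prio;
	};

	static const struct packed_field_u8 hdr_fields[] = {
		PACKED_FIELD(15, 8, struct hdr, dest),	/* descending order */
		PACKED_FIELD(7, 5, struct hdr, prio),
	};

	u8 buf[2] = {};
	struct hdr h = { .dest = 5, .prio = 2 };

	pack_fields(buf, sizeof(buf), &h, hdr_fields, 0);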
diff --git a/include/linux/padata.h b/include/linux/padata.h
index a433f13fc4bf..765f2778e264 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -12,6 +12,7 @@
#ifndef PADATA_H
#define PADATA_H
+#include <linux/refcount.h>
#include <linux/compiler_types.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
@@ -89,20 +90,16 @@ struct padata_cpumask {
* @processed: Number of already processed objects.
* @cpu: Next CPU to be processed.
* @cpumask: The cpumasks in use for parallel and serial workers.
- * @reorder_work: work struct for reordering.
- * @lock: Reorder lock.
*/
struct parallel_data {
struct padata_shell *ps;
struct padata_list __percpu *reorder_list;
struct padata_serial_queue __percpu *squeue;
- atomic_t refcnt;
+ refcount_t refcnt;
unsigned int seq_nr;
unsigned int processed;
int cpu;
struct padata_cpumask cpumask;
- struct work_struct reorder_work;
- spinlock_t ____cacheline_aligned lock;
};
/**
@@ -136,6 +133,7 @@ struct padata_shell {
* appropriate for one worker thread to do at once.
* @max_threads: Max threads to use for the job, actual number may be less
* depending on task size and minimum chunk size.
+ * @numa_aware: Distribute jobs to different NUMA nodes, picking CPUs in a round-robin fashion.
*/
struct padata_mt_job {
void (*thread_fn)(unsigned long start, unsigned long end, void *arg);
@@ -145,6 +143,7 @@ struct padata_mt_job {
unsigned long align;
unsigned long min_chunk;
int max_threads;
+ bool numa_aware;
};
/**
@@ -177,10 +176,6 @@ struct padata_instance {
#ifdef CONFIG_PADATA
extern void __init padata_init(void);
-#else
-static inline void __init padata_init(void) {}
-#endif
-
extern struct padata_instance *padata_alloc(const char *name);
extern void padata_free(struct padata_instance *pinst);
extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);
@@ -191,4 +186,12 @@ extern void padata_do_serial(struct padata_priv *padata);
extern void __init padata_do_multithreaded(struct padata_mt_job *job);
extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
cpumask_var_t cpumask);
+#else
+static inline void __init padata_init(void) {}
+static inline void __init padata_do_multithreaded(struct padata_mt_job *job)
+{
+ job->thread_fn(job->start, job->start + job->size, job->fn_arg);
+}
+#endif
+
#endif
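The new !CONFIG_PADATA stub above simply runs the whole range synchronously in the caller, so users of padata_do_multithreaded() keep a single code path either way. A hedged sketch of such a caller, with invented names and tuning values:

static void zero_range(unsigned long start, unsigned long end, void *arg)
{
	/* operate on items in [start, end); arg carries shared state */
}

static void __init init_items(unsigned long nr_items)
{
	struct padata_mt_job job = {
		.thread_fn   = zero_range,
		.fn_arg      = NULL,
		.start       = 0,
		.size        = nr_items,
		.align       = 1,
		.min_chunk   = 1024,		/* invented tuning values */
		.max_threads = 8,
		.numa_aware  = true,		/* round-robin over nodes */
	};

	padata_do_multithreaded(&job);
}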
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index ef1e3e736e14..760006b1c480 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -55,7 +55,8 @@
#define SECTIONS_WIDTH 0
#endif
-#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_SHIFT \
+ <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH NODES_SHIFT
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
#error "Vmemmap: No space for nodes field in page flags"
@@ -71,8 +72,10 @@
#define NODE_NOT_IN_PAGE_FLAGS 1
#endif
-#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+#if defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_TAG_WIDTH 8
+#elif defined(CONFIG_KASAN_HW_TAGS)
+#define KASAN_TAG_WIDTH 4
#else
#define KASAN_TAG_WIDTH 0
#endif
@@ -89,8 +92,8 @@
#define LAST_CPUPID_SHIFT 0
#endif
-#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT \
- <= BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
+ KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
#else
#define LAST_CPUPID_WIDTH 0
@@ -100,10 +103,22 @@
#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
#endif
-#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH \
- > BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
+ KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error "Not enough bits in page flags"
#endif
+/* see the comment on MAX_NR_TIERS */
+#define LRU_REFS_WIDTH min(__LRU_REFS_WIDTH, BITS_PER_LONG - NR_PAGEFLAGS - \
+ ZONES_WIDTH - LRU_GEN_WIDTH - SECTIONS_WIDTH - \
+ NODES_WIDTH - KASAN_TAG_WIDTH - LAST_CPUPID_WIDTH)
+
+#define NR_NON_PAGEFLAG_BITS (SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + \
+ LAST_CPUPID_SHIFT + KASAN_TAG_WIDTH + \
+ LRU_GEN_WIDTH + LRU_REFS_WIDTH)
+
+#define NR_UNUSED_PAGEFLAG_BITS (BITS_PER_LONG - \
+ (NR_NON_PAGEFLAG_BITS + NR_PAGEFLAGS))
+
#endif
#endif /* _LINUX_PAGE_FLAGS_LAYOUT */
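To see how the LRU_REFS_WIDTH clamp behaves, here is a worked example under assumed Kconfig values:

/*
 * Worked example (all widths are Kconfig dependent; these values are
 * hypothetical). On 64-bit with NR_PAGEFLAGS = 24, ZONES_WIDTH = 3,
 * LRU_GEN_WIDTH = 3, SECTIONS_WIDTH = 0, NODES_WIDTH = 10,
 * KASAN_TAG_WIDTH = 0 and LAST_CPUPID_WIDTH = 12:
 *
 *   LRU_REFS_WIDTH = min(__LRU_REFS_WIDTH, 64 - 24 - 3 - 3 - 0 - 10 - 0 - 12)
 *                  = min(__LRU_REFS_WIDTH, 12)
 *
 * i.e. the LRU refcount absorbs whatever bits are left over and may
 * legitimately end up zero bits wide.
 */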
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 04a34c08e0a6..f7a0e4af0c73 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -30,16 +30,11 @@
* - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
* to read/write these pages might end badly. Don't touch!
* - The zero page(s)
- * - Pages not added to the page allocator when onlining a section because
- * they were excluded via the online_page_callback() or because they are
- * PG_hwpoison.
* - Pages allocated in the context of kexec/kdump (loaded kernel image,
* control pages, vmcoreinfo)
* - MMIO/DMA pages. Some architectures don't allow to ioremap pages that are
* not marked PG_reserved (as they might be in use by somebody else who does
* not respect the caching strategy).
- * - Pages part of an offline section (struct pages of offline sections should
- * not be trusted as they will be initialized when first onlined).
* - MCA pages on ia64
* - Pages holding CPU notes for POWER Firmware Assisted Dump
* - Device memory (e.g. PMEM, DAX, HMM)
@@ -68,14 +63,9 @@
* might lose their PG_swapbacked flag when they simply can be dropped (e.g. as
* a result of MADV_FREE).
*
- * PG_uptodate tells whether the page's contents is valid. When a read
- * completes, the page becomes uptodate, unless a disk I/O error happened.
- *
* PG_referenced, PG_reclaim are used for page reclaim for anonymous and
* file-backed pagecache (see mm/vmscan.c).
*
- * PG_error is set to indicate that an I/O error occurred on this page.
- *
* PG_arch_1 is an architecture specific page state bit. The generic code
* guarantees that this bit is cleared for a page when it first is entered into
* the page cache.
@@ -102,49 +92,64 @@
*/
enum pageflags {
PG_locked, /* Page is locked. Don't touch. */
+ PG_writeback, /* Page is under writeback */
PG_referenced,
PG_uptodate,
PG_dirty,
PG_lru,
+ PG_head, /* Must be in bit 6 */
+ PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_active,
PG_workingset,
- PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
- PG_error,
- PG_slab,
- PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
+ PG_owner_priv_1, /* Owner use. If pagecache, fs may use */
+ PG_owner_2, /* Owner use. If pagecache, fs may use */
PG_arch_1,
PG_reserved,
PG_private, /* If pagecache, has fs-private data */
PG_private_2, /* If pagecache, has fs aux data */
- PG_writeback, /* Page is under writeback */
- PG_head, /* A head page */
- PG_mappedtodisk, /* Has blocks allocated on-disk */
PG_reclaim, /* To be reclaimed asap */
PG_swapbacked, /* Page is backed by RAM/swap */
PG_unevictable, /* Page is "unevictable" */
+ PG_dropbehind, /* drop pages on IO completion */
#ifdef CONFIG_MMU
PG_mlocked, /* Page is vma mlocked */
#endif
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
- PG_uncached, /* Page has been mapped as uncached */
-#endif
#ifdef CONFIG_MEMORY_FAILURE
PG_hwpoison, /* hardware poisoned page. Don't touch */
#endif
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
+#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
PG_young,
PG_idle,
#endif
-#ifdef CONFIG_64BIT
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
PG_arch_2,
#endif
+#ifdef CONFIG_ARCH_USES_PG_ARCH_3
+ PG_arch_3,
+#endif
__NR_PAGEFLAGS,
- /* Filesystems */
+ PG_readahead = PG_reclaim,
+
+ /* Anonymous memory (and shmem) */
+ PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
+ /* Some filesystems */
PG_checked = PG_owner_priv_1,
- /* SwapBacked */
- PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */
+ /*
+ * Depending on the way an anonymous folio can be mapped into a page
+ * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
+ * THP), PG_anon_exclusive may be set only for the head page or for
+ * tail pages of an anonymous folio. For now, we only expect it to be
+ * set on tail pages for PTE-mapped THP.
+ */
+ PG_anon_exclusive = PG_owner_2,
+
+ /*
+ * Set if all buffer heads in the folio are mapped.
+ * Filesystems which do not use BHs can use it for their own purpose.
+ */
+ PG_mappedtodisk = PG_owner_2,
/* Two page bits are conscripted by FS-Cache to maintain local caching
* state. These bits are set on pages belonging to the netfs's inodes
@@ -162,46 +167,172 @@ enum pageflags {
/* Remapped by swiotlb-xen. */
PG_xen_remapped = PG_owner_priv_1,
- /* SLOB */
- PG_slob_free = PG_private,
-
- /* Compound pages. Stored in first tail page's flags */
- PG_double_map = PG_workingset,
-
- /* non-lru isolated movable page */
- PG_isolated = PG_reclaim,
+#ifdef CONFIG_MIGRATION
+ /* movable_ops page that is isolated for migration */
+ PG_movable_ops_isolated = PG_reclaim,
+ /* this is a movable_ops page (for selected typed pages only) */
+ PG_movable_ops = PG_uptodate,
+#endif
/* Only valid for buddy pages. Used to track pages that are reported */
PG_reported = PG_uptodate,
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+ /* For self-hosted memmap pages */
+ PG_vmemmap_self_hosted = PG_owner_priv_1,
+#endif
+
+ /*
+ * Flags only valid for compound pages. Stored in first tail page's
+ * flags word. Cannot use the first 8 flags or any flag marked as
+ * PF_ANY.
+ */
+
+ /* At least one page in this folio has the hwpoison flag set */
+ PG_has_hwpoisoned = PG_active,
+ PG_large_rmappable = PG_workingset, /* anon or file-backed */
+ PG_partially_mapped = PG_reclaim, /* was identified to be partially mapped */
};
+#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
+
#ifndef __GENERATING_BOUNDS_H
-struct page; /* forward declaration */
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
-static inline struct page *compound_head(struct page *page)
+/*
+ * Return the real head page struct iff the @page is a fake head page, otherwise
+ * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
+ */
+static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
+{
+ if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
+ return page;
+
+ /*
+	 * Only addresses aligned with PAGE_SIZE of struct page may be fake
+	 * head struct page. The alignment check aims to avoid accessing the
+	 * fields (e.g. compound_head) of @page[1], which can avoid touching
+	 * a (possibly) cold cacheline in some cases.
+ */
+ if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
+ test_bit(PG_head, &page->flags.f)) {
+ /*
+	 * We can safely access the fields of @page[1] with PG_head
+	 * because the @page is a compound page composed of at least
+	 * two contiguous pages.
+ */
+ unsigned long head = READ_ONCE(page[1].compound_head);
+
+ if (likely(head & 1))
+ return (const struct page *)(head - 1);
+ }
+ return page;
+}
+
+static __always_inline bool page_count_writable(const struct page *page, int u)
+{
+ if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
+ return true;
+
+ /*
+ * The refcount check is ordered before the fake-head check to prevent
+ * the following race:
+ * CPU 1 (HVO) CPU 2 (speculative PFN walker)
+ *
+ * page_ref_freeze()
+ * synchronize_rcu()
+ * rcu_read_lock()
+ * page_is_fake_head() is false
+ * vmemmap_remap_pte()
+ * XXX: struct page[] becomes r/o
+ *
+ * page_ref_unfreeze()
+ * page_ref_count() is not zero
+ *
+ * atomic_add_unless(&page->_refcount)
+ * XXX: try to modify r/o struct page[]
+ *
+ * The refcount check also prevents modification attempts to other (r/o)
+ * tail pages that are not fake heads.
+ */
+ if (atomic_read_acquire(&page->_refcount) == u)
+ return false;
+
+ return page_fixed_fake_head(page) == page;
+}
+#else
+static inline const struct page *page_fixed_fake_head(const struct page *page)
+{
+ return page;
+}
+
+static inline bool page_count_writable(const struct page *page, int u)
+{
+ return true;
+}
+#endif
+
+static __always_inline int page_is_fake_head(const struct page *page)
+{
+ return page_fixed_fake_head(page) != page;
+}
+
+static __always_inline unsigned long _compound_head(const struct page *page)
{
unsigned long head = READ_ONCE(page->compound_head);
if (unlikely(head & 1))
- return (struct page *) (head - 1);
- return page;
+ return head - 1;
+ return (unsigned long)page_fixed_fake_head(page);
}
-static __always_inline int PageTail(struct page *page)
+#define compound_head(page) ((typeof(page))_compound_head(page))
+
+/**
+ * page_folio - Converts from page to folio.
+ * @p: The page.
+ *
+ * Every page is part of a folio. This function cannot be called on a
+ * NULL pointer.
+ *
+ * Context: Neither a reference nor a lock is required on @p. If the
+ * caller does not hold a reference, this call may race with a folio
+ * split, so it should re-check that the folio still contains this page
+ * after gaining a reference on the folio.
+ * Return: The folio which contains this page.
+ */
+#define page_folio(p) (_Generic((p), \
+ const struct page *: (const struct folio *)_compound_head(p), \
+ struct page *: (struct folio *)_compound_head(p)))
+
+/**
+ * folio_page - Return a page from a folio.
+ * @folio: The folio.
+ * @n: The page number to return.
+ *
+ * @n is relative to the start of the folio. This function does not
+ * check that the page number lies within @folio; the caller is presumed
+ * to have a reference to the page.
+ */
+#define folio_page(folio, n) (&(folio)->page + (n))
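A minimal sketch of the re-check pattern that the page_folio() kernel-doc asks of unreferenced callers, assuming a speculative PFN walker (inspect_pfn() is invented):

static void inspect_pfn(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	struct folio *folio = page_folio(page);

	if (!folio_try_get(folio))
		return;

	/* the folio may have been split before we got the reference */
	if (unlikely(page_folio(page) != folio)) {
		folio_put(folio);
		return;
	}

	/* ... the folio is stable enough to inspect here ... */

	folio_put(folio);
}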
+
+static __always_inline int PageTail(const struct page *page)
{
- return READ_ONCE(page->compound_head) & 1;
+ return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}
-static __always_inline int PageCompound(struct page *page)
+static __always_inline int PageCompound(const struct page *page)
{
- return test_bit(PG_head, &page->flags) || PageTail(page);
+ return test_bit(PG_head, &page->flags.f) ||
+ READ_ONCE(page->compound_head) & 1;
}
#define PAGE_POISON_PATTERN -1l
static inline int PagePoisoned(const struct page *page)
{
- return page->flags == PAGE_POISON_PATTERN;
+ return READ_ONCE(page->flags.f) == PAGE_POISON_PATTERN;
}
#ifdef CONFIG_DEBUG_VM
@@ -212,6 +343,25 @@ static inline void page_init_poison(struct page *page, size_t size)
}
#endif
+static const unsigned long *const_folio_flags(const struct folio *folio,
+ unsigned n)
+{
+ const struct page *page = &folio->page;
+
+ VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
+ VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
+ return &page[n].flags.f;
+}
+
+static unsigned long *folio_flags(struct folio *folio, unsigned n)
+{
+ struct page *page = &folio->page;
+
+ VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
+ VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
+ return &page[n].flags.f;
+}
+
/*
* Page flags policies wrt compound pages
*
@@ -225,9 +375,6 @@ static inline void page_init_poison(struct page *page, size_t size)
* for compound page all operations related to the page flag applied to
* head page.
*
- * PF_ONLY_HEAD:
- * for compound page, callers only ever operate on the head page.
- *
* PF_NO_TAIL:
* modifications of the page flag must be done on small or head pages,
* checks can be done on tail pages too.
@@ -243,9 +390,6 @@ static inline void page_init_poison(struct page *page, size_t size)
page; })
#define PF_ANY(page, enforce) PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce) PF_POISONED_CHECK(compound_head(page))
-#define PF_ONLY_HEAD(page, enforce) ({ \
- VM_BUG_ON_PGFLAGS(PageTail(page), page); \
- PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({ \
VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page); \
PF_POISONED_CHECK(compound_head(page)); })
@@ -256,36 +400,86 @@ static inline void page_init_poison(struct page *page, size_t size)
VM_BUG_ON_PGFLAGS(!PageHead(page), page); \
PF_POISONED_CHECK(&page[1]); })
+/* Which page is the flag stored in */
+#define FOLIO_PF_ANY 0
+#define FOLIO_PF_HEAD 0
+#define FOLIO_PF_NO_TAIL 0
+#define FOLIO_PF_NO_COMPOUND 0
+#define FOLIO_PF_SECOND 1
+
+#define FOLIO_HEAD_PAGE 0
+#define FOLIO_SECOND_PAGE 1
+
/*
* Macros to create function definitions for page flags
*/
+#define FOLIO_TEST_FLAG(name, page) \
+static __always_inline bool folio_test_##name(const struct folio *folio) \
+{ return test_bit(PG_##name, const_folio_flags(folio, page)); }
+
+#define FOLIO_SET_FLAG(name, page) \
+static __always_inline void folio_set_##name(struct folio *folio) \
+{ set_bit(PG_##name, folio_flags(folio, page)); }
+
+#define FOLIO_CLEAR_FLAG(name, page) \
+static __always_inline void folio_clear_##name(struct folio *folio) \
+{ clear_bit(PG_##name, folio_flags(folio, page)); }
+
+#define __FOLIO_SET_FLAG(name, page) \
+static __always_inline void __folio_set_##name(struct folio *folio) \
+{ __set_bit(PG_##name, folio_flags(folio, page)); }
+
+#define __FOLIO_CLEAR_FLAG(name, page) \
+static __always_inline void __folio_clear_##name(struct folio *folio) \
+{ __clear_bit(PG_##name, folio_flags(folio, page)); }
+
+#define FOLIO_TEST_SET_FLAG(name, page) \
+static __always_inline bool folio_test_set_##name(struct folio *folio) \
+{ return test_and_set_bit(PG_##name, folio_flags(folio, page)); }
+
+#define FOLIO_TEST_CLEAR_FLAG(name, page) \
+static __always_inline bool folio_test_clear_##name(struct folio *folio) \
+{ return test_and_clear_bit(PG_##name, folio_flags(folio, page)); }
+
+#define FOLIO_FLAG(name, page) \
+FOLIO_TEST_FLAG(name, page) \
+FOLIO_SET_FLAG(name, page) \
+FOLIO_CLEAR_FLAG(name, page)
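As an illustration of the expansion, FOLIO_FLAG(mappedtodisk, FOLIO_HEAD_PAGE) used further down generates roughly (FOLIO_HEAD_PAGE is 0, so the flag always lives in the head page's flags word):

static __always_inline bool folio_test_mappedtodisk(const struct folio *folio)
{
	return test_bit(PG_mappedtodisk, const_folio_flags(folio, 0));
}

static __always_inline void folio_set_mappedtodisk(struct folio *folio)
{
	set_bit(PG_mappedtodisk, folio_flags(folio, 0));
}

static __always_inline void folio_clear_mappedtodisk(struct folio *folio)
{
	clear_bit(PG_mappedtodisk, folio_flags(folio, 0));
}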
+
#define TESTPAGEFLAG(uname, lname, policy) \
-static __always_inline int Page##uname(struct page *page) \
- { return test_bit(PG_##lname, &policy(page, 0)->flags); }
+FOLIO_TEST_FLAG(lname, FOLIO_##policy) \
+static __always_inline int Page##uname(const struct page *page) \
+{ return test_bit(PG_##lname, &policy(page, 0)->flags.f); }
#define SETPAGEFLAG(uname, lname, policy) \
+FOLIO_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline void SetPage##uname(struct page *page) \
- { set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ set_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define CLEARPAGEFLAG(uname, lname, policy) \
+FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline void ClearPage##uname(struct page *page) \
- { clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define __SETPAGEFLAG(uname, lname, policy) \
+__FOLIO_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline void __SetPage##uname(struct page *page) \
- { __set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __set_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define __CLEARPAGEFLAG(uname, lname, policy) \
+__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline void __ClearPage##uname(struct page *page) \
- { __clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define TESTSETFLAG(uname, lname, policy) \
+FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline int TestSetPage##uname(struct page *page) \
- { return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define TESTCLEARFLAG(uname, lname, policy) \
+FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline int TestClearPage##uname(struct page *page) \
- { return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define PAGEFLAG(uname, lname, policy) \
TESTPAGEFLAG(uname, lname, policy) \
@@ -301,46 +495,73 @@ static __always_inline int TestClearPage##uname(struct page *page) \
TESTSETFLAG(uname, lname, policy) \
TESTCLEARFLAG(uname, lname, policy)
-#define TESTPAGEFLAG_FALSE(uname) \
+#define FOLIO_TEST_FLAG_FALSE(name) \
+static inline bool folio_test_##name(const struct folio *folio) \
+{ return false; }
+#define FOLIO_SET_FLAG_NOOP(name) \
+static inline void folio_set_##name(struct folio *folio) { }
+#define FOLIO_CLEAR_FLAG_NOOP(name) \
+static inline void folio_clear_##name(struct folio *folio) { }
+#define __FOLIO_SET_FLAG_NOOP(name) \
+static inline void __folio_set_##name(struct folio *folio) { }
+#define __FOLIO_CLEAR_FLAG_NOOP(name) \
+static inline void __folio_clear_##name(struct folio *folio) { }
+#define FOLIO_TEST_SET_FLAG_FALSE(name) \
+static inline bool folio_test_set_##name(struct folio *folio) \
+{ return false; }
+#define FOLIO_TEST_CLEAR_FLAG_FALSE(name) \
+static inline bool folio_test_clear_##name(struct folio *folio) \
+{ return false; }
+
+#define FOLIO_FLAG_FALSE(name) \
+FOLIO_TEST_FLAG_FALSE(name) \
+FOLIO_SET_FLAG_NOOP(name) \
+FOLIO_CLEAR_FLAG_NOOP(name)
+
+#define TESTPAGEFLAG_FALSE(uname, lname) \
+FOLIO_TEST_FLAG_FALSE(lname) \
static inline int Page##uname(const struct page *page) { return 0; }
-#define SETPAGEFLAG_NOOP(uname) \
+#define SETPAGEFLAG_NOOP(uname, lname) \
+FOLIO_SET_FLAG_NOOP(lname) \
static inline void SetPage##uname(struct page *page) { }
-#define CLEARPAGEFLAG_NOOP(uname) \
+#define CLEARPAGEFLAG_NOOP(uname, lname) \
+FOLIO_CLEAR_FLAG_NOOP(lname) \
static inline void ClearPage##uname(struct page *page) { }
-#define __CLEARPAGEFLAG_NOOP(uname) \
+#define __CLEARPAGEFLAG_NOOP(uname, lname) \
+__FOLIO_CLEAR_FLAG_NOOP(lname) \
static inline void __ClearPage##uname(struct page *page) { }
-#define TESTSETFLAG_FALSE(uname) \
+#define TESTSETFLAG_FALSE(uname, lname) \
+FOLIO_TEST_SET_FLAG_FALSE(lname) \
static inline int TestSetPage##uname(struct page *page) { return 0; }
-#define TESTCLEARFLAG_FALSE(uname) \
+#define TESTCLEARFLAG_FALSE(uname, lname) \
+FOLIO_TEST_CLEAR_FLAG_FALSE(lname) \
static inline int TestClearPage##uname(struct page *page) { return 0; }
-#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname) \
- SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)
+#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname) \
+ SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)
-#define TESTSCFLAG_FALSE(uname) \
- TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
+#define TESTSCFLAG_FALSE(uname, lname) \
+ TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
-PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
-PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
-PAGEFLAG(Referenced, referenced, PF_HEAD)
- TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
- __SETPAGEFLAG(Referenced, referenced, PF_HEAD)
+FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
+FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
+ __FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
TESTCLEARFLAG(LRU, lru, PF_HEAD)
-PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
- TESTCLEARFLAG(Active, active, PF_HEAD)
+FOLIO_FLAG(active, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(active, FOLIO_HEAD_PAGE)
PAGEFLAG(Workingset, workingset, PF_HEAD)
TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
-__PAGEFLAG(Slab, slab, PF_NO_TAIL)
-__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */
/* Xen */
@@ -354,19 +575,20 @@ PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
-PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
- __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
- __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
+FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(swapbacked, FOLIO_HEAD_PAGE)
+ __FOLIO_SET_FLAG(swapbacked, FOLIO_HEAD_PAGE)
/*
* Private page markings that may be used by the filesystem that owns the page
* for its own purposes.
- * - PG_private and PG_private_2 cause releasepage() and co to be invoked
+ * - PG_private and PG_private_2 cause release_folio() and co to be invoked
*/
PAGEFLAG(Private, private, PF_ANY)
-PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
-PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
- TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
+FOLIO_FLAG(private_2, FOLIO_HEAD_PAGE)
+
+/* owner_2 can be set on tail pages for anon memory */
+FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE)
/*
* Only test-and-set exist for PG_writeback. The unconditional operators are
@@ -374,13 +596,17 @@ PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
*/
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
-PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
+FOLIO_FLAG(mappedtodisk, FOLIO_HEAD_PAGE)
/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
-PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
- TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
+FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE)
+
+FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(dropbehind, FOLIO_HEAD_PAGE)
+ __FOLIO_SET_FLAG(dropbehind, FOLIO_HEAD_PAGE)
#ifdef CONFIG_HIGHMEM
/*
@@ -388,59 +614,69 @@ PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
* available at this point.
*/
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
+#define folio_test_highmem(__f) is_highmem_idx(folio_zonenum(__f))
#else
-PAGEFLAG_FALSE(HighMem)
+PAGEFLAG_FALSE(HighMem, highmem)
#endif
+#define PhysHighMem(__p) (PageHighMem(phys_to_page(__p)))
-#ifdef CONFIG_SWAP
-static __always_inline int PageSwapCache(struct page *page)
-{
-#ifdef CONFIG_THP_SWAP
- page = compound_head(page);
+/* Does kmap_local_folio() only allow access to one page of the folio? */
+#ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
+#define folio_test_partial_kmap(f) true
+#else
+#define folio_test_partial_kmap(f) folio_test_highmem(f)
#endif
- return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
+#ifdef CONFIG_SWAP
+static __always_inline bool folio_test_swapcache(const struct folio *folio)
+{
+ return folio_test_swapbacked(folio) &&
+ test_bit(PG_swapcache, const_folio_flags(folio, 0));
}
-SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
-CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
+
+FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE)
+FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE)
#else
-PAGEFLAG_FALSE(SwapCache)
+FOLIO_FLAG_FALSE(swapcache)
#endif
-PAGEFLAG(Unevictable, unevictable, PF_HEAD)
- __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
- TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)
+FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
#ifdef CONFIG_MMU
-PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
- __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
- TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
+FOLIO_FLAG(mlocked, FOLIO_HEAD_PAGE)
+ __FOLIO_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE)
+ FOLIO_TEST_SET_FLAG(mlocked, FOLIO_HEAD_PAGE)
#else
-PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
- TESTSCFLAG_FALSE(Mlocked)
-#endif
-
-#ifdef CONFIG_ARCH_USES_PG_UNCACHED
-PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
-#else
-PAGEFLAG_FALSE(Uncached)
+FOLIO_FLAG_FALSE(mlocked)
+ __FOLIO_CLEAR_FLAG_NOOP(mlocked)
+ FOLIO_TEST_CLEAR_FLAG_FALSE(mlocked)
+ FOLIO_TEST_SET_FLAG_FALSE(mlocked)
#endif
#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
-extern bool take_page_off_buddy(struct page *page);
#else
-PAGEFLAG_FALSE(HWPoison)
+PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
-TESTPAGEFLAG(Young, young, PF_ANY)
-SETPAGEFLAG(Young, young, PF_ANY)
-TESTCLEARFLAG(Young, young, PF_ANY)
-PAGEFLAG(Idle, idle, PF_ANY)
+#ifdef CONFIG_PAGE_IDLE_FLAG
+#ifdef CONFIG_64BIT
+FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE)
+FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE)
+FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE)
+FOLIO_FLAG(idle, FOLIO_HEAD_PAGE)
+#endif
+/* See page_idle.h for !64BIT workaround */
+#else /* !CONFIG_PAGE_IDLE_FLAG */
+FOLIO_FLAG_FALSE(young)
+FOLIO_TEST_CLEAR_FLAG_FALSE(young)
+FOLIO_FLAG_FALSE(idle)
#endif
/*
@@ -451,45 +687,54 @@ PAGEFLAG(Idle, idle, PF_ANY)
*/
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)
+#ifdef CONFIG_MEMORY_HOTPLUG
+PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY)
+#else
+PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
+#endif
+
/*
- * On an anonymous page mapped into a user virtual memory area,
- * page->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ * On an anonymous folio mapped into a user virtual memory area,
+ * folio->mapping points to its anon_vma, not to a struct address_space;
+ * with the FOLIO_MAPPING_ANON bit set to distinguish it. See rmap.h.
*
- * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
- * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
- * bit; and then page->mapping points, not to an anon_vma, but to a private
- * structure which KSM associates with that merged page. See ksm.h.
+ * On an anonymous folio in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the FOLIO_MAPPING_ANON_KSM bit may be set along with the FOLIO_MAPPING_ANON
+ * bit; and then folio->mapping points, not to an anon_vma, but to a private
+ * structure which KSM associates with that merged folio. See ksm.h.
*
- * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
- * page and then page->mapping points a struct address_space.
+ * Please note that, confusingly, "folio_mapping" refers to the inode
+ * address_space which maps the folio from disk; whereas "folio_mapped"
+ * refers to user virtual address space into which the folio is mapped.
*
- * Please note that, confusingly, "page_mapping" refers to the inode
- * address_space which maps the page from disk; whereas "page_mapped"
- * refers to user virtual address space into which the page is mapped.
+ * For slab pages, since slab reuses the bits in struct page to store its
+ * internal states, the folio->mapping does not exist as such, nor do
+ * these flags below. So in order to avoid testing non-existent bits,
+ * please make sure that folio_test_slab(folio) actually evaluates to
+ * false before calling the following functions (e.g., folio_test_anon).
+ * See mm/slab.h.
*/
-#define PAGE_MAPPING_ANON 0x1
-#define PAGE_MAPPING_MOVABLE 0x2
-#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
-#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
+#define FOLIO_MAPPING_ANON 0x1
+#define FOLIO_MAPPING_ANON_KSM 0x2
+#define FOLIO_MAPPING_KSM (FOLIO_MAPPING_ANON | FOLIO_MAPPING_ANON_KSM)
+#define FOLIO_MAPPING_FLAGS (FOLIO_MAPPING_ANON | FOLIO_MAPPING_ANON_KSM)
-static __always_inline int PageMappingFlags(struct page *page)
+static __always_inline bool folio_test_anon(const struct folio *folio)
{
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
+ return ((unsigned long)folio->mapping & FOLIO_MAPPING_ANON) != 0;
}
-static __always_inline int PageAnon(struct page *page)
+static __always_inline bool PageAnonNotKsm(const struct page *page)
{
- page = compound_head(page);
- return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+ unsigned long flags = (unsigned long)page_folio(page)->mapping;
+
+ return (flags & FOLIO_MAPPING_FLAGS) == FOLIO_MAPPING_ANON;
}
-static __always_inline int __PageMovable(struct page *page)
+static __always_inline bool PageAnon(const struct page *page)
{
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
- PAGE_MAPPING_MOVABLE;
+ return folio_test_anon(page_folio(page));
}
-
#ifdef CONFIG_KSM
/*
* A KSM page is one of those write-protected "shared pages" or "merged pages"
@@ -497,30 +742,56 @@ static __always_inline int __PageMovable(struct page *page)
* is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
* anon_vma, but to that page's node of the stable tree.
*/
-static __always_inline int PageKsm(struct page *page)
+static __always_inline bool folio_test_ksm(const struct folio *folio)
{
- page = compound_head(page);
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
- PAGE_MAPPING_KSM;
+ return ((unsigned long)folio->mapping & FOLIO_MAPPING_FLAGS) ==
+ FOLIO_MAPPING_KSM;
}
#else
-TESTPAGEFLAG_FALSE(Ksm)
+FOLIO_TEST_FLAG_FALSE(ksm)
#endif
-u64 stable_page_flags(struct page *page);
+u64 stable_page_flags(const struct page *page);
-static inline int PageUptodate(struct page *page)
+/**
+ * folio_xor_flags_has_waiters - Change some folio flags.
+ * @folio: The folio.
+ * @mask: Bits set in this word will be changed.
+ *
+ * This must only be used for flags which are changed with the folio
+ * lock held. For example, it is unsafe to use for PG_dirty as that
+ * can be set without the folio lock held. It can also only be used
+ * on flags which are in the range 0-6 as some of the implementations
+ * only affect those bits.
+ *
+ * Return: Whether there are tasks waiting on the folio.
+ */
+static inline bool folio_xor_flags_has_waiters(struct folio *folio,
+ unsigned long mask)
{
- int ret;
- page = compound_head(page);
- ret = test_bit(PG_uptodate, &(page)->flags);
+ return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
+}
+
+/**
+ * folio_test_uptodate - Is this folio up to date?
+ * @folio: The folio.
+ *
+ * The uptodate flag is set on a folio when every byte in the folio is
+ * at least as new as the corresponding bytes on storage. Anonymous
+ * and CoW folios are always uptodate. If the folio is not uptodate,
+ * some of the bytes in it may be; see the is_partially_uptodate()
+ * address_space operation.
+ */
+static inline bool folio_test_uptodate(const struct folio *folio)
+{
+ bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0));
/*
- * Must ensure that the data we read out of the page is loaded
- * _after_ we've loaded page->flags to check for PageUptodate.
- * We can skip the barrier if the page is not uptodate, because
+ * Must ensure that the data we read out of the folio is loaded
+ * _after_ we've loaded folio->flags to check the uptodate bit.
+ * We can skip the barrier if the folio is not uptodate, because
* we wouldn't be reading anything from it.
*
- * See SetPageUptodate() for the other side of the story.
+ * See folio_mark_uptodate() for the other side of the story.
*/
if (ret)
smp_rmb();
@@ -528,46 +799,71 @@ static inline int PageUptodate(struct page *page)
return ret;
}
-static __always_inline void __SetPageUptodate(struct page *page)
+static inline bool PageUptodate(const struct page *page)
+{
+ return folio_test_uptodate(page_folio(page));
+}
+
+static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
smp_wmb();
- __set_bit(PG_uptodate, &page->flags);
+ __set_bit(PG_uptodate, folio_flags(folio, 0));
}
-static __always_inline void SetPageUptodate(struct page *page)
+static __always_inline void folio_mark_uptodate(struct folio *folio)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
/*
* Memory barrier must be issued before setting the PG_uptodate bit,
- * so that all previous stores issued in order to bring the page
- * uptodate are actually visible before PageUptodate becomes true.
+ * so that all previous stores issued in order to bring the folio
+ * uptodate are actually visible before folio_test_uptodate becomes true.
*/
smp_wmb();
- set_bit(PG_uptodate, &page->flags);
+ set_bit(PG_uptodate, folio_flags(folio, 0));
+}
+
+static __always_inline void __SetPageUptodate(struct page *page)
+{
+ __folio_mark_uptodate((struct folio *)page);
+}
+
+static __always_inline void SetPageUptodate(struct page *page)
+{
+ folio_mark_uptodate((struct folio *)page);
}
CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
-int test_clear_page_writeback(struct page *page);
-int __test_set_page_writeback(struct page *page, bool keep_write);
+void __folio_start_writeback(struct folio *folio, bool keep_write);
+void set_page_writeback(struct page *page);
-#define test_set_page_writeback(page) \
- __test_set_page_writeback(page, false)
-#define test_set_page_writeback_keepwrite(page) \
- __test_set_page_writeback(page, true)
+#define folio_start_writeback(folio) \
+ __folio_start_writeback(folio, false)
-static inline void set_page_writeback(struct page *page)
+static __always_inline bool folio_test_head(const struct folio *folio)
{
- test_set_page_writeback(page);
+ return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY));
}
-static inline void set_page_writeback_keepwrite(struct page *page)
+static __always_inline int PageHead(const struct page *page)
{
- test_set_page_writeback_keepwrite(page);
+ PF_POISONED_CHECK(page);
+ return test_bit(PG_head, &page->flags.f) && !page_is_fake_head(page);
}
-__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
+__SETPAGEFLAG(Head, head, PF_ANY)
+__CLEARPAGEFLAG(Head, head, PF_ANY)
+CLEARPAGEFLAG(Head, head, PF_ANY)
+
+/**
+ * folio_test_large() - Does this folio contain more than one page?
+ * @folio: The folio to test.
+ *
+ * Return: True if the folio is larger than one page.
+ */
+static inline bool folio_test_large(const struct folio *folio)
+{
+ return folio_test_head(folio);
+}
static __always_inline void set_compound_head(struct page *page, struct page *head)
{
@@ -585,161 +881,126 @@ static inline void ClearPageCompound(struct page *page)
BUG_ON(!PageHead(page));
ClearPageHead(page);
}
-#endif
-
-#define PG_head_mask ((1UL << PG_head))
-
-#ifdef CONFIG_HUGETLB_PAGE
-int PageHuge(struct page *page);
-int PageHeadHuge(struct page *page);
+FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
+FOLIO_FLAG(partially_mapped, FOLIO_SECOND_PAGE)
#else
-TESTPAGEFLAG_FALSE(Huge)
-TESTPAGEFLAG_FALSE(HeadHuge)
+FOLIO_FLAG_FALSE(large_rmappable)
+FOLIO_FLAG_FALSE(partially_mapped)
#endif
+#define PG_head_mask ((1UL << PG_head))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
- * PageHuge() only returns true for hugetlbfs pages, but not for
- * normal or transparent huge pages.
- *
- * PageTransHuge() returns true for both transparent huge and
- * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
- * called only in the core VM paths where hugetlbfs pages can't exist.
- */
-static inline int PageTransHuge(struct page *page)
-{
- VM_BUG_ON_PAGE(PageTail(page), page);
- return PageHead(page);
-}
-
-/*
* PageTransCompound returns true for both transparent huge pages
* and hugetlbfs pages, so it should only be called when it's known
* that hugetlbfs pages aren't involved.
*/
-static inline int PageTransCompound(struct page *page)
+static inline int PageTransCompound(const struct page *page)
{
return PageCompound(page);
}
+#else
+TESTPAGEFLAG_FALSE(TransCompound, transcompound)
+#endif
+#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
- * PageTransCompoundMap is the same as PageTransCompound, but it also
- * guarantees the primary MMU has the entire compound page mapped
- * through pmd_trans_huge, which in turn guarantees the secondary MMUs
- * can also map the entire compound page. This allows the secondary
- * MMUs to call get_user_pages() only once for each compound page and
- * to immediately map the entire compound page with a single secondary
- * MMU fault. If there will be a pmd split later, the secondary MMUs
- * will get an update through the MMU notifier invalidation through
- * split_huge_pmd().
- *
- * Unlike PageTransCompound, this is safe to be called only while
- * split_huge_pmd() cannot run from under us, like if protected by the
- * MMU notifier, otherwise it may result in page->_mapcount check false
- * positives.
- *
- * We have to treat page cache THP differently since every subpage of it
- * would get _mapcount inc'ed once it is PMD mapped. But, it may be PTE
- * mapped in the current process so comparing subpage's _mapcount to
- * compound_mapcount to filter out PTE mapped case.
- */
-static inline int PageTransCompoundMap(struct page *page)
-{
- struct page *head;
-
- if (!PageTransCompound(page))
- return 0;
-
- if (PageAnon(page))
- return atomic_read(&page->_mapcount) < 0;
-
- head = compound_head(page);
- /* File THP is PMD mapped and not PTE mapped */
- return atomic_read(&page->_mapcount) ==
- atomic_read(compound_mapcount_ptr(head));
-}
-
-/*
- * PageTransTail returns true for both transparent huge pages
- * and hugetlbfs pages, so it should only be called when it's known
- * that hugetlbfs pages aren't involved.
- */
-static inline int PageTransTail(struct page *page)
-{
- return PageTail(page);
-}
-
-/*
- * PageDoubleMap indicates that the compound page is mapped with PTEs as well
- * as PMDs.
- *
- * This is required for optimization of rmap operations for THP: we can postpone
- * per small page mapcount accounting (and its overhead from atomic operations)
- * until the first PMD split.
+ * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
+ * compound page.
*
- * For the page PageDoubleMap means ->_mapcount in all sub-pages is offset up
- * by one. This reference will go away with last compound_mapcount.
- *
- * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
+ * This flag is set by hwpoison handler. Cleared by THP split or free page.
*/
-PAGEFLAG(DoubleMap, double_map, PF_SECOND)
- TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
+FOLIO_FLAG(has_hwpoisoned, FOLIO_SECOND_PAGE)
#else
-TESTPAGEFLAG_FALSE(TransHuge)
-TESTPAGEFLAG_FALSE(TransCompound)
-TESTPAGEFLAG_FALSE(TransCompoundMap)
-TESTPAGEFLAG_FALSE(TransTail)
-PAGEFLAG_FALSE(DoubleMap)
- TESTSCFLAG_FALSE(DoubleMap)
+FOLIO_FLAG_FALSE(has_hwpoisoned)
#endif
/*
- * For pages that are never mapped to userspace (and aren't PageSlab),
- * page_type may be used. Because it is initialised to -1, we invert the
- * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
- * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and
- * low bits so that an underflow or overflow of page_mapcount() won't be
- * mistaken for a page type value.
+ * For pages that do not use mapcount, page_type may be used.
+ * The low 24 bits of page_type may be used for your own purposes, as long
+ * as you are careful not to affect the top 8 bits. The low bits of
+ * page_type will be overwritten when you clear the page_type from the page.
*/
+enum pagetype {
+ /* 0x00-0x7f are positive numbers, ie mapcount */
+ /* Reserve 0x80-0xef for mapcount overflow. */
+ PGTY_buddy = 0xf0,
+ PGTY_offline = 0xf1,
+ PGTY_table = 0xf2,
+ PGTY_guard = 0xf3,
+ PGTY_hugetlb = 0xf4,
+ PGTY_slab = 0xf5,
+ PGTY_zsmalloc = 0xf6,
+ PGTY_unaccepted = 0xf7,
+ PGTY_large_kmalloc = 0xf8,
+
+ PGTY_mapcount_underflow = 0xff
+};
-#define PAGE_TYPE_BASE 0xf0000000
-/* Reserve 0x0000007f to catch underflows of page_mapcount */
-#define PAGE_MAPCOUNT_RESERVE -128
-#define PG_buddy 0x00000080
-#define PG_offline 0x00000100
-#define PG_table 0x00000200
-#define PG_guard 0x00000400
+static inline bool page_type_has_type(int page_type)
+{
+ return page_type < (PGTY_mapcount_underflow << 24);
+}
-#define PageType(page, flag) \
- ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
+/* This takes a mapcount which is one more than page->_mapcount */
+static inline bool page_mapcount_is_type(unsigned int mapcount)
+{
+ return page_type_has_type(mapcount - 1);
+}
-static inline int page_has_type(struct page *page)
+static inline bool page_has_type(const struct page *page)
{
- return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
+ return page_type_has_type(data_race(page->page_type));
+}
+
+#define FOLIO_TYPE_OPS(lname, fname) \
+static __always_inline bool folio_test_##fname(const struct folio *folio) \
+{ \
+ return data_race(folio->page.page_type >> 24) == PGTY_##lname; \
+} \
+static __always_inline void __folio_set_##fname(struct folio *folio) \
+{ \
+ if (folio_test_##fname(folio)) \
+ return; \
+ VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX, \
+ folio); \
+ folio->page.page_type = (unsigned int)PGTY_##lname << 24; \
+} \
+static __always_inline void __folio_clear_##fname(struct folio *folio) \
+{ \
+ if (folio->page.page_type == UINT_MAX) \
+ return; \
+ VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
+ folio->page.page_type = UINT_MAX; \
}
-#define PAGE_TYPE_OPS(uname, lname) \
-static __always_inline int Page##uname(struct page *page) \
+#define PAGE_TYPE_OPS(uname, lname, fname) \
+FOLIO_TYPE_OPS(lname, fname) \
+static __always_inline int Page##uname(const struct page *page) \
{ \
- return PageType(page, PG_##lname); \
+ return data_race(page->page_type >> 24) == PGTY_##lname; \
} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
- VM_BUG_ON_PAGE(!PageType(page, 0), page); \
- page->page_type &= ~PG_##lname; \
+ if (Page##uname(page)) \
+ return; \
+ VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page); \
+ page->page_type = (unsigned int)PGTY_##lname << 24; \
} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
+ if (page->page_type == UINT_MAX) \
+ return; \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
- page->page_type |= PG_##lname; \
+ page->page_type = UINT_MAX; \
}
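The encoding these helpers rely on can be sketched as follows; page_is_guard_sketch() is illustrative only, the generated PageGuard() is what callers should use:

static inline bool page_is_guard_sketch(const struct page *page)
{
	/* a fresh page has page_type == UINT_MAX; setting a type writes
	 * PGTY_guard (0xf3) into the top byte, giving 0xf3000000 */
	return (data_race(page->page_type) >> 24) == PGTY_guard;
}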
/*
* PageBuddy() indicates that the page is free and in the buddy system
* (see mm/page_alloc.c).
*/
-PAGE_TYPE_OPS(Buddy, buddy)
+PAGE_TYPE_OPS(Buddy, buddy, buddy)
/*
* PageOffline() indicates that the page is logically offline although the
@@ -748,58 +1009,178 @@ PAGE_TYPE_OPS(Buddy, buddy)
* The content of these pages is effectively stale. Such pages should not
* be touched (read/write/dump/save) except by their owner.
*
+ * When a memory block gets onlined, all pages are initialized with a
+ * refcount of 1 and PageOffline(). generic_online_page() will
+ * take care of clearing PageOffline().
+ *
* If a driver wants to allow to offline unmovable PageOffline() pages without
* putting them back to the buddy, it can do so via the memory notifier by
* decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
* reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
- * pages (now with a reference count of zero) are treated like free pages,
- * allowing the containing memory block to get offlined. A driver that
+ * pages (now with a reference count of zero) are treated like free (unmanaged)
+ * pages, allowing the containing memory block to get offlined. A driver that
* relies on this feature is aware that re-onlining the memory block will
- * require to re-set the pages PageOffline() and not giving them to the
- * buddy via online_page_callback_t.
+ * require not giving them to the buddy via generic_online_page().
+ *
+ * Memory offlining code will not adjust the managed page count for any
+ * PageOffline() pages, treating them like they were never exposed to the
+ * buddy using generic_online_page().
+ *
+ * There are drivers that mark a page PageOffline() and expect there won't be
+ * any further access to page content. PFN walkers that read content of random
+ * pages should check PageOffline() and synchronize with such drivers using
+ * page_offline_freeze()/page_offline_thaw().
*/
-PAGE_TYPE_OPS(Offline, offline)
+PAGE_TYPE_OPS(Offline, offline, offline)
+
+extern void page_offline_freeze(void);
+extern void page_offline_thaw(void);
+extern void page_offline_begin(void);
+extern void page_offline_end(void);
/*
* Marks pages in use as page tables.
*/
-PAGE_TYPE_OPS(Table, table)
+PAGE_TYPE_OPS(Table, table, pgtable)
/*
* Marks guardpages used with debug_pagealloc.
*/
-PAGE_TYPE_OPS(Guard, guard)
+PAGE_TYPE_OPS(Guard, guard, guard)
+
+PAGE_TYPE_OPS(Slab, slab, slab)
+
+#ifdef CONFIG_HUGETLB_PAGE
+FOLIO_TYPE_OPS(hugetlb, hugetlb)
+#else
+FOLIO_TEST_FLAG_FALSE(hugetlb)
+#endif
+
+PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc)
+
+/*
+ * Mark pages that have to be accepted before being touched for the first time.
+ *
+ * Serialized with zone lock.
+ */
+PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted)
+PAGE_TYPE_OPS(LargeKmalloc, large_kmalloc, large_kmalloc)
+
+/**
+ * PageHuge - Determine if the page belongs to hugetlbfs
+ * @page: The page to test.
+ *
+ * Context: Any context.
+ * Return: True for hugetlbfs pages, false for anon pages or pages
+ * belonging to other filesystems.
+ */
+static inline bool PageHuge(const struct page *page)
+{
+ return folio_test_hugetlb(page_folio(page));
+}
+
+/*
+ * Check if a page is currently marked HWPoisoned. Note that this check is
+ * best effort only and inherently racy: there is no way to synchronize with
+ * failing hardware.
+ */
+static inline bool is_page_hwpoison(const struct page *page)
+{
+ const struct folio *folio;
-extern bool is_free_buddy_page(struct page *page);
+ if (PageHWPoison(page))
+ return true;
+ folio = page_folio(page);
+ return folio_test_hugetlb(folio) && PageHWPoison(&folio->page);
+}
+
+static inline bool folio_contain_hwpoisoned_page(struct folio *folio)
+{
+ return folio_test_hwpoison(folio) ||
+ (folio_test_large(folio) && folio_test_has_hwpoisoned(folio));
+}
-__PAGEFLAG(Isolated, isolated, PF_ANY);
+bool is_free_buddy_page(const struct page *page);
+#ifdef CONFIG_MIGRATION
+/*
+ * This page is migratable through movable_ops (for selected typed pages
+ * only).
+ *
+ * Page migration of such pages might fail, for example, if the page is
+ * already isolated by somebody else, or if the page is about to get freed.
+ *
+ * While a subsystem might set selected typed pages that support page migration
+ * as being movable through movable_ops, it must never clear this flag.
+ *
+ * This flag is only cleared when the page is freed back to the buddy.
+ *
+ * Only selected page types support this flag (see page_movable_ops()) and
+ * the flag might be used in other contexts for other pages. Always use
+ * page_has_movable_ops() instead.
+ */
+TESTPAGEFLAG(MovableOps, movable_ops, PF_NO_TAIL);
+SETPAGEFLAG(MovableOps, movable_ops, PF_NO_TAIL);
/*
- * If network-based swap is enabled, sl*b must keep track of whether pages
- * were allocated from pfmemalloc reserves.
+ * A movable_ops page has this flag set while it is isolated for migration.
+ * This flag primarily protects against concurrent migration attempts.
+ *
+ * Once migration ended (success or failure), the flag is cleared. The
+ * flag is managed by the migration core.
*/
-static inline int PageSlabPfmemalloc(struct page *page)
+PAGEFLAG(MovableOpsIsolated, movable_ops_isolated, PF_NO_TAIL);
+#else /* !CONFIG_MIGRATION */
+TESTPAGEFLAG_FALSE(MovableOps, movable_ops);
+SETPAGEFLAG_NOOP(MovableOps, movable_ops);
+PAGEFLAG_FALSE(MovableOpsIsolated, movable_ops_isolated);
+#endif /* CONFIG_MIGRATION */
+
+/**
+ * page_has_movable_ops - test for a movable_ops page
+ * @page: The page to test.
+ *
+ * Test whether this is a movable_ops page. Such pages will stay that
+ * way until freed.
+ *
+ * Returns true if this is a movable_ops page, otherwise false.
+ */
+static inline bool page_has_movable_ops(const struct page *page)
+{
+ return PageMovableOps(page) &&
+ (PageOffline(page) || PageZsmalloc(page));
+}
+
+static __always_inline int PageAnonExclusive(const struct page *page)
{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- return PageActive(page);
+ VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
+ /*
+ * HugeTLB stores this information on the head page; THP keeps it per
+ * page
+ */
+ if (PageHuge(page))
+ page = compound_head(page);
+ return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
-static inline void SetPageSlabPfmemalloc(struct page *page)
+static __always_inline void SetPageAnonExclusive(struct page *page)
{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- SetPageActive(page);
+ VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
+ VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
+ set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
-static inline void __ClearPageSlabPfmemalloc(struct page *page)
+static __always_inline void ClearPageAnonExclusive(struct page *page)
{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- __ClearPageActive(page);
+ VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
+ VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
+ clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
-static inline void ClearPageSlabPfmemalloc(struct page *page)
+static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- ClearPageActive(page);
+ VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
+ VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
+ __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
#ifdef CONFIG_MMU
@@ -816,8 +1197,8 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
(1UL << PG_lru | 1UL << PG_locked | \
1UL << PG_private | 1UL << PG_private_2 | \
1UL << PG_writeback | 1UL << PG_reserved | \
- 1UL << PG_slab | 1UL << PG_active | \
- 1UL << PG_unevictable | __PG_MLOCKED)
+ 1UL << PG_active | \
+ 1UL << PG_unevictable | __PG_MLOCKED | LRU_GEN_MASK)
/*
* Flags checked when a page is prepped for return by the page allocator.
@@ -828,25 +1209,32 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
* alloc-free cycle to prevent from reusing the page.
*/
#define PAGE_FLAGS_CHECK_AT_PREP \
- (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
+ ((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)
+
+/*
+ * Flags stored in the second page of a compound page. They may overlap
+ * the CHECK_AT_FREE flags above, so need to be cleared.
+ */
+#define PAGE_FLAGS_SECOND \
+ (0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \
+ 1UL << PG_large_rmappable | 1UL << PG_partially_mapped)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
/**
- * page_has_private - Determine if page has private stuff
- * @page: The page to be checked
+ * folio_has_private - Determine if folio has private stuff
+ * @folio: The folio to be checked
*
- * Determine if a page has private stuff, indicating that release routines
+ * Determine if a folio has private stuff, indicating that release routines
* should be invoked upon it.
*/
-static inline int page_has_private(struct page *page)
+static inline int folio_has_private(const struct folio *folio)
{
- return !!(page->flags & PAGE_FLAGS_PRIVATE);
+ return !!(folio->flags.f & PAGE_FLAGS_PRIVATE);
}
#undef PF_ANY
#undef PF_HEAD
-#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 572458016331..3e2f960e166c 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -3,10 +3,6 @@
#define __LINUX_PAGEISOLATION_H
#ifdef CONFIG_MEMORY_ISOLATION
-static inline bool has_isolate_pageblock(struct zone *zone)
-{
- return zone->nr_isolate_pageblock;
-}
static inline bool is_migrate_isolate_page(struct page *page)
{
return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
@@ -15,51 +11,60 @@ static inline bool is_migrate_isolate(int migratetype)
{
return migratetype == MIGRATE_ISOLATE;
}
+#define get_pageblock_isolate(page) \
+ get_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
+#define clear_pageblock_isolate(page) \
+ clear_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
+#define set_pageblock_isolate(page) \
+ set_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
#else
-static inline bool has_isolate_pageblock(struct zone *zone)
+static inline bool is_migrate_isolate_page(struct page *page)
{
return false;
}
-static inline bool is_migrate_isolate_page(struct page *page)
+static inline bool is_migrate_isolate(int migratetype)
{
return false;
}
-static inline bool is_migrate_isolate(int migratetype)
+static inline bool get_pageblock_isolate(struct page *page)
{
return false;
}
+static inline void clear_pageblock_isolate(struct page *page)
+{
+}
+static inline void set_pageblock_isolate(struct page *page)
+{
+}
#endif
-#define MEMORY_OFFLINE 0x1
-#define REPORT_FAILURE 0x2
-
-struct page *has_unmovable_pages(struct zone *zone, struct page *page,
- int migratetype, int flags);
-void set_pageblock_migratetype(struct page *page, int migratetype);
-int move_freepages_block(struct zone *zone, struct page *page,
- int migratetype, int *num_movable);
-
/*
- * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
+ * Pageblock isolation modes:
+ * PB_ISOLATE_MODE_MEM_OFFLINE - isolate to offline (!allocate) memory
+ * e.g., skip over PageHWPoison() pages and
+ * PageOffline() pages. Unmovable pages will be
+ * reported in this mode.
+ * PB_ISOLATE_MODE_CMA_ALLOC - isolate for CMA allocations
+ * PB_ISOLATE_MODE_OTHER - isolate for other purposes
*/
-int
-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- unsigned migratetype, int flags);
+enum pb_isolate_mode {
+ PB_ISOLATE_MODE_MEM_OFFLINE,
+ PB_ISOLATE_MODE_CMA_ALLOC,
+ PB_ISOLATE_MODE_OTHER,
+};
-/*
- * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
- * target range is [start_pfn, end_pfn)
- */
-void
-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- unsigned migratetype);
+void __meminit init_pageblock_migratetype(struct page *page,
+ enum migratetype migratetype,
+ bool isolate);
-/*
- * Test all pages in [start_pfn, end_pfn) are isolated or not.
- */
-int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
- int isol_flags);
+bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page);
+bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);
-struct page *alloc_migrate_target(struct page *page, unsigned long private);
+int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ enum pb_isolate_mode mode);
+void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
+
+int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
+ enum pb_isolate_mode mode);
#endif
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index 679591301994..d649b6bbbc87 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -3,15 +3,19 @@
#define _LINUX_PAGE_COUNTER_H
#include <linux/atomic.h>
-#include <linux/kernel.h>
+#include <linux/cache.h>
+#include <linux/limits.h>
#include <asm/page.h>
struct page_counter {
+ /*
+	 * Make sure 'usage' does not share a cacheline with any other field in
+ * v2. The memcg->memory.usage is a hot member of struct mem_cgroup.
+ */
atomic_long_t usage;
- unsigned long min;
- unsigned long low;
- unsigned long high;
- unsigned long max;
+ unsigned long failcnt; /* v1-only field */
+
+ CACHELINE_PADDING(_pad1_);
/* effective memory.min and memory.min usage tracking */
unsigned long emin;
@@ -23,18 +27,21 @@ struct page_counter {
atomic_long_t low_usage;
atomic_long_t children_low_usage;
- /* legacy */
unsigned long watermark;
- unsigned long failcnt;
+ /* Latest cg2 reset watermark */
+ unsigned long local_watermark;
- /*
- * 'parent' is placed here to be far from 'usage' to reduce
- * cache false sharing, as 'usage' is written mostly while
- * parent is frequently read for cgroup's hierarchical
- * counting nature.
- */
+	/* Keep all the read-mostly fields in a separate cacheline. */
+ CACHELINE_PADDING(_pad2_);
+
+ bool protection_support;
+ bool track_failcnt;
+ unsigned long min;
+ unsigned long low;
+ unsigned long high;
+ unsigned long max;
struct page_counter *parent;
-};
+} ____cacheline_internodealigned_in_smp;
#if BITS_PER_LONG == 32
#define PAGE_COUNTER_MAX LONG_MAX
@@ -42,12 +49,18 @@ struct page_counter {
#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
#endif
+/*
+ * Protection is supported only for the first counter (with id 0).
+ */
static inline void page_counter_init(struct page_counter *counter,
- struct page_counter *parent)
+ struct page_counter *parent,
+ bool protection_support)
{
- atomic_long_set(&counter->usage, 0);
+ counter->usage = (atomic_long_t)ATOMIC_LONG_INIT(0);
counter->max = PAGE_COUNTER_MAX;
counter->parent = parent;
+ counter->protection_support = protection_support;
+ counter->track_failcnt = false;
}
static inline unsigned long page_counter_read(struct page_counter *counter)
@@ -76,7 +89,24 @@ int page_counter_memparse(const char *buf, const char *max,
static inline void page_counter_reset_watermark(struct page_counter *counter)
{
- counter->watermark = page_counter_read(counter);
+ unsigned long usage = page_counter_read(counter);
+
+ /*
+ * Update local_watermark first, so it's always <= watermark
+ * (modulo CPU/compiler re-ordering)
+ */
+ counter->local_watermark = usage;
+ counter->watermark = usage;
}
+#if IS_ENABLED(CONFIG_MEMCG) || IS_ENABLED(CONFIG_CGROUP_DMEM)
+void page_counter_calculate_protection(struct page_counter *root,
+ struct page_counter *counter,
+ bool recursive_protection);
+#else
+static inline void page_counter_calculate_protection(struct page_counter *root,
+ struct page_counter *counter,
+ bool recursive_protection) {}
+#endif
+
#endif /* _LINUX_PAGE_COUNTER_H */
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index aff81ba31bd8..76c817162d2f 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -3,23 +3,40 @@
#define __LINUX_PAGE_EXT_H
#include <linux/types.h>
+#include <linux/mmzone.h>
#include <linux/stacktrace.h>
-#include <linux/stackdepot.h>
struct pglist_data;
+
+#ifdef CONFIG_PAGE_EXTENSION
+/**
+ * struct page_ext_operations - per page_ext client operations
+ * @offset: Offset to the client's data within page_ext. Offset is returned to
+ * the client by page_ext_init.
+ * @size: The size of the client data within page_ext.
+ * @need: Function that returns true if client requires page_ext.
+ * @init: (optional) Called to initialize client once page_exts are allocated.
+ * @need_shared_flags: True when client is using shared page_ext->flags
+ * field.
+ *
+ * Each Page Extension client must define page_ext_operations in
+ * page_ext_ops array.
+ */
struct page_ext_operations {
size_t offset;
size_t size;
bool (*need)(void);
void (*init)(void);
+ bool need_shared_flags;
};
-#ifdef CONFIG_PAGE_EXTENSION
-
+/*
+ * The page_ext_flags users must set need_shared_flags to true.
+ */
enum page_ext_flags {
PAGE_EXT_OWNER,
PAGE_EXT_OWNER_ALLOCATED,
-#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
+#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
PAGE_EXT_YOUNG,
PAGE_EXT_IDLE,
#endif
@@ -36,9 +53,15 @@ struct page_ext {
unsigned long flags;
};
+extern bool early_page_ext;
extern unsigned long page_ext_size;
extern void pgdat_page_ext_init(struct pglist_data *pgdat);
+static inline bool early_page_ext_enabled(void)
+{
+ return early_page_ext;
+}
+
#ifdef CONFIG_SPARSEMEM
static inline void page_ext_init_flatmem(void)
{
@@ -47,15 +70,37 @@ extern void page_ext_init(void);
static inline void page_ext_init_flatmem_late(void)
{
}
+
+static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn)
+{
+ /*
+ * page_ext is allocated per memory section. Once we cross a
+ * memory section, we have to fetch the new pointer.
+ */
+ return next_pfn % PAGES_PER_SECTION;
+}
#else
extern void page_ext_init_flatmem(void);
extern void page_ext_init_flatmem_late(void);
static inline void page_ext_init(void)
{
}
+
+static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn)
+{
+ return true;
+}
#endif
-struct page_ext *lookup_page_ext(const struct page *page);
+extern struct page_ext *page_ext_get(const struct page *page);
+extern void page_ext_put(struct page_ext *page_ext);
+extern struct page_ext *page_ext_lookup(unsigned long pfn);
+
+static inline void *page_ext_data(struct page_ext *page_ext,
+ struct page_ext_operations *ops)
+{
+ return (void *)(page_ext) + ops->offset;
+}
static inline struct page_ext *page_ext_next(struct page_ext *curr)
{
@@ -64,16 +109,93 @@ static inline struct page_ext *page_ext_next(struct page_ext *curr)
return next;
}
+struct page_ext_iter {
+ unsigned long index;
+ unsigned long start_pfn;
+ struct page_ext *page_ext;
+};
+
+/**
+ * page_ext_iter_begin() - Prepare for iterating through page extensions.
+ * @iter: page extension iterator.
+ * @pfn: PFN of the page we're interested in.
+ *
+ * Must be called with RCU read lock taken.
+ *
+ * Return: NULL if no page_ext exists for this page.
+ */
+static inline struct page_ext *page_ext_iter_begin(struct page_ext_iter *iter,
+ unsigned long pfn)
+{
+ iter->index = 0;
+ iter->start_pfn = pfn;
+ iter->page_ext = page_ext_lookup(pfn);
+
+ return iter->page_ext;
+}
+
+/**
+ * page_ext_iter_next() - Get next page extension
+ * @iter: page extension iterator.
+ *
+ * Must be called with RCU read lock taken.
+ *
+ * Return: NULL if no next page_ext exists.
+ */
+static inline struct page_ext *page_ext_iter_next(struct page_ext_iter *iter)
+{
+ unsigned long pfn;
+
+ if (WARN_ON_ONCE(!iter->page_ext))
+ return NULL;
+
+ iter->index++;
+ pfn = iter->start_pfn + iter->index;
+
+ if (page_ext_iter_next_fast_possible(pfn))
+ iter->page_ext = page_ext_next(iter->page_ext);
+ else
+ iter->page_ext = page_ext_lookup(pfn);
+
+ return iter->page_ext;
+}
+
+/**
+ * page_ext_iter_get() - Get current page extension
+ * @iter: page extension iterator.
+ *
+ * Return: NULL if no page_ext exists for this iterator.
+ */
+static inline struct page_ext *page_ext_iter_get(const struct page_ext_iter *iter)
+{
+ return iter->page_ext;
+}
+
+/**
+ * for_each_page_ext(): iterate through page_ext objects.
+ * @__page: the page we're interested in
+ * @__pgcount: how many pages to iterate through
+ * @__page_ext: struct page_ext pointer where the current page_ext
+ * object is returned
+ * @__iter: struct page_ext_iter object (defined in the stack)
+ *
+ * IMPORTANT: must be called with RCU read lock taken.
+ */
+#define for_each_page_ext(__page, __pgcount, __page_ext, __iter) \
+ for (__page_ext = page_ext_iter_begin(&__iter, page_to_pfn(__page));\
+ __page_ext && __iter.index < __pgcount; \
+ __page_ext = page_ext_iter_next(&__iter))
+
#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;
-static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
+static inline bool early_page_ext_enabled(void)
{
+ return false;
}
-static inline struct page_ext *lookup_page_ext(const struct page *page)
+static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
- return NULL;
}
static inline void page_ext_init(void)
@@ -87,5 +209,14 @@ static inline void page_ext_init_flatmem_late(void)
static inline void page_ext_init_flatmem(void)
{
}
+
+static inline struct page_ext *page_ext_get(const struct page *page)
+{
+ return NULL;
+}
+
+static inline void page_ext_put(struct page_ext *page_ext)
+{
+}
#endif /* CONFIG_PAGE_EXTENSION */
#endif /* __LINUX_PAGE_EXT_H */
diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
new file mode 100644
index 000000000000..41a91df82631
--- /dev/null
+++ b/include/linux/page_frag_cache.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_PAGE_FRAG_CACHE_H
+#define _LINUX_PAGE_FRAG_CACHE_H
+
+#include <linux/bits.h>
+#include <linux/log2.h>
+#include <linux/mm_types_task.h>
+#include <linux/types.h>
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+/* Use a full byte here to enable assembler optimization as the shift
+ * operation usually expects a byte.
+ */
+#define PAGE_FRAG_CACHE_ORDER_MASK GENMASK(7, 0)
+#else
+/* The compiler should be able to figure out that the value is never read,
+ * since anything ANDed with 0 is 0.
+ */
+#define PAGE_FRAG_CACHE_ORDER_MASK 0
+#endif
+
+#define PAGE_FRAG_CACHE_PFMEMALLOC_BIT (PAGE_FRAG_CACHE_ORDER_MASK + 1)
+
+static inline bool encoded_page_decode_pfmemalloc(unsigned long encoded_page)
+{
+ return !!(encoded_page & PAGE_FRAG_CACHE_PFMEMALLOC_BIT);
+}
+
+static inline void page_frag_cache_init(struct page_frag_cache *nc)
+{
+ nc->encoded_page = 0;
+}
+
+static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
+{
+ return encoded_page_decode_pfmemalloc(nc->encoded_page);
+}
+
+void page_frag_cache_drain(struct page_frag_cache *nc);
+void __page_frag_cache_drain(struct page *page, unsigned int count);
+void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
+ gfp_t gfp_mask, unsigned int align_mask);
+
+static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask,
+ unsigned int align)
+{
+ WARN_ON_ONCE(!is_power_of_2(align));
+ return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
+}
+
+static inline void *page_frag_alloc(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask)
+{
+ return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
+}
+
+void page_frag_free(void *addr);
+
+#endif
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index 1e894d34bdce..89ca0d5dc1e7 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -6,135 +6,84 @@
#include <linux/page-flags.h>
#include <linux/page_ext.h>
-#ifdef CONFIG_IDLE_PAGE_TRACKING
-
-#ifdef CONFIG_64BIT
-static inline bool page_is_young(struct page *page)
-{
- return PageYoung(page);
-}
-
-static inline void set_page_young(struct page *page)
-{
- SetPageYoung(page);
-}
-
-static inline bool test_and_clear_page_young(struct page *page)
-{
- return TestClearPageYoung(page);
-}
-
-static inline bool page_is_idle(struct page *page)
-{
- return PageIdle(page);
-}
-
-static inline void set_page_idle(struct page *page)
-{
- SetPageIdle(page);
-}
-
-static inline void clear_page_idle(struct page *page)
-{
- ClearPageIdle(page);
-}
-#else /* !CONFIG_64BIT */
+#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
/*
* If there is not enough space to store Idle and Young bits in page flags, use
* page ext flags instead.
*/
-extern struct page_ext_operations page_idle_ops;
-
-static inline bool page_is_young(struct page *page)
+static inline bool folio_test_young(const struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_young;
if (unlikely(!page_ext))
return false;
- return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_young = test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_young;
}
-static inline void set_page_young(struct page *page)
+static inline void folio_set_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
}
-static inline bool test_and_clear_page_young(struct page *page)
+static inline bool folio_test_clear_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_young;
if (unlikely(!page_ext))
return false;
- return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_young = test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_young;
}
-static inline bool page_is_idle(struct page *page)
+static inline bool folio_test_idle(const struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_idle;
if (unlikely(!page_ext))
return false;
- return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_idle = test_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_idle;
}
-static inline void set_page_idle(struct page *page)
+static inline void folio_set_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
set_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
}
-static inline void clear_page_idle(struct page *page)
+static inline void folio_clear_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
}
-#endif /* CONFIG_64BIT */
-
-#else /* !CONFIG_IDLE_PAGE_TRACKING */
-
-static inline bool page_is_young(struct page *page)
-{
- return false;
-}
-
-static inline void set_page_young(struct page *page)
-{
-}
-
-static inline bool test_and_clear_page_young(struct page *page)
-{
- return false;
-}
-
-static inline bool page_is_idle(struct page *page)
-{
- return false;
-}
-
-static inline void set_page_idle(struct page *page)
-{
-}
-
-static inline void clear_page_idle(struct page *page)
-{
-}
-
-#endif /* CONFIG_IDLE_PAGE_TRACKING */
-
+#endif /* CONFIG_PAGE_IDLE_FLAG && !64BIT */
#endif /* _LINUX_MM_PAGE_IDLE_H */
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 3468794f83d2..3328357f6dba 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -8,68 +8,70 @@
extern struct static_key_false page_owner_inited;
extern struct page_ext_operations page_owner_ops;
-extern void __reset_page_owner(struct page *page, unsigned int order);
+extern void __reset_page_owner(struct page *page, unsigned short order);
extern void __set_page_owner(struct page *page,
- unsigned int order, gfp_t gfp_mask);
-extern void __split_page_owner(struct page *page, unsigned int nr);
-extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
-extern void __set_page_owner_migrate_reason(struct page *page, int reason);
-extern void __dump_page_owner(struct page *page);
+ unsigned short order, gfp_t gfp_mask);
+extern void __split_page_owner(struct page *page, int old_order,
+ int new_order);
+extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
+extern void __folio_set_owner_migrate_reason(struct folio *folio, int reason);
+extern void __dump_page_owner(const struct page *page);
extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone);
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
{
if (static_branch_unlikely(&page_owner_inited))
__reset_page_owner(page, order);
}
static inline void set_page_owner(struct page *page,
- unsigned int order, gfp_t gfp_mask)
+ unsigned short order, gfp_t gfp_mask)
{
if (static_branch_unlikely(&page_owner_inited))
__set_page_owner(page, order, gfp_mask);
}
-static inline void split_page_owner(struct page *page, unsigned int nr)
+static inline void split_page_owner(struct page *page, int old_order,
+ int new_order)
{
if (static_branch_unlikely(&page_owner_inited))
- __split_page_owner(page, nr);
+ __split_page_owner(page, old_order, new_order);
}
-static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
{
if (static_branch_unlikely(&page_owner_inited))
- __copy_page_owner(oldpage, newpage);
+ __folio_copy_owner(newfolio, old);
}
-static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+static inline void folio_set_owner_migrate_reason(struct folio *folio, int reason)
{
if (static_branch_unlikely(&page_owner_inited))
- __set_page_owner_migrate_reason(page, reason);
+ __folio_set_owner_migrate_reason(folio, reason);
}
-static inline void dump_page_owner(struct page *page)
+static inline void dump_page_owner(const struct page *page)
{
if (static_branch_unlikely(&page_owner_inited))
__dump_page_owner(page);
}
#else
-static inline void reset_page_owner(struct page *page, unsigned int order)
+static inline void reset_page_owner(struct page *page, unsigned short order)
{
}
static inline void set_page_owner(struct page *page,
- unsigned int order, gfp_t gfp_mask)
+ unsigned short order, gfp_t gfp_mask)
{
}
-static inline void split_page_owner(struct page *page,
- unsigned int order)
+static inline void split_page_owner(struct page *page, int old_order,
+ int new_order)
{
}
-static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
{
}
-static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+static inline void folio_set_owner_migrate_reason(struct folio *folio, int reason)
{
}
-static inline void dump_page_owner(struct page *page)
+static inline void dump_page_owner(const struct page *page)
{
}
#endif /* CONFIG_PAGE_OWNER */
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index f3318f34fc54..544150d1d5fd 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -62,14 +62,36 @@ static inline void __page_ref_unfreeze(struct page *page, int v)
#endif
-static inline int page_ref_count(struct page *page)
+static inline int page_ref_count(const struct page *page)
{
return atomic_read(&page->_refcount);
}
-static inline int page_count(struct page *page)
+/**
+ * folio_ref_count - The reference count on this folio.
+ * @folio: The folio.
+ *
+ * The refcount is usually incremented by calls to folio_get() and
+ * decremented by calls to folio_put(). Some typical users of the
+ * folio refcount:
+ *
+ * - Each reference from a page table
+ * - The page cache
+ * - Filesystem private data
+ * - The LRU list
+ * - Pipes
+ * - Direct IO which references this folio in the process address space
+ *
+ * Return: The number of references to this folio.
+ */
+static inline int folio_ref_count(const struct folio *folio)
+{
+ return page_ref_count(&folio->page);
+}
+
+static inline int page_count(const struct page *page)
{
- return atomic_read(&compound_head(page)->_refcount);
+ return folio_ref_count(page_folio(page));
}
static inline void set_page_count(struct page *page, int v)
@@ -79,6 +101,11 @@ static inline void set_page_count(struct page *page, int v)
__page_ref_set(page, v);
}
+static inline void folio_set_count(struct folio *folio, int v)
+{
+ set_page_count(&folio->page, v);
+}
+
/*
* Setup the page count before being freed into the page allocator for
* the first time (boot or memory hotplug)
@@ -95,6 +122,11 @@ static inline void page_ref_add(struct page *page, int nr)
__page_ref_mod(page, nr);
}
+static inline void folio_ref_add(struct folio *folio, int nr)
+{
+ page_ref_add(&folio->page, nr);
+}
+
static inline void page_ref_sub(struct page *page, int nr)
{
atomic_sub(nr, &page->_refcount);
@@ -102,12 +134,17 @@ static inline void page_ref_sub(struct page *page, int nr)
__page_ref_mod(page, -nr);
}
-static inline int page_ref_sub_return(struct page *page, int nr)
+static inline void folio_ref_sub(struct folio *folio, int nr)
{
- int ret = atomic_sub_return(nr, &page->_refcount);
+ page_ref_sub(&folio->page, nr);
+}
+
+static inline int folio_ref_sub_return(struct folio *folio, int nr)
+{
+ int ret = atomic_sub_return(nr, &folio->_refcount);
if (page_ref_tracepoint_active(page_ref_mod_and_return))
- __page_ref_mod_and_return(page, -nr, ret);
+ __page_ref_mod_and_return(&folio->page, -nr, ret);
return ret;
}
@@ -118,6 +155,11 @@ static inline void page_ref_inc(struct page *page)
__page_ref_mod(page, 1);
}
+static inline void folio_ref_inc(struct folio *folio)
+{
+ page_ref_inc(&folio->page);
+}
+
static inline void page_ref_dec(struct page *page)
{
atomic_dec(&page->_refcount);
@@ -125,6 +167,11 @@ static inline void page_ref_dec(struct page *page)
__page_ref_mod(page, -1);
}
+static inline void folio_ref_dec(struct folio *folio)
+{
+ page_ref_dec(&folio->page);
+}
+
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
int ret = atomic_sub_and_test(nr, &page->_refcount);
@@ -134,6 +181,11 @@ static inline int page_ref_sub_and_test(struct page *page, int nr)
return ret;
}
+static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
+{
+ return page_ref_sub_and_test(&folio->page, nr);
+}
+
static inline int page_ref_inc_return(struct page *page)
{
int ret = atomic_inc_return(&page->_refcount);
@@ -143,6 +195,11 @@ static inline int page_ref_inc_return(struct page *page)
return ret;
}
+static inline int folio_ref_inc_return(struct folio *folio)
+{
+ return page_ref_inc_return(&folio->page);
+}
+
static inline int page_ref_dec_and_test(struct page *page)
{
int ret = atomic_dec_and_test(&page->_refcount);
@@ -152,6 +209,11 @@ static inline int page_ref_dec_and_test(struct page *page)
return ret;
}
+static inline int folio_ref_dec_and_test(struct folio *folio)
+{
+ return page_ref_dec_and_test(&folio->page);
+}
+
static inline int page_ref_dec_return(struct page *page)
{
int ret = atomic_dec_return(&page->_refcount);
@@ -161,15 +223,52 @@ static inline int page_ref_dec_return(struct page *page)
return ret;
}
-static inline int page_ref_add_unless(struct page *page, int nr, int u)
+static inline int folio_ref_dec_return(struct folio *folio)
{
- int ret = atomic_add_unless(&page->_refcount, nr, u);
+ return page_ref_dec_return(&folio->page);
+}
+
+static inline bool page_ref_add_unless(struct page *page, int nr, int u)
+{
+ bool ret = false;
+
+ rcu_read_lock();
+ /* avoid writing to the vmemmap area being remapped */
+ if (page_count_writable(page, u))
+ ret = atomic_add_unless(&page->_refcount, nr, u);
+ rcu_read_unlock();
if (page_ref_tracepoint_active(page_ref_mod_unless))
__page_ref_mod_unless(page, nr, ret);
return ret;
}
+static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u)
+{
+ return page_ref_add_unless(&folio->page, nr, u);
+}
+
+/**
+ * folio_try_get - Attempt to increase the refcount on a folio.
+ * @folio: The folio.
+ *
+ * If you do not already have a reference to a folio, you can attempt to
+ * get one using this function. It may fail if, for example, the folio
+ * has been freed since you found a pointer to it, or it is frozen for
+ * the purposes of splitting or migration.
+ *
+ * Return: True if the reference count was successfully incremented.
+ */
+static inline bool folio_try_get(struct folio *folio)
+{
+ return folio_ref_add_unless(folio, 1, 0);
+}
+
+static inline bool folio_ref_try_add(struct folio *folio, int count)
+{
+ return folio_ref_add_unless(folio, count, 0);
+}
+
static inline int page_ref_freeze(struct page *page, int count)
{
int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
@@ -179,6 +278,11 @@ static inline int page_ref_freeze(struct page *page, int count)
return ret;
}
+static inline int folio_ref_freeze(struct folio *folio, int count)
+{
+ return page_ref_freeze(&folio->page, count);
+}
+
static inline void page_ref_unfreeze(struct page *page, int count)
{
VM_BUG_ON_PAGE(page_count(page) != 0, page);
@@ -189,4 +293,8 @@ static inline void page_ref_unfreeze(struct page *page, int count)
__page_ref_unfreeze(page, count);
}
+static inline void folio_ref_unfreeze(struct folio *folio, int count)
+{
+ page_ref_unfreeze(&folio->page, count);
+}
#endif
diff --git a/include/linux/page_reporting.h b/include/linux/page_reporting.h
index 3b99e0ec24f2..fe648dfa3a7c 100644
--- a/include/linux/page_reporting.h
+++ b/include/linux/page_reporting.h
@@ -18,6 +18,9 @@ struct page_reporting_dev_info {
/* Current state of page reporting */
atomic_t state;
+
+ /* Minimal order of page reporting */
+ unsigned int order;
};
/* Tear-down and bring-up for page reporting devices */
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
new file mode 100644
index 000000000000..289620d4aad3
--- /dev/null
+++ b/include/linux/page_table_check.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2021, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+#ifndef __LINUX_PAGE_TABLE_CHECK_H
+#define __LINUX_PAGE_TABLE_CHECK_H
+
+#ifdef CONFIG_PAGE_TABLE_CHECK
+#include <linux/jump_label.h>
+
+extern struct static_key_true page_table_check_disabled;
+extern struct page_ext_operations page_table_check_ops;
+
+void __page_table_check_zero(struct page *page, unsigned int order);
+void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte);
+void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd);
+void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
+void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
+ unsigned int nr);
+void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd,
+ unsigned int nr);
+void __page_table_check_puds_set(struct mm_struct *mm, pud_t *pudp, pud_t pud,
+ unsigned int nr);
+void __page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd);
+
+static inline void page_table_check_alloc(struct page *page, unsigned int order)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_zero(page, order);
+}
+
+static inline void page_table_check_free(struct page *page, unsigned int order)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_zero(page, order);
+}
+
+static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pte_clear(mm, pte);
+}
+
+static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pmd_clear(mm, pmd);
+}
+
+static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pud_clear(mm, pud);
+}
+
+static inline void page_table_check_ptes_set(struct mm_struct *mm,
+ pte_t *ptep, pte_t pte, unsigned int nr)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_ptes_set(mm, ptep, pte, nr);
+}
+
+static inline void page_table_check_pmds_set(struct mm_struct *mm,
+ pmd_t *pmdp, pmd_t pmd, unsigned int nr)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pmds_set(mm, pmdp, pmd, nr);
+}
+
+static inline void page_table_check_puds_set(struct mm_struct *mm,
+ pud_t *pudp, pud_t pud, unsigned int nr)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_puds_set(mm, pudp, pud, nr);
+}
+
+static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pte_clear_range(mm, addr, pmd);
+}
+
+#else
+
+static inline void page_table_check_alloc(struct page *page, unsigned int order)
+{
+}
+
+static inline void page_table_check_free(struct page *page, unsigned int order)
+{
+}
+
+static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
+{
+}
+
+static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
+{
+}
+
+static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
+{
+}
+
+static inline void page_table_check_ptes_set(struct mm_struct *mm,
+ pte_t *ptep, pte_t pte, unsigned int nr)
+{
+}
+
+static inline void page_table_check_pmds_set(struct mm_struct *mm,
+ pmd_t *pmdp, pmd_t pmd, unsigned int nr)
+{
+}
+
+static inline void page_table_check_puds_set(struct mm_struct *mm,
+ pud_t *pudp, pud_t pud, unsigned int nr)
+{
+}
+
+static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd)
+{
+}
+
+#endif /* CONFIG_PAGE_TABLE_CHECK */
+
+#define page_table_check_pmd_set(mm, pmdp, pmd) page_table_check_pmds_set(mm, pmdp, pmd, 1)
+#define page_table_check_pud_set(mm, pudp, pud) page_table_check_puds_set(mm, pudp, pud, 1)
+
+#endif /* __LINUX_PAGE_TABLE_CHECK_H */
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index fff52ad370c1..e046278a01fa 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -13,22 +13,38 @@
#include <linux/types.h>
-#define PB_migratetype_bits 3
/* Bit indices that affect a whole block of pages */
enum pageblock_bits {
- PB_migrate,
- PB_migrate_end = PB_migrate + PB_migratetype_bits - 1,
- /* 3 bits required for migrate types */
- PB_migrate_skip,/* If set the block is skipped by compaction */
+ PB_migrate_0,
+ PB_migrate_1,
+ PB_migrate_2,
+ PB_compact_skip,/* If set the block is skipped by compaction */
+#ifdef CONFIG_MEMORY_ISOLATION
+ /*
+ * Pageblock isolation is represented with a separate bit, so that
+ * the migratetype of a block is not overwritten by isolation.
+ */
+ PB_migrate_isolate, /* If set the block is isolated */
+#endif
/*
* Assume the bits will always align on a word. If this assumption
* changes then get/set pageblock needs updating.
*/
- NR_PAGEBLOCK_BITS
+ __NR_PAGEBLOCK_BITS
};
-#ifdef CONFIG_HUGETLB_PAGE
+#define NR_PAGEBLOCK_BITS (roundup_pow_of_two(__NR_PAGEBLOCK_BITS))
+
+#define MIGRATETYPE_MASK (BIT(PB_migrate_0)|BIT(PB_migrate_1)|BIT(PB_migrate_2))
+
+#ifdef CONFIG_MEMORY_ISOLATION
+#define MIGRATETYPE_AND_ISO_MASK (MIGRATETYPE_MASK | BIT(PB_migrate_isolate))
+#else
+#define MIGRATETYPE_AND_ISO_MASK MIGRATETYPE_MASK
+#endif
+
+#if defined(CONFIG_HUGETLB_PAGE)
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -37,44 +53,51 @@ extern unsigned int pageblock_order;
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
-/* Huge pages are a constant size */
-#define pageblock_order HUGETLB_PAGE_ORDER
+/*
+ * Huge pages are a constant size, but don't exceed the maximum allocation
+ * granularity.
+ */
+#define pageblock_order MIN_T(unsigned int, HUGETLB_PAGE_ORDER, PAGE_BLOCK_MAX_ORDER)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
-#else /* CONFIG_HUGETLB_PAGE */
+#elif defined(CONFIG_TRANSPARENT_HUGEPAGE)
+
+#define pageblock_order MIN_T(unsigned int, HPAGE_PMD_ORDER, PAGE_BLOCK_MAX_ORDER)
-/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
-#define pageblock_order (MAX_ORDER-1)
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/* If huge pages are not used, group by PAGE_BLOCK_MAX_ORDER */
+#define pageblock_order PAGE_BLOCK_MAX_ORDER
#endif /* CONFIG_HUGETLB_PAGE */
#define pageblock_nr_pages (1UL << pageblock_order)
+#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
+#define pageblock_aligned(pfn) IS_ALIGNED((pfn), pageblock_nr_pages)
+#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
+#define pageblock_end_pfn(pfn) ALIGN((pfn) + 1, pageblock_nr_pages)
/* Forward declaration */
struct page;
-unsigned long get_pfnblock_flags_mask(struct page *page,
- unsigned long pfn,
- unsigned long mask);
-
-void set_pfnblock_flags_mask(struct page *page,
- unsigned long flags,
- unsigned long pfn,
- unsigned long mask);
+enum migratetype get_pfnblock_migratetype(const struct page *page,
+ unsigned long pfn);
+bool get_pfnblock_bit(const struct page *page, unsigned long pfn,
+ enum pageblock_bits pb_bit);
+void set_pfnblock_bit(const struct page *page, unsigned long pfn,
+ enum pageblock_bits pb_bit);
+void clear_pfnblock_bit(const struct page *page, unsigned long pfn,
+ enum pageblock_bits pb_bit);
/* Declarations for getting and setting flags. See mm/page_alloc.c */
#ifdef CONFIG_COMPACTION
#define get_pageblock_skip(page) \
- get_pfnblock_flags_mask(page, page_to_pfn(page), \
- (1 << (PB_migrate_skip)))
+ get_pfnblock_bit(page, page_to_pfn(page), PB_compact_skip)
#define clear_pageblock_skip(page) \
- set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \
- (1 << PB_migrate_skip))
+ clear_pfnblock_bit(page, page_to_pfn(page), PB_compact_skip)
#define set_pageblock_skip(page) \
- set_pfnblock_flags_mask(page, (1 << PB_migrate_skip), \
- page_to_pfn(page), \
- (1 << PB_migrate_skip))
+ set_pfnblock_bit(page, page_to_pfn(page), PB_compact_skip)
#else
static inline bool get_pageblock_skip(struct page *page)
{
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e89df447fae3..31a848485ad9 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -16,14 +16,183 @@
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>
-struct pagevec;
+struct folio_batch;
-static inline bool mapping_empty(struct address_space *mapping)
+unsigned long invalidate_mapping_pages(struct address_space *mapping,
+ pgoff_t start, pgoff_t end);
+
+static inline void invalidate_remote_inode(struct inode *inode)
+{
+ if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode))
+ invalidate_mapping_pages(inode->i_mapping, 0, -1);
+}
+int invalidate_inode_pages2(struct address_space *mapping);
+int invalidate_inode_pages2_range(struct address_space *mapping,
+ pgoff_t start, pgoff_t end);
+int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
+void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);
+int filemap_invalidate_pages(struct address_space *mapping,
+ loff_t pos, loff_t end, bool nowait);
+
+int write_inode_now(struct inode *, int sync);
+int filemap_fdatawrite(struct address_space *);
+int filemap_flush(struct address_space *);
+int filemap_flush_nr(struct address_space *mapping, long *nr_to_write);
+int filemap_fdatawait_keep_errors(struct address_space *mapping);
+int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
+int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
+ loff_t start_byte, loff_t end_byte);
+int filemap_invalidate_inode(struct inode *inode, bool flush,
+ loff_t start, loff_t end);
+
+static inline int filemap_fdatawait(struct address_space *mapping)
+{
+ return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
+}
+
+bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
+int filemap_write_and_wait_range(struct address_space *mapping,
+ loff_t lstart, loff_t lend);
+int filemap_fdatawrite_range(struct address_space *mapping,
+ loff_t start, loff_t end);
+int filemap_check_errors(struct address_space *mapping);
+void __filemap_set_wb_err(struct address_space *mapping, int err);
+int kiocb_write_and_wait(struct kiocb *iocb, size_t count);
+
+static inline int filemap_write_and_wait(struct address_space *mapping)
+{
+ return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
+}
+
+/**
+ * filemap_set_wb_err - set a writeback error on an address_space
+ * @mapping: mapping in which to set writeback error
+ * @err: error to be set in mapping
+ *
+ * When writeback fails in some way, we must record that error so that
+ * userspace can be informed when fsync and the like are called. We endeavor
+ * to report errors on any file that was open at the time of the error. Some
+ * internal callers also need to know when writeback errors have occurred.
+ *
+ * When a writeback error occurs, most filesystems will want to call
+ * filemap_set_wb_err to record the error in the mapping so that it will be
+ * automatically reported whenever fsync is called on the file.
+ */
+static inline void filemap_set_wb_err(struct address_space *mapping, int err)
+{
+ /* Fastpath for common case of no error */
+ if (unlikely(err))
+ __filemap_set_wb_err(mapping, err);
+}
+
+/**
+ * filemap_check_wb_err - has an error occurred since the mark was sampled?
+ * @mapping: mapping to check for writeback errors
+ * @since: previously-sampled errseq_t
+ *
+ * Grab the errseq_t value from the mapping, and see if it has changed "since"
+ * the given value was sampled.
+ *
+ * If it has then report the latest error set, otherwise return 0.
+ */
+static inline int filemap_check_wb_err(struct address_space *mapping,
+ errseq_t since)
+{
+ return errseq_check(&mapping->wb_err, since);
+}
+
+/**
+ * filemap_sample_wb_err - sample the current errseq_t to test for later errors
+ * @mapping: mapping to be sampled
+ *
+ * Writeback errors are always reported relative to a particular sample point
+ * in the past. This function provides those sample points.
+ */
+static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
+{
+ return errseq_sample(&mapping->wb_err);
+}
+
+/**
+ * file_sample_sb_err - sample the current errseq_t to test for later errors
+ * @file: file pointer to be sampled
+ *
+ * Grab the most current superblock-level errseq_t value for the given
+ * struct file.
+ */
+static inline errseq_t file_sample_sb_err(struct file *file)
+{
+ return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
+}
+
+/*
+ * Flush file data before changing attributes. Caller must hold any locks
+ * required to prevent further writes to this file until we're done setting
+ * flags.
+ */
+static inline int inode_drain_writes(struct inode *inode)
+{
+ inode_dio_wait(inode);
+ return filemap_write_and_wait(inode->i_mapping);
+}
+
+static inline bool mapping_empty(const struct address_space *mapping)
{
return xa_empty(&mapping->i_pages);
}
/*
+ * mapping_shrinkable - test if page cache state allows inode reclaim
+ * @mapping: the page cache mapping
+ *
+ * This checks the mapping's cache state for the purpose of inode
+ * reclaim and LRU management.
+ *
+ * The caller is expected to hold the i_lock, but is not required to
+ * hold the i_pages lock, which usually protects cache state. That's
+ * because the i_lock and the list_lru lock that protect the inode and
+ * its LRU state don't nest inside the irq-safe i_pages lock.
+ *
+ * Cache deletions are performed under the i_lock, which ensures that
+ * when an inode goes empty, it will reliably get queued on the LRU.
+ *
+ * Cache additions do not acquire the i_lock and may race with this
+ * check, in which case we'll report the inode as shrinkable when it
+ * has cache pages. This is okay: the shrinker also checks the
+ * refcount and the referenced bit, which will be elevated or set in
+ * the process of adding new cache pages to an inode.
+ */
+static inline bool mapping_shrinkable(const struct address_space *mapping)
+{
+ void *head;
+
+ /*
+ * On highmem systems, there could be lowmem pressure from the
+ * inodes before there is highmem pressure from the page
+ * cache. Make inodes shrinkable regardless of cache state.
+ */
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ return true;
+
+ /* Cache completely empty? Shrink away. */
+ head = rcu_access_pointer(mapping->i_pages.xa_head);
+ if (!head)
+ return true;
+
+ /*
+ * The xarray stores single offset-0 entries directly in the
+ * head pointer, which allows non-resident page cache entries
+ * to escape the shadow shrinker's list of xarray nodes. The
+ * inode shrinker needs to pick them up under memory pressure.
+ */
+ if (!xa_is_node(head) && xa_is_value(head))
+ return true;
+
+ return false;
+}
+
+/*
* Bits in mapping->flags.
*/
enum mapping_flags {
@@ -34,9 +203,24 @@ enum mapping_flags {
AS_EXITING = 4, /* final truncate in progress */
/* writeback related tags are not used */
AS_NO_WRITEBACK_TAGS = 5,
- AS_THP_SUPPORT = 6, /* THPs supported */
+ AS_RELEASE_ALWAYS = 6, /* Call ->release_folio(), even if no private data */
+ AS_STABLE_WRITES = 7, /* must wait for writeback before modifying
+ folio contents */
+ AS_INACCESSIBLE = 8, /* Do not attempt direct R/W access to the mapping */
+ AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM = 9,
+ AS_KERNEL_FILE = 10, /* mapping for a fake kernel file that shouldn't
+ account usage to user cgroups */
+ /* Bits 16-25 are used for FOLIO_ORDER */
+ AS_FOLIO_ORDER_BITS = 5,
+ AS_FOLIO_ORDER_MIN = 16,
+ AS_FOLIO_ORDER_MAX = AS_FOLIO_ORDER_MIN + AS_FOLIO_ORDER_BITS,
};
+#define AS_FOLIO_ORDER_BITS_MASK ((1u << AS_FOLIO_ORDER_BITS) - 1)
+#define AS_FOLIO_ORDER_MIN_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MIN)
+#define AS_FOLIO_ORDER_MAX_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MAX)
+#define AS_FOLIO_ORDER_MASK (AS_FOLIO_ORDER_MIN_MASK | AS_FOLIO_ORDER_MAX_MASK)
+
/**
* mapping_set_error - record a writeback error in the address_space
* @mapping: the mapping in which an error should be set
@@ -80,7 +264,7 @@ static inline void mapping_clear_unevictable(struct address_space *mapping)
clear_bit(AS_UNEVICTABLE, &mapping->flags);
}
-static inline bool mapping_unevictable(struct address_space *mapping)
+static inline bool mapping_unevictable(const struct address_space *mapping)
{
return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}
@@ -90,7 +274,7 @@ static inline void mapping_set_exiting(struct address_space *mapping)
set_bit(AS_EXITING, &mapping->flags);
}
-static inline int mapping_exiting(struct address_space *mapping)
+static inline int mapping_exiting(const struct address_space *mapping)
{
return test_bit(AS_EXITING, &mapping->flags);
}
@@ -100,18 +284,74 @@ static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
-static inline int mapping_use_writeback_tags(struct address_space *mapping)
+static inline int mapping_use_writeback_tags(const struct address_space *mapping)
{
return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
-static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
+static inline bool mapping_release_always(const struct address_space *mapping)
+{
+ return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+}
+
+static inline void mapping_set_release_always(struct address_space *mapping)
+{
+ set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+}
+
+static inline void mapping_clear_release_always(struct address_space *mapping)
+{
+ clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+}
+
+static inline bool mapping_stable_writes(const struct address_space *mapping)
+{
+ return test_bit(AS_STABLE_WRITES, &mapping->flags);
+}
+
+static inline void mapping_set_stable_writes(struct address_space *mapping)
+{
+ set_bit(AS_STABLE_WRITES, &mapping->flags);
+}
+
+static inline void mapping_clear_stable_writes(struct address_space *mapping)
+{
+ clear_bit(AS_STABLE_WRITES, &mapping->flags);
+}
+
+static inline void mapping_set_inaccessible(struct address_space *mapping)
+{
+ /*
+ * It's expected inaccessible mappings are also unevictable. Compaction
+ * migrate scanner (isolate_migratepages_block()) relies on this to
+ * reduce page locking.
+ */
+ set_bit(AS_UNEVICTABLE, &mapping->flags);
+ set_bit(AS_INACCESSIBLE, &mapping->flags);
+}
+
+static inline bool mapping_inaccessible(const struct address_space *mapping)
+{
+ return test_bit(AS_INACCESSIBLE, &mapping->flags);
+}
+
+static inline void mapping_set_writeback_may_deadlock_on_reclaim(struct address_space *mapping)
+{
+ set_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
+}
+
+static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct address_space *mapping)
+{
+ return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
+}
+
+static inline gfp_t mapping_gfp_mask(const struct address_space *mapping)
{
return mapping->gfp_mask;
}
/* Restricts the given gfp_mask to what the mapping allows. */
-static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
+static inline gfp_t mapping_gfp_constraint(const struct address_space *mapping,
gfp_t gfp_mask)
{
return mapping_gfp_mask(mapping) & gfp_mask;
@@ -126,12 +366,160 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
m->gfp_mask = mask;
}
-static inline bool mapping_thp_support(struct address_space *mapping)
+/*
+ * There are some parts of the kernel which assume that PMD entries
+ * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
+ * limit the maximum allocation order to PMD size. I'm not aware of any
+ * assumptions about maximum order if THP are disabled, but 8 seems like
+ * a good order (that's 1MB if you're using 4kB pages)
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define PREFERRED_MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
+#else
+#define PREFERRED_MAX_PAGECACHE_ORDER 8
+#endif
+
+/*
+ * xas_split_alloc() does not support arbitrary orders. This implies no
+ * 512MB THP on ARM64 with 64KB base page size.
+ */
+#define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1)
+#define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER)
+
+/*
+ * mapping_max_folio_size_supported() - Check the max folio size supported
+ *
+ * The filesystem should call this function at mount time if there is a
+ * requirement on the folio mapping size in the page cache.
+ */
+static inline size_t mapping_max_folio_size_supported(void)
+{
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return 1U << (PAGE_SHIFT + MAX_PAGECACHE_ORDER);
+ return PAGE_SIZE;
+}
+
+/*
+ * mapping_set_folio_order_range() - Set the orders supported by a file.
+ * @mapping: The address space of the file.
+ * @min: Minimum folio order (between 0-MAX_PAGECACHE_ORDER inclusive).
+ * @max: Maximum folio order (between @min-MAX_PAGECACHE_ORDER inclusive).
+ *
+ * The filesystem should call this function in its inode constructor to
+ * indicate which base size (min) and maximum size (max) of folio the VFS
+ * can use to cache the contents of the file. This should only be used
+ * if the filesystem needs special handling of folio sizes (ie there is
+ * something the core cannot know).
+ * Do not tune it based on, eg, i_size.
+ *
+ * Context: This should not be called while the inode is active as it
+ * is non-atomic.
+ */
+static inline void mapping_set_folio_order_range(struct address_space *mapping,
+ unsigned int min,
+ unsigned int max)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return;
+
+ if (min > MAX_PAGECACHE_ORDER)
+ min = MAX_PAGECACHE_ORDER;
+
+ if (max > MAX_PAGECACHE_ORDER)
+ max = MAX_PAGECACHE_ORDER;
+
+ if (max < min)
+ max = min;
+
+ mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) |
+ (min << AS_FOLIO_ORDER_MIN) | (max << AS_FOLIO_ORDER_MAX);
+}
+
+static inline void mapping_set_folio_min_order(struct address_space *mapping,
+ unsigned int min)
+{
+ mapping_set_folio_order_range(mapping, min, MAX_PAGECACHE_ORDER);
+}
+
+/**
+ * mapping_set_large_folios() - Indicate the file supports large folios.
+ * @mapping: The address space of the file.
+ *
+ * The filesystem should call this function in its inode constructor to
+ * indicate that the VFS can use large folios to cache the contents of
+ * the file.
+ *
+ * Context: This should not be called while the inode is active as it
+ * is non-atomic.
+ */
+static inline void mapping_set_large_folios(struct address_space *mapping)
+{
+ mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER);
+}
+
+static inline unsigned int
+mapping_max_folio_order(const struct address_space *mapping)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return 0;
+ return (mapping->flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX;
+}
+
+static inline unsigned int
+mapping_min_folio_order(const struct address_space *mapping)
+{
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ return 0;
+ return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN;
+}
+
+static inline unsigned long
+mapping_min_folio_nrpages(const struct address_space *mapping)
+{
+ return 1UL << mapping_min_folio_order(mapping);
+}
+
+static inline unsigned long
+mapping_min_folio_nrbytes(const struct address_space *mapping)
+{
+ return mapping_min_folio_nrpages(mapping) << PAGE_SHIFT;
+}
+
+/**
+ * mapping_align_index() - Align index for this mapping.
+ * @mapping: The address_space.
+ * @index: The page index.
+ *
+ * The index of a folio must be naturally aligned. If you are adding a
+ * new folio to the page cache and need to know what index to give it,
+ * call this function.
+ */
+static inline pgoff_t mapping_align_index(const struct address_space *mapping,
+ pgoff_t index)
+{
+ return round_down(index, mapping_min_folio_nrpages(mapping));
+}
+
+/*
+ * Large folio support currently depends on THP. These dependencies are
+ * being worked on but are not yet fixed.
+ */
+static inline bool mapping_large_folio_support(const struct address_space *mapping)
{
- return test_bit(AS_THP_SUPPORT, &mapping->flags);
+ /* AS_FOLIO_ORDER is only reasonable for pagecache folios */
+ VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
+ "Anonymous mapping always supports large folio");
+
+ return mapping_max_folio_order(mapping) > 0;
}
-static inline int filemap_nr_thps(struct address_space *mapping)
+/* Return the maximum folio size for this pagecache mapping, in bytes. */
+static inline size_t mapping_max_folio_size(const struct address_space *mapping)
+{
+ return PAGE_SIZE << mapping_max_folio_order(mapping);
+}
+
+static inline int filemap_nr_thps(const struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
return atomic_read(&mapping->nr_thps);
@@ -143,171 +531,142 @@ static inline int filemap_nr_thps(struct address_space *mapping)
static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
- if (!mapping_thp_support(mapping))
+ if (!mapping_large_folio_support(mapping))
atomic_inc(&mapping->nr_thps);
#else
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}
static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
- if (!mapping_thp_support(mapping))
+ if (!mapping_large_folio_support(mapping))
atomic_dec(&mapping->nr_thps);
#else
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}
-void release_pages(struct page **pages, int nr);
+struct address_space *folio_mapping(const struct folio *folio);
-/*
- * For file cache pages, return the address_space, otherwise return NULL
+/**
+ * folio_flush_mapping - Find the file mapping this folio belongs to.
+ * @folio: The folio.
+ *
+ * For folios which are in the page cache, return the mapping that this
+ * page belongs to. Anonymous folios return NULL, even if they're in
+ * the swap cache. Other kinds of folio also return NULL.
+ *
+ * This is ONLY used by architecture cache flushing code. If you aren't
+ * writing cache flushing code, you want either folio_mapping() or
+ * folio_file_mapping().
*/
-static inline struct address_space *page_mapping_file(struct page *page)
+static inline struct address_space *folio_flush_mapping(struct folio *folio)
{
- if (unlikely(PageSwapCache(page)))
+ if (unlikely(folio_test_swapcache(folio)))
return NULL;
- return page_mapping(page);
-}
-
-/*
- * speculatively take a reference to a page.
- * If the page is free (_refcount == 0), then _refcount is untouched, and 0
- * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
- *
- * This function must be called inside the same rcu_read_lock() section as has
- * been used to lookup the page in the pagecache radix-tree (or page table):
- * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
- *
- * Unless an RCU grace period has passed, the count of all pages coming out
- * of the allocator must be considered unstable. page_count may return higher
- * than expected, and put_page must be able to do the right thing when the
- * page has been finished with, no matter what it is subsequently allocated
- * for (because put_page is what is used here to drop an invalid speculative
- * reference).
- *
- * This is the interesting part of the lockless pagecache (and lockless
- * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
- * has the following pattern:
- * 1. find page in radix tree
- * 2. conditionally increment refcount
- * 3. check the page is still in pagecache (if no, goto 1)
- *
- * Remove-side that cares about stability of _refcount (eg. reclaim) has the
- * following (with the i_pages lock held):
- * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
- * B. remove page from pagecache
- * C. free the page
- *
- * There are 2 critical interleavings that matter:
- * - 2 runs before A: in this case, A sees elevated refcount and bails out
- * - A runs before 2: in this case, 2 sees zero refcount and retries;
- * subsequently, B will complete and 1 will find no page, causing the
- * lookup to return NULL.
- *
- * It is possible that between 1 and 2, the page is removed then the exact same
- * page is inserted into the same position in pagecache. That's OK: the
- * old find_get_page using a lock could equally have run before or after
- * such a re-insertion, depending on order that locks are granted.
- *
- * Lookups racing against pagecache insertion isn't a big problem: either 1
- * will find the page or it will not. Likewise, the old find_get_page could run
- * either before the insertion or afterwards, depending on timing.
- */
-static inline int __page_cache_add_speculative(struct page *page, int count)
-{
-#ifdef CONFIG_TINY_RCU
-# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic() && !irqs_disabled());
-# endif
- /*
- * Preempt must be disabled here - we rely on rcu_read_lock doing
- * this for us.
- *
- * Pagecache won't be truncated from interrupt context, so if we have
- * found a page in the radix tree here, we have pinned its refcount by
- * disabling preempt, and hence no need for the "speculative get" that
- * SMP requires.
- */
- VM_BUG_ON_PAGE(page_count(page) == 0, page);
- page_ref_add(page, count);
-#else
- if (unlikely(!page_ref_add_unless(page, count, 0))) {
- /*
- * Either the page has been freed, or will be freed.
- * In either case, retry here and the caller should
- * do the right thing (see comments above).
- */
- return 0;
- }
-#endif
- VM_BUG_ON_PAGE(PageTail(page), page);
-
- return 1;
+ return folio_mapping(folio);
}
-static inline int page_cache_get_speculative(struct page *page)
+/**
+ * folio_inode - Get the host inode for this folio.
+ * @folio: The folio.
+ *
+ * For folios which are in the page cache, return the inode that this folio
+ * belongs to.
+ *
+ * Do not call this for folios which aren't in the page cache.
+ */
+static inline struct inode *folio_inode(struct folio *folio)
{
- return __page_cache_add_speculative(page, 1);
+ return folio->mapping->host;
}
-static inline int page_cache_add_speculative(struct page *page, int count)
+/**
+ * folio_attach_private - Attach private data to a folio.
+ * @folio: Folio to attach data to.
+ * @data: Data to attach to folio.
+ *
+ * Attaching private data to a folio increments the folio's reference count.
+ * The data must be detached before the folio can be freed.
+ */
+static inline void folio_attach_private(struct folio *folio, void *data)
{
- return __page_cache_add_speculative(page, count);
+ folio_get(folio);
+ folio->private = data;
+ folio_set_private(folio);
}
/**
- * attach_page_private - Attach private data to a page.
- * @page: Page to attach data to.
- * @data: Data to attach to page.
+ * folio_change_private - Change private data on a folio.
+ * @folio: Folio to change the data on.
+ * @data: Data to set on the folio.
+ *
+ * Change the private data attached to a folio and return the old
+ * data. The folio must previously have had data attached and the data
+ * must be detached before the folio can be freed.
*
- * Attaching private data to a page increments the page's reference count.
- * The data must be detached before the page will be freed.
+ * Return: Data that was previously attached to the folio.
*/
-static inline void attach_page_private(struct page *page, void *data)
+static inline void *folio_change_private(struct folio *folio, void *data)
{
- get_page(page);
- set_page_private(page, (unsigned long)data);
- SetPagePrivate(page);
+ void *old = folio_get_private(folio);
+
+ folio->private = data;
+ return old;
}
/**
- * detach_page_private - Detach private data from a page.
- * @page: Page to detach data from.
+ * folio_detach_private - Detach private data from a folio.
+ * @folio: Folio to detach data from.
*
- * Removes the data that was previously attached to the page and decrements
+ * Removes the data that was previously attached to the folio and decrements
 * the refcount on the folio.
*
- * Return: Data that was attached to the page.
+ * Return: Data that was attached to the folio.
*/
-static inline void *detach_page_private(struct page *page)
+static inline void *folio_detach_private(struct folio *folio)
{
- void *data = (void *)page_private(page);
+ void *data = folio_get_private(folio);
- if (!PagePrivate(page))
+ if (!folio_test_private(folio))
return NULL;
- ClearPagePrivate(page);
- set_page_private(page, 0);
- put_page(page);
+ folio_clear_private(folio);
+ folio->private = NULL;
+ folio_put(folio);
return data;
}
+static inline void attach_page_private(struct page *page, void *data)
+{
+ folio_attach_private(page_folio(page), data);
+}
+
+static inline void *detach_page_private(struct page *page)
+{
+ return folio_detach_private(page_folio(page));
+}
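
A minimal usage sketch of the lifecycle these helpers imply (my_fs_data and both functions are hypothetical, assuming <linux/slab.h>):

struct my_fs_data {
	int state;
};

static void my_fs_attach(struct folio *folio)
{
	struct my_fs_data *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return;
	folio_attach_private(folio, d);	/* takes a folio reference */
}

static void my_fs_detach(struct folio *folio)
{
	kfree(folio_detach_private(folio));	/* drops that reference */
}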
+
#ifdef CONFIG_NUMA
-extern struct page *__page_cache_alloc(gfp_t gfp);
+struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order,
+ struct mempolicy *policy);
#else
-static inline struct page *__page_cache_alloc(gfp_t gfp)
+static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order,
+ struct mempolicy *policy)
{
- return alloc_pages(gfp, 0);
+ return folio_alloc_noprof(gfp, order);
}
#endif
-static inline struct page *page_cache_alloc(struct address_space *x)
+#define filemap_alloc_folio(...) \
+ alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__))
+
+static inline struct page *__page_cache_alloc(gfp_t gfp)
{
- return __page_cache_alloc(mapping_gfp_mask(x));
+ return &filemap_alloc_folio(gfp, 0, NULL)->page;
}
static inline gfp_t readahead_gfp_mask(struct address_space *x)
@@ -315,25 +674,176 @@ static inline gfp_t readahead_gfp_mask(struct address_space *x)
return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}
-typedef int filler_t(void *, struct page *);
+typedef int filler_t(struct file *, struct folio *);
pgoff_t page_cache_next_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan);
-#define FGP_ACCESSED 0x00000001
-#define FGP_LOCK 0x00000002
-#define FGP_CREAT 0x00000004
-#define FGP_WRITE 0x00000008
-#define FGP_NOFS 0x00000010
-#define FGP_NOWAIT 0x00000020
-#define FGP_FOR_MMAP 0x00000040
-#define FGP_HEAD 0x00000080
-#define FGP_ENTRY 0x00000100
+/**
+ * typedef fgf_t - Flags for getting folios from the page cache.
+ *
+ * Most users of the page cache will not need to use these flags;
+ * there are convenience functions such as filemap_get_folio() and
+ * filemap_lock_folio(). For users which need more control over exactly
+ * what is done with the folios, these flags to __filemap_get_folio()
+ * are available.
+ *
+ * * %FGP_ACCESSED - The folio will be marked accessed.
+ * * %FGP_LOCK - The folio is returned locked.
+ * * %FGP_CREAT - If no folio is present then a new folio is allocated,
+ * added to the page cache and the VM's LRU list. The folio is
+ * returned locked.
+ * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
+ * folio is already in cache. If the folio was allocated, unlock it
+ * before returning so the caller can do the same dance.
+ * * %FGP_WRITE - The folio will be written to by the caller.
+ * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
+ * * %FGP_NOWAIT - Don't block on the folio lock.
+ * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
+ * * %FGP_DONTCACHE - Uncached buffered IO
+ * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
+ * implementation.
+ */
+typedef unsigned int __bitwise fgf_t;
+
+#define FGP_ACCESSED ((__force fgf_t)0x00000001)
+#define FGP_LOCK ((__force fgf_t)0x00000002)
+#define FGP_CREAT ((__force fgf_t)0x00000004)
+#define FGP_WRITE ((__force fgf_t)0x00000008)
+#define FGP_NOFS ((__force fgf_t)0x00000010)
+#define FGP_NOWAIT ((__force fgf_t)0x00000020)
+#define FGP_FOR_MMAP ((__force fgf_t)0x00000040)
+#define FGP_STABLE ((__force fgf_t)0x00000080)
+#define FGP_DONTCACHE ((__force fgf_t)0x00000100)
+#define FGF_GET_ORDER(fgf) (((__force unsigned)fgf) >> 26) /* top 6 bits */
+
+#define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)
+
+static inline unsigned int filemap_get_order(size_t size)
+{
+ unsigned int shift = ilog2(size);
+
+ if (shift <= PAGE_SHIFT)
+ return 0;
+
+ return shift - PAGE_SHIFT;
+}
+
+/**
+ * fgf_set_order - Encode a length in the fgf_t flags.
+ * @size: The suggested size of the folio to create.
+ *
+ * The caller of __filemap_get_folio() can use this to suggest a preferred
+ * size for the folio that is created. If there is already a folio at
+ * the index, it will be returned, no matter what its size. If a folio
+ * is freshly created, it may be of a different size than requested
+ * due to alignment constraints, memory pressure, or the presence of
+ * other folios at nearby indices.
+ */
+static inline fgf_t fgf_set_order(size_t size)
+{
+ unsigned int order = filemap_get_order(size);
+
+ if (!order)
+ return 0;
+ return (__force fgf_t)(order << 26);
+}
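
As a sketch of how a caller combines these flags (SZ_64K comes from <linux/sizes.h>; order 4 assumes 4KiB pages):

/* Hint that a 64KiB folio would suit this write; smaller is still legal. */
fgf_t fgf = FGP_WRITEBEGIN | fgf_set_order(SZ_64K);
struct folio *folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
					  fgf, mapping_gfp_mask(mapping));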
+
+void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
+struct folio *__filemap_get_folio_mpol(struct address_space *mapping,
+ pgoff_t index, fgf_t fgf_flags, gfp_t gfp, struct mempolicy *policy);
+struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
+ fgf_t fgp_flags, gfp_t gfp);
+
+static inline struct folio *__filemap_get_folio(struct address_space *mapping,
+ pgoff_t index, fgf_t fgf_flags, gfp_t gfp)
+{
+ return __filemap_get_folio_mpol(mapping, index, fgf_flags, gfp, NULL);
+}
+
+/**
+ * write_begin_get_folio - Get folio for write_begin with flags.
+ * @iocb: The kiocb passed from write_begin (may be NULL).
+ * @mapping: The address space to search.
+ * @index: The page cache index.
+ * @len: Length of data being written.
+ *
+ * This is a helper for filesystem write_begin() implementations.
+ * It wraps __filemap_get_folio(), setting appropriate flags in
+ * the write begin context.
+ *
+ * Return: A folio or an ERR_PTR.
+ */
+static inline struct folio *write_begin_get_folio(const struct kiocb *iocb,
+ struct address_space *mapping, pgoff_t index, size_t len)
+{
+ fgf_t fgp_flags = FGP_WRITEBEGIN;
+
+ fgp_flags |= fgf_set_order(len);
+
+ if (iocb && iocb->ki_flags & IOCB_DONTCACHE)
+ fgp_flags |= FGP_DONTCACHE;
+
+ return __filemap_get_folio(mapping, index, fgp_flags,
+ mapping_gfp_mask(mapping));
+}
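
Inside a filesystem's write_begin() implementation, the lookup step then reduces to a sketch like:

struct folio *folio = write_begin_get_folio(iocb, mapping,
					    pos >> PAGE_SHIFT, len);

if (IS_ERR(folio))
	return PTR_ERR(folio);
/* FGP_WRITEBEGIN includes FGP_LOCK, so the folio arrives locked. */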
+
+/**
+ * filemap_get_folio - Find and get a folio.
+ * @mapping: The address_space to search.
+ * @index: The page index.
+ *
+ * Looks up the page cache entry at @mapping & @index. If a folio is
+ * present, it is returned with an increased refcount.
+ *
+ * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
+ * this index. Will not return a shadow, swap or DAX entry.
+ */
+static inline struct folio *filemap_get_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return __filemap_get_folio(mapping, index, 0, 0);
+}
+
+/**
+ * filemap_lock_folio - Find and lock a folio.
+ * @mapping: The address_space to search.
+ * @index: The page index.
+ *
+ * Looks up the page cache entry at @mapping & @index. If a folio is
+ * present, it is returned locked with an increased refcount.
+ *
+ * Context: May sleep.
+ * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
+ * this index. Will not return a shadow, swap or DAX entry.
+ */
+static inline struct folio *filemap_lock_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
+}
-struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
- int fgp_flags, gfp_t cache_gfp_mask);
+/**
+ * filemap_grab_folio - grab a folio from the page cache
+ * @mapping: The address space to search
+ * @index: The page index
+ *
+ * Looks up the page cache entry at @mapping & @index. If no folio is found,
+ * a new folio is created. The folio is locked, marked as accessed, and
+ * returned.
+ *
+ * Return: A found or created folio, or ERR_PTR(-ENOMEM) if no folio was
+ * found and one could not be created.
+ */
+static inline struct folio *filemap_grab_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ mapping_gfp_mask(mapping));
+}
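
Note that, unlike the old pagecache_get_page(), these lookups return an ERR_PTR rather than NULL, so callers follow a pattern like this sketch:

struct folio *folio = filemap_get_folio(mapping, index);

if (IS_ERR(folio))
	return PTR_ERR(folio);	/* -ENOENT: nothing cached at @index */
/* ... use the folio ... */
folio_put(folio);		/* drop the lookup reference */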
/**
* find_get_page - find and get a page reference
@@ -352,7 +862,7 @@ static inline struct page *find_get_page(struct address_space *mapping,
}
static inline struct page *find_get_page_flags(struct address_space *mapping,
- pgoff_t offset, int fgp_flags)
+ pgoff_t offset, fgf_t fgp_flags)
{
return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
@@ -377,25 +887,6 @@ static inline struct page *find_lock_page(struct address_space *mapping,
}
/**
- * find_lock_head - Locate, pin and lock a pagecache page.
- * @mapping: The address_space to search.
- * @index: The page index.
- *
- * Looks up the page cache entry at @mapping & @index. If there is a
- * page cache page, its head page is returned locked and with an increased
- * refcount.
- *
- * Context: May sleep.
- * Return: A struct page which is !PageTail, or %NULL if there is no page
- * in the cache for this index.
- */
-static inline struct page *find_lock_head(struct address_space *mapping,
- pgoff_t index)
-{
- return pagecache_get_page(mapping, index, FGP_LOCK | FGP_HEAD, 0);
-}
-
-/**
* find_or_create_page - locate or add a pagecache page
* @mapping: the page's address_space
* @index: the page's index into the mapping
@@ -427,7 +918,8 @@ static inline struct page *find_or_create_page(struct address_space *mapping,
* @mapping: target address_space
* @index: the page index
*
- * Same as grab_cache_page(), but do not wait if the page is unavailable.
+ * Returns the locked page at the given index in the given cache, creating
+ * it if needed, but does not wait if the page is already locked or if
+ * memory would have to be reclaimed.
* This is intended for speculative data generators, where the data can
* be regenerated if the page couldn't be grabbed. This routine should
* be safe to call while holding the lock for another page.
@@ -443,108 +935,116 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
mapping_gfp_mask(mapping));
}
-/* Does this page contain this index? */
-static inline bool thp_contains(struct page *head, pgoff_t index)
-{
- /* HugeTLBfs indexes the page cache in units of hpage_size */
- if (PageHuge(head))
- return head->index == index;
- return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL));
-}
-
-/*
- * Given the page we found in the page cache, return the page corresponding
- * to this index in the file
+/**
+ * folio_next_index - Get the index of the next folio.
+ * @folio: The current folio.
+ *
+ * Return: The index of the folio which follows this folio in the file.
*/
-static inline struct page *find_subpage(struct page *head, pgoff_t index)
+static inline pgoff_t folio_next_index(const struct folio *folio)
{
- /* HugeTLBfs wants the head page regardless */
- if (PageHuge(head))
- return head;
-
- return head + (index & (thp_nr_pages(head) - 1));
+ return folio->index + folio_nr_pages(folio);
}
-unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
- pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
-unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
- pgoff_t end, unsigned int nr_pages,
- struct page **pages);
-static inline unsigned find_get_pages(struct address_space *mapping,
- pgoff_t *start, unsigned int nr_pages,
- struct page **pages)
-{
- return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
- pages);
-}
-unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
- unsigned int nr_pages, struct page **pages);
-unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
- pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
- struct page **pages);
-static inline unsigned find_get_pages_tag(struct address_space *mapping,
- pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
- struct page **pages)
+/**
+ * folio_next_pos - Get the file position of the next folio.
+ * @folio: The current folio.
+ *
+ * Return: The position of the folio which follows this folio in the file.
+ */
+static inline loff_t folio_next_pos(const struct folio *folio)
{
- return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
- nr_pages, pages);
+ return (loff_t)folio_next_index(folio) << PAGE_SHIFT;
}
-struct page *grab_cache_page_write_begin(struct address_space *mapping,
- pgoff_t index, unsigned flags);
-
-/*
- * Returns locked page at given index in given cache, creating it if needed.
+/**
+ * folio_file_page - The page for a particular index.
+ * @folio: The folio which contains this index.
+ * @index: The index we want to look up.
+ *
+ * Sometimes after looking up a folio in the page cache, we need to
+ * obtain the specific page for an index (eg a page fault).
+ *
+ * Return: The page containing the file data for this index.
*/
-static inline struct page *grab_cache_page(struct address_space *mapping,
- pgoff_t index)
+static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
- return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
+ return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}
-extern struct page * read_cache_page(struct address_space *mapping,
- pgoff_t index, filler_t *filler, void *data);
+/**
+ * folio_contains - Does this folio contain this index?
+ * @folio: The folio.
+ * @index: The page index within the file.
+ *
+ * Context: The caller should have the folio locked and must ensure that,
+ * e.g., shmem did not move this folio to the swap cache.
+ * Return: true or false.
+ */
+static inline bool folio_contains(const struct folio *folio, pgoff_t index)
+{
+ VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
+ return index - folio->index < folio_nr_pages(folio);
+}
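
A caller that has looked up and locked a folio might validate the index and extract the exact page like this sketch (error path simplified):

struct page *page;

if (!folio_contains(folio, index)) {
	folio_unlock(folio);
	folio_put(folio);
	return -EAGAIN;		/* e.g. retry the lookup */
}
page = folio_file_page(folio, index);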
+
+unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
+ pgoff_t end, struct folio_batch *fbatch);
+unsigned filemap_get_folios_contig(struct address_space *mapping,
+ pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
+unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
+ pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
+unsigned filemap_get_folios_dirty(struct address_space *mapping,
+ pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
+
+struct folio *read_cache_folio(struct address_space *, pgoff_t index,
+ filler_t *filler, struct file *file);
+struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
+ gfp_t flags);
+struct page *read_cache_page(struct address_space *, pgoff_t index,
+ filler_t *filler, struct file *file);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
-extern int read_cache_pages(struct address_space *mapping,
- struct list_head *pages, filler_t *filler, void *data);
static inline struct page *read_mapping_page(struct address_space *mapping,
- pgoff_t index, void *data)
+ pgoff_t index, struct file *file)
{
- return read_cache_page(mapping, index, NULL, data);
+ return read_cache_page(mapping, index, NULL, file);
}
-/*
- * Get index of the page with in radix-tree
- * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
- */
-static inline pgoff_t page_to_index(struct page *page)
+static inline struct folio *read_mapping_folio(struct address_space *mapping,
+ pgoff_t index, struct file *file)
{
- pgoff_t pgoff;
-
- if (likely(!PageTransTail(page)))
- return page->index;
-
- /*
- * We don't initialize ->index for tail pages: calculate based on
- * head page
- */
- pgoff = compound_head(page)->index;
- pgoff += page - compound_head(page);
- return pgoff;
+ return read_cache_folio(mapping, index, NULL, file);
}
-/*
- * Get the offset in PAGE_SIZE.
- * (TODO: hugepage should have ->index in PAGE_SIZE)
+/**
+ * page_pgoff - Calculate the logical page offset of this page.
+ * @folio: The folio containing this page.
+ * @page: The page which we need the offset of.
+ *
+ * For file pages, this is the offset from the beginning of the file
+ * in units of PAGE_SIZE. For anonymous pages, this is the offset from
+ * the beginning of the anon_vma in units of PAGE_SIZE. This will
+ * return nonsense for KSM pages.
+ *
+ * Context: Caller must have a reference on the folio or otherwise
+ * prevent it from being split or freed.
+ *
+ * Return: The offset in units of PAGE_SIZE.
*/
-static inline pgoff_t page_to_pgoff(struct page *page)
+static inline pgoff_t page_pgoff(const struct folio *folio,
+ const struct page *page)
{
- if (unlikely(PageHeadHuge(page)))
- return page->index << compound_order(page);
+ return folio->index + folio_page_idx(folio, page);
+}
- return page_to_index(page);
+/**
+ * folio_pos - Returns the byte position of this folio in its file.
+ * @folio: The folio.
+ */
+static inline loff_t folio_pos(const struct folio *folio)
+{
+ return ((loff_t)folio->index) * PAGE_SIZE;
}
/*
@@ -552,36 +1052,36 @@ static inline pgoff_t page_to_pgoff(struct page *page)
*/
static inline loff_t page_offset(struct page *page)
{
- return ((loff_t)page->index) << PAGE_SHIFT;
+ struct folio *folio = page_folio(page);
+
+ return folio_pos(folio) + folio_page_idx(folio, page) * PAGE_SIZE;
}
-static inline loff_t page_file_offset(struct page *page)
+/*
+ * Get the offset in PAGE_SIZE (even for hugetlb folios).
+ */
+static inline pgoff_t folio_pgoff(const struct folio *folio)
{
- return ((loff_t)page_index(page)) << PAGE_SHIFT;
+ return folio->index;
}
-extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
- unsigned long address);
-
-static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
- unsigned long address)
+static inline pgoff_t linear_page_index(const struct vm_area_struct *vma,
+ const unsigned long address)
{
pgoff_t pgoff;
- if (unlikely(is_vm_hugetlb_page(vma)))
- return linear_hugepage_index(vma, address);
pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
pgoff += vma->vm_pgoff;
return pgoff;
}
struct wait_page_key {
- struct page *page;
+ struct folio *folio;
int bit_nr;
int page_match;
};
struct wait_page_queue {
- struct page *page;
+ struct folio *folio;
int bit_nr;
wait_queue_entry_t wait;
};
@@ -589,7 +1089,7 @@ struct wait_page_queue {
static inline bool wake_page_match(struct wait_page_queue *wait_page,
struct wait_page_key *key)
{
- if (wait_page->page != key->page)
+ if (wait_page->folio != key->folio)
return false;
key->page_match = 1;
@@ -599,227 +1099,242 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
return true;
}
-extern void __lock_page(struct page *page);
-extern int __lock_page_killable(struct page *page);
-extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
-extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
- unsigned int flags);
-extern void unlock_page(struct page *page);
+void __folio_lock(struct folio *folio);
+int __folio_lock_killable(struct folio *folio);
+vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
+void unlock_page(struct page *page);
+void folio_unlock(struct folio *folio);
+
+/**
+ * folio_trylock() - Attempt to lock a folio.
+ * @folio: The folio to attempt to lock.
+ *
+ * Sometimes it is undesirable to wait for a folio to be unlocked (eg
+ * when the locks are being taken in the wrong order, or if making
+ * progress through a batch of folios is more important than processing
+ * them in order). Usually folio_lock() is the correct function to call.
+ *
+ * Context: Any context.
+ * Return: Whether the lock was successfully acquired.
+ */
+static inline bool folio_trylock(struct folio *folio)
+{
+ return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
+}
/*
* Return true if the page was successfully locked
*/
-static inline int trylock_page(struct page *page)
+static inline bool trylock_page(struct page *page)
{
- page = compound_head(page);
- return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
+ return folio_trylock(page_folio(page));
}
-/*
- * lock_page may only be called if we have the page's inode pinned.
+/**
+ * folio_lock() - Lock this folio.
+ * @folio: The folio to lock.
+ *
+ * The folio lock protects against many things, probably more than it
+ * should. It is primarily held while a folio is being brought uptodate,
+ * either from its backing file or from swap. It is also held while a
+ * folio is being truncated from its address_space, so holding the lock
+ * is sufficient to keep folio->mapping stable.
+ *
+ * The folio lock is also held while write() is modifying the page to
+ * provide POSIX atomicity guarantees (as long as the write does not
+ * cross a page boundary). Other modifications to the data in the folio
+ * do not hold the folio lock and can race with writes, eg DMA and stores
+ * to mapped pages.
+ *
+ * Context: May sleep. If you need to acquire the locks of two or
+ * more folios, they must be in order of ascending index, if they are
+ * in the same address_space. If they are in different address_spaces,
+ * acquire the lock of the folio which belongs to the address_space which
+ * has the lowest address in memory first.
*/
-static inline void lock_page(struct page *page)
+static inline void folio_lock(struct folio *folio)
{
might_sleep();
- if (!trylock_page(page))
- __lock_page(page);
+ if (!folio_trylock(folio))
+ __folio_lock(folio);
}
-/*
- * lock_page_killable is like lock_page but can be interrupted by fatal
- * signals. It returns 0 if it locked the page and -EINTR if it was
- * killed while waiting.
+/**
+ * lock_page() - Lock the folio containing this page.
+ * @page: The page to lock.
+ *
+ * See folio_lock() for a description of what the lock protects.
+ * This is a legacy function and new code should probably use folio_lock()
+ * instead.
+ *
+ * Context: May sleep. Pages in the same folio share a lock, so do not
+ * attempt to lock two pages which share a folio.
*/
-static inline int lock_page_killable(struct page *page)
+static inline void lock_page(struct page *page)
{
+ struct folio *folio;
might_sleep();
- if (!trylock_page(page))
- return __lock_page_killable(page);
- return 0;
+
+ folio = page_folio(page);
+ if (!folio_trylock(folio))
+ __folio_lock(folio);
}
-/*
- * lock_page_async - Lock the page, unless this would block. If the page
- * is already locked, then queue a callback when the page becomes unlocked.
- * This callback can then retry the operation.
+/**
+ * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
+ * @folio: The folio to lock.
*
- * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
- * was already locked and the callback defined in 'wait' was queued.
+ * Attempts to lock the folio, like folio_lock(), except that the sleep
+ * to acquire the lock is interruptible by a fatal signal.
+ *
+ * Context: May sleep; see folio_lock().
+ * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
*/
-static inline int lock_page_async(struct page *page,
- struct wait_page_queue *wait)
+static inline int folio_lock_killable(struct folio *folio)
{
- if (!trylock_page(page))
- return __lock_page_async(page, wait);
+ might_sleep();
+ if (!folio_trylock(folio))
+ return __folio_lock_killable(folio);
return 0;
}
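
A typical caller of the killable variant follows this sketch:

if (folio_lock_killable(folio)) {
	folio_put(folio);
	return -EINTR;		/* fatal signal while sleeping */
}
/* folio->mapping is stable while the lock is held. */
folio_unlock(folio);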
/*
- * lock_page_or_retry - Lock the page, unless this would block and the
+ * folio_lock_or_retry - Lock the folio, unless this would block and the
* caller indicated that it can handle a retry.
*
* Return value and mmap_lock implications depend on flags; see
- * __lock_page_or_retry().
+ * __folio_lock_or_retry().
*/
-static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
- unsigned int flags)
+static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
+ struct vm_fault *vmf)
{
might_sleep();
- return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
+ if (!folio_trylock(folio))
+ return __folio_lock_or_retry(folio, vmf);
+ return 0;
}
/*
- * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
+ * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
* and should not be used directly.
*/
-extern void wait_on_page_bit(struct page *page, int bit_nr);
-extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
+void folio_wait_bit(struct folio *folio, int bit_nr);
+int folio_wait_bit_killable(struct folio *folio, int bit_nr);
/*
- * Wait for a page to be unlocked.
+ * Wait for a folio to be unlocked.
*
- * This must be called with the caller "holding" the page,
- * ie with increased "page->count" so that the page won't
- * go away during the wait..
+ * This must be called with the caller "holding" the folio,
+ * ie with increased folio reference count so that the folio won't
+ * go away during the wait.
*/
-static inline void wait_on_page_locked(struct page *page)
+static inline void folio_wait_locked(struct folio *folio)
{
- if (PageLocked(page))
- wait_on_page_bit(compound_head(page), PG_locked);
+ if (folio_test_locked(folio))
+ folio_wait_bit(folio, PG_locked);
}
-static inline int wait_on_page_locked_killable(struct page *page)
+static inline int folio_wait_locked_killable(struct folio *folio)
{
- if (!PageLocked(page))
+ if (!folio_test_locked(folio))
return 0;
- return wait_on_page_bit_killable(compound_head(page), PG_locked);
+ return folio_wait_bit_killable(folio, PG_locked);
}
-int put_and_wait_on_page_locked(struct page *page, int state);
+void folio_end_read(struct folio *folio, bool success);
void wait_on_page_writeback(struct page *page);
-int wait_on_page_writeback_killable(struct page *page);
-extern void end_page_writeback(struct page *page);
-void wait_for_stable_page(struct page *page);
-
-void page_endio(struct page *page, bool is_write, int err);
-
-/**
- * set_page_private_2 - Set PG_private_2 on a page and take a ref
- * @page: The page.
- *
- * Set the PG_private_2 flag on a page and take the reference needed for the VM
- * to handle its lifetime correctly. This sets the flag and takes the
- * reference unconditionally, so care must be taken not to set the flag again
- * if it's already set.
- */
-static inline void set_page_private_2(struct page *page)
-{
- page = compound_head(page);
- get_page(page);
- SetPagePrivate2(page);
-}
-
-void end_page_private_2(struct page *page);
-void wait_on_page_private_2(struct page *page);
-int wait_on_page_private_2_killable(struct page *page);
-
-/*
- * Add an arbitrary waiter to a page's wait queue
- */
-extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
+void folio_wait_writeback(struct folio *folio);
+int folio_wait_writeback_killable(struct folio *folio);
+void end_page_writeback(struct page *page);
+void folio_end_writeback(struct folio *folio);
+void folio_end_writeback_no_dropbehind(struct folio *folio);
+void folio_end_dropbehind(struct folio *folio);
+void folio_wait_stable(struct folio *folio);
+void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
+void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
+void __folio_cancel_dirty(struct folio *folio);
+static inline void folio_cancel_dirty(struct folio *folio)
+{
+ /* Avoid atomic ops, locking, etc. when not actually needed. */
+ if (folio_test_dirty(folio))
+ __folio_cancel_dirty(folio);
+}
+bool folio_clear_dirty_for_io(struct folio *folio);
+bool clear_page_dirty_for_io(struct page *page);
+void folio_invalidate(struct folio *folio, size_t offset, size_t length);
+bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
+
+#ifdef CONFIG_MIGRATION
+int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
+ struct folio *src, enum migrate_mode mode);
+#else
+#define filemap_migrate_folio NULL
+#endif
+void folio_end_private_2(struct folio *folio);
+void folio_wait_private_2(struct folio *folio);
+int folio_wait_private_2_killable(struct folio *folio);
/*
- * Fault everything in given userspace address range in.
+ * Fault in userspace address range.
*/
-static inline int fault_in_pages_writeable(char __user *uaddr, int size)
-{
- char __user *end = uaddr + size - 1;
-
- if (unlikely(size == 0))
- return 0;
-
- if (unlikely(uaddr > end))
- return -EFAULT;
- /*
- * Writing zeroes into userspace here is OK, because we know that if
- * the zero gets there, we'll be overwriting it.
- */
- do {
- if (unlikely(__put_user(0, uaddr) != 0))
- return -EFAULT;
- uaddr += PAGE_SIZE;
- } while (uaddr <= end);
-
- /* Check whether the range spilled into the next page. */
- if (((unsigned long)uaddr & PAGE_MASK) ==
- ((unsigned long)end & PAGE_MASK))
- return __put_user(0, end);
-
- return 0;
-}
-
-static inline int fault_in_pages_readable(const char __user *uaddr, int size)
-{
- volatile char c;
- const char __user *end = uaddr + size - 1;
-
- if (unlikely(size == 0))
- return 0;
+size_t fault_in_writeable(char __user *uaddr, size_t size);
+size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
+size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
+size_t fault_in_readable(const char __user *uaddr, size_t size);
- if (unlikely(uaddr > end))
- return -EFAULT;
-
- do {
- if (unlikely(__get_user(c, uaddr) != 0))
- return -EFAULT;
- uaddr += PAGE_SIZE;
- } while (uaddr <= end);
-
- /* Check whether the range spilled into the next page. */
- if (((unsigned long)uaddr & PAGE_MASK) ==
- ((unsigned long)end & PAGE_MASK)) {
- return __get_user(c, end);
- }
-
- (void)c;
- return 0;
-}
-
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
- pgoff_t index, gfp_t gfp_mask);
-extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow);
-void replace_page_cache_page(struct page *old, struct page *new);
+ pgoff_t index, gfp_t gfp);
+int filemap_add_folio(struct address_space *mapping, struct folio *folio,
+ pgoff_t index, gfp_t gfp);
+void filemap_remove_folio(struct folio *folio);
+void __filemap_remove_folio(struct folio *folio, void *shadow);
+void replace_page_cache_folio(struct folio *old, struct folio *new);
void delete_from_page_cache_batch(struct address_space *mapping,
- struct pagevec *pvec);
+ struct folio_batch *fbatch);
+bool filemap_release_folio(struct folio *folio, gfp_t gfp);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
int whence);
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run __SetPageLocked() against it.
+/* Must be non-static for BPF error injection */
+int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
+ pgoff_t index, gfp_t gfp, void **shadowp);
+
+bool filemap_range_has_writeback(struct address_space *mapping,
+ loff_t start_byte, loff_t end_byte);
+
+/**
+ * filemap_range_needs_writeback - check if range potentially needs writeback
+ * @mapping: address space within which to check
+ * @start_byte: offset in bytes where the range starts
+ * @end_byte: offset in bytes where the range ends (inclusive)
+ *
+ * Check whether the supplied range contains at least one page that may
+ * need writeback, i.e. whether direct writing in this range could trigger
+ * writeback. Used by O_DIRECT
+ * read/write with IOCB_NOWAIT, to see if the caller needs to do
+ * filemap_write_and_wait_range() before proceeding.
+ *
+ * Return: %true if the caller should do filemap_write_and_wait_range() before
+ * doing O_DIRECT to a page in this range, %false otherwise.
*/
-static inline int add_to_page_cache(struct page *page,
- struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+static inline bool filemap_range_needs_writeback(struct address_space *mapping,
+ loff_t start_byte,
+ loff_t end_byte)
{
- int error;
-
- __SetPageLocked(page);
- error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
- if (unlikely(error))
- __ClearPageLocked(page);
- return error;
+ if (!mapping->nrpages)
+ return false;
+ if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
+ !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+ return false;
+ return filemap_range_has_writeback(mapping, start_byte, end_byte);
}
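
The documented IOCB_NOWAIT use reduces to a check like this sketch, where pos and count describe the direct I/O range:

if ((iocb->ki_flags & IOCB_NOWAIT) &&
    filemap_range_needs_writeback(mapping, pos, pos + count - 1))
	return -EAGAIN;	/* caller must retry from a blocking context */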
/**
* struct readahead_control - Describes a readahead request.
*
* A readahead request is for consecutive pages. Filesystems which
- * implement the ->readahead method should call readahead_page() or
- * readahead_page_batch() in a loop and attempt to start I/O against
- * each page in the request.
+ * implement the ->readahead method should call readahead_folio() or
+ * __readahead_batch() in a loop and attempt to start reads into each
+ * folio in the request.
*
* Most of the fields in this struct are private and should be accessed
* by the functions below.
@@ -837,6 +1352,9 @@ struct readahead_control {
pgoff_t _index;
unsigned int _nr_pages;
unsigned int _batch_count;
+ bool dropbehind;
+ bool _workingset;
+ unsigned long _pflags;
};
#define DEFINE_READAHEAD(ractl, f, r, m, i) \
@@ -852,7 +1370,7 @@ struct readahead_control {
void page_cache_ra_unbounded(struct readahead_control *,
unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
-void page_cache_async_ra(struct readahead_control *, struct page *,
+void page_cache_async_ra(struct readahead_control *, struct folio *,
unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
loff_t new_start, size_t new_len);
@@ -884,8 +1402,7 @@ void page_cache_sync_readahead(struct address_space *mapping,
* @mapping: address_space which holds the pagecache and I/O vectors
* @ra: file_ra_state which holds the readahead state
* @file: Used by the filesystem for authentication.
- * @page: The page at @index which triggered the readahead call.
- * @index: Index of first page to be read.
+ * @folio: The folio which triggered the readahead call.
* @req_count: Total number of pages being read by the caller.
*
* page_cache_async_readahead() should be called when a page is used which
@@ -896,39 +1413,47 @@ void page_cache_sync_readahead(struct address_space *mapping,
static inline
void page_cache_async_readahead(struct address_space *mapping,
struct file_ra_state *ra, struct file *file,
- struct page *page, pgoff_t index, unsigned long req_count)
+ struct folio *folio, unsigned long req_count)
{
- DEFINE_READAHEAD(ractl, file, ra, mapping, index);
- page_cache_async_ra(&ractl, page, req_count);
+ DEFINE_READAHEAD(ractl, file, ra, mapping, folio->index);
+ page_cache_async_ra(&ractl, folio, req_count);
}
-/**
- * readahead_page - Get the next page to read.
- * @rac: The current readahead request.
- *
- * Context: The page is locked and has an elevated refcount. The caller
- * should decreases the refcount once the page has been submitted for I/O
- * and unlock the page once all I/O to that page has completed.
- * Return: A pointer to the next page, or %NULL if we are done.
- */
-static inline struct page *readahead_page(struct readahead_control *rac)
+static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
- struct page *page;
+ struct folio *folio;
- BUG_ON(rac->_batch_count > rac->_nr_pages);
- rac->_nr_pages -= rac->_batch_count;
- rac->_index += rac->_batch_count;
+ BUG_ON(ractl->_batch_count > ractl->_nr_pages);
+ ractl->_nr_pages -= ractl->_batch_count;
+ ractl->_index += ractl->_batch_count;
- if (!rac->_nr_pages) {
- rac->_batch_count = 0;
+ if (!ractl->_nr_pages) {
+ ractl->_batch_count = 0;
return NULL;
}
- page = xa_load(&rac->mapping->i_pages, rac->_index);
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- rac->_batch_count = thp_nr_pages(page);
+ folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ ractl->_batch_count = folio_nr_pages(folio);
+
+ return folio;
+}
+
+/**
+ * readahead_folio - Get the next folio to read.
+ * @ractl: The current readahead request.
+ *
+ * Context: The folio is locked. The caller should unlock the folio once
+ * all I/O to that folio has completed.
+ * Return: A pointer to the next folio, or %NULL if we are done.
+ */
+static inline struct folio *readahead_folio(struct readahead_control *ractl)
+{
+ struct folio *folio = __readahead_folio(ractl);
- return page;
+ if (folio)
+ folio_put(folio);
+ return folio;
}
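
A minimal ->readahead implementation built on this helper might look like the following sketch, with my_fs_read_folio_async() standing in for the filesystem's I/O submission:

static void my_fs_readahead(struct readahead_control *ractl)
{
	struct folio *folio;

	while ((folio = readahead_folio(ractl))) {
		/* Locked on entry; unlock when the read completes. */
		my_fs_read_folio_async(folio);
	}
}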
static inline unsigned int __readahead_batch(struct readahead_control *rac,
@@ -936,7 +1461,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
{
unsigned int i = 0;
XA_STATE(xas, &rac->mapping->i_pages, 0);
- struct page *page;
+ struct folio *folio;
BUG_ON(rac->_batch_count > rac->_nr_pages);
rac->_nr_pages -= rac->_batch_count;
@@ -945,23 +1470,12 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
xas_set(&xas, rac->_index);
rcu_read_lock();
- xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
- if (xas_retry(&xas, page))
+ xas_for_each(&xas, folio, rac->_index + rac->_nr_pages - 1) {
+ if (xas_retry(&xas, folio))
continue;
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE(PageTail(page), page);
- array[i++] = page;
- rac->_batch_count += thp_nr_pages(page);
-
- /*
- * The page cache isn't using multi-index entries yet,
- * so the xas cursor needs to be manually moved to the
- * next index. This can be removed once the page cache
- * is converted.
- */
- if (PageHead(page))
- xas_set(&xas, rac->_index + rac->_batch_count);
-
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ array[i++] = folio_page(folio, 0);
+ rac->_batch_count += folio_nr_pages(folio);
if (i == array_sz)
break;
}
@@ -971,24 +1485,10 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
}
/**
- * readahead_page_batch - Get a batch of pages to read.
- * @rac: The current readahead request.
- * @array: An array of pointers to struct page.
- *
- * Context: The pages are locked and have an elevated refcount. The caller
- * should decreases the refcount once the page has been submitted for I/O
- * and unlock the page once all I/O to that page has completed.
- * Return: The number of pages placed in the array. 0 indicates the request
- * is complete.
- */
-#define readahead_page_batch(rac, array) \
- __readahead_batch(rac, array, ARRAY_SIZE(array))
-
-/**
* readahead_pos - The byte offset into the file of this readahead request.
* @rac: The readahead request.
*/
-static inline loff_t readahead_pos(struct readahead_control *rac)
+static inline loff_t readahead_pos(const struct readahead_control *rac)
{
return (loff_t)rac->_index * PAGE_SIZE;
}
@@ -997,7 +1497,7 @@ static inline loff_t readahead_pos(struct readahead_control *rac)
* readahead_length - The number of bytes in this readahead request.
* @rac: The readahead request.
*/
-static inline size_t readahead_length(struct readahead_control *rac)
+static inline size_t readahead_length(const struct readahead_control *rac)
{
return rac->_nr_pages * PAGE_SIZE;
}
@@ -1006,7 +1506,7 @@ static inline size_t readahead_length(struct readahead_control *rac)
* readahead_index - The index of the first page in this readahead request.
* @rac: The readahead request.
*/
-static inline pgoff_t readahead_index(struct readahead_control *rac)
+static inline pgoff_t readahead_index(const struct readahead_control *rac)
{
return rac->_index;
}
@@ -1015,7 +1515,7 @@ static inline pgoff_t readahead_index(struct readahead_control *rac)
* readahead_count - The number of pages in this readahead request.
* @rac: The readahead request.
*/
-static inline unsigned int readahead_count(struct readahead_control *rac)
+static inline unsigned int readahead_count(const struct readahead_control *rac)
{
return rac->_nr_pages;
}
@@ -1024,59 +1524,60 @@ static inline unsigned int readahead_count(struct readahead_control *rac)
* readahead_batch_length - The number of bytes in the current batch.
* @rac: The readahead request.
*/
-static inline size_t readahead_batch_length(struct readahead_control *rac)
+static inline size_t readahead_batch_length(const struct readahead_control *rac)
{
return rac->_batch_count * PAGE_SIZE;
}
-static inline unsigned long dir_pages(struct inode *inode)
+static inline unsigned long dir_pages(const struct inode *inode)
{
return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
PAGE_SHIFT;
}
/**
- * page_mkwrite_check_truncate - check if page was truncated
- * @page: the page to check
- * @inode: the inode to check the page against
+ * folio_mkwrite_check_truncate - check if folio was truncated
+ * @folio: the folio to check
+ * @inode: the inode to check the folio against
*
- * Returns the number of bytes in the page up to EOF,
- * or -EFAULT if the page was truncated.
+ * Return: the number of bytes in the folio up to EOF,
+ * or -EFAULT if the folio was truncated.
*/
-static inline int page_mkwrite_check_truncate(struct page *page,
- struct inode *inode)
+static inline ssize_t folio_mkwrite_check_truncate(const struct folio *folio,
+ const struct inode *inode)
{
loff_t size = i_size_read(inode);
pgoff_t index = size >> PAGE_SHIFT;
- int offset = offset_in_page(size);
+ size_t offset = offset_in_folio(folio, size);
- if (page->mapping != inode->i_mapping)
+ if (!folio->mapping)
return -EFAULT;
- /* page is wholly inside EOF */
- if (page->index < index)
- return PAGE_SIZE;
- /* page is wholly past EOF */
- if (page->index > index || !offset)
+ /* folio is wholly inside EOF */
+ if (folio_next_index(folio) - 1 < index)
+ return folio_size(folio);
+ /* folio is wholly past EOF */
+ if (folio->index > index || !offset)
return -EFAULT;
- /* page is partially inside EOF */
+ /* folio is partially inside EOF */
return offset;
}
/**
- * i_blocks_per_page - How many blocks fit in this page.
+ * i_blocks_per_folio - How many blocks fit in this folio.
* @inode: The inode which contains the blocks.
- * @page: The page (head page if the page is a THP).
+ * @folio: The folio.
*
- * If the block size is larger than the size of this page, return zero.
+ * If the block size is larger than the size of this folio, return zero.
*
- * Context: The caller should hold a refcount on the page to prevent it
+ * Context: The caller should hold a refcount on the folio to prevent it
* from being split.
- * Return: The number of filesystem blocks covered by this page.
+ * Return: The number of filesystem blocks covered by this folio.
*/
static inline
-unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
+unsigned int i_blocks_per_folio(const struct inode *inode,
+ const struct folio *folio)
{
- return thp_size(page) >> inode->i_blkbits;
+ return folio_size(folio) >> inode->i_blkbits;
}
#endif /* _LINUX_PAGEMAP_H */
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 7f3f19065a9f..63be5a451627 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -3,82 +3,103 @@
* include/linux/pagevec.h
*
* In many places it is efficient to batch an operation up against multiple
- * pages. A pagevec is a multipage container which is used for that.
+ * folios. A folio_batch is a container which is used for that.
*/
#ifndef _LINUX_PAGEVEC_H
#define _LINUX_PAGEVEC_H
-#include <linux/xarray.h>
+#include <linux/types.h>
-/* 15 pointers + header align the pagevec structure to a power of two */
-#define PAGEVEC_SIZE 15
+/* 31 pointers + header align the folio_batch structure to a power of two */
+#define PAGEVEC_SIZE 31
-struct page;
-struct address_space;
+struct folio;
-struct pagevec {
+/**
+ * struct folio_batch - A collection of folios.
+ *
+ * The folio_batch is used to amortise the cost of retrieving and
+ * operating on a set of folios. The order of folios in the batch may be
+ * significant (eg delete_from_page_cache_batch()). Some users of the
+ * folio_batch store "exceptional" entries in it which can be removed
+ * by calling folio_batch_remove_exceptionals().
+ */
+struct folio_batch {
unsigned char nr;
+ unsigned char i;
bool percpu_pvec_drained;
- struct page *pages[PAGEVEC_SIZE];
+ struct folio *folios[PAGEVEC_SIZE];
};
-void __pagevec_release(struct pagevec *pvec);
-void __pagevec_lru_add(struct pagevec *pvec);
-void pagevec_remove_exceptionals(struct pagevec *pvec);
-unsigned pagevec_lookup_range(struct pagevec *pvec,
- struct address_space *mapping,
- pgoff_t *start, pgoff_t end);
-static inline unsigned pagevec_lookup(struct pagevec *pvec,
- struct address_space *mapping,
- pgoff_t *start)
-{
- return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1);
-}
-
-unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
- struct address_space *mapping, pgoff_t *index, pgoff_t end,
- xa_mark_t tag);
-static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,
- struct address_space *mapping, pgoff_t *index, xa_mark_t tag)
+/**
+ * folio_batch_init() - Initialise a batch of folios
+ * @fbatch: The folio batch.
+ *
+ * A freshly initialised folio_batch contains zero folios.
+ */
+static inline void folio_batch_init(struct folio_batch *fbatch)
{
- return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag);
+ fbatch->nr = 0;
+ fbatch->i = 0;
+ fbatch->percpu_pvec_drained = false;
}
-static inline void pagevec_init(struct pagevec *pvec)
+static inline void folio_batch_reinit(struct folio_batch *fbatch)
{
- pvec->nr = 0;
- pvec->percpu_pvec_drained = false;
+ fbatch->nr = 0;
+ fbatch->i = 0;
}
-static inline void pagevec_reinit(struct pagevec *pvec)
+static inline unsigned int folio_batch_count(const struct folio_batch *fbatch)
{
- pvec->nr = 0;
+ return fbatch->nr;
}
-static inline unsigned pagevec_count(struct pagevec *pvec)
+static inline unsigned int folio_batch_space(const struct folio_batch *fbatch)
{
- return pvec->nr;
+ return PAGEVEC_SIZE - fbatch->nr;
}
-static inline unsigned pagevec_space(struct pagevec *pvec)
+/**
+ * folio_batch_add() - Add a folio to a batch.
+ * @fbatch: The folio batch.
+ * @folio: The folio to add.
+ *
+ * The folio is added to the end of the batch.
+ * The batch must have previously been initialised using folio_batch_init().
+ *
+ * Return: The number of slots still available.
+ */
+static inline unsigned folio_batch_add(struct folio_batch *fbatch,
+ struct folio *folio)
{
- return PAGEVEC_SIZE - pvec->nr;
+ fbatch->folios[fbatch->nr++] = folio;
+ return folio_batch_space(fbatch);
}
-/*
- * Add a page to a pagevec. Returns the number of slots still available.
+/**
+ * folio_batch_next - Return the next folio to process.
+ * @fbatch: The folio batch being processed.
+ *
+ * Use this function to implement a queue of folios.
+ *
+ * Return: The next folio in the queue, or NULL if the queue is empty.
*/
-static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page)
+static inline struct folio *folio_batch_next(struct folio_batch *fbatch)
{
- pvec->pages[pvec->nr++] = page;
- return pagevec_space(pvec);
+ if (fbatch->i == fbatch->nr)
+ return NULL;
+ return fbatch->folios[fbatch->i++];
}
-static inline void pagevec_release(struct pagevec *pvec)
+void __folio_batch_release(struct folio_batch *fbatch);
+
+static inline void folio_batch_release(struct folio_batch *fbatch)
{
- if (pagevec_count(pvec))
- __pagevec_release(pvec);
+ if (folio_batch_count(fbatch))
+ __folio_batch_release(fbatch);
}
+void folio_batch_remove_exceptionals(struct folio_batch *fbatch);
#endif /* _LINUX_PAGEVEC_H */
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index ac7b38ad5903..88e18615dd72 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -6,6 +6,18 @@
struct mm_walk;
+/* Locking requirement during a page walk. */
+enum page_walk_lock {
+ /* mmap_lock should be locked for read to stabilize the vma tree */
+ PGWALK_RDLOCK = 0,
+ /* vma will be write-locked during the walk */
+ PGWALK_WRLOCK = 1,
+ /* vma is expected to be already write-locked during the walk */
+ PGWALK_WRLOCK_VERIFY = 2,
+ /* vma is expected to be already read-locked during the walk */
+ PGWALK_VMA_RDLOCK_VERIFY = 3,
+};
+
/**
* struct mm_walk_ops - callbacks for walk_page_range
* @pgd_entry: if set, called for each non-empty PGD (top-level) entry
@@ -15,21 +27,42 @@ struct mm_walk;
* this handler is required to be able to handle
* pmd_trans_huge() pmds. They may simply choose to
* split_huge_page() instead of handling it explicitly.
- * @pte_entry: if set, called for each non-empty PTE (lowest-level)
- * entry
+ * @pte_entry: if set, called for each PTE (lowest-level) entry
+ * including empty ones, except if @install_pte is set.
+ * If @install_pte is set, @pte_entry is called only for
+ * existing PTEs.
* @pte_hole: if set, called for each hole at all levels,
- * depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD
- * 4:PTE. Any folded depths (where PTRS_PER_P?D is equal
- * to 1) are skipped.
- * @hugetlb_entry: if set, called for each hugetlb entry
+ * depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD.
+ * Any folded depths (where PTRS_PER_P?D is equal to 1)
+ * are skipped. If @install_pte is specified, this will
+ * not trigger for any populated ranges.
+ * @hugetlb_entry: if set, called for each hugetlb entry. This hook
+ * function is called with the vma lock held, in order to
+ * protect against a concurrent freeing of the pte_t* or
+ * the ptl. In some cases, the hook function needs to drop
+ * and retake the vma lock in order to avoid deadlocks
+ * while calling other functions. In such cases the hook
+ * function must either refrain from accessing the pte or
+ * ptl after dropping the vma lock, or else revalidate
+ * those items after re-acquiring the vma lock and before
+ * accessing them.
* @test_walk: caller specific callback function to determine whether
* we walk over the current vma or not. Returning 0 means
* "do page table walk over the current vma", returning
* a negative value means "abort current page table walk
* right now" and returning 1 means "skip the current vma"
+ * Note that this callback is not called when the caller
+ * passes in a single VMA as for walk_page_vma().
* @pre_vma: if set, called before starting walk on a non-null vma.
* @post_vma: if set, called after a walk on a non-null vma, provided
* that @pre_vma and the vma walk succeeded.
+ * @install_pte: if set, missing page table entries are installed and
+ * thus all levels are always walked in the specified
+ * range. This callback is then invoked at the PTE level
+ * (having split any THP pages prior), providing the PTE to
+ * install. If allocations fail, the walk is aborted. This
+ * operation is only available for userland memory. Not
+ * usable for hugetlb ranges.
*
* p?d_entry callbacks are called even if those levels are folded on a
* particular architecture/configuration.
@@ -55,6 +88,9 @@ struct mm_walk_ops {
int (*pre_vma)(unsigned long start, unsigned long end,
struct mm_walk *walk);
void (*post_vma)(struct mm_walk *walk);
+ int (*install_pte)(unsigned long addr, unsigned long next,
+ pte_t *ptep, struct mm_walk *walk);
+ enum page_walk_lock walk_lock;
};
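
As a sketch of the callback wiring, counting present PTEs in a range; walk_page_vma() would run it under mmap_lock held for read, as PGWALK_RDLOCK requires:

static int count_pte(pte_t *pte, unsigned long addr, unsigned long next,
		     struct mm_walk *walk)
{
	if (pte_present(ptep_get(pte)))
		(*(unsigned long *)walk->private)++;
	return 0;
}

static const struct mm_walk_ops count_ops = {
	.pte_entry	= count_pte,
	.walk_lock	= PGWALK_RDLOCK,
};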
/*
@@ -95,14 +131,77 @@ struct mm_walk {
int walk_page_range(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private);
-int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
- unsigned long end, const struct mm_walk_ops *ops,
- pgd_t *pgd,
- void *private);
+int walk_kernel_page_table_range(unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ pgd_t *pgd, void *private);
+int walk_kernel_page_table_range_lockless(unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ pgd_t *pgd, void *private);
+int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ void *private);
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
void *private);
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
pgoff_t nr, const struct mm_walk_ops *ops,
void *private);
+typedef int __bitwise folio_walk_flags_t;
+
+/*
+ * Walk migration entries as well. Careful: a large folio might get split
+ * concurrently.
+ */
+#define FW_MIGRATION ((__force folio_walk_flags_t)BIT(0))
+
+/* Walk shared zeropages (small + huge) as well. */
+#define FW_ZEROPAGE ((__force folio_walk_flags_t)BIT(1))
+
+enum folio_walk_level {
+ FW_LEVEL_PTE,
+ FW_LEVEL_PMD,
+ FW_LEVEL_PUD,
+};
+
+/**
+ * struct folio_walk - folio_walk_start() / folio_walk_end() data
+ * @page: exact folio page referenced (if applicable)
+ * @level: page table level identifying the entry type
+ * @ptep: pointer to the page table entry (FW_LEVEL_PTE).
+ * @pmdp: pointer to the page table entry (FW_LEVEL_PMD).
+ * @pudp: pointer to the page table entry (FW_LEVEL_PUD).
+ * @pte: value of the page table entry (FW_LEVEL_PTE).
+ * @pmd: value of the page table entry (FW_LEVEL_PMD).
+ * @pud: value of the page table entry (FW_LEVEL_PUD).
+ * @ptl: pointer to the page table lock.
+ *
+ * (see folio_walk_start() documentation for more details)
+ */
+struct folio_walk {
+ /* public */
+ struct page *page;
+ enum folio_walk_level level;
+ union {
+ pte_t *ptep;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ };
+ union {
+ pte_t pte;
+ pud_t pud;
+ pmd_t pmd;
+ };
+ /* private */
+ struct vm_area_struct *vma;
+ spinlock_t *ptl;
+};
+
+struct folio *folio_walk_start(struct folio_walk *fw,
+ struct vm_area_struct *vma, unsigned long addr,
+ folio_walk_flags_t flags);
+
+#define folio_walk_end(__fw, __vma) do { \
+ spin_unlock((__fw)->ptl); \
+ if (likely((__fw)->level == FW_LEVEL_PTE)) \
+ pte_unmap((__fw)->ptep); \
+ vma_pgtable_walk_end(__vma); \
+} while (0)
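
Usage pairs the two calls around the inspection, as in this sketch (my_inspect() is hypothetical; the caller holds mmap_lock):

struct folio_walk fw;
struct folio *folio = folio_walk_start(&fw, vma, addr, 0);

if (folio) {
	my_inspect(folio, fw.page);	/* fw.ptl is held here */
	folio_walk_end(&fw, vma);	/* drops ptl, unmaps the PTE */
}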
+
#endif /* _LINUX_PAGEWALK_H */
diff --git a/include/linux/panic.h b/include/linux/panic.h
new file mode 100644
index 000000000000..a00bc0937698
--- /dev/null
+++ b/include/linux/panic.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PANIC_H
+#define _LINUX_PANIC_H
+
+#include <linux/compiler_attributes.h>
+#include <linux/stdarg.h>
+#include <linux/types.h>
+
+struct pt_regs;
+
+extern long (*panic_blink)(int state);
+__printf(1, 2)
+void panic(const char *fmt, ...) __noreturn __cold;
+__printf(1, 0)
+void vpanic(const char *fmt, va_list args) __noreturn __cold;
+void nmi_panic(struct pt_regs *regs, const char *msg);
+void check_panic_on_warn(const char *origin);
+extern void oops_enter(void);
+extern void oops_exit(void);
+extern bool oops_may_print(void);
+
+extern bool panic_triggering_all_cpu_backtrace;
+extern int panic_timeout;
+extern unsigned long panic_print;
+extern int panic_on_oops;
+extern int panic_on_warn;
+
+extern unsigned long panic_on_taint;
+extern bool panic_on_taint_nousertaint;
+
+extern int sysctl_panic_on_stackoverflow;
+
+extern bool crash_kexec_post_notifiers;
+
+extern void __stack_chk_fail(void);
+void abort(void);
+
+/*
+ * panic_cpu is used to synchronize panic() and crash_kexec() execution. It
+ * holds the number of the CPU currently executing panic(). A value of
+ * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec().
+ */
+extern atomic_t panic_cpu;
+#define PANIC_CPU_INVALID -1
+
+bool panic_try_start(void);
+void panic_reset(void);
+bool panic_in_progress(void);
+bool panic_on_this_cpu(void);
+bool panic_on_other_cpu(void);
+
+/*
+ * Only to be used by arch init code. If the user overrode the default
+ * CONFIG_PANIC_TIMEOUT, honor it.
+ */
+static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
+{
+ if (panic_timeout == arch_default_timeout)
+ panic_timeout = timeout;
+}
+
+/* This cannot be an enum because some may be used in assembly source. */
+#define TAINT_PROPRIETARY_MODULE 0
+#define TAINT_FORCED_MODULE 1
+#define TAINT_CPU_OUT_OF_SPEC 2
+#define TAINT_FORCED_RMMOD 3
+#define TAINT_MACHINE_CHECK 4
+#define TAINT_BAD_PAGE 5
+#define TAINT_USER 6
+#define TAINT_DIE 7
+#define TAINT_OVERRIDDEN_ACPI_TABLE 8
+#define TAINT_WARN 9
+#define TAINT_CRAP 10
+#define TAINT_FIRMWARE_WORKAROUND 11
+#define TAINT_OOT_MODULE 12
+#define TAINT_UNSIGNED_MODULE 13
+#define TAINT_SOFTLOCKUP 14
+#define TAINT_LIVEPATCH 15
+#define TAINT_AUX 16
+#define TAINT_RANDSTRUCT 17
+#define TAINT_TEST 18
+#define TAINT_FWCTL 19
+#define TAINT_FLAGS_COUNT 20
+#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1)
+
+struct taint_flag {
+ char c_true; /* character printed when tainted */
+ char c_false; /* character printed when not tainted */
+ const char *desc; /* verbose description of the set taint flag */
+};
+
+extern const struct taint_flag taint_flags[TAINT_FLAGS_COUNT];
+
+enum lockdep_ok {
+ LOCKDEP_STILL_OK,
+ LOCKDEP_NOW_UNRELIABLE,
+};
+
+extern const char *print_tainted(void);
+extern const char *print_tainted_verbose(void);
+extern void add_taint(unsigned flag, enum lockdep_ok);
+extern int test_taint(unsigned flag);
+extern unsigned long get_taint(void);
+
+#endif /* _LINUX_PANIC_H */
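
A rough sketch of how the taint interfaces above fit together; the flag and
lockdep disposition are illustrative, and pr_info() is assumed from the usual
printk helpers:

	/* record the taint without invalidating lockdep state */
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);

	if (test_taint(TAINT_FIRMWARE_WORKAROUND))
		pr_info("kernel taints: %s\n", print_tainted());
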
diff --git a/include/linux/panic_notifier.h b/include/linux/panic_notifier.h
new file mode 100644
index 000000000000..41e32483d7a7
--- /dev/null
+++ b/include/linux/panic_notifier.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PANIC_NOTIFIERS_H
+#define _LINUX_PANIC_NOTIFIERS_H
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+extern struct atomic_notifier_head panic_notifier_list;
+
+extern bool crash_kexec_post_notifiers;
+
+#endif /* _LINUX_PANIC_NOTIFIERS_H */
diff --git a/include/linux/papr_scm.h b/include/linux/papr_scm.h
new file mode 100644
index 000000000000..eb36453813db
--- /dev/null
+++ b/include/linux/papr_scm.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __LINUX_PAPR_SCM_H
+#define __LINUX_PAPR_SCM_H
+
+/* DIMM health bitmap indicators */
+/* SCM device is unable to persist memory contents */
+#define PAPR_PMEM_UNARMED (1ULL << (63 - 0))
+/* SCM device failed to persist memory contents */
+#define PAPR_PMEM_SHUTDOWN_DIRTY (1ULL << (63 - 1))
+/* SCM device contents are persisted from previous IPL */
+#define PAPR_PMEM_SHUTDOWN_CLEAN (1ULL << (63 - 2))
+/* SCM device contents are not persisted from previous IPL */
+#define PAPR_PMEM_EMPTY (1ULL << (63 - 3))
+/* SCM device memory life remaining is critically low */
+#define PAPR_PMEM_HEALTH_CRITICAL (1ULL << (63 - 4))
+/* SCM device will be garded off next IPL due to failure */
+#define PAPR_PMEM_HEALTH_FATAL (1ULL << (63 - 5))
+/* SCM contents cannot persist due to current platform health status */
+#define PAPR_PMEM_HEALTH_UNHEALTHY (1ULL << (63 - 6))
+/* SCM device is unable to persist memory contents in certain conditions */
+#define PAPR_PMEM_HEALTH_NON_CRITICAL (1ULL << (63 - 7))
+/* SCM device is encrypted */
+#define PAPR_PMEM_ENCRYPTED (1ULL << (63 - 8))
+/* SCM device has been scrubbed and locked */
+#define PAPR_PMEM_SCRUBBED_AND_LOCKED (1ULL << (63 - 9))
+
+#define PAPR_PMEM_SAVE_FAILED (1ULL << (63 - 10))
+
+/* Bit status indicators for health bitmap indicating an unarmed dimm */
+#define PAPR_PMEM_UNARMED_MASK (PAPR_PMEM_UNARMED | \
+ PAPR_PMEM_HEALTH_UNHEALTHY)
+
+/* Bit status indicators for health bitmap indicating an unflushed dimm */
+#define PAPR_PMEM_BAD_SHUTDOWN_MASK (PAPR_PMEM_SHUTDOWN_DIRTY)
+
+/* Bit status indicators for health bitmap indicating an unrestored dimm */
+#define PAPR_PMEM_BAD_RESTORE_MASK (PAPR_PMEM_EMPTY)
+
+/* Bit status indicators for smart event notification */
+#define PAPR_PMEM_SMART_EVENT_MASK (PAPR_PMEM_HEALTH_CRITICAL | \
+ PAPR_PMEM_HEALTH_FATAL | \
+ PAPR_PMEM_HEALTH_UNHEALTHY)
+
+#define PAPR_PMEM_SAVE_MASK (PAPR_PMEM_SAVE_FAILED)
+
+#define PAPR_SCM_PERF_STATS_EYECATCHER __stringify(SCMSTATS)
+#define PAPR_SCM_PERF_STATS_VERSION 0x1
+
+#endif /* __LINUX_PAPR_SCM_H */
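
A hypothetical consumer of the health bitmap might combine the masks above
along these lines (get_dimm_health() and notify_smart_event() are
illustrative stand-ins; the bitmap really comes from a hypervisor call):

	u64 health = get_dimm_health(dimm);

	if (health & PAPR_PMEM_UNARMED_MASK)
		dev_warn(dev, "DIMM cannot persist memory contents\n");
	if (health & PAPR_PMEM_BAD_SHUTDOWN_MASK)
		dev_warn(dev, "previous shutdown was dirty\n");
	if (health & PAPR_PMEM_SMART_EVENT_MASK)
		notify_smart_event(dimm);
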
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 1c16ffb8b908..464c2ad28039 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -40,10 +40,6 @@ struct amiga_parport_state {
unsigned char statusdir;/* ciab.ddrb & 7 */
};
-struct ax88796_parport_state {
- unsigned char cpr;
-};
-
struct ip32_parport_state {
unsigned int dcr;
unsigned int ecr;
@@ -55,7 +51,6 @@ struct parport_state {
/* ARC has no state. */
struct ax_parport_state ax;
struct amiga_parport_state amiga;
- struct ax88796_parport_state ax88796;
	/* Atari has no state. */
struct ip32_parport_state ip32;
void *misc;
@@ -257,13 +252,10 @@ struct parport {
struct parport_driver {
const char *name;
- void (*attach) (struct parport *);
void (*detach) (struct parport *);
void (*match_port)(struct parport *);
int (*probe)(struct pardevice *);
struct device_driver driver;
- bool devmodel;
- struct list_head list;
};
#define to_parport_driver(n) container_of(n, struct parport_driver, driver)
@@ -306,9 +298,6 @@ int __must_check __parport_register_driver(struct parport_driver *,
* to receive notifications about ports being found in the
* system, as well as ports no longer available.
*
- * If devmodel is true then the new device model is used
- * for registration.
- *
* The @driver structure is allocated by the caller and must not be
* deallocated until after calling parport_unregister_driver().
*
@@ -519,7 +508,7 @@ extern int parport_device_proc_register(struct pardevice *device);
extern int parport_device_proc_unregister(struct pardevice *device);
/* If PC hardware is the only type supported, we can optimise a bit. */
-#if !defined(CONFIG_PARPORT_NOT_PC)
+#if !defined(CONFIG_PARPORT_NOT_PC) && defined(CONFIG_PARPORT_PC)
#include <linux/parport_pc.h>
#define parport_write_data(p,x) parport_pc_write_data(p,x)
diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h
index 3d6fc576d6a1..f1ec5c10c3b3 100644
--- a/include/linux/parport_pc.h
+++ b/include/linux/parport_pc.h
@@ -26,6 +26,9 @@ struct parport_pc_private {
/* Whether or not there's an ECR. */
int ecr;
+ /* Bitmask of writable ECR bits. */
+ unsigned char ecr_writable;
+
/* Number of PWords that FIFO will hold. */
int fifo_depth;
diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h
index d2558121d48c..729415e91215 100644
--- a/include/linux/part_stat.h
+++ b/include/linux/part_stat.h
@@ -2,7 +2,8 @@
#ifndef _LINUX_PART_STAT_H
#define _LINUX_PART_STAT_H
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include <asm/local.h>
struct disk_stats {
u64 nsecs[NR_STAT_GROUPS];
@@ -16,8 +17,8 @@ struct disk_stats {
/*
* Macros to operate on percpu disk statistics:
*
- * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters and should
- * be called between disk_stat_lock() and disk_stat_unlock().
+ * part_stat_{add|sub|inc|dec}() modify the stat counters and should
+ * be called between part_stat_lock() and part_stat_unlock().
*
* part_stat_read() can be called at any time.
*/
@@ -32,7 +33,7 @@ struct disk_stats {
#define part_stat_read(part, field) \
({ \
- typeof((part)->bd_stats->field) res = 0; \
+ TYPEOF_UNQUAL((part)->bd_stats->field) res = 0; \
unsigned int _cpu; \
for_each_possible_cpu(_cpu) \
res += per_cpu_ptr((part)->bd_stats, _cpu)->field; \
@@ -58,7 +59,7 @@ static inline void part_stat_set_all(struct block_device *part, int value)
#define part_stat_add(part, field, addnd) do { \
__part_stat_add((part), field, addnd); \
- if ((part)->bd_partno) \
+ if (bdev_is_partition(part)) \
__part_stat_add(bdev_whole(part), field, addnd); \
} while (0)
@@ -78,4 +79,6 @@ static inline void part_stat_set_all(struct block_device *part, int value)
#define part_stat_local_read_cpu(part, field, cpu) \
local_read(&(part_stat_get_cpu(part, field, cpu)))
+unsigned int bdev_count_inflight(struct block_device *part);
+
#endif /* _LINUX_PART_STAT_H */
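
Per the locking comment above, updates go inside a part_stat_lock() /
part_stat_unlock() pair while reads need no lock; a sketch of accounting one
read completion (STAT_READ and the field names come from struct disk_stats):

	part_stat_lock();
	part_stat_inc(bdev, ios[STAT_READ]);
	part_stat_add(bdev, sectors[STAT_READ], nr_sectors);
	part_stat_unlock();

	/* lockless aggregation across all possible CPUs */
	pr_debug("reads: %lu\n", part_stat_read(bdev, ios[STAT_READ]));
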
diff --git a/include/linux/path.h b/include/linux/path.h
index 475225a03d0d..7ea389dc764b 100644
--- a/include/linux/path.h
+++ b/include/linux/path.h
@@ -18,10 +18,13 @@ static inline int path_equal(const struct path *path1, const struct path *path2)
return path1->mnt == path2->mnt && path1->dentry == path2->dentry;
}
-static inline void path_put_init(struct path *path)
-{
- path_put(path);
- *path = (struct path) { };
-}
+/*
+ * Cleanup macro for use with __free(path_put). Unlike DEFINE_FREE(), it
+ * avoids dereferencing and copying @path. path_put() handles the empty
+ * path correctly; just ensure @path is initialized:
+ *
+ * struct path path __free(path_put) = {};
+ */
+#define __free_path_put path_put
#endif /* _LINUX_PATH_H */
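
Following the comment above, a caller using the cleanup attribute might look
like this sketch (the kern_path() lookup is illustrative):

	struct path path __free(path_put) = {};
	int err;

	err = kern_path(name, LOOKUP_FOLLOW, &path);
	if (err)
		return err;	/* path is still empty; path_put() is a no-op */

	/* use path.mnt / path.dentry; path_put() runs at scope exit */
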
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 5ba475ca9078..078225b514d4 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -84,6 +84,14 @@ extern struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root,
void acpi_pci_add_bus(struct pci_bus *bus);
void acpi_pci_remove_bus(struct pci_bus *bus);
+#ifdef CONFIG_PCI
+void pci_acpi_setup(struct device *dev, struct acpi_device *adev);
+void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev);
+#else
+static inline void pci_acpi_setup(struct device *dev, struct acpi_device *adev) {}
+static inline void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev) {}
+#endif
+
#ifdef CONFIG_ACPI_PCI_SLOT
void acpi_pci_slot_init(void);
void acpi_pci_slot_enumerate(struct pci_bus *bus);
@@ -122,6 +130,9 @@ static inline void pci_acpi_add_edr_notifier(struct pci_dev *pdev) { }
static inline void pci_acpi_remove_edr_notifier(struct pci_dev *pdev) { }
#endif /* CONFIG_PCIE_EDR */
+int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *));
+void pci_acpi_clear_companion_lookup_hook(void);
+
#else /* CONFIG_ACPI */
static inline void acpi_pci_add_bus(struct pci_bus *bus) { }
static inline void acpi_pci_remove_bus(struct pci_bus *bus) { }
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index df54cd5b15db..75c6c86cf09d 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -8,6 +8,7 @@
/* Address Translation Service */
bool pci_ats_supported(struct pci_dev *dev);
int pci_enable_ats(struct pci_dev *dev, int ps);
+int pci_prepare_ats(struct pci_dev *dev, int ps);
void pci_disable_ats(struct pci_dev *dev);
int pci_ats_queue_depth(struct pci_dev *dev);
int pci_ats_page_aligned(struct pci_dev *dev);
@@ -16,6 +17,8 @@ static inline bool pci_ats_supported(struct pci_dev *d)
{ return false; }
static inline int pci_enable_ats(struct pci_dev *d, int ps)
{ return -ENODEV; }
+static inline int pci_prepare_ats(struct pci_dev *dev, int ps)
+{ return -ENODEV; }
static inline void pci_disable_ats(struct pci_dev *d) { }
static inline int pci_ats_queue_depth(struct pci_dev *d)
{ return -ENODEV; }
@@ -39,6 +42,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features);
void pci_disable_pasid(struct pci_dev *pdev);
int pci_pasid_features(struct pci_dev *pdev);
int pci_max_pasids(struct pci_dev *pdev);
+int pci_pasid_status(struct pci_dev *pdev);
#else /* CONFIG_PCI_PASID */
static inline int pci_enable_pasid(struct pci_dev *pdev, int features)
{ return -EINVAL; }
@@ -47,6 +51,8 @@ static inline int pci_pasid_features(struct pci_dev *pdev)
{ return -EINVAL; }
static inline int pci_max_pasids(struct pci_dev *pdev)
{ return -EINVAL; }
+static inline int pci_pasid_status(struct pci_dev *pdev)
+{ return -EINVAL; }
#endif /* CONFIG_PCI_PASID */
#endif /* LINUX_PCI_ATS_H */
diff --git a/include/linux/pci-bwctrl.h b/include/linux/pci-bwctrl.h
new file mode 100644
index 000000000000..cee07127455b
--- /dev/null
+++ b/include/linux/pci-bwctrl.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * PCIe bandwidth controller
+ *
+ * Copyright (C) 2023-2024 Intel Corporation
+ */
+
+#ifndef LINUX_PCI_BWCTRL_H
+#define LINUX_PCI_BWCTRL_H
+
+#include <linux/pci.h>
+
+struct thermal_cooling_device;
+
+#ifdef CONFIG_PCIE_THERMAL
+struct thermal_cooling_device *pcie_cooling_device_register(struct pci_dev *port);
+void pcie_cooling_device_unregister(struct thermal_cooling_device *cdev);
+#else
+static inline struct thermal_cooling_device *pcie_cooling_device_register(struct pci_dev *port)
+{
+ return NULL;
+}
+static inline void pcie_cooling_device_unregister(struct thermal_cooling_device *cdev)
+{
+}
+#endif
+
+#endif
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
deleted file mode 100644
index 249d4d7fbf18..000000000000
--- a/include/linux/pci-dma-compat.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* include this file if the platform implements the dma_ DMA Mapping API
- * and wants to provide the pci_ DMA Mapping API in terms of it */
-
-#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
-#define _ASM_GENERIC_PCI_DMA_COMPAT_H
-
-#include <linux/dma-mapping.h>
-
-/* This defines the direction arg to the DMA mapping routines. */
-#define PCI_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
-#define PCI_DMA_TODEVICE DMA_TO_DEVICE
-#define PCI_DMA_FROMDEVICE DMA_FROM_DEVICE
-#define PCI_DMA_NONE DMA_NONE
-
-static inline void *
-pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle)
-{
- return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void *
-pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
- dma_addr_t *dma_handle)
-{
- return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
-}
-
-static inline void
-pci_free_consistent(struct pci_dev *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t
-pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
-{
- return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
-}
-
-static inline dma_addr_t
-pci_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction)
-{
- return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
- size_t size, int direction)
-{
- dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
-}
-
-static inline int
-pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
- int nents, int direction)
-{
- dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
- int nelems, int direction)
-{
- dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
-}
-
-static inline void
-pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
- int nelems, int direction)
-{
- dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
-}
-
-static inline int
-pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
-{
- return dma_mapping_error(&pdev->dev, dma_addr);
-}
-
-#ifdef CONFIG_PCI
-static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
- return dma_set_mask(&dev->dev, mask);
-}
-
-static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{
- return dma_set_coherent_mask(&dev->dev, mask);
-}
-#else
-static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{ return -EIO; }
-static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{ return -EIO; }
-#endif
-
-#endif
diff --git a/include/linux/pci-doe.h b/include/linux/pci-doe.h
new file mode 100644
index 000000000000..bd4346a7c4e7
--- /dev/null
+++ b/include/linux/pci-doe.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data Object Exchange
+ * PCIe r6.0, sec 6.30 DOE
+ *
+ * Copyright (C) 2021 Huawei
+ * Jonathan Cameron <Jonathan.Cameron@huawei.com>
+ *
+ * Copyright (C) 2022 Intel Corporation
+ * Ira Weiny <ira.weiny@intel.com>
+ */
+
+#ifndef LINUX_PCI_DOE_H
+#define LINUX_PCI_DOE_H
+
+struct pci_doe_mb;
+
+#define PCI_DOE_FEATURE_DISCOVERY 0
+#define PCI_DOE_FEATURE_CMA 1
+#define PCI_DOE_FEATURE_SSESSION 2
+
+struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor,
+ u8 type);
+
+int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type,
+ const void *request, size_t request_sz,
+ void *response, size_t response_sz);
+
+#endif
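
A sketch of the intended call sequence, assuming PCI_VENDOR_ID_PCI_SIG from
<linux/pci_ids.h>; the request/response layout is defined by the feature's
own specification, not by this header:

	struct pci_doe_mb *mb;
	int rc;

	mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
				  PCI_DOE_FEATURE_CMA);
	if (!mb)
		return -ENODEV;

	rc = pci_doe(mb, PCI_VENDOR_ID_PCI_SIG, PCI_DOE_FEATURE_CMA,
		     req, sizeof(req), rsp, sizeof(rsp));
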
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index fbdadd4d8377..d930651473b4 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -45,6 +45,10 @@ struct pci_ecam_ops {
unsigned int bus_shift;
struct pci_ops pci_ops;
int (*init)(struct pci_config_window *);
+ int (*enable_device)(struct pci_host_bridge *,
+ struct pci_dev *);
+ void (*disable_device)(struct pci_host_bridge *,
+ struct pci_dev *);
};
/*
@@ -55,6 +59,7 @@ struct pci_ecam_ops {
struct pci_config_window {
struct resource res;
struct resource busr;
+ unsigned int bus_shift;
void *priv;
const struct pci_ecam_ops *ops;
union {
@@ -86,11 +91,6 @@ extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 *
extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */
-#endif
-
-#if IS_ENABLED(CONFIG_PCI_HOST_COMMON)
-/* for DT-based PCI controllers that support ECAM */
-int pci_host_common_probe(struct platform_device *pdev);
-int pci_host_common_remove(struct platform_device *pdev);
+extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */
#endif
#endif
diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h
index 662881335c7e..3e2140d7e31d 100644
--- a/include/linux/pci-ep-cfs.h
+++ b/include/linux/pci-ep-cfs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0+ */
-/**
+/*
* PCI Endpoint ConfigFS header file
*
* Copyright (C) 2017 Texas Instruments
diff --git a/include/linux/pci-ep-msi.h b/include/linux/pci-ep-msi.h
new file mode 100644
index 000000000000..7c5db90f9620
--- /dev/null
+++ b/include/linux/pci-ep-msi.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCI Endpoint *Function* side MSI header file
+ *
+ * Copyright (C) 2024 NXP
+ * Author: Frank Li <Frank.Li@nxp.com>
+ */
+
+#ifndef __PCI_EP_MSI__
+#define __PCI_EP_MSI__
+
+struct pci_epf;
+
+#ifdef CONFIG_PCI_ENDPOINT_MSI_DOORBELL
+int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 nums);
+void pci_epf_free_doorbell(struct pci_epf *epf);
+#else
+static inline int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 nums)
+{
+ return -ENODATA;
+}
+
+static inline void pci_epf_free_doorbell(struct pci_epf *epf)
+{
+}
+#endif /* CONFIG_PCI_ENDPOINT_MSI_DOORBELL */
+
+#endif /* __PCI_EP_MSI__ */
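
A bound EPF driver might allocate its doorbells along these lines; db_msg and
num_db are fields of struct pci_epf (see the pci-epf.h changes below):

	ret = pci_epf_alloc_doorbell(epf, 1);
	if (ret)
		return ret;	/* -ENODATA when doorbell support is absent */

	/* the host rings the doorbell by writing the MSI message in
	 * epf->db_msg[0].msg; the function services epf->db_msg[0].virq */

	pci_epf_free_doorbell(epf);	/* on unbind */
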
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index b82c9b100e97..4286bfdbfdfa 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
* PCI Endpoint *Controller* (EPC) header file
*
* Copyright (C) 2017 Texas Instruments
@@ -19,13 +19,6 @@ enum pci_epc_interface_type {
SECONDARY_INTERFACE,
};
-enum pci_epc_irq_type {
- PCI_EPC_IRQ_UNKNOWN,
- PCI_EPC_IRQ_LEGACY,
- PCI_EPC_IRQ_MSI,
- PCI_EPC_IRQ_MSIX,
-};
-
static inline const char *
pci_epc_interface_string(enum pci_epc_interface_type type)
{
@@ -40,10 +33,42 @@ pci_epc_interface_string(enum pci_epc_interface_type type)
}
/**
+ * struct pci_epc_map - information about EPC memory for mapping an RC PCI
+ * address range
+ * @pci_addr: start address of the RC PCI address range to map
+ * @pci_size: size of the RC PCI address range mapped from @pci_addr
+ * @map_pci_addr: RC PCI address used as the first address mapped (may be lower
+ * than @pci_addr)
+ * @map_size: size of the controller memory needed for mapping the RC PCI address
+ * range @map_pci_addr..@pci_addr+@pci_size
+ * @phys_base: base physical address of the allocated EPC memory for mapping the
+ * RC PCI address range
+ * @phys_addr: physical address at which @pci_addr is mapped
+ * @virt_base: base virtual address of the allocated EPC memory for mapping the
+ * RC PCI address range
+ * @virt_addr: virtual address at which @pci_addr is mapped
+ */
+struct pci_epc_map {
+ u64 pci_addr;
+ size_t pci_size;
+
+ u64 map_pci_addr;
+ size_t map_size;
+
+ phys_addr_t phys_base;
+ phys_addr_t phys_addr;
+ void __iomem *virt_base;
+ void __iomem *virt_addr;
+};
+
+/**
* struct pci_epc_ops - set of function pointers for performing EPC operations
* @write_header: ops to populate configuration space header
* @set_bar: ops to configure the BAR
* @clear_bar: ops to reset the BAR
+ * @align_addr: operation to get the mapping address, mapping size and offset
+ * into a controller memory window needed to map an RC PCI address
+ * region
* @map_addr: ops to map CPU address to PCI address
* @unmap_addr: ops to unmap CPU address and PCI address
* @set_msi: ops to set the requested number of MSI interrupts in the MSI
@@ -58,34 +83,38 @@ pci_epc_interface_string(enum pci_epc_interface_type type)
* @map_msi_irq: ops to map physical address to MSI address and return MSI data
* @start: ops to start the PCI link
* @stop: ops to stop the PCI link
+ * @get_features: ops to get the features supported by the EPC
* @owner: the module owner containing the ops
*/
struct pci_epc_ops {
- int (*write_header)(struct pci_epc *epc, u8 func_no,
+ int (*write_header)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr);
- int (*set_bar)(struct pci_epc *epc, u8 func_no,
+ int (*set_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
- void (*clear_bar)(struct pci_epc *epc, u8 func_no,
+ void (*clear_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
- int (*map_addr)(struct pci_epc *epc, u8 func_no,
+ u64 (*align_addr)(struct pci_epc *epc, u64 pci_addr, size_t *size,
+ size_t *offset);
+ int (*map_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr, u64 pci_addr, size_t size);
- void (*unmap_addr)(struct pci_epc *epc, u8 func_no,
+ void (*unmap_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr);
- int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts);
- int (*get_msi)(struct pci_epc *epc, u8 func_no);
- int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts,
- enum pci_barno, u32 offset);
- int (*get_msix)(struct pci_epc *epc, u8 func_no);
- int (*raise_irq)(struct pci_epc *epc, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num);
- int (*map_msi_irq)(struct pci_epc *epc, u8 func_no,
+ int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 nr_irqs);
+ int (*get_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+ int (*set_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 nr_irqs, enum pci_barno, u32 offset);
+ int (*get_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+ int (*raise_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ unsigned int type, u16 interrupt_num);
+ int (*map_msi_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr, u8 interrupt_num,
u32 entry_size, u32 *msi_data,
u32 *msi_addr_offset);
int (*start)(struct pci_epc *epc);
void (*stop)(struct pci_epc *epc);
const struct pci_epc_features* (*get_features)(struct pci_epc *epc,
- u8 func_no);
+ u8 func_no, u8 vfunc_no);
struct module *owner;
};
@@ -120,6 +149,7 @@ struct pci_epc_mem {
* struct pci_epc - represents the PCI EPC device
* @dev: PCI EPC device
* @pci_epf: list of endpoint functions present in this EPC device
+ * @list_lock: Mutex for protecting pci_epf list
* @ops: function pointers for performing endpoint operations
* @windows: array of address space of the endpoint controller
* @mem: first window of the endpoint controller, which corresponds to
@@ -127,24 +157,67 @@ struct pci_epc_mem {
* single window.
* @num_windows: number of windows supported by device
* @max_functions: max number of functions that can be configured in this EPC
+ * @max_vfs: Array indicating the maximum number of virtual functions that can
+ * be associated with each physical function
* @group: configfs group representing the PCI EPC device
* @lock: mutex to protect pci_epc ops
* @function_num_map: bitmap to manage physical function number
- * @notifier: used to notify EPF of any EPC events (like linkup)
+ * @domain_nr: PCI domain number of the endpoint controller
+ * @init_complete: flag to indicate whether the EPC initialization is complete
+ * or not
*/
struct pci_epc {
struct device dev;
struct list_head pci_epf;
+ struct mutex list_lock;
const struct pci_epc_ops *ops;
struct pci_epc_mem **windows;
struct pci_epc_mem *mem;
unsigned int num_windows;
u8 max_functions;
+ u8 *max_vfs;
struct config_group *group;
/* mutex to protect against concurrent access of EP controller */
struct mutex lock;
unsigned long function_num_map;
- struct atomic_notifier_head notifier;
+ int domain_nr;
+ bool init_complete;
+};
+
+/**
+ * enum pci_epc_bar_type - configurability of endpoint BAR
+ * @BAR_PROGRAMMABLE: The BAR mask can be configured by the EPC.
+ * @BAR_FIXED: The BAR mask is fixed by the hardware.
+ * @BAR_RESIZABLE: The BAR implements the PCI-SIG Resizable BAR Capability.
+ * NOTE: An EPC driver can currently only set a single supported
+ * size.
+ * @BAR_RESERVED: The BAR should not be touched by an EPF driver.
+ */
+enum pci_epc_bar_type {
+ BAR_PROGRAMMABLE = 0,
+ BAR_FIXED,
+ BAR_RESIZABLE,
+ BAR_RESERVED,
+};
+
+/**
+ * struct pci_epc_bar_desc - hardware description for a BAR
+ * @type: the type of the BAR
+ * @fixed_size: the fixed size, only applicable if type is BAR_FIXED.
+ * @only_64bit: if true, an EPF driver is not allowed to choose if this BAR
+ * should be configured as 32-bit or 64-bit, the EPF driver must
+ * configure this BAR as 64-bit. Additionally, the BAR succeeding
+ * this BAR must be set to type BAR_RESERVED.
+ *
+ * only_64bit should not be set on a BAR of type BAR_RESERVED.
+ * (If BARx is a 64-bit BAR that an EPF driver is not allowed to
+ * touch, then both BARx and BARx+1 must be set to type
+ * BAR_RESERVED.)
+ */
+struct pci_epc_bar_desc {
+ enum pci_epc_bar_type type;
+ u64 fixed_size;
+ bool only_64bit;
};
/**
@@ -152,24 +225,23 @@ struct pci_epc {
* @linkup_notifier: indicate if the EPC device can notify EPF driver on link up
* @msi_capable: indicate if the endpoint function has MSI capability
* @msix_capable: indicate if the endpoint function has MSI-X capability
- * @reserved_bar: bitmap to indicate reserved BAR unavailable to function driver
- * @bar_fixed_64bit: bitmap to indicate fixed 64bit BARs
- * @bar_fixed_size: Array specifying the size supported by each BAR
+ * @intx_capable: indicate if the endpoint can raise INTx interrupts
+ * @bar: array specifying the hardware description for each BAR
* @align: alignment size required for BAR buffer allocation
*/
struct pci_epc_features {
unsigned int linkup_notifier : 1;
- unsigned int core_init_notifier : 1;
unsigned int msi_capable : 1;
unsigned int msix_capable : 1;
- u8 reserved_bar;
- u8 bar_fixed_64bit;
- u64 bar_fixed_size[PCI_STD_NUM_BARS];
+ unsigned int intx_capable : 1;
+ struct pci_epc_bar_desc bar[PCI_STD_NUM_BARS];
size_t align;
};
#define to_pci_epc(device) container_of((device), struct pci_epc, dev)
+#ifdef CONFIG_PCI_ENDPOINT
+
#define pci_epc_create(dev, ops) \
__pci_epc_create((dev), (ops), THIS_MODULE)
#define devm_pci_epc_create(dev, ops) \
@@ -185,51 +257,49 @@ static inline void *epc_get_drvdata(struct pci_epc *epc)
return dev_get_drvdata(&epc->dev);
}
-static inline int
-pci_epc_register_notifier(struct pci_epc *epc, struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&epc->notifier, nb);
-}
-
struct pci_epc *
__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
struct module *owner);
struct pci_epc *
__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
struct module *owner);
-void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc);
void pci_epc_destroy(struct pci_epc *epc);
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
void pci_epc_linkup(struct pci_epc *epc);
+void pci_epc_linkdown(struct pci_epc *epc);
void pci_epc_init_notify(struct pci_epc *epc);
+void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf);
+void pci_epc_deinit_notify(struct pci_epc *epc);
+void pci_epc_bus_master_enable_notify(struct pci_epc *epc);
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
-int pci_epc_write_header(struct pci_epc *epc, u8 func_no,
+int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr);
-int pci_epc_set_bar(struct pci_epc *epc, u8 func_no,
+int pci_epc_bar_size_to_rebar_cap(size_t size, u32 *cap);
+int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
-void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no,
+void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar);
-int pci_epc_map_addr(struct pci_epc *epc, u8 func_no,
+int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr,
u64 pci_addr, size_t size);
-void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
+void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr);
-int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts);
-int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 nr_irqs);
+int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u16 nr_irqs,
enum pci_barno, u32 offset);
-int pci_epc_get_msix(struct pci_epc *epc, u8 func_no);
-int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no,
+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no);
+int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr, u8 interrupt_num,
u32 entry_size, u32 *msi_data, u32 *msi_addr_offset);
-int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num);
+int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ unsigned int type, u16 interrupt_num);
int pci_epc_start(struct pci_epc *epc);
void pci_epc_stop(struct pci_epc *epc);
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
- u8 func_no);
+ u8 func_no, u8 vfunc_no);
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features);
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
@@ -247,4 +317,18 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size);
void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
void __iomem *virt_addr, size_t size);
+int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u64 pci_addr, size_t pci_size, struct pci_epc_map *map);
+void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epc_map *map);
+
+#else
+static inline void pci_epc_init_notify(struct pci_epc *epc)
+{
+}
+
+static inline void pci_epc_deinit_notify(struct pci_epc *epc)
+{
+}
+#endif /* CONFIG_PCI_ENDPOINT */
#endif /* __LINUX_PCI_EPC_H */
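
Putting struct pci_epc_map to work, an EPF driver can map an arbitrary RC PCI
address range without handling controller alignment itself; a sketch (a full
transfer may need to loop if map.pci_size comes back smaller than the
requested length):

	struct pci_epc_map map;
	int ret;

	ret = pci_epc_mem_map(epc, func_no, vfunc_no, pci_addr, len, &map);
	if (ret)
		return ret;

	/* map.virt_addr points exactly at pci_addr even when the window
	 * had to start at the aligned map.map_pci_addr */
	memcpy_toio(map.virt_addr, buf, map.pci_size);

	pci_epc_mem_unmap(epc, func_no, vfunc_no, &map);
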
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 6833e2160ef1..48f68c4dcfa5 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
* PCI Endpoint *Function* (EPF) header file
*
* Copyright (C) 2017 Texas Instruments
@@ -12,16 +12,13 @@
#include <linux/configfs.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+#include <linux/msi.h>
#include <linux/pci.h>
struct pci_epf;
+struct pci_epc_features;
enum pci_epc_interface_type;
-enum pci_notify_event {
- CORE_INIT,
- LINK_UP,
-};
-
enum pci_barno {
NO_BAR = -1,
BAR_0,
@@ -42,7 +39,7 @@ enum pci_barno {
* @baseclass_code: broadly classifies the type of function the device performs
* @cache_line_size: specifies the system cacheline size in units of DWORDs
* @subsys_vendor_id: vendor of the add-in card or subsystem
- * @subsys_id: id specific to vendor
+ * @subsys_id: ID specific to vendor
* @interrupt_pin: interrupt pin the device (or device function) uses
*/
struct pci_epf_header {
@@ -63,7 +60,7 @@ struct pci_epf_header {
 * @bind: ops to perform when an EPC device has been bound to EPF device
 * @unbind: ops to perform when a binding has been lost between an EPC device
 * and EPF device
- * @add_cfs: ops to initialize function specific configfs attributes
+ * @add_cfs: ops to initialize function-specific configfs attributes
*/
struct pci_epf_ops {
int (*bind)(struct pci_epf *epf);
@@ -73,6 +70,22 @@ struct pci_epf_ops {
};
/**
+ * struct pci_epc_event_ops - Callbacks for capturing the EPC events
+ * @epc_init: Callback for the EPC initialization complete event
+ * @epc_deinit: Callback for the EPC deinitialization event
+ * @link_up: Callback for the EPC link up event
+ * @link_down: Callback for the EPC link down event
+ * @bus_master_enable: Callback for the EPC Bus Master Enable event
+ */
+struct pci_epc_event_ops {
+ int (*epc_init)(struct pci_epf *epf);
+ void (*epc_deinit)(struct pci_epf *epf);
+ int (*link_up)(struct pci_epf *epf);
+ int (*link_down)(struct pci_epf *epf);
+ int (*bus_master_enable)(struct pci_epf *epf);
+};
+
+/**
* struct pci_epf_driver - represents the PCI EPF driver
* @probe: ops to perform when a new EPF device has been bound to the EPF driver
* @remove: ops to perform when the binding between the EPF device and EPF
@@ -84,45 +97,63 @@ struct pci_epf_ops {
* @id_table: identifies EPF devices for probing
*/
struct pci_epf_driver {
- int (*probe)(struct pci_epf *epf);
- int (*remove)(struct pci_epf *epf);
+ int (*probe)(struct pci_epf *epf,
+ const struct pci_epf_device_id *id);
+ void (*remove)(struct pci_epf *epf);
struct device_driver driver;
- struct pci_epf_ops *ops;
+ const struct pci_epf_ops *ops;
struct module *owner;
struct list_head epf_group;
const struct pci_epf_device_id *id_table;
};
-#define to_pci_epf_driver(drv) (container_of((drv), struct pci_epf_driver, \
- driver))
+#define to_pci_epf_driver(drv) container_of_const((drv), struct pci_epf_driver, driver)
/**
* struct pci_epf_bar - represents the BAR of EPF device
* @phys_addr: physical address that should be mapped to the BAR
* @addr: virtual address corresponding to the @phys_addr
* @size: the size of the address space present in BAR
+ * @mem_size: the size actually allocated to accommodate the iATU alignment
+ * requirement
+ * @barno: BAR number
+ * @flags: flags that are set for the BAR
*/
struct pci_epf_bar {
dma_addr_t phys_addr;
void *addr;
size_t size;
+ size_t mem_size;
enum pci_barno barno;
int flags;
};
/**
+ * struct pci_epf_doorbell_msg - represents doorbell message
+ * @msg: MSI message
+ * @virq: IRQ number of this doorbell MSI message
+ */
+struct pci_epf_doorbell_msg {
+ struct msi_msg msg;
+ int virq;
+};
+
+/**
* struct pci_epf - represents the PCI EPF device
* @dev: the PCI EPF device
* @name: the name of the PCI EPF device
* @header: represents standard configuration header
* @bar: represents the BAR of EPF device
* @msi_interrupts: number of MSI interrupts required by this function
- * @func_no: unique function number within this endpoint device
+ * @msix_interrupts: number of MSI-X interrupts required by this function
+ * @func_no: unique (physical) function number within this endpoint device
+ * @vfunc_no: unique virtual function number within a physical function
* @epc: the EPC device to which this EPF device is bound
+ * @epf_pf: the physical EPF device to which this virtual EPF device is bound
* @driver: the EPF driver to which this EPF device is bound
+ * @id: pointer to the EPF device ID
* @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
- * @nb: notifier block to notify EPF of any EPC events (like linkup)
* @lock: mutex to protect pci_epf_ops
* @sec_epc: the secondary EPC device to which this EPF device is bound
* @sec_epc_list: to add pci_epf as list of PCI endpoint functions to secondary
@@ -130,37 +161,54 @@ struct pci_epf_bar {
* @sec_epc_bar: represents the BAR of EPF device associated with secondary EPC
* @sec_epc_func_no: unique (physical) function number within the secondary EPC
* @group: configfs group associated with the EPF device
+ * @is_bound: indicates if bind notification to function driver has been invoked
+ * @is_vf: true - virtual function, false - physical function
+ * @vfunction_num_map: bitmap to manage virtual function number
+ * @pci_vepf: list of virtual endpoint functions associated with this function
+ * @event_ops: callbacks for capturing the EPC events
+ * @db_msg: data for MSI from RC side
+ * @num_db: number of doorbells
*/
struct pci_epf {
struct device dev;
const char *name;
struct pci_epf_header *header;
- struct pci_epf_bar bar[6];
+ struct pci_epf_bar bar[PCI_STD_NUM_BARS];
u8 msi_interrupts;
u16 msix_interrupts;
u8 func_no;
+ u8 vfunc_no;
struct pci_epc *epc;
+ struct pci_epf *epf_pf;
struct pci_epf_driver *driver;
+ const struct pci_epf_device_id *id;
struct list_head list;
- struct notifier_block nb;
/* mutex to protect against concurrent access of pci_epf_ops */
struct mutex lock;
/* Below members are to attach secondary EPC to an endpoint function */
struct pci_epc *sec_epc;
struct list_head sec_epc_list;
- struct pci_epf_bar sec_epc_bar[6];
+ struct pci_epf_bar sec_epc_bar[PCI_STD_NUM_BARS];
u8 sec_epc_func_no;
struct config_group *group;
+ unsigned int is_bound;
+ unsigned int is_vf;
+ unsigned long vfunction_num_map;
+ struct list_head pci_vepf;
+ const struct pci_epc_event_ops *event_ops;
+ struct pci_epf_doorbell_msg *db_msg;
+ u16 num_db;
};
/**
- * struct pci_epf_msix_tbl - represents the MSIX table entry structure
- * @msg_addr: Writes to this address will trigger MSIX interrupt in host
- * @msg_data: Data that should be written to @msg_addr to trigger MSIX interrupt
+ * struct pci_epf_msix_tbl - represents the MSI-X table entry structure
+ * @msg_addr: Writes to this address will trigger MSI-X interrupt in host
+ * @msg_data: Data that should be written to @msg_addr to trigger MSI-X
+ * interrupt
* @vector_ctrl: Identifies if the function is prohibited from sending a message
- * using this MSIX table entry
+ * using this MSI-X table entry
*/
struct pci_epf_msix_tbl {
u64 msg_addr;
@@ -189,11 +237,21 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
struct module *owner);
void pci_epf_unregister_driver(struct pci_epf_driver *driver);
void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
- size_t align, enum pci_epc_interface_type type);
+ const struct pci_epc_features *epc_features,
+ enum pci_epc_interface_type type);
void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
enum pci_epc_interface_type type);
+
+int pci_epf_assign_bar_space(struct pci_epf *epf, size_t size,
+ enum pci_barno bar,
+ const struct pci_epc_features *epc_features,
+ enum pci_epc_interface_type type,
+ dma_addr_t bar_addr);
+
+int pci_epf_align_inbound_addr(struct pci_epf *epf, enum pci_barno bar,
+ u64 addr, dma_addr_t *base, size_t *off);
int pci_epf_bind(struct pci_epf *epf);
void pci_epf_unbind(struct pci_epf *epf);
-struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf,
- struct config_group *group);
+int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf);
+void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf);
#endif /* __LINUX_PCI_EPF_H */
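
An EPF driver consuming the event callbacks above might wire them up as in
this sketch (all foo_* names are hypothetical):

	static int foo_epc_init(struct pci_epf *epf)
	{
		/* program the header/BARs now that the EPC is initialized */
		return 0;
	}

	static const struct pci_epc_event_ops foo_event_ops = {
		.epc_init = foo_epc_init,
	};

	static int foo_probe(struct pci_epf *epf,
			     const struct pci_epf_device_id *id)
	{
		epf->event_ops = &foo_event_ops;
		return 0;
	}
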
diff --git a/include/linux/pci-ide.h b/include/linux/pci-ide.h
new file mode 100644
index 000000000000..37a1ad9501b0
--- /dev/null
+++ b/include/linux/pci-ide.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common helpers for drivers (e.g. low-level PCI/TSM drivers) implementing the
+ * IDE key management protocol (IDE_KM) as defined by:
+ * PCIe r7.0 section 6.33 Integrity & Data Encryption (IDE)
+ *
+ * Copyright(c) 2024-2025 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __PCI_IDE_H__
+#define __PCI_IDE_H__
+
+enum pci_ide_partner_select {
+ PCI_IDE_EP,
+ PCI_IDE_RP,
+ PCI_IDE_PARTNER_MAX,
+ /*
+ * In addition to the resources in each partner port, the
+ * platform / host-bridge also has a Stream ID pool that it
+ * shares across root ports. Let pci_ide_stream_alloc() use the
+ * alloc_stream_index() helper just as it does for endpoints and
+ * root ports.
+ */
+ PCI_IDE_HB = PCI_IDE_PARTNER_MAX,
+};
+
+/**
+ * struct pci_ide_partner - Per port pair Selective IDE Stream settings
+ * @rid_start: Partner Port Requester ID range start
+ * @rid_end: Partner Port Requester ID range end
+ * @stream_index: Selective IDE Stream Register Block selection
+ * @mem_assoc: PCI bus memory address association for targeting peer partner
+ * @pref_assoc: PCI bus prefetchable memory address association for
+ * targeting peer partner
+ * @default_stream: Endpoint uses this stream for all upstream TLPs regardless of
+ * address and RID association registers
+ * @setup: flag to track whether to run pci_ide_stream_teardown() for this
+ * partner slot
+ * @enable: flag whether to run pci_ide_stream_disable() for this partner slot
+ *
+ * By default, pci_ide_stream_alloc() initializes @mem_assoc and @pref_assoc
+ * with the immediate ancestor downstream port memory ranges (i.e. Type 1
+ * Configuration Space Header values). The caller may zero-size a range
+ * ({0, -1}) to drop it from consideration at pci_ide_stream_setup() time.
+ */
+struct pci_ide_partner {
+ u16 rid_start;
+ u16 rid_end;
+ u8 stream_index;
+ struct pci_bus_region mem_assoc;
+ struct pci_bus_region pref_assoc;
+ unsigned int default_stream:1;
+ unsigned int setup:1;
+ unsigned int enable:1;
+};
+
+/**
+ * struct pci_ide_regs - Hardware register association settings for Selective
+ * IDE Streams
+ * @rid1: IDE RID Association Register 1
+ * @rid2: IDE RID Association Register 2
+ * @addr: Up to two address association blocks (IDE Address Association Register
+ * 1 through 3) for MMIO and prefetchable MMIO
+ * @nr_addr: Number of address association blocks initialized
+ *
+ * See pci_ide_stream_to_regs()
+ */
+struct pci_ide_regs {
+ u32 rid1;
+ u32 rid2;
+ struct {
+ u32 assoc1;
+ u32 assoc2;
+ u32 assoc3;
+ } addr[2];
+ int nr_addr;
+};
+
+/**
+ * struct pci_ide - PCIe Selective IDE Stream descriptor
+ * @pdev: PCIe Endpoint in the pci_ide_partner pair
+ * @partner: per-partner settings
+ * @host_bridge_stream: allocated from host bridge @ide_stream_ida pool
+ * @stream_id: unique Stream ID (within Partner Port pairing)
+ * @name: name of the established Selective IDE Stream in sysfs
+ * @tsm_dev: For TSM established IDE, the TSM device context
+ *
+ * Negative @stream_id values indicate "uninitialized", on the
+ * expectation that for TSM-established IDE the TSM owns the stream_id
+ * allocation.
+ */
+struct pci_ide {
+ struct pci_dev *pdev;
+ struct pci_ide_partner partner[PCI_IDE_PARTNER_MAX];
+ u8 host_bridge_stream;
+ int stream_id;
+ const char *name;
+ struct tsm_dev *tsm_dev;
+};
+
+/*
+ * Some devices need help with aliased stream-ids even for idle streams. Use
+ * this ID as the "never enabled" placeholder.
+ */
+#define PCI_IDE_RESERVED_STREAM_ID 255
+
+void pci_ide_set_nr_streams(struct pci_host_bridge *hb, u16 nr);
+struct pci_ide_partner *pci_ide_to_settings(struct pci_dev *pdev,
+ struct pci_ide *ide);
+struct pci_ide *pci_ide_stream_alloc(struct pci_dev *pdev);
+void pci_ide_stream_free(struct pci_ide *ide);
+int pci_ide_stream_register(struct pci_ide *ide);
+void pci_ide_stream_unregister(struct pci_ide *ide);
+void pci_ide_stream_setup(struct pci_dev *pdev, struct pci_ide *ide);
+void pci_ide_stream_teardown(struct pci_dev *pdev, struct pci_ide *ide);
+int pci_ide_stream_enable(struct pci_dev *pdev, struct pci_ide *ide);
+void pci_ide_stream_disable(struct pci_dev *pdev, struct pci_ide *ide);
+void pci_ide_stream_release(struct pci_ide *ide);
+DEFINE_FREE(pci_ide_stream_release, struct pci_ide *, if (_T) pci_ide_stream_release(_T))
+#endif /* __PCI_IDE_H__ */
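
The stream lifecycle implied by the helpers above is roughly alloc ->
register -> setup -> enable, with the DEFINE_FREE() hook covering the unwind;
a sketch, with the TSM/IDE_KM specifics elided:

	struct pci_ide *ide __free(pci_ide_stream_release) =
		pci_ide_stream_alloc(pdev);
	int ret;

	if (!ide)
		return -ENOMEM;

	ret = pci_ide_stream_register(ide);
	if (ret)
		return ret;

	pci_ide_stream_setup(pdev, ide);
	return pci_ide_stream_enable(pdev, ide);
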
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
index 8318a97c9c61..517e121d2598 100644
--- a/include/linux/pci-p2pdma.h
+++ b/include/linux/pci-p2pdma.h
@@ -16,12 +16,62 @@
struct block_device;
struct scatterlist;
+/**
+ * struct p2pdma_provider - provider of a P2PDMA-capable MMIO range
+ * @owner: device that owns the MMIO address range
+ * @bus_offset: offset added to a CPU physical address within the range to
+ *	obtain the corresponding PCI bus address (see
+ *	pci_p2pdma_bus_addr_map())
+ *
+ * A p2pdma provider is a range of MMIO address space available to the CPU.
+ */
+struct p2pdma_provider {
+ struct device *owner;
+ u64 bus_offset;
+};
+
+enum pci_p2pdma_map_type {
+ /*
+ * PCI_P2PDMA_MAP_UNKNOWN: Used internally as an initial state before
+ * the mapping type has been calculated. Exported routines for the API
+ * will never return this value.
+ */
+ PCI_P2PDMA_MAP_UNKNOWN = 0,
+
+ /*
+ * Not a PCI P2PDMA transfer.
+ */
+ PCI_P2PDMA_MAP_NONE,
+
+ /*
+ * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
+ * traverse the host bridge and the host bridge is not in the
+ * allowlist. DMA Mapping routines should return an error when
+ * this is returned.
+ */
+ PCI_P2PDMA_MAP_NOT_SUPPORTED,
+
+ /*
+ * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
+ * each other directly through a PCI switch and the transaction will
+ * not traverse the host bridge. Such a mapping should program
+ * the DMA engine with PCI bus addresses.
+ */
+ PCI_P2PDMA_MAP_BUS_ADDR,
+
+ /*
+ * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
+ * to each other, but the transaction traverses a host bridge on the
+ * allowlist. In this case, a normal mapping either with CPU physical
+ * addresses (in the case of dma-direct) or IOVA addresses (in the
+ * case of IOMMUs) should be used to program the DMA engine.
+ */
+ PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
+};
+
#ifdef CONFIG_PCI_P2PDMA
+int pcim_p2pdma_init(struct pci_dev *pdev);
+struct p2pdma_provider *pcim_p2pdma_provider(struct pci_dev *pdev, int bar);
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
u64 offset);
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
int num_clients, bool verbose);
-bool pci_has_p2pmem(struct pci_dev *pdev);
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients);
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size);
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size);
@@ -30,15 +80,22 @@ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
unsigned int *nents, u32 length);
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl);
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish);
-int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs);
-void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, unsigned long attrs);
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
bool *use_p2pdma);
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
bool use_p2pdma);
+enum pci_p2pdma_map_type pci_p2pdma_map_type(struct p2pdma_provider *provider,
+ struct device *dev);
#else /* CONFIG_PCI_P2PDMA */
+static inline int pcim_p2pdma_init(struct pci_dev *pdev)
+{
+ return -EOPNOTSUPP;
+}
+static inline struct p2pdma_provider *pcim_p2pdma_provider(struct pci_dev *pdev,
+ int bar)
+{
+ return NULL;
+}
static inline int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar,
size_t size, u64 offset)
{
@@ -49,10 +106,6 @@ static inline int pci_p2pdma_distance_many(struct pci_dev *provider,
{
return -1;
}
-static inline bool pci_has_p2pmem(struct pci_dev *pdev)
-{
- return false;
-}
static inline struct pci_dev *pci_p2pmem_find_many(struct device **clients,
int num_clients)
{
@@ -83,17 +136,6 @@ static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev,
static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
}
-static inline int pci_p2pdma_map_sg_attrs(struct device *dev,
- struct scatterlist *sg, int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- return 0;
-}
-static inline void pci_p2pdma_unmap_sg_attrs(struct device *dev,
- struct scatterlist *sg, int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
-}
static inline int pci_p2pdma_enable_store(const char *page,
struct pci_dev **p2p_dev, bool *use_p2pdma)
{
@@ -105,6 +147,11 @@ static inline ssize_t pci_p2pdma_enable_show(char *page,
{
return sprintf(page, "none\n");
}
+static inline enum pci_p2pdma_map_type
+pci_p2pdma_map_type(struct p2pdma_provider *provider, struct device *dev)
+{
+ return PCI_P2PDMA_MAP_NOT_SUPPORTED;
+}
#endif /* CONFIG_PCI_P2PDMA */
@@ -119,16 +166,48 @@ static inline struct pci_dev *pci_p2pmem_find(struct device *client)
return pci_p2pmem_find_many(&client, 1);
}
-static inline int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
+struct pci_p2pdma_map_state {
+ struct p2pdma_provider *mem;
+ enum pci_p2pdma_map_type map;
+};
+
+/* helper for pci_p2pdma_state(), do not use directly */
+void __pci_p2pdma_update_state(struct pci_p2pdma_map_state *state,
+ struct device *dev, struct page *page);
+
+/**
+ * pci_p2pdma_state - check the P2P transfer state of a page
+ * @state: P2P state structure
+ * @dev: device to transfer to/from
+ * @page: page to map
+ *
+ * Check if @page is a PCI P2PDMA page and, if so, of what kind. Returns the
+ * map type, and updates @state with all information needed for a P2P transfer.
+ */
+static inline enum pci_p2pdma_map_type
+pci_p2pdma_state(struct pci_p2pdma_map_state *state, struct device *dev,
+ struct page *page)
{
- return pci_p2pdma_map_sg_attrs(dev, sg, nents, dir, 0);
+ if (IS_ENABLED(CONFIG_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
+ __pci_p2pdma_update_state(state, dev, page);
+ return state->map;
+ }
+ return PCI_P2PDMA_MAP_NONE;
}
-static inline void pci_p2pdma_unmap_sg(struct device *dev,
- struct scatterlist *sg, int nents, enum dma_data_direction dir)
+/**
+ * pci_p2pdma_bus_addr_map - Translate a physical address to a bus address
+ * for a PCI_P2PDMA_MAP_BUS_ADDR transfer.
+ * @provider: P2P provider structure
+ * @paddr: physical address to map
+ *
+ * Map a physically contiguous PCI_P2PDMA_MAP_BUS_ADDR transfer.
+ */
+static inline dma_addr_t
+pci_p2pdma_bus_addr_map(struct p2pdma_provider *provider, phys_addr_t paddr)
{
- pci_p2pdma_unmap_sg_attrs(dev, sg, nents, dir, 0);
+ return paddr + provider->bus_offset;
}
#endif /* _LINUX_PCI_P2P_H */
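
A DMA-mapping path built on the state helper above branches per map type,
roughly as follows (the direction and length are illustrative; the
THRU_HOST_BRIDGE case deliberately falls back to the regular streaming API):

	struct pci_p2pdma_map_state state = {};
	dma_addr_t addr;

	switch (pci_p2pdma_state(&state, dev, page)) {
	case PCI_P2PDMA_MAP_BUS_ADDR:
		addr = pci_p2pdma_bus_addr_map(state.mem, page_to_phys(page));
		break;
	case PCI_P2PDMA_MAP_NONE:
	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
		addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
		break;
	default:
		return -EREMOTEIO;	/* PCI_P2PDMA_MAP_NOT_SUPPORTED */
	}
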
diff --git a/include/linux/pci-pwrctrl.h b/include/linux/pci-pwrctrl.h
new file mode 100644
index 000000000000..4aefc7901cd1
--- /dev/null
+++ b/include/linux/pci-pwrctrl.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#ifndef __PCI_PWRCTRL_H__
+#define __PCI_PWRCTRL_H__
+
+#include <linux/notifier.h>
+#include <linux/workqueue.h>
+
+struct device;
+struct device_link;
+
+/*
+ * This is a simple framework for solving the issue of PCI devices that require
+ * certain resources (regulators, GPIOs, clocks) to be enabled before the
+ * device can actually be detected on the PCI bus.
+ *
+ * The idea is to reuse the platform bus to populate OF nodes describing the
+ * PCI device and its resources, let these platform devices probe and enable
+ * relevant resources and then trigger a rescan of the PCI bus allowing for the
+ * same device (with a second associated struct device) to be registered with
+ * the PCI subsystem.
+ *
+ * To preserve a correct hierarchy for PCI power management and device reset,
+ * we create a device link between the power control platform device (parent)
+ * and the supplied PCI device (child).
+ */
+
+/**
+ * struct pci_pwrctrl - PCI device power control context.
+ * @dev: Address of the power-controlling device.
+ *
+ * An object of this type must be allocated by the PCI power control device and
+ * passed to the pwrctrl subsystem to trigger a bus rescan and setup a device
+ * link with the device once it's up.
+ */
+struct pci_pwrctrl {
+ struct device *dev;
+
+ /* private: internal use only */
+ struct notifier_block nb;
+ struct device_link *link;
+ struct work_struct work;
+};
+
+void pci_pwrctrl_init(struct pci_pwrctrl *pwrctrl, struct device *dev);
+int pci_pwrctrl_device_set_ready(struct pci_pwrctrl *pwrctrl);
+void pci_pwrctrl_device_unset_ready(struct pci_pwrctrl *pwrctrl);
+int devm_pci_pwrctrl_device_set_ready(struct device *dev,
+ struct pci_pwrctrl *pwrctrl);
+
+#endif /* __PCI_PWRCTRL_H__ */
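
A minimal pwrctrl platform driver probe, per the framework description above
(foo_* names hypothetical, resource enabling elided):

	static int foo_pwrctrl_probe(struct platform_device *pdev)
	{
		struct pci_pwrctrl *ctx;

		ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		/* enable regulators / clocks / GPIOs here */

		pci_pwrctrl_init(ctx, &pdev->dev);
		return devm_pci_pwrctrl_device_set_ready(&pdev->dev, ctx);
	}
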
diff --git a/include/linux/pci-tph.h b/include/linux/pci-tph.h
new file mode 100644
index 000000000000..ba28140ce670
--- /dev/null
+++ b/include/linux/pci-tph.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TPH (TLP Processing Hints)
+ *
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ * Eric Van Tassell <Eric.VanTassell@amd.com>
+ * Wei Huang <wei.huang2@amd.com>
+ */
+#ifndef LINUX_PCI_TPH_H
+#define LINUX_PCI_TPH_H
+
+/*
+ * According to the ECN for PCI Firmware Spec, Steering Tag can be different
+ * depending on the memory type: Volatile Memory or Persistent Memory. When a
+ * caller queries a target's Steering Tag, it must provide the target's
+ * tph_mem_type. ECN link: https://members.pcisig.com/wg/PCI-SIG/document/15470.
+ */
+enum tph_mem_type {
+ TPH_MEM_TYPE_VM, /* volatile memory */
+ TPH_MEM_TYPE_PM /* persistent memory */
+};
+
+#ifdef CONFIG_PCIE_TPH
+int pcie_tph_set_st_entry(struct pci_dev *pdev,
+ unsigned int index, u16 tag);
+int pcie_tph_get_cpu_st(struct pci_dev *dev,
+ enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *tag);
+void pcie_disable_tph(struct pci_dev *pdev);
+int pcie_enable_tph(struct pci_dev *pdev, int mode);
+u16 pcie_tph_get_st_table_size(struct pci_dev *pdev);
+u32 pcie_tph_get_st_table_loc(struct pci_dev *pdev);
+#else
+static inline int pcie_tph_set_st_entry(struct pci_dev *pdev,
+ unsigned int index, u16 tag)
+{ return -EINVAL; }
+static inline int pcie_tph_get_cpu_st(struct pci_dev *dev,
+ enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *tag)
+{ return -EINVAL; }
+static inline void pcie_disable_tph(struct pci_dev *pdev) { }
+static inline int pcie_enable_tph(struct pci_dev *pdev, int mode)
+{ return -EINVAL; }
+#endif
+
+#endif /* LINUX_PCI_TPH_H */
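
A driver steering device writes toward a particular CPU's cache might chain
these calls as below; the mode, cpu_uid and st_index values are illustrative:

	u16 tag;
	int ret;

	ret = pcie_enable_tph(pdev, mode);
	if (ret)
		return ret;

	ret = pcie_tph_get_cpu_st(pdev, TPH_MEM_TYPE_VM, cpu_uid, &tag);
	if (!ret)
		ret = pcie_tph_set_st_entry(pdev, st_index, tag);
	if (ret)
		pcie_disable_tph(pdev);
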
diff --git a/include/linux/pci-tsm.h b/include/linux/pci-tsm.h
new file mode 100644
index 000000000000..a6435aba03f9
--- /dev/null
+++ b/include/linux/pci-tsm.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PCI_TSM_H
+#define __PCI_TSM_H
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/sockptr.h>
+
+struct pci_tsm;
+struct tsm_dev;
+struct kvm;
+enum pci_tsm_req_scope;
+
+/*
+ * struct pci_tsm_ops - manage confidential links and security state
+ * @link_ops: Coordinate PCIe SPDM and IDE establishment via a platform TSM.
+ * Provide a secure session transport for TDISP state management
+ * (typically bare metal physical function operations).
+ * @devsec_ops: Lock, unlock, and interrogate the security state of the
+ * function via the platform TSM (typically virtual function
+ * operations).
+ *
+ * These operations are mutually exclusive: either a tsm_dev instance
+ * manages physical link properties, or it manages function security
+ * states like TDISP lock/unlock.
+ */
+struct pci_tsm_ops {
+ /*
+ * struct pci_tsm_link_ops - Manage physical link and the TSM/DSM session
+ * @probe: establish context with the TSM (allocate / wrap 'struct
+ * pci_tsm') for follow-on link operations
+ * @remove: destroy link operations context
+ * @connect: establish / validate a secure connection (e.g. IDE)
+ * with the device
+ * @disconnect: teardown the secure link
+ * @bind: bind a TDI in preparation for it to be accepted by a TVM
+ * @unbind: remove a TDI from secure operation with a TVM
+ * @guest_req: marshal TVM information and state change requests
+ *
+ * Context: @probe, @remove, @connect, and @disconnect run under
+ * pci_tsm_rwsem held for write to sync with TSM unregistration and
+ * mutual exclusion of @connect and @disconnect. @connect and
+ * @disconnect additionally run under the DSM lock (struct
+ * pci_tsm_pf0::lock) as well as @probe and @remove of the subfunctions.
+ * @bind, @unbind, and @guest_req run under pci_tsm_rwsem held for read
+ * and the DSM lock.
+ */
+ struct_group_tagged(pci_tsm_link_ops, link_ops,
+ struct pci_tsm *(*probe)(struct tsm_dev *tsm_dev,
+ struct pci_dev *pdev);
+ void (*remove)(struct pci_tsm *tsm);
+ int (*connect)(struct pci_dev *pdev);
+ void (*disconnect)(struct pci_dev *pdev);
+ struct pci_tdi *(*bind)(struct pci_dev *pdev,
+ struct kvm *kvm, u32 tdi_id);
+ void (*unbind)(struct pci_tdi *tdi);
+ ssize_t (*guest_req)(struct pci_tdi *tdi,
+ enum pci_tsm_req_scope scope,
+ sockptr_t req_in, size_t in_len,
+ sockptr_t req_out, size_t out_len,
+ u64 *tsm_code);
+ );
+
+ /*
+ * struct pci_tsm_devsec_ops - Manage the security state of the function
+ * @lock: establish context with the TSM (allocate / wrap 'struct
+ * pci_tsm') for follow-on security state transitions from the
+ * LOCKED state
+ * @unlock: destroy TSM context and return device to UNLOCKED state
+ *
+ * Context: @lock and @unlock run under pci_tsm_rwsem held for write to
+ * sync with TSM unregistration and each other
+ */
+ struct_group_tagged(pci_tsm_devsec_ops, devsec_ops,
+ struct pci_tsm *(*lock)(struct tsm_dev *tsm_dev,
+ struct pci_dev *pdev);
+ void (*unlock)(struct pci_tsm *tsm);
+ );
+};
+
+/**
+ * struct pci_tdi - Core TEE I/O Device Interface (TDI) context
+ * @pdev: host side representation of guest-side TDI
+ * @kvm: TEE VM context of bound TDI
+ * @tdi_id: Identifier (virtual BDF) for the TDI as referenced by the TSM and DSM
+ */
+struct pci_tdi {
+ struct pci_dev *pdev;
+ struct kvm *kvm;
+ u32 tdi_id;
+};
+
+/**
+ * struct pci_tsm - Core TSM context for a given PCIe endpoint
+ * @pdev: Back ref to device function, distinguishes type of pci_tsm context
+ * @dsm_dev: PCI Device Security Manager for link operations on @pdev
+ * @tsm_dev: PCI TEE Security Manager device for Link Confidentiality or Device
+ * Function Security operations
+ * @tdi: TDI context established by the @bind link operation
+ *
+ * This structure is wrapped by low-level TSM driver data and returned by
+ * probe()/lock(); it is freed by the corresponding remove()/unlock().
+ *
+ * For link operations it serves to cache the association between a Device
+ * Security Manager (DSM) and the functions that manager can assign to a TVM.
+ * That can be "self", for assigning function0 of a TEE I/O device, a
+ * sub-function (SR-IOV virtual function, or non-function0
+ * multifunction-device), or a downstream endpoint (PCIe upstream switch-port as
+ * DSM).
+ */
+struct pci_tsm {
+ struct pci_dev *pdev;
+ struct pci_dev *dsm_dev;
+ struct tsm_dev *tsm_dev;
+ struct pci_tdi *tdi;
+};
+
+/**
+ * struct pci_tsm_pf0 - Physical Function 0 TDISP link context
+ * @base_tsm: generic core "tsm" context
+ * @lock: mutual exclusion for pci_tsm_ops invocation
+ * @doe_mb: PCIe Data Object Exchange mailbox
+ */
+struct pci_tsm_pf0 {
+ struct pci_tsm base_tsm;
+ struct mutex lock;
+ struct pci_doe_mb *doe_mb;
+};
+
+/* physical function0 and capable of 'connect' */
+static inline bool is_pci_tsm_pf0(struct pci_dev *pdev)
+{
+ if (!pdev)
+ return false;
+
+ if (!pci_is_pcie(pdev))
+ return false;
+
+ if (pdev->is_virtfn)
+ return false;
+
+ /*
+ * Allow for a Device Security Manager (DSM) associated with function0
+ * of an Endpoint to coordinate TDISP requests for other functions
+ * (physical or virtual) of the device, or allow for an Upstream Port
+ * DSM to accept TDISP requests for the Endpoints downstream of the
+ * switch.
+ */
+ switch (pci_pcie_type(pdev)) {
+ case PCI_EXP_TYPE_ENDPOINT:
+ case PCI_EXP_TYPE_UPSTREAM:
+ case PCI_EXP_TYPE_RC_END:
+ if (pdev->ide_cap || (pdev->devcap & PCI_EXP_DEVCAP_TEE))
+ break;
+ fallthrough;
+ default:
+ return false;
+ }
+
+ return PCI_FUNC(pdev->devfn) == 0;
+}
+
+/**
+ * enum pci_tsm_req_scope - Scope of guest requests to be validated by TSM
+ *
+ * Guest requests are a transport for a TVM to communicate with a TSM + DSM for
+ * a given TDI. A TSM driver is responsible for maintaining the kernel security
+ * model and for limiting commands that may affect the host or are otherwise
+ * outside the typical TDISP operational model.
+ */
+enum pci_tsm_req_scope {
+ /**
+ * @PCI_TSM_REQ_INFO: Read-only, without side effects, request for
+ * typical TDISP collateral information like Device Interface Reports.
+ * No device secrets are permitted, and no device state is changed.
+ */
+ PCI_TSM_REQ_INFO = 0,
+ /**
+ * @PCI_TSM_REQ_STATE_CHANGE: Request to change the TDISP state from
+ * UNLOCKED->LOCKED, LOCKED->RUN, or other architecture specific state
+ * changes to support those transitions for a TDI. No other (unrelated
+ * to TDISP) device / host state, configuration, or data change is
+ * permitted.
+ */
+ PCI_TSM_REQ_STATE_CHANGE = 1,
+ /**
+ * @PCI_TSM_REQ_DEBUG_READ: Read-only request for debug information
+ *
+ * A method to facilitate TVM information retrieval outside of typical
+ * TDISP operational requirements. No device secrets are permitted.
+ */
+ PCI_TSM_REQ_DEBUG_READ = 2,
+ /**
+ * @PCI_TSM_REQ_DEBUG_WRITE: Device state changes for debug purposes
+ *
+ * The request may affect the operational state of the device outside of
+ * the TDISP operational model. If allowed, requires CAP_SYS_RAW_IO, and
+ * will taint the kernel.
+ */
+ PCI_TSM_REQ_DEBUG_WRITE = 3,
+};
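For orientation, a sketch of a host-side caller forwarding a read-only information request on behalf of a TVM; the empty request payload and the helper name are assumptions, not defined by this header:

static ssize_t foo_get_tdi_report(struct pci_dev *pdev, void *buf, size_t len)
{
	u64 tsm_code;

	/* PCI_TSM_REQ_INFO: no side effects, no device secrets */
	return pci_tsm_guest_req(pdev, PCI_TSM_REQ_INFO,
				 KERNEL_SOCKPTR(NULL), 0,
				 KERNEL_SOCKPTR(buf), len, &tsm_code);
}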
+
+#ifdef CONFIG_PCI_TSM
+int pci_tsm_register(struct tsm_dev *tsm_dev);
+void pci_tsm_unregister(struct tsm_dev *tsm_dev);
+int pci_tsm_link_constructor(struct pci_dev *pdev, struct pci_tsm *tsm,
+ struct tsm_dev *tsm_dev);
+int pci_tsm_pf0_constructor(struct pci_dev *pdev, struct pci_tsm_pf0 *tsm,
+ struct tsm_dev *tsm_dev);
+void pci_tsm_pf0_destructor(struct pci_tsm_pf0 *tsm);
+int pci_tsm_doe_transfer(struct pci_dev *pdev, u8 type, const void *req,
+ size_t req_sz, void *resp, size_t resp_sz);
+int pci_tsm_bind(struct pci_dev *pdev, struct kvm *kvm, u32 tdi_id);
+void pci_tsm_unbind(struct pci_dev *pdev);
+void pci_tsm_tdi_constructor(struct pci_dev *pdev, struct pci_tdi *tdi,
+ struct kvm *kvm, u32 tdi_id);
+ssize_t pci_tsm_guest_req(struct pci_dev *pdev, enum pci_tsm_req_scope scope,
+ sockptr_t req_in, size_t in_len, sockptr_t req_out,
+ size_t out_len, u64 *tsm_code);
+#else
+static inline int pci_tsm_register(struct tsm_dev *tsm_dev)
+{
+ return 0;
+}
+static inline void pci_tsm_unregister(struct tsm_dev *tsm_dev)
+{
+}
+static inline int pci_tsm_bind(struct pci_dev *pdev, struct kvm *kvm, u32 tdi_id)
+{
+ return -ENXIO;
+}
+static inline void pci_tsm_unbind(struct pci_dev *pdev)
+{
+}
+static inline ssize_t pci_tsm_guest_req(struct pci_dev *pdev,
+ enum pci_tsm_req_scope scope,
+ sockptr_t req_in, size_t in_len,
+ sockptr_t req_out, size_t out_len,
+ u64 *tsm_code)
+{
+ return -ENXIO;
+}
+#endif
+#endif /*__PCI_TSM_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c20211e59a57..864775651c6f 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -23,7 +23,7 @@
#ifndef LINUX_PCI_H
#define LINUX_PCI_H
-
+#include <linux/args.h>
#include <linux/mod_devicetable.h>
#include <linux/types.h>
@@ -38,6 +38,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/resource_ext.h>
+#include <linux/msi_api.h>
#include <uapi/linux/pci.h>
#include <linux/pci_ids.h>
@@ -49,6 +50,12 @@
PCI_STATUS_SIG_TARGET_ABORT | \
PCI_STATUS_PARITY)
+/* Number of reset methods used in pci_reset_fn_methods array in pci.c */
+#define PCI_NUM_RESET_METHODS 8
+
+#define PCI_RESET_PROBE true
+#define PCI_RESET_DO_RESET false
+
/*
* The PCI interface treats multi-function devices as independent
* devices. The slot/function address of each device is encoded
@@ -112,7 +119,8 @@ enum {
#define PCI_CB_BRIDGE_MEM_1_WINDOW (PCI_BRIDGE_RESOURCES + 3)
/* Total number of bridge resources for P2P and CardBus */
-#define PCI_BRIDGE_RESOURCE_NUM 4
+#define PCI_P2P_BRIDGE_RESOURCE_NUM 3
+#define PCI_BRIDGE_RESOURCE_NUM 4
/* Resources assigned to buses behind the bridge */
PCI_BRIDGE_RESOURCES,
@@ -149,6 +157,15 @@ enum pci_interrupt_pin {
#define PCI_NUM_INTX 4
/*
+ * Reading from a device that doesn't respond typically returns ~0. A
+ * successful read from a device may also return ~0, so you need additional
+ * information to reliably identify errors.
+ */
+#define PCI_ERROR_RESPONSE (~0ULL)
+#define PCI_SET_ERROR_RESPONSE(val) (*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
+#define PCI_POSSIBLE_ERROR(val) ((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
+
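Since ~0 is also a legal read value, PCI_POSSIBLE_ERROR() only flags a candidate failure; a sketch of the follow-up check a caller might do:

	u32 val;

	pci_read_config_dword(pdev, PCI_COMMAND, &val);
	if (PCI_POSSIBLE_ERROR(val)) {
		/* ~0 may be legitimate data; confirm the device is gone */
		if (!pci_device_is_present(pdev))
			return -ENODEV;
	}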
+/*
* pci_power_t values must match the bits in the Capabilities PME_Support
* and Control/Status PowerState fields in the Power Management capability.
*/
@@ -227,6 +244,10 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
/* Don't use Relaxed Ordering for TLPs directed at this device */
PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
+ /* Device does honor MSI masking despite saying otherwise */
+ PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
+ /* Device requires write to PCI_MSIX_ENTRY_DATA before any MSIX reads */
+ PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST = (__force pci_dev_flags_t) (1 << 13),
};
enum pci_irq_reroute_variant {
@@ -288,26 +309,32 @@ enum pci_bus_speed {
enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
-struct pci_cap_saved_data {
- u16 cap_nr;
- bool cap_extended;
- unsigned int size;
- u32 data[];
-};
-
-struct pci_cap_saved_state {
- struct hlist_node next;
- struct pci_cap_saved_data cap;
+struct pci_vpd {
+ struct mutex lock;
+ unsigned int len;
+ u8 cap;
};
struct irq_affinity;
+struct pcie_bwctrl_data;
struct pcie_link_state;
-struct pci_vpd;
struct pci_sriov;
struct pci_p2pdma;
struct rcec_ea;
-/* The pci_dev structure describes PCI devices */
+/* struct pci_dev - describes a PCI device
+ *
+ * @supported_speeds: PCIe Supported Link Speeds Vector (+ reserved 0 at
+ * LSB). 0 when the supported speeds cannot be
+ * determined (e.g., for Root Complex Integrated
+ * Endpoints without the relevant Capability
+ * Registers).
+ * @is_hotplug_bridge: Hotplug bridge of any kind (e.g. PCIe Hot-Plug Capable,
+ * Conventional PCI Hot-Plug, ACPI slot).
+ * Such bridges are allocated additional MMIO and bus
+ * number resources to allow for hierarchy expansion.
+ * @is_pciehp: PCIe Hot-Plug Capable bridge.
+ */
struct pci_dev {
struct list_head bus_list; /* Node in per-bus list */
struct pci_bus *bus; /* Bus this device is on */
@@ -327,12 +354,14 @@ struct pci_dev {
u8 hdr_type; /* PCI header type (`multi' flag masked out) */
#ifdef CONFIG_PCIEAER
u16 aer_cap; /* AER capability offset */
- struct aer_stats *aer_stats; /* AER stats for this device */
+ struct aer_info *aer_info; /* AER info for this device */
#endif
#ifdef CONFIG_PCIEPORTBUS
struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */
struct pci_dev *rcec; /* Associated RCEC device */
#endif
+ u32 devcap; /* PCIe Device Capabilities */
+ u16 rebar_cap; /* Resizable BAR capability offset */
u8 pcie_cap; /* PCIe capability offset */
u8 msi_cap; /* MSI capability offset */
u8 msix_cap; /* MSI-X capability offset */
@@ -354,11 +383,13 @@ struct pci_dev {
pci_power_t current_state; /* Current operating state. In ACPI,
this is D0-D3, D0 being fully
functional, and D3 being off. */
- unsigned int imm_ready:1; /* Supports Immediate Readiness */
u8 pm_cap; /* PM capability offset */
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
unsigned int pme_poll:1; /* Poll device's PME status bit */
+ unsigned int pinned:1; /* Whether this dev is pinned */
+ unsigned int config_rrs_sv:1; /* Config RRS software visibility */
+ unsigned int imm_ready:1; /* Supports Immediate Readiness */
unsigned int d1_support:1; /* Low power state D1 is supported */
unsigned int d2_support:1; /* Low power state D2 is supported */
unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
@@ -368,10 +399,6 @@ struct pci_dev {
unsigned int mmio_always_on:1; /* Disallow turning off io/mem
decoding during BAR sizing */
unsigned int wakeup_prepared:1;
- unsigned int runtime_d3cold:1; /* Whether go through runtime
- D3cold, not set for devices
- powered on/off by the
- corresponding bridge */
unsigned int skip_bus_pm:1; /* Internal: Skip bus-level PM */
unsigned int ignore_hotplug:1; /* Ignore hotplug events */
unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
@@ -382,13 +409,16 @@ struct pci_dev {
unsigned int d3hot_delay; /* D3hot->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
+ u16 l1ss; /* L1SS Capability pointer */
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state */
+ unsigned int aspm_l0s_support:1; /* ASPM L0s support */
+ unsigned int aspm_l1_support:1; /* ASPM L1 support */
unsigned int ltr_path:1; /* Latency Tolerance Reporting
supported from root to here */
- u16 l1ss; /* L1SS Capability pointer */
#endif
- unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */
+ unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */
+ unsigned int eetlp_prefix_max:3; /* Max # of End-End TLP Prefixes, 0=not supported */
pci_channel_state_t error_state; /* Current connectivity state */
struct device dev; /* Generic device interface */
@@ -401,8 +431,7 @@ struct pci_dev {
*/
unsigned int irq;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
-
- bool match_driver; /* Skip attaching driver */
+ struct resource driver_exclusive_resource; /* driver exclusive resource ranges */
unsigned int transparent:1; /* Subtractive decode bridge */
unsigned int io_window:1; /* Bridge has I/O window */
@@ -422,13 +451,16 @@ struct pci_dev {
unsigned int ats_enabled:1; /* Address Translation Svc */
unsigned int pasid_enabled:1; /* Process Address Space ID */
unsigned int pri_enabled:1; /* Page Request Interface */
- unsigned int is_managed:1;
+ unsigned int tph_enabled:1; /* TLP Processing Hints */
+ unsigned int fm_enabled:1; /* Flit Mode (segment captured) */
+ unsigned int is_managed:1; /* Managed via devres */
+ unsigned int is_msi_managed:1; /* MSI release via devres installed */
unsigned int needs_freset:1; /* Requires fundamental reset */
unsigned int state_saved:1;
unsigned int is_physfn:1;
unsigned int is_virtfn:1;
- unsigned int reset_fn:1;
unsigned int is_hotplug_bridge:1;
+ unsigned int is_pciehp:1;
unsigned int shpc_managed:1; /* SHPC owned by shpchp */
unsigned int is_thunderbolt:1; /* Thunderbolt controller */
/*
@@ -453,12 +485,15 @@ struct pci_dev {
unsigned int link_active_reporting:1;/* Device capable of reporting link active */
unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
+ unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */
+ unsigned int rom_attr_enabled:1; /* Display of ROM attribute enabled? */
+ unsigned int non_mappable_bars:1; /* BARs can't be mapped to user-space */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
+ spinlock_t pcie_cap_lock; /* Protects RMW ops in capability accessors */
u32 saved_config_space[16]; /* Config space saved at suspend time */
struct hlist_head saved_cap_space;
- int rom_attr_enabled; /* Display of ROM attribute enabled? */
struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
@@ -466,19 +501,24 @@ struct pci_dev {
unsigned int broken_cmd_compl:1; /* No compl for some cmds */
#endif
#ifdef CONFIG_PCIE_PTM
+ u16 ptm_cap; /* PTM Capability */
unsigned int ptm_root:1;
+ unsigned int ptm_responder:1;
+ unsigned int ptm_requester:1;
unsigned int ptm_enabled:1;
u8 ptm_granularity;
#endif
#ifdef CONFIG_PCI_MSI
- const struct attribute_group **msi_irq_groups;
+ void __iomem *msix_base;
+ raw_spinlock_t msi_lock;
#endif
- struct pci_vpd *vpd;
+ struct pci_vpd vpd;
#ifdef CONFIG_PCIE_DPC
u16 dpc_cap;
unsigned int dpc_rp_extensions:1;
u8 dpc_rp_log_size;
#endif
+ struct pcie_bwctrl_data *link_bwctrl;
#ifdef CONFIG_PCI_ATS
union {
struct pci_sriov *sriov; /* PF: SR-IOV info */
@@ -497,14 +537,46 @@ struct pci_dev {
u16 pasid_features;
#endif
#ifdef CONFIG_PCI_P2PDMA
- struct pci_p2pdma *p2pdma;
+ struct pci_p2pdma __rcu *p2pdma;
+#endif
+#ifdef CONFIG_PCI_DOE
+ struct xarray doe_mbs; /* Data Object Exchange mailboxes */
+#endif
+#ifdef CONFIG_PCI_NPEM
+ struct npem *npem; /* Native PCIe Enclosure Management */
+#endif
+#ifdef CONFIG_PCI_IDE
+ u16 ide_cap; /* Link Integrity & Data Encryption */
+ u8 nr_ide_mem; /* Address association resources for streams */
+ u8 nr_link_ide; /* Link Stream count (Selective Stream offset) */
+ u16 nr_sel_ide; /* Selective Stream count (register block allocator) */
+ struct ida ide_stream_ida;
+ unsigned int ide_cfg:1; /* Config cycles over IDE */
+ unsigned int ide_tee_limit:1; /* Disallow T=0 traffic over IDE */
+#endif
+#ifdef CONFIG_PCI_TSM
+ struct pci_tsm *tsm; /* TSM operation state */
#endif
u16 acs_cap; /* ACS Capability offset */
+ u8 supported_speeds; /* Supported Link Speeds Vector */
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
- char *driver_override; /* Driver name to force a match */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
unsigned long priv_flags; /* Private flags for the PCI driver */
+
+ /* These methods index pci_reset_fn_methods[] */
+ u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
+
+#ifdef CONFIG_PCIE_TPH
+ u16 tph_cap; /* TPH capability offset */
+ u8 tph_mode; /* TPH mode */
+ u8 tph_req_type; /* TPH requester type */
+#endif
};
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
@@ -520,12 +592,24 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
#define to_pci_dev(n) container_of(n, struct pci_dev, dev)
#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
+#define for_each_pci_dev_reverse(d) \
+ while ((d = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
static inline int pci_channel_offline(struct pci_dev *pdev)
{
return (pdev->error_state != pci_channel_io_normal);
}
+/*
+ * The ACPI spec currently limits the PCI Segment Group number of each
+ * PCI host bridge to a 16-bit value, so (int)-1 is not a valid PCI
+ * domain number and can be used as a sentinel
+ * value indicating ->domain_nr is not set by the driver (and
+ * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
+ * pci_bus_find_domain_nr()).
+ */
+#define PCI_DOMAIN_NR_NOT_SET (-1)
+
struct pci_host_bridge {
struct device dev;
struct pci_bus *bus; /* Root bus */
@@ -533,20 +617,30 @@ struct pci_host_bridge {
struct pci_ops *child_ops;
void *sysdata;
int busnr;
+ int domain_nr;
struct list_head windows; /* resource_entry */
struct list_head dma_ranges; /* dma ranges resource list */
+#ifdef CONFIG_PCI_IDE
+ u16 nr_ide_streams; /* Max streams possibly active in @ide_stream_ida */
+ struct ida ide_stream_ida;
+ struct ida ide_stream_ids_ida; /* track unique ids per domain */
+#endif
u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
int (*map_irq)(const struct pci_dev *, u8, u8);
void (*release_fn)(struct pci_host_bridge *);
+ int (*enable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
+ void (*disable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
void *release_data;
unsigned int ignore_reset_delay:1; /* For entire hierarchy */
unsigned int no_ext_tags:1; /* No Extended Tags */
+ unsigned int no_inc_mrrs:1; /* No Increase MRRS */
unsigned int native_aer:1; /* OS may use PCIe AER */
unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */
unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
unsigned int native_pme:1; /* OS may use PCIe PME */
unsigned int native_ltr:1; /* OS may use PCIe LTR */
unsigned int native_dpc:1; /* OS may use PCIe DPC */
+ unsigned int native_cxl_error:1; /* OS may use CXL RAS/Events */
unsigned int preserve_config:1; /* Preserve FW resource setup */
unsigned int size_windows:1; /* Enable root bus sizing */
unsigned int msi_domain:1; /* Bridge wants MSI domain */
@@ -576,6 +670,7 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
size_t priv);
void pci_free_host_bridge(struct pci_host_bridge *bridge);
+struct device *pci_get_host_bridge_device(struct pci_dev *dev);
struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
@@ -584,27 +679,6 @@ void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
-/*
- * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
- * to P2P or CardBus bridge windows) go in a table. Additional ones (for
- * buses below host bridges or subtractive decode bridges) go in the list.
- * Use pci_bus_for_each_resource() to iterate through all the resources.
- */
-
-/*
- * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
- * and there's no way to program the bridge with the details of the window.
- * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
- * decode bit set, because they are explicit and can be programmed with _SRS.
- */
-#define PCI_SUBTRACTIVE_DECODE 0x1
-
-struct pci_bus_resource {
- struct list_head list;
- struct resource *res;
- unsigned int flags;
-};
-
#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
struct pci_bus {
@@ -640,6 +714,8 @@ struct pci_bus {
struct bin_attribute *legacy_io; /* Legacy I/O for this bus */
struct bin_attribute *legacy_mem; /* Legacy mem */
unsigned int is_added:1;
+ unsigned int unsafe_warn:1; /* warned about RW1C config write */
+ unsigned int flit_mode:1; /* Link in Flit mode */
};
#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
@@ -675,6 +751,46 @@ static inline bool pci_is_bridge(struct pci_dev *dev)
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
}
+/**
+ * pci_is_vga - check if the PCI device is a VGA device
+ * @pdev: PCI device
+ *
+ * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
+ * VGA Base Class and Sub-Classes:
+ *
+ * 03 00 PCI_CLASS_DISPLAY_VGA VGA-compatible or 8514-compatible
+ * 00 01 PCI_CLASS_NOT_DEFINED_VGA VGA-compatible (before Class Code)
+ *
+ * Return true if the PCI device is a VGA device and uses the legacy VGA
+ * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
+ * aliases).
+ */
+static inline bool pci_is_vga(struct pci_dev *pdev)
+{
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ return true;
+
+ if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
+ return true;
+
+ return false;
+}
+
+/**
+ * pci_is_display - check if the PCI device is a display controller
+ * @pdev: PCI device
+ *
+ * Determine whether the given PCI device corresponds to a display
+ * controller. Display controllers are typically used for graphical output
+ * and are identified based on their class code.
+ *
+ * Return: true if the PCI device is a display controller, false otherwise.
+ */
+static inline bool pci_is_display(struct pci_dev *pdev)
+{
+ return (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY;
+}
+
#define for_each_pci_bridge(dev, bus) \
list_for_each_entry(dev, &bus->devices, bus_list) \
if (!pci_is_bridge(dev)) {} else
@@ -738,6 +854,7 @@ struct pci_ops {
void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
+ int (*assert_perst)(struct pci_bus *bus, bool assert);
};
/*
@@ -760,6 +877,11 @@ struct pci_bus_region {
pci_bus_addr_t end;
};
+static inline pci_bus_addr_t pci_bus_region_size(const struct pci_bus_region *region)
+{
+ return region->end - region->start + 1;
+}
+
struct pci_dynids {
spinlock_t lock; /* Protects list, index */
struct list_head list; /* For IDs added at runtime */
@@ -813,6 +935,9 @@ struct pci_error_handlers {
/* Device driver may resume normal operations */
void (*resume)(struct pci_dev *dev);
+
+ /* Allow device driver to record more details of a correctable error */
+ void (*cor_error_detected)(struct pci_dev *dev);
};
@@ -820,7 +945,6 @@ struct module;
/**
* struct pci_driver - PCI driver structure
- * @node: List of driver structures.
* @name: Driver name.
* @id_table: Pointer to table of device IDs the driver is
* interested in. Most drivers should export this
@@ -862,11 +986,19 @@ struct module;
* MSI-X vectors available for distribution to the VFs.
* @err_handler: See Documentation/PCI/pci-error-recovery.rst
* @groups: Sysfs attribute groups.
+ * @dev_groups: Attributes attached to the device that will be
+ * created once it is bound to the driver.
* @driver: Driver model structure.
* @dynids: List of dynamically added device IDs.
+ * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
+ * Most device drivers need not care about this flag as long
+ * as all DMA is handled through the kernel DMA API. Some
+ * special drivers, for example VFIO drivers, know how to
+ * manage the DMA themselves and set this flag so that the
+ * IOMMU layer will allow them to set up and manage their
+ * own I/O address space.
*/
struct pci_driver {
- struct list_head node;
const char *name;
const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */
int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
@@ -879,11 +1011,14 @@ struct pci_driver {
u32 (*sriov_get_vf_total_msix)(struct pci_dev *pf);
const struct pci_error_handlers *err_handler;
const struct attribute_group **groups;
+ const struct attribute_group **dev_groups;
struct device_driver driver;
struct pci_dynids dynids;
+ bool driver_managed_dma;
};
-#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
+#define to_pci_driver(__drv) \
+ ( __drv ? container_of_const(__drv, struct pci_driver, driver) : NULL )
/**
* PCI_DEVICE - macro used to describe a specific PCI device
@@ -899,6 +1034,35 @@ struct pci_driver {
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
/**
+ * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
+ * override_only flags.
+ * @vend: the 16 bit PCI Vendor ID
+ * @dev: the 16 bit PCI Device ID
+ * @driver_override: the 32 bit PCI Device override_only
+ *
+ * This macro is used to create a struct pci_device_id that matches only a
+ * driver_override device. The subvendor and subdevice fields will be set to
+ * PCI_ANY_ID.
+ */
+#define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
+ .vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, .override_only = (driver_override)
+
+/**
+ * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
+ * "driver_override" PCI device.
+ * @vend: the 16 bit PCI Vendor ID
+ * @dev: the 16 bit PCI Device ID
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific device. The subvendor and subdevice fields will be set to
+ * PCI_ANY_ID and the driver_override will be set to
+ * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
+ */
+#define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
+ PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)
+
+/**
* PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
* @vend: the 16 bit PCI Vendor ID
* @dev: the 16 bit PCI Device ID
@@ -941,6 +1105,20 @@ struct pci_driver {
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
/**
+ * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form
+ * @vend: the vendor name
+ * @dev: the 16 bit PCI Device ID
+ * @subvend: the 16 bit PCI Subvendor ID
+ * @subdev: the 16 bit PCI Subdevice ID
+ *
+ * Generate the pci_device_id struct layout for the specific PCI
+ * device/subdevice. Private data may follow the output.
+ */
+#define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
+ .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
+ .subvendor = (subvend), .subdevice = (subdev), 0, 0
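A sketch of an ID table entry using the macro; the device and subsystem IDs below are placeholders:

static const struct pci_device_id foo_ids[] = {
	{ PCI_VDEVICE_SUB(REDHAT, 0x0001, 0x1af4, 0x1100) }, /* placeholder IDs */
	{ }
};
MODULE_DEVICE_TABLE(pci, foo_ids);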
+
+/**
* PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
* @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
* @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
@@ -965,7 +1143,7 @@ enum {
PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */
};
-#define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */
+#define PCI_IRQ_INTX (1 << 0) /* Allow INTx interrupts */
#define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
#define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
#define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
@@ -992,7 +1170,7 @@ enum pcie_bus_config_types {
extern enum pcie_bus_config_types pcie_bus_config;
-extern struct bus_type pci_bus_type;
+extern const struct bus_type pci_bus_type;
/* Do NOT directly access these two variables, unless you are arch-specific PCI
* code, or PCI core code. */
@@ -1014,9 +1192,6 @@ resource_size_t pcibios_align_resource(void *, const struct resource *,
resource_size_t,
resource_size_t);
-/* Weak but can be overridden by arch */
-void pci_fixup_cardbus(struct pci_bus *);
-
/* Generic PCI functions used internally */
void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
@@ -1062,6 +1237,7 @@ int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
struct pci_dev *pci_dev_get(struct pci_dev *dev);
void pci_dev_put(struct pci_dev *dev);
+DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
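DEFINE_FREE() hooks pci_dev_put() into the scope-based cleanup helpers from <linux/cleanup.h>; a sketch of the resulting pattern (vendor/device IDs are placeholders):

	struct pci_dev *dev __free(pci_dev_put) =
		pci_get_device(PCI_VENDOR_ID_INTEL, 0x1234, NULL);

	if (!dev)
		return -ENODEV;
	/* ... use dev; pci_dev_put() runs on every exit from this scope ... */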
void pci_remove_bus(struct pci_bus *b);
void pci_stop_and_remove_bus_device(struct pci_dev *dev);
void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
@@ -1084,11 +1260,14 @@ u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
+u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
u64 pci_get_dsn(struct pci_dev *dev);
struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
struct pci_dev *from);
+struct pci_dev *pci_get_device_reverse(unsigned int vendor, unsigned int device,
+ struct pci_dev *from);
struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
unsigned int ss_vendor, unsigned int ss_device,
struct pci_dev *from);
@@ -1096,6 +1275,8 @@ struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
unsigned int devfn);
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
+struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);
+
int pci_dev_present(const struct pci_device_id *ids);
int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
@@ -1128,16 +1309,48 @@ int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
+void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
+ u32 clear, u32 set);
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
-int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
- u16 clear, u16 set);
+int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
+ u16 clear, u16 set);
+int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
+ u16 clear, u16 set);
int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
u32 clear, u32 set);
+/**
+ * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
+ * @dev: PCI device structure of the PCI Express device
+ * @pos: PCI Express Capability Register
+ * @clear: Clear bitmask
+ * @set: Set bitmask
+ *
+ * Perform a Read-Modify-Write (RMW) operation using @clear and @set
+ * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
+ * Capability Registers are accessed concurrently in RMW fashion, hence
+ * require locking which is handled transparently to the caller.
+ */
+static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
+ int pos,
+ u16 clear, u16 set)
+{
+ switch (pos) {
+ case PCI_EXP_LNKCTL:
+ case PCI_EXP_LNKCTL2:
+ case PCI_EXP_RTCTL:
+ return pcie_capability_clear_and_set_word_locked(dev, pos,
+ clear, set);
+ default:
+ return pcie_capability_clear_and_set_word_unlocked(dev, pos,
+ clear, set);
+ }
+}
+
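For example, an RMW of Link Control is routed to the locked variant by the switch above; a sketch:

	/* PCI_EXP_LNKCTL is in the locked set, so this RMW takes the lock */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC,
					   PCI_EXP_LNKCTL_ASPM_L1);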
static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
u16 set)
{
@@ -1171,7 +1384,6 @@ int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
int __must_check pci_enable_device(struct pci_dev *dev);
-int __must_check pci_enable_device_io(struct pci_dev *dev);
int __must_check pci_enable_device_mem(struct pci_dev *dev);
int __must_check pci_reenable_device(struct pci_dev *);
int __must_check pcim_enable_device(struct pci_dev *pdev);
@@ -1224,8 +1436,9 @@ int pcie_set_mps(struct pci_dev *dev, int mps);
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
enum pci_bus_speed *speed,
enum pcie_link_width *width);
+int pcie_link_speed_mbps(struct pci_dev *pdev);
void pcie_print_link_status(struct pci_dev *dev);
-bool pcie_has_flr(struct pci_dev *dev);
+int pcie_reset_flr(struct pci_dev *dev, bool probe);
int pcie_flr(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
@@ -1238,18 +1451,17 @@ void pci_reset_secondary_bus(struct pci_dev *dev);
void pcibios_reset_secondary_bus(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
-int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
-void pci_release_resource(struct pci_dev *dev, int resno);
-static inline int pci_rebar_bytes_to_size(u64 bytes)
-{
- bytes = roundup_pow_of_two(bytes);
+int pci_release_resource(struct pci_dev *dev, int resno);
- /* Return BAR size as defined in the resizable BAR specification */
- return max(ilog2(bytes), 20) - 20;
-}
+/* Resizable BAR related routines */
+int pci_rebar_bytes_to_size(u64 bytes);
+resource_size_t pci_rebar_size_to_bytes(int size);
+u64 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
+bool pci_rebar_size_supported(struct pci_dev *pdev, int bar, int size);
+int pci_rebar_get_max_size(struct pci_dev *pdev, int bar);
+int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size,
+ int exclude_bars);
-u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
-int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);
@@ -1275,14 +1487,9 @@ int pci_load_saved_state(struct pci_dev *dev,
struct pci_saved_state *state);
int pci_load_and_free_saved_state(struct pci_dev *dev,
struct pci_saved_state **state);
-struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
-struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
- u16 cap);
-int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
-int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
- u16 cap, unsigned int size);
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
+int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
void pci_pme_active(struct pci_dev *dev, bool enable);
@@ -1302,7 +1509,6 @@ void set_pcie_port_type(struct pci_dev *pdev);
void set_pcie_hotplug_bridge(struct pci_dev *pdev);
/* Functions for PCI Hotplug drivers to use */
-unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
unsigned int pci_rescan_bus(struct pci_bus *bus);
void pci_lock_rescan_remove(void);
void pci_unlock_rescan_remove(void);
@@ -1310,6 +1516,8 @@ void pci_unlock_rescan_remove(void);
/* Vital Product Data routines */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
+ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
@@ -1322,8 +1530,6 @@ void pci_assign_unassigned_resources(void);
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
-int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
-void pdev_enable_device(struct pci_dev *);
int pci_enable_resources(struct pci_dev *, int mask);
void pci_assign_irq(struct pci_dev *dev);
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
@@ -1337,38 +1543,84 @@ int pci_request_selected_regions(struct pci_dev *, int, const char *);
int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
void pci_release_selected_regions(struct pci_dev *, int);
+static inline __must_check struct resource *
+pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
+ unsigned int len, const char *name)
+{
+ return __request_region(&pdev->driver_exclusive_resource, offset, len,
+ name, IORESOURCE_EXCLUSIVE);
+}
+
+static inline void pci_release_config_region(struct pci_dev *pdev,
+ unsigned int offset,
+ unsigned int len)
+{
+ __release_region(&pdev->driver_exclusive_resource, offset, len);
+}
+
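A sketch of a driver claiming exclusive ownership of a config-space window (the offset and length are illustrative):

	struct resource *res;

	res = pci_request_config_region_exclusive(pdev, 0x100, 0x40, "foo");
	if (!res)
		return -EBUSY;
	/* ... region owned; IORESOURCE_EXCLUSIVE fences off other users ... */
	pci_release_config_region(pdev, 0x100, 0x40);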
/* drivers/pci/bus.c */
void pci_add_resource(struct list_head *resources, struct resource *res);
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
resource_size_t offset);
void pci_free_resource_list(struct list_head *resources);
-void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
- unsigned int flags);
+void pci_bus_add_resource(struct pci_bus *bus, struct resource *res);
struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
void pci_bus_remove_resources(struct pci_bus *bus);
+void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
int devm_request_pci_bus_resources(struct device *dev,
struct list_head *resources);
/* Temporary until new and working PCI SBR API in place */
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
-#define pci_bus_for_each_resource(bus, res, i) \
- for (i = 0; \
- (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
- i++)
+#define __pci_bus_for_each_res0(bus, res, ...) \
+ for (unsigned int __b = 0; \
+ (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
+ __b++)
+
+#define __pci_bus_for_each_res1(bus, res, __b) \
+ for (__b = 0; \
+ (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
+ __b++)
+
+/**
+ * pci_bus_for_each_resource - iterate over PCI bus resources
+ * @bus: the PCI bus
+ * @res: pointer to the current resource
+ * @...: optional index of the current resource
+ *
+ * Iterate over PCI bus resources. The iteration first walks the PCI bus
+ * resource array, which has at most %PCI_BRIDGE_RESOURCE_NUM entries, and
+ * then continues with the separate list of additional resources, if that
+ * list is not empty. That is why the logical OR is used.
+ *
+ * Possible usage:
+ *
+ * struct pci_bus *bus = ...;
+ * struct resource *res;
+ * unsigned int i;
+ *
+ * // With optional index
+ * pci_bus_for_each_resource(bus, res, i)
+ * pr_info("PCI bus resource[%u]: %pR\n", i, res);
+ *
+ * // Without index
+ * pci_bus_for_each_resource(bus, res)
+ * _do_something_(res);
+ */
+#define pci_bus_for_each_resource(bus, res, ...) \
+ CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
+ (bus, res, __VA_ARGS__)
int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
struct resource *res, resource_size_t size,
resource_size_t align, resource_size_t min,
unsigned long type_mask,
- resource_size_t (*alignf)(void *,
- const struct resource *,
- resource_size_t,
- resource_size_t),
+ resource_alignf alignf,
void *alignf_data);
-int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
+int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
resource_size_t size);
unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
@@ -1435,9 +1687,10 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
void *userdata);
+void pci_walk_bus_reverse(struct pci_bus *top,
+ int (*cb)(struct pci_dev *, void *), void *userdata);
int pci_cfg_space_size(struct pci_dev *dev);
unsigned char pci_bus_max_busnr(struct pci_bus *bus);
-void pci_setup_bridge(struct pci_bus *bus);
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
unsigned long type);
@@ -1455,22 +1708,10 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
*/
#define PCI_IRQ_VIRTUAL (1 << 4)
-#define PCI_IRQ_ALL_TYPES \
- (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
-
-/* kmem_cache style wrapper around pci_alloc_consistent() */
+#define PCI_IRQ_ALL_TYPES (PCI_IRQ_INTX | PCI_IRQ_MSI | PCI_IRQ_MSIX)
#include <linux/dmapool.h>
-#define pci_pool dma_pool
-#define pci_pool_create(name, pdev, size, align, allocation) \
- dma_pool_create(name, &pdev->dev, size, align, allocation)
-#define pci_pool_destroy(pool) dma_pool_destroy(pool)
-#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
-#define pci_pool_zalloc(pool, flags, handle) \
- dma_pool_zalloc(pool, flags, handle)
-#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
-
struct msix_entry {
u32 vector; /* Kernel uses to write allocated vector */
u16 entry; /* Driver uses to specify entry, OS writes */
@@ -1482,7 +1723,7 @@ void pci_disable_msi(struct pci_dev *dev);
int pci_msix_vec_count(struct pci_dev *dev);
void pci_disable_msix(struct pci_dev *dev);
void pci_restore_msi_state(struct pci_dev *dev);
-int pci_msi_enabled(void);
+bool pci_msi_enabled(void);
int pci_enable_msi(struct pci_dev *dev);
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
int minvec, int maxvec);
@@ -1494,10 +1735,17 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
return rc;
return 0;
}
+int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags);
int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags,
struct irq_affinity *affd);
+bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
+struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
+ const struct irq_affinity_desc *affdesc);
+void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
+
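A typical allocation sketch; foo_handler, foo, and the vector count are illustrative:

	int nvec, ret;

	/* Prefer MSI-X/MSI, fall back to INTx; request 1-8 vectors */
	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;

	ret = request_irq(pci_irq_vector(pdev, 0), foo_handler, 0, "foo", foo);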
void pci_free_irq_vectors(struct pci_dev *dev);
int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
@@ -1508,7 +1756,7 @@ static inline void pci_disable_msi(struct pci_dev *dev) { }
static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
static inline void pci_disable_msix(struct pci_dev *dev) { }
static inline void pci_restore_msi_state(struct pci_dev *dev) { }
-static inline int pci_msi_enabled(void) { return 0; }
+static inline bool pci_msi_enabled(void) { return false; }
static inline int pci_enable_msi(struct pci_dev *dev)
{ return -ENOSYS; }
static inline int pci_enable_msix_range(struct pci_dev *dev,
@@ -1523,10 +1771,31 @@ pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags,
struct irq_affinity *aff_desc)
{
- if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq)
+ if ((flags & PCI_IRQ_INTX) && min_vecs == 1 && dev->irq)
return 1;
return -ENOSPC;
}
+static inline int
+pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags)
+{
+ return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
+ flags, NULL);
+}
+
+static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
+{ return false; }
+static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
+ const struct irq_affinity_desc *affdesc)
+{
+ struct msi_map map = { .index = -ENOSYS, };
+
+ return map;
+}
+
+static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
+{
+}
static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
@@ -1580,22 +1849,42 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d,
#ifdef CONFIG_PCIEPORTBUS
extern bool pcie_ports_disabled;
extern bool pcie_ports_native;
+
+int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req,
+ bool use_lt);
#else
#define pcie_ports_disabled true
#define pcie_ports_native false
+
+static inline int pcie_set_target_speed(struct pci_dev *port,
+ enum pci_bus_speed speed_req,
+ bool use_lt)
+{
+ return -EOPNOTSUPP;
+}
#endif
-#define PCIE_LINK_STATE_L0S BIT(0)
-#define PCIE_LINK_STATE_L1 BIT(1)
-#define PCIE_LINK_STATE_CLKPM BIT(2)
-#define PCIE_LINK_STATE_L1_1 BIT(3)
-#define PCIE_LINK_STATE_L1_2 BIT(4)
-#define PCIE_LINK_STATE_L1_1_PCIPM BIT(5)
-#define PCIE_LINK_STATE_L1_2_PCIPM BIT(6)
+#define PCIE_LINK_STATE_L0S (BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */
+#define PCIE_LINK_STATE_L1 BIT(2) /* L1 state */
+#define PCIE_LINK_STATE_L1_1 BIT(3) /* ASPM L1.1 state */
+#define PCIE_LINK_STATE_L1_2 BIT(4) /* ASPM L1.2 state */
+#define PCIE_LINK_STATE_L1_1_PCIPM BIT(5) /* PCI-PM L1.1 state */
+#define PCIE_LINK_STATE_L1_2_PCIPM BIT(6) /* PCI-PM L1.2 state */
+#define PCIE_LINK_STATE_ASPM_ALL (PCIE_LINK_STATE_L0S |\
+ PCIE_LINK_STATE_L1 |\
+ PCIE_LINK_STATE_L1_1 |\
+ PCIE_LINK_STATE_L1_2 |\
+ PCIE_LINK_STATE_L1_1_PCIPM |\
+ PCIE_LINK_STATE_L1_2_PCIPM)
+#define PCIE_LINK_STATE_CLKPM BIT(7)
+#define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_ASPM_ALL |\
+ PCIE_LINK_STATE_CLKPM)
#ifdef CONFIG_PCIEASPM
int pci_disable_link_state(struct pci_dev *pdev, int state);
int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
+int pci_enable_link_state(struct pci_dev *pdev, int state);
+int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
void pcie_no_aspm(void);
bool pcie_aspm_support_enabled(void);
bool pcie_aspm_enabled(struct pci_dev *pdev);
@@ -1604,11 +1893,23 @@ static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
{ return 0; }
static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{ return 0; }
+static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
+{ return 0; }
+static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
+{ return 0; }
static inline void pcie_no_aspm(void) { }
static inline bool pcie_aspm_support_enabled(void) { return false; }
static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
#endif
+#ifdef CONFIG_HOTPLUG_PCI
+void pci_hp_ignore_link_change(struct pci_dev *pdev);
+void pci_hp_unignore_link_change(struct pci_dev *pdev);
+#else
+static inline void pci_hp_ignore_link_change(struct pci_dev *pdev) { }
+static inline void pci_hp_unignore_link_change(struct pci_dev *pdev) { }
+#endif
+
#ifdef CONFIG_PCIEAER
bool pci_aer_available(void);
#else
@@ -1617,10 +1918,72 @@ static inline bool pci_aer_available(void) { return false; }
bool pci_ats_disabled(void);
+#define PCIE_PTM_CONTEXT_UPDATE_AUTO 0
+#define PCIE_PTM_CONTEXT_UPDATE_MANUAL 1
+
+struct pcie_ptm_ops {
+ int (*check_capability)(void *drvdata);
+ int (*context_update_write)(void *drvdata, u8 mode);
+ int (*context_update_read)(void *drvdata, u8 *mode);
+ int (*context_valid_write)(void *drvdata, bool valid);
+ int (*context_valid_read)(void *drvdata, bool *valid);
+ int (*local_clock_read)(void *drvdata, u64 *clock);
+ int (*master_clock_read)(void *drvdata, u64 *clock);
+ int (*t1_read)(void *drvdata, u64 *clock);
+ int (*t2_read)(void *drvdata, u64 *clock);
+ int (*t3_read)(void *drvdata, u64 *clock);
+ int (*t4_read)(void *drvdata, u64 *clock);
+
+ bool (*context_update_visible)(void *drvdata);
+ bool (*context_valid_visible)(void *drvdata);
+ bool (*local_clock_visible)(void *drvdata);
+ bool (*master_clock_visible)(void *drvdata);
+ bool (*t1_visible)(void *drvdata);
+ bool (*t2_visible)(void *drvdata);
+ bool (*t3_visible)(void *drvdata);
+ bool (*t4_visible)(void *drvdata);
+};
+
+struct pci_ptm_debugfs {
+ struct dentry *debugfs;
+ const struct pcie_ptm_ops *ops;
+ struct mutex lock;
+ void *pdata;
+};
+
+#ifdef CONFIG_PCIE_PTM
+int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
+void pci_disable_ptm(struct pci_dev *dev);
+bool pcie_ptm_enabled(struct pci_dev *dev);
+#else
+static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
+{ return -EINVAL; }
+static inline void pci_disable_ptm(struct pci_dev *dev) { }
+static inline bool pcie_ptm_enabled(struct pci_dev *dev)
+{ return false; }
+#endif
+
+#if IS_ENABLED(CONFIG_DEBUG_FS) && IS_ENABLED(CONFIG_PCIE_PTM)
+struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
+ const struct pcie_ptm_ops *ops);
+void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs);
+#else
+static inline struct pci_ptm_debugfs
+*pcie_ptm_create_debugfs(struct device *dev, void *pdata,
+ const struct pcie_ptm_ops *ops) { return NULL; }
+static inline void
+pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs) { }
+#endif
+
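A sketch of a controller driver wiring one op into the PTM debugfs interface; struct foo and its fields are hypothetical:

static int foo_context_valid_read(void *drvdata, bool *valid)
{
	struct foo *foo = drvdata;

	*valid = foo->ptm_context_valid;
	return 0;
}

static const struct pcie_ptm_ops foo_ptm_ops = {
	.context_valid_read = foo_context_valid_read,
};

	/* in probe: */
	foo->ptm_dbg = pcie_ptm_create_debugfs(dev, foo, &foo_ptm_ops);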
void pci_cfg_access_lock(struct pci_dev *dev);
bool pci_cfg_access_trylock(struct pci_dev *dev);
void pci_cfg_access_unlock(struct pci_dev *dev);
+void pci_dev_lock(struct pci_dev *dev);
+int pci_dev_trylock(struct pci_dev *dev);
+void pci_dev_unlock(struct pci_dev *dev);
+DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
+
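DEFINE_GUARD() enables the scope-based guard() helper from <linux/cleanup.h>; a sketch:

static int foo_do_locked(struct pci_dev *pdev)
{
	guard(pci_dev)(pdev);	/* pci_dev_unlock() on all return paths */

	/* ... operate on pdev while holding the device lock ... */
	return 0;
}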
/*
* PCI domain support. Sometimes called PCI segment (eg by ACPI),
* a PCI domain is defined to be a set of PCI buses which share
@@ -1628,10 +1991,17 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
*/
#ifdef CONFIG_PCI_DOMAINS
extern int pci_domains_supported;
+int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max);
+void pci_bus_release_emul_domain_nr(int domain_nr);
#else
enum { pci_domains_supported = 0 };
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
+static inline int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max)
+{
+ return 0;
+}
+static inline void pci_bus_release_emul_domain_nr(int domain_nr) { }
#endif /* CONFIG_PCI_DOMAINS */
/*
@@ -1651,6 +2021,7 @@ static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
{ return 0; }
#endif
int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
+void pci_bus_release_domain_nr(struct device *parent, int domain_nr);
#endif
/* Some architectures require additional setup to direct VGA traffic */
@@ -1713,6 +2084,11 @@ static inline struct pci_dev *pci_get_device(unsigned int vendor,
struct pci_dev *from)
{ return NULL; }
+static inline struct pci_dev *pci_get_device_reverse(unsigned int vendor,
+ unsigned int device,
+ struct pci_dev *from)
+{ return NULL; }
+
static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
unsigned int device,
unsigned int ss_vendor,
@@ -1724,28 +2100,35 @@ static inline struct pci_dev *pci_get_class(unsigned int class,
struct pci_dev *from)
{ return NULL; }
-#define pci_dev_present(ids) (0)
+static inline struct pci_dev *pci_get_base_class(unsigned int class,
+ struct pci_dev *from)
+{ return NULL; }
+
+static inline int pci_dev_present(const struct pci_device_id *ids)
+{ return 0; }
+
#define no_pci_devices() (1)
#define pci_dev_put(dev) do { } while (0)
static inline void pci_set_master(struct pci_dev *dev) { }
+static inline void pci_clear_master(struct pci_dev *dev) { }
static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
static inline void pci_disable_device(struct pci_dev *dev) { }
static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
static inline int pci_assign_resource(struct pci_dev *dev, int i)
{ return -EBUSY; }
-static inline int __pci_register_driver(struct pci_driver *drv,
- struct module *owner)
+static inline int __must_check __pci_register_driver(struct pci_driver *drv,
+ struct module *owner,
+ const char *mod_name)
{ return 0; }
static inline int pci_register_driver(struct pci_driver *drv)
{ return 0; }
static inline void pci_unregister_driver(struct pci_driver *drv) { }
static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
{ return 0; }
-static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
- int cap)
+static inline u8 pci_find_next_capability(struct pci_dev *dev, u8 post, int cap)
{ return 0; }
-static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
+static inline u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
{ return 0; }
static inline u64 pci_get_dsn(struct pci_dev *dev)
@@ -1756,6 +2139,8 @@ static inline int pci_save_state(struct pci_dev *dev) { return 0; }
static inline void pci_restore_state(struct pci_dev *dev) { }
static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{ return 0; }
+static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
+{ return 0; }
static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{ return 0; }
static inline pci_power_t pci_choose_state(struct pci_dev *dev,
@@ -1772,6 +2157,10 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }
+static inline int pci_register_io_range(const struct fwnode_handle *fwnode,
+ phys_addr_t addr, resource_size_t size)
+{ return -EINVAL; }
+
static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
@@ -1815,38 +2204,26 @@ pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
{
return -ENOSPC;
}
-#endif /* CONFIG_PCI */
-
static inline int
pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
unsigned int max_vecs, unsigned int flags)
{
- return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
- NULL);
+ return -ENOSPC;
}
+#endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */
#include <asm/pci.h>
-/* These two functions provide almost identical functionality. Depending
- * on the architecture, one will be implemented as a wrapper around the
- * other (in drivers/pci/mmap.c).
- *
+/*
* pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
* is expected to be an offset within that region.
*
- * pci_mmap_page_range() is the legacy architecture-specific interface,
- * which accepts a "user visible" resource address converted by
- * pci_resource_to_user(), as used in the legacy mmap() interface in
- * /proc/bus/pci/.
*/
int pci_mmap_resource_range(struct pci_dev *dev, int bar,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
-int pci_mmap_page_range(struct pci_dev *pdev, int bar,
- struct vm_area_struct *vma,
- enum pci_mmap_state mmap_state, int write_combine);
#ifndef arch_can_pci_mmap_wc
#define arch_can_pci_mmap_wc() 0
@@ -1867,16 +2244,27 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
* These helpers provide future and backwards compatibility
* for accessing popular PCI BAR info
*/
-#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
-#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
-#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
-#define pci_resource_len(dev,bar) \
- ((pci_resource_start((dev), (bar)) == 0 && \
- pci_resource_end((dev), (bar)) == \
- pci_resource_start((dev), (bar))) ? 0 : \
- \
- (pci_resource_end((dev), (bar)) - \
- pci_resource_start((dev), (bar)) + 1))
+#define pci_resource_n(dev, bar) (&(dev)->resource[(bar)])
+#define pci_resource_start(dev, bar) (pci_resource_n(dev, bar)->start)
+#define pci_resource_end(dev, bar) (pci_resource_n(dev, bar)->end)
+#define pci_resource_flags(dev, bar) (pci_resource_n(dev, bar)->flags)
+#define pci_resource_len(dev,bar) \
+ (pci_resource_end((dev), (bar)) ? \
+ resource_size(pci_resource_n((dev), (bar))) : 0)
+
+#define __pci_dev_for_each_res0(dev, res, ...) \
+ for (unsigned int __b = 0; \
+ __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
+ __b++)
+
+#define __pci_dev_for_each_res1(dev, res, __b) \
+ for (__b = 0; \
+ __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
+ __b++)
+
+#define pci_dev_for_each_resource(dev, res, ...) \
+ CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
+ (dev, res, __VA_ARGS__)
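
/*
 * Usage sketch for the iterator above (my_dump_resources is a
 * hypothetical helper): walk a device's resources with and without a
 * caller-visible BAR index.
 */
static void my_dump_resources(struct pci_dev *dev)
{
	struct resource *res;
	unsigned int bar;

	/* Two-argument form: the loop counter stays hidden. */
	pci_dev_for_each_resource(dev, res)
		if (resource_size(res))
			pci_info(dev, "res %pR\n", res);

	/* Three-argument form: bar is visible to the loop body. */
	pci_dev_for_each_resource(dev, res, bar)
		if (pci_resource_flags(dev, bar) & IORESOURCE_MEM)
			pci_info(dev, "BAR %u is MMIO\n", bar);
}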
/*
* Similar to the helpers above, these manipulate per-pci_dev
@@ -1951,8 +2339,8 @@ enum pci_fixup_pass {
#ifdef CONFIG_LTO_CLANG
#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
class_shift, hook, stub) \
- void __cficanonical stub(struct pci_dev *dev); \
- void __cficanonical stub(struct pci_dev *dev) \
+ void stub(struct pci_dev *dev); \
+ void stub(struct pci_dev *dev) \
{ \
hook(dev); \
} \
@@ -2043,13 +2431,18 @@ static inline void pci_fixup_device(enum pci_fixup_pass pass,
struct pci_dev *dev) { }
#endif
+int pcim_intx(struct pci_dev *pdev, int enabled);
+int pcim_request_all_regions(struct pci_dev *pdev, const char *name);
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
+void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
+ const char *name);
+void pcim_iounmap_region(struct pci_dev *pdev, int bar);
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
+int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
-int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
- const char *name);
-void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
+void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long len);
extern int pci_pci_problems;
#define PCIPCI_FAIL 1 /* No PCI PCI DMA */
@@ -2060,8 +2453,6 @@ extern int pci_pci_problems;
#define PCIPCI_ALIMAGIK 32 /* Need low latency setting */
#define PCIAGP_FAIL 64 /* No PCI to AGP DMA */
-extern unsigned long pci_cardbus_io_size;
-extern unsigned long pci_cardbus_mem_size;
extern u8 pci_dfl_cache_line_size;
extern u8 pci_cache_line_size;
@@ -2070,7 +2461,7 @@ void pcibios_disable_device(struct pci_dev *dev);
void pcibios_set_master(struct pci_dev *dev);
int pcibios_set_pcie_reset_state(struct pci_dev *dev,
enum pcie_reset_state state);
-int pcibios_add_device(struct pci_dev *dev);
+int pcibios_device_add(struct pci_dev *dev);
void pcibios_release_device(struct pci_dev *dev);
#ifdef CONFIG_PCI
void pcibios_penalize_isa_irq(int irq, int active);
@@ -2081,6 +2472,11 @@ int pcibios_alloc_irq(struct pci_dev *dev);
void pcibios_free_irq(struct pci_dev *dev);
resource_size_t pcibios_default_alignment(void);
+#if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
+extern int pci_create_resource_files(struct pci_dev *dev);
+extern void pci_remove_resource_files(struct pci_dev *dev);
+#endif
+
#if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
void __init pci_mmcfg_early_init(void);
void __init pci_mmcfg_late_init(void);
@@ -2097,7 +2493,8 @@ void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
#ifdef CONFIG_PCI_IOV
int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
-
+int pci_iov_vf_id(struct pci_dev *dev);
+void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);
@@ -2110,6 +2507,8 @@ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
int pci_sriov_get_totalvfs(struct pci_dev *dev);
int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
+int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size);
+u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs);
void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
/* Arch may override these (weak) */
@@ -2125,6 +2524,18 @@ static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
{
return -ENOSYS;
}
+
+static inline int pci_iov_vf_id(struct pci_dev *dev)
+{
+ return -ENOSYS;
+}
+
+static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
+ struct pci_driver *pf_driver)
+{
+ return ERR_PTR(-EINVAL);
+}
+
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }
@@ -2150,14 +2561,13 @@ static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
#define pci_sriov_configure_simple NULL
static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
{ return 0; }
+static inline int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size)
+{ return -ENODEV; }
+static inline u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs)
+{ return 0; }
static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
#endif
-#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
-void pci_hp_create_module_link(struct pci_slot *pci_slot);
-void pci_hp_remove_module_link(struct pci_slot *pci_slot);
-#endif
-
/**
* pci_pcie_cap - get the saved PCIe capability offset
* @dev: PCI device
@@ -2222,6 +2632,16 @@ static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
return NULL;
}
+static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
+{
+ /*
+ * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
+ * and read w/o common lock. READ_ONCE() ensures compiler cannot cache
+ * the value (e.g. inside the loop in pci_dev_wait()).
+ */
+ return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure;
+}
+
void pci_request_acs(void);
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
bool pci_acs_path_enabled(struct pci_dev *start,
@@ -2240,20 +2660,6 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
#define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
-/* Small Resource Data Type Tag Item Names */
-#define PCI_VPD_STIN_END 0x0f /* End */
-
-#define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3)
-
-#define PCI_VPD_SRDT_TIN_MASK 0x78
-#define PCI_VPD_SRDT_LEN_MASK 0x07
-#define PCI_VPD_LRDT_TIN_MASK 0x7f
-
-#define PCI_VPD_LRDT_TAG_SIZE 3
-#define PCI_VPD_SRDT_TAG_SIZE 1
-
-#define PCI_VPD_INFO_FLD_HDR_SIZE 3
-
#define PCI_VPD_RO_KEYWORD_PARTNO "PN"
#define PCI_VPD_RO_KEYWORD_SERIALNO "SN"
#define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
@@ -2261,89 +2667,52 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
#define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
/**
- * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
- * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
- *
- * Returns the extracted Large Resource Data Type length.
- */
-static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
-{
- return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
-}
-
-/**
- * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
- * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
- *
- * Returns the extracted Large Resource Data Type Tag item.
- */
-static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
-{
- return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
-}
-
-/**
- * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
- * @srdt: Pointer to the beginning of the Small Resource Data Type tag
- *
- * Returns the extracted Small Resource Data Type length.
- */
-static inline u8 pci_vpd_srdt_size(const u8 *srdt)
-{
- return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
-}
-
-/**
- * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
- * @srdt: Pointer to the beginning of the Small Resource Data Type tag
+ * pci_vpd_alloc - Allocate buffer and read VPD into it
+ * @dev: PCI device
+ * @size: pointer to field where VPD length is returned
*
- * Returns the extracted Small Resource Data Type Tag Item.
+ * Returns pointer to allocated buffer or an ERR_PTR in case of failure
*/
-static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
-{
- return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
-}
+void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);
/**
- * pci_vpd_info_field_size - Extracts the information field length
- * @info_field: Pointer to the beginning of an information field header
+ * pci_vpd_find_id_string - Locate id string in VPD
+ * @buf: Pointer to buffered VPD data
+ * @len: The length of the buffer area in which to search
+ * @size: Pointer to field where length of id string is returned
*
- * Returns the extracted information field length.
+ * Returns the index of the id string or -ENOENT if not found.
*/
-static inline u8 pci_vpd_info_field_size(const u8 *info_field)
-{
- return info_field[2];
-}
+int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);
/**
- * pci_vpd_find_tag - Locates the Resource Data Type tag provided
- * @buf: Pointer to buffered vpd data
- * @len: The length of the vpd buffer
- * @rdt: The Resource Data Type to search for
+ * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
+ * @buf: Pointer to buffered VPD data
+ * @len: The length of the buffer area in which to search
+ * @kw: The keyword to search for
+ * @size: Pointer to field where length of found keyword data is returned
*
- * Returns the index where the Resource Data Type was found or
- * -ENOENT otherwise.
+ * Returns the index of the information field keyword data or -ENOENT if
+ * not found.
*/
-int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt);
+int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
+ const char *kw, unsigned int *size);
/**
- * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD
- * @buf: Pointer to buffered vpd data
- * @off: The offset into the buffer at which to begin the search
- * @len: The length of the buffer area, relative to off, in which to search
- * @kw: The keyword to search for
+ * pci_vpd_check_csum - Check VPD checksum
+ * @buf: Pointer to buffered VPD data
+ * @len: VPD size
*
- * Returns the index where the information field keyword was found or
- * -ENOENT otherwise.
+ * Returns 1 if VPD has no checksum, otherwise 0 or an errno
*/
-int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
- unsigned int len, const char *kw);
+int pci_vpd_check_csum(const void *buf, unsigned int len);
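
/*
 * Usage sketch for the VPD helpers above (my_show_vpd_serial is a
 * hypothetical helper; error handling trimmed): read the whole VPD
 * image, then locate the serial number keyword in the RO section.
 */
static void my_show_vpd_serial(struct pci_dev *dev)
{
	unsigned int vpd_len, kw_len;
	void *vpd;
	int off;

	vpd = pci_vpd_alloc(dev, &vpd_len);
	if (IS_ERR(vpd))
		return;

	off = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
					   PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
	if (off >= 0)
		pci_info(dev, "serial: %.*s\n", (int)kw_len, (char *)vpd + off);

	kfree(vpd);
}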
/* PCI <-> OF binding helpers */
#ifdef CONFIG_OF
struct device_node;
struct irq_domain;
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
+bool pci_host_of_has_msi_map(struct device *dev);
/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -2351,6 +2720,7 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
#else /* CONFIG_OF */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
+static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
#endif /* CONFIG_OF */
static inline struct device_node *
@@ -2376,6 +2746,12 @@ pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
#endif
+#if defined(CONFIG_X86) && defined(CONFIG_ACPI)
+bool arch_pci_dev_is_removable(struct pci_dev *pdev);
+#else
+static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; }
+#endif
+
#ifdef CONFIG_EEH
static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
{
@@ -2436,21 +2812,18 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
return false;
}
-#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
+#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH) || defined(CONFIG_S390)
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#endif
-/* Provide the legacy pci_dma_* API */
-#include <linux/pci-dma-compat.h>
-
-#define pci_printk(level, pdev, fmt, arg...) \
- dev_printk(level, &(pdev)->dev, fmt, ##arg)
+#include <linux/dma-mapping.h>
#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
+#define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg)
#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index b482e42d7153..ddf79641917f 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -44,12 +44,13 @@ struct hotplug_slot_ops {
int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
- int (*reset_slot) (struct hotplug_slot *slot, int probe);
+ int (*reset_slot) (struct hotplug_slot *slot, bool probe);
};
/**
* struct hotplug_slot - used to register a physical slot with the hotplug pci core
* @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
+ * @pci_slot: represents a physical slot
* @owner: The module owner of this structure
* @mod_name: The module name (KBUILD_MODNAME) of this structure
*/
@@ -57,7 +58,6 @@ struct hotplug_slot {
const struct hotplug_slot_ops *ops;
/* Variables below this are for use only by the hotplug pci core. */
- struct list_head slot_list;
struct pci_slot *pci_slot;
struct module *owner;
const char *mod_name;
@@ -104,6 +104,7 @@ static inline bool shpchp_is_native(struct pci_dev *bridge) { return true; }
static inline bool hotplug_is_native(struct pci_dev *bridge)
{
- return pciehp_is_native(bridge) || shpchp_is_native(bridge);
+ return (bridge->is_pciehp && pciehp_is_native(bridge)) ||
+ shpchp_is_native(bridge);
}
#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 4c3fa5293d76..a9a089566b7c 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2,7 +2,7 @@
/*
* PCI Class, Vendor and Device IDs
*
- * Please keep sorted.
+ * Please keep sorted by numeric Vendor ID and Device ID.
*
* Do not add new entries to this file unless the definitions
* are shared between multiple drivers.
@@ -60,6 +60,8 @@
#define PCI_CLASS_BRIDGE_EISA 0x0602
#define PCI_CLASS_BRIDGE_MC 0x0603
#define PCI_CLASS_BRIDGE_PCI 0x0604
+#define PCI_CLASS_BRIDGE_PCI_NORMAL 0x060400
+#define PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE 0x060401
#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
#define PCI_CLASS_BRIDGE_NUBUS 0x0606
#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
@@ -73,6 +75,9 @@
#define PCI_CLASS_COMMUNICATION_MODEM 0x0703
#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
+/* Interface for SERIAL/MODEM */
+#define PCI_SERIAL_16550_COMPATIBLE 0x02
+
#define PCI_BASE_CLASS_SYSTEM 0x08
#define PCI_CLASS_SYSTEM_PIC 0x0800
#define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010
@@ -116,6 +121,7 @@
#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310
#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320
#define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330
+#define PCI_CLASS_SERIAL_USB_CDNS 0x0c0380
#define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe
#define PCI_CLASS_SERIAL_FIBER 0x0c04
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
@@ -146,12 +152,21 @@
#define PCI_CLASS_SP_DPIO 0x1100
#define PCI_CLASS_SP_OTHER 0x1180
+#define PCI_BASE_CLASS_ACCELERATOR 0x12
+#define PCI_CLASS_ACCELERATOR_PROCESSING 0x1200
+
#define PCI_CLASS_OTHERS 0xff
/* Vendors and devices. Sort key: vendor first, device next. */
+#define PCI_VENDOR_ID_PCI_SIG 0x0001
#define PCI_VENDOR_ID_LOONGSON 0x0014
+#define PCI_DEVICE_ID_LOONGSON_HDA 0x7a07
+#define PCI_DEVICE_ID_LOONGSON_HDMI 0x7a37
+
+#define PCI_VENDOR_ID_SOLIDIGM 0x025e
+
#define PCI_VENDOR_ID_TTTECH 0x0357
#define PCI_DEVICE_ID_TTTECH_MC322 0x000a
@@ -166,6 +181,8 @@
#define PCI_DEVICE_ID_BERKOM_A4T 0xffa4
#define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8
+#define PCI_VENDOR_ID_ITTIM 0x0b48
+
#define PCI_VENDOR_ID_COMPAQ 0x0e11
#define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508
#define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc
@@ -501,6 +518,7 @@
#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251
#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361
#define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252
+#define PCI_DEVICE_ID_IBM_ISM 0x04ed
#define PCI_SUBVENDOR_ID_IBM 0x1014
#define PCI_SUBDEVICE_ID_IBM_SATURN_SERIAL_ONE_PORT 0x03d4
@@ -552,9 +570,24 @@
#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
+#define PCI_DEVICE_ID_AMD_17H_M40H_DF_F3 0x13f3
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
+#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3 0x1727
#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
+#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F3 0x14b0
+#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F3 0x167c
+#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d
+#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F3 0x14e3
+#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F3 0x14f3
+#define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb
+#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3
+#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb
+#define PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3 0x124b
+#define PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3 0x12bb
+#define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3
+#define PCI_DEVICE_ID_AMD_MI300_DF_F3 0x152b
+#define PCI_DEVICE_ID_AMD_VANGOGH_USB 0x163a
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
@@ -631,6 +664,8 @@
#define PCI_DEVICE_ID_DELL_RAC4 0x0012
#define PCI_DEVICE_ID_DELL_PERC5 0x0015
+#define PCI_SUBVENDOR_ID_DELL 0x1028
+
#define PCI_VENDOR_ID_MATROX 0x102B
#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
#define PCI_DEVICE_ID_MATROX_MIL 0x0519
@@ -1118,6 +1153,7 @@
#define PCI_DEVICE_ID_3COM_3CR990SVR 0x990a
#define PCI_VENDOR_ID_AL 0x10b9
+#define PCI_DEVICE_ID_AL_M1489 0x1489
#define PCI_DEVICE_ID_AL_M1533 0x1533
#define PCI_DEVICE_ID_AL_M1535 0x1535
#define PCI_DEVICE_ID_AL_M1541 0x1541
@@ -1735,6 +1771,10 @@
#define PCI_SUBDEVICE_ID_AT_2700FX 0x2701
#define PCI_SUBDEVICE_ID_AT_2701FX 0x2703
+#define PCI_VENDOR_ID_ASIX 0x125b
+#define PCI_DEVICE_ID_ASIX_AX99100 0x9100
+#define PCI_DEVICE_ID_ASIX_AX99100_LB 0x9110
+
#define PCI_VENDOR_ID_ESS 0x125d
#define PCI_DEVICE_ID_ESS_ESS1968 0x1968
#define PCI_DEVICE_ID_ESS_ESS1978 0x1978
@@ -1959,24 +1999,6 @@
#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003
#define PCI_VENDOR_ID_MOXA 0x1393
-#define PCI_DEVICE_ID_MOXA_RC7000 0x0001
-#define PCI_DEVICE_ID_MOXA_CP102 0x1020
-#define PCI_DEVICE_ID_MOXA_CP102UL 0x1021
-#define PCI_DEVICE_ID_MOXA_CP102U 0x1022
-#define PCI_DEVICE_ID_MOXA_C104 0x1040
-#define PCI_DEVICE_ID_MOXA_CP104U 0x1041
-#define PCI_DEVICE_ID_MOXA_CP104JU 0x1042
-#define PCI_DEVICE_ID_MOXA_CP104EL 0x1043
-#define PCI_DEVICE_ID_MOXA_CT114 0x1140
-#define PCI_DEVICE_ID_MOXA_CP114 0x1141
-#define PCI_DEVICE_ID_MOXA_CP118U 0x1180
-#define PCI_DEVICE_ID_MOXA_CP118EL 0x1181
-#define PCI_DEVICE_ID_MOXA_CP132 0x1320
-#define PCI_DEVICE_ID_MOXA_CP132U 0x1321
-#define PCI_DEVICE_ID_MOXA_CP134U 0x1340
-#define PCI_DEVICE_ID_MOXA_C168 0x1680
-#define PCI_DEVICE_ID_MOXA_CP168U 0x1681
-#define PCI_DEVICE_ID_MOXA_CP168EL 0x1682
#define PCI_DEVICE_ID_MOXA_CP204J 0x2040
#define PCI_DEVICE_ID_MOXA_C218 0x2180
#define PCI_DEVICE_ID_MOXA_C320 0x3200
@@ -2085,6 +2107,9 @@
#define PCI_DEVICE_ID_ICE_1712 0x1712
#define PCI_DEVICE_ID_VT1724 0x1724
+#define PCI_VENDOR_ID_MICROSOFT 0x1414
+#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
+
#define PCI_VENDOR_ID_OXSEMI 0x1415
#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403
#define PCI_DEVICE_ID_OXSEMI_PCIe840 0xC000
@@ -2105,6 +2130,8 @@
#define PCI_VENDOR_ID_CHELSIO 0x1425
+#define PCI_VENDOR_ID_EDIMAX 0x1432
+
#define PCI_VENDOR_ID_ADLINK 0x144a
#define PCI_VENDOR_ID_SAMSUNG 0x144d
@@ -2397,6 +2424,9 @@
#define PCI_VENDOR_ID_QCOM 0x17cb
#define PCI_VENDOR_ID_CDNS 0x17cd
+#define PCI_DEVICE_ID_CDNS_USBSS 0x0100
+#define PCI_DEVICE_ID_CDNS_USB 0x0120
+#define PCI_DEVICE_ID_CDNS_USBSSP 0x0200
#define PCI_VENDOR_ID_ARECA 0x17d3
#define PCI_DEVICE_ID_ARECA_1110 0x1110
@@ -2448,7 +2478,8 @@
#define PCI_VENDOR_ID_TDI 0x192E
#define PCI_DEVICE_ID_TDI_EHCI 0x0101
-#define PCI_VENDOR_ID_FREESCALE 0x1957
+#define PCI_VENDOR_ID_FREESCALE 0x1957 /* duplicate: NXP */
+#define PCI_VENDOR_ID_NXP 0x1957 /* duplicate: FREESCALE */
#define PCI_DEVICE_ID_MPC8308 0xc006
#define PCI_DEVICE_ID_MPC8315E 0x00b4
#define PCI_DEVICE_ID_MPC8315 0x00b5
@@ -2540,11 +2571,16 @@
#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff
#define PCI_VENDOR_ID_HUAWEI 0x19e5
+#define PCI_DEVICE_ID_HUAWEI_ZIP_VF 0xa251
+#define PCI_DEVICE_ID_HUAWEI_SEC_VF 0xa256
+#define PCI_DEVICE_ID_HUAWEI_HPRE_VF 0xa259
#define PCI_VENDOR_ID_NETRONOME 0x19ee
+#define PCI_DEVICE_ID_NETRONOME_NFP3800 0x3800
#define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000
#define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000
#define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000
+#define PCI_DEVICE_ID_NETRONOME_NFP3800_VF 0x3803
#define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003
#define PCI_VENDOR_ID_QMI 0x1a32
@@ -2559,6 +2595,11 @@
#define PCI_VENDOR_ID_REDHAT 0x1b36
+#define PCI_VENDOR_ID_WCHIC 0x1c00
+#define PCI_DEVICE_ID_WCHIC_CH382_0S1P 0x3050
+#define PCI_DEVICE_ID_WCHIC_CH382_2S1P 0x3250
+#define PCI_DEVICE_ID_WCHIC_CH382_2S 0x3253
+
#define PCI_VENDOR_ID_SILICOM_DENMARK 0x1c2c
#define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36
@@ -2570,13 +2611,26 @@
#define PCI_VENDOR_ID_ZHAOXIN 0x1d17
+#define PCI_VENDOR_ID_ROCKCHIP 0x1d87
+
#define PCI_VENDOR_ID_HYGON 0x1d94
+#define PCI_VENDOR_ID_META 0x1d9b
+
+#define PCI_VENDOR_ID_FUNGIBLE 0x1dad
+
#define PCI_VENDOR_ID_HXT 0x1dbf
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
+#define PCI_VENDOR_ID_RPI 0x1de4
+#define PCI_DEVICE_ID_RPI_RP1_C0 0x0001
+
+#define PCI_VENDOR_ID_ALIBABA 0x1ded
+
+#define PCI_VENDOR_ID_CXL 0x1e98
+
#define PCI_VENDOR_ID_TEHUTI 0x1fc9
#define PCI_DEVICE_ID_TEHUTI_3009 0x3009
#define PCI_DEVICE_ID_TEHUTI_3010 0x3010
@@ -2605,6 +2659,12 @@
#define PCI_VENDOR_ID_AKS 0x416c
#define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100
+#define PCI_VENDOR_ID_WCHCN 0x4348
+#define PCI_DEVICE_ID_WCHCN_CH353_4S 0x3453
+#define PCI_DEVICE_ID_WCHCN_CH353_2S1PF 0x5046
+#define PCI_DEVICE_ID_WCHCN_CH353_1S1P 0x5053
+#define PCI_DEVICE_ID_WCHCN_CH353_2S1P 0x7053
+
#define PCI_VENDOR_ID_ACCESSIO 0x494f
#define PCI_DEVICE_ID_ACCESSIO_WDG_CSM 0x22c0
@@ -2624,13 +2684,16 @@
#define PCI_DEVICE_ID_DCI_PCCOM8 0x0002
#define PCI_DEVICE_ID_DCI_PCCOM2 0x0004
+#define PCI_VENDOR_ID_GLENFLY 0x6766
+
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
+#define PCI_DEVICE_ID_INTEL_HDA_CML_LP 0x02c8
#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
-#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A
-#define PCI_DEVICE_ID_INTEL_PXHV 0x032C
+#define PCI_DEVICE_ID_INTEL_PXH_1 0x032a
+#define PCI_DEVICE_ID_INTEL_PXHV 0x032c
#define PCI_DEVICE_ID_INTEL_80332_0 0x0330
#define PCI_DEVICE_ID_INTEL_80332_1 0x0332
#define PCI_DEVICE_ID_INTEL_80333_0 0x0370
@@ -2640,26 +2703,35 @@
#define PCI_DEVICE_ID_INTEL_82375 0x0482
#define PCI_DEVICE_ID_INTEL_82424 0x0483
#define PCI_DEVICE_ID_INTEL_82378 0x0484
+#define PCI_DEVICE_ID_INTEL_82425 0x0486
+#define PCI_DEVICE_ID_INTEL_HDA_CML_H 0x06c8
#define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807
#define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808
+#define PCI_DEVICE_ID_INTEL_HDA_OAKTRAIL 0x080a
#define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820
#define PCI_DEVICE_ID_INTEL_MFD_SDIO1 0x0821
#define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822
#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823
#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
-#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
-#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E
+#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084f
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095e
#define PCI_DEVICE_ID_INTEL_I960 0x0960
#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
+#define PCI_DEVICE_ID_INTEL_HDA_HSW_0 0x0a0c
+#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
+#define PCI_DEVICE_ID_INTEL_HDA_HSW_2 0x0c0c
#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
+#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
+#define PCI_DEVICE_ID_INTEL_HDA_HSW_3 0x0d0c
+#define PCI_DEVICE_ID_INTEL_HDA_BYT 0x0f04
+#define PCI_DEVICE_ID_INTEL_SST_BYT 0x0f28
#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085
-#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F
+#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108f
#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130
#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132
+#define PCI_DEVICE_ID_INTEL_SST_TNG 0x119a
#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
-#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
-#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
#define PCI_DEVICE_ID_INTEL_82437 0x122d
#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e
#define PCI_DEVICE_ID_INTEL_82371FB_1 0x1230
@@ -2685,20 +2757,26 @@
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE 0x1576
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578
+#define PCI_DEVICE_ID_INTEL_HDA_BDW 0x160c
#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960
#define PCI_DEVICE_ID_INTEL_QAT_C3XXX 0x19e2
#define PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF 0x19e3
#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
+#define PCI_DEVICE_ID_INTEL_HDA_CPT 0x1c20
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN 0x1c41
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f
+#define PCI_DEVICE_ID_INTEL_HDA_PBG 0x1d20
#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40
#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41
+#define PCI_DEVICE_ID_INTEL_HDA_PPT 0x1e20
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f
#define PCI_DEVICE_ID_INTEL_VMD_201D 0x201d
+#define PCI_DEVICE_ID_INTEL_HDA_BSW 0x2284
+#define PCI_DEVICE_ID_INTEL_SST_BSW 0x22a8
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
@@ -2748,17 +2826,13 @@
#define PCI_DEVICE_ID_INTEL_82801EB_11 0x24db
#define PCI_DEVICE_ID_INTEL_82801EB_12 0x24dc
#define PCI_DEVICE_ID_INTEL_82801EB_13 0x24dd
-#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1
-#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2
-#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4
-#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6
-#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
-#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac
#define PCI_DEVICE_ID_INTEL_82820_HB 0x2500
#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501
#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530
#define PCI_DEVICE_ID_INTEL_82860_HB 0x2531
#define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c
+#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
+#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560
#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562
#define PCI_DEVICE_ID_INTEL_82865_HB 0x2570
@@ -2768,30 +2842,39 @@
#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582
#define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590
#define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592
-#define PCI_DEVICE_ID_INTEL_5000_ERR 0x25F0
-#define PCI_DEVICE_ID_INTEL_5000_FBD0 0x25F5
-#define PCI_DEVICE_ID_INTEL_5000_FBD1 0x25F6
-#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
-#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
-#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778
-#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0
-#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2
+#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1
+#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2
+#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4
+#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6
+#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
+#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac
+#define PCI_DEVICE_ID_INTEL_5000_ERR 0x25f0
+#define PCI_DEVICE_ID_INTEL_5000_FBD0 0x25f5
+#define PCI_DEVICE_ID_INTEL_5000_FBD1 0x25f6
#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640
#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641
#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642
+#define PCI_DEVICE_ID_INTEL_HDA_ICH6 0x2668
#define PCI_DEVICE_ID_INTEL_ICH6_16 0x266a
#define PCI_DEVICE_ID_INTEL_ICH6_17 0x266d
#define PCI_DEVICE_ID_INTEL_ICH6_18 0x266e
#define PCI_DEVICE_ID_INTEL_ICH6_19 0x266f
#define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670
#define PCI_DEVICE_ID_INTEL_ESB2_14 0x2698
+#define PCI_DEVICE_ID_INTEL_HDA_ESB2 0x269a
#define PCI_DEVICE_ID_INTEL_ESB2_17 0x269b
#define PCI_DEVICE_ID_INTEL_ESB2_18 0x269e
+#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
+#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
+#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778
+#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27a0
+#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27a2
+#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0
#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8
#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9
-#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0
#define PCI_DEVICE_ID_INTEL_TGP_LPC 0x27bc
#define PCI_DEVICE_ID_INTEL_ICH7_31 0x27bd
+#define PCI_DEVICE_ID_INTEL_HDA_ICH7 0x27d8
#define PCI_DEVICE_ID_INTEL_ICH7_17 0x27da
#define PCI_DEVICE_ID_INTEL_ICH7_19 0x27dd
#define PCI_DEVICE_ID_INTEL_ICH7_20 0x27de
@@ -2802,17 +2885,20 @@
#define PCI_DEVICE_ID_INTEL_ICH8_3 0x2814
#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
+#define PCI_DEVICE_ID_INTEL_HDA_ICH8 0x284b
#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
#define PCI_DEVICE_ID_INTEL_VMD_28C0 0x28c0
#define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910
-#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
#define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912
#define PCI_DEVICE_ID_INTEL_ICH9_3 0x2913
#define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914
-#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919
-#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916
+#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918
+#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919
+#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
+#define PCI_DEVICE_ID_INTEL_HDA_ICH9_0 0x293e
+#define PCI_DEVICE_ID_INTEL_HDA_ICH9_1 0x293f
#define PCI_DEVICE_ID_INTEL_I7_MCR 0x2c18
#define PCI_DEVICE_ID_INTEL_I7_MC_TAD 0x2c19
#define PCI_DEVICE_ID_INTEL_I7_MC_RAS 0x2c1a
@@ -2829,8 +2915,8 @@
#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR 0x2c31
#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32
#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33
-#define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41
#define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40
+#define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE 0x2c50
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT 0x2c51
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2 0x2c70
@@ -2839,7 +2925,7 @@
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_PHY0 0x2c91
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR 0x2c98
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD 0x2c99
-#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9C
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9c
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL 0x2ca0
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR 0x2ca1
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK 0x2ca2
@@ -2864,6 +2950,7 @@
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2 0x2db1
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2 0x2db2
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2 0x2db3
+#define PCI_DEVICE_ID_INTEL_HDA_GML 0x3198
#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429
#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a
@@ -2874,12 +2961,13 @@
#define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431
#define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432
#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433
+#define PCI_DEVICE_ID_INTEL_HDA_ICL_LP 0x34c8
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
-#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c
-#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e
#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580
#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582
+#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c
+#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e
#define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590
#define PCI_DEVICE_ID_INTEL_E7320_MCH 0x3592
#define PCI_DEVICE_ID_INTEL_MCH_PA 0x3595
@@ -2889,11 +2977,11 @@
#define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599
#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a
#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
+#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
+#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c
#define PCI_DEVICE_ID_INTEL_I7300_MCH_ERR 0x360c
#define PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 0x360f
#define PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 0x3610
-#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
-#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c
#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710
#define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711
#define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712
@@ -2906,14 +2994,19 @@
#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719
#define PCI_DEVICE_ID_INTEL_QAT_C62X 0x37c8
#define PCI_DEVICE_ID_INTEL_QAT_C62X_VF 0x37c9
+#define PCI_DEVICE_ID_INTEL_HDA_ICL_N 0x38c8
#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14
#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18
#define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a
#define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30
+#define PCI_DEVICE_ID_INTEL_HDA_ICH10_0 0x3a3e
#define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60
+#define PCI_DEVICE_ID_INTEL_HDA_ICH10_1 0x3a6e
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN 0x3b00
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX 0x3b1f
+#define PCI_DEVICE_ID_INTEL_HDA_5_3400_SERIES_0 0x3b56
+#define PCI_DEVICE_ID_INTEL_HDA_5_3400_SERIES_1 0x3b57
#define PCI_DEVICE_ID_INTEL_IOAT_SNB0 0x3c20
#define PCI_DEVICE_ID_INTEL_IOAT_SNB1 0x3c21
#define PCI_DEVICE_ID_INTEL_IOAT_SNB2 0x3c22
@@ -2924,16 +3017,12 @@
#define PCI_DEVICE_ID_INTEL_IOAT_SNB7 0x3c27
#define PCI_DEVICE_ID_INTEL_IOAT_SNB8 0x3c2e
#define PCI_DEVICE_ID_INTEL_IOAT_SNB9 0x3c2f
-#define PCI_DEVICE_ID_INTEL_UNC_HA 0x3c46
-#define PCI_DEVICE_ID_INTEL_UNC_IMC0 0x3cb0
-#define PCI_DEVICE_ID_INTEL_UNC_IMC1 0x3cb1
-#define PCI_DEVICE_ID_INTEL_UNC_IMC2 0x3cb4
-#define PCI_DEVICE_ID_INTEL_UNC_IMC3 0x3cb5
#define PCI_DEVICE_ID_INTEL_UNC_QPI0 0x3c41
#define PCI_DEVICE_ID_INTEL_UNC_QPI1 0x3c42
#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43
#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44
#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45
+#define PCI_DEVICE_ID_INTEL_UNC_HA 0x3c46
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */
@@ -2945,22 +3034,48 @@
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */
+#define PCI_DEVICE_ID_INTEL_UNC_IMC0 0x3cb0
+#define PCI_DEVICE_ID_INTEL_UNC_IMC1 0x3cb1
+#define PCI_DEVICE_ID_INTEL_UNC_IMC2 0x3cb4
+#define PCI_DEVICE_ID_INTEL_UNC_IMC3 0x3cb5
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */
#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0
#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
+#define PCI_DEVICE_ID_INTEL_HDA_ICL_H 0x3dc8
#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
-#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
-#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
-#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
-#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6
#define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030
#define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035
#define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036
-#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
+#define PCI_DEVICE_ID_INTEL_HDA_TGL_H 0x43c8
+#define PCI_DEVICE_ID_INTEL_HDA_DG1 0x490d
+#define PCI_DEVICE_ID_INTEL_HDA_EHL_0 0x4b55
+#define PCI_DEVICE_ID_INTEL_HDA_EHL_3 0x4b58
+#define PCI_DEVICE_ID_INTEL_HDA_WCL 0x4d28
+#define PCI_DEVICE_ID_INTEL_HDA_JSL_N 0x4dc8
+#define PCI_DEVICE_ID_INTEL_HDA_DG2_0 0x4f90
+#define PCI_DEVICE_ID_INTEL_HDA_DG2_1 0x4f91
+#define PCI_DEVICE_ID_INTEL_HDA_DG2_2 0x4f92
#define PCI_DEVICE_ID_INTEL_EP80579_0 0x5031
#define PCI_DEVICE_ID_INTEL_EP80579_1 0x5032
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_P 0x51c8
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_PS 0x51c9
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_P_0 0x51ca
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_P_1 0x51cb
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_M 0x51cc
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_PX 0x51cd
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_M 0x51ce
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_PX 0x51cf
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_N 0x54c8
+#define PCI_DEVICE_ID_INTEL_HDA_APL 0x5a98
+#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
+#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
+#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
+#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6
+#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
+#define PCI_DEVICE_ID_INTEL_HDA_FCL 0x67a8
+#define PCI_DEVICE_ID_INTEL_HDA_NVL_S 0x6e50
#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
@@ -2989,8 +3104,14 @@
#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0
#define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2
#define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601
+#define PCI_DEVICE_ID_INTEL_HDA_ARL 0x7728
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_S 0x7a50
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_S 0x7ad0
+#define PCI_DEVICE_ID_INTEL_HDA_MTL 0x7e28
+#define PCI_DEVICE_ID_INTEL_HDA_ARL_S 0x7f50
#define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119
#define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a
+#define PCI_DEVICE_ID_INTEL_HDA_POULSBO 0x811b
#define PCI_DEVICE_ID_INTEL_E6XX_CU 0x8183
#define PCI_DEVICE_ID_INTEL_ITC_LPC 0x8186
#define PCI_DEVICE_ID_INTEL_82454GX 0x84c4
@@ -2999,9 +3120,36 @@
#define PCI_DEVICE_ID_INTEL_82454NX 0x84cb
#define PCI_DEVICE_ID_INTEL_84460GX 0x84ea
#define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500
+#define PCI_DEVICE_ID_INTEL_HDA_LPT 0x8c20
+#define PCI_DEVICE_ID_INTEL_HDA_9_SERIES 0x8ca0
+#define PCI_DEVICE_ID_INTEL_HDA_WBG_0 0x8d20
+#define PCI_DEVICE_ID_INTEL_HDA_WBG_1 0x8d21
#define PCI_DEVICE_ID_INTEL_IXP2800 0x9004
+#define PCI_DEVICE_ID_INTEL_HDA_LKF 0x98c8
#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b
+#define PCI_DEVICE_ID_INTEL_HDA_LPT_LP_0 0x9c20
+#define PCI_DEVICE_ID_INTEL_HDA_LPT_LP_1 0x9c21
+#define PCI_DEVICE_ID_INTEL_HDA_WPT_LP 0x9ca0
+#define PCI_DEVICE_ID_INTEL_HDA_SKL_LP 0x9d70
+#define PCI_DEVICE_ID_INTEL_HDA_KBL_LP 0x9d71
+#define PCI_DEVICE_ID_INTEL_HDA_CNL_LP 0x9dc8
+#define PCI_DEVICE_ID_INTEL_HDA_TGL_LP 0xa0c8
+#define PCI_DEVICE_ID_INTEL_HDA_SKL 0xa170
+#define PCI_DEVICE_ID_INTEL_HDA_KBL 0xa171
+#define PCI_DEVICE_ID_INTEL_HDA_LBG_0 0xa1f0
+#define PCI_DEVICE_ID_INTEL_HDA_LBG_1 0xa270
+#define PCI_DEVICE_ID_INTEL_HDA_KBL_H 0xa2f0
+#define PCI_DEVICE_ID_INTEL_HDA_CNL_H 0xa348
+#define PCI_DEVICE_ID_INTEL_HDA_CML_S 0xa3f0
+#define PCI_DEVICE_ID_INTEL_HDA_LNL_P 0xa828
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
+#define PCI_DEVICE_ID_INTEL_HDA_BMG 0xe2f7
+#define PCI_DEVICE_ID_INTEL_HDA_PTL_H 0xe328
+#define PCI_DEVICE_ID_INTEL_HDA_PTL 0xe428
+#define PCI_DEVICE_ID_INTEL_HDA_CML_R 0xf0c8
+#define PCI_DEVICE_ID_INTEL_HDA_RKL_S 0xf1c8
+
+#define PCI_VENDOR_ID_WANGXUN 0x8088
#define PCI_VENDOR_ID_SCALEMP 0x8686
#define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010
@@ -3083,6 +3231,8 @@
#define PCI_VENDOR_ID_3COM_2 0xa727
+#define PCI_VENDOR_ID_SOLIDRUN 0xd063
+
#define PCI_VENDOR_ID_DIGIUM 0xd161
#define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410
diff --git a/include/linux/pcie-dwc.h b/include/linux/pcie-dwc.h
new file mode 100644
index 000000000000..8ff778e7aec0
--- /dev/null
+++ b/include/linux/pcie-dwc.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021-2023 Alibaba Inc.
+ * Copyright (C) 2025 Linaro Ltd.
+ *
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#ifndef LINUX_PCIE_DWC_H
+#define LINUX_PCIE_DWC_H
+
+#include <linux/pci_ids.h>
+
+struct dwc_pcie_vsec_id {
+ u16 vendor_id;
+ u16 vsec_id;
+ u8 vsec_rev;
+};
+
+/*
+ * VSEC IDs are allocated by the vendor, so a given ID may mean different
+ * things to different vendors. See PCIe r6.0, sec 7.9.5.2.
+ */
+static const struct dwc_pcie_vsec_id dwc_pcie_rasdes_vsec_ids[] = {
+ { .vendor_id = PCI_VENDOR_ID_ALIBABA,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
+ { .vendor_id = PCI_VENDOR_ID_AMPERE,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
+ { .vendor_id = PCI_VENDOR_ID_QCOM,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
+ { .vendor_id = PCI_VENDOR_ID_ROCKCHIP,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
+ { .vendor_id = PCI_VENDOR_ID_SAMSUNG,
+ .vsec_id = 0x02, .vsec_rev = 0x4 },
+ {}
+};
+
+#endif /* LINUX_PCIE_DWC_H */
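
/*
 * Usage sketch (my_find_rasdes_vsec is a hypothetical helper; the
 * vsec_rev check a real consumer would also do is omitted for
 * brevity): probe a device for a RAS DES VSEC from the table above
 * using the standard pci_find_vsec_capability() lookup.
 */
static u16 my_find_rasdes_vsec(struct pci_dev *pdev)
{
	const struct dwc_pcie_vsec_id *vid;
	u16 pos;

	for (vid = dwc_pcie_rasdes_vsec_ids; vid->vendor_id; vid++) {
		pos = pci_find_vsec_capability(pdev, vid->vendor_id,
					       vid->vsec_id);
		if (pos)
			return pos;
	}

	return 0;
}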
diff --git a/include/linux/pcs-lynx.h b/include/linux/pcs-lynx.h
index a6440d6ebe95..7958cccd16f2 100644
--- a/include/linux/pcs-lynx.h
+++ b/include/linux/pcs-lynx.h
@@ -9,13 +9,9 @@
#include <linux/mdio.h>
#include <linux/phylink.h>
-struct lynx_pcs {
- struct phylink_pcs pcs;
- struct mdio_device *mdio;
-};
+struct phylink_pcs *lynx_pcs_create_mdiodev(struct mii_bus *bus, int addr);
+struct phylink_pcs *lynx_pcs_create_fwnode(struct fwnode_handle *node);
-struct lynx_pcs *lynx_pcs_create(struct mdio_device *mdio);
-
-void lynx_pcs_destroy(struct lynx_pcs *pcs);
+void lynx_pcs_destroy(struct phylink_pcs *pcs);
#endif /* __LINUX_PCS_LYNX_H */
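
/*
 * Usage sketch (my_attach_lynx_pcs is hypothetical; bus and addr come
 * from the MAC driver): with the opaque API, the PCS is created
 * directly from an MDIO bus address and returned as a generic
 * phylink_pcs.
 */
static struct phylink_pcs *my_attach_lynx_pcs(struct mii_bus *bus, int addr)
{
	struct phylink_pcs *pcs;

	pcs = lynx_pcs_create_mdiodev(bus, addr);
	if (IS_ERR(pcs))
		return pcs;

	/* ... return pcs from the MAC's mac_select_pcs() callback;
	 * call lynx_pcs_destroy(pcs) on the remove path ...
	 */
	return pcs;
}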
diff --git a/include/linux/pcs-rzn1-miic.h b/include/linux/pcs-rzn1-miic.h
new file mode 100644
index 000000000000..56d12b21365d
--- /dev/null
+++ b/include/linux/pcs-rzn1-miic.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Schneider Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#ifndef __LINUX_PCS_MIIC_H
+#define __LINUX_PCS_MIIC_H
+
+struct phylink;
+struct device_node;
+
+struct phylink_pcs *miic_create(struct device *dev, struct device_node *np);
+
+void miic_destroy(struct phylink_pcs *pcs);
+
+#endif /* __LINUX_PCS_MIIC_H */
diff --git a/include/linux/pcs/pcs-mtk-lynxi.h b/include/linux/pcs/pcs-mtk-lynxi.h
new file mode 100644
index 000000000000..be3b4ab32f4a
--- /dev/null
+++ b/include/linux/pcs/pcs-mtk-lynxi.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_PCS_MTK_LYNXI_H
+#define __LINUX_PCS_MTK_LYNXI_H
+
+#include <linux/phylink.h>
+#include <linux/regmap.h>
+
+#define MTK_SGMII_FLAG_PN_SWAP BIT(0)
+struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev,
+ struct regmap *regmap,
+ u32 ana_rgc3, u32 flags);
+void mtk_pcs_lynxi_destroy(struct phylink_pcs *pcs);
+#endif
diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h
index 2cb5188a7ef1..36073f7b6bb4 100644
--- a/include/linux/pcs/pcs-xpcs.h
+++ b/include/linux/pcs/pcs-xpcs.h
@@ -7,40 +7,57 @@
#ifndef __LINUX_PCS_XPCS_H
#define __LINUX_PCS_XPCS_H
+#include <linux/clk.h>
+#include <linux/fwnode.h>
+#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/phylink.h>
+#include <linux/types.h>
/* AN mode */
#define DW_AN_C73 1
#define DW_AN_C37_SGMII 2
+#define DW_2500BASEX 3
+#define DW_AN_C37_1000BASEX 4
+#define DW_10GBASER 5
-struct mdio_xpcs_args {
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
- struct mii_bus *bus;
- int addr;
- int an_mode;
+enum dw_xpcs_pcs_id {
+ DW_XPCS_ID_NATIVE = 0,
+ NXP_SJA1105_XPCS_ID = 0x00000010,
+ NXP_SJA1110_XPCS_ID = 0x00000020,
+ DW_XPCS_ID = 0x7996ced0,
+ DW_XPCS_ID_MASK = 0xffffffff,
};
-struct mdio_xpcs_ops {
- int (*validate)(struct mdio_xpcs_args *xpcs,
- unsigned long *supported,
- struct phylink_link_state *state);
- int (*config)(struct mdio_xpcs_args *xpcs,
- const struct phylink_link_state *state);
- int (*get_state)(struct mdio_xpcs_args *xpcs,
- struct phylink_link_state *state);
- int (*link_up)(struct mdio_xpcs_args *xpcs, int speed,
- phy_interface_t interface);
- int (*probe)(struct mdio_xpcs_args *xpcs, phy_interface_t interface);
+enum dw_xpcs_pma_id {
+ DW_XPCS_PMA_ID_NATIVE = 0,
+ DW_XPCS_PMA_GEN1_3G_ID,
+ DW_XPCS_PMA_GEN2_3G_ID,
+ DW_XPCS_PMA_GEN2_6G_ID,
+ DW_XPCS_PMA_GEN4_3G_ID,
+ DW_XPCS_PMA_GEN4_6G_ID,
+ DW_XPCS_PMA_GEN5_10G_ID,
+ DW_XPCS_PMA_GEN5_12G_ID,
+ WX_TXGBE_XPCS_PMA_10G_ID = 0xfc806000,
+ /* Meta Platforms OUI 88:25:08, model 0, revision 0 */
+ MP_FBNIC_XPCS_PMA_100G_ID = 0x46904000,
};
-#if IS_ENABLED(CONFIG_PCS_XPCS)
-struct mdio_xpcs_ops *mdio_xpcs_get_ops(void);
-#else
-static inline struct mdio_xpcs_ops *mdio_xpcs_get_ops(void)
-{
- return NULL;
-}
-#endif
+struct dw_xpcs_info {
+ u32 pcs;
+ u32 pma;
+};
+
+struct dw_xpcs;
+
+struct phylink_pcs *xpcs_to_phylink_pcs(struct dw_xpcs *xpcs);
+int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface);
+void xpcs_config_eee_mult_fact(struct dw_xpcs *xpcs, u8 mult_fact);
+struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr);
+struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode);
+void xpcs_destroy(struct dw_xpcs *xpcs);
+
+struct phylink_pcs *xpcs_create_pcs_mdiodev(struct mii_bus *bus, int addr);
+void xpcs_destroy_pcs(struct phylink_pcs *pcs);
#endif /* __LINUX_PCS_XPCS_H */
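
/*
 * Usage sketch (my_probe_xpcs is hypothetical): create an XPCS
 * instance over MDIO, obtain its phylink_pcs for use by the MAC
 * driver, and tear it down again.
 */
static int my_probe_xpcs(struct mii_bus *bus, int addr)
{
	struct dw_xpcs *xpcs;
	struct phylink_pcs *pcs;

	xpcs = xpcs_create_mdiodev(bus, addr);
	if (IS_ERR(xpcs))
		return PTR_ERR(xpcs);

	pcs = xpcs_to_phylink_pcs(xpcs);
	/* ... hand pcs to phylink via mac_select_pcs() ... */

	xpcs_destroy(xpcs);
	return 0;
}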
diff --git a/include/linux/pda_power.h b/include/linux/pda_power.h
deleted file mode 100644
index 2a69db4b60b7..000000000000
--- a/include/linux/pda_power.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Common power driver for PDAs and phones with one or two external
- * power supplies (AC/USB) connected to main and backup batteries,
- * and optional builtin charger.
- *
- * Copyright © 2007 Anton Vorontsov <cbou@mail.ru>
- */
-
-#ifndef __PDA_POWER_H__
-#define __PDA_POWER_H__
-
-#define PDA_POWER_CHARGE_AC (1 << 0)
-#define PDA_POWER_CHARGE_USB (1 << 1)
-
-struct device;
-
-struct pda_power_pdata {
- int (*init)(struct device *dev);
- int (*is_ac_online)(void);
- int (*is_usb_online)(void);
- void (*set_charge)(int flags);
- void (*exit)(struct device *dev);
- int (*suspend)(pm_message_t state);
- int (*resume)(void);
-
- char **supplied_to;
- size_t num_supplicants;
-
- unsigned int wait_for_status; /* msecs, default is 500 */
- unsigned int wait_for_charger; /* msecs, default is 500 */
- unsigned int polling_interval; /* msecs, default is 2000 */
-
- unsigned long ac_max_uA; /* current to draw when on AC */
-
- bool use_otg_notifier;
-};
-
-#endif /* __PDA_POWER_H__ */
diff --git a/include/linux/pds/pds_adminq.h b/include/linux/pds/pds_adminq.h
new file mode 100644
index 000000000000..40ff0ec2b879
--- /dev/null
+++ b/include/linux/pds/pds_adminq.h
@@ -0,0 +1,1545 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#ifndef _PDS_CORE_ADMINQ_H_
+#define _PDS_CORE_ADMINQ_H_
+
+#define PDSC_ADMINQ_MAX_POLL_INTERVAL 256000 /* usecs */
+
+enum pds_core_adminq_flags {
+ PDS_AQ_FLAG_FASTPOLL = BIT(1), /* completion poll at 1ms */
+};
+
+/*
+ * enum pds_core_adminq_opcode - AdminQ command opcodes
+ * These commands are only processed on AdminQ, not available in devcmd
+ */
+enum pds_core_adminq_opcode {
+ PDS_AQ_CMD_NOP = 0,
+
+ /* Client control */
+ PDS_AQ_CMD_CLIENT_REG = 6,
+ PDS_AQ_CMD_CLIENT_UNREG = 7,
+ PDS_AQ_CMD_CLIENT_CMD = 8,
+
+ /* LIF commands */
+ PDS_AQ_CMD_LIF_IDENTIFY = 20,
+ PDS_AQ_CMD_LIF_INIT = 21,
+ PDS_AQ_CMD_LIF_RESET = 22,
+ PDS_AQ_CMD_LIF_GETATTR = 23,
+ PDS_AQ_CMD_LIF_SETATTR = 24,
+ PDS_AQ_CMD_LIF_SETPHC = 25,
+
+ PDS_AQ_CMD_RX_MODE_SET = 30,
+ PDS_AQ_CMD_RX_FILTER_ADD = 31,
+ PDS_AQ_CMD_RX_FILTER_DEL = 32,
+
+ /* Queue commands */
+ PDS_AQ_CMD_Q_IDENTIFY = 39,
+ PDS_AQ_CMD_Q_INIT = 40,
+ PDS_AQ_CMD_Q_CONTROL = 41,
+
+ /* SR/IOV commands */
+ PDS_AQ_CMD_VF_GETATTR = 60,
+ PDS_AQ_CMD_VF_SETATTR = 61,
+};
+
+/*
+ * enum pds_core_notifyq_opcode - NotifyQ event codes
+ */
+enum pds_core_notifyq_opcode {
+ PDS_EVENT_LINK_CHANGE = 1,
+ PDS_EVENT_RESET = 2,
+ PDS_EVENT_XCVR = 5,
+ PDS_EVENT_CLIENT = 6,
+};
+
+#define PDS_COMP_COLOR_MASK 0x80
+
+/**
+ * struct pds_core_notifyq_event - Generic event reporting structure
+ * @eid: event number
+ * @ecode: event code
+ *
+ * This is the generic event report struct from which the other
+ * actual events will be formed.
+ */
+struct pds_core_notifyq_event {
+ __le64 eid;
+ __le16 ecode;
+};
+
+/**
+ * struct pds_core_link_change_event - Link change event notification
+ * @eid: event number
+ * @ecode: event code = PDS_EVENT_LINK_CHANGE
+ * @link_status: link up/down, with error bits
+ * @link_speed: speed of the network link
+ *
+ * Sent when the network link state changes between UP and DOWN
+ */
+struct pds_core_link_change_event {
+ __le64 eid;
+ __le16 ecode;
+ __le16 link_status;
+ __le32 link_speed; /* units of 1Mbps: e.g. 10000 = 10Gbps */
+};
+
+/**
+ * struct pds_core_reset_event - Reset event notification
+ * @eid: event number
+ * @ecode: event code = PDS_EVENT_RESET
+ * @reset_code: reset type
+ * @state: 0=pending, 1=complete, 2=error
+ *
+ * Sent when the NIC or some subsystem is going to be or
+ * has been reset.
+ */
+struct pds_core_reset_event {
+ __le64 eid;
+ __le16 ecode;
+ u8 reset_code;
+ u8 state;
+};
+
+/**
+ * struct pds_core_client_event - Client event notification
+ * @eid: event number
+ * @ecode: event code = PDS_EVENT_CLIENT
+ * @client_id: client to send event to
+ * @client_event: wrapped event struct for the client
+ *
+ * Sent when an event needs to be passed on to a client
+ */
+struct pds_core_client_event {
+ __le64 eid;
+ __le16 ecode;
+ __le16 client_id;
+ u8 client_event[54];
+};
+
+/**
+ * struct pds_core_notifyq_cmd - Placeholder for building qcq
+ * @data: anonymous field for building the qcq
+ */
+struct pds_core_notifyq_cmd {
+ __le32 data; /* Not used but needed for qcq structure */
+};
+
+/*
+ * union pds_core_notifyq_comp - Overlay of notifyq event structures
+ */
+union pds_core_notifyq_comp {
+ struct {
+ __le64 eid;
+ __le16 ecode;
+ };
+ struct pds_core_notifyq_event event;
+ struct pds_core_link_change_event link_change;
+ struct pds_core_reset_event reset;
+ u8 data[64];
+};
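
/*
 * Dispatch sketch (my_handle_notifyq_event is hypothetical): a
 * consumer reads the common eid/ecode prefix, then interprets the
 * completion through the union member matching the event code.
 */
static void my_handle_notifyq_event(const union pds_core_notifyq_comp *comp)
{
	switch (le16_to_cpu(comp->ecode)) {
	case PDS_EVENT_LINK_CHANGE:
		pr_info("link status %u, speed %u Mbps\n",
			le16_to_cpu(comp->link_change.link_status),
			le32_to_cpu(comp->link_change.link_speed));
		break;
	case PDS_EVENT_RESET:
		pr_info("reset code %u, state %u\n",
			comp->reset.reset_code, comp->reset.state);
		break;
	default:
		break;
	}
}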
+
+#define PDS_DEVNAME_LEN 32
+/**
+ * struct pds_core_client_reg_cmd - Register a new client with DSC
+ * @opcode: opcode PDS_AQ_CMD_CLIENT_REG
+ * @rsvd: word boundary padding
+ * @devname: text name of client device
+ * @vif_type: what type of device (enum pds_core_vif_types)
+ *
+ * Tell the DSC of the new client, and receive a client_id from DSC.
+ */
+struct pds_core_client_reg_cmd {
+ u8 opcode;
+ u8 rsvd[3];
+ char devname[PDS_DEVNAME_LEN];
+ u8 vif_type;
+};
+
+/**
+ * struct pds_core_client_reg_comp - Client registration completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @client_id: New id assigned by DSC
+ * @rsvd1: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_client_reg_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ __le16 client_id;
+ u8 rsvd1[9];
+ u8 color;
+};
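
/*
 * Fill-in sketch (my_fill_client_reg is hypothetical; posting the
 * command and reaping the completion are adapter plumbing left out
 * here): build a registration command, after which the completion's
 * client_id identifies the client in subsequent commands.
 */
static void my_fill_client_reg(struct pds_core_client_reg_cmd *cmd,
			       const char *name, u8 vif_type)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = PDS_AQ_CMD_CLIENT_REG;
	strscpy(cmd->devname, name, sizeof(cmd->devname));
	cmd->vif_type = vif_type;
}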
+
+/**
+ * struct pds_core_client_unreg_cmd - Unregister a client from DSC
+ * @opcode: opcode PDS_AQ_CMD_CLIENT_UNREG
+ * @rsvd: word boundary padding
+ * @client_id: id of client being removed
+ *
+ * Tell the DSC this client is going away and remove its context
+ * This uses the generic completion.
+ */
+struct pds_core_client_unreg_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 client_id;
+};
+
+/**
+ * struct pds_core_client_request_cmd - Pass along a wrapped client AdminQ cmd
+ * @opcode: opcode PDS_AQ_CMD_CLIENT_CMD
+ * @rsvd: word boundary padding
+ * @client_id: id of client whose command is being passed along
+ * @client_cmd: the wrapped client command
+ *
+ * Proxy post an adminq command for the client.
+ * This uses the generic completion.
+ */
+struct pds_core_client_request_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 client_id;
+ u8 client_cmd[60];
+};
+
+#define PDS_CORE_MAX_FRAGS 16
+
+#define PDS_CORE_QCQ_F_INITED BIT(0)
+#define PDS_CORE_QCQ_F_SG BIT(1)
+#define PDS_CORE_QCQ_F_INTR BIT(2)
+#define PDS_CORE_QCQ_F_TX_STATS BIT(3)
+#define PDS_CORE_QCQ_F_RX_STATS BIT(4)
+#define PDS_CORE_QCQ_F_NOTIFYQ BIT(5)
+#define PDS_CORE_QCQ_F_CMB_RINGS BIT(6)
+#define PDS_CORE_QCQ_F_CORE BIT(7)
+
+enum pds_core_lif_type {
+ PDS_CORE_LIF_TYPE_DEFAULT = 0,
+};
+
+#define PDS_CORE_IFNAMSIZ 16
+
+/**
+ * enum pds_core_logical_qtype - Logical Queue Types
+ * @PDS_CORE_QTYPE_ADMINQ: Administrative Queue
+ * @PDS_CORE_QTYPE_NOTIFYQ: Notify Queue
+ * @PDS_CORE_QTYPE_RXQ: Receive Queue
+ * @PDS_CORE_QTYPE_TXQ: Transmit Queue
+ * @PDS_CORE_QTYPE_EQ: Event Queue
+ * @PDS_CORE_QTYPE_MAX: Max queue type supported
+ */
+enum pds_core_logical_qtype {
+ PDS_CORE_QTYPE_ADMINQ = 0,
+ PDS_CORE_QTYPE_NOTIFYQ = 1,
+ PDS_CORE_QTYPE_RXQ = 2,
+ PDS_CORE_QTYPE_TXQ = 3,
+ PDS_CORE_QTYPE_EQ = 4,
+
+ PDS_CORE_QTYPE_MAX = 16 /* don't change - used in struct size */
+};
+
+/**
+ * union pds_core_lif_config - LIF configuration
+ * @state: LIF state (enum pds_core_lif_state)
+ * @rsvd: Word boundary padding
+ * @name: LIF name
+ * @rsvd2: Word boundary padding
+ * @features: LIF features active (enum pds_core_hw_features)
+ * @queue_count: Queue counts per queue-type
+ * @words: Full union buffer size
+ */
+union pds_core_lif_config {
+ struct {
+ u8 state;
+ u8 rsvd[3];
+ char name[PDS_CORE_IFNAMSIZ];
+ u8 rsvd2[12];
+ __le64 features;
+ __le32 queue_count[PDS_CORE_QTYPE_MAX];
+ } __packed;
+ __le32 words[64];
+};
+
+/**
+ * struct pds_core_lif_status - LIF status register
+ * @eid: most recent NotifyQ event id
+ * @rsvd: padding to full struct size
+ */
+struct pds_core_lif_status {
+ __le64 eid;
+ u8 rsvd[56];
+};
+
+/**
+ * struct pds_core_lif_info - LIF info structure
+ * @config: LIF configuration structure
+ * @status: LIF status structure
+ */
+struct pds_core_lif_info {
+ union pds_core_lif_config config;
+ struct pds_core_lif_status status;
+};
+
+/**
+ * struct pds_core_lif_identity - LIF identity information (type-specific)
+ * @features: LIF features (see enum pds_core_hw_features)
+ * @version: Identify structure version
+ * @hw_index: LIF hardware index
+ * @rsvd: Word boundary padding
+ * @max_nb_sessions: Maximum number of sessions supported
+ * @rsvd2: buffer padding
+ * @config: LIF config struct with features, q counts
+ */
+struct pds_core_lif_identity {
+ __le64 features;
+ u8 version;
+ u8 hw_index;
+ u8 rsvd[2];
+ __le32 max_nb_sessions;
+ u8 rsvd2[120];
+ union pds_core_lif_config config;
+};
+
+/**
+ * struct pds_core_lif_identify_cmd - Get LIF identity info command
+ * @opcode: Opcode PDS_AQ_CMD_LIF_IDENTIFY
+ * @type: LIF type (enum pds_core_lif_type)
+ * @client_id: Client identifier
+ * @ver: Version of identify returned by device
+ * @rsvd: Word boundary padding
+ * @ident_pa: DMA address to receive identity info
+ *
+ * Firmware will copy LIF identity data (struct pds_core_lif_identity)
+ * into the buffer address given.
+ */
+struct pds_core_lif_identify_cmd {
+ u8 opcode;
+ u8 type;
+ __le16 client_id;
+ u8 ver;
+ u8 rsvd[3];
+ __le64 ident_pa;
+};
+
+/**
+ * struct pds_core_lif_identify_comp - LIF identify command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @ver: Version of identify returned by device
+ * @bytes: Bytes copied into the buffer
+ * @rsvd: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_lif_identify_comp {
+ u8 status;
+ u8 ver;
+ __le16 bytes;
+ u8 rsvd[11];
+ u8 color;
+};
+
+/**
+ * struct pds_core_lif_init_cmd - LIF init command
+ * @opcode: Opcode PDS_AQ_CMD_LIF_INIT
+ * @type: LIF type (enum pds_core_lif_type)
+ * @client_id: Client identifier
+ * @rsvd: Word boundary padding
+ * @info_pa: Destination address for LIF info (struct pds_core_lif_info)
+ */
+struct pds_core_lif_init_cmd {
+ u8 opcode;
+ u8 type;
+ __le16 client_id;
+ __le32 rsvd;
+ __le64 info_pa;
+};
+
+/**
+ * struct pds_core_lif_init_comp - LIF init command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @hw_index: Hardware index of the initialized LIF
+ * @rsvd1: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_lif_init_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 hw_index;
+ u8 rsvd1[11];
+ u8 color;
+};
+
+/**
+ * struct pds_core_lif_reset_cmd - LIF reset command
+ * Will reset only the specified LIF.
+ * @opcode: Opcode PDS_AQ_CMD_LIF_RESET
+ * @rsvd: Word boundary padding
+ * @client_id: Client identifier
+ */
+struct pds_core_lif_reset_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 client_id;
+};
+
+/**
+ * enum pds_core_lif_attr - List of LIF attributes
+ * @PDS_CORE_LIF_ATTR_STATE: LIF state attribute
+ * @PDS_CORE_LIF_ATTR_NAME: LIF name attribute
+ * @PDS_CORE_LIF_ATTR_FEATURES: LIF features attribute
+ * @PDS_CORE_LIF_ATTR_STATS_CTRL: LIF statistics control attribute
+ */
+enum pds_core_lif_attr {
+ PDS_CORE_LIF_ATTR_STATE = 0,
+ PDS_CORE_LIF_ATTR_NAME = 1,
+ PDS_CORE_LIF_ATTR_FEATURES = 4,
+ PDS_CORE_LIF_ATTR_STATS_CTRL = 6,
+};
+
+/**
+ * struct pds_core_lif_setattr_cmd - Set LIF attributes on the NIC
+ * @opcode: Opcode PDS_AQ_CMD_LIF_SETATTR
+ * @attr: Attribute type (enum pds_core_lif_attr)
+ * @client_id: Client identifier
+ * @state: LIF state (enum pds_core_lif_state)
+ * @name: The name string, 0 terminated
+ * @features: Features (enum pds_core_hw_features)
+ * @stats_ctl: Stats control commands (enum pds_core_stats_ctl_cmd)
+ * @rsvd: Command Buffer padding
+ */
+struct pds_core_lif_setattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 client_id;
+ union {
+ u8 state;
+ char name[PDS_CORE_IFNAMSIZ];
+ __le64 features;
+ u8 stats_ctl;
+ u8 rsvd[60];
+ } __packed;
+};
+
+/**
+ * struct pds_core_lif_setattr_comp - LIF set attr command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @features: Features (enum pds_core_hw_features)
+ * @rsvd2: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_lif_setattr_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ union {
+ __le64 features;
+ u8 rsvd2[11];
+ } __packed;
+ u8 color;
+};
+
+/**
+ * struct pds_core_lif_getattr_cmd - Get LIF attributes from the NIC
+ * @opcode: Opcode PDS_AQ_CMD_LIF_GETATTR
+ * @attr: Attribute type (enum pds_core_lif_attr)
+ * @client_id: Client identifier
+ */
+struct pds_core_lif_getattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 client_id;
+};
+
+/**
+ * struct pds_core_lif_getattr_comp - LIF get attr command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @state: LIF state (enum pds_core_lif_state)
+ * @features: Features (enum pds_core_hw_features)
+ * @rsvd2: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_lif_getattr_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ union {
+ u8 state;
+ __le64 features;
+ u8 rsvd2[11];
+ } __packed;
+ u8 color;
+};
+
+/**
+ * struct pds_core_q_identity - Queue identity information
+ * @version: Queue type version that can be used with FW
+ * @supported: Bitfield of queue versions, first bit = ver 0
+ * @rsvd: Word boundary padding
+ * @features: Queue features
+ * @desc_sz: Descriptor size
+ * @comp_sz: Completion descriptor size
+ * @rsvd2: Word boundary padding
+ */
+struct pds_core_q_identity {
+ u8 version;
+ u8 supported;
+ u8 rsvd[6];
+#define PDS_CORE_QIDENT_F_CQ 0x01 /* queue has completion ring */
+ __le64 features;
+ __le16 desc_sz;
+ __le16 comp_sz;
+ u8 rsvd2[6];
+};
+
+/**
+ * struct pds_core_q_identify_cmd - queue identify command
+ * @opcode: Opcode PDS_AQ_CMD_Q_IDENTIFY
+ * @type: Logical queue type (enum pds_core_logical_qtype)
+ * @client_id: Client identifier
+ * @ver: Highest queue type version that the driver supports
+ * @rsvd: Word boundary padding
+ * @ident_pa: DMA address to receive the data (struct pds_core_q_identity)
+ */
+struct pds_core_q_identify_cmd {
+ u8 opcode;
+ u8 type;
+ __le16 client_id;
+ u8 ver;
+ u8 rsvd[3];
+ __le64 ident_pa;
+};
+
+/**
+ * struct pds_core_q_identify_comp - queue identify command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @ver: Queue type version that can be used with FW
+ * @rsvd1: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_core_q_identify_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 ver;
+ u8 rsvd1[10];
+ u8 color;
+};
+
+/**
+ * struct pds_core_q_init_cmd - Queue init command
+ * @opcode: Opcode PDS_AQ_CMD_Q_INIT
+ * @type: Logical queue type
+ * @client_id: Client identifier
+ * @ver: Queue type version
+ * @rsvd: Word boundary padding
+ * @index: (LIF, qtype) relative admin queue index
+ * @pid: Process ID
+ * @intr_index: Interrupt control register index, or Event queue index
+ * @flags:
+ * IRQ: Interrupt requested on completion
+ * ENA: Enable the queue. If ENA=0 the queue is initialized
+ * but remains disabled, to be later enabled with the
+ * Queue Enable command. If ENA=1, then queue is
+ * initialized and then enabled.
+ * @cos: Class of service for this queue
+ * @ring_size: Queue ring size, encoded as a log2(size), in
+ * number of descriptors. The actual ring size is
+ * (1 << ring_size). For example, to select a ring size
+ * of 64 descriptors write ring_size = 6. The minimum
+ * ring_size value is 2 for a ring of 4 descriptors.
+ * The maximum ring_size value is 12 for a ring of 4k
+ * descriptors. Values of ring_size <2 and >12 are
+ * reserved.
+ * @ring_base: Queue ring base address
+ * @cq_ring_base: Completion queue ring base address
+ */
+struct pds_core_q_init_cmd {
+ u8 opcode;
+ u8 type;
+ __le16 client_id;
+ u8 ver;
+ u8 rsvd[3];
+ __le32 index;
+ __le16 pid;
+ __le16 intr_index;
+ __le16 flags;
+#define PDS_CORE_QINIT_F_IRQ 0x01 /* Request interrupt on completion */
+#define PDS_CORE_QINIT_F_ENA 0x02 /* Enable the queue */
+ u8 cos;
+#define PDS_CORE_QSIZE_MIN_LG2 2
+#define PDS_CORE_QSIZE_MAX_LG2 12
+ u8 ring_size;
+ __le64 ring_base;
+ __le64 cq_ring_base;
+} __packed;
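+
+/*
+ * A sketch of the ring_size encoding described above (the helper name is
+ * hypothetical, not part of this interface): a power-of-two descriptor
+ * count is converted with ilog2() and clamped to the supported range.
+ *
+ *	static u8 pdsc_ring_size_lg2(u32 num_descs)
+ *	{
+ *		return clamp_t(u8, ilog2(num_descs),
+ *			       PDS_CORE_QSIZE_MIN_LG2, PDS_CORE_QSIZE_MAX_LG2);
+ *	}
+ *
+ * e.g. 64 descriptors -> ring_size = 6, since 1 << 6 == 64.
+ */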
+
+/**
+ * struct pds_core_q_init_comp - Queue init command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the descriptor ring for which this is the completion
+ * @hw_index: Hardware Queue ID
+ * @hw_type: Hardware Queue type
+ * @rsvd2: Word boundary padding
+ * @color: Color
+ */
+struct pds_core_q_init_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ __le32 hw_index;
+ u8 hw_type;
+ u8 rsvd2[6];
+ u8 color;
+};
+
+/*
+ * enum pds_vdpa_cmd_opcode - vDPA Device commands
+ */
+enum pds_vdpa_cmd_opcode {
+ PDS_VDPA_CMD_INIT = 48,
+ PDS_VDPA_CMD_IDENT = 49,
+ PDS_VDPA_CMD_RESET = 51,
+ PDS_VDPA_CMD_VQ_RESET = 52,
+ PDS_VDPA_CMD_VQ_INIT = 53,
+ PDS_VDPA_CMD_STATUS_UPDATE = 54,
+ PDS_VDPA_CMD_SET_FEATURES = 55,
+ PDS_VDPA_CMD_SET_ATTR = 56,
+};
+
+/**
+ * struct pds_vdpa_cmd - generic command
+ * @opcode: Opcode
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ */
+struct pds_vdpa_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+};
+
+/**
+ * struct pds_vdpa_init_cmd - INIT command
+ * @opcode: Opcode PDS_VDPA_CMD_INIT
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ */
+struct pds_vdpa_init_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+};
+
+/**
+ * struct pds_vdpa_ident - vDPA identification data
+ * @hw_features: vDPA features supported by device
+ * @max_vqs: max queues available (2 queues for a single queuepair)
+ * @max_qlen: log(2) of maximum number of descriptors
+ * @min_qlen: log(2) of minimum number of descriptors
+ *
+ * This struct is used in a DMA block that is set up for the PDS_VDPA_CMD_IDENT
+ * transaction. Set up the DMA block and send the address in the IDENT cmd
+ * data, the DSC will write the ident information, then we can remove the DMA
+ * block after reading the answer. If the completion status is 0 the ident
+ * information is valid; otherwise there was an error and the data should be
+ * considered invalid.
+ */
+struct pds_vdpa_ident {
+ __le64 hw_features;
+ __le16 max_vqs;
+ __le16 max_qlen;
+ __le16 min_qlen;
+};
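+
+/*
+ * A sketch of the IDENT flow described above; the device pointer, locals,
+ * and error handling are assumptions for illustration only:
+ *
+ *	union pds_core_adminq_cmd cmd = {
+ *		.vdpa_ident.opcode = PDS_VDPA_CMD_IDENT,
+ *		.vdpa_ident.vf_id = cpu_to_le16(vf_id),
+ *		.vdpa_ident.len = cpu_to_le32(sizeof(struct pds_vdpa_ident)),
+ *	};
+ *	union pds_core_adminq_comp comp = {};
+ *	struct pds_vdpa_ident *ident;
+ *	dma_addr_t ident_pa;
+ *
+ *	ident = dma_alloc_coherent(dev, sizeof(*ident), &ident_pa, GFP_KERNEL);
+ *	cmd.vdpa_ident.ident_pa = cpu_to_le64(ident_pa);
+ *	err = pds_client_adminq_cmd(padev, &cmd, sizeof(cmd.vdpa_ident),
+ *				    &comp, 0);
+ *	// on success (comp.status == 0) ident->hw_features etc. are valid
+ *	dma_free_coherent(dev, sizeof(*ident), ident, ident_pa);
+ */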
+
+/**
+ * struct pds_vdpa_ident_cmd - IDENT command
+ * @opcode: Opcode PDS_VDPA_CMD_IDENT
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @len: length of ident info DMA space
+ * @ident_pa: address for DMA of ident info (struct pds_vdpa_ident)
+ * only used for this transaction, then forgotten by DSC
+ */
+struct pds_vdpa_ident_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ __le32 len;
+ __le64 ident_pa;
+};
+
+/**
+ * struct pds_vdpa_status_cmd - STATUS_UPDATE command
+ * @opcode: Opcode PDS_VDPA_CMD_STATUS_UPDATE
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @status: new status bits
+ */
+struct pds_vdpa_status_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ u8 status;
+};
+
+/**
+ * enum pds_vdpa_attr - List of VDPA device attributes
+ * @PDS_VDPA_ATTR_MAC: MAC address
+ * @PDS_VDPA_ATTR_MAX_VQ_PAIRS: Max virtqueue pairs
+ */
+enum pds_vdpa_attr {
+ PDS_VDPA_ATTR_MAC = 1,
+ PDS_VDPA_ATTR_MAX_VQ_PAIRS = 2,
+};
+
+/**
+ * struct pds_vdpa_setattr_cmd - SET_ATTR command
+ * @opcode: Opcode PDS_VDPA_CMD_SET_ATTR
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @attr: attribute to be changed (enum pds_vdpa_attr)
+ * @pad: Word boundary padding
+ * @mac: new mac address to be assigned as vdpa device address
+ * @max_vq_pairs: new limit of virtqueue pairs
+ */
+struct pds_vdpa_setattr_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ u8 attr;
+ u8 pad[3];
+ union {
+ u8 mac[6];
+ __le16 max_vq_pairs;
+ } __packed;
+};
+
+/**
+ * struct pds_vdpa_vq_init_cmd - queue init command
+ * @opcode: Opcode PDS_VDPA_CMD_VQ_INIT
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @qid: Queue id (bit0 clear = rx, bit0 set = tx, qid=N is ctrlq)
+ * @len: log(2) of max descriptor count
+ * @desc_addr: DMA address of descriptor area
+ * @avail_addr: DMA address of available descriptors (aka driver area)
+ * @used_addr: DMA address of used descriptors (aka device area)
+ * @intr_index: interrupt index
+ * @avail_index: initial device position in available ring
+ * @used_index: initial device position in used ring
+ */
+struct pds_vdpa_vq_init_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ __le16 qid;
+ __le16 len;
+ __le64 desc_addr;
+ __le64 avail_addr;
+ __le64 used_addr;
+ __le16 intr_index;
+ __le16 avail_index;
+ __le16 used_index;
+};
+
+/**
+ * struct pds_vdpa_vq_init_comp - queue init completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @hw_qtype: HW queue type, used in doorbell selection
+ * @hw_qindex: HW queue index, used in doorbell selection
+ * @rsvd: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_vdpa_vq_init_comp {
+ u8 status;
+ u8 hw_qtype;
+ __le16 hw_qindex;
+ u8 rsvd[11];
+ u8 color;
+};
+
+/**
+ * struct pds_vdpa_vq_reset_cmd - queue reset command
+ * @opcode: Opcode PDS_VDPA_CMD_VQ_RESET
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @qid: Queue id
+ */
+struct pds_vdpa_vq_reset_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ __le16 qid;
+};
+
+/**
+ * struct pds_vdpa_vq_reset_comp - queue reset completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd0: Word boundary padding
+ * @avail_index: current device position in available ring
+ * @used_index: current device position in used ring
+ * @rsvd: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_vdpa_vq_reset_comp {
+ u8 status;
+ u8 rsvd0;
+ __le16 avail_index;
+ __le16 used_index;
+ u8 rsvd[9];
+ u8 color;
+};
+
+/**
+ * struct pds_vdpa_set_features_cmd - set hw features
+ * @opcode: Opcode PDS_VDPA_CMD_SET_FEATURES
+ * @vdpa_index: Index for vdpa subdevice
+ * @vf_id: VF id
+ * @rsvd: Word boundary padding
+ * @features: Feature bit mask
+ */
+struct pds_vdpa_set_features_cmd {
+ u8 opcode;
+ u8 vdpa_index;
+ __le16 vf_id;
+ __le32 rsvd;
+ __le64 features;
+};
+
+#define PDS_LM_DEVICE_STATE_LENGTH 65536
+#define PDS_LM_CHECK_DEVICE_STATE_LENGTH(X) \
+ PDS_CORE_SIZE_CHECK(union, PDS_LM_DEVICE_STATE_LENGTH, X)
+
+/*
+ * enum pds_lm_cmd_opcode - Live Migration Device commands
+ */
+enum pds_lm_cmd_opcode {
+ PDS_LM_CMD_HOST_VF_STATUS = 1,
+
+ /* Device state commands */
+ PDS_LM_CMD_STATE_SIZE = 16,
+ PDS_LM_CMD_SUSPEND = 18,
+ PDS_LM_CMD_SUSPEND_STATUS = 19,
+ PDS_LM_CMD_RESUME = 20,
+ PDS_LM_CMD_SAVE = 21,
+ PDS_LM_CMD_RESTORE = 22,
+
+ /* Dirty page tracking commands */
+ PDS_LM_CMD_DIRTY_STATUS = 32,
+ PDS_LM_CMD_DIRTY_ENABLE = 33,
+ PDS_LM_CMD_DIRTY_DISABLE = 34,
+ PDS_LM_CMD_DIRTY_READ_SEQ = 35,
+ PDS_LM_CMD_DIRTY_WRITE_ACK = 36,
+};
+
+/**
+ * struct pds_lm_cmd - generic command
+ * @opcode: Opcode
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @rsvd2: Structure padding to 60 bytes
+ */
+struct pds_lm_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 rsvd2[56];
+};
+
+/**
+ * struct pds_lm_state_size_cmd - STATE_SIZE command
+ * @opcode: Opcode
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ */
+struct pds_lm_state_size_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+};
+
+/**
+ * struct pds_lm_state_size_comp - STATE_SIZE command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the desc ring for which this is the completion
+ * @size: Size of the device state
+ * @rsvd2: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_lm_state_size_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ union {
+ __le64 size;
+ u8 rsvd2[11];
+ } __packed;
+ u8 color;
+};
+
+enum pds_lm_suspend_resume_type {
+ PDS_LM_SUSPEND_RESUME_TYPE_FULL = 0,
+ PDS_LM_SUSPEND_RESUME_TYPE_P2P = 1,
+};
+
+/**
+ * struct pds_lm_suspend_cmd - SUSPEND command
+ * @opcode: Opcode PDS_LM_CMD_SUSPEND
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @type: Type of suspend (enum pds_lm_suspend_resume_type)
+ */
+struct pds_lm_suspend_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 type;
+};
+
+/**
+ * struct pds_lm_suspend_status_cmd - SUSPEND status command
+ * @opcode: Opcode PDS_LM_CMD_SUSPEND_STATUS
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @type: Type of suspend (enum pds_lm_suspend_resume_type)
+ */
+struct pds_lm_suspend_status_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 type;
+};
+
+/**
+ * struct pds_lm_resume_cmd - RESUME command
+ * @opcode: Opcode PDS_LM_CMD_RESUME
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @type: Type of resume (enum pds_lm_suspend_resume_type)
+ */
+struct pds_lm_resume_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 type;
+};
+
+/**
+ * struct pds_lm_sg_elem - Transmit scatter-gather (SG) descriptor element
+ * @addr: DMA address of SG element data buffer
+ * @len: Length of SG element data buffer, in bytes
+ * @rsvd: Word boundary padding
+ */
+struct pds_lm_sg_elem {
+ __le64 addr;
+ __le32 len;
+ __le16 rsvd[2];
+};
+
+/**
+ * struct pds_lm_save_cmd - SAVE command
+ * @opcode: Opcode PDS_LM_CMD_SAVE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @rsvd2: Word boundary padding
+ * @sgl_addr: IOVA address of the SGL to dma the device state
+ * @num_sge: Total number of SG elements
+ */
+struct pds_lm_save_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 rsvd2[4];
+ __le64 sgl_addr;
+ __le32 num_sge;
+} __packed;
+
+/**
+ * struct pds_lm_restore_cmd - RESTORE command
+ * @opcode: Opcode PDS_LM_CMD_RESTORE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @rsvd2: Word boundary padding
+ * @sgl_addr: IOVA address of the SGL to dma the device state
+ * @num_sge: Total number of SG elements
+ */
+struct pds_lm_restore_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 rsvd2[4];
+ __le64 sgl_addr;
+ __le32 num_sge;
+} __packed;
+
+/**
+ * union pds_lm_dev_state - device state information
+ * @words: Device state words
+ */
+union pds_lm_dev_state {
+ __le32 words[PDS_LM_DEVICE_STATE_LENGTH / sizeof(__le32)];
+};
+
+enum pds_lm_host_vf_status {
+ PDS_LM_STA_NONE = 0,
+ PDS_LM_STA_IN_PROGRESS,
+ PDS_LM_STA_MAX,
+};
+
+/**
+ * struct pds_lm_dirty_region_info - Memory region info for STATUS and ENABLE
+ * @dma_base: Base address of the DMA-contiguous memory region
+ * @page_count: Number of pages in the memory region
+ * @page_size_log2: Log2 page size in the memory region
+ * @rsvd: Word boundary padding
+ */
+struct pds_lm_dirty_region_info {
+ __le64 dma_base;
+ __le32 page_count;
+ u8 page_size_log2;
+ u8 rsvd[3];
+};
+
+/**
+ * struct pds_lm_dirty_status_cmd - DIRTY_STATUS command
+ * @opcode: Opcode PDS_LM_CMD_DIRTY_STATUS
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @max_regions: Capacity of the region info buffer
+ * @rsvd2: Word boundary padding
+ * @regions_dma: DMA address of the region info buffer
+ *
+ * The minimum of max_regions (from the command) and num_regions (from the
+ * completion) of struct pds_lm_dirty_region_info will be written to
+ * regions_dma.
+ *
+ * The max_regions may be zero, in which case regions_dma is ignored. In that
+ * case, the completion will only report the maximum number of regions
+ * supported by the device, and the number of regions currently enabled.
+ */
+struct pds_lm_dirty_status_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 max_regions;
+ u8 rsvd2[3];
+ __le64 regions_dma;
+} __packed;
+
+/**
+ * enum pds_lm_dirty_bmp_type - Type of dirty page bitmap
+ * @PDS_LM_DIRTY_BMP_TYPE_NONE: No bitmap / disabled
+ * @PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK: Seq/Ack bitmap representation
+ */
+enum pds_lm_dirty_bmp_type {
+ PDS_LM_DIRTY_BMP_TYPE_NONE = 0,
+ PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK = 1,
+};
+
+/**
+ * struct pds_lm_dirty_status_comp - STATUS command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the desc ring for which this is the completion
+ * @max_regions: Maximum number of regions supported by the device
+ * @num_regions: Number of regions currently enabled
+ * @bmp_type: Type of dirty bitmap representation
+ * @rsvd2: Word boundary padding
+ * @bmp_type_mask: Mask of supported bitmap types, bit index per type
+ * @rsvd3: Word boundary padding
+ * @color: Color bit
+ *
+ * This completion descriptor is used for STATUS, ENABLE, and DISABLE.
+ */
+struct pds_lm_dirty_status_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 max_regions;
+ u8 num_regions;
+ u8 bmp_type;
+ u8 rsvd2;
+ __le32 bmp_type_mask;
+ u8 rsvd3[3];
+ u8 color;
+};
+
+/**
+ * struct pds_lm_dirty_enable_cmd - DIRTY_ENABLE command
+ * @opcode: Opcode PDS_LM_CMD_DIRTY_ENABLE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @bmp_type: Type of dirty bitmap representation
+ * @num_regions: Number of entries in the region info buffer
+ * @rsvd2: Word boundary padding
+ * @regions_dma: DMA address of the region info buffer
+ *
+ * The num_regions must be nonzero, and less than or equal to the maximum
+ * number of regions supported by the device.
+ *
+ * The memory regions should not overlap.
+ *
+ * The information should be initialized by the driver. The device may modify
+ * the information on successful completion, such as by size-aligning the
+ * number of pages in a region.
+ *
+ * The modified number of pages will be greater than or equal to the page count
+ * given in the enable command, and at least as coarsely aligned as the given
+ * value. For example, the count might be aligned to a multiple of 64, but
+ * if the value is already a multiple of 128 or higher, it will not change.
+ * If the driver requires its own minimum alignment of the number of pages, the
+ * driver should account for that already in the region info of this command.
+ *
+ * This command uses struct pds_lm_dirty_status_comp for its completion.
+ */
+struct pds_lm_dirty_enable_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 bmp_type;
+ u8 num_regions;
+ u8 rsvd2[2];
+ __le64 regions_dma;
+} __packed;
+
+/**
+ * struct pds_lm_dirty_disable_cmd - DIRTY_DISABLE command
+ * @opcode: Opcode PDS_LM_CMD_DIRTY_DISABLE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ *
+ * Dirty page tracking will be disabled. This may be called in any state, as
+ * long as dirty page tracking is supported by the device, to ensure that dirty
+ * page tracking is disabled.
+ *
+ * This command uses struct pds_lm_dirty_status_comp for its completion. On
+ * success, num_regions will be zero.
+ */
+struct pds_lm_dirty_disable_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+};
+
+/**
+ * struct pds_lm_dirty_seq_ack_cmd - DIRTY_READ_SEQ or _WRITE_ACK command
+ * @opcode: Opcode PDS_LM_CMD_DIRTY_[READ_SEQ|WRITE_ACK]
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @off_bytes: Byte offset in the bitmap
+ * @len_bytes: Number of bytes to transfer
+ * @num_sge: Number of DMA scatter gather elements
+ * @rsvd2: Word boundary padding
+ * @sgl_addr: DMA address of scatter gather list
+ *
+ * Read bytes from the SEQ bitmap, or write bytes into the ACK bitmap.
+ *
+ * This command treats the entire bitmap as a byte buffer. It does not
+ * distinguish between guest memory regions. The driver should refer to the
+ * number of pages in each region, according to PDS_LM_CMD_DIRTY_STATUS, to
+ * determine the region boundaries in the bitmap. Each region will be
+ * represented by exactly the number of bits as the page count for that region,
+ * immediately following the last bit of the previous region.
+ */
+struct pds_lm_dirty_seq_ack_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ __le32 off_bytes;
+ __le32 len_bytes;
+ __le16 num_sge;
+ u8 rsvd2[2];
+ __le64 sgl_addr;
+} __packed;
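+
+/*
+ * Illustrative only: deriving a region's position in the seq/ack bitmaps
+ * from the region info reported by PDS_LM_CMD_DIRTY_STATUS; the 'regions'
+ * array and index 'r' are assumptions.
+ *
+ *	u64 bit_off = 0;
+ *	int i;
+ *
+ *	for (i = 0; i < r; i++)
+ *		bit_off += le32_to_cpu(regions[i].page_count);
+ *	// region r occupies bits [bit_off, bit_off + page_count - 1],
+ *	// i.e. it starts at byte offset bit_off / 8 in the bitmap
+ */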
+
+/**
+ * struct pds_lm_host_vf_status_cmd - HOST_VF_STATUS command
+ * @opcode: Opcode PDS_LM_CMD_HOST_VF_STATUS
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @status: Current LM status of host VF driver (enum pds_lm_host_vf_status)
+ */
+struct pds_lm_host_vf_status_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 status;
+};
+
+enum pds_fwctl_cmd_opcode {
+ PDS_FWCTL_CMD_IDENT = 70,
+ PDS_FWCTL_CMD_RPC = 71,
+ PDS_FWCTL_CMD_QUERY = 72,
+};
+
+/**
+ * struct pds_fwctl_cmd - Firmware control command structure
+ * @opcode: Opcode
+ * @rsvd: Reserved
+ * @ep: Endpoint identifier
+ * @op: Operation identifier
+ */
+struct pds_fwctl_cmd {
+ u8 opcode;
+ u8 rsvd[3];
+ __le32 ep;
+ __le32 op;
+} __packed;
+
+/**
+ * struct pds_fwctl_comp - Firmware control completion structure
+ * @status: Status of the firmware control operation
+ * @rsvd: Reserved
+ * @comp_index: Completion index in little-endian format
+ * @rsvd2: Reserved
+ * @color: Color bit indicating the state of the completion
+ */
+struct pds_fwctl_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 rsvd2[11];
+ u8 color;
+} __packed;
+
+/**
+ * struct pds_fwctl_ident_cmd - Firmware control identification command structure
+ * @opcode: Operation code for the command
+ * @rsvd: Reserved
+ * @version: Interface version
+ * @rsvd2: Reserved
+ * @len: Length of the identification data
+ * @ident_pa: Physical address of the identification data
+ */
+struct pds_fwctl_ident_cmd {
+ u8 opcode;
+ u8 rsvd;
+ u8 version;
+ u8 rsvd2;
+ __le32 len;
+ __le64 ident_pa;
+} __packed;
+
+/* future feature bits here
+ * enum pds_fwctl_features {
+ * };
+ * (compilers don't like empty enums)
+ */
+
+/**
+ * struct pds_fwctl_ident - Firmware control identification structure
+ * @features: Supported features (enum pds_fwctl_features)
+ * @version: Interface version
+ * @rsvd: Reserved
+ * @max_req_sz: Maximum request size
+ * @max_resp_sz: Maximum response size
+ * @max_req_sg_elems: Maximum number of request SGs
+ * @max_resp_sg_elems: Maximum number of response SGs
+ */
+struct pds_fwctl_ident {
+ __le64 features;
+ u8 version;
+ u8 rsvd[3];
+ __le32 max_req_sz;
+ __le32 max_resp_sz;
+ u8 max_req_sg_elems;
+ u8 max_resp_sg_elems;
+} __packed;
+
+enum pds_fwctl_query_entity {
+ PDS_FWCTL_RPC_ROOT = 0,
+ PDS_FWCTL_RPC_ENDPOINT = 1,
+ PDS_FWCTL_RPC_OPERATION = 2,
+};
+
+#define PDS_FWCTL_RPC_OPCODE_CMD_SHIFT 0
+#define PDS_FWCTL_RPC_OPCODE_CMD_MASK GENMASK(15, PDS_FWCTL_RPC_OPCODE_CMD_SHIFT)
+#define PDS_FWCTL_RPC_OPCODE_VER_SHIFT 16
+#define PDS_FWCTL_RPC_OPCODE_VER_MASK GENMASK(23, PDS_FWCTL_RPC_OPCODE_VER_SHIFT)
+
+#define PDS_FWCTL_RPC_OPCODE_GET_CMD(op) FIELD_GET(PDS_FWCTL_RPC_OPCODE_CMD_MASK, op)
+#define PDS_FWCTL_RPC_OPCODE_GET_VER(op) FIELD_GET(PDS_FWCTL_RPC_OPCODE_VER_MASK, op)
+
+#define PDS_FWCTL_RPC_OPCODE_CMP(op1, op2) \
+ (PDS_FWCTL_RPC_OPCODE_GET_CMD(op1) == PDS_FWCTL_RPC_OPCODE_GET_CMD(op2) && \
+ PDS_FWCTL_RPC_OPCODE_GET_VER(op1) <= PDS_FWCTL_RPC_OPCODE_GET_VER(op2))
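+
+/*
+ * For example (illustrative only), an opcode carrying command 0x12 at
+ * version 1 round-trips through the accessors above:
+ *
+ *	u32 op = FIELD_PREP(PDS_FWCTL_RPC_OPCODE_CMD_MASK, 0x12) |
+ *		 FIELD_PREP(PDS_FWCTL_RPC_OPCODE_VER_MASK, 1);
+ *
+ *	PDS_FWCTL_RPC_OPCODE_GET_CMD(op);	// == 0x12
+ *	PDS_FWCTL_RPC_OPCODE_GET_VER(op);	// == 1
+ */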
+
+/*
+ * FW command attributes that map to the FWCTL scope values
+ */
+#define PDSFC_FW_CMD_ATTR_READ 0x00
+#define PDSFC_FW_CMD_ATTR_DEBUG_READ 0x02
+#define PDSFC_FW_CMD_ATTR_WRITE 0x04
+#define PDSFC_FW_CMD_ATTR_DEBUG_WRITE 0x08
+#define PDSFC_FW_CMD_ATTR_SYNC 0x10
+
+/**
+ * struct pds_fwctl_query_cmd - Firmware control query command structure
+ * @opcode: Operation code for the command
+ * @entity: Entity type to query (enum pds_fwctl_query_entity)
+ * @version: Version of the query data structure supported by the driver
+ * @rsvd: Reserved
+ * @query_data_buf_len: Length of the query data buffer
+ * @query_data_buf_pa: Physical address of the query data buffer
+ * @ep: Endpoint identifier to query (when entity is PDS_FWCTL_RPC_ENDPOINT)
+ * @op: Operation identifier to query (when entity is PDS_FWCTL_RPC_OPERATION)
+ *
+ * This structure is used to send a query command to the firmware control
+ * interface. The structure is packed to ensure there is no padding between
+ * the fields.
+ */
+struct pds_fwctl_query_cmd {
+ u8 opcode;
+ u8 entity;
+ u8 version;
+ u8 rsvd;
+ __le32 query_data_buf_len;
+ __le64 query_data_buf_pa;
+ union {
+ __le32 ep;
+ __le32 op;
+ };
+} __packed;
+
+/**
+ * struct pds_fwctl_query_comp - Firmware control query completion structure
+ * @status: Status of the query command
+ * @rsvd: Reserved
+ * @comp_index: Completion index in little-endian format
+ * @version: Version of the query data structure returned by firmware. This
+ * should be less than or equal to the version supported by the driver
+ * @rsvd2: Reserved
+ * @color: Color bit indicating the state of the completion
+ */
+struct pds_fwctl_query_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 version;
+ u8 rsvd2[2];
+ u8 color;
+} __packed;
+
+/**
+ * struct pds_fwctl_query_data_endpoint - query data for entity PDS_FWCTL_RPC_ROOT
+ * @id: The identifier for the data endpoint
+ */
+struct pds_fwctl_query_data_endpoint {
+ __le32 id;
+} __packed;
+
+/**
+ * struct pds_fwctl_query_data_operation - query data for entity PDS_FWCTL_RPC_ENDPOINT
+ * @id: Operation identifier
+ * @scope: Scope of the operation (enum fwctl_rpc_scope)
+ * @rsvd: Reserved
+ */
+struct pds_fwctl_query_data_operation {
+ __le32 id;
+ u8 scope;
+ u8 rsvd[3];
+} __packed;
+
+/**
+ * struct pds_fwctl_query_data - query data structure
+ * @version: Version of the query data structure
+ * @rsvd: Reserved
+ * @num_entries: Number of entries in the @entries array
+ * @entries: Array of query data entries, depending on the entity type
+ */
+struct pds_fwctl_query_data {
+ u8 version;
+ u8 rsvd[3];
+ __le32 num_entries;
+ u8 entries[] __counted_by_le(num_entries);
+} __packed;
+
+/**
+ * struct pds_fwctl_rpc_cmd - Firmware control RPC command
+ * @opcode: opcode PDS_FWCTL_CMD_RPC
+ * @rsvd: Reserved
+ * @flags: Indicates indirect request and/or response handling
+ * @ep: Endpoint identifier
+ * @op: Operation identifier
+ * @inline_req0: Buffer for inline request
+ * @inline_req1: Buffer for inline request
+ * @req_pa: Physical address of request data
+ * @req_sz: Size of the request
+ * @req_sg_elems: Number of request SGs
+ * @req_rsvd: Reserved
+ * @inline_req2: Buffer for inline request
+ * @resp_pa: Physical address of response data
+ * @resp_sz: Size of the response
+ * @resp_sg_elems: Number of response SGs
+ * @resp_rsvd: Reserved
+ */
+struct pds_fwctl_rpc_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 flags;
+#define PDS_FWCTL_RPC_IND_REQ 0x1
+#define PDS_FWCTL_RPC_IND_RESP 0x2
+ __le32 ep;
+ __le32 op;
+ u8 inline_req0[16];
+ union {
+ u8 inline_req1[16];
+ struct {
+ __le64 req_pa;
+ __le32 req_sz;
+ u8 req_sg_elems;
+ u8 req_rsvd[3];
+ };
+ };
+ union {
+ u8 inline_req2[16];
+ struct {
+ __le64 resp_pa;
+ __le32 resp_sz;
+ u8 resp_sg_elems;
+ u8 resp_rsvd[3];
+ };
+ };
+} __packed;
+
+/**
+ * struct pds_sg_elem - Transmit scatter-gather (SG) descriptor element
+ * @addr: DMA address of SG element data buffer
+ * @len: Length of SG element data buffer, in bytes
+ * @rsvd: Reserved
+ */
+struct pds_sg_elem {
+ __le64 addr;
+ __le32 len;
+ u8 rsvd[4];
+} __packed;
+
+/**
+ * struct pds_fwctl_rpc_comp - Completion of a firmware control RPC
+ * @status: Status of the command
+ * @rsvd: Reserved
+ * @comp_index: Completion index of the command
+ * @err: Error code, if any, from the RPC
+ * @resp_sz: Size of the response
+ * @rsvd2: Reserved
+ * @color: Color bit indicating the state of the completion
+ */
+struct pds_fwctl_rpc_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ __le32 err;
+ __le32 resp_sz;
+ u8 rsvd2[3];
+ u8 color;
+} __packed;
+
+union pds_core_adminq_cmd {
+ u8 opcode;
+ u8 bytes[64];
+
+ struct pds_core_client_reg_cmd client_reg;
+ struct pds_core_client_unreg_cmd client_unreg;
+ struct pds_core_client_request_cmd client_request;
+
+ struct pds_core_lif_identify_cmd lif_ident;
+ struct pds_core_lif_init_cmd lif_init;
+ struct pds_core_lif_reset_cmd lif_reset;
+ struct pds_core_lif_setattr_cmd lif_setattr;
+ struct pds_core_lif_getattr_cmd lif_getattr;
+
+ struct pds_core_q_identify_cmd q_ident;
+ struct pds_core_q_init_cmd q_init;
+
+ struct pds_vdpa_cmd vdpa;
+ struct pds_vdpa_init_cmd vdpa_init;
+ struct pds_vdpa_ident_cmd vdpa_ident;
+ struct pds_vdpa_status_cmd vdpa_status;
+ struct pds_vdpa_setattr_cmd vdpa_setattr;
+ struct pds_vdpa_set_features_cmd vdpa_set_features;
+ struct pds_vdpa_vq_init_cmd vdpa_vq_init;
+ struct pds_vdpa_vq_reset_cmd vdpa_vq_reset;
+
+ struct pds_lm_suspend_cmd lm_suspend;
+ struct pds_lm_suspend_status_cmd lm_suspend_status;
+ struct pds_lm_resume_cmd lm_resume;
+ struct pds_lm_state_size_cmd lm_state_size;
+ struct pds_lm_save_cmd lm_save;
+ struct pds_lm_restore_cmd lm_restore;
+ struct pds_lm_host_vf_status_cmd lm_host_vf_status;
+ struct pds_lm_dirty_status_cmd lm_dirty_status;
+ struct pds_lm_dirty_enable_cmd lm_dirty_enable;
+ struct pds_lm_dirty_disable_cmd lm_dirty_disable;
+ struct pds_lm_dirty_seq_ack_cmd lm_dirty_seq_ack;
+
+ struct pds_fwctl_cmd fwctl;
+ struct pds_fwctl_ident_cmd fwctl_ident;
+ struct pds_fwctl_rpc_cmd fwctl_rpc;
+ struct pds_fwctl_query_cmd fwctl_query;
+};
+
+union pds_core_adminq_comp {
+ struct {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 rsvd2[11];
+ u8 color;
+ };
+ u32 words[4];
+
+ struct pds_core_client_reg_comp client_reg;
+
+ struct pds_core_lif_identify_comp lif_ident;
+ struct pds_core_lif_init_comp lif_init;
+ struct pds_core_lif_setattr_comp lif_setattr;
+ struct pds_core_lif_getattr_comp lif_getattr;
+
+ struct pds_core_q_identify_comp q_ident;
+ struct pds_core_q_init_comp q_init;
+
+ struct pds_vdpa_vq_init_comp vdpa_vq_init;
+ struct pds_vdpa_vq_reset_comp vdpa_vq_reset;
+
+ struct pds_lm_state_size_comp lm_state_size;
+ struct pds_lm_dirty_status_comp lm_dirty_status;
+
+ struct pds_fwctl_comp fwctl;
+ struct pds_fwctl_rpc_comp fwctl_rpc;
+ struct pds_fwctl_query_comp fwctl_query;
+};
+
+#ifndef __CHECKER__
+static_assert(sizeof(union pds_core_adminq_cmd) == 64);
+static_assert(sizeof(union pds_core_adminq_comp) == 16);
+static_assert(sizeof(union pds_core_notifyq_comp) == 64);
+#endif /* __CHECKER__ */
+
+/* The color bit is a 'done' bit for the completion descriptors
+ * where the meaning alternates between '1' and '0' for alternating
+ * passes through the completion descriptor ring.
+ */
+static inline bool pdsc_color_match(u8 color, bool done_color)
+{
+ return (!!(color & PDS_COMP_COLOR_MASK)) == done_color;
+}
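+
+/* A completion ring consumer might use the helper above roughly as
+ * follows (a sketch; the cq bookkeeping fields are assumptions):
+ *
+ *	union pds_core_adminq_comp *comp = &cq->comps[cq->tail_idx];
+ *
+ *	while (pdsc_color_match(comp->color, cq->done_color)) {
+ *		// handle *comp here
+ *		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+ *		if (!cq->tail_idx)
+ *			cq->done_color = !cq->done_color;  // ring wrapped
+ *		comp = &cq->comps[cq->tail_idx];
+ *	}
+ */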
+
+struct pdsc;
+int pdsc_adminq_post(struct pdsc *pdsc,
+ union pds_core_adminq_cmd *cmd,
+ union pds_core_adminq_comp *comp,
+ bool fast_poll);
+
+#endif /* _PDS_CORE_ADMINQ_H_ */
diff --git a/include/linux/pds/pds_auxbus.h b/include/linux/pds/pds_auxbus.h
new file mode 100644
index 000000000000..214ef12302d0
--- /dev/null
+++ b/include/linux/pds/pds_auxbus.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#ifndef _PDSC_AUXBUS_H_
+#define _PDSC_AUXBUS_H_
+
+#include <linux/auxiliary_bus.h>
+
+struct pds_auxiliary_dev {
+ struct auxiliary_device aux_dev;
+ struct pci_dev *vf_pdev;
+ u16 client_id;
+};
+
+int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev,
+ union pds_core_adminq_cmd *req,
+ size_t req_len,
+ union pds_core_adminq_comp *resp,
+ u64 flags);
+#endif /* _PDSC_AUXBUS_H_ */
diff --git a/include/linux/pds/pds_common.h b/include/linux/pds/pds_common.h
new file mode 100644
index 000000000000..b193adbe7cc3
--- /dev/null
+++ b/include/linux/pds/pds_common.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#ifndef _PDS_COMMON_H_
+#define _PDS_COMMON_H_
+
+#include <linux/notifier.h>
+
+#define PDS_CORE_DRV_NAME "pds_core"
+
+/* the device's internal addressing uses up to 52 bits */
+#define PDS_CORE_ADDR_LEN 52
+#define PDS_CORE_ADDR_MASK (BIT_ULL(PDS_CORE_ADDR_LEN) - 1)
+#define PDS_PAGE_SIZE 4096
+
+enum pds_core_driver_type {
+ PDS_DRIVER_LINUX = 1,
+ PDS_DRIVER_WIN = 2,
+ PDS_DRIVER_DPDK = 3,
+ PDS_DRIVER_FREEBSD = 4,
+ PDS_DRIVER_IPXE = 5,
+ PDS_DRIVER_ESXI = 6,
+};
+
+enum pds_core_vif_types {
+ PDS_DEV_TYPE_CORE = 0,
+ PDS_DEV_TYPE_VDPA = 1,
+ PDS_DEV_TYPE_VFIO = 2,
+ PDS_DEV_TYPE_ETH = 3,
+ PDS_DEV_TYPE_RDMA = 4,
+ PDS_DEV_TYPE_LM = 5,
+ PDS_DEV_TYPE_FWCTL = 6,
+
+ /* new ones added before this line */
+ PDS_DEV_TYPE_MAX = 16 /* don't change - used in struct size */
+};
+
+#define PDS_DEV_TYPE_CORE_STR "Core"
+#define PDS_DEV_TYPE_VDPA_STR "vDPA"
+#define PDS_DEV_TYPE_VFIO_STR "vfio"
+#define PDS_DEV_TYPE_ETH_STR "Eth"
+#define PDS_DEV_TYPE_RDMA_STR "RDMA"
+#define PDS_DEV_TYPE_LM_STR "LM"
+#define PDS_DEV_TYPE_FWCTL_STR "fwctl"
+
+#define PDS_VDPA_DEV_NAME PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_VDPA_STR
+#define PDS_VFIO_LM_DEV_NAME PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_LM_STR "." PDS_DEV_TYPE_VFIO_STR
+
+struct pdsc;
+
+int pdsc_register_notify(struct notifier_block *nb);
+void pdsc_unregister_notify(struct notifier_block *nb);
+void *pdsc_get_pf_struct(struct pci_dev *vf_pdev);
+int pds_client_register(struct pdsc *pf, char *devname);
+int pds_client_unregister(struct pdsc *pf, u16 client_id);
+#endif /* _PDS_COMMON_H_ */
diff --git a/include/linux/pds/pds_core_if.h b/include/linux/pds/pds_core_if.h
new file mode 100644
index 000000000000..17a87c1a55d7
--- /dev/null
+++ b/include/linux/pds/pds_core_if.h
@@ -0,0 +1,572 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#ifndef _PDS_CORE_IF_H_
+#define _PDS_CORE_IF_H_
+
+#define PCI_VENDOR_ID_PENSANDO 0x1dd8
+#define PCI_DEVICE_ID_PENSANDO_CORE_PF 0x100c
+#define PCI_DEVICE_ID_VIRTIO_NET_TRANS 0x1000
+#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
+#define PCI_DEVICE_ID_PENSANDO_VDPA_VF 0x100b
+#define PDS_CORE_BARS_MAX 4
+#define PDS_CORE_PCI_BAR_DBELL 1
+
+/* Bar0 */
+#define PDS_CORE_DEV_INFO_SIGNATURE 0x44455649 /* 'DEVI' */
+#define PDS_CORE_BAR0_SIZE 0x8000
+#define PDS_CORE_BAR0_DEV_INFO_REGS_OFFSET 0x0000
+#define PDS_CORE_BAR0_DEV_CMD_REGS_OFFSET 0x0800
+#define PDS_CORE_BAR0_DEV_CMD_DATA_REGS_OFFSET 0x0c00
+#define PDS_CORE_BAR0_INTR_STATUS_OFFSET 0x1000
+#define PDS_CORE_BAR0_INTR_CTRL_OFFSET 0x2000
+#define PDS_CORE_DEV_CMD_DONE 0x00000001
+
+#define PDS_CORE_DEVCMD_TIMEOUT 5
+
+#define PDS_CORE_CLIENT_ID 0
+#define PDS_CORE_ASIC_TYPE_CAPRI 0
+
+/*
+ * enum pds_core_cmd_opcode - Device commands
+ */
+enum pds_core_cmd_opcode {
+ /* Core init */
+ PDS_CORE_CMD_NOP = 0,
+ PDS_CORE_CMD_IDENTIFY = 1,
+ PDS_CORE_CMD_RESET = 2,
+ PDS_CORE_CMD_INIT = 3,
+
+ PDS_CORE_CMD_FW_DOWNLOAD = 4,
+ PDS_CORE_CMD_FW_CONTROL = 5,
+
+ /* SR/IOV commands */
+ PDS_CORE_CMD_VF_GETATTR = 60,
+ PDS_CORE_CMD_VF_SETATTR = 61,
+ PDS_CORE_CMD_VF_CTRL = 62,
+
+ /* Add commands before this line */
+ PDS_CORE_CMD_MAX,
+ PDS_CORE_CMD_COUNT
+};
+
+/*
+ * enum pds_core_status_code - Device command return codes
+ */
+enum pds_core_status_code {
+ PDS_RC_SUCCESS = 0, /* Success */
+ PDS_RC_EVERSION = 1, /* Incorrect version for request */
+ PDS_RC_EOPCODE = 2, /* Invalid cmd opcode */
+ PDS_RC_EIO = 3, /* I/O error */
+ PDS_RC_EPERM = 4, /* Permission denied */
+ PDS_RC_EQID = 5, /* Bad qid */
+ PDS_RC_EQTYPE = 6, /* Bad qtype */
+ PDS_RC_ENOENT = 7, /* No such element */
+ PDS_RC_EINTR = 8, /* operation interrupted */
+ PDS_RC_EAGAIN = 9, /* Try again */
+ PDS_RC_ENOMEM = 10, /* Out of memory */
+ PDS_RC_EFAULT = 11, /* Bad address */
+ PDS_RC_EBUSY = 12, /* Device or resource busy */
+ PDS_RC_EEXIST = 13, /* object already exists */
+ PDS_RC_EINVAL = 14, /* Invalid argument */
+ PDS_RC_ENOSPC = 15, /* No space left or alloc failure */
+ PDS_RC_ERANGE = 16, /* Parameter out of range */
+ PDS_RC_BAD_ADDR = 17, /* Descriptor contains a bad ptr */
+ PDS_RC_DEV_CMD = 18, /* Device cmd attempted on AdminQ */
+ PDS_RC_ENOSUPP = 19, /* Operation not supported */
+ PDS_RC_ERROR = 29, /* Generic error */
+ PDS_RC_ERDMA = 30, /* Generic RDMA error */
+ PDS_RC_EVFID = 31, /* VF ID does not exist */
+ PDS_RC_BAD_FW = 32, /* FW file is invalid or corrupted */
+ PDS_RC_ECLIENT = 33, /* No such client id */
+ PDS_RC_BAD_PCI = 255, /* Broken PCI when reading status */
+};
+
+/**
+ * struct pds_core_drv_identity - Driver identity information
+ * @drv_type: Driver type (enum pds_core_driver_type)
+ * @os_dist: OS distribution, numeric format
+ * @os_dist_str: OS distribution, string format
+ * @kernel_ver: Kernel version, numeric format
+ * @kernel_ver_str: Kernel version, string format
+ * @driver_ver_str: Driver version, string format
+ */
+struct pds_core_drv_identity {
+ __le32 drv_type;
+ __le32 os_dist;
+ char os_dist_str[128];
+ __le32 kernel_ver;
+ char kernel_ver_str[32];
+ char driver_ver_str[32];
+};
+
+#define PDS_DEV_TYPE_MAX 16
+/**
+ * struct pds_core_dev_identity - Device identity information
+ * @version: Version of device identify
+ * @type: Identify type (0 for now)
+ * @state: Device state
+ * @rsvd: Word boundary padding
+ * @nlifs: Number of LIFs provisioned
+ * @nintrs: Number of interrupts provisioned
+ * @ndbpgs_per_lif: Number of doorbell pages per LIF
+ * @intr_coal_mult: Interrupt coalescing multiplication factor
+ * Scale user-supplied interrupt coalescing
+ * value in usecs to device units using:
+ * device units = usecs * mult / div
+ * @intr_coal_div: Interrupt coalescing division factor
+ * Scale user-supplied interrupt coalescing
+ * value in usecs to device units using:
+ * device units = usecs * mult / div
+ * @vif_types: How many of each VIF device type is supported
+ */
+struct pds_core_dev_identity {
+ u8 version;
+ u8 type;
+ u8 state;
+ u8 rsvd;
+ __le32 nlifs;
+ __le32 nintrs;
+ __le32 ndbpgs_per_lif;
+ __le32 intr_coal_mult;
+ __le32 intr_coal_div;
+ __le16 vif_types[PDS_DEV_TYPE_MAX];
+};
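+
+/*
+ * For example (sketch only), scaling a user-requested coalescing interval
+ * in usecs to device units with the factors above:
+ *
+ *	u32 units = usecs * le32_to_cpu(ident->intr_coal_mult) /
+ *		    le32_to_cpu(ident->intr_coal_div);
+ */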
+
+#define PDS_CORE_IDENTITY_VERSION_1 1
+
+/**
+ * struct pds_core_dev_identify_cmd - Driver/device identify command
+ * @opcode: Opcode PDS_CORE_CMD_IDENTIFY
+ * @ver: Highest version of identify supported by driver
+ *
+ * Expects to find driver identification info (struct pds_core_drv_identity)
+ * in cmd_regs->data. Driver should keep the devcmd interface locked
+ * while preparing the driver info.
+ */
+struct pds_core_dev_identify_cmd {
+ u8 opcode;
+ u8 ver;
+};
+
+/**
+ * struct pds_core_dev_identify_comp - Device identify command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @ver: Version of identify returned by device
+ *
+ * Device identification info (struct pds_core_dev_identity) can be found
+ * in cmd_regs->data. Driver should keep the devcmd interface locked
+ * while reading the results.
+ */
+struct pds_core_dev_identify_comp {
+ u8 status;
+ u8 ver;
+};
+
+/**
+ * struct pds_core_dev_reset_cmd - Device reset command
+ * @opcode: Opcode PDS_CORE_CMD_RESET
+ *
+ * Resets and clears all LIFs, VDevs, and VIFs on the device.
+ */
+struct pds_core_dev_reset_cmd {
+ u8 opcode;
+};
+
+/**
+ * struct pds_core_dev_reset_comp - Reset command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ */
+struct pds_core_dev_reset_comp {
+ u8 status;
+};
+
+/*
+ * struct pds_core_dev_init_data - Pointers and info needed for the Core
+ * initialization PDS_CORE_CMD_INIT command. The in and out structs are
+ * overlays on the pds_core_dev_cmd_regs.data space for passing data down
+ * to the firmware on init, and then returning initialization results.
+ */
+struct pds_core_dev_init_data_in {
+ __le64 adminq_q_base;
+ __le64 adminq_cq_base;
+ __le64 notifyq_cq_base;
+ __le32 flags;
+ __le16 intr_index;
+ u8 adminq_ring_size;
+ u8 notifyq_ring_size;
+};
+
+struct pds_core_dev_init_data_out {
+ __le32 core_hw_index;
+ __le32 adminq_hw_index;
+ __le32 notifyq_hw_index;
+ u8 adminq_hw_type;
+ u8 notifyq_hw_type;
+};
+
+/**
+ * struct pds_core_dev_init_cmd - Core device initialize
+ * @opcode: opcode PDS_CORE_CMD_INIT
+ *
+ * Initializes the core device and sets up the AdminQ and NotifyQ.
+ * Expects to find initialization data (struct pds_core_dev_init_data_in)
+ * in cmd_regs->data. Driver should keep the devcmd interface locked
+ * while preparing the driver info.
+ */
+struct pds_core_dev_init_cmd {
+ u8 opcode;
+};
+
+/**
+ * struct pds_core_dev_init_comp - Core init completion
+ * @status: Status of the command (enum pds_core_status_code)
+ *
+ * Initialization result data (struct pds_core_dev_init_data_out)
+ * is found in cmd_regs->data.
+ */
+struct pds_core_dev_init_comp {
+ u8 status;
+};
+
+/**
+ * struct pds_core_fw_download_cmd - Firmware download command
+ * @opcode: opcode
+ * @rsvd: Word boundary padding
+ * @offset: offset of the firmware buffer within the full image
+ * @addr: DMA address of the firmware buffer
+ * @length: number of valid bytes in the firmware buffer
+ */
+struct pds_core_fw_download_cmd {
+ u8 opcode;
+ u8 rsvd[3];
+ __le32 offset;
+ __le64 addr;
+ __le32 length;
+};
+
+/**
+ * struct pds_core_fw_download_comp - Firmware download completion
+ * @status: Status of the command (enum pds_core_status_code)
+ */
+struct pds_core_fw_download_comp {
+ u8 status;
+};
+
+/**
+ * enum pds_core_fw_control_oper - FW control operations
+ * @PDS_CORE_FW_INSTALL_ASYNC: Install firmware asynchronously
+ * @PDS_CORE_FW_INSTALL_STATUS: Firmware installation status
+ * @PDS_CORE_FW_ACTIVATE_ASYNC: Activate firmware asynchronously
+ * @PDS_CORE_FW_ACTIVATE_STATUS: Firmware activate status
+ * @PDS_CORE_FW_UPDATE_CLEANUP: Cleanup any firmware update leftovers
+ * @PDS_CORE_FW_GET_BOOT: Return current active firmware slot
+ * @PDS_CORE_FW_SET_BOOT: Set active firmware slot for next boot
+ * @PDS_CORE_FW_GET_LIST: Return list of installed firmware images
+ */
+enum pds_core_fw_control_oper {
+ PDS_CORE_FW_INSTALL_ASYNC = 0,
+ PDS_CORE_FW_INSTALL_STATUS = 1,
+ PDS_CORE_FW_ACTIVATE_ASYNC = 2,
+ PDS_CORE_FW_ACTIVATE_STATUS = 3,
+ PDS_CORE_FW_UPDATE_CLEANUP = 4,
+ PDS_CORE_FW_GET_BOOT = 5,
+ PDS_CORE_FW_SET_BOOT = 6,
+ PDS_CORE_FW_GET_LIST = 7,
+};
+
+enum pds_core_fw_slot {
+ PDS_CORE_FW_SLOT_INVALID = 0,
+ PDS_CORE_FW_SLOT_A = 1,
+ PDS_CORE_FW_SLOT_B = 2,
+ PDS_CORE_FW_SLOT_GOLD = 3,
+};
+
+/**
+ * struct pds_core_fw_control_cmd - Firmware control command
+ * @opcode: opcode
+ * @rsvd: Word boundary padding
+ * @oper: firmware control operation (enum pds_core_fw_control_oper)
+ * @slot: slot to operate on (enum pds_core_fw_slot)
+ */
+struct pds_core_fw_control_cmd {
+ u8 opcode;
+ u8 rsvd[3];
+ u8 oper;
+ u8 slot;
+};
+
+/**
+ * struct pds_core_fw_control_comp - Firmware control completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word alignment space
+ * @slot: Slot number (enum pds_core_fw_slot)
+ * @rsvd1: Struct padding
+ * @color: Color bit
+ */
+struct pds_core_fw_control_comp {
+ u8 status;
+ u8 rsvd[3];
+ u8 slot;
+ u8 rsvd1[10];
+ u8 color;
+};
+
+struct pds_core_fw_name_info {
+#define PDS_CORE_FWSLOT_BUFLEN 8
+#define PDS_CORE_FWVERS_BUFLEN 32
+ char slotname[PDS_CORE_FWSLOT_BUFLEN];
+ char fw_version[PDS_CORE_FWVERS_BUFLEN];
+};
+
+struct pds_core_fw_list_info {
+#define PDS_CORE_FWVERS_LIST_LEN 16
+ u8 num_fw_slots;
+ struct pds_core_fw_name_info fw_names[PDS_CORE_FWVERS_LIST_LEN];
+} __packed;
+
+enum pds_core_vf_attr {
+ PDS_CORE_VF_ATTR_SPOOFCHK = 1,
+ PDS_CORE_VF_ATTR_TRUST = 2,
+ PDS_CORE_VF_ATTR_MAC = 3,
+ PDS_CORE_VF_ATTR_LINKSTATE = 4,
+ PDS_CORE_VF_ATTR_VLAN = 5,
+ PDS_CORE_VF_ATTR_RATE = 6,
+ PDS_CORE_VF_ATTR_STATSADDR = 7,
+};
+
+/**
+ * enum pds_core_vf_link_status - Virtual Function link status
+ * @PDS_CORE_VF_LINK_STATUS_AUTO: Use link state of the uplink
+ * @PDS_CORE_VF_LINK_STATUS_UP: Link always up
+ * @PDS_CORE_VF_LINK_STATUS_DOWN: Link always down
+ */
+enum pds_core_vf_link_status {
+ PDS_CORE_VF_LINK_STATUS_AUTO = 0,
+ PDS_CORE_VF_LINK_STATUS_UP = 1,
+ PDS_CORE_VF_LINK_STATUS_DOWN = 2,
+};
+
+/**
+ * struct pds_core_vf_setattr_cmd - Set VF attributes on the NIC
+ * @opcode: Opcode
+ * @attr: Attribute type (enum pds_core_vf_attr)
+ * @vf_index: VF index
+ * @macaddr: mac address
+ * @vlanid: vlan ID
+ * @maxrate: max Tx rate in Mbps
+ * @spoofchk: enable address spoof checking
+ * @trust: enable VF trust
+ * @linkstate: set link up or down
+ * @stats: stats addr struct
+ * @stats.pa: set DMA address for VF stats
+ * @stats.len: length of VF stats space
+ * @pad: force union to specific size
+ */
+struct pds_core_vf_setattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 vf_index;
+ union {
+ u8 macaddr[6];
+ __le16 vlanid;
+ __le32 maxrate;
+ u8 spoofchk;
+ u8 trust;
+ u8 linkstate;
+ struct {
+ __le64 pa;
+ __le32 len;
+ } stats;
+ u8 pad[60];
+ } __packed;
+};
+
+struct pds_core_vf_setattr_comp {
+ u8 status;
+ u8 attr;
+ __le16 vf_index;
+ __le16 comp_index;
+ u8 rsvd[9];
+ u8 color;
+};
+
+/**
+ * struct pds_core_vf_getattr_cmd - Get VF attributes from the NIC
+ * @opcode: Opcode
+ * @attr: Attribute type (enum pds_core_vf_attr)
+ * @vf_index: VF index
+ */
+struct pds_core_vf_getattr_cmd {
+ u8 opcode;
+ u8 attr;
+ __le16 vf_index;
+};
+
+struct pds_core_vf_getattr_comp {
+ u8 status;
+ u8 attr;
+ __le16 vf_index;
+ union {
+ u8 macaddr[6];
+ __le16 vlanid;
+ __le32 maxrate;
+ u8 spoofchk;
+ u8 trust;
+ u8 linkstate;
+ __le64 stats_pa;
+ u8 pad[11];
+ } __packed;
+ u8 color;
+};
+
+enum pds_core_vf_ctrl_opcode {
+ PDS_CORE_VF_CTRL_START_ALL = 0,
+ PDS_CORE_VF_CTRL_START = 1,
+};
+
+/**
+ * struct pds_core_vf_ctrl_cmd - VF control command
+ * @opcode: Opcode for the command
+ * @ctrl_opcode: VF control operation type
+ * @vf_index: VF index; unused for the START_ALL operation
+ */
+struct pds_core_vf_ctrl_cmd {
+ u8 opcode;
+ u8 ctrl_opcode;
+ __le16 vf_index;
+};
+
+/**
+ * struct pds_core_vf_ctrl_comp - VF_CTRL command completion.
+ * @status: Status of the command (enum pds_core_status_code)
+ */
+struct pds_core_vf_ctrl_comp {
+ u8 status;
+};
+
+/*
+ * union pds_core_dev_cmd - Overlay of core device command structures
+ */
+union pds_core_dev_cmd {
+ u8 opcode;
+ u32 words[16];
+
+ struct pds_core_dev_identify_cmd identify;
+ struct pds_core_dev_init_cmd init;
+ struct pds_core_dev_reset_cmd reset;
+ struct pds_core_fw_download_cmd fw_download;
+ struct pds_core_fw_control_cmd fw_control;
+
+ struct pds_core_vf_setattr_cmd vf_setattr;
+ struct pds_core_vf_getattr_cmd vf_getattr;
+ struct pds_core_vf_ctrl_cmd vf_ctrl;
+};
+
+/*
+ * union pds_core_dev_comp - Overlay of core device completion structures
+ */
+union pds_core_dev_comp {
+ u8 status;
+ u8 bytes[16];
+
+ struct pds_core_dev_identify_comp identify;
+ struct pds_core_dev_reset_comp reset;
+ struct pds_core_dev_init_comp init;
+ struct pds_core_fw_download_comp fw_download;
+ struct pds_core_fw_control_comp fw_control;
+
+ struct pds_core_vf_setattr_comp vf_setattr;
+ struct pds_core_vf_getattr_comp vf_getattr;
+ struct pds_core_vf_ctrl_comp vf_ctrl;
+};
+
+/**
+ * struct pds_core_dev_hwstamp_regs - Hardware current timestamp registers
+ * @tick_low: Low 32 bits of hardware timestamp
+ * @tick_high: High 32 bits of hardware timestamp
+ */
+struct pds_core_dev_hwstamp_regs {
+ u32 tick_low;
+ u32 tick_high;
+};
+
+/**
+ * struct pds_core_dev_info_regs - Device info register format (read-only)
+ * @signature: Signature value of 0x44455649 ('DEVI')
+ * @version: Current version of info
+ * @asic_type: Asic type
+ * @asic_rev: Asic revision
+ * @fw_status: Firmware status
+ * bit 0 - 1 = fw running
+ * bit 4-7 - 4 bit generation number, changes on fw restart
+ * @fw_heartbeat: Firmware heartbeat counter
+ * @serial_num: Serial number
+ * @fw_version: Firmware version
+ * @oprom_regs: oprom_regs to store oprom debug enable/disable and bmp
+ * @rsvd_pad1024: Struct padding
+ * @hwstamp: Hardware current timestamp registers
+ * @rsvd_pad2048: Struct padding
+ */
+struct pds_core_dev_info_regs {
+#define PDS_CORE_DEVINFO_FWVERS_BUFLEN 32
+#define PDS_CORE_DEVINFO_SERIAL_BUFLEN 32
+ u32 signature;
+ u8 version;
+ u8 asic_type;
+ u8 asic_rev;
+#define PDS_CORE_FW_STS_F_STOPPED 0x00
+#define PDS_CORE_FW_STS_F_RUNNING 0x01
+#define PDS_CORE_FW_STS_F_GENERATION 0xF0
+ u8 fw_status;
+ __le32 fw_heartbeat;
+ char fw_version[PDS_CORE_DEVINFO_FWVERS_BUFLEN];
+ char serial_num[PDS_CORE_DEVINFO_SERIAL_BUFLEN];
+ u8 oprom_regs[32]; /* reserved */
+ u8 rsvd_pad1024[916];
+ struct pds_core_dev_hwstamp_regs hwstamp; /* on 1k boundary */
+ u8 rsvd_pad2048[1016];
+} __packed;
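+
+/*
+ * Illustrative only: checking that firmware is running and reading the
+ * generation number from @fw_status via the masks above.
+ *
+ *	u8 fw_status = ioread8(&info_regs->fw_status);
+ *	bool running = fw_status & PDS_CORE_FW_STS_F_RUNNING;
+ *	u8 gen = FIELD_GET(PDS_CORE_FW_STS_F_GENERATION, fw_status);
+ */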
+
+/**
+ * struct pds_core_dev_cmd_regs - Device command register format (read-write)
+ * @doorbell: Device Cmd Doorbell, write-only
+ * Write a 1 to signal device to process cmd
+ * @done: Command completed indicator, poll for completion
+ * bit 0 == 1 when command is complete
+ * @cmd: Opcode-specific command bytes
+ * @comp: Opcode-specific response bytes
+ * @rsvd: Struct padding
+ * @data: Opcode-specific side-data
+ */
+struct pds_core_dev_cmd_regs {
+ u32 doorbell;
+ u32 done;
+ union pds_core_dev_cmd cmd;
+ union pds_core_dev_comp comp;
+ u8 rsvd[48];
+ u32 data[478];
+} __packed;
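+
+/*
+ * A devcmd is issued by writing @cmd, ringing the @doorbell, then polling
+ * @done; a sketch with locking and timeout handling elided:
+ *
+ *	memcpy_toio(&cmd_regs->cmd, cmd, sizeof(*cmd));
+ *	iowrite32(1, &cmd_regs->doorbell);
+ *	while (!(ioread32(&cmd_regs->done) & PDS_CORE_DEV_CMD_DONE))
+ *		udelay(100);
+ *	memcpy_fromio(comp, &cmd_regs->comp, sizeof(*comp));
+ */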
+
+/**
+ * struct pds_core_dev_regs - Device register format for bar 0 page 0
+ * @info: Device info registers
+ * @devcmd: Device command registers
+ */
+struct pds_core_dev_regs {
+ struct pds_core_dev_info_regs info;
+ struct pds_core_dev_cmd_regs devcmd;
+} __packed;
+
+#ifndef __CHECKER__
+static_assert(sizeof(struct pds_core_drv_identity) <= 1912);
+static_assert(sizeof(struct pds_core_dev_identity) <= 1912);
+static_assert(sizeof(union pds_core_dev_cmd) == 64);
+static_assert(sizeof(union pds_core_dev_comp) == 16);
+static_assert(sizeof(struct pds_core_dev_info_regs) == 2048);
+static_assert(sizeof(struct pds_core_dev_cmd_regs) == 2048);
+static_assert(sizeof(struct pds_core_dev_regs) == 4096);
+#endif /* __CHECKER__ */
+
+#endif /* _PDS_CORE_IF_H_ */
diff --git a/include/linux/pds/pds_intr.h b/include/linux/pds/pds_intr.h
new file mode 100644
index 000000000000..56277c37248c
--- /dev/null
+++ b/include/linux/pds/pds_intr.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR Linux-OpenIB) OR BSD-2-Clause */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc. */
+
+#ifndef _PDS_INTR_H_
+#define _PDS_INTR_H_
+
+/*
+ * Interrupt control register
+ * @coal_init: Coalescing timer initial value, in
+ * device units. Use @identity->intr_coal_mult
+ * and @identity->intr_coal_div to convert from
+ * usecs to device units:
+ *
+ *		coal_init = coal_usecs * coal_mult / coal_div
+ *
+ * When an interrupt is sent the interrupt
+ * coalescing timer current value
+ * (@coalescing_curr) is initialized with this
+ * value and begins counting down. No more
+ * interrupts are sent until the coalescing
+ * timer reaches 0. When @coalescing_init=0
+ * interrupt coalescing is effectively disabled
+ * and every interrupt assert results in an
+ * interrupt. Reset value: 0
+ * @mask: Interrupt mask. When @mask=1 the interrupt
+ * resource will not send an interrupt. When
+ * @mask=0 the interrupt resource will send an
+ * interrupt if an interrupt event is pending
+ * or on the next interrupt assertion event.
+ * Reset value: 1
+ * @credits: Interrupt credits. This register indicates
+ * how many interrupt events the hardware has
+ * sent. When written by software this
+ * register atomically decrements @int_credits
+ * by the value written. When @int_credits
+ * becomes 0 then the "pending interrupt" bit
+ * in the Interrupt Status register is cleared
+ * by the hardware and any pending but unsent
+ * interrupts are cleared.
+ * !!!IMPORTANT!!! This is a signed register.
+ * @flags: Interrupt control flags
+ * @unmask -- When this bit is written with a 1
+ * the interrupt resource will set mask=0.
+ * @coal_timer_reset -- When this
+ * bit is written with a 1 the
+ * @coalescing_curr will be reloaded with
+ * @coalescing_init to reset the coalescing
+ * timer.
+ * @mask_on_assert: Automatically mask on assertion. When
+ * @mask_on_assert=1 the interrupt resource
+ * will set @mask=1 whenever an interrupt is
+ * sent. When using interrupts in Legacy
+ * Interrupt mode the driver must select
+ * @mask_on_assert=0 for proper interrupt
+ * operation.
+ * @coalescing_curr: Coalescing timer current value, in
+ * microseconds. When this value reaches 0
+ * the interrupt resource is again eligible to
+ * send an interrupt. If an interrupt event
+ * is already pending when @coalescing_curr
+ * reaches 0 the pending interrupt will be
+ * sent, otherwise an interrupt will be sent
+ * on the next interrupt assertion event.
+ */
+struct pds_core_intr {
+ u32 coal_init;
+ u32 mask;
+ u16 credits;
+ u16 flags;
+#define PDS_CORE_INTR_F_UNMASK 0x0001
+#define PDS_CORE_INTR_F_TIMER_RESET 0x0002
+ u32 mask_on_assert;
+ u32 coalescing_curr;
+ u32 rsvd6[3];
+};
+
+#ifndef __CHECKER__
+static_assert(sizeof(struct pds_core_intr) == 32);
+#endif /* __CHECKER__ */
+
+#define PDS_CORE_INTR_CTRL_REGS_MAX 2048
+#define PDS_CORE_INTR_CTRL_COAL_MAX 0x3F
+#define PDS_CORE_INTR_INDEX_NOT_ASSIGNED -1
+
+struct pds_core_intr_status {
+ u32 status[2];
+};
+
+/**
+ * enum pds_core_intr_mask_vals - valid values for mask and mask_assert.
+ * @PDS_CORE_INTR_MASK_CLEAR: unmask interrupt.
+ * @PDS_CORE_INTR_MASK_SET: mask interrupt.
+ */
+enum pds_core_intr_mask_vals {
+ PDS_CORE_INTR_MASK_CLEAR = 0,
+ PDS_CORE_INTR_MASK_SET = 1,
+};
+
+/**
+ * enum pds_core_intr_credits_bits - Bitwise composition of credits values.
+ * @PDS_CORE_INTR_CRED_COUNT: bit mask of credit count, no shift needed.
+ * @PDS_CORE_INTR_CRED_COUNT_SIGNED: bit mask of credit count, including sign bit.
+ * @PDS_CORE_INTR_CRED_UNMASK: unmask the interrupt.
+ * @PDS_CORE_INTR_CRED_RESET_COALESCE: reset the coalesce timer.
+ * @PDS_CORE_INTR_CRED_REARM: unmask the interrupt and reset the timer.
+ */
+enum pds_core_intr_credits_bits {
+ PDS_CORE_INTR_CRED_COUNT = 0x7fffu,
+ PDS_CORE_INTR_CRED_COUNT_SIGNED = 0xffffu,
+ PDS_CORE_INTR_CRED_UNMASK = 0x10000u,
+ PDS_CORE_INTR_CRED_RESET_COALESCE = 0x20000u,
+ PDS_CORE_INTR_CRED_REARM = (PDS_CORE_INTR_CRED_UNMASK |
+ PDS_CORE_INTR_CRED_RESET_COALESCE),
+};
+
+static inline void
+pds_core_intr_coal_init(struct pds_core_intr __iomem *intr_ctrl, u32 coal)
+{
+ iowrite32(coal, &intr_ctrl->coal_init);
+}
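+
+/*
+ * Illustrative sketch, not part of the original header: converting a
+ * microsecond coalescing period into device units before writing
+ * @coal_init, following the formula in the register description above.
+ * The mult/div values are assumed to come from the device's identity
+ * information; the helper name is hypothetical.
+ */
+static inline u32
+pds_example_coal_usecs_to_units(u32 coal_usecs, u32 coal_mult, u32 coal_div)
+{
+	if (!coal_div)
+		return 0;	/* no coalescing conversion advertised */
+
+	return coal_usecs * coal_mult / coal_div;
+}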
+
+static inline void
+pds_core_intr_mask(struct pds_core_intr __iomem *intr_ctrl, u32 mask)
+{
+ iowrite32(mask, &intr_ctrl->mask);
+}
+
+static inline void
+pds_core_intr_credits(struct pds_core_intr __iomem *intr_ctrl,
+ u32 cred, u32 flags)
+{
+ if (WARN_ON_ONCE(cred > PDS_CORE_INTR_CRED_COUNT)) {
+ cred = ioread32(&intr_ctrl->credits);
+ cred &= PDS_CORE_INTR_CRED_COUNT_SIGNED;
+ }
+
+ iowrite32(cred | flags, &intr_ctrl->credits);
+}
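+
+/*
+ * Illustrative usage sketch (hypothetical helper): a completion handler
+ * would typically count the events it serviced, then return that many
+ * credits while unmasking and restarting the coalescing timer in a
+ * single credits write.
+ */
+static inline void
+pds_example_intr_rearm(struct pds_core_intr __iomem *intr_ctrl, u32 nevents)
+{
+	pds_core_intr_credits(intr_ctrl, nevents, PDS_CORE_INTR_CRED_REARM);
+}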
+
+static inline void
+pds_core_intr_clean_flags(struct pds_core_intr __iomem *intr_ctrl, u32 flags)
+{
+ u32 cred;
+
+ cred = ioread32(&intr_ctrl->credits);
+ cred &= PDS_CORE_INTR_CRED_COUNT_SIGNED;
+ cred |= flags;
+ iowrite32(cred, &intr_ctrl->credits);
+}
+
+static inline void
+pds_core_intr_clean(struct pds_core_intr __iomem *intr_ctrl)
+{
+ pds_core_intr_clean_flags(intr_ctrl, PDS_CORE_INTR_CRED_RESET_COALESCE);
+}
+
+static inline void
+pds_core_intr_mask_assert(struct pds_core_intr __iomem *intr_ctrl, u32 mask)
+{
+ iowrite32(mask, &intr_ctrl->mask_on_assert);
+}
+
+#endif /* _PDS_INTR_H_ */
diff --git a/include/linux/pe.h b/include/linux/pe.h
index daf09ffffe38..cd2b7275385f 100644
--- a/include/linux/pe.h
+++ b/include/linux/pe.h
@@ -11,128 +11,188 @@
#include <linux/types.h>
/*
- * Linux EFI stub v1.0 adds the following functionality:
- * - Loading initrd from the LINUX_EFI_INITRD_MEDIA_GUID device path,
- * - Loading/starting the kernel from firmware that targets a different
- * machine type, via the entrypoint exposed in the .compat PE/COFF section.
+ * Starting from version v3.0, the major version field should be interpreted as
+ * a bit mask of features supported by the kernel's EFI stub:
+ * - 0x1: initrd loading from the LINUX_EFI_INITRD_MEDIA_GUID device path,
+ * - 0x2: initrd loading using the initrd= command line option, where the file
+ * may be specified using device path notation, and is not required to
+ * reside on the same volume as the loaded kernel image.
*
* The recommended way of loading and starting v1.0 or later kernels is to use
* the LoadImage() and StartImage() EFI boot services, and expose the initrd
* via the LINUX_EFI_INITRD_MEDIA_GUID device path.
*
- * Versions older than v1.0 support initrd loading via the image load options
- * (using initrd=, limited to the volume from which the kernel itself was
- * loaded), or via arch specific means (bootparams, DT, etc).
+ * Versions older than v1.0 may support initrd loading via the image load
+ * options (using initrd=, limited to the volume from which the kernel itself
+ * was loaded), or only via arch specific means (bootparams, DT, etc).
*
- * On x86, LoadImage() and StartImage() can be omitted if the EFI handover
- * protocol is implemented, which can be inferred from the version,
- * handover_offset and xloadflags fields in the bootparams structure.
+ * The minor version field must remain 0x0.
+ * (https://lore.kernel.org/all/efd6f2d4-547c-1378-1faa-53c044dbd297@gmail.com/)
*/
-#define LINUX_EFISTUB_MAJOR_VERSION 0x1
+#define LINUX_EFISTUB_MAJOR_VERSION 0x3
#define LINUX_EFISTUB_MINOR_VERSION 0x0
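+
+/*
+ * Illustrative sketch, not part of the original header: a boot loader
+ * that understands the bit mask interpretation above might test for a
+ * feature like this (variable names are hypothetical):
+ *
+ *	bool have_initrd_media_guid = stub_major >= 0x3 &&
+ *				      (stub_major & 0x1);
+ */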
-#define MZ_MAGIC 0x5a4d /* "MZ" */
+/*
+ * LINUX_PE_MAGIC appears at offset 0x38 into the MS-DOS header of EFI bootable
+ * Linux kernel images that target the architecture as specified by the PE/COFF
+ * header machine type field.
+ */
+#define LINUX_PE_MAGIC 0x818223cd
+
+#define IMAGE_DOS_SIGNATURE 0x5a4d /* "MZ" */
+
+#define IMAGE_NT_SIGNATURE 0x00004550 /* "PE\0\0" */
-#define PE_MAGIC 0x00004550 /* "PE\0\0" */
-#define PE_OPT_MAGIC_PE32 0x010b
-#define PE_OPT_MAGIC_PE32_ROM 0x0107
-#define PE_OPT_MAGIC_PE32PLUS 0x020b
+#define IMAGE_ROM_OPTIONAL_HDR_MAGIC 0x0107 /* ROM image (for R3000/R4000/R10000/ALPHA), without MZ and PE\0\0 sign */
+#define IMAGE_NT_OPTIONAL_HDR32_MAGIC 0x010b /* PE32 executable image */
+#define IMAGE_NT_OPTIONAL_HDR64_MAGIC 0x020b /* PE32+ executable image */
/* machine type */
-#define IMAGE_FILE_MACHINE_UNKNOWN 0x0000
-#define IMAGE_FILE_MACHINE_AM33 0x01d3
-#define IMAGE_FILE_MACHINE_AMD64 0x8664
-#define IMAGE_FILE_MACHINE_ARM 0x01c0
-#define IMAGE_FILE_MACHINE_ARMV7 0x01c4
-#define IMAGE_FILE_MACHINE_ARM64 0xaa64
-#define IMAGE_FILE_MACHINE_EBC 0x0ebc
-#define IMAGE_FILE_MACHINE_I386 0x014c
-#define IMAGE_FILE_MACHINE_IA64 0x0200
-#define IMAGE_FILE_MACHINE_M32R 0x9041
-#define IMAGE_FILE_MACHINE_MIPS16 0x0266
-#define IMAGE_FILE_MACHINE_MIPSFPU 0x0366
-#define IMAGE_FILE_MACHINE_MIPSFPU16 0x0466
-#define IMAGE_FILE_MACHINE_POWERPC 0x01f0
-#define IMAGE_FILE_MACHINE_POWERPCFP 0x01f1
-#define IMAGE_FILE_MACHINE_R4000 0x0166
-#define IMAGE_FILE_MACHINE_RISCV32 0x5032
-#define IMAGE_FILE_MACHINE_RISCV64 0x5064
-#define IMAGE_FILE_MACHINE_RISCV128 0x5128
-#define IMAGE_FILE_MACHINE_SH3 0x01a2
-#define IMAGE_FILE_MACHINE_SH3DSP 0x01a3
-#define IMAGE_FILE_MACHINE_SH3E 0x01a4
-#define IMAGE_FILE_MACHINE_SH4 0x01a6
-#define IMAGE_FILE_MACHINE_SH5 0x01a8
-#define IMAGE_FILE_MACHINE_THUMB 0x01c2
-#define IMAGE_FILE_MACHINE_WCEMIPSV2 0x0169
+#define IMAGE_FILE_MACHINE_UNKNOWN 0x0000 /* Unknown architecture */
+#define IMAGE_FILE_MACHINE_TARGET_HOST 0x0001 /* Interacts with the host and not a WOW64 guest (not for file image) */
+#define IMAGE_FILE_MACHINE_ALPHA_OLD 0x0183 /* DEC Alpha AXP 32-bit (old images) */
+#define IMAGE_FILE_MACHINE_ALPHA 0x0184 /* DEC Alpha AXP 32-bit */
+#define IMAGE_FILE_MACHINE_ALPHA64 0x0284 /* DEC Alpha AXP 64-bit (with 8kB page size) */
+#define IMAGE_FILE_MACHINE_AXP64 IMAGE_FILE_MACHINE_ALPHA64
+#define IMAGE_FILE_MACHINE_AM33 0x01d3 /* Matsushita AM33, now Panasonic MN103 */
+#define IMAGE_FILE_MACHINE_AMD64 0x8664 /* AMD64 (x64) */
+#define IMAGE_FILE_MACHINE_ARM 0x01c0 /* ARM Little-Endian (ARMv4) */
+#define IMAGE_FILE_MACHINE_THUMB 0x01c2 /* ARM Thumb Little-Endian (ARMv4T) */
+#define IMAGE_FILE_MACHINE_ARMNT 0x01c4 /* ARM Thumb-2 Little-Endian (ARMv7) */
+#define IMAGE_FILE_MACHINE_ARMV7 IMAGE_FILE_MACHINE_ARMNT
+#define IMAGE_FILE_MACHINE_ARM64 0xaa64 /* ARM64 Little-Endian (Classic ABI) */
+#define IMAGE_FILE_MACHINE_ARM64EC 0xa641 /* ARM64 Little-Endian (Emulation Compatible ABI for AMD64) */
+#define IMAGE_FILE_MACHINE_ARM64X 0xa64e /* ARM64 Little-Endian (fat binary with both Classic ABI and EC ABI code) */
+#define IMAGE_FILE_MACHINE_CEE 0xc0ee /* COM+ Execution Engine (CLR pure MSIL object files) */
+#define IMAGE_FILE_MACHINE_CEF 0x0cef /* Windows CE 3.0 Common Executable Format (CEF bytecode) */
+#define IMAGE_FILE_MACHINE_CHPE_X86 0x3a64 /* ARM64 Little-Endian (Compiled Hybrid PE ABI for I386) */
+#define IMAGE_FILE_MACHINE_HYBRID_X86 IMAGE_FILE_MACHINE_CHPE_X86
+#define IMAGE_FILE_MACHINE_EBC 0x0ebc /* EFI/UEFI Byte Code */
+#define IMAGE_FILE_MACHINE_I386 0x014c /* Intel 386 (x86) */
+#define IMAGE_FILE_MACHINE_I860 0x014d /* Intel 860 (N10) */
+#define IMAGE_FILE_MACHINE_IA64 0x0200 /* Intel IA-64 (with 8kB page size) */
+#define IMAGE_FILE_MACHINE_LOONGARCH32 0x6232 /* LoongArch 32-bit processor family */
+#define IMAGE_FILE_MACHINE_LOONGARCH64 0x6264 /* LoongArch 64-bit processor family */
+#define IMAGE_FILE_MACHINE_M32R 0x9041 /* Mitsubishi M32R 32-bit Little-Endian */
+#define IMAGE_FILE_MACHINE_M68K 0x0268 /* Motorola 68000 series */
+#define IMAGE_FILE_MACHINE_MIPS16 0x0266 /* MIPS III with MIPS16 ASE Little-Endian */
+#define IMAGE_FILE_MACHINE_MIPSFPU 0x0366 /* MIPS III with FPU Little-Endian */
+#define IMAGE_FILE_MACHINE_MIPSFPU16 0x0466 /* MIPS III with MIPS16 ASE and FPU Little-Endian */
+#define IMAGE_FILE_MACHINE_MPPC_601 0x0601 /* PowerPC 32-bit Big-Endian */
+#define IMAGE_FILE_MACHINE_OMNI 0xace1 /* Microsoft OMNI VM (omniprox.dll) */
+#define IMAGE_FILE_MACHINE_PARISC 0x0290 /* HP PA-RISC */
+#define IMAGE_FILE_MACHINE_POWERPC 0x01f0 /* PowerPC 32-bit Little-Endian */
+#define IMAGE_FILE_MACHINE_POWERPCFP 0x01f1 /* PowerPC 32-bit with FPU Little-Endian */
+#define IMAGE_FILE_MACHINE_POWERPCBE 0x01f2 /* PowerPC 64-bit Big-Endian */
+#define IMAGE_FILE_MACHINE_R3000 0x0162 /* MIPS I Little-Endian */
+#define IMAGE_FILE_MACHINE_R3000_BE 0x0160 /* MIPS I Big-Endian */
+#define IMAGE_FILE_MACHINE_R4000 0x0166 /* MIPS III Little-Endian (with 1kB or 4kB page size) */
+#define IMAGE_FILE_MACHINE_R10000 0x0168 /* MIPS IV Little-Endian */
+#define IMAGE_FILE_MACHINE_RISCV32 0x5032 /* RISC-V 32-bit address space */
+#define IMAGE_FILE_MACHINE_RISCV64 0x5064 /* RISC-V 64-bit address space */
+#define IMAGE_FILE_MACHINE_RISCV128 0x5128 /* RISC-V 128-bit address space */
+#define IMAGE_FILE_MACHINE_SH3 0x01a2 /* Hitachi SH-3 32-bit Little-Endian (with 1kB page size) */
+#define IMAGE_FILE_MACHINE_SH3DSP 0x01a3 /* Hitachi SH-3 DSP 32-bit (with 1kB page size) */
+#define IMAGE_FILE_MACHINE_SH3E 0x01a4 /* Hitachi SH-3E Little-Endian (with 1kB page size) */
+#define IMAGE_FILE_MACHINE_SH4 0x01a6 /* Hitachi SH-4 32-bit Little-Endian (with 1kB page size) */
+#define IMAGE_FILE_MACHINE_SH5 0x01a8 /* Hitachi SH-5 64-bit */
+#define IMAGE_FILE_MACHINE_TAHOE 0x07cc /* Intel EM machine */
+#define IMAGE_FILE_MACHINE_TRICORE 0x0520 /* Infineon AUDO 32-bit */
+#define IMAGE_FILE_MACHINE_WCEMIPSV2 0x0169 /* MIPS Windows CE v2 Little-Endian */
/* flags */
-#define IMAGE_FILE_RELOCS_STRIPPED 0x0001
-#define IMAGE_FILE_EXECUTABLE_IMAGE 0x0002
-#define IMAGE_FILE_LINE_NUMS_STRIPPED 0x0004
-#define IMAGE_FILE_LOCAL_SYMS_STRIPPED 0x0008
-#define IMAGE_FILE_AGGRESSIVE_WS_TRIM 0x0010
-#define IMAGE_FILE_LARGE_ADDRESS_AWARE 0x0020
-#define IMAGE_FILE_16BIT_MACHINE 0x0040
-#define IMAGE_FILE_BYTES_REVERSED_LO 0x0080
-#define IMAGE_FILE_32BIT_MACHINE 0x0100
-#define IMAGE_FILE_DEBUG_STRIPPED 0x0200
-#define IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP 0x0400
-#define IMAGE_FILE_NET_RUN_FROM_SWAP 0x0800
-#define IMAGE_FILE_SYSTEM 0x1000
-#define IMAGE_FILE_DLL 0x2000
-#define IMAGE_FILE_UP_SYSTEM_ONLY 0x4000
-#define IMAGE_FILE_BYTES_REVERSED_HI 0x8000
-
-#define IMAGE_FILE_OPT_ROM_MAGIC 0x107
-#define IMAGE_FILE_OPT_PE32_MAGIC 0x10b
-#define IMAGE_FILE_OPT_PE32_PLUS_MAGIC 0x20b
-
-#define IMAGE_SUBSYSTEM_UNKNOWN 0
-#define IMAGE_SUBSYSTEM_NATIVE 1
-#define IMAGE_SUBSYSTEM_WINDOWS_GUI 2
-#define IMAGE_SUBSYSTEM_WINDOWS_CUI 3
-#define IMAGE_SUBSYSTEM_POSIX_CUI 7
-#define IMAGE_SUBSYSTEM_WINDOWS_CE_GUI 9
-#define IMAGE_SUBSYSTEM_EFI_APPLICATION 10
-#define IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER 11
-#define IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER 12
-#define IMAGE_SUBSYSTEM_EFI_ROM_IMAGE 13
-#define IMAGE_SUBSYSTEM_XBOX 14
-
-#define IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE 0x0040
-#define IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY 0x0080
-#define IMAGE_DLL_CHARACTERISTICS_NX_COMPAT 0x0100
-#define IMAGE_DLLCHARACTERISTICS_NO_ISOLATION 0x0200
-#define IMAGE_DLLCHARACTERISTICS_NO_SEH 0x0400
-#define IMAGE_DLLCHARACTERISTICS_NO_BIND 0x0800
-#define IMAGE_DLLCHARACTERISTICS_WDM_DRIVER 0x2000
-#define IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE 0x8000
-
-/* they actually defined 0x00000000 as well, but I think we'll skip that one. */
-#define IMAGE_SCN_RESERVED_0 0x00000001
-#define IMAGE_SCN_RESERVED_1 0x00000002
-#define IMAGE_SCN_RESERVED_2 0x00000004
-#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* don't pad - obsolete */
-#define IMAGE_SCN_RESERVED_3 0x00000010
+#define IMAGE_FILE_RELOCS_STRIPPED 0x0001 /* Relocation info stripped from file */
+#define IMAGE_FILE_EXECUTABLE_IMAGE 0x0002 /* File is executable (i.e. no unresolved external references) */
+#define IMAGE_FILE_LINE_NUMS_STRIPPED 0x0004 /* Line numbers stripped from file */
+#define IMAGE_FILE_LOCAL_SYMS_STRIPPED 0x0008 /* Local symbols stripped from file */
+#define IMAGE_FILE_AGGRESSIVE_WS_TRIM 0x0010 /* Aggressively trim working set */
+#define IMAGE_FILE_LARGE_ADDRESS_AWARE 0x0020 /* App can handle >2gb addresses (image can be loaded at address above 2GB) */
+#define IMAGE_FILE_16BIT_MACHINE 0x0040 /* 16 bit word machine */
+#define IMAGE_FILE_BYTES_REVERSED_LO 0x0080 /* Bytes of machine word are reversed (should be set together with IMAGE_FILE_BYTES_REVERSED_HI) */
+#define IMAGE_FILE_32BIT_MACHINE 0x0100 /* 32 bit word machine */
+#define IMAGE_FILE_DEBUG_STRIPPED 0x0200 /* Debugging info stripped from file in .DBG file */
+#define IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP 0x0400 /* If Image is on removable media, copy and run from the swap file */
+#define IMAGE_FILE_NET_RUN_FROM_SWAP 0x0800 /* If Image is on Net, copy and run from the swap file */
+#define IMAGE_FILE_SYSTEM 0x1000 /* System kernel-mode file (can't be loaded in user-mode) */
+#define IMAGE_FILE_DLL 0x2000 /* File is a DLL */
+#define IMAGE_FILE_UP_SYSTEM_ONLY 0x4000 /* File should only be run on a UP (uniprocessor) machine */
+#define IMAGE_FILE_BYTES_REVERSED_HI 0x8000 /* Bytes of machine word are reversed (should be set together with IMAGE_FILE_BYTES_REVERSED_LO) */
+
+/* subsys */
+#define IMAGE_SUBSYSTEM_UNKNOWN 0 /* Unknown subsystem */
+#define IMAGE_SUBSYSTEM_NATIVE 1 /* No subsystem required (NT device drivers and NT native system processes) */
+#define IMAGE_SUBSYSTEM_WINDOWS_GUI 2 /* Windows graphical user interface (GUI) subsystem */
+#define IMAGE_SUBSYSTEM_WINDOWS_CUI 3 /* Windows character-mode user interface (CUI) subsystem */
+#define IMAGE_SUBSYSTEM_WINDOWS_OLD_CE_GUI 4 /* Old Windows CE subsystem */
+#define IMAGE_SUBSYSTEM_OS2_CUI 5 /* OS/2 CUI subsystem */
+#define IMAGE_SUBSYSTEM_RESERVED_6 6
+#define IMAGE_SUBSYSTEM_POSIX_CUI 7 /* POSIX CUI subsystem */
+#define IMAGE_SUBSYSTEM_MMOSA 8 /* MMOSA/Native Win32E */
+#define IMAGE_SUBSYSTEM_WINDOWS_CE_GUI 9 /* Windows CE subsystem */
+#define IMAGE_SUBSYSTEM_EFI_APPLICATION 10 /* Extensible Firmware Interface (EFI) application */
+#define IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER 11 /* EFI driver with boot services */
+#define IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER 12 /* EFI driver with run-time services */
+#define IMAGE_SUBSYSTEM_EFI_ROM_IMAGE 13 /* EFI ROM image */
+#define IMAGE_SUBSYSTEM_XBOX 14 /* Xbox system */
+#define IMAGE_SUBSYSTEM_RESERVED_15 15
+#define IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION 16 /* Windows Boot application */
+#define IMAGE_SUBSYSTEM_XBOX_CODE_CATALOG 17 /* Xbox Code Catalog */
+
+/* dll_flags */
+#define IMAGE_LIBRARY_PROCESS_INIT 0x0001 /* DLL initialization function called just after process initialization */
+#define IMAGE_LIBRARY_PROCESS_TERM 0x0002 /* DLL initialization function called just before process termination */
+#define IMAGE_LIBRARY_THREAD_INIT 0x0004 /* DLL initialization function called just after thread initialization */
+#define IMAGE_LIBRARY_THREAD_TERM 0x0008 /* DLL initialization function called just before thread termination */
+#define IMAGE_DLLCHARACTERISTICS_RESERVED_4 0x0010
+#define IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA 0x0020 /* ASLR with 64 bit address space (image can be loaded at address above 4GB) */
+#define IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE 0x0040 /* The DLL can be relocated at load time */
+#define IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY 0x0080 /* Code integrity checks are forced */
+#define IMAGE_DLLCHARACTERISTICS_NX_COMPAT 0x0100 /* Image is compatible with data execution prevention */
+#define IMAGE_DLLCHARACTERISTICS_NO_ISOLATION 0x0200 /* Image is isolation aware, but should not be isolated (prevents loading of manifest file) */
+#define IMAGE_DLLCHARACTERISTICS_NO_SEH 0x0400 /* Image does not use SEH, no SE handler may reside in this image */
+#define IMAGE_DLLCHARACTERISTICS_NO_BIND 0x0800 /* Do not bind the image */
+#define IMAGE_DLLCHARACTERISTICS_X86_THUNK 0x1000 /* Image is a Wx86 Thunk DLL (for non-x86/risc DLL files) */
+#define IMAGE_DLLCHARACTERISTICS_APPCONTAINER 0x1000 /* Image should execute in an AppContainer (for EXE Metro Apps in Windows 8) */
+#define IMAGE_DLLCHARACTERISTICS_WDM_DRIVER 0x2000 /* A WDM driver */
+#define IMAGE_DLLCHARACTERISTICS_GUARD_CF 0x4000 /* Image supports Control Flow Guard */
+#define IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE 0x8000 /* The image is terminal server (Remote Desktop Services) aware */
+
+/* IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS flags */
+#define IMAGE_DLLCHARACTERISTICS_EX_CET_COMPAT 0x0001 /* Image is Control-flow Enforcement Technology Shadow Stack compatible */
+#define IMAGE_DLLCHARACTERISTICS_EX_CET_COMPAT_STRICT_MODE 0x0002 /* CET is enforced in strict mode */
+#define IMAGE_DLLCHARACTERISTICS_EX_CET_SET_CONTEXT_IP_VALIDATION_RELAXED_MODE 0x0004 /* Relaxed mode for Context IP Validation under CET is allowed */
+#define IMAGE_DLLCHARACTERISTICS_EX_CET_DYNAMIC_APIS_ALLOW_IN_PROC 0x0008 /* Use of dynamic APIs is restricted to processes only */
+#define IMAGE_DLLCHARACTERISTICS_EX_CET_RESERVED_1 0x0010
+#define IMAGE_DLLCHARACTERISTICS_EX_CET_RESERVED_2 0x0020
+#define IMAGE_DLLCHARACTERISTICS_EX_FORWARD_CFI_COMPAT 0x0040 /* All branch targets in all image code sections are annotated with forward-edge control flow integrity guard instructions */
+#define IMAGE_DLLCHARACTERISTICS_EX_HOTPATCH_COMPATIBLE 0x0080 /* Image can be modified while in use, hotpatch-compatible */
+
+/* section_header flags */
+#define IMAGE_SCN_SCALE_INDEX 0x00000001 /* address of tls index is scaled (multiplied by 4; for .tls section on MIPS only) */
+#define IMAGE_SCN_TYPE_NO_LOAD 0x00000002 /* reserved */
+#define IMAGE_SCN_TYPE_GROUPED 0x00000004 /* obsolete (used for 16-bit offset code) */
+#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* .o only - don't pad - obsolete (same as IMAGE_SCN_ALIGN_1BYTES) */
+#define IMAGE_SCN_TYPE_COPY 0x00000010 /* reserved */
#define IMAGE_SCN_CNT_CODE 0x00000020 /* .text */
#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* .data */
#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* .bss */
-#define IMAGE_SCN_LNK_OTHER 0x00000100 /* reserved */
-#define IMAGE_SCN_LNK_INFO 0x00000200 /* .drectve comments */
-#define IMAGE_SCN_RESERVED_4 0x00000400
+#define IMAGE_SCN_LNK_OTHER 0x00000100 /* .o only - other type than code, data or info */
+#define IMAGE_SCN_LNK_INFO 0x00000200 /* .o only - .drectve comments */
+#define IMAGE_SCN_LNK_OVERLAY 0x00000400 /* section contains overlay */
#define IMAGE_SCN_LNK_REMOVE 0x00000800 /* .o only - scn to be rm'd*/
#define IMAGE_SCN_LNK_COMDAT 0x00001000 /* .o only - COMDAT data */
-#define IMAGE_SCN_RESERVED_5 0x00002000 /* spec omits this */
-#define IMAGE_SCN_RESERVED_6 0x00004000 /* spec omits this */
-#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data */
-/* spec lists 0x20000 twice, I suspect they meant 0x10000 for one of them */
-#define IMAGE_SCN_MEM_PURGEABLE 0x00010000 /* reserved for "future" use */
-#define IMAGE_SCN_16BIT 0x00020000 /* reserved for "future" use */
-#define IMAGE_SCN_LOCKED 0x00040000 /* reserved for "future" use */
-#define IMAGE_SCN_PRELOAD 0x00080000 /* reserved for "future" use */
+#define IMAGE_SCN_RESERVED_13 0x00002000 /* spec omits this */
+#define IMAGE_SCN_MEM_PROTECTED 0x00004000 /* section is memory protected (for M68K) */
+#define IMAGE_SCN_NO_DEFER_SPEC_EXC 0x00004000 /* reset speculative exceptions handling bits in the TLB entries (for non-M68K) */
+#define IMAGE_SCN_MEM_FARDATA 0x00008000 /* section uses FAR_EXTERNAL relocations (for M68K) */
+#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data (for non-M68K) */
+#define IMAGE_SCN_MEM_SYSHEAP 0x00010000 /* use system heap (for M68K) */
+#define IMAGE_SCN_MEM_PURGEABLE 0x00020000 /* section can be released from RAM (for M68K) */
+#define IMAGE_SCN_MEM_16BIT 0x00020000 /* section is 16-bit (for non-M68K where it makes sense: I386, THUMB, MIPS16, MIPSFPU16, ...) */
+#define IMAGE_SCN_MEM_LOCKED 0x00040000 /* prevent the section from being moved (for M68K and .o I386) */
+#define IMAGE_SCN_MEM_PRELOAD 0x00080000 /* section is preload to RAM (for M68K and .o I386) */
/* and here they just stuck a 1-byte integer in the middle of a bitfield */
-#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* it does what it says on the box */
+#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* .o only - it does what it says on the box */
#define IMAGE_SCN_ALIGN_2BYTES 0x00200000
#define IMAGE_SCN_ALIGN_4BYTES 0x00300000
#define IMAGE_SCN_ALIGN_8BYTES 0x00400000
@@ -146,7 +206,9 @@
#define IMAGE_SCN_ALIGN_2048BYTES 0x00c00000
#define IMAGE_SCN_ALIGN_4096BYTES 0x00d00000
#define IMAGE_SCN_ALIGN_8192BYTES 0x00e00000
-#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* extended relocations */
+#define IMAGE_SCN_ALIGN_RESERVED 0x00f00000
+#define IMAGE_SCN_ALIGN_MASK 0x00f00000
+#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* .o only - extended relocations */
#define IMAGE_SCN_MEM_DISCARDABLE 0x02000000 /* scn can be discarded */
#define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* cannot be cached */
#define IMAGE_SCN_MEM_NOT_PAGED 0x08000000 /* not pageable */
@@ -155,7 +217,28 @@
#define IMAGE_SCN_MEM_READ 0x40000000 /* readable */
#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */
-#define IMAGE_DEBUG_TYPE_CODEVIEW 2
+#define IMAGE_DEBUG_TYPE_UNKNOWN 0 /* Unknown value, ignored by all tools */
+#define IMAGE_DEBUG_TYPE_COFF 1 /* COFF debugging information */
+#define IMAGE_DEBUG_TYPE_CODEVIEW 2 /* CodeView debugging information or Visual C++ Program Database debugging information */
+#define IMAGE_DEBUG_TYPE_FPO 3 /* Frame pointer omission (FPO) information */
+#define IMAGE_DEBUG_TYPE_MISC 4 /* Location of DBG file with CodeView debugging information */
+#define IMAGE_DEBUG_TYPE_EXCEPTION 5 /* Exception information, copy of .pdata section */
+#define IMAGE_DEBUG_TYPE_FIXUP 6 /* Fixup information */
+#define IMAGE_DEBUG_TYPE_OMAP_TO_SRC 7 /* The mapping from an RVA in image to an RVA in source image */
+#define IMAGE_DEBUG_TYPE_OMAP_FROM_SRC 8 /* The mapping from an RVA in source image to an RVA in image */
+#define IMAGE_DEBUG_TYPE_BORLAND 9 /* Borland debugging information */
+#define IMAGE_DEBUG_TYPE_RESERVED10 10 /* Coldpath / Hotpatch debug information */
+#define IMAGE_DEBUG_TYPE_CLSID 11 /* CLSID */
+#define IMAGE_DEBUG_TYPE_VC_FEATURE 12 /* Visual C++ counts / statistics */
+#define IMAGE_DEBUG_TYPE_POGO 13 /* COFF group information, data for profile-guided optimization */
+#define IMAGE_DEBUG_TYPE_ILTCG 14 /* Incremental link-time code generation */
+#define IMAGE_DEBUG_TYPE_MPX 15 /* Intel Memory Protection Extensions */
+#define IMAGE_DEBUG_TYPE_REPRO 16 /* PE determinism or reproducibility */
+#define IMAGE_DEBUG_TYPE_EMBEDDED_PORTABLE_PDB 17 /* Embedded Portable PDB debugging information */
+#define IMAGE_DEBUG_TYPE_SPGO 18 /* Sample profile-guided optimization */
+#define IMAGE_DEBUG_TYPE_PDBCHECKSUM 19 /* PDB Checksum */
+#define IMAGE_DEBUG_TYPE_EX_DLLCHARACTERISTICS 20 /* Extended DLL characteristics bits */
+#define IMAGE_DEBUG_TYPE_PERFMAP 21 /* Location of associated Ready To Run PerfMap file */
#ifndef __ASSEMBLY__
@@ -221,7 +304,7 @@ struct pe32_opt_hdr {
uint16_t image_minor; /* minor image version */
uint16_t subsys_major; /* major subsystem version */
uint16_t subsys_minor; /* minor subsystem version */
- uint32_t win32_version; /* reserved, must be 0 */
+ uint32_t win32_version; /* win32 version reported at runtime */
uint32_t image_size; /* image size */
uint32_t header_size; /* header size rounded up to
file_align */
@@ -232,7 +315,7 @@ struct pe32_opt_hdr {
uint32_t stack_size; /* amt of stack required */
uint32_t heap_size_req; /* amt of heap requested */
uint32_t heap_size; /* amt of heap required */
- uint32_t loader_flags; /* reserved, must be 0 */
+ uint32_t loader_flags; /* loader flags */
uint32_t data_dirs; /* number of data dir entries */
};
@@ -255,7 +338,7 @@ struct pe32plus_opt_hdr {
uint16_t image_minor; /* minor image version */
uint16_t subsys_major; /* major subsystem version */
uint16_t subsys_minor; /* minor subsystem version */
- uint32_t win32_version; /* reserved, must be 0 */
+ uint32_t win32_version; /* win32 version reported at runtime */
uint32_t image_size; /* image size */
uint32_t header_size; /* header size rounded up to
file_align */
@@ -266,7 +349,7 @@ struct pe32plus_opt_hdr {
uint64_t stack_size; /* amt of stack required */
uint64_t heap_size_req; /* amt of heap requested */
uint64_t heap_size; /* amt of heap required */
- uint32_t loader_flags; /* reserved, must be 0 */
+ uint32_t loader_flags; /* loader flags */
uint32_t data_dirs; /* number of data dir entries */
};
@@ -287,10 +370,10 @@ struct data_directory {
struct data_dirent global_ptr; /* global pointer reg. Size=0 */
struct data_dirent tls; /* .tls */
struct data_dirent load_config; /* load configuration structure */
- struct data_dirent bound_imports; /* no idea */
+ struct data_dirent bound_imports; /* bound import table */
struct data_dirent import_addrs; /* import address table */
struct data_dirent delay_imports; /* delay-load import table */
- struct data_dirent clr_runtime_hdr; /* .cor (object only) */
+ struct data_dirent clr_runtime_hdr; /* .cor (clr/.net executables) */
struct data_dirent reserved;
};
diff --git a/include/linux/peci-cpu.h b/include/linux/peci-cpu.h
new file mode 100644
index 000000000000..601cdd086bf6
--- /dev/null
+++ b/include/linux/peci-cpu.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2021 Intel Corporation */
+
+#ifndef __LINUX_PECI_CPU_H
+#define __LINUX_PECI_CPU_H
+
+#include <linux/types.h>
+
+/* Copied from x86 <asm/processor.h> */
+#define X86_VENDOR_INTEL 0
+
+/* Copied from x86 <asm/cpu_device_id.h> */
+#define VFM_MODEL_BIT 0
+#define VFM_FAMILY_BIT 8
+#define VFM_VENDOR_BIT 16
+#define VFM_RSVD_BIT 24
+
+#define VFM_MODEL_MASK GENMASK(VFM_FAMILY_BIT - 1, VFM_MODEL_BIT)
+#define VFM_FAMILY_MASK GENMASK(VFM_VENDOR_BIT - 1, VFM_FAMILY_BIT)
+#define VFM_VENDOR_MASK GENMASK(VFM_RSVD_BIT - 1, VFM_VENDOR_BIT)
+
+#define VFM_MODEL(vfm) (((vfm) & VFM_MODEL_MASK) >> VFM_MODEL_BIT)
+#define VFM_FAMILY(vfm) (((vfm) & VFM_FAMILY_MASK) >> VFM_FAMILY_BIT)
+#define VFM_VENDOR(vfm) (((vfm) & VFM_VENDOR_MASK) >> VFM_VENDOR_BIT)
+
+#define VFM_MAKE(_vendor, _family, _model) ( \
+ ((_model) << VFM_MODEL_BIT) | \
+ ((_family) << VFM_FAMILY_BIT) | \
+ ((_vendor) << VFM_VENDOR_BIT) \
+)
+/* End of copied code */
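+
+/*
+ * Illustrative sketch (hypothetical helper, arbitrary example values):
+ * packing and unpacking a vendor/family/model value with the VFM
+ * macros copied above.
+ */
+static inline bool peci_example_vfm_roundtrip(void)
+{
+	u32 vfm = VFM_MAKE(X86_VENDOR_INTEL, 6, 0x8f);
+
+	return VFM_VENDOR(vfm) == X86_VENDOR_INTEL &&
+	       VFM_FAMILY(vfm) == 6 &&
+	       VFM_MODEL(vfm) == 0x8f;
+}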
+
+#include "../../arch/x86/include/asm/intel-family.h"
+
+#define PECI_PCS_PKG_ID 0 /* Package Identifier Read */
+#define PECI_PKG_ID_CPU_ID 0x0000 /* CPUID Info */
+#define PECI_PKG_ID_PLATFORM_ID 0x0001 /* Platform ID */
+#define PECI_PKG_ID_DEVICE_ID 0x0002 /* Uncore Device ID */
+#define PECI_PKG_ID_MAX_THREAD_ID 0x0003 /* Max Thread ID */
+#define PECI_PKG_ID_MICROCODE_REV 0x0004 /* CPU Microcode Update Revision */
+#define PECI_PKG_ID_MCA_ERROR_LOG 0x0005 /* Machine Check Status */
+#define PECI_PCS_MODULE_TEMP 9 /* Per Core DTS Temperature Read */
+#define PECI_PCS_THERMAL_MARGIN 10 /* DTS thermal margin */
+#define PECI_PCS_DDR_DIMM_TEMP 14 /* DDR DIMM Temperature */
+#define PECI_PCS_TEMP_TARGET 16 /* Temperature Target Read */
+#define PECI_PCS_TDP_UNITS 30 /* Units for power/energy registers */
+
+struct peci_device;
+
+int peci_temp_read(struct peci_device *device, s16 *temp_raw);
+
+int peci_pcs_read(struct peci_device *device, u8 index,
+ u16 param, u32 *data);
+
+int peci_pci_local_read(struct peci_device *device, u8 bus, u8 dev,
+ u8 func, u16 reg, u32 *data);
+
+int peci_ep_pci_local_read(struct peci_device *device, u8 seg,
+ u8 bus, u8 dev, u8 func, u16 reg, u32 *data);
+
+int peci_mmio_read(struct peci_device *device, u8 bar, u8 seg,
+ u8 bus, u8 dev, u8 func, u64 address, u32 *data);
+
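+/*
+ * Illustrative usage sketch (hypothetical helper): reading the package
+ * temperature target through the PCS interface declared above; a
+ * parameter value of 0 is assumed to select the default instance.
+ */
+static inline int peci_example_read_temp_target(struct peci_device *device,
+						u32 *data)
+{
+	return peci_pcs_read(device, PECI_PCS_TEMP_TARGET, 0, data);
+}
+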
+#endif /* __LINUX_PECI_CPU_H */
diff --git a/include/linux/peci.h b/include/linux/peci.h
new file mode 100644
index 000000000000..3e0bc37591d6
--- /dev/null
+++ b/include/linux/peci.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2018-2021 Intel Corporation */
+
+#ifndef __LINUX_PECI_H
+#define __LINUX_PECI_H
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/*
+ * Currently we don't support any PECI command over 32 bytes.
+ */
+#define PECI_REQUEST_MAX_BUF_SIZE 32
+
+struct peci_controller;
+struct peci_request;
+
+/**
+ * struct peci_controller_ops - PECI controller specific methods
+ * @xfer: PECI transfer function
+ *
+ * PECI controllers may have different hardware interfaces - the drivers
+ * implementing PECI controllers can use this structure to abstract away those
+ * differences by exposing a common interface for PECI core.
+ */
+struct peci_controller_ops {
+ int (*xfer)(struct peci_controller *controller, u8 addr, struct peci_request *req);
+};
+
+/**
+ * struct peci_controller - PECI controller
+ * @dev: device object to register PECI controller to the device model
+ * @ops: pointer to device specific controller operations
+ * @bus_lock: lock used to protect multiple callers
+ * @id: PECI controller ID
+ *
+ * PECI controllers usually connect to their drivers using a non-PECI bus,
+ * such as the platform bus.
+ * Each PECI controller can communicate with one or more PECI devices.
+ */
+struct peci_controller {
+ struct device dev;
+ const struct peci_controller_ops *ops;
+ struct mutex bus_lock; /* held for the duration of xfer */
+ u8 id;
+};
+
+struct peci_controller *devm_peci_controller_add(struct device *parent,
+ const struct peci_controller_ops *ops);
+
+static inline struct peci_controller *to_peci_controller(void *d)
+{
+ return container_of(d, struct peci_controller, dev);
+}
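+
+/*
+ * Illustrative sketch, not part of the original header: the skeleton of
+ * a controller driver wiring its hardware-specific transfer routine
+ * into the PECI core.  my_hw_xfer() and my_probe() are hypothetical,
+ * and devm_peci_controller_add() is assumed to return an ERR_PTR on
+ * failure.
+ *
+ *	static const struct peci_controller_ops my_ops = {
+ *		.xfer = my_hw_xfer,
+ *	};
+ *
+ *	static int my_probe(struct platform_device *pdev)
+ *	{
+ *		struct peci_controller *ctrl;
+ *
+ *		ctrl = devm_peci_controller_add(&pdev->dev, &my_ops);
+ *		return PTR_ERR_OR_ZERO(ctrl);
+ *	}
+ */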
+
+/**
+ * struct peci_device - PECI device
+ * @dev: device object to register PECI device to the device model
+ * @info: PECI device characteristics
+ * @info.x86_vfm: device vendor-family-model
+ * @info.peci_revision: PECI revision supported by the PECI device
+ * @info.socket_id: the socket ID represented by the PECI device
+ * @addr: address used on the PECI bus connected to the parent controller
+ * @deleted: indicates that PECI device was already deleted
+ *
+ * A peci_device identifies a single device (i.e. CPU) connected to a PECI bus.
+ * The behaviour exposed to the rest of the system is defined by the PECI driver
+ * managing the device.
+ */
+struct peci_device {
+ struct device dev;
+ struct {
+ u32 x86_vfm;
+ u8 peci_revision;
+ u8 socket_id;
+ } info;
+ u8 addr;
+ bool deleted;
+};
+
+static inline struct peci_device *to_peci_device(struct device *d)
+{
+ return container_of(d, struct peci_device, dev);
+}
+
+/**
+ * struct peci_request - PECI request
+ * @device: PECI device to which the request is sent
+ * @tx: TX buffer specific data
+ * @tx.buf: TX buffer
+ * @tx.len: transfer data length in bytes
+ * @rx: RX buffer specific data
+ * @rx.buf: RX buffer
+ * @rx.len: received data length in bytes
+ *
+ * A peci_request represents a request issued by PECI originator (TX) and
+ * a response received from PECI responder (RX).
+ */
+struct peci_request {
+ struct peci_device *device;
+ struct {
+ u8 buf[PECI_REQUEST_MAX_BUF_SIZE];
+ u8 len;
+ } rx, tx;
+};
+
+#endif /* __LINUX_PECI_H */
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index dff7040f629a..43c854a273c3 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -26,13 +26,11 @@
#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif
-#define PER_CPU_FIRST_SECTION "..first"
#else
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
-#define PER_CPU_FIRST_SECTION ""
#endif
@@ -54,7 +52,7 @@
__section(".discard") __attribute__((unused))
/*
- * s390 and alpha modules require percpu variables to be defined as
+ * alpha modules require percpu variables to be defined as
* weak to force the compiler to generate GOT based external
* references for them. This is necessary because percpu sections
* will be located outside of the usually addressable area.
@@ -65,14 +63,15 @@
* 1. The symbol must be globally unique, even the static ones.
* 2. Static percpu variables cannot be defined inside a function.
*
- * Archs which need weak percpu definitions should define
- * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
+ * Archs which need weak percpu definitions should set
+ * CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU when necessary.
*
* To ensure that the generic code observes the above two
* restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak
* definition is used for all cases.
*/
-#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
+#if (defined(CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU) && defined(MODULE)) || \
+ defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
* __pcpu_scope_* dummy variable is used to enforce scope. It
* receives the static modifier when it's used in front of
@@ -115,14 +114,17 @@
DEFINE_PER_CPU_SECTION(type, name, "")
/*
- * Declaration/definition used for per-CPU variables that must come first in
- * the set of variables.
+ * Declaration/definition used for per-CPU variables that are frequently
+ * accessed and should be in a single cacheline.
+ *
+ * For use only by architecture and core code. Only use scalar or pointer
+ * types to maximize density.
*/
-#define DECLARE_PER_CPU_FIRST(type, name) \
- DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
+#define DECLARE_PER_CPU_CACHE_HOT(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, "..hot.." #name)
-#define DEFINE_PER_CPU_FIRST(type, name) \
- DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
+#define DEFINE_PER_CPU_CACHE_HOT(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, "..hot.." #name)
/*
* Declaration/definition used for per-CPU variables that must be cacheline
@@ -220,15 +222,17 @@ do { \
(void)__vpp_verify; \
} while (0)
+#define PERCPU_PTR(__p) \
+ (TYPEOF_UNQUAL(*(__p)) __force __kernel *)((__force unsigned long)(__p))
+
#ifdef CONFIG_SMP
/*
- * Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE()
- * to prevent the compiler from making incorrect assumptions about the
- * pointer value. The weird cast keeps both GCC and sparse happy.
+ * Add an offset to a pointer. Use RELOC_HIDE() to prevent the compiler
+ * from making incorrect assumptions about the pointer value.
*/
#define SHIFT_PERCPU_PTR(__p, __offset) \
- RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))
+ RELOC_HIDE(PERCPU_PTR(__p), (__offset))
#define per_cpu_ptr(ptr, cpu) \
({ \
@@ -254,13 +258,13 @@ do { \
#else /* CONFIG_SMP */
-#define VERIFY_PERCPU_PTR(__p) \
+#define per_cpu_ptr(ptr, cpu) \
({ \
- __verify_pcpu_ptr(__p); \
- (typeof(*(__p)) __kernel __force *)(__p); \
+ (void)(cpu); \
+ __verify_pcpu_ptr(ptr); \
+ PERCPU_PTR(ptr); \
})
-#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
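+
+/*
+ * Illustrative usage sketch (hypothetical names): summing a dynamically
+ * allocated per-CPU counter with per_cpu_ptr():
+ *
+ *	unsigned long __percpu *cnt = alloc_percpu(unsigned long);
+ *	unsigned long sum = 0;
+ *	int cpu;
+ *
+ *	for_each_possible_cpu(cpu)
+ *		sum += *per_cpu_ptr(cnt, cpu);
+ */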
@@ -310,12 +314,12 @@ extern void __bad_size_call_parameter(void);
#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
-static inline void __this_cpu_preempt_check(const char *op) { }
+static __always_inline void __this_cpu_preempt_check(const char *op) { }
#endif
#define __pcpu_size_call_return(stem, variable) \
({ \
- typeof(variable) pscr_ret__; \
+ TYPEOF_UNQUAL(variable) pscr_ret__; \
__verify_pcpu_ptr(&(variable)); \
switch(sizeof(variable)) { \
case 1: pscr_ret__ = stem##1(variable); break; \
@@ -330,7 +334,7 @@ static inline void __this_cpu_preempt_check(const char *op) { }
#define __pcpu_size_call_return2(stem, variable, ...) \
({ \
- typeof(variable) pscr2_ret__; \
+ TYPEOF_UNQUAL(variable) pscr2_ret__; \
__verify_pcpu_ptr(&(variable)); \
switch(sizeof(variable)) { \
case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
@@ -343,31 +347,19 @@ static inline void __this_cpu_preempt_check(const char *op) { }
pscr2_ret__; \
})
-/*
- * Special handling for cmpxchg_double. cmpxchg_double is passed two
- * percpu variables. The first has to be aligned to a double word
- * boundary and the second has to follow directly thereafter.
- * We enforce this on all architectures even if they don't support
- * a double cmpxchg instruction, since it's a cheap requirement, and it
- * avoids breaking the requirement for architectures with the instruction.
- */
-#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
+#define __pcpu_size_call_return2bool(stem, variable, ...) \
({ \
- bool pdcrb_ret__; \
- __verify_pcpu_ptr(&(pcp1)); \
- BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \
- VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1))); \
- VM_BUG_ON((unsigned long)(&(pcp2)) != \
- (unsigned long)(&(pcp1)) + sizeof(pcp1)); \
- switch(sizeof(pcp1)) { \
- case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \
- case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \
- case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \
- case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \
+ bool pscr2_ret__; \
+ __verify_pcpu_ptr(&(variable)); \
+ switch(sizeof(variable)) { \
+ case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
+ case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \
+ case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \
+ case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \
default: \
__bad_size_call_parameter(); break; \
} \
- pdcrb_ret__; \
+ pscr2_ret__; \
})
#define __pcpu_size_call(stem, variable, ...) \
@@ -384,7 +376,7 @@ do { \
} while (0)
/*
- * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
+ * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@gentwo.org>
*
* Optimized manipulation for memory allocated through the per cpu
* allocator or for addresses of per cpu variables.
@@ -412,7 +404,7 @@ do { \
* instead.
*
* If there is no other protection through preempt disable and/or disabling
- * interupts then one of these RMW operations can show unexpected behavior
+ * interrupts then one of these RMW operations can show unexpected behavior
* because the execution thread was rescheduled on another processor or an
* interrupt occurred and the same percpu variable was modified from the
* interrupt context.
@@ -426,9 +418,8 @@ do { \
#define raw_cpu_xchg(pcp, nval) __pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
#define raw_cpu_cmpxchg(pcp, oval, nval) \
__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
-#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
-
+#define raw_cpu_try_cmpxchg(pcp, ovalp, nval) \
+ __pcpu_size_call_return2bool(raw_cpu_try_cmpxchg_, pcp, ovalp, nval)
#define raw_cpu_sub(pcp, val) raw_cpu_add(pcp, -(val))
#define raw_cpu_inc(pcp) raw_cpu_add(pcp, 1)
#define raw_cpu_dec(pcp) raw_cpu_sub(pcp, 1)
@@ -488,9 +479,10 @@ do { \
raw_cpu_cmpxchg(pcp, oval, nval); \
})
-#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-({ __this_cpu_preempt_check("cmpxchg_double"); \
- raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2); \
+#define __this_cpu_try_cmpxchg(pcp, ovalp, nval) \
+({ \
+ __this_cpu_preempt_check("try_cmpxchg"); \
+ raw_cpu_try_cmpxchg(pcp, ovalp, nval); \
})
#define __this_cpu_sub(pcp, val) __this_cpu_add(pcp, -(typeof(pcp))(val))
@@ -513,9 +505,8 @@ do { \
#define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
#define this_cpu_cmpxchg(pcp, oval, nval) \
__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
-#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
- __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
-
+#define this_cpu_try_cmpxchg(pcp, ovalp, nval) \
+ __pcpu_size_call_return2bool(this_cpu_try_cmpxchg_, pcp, ovalp, nval)
#define this_cpu_sub(pcp, val) this_cpu_add(pcp, -(typeof(pcp))(val))
#define this_cpu_inc(pcp) this_cpu_add(pcp, 1)
#define this_cpu_dec(pcp) this_cpu_sub(pcp, 1)
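+
+/*
+ * Illustrative usage sketch for the try_cmpxchg() form introduced
+ * above: on failure the expected old value is updated in place, so the
+ * retry loop avoids a separate re-read (names are hypothetical):
+ *
+ *	typeof(pcp) old = this_cpu_read(pcp);
+ *
+ *	while (!this_cpu_try_cmpxchg(pcp, &old, old + 1))
+ *		;
+ */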
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 16c35a728b4c..d73a1c08c3e3 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -51,9 +51,9 @@
#define _LINUX_PERCPU_REFCOUNT_H
#include <linux/atomic.h>
-#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
+#include <linux/types.h>
#include <linux/gfp.h>
struct percpu_ref;
@@ -213,7 +213,7 @@ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
* percpu_ref_get - increment a percpu refcount
* @ref: percpu_ref to get
*
- * Analagous to atomic_long_inc().
+ * Analogous to atomic_long_inc().
*
* This function is safe to call as long as @ref is between init and exit.
*/
@@ -267,6 +267,28 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
}
/**
+ * percpu_ref_tryget_live_rcu - same as percpu_ref_tryget_live() but the
+ * caller is responsible for taking RCU.
+ * @ref: percpu_ref to try-get
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget_live_rcu(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count;
+ bool ret = false;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (likely(__ref_is_percpu(ref, &percpu_count))) {
+ this_cpu_inc(*percpu_count);
+ ret = true;
+ } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
+ ret = atomic_long_inc_not_zero(&ref->data->count);
+ }
+ return ret;
+}
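+
+/*
+ * Illustrative usage sketch (hypothetical lookup): callers that are
+ * already inside an RCU read-side critical section can use the _rcu
+ * variant to avoid a nested rcu_read_lock()/unlock() pair:
+ *
+ *	rcu_read_lock();
+ *	ref = example_lookup(...);
+ *	if (ref && !percpu_ref_tryget_live_rcu(ref))
+ *		ref = NULL;
+ *	rcu_read_unlock();
+ */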
+
+/**
* percpu_ref_tryget_live - try to increment a live percpu refcount
* @ref: percpu_ref to try-get
*
@@ -283,20 +305,11 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
*/
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
- unsigned long __percpu *percpu_count;
bool ret = false;
rcu_read_lock();
-
- if (__ref_is_percpu(ref, &percpu_count)) {
- this_cpu_inc(*percpu_count);
- ret = true;
- } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
- ret = atomic_long_inc_not_zero(&ref->data->count);
- }
-
+ ret = percpu_ref_tryget_live_rcu(ref);
rcu_read_unlock();
-
return ret;
}
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 5fda40f97fe9..288f5235649a 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -8,6 +8,7 @@
#include <linux/wait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>
+#include <linux/cleanup.h>
struct percpu_rw_semaphore {
struct rcu_sync rss;
@@ -42,9 +43,10 @@ is_static struct percpu_rw_semaphore name = { \
#define DEFINE_STATIC_PERCPU_RWSEM(name) \
__DEFINE_PERCPU_RWSEM(name, static)
-extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);
+extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool, bool);
-static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+static inline void percpu_down_read_internal(struct percpu_rw_semaphore *sem,
+ bool freezable)
{
might_sleep();
@@ -62,7 +64,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
if (likely(rcu_sync_is_idle(&sem->rss)))
this_cpu_inc(*sem->read_count);
else
- __percpu_down_read(sem, false); /* Unconditional memory barrier */
+ __percpu_down_read(sem, false, freezable); /* Unconditional memory barrier */
/*
* The preempt_enable() prevents the compiler from
* bleeding the critical section out.
@@ -70,6 +72,17 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
preempt_enable();
}
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+{
+ percpu_down_read_internal(sem, false);
+}
+
+static inline void percpu_down_read_freezable(struct percpu_rw_semaphore *sem,
+ bool freeze)
+{
+ percpu_down_read_internal(sem, freeze);
+}
+
static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
bool ret = true;
@@ -81,7 +94,7 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
if (likely(rcu_sync_is_idle(&sem->rss)))
this_cpu_inc(*sem->read_count);
else
- ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
+ ret = __percpu_down_read(sem, true, false); /* Unconditional memory barrier */
preempt_enable();
/*
* The barrier() from preempt_enable() prevents the compiler from
@@ -121,9 +134,22 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
preempt_enable();
}
+extern bool percpu_is_read_locked(struct percpu_rw_semaphore *);
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
+DEFINE_GUARD(percpu_read, struct percpu_rw_semaphore *,
+ percpu_down_read(_T), percpu_up_read(_T))
+DEFINE_GUARD_COND(percpu_read, _try, percpu_down_read_trylock(_T))
+
+DEFINE_GUARD(percpu_write, struct percpu_rw_semaphore *,
+ percpu_down_write(_T), percpu_up_write(_T))
+
+static inline bool percpu_is_write_locked(struct percpu_rw_semaphore *sem)
+{
+ return atomic_read(&sem->block);
+}
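+
+/*
+ * Illustrative usage sketch for the guards defined above (hypothetical
+ * semaphore name); the read side is dropped automatically at the end of
+ * the scope:
+ *
+ *	guard(percpu_read)(&example_sem);
+ *	... read-side critical section ...
+ */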
+
extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
const char *, struct lock_class_key *);
@@ -139,7 +165,7 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
#define percpu_rwsem_assert_held(sem) lockdep_assert_held(sem)
static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
- bool read, unsigned long ip)
+ unsigned long ip)
{
lock_release(&sem->dep_map, ip);
}
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 5e76af742c80..85bf8dd9f087 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -2,13 +2,14 @@
#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H
+#include <linux/alloc_tag.h>
#include <linux/mmdebug.h>
#include <linux/preempt.h>
#include <linux/smp.h>
-#include <linux/cpumask.h>
-#include <linux/printk.h>
#include <linux/pfn.h>
#include <linux/init.h>
+#include <linux/cleanup.h>
+#include <linux/sched.h>
#include <asm/percpu.h>
@@ -35,15 +36,24 @@
#define PCPU_BITMAP_BLOCK_BITS (PCPU_BITMAP_BLOCK_SIZE >> \
PCPU_MIN_ALLOC_SHIFT)
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+# if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PAGE_SIZE_4KB)
+# define PERCPU_DYNAMIC_SIZE_SHIFT 13
+# else
+# define PERCPU_DYNAMIC_SIZE_SHIFT 12
+# endif /* LOCKDEP and PAGE_SIZE > 4KiB */
+#else
+#define PERCPU_DYNAMIC_SIZE_SHIFT 10
+#endif
+
/*
* Percpu allocator can serve percpu allocations before slab is
* initialized which allows slab to depend on the percpu allocator.
- * The following two parameters decide how much resource to
- * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
- * larger than PERCPU_DYNAMIC_EARLY_SIZE.
+ * The following parameter decides how much resource to preallocate
+ * for this. Keep PERCPU_DYNAMIC_RESERVE equal to or larger than
+ * PERCPU_DYNAMIC_EARLY_SIZE.
*/
-#define PERCPU_DYNAMIC_EARLY_SLOTS 128
-#define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
+#define PERCPU_DYNAMIC_EARLY_SIZE (20 << PERCPU_DYNAMIC_SIZE_SHIFT)
/*
* PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
@@ -57,9 +67,9 @@
* intelligent way to determine this would be nice.
*/
#if BITS_PER_LONG > 32
-#define PERCPU_DYNAMIC_RESERVE (28 << 10)
+#define PERCPU_DYNAMIC_RESERVE (28 << PERCPU_DYNAMIC_SIZE_SHIFT)
#else
-#define PERCPU_DYNAMIC_RESERVE (20 << 10)
+#define PERCPU_DYNAMIC_RESERVE (20 << PERCPU_DYNAMIC_SIZE_SHIFT)
#endif
extern void *pcpu_base_addr;
@@ -95,10 +105,7 @@ extern const char * const pcpu_fc_names[PCPU_FC_NR];
extern enum pcpu_fc pcpu_chosen_fc;
-typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
- size_t align);
-typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
-typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
+typedef int (pcpu_fc_cpu_to_node_fn_t)(int cpu);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
@@ -108,22 +115,17 @@ extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
extern void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
void *base_addr);
-#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_t atom_size,
pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
- pcpu_fc_alloc_fn_t alloc_fn,
- pcpu_fc_free_fn_t free_fn);
-#endif
+ pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+void __init pcpu_populate_pte(unsigned long addr);
extern int __init pcpu_page_first_chunk(size_t reserved_size,
- pcpu_fc_alloc_fn_t alloc_fn,
- pcpu_fc_free_fn_t free_fn,
- pcpu_fc_populate_pte_fn_t populate_pte_fn);
+ pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
#endif
-extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);
@@ -131,10 +133,15 @@ extern bool is_kernel_percpu_address(unsigned long addr);
extern void __init setup_per_cpu_areas(void);
#endif
-extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
-extern void __percpu *__alloc_percpu(size_t size, size_t align);
-extern void free_percpu(void __percpu *__pdata);
-extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+extern void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
+ gfp_t gfp) __alloc_size(1);
+
+#define __alloc_percpu_gfp(_size, _align, _gfp) \
+ alloc_hooks(pcpu_alloc_noprof(_size, _align, false, _gfp))
+#define __alloc_percpu(_size, _align) \
+ alloc_hooks(pcpu_alloc_noprof(_size, _align, false, GFP_KERNEL))
+#define __alloc_reserved_percpu(_size, _align) \
+ alloc_hooks(pcpu_alloc_noprof(_size, _align, true, GFP_KERNEL))
#define alloc_percpu_gfp(type, gfp) \
(typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type), \
@@ -142,6 +149,15 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#define alloc_percpu(type) \
(typeof(type) __percpu *)__alloc_percpu(sizeof(type), \
__alignof__(type))
+#define alloc_percpu_noprof(type) \
+ ((typeof(type) __percpu *)pcpu_alloc_noprof(sizeof(type), \
+ __alignof__(type), false, GFP_KERNEL))
+
+extern void free_percpu(void __percpu *__pdata);
+
+DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))
+
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
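+
+/*
+ * Illustrative usage sketch for the scope-based cleanup hook above
+ * (hypothetical struct name); the allocation is freed automatically on
+ * any early return:
+ *
+ *	struct foo __percpu *p __free(free_percpu) = alloc_percpu(struct foo);
+ *
+ *	if (!p)
+ *		return -ENOMEM;
+ */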
extern unsigned long pcpu_nr_pages(void);
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 01861eebed79..3a44dd1e33d2 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -13,7 +13,9 @@
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
-#include <linux/gfp.h>
+
+/* percpu_counter batch for local add or sub */
+#define PERCPU_COUNTER_LOCAL_BATCH INT_MAX
#ifdef CONFIG_SMP
@@ -28,22 +30,35 @@ struct percpu_counter {
extern int percpu_counter_batch;
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
- struct lock_class_key *key);
+int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
+ gfp_t gfp, u32 nr_counters,
+ struct lock_class_key *key);
-#define percpu_counter_init(fbc, value, gfp) \
+#define percpu_counter_init_many(fbc, value, gfp, nr_counters) \
({ \
static struct lock_class_key __key; \
\
- __percpu_counter_init(fbc, value, gfp, &__key); \
+ __percpu_counter_init_many(fbc, value, gfp, nr_counters,\
+ &__key); \
})
-void percpu_counter_destroy(struct percpu_counter *fbc);
+
+#define percpu_counter_init(fbc, value, gfp) \
+ percpu_counter_init_many(fbc, value, gfp, 1)
+
+void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters);
+static inline void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+ percpu_counter_destroy_many(fbc, 1);
+}
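+
+/*
+ * Illustrative usage sketch (hypothetical array): initialising and
+ * tearing down a batch of counters allocated as one array:
+ *
+ *	struct percpu_counter stats[NR_STATS];
+ *
+ *	err = percpu_counter_init_many(stats, 0, GFP_KERNEL, NR_STATS);
+ *	...
+ *	percpu_counter_destroy_many(stats, NR_STATS);
+ */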
+
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit,
+ s64 amount, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
@@ -56,6 +71,29 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
+static inline bool
+percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
+{
+ return __percpu_counter_limited_add(fbc, limit, amount,
+ percpu_counter_batch);
+}
+
+/*
+ * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
+ * are accumulated in the local per cpu counter and not in fbc->count until
+ * the local count overflows PERCPU_COUNTER_LOCAL_BATCH. This makes counter
+ * writes efficient.
+ * But percpu_counter_sum(), instead of percpu_counter_read(), needs to be
+ * used to add up the counts from each CPU to account for all the local
+ * counts. So percpu_counter_add_local() and percpu_counter_sub_local()
+ * should be used when a counter is updated frequently and read rarely.
+ */
+static inline void
+percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
+}
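+
+/*
+ * Illustrative usage sketch (hypothetical counter): the frequent writer
+ * uses the local variant, while the rare reader must sum all CPUs as
+ * described above:
+ *
+ *	percpu_counter_add_local(&fbc, nbytes);		writer, hot path
+ *	total = percpu_counter_sum(&fbc);		reader, rare
+ */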
+
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
s64 ret = __percpu_counter_sum(fbc);
@@ -98,11 +136,27 @@ struct percpu_counter {
s64 count;
};
+static inline int percpu_counter_init_many(struct percpu_counter *fbc,
+ s64 amount, gfp_t gfp,
+ u32 nr_counters)
+{
+ u32 i;
+
+ for (i = 0; i < nr_counters; i++)
+ fbc[i].count = amount;
+
+ return 0;
+}
+
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
gfp_t gfp)
{
- fbc->count = amount;
- return 0;
+ return percpu_counter_init_many(fbc, amount, gfp, 1);
+}
+
+static inline void percpu_counter_destroy_many(struct percpu_counter *fbc,
+ u32 nr_counters)
+{
}
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
@@ -133,9 +187,39 @@ __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
- preempt_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
fbc->count += amount;
- preempt_enable();
+ local_irq_restore(flags);
+}
+
+static inline bool
+percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
+{
+ unsigned long flags;
+ bool good = false;
+ s64 count;
+
+ if (amount == 0)
+ return true;
+
+ local_irq_save(flags);
+ count = fbc->count + amount;
+ if ((amount > 0 && count <= limit) ||
+ (amount < 0 && count >= limit)) {
+ fbc->count = count;
+ good = true;
+ }
+ local_irq_restore(flags);
+ return good;
+}
+
+/* non-SMP percpu_counter_add_local is the same as percpu_counter_add */
+static inline void
+percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add(fbc, amount);
}
static inline void
@@ -193,4 +277,10 @@ static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
percpu_counter_add(fbc, -amount);
}
+static inline void
+percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add_local(fbc, -amount);
+}
+
#endif /* _LINUX_PERCPU_COUNTER_H */
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 505480217cf1..52b37f7bdbf9 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -17,15 +17,24 @@
#ifdef CONFIG_ARM_PMU
/*
- * The ARMv7 CPU PMU supports up to 32 event counters.
+ * Armv7 and Armv8.8 or earlier CPU PMUs support up to 32 event counters.
+ * The Armv8.9/9.4 CPU PMU supports up to 33 event counters.
*/
+#ifdef CONFIG_ARM
#define ARMPMU_MAX_HWEVENTS 32
-
+#else
+#define ARMPMU_MAX_HWEVENTS 33
+#endif
/*
* ARM PMU hw_event flags
*/
-/* Event uses a 64bit counter */
-#define ARMPMU_EVT_64BIT 1
+#define ARMPMU_EVT_64BIT 0x00001 /* Event uses a 64bit counter */
+#define ARMPMU_EVT_47BIT 0x00002 /* Event uses a 47bit counter */
+#define ARMPMU_EVT_63BIT 0x00004 /* Event uses a 63bit counter */
+
+static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_64BIT) == ARMPMU_EVT_64BIT);
+static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_47BIT) == ARMPMU_EVT_47BIT);
+static_assert((PERF_EVENT_FLAG_ARCH & ARMPMU_EVT_63BIT) == ARMPMU_EVT_63BIT);
#define HW_OP_UNSUPPORTED 0xFFFF
#define C(_x) PERF_COUNT_HW_CACHE_##_x
@@ -55,18 +64,17 @@ struct pmu_hw_events {
DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
/*
- * Hardware lock to serialize accesses to PMU registers. Needed for the
- * read/modify/write sequences.
- */
- raw_spinlock_t pmu_lock;
-
- /*
* When using percpu IRQs, we need a percpu dev_id. Place it here as we
* already have to allocate this struct per cpu.
*/
struct arm_pmu *percpu_pmu;
int irq;
+
+ struct perf_branch_stack *branch_stack;
+
+ /* Active events requesting branch records */
+ unsigned int branch_users;
};
enum armpmu_attr_groups {
@@ -81,7 +89,6 @@ struct arm_pmu {
struct pmu pmu;
cpumask_t supported_cpus;
char *name;
- int pmuver;
irqreturn_t (*handle_irq)(struct arm_pmu *pmu);
void (*enable)(struct perf_event *event);
void (*disable)(struct perf_event *event);
@@ -97,21 +104,28 @@ struct arm_pmu {
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
- int (*filter_match)(struct perf_event *event);
- int num_events;
+ /*
+ * Called by KVM to map the PMUv3 event space onto non-PMUv3 hardware.
+ */
+ int (*map_pmuv3_event)(unsigned int eventsel);
+ DECLARE_BITMAP(cntr_mask, ARMPMU_MAX_HWEVENTS);
bool secure_access; /* 32-bit ARM only */
-#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
- DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
-#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000
- DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
struct platform_device *plat_device;
struct pmu_hw_events __percpu *hw_events;
struct hlist_node node;
struct notifier_block cpu_pm_nb;
/* the attr_groups array must be NULL-terminated */
const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
- /* store the PMMIR_EL1 to expose slots */
+
+ /* PMUv3 only */
+ int pmuver;
+ bool has_smt;
u64 reg_pmmir;
+ u64 reg_brbidr;
+#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
+ DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
+#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000
+ DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
/* Only to be used by ACPI probing code */
unsigned long acpi_cpuid;
@@ -163,18 +177,48 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
#endif
+#ifdef CONFIG_KVM
+void kvm_host_pmu_init(struct arm_pmu *pmu);
+#else
+#define kvm_host_pmu_init(x) do { } while (0)
+#endif
+
+bool arm_pmu_irq_is_nmi(void);
+
/* Internal functions only for core arm_pmu code */
struct arm_pmu *armpmu_alloc(void);
-struct arm_pmu *armpmu_alloc_atomic(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
-int armpmu_request_irq(int irq, int cpu);
-void armpmu_free_irq(int irq, int cpu);
+int armpmu_request_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu);
+void armpmu_free_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu);
#define ARMV8_PMU_PDEV_NAME "armv8-pmu"
#endif /* CONFIG_ARM_PMU */
#define ARMV8_SPE_PDEV_NAME "arm,spe-v1"
+#define ARMV8_TRBE_PDEV_NAME "arm,trbe"
+
+/* Helpers to generate PMU format attributes from ATTR_CFG_FLD_* field triplets */
+#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
+ (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
+
+#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
+ __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
+
+#define GEN_PMU_FORMAT_ATTR(name) \
+ PMU_FORMAT_ATTR(name, \
+ _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \
+ ATTR_CFG_FLD_##name##_LO, \
+ ATTR_CFG_FLD_##name##_HI))
+
+#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \
+ ((((attr)->cfg) >> lo) & GENMASK_ULL(hi - lo, 0))
+
+#define ATTR_CFG_GET_FLD(attr, name) \
+ _ATTR_CFG_GET_FLD(attr, \
+ ATTR_CFG_FLD_##name##_CFG, \
+ ATTR_CFG_FLD_##name##_LO, \
+ ATTR_CFG_FLD_##name##_HI)
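/*
 * Illustrative sketch, not part of this patch: a driver describes each
 * config field as a CFG/LO/HI triplet and the helpers above generate both
 * the sysfs "format" string and the config extractor. The ts_enable field
 * name below is hypothetical (SPE-style usage).
 */
#define ATTR_CFG_FLD_ts_enable_CFG	config	/* lives in attr.config */
#define ATTR_CFG_FLD_ts_enable_LO	0
#define ATTR_CFG_FLD_ts_enable_HI	0

GEN_PMU_FORMAT_ATTR(ts_enable);			/* sysfs shows "config:0" */

static inline bool get_ts_enable(const struct perf_event_attr *attr)
{
	return ATTR_CFG_GET_FLD(attr, ts_enable);
}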
#endif /* __ARM_PMU_H__ */
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
new file mode 100644
index 000000000000..d698efba28a2
--- /dev/null
+++ b/include/linux/perf/arm_pmuv3.h
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#ifndef __PERF_ARM_PMUV3_H
+#define __PERF_ARM_PMUV3_H
+
+#define ARMV8_PMU_MAX_GENERAL_COUNTERS 31
+#define ARMV8_PMU_CYCLE_IDX 31
+#define ARMV8_PMU_INSTR_IDX 32 /* Not accessible from AArch32 */
+
+/*
+ * Common architectural and microarchitectural event numbers.
+ */
+#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x0000
+#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x0001
+#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x0002
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x0003
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x0004
+#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x0005
+#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x0006
+#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x0007
+#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x0008
+#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x0009
+#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x000A
+#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x000B
+#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x000C
+#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x000D
+#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x000E
+#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x000F
+#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x0010
+#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x0011
+#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x0012
+#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x0013
+#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x0014
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x0015
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x0016
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x0017
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x0018
+#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x0019
+#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x001A
+#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x001B
+#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x001C
+#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x001D
+#define ARMV8_PMUV3_PERFCTR_CHAIN 0x001E
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x001F
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x0020
+#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x0021
+#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x0022
+#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x0023
+#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x0024
+#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x0025
+#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x0026
+#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x0027
+#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x0028
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x0029
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x002A
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x002B
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x002C
+#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x002D
+#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x002E
+#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x002F
+#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x0030
+#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS 0x0031
+#define ARMV8_PMUV3_PERFCTR_LL_CACHE 0x0032
+#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS 0x0033
+#define ARMV8_PMUV3_PERFCTR_DTLB_WALK 0x0034
+#define ARMV8_PMUV3_PERFCTR_ITLB_WALK 0x0035
+#define ARMV8_PMUV3_PERFCTR_LL_CACHE_RD 0x0036
+#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD 0x0037
+#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD 0x0038
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD 0x0039
+#define ARMV8_PMUV3_PERFCTR_OP_RETIRED 0x003A
+#define ARMV8_PMUV3_PERFCTR_OP_SPEC 0x003B
+#define ARMV8_PMUV3_PERFCTR_STALL 0x003C
+#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND 0x003D
+#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND 0x003E
+#define ARMV8_PMUV3_PERFCTR_STALL_SLOT 0x003F
+
+/* Statistical profiling extension microarchitectural events */
+#define ARMV8_SPE_PERFCTR_SAMPLE_POP 0x4000
+#define ARMV8_SPE_PERFCTR_SAMPLE_FEED 0x4001
+#define ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE 0x4002
+#define ARMV8_SPE_PERFCTR_SAMPLE_COLLISION 0x4003
+
+/* AMUv1 architecture events */
+#define ARMV8_AMU_PERFCTR_CNT_CYCLES 0x4004
+#define ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM 0x4005
+
+/* Long-latency read miss events */
+#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS 0x4006
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD 0x4009
+#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS 0x400A
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD 0x400B
+
+/* Trace buffer events */
+#define ARMV8_PMUV3_PERFCTR_TRB_WRAP 0x400C
+#define ARMV8_PMUV3_PERFCTR_TRB_TRIG 0x400E
+
+/* Trace unit events */
+#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT0 0x4010
+#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT1 0x4011
+#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT2 0x4012
+#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT3 0x4013
+#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT4 0x4018
+#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT5 0x4019
+#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT6 0x401A
+#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT7 0x401B
+
+/* Additional latency from alignment events */
+#define ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT 0x4020
+#define ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT 0x4021
+#define ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT 0x4022
+
+/* Armv8.5 Memory Tagging Extension events */
+#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED 0x4024
+#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD 0x4025
+#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR 0x4026
+
+/* ARMv8 recommended implementation defined event types */
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x0040
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x0041
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x0042
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x0043
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x0044
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x0045
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x0046
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x0047
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x0048
+
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x004C
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x004D
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x004E
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x004F
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x0050
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x0051
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x0052
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x0053
+
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x0056
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x0057
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x0058
+
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x005C
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x005D
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x005E
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x005F
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x0060
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x0061
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x0062
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x0063
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x0064
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x0065
+#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x0066
+#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x0067
+#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x0068
+#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x0069
+#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x006A
+
+#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x006C
+#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x006D
+#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x006E
+#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x006F
+#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x0070
+#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x0071
+#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x0072
+#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x0073
+#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x0074
+#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x0075
+#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x0076
+#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x0077
+#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x0078
+#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x0079
+#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x007A
+
+#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x007C
+#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x007D
+#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x007E
+
+#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x0081
+#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x0082
+#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x0083
+#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x0084
+
+#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x0086
+#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x0087
+#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x0088
+
+#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x008A
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x008B
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x008C
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x008D
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x008E
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x008F
+#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x0090
+#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x0091
+
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0x00A0
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0x00A1
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0x00A2
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0x00A3
+
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0x00A6
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0x00A7
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0x00A8
+
+/*
+ * Per-CPU PMCR: config reg
+ */
+#define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */
+#define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */
+#define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */
+#define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */
+#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug */
+#define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
+#define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */
+#define ARMV8_PMU_PMCR_N GENMASK(15, 11) /* Number of counters supported */
+/* Mask for writable bits */
+#define ARMV8_PMU_PMCR_MASK (ARMV8_PMU_PMCR_E | ARMV8_PMU_PMCR_P | \
+ ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_D | \
+ ARMV8_PMU_PMCR_X | ARMV8_PMU_PMCR_DP | \
+ ARMV8_PMU_PMCR_LC | ARMV8_PMU_PMCR_LP)
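/*
 * Illustrative sketch, not part of this patch: composing a PMCR value that
 * starts counting and resets all event and cycle counters, clamped to the
 * architecturally writable bits. pmcr_start_and_reset() is hypothetical.
 */
static inline u64 pmcr_start_and_reset(u64 pmcr)
{
	pmcr |= ARMV8_PMU_PMCR_E | ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C;
	return pmcr & ARMV8_PMU_PMCR_MASK;
}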
+
+/*
+ * PMOVSR: counters overflow flag status reg
+ */
+#define ARMV8_PMU_OVSR_P GENMASK(30, 0)
+#define ARMV8_PMU_OVSR_C BIT(31)
+#define ARMV8_PMU_OVSR_F BIT_ULL(32) /* arm64 only */
+/* Mask for writable bits: the P, C and F fields */
+#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C | \
+ ARMV8_PMU_OVSR_F)
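/*
 * Illustrative sketch, not part of this patch: testing a PMOVSR value read
 * by an interrupt handler against the full overflow mask defined above.
 */
static inline bool pmovsr_overflowed(u64 pmovsr)
{
	return !!(pmovsr & ARMV8_PMU_OVERFLOWED_MASK);
}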
+
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define ARMV8_PMU_EVTYPE_EVENT GENMASK(15, 0) /* Mask for EVENT bits */
+#define ARMV8_PMU_EVTYPE_TH GENMASK_ULL(43, 32) /* arm64 only */
+#define ARMV8_PMU_EVTYPE_TC GENMASK_ULL(63, 61) /* arm64 only */
+
+/*
+ * Event filters for PMUv3
+ */
+#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31)
+#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30)
+#define ARMV8_PMU_EXCLUDE_NS_EL1 (1U << 29)
+#define ARMV8_PMU_EXCLUDE_NS_EL0 (1U << 28)
+#define ARMV8_PMU_INCLUDE_EL2 (1U << 27)
+#define ARMV8_PMU_EXCLUDE_EL3 (1U << 26)
+
+/*
+ * PMUSERENR: user enable reg
+ */
+#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */
+#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
+#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
+#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
+#define ARMV8_PMU_USERENR_UEN (1 << 4) /* Fine grained per counter access at EL0 */
+/* Mask for writable bits */
+#define ARMV8_PMU_USERENR_MASK (ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \
+ ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER)
+
+/* PMMIR_EL1.SLOTS mask */
+#define ARMV8_PMU_SLOTS GENMASK(7, 0)
+#define ARMV8_PMU_BUS_SLOTS GENMASK(15, 8)
+#define ARMV8_PMU_BUS_WIDTH GENMASK(19, 16)
+#define ARMV8_PMU_THWIDTH GENMASK(23, 20)
+
+/*
+ * Expand case_macro once for each possible PMEV<n> counter index so that
+ * accessors get the literal counter number at compile time.
+ */
+
+#define PMEVN_CASE(n, case_macro) \
+ case n: case_macro(n); break
+
+#define PMEVN_SWITCH(x, case_macro) \
+ do { \
+ switch (x) { \
+ PMEVN_CASE(0, case_macro); \
+ PMEVN_CASE(1, case_macro); \
+ PMEVN_CASE(2, case_macro); \
+ PMEVN_CASE(3, case_macro); \
+ PMEVN_CASE(4, case_macro); \
+ PMEVN_CASE(5, case_macro); \
+ PMEVN_CASE(6, case_macro); \
+ PMEVN_CASE(7, case_macro); \
+ PMEVN_CASE(8, case_macro); \
+ PMEVN_CASE(9, case_macro); \
+ PMEVN_CASE(10, case_macro); \
+ PMEVN_CASE(11, case_macro); \
+ PMEVN_CASE(12, case_macro); \
+ PMEVN_CASE(13, case_macro); \
+ PMEVN_CASE(14, case_macro); \
+ PMEVN_CASE(15, case_macro); \
+ PMEVN_CASE(16, case_macro); \
+ PMEVN_CASE(17, case_macro); \
+ PMEVN_CASE(18, case_macro); \
+ PMEVN_CASE(19, case_macro); \
+ PMEVN_CASE(20, case_macro); \
+ PMEVN_CASE(21, case_macro); \
+ PMEVN_CASE(22, case_macro); \
+ PMEVN_CASE(23, case_macro); \
+ PMEVN_CASE(24, case_macro); \
+ PMEVN_CASE(25, case_macro); \
+ PMEVN_CASE(26, case_macro); \
+ PMEVN_CASE(27, case_macro); \
+ PMEVN_CASE(28, case_macro); \
+ PMEVN_CASE(29, case_macro); \
+ PMEVN_CASE(30, case_macro); \
+ default: WARN(1, "Invalid PMEV* index\n"); \
+ } \
+ } while (0)
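/*
 * Illustrative sketch, not part of this patch: the case_macro receives the
 * literal index, so the compiler can emit one direct system-register access
 * per case (PMEVCNTR<n>_EL0 below, on arm64).
 */
#define RETURN_READ_PMEVCNTRN(n)	\
	return read_sysreg(pmevcntr##n##_el0)

static inline unsigned long read_pmevcntrn(int n)
{
	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
	return 0;
}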
+
+#include <asm/arm_pmuv3.h>
+
+#endif
diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h
new file mode 100644
index 000000000000..f82a28040594
--- /dev/null
+++ b/include/linux/perf/riscv_pmu.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 SiFive
+ * Copyright (C) 2018 Andes Technology Corporation
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ *
+ */
+
+#ifndef _RISCV_PMU_H
+#define _RISCV_PMU_H
+
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/interrupt.h>
+
+#ifdef CONFIG_RISCV_PMU
+
+/*
+ * RISCV_MAX_COUNTERS bounds the number of hardware and firmware counters
+ * tracked per CPU.
+ */
+
+#define RISCV_MAX_COUNTERS 64
+#define RISCV_OP_UNSUPP (-EOPNOTSUPP)
+#define RISCV_PMU_SBI_PDEV_NAME "riscv-pmu-sbi"
+#define RISCV_PMU_LEGACY_PDEV_NAME "riscv-pmu-legacy"
+
+#define RISCV_PMU_STOP_FLAG_RESET 1
+
+#define RISCV_PMU_CONFIG1_GUEST_EVENTS 0x1
+
+struct cpu_hw_events {
+ /* number of currently enabled events */
+ int n_events;
+ /* Counter overflow interrupt */
+ int irq;
+ /* currently enabled events */
+ struct perf_event *events[RISCV_MAX_COUNTERS];
+ /* currently enabled hardware counters */
+ DECLARE_BITMAP(used_hw_ctrs, RISCV_MAX_COUNTERS);
+ /* currently enabled firmware counters */
+ DECLARE_BITMAP(used_fw_ctrs, RISCV_MAX_COUNTERS);
+ /* The virtual address of the shared memory where counter snapshot will be taken */
+ void *snapshot_addr;
+ /* The physical address of the shared memory where counter snapshot will be taken */
+ phys_addr_t snapshot_addr_phys;
+ /* Boolean flag to indicate setup is already done */
+ bool snapshot_set_done;
+ /* A shadow copy of the counter values to avoid clobbering during multiple SBI calls */
+ u64 snapshot_cval_shcopy[RISCV_MAX_COUNTERS];
+};
+
+struct riscv_pmu {
+ struct pmu pmu;
+ char *name;
+
+ irqreturn_t (*handle_irq)(int irq_num, void *dev);
+
+ unsigned long cmask;
+ u64 (*ctr_read)(struct perf_event *event);
+ int (*ctr_get_idx)(struct perf_event *event);
+ int (*ctr_get_width)(int idx);
+ void (*ctr_clear_idx)(struct perf_event *event);
+ void (*ctr_start)(struct perf_event *event, u64 init_val);
+ void (*ctr_stop)(struct perf_event *event, unsigned long flag);
+ int (*event_map)(struct perf_event *event, u64 *config);
+ void (*event_init)(struct perf_event *event);
+ void (*event_mapped)(struct perf_event *event, struct mm_struct *mm);
+ void (*event_unmapped)(struct perf_event *event, struct mm_struct *mm);
+ uint8_t (*csr_index)(struct perf_event *event);
+
+ struct cpu_hw_events __percpu *hw_events;
+ struct hlist_node node;
+ struct notifier_block riscv_pm_nb;
+};
+
+#define to_riscv_pmu(p) (container_of(p, struct riscv_pmu, pmu))
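/*
 * Illustrative sketch, not part of this patch: callbacks receive the generic
 * struct pmu and recover the driver instance via container_of(). The
 * example_ctr_width() helper is hypothetical.
 */
static int example_ctr_width(struct perf_event *event)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

	return rvpmu->ctr_get_width(event->hw.idx);
}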
+
+void riscv_pmu_start(struct perf_event *event, int flags);
+void riscv_pmu_stop(struct perf_event *event, int flags);
+unsigned long riscv_pmu_ctr_read_csr(unsigned long csr);
+int riscv_pmu_event_set_period(struct perf_event *event);
+uint64_t riscv_pmu_ctr_get_width_mask(struct perf_event *event);
+u64 riscv_pmu_event_update(struct perf_event *event);
+#ifdef CONFIG_RISCV_PMU_LEGACY
+void riscv_pmu_legacy_skip_init(void);
+#else
+static inline void riscv_pmu_legacy_skip_init(void) { }
+#endif
+struct riscv_pmu *riscv_pmu_alloc(void);
+#ifdef CONFIG_RISCV_PMU_SBI
+int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr);
+int riscv_pmu_get_event_info(u32 type, u64 config, u64 *econfig);
+#endif
+
+#endif /* CONFIG_RISCV_PMU */
+
+#endif /* _RISCV_PMU_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f5a6a2f069ed..9870d768db4c 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -26,15 +26,9 @@
# include <asm/local64.h>
#endif
-struct perf_guest_info_callbacks {
- int (*is_in_guest)(void);
- int (*is_user_mode)(void);
- unsigned long (*get_guest_ip)(void);
- void (*handle_intel_pt_intr)(void);
-};
-
#ifdef CONFIG_HAVE_HW_BREAKPOINT
-#include <asm/hw_breakpoint.h>
+# include <linux/rhashtable-types.h>
+# include <asm/hw_breakpoint.h>
#endif
#include <linux/list.h>
@@ -57,19 +51,22 @@ struct perf_guest_info_callbacks {
#include <linux/cgroup.h>
#include <linux/refcount.h>
#include <linux/security.h>
+#include <linux/static_call.h>
+#include <linux/lockdep.h>
+
#include <asm/local.h>
struct perf_callchain_entry {
- __u64 nr;
- __u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */
+ u64 nr;
+ u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */
};
struct perf_callchain_entry_ctx {
- struct perf_callchain_entry *entry;
- u32 max_stack;
- u32 nr;
- short contexts;
- bool contexts_maxed;
+ struct perf_callchain_entry *entry;
+ u32 max_stack;
+ u32 nr;
+ short contexts;
+ bool contexts_maxed;
};
typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
@@ -90,6 +87,11 @@ struct perf_raw_record {
u32 size;
};
+static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
+{
+ return frag->pad < sizeof(u64);
+}
+
/*
* branch stack layout:
* nr: number of taken branches stored in entries[]
@@ -111,8 +113,8 @@ struct perf_raw_record {
* already stored in age order, the hw_idx should be 0.
*/
struct perf_branch_stack {
- __u64 nr;
- __u64 hw_idx;
+ u64 nr;
+ u64 hw_idx;
struct perf_branch_entry entries[];
};
@@ -122,13 +124,24 @@ struct task_struct;
* extra PMU register associated with an event
*/
struct hw_perf_event_extra {
- u64 config; /* register value */
- unsigned int reg; /* register address or index */
- int alloc; /* extra register already allocated */
- int idx; /* index in shared_regs->regs[] */
+ u64 config; /* register value */
+ unsigned int reg; /* register address or index */
+ int alloc; /* extra register already allocated */
+ int idx; /* index in shared_regs->regs[] */
};
/**
+ * hw_perf_event::flag values
+ *
+ * PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific
+ * usage.
+ */
+#define PERF_EVENT_FLAG_ARCH 0x0fffffff
+#define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000
+
+static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0);
+
+/**
* struct hw_perf_event - performance event hardware details:
*/
struct hw_perf_event {
@@ -136,7 +149,9 @@ struct hw_perf_event {
union {
struct { /* hardware */
u64 config;
+ u64 config1;
u64 last_tag;
+ u64 dyn_constraint;
unsigned long config_base;
unsigned long event_base;
int event_base_rdpmc;
@@ -147,6 +162,15 @@ struct hw_perf_event {
struct hw_perf_event_extra extra_reg;
struct hw_perf_event_extra branch_reg;
};
+ struct { /* aux / Intel-PT */
+ u64 aux_config;
+ /*
+ * For AUX area events, aux_paused cannot be a state
+ * flag because it can be updated asynchronously to
+ * state.
+ */
+ unsigned int aux_paused;
+ };
struct { /* software */
struct hrtimer hrtimer;
};
@@ -166,7 +190,7 @@ struct hw_perf_event {
* creation and event initalization.
*/
struct arch_hw_breakpoint info;
- struct list_head bp_list;
+ struct rhlist_head bp_list;
};
#endif
struct { /* amd_iommu */
@@ -195,9 +219,14 @@ struct hw_perf_event {
/*
* hw_perf_event::state flags; used to track the PERF_EF_* state.
*/
-#define PERF_HES_STOPPED 0x01 /* the counter is stopped */
-#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
-#define PERF_HES_ARCH 0x04
+
+/* the counter is stopped */
+#define PERF_HES_STOPPED 0x01
+
+/* event->count up-to-date */
+#define PERF_HES_UPTODATE 0x02
+
+#define PERF_HES_ARCH 0x04
int state;
@@ -246,41 +275,66 @@ struct hw_perf_event {
*/
u64 freq_time_stamp;
u64 freq_count_stamp;
-#endif
+#endif /* CONFIG_PERF_EVENTS */
};
struct perf_event;
+struct perf_event_pmu_context;
/*
* Common implementation detail of pmu::{start,commit,cancel}_txn
*/
-#define PERF_PMU_TXN_ADD 0x1 /* txn to add/schedule event on PMU */
-#define PERF_PMU_TXN_READ 0x2 /* txn to read event group from PMU */
+
+/* txn to add/schedule event on PMU */
+#define PERF_PMU_TXN_ADD 0x1
+
+/* txn to read event group from PMU */
+#define PERF_PMU_TXN_READ 0x2
/**
* pmu::capabilities flags
*/
-#define PERF_PMU_CAP_NO_INTERRUPT 0x0001
-#define PERF_PMU_CAP_NO_NMI 0x0002
-#define PERF_PMU_CAP_AUX_NO_SG 0x0004
-#define PERF_PMU_CAP_EXTENDED_REGS 0x0008
-#define PERF_PMU_CAP_EXCLUSIVE 0x0010
-#define PERF_PMU_CAP_ITRACE 0x0020
-#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x0040
-#define PERF_PMU_CAP_NO_EXCLUDE 0x0080
-#define PERF_PMU_CAP_AUX_OUTPUT 0x0100
-#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0200
+#define PERF_PMU_CAP_NO_INTERRUPT 0x0001
+#define PERF_PMU_CAP_NO_NMI 0x0002
+#define PERF_PMU_CAP_AUX_NO_SG 0x0004
+#define PERF_PMU_CAP_EXTENDED_REGS 0x0008
+#define PERF_PMU_CAP_EXCLUSIVE 0x0010
+#define PERF_PMU_CAP_ITRACE 0x0020
+#define PERF_PMU_CAP_NO_EXCLUDE 0x0040
+#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
+#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
+#define PERF_PMU_CAP_AUX_PAUSE 0x0200
+#define PERF_PMU_CAP_AUX_PREFER_LARGE 0x0400
+
+/**
+ * pmu::scope
+ */
+enum perf_pmu_scope {
+ PERF_PMU_SCOPE_NONE = 0,
+ PERF_PMU_SCOPE_CORE,
+ PERF_PMU_SCOPE_DIE,
+ PERF_PMU_SCOPE_CLUSTER,
+ PERF_PMU_SCOPE_PKG,
+ PERF_PMU_SCOPE_SYS_WIDE,
+ PERF_PMU_MAX_SCOPE,
+};
struct perf_output_handle;
+#define PMU_NULL_DEV ((void *)(~0UL))
+
/**
* struct pmu - generic performance monitoring unit
*/
struct pmu {
struct list_head entry;
+ spinlock_t events_lock;
+ struct list_head events;
+
struct module *module;
struct device *dev;
+ struct device *parent;
const struct attribute_group **attr_groups;
const struct attribute_group **attr_update;
const char *name;
@@ -291,8 +345,12 @@ struct pmu {
*/
int capabilities;
- int __percpu *pmu_disable_count;
- struct perf_cpu_context __percpu *pmu_cpu_context;
+ /*
+ * PMU scope
+ */
+ unsigned int scope;
+
+ struct perf_cpu_pmu_context * __percpu *cpu_pmu_context;
atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
int task_ctx_nr;
int hrtimer_interval_ms;
@@ -336,9 +394,21 @@ struct pmu {
* Flags for ->add()/->del()/ ->start()/->stop(). There are
* matching hw_perf_event::state flags.
*/
-#define PERF_EF_START 0x01 /* start the counter when adding */
-#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
-#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
+
+/* start the counter when adding */
+#define PERF_EF_START 0x01
+
+/* reload the counter when starting */
+#define PERF_EF_RELOAD 0x02
+
+/* update the counter when stopping */
+#define PERF_EF_UPDATE 0x04
+
+/* AUX area event, pause tracing */
+#define PERF_EF_PAUSE 0x08
+
+/* AUX area event, resume tracing */
+#define PERF_EF_RESUME 0x10
/*
* Adds/Removes a counter to/from the PMU, can be done inside a
@@ -378,6 +448,18 @@ struct pmu {
*
* ->start() with PERF_EF_RELOAD will reprogram the counter
* value, must be preceded by a ->stop() with PERF_EF_UPDATE.
+ *
+ * ->stop() with PERF_EF_PAUSE will stop as simply as possible. Will not
+ * overlap another ->stop() with PERF_EF_PAUSE nor ->start() with
+ * PERF_EF_RESUME.
+ *
+ * ->start() with PERF_EF_RESUME will start as simply as possible but
+ * only if the counter is not otherwise stopped. Will not overlap
+ * another ->start() with PERF_EF_RESUME nor ->stop() with
+ * PERF_EF_PAUSE.
+ *
+ * Notably, PERF_EF_PAUSE/PERF_EF_RESUME *can* be concurrent with other
+ * ->stop()/->start() invocations, just not itself.
*/
void (*start) (struct perf_event *event, int flags);
void (*stop) (struct perf_event *event, int flags);
@@ -420,15 +502,16 @@ struct pmu {
/*
* Will return the value for perf_event_mmap_page::index for this event,
- * if no implementation is provided it will default to: event->hw.idx + 1.
+ * if no implementation is provided it will default to 0 (see
+ * perf_event_idx_default).
*/
int (*event_idx) (struct perf_event *event); /*optional */
/*
* context-switches callback
*/
- void (*sched_task) (struct perf_event_context *ctx,
- bool sched_in);
+ void (*sched_task) (struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in);
/*
* Kmem cache of PMU specific data
@@ -436,16 +519,6 @@ struct pmu {
struct kmem_cache *task_ctx_cache;
/*
- * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
- * can be synchronized using this function. See Intel LBR callstack support
- * implementation and Perf core context switch handling callbacks for usage
- * examples.
- */
- void (*swap_task_ctx) (struct perf_event_context *prev,
- struct perf_event_context *next);
- /* optional */
-
- /*
* Set up pmu-private data structures for an AUX area
*/
void *(*setup_aux) (struct perf_event *event, void **pages,
@@ -506,9 +579,10 @@ struct pmu {
/* optional */
/*
- * Filter events for PMU-specific reasons.
+ * Skip programming this PMU on the given CPU. Typically needed for
+ * big.LITTLE things.
*/
- int (*filter_match) (struct perf_event *event); /* optional */
+ bool (*filter) (struct pmu *pmu, int cpu); /* optional */
/*
* Check period value for PERF_EVENT_IOC_PERIOD ioctl.
@@ -533,10 +607,10 @@ enum perf_addr_filter_action_t {
* This is a hardware-agnostic filter configuration as specified by the user.
*/
struct perf_addr_filter {
- struct list_head entry;
- struct path path;
- unsigned long offset;
- unsigned long size;
+ struct list_head entry;
+ struct path path;
+ unsigned long offset;
+ unsigned long size;
enum perf_addr_filter_action_t action;
};
@@ -551,23 +625,62 @@ struct perf_addr_filter {
* bundled together; see perf_event_addr_filters().
*/
struct perf_addr_filters_head {
- struct list_head list;
- raw_spinlock_t lock;
- unsigned int nr_file_filters;
+ struct list_head list;
+ raw_spinlock_t lock;
+ unsigned int nr_file_filters;
};
struct perf_addr_filter_range {
- unsigned long start;
- unsigned long size;
+ unsigned long start;
+ unsigned long size;
};
-/**
- * enum perf_event_state - the states of an event:
+/*
+ * The normal states are:
+ *
+ *            ACTIVE    --.
+ *               ^        |
+ *               |        |
+ *      sched_{in,out}()  |
+ *               |        |
+ *               v        |
+ *      ,---> INACTIVE  --+  <-.
+ *      |                 |    |
+ *      |                {dis,en}able()
+ *   sched_in()           |    |
+ *      |       OFF    <--' --+
+ *      |                     |
+ *      `---> ERROR    ------'
+ *
+ * That is:
+ *
+ * sched_in: INACTIVE -> {ACTIVE,ERROR}
+ * sched_out: ACTIVE -> INACTIVE
+ * disable: {ACTIVE,INACTIVE} -> OFF
+ * enable: {OFF,ERROR} -> INACTIVE
+ *
+ * Where {OFF,ERROR} are disabled states.
+ *
+ * Then we have the {EXIT,REVOKED,DEAD} states which are various shades of
+ * defunct events:
+ *
+ * - EXIT means the task that the event was assigned to died, but child events
+ * still live, and further children can still be created. But the event
+ * itself will never be active again. It can only transition to
+ * {REVOKED,DEAD};
+ *
+ * - REVOKED means the PMU the event was associated with is gone; all
+ * functionality is stopped but the event is still alive. Can only
+ * transition to DEAD;
+ *
+ * - DEAD means the event really is dying: tearing down state and freeing bits.
+ *
*/
enum perf_event_state {
- PERF_EVENT_STATE_DEAD = -4,
- PERF_EVENT_STATE_EXIT = -3,
- PERF_EVENT_STATE_ERROR = -2,
+ PERF_EVENT_STATE_DEAD = -5,
+ PERF_EVENT_STATE_REVOKED = -4, /* pmu gone, must not touch */
+ PERF_EVENT_STATE_EXIT = -3, /* task died, still inherit */
+ PERF_EVENT_STATE_ERROR = -2, /* scheduling error, can enable */
PERF_EVENT_STATE_OFF = -1,
PERF_EVENT_STATE_INACTIVE = 0,
PERF_EVENT_STATE_ACTIVE = 1,
@@ -589,10 +702,13 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *,
* PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and
* cannot be a group leader. If an event with this flag is detached from the
* group it is scheduled out and moved into an unrecoverable ERROR state.
+ * PERF_EV_CAP_READ_SCOPE: A CPU event that can be read from any CPU of the
+ * PMU scope where it is active.
*/
#define PERF_EV_CAP_SOFTWARE BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1)
#define PERF_EV_CAP_SIBLING BIT(2)
+#define PERF_EV_CAP_READ_SCOPE BIT(3)
#define SWEVENT_HLIST_BITS 8
#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
@@ -602,23 +718,43 @@ struct swevent_hlist {
struct rcu_head rcu_head;
};
-#define PERF_ATTACH_CONTEXT 0x01
-#define PERF_ATTACH_GROUP 0x02
-#define PERF_ATTACH_TASK 0x04
-#define PERF_ATTACH_TASK_DATA 0x08
-#define PERF_ATTACH_ITRACE 0x10
-#define PERF_ATTACH_SCHED_CB 0x20
-#define PERF_ATTACH_CHILD 0x40
-
+#define PERF_ATTACH_CONTEXT 0x0001
+#define PERF_ATTACH_GROUP 0x0002
+#define PERF_ATTACH_TASK 0x0004
+#define PERF_ATTACH_TASK_DATA 0x0008
+#define PERF_ATTACH_GLOBAL_DATA 0x0010
+#define PERF_ATTACH_SCHED_CB 0x0020
+#define PERF_ATTACH_CHILD 0x0040
+#define PERF_ATTACH_EXCLUSIVE 0x0080
+#define PERF_ATTACH_CALLCHAIN 0x0100
+#define PERF_ATTACH_ITRACE 0x0200
+
+struct bpf_prog;
struct perf_cgroup;
struct perf_buffer;
struct pmu_event_list {
- raw_spinlock_t lock;
- struct list_head list;
+ raw_spinlock_t lock;
+ struct list_head list;
};
+/*
+ * event->sibling_list is modified while holding both ctx->lock and ctx->mutex;
+ * as such iteration must hold either lock. However, since ctx->lock is an IRQ
+ * safe lock, and is only held by the CPU doing the modification, having IRQs
+ * disabled is sufficient since it will hold-off the IPIs.
+ */
+#ifdef CONFIG_PROVE_LOCKING
+# define lockdep_assert_event_ctx(event) \
+ WARN_ON_ONCE(__lockdep_enabled && \
+ (this_cpu_read(hardirqs_enabled) && \
+ lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD))
+#else
+# define lockdep_assert_event_ctx(event)
+#endif
+
#define for_each_sibling_event(sibling, event) \
+ lockdep_assert_event_ctx(event); \
if ((event)->group_leader == (event)) \
list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
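/*
 * Illustrative sketch, not part of this patch: walking a group with
 * for_each_sibling_event(); per the lockdep assertion above, the caller
 * must hold ctx->mutex or have IRQs disabled. group_size() is hypothetical.
 */
static inline int group_size(struct perf_event *leader)
{
	struct perf_event *sibling;
	int n = 1;			/* count the leader itself */

	for_each_sibling_event(sibling, leader)
		n++;

	return n;
}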
@@ -661,7 +797,13 @@ struct perf_event {
/* The cumulative AND of all event_caps for events in this group. */
int group_caps;
+ unsigned int group_generation;
struct perf_event *group_leader;
+ /*
+ * event->pmu will always point to pmu in which this event belongs.
+ * Whereas event->pmu_ctx->pmu may point to other pmu when group of
+ * different pmu events is created.
+ */
struct pmu *pmu;
void *pmu_private;
@@ -680,16 +822,6 @@ struct perf_event {
u64 total_time_running;
u64 tstamp;
- /*
- * timestamp shadows the actual context timing but it can
- * be safely used in NMI interrupt context. It reflects the
- * context time as it was when the event was last scheduled in.
- *
- * ctx_time already accounts for ctx->timestamp. Therefore to
- * compute ctx_time for a sample, simply add perf_clock().
- */
- u64 shadow_ctx_time;
-
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
@@ -697,6 +829,12 @@ struct perf_event {
struct hw_perf_event hw;
struct perf_event_context *ctx;
+ /*
+ * event->pmu_ctx points to perf_event_pmu_context in which the event
+ * is added. This pmu_ctx can be of other pmu for sw event when that
+ * sw event is part of a group which also contains non-sw events.
+ */
+ struct perf_event_pmu_context *pmu_ctx;
atomic_long_t refcount;
/*
@@ -721,7 +859,7 @@ struct perf_event {
/* mmap bits */
struct mutex mmap_mutex;
- atomic_t mmap_count;
+ refcount_t mmap_count;
struct perf_buffer *rb;
struct list_head rb_entry;
@@ -733,11 +871,14 @@ struct perf_event {
struct fasync_struct *fasync;
/* delayed work for NMIs and such */
- int pending_wakeup;
- int pending_kill;
- int pending_disable;
+ unsigned int pending_wakeup;
+ unsigned int pending_kill;
+ unsigned int pending_disable;
unsigned long pending_addr; /* SIGTRAP */
- struct irq_work pending;
+ struct irq_work pending_irq;
+ struct irq_work pending_disable_irq;
+ struct callback_head pending_task;
+ unsigned int pending_work;
atomic_t event_limit;
@@ -756,20 +897,20 @@ struct perf_event {
struct pid_namespace *ns;
u64 id;
+ atomic64_t lost_samples;
+
u64 (*clock)(void);
perf_overflow_handler_t overflow_handler;
void *overflow_handler_context;
-#ifdef CONFIG_BPF_SYSCALL
- perf_overflow_handler_t orig_overflow_handler;
struct bpf_prog *prog;
-#endif
+ u64 bpf_cookie;
#ifdef CONFIG_EVENT_TRACING
struct trace_event_call *tp_event;
struct event_filter *filter;
-#ifdef CONFIG_FUNCTION_TRACER
+# ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops ftrace_ops;
-#endif
+# endif
#endif
#ifdef CONFIG_CGROUP_PERF
@@ -780,22 +921,90 @@ struct perf_event {
void *security;
#endif
struct list_head sb_list;
+ struct list_head pmu_list;
+
+ /*
+ * Certain events get forwarded to another pmu internally by over-
+ * writing the kernel copy of event->attr.type without the user being
+ * aware of it. event->orig_type contains the original 'type' requested
+ * by the user.
+ */
+ u32 orig_type;
#endif /* CONFIG_PERF_EVENTS */
};
+/*
+ *           ,-----------------------[1:n]------------------------.
+ *           V                                                    V
+ * perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event
+ *                                        |                       |
+ *                                        `--[n:1]-> pmu <-[1:n]--'
+ *
+ *
+ * struct perf_event_pmu_context lifetime is refcount based and RCU freed
+ * (similar to perf_event_context). Locking is as if it were a member of
+ * perf_event_context; specifically:
+ *
+ * modification, both: ctx->mutex && ctx->lock
+ * reading, either: ctx->mutex || ctx->lock
+ *
+ * There is one exception to this; namely put_pmu_ctx() isn't always called
+ * with ctx->mutex held; this means that as long as we can guarantee the epc
+ * has events the above rules hold.
+ *
+ * Specifically, sys_perf_event_open()'s group_leader case depends on
+ * ctx->mutex pinning the configuration. Since we hold a reference on
+ * group_leader (through the filedesc) it can't go away, therefore its
+ * associated pmu_ctx must exist and cannot change due to ctx->mutex.
+ *
+ * perf_event holds a refcount on perf_event_context
+ * perf_event holds a refcount on perf_event_pmu_context
+ */
+struct perf_event_pmu_context {
+ struct pmu *pmu;
+ struct perf_event_context *ctx;
+
+ struct list_head pmu_ctx_entry;
+
+ struct list_head pinned_active;
+ struct list_head flexible_active;
+
+ /* Used to identify the per-cpu perf_event_pmu_context */
+ unsigned int embedded : 1;
+
+ unsigned int nr_events;
+ unsigned int nr_cgroups;
+ unsigned int nr_freq;
+
+ atomic_t refcount; /* event <-> epc */
+ struct rcu_head rcu_head;
+
+ /*
+ * Set when one or more (plausibly active) events can't be scheduled
+ * due to pmu overcommit or pmu constraints, except tolerant to
+ * events not necessary to be active due to scheduling constraints,
+ * such as cgroups.
+ */
+ int rotate_necessary;
+};
+
+static inline bool perf_pmu_ctx_is_active(struct perf_event_pmu_context *epc)
+{
+ return !list_empty(&epc->flexible_active) || !list_empty(&epc->pinned_active);
+}
struct perf_event_groups {
- struct rb_root tree;
- u64 index;
+ struct rb_root tree;
+ u64 index;
};
+
/**
* struct perf_event_context - event context structure
*
* Used as a container for task events and CPU events as well:
*/
struct perf_event_context {
- struct pmu *pmu;
/*
* Protect the states of the events in the list,
* nr_active, and the list:
@@ -808,26 +1017,20 @@ struct perf_event_context {
*/
struct mutex mutex;
- struct list_head active_ctx_list;
+ struct list_head pmu_ctx_list;
struct perf_event_groups pinned_groups;
struct perf_event_groups flexible_groups;
struct list_head event_list;
- struct list_head pinned_active;
- struct list_head flexible_active;
-
int nr_events;
- int nr_active;
+ int nr_user;
int is_active;
+
int nr_stat;
int nr_freq;
int rotate_disable;
- /*
- * Set when nr_events != nr_active, except tolerant to events not
- * necessary to be active due to scheduling constraints, such as cgroups.
- */
- int rotate_necessary;
- refcount_t refcount;
+
+ refcount_t refcount; /* event <-> ctx */
struct task_struct *task;
/*
@@ -835,6 +1038,7 @@ struct perf_event_context {
*/
u64 time;
u64 timestamp;
+ u64 timeoffset;
/*
* These fields let us detect when two contexts have both
@@ -847,39 +1051,85 @@ struct perf_event_context {
#ifdef CONFIG_CGROUP_PERF
int nr_cgroups; /* cgroup evts */
#endif
- void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
-};
-/*
- * Number of contexts where an event can trigger:
- * task, softirq, hardirq, nmi.
- */
-#define PERF_NR_CONTEXTS 4
+ /*
+ * The count of events for which using the switch-out fast path
+ * should be avoided.
+ *
+ * Sum (event->pending_work + events with
+ * (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ)))
+ *
+ * The SIGTRAP is targeted at ctx->task, so ctx->task must not change
+ * until the signal is delivered.
+ */
+ local_t nr_no_switch_fast;
+};
/**
- * struct perf_event_cpu_context - per cpu event context structure
+ * struct perf_ctx_data - PMU specific data for a task
+ * @rcu_head: To avoid races when freeing PMU specific data
+ * @refcount: To track users
+ * @global: To track system-wide users
+ * @ctx_cache: Kmem cache of PMU specific data
+ * @data: PMU specific data
+ *
+ * Currently, the struct is only used in Intel LBR call stack mode to
+ * save/restore the call stack of a task on context switches.
+ *
+ * The rcu_head is used to prevent races when freeing the data.
+ * The data is only allocated when Intel LBR call stack mode is enabled.
+ * It is freed when the mode is disabled.
+ * The content of the data is only accessed on context switch, which
+ * should be protected by rcu_read_lock().
+ *
+ * Because of the alignment requirement of Intel Arch LBR, the Kmem cache
+ * is used to allocate the PMU specific data. The ctx_cache is to track
+ * the Kmem cache.
+ *
+ * Careful: Struct perf_ctx_data is added as a pointer in struct task_struct.
+ * When system-wide Intel LBR call stack mode is enabled, a buffer with
+ * constant size will be allocated for each task.
+ * Also, system memory consumption can further grow when the size of
+ * struct perf_ctx_data grows.
*/
-struct perf_cpu_context {
- struct perf_event_context ctx;
- struct perf_event_context *task_ctx;
+struct perf_ctx_data {
+ struct rcu_head rcu_head;
+ refcount_t refcount;
+ int global;
+ struct kmem_cache *ctx_cache;
+ void *data;
+};
+
+struct perf_cpu_pmu_context {
+ struct perf_event_pmu_context epc;
+ struct perf_event_pmu_context *task_epc;
+
+ struct list_head sched_cb_entry;
+ int sched_cb_usage;
+
int active_oncpu;
int exclusive;
+ int pmu_disable_count;
raw_spinlock_t hrtimer_lock;
struct hrtimer hrtimer;
ktime_t hrtimer_interval;
unsigned int hrtimer_active;
+};
+
+/**
+ * struct perf_event_cpu_context - per cpu event context structure
+ */
+struct perf_cpu_context {
+ struct perf_event_context ctx;
+ struct perf_event_context *task_ctx;
+ int online;
#ifdef CONFIG_CGROUP_PERF
struct perf_cgroup *cgrp;
- struct list_head cgrp_cpuctx_entry;
#endif
- struct list_head sched_cb_entry;
- int sched_cb_usage;
-
- int online;
/*
* Per-CPU storage for iterators used in visit_groups_merge. The default
* storage is of size 2 to hold the CPU and any CPU event iterators.
@@ -894,7 +1144,13 @@ struct perf_output_handle {
struct perf_buffer *rb;
unsigned long wakeup;
unsigned long size;
- u64 aux_flags;
+ union {
+ u64 flags; /* perf_output*() */
+ u64 aux_flags; /* perf_aux_output*() */
+ struct {
+ u64 skip_read : 1;
+ };
+ };
union {
void *addr;
unsigned long head;
@@ -917,6 +1173,8 @@ struct bpf_perf_event_data_kern {
struct perf_cgroup_info {
u64 time;
u64 timestamp;
+ u64 timeoffset;
+ int active;
};
struct perf_cgroup {
@@ -941,6 +1199,8 @@ perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
#ifdef CONFIG_PERF_EVENTS
+extern struct perf_event_context *perf_cpu_task_ctx(void);
+
extern void *perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
@@ -952,7 +1212,7 @@ extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern void perf_event_itrace_started(struct perf_event *event);
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
-extern void perf_pmu_unregister(struct pmu *pmu);
+extern int perf_pmu_unregister(struct pmu *pmu);
extern void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task);
@@ -978,61 +1238,107 @@ extern void perf_pmu_resched(struct pmu *pmu);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
+
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
- int cpu,
- struct task_struct *task,
- perf_overflow_handler_t callback,
- void *context);
+ int cpu,
+ struct task_struct *task,
+ perf_overflow_handler_t callback,
+ void *context);
+
extern void perf_pmu_migrate_context(struct pmu *pmu,
- int src_cpu, int dst_cpu);
-int perf_event_read_local(struct perf_event *event, u64 *value,
- u64 *enabled, u64 *running);
+ int src_cpu, int dst_cpu);
+extern int perf_event_read_local(struct perf_event *event, u64 *value,
+ u64 *enabled, u64 *running);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
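/*
 * Illustrative sketch, not part of this patch: creating an in-kernel counter
 * for CPU cycles on CPU 0, with no task binding and no overflow callback.
 * make_cycle_counter() is a hypothetical helper.
 */
static struct perf_event *make_cycle_counter(void)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
	};

	return perf_event_create_kernel_counter(&attr, 0, NULL, NULL, NULL);
}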
+extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
+
+static inline bool branch_sample_no_flags(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS;
+}
+
+static inline bool branch_sample_no_cycles(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES;
+}
+
+static inline bool branch_sample_type(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE;
+}
+
+static inline bool branch_sample_hw_index(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
+}
+
+static inline bool branch_sample_priv(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
+}
+
+static inline bool branch_sample_counters(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS;
+}
+
+static inline bool branch_sample_call_stack(const struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
+}
struct perf_sample_data {
/*
- * Fields set by perf_sample_data_init(), group so as to
- * minimize the cachelines touched.
+ * Fields set by perf_sample_data_init() unconditionally,
+ * group so as to minimize the cachelines touched.
*/
- u64 addr;
- struct perf_raw_record *raw;
- struct perf_branch_stack *br_stack;
+ u64 sample_flags;
u64 period;
- union perf_sample_weight weight;
- u64 txn;
- union perf_mem_data_src data_src;
+ u64 dyn_size;
/*
- * The other fields, optionally {set,used} by
- * perf_{prepare,output}_sample().
+ * Fields commonly set by __perf_event_header__init_id(),
+ * group so as to minimize the cachelines touched.
*/
u64 type;
- u64 ip;
struct {
u32 pid;
u32 tid;
} tid_entry;
u64 time;
u64 id;
- u64 stream_id;
struct {
u32 cpu;
u32 reserved;
} cpu_entry;
+
+ /*
+ * The other fields, optionally {set,used} by
+ * perf_{prepare,output}_sample().
+ */
+ u64 ip;
struct perf_callchain_entry *callchain;
- u64 aux_size;
+ struct perf_raw_record *raw;
+ struct perf_branch_stack *br_stack;
+ u64 *br_stack_cntr;
+ union perf_sample_weight weight;
+ union perf_mem_data_src data_src;
+ u64 txn;
struct perf_regs regs_user;
struct perf_regs regs_intr;
u64 stack_user_size;
- u64 phys_addr;
+ u64 stream_id;
u64 cgroup;
+ u64 addr;
+ u64 phys_addr;
u64 data_page_size;
u64 code_page_size;
+ u64 aux_size;
} ____cacheline_aligned;
/* default value for data source */
@@ -1040,26 +1346,144 @@ struct perf_sample_data {
PERF_MEM_S(LVL, NA) |\
PERF_MEM_S(SNOOP, NA) |\
PERF_MEM_S(LOCK, NA) |\
- PERF_MEM_S(TLB, NA))
+ PERF_MEM_S(TLB, NA) |\
+ PERF_MEM_S(LVLNUM, NA))
static inline void perf_sample_data_init(struct perf_sample_data *data,
u64 addr, u64 period)
{
/* remaining struct members initialized in perf_prepare_sample() */
- data->addr = addr;
- data->raw = NULL;
- data->br_stack = NULL;
+ data->sample_flags = PERF_SAMPLE_PERIOD;
data->period = period;
- data->weight.full = 0;
- data->data_src.val = PERF_MEM_NA;
- data->txn = 0;
+ data->dyn_size = 0;
+
+ if (addr) {
+ data->addr = addr;
+ data->sample_flags |= PERF_SAMPLE_ADDR;
+ }
+}
+
+static inline void perf_sample_save_callchain(struct perf_sample_data *data,
+ struct perf_event *event,
+ struct pt_regs *regs)
+{
+ int size = 1;
+
+ if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))
+ return;
+ if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_CALLCHAIN))
+ return;
+
+ data->callchain = perf_callchain(event, regs);
+ size += data->callchain->nr;
+
+ data->dyn_size += size * sizeof(u64);
+ data->sample_flags |= PERF_SAMPLE_CALLCHAIN;
+}
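/*
 * Illustrative sketch, not part of this patch: a PMU interrupt handler
 * initializing sample data, attaching the callchain and emitting the
 * overflow. my_pmu_overflow() is a hypothetical driver function.
 */
static void my_pmu_overflow(struct perf_event *event, struct pt_regs *regs)
{
	struct perf_sample_data data;

	perf_sample_data_init(&data, 0, event->hw.last_period);
	perf_sample_save_callchain(&data, event, regs);
	perf_event_overflow(event, &data, regs);
}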
+
+static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
+ struct perf_event *event,
+ struct perf_raw_record *raw)
+{
+ struct perf_raw_frag *frag = &raw->frag;
+ u32 sum = 0;
+ int size;
+
+ if (!(event->attr.sample_type & PERF_SAMPLE_RAW))
+ return;
+ if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_RAW))
+ return;
+
+ do {
+ sum += frag->size;
+ if (perf_raw_frag_last(frag))
+ break;
+ frag = frag->next;
+ } while (1);
+
+ size = round_up(sum + sizeof(u32), sizeof(u64));
+ raw->size = size - sizeof(u32);
+ frag->pad = raw->size - sum;
+
+ data->raw = raw;
+ data->dyn_size += size;
+ data->sample_flags |= PERF_SAMPLE_RAW;
+}
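/*
 * Illustrative sketch, not part of this patch: attaching a single raw data
 * fragment to a sample; with frag.next unset the fragment is terminal, and
 * the helper above computes the total size and padding.
 */
static void save_one_raw_frag(struct perf_sample_data *data,
			      struct perf_event *event,
			      void *buf, u32 len)
{
	struct perf_raw_record raw = {
		.frag = {
			.data = buf,
			.size = len,
		},
	};

	perf_sample_save_raw_data(data, event, &raw);
}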
+
+static inline bool has_branch_stack(struct perf_event *event)
+{
+ return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
+}
+
+static inline void perf_sample_save_brstack(struct perf_sample_data *data,
+ struct perf_event *event,
+ struct perf_branch_stack *brs,
+ u64 *brs_cntr)
+{
+ int size = sizeof(u64); /* nr */
+
+ if (!has_branch_stack(event))
+ return;
+ if (WARN_ON_ONCE(data->sample_flags & PERF_SAMPLE_BRANCH_STACK))
+ return;
+
+ if (branch_sample_hw_index(event))
+ size += sizeof(u64);
+
+ brs->nr = min_t(u16, event->attr.sample_max_stack, brs->nr);
+
+ size += brs->nr * sizeof(struct perf_branch_entry);
+
+ /*
+ * The extension space for counters is appended after the
+ * struct perf_branch_stack. It is used to store the occurrences
+ * of events of each branch.
+ */
+ if (brs_cntr)
+ size += brs->nr * sizeof(u64);
+
+ data->br_stack = brs;
+ data->br_stack_cntr = brs_cntr;
+ data->dyn_size += size;
+ data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
+}
+
+static inline u32 perf_sample_data_size(struct perf_sample_data *data,
+ struct perf_event *event)
+{
+ u32 size = sizeof(struct perf_event_header);
+
+ size += event->header_size + event->id_header_size;
+ size += data->dyn_size;
+
+ return size;
+}
+
+/*
+ * Clear all bitfields in the perf_branch_entry.
+ * The to and from fields are not cleared because they are
+ * systematically modified by caller.
+ */
+static inline void perf_clear_branch_entry_bitfields(struct perf_branch_entry *br)
+{
+ br->mispred = 0;
+ br->predicted = 0;
+ br->in_tx = 0;
+ br->abort = 0;
+ br->cycles = 0;
+ br->type = 0;
+ br->spec = PERF_BR_SPEC_NA;
+ br->reserved = 0;
}
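/*
 * Illustrative sketch, not part of this patch: recording one taken branch;
 * from/to are set by the caller, every flag bitfield is reset first as the
 * helper above requires. record_taken_branch() is hypothetical.
 */
static inline void record_taken_branch(struct perf_branch_entry *br,
				       u64 from, u64 to)
{
	perf_clear_branch_entry_bitfields(br);
	br->from = from;
	br->to   = to;
}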
extern void perf_output_sample(struct perf_output_handle *handle,
struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event);
-extern void perf_prepare_sample(struct perf_event_header *header,
+extern void perf_prepare_sample(struct perf_sample_data *data,
+ struct perf_event *event,
+ struct pt_regs *regs);
+extern void perf_prepare_header(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs);
@@ -1081,9 +1505,11 @@ extern int perf_event_output(struct perf_event *event,
static inline bool
is_default_overflow_handler(struct perf_event *event)
{
- if (likely(event->overflow_handler == perf_event_output_forward))
+ perf_overflow_handler_t overflow_handler = event->overflow_handler;
+
+ if (likely(overflow_handler == perf_event_output_forward))
return true;
- if (unlikely(event->overflow_handler == perf_event_output_backward))
+ if (unlikely(overflow_handler == perf_event_output_backward))
return true;
return false;
}
@@ -1127,7 +1553,7 @@ static inline int is_software_event(struct perf_event *event)
*/
static inline int in_software_context(struct perf_event *event)
{
- return event->ctx->pmu->task_ctx_nr == perf_sw_context;
+ return event->pmu_ctx->pmu->task_ctx_nr == perf_sw_context;
}
static inline int is_exclusive_pmu(struct pmu *pmu)
@@ -1236,9 +1662,48 @@ extern void perf_event_bpf_event(struct bpf_prog *prog,
enum perf_bpf_event_type type,
u16 flags);
-extern struct perf_guest_info_callbacks *perf_guest_cbs;
-extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
-extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+#define PERF_GUEST_ACTIVE 0x01
+#define PERF_GUEST_USER 0x02
+
+struct perf_guest_info_callbacks {
+ unsigned int (*state)(void);
+ unsigned long (*get_ip)(void);
+ unsigned int (*handle_intel_pt_intr)(void);
+};
+
+#ifdef CONFIG_GUEST_PERF_EVENTS
+
+extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
+
+DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
+DECLARE_STATIC_CALL(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
+DECLARE_STATIC_CALL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
+
+static inline unsigned int perf_guest_state(void)
+{
+ return static_call(__perf_guest_state)();
+}
+
+static inline unsigned long perf_guest_get_ip(void)
+{
+ return static_call(__perf_guest_get_ip)();
+}
+
+static inline unsigned int perf_guest_handle_intel_pt_intr(void)
+{
+ return static_call(__perf_guest_handle_intel_pt_intr)();
+}
+
+extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
+extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
+
+#else /* !CONFIG_GUEST_PERF_EVENTS: */
+
+static inline unsigned int perf_guest_state(void) { return 0; }
+static inline unsigned long perf_guest_get_ip(void) { return 0; }
+static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; }
+
+#endif /* !CONFIG_GUEST_PERF_EVENTS */
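/*
 * Illustrative sketch, not part of this patch: a hypervisor wiring up the
 * guest callbacks. The example_guest_* names are hypothetical; the state
 * and IP would normally come from vCPU context.
 */
static unsigned int example_guest_state(void)
{
	return PERF_GUEST_ACTIVE | PERF_GUEST_USER;
}

static unsigned long example_guest_get_ip(void)
{
	return 0;	/* would return the sampled guest IP */
}

static unsigned int example_guest_handle_intel_pt_intr(void)
{
	return 0;	/* non-zero if the PT interrupt was for the guest */
}

static struct perf_guest_info_callbacks example_guest_cbs = {
	.state			= example_guest_state,
	.get_ip			= example_guest_get_ip,
	.handle_intel_pt_intr	= example_guest_handle_intel_pt_intr,
};

/* On init: perf_register_guest_info_callbacks(&example_guest_cbs); */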
extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
@@ -1254,9 +1719,8 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
-get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
- u32 max_stack, bool crosstask, bool add_mark);
-extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
+get_perf_callchain(struct pt_regs *regs, bool kernel, bool user,
+ u32 max_stack, bool crosstask, bool add_mark, u64 defer_cookie);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
@@ -1269,6 +1733,7 @@ static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *
{
if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
struct perf_callchain_entry *entry = ctx->entry;
+
entry->ip[entry->nr++] = ip;
++ctx->contexts;
return 0;
@@ -1282,6 +1747,7 @@ static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64
{
if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
struct perf_callchain_entry *entry = ctx->entry;
+
entry->ip[entry->nr++] = ip;
++ctx->nr;
return 0;
@@ -1291,19 +1757,10 @@ static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64
}
extern int sysctl_perf_event_paranoid;
-extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;
-extern int sysctl_perf_cpu_time_max_percent;
extern void perf_sample_event_took(u64 sample_len_ns);
-int perf_proc_update_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int perf_event_max_stack_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-
/* Access to perf_event_open(2) syscall. */
#define PERF_SECURITY_OPEN 0
@@ -1317,30 +1774,26 @@ static inline int perf_is_paranoid(void)
return sysctl_perf_event_paranoid > -1;
}
-static inline int perf_allow_kernel(struct perf_event_attr *attr)
-{
- if (sysctl_perf_event_paranoid > 1 && !perfmon_capable())
- return -EACCES;
-
- return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
-}
+extern int perf_allow_kernel(void);
-static inline int perf_allow_cpu(struct perf_event_attr *attr)
+static inline int perf_allow_cpu(void)
{
if (sysctl_perf_event_paranoid > 0 && !perfmon_capable())
return -EACCES;
- return security_perf_event_open(attr, PERF_SECURITY_CPU);
+ return security_perf_event_open(PERF_SECURITY_CPU);
}
-static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
+static inline int perf_allow_tracepoint(void)
{
if (sysctl_perf_event_paranoid > -1 && !perfmon_capable())
return -EPERM;
- return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
+ return security_perf_event_open(PERF_SECURITY_TRACEPOINT);
}
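With the attr argument dropped, callers now gate purely on the capability level. A hedged usage sketch of the narrowed interface in a PMU's event_init path (driver name illustrative):

static int my_pmu_event_init(struct perf_event *event)
{
	/* Profiling kernel context requires elevated privilege. */
	if (!event->attr.exclude_kernel)
		return perf_allow_kernel();

	return 0;
}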
+extern int perf_exclude_event(struct perf_event *event, struct pt_regs *regs);
+
extern void perf_event_init(void);
extern void perf_tp_event(u16 event_type, u64 count, void *record,
int entry_size, struct pt_regs *regs,
@@ -1348,19 +1801,34 @@ extern void perf_tp_event(u16 event_type, u64 count, void *record,
struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);
-#ifndef perf_misc_flags
-# define perf_misc_flags(regs) \
+extern unsigned long perf_misc_flags(struct perf_event *event, struct pt_regs *regs);
+extern unsigned long perf_instruction_pointer(struct perf_event *event,
+ struct pt_regs *regs);
+
+#ifndef perf_arch_misc_flags
+# define perf_arch_misc_flags(regs) \
(user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
-# define perf_instruction_pointer(regs) instruction_pointer(regs)
+# define perf_arch_instruction_pointer(regs) instruction_pointer(regs)
#endif
#ifndef perf_arch_bpf_user_pt_regs
# define perf_arch_bpf_user_pt_regs(regs) regs
#endif
-static inline bool has_branch_stack(struct perf_event *event)
+#ifndef perf_arch_guest_misc_flags
+static inline unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs)
{
- return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
+ unsigned long guest_state = perf_guest_state();
+
+ if (!(guest_state & PERF_GUEST_ACTIVE))
+ return 0;
+
+ if (guest_state & PERF_GUEST_USER)
+ return PERF_RECORD_MISC_GUEST_USER;
+ else
+ return PERF_RECORD_MISC_GUEST_KERNEL;
}
+# define perf_arch_guest_misc_flags(regs) perf_arch_guest_misc_flags(regs)
+#endif
static inline bool needs_branch_stack(struct perf_event *event)
{
@@ -1369,7 +1837,14 @@ static inline bool needs_branch_stack(struct perf_event *event)
static inline bool has_aux(struct perf_event *event)
{
- return event->pmu->setup_aux;
+ return event->pmu && event->pmu->setup_aux;
+}
+
+static inline bool has_aux_action(struct perf_event *event)
+{
+ return event->attr.aux_sample_size ||
+ event->attr.aux_pause ||
+ event->attr.aux_resume;
}
static inline bool is_write_backward(struct perf_event *event)
@@ -1396,7 +1871,16 @@ perf_event_addr_filters(struct perf_event *event)
return ifh;
}
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+ /* Only the parent has fasync state */
+ if (event->parent)
+ event = event->parent;
+ return &event->fasync;
+}
+
extern void perf_event_addr_filters_sync(struct perf_event *event);
+extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id);
extern int perf_output_begin(struct perf_output_handle *handle,
struct perf_sample_data *data,
@@ -1412,7 +1896,7 @@ extern int perf_output_begin_backward(struct perf_output_handle *handle,
extern void perf_output_end(struct perf_output_handle *handle);
extern unsigned int perf_output_copy(struct perf_output_handle *handle,
- const void *buf, unsigned int len);
+ const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
unsigned int len);
extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
@@ -1429,7 +1913,9 @@ extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
extern int perf_event_period(struct perf_event *event, u64 value);
extern u64 perf_event_pause(struct perf_event *event, bool reset);
+
#else /* !CONFIG_PERF_EVENTS: */
+
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event) { return NULL; }
@@ -1481,11 +1967,6 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
perf_bp_event(struct perf_event *event, void *data) { }
-static inline int perf_register_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
-static inline int perf_unregister_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
-
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
@@ -1512,15 +1993,14 @@ static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
-static inline int perf_event_period(struct perf_event *event, u64 value)
-{
- return -EINVAL;
-}
-static inline u64 perf_event_pause(struct perf_event *event, bool reset)
-{
- return 0;
-}
-#endif
+static inline int
+perf_event_period(struct perf_event *event, u64 value) { return -EINVAL; }
+static inline u64
+perf_event_pause(struct perf_event *event, bool reset) { return 0; }
+static inline int
+perf_exclude_event(struct perf_event *event, struct pt_regs *regs) { return 0; }
+
+#endif /* !CONFIG_PERF_EVENTS */
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern void perf_restore_debug_store(void);
@@ -1528,36 +2008,31 @@ extern void perf_restore_debug_store(void);
static inline void perf_restore_debug_store(void) { }
#endif
-static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag)
-{
- return frag->pad < sizeof(u64);
-}
-
-#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
+#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
struct perf_pmu_events_attr {
- struct device_attribute attr;
- u64 id;
- const char *event_str;
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str;
};
struct perf_pmu_events_ht_attr {
- struct device_attribute attr;
- u64 id;
- const char *event_str_ht;
- const char *event_str_noht;
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str_ht;
+ const char *event_str_noht;
};
struct perf_pmu_events_hybrid_attr {
- struct device_attribute attr;
- u64 id;
- const char *event_str;
- u64 pmu_type;
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str;
+ u64 pmu_type;
};
struct perf_pmu_format_hybrid_attr {
- struct device_attribute attr;
- u64 pmu_type;
+ struct device_attribute attr;
+ u64 pmu_type;
};
ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
@@ -1576,7 +2051,13 @@ static struct perf_pmu_events_attr _var = { \
.event_str = _str, \
};
-#define PMU_FORMAT_ATTR(_name, _format) \
+#define PMU_EVENT_ATTR_ID(_name, _show, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, _show, NULL), \
+ .id = _id, } \
+ })[0].attr.attr)
+
+#define PMU_FORMAT_ATTR_SHOW(_name, _format) \
static ssize_t \
_name##_show(struct device *dev, \
struct device_attribute *attr, \
@@ -1585,24 +2066,51 @@ _name##_show(struct device *dev, \
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
return sprintf(page, _format "\n"); \
} \
+
+#define PMU_FORMAT_ATTR(_name, _format) \
+ PMU_FORMAT_ATTR_SHOW(_name, _format) \
\
static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
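For context, PMU_FORMAT_ATTR() keeps its old behavior while the new PMU_FORMAT_ATTR_SHOW() lets a driver reuse only the show routine. A typical (illustrative) driver usage of the combined macro:

/* Exposes <sysfs pmu dir>/format/event with the contents "config:0-7". */
PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *my_pmu_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group my_pmu_format_group = {
	.name  = "format",
	.attrs = my_pmu_format_attrs,
};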
/* Performance counter hotplug functions */
#ifdef CONFIG_PERF_EVENTS
-int perf_event_init_cpu(unsigned int cpu);
-int perf_event_exit_cpu(unsigned int cpu);
+extern int perf_event_init_cpu(unsigned int cpu);
+extern int perf_event_exit_cpu(unsigned int cpu);
#else
-#define perf_event_init_cpu NULL
-#define perf_event_exit_cpu NULL
+# define perf_event_init_cpu NULL
+# define perf_event_exit_cpu NULL
#endif
-extern void __weak arch_perf_update_userpage(struct perf_event *event,
- struct perf_event_mmap_page *userpg,
- u64 now);
+extern void arch_perf_update_userpage(struct perf_event *event,
+ struct perf_event_mmap_page *userpg,
+ u64 now);
-#ifdef CONFIG_MMU
-extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
+/*
+ * Snapshot branch stack on software events.
+ *
+ * Branch stacks can be very useful in understanding software events. For
+ * example, when a long function, e.g. sys_perf_event_open, returns an
+ * errno, it is not obvious why the function failed. A branch stack can
+ * provide very helpful information in this type of scenario.
+ *
+ * On a software event, it is necessary to stop the hardware branch recorder
+ * quickly. Otherwise, the hardware register/buffer will be flushed with
+ * entries of the triggering event. Therefore, a static call is used to
+ * stop the hardware recorder.
+ */
+
+/*
+ * cnt is the number of entries allocated in the entries array.
+ * Returns the number of entries copied into the entries array.
+ */
+typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries,
+ unsigned int cnt);
+DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);
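Illustration (names assumed): an architecture PMU would supply the snapshot routine and patch it into the static call once its hardware branch recorder is available, e.g.:

/* Hypothetical arch implementation; the recorder details are elided. */
static int my_arch_snapshot_branch_stack(struct perf_branch_entry *entries,
					 unsigned int cnt)
{
	/*
	 * Stop the hardware recorder before anything else so the buffer
	 * is not flushed with entries of the snapshot path itself, then
	 * copy up to cnt entries out.
	 */
	return 0;	/* number of entries copied */
}

static int __init my_arch_pmu_init(void)
{
	static_call_update(perf_snapshot_branch_stack,
			   my_arch_snapshot_branch_stack);
	return 0;
}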
+
+#ifndef PERF_NEEDS_LOPWR_CB
+static inline void perf_lopwr_cb(bool mode)
+{
+}
#endif
#endif /* _LINUX_PERF_EVENT_H */
diff --git a/include/linux/perf_event_api.h b/include/linux/perf_event_api.h
new file mode 100644
index 000000000000..c2fd6048b790
--- /dev/null
+++ b/include/linux/perf_event_api.h
@@ -0,0 +1 @@
+#include <linux/perf_event.h>
diff --git a/include/linux/pfn.h b/include/linux/pfn.h
index 14bc053c53d8..b90ca0b6c331 100644
--- a/include/linux/pfn.h
+++ b/include/linux/pfn.h
@@ -4,15 +4,6 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
-
-/*
- * pfn_t: encapsulates a page-frame number that is optionally backed
- * by memmap (struct page). Whether a pfn_t has a 'struct page'
- * backing is indicated by flags in the high bits of the value.
- */
-typedef struct {
- u64 val;
-} pfn_t;
#endif
#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
deleted file mode 100644
index 2d9148221e9a..000000000000
--- a/include/linux/pfn_t.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PFN_T_H_
-#define _LINUX_PFN_T_H_
-#include <linux/mm.h>
-
-/*
- * PFN_FLAGS_MASK - mask of all the possible valid pfn_t flags
- * PFN_SG_CHAIN - pfn is a pointer to the next scatterlist entry
- * PFN_SG_LAST - pfn references a page and is the last scatterlist entry
- * PFN_DEV - pfn is not covered by system memmap by default
- * PFN_MAP - pfn has a dynamic page mapping established by a device driver
- * PFN_SPECIAL - for CONFIG_FS_DAX_LIMITED builds to allow XIP, but not
- * get_user_pages
- */
-#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
-#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
-#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
-#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
-#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4))
-#define PFN_SPECIAL (1ULL << (BITS_PER_LONG_LONG - 5))
-
-#define PFN_FLAGS_TRACE \
- { PFN_SPECIAL, "SPECIAL" }, \
- { PFN_SG_CHAIN, "SG_CHAIN" }, \
- { PFN_SG_LAST, "SG_LAST" }, \
- { PFN_DEV, "DEV" }, \
- { PFN_MAP, "MAP" }
-
-static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
-{
- pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };
-
- return pfn_t;
-}
-
-/* a default pfn to pfn_t conversion assumes that @pfn is pfn_valid() */
-static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
-{
- return __pfn_to_pfn_t(pfn, 0);
-}
-
-static inline pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags)
-{
- return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
-}
-
-static inline bool pfn_t_has_page(pfn_t pfn)
-{
- return (pfn.val & PFN_MAP) == PFN_MAP || (pfn.val & PFN_DEV) == 0;
-}
-
-static inline unsigned long pfn_t_to_pfn(pfn_t pfn)
-{
- return pfn.val & ~PFN_FLAGS_MASK;
-}
-
-static inline struct page *pfn_t_to_page(pfn_t pfn)
-{
- if (pfn_t_has_page(pfn))
- return pfn_to_page(pfn_t_to_pfn(pfn));
- return NULL;
-}
-
-static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
-{
- return PFN_PHYS(pfn_t_to_pfn(pfn));
-}
-
-static inline pfn_t page_to_pfn_t(struct page *page)
-{
- return pfn_to_pfn_t(page_to_pfn(page));
-}
-
-static inline int pfn_t_valid(pfn_t pfn)
-{
- return pfn_valid(pfn_t_to_pfn(pfn));
-}
-
-#ifdef CONFIG_MMU
-static inline pte_t pfn_t_pte(pfn_t pfn, pgprot_t pgprot)
-{
- return pfn_pte(pfn_t_to_pfn(pfn), pgprot);
-}
-#endif
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
-{
- return pfn_pmd(pfn_t_to_pfn(pfn), pgprot);
-}
-
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot)
-{
- return pfn_pud(pfn_t_to_pfn(pfn), pgprot);
-}
-#endif
-#endif
-
-#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline bool pfn_t_devmap(pfn_t pfn)
-{
- const u64 flags = PFN_DEV|PFN_MAP;
-
- return (pfn.val & flags) == flags;
-}
-#else
-static inline bool pfn_t_devmap(pfn_t pfn)
-{
- return false;
-}
-pte_t pte_mkdevmap(pte_t pte);
-pmd_t pmd_mkdevmap(pmd_t pmd);
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
-pud_t pud_mkdevmap(pud_t pud);
-#endif
-#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */
-
-#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
-static inline bool pfn_t_special(pfn_t pfn)
-{
- return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL;
-}
-#else
-static inline bool pfn_t_special(pfn_t pfn)
-{
- return false;
-}
-#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
-#endif /* _LINUX_PFN_T_H_ */
diff --git a/include/linux/pgalloc.h b/include/linux/pgalloc.h
new file mode 100644
index 000000000000..9174fa59bbc5
--- /dev/null
+++ b/include/linux/pgalloc.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PGALLOC_H
+#define _LINUX_PGALLOC_H
+
+#include <linux/pgtable.h>
+#include <asm/pgalloc.h>
+
+/*
+ * {pgd,p4d}_populate_kernel() are defined as macros to allow
+ * compile-time optimization based on the configured page table levels.
+ * Without this, linking may fail because callers (e.g., KASAN) may rely
+ * on calls to these functions being optimized away when passing symbols
+ * that exist only for certain page table levels.
+ */
+#define pgd_populate_kernel(addr, pgd, p4d) \
+ do { \
+ pgd_populate(&init_mm, pgd, p4d); \
+ if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_PGD_MODIFIED) \
+ arch_sync_kernel_mappings(addr, addr); \
+ } while (0)
+
+#define p4d_populate_kernel(addr, p4d, pud) \
+ do { \
+ p4d_populate(&init_mm, p4d, pud); \
+ if (ARCH_PAGE_TABLE_SYNC_MASK & PGTBL_P4D_MODIFIED) \
+ arch_sync_kernel_mappings(addr, addr); \
+ } while (0)
+
+#endif /* _LINUX_PGALLOC_H */
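A short sketch of why the macro form matters: a caller such as a KASAN-style shadow populator can pass level-specific symbols, and on kernels with folded levels the dead branch (and its symbol reference) is compiled away. Function name and context are illustrative only:

#include <linux/pgalloc.h>

/* Illustrative early-init populator; assumes a freshly allocated p4d page. */
static void __init populate_shadow_pgd(unsigned long addr, pgd_t *pgd,
				       p4d_t *new_p4d)
{
	if (pgd_none(*pgd))
		pgd_populate_kernel(addr, pgd, new_p4d);
}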
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
new file mode 100644
index 000000000000..38a82d65e58e
--- /dev/null
+++ b/include/linux/pgalloc_tag.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * page allocation tagging
+ */
+#ifndef _LINUX_PGALLOC_TAG_H
+#define _LINUX_PGALLOC_TAG_H
+
+#include <linux/alloc_tag.h>
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+
+#include <linux/page_ext.h>
+
+extern struct page_ext_operations page_alloc_tagging_ops;
+extern unsigned long alloc_tag_ref_mask;
+extern int alloc_tag_ref_offs;
+extern struct alloc_tag_kernel_section kernel_tags;
+
+DECLARE_STATIC_KEY_FALSE(mem_profiling_compressed);
+
+typedef u16 pgalloc_tag_idx;
+
+union pgtag_ref_handle {
+ union codetag_ref *ref; /* reference in page extension */
+ struct page *page; /* reference in page flags */
+};
+
+/* Reserved indexes */
+#define CODETAG_ID_NULL 0
+#define CODETAG_ID_EMPTY 1
+#define CODETAG_ID_FIRST 2
+
+#ifdef CONFIG_MODULES
+
+extern struct alloc_tag_module_section module_tags;
+
+static inline struct alloc_tag *module_idx_to_tag(pgalloc_tag_idx idx)
+{
+ return &module_tags.first_tag[idx - kernel_tags.count];
+}
+
+static inline pgalloc_tag_idx module_tag_to_idx(struct alloc_tag *tag)
+{
+ return CODETAG_ID_FIRST + kernel_tags.count + (tag - module_tags.first_tag);
+}
+
+#else /* CONFIG_MODULES */
+
+static inline struct alloc_tag *module_idx_to_tag(pgalloc_tag_idx idx)
+{
+ pr_warn("invalid page tag reference %lu\n", (unsigned long)idx);
+ return NULL;
+}
+
+static inline pgalloc_tag_idx module_tag_to_idx(struct alloc_tag *tag)
+{
+ pr_warn("invalid page tag 0x%lx\n", (unsigned long)tag);
+ return CODETAG_ID_NULL;
+}
+
+#endif /* CONFIG_MODULES */
+
+static inline void idx_to_ref(pgalloc_tag_idx idx, union codetag_ref *ref)
+{
+ switch (idx) {
+ case (CODETAG_ID_NULL):
+ ref->ct = NULL;
+ break;
+ case (CODETAG_ID_EMPTY):
+ set_codetag_empty(ref);
+ break;
+ default:
+ idx -= CODETAG_ID_FIRST;
+ ref->ct = idx < kernel_tags.count ?
+ &kernel_tags.first_tag[idx].ct :
+ &module_idx_to_tag(idx)->ct;
+ break;
+ }
+}
+
+static inline pgalloc_tag_idx ref_to_idx(union codetag_ref *ref)
+{
+ struct alloc_tag *tag;
+
+ if (!ref->ct)
+ return CODETAG_ID_NULL;
+
+ if (is_codetag_empty(ref))
+ return CODETAG_ID_EMPTY;
+
+ tag = ct_to_alloc_tag(ref->ct);
+ if (tag >= kernel_tags.first_tag && tag < kernel_tags.first_tag + kernel_tags.count)
+ return CODETAG_ID_FIRST + (tag - kernel_tags.first_tag);
+
+ return module_tag_to_idx(tag);
+}
+
+
+static inline bool get_page_tag_ref(struct page *page, union codetag_ref *ref,
+ union pgtag_ref_handle *handle)
+{
+ if (!page)
+ return false;
+
+ if (static_key_enabled(&mem_profiling_compressed)) {
+ pgalloc_tag_idx idx;
+
+ idx = (page->flags.f >> alloc_tag_ref_offs) &
+ alloc_tag_ref_mask;
+ idx_to_ref(idx, ref);
+ handle->page = page;
+ } else {
+ struct page_ext *page_ext;
+ union codetag_ref *tmp;
+
+ page_ext = page_ext_get(page);
+ if (!page_ext)
+ return false;
+
+ tmp = (union codetag_ref *)page_ext_data(page_ext, &page_alloc_tagging_ops);
+ ref->ct = tmp->ct;
+ handle->ref = tmp;
+ }
+
+ return true;
+}
+
+static inline void put_page_tag_ref(union pgtag_ref_handle handle)
+{
+ if (WARN_ON(!handle.ref))
+ return;
+
+ if (!static_key_enabled(&mem_profiling_compressed))
+ page_ext_put((void *)handle.ref - page_alloc_tagging_ops.offset);
+}
+
+static inline void update_page_tag_ref(union pgtag_ref_handle handle, union codetag_ref *ref)
+{
+ if (static_key_enabled(&mem_profiling_compressed)) {
+ struct page *page = handle.page;
+ unsigned long old_flags;
+ unsigned long flags;
+ unsigned long idx;
+
+ if (WARN_ON(!page || !ref))
+ return;
+
+ idx = (unsigned long)ref_to_idx(ref);
+ idx = (idx & alloc_tag_ref_mask) << alloc_tag_ref_offs;
+ do {
+ old_flags = READ_ONCE(page->flags.f);
+ flags = old_flags;
+ flags &= ~(alloc_tag_ref_mask << alloc_tag_ref_offs);
+ flags |= idx;
+ } while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags)));
+ } else {
+ if (WARN_ON(!handle.ref || !ref))
+ return;
+
+ handle.ref->ct = ref->ct;
+ }
+}
+
+/* Should be called only if mem_alloc_profiling_enabled() */
+void __clear_page_tag_ref(struct page *page);
+
+static inline void clear_page_tag_ref(struct page *page)
+{
+ if (mem_alloc_profiling_enabled())
+ __clear_page_tag_ref(page);
+}
+
+/* Should be called only if mem_alloc_profiling_enabled() */
+static inline struct alloc_tag *__pgalloc_tag_get(struct page *page)
+{
+ struct alloc_tag *tag = NULL;
+ union pgtag_ref_handle handle;
+ union codetag_ref ref;
+
+ if (get_page_tag_ref(page, &ref, &handle)) {
+ alloc_tag_sub_check(&ref);
+ if (ref.ct)
+ tag = ct_to_alloc_tag(ref.ct);
+ put_page_tag_ref(handle);
+ }
+
+ return tag;
+}
+
+static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
+{
+ if (mem_alloc_profiling_enabled())
+ return __pgalloc_tag_get(page);
+ return NULL;
+}
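Usage sketch (codetag field names assumed): a debugging helper can resolve the allocating call site of a page without caring whether the reference lives in the page flags or in the page extension:

#include <linux/pgalloc_tag.h>

/* Illustrative only: print where a page was allocated, if profiling is on. */
static void report_page_alloc_site(struct page *page)
{
	struct alloc_tag *tag = pgalloc_tag_get(page);

	if (tag)
		pr_info("page allocated at %s:%u\n",
			tag->ct.filename, tag->ct.lineno);
}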
+
+void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);
+void pgalloc_tag_swap(struct folio *new, struct folio *old);
+
+void __init alloc_tag_sec_init(void);
+
+#else /* CONFIG_MEM_ALLOC_PROFILING */
+
+static inline void clear_page_tag_ref(struct page *page) {}
+static inline void alloc_tag_sec_init(void) {}
+static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {}
+static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {}
+static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; }
+
+#endif /* CONFIG_MEM_ALLOC_PROFILING */
+
+#endif /* _LINUX_PGALLOC_TAG_H */
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 46b13780c2c8..652f287c1ef6 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -5,6 +5,9 @@
#include <linux/pfn.h>
#include <asm/pgtable.h>
+#define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT)
+#define PUD_ORDER (PUD_SHIFT - PAGE_SHIFT)
+
#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU
@@ -12,6 +15,7 @@
#include <linux/bug.h>
#include <linux/errno.h>
#include <asm-generic/pgtable_uffd.h>
+#include <linux/page_table_check.h>
#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
@@ -29,6 +33,26 @@
#endif
/*
+ * This defines the first usable user address. Platforms
+ * can override its value with custom FIRST_USER_ADDRESS
+ * defined in their respective <asm/pgtable.h>.
+ */
+#ifndef FIRST_USER_ADDRESS
+#define FIRST_USER_ADDRESS 0UL
+#endif
+
+/*
+ * This defines the generic helper for accessing PMD page
+ * table page. Although platforms can still override this
+ * via their respective <asm/pgtable.h>.
+ */
+#ifndef pmd_pgtable
+#define pmd_pgtable(pmd) pmd_page(pmd)
+#endif
+
+#define pmd_folio(pmd) page_folio(pmd_page(pmd))
+
+/*
* A page table page can be thought of an array like this: pXd_t[PTRS_PER_PxD]
*
* The pXx_index() functions return the index of the entry in the page
@@ -66,6 +90,27 @@ static inline unsigned long pud_index(unsigned long address)
#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#endif
+#ifndef kernel_pte_init
+static inline void kernel_pte_init(void *addr)
+{
+}
+#define kernel_pte_init kernel_pte_init
+#endif
+
+#ifndef pmd_init
+static inline void pmd_init(void *addr)
+{
+}
+#define pmd_init pmd_init
+#endif
+
+#ifndef pud_init
+static inline void pud_init(void *addr)
+{
+}
+#define pud_init pud_init
+#endif
+
#ifndef pte_offset_kernel
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
@@ -74,21 +119,31 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
#define pte_offset_kernel pte_offset_kernel
#endif
-#if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
- ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
- pte_index((address)))
-#define pte_unmap(pte) kunmap_atomic((pte))
+#ifdef CONFIG_HIGHPTE
+#define __pte_map(pmd, address) \
+ ((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address)))
+#define pte_unmap(pte) do { \
+ kunmap_local((pte)); \
+ rcu_read_unlock(); \
+} while (0)
#else
-#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
-#define pte_unmap(pte) ((void)(pte)) /* NOP */
+static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address)
+{
+ return pte_offset_kernel(pmd, address);
+}
+static inline void pte_unmap(pte_t *pte)
+{
+ rcu_read_unlock();
+}
#endif
+void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
+
/* Find an entry in the second-level page table.. */
#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
- return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+ return pud_pgtable(*pud) + pmd_index(address);
}
#define pmd_offset pmd_offset
#endif
@@ -96,7 +151,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#ifndef pud_offset
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
- return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
+ return p4d_pgtable(*p4d) + pud_index(address);
}
#define pud_offset pud_offset
#endif
@@ -117,9 +172,7 @@ static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
*/
-#ifndef pgd_offset_k
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
-#endif
/*
* In many cases it is known that a virtual address is mapped at PMD or PTE
@@ -145,6 +198,110 @@ static inline pte_t *virt_to_kpte(unsigned long vaddr)
return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}
+#ifndef pmd_young
+static inline int pmd_young(pmd_t pmd)
+{
+ return 0;
+}
+#endif
+
+#ifndef pmd_dirty
+static inline int pmd_dirty(pmd_t pmd)
+{
+ return 0;
+}
+#endif
+
+/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date.
+ *
+ * In the general case, no lock is guaranteed to be held between entry and exit
+ * of the lazy mode. So the implementation must assume preemption may be enabled
+ * and cpu migration is possible; it must take steps to be robust against this.
+ * (In practice, for user PTE updates, the appropriate page table lock(s) are
+ * held, but for kernel PTE updates, no lock is held). Nesting is not permitted
+ * and the mode cannot be used in interrupt context.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+static inline void arch_enter_lazy_mmu_mode(void) {}
+static inline void arch_leave_lazy_mmu_mode(void) {}
+static inline void arch_flush_lazy_mmu_mode(void) {}
+#endif
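A hedged usage sketch: a batch of PTE updates bracketed by the lazy MMU hooks, so a shadow/paravirt implementation can coalesce them; per the comment above, the caller holds the page table lock for user PTEs. Helper name is illustrative:

/* Illustrative batch update; the caller holds the page table lock. */
static void set_pte_batch(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep, pte_t pte, unsigned int nr)
{
	unsigned int i;

	arch_enter_lazy_mmu_mode();
	for (i = 0; i < nr; i++, ptep++, addr += PAGE_SIZE)
		set_pte_at(mm, addr, ptep, pte_advance_pfn(pte, i));
	arch_leave_lazy_mmu_mode();
}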
+
+#ifndef pte_batch_hint
+/**
+ * pte_batch_hint - Number of pages that can be added to batch without scanning.
+ * @ptep: Page table pointer for the entry.
+ * @pte: Page table entry.
+ *
+ * Some architectures know that a set of contiguous ptes all map the same
+ * contiguous memory with the same permissions. In this case, it can provide a
+ * hint to aid pte batching without the core code needing to scan every pte.
+ *
+ * An architecture implementation may ignore the PTE accessed state. Further,
+ * the dirty state must apply atomically to all the PTEs described by the hint.
+ *
+ * May be overridden by the architecture, else pte_batch_hint is always 1.
+ */
+static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
+{
+ return 1;
+}
+#endif
+
+#ifndef pte_advance_pfn
+static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
+{
+ return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT));
+}
+#endif
+
+#define pte_next_pfn(pte) pte_advance_pfn(pte, 1)
+
+#ifndef set_ptes
+/**
+ * set_ptes - Map consecutive pages to a contiguous range of addresses.
+ * @mm: Address space to map the pages into.
+ * @addr: Address to map the first page at.
+ * @ptep: Page table pointer for the first entry.
+ * @pte: Page table entry for the first page.
+ * @nr: Number of pages to map.
+ *
+ * When nr==1, initial state of pte may be present or not present, and new state
+ * may be present or not present. When nr>1, initial state of all ptes must be
+ * not present, and new state must be present.
+ *
+ * May be overridden by the architecture, or the architecture can define
+ * set_pte() and PFN_PTE_SHIFT.
+ *
+ * Context: The caller holds the page table lock. The pages all belong
+ * to the same folio. The PTEs are all in the same PMD.
+ */
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned int nr)
+{
+ page_table_check_ptes_set(mm, ptep, pte, nr);
+
+ for (;;) {
+ set_pte(ptep, pte);
+ if (--nr == 0)
+ break;
+ ptep++;
+ pte = pte_next_pfn(pte);
+ }
+}
+#endif
+#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
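Usage sketch: mapping every page of a folio with one set_ptes() call; mk_pte() and folio_nr_pages() are existing helpers, the function name is illustrative:

/* Caller holds the PTE lock; all nr entries must currently be not present. */
static void map_folio_ptes(struct vm_area_struct *vma, unsigned long addr,
			   pte_t *ptep, struct folio *folio)
{
	unsigned int nr = folio_nr_pages(folio);
	pte_t pte = mk_pte(&folio->page, vma->vm_page_prot);

	set_ptes(vma->vm_mm, addr, ptep, pte, nr);
}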
+
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
@@ -177,12 +334,47 @@ static inline int pudp_set_access_flags(struct vm_area_struct *vma,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef ptep_get
+static inline pte_t ptep_get(pte_t *ptep)
+{
+ return READ_ONCE(*ptep);
+}
+#endif
+
+#ifndef pmdp_get
+static inline pmd_t pmdp_get(pmd_t *pmdp)
+{
+ return READ_ONCE(*pmdp);
+}
+#endif
+
+#ifndef pudp_get
+static inline pud_t pudp_get(pud_t *pudp)
+{
+ return READ_ONCE(*pudp);
+}
+#endif
+
+#ifndef p4dp_get
+static inline p4d_t p4dp_get(p4d_t *p4dp)
+{
+ return READ_ONCE(*p4dp);
+}
+#endif
+
+#ifndef pgdp_get
+static inline pgd_t pgdp_get(pgd_t *pgdp)
+{
+ return READ_ONCE(*pgdp);
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
pte_t *ptep)
{
- pte_t pte = *ptep;
+ pte_t pte = ptep_get(ptep);
int r = 1;
if (!pte_young(pte))
r = 0;
@@ -193,7 +385,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
#endif
#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp)
@@ -214,7 +406,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
BUILD_BUG();
return 0;
}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
@@ -240,35 +432,138 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef arch_has_hw_nonleaf_pmd_young
+/*
+ * Return whether the accessed bit in non-leaf PMD entries is supported on the
+ * local CPU.
+ */
+static inline bool arch_has_hw_nonleaf_pmd_young(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG);
+}
+#endif
+
+#ifndef arch_has_hw_pte_young
+/*
+ * Return whether the accessed bit is supported on the local CPU.
+ *
+ * This stub assumes accessing through an old PTE triggers a page fault.
+ * Architectures that automatically set the access bit should override it.
+ */
+static inline bool arch_has_hw_pte_young(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG);
+}
+#endif
+
+#ifndef exec_folio_order
+/*
+ * Returns preferred minimum folio order for executable file-backed memory. Must
+ * be in range [0, PMD_ORDER). Default to order-0.
+ */
+static inline unsigned int exec_folio_order(void)
+{
+ return 0;
+}
+#endif
+
+#ifndef arch_check_zapped_pte
+static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
+ pte_t pte)
+{
+}
+#endif
+
+#ifndef arch_check_zapped_pmd
+static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
+ pmd_t pmd)
+{
+}
+#endif
+
+#ifndef arch_check_zapped_pud
+static inline void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
+{
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address,
pte_t *ptep)
{
- pte_t pte = *ptep;
+ pte_t pte = ptep_get(ptep);
pte_clear(mm, address, ptep);
+ page_table_check_pte_clear(mm, pte);
return pte;
}
#endif
-#ifndef __HAVE_ARCH_PTEP_GET
-static inline pte_t ptep_get(pte_t *ptep)
+#ifndef clear_young_dirty_ptes
+/**
+ * clear_young_dirty_ptes - Mark PTEs that map consecutive pages of the
+ * same folio as old/clean.
+ * @mm: Address space the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to mark old/clean.
+ * @flags: Flags to modify the PTE batch semantics.
+ *
+ * May be overridden by the architecture; otherwise, implemented by
+ * get_and_clear/modify/set for each pte in the range.
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ */
+static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr, cydp_t flags)
{
- return READ_ONCE(*ptep);
+ pte_t pte;
+
+ for (;;) {
+ if (flags == CYDP_CLEAR_YOUNG)
+ ptep_test_and_clear_young(vma, addr, ptep);
+ else {
+ pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ if (flags & CYDP_CLEAR_YOUNG)
+ pte = pte_mkold(pte);
+ if (flags & CYDP_CLEAR_DIRTY)
+ pte = pte_mkclean(pte);
+ set_pte_at(vma->vm_mm, addr, ptep, pte);
+ }
+ if (--nr == 0)
+ break;
+ ptep++;
+ addr += PAGE_SIZE;
+ }
}
#endif
-#ifdef CONFIG_GUP_GET_PTE_LOW_HIGH
+static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t pte = ptep_get(ptep);
+
+ pte_clear(mm, addr, ptep);
+ /*
+ * No need for ptep_get_and_clear(): page table check doesn't care about
+ * any bits that could have been set by HW concurrently.
+ */
+ page_table_check_pte_clear(mm, pte);
+}
+
+#ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
/*
- * WARNING: only to be used in the get_user_pages_fast() implementation.
- *
- * With get_user_pages_fast(), we walk down the pagetables without taking any
- * locks. For this we would like to load the pointers atomically, but sometimes
- * that is not possible (e.g. without expensive cmpxchg8b on x86_32 PAE). What
- * we do have is the guarantee that a PTE will only either go from not present
- * to present, or present to not present or both -- it will not switch to a
- * completely different present page without a TLB flush in between; something
- * that we are blocking by holding interrupts off.
+ * For walking the pagetables without holding any locks. Some architectures
+ * (e.g. x86-32 PAE) cannot load the entries atomically without using expensive
+ * instructions. We are guaranteed that a PTE will only either go from not
+ * present to present, or present to not present -- it will not switch to a
+ * completely different present page without a TLB flush in between, which we
+ * block by holding interrupts off.
*
* Setting ptes from not present to present goes:
*
@@ -303,15 +598,46 @@ static inline pte_t ptep_get_lockless(pte_t *ptep)
return pte;
}
-#else /* CONFIG_GUP_GET_PTE_LOW_HIGH */
+#define ptep_get_lockless ptep_get_lockless
+
+#if CONFIG_PGTABLE_LEVELS > 2
+static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
+{
+ pmd_t pmd;
+
+ do {
+ pmd.pmd_low = pmdp->pmd_low;
+ smp_rmb();
+ pmd.pmd_high = pmdp->pmd_high;
+ smp_rmb();
+ } while (unlikely(pmd.pmd_low != pmdp->pmd_low));
+
+ return pmd;
+}
+#define pmdp_get_lockless pmdp_get_lockless
+#define pmdp_get_lockless_sync() tlb_remove_table_sync_one()
+#endif /* CONFIG_PGTABLE_LEVELS > 2 */
+#endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */
+
/*
* We require that the PTE can be read atomically.
*/
+#ifndef ptep_get_lockless
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
return ptep_get(ptep);
}
-#endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */
+#endif
+
+#ifndef pmdp_get_lockless
+static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
+{
+ return pmdp_get(pmdp);
+}
+static inline void pmdp_get_lockless_sync(void)
+{
+}
+#endif
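Sketch of the intended consumer (GUP-fast style), assuming the caller has interrupts disabled so the TLB flush that must separate two different present PTEs cannot slip in between the halves:

/* Illustrative lockless read; returns 0 for a non-present entry. */
static unsigned long lockless_pte_pfn(pte_t *ptep)
{
	pte_t pte;

	lockdep_assert_irqs_disabled();
	pte = ptep_get_lockless(ptep);

	return pte_present(pte) ? pte_pfn(pte) : 0;
}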
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
@@ -320,7 +646,10 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
pmd_t *pmdp)
{
pmd_t pmd = *pmdp;
+
pmd_clear(pmdp);
+ page_table_check_pmd_clear(mm, pmd);
+
return pmd;
}
#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
@@ -332,6 +661,8 @@ static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
pud_t pud = *pudp;
pud_clear(pudp);
+ page_table_check_pud_clear(mm, pud);
+
return pud;
}
#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
@@ -348,11 +679,11 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
#endif
#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
-static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
+static inline pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp,
int full)
{
- return pudp_huge_get_and_clear(mm, address, pudp);
+ return pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -362,12 +693,125 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
unsigned long address, pte_t *ptep,
int full)
{
- pte_t pte;
- pte = ptep_get_and_clear(mm, address, ptep);
+ return ptep_get_and_clear(mm, address, ptep);
+}
+#endif
+
+#ifndef get_and_clear_full_ptes
+/**
+ * get_and_clear_full_ptes - Clear present PTEs that map consecutive pages of
+ * the same folio, collecting dirty/accessed bits.
+ * @mm: Address space the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to clear.
+ * @full: Whether we are clearing a full mm.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_get_and_clear_full(), merging dirty/accessed bits into the
+ * returned PTE.
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ */
+static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep, unsigned int nr, int full)
+{
+ pte_t pte, tmp_pte;
+
+ pte = ptep_get_and_clear_full(mm, addr, ptep, full);
+ while (--nr) {
+ ptep++;
+ addr += PAGE_SIZE;
+ tmp_pte = ptep_get_and_clear_full(mm, addr, ptep, full);
+ if (pte_dirty(tmp_pte))
+ pte = pte_mkdirty(pte);
+ if (pte_young(tmp_pte))
+ pte = pte_mkyoung(pte);
+ }
return pte;
}
#endif
+/**
+ * get_and_clear_ptes - Clear present PTEs that map consecutive pages of
+ * the same folio, collecting dirty/accessed bits.
+ * @mm: Address space the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to clear.
+ *
+ * Use this instead of get_and_clear_full_ptes() if it is known that we don't
+ * need to clear the full mm, which is mostly the case.
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ */
+static inline pte_t get_and_clear_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned int nr)
+{
+ return get_and_clear_full_ptes(mm, addr, ptep, nr, 0);
+}
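Usage sketch for the batched clear, propagating the merged dirty/accessed state to the folio (helper name illustrative):

/* Caller holds the PTE lock; the nr entries map one folio. */
static void unmap_folio_ptes(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, unsigned int nr, struct folio *folio)
{
	pte_t pte = get_and_clear_ptes(mm, addr, ptep, nr);

	if (pte_dirty(pte))
		folio_mark_dirty(folio);
	if (pte_young(pte))
		folio_mark_accessed(folio);
}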
+
+#ifndef clear_full_ptes
+/**
+ * clear_full_ptes - Clear present PTEs that map consecutive pages of the same
+ * folio.
+ * @mm: Address space the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to clear.
+ * @full: Whether we are clearing a full mm.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_get_and_clear_full().
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ */
+static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned int nr, int full)
+{
+ for (;;) {
+ ptep_get_and_clear_full(mm, addr, ptep, full);
+ if (--nr == 0)
+ break;
+ ptep++;
+ addr += PAGE_SIZE;
+ }
+}
+#endif
+
+/**
+ * clear_ptes - Clear present PTEs that map consecutive pages of the same folio.
+ * @mm: Address space the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to clear.
+ *
+ * Use this instead of clear_full_ptes() if it is known that we don't need to
+ * clear the full mm, which is mostly the case.
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ */
+static inline void clear_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned int nr)
+{
+ clear_full_ptes(mm, addr, ptep, nr, 0);
+}
/*
* If two threads concurrently fault at the same page, the thread that
@@ -377,13 +821,18 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
* fault. This function updates TLB only, do nothing with cache or others.
* It is the difference with function update_mmu_cache.
*/
-#ifndef __HAVE_ARCH_UPDATE_MMU_TLB
+#ifndef update_mmu_tlb_range
+static inline void update_mmu_tlb_range(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep, unsigned int nr)
+{
+}
+#endif
+
static inline void update_mmu_tlb(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
+ update_mmu_tlb_range(vma, address, ptep, 1);
}
-#define __HAVE_ARCH_UPDATE_MMU_TLB
-#endif
/*
* Some architectures may be able to avoid expensive synchronization
@@ -400,6 +849,35 @@ static inline void pte_clear_not_present_full(struct mm_struct *mm,
}
#endif
+#ifndef clear_not_present_full_ptes
+/**
+ * clear_not_present_full_ptes - Clear multiple not present PTEs which are
+ * consecutive in the pgtable.
+ * @mm: Address space the ptes represent.
+ * @addr: Address of the first pte.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to clear.
+ * @full: Whether we are clearing a full mm.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over pte_clear_not_present_full().
+ *
+ * Context: The caller holds the page table lock. The PTEs are all not present.
+ * The PTEs are all in the same PMD.
+ */
+static inline void clear_not_present_full_ptes(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep, unsigned int nr, int full)
+{
+ for (;;) {
+ pte_clear_not_present_full(mm, addr, ptep, full);
+ if (--nr == 0)
+ break;
+ ptep++;
+ addr += PAGE_SIZE;
+ }
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address,
@@ -415,15 +893,60 @@ extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
pud_t *pudp);
#endif
+#ifndef pte_mkwrite
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ return pte_mkwrite_novma(pte);
+}
+#endif
+
+#if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite)
+static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+{
+ return pmd_mkwrite_novma(pmd);
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
- pte_t old_pte = *ptep;
+ pte_t old_pte = ptep_get(ptep);
set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif
+#ifndef wrprotect_ptes
+/**
+ * wrprotect_ptes - Write-protect PTEs that map consecutive pages of the same
+ * folio.
+ * @mm: Address space the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries to write-protect.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_set_wrprotect().
+ *
+ * Note that PTE bits in the PTE range besides the PFN can differ. For example,
+ * some PTEs might be write-protected.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD.
+ */
+static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned int nr)
+{
+ for (;;) {
+ ptep_set_wrprotect(mm, addr, ptep);
+ if (--nr == 0)
+ break;
+ ptep++;
+ addr += PAGE_SIZE;
+ }
+}
+#endif
+
/*
* On some architectures hardware does not set page access bit when accessing
* memory page, it is responsibility of software setting this bit. It brings
@@ -432,28 +955,12 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
* To be differentiate with macro pte_mkyoung, this macro is used on platforms
* where software maintains page access bit.
*/
-#ifndef pte_savedwrite
-#define pte_savedwrite pte_write
-#endif
-
-#ifndef pte_mk_savedwrite
-#define pte_mk_savedwrite pte_mkwrite
-#endif
-
-#ifndef pte_clear_savedwrite
-#define pte_clear_savedwrite pte_wrprotect
-#endif
-
-#ifndef pmd_savedwrite
-#define pmd_savedwrite pmd_write
-#endif
-
-#ifndef pmd_mk_savedwrite
-#define pmd_mk_savedwrite pmd_mkwrite
-#endif
-
-#ifndef pmd_clear_savedwrite
-#define pmd_clear_savedwrite pmd_wrprotect
+#ifndef pte_sw_mkyoung
+static inline pte_t pte_sw_mkyoung(pte_t pte)
+{
+ return pte;
+}
+#define pte_sw_mkyoung pte_sw_mkyoung
#endif
#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -474,6 +981,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
#endif
#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pudp_set_wrprotect(struct mm_struct *mm,
unsigned long address, pud_t *pudp)
{
@@ -487,6 +995,7 @@ static inline void pudp_set_wrprotect(struct mm_struct *mm,
{
BUILD_BUG();
}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#endif
@@ -515,6 +1024,10 @@ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif
+#ifndef arch_needs_pgtable_deposit
+#define arch_needs_pgtable_deposit() (false)
+#endif
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* This is an implementation of pmdp_establish() that is only suitable for an
@@ -535,6 +1048,26 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp);
#endif
+#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
+
+/*
+ * pmdp_invalidate_ad() invalidates the PMD while changing a transparent
+ * hugepage mapping in the page tables. This function is similar to
+ * pmdp_invalidate(), but should only be used if the access and dirty bits would
+ * not be cleared by the software in the new PMD value. The function ensures
+ * that hardware updates of the access and dirty bits are not lost.
+ *
+ * Doing so allows certain architectures to avoid a TLB flush in most cases.
+ * Another TLB flush might still be necessary later if the PMD update itself
+ * requires one (e.g., if protection was made stricter). Even then, the caller
+ * may be able to batch these TLB flushing operations, so fewer TLB flushes
+ * are needed overall.
+ */
+extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#endif
+
#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
@@ -585,11 +1118,14 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
return pmd_val(pmd_a) == pmd_val(pmd_b);
}
+#endif
+#ifndef pud_same
static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
return pud_val(pud_a) == pud_val(pud_b);
}
+#define pud_same pud_same
#endif
#ifndef __HAVE_ARCH_P4D_SAME
@@ -606,45 +1142,16 @@ static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
}
#endif
-/*
- * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
- * TLB flush will be required as a result of the "set". For example, use
- * in scenarios where it is known ahead of time that the routine is
- * setting non-present entries, or re-setting an existing entry to the
- * same value. Otherwise, use the typical "set" helpers and flush the
- * TLB.
- */
-#define set_pte_safe(ptep, pte) \
-({ \
- WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
- set_pte(ptep, pte); \
-})
-
-#define set_pmd_safe(pmdp, pmd) \
-({ \
- WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
- set_pmd(pmdp, pmd); \
-})
-
-#define set_pud_safe(pudp, pud) \
-({ \
- WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
- set_pud(pudp, pud); \
-})
-
-#define set_p4d_safe(p4dp, p4d) \
-({ \
- WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
- set_p4d(p4dp, p4d); \
-})
-
-#define set_pgd_safe(pgdp, pgd) \
-({ \
- WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
- set_pgd(pgdp, pgd); \
-})
-
#ifndef __HAVE_ARCH_DO_SWAP_PAGE
+static inline void arch_do_swap_page_nr(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t pte, pte_t oldpte,
+ int nr)
+{
+
+}
+#else
/*
* Some architectures support metadata associated with a page. When a
* page is being swapped out, this metadata must be saved so it can be
@@ -653,12 +1160,17 @@ static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
* page as metadata for the page. arch_do_swap_page() can restore this
* metadata when a page is swapped back in.
*/
-static inline void arch_do_swap_page(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long addr,
- pte_t pte, pte_t oldpte)
-{
-
+static inline void arch_do_swap_page_nr(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ pte_t pte, pte_t oldpte,
+ int nr)
+{
+ for (int i = 0; i < nr; i++) {
+ arch_do_swap_page(vma->vm_mm, vma, addr + i * PAGE_SIZE,
+ pte_advance_pfn(pte, i),
+ pte_advance_pfn(oldpte, i));
+ }
}
#endif
@@ -686,7 +1198,7 @@ static inline int arch_unmap_one(struct mm_struct *mm,
* prototypes must be defined in the arch-specific asm/pgtable.h file.
*/
#ifndef __HAVE_ARCH_PREPARE_TO_SWAP
-static inline int arch_prepare_to_swap(struct page *page)
+static inline int arch_prepare_to_swap(struct folio *folio)
{
return 0;
}
@@ -703,17 +1215,13 @@ static inline void arch_swap_invalidate_area(int type)
#endif
#ifndef __HAVE_ARCH_SWAP_RESTORE
-static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
+static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
}
#endif
-#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
-#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
-#endif
-
#ifndef __HAVE_ARCH_MOVE_PTE
-#define move_pte(pte, prot, old_addr, new_addr) (pte)
+#define move_pte(pte, old_addr, new_addr) (pte)
#endif
#ifndef pte_accessible
@@ -721,7 +1229,11 @@ static inline void arch_swap_restore(swp_entry_t entry, struct page *page)
#endif
#ifndef flush_tlb_fix_spurious_fault
-#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
+#define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
+#endif
+
+#ifndef flush_tlb_fix_spurious_fault_pmd
+#define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp) do { } while (0)
#endif
/*
@@ -868,7 +1380,9 @@ static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
/*
* Commit an update to a pte, leaving any hardware-controlled bits in
- * the PTE unmodified.
+ * the PTE unmodified. The pte returned from ptep_modify_prot_start() may
+ * additionally have young and/or dirty bits set where previously they were not,
+ * so the updated pte may have these additional changes.
*/
static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr,
@@ -877,6 +1391,102 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
__ptep_modify_prot_commit(vma, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
+
+/**
+ * modify_prot_start_ptes - Start a pte protection read-modify-write transaction
+ * over a batch of ptes, which protects against asynchronous hardware
+ * modifications to the ptes. The intention is not to prevent the hardware from
+ * making pte updates, but to prevent any updates it may make from being lost.
+ * Please see the comment above ptep_modify_prot_start() for full description.
+ *
+ * @vma: The virtual memory area the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_modify_prot_start(), collecting the a/d bits from each pte
+ * in the batch.
+ *
+ * Note that PTE bits in the PTE batch besides the PFN can differ.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. All other PTE bits must be identical for
+ * all PTEs in the batch except for young and dirty bits. The PTEs are all in
+ * the same PMD.
+ */
+#ifndef modify_prot_start_ptes
+static inline pte_t modify_prot_start_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr)
+{
+ pte_t pte, tmp_pte;
+
+ pte = ptep_modify_prot_start(vma, addr, ptep);
+ while (--nr) {
+ ptep++;
+ addr += PAGE_SIZE;
+ tmp_pte = ptep_modify_prot_start(vma, addr, ptep);
+ if (pte_dirty(tmp_pte))
+ pte = pte_mkdirty(pte);
+ if (pte_young(tmp_pte))
+ pte = pte_mkyoung(pte);
+ }
+ return pte;
+}
+#endif
+
+/**
+ * modify_prot_commit_ptes - Commit an update to a batch of ptes, leaving any
+ * hardware-controlled bits in the PTE unmodified.
+ *
+ * @vma: The virtual memory area the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @old_pte: Old page table entry (for the first entry) which is now cleared.
+ * @pte: New page table entry to be set.
+ * @nr: Number of entries.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_modify_prot_commit().
+ *
+ * Context: The caller holds the page table lock. The PTEs are all in the same
+ * PMD. On exit, the set ptes in the batch map the same folio. The ptes set by
+ * ptep_modify_prot_start() may additionally have young and/or dirty bits set
+ * where previously they were not, so the updated ptes may have these
+ * additional changes.
+ */
+#ifndef modify_prot_commit_ptes
+static inline void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep, pte_t old_pte, pte_t pte, unsigned int nr)
+{
+ int i;
+
+ for (i = 0; i < nr; ++i, ++ptep, addr += PAGE_SIZE) {
+ ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
+
+ /* Advance PFN only, set same prot */
+ old_pte = pte_next_pfn(old_pte);
+ pte = pte_next_pfn(pte);
+ }
+}
+#endif
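The two batch helpers pair up the same way the single-PTE transaction does; a minimal mprotect-style sketch (function name illustrative):

/* Write-protect nr PTEs of one folio in a single start/commit transaction. */
static void wrprotect_prot_batch(struct vm_area_struct *vma, unsigned long addr,
				 pte_t *ptep, unsigned int nr)
{
	pte_t old_pte, pte;

	old_pte = modify_prot_start_ptes(vma, addr, ptep, nr);
	pte = pte_wrprotect(old_pte);
	modify_prot_commit_ptes(vma, addr, ptep, old_pte, pte, nr);
}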
+
+/*
+ * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
+ * and let generic vmalloc, ioremap and page table update code know when
+ * arch_sync_kernel_mappings() needs to be called.
+ */
+#ifndef ARCH_PAGE_TABLE_SYNC_MASK
+#define ARCH_PAGE_TABLE_SYNC_MASK 0
+#endif
+
+/*
+ * There is no default implementation for arch_sync_kernel_mappings(); we
+ * rely on the compiler to optimize the calls out when
+ * ARCH_PAGE_TABLE_SYNC_MASK is 0.
+ */
+void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
+
#endif /* CONFIG_MMU */
/*
@@ -933,27 +1543,6 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
#endif
/*
- * A facility to provide lazy MMU batching. This allows PTE updates and
- * page invalidations to be delayed until a call to leave lazy MMU mode
- * is issued. Some architectures may benefit from doing this, and it is
- * beneficial for both shadow and direct mode hypervisors, which may batch
- * the PTE updates which happen during this window. Note that using this
- * interface requires that read hazards be removed from the code. A read
- * hazard could result in the direct mode hypervisor case, since the actual
- * write to the page tables may not yet have taken place, so reads though
- * a raw PTE pointer after it has been modified are not guaranteed to be
- * up to date. This mode can only be entered and left under the protection of
- * the page table locks for all page tables which may be modified. In the UP
- * case, this is required so that preemption is disabled, and in the SMP case,
- * it must synchronize the delayed page table writes properly on other CPUs.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode() do {} while (0)
-#define arch_leave_lazy_mmu_mode() do {} while (0)
-#define arch_flush_lazy_mmu_mode() do {} while (0)
-#endif
-
-/*
* A facility to provide batching of the reload of page tables and
* other process state with the actual context switch code for
* paravirtualized guests. By convention, only one of the batched
@@ -968,6 +1557,18 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
#define arch_start_context_switch(prev) do {} while (0)
#endif
+/*
+ * Some platforms can customize the PTE soft-dirty bit, making it unavailable
+ * even when the architecture provides the feature.
+ * This API allows architectures to add their own runtime checks for the
+ * hardware the kernel is running on.
+ * Note: when overriding it, please make sure CONFIG_MEM_SOFT_DIRTY remains
+ * part of the macro.
+ */
+#ifndef pgtable_supports_soft_dirty
+#define pgtable_supports_soft_dirty() IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)
+#endif
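
/*
 * A hypothetical arch override, kept in the arch's <asm/pgtable.h>;
 * note that CONFIG_MEM_SOFT_DIRTY stays part of the check, and
 * arch_cpu_has_soft_dirty() is an assumed runtime test, not an
 * existing helper:
 *
 *	#define pgtable_supports_soft_dirty()			\
 *		(IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) &&		\
 *		 arch_cpu_has_soft_dirty())
 */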
+
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
@@ -1054,62 +1655,92 @@ static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
* vmf_insert_pfn.
*/
-/*
- * track_pfn_remap is called when a _new_ pfn mapping is being established
- * by remap_pfn_range() for physical range indicated by pfn and size.
- */
-static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
- unsigned long pfn, unsigned long addr,
- unsigned long size)
+static inline int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
+ pgprot_t *prot)
{
return 0;
}
-/*
- * track_pfn_insert is called when a _new_ single pfn is established
- * by vmf_insert_pfn().
- */
-static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
- pfn_t pfn)
+static inline int pfnmap_track(unsigned long pfn, unsigned long size,
+ pgprot_t *prot)
{
+ return 0;
}
-/*
- * track_pfn_copy is called when vma that is covering the pfnmap gets
- * copied through copy_page_range().
- */
-static inline int track_pfn_copy(struct vm_area_struct *vma)
+static inline void pfnmap_untrack(unsigned long pfn, unsigned long size)
{
- return 0;
}
+#else
+/**
+ * pfnmap_setup_cachemode - setup the cachemode in the pgprot for a pfn range
+ * @pfn: the start of the pfn range
+ * @size: the size of the pfn range in bytes
+ * @prot: the pgprot to modify
+ *
+ * Look up the cachemode for the pfn range starting at @pfn with the size
+ * @size and store it in @prot, leaving other data in @prot unchanged.
+ *
+ * This allows for a hardware implementation to have fine-grained control of
+ * memory cache behavior at page level granularity. Without a hardware
+ * implementation, this function does nothing.
+ *
+ * Currently there is only one implementation for this - x86 Page Attribute
+ * Table (PAT). See Documentation/arch/x86/pat.rst for more details.
+ *
+ * This function can fail if the pfn range spans pfns that require differing
+ * cachemodes. If the pfn range was previously verified to have a single
+ * cachemode, it is sufficient to query only a single pfn. The assumption is
+ * that this is the case for drivers using the vmf_insert_pfn*() interface.
+ *
+ * Returns 0 on success and -EINVAL on error.
+ */
+int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
+ pgprot_t *prot);
+
+/**
+ * pfnmap_track - track a pfn range
+ * @pfn: the start of the pfn range
+ * @size: the size of the pfn range in bytes
+ * @prot: the pgprot to track
+ *
+ * Request the pfn range to be 'tracked' by a hardware implementation and
+ * set up the cachemode in @prot similar to pfnmap_setup_cachemode().
+ *
+ * This allows for fine-grained control of memory cache behaviour at page
+ * level granularity. Tracking memory this way is persisted across VMA splits
+ * (VMA merging does not apply for VM_PFNMAP).
+ *
+ * Currently, there is only one implementation for this - x86 Page Attribute
+ * Table (PAT). See Documentation/arch/x86/pat.rst for more details.
+ *
+ * Returns 0 on success and -EINVAL on error.
+ */
+int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot);
-/*
- * untrack_pfn is called while unmapping a pfnmap for a region.
- * untrack can be called for a specific region indicated by pfn and size or
- * can be for the entire vma (in which case pfn, size are zero).
+/**
+ * pfnmap_untrack - untrack a pfn range
+ * @pfn: the start of the pfn range
+ * @size: the size of the pfn range in bytes
+ *
+ * Untrack a pfn range previously tracked through pfnmap_track().
*/
-static inline void untrack_pfn(struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size)
-{
-}
+void pfnmap_untrack(unsigned long pfn, unsigned long size);
+#endif
-/*
- * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
+/**
+ * pfnmap_setup_cachemode_pfn - setup the cachemode in the pgprot for a pfn
+ * @pfn: the pfn
+ * @prot: the pgprot to modify
+ *
+ * Look up the cachemode for @pfn and store it in @prot, leaving other
+ * data in @prot unchanged.
+ *
+ * See pfnmap_setup_cachemode() for details.
*/
-static inline void untrack_pfn_moved(struct vm_area_struct *vma)
+static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
{
+ pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot);
}
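
/*
 * A condensed sketch of the expected pairing in a remap_pfn_range()-style
 * path (the mapping step itself is elided):
 */
static inline int sketch_map_pfn_range(unsigned long pfn, unsigned long size,
		pgprot_t prot)
{
	int err = pfnmap_track(pfn, size, &prot);

	if (err)
		return err;
	/* ... establish the mapping using the updated @prot ... */
	/* ... and once the mapping is torn down (or on failure): */
	pfnmap_untrack(pfn, size);
	return 0;
}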
-#else
-extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
- unsigned long pfn, unsigned long addr,
- unsigned long size);
-extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
- pfn_t pfn);
-extern int track_pfn_copy(struct vm_area_struct *vma);
-extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
- unsigned long size);
-extern void untrack_pfn_moved(struct vm_area_struct *vma);
-#endif
#ifdef CONFIG_MMU
#ifdef __HAVE_COLOR_ZERO_PAGE
@@ -1171,176 +1802,42 @@ static inline int pud_write(pud_t pud)
}
#endif /* pud_write */
-#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline int pmd_devmap(pmd_t pmd)
-{
- return 0;
-}
-static inline int pud_devmap(pud_t pud)
-{
- return 0;
-}
-static inline int pgd_devmap(pgd_t pgd)
-{
- return 0;
-}
-#endif
-
#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
- (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
+ !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline int pud_trans_huge(pud_t pud)
{
return 0;
}
#endif
-/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
-static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
+static inline int pud_trans_unstable(pud_t *pud)
{
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
pud_t pudval = READ_ONCE(*pud);
- if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
+ if (pud_none(pudval) || pud_trans_huge(pudval))
return 1;
if (unlikely(pud_bad(pudval))) {
pud_clear_bad(pud);
return 1;
}
- return 0;
-}
-
-/* See pmd_trans_unstable for discussion. */
-static inline int pud_trans_unstable(pud_t *pud)
-{
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
- return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
-#else
- return 0;
-#endif
-}
-
-#ifndef pmd_read_atomic
-static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
-{
- /*
- * Depend on compiler for an atomic pmd read. NOTE: this is
- * only going to work, if the pmdval_t isn't larger than
- * an unsigned long.
- */
- return *pmdp;
-}
#endif
-
-#ifndef arch_needs_pgtable_deposit
-#define arch_needs_pgtable_deposit() (false)
-#endif
-/*
- * This function is meant to be used by sites walking pagetables with
- * the mmap_lock held in read mode to protect against MADV_DONTNEED and
- * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
- * into a null pmd and the transhuge page fault can convert a null pmd
- * into an hugepmd or into a regular pmd (if the hugepage allocation
- * fails). While holding the mmap_lock in read mode the pmd becomes
- * stable and stops changing under us only if it's not null and not a
- * transhuge pmd. When those races occurs and this function makes a
- * difference vs the standard pmd_none_or_clear_bad, the result is
- * undefined so behaving like if the pmd was none is safe (because it
- * can return none anyway). The compiler level barrier() is critically
- * important to compute the two checks atomically on the same pmdval.
- *
- * For 32bit kernels with a 64bit large pmd_t this automatically takes
- * care of reading the pmd atomically to avoid SMP race conditions
- * against pmd_populate() when the mmap_lock is hold for reading by the
- * caller (a special atomic read not done by "gcc" as in the generic
- * version above, is also needed when THP is disabled because the page
- * fault can populate the pmd from under us).
- */
-static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
-{
- pmd_t pmdval = pmd_read_atomic(pmd);
- /*
- * The barrier will stabilize the pmdval in a register or on
- * the stack so that it will stop changing under the code.
- *
- * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
- * pmd_read_atomic is allowed to return a not atomic pmdval
- * (for example pointing to an hugepage that has never been
- * mapped in the pmd). The below checks will only care about
- * the low part of the pmd with 32bit PAE x86 anyway, with the
- * exception of pmd_none(). So the important thing is that if
- * the low part of the pmd is found null, the high part will
- * be also null or the pmd_none() check below would be
- * confused.
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- barrier();
-#endif
- /*
- * !pmd_present() checks for pmd migration entries
- *
- * The complete check uses is_pmd_migration_entry() in linux/swapops.h
- * But using that requires moving current function and pmd_trans_unstable()
- * to linux/swapops.h to resolve dependency, which is too much code move.
- *
- * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
- * because !pmd_present() pages can only be under migration not swapped
- * out.
- *
- * pmd_none() is preserved for future condition checks on pmd migration
- * entries and not confusing with this function name, although it is
- * redundant with !pmd_present().
- */
- if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
- (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
- return 1;
- if (unlikely(pmd_bad(pmdval))) {
- pmd_clear_bad(pmd);
- return 1;
- }
- return 0;
-}
-
-/*
- * This is a noop if Transparent Hugepage Support is not built into
- * the kernel. Otherwise it is equivalent to
- * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
- * places that already verified the pmd is not none and they want to
- * walk ptes while holding the mmap sem in read mode (write mode don't
- * need this). If THP is not enabled, the pmd can't go away under the
- * code even if MADV_DONTNEED runs, but if THP is enabled we need to
- * run a pmd_trans_unstable before walking the ptes after
- * split_huge_pmd returns (because it may have run when the pmd become
- * null, but then a page fault can map in a THP and not a regular page).
- */
-static inline int pmd_trans_unstable(pmd_t *pmd)
-{
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- return pmd_none_or_trans_huge_or_clear_bad(pmd);
-#else
return 0;
-#endif
-}
-
-/*
- * the ordering of these checks is important for pmds with _page_devmap set.
- * if we check pmd_trans_unstable() first we will trip the bad_pmd() check
- * inside of pmd_none_or_trans_huge_or_clear_bad(). this will end up correctly
- * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
- */
-static inline int pmd_devmap_trans_unstable(pmd_t *pmd)
-{
- return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
}
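
/*
 * Sketch: a walker holding mmap_lock for read re-checks the PUD before
 * descending to the PMD level; a "1" return is treated like pud_none().
 */
static inline bool sketch_pud_walkable(pud_t *pud)
{
	return !pud_trans_unstable(pud);
}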
#ifndef CONFIG_NUMA_BALANCING
/*
- * Technically a PTE can be PROTNONE even when not doing NUMA balancing but
- * the only case the kernel cares is for NUMA balancing and is only ever set
- * when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked
- * _PAGE_PROTNONE so by default, implement the helper as "always no". It
- * is the responsibility of the caller to distinguish between PROT_NONE
- * protections and NUMA hinting fault protections.
+ * In an inaccessible (PROT_NONE) VMA, pte_protnone() may indicate "yes". It is
+ * perfectly valid to indicate "no" in that case, which is why the default
+ * implementation is "always no".
+ *
+ * In an accessible VMA, however, pte_protnone() reliably indicates PROT_NONE
+ * page protection due to NUMA hinting. NUMA hinting faults only apply in
+ * accessible VMAs.
+ *
+ * So, to reliably identify PROT_NONE PTEs that require a NUMA hinting fault,
+ * looking at the VMA accessibility is sufficient.
*/
static inline int pte_protnone(pte_t pte)
{
@@ -1359,16 +1856,13 @@ static inline int pmd_protnone(pmd_t pmd)
#ifndef __PAGETABLE_P4D_FOLDED
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
-int p4d_clear_huge(p4d_t *p4d);
+void p4d_clear_huge(p4d_t *p4d);
#else
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
return 0;
}
-static inline int p4d_clear_huge(p4d_t *p4d)
-{
- return 0;
-}
+static inline void p4d_clear_huge(p4d_t *p4d) { }
#endif /* !__PAGETABLE_P4D_FOLDED */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
@@ -1391,10 +1885,7 @@ static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
return 0;
}
-static inline int p4d_clear_huge(p4d_t *p4d)
-{
- return 0;
-}
+static inline void p4d_clear_huge(p4d_t *p4d) { }
static inline int pud_clear_huge(pud_t *pud)
{
return 0;
@@ -1479,10 +1970,11 @@ static inline bool arch_has_pfn_modify_check(void)
/*
* Page Table Modification bits for pgtbl_mod_mask.
*
- * These are used by the p?d_alloc_track*() set of functions an in the generic
- * vmalloc/ioremap code to track at which page-table levels entries have been
- * modified. Based on that the code can better decide when vmalloc and ioremap
- * mapping changes need to be synchronized to other page-tables in the system.
+ * These are used by the p?d_alloc_track*() and p*d_populate_kernel()
+ * functions in the generic vmalloc, ioremap and page table update code
+ * to track at which page-table levels entries have been modified.
+ * Based on that the code can better decide when page table changes need
+ * to be synchronized to other page-tables in the system.
*/
#define __PGTBL_PGD_MODIFIED 0
#define __PGTBL_P4D_MODIFIED 1
@@ -1499,6 +1991,32 @@ static inline bool arch_has_pfn_modify_check(void)
/* Page-Table Modification Mask */
typedef unsigned int pgtbl_mod_mask;
+enum pgtable_level {
+ PGTABLE_LEVEL_PTE = 0,
+ PGTABLE_LEVEL_PMD,
+ PGTABLE_LEVEL_PUD,
+ PGTABLE_LEVEL_P4D,
+ PGTABLE_LEVEL_PGD,
+};
+
+static inline const char *pgtable_level_to_str(enum pgtable_level level)
+{
+ switch (level) {
+ case PGTABLE_LEVEL_PTE:
+ return "pte";
+ case PGTABLE_LEVEL_PMD:
+ return "pmd";
+ case PGTABLE_LEVEL_PUD:
+ return "pud";
+ case PGTABLE_LEVEL_P4D:
+ return "p4d";
+ case PGTABLE_LEVEL_PGD:
+ return "pgd";
+ default:
+ return "unknown";
+ }
+}
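
/*
 * Sketch: the helper is intended for diagnostics, e.g. naming the
 * level in a warning (message text is illustrative).
 */
static inline void sketch_warn_level(enum pgtable_level level,
		unsigned long addr)
{
	pr_warn("unexpected %s entry at %#lx\n",
		pgtable_level_to_str(level), addr);
}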
+
#endif /* !__ASSEMBLY__ */
#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
@@ -1515,13 +2033,12 @@ typedef unsigned int pgtbl_mod_mask;
#endif
#ifndef has_transparent_hugepage
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define has_transparent_hugepage() 1
-#else
-#define has_transparent_hugepage() 0
-#endif
+#define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
#endif
+#ifndef has_transparent_pud_hugepage
+#define has_transparent_pud_hugepage() IS_BUILTIN(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+#endif
/*
* On some architectures it depends on the mm if the p4d/pud or pmd
* layer of the page table hierarchy is folded or not.
@@ -1549,23 +2066,37 @@ typedef unsigned int pgtbl_mod_mask;
#endif
/*
- * p?d_leaf() - true if this entry is a final mapping to a physical address.
- * This differs from p?d_huge() by the fact that they are always available (if
- * the architecture supports large pages at the appropriate level) even
- * if CONFIG_HUGETLB_PAGE is not defined.
- * Only meaningful when called on a valid entry.
+ * pXd_leaf() is the API to check whether a pgtable entry is a huge page
+ * mapping. It should work globally across all archs, without any
+ * dependency on CONFIG_* options. For architectures that do not support
+ * huge mappings on specific levels, below fallbacks will be used.
+ *
+ * A leaf pgtable entry should always imply the following:
+ *
+ * - It is a "present" entry. IOW, before using this API, please check it
+ * with pXd_present() first. NOTE: it may not always mean the "present
+ * bit" is set. For example, PROT_NONE entries are always "present".
+ *
+ * - It should _never_ be a swap entry of any type. Above "present" check
+ * should have guarded this, but let's be crystal clear on this.
+ *
+ * - It should contain a huge PFN, which points to a huge page larger than
+ * PAGE_SIZE of the platform. The PFN format isn't important here.
+ *
+ * - It should cover all kinds of huge mappings (i.e. pXd_trans_huge()
+ * or hugetlb mappings).
*/
#ifndef pgd_leaf
-#define pgd_leaf(x) 0
+#define pgd_leaf(x) false
#endif
#ifndef p4d_leaf
-#define p4d_leaf(x) 0
+#define p4d_leaf(x) false
#endif
#ifndef pud_leaf
-#define pud_leaf(x) 0
+#define pud_leaf(x) false
#endif
#ifndef pmd_leaf
-#define pmd_leaf(x) 0
+#define pmd_leaf(x) false
#endif
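
/*
 * Sketch of the documented contract: check pXd_present() first, then
 * pXd_leaf() to decide between stopping at a huge mapping and
 * descending another level.
 */
static inline bool sketch_pmd_is_huge(pmd_t pmd)
{
	return pmd_present(pmd) && pmd_leaf(pmd);
}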
#ifndef pgd_leaf_size
@@ -1580,8 +2111,87 @@ typedef unsigned int pgtbl_mod_mask;
#ifndef pmd_leaf_size
#define pmd_leaf_size(x) PMD_SIZE
#endif
+#ifndef __pte_leaf_size
#ifndef pte_leaf_size
#define pte_leaf_size(x) PAGE_SIZE
#endif
+#define __pte_leaf_size(x,y) pte_leaf_size(y)
+#endif
+
+/*
+ * We always define pmd_pfn for all archs as it's used in lots of generic
+ * code. Now it happens too for pud_pfn (and can happen for larger
+ * mappings too in the future; we're not there yet). Instead of defining
+ * it for all archs (like pmd_pfn), provide a fallback.
+ *
+ * Note that returning 0 here means any arch that didn't define this can
+ * go severely wrong when it hits a real pud leaf. It's the arch's
+ * responsibility to properly define it when a huge pud is possible.
+ */
+#ifndef pud_pfn
+#define pud_pfn(x) 0
+#endif
+
+/*
+ * Some architectures have MMUs that are configurable or selectable at boot
+ * time. These lead to variable PTRS_PER_x. For statically allocated arrays it
+ * helps to have a static maximum value.
+ */
+
+#ifndef MAX_PTRS_PER_PTE
+#define MAX_PTRS_PER_PTE PTRS_PER_PTE
+#endif
+
+#ifndef MAX_PTRS_PER_PMD
+#define MAX_PTRS_PER_PMD PTRS_PER_PMD
+#endif
+
+#ifndef MAX_PTRS_PER_PUD
+#define MAX_PTRS_PER_PUD PTRS_PER_PUD
+#endif
+
+#ifndef MAX_PTRS_PER_P4D
+#define MAX_PTRS_PER_P4D PTRS_PER_P4D
+#endif
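
/*
 * Sketch: MAX_PTRS_PER_* exists so tables can be sized statically when
 * PTRS_PER_* is only known at boot, e.g. a KASAN-style early shadow
 * table (the array name is illustrative):
 *
 *	static pte_t sketch_early_pte[MAX_PTRS_PER_PTE] __page_aligned_bss;
 */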
+
+#ifndef pte_pgprot
+#define pte_pgprot(x) ((pgprot_t) {0})
+#endif
+
+#ifndef pmd_pgprot
+#define pmd_pgprot(x) ((pgprot_t) {0})
+#endif
+
+#ifndef pud_pgprot
+#define pud_pgprot(x) ((pgprot_t) {0})
+#endif
+
+/* description of effects of mapping type and prot in current implementation.
+ * this is due to the limited x86 page protection hardware. The expected
+ * behavior is in parens:
+ *
+ * map_type prot
+ * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
+ * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (yes) yes w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
+ * w: (no) no w: (no) no w: (copy) copy w: (no) no
+ * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
+ *
+ * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
+ * MAP_PRIVATE (with Enhanced PAN supported):
+ * r: (no) no
+ * w: (no) no
+ * x: (yes) yes
+ */
+#define DECLARE_VM_GET_PAGE_PROT \
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags) \
+{ \
+ return protection_map[vm_flags & \
+ (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \
+} \
+EXPORT_SYMBOL(vm_get_page_prot);
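
/*
 * Sketch: an architecture with no special vm_get_page_prot() handling
 * defines its protection_map[] and expands the macro once in its mm
 * code (placement is illustrative):
 *
 *	static pgprot_t protection_map[16] __ro_after_init = { ... };
 *	DECLARE_VM_GET_PAGE_PROT
 */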
#endif /* _LINUX_PGTABLE_H */
diff --git a/include/linux/pgtable_api.h b/include/linux/pgtable_api.h
new file mode 100644
index 000000000000..ff367a4ba8c4
--- /dev/null
+++ b/include/linux/pgtable_api.h
@@ -0,0 +1 @@
+#include <linux/pgtable.h>
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 60d2b26026a2..fbbe028cc4b7 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -14,6 +14,7 @@
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
+#include <linux/leds.h>
#include <linux/linkmode.h>
#include <linux/netlink.h>
#include <linux/mdio.h>
@@ -29,45 +30,27 @@
#include <linux/refcount.h>
#include <linux/atomic.h>
-
-#define PHY_DEFAULT_FEATURES (SUPPORTED_Autoneg | \
- SUPPORTED_TP | \
- SUPPORTED_MII)
-
-#define PHY_10BT_FEATURES (SUPPORTED_10baseT_Half | \
- SUPPORTED_10baseT_Full)
-
-#define PHY_100BT_FEATURES (SUPPORTED_100baseT_Half | \
- SUPPORTED_100baseT_Full)
-
-#define PHY_1000BT_FEATURES (SUPPORTED_1000baseT_Half | \
- SUPPORTED_1000baseT_Full)
+#include <net/eee.h>
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1s_p2mp_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
-extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
-extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
-extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
+extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init;
#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
#define PHY_BASIC_T1_FEATURES ((unsigned long *)&phy_basic_t1_features)
+#define PHY_BASIC_T1S_P2MP_FEATURES ((unsigned long *)&phy_basic_t1s_p2mp_features)
#define PHY_GBIT_FEATURES ((unsigned long *)&phy_gbit_features)
#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
-#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
-#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
-#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
+#define PHY_EEE_CAP1_FEATURES ((unsigned long *)&phy_eee_cap1_features)
+#define PHY_EEE_CAP2_FEATURES ((unsigned long *)&phy_eee_cap2_features)
extern const int phy_basic_ports_array[3];
-extern const int phy_fibre_port_array[1];
-extern const int phy_all_ports_features_array[7];
-extern const int phy_10_100_features_array[4];
-extern const int phy_basic_t1_features_array[2];
-extern const int phy_gbit_features_array[2];
-extern const int phy_10gbit_features_array[1];
/*
* Set phydev->irq to PHY_POLL if interrupts are not supported,
@@ -80,6 +63,7 @@ extern const int phy_10gbit_features_array[1];
#define PHY_IS_INTERNAL 0x00000001
#define PHY_RST_AFTER_CLK_EN 0x00000002
#define PHY_POLL_CABLE_TEST 0x00000004
+#define PHY_ALWAYS_CALL_SUSPEND 0x00000008
#define MDIO_DEVICE_IS_PHY 0x80000000
/**
@@ -87,21 +71,23 @@ extern const int phy_10gbit_features_array[1];
*
* @PHY_INTERFACE_MODE_NA: Not Applicable - don't touch
* @PHY_INTERFACE_MODE_INTERNAL: No interface, MAC and PHY combined
- * @PHY_INTERFACE_MODE_MII: Median-independent interface
- * @PHY_INTERFACE_MODE_GMII: Gigabit median-independent interface
+ * @PHY_INTERFACE_MODE_MII: Media-independent interface
+ * @PHY_INTERFACE_MODE_GMII: Gigabit media-independent interface
* @PHY_INTERFACE_MODE_SGMII: Serial gigabit media-independent interface
* @PHY_INTERFACE_MODE_TBI: Ten Bit Interface
* @PHY_INTERFACE_MODE_REVMII: Reverse Media Independent Interface
* @PHY_INTERFACE_MODE_RMII: Reduced Media Independent Interface
+ * @PHY_INTERFACE_MODE_REVRMII: Reduced Media Independent Interface in PHY role
* @PHY_INTERFACE_MODE_RGMII: Reduced gigabit media-independent interface
* @PHY_INTERFACE_MODE_RGMII_ID: RGMII with Internal RX+TX delay
* @PHY_INTERFACE_MODE_RGMII_RXID: RGMII with Internal RX delay
- * @PHY_INTERFACE_MODE_RGMII_TXID: RGMII with Internal RX delay
+ * @PHY_INTERFACE_MODE_RGMII_TXID: RGMII with Internal TX delay
* @PHY_INTERFACE_MODE_RTBI: Reduced TBI
- * @PHY_INTERFACE_MODE_SMII: ??? MII
+ * @PHY_INTERFACE_MODE_SMII: Serial MII
* @PHY_INTERFACE_MODE_XGMII: 10 gigabit media-independent interface
* @PHY_INTERFACE_MODE_XLGMII:40 gigabit media-independent interface
* @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax
+ * @PHY_INTERFACE_MODE_PSGMII: Penta SGMII
* @PHY_INTERFACE_MODE_QSGMII: Quad SGMII
* @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII
* @PHY_INTERFACE_MODE_100BASEX: 100 BaseX
@@ -111,8 +97,16 @@ extern const int phy_10gbit_features_array[1];
* @PHY_INTERFACE_MODE_RXAUI: Reduced XAUI
* @PHY_INTERFACE_MODE_XAUI: 10 Gigabit Attachment Unit Interface
* @PHY_INTERFACE_MODE_10GBASER: 10G BaseR
+ * @PHY_INTERFACE_MODE_25GBASER: 25G BaseR
* @PHY_INTERFACE_MODE_USXGMII: Universal Serial 10GE MII
* @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN
+ * @PHY_INTERFACE_MODE_QUSGMII: Quad Universal SGMII
+ * @PHY_INTERFACE_MODE_1000BASEKX: 1000Base-KX - with Clause 73 AN
+ * @PHY_INTERFACE_MODE_10G_QXGMII: 10G-QXGMII - 4 ports over 10G USXGMII
+ * @PHY_INTERFACE_MODE_50GBASER: 50GBase-R - with Clause 134 FEC
+ * @PHY_INTERFACE_MODE_LAUI: 50 Gigabit Attachment Unit Interface
+ * @PHY_INTERFACE_MODE_100GBASEP: 100GBase-P - with Clause 134 FEC
+ * @PHY_INTERFACE_MODE_MIILITE: MII-Lite - MII without RXER TXER CRS COL
* @PHY_INTERFACE_MODE_MAX: Book keeping
*
* Describes the interface between the MAC and PHY.
@@ -126,6 +120,7 @@ typedef enum {
PHY_INTERFACE_MODE_TBI,
PHY_INTERFACE_MODE_REVMII,
PHY_INTERFACE_MODE_RMII,
+ PHY_INTERFACE_MODE_REVRMII,
PHY_INTERFACE_MODE_RGMII,
PHY_INTERFACE_MODE_RGMII_ID,
PHY_INTERFACE_MODE_RGMII_RXID,
@@ -135,6 +130,7 @@ typedef enum {
PHY_INTERFACE_MODE_XGMII,
PHY_INTERFACE_MODE_XLGMII,
PHY_INTERFACE_MODE_MOCA,
+ PHY_INTERFACE_MODE_PSGMII,
PHY_INTERFACE_MODE_QSGMII,
PHY_INTERFACE_MODE_TRGMII,
PHY_INTERFACE_MODE_100BASEX,
@@ -145,18 +141,63 @@ typedef enum {
PHY_INTERFACE_MODE_XAUI,
/* 10GBASE-R, XFI, SFI - single lane 10G Serdes */
PHY_INTERFACE_MODE_10GBASER,
+ PHY_INTERFACE_MODE_25GBASER,
PHY_INTERFACE_MODE_USXGMII,
/* 10GBASE-KR - with Clause 73 AN */
PHY_INTERFACE_MODE_10GKR,
+ PHY_INTERFACE_MODE_QUSGMII,
+ PHY_INTERFACE_MODE_1000BASEKX,
+ PHY_INTERFACE_MODE_10G_QXGMII,
+ PHY_INTERFACE_MODE_50GBASER,
+ PHY_INTERFACE_MODE_LAUI,
+ PHY_INTERFACE_MODE_100GBASEP,
+ PHY_INTERFACE_MODE_MIILITE,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
-/*
- * phy_supported_speeds - return all speeds currently supported by a PHY device
- */
-unsigned int phy_supported_speeds(struct phy_device *phy,
- unsigned int *speeds,
- unsigned int size);
+/* PHY interface mode bitmap handling */
+#define DECLARE_PHY_INTERFACE_MASK(name) \
+ DECLARE_BITMAP(name, PHY_INTERFACE_MODE_MAX)
+
+static inline void phy_interface_zero(unsigned long *intf)
+{
+ bitmap_zero(intf, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline bool phy_interface_empty(const unsigned long *intf)
+{
+ return bitmap_empty(intf, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline void phy_interface_copy(unsigned long *d, const unsigned long *s)
+{
+ bitmap_copy(d, s, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline unsigned int phy_interface_weight(const unsigned long *intf)
+{
+ return bitmap_weight(intf, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline void phy_interface_and(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_and(dst, a, b, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline void phy_interface_or(unsigned long *dst, const unsigned long *a,
+ const unsigned long *b)
+{
+ bitmap_or(dst, a, b, PHY_INTERFACE_MODE_MAX);
+}
+
+static inline void phy_interface_set_rgmii(unsigned long *intf)
+{
+ __set_bit(PHY_INTERFACE_MODE_RGMII, intf);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_ID, intf);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, intf);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, intf);
+}
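
/*
 * Sketch: a MAC driver filling a DECLARE_PHY_INTERFACE_MASK() bitmap
 * with the modes it supports before handing it to phylink.
 */
static inline void sketch_fill_interfaces(unsigned long *interfaces)
{
	phy_interface_zero(interfaces);
	__set_bit(PHY_INTERFACE_MODE_SGMII, interfaces);
	phy_interface_set_rgmii(interfaces);
}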
/**
* phy_modes - map phy_interface_t enum to device tree binding of phy-mode
@@ -185,6 +226,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "rev-mii";
case PHY_INTERFACE_MODE_RMII:
return "rmii";
+ case PHY_INTERFACE_MODE_REVRMII:
+ return "rev-rmii";
case PHY_INTERFACE_MODE_RGMII:
return "rgmii";
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -203,12 +246,16 @@ static inline const char *phy_modes(phy_interface_t interface)
return "xlgmii";
case PHY_INTERFACE_MODE_MOCA:
return "moca";
+ case PHY_INTERFACE_MODE_PSGMII:
+ return "psgmii";
case PHY_INTERFACE_MODE_QSGMII:
return "qsgmii";
case PHY_INTERFACE_MODE_TRGMII:
return "trgmii";
case PHY_INTERFACE_MODE_1000BASEX:
return "1000base-x";
+ case PHY_INTERFACE_MODE_1000BASEKX:
+ return "1000base-kx";
case PHY_INTERFACE_MODE_2500BASEX:
return "2500base-x";
case PHY_INTERFACE_MODE_5GBASER:
@@ -219,29 +266,66 @@ static inline const char *phy_modes(phy_interface_t interface)
return "xaui";
case PHY_INTERFACE_MODE_10GBASER:
return "10gbase-r";
+ case PHY_INTERFACE_MODE_25GBASER:
+ return "25gbase-r";
case PHY_INTERFACE_MODE_USXGMII:
return "usxgmii";
case PHY_INTERFACE_MODE_10GKR:
return "10gbase-kr";
case PHY_INTERFACE_MODE_100BASEX:
return "100base-x";
+ case PHY_INTERFACE_MODE_QUSGMII:
+ return "qusgmii";
+ case PHY_INTERFACE_MODE_10G_QXGMII:
+ return "10g-qxgmii";
+ case PHY_INTERFACE_MODE_50GBASER:
+ return "50gbase-r";
+ case PHY_INTERFACE_MODE_LAUI:
+ return "laui";
+ case PHY_INTERFACE_MODE_100GBASEP:
+ return "100gbase-p";
+ case PHY_INTERFACE_MODE_MIILITE:
+ return "mii-lite";
default:
return "unknown";
}
}
-
-#define PHY_INIT_TIMEOUT 100000
-#define PHY_FORCE_TIMEOUT 10
+/**
+ * rgmii_clock - map link speed to the clock rate
+ * @speed: link speed value
+ *
+ * Description: maps RGMII supported link speeds into the clock rates.
+ * This can also be used for MII, GMII, and RMII interface modes as the
+ * clock rates are identical, but the caller must be aware that errors
+ * for unsupported clock rates will not be signalled.
+ *
+ * Returns: clock rate or negative errno
+ */
+static inline long rgmii_clock(int speed)
+{
+ switch (speed) {
+ case SPEED_10:
+ return 2500000;
+ case SPEED_100:
+ return 25000000;
+ case SPEED_1000:
+ return 125000000;
+ default:
+ return -EINVAL;
+ }
+}
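
/*
 * Sketch: a MAC driver retuning its transmit clock on link change;
 * @tx_clk is an assumed clock handle obtained elsewhere.
 */
static inline int sketch_set_tx_clock(struct clk *tx_clk, int speed)
{
	long rate = rgmii_clock(speed);

	return rate < 0 ? (int)rate : clk_set_rate(tx_clk, rate);
}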
#define PHY_MAX_ADDR 32
/* Used when trying to connect to a specific phy (mii bus id:phy device id) */
#define PHY_ID_FMT "%s:%02x"
+#define PHY_ID_SIZE (MII_BUS_ID_SIZE + 3)
#define MII_BUS_ID_SIZE 61
struct device;
+struct kernel_hwtstamp_config;
struct phylink;
struct sfp_bus;
struct sfp_upstream_ops;
@@ -265,37 +349,6 @@ struct mdio_bus_stats {
};
/**
- * struct phy_package_shared - Shared information in PHY packages
- * @addr: Common PHY address used to combine PHYs in one package
- * @refcnt: Number of PHYs connected to this shared data
- * @flags: Initialization of PHY package
- * @priv_size: Size of the shared private data @priv
- * @priv: Driver private data shared across a PHY package
- *
- * Represents a shared structure between different phydev's in the same
- * package, for example a quad PHY. See phy_package_join() and
- * phy_package_leave().
- */
-struct phy_package_shared {
- int addr;
- refcount_t refcnt;
- unsigned long flags;
- size_t priv_size;
-
- /* private data pointer */
- /* note that this pointer is shared between different phydevs and
- * the user has to take care of appropriate locking. It is allocated
- * and freed automatically by phy_package_join() and
- * phy_package_leave().
- */
- void *priv;
-};
-
-/* used as bit number in atomic bitops */
-#define PHY_SHARED_F_INIT_DONE 0
-#define PHY_SHARED_F_PROBE_DONE 1
-
-/**
* struct mii_bus - Represents an MDIO bus
*
* @owner: Who owns this device
@@ -315,6 +368,11 @@ struct mii_bus {
int (*read)(struct mii_bus *bus, int addr, int regnum);
/** @write: Perform a write transfer on the bus */
int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ /** @read_c45: Perform a C45 read transfer on the bus */
+ int (*read_c45)(struct mii_bus *bus, int addr, int devnum, int regnum);
+ /** @write_c45: Perform a C45 write transfer on the bus */
+ int (*write_c45)(struct mii_bus *bus, int addr, int devnum,
+ int regnum, u16 val);
/** @reset: Perform a reset of the bus */
int (*reset)(struct mii_bus *bus);
@@ -362,19 +420,13 @@ struct mii_bus {
/** @reset_gpiod: Reset GPIO descriptor pointer */
struct gpio_desc *reset_gpiod;
- /** @probe_capabilities: bus capabilities, used for probing */
- enum {
- MDIOBUS_NO_CAP = 0,
- MDIOBUS_C22,
- MDIOBUS_C45,
- MDIOBUS_C22_C45,
- } probe_capabilities;
-
/** @shared_lock: protect access to the shared element */
struct mutex shared_lock;
+#if IS_ENABLED(CONFIG_PHY_PACKAGE)
/** @shared: shared state across different PHYs */
struct phy_package_shared *shared[PHY_MAX_ADDR];
+#endif
};
#define to_mii_bus(d) container_of(d, struct mii_bus, dev)
@@ -407,7 +459,7 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
}
struct mii_bus *mdio_find_bus(const char *mdio_name);
-struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
+struct phy_device *mdiobus_scan_c22(struct mii_bus *bus, int addr);
#define PHY_INTERRUPT_DISABLED false
#define PHY_INTERRUPT_ENABLED true
@@ -444,14 +496,17 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
* Once complete, move to UP to restart the PHY.
* - phy_stop aborts the running test and moves to @PHY_HALTED
*
- * @PHY_HALTED: PHY is up, but no polling or interrupts are done. Or
- * PHY is in an error state.
+ * @PHY_HALTED: PHY is up, but no polling or interrupts are done.
* - phy_start moves to @PHY_UP
+ *
+ * @PHY_ERROR: PHY is up, but is in an error state.
+ * - phy_stop moves to @PHY_HALTED
*/
enum phy_state {
PHY_DOWN = 0,
PHY_READY,
PHY_HALTED,
+ PHY_ERROR,
PHY_UP,
PHY_RUNNING,
PHY_NOLINK,
@@ -464,7 +519,7 @@ enum phy_state {
* struct phy_c45_device_ids - 802.3-c45 Device Identifiers
* @devices_in_package: IEEE 802.3 devices in package register value.
* @mmds_present: bit vector of MMDs present.
- * @device_ids: The device identifer for each present device.
+ * @device_ids: The device identifier for each present device.
*/
struct phy_c45_device_ids {
u32 devices_in_package;
@@ -476,10 +531,39 @@ struct macsec_context;
struct macsec_ops;
/**
+ * struct phy_oatc14_sqi_capability - SQI capability information for OATC14
+ * 10Base-T1S PHY
+ * @updated: Indicates whether the SQI capability fields have been updated.
+ * @sqi_max: Maximum supported Signal Quality Indicator (SQI) level reported by
+ * the PHY.
+ * @sqiplus_bits: Bits for SQI+ levels supported by the PHY.
+ * 0 - SQI+ is not supported
+ * 3 - SQI+ is supported, using 3 bits (8 levels)
+ * 4 - SQI+ is supported, using 4 bits (16 levels)
+ * 5 - SQI+ is supported, using 5 bits (32 levels)
+ * 6 - SQI+ is supported, using 6 bits (64 levels)
+ * 7 - SQI+ is supported, using 7 bits (128 levels)
+ * 8 - SQI+ is supported, using 8 bits (256 levels)
+ *
+ * This structure is used by the OATC14 10Base-T1S PHY driver to store the SQI
+ * and SQI+ capability information retrieved from the PHY.
+ */
+struct phy_oatc14_sqi_capability {
+ bool updated;
+ int sqi_max;
+ u8 sqiplus_bits;
+};
+
+/**
* struct phy_device - An instance of a PHY
*
* @mdio: MDIO bus this PHY is on
* @drv: Pointer to the driver for this PHY instance
+ * @devlink: Device link between the PHY device and the MAC device, created
+ * when the external PHY used by the current MAC interface is managed by
+ * another MAC interface.
+ * @phyindex: Unique id across the phy's parent tree of phys to address the PHY
+ * from userspace, similar to ifindex. A zero index means the PHY
+ * wasn't assigned an id yet.
* @phy_id: UID for this device found during discovery
* @c45_ids: 802.3-c45 Device Identifiers if is_c45.
* @is_c45: Set to true if this PHY uses clause 45 addressing.
@@ -494,10 +578,18 @@ struct macsec_ops;
* @downshifted_rate: Set true if link speed has been downshifted.
* @is_on_sfp_module: Set true if PHY is located on an SFP module.
* @mac_managed_pm: Set true if MAC driver takes care of suspending/resuming PHY
+ * @wol_enabled: Set to true if the PHY or the attached MAC have Wake-on-LAN
+ * enabled.
+ * @is_genphy_driven: PHY is driven by one of the generic PHY drivers
* @state: State of the PHY for management purposes
* @dev_flags: Device-specific flags used by the PHY driver.
+ *
+ * - Bits [15:0] are free to use by the PHY driver to communicate
+ * driver specific behavior.
+ * - Bits [23:16] are currently reserved for future use.
+ * - Bits [31:24] are reserved for defining generic
+ * PHY driver behavior.
* @irq: IRQ number of the PHY's interrupt (-1 if none)
- * @phy_timer: The timer for handling the state machine
* @phylink: Pointer to phylink instance for this PHY
* @sfp_bus_attached: Flag indicating whether the SFP bus has been attached
* @sfp_bus: SFP bus attached to this PHY's fiber port
@@ -515,15 +607,31 @@ struct macsec_ops;
* @supported: Combined MAC/PHY supported linkmodes
* @advertising: Currently advertised linkmodes
* @adv_old: Saved advertised while power saving for WoL
+ * @supported_eee: supported PHY EEE linkmodes
+ * @advertising_eee: Currently advertised EEE linkmodes
+ * @enable_tx_lpi: When True, MAC should transmit LPI to PHY
+ * @eee_active: phylib private state, indicating that EEE has been negotiated
+ * @eee_cfg: User configuration of EEE
* @lp_advertising: Current link partner advertised linkmodes
- * @eee_broken_modes: Energy efficient ethernet modes which should be prohibited
+ * @host_interfaces: PHY interface modes supported by host
+ * @eee_disabled_modes: Energy efficient ethernet modes not to be advertised
* @autoneg: Flag autoneg being used
+ * @rate_matching: Current rate matching mode
* @link: Current link state
* @autoneg_complete: Flag auto negotiation of the link has completed
* @mdix: Current crossover
* @mdix_ctrl: User setting of crossover
+ * @pma_extable: Cached value of PMA/PMD Extended Abilities Register
* @interrupts: Flag interrupts have been enabled
+ * @irq_suspended: Flag indicating PHY is suspended and therefore interrupt
+ * handling shall be postponed until PHY has resumed
+ * @irq_rerun: Flag indicating interrupts occurred while PHY was suspended,
+ * requiring a rerun of the interrupt handler after resume
+ * @default_timestamp: Flag indicating whether we are using the phy
+ * timestamp as the default one
* @interface: enum phy_interface_t value
+ * @possible_interfaces: bitmap of interface modes that the attached PHY
+ * will switch between depending on media speed.
* @skb: Netlink message for cable diagnostics
* @nest: Netlink nest used for cable diagnostics
* @ehdr: Netlink header for cable diagnostics
@@ -531,14 +639,18 @@ struct macsec_ops;
* @phy_num_led_triggers: Number of triggers in @phy_led_triggers
* @led_link_trigger: LED trigger for link up/down
* @last_triggered: last LED trigger for link speed
+ * @leds: list of PHY LED structures
* @master_slave_set: User requested master/slave configuration
* @master_slave_get: Current master/slave advertisement
* @master_slave_state: Current master/slave configuration
* @mii_ts: Pointer to time stamper callbacks
+ * @psec: Pointer to Power Sourcing Equipment control struct
* @lock: Mutex for serialization access to PHY
* @state_queue: Work queue for state machine
+ * @link_down_events: Number of times link was lost
* @shared: Pointer to private data shared by phys in one package
* @priv: Pointer to driver private data
+ * @oatc14_sqi_capability: SQI capability information for OATC14 10Base-T1S PHY
*
* interrupts currently only supports enabled or disabled,
* but could be changed in the future to support enabling
@@ -552,8 +664,11 @@ struct phy_device {
/* Information about the PHY type */
/* And management functions */
- struct phy_driver *drv;
+ const struct phy_driver *drv;
+ struct device_link *devlink;
+
+ u32 phyindex;
u32 phy_id;
struct phy_c45_device_ids c45_ids;
@@ -569,20 +684,31 @@ struct phy_device {
unsigned downshifted_rate:1;
unsigned is_on_sfp_module:1;
unsigned mac_managed_pm:1;
+ unsigned wol_enabled:1;
+ unsigned is_genphy_driven:1;
unsigned autoneg:1;
/* The most recently read link state */
unsigned link:1;
unsigned autoneg_complete:1;
+ bool pause:1;
+ bool asym_pause:1;
/* Interrupts are enabled */
unsigned interrupts:1;
+ unsigned irq_suspended:1;
+ unsigned irq_rerun:1;
+
+ unsigned default_timestamp:1;
+
+ int rate_matching;
enum phy_state state;
u32 dev_flags;
phy_interface_t interface;
+ DECLARE_PHY_INTERFACE_MASK(possible_interfaces);
/*
* forced speed & duplex (no autoneg)
@@ -591,8 +717,6 @@ struct phy_device {
int speed;
int duplex;
int port;
- int pause;
- int asym_pause;
u8 master_slave_get;
u8 master_slave_set;
u8 master_slave_state;
@@ -604,9 +728,17 @@ struct phy_device {
__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
/* used with phy_speed_down */
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
-
+ /* used for EEE validation and configuration */
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_eee);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising_eee);
/* Energy efficient ethernet modes which should be prohibited */
- u32 eee_broken_modes;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(eee_disabled_modes);
+ bool enable_tx_lpi;
+ bool eee_active;
+ struct eee_config eee_cfg;
+
+ /* Host supported PHY interface types. Should be ignored if empty. */
+ DECLARE_PHY_INTERFACE_MASK(host_interfaces);
#ifdef CONFIG_LED_TRIGGER_PHY
struct phy_led_trigger *phy_led_triggers;
@@ -615,6 +747,7 @@ struct phy_device {
struct phy_led_trigger *led_link_trigger;
#endif
+ struct list_head leds;
/*
* Interrupt number for this PHY
@@ -626,9 +759,11 @@ struct phy_device {
/* For use by PHYs to maintain extra state */
void *priv;
+#if IS_ENABLED(CONFIG_PHY_PACKAGE)
/* shared data pointer */
/* For use by PHYs inside the same package that need a shared state. */
struct phy_package_shared *shared;
+#endif
/* Reporting cable test results */
struct sk_buff *skb;
@@ -646,10 +781,15 @@ struct phy_device {
struct phylink *phylink;
struct net_device *attached_dev;
struct mii_timestamper *mii_ts;
+ struct pse_control *psec;
u8 mdix;
u8 mdix_ctrl;
+ int pma_extable;
+
+ unsigned int link_down_events;
+
void (*phy_link_change)(struct phy_device *phydev, bool up);
void (*adjust_link)(struct net_device *dev);
@@ -657,12 +797,15 @@ struct phy_device {
/* MACsec management functions */
const struct macsec_ops *macsec_ops;
#endif
+
+ struct phy_oatc14_sqi_capability oatc14_sqi_capability;
};
-static inline struct phy_device *to_phy_device(const struct device *dev)
-{
- return container_of(to_mdio_device(dev), struct phy_device, mdio);
-}
+/* Generic phy_device::dev_flags */
+#define PHY_F_NO_IRQ 0x80000000
+#define PHY_F_RXC_ALWAYS_ON 0x40000000
+
+#define to_phy_device(__dev) container_of_const(to_mdio_device(__dev), struct phy_device, mdio)
/**
* struct phy_tdr_config - Configuration of a TDR raw test
@@ -686,6 +829,267 @@ struct phy_tdr_config {
#define PHY_PAIR_ALL -1
/**
+ * enum link_inband_signalling - in-band signalling modes that are supported
+ *
+ * @LINK_INBAND_DISABLE: in-band signalling can be disabled
+ * @LINK_INBAND_ENABLE: in-band signalling can be enabled without bypass
+ * @LINK_INBAND_BYPASS: in-band signalling can be enabled with bypass
+ *
+ * These modes are reported as a bitmask; a set bit means the corresponding
+ * mode is supported. If no bit is set, in-band signalling cannot be used.
+ */
+enum link_inband_signalling {
+ LINK_INBAND_DISABLE = BIT(0),
+ LINK_INBAND_ENABLE = BIT(1),
+ LINK_INBAND_BYPASS = BIT(2),
+};
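
/*
 * Sketch: checking whether in-band signalling can be enabled for SGMII
 * before configuring it via the config_inband() method (the driver is
 * assumed to implement inband_caps()).
 */
static inline bool sketch_can_enable_inband(struct phy_device *phydev)
{
	unsigned int caps = phydev->drv->inband_caps(phydev,
						     PHY_INTERFACE_MODE_SGMII);

	return caps & LINK_INBAND_ENABLE;
}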
+
+/**
+ * struct phy_plca_cfg - Configuration of the PLCA (Physical Layer Collision
+ * Avoidance) Reconciliation Sublayer.
+ *
+ * @version: read-only PLCA register map version. -1 = not available. Ignored
+ * when setting the configuration. Format is the same as reported by the PLCA
+ * IDVER register (31.CA00).
+ * @enabled: PLCA configured mode (enabled/disabled). -1 = not available / don't
+ * set. 0 = disabled, anything else = enabled.
+ * @node_id: the PLCA local node identifier. -1 = not available / don't set.
+ * Allowed values [0 .. 254]. 255 = node disabled.
+ * @node_cnt: the PLCA node count (maximum number of nodes having a TO). Only
+ * meaningful for the coordinator (node_id = 0). -1 = not available / don't
+ * set. Allowed values [1 .. 255].
+ * @to_tmr: The value of the PLCA to_timer in bit-times, which determines the
+ * PLCA transmit opportunity window opening. See IEEE802.3 Clause 148 for
+ * more details. The to_timer shall be set equal over all nodes.
+ * -1 = not available / don't set. Allowed values [0 .. 255].
+ * @burst_cnt: controls how many additional frames a node is allowed to send in
+ * a single transmit opportunity (TO). The default value of 0 means that the
+ * node is allowed exactly one frame per TO. A value of 1 allows two frames
+ * per TO, and so on. -1 = not available / don't set.
+ * Allowed values [0 .. 255].
+ * @burst_tmr: controls how many bit times to wait for the MAC to send a new
+ * frame before interrupting the burst. This value should be set to a value
+ * greater than the MAC inter-packet gap (which is typically 96 bits).
+ * -1 = not available / don't set. Allowed values [0 .. 255].
+ *
+ * A structure containing configuration parameters for setting/getting the PLCA
+ * RS configuration. The driver does not need to implement all the parameters,
+ * but should report what is actually used.
+ */
+struct phy_plca_cfg {
+ int version;
+ int enabled;
+ int node_id;
+ int node_cnt;
+ int to_tmr;
+ int burst_cnt;
+ int burst_tmr;
+};
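
/*
 * Sketch: a coordinator-node configuration; -1 leaves a parameter
 * untouched, as documented above (values are illustrative).
 */
static const struct phy_plca_cfg sketch_plca_coordinator = {
	.version   = -1,	/* read-only, ignored on set */
	.enabled   = 1,
	.node_id   = 0,		/* coordinator */
	.node_cnt  = 8,
	.to_tmr    = 32,
	.burst_cnt = -1,	/* don't set */
	.burst_tmr = -1,	/* don't set */
};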
+
+/**
+ * struct phy_plca_status - Status of the PLCA (Physical Layer Collision
+ * Avoidance) Reconciliation Sublayer.
+ *
+ * @pst: The PLCA status as reported by the PST bit in the PLCA STATUS
+ * register (31.CA03), indicating BEACON activity.
+ *
+ * A structure containing status information of the PLCA RS configuration.
+ * The driver does not need to implement all the parameters, but should report
+ * what is actually used.
+ */
+struct phy_plca_status {
+ bool pst;
+};
+
+/* Modes for PHY LED configuration */
+enum phy_led_modes {
+ PHY_LED_ACTIVE_HIGH = 0,
+ PHY_LED_ACTIVE_LOW = 1,
+ PHY_LED_INACTIVE_HIGH_IMPEDANCE = 2,
+
+ /* keep it last */
+ __PHY_LED_MODES_NUM,
+};
+
+/**
+ * struct phy_led: An LED driven by the PHY
+ *
+ * @list: List of LEDs
+ * @phydev: PHY this LED is attached to
+ * @led_cdev: Standard LED class structure
+ * @index: Number of the LED
+ */
+struct phy_led {
+ struct list_head list;
+ struct phy_device *phydev;
+ struct led_classdev led_cdev;
+ u8 index;
+};
+
+#define to_phy_led(d) container_of(d, struct phy_led, led_cdev)
+
+/*
+ * PHY_MSE_CAP_* - Bitmask flags for Mean Square Error (MSE) capabilities
+ *
+ * These flags describe which MSE metrics and selectors are implemented
+ * by the PHY for the current link mode. They are used in
+ * struct phy_mse_capability.supported_caps.
+ *
+ * Standardization:
+ * The OPEN Alliance (OA) defines the presence of MSE/SQI/pMSE but not their
+ * numeric scaling, update intervals, or aggregation windows. See:
+ * OA 100BASE-T1 TC1 v1.0, sections 6.1.1-6.1.3
+ * OA 1000BASE-T1 TC12 v2.2, sections 6.1.1-6.1.2
+ *
+ * Description of flags:
+ *
+ * PHY_MSE_CAP_CHANNEL_A
+ * Per-pair diagnostics for Channel A are supported. Mapping to the
+ * physical wire pair may depend on MDI/MDI-X polarity.
+ *
+ * PHY_MSE_CAP_CHANNEL_B, _C, _D
+ * Same as above for channels B-D.
+ *
+ * PHY_MSE_CAP_WORST_CHANNEL
+ * The PHY or driver can identify and report the single worst-performing
+ * channel without querying each one individually.
+ *
+ * PHY_MSE_CAP_LINK
+ * The PHY provides only a link-wide aggregate measurement or cannot map
+ * results to a specific pair (for example 100BASE-TX with unknown
+ * MDI/MDI-X).
+ *
+ * PHY_MSE_CAP_AVG
+ * Average MSE (mean DCQ metric) is supported. For 100/1000BASE-T1 the OA
+ * recommends 2^16 symbols, scaled 0..511, but the exact scaling is
+ * vendor-specific.
+ *
+ * PHY_MSE_CAP_PEAK
+ * Peak MSE (current peak within the measurement window) is supported.
+ * Defined as pMSE for 100BASE-T1; vendor-specific for others.
+ *
+ * PHY_MSE_CAP_WORST_PEAK
+ * Latched worst-case peak MSE since the last read (read-to-clear if
+ * implemented). Optional in OA 100BASE-T1 TC1 6.1.3.
+ */
+#define PHY_MSE_CAP_CHANNEL_A BIT(0)
+#define PHY_MSE_CAP_CHANNEL_B BIT(1)
+#define PHY_MSE_CAP_CHANNEL_C BIT(2)
+#define PHY_MSE_CAP_CHANNEL_D BIT(3)
+#define PHY_MSE_CAP_WORST_CHANNEL BIT(4)
+#define PHY_MSE_CAP_LINK BIT(5)
+#define PHY_MSE_CAP_AVG BIT(6)
+#define PHY_MSE_CAP_PEAK BIT(7)
+#define PHY_MSE_CAP_WORST_PEAK BIT(8)
+
+/*
+ * enum phy_mse_channel - Identifiers for selecting MSE measurement channels
+ *
+ * PHY_MSE_CHANNEL_A - PHY_MSE_CHANNEL_D
+ * Select per-pair measurement for the corresponding channel.
+ *
+ * PHY_MSE_CHANNEL_WORST
+ * Select the single worst-performing channel reported by hardware.
+ *
+ * PHY_MSE_CHANNEL_LINK
+ * Select link-wide aggregate data (used when per-pair results are
+ * unavailable).
+ */
+enum phy_mse_channel {
+ PHY_MSE_CHANNEL_A,
+ PHY_MSE_CHANNEL_B,
+ PHY_MSE_CHANNEL_C,
+ PHY_MSE_CHANNEL_D,
+ PHY_MSE_CHANNEL_WORST,
+ PHY_MSE_CHANNEL_LINK,
+};
+
+/**
+ * struct phy_mse_capability - Capabilities of Mean Square Error (MSE)
+ * measurement interface
+ *
+ * Standardization notes:
+ *
+ * - Presence of MSE/SQI/pMSE is defined by OPEN Alliance specs, but numeric
+ * scaling, refresh/update rate and aggregation windows are not fixed and
+ * are vendor-/product-specific. (OA 100BASE-T1 TC1 v1.0 6.1.*;
+ * OA 1000BASE-T1 TC12 v2.2 6.1.*)
+ *
+ * - Typical recommendations: 2^16 symbols and 0..511 scaling for MSE; pMSE only
+ * defined for 100BASE-T1 (sliding window example), others are vendor
+ * extensions. Drivers must report actual scale/limits here.
+ *
+ * Describes the MSE measurement capabilities for the current link mode. These
+ * properties are dynamic and may change when link settings are modified.
+ * Callers should re-query this capability after any link state change to
+ * ensure they have the most up-to-date information.
+ *
+ * Callers should only request measurements for channels and types that are
+ * indicated as supported by the @supported_caps bitmask. If @supported_caps
+ * is 0, the device provides no MSE diagnostics, and driver operations should
+ * typically return -EOPNOTSUPP.
+ *
+ * Snapshot values for average and peak MSE can be normalized to a 0..1 ratio
+ * by dividing the raw snapshot by the corresponding @max_average_mse or
+ * @max_peak_mse value.
+ *
+ * @max_average_mse: The maximum value for an average MSE snapshot. This
+ * defines the scale for the measurement. If the PHY_MSE_CAP_AVG capability is
+ * supported, this value MUST be greater than 0. (vendor-specific units).
+ * @max_peak_mse: The maximum value for a peak MSE snapshot. If either
+ * PHY_MSE_CAP_PEAK or PHY_MSE_CAP_WORST_PEAK is supported, this value MUST
+ * be greater than 0. (vendor-specific units).
+ * @refresh_rate_ps: The typical interval, in picoseconds, between hardware
+ * updates of the MSE values. This is an estimate, and callers should not
+ * assume synchronous sampling. (vendor-specific units).
+ * @num_symbols: The number of symbols aggregated per hardware sample to
+ * calculate the MSE. (vendor-specific units).
+ * @supported_caps: A bitmask of PHY_MSE_CAP_* values indicating which
+ * measurement types (e.g., average, peak) and channels
+ * (e.g., per-pair or link-wide) are supported.
+ */
+struct phy_mse_capability {
+ u64 max_average_mse;
+ u64 max_peak_mse;
+ u64 refresh_rate_ps;
+ u64 num_symbols;
+ u32 supported_caps;
+};
+
+/**
+ * struct phy_mse_snapshot - A snapshot of Mean Square Error (MSE) diagnostics
+ *
+ * Holds a set of MSE diagnostic values that were all captured from a single
+ * measurement window.
+ *
+ * Values are raw, device-scaled and not normalized. Use struct
+ * phy_mse_capability to interpret the scale and sampling window.
+ *
+ * @average_mse: The average MSE value over the measurement window.
+ * OPEN Alliance references MSE as a DCQ metric; recommends 2^16 symbols and
+ * 0..511 scaling. Exact scale and refresh are vendor-specific.
+ * (100BASE-T1 TC1 v1.0 6.1.1; 1000BASE-T1 TC12 v2.2 6.1.1).
+ *
+ * @peak_mse: The peak MSE value observed within the measurement window.
+ * For 100BASE-T1, "pMSE" is optional and may be implemented via a sliding
+ * 128-symbol window with periodic capture; not standardized for 1000BASE-T1.
+ * (100BASE-T1 TC1 v1.0 6.1.3, Table "DCQ.peakMSE").
+ *
+ * @worst_peak_mse: A latched high-water mark of the peak MSE since last read
+ * (read-to-clear if implemented). OPEN Alliance shows a latched "worst case
+ * peak MSE" for 100BASE-T1 pMSE; availability/semantics outside that are
+ * vendor-specific. (100BASE-T1 TC1 v1.0 6.1.3, DCQ.peakMSE high byte;
+ * 1000BASE-T1 TC12 v2.2 treats DCQ details as vendor-specific.)
+ */
+struct phy_mse_snapshot {
+ u64 average_mse;
+ u64 peak_mse;
+ u64 worst_peak_mse;
+};
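
/*
 * Sketch: validate the channel selector against supported_caps before
 * requesting a snapshot, as the method kernel-doc below requires.
 */
static inline int sketch_read_mse_channel_a(struct phy_device *phydev,
		const struct phy_mse_capability *cap,
		struct phy_mse_snapshot *snap)
{
	if (!(cap->supported_caps & PHY_MSE_CAP_CHANNEL_A))
		return -EOPNOTSUPP;
	return phydev->drv->get_mse_snapshot(phydev, PHY_MSE_CHANNEL_A, snap);
}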
+
+/**
* struct phy_driver - Driver structure for a particular PHY type
*
* @mdiodrv: Data common to all MDIO devices
@@ -740,6 +1144,31 @@ struct phy_driver {
*/
int (*get_features)(struct phy_device *phydev);
+ /**
+ * @inband_caps: query whether in-band is supported for the given PHY
+ * interface mode. Returns a bitmask of bits defined by enum
+ * link_inband_signalling.
+ */
+ unsigned int (*inband_caps)(struct phy_device *phydev,
+ phy_interface_t interface);
+
+ /**
+ * @config_inband: configure in-band mode for the PHY
+ */
+ int (*config_inband)(struct phy_device *phydev, unsigned int modes);
+
+ /**
+ * @get_rate_matching: Get the supported type of rate matching for a
+ * particular phy interface. This is used by phy consumers to determine
+ * whether to advertise lower-speed modes for that interface. It is
+ * assumed that if a rate matching mode is supported on an interface,
+ * then that interface's rate can be adapted to all slower link speeds
+ * supported by the phy. If the interface is not supported, this should
+ * return %RATE_MATCH_NONE.
+ */
+ int (*get_rate_matching)(struct phy_device *phydev,
+ phy_interface_t iface);
+
/* PHY Power Management */
/** @suspend: Suspend the hardware, saving state if needed */
int (*suspend)(struct phy_device *phydev);
@@ -778,7 +1207,8 @@ struct phy_driver {
* driver for the given phydev. If NULL, matching is based on
* phy_id and phy_id_mask.
*/
- int (*match_phy_device)(struct phy_device *phydev);
+ int (*match_phy_device)(struct phy_device *phydev,
+ const struct phy_driver *phydrv);
/**
* @set_wol: Some devices (e.g. qnap TS-119P II) require PHY
@@ -862,6 +1292,53 @@ struct phy_driver {
int (*cable_test_get_status)(struct phy_device *dev, bool *finished);
/* Get statistics from the PHY using ethtool */
+ /**
+ * @get_phy_stats: Retrieve PHY statistics.
+ * @dev: The PHY device for which the statistics are retrieved.
+ * @eth_stats: structure where Ethernet PHY stats will be stored.
+ * @stats: structure where additional PHY-specific stats will be stored.
+ *
+ * Retrieves the supported PHY statistics and populates the provided
+ * structures. The input structures are pre-initialized with
+ * `ETHTOOL_STAT_NOT_SET`, and the driver must only modify members
+ * corresponding to supported statistics. Unmodified members will remain
+ * set to `ETHTOOL_STAT_NOT_SET` and will not be returned to userspace.
+ */
+ void (*get_phy_stats)(struct phy_device *dev,
+ struct ethtool_eth_phy_stats *eth_stats,
+ struct ethtool_phy_stats *stats);
+
+ /**
+ * @get_link_stats: Retrieve link statistics.
+ * @dev: The PHY device for which the statistics are retrieved.
+ * @link_stats: structure where link-specific stats will be stored.
+ *
+ * Retrieves link-related statistics for the given PHY device. The input
+ * structure is pre-initialized with `ETHTOOL_STAT_NOT_SET`, and the
+ * driver must only modify members corresponding to supported
+ * statistics. Unmodified members will remain set to
+ * `ETHTOOL_STAT_NOT_SET` and will not be returned to userspace.
+ */
+ void (*get_link_stats)(struct phy_device *dev,
+ struct ethtool_link_ext_stats *link_stats);
+
+ /**
+ * @update_stats: Trigger periodic statistics updates.
+ * @dev: The PHY device for which statistics updates are triggered.
+ *
+ * Periodically gathers statistics from the PHY device to update locally
+ * maintained 64-bit counters. This is necessary for PHYs that implement
+ * reduced-width counters (e.g., 16-bit or 32-bit) which can overflow
+ * more frequently compared to 64-bit counters. By invoking this
+ * callback, drivers can fetch the current counter values, handle
+ * overflow detection, and accumulate the results into local 64-bit
+ * counters for accurate reporting through the `get_phy_stats` and
+ * `get_link_stats` interfaces.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+ int (*update_stats)(struct phy_device *dev);
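
	/*
	 * Sketch of the accumulation pattern @update_stats enables:
	 * widening a wrapping 16-bit hardware counter into a local
	 * 64-bit counter. SKETCH_REG_RX_ERR and struct sketch_priv are
	 * hypothetical:
	 *
	 *	struct sketch_priv { u64 rx_err; u16 last; };
	 *
	 *	static int sketch_update_stats(struct phy_device *dev)
	 *	{
	 *		struct sketch_priv *p = dev->priv;
	 *		int val = phy_read(dev, SKETCH_REG_RX_ERR);
	 *
	 *		if (val < 0)
	 *			return val;
	 *		p->rx_err += (u16)(val - p->last);
	 *		p->last = val;
	 *		return 0;
	 *	}
	 */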
+
/** @get_sset_count: Number of statistic counters */
int (*get_sset_count)(struct phy_device *dev);
/** @get_strings: Names of the statistic counters */
@@ -878,52 +1355,229 @@ struct phy_driver {
int (*set_tunable)(struct phy_device *dev,
struct ethtool_tunable *tuna,
const void *data);
- /** @set_loopback: Set the loopback mood of the PHY */
- int (*set_loopback)(struct phy_device *dev, bool enable);
+ /**
+ * @set_loopback: Set the loopback mode of the PHY
+	 * @enable selects whether loopback mode is enabled or disabled. When
+	 * loopback is enabled, the speed of the loopback mode can be
+	 * requested with the @speed argument. If @speed is zero, any speed
+	 * may be selected. If @speed is greater than zero, that speed shall
+	 * be selected for the loopback mode, or EOPNOTSUPP shall be returned
+	 * if speed selection is not supported.
+ */
+ int (*set_loopback)(struct phy_device *dev, bool enable, int speed);
/** @get_sqi: Get the signal quality indication */
int (*get_sqi)(struct phy_device *dev);
/** @get_sqi_max: Get the maximum signal quality indication */
int (*get_sqi_max)(struct phy_device *dev);
+
+ /**
+ * @get_mse_capability: Get capabilities and scale of MSE measurement
+ * @dev: PHY device
+ * @cap: Output (filled on success)
+ *
+ * Fill @cap with the PHY's MSE capability for the current
+ * link mode: scale limits (max_average_mse, max_peak_mse), update
+ * interval (refresh_rate_ps), sample length (num_symbols) and the
+ * capability bitmask (supported_caps).
+ *
+	 * Implementations may defer capability reporting until hardware has
+ * converged; in that case they should return -EAGAIN and allow the
+ * caller to retry later.
+ *
+ * Return: 0 on success. On failure, returns a negative errno code, such
+ * as -EOPNOTSUPP if MSE measurement is not supported by the PHY or in
+ * the current link mode, or -EAGAIN if the capability information is
+ * not yet available.
+ */
+ int (*get_mse_capability)(struct phy_device *dev,
+ struct phy_mse_capability *cap);
+
+ /**
+ * @get_mse_snapshot: Retrieve a snapshot of MSE diagnostic values
+ * @dev: PHY device
+ * @channel: Channel identifier (PHY_MSE_CHANNEL_*)
+ * @snapshot: Output (filled on success)
+ *
+ * Fill @snapshot with a correlated set of MSE values from the most
+ * recent measurement window.
+ *
+ * Callers must validate @channel against supported_caps returned by
+ * get_mse_capability(). Drivers must not coerce @channel; if the
+ * requested selector is not implemented by the device or current link
+ * mode, the operation must fail.
+ *
+ * worst_peak_mse is latched and must be treated as read-to-clear.
+ *
+ * Return: 0 on success. On failure, returns a negative errno code, such
+ * as -EOPNOTSUPP if MSE measurement is not supported by the PHY or in
+ * the current link mode, or -EAGAIN if measurements are not yet
+ * available.
+ */
+ int (*get_mse_snapshot)(struct phy_device *dev,
+ enum phy_mse_channel channel,
+ struct phy_mse_snapshot *snapshot);
+
+ /* PLCA RS interface */
+ /** @get_plca_cfg: Return the current PLCA configuration */
+ int (*get_plca_cfg)(struct phy_device *dev,
+ struct phy_plca_cfg *plca_cfg);
+ /** @set_plca_cfg: Set the PLCA configuration */
+ int (*set_plca_cfg)(struct phy_device *dev,
+ const struct phy_plca_cfg *plca_cfg);
+ /** @get_plca_status: Return the current PLCA status info */
+ int (*get_plca_status)(struct phy_device *dev,
+ struct phy_plca_status *plca_st);
+
+ /**
+	 * @led_brightness_set: Set a PHY LED brightness. Index
+	 * indicates which of the PHY's LEDs should be set. Value
+	 * follows the standard LED class meaning, e.g. LED_OFF,
+	 * LED_HALF, LED_FULL.
+ */
+ int (*led_brightness_set)(struct phy_device *dev,
+ u8 index, enum led_brightness value);
+
+ /**
+	 * @led_blink_set: Set a PHY LED blinking. Index indicates
+	 * which of the PHY's LEDs should be configured to blink. Delays
+	 * are in milliseconds; if both are zero, a sensible default
+	 * should be chosen. In that case, and whenever the requested
+	 * values cannot be matched exactly, the call should write back
+	 * the timings actually used.
+ */
+ int (*led_blink_set)(struct phy_device *dev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
+ /**
+ * @led_hw_is_supported: Can the HW support the given rules.
+ * @dev: PHY device which has the LED
+ * @index: Which LED of the PHY device
+	 * @rules: The rules the core is interested in
+ *
+ * Return 0 if yes, -EOPNOTSUPP if not, or an error code.
+ */
+ int (*led_hw_is_supported)(struct phy_device *dev, u8 index,
+ unsigned long rules);
+ /**
+ * @led_hw_control_set: Set the HW to control the LED
+ * @dev: PHY device which has the LED
+ * @index: Which LED of the PHY device
+	 * @rules: The rules used to control the LED
+	 *
+	 * Returns 0, or an error code.
+ */
+ int (*led_hw_control_set)(struct phy_device *dev, u8 index,
+ unsigned long rules);
+ /**
+ * @led_hw_control_get: Get how the HW is controlling the LED
+ * @dev: PHY device which has the LED
+ * @index: Which LED of the PHY device
+	 * @rules: Pointer to the rules used to control the LED
+	 *
+	 * Set *@rules to how the HW is currently blinking. Returns 0
+	 * on success, or an error code if the current blinking cannot
+	 * be represented in rules, or if some other error happens.
+ */
+ int (*led_hw_control_get)(struct phy_device *dev, u8 index,
+ unsigned long *rules);
+
+ /**
+ * @led_polarity_set: Set the LED polarity modes
+ * @dev: PHY device which has the LED
+ * @index: Which LED of the PHY device
+ * @modes: bitmap of LED polarity modes
+ *
+ * Configure LED with all the required polarity modes in @modes
+ * to make it correctly turn ON or OFF.
+ *
+ * Returns 0, or an error code.
+ */
+ int (*led_polarity_set)(struct phy_device *dev, int index,
+ unsigned long modes);
+
+ /**
+ * @get_next_update_time: Get the time until the next update event
+ * @dev: PHY device
+ *
+ * Callback to determine the time (in jiffies) until the next
+ * update event for the PHY state machine. Allows PHY drivers to
+ * dynamically adjust polling intervals based on link state or other
+ * conditions.
+ *
+ * Returns the time in jiffies until the next update event.
+ */
+ unsigned int (*get_next_update_time)(struct phy_device *dev);
};
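
A minimal driver-side sketch of the @update_stats accumulation pattern documented above; the register address, priv layout and function name are hypothetical, not part of this header:

	/* Fold a 16-bit hardware counter into a 64-bit software counter;
	 * EXAMPLE_RXERR_REG and struct example_priv are made-up names.
	 */
	static int example_update_stats(struct phy_device *phydev)
	{
		struct example_priv *priv = phydev->priv;
		int val = phy_read(phydev, EXAMPLE_RXERR_REG);

		if (val < 0)
			return val;

		/* 16-bit arithmetic makes the delta come out right even
		 * when the counter wrapped since the last poll.
		 */
		priv->rxerr_count += (u16)(val - priv->last_rxerr);
		priv->last_rxerr = val;

		return 0;
	}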
-#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
+#define to_phy_driver(d) container_of_const(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
-#define PHY_ANY_ID "MATCH ANY PHY"
-#define PHY_ANY_UID 0xffffffff
+#define PHY_ID_MATCH_EXACT_MASK GENMASK(31, 0)
+#define PHY_ID_MATCH_MODEL_MASK GENMASK(31, 4)
+#define PHY_ID_MATCH_VENDOR_MASK GENMASK(31, 10)
-#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 0)
-#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4)
-#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10)
+#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = PHY_ID_MATCH_EXACT_MASK
+#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = PHY_ID_MATCH_MODEL_MASK
+#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = PHY_ID_MATCH_VENDOR_MASK
-/* A Structure for boards to register fixups with the PHY Lib */
-struct phy_fixup {
- struct list_head list;
- char bus_id[MII_BUS_ID_SIZE + 3];
- u32 phy_uid;
- u32 phy_uid_mask;
- int (*run)(struct phy_device *phydev);
-};
+/**
+ * phy_id_compare - compare @id1 with @id2 taking account of @mask
+ * @id1: first PHY ID
+ * @id2: second PHY ID
+ * @mask: the PHY ID mask, set bits are significant in matching
+ *
+ * Return true if the bits from @id1 and @id2 specified by @mask match.
+ * This uses an equivalent test to (@id1 & @mask) == (@id2 & @mask).
+ */
+static inline bool phy_id_compare(u32 id1, u32 id2, u32 mask)
+{
+ return !((id1 ^ id2) & mask);
+}
-const char *phy_speed_to_str(int speed);
-const char *phy_duplex_to_str(unsigned int duplex);
+/**
+ * phy_id_compare_vendor - compare @id with @vendor_mask
+ * @id: PHY ID
+ * @vendor_mask: PHY Vendor mask
+ *
+ * Return: true if the vendor bits from @id match @vendor_mask using the
+ * generic PHY Vendor mask.
+ */
+static inline bool phy_id_compare_vendor(u32 id, u32 vendor_mask)
+{
+ return phy_id_compare(id, vendor_mask, PHY_ID_MATCH_VENDOR_MASK);
+}
-/* A structure for mapping a particular speed and duplex
- * combination to a particular SUPPORTED and ADVERTISED value
+/**
+ * phy_id_compare_model - compare @id with @model_mask
+ * @id: PHY ID
+ * @model_mask: PHY Model mask
+ *
+ * Return: true if the model bits from @id match @model_mask using the
+ * generic PHY Model mask.
*/
-struct phy_setting {
- u32 speed;
- u8 duplex;
- u8 bit;
-};
+static inline bool phy_id_compare_model(u32 id, u32 model_mask)
+{
+ return phy_id_compare(id, model_mask, PHY_ID_MATCH_MODEL_MASK);
+}
-const struct phy_setting *
-phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
- bool exact);
-size_t phy_speeds(unsigned int *speeds, size_t size,
- unsigned long *mask);
-void of_set_phy_supported(struct phy_device *phydev);
-void of_set_phy_eee_broken(struct phy_device *phydev);
-int phy_speed_down_core(struct phy_device *phydev);
+/**
+ * phydev_id_compare - compare @id with the PHY's Clause 22 ID
+ * @phydev: the PHY device
+ * @id: the PHY ID to be matched
+ *
+ * Compare the @phydev Clause 22 ID with the provided @id and return true or
+ * false depending on whether it matches, using the bound driver's mask. The
+ * @phydev must be bound to a driver.
+ */
+static inline bool phydev_id_compare(struct phy_device *phydev, u32 id)
+{
+ return phy_id_compare(id, phydev->phy_id, phydev->drv->phy_id_mask);
+}
+
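
A usage sketch of the match macros and compare helpers above, with a hypothetical PHY ID and vendor value:

	static struct phy_driver example_drivers[] = {
		{
			PHY_ID_MATCH_MODEL(0x004dd070),	/* hypothetical ID */
			.name = "Example Gigabit PHY",
		},
	};

	/* True for any model from the same (hypothetical) vendor OUI. */
	static bool example_same_vendor(struct phy_device *phydev)
	{
		return phy_id_compare_vendor(phydev->phy_id, 0x004dd000);
	}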
+const char *phy_speed_to_str(int speed);
+const char *phy_duplex_to_str(unsigned int duplex);
+const char *phy_rate_matching_to_str(int rate_matching);
+
+int phy_interface_num_ports(phy_interface_t interface);
/**
* phy_is_started - Convenience function to check whether PHY is started
@@ -934,9 +1588,53 @@ static inline bool phy_is_started(struct phy_device *phydev)
return phydev->state >= PHY_UP;
}
+/**
+ * phy_driver_is_genphy - Convenience function to check whether PHY is driven
+ * by one of the generic PHY drivers
+ * @phydev: The phy_device struct
+ * Return: true if PHY is driven by one of the genphy drivers
+ */
+static inline bool phy_driver_is_genphy(struct phy_device *phydev)
+{
+ return phydev->is_genphy_driven;
+}
+
+/**
+ * phy_disable_eee_mode - Don't advertise an EEE mode.
+ * @phydev: The phy_device struct
+ * @link_mode: The EEE mode to be disabled
+ */
+static inline void phy_disable_eee_mode(struct phy_device *phydev, u32 link_mode)
+{
+ WARN_ON(phy_is_started(phydev));
+
+ linkmode_set_bit(link_mode, phydev->eee_disabled_modes);
+ linkmode_clear_bit(link_mode, phydev->advertising_eee);
+}
+
+/**
+ * phy_can_wakeup() - indicate whether PHY has driver model wakeup capabilities
+ * @phydev: The phy_device struct
+ *
+ * Returns: true/false depending on the PHY driver's device_set_wakeup_capable()
+ * setting.
+ */
+static inline bool phy_can_wakeup(struct phy_device *phydev)
+{
+ return device_can_wakeup(&phydev->mdio.dev);
+}
+
+/**
+ * phy_may_wakeup() - indicate whether PHY has wakeup enabled
+ * @phydev: The phy_device struct
+ *
+ * Returns: true/false depending on the PHY driver's device_set_wakeup_enabled()
+ * setting if using the driver model, otherwise the legacy determination.
+ */
+bool phy_may_wakeup(struct phy_device *phydev);
+
void phy_resolve_aneg_pause(struct phy_device *phydev);
void phy_resolve_aneg_linkmode(struct phy_device *phydev);
-void phy_check_downshift(struct phy_device *phydev);
/**
* phy_read - Convenience function for reading a given PHY register
@@ -955,16 +1653,17 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum)
#define phy_read_poll_timeout(phydev, regnum, val, cond, sleep_us, \
timeout_us, sleep_before_read) \
({ \
- int __ret = read_poll_timeout(phy_read, val, (cond) || val < 0, \
+ int __ret, __val; \
+ __ret = read_poll_timeout(__val = phy_read, val, \
+ __val < 0 || (cond), \
sleep_us, timeout_us, sleep_before_read, phydev, regnum); \
- if (val < 0) \
- __ret = val; \
+ if (__val < 0) \
+ __ret = __val; \
if (__ret) \
phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \
__ret; \
})
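
A usage sketch of this macro, assuming the standard MII definitions from <uapi/linux/mii.h>: poll BMCR every 1 ms, for at most 100 ms, until a self-clearing soft reset completes:

	int err, bmcr;

	err = phy_read_poll_timeout(phydev, MII_BMCR, bmcr,
				    !(bmcr & BMCR_RESET),
				    1000, 100000, true);
	if (err)
		return err;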
-
/**
* __phy_read - convenience function for reading a given PHY register
* @phydev: the phy_device struct
@@ -1040,23 +1739,26 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
* @regnum: The register on the MMD to read
* @val: Variable to read the register into
* @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0
- * tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.rst).
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ * read usleep_range() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
* @sleep_before_read: if it is true, sleep @sleep_us before read.
- * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
* case, the last read value at @args is stored in @val. Must not
* be called from atomic context if sleep_us or timeout_us are used.
*/
#define phy_read_mmd_poll_timeout(phydev, devaddr, regnum, val, cond, \
sleep_us, timeout_us, sleep_before_read) \
({ \
- int __ret = read_poll_timeout(phy_read_mmd, val, (cond) || val < 0, \
+ int __ret, __val; \
+ __ret = read_poll_timeout(__val = phy_read_mmd, val, \
+ __val < 0 || (cond), \
sleep_us, timeout_us, sleep_before_read, \
phydev, devaddr, regnum); \
- if (val < 0) \
- __ret = val; \
+ if (__val < 0) \
+ __ret = __val; \
if (__ret) \
phydev_err(phydev, "%s failed: %d\n", __func__, __ret); \
__ret; \
@@ -1228,6 +1930,9 @@ static inline bool phy_polling_mode(struct phy_device *phydev)
if (phydev->drv->flags & PHY_POLL_CABLE_TEST)
return true;
+ if (phydev->drv->update_stats)
+ return true;
+
return phydev->irq == PHY_POLL;
}
@@ -1237,7 +1942,7 @@ static inline bool phy_polling_mode(struct phy_device *phydev)
*/
static inline bool phy_has_hwtstamp(struct phy_device *phydev)
{
- return phydev && phydev->mii_ts && phydev->mii_ts->hwtstamp;
+ return phydev && phydev->mii_ts && phydev->mii_ts->hwtstamp_set;
}
/**
@@ -1268,9 +1973,11 @@ static inline bool phy_has_txtstamp(struct phy_device *phydev)
return phydev && phydev->mii_ts && phydev->mii_ts->txtstamp;
}
-static inline int phy_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
+static inline int phy_hwtstamp(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *cfg,
+ struct netlink_ext_ack *extack)
{
- return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr);
+ return phydev->mii_ts->hwtstamp_set(phydev->mii_ts, cfg, extack);
}
static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb,
@@ -1280,7 +1987,7 @@ static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb,
}
static inline int phy_ts_info(struct phy_device *phydev,
- struct ethtool_ts_info *tsinfo)
+ struct kernel_ethtool_ts_info *tsinfo)
{
return phydev->mii_ts->ts_info(phydev->mii_ts, tsinfo);
}
@@ -1292,12 +1999,18 @@ static inline void phy_txtstamp(struct phy_device *phydev, struct sk_buff *skb,
}
/**
- * phy_is_internal - Convenience function for testing if a PHY is internal
- * @phydev: the phy_device struct
+ * phy_is_default_hwtstamp - Is the PHY hwtstamp the default timestamp
+ * @phydev: Pointer to phy_device
+ *
+ * This is used to determine the default timestamping device, taking into
+ * account the new API choice: timestamping is taken from the MAC by
+ * default unless the phydev has the default_timestamp flag enabled.
+ *
+ * Return: true if the PHY is the default hw timestamping device, false
+ * otherwise.
*/
-static inline bool phy_is_internal(struct phy_device *phydev)
+static inline bool phy_is_default_hwtstamp(struct phy_device *phydev)
{
- return phydev->is_internal;
+ return phy_has_hwtstamp(phydev) && phydev->default_timestamp;
}
/**
@@ -1354,6 +2067,9 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
return phydev->is_pseudo_fixed_link;
}
+phy_interface_t phy_fix_phy_mode_for_mac_delays(phy_interface_t interface,
+ bool mac_txid, bool mac_rxid);
+
int phy_save_page(struct phy_device *phydev);
int phy_select_page(struct phy_device *phydev, int page);
int phy_restore_page(struct phy_device *phydev, int oldpage, int ret);
@@ -1367,37 +2083,29 @@ int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids);
-#if IS_ENABLED(CONFIG_PHYLIB)
+int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id);
+struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode);
+struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode);
+struct fwnode_handle *fwnode_get_phy_node(const struct fwnode_handle *fwnode);
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
int phy_device_register(struct phy_device *phy);
void phy_device_free(struct phy_device *phydev);
-#else
-static inline
-struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
-{
- return NULL;
-}
-
-static inline int phy_device_register(struct phy_device *phy)
-{
- return 0;
-}
-
-static inline void phy_device_free(struct phy_device *phydev) { }
-#endif /* CONFIG_PHYLIB */
void phy_device_remove(struct phy_device *phydev);
+int phy_get_c45_ids(struct phy_device *phydev);
int phy_init_hw(struct phy_device *phydev);
int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
int __phy_resume(struct phy_device *phydev);
-int phy_loopback(struct phy_device *phydev, bool enable);
+int phy_loopback(struct phy_device *phydev, bool enable, int speed);
+int phy_sfp_connect_phy(void *upstream, struct phy_device *phy);
+void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy);
void phy_sfp_attach(void *upstream, struct sfp_bus *bus);
void phy_sfp_detach(void *upstream, struct sfp_bus *bus);
int phy_sfp_probe(struct phy_device *phydev,
const struct sfp_upstream_ops *ops);
struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
phy_interface_t interface);
-struct phy_device *phy_find_first(struct mii_bus *bus);
+struct phy_device *phy_find_next(struct mii_bus *bus, struct phy_device *pos);
int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface);
int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
@@ -1411,14 +2119,28 @@ void phy_detach(struct phy_device *phydev);
void phy_start(struct phy_device *phydev);
void phy_stop(struct phy_device *phydev);
int phy_config_aneg(struct phy_device *phydev);
+int _phy_start_aneg(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
int phy_aneg_done(struct phy_device *phydev);
+unsigned int phy_inband_caps(struct phy_device *phydev,
+ phy_interface_t interface);
+int phy_config_inband(struct phy_device *phydev, unsigned int modes);
int phy_speed_down(struct phy_device *phydev, bool sync);
int phy_speed_up(struct phy_device *phydev);
+bool phy_check_valid(int speed, int duplex, unsigned long *features);
int phy_restart_aneg(struct phy_device *phydev);
int phy_reset_after_clk_enable(struct phy_device *phydev);
+static inline struct phy_device *phy_find_first(struct mii_bus *bus)
+{
+ return phy_find_next(bus, NULL);
+}
+
+#define mdiobus_for_each_phy(_bus, _phydev) \
+ for (_phydev = phy_find_first(_bus); _phydev; \
+ _phydev = phy_find_next(_bus, _phydev))
+
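
A usage sketch of the iterator just defined, visiting every PHY found on a bus (the bus variable is assumed):

	struct phy_device *phydev;

	mdiobus_for_each_phy(bus, phydev)
		phydev_info(phydev, "PHY at address %d\n", phydev->mdio.addr);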
#if IS_ENABLED(CONFIG_PHYLIB)
int phy_start_cable_test(struct phy_device *phydev,
struct netlink_ext_ack *extack);
@@ -1443,10 +2165,6 @@ int phy_start_cable_test_tdr(struct phy_device *phydev,
}
#endif
-int phy_cable_test_result(struct phy_device *phydev, u8 pair, u16 result);
-int phy_cable_test_fault_length(struct phy_device *phydev, u8 pair,
- u16 cm);
-
static inline void phy_device_reset(struct phy_device *phydev, int value)
{
mdio_device_reset(&phydev->mdio, value);
@@ -1455,6 +2173,9 @@ static inline void phy_device_reset(struct phy_device *phydev, int value)
#define phydev_err(_phydev, format, args...) \
dev_err(&_phydev->mdio.dev, format, ##args)
+#define phydev_err_probe(_phydev, err, format, args...) \
+ dev_err_probe(&_phydev->mdio.dev, err, format, ##args)
+
#define phydev_info(_phydev, format, args...) \
dev_info(&_phydev->mdio.dev, format, ##args)
@@ -1485,21 +2206,24 @@ char *phy_attached_info_irq(struct phy_device *phydev)
__malloc;
void phy_attached_info(struct phy_device *phydev);
+int genphy_match_phy_device(struct phy_device *phydev,
+ const struct phy_driver *phydrv);
+
/* Clause 22 PHY */
int genphy_read_abilities(struct phy_device *phydev);
int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart);
-int genphy_config_eee_advert(struct phy_device *phydev);
int __genphy_config_aneg(struct phy_device *phydev, bool changed);
int genphy_aneg_done(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
int genphy_read_lpa(struct phy_device *phydev);
int genphy_read_status_fixed(struct phy_device *phydev);
int genphy_read_status(struct phy_device *phydev);
+int genphy_read_master_slave(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
-int genphy_loopback(struct phy_device *phydev, bool enable);
+int genphy_loopback(struct phy_device *phydev, bool enable, int speed);
int genphy_soft_reset(struct phy_device *phydev);
irqreturn_t genphy_handle_interrupt_no_ack(struct phy_device *phydev);
@@ -1519,7 +2243,7 @@ int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
/* Clause 37 */
int genphy_c37_config_aneg(struct phy_device *phydev);
-int genphy_c37_read_status(struct phy_device *phydev);
+int genphy_c37_read_status(struct phy_device *phydev, bool *changed);
/* Clause 45 PHY */
int genphy_c45_restart_aneg(struct phy_device *phydev);
@@ -1529,18 +2253,39 @@ int genphy_c45_read_link(struct phy_device *phydev);
int genphy_c45_read_lpa(struct phy_device *phydev);
int genphy_c45_read_pma(struct phy_device *phydev);
int genphy_c45_pma_setup_forced(struct phy_device *phydev);
+int genphy_c45_pma_baset1_setup_master_slave(struct phy_device *phydev);
int genphy_c45_an_config_aneg(struct phy_device *phydev);
int genphy_c45_an_disable_aneg(struct phy_device *phydev);
int genphy_c45_read_mdix(struct phy_device *phydev);
int genphy_c45_pma_read_abilities(struct phy_device *phydev);
+int genphy_c45_pma_read_ext_abilities(struct phy_device *phydev);
+int genphy_c45_pma_baset1_read_abilities(struct phy_device *phydev);
+int genphy_c45_read_eee_abilities(struct phy_device *phydev);
+int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev);
int genphy_c45_read_status(struct phy_device *phydev);
+int genphy_c45_baset1_read_status(struct phy_device *phydev);
int genphy_c45_config_aneg(struct phy_device *phydev);
-int genphy_c45_loopback(struct phy_device *phydev, bool enable);
+int genphy_c45_loopback(struct phy_device *phydev, bool enable, int speed);
int genphy_c45_pma_resume(struct phy_device *phydev);
int genphy_c45_pma_suspend(struct phy_device *phydev);
-
-/* Generic C45 PHY driver */
-extern struct phy_driver genphy_c45_driver;
+int genphy_c45_fast_retrain(struct phy_device *phydev, bool enable);
+int genphy_c45_plca_get_cfg(struct phy_device *phydev,
+ struct phy_plca_cfg *plca_cfg);
+int genphy_c45_plca_set_cfg(struct phy_device *phydev,
+ const struct phy_plca_cfg *plca_cfg);
+int genphy_c45_plca_get_status(struct phy_device *phydev,
+ struct phy_plca_status *plca_st);
+int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *lp);
+int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
+ struct ethtool_keee *data);
+int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
+ struct ethtool_keee *data);
+int genphy_c45_an_config_eee_aneg(struct phy_device *phydev);
+int genphy_c45_oatc14_cable_test_start(struct phy_device *phydev);
+int genphy_c45_oatc14_cable_test_get_status(struct phy_device *phydev,
+ bool *finished);
+int genphy_c45_oatc14_get_sqi_max(struct phy_device *phydev);
+int genphy_c45_oatc14_get_sqi(struct phy_device *phydev);
/* The gen10g_* functions are the old Clause 45 stub */
int gen10g_config_aneg(struct phy_device *phydev);
@@ -1556,14 +2301,11 @@ static inline int phy_read_status(struct phy_device *phydev)
return genphy_read_status(phydev);
}
-void phy_driver_unregister(struct phy_driver *drv);
void phy_drivers_unregister(struct phy_driver *drv, int n);
-int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
void phy_error(struct phy_device *phydev);
void phy_state_machine(struct work_struct *work);
-void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies);
void phy_trigger_machine(struct phy_device *phydev);
void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
@@ -1579,11 +2321,16 @@ int phy_disable_interrupts(struct phy_device *phydev);
void phy_request_interrupt(struct phy_device *phydev);
void phy_free_interrupt(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
-int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
+int phy_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface);
+void phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode);
void phy_advertise_supported(struct phy_device *phydev);
+void phy_advertise_eee_all(struct phy_device *phydev);
void phy_support_sym_pause(struct phy_device *phydev);
void phy_support_asym_pause(struct phy_device *phydev);
+void phy_support_eee(struct phy_device *phydev);
+void phy_disable_eee(struct phy_device *phydev);
void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
bool autoneg);
void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx);
@@ -1591,14 +2338,19 @@ bool phy_validate_pause(struct phy_device *phydev,
struct ethtool_pauseparam *pp);
void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause);
-s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
- const int *delay_values, int size, bool is_rx);
+s32 phy_get_internal_delay(struct phy_device *phydev, const int *delay_values,
+ int size, bool is_rx);
+
+int phy_get_tx_amplitude_gain(struct phy_device *phydev, struct device *dev,
+ enum ethtool_link_mode_bit_indices linkmode,
+ u32 *val);
+
+int phy_get_mac_termination(struct phy_device *phydev, struct device *dev,
+ u32 *val);
void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv,
bool *tx_pause, bool *rx_pause);
-int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
- int (*run)(struct phy_device *));
int phy_register_fixup_for_id(const char *bus_id,
int (*run)(struct phy_device *));
int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
@@ -1608,10 +2360,12 @@ int phy_unregister_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask);
int phy_unregister_fixup_for_id(const char *bus_id);
int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask);
+int phy_eee_tx_clock_stop_capable(struct phy_device *phydev);
+int phy_eee_rx_clock_stop(struct phy_device *phydev, bool clk_stop_enable);
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable);
int phy_get_eee_err(struct phy_device *phydev);
-int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data);
-int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data);
+int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data);
+int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data);
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol);
void phy_ethtool_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol);
@@ -1620,104 +2374,34 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev,
int phy_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd);
int phy_ethtool_nway_reset(struct net_device *ndev);
-int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size);
-void phy_package_leave(struct phy_device *phydev);
-int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
- int addr, size_t priv_size);
-
-#if IS_ENABLED(CONFIG_PHYLIB)
-int __init mdio_bus_init(void);
-void mdio_bus_exit(void);
-#endif
int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data);
int phy_ethtool_get_sset_count(struct phy_device *phydev);
int phy_ethtool_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data);
-static inline int phy_package_read(struct phy_device *phydev, u32 regnum)
-{
- struct phy_package_shared *shared = phydev->shared;
-
- if (!shared)
- return -EIO;
-
- return mdiobus_read(phydev->mdio.bus, shared->addr, regnum);
-}
-
-static inline int __phy_package_read(struct phy_device *phydev, u32 regnum)
-{
- struct phy_package_shared *shared = phydev->shared;
-
- if (!shared)
- return -EIO;
-
- return __mdiobus_read(phydev->mdio.bus, shared->addr, regnum);
-}
-
-static inline int phy_package_write(struct phy_device *phydev,
- u32 regnum, u16 val)
-{
- struct phy_package_shared *shared = phydev->shared;
-
- if (!shared)
- return -EIO;
-
- return mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
-}
-
-static inline int __phy_package_write(struct phy_device *phydev,
- u32 regnum, u16 val)
-{
- struct phy_package_shared *shared = phydev->shared;
-
- if (!shared)
- return -EIO;
-
- return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
-}
-
-static inline bool __phy_package_set_once(struct phy_device *phydev,
- unsigned int b)
-{
- struct phy_package_shared *shared = phydev->shared;
-
- if (!shared)
- return false;
-
- return !test_and_set_bit(b, &shared->flags);
-}
-
-static inline bool phy_package_init_once(struct phy_device *phydev)
-{
- return __phy_package_set_once(phydev, PHY_SHARED_F_INIT_DONE);
-}
-
-static inline bool phy_package_probe_once(struct phy_device *phydev)
-{
- return __phy_package_set_once(phydev, PHY_SHARED_F_PROBE_DONE);
-}
-
-extern struct bus_type mdio_bus_type;
-
-struct mdio_board_info {
- const char *bus_id;
- char modalias[MDIO_NAME_SIZE];
- int mdio_addr;
- const void *platform_data;
-};
-
-#if IS_ENABLED(CONFIG_MDIO_DEVICE)
-int mdiobus_register_board_info(const struct mdio_board_info *info,
- unsigned int n);
-#else
-static inline int mdiobus_register_board_info(const struct mdio_board_info *i,
- unsigned int n)
-{
- return 0;
-}
-#endif
-
+void __phy_ethtool_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *phy_stats,
+ struct ethtool_phy_stats *phydev_stats);
+void __phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats);
+
+int phy_ethtool_get_plca_cfg(struct phy_device *phydev,
+ struct phy_plca_cfg *plca_cfg);
+int phy_ethtool_set_plca_cfg(struct phy_device *phydev,
+ const struct phy_plca_cfg *plca_cfg,
+ struct netlink_ext_ack *extack);
+int phy_ethtool_get_plca_status(struct phy_device *phydev,
+ struct phy_plca_status *plca_st);
+
+int __phy_hwtstamp_get(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config);
+int __phy_hwtstamp_set(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+
+extern const struct bus_type mdio_bus_type;
+extern const struct class mdio_bus_class;
/**
* phy_module_driver() - Helper macro for registering PHY drivers
@@ -1743,7 +2427,4 @@ module_exit(phy_module_exit)
#define module_phy_driver(__phy_drivers) \
phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers))
-bool phy_driver_is_genphy(struct phy_device *phydev);
-bool phy_driver_is_genphy_10g(struct phy_device *phydev);
-
#endif /* __PHY_H */
diff --git a/include/linux/phy/pcie.h b/include/linux/phy/pcie.h
new file mode 100644
index 000000000000..e7ac81764576
--- /dev/null
+++ b/include/linux/phy/pcie.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
+ */
+#ifndef __PHY_PCIE_H
+#define __PHY_PCIE_H
+
+#define PHY_MODE_PCIE_RC 20
+#define PHY_MODE_PCIE_EP 21
+#define PHY_MODE_PCIE_BIFURCATION 22
+
+#endif
diff --git a/include/linux/phy/phy-dp.h b/include/linux/phy/phy-dp.h
index 18cad23642cd..9cce5766bc0b 100644
--- a/include/linux/phy/phy-dp.h
+++ b/include/linux/phy/phy-dp.h
@@ -8,6 +8,9 @@
#include <linux/types.h>
+#define PHY_SUBMODE_DP 0
+#define PHY_SUBMODE_EDP 1
+
/**
* struct phy_configure_opts_dp - DisplayPort PHY configuration set
*
diff --git a/include/linux/phy/phy-hdmi.h b/include/linux/phy/phy-hdmi.h
new file mode 100644
index 000000000000..f0ec963c6e84
--- /dev/null
+++ b/include/linux/phy/phy-hdmi.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2022,2024 NXP
+ */
+
+#ifndef __PHY_HDMI_H_
+#define __PHY_HDMI_H_
+
+/**
+ * struct phy_configure_opts_hdmi - HDMI configuration set
+ * @tmds_char_rate: HDMI TMDS Character Rate in Hertz.
+ * @bpc: Bits per color channel.
+ *
+ * This structure is used to represent the configuration state of a HDMI phy.
+ */
+struct phy_configure_opts_hdmi {
+ unsigned long long tmds_char_rate;
+ unsigned int bpc;
+};
+
+#endif /* __PHY_HDMI_H_ */
diff --git a/include/linux/phy/phy-lvds.h b/include/linux/phy/phy-lvds.h
new file mode 100644
index 000000000000..09931d080a6d
--- /dev/null
+++ b/include/linux/phy/phy-lvds.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020,2022 NXP
+ */
+
+#ifndef __PHY_LVDS_H_
+#define __PHY_LVDS_H_
+
+/**
+ * struct phy_configure_opts_lvds - LVDS configuration set
+ * @bits_per_lane_and_dclk_cycle: Number of bits per lane per differential
+ * clock cycle.
+ * @differential_clk_rate: Clock rate, in Hertz, of the LVDS
+ * differential clock.
+ * @lanes: Number of active, consecutive,
+ * data lanes, starting from lane 0,
+ * used for the transmissions.
+ * @is_slave: Boolean, true if the phy is a slave
+ * that works together with a master
+ * phy to support dual-link transmission;
+ * false for a regular phy or a master phy.
+ *
+ * This structure is used to represent the configuration state of a LVDS phy.
+ */
+struct phy_configure_opts_lvds {
+ unsigned int bits_per_lane_and_dclk_cycle;
+ unsigned long differential_clk_rate;
+ unsigned int lanes;
+ bool is_slave;
+};
+
+#endif /* __PHY_LVDS_H_ */
diff --git a/include/linux/phy/phy-mipi-dphy.h b/include/linux/phy/phy-mipi-dphy.h
index a877ffee845d..1ac128d78dfe 100644
--- a/include/linux/phy/phy-mipi-dphy.h
+++ b/include/linux/phy/phy-mipi-dphy.h
@@ -279,6 +279,9 @@ int phy_mipi_dphy_get_default_config(unsigned long pixel_clock,
unsigned int bpp,
unsigned int lanes,
struct phy_configure_opts_mipi_dphy *cfg);
+int phy_mipi_dphy_get_default_config_for_hsclk(unsigned long long hs_clk_rate,
+ unsigned int lanes,
+ struct phy_configure_opts_mipi_dphy *cfg);
int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg);
#endif /* __PHY_MIPI_DPHY_H_ */
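
A caller-side sketch of the new helper, assuming a hypothetical 891 MHz HS clock on four lanes:

	struct phy_configure_opts_mipi_dphy cfg;
	int ret;

	ret = phy_mipi_dphy_get_default_config_for_hsclk(891000000ULL, 4, &cfg);
	if (!ret)
		ret = phy_mipi_dphy_config_validate(&cfg);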
diff --git a/include/linux/phy/phy-sun4i-usb.h b/include/linux/phy/phy-sun4i-usb.h
index 91eb755ee73b..f3e7b13608e4 100644
--- a/include/linux/phy/phy-sun4i-usb.h
+++ b/include/linux/phy/phy-sun4i-usb.h
@@ -11,7 +11,7 @@
/**
* sun4i_usb_phy_set_squelch_detect() - Enable/disable squelch detect
* @phy: reference to a sun4i usb phy
- * @enabled: wether to enable or disable squelch detect
+ * @enabled: whether to enable or disable squelch detect
*/
void sun4i_usb_phy_set_squelch_detect(struct phy *phy, bool enabled);
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 0ed434d02196..2af0d01ebb39 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -17,6 +17,8 @@
#include <linux/regulator/consumer.h>
#include <linux/phy/phy-dp.h>
+#include <linux/phy/phy-hdmi.h>
+#include <linux/phy/phy-lvds.h>
#include <linux/phy/phy-mipi-dphy.h>
struct phy;
@@ -41,7 +43,8 @@ enum phy_mode {
PHY_MODE_MIPI_DPHY,
PHY_MODE_SATA,
PHY_MODE_LVDS,
- PHY_MODE_DP
+ PHY_MODE_DP,
+ PHY_MODE_HDMI,
};
enum phy_media {
@@ -50,6 +53,15 @@ enum phy_media {
PHY_MEDIA_DAC,
};
+enum phy_ufs_state {
+ PHY_UFS_HIBERN8_ENTER,
+ PHY_UFS_HIBERN8_EXIT,
+};
+
+union phy_notify {
+ enum phy_ufs_state ufs_state;
+};
+
/**
* union phy_configure_opts - Opaque generic phy configuration
*
@@ -57,10 +69,16 @@ enum phy_media {
* the MIPI_DPHY phy mode.
* @dp: Configuration set applicable for phys supporting
* the DisplayPort protocol.
+ * @lvds: Configuration set applicable for phys supporting
+ * the LVDS phy mode.
+ * @hdmi: Configuration set applicable for phys supporting
+ * the HDMI phy mode.
*/
union phy_configure_opts {
struct phy_configure_opts_mipi_dphy mipi_dphy;
struct phy_configure_opts_dp dp;
+ struct phy_configure_opts_lvds lvds;
+ struct phy_configure_opts_hdmi hdmi;
};
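
A consumer-side sketch (values illustrative) of passing the new HDMI member through the generic configure call:

	union phy_configure_opts opts = {
		.hdmi = {
			.tmds_char_rate = 148500000ULL,	/* e.g. 1080p60 */
			.bpc = 8,
		},
	};
	int ret = phy_configure(phy, &opts);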
/**
@@ -74,6 +92,7 @@ union phy_configure_opts {
* @set_speed: set the speed of the phy (optional)
* @reset: resetting the phy
* @calibrate: calibrate the phy
+ * @notify_phystate: notify and configure the phy for a particular state
* @release: ops to be performed while the consumer relinquishes the PHY
* @owner: the module owner containing the ops
*/
@@ -118,6 +137,12 @@ struct phy_ops {
union phy_configure_opts *opts);
int (*reset)(struct phy *phy);
int (*calibrate)(struct phy *phy);
+
+ /* notify phy connect status change */
+ int (*connect)(struct phy *phy, int port);
+ int (*disconnect)(struct phy *phy, int port);
+
+ int (*notify_phystate)(struct phy *phy, union phy_notify state);
void (*release)(struct phy *phy);
struct module *owner;
};
@@ -125,7 +150,7 @@ struct phy_ops {
/**
* struct phy_attrs - represents phy attributes
* @bus_width: Data path width implemented by PHY
- * @max_link_rate: Maximum link rate supported by PHY (in Mbps)
+ * @max_link_rate: Maximum link rate supported by PHY (units to be decided by producer and consumer)
* @mode: PHY mode
*/
struct phy_attrs {
@@ -140,20 +165,24 @@ struct phy_attrs {
* @id: id of the phy device
* @ops: function pointers for performing phy operations
* @mutex: mutex to protect phy_ops
+ * @lockdep_key: lockdep information for this mutex
* @init_count: used to protect when the PHY is used by multiple consumers
* @power_count: used to protect when the PHY is used by multiple consumers
* @attrs: used to specify PHY specific attributes
* @pwr: power regulator associated with the phy
+ * @debugfs: debugfs directory
*/
struct phy {
struct device dev;
int id;
const struct phy_ops *ops;
struct mutex mutex;
+ struct lock_class_key lockdep_key;
int init_count;
int power_count;
struct phy_attrs attrs;
struct regulator *pwr;
+ struct dentry *debugfs;
};
/**
@@ -170,7 +199,7 @@ struct phy_provider {
struct module *owner;
struct list_head list;
struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args);
+ const struct of_phandle_args *args);
};
/**
@@ -216,8 +245,6 @@ int phy_pm_runtime_get(struct phy *phy);
int phy_pm_runtime_get_sync(struct phy *phy);
int phy_pm_runtime_put(struct phy *phy);
int phy_pm_runtime_put_sync(struct phy *phy);
-void phy_pm_runtime_allow(struct phy *phy);
-void phy_pm_runtime_forbid(struct phy *phy);
int phy_init(struct phy *phy);
int phy_exit(struct phy *phy);
int phy_power_on(struct phy *phy);
@@ -237,6 +264,9 @@ static inline enum phy_mode phy_get_mode(struct phy *phy)
}
int phy_reset(struct phy *phy);
int phy_calibrate(struct phy *phy);
+int phy_notify_connect(struct phy *phy, int port);
+int phy_notify_disconnect(struct phy *phy, int port);
+int phy_notify_state(struct phy *phy, union phy_notify state);
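
A sketch of a consumer (e.g. a UFS host controller) using the new state notifier; ufs_phy and dev are assumed variables:

	union phy_notify state = { .ufs_state = PHY_UFS_HIBERN8_ENTER };
	int ret = phy_notify_state(ufs_phy, state);

	if (ret && ret != -ENOSYS)
		dev_err(dev, "PHY state notification failed: %d\n", ret);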
static inline int phy_get_bus_width(struct phy *phy)
{
return phy->attrs.bus_width;
@@ -246,11 +276,12 @@ static inline void phy_set_bus_width(struct phy *phy, int bus_width)
phy->attrs.bus_width = bus_width;
}
struct phy *phy_get(struct device *dev, const char *string);
-struct phy *phy_optional_get(struct device *dev, const char *string);
struct phy *devm_phy_get(struct device *dev, const char *string);
struct phy *devm_phy_optional_get(struct device *dev, const char *string);
struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
const char *con_id);
+struct phy *devm_of_phy_optional_get(struct device *dev, struct device_node *np,
+ const char *con_id);
struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
int index);
void of_phy_put(struct phy *phy);
@@ -258,7 +289,7 @@ void phy_put(struct device *dev, struct phy *phy);
void devm_phy_put(struct device *dev, struct phy *phy);
struct phy *of_phy_get(struct device_node *np, const char *con_id);
struct phy *of_phy_simple_xlate(struct device *dev,
- struct of_phandle_args *args);
+ const struct of_phandle_args *args);
struct phy *phy_create(struct device *dev, struct device_node *node,
const struct phy_ops *ops);
struct phy *devm_phy_create(struct device *dev, struct device_node *node,
@@ -268,11 +299,11 @@ void devm_phy_destroy(struct device *dev, struct phy *phy);
struct phy_provider *__of_phy_provider_register(struct device *dev,
struct device_node *children, struct module *owner,
struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args));
+ const struct of_phandle_args *args));
struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
struct device_node *children, struct module *owner,
struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args));
+ const struct of_phandle_args *args));
void of_phy_provider_unregister(struct phy_provider *phy_provider);
void devm_of_phy_provider_unregister(struct device *dev,
struct phy_provider *phy_provider);
@@ -307,16 +338,6 @@ static inline int phy_pm_runtime_put_sync(struct phy *phy)
return -ENOSYS;
}
-static inline void phy_pm_runtime_allow(struct phy *phy)
-{
- return;
-}
-
-static inline void phy_pm_runtime_forbid(struct phy *phy)
-{
- return;
-}
-
static inline int phy_init(struct phy *phy)
{
if (!phy)
@@ -389,6 +410,27 @@ static inline int phy_calibrate(struct phy *phy)
return -ENOSYS;
}
+static inline int phy_notify_connect(struct phy *phy, int index)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
+static inline int phy_notify_disconnect(struct phy *phy, int index)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
+static inline int phy_notify_state(struct phy *phy, union phy_notify state)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
static inline int phy_configure(struct phy *phy,
union phy_configure_opts *opts)
{
@@ -422,12 +464,6 @@ static inline struct phy *phy_get(struct device *dev, const char *string)
return ERR_PTR(-ENOSYS);
}
-static inline struct phy *phy_optional_get(struct device *dev,
- const char *string)
-{
- return ERR_PTR(-ENOSYS);
-}
-
static inline struct phy *devm_phy_get(struct device *dev, const char *string)
{
return ERR_PTR(-ENOSYS);
@@ -446,6 +482,13 @@ static inline struct phy *devm_of_phy_get(struct device *dev,
return ERR_PTR(-ENOSYS);
}
+static inline struct phy *devm_of_phy_optional_get(struct device *dev,
+ struct device_node *np,
+ const char *con_id)
+{
+ return NULL;
+}
+
static inline struct phy *devm_of_phy_get_by_index(struct device *dev,
struct device_node *np,
int index)
@@ -471,7 +514,7 @@ static inline struct phy *of_phy_get(struct device_node *np, const char *con_id)
}
static inline struct phy *of_phy_simple_xlate(struct device *dev,
- struct of_phandle_args *args)
+ const struct of_phandle_args *args)
{
return ERR_PTR(-ENOSYS);
}
@@ -501,7 +544,7 @@ static inline void devm_phy_destroy(struct device *dev, struct phy *phy)
static inline struct phy_provider *__of_phy_provider_register(
struct device *dev, struct device_node *children, struct module *owner,
struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
return ERR_PTR(-ENOSYS);
}
@@ -509,7 +552,7 @@ static inline struct phy_provider *__of_phy_provider_register(
static inline struct phy_provider *__devm_of_phy_provider_register(struct device
*dev, struct device_node *children, struct module *owner,
struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ const struct of_phandle_args *args))
{
return ERR_PTR(-ENOSYS);
}
diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h
index 71d956935405..6ca51e0080ec 100644
--- a/include/linux/phy/tegra/xusb.h
+++ b/include/linux/phy/tegra/xusb.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*/
#ifndef PHY_TEGRA_XUSB_H
@@ -8,6 +8,7 @@
struct tegra_xusb_padctl;
struct device;
+enum usb_device_speed;
struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev);
void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl);
@@ -20,7 +21,17 @@ int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
unsigned int port, bool enable);
int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl,
bool val);
+void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy);
+void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy);
int tegra_phy_xusb_utmi_port_reset(struct phy *phy);
int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
unsigned int port);
+int tegra_xusb_padctl_get_port_number(struct phy *phy);
+int tegra_xusb_padctl_enable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy,
+ enum usb_device_speed speed);
+int tegra_xusb_padctl_disable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy);
+int tegra_xusb_padctl_enable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy);
+int tegra_xusb_padctl_disable_phy_wake(struct tegra_xusb_padctl *padctl, struct phy *phy);
+bool tegra_xusb_padctl_remote_wake_detected(struct tegra_xusb_padctl *padctl, struct phy *phy);
+
#endif /* PHY_TEGRA_XUSB_H */
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 52bc8e487ef7..436bff20f324 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -2,51 +2,38 @@
#ifndef __PHY_FIXED_H
#define __PHY_FIXED_H
+#include <linux/types.h>
+
struct fixed_phy_status {
- int link;
int speed;
int duplex;
- int pause;
- int asym_pause;
+ bool link:1;
+ bool pause:1;
+ bool asym_pause:1;
};
struct device_node;
-struct gpio_desc;
+struct net_device;
#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
-extern int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status);
-extern struct phy_device *fixed_phy_register(unsigned int irq,
- struct fixed_phy_status *status,
- struct device_node *np);
-
-extern struct phy_device *
-fixed_phy_register_with_gpiod(unsigned int irq,
- struct fixed_phy_status *status,
- struct gpio_desc *gpiod);
+struct phy_device *fixed_phy_register(const struct fixed_phy_status *status,
+ struct device_node *np);
+struct phy_device *fixed_phy_register_100fd(void);
extern void fixed_phy_unregister(struct phy_device *phydev);
extern int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
struct fixed_phy_status *));
#else
-static inline int fixed_phy_add(unsigned int irq, int phy_id,
- struct fixed_phy_status *status)
-{
- return -ENODEV;
-}
-static inline struct phy_device *fixed_phy_register(unsigned int irq,
- struct fixed_phy_status *status,
- struct device_node *np)
+static inline struct phy_device *
+fixed_phy_register(const struct fixed_phy_status *status,
+ struct device_node *np)
{
return ERR_PTR(-ENODEV);
}
-static inline struct phy_device *
-fixed_phy_register_with_gpiod(unsigned int irq,
- struct fixed_phy_status *status,
- struct gpio_desc *gpiod)
+static inline struct phy_device *fixed_phy_register_100fd(void)
{
return ERR_PTR(-ENODEV);
}
@@ -54,16 +41,6 @@ fixed_phy_register_with_gpiod(unsigned int irq,
static inline void fixed_phy_unregister(struct phy_device *phydev)
{
}
-static inline int fixed_phy_set_link_update(struct phy_device *phydev,
- int (*link_update)(struct net_device *,
- struct fixed_phy_status *))
-{
- return -ENODEV;
-}
-static inline int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier)
-{
- return -EINVAL;
-}
#endif /* CONFIG_FIXED_PHY */
#endif /* __PHY_FIXED_H */
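
A sketch of the reworked registration call, creating a fixed 1G full-duplex link (values illustrative, using the usual ethtool speed/duplex constants):

	static const struct fixed_phy_status status = {
		.link = true,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phydev = fixed_phy_register(&status, NULL);

	if (IS_ERR(phydev))
		return PTR_ERR(phydev);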
diff --git a/include/linux/phy_link_topology.h b/include/linux/phy_link_topology.h
new file mode 100644
index 000000000000..68a59e25821c
--- /dev/null
+++ b/include/linux/phy_link_topology.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * The PHY device list allows maintaining a list of PHY devices that are
+ * part of a netdevice's link topology. PHYs can, for example, be chained,
+ * as is the case when using a PHY that exposes an SFP module, on which an
+ * SFP transceiver embedding another PHY is connected.
+ *
+ * This list can then be used by userspace to leverage individual PHY
+ * capabilities.
+ */
+#ifndef __PHY_LINK_TOPOLOGY_H
+#define __PHY_LINK_TOPOLOGY_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+struct xarray;
+struct phy_device;
+struct sfp_bus;
+
+struct phy_link_topology {
+ struct xarray phys;
+ u32 next_phy_index;
+};
+
+struct phy_device_node {
+ enum phy_upstream upstream_type;
+
+ union {
+ struct net_device *netdev;
+ struct phy_device *phydev;
+ } upstream;
+
+ struct sfp_bus *parent_sfp_bus;
+
+ struct phy_device *phy;
+};
+
+#if IS_ENABLED(CONFIG_PHYLIB)
+int phy_link_topo_add_phy(struct net_device *dev,
+ struct phy_device *phy,
+ enum phy_upstream upt, void *upstream);
+
+void phy_link_topo_del_phy(struct net_device *dev, struct phy_device *phy);
+
+static inline struct phy_device *
+phy_link_topo_get_phy(struct net_device *dev, u32 phyindex)
+{
+ struct phy_link_topology *topo = dev->link_topo;
+ struct phy_device_node *pdn;
+
+ if (!topo)
+ return NULL;
+
+ pdn = xa_load(&topo->phys, phyindex);
+ if (pdn)
+ return pdn->phy;
+
+ return NULL;
+}
+
+#else
+static inline int phy_link_topo_add_phy(struct net_device *dev,
+ struct phy_device *phy,
+ enum phy_upstream upt, void *upstream)
+{
+ return 0;
+}
+
+static inline void phy_link_topo_del_phy(struct net_device *dev,
+ struct phy_device *phy)
+{
+}
+
+static inline struct phy_device *
+phy_link_topo_get_phy(struct net_device *dev, u32 phyindex)
+{
+ return NULL;
+}
+#endif
+
+#endif /* __PHY_LINK_TOPOLOGY_H */
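
A lookup sketch, resolving a PHY index (as reported to userspace) back to its phy_device; locking is the caller's responsibility and the variable names are assumed:

	struct phy_device *phydev;

	phydev = phy_link_topo_get_phy(dev, phyindex);
	if (!phydev)
		return -ENODEV;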
diff --git a/include/linux/phylib_stubs.h b/include/linux/phylib_stubs.h
new file mode 100644
index 000000000000..9d2d6090c86d
--- /dev/null
+++ b/include/linux/phylib_stubs.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Stubs for the Network PHY library
+ */
+
+#include <linux/rtnetlink.h>
+
+struct ethtool_eth_phy_stats;
+struct ethtool_link_ext_stats;
+struct ethtool_phy_stats;
+struct kernel_hwtstamp_config;
+struct netlink_ext_ack;
+struct phy_device;
+
+#if IS_ENABLED(CONFIG_PHYLIB)
+
+extern const struct phylib_stubs *phylib_stubs;
+
+struct phylib_stubs {
+ int (*hwtstamp_get)(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config);
+ int (*hwtstamp_set)(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
+ void (*get_phy_stats)(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *phy_stats,
+ struct ethtool_phy_stats *phydev_stats);
+ void (*get_link_ext_stats)(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats);
+};
+
+static inline int phy_hwtstamp_get(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config)
+{
+ /* phylib_register_stubs() and phylib_unregister_stubs()
+ * also run under rtnl_lock().
+ */
+ ASSERT_RTNL();
+
+ if (!phylib_stubs)
+ return -EOPNOTSUPP;
+
+ return phylib_stubs->hwtstamp_get(phydev, config);
+}
+
+static inline int phy_hwtstamp_set(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ /* phylib_register_stubs() and phylib_unregister_stubs()
+ * also run under rtnl_lock().
+ */
+ ASSERT_RTNL();
+
+ if (!phylib_stubs)
+ return -EOPNOTSUPP;
+
+ return phylib_stubs->hwtstamp_set(phydev, config, extack);
+}
+
+static inline void phy_ethtool_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *phy_stats,
+ struct ethtool_phy_stats *phydev_stats)
+{
+ ASSERT_RTNL();
+
+ if (!phylib_stubs)
+ return;
+
+ phylib_stubs->get_phy_stats(phydev, phy_stats, phydev_stats);
+}
+
+static inline void phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats)
+{
+ ASSERT_RTNL();
+
+ if (!phylib_stubs)
+ return;
+
+ phylib_stubs->get_link_ext_stats(phydev, link_stats);
+}
+
+#else
+
+static inline int phy_hwtstamp_get(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int phy_hwtstamp_set(struct phy_device *phydev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void phy_ethtool_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_eth_phy_stats *phy_stats,
+ struct ethtool_phy_stats *phydev_stats)
+{
+}
+
+static inline void phy_ethtool_get_link_ext_stats(struct phy_device *phydev,
+ struct ethtool_link_ext_stats *link_stats)
+{
+}
+
+#endif
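
A caller-side sketch of the stub indirection: core code can invoke these wrappers unconditionally, and they degrade to -EOPNOTSUPP (or no-ops) when phylib is absent or not yet registered. The fallback helper here is hypothetical:

	int err = phy_hwtstamp_set(phydev, config, extack);

	if (err == -EOPNOTSUPP)
		err = mac_hwtstamp_set(dev, config, extack); /* hypothetical MAC fallback */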
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index fd2acfd9b597..38363e566ac3 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -5,10 +5,13 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include <net/eee.h>
+
struct device_node;
struct ethtool_cmd;
struct fwnode_handle;
struct net_device;
+struct phylink;
enum {
MLO_PAUSE_NONE,
@@ -20,6 +23,76 @@ enum {
MLO_AN_PHY = 0, /* Conventional PHY */
MLO_AN_FIXED, /* Fixed-link mode */
MLO_AN_INBAND, /* In-band protocol */
+
+ /* PCS "negotiation" mode.
+ * PHYLINK_PCS_NEG_NONE - protocol has no inband capability
+ * PHYLINK_PCS_NEG_OUTBAND - some out of band or fixed link setting
+ * PHYLINK_PCS_NEG_INBAND_DISABLED - inband mode disabled, e.g.
+ * 1000base-X with autoneg off
+ * PHYLINK_PCS_NEG_INBAND_ENABLED - inband mode enabled
+ * Additionally, this can be tested using bitmasks:
+ * PHYLINK_PCS_NEG_INBAND - inband mode selected
+ * PHYLINK_PCS_NEG_ENABLED - negotiation mode enabled
+ */
+ PHYLINK_PCS_NEG_NONE = 0,
+ PHYLINK_PCS_NEG_ENABLED = BIT(4),
+ PHYLINK_PCS_NEG_OUTBAND = BIT(5),
+ PHYLINK_PCS_NEG_INBAND = BIT(6),
+ PHYLINK_PCS_NEG_INBAND_DISABLED = PHYLINK_PCS_NEG_INBAND,
+ PHYLINK_PCS_NEG_INBAND_ENABLED = PHYLINK_PCS_NEG_INBAND |
+ PHYLINK_PCS_NEG_ENABLED,
+
+ /* MAC_SYM_PAUSE and MAC_ASYM_PAUSE are used when configuring our
+ * autonegotiation advertisement. They correspond to the PAUSE and
+ * ASM_DIR bits defined by 802.3, respectively.
+ *
+ * The following table lists the values of tx_pause and rx_pause which
+ * might be requested in mac_link_up. The exact values depend on either
+ * the results of autonegotiation (if MLO_PAUSE_AN is set) or user
+ * configuration (if MLO_PAUSE_AN is not set).
+ *
+ * MAC_SYM_PAUSE MAC_ASYM_PAUSE MLO_PAUSE_AN tx_pause/rx_pause
+ * ============= ============== ============ ==================
+ * 0 0 0 0/0
+ * 0 0 1 0/0
+ * 0 1 0 0/0, 0/1, 1/0, 1/1
+ * 0 1 1 0/0, 1/0
+ * 1 0 0 0/0, 1/1
+ * 1 0 1 0/0, 1/1
+ * 1 1 0 0/0, 0/1, 1/0, 1/1
+ * 1 1 1 0/0, 0/1, 1/1
+ *
+ * If you set MAC_ASYM_PAUSE, the user may request any combination of
+ * tx_pause and rx_pause. You do not have to support these
+ * combinations.
+ *
+ * However, you should support combinations of tx_pause and rx_pause
+ * which might be the result of autonegotiation. For example, don't set
+ * MAC_SYM_PAUSE unless your device can support tx_pause and rx_pause
+ * at the same time.
+ */
+ MAC_SYM_PAUSE = BIT(0),
+ MAC_ASYM_PAUSE = BIT(1),
+ MAC_10HD = BIT(2),
+ MAC_10FD = BIT(3),
+ MAC_10 = MAC_10HD | MAC_10FD,
+ MAC_100HD = BIT(4),
+ MAC_100FD = BIT(5),
+ MAC_100 = MAC_100HD | MAC_100FD,
+ MAC_1000HD = BIT(6),
+ MAC_1000FD = BIT(7),
+ MAC_1000 = MAC_1000HD | MAC_1000FD,
+ MAC_2500FD = BIT(8),
+ MAC_5000FD = BIT(9),
+ MAC_10000FD = BIT(10),
+ MAC_20000FD = BIT(11),
+ MAC_25000FD = BIT(12),
+ MAC_40000FD = BIT(13),
+ MAC_50000FD = BIT(14),
+ MAC_56000FD = BIT(15),
+ MAC_100000FD = BIT(16),
+ MAC_200000FD = BIT(17),
+ MAC_400000FD = BIT(18),
};
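
A sketch of a MAC driver declaring its abilities from these bits (the chosen set is illustrative):

	config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000FD;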
static inline bool phylink_autoneg_inband(unsigned int mode)
@@ -36,8 +109,11 @@ static inline bool phylink_autoneg_inband(unsigned int mode)
* @speed: link speed, one of the SPEED_* constants.
* @duplex: link duplex mode, one of DUPLEX_* constants.
* @pause: link pause state, described by MLO_PAUSE_* constants.
+ * @rate_matching: rate matching being performed, one of the RATE_MATCH_*
+ * constants. If rate matching is taking place, then the speed/duplex of
+ * the medium link mode (@speed and @duplex) and the speed/duplex of the phy
+ * interface mode (@interface) are different.
* @link: true if the link is up.
- * @an_enabled: true if autonegotiation is enabled/desired.
* @an_complete: true if autonegotiation has completed.
*/
struct phylink_link_state {
@@ -47,8 +123,8 @@ struct phylink_link_state {
int speed;
int duplex;
int pause;
+ int rate_matching;
unsigned int link:1;
- unsigned int an_enabled:1;
unsigned int an_complete:1;
};
@@ -61,101 +137,122 @@ enum phylink_op_type {
* struct phylink_config - PHYLINK configuration structure
* @dev: a pointer to a struct device associated with the MAC
* @type: operation type of PHYLINK instance
- * @pcs_poll: MAC PCS cannot provide link change interrupt
* @poll_fixed_state: if true, starts link_poll,
* if MAC link is at %MLO_AN_FIXED mode.
- * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND
+ * @mac_managed_pm: if true, indicates the MAC driver is responsible for PHY PM.
+ * @mac_requires_rxc: if true, the MAC always requires a receive clock from PHY.
+ * The PHY driver should start the clock signal as soon as
+ * possible and avoid stopping it during suspend events.
+ * @default_an_inband: if true, defaults to MLO_AN_INBAND rather than
+ * MLO_AN_PHY. A fixed-link specification will override.
+ * @eee_rx_clk_stop_enable: if true, PHY can stop the receive clock during LPI
* @get_fixed_state: callback to execute to determine the fixed link state,
* if MAC link is at %MLO_AN_FIXED mode.
+ * @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx
+ * are supported by the MAC/PCS.
+ * @lpi_interfaces: bitmap describing which PHY interface modes can support
+ * LPI signalling.
+ * @mac_capabilities: MAC pause/speed/duplex capabilities.
+ * @lpi_capabilities: MAC speeds which can support LPI signalling
+ * @lpi_timer_default: Default EEE LPI timer setting.
+ * @eee_enabled_default: If set, EEE will be enabled by phylink at creation time
+ * @wol_phy_legacy: Use Wake-on-LAN with PHY even if phy_can_wakeup() is false
+ * @wol_phy_speed_ctrl: Use phy speed control on suspend/resume
+ * @wol_mac_support: Bitmask of MAC supported %WAKE_* options
*/
struct phylink_config {
struct device *dev;
enum phylink_op_type type;
- bool pcs_poll;
bool poll_fixed_state;
- bool ovr_an_inband;
+ bool mac_managed_pm;
+ bool mac_requires_rxc;
+ bool default_an_inband;
+ bool eee_rx_clk_stop_enable;
void (*get_fixed_state)(struct phylink_config *config,
struct phylink_link_state *state);
+ DECLARE_PHY_INTERFACE_MASK(supported_interfaces);
+ DECLARE_PHY_INTERFACE_MASK(lpi_interfaces);
+ unsigned long mac_capabilities;
+ unsigned long lpi_capabilities;
+ u32 lpi_timer_default;
+ bool eee_enabled_default;
+
+ /* Wake-on-LAN support */
+ bool wol_phy_legacy;
+ bool wol_phy_speed_ctrl;
+ u32 wol_mac_support;
};
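
As a minimal sketch of how the capability bits and config fields above fit together (a hypothetical foo_* driver, not part of this patch), a MAC driver populates its phylink_config once at probe time:

/* Sketch: advertise pause plus 10M/100M and 1G full duplex, usable on
 * RGMII or SGMII interface modes.
 */
static void foo_phylink_setup(struct foo_priv *priv, struct device *dev)
{
	priv->phylink_config.dev = dev;
	priv->phylink_config.type = PHYLINK_NETDEV;

	priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE |
		MAC_ASYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;

	__set_bit(PHY_INTERFACE_MODE_RGMII,
		  priv->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_SGMII,
		  priv->phylink_config.supported_interfaces);
}
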
+void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed);
+
/**
* struct phylink_mac_ops - MAC operations structure.
- * @validate: Validate and update the link configuration.
- * @mac_pcs_get_state: Read the current link state from the hardware.
+ * @mac_get_caps: Get MAC capabilities for interface mode.
+ * @mac_select_pcs: Select a PCS for the interface mode.
* @mac_prepare: prepare for a major reconfiguration of the interface.
* @mac_config: configure the MAC for the selected mode and state.
* @mac_finish: finish a major reconfiguration of the interface.
- * @mac_an_restart: restart 802.3z BaseX autonegotiation.
* @mac_link_down: take the link down.
* @mac_link_up: allow the link to come up.
+ * @mac_disable_tx_lpi: disable LPI.
+ * @mac_enable_tx_lpi: enable and configure LPI.
+ * @mac_wol_set: configure Wake-on-LAN settings at the MAC.
*
* The individual methods are described more fully below.
*/
struct phylink_mac_ops {
- void (*validate)(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state);
- void (*mac_pcs_get_state)(struct phylink_config *config,
- struct phylink_link_state *state);
+ unsigned long (*mac_get_caps)(struct phylink_config *config,
+ phy_interface_t interface);
+ struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config,
+ phy_interface_t interface);
int (*mac_prepare)(struct phylink_config *config, unsigned int mode,
phy_interface_t iface);
void (*mac_config)(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state);
int (*mac_finish)(struct phylink_config *config, unsigned int mode,
phy_interface_t iface);
- void (*mac_an_restart)(struct phylink_config *config);
void (*mac_link_down)(struct phylink_config *config, unsigned int mode,
phy_interface_t interface);
void (*mac_link_up)(struct phylink_config *config,
struct phy_device *phy, unsigned int mode,
phy_interface_t interface, int speed, int duplex,
bool tx_pause, bool rx_pause);
+ void (*mac_disable_tx_lpi)(struct phylink_config *config);
+ int (*mac_enable_tx_lpi)(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop);
+
+ int (*mac_wol_set)(struct phylink_config *config, u32 wolopts,
+ const u8 *sopass);
};
#if 0 /* For kernel-doc purposes only. */
/**
- * validate - Validate and update the link configuration
+ * mac_get_caps() - Get MAC capabilities for interface mode.
* @config: a pointer to a &struct phylink_config.
- * @supported: ethtool bitmask for supported link modes.
- * @state: a pointer to a &struct phylink_link_state.
- *
- * Clear bits in the @supported and @state->advertising masks that
- * are not supportable by the MAC.
+ * @interface: PHY interface mode.
*
- * Note that the PHY may be able to transform from one connection
- * technology to another, so, eg, don't clear 1000BaseX just
- * because the MAC is unable to BaseX mode. This is more about
- * clearing unsupported speeds and duplex settings. The port modes
- * should not be cleared; phylink_set_port_modes() will help with this.
- *
- * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX
- * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
- * based on @state->advertising and/or @state->speed and update
- * @state->interface accordingly. See phylink_helper_basex_speed().
- *
- * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink expects the
- * MAC driver to return all supported link modes.
- *
- * If the @state->interface mode is not supported, then the @supported
- * mask must be cleared.
+ * Optional method. When not provided, config->mac_capabilities will be used.
+ * When implemented, this returns the MAC capabilities for the specified
+ * interface mode where there is some special handling required by the MAC
+ * driver (e.g. not supporting half-duplex in certain interface modes).
*/
-void validate(struct phylink_config *config, unsigned long *supported,
- struct phylink_link_state *state);
-
+unsigned long mac_get_caps(struct phylink_config *config,
+ phy_interface_t interface);
/**
- * mac_pcs_get_state() - Read the current inband link state from the hardware
+ * mac_select_pcs() - Select a PCS for the interface mode.
* @config: a pointer to a &struct phylink_config.
- * @state: a pointer to a &struct phylink_link_state.
+ * @interface: PHY interface mode for PCS
*
- * Read the current inband link state from the MAC PCS, reporting the
- * current speed in @state->speed, duplex mode in @state->duplex, pause
- * mode in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits,
- * negotiation completion state in @state->an_complete, and link up state
- * in @state->link. If possible, @state->lp_advertising should also be
- * populated.
+ * Return the &struct phylink_pcs for the specified interface mode, or
+ * NULL if none is required, or an error pointer on error.
+ *
+ * This must not modify any state. It is used to query which PCS should
+ * be used. Phylink will use this during validation to ensure that the
+ * configuration is valid, and when setting a configuration to internally
+ * set the PCS that will be used.
*/
-void mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state);
+struct phylink_pcs *mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface);
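
A sketch of a conforming implementation (hypothetical foo_* names; the two PCS instances are assumed to live in the driver's private data). Note that, as required above, it only selects and does not modify any state:

static struct phylink_pcs *foo_mac_select_pcs(struct phylink_config *config,
					      phy_interface_t interface)
{
	struct foo_priv *priv = container_of(config, struct foo_priv,
					     phylink_config);

	switch (interface) {
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		return &priv->sgmii_pcs;
	case PHY_INTERFACE_MODE_USXGMII:
		return &priv->xpcs;
	default:
		return NULL;	/* no PCS required for this mode */
	}
}
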
/**
* mac_prepare() - prepare to change the PHY interface mode
@@ -193,8 +290,9 @@ int mac_prepare(struct phylink_config *config, unsigned int mode,
* guaranteed to be correct, and so any mac_config() implementation must
* never reference these fields.
*
- * (this requires a rewrite - please refer to mac_link_up() for situations
- * where the PCS and MAC are not tightly integrated.)
+ * This will only be called to reconfigure the MAC for a "major" change in
+ * e.g. interface mode. It will not be called for changes in speed, duplex
+ * or pause modes or to change the in-band advertisement.
*
* In all negotiation modes, as defined by @mode, @state->pause indicates the
* pause settings which should be applied as follows. If %MLO_PAUSE_AN is not
@@ -226,7 +324,7 @@ int mac_prepare(struct phylink_config *config, unsigned int mode,
* 1000base-X or Cisco SGMII mode depending on the @state->interface
* mode). In both cases, link state management (whether the link
* is up or not) is performed by the MAC, and reported via the
- * mac_pcs_get_state() callback. Changes in link state must be made
+ * pcs_get_state() callback. Changes in link state must be made
* by calling phylink_mac_change().
*
* Interface mode specific details are mentioned below.
@@ -234,9 +332,8 @@ int mac_prepare(struct phylink_config *config, unsigned int mode,
* If in 802.3z mode, the link speed is fixed, dependent on the
* @state->interface. Duplex and pause modes are negotiated via
* the in-band configuration word. Advertised pause modes are set
- * according to the @state->an_enabled and @state->advertising
- * flags. Beware of MACs which only support full duplex at gigabit
- * and higher speeds.
+ * according to @state->advertising. Beware of MACs which only
+ * support full duplex at gigabit and higher speeds.
*
* If in Cisco SGMII mode, the link speed and duplex mode are passed
* in the serial bitstream 16-bit configuration word, and the MAC
@@ -245,7 +342,7 @@ int mac_prepare(struct phylink_config *config, unsigned int mode,
* responsible for reading the configuration word and configuring
* itself accordingly.
*
- * Valid state members: interface, an_enabled, pause, advertising.
+ * Valid state members: interface, pause, advertising.
*
* Implementations are expected to update the MAC to reflect the
* requested settings - i.o.w., if nothing has changed between two
@@ -275,29 +372,29 @@ int mac_finish(struct phylink_config *config, unsigned int mode,
phy_interface_t iface);
/**
- * mac_an_restart() - restart 802.3z BaseX autonegotiation
- * @config: a pointer to a &struct phylink_config.
- */
-void mac_an_restart(struct phylink_config *config);
-
-/**
- * mac_link_down() - take the link down
+ * mac_link_down() - notification that the link has gone down
* @config: a pointer to a &struct phylink_config.
* @mode: link autonegotiation mode
* @interface: link &typedef phy_interface_t mode
*
- * If @mode is not an in-band negotiation mode (as defined by
- * phylink_autoneg_inband()), force the link down and disable any
- * Energy Efficient Ethernet MAC configuration. Interface type
- * selection must be done in mac_config().
+ * Notifies the MAC that the link has gone down. This will not be called
+ * unless mac_link_up() has been previously called.
+ *
+ * The MAC should stop processing packets for transmission and reception.
+ * phylink will have called netif_carrier_off() to notify the networking
+ * stack that the link has gone down, so MAC drivers should not make this
+ * call.
+ *
+ * If @mode is %MLO_AN_INBAND, then this function must not prevent the
+ * link coming up.
*/
void mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface);
/**
- * mac_link_up() - allow the link to come up
+ * mac_link_up() - notification that the link has come up
* @config: a pointer to a &struct phylink_config.
- * @phy: any attached phy
+ * @phy: any attached phy (deprecated - please use LPI interfaces)
* @mode: link autonegotiation mode
* @interface: link &typedef phy_interface_t mode
* @speed: link speed
@@ -305,7 +402,10 @@ void mac_link_down(struct phylink_config *config, unsigned int mode,
* @tx_pause: link transmit pause enablement status
* @rx_pause: link receive pause enablement status
*
- * Configure the MAC for an established link.
+ * Notifies the MAC that the link has come up, and the parameters of the
+ * link as seen from the MAC's point of view. If mac_link_up() has been
+ * called previously, there will be an intervening call to mac_link_down()
+ * before this method is called again.
*
* @speed, @duplex, @tx_pause and @rx_pause indicate the finalised link
* settings, and should be used to configure the MAC block appropriately
@@ -317,55 +417,180 @@ void mac_link_down(struct phylink_config *config, unsigned int mode,
* that the user wishes to override the pause settings, and this should
* be allowed when considering the implementation of this method.
*
- * If in-band negotiation mode is disabled, allow the link to come up. If
- * @phy is non-%NULL, configure Energy Efficient Ethernet by calling
- * phy_init_eee() and perform appropriate MAC configuration for EEE.
+ * Once configured, the MAC may begin to process packets for transmission
+ * and reception.
+ *
* Interface type selection must be done in mac_config().
*/
void mac_link_up(struct phylink_config *config, struct phy_device *phy,
unsigned int mode, phy_interface_t interface,
int speed, int duplex, bool tx_pause, bool rx_pause);
+
+/**
+ * mac_disable_tx_lpi() - disable LPI generation at the MAC
+ * @config: a pointer to a &struct phylink_config.
+ *
+ * Disable generation of LPI at the MAC, effectively preventing the MAC
+ * from indicating that it is idle.
+ */
+void mac_disable_tx_lpi(struct phylink_config *config);
+
+/**
+ * mac_enable_tx_lpi() - configure and enable LPI generation at the MAC
+ * @config: a pointer to a &struct phylink_config.
+ * @timer: LPI timeout in microseconds.
+ * @tx_clk_stop: allow xMII transmit clock to be stopped during LPI
+ *
+ * Configure the LPI timeout accordingly. This will only be called when
+ * the link is already up, to cater for situations where the hardware
+ * needs to be programmed according to the link speed.
+ *
+ * Enable LPI generation at the MAC, and configure whether the xMII transmit
+ * clock may be stopped.
+ *
+ * Returns: 0 on success. Please consult with rmk before returning an error.
+ */
+int mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop);
+
+/**
+ * mac_wol_set() - configure the Wake-on-LAN parameters
+ * @config: a pointer to a &struct phylink_config.
+ * @wolopts: Bitmask of %WAKE_* flags for enabled Wake-on-LAN modes.
+ * @sopass: SecureOn(tm) password; meaningful only for %WAKE_MAGICSECURE
+ *
+ * Enable the specified Wake-on-LAN options at the MAC. Options that the
+ * PHY can handle will have been removed from @wolopts.
+ *
+ * The presence of this method enables phylink-managed WoL support.
+ *
+ * Returns: 0 on success.
+ */
+int mac_wol_set(struct phylink_config *config, u32 wolopts,
+ const u8 *sopass);
#endif
struct phylink_pcs_ops;
/**
* struct phylink_pcs - PHYLINK PCS instance
+ * @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx
+ * are supported by this PCS.
* @ops: a pointer to the &struct phylink_pcs_ops structure
+ * @phylink: pointer to the &struct phylink instance
* @poll: poll the PCS for link changes
+ * @rxc_always_on: The MAC driver requires the reference clock
+ * to always be on. Standalone PCS drivers which
+ * do not have access to a PHY device can check
+ * this instead of PHY_F_RXC_ALWAYS_ON.
*
* This structure is designed to be embedded within the PCS private data,
* and will be passed between phylink and the PCS.
+ *
+ * The @phylink member is private to phylink and must not be touched by
+ * the PCS driver.
*/
struct phylink_pcs {
+ DECLARE_PHY_INTERFACE_MASK(supported_interfaces);
const struct phylink_pcs_ops *ops;
+ struct phylink *phylink;
bool poll;
+ bool rxc_always_on;
};
/**
* struct phylink_pcs_ops - MAC PCS operations structure.
+ * @pcs_validate: validate the link configuration.
+ * @pcs_inband_caps: query inband support for interface mode.
+ * @pcs_enable: enable the PCS.
+ * @pcs_disable: disable the PCS.
+ * @pcs_pre_config: pre-mac_config method (for errata)
+ * @pcs_post_config: post-mac_config method (for errata)
* @pcs_get_state: read the current MAC PCS link state from the hardware.
* @pcs_config: configure the MAC PCS for the selected mode and state.
* @pcs_an_restart: restart 802.3z BaseX autonegotiation.
* @pcs_link_up: program the PCS for the resolved link configuration
* (where necessary).
+ * @pcs_disable_eee: optional notification to PCS that EEE has been disabled
+ * at the MAC.
+ * @pcs_enable_eee: optional notification to PCS that EEE will be enabled at
+ * the MAC.
+ * @pcs_pre_init: configure PCS components necessary for MAC hardware
+ * initialization e.g. RX clock for stmmac.
*/
struct phylink_pcs_ops {
- void (*pcs_get_state)(struct phylink_pcs *pcs,
+ int (*pcs_validate)(struct phylink_pcs *pcs, unsigned long *supported,
+ const struct phylink_link_state *state);
+ unsigned int (*pcs_inband_caps)(struct phylink_pcs *pcs,
+ phy_interface_t interface);
+ int (*pcs_enable)(struct phylink_pcs *pcs);
+ void (*pcs_disable)(struct phylink_pcs *pcs);
+ void (*pcs_pre_config)(struct phylink_pcs *pcs,
+ phy_interface_t interface);
+ int (*pcs_post_config)(struct phylink_pcs *pcs,
+ phy_interface_t interface);
+ void (*pcs_get_state)(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state);
- int (*pcs_config)(struct phylink_pcs *pcs, unsigned int mode,
+ int (*pcs_config)(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac);
void (*pcs_an_restart)(struct phylink_pcs *pcs);
- void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int mode,
+ void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex);
+ void (*pcs_disable_eee)(struct phylink_pcs *pcs);
+ void (*pcs_enable_eee)(struct phylink_pcs *pcs);
+ int (*pcs_pre_init)(struct phylink_pcs *pcs);
};
#if 0 /* For kernel-doc purposes only. */
/**
+ * pcs_validate() - validate the link configuration.
+ * @pcs: a pointer to a &struct phylink_pcs.
+ * @supported: ethtool bitmask for supported link modes.
+ * @state: a const pointer to a &struct phylink_link_state.
+ *
+ * Validate the interface mode, and advertising's autoneg bit, removing any
+ * media ethtool link modes that would not be supportable from the supported
+ * mask. Phylink will propagate the changes to the advertising mask.
+ *
+ * Returns -EINVAL if the interface mode/autoneg mode is not supported.
+ * Returns non-zero positive if the link state can be supported.
+ */
+int pcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
+ const struct phylink_link_state *state);
+
+/**
+ * pcs_inband_caps() - query PCS in-band capabilities for interface mode.
+ * @pcs: a pointer to a &struct phylink_pcs.
+ * @interface: interface mode to be queried
+ *
+ * Returns zero if it is unknown what in-band signalling is supported by the
+ * PCS (e.g. because the PCS driver doesn't implement the method). Otherwise,
+ * returns a bit mask of the LINK_INBAND_* values from
+ * &enum link_inband_signalling to describe which inband modes are supported
+ * for this interface mode.
+ */
+unsigned int pcs_inband_caps(struct phylink_pcs *pcs,
+ phy_interface_t interface);
+
+/**
+ * pcs_enable() - enable the PCS.
+ * @pcs: a pointer to a &struct phylink_pcs.
+ */
+int pcs_enable(struct phylink_pcs *pcs);
+
+/**
+ * pcs_disable() - disable the PCS.
+ * @pcs: a pointer to a &struct phylink_pcs.
+ */
+void pcs_disable(struct phylink_pcs *pcs);
+
+/**
* pcs_get_state() - Read the current inband link state from the hardware
* @pcs: a pointer to a &struct phylink_pcs.
+ * @neg_mode: link negotiation mode (PHYLINK_PCS_NEG_xxx)
* @state: a pointer to a &struct phylink_link_state.
*
* Read the current inband link state from the MAC PCS, reporting the
@@ -375,16 +600,16 @@ struct phylink_pcs_ops {
* in @state->link. If possible, @state->lp_advertising should also be
* populated.
*
- * When present, this overrides mac_pcs_get_state() in &struct
- * phylink_mac_ops.
+ * Note that the @neg_mode parameter is always the PHYLINK_PCS_NEG_xxx
+ * state, not MLO_AN_xxx.
*/
-void pcs_get_state(struct phylink_pcs *pcs,
+void pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
struct phylink_link_state *state);
/**
* pcs_config() - Configure the PCS mode and advertisement
* @pcs: a pointer to a &struct phylink_pcs.
- * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @neg_mode: link negotiation mode (see below)
* @interface: interface mode to be used
 * @advertising: advertisement ethtool link mode mask
* @permit_pause_to_mac: permit forwarding pause resolution to MAC
@@ -402,8 +627,20 @@ void pcs_get_state(struct phylink_pcs *pcs,
* For 1000BASE-X, the advertisement should be programmed into the PCS.
*
* For most 10GBASE-R, there is no advertisement.
+ *
+ * The %neg_mode argument should be tested against the PHYLINK_PCS_NEG_*
+ * definitions.
+ *
+ * pcs_config() will be called when configuration of the PCS is required
+ * or when the advertisement is possibly updated. It must not unnecessarily
+ * disrupt an established link.
+ *
+ * When an autonegotiation restart is required for 802.3z modes, .pcs_config()
+ * should return a positive integer (e.g. 1) to indicate to phylink
+ * to call the pcs_an_restart() method.
*/
-int pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+int pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, const unsigned long *advertising,
bool permit_pause_to_mac);
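
A sketch of the positive-return convention described above, assuming a hypothetical clause-22 PCS behind an mdio_device (the foo_* names are invented; the helpers are the phylink_mii_c22_pcs_* ones declared later in this header):

static int foo_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
			  phy_interface_t interface,
			  const unsigned long *advertising,
			  bool permit_pause_to_mac)
{
	struct foo_pcs *fp = container_of(pcs, struct foo_pcs, pcs);
	int adv;

	adv = phylink_mii_c22_pcs_encode_advertisement(interface,
						       advertising);
	if (adv < 0)
		return 0;	/* nothing to program for this mode */

	/* mdiodev_modify_changed() returns 1 if the advertisement
	 * changed; a positive return tells phylink to call the
	 * pcs_an_restart() method.
	 */
	return mdiodev_modify_changed(fp->mdio, MII_ADVERTISE,
				      0xffff, adv);
}
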
@@ -419,7 +656,7 @@ void pcs_an_restart(struct phylink_pcs *pcs);
/**
* pcs_link_up() - program the PCS for the resolved link configuration
* @pcs: a pointer to a &struct phylink_pcs.
- * @mode: link autonegotiation mode
+ * @neg_mode: link negotiation mode (see below)
* @interface: link &typedef phy_interface_t mode
* @speed: link speed
* @duplex: link duplex
@@ -428,26 +665,90 @@ void pcs_an_restart(struct phylink_pcs *pcs);
* the resolved link parameters. For example, a PCS operating in SGMII
* mode without in-band AN needs to be manually configured for the link
* and duplex setting. Otherwise, this should be a no-op.
+ *
+ * The %neg_mode argument should be tested against the PHYLINK_PCS_NEG_*
+ * definitions.
*/
-void pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+void pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex);
+
+/**
+ * pcs_disable_eee() - Disable EEE at the PCS
+ * @pcs: a pointer to a &struct phylink_pcs
+ *
+ * Optional method informing the PCS that EEE has been disabled at the MAC.
+ */
+void pcs_disable_eee(struct phylink_pcs *pcs);
+
+/**
+ * pcs_enable_eee() - Enable EEE at the PCS
+ * @pcs: a pointer to a &struct phylink_pcs
+ *
+ * Optional method informing the PCS that EEE is about to be enabled at the MAC.
+ */
+void pcs_enable_eee(struct phylink_pcs *pcs);
+
+/**
+ * pcs_pre_init() - Configure PCS components necessary for MAC initialization
+ * @pcs: a pointer to a &struct phylink_pcs.
+ *
+ * This function can be called by MAC drivers through the
+ * phylink_pcs_pre_init() wrapper, before their hardware is initialized. It
+ * should not be called after the link is brought up, as reconfiguring the PCS
+ * at this point could break the link.
+ *
+ * Some MAC devices require specific hardware initialization to be performed by
+ * their associated PCS device before they can properly initialize their own
+ * hardware. An example of this is the initialization of stmmac controllers,
+ * which requires an active REF_CLK signal to be provided by the PHY/PCS.
+ *
+ * By calling phylink_pcs_pre_init(), MAC drivers can ensure that the PCS is
+ * setup in a way that allows for successful hardware initialization.
+ *
+ * The specific configuration performed by pcs_pre_init() is dependent on the
+ * model of PCS and the requirements of the MAC device attached to it. PCS
+ * driver authors should consider whether their target device is to be used in
+ * conjunction with a MAC device whose driver calls phylink_pcs_pre_init(). MAC
+ * driver authors should document their requirements for the PCS
+ * pre-initialization.
+ */
+int pcs_pre_init(struct phylink_pcs *pcs);
+
#endif
-struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *,
- phy_interface_t iface,
- const struct phylink_mac_ops *mac_ops);
-void phylink_set_pcs(struct phylink *, struct phylink_pcs *pcs);
+struct phylink *phylink_create(struct phylink_config *,
+ const struct fwnode_handle *,
+ phy_interface_t,
+ const struct phylink_mac_ops *);
void phylink_destroy(struct phylink *);
+bool phylink_expects_phy(struct phylink *pl);
int phylink_connect_phy(struct phylink *, struct phy_device *);
int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags);
+int phylink_fwnode_phy_connect(struct phylink *pl,
+ const struct fwnode_handle *fwnode,
+ u32 flags);
void phylink_disconnect_phy(struct phylink *);
+int phylink_set_fixed_link(struct phylink *,
+ const struct phylink_link_state *);
void phylink_mac_change(struct phylink *, bool up);
+void phylink_pcs_change(struct phylink_pcs *, bool up);
+
+int phylink_pcs_pre_init(struct phylink *pl, struct phylink_pcs *pcs);
void phylink_start(struct phylink *);
void phylink_stop(struct phylink *);
+void phylink_rx_clk_stop_block(struct phylink *);
+void phylink_rx_clk_stop_unblock(struct phylink *);
+
+void phylink_suspend(struct phylink *pl, bool mac_wol);
+void phylink_prepare_resume(struct phylink *pl);
+void phylink_resume(struct phylink *pl);
+
void phylink_ethtool_get_wol(struct phylink *, struct ethtool_wolinfo *);
int phylink_ethtool_set_wol(struct phylink *, struct ethtool_wolinfo *);
@@ -461,9 +762,8 @@ void phylink_ethtool_get_pauseparam(struct phylink *,
int phylink_ethtool_set_pauseparam(struct phylink *,
struct ethtool_pauseparam *);
int phylink_get_eee_err(struct phylink *);
-int phylink_init_eee(struct phylink *, bool);
-int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
-int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
+int phylink_ethtool_get_eee(struct phylink *link, struct ethtool_keee *eee);
+int phylink_ethtool_set_eee(struct phylink *link, struct ethtool_keee *eee);
int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
int phylink_speed_down(struct phylink *pl, bool sync);
int phylink_speed_up(struct phylink *pl);
@@ -478,18 +778,59 @@ int phylink_speed_up(struct phylink *pl);
#define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode)
void phylink_set_port_modes(unsigned long *bits);
-void phylink_helper_basex_speed(struct phylink_link_state *state);
+/**
+ * phylink_get_link_timer_ns - return the PCS link timer value
+ * @interface: link &typedef phy_interface_t mode
+ *
+ * Return the PCS link timer setting in nanoseconds for the PHY @interface
+ * mode, or -EINVAL if not appropriate.
+ */
+static inline int phylink_get_link_timer_ns(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
+ return 1600000;
+
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return 10000000;
+
+ default:
+ return -EINVAL;
+ }
+}
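
A hypothetical use of this helper (the register name and timer granularity are assumptions, not from this patch):

static int foo_pcs_set_link_timer(struct foo_pcs *fp,
				  phy_interface_t interface)
{
	int ns = phylink_get_link_timer_ns(interface);

	if (ns < 0)
		return ns;

	/* assuming the hardware counts the link timer in 1.024us units */
	return regmap_write(fp->regmap, FOO_LINK_TIMER_REG, ns / 1024);
}
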
+
+/**
+ * phylink_mac_implements_lpi() - determine if MAC implements LPI ops
+ * @ops: phylink_mac_ops structure
+ *
+ * Returns true if the phylink MAC operations structure indicates that the
+ * LPI operations have been implemented, false otherwise.
+ */
+static inline bool phylink_mac_implements_lpi(const struct phylink_mac_ops *ops)
+{
+ return ops && ops->mac_disable_tx_lpi && ops->mac_enable_tx_lpi;
+}
+
+void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state,
+ unsigned int neg_mode, u16 bmsr, u16 lpa);
void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
+ unsigned int neg_mode,
struct phylink_link_state *state);
-int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
- phy_interface_t interface,
- const unsigned long *advertising);
-int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
+int phylink_mii_c22_pcs_encode_advertisement(phy_interface_t interface,
+ const unsigned long *advertising);
+int phylink_mii_c22_pcs_config(struct mdio_device *pcs,
phy_interface_t interface,
- const unsigned long *advertising);
+ const unsigned long *advertising,
+ unsigned int neg_mode);
void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs);
+void phylink_resolve_c73(struct phylink_link_state *state);
+
void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
struct phylink_link_state *state);
diff --git a/include/linux/pid.h b/include/linux/pid.h
index fa10acb8d6a4..003a1027d219 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -2,18 +2,12 @@
#ifndef _LINUX_PID_H
#define _LINUX_PID_H
+#include <linux/pid_types.h>
#include <linux/rculist.h>
-#include <linux/wait.h>
+#include <linux/rcupdate.h>
#include <linux/refcount.h>
-
-enum pid_type
-{
- PIDTYPE_PID,
- PIDTYPE_TGID,
- PIDTYPE_PGID,
- PIDTYPE_SID,
- PIDTYPE_MAX,
-};
+#include <linux/sched.h>
+#include <linux/wait.h>
/*
* What is struct pid?
@@ -51,33 +45,44 @@ enum pid_type
* find_pid_ns() using the int nr and struct pid_namespace *ns.
*/
+#define RESERVED_PIDS 300
+
+struct pidfs_attr;
+
struct upid {
int nr;
struct pid_namespace *ns;
};
-struct pid
-{
+struct pid {
refcount_t count;
unsigned int level;
spinlock_t lock;
+ struct {
+ u64 ino;
+ struct rb_node pidfs_node;
+ struct dentry *stashed;
+ struct pidfs_attr *attr;
+ };
/* lists of tasks that use this pid */
struct hlist_head tasks[PIDTYPE_MAX];
struct hlist_head inodes;
/* wait queue for pidfd notifications */
wait_queue_head_t wait_pidfd;
struct rcu_head rcu;
- struct upid numbers[1];
+ struct upid numbers[];
};
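
With numbers[] now a flexible array holding one struct upid per namespace level, a namespace-relative lookup indexes it directly by the namespace's level. This sketch mirrors what pid_nr_ns() in kernel/pid.c does:

static pid_t foo_pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;

	if (!pid || ns->level > pid->level)
		return 0;

	/* valid only if this pid was allocated in ns or a descendant */
	upid = &pid->numbers[ns->level];
	return upid->ns == ns ? upid->nr : 0;
}
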
+extern seqcount_spinlock_t pidmap_lock_seq;
extern struct pid init_struct_pid;
-extern const struct file_operations pidfd_fops;
-
struct file;
-extern struct pid *pidfd_pid(const struct file *file);
+struct pid *pidfd_pid(const struct file *file);
struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags);
+struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags);
+int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret_file);
+void do_notify_pidfd(struct task_struct *task);
static inline struct pid *get_pid(struct pid *pid)
{
@@ -100,19 +105,13 @@ extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
* these helpers must be called with the tasklist_lock write-held.
*/
extern void attach_pid(struct task_struct *task, enum pid_type);
-extern void detach_pid(struct task_struct *task, enum pid_type);
-extern void change_pid(struct task_struct *task, enum pid_type,
- struct pid *pid);
+void detach_pid(struct pid **pids, struct task_struct *task, enum pid_type);
+void change_pid(struct pid **pids, struct task_struct *task, enum pid_type,
+ struct pid *pid);
extern void exchange_tids(struct task_struct *task, struct task_struct *old);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type);
-struct pid_namespace;
-extern struct pid_namespace init_pid_ns;
-
-extern int pid_max;
-extern int pid_max_min, pid_max_max;
-
/*
* look up a PID in the hash table. Must be called with the tasklist_lock
* or rcu_read_lock() held.
@@ -134,6 +133,7 @@ extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
extern struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
size_t set_tid_size);
extern void free_pid(struct pid *pid);
+void free_pids(struct pid **pids);
extern void disable_pid_allocation(struct pid_namespace *ns);
/*
@@ -212,4 +212,127 @@ pid_t pid_vnr(struct pid *pid);
} \
task = tg___; \
} while_each_pid_task(pid, type, task)
+
+static inline struct pid *task_pid(struct task_struct *task)
+{
+ return task->thread_pid;
+}
+
+/*
+ * the helpers to get the task's different pids as they are seen
+ * from various namespaces
+ *
+ * task_xid_nr() : global id, i.e. the id seen from the init namespace;
+ * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
+ * current.
+ * task_xid_nr_ns() : id seen from the ns specified;
+ *
+ * see also pid_nr() etc. earlier in this header
+ */
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
+
+static inline pid_t task_pid_nr(struct task_struct *tsk)
+{
+ return tsk->pid;
+}
+
+static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
+}
+
+static inline pid_t task_pid_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
+}
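
Illustrative usage of the _nr/_vnr/_nr_ns variants (sketch only; task_active_pid_ns() is declared in pid_namespace.h):

static void foo_show_pids(struct task_struct *tsk)
{
	pr_info("global pid %d, pid in its own namespace %d\n",
		task_pid_nr(tsk),
		task_pid_nr_ns(tsk, task_active_pid_ns(tsk)));
}
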
+
+static inline pid_t task_tgid_nr(struct task_struct *tsk)
+{
+ return tsk->tgid;
+}
+
+/**
+ * pid_alive - check that a task structure is not stale
+ * @p: Task structure to be checked.
+ *
+ * Test if a process is not yet dead (at most zombie state).
+ * If pid_alive() fails, then pointers within the task structure
+ * can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
+ */
+static inline int pid_alive(const struct task_struct *p)
+{
+ return p->thread_pid != NULL;
+}
+
+static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
+}
+
+static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
+}
+
+static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
+}
+
+static inline pid_t task_session_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
+}
+
+static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
+}
+
+static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
+}
+
+static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+{
+ pid_t pid = 0;
+
+ rcu_read_lock();
+ if (pid_alive(tsk))
+ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+ rcu_read_unlock();
+
+ return pid;
+}
+
+static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+{
+ return task_ppid_nr_ns(tsk, &init_pid_ns);
+}
+
+/* Obsolete, do not use: */
+static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+{
+ return task_pgrp_nr_ns(tsk, &init_pid_ns);
+}
+
+/**
+ * is_global_init - check if a task structure is init
+ * @tsk: Task structure to be checked.
+ *
+ * Check if a task structure is the first user space task the kernel created.
+ * Since init is free to have sub-threads, we need to check the tgid.
+ *
+ * Return: 1 if the task structure is init. 0 otherwise.
+ */
+static inline int is_global_init(struct task_struct *tsk)
+{
+ return task_tgid_nr(tsk) == 1;
+}
+
#endif /* _LINUX_PID_H */
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 7c7e627503d2..0e7ae12c96d2 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -16,6 +16,13 @@
struct fs_pin;
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
+/* modes for vm.memfd_noexec sysctl */
+#define MEMFD_NOEXEC_SCOPE_EXEC 0 /* MFD_EXEC implied if unset */
+#define MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL 1 /* MFD_NOEXEC_SEAL implied if unset */
+#define MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED 2 /* same as 1, except MFD_EXEC rejected */
+#endif
+
struct pid_namespace {
struct idr idr;
struct rcu_head rcu;
@@ -23,6 +30,7 @@ struct pid_namespace {
struct task_struct *child_reaper;
struct kmem_cache *pid_cachep;
unsigned int level;
+ int pid_max;
struct pid_namespace *parent;
#ifdef CONFIG_BSD_PROCESS_ACCT
struct fs_pin *bacct;
@@ -31,6 +39,14 @@ struct pid_namespace {
struct ucounts *ucounts;
int reboot; /* group exit code if this pidns was rebooted */
struct ns_common ns;
+ struct work_struct work;
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_set set;
+ struct ctl_table_header *sysctls;
+#if defined(CONFIG_MEMFD_CREATE)
+ int memfd_noexec_scope;
+#endif
+#endif
} __randomize_layout;
extern struct pid_namespace init_pid_ns;
@@ -38,19 +54,43 @@ extern struct pid_namespace init_pid_ns;
#define PIDNS_ADDING (1U << 31)
#ifdef CONFIG_PID_NS
+static inline struct pid_namespace *to_pid_ns(struct ns_common *ns)
+{
+ return container_of(ns, struct pid_namespace, ns);
+}
+
static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
{
- if (ns != &init_pid_ns)
- refcount_inc(&ns->ns.count);
+ ns_ref_inc(ns);
return ns;
}
-extern struct pid_namespace *copy_pid_ns(unsigned long flags,
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
+static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
+{
+ int scope = MEMFD_NOEXEC_SCOPE_EXEC;
+
+ for (; ns; ns = ns->parent)
+ scope = max(scope, READ_ONCE(ns->memfd_noexec_scope));
+
+ return scope;
+}
+#else
+static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
+{
+ return 0;
+}
+#endif
+
+extern struct pid_namespace *copy_pid_ns(u64 flags,
struct user_namespace *user_ns, struct pid_namespace *ns);
extern void zap_pid_ns_processes(struct pid_namespace *pid_ns);
extern int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd);
extern void put_pid_ns(struct pid_namespace *ns);
+extern bool pidns_is_ancestor(struct pid_namespace *child,
+ struct pid_namespace *ancestor);
+
#else /* !CONFIG_PID_NS */
#include <linux/err.h>
@@ -59,7 +99,12 @@ static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
return ns;
}
-static inline struct pid_namespace *copy_pid_ns(unsigned long flags,
+static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
+{
+ return 0;
+}
+
+static inline struct pid_namespace *copy_pid_ns(u64 flags,
struct user_namespace *user_ns, struct pid_namespace *ns)
{
if (flags & CLONE_NEWPID)
@@ -80,10 +125,23 @@ static inline int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
{
return 0;
}
+
+static inline bool pidns_is_ancestor(struct pid_namespace *child,
+ struct pid_namespace *ancestor)
+{
+ return false;
+}
#endif /* CONFIG_PID_NS */
extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk);
void pidhash_init(void);
void pid_idr_init(void);
+int register_pidns_sysctls(struct pid_namespace *pidns);
+void unregister_pidns_sysctls(struct pid_namespace *pidns);
+
+static inline bool task_is_in_init_pid_ns(struct task_struct *tsk)
+{
+ return task_active_pid_ns(tsk) == &init_pid_ns;
+}
#endif /* _LINUX_PID_NS_H */
diff --git a/include/linux/pid_types.h b/include/linux/pid_types.h
new file mode 100644
index 000000000000..c2aee1d91dcf
--- /dev/null
+++ b/include/linux/pid_types.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PID_TYPES_H
+#define _LINUX_PID_TYPES_H
+
+enum pid_type {
+ PIDTYPE_PID,
+ PIDTYPE_TGID,
+ PIDTYPE_PGID,
+ PIDTYPE_SID,
+ PIDTYPE_MAX,
+};
+
+struct pid_namespace;
+extern struct pid_namespace init_pid_ns;
+
+#endif /* _LINUX_PID_TYPES_H */
diff --git a/include/linux/pidfs.h b/include/linux/pidfs.h
new file mode 100644
index 000000000000..3e08c33da2df
--- /dev/null
+++ b/include/linux/pidfs.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PID_FS_H
+#define _LINUX_PID_FS_H
+
+struct coredump_params;
+
+struct file *pidfs_alloc_file(struct pid *pid, unsigned int flags);
+void __init pidfs_init(void);
+void pidfs_add_pid(struct pid *pid);
+void pidfs_remove_pid(struct pid *pid);
+void pidfs_exit(struct task_struct *tsk);
+#ifdef CONFIG_COREDUMP
+void pidfs_coredump(const struct coredump_params *cprm);
+#endif
+extern const struct dentry_operations pidfs_dentry_operations;
+int pidfs_register_pid(struct pid *pid);
+void pidfs_free_pid(struct pid *pid);
+
+#endif /* _LINUX_PID_FS_H */
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 019fecd75d0c..63ce16191eb9 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -12,45 +12,54 @@
#define __LINUX_PINCTRL_CONSUMER_H
#include <linux/err.h>
-#include <linux/list.h>
-#include <linux/seq_file.h>
+#include <linux/types.h>
+
#include <linux/pinctrl/pinctrl-state.h>
+struct device;
+struct gpio_chip;
+
/* This struct is private to the core and should be regarded as a cookie */
struct pinctrl;
struct pinctrl_state;
-struct device;
#ifdef CONFIG_PINCTRL
/* External interface to pin control */
-extern bool pinctrl_gpio_can_use_line(unsigned gpio);
-extern int pinctrl_gpio_request(unsigned gpio);
-extern void pinctrl_gpio_free(unsigned gpio);
-extern int pinctrl_gpio_direction_input(unsigned gpio);
-extern int pinctrl_gpio_direction_output(unsigned gpio);
-extern int pinctrl_gpio_set_config(unsigned gpio, unsigned long config);
-
-extern struct pinctrl * __must_check pinctrl_get(struct device *dev);
-extern void pinctrl_put(struct pinctrl *p);
-extern struct pinctrl_state * __must_check pinctrl_lookup_state(
- struct pinctrl *p,
- const char *name);
-extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
-
-extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
-extern void devm_pinctrl_put(struct pinctrl *p);
-extern int pinctrl_select_default_state(struct device *dev);
+bool pinctrl_gpio_can_use_line(struct gpio_chip *gc, unsigned int offset);
+int pinctrl_gpio_request(struct gpio_chip *gc, unsigned int offset);
+void pinctrl_gpio_free(struct gpio_chip *gc, unsigned int offset);
+int pinctrl_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset);
+int pinctrl_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset);
+int pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config);
+
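A sketch of how a GPIO driver consumes the new chip+offset based calls (hypothetical foo_* callbacks; this is essentially what the gpiochip_generic_* helpers do):

static int foo_gpio_request(struct gpio_chip *gc, unsigned int offset)
{
	return pinctrl_gpio_request(gc, offset);
}

static void foo_gpio_free(struct gpio_chip *gc, unsigned int offset)
{
	pinctrl_gpio_free(gc, offset);
}
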
+struct pinctrl * __must_check pinctrl_get(struct device *dev);
+void pinctrl_put(struct pinctrl *p);
+struct pinctrl_state * __must_check pinctrl_lookup_state(struct pinctrl *p,
+ const char *name);
+int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
+
+struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
+void devm_pinctrl_put(struct pinctrl *p);
+int pinctrl_select_default_state(struct device *dev);
#ifdef CONFIG_PM
-extern int pinctrl_pm_select_default_state(struct device *dev);
-extern int pinctrl_pm_select_sleep_state(struct device *dev);
-extern int pinctrl_pm_select_idle_state(struct device *dev);
+int pinctrl_pm_select_default_state(struct device *dev);
+int pinctrl_pm_select_init_state(struct device *dev);
+int pinctrl_pm_select_sleep_state(struct device *dev);
+int pinctrl_pm_select_idle_state(struct device *dev);
#else
static inline int pinctrl_pm_select_default_state(struct device *dev)
{
return 0;
}
+static inline int pinctrl_pm_select_init_state(struct device *dev)
+{
+ return 0;
+}
static inline int pinctrl_pm_select_sleep_state(struct device *dev)
{
return 0;
@@ -63,31 +72,38 @@ static inline int pinctrl_pm_select_idle_state(struct device *dev)
#else /* !CONFIG_PINCTRL */
-static inline bool pinctrl_gpio_can_use_line(unsigned gpio)
+static inline bool
+pinctrl_gpio_can_use_line(struct gpio_chip *gc, unsigned int offset)
{
return true;
}
-static inline int pinctrl_gpio_request(unsigned gpio)
+static inline int
+pinctrl_gpio_request(struct gpio_chip *gc, unsigned int offset)
{
return 0;
}
-static inline void pinctrl_gpio_free(unsigned gpio)
+static inline void
+pinctrl_gpio_free(struct gpio_chip *gc, unsigned int offset)
{
}
-static inline int pinctrl_gpio_direction_input(unsigned gpio)
+static inline int
+pinctrl_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
{
return 0;
}
-static inline int pinctrl_gpio_direction_output(unsigned gpio)
+static inline int
+pinctrl_gpio_direction_output(struct gpio_chip *gc, unsigned int offset)
{
return 0;
}
-static inline int pinctrl_gpio_set_config(unsigned gpio, unsigned long config)
+static inline int
+pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
{
return 0;
}
@@ -101,9 +117,8 @@ static inline void pinctrl_put(struct pinctrl *p)
{
}
-static inline struct pinctrl_state * __must_check pinctrl_lookup_state(
- struct pinctrl *p,
- const char *name)
+static inline struct pinctrl_state * __must_check pinctrl_lookup_state(struct pinctrl *p,
+ const char *name)
{
return NULL;
}
@@ -133,6 +148,11 @@ static inline int pinctrl_pm_select_default_state(struct device *dev)
return 0;
}
+static inline int pinctrl_pm_select_init_state(struct device *dev)
+{
+ return 0;
+}
+
static inline int pinctrl_pm_select_sleep_state(struct device *dev)
{
return 0;
@@ -145,8 +165,8 @@ static inline int pinctrl_pm_select_idle_state(struct device *dev)
#endif /* CONFIG_PINCTRL */
-static inline struct pinctrl * __must_check pinctrl_get_select(
- struct device *dev, const char *name)
+static inline struct pinctrl * __must_check pinctrl_get_select(struct device *dev,
+ const char *name)
{
struct pinctrl *p;
struct pinctrl_state *s;
@@ -171,14 +191,13 @@ static inline struct pinctrl * __must_check pinctrl_get_select(
return p;
}
-static inline struct pinctrl * __must_check pinctrl_get_select_default(
- struct device *dev)
+static inline struct pinctrl * __must_check pinctrl_get_select_default(struct device *dev)
{
return pinctrl_get_select(dev, PINCTRL_STATE_DEFAULT);
}
-static inline struct pinctrl * __must_check devm_pinctrl_get_select(
- struct device *dev, const char *name)
+static inline struct pinctrl * __must_check devm_pinctrl_get_select(struct device *dev,
+ const char *name)
{
struct pinctrl *p;
struct pinctrl_state *s;
@@ -203,8 +222,7 @@ static inline struct pinctrl * __must_check devm_pinctrl_get_select(
return p;
}
-static inline struct pinctrl * __must_check devm_pinctrl_get_select_default(
- struct device *dev)
+static inline struct pinctrl * __must_check devm_pinctrl_get_select_default(struct device *dev)
{
return devm_pinctrl_get_select(dev, PINCTRL_STATE_DEFAULT);
}
diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h
index a48ff69acddd..bb6653af4f92 100644
--- a/include/linux/pinctrl/devinfo.h
+++ b/include/linux/pinctrl/devinfo.h
@@ -14,11 +14,17 @@
#ifndef PINCTRL_DEVINFO_H
#define PINCTRL_DEVINFO_H
+struct device;
+
#ifdef CONFIG_PINCTRL
+#include <linux/device.h>
+
/* The device core acts as a consumer toward pinctrl */
#include <linux/pinctrl/consumer.h>
+struct pinctrl;
+
/**
* struct dev_pin_info - pin state container for devices
* @p: pinctrl handle for the containing device
@@ -40,9 +46,15 @@ struct dev_pin_info {
extern int pinctrl_bind_pins(struct device *dev);
extern int pinctrl_init_done(struct device *dev);
-#else
+static inline struct pinctrl *dev_pinctrl(struct device *dev)
+{
+ if (!dev->pins)
+ return NULL;
-struct device;
+ return dev->pins->p;
+}
+
+#else
/* Stubs if we're not using pinctrl */
@@ -56,5 +68,10 @@ static inline int pinctrl_init_done(struct device *dev)
return 0;
}
+static inline struct pinctrl *dev_pinctrl(struct device *dev)
+{
+ return NULL;
+}
+
#endif /* CONFIG_PINCTRL */
#endif /* PINCTRL_DEVINFO_H */
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
index e987dc9fd2af..25620229b1d6 100644
--- a/include/linux/pinctrl/machine.h
+++ b/include/linux/pinctrl/machine.h
@@ -11,7 +11,7 @@
#ifndef __LINUX_PINCTRL_MACHINE_H
#define __LINUX_PINCTRL_MACHINE_H
-#include <linux/bug.h>
+#include <linux/array_size.h>
#include <linux/pinctrl/pinctrl-state.h>
@@ -47,7 +47,7 @@ struct pinctrl_map_mux {
struct pinctrl_map_configs {
const char *group_or_pin;
unsigned long *configs;
- unsigned num_configs;
+ unsigned int num_configs;
};
/**
@@ -149,16 +149,29 @@ struct pinctrl_map {
#define PIN_MAP_CONFIGS_GROUP_HOG_DEFAULT(dev, grp, cfgs) \
PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, cfgs)
+struct device;
+struct pinctrl_map;
+
#ifdef CONFIG_PINCTRL
-extern int pinctrl_register_mappings(const struct pinctrl_map *map,
- unsigned num_maps);
-extern void pinctrl_unregister_mappings(const struct pinctrl_map *map);
-extern void pinctrl_provide_dummies(void);
+int pinctrl_register_mappings(const struct pinctrl_map *map,
+ unsigned int num_maps);
+int devm_pinctrl_register_mappings(struct device *dev,
+ const struct pinctrl_map *map,
+ unsigned int num_maps);
+void pinctrl_unregister_mappings(const struct pinctrl_map *map);
+void pinctrl_provide_dummies(void);
#else
static inline int pinctrl_register_mappings(const struct pinctrl_map *map,
- unsigned num_maps)
+ unsigned int num_maps)
+{
+ return 0;
+}
+
+static inline int devm_pinctrl_register_mappings(struct device *dev,
+ const struct pinctrl_map *map,
+ unsigned int num_maps)
{
return 0;
}
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index e18ab3d5908f..1be4032071c2 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -11,9 +11,12 @@
#ifndef __LINUX_PINCTRL_PINCONF_GENERIC_H
#define __LINUX_PINCTRL_PINCONF_GENERIC_H
-#include <linux/device.h>
+#include <linux/types.h>
+
#include <linux/pinctrl/machine.h>
+struct device_node;
+
struct pinctrl_dev;
struct pinctrl_map;
@@ -35,7 +38,8 @@ struct pinctrl_map;
* impedance.
* @PIN_CONFIG_BIAS_PULL_DOWN: the pin will be pulled down (usually with high
* impedance to GROUND). If the argument is != 0 pull-down is enabled,
- * if it is 0, pull-down is total, i.e. the pin is connected to GROUND.
+ * and the value is interpreted by the driver: it can be custom or in an
+ * SI unit such as Ohms.
* @PIN_CONFIG_BIAS_PULL_PIN_DEFAULT: the pin will be pulled up or down based
* on embedded knowledge of the controller hardware, like current mux
* function. The pull direction and possibly strength too will normally
@@ -46,7 +50,8 @@ struct pinctrl_map;
* @PIN_CONFIG_BIAS_DISABLE.
* @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
* impedance to VDD). If the argument is != 0 pull-up is enabled,
- * if it is 0, pull-up is total, i.e. the pin is connected to VDD.
+ * and the value is interpreted by the driver: it can be custom or in an
+ * SI unit such as Ohms.
* @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
* collector) which means it is usually wired with other output ports
* which are then pulled up with an external resistor. Setting this
@@ -76,33 +81,47 @@ struct pinctrl_map;
* @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
* If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
* schmitt-trigger mode is disabled.
+ * @PIN_CONFIG_INPUT_SCHMITT_UV: this will configure an input pin to run in
+ * schmitt-trigger mode. The argument is in uV.
* @PIN_CONFIG_MODE_LOW_POWER: this will configure the pin for low power
* operation, if several modes of operation are supported these can be
* passed in the argument on a custom form, else just use argument 1
* to indicate low power mode, argument 0 turns low power mode off.
* @PIN_CONFIG_MODE_PWM: this will configure the pin for PWM
+ * @PIN_CONFIG_LEVEL: setting this will configure the pin as an output and
+ * drive a value on the line. Use argument 1 to indicate high level,
+ * argument 0 to indicate low level. Conversely the value of the line
+ * can be read using this parameter, if and only if that value can be
+ * represented as a binary 0 or 1, where 0 indicates a low voltage level
+ * and 1 indicates a high voltage level.
+ * (Please see Documentation/driver-api/pin-control.rst,
+ * section "GPIO mode pitfalls" for a discussion around this parameter.)
* @PIN_CONFIG_OUTPUT_ENABLE: this will enable the pin's output mode
* without driving a value there. For most platforms this reduces to
* enable the output buffers and then let the pin controller current
* configuration (eg. the currently selected mux function) drive values on
* the line. Use argument 1 to enable output mode, argument 0 to disable
* it.
- * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
- * value on the line. Use argument 1 to indicate high level, argument 0 to
- * indicate low level. (Please see Documentation/driver-api/pinctl.rst,
- * section "GPIO mode pitfalls" for a discussion around this parameter.)
+ * @PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS: this will configure the output impedance
+ * of the pin with the value passed as argument. The argument is in ohms.
* @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset
* @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
* supplies, the argument to this parameter (on a custom format) tells
* the driver which alternative power source to use.
- * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state.
- * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
- * this parameter (on a custom format) tells the driver which alternative
- * slew rate to use.
* @PIN_CONFIG_SKEW_DELAY: if the pin has programmable skew rate (on inputs)
* or latch delay (on outputs) this parameter (in a custom format)
* specifies the clock skew or latch delay. It typically controls how
* many double inverters are put in front of the line.
+ * @PIN_CONFIG_SKEW_DELAY_INPUT_PS: if the pin has independent values for the
+ * programmable skew rate (on inputs) and latch delay (on outputs), then
+ * this parameter specifies the clock skew only. The argument is in ps.
+ * @PIN_CONFIG_SKEW_DELAY_OUTPUT_PS: if the pin has independent values for the
+ * programmable skew rate (on inputs) and latch delay (on outputs), then
+ * this parameter specifies the latch delay only. The argument is in ps.
+ * @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state.
+ * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
+ * this parameter (on a custom format) tells the driver which alternative
+ * slew rate to use.
* @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
* you need to pass in custom configurations to the pin controller, use
* PIN_CONFIG_END+1 as the base offset.
@@ -125,15 +144,19 @@ enum pin_config_param {
PIN_CONFIG_INPUT_ENABLE,
PIN_CONFIG_INPUT_SCHMITT,
PIN_CONFIG_INPUT_SCHMITT_ENABLE,
+ PIN_CONFIG_INPUT_SCHMITT_UV,
PIN_CONFIG_MODE_LOW_POWER,
PIN_CONFIG_MODE_PWM,
+ PIN_CONFIG_LEVEL,
PIN_CONFIG_OUTPUT_ENABLE,
- PIN_CONFIG_OUTPUT,
+ PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS,
PIN_CONFIG_PERSIST_STATE,
PIN_CONFIG_POWER_SOURCE,
+ PIN_CONFIG_SKEW_DELAY,
+ PIN_CONFIG_SKEW_DELAY_INPUT_PS,
+ PIN_CONFIG_SKEW_DELAY_OUTPUT_PS,
PIN_CONFIG_SLEEP_HARDWARE_STATE,
PIN_CONFIG_SLEW_RATE,
- PIN_CONFIG_SKEW_DELAY,
PIN_CONFIG_END = 0x7F,
PIN_CONFIG_MAX = 0xFF,
};
@@ -166,52 +189,59 @@ static inline unsigned long pinconf_to_config_packed(enum pin_config_param param
return PIN_CONF_PACKED(param, argument);
}
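
Packing and unpacking example (values invented; the unpack helpers are the pinconf_to_config_param()/pinconf_to_config_argument() inlines defined alongside it in this header):

/* a 10 kOhm pull-up, expressed with the Ohm-valued argument */
unsigned long cfg = pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP,
					     10000);
enum pin_config_param param = pinconf_to_config_param(cfg);
u32 arg = pinconf_to_config_argument(cfg);	/* 10000 */
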
-#define PCONFDUMP(a, b, c, d) { \
- .param = a, .display = b, .format = c, .has_arg = d \
+#define PCONFDUMP_WITH_VALUES(a, b, c, d, e, f) { \
+ .param = a, .display = b, .format = c, .has_arg = d, \
+ .values = e, .num_values = f \
}
+#define PCONFDUMP(a, b, c, d) PCONFDUMP_WITH_VALUES(a, b, c, d, NULL, 0)
+
struct pin_config_item {
const enum pin_config_param param;
const char * const display;
const char * const format;
bool has_arg;
+ const char * const *values;
+ size_t num_values;
};
struct pinconf_generic_params {
const char * const property;
enum pin_config_param param;
u32 default_value;
+ const char * const *values;
+ size_t num_values;
};
int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
struct device_node *np, struct pinctrl_map **map,
- unsigned *reserved_maps, unsigned *num_maps,
+ unsigned int *reserved_maps, unsigned int *num_maps,
enum pinctrl_map_type type);
int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
struct device_node *np_config, struct pinctrl_map **map,
- unsigned *num_maps, enum pinctrl_map_type type);
+ unsigned int *num_maps, enum pinctrl_map_type type);
void pinconf_generic_dt_free_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps);
+ struct pinctrl_map *map, unsigned int num_maps);
-static inline int pinconf_generic_dt_node_to_map_group(
- struct pinctrl_dev *pctldev, struct device_node *np_config,
- struct pinctrl_map **map, unsigned *num_maps)
+static inline int pinconf_generic_dt_node_to_map_group(struct pinctrl_dev *pctldev,
+ struct device_node *np_config, struct pinctrl_map **map,
+ unsigned int *num_maps)
{
return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
PIN_MAP_TYPE_CONFIGS_GROUP);
}
-static inline int pinconf_generic_dt_node_to_map_pin(
- struct pinctrl_dev *pctldev, struct device_node *np_config,
- struct pinctrl_map **map, unsigned *num_maps)
+static inline int pinconf_generic_dt_node_to_map_pin(struct pinctrl_dev *pctldev,
+ struct device_node *np_config, struct pinctrl_map **map,
+ unsigned int *num_maps)
{
return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
PIN_MAP_TYPE_CONFIGS_PIN);
}
-static inline int pinconf_generic_dt_node_to_map_all(
- struct pinctrl_dev *pctldev, struct device_node *np_config,
- struct pinctrl_map **map, unsigned *num_maps)
+static inline int pinconf_generic_dt_node_to_map_all(struct pinctrl_dev *pctldev,
+ struct device_node *np_config, struct pinctrl_map **map,
+ unsigned int *num_maps)
{
/*
* passing the type as PIN_MAP_TYPE_INVALID causes the underlying parser
@@ -221,4 +251,8 @@ static inline int pinconf_generic_dt_node_to_map_all(
PIN_MAP_TYPE_INVALID);
}
+int pinconf_generic_dt_node_to_map_pinmux(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps);
#endif /* __LINUX_PINCTRL_PINCONF_GENERIC_H */
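
A minimal sketch of how a caller could pack the new picosecond skew parameters with the pinconf_to_config_packed() helper shown above; the 120/250 ps values and the example_pack_skew_configs() name are invented for illustration.

#include <linux/pinctrl/pinconf-generic.h>

/* Pack independent input skew and output latch delay (arguments in ps). */
static void example_pack_skew_configs(unsigned long configs[2])
{
	configs[0] = pinconf_to_config_packed(PIN_CONFIG_SKEW_DELAY_INPUT_PS, 120);
	configs[1] = pinconf_to_config_packed(PIN_CONFIG_SKEW_DELAY_OUTPUT_PS, 250);
}

Each packed value can then be handed to a pin_config_set()-style interface as a single unsigned long.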
diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h
index f8a8215e9021..770ec2221156 100644
--- a/include/linux/pinctrl/pinconf.h
+++ b/include/linux/pinctrl/pinconf.h
@@ -40,25 +40,25 @@ struct pinconf_ops {
bool is_generic;
#endif
int (*pin_config_get) (struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *config);
int (*pin_config_set) (struct pinctrl_dev *pctldev,
- unsigned pin,
+ unsigned int pin,
unsigned long *configs,
- unsigned num_configs);
+ unsigned int num_configs);
int (*pin_config_group_get) (struct pinctrl_dev *pctldev,
- unsigned selector,
+ unsigned int selector,
unsigned long *config);
int (*pin_config_group_set) (struct pinctrl_dev *pctldev,
- unsigned selector,
+ unsigned int selector,
unsigned long *configs,
- unsigned num_configs);
+ unsigned int num_configs);
void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev,
struct seq_file *s,
- unsigned offset);
+ unsigned int offset);
void (*pin_config_group_dbg_show) (struct pinctrl_dev *pctldev,
struct seq_file *s,
- unsigned selector);
+ unsigned int selector);
void (*pin_config_config_dbg_show) (struct pinctrl_dev *pctldev,
struct seq_file *s,
unsigned long config);
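
For reference, a hedged sketch of a driver-side pinconf_ops after the unsigned -> unsigned int conversion; the foo_ names are hypothetical and the callbacks are stubs, not a real controller implementation.

#include <linux/errno.h>
#include <linux/pinctrl/pinconf.h>

static int foo_pin_config_get(struct pinctrl_dev *pctldev,
			      unsigned int pin, unsigned long *config)
{
	/* A real driver would decode the hardware state of @pin here. */
	return -ENOTSUPP;
}

static int foo_pin_config_set(struct pinctrl_dev *pctldev,
			      unsigned int pin, unsigned long *configs,
			      unsigned int num_configs)
{
	/* A real driver would apply each packed config in @configs. */
	return -ENOTSUPP;
}

static const struct pinconf_ops foo_pinconf_ops = {
	.is_generic = true,	/* field only present under CONFIG_GENERIC_PINCONF */
	.pin_config_get = foo_pin_config_get,
	.pin_config_set = foo_pin_config_set,
};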
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 70b45d28e7a9..1a8084e29405 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -11,20 +11,41 @@
#ifndef __LINUX_PINCTRL_PINCTRL_H
#define __LINUX_PINCTRL_PINCTRL_H
-#include <linux/radix-tree.h>
-#include <linux/list.h>
-#include <linux/seq_file.h>
-#include <linux/pinctrl/pinctrl-state.h>
-#include <linux/pinctrl/devinfo.h>
+#include <linux/bits.h>
+#include <linux/types.h>
struct device;
+struct device_node;
+struct gpio_chip;
+struct module;
+struct seq_file;
+
+struct pin_config_item;
+struct pinconf_generic_params;
+struct pinconf_ops;
struct pinctrl_dev;
struct pinctrl_map;
struct pinmux_ops;
-struct pinconf_ops;
-struct pin_config_item;
-struct gpio_chip;
-struct device_node;
+
+/**
+ * struct pingroup - provides information on pingroup
+ * @name: a name for pingroup
+ * @pins: an array of pins in the pingroup
+ * @npins: number of pins in the pingroup
+ */
+struct pingroup {
+ const char *name;
+ const unsigned int *pins;
+ size_t npins;
+};
+
+/* Convenience macro to define a single named or anonymous pingroup */
+#define PINCTRL_PINGROUP(_name, _pins, _npins) \
+(struct pingroup) { \
+ .name = _name, \
+ .pins = _pins, \
+ .npins = _npins, \
+}
/**
* struct pinctrl_pin_desc - boards/machines provide information on their
@@ -34,7 +55,7 @@ struct device_node;
* @drv_data: driver-defined per-pin data. pinctrl core does not touch this
*/
struct pinctrl_pin_desc {
- unsigned number;
+ unsigned int number;
const char *name;
void *drv_data;
};
@@ -62,7 +83,7 @@ struct pinctrl_gpio_range {
unsigned int base;
unsigned int pin_base;
unsigned int npins;
- unsigned const *pins;
+ unsigned int const *pins;
struct gpio_chip *gc;
};
@@ -88,18 +109,18 @@ struct pinctrl_gpio_range {
struct pinctrl_ops {
int (*get_groups_count) (struct pinctrl_dev *pctldev);
const char *(*get_group_name) (struct pinctrl_dev *pctldev,
- unsigned selector);
+ unsigned int selector);
int (*get_group_pins) (struct pinctrl_dev *pctldev,
- unsigned selector,
- const unsigned **pins,
- unsigned *num_pins);
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins);
void (*pin_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s,
- unsigned offset);
+ unsigned int offset);
int (*dt_node_to_map) (struct pinctrl_dev *pctldev,
struct device_node *np_config,
- struct pinctrl_map **map, unsigned *num_maps);
+ struct pinctrl_map **map, unsigned int *num_maps);
void (*dt_free_map) (struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps);
+ struct pinctrl_map *map, unsigned int num_maps);
};
/**
@@ -145,25 +166,25 @@ struct pinctrl_desc {
/* External interface to pin controller */
-extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
+extern int pinctrl_register_and_init(const struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data,
struct pinctrl_dev **pctldev);
extern int pinctrl_enable(struct pinctrl_dev *pctldev);
/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */
-extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
+extern struct pinctrl_dev *pinctrl_register(const struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data);
extern void pinctrl_unregister(struct pinctrl_dev *pctldev);
extern int devm_pinctrl_register_and_init(struct device *dev,
- struct pinctrl_desc *pctldesc,
+ const struct pinctrl_desc *pctldesc,
void *driver_data,
struct pinctrl_dev **pctldev);
/* Please use devm_pinctrl_register_and_init() instead */
extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev,
- struct pinctrl_desc *pctldesc,
+ const struct pinctrl_desc *pctldesc,
void *driver_data);
extern void devm_pinctrl_unregister(struct device *dev,
@@ -173,7 +194,7 @@ extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range);
extern void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *ranges,
- unsigned nranges);
+ unsigned int nranges);
extern void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range);
@@ -183,8 +204,41 @@ extern struct pinctrl_gpio_range *
pinctrl_find_gpio_range_from_pin(struct pinctrl_dev *pctldev,
unsigned int pin);
extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
- const char *pin_group, const unsigned **pins,
- unsigned *num_pins);
+ const char *pin_group, const unsigned int **pins,
+ unsigned int *num_pins);
+
+#define PINFUNCTION_FLAG_GPIO BIT(0)
+
+/**
+ * struct pinfunction - Description about a function
+ * @name: Name of the function
+ * @groups: An array of groups for this function
+ * @ngroups: Number of groups in @groups
+ * @flags: Additional pin function flags
+ */
+struct pinfunction {
+ const char *name;
+ const char * const *groups;
+ size_t ngroups;
+ unsigned long flags;
+};
+
+/* Convenience macro to define a single named pinfunction */
+#define PINCTRL_PINFUNCTION(_name, _groups, _ngroups) \
+(struct pinfunction) { \
+ .name = (_name), \
+ .groups = (_groups), \
+ .ngroups = (_ngroups), \
+ }
+
+/* Same as PINCTRL_PINFUNCTION() but for the GPIO category of functions */
+#define PINCTRL_GPIO_PINFUNCTION(_name, _groups, _ngroups) \
+(struct pinfunction) { \
+ .name = (_name), \
+ .groups = (_groups), \
+ .ngroups = (_ngroups), \
+ .flags = PINFUNCTION_FLAG_GPIO, \
+ }
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PINCTRL)
extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np);
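
A sketch of how a driver might use the new compound-literal helpers; the pin numbers, group and function names below are made up.

#include <linux/kernel.h>
#include <linux/pinctrl/pinctrl.h>

/* Hypothetical pins 4 and 5 forming a single I2C group. */
static const unsigned int foo_i2c0_pins[] = { 4, 5 };
static const char * const foo_i2c0_groups[] = { "i2c0_grp" };

static const struct pingroup foo_groups[] = {
	PINCTRL_PINGROUP("i2c0_grp", foo_i2c0_pins, ARRAY_SIZE(foo_i2c0_pins)),
};

static const struct pinfunction foo_functions[] = {
	PINCTRL_PINFUNCTION("i2c0", foo_i2c0_groups, ARRAY_SIZE(foo_i2c0_groups)),
	/* The GPIO variant additionally sets PINFUNCTION_FLAG_GPIO. */
	PINCTRL_GPIO_PINFUNCTION("gpio", foo_i2c0_groups, ARRAY_SIZE(foo_i2c0_groups)),
};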
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index 9a647fa5c8f1..094bbe2fd6fd 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -11,11 +11,10 @@
#ifndef __LINUX_PINCTRL_PINMUX_H
#define __LINUX_PINCTRL_PINMUX_H
-#include <linux/list.h>
-#include <linux/seq_file.h>
-#include <linux/pinctrl/pinctrl.h>
+#include <linux/types.h>
struct pinctrl_dev;
+struct pinctrl_gpio_range;
/**
* struct pinmux_ops - pinmux operations, to be implemented by pin controller
@@ -36,6 +35,16 @@ struct pinctrl_dev;
* name can be used with the generic @pinctrl_ops to retrieve the
* actual pins affected. The applicable groups will be returned in
* @groups and the number of groups in @num_groups
+ * @function_is_gpio: determine whether the function selector passed in
+ * corresponds to the GPIO function used by the accelerated GPIO
+ * functions @gpio_request_enable, @gpio_disable_free and
+ * @gpio_set_direction. When the pin control core can properly determine
+ * if a function is a GPIO function, it is easier to use the @strict mode
+ * on the pin controller. Since a single function is passed, this is
+ * only useful on pin controllers that use a specific function for GPIO,
+ * and that usually presupposes that a one-group-per-pin approach is
+ * used, so that a single function can be set on a single pin to turn
+ * it to GPIO mode.
* @set_mux: enable a certain muxing function with a certain pin group. The
* driver does not need to figure out whether enabling this function
* conflicts some other use of the pins in that group, such collisions
@@ -58,26 +67,28 @@ struct pinctrl_dev;
* the pin request.
*/
struct pinmux_ops {
- int (*request) (struct pinctrl_dev *pctldev, unsigned offset);
- int (*free) (struct pinctrl_dev *pctldev, unsigned offset);
+ int (*request) (struct pinctrl_dev *pctldev, unsigned int offset);
+ int (*free) (struct pinctrl_dev *pctldev, unsigned int offset);
int (*get_functions_count) (struct pinctrl_dev *pctldev);
const char *(*get_function_name) (struct pinctrl_dev *pctldev,
- unsigned selector);
+ unsigned int selector);
int (*get_function_groups) (struct pinctrl_dev *pctldev,
- unsigned selector,
- const char * const **groups,
- unsigned *num_groups);
- int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector,
- unsigned group_selector);
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int *num_groups);
+ bool (*function_is_gpio) (struct pinctrl_dev *pctldev,
+ unsigned int selector);
+ int (*set_mux) (struct pinctrl_dev *pctldev, unsigned int func_selector,
+ unsigned int group_selector);
int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset);
+ unsigned int offset);
void (*gpio_disable_free) (struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset);
+ unsigned int offset);
int (*gpio_set_direction) (struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
- unsigned offset,
+ unsigned int offset,
bool input);
bool strict;
};
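
A hedged sketch of the new callback in use: once the core can identify the GPIO function selector through ->function_is_gpio(), strict mode becomes practical on one-group-per-pin controllers. The foo_ names and the choice of selector 0 are assumptions.

#include <linux/pinctrl/pinmux.h>

/* Hypothetical: on this controller, function selector 0 muxes a pin to GPIO. */
#define FOO_FUNC_SEL_GPIO	0

static bool foo_function_is_gpio(struct pinctrl_dev *pctldev,
				 unsigned int selector)
{
	return selector == FOO_FUNC_SEL_GPIO;
}

static const struct pinmux_ops foo_pmxops = {
	/* ...get_functions_count(), get_function_name(), set_mux(), etc... */
	.function_is_gpio = foo_function_is_gpio,
	.strict = true,
};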
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 5d2705f1d01c..7f6a92ac9704 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -31,13 +31,39 @@ struct pipe_buffer {
unsigned long private;
};
+/*
+ * Really only alpha needs 32-bit fields, but
+ * might as well do it for 64-bit architectures
+ * since that's what we've historically done,
+ * and it makes 'head_tail' always be a simple
+ * 'unsigned long'.
+ */
+#ifdef CONFIG_64BIT
+typedef unsigned int pipe_index_t;
+#else
+typedef unsigned short pipe_index_t;
+#endif
+
+/**
+ * union pipe_index - pipe indices
+ * @head: The point of buffer production
+ * @tail: The point of buffer consumption
+ * @head_tail: unsigned long union of @head and @tail
+ */
+union pipe_index {
+ unsigned long head_tail;
+ struct {
+ pipe_index_t head;
+ pipe_index_t tail;
+ };
+};
+
/**
* struct pipe_inode_info - a linux kernel pipe
* @mutex: mutex protecting the whole thing
* @rd_wait: reader wait point in case of empty pipe
* @wr_wait: writer wait point in case of full pipe
- * @head: The point of buffer production
- * @tail: The point of buffer consumption
+ * @head_tail: unsigned long union of @head and @tail
* @note_loss: The next read() should insert a data-lost message
* @max_usage: The maximum number of slots that may be used in the ring
* @ring_size: total number of buffers (should be a power of 2)
@@ -48,6 +74,7 @@ struct pipe_buffer {
* @files: number of struct file referring this pipe (protected by ->i_lock)
* @r_counter: reader counter
* @w_counter: writer counter
+ * @poll_usage: is this pipe used for epoll, which has crazy wakeups?
* @fasync_readers: reader side fasync
* @fasync_writers: writer side fasync
* @bufs: the circular array of pipe buffers
@@ -57,20 +84,22 @@ struct pipe_buffer {
struct pipe_inode_info {
struct mutex mutex;
wait_queue_head_t rd_wait, wr_wait;
- unsigned int head;
- unsigned int tail;
+
+ /* This has to match 'union pipe_index' above */
+ union {
+ unsigned long head_tail;
+ struct {
+ pipe_index_t head;
+ pipe_index_t tail;
+ };
+ };
+
unsigned int max_usage;
unsigned int ring_size;
-#ifdef CONFIG_WATCH_QUEUE
- bool note_loss;
-#endif
unsigned int nr_accounted;
unsigned int readers;
unsigned int writers;
unsigned int files;
unsigned int r_counter;
unsigned int w_counter;
- struct page *tmp_page;
+ bool poll_usage;
+#ifdef CONFIG_WATCH_QUEUE
+ bool note_loss;
+#endif
+ struct page *tmp_page[2];
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
struct pipe_buffer *bufs;
@@ -123,13 +152,19 @@ struct pipe_buf_operations {
};
/**
- * pipe_empty - Return true if the pipe is empty
- * @head: The pipe ring head pointer
- * @tail: The pipe ring tail pointer
+ * pipe_has_watch_queue - Check whether the pipe is a watch_queue,
+ * i.e. it was created with O_NOTIFICATION_PIPE
+ * @pipe: The pipe to check
+ *
+ * Return: true if pipe is a watch queue, false otherwise.
*/
-static inline bool pipe_empty(unsigned int head, unsigned int tail)
+static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe)
{
- return head == tail;
+#ifdef CONFIG_WATCH_QUEUE
+ return pipe->watch_queue != NULL;
+#else
+ return false;
+#endif
}
/**
@@ -139,7 +174,17 @@ static inline bool pipe_empty(unsigned int head, unsigned int tail)
*/
static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
{
- return head - tail;
+ return (pipe_index_t)(head - tail);
+}
+
+/**
+ * pipe_empty - Return true if the pipe is empty
+ * @head: The pipe ring head pointer
+ * @tail: The pipe ring tail pointer
+ */
+static inline bool pipe_empty(unsigned int head, unsigned int tail)
+{
+ return !pipe_occupancy(head, tail);
}
/**
@@ -155,23 +200,50 @@ static inline bool pipe_full(unsigned int head, unsigned int tail,
}
/**
- * pipe_space_for_user - Return number of slots available to userspace
- * @head: The pipe ring head pointer
- * @tail: The pipe ring tail pointer
- * @pipe: The pipe info structure
+ * pipe_is_full - Return true if the pipe is full
+ * @pipe: the pipe
*/
-static inline unsigned int pipe_space_for_user(unsigned int head, unsigned int tail,
- struct pipe_inode_info *pipe)
+static inline bool pipe_is_full(const struct pipe_inode_info *pipe)
{
- unsigned int p_occupancy, p_space;
+ return pipe_full(pipe->head, pipe->tail, pipe->max_usage);
+}
- p_occupancy = pipe_occupancy(head, tail);
- if (p_occupancy >= pipe->max_usage)
- return 0;
- p_space = pipe->ring_size - p_occupancy;
- if (p_space > pipe->max_usage)
- p_space = pipe->max_usage;
- return p_space;
+/**
+ * pipe_is_empty - Return true if the pipe is empty
+ * @pipe: the pipe
+ */
+static inline bool pipe_is_empty(const struct pipe_inode_info *pipe)
+{
+ return pipe_empty(pipe->head, pipe->tail);
+}
+
+/**
+ * pipe_buf_usage - Return how many pipe buffers are in use
+ * @pipe: the pipe
+ */
+static inline unsigned int pipe_buf_usage(const struct pipe_inode_info *pipe)
+{
+ return pipe_occupancy(pipe->head, pipe->tail);
+}
+
+/**
+ * pipe_buf - Return the pipe buffer for the specified slot in the pipe ring
+ * @pipe: The pipe to access
+ * @slot: The slot of interest
+ */
+static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
+ unsigned int slot)
+{
+ return &pipe->bufs[slot & (pipe->ring_size - 1)];
+}
+
+/**
+ * pipe_head_buf - Return the pipe buffer at the head of the pipe ring
+ * @pipe: The pipe to access
+ */
+static inline struct pipe_buffer *pipe_head_buf(const struct pipe_inode_info *pipe)
+{
+ return pipe_buf(pipe, pipe->head);
}
/**
@@ -236,10 +308,6 @@ void pipe_lock(struct pipe_inode_info *);
void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
-extern unsigned int pipe_max_size;
-extern unsigned long pipe_user_pages_hard;
-extern unsigned long pipe_user_pages_soft;
-
/* Wait for a pipe to be readable/writable while dropping the pipe lock */
void pipe_wait_readable(struct pipe_inode_info *);
void pipe_wait_writable(struct pipe_inode_info *);
@@ -254,22 +322,18 @@ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
-#ifdef CONFIG_WATCH_QUEUE
unsigned long account_pipe_buffers(struct user_struct *user,
unsigned long old, unsigned long new);
bool too_many_pipe_buffers_soft(unsigned long user_bufs);
bool too_many_pipe_buffers_hard(unsigned long user_bufs);
bool pipe_is_unprivileged_user(void);
-#endif
/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
-#ifdef CONFIG_WATCH_QUEUE
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots);
-#endif
-long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
+long pipe_fcntl(struct file *, unsigned int, unsigned int arg);
struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice);
int create_pipe_files(struct file **, int);
-unsigned int round_pipe_size(unsigned long size);
+unsigned int round_pipe_size(unsigned int size);
#endif
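
A sketch of the idiom the new union enables (the helper name is invented): one READ_ONCE() of head_tail snapshots both indices without taking pipe->mutex, and truncating the difference to pipe_index_t keeps the occupancy arithmetic correct when the 16-bit indices on 32-bit kernels wrap around.

#include <linux/compiler.h>
#include <linux/pipe_fs_i.h>

static unsigned int example_lockless_occupancy(struct pipe_inode_info *pipe)
{
	union pipe_index idx;

	/* A single unsigned long load reads head and tail together. */
	idx.head_tail = READ_ONCE(pipe->head_tail);

	/* Same wrap-safe arithmetic as pipe_occupancy(). */
	return (pipe_index_t)(idx.head - idx.tail);
}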
diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
index 2955ba976048..86be8bf27b41 100644
--- a/include/linux/pkeys.h
+++ b/include/linux/pkeys.h
@@ -4,6 +4,8 @@
#include <linux/mm.h>
+#define ARCH_DEFAULT_PKEY 0
+
#ifdef CONFIG_ARCH_HAS_PKEYS
#include <asm/pkeys.h>
#else /* ! CONFIG_ARCH_HAS_PKEYS */
@@ -44,10 +46,6 @@ static inline bool arch_pkeys_enabled(void)
return false;
}
-static inline void copy_init_pkru_to_fpregs(void)
-{
-}
-
#endif /* ! CONFIG_ARCH_HAS_PKEYS */
#endif /* _LINUX_PKEYS_H */
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
deleted file mode 100644
index 174601554b06..000000000000
--- a/include/linux/pktcdvd.h
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
- * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
- *
- * May be copied or modified under the terms of the GNU General Public
- * License. See linux/COPYING for more information.
- *
- * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
- * DVD-RW devices.
- *
- */
-#ifndef __PKTCDVD_H
-#define __PKTCDVD_H
-
-#include <linux/blkdev.h>
-#include <linux/completion.h>
-#include <linux/cdrom.h>
-#include <linux/kobject.h>
-#include <linux/sysfs.h>
-#include <linux/mempool.h>
-#include <uapi/linux/pktcdvd.h>
-
-/* default bio write queue congestion marks */
-#define PKT_WRITE_CONGESTION_ON 10000
-#define PKT_WRITE_CONGESTION_OFF 9000
-
-
-struct packet_settings
-{
- __u32 size; /* packet size in (512 byte) sectors */
- __u8 fp; /* fixed packets */
- __u8 link_loss; /* the rest is specified
- * as per Mt Fuji */
- __u8 write_type;
- __u8 track_mode;
- __u8 block_mode;
-};
-
-/*
- * Very crude stats for now
- */
-struct packet_stats
-{
- unsigned long pkt_started;
- unsigned long pkt_ended;
- unsigned long secs_w;
- unsigned long secs_rg;
- unsigned long secs_r;
-};
-
-struct packet_cdrw
-{
- struct list_head pkt_free_list;
- struct list_head pkt_active_list;
- spinlock_t active_list_lock; /* Serialize access to pkt_active_list */
- struct task_struct *thread;
- atomic_t pending_bios;
-};
-
-/*
- * Switch to high speed reading after reading this many kilobytes
- * with no interspersed writes.
- */
-#define HI_SPEED_SWITCH 512
-
-struct packet_iosched
-{
- atomic_t attention; /* Set to non-zero when queue processing is needed */
- int writing; /* Non-zero when writing, zero when reading */
- spinlock_t lock; /* Protecting read/write queue manipulations */
- struct bio_list read_queue;
- struct bio_list write_queue;
- sector_t last_write; /* The sector where the last write ended */
- int successive_reads;
-};
-
-/*
- * 32 buffers of 2048 bytes
- */
-#if (PAGE_SIZE % CD_FRAMESIZE) != 0
-#error "PAGE_SIZE must be a multiple of CD_FRAMESIZE"
-#endif
-#define PACKET_MAX_SIZE 128
-#define FRAMES_PER_PAGE (PAGE_SIZE / CD_FRAMESIZE)
-#define PACKET_MAX_SECTORS (PACKET_MAX_SIZE * CD_FRAMESIZE >> 9)
-
-enum packet_data_state {
- PACKET_IDLE_STATE, /* Not used at the moment */
- PACKET_WAITING_STATE, /* Waiting for more bios to arrive, so */
- /* we don't have to do as much */
- /* data gathering */
- PACKET_READ_WAIT_STATE, /* Waiting for reads to fill in holes */
- PACKET_WRITE_WAIT_STATE, /* Waiting for the write to complete */
- PACKET_RECOVERY_STATE, /* Recover after read/write errors */
- PACKET_FINISHED_STATE, /* After write has finished */
-
- PACKET_NUM_STATES /* Number of possible states */
-};
-
-/*
- * Information needed for writing a single packet
- */
-struct pktcdvd_device;
-
-struct packet_data
-{
- struct list_head list;
-
- spinlock_t lock; /* Lock protecting state transitions and */
- /* orig_bios list */
-
- struct bio_list orig_bios; /* Original bios passed to pkt_make_request */
- /* that will be handled by this packet */
- int write_size; /* Total size of all bios in the orig_bios */
- /* list, measured in number of frames */
-
- struct bio *w_bio; /* The bio we will send to the real CD */
- /* device once we have all data for the */
- /* packet we are going to write */
- sector_t sector; /* First sector in this packet */
- int frames; /* Number of frames in this packet */
-
- enum packet_data_state state; /* Current state */
- atomic_t run_sm; /* Incremented whenever the state */
- /* machine needs to be run */
- long sleep_time; /* Set this to non-zero to make the state */
- /* machine run after this many jiffies. */
-
- atomic_t io_wait; /* Number of pending IO operations */
- atomic_t io_errors; /* Number of read/write errors during IO */
-
- struct bio *r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */
- struct page *pages[PACKET_MAX_SIZE / FRAMES_PER_PAGE];
-
- int cache_valid; /* If non-zero, the data for the zone defined */
- /* by the sector variable is completely cached */
- /* in the pages[] vector. */
-
- int id; /* ID number for debugging */
- struct pktcdvd_device *pd;
-};
-
-struct pkt_rb_node {
- struct rb_node rb_node;
- struct bio *bio;
-};
-
-struct packet_stacked_data
-{
- struct bio *bio; /* Original read request bio */
- struct pktcdvd_device *pd;
-};
-#define PSD_POOL_SIZE 64
-
-struct pktcdvd_kobj
-{
- struct kobject kobj;
- struct pktcdvd_device *pd;
-};
-#define to_pktcdvdkobj(_k) \
- ((struct pktcdvd_kobj*)container_of(_k,struct pktcdvd_kobj,kobj))
-
-struct pktcdvd_device
-{
- struct block_device *bdev; /* dev attached */
- dev_t pkt_dev; /* our dev */
- char name[20];
- struct packet_settings settings;
- struct packet_stats stats;
- int refcnt; /* Open count */
- int write_speed; /* current write speed, kB/s */
- int read_speed; /* current read speed, kB/s */
- unsigned long offset; /* start offset */
- __u8 mode_offset; /* 0 / 8 */
- __u8 type;
- unsigned long flags;
- __u16 mmc3_profile;
- __u32 nwa; /* next writable address */
- __u32 lra; /* last recorded address */
- struct packet_cdrw cdrw;
- wait_queue_head_t wqueue;
-
- spinlock_t lock; /* Serialize access to bio_queue */
- struct rb_root bio_queue; /* Work queue of bios we need to handle */
- int bio_queue_size; /* Number of nodes in bio_queue */
- sector_t current_sector; /* Keep track of where the elevator is */
- atomic_t scan_queue; /* Set to non-zero when pkt_handle_queue */
- /* needs to be run. */
- mempool_t rb_pool; /* mempool for pkt_rb_node allocations */
-
- struct packet_iosched iosched;
- struct gendisk *disk;
-
- int write_congestion_off;
- int write_congestion_on;
-
- struct device *dev; /* sysfs pktcdvd[0-7] dev */
- struct pktcdvd_kobj *kobj_stat; /* sysfs pktcdvd[0-7]/stat/ */
- struct pktcdvd_kobj *kobj_wqueue; /* sysfs pktcdvd[0-7]/write_queue/ */
-
- struct dentry *dfs_d_root; /* debugfs: devname directory */
- struct dentry *dfs_f_info; /* debugfs: info file */
-};
-
-#endif /* __PKTCDVD_H */
diff --git a/include/linux/pl353-smc.h b/include/linux/pl353-smc.h
deleted file mode 100644
index 0e0d3df9bf72..000000000000
--- a/include/linux/pl353-smc.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ARM PL353 SMC Driver Header
- *
- * Copyright (C) 2012 - 2018 Xilinx, Inc
- */
-
-#ifndef __LINUX_PL353_SMC_H
-#define __LINUX_PL353_SMC_H
-
-enum pl353_smc_ecc_mode {
- PL353_SMC_ECCMODE_BYPASS = 0,
- PL353_SMC_ECCMODE_APB = 1,
- PL353_SMC_ECCMODE_MEM = 2
-};
-
-enum pl353_smc_mem_width {
- PL353_SMC_MEM_WIDTH_8 = 0,
- PL353_SMC_MEM_WIDTH_16 = 1
-};
-
-u32 pl353_smc_get_ecc_val(int ecc_reg);
-bool pl353_smc_ecc_is_busy(void);
-int pl353_smc_get_nand_int_status_raw(void);
-void pl353_smc_clr_nand_int(void);
-int pl353_smc_set_ecc_mode(enum pl353_smc_ecc_mode mode);
-int pl353_smc_set_ecc_pg_size(unsigned int pg_sz);
-int pl353_smc_set_buswidth(unsigned int bw);
-void pl353_smc_set_cycles(u32 timings[]);
-#endif
diff --git a/include/linux/platform_data/ad5449.h b/include/linux/platform_data/ad5449.h
deleted file mode 100644
index d687ef5726c2..000000000000
--- a/include/linux/platform_data/ad5449.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * AD5415, AD5426, AD5429, AD5432, AD5439, AD5443, AD5449 Digital to Analog
- * Converter driver.
- *
- * Copyright 2012 Analog Devices Inc.
- * Author: Lars-Peter Clausen <lars@metafoo.de>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_AD5449_H__
-#define __LINUX_PLATFORM_DATA_AD5449_H__
-
-/**
- * enum ad5449_sdo_mode - AD5449 SDO pin configuration
- * @AD5449_SDO_DRIVE_FULL: Drive the SDO pin with full strength.
- * @AD5449_SDO_DRIVE_WEAK: Drive the SDO pin with not full strength.
- * @AD5449_SDO_OPEN_DRAIN: Operate the SDO pin in open-drain mode.
- * @AD5449_SDO_DISABLED: Disable the SDO pin, in this mode it is not possible to
- * read back from the device.
- */
-enum ad5449_sdo_mode {
- AD5449_SDO_DRIVE_FULL = 0x0,
- AD5449_SDO_DRIVE_WEAK = 0x1,
- AD5449_SDO_OPEN_DRAIN = 0x2,
- AD5449_SDO_DISABLED = 0x3,
-};
-
-/**
- * struct ad5449_platform_data - Platform data for the ad5449 DAC driver
- * @sdo_mode: SDO pin mode
- * @hardware_clear_to_midscale: Whether asserting the hardware CLR pin sets the
- * outputs to midscale (true) or to zero scale(false).
- */
-struct ad5449_platform_data {
- enum ad5449_sdo_mode sdo_mode;
- bool hardware_clear_to_midscale;
-};
-
-#endif
diff --git a/include/linux/platform_data/ad5755.h b/include/linux/platform_data/ad5755.h
deleted file mode 100644
index e371e08f04bc..000000000000
--- a/include/linux/platform_data/ad5755.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2012 Analog Devices Inc.
- */
-#ifndef __LINUX_PLATFORM_DATA_AD5755_H__
-#define __LINUX_PLATFORM_DATA_AD5755_H__
-
-enum ad5755_mode {
- AD5755_MODE_VOLTAGE_0V_5V = 0,
- AD5755_MODE_VOLTAGE_0V_10V = 1,
- AD5755_MODE_VOLTAGE_PLUSMINUS_5V = 2,
- AD5755_MODE_VOLTAGE_PLUSMINUS_10V = 3,
- AD5755_MODE_CURRENT_4mA_20mA = 4,
- AD5755_MODE_CURRENT_0mA_20mA = 5,
- AD5755_MODE_CURRENT_0mA_24mA = 6,
-};
-
-enum ad5755_dc_dc_phase {
- AD5755_DC_DC_PHASE_ALL_SAME_EDGE = 0,
- AD5755_DC_DC_PHASE_A_B_SAME_EDGE_C_D_OPP_EDGE = 1,
- AD5755_DC_DC_PHASE_A_C_SAME_EDGE_B_D_OPP_EDGE = 2,
- AD5755_DC_DC_PHASE_90_DEGREE = 3,
-};
-
-enum ad5755_dc_dc_freq {
- AD5755_DC_DC_FREQ_250kHZ = 0,
- AD5755_DC_DC_FREQ_410kHZ = 1,
- AD5755_DC_DC_FREQ_650kHZ = 2,
-};
-
-enum ad5755_dc_dc_maxv {
- AD5755_DC_DC_MAXV_23V = 0,
- AD5755_DC_DC_MAXV_24V5 = 1,
- AD5755_DC_DC_MAXV_27V = 2,
- AD5755_DC_DC_MAXV_29V5 = 3,
-};
-
-enum ad5755_slew_rate {
- AD5755_SLEW_RATE_64k = 0,
- AD5755_SLEW_RATE_32k = 1,
- AD5755_SLEW_RATE_16k = 2,
- AD5755_SLEW_RATE_8k = 3,
- AD5755_SLEW_RATE_4k = 4,
- AD5755_SLEW_RATE_2k = 5,
- AD5755_SLEW_RATE_1k = 6,
- AD5755_SLEW_RATE_500 = 7,
- AD5755_SLEW_RATE_250 = 8,
- AD5755_SLEW_RATE_125 = 9,
- AD5755_SLEW_RATE_64 = 10,
- AD5755_SLEW_RATE_32 = 11,
- AD5755_SLEW_RATE_16 = 12,
- AD5755_SLEW_RATE_8 = 13,
- AD5755_SLEW_RATE_4 = 14,
- AD5755_SLEW_RATE_0_5 = 15,
-};
-
-enum ad5755_slew_step_size {
- AD5755_SLEW_STEP_SIZE_1 = 0,
- AD5755_SLEW_STEP_SIZE_2 = 1,
- AD5755_SLEW_STEP_SIZE_4 = 2,
- AD5755_SLEW_STEP_SIZE_8 = 3,
- AD5755_SLEW_STEP_SIZE_16 = 4,
- AD5755_SLEW_STEP_SIZE_32 = 5,
- AD5755_SLEW_STEP_SIZE_64 = 6,
- AD5755_SLEW_STEP_SIZE_128 = 7,
- AD5755_SLEW_STEP_SIZE_256 = 8,
-};
-
-/**
- * struct ad5755_platform_data - AD5755 DAC driver platform data
- * @ext_dc_dc_compenstation_resistor: Whether an external DC-DC converter
- * compensation register is used.
- * @dc_dc_phase: DC-DC converter phase.
- * @dc_dc_freq: DC-DC converter frequency.
- * @dc_dc_maxv: DC-DC maximum allowed boost voltage.
- * @dac.mode: The mode to be used for the DAC output.
- * @dac.ext_current_sense_resistor: Whether an external current sense resistor
- * is used.
- * @dac.enable_voltage_overrange: Whether to enable 20% voltage output overrange.
- * @dac.slew.enable: Whether to enable digital slew.
- * @dac.slew.rate: Slew rate of the digital slew.
- * @dac.slew.step_size: Slew step size of the digital slew.
- **/
-struct ad5755_platform_data {
- bool ext_dc_dc_compenstation_resistor;
- enum ad5755_dc_dc_phase dc_dc_phase;
- enum ad5755_dc_dc_freq dc_dc_freq;
- enum ad5755_dc_dc_maxv dc_dc_maxv;
-
- struct {
- enum ad5755_mode mode;
- bool ext_current_sense_resistor;
- bool enable_voltage_overrange;
- struct {
- bool enable;
- enum ad5755_slew_rate rate;
- enum ad5755_slew_step_size step_size;
- } slew;
- } dac[4];
-};
-
-#endif
diff --git a/include/linux/platform_data/adp5588.h b/include/linux/platform_data/adp5588.h
deleted file mode 100644
index 6d3f7d911a92..000000000000
--- a/include/linux/platform_data/adp5588.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller
- *
- * Copyright 2009-2010 Analog Devices Inc.
- */
-
-#ifndef _ADP5588_H
-#define _ADP5588_H
-
-#define DEV_ID 0x00 /* Device ID */
-#define CFG 0x01 /* Configuration Register1 */
-#define INT_STAT 0x02 /* Interrupt Status Register */
-#define KEY_LCK_EC_STAT 0x03 /* Key Lock and Event Counter Register */
-#define Key_EVENTA 0x04 /* Key Event Register A */
-#define Key_EVENTB 0x05 /* Key Event Register B */
-#define Key_EVENTC 0x06 /* Key Event Register C */
-#define Key_EVENTD 0x07 /* Key Event Register D */
-#define Key_EVENTE 0x08 /* Key Event Register E */
-#define Key_EVENTF 0x09 /* Key Event Register F */
-#define Key_EVENTG 0x0A /* Key Event Register G */
-#define Key_EVENTH 0x0B /* Key Event Register H */
-#define Key_EVENTI 0x0C /* Key Event Register I */
-#define Key_EVENTJ 0x0D /* Key Event Register J */
-#define KP_LCK_TMR 0x0E /* Keypad Lock1 to Lock2 Timer */
-#define UNLOCK1 0x0F /* Unlock Key1 */
-#define UNLOCK2 0x10 /* Unlock Key2 */
-#define GPIO_INT_STAT1 0x11 /* GPIO Interrupt Status */
-#define GPIO_INT_STAT2 0x12 /* GPIO Interrupt Status */
-#define GPIO_INT_STAT3 0x13 /* GPIO Interrupt Status */
-#define GPIO_DAT_STAT1 0x14 /* GPIO Data Status, Read twice to clear */
-#define GPIO_DAT_STAT2 0x15 /* GPIO Data Status, Read twice to clear */
-#define GPIO_DAT_STAT3 0x16 /* GPIO Data Status, Read twice to clear */
-#define GPIO_DAT_OUT1 0x17 /* GPIO DATA OUT */
-#define GPIO_DAT_OUT2 0x18 /* GPIO DATA OUT */
-#define GPIO_DAT_OUT3 0x19 /* GPIO DATA OUT */
-#define GPIO_INT_EN1 0x1A /* GPIO Interrupt Enable */
-#define GPIO_INT_EN2 0x1B /* GPIO Interrupt Enable */
-#define GPIO_INT_EN3 0x1C /* GPIO Interrupt Enable */
-#define KP_GPIO1 0x1D /* Keypad or GPIO Selection */
-#define KP_GPIO2 0x1E /* Keypad or GPIO Selection */
-#define KP_GPIO3 0x1F /* Keypad or GPIO Selection */
-#define GPI_EM1 0x20 /* GPI Event Mode 1 */
-#define GPI_EM2 0x21 /* GPI Event Mode 2 */
-#define GPI_EM3 0x22 /* GPI Event Mode 3 */
-#define GPIO_DIR1 0x23 /* GPIO Data Direction */
-#define GPIO_DIR2 0x24 /* GPIO Data Direction */
-#define GPIO_DIR3 0x25 /* GPIO Data Direction */
-#define GPIO_INT_LVL1 0x26 /* GPIO Edge/Level Detect */
-#define GPIO_INT_LVL2 0x27 /* GPIO Edge/Level Detect */
-#define GPIO_INT_LVL3 0x28 /* GPIO Edge/Level Detect */
-#define Debounce_DIS1 0x29 /* Debounce Disable */
-#define Debounce_DIS2 0x2A /* Debounce Disable */
-#define Debounce_DIS3 0x2B /* Debounce Disable */
-#define GPIO_PULL1 0x2C /* GPIO Pull Disable */
-#define GPIO_PULL2 0x2D /* GPIO Pull Disable */
-#define GPIO_PULL3 0x2E /* GPIO Pull Disable */
-#define CMP_CFG_STAT 0x30 /* Comparator Configuration and Status Register */
-#define CMP_CONFG_SENS1 0x31 /* Sensor1 Comparator Configuration Register */
-#define CMP_CONFG_SENS2 0x32 /* L2 Light Sensor Reference Level, Output Falling for Sensor 1 */
-#define CMP1_LVL2_TRIP 0x33 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 1 */
-#define CMP1_LVL2_HYS 0x34 /* L3 Light Sensor Reference Level, Output Falling For Sensor 1 */
-#define CMP1_LVL3_TRIP 0x35 /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 1 */
-#define CMP1_LVL3_HYS 0x36 /* Sensor 2 Comparator Configuration Register */
-#define CMP2_LVL2_TRIP 0x37 /* L2 Light Sensor Reference Level, Output Falling for Sensor 2 */
-#define CMP2_LVL2_HYS 0x38 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 2 */
-#define CMP2_LVL3_TRIP 0x39 /* L3 Light Sensor Reference Level, Output Falling For Sensor 2 */
-#define CMP2_LVL3_HYS 0x3A /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 2 */
-#define CMP1_ADC_DAT_R1 0x3B /* Comparator 1 ADC data Register1 */
-#define CMP1_ADC_DAT_R2 0x3C /* Comparator 1 ADC data Register2 */
-#define CMP2_ADC_DAT_R1 0x3D /* Comparator 2 ADC data Register1 */
-#define CMP2_ADC_DAT_R2 0x3E /* Comparator 2 ADC data Register2 */
-
-#define ADP5588_DEVICE_ID_MASK 0xF
-
- /* Configuration Register1 */
-#define ADP5588_AUTO_INC (1 << 7)
-#define ADP5588_GPIEM_CFG (1 << 6)
-#define ADP5588_OVR_FLOW_M (1 << 5)
-#define ADP5588_INT_CFG (1 << 4)
-#define ADP5588_OVR_FLOW_IEN (1 << 3)
-#define ADP5588_K_LCK_IM (1 << 2)
-#define ADP5588_GPI_IEN (1 << 1)
-#define ADP5588_KE_IEN (1 << 0)
-
-/* Interrupt Status Register */
-#define ADP5588_CMP2_INT (1 << 5)
-#define ADP5588_CMP1_INT (1 << 4)
-#define ADP5588_OVR_FLOW_INT (1 << 3)
-#define ADP5588_K_LCK_INT (1 << 2)
-#define ADP5588_GPI_INT (1 << 1)
-#define ADP5588_KE_INT (1 << 0)
-
-/* Key Lock and Event Counter Register */
-#define ADP5588_K_LCK_EN (1 << 6)
-#define ADP5588_LCK21 0x30
-#define ADP5588_KEC 0xF
-
-#define ADP5588_MAXGPIO 18
-#define ADP5588_BANK(offs) ((offs) >> 3)
-#define ADP5588_BIT(offs) (1u << ((offs) & 0x7))
-
-/* Put one of these structures in i2c_board_info platform_data */
-
-#define ADP5588_KEYMAPSIZE 80
-
-#define GPI_PIN_ROW0 97
-#define GPI_PIN_ROW1 98
-#define GPI_PIN_ROW2 99
-#define GPI_PIN_ROW3 100
-#define GPI_PIN_ROW4 101
-#define GPI_PIN_ROW5 102
-#define GPI_PIN_ROW6 103
-#define GPI_PIN_ROW7 104
-#define GPI_PIN_COL0 105
-#define GPI_PIN_COL1 106
-#define GPI_PIN_COL2 107
-#define GPI_PIN_COL3 108
-#define GPI_PIN_COL4 109
-#define GPI_PIN_COL5 110
-#define GPI_PIN_COL6 111
-#define GPI_PIN_COL7 112
-#define GPI_PIN_COL8 113
-#define GPI_PIN_COL9 114
-
-#define GPI_PIN_ROW_BASE GPI_PIN_ROW0
-#define GPI_PIN_ROW_END GPI_PIN_ROW7
-#define GPI_PIN_COL_BASE GPI_PIN_COL0
-#define GPI_PIN_COL_END GPI_PIN_COL9
-
-#define GPI_PIN_BASE GPI_PIN_ROW_BASE
-#define GPI_PIN_END GPI_PIN_COL_END
-
-#define ADP5588_GPIMAPSIZE_MAX (GPI_PIN_END - GPI_PIN_BASE + 1)
-
-struct adp5588_gpi_map {
- unsigned short pin;
- unsigned short sw_evt;
-};
-
-struct adp5588_kpad_platform_data {
- int rows; /* Number of rows */
- int cols; /* Number of columns */
- const unsigned short *keymap; /* Pointer to keymap */
- unsigned short keymapsize; /* Keymap size */
- unsigned repeat:1; /* Enable key repeat */
- unsigned en_keylock:1; /* Enable Key Lock feature */
- unsigned short unlock_key1; /* Unlock Key 1 */
- unsigned short unlock_key2; /* Unlock Key 2 */
- const struct adp5588_gpi_map *gpimap;
- unsigned short gpimapsize;
- const struct adp5588_gpio_platform_data *gpio_data;
-};
-
-struct i2c_client; /* forward declaration */
-
-struct adp5588_gpio_platform_data {
- int gpio_start; /* GPIO Chip base # */
- const char *const *names;
- unsigned irq_base; /* interrupt base # */
- unsigned pullup_dis_mask; /* Pull-Up Disable Mask */
- int (*setup)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- void *context;
-};
-
-#endif
diff --git a/include/linux/platform_data/amd_qdma.h b/include/linux/platform_data/amd_qdma.h
new file mode 100644
index 000000000000..967a6ef31cf9
--- /dev/null
+++ b/include/linux/platform_data/amd_qdma.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _PLATDATA_AMD_QDMA_H
+#define _PLATDATA_AMD_QDMA_H
+
+#include <linux/dmaengine.h>
+
+/**
+ * struct qdma_queue_info - DMA queue information. This information is used to
+ * match the queue when a DMA channel is requested
+ * @dir: Channel transfer direction
+ */
+struct qdma_queue_info {
+ enum dma_transfer_direction dir;
+};
+
+#define QDMA_FILTER_PARAM(qinfo) ((void *)(qinfo))
+
+struct dma_slave_map;
+
+/**
+ * struct qdma_platdata - Platform specific data for QDMA engine
+ * @max_mm_channels: Maximum number of MM DMA channels in each direction
+ * @device_map: DMA slave map
+ * @irq_index: The index of first IRQ
+ * @dma_dev: The device pointer for dma operations
+ */
+struct qdma_platdata {
+ u32 max_mm_channels;
+ u32 irq_index;
+ struct dma_slave_map *device_map;
+ struct device *dma_dev;
+};
+
+#endif /* _PLATDATA_AMD_QDMA_H */
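
A sketch of the board-side glue this header implies (device and channel names invented): a dma_slave_map table built with QDMA_FILTER_PARAM() lets dma_request_chan() match a queue by transfer direction. The amd_xdma.h header below follows the same filter-param pattern.

#include <linux/dmaengine.h>
#include <linux/platform_data/amd_qdma.h>

/* One host-to-card and one card-to-host queue description. */
static struct qdma_queue_info example_h2c = { .dir = DMA_MEM_TO_DEV };
static struct qdma_queue_info example_c2h = { .dir = DMA_DEV_TO_MEM };

/* Lets dma_request_chan(dev, "h2c-0") / "c2h-0" find the queues. */
static struct dma_slave_map example_qdma_map[] = {
	{ "example-qdma", "h2c-0", QDMA_FILTER_PARAM(&example_h2c) },
	{ "example-qdma", "c2h-0", QDMA_FILTER_PARAM(&example_c2h) },
};

static struct qdma_platdata example_qdma_pdata = {
	.max_mm_channels = 1,
	.irq_index = 0,
	.device_map = example_qdma_map,
};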
diff --git a/include/linux/platform_data/amd_xdma.h b/include/linux/platform_data/amd_xdma.h
new file mode 100644
index 000000000000..b5e23e14bac8
--- /dev/null
+++ b/include/linux/platform_data/amd_xdma.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _PLATDATA_AMD_XDMA_H
+#define _PLATDATA_AMD_XDMA_H
+
+#include <linux/dmaengine.h>
+
+/**
+ * struct xdma_chan_info - DMA channel information
+ * This information is used to match the channel when a DMA channel is requested
+ * @dir: Channel transfer direction
+ */
+struct xdma_chan_info {
+ enum dma_transfer_direction dir;
+};
+
+#define XDMA_FILTER_PARAM(chan_info) ((void *)(chan_info))
+
+struct dma_slave_map;
+
+/**
+ * struct xdma_platdata - platform specific data for XDMA engine
+ * @max_dma_channels: Maximum dma channels in each direction
+ * @device_map_cnt: Number of entries in @device_map
+ * @device_map: DMA slave map
+ */
+struct xdma_platdata {
+ u32 max_dma_channels;
+ u32 device_map_cnt;
+ struct dma_slave_map *device_map;
+};
+
+#endif /* _PLATDATA_AMD_XDMA_H */
diff --git a/include/linux/platform_data/asoc-palm27x.h b/include/linux/platform_data/asoc-palm27x.h
deleted file mode 100644
index 22b69a393a57..000000000000
--- a/include/linux/platform_data/asoc-palm27x.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _INCLUDE_PALMASOC_H_
-#define _INCLUDE_PALMASOC_H_
-
-struct palm27x_asoc_info {
- int jack_gpio;
-};
-
-#endif
diff --git a/include/linux/platform_data/asoc-pxa.h b/include/linux/platform_data/asoc-pxa.h
new file mode 100644
index 000000000000..7b5b9e20fbf5
--- /dev/null
+++ b/include/linux/platform_data/asoc-pxa.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SOC_PXA_AUDIO_H__
+#define __SOC_PXA_AUDIO_H__
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/ac97_codec.h>
+
+/*
+ * @reset_gpio: AC97 reset gpio (normally gpio113 or gpio95)
+ * a -1 value means no gpio will be used for reset
+ * @codec_pdata: AC97 codec platform_data
+ *
+ * reset_gpio should only be specified for pxa27x CPUs where a silicon
+ * bug prevents correct operation of the reset line. If not specified,
+ * the default behaviour on these CPUs is to consider gpio 113 as the
+ * AC97 reset line, which is the default on most boards.
+ */
+typedef struct {
+ int (*startup)(struct snd_pcm_substream *, void *);
+ void (*shutdown)(struct snd_pcm_substream *, void *);
+ void (*suspend)(void *);
+ void (*resume)(void *);
+ void *priv;
+ int reset_gpio;
+ void *codec_pdata[AC97_BUS_MAX_DEVICES];
+} pxa2xx_audio_ops_t;
+
+extern void pxa_set_ac97_info(pxa2xx_audio_ops_t *ops);
+extern void pxa27x_configure_ac97reset(int reset_gpio, bool to_gpio);
+
+#endif
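
A minimal sketch of a pxa27x board file consuming the relocated header; gpio 95 is simply one of the two reset lines named in the comment above, and the function names are invented.

#include <linux/init.h>
#include <linux/platform_data/asoc-pxa.h>

static pxa2xx_audio_ops_t example_ac97_ops = {
	.reset_gpio = 95,	/* -1 would mean: no gpio used for reset */
};

static void __init example_board_init_ac97(void)
{
	pxa_set_ac97_info(&example_ac97_ops);
}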
diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h
index f9c00f839e9f..085dd8e8af76 100644
--- a/include/linux/platform_data/asoc-s3c.h
+++ b/include/linux/platform_data/asoc-s3c.h
@@ -13,8 +13,6 @@
#include <linux/dmaengine.h>
-extern void s3c64xx_ac97_setup_gpio(int);
-
struct samsung_i2s_type {
/* If the Primary DAI has 5.1 Channels */
#define QUIRK_PRI_6CHAN (1 << 0)
diff --git a/include/linux/platform_data/asoc-s3c24xx_simtec.h b/include/linux/platform_data/asoc-s3c24xx_simtec.h
deleted file mode 100644
index 1a7efc98d108..000000000000
--- a/include/linux/platform_data/asoc-s3c24xx_simtec.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * Simtec Audio support.
-*/
-
-/**
- * struct s3c24xx_audio_simtec_pdata - platform data for simtec audio
- * @use_mpllin: Select codec clock from MPLLin
- * @output_cdclk: Need to output CDCLK to the codec
- * @have_mic: Set if we have a MIC socket
- * @have_lout: Set if we have a LineOut socket
- * @amp_gpio: GPIO pin to enable the AMP
- * @amp_gain: Option GPIO to control AMP gain
- */
-struct s3c24xx_audio_simtec_pdata {
- unsigned int use_mpllin:1;
- unsigned int output_cdclk:1;
-
- unsigned int have_mic:1;
- unsigned int have_lout:1;
-
- int amp_gpio;
- int amp_gain[2];
-
- void (*startup)(void);
-};
diff --git a/include/linux/platform_data/asoc-ux500-msp.h b/include/linux/platform_data/asoc-ux500-msp.h
deleted file mode 100644
index b8d0f730dda8..000000000000
--- a/include/linux/platform_data/asoc-ux500-msp.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- */
-
-#ifndef __MSP_H
-#define __MSP_H
-
-#include <linux/platform_data/dma-ste-dma40.h>
-
-/* Platform data structure for a MSP I2S-device */
-struct msp_i2s_platform_data {
- int id;
- struct stedma40_chan_cfg *msp_i2s_dma_rx;
- struct stedma40_chan_cfg *msp_i2s_dma_tx;
-};
-
-#endif
diff --git a/include/linux/platform_data/ata-samsung_cf.h b/include/linux/platform_data/ata-samsung_cf.h
deleted file mode 100644
index fccf969dc4da..000000000000
--- a/include/linux/platform_data/ata-samsung_cf.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Samsung CF-ATA platform_device info
-*/
-
-#ifndef __ATA_SAMSUNG_CF_H
-#define __ATA_SAMSUNG_CF_H __FILE__
-
-/**
- * struct s3c_ide_platdata - S3C IDE driver platform data.
- * @setup_gpio: Setup the external GPIO pins to the right state for data
- * transfer in true-ide mode.
- */
-struct s3c_ide_platdata {
- void (*setup_gpio)(void);
-};
-
-/*
- * s3c_ide_set_platdata() - Setup the platform specifc data for IDE driver.
- * @pdata: Platform data for IDE driver.
- */
-extern void s3c_ide_set_platdata(struct s3c_ide_platdata *pdata);
-
-/* architecture-specific IDE configuration */
-extern void s3c64xx_ide_setup_gpio(void);
-extern void s5pv210_ide_setup_gpio(void);
-
-#endif /*__ATA_SAMSUNG_CF_H */
diff --git a/include/linux/platform_data/bcm7038_wdt.h b/include/linux/platform_data/bcm7038_wdt.h
new file mode 100644
index 000000000000..e18cfd9ec8f9
--- /dev/null
+++ b/include/linux/platform_data/bcm7038_wdt.h
@@ -0,0 +1,8 @@
+#ifndef __BCM7038_WDT_PDATA_H
+#define __BCM7038_WDT_PDATA_H
+
+struct bcm7038_wdt_platform_data {
+ const char *clk_name;
+};
+
+#endif /* __BCM7038_WDT_PDATA_H */
diff --git a/include/linux/platform_data/bcmgenet.h b/include/linux/platform_data/bcmgenet.h
deleted file mode 100644
index d8f8738629d2..000000000000
--- a/include/linux/platform_data/bcmgenet.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__
-#define __LINUX_PLATFORM_DATA_BCMGENET_H__
-
-#include <linux/types.h>
-#include <linux/if_ether.h>
-#include <linux/phy.h>
-
-struct bcmgenet_platform_data {
- bool mdio_enabled;
- phy_interface_t phy_interface;
- int phy_address;
- int phy_speed;
- int phy_duplex;
- u8 mac_address[ETH_ALEN];
- int genet_version;
-};
-
-#endif
diff --git a/include/linux/platform_data/bd6107.h b/include/linux/platform_data/bd6107.h
index 54a06a4d2618..596ca4f95cfa 100644
--- a/include/linux/platform_data/bd6107.h
+++ b/include/linux/platform_data/bd6107.h
@@ -8,7 +8,7 @@
struct device;
struct bd6107_platform_data {
- struct device *fbdev;
+ struct device *dev;
unsigned int def_value;
};
diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h
index 1d30bf278231..ec99b7b73d1d 100644
--- a/include/linux/platform_data/brcmfmac.h
+++ b/include/linux/platform_data/brcmfmac.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 201 Broadcom Corporation
+ * Copyright (c) 2016 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -125,7 +125,7 @@ struct brcmfmac_pd_cc_entry {
*/
struct brcmfmac_pd_cc {
int table_size;
- struct brcmfmac_pd_cc_entry table[0];
+ struct brcmfmac_pd_cc_entry table[];
};
/**
@@ -178,7 +178,7 @@ struct brcmfmac_platform_data {
void (*power_off)(void);
char *fw_alternative_path;
int device_count;
- struct brcmfmac_pd_device devices[0];
+ struct brcmfmac_pd_device devices[];
};
diff --git a/include/linux/platform_data/brcmnand.h b/include/linux/platform_data/brcmnand.h
new file mode 100644
index 000000000000..8b8777985dce
--- /dev/null
+++ b/include/linux/platform_data/brcmnand.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef BRCMNAND_PLAT_DATA_H
+#define BRCMNAND_PLAT_DATA_H
+
+struct brcmnand_platform_data {
+ int chip_select;
+ const char * const *part_probe_types;
+ unsigned int ecc_stepsize;
+ unsigned int ecc_strength;
+};
+
+#endif /* BRCMNAND_PLAT_DATA_H */
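
A sketch, with illustrative values only, of the platform data a board might attach for one chip select; whether these ECC settings suit a given flash part depends entirely on the hardware.

#include <linux/platform_data/brcmnand.h>

/* NULL-terminated list of MTD partition parsers to try. */
static const char * const example_part_probes[] = { "ofpart", NULL };

static struct brcmnand_platform_data example_nand_pdata = {
	.chip_select = 0,
	.part_probe_types = example_part_probes,
	.ecc_stepsize = 512,	/* bytes per ECC step */
	.ecc_strength = 4,	/* correctable bits per step */
};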
diff --git a/include/linux/platform_data/clk-davinci-pll.h b/include/linux/platform_data/clk-davinci-pll.h
deleted file mode 100644
index e55dab1d578b..000000000000
--- a/include/linux/platform_data/clk-davinci-pll.h
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PLL clock driver for TI Davinci SoCs
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__
-#define __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__
-
-#include <linux/regmap.h>
-
-/**
- * davinci_pll_platform_data
- * @cfgchip: CFGCHIP syscon regmap
- */
-struct davinci_pll_platform_data {
- struct regmap *cfgchip;
-};
-
-#endif /* __LINUX_PLATFORM_DATA_CLK_DAVINCI_PLL_H__ */
diff --git a/include/linux/platform_data/clk-fch.h b/include/linux/platform_data/clk-fch.h
index b9f682459f08..11a2a23fd9b2 100644
--- a/include/linux/platform_data/clk-fch.h
+++ b/include/linux/platform_data/clk-fch.h
@@ -12,7 +12,7 @@
struct fch_clk_data {
void __iomem *base;
- u32 is_rv;
+ char *name;
};
#endif /* __CLK_FCH_H */
diff --git a/include/linux/platform_data/clk-s3c2410.h b/include/linux/platform_data/clk-s3c2410.h
deleted file mode 100644
index 7eb1cfa5409b..000000000000
--- a/include/linux/platform_data/clk-s3c2410.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2020 Krzysztof Kozlowski <krzk@kernel.org>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_CLK_S3C2410_H_
-#define __LINUX_PLATFORM_DATA_CLK_S3C2410_H_
-
-/**
- * struct s3c2410_clk_platform_data - platform data for S3C2410 clock driver
- *
- * @modify_misccr: Function to modify the MISCCR and return the new value
- */
-struct s3c2410_clk_platform_data {
- unsigned int (*modify_misccr)(unsigned int clr, unsigned int chg);
-};
-
-#endif /* __LINUX_PLATFORM_DATA_CLK_S3C2410_H_ */
-
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index 45f53afc46e2..69294f79cc88 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -13,8 +13,8 @@
#ifndef __CROS_EC_COMMANDS_H
#define __CROS_EC_COMMANDS_H
-
-
+#include <linux/bits.h>
+#include <linux/types.h>
#define BUILD_ASSERT(_cond)
@@ -51,10 +51,14 @@
/*
* The actual block is 0x800-0x8ff, but some BIOSes think it's 0x880-0x8ff
* and they tell the kernel that so we have to think of it as two parts.
+ *
+ * Other BIOSes report only the I/O port region spanned by the Microchip
+ * MEC series EC; an attempt to address a larger region may fail.
*/
-#define EC_HOST_CMD_REGION0 0x800
-#define EC_HOST_CMD_REGION1 0x880
-#define EC_HOST_CMD_REGION_SIZE 0x80
+#define EC_HOST_CMD_REGION0 0x800
+#define EC_HOST_CMD_REGION1 0x880
+#define EC_HOST_CMD_REGION_SIZE 0x80
+#define EC_HOST_CMD_MEC_REGION_SIZE 0x8
/* EC command register bit functions */
#define EC_LPC_CMDR_DATA BIT(0) /* Data ready for host to read */
@@ -783,7 +787,7 @@ struct ec_host_response {
*
* Packets always start with a request or response header. They are followed
* by data_len bytes of data. If the data_crc_present flag is set, the data
- * bytes are followed by a CRC-8 of that data, using using x^8 + x^2 + x + 1
+ * bytes are followed by a CRC-8 of that data, using x^8 + x^2 + x + 1
* polynomial.
*
* Host algorithm when sending a request q:
@@ -1078,7 +1082,7 @@ struct ec_params_get_cmd_versions_v1 {
} __ec_align2;
/**
- * struct ec_response_get_cmd_version - Response to the get command versions.
+ * struct ec_response_get_cmd_versions - Response to the get command versions.
* @version_mask: Mask of supported versions; use EC_VER_MASK() to compare with
* a desired version.
*/
@@ -1296,6 +1300,50 @@ enum ec_feature_code {
* mux.
*/
EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK = 43,
+ /*
+ * The EC supports entering and residing in S4.
+ */
+ EC_FEATURE_S4_RESIDENCY = 44,
+ /*
+ * The EC supports the AP directing mux sets for the board.
+ */
+ EC_FEATURE_TYPEC_AP_MUX_SET = 45,
+ /*
+ * The EC supports the AP composing VDMs for us to send.
+ */
+ EC_FEATURE_TYPEC_AP_VDM_SEND = 46,
+ /*
+ * The EC supports system safe mode panic recovery.
+ */
+ EC_FEATURE_SYSTEM_SAFE_MODE = 47,
+ /*
+ * The EC will reboot on runtime assertion failures.
+ */
+ EC_FEATURE_ASSERT_REBOOTS = 48,
+ /*
+ * The EC image is built with tokenized logging enabled.
+ */
+ EC_FEATURE_TOKENIZED_LOGGING = 49,
+ /*
+ * The EC supports triggering an STB dump.
+ */
+ EC_FEATURE_AMD_STB_DUMP = 50,
+ /*
+ * The EC supports memory dump commands.
+ */
+ EC_FEATURE_MEMORY_DUMP = 51,
+ /*
+ * The EC supports DP2.1 capability
+ */
+ EC_FEATURE_TYPEC_DP2_1 = 52,
+ /*
+ * The MCU is System Companion Processor Core 1
+ */
+ EC_FEATURE_SCP_C1 = 53,
+ /*
+ * The EC supports UCSI PPM.
+ */
+ EC_FEATURE_UCSI_PPM = 54,
};
#define EC_FEATURE_MASK_0(event_code) BIT(event_code % 32)
@@ -1777,6 +1825,16 @@ struct ec_response_pwm_get_duty {
uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */
} __ec_align2;
+#define EC_CMD_PWM_GET_FAN_DUTY 0x0027
+
+struct ec_params_pwm_get_fan_duty {
+ uint8_t fan_idx;
+} __ec_align1;
+
+struct ec_response_pwm_get_fan_duty {
+ uint32_t percent; /* Percentage of duty cycle, ranging from 0 to 100 */
+} __ec_align4;
+
/*****************************************************************************/
/*
* Lightbar commands. This looks worse than it is. Since we only use one HOST
@@ -2340,6 +2398,12 @@ enum motionsense_command {
*/
MOTIONSENSE_CMD_SENSOR_SCALE = 18,
+ /*
+ * Activity management
+ * Retrieve current status of given activity.
+ */
+ MOTIONSENSE_CMD_GET_ACTIVITY = 20,
+
/* Number of motionsense sub-commands. */
MOTIONSENSE_NUM_CMDS
};
@@ -2399,6 +2463,11 @@ enum motionsensor_orientation {
MOTIONSENSE_ORIENTATION_UNKNOWN = 4,
};
+struct ec_response_activity_data {
+ uint8_t activity; /* motionsensor_activity */
+ uint8_t state;
+} __ec_todo_packed;
+
struct ec_response_motion_sensor_data {
/* Flags for each sensor. */
uint8_t flags;
@@ -2412,8 +2481,7 @@ struct ec_response_motion_sensor_data {
uint32_t timestamp;
};
struct __ec_todo_unpacked {
- uint8_t activity; /* motionsensor_activity */
- uint8_t state;
+ struct ec_response_activity_data activity_data;
int16_t add_info[2];
};
};
@@ -2446,6 +2514,7 @@ enum motionsensor_activity {
MOTIONSENSE_ACTIVITY_SIG_MOTION = 1,
MOTIONSENSE_ACTIVITY_DOUBLE_TAP = 2,
MOTIONSENSE_ACTIVITY_ORIENTATION = 3,
+ MOTIONSENSE_ACTIVITY_BODY_DETECTION = 4,
};
struct ec_motion_sense_activity {
@@ -2623,6 +2692,7 @@ struct ec_params_motion_sense {
uint32_t max_data_vector;
} fifo_read;
+ /* Used for MOTIONSENSE_CMD_SET_ACTIVITY */
struct ec_motion_sense_activity set_activity;
/* Used for MOTIONSENSE_CMD_LID_ANGLE */
@@ -2668,6 +2738,12 @@ struct ec_params_motion_sense {
*/
int16_t hys_degree;
} tablet_mode_threshold;
+
+ /* Used for MOTIONSENSE_CMD_GET_ACTIVITY */
+ struct __ec_todo_unpacked {
+ uint8_t sensor_num;
+ uint8_t activity; /* enum motionsensor_activity */
+ } get_activity;
};
} __ec_todo_packed;
@@ -2685,7 +2761,7 @@ struct ec_response_motion_sense {
* Sensor data is truncated if response_max is too small
* for holding all the data.
*/
- struct ec_response_motion_sensor_data sensor[0];
+ DECLARE_FLEX_ARRAY(struct ec_response_motion_sensor_data, sensor);
} dump;
/* Used for MOTIONSENSE_CMD_INFO. */
@@ -2785,6 +2861,10 @@ struct ec_response_motion_sense {
uint16_t hys_degree;
} tablet_mode_threshold;
+ /* Used for MOTIONSENSE_CMD_GET_ACTIVITY. */
+ struct __ec_todo_unpacked {
+ uint8_t state;
+ } get_activity;
};
} __ec_todo_packed;
@@ -3057,14 +3137,31 @@ struct ec_params_thermal_set_threshold_v1 {
/****************************************************************************/
-/* Toggle automatic fan control */
+/* Set or get fan control mode */
#define EC_CMD_THERMAL_AUTO_FAN_CTRL 0x0052
+enum ec_auto_fan_ctrl_cmd {
+ EC_AUTO_FAN_CONTROL_CMD_SET = 0,
+ EC_AUTO_FAN_CONTROL_CMD_GET,
+};
+
/* Version 1 of input params */
struct ec_params_auto_fan_ctrl_v1 {
uint8_t fan_idx;
} __ec_align1;
+/* Version 2 of input params */
+struct ec_params_auto_fan_ctrl_v2 {
+ uint8_t fan_idx;
+ uint8_t cmd; /* enum ec_auto_fan_ctrl_cmd */
+ uint8_t set_auto; /* bool; only used with EC_AUTO_FAN_CONTROL_CMD_SET */
+} __ec_align4;
+
+struct ec_response_auto_fan_control {
+ uint8_t is_auto; /* bool */
+} __ec_align1;
+
/* Get/Set TMP006 calibration data */
#define EC_CMD_TMP006_GET_CALIBRATION 0x0053
#define EC_CMD_TMP006_SET_CALIBRATION 0x0054
@@ -3386,6 +3483,9 @@ enum ec_mkbp_event {
/* Send an incoming CEC message to the AP */
EC_MKBP_EVENT_CEC_MESSAGE = 9,
+ /* Peripheral device charger event */
+ EC_MKBP_EVENT_PCHG = 12,
+
/* Number of MKBP events */
EC_MKBP_EVENT_COUNT,
};
@@ -3444,6 +3544,34 @@ union __ec_align_offset1 ec_response_get_next_data_v1 {
};
BUILD_ASSERT(sizeof(union ec_response_get_next_data_v1) == 16);
+union __ec_align_offset1 ec_response_get_next_data_v3 {
+ uint8_t key_matrix[18];
+
+ /* Unaligned */
+ uint32_t host_event;
+ uint64_t host_event64;
+
+ struct __ec_todo_unpacked {
+ /* For aligning the fifo_info */
+ uint8_t reserved[3];
+ struct ec_response_motion_sense_fifo_info info;
+ } sensor_fifo;
+
+ uint32_t buttons;
+
+ uint32_t switches;
+
+ uint32_t fp_events;
+
+ uint32_t sysrq;
+
+ /* CEC events from enum mkbp_cec_event */
+ uint32_t cec_events;
+
+ uint8_t cec_message[16];
+};
+BUILD_ASSERT(sizeof(union ec_response_get_next_data_v3) == 18);
+
struct ec_response_get_next_event {
uint8_t event_type;
/* Followed by event data if any */
@@ -3456,12 +3584,21 @@ struct ec_response_get_next_event_v1 {
union ec_response_get_next_data_v1 data;
} __ec_align1;
+struct ec_response_get_next_event_v3 {
+ uint8_t event_type;
+ /* Followed by event data if any */
+ union ec_response_get_next_data_v3 data;
+} __ec_align1;
+
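/*
 * Illustrative sketch (not part of the diff): dispatching a v3 event by type.
 * Only two cases are shown; real consumers switch over all of
 * enum ec_mkbp_event, and for PCHG the 32-bit payload packs the port and
 * event bits (see the EC_MKBP_PCHG_* macros further down).
 */
static void example_handle_event_v3(const struct ec_response_get_next_event_v3 *ev)
{
	switch (ev->event_type) {
	case EC_MKBP_EVENT_CEC_MESSAGE:
		/* ev->data.cec_message holds the raw CEC frame */
		break;
	case EC_MKBP_EVENT_PCHG:
		/* ev->data.host_event carries the packed PCHG word */
		break;
	default:
		break;
	}
}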
/* Bit indices for buttons and switches. */
/* Buttons */
#define EC_MKBP_POWER_BUTTON 0
#define EC_MKBP_VOL_UP 1
#define EC_MKBP_VOL_DOWN 2
#define EC_MKBP_RECOVERY 3
+#define EC_MKBP_BRI_UP 4
+#define EC_MKBP_BRI_DOWN 5
+#define EC_MKBP_SCREEN_LOCK 6
/* Switches */
#define EC_MKBP_LID_OPEN 0
@@ -3787,16 +3924,61 @@ struct ec_params_i2c_write {
* discharge the battery.
*/
#define EC_CMD_CHARGE_CONTROL 0x0096
-#define EC_VER_CHARGE_CONTROL 1
+#define EC_VER_CHARGE_CONTROL 3
enum ec_charge_control_mode {
CHARGE_CONTROL_NORMAL = 0,
CHARGE_CONTROL_IDLE,
CHARGE_CONTROL_DISCHARGE,
+ /* Add no more entries below. */
+ CHARGE_CONTROL_COUNT,
+};
+
+#define EC_CHARGE_MODE_TEXT \
+ { \
+ [CHARGE_CONTROL_NORMAL] = "NORMAL", \
+ [CHARGE_CONTROL_IDLE] = "IDLE", \
+ [CHARGE_CONTROL_DISCHARGE] = "DISCHARGE", \
+ }
+
+enum ec_charge_control_cmd {
+ EC_CHARGE_CONTROL_CMD_SET = 0,
+ EC_CHARGE_CONTROL_CMD_GET,
+};
+
+enum ec_charge_control_flag {
+ EC_CHARGE_CONTROL_FLAG_NO_IDLE = BIT(0),
};
struct ec_params_charge_control {
- uint32_t mode; /* enum charge_control_mode */
+ uint32_t mode; /* enum charge_control_mode */
+
+ /* Below are the fields added in V2. */
+ uint8_t cmd; /* enum ec_charge_control_cmd. */
+ uint8_t flags; /* enum ec_charge_control_flag (v3+) */
+ /*
+ * Lower and upper thresholds for battery sustainer. This struct isn't
+ * named to avoid tainting foreign projects' namespaces.
+ *
+ * If charge mode is explicitly set (e.g. DISCHARGE), battery sustainer
+ * will be disabled. To disable battery sustainer, set mode=NORMAL,
+ * lower=-1, upper=-1.
+ */
+ struct {
+ int8_t lower; /* Display SoC in percentage. */
+ int8_t upper; /* Display SoC in percentage. */
+ } sustain_soc;
+} __ec_align4;
+
+/* Added in v2 */
+struct ec_response_charge_control {
+ uint32_t mode; /* enum charge_control_mode */
+ struct { /* Battery sustainer thresholds */
+ int8_t lower;
+ int8_t upper;
+ } sustain_soc;
+ uint8_t flags; /* enum ec_charge_control_flag (v3+) */
+ uint8_t reserved;
} __ec_align4;
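/*
 * Illustrative sketch (not part of the diff): enabling the battery sustainer
 * between 70% and 80% display SoC with a v2+ SET request (send_to_ec() as in
 * the earlier fan-control sketch; the command version is an assumption).
 */
static int example_enable_sustainer(void)
{
	struct ec_params_charge_control p = {
		.cmd = EC_CHARGE_CONTROL_CMD_SET,
		.mode = CHARGE_CONTROL_NORMAL,
		.sustain_soc = {
			.lower = 70,
			.upper = 80,
		},
	};

	/* mode=NORMAL with lower=-1, upper=-1 would disable the sustainer. */
	return send_to_ec(EC_CMD_CHARGE_CONTROL, 2, &p, sizeof(p), NULL, 0);
}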
/*****************************************************************************/
@@ -3939,60 +4121,52 @@ struct ec_response_i2c_passthru {
} __ec_align1;
/*****************************************************************************/
-/* Power button hang detect */
-
+/* AP hang detect */
#define EC_CMD_HANG_DETECT 0x009F
-/* Reasons to start hang detection timer */
-/* Power button pressed */
-#define EC_HANG_START_ON_POWER_PRESS BIT(0)
-
-/* Lid closed */
-#define EC_HANG_START_ON_LID_CLOSE BIT(1)
+#define EC_HANG_DETECT_MIN_TIMEOUT 5
+#define EC_HANG_DETECT_MAX_TIMEOUT 65535
- /* Lid opened */
-#define EC_HANG_START_ON_LID_OPEN BIT(2)
+/* EC hang detect commands */
+enum ec_hang_detect_cmds {
+ /* Reload AP hang detect timer. */
+ EC_HANG_DETECT_CMD_RELOAD = 0x0,
-/* Start of AP S3->S0 transition (booting or resuming from suspend) */
-#define EC_HANG_START_ON_RESUME BIT(3)
+ /* Stop AP hang detect timer. */
+ EC_HANG_DETECT_CMD_CANCEL = 0x1,
-/* Reasons to cancel hang detection */
-
-/* Power button released */
-#define EC_HANG_STOP_ON_POWER_RELEASE BIT(8)
-
-/* Any host command from AP received */
-#define EC_HANG_STOP_ON_HOST_COMMAND BIT(9)
-
-/* Stop on end of AP S0->S3 transition (suspending or shutting down) */
-#define EC_HANG_STOP_ON_SUSPEND BIT(10)
+ /* Configure the watchdog with the given reboot timeout and
+ * cancel any currently running AP hang detect timer.
+ */
+ EC_HANG_DETECT_CMD_SET_TIMEOUT = 0x2,
-/*
- * If this flag is set, all the other fields are ignored, and the hang detect
- * timer is started. This provides the AP a way to start the hang timer
- * without reconfiguring any of the other hang detect settings. Note that
- * you must previously have configured the timeouts.
- */
-#define EC_HANG_START_NOW BIT(30)
+ /* Get last hang status - whether the AP boot was clean or not */
+ EC_HANG_DETECT_CMD_GET_STATUS = 0x3,
-/*
- * If this flag is set, all the other fields are ignored (including
- * EC_HANG_START_NOW). This provides the AP a way to stop the hang timer
- * without reconfiguring any of the other hang detect settings.
- */
-#define EC_HANG_STOP_NOW BIT(31)
+ /* Clear last hang status. Called when AP is rebooting/shutting down
+ * gracefully.
+ */
+ EC_HANG_DETECT_CMD_CLEAR_STATUS = 0x4
+};
struct ec_params_hang_detect {
- /* Flags; see EC_HANG_* */
- uint32_t flags;
-
- /* Timeout in msec before generating host event, if enabled */
- uint16_t host_event_timeout_msec;
+ uint16_t command; /* enum ec_hang_detect_cmds */
+ /* Timeout in seconds before generating reboot */
+ uint16_t reboot_timeout_sec;
+} __ec_align2;
- /* Timeout in msec before generating warm reboot, if enabled */
- uint16_t warm_reboot_timeout_msec;
-} __ec_align4;
+/* Status codes that describe whether the AP has booted normally or a hang
+ * has been detected and the EC has reset the AP.
+ */
+enum ec_hang_detect_status {
+ EC_HANG_DETECT_AP_BOOT_NORMAL = 0x0,
+ EC_HANG_DETECT_AP_BOOT_EC_WDT = 0x1,
+ EC_HANG_DETECT_AP_BOOT_COUNT,
+};
+struct ec_response_hang_detect {
+ uint8_t status; /* enum ec_hang_detect_status */
+} __ec_align1;
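/*
 * Illustrative sketch (not part of the diff): arming the EC watchdog with a
 * 30 second reboot timeout and then reloading ("petting") it. Timeouts must
 * lie within [EC_HANG_DETECT_MIN_TIMEOUT, EC_HANG_DETECT_MAX_TIMEOUT];
 * send_to_ec() as in the earlier sketches, command version assumed 0.
 */
static int example_arm_hang_detect(void)
{
	struct ec_params_hang_detect p = {
		.command = EC_HANG_DETECT_CMD_SET_TIMEOUT,
		.reboot_timeout_sec = 30,
	};

	return send_to_ec(EC_CMD_HANG_DETECT, 0, &p, sizeof(p), NULL, 0);
}

static int example_pet_hang_detect(void)
{
	struct ec_params_hang_detect p = {
		.command = EC_HANG_DETECT_CMD_RELOAD,
		/* reboot_timeout_sec is ignored for RELOAD */
	};

	return send_to_ec(EC_CMD_HANG_DETECT, 0, &p, sizeof(p), NULL, 0);
}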
/*****************************************************************************/
/* Commands for battery charging */
@@ -4228,6 +4402,7 @@ enum ec_device_event {
EC_DEVICE_EVENT_TRACKPAD,
EC_DEVICE_EVENT_DSP,
EC_DEVICE_EVENT_WIFI,
+ EC_DEVICE_EVENT_WLC,
};
enum ec_device_event_param {
@@ -4413,8 +4588,20 @@ struct ec_response_i2c_passthru_protect {
* These commands are for sending and receiving message via HDMI CEC
*/
+#define EC_CEC_MAX_PORTS 16
+
#define MAX_CEC_MSG_LEN 16
+/*
+ * Helper macros for packing/unpacking cec_events.
+ * bits[27:0] : bitmask of events from enum mkbp_cec_event
+ * bits[31:28]: port number
+ */
+#define EC_MKBP_EVENT_CEC_PACK(events, port) \
+ (((events) & GENMASK(27, 0)) | (((port) & 0xf) << 28))
+#define EC_MKBP_EVENT_CEC_GET_EVENTS(event) ((event) & GENMASK(27, 0))
+#define EC_MKBP_EVENT_CEC_GET_PORT(event) (((event) >> 28) & 0xf)
+
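/*
 * Illustrative sketch (not part of the diff): round-tripping a cec_events
 * word through the pack/unpack helpers above (EC_MKBP_CEC_SEND_OK is defined
 * further down in this header).
 */
static void example_cec_event_packing(void)
{
	uint32_t ev = EC_MKBP_EVENT_CEC_PACK(EC_MKBP_CEC_SEND_OK, 2);
	uint32_t events = EC_MKBP_EVENT_CEC_GET_EVENTS(ev); /* == BIT(0) */
	uint32_t port = EC_MKBP_EVENT_CEC_GET_PORT(ev); /* == 2 */

	(void)events;
	(void)port;
}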
/* CEC message from the AP to be written on the CEC bus */
#define EC_CMD_CEC_WRITE_MSG 0x00B8
@@ -4426,19 +4613,54 @@ struct ec_params_cec_write {
uint8_t msg[MAX_CEC_MSG_LEN];
} __ec_align1;
+/**
+ * struct ec_params_cec_write_v1 - Message to write to the CEC bus
+ * @port: CEC port to write the message on
+ * @msg_len: length of msg in bytes
+ * @msg: message content to write to the CEC bus
+ */
+struct ec_params_cec_write_v1 {
+ uint8_t port;
+ uint8_t msg_len;
+ uint8_t msg[MAX_CEC_MSG_LEN];
+} __ec_align1;
+
+/* CEC message read from a CEC bus reported back to the AP */
+#define EC_CMD_CEC_READ_MSG 0x00B9
+
+/**
+ * struct ec_params_cec_read - Read a message from the CEC bus
+ * @port: CEC port to read a message on
+ */
+struct ec_params_cec_read {
+ uint8_t port;
+} __ec_align1;
+
+/**
+ * struct ec_response_cec_read - Message read from the CEC bus
+ * @msg_len: length of msg in bytes
+ * @msg: message content read from the CEC bus
+ */
+struct ec_response_cec_read {
+ uint8_t msg_len;
+ uint8_t msg[MAX_CEC_MSG_LEN];
+} __ec_align1;
+
/* Set various CEC parameters */
#define EC_CMD_CEC_SET 0x00BA
/**
* struct ec_params_cec_set - CEC parameters set
* @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS
+ * @port: CEC port to set the parameter on
* @val: in case cmd is CEC_CMD_ENABLE, this field can be 0 to disable CEC
* or 1 to enable CEC functionality, in case cmd is
* CEC_CMD_LOGICAL_ADDRESS, this field encodes the requested logical
* address between 0 and 15 or 0xff to unregister
*/
struct ec_params_cec_set {
- uint8_t cmd; /* enum cec_command */
+ uint8_t cmd : 4; /* enum cec_command */
+ uint8_t port : 4;
uint8_t val;
} __ec_align1;
@@ -4448,9 +4670,11 @@ struct ec_params_cec_set {
/**
* struct ec_params_cec_get - CEC parameters get
* @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS
+ * @port: CEC port to get the parameter on
*/
struct ec_params_cec_get {
- uint8_t cmd; /* enum cec_command */
+ uint8_t cmd : 4; /* enum cec_command */
+ uint8_t port : 4;
} __ec_align1;
/**
@@ -4464,6 +4688,17 @@ struct ec_response_cec_get {
uint8_t val;
} __ec_align1;
+/* Get the number of CEC ports */
+#define EC_CMD_CEC_PORT_COUNT 0x00C1
+
+/**
+ * struct ec_response_cec_port_count - CEC port count response
+ * @port_count: number of CEC ports
+ */
+struct ec_response_cec_port_count {
+ uint8_t port_count;
+} __ec_align1;
+
/* CEC parameters command */
enum cec_command {
/* CEC reading, writing and events enable */
@@ -4478,6 +4713,8 @@ enum mkbp_cec_event {
EC_MKBP_CEC_SEND_OK = BIT(0),
/* Outgoing message was not acknowledged */
EC_MKBP_CEC_SEND_FAILED = BIT(1),
+ /* Incoming message can be read out by AP */
+ EC_MKBP_CEC_HAVE_DATA = BIT(2),
};
/*****************************************************************************/
@@ -4856,8 +5093,12 @@ struct ec_response_pd_status {
#define PD_EVENT_POWER_CHANGE BIT(1)
#define PD_EVENT_IDENTITY_RECEIVED BIT(2)
#define PD_EVENT_DATA_SWAP BIT(3)
+#define PD_EVENT_TYPEC BIT(4)
+#define PD_EVENT_PPM BIT(5)
+#define PD_EVENT_INIT BIT(6)
+
struct ec_response_host_event_status {
- uint32_t status; /* PD MCU host event status */
+ uint32_t status; /* PD MCU host event status */
} __ec_align4;
/* Set USB type-C port role and muxes */
@@ -5460,6 +5701,133 @@ struct ec_response_rollback_info {
/* Issue AP reset */
#define EC_CMD_AP_RESET 0x0125
+/*
+ * Get the number of peripheral charge ports
+ */
+#define EC_CMD_PCHG_COUNT 0x0134
+
+#define EC_PCHG_MAX_PORTS 8
+
+struct ec_response_pchg_count {
+ uint8_t port_count;
+} __ec_align1;
+
+/*
+ * Get the status of a peripheral charge port
+ */
+#define EC_CMD_PCHG 0x0135
+
+struct ec_params_pchg {
+ uint8_t port;
+} __ec_align1;
+
+struct ec_response_pchg {
+ uint32_t error; /* enum pchg_error */
+ uint8_t state; /* enum pchg_state state */
+ uint8_t battery_percentage;
+ uint8_t unused0;
+ uint8_t unused1;
+ /* Fields added in version 1 */
+ uint32_t fw_version;
+ uint32_t dropped_event_count;
+} __ec_align2;
+
+enum pchg_state {
+ /* Charger is reset and not initialized. */
+ PCHG_STATE_RESET = 0,
+ /* Charger is initialized or disabled. */
+ PCHG_STATE_INITIALIZED,
+ /* Charger is enabled and ready to detect a device. */
+ PCHG_STATE_ENABLED,
+ /* Device is in proximity. */
+ PCHG_STATE_DETECTED,
+ /* Device is being charged. */
+ PCHG_STATE_CHARGING,
+ /* Device is fully charged. It implies DETECTED (& not charging). */
+ PCHG_STATE_FULL,
+ /* In download (a.k.a. firmware update) mode */
+ PCHG_STATE_DOWNLOAD,
+ /* In download mode. Ready for receiving data. */
+ PCHG_STATE_DOWNLOADING,
+ /* Device is ready for data communication. */
+ PCHG_STATE_CONNECTED,
+ /* Put no more entries below */
+ PCHG_STATE_COUNT,
+};
+
+#define EC_PCHG_STATE_TEXT { \
+ [PCHG_STATE_RESET] = "RESET", \
+ [PCHG_STATE_INITIALIZED] = "INITIALIZED", \
+ [PCHG_STATE_ENABLED] = "ENABLED", \
+ [PCHG_STATE_DETECTED] = "DETECTED", \
+ [PCHG_STATE_CHARGING] = "CHARGING", \
+ [PCHG_STATE_FULL] = "FULL", \
+ [PCHG_STATE_DOWNLOAD] = "DOWNLOAD", \
+ [PCHG_STATE_DOWNLOADING] = "DOWNLOADING", \
+ [PCHG_STATE_CONNECTED] = "CONNECTED", \
+ }
+
+/*
+ * Update firmware of peripheral chip
+ */
+#define EC_CMD_PCHG_UPDATE 0x0136
+
+/* Port number is encoded in bits[31:28]. */
+#define EC_MKBP_PCHG_PORT_SHIFT 28
+/* Utility macro for converting MKBP event to port number. */
+#define EC_MKBP_PCHG_EVENT_TO_PORT(e) (((e) >> EC_MKBP_PCHG_PORT_SHIFT) & 0xf)
+/* Utility macro for extracting event bits. */
+#define EC_MKBP_PCHG_EVENT_MASK(e) ((e) \
+ & GENMASK(EC_MKBP_PCHG_PORT_SHIFT-1, 0))
+
+#define EC_MKBP_PCHG_UPDATE_OPENED BIT(0)
+#define EC_MKBP_PCHG_WRITE_COMPLETE BIT(1)
+#define EC_MKBP_PCHG_UPDATE_CLOSED BIT(2)
+#define EC_MKBP_PCHG_UPDATE_ERROR BIT(3)
+#define EC_MKBP_PCHG_DEVICE_EVENT BIT(4)
+
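/*
 * Illustrative sketch (not part of the diff): splitting a raw PCHG MKBP event
 * word into its port number and event bits with the macros above.
 */
static void example_decode_pchg_event(uint32_t event)
{
	int port = EC_MKBP_PCHG_EVENT_TO_PORT(event);
	uint32_t bits = EC_MKBP_PCHG_EVENT_MASK(event);

	if (bits & EC_MKBP_PCHG_WRITE_COMPLETE) {
		/* the last EC_PCHG_UPDATE_CMD_WRITE on 'port' finished */
	}
	(void)port;
}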
+enum ec_pchg_update_cmd {
+ /* Reset chip to normal mode. */
+ EC_PCHG_UPDATE_CMD_RESET_TO_NORMAL = 0,
+ /* Reset and put a chip in update (a.k.a. download) mode. */
+ EC_PCHG_UPDATE_CMD_OPEN,
+ /* Write a block of data containing FW image. */
+ EC_PCHG_UPDATE_CMD_WRITE,
+ /* Close update session. */
+ EC_PCHG_UPDATE_CMD_CLOSE,
+ /* End of commands */
+ EC_PCHG_UPDATE_CMD_COUNT,
+};
+
+struct ec_params_pchg_update {
+ /* PCHG port number */
+ uint8_t port;
+ /* enum ec_pchg_update_cmd */
+ uint8_t cmd;
+ /* Padding */
+ uint8_t reserved0;
+ uint8_t reserved1;
+ /* Version of new firmware */
+ uint32_t version;
+ /* CRC32 of new firmware */
+ uint32_t crc32;
+ /* Address in chip memory where <data> is written to */
+ uint32_t addr;
+ /* Size of <data> */
+ uint32_t size;
+ /* Partial data of new firmware */
+ uint8_t data[];
+} __ec_align4;
+
+BUILD_ASSERT(EC_PCHG_UPDATE_CMD_COUNT
+ < BIT(sizeof(((struct ec_params_pchg_update *)0)->cmd)*8));
+
+struct ec_response_pchg_update {
+ /* Block size */
+ uint32_t block_size;
+} __ec_align4;
+
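/*
 * Illustrative sketch (not part of the diff): the update flow is OPEN (with
 * version/crc32), repeated WRITEs of at most block_size bytes, then CLOSE.
 * Only the OPEN step is shown; send_to_ec() as in the earlier sketches, and
 * the command version is an assumption.
 */
static int example_pchg_open(uint8_t port, uint32_t version, uint32_t crc32)
{
	struct ec_params_pchg_update p = {
		.port = port,
		.cmd = EC_PCHG_UPDATE_CMD_OPEN,
		.version = version,
		.crc32 = crc32,
	};
	struct ec_response_pchg_update r;
	int ret;

	ret = send_to_ec(EC_CMD_PCHG_UPDATE, 0, &p, sizeof(p), &r, sizeof(r));
	if (ret < 0)
		return ret;

	/* subsequent WRITEs must carry at most r.block_size bytes of data */
	return r.block_size;
}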
+
/*****************************************************************************/
/* Voltage regulator controls */
@@ -5577,7 +5945,7 @@ struct ec_response_typec_discovery {
uint8_t svid_count; /* Number of SVIDs partner sent */
uint16_t reserved;
uint32_t discovery_vdo[6]; /* Max VDOs allowed after VDM header is 6 */
- struct svid_mode_info svids[0];
+ struct svid_mode_info svids[];
} __ec_align1;
/* USB Type-C commands for AP-controlled device policy. */
@@ -5587,8 +5955,34 @@ enum typec_control_command {
TYPEC_CONTROL_COMMAND_EXIT_MODES,
TYPEC_CONTROL_COMMAND_CLEAR_EVENTS,
TYPEC_CONTROL_COMMAND_ENTER_MODE,
+ TYPEC_CONTROL_COMMAND_TBT_UFP_REPLY,
+ TYPEC_CONTROL_COMMAND_USB_MUX_SET,
+ TYPEC_CONTROL_COMMAND_BIST_SHARE_MODE,
+ TYPEC_CONTROL_COMMAND_SEND_VDM_REQ,
};
+/* Replies the AP may specify to the TBT EnterMode command as a UFP */
+enum typec_tbt_ufp_reply {
+ TYPEC_TBT_UFP_REPLY_NAK,
+ TYPEC_TBT_UFP_REPLY_ACK,
+};
+
+struct typec_usb_mux_set {
+ uint8_t mux_index; /* Index of the mux to set in the chain */
+ uint8_t mux_flags; /* USB_PD_MUX_*-encoded USB mux state to set */
+} __ec_align1;
+
+#define VDO_MAX_SIZE 7
+
+struct typec_vdm_req {
+ /* VDM data, including VDM header */
+ uint32_t vdm_data[VDO_MAX_SIZE];
+ /* Number of 32-bit fields filled in */
+ uint8_t vdm_data_objects;
+ /* Partner to address - see enum typec_partner_type */
+ uint8_t partner_type;
+} __ec_align1;
+
struct ec_params_typec_control {
uint8_t port;
uint8_t command; /* enum typec_control_command */
@@ -5602,6 +5996,10 @@ struct ec_params_typec_control {
union {
uint32_t clear_events_mask;
uint8_t mode_to_enter; /* enum typec_mode */
+ uint8_t tbt_ufp_reply; /* enum typec_tbt_ufp_reply */
+ struct typec_usb_mux_set mux_params;
+ /* Used for SEND_VDM_REQ */
+ struct typec_vdm_req vdm_req_params;
uint8_t placeholder[128];
};
} __ec_align1;
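/*
 * Illustrative sketch (not part of the diff): sending a one-VDO Discover
 * Identity request to the SOP partner. The 0xff008001 SVDM header and the
 * partner_type value of 0 (SOP) are assumptions for the sketch; send_to_ec()
 * as in the earlier sketches.
 */
static int example_send_vdm_req(uint8_t port)
{
	struct ec_params_typec_control p = {
		.port = port,
		.command = TYPEC_CONTROL_COMMAND_SEND_VDM_REQ,
		.vdm_req_params = {
			.vdm_data = { 0xff008001 /* Discover Identity */ },
			.vdm_data_objects = 1,
			.partner_type = 0, /* assumed: SOP */
		},
	};

	return send_to_ec(EC_CMD_TYPEC_CONTROL, 0, &p, sizeof(p), NULL, 0);
}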
@@ -5680,6 +6078,12 @@ enum tcpc_cc_polarity {
#define PD_STATUS_EVENT_SOP_DISC_DONE BIT(0)
#define PD_STATUS_EVENT_SOP_PRIME_DISC_DONE BIT(1)
#define PD_STATUS_EVENT_HARD_RESET BIT(2)
+#define PD_STATUS_EVENT_DISCONNECTED BIT(3)
+#define PD_STATUS_EVENT_MUX_0_SET_DONE BIT(4)
+#define PD_STATUS_EVENT_MUX_1_SET_DONE BIT(5)
+#define PD_STATUS_EVENT_VDM_REQ_REPLY BIT(6)
+#define PD_STATUS_EVENT_VDM_REQ_FAILED BIT(7)
+#define PD_STATUS_EVENT_VDM_ATTENTION BIT(8)
struct ec_params_typec_status {
uint8_t port;
@@ -5723,6 +6127,60 @@ struct ec_response_typec_status {
uint32_t sink_cap_pdos[7]; /* Max 7 PDOs can be present */
} __ec_align1;
+/*
+ * Gather the response to the most recent VDM REQ from the AP, as well
+ * as popping the oldest VDM:Attention from the DPM queue
+ */
+#define EC_CMD_TYPEC_VDM_RESPONSE 0x013C
+
+struct ec_params_typec_vdm_response {
+ uint8_t port;
+} __ec_align1;
+
+struct ec_response_typec_vdm_response {
+ /* Number of 32-bit fields filled in */
+ uint8_t vdm_data_objects;
+ /* Partner to address - see enum typec_partner_type */
+ uint8_t partner_type;
+ /* enum ec_status describing VDM response */
+ uint16_t vdm_response_err;
+ /* VDM data, including VDM header */
+ uint32_t vdm_response[VDO_MAX_SIZE];
+ /* Number of 32-bit Attention fields filled in */
+ uint8_t vdm_attention_objects;
+ /* Number of remaining messages to consume */
+ uint8_t vdm_attention_left;
+ /* Reserved */
+ uint16_t reserved1;
+ /* VDM:Attention contents */
+ uint32_t vdm_attention[2];
+} __ec_align1;
+
+#undef VDO_MAX_SIZE
+
+/*
+ * UCSI OPM-PPM commands
+ *
+ * These commands are used for communication between OPM and PPM.
+ * Only UCSI 3.0 has been tested.
+ */
+
+#define EC_CMD_UCSI_PPM_SET 0x0140
+
+/* The data size is stored in the host command protocol header. */
+struct ec_params_ucsi_ppm_set {
+ uint16_t offset;
+ uint8_t data[];
+} __ec_align2;
+
+#define EC_CMD_UCSI_PPM_GET 0x0141
+
+/* For 'GET' sub-commands, data will be returned as a raw payload. */
+struct ec_params_ucsi_ppm_get {
+ uint16_t offset;
+ uint8_t size;
+} __ec_align2;
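/*
 * Illustrative sketch (not part of the diff): reading 16 bytes of the UCSI
 * data structure from the PPM. The 0x10 MESSAGE_IN offset comes from the
 * UCSI spec and is an assumption here; send_to_ec() as in the earlier
 * sketches.
 */
static int example_ucsi_ppm_get(uint8_t *buf)
{
	struct ec_params_ucsi_ppm_get p = {
		.offset = 0x10, /* assumed MESSAGE_IN offset */
		.size = 16,
	};

	return send_to_ec(EC_CMD_UCSI_PPM_GET, 0, &p, sizeof(p), buf, p.size);
}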
+
/*****************************************************************************/
/* The command range 0x200-0x2FF is reserved for Rotor. */
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h
index 02599687770c..de14923720a5 100644
--- a/include/linux/platform_data/cros_ec_proto.h
+++ b/include/linux/platform_data/cros_ec_proto.h
@@ -9,6 +9,7 @@
#define __LINUX_CROS_EC_PROTO_H
#include <linux/device.h>
+#include <linux/lockdep_types.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
@@ -21,6 +22,9 @@
#define CROS_EC_DEV_SCP_NAME "cros_scp"
#define CROS_EC_DEV_TP_NAME "cros_tp"
+#define CROS_EC_DEV_EC_INDEX 0
+#define CROS_EC_DEV_PD_INDEX 1
+
/*
* The EC is unresponsive for a time after a reboot command. Add a
* simple delay to make sure that the bus stays locked.
@@ -29,15 +33,33 @@
/*
* Max bus-specific overhead incurred by request/responses.
- * I2C requires 1 additional byte for requests.
- * I2C requires 2 additional bytes for responses.
- * SPI requires up to 32 additional bytes for responses.
+ *
+ * Request:
+ * - I2C requires 1 byte (see struct ec_host_request_i2c).
+ * - ISHTP requires 4 bytes (see struct cros_ish_out_msg).
+ *
+ * Response:
+ * - I2C requires 2 bytes (see struct ec_host_response_i2c).
+ * - ISHTP requires 4 bytes (see struct cros_ish_in_msg).
+ * - SPI requires 32 bytes (see EC_MSG_PREAMBLE_COUNT).
*/
#define EC_PROTO_VERSION_UNKNOWN 0
-#define EC_MAX_REQUEST_OVERHEAD 1
+#define EC_MAX_REQUEST_OVERHEAD 4
#define EC_MAX_RESPONSE_OVERHEAD 32
/*
+ * ACPI notify value for MKBP host event.
+ */
+#define ACPI_NOTIFY_CROS_EC_MKBP 0x80
+
+/*
+ * EC panic is not covered by the standard (0-F) ACPI notify values.
+ * Arbitrarily choosing B0 to notify ec panic, which is in the 84-BF
+ * device specific ACPI notify range.
+ */
+#define ACPI_NOTIFY_CROS_EC_PANIC 0xB0
+
+/*
* Command interface between EC and AP, for LPC, I2C and SPI interfaces.
*/
enum {
@@ -76,8 +98,6 @@ struct cros_ec_command {
* struct cros_ec_device - Information about a ChromeOS EC device.
* @phys_name: Name of physical comms layer (e.g. 'i2c-4').
* @dev: Device pointer for physical comms device
- * @was_wake_device: True if this device was set to wake the system from
- * sleep at the last suspend.
* @cros_class: The class structure for this device.
* @cmd_readmem: Direct read of the EC memory-mapped region, if supported.
* @offset: Is within EC_LPC_ADDR_MEMMAP region.
@@ -108,12 +128,15 @@ struct cros_ec_command {
* @dout_size: Size of dout buffer to allocate (zero to use static dout).
* @wake_enabled: True if this device can wake the system from sleep.
* @suspended: True if this device had been suspended.
+ * @registered: True if this device had been registered.
* @cmd_xfer: Send command to EC and get response.
* Returns the number of bytes received if the communication
* succeeded, but that doesn't mean the EC was happy with the
* command. The caller should check msg.result for the EC's result
* code.
* @pkt_xfer: Send packet to EC and get response.
+ * @lockdep_key: Lockdep class for each instance. Unused if CONFIG_LOCKDEP is
+ * not enabled.
* @lock: One transaction at a time.
* @mkbp_event_supported: 0 if MKBP not supported. Otherwise its value is
* the maximum supported version of the MKBP host event
@@ -123,6 +146,15 @@ struct cros_ec_command {
* @event_data: Raw payload transferred with the MKBP event.
* @event_size: Size in bytes of the event data.
* @host_event_wake_mask: Mask of host events that cause wake from suspend.
+ * @suspend_timeout_ms: The timeout in milliseconds between when sleep event
+ * is received and when the EC will declare sleep
+ * transition failure if the sleep signal is not
+ * asserted. See also struct
+ * ec_params_host_sleep_event_v1 in cros_ec_commands.h.
+ * @last_resume_result: The number of sleep power signal transitions that
+ * occurred since the suspend message. The high bit
+ * indicates a timeout occurred. See also struct
+ * ec_response_host_sleep_event_v1 in cros_ec_commands.h.
* @last_event_time: exact time from the hard irq when we got notified of
* a new event.
* @notifier_ready: The notifier_block to let the kernel re-query EC
@@ -132,12 +164,12 @@ struct cros_ec_command {
* main EC.
* @pd: The platform_device used by the mfd driver to interface with the
* PD behind an EC.
+ * @panic_notifier: EC panic notifier.
*/
struct cros_ec_device {
/* These are used by other drivers that want to talk to the EC */
const char *phys_name;
struct device *dev;
- bool was_wake_device;
struct class *cros_class;
int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset,
unsigned int bytes, void *dest);
@@ -155,25 +187,30 @@ struct cros_ec_device {
int dout_size;
bool wake_enabled;
bool suspended;
+ bool registered;
int (*cmd_xfer)(struct cros_ec_device *ec,
struct cros_ec_command *msg);
int (*pkt_xfer)(struct cros_ec_device *ec,
struct cros_ec_command *msg);
+ struct lock_class_key lockdep_key;
struct mutex lock;
u8 mkbp_event_supported;
bool host_sleep_v1;
struct blocking_notifier_head event_notifier;
- struct ec_response_get_next_event_v1 event_data;
+ struct ec_response_get_next_event_v3 event_data;
int event_size;
u32 host_event_wake_mask;
u32 last_resume_result;
+ u16 suspend_timeout_ms;
ktime_t last_event_time;
struct notifier_block notifier_ready;
/* The platform devices used by the mfd driver */
struct platform_device *ec;
struct platform_device *pd;
+
+ struct blocking_notifier_head panic_notifier;
};
/**
@@ -205,7 +242,7 @@ struct cros_ec_dev {
struct cros_ec_debugfs *debug_info;
bool has_kb_wake_angle;
u16 cmd_offset;
- u32 features[2];
+ struct ec_response_get_features features;
};
#define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev)
@@ -216,9 +253,14 @@ int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
int cros_ec_check_result(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
+int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
+
int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
+int cros_ec_rwsig_continue(struct cros_ec_device *ec_dev);
+
int cros_ec_query_all(struct cros_ec_device *ec_dev);
int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
@@ -227,10 +269,19 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);
-int cros_ec_check_features(struct cros_ec_dev *ec, int feature);
+bool cros_ec_check_features(struct cros_ec_dev *ec, int feature);
int cros_ec_get_sensor_count(struct cros_ec_dev *ec);
+int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, const void *outdata,
+ size_t outsize, void *indata, size_t insize);
+
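/*
 * Illustrative sketch (not part of the diff): cros_ec_cmd() wraps the
 * allocate/fill/transfer boilerplate for one-shot commands, e.g. fetching
 * the peripheral charger port count (negative errno on failure, otherwise
 * the number of response bytes; command and response are defined in
 * cros_ec_commands.h above).
 */
static inline int example_pchg_count(struct cros_ec_device *ec_dev)
{
	struct ec_response_pchg_count resp;
	int ret;

	ret = cros_ec_cmd(ec_dev, 0, EC_CMD_PCHG_COUNT, NULL, 0,
			  &resp, sizeof(resp));
	return ret < 0 ? ret : resp.port_count;
}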
+int cros_ec_cmd_readmem(struct cros_ec_device *ec_dev, u8 offset, u8 size, void *dest);
+
+int cros_ec_get_cmd_versions(struct cros_ec_device *ec_dev, u16 cmd);
+
+bool cros_ec_device_registered(struct cros_ec_device *ec_dev);
+
/**
* cros_ec_get_time_ns() - Return time in ns.
*
diff --git a/include/linux/platform_data/cyttsp4.h b/include/linux/platform_data/cyttsp4.h
deleted file mode 100644
index 5dc9d2be384b..000000000000
--- a/include/linux/platform_data/cyttsp4.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Header file for:
- * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers.
- * For use with Cypress Txx3xx parts.
- * Supported parts include:
- * CY8CTST341
- * CY8CTMA340
- *
- * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
- * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
- *
- * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com)
- */
-#ifndef _CYTTSP4_H_
-#define _CYTTSP4_H_
-
-#define CYTTSP4_MT_NAME "cyttsp4_mt"
-#define CYTTSP4_I2C_NAME "cyttsp4_i2c_adapter"
-#define CYTTSP4_SPI_NAME "cyttsp4_spi_adapter"
-
-#define CY_TOUCH_SETTINGS_MAX 32
-
-struct touch_framework {
- const uint16_t *abs;
- uint8_t size;
- uint8_t enable_vkeys;
-} __packed;
-
-struct cyttsp4_mt_platform_data {
- struct touch_framework *frmwrk;
- unsigned short flags;
- char const *inp_dev_name;
-};
-
-struct touch_settings {
- const uint8_t *data;
- uint32_t size;
- uint8_t tag;
-} __packed;
-
-struct cyttsp4_core_platform_data {
- int irq_gpio;
- int rst_gpio;
- int level_irq_udelay;
- int (*xres)(struct cyttsp4_core_platform_data *pdata,
- struct device *dev);
- int (*init)(struct cyttsp4_core_platform_data *pdata,
- int on, struct device *dev);
- int (*power)(struct cyttsp4_core_platform_data *pdata,
- int on, struct device *dev, atomic_t *ignore_irq);
- int (*irq_stat)(struct cyttsp4_core_platform_data *pdata,
- struct device *dev);
- struct touch_settings *sett[CY_TOUCH_SETTINGS_MAX];
-};
-
-struct cyttsp4_platform_data {
- struct cyttsp4_core_platform_data *core_pdata;
- struct cyttsp4_mt_platform_data *mt_pdata;
-};
-
-#endif /* _CYTTSP4_H_ */
diff --git a/include/linux/platform_data/davinci-cpufreq.h b/include/linux/platform_data/davinci-cpufreq.h
index bc208c64e3d7..1ef91c36f609 100644
--- a/include/linux/platform_data/davinci-cpufreq.h
+++ b/include/linux/platform_data/davinci-cpufreq.h
@@ -16,4 +16,10 @@ struct davinci_cpufreq_config {
int (*init)(void);
};
+#ifdef CONFIG_CPU_FREQ
+int davinci_cpufreq_init(void);
+#else
+static inline int davinci_cpufreq_init(void) { return 0; }
+#endif
+
#endif /* _MACH_DAVINCI_CPUFREQ_H */
diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h
index 5d1fb0d78a22..b9c8520b4bd3 100644
--- a/include/linux/platform_data/davinci_asp.h
+++ b/include/linux/platform_data/davinci_asp.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* TI DaVinci Audio Serial Port support
*
* Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DAVINCI_ASP_H
@@ -34,16 +26,6 @@ struct davinci_mcasp_pdata {
struct gen_pool *sram_pool;
/*
- * If McBSP peripheral gets the clock from an external pin,
- * there are three chooses, that are MCBSP_CLKX, MCBSP_CLKR
- * and MCBSP_CLKS.
- * Depending on different hardware connections it is possible
- * to use this setting to change the behaviour of McBSP
- * driver.
- */
- int clk_input_pin;
-
- /*
* This flag works when both clock and FS are outputs for the cpu
* and makes clock more accurate (FS is not symmetrical and the
* clock is very fast).
@@ -96,11 +78,7 @@ enum {
MCASP_VERSION_2, /* DA8xx/OMAPL1x */
MCASP_VERSION_3, /* TI81xx/AM33xx */
MCASP_VERSION_4, /* DRA7xxx */
-};
-
-enum mcbsp_clk_input_pin {
- MCBSP_CLKR = 0, /* as in DM365 */
- MCBSP_CLKS,
+ MCASP_VERSION_OMAP, /* OMAP4/5 */
};
#define INACTIVE_MODE 0
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
index b34a094b2258..860ba4bc5ead 100644
--- a/include/linux/platform_data/dma-dw.h
+++ b/include/linux/platform_data/dma-dw.h
@@ -41,36 +41,39 @@ struct dw_dma_slave {
/**
* struct dw_dma_platform_data - Controller configuration parameters
+ * @nr_masters: Number of AHB masters supported by the controller
* @nr_channels: Number of channels supported by hardware (max 8)
* @chan_allocation_order: Allocate channels starting from 0 or 7
* @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
* @block_size: Maximum block size supported by the controller
- * @nr_masters: Number of AHB masters supported by the controller
* @data_width: Maximum data width supported by hardware per AHB master
* (in bytes, power of 2)
* @multi_block: Multi block transfers supported by hardware per channel.
* @max_burst: Maximum value of burst transaction size supported by hardware
* per channel (in units of CTL.SRC_TR_WIDTH/CTL.DST_TR_WIDTH).
* @protctl: Protection control signals setting per channel.
+ * @quirks: Optional platform quirks.
*/
struct dw_dma_platform_data {
- unsigned int nr_channels;
+ u32 nr_masters;
+ u32 nr_channels;
#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
- unsigned char chan_allocation_order;
+ u32 chan_allocation_order;
#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
- unsigned char chan_priority;
- unsigned int block_size;
- unsigned char nr_masters;
- unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
- unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS];
+ u32 chan_priority;
+ u32 block_size;
+ u32 data_width[DW_DMA_MAX_NR_MASTERS];
+ u32 multi_block[DW_DMA_MAX_NR_CHANNELS];
u32 max_burst[DW_DMA_MAX_NR_CHANNELS];
#define CHAN_PROTCTL_PRIVILEGED BIT(0)
#define CHAN_PROTCTL_BUFFERABLE BIT(1)
#define CHAN_PROTCTL_CACHEABLE BIT(2)
#define CHAN_PROTCTL_MASK GENMASK(2, 0)
- unsigned char protctl;
+ u32 protctl;
+#define DW_DMA_QUIRK_XBAR_PRESENT BIT(0)
+ u32 quirks;
};
#endif /* _PLATFORM_DATA_DMA_DW_H */
diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h
deleted file mode 100644
index eb9805bb3fe8..000000000000
--- a/include/linux/platform_data/dma-ep93xx.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_DMA_H
-#define __ASM_ARCH_DMA_H
-
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-
-/*
- * M2P channels.
- *
- * Note that these values are also directly used for setting the PPALLOC
- * register.
- */
-#define EP93XX_DMA_I2S1 0
-#define EP93XX_DMA_I2S2 1
-#define EP93XX_DMA_AAC1 2
-#define EP93XX_DMA_AAC2 3
-#define EP93XX_DMA_AAC3 4
-#define EP93XX_DMA_I2S3 5
-#define EP93XX_DMA_UART1 6
-#define EP93XX_DMA_UART2 7
-#define EP93XX_DMA_UART3 8
-#define EP93XX_DMA_IRDA 9
-/* M2M channels */
-#define EP93XX_DMA_SSP 10
-#define EP93XX_DMA_IDE 11
-
-/**
- * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine
- * @port: peripheral which is requesting the channel
- * @direction: TX/RX channel
- * @name: optional name for the channel, this is displayed in /proc/interrupts
- *
- * This information is passed as private channel parameter in a filter
- * function. Note that this is only needed for slave/cyclic channels. For
- * memcpy channels %NULL data should be passed.
- */
-struct ep93xx_dma_data {
- int port;
- enum dma_transfer_direction direction;
- const char *name;
-};
-
-/**
- * struct ep93xx_dma_chan_data - platform specific data for a DMA channel
- * @name: name of the channel, used for getting the right clock for the channel
- * @base: mapped registers
- * @irq: interrupt number used by this channel
- */
-struct ep93xx_dma_chan_data {
- const char *name;
- void __iomem *base;
- int irq;
-};
-
-/**
- * struct ep93xx_dma_platform_data - platform data for the dmaengine driver
- * @channels: array of channels which are passed to the driver
- * @num_channels: number of channels in the array
- *
- * This structure is passed to the DMA engine driver via platform data. For
- * M2P channels, contract is that even channels are for TX and odd for RX.
- * There is no requirement for the M2M channels.
- */
-struct ep93xx_dma_platform_data {
- struct ep93xx_dma_chan_data *channels;
- size_t num_channels;
-};
-
-static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
-{
- return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
-}
-
-/**
- * ep93xx_dma_chan_direction - returns direction the channel can be used
- * @chan: channel
- *
- * This function can be used in filter functions to find out whether the
- * channel supports given DMA direction. Only M2P channels have such
- * limitation, for M2M channels the direction is configurable.
- */
-static inline enum dma_transfer_direction
-ep93xx_dma_chan_direction(struct dma_chan *chan)
-{
- if (!ep93xx_dma_chan_is_m2p(chan))
- return DMA_TRANS_NONE;
-
- /* even channels are for TX, odd for RX */
- return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
-}
-
-#endif /* __ASM_ARCH_DMA_H */
diff --git a/include/linux/platform_data/dma-hsu.h b/include/linux/platform_data/dma-hsu.h
index c65b412b2b33..611bae193c1c 100644
--- a/include/linux/platform_data/dma-hsu.h
+++ b/include/linux/platform_data/dma-hsu.h
@@ -8,7 +8,7 @@
#ifndef _PLATFORM_DATA_DMA_HSU_H
#define _PLATFORM_DATA_DMA_HSU_H
-#include <linux/device.h>
+struct device;
struct hsu_dma_slave {
struct device *dma_dev;
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
deleted file mode 100644
index 725602d9df91..000000000000
--- a/include/linux/platform_data/dma-imx-sdma.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __MACH_MXC_SDMA_H__
-#define __MACH_MXC_SDMA_H__
-
-/**
- * struct sdma_script_start_addrs - SDMA script start pointers
- *
- * start addresses of the different functions in the physical
- * address space of the SDMA engine.
- */
-struct sdma_script_start_addrs {
- s32 ap_2_ap_addr;
- s32 ap_2_bp_addr;
- s32 ap_2_ap_fixed_addr;
- s32 bp_2_ap_addr;
- s32 loopback_on_dsp_side_addr;
- s32 mcu_interrupt_only_addr;
- s32 firi_2_per_addr;
- s32 firi_2_mcu_addr;
- s32 per_2_firi_addr;
- s32 mcu_2_firi_addr;
- s32 uart_2_per_addr;
- s32 uart_2_mcu_addr;
- s32 per_2_app_addr;
- s32 mcu_2_app_addr;
- s32 per_2_per_addr;
- s32 uartsh_2_per_addr;
- s32 uartsh_2_mcu_addr;
- s32 per_2_shp_addr;
- s32 mcu_2_shp_addr;
- s32 ata_2_mcu_addr;
- s32 mcu_2_ata_addr;
- s32 app_2_per_addr;
- s32 app_2_mcu_addr;
- s32 shp_2_per_addr;
- s32 shp_2_mcu_addr;
- s32 mshc_2_mcu_addr;
- s32 mcu_2_mshc_addr;
- s32 spdif_2_mcu_addr;
- s32 mcu_2_spdif_addr;
- s32 asrc_2_mcu_addr;
- s32 ext_mem_2_ipu_addr;
- s32 descrambler_addr;
- s32 dptc_dvfs_addr;
- s32 utra_addr;
- s32 ram_code_start_addr;
- /* End of v1 array */
- s32 mcu_2_ssish_addr;
- s32 ssish_2_mcu_addr;
- s32 hdmi_dma_addr;
- /* End of v2 array */
- s32 zcanfd_2_mcu_addr;
- s32 zqspi_2_mcu_addr;
- s32 mcu_2_ecspi_addr;
- /* End of v3 array */
- s32 mcu_2_zqspi_addr;
- /* End of v4 array */
-};
-
-#endif /* __MACH_MXC_SDMA_H__ */
diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h
deleted file mode 100644
index 8bec5484dc86..000000000000
--- a/include/linux/platform_data/dma-mmp_tdma.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SRAM Memory Management
- *
- * Copyright (c) 2011 Marvell Semiconductors Inc.
- */
-
-#ifndef __DMA_MMP_TDMA_H
-#define __DMA_MMP_TDMA_H
-
-#include <linux/genalloc.h>
-
-/* ARBITRARY: SRAM allocations are multiples of this 2^N size */
-#define SRAM_GRANULARITY 512
-
-enum sram_type {
- MMP_SRAM_UNDEFINED = 0,
- MMP_ASRAM,
- MMP_ISRAM,
-};
-
-struct sram_platdata {
- char *pool_name;
- int granularity;
-};
-
-#ifdef CONFIG_MMP_SRAM
-extern struct gen_pool *sram_get_gpool(char *pool_name);
-#else
-static inline struct gen_pool *sram_get_gpool(char *pool_name)
-{
- return NULL;
-}
-#endif
-
-#endif /* __DMA_MMP_TDMA_H */
diff --git a/include/linux/platform_data/dma-s3c24xx.h b/include/linux/platform_data/dma-s3c24xx.h
deleted file mode 100644
index 96d02dbeea67..000000000000
--- a/include/linux/platform_data/dma-s3c24xx.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * S3C24XX DMA handling
- *
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- */
-
-/* Helper to encode the source selection constraints for early s3c socs. */
-#define S3C24XX_DMA_CHANREQ(src, chan) ((BIT(3) | src) << chan * 4)
-
-enum s3c24xx_dma_bus {
- S3C24XX_DMA_APB,
- S3C24XX_DMA_AHB,
-};
-
-/**
- * @bus: on which bus does the peripheral reside - AHB or APB.
- * @handshake: is a handshake with the peripheral necessary
- * @chansel: channel selection information, depending on variant; reqsel for
- * s3c2443 and later and channel-selection map for earlier SoCs
- * see CHANSEL doc in s3c2443-dma.c
- */
-struct s3c24xx_dma_channel {
- enum s3c24xx_dma_bus bus;
- bool handshake;
- u16 chansel;
-};
-
-struct dma_slave_map;
-
-/**
- * struct s3c24xx_dma_platdata - platform specific settings
- * @num_phy_channels: number of physical channels
- * @channels: array of virtual channel descriptions
- * @num_channels: number of virtual channels
- * @slave_map: dma slave map matching table
- * @slavecnt: number of elements in slave_map
- */
-struct s3c24xx_dma_platdata {
- int num_phy_channels;
- struct s3c24xx_dma_channel *channels;
- int num_channels;
- const struct dma_slave_map *slave_map;
- int slavecnt;
-};
-
-struct dma_chan;
-bool s3c24xx_dma_filter(struct dma_chan *chan, void *param);
diff --git a/include/linux/platform_data/dma-ste-dma40.h b/include/linux/platform_data/dma-ste-dma40.h
deleted file mode 100644
index 10641633facc..000000000000
--- a/include/linux/platform_data/dma-ste-dma40.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2007-2010
- * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
- */
-
-
-#ifndef STE_DMA40_H
-#define STE_DMA40_H
-
-#include <linux/dmaengine.h>
-#include <linux/scatterlist.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-
-/*
- * Maxium size for a single dma descriptor
- * Size is limited to 16 bits.
- * Size is in the units of addr-widths (1,2,4,8 bytes)
- * Larger transfers will be split up to multiple linked desc
- */
-#define STEDMA40_MAX_SEG_SIZE 0xFFFF
-
-/* dev types for memcpy */
-#define STEDMA40_DEV_DST_MEMORY (-1)
-#define STEDMA40_DEV_SRC_MEMORY (-1)
-
-enum stedma40_mode {
- STEDMA40_MODE_LOGICAL = 0,
- STEDMA40_MODE_PHYSICAL,
- STEDMA40_MODE_OPERATION,
-};
-
-enum stedma40_mode_opt {
- STEDMA40_PCHAN_BASIC_MODE = 0,
- STEDMA40_LCHAN_SRC_LOG_DST_LOG = 0,
- STEDMA40_PCHAN_MODULO_MODE,
- STEDMA40_PCHAN_DOUBLE_DST_MODE,
- STEDMA40_LCHAN_SRC_PHY_DST_LOG,
- STEDMA40_LCHAN_SRC_LOG_DST_PHY,
-};
-
-#define STEDMA40_ESIZE_8_BIT 0x0
-#define STEDMA40_ESIZE_16_BIT 0x1
-#define STEDMA40_ESIZE_32_BIT 0x2
-#define STEDMA40_ESIZE_64_BIT 0x3
-
-/* The value 4 indicates that PEN-reg shall be set to 0 */
-#define STEDMA40_PSIZE_PHY_1 0x4
-#define STEDMA40_PSIZE_PHY_2 0x0
-#define STEDMA40_PSIZE_PHY_4 0x1
-#define STEDMA40_PSIZE_PHY_8 0x2
-#define STEDMA40_PSIZE_PHY_16 0x3
-
-/*
- * The number of elements differ in logical and
- * physical mode
- */
-#define STEDMA40_PSIZE_LOG_1 STEDMA40_PSIZE_PHY_2
-#define STEDMA40_PSIZE_LOG_4 STEDMA40_PSIZE_PHY_4
-#define STEDMA40_PSIZE_LOG_8 STEDMA40_PSIZE_PHY_8
-#define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16
-
-/* Maximum number of possible physical channels */
-#define STEDMA40_MAX_PHYS 32
-
-enum stedma40_flow_ctrl {
- STEDMA40_NO_FLOW_CTRL,
- STEDMA40_FLOW_CTRL,
-};
-
-/**
- * struct stedma40_half_channel_info - dst/src channel configuration
- *
- * @big_endian: true if the src/dst should be read as big endian
- * @data_width: Data width of the src/dst hardware
- * @p_size: Burst size
- * @flow_ctrl: Flow control on/off.
- */
-struct stedma40_half_channel_info {
- bool big_endian;
- enum dma_slave_buswidth data_width;
- int psize;
- enum stedma40_flow_ctrl flow_ctrl;
-};
-
-/**
- * struct stedma40_chan_cfg - Structure to be filled by client drivers.
- *
- * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
- * @high_priority: true if high-priority
- * @realtime: true if realtime mode is to be enabled. Only available on DMA40
- * version 3+, i.e DB8500v2+
- * @mode: channel mode: physical, logical, or operation
- * @mode_opt: options for the chosen channel mode
- * @dev_type: src/dst device type (driver uses dir to figure out which)
- * @src_info: Parameters for dst half channel
- * @dst_info: Parameters for dst half channel
- * @use_fixed_channel: if true, use physical channel specified by phy_channel
- * @phy_channel: physical channel to use, only if use_fixed_channel is true
- *
- * This structure has to be filled by the client drivers.
- * It is recommended to do all dma configurations for clients in the machine.
- *
- */
-struct stedma40_chan_cfg {
- enum dma_transfer_direction dir;
- bool high_priority;
- bool realtime;
- enum stedma40_mode mode;
- enum stedma40_mode_opt mode_opt;
- int dev_type;
- struct stedma40_half_channel_info src_info;
- struct stedma40_half_channel_info dst_info;
-
- bool use_fixed_channel;
- int phy_channel;
-};
-
-/**
- * struct stedma40_platform_data - Configuration struct for the dma device.
- *
- * @dev_tx: mapping between destination event line and io address
- * @dev_rx: mapping between source event line and io address
- * @disabled_channels: A vector, ending with -1, that marks physical channels
- * that are for different reasons not available for the driver.
- * @soft_lli_chans: A vector, that marks physical channels will use LLI by SW
- * which avoids HW bug that exists in some versions of the controller.
- * SoftLLI introduces relink overhead that could impact performace for
- * certain use cases.
- * @num_of_soft_lli_chans: The number of channels that needs to be configured
- * to use SoftLLI.
- * @use_esram_lcla: flag for mapping the lcla into esram region
- * @num_of_memcpy_chans: The number of channels reserved for memcpy.
- * @num_of_phy_chans: The number of physical channels implemented in HW.
- * 0 means reading the number of channels from DMA HW but this is only valid
- * for 'multiple of 4' channels, like 8.
- */
-struct stedma40_platform_data {
- int disabled_channels[STEDMA40_MAX_PHYS];
- int *soft_lli_chans;
- int num_of_soft_lli_chans;
- bool use_esram_lcla;
- int num_of_memcpy_chans;
- int num_of_phy_chans;
-};
-
-#ifdef CONFIG_STE_DMA40
-
-/**
- * stedma40_filter() - Provides stedma40_chan_cfg to the
- * ste_dma40 dma driver via the dmaengine framework.
- * does some checking of what's provided.
- *
- * Never directly called by client. It used by dmaengine.
- * @chan: dmaengine handle.
- * @data: Must be of type: struct stedma40_chan_cfg and is
- * the configuration of the framework.
- *
- *
- */
-
-bool stedma40_filter(struct dma_chan *chan, void *data);
-
-/**
- * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
- * (=device)
- *
- * @chan: dmaengine handle
- * @addr: source or destination physicall address.
- * @size: bytes to transfer
- * @direction: direction of transfer
- * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
- */
-
-static inline struct
-dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
- dma_addr_t addr,
- unsigned int size,
- enum dma_transfer_direction direction,
- unsigned long flags)
-{
- struct scatterlist sg;
- sg_init_table(&sg, 1);
- sg.dma_address = addr;
- sg.length = size;
-
- return dmaengine_prep_slave_sg(chan, &sg, 1, direction, flags);
-}
-
-#else
-static inline bool stedma40_filter(struct dma_chan *chan, void *data)
-{
- return false;
-}
-
-static inline struct
-dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
- dma_addr_t addr,
- unsigned int size,
- enum dma_transfer_direction direction,
- unsigned long flags)
-{
- return NULL;
-}
-#endif
-
-#endif
diff --git a/include/linux/platform_data/dmtimer-omap.h b/include/linux/platform_data/dmtimer-omap.h
index 95d852aef130..726d89143842 100644
--- a/include/linux/platform_data/dmtimer-omap.h
+++ b/include/linux/platform_data/dmtimer-omap.h
@@ -36,9 +36,13 @@ struct omap_dm_timer_ops {
int (*set_pwm)(struct omap_dm_timer *timer, int def_on,
int toggle, int trigger, int autoreload);
int (*get_pwm_status)(struct omap_dm_timer *timer);
+ int (*set_cap)(struct omap_dm_timer *timer,
+ int autoreload, bool config_period);
+ int (*get_cap_status)(struct omap_dm_timer *timer);
int (*set_prescaler)(struct omap_dm_timer *timer, int prescaler);
unsigned int (*read_counter)(struct omap_dm_timer *timer);
+ unsigned int (*read_cap)(struct omap_dm_timer *timer, bool is_period);
int (*write_counter)(struct omap_dm_timer *timer,
unsigned int value);
unsigned int (*read_status)(struct omap_dm_timer *timer);
diff --git a/include/linux/platform_data/emc2305.h b/include/linux/platform_data/emc2305.h
new file mode 100644
index 000000000000..76043a97f975
--- /dev/null
+++ b/include/linux/platform_data/emc2305.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_PLATFORM_DATA_EMC2305__
+#define __LINUX_PLATFORM_DATA_EMC2305__
+
+#define EMC2305_PWM_MAX 5
+
+/**
+ * struct emc2305_platform_data - EMC2305 driver platform data
+ * @max_state: maximum cooling state of the cooling device
+ * @pwm_num: number of active channels
+ * @pwm_output_mask: PWM output mask
+ * @pwm_polarity_mask: PWM polarity mask
+ * @pwm_separate: separate PWM settings for every channel
+ * @pwm_min: array of minimum PWM per channel
+ * @pwm_freq: array of PWM frequency per channel
+ */
+struct emc2305_platform_data {
+ u8 max_state;
+ u8 pwm_num;
+ u8 pwm_output_mask;
+ u8 pwm_polarity_mask;
+ bool pwm_separate;
+ u8 pwm_min[EMC2305_PWM_MAX];
+ u16 pwm_freq[EMC2305_PWM_MAX];
+};
+
+#endif
diff --git a/include/linux/platform_data/eth-ep93xx.h b/include/linux/platform_data/eth-ep93xx.h
deleted file mode 100644
index 8eef637a804d..000000000000
--- a/include/linux/platform_data/eth-ep93xx.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PLATFORM_DATA_ETH_EP93XX
-#define _LINUX_PLATFORM_DATA_ETH_EP93XX
-
-struct ep93xx_eth_data {
- unsigned char dev_addr[6];
- unsigned char phy_id;
-};
-
-#endif
diff --git a/include/linux/platform_data/eth_ixp4xx.h b/include/linux/platform_data/eth_ixp4xx.h
deleted file mode 100644
index 114b0940729f..000000000000
--- a/include/linux/platform_data/eth_ixp4xx.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PLATFORM_DATA_ETH_IXP4XX
-#define __PLATFORM_DATA_ETH_IXP4XX
-
-#include <linux/types.h>
-
-#define IXP4XX_ETH_NPEA 0x00
-#define IXP4XX_ETH_NPEB 0x10
-#define IXP4XX_ETH_NPEC 0x20
-
-/* Information about built-in Ethernet MAC interfaces */
-struct eth_plat_info {
- u8 phy; /* MII PHY ID, 0 - 31 */
- u8 rxq; /* configurable, currently 0 - 31 only */
- u8 txreadyq;
- u8 hwaddr[6];
- u8 npe; /* NPE instance used by this interface */
- bool has_mdio; /* If this instance has an MDIO bus */
-};
-
-#endif
diff --git a/include/linux/platform_data/fb-s3c2410.h b/include/linux/platform_data/fb-s3c2410.h
deleted file mode 100644
index 10c11e6316d6..000000000000
--- a/include/linux/platform_data/fb-s3c2410.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2004 Arnaud Patard <arnaud.patard@rtp-net.org>
- *
- * Inspired by pxafb.h
-*/
-
-#ifndef __ASM_PLAT_FB_S3C2410_H
-#define __ASM_PLAT_FB_S3C2410_H __FILE__
-
-#include <linux/compiler_types.h>
-
-struct s3c2410fb_hw {
- unsigned long lcdcon1;
- unsigned long lcdcon2;
- unsigned long lcdcon3;
- unsigned long lcdcon4;
- unsigned long lcdcon5;
-};
-
-/* LCD description */
-struct s3c2410fb_display {
- /* LCD type */
- unsigned type;
-#define S3C2410_LCDCON1_DSCAN4 (0<<5)
-#define S3C2410_LCDCON1_STN4 (1<<5)
-#define S3C2410_LCDCON1_STN8 (2<<5)
-#define S3C2410_LCDCON1_TFT (3<<5)
-
-#define S3C2410_LCDCON1_TFT1BPP (8<<1)
-#define S3C2410_LCDCON1_TFT2BPP (9<<1)
-#define S3C2410_LCDCON1_TFT4BPP (10<<1)
-#define S3C2410_LCDCON1_TFT8BPP (11<<1)
-#define S3C2410_LCDCON1_TFT16BPP (12<<1)
-#define S3C2410_LCDCON1_TFT24BPP (13<<1)
-
- /* Screen size */
- unsigned short width;
- unsigned short height;
-
- /* Screen info */
- unsigned short xres;
- unsigned short yres;
- unsigned short bpp;
-
- unsigned pixclock; /* pixclock in picoseconds */
- unsigned short left_margin; /* value in pixels (TFT) or HCLKs (STN) */
- unsigned short right_margin; /* value in pixels (TFT) or HCLKs (STN) */
- unsigned short hsync_len; /* value in pixels (TFT) or HCLKs (STN) */
- unsigned short upper_margin; /* value in lines (TFT) or 0 (STN) */
- unsigned short lower_margin; /* value in lines (TFT) or 0 (STN) */
- unsigned short vsync_len; /* value in lines (TFT) or 0 (STN) */
-
- /* lcd configuration registers */
- unsigned long lcdcon5;
-#define S3C2410_LCDCON5_BPP24BL (1<<12)
-#define S3C2410_LCDCON5_FRM565 (1<<11)
-#define S3C2410_LCDCON5_INVVCLK (1<<10)
-#define S3C2410_LCDCON5_INVVLINE (1<<9)
-#define S3C2410_LCDCON5_INVVFRAME (1<<8)
-#define S3C2410_LCDCON5_INVVD (1<<7)
-#define S3C2410_LCDCON5_INVVDEN (1<<6)
-#define S3C2410_LCDCON5_INVPWREN (1<<5)
-#define S3C2410_LCDCON5_INVLEND (1<<4)
-#define S3C2410_LCDCON5_PWREN (1<<3)
-#define S3C2410_LCDCON5_ENLEND (1<<2)
-#define S3C2410_LCDCON5_BSWP (1<<1)
-#define S3C2410_LCDCON5_HWSWP (1<<0)
-};
-
-struct s3c2410fb_mach_info {
-
- struct s3c2410fb_display *displays; /* attached displays info */
- unsigned num_displays; /* number of defined displays */
- unsigned default_display;
-
- /* GPIOs */
-
- unsigned long gpcup;
- unsigned long gpcup_mask;
- unsigned long gpccon;
- unsigned long gpccon_mask;
- unsigned long gpdup;
- unsigned long gpdup_mask;
- unsigned long gpdcon;
- unsigned long gpdcon_mask;
-
- void __iomem * gpccon_reg;
- void __iomem * gpcup_reg;
- void __iomem * gpdcon_reg;
- void __iomem * gpdup_reg;
-
- /* lpc3600 control register */
- unsigned long lpcsel;
-};
-
-extern void s3c24xx_fb_set_platdata(struct s3c2410fb_mach_info *);
-
-#endif /* __ASM_PLAT_FB_S3C2410_H */
diff --git a/include/linux/platform_data/gpio-ath79.h b/include/linux/platform_data/gpio-ath79.h
deleted file mode 100644
index 3ea6dd942c27..000000000000
--- a/include/linux/platform_data/gpio-ath79.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Atheros AR7XXX/AR9XXX GPIO controller platform data
- *
- * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_GPIO_ATH79_H
-#define __LINUX_PLATFORM_DATA_GPIO_ATH79_H
-
-struct ath79_gpio_platform_data {
- unsigned ngpios;
- bool oe_inverted;
-};
-
-#endif
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
deleted file mode 100644
index e182a46e609f..000000000000
--- a/include/linux/platform_data/gpio-davinci.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * DaVinci GPIO Platform Related Defines
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __DAVINCI_GPIO_PLATFORM_H
-#define __DAVINCI_GPIO_PLATFORM_H
-
-struct davinci_gpio_platform_data {
- bool no_auto_base;
- u32 base;
- u32 ngpio;
- u32 gpio_unbanked;
-};
-
-/* Convert GPIO signal to GPIO pin number */
-#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio))
-
-#endif
diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h
deleted file mode 100644
index 0aa5c6720259..000000000000
--- a/include/linux/platform_data/gpio-dwapb.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright(c) 2014 Intel Corporation.
- */
-
-#ifndef GPIO_DW_APB_H
-#define GPIO_DW_APB_H
-
-#define DWAPB_MAX_GPIOS 32
-
-struct dwapb_port_property {
- struct fwnode_handle *fwnode;
- unsigned int idx;
- unsigned int ngpio;
- unsigned int gpio_base;
- int irq[DWAPB_MAX_GPIOS];
- bool irq_shared;
-};
-
-struct dwapb_platform_data {
- struct dwapb_port_property *properties;
- unsigned int nports;
-};
-
-#endif
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
index f377817ce75c..cdd8cfb424f5 100644
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -144,9 +144,6 @@
#define OMAP_MAX_GPIO_LINES 192
-#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr))
-#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES)
-
#ifndef __ASSEMBLER__
struct omap_gpio_reg_offs {
u16 revision;
diff --git a/include/linux/platform_data/gpio_backlight.h b/include/linux/platform_data/gpio_backlight.h
index 1a8b5b1946fe..323fbf5f7613 100644
--- a/include/linux/platform_data/gpio_backlight.h
+++ b/include/linux/platform_data/gpio_backlight.h
@@ -8,7 +8,7 @@
struct device;
struct gpio_backlight_platform_data {
- struct device *fbdev;
+ struct device *dev;
};
#endif
diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h
index c9cc4e32435d..dcca6c5e23bb 100644
--- a/include/linux/platform_data/gpmc-omap.h
+++ b/include/linux/platform_data/gpmc-omap.h
@@ -136,6 +136,13 @@ struct gpmc_device_timings {
#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */
#define GPMC_MUX_AD 2 /* Addr-Data multiplex */
+/* Wait pin polarity values */
+#define GPMC_WAITPINPOLARITY_INVALID UINT_MAX
+#define GPMC_WAITPINPOLARITY_ACTIVE_LOW 0
+#define GPMC_WAITPINPOLARITY_ACTIVE_HIGH 1
+
+#define GPMC_WAITPIN_INVALID UINT_MAX
+
struct gpmc_settings {
bool burst_wrap; /* enables wrap bursting */
bool burst_read; /* enables read page/burst mode */
@@ -149,6 +156,7 @@ struct gpmc_settings {
u32 device_width; /* device bus width (8 or 16 bit) */
u32 mux_add_data; /* multiplex address & data */
u32 wait_pin; /* wait-pin to be used */
+ u32 wait_pin_polarity;
};
/* Data for each chip select */
diff --git a/include/linux/platform_data/gsc_hwmon.h b/include/linux/platform_data/gsc_hwmon.h
index 281f499eda97..70e8a6bec0f6 100644
--- a/include/linux/platform_data/gsc_hwmon.h
+++ b/include/linux/platform_data/gsc_hwmon.h
@@ -29,18 +29,17 @@ struct gsc_hwmon_channel {
/**
* struct gsc_hwmon_platform_data - platform data for gsc_hwmon driver
- * @channels: pointer to array of gsc_hwmon_channel structures
- * describing channels
* @nchannels: number of elements in @channels array
* @vreference: voltage reference (mV)
* @resolution: ADC bit resolution
* @fan_base: register base for FAN controller
+ * @channels: array of gsc_hwmon_channel structures describing channels
*/
struct gsc_hwmon_platform_data {
- const struct gsc_hwmon_channel *channels;
int nchannels;
unsigned int resolution;
unsigned int vreference;
unsigned int fan_base;
+ struct gsc_hwmon_channel channels[] __counted_by(nchannels);
};
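/*
 * Illustrative sketch (not part of the diff): with the trailing flexible
 * array, the platform data and its channels can be allocated in one shot,
 * and __counted_by(nchannels) lets hardened bounds checking police accesses:
 *
 *	pdata = kzalloc(struct_size(pdata, channels, n), GFP_KERNEL);
 *	if (pdata)
 *		pdata->nchannels = n;
 */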
#endif
diff --git a/include/linux/platform_data/hirschmann-hellcreek.h b/include/linux/platform_data/hirschmann-hellcreek.h
index 6a000df5541f..8748680e9e3c 100644
--- a/include/linux/platform_data/hirschmann-hellcreek.h
+++ b/include/linux/platform_data/hirschmann-hellcreek.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
* Hirschmann Hellcreek TSN switch platform data.
*
diff --git a/include/linux/platform_data/huawei-gaokun-ec.h b/include/linux/platform_data/huawei-gaokun-ec.h
new file mode 100644
index 000000000000..faa15d315128
--- /dev/null
+++ b/include/linux/platform_data/huawei-gaokun-ec.h
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Huawei Matebook E Go Embedded Controller
+ *
+ * Copyright (C) 2024-2025 Pengyu Luo <mitltlatltl@gmail.com>
+ */
+
+#ifndef __HUAWEI_GAOKUN_EC_H__
+#define __HUAWEI_GAOKUN_EC_H__
+
+#define GAOKUN_UCSI_CCI_SIZE 4
+#define GAOKUN_UCSI_MSGI_SIZE 16
+#define GAOKUN_UCSI_READ_SIZE (GAOKUN_UCSI_CCI_SIZE + GAOKUN_UCSI_MSGI_SIZE)
+#define GAOKUN_UCSI_WRITE_SIZE 24 /* 8B CTRL, 16B MSGO */
+
+#define GAOKUN_UCSI_NO_PORT_UPDATE (-1)
+
+#define GAOKUN_SMART_CHARGE_DATA_SIZE 4 /* mode, delay, start, end */
+
+/* -------------------------------------------------------------------------- */
+
+struct gaokun_ec;
+struct gaokun_ucsi_reg;
+struct notifier_block;
+
+#define GAOKUN_MOD_NAME "huawei_gaokun_ec"
+#define GAOKUN_DEV_PSY "psy"
+#define GAOKUN_DEV_UCSI "ucsi"
+
+/* -------------------------------------------------------------------------- */
+/* Common API */
+
+int gaokun_ec_register_notify(struct gaokun_ec *ec,
+ struct notifier_block *nb);
+void gaokun_ec_unregister_notify(struct gaokun_ec *ec,
+ struct notifier_block *nb);
+
+int gaokun_ec_read(struct gaokun_ec *ec, const u8 *req,
+ size_t resp_len, u8 *resp);
+int gaokun_ec_write(struct gaokun_ec *ec, const u8 *req);
+int gaokun_ec_read_byte(struct gaokun_ec *ec, const u8 *req, u8 *byte);
+
+/* -------------------------------------------------------------------------- */
+/* API for PSY */
+
+int gaokun_ec_psy_multi_read(struct gaokun_ec *ec, u8 reg,
+ size_t resp_len, u8 *resp);
+
+static inline int gaokun_ec_psy_read_byte(struct gaokun_ec *ec,
+ u8 reg, u8 *byte)
+{
+ return gaokun_ec_psy_multi_read(ec, reg, sizeof(*byte), byte);
+}
+
+static inline int gaokun_ec_psy_read_word(struct gaokun_ec *ec,
+ u8 reg, u16 *word)
+{
+ return gaokun_ec_psy_multi_read(ec, reg, sizeof(*word), (u8 *)word);
+}
+
+int gaokun_ec_psy_get_smart_charge(struct gaokun_ec *ec,
+ u8 resp[GAOKUN_SMART_CHARGE_DATA_SIZE]);
+int gaokun_ec_psy_set_smart_charge(struct gaokun_ec *ec,
+ const u8 req[GAOKUN_SMART_CHARGE_DATA_SIZE]);
+
+int gaokun_ec_psy_get_smart_charge_enable(struct gaokun_ec *ec, bool *on);
+int gaokun_ec_psy_set_smart_charge_enable(struct gaokun_ec *ec, bool on);
+
+/* -------------------------------------------------------------------------- */
+/* API for UCSI */
+
+int gaokun_ec_ucsi_read(struct gaokun_ec *ec, u8 resp[GAOKUN_UCSI_READ_SIZE]);
+int gaokun_ec_ucsi_write(struct gaokun_ec *ec,
+ const u8 req[GAOKUN_UCSI_WRITE_SIZE]);
+
+int gaokun_ec_ucsi_get_reg(struct gaokun_ec *ec, struct gaokun_ucsi_reg *ureg);
+int gaokun_ec_ucsi_pan_ack(struct gaokun_ec *ec, int port_id);
+
+#endif /* __HUAWEI_GAOKUN_EC_H__ */
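
Note: a minimal sketch (not from the patch) of a consumer of this new EC API, combining the notifier and PSY read helpers; the event handler body and the EC register number are assumptions for illustration only:

	#include <linux/notifier.h>
	#include <linux/platform_data/huawei-gaokun-ec.h>

	#define EC_REG_BAT_LEVEL	0x90	/* hypothetical register */

	static int gaokun_event(struct notifier_block *nb, unsigned long action,
				void *data)
	{
		return NOTIFY_OK;
	}

	static int gaokun_demo(struct gaokun_ec *ec)
	{
		static struct notifier_block nb = { .notifier_call = gaokun_event };
		u8 level;
		int ret;

		ret = gaokun_ec_register_notify(ec, &nb);
		if (ret)
			return ret;

		ret = gaokun_ec_psy_read_byte(ec, EC_REG_BAT_LEVEL, &level);
		gaokun_ec_unregister_notify(ec, &nb);
		return ret;
	}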
diff --git a/include/linux/platform_data/hwmon-s3c.h b/include/linux/platform_data/hwmon-s3c.h
index 1707ad4147df..7d21e0c41037 100644
--- a/include/linux/platform_data/hwmon-s3c.h
+++ b/include/linux/platform_data/hwmon-s3c.h
@@ -33,14 +33,4 @@ struct s3c_hwmon_pdata {
struct s3c_hwmon_chcfg *in[8];
};
-/**
- * s3c_hwmon_set_platdata - Set platform data for S3C HWMON device
- * @pd: Platform data to register to device.
- *
- * Register the given platform data for use with the S3C HWMON device.
- * The call will copy the platform data, so the board definitions can
- * make the structure itself __initdata.
- */
-extern void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd);
-
#endif /* __HWMON_S3C_H__ */
diff --git a/include/linux/platform_data/i2c-davinci.h b/include/linux/platform_data/i2c-davinci.h
deleted file mode 100644
index 98967df07468..000000000000
--- a/include/linux/platform_data/i2c-davinci.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * DaVinci I2C controller platform_device info
- *
- * Author: Vladimir Barinov, MontaVista Software, Inc. <source@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
-*/
-
-#ifndef __ASM_ARCH_I2C_H
-#define __ASM_ARCH_I2C_H
-
-/* All frequencies are expressed in kHz */
-struct davinci_i2c_platform_data {
- unsigned int bus_freq; /* standard bus frequency (kHz) */
- unsigned int bus_delay; /* post-transaction delay (usec) */
- bool gpio_recovery; /* Use GPIO recovery method */
- bool has_pfunc; /* Chip has a ICPFUNC register */
-};
-
-/* for board setup code */
-void davinci_init_i2c(struct davinci_i2c_platform_data *);
-
-#endif /* __ASM_ARCH_I2C_H */
diff --git a/include/linux/platform_data/i2c-gpio.h b/include/linux/platform_data/i2c-gpio.h
index a907774fd177..545639bcca72 100644
--- a/include/linux/platform_data/i2c-gpio.h
+++ b/include/linux/platform_data/i2c-gpio.h
@@ -16,16 +16,25 @@
* isn't actively driven high when setting the output value high.
* gpio_get_value() must return the actual pin state even if the
* pin is configured as an output.
+ * @sda_is_output_only: SDA output drivers can't be turned off.
+ * This is for clients that can only read SDA/SCL.
+ * @sda_has_no_pullup: SDA is used in a non-compliant way and has no pull-up.
+ * Therefore disable open-drain.
* @scl_is_open_drain: SCL is set up as open drain. Same requirements
* as for sda_is_open_drain apply.
* @scl_is_output_only: SCL output drivers cannot be turned off.
+ * @scl_has_no_pullup: SCL is used in a non-compliant way and has no pull-up.
+ * Therefore disable open-drain.
*/
struct i2c_gpio_platform_data {
int udelay;
int timeout;
unsigned int sda_is_open_drain:1;
+ unsigned int sda_is_output_only:1;
+ unsigned int sda_has_no_pullup:1;
unsigned int scl_is_open_drain:1;
unsigned int scl_is_output_only:1;
+ unsigned int scl_has_no_pullup:1;
};
#endif /* _LINUX_I2C_GPIO_H */
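
Note: the two new *_has_no_pullup flags cover boards that drive the lines through push-pull buffers with no pull-up, where open-drain signalling must be disabled. A minimal sketch; the board wiring and delay value are assumptions:

	#include <linux/platform_data/i2c-gpio.h>

	static struct i2c_gpio_platform_data bitbang_pdata = {
		.udelay		   = 5,	/* ~100 kHz, board-specific */
		/* lines go through a push-pull level shifter, no pull-ups */
		.sda_has_no_pullup = 1,
		.scl_has_no_pullup = 1,
	};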
diff --git a/include/linux/platform_data/i2c-mux-gpio.h b/include/linux/platform_data/i2c-mux-gpio.h
index 5e4c2c272a73..96843aab4d1e 100644
--- a/include/linux/platform_data/i2c-mux-gpio.h
+++ b/include/linux/platform_data/i2c-mux-gpio.h
@@ -18,16 +18,16 @@
* @values: Array of bitmasks of GPIO settings (low/high) for each
* position
* @n_values: Number of multiplexer positions (busses to instantiate)
- * @classes: Optional I2C auto-detection classes
* @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used
+ * @settle_time: Delay to wait when a new bus is selected
*/
struct i2c_mux_gpio_platform_data {
int parent;
int base_nr;
const unsigned *values;
int n_values;
- const unsigned *classes;
unsigned idle;
+ u32 settle_time;
};
#endif /* _LINUX_I2C_MUX_GPIO_H */
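
Note: settle_time replaces the dropped auto-detection classes with a post-switch delay. A minimal sketch; the adapter number is hypothetical and the microsecond unit is an assumption matching the driver's DT property:

	#include <linux/kernel.h>
	#include <linux/platform_data/i2c-mux-gpio.h>

	static const unsigned int mux_values[] = { 0, 1, 2, 3 };

	static struct i2c_mux_gpio_platform_data mux_pdata = {
		.parent	     = 1,	/* hypothetical upstream adapter */
		.values	     = mux_values,
		.n_values    = ARRAY_SIZE(mux_values),
		.settle_time = 100,	/* wait after selecting a new bus */
	};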
diff --git a/include/linux/platform_data/i2c-mux-reg.h b/include/linux/platform_data/i2c-mux-reg.h
index 2543c2a1c9ae..e2e895768311 100644
--- a/include/linux/platform_data/i2c-mux-reg.h
+++ b/include/linux/platform_data/i2c-mux-reg.h
@@ -17,7 +17,6 @@
* @n_values: Number of multiplexer channels
* @little_endian: Indicating if the register is in little endian
* @write_only: Reading the register is not allowed by hardware
- * @classes: Optional I2C auto-detection classes
* @idle: Value to write to mux when idle
* @idle_in_use: indicate if idle value is in use
* @reg: Virtual address of the register to switch channel
@@ -30,7 +29,6 @@ struct i2c_mux_reg_platform_data {
int n_values;
bool little_endian;
bool write_only;
- const unsigned int *classes;
u32 idle;
bool idle_in_use;
void __iomem *reg;
diff --git a/include/linux/platform_data/irda-pxaficp.h b/include/linux/platform_data/irda-pxaficp.h
deleted file mode 100644
index bd35ddcf3068..000000000000
--- a/include/linux/platform_data/irda-pxaficp.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASMARM_ARCH_IRDA_H
-#define ASMARM_ARCH_IRDA_H
-
-/* board specific transceiver capabilities */
-
-#define IR_OFF 1
-#define IR_SIRMODE 2
-#define IR_FIRMODE 4
-
-struct pxaficp_platform_data {
- int transceiver_cap;
- void (*transceiver_mode)(struct device *dev, int mode);
- int (*startup)(struct device *dev);
- void (*shutdown)(struct device *dev);
- int gpio_pwdown; /* powerdown GPIO for the IrDA chip */
- bool gpio_pwdown_inverted; /* gpio_pwdown is inverted */
-};
-
-extern void pxa_set_ficp_info(struct pxaficp_platform_data *info);
-
-#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
-void pxa2xx_transceiver_mode(struct device *dev, int mode);
-#endif
-
-#endif
diff --git a/include/linux/platform_data/irda-sa11x0.h b/include/linux/platform_data/irda-sa11x0.h
deleted file mode 100644
index 7db59c917575..000000000000
--- a/include/linux/platform_data/irda-sa11x0.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/include/asm/mach/irda.h
- *
- * Copyright (C) 2004 Russell King.
- */
-#ifndef __ASM_ARM_MACH_IRDA_H
-#define __ASM_ARM_MACH_IRDA_H
-
-struct irda_platform_data {
- int (*startup)(struct device *);
- void (*shutdown)(struct device *);
- int (*set_power)(struct device *, unsigned int state);
- void (*set_speed)(struct device *, unsigned int speed);
-};
-
-#endif
diff --git a/include/linux/platform_data/keyboard-pxa930_rotary.h b/include/linux/platform_data/keyboard-pxa930_rotary.h
deleted file mode 100644
index 3271aa01cbe8..000000000000
--- a/include/linux/platform_data/keyboard-pxa930_rotary.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_PXA930_ROTARY_H
-#define __ASM_ARCH_PXA930_ROTARY_H
-
-/* NOTE:
- *
- * rotary can be either interpreted as a ralative input event (e.g.
- * REL_WHEEL or REL_HWHEEL) or a specific key event (e.g. UP/DOWN
- * or LEFT/RIGHT), depending on if up_key & down_key are assigned
- * or rel_code is assigned a non-zero value. When all are non-zero,
- * up_key and down_key will be preferred.
- */
-struct pxa930_rotary_platform_data {
- int up_key;
- int down_key;
- int rel_code;
-};
-
-void __init pxa930_set_rotarykey_info(struct pxa930_rotary_platform_data *info);
-
-#endif /* __ASM_ARCH_PXA930_ROTARY_H */
diff --git a/include/linux/platform_data/keyboard-spear.h b/include/linux/platform_data/keyboard-spear.h
deleted file mode 100644
index 5e3ff653900c..000000000000
--- a/include/linux/platform_data/keyboard-spear.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (C) 2010 ST Microelectronics
- * Rajeev Kumar <rajeevkumar.linux@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __PLAT_KEYBOARD_H
-#define __PLAT_KEYBOARD_H
-
-#include <linux/bitops.h>
-#include <linux/input.h>
-#include <linux/input/matrix_keypad.h>
-#include <linux/types.h>
-
-#define DECLARE_9x9_KEYMAP(_name) \
-int _name[] = { \
- KEY(0, 0, KEY_ESC), \
- KEY(0, 1, KEY_1), \
- KEY(0, 2, KEY_2), \
- KEY(0, 3, KEY_3), \
- KEY(0, 4, KEY_4), \
- KEY(0, 5, KEY_5), \
- KEY(0, 6, KEY_6), \
- KEY(0, 7, KEY_7), \
- KEY(0, 8, KEY_8), \
- KEY(1, 0, KEY_9), \
- KEY(1, 1, KEY_MINUS), \
- KEY(1, 2, KEY_EQUAL), \
- KEY(1, 3, KEY_BACKSPACE), \
- KEY(1, 4, KEY_TAB), \
- KEY(1, 5, KEY_Q), \
- KEY(1, 6, KEY_W), \
- KEY(1, 7, KEY_E), \
- KEY(1, 8, KEY_R), \
- KEY(2, 0, KEY_T), \
- KEY(2, 1, KEY_Y), \
- KEY(2, 2, KEY_U), \
- KEY(2, 3, KEY_I), \
- KEY(2, 4, KEY_O), \
- KEY(2, 5, KEY_P), \
- KEY(2, 6, KEY_LEFTBRACE), \
- KEY(2, 7, KEY_RIGHTBRACE), \
- KEY(2, 8, KEY_ENTER), \
- KEY(3, 0, KEY_LEFTCTRL), \
- KEY(3, 1, KEY_A), \
- KEY(3, 2, KEY_S), \
- KEY(3, 3, KEY_D), \
- KEY(3, 4, KEY_F), \
- KEY(3, 5, KEY_G), \
- KEY(3, 6, KEY_H), \
- KEY(3, 7, KEY_J), \
- KEY(3, 8, KEY_K), \
- KEY(4, 0, KEY_L), \
- KEY(4, 1, KEY_SEMICOLON), \
- KEY(4, 2, KEY_APOSTROPHE), \
- KEY(4, 3, KEY_GRAVE), \
- KEY(4, 4, KEY_LEFTSHIFT), \
- KEY(4, 5, KEY_BACKSLASH), \
- KEY(4, 6, KEY_Z), \
- KEY(4, 7, KEY_X), \
- KEY(4, 8, KEY_C), \
- KEY(5, 0, KEY_V), \
- KEY(5, 1, KEY_B), \
- KEY(5, 2, KEY_N), \
- KEY(5, 3, KEY_M), \
- KEY(5, 4, KEY_COMMA), \
- KEY(5, 5, KEY_DOT), \
- KEY(5, 6, KEY_SLASH), \
- KEY(5, 7, KEY_RIGHTSHIFT), \
- KEY(5, 8, KEY_KPASTERISK), \
- KEY(6, 0, KEY_LEFTALT), \
- KEY(6, 1, KEY_SPACE), \
- KEY(6, 2, KEY_CAPSLOCK), \
- KEY(6, 3, KEY_F1), \
- KEY(6, 4, KEY_F2), \
- KEY(6, 5, KEY_F3), \
- KEY(6, 6, KEY_F4), \
- KEY(6, 7, KEY_F5), \
- KEY(6, 8, KEY_F6), \
- KEY(7, 0, KEY_F7), \
- KEY(7, 1, KEY_F8), \
- KEY(7, 2, KEY_F9), \
- KEY(7, 3, KEY_F10), \
- KEY(7, 4, KEY_NUMLOCK), \
- KEY(7, 5, KEY_SCROLLLOCK), \
- KEY(7, 6, KEY_KP7), \
- KEY(7, 7, KEY_KP8), \
- KEY(7, 8, KEY_KP9), \
- KEY(8, 0, KEY_KPMINUS), \
- KEY(8, 1, KEY_KP4), \
- KEY(8, 2, KEY_KP5), \
- KEY(8, 3, KEY_KP6), \
- KEY(8, 4, KEY_KPPLUS), \
- KEY(8, 5, KEY_KP1), \
- KEY(8, 6, KEY_KP2), \
- KEY(8, 7, KEY_KP3), \
- KEY(8, 8, KEY_KP0), \
-}
-
-#define DECLARE_6x6_KEYMAP(_name) \
-int _name[] = { \
- KEY(0, 0, KEY_RESERVED), \
- KEY(0, 1, KEY_1), \
- KEY(0, 2, KEY_2), \
- KEY(0, 3, KEY_3), \
- KEY(0, 4, KEY_4), \
- KEY(0, 5, KEY_5), \
- KEY(1, 0, KEY_Q), \
- KEY(1, 1, KEY_W), \
- KEY(1, 2, KEY_E), \
- KEY(1, 3, KEY_R), \
- KEY(1, 4, KEY_T), \
- KEY(1, 5, KEY_Y), \
- KEY(2, 0, KEY_D), \
- KEY(2, 1, KEY_F), \
- KEY(2, 2, KEY_G), \
- KEY(2, 3, KEY_H), \
- KEY(2, 4, KEY_J), \
- KEY(2, 5, KEY_K), \
- KEY(3, 0, KEY_B), \
- KEY(3, 1, KEY_N), \
- KEY(3, 2, KEY_M), \
- KEY(3, 3, KEY_COMMA), \
- KEY(3, 4, KEY_DOT), \
- KEY(3, 5, KEY_SLASH), \
- KEY(4, 0, KEY_F6), \
- KEY(4, 1, KEY_F7), \
- KEY(4, 2, KEY_F8), \
- KEY(4, 3, KEY_F9), \
- KEY(4, 4, KEY_F10), \
- KEY(4, 5, KEY_NUMLOCK), \
- KEY(5, 0, KEY_KP2), \
- KEY(5, 1, KEY_KP3), \
- KEY(5, 2, KEY_KP0), \
- KEY(5, 3, KEY_KPDOT), \
- KEY(5, 4, KEY_RO), \
- KEY(5, 5, KEY_ZENKAKUHANKAKU), \
-}
-
-#define KEYPAD_9x9 0
-#define KEYPAD_6x6 1
-#define KEYPAD_2x2 2
-
-/**
- * struct kbd_platform_data - spear keyboard platform data
- * keymap: pointer to keymap data (table and size)
- * rep: enables key autorepeat
- * mode: choose keyboard support(9x9, 6x6, 2x2)
- * suspended_rate: rate at which keyboard would operate in suspended mode
- *
- * This structure is supposed to be used by platform code to supply
- * keymaps to drivers that implement keyboards.
- */
-struct kbd_platform_data {
- const struct matrix_keymap_data *keymap;
- bool rep;
- unsigned int mode;
- unsigned int suspended_rate;
-};
-
-#endif /* __PLAT_KEYBOARD_H */
diff --git a/include/linux/platform_data/keypad-ep93xx.h b/include/linux/platform_data/keypad-ep93xx.h
deleted file mode 100644
index 3054fced8509..000000000000
--- a/include/linux/platform_data/keypad-ep93xx.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __KEYPAD_EP93XX_H
-#define __KEYPAD_EP93XX_H
-
-struct matrix_keymap_data;
-
-/* flags for the ep93xx_keypad driver */
-#define EP93XX_KEYPAD_DISABLE_3_KEY (1<<0) /* disable 3-key reset */
-#define EP93XX_KEYPAD_DIAG_MODE (1<<1) /* diagnostic mode */
-#define EP93XX_KEYPAD_BACK_DRIVE (1<<2) /* back driving mode */
-#define EP93XX_KEYPAD_TEST_MODE (1<<3) /* scan only column 0 */
-#define EP93XX_KEYPAD_AUTOREPEAT (1<<4) /* enable key autorepeat */
-
-/**
- * struct ep93xx_keypad_platform_data - platform specific device structure
- * @keymap_data: pointer to &matrix_keymap_data
- * @debounce: debounce start count; terminal count is 0xff
- * @prescale: row/column counter pre-scaler load value
- * @flags: see above
- */
-struct ep93xx_keypad_platform_data {
- struct matrix_keymap_data *keymap_data;
- unsigned int debounce;
- unsigned int prescale;
- unsigned int flags;
- unsigned int clk_rate;
-};
-
-#define EP93XX_MATRIX_ROWS (8)
-#define EP93XX_MATRIX_COLS (8)
-
-#endif /* __KEYPAD_EP93XX_H */
diff --git a/include/linux/platform_data/keypad-nomadik-ske.h b/include/linux/platform_data/keypad-nomadik-ske.h
deleted file mode 100644
index 7efabbca1dca..000000000000
--- a/include/linux/platform_data/keypad-nomadik-ske.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- *
- * Author: Naveen Kumar Gaddipati <naveen.gaddipati@stericsson.com>
- *
- * ux500 Scroll key and Keypad Encoder (SKE) header
- */
-
-#ifndef __SKE_H
-#define __SKE_H
-
-#include <linux/input/matrix_keypad.h>
-
-/* register definitions for SKE peripheral */
-#define SKE_CR 0x00
-#define SKE_VAL0 0x04
-#define SKE_VAL1 0x08
-#define SKE_DBCR 0x0C
-#define SKE_IMSC 0x10
-#define SKE_RIS 0x14
-#define SKE_MIS 0x18
-#define SKE_ICR 0x1C
-
-/*
- * Keypad module
- */
-
-/**
- * struct keypad_platform_data - structure for platform specific data
- * @init: pointer to keypad init function
- * @exit: pointer to keypad deinitialisation function
- * @keymap_data: matrix scan code table for keycodes
- * @krow: maximum number of rows
- * @kcol: maximum number of columns
- * @debounce_ms: platform specific debounce time
- * @no_autorepeat: flag for auto repetition
- * @wakeup_enable: allow waking up the system
- */
-struct ske_keypad_platform_data {
- int (*init)(void);
- int (*exit)(void);
- const struct matrix_keymap_data *keymap_data;
- u8 krow;
- u8 kcol;
- u8 debounce_ms;
- bool no_autorepeat;
- bool wakeup_enable;
-};
-#endif /*__SKE_KPD_H*/
diff --git a/include/linux/platform_data/keypad-omap.h b/include/linux/platform_data/keypad-omap.h
index 3e7c64c854f4..f3f1311cdf3a 100644
--- a/include/linux/platform_data/keypad-omap.h
+++ b/include/linux/platform_data/keypad-omap.h
@@ -19,9 +19,6 @@ struct omap_kp_platform_data {
bool rep;
unsigned long delay;
bool dbounce;
- /* specific to OMAP242x*/
- unsigned int *row_gpios;
- unsigned int *col_gpios;
};
/* Group (0..3) -- when multiple keys are pressed, only the
diff --git a/include/linux/platform_data/keypad-pxa27x.h b/include/linux/platform_data/keypad-pxa27x.h
deleted file mode 100644
index a376442b9935..000000000000
--- a/include/linux/platform_data/keypad-pxa27x.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_PXA27x_KEYPAD_H
-#define __ASM_ARCH_PXA27x_KEYPAD_H
-
-#include <linux/input.h>
-#include <linux/input/matrix_keypad.h>
-
-#define MAX_MATRIX_KEY_ROWS (8)
-#define MAX_MATRIX_KEY_COLS (8)
-#define MATRIX_ROW_SHIFT (3)
-#define MAX_DIRECT_KEY_NUM (8)
-
-/* pxa3xx keypad platform specific parameters
- *
- * NOTE:
- * 1. direct_key_num indicates the number of keys in the direct keypad
- * _plus_ the number of rotary-encoder sensor inputs, this can be
- * left as 0 if only rotary encoders are enabled, the driver will
- * automatically calculate this
- *
- * 2. direct_key_map is the key code map for the direct keys, if rotary
- * encoder(s) are enabled, direct key 0/1(2/3) will be ignored
- *
- * 3. rotary can be either interpreted as a relative input event (e.g.
- * REL_WHEEL/REL_HWHEEL) or specific keys (e.g. UP/DOWN/LEFT/RIGHT)
- *
- * 4. matrix key and direct key will use the same debounce_interval by
- * default, which should be sufficient in most cases
- *
- * pxa168 keypad platform specific parameter
- *
- * NOTE:
- * clear_wakeup_event callback is a workaround required to clear the
- * keypad interrupt. The keypad wake must be cleared in addition to
- * reading the MI/DI bits in the KPC register.
- */
-struct pxa27x_keypad_platform_data {
-
- /* code map for the matrix keys */
- const struct matrix_keymap_data *matrix_keymap_data;
- unsigned int matrix_key_rows;
- unsigned int matrix_key_cols;
-
- /* direct keys */
- int direct_key_num;
- unsigned int direct_key_map[MAX_DIRECT_KEY_NUM];
- /* the key output may be low active */
- int direct_key_low_active;
- /* give board a chance to choose the start direct key */
- unsigned int direct_key_mask;
-
- /* rotary encoders 0 */
- int enable_rotary0;
- int rotary0_rel_code;
- int rotary0_up_key;
- int rotary0_down_key;
-
- /* rotary encoders 1 */
- int enable_rotary1;
- int rotary1_rel_code;
- int rotary1_up_key;
- int rotary1_down_key;
-
- /* key debounce interval */
- unsigned int debounce_interval;
-
- /* clear wakeup event requirement for pxa168 */
- void (*clear_wakeup_event)(void);
-};
-
-extern void pxa_set_keypad_info(struct pxa27x_keypad_platform_data *info);
-
-#endif /* __ASM_ARCH_PXA27x_KEYPAD_H */
diff --git a/include/linux/platform_data/keyscan-davinci.h b/include/linux/platform_data/keyscan-davinci.h
deleted file mode 100644
index 260d596ba0af..000000000000
--- a/include/linux/platform_data/keyscan-davinci.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009 Texas Instruments, Inc
- *
- * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
- */
-
-#ifndef DAVINCI_KEYSCAN_H
-#define DAVINCI_KEYSCAN_H
-
-#include <linux/io.h>
-
-enum davinci_matrix_types {
- DAVINCI_KEYSCAN_MATRIX_4X4,
- DAVINCI_KEYSCAN_MATRIX_5X3,
-};
-
-struct davinci_ks_platform_data {
- int (*device_enable)(struct device *dev);
- unsigned short *keymap;
- u32 keymapsize;
- u8 rep:1;
- u8 strobe;
- u8 interval;
- u8 matrix_type;
-};
-
-#endif
-
diff --git a/include/linux/platform_data/lcd-mipid.h b/include/linux/platform_data/lcd-mipid.h
index 63f05eb23827..4927cfc5158c 100644
--- a/include/linux/platform_data/lcd-mipid.h
+++ b/include/linux/platform_data/lcd-mipid.h
@@ -15,10 +15,8 @@ enum mipid_test_result {
#ifdef __KERNEL__
struct mipid_platform_data {
- int nreset_gpio;
int data_lines;
- void (*shutdown)(struct mipid_platform_data *pdata);
void (*set_bklight_level)(struct mipid_platform_data *pdata,
int level);
int (*get_bklight_level)(struct mipid_platform_data *pdata);
diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h
index 3441064713a3..3cc8db0b12b5 100644
--- a/include/linux/platform_data/leds-lp55xx.h
+++ b/include/linux/platform_data/leds-lp55xx.h
@@ -73,6 +73,9 @@ struct lp55xx_platform_data {
/* Clock configuration */
u8 clock_mode;
+ /* Charge pump mode */
+ u32 charge_pump_mode;
+
/* optional enable GPIO */
struct gpio_desc *enable_gpiod;
diff --git a/include/linux/platform_data/leds-omap.h b/include/linux/platform_data/leds-omap.h
deleted file mode 100644
index dd1a3ec86fe4..000000000000
--- a/include/linux/platform_data/leds-omap.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2006 Samsung Electronics
- * Kyungmin Park <kyungmin.park@samsung.com>
- */
-#ifndef ASMARM_ARCH_LED_H
-#define ASMARM_ARCH_LED_H
-
-struct omap_led_config {
- struct led_classdev cdev;
- s16 gpio;
-};
-
-struct omap_led_platform_data {
- s16 nr_leds;
- struct omap_led_config *leds;
-};
-
-#endif
diff --git a/include/linux/platform_data/leds-s3c24xx.h b/include/linux/platform_data/leds-s3c24xx.h
deleted file mode 100644
index 64f8d14876e0..000000000000
--- a/include/linux/platform_data/leds-s3c24xx.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2006 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX - LEDs GPIO connector
-*/
-
-#ifndef __LEDS_S3C24XX_H
-#define __LEDS_S3C24XX_H
-
-struct s3c24xx_led_platdata {
- char *name;
- char *def_trigger;
-};
-
-#endif /* __LEDS_S3C24XX_H */
diff --git a/include/linux/platform_data/lenovo-yoga-c630.h b/include/linux/platform_data/lenovo-yoga-c630.h
new file mode 100644
index 000000000000..5d1f9fb33cfc
--- /dev/null
+++ b/include/linux/platform_data/lenovo-yoga-c630.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022-2024, Linaro Ltd
+ * Authors:
+ * Bjorn Andersson
+ * Dmitry Baryshkov
+ */
+
+#ifndef _LENOVO_YOGA_C630_DATA_H
+#define _LENOVO_YOGA_C630_DATA_H
+
+struct yoga_c630_ec;
+struct notifier_block;
+
+#define YOGA_C630_MOD_NAME "lenovo_yoga_c630"
+
+#define YOGA_C630_DEV_UCSI "ucsi"
+#define YOGA_C630_DEV_PSY "psy"
+
+int yoga_c630_ec_read8(struct yoga_c630_ec *ec, u8 addr);
+int yoga_c630_ec_read16(struct yoga_c630_ec *ec, u8 addr);
+
+int yoga_c630_ec_register_notify(struct yoga_c630_ec *ec, struct notifier_block *nb);
+void yoga_c630_ec_unregister_notify(struct yoga_c630_ec *ec, struct notifier_block *nb);
+
+#define YOGA_C630_UCSI_WRITE_SIZE 8
+#define YOGA_C630_UCSI_CCI_SIZE 4
+#define YOGA_C630_UCSI_DATA_SIZE 16
+#define YOGA_C630_UCSI_READ_SIZE (YOGA_C630_UCSI_CCI_SIZE + YOGA_C630_UCSI_DATA_SIZE)
+
+u16 yoga_c630_ec_ucsi_get_version(struct yoga_c630_ec *ec);
+int yoga_c630_ec_ucsi_write(struct yoga_c630_ec *ec,
+ const u8 req[YOGA_C630_UCSI_WRITE_SIZE]);
+int yoga_c630_ec_ucsi_read(struct yoga_c630_ec *ec,
+ u8 resp[YOGA_C630_UCSI_READ_SIZE]);
+
+#define LENOVO_EC_EVENT_USB 0x20
+#define LENOVO_EC_EVENT_UCSI 0x21
+#define LENOVO_EC_EVENT_HPD 0x22
+#define LENOVO_EC_EVENT_BAT_STATUS 0x24
+#define LENOVO_EC_EVENT_BAT_INFO 0x25
+#define LENOVO_EC_EVENT_BAT_ADPT_STATUS 0x37
+
+#endif
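
Note: a minimal sketch (not from the patch) of using the read helpers this header exports; the EC register address is a hypothetical value for illustration:

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/platform_data/lenovo-yoga-c630.h>

	#define EC_ADDR_BAT_MV	0x60	/* hypothetical register */

	static int yoga_demo(struct yoga_c630_ec *ec)
	{
		u16 ver = yoga_c630_ec_ucsi_get_version(ec);
		int val = yoga_c630_ec_read16(ec, EC_ADDR_BAT_MV);

		if (val < 0)
			return val;	/* negative errno from the EC transport */
		return ver ? 0 : -ENODEV;
	}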
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index ab222dd05bbc..3b4a891acefe 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -124,12 +124,12 @@ struct lp855x_rom_data {
};
/**
- * struct lp855x_platform_data
+ * struct lp855x_platform_data - lp855 platform-specific data
* @name : Backlight driver name. If it is not defined, default name is set.
* @device_control : value of DEVICE CONTROL register
* @initial_brightness : initial value of backlight brightness
* @period_ns : platform specific pwm period value. unit is nano.
- Only valid when mode is PWM_BASED.
+ * Only valid when mode is PWM_BASED.
* @size_program : total size of lp855x_rom_data
* @rom_data : list of new eeprom/eprom registers
*/
diff --git a/include/linux/platform_data/lv5207lp.h b/include/linux/platform_data/lv5207lp.h
index c9da8d402750..95d85c1394bc 100644
--- a/include/linux/platform_data/lv5207lp.h
+++ b/include/linux/platform_data/lv5207lp.h
@@ -8,7 +8,7 @@
struct device;
struct lv5207lp_platform_data {
- struct device *fbdev;
+ struct device *dev;
unsigned int max_value;
unsigned int def_value;
};
diff --git a/include/linux/platform_data/max6639.h b/include/linux/platform_data/max6639.h
deleted file mode 100644
index 65bfdb4fdc15..000000000000
--- a/include/linux/platform_data/max6639.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_MAX6639_H
-#define _LINUX_MAX6639_H
-
-#include <linux/types.h>
-
-/* platform data for the MAX6639 temperature sensor and fan control */
-
-struct max6639_platform_data {
- bool pwm_polarity; /* Polarity low (0) or high (1, default) */
- int ppr; /* Pulses per rotation 1..4 (default == 2) */
- int rpm_range; /* 2000, 4000 (default), 8000 or 16000 */
-};
-
-#endif /* _LINUX_MAX6639_H */
diff --git a/include/linux/platform_data/max6697.h b/include/linux/platform_data/max6697.h
deleted file mode 100644
index 6fbb70005541..000000000000
--- a/include/linux/platform_data/max6697.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * max6697.h
- * Copyright (c) 2012 Guenter Roeck <linux@roeck-us.net>
- */
-
-#ifndef MAX6697_H
-#define MAX6697_H
-
-#include <linux/types.h>
-
-/*
- * For all bit masks:
- * bit 0: local temperature
- * bit 1..7: remote temperatures
- */
-struct max6697_platform_data {
- bool smbus_timeout_disable; /* set to disable SMBus timeouts */
- bool extended_range_enable; /* set to enable extended temp range */
- bool beta_compensation; /* set to enable beta compensation */
- u8 alert_mask; /* set bit to 1 to disable alert */
- u8 over_temperature_mask; /* set bit to 1 to disable */
- u8 resistance_cancellation; /* set bit to 0 to disable
- * bit mask for MAX6581,
- * boolean for other chips
- */
- u8 ideality_mask; /* set bit to 0 to disable */
- u8 ideality_value; /* transistor ideality as per
- * MAX6581 datasheet
- */
-};
-
-#endif /* MAX6697_H */
diff --git a/include/linux/platform_data/max732x.h b/include/linux/platform_data/max732x.h
index f231c635faec..423999207cd5 100644
--- a/include/linux/platform_data/max732x.h
+++ b/include/linux/platform_data/max732x.h
@@ -7,17 +7,5 @@
struct max732x_platform_data {
/* number of the first GPIO */
unsigned gpio_base;
-
- /* interrupt base */
- int irq_base;
-
- void *context; /* param to setup/teardown */
-
- int (*setup)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
};
#endif /* __LINUX_I2C_MAX732X_H */
diff --git a/include/linux/platform_data/mcs.h b/include/linux/platform_data/mcs.h
deleted file mode 100644
index fcc6f2a1f5c3..000000000000
--- a/include/linux/platform_data/mcs.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009 - 2010 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- * Author: HeungJun Kim <riverful.kim@samsung.com>
- */
-
-#ifndef __LINUX_MCS_H
-#define __LINUX_MCS_H
-
-#define MCS_KEY_MAP(v, c) ((((v) & 0xff) << 16) | ((c) & 0xffff))
-#define MCS_KEY_VAL(v) (((v) >> 16) & 0xff)
-#define MCS_KEY_CODE(v) ((v) & 0xffff)
-
-struct mcs_platform_data {
- void (*poweron)(bool);
- void (*cfg_pin)(void);
-
- /* touchscreen */
- unsigned int x_size;
- unsigned int y_size;
-
- /* touchkey */
- const u32 *keymap;
- unsigned int keymap_size;
- unsigned int key_maxval;
- bool no_autorepeat;
-};
-
-#endif /* __LINUX_MCS_H */
diff --git a/include/linux/platform_data/mdio-bcm-unimac.h b/include/linux/platform_data/mdio-bcm-unimac.h
index 8a5f9f0b2c52..724e1f57b81f 100644
--- a/include/linux/platform_data/mdio-bcm-unimac.h
+++ b/include/linux/platform_data/mdio-bcm-unimac.h
@@ -1,11 +1,14 @@
#ifndef __MDIO_BCM_UNIMAC_PDATA_H
#define __MDIO_BCM_UNIMAC_PDATA_H
+struct clk;
+
struct unimac_mdio_pdata {
u32 phy_mask;
int (*wait_func)(void *data);
void *wait_func_data;
const char *bus_name;
+ struct clk *clk;
};
#define UNIMAC_MDIO_DRV_NAME "unimac-mdio"
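
Note: the new clk member lets the platform hand the MDIO bus its gate clock. A minimal sketch, assuming an optional clock; the "mdio" consumer name is hypothetical, and a NULL clock is valid when the platform has none:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/platform_data/mdio-bcm-unimac.h>

	static int unimac_mdio_attach_clk(struct device *dev,
					  struct unimac_mdio_pdata *pdata)
	{
		pdata->clk = devm_clk_get_optional(dev, "mdio");
		return PTR_ERR_OR_ZERO(pdata->clk);
	}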
diff --git a/include/linux/platform_data/media/omap4iss.h b/include/linux/platform_data/media/omap4iss.h
deleted file mode 100644
index 2a511a8fcda7..000000000000
--- a/include/linux/platform_data/media/omap4iss.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ARCH_ARM_PLAT_OMAP4_ISS_H
-#define ARCH_ARM_PLAT_OMAP4_ISS_H
-
-#include <linux/i2c.h>
-
-struct iss_device;
-
-enum iss_interface_type {
- ISS_INTERFACE_CSI2A_PHY1,
- ISS_INTERFACE_CSI2B_PHY2,
-};
-
-/**
- * struct iss_csiphy_lane: CSI2 lane position and polarity
- * @pos: position of the lane
- * @pol: polarity of the lane
- */
-struct iss_csiphy_lane {
- u8 pos;
- u8 pol;
-};
-
-#define ISS_CSIPHY1_NUM_DATA_LANES 4
-#define ISS_CSIPHY2_NUM_DATA_LANES 1
-
-/**
- * struct iss_csiphy_lanes_cfg - CSI2 lane configuration
- * @data: Configuration of one or two data lanes
- * @clk: Clock lane configuration
- */
-struct iss_csiphy_lanes_cfg {
- struct iss_csiphy_lane data[ISS_CSIPHY1_NUM_DATA_LANES];
- struct iss_csiphy_lane clk;
-};
-
-/**
- * struct iss_csi2_platform_data - CSI2 interface platform data
- * @crc: Enable the cyclic redundancy check
- * @vpclk_div: Video port output clock control
- */
-struct iss_csi2_platform_data {
- unsigned crc:1;
- unsigned vpclk_div:2;
- struct iss_csiphy_lanes_cfg lanecfg;
-};
-
-struct iss_subdev_i2c_board_info {
- struct i2c_board_info *board_info;
- int i2c_adapter_id;
-};
-
-struct iss_v4l2_subdevs_group {
- struct iss_subdev_i2c_board_info *subdevs;
- enum iss_interface_type interface;
- union {
- struct iss_csi2_platform_data csi2;
- } bus; /* gcc < 4.6.0 chokes on anonymous union initializers */
-};
-
-struct iss_platform_data {
- struct iss_v4l2_subdevs_group *subdevs;
- void (*set_constraints)(struct iss_device *iss, bool enable);
-};
-
-#endif
diff --git a/include/linux/platform_data/media/s5p_hdmi.h b/include/linux/platform_data/media/s5p_hdmi.h
deleted file mode 100644
index 457321e917b9..000000000000
--- a/include/linux/platform_data/media/s5p_hdmi.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Driver header for S5P HDMI chip.
- *
- * Copyright (c) 2011 Samsung Electronics, Co. Ltd
- * Contact: Tomasz Stanislawski <t.stanislaws@samsung.com>
- */
-
-#ifndef S5P_HDMI_H
-#define S5P_HDMI_H
-
-struct i2c_board_info;
-
-/**
- * @hdmiphy_bus: controller id for HDMIPHY bus
- * @hdmiphy_info: template for HDMIPHY I2C device
- * @mhl_bus: controller id for MHL control bus
- * @mhl_info: template for MHL I2C device
- * @hpd_gpio: GPIO for Hot-Plug-Detect pin
- *
- * NULL pointer for *_info fields indicates that
- * the corresponding chip is not present
- */
-struct s5p_hdmi_platform_data {
- int hdmiphy_bus;
- struct i2c_board_info *hdmiphy_info;
- int mhl_bus;
- struct i2c_board_info *mhl_info;
- int hpd_gpio;
-};
-
-#endif /* S5P_HDMI_H */
diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h
index ea1cc6d829e9..028781ad4059 100644
--- a/include/linux/platform_data/microchip-ksz.h
+++ b/include/linux/platform_data/microchip-ksz.h
@@ -20,10 +20,36 @@
#define __MICROCHIP_KSZ_H
#include <linux/types.h>
+#include <linux/platform_data/dsa.h>
+
+enum ksz_chip_id {
+ KSZ8463_CHIP_ID = 0x8463,
+ KSZ8563_CHIP_ID = 0x8563,
+ KSZ8795_CHIP_ID = 0x8795,
+ KSZ8794_CHIP_ID = 0x8794,
+ KSZ8765_CHIP_ID = 0x8765,
+ KSZ88X3_CHIP_ID = 0x8830,
+ KSZ8864_CHIP_ID = 0x8864,
+ KSZ8895_CHIP_ID = 0x8895,
+ KSZ9477_CHIP_ID = 0x00947700,
+ KSZ9896_CHIP_ID = 0x00989600,
+ KSZ9897_CHIP_ID = 0x00989700,
+ KSZ9893_CHIP_ID = 0x00989300,
+ KSZ9563_CHIP_ID = 0x00956300,
+ KSZ8567_CHIP_ID = 0x00856700,
+ KSZ9567_CHIP_ID = 0x00956700,
+ LAN9370_CHIP_ID = 0x00937000,
+ LAN9371_CHIP_ID = 0x00937100,
+ LAN9372_CHIP_ID = 0x00937200,
+ LAN9373_CHIP_ID = 0x00937300,
+ LAN9374_CHIP_ID = 0x00937400,
+ LAN9646_CHIP_ID = 0x00964600,
+};
struct ksz_platform_data {
+ /* Must be first such that dsa_register_switch() can access it */
+ struct dsa_chip_data cd;
u32 chip_id;
- u16 enabled_ports;
};
#endif
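
Note: embedding struct dsa_chip_data as the first member means the same platform_data pointer is valid both as ksz_platform_data and as the dsa_chip_data that dsa_register_switch() expects. A minimal sketch; the chip choice is an example and .cd is left for board wiring code to fill:

	#include <linux/platform_data/microchip-ksz.h>

	static struct ksz_platform_data ksz_pdata = {
		/* .cd must stay first: DSA reads dev->platform_data
		 * directly as a struct dsa_chip_data */
		.chip_id = KSZ9477_CHIP_ID,
	};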
diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h
index 101333fe2b8d..f6cca7a035c7 100644
--- a/include/linux/platform_data/mlxreg.h
+++ b/include/linux/platform_data/mlxreg.h
@@ -25,12 +25,75 @@ enum mlxreg_wdt_type {
};
/**
+ * enum mlxreg_hotplug_kind - kind of hotplug entry
+ *
+ * @MLXREG_HOTPLUG_DEVICE_NA: do not care;
+ * @MLXREG_HOTPLUG_LC_PRESENT: entry for line card presence in/out events;
+ * @MLXREG_HOTPLUG_LC_VERIFIED: entry for line card verification status events
+ * coming after line card security signature validation;
+ * @MLXREG_HOTPLUG_LC_POWERED: entry for line card power on/off events;
+ * @MLXREG_HOTPLUG_LC_SYNCED: entry for line card synchronization events, coming
+ * after hardware-firmware synchronization handshake;
+ * @MLXREG_HOTPLUG_LC_READY: entry for line card ready events, indicating line card
+ *				   PHYs ready / unready state;
+ * @MLXREG_HOTPLUG_LC_ACTIVE: entry for line card active events, indicating firmware
+ * availability / unavailability for the ports on line card;
+ * @MLXREG_HOTPLUG_LC_THERMAL: entry for line card thermal shutdown events, positive
+ * event indicates that system should power off the line
+ * card for which this event has been received;
+ */
+enum mlxreg_hotplug_kind {
+ MLXREG_HOTPLUG_DEVICE_NA = 0,
+ MLXREG_HOTPLUG_LC_PRESENT = 1,
+ MLXREG_HOTPLUG_LC_VERIFIED = 2,
+ MLXREG_HOTPLUG_LC_POWERED = 3,
+ MLXREG_HOTPLUG_LC_SYNCED = 4,
+ MLXREG_HOTPLUG_LC_READY = 5,
+ MLXREG_HOTPLUG_LC_ACTIVE = 6,
+ MLXREG_HOTPLUG_LC_THERMAL = 7,
+};
+
+/**
+ * enum mlxreg_hotplug_device_action - hotplug device action required for
+ * driver's connectivity
+ *
+ * @MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION: probe device for 'on' event, remove
+ * for 'off' event;
+ * @MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION: probe platform device for 'on'
+ * event, remove for 'off' event;
+ * @MLXREG_HOTPLUG_DEVICE_NO_ACTION: no connectivity action is required;
+ */
+enum mlxreg_hotplug_device_action {
+ MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION = 0,
+ MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION = 1,
+ MLXREG_HOTPLUG_DEVICE_NO_ACTION = 2,
+};
+
+/**
+ * struct mlxreg_core_hotplug_notifier - hotplug notifier block:
+ *
+ * @identity: notifier identity name;
+ * @handle: user handle to be passed by user handler function;
+ * @user_handler: user handler function associated with the event;
+ */
+struct mlxreg_core_hotplug_notifier {
+ char identity[MLXREG_CORE_LABEL_MAX_SIZE];
+ void *handle;
+ int (*user_handler)(void *handle, enum mlxreg_hotplug_kind kind, u8 action);
+};
+
+/**
* struct mlxreg_hotplug_device - I2C device data:
*
* @adapter: I2C device adapter;
* @client: I2C device client;
* @brdinfo: device board information;
* @nr: I2C device adapter number, to which device is to be attached;
+ * @pdev: platform device, if device is instantiated as a platform device;
+ * @action: action to be performed upon event receiving;
+ * @handle: user handle to be passed by user handler function;
+ * @user_handler: user handler function associated with the event;
+ * @notifier: pointer to event notifier block;
*
* Structure represents I2C hotplug device static data (board topology) and
* dynamic data (related kernel objects handles).
@@ -40,6 +103,11 @@ struct mlxreg_hotplug_device {
struct i2c_client *client;
struct i2c_board_info *brdinfo;
int nr;
+ struct platform_device *pdev;
+ enum mlxreg_hotplug_device_action action;
+ void *handle;
+ int (*user_handler)(void *handle, enum mlxreg_hotplug_kind kind, u8 action);
+ struct mlxreg_core_hotplug_notifier *notifier;
};
/**
@@ -51,12 +119,18 @@ struct mlxreg_hotplug_device {
* @bit: attribute effective bit;
* @capability: attribute capability register;
* @reg_prsnt: attribute presence register;
+ * @reg_sync: attribute sync register;
+ * @reg_pwr: attribute power register;
+ * @reg_ena: attribute enable register;
* @mode: access mode;
* @np - pointer to node platform associated with attribute;
* @hpdev - hotplug device data;
+ * @notifier: pointer to event notifier block;
* @health_cntr: dynamic device health indication counter;
* @attached: true if device has been attached after good health indication;
* @regnum: number of registers occupied by multi-register attribute;
+ * @slot: slot number, at which device is located;
+ * @secured: if set indicates that entry access is secured;
*/
struct mlxreg_core_data {
char label[MLXREG_CORE_LABEL_MAX_SIZE];
@@ -65,18 +139,25 @@ struct mlxreg_core_data {
u32 bit;
u32 capability;
u32 reg_prsnt;
+ u32 reg_sync;
+ u32 reg_pwr;
+ u32 reg_ena;
umode_t mode;
struct device_node *np;
struct mlxreg_hotplug_device hpdev;
+ struct mlxreg_core_hotplug_notifier *notifier;
u32 health_cntr;
bool attached;
u8 regnum;
+ u8 slot;
+ u8 secured;
};
/**
* struct mlxreg_core_item - same type components controlled by the driver:
*
* @data: component data;
+ * @kind: kind of hotplug attribute;
* @aggr_mask: group aggregation mask;
* @reg: group interrupt status register;
* @mask: group interrupt mask;
@@ -89,6 +170,7 @@ struct mlxreg_core_data {
*/
struct mlxreg_core_item {
struct mlxreg_core_data *data;
+ enum mlxreg_hotplug_kind kind;
u32 aggr_mask;
u32 reg;
u32 mask;
@@ -127,25 +209,31 @@ struct mlxreg_core_platform_data {
* @items: same type components with the hotplug capability;
* @irq: platform interrupt number;
* @regmap: register map of parent device;
- * @counter: number of the components with the hotplug capability;
+ * @count: number of the components with the hotplug capability;
* @cell: location of top aggregation interrupt register;
* @mask: top aggregation interrupt common mask;
* @cell_low: location of low aggregation interrupt register;
* @mask_low: low aggregation interrupt common mask;
* @deferred_nr: I2C adapter number must be exist prior probing execution;
* @shift_nr: I2C adapter numbers must be incremented by this value;
+ * @addr: mapped resource address;
+ * @handle: handle to be passed by callback;
+ * @completion_notify: callback to notify when platform driver probing is done;
*/
struct mlxreg_core_hotplug_platform_data {
struct mlxreg_core_item *items;
int irq;
void *regmap;
- int counter;
+ int count;
u32 cell;
u32 mask;
u32 cell_low;
u32 mask_low;
int deferred_nr;
int shift_nr;
+ void __iomem *addr;
+ void *handle;
+ int (*completion_notify)(void *handle, int id);
};
#endif /* __LINUX_PLATFORM_DATA_MLXREG_H */
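
Note: a minimal sketch (not from the patch) of the new hotplug notifier block; the identity string is hypothetical, and the meaning of the action argument (non-zero on assert, e.g. line card inserted) is an assumption from the kernel-doc above:

	#include <linux/platform_data/mlxreg.h>

	static int lc_user_handler(void *handle, enum mlxreg_hotplug_kind kind,
				   u8 action)
	{
		/* assumption: action != 0 means the event asserted */
		return 0;
	}

	static struct mlxreg_core_hotplug_notifier lc_notifier = {
		.identity	= "lc-demo",	/* hypothetical name */
		.user_handler	= lc_user_handler,
	};

A mlxreg_core_data entry would point its .notifier field at such a block to receive the MLXREG_HOTPLUG_LC_* events defined above.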
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
deleted file mode 100644
index cba1184b364c..000000000000
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2010 Wolfram Sang <kernel@pengutronix.de>
- */
-
-#ifndef __ASM_ARCH_IMX_ESDHC_H
-#define __ASM_ARCH_IMX_ESDHC_H
-
-#include <linux/types.h>
-
-enum wp_types {
- ESDHC_WP_NONE, /* no WP, neither controller nor gpio */
- ESDHC_WP_CONTROLLER, /* mmc controller internal WP */
- ESDHC_WP_GPIO, /* external gpio pin for WP */
-};
-
-enum cd_types {
- ESDHC_CD_NONE, /* no CD, neither controller nor gpio */
- ESDHC_CD_CONTROLLER, /* mmc controller internal CD */
- ESDHC_CD_GPIO, /* external gpio pin for CD */
- ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */
-};
-
-/**
- * struct esdhc_platform_data - platform data for esdhc on i.MX
- *
- * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35.
- *
- * @wp_type: type of write_protect method (see wp_types enum above)
- * @cd_type: type of card_detect method (see cd_types enum above)
- */
-
-struct esdhc_platform_data {
- enum wp_types wp_type;
- enum cd_types cd_type;
- int max_bus_width;
- unsigned int delay_line;
- unsigned int tuning_step; /* The delay cell steps in tuning procedure */
- unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */
- unsigned int strobe_dll_delay_target; /* The delay cell for strobe pad (read clock) */
-};
-#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h
index 91051e9907f3..054d0c3c5ec5 100644
--- a/include/linux/platform_data/mmc-omap.h
+++ b/include/linux/platform_data/mmc-omap.h
@@ -20,8 +20,6 @@ struct omap_mmc_platform_data {
* maximum frequency on the MMC bus */
unsigned int max_freq;
- /* switch the bus to a new slot */
- int (*switch_slot)(struct device *dev, int slot);
/* initialize board-specific MMC functionality, can be NULL if
* not supported */
int (*init)(struct device *dev);
diff --git a/include/linux/platform_data/mmc-pxamci.h b/include/linux/platform_data/mmc-pxamci.h
index 7e44e84e7150..652f323b5ecc 100644
--- a/include/linux/platform_data/mmc-pxamci.h
+++ b/include/linux/platform_data/mmc-pxamci.h
@@ -7,6 +7,7 @@
struct device;
struct mmc_host;
+struct property_entry;
struct pxamci_platform_data {
unsigned int ocr_mask; /* available voltages */
@@ -18,7 +19,8 @@ struct pxamci_platform_data {
bool gpio_card_ro_invert; /* gpio ro is inverted */
};
-extern void pxa_set_mci_info(struct pxamci_platform_data *info);
+extern void pxa_set_mci_info(const struct pxamci_platform_data *info,
+ const struct property_entry *props);
extern void pxa3xx_set_mci2_info(struct pxamci_platform_data *info);
extern void pxa3xx_set_mci3_info(struct pxamci_platform_data *info);
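
Note: pxa_set_mci_info() now also takes an optional property_entry array so board files can describe card-detect and similar wiring as device properties. A minimal sketch with no properties; the OCR mask is a board-specific assumption:

	#include <linux/init.h>
	#include <linux/mmc/host.h>
	#include <linux/platform_data/mmc-pxamci.h>

	static const struct pxamci_platform_data board_mci_pdata = {
		.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
	};

	static void __init board_init_mmc(void)
	{
		/* second argument is the optional property array; NULL if unused */
		pxa_set_mci_info(&board_mci_pdata, NULL);
	}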
diff --git a/include/linux/platform_data/mmc-s3cmci.h b/include/linux/platform_data/mmc-s3cmci.h
deleted file mode 100644
index bacb86db3112..000000000000
--- a/include/linux/platform_data/mmc-s3cmci.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ARCH_MCI_H
-#define _ARCH_MCI_H
-
-/**
- * struct s3c24xx_mci_pdata - sd/mmc controller platform data
- * @no_wprotect: Set this to indicate there is no write-protect switch.
- * @no_detect: Set this if there is no detect switch.
- * @wprotect_invert: Invert the default sense of the write protect switch.
- * @use_dma: Set to allow the use of DMA.
- * @gpio_detect: GPIO number for the card detect line.
- * @gpio_wprotect: GPIO number for the write protect line.
- * @ocr_avail: The mask of the available power states, non-zero to use.
- * @set_power: Callback to control the power mode.
- *
- * The @gpio_detect is used for card detection when @no_wprotect is unset,
- * and the default sense is that 0 returned from gpio_get_value() means
- * that a card is inserted. If @detect_invert is set, then the value from
- * gpio_get_value() is inverted, which makes 1 mean card inserted.
- *
- * The driver will use @gpio_wprotect to signal whether the card is write
- * protected if @no_wprotect is not set. A 0 returned from gpio_get_value()
- * means the card is read/write, and 1 means read-only. The @wprotect_invert
- * will invert the value returned from gpio_get_value().
- *
- * Card power is set by @ocr_availa, using MCC_VDD_ constants if it is set
- * to a non-zero value, otherwise the default of 3.2-3.4V is used.
- */
-struct s3c24xx_mci_pdata {
- unsigned int no_wprotect:1;
- unsigned int no_detect:1;
- unsigned int wprotect_invert:1;
- unsigned int use_dma:1;
-
- unsigned long ocr_avail;
- void (*set_power)(unsigned char power_mode,
- unsigned short vdd);
- struct gpio_desc *bus[6];
-};
-
-/**
- * s3c24xx_mci_set_platdata - set platform data for mmc/sdi device
- * @pdata: The platform data
- *
- * Copy the platform data supplied by @pdata so that this can be marked
- * __initdata.
- */
-extern void s3c24xx_mci_def_set_power(unsigned char power_mode, unsigned short vdd);
-extern void s3c24xx_mci_set_platdata(struct s3c24xx_mci_pdata *pdata);
-
-#endif /* _ARCH_NCI_H */
diff --git a/include/linux/platform_data/mmp_audio.h b/include/linux/platform_data/mmp_audio.h
deleted file mode 100644
index 83428d8ee18d..000000000000
--- a/include/linux/platform_data/mmp_audio.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * MMP Platform AUDIO Management
- *
- * Copyright (c) 2011 Marvell Semiconductors Inc.
- */
-
-#ifndef MMP_AUDIO_H
-#define MMP_AUDIO_H
-
-struct mmp_audio_platdata {
- u32 period_max_capture;
- u32 buffer_max_capture;
- u32 period_max_playback;
- u32 buffer_max_playback;
-};
-
-#endif /* MMP_AUDIO_H */
diff --git a/include/linux/platform_data/mouse-pxa930_trkball.h b/include/linux/platform_data/mouse-pxa930_trkball.h
deleted file mode 100644
index ba0ac7a30d8c..000000000000
--- a/include/linux/platform_data/mouse-pxa930_trkball.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_ARCH_PXA930_TRKBALL_H
-#define __ASM_ARCH_PXA930_TRKBALL_H
-
-struct pxa930_trkball_platform_data {
- int x_filter;
- int y_filter;
-};
-
-#endif /* __ASM_ARCH_PXA930_TRKBALL_H */
-
diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h
deleted file mode 100644
index a49826214a39..000000000000
--- a/include/linux/platform_data/mtd-davinci-aemif.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * TI DaVinci AEMIF support
- *
- * Copyright 2010 (C) Texas Instruments, Inc. https://www.ti.com/
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-#ifndef _MACH_DAVINCI_AEMIF_H
-#define _MACH_DAVINCI_AEMIF_H
-
-#include <linux/platform_device.h>
-
-#define NRCSR_OFFSET 0x00
-#define AWCCR_OFFSET 0x04
-#define A1CR_OFFSET 0x10
-
-#define ACR_ASIZE_MASK 0x3
-#define ACR_EW_MASK BIT(30)
-#define ACR_SS_MASK BIT(31)
-
-/* All timings in nanoseconds */
-struct davinci_aemif_timing {
- u8 wsetup;
- u8 wstrobe;
- u8 whold;
-
- u8 rsetup;
- u8 rstrobe;
- u8 rhold;
-
- u8 ta;
-};
-
-#endif
diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h
deleted file mode 100644
index dd474dd44848..000000000000
--- a/include/linux/platform_data/mtd-davinci.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * mach-davinci/nand.h
- *
- * Copyright © 2006 Texas Instruments.
- *
- * Ported to 2.6.23 Copyright © 2008 by
- * Sander Huijsen <Shuijsen@optelecom-nkf.com>
- * Troy Kisky <troy.kisky@boundarydevices.com>
- * Dirk Behme <Dirk.Behme@gmail.com>
- *
- * --------------------------------------------------------------------------
- */
-
-#ifndef __ARCH_ARM_DAVINCI_NAND_H
-#define __ARCH_ARM_DAVINCI_NAND_H
-
-#include <linux/mtd/rawnand.h>
-
-#define NANDFCR_OFFSET 0x60
-#define NANDFSR_OFFSET 0x64
-#define NANDF1ECC_OFFSET 0x70
-
-/* 4-bit ECC syndrome registers */
-#define NAND_4BIT_ECC_LOAD_OFFSET 0xbc
-#define NAND_4BIT_ECC1_OFFSET 0xc0
-#define NAND_4BIT_ECC2_OFFSET 0xc4
-#define NAND_4BIT_ECC3_OFFSET 0xc8
-#define NAND_4BIT_ECC4_OFFSET 0xcc
-#define NAND_ERR_ADD1_OFFSET 0xd0
-#define NAND_ERR_ADD2_OFFSET 0xd4
-#define NAND_ERR_ERRVAL1_OFFSET 0xd8
-#define NAND_ERR_ERRVAL2_OFFSET 0xdc
-
-/* NOTE: boards don't need to use these address bits
- * for ALE/CLE unless they support booting from NAND.
- * They're used unless platform data overrides them.
- */
-#define MASK_ALE 0x08
-#define MASK_CLE 0x10
-
-struct davinci_nand_pdata { /* platform_data */
- uint32_t mask_ale;
- uint32_t mask_cle;
-
- /*
- * 0-indexed chip-select number of the asynchronous
- * interface to which the NAND device has been connected.
- *
- * So, if you have NAND connected to CS3 of DA850, you
- * will pass '1' here. Since the asynchronous interface
- * on DA850 starts from CS2.
- */
- uint32_t core_chipsel;
-
- /* for packages using two chipselects */
- uint32_t mask_chipsel;
-
- /* board's default static partition info */
- struct mtd_partition *parts;
- unsigned nr_parts;
-
- /* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
- * soft == NAND_ECC_ENGINE_TYPE_SOFT
- * else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
- *
- * All DaVinci-family chips support 1-bit hardware ECC.
- * Newer ones also support 4-bit ECC, but are awkward
- * using it with large page chips.
- */
- enum nand_ecc_engine_type engine_type;
- enum nand_ecc_placement ecc_placement;
- u8 ecc_bits;
-
- /* e.g. NAND_BUSWIDTH_16 */
- unsigned options;
- /* e.g. NAND_BBT_USE_FLASH */
- unsigned bbt_options;
-
- /* Main and mirror bbt descriptor overrides */
- struct nand_bbt_descr *bbt_td;
- struct nand_bbt_descr *bbt_md;
-
- /* Access timings */
- struct davinci_aemif_timing *timing;
-};
-
-#endif /* __ARCH_ARM_DAVINCI_NAND_H */
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h
index de6ada739121..8c2f1f185353 100644
--- a/include/linux/platform_data/mtd-nand-omap2.h
+++ b/include/linux/platform_data/mtd-nand-omap2.h
@@ -7,6 +7,7 @@
#define _MTD_NAND_OMAP2_H
#include <linux/mtd/partitions.h>
+#include <linux/mod_devicetable.h>
#define GPMC_BCH_NUM_REMAINDER 8
@@ -61,4 +62,11 @@ struct gpmc_nand_regs {
void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER];
void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER];
};
-#endif
+
+static const struct of_device_id omap_nand_ids[] = {
+ { .compatible = "ti,omap2-nand", },
+ { .compatible = "ti,am64-nand", },
+ {},
+};
+
+#endif /* _MTD_NAND_OMAP2_H */
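
Note: moving the of_device_id table into the header lets the GPMC driver and the NAND driver share one definition. A minimal sketch of matching against it; the helper name is hypothetical:

	#include <linux/of.h>
	#include <linux/platform_data/mtd-nand-omap2.h>

	static bool gpmc_child_is_nand(struct device_node *np)
	{
		return of_match_node(omap_nand_ids, np) != NULL;
	}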
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h
deleted file mode 100644
index 25390fc3e795..000000000000
--- a/include/linux/platform_data/mtd-nand-s3c2410.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2004 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 - NAND device controller platform_device info
-*/
-
-#ifndef __MTD_NAND_S3C2410_H
-#define __MTD_NAND_S3C2410_H
-
-#include <linux/mtd/rawnand.h>
-
-/**
- * struct s3c2410_nand_set - define a set of one or more nand chips
- * @flash_bbt: Openmoko u-boot can create a Bad Block Table
- * Setting this flag will allow the kernel to
- * look for it at boot time and also skip the NAND
- * scan.
- * @options: Default value to set into 'struct nand_chip' options.
- * @nr_chips: Number of chips in this set
- * @nr_partitions: Number of partitions pointed to by @partitions
- * @name: Name of set (optional)
- * @nr_map: Map for low-layer logical to physical chip numbers (option)
- * @partitions: The mtd partition list
- *
- * define a set of one or more nand chips registered with an unique mtd. Also
- * allows to pass flag to the underlying NAND layer. 'disable_ecc' will trigger
- * a warning at boot time.
- */
-struct s3c2410_nand_set {
- unsigned int flash_bbt:1;
-
- unsigned int options;
- int nr_chips;
- int nr_partitions;
- char *name;
- int *nr_map;
- struct mtd_partition *partitions;
- struct device_node *of_node;
-};
-
-struct s3c2410_platform_nand {
- /* timing information for controller, all times in nanoseconds */
-
- int tacls; /* time for active CLE/ALE to nWE/nOE */
- int twrph0; /* active time for nWE/nOE */
- int twrph1; /* time for release CLE/ALE from nWE/nOE inactive */
-
- unsigned int ignore_unset_ecc:1;
-
- enum nand_ecc_engine_type engine_type;
-
- int nr_sets;
- struct s3c2410_nand_set *sets;
-
- void (*select_chip)(struct s3c2410_nand_set *,
- int chip);
-};
-
-/**
- * s3c_nand_set_platdata() - register NAND platform data.
- * @nand: The NAND platform data to register with s3c_device_nand.
- *
- * This function copies the given NAND platform data, @nand and registers
- * it with the s3c_device_nand. This allows @nand to be __initdata.
-*/
-extern void s3c_nand_set_platdata(struct s3c2410_platform_nand *nand);
-
-#endif /*__MTD_NAND_S3C2410_H */
diff --git a/include/linux/platform_data/net-cw1200.h b/include/linux/platform_data/net-cw1200.h
index c510734405bb..89d0ec6f7d46 100644
--- a/include/linux/platform_data/net-cw1200.h
+++ b/include/linux/platform_data/net-cw1200.h
@@ -14,8 +14,6 @@ struct cw1200_platform_data_spi {
/* All others are optional */
bool have_5ghz;
- int reset; /* GPIO to RSTn signal (0 disables) */
- int powerup; /* GPIO to POWERUP signal (0 disables) */
int (*power_ctrl)(const struct cw1200_platform_data_spi *pdata,
bool enable); /* Control 3v3 / 1v8 supply */
int (*clk_ctrl)(const struct cw1200_platform_data_spi *pdata,
@@ -30,8 +28,6 @@ struct cw1200_platform_data_sdio {
/* All others are optional */
bool have_5ghz;
bool no_nptb; /* SDIO hardware does not support non-power-of-2-blocksizes */
- int reset; /* GPIO to RSTn signal (0 disables) */
- int powerup; /* GPIO to POWERUP signal (0 disables) */
int irq; /* IRQ line or 0 to use SDIO IRQ */
int (*power_ctrl)(const struct cw1200_platform_data_sdio *pdata,
bool enable); /* Control 3v3 / 1v8 supply */
diff --git a/include/linux/platform_data/nfcmrvl.h b/include/linux/platform_data/nfcmrvl.h
deleted file mode 100644
index 9e75ac8d19be..000000000000
--- a/include/linux/platform_data/nfcmrvl.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2015, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- */
-
-#ifndef _NFCMRVL_PTF_H_
-#define _NFCMRVL_PTF_H_
-
-struct nfcmrvl_platform_data {
- /*
- * Generic
- */
-
- /* GPIO that is wired to RESET_N signal */
- int reset_n_io;
- /* Tell if transport is muxed in HCI one */
- unsigned int hci_muxed;
-
- /*
- * UART specific
- */
-
- /* Tell if UART needs flow control at init */
- unsigned int flow_control;
- /* Tell if firmware supports break control for power management */
- unsigned int break_control;
-
-
- /*
- * I2C specific
- */
-
- unsigned int irq;
- unsigned int irq_polarity;
-};
-
-#endif /* _NFCMRVL_PTF_H_ */
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
deleted file mode 100644
index b324d03e580c..000000000000
--- a/include/linux/platform_data/ntc_thermistor.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * ntc_thermistor.h - NTC Thermistors
- *
- * Copyright (C) 2010 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- */
-#ifndef _LINUX_NTC_H
-#define _LINUX_NTC_H
-
-struct iio_channel;
-
-enum ntc_thermistor_type {
- TYPE_B57330V2103,
- TYPE_B57891S0103,
- TYPE_NCPXXWB473,
- TYPE_NCPXXWF104,
- TYPE_NCPXXWL333,
- TYPE_NCPXXXH103,
-};
-
-struct ntc_thermistor_platform_data {
- /*
- * One (not both) of read_uV and read_ohm should be provided and only
- * one of the two should be provided.
- * Both functions should return negative value for an error case.
- *
- * pullup_uV, pullup_ohm, pulldown_ohm, and connect are required to use
- * read_uV()
- *
- * How to setup pullup_ohm, pulldown_ohm, and connect is
- * described at Documentation/hwmon/ntc_thermistor.rst
- *
- * pullup/down_ohm: 0 for infinite / not-connected
- *
- * chan: iio_channel pointer to communicate with the ADC which the
- * thermistor is using for conversion of the analog values.
- */
- int (*read_uv)(struct ntc_thermistor_platform_data *);
- unsigned int pullup_uv;
-
- unsigned int pullup_ohm;
- unsigned int pulldown_ohm;
- enum { NTC_CONNECTED_POSITIVE, NTC_CONNECTED_GROUND } connect;
- struct iio_channel *chan;
-
- int (*read_ohm)(void);
-};
-
-#endif /* _LINUX_NTC_H */
diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h
index 0dd851ea1c72..7fcb55fe21c9 100644
--- a/include/linux/platform_data/omap-twl4030.h
+++ b/include/linux/platform_data/omap-twl4030.h
@@ -37,9 +37,6 @@ struct omap_tw4030_pdata {
bool has_digimic0;
bool has_digimic1;
u8 has_linein;
-
- /* Jack detect GPIO or <= 0 if it is not implemented */
- int jack_detect;
};
#endif /* _OMAP_TWL4030_H_ */
diff --git a/include/linux/platform_data/omap1_bl.h b/include/linux/platform_data/omap1_bl.h
index 5e8b17d77a5f..3d0bab31a0a9 100644
--- a/include/linux/platform_data/omap1_bl.h
+++ b/include/linux/platform_data/omap1_bl.h
@@ -6,7 +6,6 @@
struct omap_backlight_config {
int default_intensity;
- int (*set_power)(struct device *dev, int state);
};
#endif
diff --git a/include/linux/platform_data/pca953x.h b/include/linux/platform_data/pca953x.h
index 4eb53e023997..3c3787c4d96c 100644
--- a/include/linux/platform_data/pca953x.h
+++ b/include/linux/platform_data/pca953x.h
@@ -11,21 +11,8 @@ struct pca953x_platform_data {
/* number of the first GPIO */
unsigned gpio_base;
- /* initial polarity inversion setting */
- u32 invert;
-
/* interrupt base */
int irq_base;
-
- void *context; /* param to setup/teardown */
-
- int (*setup)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- const char *const *names;
};
#endif /* _LINUX_PCA953X_H */
diff --git a/include/linux/platform_data/pcf857x.h b/include/linux/platform_data/pcf857x.h
deleted file mode 100644
index 11d4ed78c7f4..000000000000
--- a/include/linux/platform_data/pcf857x.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_PCF857X_H
-#define __LINUX_PCF857X_H
-
-/**
- * struct pcf857x_platform_data - data to set up pcf857x driver
- * @gpio_base: number of the chip's first GPIO
- * @n_latch: optional bit-inverse of initial register value; if
- * you leave this initialized to zero the driver will act
- * like the chip was just reset
- * @setup: optional callback issued once the GPIOs are valid
- * @teardown: optional callback issued before the GPIOs are invalidated
- * @context: optional parameter passed to setup() and teardown()
- *
- * In addition to the I2C_BOARD_INFO() state appropriate to each chip,
- * the i2c_board_info used with the pcf875x driver must provide its
- * platform_data (pointer to one of these structures) with at least
- * the gpio_base value initialized.
- *
- * The @setup callback may be used with the kind of board-specific glue
- * which hands the (now-valid) GPIOs to other drivers, or which puts
- * devices in their initial states using these GPIOs.
- *
- * These GPIO chips are only "quasi-bidirectional"; read the chip specs
- * to understand the behavior. They don't have separate registers to
- * record which pins are used for input or output, record which output
- * values are driven, or provide access to input values. That must be
- * inferred by reading the chip's value and knowing the last value written
- * to it. If you leave n_latch initialized to zero, that last written
- * value is presumed to be all ones (as if the chip were just reset).
- */
-struct pcf857x_platform_data {
- unsigned gpio_base;
- unsigned n_latch;
-
- int (*setup)(struct i2c_client *client,
- int gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct i2c_client *client,
- int gpio, unsigned ngpio,
- void *context);
- void *context;
-};
-
-#endif /* __LINUX_PCF857X_H */
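
The removed kernel-doc above describes how boards attached this data through i2c_board_info. A minimal sketch under those conventions; the GPIO base and I2C address are illustrative:

#include <linux/i2c.h>
#include <linux/platform_data/pcf857x.h>

static struct pcf857x_platform_data board_pcf_pdata = {
	.gpio_base = 160,	/* number of the chip's first GPIO */
	.n_latch   = 0,		/* zero: behave as if the chip was just reset */
};

static struct i2c_board_info board_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("pcf8574", 0x20),
		.platform_data = &board_pcf_pdata,
	},
};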
diff --git a/include/linux/platform_data/pcmcia-pxa2xx_viper.h b/include/linux/platform_data/pcmcia-pxa2xx_viper.h
deleted file mode 100644
index a23b58aff9e1..000000000000
--- a/include/linux/platform_data/pcmcia-pxa2xx_viper.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ARCOM_PCMCIA_H
-#define __ARCOM_PCMCIA_H
-
-struct arcom_pcmcia_pdata {
- int cd_gpio;
- int rdy_gpio;
- int pwr_gpio;
- void (*reset)(int state);
-};
-
-#endif
diff --git a/include/linux/platform_data/pxa2xx_udc.h b/include/linux/platform_data/pxa2xx_udc.h
index ff9c35dca59d..bc99cc6a3c5f 100644
--- a/include/linux/platform_data/pxa2xx_udc.h
+++ b/include/linux/platform_data/pxa2xx_udc.h
@@ -25,4 +25,10 @@ struct pxa2xx_udc_mach_info {
int gpio_pullup; /* high == pullup activated */
};
+#ifdef CONFIG_PXA27x
+extern void pxa27x_clear_otgph(void);
+#else
+#define pxa27x_clear_otgph() do {} while (0)
+#endif
+
#endif
diff --git a/include/linux/platform_data/rtc-ds2404.h b/include/linux/platform_data/rtc-ds2404.h
deleted file mode 100644
index 22c53825528f..000000000000
--- a/include/linux/platform_data/rtc-ds2404.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * ds2404.h - platform data structure for the DS2404 RTC.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 Sven Schnelle <svens@stackframe.org>
- */
-
-#ifndef __LINUX_DS2404_H
-#define __LINUX_DS2404_H
-
-struct ds2404_platform_data {
-
- unsigned int gpio_rst;
- unsigned int gpio_clk;
- unsigned int gpio_dq;
-};
-#endif
diff --git a/include/linux/platform_data/rtc-v3020.h b/include/linux/platform_data/rtc-v3020.h
deleted file mode 100644
index e55d82cebf80..000000000000
--- a/include/linux/platform_data/rtc-v3020.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * v3020.h - Registers definition and platform data structure for the v3020 RTC.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2006, 8D Technologies inc.
- */
-#ifndef __LINUX_V3020_H
-#define __LINUX_V3020_H
-
-/* The v3020 has only one data pin but which one
- * is used depends on the board. */
-struct v3020_platform_data {
- int leftshift; /* (1<<(leftshift)) & readl() */
-
- unsigned int use_gpio:1;
- unsigned int gpio_cs;
- unsigned int gpio_wr;
- unsigned int gpio_rd;
- unsigned int gpio_io;
-};
-
-#define V3020_STATUS_0 0x00
-#define V3020_STATUS_1 0x01
-#define V3020_SECONDS 0x02
-#define V3020_MINUTES 0x03
-#define V3020_HOURS 0x04
-#define V3020_MONTH_DAY 0x05
-#define V3020_MONTH 0x06
-#define V3020_YEAR 0x07
-#define V3020_WEEK_DAY 0x08
-#define V3020_WEEK 0x09
-
-#define V3020_IS_COMMAND(val) ((val)>=0x0E)
-
-#define V3020_CMD_RAM2CLOCK 0x0E
-#define V3020_CMD_CLOCK2RAM 0x0F
-
-#endif /* __LINUX_V3020_H */
diff --git a/include/linux/platform_data/s3c-hsudc.h b/include/linux/platform_data/s3c-hsudc.h
deleted file mode 100644
index a170939832d5..000000000000
--- a/include/linux/platform_data/s3c-hsudc.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * S3C24XX USB 2.0 High-speed USB controller gadget driver
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * The S3C24XX USB 2.0 high-speed USB controller supports up to 9 endpoints.
- * Each endpoint can be configured as either in or out endpoint. Endpoints
- * can be configured for Bulk or Interrupt transfer mode.
-*/
-
-#ifndef __LINUX_USB_S3C_HSUDC_H
-#define __LINUX_USB_S3C_HSUDC_H
-
-/**
- * s3c24xx_hsudc_platdata - Platform data for USB High-Speed gadget controller.
- * @epnum: Number of endpoints to be instantiated by the controller driver.
- * @gpio_init: Platform specific USB related GPIO initialization.
- * @gpio_uninit: Platform specific USB related GPIO uninitialization.
- *
- * Representation of platform data for the S3C24XX USB 2.0 High Speed gadget
- * controllers.
- */
-struct s3c24xx_hsudc_platdata {
- unsigned int epnum;
- void (*gpio_init)(void);
- void (*gpio_uninit)(void);
- void (*phy_init)(void);
- void (*phy_uninit)(void);
-};
-
-#endif /* __LINUX_USB_S3C_HSUDC_H */
diff --git a/include/linux/platform_data/sa11x0-serial.h b/include/linux/platform_data/sa11x0-serial.h
index 8b79ab08af45..a88096bc74e4 100644
--- a/include/linux/platform_data/sa11x0-serial.h
+++ b/include/linux/platform_data/sa11x0-serial.h
@@ -10,7 +10,6 @@
#define SA11X0_SERIAL_H
struct uart_port;
-struct uart_info;
/*
* This is a temporary structure for registering these
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/platform_data/sh_mmcif.h
index e25533b95d9f..6eb914f958f9 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/platform_data/sh_mmcif.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * include/linux/mmc/sh_mmcif.h
- *
* platform data for eMMC driver
*
* Copyright (C) 2010 Renesas Solutions Corp.
diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h
index d661399b217d..6c19d4fbbe39 100644
--- a/include/linux/platform_data/shmob_drm.h
+++ b/include/linux/platform_data/shmob_drm.h
@@ -10,7 +10,7 @@
#ifndef __SHMOB_DRM_H__
#define __SHMOB_DRM_H__
-#include <drm/drm_mode.h>
+#include <video/videomode.h>
enum shmob_drm_clk_source {
SHMOB_DRM_CLK_BUS,
@@ -18,72 +18,21 @@ enum shmob_drm_clk_source {
SHMOB_DRM_CLK_EXTERNAL,
};
-enum shmob_drm_interface {
- SHMOB_DRM_IFACE_RGB8, /* 24bpp, 8:8:8 */
- SHMOB_DRM_IFACE_RGB9, /* 18bpp, 9:9 */
- SHMOB_DRM_IFACE_RGB12A, /* 24bpp, 12:12 */
- SHMOB_DRM_IFACE_RGB12B, /* 12bpp */
- SHMOB_DRM_IFACE_RGB16, /* 16bpp */
- SHMOB_DRM_IFACE_RGB18, /* 18bpp */
- SHMOB_DRM_IFACE_RGB24, /* 24bpp */
- SHMOB_DRM_IFACE_YUV422, /* 16bpp */
- SHMOB_DRM_IFACE_SYS8A, /* 24bpp, 8:8:8 */
- SHMOB_DRM_IFACE_SYS8B, /* 18bpp, 8:8:2 */
- SHMOB_DRM_IFACE_SYS8C, /* 18bpp, 2:8:8 */
- SHMOB_DRM_IFACE_SYS8D, /* 16bpp, 8:8 */
- SHMOB_DRM_IFACE_SYS9, /* 18bpp, 9:9 */
- SHMOB_DRM_IFACE_SYS12, /* 24bpp, 12:12 */
- SHMOB_DRM_IFACE_SYS16A, /* 16bpp */
- SHMOB_DRM_IFACE_SYS16B, /* 18bpp, 16:2 */
- SHMOB_DRM_IFACE_SYS16C, /* 18bpp, 2:16 */
- SHMOB_DRM_IFACE_SYS18, /* 18bpp */
- SHMOB_DRM_IFACE_SYS24, /* 24bpp */
-};
-
-struct shmob_drm_backlight_data {
- const char *name;
- int max_brightness;
- int (*get_brightness)(void);
- int (*set_brightness)(int brightness);
-};
-
struct shmob_drm_panel_data {
unsigned int width_mm; /* Panel width in mm */
unsigned int height_mm; /* Panel height in mm */
- struct drm_mode_modeinfo mode;
+ struct videomode mode;
};
-struct shmob_drm_sys_interface_data {
- unsigned int read_latch:6;
- unsigned int read_setup:8;
- unsigned int read_cycle:8;
- unsigned int read_strobe:8;
- unsigned int write_setup:8;
- unsigned int write_cycle:8;
- unsigned int write_strobe:8;
- unsigned int cs_setup:3;
- unsigned int vsync_active_high:1;
- unsigned int vsync_dir_input:1;
-};
-
-#define SHMOB_DRM_IFACE_FL_DWPOL (1 << 0) /* Rising edge dot clock data latch */
-#define SHMOB_DRM_IFACE_FL_DIPOL (1 << 1) /* Active low display enable */
-#define SHMOB_DRM_IFACE_FL_DAPOL (1 << 2) /* Active low display data */
-#define SHMOB_DRM_IFACE_FL_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */
-#define SHMOB_DRM_IFACE_FL_DWCNT (1 << 4) /* Disable dotclock during blanking */
-
struct shmob_drm_interface_data {
- enum shmob_drm_interface interface;
- struct shmob_drm_sys_interface_data sys;
+ unsigned int bus_fmt; /* MEDIA_BUS_FMT_* */
unsigned int clk_div;
- unsigned int flags;
};
struct shmob_drm_platform_data {
enum shmob_drm_clk_source clk_source;
struct shmob_drm_interface_data iface;
struct shmob_drm_panel_data panel;
- struct shmob_drm_backlight_data backlight;
};
#endif /* __SHMOB_DRM_H__ */
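
With the switch from drm_mode_modeinfo to videomode and from the interface enum to MEDIA_BUS_FMT_* values, board platform data now looks roughly like this sketch (the panel timings are illustrative, not from a real datasheet):

#include <linux/media-bus-format.h>
#include <linux/platform_data/shmob_drm.h>

static const struct shmob_drm_platform_data board_lcd_pdata = {
	.clk_source = SHMOB_DRM_CLK_BUS,
	.iface = {
		.bus_fmt = MEDIA_BUS_FMT_RGB888_1X24,	/* replaces SHMOB_DRM_IFACE_RGB24 */
		.clk_div = 5,
	},
	.panel = {
		.width_mm  = 152,
		.height_mm = 91,
		.mode = {	/* struct videomode instead of drm_mode_modeinfo */
			.pixelclock   = 33260000,
			.hactive      = 800,
			.hfront_porch = 40,
			.hback_porch  = 88,
			.hsync_len    = 128,
			.vactive      = 480,
			.vfront_porch = 13,
			.vback_porch  = 32,
			.vsync_len    = 3,
		},
	},
};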
diff --git a/include/linux/platform_data/sht3x.h b/include/linux/platform_data/sht3x.h
deleted file mode 100644
index 14680d2a98f7..000000000000
--- a/include/linux/platform_data/sht3x.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2016 Sensirion AG, Switzerland
- * Author: David Frey <david.frey@sensirion.com>
- * Author: Pascal Sachs <pascal.sachs@sensirion.com>
- */
-
-#ifndef __SHT3X_H_
-#define __SHT3X_H_
-
-struct sht3x_platform_data {
- bool blocking_io;
- bool high_precision;
-};
-#endif /* __SHT3X_H_ */
diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h
index c71a2dd66143..5f412a615532 100644
--- a/include/linux/platform_data/si5351.h
+++ b/include/linux/platform_data/si5351.h
@@ -105,10 +105,12 @@ struct si5351_clkout_config {
* @clk_xtal: xtal input clock
* @clk_clkin: clkin input clock
* @pll_src: array of pll source clock setting
+ * @pll_reset: array indicating if plls should be reset after setting the rate
* @clkout: array of clkout configuration
*/
struct si5351_platform_data {
enum si5351_pll_src pll_src[2];
+ bool pll_reset[2];
struct si5351_clkout_config clkout[8];
};
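
A sketch of how a board might use the new pll_reset flags alongside the existing pll_src array (the chosen values are illustrative):

#include <linux/platform_data/si5351.h>

static struct si5351_platform_data board_si5351_pdata = {
	.pll_src   = { SI5351_PLL_SRC_XTAL, SI5351_PLL_SRC_XTAL },
	.pll_reset = { true, false },	/* reset PLLA after rate changes, leave PLLB alone */
};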
diff --git a/include/linux/platform_data/simplefb.h b/include/linux/platform_data/simplefb.h
index 27ea99af6e1d..4f94d52ac99f 100644
--- a/include/linux/platform_data/simplefb.h
+++ b/include/linux/platform_data/simplefb.h
@@ -22,6 +22,7 @@
{ "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 }, \
{ "x8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_XRGB8888 }, \
{ "a8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {24, 8}, DRM_FORMAT_ARGB8888 }, \
+ { "x8b8g8r8", 32, {0, 8}, {8, 8}, {16, 8}, {0, 0}, DRM_FORMAT_XBGR8888 }, \
{ "a8b8g8r8", 32, {0, 8}, {8, 8}, {16, 8}, {24, 8}, DRM_FORMAT_ABGR8888 }, \
{ "x2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {0, 0}, DRM_FORMAT_XRGB2101010 }, \
{ "a2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {30, 2}, DRM_FORMAT_ARGB2101010 }, \
diff --git a/include/linux/platform_data/spi-ath79.h b/include/linux/platform_data/spi-ath79.h
deleted file mode 100644
index 81a388ff58cc..000000000000
--- a/include/linux/platform_data/spi-ath79.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Platform data definition for Atheros AR71XX/AR724X/AR913X SPI controller
- *
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- */
-
-#ifndef _ATH79_SPI_PLATFORM_H
-#define _ATH79_SPI_PLATFORM_H
-
-struct ath79_spi_platform_data {
- unsigned bus_num;
- unsigned num_chipselect;
-};
-
-#endif /* _ATH79_SPI_PLATFORM_H */
diff --git a/include/linux/platform_data/spi-clps711x.h b/include/linux/platform_data/spi-clps711x.h
deleted file mode 100644
index efaa596848c9..000000000000
--- a/include/linux/platform_data/spi-clps711x.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CLPS711X SPI bus driver definitions
- *
- * Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
- */
-
-#ifndef ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H
-#define ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H
-
-/* Board specific platform_data */
-struct spi_clps711x_pdata {
- int *chipselect; /* Array of GPIO-numbers */
- int num_chipselect; /* Total count of GPIOs */
-};
-
-#endif
diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h
deleted file mode 100644
index 2cb5cc70fd9d..000000000000
--- a/include/linux/platform_data/spi-davinci.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2009 Texas Instruments.
- */
-
-#ifndef __ARCH_ARM_DAVINCI_SPI_H
-#define __ARCH_ARM_DAVINCI_SPI_H
-
-#include <linux/platform_data/edma.h>
-
-#define SPI_INTERN_CS 0xFF
-
-enum {
- SPI_VERSION_1, /* For DM355/DM365/DM6467 */
- SPI_VERSION_2, /* For DA8xx */
-};
-
-/**
- * davinci_spi_platform_data - Platform data for SPI master device on DaVinci
- *
- * @version: version of the SPI IP. Different DaVinci devices have slightly
- * varying versions of the same IP.
- * @num_chipselect: number of chipselects supported by this SPI master
- * @intr_line: interrupt line used to connect the SPI IP to the ARM interrupt
- *		controller within the SoC. Possible values are 0 and 1.
- * @cshold_bug: set this to true if the SPI controller on your chip requires
- * a write to CSHOLD bit in between transfers (like in DM355).
- * @dma_event_q: DMA event queue to use if SPI_IO_TYPE_DMA is used for any
- * device on the bus.
- */
-struct davinci_spi_platform_data {
- u8 version;
- u8 num_chipselect;
- u8 intr_line;
- u8 prescaler_limit;
- bool cshold_bug;
- enum dma_event_q dma_event_q;
-};
-
-/**
- * davinci_spi_config - Per-chip-select configuration for SPI slave devices
- *
- * @wdelay: amount of delay between transmissions. Measured in number of
- * SPI module clocks.
- * @odd_parity: polarity of parity flag at the end of transmit data stream.
- * 0 - odd parity, 1 - even parity.
- * @parity_enable: enable transmission of parity at end of each transmit
- * data stream.
- * @io_type: type of IO transfer. Choose between polled, interrupt and DMA.
- * @timer_disable: disable chip-select timers (setup and hold)
- * @c2tdelay: chip-select setup time. Measured in number of SPI module clocks.
- * @t2cdelay: chip-select hold time. Measured in number of SPI module clocks.
- * @t2edelay: transmit data finished to SPI ENAn pin inactive time. Measured
- * in number of SPI clocks.
- * @c2edelay: chip-select active to SPI ENAn signal active time. Measured in
- * number of SPI clocks.
- */
-struct davinci_spi_config {
- u8 wdelay;
- u8 odd_parity;
- u8 parity_enable;
-#define SPI_IO_TYPE_INTR 0
-#define SPI_IO_TYPE_POLL 1
-#define SPI_IO_TYPE_DMA 2
- u8 io_type;
- u8 timer_disable;
- u8 c2tdelay;
- u8 t2cdelay;
- u8 t2edelay;
- u8 c2edelay;
-};
-
-#endif /* __ARCH_ARM_DAVINCI_SPI_H */
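
The removed kernel-doc above fully specifies the per-chip-select timing fields; a sketch of the kind of config the header used to describe (timing values illustrative):

#include <linux/platform_data/spi-davinci.h>

static struct davinci_spi_config board_spi_cfg = {
	.io_type       = SPI_IO_TYPE_DMA,	/* polled, interrupt or DMA */
	.wdelay        = 0,
	.parity_enable = 0,
	.c2tdelay      = 8,	/* chip-select setup, in SPI module clocks */
	.t2cdelay      = 8,	/* chip-select hold, in SPI module clocks */
};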
diff --git a/include/linux/platform_data/spi-ep93xx.h b/include/linux/platform_data/spi-ep93xx.h
deleted file mode 100644
index b439f2a896e0..000000000000
--- a/include/linux/platform_data/spi-ep93xx.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_MACH_EP93XX_SPI_H
-#define __ASM_MACH_EP93XX_SPI_H
-
-struct spi_device;
-
-/**
- * struct ep93xx_spi_info - EP93xx specific SPI descriptor
- * @use_dma: use DMA for the transfers
- */
-struct ep93xx_spi_info {
- bool use_dma;
-};
-
-#endif /* __ASM_MACH_EP93XX_SPI_H */
diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
index 65fd5ffd257c..f0db674f07b8 100644
--- a/include/linux/platform_data/spi-mt65xx.h
+++ b/include/linux/platform_data/spi-mt65xx.h
@@ -12,5 +12,6 @@
/* Board specific platform_data */
struct mtk_chip_config {
u32 sample_sel;
+ u32 tick_delay;
};
#endif
diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h
index 3b400b1919a9..9e3c15b4ac91 100644
--- a/include/linux/platform_data/spi-omap2-mcspi.h
+++ b/include/linux/platform_data/spi-omap2-mcspi.h
@@ -16,9 +16,6 @@ struct omap2_mcspi_platform_config {
struct omap2_mcspi_device_config {
unsigned turbo_mode:1;
-
- /* toggle chip select after every word */
- unsigned cs_per_word:1;
};
#endif
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
index 773daf7915a3..1d6e6c424fc6 100644
--- a/include/linux/platform_data/spi-s3c64xx.h
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -16,7 +16,6 @@ struct platform_device;
* struct s3c64xx_spi_csinfo - ChipSelect description
* @fb_delay: Slave specific feedback delay.
* Refer to FB_CLK_SEL register definition in SPI chapter.
- * @line: Custom 'identity' of the CS line.
*
* This is per SPI-Slave Chipselect information.
* Allocate and initialize one in machine init code and make the
@@ -24,45 +23,36 @@ struct platform_device;
*/
struct s3c64xx_spi_csinfo {
u8 fb_delay;
- unsigned line;
};
/**
* struct s3c64xx_spi_info - SPI Controller defining structure
* @src_clk_nr: Clock source index for the CLK_CFG[SPI_CLKSEL] field.
* @num_cs: Number of CS this controller emulates.
+ * @no_cs: Used when CS line is not connected.
* @cfg_gpio: Configure pins for this SPI controller.
*/
struct s3c64xx_spi_info {
int src_clk_nr;
int num_cs;
bool no_cs;
+ bool polling;
int (*cfg_gpio)(void);
};
/**
* s3c64xx_spi_set_platdata - SPI Controller configure callback by the board
* initialization code.
- * @cfg_gpio: Pointer to gpio setup function.
* @src_clk_nr: Clock the SPI controller is to use to generate SPI clocks.
* @num_cs: Number of elements in the 'cs' array.
*
* Call this from machine init code for each SPI Controller that
* has some chips attached to it.
*/
-extern void s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
- int num_cs);
-extern void s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
- int num_cs);
-extern void s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
- int num_cs);
+extern void s3c64xx_spi0_set_platdata(int src_clk_nr, int num_cs);
/* defined by architecture to configure gpio */
extern int s3c64xx_spi0_cfg_gpio(void);
-extern int s3c64xx_spi1_cfg_gpio(void);
-extern int s3c64xx_spi2_cfg_gpio(void);
extern struct s3c64xx_spi_info s3c64xx_spi0_pdata;
-extern struct s3c64xx_spi_info s3c64xx_spi1_pdata;
-extern struct s3c64xx_spi_info s3c64xx_spi2_pdata;
#endif /*__SPI_S3C64XX_H */
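
After this change the per-controller cfg_gpio hook lives in the exported pdata rather than being passed in, so machine code reduces to a sketch like:

static void __init board_init_spi0(void)
{
	/* cfg_gpio now comes from s3c64xx_spi0_pdata, not an argument */
	s3c64xx_spi0_set_platdata(0 /* src_clk_nr */, 1 /* num_cs */);
}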
diff --git a/include/linux/platform_data/ssm2518.h b/include/linux/platform_data/ssm2518.h
deleted file mode 100644
index 3f9e632d6f63..000000000000
--- a/include/linux/platform_data/ssm2518.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SSM2518 amplifier audio driver
- *
- * Copyright 2013 Analog Devices Inc.
- * Author: Lars-Peter Clausen <lars@metafoo.de>
- */
-
-#ifndef __LINUX_PLATFORM_DATA_SSM2518_H__
-#define __LINUX_PLATFORM_DATA_SSM2518_H__
-
-/**
- * struct ssm2518_platform_data - Platform data for the ssm2518 driver
- * @enable_gpio: GPIO connected to the nSD pin. Set to -1 if the nSD pin is
- * hardwired.
- */
-struct ssm2518_platform_data {
- int enable_gpio;
-};
-
-#endif
diff --git a/include/linux/platform_data/st33zp24.h b/include/linux/platform_data/st33zp24.h
deleted file mode 100644
index 61db674f36cc..000000000000
--- a/include/linux/platform_data/st33zp24.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * STMicroelectronics TPM Linux driver for TPM 1.2 ST33ZP24
- * Copyright (C) 2009 - 2016 STMicroelectronics
- */
-#ifndef __ST33ZP24_H__
-#define __ST33ZP24_H__
-
-#define TPM_ST33_I2C "st33zp24-i2c"
-#define TPM_ST33_SPI "st33zp24-spi"
-
-struct st33zp24_platform_data {
- int io_lpcpd;
-};
-
-#endif /* __ST33ZP24_H__ */
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index e40b28ca892e..a657830232ae 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -13,8 +13,9 @@
/**
* struct st_sensors_platform_data - Platform data for the ST sensors
* @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
- * Available only for accelerometer and pressure sensors.
+ * Available only for accelerometer, magnetometer and pressure sensors.
* Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
+ * Magnetometer DRDY is supported only on LSM9DS0 and LSM303D.
* @open_drain: set the interrupt line to be open drain if possible.
* @spi_3wire: enable spi-3wire mode.
* @pullups: enable/disable i2c controller pullup resistors.
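
A sketch of platform data for a magnetometer now that DRDY redirection covers it too (the part is assumed to be one of the supported LSM9DS0/LSM303D):

#include <linux/platform_data/st_sensors_pdata.h>

static struct st_sensors_platform_data board_magn_pdata = {
	.drdy_int_pin = 1,	/* DRDY on pin 1; newly valid for magnetometers */
	.pullups      = true,
};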
diff --git a/include/linux/platform_data/syscon.h b/include/linux/platform_data/syscon.h
deleted file mode 100644
index 2c089dd3e2bd..000000000000
--- a/include/linux/platform_data/syscon.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef PLATFORM_DATA_SYSCON_H
-#define PLATFORM_DATA_SYSCON_H
-
-struct syscon_platform_data {
- const char *label;
-};
-
-#endif
diff --git a/include/linux/platform_data/ti-aemif.h b/include/linux/platform_data/ti-aemif.h
deleted file mode 100644
index 77625251df07..000000000000
--- a/include/linux/platform_data/ti-aemif.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * TI DaVinci AEMIF platform glue.
- *
- * Copyright (C) 2017 BayLibre SAS
- *
- * Author:
- * Bartosz Golaszewski <bgolaszewski@baylibre.com>
- */
-
-#ifndef __TI_DAVINCI_AEMIF_DATA_H__
-#define __TI_DAVINCI_AEMIF_DATA_H__
-
-#include <linux/of_platform.h>
-
-/**
- * struct aemif_abus_data - Async bus configuration parameters.
- *
- * @cs - Chip-select number.
- */
-struct aemif_abus_data {
- u32 cs;
-};
-
-/**
- * struct aemif_platform_data - Data to set up the TI aemif driver.
- *
- * @dev_lookup: of_dev_auxdata passed to of_platform_populate() for aemif
- * subdevices.
- * @cs_offset: Lowest allowed chip-select number.
- * @abus_data: Array of async bus configuration entries.
- * @num_abus_data: Number of abus entries.
- * @sub_devices: Array of platform subdevices.
- * @num_sub_devices: Number of subdevices.
- */
-struct aemif_platform_data {
- struct of_dev_auxdata *dev_lookup;
- u32 cs_offset;
- struct aemif_abus_data *abus_data;
- size_t num_abus_data;
- struct platform_device *sub_devices;
- size_t num_sub_devices;
-};
-
-#endif /* __TI_DAVINCI_AEMIF_DATA_H__ */
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index fafc1beea504..d8f15770a522 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -50,6 +50,10 @@ struct sysc_regbits {
s8 emufree_shift;
};
+#define SYSC_MODULE_QUIRK_OTG BIT(30)
+#define SYSC_QUIRK_RESET_ON_CTX_LOST BIT(29)
+#define SYSC_QUIRK_REINIT_ON_CTX_LOST BIT(28)
+#define SYSC_QUIRK_REINIT_ON_RESUME BIT(27)
#define SYSC_QUIRK_GPMC_DEBUG BIT(26)
#define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25)
#define SYSC_MODULE_QUIRK_PRUSS BIT(24)
@@ -67,7 +71,6 @@ struct sysc_regbits {
#define SYSC_QUIRK_SWSUP_SIDLE_ACT BIT(12)
#define SYSC_QUIRK_SWSUP_SIDLE BIT(11)
#define SYSC_QUIRK_EXT_OPT_CLOCK BIT(10)
-#define SYSC_QUIRK_LEGACY_IDLE BIT(9)
#define SYSC_QUIRK_RESET_STATUS BIT(8)
#define SYSC_QUIRK_NO_IDLE BIT(7)
#define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6)
diff --git a/include/linux/platform_data/timer-ixp4xx.h b/include/linux/platform_data/timer-ixp4xx.h
deleted file mode 100644
index ee92ae7edaed..000000000000
--- a/include/linux/platform_data/timer-ixp4xx.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __TIMER_IXP4XX_H
-#define __TIMER_IXP4XX_H
-
-#include <linux/ioport.h>
-
-void __init ixp4xx_timer_setup(resource_size_t timerbase,
- int timer_irq,
- unsigned int timer_freq);
-
-#endif
diff --git a/include/linux/platform_data/tmio.h b/include/linux/platform_data/tmio.h
new file mode 100644
index 000000000000..426291713b83
--- /dev/null
+++ b/include/linux/platform_data/tmio.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef MFD_TMIO_H
+#define MFD_TMIO_H
+
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+/* TMIO MMC platform flags */
+
+/*
+ * Some controllers can support a 2-byte block size when the bus width is
+ * configured in 4-bit mode.
+ */
+#define TMIO_MMC_BLKSZ_2BYTES BIT(1)
+
+/* Some controllers can support SDIO IRQ signalling */
+#define TMIO_MMC_SDIO_IRQ BIT(2)
+
+/* Some features are only available or tested on R-Car Gen2 or later */
+#define TMIO_MMC_MIN_RCAR2 BIT(3)
+
+/*
+ * Some controllers require waiting for the SD bus to become idle before
+ * writing to some registers.
+ */
+#define TMIO_MMC_HAS_IDLE_WAIT BIT(4)
+
+/*
+ * Use the busy timeout feature. Probably all TMIO versions support it. Yet,
+ * we don't have documentation for old variants, so we enable only known good
+ * variants with this flag. Can be removed once all variants are known good.
+ */
+#define TMIO_MMC_USE_BUSY_TIMEOUT BIT(5)
+
+/* Some controllers have CMD12 automatically issue/non-issue register */
+#define TMIO_MMC_HAVE_CMD12_CTRL BIT(7)
+
+/* Controller has some SDIO status bits which must be 1 */
+#define TMIO_MMC_SDIO_STATUS_SETBITS BIT(8)
+
+/* Some controllers have a 32-bit wide data port register */
+#define TMIO_MMC_32BIT_DATA_PORT BIT(9)
+
+/* Some controllers allow setting the SDx actual clock */
+#define TMIO_MMC_CLK_ACTUAL BIT(10)
+
+/* Some controllers have a CBSY bit */
+#define TMIO_MMC_HAVE_CBSY BIT(11)
+
+/* Some controllers have a 64-bit wide data port register */
+#define TMIO_MMC_64BIT_DATA_PORT BIT(12)
+
+struct tmio_mmc_data {
+ void *chan_priv_tx;
+ void *chan_priv_rx;
+ unsigned int hclk;
+ unsigned long capabilities;
+ unsigned long capabilities2;
+ unsigned long flags;
+ u32 ocr_mask; /* available voltages */
+ dma_addr_t dma_rx_offset;
+ unsigned int max_blk_count;
+ unsigned short max_segs;
+};
+#endif
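
A sketch of platform data built from the flags above; the clock rate and voltage mask are illustrative:

#include <linux/mmc/host.h>
#include <linux/platform_data/tmio.h>

static struct tmio_mmc_data board_mmc_data = {
	.hclk     = 100000000,	/* controller clock in Hz */
	.flags    = TMIO_MMC_SDIO_IRQ | TMIO_MMC_HAS_IDLE_WAIT,
	.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,	/* available voltages */
};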
diff --git a/include/linux/platform_data/touchscreen-s3c2410.h b/include/linux/platform_data/touchscreen-s3c2410.h
deleted file mode 100644
index bf8d3b9d7c6a..000000000000
--- a/include/linux/platform_data/touchscreen-s3c2410.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2005 Arnaud Patard <arnaud.patard@rtp-net.org>
-*/
-
-#ifndef __TOUCHSCREEN_S3C2410_H
-#define __TOUCHSCREEN_S3C2410_H
-
-struct s3c2410_ts_mach_info {
- int delay;
- int presc;
- int oversampling_shift;
- void (*cfg_gpio)(struct platform_device *dev);
-};
-
-extern void s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *);
-extern void s3c64xx_ts_set_platdata(struct s3c2410_ts_mach_info *);
-
-/* defined by architecture to configure gpio */
-extern void s3c24xx_ts_cfg_gpio(struct platform_device *dev);
-
-#endif /*__TOUCHSCREEN_S3C2410_H */
diff --git a/include/linux/platform_data/tps68470.h b/include/linux/platform_data/tps68470.h
new file mode 100644
index 000000000000..e605a2cab07f
--- /dev/null
+++ b/include/linux/platform_data/tps68470.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * TI TPS68470 PMIC platform data definition.
+ *
+ * Copyright (c) 2021 Red Hat Inc.
+ *
+ * Red Hat authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#ifndef __PDATA_TPS68470_H
+#define __PDATA_TPS68470_H
+
+enum tps68470_regulators {
+ TPS68470_CORE,
+ TPS68470_ANA,
+ TPS68470_VCM,
+ TPS68470_VIO,
+ TPS68470_VSIO,
+ TPS68470_AUX1,
+ TPS68470_AUX2,
+ TPS68470_NUM_REGULATORS
+};
+
+struct regulator_init_data;
+
+struct tps68470_regulator_platform_data {
+ const struct regulator_init_data *reg_init_data[TPS68470_NUM_REGULATORS];
+};
+
+struct tps68470_clk_consumer {
+ const char *consumer_dev_name;
+ const char *consumer_con_id;
+};
+
+struct tps68470_clk_platform_data {
+ unsigned int n_consumers;
+ struct tps68470_clk_consumer consumers[];
+};
+
+#endif
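
Because consumers[] is a flexible array member, a static definition relies on the GNU C extension that permits initializing it; a sketch with one hypothetical consumer device name:

#include <linux/platform_data/tps68470.h>

static struct tps68470_clk_platform_data board_tps68470_clk_pdata = {
	.n_consumers = 1,
	.consumers = {	/* GNU extension: static init of a flexible array */
		{ .consumer_dev_name = "i2c-SENSOR:00" },	/* illustrative name */
	},
};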
diff --git a/include/linux/platform_data/tsl2563.h b/include/linux/platform_data/tsl2563.h
deleted file mode 100644
index 9cf9309c3f24..000000000000
--- a/include/linux/platform_data/tsl2563.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_TSL2563_H
-#define __LINUX_TSL2563_H
-
-struct tsl2563_platform_data {
- int cover_comp_gain;
-};
-
-#endif /* __LINUX_TSL2563_H */
diff --git a/include/linux/platform_data/uio_dmem_genirq.h b/include/linux/platform_data/uio_dmem_genirq.h
index 973c1bb32168..c8f6de685306 100644
--- a/include/linux/platform_data/uio_dmem_genirq.h
+++ b/include/linux/platform_data/uio_dmem_genirq.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* include/linux/platform_data/uio_dmem_genirq.h
*
* Copyright (C) 2012 Damian Hobson-Garcia
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _UIO_DMEM_GENIRQ_H
diff --git a/include/linux/platform_data/uio_pruss.h b/include/linux/platform_data/uio_pruss.h
deleted file mode 100644
index 31f2e22661bc..000000000000
--- a/include/linux/platform_data/uio_pruss.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * include/linux/platform_data/uio_pruss.h
- *
- * Platform data for uio_pruss driver
- *
- * Copyright (C) 2010-11 Texas Instruments Incorporated - https://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _UIO_PRUSS_H_
-#define _UIO_PRUSS_H_
-
-/* To configure the PRUSS INTC base offset for UIO driver */
-struct uio_pruss_pdata {
- u32 pintc_base;
- struct gen_pool *sram_pool;
-};
-#endif /* _UIO_PRUSS_H_ */
diff --git a/include/linux/platform_data/usb-davinci.h b/include/linux/platform_data/usb-davinci.h
deleted file mode 100644
index 879f5c78b91a..000000000000
--- a/include/linux/platform_data/usb-davinci.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * USB related definitions
- *
- * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __ASM_ARCH_USB_H
-#define __ASM_ARCH_USB_H
-
-/* Passed as the platform data to the OHCI driver */
-struct da8xx_ohci_root_hub {
- /* Time from power on to power good (in 2 ms units) */
- u8 potpgt;
-};
-
-void davinci_setup_usb(unsigned mA, unsigned potpgt_ms);
-
-#endif /* ifndef __ASM_ARCH_USB_H */
diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h
index 5e70d667031c..580978e468f8 100644
--- a/include/linux/platform_data/usb-omap.h
+++ b/include/linux/platform_data/usb-omap.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* usb-omap.h - Platform data for the various OMAP USB IPs
*
* Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com
- *
- * This software is distributed under the terms of the GNU General Public
- * License ("GPL") version 2, as published by the Free Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#define OMAP3_HS_USB_PORTS 3
diff --git a/include/linux/platform_data/usb-omap1.h b/include/linux/platform_data/usb-omap1.h
index 43b5ce139c37..e7b8dc92a269 100644
--- a/include/linux/platform_data/usb-omap1.h
+++ b/include/linux/platform_data/usb-omap1.h
@@ -48,6 +48,10 @@ struct omap_usb_config {
u32 (*usb2_init)(unsigned nwires, unsigned alt_pingroup);
int (*ocpi_enable)(void);
+
+ void (*lb_reset)(void);
+
+ int (*transceiver_power)(int on);
};
#endif /* __LINUX_USB_OMAP1_H */
diff --git a/include/linux/platform_data/usb-pxa3xx-ulpi.h b/include/linux/platform_data/usb-pxa3xx-ulpi.h
deleted file mode 100644
index 4d31a5cbdeb1..000000000000
--- a/include/linux/platform_data/usb-pxa3xx-ulpi.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * PXA3xx U2D header
- *
- * Copyright (C) 2010 CompuLab Ltd.
- *
- * Igor Grinberg <grinberg@compulab.co.il>
- */
-#ifndef __PXA310_U2D__
-#define __PXA310_U2D__
-
-#include <linux/usb/ulpi.h>
-
-struct pxa3xx_u2d_platform_data {
-
-#define ULPI_SER_6PIN (1 << 0)
-#define ULPI_SER_3PIN (1 << 1)
- unsigned int ulpi_mode;
-
- int (*init)(struct device *);
- void (*exit)(struct device *);
-};
-
-
-/* Start PXA3xx U2D host */
-int pxa3xx_u2d_start_hc(struct usb_bus *host);
-/* Stop PXA3xx U2D host */
-void pxa3xx_u2d_stop_hc(struct usb_bus *host);
-
-extern void pxa3xx_set_u2d_info(struct pxa3xx_u2d_platform_data *info);
-
-#endif /* __PXA310_U2D__ */
diff --git a/include/linux/platform_data/usb-s3c2410_udc.h b/include/linux/platform_data/usb-s3c2410_udc.h
deleted file mode 100644
index 07394819d03b..000000000000
--- a/include/linux/platform_data/usb-s3c2410_udc.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* arch/arm/plat-samsung/include/plat/udc.h
- *
- * Copyright (c) 2005 Arnaud Patard <arnaud.patard@rtp-net.org>
- *
- * Changelog:
- * 14-Mar-2005 RTP Created file
- * 02-Aug-2005 RTP File rename
- * 07-Sep-2005 BJD Minor cleanups, changed cmd to enum
- * 18-Jan-2007 HMW Add per-platform vbus_draw function
-*/
-
-#ifndef __ASM_ARM_ARCH_UDC_H
-#define __ASM_ARM_ARCH_UDC_H
-
-enum s3c2410_udc_cmd_e {
- S3C2410_UDC_P_ENABLE = 1, /* Pull-up enable */
- S3C2410_UDC_P_DISABLE = 2, /* Pull-up disable */
- S3C2410_UDC_P_RESET = 3, /* UDC reset, in case of */
-};
-
-struct s3c2410_udc_mach_info {
- void (*udc_command)(enum s3c2410_udc_cmd_e);
- void (*vbus_draw)(unsigned int ma);
-
- unsigned int pullup_pin;
- unsigned int pullup_pin_inverted;
-
- unsigned int vbus_pin;
- unsigned char vbus_pin_inverted;
-};
-
-extern void __init s3c24xx_udc_set_platdata(struct s3c2410_udc_mach_info *);
-
-struct s3c24xx_hsudc_platdata;
-
-extern void __init s3c24xx_hsudc_set_platdata(struct s3c24xx_hsudc_platdata *pd);
-
-#endif /* __ASM_ARM_ARCH_UDC_H */
diff --git a/include/linux/platform_data/usb3503.h b/include/linux/platform_data/usb3503.h
index d01ef97ddf36..f3c942f396f8 100644
--- a/include/linux/platform_data/usb3503.h
+++ b/include/linux/platform_data/usb3503.h
@@ -12,6 +12,7 @@ enum usb3503_mode {
USB3503_MODE_UNKNOWN,
USB3503_MODE_HUB,
USB3503_MODE_STANDBY,
+ USB3503_MODE_BYPASS,
};
struct usb3503_platform_data {
diff --git a/include/linux/platform_data/ux500_wdt.h b/include/linux/platform_data/ux500_wdt.h
deleted file mode 100644
index de6a4ad41e76..000000000000
--- a/include/linux/platform_data/ux500_wdt.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST Ericsson SA 2011
- *
- * STE Ux500 Watchdog platform data
- */
-#ifndef __UX500_WDT_H
-#define __UX500_WDT_H
-
-/**
- * struct ux500_wdt_data
- */
-struct ux500_wdt_data {
- unsigned int timeout;
- bool has_28_bits_resolution;
-};
-
-#endif /* __UX500_WDT_H */
diff --git a/include/linux/platform_data/video-imxfb.h b/include/linux/platform_data/video-imxfb.h
deleted file mode 100644
index 02812651af7d..000000000000
--- a/include/linux/platform_data/video-imxfb.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This structure describes the machine which we are running on.
- */
-#ifndef __MACH_IMXFB_H__
-#define __MACH_IMXFB_H__
-
-#include <linux/fb.h>
-
-#define PCR_TFT (1 << 31)
-#define PCR_COLOR (1 << 30)
-#define PCR_PBSIZ_1 (0 << 28)
-#define PCR_PBSIZ_2 (1 << 28)
-#define PCR_PBSIZ_4 (2 << 28)
-#define PCR_PBSIZ_8 (3 << 28)
-#define PCR_BPIX_1 (0 << 25)
-#define PCR_BPIX_2 (1 << 25)
-#define PCR_BPIX_4 (2 << 25)
-#define PCR_BPIX_8 (3 << 25)
-#define PCR_BPIX_12 (4 << 25)
-#define PCR_BPIX_16 (5 << 25)
-#define PCR_BPIX_18 (6 << 25)
-#define PCR_PIXPOL (1 << 24)
-#define PCR_FLMPOL (1 << 23)
-#define PCR_LPPOL (1 << 22)
-#define PCR_CLKPOL (1 << 21)
-#define PCR_OEPOL (1 << 20)
-#define PCR_SCLKIDLE (1 << 19)
-#define PCR_END_SEL (1 << 18)
-#define PCR_END_BYTE_SWAP (1 << 17)
-#define PCR_REV_VS (1 << 16)
-#define PCR_ACD_SEL (1 << 15)
-#define PCR_ACD(x) (((x) & 0x7f) << 8)
-#define PCR_SCLK_SEL (1 << 7)
-#define PCR_SHARP (1 << 6)
-#define PCR_PCD(x) ((x) & 0x3f)
-
-#define PWMR_CLS(x) (((x) & 0x1ff) << 16)
-#define PWMR_LDMSK (1 << 15)
-#define PWMR_SCR1 (1 << 10)
-#define PWMR_SCR0 (1 << 9)
-#define PWMR_CC_EN (1 << 8)
-#define PWMR_PW(x) ((x) & 0xff)
-
-#define LSCR1_PS_RISE_DELAY(x) (((x) & 0x7f) << 26)
-#define LSCR1_CLS_RISE_DELAY(x) (((x) & 0x3f) << 16)
-#define LSCR1_REV_TOGGLE_DELAY(x) (((x) & 0xf) << 8)
-#define LSCR1_GRAY2(x) (((x) & 0xf) << 4)
-#define LSCR1_GRAY1(x) (((x) & 0xf))
-
-struct imx_fb_videomode {
- struct fb_videomode mode;
- u32 pcr;
- bool aus_mode;
- unsigned char bpp;
-};
-
-struct imx_fb_platform_data {
- struct imx_fb_videomode *mode;
- int num_modes;
-
- u_int pwmr;
- u_int lscr1;
- u_int dmacr;
-
- int (*init)(struct platform_device *);
- void (*exit)(struct platform_device *);
-};
-
-#endif /* ifndef __MACH_IMXFB_H__ */
diff --git a/include/linux/platform_data/video-mx3fb.h b/include/linux/platform_data/video-mx3fb.h
deleted file mode 100644
index d03dc322a616..000000000000
--- a/include/linux/platform_data/video-mx3fb.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- */
-
-#ifndef __ASM_ARCH_MX3FB_H__
-#define __ASM_ARCH_MX3FB_H__
-
-#include <linux/device.h>
-#include <linux/fb.h>
-
-/* Proprietary FB_SYNC_ flags */
-#define FB_SYNC_OE_ACT_HIGH 0x80000000
-#define FB_SYNC_CLK_INVERT 0x40000000
-#define FB_SYNC_DATA_INVERT 0x20000000
-#define FB_SYNC_CLK_IDLE_EN 0x10000000
-#define FB_SYNC_SHARP_MODE 0x08000000
-#define FB_SYNC_SWAP_RGB 0x04000000
-#define FB_SYNC_CLK_SEL_EN 0x02000000
-
-/*
- * Specify the way your display is connected. The IPU can arbitrarily
- * map the internal colors to the external data lines. We only support
- * the following mappings at the moment.
- */
-enum disp_data_mapping {
- /* blue -> d[0..5], green -> d[6..11], red -> d[12..17] */
- IPU_DISP_DATA_MAPPING_RGB666,
- /* blue -> d[0..4], green -> d[5..10], red -> d[11..15] */
- IPU_DISP_DATA_MAPPING_RGB565,
- /* blue -> d[0..7], green -> d[8..15], red -> d[16..23] */
- IPU_DISP_DATA_MAPPING_RGB888,
-};
-
-/**
- * struct mx3fb_platform_data - mx3fb platform data
- *
- * @dma_dev: pointer to the dma-device, used for dma-slave connection
- * @mode: pointer to a platform-provided per mxc_register_fb() videomode
- */
-struct mx3fb_platform_data {
- struct device *dma_dev;
- const char *name;
- const struct fb_videomode *mode;
- int num_modes;
- enum disp_data_mapping disp_data_fmt;
-};
-
-#endif
diff --git a/include/linux/platform_data/video-pxafb.h b/include/linux/platform_data/video-pxafb.h
index b3d574778326..38c24c77ba43 100644
--- a/include/linux/platform_data/video-pxafb.h
+++ b/include/linux/platform_data/video-pxafb.h
@@ -8,7 +8,6 @@
*/
#include <linux/fb.h>
-#include <mach/regs-lcd.h>
/*
* Supported LCD connections
@@ -151,7 +150,27 @@ struct pxafb_mach_info {
};
void pxa_set_fb_info(struct device *, struct pxafb_mach_info *);
-unsigned long pxafb_get_hsync_time(struct device *dev);
+
+/* smartpanel related */
+#define SMART_CMD_A0 (0x1 << 8)
+#define SMART_CMD_READ_STATUS_REG (0x0 << 9)
+#define SMART_CMD_READ_FRAME_BUFFER ((0x0 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WRITE_COMMAND (0x1 << 9)
+#define SMART_CMD_WRITE_DATA ((0x1 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WRITE_FRAME ((0x2 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WAIT_FOR_VSYNC (0x3 << 9)
+#define SMART_CMD_NOOP (0x4 << 9)
+#define SMART_CMD_INTERRUPT (0x5 << 9)
+
+#define SMART_CMD(x) (SMART_CMD_WRITE_COMMAND | ((x) & 0xff))
+#define SMART_DAT(x) (SMART_CMD_WRITE_DATA | ((x) & 0xff))
+
+/*
+ * SMART_DELAY() is a software-controlled delay primitive that can be
+ * inserted between command sequences; the unused command 0x6 is used for
+ * it, and the delay ranges from 0 ms to 255 ms.
+ */
+#define SMART_CMD_DELAY (0x6 << 9)
+#define SMART_DELAY(ms) (SMART_CMD_DELAY | ((ms) & 0xff))
#ifdef CONFIG_FB_PXA_SMARTPANEL
extern int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int);
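
A sketch of a smartpanel command sequence built from these macros and queued through pxafb_smart_queue(); the controller command bytes are illustrative, not taken from a real panel datasheet:

static uint16_t board_panel_init_cmds[] = {
	SMART_CMD(0x01),	/* e.g. a soft-reset command */
	SMART_DELAY(120),	/* software delay of 120 ms */
	SMART_CMD(0x3a),	/* e.g. a set-pixel-format command... */
	SMART_DAT(0x55),	/* ...with one data byte */
	SMART_CMD_WAIT_FOR_VSYNC,
};
/* queued as: pxafb_smart_queue(info, board_panel_init_cmds,
 *                              ARRAY_SIZE(board_panel_init_cmds)); */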
diff --git a/include/linux/platform_data/voltage-omap.h b/include/linux/platform_data/voltage-omap.h
index 43e8da9fb447..6d74e507dbd2 100644
--- a/include/linux/platform_data/voltage-omap.h
+++ b/include/linux/platform_data/voltage-omap.h
@@ -29,7 +29,6 @@ struct omap_volt_data {
struct voltagedomain;
struct voltagedomain *voltdm_lookup(const char *name);
-int voltdm_scale(struct voltagedomain *voltdm, unsigned long target_volt);
unsigned long voltdm_get_voltage(struct voltagedomain *voltdm);
struct omap_volt_data *omap_voltage_get_voltdata(struct voltagedomain *voltdm,
unsigned long volt);
diff --git a/include/linux/platform_data/wan_ixp4xx_hss.h b/include/linux/platform_data/wan_ixp4xx_hss.h
deleted file mode 100644
index d525a0feb9e1..000000000000
--- a/include/linux/platform_data/wan_ixp4xx_hss.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PLATFORM_DATA_WAN_IXP4XX_HSS_H
-#define __PLATFORM_DATA_WAN_IXP4XX_HSS_H
-
-#include <linux/types.h>
-
-/* Information about built-in HSS (synchronous serial) interfaces */
-struct hss_plat_info {
- int (*set_clock)(int port, unsigned int clock_type);
- int (*open)(int port, void *pdev,
- void (*set_carrier_cb)(void *pdev, int carrier));
- void (*close)(int port, void *pdev);
- u8 txreadyq;
- u32 timer_freq;
-};
-
-#endif
diff --git a/include/linux/platform_data/x86/amd-fch.h b/include/linux/platform_data/x86/amd-fch.h
new file mode 100644
index 000000000000..2cf5153edbc2
--- /dev/null
+++ b/include/linux/platform_data/x86/amd-fch.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_AMD_FCH_H_
+#define _ASM_X86_AMD_FCH_H_
+
+#define FCH_PM_BASE 0xFED80300
+
+/* Register offsets from PM base: */
+#define FCH_PM_DECODEEN 0x00
+#define FCH_PM_DECODEEN_SMBUS0SEL GENMASK(20, 19)
+#define FCH_PM_SCRATCH 0x80
+#define FCH_PM_S5_RESET_STATUS 0xC0
+
+#endif /* _ASM_X86_AMD_FCH_H_ */
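
A sketch of reading one of these registers by mapping the fixed PM base; the mapping length and minimal error handling are for illustration only:

#include <linux/io.h>
#include <linux/platform_data/x86/amd-fch.h>

static u32 board_read_s5_reset_status(void)
{
	void __iomem *base = ioremap(FCH_PM_BASE, 0x100);	/* length illustrative */
	u32 val = 0;

	if (base) {
		val = readl(base + FCH_PM_S5_RESET_STATUS);
		iounmap(base);
	}
	return val;
}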
diff --git a/include/linux/platform_data/x86/asus-wmi-leds-ids.h b/include/linux/platform_data/x86/asus-wmi-leds-ids.h
new file mode 100644
index 000000000000..034a039c4e37
--- /dev/null
+++ b/include/linux/platform_data/x86/asus-wmi-leds-ids.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PLATFORM_DATA_X86_ASUS_WMI_LEDS_IDS_H
+#define __PLATFORM_DATA_X86_ASUS_WMI_LEDS_IDS_H
+
+#include <linux/dmi.h>
+#include <linux/types.h>
+
+/* To be used by both hid-asus and asus-wmi to determine which controls kbd_brightness */
+#if IS_REACHABLE(CONFIG_ASUS_WMI) || IS_REACHABLE(CONFIG_HID_ASUS)
+static const struct dmi_system_id asus_use_hid_led_dmi_ids[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Zephyrus"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Strix"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Flow"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt P16"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA403U"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GU605M"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "RC71L"),
+ },
+ },
+ { },
+};
+#endif
+
+#endif /* __PLATFORM_DATA_X86_ASUS_WMI_LEDS_IDS_H */
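
Either driver can then decide keyboard-LED ownership with a one-line DMI check; a minimal sketch, assuming it runs from a probe path:

static bool kbd_led_is_hid_managed(void)
{
	return dmi_check_system(asus_use_hid_led_dmi_ids);
}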
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 2f274cf52805..419491d4abca 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -5,6 +5,9 @@
#include <linux/errno.h>
#include <linux/types.h>
+#define ASUS_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66"
+#define ASUS_ACPI_UID_ASUSWMI "ASUSWMI"
+
/* WMI Methods */
#define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */
#define ASUS_WMI_METHODID_SFBD 0x44424653 /* Set First Boot Device */
@@ -49,6 +52,11 @@
#define ASUS_WMI_DEVID_LED4 0x00020014
#define ASUS_WMI_DEVID_LED5 0x00020015
#define ASUS_WMI_DEVID_LED6 0x00020016
+#define ASUS_WMI_DEVID_MICMUTE_LED 0x00040017
+
+/* Disable Camera LED */
+#define ASUS_WMI_DEVID_CAMERA_LED_NEG 0x00060078 /* 0 = on (unused) */
+#define ASUS_WMI_DEVID_CAMERA_LED 0x00060079 /* 1 = on */
/* Backlight and Brightness */
#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */
@@ -57,12 +65,24 @@
#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025
+#define ASUS_WMI_DEVID_OOBE 0x0005002F
+/* This can only be used to disable the screen, not to re-enable it */
+#define ASUS_WMI_DEVID_SCREENPAD_POWER 0x00050031
+/* Writing a brightness re-enables the screen if disabled */
+#define ASUS_WMI_DEVID_SCREENPAD_LIGHT 0x00050032
#define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018
#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075
+#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO 0x00110019
/* Misc */
+#define ASUS_WMI_DEVID_PANEL_HD 0x0005001C
+#define ASUS_WMI_DEVID_PANEL_OD 0x00050019
#define ASUS_WMI_DEVID_CAMERA 0x00060013
#define ASUS_WMI_DEVID_LID_FLIP 0x00060062
+#define ASUS_WMI_DEVID_LID_FLIP_ROG 0x00060077
+#define ASUS_WMI_DEVID_MINI_LED_MODE 0x0005001E
+#define ASUS_WMI_DEVID_MINI_LED_MODE2 0x0005002E
+#define ASUS_WMI_DEVID_SCREEN_AUTO_BRIGHTNESS 0x0005002A
/* Storage */
#define ASUS_WMI_DEVID_CARDREADER 0x00080013
@@ -76,6 +96,20 @@
#define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011
#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 /* deprecated */
#define ASUS_WMI_DEVID_CPU_FAN_CTRL 0x00110013
+#define ASUS_WMI_DEVID_GPU_FAN_CTRL 0x00110014
+#define ASUS_WMI_DEVID_MID_FAN_CTRL 0x00110031
+#define ASUS_WMI_DEVID_CPU_FAN_CURVE 0x00110024
+#define ASUS_WMI_DEVID_GPU_FAN_CURVE 0x00110025
+#define ASUS_WMI_DEVID_MID_FAN_CURVE 0x00110032
+
+/* Tunables for ASUS ROG laptops */
+#define ASUS_WMI_DEVID_PPT_PL2_SPPT 0x001200A0
+#define ASUS_WMI_DEVID_PPT_PL1_SPL 0x001200A3
+#define ASUS_WMI_DEVID_PPT_APU_SPPT 0x001200B0
+#define ASUS_WMI_DEVID_PPT_PLAT_SPPT 0x001200B1
+#define ASUS_WMI_DEVID_PPT_PL3_FPPT 0x001200C1
+#define ASUS_WMI_DEVID_NV_DYN_BOOST 0x001200C0
+#define ASUS_WMI_DEVID_NV_THERM_TARGET 0x001200C2
/* Power */
#define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012
@@ -89,6 +123,39 @@
/* Keyboard dock */
#define ASUS_WMI_DEVID_KBD_DOCK 0x00120063
+/* Charging mode - 1=Barrel, 2=USB */
+#define ASUS_WMI_DEVID_CHARGE_MODE 0x0012006C
+
+/* MCU powersave mode */
+#define ASUS_WMI_DEVID_MCU_POWERSAVE 0x001200E2
+
+/* eGPU is connected? 1 == true */
+#define ASUS_WMI_DEVID_EGPU_CONNECTED 0x00090018
+/* egpu on/off */
+#define ASUS_WMI_DEVID_EGPU 0x00090019
+
+/* dgpu on/off */
+#define ASUS_WMI_DEVID_DGPU 0x00090020
+
+#define ASUS_WMI_DEVID_APU_MEM 0x000600C1
+
+#define ASUS_WMI_DEVID_DGPU_BASE_TGP 0x00120099
+#define ASUS_WMI_DEVID_DGPU_SET_TGP 0x00120098
+
+/* gpu mux switch, 0 = dGPU, 1 = Optimus */
+#define ASUS_WMI_DEVID_GPU_MUX 0x00090016
+#define ASUS_WMI_DEVID_GPU_MUX_VIVO 0x00090026
+
+/* TUF laptop RGB modes/colours */
+#define ASUS_WMI_DEVID_TUF_RGB_MODE 0x00100056
+#define ASUS_WMI_DEVID_TUF_RGB_MODE2 0x0010005A
+
+/* TUF laptop RGB power/state */
+#define ASUS_WMI_DEVID_TUF_RGB_STATE 0x00100057
+
+/* Bootup sound control */
+#define ASUS_WMI_DEVID_BOOT_SOUND 0x00130022
+
/* DSTS masks */
#define ASUS_WMI_DSTS_STATUS_BIT 0x00000001
#define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002
@@ -99,9 +166,33 @@
#define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00
#define ASUS_WMI_DSTS_LIGHTBAR_MASK 0x0000000F
+enum asus_ally_mcu_hack {
+ ASUS_WMI_ALLY_MCU_HACK_INIT,
+ ASUS_WMI_ALLY_MCU_HACK_ENABLED,
+ ASUS_WMI_ALLY_MCU_HACK_DISABLED,
+};
+
#if IS_REACHABLE(CONFIG_ASUS_WMI)
+void set_ally_mcu_hack(enum asus_ally_mcu_hack status);
+void set_ally_mcu_powersave(bool enabled);
+int asus_wmi_get_devstate_dsts(u32 dev_id, u32 *retval);
+int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, u32 *retval);
int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval);
#else
+static inline void set_ally_mcu_hack(enum asus_ally_mcu_hack status)
+{
+}
+static inline void set_ally_mcu_powersave(bool enabled)
+{
+}
+static inline int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, u32 *retval)
+{
+ return -ENODEV;
+}
+static inline int asus_wmi_get_devstate_dsts(u32 dev_id, u32 *retval)
+{
+ return -ENODEV;
+}
static inline int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1,
u32 *retval)
{
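
A sketch of a caller using the newly exported helper to turn the camera LED off; whether a NULL retval pointer is accepted is an assumption here:

static int camera_led_off(void)
{
	return asus_wmi_set_devstate(ASUS_WMI_DEVID_CAMERA_LED, 0, NULL);
}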
diff --git a/include/linux/platform_data/x86/clk-lpss.h b/include/linux/platform_data/x86/clk-lpss.h
index 207e1a317800..7f132029316a 100644
--- a/include/linux/platform_data/x86/clk-lpss.h
+++ b/include/linux/platform_data/x86/clk-lpss.h
@@ -15,6 +15,6 @@ struct lpss_clk_data {
struct clk *clk;
};
-extern int lpt_clk_init(void);
+int lpss_atom_clk_init(void);
#endif /* __CLK_LPSS_H */
diff --git a/include/linux/platform_data/x86/int3472.h b/include/linux/platform_data/x86/int3472.h
new file mode 100644
index 000000000000..b1b837583d54
--- /dev/null
+++ b/include/linux/platform_data/x86/int3472.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel INT3472 ACPI camera sensor power-management support
+ *
+ * Author: Dan Scally <djrscally@gmail.com>
+ */
+
+#ifndef __PLATFORM_DATA_X86_INT3472_H
+#define __PLATFORM_DATA_X86_INT3472_H
+
+#include <linux/clk-provider.h>
+#include <linux/gpio/machine.h>
+#include <linux/leds.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/types.h>
+
+/* FIXME drop this once the I2C_DEV_NAME_FORMAT macro has been added to include/linux/i2c.h */
+#ifndef I2C_DEV_NAME_FORMAT
+#define I2C_DEV_NAME_FORMAT "i2c-%s"
+#endif
+
+/* PMIC GPIO Types */
+#define INT3472_GPIO_TYPE_RESET 0x00
+#define INT3472_GPIO_TYPE_POWERDOWN 0x01
+#define INT3472_GPIO_TYPE_POWER_ENABLE 0x0b
+#define INT3472_GPIO_TYPE_CLK_ENABLE 0x0c
+#define INT3472_GPIO_TYPE_PRIVACY_LED 0x0d
+#define INT3472_GPIO_TYPE_HANDSHAKE 0x12
+#define INT3472_GPIO_TYPE_HOTPLUG_DETECT 0x13
+
+#define INT3472_PDEV_MAX_NAME_LEN 23
+#define INT3472_MAX_SENSOR_GPIOS 3
+#define INT3472_MAX_REGULATORS 3
+
+/* E.g. "avdd\0" */
+#define GPIO_SUPPLY_NAME_LENGTH 5
+/* 12 chars for acpi_dev_name() + "-", e.g. "ABCD1234:00-" */
+#define GPIO_REGULATOR_NAME_LENGTH (12 + GPIO_SUPPLY_NAME_LENGTH)
+/* lower- and upper-case mapping */
+#define GPIO_REGULATOR_SUPPLY_MAP_COUNT 2
+/*
+ * Ensure the GPIO is driven low/high for at least 2 ms before changing.
+ *
+ * 2 ms has been chosen because it is the minimum time ovXXXX sensors need to
+ * have their reset line driven logical high to properly register a reset.
+ */
+#define GPIO_REGULATOR_ENABLE_TIME (2 * USEC_PER_MSEC)
+#define GPIO_REGULATOR_OFF_ON_DELAY (2 * USEC_PER_MSEC)
+
+#define INT3472_LED_MAX_NAME_LEN 32
+
+#define CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET 86
+
+#define INT3472_REGULATOR(_name, _ops, _enable_time, _off_on_delay) \
+ (const struct regulator_desc) { \
+ .name = _name, \
+ .type = REGULATOR_VOLTAGE, \
+ .ops = _ops, \
+ .owner = THIS_MODULE, \
+ .enable_time = _enable_time, \
+ .off_on_delay = _off_on_delay, \
+ }
+
+#define to_int3472_clk(hw) \
+ container_of(hw, struct int3472_clock, clk_hw)
+
+#define to_int3472_device(clk) \
+ container_of(clk, struct int3472_discrete_device, clock)
+
+struct acpi_device;
+struct dmi_system_id;
+struct i2c_client;
+struct platform_device;
+
+struct int3472_cldb {
+ u8 version;
+ /*
+ * control logic type
+ * 0: UNKNOWN
+ * 1: DISCRETE(CRD-D)
+ * 2: PMIC TPS68470
+ * 3: PMIC uP6641
+ */
+ u8 control_logic_type;
+ u8 control_logic_id;
+ u8 sensor_card_sku;
+ u8 reserved[10];
+ u8 clock_source;
+ u8 reserved2[17];
+};
+
+struct int3472_discrete_quirks {
+ /* For models where AVDD GPIO is shared between sensors */
+ const char *avdd_second_sensor;
+};
+
+struct int3472_gpio_regulator {
+ /* SUPPLY_MAP_COUNT * 2 to make room for second sensor mappings */
+ struct regulator_consumer_supply supply_map[GPIO_REGULATOR_SUPPLY_MAP_COUNT * 2];
+ char supply_name_upper[GPIO_SUPPLY_NAME_LENGTH];
+ char regulator_name[GPIO_REGULATOR_NAME_LENGTH];
+ struct regulator_dev *rdev;
+ struct regulator_desc rdesc;
+};
+
+struct int3472_discrete_device {
+ struct acpi_device *adev;
+ struct device *dev;
+ struct acpi_device *sensor;
+ const char *sensor_name;
+
+ struct int3472_gpio_regulator regulators[INT3472_MAX_REGULATORS];
+
+ struct int3472_clock {
+ struct clk *clk;
+ struct clk_hw clk_hw;
+ struct clk_lookup *cl;
+ struct gpio_desc *ena_gpio;
+ u32 frequency;
+ u8 imgclk_index;
+ } clock;
+
+ struct int3472_pled {
+ struct led_classdev classdev;
+ struct led_lookup_data lookup;
+ char name[INT3472_LED_MAX_NAME_LEN];
+ struct gpio_desc *gpio;
+ } pled;
+
+ struct int3472_discrete_quirks quirks;
+
+ unsigned int ngpios; /* how many GPIOs have we seen */
+ unsigned int n_sensor_gpios; /* how many have we mapped to sensor */
+ unsigned int n_regulator_gpios; /* how many have we mapped to a regulator */
+ struct gpiod_lookup_table gpios;
+};
+
+extern const struct dmi_system_id skl_int3472_discrete_quirks[];
+
+union acpi_object *skl_int3472_get_acpi_buffer(struct acpi_device *adev,
+ char *id);
+int skl_int3472_fill_cldb(struct acpi_device *adev, struct int3472_cldb *cldb);
+int skl_int3472_get_sensor_adev_and_name(struct device *dev,
+ struct acpi_device **sensor_adev_ret,
+ const char **name_ret);
+
+int int3472_discrete_parse_crs(struct int3472_discrete_device *int3472);
+void int3472_discrete_cleanup(struct int3472_discrete_device *int3472);
+
+int skl_int3472_register_gpio_clock(struct int3472_discrete_device *int3472,
+ struct gpio_desc *gpio);
+int skl_int3472_register_dsm_clock(struct int3472_discrete_device *int3472);
+void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472);
+
+int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
+ struct gpio_desc *gpio,
+ unsigned int enable_time,
+ const char *supply_name,
+ const char *second_sensor);
+void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472);
+
+int skl_int3472_register_pled(struct int3472_discrete_device *int3472, struct gpio_desc *gpio);
+void skl_int3472_unregister_pled(struct int3472_discrete_device *int3472);
+
+#endif
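As a usage sketch (not part of this header): the to_int3472_clk() helper above is what a clk_ops callback uses to recover the driver state from the clk_hw embedded in struct int3472_clock. The example_* names below are illustrative, assuming the clock is gated by ena_gpio as in the structure definition.

#include <linux/clk-provider.h>
#include <linux/gpio/consumer.h>

static int example_int3472_clk_prepare(struct clk_hw *hw)
{
	struct int3472_clock *clk = to_int3472_clk(hw);

	/* Drive the sensor clock-enable pin high. */
	gpiod_set_value_cansleep(clk->ena_gpio, 1);
	return 0;
}

static void example_int3472_clk_unprepare(struct clk_hw *hw)
{
	struct int3472_clock *clk = to_int3472_clk(hw);

	gpiod_set_value_cansleep(clk->ena_gpio, 0);
}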
diff --git a/include/linux/platform_data/intel-mid_wdt.h b/include/linux/platform_data/x86/intel-mid_wdt.h
index 8dba70b4b020..e5c0210d0fec 100644
--- a/include/linux/platform_data/intel-mid_wdt.h
+++ b/include/linux/platform_data/x86/intel-mid_wdt.h
@@ -6,8 +6,8 @@
* Contact: David Cohen <david.a.cohen@linux.intel.com>
*/
-#ifndef __INTEL_MID_WDT_H__
-#define __INTEL_MID_WDT_H__
+#ifndef __PLATFORM_X86_INTEL_MID_WDT_H_
+#define __PLATFORM_X86_INTEL_MID_WDT_H_
#include <linux/platform_device.h>
@@ -16,4 +16,4 @@ struct intel_mid_wdt_pdata {
int (*probe)(struct platform_device *pdev);
};
-#endif /*__INTEL_MID_WDT_H__*/
+#endif /* __PLATFORM_X86_INTEL_MID_WDT_H_ */
diff --git a/include/linux/platform_data/x86/intel_pmc_ipc.h b/include/linux/platform_data/x86/intel_pmc_ipc.h
new file mode 100644
index 000000000000..85ea381e4a27
--- /dev/null
+++ b/include/linux/platform_data/x86/intel_pmc_ipc.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel Core SoC Power Management Controller Header File
+ *
+ * Copyright (c) 2025, Intel Corporation.
+ * All Rights Reserved.
+ *
+ */
+#ifndef INTEL_PMC_IPC_H
+#define INTEL_PMC_IPC_H
+#include <linux/acpi.h>
+#include <linux/cleanup.h>
+
+#define IPC_SOC_REGISTER_ACCESS 0xAA
+#define IPC_SOC_SUB_CMD_READ 0x00
+#define IPC_SOC_SUB_CMD_WRITE 0x01
+#define PMC_IPCS_PARAM_COUNT 7
+#define VALID_IPC_RESPONSE 5
+
+struct pmc_ipc_cmd {
+ u32 cmd;
+ u32 sub_cmd;
+ u32 size;
+ u32 wbuf[4];
+};
+
+struct pmc_ipc_rbuf {
+ u32 buf[4];
+};
+
+/**
+ * intel_pmc_ipc() - PMC IPC Mailbox accessor
+ * @ipc_cmd: Prepared input command to send
+ * @rbuf: Allocated array for returned IPC data
+ *
+ * Return: 0 on success, non-zero on mailbox error.
+ */
+static inline int intel_pmc_ipc(struct pmc_ipc_cmd *ipc_cmd, struct pmc_ipc_rbuf *rbuf)
+{
+#ifdef CONFIG_ACPI
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object params[PMC_IPCS_PARAM_COUNT] = {
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ };
+ struct acpi_object_list arg_list = { PMC_IPCS_PARAM_COUNT, params };
+ int status;
+
+ if (!ipc_cmd || !rbuf)
+ return -EINVAL;
+
+ /*
+ * 0: IPC Command
+ * 1: IPC Sub Command
+ * 2: Size
+ * 3-6: Write Buffer for offset
+ */
+ params[0].integer.value = ipc_cmd->cmd;
+ params[1].integer.value = ipc_cmd->sub_cmd;
+ params[2].integer.value = ipc_cmd->size;
+ params[3].integer.value = ipc_cmd->wbuf[0];
+ params[4].integer.value = ipc_cmd->wbuf[1];
+ params[5].integer.value = ipc_cmd->wbuf[2];
+ params[6].integer.value = ipc_cmd->wbuf[3];
+
+ status = acpi_evaluate_object(NULL, "\\IPCS", &arg_list, &buffer);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ union acpi_object *obj __free(kfree) = buffer.pointer;
+
+ if (obj && obj->type == ACPI_TYPE_PACKAGE &&
+ obj->package.count == VALID_IPC_RESPONSE) {
+ const union acpi_object *objs = obj->package.elements;
+
+ if ((u8)objs[0].integer.value != 0)
+ return -EINVAL;
+
+ rbuf->buf[0] = objs[1].integer.value;
+ rbuf->buf[1] = objs[2].integer.value;
+ rbuf->buf[2] = objs[3].integer.value;
+ rbuf->buf[3] = objs[4].integer.value;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+#else
+ return -ENODEV;
+#endif /* CONFIG_ACPI */
+}
+
+#endif /* INTEL_PMC_IPC_H */
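A minimal usage sketch for the accessor above: read one 32-bit SoC register through the PMC mailbox using the IPC_SOC_* command codes from this header. The function name and the idea of passing a register offset in wbuf[0] follow the comment in intel_pmc_ipc(); the wrapper itself is illustrative.

#include <linux/platform_data/x86/intel_pmc_ipc.h>

static int example_read_soc_reg(u32 offset, u32 *value)
{
	struct pmc_ipc_cmd cmd = {
		.cmd = IPC_SOC_REGISTER_ACCESS,
		.sub_cmd = IPC_SOC_SUB_CMD_READ,
		.size = sizeof(u32),
		.wbuf = { offset },
	};
	struct pmc_ipc_rbuf rbuf = {};
	int ret;

	ret = intel_pmc_ipc(&cmd, &rbuf);
	if (ret)
		return ret;

	*value = rbuf.buf[0];
	return 0;
}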
diff --git a/include/linux/platform_data/x86/intel_scu_ipc.h b/include/linux/platform_data/x86/intel_scu_ipc.h
new file mode 100644
index 000000000000..b287627759f7
--- /dev/null
+++ b/include/linux/platform_data/x86/intel_scu_ipc.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PLATFORM_X86_INTEL_SCU_IPC_H_
+#define __PLATFORM_X86_INTEL_SCU_IPC_H_
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+
+struct device;
+struct module;
+
+struct intel_scu_ipc_dev;
+
+/**
+ * struct intel_scu_ipc_data - Data used to configure SCU IPC
+ * @mem: Base address of SCU IPC MMIO registers
+ * @irq: The IRQ number used for SCU (optional)
+ */
+struct intel_scu_ipc_data {
+ struct resource mem;
+ int irq;
+};
+
+struct intel_scu_ipc_dev *
+__intel_scu_ipc_register(struct device *parent,
+ const struct intel_scu_ipc_data *scu_data,
+ struct module *owner);
+
+#define intel_scu_ipc_register(parent, scu_data) \
+ __intel_scu_ipc_register(parent, scu_data, THIS_MODULE)
+
+void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu);
+
+struct intel_scu_ipc_dev *
+__devm_intel_scu_ipc_register(struct device *parent,
+ const struct intel_scu_ipc_data *scu_data,
+ struct module *owner);
+
+#define devm_intel_scu_ipc_register(parent, scu_data) \
+ __devm_intel_scu_ipc_register(parent, scu_data, THIS_MODULE)
+
+struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void);
+void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu);
+struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev);
+
+int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr,
+ u8 *data);
+int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr,
+ u8 data);
+int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr,
+ u8 *data, size_t len);
+int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr,
+ u8 *data, size_t len);
+
+int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr,
+ u8 data, u8 mask);
+
+int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
+ int sub);
+int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
+ int sub, const void *in, size_t inlen,
+ size_t size, void *out, size_t outlen);
+
+static inline int intel_scu_ipc_dev_command(struct intel_scu_ipc_dev *scu, int cmd,
+ int sub, const void *in, size_t inlen,
+ void *out, size_t outlen)
+{
+ return intel_scu_ipc_dev_command_with_size(scu, cmd, sub, in, inlen,
+ inlen, out, outlen);
+}
+
+#endif
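A hedged consumer sketch for the API above: obtain the SCU IPC instance with the devm_ getter and read a single register over IPC. The probe function and the register address 0x10 are illustrative; deferring probe when the SCU is not yet available matches how existing consumers typically handle a NULL return.

#include <linux/platform_device.h>
#include <linux/platform_data/x86/intel_scu_ipc.h>

static int example_probe(struct platform_device *pdev)
{
	struct intel_scu_ipc_dev *scu;
	u8 val;
	int ret;

	scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
	if (!scu)
		return -EPROBE_DEFER;	/* SCU not registered yet */

	ret = intel_scu_ipc_dev_ioread8(scu, 0x10, &val); /* 0x10: example address */
	if (ret)
		return ret;

	dev_info(&pdev->dev, "SCU reg 0x10 = %#x\n", val);
	return 0;
}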
diff --git a/include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h b/include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h
new file mode 100644
index 000000000000..23d60130272c
--- /dev/null
+++ b/include/linux/platform_data/x86/nvidia-wmi-ec-backlight.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#ifndef __PLATFORM_DATA_X86_NVIDIA_WMI_EC_BACKLIGHT_H
+#define __PLATFORM_DATA_X86_NVIDIA_WMI_EC_BACKLIGHT_H
+
+#define WMI_BRIGHTNESS_GUID "603E9613-EF25-4338-A3D0-C46177516DB7"
+
+/**
+ * enum wmi_brightness_method - WMI method IDs
+ * @WMI_BRIGHTNESS_METHOD_LEVEL: Get/Set EC brightness level status
+ * @WMI_BRIGHTNESS_METHOD_SOURCE: Get/Set EC Brightness Source
+ */
+enum wmi_brightness_method {
+ WMI_BRIGHTNESS_METHOD_LEVEL = 1,
+ WMI_BRIGHTNESS_METHOD_SOURCE = 2,
+ WMI_BRIGHTNESS_METHOD_MAX
+};
+
+/**
+ * enum wmi_brightness_mode - Operation mode for WMI-wrapped method
+ * @WMI_BRIGHTNESS_MODE_GET: Get the current brightness level/source.
+ * @WMI_BRIGHTNESS_MODE_SET: Set the brightness level.
+ * @WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL: Get the maximum brightness level. This
+ * is only valid when the WMI method is
+ * %WMI_BRIGHTNESS_METHOD_LEVEL.
+ */
+enum wmi_brightness_mode {
+ WMI_BRIGHTNESS_MODE_GET = 0,
+ WMI_BRIGHTNESS_MODE_SET = 1,
+ WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL = 2,
+ WMI_BRIGHTNESS_MODE_MAX
+};
+
+/**
+ * enum wmi_brightness_source - Backlight brightness control source selection
+ * @WMI_BRIGHTNESS_SOURCE_GPU: Backlight brightness is controlled by the GPU.
+ * @WMI_BRIGHTNESS_SOURCE_EC: Backlight brightness is controlled by the
+ * system's Embedded Controller (EC).
+ * @WMI_BRIGHTNESS_SOURCE_AUX: Backlight brightness is controlled over the
+ * DisplayPort AUX channel.
+ */
+enum wmi_brightness_source {
+ WMI_BRIGHTNESS_SOURCE_GPU = 1,
+ WMI_BRIGHTNESS_SOURCE_EC = 2,
+ WMI_BRIGHTNESS_SOURCE_AUX = 3,
+ WMI_BRIGHTNESS_SOURCE_MAX
+};
+
+/**
+ * struct wmi_brightness_args - arguments for the WMI-wrapped ACPI method
+ * @mode: Pass in an &enum wmi_brightness_mode value to select between
+ * getting or setting a value.
+ * @val: In parameter for value to set when using %WMI_BRIGHTNESS_MODE_SET
+ * mode. Not used in conjunction with %WMI_BRIGHTNESS_MODE_GET or
+ * %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL mode.
+ * @ret: Out parameter returning retrieved value when operating in
+ * %WMI_BRIGHTNESS_MODE_GET or %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL
+ * mode. Not used in %WMI_BRIGHTNESS_MODE_SET mode.
+ * @ignored: Padding; not used. The ACPI method expects a 24-byte params struct.
+ *
+ * This is the parameters structure for the WmiBrightnessNotify ACPI method as
+ * wrapped by WMI. The value passed in to @val or returned by @ret will be a
+ * brightness value when the WMI method ID is %WMI_BRIGHTNESS_METHOD_LEVEL, or
+ * an &enum wmi_brightness_source value with %WMI_BRIGHTNESS_METHOD_SOURCE.
+ */
+struct wmi_brightness_args {
+ u32 mode;
+ u32 val;
+ u32 ret;
+ u32 ignored[3];
+};
+
+#endif
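A sketch of how struct wmi_brightness_args is populated for a query, loosely following the pattern of the nvidia-wmi-ec-backlight driver: the same buffer is passed as both input and output of the WMI-wrapped method, and the result comes back in @ret. The wmi_device pointer is assumed to come from the WMI core; example_get_level() is illustrative.

#include <linux/acpi.h>
#include <linux/wmi.h>
#include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h>

static int example_get_level(struct wmi_device *wdev, u32 *level)
{
	struct wmi_brightness_args args = {
		.mode = WMI_BRIGHTNESS_MODE_GET,
	};
	struct acpi_buffer buf = { sizeof(args), &args };
	acpi_status status;

	status = wmidev_evaluate_method(wdev, 0, WMI_BRIGHTNESS_METHOD_LEVEL,
					&buf, &buf);
	if (ACPI_FAILURE(status))
		return -EIO;

	*level = args.ret;
	return 0;
}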
diff --git a/include/linux/platform_data/x86/p2sb.h b/include/linux/platform_data/x86/p2sb.h
new file mode 100644
index 000000000000..a1d5fddc8f13
--- /dev/null
+++ b/include/linux/platform_data/x86/p2sb.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Primary to Sideband (P2SB) bridge access support
+ */
+
+#ifndef _PLATFORM_DATA_X86_P2SB_H
+#define _PLATFORM_DATA_X86_P2SB_H
+
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+
+struct pci_bus;
+struct resource;
+
+#if IS_BUILTIN(CONFIG_P2SB)
+
+int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem);
+
+#else /* CONFIG_P2SB */
+
+static inline int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_P2SB is not set */
+
+#endif /* _PLATFORM_DATA_X86_P2SB_H */
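A usage sketch for p2sb_bar(): resolve the MMIO window of a function behind the hidden P2SB bridge and map it. Passing a NULL bus selects bus 0; the PCI_DEVFN(31, 1) value is an example, real callers pass their platform's P2SB devfn.

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/platform_data/x86/p2sb.h>

static int example_map_p2sb(void __iomem **regs)
{
	struct resource mem;
	int ret;

	ret = p2sb_bar(NULL, PCI_DEVFN(31, 1), &mem);
	if (ret)
		return ret;

	*regs = ioremap(mem.start, resource_size(&mem));
	return *regs ? 0 : -ENOMEM;
}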
diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h
index 022bcea9edec..161e4bc1c9ee 100644
--- a/include/linux/platform_data/x86/pmc_atom.h
+++ b/include/linux/platform_data/x86/pmc_atom.h
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Intel Atom SOC Power Management Controller Header File
- * Copyright (c) 2014, Intel Corporation.
+ * Intel Atom SoC Power Management Controller Header File
+ * Copyright (c) 2014-2015,2022 Intel Corporation.
*/
#ifndef PMC_ATOM_H
#define PMC_ATOM_H
+#include <linux/bits.h>
+
/* ValleyView Power Control Unit PCI Device ID */
#define PCI_DEVICE_ID_VLV_PMC 0x0F1C
/* CherryTrail Power Control Unit PCI Device ID */
@@ -41,13 +43,26 @@
BIT_ORED_DEDICATED_IRQ_GPSC | \
BIT_SHARED_IRQ_GPSS)
+/* External clk generator settings */
+#define PMC_CLK_CTL_OFFSET 0x60
+#define PMC_CLK_CTL_SIZE 4
+#define PMC_CLK_NUM 6
+#define PMC_CLK_CTL_GATED_ON_D3 0x0
+#define PMC_CLK_CTL_FORCE_ON 0x1
+#define PMC_CLK_CTL_FORCE_OFF 0x2
+#define PMC_CLK_CTL_RESERVED 0x3
+#define PMC_MASK_CLK_CTL GENMASK(1, 0)
+#define PMC_MASK_CLK_FREQ BIT(2)
+#define PMC_CLK_FREQ_XTAL (0 << 2) /* 25 MHz */
+#define PMC_CLK_FREQ_PLL (1 << 2) /* 19.2 MHz */
+
/* The timers accumulate time spent in sleep state */
#define PMC_S0IR_TMR 0x80
#define PMC_S0I1_TMR 0x84
#define PMC_S0I2_TMR 0x88
#define PMC_S0I3_TMR 0x8C
#define PMC_S0_TMR 0x90
-/* Sleep state counter is in units of of 32us */
+/* Sleep state counter is in units of 32us */
#define PMC_TMR_SHIFT 5
/* Power status of power islands */
@@ -102,14 +117,14 @@
#define BIT_SCC_SDIO BIT(9)
#define BIT_SCC_SDCARD BIT(10)
#define BIT_SCC_MIPI BIT(11)
-#define BIT_HDA BIT(12)
+#define BIT_HDA BIT(12) /* CHT datasheet: reserved */
#define BIT_LPE BIT(13)
#define BIT_OTG BIT(14)
-#define BIT_USH BIT(15)
-#define BIT_GBE BIT(16)
-#define BIT_SATA BIT(17)
-#define BIT_USB_EHCI BIT(18)
-#define BIT_SEC BIT(19)
+#define BIT_USH BIT(15) /* CHT datasheet: reserved */
+#define BIT_GBE BIT(16) /* CHT datasheet: reserved */
+#define BIT_SATA BIT(17) /* CHT datasheet: reserved */
+#define BIT_USB_EHCI BIT(18) /* CHT datasheet: XHCI! */
+#define BIT_SEC BIT(19) /* BYT datasheet: reserved */
#define BIT_PCIE_PORT0 BIT(20)
#define BIT_PCIE_PORT1 BIT(21)
#define BIT_PCIE_PORT2 BIT(22)
@@ -139,11 +154,10 @@
#define ACPI_MMIO_REG_LEN 0x100
#define PM1_CNT 0x4
-#define SLEEP_TYPE_MASK 0xFFFFECFF
+#define SLEEP_TYPE_MASK GENMASK(12, 10)
#define SLEEP_TYPE_S5 0x1C00
-#define SLEEP_ENABLE 0x2000
+#define SLEEP_ENABLE BIT(13)
extern int pmc_atom_read(int offset, u32 *value);
-extern int pmc_atom_write(int offset, u32 value);
#endif /* PMC_ATOM_H */
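A short sketch tying the timer definitions together: read the S0I3 residency counter and convert it to microseconds with PMC_TMR_SHIFT (one tick is 32 us). It assumes the pmc_atom driver has probed so that pmc_atom_read() is functional; the wrapper name is illustrative.

#include <linux/platform_data/x86/pmc_atom.h>

static int example_s0i3_residency_us(u64 *us)
{
	u32 ticks;
	int ret;

	ret = pmc_atom_read(PMC_S0I3_TMR, &ticks);
	if (ret)
		return ret;

	*us = (u64)ticks << PMC_TMR_SHIFT;	/* 1 tick = 32 us */
	return 0;
}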
diff --git a/include/linux/platform_data/x86/pwm-lpss.h b/include/linux/platform_data/x86/pwm-lpss.h
new file mode 100644
index 000000000000..f0349edb47f4
--- /dev/null
+++ b/include/linux/platform_data/x86/pwm-lpss.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Intel Low Power Subsystem PWM controller driver */
+
+#ifndef __PLATFORM_DATA_X86_PWM_LPSS_H
+#define __PLATFORM_DATA_X86_PWM_LPSS_H
+
+#include <linux/types.h>
+
+struct device;
+
+struct pwm_lpss_chip;
+
+struct pwm_lpss_boardinfo {
+ unsigned long clk_rate;
+ unsigned int npwm;
+ unsigned long base_unit_bits;
+ /*
+ * NOTE:
+ * Intel Broxton, Apollo Lake, and Gemini Lake use a different programming flow.
+ *
+ * Initial Enable or First Activation
+ * 1. Program the base unit and on-time divisor values.
+ * 2. Set the software update bit.
+ * 3. Poll in a loop on the PWMCTRL bit until software update bit is cleared.+
+ * 4. Enable the PWM output by setting PWM Enable.
+ * 5. Repeat the above steps for the next PWM Module.
+ *
+ * Dynamic update while PWM is Enabled
+ * 1. Program the base unit and on-time divisor values.
+ * 2. Set the software update bit.
+ * 3. Repeat the above steps for the next PWM module.
+ *
+ * + After setting PWMCTRL register's SW update bit, hardware automatically
+ * deasserts the SW update bit after a brief delay. It was observed that
+ * setting of PWM enable is typically done via read-modify-write of the PWMCTRL
+ * register. If there is no/little delay between setting software update bit
+ * and setting enable bit via read-modify-write, it is possible that the read
+ * could return with the software update bit still 1. In that case, the last
+ * write to set enable to 1 could also set sw_update to 1. If this happens,
+ * sw_update gets stuck and the driver code can hang, as it explicitly waits
+ * for the sw_update bit to be 0 after setting the enable bit to 1. To avoid
+ * this race condition, software should poll the software update bit and make
+ * sure it is 0 before doing the read-modify-write that sets the enable bit.
+ *
+ * Also, we noted that if the sw_update bit was set in step #1 above, then when
+ * it is set again in step #2 it never gets cleared and the flow hangs.
+ * As such, we need to make sure that sw_update bit is 0 when doing step #1.
+ */
+ bool bypass;
+ /*
+ * On some devices the _PS0/_PS3 AML code of the GPU (GFX0) device
+ * messes with the PWM0 controllers state,
+ */
+ bool other_devices_aml_touches_pwm_regs;
+};
+
+struct pwm_chip *devm_pwm_lpss_probe(struct device *dev, void __iomem *base,
+ const struct pwm_lpss_boardinfo *info);
+
+#endif /* __PLATFORM_DATA_X86_PWM_LPSS_H */
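To make the NOTE concrete, here is a hedged sketch of the Broxton-style enable flow it describes. Everything below (register offset, bit positions, helper name) is illustrative, not the actual pwm-lpss driver internals; the point is the poll-before-program and poll-before-enable ordering.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EX_PWMCTRL	0x0		/* hypothetical register offset */
#define EX_SW_UPDATE	BIT(30)		/* hypothetical SW update bit */
#define EX_PWM_ENABLE	BIT(31)		/* hypothetical enable bit */

static int example_pwm_enable(void __iomem *base, u32 on_time_div)
{
	u32 ctrl;
	int ret;

	/* Step 0: make sure SW_UPDATE is clear before programming. */
	ret = readl_poll_timeout(base + EX_PWMCTRL, ctrl,
				 !(ctrl & EX_SW_UPDATE), 40, 500000);
	if (ret)
		return ret;

	/* Steps 1-2: program the divisor, then latch it via SW_UPDATE. */
	writel(on_time_div | EX_SW_UPDATE, base + EX_PWMCTRL);

	/* Step 3: poll until hardware deasserts SW_UPDATE. */
	ret = readl_poll_timeout(base + EX_PWMCTRL, ctrl,
				 !(ctrl & EX_SW_UPDATE), 40, 500000);
	if (ret)
		return ret;

	/* Step 4: only now set the enable bit via read-modify-write. */
	writel(readl(base + EX_PWMCTRL) | EX_PWM_ENABLE, base + EX_PWMCTRL);
	return 0;
}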
diff --git a/include/linux/platform_data/x86/simatic-ipc-base.h b/include/linux/platform_data/x86/simatic-ipc-base.h
new file mode 100644
index 000000000000..2d7f7120ba6b
--- /dev/null
+++ b/include/linux/platform_data/x86/simatic-ipc-base.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Siemens SIMATIC IPC drivers
+ *
+ * Copyright (c) Siemens AG, 2018-2023
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ * Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+ */
+
+#ifndef __PLATFORM_DATA_X86_SIMATIC_IPC_BASE_H
+#define __PLATFORM_DATA_X86_SIMATIC_IPC_BASE_H
+
+#include <linux/types.h>
+
+#define SIMATIC_IPC_DEVICE_NONE 0
+#define SIMATIC_IPC_DEVICE_227D 1
+#define SIMATIC_IPC_DEVICE_427E 2
+#define SIMATIC_IPC_DEVICE_127E 3
+#define SIMATIC_IPC_DEVICE_227E 4
+#define SIMATIC_IPC_DEVICE_227G 5
+#define SIMATIC_IPC_DEVICE_BX_21A 6
+#define SIMATIC_IPC_DEVICE_BX_39A 7
+#define SIMATIC_IPC_DEVICE_BX_59A 8
+
+struct simatic_ipc_platform {
+ u8 devmode;
+};
+
+#endif /* __PLATFORM_DATA_X86_SIMATIC_IPC_BASE_H */
diff --git a/include/linux/platform_data/x86/simatic-ipc.h b/include/linux/platform_data/x86/simatic-ipc.h
new file mode 100644
index 000000000000..8d8b3b919674
--- /dev/null
+++ b/include/linux/platform_data/x86/simatic-ipc.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Siemens SIMATIC IPC drivers
+ *
+ * Copyright (c) Siemens AG, 2018-2023
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ * Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+ */
+
+#ifndef __PLATFORM_DATA_X86_SIMATIC_IPC_H
+#define __PLATFORM_DATA_X86_SIMATIC_IPC_H
+
+#include <linux/dmi.h>
+#include <linux/platform_data/x86/simatic-ipc-base.h>
+
+#define SIMATIC_IPC_DMI_ENTRY_OEM 129
+/* binary type */
+#define SIMATIC_IPC_DMI_TYPE 0xff
+#define SIMATIC_IPC_DMI_GROUP 0x05
+#define SIMATIC_IPC_DMI_ENTRY 0x02
+#define SIMATIC_IPC_DMI_TID 0x02
+
+enum simatic_ipc_station_ids {
+ SIMATIC_IPC_INVALID_STATION_ID = 0,
+ SIMATIC_IPC_IPC227D = 0x00000501,
+ SIMATIC_IPC_IPC427D = 0x00000701,
+ SIMATIC_IPC_IPC227E = 0x00000901,
+ SIMATIC_IPC_IPC277E = 0x00000902,
+ SIMATIC_IPC_IPC427E = 0x00000A01,
+ SIMATIC_IPC_IPC477E = 0x00000A02,
+ SIMATIC_IPC_IPC127E = 0x00000D01,
+ SIMATIC_IPC_IPC227G = 0x00000F01,
+ SIMATIC_IPC_IPC277G = 0x00000F02,
+ SIMATIC_IPC_IPCBX_39A = 0x00001001,
+ SIMATIC_IPC_IPCPX_39A = 0x00001002,
+ SIMATIC_IPC_IPCBX_21A = 0x00001101,
+ SIMATIC_IPC_IPCBX_56A = 0x00001201,
+ SIMATIC_IPC_IPCBX_59A = 0x00001202,
+};
+
+static inline u32 simatic_ipc_get_station_id(u8 *data, int max_len)
+{
+ struct {
+ u8 type; /* type (0xff = binary) */
+ u8 len; /* len of data entry */
+ u8 group;
+ u8 entry;
+ u8 tid;
+ __le32 station_id; /* station id (LE) */
+ } __packed *data_entry = (void *)data + sizeof(struct dmi_header);
+
+ while ((u8 *)data_entry < data + max_len) {
+ if (data_entry->type == SIMATIC_IPC_DMI_TYPE &&
+ data_entry->len == sizeof(*data_entry) &&
+ data_entry->group == SIMATIC_IPC_DMI_GROUP &&
+ data_entry->entry == SIMATIC_IPC_DMI_ENTRY &&
+ data_entry->tid == SIMATIC_IPC_DMI_TID) {
+ return le32_to_cpu(data_entry->station_id);
+ }
+ data_entry = (void *)((u8 *)(data_entry) + data_entry->len);
+ }
+
+ return SIMATIC_IPC_INVALID_STATION_ID;
+}
+
+static inline void
+simatic_ipc_find_dmi_entry_helper(const struct dmi_header *dh, void *_data)
+{
+ u32 *id = _data;
+
+ if (dh->type != SIMATIC_IPC_DMI_ENTRY_OEM)
+ return;
+
+ *id = simatic_ipc_get_station_id((u8 *)dh, dh->length);
+}
+
+#endif /* __PLATFORM_DATA_X86_SIMATIC_IPC_H */
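A usage sketch (illustrative, not from this patch): a platform driver passes the helper above to dmi_walk() and then translates the station id into one of the SIMATIC_IPC_DEVICE_* modes from simatic-ipc-base.h. The mapping shown covers only two models for brevity.

#include <linux/dmi.h>
#include <linux/platform_data/x86/simatic-ipc.h>

static u8 example_detect_devmode(void)
{
	u32 station_id = SIMATIC_IPC_INVALID_STATION_ID;

	if (dmi_walk(simatic_ipc_find_dmi_entry_helper, &station_id))
		return SIMATIC_IPC_DEVICE_NONE;

	switch (station_id) {
	case SIMATIC_IPC_IPC227G:
		return SIMATIC_IPC_DEVICE_227G;
	case SIMATIC_IPC_IPCBX_39A:
		return SIMATIC_IPC_DEVICE_BX_39A;
	default:
		return SIMATIC_IPC_DEVICE_NONE;
	}
}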
diff --git a/include/linux/platform_data/x86/soc.h b/include/linux/platform_data/x86/soc.h
new file mode 100644
index 000000000000..f981907a5cb0
--- /dev/null
+++ b/include/linux/platform_data/x86/soc.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Helpers for Intel SoC model detection
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ */
+
+#ifndef __PLATFORM_DATA_X86_SOC_H
+#define __PLATFORM_DATA_X86_SOC_H
+
+#include <linux/types.h>
+
+#if IS_ENABLED(CONFIG_X86)
+
+#include <linux/mod_devicetable.h>
+
+#include <asm/cpu_device_id.h>
+
+#define SOC_INTEL_IS_CPU(soc, type) \
+static inline bool soc_intel_is_##soc(void) \
+{ \
+ static const struct x86_cpu_id soc##_cpu_ids[] = { \
+ X86_MATCH_VFM(type, NULL), \
+ {} \
+ }; \
+ const struct x86_cpu_id *id; \
+ \
+ id = x86_match_cpu(soc##_cpu_ids); \
+ if (id) \
+ return true; \
+ return false; \
+}
+
+SOC_INTEL_IS_CPU(byt, INTEL_ATOM_SILVERMONT);
+SOC_INTEL_IS_CPU(cht, INTEL_ATOM_AIRMONT);
+SOC_INTEL_IS_CPU(apl, INTEL_ATOM_GOLDMONT);
+SOC_INTEL_IS_CPU(glk, INTEL_ATOM_GOLDMONT_PLUS);
+SOC_INTEL_IS_CPU(cml, INTEL_KABYLAKE_L);
+
+#undef SOC_INTEL_IS_CPU
+
+#else /* IS_ENABLED(CONFIG_X86) */
+
+static inline bool soc_intel_is_byt(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_cht(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_apl(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_glk(void)
+{
+ return false;
+}
+
+static inline bool soc_intel_is_cml(void)
+{
+ return false;
+}
+#endif /* IS_ENABLED(CONFIG_X86) */
+
+#endif /* __PLATFORM_DATA_X86_SOC_H */
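The generated helpers compile down to plain predicates (always false off x86), so quirk code can branch on the SoC model without #ifdefs. A short sketch; the clock rates are example quirk values only.

#include <linux/platform_data/x86/soc.h>

static unsigned long example_pick_clk_rate(void)
{
	if (soc_intel_is_byt() || soc_intel_is_cht())
		return 25000000;	/* example quirk value */

	return 19200000;
}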
diff --git a/include/linux/platform_data/x86/intel-spi.h b/include/linux/platform_data/x86/spi-intel.h
index 7f53a5c6f35e..a512ec37abbb 100644
--- a/include/linux/platform_data/x86/intel-spi.h
+++ b/include/linux/platform_data/x86/spi-intel.h
@@ -6,8 +6,8 @@
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#ifndef INTEL_SPI_PDATA_H
-#define INTEL_SPI_PDATA_H
+#ifndef SPI_INTEL_PDATA_H
+#define SPI_INTEL_PDATA_H
enum intel_spi_type {
INTEL_SPI_BYT = 1,
@@ -19,11 +19,13 @@ enum intel_spi_type {
/**
* struct intel_spi_boardinfo - Board specific data for Intel SPI driver
* @type: Type which this controller is compatible with
- * @writeable: The chip is writeable
+ * @set_writeable: Try to make the chip writeable (optional)
+ * @data: Data to be passed to @set_writeable can be %NULL
*/
struct intel_spi_boardinfo {
enum intel_spi_type type;
- bool writeable;
+ bool (*set_writeable)(void __iomem *base, void *data);
+ void *data;
};
-#endif /* INTEL_SPI_PDATA_H */
+#endif /* SPI_INTEL_PDATA_H */
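A sketch of the new callback contract: board code now supplies a set_writeable() hook instead of a precomputed bool, so the write-protect state is probed and changed lazily. The register offset 0xdc and bit 0 below are illustrative only, not a real chipset layout.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/platform_data/x86/spi-intel.h>

static bool example_set_writeable(void __iomem *base, void *data)
{
	u32 bcr = readl(base + 0xdc);		/* hypothetical BCR offset */

	writel(bcr | BIT(0), base + 0xdc);	/* hypothetical WPD bit */
	return readl(base + 0xdc) & BIT(0);	/* did the bit stick? */
}

static const struct intel_spi_boardinfo example_info = {
	.type = INTEL_SPI_BYT,
	.set_writeable = example_set_writeable,
};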
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
deleted file mode 100644
index 2463a4a856a6..000000000000
--- a/include/linux/platform_data/zforce_ts.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* drivers/input/touchscreen/zforce.c
- *
- * Copyright (C) 2012-2013 MundoReader S.L.
- */
-
-#ifndef _LINUX_INPUT_ZFORCE_TS_H
-#define _LINUX_INPUT_ZFORCE_TS_H
-
-struct zforce_ts_platdata {
- unsigned int x_max;
- unsigned int y_max;
-};
-
-#endif /* _LINUX_INPUT_ZFORCE_TS_H */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index cd81e060863c..813da101b5bf 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -31,7 +31,11 @@ struct platform_device {
struct resource *resource;
const struct platform_device_id *id_entry;
- char *driver_override; /* Driver name to force a match */
+ /*
+ * Driver name to force a match. Do not set directly, because core
+ * frees it. Use driver_set_override() to set or clear it.
+ */
+ const char *driver_override;
/* MFD cell pointer */
struct mfd_cell *mfd_cell;
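Since driver_override is now const and freed by the core, the comment above points at driver_set_override() (declared in linux/device/driver.h) as the only sanctioned way to change it. A minimal sketch, with an illustrative wrapper name:

#include <linux/device/driver.h>
#include <linux/platform_device.h>
#include <linux/string.h>

static int example_force_driver(struct platform_device *pdev,
				const char *drv_name)
{
	/* Allocates a copy and frees any previous override internally. */
	return driver_set_override(&pdev->dev, &pdev->driver_override,
				   drv_name, strlen(drv_name));
}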
@@ -48,7 +52,7 @@ struct platform_device {
extern int platform_device_register(struct platform_device *);
extern void platform_device_unregister(struct platform_device *);
-extern struct bus_type platform_bus_type;
+extern const struct bus_type platform_bus_type;
extern struct device platform_bus;
extern struct resource *platform_get_resource(struct platform_device *,
@@ -59,6 +63,8 @@ extern struct resource *platform_get_mem_or_io(struct platform_device *,
extern struct device *
platform_find_device_by_driver(struct device *start,
const struct device_driver *drv);
+
+#ifdef CONFIG_HAS_IOMEM
extern void __iomem *
devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
unsigned int index, struct resource **res);
@@ -66,13 +72,38 @@ extern void __iomem *
devm_platform_ioremap_resource(struct platform_device *pdev,
unsigned int index);
extern void __iomem *
-devm_platform_ioremap_resource_wc(struct platform_device *pdev,
- unsigned int index);
-extern void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
const char *name);
+#else
+
+static inline void __iomem *
+devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
+ unsigned int index, struct resource **res)
+{
+ return IOMEM_ERR_PTR(-EINVAL);
+}
+
+
+static inline void __iomem *
+devm_platform_ioremap_resource(struct platform_device *pdev,
+ unsigned int index)
+{
+ return IOMEM_ERR_PTR(-EINVAL);
+}
+
+static inline void __iomem *
+devm_platform_ioremap_resource_byname(struct platform_device *pdev,
+ const char *name)
+{
+ return IOMEM_ERR_PTR(-EINVAL);
+}
+
+#endif
+
extern int platform_get_irq(struct platform_device *, unsigned int);
extern int platform_get_irq_optional(struct platform_device *, unsigned int);
+extern int platform_get_irq_affinity(struct platform_device *, unsigned int,
+ const struct cpumask **);
extern int platform_irq_count(struct platform_device *);
extern int devm_platform_get_irqs_affinity(struct platform_device *dev,
struct irq_affinity *affd,
@@ -200,21 +231,28 @@ extern int platform_device_add_resources(struct platform_device *pdev,
unsigned int num);
extern int platform_device_add_data(struct platform_device *pdev,
const void *data, size_t size);
-extern int platform_device_add_properties(struct platform_device *pdev,
- const struct property_entry *properties);
extern int platform_device_add(struct platform_device *pdev);
extern void platform_device_del(struct platform_device *pdev);
extern void platform_device_put(struct platform_device *pdev);
+DEFINE_FREE(platform_device_put, struct platform_device *, if (_T) platform_device_put(_T))
struct platform_driver {
int (*probe)(struct platform_device *);
- int (*remove)(struct platform_device *);
+ void (*remove)(struct platform_device *);
void (*shutdown)(struct platform_device *);
int (*suspend)(struct platform_device *, pm_message_t state);
int (*resume)(struct platform_device *);
struct device_driver driver;
const struct platform_device_id *id_table;
bool prevent_deferred_probe;
+ /*
+ * Most device drivers need not care about this flag as long as all
+ * DMA is handled through the kernel DMA API. Some special drivers,
+ * for example VFIO drivers, know how to manage DMA themselves and
+ * set this flag so that the IOMMU layer will allow them to set up and
+ * manage their own I/O address space.
+ */
+ bool driver_managed_dma;
};
#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \
@@ -333,8 +371,6 @@ extern int platform_pm_restore(struct device *dev);
#define platform_pm_restore NULL
#endif
-extern int platform_dma_configure(struct device *dev);
-
#ifdef CONFIG_PM_SLEEP
#define USE_PLATFORM_PM_SLEEP_OPS \
.suspend = platform_pm_suspend, \
diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h
index a6329003aee7..855b28340e95 100644
--- a/include/linux/platform_profile.h
+++ b/include/linux/platform_profile.h
@@ -2,13 +2,14 @@
/*
* Platform profile sysfs interface
*
- * See Documentation/ABI/testing/sysfs-platform_profile.rst for more
+ * See Documentation/userspace-api/sysfs-platform_profile.rst for more
* information.
*/
#ifndef _PLATFORM_PROFILE_H_
#define _PLATFORM_PROFILE_H_
+#include <linux/device.h>
#include <linux/bitops.h>
/*
@@ -23,19 +24,38 @@ enum platform_profile_option {
PLATFORM_PROFILE_BALANCED,
PLATFORM_PROFILE_BALANCED_PERFORMANCE,
PLATFORM_PROFILE_PERFORMANCE,
+ PLATFORM_PROFILE_MAX_POWER,
+ PLATFORM_PROFILE_CUSTOM,
PLATFORM_PROFILE_LAST, /*must always be last */
};
-struct platform_profile_handler {
- unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
- int (*profile_get)(struct platform_profile_handler *pprof,
- enum platform_profile_option *profile);
- int (*profile_set)(struct platform_profile_handler *pprof,
- enum platform_profile_option profile);
+/**
+ * struct platform_profile_ops - platform profile operations
+ * @probe: Callback to set up choices available to the new class device. These
+ * choices will only be enforced when setting a new profile, not when
+ * getting the current one.
+ * @hidden_choices: Callback to set up choices that are not visible to the user
+ * but can be set by the driver.
+ * @profile_get: Callback that will be called when showing the current platform
+ * profile in sysfs.
+ * @profile_set: Callback that will be called when storing a new platform
+ * profile in sysfs.
+ */
+struct platform_profile_ops {
+ int (*probe)(void *drvdata, unsigned long *choices);
+ int (*hidden_choices)(void *drvdata, unsigned long *choices);
+ int (*profile_get)(struct device *dev, enum platform_profile_option *profile);
+ int (*profile_set)(struct device *dev, enum platform_profile_option profile);
};
-int platform_profile_register(struct platform_profile_handler *pprof);
-int platform_profile_remove(void);
-void platform_profile_notify(void);
+struct device *platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops);
+void platform_profile_remove(struct device *dev);
+struct device *devm_platform_profile_register(struct device *dev, const char *name,
+ void *drvdata,
+ const struct platform_profile_ops *ops);
+int platform_profile_cycle(void);
+void platform_profile_notify(struct device *dev);
#endif /*_PLATFORM_PROFILE_H_*/
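A sketch of the reworked class-device API: a driver fills in platform_profile_ops and registers them per device, instead of the old single global handler. All example_* names are illustrative; the probe callback marks the profiles the device supports in the choices bitmap.

#include <linux/platform_profile.h>

static int example_probe_cb(void *drvdata, unsigned long *choices)
{
	set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
	set_bit(PLATFORM_PROFILE_BALANCED, choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
	return 0;
}

static int example_profile_get(struct device *dev,
			       enum platform_profile_option *profile)
{
	*profile = PLATFORM_PROFILE_BALANCED;	/* query hardware here */
	return 0;
}

static int example_profile_set(struct device *dev,
			       enum platform_profile_option profile)
{
	return 0;				/* program hardware here */
}

static const struct platform_profile_ops example_ops = {
	.probe = example_probe_cb,
	.profile_get = example_profile_get,
	.profile_set = example_profile_set,
};

/* In probe: devm_platform_profile_register(dev, "example", drvdata, &example_ops); */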
diff --git a/include/linux/pldmfw.h b/include/linux/pldmfw.h
index 0fc831338226..f5047983004f 100644
--- a/include/linux/pldmfw.h
+++ b/include/linux/pldmfw.h
@@ -125,9 +125,17 @@ struct pldmfw_ops;
* a pointer to their own data, used to implement the device specific
* operations.
*/
+
+enum pldmfw_update_mode {
+ PLDMFW_UPDATE_MODE_FULL,
+ PLDMFW_UPDATE_MODE_SINGLE_COMPONENT,
+};
+
struct pldmfw {
const struct pldmfw_ops *ops;
struct device *dev;
+ u16 component_identifier;
+ enum pldmfw_update_mode mode;
};
bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record);
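A small sketch of the new fields: a caller opts in to single-component updates by setting the mode and component identifier before handing the image over, assuming pldmfw_flash_image() from this header is then used as before. The helper name is illustrative.

#include <linux/pldmfw.h>

static void example_setup_pldm(struct pldmfw *ctx, struct device *dev,
			       const struct pldmfw_ops *ops, u16 component)
{
	ctx->ops = ops;
	ctx->dev = dev;
	ctx->mode = PLDMFW_UPDATE_MODE_SINGLE_COMPONENT;
	ctx->component_identifier = component;
	/* Then: pldmfw_flash_image(ctx, fw); */
}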
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 66bab1bca35c..8c1c8adf7fe9 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -73,18 +73,11 @@
#ifndef _LINUX_PLIST_H_
#define _LINUX_PLIST_H_
-#include <linux/kernel.h>
+#include <linux/container_of.h>
#include <linux/list.h>
+#include <linux/plist_types.h>
-struct plist_head {
- struct list_head node_list;
-};
-
-struct plist_node {
- int prio;
- struct list_head prio_list;
- struct list_head node_list;
-};
+#include <asm/bug.h>
/**
* PLIST_HEAD_INIT - static struct plist_head initializer
diff --git a/include/linux/plist_types.h b/include/linux/plist_types.h
new file mode 100644
index 000000000000..c37e784330af
--- /dev/null
+++ b/include/linux/plist_types.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_PLIST_TYPES_H
+#define _LINUX_PLIST_TYPES_H
+
+#include <linux/types.h>
+
+struct plist_head {
+ struct list_head node_list;
+};
+
+struct plist_node {
+ int prio;
+ struct list_head prio_list;
+ struct list_head node_list;
+};
+
+#endif /* _LINUX_PLIST_TYPES_H */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 1d8209c09686..98a899858ece 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -8,33 +8,44 @@
#ifndef _LINUX_PM_H
#define _LINUX_PM_H
-#include <linux/list.h>
-#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/export.h>
+#include <linux/hrtimer_types.h>
+#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/util_macros.h>
#include <linux/wait.h>
-#include <linux/timer.h>
-#include <linux/hrtimer.h>
-#include <linux/completion.h>
+#include <linux/workqueue_types.h>
/*
* Callbacks for platform drivers to implement.
*/
extern void (*pm_power_off)(void);
-extern void (*pm_power_off_prepare)(void);
struct device; /* we have a circular dep with device.h */
#ifdef CONFIG_VT_CONSOLE_SLEEP
-extern void pm_vt_switch_required(struct device *dev, bool required);
+extern int pm_vt_switch_required(struct device *dev, bool required);
extern void pm_vt_switch_unregister(struct device *dev);
#else
-static inline void pm_vt_switch_required(struct device *dev, bool required)
+static inline int pm_vt_switch_required(struct device *dev, bool required)
{
+ return 0;
}
static inline void pm_vt_switch_unregister(struct device *dev)
{
}
#endif /* CONFIG_VT_CONSOLE_SLEEP */
+#ifdef CONFIG_CXL_SUSPEND
+bool cxl_mem_active(void);
+#else
+static inline bool cxl_mem_active(void)
+{
+ return false;
+}
+#endif
+
/*
* Device power management
*/
@@ -300,55 +311,129 @@ struct dev_pm_ops {
int (*runtime_idle)(struct device *dev);
};
+#define SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend = pm_sleep_ptr(suspend_fn), \
+ .resume = pm_sleep_ptr(resume_fn), \
+ .freeze = pm_sleep_ptr(suspend_fn), \
+ .thaw = pm_sleep_ptr(resume_fn), \
+ .poweroff = pm_sleep_ptr(suspend_fn), \
+ .restore = pm_sleep_ptr(resume_fn),
+
+#define LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend_late = pm_sleep_ptr(suspend_fn), \
+ .resume_early = pm_sleep_ptr(resume_fn), \
+ .freeze_late = pm_sleep_ptr(suspend_fn), \
+ .thaw_early = pm_sleep_ptr(resume_fn), \
+ .poweroff_late = pm_sleep_ptr(suspend_fn), \
+ .restore_early = pm_sleep_ptr(resume_fn),
+
+#define NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend_noirq = pm_sleep_ptr(suspend_fn), \
+ .resume_noirq = pm_sleep_ptr(resume_fn), \
+ .freeze_noirq = pm_sleep_ptr(suspend_fn), \
+ .thaw_noirq = pm_sleep_ptr(resume_fn), \
+ .poweroff_noirq = pm_sleep_ptr(suspend_fn), \
+ .restore_noirq = pm_sleep_ptr(resume_fn),
+
+#define RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ .runtime_suspend = suspend_fn, \
+ .runtime_resume = resume_fn, \
+ .runtime_idle = idle_fn,
+
#ifdef CONFIG_PM_SLEEP
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- .suspend = suspend_fn, \
- .resume = resume_fn, \
- .freeze = suspend_fn, \
- .thaw = resume_fn, \
- .poweroff = suspend_fn, \
- .restore = resume_fn,
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#else
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
#ifdef CONFIG_PM_SLEEP
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- .suspend_late = suspend_fn, \
- .resume_early = resume_fn, \
- .freeze_late = suspend_fn, \
- .thaw_early = resume_fn, \
- .poweroff_late = suspend_fn, \
- .restore_early = resume_fn,
+ LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#else
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
#ifdef CONFIG_PM_SLEEP
#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- .suspend_noirq = suspend_fn, \
- .resume_noirq = resume_fn, \
- .freeze_noirq = suspend_fn, \
- .thaw_noirq = resume_fn, \
- .poweroff_noirq = suspend_fn, \
- .restore_noirq = resume_fn,
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#else
#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
#ifdef CONFIG_PM
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
- .runtime_suspend = suspend_fn, \
- .runtime_resume = resume_fn, \
- .runtime_idle = idle_fn,
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#else
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif
+#define _DEFINE_DEV_PM_OPS(name, \
+ suspend_fn, resume_fn, \
+ runtime_suspend_fn, runtime_resume_fn, idle_fn) \
+const struct dev_pm_ops name = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ RUNTIME_PM_OPS(runtime_suspend_fn, runtime_resume_fn, idle_fn) \
+}
+
+#define _EXPORT_PM_OPS(name, license, ns) \
+ const struct dev_pm_ops name; \
+ __EXPORT_SYMBOL(name, license, ns); \
+ const struct dev_pm_ops name
+
+#define _DISCARD_PM_OPS(name, license, ns) \
+ static __maybe_unused const struct dev_pm_ops __static_##name
+
+#ifdef CONFIG_PM
+#define _EXPORT_DEV_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns)
+#else
+#define _EXPORT_DEV_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns)
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns)
+#else
+#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns)
+#endif
+
+#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
+#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "")
+#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
+#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns)
+
+#define EXPORT_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "", "")
+#define EXPORT_GPL_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", "")
+#define EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "", #ns)
+#define EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", #ns)
+
/*
* Use this if you want to use the same suspend and resume callbacks for suspend
* to RAM and hibernation.
+ *
+ * If the underlying dev_pm_ops struct symbol has to be exported, use
+ * EXPORT_SIMPLE_DEV_PM_OPS() or EXPORT_GPL_SIMPLE_DEV_PM_OPS() instead.
*/
+#define DEFINE_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+ _DEFINE_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL)
+
+#define EXPORT_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+ EXPORT_DEV_SLEEP_PM_OPS(name) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+#define EXPORT_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+ EXPORT_GPL_DEV_SLEEP_PM_OPS(name) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+#define EXPORT_NS_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
+ EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+#define EXPORT_NS_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
+ EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ }
+
+/* Deprecated. Use DEFINE_SIMPLE_DEV_PM_OPS() instead. */
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
const struct dev_pm_ops __maybe_unused name = { \
SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
@@ -366,6 +451,9 @@ const struct dev_pm_ops __maybe_unused name = { \
* suspend and "early" resume callback pointers, .suspend_late() and
* .resume_early(), to the same routines as .runtime_suspend() and
* .runtime_resume(), respectively (and analogously for hibernation).
+ *
+ * Deprecated. You most likely don't want this macro. Use
+ * DEFINE_RUNTIME_DEV_PM_OPS() instead.
*/
#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
const struct dev_pm_ops __maybe_unused name = { \
@@ -373,11 +461,17 @@ const struct dev_pm_ops __maybe_unused name = { \
SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
-#ifdef CONFIG_PM
-#define pm_ptr(_ptr) (_ptr)
-#else
-#define pm_ptr(_ptr) NULL
-#endif
+/*
+ * Use this if you want to have the suspend and resume callbacks be called
+ * with IRQs disabled.
+ */
+#define DEFINE_NOIRQ_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+const struct dev_pm_ops name = { \
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+}
+
+#define pm_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM), (_ptr))
+#define pm_sleep_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM_SLEEP), (_ptr))
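A minimal sketch of the new macro family in use: the callbacks are referenced through pm_sleep_ptr(), so the compiler can discard them (and the ops pointer becomes NULL) when CONFIG_PM_SLEEP is off, with no #ifdef guards. The example_* names are illustrative.

#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	return 0;	/* quiesce hardware here */
}

static int example_resume(struct device *dev)
{
	return 0;	/* re-initialize hardware here */
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops,
				example_suspend, example_resume);

/* In the driver definition: .driver.pm = pm_sleep_ptr(&example_pm_ops), */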
/*
* PM_EVENT_ messages
@@ -414,6 +508,7 @@ const struct dev_pm_ops __maybe_unused name = { \
* RECOVER Creation of a hibernation image or restoration of the main
* memory contents from a hibernation image has failed, call
* ->thaw() and ->complete() for all devices.
+ * POWEROFF System will power off, call ->poweroff() for all devices.
*
* The following PM_EVENT_ messages are defined for internal use by
* kernel subsystems. They are never issued by the PM core.
@@ -444,6 +539,7 @@ const struct dev_pm_ops __maybe_unused name = { \
#define PM_EVENT_USER 0x0100
#define PM_EVENT_REMOTE 0x0200
#define PM_EVENT_AUTO 0x0400
+#define PM_EVENT_POWEROFF 0x0800
#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
#define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND)
@@ -458,6 +554,7 @@ const struct dev_pm_ops __maybe_unused name = { \
#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, })
#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
+#define PMSG_POWEROFF ((struct pm_message){ .event = PM_EVENT_POWEROFF, })
#define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, })
#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, })
#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, })
@@ -474,7 +571,8 @@ const struct dev_pm_ops __maybe_unused name = { \
{ .event = PM_EVENT_AUTO_RESUME, })
#define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0)
-
+#define PMSG_NO_WAKEUP(msg) (((msg).event & \
+ (PM_EVENT_FREEZE | PM_EVENT_QUIESCE)) != 0)
/*
* Device run-time power management status.
*
@@ -499,10 +597,12 @@ const struct dev_pm_ops __maybe_unused name = { \
*/
enum rpm_status {
+ RPM_INVALID = -1,
RPM_ACTIVE = 0,
RPM_RESUMING,
RPM_SUSPENDED,
RPM_SUSPENDING,
+ RPM_BLOCKED,
};
/*
@@ -565,8 +665,8 @@ struct pm_subsys_data {
struct dev_pm_info {
pm_message_t power_state;
- unsigned int can_wakeup:1;
- unsigned int async_suspend:1;
+ bool can_wakeup:1;
+ bool async_suspend:1;
bool in_dpm_list:1; /* Owned by the PM core */
bool is_prepared:1; /* Owned by the PM core */
bool is_suspended:1; /* Ditto */
@@ -584,10 +684,14 @@ struct dev_pm_info {
bool wakeup_path:1;
bool syscore:1;
bool no_pm_callbacks:1; /* Owned by the PM core */
- unsigned int must_resume:1; /* Owned by the PM core */
- unsigned int may_skip_resume:1; /* Set by subsystems */
+ bool work_in_progress:1; /* Owned by the PM core */
+ bool smart_suspend:1; /* Owned by the PM core */
+ bool must_resume:1; /* Owned by the PM core */
+ bool may_skip_resume:1; /* Set by subsystems */
+ bool out_band_wakeup:1;
+ bool strict_midlayer:1;
#else
- unsigned int should_wakeup:1;
+ bool should_wakeup:1;
#endif
#ifdef CONFIG_PM
struct hrtimer suspend_timer;
@@ -598,20 +702,21 @@ struct dev_pm_info {
atomic_t usage_count;
atomic_t child_count;
unsigned int disable_depth:3;
- unsigned int idle_notification:1;
- unsigned int request_pending:1;
- unsigned int deferred_resume:1;
- unsigned int needs_force_resume:1;
- unsigned int runtime_auto:1;
+ bool idle_notification:1;
+ bool request_pending:1;
+ bool deferred_resume:1;
+ bool needs_force_resume:1;
+ bool runtime_auto:1;
bool ignore_children:1;
- unsigned int no_callbacks:1;
- unsigned int irq_safe:1;
- unsigned int use_autosuspend:1;
- unsigned int timer_autosuspends:1;
- unsigned int memalloc_noio:1;
+ bool no_callbacks:1;
+ bool irq_safe:1;
+ bool use_autosuspend:1;
+ bool timer_autosuspends:1;
+ bool memalloc_noio:1;
unsigned int links_count;
enum rpm_request request;
enum rpm_status runtime_status;
+ enum rpm_status last_status;
int runtime_error;
int autosuspend_delay;
u64 last_busy;
@@ -622,6 +727,7 @@ struct dev_pm_info {
struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */
void (*set_latency_tolerance)(struct device *, s32);
struct dev_pm_qos *qos;
+ bool detach_power_off:1; /* Owned by the driver core */
};
extern int dev_pm_get_subsys_data(struct device *dev);
@@ -636,6 +742,7 @@ extern void dev_pm_put_subsys_data(struct device *dev);
* @activate: Called before executing probe routines for bus types and drivers.
* @sync: Called after successful driver probe.
* @dismiss: Called after unsuccessful driver probe and after driver removal.
+ * @set_performance_state: Called to request a new performance state.
*
* Power domains provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions instead of
@@ -648,6 +755,7 @@ struct dev_pm_domain {
int (*activate)(struct device *dev);
void (*sync)(struct device *dev);
void (*dismiss)(struct device *dev);
+ int (*set_performance_state)(struct device *dev, unsigned int state);
};
/*
@@ -721,11 +829,11 @@ extern int dpm_suspend_late(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
extern int dpm_prepare(pm_message_t state);
-extern void __suspend_report_result(const char *function, void *fn, int ret);
+extern void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret);
-#define suspend_report_result(fn, ret) \
+#define suspend_report_result(dev, fn, ret) \
do { \
- __suspend_report_result(__func__, fn, ret); \
+ __suspend_report_result(__func__, dev, fn, ret); \
} while (0)
extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
@@ -739,10 +847,8 @@ extern int pm_generic_resume_early(struct device *dev);
extern int pm_generic_resume_noirq(struct device *dev);
extern int pm_generic_resume(struct device *dev);
extern int pm_generic_freeze_noirq(struct device *dev);
-extern int pm_generic_freeze_late(struct device *dev);
extern int pm_generic_freeze(struct device *dev);
extern int pm_generic_thaw_noirq(struct device *dev);
-extern int pm_generic_thaw_early(struct device *dev);
extern int pm_generic_thaw(struct device *dev);
extern int pm_generic_restore_noirq(struct device *dev);
extern int pm_generic_restore_early(struct device *dev);
@@ -765,7 +871,7 @@ static inline int dpm_suspend_start(pm_message_t state)
return 0;
}
-#define suspend_report_result(fn, ret) do {} while (0)
+#define suspend_report_result(dev, fn, ret) do {} while (0)
static inline int device_pm_wait_for_dev(struct device *a, struct device *b)
{
@@ -784,10 +890,8 @@ static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void
#define pm_generic_resume_noirq NULL
#define pm_generic_resume NULL
#define pm_generic_freeze_noirq NULL
-#define pm_generic_freeze_late NULL
#define pm_generic_freeze NULL
#define pm_generic_thaw_noirq NULL
-#define pm_generic_thaw_early NULL
#define pm_generic_thaw NULL
#define pm_generic_restore_noirq NULL
#define pm_generic_restore_early NULL
diff --git a/include/linux/pm2301_charger.h b/include/linux/pm2301_charger.h
deleted file mode 100644
index b8fac96f05aa..000000000000
--- a/include/linux/pm2301_charger.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * PM2301 charger driver.
- *
- * Copyright (C) 2012 ST Ericsson Corporation
- *
- * Contact: Olivier LAUNAY (olivier.launay@stericsson.com
- */
-
-#ifndef __LINUX_PM2301_H
-#define __LINUX_PM2301_H
-
-/**
- * struct pm2xxx_bm_charger_parameters - Charger specific parameters
- * @ac_volt_max: maximum allowed AC charger voltage in mV
- * @ac_curr_max: maximum allowed AC charger current in mA
- */
-struct pm2xxx_bm_charger_parameters {
- int ac_volt_max;
- int ac_curr_max;
-};
-
-/**
- * struct pm2xxx_bm_data - pm2xxx battery management data
- * @enable_overshoot flag to enable VBAT overshoot control
- * @chg_params charger parameters
- */
-struct pm2xxx_bm_data {
- bool enable_overshoot;
- const struct pm2xxx_bm_charger_parameters *chg_params;
-};
-
-struct pm2xxx_charger_platform_data {
- char **supplied_to;
- size_t num_supplicants;
- int i2c_bus;
- const char *label;
- int gpio_irq_number;
- unsigned int lpn_gpio;
- int irq_type;
-};
-
-struct pm2xxx_platform_data {
- struct pm2xxx_charger_platform_data *wall_charger;
- struct pm2xxx_bm_data *battery;
-};
-
-#endif /* __LINUX_PM2301_H */
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 8ddc7860e131..c3b46fa358d3 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -41,12 +41,11 @@ extern int pm_clk_create(struct device *dev);
extern void pm_clk_destroy(struct device *dev);
extern int pm_clk_add(struct device *dev, const char *con_id);
extern int pm_clk_add_clk(struct device *dev, struct clk *clk);
-extern int of_pm_clk_add_clk(struct device *dev, const char *name);
extern int of_pm_clk_add_clks(struct device *dev);
-extern void pm_clk_remove(struct device *dev, const char *con_id);
extern void pm_clk_remove_clk(struct device *dev, struct clk *clk);
extern int pm_clk_suspend(struct device *dev);
extern int pm_clk_resume(struct device *dev);
+extern int devm_pm_clk_create(struct device *dev);
#else
static inline bool pm_clk_no_clocks(struct device *dev)
{
@@ -75,21 +74,22 @@ static inline int of_pm_clk_add_clks(struct device *dev)
{
return -EINVAL;
}
-static inline void pm_clk_remove(struct device *dev, const char *con_id)
-{
-}
#define pm_clk_suspend NULL
#define pm_clk_resume NULL
static inline void pm_clk_remove_clk(struct device *dev, struct clk *clk)
{
}
+static inline int devm_pm_clk_create(struct device *dev)
+{
+ return -EINVAL;
+}
#endif
#ifdef CONFIG_HAVE_CLK
-extern void pm_clk_add_notifier(struct bus_type *bus,
+extern void pm_clk_add_notifier(const struct bus_type *bus,
struct pm_clk_notifier_block *clknb);
#else
-static inline void pm_clk_add_notifier(struct bus_type *bus,
+static inline void pm_clk_add_notifier(const struct bus_type *bus,
struct pm_clk_notifier_block *clknb)
{
}
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index dfcfbcecc34b..93ba0143ca47 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -16,7 +16,49 @@
#include <linux/of.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
-#include <linux/cpumask.h>
+#include <linux/cpumask_types.h>
+#include <linux/time64.h>
+
+/*
+ * Flags to control the behaviour when attaching a device to its PM domains.
+ *
+ * PD_FLAG_NO_DEV_LINK: As the default behaviour creates a device-link
+ * for every PM domain that gets attached, this
+ * flag can be used to skip that.
+ *
+ * PD_FLAG_DEV_LINK_ON: Add the DL_FLAG_RPM_ACTIVE to power on the
+ * supplier and its PM domain when creating the
+ * device-links.
+ *
+ * PD_FLAG_REQUIRED_OPP: Assign required_devs for the required OPPs. The
+ * index of the required OPP must correspond to the
+ * index in the array of the pd_names. If pd_names
+ * isn't specified, the index just follows the
+ * index for the attached PM domain.
+ *
+ * PD_FLAG_ATTACH_POWER_ON: Power on the domain during attach.
+ *
+ * PD_FLAG_DETACH_POWER_OFF: Power off the domain during detach.
+ */
+#define PD_FLAG_NO_DEV_LINK BIT(0)
+#define PD_FLAG_DEV_LINK_ON BIT(1)
+#define PD_FLAG_REQUIRED_OPP BIT(2)
+#define PD_FLAG_ATTACH_POWER_ON BIT(3)
+#define PD_FLAG_DETACH_POWER_OFF BIT(4)
+
+struct dev_pm_domain_attach_data {
+ const char * const *pd_names;
+ const u32 num_pd_names;
+ const u32 pd_flags;
+};
+
+struct dev_pm_domain_list {
+ struct device **pd_devs;
+ struct device_link **pd_links;
+ u32 *opp_tokens;
+ u32 num_pds;
+};
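The attach-data/list types above are consumed by dev_pm_domain_attach_list() (not shown in this hunk). A minimal sketch, assuming that helper and using hypothetical domain names "perf" and "mem":

#include <linux/pm_domain.h>

static int example_attach_domains(struct device *dev,
				  struct dev_pm_domain_list **list)
{
	static const char * const pd_names[] = { "perf", "mem" };
	struct dev_pm_domain_attach_data pd_data = {
		.pd_names = pd_names,
		.num_pd_names = ARRAY_SIZE(pd_names),
		.pd_flags = PD_FLAG_DEV_LINK_ON,
	};

	/* Returns the number of attached domains or a negative errno. */
	return dev_pm_domain_attach_list(dev, &pd_data, list);
}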
/*
* Flags to control the behaviour of a genpd.
@@ -60,6 +102,25 @@
* GENPD_FLAG_MIN_RESIDENCY: Enable the genpd governor to consider its
* components' next wakeup when determining the
* optimal idle state.
+ *
+ * GENPD_FLAG_OPP_TABLE_FW: The genpd provider supports performance states,
+ * but its corresponding OPP tables are not
+ * described in DT, but are given directly by FW.
+ *
+ * GENPD_FLAG_DEV_NAME_FW: Instructs genpd to generate a unique device name
+ * using an IDA. It is used by genpd providers which
+ * get their genpd-names directly from FW.
+ *
+ * GENPD_FLAG_NO_SYNC_STATE: The ->sync_state() support is implemented in a
+ * genpd provider specific way, likely through a
+ * parent device node. This flag makes genpd
+ * skip its internal support for this.
+ *
+ * GENPD_FLAG_NO_STAY_ON: For genpd OF providers, a PM domain that is
+ * powered on at initialization is normally kept
+ * from powering off until the ->sync_state()
+ * callback is invoked. This flag allows genpd to
+ * power it off without waiting for ->sync_state().
*/
#define GENPD_FLAG_PM_CLK (1U << 0)
#define GENPD_FLAG_IRQ_SAFE (1U << 1)
@@ -68,6 +129,10 @@
#define GENPD_FLAG_CPU_DOMAIN (1U << 4)
#define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5)
#define GENPD_FLAG_MIN_RESIDENCY (1U << 6)
+#define GENPD_FLAG_OPP_TABLE_FW (1U << 7)
+#define GENPD_FLAG_DEV_NAME_FW (1U << 8)
+#define GENPD_FLAG_NO_SYNC_STATE (1U << 9)
+#define GENPD_FLAG_NO_STAY_ON (1U << 10)
enum gpd_status {
GENPD_STATE_ON = 0, /* PM domain is on */
@@ -81,7 +146,14 @@ enum genpd_notication {
GENPD_NOTIFY_ON,
};
+enum genpd_sync_state {
+ GENPD_SYNC_STATE_OFF = 0,
+ GENPD_SYNC_STATE_SIMPLE,
+ GENPD_SYNC_STATE_ONECELL,
+};
+
struct dev_power_governor {
+ bool (*system_power_down_ok)(struct dev_pm_domain *domain);
bool (*power_down_ok)(struct dev_pm_domain *domain);
bool (*suspend_ok)(struct device *dev);
};
@@ -91,19 +163,32 @@ struct gpd_dev_ops {
int (*stop)(struct device *dev);
};
+struct genpd_governor_data {
+ s64 max_off_time_ns;
+ bool max_off_time_changed;
+ ktime_t next_wakeup;
+ ktime_t next_hrtimer;
+ ktime_t last_enter;
+ bool reflect_residency;
+ bool cached_power_down_ok;
+ bool cached_power_down_state_idx;
+};
+
struct genpd_power_state {
+ const char *name;
s64 power_off_latency_ns;
s64 power_on_latency_ns;
s64 residency_ns;
u64 usage;
u64 rejected;
+ u64 above;
+ u64 below;
struct fwnode_handle *fwnode;
- ktime_t idle_time;
+ u64 idle_time;
void *data;
};
struct genpd_lock_ops;
-struct dev_pm_opp;
struct opp_table;
struct generic_pm_domain {
@@ -114,6 +199,7 @@ struct generic_pm_domain {
struct list_head child_links; /* Links with PM domain as a child */
struct list_head dev_list; /* List of devices */
struct dev_power_governor *gov;
+ struct genpd_governor_data *gd; /* Data used by a genpd governor. */
struct work_struct power_off_work;
struct fwnode_handle *provider; /* Identity of the domain provider */
bool has_provider;
@@ -121,24 +207,25 @@ struct generic_pm_domain {
atomic_t sd_count; /* Number of subdomains with power "on" */
enum gpd_status status; /* Current state of the domain */
unsigned int device_count; /* Number of devices */
+ unsigned int device_id; /* unique device id */
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
unsigned int performance_state; /* Aggregated max performance state */
cpumask_var_t cpus; /* A cpumask of the attached CPUs */
+ bool synced_poweroff; /* A consumer needs a synced poweroff */
+ bool stay_on; /* Stay powered-on during boot. */
+ enum genpd_sync_state sync_state; /* How sync_state is managed. */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
struct raw_notifier_head power_notifiers; /* Power on/off notifiers */
struct opp_table *opp_table; /* OPP table of the genpd */
- unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd,
- struct dev_pm_opp *opp);
int (*set_performance_state)(struct generic_pm_domain *genpd,
unsigned int state);
struct gpd_dev_ops dev_ops;
- s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
- ktime_t next_wakeup; /* Maintained by the domain governor */
- bool max_off_time_changed;
- bool cached_power_down_ok;
- bool cached_power_down_state_idx;
+ int (*set_hwmode_dev)(struct generic_pm_domain *domain,
+ struct device *dev, bool enable);
+ bool (*get_hwmode_dev)(struct generic_pm_domain *domain,
+ struct device *dev);
int (*attach_dev)(struct generic_pm_domain *domain,
struct device *dev);
void (*detach_dev)(struct generic_pm_domain *domain,
@@ -149,8 +236,8 @@ struct generic_pm_domain {
unsigned int state_count);
unsigned int state_count; /* number of states */
unsigned int state_idx; /* state that genpd will go to when off */
- ktime_t on_time;
- ktime_t accounting_time;
+ u64 on_time;
+ u64 accounting_time;
const struct genpd_lock_ops *lock_ops;
union {
struct mutex mlock;
@@ -158,8 +245,11 @@ struct generic_pm_domain {
spinlock_t slock;
unsigned long lock_flags;
};
+ struct {
+ raw_spinlock_t raw_slock;
+ unsigned long raw_lock_flags;
+ };
};
-
};
static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -182,6 +272,7 @@ struct gpd_timing_data {
s64 suspend_latency_ns;
s64 resume_latency_ns;
s64 effective_constraint_ns;
+ ktime_t next_wakeup;
bool constraint_changed;
bool cached_suspend_ok;
};
@@ -193,12 +284,16 @@ struct pm_domain_data {
struct generic_pm_domain_data {
struct pm_domain_data base;
- struct gpd_timing_data td;
+ struct gpd_timing_data *td;
struct notifier_block nb;
struct notifier_block *power_nb;
int cpu;
unsigned int performance_state;
- ktime_t next_wakeup;
+ unsigned int default_pstate;
+ unsigned int rpm_pstate;
+ unsigned int opp_token;
+ bool hw_mode;
+ bool rpm_always_on;
void *data;
};
@@ -222,10 +317,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
int pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off);
int pm_genpd_remove(struct generic_pm_domain *genpd);
+void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
+ unsigned int state_idx);
+struct device *dev_to_genpd_dev(struct device *dev);
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb);
int dev_pm_genpd_remove_notifier(struct device *dev);
void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next);
+ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev);
+void dev_pm_genpd_synced_poweroff(struct device *dev);
+int dev_pm_genpd_set_hwmode(struct device *dev, bool enable);
+bool dev_pm_genpd_get_hwmode(struct device *dev);
+int dev_pm_genpd_rpm_always_on(struct device *dev, bool on);
+bool dev_pm_genpd_is_on(struct device *dev);
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
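
The new hwmode hooks let a power-domain consumer hand control of its domain over to hardware ("HW mode") at runtime. A minimal sketch of how a driver might use them; the foo_* names are hypothetical, only dev_pm_genpd_set_hwmode()/dev_pm_genpd_get_hwmode() come from this header:

	#include <linux/pm_domain.h>

	/* Ask the genpd provider to let hardware manage the domain. */
	static int foo_enable_hw_control(struct device *dev)
	{
		int ret;

		ret = dev_pm_genpd_set_hwmode(dev, true);
		if (ret)
			return ret;

		/* Confirm the provider actually switched modes. */
		if (!dev_pm_genpd_get_hwmode(dev))
			return -ENODEV;

		return 0;
	}

With CONFIG_PM_GENERIC_DOMAINS unset, the stubs further down return -EOPNOTSUPP and false respectively, so callers can treat hardware control as best-effort.
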
@@ -267,6 +371,15 @@ static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
return -EOPNOTSUPP;
}
+static inline void pm_genpd_inc_rejected(struct generic_pm_domain *genpd,
+ unsigned int state_idx)
+{ }
+
+static inline struct device *dev_to_genpd_dev(struct device *dev)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline int dev_pm_genpd_set_performance_state(struct device *dev,
unsigned int state)
{
@@ -287,6 +400,33 @@ static inline int dev_pm_genpd_remove_notifier(struct device *dev)
static inline void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{ }
+static inline ktime_t dev_pm_genpd_get_next_hrtimer(struct device *dev)
+{
+ return KTIME_MAX;
+}
+static inline void dev_pm_genpd_synced_poweroff(struct device *dev)
+{ }
+
+static inline int dev_pm_genpd_set_hwmode(struct device *dev, bool enable)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool dev_pm_genpd_get_hwmode(struct device *dev)
+{
+ return false;
+}
+
+static inline int dev_pm_genpd_rpm_always_on(struct device *dev, bool on)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool dev_pm_genpd_is_on(struct device *dev)
+{
+ return false;
+}
+
#define simple_qos_governor (*(struct dev_power_governor *)(NULL))
#define pm_domain_always_on_gov (*(struct dev_power_governor *)(NULL))
#endif
@@ -302,7 +442,7 @@ static inline void dev_pm_genpd_resume(struct device *dev) {}
/* OF PM domain providers */
struct of_device_id;
-typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
+typedef struct generic_pm_domain *(*genpd_xlate_t)(const struct of_phandle_args *args,
void *data);
struct genpd_onecell_data {
@@ -317,16 +457,15 @@ int of_genpd_add_provider_simple(struct device_node *np,
int of_genpd_add_provider_onecell(struct device_node *np,
struct genpd_onecell_data *data);
void of_genpd_del_provider(struct device_node *np);
-int of_genpd_add_device(struct of_phandle_args *args, struct device *dev);
-int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
- struct of_phandle_args *subdomain_spec);
-int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
- struct of_phandle_args *subdomain_spec);
+int of_genpd_add_device(const struct of_phandle_args *args, struct device *dev);
+int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec,
+ const struct of_phandle_args *subdomain_spec);
+int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
+ const struct of_phandle_args *subdomain_spec);
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n);
-unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
- struct dev_pm_opp *opp);
+void of_genpd_sync_state(struct device_node *np);
int genpd_dev_pm_attach(struct device *dev);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
@@ -348,20 +487,20 @@ static inline int of_genpd_add_provider_onecell(struct device_node *np,
static inline void of_genpd_del_provider(struct device_node *np) {}
-static inline int of_genpd_add_device(struct of_phandle_args *args,
+static inline int of_genpd_add_device(const struct of_phandle_args *args,
struct device *dev)
{
return -ENODEV;
}
-static inline int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
- struct of_phandle_args *subdomain_spec)
+static inline int of_genpd_add_subdomain(const struct of_phandle_args *parent_spec,
+ const struct of_phandle_args *subdomain_spec)
{
return -ENODEV;
}
-static inline int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
- struct of_phandle_args *subdomain_spec)
+static inline int of_genpd_remove_subdomain(const struct of_phandle_args *parent_spec,
+ const struct of_phandle_args *subdomain_spec)
{
return -ENODEV;
}
@@ -372,12 +511,7 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
return -ENODEV;
}
-static inline unsigned int
-pm_genpd_opp_to_performance_state(struct device *genpd_dev,
- struct dev_pm_opp *opp)
-{
- return 0;
-}
+static inline void of_genpd_sync_state(struct device_node *np) {}
static inline int genpd_dev_pm_attach(struct device *dev)
{
@@ -404,16 +538,24 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
#ifdef CONFIG_PM
-int dev_pm_domain_attach(struct device *dev, bool power_on);
+int dev_pm_domain_attach(struct device *dev, u32 flags);
struct device *dev_pm_domain_attach_by_id(struct device *dev,
unsigned int index);
struct device *dev_pm_domain_attach_by_name(struct device *dev,
const char *name);
+int dev_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list);
+int devm_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list);
void dev_pm_domain_detach(struct device *dev, bool power_off);
+void dev_pm_domain_detach_list(struct dev_pm_domain_list *list);
int dev_pm_domain_start(struct device *dev);
void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
+int dev_pm_domain_set_performance_state(struct device *dev, unsigned int state);
#else
-static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
+static inline int dev_pm_domain_attach(struct device *dev, u32 flags)
{
return 0;
}
@@ -427,13 +569,33 @@ static inline struct device *dev_pm_domain_attach_by_name(struct device *dev,
{
return NULL;
}
+static inline int dev_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list)
+{
+ return 0;
+}
+
+static inline int devm_pm_domain_attach_list(struct device *dev,
+ const struct dev_pm_domain_attach_data *data,
+ struct dev_pm_domain_list **list)
+{
+ return 0;
+}
+
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
+static inline void dev_pm_domain_detach_list(struct dev_pm_domain_list *list) {}
static inline int dev_pm_domain_start(struct device *dev)
{
return 0;
}
static inline void dev_pm_domain_set(struct device *dev,
struct dev_pm_domain *pd) {}
+static inline int dev_pm_domain_set_performance_state(struct device *dev,
+ unsigned int state)
+{
+ return 0;
+}
#endif
#endif /* _LINUX_PM_DOMAIN_H */
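
The multi-PD attach helpers above replace open-coded loops over dev_pm_domain_attach_by_name(). A hedged sketch of the intended usage, assuming struct dev_pm_domain_attach_data carries pd_names/num_pd_names as in mainline (foo_* is a hypothetical consumer):

	static const char * const foo_pd_names[] = { "perf", "mem" };

	static int foo_probe(struct device *dev)
	{
		struct dev_pm_domain_attach_data pd_data = {
			.pd_names = foo_pd_names,
			.num_pd_names = ARRAY_SIZE(foo_pd_names),
		};
		struct dev_pm_domain_list *pd_list;
		int ret;

		/* Returns the number of attached domains or a negative errno. */
		ret = devm_pm_domain_attach_list(dev, &pd_data, &pd_list);
		if (ret < 0)
			return ret;

		/* The list is detached automatically when the driver unbinds. */
		return 0;
	}
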
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 84150a22fd7c..789406d95e69 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -11,11 +11,13 @@
#ifndef __LINUX_OPP_H__
#define __LINUX_OPP_H__
+#include <linux/cleanup.h>
#include <linux/energy_model.h>
#include <linux/err.h>
#include <linux/notifier.h>
struct clk;
+struct cpufreq_frequency_table;
struct regulator;
struct dev_pm_opp;
struct device;
@@ -32,69 +34,104 @@ enum dev_pm_opp_event {
* @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
* @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
* @u_amp: Maximum current drawn by the device in microamperes
+ * @u_watt: Power used by the device in microwatts
*
- * This structure stores the voltage/current values for a single power supply.
+ * This structure stores the voltage/current/power values for a single power
+ * supply.
*/
struct dev_pm_opp_supply {
unsigned long u_volt;
unsigned long u_volt_min;
unsigned long u_volt_max;
unsigned long u_amp;
+ unsigned long u_watt;
};
+typedef int (*config_regulators_t)(struct device *dev,
+ struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp,
+ struct regulator **regulators, unsigned int count);
+
+typedef int (*config_clks_t)(struct device *dev, struct opp_table *opp_table,
+ struct dev_pm_opp *opp, void *data, bool scaling_down);
+
/**
- * struct dev_pm_opp_icc_bw - Interconnect bandwidth values
- * @avg: Average bandwidth corresponding to this OPP (in icc units)
- * @peak: Peak bandwidth corresponding to this OPP (in icc units)
+ * struct dev_pm_opp_config - Device OPP configuration values
+ * @clk_names: Clk names, NULL terminated array.
+ * @config_clks: Custom set clk helper.
+ * @prop_name: Name to postfix to properties.
+ * @config_regulators: Custom set regulator helper.
+ * @supported_hw: Array of hierarchy of versions to match.
+ * @supported_hw_count: Number of elements in the array.
+ * @regulator_names: Array of pointers to the names of the regulators, NULL terminated.
+ * @required_dev: The required OPP device.
+ * @required_dev_index: The index of the required OPP for the @required_dev.
*
- * This structure stores the bandwidth values for a single interconnect path.
+ * This structure contains platform specific OPP configurations for the device.
*/
-struct dev_pm_opp_icc_bw {
- u32 avg;
- u32 peak;
+struct dev_pm_opp_config {
+ /* NULL terminated */
+ const char * const *clk_names;
+ config_clks_t config_clks;
+ const char *prop_name;
+ config_regulators_t config_regulators;
+ const unsigned int *supported_hw;
+ unsigned int supported_hw_count;
+ const char * const *regulator_names;
+ struct device *required_dev;
+ unsigned int required_dev_index;
};
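
All of the former per-resource setters are now funneled through this one structure. A minimal sketch of a driver configuring its OPP table with one clock and one regulator before use (names hypothetical):

	static const char * const foo_clk_names[] = { "core", NULL };
	static const char * const foo_reg_names[] = { "vdd", NULL };

	static int foo_opp_setup(struct device *dev)
	{
		struct dev_pm_opp_config config = {
			.clk_names = foo_clk_names,
			.regulator_names = foo_reg_names,
		};
		int token;

		/* Returns a token (> 0) identifying this configuration. */
		token = dev_pm_opp_set_config(dev, &config);
		if (token < 0)
			return token;

		/* ... add and use OPPs ... */

		dev_pm_opp_clear_config(token);
		return 0;
	}
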
+#define OPP_LEVEL_UNSET U32_MAX
+
/**
- * struct dev_pm_opp_info - OPP freq/voltage/current values
- * @rate: Target clk rate in hz
- * @supplies: Array of voltage/current values for all power supplies
- *
- * This structure stores the freq/voltage/current values for a single OPP.
+ * struct dev_pm_opp_data - The data to use to initialize an OPP.
+ * @turbo: Flag to indicate whether the OPP is to be marked turbo or not.
+ * @level: The performance level for the OPP. Set level to OPP_LEVEL_UNSET if
+ * level field isn't used.
+ * @freq: The clock rate in Hz for the OPP.
+ * @u_volt: The voltage in uV for the OPP.
*/
-struct dev_pm_opp_info {
- unsigned long rate;
- struct dev_pm_opp_supply *supplies;
+struct dev_pm_opp_data {
+ bool turbo;
+ unsigned int level;
+ unsigned long freq;
+ unsigned long u_volt;
};
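
dev_pm_opp_add_dynamic(), declared further down, takes this structure in place of the old freq/voltage argument pair; per the kdoc above, @level must be OPP_LEVEL_UNSET when unused. A sketch with illustrative values:

	static int foo_add_boot_opp(struct device *dev)
	{
		struct dev_pm_opp_data data = {
			.level = OPP_LEVEL_UNSET,	/* level not used */
			.freq = 800000000,		/* 800 MHz */
			.u_volt = 900000,		/* 900 mV */
		};

		return dev_pm_opp_add_dynamic(dev, &data);
	}
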
/**
- * struct dev_pm_set_opp_data - Set OPP data
- * @old_opp: Old OPP info
- * @new_opp: New OPP info
- * @regulators: Array of regulator pointers
- * @regulator_count: Number of regulators
- * @clk: Pointer to clk
- * @dev: Pointer to the struct device
+ * struct dev_pm_opp_key - Key used to identify OPP entries
+ * @freq: Frequency in Hz. Use 0 if frequency is not to be matched.
+ * @level: Performance level associated with the OPP entry.
+ * Use OPP_LEVEL_UNSET if level is not to be matched.
+ * @bw: Bandwidth associated with the OPP entry.
+ * Use 0 if bandwidth is not to be matched.
*
- * This structure contains all information required for setting an OPP.
+ * This structure is used to uniquely identify an OPP entry based on
+ * frequency, performance level, and bandwidth. Each field can be
+ * selectively ignored during matching by setting it to its respective
+ * NOP value.
*/
-struct dev_pm_set_opp_data {
- struct dev_pm_opp_info old_opp;
- struct dev_pm_opp_info new_opp;
-
- struct regulator **regulators;
- unsigned int regulator_count;
- struct clk *clk;
- struct device *dev;
+struct dev_pm_opp_key {
+ unsigned long freq;
+ unsigned int level;
+ u32 bw;
};
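
For example, a lookup that matches on frequency alone leaves the other two fields at their NOP values; a hedged sketch using dev_pm_opp_find_key_exact(), declared below:

	struct dev_pm_opp_key key = {
		.freq = 800000000,		/* match 800 MHz exactly */
		.level = OPP_LEVEL_UNSET,	/* ignore level */
		.bw = 0,			/* ignore bandwidth */
	};
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_key_exact(dev, &key, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);
	/* ... */
	dev_pm_opp_put(opp);
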
#if defined(CONFIG_PM_OPP)
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
+struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table);
void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
+unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index);
+
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
-unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *supplies);
+
+unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp);
+
+unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index);
unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp);
@@ -112,22 +149,47 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev);
struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
bool available);
-struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
- unsigned int level);
-struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
- unsigned int *level);
+
+struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev,
+ struct dev_pm_opp_key *key,
+ bool available);
+
+struct dev_pm_opp *
+dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
+ u32 index, bool available);
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq);
-struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
- unsigned long u_volt);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_floor_indexed(struct device *dev,
+ unsigned long *freq, u32 index);
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil_indexed(struct device *dev,
+ unsigned long *freq, u32 index);
+
+struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
+ unsigned int level);
+
+struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level);
+
+struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
+ unsigned int *level);
+
+struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
+ unsigned int *bw, int index);
+
+struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
+ unsigned int *bw, int index);
+
+struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp);
void dev_pm_opp_put(struct dev_pm_opp *opp);
-int dev_pm_opp_add(struct device *dev, unsigned long freq,
- unsigned long u_volt);
+int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp);
+
void dev_pm_opp_remove(struct device *dev, unsigned long freq);
void dev_pm_opp_remove_all_dynamic(struct device *dev);
@@ -142,23 +204,13 @@ int dev_pm_opp_disable(struct device *dev, unsigned long freq);
int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb);
int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb);
-struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count);
-void dev_pm_opp_put_supported_hw(struct opp_table *opp_table);
-int devm_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count);
-struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name);
-void dev_pm_opp_put_prop_name(struct opp_table *opp_table);
-struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count);
-void dev_pm_opp_put_regulators(struct opp_table *opp_table);
-int devm_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count);
-struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name);
-void dev_pm_opp_put_clkname(struct opp_table *opp_table);
-int devm_pm_opp_set_clkname(struct device *dev, const char *name);
-struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
-void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table);
-int devm_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
-struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs);
-void dev_pm_opp_detach_genpd(struct opp_table *opp_table);
-int devm_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs);
+int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config);
+int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config);
+void dev_pm_opp_clear_config(int token);
+int dev_pm_opp_config_clks_simple(struct device *dev,
+ struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
+ bool scaling_down);
+
struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, struct opp_table *dst_table, struct dev_pm_opp *src_opp);
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
@@ -168,6 +220,7 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
void dev_pm_opp_remove_table(struct device *dev);
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
int dev_pm_opp_sync_regulators(struct device *dev);
+
#else
static inline struct opp_table *dev_pm_opp_get_opp_table(struct device *dev)
{
@@ -179,14 +232,34 @@ static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *
return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct opp_table *dev_pm_opp_get_opp_table_ref(struct opp_table *opp_table)
+{
+ return opp_table;
+}
+
static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {}
+static inline unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index)
+{
+ return 0;
+}
+
static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
return 0;
}
-static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
+static inline int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *supplies)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp)
+{
+ return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index)
{
return 0;
}
@@ -239,14 +312,16 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
- unsigned int level)
+static inline struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev,
+ struct dev_pm_opp_key *key,
+ bool available)
{
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
- unsigned int *level)
+static inline struct dev_pm_opp *
+dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
+ u32 index, bool available)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -257,8 +332,8 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
- unsigned long u_volt)
+static inline struct dev_pm_opp *
+dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq, u32 index)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -269,123 +344,106 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
return ERR_PTR(-EOPNOTSUPP);
}
-static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
-
-static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
- unsigned long u_volt)
+static inline struct dev_pm_opp *
+dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq, u32 index)
{
- return -EOPNOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
+static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
+ unsigned int level)
{
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline void dev_pm_opp_remove_all_dynamic(struct device *dev)
+static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
+ unsigned int *level)
{
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline int
-dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
- unsigned long u_volt, unsigned long u_volt_min,
- unsigned long u_volt_max)
+static inline struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
+ unsigned int *level)
{
- return 0;
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
+static inline struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
+ unsigned int *bw, int index)
{
- return 0;
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
+static inline struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
+ unsigned int *bw, int index)
{
- return 0;
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
+static inline struct dev_pm_opp *dev_pm_opp_get(struct dev_pm_opp *opp)
{
- return -EOPNOTSUPP;
+ return opp;
}
-static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb)
-{
- return -EOPNOTSUPP;
-}
+static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
-static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev,
- const u32 *versions,
- unsigned int count)
+static inline int
+dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
-static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {}
-
-static inline int devm_pm_opp_set_supported_hw(struct device *dev,
- const u32 *versions,
- unsigned int count)
+static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
- return -EOPNOTSUPP;
}
-static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
- int (*set_opp)(struct dev_pm_set_opp_data *data))
+static inline void dev_pm_opp_remove_all_dynamic(struct device *dev)
{
- return ERR_PTR(-EOPNOTSUPP);
}
-static inline void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {}
-
-static inline int devm_pm_opp_register_set_opp_helper(struct device *dev,
- int (*set_opp)(struct dev_pm_set_opp_data *data))
+static inline int
+dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt, unsigned long u_volt_min,
+ unsigned long u_volt_max)
{
- return -EOPNOTSUPP;
+ return 0;
}
-static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return 0;
}
-static inline void dev_pm_opp_put_prop_name(struct opp_table *opp_table) {}
-
-static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count)
+static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return 0;
}
-static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {}
-
-static inline int devm_pm_opp_set_regulators(struct device *dev,
- const char * const names[],
- unsigned int count)
+static inline int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
{
return -EOPNOTSUPP;
}
-static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name)
+static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
-static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {}
-
-static inline int devm_pm_opp_set_clkname(struct device *dev, const char *name)
+static inline int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
return -EOPNOTSUPP;
}
-static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs)
+static inline int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
-static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {}
+static inline void dev_pm_opp_clear_config(int token) {}
-static inline int devm_pm_opp_attach_genpd(struct device *dev,
- const char **names,
- struct device ***virt_devs)
+static inline int dev_pm_opp_config_clks_simple(struct device *dev,
+ struct opp_table *opp_table, struct dev_pm_opp *opp, void *data,
+ bool scaling_down)
{
return -EOPNOTSUPP;
}
@@ -436,10 +494,25 @@ static inline int dev_pm_opp_sync_regulators(struct device *dev)
#endif /* CONFIG_PM_OPP */
+#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
+int dev_pm_opp_init_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **table);
+void dev_pm_opp_free_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **table);
+#else
+static inline int dev_pm_opp_init_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **table)
+{
+ return -EINVAL;
+}
+
+static inline void dev_pm_opp_free_cpufreq_table(struct device *dev, struct cpufreq_frequency_table **table)
+{
+}
+#endif
+
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
int dev_pm_opp_of_add_table(struct device *dev);
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index);
-int dev_pm_opp_of_add_table_noclk(struct device *dev, int index);
+int devm_pm_opp_of_add_table_indexed(struct device *dev, int index);
void dev_pm_opp_of_remove_table(struct device *dev);
int devm_pm_opp_of_add_table(struct device *dev);
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
@@ -448,8 +521,11 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
int of_get_required_opp_performance_state(struct device_node *np, int index);
+bool dev_pm_opp_of_has_required_opp(struct device *dev);
int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table);
int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus);
+int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW,
+ unsigned long *kHz);
static inline void dev_pm_opp_of_unregister_em(struct device *dev)
{
em_dev_unregister_perf_domain(dev);
@@ -465,7 +541,7 @@ static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
return -EOPNOTSUPP;
}
-static inline int dev_pm_opp_of_add_table_noclk(struct device *dev, int index)
+static inline int devm_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
return -EOPNOTSUPP;
}
@@ -513,15 +589,173 @@ static inline void dev_pm_opp_of_unregister_em(struct device *dev)
{
}
+static inline int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW,
+ unsigned long *kHz)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int of_get_required_opp_performance_state(struct device_node *np, int index)
{
return -EOPNOTSUPP;
}
+static inline bool dev_pm_opp_of_has_required_opp(struct device *dev)
+{
+ return false;
+}
+
static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table)
{
return -EOPNOTSUPP;
}
#endif
+/* Scope based cleanup macro for OPP reference counting */
+DEFINE_FREE(put_opp, struct dev_pm_opp *, if (!IS_ERR_OR_NULL(_T)) dev_pm_opp_put(_T))
+
+/* Scope based cleanup macro for OPP table reference counting */
+DEFINE_FREE(put_opp_table, struct opp_table *, if (!IS_ERR_OR_NULL(_T)) dev_pm_opp_put_opp_table(_T))
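
With these cleanup classes, a caller can take an OPP-table reference that is dropped automatically at end of scope. A minimal sketch using the put_opp_table class defined above (foo_* hypothetical):

	static int foo_opp_count(struct device *dev)
	{
		struct opp_table *table __free(put_opp_table) =
				dev_pm_opp_get_opp_table(dev);

		if (IS_ERR(table))
			return PTR_ERR(table);

		/* The table reference is released when 'table' leaves scope. */
		return dev_pm_opp_get_opp_count(dev);
	}
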
+
+/* OPP Configuration helpers */
+
+static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
+ unsigned long u_volt)
+{
+ struct dev_pm_opp_data data = {
+ .freq = freq,
+ .u_volt = u_volt,
+ };
+
+ return dev_pm_opp_add_dynamic(dev, &data);
+}
+
+/* Regulators helpers */
+static inline int dev_pm_opp_set_regulators(struct device *dev,
+ const char * const names[])
+{
+ struct dev_pm_opp_config config = {
+ .regulator_names = names,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_regulators(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_set_regulators(struct device *dev,
+ const char * const names[])
+{
+ struct dev_pm_opp_config config = {
+ .regulator_names = names,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* Supported-hw helpers */
+static inline int dev_pm_opp_set_supported_hw(struct device *dev,
+ const u32 *versions,
+ unsigned int count)
+{
+ struct dev_pm_opp_config config = {
+ .supported_hw = versions,
+ .supported_hw_count = count,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_supported_hw(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_set_supported_hw(struct device *dev,
+ const u32 *versions,
+ unsigned int count)
+{
+ struct dev_pm_opp_config config = {
+ .supported_hw = versions,
+ .supported_hw_count = count,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* clkname helpers */
+static inline int dev_pm_opp_set_clkname(struct device *dev, const char *name)
+{
+ const char *names[] = { name, NULL };
+ struct dev_pm_opp_config config = {
+ .clk_names = names,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_clkname(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline int devm_pm_opp_set_clkname(struct device *dev, const char *name)
+{
+ const char *names[] = { name, NULL };
+ struct dev_pm_opp_config config = {
+ .clk_names = names,
+ };
+
+ return devm_pm_opp_set_config(dev, &config);
+}
+
+/* config-regulators helpers */
+static inline int dev_pm_opp_set_config_regulators(struct device *dev,
+ config_regulators_t helper)
+{
+ struct dev_pm_opp_config config = {
+ .config_regulators = helper,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_config_regulators(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+/* prop-name helpers */
+static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
+{
+ struct dev_pm_opp_config config = {
+ .prop_name = name,
+ };
+
+ return dev_pm_opp_set_config(dev, &config);
+}
+
+static inline void dev_pm_opp_put_prop_name(int token)
+{
+ dev_pm_opp_clear_config(token);
+}
+
+static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
+{
+ return dev_pm_opp_get_freq_indexed(opp, 0);
+}
+
+static inline int dev_pm_opp_set_level(struct device *dev, unsigned int level)
+{
+ struct dev_pm_opp *opp __free(put_opp) = dev_pm_opp_find_level_exact(dev, level);
+
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ return dev_pm_opp_set_opp(dev, opp);
+}
+
#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 4a69d4af3ff8..6cea4455f867 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -162,6 +162,15 @@ static inline void cpu_latency_qos_update_request(struct pm_qos_request *req,
static inline void cpu_latency_qos_remove_request(struct pm_qos_request *req) {}
#endif
+#ifdef CONFIG_PM_QOS_CPU_SYSTEM_WAKEUP
+s32 cpu_wakeup_latency_qos_limit(void);
+#else
+static inline s32 cpu_wakeup_latency_qos_limit(void)
+{
+ return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+}
+#endif
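
A typical consumer of this limit would be an idle-state selector that must not pick a state whose exit latency exceeds the system-wakeup constraint. A hedged sketch, assuming the limit is expressed in microseconds per the PM QoS resume-latency convention (struct foo_idle_state is hypothetical):

	struct foo_idle_state {
		s32 exit_latency_us;
	};

	static bool foo_state_allowed(const struct foo_idle_state *state)
	{
		/* Reject states that would violate the wakeup-latency limit. */
		return state->exit_latency_us <= cpu_wakeup_latency_qos_limit();
	}
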
+
#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 6c08a085367b..41037c513f06 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -21,6 +21,41 @@
#define RPM_GET_PUT 0x04 /* Increment/decrement the
usage_count */
#define RPM_AUTO 0x08 /* Use autosuspend_delay */
+#define RPM_TRANSPARENT 0x10 /* Succeed if runtime PM is disabled */
+
+/*
+ * Use this for defining a set of PM operations to be used in all situations
+ * (system suspend, hibernation or runtime PM).
+ *
+ * Note that the behaviour differs from the deprecated UNIVERSAL_DEV_PM_OPS()
+ * macro, which uses the provided callbacks for both runtime PM and system
+ * sleep, while DEFINE_RUNTIME_DEV_PM_OPS() uses pm_runtime_force_suspend()
+ * and pm_runtime_force_resume() for its system sleep callbacks.
+ *
+ * If the underlying dev_pm_ops struct symbol has to be exported, use
+ * EXPORT_RUNTIME_DEV_PM_OPS() or EXPORT_GPL_RUNTIME_DEV_PM_OPS() instead.
+ */
+#define DEFINE_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+ _DEFINE_DEV_PM_OPS(name, pm_runtime_force_suspend, \
+ pm_runtime_force_resume, suspend_fn, \
+ resume_fn, idle_fn)
+
+#define EXPORT_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+ EXPORT_DEV_PM_OPS(name) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
+#define EXPORT_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+ EXPORT_GPL_DEV_PM_OPS(name) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
+#define EXPORT_NS_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
+ EXPORT_NS_DEV_PM_OPS(name, ns) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
+#define EXPORT_NS_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn, ns) \
+ EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
+ RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ }
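
Taken together, these macros let a driver define and, if needed, export a dev_pm_ops that reuses its runtime PM callbacks for system sleep. A minimal sketch of a platform driver using DEFINE_RUNTIME_DEV_PM_OPS() (foo_* hypothetical):

	static int foo_runtime_suspend(struct device *dev)
	{
		/* Power the hardware down. */
		return 0;
	}

	static int foo_runtime_resume(struct device *dev)
	{
		/* Power the hardware back up. */
		return 0;
	}

	static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_runtime_suspend,
					 foo_runtime_resume, NULL);

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
			/* pm_ptr() compiles the ops out when CONFIG_PM=n. */
			.pm = pm_ptr(&foo_pm_ops),
		},
	};
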
#ifdef CONFIG_PM
extern struct workqueue_struct *pm_wq;
@@ -33,15 +68,17 @@ static inline bool queue_pm_work(struct work_struct *work)
extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern int pm_runtime_force_suspend(struct device *dev);
-extern int pm_runtime_force_resume(struct device *dev);
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
-extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);
+extern int pm_runtime_get_if_active(struct device *dev);
+extern int pm_runtime_get_if_in_use(struct device *dev);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
-extern int pm_runtime_barrier(struct device *dev);
+extern void pm_runtime_barrier(struct device *dev);
+extern bool pm_runtime_block_if_disabled(struct device *dev);
+extern void pm_runtime_unblock(struct device *dev);
extern void pm_runtime_enable(struct device *dev);
extern void __pm_runtime_disable(struct device *dev, bool check_resume);
extern void pm_runtime_allow(struct device *dev);
@@ -51,25 +88,16 @@ extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
-extern void pm_runtime_update_max_time_suspended(struct device *dev,
- s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device_link *link);
+extern void pm_runtime_release_supplier(struct device_link *link);
-/**
- * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
- * @dev: Target device.
- *
- * Increment the runtime PM usage counter of @dev if its runtime PM status is
- * %RPM_ACTIVE and its runtime PM usage counter is greater than 0.
- */
-static inline int pm_runtime_get_if_in_use(struct device *dev)
-{
- return pm_runtime_get_if_active(dev, false);
-}
+int devm_pm_runtime_set_active_enabled(struct device *dev);
+extern int devm_pm_runtime_enable(struct device *dev);
+int devm_pm_runtime_get_noresume(struct device *dev);
/**
* pm_suspend_ignore_children - Set runtime PM behavior regarding children.
@@ -127,7 +155,7 @@ static inline bool pm_runtime_suspended(struct device *dev)
* pm_runtime_active - Check whether or not a device is runtime-active.
* @dev: Target device.
*
- * Return %true if runtime PM is enabled for @dev and its runtime PM status is
+ * Return %true if runtime PM is disabled for @dev or its runtime PM status is
* %RPM_ACTIVE, or %false otherwise.
*
* Note that the return value of this function can only be trusted if it is
@@ -173,6 +201,17 @@ static inline bool pm_runtime_enabled(struct device *dev)
}
/**
+ * pm_runtime_blocked - Check if runtime PM enabling is blocked.
+ * @dev: Target device.
+ *
+ * Do not call this function outside system suspend/resume code paths.
+ */
+static inline bool pm_runtime_blocked(struct device *dev)
+{
+ return dev->power.last_status == RPM_BLOCKED;
+}
+
+/**
* pm_runtime_has_no_callbacks - Check if runtime PM callbacks may be present.
* @dev: Target device.
*
@@ -218,7 +257,6 @@ static inline bool queue_pm_work(struct work_struct *work) { return false; }
static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
-static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
{
@@ -240,19 +278,25 @@ static inline int pm_runtime_get_if_in_use(struct device *dev)
{
return -EINVAL;
}
-static inline int pm_runtime_get_if_active(struct device *dev,
- bool ign_usage_count)
+static inline int pm_runtime_get_if_active(struct device *dev)
{
return -EINVAL;
}
static inline int __pm_runtime_set_status(struct device *dev,
unsigned int status) { return 0; }
-static inline int pm_runtime_barrier(struct device *dev) { return 0; }
+static inline void pm_runtime_barrier(struct device *dev) {}
+static inline bool pm_runtime_block_if_disabled(struct device *dev) { return true; }
+static inline void pm_runtime_unblock(struct device *dev) {}
static inline void pm_runtime_enable(struct device *dev) {}
static inline void __pm_runtime_disable(struct device *dev, bool c) {}
+static inline bool pm_runtime_blocked(struct device *dev) { return true; }
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}
+static inline int devm_pm_runtime_set_active_enabled(struct device *dev) { return 0; }
+static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }
+static inline int devm_pm_runtime_get_noresume(struct device *dev) { return 0; }
+
static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
@@ -279,9 +323,22 @@ static inline void pm_runtime_get_suppliers(struct device *dev) {}
static inline void pm_runtime_put_suppliers(struct device *dev) {}
static inline void pm_runtime_new_link(struct device *dev) {}
static inline void pm_runtime_drop_link(struct device_link *link) {}
+static inline void pm_runtime_release_supplier(struct device_link *link) {}
#endif /* !CONFIG_PM */
+#ifdef CONFIG_PM_SLEEP
+
+bool pm_runtime_need_not_resume(struct device *dev);
+int pm_runtime_force_resume(struct device *dev);
+
+#else /* !CONFIG_PM_SLEEP */
+
+static inline bool pm_runtime_need_not_resume(struct device *dev) { return true; }
+static inline int pm_runtime_force_resume(struct device *dev) { return -ENXIO; }
+
+#endif /* CONFIG_PM_SLEEP */
+
/**
* pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it.
* @dev: Target device.
@@ -289,6 +346,19 @@ static inline void pm_runtime_drop_link(struct device_link *link) {}
* Invoke the "idle check" callback of @dev and, depending on its return value,
* set up autosuspend of @dev or suspend it (depending on whether or not
* autosuspend has been enabled for it).
+ *
+ * Return:
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter non-zero, Runtime PM status change
+ * ongoing or device not in %RPM_ACTIVE state.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -EINPROGRESS: Suspend already in progress.
+ * * -ENOSYS: CONFIG_PM not enabled.
+ * Other values and conditions for the above values are possible as returned by
+ * Runtime PM idle and suspend callbacks.
*/
static inline int pm_runtime_idle(struct device *dev)
{
@@ -298,6 +368,19 @@ static inline int pm_runtime_idle(struct device *dev)
/**
* pm_runtime_suspend - Suspend a device synchronously.
* @dev: Target device.
+ *
+ * Return:
+ * * 1: Success; device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter non-zero or Runtime PM status change
+ * ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -ENOSYS: CONFIG_PM not enabled.
+ * Other values and conditions for the above values are possible as returned by
+ * Runtime PM suspend callbacks.
*/
static inline int pm_runtime_suspend(struct device *dev)
{
@@ -305,14 +388,30 @@ static inline int pm_runtime_suspend(struct device *dev)
}
/**
- * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it.
+ * pm_runtime_autosuspend - Update the last access time and set up autosuspend
+ * of a device.
* @dev: Target device.
*
- * Set up autosuspend of @dev or suspend it (depending on whether or not
- * autosuspend is enabled for it) without engaging its "idle check" callback.
+ * First update the last access time, then set up autosuspend of @dev or suspend
+ * it (depending on whether or not autosuspend is enabled for it) without
+ * engaging its "idle check" callback.
+ *
+ * Return:
+ * * 1: Success; device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter non-zero or Runtime PM status change
+ * ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -ENOSYS: CONFIG_PM not enabled.
+ * Other values and conditions for the above values are possible as returned by
+ * Runtime PM suspend callbacks.
*/
static inline int pm_runtime_autosuspend(struct device *dev)
{
+ pm_runtime_mark_last_busy(dev);
return __pm_runtime_suspend(dev, RPM_AUTO);
}
@@ -331,6 +430,17 @@ static inline int pm_runtime_resume(struct device *dev)
*
* Queue up a work item to run an equivalent of pm_runtime_idle() for @dev
* asynchronously.
+ *
+ * Return:
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter non-zero, Runtime PM status change
+ * ongoing or device not in %RPM_ACTIVE state.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -EINPROGRESS: Suspend already in progress.
+ * * -ENOSYS: CONFIG_PM not enabled.
*/
static inline int pm_request_idle(struct device *dev)
{
@@ -347,14 +457,28 @@ static inline int pm_request_resume(struct device *dev)
}
/**
- * pm_request_autosuspend - Queue up autosuspend of a device.
+ * pm_request_autosuspend - Update the last access time and queue up autosuspend
+ * of a device.
* @dev: Target device.
*
- * Queue up a work item to run an equivalent pm_runtime_autosuspend() for @dev
- * asynchronously.
+ * Update the last access time of a device and queue up a work item to run an
+ * equivalent of pm_runtime_autosuspend() for @dev asynchronously.
+ *
+ * Return:
+ * * 1: Success; device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter non-zero or Runtime PM status change
+ * ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -EINPROGRESS: Suspend already in progress.
+ * * -ENOSYS: CONFIG_PM not enabled.
*/
static inline int pm_request_autosuspend(struct device *dev)
{
+ pm_runtime_mark_last_busy(dev);
return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
}
@@ -380,12 +504,28 @@ static inline int pm_runtime_get(struct device *dev)
* The possible return values of this function are the same as for
* pm_runtime_resume() and the runtime PM usage counter of @dev remains
* incremented in all cases, even if it returns an error code.
+ * Consider using pm_runtime_resume_and_get() instead of it, especially
+ * if its return value is checked by the caller, as this is likely to result
+ * in cleaner code.
*/
static inline int pm_runtime_get_sync(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_GET_PUT);
}
+static inline int pm_runtime_get_active(struct device *dev, int rpmflags)
+{
+ int ret;
+
+ ret = __pm_runtime_resume(dev, RPM_GET_PUT | rpmflags);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
* @dev: Target device.
@@ -396,15 +536,7 @@ static inline int pm_runtime_get_sync(struct device *dev)
*/
static inline int pm_runtime_resume_and_get(struct device *dev)
{
- int ret;
-
- ret = __pm_runtime_resume(dev, RPM_GET_PUT);
- if (ret < 0) {
- pm_runtime_put_noidle(dev);
- return ret;
- }
-
- return 0;
+ return pm_runtime_get_active(dev, 0);
}
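
The error-handling pattern this helper enables, in contrast to pm_runtime_get_sync() which leaves the counter bumped on failure (sketch of a driver op body):

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage counter already dropped on failure */

	/* ... access the hardware ... */

	pm_runtime_put(dev);
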
/**
@@ -413,6 +545,18 @@ static inline int pm_runtime_resume_and_get(struct device *dev)
*
* Decrement the runtime PM usage counter of @dev and if it turns out to be
* equal to 0, queue up a work item for @dev like in pm_request_idle().
+ *
+ * Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -EINPROGRESS: Suspend already in progress.
+ * * -ENOSYS: CONFIG_PM not enabled.
*/
static inline int pm_runtime_put(struct device *dev)
{
@@ -420,18 +564,103 @@ static inline int pm_runtime_put(struct device *dev)
}
/**
- * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0.
+ * __pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0.
* @dev: Target device.
*
* Decrement the runtime PM usage counter of @dev and if it turns out to be
* equal to 0, queue up a work item for @dev like in pm_request_autosuspend().
+ *
+ * Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -EINPROGRESS: Suspend already in progress.
+ * * -ENOSYS: CONFIG_PM not enabled.
+ */
+static inline int __pm_runtime_put_autosuspend(struct device *dev)
+{
+ return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
+}
+
+/**
+ * pm_runtime_put_autosuspend - Update the last access time of a device, drop
+ * its usage counter and queue autosuspend if the usage counter becomes 0.
+ * @dev: Target device.
+ *
+ * Update the last access time of @dev, decrement the runtime PM usage counter
+ * of @dev and, if it turns out to be equal to 0, queue up a work item for @dev
+ * like in pm_request_autosuspend().
+ *
+ * Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -EINPROGRESS: Suspend already in progress.
+ * * -ENOSYS: CONFIG_PM not enabled.
*/
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
- return __pm_runtime_suspend(dev,
- RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
+ pm_runtime_mark_last_busy(dev);
+ return __pm_runtime_put_autosuspend(dev);
}
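
A driver normally pairs this with autosuspend setup at probe time. A minimal sketch of the whole pattern (the 2 s delay is illustrative):

	/* probe(): */
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* 2 s idle timeout */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	/* I/O path: */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;
	/* ... talk to the hardware ... */
	pm_runtime_put_autosuspend(dev);	/* also updates last-busy time */
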
+DEFINE_GUARD(pm_runtime_noresume, struct device *,
+ pm_runtime_get_noresume(_T), pm_runtime_put_noidle(_T));
+
+DEFINE_GUARD(pm_runtime_active, struct device *,
+ pm_runtime_get_sync(_T), pm_runtime_put(_T));
+DEFINE_GUARD(pm_runtime_active_auto, struct device *,
+ pm_runtime_get_sync(_T), pm_runtime_put_autosuspend(_T));
+/*
+ * Use the following guards with ACQUIRE()/ACQUIRE_ERR().
+ *
+ * The difference between the "_try" and "_try_enabled" variants is that the
+ * former do not produce an error when runtime PM is disabled for the given
+ * device.
+ */
+DEFINE_GUARD_COND(pm_runtime_active, _try,
+ pm_runtime_get_active(_T, RPM_TRANSPARENT), _RET == 0)
+DEFINE_GUARD_COND(pm_runtime_active, _try_enabled,
+ pm_runtime_resume_and_get(_T), _RET == 0)
+DEFINE_GUARD_COND(pm_runtime_active_auto, _try,
+ pm_runtime_get_active(_T, RPM_TRANSPARENT), _RET == 0)
+DEFINE_GUARD_COND(pm_runtime_active_auto, _try_enabled,
+ pm_runtime_resume_and_get(_T), _RET == 0)
+
+/* ACQUIRE() wrapper macros for the guards defined above. */
+
+#define PM_RUNTIME_ACQUIRE(_dev, _var) \
+ ACQUIRE(pm_runtime_active_try, _var)(_dev)
+
+#define PM_RUNTIME_ACQUIRE_AUTOSUSPEND(_dev, _var) \
+ ACQUIRE(pm_runtime_active_auto_try, _var)(_dev)
+
+#define PM_RUNTIME_ACQUIRE_IF_ENABLED(_dev, _var) \
+ ACQUIRE(pm_runtime_active_try_enabled, _var)(_dev)
+
+#define PM_RUNTIME_ACQUIRE_IF_ENABLED_AUTOSUSPEND(_dev, _var) \
+ ACQUIRE(pm_runtime_active_auto_try_enabled, _var)(_dev)
+
+/*
+ * ACQUIRE_ERR() wrapper macro for guard pm_runtime_active.
+ *
+ * Always check PM_RUNTIME_ACQUIRE_ERR() after using one of the
+ * PM_RUNTIME_ACQUIRE*() macros defined above (yes, it can be used with
+ * any of them) and if it is nonzero, avoid accessing the given device.
+ */
+#define PM_RUNTIME_ACQUIRE_ERR(_var_ptr) \
+ ACQUIRE_ERR(pm_runtime_active, _var_ptr)
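
A hedged sketch of the guard-based pattern these macros enable; foo_hw_read() is a hypothetical register accessor:

	static int foo_get_status(struct device *dev, u32 *val)
	{
		PM_RUNTIME_ACQUIRE(dev, pm);
		int ret = PM_RUNTIME_ACQUIRE_ERR(&pm);

		if (ret)
			return ret;

		/* Runtime-active here; the reference drops at scope exit. */
		*val = foo_hw_read(dev);
		return 0;
	}
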
+
/**
* pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0.
* @dev: Target device.
@@ -441,9 +670,21 @@ static inline int pm_runtime_put_autosuspend(struct device *dev)
* return value, set up autosuspend of @dev or suspend it (depending on whether
* or not autosuspend has been enabled for it).
*
- * The possible return values of this function are the same as for
- * pm_runtime_idle() and the runtime PM usage counter of @dev remains
- * decremented in all cases, even if it returns an error code.
+ * The runtime PM usage counter of @dev remains decremented in all cases, even
+ * if it returns an error code.
+ *
+ * Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -ENOSYS: CONFIG_PM not enabled.
+ * Other values and conditions for the above values are possible as returned by
+ * Runtime PM suspend callbacks.
*/
static inline int pm_runtime_put_sync(struct device *dev)
{
@@ -457,9 +698,21 @@ static inline int pm_runtime_put_sync(struct device *dev)
* Decrement the runtime PM usage counter of @dev and if it turns out to be
* equal to 0, carry out runtime-suspend of @dev synchronously.
*
- * The possible return values of this function are the same as for
- * pm_runtime_suspend() and the runtime PM usage counter of @dev remains
- * decremented in all cases, even if it returns an error code.
+ * The runtime PM usage counter of @dev remains decremented in all cases, even
+ * if it returns an error code.
+ *
+ * Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -ENOSYS: CONFIG_PM not enabled.
+ * Other values and conditions for the above values are possible as returned by
+ * Runtime PM suspend callbacks.
*/
static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
@@ -467,19 +720,35 @@ static inline int pm_runtime_put_sync_suspend(struct device *dev)
}
/**
- * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0.
+ * pm_runtime_put_sync_autosuspend - Update the last access time of a device,
+ * drop device usage counter and autosuspend if 0.
* @dev: Target device.
*
- * Decrement the runtime PM usage counter of @dev and if it turns out to be
- * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending
- * on whether or not autosuspend has been enabled for it).
- *
- * The possible return values of this function are the same as for
- * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains
- * decremented in all cases, even if it returns an error code.
+ * Update the last access time of @dev, decrement the runtime PM usage counter
+ * of @dev and if it turns out to be equal to 0, set up autosuspend of @dev or
+ * suspend it synchronously (depending on whether or not autosuspend has been
+ * enabled for it).
+ *
+ * The runtime PM usage counter of @dev remains decremented in all cases, even
+ * if it returns an error code.
+ *
+ * Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
+ * * 0: Success.
+ * * -EINVAL: Runtime PM error.
+ * * -EACCES: Runtime PM disabled.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
+ * * -EBUSY: Runtime PM child_count non-zero.
+ * * -EPERM: Device PM QoS resume latency 0.
+ * * -EINPROGRESS: Suspend already in progress.
+ * * -ENOSYS: CONFIG_PM not enabled.
+ * Other values and conditions for the above values are possible as returned by
+ * Runtime PM suspend callbacks.
*/
static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
+ pm_runtime_mark_last_busy(dev);
return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}
@@ -515,11 +784,18 @@ static inline int pm_runtime_set_suspended(struct device *dev)
* pm_runtime_disable - Disable runtime PM for a device.
* @dev: Target device.
*
- * Prevent the runtime PM framework from working with @dev (by incrementing its
- * "blocking" counter).
+ * Prevent the runtime PM framework from working with @dev by incrementing its
+ * "disable" counter.
*
- * For each invocation of this function for @dev there must be a matching
- * pm_runtime_enable() call in order for runtime PM to be enabled for it.
+ * If the counter is zero when this function runs and there is a pending runtime
+ * resume request for @dev, it will be resumed. If the counter is still zero at
+ * that point, all of the pending runtime PM requests for @dev will be canceled
+ * and this function will wait for any runtime PM operations in progress
+ * involving it to complete.
+ *
+ * For each invocation of this function for @dev, there must be a matching
+ * pm_runtime_enable() call, so that runtime PM is eventually enabled for it
+ * again.
*/
static inline void pm_runtime_disable(struct device *dev)
{
@@ -533,6 +809,10 @@ static inline void pm_runtime_disable(struct device *dev)
* Allow the runtime PM autosuspend mechanism to be used for @dev whenever
* requested (or "autosuspend" will be handled as direct runtime-suspend for
* it).
+ *
+ * NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend()
+ * at driver exit time unless your driver initially enabled pm_runtime
+ * with devm_pm_runtime_enable() (which handles it for you).
*/
static inline void pm_runtime_use_autosuspend(struct device *dev)
{
diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h
index cd5b62db9084..25b63ed51b76 100644
--- a/include/linux/pm_wakeirq.h
+++ b/include/linux/pm_wakeirq.h
@@ -1,15 +1,5 @@
-/*
- * pm_wakeirq.h - Device wakeirq helper functions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* pm_wakeirq.h - Device wakeirq helper functions */
#ifndef _LINUX_PM_WAKEIRQ_H
#define _LINUX_PM_WAKEIRQ_H
@@ -17,11 +7,10 @@
#ifdef CONFIG_PM
extern int dev_pm_set_wake_irq(struct device *dev, int irq);
-extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
- int irq);
+extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq);
+extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq);
extern void dev_pm_clear_wake_irq(struct device *dev);
-extern void dev_pm_enable_wake_irq(struct device *dev);
-extern void dev_pm_disable_wake_irq(struct device *dev);
+extern int devm_pm_set_wake_irq(struct device *dev, int irq);
#else /* !CONFIG_PM */
@@ -35,16 +24,18 @@ static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
return 0;
}
-static inline void dev_pm_clear_wake_irq(struct device *dev)
+static inline int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
{
+ return 0;
}
-static inline void dev_pm_enable_wake_irq(struct device *dev)
+static inline void dev_pm_clear_wake_irq(struct device *dev)
{
}
-static inline void dev_pm_disable_wake_irq(struct device *dev)
+static inline int devm_pm_set_wake_irq(struct device *dev, int irq)
{
+ return 0;
}
#endif /* CONFIG_PM */
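A probe-time sketch of the new devres variant (platform-driver shape assumed); the wake IRQ is cleared automatically when the driver unbinds:

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* devres-managed: no dev_pm_clear_wake_irq() needed on the way out */
	return devm_pm_set_wake_irq(&pdev->dev, irq);
}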
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 196a157456aa..41e8f344a205 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -10,7 +10,7 @@
#define _LINUX_PM_WAKEUP_H
#ifndef _DEVICE_H_
-# error "please don't include this file directly"
+# error "Please do not include this file directly."
#endif
#include <linux/types.h>
@@ -94,11 +94,17 @@ static inline void device_set_wakeup_path(struct device *dev)
dev->power.wakeup_path = true;
}
+static inline void device_set_out_band_wakeup(struct device *dev)
+{
+ dev->power.out_band_wakeup = true;
+}
+
+static inline bool device_out_band_wakeup(struct device *dev)
+{
+ return dev->power.out_band_wakeup;
+}
+
/* drivers/base/power/wakeup.c */
-extern struct wakeup_source *wakeup_source_create(const char *name);
-extern void wakeup_source_destroy(struct wakeup_source *ws);
-extern void wakeup_source_add(struct wakeup_source *ws);
-extern void wakeup_source_remove(struct wakeup_source *ws);
extern struct wakeup_source *wakeup_source_register(struct device *dev,
const char *name);
extern void wakeup_source_unregister(struct wakeup_source *ws);
@@ -107,9 +113,8 @@ extern void wakeup_sources_read_unlock(int idx);
extern struct wakeup_source *wakeup_sources_walk_start(void);
extern struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws);
extern int device_wakeup_enable(struct device *dev);
-extern int device_wakeup_disable(struct device *dev);
+extern void device_wakeup_disable(struct device *dev);
extern void device_set_wakeup_capable(struct device *dev, bool capable);
-extern int device_init_wakeup(struct device *dev, bool val);
extern int device_set_wakeup_enable(struct device *dev, bool enable);
extern void __pm_stay_awake(struct wakeup_source *ws);
extern void pm_stay_awake(struct device *dev);
@@ -130,17 +135,6 @@ static inline bool device_can_wakeup(struct device *dev)
return dev->power.can_wakeup;
}
-static inline struct wakeup_source *wakeup_source_create(const char *name)
-{
- return NULL;
-}
-
-static inline void wakeup_source_destroy(struct wakeup_source *ws) {}
-
-static inline void wakeup_source_add(struct wakeup_source *ws) {}
-
-static inline void wakeup_source_remove(struct wakeup_source *ws) {}
-
static inline struct wakeup_source *wakeup_source_register(struct device *dev,
const char *name)
{
@@ -155,10 +149,9 @@ static inline int device_wakeup_enable(struct device *dev)
return 0;
}
-static inline int device_wakeup_disable(struct device *dev)
+static inline void device_wakeup_disable(struct device *dev)
{
dev->power.should_wakeup = false;
- return 0;
}
static inline int device_set_wakeup_enable(struct device *dev, bool enable)
@@ -167,13 +160,6 @@ static inline int device_set_wakeup_enable(struct device *dev, bool enable)
return 0;
}
-static inline int device_init_wakeup(struct device *dev, bool val)
-{
- device_set_wakeup_capable(dev, val);
- device_set_wakeup_enable(dev, val);
- return 0;
-}
-
static inline bool device_may_wakeup(struct device *dev)
{
return dev->power.can_wakeup && dev->power.should_wakeup;
@@ -186,6 +172,13 @@ static inline bool device_wakeup_path(struct device *dev)
static inline void device_set_wakeup_path(struct device *dev) {}
+static inline void device_set_out_band_wakeup(struct device *dev) {}
+
+static inline bool device_out_band_wakeup(struct device *dev)
+{
+ return false;
+}
+
static inline void __pm_stay_awake(struct wakeup_source *ws) {}
static inline void pm_stay_awake(struct device *dev) {}
@@ -202,19 +195,68 @@ static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec,
#endif /* !CONFIG_PM_SLEEP */
+static inline bool device_awake_path(struct device *dev)
+{
+ return device_wakeup_path(dev);
+}
+
+static inline void device_set_awake_path(struct device *dev)
+{
+ device_set_wakeup_path(dev);
+}
+
static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
{
- return pm_wakeup_ws_event(ws, msec, false);
+ pm_wakeup_ws_event(ws, msec, false);
}
static inline void pm_wakeup_event(struct device *dev, unsigned int msec)
{
- return pm_wakeup_dev_event(dev, msec, false);
+ pm_wakeup_dev_event(dev, msec, false);
}
static inline void pm_wakeup_hard_event(struct device *dev)
{
- return pm_wakeup_dev_event(dev, 0, true);
+ pm_wakeup_dev_event(dev, 0, true);
+}
+
+/**
+ * device_init_wakeup - Device wakeup initialization.
+ * @dev: Device to handle.
+ * @enable: Whether or not to enable @dev as a wakeup device.
+ *
+ * By default, most devices should leave wakeup disabled. The exceptions are
+ * devices that everyone expects to be wakeup sources: keyboards, power buttons,
+ * possibly network interfaces, etc. Also, devices that don't generate their
+ * own wakeup requests but merely forward requests from one bus to another
+ * (like PCI bridges) should have wakeup enabled by default.
+ */
+static inline int device_init_wakeup(struct device *dev, bool enable)
+{
+ if (enable) {
+ device_set_wakeup_capable(dev, true);
+ return device_wakeup_enable(dev);
+ }
+ device_wakeup_disable(dev);
+ device_set_wakeup_capable(dev, false);
+ return 0;
+}
+
+static void device_disable_wakeup(void *dev)
+{
+ device_init_wakeup(dev, false);
+}
+
+/**
+ * devm_device_init_wakeup - Resource managed device wakeup initialization.
+ * @dev: Device to handle.
+ *
+ * This function is the devm managed version of device_init_wakeup(dev, true).
+ */
+static inline int devm_device_init_wakeup(struct device *dev)
+{
+ device_init_wakeup(dev, true);
+ return devm_add_action_or_reset(dev, device_disable_wakeup, dev);
}
#endif /* _LINUX_PM_WAKEUP_H */
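A minimal usage sketch of the new devres helper (the error message is hypothetical):

static int foo_probe(struct device *dev)
{
	int ret = devm_device_init_wakeup(dev);

	if (ret)
		return dev_err_probe(dev, ret, "failed to init wakeup\n");

	/* device_init_wakeup(dev, false) now runs automatically on unbind */
	return 0;
}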
diff --git a/include/linux/pmbus.h b/include/linux/pmbus.h
index 12cbbf305969..884040e1383b 100644
--- a/include/linux/pmbus.h
+++ b/include/linux/pmbus.h
@@ -43,6 +43,50 @@
*/
#define PMBUS_NO_CAPABILITY BIT(2)
+/*
+ * PMBUS_READ_STATUS_AFTER_FAILED_CHECK
+ *
+ * Some PMBus chips end up in an undefined state when trying to read an
+ * unsupported register. For such chips, it is necessary to reset the chip's
+ * PMBus controller to a known state after a failed register check, which can
+ * be done by reading a known register. With this flag set, the driver will
+ * try to read the STATUS register after each failed register check. That read
+ * may itself fail, but it puts the chip back into a known state.
+ */
+#define PMBUS_READ_STATUS_AFTER_FAILED_CHECK BIT(3)
+
+/*
+ * PMBUS_NO_WRITE_PROTECT
+ *
+ * Some PMBus chips respond with invalid data when reading the WRITE_PROTECT
+ * register. For such chips, this flag should be set so that the PMBus core
+ * driver doesn't use the WRITE_PROTECT command to determine its behavior.
+ */
+#define PMBUS_NO_WRITE_PROTECT BIT(4)
+
+/*
+ * PMBUS_USE_COEFFICIENTS_CMD
+ *
+ * When this flag is set the PMBus core driver will use the COEFFICIENTS
+ * register to initialize the coefficients for the direct mode format.
+ */
+#define PMBUS_USE_COEFFICIENTS_CMD BIT(5)
+
+/*
+ * PMBUS_OP_PROTECTED
+ * Set if the chip OPERATION command is protected and protection is not
+ * determined by the standard WRITE_PROTECT command.
+ */
+#define PMBUS_OP_PROTECTED BIT(6)
+
+/*
+ * PMBUS_VOUT_PROTECTED
+ * Set if the chip VOUT_COMMAND command is protected and protection is not
+ * determined by the standard WRITE_PROTECT command.
+ */
+#define PMBUS_VOUT_PROTECTED BIT(7)
+
struct pmbus_platform_data {
u32 flags; /* Device specific flags */
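For illustration, a board file might combine the new flags like this (the values are illustrative only, not taken from any real board):

static struct pmbus_platform_data foo_pmbus_pdata = {
	.flags = PMBUS_NO_WRITE_PROTECT |
		 PMBUS_READ_STATUS_AFTER_FAILED_CHECK,
};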
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
index 52453a24a24f..c677442d007c 100644
--- a/include/linux/pmu.h
+++ b/include/linux/pmu.h
@@ -13,7 +13,7 @@
#include <uapi/linux/pmu.h>
-extern int find_via_pmu(void);
+extern int __init find_via_pmu(void);
extern int pmu_request(struct adb_request *req,
void (*done)(struct adb_request *), int nbytes, ...);
diff --git a/include/linux/pnfs_osd_xdr.h b/include/linux/pnfs_osd_xdr.h
deleted file mode 100644
index 17d7d0d20eca..000000000000
--- a/include/linux/pnfs_osd_xdr.h
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * pNFS-osd on-the-wire data structures
- *
- * Copyright (C) 2007 Panasas Inc. [year of first publication]
- * All rights reserved.
- *
- * Benny Halevy <bhalevy@panasas.com>
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * See the file COPYING included with this distribution for more details.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the Panasas company nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __PNFS_OSD_XDR_H__
-#define __PNFS_OSD_XDR_H__
-
-#include <linux/nfs_fs.h>
-
-/*
- * draft-ietf-nfsv4-minorversion-22
- * draft-ietf-nfsv4-pnfs-obj-12
- */
-
-/* Layout Structure */
-
-enum pnfs_osd_raid_algorithm4 {
- PNFS_OSD_RAID_0 = 1,
- PNFS_OSD_RAID_4 = 2,
- PNFS_OSD_RAID_5 = 3,
- PNFS_OSD_RAID_PQ = 4 /* Reed-Solomon P+Q */
-};
-
-/* struct pnfs_osd_data_map4 {
- * uint32_t odm_num_comps;
- * length4 odm_stripe_unit;
- * uint32_t odm_group_width;
- * uint32_t odm_group_depth;
- * uint32_t odm_mirror_cnt;
- * pnfs_osd_raid_algorithm4 odm_raid_algorithm;
- * };
- */
-struct pnfs_osd_data_map {
- u32 odm_num_comps;
- u64 odm_stripe_unit;
- u32 odm_group_width;
- u32 odm_group_depth;
- u32 odm_mirror_cnt;
- u32 odm_raid_algorithm;
-};
-
-/* struct pnfs_osd_objid4 {
- * deviceid4 oid_device_id;
- * uint64_t oid_partition_id;
- * uint64_t oid_object_id;
- * };
- */
-struct pnfs_osd_objid {
- struct nfs4_deviceid oid_device_id;
- u64 oid_partition_id;
- u64 oid_object_id;
-};
-
-/* For printout. I use:
- * kprint("dev(%llx:%llx)", _DEVID_LO(pointer), _DEVID_HI(pointer));
- * BE style
- */
-#define _DEVID_LO(oid_device_id) \
- (unsigned long long)be64_to_cpup((__be64 *)(oid_device_id)->data)
-
-#define _DEVID_HI(oid_device_id) \
- (unsigned long long)be64_to_cpup(((__be64 *)(oid_device_id)->data) + 1)
-
-enum pnfs_osd_version {
- PNFS_OSD_MISSING = 0,
- PNFS_OSD_VERSION_1 = 1,
- PNFS_OSD_VERSION_2 = 2
-};
-
-struct pnfs_osd_opaque_cred {
- u32 cred_len;
- void *cred;
-};
-
-enum pnfs_osd_cap_key_sec {
- PNFS_OSD_CAP_KEY_SEC_NONE = 0,
- PNFS_OSD_CAP_KEY_SEC_SSV = 1,
-};
-
-/* struct pnfs_osd_object_cred4 {
- * pnfs_osd_objid4 oc_object_id;
- * pnfs_osd_version4 oc_osd_version;
- * pnfs_osd_cap_key_sec4 oc_cap_key_sec;
- * opaque oc_capability_key<>;
- * opaque oc_capability<>;
- * };
- */
-struct pnfs_osd_object_cred {
- struct pnfs_osd_objid oc_object_id;
- u32 oc_osd_version;
- u32 oc_cap_key_sec;
- struct pnfs_osd_opaque_cred oc_cap_key;
- struct pnfs_osd_opaque_cred oc_cap;
-};
-
-/* struct pnfs_osd_layout4 {
- * pnfs_osd_data_map4 olo_map;
- * uint32_t olo_comps_index;
- * pnfs_osd_object_cred4 olo_components<>;
- * };
- */
-struct pnfs_osd_layout {
- struct pnfs_osd_data_map olo_map;
- u32 olo_comps_index;
- u32 olo_num_comps;
- struct pnfs_osd_object_cred *olo_comps;
-};
-
-/* Device Address */
-enum pnfs_osd_targetid_type {
- OBJ_TARGET_ANON = 1,
- OBJ_TARGET_SCSI_NAME = 2,
- OBJ_TARGET_SCSI_DEVICE_ID = 3,
-};
-
-/* union pnfs_osd_targetid4 switch (pnfs_osd_targetid_type4 oti_type) {
- * case OBJ_TARGET_SCSI_NAME:
- * string oti_scsi_name<>;
- *
- * case OBJ_TARGET_SCSI_DEVICE_ID:
- * opaque oti_scsi_device_id<>;
- *
- * default:
- * void;
- * };
- *
- * union pnfs_osd_targetaddr4 switch (bool ota_available) {
- * case TRUE:
- * netaddr4 ota_netaddr;
- * case FALSE:
- * void;
- * };
- *
- * struct pnfs_osd_deviceaddr4 {
- * pnfs_osd_targetid4 oda_targetid;
- * pnfs_osd_targetaddr4 oda_targetaddr;
- * uint64_t oda_lun;
- * opaque oda_systemid<>;
- * pnfs_osd_object_cred4 oda_root_obj_cred;
- * opaque oda_osdname<>;
- * };
- */
-struct pnfs_osd_targetid {
- u32 oti_type;
- struct nfs4_string oti_scsi_device_id;
-};
-
-/* struct netaddr4 {
- * // see struct rpcb in RFC1833
- * string r_netid<>; // network id
- * string r_addr<>; // universal address
- * };
- */
-struct pnfs_osd_net_addr {
- struct nfs4_string r_netid;
- struct nfs4_string r_addr;
-};
-
-struct pnfs_osd_targetaddr {
- u32 ota_available;
- struct pnfs_osd_net_addr ota_netaddr;
-};
-
-struct pnfs_osd_deviceaddr {
- struct pnfs_osd_targetid oda_targetid;
- struct pnfs_osd_targetaddr oda_targetaddr;
- u8 oda_lun[8];
- struct nfs4_string oda_systemid;
- struct pnfs_osd_object_cred oda_root_obj_cred;
- struct nfs4_string oda_osdname;
-};
-
-/* LAYOUTCOMMIT: layoutupdate */
-
-/* union pnfs_osd_deltaspaceused4 switch (bool dsu_valid) {
- * case TRUE:
- * int64_t dsu_delta;
- * case FALSE:
- * void;
- * };
- *
- * struct pnfs_osd_layoutupdate4 {
- * pnfs_osd_deltaspaceused4 olu_delta_space_used;
- * bool olu_ioerr_flag;
- * };
- */
-struct pnfs_osd_layoutupdate {
- u32 dsu_valid;
- s64 dsu_delta;
- u32 olu_ioerr_flag;
-};
-
-/* LAYOUTRETURN: I/O Error Report */
-
-enum pnfs_osd_errno {
- PNFS_OSD_ERR_EIO = 1,
- PNFS_OSD_ERR_NOT_FOUND = 2,
- PNFS_OSD_ERR_NO_SPACE = 3,
- PNFS_OSD_ERR_BAD_CRED = 4,
- PNFS_OSD_ERR_NO_ACCESS = 5,
- PNFS_OSD_ERR_UNREACHABLE = 6,
- PNFS_OSD_ERR_RESOURCE = 7
-};
-
-/* struct pnfs_osd_ioerr4 {
- * pnfs_osd_objid4 oer_component;
- * length4 oer_comp_offset;
- * length4 oer_comp_length;
- * bool oer_iswrite;
- * pnfs_osd_errno4 oer_errno;
- * };
- */
-struct pnfs_osd_ioerr {
- struct pnfs_osd_objid oer_component;
- u64 oer_comp_offset;
- u64 oer_comp_length;
- u32 oer_iswrite;
- u32 oer_errno;
-};
-
-/* OSD XDR Client API */
-/* Layout helpers */
-/* Layout decoding is done in two parts:
- * 1. First Call pnfs_osd_xdr_decode_layout_map to read in only the header part
- * of the layout. @iter members need not be initialized.
- * Returned:
- * @layout members are set. (@layout->olo_comps set to NULL).
- *
- * Zero on success, or negative error if passed xdr is broken.
- *
- * 2. 2nd Call pnfs_osd_xdr_decode_layout_comp() in a loop until it returns
- * false, to decode the next component.
- * Returned:
- * true if there is more to decode or false if we are done or error.
- *
- * Example:
- * struct pnfs_osd_xdr_decode_layout_iter iter;
- * struct pnfs_osd_layout layout;
- * struct pnfs_osd_object_cred comp;
- * int status;
- *
- * status = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr);
- * if (unlikely(status))
- * goto err;
- * while(pnfs_osd_xdr_decode_layout_comp(&comp, &iter, xdr, &status)) {
- * // All of @comp strings point to inside the xdr_buffer
- * // or scratch buffer. Copy them out to user memory, e.g.:
- * copy_single_comp(dest_comp++, &comp);
- * }
- * if (unlikely(status))
- * goto err;
- */
-
-struct pnfs_osd_xdr_decode_layout_iter {
- unsigned total_comps;
- unsigned decoded_comps;
-};
-
-extern int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout,
- struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr);
-
-extern bool pnfs_osd_xdr_decode_layout_comp(struct pnfs_osd_object_cred *comp,
- struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr,
- int *err);
-
-/* Device Info helpers */
-
-/* Note: All strings inside @deviceaddr point to space inside @p.
- * @p should stay valid while @deviceaddr is in use.
- */
-extern void pnfs_osd_xdr_decode_deviceaddr(
- struct pnfs_osd_deviceaddr *deviceaddr, __be32 *p);
-
-/* layoutupdate (layout_commit) xdr helpers */
-extern int
-pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr,
- struct pnfs_osd_layoutupdate *lou);
-
-/* osd_ioerror encoding (layout_return) */
-extern __be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr);
-extern void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr);
-
-#endif /* __PNFS_OSD_XDR_H__ */
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index c2a7cfbca713..23fe3eaf242d 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -290,8 +290,8 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
}
struct pnp_fixup {
- char id[7];
- void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
+ char id[8];
+ void (*quirk_function) (struct pnp_dev *dev); /* fixup function */
};
/* config parameters */
@@ -383,7 +383,7 @@ struct pnp_driver {
struct device_driver driver;
};
-#define to_pnp_driver(drv) container_of(drv, struct pnp_driver, driver)
+#define to_pnp_driver(drv) container_of_const(drv, struct pnp_driver, driver)
struct pnp_card_driver {
struct list_head global_list;
@@ -419,8 +419,8 @@ struct pnp_protocol {
/* protocol specific suspend/resume */
bool (*can_wakeup) (struct pnp_dev *dev);
- int (*suspend) (struct pnp_dev * dev, pm_message_t state);
- int (*resume) (struct pnp_dev * dev);
+ int (*suspend) (struct pnp_dev *dev, pm_message_t state);
+ int (*resume) (struct pnp_dev *dev);
/* used by pnp layer only (look but don't touch) */
unsigned char number; /* protocol number */
@@ -435,8 +435,6 @@ struct pnp_protocol {
#define protocol_for_each_dev(protocol, dev) \
list_for_each_entry(dev, &(protocol)->devices, protocol_list)
-extern struct bus_type pnp_bus_type;
-
#if defined(CONFIG_PNP)
/* device management */
@@ -469,6 +467,8 @@ int compare_pnp_id(struct pnp_id *pos, const char *id);
int pnp_register_driver(struct pnp_driver *drv);
void pnp_unregister_driver(struct pnp_driver *drv);
+bool dev_is_pnp(const struct device *dev);
+
#else
/* device management */
@@ -492,7 +492,7 @@ static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; }
-static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0;}
+static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0; }
/* protocol helpers */
static inline int pnp_is_active(struct pnp_dev *dev) { return 0; }
@@ -500,6 +500,8 @@ static inline int compare_pnp_id(struct pnp_id *pos, const char *id) { return -E
static inline int pnp_register_driver(struct pnp_driver *drv) { return -ENODEV; }
static inline void pnp_unregister_driver(struct pnp_driver *drv) { }
+static inline bool dev_is_pnp(const struct device *dev) { return false; }
+
#endif /* CONFIG_PNP */
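The new predicate lets bus-agnostic code branch safely before downcasting with to_pnp_dev() (the handler below is hypothetical):

static void foo_inspect(struct device *dev)
{
	if (dev_is_pnp(dev)) {
		struct pnp_dev *pnp = to_pnp_dev(dev);

		dev_info(&pnp->dev, "found a PNP device\n");
	}
}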
/**
diff --git a/include/linux/poison.h b/include/linux/poison.h
index aff1c9250c82..299e2dd7da6d 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -38,11 +38,8 @@
* Magic nums for obj red zoning.
* Placed in the first word before and the first word after an obj.
*/
-#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */
-#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */
-
-#define SLUB_RED_INACTIVE 0xbb
-#define SLUB_RED_ACTIVE 0xcc
+#define SLUB_RED_INACTIVE 0xbb /* when obj is inactive */
+#define SLUB_RED_ACTIVE 0xcc /* when obj is active */
/* ...and for poisoning */
#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
@@ -52,12 +49,6 @@
/********** arch/$ARCH/mm/init.c **********/
#define POISON_FREE_INITMEM 0xcc
-/********** arch/ia64/hp/common/sba_iommu.c **********/
-/*
- * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a
- * value of "SBAIOMMU POISON\0" for spill-over poisoning.
- */
-
/********** fs/jbd/journal.c **********/
#define JBD_POISON_FREE 0x5b
#define JBD2_POISON_FREE 0x5c
@@ -78,4 +69,28 @@
/********** security/ **********/
#define KEY_DESTROY 0xbd
+/********** net/core/page_pool.c **********/
+/*
+ * page_pool uses additional free bits within this value to store data; see the
+ * definition of PP_DMA_INDEX_MASK in mm.h.
+ */
+#define PP_SIGNATURE (0x40 + POISON_POINTER_DELTA)
+
+/********** net/core/skbuff.c **********/
+#define SKB_LIST_POISON_NEXT ((void *)(0x800 + POISON_POINTER_DELTA))
+/********** net/ **********/
+#define NET_PTR_POISON ((void *)(0x801 + POISON_POINTER_DELTA))
+
+/********** kernel/bpf/ **********/
+#define BPF_PTR_POISON ((void *)(0xeB9FUL + POISON_POINTER_DELTA))
+
+/********** VFS **********/
+#define VFS_PTR_POISON ((void *)(0xF5 + POISON_POINTER_DELTA))
+
+/********** lib/stackdepot.c **********/
+#define STACK_DEPOT_POISON ((void *)(0xD390 + POISON_POINTER_DELTA))
+
+/********** io_uring/ **********/
+#define IO_URING_PTR_POISON ((void *)(0x1091UL + POISON_POINTER_DELTA))
+
#endif
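The pattern behind these constants, as a sketch (struct foo is hypothetical): a detached pointer is parked at a poison value so a stale dereference faults at a recognizable address rather than at NULL or garbage:

struct foo {
	struct foo *parent;
};

static void foo_detach(struct foo *f)
{
	/* any later dereference now oopses near 0x801 + the poison delta */
	f->parent = NET_PTR_POISON;
}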
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 1cdc32b1f1b0..12bb18e8b978 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -8,19 +8,13 @@
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/fs.h>
-#include <linux/sysctl.h>
#include <linux/uaccess.h>
#include <uapi/linux/poll.h>
#include <uapi/linux/eventpoll.h>
-extern struct ctl_table epoll_table[]; /* for sysctl */
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
additional memory. */
-#ifdef __clang__
-#define MAX_STACK_ALLOC 768
-#else
#define MAX_STACK_ALLOC 832
-#endif
#define FRONTEND_STACK_ALLOC 256
#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC
@@ -31,14 +25,14 @@ extern struct ctl_table epoll_table[]; /* for sysctl */
struct poll_table_struct;
-/*
+/*
* structures and helpers for f_op->poll implementations
*/
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
/*
- * Do not touch the structure directly, use the access functions
- * poll_does_not_wait() and poll_requested_events() instead.
+ * Do not touch the structure directly, use the access function
+ * poll_requested_events() instead.
*/
typedef struct poll_table_struct {
poll_queue_proc _qproc;
@@ -47,18 +41,16 @@ typedef struct poll_table_struct {
static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
- if (p && p->_qproc && wait_address)
+ if (p && p->_qproc) {
p->_qproc(filp, wait_address, p);
-}
-
-/*
- * Return true if it is guaranteed that poll will not wait. This is the case
- * if the poll() of another file descriptor in the set got an event, so there
- * is no need for waiting.
- */
-static inline bool poll_does_not_wait(const poll_table *p)
-{
- return p == NULL || p->_qproc == NULL;
+ /*
+ * This memory barrier is paired in the wq_has_sleeper().
+ * See the comment above prepare_to_wait(), we need to
+ * ensure that subsequent tests in this thread can't be
+ * reordered with __add_wait_queue() in _qproc() paths.
+ */
+ smp_mb();
+ }
}
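A typical f_op->poll implementation for reference (struct foo_dev and data_ready() are hypothetical); with the change above, poll_wait() also issues the barrier pairing with wq_has_sleeper() on the wakeup side, so the readiness test below cannot be reordered before the wait-queue registration:

static __poll_t foo_poll(struct file *file, poll_table *wait)
{
	struct foo_dev *fdev = file->private_data;
	__poll_t mask = 0;

	poll_wait(file, &fdev->waitq, wait);
	if (data_ready(fdev))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}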
/*
diff --git a/include/linux/polynomial.h b/include/linux/polynomial.h
new file mode 100644
index 000000000000..9e074a0bb6fa
--- /dev/null
+++ b/include/linux/polynomial.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ */
+
+#ifndef _POLYNOMIAL_H
+#define _POLYNOMIAL_H
+
+/*
+ * struct polynomial_term - one term descriptor of a polynomial
+ * @deg: degree of the term.
+ * @coef: multiplication factor of the term.
+ * @divider: part of the total divider distributed to this term's degree.
+ * @divider_leftover: divider leftover, which couldn't be redistributed.
+ */
+struct polynomial_term {
+ unsigned int deg;
+ long coef;
+ long divider;
+ long divider_leftover;
+};
+
+/*
+ * struct polynomial - a polynomial descriptor
+ * @total_divider: total data divider.
+ * @terms: polynomial terms; the last term must have degree 0.
+ */
+struct polynomial {
+ long total_divider;
+ struct polynomial_term terms[];
+};
+
+long polynomial_calc(const struct polynomial *poly, long data);
+
+#endif
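Usage sketch of wiring the API together; the coefficient values below are invented (real ones come from a chip datasheet), and the exact divider-redistribution semantics live in lib/math/polynomial.c:

/* Made-up example terms for converting a raw ADC code to a temperature. */
static const struct polynomial temp_poly = {
	.total_divider = 1,
	.terms = {
		{ .deg = 1, .coef = -16743, .divider = 1000, .divider_leftover = 1 },
		/* the last term must have degree 0 */
		{ .deg = 0, .coef = 18206, .divider = 1, .divider_leftover = 1 },
	},
};

static long foo_code_to_temp(long adc_code)
{
	return polynomial_calc(&temp_poly, adc_code);
}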
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 468328b1e1dd..a500d3160fe8 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -14,6 +14,7 @@
#include <linux/rwsem.h>
struct posix_clock;
+struct posix_clock_context;
/**
* struct posix_clock_operations - functional interface to the clock
@@ -50,18 +51,18 @@ struct posix_clock_operations {
/*
* Optional character device methods:
*/
- long (*ioctl) (struct posix_clock *pc,
- unsigned int cmd, unsigned long arg);
+ long (*ioctl)(struct posix_clock_context *pccontext, unsigned int cmd,
+ unsigned long arg);
- int (*open) (struct posix_clock *pc, fmode_t f_mode);
+ int (*open)(struct posix_clock_context *pccontext, fmode_t f_mode);
- __poll_t (*poll) (struct posix_clock *pc,
- struct file *file, poll_table *wait);
+ __poll_t (*poll)(struct posix_clock_context *pccontext, struct file *file,
+ poll_table *wait);
- int (*release) (struct posix_clock *pc);
+ int (*release)(struct posix_clock_context *pccontext);
- ssize_t (*read) (struct posix_clock *pc,
- uint flags, char __user *buf, size_t cnt);
+ ssize_t (*read)(struct posix_clock_context *pccontext, uint flags,
+ char __user *buf, size_t cnt);
};
/**
@@ -91,6 +92,28 @@ struct posix_clock {
};
/**
+ * struct posix_clock_context - represents clock file operations context
+ *
+ * @clk: Pointer to the clock
+ * @fp: Pointer to the file used to open the clock
+ * @private_clkdata: Pointer to user data
+ *
+ * Drivers should use struct posix_clock_context in their character device
+ * file operation methods to access the posix clock. In particular,
+ * the file pointer can be used to verify correct access mode for ioctl()
+ * calls.
+ *
+ * Drivers can store a private data structure during the open operation
+ * if they have specific information that is required in other file
+ * operations.
+ */
+struct posix_clock_context {
+ struct posix_clock *clk;
+ struct file *fp;
+ void *private_clkdata;
+};
+
+/**
* posix_clock_register() - register a new clock
* @clk: Pointer to the clock. Caller must provide 'ops' field
* @dev: Pointer to the initialized device. Caller must provide
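For example, a clock driver might use the new context to keep per-open state (struct foo_state is hypothetical):

static int foo_clock_open(struct posix_clock_context *pccontext, fmode_t fmode)
{
	struct foo_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return -ENOMEM;

	pccontext->private_clkdata = st;
	return 0;
}

static int foo_clock_release(struct posix_clock_context *pccontext)
{
	kfree(pccontext->private_clkdata);
	pccontext->private_clkdata = NULL;
	return 0;
}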
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 896c16d2c5fb..4d3dbcef379e 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -2,39 +2,19 @@
#ifndef _linux_POSIX_TIMERS_H
#define _linux_POSIX_TIMERS_H
-#include <linux/spinlock.h>
-#include <linux/list.h>
#include <linux/alarmtimer.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pid.h>
+#include <linux/posix-timers_types.h>
+#include <linux/rcuref.h>
+#include <linux/spinlock.h>
#include <linux/timerqueue.h>
-#include <linux/task_work.h>
struct kernel_siginfo;
struct task_struct;
-
-/*
- * Bit fields within a clockid:
- *
- * The most significant 29 bits hold either a pid or a file descriptor.
- *
- * Bit 2 indicates whether a cpu clock refers to a thread or a process.
- *
- * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3.
- *
- * A clockid is invalid if bits 2, 1, and 0 are all set.
- */
-#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3))
-#define CPUCLOCK_PERTHREAD(clock) \
- (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0)
-
-#define CPUCLOCK_PERTHREAD_MASK 4
-#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
-#define CPUCLOCK_CLOCK_MASK 3
-#define CPUCLOCK_PROF 0
-#define CPUCLOCK_VIRT 1
-#define CPUCLOCK_SCHED 2
-#define CPUCLOCK_MAX 3
-#define CLOCKFD CPUCLOCK_MAX
-#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
+struct sigqueue;
+struct k_itimer;
static inline clockid_t make_process_cpuclock(const unsigned int pid,
const clockid_t clock)
@@ -57,22 +37,33 @@ static inline int clockid_to_fd(const clockid_t clk)
return ~(clk >> 3);
}
+static inline bool clockid_aux_valid(clockid_t id)
+{
+ return IS_ENABLED(CONFIG_POSIX_AUX_CLOCKS) && id >= CLOCK_AUX && id <= CLOCK_AUX_LAST;
+}
+
#ifdef CONFIG_POSIX_TIMERS
+#include <linux/signal_types.h>
+
/**
* cpu_timer - Posix CPU timer representation for k_itimer
* @node: timerqueue node to queue in the task/sig
* @head: timerqueue head on which this timer is queued
- * @task: Pointer to target task
+ * @pid: Pointer to target task PID
* @elist: List head for the expiry list
* @firing: Timer is currently firing
+ * @nanosleep: Timer is used for nanosleep and is not a regular posix-timer
+ * @handling: Pointer to the task which handles expiry
*/
struct cpu_timer {
- struct timerqueue_node node;
- struct timerqueue_head *head;
- struct pid *pid;
- struct list_head elist;
- int firing;
+ struct timerqueue_node node;
+ struct timerqueue_head *head;
+ struct pid *pid;
+ struct list_head elist;
+ bool firing;
+ bool nanosleep;
+ struct task_struct __rcu *handling;
};
static inline bool cpu_timer_enqueue(struct timerqueue_head *head,
@@ -82,12 +73,19 @@ static inline bool cpu_timer_enqueue(struct timerqueue_head *head,
return timerqueue_add(head, &ctmr->node);
}
-static inline void cpu_timer_dequeue(struct cpu_timer *ctmr)
+static inline bool cpu_timer_queued(struct cpu_timer *ctmr)
{
- if (ctmr->head) {
+ return !!ctmr->head;
+}
+
+static inline bool cpu_timer_dequeue(struct cpu_timer *ctmr)
+{
+ if (cpu_timer_queued(ctmr)) {
timerqueue_del(ctmr->head, &ctmr->node);
ctmr->head = NULL;
+ return true;
}
+ return false;
}
static inline u64 cpu_timer_getexpires(struct cpu_timer *ctmr)
@@ -100,42 +98,6 @@ static inline void cpu_timer_setexpires(struct cpu_timer *ctmr, u64 exp)
ctmr->node.expires = exp;
}
-/**
- * posix_cputimer_base - Container per posix CPU clock
- * @nextevt: Earliest-expiration cache
- * @tqhead: timerqueue head for cpu_timers
- */
-struct posix_cputimer_base {
- u64 nextevt;
- struct timerqueue_head tqhead;
-};
-
-/**
- * posix_cputimers - Container for posix CPU timer related data
- * @bases: Base container for posix CPU clocks
- * @timers_active: Timers are queued.
- * @expiry_active: Timer expiry is active. Used for
- * process wide timers to avoid multiple
- * task trying to handle expiry concurrently
- *
- * Used in task_struct and signal_struct
- */
-struct posix_cputimers {
- struct posix_cputimer_base bases[CPUCLOCK_MAX];
- unsigned int timers_active;
- unsigned int expiry_active;
-};
-
-/**
- * posix_cputimers_work - Container for task work based posix CPU timer expiry
- * @work: The task work to be scheduled
- * @scheduled: @work has been scheduled already, no further processing
- */
-struct posix_cputimers_work {
- struct callback_head work;
- unsigned int scheduled;
-};
-
static inline void posix_cputimers_init(struct posix_cputimers *pct)
{
memset(pct, 0, sizeof(*pct));
@@ -152,6 +114,13 @@ static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct,
pct->bases[CPUCLOCK_SCHED].nextevt = runtime;
}
+void posixtimer_rearm_itimer(struct task_struct *p);
+bool posixtimer_init_sigqueue(struct sigqueue *q);
+void posixtimer_send_sigqueue(struct k_itimer *tmr);
+bool posixtimer_deliver_signal(struct kernel_siginfo *info, struct sigqueue *timer_sigq);
+void posixtimer_free_timer(struct k_itimer *timer);
+long posixtimer_create_prctl(unsigned long ctrl);
+
/* Init task static initializer */
#define INIT_CPU_TIMERBASE(b) { \
.nextevt = U64_MAX, \
@@ -168,64 +137,79 @@ static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct,
.bases = INIT_CPU_TIMERBASES(s.posix_cputimers.bases), \
},
#else
-struct posix_cputimers { };
struct cpu_timer { };
#define INIT_CPU_TIMERS(s)
static inline void posix_cputimers_init(struct posix_cputimers *pct) { }
static inline void posix_cputimers_group_init(struct posix_cputimers *pct,
u64 cpu_limit) { }
+static inline void posixtimer_rearm_itimer(struct task_struct *p) { }
+static inline bool posixtimer_deliver_signal(struct kernel_siginfo *info,
+ struct sigqueue *timer_sigq) { return false; }
+static inline void posixtimer_free_timer(struct k_itimer *timer) { }
+static inline long posixtimer_create_prctl(unsigned long ctrl) { return -EINVAL; }
#endif
#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
+void clear_posix_cputimers_work(struct task_struct *p);
void posix_cputimers_init_work(void);
#else
+static inline void clear_posix_cputimers_work(struct task_struct *p) { }
static inline void posix_cputimers_init_work(void) { }
#endif
-#define REQUEUE_PENDING 1
-
/**
* struct k_itimer - POSIX.1b interval timer structure.
- * @list: List head for binding the timer to signals->posix_timers
+ * @list: List node for binding the timer to tsk::signal::posix_timers
+ * @ignored_list: List node for tracking ignored timers in tsk::signal::ignored_posix_timers
* @t_hash: Entry in the posix timer hash table
* @it_lock: Lock protecting the timer
* @kclock: Pointer to the k_clock struct handling this timer
* @it_clock: The posix timer clock id
* @it_id: The posix timer id for identifying the timer
- * @it_active: Marker that timer is active
+ * @it_status: The status of the timer
+ * @it_sig_periodic: The periodic status at signal delivery
* @it_overrun: The overrun counter for pending signals
* @it_overrun_last: The overrun at the time of the last delivered signal
- * @it_requeue_pending: Indicator that timer waits for being requeued on
- * signal delivery
+ * @it_signal_seq: Sequence count to control signal delivery
+ * @it_sigqueue_seq: The sequence count at the point where the signal was queued
* @it_sigev_notify: The notify word of sigevent struct for signal delivery
* @it_interval: The interval for periodic timers
 * @it_signal: Pointer to the creator's signal struct
* @it_pid: The pid of the process/task targeted by the signal
* @it_process: The task to wakeup on clock_nanosleep (CPU timers)
- * @sigq: Pointer to preallocated sigqueue
+ * @rcuref: Reference count for life time management
+ * @sigq: Embedded sigqueue
* @it: Union representing the various posix timer type
* internals.
* @rcu: RCU head for freeing the timer.
*/
struct k_itimer {
- struct list_head list;
+ /* 1st cacheline contains read-mostly fields */
struct hlist_node t_hash;
- spinlock_t it_lock;
- const struct k_clock *kclock;
- clockid_t it_clock;
+ struct hlist_node list;
timer_t it_id;
- int it_active;
+ clockid_t it_clock;
+ int it_sigev_notify;
+ enum pid_type it_pid_type;
+ struct signal_struct *it_signal;
+ const struct k_clock *kclock;
+
+ /* 2nd cacheline and above contain fields which are modified regularly */
+ spinlock_t it_lock;
+ int it_status;
+ bool it_sig_periodic;
s64 it_overrun;
s64 it_overrun_last;
- int it_requeue_pending;
- int it_sigev_notify;
+ unsigned int it_signal_seq;
+ unsigned int it_sigqueue_seq;
ktime_t it_interval;
- struct signal_struct *it_signal;
+ struct hlist_node ignored_list;
union {
struct pid *it_pid;
struct task_struct *it_process;
};
- struct sigqueue *sigq;
+ struct sigqueue sigq;
+ rcuref_t rcuref;
union {
struct {
struct hrtimer timer;
@@ -236,7 +220,7 @@ struct k_itimer {
} alarm;
} it;
struct rcu_head rcu;
-};
+} ____cacheline_aligned_in_smp;
void run_posix_cpu_timers(void);
void posix_cpu_timers_exit(struct task_struct *task);
@@ -244,7 +228,38 @@ void posix_cpu_timers_exit_group(struct task_struct *task);
void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
u64 *newval, u64 *oldval);
-void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
+int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
+
+#ifdef CONFIG_POSIX_TIMERS
+static inline void posixtimer_putref(struct k_itimer *tmr)
+{
+ if (rcuref_put(&tmr->rcuref))
+ posixtimer_free_timer(tmr);
+}
+
+static inline void posixtimer_sigqueue_getref(struct sigqueue *q)
+{
+ struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
+
+ WARN_ON_ONCE(!rcuref_get(&tmr->rcuref));
+}
+
+static inline void posixtimer_sigqueue_putref(struct sigqueue *q)
+{
+ struct k_itimer *tmr = container_of(q, struct k_itimer, sigq);
+
+ posixtimer_putref(tmr);
+}
+
+static inline bool posixtimer_valid(const struct k_itimer *timer)
+{
+ unsigned long val = (unsigned long)timer->it_signal;
+
+ return !(val & 0x1UL);
+}
+#else /* CONFIG_POSIX_TIMERS */
+static inline void posixtimer_sigqueue_getref(struct sigqueue *q) { }
+static inline void posixtimer_sigqueue_putref(struct sigqueue *q) { }
+#endif /* !CONFIG_POSIX_TIMERS */
-void posixtimer_rearm(struct kernel_siginfo *info);
#endif
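The reference-counting contract these helpers encode, as a sketch only (the function name is hypothetical): whoever queues the embedded sigqueue takes a reference on the timer, and the final put frees it:

static void foo_queue_and_release(struct k_itimer *tmr)
{
	posixtimer_sigqueue_getref(&tmr->sigq);	/* signal queued */
	/* ... signal delivered or discarded ... */
	posixtimer_sigqueue_putref(&tmr->sigq);	/* may free the timer */
}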
diff --git a/include/linux/posix-timers_types.h b/include/linux/posix-timers_types.h
new file mode 100644
index 000000000000..a4712c1008c9
--- /dev/null
+++ b/include/linux/posix-timers_types.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _linux_POSIX_TIMERS_TYPES_H
+#define _linux_POSIX_TIMERS_TYPES_H
+
+#include <linux/mutex_types.h>
+#include <linux/timerqueue_types.h>
+#include <linux/types.h>
+
+/*
+ * Bit fields within a clockid:
+ *
+ * The most significant 29 bits hold either a pid or a file descriptor.
+ *
+ * Bit 2 indicates whether a cpu clock refers to a thread or a process.
+ *
+ * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3.
+ *
+ * A clockid is invalid if bits 2, 1, and 0 are all set.
+ */
+#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3))
+#define CPUCLOCK_PERTHREAD(clock) \
+ (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0)
+
+#define CPUCLOCK_PERTHREAD_MASK 4
+#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
+#define CPUCLOCK_CLOCK_MASK 3
+#define CPUCLOCK_PROF 0
+#define CPUCLOCK_VIRT 1
+#define CPUCLOCK_SCHED 2
+#define CPUCLOCK_MAX 3
+#define CLOCKFD CPUCLOCK_MAX
+#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
+
+#ifdef CONFIG_POSIX_TIMERS
+
+/**
+ * posix_cputimer_base - Container per posix CPU clock
+ * @nextevt: Earliest-expiration cache
+ * @tqhead: timerqueue head for cpu_timers
+ */
+struct posix_cputimer_base {
+ u64 nextevt;
+ struct timerqueue_head tqhead;
+};
+
+/**
+ * posix_cputimers - Container for posix CPU timer related data
+ * @bases: Base container for posix CPU clocks
+ * @timers_active: Timers are queued.
+ * @expiry_active: Timer expiry is active. Used for
+ * process-wide timers to avoid multiple
+ * tasks trying to handle expiry concurrently
+ *
+ * Used in task_struct and signal_struct
+ */
+struct posix_cputimers {
+ struct posix_cputimer_base bases[CPUCLOCK_MAX];
+ unsigned int timers_active;
+ unsigned int expiry_active;
+};
+
+/**
+ * posix_cputimers_work - Container for task work based posix CPU timer expiry
+ * @work: The task work to be scheduled
+ * @mutex: Mutex held around expiry in context of this task work
+ * @scheduled: @work has been scheduled already, no further processing
+ */
+struct posix_cputimers_work {
+ struct callback_head work;
+ struct mutex mutex;
+ unsigned int scheduled;
+};
+
+#else /* CONFIG_POSIX_TIMERS */
+
+struct posix_cputimers { };
+
+#endif /* CONFIG_POSIX_TIMERS */
+
+#endif /* _linux_POSIX_TIMERS_TYPES_H */
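To make the bit layout concrete, a self-checking decoding sketch (assuming the encoding produced by make_process_cpuclock(), i.e. the inverted pid in the upper bits):

static void foo_decode_clockid(void)
{
	clockid_t clk = make_process_cpuclock(1234, CPUCLOCK_PROF);

	WARN_ON(CPUCLOCK_PID(clk) != 1234);		/* pid field */
	WARN_ON(CPUCLOCK_PERTHREAD(clk));		/* process, not thread */
	WARN_ON(CPUCLOCK_WHICH(clk) != CPUCLOCK_PROF);	/* clock type */
}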
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index 307094ebb88c..62d497763e25 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -27,11 +27,16 @@ struct posix_acl_entry {
};
struct posix_acl {
- refcount_t a_refcount;
- struct rcu_head a_rcu;
- unsigned int a_count;
- struct posix_acl_entry a_entries[];
+ /* New members MUST be added within the struct_group() macro below. */
+ struct_group_tagged(posix_acl_hdr, hdr,
+ refcount_t a_refcount;
+ unsigned int a_count;
+ struct rcu_head a_rcu;
+ );
+ struct posix_acl_entry a_entries[] __counted_by(a_count);
};
+static_assert(offsetof(struct posix_acl, a_entries) == sizeof(struct posix_acl_hdr),
+ "struct member likely outside of struct_group_tagged()");
#define FOREACH_ACL_ENTRY(pa, acl, pe) \
for(pa=(acl)->a_entries, pe=pa+(acl)->a_count; pa<pe; pa++)
@@ -62,34 +67,36 @@ posix_acl_release(struct posix_acl *acl)
/* posix_acl.c */
extern void posix_acl_init(struct posix_acl *, int);
-extern struct posix_acl *posix_acl_alloc(int, gfp_t);
+extern struct posix_acl *posix_acl_alloc(unsigned int count, gfp_t flags);
extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t);
extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *);
extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *);
extern int __posix_acl_chmod(struct posix_acl **, gfp_t, umode_t);
extern struct posix_acl *get_posix_acl(struct inode *, int);
-extern int set_posix_acl(struct user_namespace *, struct inode *, int,
- struct posix_acl *);
+int set_posix_acl(struct mnt_idmap *, struct dentry *, int,
+ struct posix_acl *);
+
+struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
+struct posix_acl *posix_acl_clone(const struct posix_acl *acl, gfp_t flags);
#ifdef CONFIG_FS_POSIX_ACL
-int posix_acl_chmod(struct user_namespace *, struct inode *, umode_t);
+int posix_acl_chmod(struct mnt_idmap *, struct dentry *, umode_t);
extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **,
struct posix_acl **);
-int posix_acl_update_mode(struct user_namespace *, struct inode *, umode_t *,
+int posix_acl_update_mode(struct mnt_idmap *, struct inode *, umode_t *,
struct posix_acl **);
-extern int simple_set_acl(struct user_namespace *, struct inode *,
- struct posix_acl *, int);
+int simple_set_acl(struct mnt_idmap *, struct dentry *,
+ struct posix_acl *, int);
extern int simple_acl_create(struct inode *, struct inode *);
struct posix_acl *get_cached_acl(struct inode *inode, int type);
-struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl);
void forget_cached_acl(struct inode *inode, int type);
void forget_all_cached_acls(struct inode *inode);
int posix_acl_valid(struct user_namespace *, const struct posix_acl *);
-int posix_acl_permission(struct user_namespace *, struct inode *,
+int posix_acl_permission(struct mnt_idmap *, struct inode *,
const struct posix_acl *, int);
static inline void cache_no_acl(struct inode *inode)
@@ -97,9 +104,18 @@ static inline void cache_no_acl(struct inode *inode)
inode->i_acl = NULL;
inode->i_default_acl = NULL;
}
+
+int vfs_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ const char *acl_name, struct posix_acl *kacl);
+struct posix_acl *vfs_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name);
+int vfs_remove_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+ const char *acl_name);
+int posix_acl_listxattr(struct inode *inode, char **buffer,
+ ssize_t *remaining_size);
#else
-static inline int posix_acl_chmod(struct user_namespace *mnt_userns,
- struct inode *inode, umode_t mode)
+static inline int posix_acl_chmod(struct mnt_idmap *idmap,
+ struct dentry *dentry, umode_t mode)
{
return 0;
}
@@ -124,8 +140,33 @@ static inline int posix_acl_create(struct inode *inode, umode_t *mode,
static inline void forget_all_cached_acls(struct inode *inode)
{
}
+
+static inline int vfs_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *name,
+ struct posix_acl *acl)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline struct posix_acl *vfs_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int vfs_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name)
+{
+ return -EOPNOTSUPP;
+}
+static inline int posix_acl_listxattr(struct inode *inode, char **buffer,
+ ssize_t *remaining_size)
+{
+ return 0;
+}
#endif /* CONFIG_FS_POSIX_ACL */
-struct posix_acl *get_acl(struct inode *inode, int type);
+struct posix_acl *get_inode_acl(struct inode *inode, int type);
#endif /* __LINUX_POSIX_ACL_H */
diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h
index 060e8d203181..e86f3b731da2 100644
--- a/include/linux/posix_acl_xattr.h
+++ b/include/linux/posix_acl_xattr.h
@@ -33,27 +33,43 @@ posix_acl_xattr_count(size_t size)
}
#ifdef CONFIG_FS_POSIX_ACL
-void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
- void *value, size_t size);
-void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
- void *value, size_t size);
+struct posix_acl *posix_acl_from_xattr(struct user_namespace *user_ns,
+ const void *value, size_t size);
#else
-static inline void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
- void *value, size_t size)
-{
-}
-static inline void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
- void *value, size_t size)
+static inline struct posix_acl *
+posix_acl_from_xattr(struct user_namespace *user_ns, const void *value,
+ size_t size)
{
+ return ERR_PTR(-EOPNOTSUPP);
}
#endif
-struct posix_acl *posix_acl_from_xattr(struct user_namespace *user_ns,
- const void *value, size_t size);
int posix_acl_to_xattr(struct user_namespace *user_ns,
const struct posix_acl *acl, void *buffer, size_t size);
+static inline const char *posix_acl_xattr_name(int type)
+{
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ return XATTR_NAME_POSIX_ACL_ACCESS;
+ case ACL_TYPE_DEFAULT:
+ return XATTR_NAME_POSIX_ACL_DEFAULT;
+ }
+
+ return "";
+}
+
+static inline int posix_acl_type(const char *name)
+{
+ if (strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS) == 0)
+ return ACL_TYPE_ACCESS;
+ else if (strcmp(name, XATTR_NAME_POSIX_ACL_DEFAULT) == 0)
+ return ACL_TYPE_DEFAULT;
+
+ return -1;
+}
-extern const struct xattr_handler posix_acl_access_xattr_handler;
-extern const struct xattr_handler posix_acl_default_xattr_handler;
+/* These are legacy handlers. Don't use them for new code. */
+extern const struct xattr_handler nop_posix_acl_access;
+extern const struct xattr_handler nop_posix_acl_default;
#endif /* _POSIX_ACL_XATTR_H */
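The two helpers above are inverses over the ACL xattr names; a self-checking sketch (the wrapper function is hypothetical):

static void foo_check_acl_names(void)
{
	int type = posix_acl_type(XATTR_NAME_POSIX_ACL_ACCESS);

	WARN_ON(type != ACL_TYPE_ACCESS);
	WARN_ON(strcmp(posix_acl_xattr_name(type),
		       XATTR_NAME_POSIX_ACL_ACCESS));
}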
diff --git a/include/linux/power/ab8500.h b/include/linux/power/ab8500.h
deleted file mode 100644
index 51976b52f373..000000000000
--- a/include/linux/power/ab8500.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson 2013
- * Author: Hongbo Zhang <hongbo.zhang@linaro.com>
- */
-
-#ifndef PWR_AB8500_H
-#define PWR_AB8500_H
-
-extern const struct abx500_res_to_temp ab8500_temp_tbl_a_thermistor[];
-extern const int ab8500_temp_tbl_a_size;
-
-extern const struct abx500_res_to_temp ab8500_temp_tbl_b_thermistor[];
-extern const int ab8500_temp_tbl_b_size;
-
-#endif /* PWR_AB8500_H */
diff --git a/include/linux/power/bq25890_charger.h b/include/linux/power/bq25890_charger.h
new file mode 100644
index 000000000000..c706ddb77a08
--- /dev/null
+++ b/include/linux/power/bq25890_charger.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Platform data for the TI bq25890 battery charger driver.
+ */
+
+#ifndef _BQ25890_CHARGER_H_
+#define _BQ25890_CHARGER_H_
+
+struct regulator_init_data;
+
+struct bq25890_platform_data {
+ const struct regulator_init_data *regulator_init_data;
+};
+
+#endif
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index a1aa68141d0b..d56e1276aafe 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -2,6 +2,8 @@
#ifndef __LINUX_BQ27X00_BATTERY_H__
#define __LINUX_BQ27X00_BATTERY_H__
+#include <linux/power_supply.h>
+
enum bq27xxx_chip {
BQ27000 = 1, /* bq27000, bq27200 */
BQ27010, /* bq27010, bq27210 */
@@ -45,21 +47,12 @@ struct bq27xxx_access_methods {
};
struct bq27xxx_reg_cache {
- int temperature;
- int time_to_empty;
- int time_to_empty_avg;
- int time_to_full;
- int charge_full;
- int cycle_count;
int capacity;
- int energy;
int flags;
- int health;
};
struct bq27xxx_device_info {
struct device *dev;
- int id;
enum bq27xxx_chip chip;
u32 opts;
const char *name;
@@ -68,7 +61,11 @@ struct bq27xxx_device_info {
struct bq27xxx_access_methods bus;
struct bq27xxx_reg_cache cache;
int charge_design_full;
+ int voltage_min_design;
+ int voltage_max_design;
+ bool removed;
unsigned long last_update;
+ union power_supply_propval last_status;
struct delayed_work work;
struct power_supply *bat;
struct list_head list;
@@ -79,5 +76,6 @@ struct bq27xxx_device_info {
void bq27xxx_battery_update(struct bq27xxx_device_info *di);
int bq27xxx_battery_setup(struct bq27xxx_device_info *di);
void bq27xxx_battery_teardown(struct bq27xxx_device_info *di);
+extern const struct dev_pm_ops bq27xxx_battery_battery_pm_ops;
#endif
diff --git a/include/linux/power/generic-adc-battery.h b/include/linux/power/generic-adc-battery.h
deleted file mode 100644
index c68cbf34cd34..000000000000
--- a/include/linux/power/generic-adc-battery.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012, Anish Kumar <anish198519851985@gmail.com>
- */
-
-#ifndef GENERIC_ADC_BATTERY_H
-#define GENERIC_ADC_BATTERY_H
-
-/**
- * struct gab_platform_data - platform_data for generic adc iio battery driver.
- * @battery_info: recommended structure to specify static power supply
- * parameters
- * @cal_charge: calculate charge level.
- * @jitter_delay: delay required after the interrupt to check battery
- * status. Default is 10 ms.
- */
-struct gab_platform_data {
- struct power_supply_info battery_info;
- int (*cal_charge)(long value);
- int jitter_delay;
-};
-
-#endif /* GENERIC_ADC_BATTERY_H */
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index d55c746ac56e..c417abd2ab70 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -69,7 +69,7 @@ enum max17042_register {
MAX17042_RelaxCFG = 0x2A,
MAX17042_MiscCFG = 0x2B,
MAX17042_TGAIN = 0x2C,
- MAx17042_TOFF = 0x2D,
+ MAX17042_TOFF = 0x2D,
MAX17042_CGAIN = 0x2E,
MAX17042_COFF = 0x2F,
@@ -78,7 +78,7 @@ enum max17042_register {
MAX17042_T_empty = 0x34,
MAX17042_FullCAP0 = 0x35,
- MAX17042_LAvg_empty = 0x36,
+ MAX17042_IAvg_empty = 0x36,
MAX17042_FCTC = 0x37,
MAX17042_RCOMP0 = 0x38,
MAX17042_TempCo = 0x39,
@@ -110,13 +110,14 @@ enum max17042_register {
MAX17042_VFSOC = 0xFF,
};
+/* Registers specific to max17055 only */
enum max17055_register {
MAX17055_QRes = 0x0C,
+ MAX17055_RCell = 0x14,
MAX17055_TTF = 0x20,
- MAX17055_V_empty = 0x3A,
- MAX17055_TIMER = 0x3E,
+ MAX17055_DieTemp = 0x34,
MAX17055_USER_MEM = 0x40,
- MAX17055_RGAIN = 0x42,
+ MAX17055_RGAIN = 0x43,
MAX17055_ConvgCfg = 0x49,
MAX17055_VFRemCap = 0x4A,
@@ -155,13 +156,14 @@ enum max17055_register {
MAX17055_AtAvCap = 0xDF,
};
-/* Registers specific to max17047/50 */
+/* Registers specific to max17047/50/55 */
enum max17047_register {
MAX17047_QRTbl00 = 0x12,
MAX17047_FullSOCThr = 0x13,
MAX17047_QRTbl10 = 0x22,
MAX17047_QRTbl20 = 0x32,
MAX17047_V_empty = 0x3A,
+ MAX17047_TIMER = 0x3E,
MAX17047_QRTbl30 = 0x42,
};
@@ -219,7 +221,7 @@ struct max17042_config_data {
u16 fullcap; /* 0x10 */
u16 fullcapnom; /* 0x23 */
u16 socempty; /* 0x33 */
- u16 lavg_empty; /* 0x36 */
+ u16 iavg_empty; /* 0x36 */
u16 dqacc; /* 0x45 */
u16 dpacc; /* 0x46 */
u16 qrtbl00; /* 0x12 */
diff --git a/include/linux/power/max77705_charger.h b/include/linux/power/max77705_charger.h
new file mode 100644
index 000000000000..b3950ce0625e
--- /dev/null
+++ b/include/linux/power/max77705_charger.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Maxim MAX77705 definitions.
+ *
+ * Copyright (C) 2015 Samsung Electronics, Inc.
+ * Copyright (C) 2025 Dzmitry Sankouski <dsankouski@gmail.com>
+ */
+
+#ifndef __MAX77705_CHARGER_H
+#define __MAX77705_CHARGER_H __FILE__
+
+#include <linux/regmap.h>
+
+/* MAX77705_CHG_REG_CHG_INT */
+#define MAX77705_BYP_I (0)
+#define MAX77705_INP_LIMIT_I (1)
+#define MAX77705_BATP_I (2)
+#define MAX77705_BAT_I (3)
+#define MAX77705_CHG_I (4)
+#define MAX77705_WCIN_I (5)
+#define MAX77705_CHGIN_I (6)
+#define MAX77705_AICL_I (7)
+
+/* MAX77705_CHG_REG_CHG_INT_OK */
+#define MAX77705_BYP_OK BIT(MAX77705_BYP_I)
+#define MAX77705_DISQBAT_OK BIT(MAX77705_INP_LIMIT_I)
+#define MAX77705_BATP_OK BIT(MAX77705_BATP_I)
+#define MAX77705_BAT_OK BIT(MAX77705_BAT_I)
+#define MAX77705_CHG_OK BIT(MAX77705_CHG_I)
+#define MAX77705_WCIN_OK BIT(MAX77705_WCIN_I)
+#define MAX77705_CHGIN_OK BIT(MAX77705_CHGIN_I)
+#define MAX77705_AICL_OK BIT(MAX77705_AICL_I)
+
+/* MAX77705_CHG_REG_DETAILS_00 */
+#define MAX77705_BATP_DTLS BIT(0)
+#define MAX77705_WCIN_DTLS GENMASK(4, 3)
+#define MAX77705_WCIN_DTLS_SHIFT 3
+#define MAX77705_CHGIN_DTLS GENMASK(6, 5)
+#define MAX77705_CHGIN_DTLS_SHIFT 5
+
+/* MAX77705_CHG_REG_DETAILS_01 */
+#define MAX77705_CHG_DTLS GENMASK(3, 0)
+#define MAX77705_CHG_DTLS_SHIFT 0
+#define MAX77705_BAT_DTLS GENMASK(6, 4)
+#define MAX77705_BAT_DTLS_SHIFT 4
+
+/* MAX77705_CHG_REG_DETAILS_02 */
+#define MAX77705_BYP_DTLS GENMASK(3, 0)
+#define MAX77705_BYP_DTLS_SHIFT 0
+
+/* MAX77705_CHG_REG_CNFG_00 */
+#define MAX77705_CHG_SHIFT 0
+#define MAX77705_UNO_SHIFT 1
+#define MAX77705_OTG_SHIFT 1
+#define MAX77705_BUCK_SHIFT 2
+#define MAX77705_BOOST_SHIFT 3
+#define MAX77705_WDTEN_SHIFT 4
+#define MAX77705_CHG_MASK BIT(MAX77705_CHG_SHIFT)
+#define MAX77705_UNO_MASK BIT(MAX77705_UNO_SHIFT)
+#define MAX77705_OTG_MASK BIT(MAX77705_OTG_SHIFT)
+#define MAX77705_BUCK_MASK BIT(MAX77705_BUCK_SHIFT)
+#define MAX77705_BOOST_MASK BIT(MAX77705_BOOST_SHIFT)
+#define MAX77705_WDTEN_MASK BIT(MAX77705_WDTEN_SHIFT)
+#define MAX77705_UNO_CTRL (MAX77705_UNO_MASK | MAX77705_BOOST_MASK)
+#define MAX77705_OTG_CTRL (MAX77705_OTG_MASK | MAX77705_BOOST_MASK)
+
+/* MAX77705_CHG_REG_CNFG_01 */
+#define MAX77705_FCHGTIME_DISABLE 0
+#define MAX77705_CHG_RSTRT_DISABLE 0x3
+
+#define MAX77705_CHG_PQEN_DISABLE 0
+#define MAX77705_CHG_PQEN_ENABLE 1
+
+/* MAX77705_CHG_REG_CNFG_02 */
+#define MAX77705_OTG_ILIM_500 0
+#define MAX77705_OTG_ILIM_900 1
+#define MAX77705_OTG_ILIM_1200 2
+#define MAX77705_OTG_ILIM_1500 3
+
+/* MAX77705_CHG_REG_CNFG_03 */
+#define MAX77705_TO_ITH_150MA 0
+#define MAX77705_TO_TIME_30M 3
+#define MAX77705_SYS_TRACK_ENABLE 0
+#define MAX77705_SYS_TRACK_DISABLE 1
+
+/* MAX77705_CHG_REG_CNFG_04 */
+#define MAX77705_CHG_MINVSYS_SHIFT 6
+#define MAX77705_CHG_MINVSYS_MASK GENMASK(7, 6)
+
+/* MAX77705_CHG_REG_CNFG_05 */
+#define MAX77705_B2SOVRC_DISABLE 0
+#define MAX77705_B2SOVRC_4_5A 6
+#define MAX77705_B2SOVRC_4_8A 8
+#define MAX77705_B2SOVRC_5_0A 9
+
+/* MAX77705_CHG_CNFG_06 */
+#define MAX77705_WDTCLR_SHIFT 0
+#define MAX77705_WDTCLR_MASK GENMASK(1, 0)
+#define MAX77705_WDTCLR 1
+#define MAX77705_CHGPROT_UNLOCKED 3
+#define MAX77705_SLOWEST_LX_SLOPE 3
+
+/* MAX77705_CHG_REG_CNFG_07 */
+#define MAX77705_CHG_FMBST 4
+#define MAX77705_REG_FMBST_SHIFT 2
+#define MAX77705_REG_FMBST_MASK BIT(MAX77705_REG_FMBST_SHIFT)
+#define MAX77705_REG_FGSRC_SHIFT 1
+#define MAX77705_REG_FGSRC_MASK BIT(MAX77705_REG_FGSRC_SHIFT)
+
+/* MAX77705_CHG_REG_CNFG_08 */
+#define MAX77705_CHG_FSW_3MHz 0
+#define MAX77705_CHG_FSW_2MHz 1
+#define MAX77705_CHG_FSW_1_5MHz 2
+
+/* MAX77705_CHG_REG_CNFG_09 */
+#define MAX77705_CHG_DISABLE 0
+
+/* MAX77705_CHG_REG_CNFG_12 */
+/* REG=4.5V, UVLO=4.7V */
+#define MAX77705_VCHGIN_4_5 0
+/* REG=4.5V, UVLO=4.7V */
+#define MAX77705_WCIN_4_5 0
+#define MAX77705_DISABLE_SKIP 1
+#define MAX77705_AUTO_SKIP 0
+
+#define AICL_WORK_DELAY_MS 100
+
+/* uA */
+#define MAX77705_CURRENT_CHGIN_STEP 25000
+#define MAX77705_CURRENT_CHG_STEP 50000
+#define MAX77705_CURRENT_CHGIN_MIN 100000
+#define MAX77705_CURRENT_CHGIN_MAX 3200000
+
+enum max77705_field_idx {
+ MAX77705_CHGPROT,
+ MAX77705_CHG_EN,
+ MAX77705_CHG_CC_LIM,
+ MAX77705_CHG_CHGIN_LIM,
+ MAX77705_CHG_CV_PRM,
+ MAX77705_CHG_PQEN,
+ MAX77705_CHG_RSTRT,
+ MAX77705_CHG_WCIN,
+ MAX77705_FCHGTIME,
+ MAX77705_LX_SLOPE,
+ MAX77705_MODE,
+ MAX77705_OTG_ILIM,
+ MAX77705_REG_B2SOVRC,
+ MAX77705_REG_DISKIP,
+ MAX77705_REG_FSW,
+ MAX77705_SYS_TRACK,
+ MAX77705_TO,
+ MAX77705_TO_TIME,
+ MAX77705_VBYPSET,
+ MAX77705_VCHGIN,
+ MAX77705_WCIN,
+ MAX77705_N_REGMAP_FIELDS,
+};
+
+static const struct reg_field max77705_reg_field[MAX77705_N_REGMAP_FIELDS] = {
+ [MAX77705_MODE] = REG_FIELD(MAX77705_CHG_REG_CNFG_00, 0, 3),
+ [MAX77705_FCHGTIME] = REG_FIELD(MAX77705_CHG_REG_CNFG_01, 0, 2),
+ [MAX77705_CHG_RSTRT] = REG_FIELD(MAX77705_CHG_REG_CNFG_01, 4, 5),
+ [MAX77705_CHG_PQEN] = REG_FIELD(MAX77705_CHG_REG_CNFG_01, 7, 7),
+ [MAX77705_CHG_CC_LIM] = REG_FIELD(MAX77705_CHG_REG_CNFG_02, 0, 5),
+ [MAX77705_OTG_ILIM] = REG_FIELD(MAX77705_CHG_REG_CNFG_02, 6, 7),
+ [MAX77705_TO] = REG_FIELD(MAX77705_CHG_REG_CNFG_03, 0, 2),
+ [MAX77705_TO_TIME] = REG_FIELD(MAX77705_CHG_REG_CNFG_03, 3, 5),
+ [MAX77705_SYS_TRACK] = REG_FIELD(MAX77705_CHG_REG_CNFG_03, 7, 7),
+ [MAX77705_CHG_CV_PRM] = REG_FIELD(MAX77705_CHG_REG_CNFG_04, 0, 5),
+ [MAX77705_REG_B2SOVRC] = REG_FIELD(MAX77705_CHG_REG_CNFG_05, 0, 3),
+ [MAX77705_CHGPROT] = REG_FIELD(MAX77705_CHG_REG_CNFG_06, 2, 3),
+ [MAX77705_LX_SLOPE] = REG_FIELD(MAX77705_CHG_REG_CNFG_06, 5, 6),
+ [MAX77705_REG_FSW] = REG_FIELD(MAX77705_CHG_REG_CNFG_08, 0, 1),
+ [MAX77705_CHG_CHGIN_LIM] = REG_FIELD(MAX77705_CHG_REG_CNFG_09, 0, 6),
+ [MAX77705_CHG_EN] = REG_FIELD(MAX77705_CHG_REG_CNFG_09, 7, 7),
+ [MAX77705_CHG_WCIN] = REG_FIELD(MAX77705_CHG_REG_CNFG_10, 0, 5),
+ [MAX77705_VBYPSET] = REG_FIELD(MAX77705_CHG_REG_CNFG_11, 0, 6),
+ [MAX77705_REG_DISKIP] = REG_FIELD(MAX77705_CHG_REG_CNFG_12, 0, 0),
+ [MAX77705_WCIN] = REG_FIELD(MAX77705_CHG_REG_CNFG_12, 1, 2),
+ [MAX77705_VCHGIN] = REG_FIELD(MAX77705_CHG_REG_CNFG_12, 3, 4),
+};
+
+struct max77705_charger_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_field *rfield[MAX77705_N_REGMAP_FIELDS];
+ struct power_supply_battery_info *bat_info;
+ struct workqueue_struct *wqueue;
+ struct work_struct chgin_work;
+ struct power_supply *psy_chg;
+};
+
+#endif /* __MAX77705_CHARGER_H */
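
A minimal sketch (not part of the patch) of how a driver might program the CHGIN input current limit through the MAX77705_CHG_CHGIN_LIM field using the step/min/max constants above. The exact selector encoding (whether selector 0 maps to the 100 mA minimum) is an assumption, and max77705_set_chgin_limit() is a hypothetical name.

static int max77705_set_chgin_limit(struct max77705_charger_data *chg, int ua)
{
	unsigned int sel;

	ua = clamp(ua, MAX77705_CURRENT_CHGIN_MIN, MAX77705_CURRENT_CHGIN_MAX);
	/* one selector step is assumed to correspond to 25 mA at CHGIN */
	sel = ua / MAX77705_CURRENT_CHGIN_STEP;

	return regmap_field_write(chg->rfield[MAX77705_CHG_CHGIN_LIM], sel);
}
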
diff --git a/include/linux/power/power_on_reason.h b/include/linux/power/power_on_reason.h
new file mode 100644
index 000000000000..95a1ec0c403c
--- /dev/null
+++ b/include/linux/power/power_on_reason.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Author: Kamel Bouhra <kamel.bouhara@bootlin.com>
+ */
+
+#ifndef POWER_ON_REASON_H
+#define POWER_ON_REASON_H
+
+#define POWER_ON_REASON_REGULAR "regular power-up"
+#define POWER_ON_REASON_RTC "RTC wakeup"
+#define POWER_ON_REASON_WATCHDOG "watchdog timeout"
+#define POWER_ON_REASON_SOFTWARE "software reset"
+#define POWER_ON_REASON_RST_BTN "reset button action"
+#define POWER_ON_REASON_CPU_CLK_FAIL "CPU clock failure"
+#define POWER_ON_REASON_XTAL_FAIL "crystal oscillator failure"
+#define POWER_ON_REASON_BROWN_OUT "brown-out reset"
+#define POWER_ON_REASON_UNKNOWN "unknown reason"
+
+#endif /* POWER_ON_REASON_H */
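
These strings are meant to be handed to userspace as-is. A hedged sketch of how a platform driver might map a hardware reset-status register onto them; the bit assignments and helper name are hypothetical.

static const char *my_power_on_reason(u32 rst_status)
{
	/* hypothetical bit layout for an imaginary reset-status register */
	if (rst_status & BIT(0))
		return POWER_ON_REASON_WATCHDOG;
	if (rst_status & BIT(1))
		return POWER_ON_REASON_SOFTWARE;
	if (rst_status & BIT(2))
		return POWER_ON_REASON_BROWN_OUT;
	return POWER_ON_REASON_UNKNOWN;
}
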
diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
index 971c9264179e..3a2c79dfc1ff 100644
--- a/include/linux/power/smartreflex.h
+++ b/include/linux/power/smartreflex.h
@@ -155,6 +155,7 @@ struct omap_sr {
struct voltagedomain *voltdm;
struct dentry *dbg_dir;
unsigned int irq;
+ struct clk *fck;
int srid;
int ip_type;
int nvalue_count;
@@ -169,6 +170,7 @@ struct omap_sr {
u32 senp_mod;
u32 senn_mod;
void __iomem *base;
+ unsigned long enabled:1;
};
/**
@@ -271,8 +273,6 @@ struct omap_sr_nvalue_table {
* @senn_avgweight SENNAVGWEIGHT value of the sr AVGWEIGHT register
* @senp_avgweight SENPAVGWEIGHT value of the sr AVGWEIGHT register
* @nvalue_count: Number of distinct nvalues in the nvalue table
- * @enable_on_init: whether this sr module needs to enabled at
- * boot up or not.
* @nvalue_table: table containing the efuse offsets and nvalues
* corresponding to them.
* @voltdm: Pointer to the voltage domain associated with the SR
@@ -288,7 +288,6 @@ struct omap_sr_data {
u32 senn_avgweight;
u32 senp_avgweight;
int nvalue_count;
- bool enable_on_init;
struct omap_sr_nvalue_table *nvalue_table;
struct voltagedomain *voltdm;
};
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index be203985ecdd..360ffdf272da 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -15,6 +15,8 @@
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/leds.h>
+#include <linux/rwsem.h>
+#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
@@ -40,7 +42,7 @@ enum {
};
/* What algorithm is the charger using? */
-enum {
+enum power_supply_charge_type {
POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0,
POWER_SUPPLY_CHARGE_TYPE_NONE,
POWER_SUPPLY_CHARGE_TYPE_TRICKLE, /* slow speed */
@@ -49,6 +51,7 @@ enum {
POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE, /* dynamically adjusted speed */
POWER_SUPPLY_CHARGE_TYPE_CUSTOM, /* use CHARGE_CONTROL_* props */
POWER_SUPPLY_CHARGE_TYPE_LONGLIFE, /* slow speed, longer life */
+ POWER_SUPPLY_CHARGE_TYPE_BYPASS, /* bypassing the charger */
};
enum {
@@ -57,6 +60,7 @@ enum {
POWER_SUPPLY_HEALTH_OVERHEAT,
POWER_SUPPLY_HEALTH_DEAD,
POWER_SUPPLY_HEALTH_OVERVOLTAGE,
+ POWER_SUPPLY_HEALTH_UNDERVOLTAGE,
POWER_SUPPLY_HEALTH_UNSPEC_FAILURE,
POWER_SUPPLY_HEALTH_COLD,
POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
@@ -66,6 +70,9 @@ enum {
POWER_SUPPLY_HEALTH_WARM,
POWER_SUPPLY_HEALTH_COOL,
POWER_SUPPLY_HEALTH_HOT,
+ POWER_SUPPLY_HEALTH_NO_BATTERY,
+ POWER_SUPPLY_HEALTH_BLOWN_FUSE,
+ POWER_SUPPLY_HEALTH_CELL_IMBALANCE,
};
enum {
@@ -97,6 +104,7 @@ enum power_supply_property {
/* Properties of type `int' */
POWER_SUPPLY_PROP_STATUS = 0,
POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CHARGE_TYPES,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
@@ -132,6 +140,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
POWER_SUPPLY_PROP_CHARGE_CONTROL_START_THRESHOLD, /* in percents! */
POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD, /* in percents! */
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT,
POWER_SUPPLY_PROP_INPUT_POWER_LIMIT,
@@ -167,6 +176,8 @@ enum power_supply_property {
POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
POWER_SUPPLY_PROP_MANUFACTURE_MONTH,
POWER_SUPPLY_PROP_MANUFACTURE_DAY,
+ POWER_SUPPLY_PROP_INTERNAL_RESISTANCE,
+ POWER_SUPPLY_PROP_STATE_OF_HEALTH,
/* Properties of type `const char *' */
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
@@ -202,6 +213,13 @@ enum power_supply_usb_type {
POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID, /* Apple Charging Method */
};
+enum power_supply_charge_behaviour {
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO = 0,
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE,
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE_AWAKE,
+ POWER_SUPPLY_CHARGE_BEHAVIOUR_FORCE_DISCHARGE,
+};
+
enum power_supply_notifier_events {
PSY_EVENT_PROP_CHANGED,
};
@@ -216,7 +234,6 @@ struct power_supply;
/* Run-time specific power supply configuration */
struct power_supply_config {
- struct device_node *of_node;
struct fwnode_handle *fwnode;
/* Driver private data */
@@ -227,14 +244,17 @@ struct power_supply_config {
char **supplied_to;
size_t num_supplicants;
+
+ bool no_wakeup_source;
};
/* Description of power supply */
struct power_supply_desc {
const char *name;
enum power_supply_type type;
- const enum power_supply_usb_type *usb_types;
- size_t num_usb_types;
+ u8 charge_behaviours;
+ u32 charge_types;
+ u32 usb_types;
const enum power_supply_property *properties;
size_t num_properties;
@@ -258,7 +278,6 @@ struct power_supply_desc {
int (*property_is_writeable)(struct power_supply *psy,
enum power_supply_property psp);
void (*external_power_changed)(struct power_supply *psy);
- void (*set_charged)(struct power_supply *psy);
/*
* Set if thermal zone should not be created for this power supply.
@@ -270,6 +289,29 @@ struct power_supply_desc {
int use_for_apm;
};
+struct power_supply_ext {
+ const char *const name;
+ u8 charge_behaviours;
+ u32 charge_types;
+ const enum power_supply_property *properties;
+ size_t num_properties;
+
+ int (*get_property)(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
+ int (*set_property)(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp,
+ const union power_supply_propval *val);
+ int (*property_is_writeable)(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp);
+};
+
struct power_supply {
const struct power_supply_desc *desc;
@@ -278,7 +320,6 @@ struct power_supply {
char **supplied_from;
size_t num_supplies;
- struct device_node *of_node;
/* Driver private data */
void *drv_data;
@@ -289,28 +330,29 @@ struct power_supply {
struct delayed_work deferred_register_work;
spinlock_t changed_lock;
bool changed;
+ bool update_groups;
bool initialized;
bool removing;
atomic_t use_cnt;
+ struct power_supply_battery_info *battery_info;
+ struct rw_semaphore extensions_sem; /* protects "extensions" */
+ struct list_head extensions;
#ifdef CONFIG_THERMAL
struct thermal_zone_device *tzd;
struct thermal_cooling_device *tcd;
#endif
#ifdef CONFIG_LEDS_TRIGGERS
- struct led_trigger *charging_full_trig;
- char *charging_full_trig_name;
+ struct led_trigger *trig;
struct led_trigger *charging_trig;
- char *charging_trig_name;
struct led_trigger *full_trig;
- char *full_trig_name;
- struct led_trigger *online_trig;
- char *online_trig_name;
struct led_trigger *charging_blink_full_solid_trig;
- char *charging_blink_full_solid_trig_name;
+ struct led_trigger *charging_orange_full_green_trig;
#endif
};
+#define dev_to_psy(__dev) container_of_const(__dev, struct power_supply, dev)
+
/*
* This is recommended structure to specify static power supply parameters.
* Generic one, parametrizable for different power supplies. Power supply
@@ -340,45 +382,423 @@ struct power_supply_resistance_temp_table {
int resistance; /* internal resistance percent */
};
+struct power_supply_vbat_ri_table {
+ int vbat_uv; /* Battery voltage in microvolt */
+ int ri_uohm; /* Internal resistance in microohm */
+};
+
+/**
+ * struct power_supply_maintenance_charge_table - settings for maintenance charging
+ * @charge_current_max_ua: maintenance charging current that is used to keep
+ * the charge of the battery full as current is consumed after full charging.
+ * The corresponding charge_voltage_max_uv is used as a safeguard: when we
+ * reach this voltage the maintenance charging current is turned off. It is
+ * turned back on if we fall below this voltage.
+ * @charge_voltage_max_uv: maintenance charging voltage that is usually a bit
+ * lower than the constant_charge_voltage_max_uv. We can apply this settings
+ * charge_current_max_ua until we get back up to this voltage.
+ * @charge_safety_timer_minutes: maintenance charging safety timer, with an expiry
+ * time in minutes. We will only use maintenance charging in this setting
+ * for a certain amount of time, then we will first move to the next
+ * maintenance charge current and voltage pair in the respective array and wait
+ * for the next safety timer timeout, or, if we reached the last maintenance
+ * charging setting, disable charging until we reach
+ * charge_restart_voltage_uv and restart ordinary CC/CV charging from there.
+ * These timers should be chosen to align with the typical discharge curve
+ * for the battery.
+ *
+ * Ordinary CC/CV charging will stop charging when the charge current goes
+ * below charge_term_current_ua, and then restart it (if the device is still
+ * plugged into the charger) at charge_restart_voltage_uv. This happens in most
+ * consumer products because the power usage while connected to a charger is
+ * not zero, and devices are not manufactured to draw power directly from the
+ * charger: instead they will at all times drain the battery a little, like
+ * the power used in standby mode. This will over time give a charge graph
+ * such as this:
+ *
+ * Energy
+ * ^ ... ... ... ... ... ... ...
+ * | . . . . . . . . . . . . .
+ * | .. . .. . .. . .. . .. . .. . ..
+ * |. .. .. .. .. .. ..
+ * +-------------------------------------------------------------------> t
+ *
+ * Practically this means that the Li-ions are wandering back and forth in the
+ * battery and this causes degeneration of the battery anode and cathode.
+ * To prolong the life of the battery, maintenance charging is applied after
+ * reaching charge_term_current_ua to hold up the charge in the battery while
+ * consuming power, thus lowering the wear on the battery:
+ *
+ * Energy
+ * ^ .......................................
+ * | . ......................
+ * | ..
+ * |.
+ * +-------------------------------------------------------------------> t
+ *
+ * Maintenance charging uses the voltages from this table: a table of settings
+ * is traversed using a slightly lower current and voltage than what is used for
+ * CC/CV charging. The maintenance charging will for safety reasons not go on
+ * indefinitely: we lower the current and voltage with successive maintenance
+ * settings, then disable charging completely after we reach the last one,
+ * and after that we do not restart charging until we reach
+ * charge_restart_voltage_uv (see struct power_supply_battery_info) and restart
+ * ordinary CC/CV charging from there.
+ *
+ * As an example, a Samsung EB425161LA Lithium-Ion battery is CC/CV charged
+ * at 900mA to 4340mV, then maintenance charged at 600mA and 4150mV for up to
+ * 60 hours, then maintenance charged at 600mA and 4100mV for up to 200 hours.
+ * After this the charge cycle is restarted waiting for
+ * charge_restart_voltage_uv.
+ *
+ * For most mobile electronics this type of maintenance charging is enough for
+ * the user to disconnect the device and make use of it before both maintenance
+ * charging cycles are complete, if the current and voltage has been chosen
+ * appropriately. These need to be determined from battery discharge curves
+ * and expected standby current.
+ *
+ * If the voltage anyway drops to charge_restart_voltage_uv during maintenance
+ * charging, ordinary CC/CV charging is restarted. This can happen if the
+ * device is e.g. actively used during charging, so more current is drawn than
+ * the expected standby current. Also overvoltage protection will be applied
+ * as usual.
+ */
+struct power_supply_maintenance_charge_table {
+ int charge_current_max_ua;
+ int charge_voltage_max_uv;
+ int charge_safety_timer_minutes;
+};
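
The Samsung EB425161LA example in the kernel-doc above (600 mA at 4150 mV for up to 60 hours, then 600 mA at 4100 mV for up to 200 hours) would translate into a table like the following sketch; the array name is illustrative.

static const struct power_supply_maintenance_charge_table eb425161la_maint_charge[] = {
	{
		/* stage 1: up to 60 hours */
		.charge_current_max_ua = 600000,
		.charge_voltage_max_uv = 4150000,
		.charge_safety_timer_minutes = 60 * 60,
	},
	{
		/* stage 2: up to 200 hours */
		.charge_current_max_ua = 600000,
		.charge_voltage_max_uv = 4100000,
		.charge_safety_timer_minutes = 200 * 60,
	},
};
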
+
#define POWER_SUPPLY_OCV_TEMP_MAX 20
-/*
+/**
+ * struct power_supply_battery_info - information about batteries
+ * @technology: from the POWER_SUPPLY_TECHNOLOGY_* enum
+ * @energy_full_design_uwh: energy content when fully charged in microwatt
+ * hours
+ * @charge_full_design_uah: charge content when fully charged in microampere
+ * hours
+ * @voltage_min_design_uv: minimum voltage across the poles when the battery
+ * is at minimum voltage level in microvolts. If the voltage drops below this
+ * level the battery will need precharging when using CC/CV charging.
+ * @voltage_max_design_uv: voltage across the poles when the battery is fully
+ * charged in microvolts. This is the "nominal voltage" i.e. the voltage
+ * printed on the label of the battery.
+ * @tricklecharge_current_ua: the tricklecharge current used when trickle
+ * charging the battery in microamperes. This is the charging phase when the
+ * battery is completely empty and we need to carefully trickle in some
+ * charge until we reach the precharging voltage.
+ * @precharge_current_ua: current to use in the precharge phase in microamperes,
+ * the precharge rate is limited by limiting the current to this value.
+ * @precharge_voltage_max_uv: the maximum voltage allowed when precharging in
+ * microvolts. When we pass this voltage we will nominally switch over to the
+ * CC (constant current) charging phase defined by constant_charge_current_ua
+ * and constant_charge_voltage_max_uv.
+ * @charge_term_current_ua: when the current in the CV (constant voltage)
+ * charging phase drops below this value in microamperes the charging will
+ * terminate completely and not restart until the voltage over the battery
+ * poles reach charge_restart_voltage_uv unless we use maintenance charging.
+ * @charge_restart_voltage_uv: when the battery has been fully charged by
+ * CC/CV charging and charging has been disabled, and the voltage subsequently
+ * drops below this value in microvolts, the charging will be restarted
+ * (typically using CV charging).
+ * @overvoltage_limit_uv: If the voltage exceeds the nominal voltage
+ * voltage_max_design_uv and we reach this voltage level, all charging must
+ * stop and emergency procedures take place, such as shutting down the system
+ * in some cases.
+ * @constant_charge_current_max_ua: current in microamperes to use in the CC
+ * (constant current) charging phase. The charging rate is limited
+ * by this current. This is the main charging phase and as the current is
+ * constant into the battery the voltage slowly ascends to
+ * constant_charge_voltage_max_uv.
+ * @constant_charge_voltage_max_uv: voltage in microvolts signifying the end of
+ * the CC (constant current) charging phase and the beginning of the CV
+ * (constant voltage) charging phase.
+ * @maintenance_charge: an array of maintenance charging settings to be used
+ * after the main CC/CV charging phase is complete.
+ * @maintenance_charge_size: the number of maintenance charging settings in
+ * maintenance_charge.
+ * @alert_low_temp_charge_current_ua: The charging current to use if the battery
+ * enters low alert temperature, i.e. if the internal temperature is between
+ * temp_alert_min and temp_min. No matter the charging phase, this
+ * and alert_low_temp_charge_voltage_uv will be applied.
+ * @alert_low_temp_charge_voltage_uv: Same as alert_low_temp_charge_current_ua,
+ * but for the charging voltage.
+ * @alert_high_temp_charge_current_ua: The charging current to use if the
+ * battery enters high alert temperature, i.e. if the internal temperature is
+ * between temp_alert_max and temp_max. No matter the charging phase, this
+ * and alert_high_temp_charge_voltage_uv will be applied, usually lowering
+ * the charging current as an evasive maneuver.
+ * @alert_high_temp_charge_voltage_uv: Same as
+ * alert_high_temp_charge_current_ua, but for the charging voltage.
+ * @factory_internal_resistance_uohm: the internal resistance of the battery
+ * at fabrication time, expressed in microohms. This resistance will vary
+ * depending on the lifetime and charge of the battery, so this is just a
+ * nominal ballpark figure. This internal resistance is given for the state
+ * when the battery is discharging.
+ * @factory_internal_resistance_charging_uohm: the internal resistance of the
+ * battery at fabrication time while charging, expressed in microohms.
+ * The charging process will affect the internal resistance of the battery
+ * so this value provides a better resistance under these circumstances.
+ * This resistance will vary depending on the lifetime and charge of the
+ * battery, so this is just a nominal ballpark figure.
+ * @ocv_temp: array indicating the open circuit voltage (OCV) capacity
+ * temperature indices. This is an array of temperatures in degrees Celsius
+ * indicating which capacity table to use for a certain temperature, since
+ * the capacity for reasons of chemistry will be different at different
+ * temperatures. Determining capacity is a multivariate problem and the
+ * temperature is the first variable we determine.
+ * @temp_ambient_alert_min: the battery will go outside of operating conditions
+ * when the ambient temperature goes below this temperature in degrees
+ * Celsius.
+ * @temp_ambient_alert_max: the battery will go outside of operating conditions
+ * when the ambient temperature goes above this temperature in degrees
+ * Celsius.
+ * @temp_alert_min: the battery should issue an alert if the internal
+ * temperature goes below this temperature in degrees Celsius.
+ * @temp_alert_max: the battery should issue an alert if the internal
+ * temperature goes above this temperature in degrees Celsius.
+ * @temp_min: the battery will go outside of operating conditions when
+ * the internal temperature goes below this temperature in degrees Celsius.
+ * Normally this means the system should shut down.
+ * @temp_max: the battery will go outside of operating conditions when
+ * the internal temperature goes above this temperature in degrees Celsius.
+ * Normally this means the system should shut down.
+ * @ocv_table: for each entry in ocv_temp there is a corresponding entry in
+ * ocv_table and a size for each entry in ocv_table_size. These arrays
+ * determine the capacity in percent in relation to the voltage in microvolts
+ * at the indexed temperature.
+ * @ocv_table_size: for each entry in ocv_temp this array gives the size of
+ * each entry in the array of capacity arrays in ocv_table.
+ * @resist_table: this is a table that correlates a battery temperature to the
+ * expected internal resistance at this temperature. The resistance is given
+ * as a percentage of factory_internal_resistance_uohm. Knowing the
+ * resistance of the battery is usually necessary for calculating the open
+ * circuit voltage (OCV) that is then used with the ocv_table to calculate
+ * the capacity of the battery. The resist_table must be ordered descending
+ * by temperature: highest temperature with lowest resistance first, lowest
+ * temperature with highest resistance last.
+ * @resist_table_size: the number of items in the resist_table.
+ * @vbat2ri_discharging: this is a table that correlates Battery voltage (VBAT)
+ * to internal resistance (Ri). The resistance is given in microohm for the
+ * corresponding voltage in microvolts. The internal resistance is used to
+ * determine the open circuit voltage so that we can determine the capacity
+ * of the battery. These voltages to resistance tables apply when the battery
+ * is discharging. The table must be ordered descending by voltage: highest
+ * voltage first.
+ * @vbat2ri_discharging_size: the number of items in the vbat2ri_discharging
+ * table.
+ * @vbat2ri_charging: same function as vbat2ri_discharging but for the state
+ * when the battery is charging. Being under charge changes the battery's
+ * internal resistance characteristics, so a separate table is needed.
+ * The table must be ordered descending by voltage: highest voltage first.
+ * @vbat2ri_charging_size: the number of items in the vbat2ri_charging
+ * table.
+ * @bti_resistance_ohm: The Battery Type Indicator (BTI) nominal resistance
+ * in ohms for this battery, if an identification resistor is mounted
+ * between a third battery terminal and ground. This scheme is used by a lot
+ * of mobile device batteries.
+ * @bti_resistance_tolerance: The tolerance in percent of the BTI resistance,
+ * for example 10 for +/- 10%. If the bti_resistance is set to 7000 and the
+ * tolerance is 10%, we will detect a proper battery if the BTI resistance
+ * is between 6300 and 7700 Ohm.
+ *
* This is the recommended struct to manage static battery parameters,
* populated by power_supply_get_battery_info(). Most platform drivers should
* use these for consistency.
+ *
* Its field names must correspond to elements in enum power_supply_property.
- * The default field value is -EINVAL.
- * Power supply class itself doesn't use this.
+ * The default field value is -EINVAL or NULL for pointers.
+ *
+ * CC/CV CHARGING:
+ *
+ * The charging parameters here assume a CC/CV charging scheme. This method
+ * is most common with Lithium Ion batteries (other methods are possible) and
+ * looks as follows:
+ *
+ * ^ Battery voltage
+ * | --- overvoltage_limit_uv
+ * |
+ * | ...................................................
+ * | .. constant_charge_voltage_max_uv
+ * | ..
+ * | .
+ * | .
+ * | .
+ * | .
+ * | .
+ * | .. precharge_voltage_max_uv
+ * | ..
+ * |. (trickle charging)
+ * +------------------------------------------------------------------> time
+ *
+ * ^ Current into the battery
+ * |
+ * | ............. constant_charge_current_max_ua
+ * | . .
+ * | . .
+ * | . .
+ * | . .
+ * | . ..
+ * | . ....
+ * | . .....
+ * | ... precharge_current_ua ....... charge_term_current_ua
+ * | . .
+ * | . .
+ * |.... tricklecharge_current_ua .
+ * | .
+ * +-----------------------------------------------------------------> time
+ *
+ * These diagrams are synchronized on time and the voltage and current
+ * follow each other.
+ *
+ * CC/CV charging of an empty battery proceeds over time like this:
+ *
+ * 1. When the battery is completely empty it may need to be charged with
+ * an especially small current so that electrons just "trickle in";
+ * this is the tricklecharge_current_ua.
+ *
+ * 2. Next a small initial pre-charge current (precharge_current_ua)
+ * is applied if the voltage is below precharge_voltage_max_uv until we
+ * reach precharge_voltage_max_uv. CAUTION: in some texts this is referred
+ * to as "trickle charging" but the use in the Linux kernel is different
+ * see below!
+ *
+ * 3. Then the main charging current is applied, which is called the constant
+ * current (CC) phase. A current regulator is set up to allow
+ * constant_charge_current_max_ua of current to flow into the battery.
+ * The chemical reaction in the battery will make the voltage go up as
+ * charge goes into the battery. This current is applied until we reach
+ * the constant_charge_voltage_max_uv voltage.
+ *
+ * 4. At this voltage we switch over to the constant voltage (CV) phase. This
+ * means we allow current to go into the battery, but we keep the voltage
+ * fixed. This current will continue to charge the battery while keeping
+ * the voltage the same. A chemical reaction in the battery goes on
+ * storing energy without affecting the voltage. Over time the current
+ * will slowly drop and when we reach charge_term_current_ua we will
+ * end the constant voltage phase.
+ *
+ * After this the battery is fully charged, and if we do not support maintenance
+ * charging, the charging will not restart until power dissipation makes the
+ * voltage fall so that we reach charge_restart_voltage_uv. At this point
+ * we restart charging at the appropriate phase, usually inside the CV
+ * phase.
+ *
+ * If we support maintenance charging the voltage is however kept high after
+ * the CV phase with a very low current. This lets the charger supply the
+ * power consumed by the device while it remains connected, instead of
+ * cycling the battery for that standby drain.
+ *
+ * All charging MUST terminate if the overvoltage_limit_uv is ever reached.
+ * Overcharging Lithium Ion cells can be DANGEROUS and lead to fire or
+ * explosions.
+ *
+ * DETERMINING BATTERY CAPACITY:
+ *
+ * Several members of the struct deal with trying to determine the remaining
+ * capacity in the battery, usually as a percentage of charge. In practice
+ * many chargers use a so-called fuel gauge or coulomb counter that measures
+ * how much charge goes into the battery and how much goes out (+/- leak
+ * consumption). This does not help if we do not know how much capacity the
+ * battery has to begin with, such as when it is first used or was taken out
+ * and charged in a separate charger. Therefore many capacity algorithms use
+ * the open circuit voltage with a look-up table to determine the rough
+ * capacity of the battery. The open circuit voltage can be conceptualized
+ * with an ideal voltage source (V) in series with an internal resistance (Ri)
+ * like this:
+ *
+ * +-------> IBAT >----------------+
+ * | ^ |
+ * [ ] Ri | |
+ * | | VBAT |
+ * o <---------- | |
+ * +| ^ | [ ] Rload
+ * .---. | | |
+ * | V | | OCV | |
+ * '---' | | |
+ * | | | |
+ * GND +-------------------------------+
+ *
+ * If we disconnect the load (here simplified as a fixed resistance Rload)
+ * and measure VBAT with an infinite impedance voltage meter we will get
+ * VBAT = OCV and this assumption is sometimes made even under load, assuming
+ * Rload is insignificant. However this will be of dubious quality because the
+ * load is rarely that small and Ri is strongly nonlinear depending on
+ * temperature and how much capacity is left in the battery due to the
+ * chemistry involved.
+ *
+ * In many practical applications we cannot just disconnect the battery from
+ * the load, so instead we often try to measure the instantaneous IBAT (the
+ * current out from the battery), estimate the Ri and thus calculate the
+ * voltage drop over Ri and compensate like this:
+ *
+ * OCV = VBAT - (IBAT * Ri)
+ *
+ * The tables vbat2ri_discharging and vbat2ri_charging are used to determine
+ * (by interpolation) the Ri from the VBAT under load. These curves are highly
+ * nonlinear and may need many datapoints but can be found in datasheets for
+ * some batteries. This gives the compensated open circuit voltage (OCV) for
+ * the battery even under load. Using this method will also compensate for
+ * temperature changes in the environment: this will also make the internal
+ * resistance change, and it will affect the VBAT under load, so correlating
+ * VBAT to Ri takes both remaining capacity and temperature into consideration.
+ *
+ * Alternatively a manufacturer can specify how the capacity of the battery
+ * is dependent on the battery temperature, which is the main factor affecting
+ * Ri: as we know, all chemical reactions are faster when it is warm and slower
+ * when it is cold. For example, you can put in 1500mAh and only get 800mAh out
+ * before the voltage drops too low. This effect is also highly nonlinear, and
+ * capturing it is the purpose of the resist_table: it takes a temperature and
+ * tells us what percentage of Ri the specified temperature corresponds to.
+ * Usually we have 100% of the factory_internal_resistance_uohm at 25 degrees
+ * Celsius.
+ *
+ * The power supply class itself doesn't use this struct as of now.
*/
struct power_supply_battery_info {
- int energy_full_design_uwh; /* microWatt-hours */
- int charge_full_design_uah; /* microAmp-hours */
- int voltage_min_design_uv; /* microVolts */
- int voltage_max_design_uv; /* microVolts */
- int tricklecharge_current_ua; /* microAmps */
- int precharge_current_ua; /* microAmps */
- int precharge_voltage_max_uv; /* microVolts */
- int charge_term_current_ua; /* microAmps */
- int charge_restart_voltage_uv; /* microVolts */
- int overvoltage_limit_uv; /* microVolts */
- int constant_charge_current_max_ua; /* microAmps */
- int constant_charge_voltage_max_uv; /* microVolts */
- int factory_internal_resistance_uohm; /* microOhms */
- int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX];/* celsius */
- int temp_ambient_alert_min; /* celsius */
- int temp_ambient_alert_max; /* celsius */
- int temp_alert_min; /* celsius */
- int temp_alert_max; /* celsius */
- int temp_min; /* celsius */
- int temp_max; /* celsius */
- struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX];
+ unsigned int technology;
+ int energy_full_design_uwh;
+ int charge_full_design_uah;
+ int voltage_min_design_uv;
+ int voltage_max_design_uv;
+ int tricklecharge_current_ua;
+ int precharge_current_ua;
+ int precharge_voltage_max_uv;
+ int charge_term_current_ua;
+ int charge_restart_voltage_uv;
+ int overvoltage_limit_uv;
+ int constant_charge_current_max_ua;
+ int constant_charge_voltage_max_uv;
+ const struct power_supply_maintenance_charge_table *maintenance_charge;
+ int maintenance_charge_size;
+ int alert_low_temp_charge_current_ua;
+ int alert_low_temp_charge_voltage_uv;
+ int alert_high_temp_charge_current_ua;
+ int alert_high_temp_charge_voltage_uv;
+ int factory_internal_resistance_uohm;
+ int factory_internal_resistance_charging_uohm;
+ int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX];
+ int temp_ambient_alert_min;
+ int temp_ambient_alert_max;
+ int temp_alert_min;
+ int temp_alert_max;
+ int temp_min;
+ int temp_max;
+ const struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX];
int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX];
- struct power_supply_resistance_temp_table *resist_table;
+ const struct power_supply_resistance_temp_table *resist_table;
int resist_table_size;
+ const struct power_supply_vbat_ri_table *vbat2ri_discharging;
+ int vbat2ri_discharging_size;
+ const struct power_supply_vbat_ri_table *vbat2ri_charging;
+ int vbat2ri_charging_size;
+ int bti_resistance_ohm;
+ int bti_resistance_tolerance;
};
-extern struct atomic_notifier_head power_supply_notifier;
extern int power_supply_reg_notifier(struct notifier_block *nb);
extern void power_supply_unreg_notifier(struct notifier_block *nb);
#if IS_ENABLED(CONFIG_POWER_SUPPLY)
@@ -389,39 +809,67 @@ static inline void power_supply_put(struct power_supply *psy) {}
static inline struct power_supply *power_supply_get_by_name(const char *name)
{ return NULL; }
#endif
-#ifdef CONFIG_OF
-extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
- const char *property);
-extern struct power_supply *devm_power_supply_get_by_phandle(
+extern struct power_supply *power_supply_get_by_reference(struct fwnode_handle *fwnode,
+ const char *property);
+extern struct power_supply *devm_power_supply_get_by_reference(
struct device *dev, const char *property);
-#else /* !CONFIG_OF */
-static inline struct power_supply *
-power_supply_get_by_phandle(struct device_node *np, const char *property)
-{ return NULL; }
-static inline struct power_supply *
-devm_power_supply_get_by_phandle(struct device *dev, const char *property)
-{ return NULL; }
-#endif /* CONFIG_OF */
+extern const enum power_supply_property power_supply_battery_info_properties[];
+extern const size_t power_supply_battery_info_properties_size;
extern int power_supply_get_battery_info(struct power_supply *psy,
- struct power_supply_battery_info *info);
+ struct power_supply_battery_info **info_out);
extern void power_supply_put_battery_info(struct power_supply *psy,
struct power_supply_battery_info *info);
-extern int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table,
+extern bool power_supply_battery_info_has_prop(struct power_supply_battery_info *info,
+ enum power_supply_property psp);
+extern int power_supply_battery_info_get_prop(struct power_supply_battery_info *info,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
+extern int power_supply_ocv2cap_simple(const struct power_supply_battery_ocv_table *table,
int table_len, int ocv);
-extern struct power_supply_battery_ocv_table *
+extern const struct power_supply_battery_ocv_table *
power_supply_find_ocv2cap_table(struct power_supply_battery_info *info,
int temp, int *table_len);
extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info,
int ocv, int temp);
extern int
-power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table,
+power_supply_temp2resist_simple(const struct power_supply_resistance_temp_table *table,
int table_len, int temp);
+extern int power_supply_vbat2ri(struct power_supply_battery_info *info,
+ int vbat_uv, bool charging);
+extern const struct power_supply_maintenance_charge_table *
+power_supply_get_maintenance_charging_setting(struct power_supply_battery_info *info, int index);
+extern bool power_supply_battery_bti_in_range(struct power_supply_battery_info *info,
+ int resistance);
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
-extern int power_supply_set_input_current_limit_from_supplier(
- struct power_supply *psy);
-extern int power_supply_set_battery_charged(struct power_supply *psy);
+int power_supply_get_property_from_supplier(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
+
+static inline bool
+power_supply_supports_maintenance_charging(struct power_supply_battery_info *info)
+{
+ const struct power_supply_maintenance_charge_table *mt;
+
+ mt = power_supply_get_maintenance_charging_setting(info, 0);
+
+ return (mt != NULL);
+}
+
+static inline bool
+power_supply_supports_vbat2ri(struct power_supply_battery_info *info)
+{
+ return ((info->vbat2ri_discharging != NULL) &&
+ info->vbat2ri_discharging_size > 0);
+}
+
+static inline bool
+power_supply_supports_temp2ri(struct power_supply_battery_info *info)
+{
+ return ((info->resist_table != NULL) &&
+ info->resist_table_size > 0);
+}
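
Combining the helpers above implements the OCV compensation described in the battery-info kernel-doc, OCV = VBAT - (IBAT * Ri). A hedged sketch, assuming power_supply_vbat2ri() returns Ri in microohms or a negative errno; example_capacity_percent() is a hypothetical name.

static int example_capacity_percent(struct power_supply_battery_info *info,
				    int vbat_uv, int ibat_ua, int temp,
				    bool charging)
{
	int ocv_uv, ri_uohm;

	if (!power_supply_supports_vbat2ri(info))
		return -EINVAL;

	ri_uohm = power_supply_vbat2ri(info, vbat_uv, charging);
	if (ri_uohm < 0)
		return ri_uohm;

	/* uA * uohm yields picovolts; scale down to microvolts */
	ocv_uv = vbat_uv - (int)div_s64((s64)ibat_ua * ri_uohm, 1000000);

	return power_supply_batinfo_ocv2cap(info, ocv_uv, temp);
}
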
#ifdef CONFIG_POWER_SUPPLY
extern int power_supply_is_system_supplied(void);
@@ -432,18 +880,24 @@ static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
extern int power_supply_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val);
+int power_supply_get_property_direct(struct power_supply *psy, enum power_supply_property psp,
+ union power_supply_propval *val);
#if IS_ENABLED(CONFIG_POWER_SUPPLY)
extern int power_supply_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val);
+int power_supply_set_property_direct(struct power_supply *psy, enum power_supply_property psp,
+ const union power_supply_propval *val);
#else
static inline int power_supply_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val)
{ return 0; }
+static inline int power_supply_set_property_direct(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{ return 0; }
#endif
-extern int power_supply_property_is_writeable(struct power_supply *psy,
- enum power_supply_property psp);
extern void power_supply_external_power_changed(struct power_supply *psy);
extern struct power_supply *__must_check
@@ -451,25 +905,24 @@ power_supply_register(struct device *parent,
const struct power_supply_desc *desc,
const struct power_supply_config *cfg);
extern struct power_supply *__must_check
-power_supply_register_no_ws(struct device *parent,
- const struct power_supply_desc *desc,
- const struct power_supply_config *cfg);
-extern struct power_supply *__must_check
devm_power_supply_register(struct device *parent,
const struct power_supply_desc *desc,
const struct power_supply_config *cfg);
-extern struct power_supply *__must_check
-devm_power_supply_register_no_ws(struct device *parent,
- const struct power_supply_desc *desc,
- const struct power_supply_config *cfg);
extern void power_supply_unregister(struct power_supply *psy);
extern int power_supply_powers(struct power_supply *psy, struct device *dev);
+extern int __must_check
+power_supply_register_extension(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ struct device *dev,
+ void *data);
+extern void power_supply_unregister_extension(struct power_supply *psy,
+ const struct power_supply_ext *ext);
+
#define to_power_supply(device) container_of(device, struct power_supply, dev)
extern void *power_supply_get_drvdata(struct power_supply *psy);
-/* For APM emulation, think legacy userspace. */
-extern struct class *power_supply_class;
+extern int power_supply_for_each_psy(void *data, int (*fn)(struct power_supply *psy, void *data));
static inline bool power_supply_is_amp_property(enum power_supply_property psp)
{
@@ -525,17 +978,47 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp)
return false;
}
-#ifdef CONFIG_POWER_SUPPLY_HWMON
-int power_supply_add_hwmon_sysfs(struct power_supply *psy);
-void power_supply_remove_hwmon_sysfs(struct power_supply *psy);
+#ifdef CONFIG_SYSFS
+ssize_t power_supply_charge_behaviour_show(struct device *dev,
+ unsigned int available_behaviours,
+ enum power_supply_charge_behaviour behaviour,
+ char *buf);
+
+int power_supply_charge_behaviour_parse(unsigned int available_behaviours, const char *buf);
+ssize_t power_supply_charge_types_show(struct device *dev,
+ unsigned int available_types,
+ enum power_supply_charge_type current_type,
+ char *buf);
+int power_supply_charge_types_parse(unsigned int available_types, const char *buf);
#else
-static inline int power_supply_add_hwmon_sysfs(struct power_supply *psy)
+static inline
+ssize_t power_supply_charge_behaviour_show(struct device *dev,
+ unsigned int available_behaviours,
+ enum power_supply_charge_behaviour behaviour,
+ char *buf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int power_supply_charge_behaviour_parse(unsigned int available_behaviours,
+ const char *buf)
{
- return 0;
+ return -EOPNOTSUPP;
}
static inline
-void power_supply_remove_hwmon_sysfs(struct power_supply *psy) {}
+ssize_t power_supply_charge_types_show(struct device *dev,
+ unsigned int available_types,
+ enum power_supply_charge_type current_type,
+ char *buf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int power_supply_charge_types_parse(unsigned int available_types, const char *buf)
+{
+ return -EOPNOTSUPP;
+}
#endif
#endif /* __LINUX_POWER_SUPPLY_H__ */
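
The new struct power_supply_ext and power_supply_register_extension() allow bolting extra properties onto an existing supply. A hedged sketch of a hypothetical extension publishing STATE_OF_HEALTH; all names and the constant value are illustrative.

static int my_ext_get_property(struct power_supply *psy,
			       const struct power_supply_ext *ext,
			       void *data, enum power_supply_property psp,
			       union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_STATE_OF_HEALTH:
		val->intval = 97;	/* percent; made up for the example */
		return 0;
	default:
		return -EINVAL;
	}
}

static const enum power_supply_property my_ext_props[] = {
	POWER_SUPPLY_PROP_STATE_OF_HEALTH,
};

static const struct power_supply_ext my_soh_ext = {
	.name		= "my-soh-ext",
	.properties	= my_ext_props,
	.num_properties	= ARRAY_SIZE(my_ext_props),
	.get_property	= my_ext_get_property,
};

/* in probe: err = power_supply_register_extension(psy, &my_soh_ext, dev, NULL); */
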
diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
index 9d3ffc8f5ea6..fb847e47f148 100644
--- a/include/linux/ppp-comp.h
+++ b/include/linux/ppp-comp.h
@@ -9,7 +9,7 @@
#include <uapi/linux/ppp-comp.h>
-
+struct compstat;
struct module;
/*
diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h
index 91f9a928344e..f73fbea0dbc2 100644
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -20,6 +20,8 @@
#include <linux/poll.h>
#include <net/net_namespace.h>
+struct net_device_path;
+struct net_device_path_ctx;
struct ppp_channel;
struct ppp_channel_ops {
@@ -40,8 +42,7 @@ struct ppp_channel {
int hdrlen; /* amount of headroom channel needs */
void *ppp; /* opaque to channel */
int speed; /* transfer rate (bytes/second) */
- /* the following is not used at present */
- int latency; /* overhead time in milliseconds */
+ bool direct_xmit; /* no qdisc, xmit directly */
};
#ifdef __KERNEL__
diff --git a/include/linux/ppp_defs.h b/include/linux/ppp_defs.h
index 9d2b388fae1a..b7e57fdbd413 100644
--- a/include/linux/ppp_defs.h
+++ b/include/linux/ppp_defs.h
@@ -11,4 +11,18 @@
#include <uapi/linux/ppp_defs.h>
#define PPP_FCS(fcs, c) crc_ccitt_byte(fcs, c)
+
+/**
+ * ppp_proto_is_valid - checks if PPP protocol is valid
+ * @proto: PPP protocol
+ *
+ * Assumes proto is not compressed.
+ * Protocol is valid if the value is odd and the least significant bit of the
+ * most significant octet is 0 (see RFC 1661, section 2).
+ */
+static inline bool ppp_proto_is_valid(u16 proto)
+{
+ return !!((proto & 0x0101) == 0x0001);
+}
+
#endif /* _PPP_DEFS_H_ */
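
A few worked values for the RFC 1661 rule encoded by (proto & 0x0101) == 0x0001; the function below merely exercises the helper.

static void ppp_proto_examples(void)
{
	WARN_ON(!ppp_proto_is_valid(0x0021));	/* PPP_IP: odd, MSB octet LSB clear */
	WARN_ON(ppp_proto_is_valid(0x0022));	/* even value -> invalid */
	WARN_ON(ppp_proto_is_valid(0x0121));	/* LSB of MSB octet set -> invalid */
}
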
diff --git a/include/linux/pps_gen_kernel.h b/include/linux/pps_gen_kernel.h
new file mode 100644
index 000000000000..6214c8aa2e02
--- /dev/null
+++ b/include/linux/pps_gen_kernel.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PPS generator API kernel header
+ *
+ * Copyright (C) 2024 Rodolfo Giometti <giometti@enneenne.com>
+ */
+
+#ifndef LINUX_PPS_GEN_KERNEL_H
+#define LINUX_PPS_GEN_KERNEL_H
+
+#include <linux/pps_gen.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+
+/*
+ * Global defines
+ */
+
+#define PPS_GEN_MAX_SOURCES 16 /* should be enough... */
+
+struct pps_gen_device;
+
+/**
+ * struct pps_gen_source_info - the specific PPS generator info
+ * @use_system_clock: true, if the system clock is used to generate pulses
+ * @get_time: query the time stored into the generator clock
+ * @enable: enable/disable the PPS pulses generation
+ *
+ * This is the main generator struct where all needed information must be
+ * placed before calling the pps_gen_register_source().
+ */
+struct pps_gen_source_info {
+ bool use_system_clock;
+
+ int (*get_time)(struct pps_gen_device *pps_gen,
+ struct timespec64 *time);
+ int (*enable)(struct pps_gen_device *pps_gen, bool enable);
+
+/* private: internal use only */
+ struct module *owner;
+ struct device *parent; /* for device_create */
+};
+
+/* The main struct */
+struct pps_gen_device {
+ const struct pps_gen_source_info *info; /* PPS generator info */
+ bool enabled; /* PPS generator status */
+
+ unsigned int event;
+ unsigned int sequence;
+
+ unsigned int last_ev; /* last PPS event id */
+ wait_queue_head_t queue; /* PPS event queue */
+
+ unsigned int id; /* PPS generator unique ID */
+ struct cdev cdev;
+ struct device *dev;
+ struct fasync_struct *async_queue; /* fasync method */
+ spinlock_t lock;
+};
+
+/*
+ * Global variables
+ */
+
+extern const struct attribute_group *pps_gen_groups[];
+
+/*
+ * Exported functions
+ */
+
+extern struct pps_gen_device *pps_gen_register_source(
+ const struct pps_gen_source_info *info);
+extern void pps_gen_unregister_source(struct pps_gen_device *pps_gen);
+extern void pps_gen_event(struct pps_gen_device *pps_gen,
+ unsigned int event, void *data);
+
+#endif /* LINUX_PPS_GEN_KERNEL_H */
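
A hedged sketch of registering a generator with this API, based only on the declarations above; the callbacks are hypothetical and a real driver would program its hardware in my_enable().

static int my_get_time(struct pps_gen_device *pps_gen, struct timespec64 *time)
{
	ktime_get_real_ts64(time);	/* generator follows the system clock */
	return 0;
}

static int my_enable(struct pps_gen_device *pps_gen, bool enable)
{
	/* start or stop pulse generation in hardware here */
	return 0;
}

static const struct pps_gen_source_info my_gen_info = {
	.use_system_clock	= true,
	.get_time		= my_get_time,
	.enable			= my_enable,
};

/* in probe: pps_gen = pps_gen_register_source(&my_gen_info); */
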
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index 78c8ac4951b5..aab0aebb529e 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -52,12 +52,12 @@ struct pps_device {
int current_mode; /* PPS mode at event time */
unsigned int last_ev; /* last PPS event id */
+ unsigned int last_fetched_ev; /* last fetched PPS event id */
wait_queue_head_t queue; /* PPS event queue */
unsigned int id; /* PPS source unique ID */
void const *lookup_cookie; /* For pps_lookup_dev() only */
- struct cdev cdev;
- struct device *dev;
+ struct device dev;
struct fasync_struct *async_queue; /* fasync method */
spinlock_t lock;
};
diff --git a/include/linux/pr.h b/include/linux/pr.h
index 94ceec713afe..3003daec28a5 100644
--- a/include/linux/pr.h
+++ b/include/linux/pr.h
@@ -4,6 +4,18 @@
#include <uapi/linux/pr.h>
+struct pr_keys {
+ u32 generation;
+ u32 num_keys;
+ u64 keys[];
+};
+
+struct pr_held_reservation {
+ u64 key;
+ u32 generation;
+ enum pr_type type;
+};
+
struct pr_ops {
int (*pr_register)(struct block_device *bdev, u64 old_key, u64 new_key,
u32 flags);
@@ -14,6 +26,19 @@ struct pr_ops {
int (*pr_preempt)(struct block_device *bdev, u64 old_key, u64 new_key,
enum pr_type type, bool abort);
int (*pr_clear)(struct block_device *bdev, u64 key);
+ /*
+ * pr_read_keys - Read the registered keys and return them in the
+ * pr_keys->keys array. The keys array will have been allocated at the
+ * end of the pr_keys struct, and pr_keys->num_keys must be set to the
+ * number of keys the array can hold. If there are more than can fit
+ * in the array, success will still be returned and pr_keys->num_keys
+ * will reflect the total number of keys the device contains, so the
+ * caller can retry with a larger array.
+ */
+ int (*pr_read_keys)(struct block_device *bdev,
+ struct pr_keys *keys_info);
+ int (*pr_read_reservation)(struct block_device *bdev,
+ struct pr_held_reservation *rsv);
};
#endif /* LINUX_PR_H */
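
The retry protocol spelled out in the pr_read_keys() comment, as a hedged caller-side sketch; read_all_keys() is a hypothetical helper.

static struct pr_keys *read_all_keys(struct block_device *bdev,
				     const struct pr_ops *ops)
{
	u32 num_keys = 8;
	struct pr_keys *keys;

retry:
	keys = kzalloc(struct_size(keys, keys, num_keys), GFP_KERNEL);
	if (!keys)
		return NULL;
	keys->num_keys = num_keys;

	if (ops->pr_read_keys(bdev, keys)) {
		kfree(keys);
		return NULL;
	}
	if (keys->num_keys > num_keys) {
		/* device holds more keys than we allocated for: grow and retry */
		num_keys = keys->num_keys;
		kfree(keys);
		goto retry;
	}
	return keys;
}
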
diff --git a/include/linux/prandom.h b/include/linux/prandom.h
index bbf4b4ad61df..ff7dcc3fa105 100644
--- a/include/linux/prandom.h
+++ b/include/linux/prandom.h
@@ -9,64 +9,9 @@
#define _LINUX_PRANDOM_H
#include <linux/types.h>
+#include <linux/once.h>
#include <linux/percpu.h>
-
-u32 prandom_u32(void);
-void prandom_bytes(void *buf, size_t nbytes);
-void prandom_seed(u32 seed);
-void prandom_reseed_late(void);
-
-DECLARE_PER_CPU(unsigned long, net_rand_noise);
-
-#define PRANDOM_ADD_NOISE(a, b, c, d) \
- prandom_u32_add_noise((unsigned long)(a), (unsigned long)(b), \
- (unsigned long)(c), (unsigned long)(d))
-
-#if BITS_PER_LONG == 64
-/*
- * The core SipHash round function. Each line can be executed in
- * parallel given enough CPU resources.
- */
-#define PRND_SIPROUND(v0, v1, v2, v3) ( \
- v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \
- v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \
- v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \
- v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \
-)
-
-#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
-#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)
-
-#elif BITS_PER_LONG == 32
-/*
- * On 32-bit machines, we use HSipHash, a reduced-width version of SipHash.
- * This is weaker, but 32-bit machines are not used for high-traffic
- * applications, so there is less output for an attacker to analyze.
- */
-#define PRND_SIPROUND(v0, v1, v2, v3) ( \
- v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \
- v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \
- v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \
- v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \
-)
-#define PRND_K0 0x6c796765
-#define PRND_K1 0x74656462
-
-#else
-#error Unsupported BITS_PER_LONG
-#endif
-
-static inline void prandom_u32_add_noise(unsigned long a, unsigned long b,
- unsigned long c, unsigned long d)
-{
- /*
- * This is not used cryptographically; it's just
- * a convenient 4-word hash function. (3 xor, 2 add, 2 rol)
- */
- a ^= raw_cpu_read(net_rand_noise);
- PRND_SIPROUND(a, b, c, d);
- raw_cpu_write(net_rand_noise, d);
-}
+#include <linux/random.h>
struct rnd_state {
__u32 s1, s2, s3, s4;
@@ -79,23 +24,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
#define prandom_init_once(pcpu_state) \
DO_ONCE(prandom_seed_full_state, (pcpu_state))
-/**
- * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
- * @ep_ro: right open interval endpoint
- *
- * Returns a pseudo-random number that is in interval [0, ep_ro). Note
- * that the result depends on PRNG being well distributed in [0, ~0U]
- * u32 space. Here we use maximally equidistributed combined Tausworthe
- * generator, that is, prandom_u32(). This is useful when requesting a
- * random index of an array containing ep_ro elements, for example.
- *
- * Returns: pseudo-random number in interval [0, ep_ro)
- */
-static inline u32 prandom_u32_max(u32 ep_ro)
-{
- return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
-}
-
/*
* Handle minimum values for seeds
*/
@@ -111,19 +39,12 @@ static inline u32 __seed(u32 x, u32 m)
*/
static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
{
- u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
+ u32 i = ((seed >> 32) ^ (seed << 10) ^ seed) & 0xffffffffUL;
state->s1 = __seed(i, 2U);
state->s2 = __seed(i, 8U);
state->s3 = __seed(i, 16U);
state->s4 = __seed(i, 128U);
- PRANDOM_ADD_NOISE(state, i, 0, 0);
-}
-
-/* Pseudo random number generator from numerical recipes. */
-static inline u32 next_pseudo_random32(u32 seed)
-{
- return seed * 1664525 + 1013904223;
}
#endif
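
Typical use of the remaining API: seed a private state, then draw from it with prandom_u32_state() (declared elsewhere in this header); example_draw() is illustrative.

static u32 example_draw(u64 seed)
{
	struct rnd_state state;

	prandom_seed_state(&state, seed);
	return prandom_u32_state(&state);
}
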
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 9881eac0698f..d964f965c8ff 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -8,7 +8,8 @@
*/
#include <linux/linkage.h>
-#include <linux/list.h>
+#include <linux/cleanup.h>
+#include <linux/types.h>
/*
* We put the hardirq and softirq counter into the preemption
@@ -77,14 +78,42 @@
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
+/**
+ * interrupt_context_level - return interrupt context level
+ *
+ * Returns the current interrupt context level.
+ * 0 - normal context
+ * 1 - softirq context
+ * 2 - hardirq context
+ * 3 - NMI context
+ */
+static __always_inline unsigned char interrupt_context_level(void)
+{
+ unsigned long pc = preempt_count();
+ unsigned char level = 0;
+
+ level += !!(pc & (NMI_MASK));
+ level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
+ level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+
+ return level;
+}
+
+/*
+ * These macro definitions avoid redundant invocations of preempt_count()
+ * because such invocations would result in redundant loads given that
+ * preempt_count() is commonly implemented with READ_ONCE().
+ */
+
#define nmi_count() (preempt_count() & NMI_MASK)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#ifdef CONFIG_PREEMPT_RT
# define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK)
+# define irq_count() ((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | softirq_count())
#else
# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+# define irq_count() (preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK))
#endif
-#define irq_count() (nmi_count() | hardirq_count() | softirq_count())
/*
* Macros to retrieve the current execution context:
@@ -97,15 +126,17 @@
#define in_nmi() (nmi_count())
#define in_hardirq() (hardirq_count())
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-#define in_task() (!(in_nmi() | in_hardirq() | in_serving_softirq()))
+#ifdef CONFIG_PREEMPT_RT
+# define in_task() (!((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | in_serving_softirq()))
+#else
+# define in_task() (!(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+#endif
/*
* The following macros are deprecated and should not be used in new code:
- * in_irq() - Obsolete version of in_hardirq()
* in_softirq() - We have BH disabled, or are processing softirqs
* in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
*/
-#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
@@ -121,7 +152,12 @@
/*
* The preempt_count offset after spin_lock()
*/
-#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
+#if !defined(CONFIG_PREEMPT_RT)
+#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
+#else
+/* Locks on RT do not disable preemption */
+#define PREEMPT_LOCK_OFFSET 0
+#endif
/*
* The preempt_count offset needed for things like:
@@ -281,6 +317,7 @@ do { \
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier;
+struct task_struct;
/**
* preempt_ops - notifiers called when a task is preempted and rescheduled
@@ -322,18 +359,18 @@ void preempt_notifier_unregister(struct preempt_notifier *notifier);
static inline void preempt_notifier_init(struct preempt_notifier *notifier,
struct preempt_ops *ops)
{
- INIT_HLIST_NODE(&notifier->link);
+ /* INIT_HLIST_NODE() open coded, to avoid dependency on list.h */
+ notifier->link.next = NULL;
+ notifier->link.pprev = NULL;
notifier->ops = ops;
}
#endif
-#ifdef CONFIG_SMP
-
/*
* Migrate-Disable and why it is undesired.
*
- * When a preempted task becomes elegible to run under the ideal model (IOW it
+ * When a preempted task becomes eligible to run under the ideal model (IOW it
* becomes one of the M highest priority tasks), it might still have to wait
* for the preemptee's migrate_disable() section to complete. Thereby suffering
* a reduction in bandwidth in the exact duration of the migrate_disable()
@@ -348,7 +385,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* - a lower priority tasks; which under preempt_disable() could've instantly
* migrated away when another CPU becomes available, is now constrained
* by the ability to push the higher priority task away, which might itself be
- * in a migrate_disable() section, reducing it's available bandwidth.
+ * in a migrate_disable() section, reducing its available bandwidth.
*
* IOW it trades latency / moves the interference term, but it stays in the
* system, and as long as it remains unbounded, the system is not fully
@@ -360,7 +397,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
* number of primitives into becoming preemptible, they would also allow
* migration. This turns out to break a bunch of per-cpu usage. To this end,
- * all these primitives employ migirate_disable() to restore this implicit
+ * all these primitives employ migrate_disable() to restore this implicit
* assumption.
*
* This is a 'temporary' work-around at best. The correct solution is getting
@@ -368,7 +405,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* per-cpu locking or short preempt-disable regions.
*
* The end goal must be to get rid of migrate_disable(), alternatively we need
- * a schedulability theory that does not depend on abritrary migration.
+ * a schedulability theory that does not depend on arbitrary migration.
*
*
* Notes on the implementation.
@@ -385,14 +422,99 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* work-conserving schedulers.
*
*/
-extern void migrate_disable(void);
-extern void migrate_enable(void);
+
+/**
+ * preempt_disable_nested - Disable preemption inside a normally preempt disabled section
+ *
+ * Use for code which requires preemption protection inside a critical
+ * section which has preemption disabled implicitly on non-PREEMPT_RT
+ * enabled kernels, by e.g.:
+ * - holding a spinlock/rwlock
+ * - soft interrupt context
+ * - regular interrupt handlers
+ *
+ * On PREEMPT_RT enabled kernels spinlock/rwlock held sections, soft
+ * interrupt context and regular interrupt handlers are preemptible and
+ * only prevent migration. preempt_disable_nested() ensures that preemption
+ * is disabled for cases which require CPU local serialization even on
+ * PREEMPT_RT. For non-PREEMPT_RT kernels this is a NOP.
+ *
+ * The use cases are code sequences which are not serialized by a
+ * particular lock instance, e.g.:
+ * - seqcount write-side critical sections where the seqcount is not
+ * associated with a particular lock and therefore the automatic
+ * protection mechanism does not work. This prevents a live lock
+ * against a preempting high priority reader.
+ * - RMW per CPU variable updates like vmstat.
+ */
+/* Macro to avoid header recursion hell vs. lockdep */
+#define preempt_disable_nested() \
+do { \
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) \
+ preempt_disable(); \
+ else \
+ lockdep_assert_preemption_disabled(); \
+} while (0)
+
+/**
+ * preempt_enable_nested - Undo the effect of preempt_disable_nested()
+ */
+static __always_inline void preempt_enable_nested(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable();
+}
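
A usage sketch (illustrative, not part of this patch): a per-CPU read-modify-write, e.g. reached from softirq context, that must stay CPU-local even on PREEMPT_RT; the counter name is a placeholder:

	static DEFINE_PER_CPU(u64, hypothetical_stat);

	static void stat_inc(void)
	{
		/*
		 * Softirq context implies disabled preemption on
		 * !PREEMPT_RT only; make it explicit for PREEMPT_RT.
		 */
		preempt_disable_nested();
		__this_cpu_add(hypothetical_stat, 1);
		preempt_enable_nested();
	}
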
+
+DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
+DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
+
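
The guards defined above pair with the cleanup.h guard() helper for scope-based critical sections; a sketch (illustrative, assuming <linux/cleanup.h> is pulled in and reusing the placeholder counter from the previous sketch):

	static void stat_reset(void)
	{
		guard(preempt)();	/* preemption disabled until end of scope */
		__this_cpu_write(hypothetical_stat, 0);
	}
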
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+extern bool preempt_model_none(void);
+extern bool preempt_model_voluntary(void);
+extern bool preempt_model_full(void);
+extern bool preempt_model_lazy(void);
#else
-static inline void migrate_disable(void) { }
-static inline void migrate_enable(void) { }
+static inline bool preempt_model_none(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_NONE);
+}
+static inline bool preempt_model_voluntary(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
+}
+static inline bool preempt_model_full(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT);
+}
+
+static inline bool preempt_model_lazy(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_LAZY);
+}
+
+#endif
+
+static inline bool preempt_model_rt(void)
+{
+ return IS_ENABLED(CONFIG_PREEMPT_RT);
+}
-#endif /* CONFIG_SMP */
+extern const char *preempt_model_str(void);
+
+/*
+ * Does the preemption model allow non-cooperative preemption?
+ *
+ * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
+ * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
+ * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
+ * PREEMPT_NONE model.
+ */
+static inline bool preempt_model_preemptible(void)
+{
+ return preempt_model_full() || preempt_model_lazy() || preempt_model_rt();
+}
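
Where it matters whether the running model preempts involuntarily, callers can branch on this at runtime; a minimal sketch (illustrative):

	if (!preempt_model_preemptible())
		cond_resched();	/* cooperative models: yield explicitly */
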
#endif /* __LINUX_PREEMPT_H */
diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
index b83a3f944f28..b068e2e60939 100644
--- a/include/linux/prefetch.h
+++ b/include/linux/prefetch.h
@@ -25,11 +25,10 @@ struct page;
 prefetch() should be defined by the architecture; if not, the
#define below provides a no-op define.
- There are 3 prefetch() macros:
+ There are 2 prefetch() macros:
prefetch(x) - prefetches the cacheline at "x" for read
prefetchw(x) - prefetches the cacheline at "x" for write
- spin_lock_prefetch(x) - prefetches the spinlock *x for taking
 there is also PREFETCH_STRIDE which is the architecture-preferred
"lookahead" size for prefetching streamed operations.
@@ -44,10 +43,6 @@ struct page;
#define prefetchw(x) __builtin_prefetch(x,1)
#endif
-#ifndef ARCH_HAS_SPINLOCK_PREFETCH
-#define spin_lock_prefetch(x) prefetchw(x)
-#endif
-
#ifndef PREFETCH_STRIDE
#define PREFETCH_STRIDE (4*L1_CACHE_BYTES)
#endif
diff --git a/include/linux/printk.h b/include/linux/printk.h
index fe7eb2351610..45c663124c9b 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -2,12 +2,14 @@
#ifndef __KERNEL_PRINTK__
#define __KERNEL_PRINTK__
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/init.h>
#include <linux/kern_levels.h>
#include <linux/linkage.h>
-#include <linux/cache.h>
#include <linux/ratelimit_types.h>
+#include <linux/once_lite.h>
+
+struct console;
extern const char linux_banner[];
extern const char linux_proc_banner[];
@@ -44,8 +46,6 @@ static inline const char *printk_skip_headers(const char *buffer)
return buffer;
}
-#define CONSOLE_EXT_LOG_MAX 8192
-
/* printk's without a loglevel use this.. */
#define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT
@@ -62,6 +62,10 @@ static inline const char *printk_skip_headers(const char *buffer)
#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT
#define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET
+int match_devname_and_update_preferred_console(const char *match,
+ const char *name,
+ const short idx);
+
extern int console_printk[];
#define console_loglevel (console_printk[0])
@@ -69,20 +73,11 @@ extern int console_printk[];
#define minimum_console_loglevel (console_printk[2])
#define default_console_loglevel (console_printk[3])
-static inline void console_silent(void)
-{
- console_loglevel = CONSOLE_LOGLEVEL_SILENT;
-}
-
-static inline void console_verbose(void)
-{
- if (console_loglevel)
- console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
-}
+extern void console_verbose(void);
/* strlen("ratelimit") + 1 */
#define DEVKMSG_STR_MAX_SIZE 10
-extern char devkmsg_log_str[];
+extern char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE];
struct ctl_table;
extern int suppress_printk;
@@ -137,7 +132,7 @@ struct va_format {
#define no_printk(fmt, ...) \
({ \
if (0) \
- printk(fmt, ##__VA_ARGS__); \
+ _printk(fmt, ##__VA_ARGS__); \
0; \
})
@@ -149,18 +144,6 @@ static inline __printf(1, 2) __cold
void early_printk(const char *s, ...) { }
#endif
-#ifdef CONFIG_PRINTK_NMI
-extern void printk_nmi_enter(void);
-extern void printk_nmi_exit(void);
-extern void printk_nmi_direct_enter(void);
-extern void printk_nmi_direct_exit(void);
-#else
-static inline void printk_nmi_enter(void) { }
-static inline void printk_nmi_exit(void) { }
-static inline void printk_nmi_direct_enter(void) { }
-static inline void printk_nmi_direct_exit(void) { }
-#endif /* PRINTK_NMI */
-
struct dev_printk_info;
#ifdef CONFIG_PRINTK
@@ -171,14 +154,30 @@ int vprintk_emit(int facility, int level,
asmlinkage __printf(1, 0)
int vprintk(const char *fmt, va_list args);
+__printf(1, 0)
+int vprintk_deferred(const char *fmt, va_list args);
asmlinkage __printf(1, 2) __cold
-int printk(const char *fmt, ...);
+int _printk(const char *fmt, ...);
/*
* Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ !
*/
-__printf(1, 2) __cold int printk_deferred(const char *fmt, ...);
+__printf(1, 2) __cold int _printk_deferred(const char *fmt, ...);
+
+extern void __printk_deferred_enter(void);
+extern void __printk_deferred_exit(void);
+
+extern void printk_force_console_enter(void);
+extern void printk_force_console_exit(void);
+
+/*
+ * The printk_deferred_enter/exit macros are available only as a hack for
+ * some code paths that need to defer all printk console printing. Interrupts
+ * must be disabled for the deferred duration.
+ */
+#define printk_deferred_enter() __printk_deferred_enter()
+#define printk_deferred_exit() __printk_deferred_exit()
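
A sketch of the calling pattern the comment above describes (illustrative); interrupts stay disabled for the whole deferred section:

	unsigned long flags;

	local_irq_save(flags);
	printk_deferred_enter();
	/* ... may printk(), but must not take the console path ... */
	printk_deferred_exit();
	local_irq_restore(flags);
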
/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
@@ -193,10 +192,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
extern int printk_delay_msec;
extern int dmesg_restrict;
-extern int
-devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void *buf,
- size_t *lenp, loff_t *ppos);
-
extern void wake_up_klogd(void);
char *log_buf_addr_get(void);
@@ -206,25 +201,53 @@ void __init setup_log_buf(int early);
__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
+extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
extern asmlinkage void dump_stack(void) __cold;
-extern void printk_safe_flush(void);
-extern void printk_safe_flush_on_panic(void);
+void printk_trigger_flush(void);
+void console_try_replay_all(void);
+void printk_legacy_allow_panic_sync(void);
+extern bool nbcon_device_try_acquire(struct console *con);
+extern void nbcon_device_release(struct console *con);
+void nbcon_atomic_flush_unsafe(void);
+bool pr_flush(int timeout_ms, bool reset_on_progress);
#else
static inline __printf(1, 0)
int vprintk(const char *s, va_list args)
{
return 0;
}
+static inline __printf(1, 0)
+int vprintk_deferred(const char *fmt, va_list args)
+{
+ return 0;
+}
static inline __printf(1, 2) __cold
-int printk(const char *s, ...)
+int _printk(const char *s, ...)
{
return 0;
}
static inline __printf(1, 2) __cold
-int printk_deferred(const char *s, ...)
+int _printk_deferred(const char *s, ...)
{
return 0;
}
+
+static inline void printk_deferred_enter(void)
+{
+}
+
+static inline void printk_deferred_exit(void)
+{
+}
+
+static inline void printk_force_console_enter(void)
+{
+}
+
+static inline void printk_force_console_exit(void)
+{
+}
+
static inline int printk_ratelimit(void)
{
return 0;
@@ -269,19 +292,97 @@ static inline void show_regs_print_info(const char *log_lvl)
{
}
+static inline void dump_stack_lvl(const char *log_lvl)
+{
+}
+
static inline void dump_stack(void)
{
}
+static inline void printk_trigger_flush(void)
+{
+}
+static inline void console_try_replay_all(void)
+{
+}
-static inline void printk_safe_flush(void)
+static inline void printk_legacy_allow_panic_sync(void)
{
}
-static inline void printk_safe_flush_on_panic(void)
+static inline bool nbcon_device_try_acquire(struct console *con)
{
+ return false;
}
+
+static inline void nbcon_device_release(struct console *con)
+{
+}
+
+static inline void nbcon_atomic_flush_unsafe(void)
+{
+}
+
+static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
+{
+ return true;
+}
+
#endif
+#ifdef CONFIG_SMP
+extern int __printk_cpu_sync_try_get(void);
+extern void __printk_cpu_sync_wait(void);
+extern void __printk_cpu_sync_put(void);
+
+#else
+
+#define __printk_cpu_sync_try_get() true
+#define __printk_cpu_sync_wait()
+#define __printk_cpu_sync_put()
+#endif /* CONFIG_SMP */
+
+/**
+ * printk_cpu_sync_get_irqsave() - Disable interrupts and acquire the printk
+ * cpu-reentrant spinning lock.
+ * @flags: Stack-allocated storage for saving local interrupt state,
+ * to be passed to printk_cpu_sync_put_irqrestore().
+ *
+ * If the lock is owned by another CPU, spin until it becomes available.
+ * Interrupts are restored while spinning.
+ *
+ * CAUTION: This function must be used carefully. It does not behave like a
+ * typical lock. Here are important things to watch out for...
+ *
+ * * This function is reentrant on the same CPU. Therefore the calling
+ * code must not assume exclusive access to data if code accessing the
+ * data can run reentrant or within NMI context on the same CPU.
+ *
+ * * If there exists usage of this function from NMI context, it becomes
+ * unsafe to perform any type of locking or spinning to wait for other
+ * CPUs after calling this function from any context. This includes
+ * using spinlocks or any other busy-waiting synchronization methods.
+ */
+#define printk_cpu_sync_get_irqsave(flags) \
+ for (;;) { \
+ local_irq_save(flags); \
+ if (__printk_cpu_sync_try_get()) \
+ break; \
+ local_irq_restore(flags); \
+ __printk_cpu_sync_wait(); \
+ }
+
+/**
+ * printk_cpu_sync_put_irqrestore() - Release the printk cpu-reentrant spinning
+ * lock and restore interrupts.
+ * @flags: Caller's saved interrupt state, from printk_cpu_sync_get_irqsave().
+ */
+#define printk_cpu_sync_put_irqrestore(flags) \
+ do { \
+ __printk_cpu_sync_put(); \
+ local_irq_restore(flags); \
+ } while (0)
+
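
A usage sketch for the pair above (illustrative; the messages are placeholders): keep a multi-line report from interleaving with output of other CPUs:

	unsigned long flags;

	printk_cpu_sync_get_irqsave(flags);
	printk(KERN_ERR "hypothetical: state A\n");
	printk(KERN_ERR "hypothetical: state B\n");
	printk_cpu_sync_put_irqrestore(flags);
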
extern int kptr_restrict;
/**
@@ -301,6 +402,117 @@ extern int kptr_restrict;
#define pr_fmt(fmt) fmt
#endif
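
A typical per-file override (illustrative; the prefix string is a placeholder), placed before any includes that expand pr_*() so every message in the file gets the prefix:

	#define pr_fmt(fmt) "mydrv: " fmt
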
+struct module;
+
+#ifdef CONFIG_PRINTK_INDEX
+struct pi_entry {
+ const char *fmt;
+ const char *func;
+ const char *file;
+ unsigned int line;
+
+ /*
+ * While printk and pr_* have the level stored in the string at compile
+ * time, some subsystems dynamically add it at runtime through the
+ * format string. For these dynamic cases, we allow the subsystem to
+ * tell us the level at compile time.
+ *
+ * NULL indicates that the level, if any, is stored in fmt.
+ */
+ const char *level;
+
+ /*
+ * The format string used by various subsystem specific printk()
+ * wrappers to prefix the message.
+ *
+ * Note that the static prefix defined by the pr_fmt() macro is stored
+ * directly in the message format (@fmt), not here.
+ */
+ const char *subsys_fmt_prefix;
+} __packed;
+
+#define __printk_index_emit(_fmt, _level, _subsys_fmt_prefix) \
+ do { \
+ if (__builtin_constant_p(_fmt) && __builtin_constant_p(_level)) { \
+ /*
+ * We check __builtin_constant_p multiple times here
+ * for the same input because GCC will produce an error
+ * if we try to assign a static variable to fmt if it
+ * is not a constant, even with the outer if statement.
+ */ \
+ static const struct pi_entry _entry \
+ __used = { \
+ .fmt = __builtin_constant_p(_fmt) ? (_fmt) : NULL, \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ .level = __builtin_constant_p(_level) ? (_level) : NULL, \
+ .subsys_fmt_prefix = _subsys_fmt_prefix,\
+ }; \
+ static const struct pi_entry *_entry_ptr \
+ __used __section(".printk_index") = &_entry; \
+ } \
+ } while (0)
+
+#else /* !CONFIG_PRINTK_INDEX */
+#define __printk_index_emit(...) do {} while (0)
+#endif /* CONFIG_PRINTK_INDEX */
+
+/*
+ * Some subsystems have their own custom printk that applies a va_format to a
+ * generic format, for example, to include a device number or other metadata
+ * alongside the format supplied by the caller.
+ *
+ * In order to store these in the way they would be emitted by the printk
+ * infrastructure, the subsystem provides us with the start, fixed string, and
+ * any subsequent text in the format string.
+ *
+ * We take a variable argument list as pr_fmt/dev_fmt/etc are sometimes passed
+ * as multiple arguments (e.g. `"%s: ", "blah"`), and we must only take the
+ * first one.
+ *
+ * subsys_fmt_prefix must be known at compile time, or compilation will fail
+ * (since this is a mistake). If fmt or level is not known at compile time, no
+ * index entry will be made (since this can legitimately happen).
+ */
+#define printk_index_subsys_emit(subsys_fmt_prefix, level, fmt, ...) \
+ __printk_index_emit(fmt, level, subsys_fmt_prefix)
+
+#define printk_index_wrap(_p_func, _fmt, ...) \
+ ({ \
+ __printk_index_emit(_fmt, NULL, NULL); \
+ _p_func(_fmt, ##__VA_ARGS__); \
+ })
+
+
+/**
+ * printk - print a kernel message
+ * @fmt: format string
+ *
+ * This is printk(). It can be called from any context. We want it to work.
+ *
+ * If printk indexing is enabled, _printk() is called from printk_index_wrap.
+ * Otherwise, printk is simply #defined to _printk.
+ *
+ * We try to grab the console_lock. If we succeed, it's easy - we log the
+ * output and call the console drivers. If we fail to get the semaphore, we
+ * place the output into the log buffer and return. The current holder of
+ * the console_sem will notice the new output in console_unlock(); and will
+ * send it to the consoles before releasing the lock.
+ *
+ * One effect of this deferred printing is that code which calls printk() and
+ * then changes console_loglevel may break. This is because console_loglevel
+ * is inspected when the actual printing occurs.
+ *
+ * See also:
+ * printf(3)
+ *
+ * See the vsnprintf() documentation for format string extensions over C99.
+ */
+#define printk(fmt, ...) printk_index_wrap(_printk, fmt, ##__VA_ARGS__)
+#define printk_deferred(fmt, ...) \
+ printk_index_wrap(_printk_deferred, fmt, ##__VA_ARGS__)
+
/**
* pr_emerg - Print an emergency-level message
* @fmt: format string
@@ -436,27 +648,9 @@ extern int kptr_restrict;
#ifdef CONFIG_PRINTK
#define printk_once(fmt, ...) \
-({ \
- static bool __section(".data.once") __print_once; \
- bool __ret_print_once = !__print_once; \
- \
- if (!__print_once) { \
- __print_once = true; \
- printk(fmt, ##__VA_ARGS__); \
- } \
- unlikely(__ret_print_once); \
-})
+ DO_ONCE_LITE(printk, fmt, ##__VA_ARGS__)
#define printk_deferred_once(fmt, ...) \
-({ \
- static bool __section(".data.once") __print_once; \
- bool __ret_print_once = !__print_once; \
- \
- if (!__print_once) { \
- __print_once = true; \
- printk_deferred(fmt, ##__VA_ARGS__); \
- } \
- unlikely(__ret_print_once); \
-})
+ DO_ONCE_LITE(printk_deferred, fmt, ##__VA_ARGS__)
#else
#define printk_once(fmt, ...) \
no_printk(fmt, ##__VA_ARGS__)
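
Usage is unchanged by the DO_ONCE_LITE conversion; e.g. (illustrative message), to warn only on the first occurrence in a hot path:

	printk_once(KERN_WARNING "hypothetical: clamping rate\n");
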
diff --git a/include/linux/prmt.h b/include/linux/prmt.h
new file mode 100644
index 000000000000..8cdc987de963
--- /dev/null
+++ b/include/linux/prmt.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <linux/uuid.h>
+
+#ifdef CONFIG_ACPI_PRMT
+void init_prmt(void);
+bool acpi_prm_handler_available(const guid_t *handler_guid);
+int acpi_call_prm_handler(guid_t handler_guid, void *param_buffer);
+#else
+static inline void init_prmt(void) { }
+static inline bool acpi_prm_handler_available(const guid_t *handler_guid) { return false; }
+static inline int acpi_call_prm_handler(guid_t handler_guid, void *param_buffer)
+{
+ return -EOPNOTSUPP;
+}
+#endif
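
A calling sketch (illustrative, not part of this patch); the GUID value and param_buffer are placeholders, as real handler GUIDs come from the firmware's PRMT table:

	static const guid_t prm_guid = GUID_INIT(0x12345678, 0x1234, 0x1234,
			0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x56, 0x78);
	void *param_buffer = NULL;	/* handler-specific buffer, placeholder */
	int ret = -ENODEV;

	if (acpi_prm_handler_available(&prm_guid))
		ret = acpi_call_prm_handler(prm_guid, param_buffer);
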
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 069c7fd95396..19d1c5e5f335 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -20,10 +20,16 @@ enum {
* If in doubt, ignore this flag.
*/
#ifdef MODULE
- PROC_ENTRY_PERMANENT = 0U,
+ PROC_ENTRY_PERMANENT = 0U,
#else
- PROC_ENTRY_PERMANENT = 1U << 0,
+ PROC_ENTRY_PERMANENT = 1U << 0,
#endif
+
+ PROC_ENTRY_proc_read_iter = 1U << 1,
+ PROC_ENTRY_proc_compat_ioctl = 1U << 2,
+ PROC_ENTRY_proc_lseek = 1U << 3,
+
+ PROC_ENTRY_FORCE_LOOKUP = 1U << 7,
};
struct proc_ops {
@@ -60,11 +66,10 @@ enum proc_pidonly {
struct proc_fs_info {
struct pid_namespace *pid_ns;
- struct dentry *proc_self; /* For /proc/self */
- struct dentry *proc_thread_self; /* For /proc/thread-self */
kgid_t pid_gid;
enum proc_hidepid hide_pid;
enum proc_pidonly pidonly;
+ struct rcu_head rcu;
};
static inline struct proc_fs_info *proc_sb_info(struct super_block *sb)
@@ -110,7 +115,16 @@ extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct proc_ops *proc_ops);
extern void proc_set_size(struct proc_dir_entry *, loff_t);
extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
-extern void *PDE_DATA(const struct inode *);
+
+/*
+ * Obtain the private data passed by the user through proc_create_data() or
+ * a related helper.
+ */
+static inline void *pde_data(const struct inode *inode)
+{
+ return inode->i_private;
+}
+
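
A sketch of the canonical use in a proc handler (illustrative; struct foo_priv and the handler are placeholders), retrieving what was passed as the data argument of proc_create_data():

	static ssize_t foo_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
	{
		struct foo_priv *priv = pde_data(file_inode(file));

		/* ... format output from priv ... */
		return 0;
	}
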
extern void *proc_get_parent_data(const struct inode *);
extern void proc_remove(struct proc_dir_entry *);
extern void remove_proc_entry(const char *, struct proc_dir_entry *);
@@ -149,6 +163,9 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task);
#endif /* CONFIG_PROC_PID_ARCH_STATUS */
+void arch_report_meminfo(struct seq_file *m);
+void arch_proc_pid_thread_features(struct seq_file *m, struct task_struct *task);
+
#else /* CONFIG_PROC_FS */
static inline void proc_root_init(void)
@@ -178,12 +195,20 @@ static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
#define proc_create_seq(name, mode, parent, ops) ({NULL;})
#define proc_create_single(name, mode, parent, show) ({NULL;})
#define proc_create_single_data(name, mode, parent, show, data) ({NULL;})
-#define proc_create(name, mode, parent, proc_ops) ({NULL;})
-#define proc_create_data(name, mode, parent, proc_ops, data) ({NULL;})
+
+static inline struct proc_dir_entry *
+proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent,
+ const struct proc_ops *proc_ops)
+{ return NULL; }
+
+static inline struct proc_dir_entry *
+proc_create_data(const char *name, umode_t mode, struct proc_dir_entry *parent,
+ const struct proc_ops *proc_ops, void *data)
+{ return NULL; }
static inline void proc_set_size(struct proc_dir_entry *de, loff_t size) {}
static inline void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) {}
-static inline void *PDE_DATA(const struct inode *inode) {BUG(); return NULL;}
+static inline void *pde_data(const struct inode *inode) {BUG(); return NULL;}
static inline void *proc_get_parent_data(const struct inode *inode) { BUG(); return NULL; }
static inline void proc_remove(struct proc_dir_entry *de) {}
@@ -191,8 +216,10 @@ static inline void proc_remove(struct proc_dir_entry *de) {}
static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { return 0; }
#define proc_create_net_data(name, mode, parent, ops, state_size, data) ({NULL;})
+#define proc_create_net_data_write(name, mode, parent, ops, write, state_size, data) ({NULL;})
#define proc_create_net(name, mode, parent, state_size, ops) ({NULL;})
#define proc_create_net_single(name, mode, parent, show, data) ({NULL;})
+#define proc_create_net_single_write(name, mode, parent, show, write, data) ({NULL;})
static inline struct pid *tgid_pidfd_to_pid(const struct file *file)
{
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 75807ecef880..e81b8e596e4f 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -5,7 +5,8 @@
#ifndef _LINUX_PROC_NS_H
#define _LINUX_PROC_NS_H
-#include <linux/ns_common.h>
+#include <linux/nsfs.h>
+#include <uapi/linux/nsfs.h>
struct pid_namespace;
struct nsset;
@@ -16,7 +17,6 @@ struct inode;
struct proc_ns_operations {
const char *name;
const char *real_ns_name;
- int type;
struct ns_common *(*get)(struct task_struct *task);
void (*put)(struct ns_common *ns);
int (*install)(struct nsset *nsset, struct ns_common *ns);
@@ -39,13 +39,14 @@ extern const struct proc_ns_operations timens_for_children_operations;
* We always define these enumerators
*/
enum {
- PROC_ROOT_INO = 1,
- PROC_IPC_INIT_INO = 0xEFFFFFFFU,
- PROC_UTS_INIT_INO = 0xEFFFFFFEU,
- PROC_USER_INIT_INO = 0xEFFFFFFDU,
- PROC_PID_INIT_INO = 0xEFFFFFFCU,
- PROC_CGROUP_INIT_INO = 0xEFFFFFFBU,
- PROC_TIME_INIT_INO = 0xEFFFFFFAU,
+ PROC_IPC_INIT_INO = IPC_NS_INIT_INO,
+ PROC_UTS_INIT_INO = UTS_NS_INIT_INO,
+ PROC_USER_INIT_INO = USER_NS_INIT_INO,
+ PROC_PID_INIT_INO = PID_NS_INIT_INO,
+ PROC_CGROUP_INIT_INO = CGROUP_NS_INIT_INO,
+ PROC_TIME_INIT_INO = TIME_NS_INIT_INO,
+ PROC_NET_INIT_INO = NET_NS_INIT_INO,
+ PROC_MNT_INIT_INO = MNT_NS_INIT_INO,
};
#ifdef CONFIG_PROC_FS
@@ -64,26 +65,6 @@ static inline void proc_free_inum(unsigned int inum) {}
#endif /* CONFIG_PROC_FS */
-static inline int ns_alloc_inum(struct ns_common *ns)
-{
- atomic_long_set(&ns->stashed, 0);
- return proc_alloc_inum(&ns->inum);
-}
-
-#define ns_free_inum(ns) proc_free_inum((ns)->inum)
-
-extern struct file *proc_ns_fget(int fd);
#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
-extern int ns_get_path(struct path *path, struct task_struct *task,
- const struct proc_ns_operations *ns_ops);
-typedef struct ns_common *ns_get_path_helper_t(void *);
-extern int ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
- void *private_data);
-
-extern bool ns_match(const struct ns_common *ns, dev_t dev, ino_t ino);
-
-extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
- const struct proc_ns_operations *ns_ops);
-extern void nsfs_init(void);
#endif /* _LINUX_PROC_NS_H */
diff --git a/include/linux/profile.h b/include/linux/profile.h
index fd18ca96f557..3f53cdb0c27c 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -4,38 +4,26 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/cpumask.h>
#include <linux/cache.h>
#include <asm/errno.h>
#define CPU_PROFILING 1
#define SCHED_PROFILING 2
-#define SLEEP_PROFILING 3
#define KVM_PROFILING 4
struct proc_dir_entry;
struct notifier_block;
#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS)
-void create_prof_cpu_mask(void);
int create_proc_profile(void);
#else
-static inline void create_prof_cpu_mask(void)
-{
-}
-
static inline int create_proc_profile(void)
{
return 0;
}
#endif
-enum profile_type {
- PROFILE_TASK_EXIT,
- PROFILE_MUNMAP
-};
-
#ifdef CONFIG_PROFILING
extern int prof_on __read_mostly;
@@ -66,23 +54,6 @@ static inline void profile_hit(int type, void *ip)
struct task_struct;
struct mm_struct;
-/* task is in do_exit() */
-void profile_task_exit(struct task_struct * task);
-
-/* task is dead, free task struct ? Returns 1 if
- * the task was taken, 0 if the task should be freed.
- */
-int profile_handoff_task(struct task_struct * task);
-
-/* sys_munmap */
-void profile_munmap(unsigned long addr);
-
-int task_handoff_register(struct notifier_block * n);
-int task_handoff_unregister(struct notifier_block * n);
-
-int profile_event_register(enum profile_type, struct notifier_block * n);
-int profile_event_unregister(enum profile_type, struct notifier_block * n);
-
#else
#define prof_on 0
@@ -107,29 +78,6 @@ static inline void profile_hit(int type, void *ip)
return;
}
-static inline int task_handoff_register(struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-static inline int task_handoff_unregister(struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-static inline int profile_event_register(enum profile_type t, struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-static inline int profile_event_unregister(enum profile_type t, struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-#define profile_task_exit(a) do { } while (0)
-#define profile_handoff_task(a) (0)
-#define profile_munmap(a) do { } while (0)
#endif /* CONFIG_PROFILING */
diff --git a/include/linux/property.h b/include/linux/property.h
index 0d876316e61d..272bfbdea7bf 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -10,9 +10,14 @@
#ifndef _LINUX_PROPERTY_H_
#define _LINUX_PROPERTY_H_
+#include <linux/args.h>
+#include <linux/array_size.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/fwnode.h>
+#include <linux/stddef.h>
#include <linux/types.h>
+#include <linux/util_macros.h>
struct device;
@@ -25,33 +30,34 @@ enum dev_prop_type {
DEV_PROP_REF,
};
-enum dev_dma_attr {
- DEV_DMA_NOT_SUPPORTED,
- DEV_DMA_NON_COHERENT,
- DEV_DMA_COHERENT,
-};
-
-struct fwnode_handle *dev_fwnode(struct device *dev);
+const struct fwnode_handle *__dev_fwnode_const(const struct device *dev);
+struct fwnode_handle *__dev_fwnode(struct device *dev);
+#define dev_fwnode(dev) \
+ _Generic((dev), \
+ const struct device *: __dev_fwnode_const, \
+ struct device *: __dev_fwnode)(dev)
-bool device_property_present(struct device *dev, const char *propname);
-int device_property_read_u8_array(struct device *dev, const char *propname,
+bool device_property_present(const struct device *dev, const char *propname);
+bool device_property_read_bool(const struct device *dev, const char *propname);
+int device_property_read_u8_array(const struct device *dev, const char *propname,
u8 *val, size_t nval);
-int device_property_read_u16_array(struct device *dev, const char *propname,
+int device_property_read_u16_array(const struct device *dev, const char *propname,
u16 *val, size_t nval);
-int device_property_read_u32_array(struct device *dev, const char *propname,
+int device_property_read_u32_array(const struct device *dev, const char *propname,
u32 *val, size_t nval);
-int device_property_read_u64_array(struct device *dev, const char *propname,
+int device_property_read_u64_array(const struct device *dev, const char *propname,
u64 *val, size_t nval);
-int device_property_read_string_array(struct device *dev, const char *propname,
+int device_property_read_string_array(const struct device *dev, const char *propname,
const char **val, size_t nval);
-int device_property_read_string(struct device *dev, const char *propname,
+int device_property_read_string(const struct device *dev, const char *propname,
const char **val);
-int device_property_match_string(struct device *dev,
+int device_property_match_string(const struct device *dev,
const char *propname, const char *string);
-bool fwnode_device_is_available(const struct fwnode_handle *fwnode);
bool fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname);
+bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
+ const char *propname);
int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode,
const char *propname, u8 *val,
size_t nval);
@@ -71,6 +77,65 @@ int fwnode_property_read_string(const struct fwnode_handle *fwnode,
const char *propname, const char **val);
int fwnode_property_match_string(const struct fwnode_handle *fwnode,
const char *propname, const char *string);
+
+bool fwnode_device_is_available(const struct fwnode_handle *fwnode);
+
+static inline bool fwnode_device_is_big_endian(const struct fwnode_handle *fwnode)
+{
+ if (fwnode_property_present(fwnode, "big-endian"))
+ return true;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
+ fwnode_property_present(fwnode, "native-endian"))
+ return true;
+ return false;
+}
+
+static inline
+bool fwnode_device_is_compatible(const struct fwnode_handle *fwnode, const char *compat)
+{
+ return fwnode_property_match_string(fwnode, "compatible", compat) >= 0;
+}
+
+/**
+ * device_is_big_endian - check if a device has BE registers
+ * @dev: Pointer to the struct device
+ *
+ * Returns: true if the device has a "big-endian" property, or if the kernel
+ * was compiled for BE *and* the device has a "native-endian" property.
+ * Returns false otherwise.
+ *
+ * Callers would nominally use ioread32be/iowrite32be if
+ * device_is_big_endian() == true, or readl/writel otherwise.
+ */
+static inline bool device_is_big_endian(const struct device *dev)
+{
+ return fwnode_device_is_big_endian(dev_fwnode(dev));
+}
+
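
Following the kerneldoc above, a register-read sketch (illustrative; regs and REG_STATUS are placeholders):

	u32 val;

	if (device_is_big_endian(dev))
		val = ioread32be(regs + REG_STATUS);
	else
		val = readl(regs + REG_STATUS);
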
+/**
+ * device_is_compatible - match 'compatible' property of the device with a given string
+ * @dev: Pointer to the struct device
+ * @compat: The string to match 'compatible' property with
+ *
+ * Returns: true if matches, otherwise false.
+ */
+static inline bool device_is_compatible(const struct device *dev, const char *compat)
+{
+ return fwnode_device_is_compatible(dev_fwnode(dev), compat);
+}
+
+int fwnode_property_match_property_string(const struct fwnode_handle *fwnode,
+ const char *propname,
+ const char * const *array, size_t n);
+
+static inline
+int device_property_match_property_string(const struct device *dev,
+ const char *propname,
+ const char * const *array, size_t n)
+{
+ return fwnode_property_match_property_string(dev_fwnode(dev), propname, array, n);
+}
+
int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
const char *prop, const char *nargs_prop,
unsigned int nargs, unsigned int index,
@@ -82,15 +147,18 @@ struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
const char *fwnode_get_name(const struct fwnode_handle *fwnode);
const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode);
+bool fwnode_name_eq(const struct fwnode_handle *fwnode, const char *name);
+
struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode);
-struct fwnode_handle *fwnode_get_next_parent(
- struct fwnode_handle *fwnode);
-struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode);
+
+#define fwnode_for_each_parent_node(fwnode, parent) \
+ for (parent = fwnode_get_parent(fwnode); parent; \
+ parent = fwnode_get_next_parent(parent))
+
unsigned int fwnode_count_parents(const struct fwnode_handle *fwn);
struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwn,
unsigned int depth);
-bool fwnode_is_ancestor_of(struct fwnode_handle *test_ancestor,
- struct fwnode_handle *test_child);
struct fwnode_handle *fwnode_get_next_child_node(
const struct fwnode_handle *fwnode, struct fwnode_handle *child);
struct fwnode_handle *fwnode_get_next_available_child_node(
@@ -100,91 +168,134 @@ struct fwnode_handle *fwnode_get_next_available_child_node(
for (child = fwnode_get_next_child_node(fwnode, NULL); child; \
child = fwnode_get_next_child_node(fwnode, child))
+#define fwnode_for_each_named_child_node(fwnode, child, name) \
+ fwnode_for_each_child_node(fwnode, child) \
+ for_each_if(fwnode_name_eq(child, name))
+
#define fwnode_for_each_available_child_node(fwnode, child) \
for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\
child = fwnode_get_next_available_child_node(fwnode, child))
-struct fwnode_handle *device_get_next_child_node(
- struct device *dev, struct fwnode_handle *child);
+#define fwnode_for_each_child_node_scoped(fwnode, child) \
+ for (struct fwnode_handle *child __free(fwnode_handle) = \
+ fwnode_get_next_child_node(fwnode, NULL); \
+ child; child = fwnode_get_next_child_node(fwnode, child))
+
+#define fwnode_for_each_available_child_node_scoped(fwnode, child) \
+ for (struct fwnode_handle *child __free(fwnode_handle) = \
+ fwnode_get_next_available_child_node(fwnode, NULL); \
+ child; child = fwnode_get_next_available_child_node(fwnode, child))
+
+struct fwnode_handle *device_get_next_child_node(const struct device *dev,
+ struct fwnode_handle *child);
#define device_for_each_child_node(dev, child) \
for (child = device_get_next_child_node(dev, NULL); child; \
child = device_get_next_child_node(dev, child))
-struct fwnode_handle *fwnode_get_named_child_node(
- const struct fwnode_handle *fwnode, const char *childname);
-struct fwnode_handle *device_get_named_child_node(struct device *dev,
+#define device_for_each_named_child_node(dev, child, name) \
+ device_for_each_child_node(dev, child) \
+ for_each_if(fwnode_name_eq(child, name))
+
+#define device_for_each_child_node_scoped(dev, child) \
+ for (struct fwnode_handle *child __free(fwnode_handle) = \
+ device_get_next_child_node(dev, NULL); \
+ child; child = device_get_next_child_node(dev, child))
+
+#define device_for_each_named_child_node_scoped(dev, child, name) \
+ device_for_each_child_node_scoped(dev, child) \
+ for_each_if(fwnode_name_eq(child, name))
+
+struct fwnode_handle *fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
+ const char *childname);
+struct fwnode_handle *device_get_named_child_node(const struct device *dev,
const char *childname);
struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode);
-void fwnode_handle_put(struct fwnode_handle *fwnode);
-int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index);
+/**
+ * fwnode_handle_put - Drop reference to a device node
+ * @fwnode: Pointer to the device node to drop the reference to.
+ *
+ * This has to be used when terminating device_for_each_child_node() iteration
+ * with break or return to prevent stale device node references from being left
+ * behind.
+ */
+static inline void fwnode_handle_put(struct fwnode_handle *fwnode)
+{
+ fwnode_call_void_op(fwnode, put);
+}
+
+DEFINE_FREE(fwnode_handle, struct fwnode_handle *, fwnode_handle_put(_T))
+
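
Per the kerneldoc above, a sketch (illustrative; the node name is a placeholder) of leaving the iteration early without leaking the reference:

	struct fwnode_handle *child;

	device_for_each_child_node(dev, child) {
		if (fwnode_name_eq(child, "port")) {
			/* done with it: drop the iterator's reference */
			fwnode_handle_put(child);
			break;
		}
	}

The *_scoped iterators introduced earlier rely on the DEFINE_FREE() hook above to drop the reference automatically, avoiding this manual put.
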
+int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index);
+int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name);
-unsigned int device_get_child_node_count(struct device *dev);
+unsigned int fwnode_get_child_node_count(const struct fwnode_handle *fwnode);
+
+static inline unsigned int device_get_child_node_count(const struct device *dev)
+{
+ return fwnode_get_child_node_count(dev_fwnode(dev));
+}
-static inline bool device_property_read_bool(struct device *dev,
- const char *propname)
+unsigned int fwnode_get_named_child_node_count(const struct fwnode_handle *fwnode,
+ const char *name);
+static inline unsigned int device_get_named_child_node_count(const struct device *dev,
+ const char *name)
{
- return device_property_present(dev, propname);
+ return fwnode_get_named_child_node_count(dev_fwnode(dev), name);
}
-static inline int device_property_read_u8(struct device *dev,
+static inline int device_property_read_u8(const struct device *dev,
const char *propname, u8 *val)
{
return device_property_read_u8_array(dev, propname, val, 1);
}
-static inline int device_property_read_u16(struct device *dev,
+static inline int device_property_read_u16(const struct device *dev,
const char *propname, u16 *val)
{
return device_property_read_u16_array(dev, propname, val, 1);
}
-static inline int device_property_read_u32(struct device *dev,
+static inline int device_property_read_u32(const struct device *dev,
const char *propname, u32 *val)
{
return device_property_read_u32_array(dev, propname, val, 1);
}
-static inline int device_property_read_u64(struct device *dev,
+static inline int device_property_read_u64(const struct device *dev,
const char *propname, u64 *val)
{
return device_property_read_u64_array(dev, propname, val, 1);
}
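
A common pattern with the single-value wrappers (illustrative; the property name and default are placeholders): treat a missing property as a default:

	u32 rate;

	if (device_property_read_u32(dev, "poll-interval-ms", &rate))
		rate = 100;	/* absent or unreadable: fall back */
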
-static inline int device_property_count_u8(struct device *dev, const char *propname)
+static inline int device_property_count_u8(const struct device *dev, const char *propname)
{
return device_property_read_u8_array(dev, propname, NULL, 0);
}
-static inline int device_property_count_u16(struct device *dev, const char *propname)
+static inline int device_property_count_u16(const struct device *dev, const char *propname)
{
return device_property_read_u16_array(dev, propname, NULL, 0);
}
-static inline int device_property_count_u32(struct device *dev, const char *propname)
+static inline int device_property_count_u32(const struct device *dev, const char *propname)
{
return device_property_read_u32_array(dev, propname, NULL, 0);
}
-static inline int device_property_count_u64(struct device *dev, const char *propname)
+static inline int device_property_count_u64(const struct device *dev, const char *propname)
{
return device_property_read_u64_array(dev, propname, NULL, 0);
}
-static inline int device_property_string_array_count(struct device *dev,
+static inline int device_property_string_array_count(const struct device *dev,
const char *propname)
{
return device_property_read_string_array(dev, propname, NULL, 0);
}
-static inline bool fwnode_property_read_bool(const struct fwnode_handle *fwnode,
- const char *propname)
-{
- return fwnode_property_present(fwnode, propname);
-}
-
static inline int fwnode_property_read_u8(const struct fwnode_handle *fwnode,
const char *propname, u8 *val)
{
@@ -244,20 +355,27 @@ struct software_node;
/**
* struct software_node_ref_args - Reference property with additional arguments
- * @node: Reference to a software node
+ * @swnode: Reference to a software node
+ * @fwnode: Alternative reference to a firmware node handle
* @nargs: Number of elements in @args array
* @args: Integer arguments
*/
struct software_node_ref_args {
- const struct software_node *node;
+ const struct software_node *swnode;
+ struct fwnode_handle *fwnode;
unsigned int nargs;
u64 args[NR_FWNODE_REFERENCE_ARGS];
};
#define SOFTWARE_NODE_REFERENCE(_ref_, ...) \
(const struct software_node_ref_args) { \
- .node = _ref_, \
- .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \
+ .swnode = _Generic(_ref_, \
+ const struct software_node *: _ref_, \
+ default: NULL), \
+ .fwnode = _Generic(_ref_, \
+ struct fwnode_handle *: _ref_, \
+ default: NULL), \
+ .nargs = COUNT_ARGS(__VA_ARGS__), \
.args = { __VA_ARGS__ }, \
}
@@ -292,24 +410,14 @@ struct property_entry {
* crafted to avoid gcc-4.4.4's problems with initialization of anon unions
* and structs.
*/
-
-#define __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_) \
- sizeof(((struct property_entry *)NULL)->value._elem_[0])
-
-#define __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, _elsize_, _Type_, \
- _val_, _len_) \
-(struct property_entry) { \
- .name = _name_, \
- .length = (_len_) * (_elsize_), \
- .type = DEV_PROP_##_Type_, \
- { .pointer = _val_ }, \
+#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_) \
+(struct property_entry) { \
+ .name = _name_, \
+ .length = (_len_) * sizeof_field(struct property_entry, value._elem_[0]), \
+ .type = DEV_PROP_##_Type_, \
+ { .pointer = _val_ }, \
}
-#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_)\
- __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, \
- __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \
- _Type_, _val_, _len_)
-
#define PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, _len_) \
__PROPERTY_ENTRY_ARRAY_LEN(_name_, u8_data, U8, _val_, _len_)
#define PROPERTY_ENTRY_U16_ARRAY_LEN(_name_, _val_, _len_) \
@@ -320,10 +428,14 @@ struct property_entry {
__PROPERTY_ENTRY_ARRAY_LEN(_name_, u64_data, U64, _val_, _len_)
#define PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, _len_) \
__PROPERTY_ENTRY_ARRAY_LEN(_name_, str, STRING, _val_, _len_)
+
#define PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, _len_) \
- __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, \
- sizeof(struct software_node_ref_args), \
- REF, _val_, _len_)
+(struct property_entry) { \
+ .name = _name_, \
+ .length = (_len_) * sizeof(struct software_node_ref_args), \
+ .type = DEV_PROP_REF, \
+ { .pointer = _val_ }, \
+}
#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
@@ -335,13 +447,13 @@ struct property_entry {
PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
-#define PROPERTY_ENTRY_REF_ARRAY(_name_, _val_) \
+#define PROPERTY_ENTRY_REF_ARRAY(_name_, _val_) \
PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
#define __PROPERTY_ENTRY_ELEMENT(_name_, _elem_, _Type_, _val_) \
(struct property_entry) { \
.name = _name_, \
- .length = __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \
+ .length = sizeof_field(struct property_entry, value._elem_[0]), \
.is_inline = true, \
.type = DEV_PROP_##_Type_, \
{ .value = { ._elem_[0] = _val_ } }, \
@@ -358,12 +470,6 @@ struct property_entry {
#define PROPERTY_ENTRY_STRING(_name_, _val_) \
__PROPERTY_ENTRY_ELEMENT(_name_, str, STRING, _val_)
-#define PROPERTY_ENTRY_BOOL(_name_) \
-(struct property_entry) { \
- .name = _name_, \
- .is_inline = true, \
-}
-
#define PROPERTY_ENTRY_REF(_name_, _ref_, ...) \
(struct property_entry) { \
.name = _name_, \
@@ -372,28 +478,26 @@ struct property_entry {
{ .pointer = &SOFTWARE_NODE_REFERENCE(_ref_, ##__VA_ARGS__), }, \
}
+#define PROPERTY_ENTRY_BOOL(_name_) \
+(struct property_entry) { \
+ .name = _name_, \
+ .is_inline = true, \
+}
+
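
An illustrative property table built from these initializers (names and values are placeholders); such arrays are terminated by an empty entry:

	static const struct property_entry foo_props[] = {
		PROPERTY_ENTRY_U32("clock-frequency", 400000),
		PROPERTY_ENTRY_STRING("label", "demo"),
		PROPERTY_ENTRY_BOOL("wakeup-source"),
		{ }
	};
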
struct property_entry *
property_entries_dup(const struct property_entry *properties);
-
void property_entries_free(const struct property_entry *properties);
-int device_add_properties(struct device *dev,
- const struct property_entry *properties);
-void device_remove_properties(struct device *dev);
-
-bool device_dma_supported(struct device *dev);
+bool device_dma_supported(const struct device *dev);
+enum dev_dma_attr device_get_dma_attr(const struct device *dev);
-enum dev_dma_attr device_get_dma_attr(struct device *dev);
-
-const void *device_get_match_data(struct device *dev);
+const void *device_get_match_data(const struct device *dev);
int device_get_phy_mode(struct device *dev);
+int fwnode_get_phy_mode(const struct fwnode_handle *fwnode);
-void *device_get_mac_address(struct device *dev, char *addr, int alen);
+void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index);
-int fwnode_get_phy_mode(struct fwnode_handle *fwnode);
-void *fwnode_get_mac_address(struct fwnode_handle *fwnode,
- char *addr, int alen);
struct fwnode_handle *fwnode_graph_get_next_endpoint(
const struct fwnode_handle *fwnode, struct fwnode_handle *prev);
struct fwnode_handle *
@@ -404,11 +508,8 @@ struct fwnode_handle *fwnode_graph_get_remote_port(
const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_graph_get_remote_endpoint(
const struct fwnode_handle *fwnode);
-struct fwnode_handle *
-fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port,
- u32 endpoint);
-static inline bool fwnode_graph_is_endpoint(struct fwnode_handle *fwnode)
+static inline bool fwnode_graph_is_endpoint(const struct fwnode_handle *fwnode)
{
return fwnode_property_present(fwnode, "remote-endpoint");
}
@@ -421,7 +522,8 @@ static inline bool fwnode_graph_is_endpoint(struct fwnode_handle *fwnode)
* one.
* @FWNODE_GRAPH_DEVICE_DISABLED: That the device to which the remote
 * endpoint of the given endpoint belongs,
- * may be disabled.
+ * may be disabled, or that the endpoint is not
+ * connected.
*/
#define FWNODE_GRAPH_ENDPOINT_NEXT BIT(0)
#define FWNODE_GRAPH_DEVICE_DISABLED BIT(1)
@@ -429,28 +531,35 @@ static inline bool fwnode_graph_is_endpoint(struct fwnode_handle *fwnode)
struct fwnode_handle *
fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
u32 port, u32 endpoint, unsigned long flags);
+unsigned int fwnode_graph_get_endpoint_count(const struct fwnode_handle *fwnode,
+ unsigned long flags);
-#define fwnode_graph_for_each_endpoint(fwnode, child) \
- for (child = NULL; \
- (child = fwnode_graph_get_next_endpoint(fwnode, child)); )
+#define fwnode_graph_for_each_endpoint(fwnode, child) \
+ for (child = fwnode_graph_get_next_endpoint(fwnode, NULL); child; \
+ child = fwnode_graph_get_next_endpoint(fwnode, child))
int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
-typedef void *(*devcon_match_fn_t)(struct fwnode_handle *fwnode, const char *id,
+typedef void *(*devcon_match_fn_t)(const struct fwnode_handle *fwnode, const char *id,
void *data);
-void *fwnode_connection_find_match(struct fwnode_handle *fwnode,
+void *fwnode_connection_find_match(const struct fwnode_handle *fwnode,
const char *con_id, void *data,
devcon_match_fn_t match);
-static inline void *device_connection_find_match(struct device *dev,
+static inline void *device_connection_find_match(const struct device *dev,
const char *con_id, void *data,
devcon_match_fn_t match)
{
return fwnode_connection_find_match(dev_fwnode(dev), con_id, data, match);
}
+int fwnode_connection_find_matches(const struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches, unsigned int matches_len);
+
/* -------------------------------------------------------------------------- */
/* Software fwnode support - when HW description is incomplete or missing */
@@ -466,6 +575,13 @@ struct software_node {
const struct property_entry *properties;
};
+#define SOFTWARE_NODE(_name_, _properties_, _parent_) \
+ (struct software_node) { \
+ .name = _name_, \
+ .properties = _properties_, \
+ .parent = _parent_, \
+ }
+
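
A compound-literal use of the new SOFTWARE_NODE() helper (illustrative, not part of this patch), pairing it with a property table such as the foo_props sketched earlier; a NULL parent makes it a root node:

	static const struct software_node foo_node =
		SOFTWARE_NODE("foo", foo_props, NULL);

	/* ... later: software_node_register(&foo_node) ... */
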
bool is_software_node(const struct fwnode_handle *fwnode);
const struct software_node *
to_software_node(const struct fwnode_handle *fwnode);
@@ -475,17 +591,12 @@ const struct software_node *
software_node_find_by_name(const struct software_node *parent,
const char *name);
-int software_node_register_nodes(const struct software_node *nodes);
-void software_node_unregister_nodes(const struct software_node *nodes);
-
-int software_node_register_node_group(const struct software_node **node_group);
-void software_node_unregister_node_group(const struct software_node **node_group);
+int software_node_register_node_group(const struct software_node * const *node_group);
+void software_node_unregister_node_group(const struct software_node * const *node_group);
int software_node_register(const struct software_node *node);
void software_node_unregister(const struct software_node *node);
-int software_node_notify(struct device *dev, unsigned long action);
-
struct fwnode_handle *
fwnode_create_software_node(const struct property_entry *properties,
const struct fwnode_handle *parent);
diff --git a/include/linux/pruss_driver.h b/include/linux/pruss_driver.h
index ecfded30ed05..2e18fef1a2e1 100644
--- a/include/linux/pruss_driver.h
+++ b/include/linux/pruss_driver.h
@@ -9,7 +9,55 @@
#ifndef _PRUSS_DRIVER_H_
#define _PRUSS_DRIVER_H_
+#include <linux/mutex.h>
+#include <linux/remoteproc/pruss.h>
#include <linux/types.h>
+#include <linux/err.h>
+
+/*
+ * enum pruss_gp_mux_sel - PRUSS GPI/O Mux modes for the
+ * PRUSS_GPCFG0/1 registers
+ *
+ * NOTE: The defines below are the most common values, but there
+ * are some exceptions like on 66AK2G, where the RESERVED and MII2
+ * values are interchanged. Also, this bit-field does not exist on
+ * AM335x SoCs.
+ */
+enum pruss_gp_mux_sel {
+ PRUSS_GP_MUX_SEL_GP,
+ PRUSS_GP_MUX_SEL_ENDAT,
+ PRUSS_GP_MUX_SEL_RESERVED,
+ PRUSS_GP_MUX_SEL_SD,
+ PRUSS_GP_MUX_SEL_MII2,
+ PRUSS_GP_MUX_SEL_MAX,
+};
+
+/*
+ * enum pruss_gpi_mode - PRUSS GPI configuration modes, used
+ * to program the PRUSS_GPCFG0/1 registers
+ */
+enum pruss_gpi_mode {
+ PRUSS_GPI_MODE_DIRECT,
+ PRUSS_GPI_MODE_PARALLEL,
+ PRUSS_GPI_MODE_28BIT_SHIFT,
+ PRUSS_GPI_MODE_MII,
+ PRUSS_GPI_MODE_MAX,
+};
+
+/**
+ * enum pru_type - PRU core type identifier
+ *
+ * @PRU_TYPE_PRU: Programmable Real-time Unit
+ * @PRU_TYPE_RTU: Auxiliary Programmable Real-Time Unit
+ * @PRU_TYPE_TX_PRU: Transmit Programmable Real-Time Unit
+ * @PRU_TYPE_MAX: just keep this one at the end
+ */
+enum pru_type {
+ PRU_TYPE_PRU,
+ PRU_TYPE_RTU,
+ PRU_TYPE_TX_PRU,
+ PRU_TYPE_MAX,
+};
/*
* enum pruss_mem - PRUSS memory range identifiers
@@ -39,6 +87,8 @@ struct pruss_mem_region {
* @cfg_base: base iomap for CFG region
* @cfg_regmap: regmap for config region
* @mem_regions: data for each of the PRUSS memory regions
+ * @mem_in_use: to indicate if memory resource is in use
+ * @lock: mutex to serialize access to resources
* @core_clk_mux: clk handle for PRUSS CORE_CLK_MUX
* @iep_clk_mux: clk handle for PRUSS IEP_CLK_MUX
*/
@@ -47,8 +97,81 @@ struct pruss {
void __iomem *cfg_base;
struct regmap *cfg_regmap;
struct pruss_mem_region mem_regions[PRUSS_MEM_MAX];
+ struct pruss_mem_region *mem_in_use[PRUSS_MEM_MAX];
+ struct mutex lock; /* PRU resource lock */
struct clk *core_clk_mux;
struct clk *iep_clk_mux;
};
+#if IS_ENABLED(CONFIG_TI_PRUSS)
+
+struct pruss *pruss_get(struct rproc *rproc);
+void pruss_put(struct pruss *pruss);
+int pruss_request_mem_region(struct pruss *pruss, enum pruss_mem mem_id,
+ struct pruss_mem_region *region);
+int pruss_release_mem_region(struct pruss *pruss,
+ struct pruss_mem_region *region);
+int pruss_cfg_get_gpmux(struct pruss *pruss, enum pruss_pru_id pru_id, u8 *mux);
+int pruss_cfg_set_gpmux(struct pruss *pruss, enum pruss_pru_id pru_id, u8 mux);
+int pruss_cfg_gpimode(struct pruss *pruss, enum pruss_pru_id pru_id,
+ enum pruss_gpi_mode mode);
+int pruss_cfg_miirt_enable(struct pruss *pruss, bool enable);
+int pruss_cfg_xfr_enable(struct pruss *pruss, enum pru_type pru_type,
+ bool enable);
+
+#else
+
+static inline struct pruss *pruss_get(struct rproc *rproc)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void pruss_put(struct pruss *pruss) { }
+
+static inline int pruss_request_mem_region(struct pruss *pruss,
+ enum pruss_mem mem_id,
+ struct pruss_mem_region *region)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pruss_release_mem_region(struct pruss *pruss,
+ struct pruss_mem_region *region)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pruss_cfg_get_gpmux(struct pruss *pruss,
+ enum pruss_pru_id pru_id, u8 *mux)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pruss_cfg_set_gpmux(struct pruss *pruss,
+ enum pruss_pru_id pru_id, u8 mux)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pruss_cfg_gpimode(struct pruss *pruss,
+ enum pruss_pru_id pru_id,
+ enum pruss_gpi_mode mode)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pruss_cfg_miirt_enable(struct pruss *pruss, bool enable)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pruss_cfg_xfr_enable(struct pruss *pruss,
+ enum pru_type pru_type,
+ bool enable)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_TI_PRUSS */
+
#endif /* _PRUSS_DRIVER_H_ */
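
A consumer sketch under CONFIG_TI_PRUSS (illustrative, not part of this patch), assuming the caller already holds the PRU's rproc handle; PRUSS_MEM_DRAM0 is assumed to be one of the enum pruss_mem identifiers elided from this hunk:

	struct pruss_mem_region region;
	struct pruss *pruss;
	int ret;

	pruss = pruss_get(rproc);
	if (IS_ERR(pruss))
		return PTR_ERR(pruss);

	ret = pruss_request_mem_region(pruss, PRUSS_MEM_DRAM0, &region);
	if (ret)
		goto out_put;

	/* ... use region.va / region.size ... */

	pruss_release_mem_region(pruss, &region);
out_put:
	pruss_put(pruss);
	return ret;
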
diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
new file mode 100644
index 000000000000..4e5696cfade7
--- /dev/null
+++ b/include/linux/pse-pd/pse.h
@@ -0,0 +1,421 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+ */
+#ifndef _LINUX_PSE_CONTROLLER_H
+#define _LINUX_PSE_CONTROLLER_H
+
+#include <linux/list.h>
+#include <linux/netlink.h>
+#include <linux/kfifo.h>
+#include <uapi/linux/ethtool.h>
+#include <uapi/linux/ethtool_netlink_generated.h>
+#include <linux/regulator/driver.h>
+
+/* Maximum current in uA according to IEEE 802.3-2022 Table 145-1 */
+#define MAX_PI_CURRENT 1920000
+/* Maximum power in mW according to IEEE 802.3-2022 Table 145-16 */
+#define MAX_PI_PW 99900
+
+struct net_device;
+struct phy_device;
+struct pse_controller_dev;
+struct netlink_ext_ack;
+
+/* C33 PSE extended state and substate. */
+struct ethtool_c33_pse_ext_state_info {
+ enum ethtool_c33_pse_ext_state c33_pse_ext_state;
+ union {
+ enum ethtool_c33_pse_ext_substate_error_condition error_condition;
+ enum ethtool_c33_pse_ext_substate_mr_pse_enable mr_pse_enable;
+ enum ethtool_c33_pse_ext_substate_option_detect_ted option_detect_ted;
+ enum ethtool_c33_pse_ext_substate_option_vport_lim option_vport_lim;
+ enum ethtool_c33_pse_ext_substate_ovld_detected ovld_detected;
+ enum ethtool_c33_pse_ext_substate_power_not_available power_not_available;
+ enum ethtool_c33_pse_ext_substate_short_detected short_detected;
+ u32 __c33_pse_ext_substate;
+ };
+};
+
+struct ethtool_c33_pse_pw_limit_range {
+ u32 min;
+ u32 max;
+};
+
+/**
+ * struct pse_irq_desc - notification sender description for IRQ-based events.
+ *
+ * @name: the visible name for the IRQ
+ * @map_event: driver callback to map IRQ status into PSE devices with events.
+ */
+struct pse_irq_desc {
+ const char *name;
+ int (*map_event)(int irq, struct pse_controller_dev *pcdev,
+ unsigned long *notifs,
+ unsigned long *notifs_mask);
+};
+
+/**
+ * struct pse_control_config - PSE control/channel configuration.
+ *
+ * @podl_admin_control: set PoDL PSE admin control as described in
+ * IEEE 802.3-2018 30.15.1.2.1 acPoDLPSEAdminControl
+ * @c33_admin_control: set PSE admin control as described in
+ * IEEE 802.3-2022 30.9.1.2.1 acPSEAdminControl
+ */
+struct pse_control_config {
+ enum ethtool_podl_pse_admin_state podl_admin_control;
+ enum ethtool_c33_pse_admin_state c33_admin_control;
+};
+
+/**
+ * struct pse_admin_state - PSE operational state
+ *
+ * @podl_admin_state: operational state of the PoDL PSE
+ * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
+ * @c33_admin_state: operational state of the PSE
+ * functions. IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState
+ */
+struct pse_admin_state {
+ enum ethtool_podl_pse_admin_state podl_admin_state;
+ enum ethtool_c33_pse_admin_state c33_admin_state;
+};
+
+/**
+ * struct pse_pw_status - PSE power detection status
+ *
+ * @podl_pw_status: power detection status of the PoDL PSE.
+ * IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus:
+ * @c33_pw_status: power detection status of the PSE.
+ * IEEE 802.3-2022 30.9.1.1.5 aPSEPowerDetectionStatus:
+ */
+struct pse_pw_status {
+ enum ethtool_podl_pse_pw_d_status podl_pw_status;
+ enum ethtool_c33_pse_pw_d_status c33_pw_status;
+};
+
+/**
+ * struct pse_ext_state_info - PSE extended state information
+ *
+ * @c33_ext_state_info: extended state information of the PSE
+ */
+struct pse_ext_state_info {
+ struct ethtool_c33_pse_ext_state_info c33_ext_state_info;
+};
+
+/**
+ * struct pse_pw_limit_ranges - PSE power limit configuration range
+ *
+ * @c33_pw_limit_ranges: supported power limit configuration range. The driver
+ * is in charge of the memory allocation.
+ */
+struct pse_pw_limit_ranges {
+ struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges;
+};
+
+/**
+ * struct ethtool_pse_control_status - PSE control/channel status.
+ *
+ * @pw_d_id: PSE power domain index.
+ * @podl_admin_state: operational state of the PoDL PSE
+ * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState
+ * @podl_pw_status: power detection status of the PoDL PSE.
+ * IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus:
+ * @c33_admin_state: operational state of the PSE
+ * functions. IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState
+ * @c33_pw_status: power detection status of the PSE.
+ * IEEE 802.3-2022 30.9.1.1.5 aPSEPowerDetectionStatus:
+ * @c33_pw_class: detected class of a powered PD
+ * IEEE 802.3-2022 30.9.1.1.8 aPSEPowerClassification
+ * @c33_actual_pw: power currently delivered by the PSE in mW
+ * IEEE 802.3-2022 30.9.1.1.23 aPSEActualPower
+ * @c33_ext_state_info: extended state information of the PSE
+ * @c33_avail_pw_limit: available power limit of the PSE in mW
+ * IEEE 802.3-2022 145.2.5.4 pse_avail_pwr
+ * @c33_pw_limit_ranges: supported power limit configuration range. The driver
+ * is in charge of the memory allocation
+ * @c33_pw_limit_nb_ranges: number of supported power limit configuration
+ * ranges
+ * @prio_max: max priority allowed for the @prio value.
+ * @prio: priority of the PSE. Managed by the PSE core when the static
+ * budget evaluation strategy is in use.
+ */
+struct ethtool_pse_control_status {
+ u32 pw_d_id;
+ enum ethtool_podl_pse_admin_state podl_admin_state;
+ enum ethtool_podl_pse_pw_d_status podl_pw_status;
+ enum ethtool_c33_pse_admin_state c33_admin_state;
+ enum ethtool_c33_pse_pw_d_status c33_pw_status;
+ u32 c33_pw_class;
+ u32 c33_actual_pw;
+ struct ethtool_c33_pse_ext_state_info c33_ext_state_info;
+ u32 c33_avail_pw_limit;
+ struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges;
+ u32 c33_pw_limit_nb_ranges;
+ u32 prio_max;
+ u32 prio;
+};
+
+/**
+ * struct pse_controller_ops - PSE controller driver callbacks
+ *
+ * @setup_pi_matrix: Setup PI matrix of the PSE controller.
+ * The PSE PI devicetree nodes have already been parsed by
+ * of_load_pse_pis() and the pcdev->pi[x]->pairset[y].np
+ * pointers populated. This callback should establish the
+ * relationship between the PSE controller hardware ports
+ * and the PSE Power Interfaces, either through software
+ * mapping or hardware configuration.
+ * @pi_get_admin_state: Get the operational state of the PSE PI. This
+ * callback is mandatory.
+ * @pi_get_pw_status: Get the power detection status of the PSE PI. This
+ * callback is mandatory.
+ * @pi_get_ext_state: Get the extended state of the PSE PI.
+ * @pi_get_pw_class: Get the power class of the PSE PI.
+ * @pi_get_actual_pw: Get actual power of the PSE PI in mW.
+ * @pi_enable: Configure the PSE PI as enabled.
+ * @pi_disable: Configure the PSE PI as disabled.
+ * @pi_get_voltage: Return the voltage in uV, similarly to the regulator
+ * get_voltage() callback.
+ * @pi_get_pw_limit: Get the configured power limit of the PSE PI in mW.
+ * @pi_set_pw_limit: Configure the power limit of the PSE PI in mW.
+ * @pi_get_pw_limit_ranges: Get the supported power limit configuration
+ * range. The driver is in charge of the memory
+ * allocation and should return the number of
+ * ranges.
+ * @pi_get_prio: Get the PSE PI priority.
+ * @pi_set_prio: Configure the PSE PI priority.
+ * @pi_get_pw_req: Get the power requested by a PD before enabling the PSE PI.
+ * This is only relevant when an interrupt is registered using
+ * the devm_pse_irq_helper() helper.
+ */
+struct pse_controller_ops {
+ int (*setup_pi_matrix)(struct pse_controller_dev *pcdev);
+ int (*pi_get_admin_state)(struct pse_controller_dev *pcdev, int id,
+ struct pse_admin_state *admin_state);
+ int (*pi_get_pw_status)(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_status *pw_status);
+ int (*pi_get_ext_state)(struct pse_controller_dev *pcdev, int id,
+ struct pse_ext_state_info *ext_state_info);
+ int (*pi_get_pw_class)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_get_actual_pw)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_enable)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_disable)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_get_voltage)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_get_pw_limit)(struct pse_controller_dev *pcdev,
+ int id);
+ int (*pi_set_pw_limit)(struct pse_controller_dev *pcdev,
+ int id, int max_mW);
+ int (*pi_get_pw_limit_ranges)(struct pse_controller_dev *pcdev, int id,
+ struct pse_pw_limit_ranges *pw_limit_ranges);
+ int (*pi_get_prio)(struct pse_controller_dev *pcdev, int id);
+ int (*pi_set_prio)(struct pse_controller_dev *pcdev, int id,
+ unsigned int prio);
+ int (*pi_get_pw_req)(struct pse_controller_dev *pcdev, int id);
+};
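
For orientation, a minimal driver-side sketch of how a couple of these callbacks might be backed. This is an assumption-laden illustration, not part of the header: the my_pse_hw_*() accessors stand in for device specifics.

/* Hypothetical sketch only: my_pse_hw_set()/my_pse_hw_is_on() are assumed
 * driver internals, not kernel API.
 */
static int my_pse_pi_enable(struct pse_controller_dev *pcdev, int id)
{
	return my_pse_hw_set(pcdev, id, true);		/* assumed */
}

static int my_pse_pi_disable(struct pse_controller_dev *pcdev, int id)
{
	return my_pse_hw_set(pcdev, id, false);		/* assumed */
}

static int my_pse_pi_get_admin_state(struct pse_controller_dev *pcdev, int id,
				     struct pse_admin_state *admin_state)
{
	admin_state->c33_admin_state = my_pse_hw_is_on(pcdev, id) ?
		ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED :
		ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
	return 0;
}

static const struct pse_controller_ops my_pse_ops = {
	.pi_enable	    = my_pse_pi_enable,
	.pi_disable	    = my_pse_pi_disable,
	.pi_get_admin_state = my_pse_pi_get_admin_state,
};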
+
+struct module;
+struct device_node;
+struct of_phandle_args;
+struct pse_control;
+struct ethtool_pse_control_status;
+
+/* PSE PI pairset pinout can either be Alternative A or Alternative B */
+enum pse_pi_pairset_pinout {
+ ALTERNATIVE_A,
+ ALTERNATIVE_B,
+};
+
+/**
+ * struct pse_pi_pairset - PSE PI pairset entity describing the pinout
+ * alternative and its phandle
+ *
+ * @pinout: description of the pinout alternative
+ * @np: device node pointer describing the pairset phandle
+ */
+struct pse_pi_pairset {
+ enum pse_pi_pairset_pinout pinout;
+ struct device_node *np;
+};
+
+/**
+ * struct pse_pi - PSE PI (Power Interface) entity as described in
+ * IEEE 802.3-2022 145.2.4
+ *
+ * @pairset: table of the PSE PI pinout alternatives for the two pairsets
+ * @np: device node pointer of the PSE PI node
+ * @rdev: regulator represented by the PSE PI
+ * @admin_state_enabled: PI enabled state
+ * @pw_d: Power domain of the PSE PI
+ * @prio: Priority of the PSE PI. Used in static budget evaluation strategy
+ * @isr_pd_detected: PSE PI detection status managed by the interrupt
+ * handler. This variable is relevant when power enablement
+ * is managed in software, as in the static budget
+ * evaluation strategy.
+ * @pw_allocated_mW: Power allocated to a PSE PI to manage power budget in
+ * static budget evaluation strategy.
+ */
+struct pse_pi {
+ struct pse_pi_pairset pairset[2];
+ struct device_node *np;
+ struct regulator_dev *rdev;
+ bool admin_state_enabled;
+ struct pse_power_domain *pw_d;
+ int prio;
+ bool isr_pd_detected;
+ int pw_allocated_mW;
+};
+
+/**
+ * struct pse_ntf - PSE notification element
+ *
+ * @id: ID of the PSE control
+ * @notifs: PSE notifications to be reported
+ */
+struct pse_ntf {
+ int id;
+ unsigned long notifs;
+};
+
+/**
+ * struct pse_controller_dev - PSE controller entity that might
+ * provide multiple PSE controls
+ * @ops: a pointer to device specific struct pse_controller_ops
+ * @owner: kernel module of the PSE controller driver
+ * @list: internal list of PSE controller devices
+ * @pse_control_head: head of internal list of requested PSE controls
+ * @dev: corresponding driver model device struct
+ * @of_pse_n_cells: number of cells in PSE line specifiers
+ * @nr_lines: number of PSE controls in this controller device
+ * @lock: Mutex for serialization access to the PSE controller
+ * @types: types of the PSE controller
+ * @pi: table of PSE PIs described in this controller device
+ * @no_of_pse_pi: flag set if the pse_pis devicetree node is not used
+ * @irq: PSE interrupt
+ * @pis_prio_max: Maximum value allowed for the PSE PIs priority
+ * @supp_budget_eval_strategies: budget evaluation strategies supported
+ * by the PSE
+ * @ntf_work: workqueue for PSE notification management
+ * @ntf_fifo: PSE notifications FIFO
+ * @ntf_fifo_lock: protect @ntf_fifo writer
+ */
+struct pse_controller_dev {
+ const struct pse_controller_ops *ops;
+ struct module *owner;
+ struct list_head list;
+ struct list_head pse_control_head;
+ struct device *dev;
+ int of_pse_n_cells;
+ unsigned int nr_lines;
+ struct mutex lock;
+ enum ethtool_pse_types types;
+ struct pse_pi *pi;
+ bool no_of_pse_pi;
+ int irq;
+ unsigned int pis_prio_max;
+ u32 supp_budget_eval_strategies;
+ struct work_struct ntf_work;
+ DECLARE_KFIFO_PTR(ntf_fifo, struct pse_ntf);
+ spinlock_t ntf_fifo_lock; /* Protect @ntf_fifo writer */
+};
+
+/**
+ * enum pse_budget_eval_strategies - PSE budget evaluation strategies.
+ * @PSE_BUDGET_EVAL_STRAT_DISABLED: Budget evaluation strategy disabled.
+ * @PSE_BUDGET_EVAL_STRAT_STATIC: PSE static budget evaluation strategy.
+ * Budget evaluation strategy based on the power requested during PD
+ * classification. This strategy is managed by the PSE core.
+ * @PSE_BUDGET_EVAL_STRAT_DYNAMIC: PSE dynamic budget evaluation
+ * strategy. Budget evaluation strategy based on the current consumption
+ * per port compared to the total power budget. This strategy is managed by
+ * the PSE controller.
+ */
+enum pse_budget_eval_strategies {
+ PSE_BUDGET_EVAL_STRAT_DISABLED = 1 << 0,
+ PSE_BUDGET_EVAL_STRAT_STATIC = 1 << 1,
+ PSE_BUDGET_EVAL_STRAT_DYNAMIC = 1 << 2,
+};
+
+#if IS_ENABLED(CONFIG_PSE_CONTROLLER)
+int pse_controller_register(struct pse_controller_dev *pcdev);
+void pse_controller_unregister(struct pse_controller_dev *pcdev);
+struct device;
+int devm_pse_controller_register(struct device *dev,
+ struct pse_controller_dev *pcdev);
+int devm_pse_irq_helper(struct pse_controller_dev *pcdev, int irq,
+ int irq_flags, const struct pse_irq_desc *d);
+
+struct pse_control *of_pse_control_get(struct device_node *node,
+ struct phy_device *phydev);
+void pse_control_put(struct pse_control *psec);
+
+int pse_ethtool_get_status(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ struct ethtool_pse_control_status *status);
+int pse_ethtool_set_config(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config);
+int pse_ethtool_set_pw_limit(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const unsigned int pw_limit);
+int pse_ethtool_set_prio(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ unsigned int prio);
+
+bool pse_has_podl(struct pse_control *psec);
+bool pse_has_c33(struct pse_control *psec);
+
+#else
+
+static inline struct pse_control *of_pse_control_get(struct device_node *node,
+ struct phy_device *phydev)
+{
+ return ERR_PTR(-ENOENT);
+}
+
+static inline void pse_control_put(struct pse_control *psec)
+{
+}
+
+static inline int pse_ethtool_get_status(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ struct ethtool_pse_control_status *status)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pse_ethtool_set_config(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const struct pse_control_config *config)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pse_ethtool_set_pw_limit(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const unsigned int pw_limit)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int pse_ethtool_set_prio(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ unsigned int prio)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool pse_has_podl(struct pse_control *psec)
+{
+ return false;
+}
+
+static inline bool pse_has_c33(struct pse_control *psec)
+{
+ return false;
+}
+
+#endif
+
+#endif
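
To tie the pieces together, a hedged probe-time sketch: it fills a pse_controller_dev and hands it to the PSE core. struct my_pse_priv (assumed to embed the pse_controller_dev) and the platform_device wiring are assumptions, not defined by this header.

/* Hypothetical probe sketch; struct my_pse_priv is assumed. */
static int my_pse_probe(struct platform_device *pdev)
{
	struct my_pse_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pcdev.dev = &pdev->dev;
	priv->pcdev.owner = THIS_MODULE;
	priv->pcdev.ops = &my_pse_ops;	/* see the ops sketch above */
	priv->pcdev.nr_lines = 8;
	priv->pcdev.types = ETHTOOL_PSE_C33;
	priv->pcdev.supp_budget_eval_strategies = PSE_BUDGET_EVAL_STRAT_STATIC;

	return devm_pse_controller_register(&pdev->dev, &priv->pcdev);
}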
diff --git a/include/linux/pseudo_fs.h b/include/linux/pseudo_fs.h
index eceda1d1407a..a651e60d9410 100644
--- a/include/linux/pseudo_fs.h
+++ b/include/linux/pseudo_fs.h
@@ -5,9 +5,11 @@
struct pseudo_fs_context {
const struct super_operations *ops;
- const struct xattr_handler **xattr;
+ const struct export_operations *eops;
+ const struct xattr_handler * const *xattr;
const struct dentry_operations *dops;
unsigned long magic;
+ unsigned int s_d_flags;
};
struct pseudo_fs_context *init_pseudo(struct fs_context *fc,
diff --git a/include/linux/psi.h b/include/linux/psi.h
index 65eb1476ac70..e0745873e3f2 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PSI_H
#define _LINUX_PSI_H
@@ -5,6 +6,8 @@
#include <linux/psi_types.h>
#include <linux/sched.h>
#include <linux/poll.h>
+#include <linux/cgroup-defs.h>
+#include <linux/cgroup.h>
struct seq_file;
struct css_set;
@@ -16,26 +19,28 @@ extern struct psi_group psi_system;
void psi_init(void);
-void psi_task_change(struct task_struct *task, int clear, int set);
-void psi_task_switch(struct task_struct *prev, struct task_struct *next,
- bool sleep);
-
void psi_memstall_enter(unsigned long *flags);
void psi_memstall_leave(unsigned long *flags);
int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
+struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
+ enum psi_res res, struct file *file,
+ struct kernfs_open_file *of);
+void psi_trigger_destroy(struct psi_trigger *t);
+
+__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
+ poll_table *wait);
#ifdef CONFIG_CGROUPS
+static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+{
+ return cgroup_ino(cgrp) == 1 ? &psi_system : cgrp->psi;
+}
+
int psi_cgroup_alloc(struct cgroup *cgrp);
void psi_cgroup_free(struct cgroup *cgrp);
void cgroup_move_task(struct task_struct *p, struct css_set *to);
-
-struct psi_trigger *psi_trigger_create(struct psi_group *group,
- char *buf, size_t nbytes, enum psi_res res);
-void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *t);
-
-__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
- poll_table *wait);
+void psi_cgroup_restart(struct psi_group *group);
#endif
#else /* CONFIG_PSI */
@@ -57,6 +62,7 @@ static inline void cgroup_move_task(struct task_struct *p, struct css_set *to)
{
rcu_assign_pointer(p->cgroups, to);
}
+static inline void psi_cgroup_restart(struct psi_group *group) {}
#endif
#endif /* CONFIG_PSI */
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index 0a23300d49af..dd10c22299ab 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PSI_TYPES_H
#define _LINUX_PSI_TYPES_H
@@ -15,12 +16,15 @@ enum psi_task_count {
NR_MEMSTALL,
NR_RUNNING,
/*
- * This can't have values other than 0 or 1 and could be
- * implemented as a bit flag. But for now we still have room
- * in the first cacheline of psi_group_cpu, and this way we
- * don't have to special case any state tracking for it.
+ * For IO and CPU stalls the presence of running/oncpu tasks
+ * in the domain means a partial rather than a full stall.
+ * For memory it's not so simple because of page reclaimers:
+ * they are running/oncpu while representing a stall. To tell
+ * whether a domain has productivity left or not, we need to
+ * distinguish between regular running (i.e. productive)
+ * threads and memstall ones.
*/
- NR_ONCPU,
+ NR_MEMSTALL_RUNNING,
NR_PSI_TASK_COUNTS = 4,
};
@@ -28,14 +32,20 @@ enum psi_task_count {
#define TSK_IOWAIT (1 << NR_IOWAIT)
#define TSK_MEMSTALL (1 << NR_MEMSTALL)
#define TSK_RUNNING (1 << NR_RUNNING)
-#define TSK_ONCPU (1 << NR_ONCPU)
+#define TSK_MEMSTALL_RUNNING (1 << NR_MEMSTALL_RUNNING)
+
+/* Only one task can be scheduled, no corresponding task count */
+#define TSK_ONCPU (1 << NR_PSI_TASK_COUNTS)
/* Resources that workloads could be stalled on */
enum psi_res {
PSI_IO,
PSI_MEM,
PSI_CPU,
- NR_PSI_RESOURCES = 3,
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ PSI_IRQ,
+#endif
+ NR_PSI_RESOURCES,
};
/*
@@ -51,11 +61,20 @@ enum psi_states {
PSI_MEM_FULL,
PSI_CPU_SOME,
PSI_CPU_FULL,
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ PSI_IRQ_FULL,
+#endif
/* Only per-CPU, to weigh the CPU in the global average: */
PSI_NONIDLE,
- NR_PSI_STATES = 7,
+ NR_PSI_STATES,
};
+/* Use one bit in the state mask to track TSK_ONCPU */
+#define PSI_ONCPU (1 << NR_PSI_STATES)
+
+/* Flag whether to re-arm avgs_work, see details in get_recent_times() */
+#define PSI_STATE_RESCHEDULE (1 << (NR_PSI_STATES + 1))
+
enum psi_aggregators {
PSI_AVGS = 0,
PSI_POLL,
@@ -65,11 +84,9 @@ enum psi_aggregators {
struct psi_group_cpu {
/* 1st cacheline updated by the scheduler */
- /* Aggregator needs to know of concurrent changes */
- seqcount_t seq ____cacheline_aligned_in_smp;
-
/* States of the tasks belonging to this group */
- unsigned int tasks[NR_PSI_TASK_COUNTS];
+ unsigned int tasks[NR_PSI_TASK_COUNTS]
+ ____cacheline_aligned_in_smp;
/* Aggregate pressure state derived from the tasks */
u32 state_mask;
@@ -118,6 +135,9 @@ struct psi_trigger {
/* Wait queue for polling */
wait_queue_head_t event_wait;
+ /* Kernfs file for cgroup triggers */
+ struct kernfs_open_file *of;
+
/* Pending event flag */
int event;
@@ -130,11 +150,17 @@ struct psi_trigger {
*/
u64 last_event_time;
- /* Refcounting to prevent premature destruction */
- struct kref refcount;
+ /* Deferred event(s) from previous ratelimit window */
+ bool pending_event;
+
+ /* Trigger type - PSI_AVGS for unprivileged, PSI_POLL for RT */
+ enum psi_aggregators aggregator;
};
struct psi_group {
+ struct psi_group *parent;
+ bool enabled;
+
/* Protects data used by the aggregator */
struct mutex avgs_lock;
@@ -149,33 +175,40 @@ struct psi_group {
/* Aggregator work control */
struct delayed_work avgs_work;
+ /* Unprivileged triggers against N*PSI_FREQ windows */
+ struct list_head avg_triggers;
+ u32 avg_nr_triggers[NR_PSI_STATES - 1];
+
/* Total stall times and sampled pressure averages */
u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1];
unsigned long avg[NR_PSI_STATES - 1][3];
- /* Monitor work control */
- struct task_struct __rcu *poll_task;
- struct timer_list poll_timer;
- wait_queue_head_t poll_wait;
- atomic_t poll_wakeup;
+ /* Monitor RT polling work control */
+ struct task_struct __rcu *rtpoll_task;
+ struct timer_list rtpoll_timer;
+ wait_queue_head_t rtpoll_wait;
+ atomic_t rtpoll_wakeup;
+ atomic_t rtpoll_scheduled;
/* Protects data used by the monitor */
- struct mutex trigger_lock;
-
- /* Configured polling triggers */
- struct list_head triggers;
- u32 nr_triggers[NR_PSI_STATES - 1];
- u32 poll_states;
- u64 poll_min_period;
-
- /* Total stall times at the start of monitor activation */
- u64 polling_total[NR_PSI_STATES - 1];
- u64 polling_next_update;
- u64 polling_until;
+ struct mutex rtpoll_trigger_lock;
+
+ /* Configured RT polling triggers */
+ struct list_head rtpoll_triggers;
+ u32 rtpoll_nr_triggers[NR_PSI_STATES - 1];
+ u32 rtpoll_states;
+ u64 rtpoll_min_period;
+
+ /* Total stall times at the start of RT polling monitor activation */
+ u64 rtpoll_total[NR_PSI_STATES - 1];
+ u64 rtpoll_next_update;
+ u64 rtpoll_until;
};
#else /* CONFIG_PSI */
+#define NR_PSI_RESOURCES 0
+
struct psi_group { };
#endif /* CONFIG_PSI */
diff --git a/include/linux/psp-platform-access.h b/include/linux/psp-platform-access.h
new file mode 100644
index 000000000000..540abf7de048
--- /dev/null
+++ b/include/linux/psp-platform-access.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __PSP_PLATFORM_ACCESS_H
+#define __PSP_PLATFORM_ACCESS_H
+
+#include <linux/psp.h>
+
+enum psp_platform_access_msg {
+ PSP_CMD_NONE = 0x0,
+ PSP_SFS_GET_FW_VERSIONS,
+ PSP_SFS_UPDATE,
+ PSP_CMD_HSTI_QUERY = 0x14,
+ PSP_I2C_REQ_BUS_CMD = 0x64,
+ PSP_DYNAMIC_BOOST_GET_NONCE,
+ PSP_DYNAMIC_BOOST_SET_UID,
+ PSP_DYNAMIC_BOOST_GET_PARAMETER,
+ PSP_DYNAMIC_BOOST_SET_PARAMETER,
+};
+
+struct psp_req_buffer_hdr {
+ u32 payload_size;
+ u32 status;
+} __packed;
+
+struct psp_request {
+ struct psp_req_buffer_hdr header;
+ void *buf;
+} __packed;
+
+/**
+ * psp_send_platform_access_msg() - Send a message to control platform features
+ *
+ * This function is intended to be used by drivers outside of ccp to communicate
+ * with the platform.
+ *
+ * Returns:
+ * 0: success
+ * -%EBUSY: mailbox in recovery or in use
+ * -%ENODEV: driver not bound with PSP device
+ * -%ETIMEDOUT: request timed out
+ * -%EIO: unknown error (see kernel log)
+ */
+int psp_send_platform_access_msg(enum psp_platform_access_msg msg, struct psp_request *req);
+
+/**
+ * psp_ring_platform_doorbell() - Ring platform doorbell
+ *
+ * This function is intended to be used by drivers outside of ccp to ring the
+ * platform doorbell with a message.
+ *
+ * Returns:
+ * 0: success
+ * -%EBUSY: mailbox in recovery or in use
+ * -%ENODEV: driver not bound with PSP device
+ * -%ETIMEDOUT: request timed out
+ * -%EIO: error will be stored in result argument
+ */
+int psp_ring_platform_doorbell(int msg, u32 *result);
+
+/**
+ * psp_check_platform_access_status() - Checks whether platform features are ready
+ *
+ * This function is intended to be used by drivers outside of ccp to determine
+ * if platform features have initialized.
+ *
+ * Returns:
+ * 0: platform features are ready
+ * -%ENODEV: platform features are not ready or present
+ */
+int psp_check_platform_access_status(void);
+
+#endif /* __PSP_PLATFORM_ACCESS_H */
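
A hedged caller-side sketch of the request pattern: a command-specific struct begins with the generic header, mirroring how existing callers use this interface. struct my_req, its payload layout, and the choice of PSP_I2C_REQ_BUS_CMD are illustrative assumptions.

/* Hypothetical request layout; only the header is defined by this file. */
struct my_req {
	struct psp_req_buffer_hdr hdr;
	u32 arg;				/* command-specific payload */
} __packed;

static int my_platform_access(u32 arg)
{
	struct my_req req = {
		.hdr.payload_size = sizeof(req),
		.arg = arg,
	};
	int ret;

	ret = psp_send_platform_access_msg(PSP_I2C_REQ_BUS_CMD,
					   (struct psp_request *)&req);
	if (ret == -EIO)			/* PSP-level failure */
		pr_err("platform access failed, PSP status %#x\n",
		       req.hdr.status);
	return ret;
}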
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index d48a7192e881..69ffa4b4d1fa 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -14,13 +14,38 @@
#include <uapi/linux/psp-sev.h>
-#ifdef CONFIG_X86
-#include <linux/mem_encrypt.h>
+/* As defined by SEV API, under "Guest Policy". */
+#define SEV_POLICY_MASK_NODBG BIT(0)
+#define SEV_POLICY_MASK_NOKS BIT(1)
+#define SEV_POLICY_MASK_ES BIT(2)
+#define SEV_POLICY_MASK_NOSEND BIT(3)
+#define SEV_POLICY_MASK_DOMAIN BIT(4)
+#define SEV_POLICY_MASK_SEV BIT(5)
+#define SEV_POLICY_MASK_API_MAJOR GENMASK(23, 16)
+#define SEV_POLICY_MASK_API_MINOR GENMASK(31, 24)
-#define __psp_pa(x) __sme_pa(x)
-#else
-#define __psp_pa(x) __pa(x)
-#endif
+/* As defined by SEV-SNP Firmware ABI, under "Guest Policy". */
+#define SNP_POLICY_MASK_API_MINOR GENMASK_ULL(7, 0)
+#define SNP_POLICY_MASK_API_MAJOR GENMASK_ULL(15, 8)
+#define SNP_POLICY_MASK_SMT BIT_ULL(16)
+#define SNP_POLICY_MASK_RSVD_MBO BIT_ULL(17)
+#define SNP_POLICY_MASK_MIGRATE_MA BIT_ULL(18)
+#define SNP_POLICY_MASK_DEBUG BIT_ULL(19)
+#define SNP_POLICY_MASK_SINGLE_SOCKET BIT_ULL(20)
+#define SNP_POLICY_MASK_CXL_ALLOW BIT_ULL(21)
+#define SNP_POLICY_MASK_MEM_AES_256_XTS BIT_ULL(22)
+#define SNP_POLICY_MASK_RAPL_DIS BIT_ULL(23)
+#define SNP_POLICY_MASK_CIPHERTEXT_HIDING_DRAM BIT_ULL(24)
+#define SNP_POLICY_MASK_PAGE_SWAP_DISABLE BIT_ULL(25)
+
+/* Base SEV-SNP policy bitmask for minimum supported SEV firmware version */
+#define SNP_POLICY_MASK_BASE (SNP_POLICY_MASK_API_MINOR | \
+ SNP_POLICY_MASK_API_MAJOR | \
+ SNP_POLICY_MASK_SMT | \
+ SNP_POLICY_MASK_RSVD_MBO | \
+ SNP_POLICY_MASK_MIGRATE_MA | \
+ SNP_POLICY_MASK_DEBUG | \
+ SNP_POLICY_MASK_SINGLE_SOCKET)
#define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */
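
As a worked illustration of the bitfield layout above, a hypervisor might compose a guest policy value like this. FIELD_PREP() comes from <linux/bitfield.h>; the ABI version 1.51 is an example value only, not mandated by this header.

/* Illustrative values only; RSVD_MBO must always be set. */
u64 policy = SNP_POLICY_MASK_RSVD_MBO |
	     SNP_POLICY_MASK_SMT |			/* allow SMT host */
	     FIELD_PREP(SNP_POLICY_MASK_API_MAJOR, 1) |	/* ABI 1.51 */
	     FIELD_PREP(SNP_POLICY_MASK_API_MINOR, 51);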
@@ -52,6 +77,7 @@ enum sev_cmd {
SEV_CMD_DF_FLUSH = 0x00A,
SEV_CMD_DOWNLOAD_FIRMWARE = 0x00B,
SEV_CMD_GET_ID = 0x00C,
+ SEV_CMD_INIT_EX = 0x00D,
/* Guest commands */
SEV_CMD_DECOMMISSION = 0x020,
@@ -85,6 +111,44 @@ enum sev_cmd {
SEV_CMD_DBG_DECRYPT = 0x060,
SEV_CMD_DBG_ENCRYPT = 0x061,
+ /* SNP specific commands */
+ SEV_CMD_SNP_INIT = 0x081,
+ SEV_CMD_SNP_SHUTDOWN = 0x082,
+ SEV_CMD_SNP_PLATFORM_STATUS = 0x083,
+ SEV_CMD_SNP_DF_FLUSH = 0x084,
+ SEV_CMD_SNP_INIT_EX = 0x085,
+ SEV_CMD_SNP_SHUTDOWN_EX = 0x086,
+ SEV_CMD_SNP_DECOMMISSION = 0x090,
+ SEV_CMD_SNP_ACTIVATE = 0x091,
+ SEV_CMD_SNP_GUEST_STATUS = 0x092,
+ SEV_CMD_SNP_GCTX_CREATE = 0x093,
+ SEV_CMD_SNP_GUEST_REQUEST = 0x094,
+ SEV_CMD_SNP_ACTIVATE_EX = 0x095,
+ SEV_CMD_SNP_LAUNCH_START = 0x0A0,
+ SEV_CMD_SNP_LAUNCH_UPDATE = 0x0A1,
+ SEV_CMD_SNP_LAUNCH_FINISH = 0x0A2,
+ SEV_CMD_SNP_DBG_DECRYPT = 0x0B0,
+ SEV_CMD_SNP_DBG_ENCRYPT = 0x0B1,
+ SEV_CMD_SNP_PAGE_SWAP_OUT = 0x0C0,
+ SEV_CMD_SNP_PAGE_SWAP_IN = 0x0C1,
+ SEV_CMD_SNP_PAGE_MOVE = 0x0C2,
+ SEV_CMD_SNP_PAGE_MD_INIT = 0x0C3,
+ SEV_CMD_SNP_PAGE_SET_STATE = 0x0C6,
+ SEV_CMD_SNP_PAGE_RECLAIM = 0x0C7,
+ SEV_CMD_SNP_PAGE_UNSMASH = 0x0C8,
+ SEV_CMD_SNP_CONFIG = 0x0C9,
+ SEV_CMD_SNP_DOWNLOAD_FIRMWARE_EX = 0x0CA,
+ SEV_CMD_SNP_COMMIT = 0x0CB,
+ SEV_CMD_SNP_VLEK_LOAD = 0x0CD,
+ SEV_CMD_SNP_FEATURE_INFO = 0x0CE,
+
+ /* SEV-TIO commands */
+ SEV_CMD_TIO_STATUS = 0x0D0,
+ SEV_CMD_TIO_INIT = 0x0D1,
+ SEV_CMD_TIO_DEV_CREATE = 0x0D2,
+ SEV_CMD_TIO_DEV_RECLAIM = 0x0D3,
+ SEV_CMD_TIO_DEV_CONNECT = 0x0D4,
+ SEV_CMD_TIO_DEV_DISCONNECT = 0x0D5,
SEV_CMD_MAX,
};
@@ -102,6 +166,26 @@ struct sev_data_init {
u32 tmr_len; /* In */
} __packed;
+/**
+ * struct sev_data_init_ex - INIT_EX command parameters
+ *
+ * @length: length of the command buffer read by the PSP
+ * @flags: processing flags
+ * @tmr_address: system physical address used for SEV-ES
+ * @tmr_len: length of the @tmr_address region
+ * @reserved: reserved
+ * @nv_address: system physical address used for PSP NV storage
+ * @nv_len: length of the @nv_address region
+ */
+struct sev_data_init_ex {
+ u32 length; /* In */
+ u32 flags; /* In */
+ u64 tmr_address; /* In */
+ u32 tmr_len; /* In */
+ u32 reserved; /* In */
+ u64 nv_address; /* In/Out */
+ u32 nv_len; /* In */
+} __packed;
+
#define SEV_INIT_FLAGS_SEV_ES 0x01
/**
@@ -510,12 +594,326 @@ struct sev_data_attestation_report {
u32 len; /* In/Out */
} __packed;
+/**
+ * struct sev_data_snp_download_firmware - SNP_DOWNLOAD_FIRMWARE command params
+ *
+ * @address: physical address of firmware image
+ * @len: length of the firmware image
+ */
+struct sev_data_snp_download_firmware {
+ u64 address; /* In */
+ u32 len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_snp_activate - SNP_ACTIVATE command params
+ *
+ * @gctx_paddr: system physical address of the guest context page
+ * @asid: ASID to bind to the guest
+ */
+struct sev_data_snp_activate {
+ u64 gctx_paddr; /* In */
+ u32 asid; /* In */
+} __packed;
+
+/**
+ * struct sev_data_snp_addr - generic SNP command params
+ *
+ * @address: physical address of generic data param
+ */
+struct sev_data_snp_addr {
+ u64 address; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_data_snp_launch_start - SNP_LAUNCH_START command params
+ *
+ * @gctx_paddr: system physical address of guest context page
+ * @policy: guest policy
+ * @ma_gctx_paddr: system physical address of migration agent
+ * @ma_en: the guest is associated with a migration agent
+ * @imi_en: launch flow is launching an IMI (Incoming Migration Image) for the
+ * purpose of guest-assisted migration.
+ * @rsvd: reserved
+ * @desired_tsc_khz: hypervisor's desired mean TSC frequency of the guest, in kHz
+ * @gosvw: guest OS-visible workarounds, as defined by hypervisor
+ */
+struct sev_data_snp_launch_start {
+ u64 gctx_paddr; /* In */
+ u64 policy; /* In */
+ u64 ma_gctx_paddr; /* In */
+ u32 ma_en:1; /* In */
+ u32 imi_en:1; /* In */
+ u32 rsvd:30;
+ u32 desired_tsc_khz; /* In */
+ u8 gosvw[16]; /* In */
+} __packed;
+
+/* SNP support page type */
+enum {
+ SNP_PAGE_TYPE_NORMAL = 0x1,
+ SNP_PAGE_TYPE_VMSA = 0x2,
+ SNP_PAGE_TYPE_ZERO = 0x3,
+ SNP_PAGE_TYPE_UNMEASURED = 0x4,
+ SNP_PAGE_TYPE_SECRET = 0x5,
+ SNP_PAGE_TYPE_CPUID = 0x6,
+
+ SNP_PAGE_TYPE_MAX
+};
+
+/**
+ * struct sev_data_snp_launch_update - SNP_LAUNCH_UPDATE command params
+ *
+ * @gctx_paddr: system physical address of guest context page
+ * @page_size: page size 0 indicates 4K and 1 indicates 2MB page
+ * @page_type: encoded page type
+ * @imi_page: indicates that this page is part of the IMI (Incoming Migration
+ * Image) of the guest
+ * @rsvd: reserved
+ * @rsvd2: reserved
+ * @address: system physical address of destination page to encrypt
+ * @rsvd3: reserved
+ * @vmpl1_perms: VMPL permission mask for VMPL1
+ * @vmpl2_perms: VMPL permission mask for VMPL2
+ * @vmpl3_perms: VMPL permission mask for VMPL3
+ * @rsvd4: reserved
+ */
+struct sev_data_snp_launch_update {
+ u64 gctx_paddr; /* In */
+ u32 page_size:1; /* In */
+ u32 page_type:3; /* In */
+ u32 imi_page:1; /* In */
+ u32 rsvd:27;
+ u32 rsvd2;
+ u64 address; /* In */
+ u32 rsvd3:8;
+ u32 vmpl1_perms:8; /* In */
+ u32 vmpl2_perms:8; /* In */
+ u32 vmpl3_perms:8; /* In */
+ u32 rsvd4;
+} __packed;
+
+/**
+ * struct sev_data_snp_launch_finish - SNP_LAUNCH_FINISH command params
+ *
+ * @gctx_paddr: system physical address of guest context page
+ * @id_block_paddr: system physical address of ID block
+ * @id_auth_paddr: system physical address of ID block authentication structure
+ * @id_block_en: indicates whether ID block is present
+ * @auth_key_en: indicates whether author key is present in authentication structure
+ * @vcek_disabled: indicates whether use of VCEK is allowed for attestation reports
+ * @rsvd: reserved
+ * @host_data: host-supplied data for guest, not interpreted by firmware
+ */
+struct sev_data_snp_launch_finish {
+ u64 gctx_paddr;
+ u64 id_block_paddr;
+ u64 id_auth_paddr;
+ u8 id_block_en:1;
+ u8 auth_key_en:1;
+ u8 vcek_disabled:1;
+ u64 rsvd:61;
+ u8 host_data[32];
+} __packed;
+
+/**
+ * struct sev_data_snp_guest_status - SNP_GUEST_STATUS command params
+ *
+ * @gctx_paddr: system physical address of guest context page
+ * @address: system physical address of guest status page
+ */
+struct sev_data_snp_guest_status {
+ u64 gctx_paddr;
+ u64 address;
+} __packed;
+
+/**
+ * struct sev_data_snp_page_reclaim - SNP_PAGE_RECLAIM command params
+ *
+ * @paddr: system physical address of the page to be reclaimed. The 0th bit in the
+ * address indicates the page size. 0h indicates 4KB and 1h indicates
+ * 2MB page.
+ */
+struct sev_data_snp_page_reclaim {
+ u64 paddr;
+} __packed;
+
+/**
+ * struct sev_data_snp_page_unsmash - SNP_PAGE_UNSMASH command params
+ *
+ * @paddr: system physical address of page to be unsmashed. The 0th bit in the
+ * address indicates the page size. 0h indicates 4 KB and 1h indicates
+ * 2 MB page.
+ */
+struct sev_data_snp_page_unsmash {
+ u64 paddr;
+} __packed;
+
+/**
+ * struct sev_data_snp_dbg - DBG_ENCRYPT/DBG_DECRYPT command parameters
+ *
+ * @gctx_paddr: system physical address of guest context page
+ * @src_addr: source address of data to operate on
+ * @dst_addr: destination address of data to operate on
+ */
+struct sev_data_snp_dbg {
+ u64 gctx_paddr; /* In */
+ u64 src_addr; /* In */
+ u64 dst_addr; /* In */
+} __packed;
+
+/**
+ * struct sev_data_snp_guest_request - SNP_GUEST_REQUEST command params
+ *
+ * @gctx_paddr: system physical address of guest context page
+ * @req_paddr: system physical address of request page
+ * @res_paddr: system physical address of response page
+ */
+struct sev_data_snp_guest_request {
+ u64 gctx_paddr; /* In */
+ u64 req_paddr; /* In */
+ u64 res_paddr; /* In */
+} __packed;
+
+/**
+ * struct sev_data_snp_init_ex - SNP_INIT_EX structure
+ *
+ * @init_rmp: indicate that the RMP should be initialized.
+ * @list_paddr_en: indicate that list_paddr is valid
+ * @rapl_dis: indicate that RAPL should be disabled
+ * @ciphertext_hiding_en: indicate that ciphertext hiding should be enabled
+ * @tio_en: indicate that SEV-TIO support should be enabled
+ * @rsvd: reserved
+ * @rsvd1: reserved
+ * @list_paddr: system physical address of range list
+ * @max_snp_asid: maximum ASID usable for SEV-SNP guests when ciphertext
+ * hiding is enabled
+ * @rsvd2: reserved
+ */
+struct sev_data_snp_init_ex {
+ u32 init_rmp:1;
+ u32 list_paddr_en:1;
+ u32 rapl_dis:1;
+ u32 ciphertext_hiding_en:1;
+ u32 tio_en:1;
+ u32 rsvd:27;
+ u32 rsvd1;
+ u64 list_paddr;
+ u16 max_snp_asid;
+ u8 rsvd2[46];
+} __packed;
+
+/**
+ * struct sev_data_range - RANGE structure
+ *
+ * @base: system physical address of first byte of range
+ * @page_count: number of 4KB pages in this range
+ * @rsvd: reserved
+ */
+struct sev_data_range {
+ u64 base;
+ u32 page_count;
+ u32 rsvd;
+} __packed;
+
+/**
+ * struct sev_data_range_list - RANGE_LIST structure
+ *
+ * @num_elements: number of elements in RANGE_ARRAY
+ * @rsvd: reserved
+ * @ranges: array of num_elements of type RANGE
+ */
+struct sev_data_range_list {
+ u32 num_elements;
+ u32 rsvd;
+ struct sev_data_range ranges[];
+} __packed;
+
+/**
+ * struct sev_data_snp_shutdown_ex - SNP_SHUTDOWN_EX structure
+ *
+ * @len: length of the command buffer read by the PSP
+ * @iommu_snp_shutdown: Disable enforcement of SNP in the IOMMU
+ * @rsvd1: reserved
+ */
+struct sev_data_snp_shutdown_ex {
+ u32 len;
+ u32 iommu_snp_shutdown:1;
+ u32 rsvd1:31;
+} __packed;
+
+/**
+ * struct sev_platform_init_args
+ *
+ * @error: SEV firmware error code
+ * @probe: True if this is being called as part of CCP module probe, which
+ * will defer SEV_INIT/SEV_INIT_EX firmware initialization until needed
+ * unless psp_init_on_probe module param is set
+ * @max_snp_asid: When non-zero, enable ciphertext hiding and specify the
+ * maximum ASID that can be used for an SEV-SNP guest.
+ */
+struct sev_platform_init_args {
+ int error;
+ bool probe;
+ unsigned int max_snp_asid;
+};
+
+/**
+ * struct sev_data_snp_commit - SNP_COMMIT structure
+ *
+ * @len: length of the command buffer read by the PSP
+ */
+struct sev_data_snp_commit {
+ u32 len;
+} __packed;
+
+/**
+ * struct sev_data_snp_feature_info - SNP_FEATURE_INFO command params
+ *
+ * @length: length of the command buffer read by the PSP
+ * @ecx_in: subfunction index
+ * @feature_info_paddr: system physical address of the FEATURE_INFO structure
+ */
+struct sev_data_snp_feature_info {
+ u32 length;
+ u32 ecx_in;
+ u64 feature_info_paddr;
+} __packed;
+
+/**
+ * struct snp_feature_info - FEATURE_INFO structure
+ *
+ * @eax: output of SNP_FEATURE_INFO command
+ * @ebx: output of SNP_FEATURE_INFO command
+ * @ecx: output of SNP_FEATURE_INFO command
+ * @edx: output of SNP_FEATURE_INFO command
+ */
+struct snp_feature_info {
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+} __packed;
+
+/* Feature bits in ECX */
+#define SNP_RAPL_DISABLE_SUPPORTED BIT(2)
+#define SNP_CIPHER_TEXT_HIDING_SUPPORTED BIT(3)
+#define SNP_AES_256_XTS_POLICY_SUPPORTED BIT(4)
+#define SNP_CXL_ALLOW_POLICY_SUPPORTED BIT(5)
+
+/* Feature bits in EBX */
+#define SNP_SEV_TIO_SUPPORTED BIT(1)
+
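A small hedged sketch of consuming these capability bits, assuming finfo was filled in by a prior SEV_CMD_SNP_FEATURE_INFO invocation:

static bool my_ciphertext_hiding_supported(const struct snp_feature_info *finfo)
{
	/* ECX carries the capability bits defined above */
	return finfo->ecx & SNP_CIPHER_TEXT_HIDING_SUPPORTED;
}
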
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
/**
+ * sev_module_init - perform PSP SEV module initialization
+ *
+ * Returns:
+ * 0 if the PSP module is successfully initialized
+ * negative value if the PSP module initialization fails
+ */
+int sev_module_init(void);
+
+/**
* sev_platform_init - perform SEV INIT command
*
- * @error: SEV command return code
+ * @args: struct sev_platform_init_args to pass in arguments
*
* Returns:
* 0 if the SEV successfully processed the command
@@ -524,7 +922,7 @@ struct sev_data_attestation_report {
* -%ETIMEDOUT if the SEV command timed out
* -%EIO if the SEV returned a non-zero return code
*/
-int sev_platform_init(int *error);
+int sev_platform_init(struct sev_platform_init_args *args);
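
A hedged sketch of the new calling convention; the runtime (non-probe) path and the caller name are assumptions:

static int my_sev_bringup(void)
{
	struct sev_platform_init_args args = {
		.probe = false,		/* runtime path, not CCP probe */
	};
	int rc;

	rc = sev_platform_init(&args);
	if (rc)
		pr_err("SEV init failed: rc=%d, fw error %#x\n",
		       rc, args.error);
	return rc;
}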
/**
* sev_platform_status - perform SEV PLATFORM_STATUS command
@@ -624,14 +1022,36 @@ int sev_guest_df_flush(int *error);
*/
int sev_guest_decommission(struct sev_data_decommission *data, int *error);
+/**
+ * sev_do_cmd - issue an SEV or an SEV-SNP command
+ *
+ * @cmd: SEV or SEV-SNP firmware command to issue
+ * @data: arguments for firmware command
+ * @psp_ret: SEV command return code
+ *
+ * Returns:
+ * 0 if the SEV device successfully processed the command
+ * -%ENODEV if the PSP device is not available
+ * -%ENOTSUPP if PSP device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if PSP device returned a non-zero return code
+ */
+int sev_do_cmd(int cmd, void *data, int *psp_ret);
+
void *psp_copy_user_blob(u64 uaddr, u32 len);
+void *snp_alloc_firmware_page(gfp_t mask);
+int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked);
+void snp_free_firmware_page(void *addr);
+void sev_platform_shutdown(void);
+bool sev_is_snp_ciphertext_hiding_supported(void);
+u64 sev_get_snp_policy_bits(void);
#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
static inline int
sev_platform_status(struct sev_user_data_status *status, int *error) { return -ENODEV; }
-static inline int sev_platform_init(int *error) { return -ENODEV; }
+static inline int sev_platform_init(struct sev_platform_init_args *args) { return -ENODEV; }
static inline int
sev_guest_deactivate(struct sev_data_deactivate *data, int *error) { return -ENODEV; }
@@ -640,6 +1060,9 @@ static inline int
sev_guest_decommission(struct sev_data_decommission *data, int *error) { return -ENODEV; }
static inline int
+sev_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; }
+
+static inline int
sev_guest_activate(struct sev_data_activate *data, int *error) { return -ENODEV; }
static inline int sev_guest_df_flush(int *error) { return -ENODEV; }
@@ -649,6 +1072,22 @@ sev_issue_cmd_external_user(struct file *filep, unsigned int id, void *data, int
static inline void *psp_copy_user_blob(u64 __user uaddr, u32 len) { return ERR_PTR(-EINVAL); }
+static inline void *snp_alloc_firmware_page(gfp_t mask)
+{
+ return NULL;
+}
+
+static inline int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
+{
+ return -ENODEV;
+}
+
+static inline void snp_free_firmware_page(void *addr) { }
+
+static inline void sev_platform_shutdown(void) { }
+
+static inline bool sev_is_snp_ciphertext_hiding_supported(void) { return false; }
+
#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
#endif /* __PSP_SEV_H__ */
diff --git a/include/linux/psp.h b/include/linux/psp.h
new file mode 100644
index 000000000000..92e60aeef21e
--- /dev/null
+++ b/include/linux/psp.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __PSP_H
+#define __PSP_H
+
+#ifdef CONFIG_X86
+#include <linux/mem_encrypt.h>
+
+#define __psp_pa(x) __sme_pa(x)
+#else
+#define __psp_pa(x) __pa(x)
+#endif
+
+/*
+ * Fields and bits used by most PSP mailboxes
+ *
+ * Note: Some mailboxes (such as SEV) have extra bits or different meanings
+ * and should include an appropriate local definition in their source file.
+ */
+#define PSP_CMDRESP_STS GENMASK(15, 0)
+#define PSP_CMDRESP_CMD GENMASK(23, 16)
+#define PSP_CMDRESP_RESERVED GENMASK(29, 24)
+#define PSP_CMDRESP_RECOVERY BIT(30)
+#define PSP_CMDRESP_RESP BIT(31)
+
+#define PSP_DRBL_MSG PSP_CMDRESP_CMD
+#define PSP_DRBL_RING BIT(0)
+
+#endif /* __PSP_H */
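
A hedged decoding sketch using these fields; FIELD_GET() comes from <linux/bitfield.h>, and reading the device-specific command/response register is assumed to happen elsewhere.

static bool my_psp_cmd_done(u32 reg, u16 *sts)
{
	if (!(reg & PSP_CMDRESP_RESP))		/* still in flight */
		return false;
	if (reg & PSP_CMDRESP_RECOVERY)		/* mailbox in recovery */
		return false;
	*sts = FIELD_GET(PSP_CMDRESP_STS, reg);	/* firmware status code */
	return true;
}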
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index eb93a54cff31..fed601053c51 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -14,7 +14,7 @@
#include <linux/errno.h>
#include <linux/kmsg_dump.h>
#include <linux/mutex.h>
-#include <linux/semaphore.h>
+#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/types.h>
@@ -57,6 +57,9 @@ struct pstore_info;
* @size: size of @buf
* @ecc_notice_size:
* ECC information for @buf
+ * @priv: pointer for backend specific use, will be
+ * kfree()d by the pstore core if non-NULL
+ * when the record is freed.
*
* Valid for PSTORE_TYPE_DMESG @type:
*
@@ -74,6 +77,7 @@ struct pstore_record {
char *buf;
ssize_t size;
ssize_t ecc_notice_size;
+ void *priv;
int count;
enum kmsg_dump_reason reason;
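
A hedged backend-side sketch of the @priv contract: the allocation must stand alone, because the pstore core kfree()s it together with the record. struct my_state and the read-op body are assumptions.

static ssize_t my_backend_read(struct pstore_record *record)
{
	struct my_state *st;

	st = kzalloc(sizeof(*st), GFP_KERNEL);	/* assumed private type */
	if (!st)
		return -ENOMEM;

	record->priv = st;	/* kfree()d by pstore core with the record */
	/* ... fill record->buf and record->size from storage ... */
	return record->size;
}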
@@ -87,7 +91,7 @@ struct pstore_record {
* @owner: module which is responsible for this backend driver
* @name: name of the backend driver
*
- * @buf_lock: semaphore to serialize access to @buf
+ * @buf_lock: spinlock to serialize access to @buf
* @buf: preallocated crash dump buffer
* @bufsize: size of @buf available for crash dump bytes (must match
* smallest number of bytes available for writing to a
@@ -178,7 +182,7 @@ struct pstore_info {
struct module *owner;
const char *name;
- struct semaphore buf_lock;
+ raw_spinlock_t buf_lock;
char *buf;
size_t bufsize;
diff --git a/include/linux/pstore_blk.h b/include/linux/pstore_blk.h
index 99564f93d774..924ca07aafbd 100644
--- a/include/linux/pstore_blk.h
+++ b/include/linux/pstore_blk.h
@@ -10,36 +10,15 @@
/**
* struct pstore_device_info - back-end pstore/blk driver structure.
*
- * @total_size: The total size in bytes pstore/blk can use. It must be greater
- * than 4096 and be multiple of 4096.
* @flags: Refer to macro starting with PSTORE_FLAGS defined in
* linux/pstore.h. It means what front-ends this device support.
* Zero means all backends for compatible.
- * @read: The general read operation. Both of the function parameters
- * @size and @offset are relative value to bock device (not the
- * whole disk).
- * On success, the number of bytes should be returned, others
- * means error.
- * @write: The same as @read, but the following error number:
- * -EBUSY means try to write again later.
- * -ENOMSG means to try next zone.
- * @erase: The general erase operation for device with special removing
- * job. Both of the function parameters @size and @offset are
- * relative value to storage.
- * Return 0 on success and others on failure.
- * @panic_write:The write operation only used for panic case. It's optional
- * if you do not care panic log. The parameters are relative
- * value to storage.
- * On success, the number of bytes should be returned, others
- * excluding -ENOMSG mean error. -ENOMSG means to try next zone.
+ * @zone: The struct pstore_zone_info details.
+ *
*/
struct pstore_device_info {
- unsigned long total_size;
unsigned int flags;
- pstore_zone_read_op read;
- pstore_zone_write_op write;
- pstore_zone_erase_op erase;
- pstore_zone_write_op panic_write;
+ struct pstore_zone_info zone;
};
int register_pstore_device(struct pstore_device_info *dev);
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 9f16afec7290..9d65ff94e216 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -8,28 +8,7 @@
#ifndef __LINUX_PSTORE_RAM_H__
#define __LINUX_PSTORE_RAM_H__
-#include <linux/compiler.h>
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/pstore.h>
-#include <linux/types.h>
-
-/*
- * Choose whether access to the RAM zone requires locking or not. If a zone
- * can be written to from different CPUs like with ftrace for example, then
- * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
- */
-#define PRZ_FLAG_NO_LOCK BIT(0)
-/*
- * If a PRZ should only have a single-boot lifetime, this marks it as
- * getting wiped after its contents get copied out after boot.
- */
-#define PRZ_FLAG_ZAP_OLD BIT(1)
-
-struct persistent_ram_buffer;
-struct rs_control;
struct persistent_ram_ecc_info {
int block_size;
@@ -39,84 +18,6 @@ struct persistent_ram_ecc_info {
uint16_t *par;
};
-/**
- * struct persistent_ram_zone - Details of a persistent RAM zone (PRZ)
- * used as a pstore backend
- *
- * @paddr: physical address of the mapped RAM area
- * @size: size of mapping
- * @label: unique name of this PRZ
- * @type: frontend type for this PRZ
- * @flags: holds PRZ_FLAGS_* bits
- *
- * @buffer_lock:
- * locks access to @buffer "size" bytes and "start" offset
- * @buffer:
- * pointer to actual RAM area managed by this PRZ
- * @buffer_size:
- * bytes in @buffer->data (not including any trailing ECC bytes)
- *
- * @par_buffer:
- * pointer into @buffer->data containing ECC bytes for @buffer->data
- * @par_header:
- * pointer into @buffer->data containing ECC bytes for @buffer header
- * (i.e. all fields up to @data)
- * @rs_decoder:
- * RSLIB instance for doing ECC calculations
- * @corrected_bytes:
- * ECC corrected bytes accounting since boot
- * @bad_blocks:
- * ECC uncorrectable bytes accounting since boot
- * @ecc_info:
- * ECC configuration details
- *
- * @old_log:
- * saved copy of @buffer->data prior to most recent wipe
- * @old_log_size:
- * bytes contained in @old_log
- *
- */
-struct persistent_ram_zone {
- phys_addr_t paddr;
- size_t size;
- void *vaddr;
- char *label;
- enum pstore_type_id type;
- u32 flags;
-
- raw_spinlock_t buffer_lock;
- struct persistent_ram_buffer *buffer;
- size_t buffer_size;
-
- char *par_buffer;
- char *par_header;
- struct rs_control *rs_decoder;
- int corrected_bytes;
- int bad_blocks;
- struct persistent_ram_ecc_info ecc_info;
-
- char *old_log;
- size_t old_log_size;
-};
-
-struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
- u32 sig, struct persistent_ram_ecc_info *ecc_info,
- unsigned int memtype, u32 flags, char *label);
-void persistent_ram_free(struct persistent_ram_zone *prz);
-void persistent_ram_zap(struct persistent_ram_zone *prz);
-
-int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
- unsigned int count);
-int persistent_ram_write_user(struct persistent_ram_zone *prz,
- const void __user *s, unsigned int count);
-
-void persistent_ram_save_old(struct persistent_ram_zone *prz);
-size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
-void *persistent_ram_old(struct persistent_ram_zone *prz);
-void persistent_ram_free_old(struct persistent_ram_zone *prz);
-ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
- char *str, size_t len);
-
/*
* Ramoops platform data
* @mem_size memory size for ramoops
diff --git a/include/linux/ptdump.h b/include/linux/ptdump.h
index 2a3a95586425..240bd3bff18d 100644
--- a/include/linux/ptdump.h
+++ b/include/linux/ptdump.h
@@ -11,13 +11,30 @@ struct ptdump_range {
};
struct ptdump_state {
- /* level is 0:PGD to 4:PTE, or -1 if unknown */
- void (*note_page)(struct ptdump_state *st, unsigned long addr,
- int level, u64 val);
- void (*effective_prot)(struct ptdump_state *st, int level, u64 val);
+ void (*note_page_pte)(struct ptdump_state *st, unsigned long addr, pte_t pte);
+ void (*note_page_pmd)(struct ptdump_state *st, unsigned long addr, pmd_t pmd);
+ void (*note_page_pud)(struct ptdump_state *st, unsigned long addr, pud_t pud);
+ void (*note_page_p4d)(struct ptdump_state *st, unsigned long addr, p4d_t p4d);
+ void (*note_page_pgd)(struct ptdump_state *st, unsigned long addr, pgd_t pgd);
+ void (*note_page_flush)(struct ptdump_state *st);
+ void (*effective_prot_pte)(struct ptdump_state *st, pte_t pte);
+ void (*effective_prot_pmd)(struct ptdump_state *st, pmd_t pmd);
+ void (*effective_prot_pud)(struct ptdump_state *st, pud_t pud);
+ void (*effective_prot_p4d)(struct ptdump_state *st, p4d_t p4d);
+ void (*effective_prot_pgd)(struct ptdump_state *st, pgd_t pgd);
const struct ptdump_range *range;
};
+bool ptdump_walk_pgd_level_core(struct seq_file *m,
+ struct mm_struct *mm, pgd_t *pgd,
+ bool checkwx, bool dmesg);
void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd);
+bool ptdump_check_wx(void);
+
+static inline void debug_checkwx(void)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_WX))
+ ptdump_check_wx();
+}
#endif /* _LINUX_PTDUMP_H */
diff --git a/include/linux/pti.h b/include/linux/pti.h
index 1a941efcaa62..1fbf9d6c20ef 100644
--- a/include/linux/pti.h
+++ b/include/linux/pti.h
@@ -2,7 +2,7 @@
#ifndef _INCLUDE_PTI_H
#define _INCLUDE_PTI_H
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
#include <asm/pti.h>
#else
static inline void pti_init(void) { }
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index ae04968a3a47..3a74f69e0b59 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -10,8 +10,12 @@
#ifndef _PTP_CLASSIFY_H_
#define _PTP_CLASSIFY_H_
+#include <linux/unaligned.h>
#include <linux/ip.h>
+#include <linux/ktime.h>
#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <net/checksum.h>
#define PTP_CLASS_NONE 0x00 /* not a PTP event message */
#define PTP_CLASS_V1 0x01 /* protocol version 1 */
@@ -37,11 +41,15 @@
#define PTP_MSGTYPE_PDELAY_RESP 0x3
#define PTP_EV_PORT 319
+#define PTP_GEN_PORT 320
#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
#define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */
#define OFF_PTP_SEQUENCE_ID 30
+/* PTP header flag fields */
+#define PTP_FLAG_TWOSTEP BIT(1)
+
/* Below defines should actually be removed at some point in time. */
#define IP6_HLEN 40
#define UDP_HLEN 8
@@ -125,6 +133,80 @@ static inline u8 ptp_get_msgtype(const struct ptp_header *hdr,
return msgtype;
}
+/**
+ * ptp_check_diff8 - Computes new checksum (when altering a 64-bit field)
+ * @old: old field value
+ * @new: new field value
+ * @oldsum: previous checksum
+ *
+ * This function can be used to calculate a new checksum when only a single
+ * field is changed. Similar to ip_vs_check_diff*() in ip_vs.h.
+ *
+ * Return: Updated checksum
+ */
+static inline __wsum ptp_check_diff8(__be64 old, __be64 new, __wsum oldsum)
+{
+ __be64 diff[2] = { ~old, new };
+
+ return csum_partial(diff, sizeof(diff), oldsum);
+}
+
+/**
+ * ptp_header_update_correction - Update PTP header's correction field
+ * @skb: packet buffer
+ * @type: type of the packet (see ptp_classify_raw())
+ * @hdr: ptp header
+ * @correction: new correction value
+ *
+ * This updates the correction field of a PTP header and updates the UDP
+ * checksum (if UDP is used as transport). It is needed for hardware capable of
+ * one-step P2P that does not already modify the correction field of Pdelay_Req
+ * event messages on ingress.
+ */
+static inline
+void ptp_header_update_correction(struct sk_buff *skb, unsigned int type,
+ struct ptp_header *hdr, s64 correction)
+{
+ __be64 correction_old;
+ struct udphdr *uhdr;
+
+ /* previous correction value is required for checksum update. */
+ memcpy(&correction_old, &hdr->correction, sizeof(correction_old));
+
+ /* write new correction value */
+ put_unaligned_be64((u64)correction, &hdr->correction);
+
+ switch (type & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ case PTP_CLASS_IPV6:
+ /* locate udp header */
+ uhdr = (struct udphdr *)((char *)hdr - sizeof(struct udphdr));
+ break;
+ default:
+ return;
+ }
+
+ /* update checksum */
+ uhdr->check = csum_fold(ptp_check_diff8(correction_old,
+ hdr->correction,
+ ~csum_unfold(uhdr->check)));
+ if (!uhdr->check)
+ uhdr->check = CSUM_MANGLED_0;
+
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
+/**
+ * ptp_msg_is_sync - Evaluates whether the given skb is a PTP Sync message
+ * @skb: packet buffer
+ * @type: type of the packet (see ptp_classify_raw())
+ *
+ * This function evaluates whether the given skb is a PTP Sync message.
+ *
+ * Return: true if sync message, false otherwise
+ */
+bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type);
+
void __init ptp_classifier_init(void);
#else
static inline void ptp_classifier_init(void)
@@ -147,5 +229,15 @@ static inline u8 ptp_get_msgtype(const struct ptp_header *hdr,
*/
return PTP_MSGTYPE_SYNC;
}
+static inline bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type)
+{
+ return false;
+}
+
+static inline
+void ptp_header_update_correction(struct sk_buff *skb, unsigned int type,
+ struct ptp_header *hdr, s64 correction)
+{
+}
#endif
#endif /* _PTP_CLASSIFY_H_ */
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 0d47fd33b228..884364596dd3 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -11,7 +11,10 @@
#include <linux/device.h>
#include <linux/pps_kernel.h>
#include <linux/ptp_clock.h>
+#include <linux/timecounter.h>
+#include <linux/skbuff.h>
+#define PTP_CLOCK_NAME_LEN 32
/**
* struct ptp_clock_request - request PTP clock event
*
@@ -42,10 +45,14 @@ struct system_device_crosststamp;
/**
* struct ptp_system_timestamp - system time corresponding to a PHC timestamp
+ * @pre_ts: system timestamp before capturing PHC
+ * @post_ts: system timestamp after capturing PHC
+ * @clockid: clock-base used for capturing the system timestamps
*/
struct ptp_system_timestamp {
struct timespec64 pre_ts;
struct timespec64 post_ts;
+ clockid_t clockid;
};
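
The intended usage pattern, as a hedged sketch: a driver's gettimex64 brackets its PHC register read with the two system timestamps via the ptp_read_system_prets()/ptp_read_system_postts() helpers. my_phc_read_ns() is an assumed device accessor.

static int my_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
			 struct ptp_system_timestamp *sts)
{
	u64 ns;

	ptp_read_system_prets(sts);	/* fills sts->pre_ts */
	ns = my_phc_read_ns(info);	/* assumed device read */
	ptp_read_system_postts(sts);	/* fills sts->post_ts */

	*ts = ns_to_timespec64(ns);
	return 0;
}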
/**
@@ -60,7 +67,25 @@ struct ptp_system_timestamp {
* @n_ext_ts: The number of external time stamp channels.
* @n_per_out: The number of programmable periodic signals.
* @n_pins: The number of programmable pins.
+ * @n_per_lp: The number of channels that support looping back the periodic
+ * output signal.
* @pps: Indicates whether the clock supports a PPS callback.
+ *
+ * @supported_perout_flags: The set of flags the driver supports for the
+ * PTP_PEROUT_REQUEST ioctl. The PTP core will
+ * reject a request with any flag not specified
+ * here.
+ *
+ * @supported_extts_flags: The set of flags the driver supports for the
+ * PTP_EXTTS_REQUEST ioctl. The PTP core will use
+ * this list to reject unsupported requests.
+ * PTP_ENABLE_FEATURE is assumed and does not need to
+ * be included. If PTP_STRICT_FLAGS is *not* set,
+ * then both PTP_RISING_EDGE and PTP_FALLING_EDGE
+ * will be assumed. Note that PTP_STRICT_FLAGS must
+ * be set if the driver wants to honor
+ * PTP_EXTTS_REQUEST2 and any future flags.
+ *
* @pin_config: Array of length 'n_pins'. If the number of
* programmable pins is nonzero, then drivers must
* allocate and initialize this array.
@@ -72,14 +97,14 @@ struct ptp_system_timestamp {
* nominal frequency in parts per million, but with a
* 16 bit binary fractional field.
*
- * @adjfreq: Adjusts the frequency of the hardware clock.
- * This method is deprecated. New drivers should implement
- * the @adjfine method instead.
- * parameter delta: Desired frequency offset from nominal frequency
- * in parts per billion
+ * @adjphase: Indicates that the PHC should use an internal servo
+ * algorithm to correct the provided phase offset.
+ * parameter delta: PHC servo phase adjustment target
+ * in nanoseconds.
*
- * @adjphase: Adjusts the phase offset of the hardware clock.
- * parameter delta: Desired change in nanoseconds.
+ * @getmaxphase: Advertises maximum offset that can be provided
+ * to the hardware clock's phase control functionality
+ * through adjphase.
*
* @adjtime: Shifts the time of the hardware clock.
* parameter delta: Desired change in nanoseconds.
@@ -105,6 +130,32 @@ struct ptp_system_timestamp {
* @settime64: Set the current time on the hardware clock.
* parameter ts: Time value to set.
*
+ * @getcycles64: Reads the current free running cycle counter from the hardware
+ * clock.
+ * If @getcycles64 and @getcyclesx64 are not supported, then
+ * @gettime64 or @gettimex64 will be used as default
+ * implementation.
+ * parameter ts: Holds the result.
+ *
+ * @getcyclesx64: Reads the current free running cycle counter from the
+ * hardware clock and optionally also the system clock.
+ * If @getcycles64 and @getcyclesx64 are not supported, then
+ * @gettimex64 will be used as default implementation if
+ * available.
+ * parameter ts: Holds the PHC timestamp.
+ * parameter sts: If not NULL, it holds a pair of timestamps
+ * from the system clock. The first reading is made right before
+ * reading the lowest bits of the PHC timestamp and the second
+ * reading immediately follows that.
+ *
+ * @getcrosscycles: Reads the current free running cycle counter from the
+ * hardware clock and system clock simultaneously.
+ * If @getcrosscycles is not supported, then
+ * @getcrosststamp will be used as default implementation if
+ * available.
+ * parameter cts: Contains timestamp (device,system) pair,
+ * where system time is realtime and monotonic.
+ *
* @enable: Request driver to enable or disable an ancillary feature.
* parameter request: Desired resource to enable or disable.
* parameter on: Caller passes one to enable or zero to disable.
@@ -126,6 +177,11 @@ struct ptp_system_timestamp {
* scheduling time (>=0) or negative value in case further
* scheduling is not required.
*
+ * @perout_loopback: Request driver to enable or disable the periodic output
+ * signal loopback.
+ * parameter index: index of the periodic output signal channel.
+ * parameter on: caller passes one to enable or zero to disable.
+ *
* Drivers should embed their ptp_clock_info within a private
* structure, obtaining a reference to it using container_of().
*
@@ -134,17 +190,20 @@ struct ptp_system_timestamp {
struct ptp_clock_info {
struct module *owner;
- char name[16];
+ char name[PTP_CLOCK_NAME_LEN];
s32 max_adj;
int n_alarm;
int n_ext_ts;
int n_per_out;
int n_pins;
+ int n_per_lp;
int pps;
+ unsigned int supported_perout_flags;
+ unsigned int supported_extts_flags;
struct ptp_pin_desc *pin_config;
int (*adjfine)(struct ptp_clock_info *ptp, long scaled_ppm);
- int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
int (*adjphase)(struct ptp_clock_info *ptp, s32 phase);
+ s32 (*getmaxphase)(struct ptp_clock_info *ptp);
int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
int (*gettimex64)(struct ptp_clock_info *ptp, struct timespec64 *ts,
@@ -152,11 +211,18 @@ struct ptp_clock_info {
int (*getcrosststamp)(struct ptp_clock_info *ptp,
struct system_device_crosststamp *cts);
int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
+ int (*getcycles64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+ int (*getcyclesx64)(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts);
+ int (*getcrosscycles)(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *cts);
int (*enable)(struct ptp_clock_info *ptp,
struct ptp_clock_request *request, int on);
int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan);
long (*do_aux_work)(struct ptp_clock_info *ptp);
+ int (*perout_loopback)(struct ptp_clock_info *ptp, unsigned int index,
+ int on);
};
struct ptp_clock;
@@ -164,6 +230,7 @@ struct ptp_clock;
enum ptp_clock_events {
PTP_CLOCK_ALARM,
PTP_CLOCK_EXTTS,
+ PTP_CLOCK_EXTOFF,
PTP_CLOCK_PPS,
PTP_CLOCK_PPSUSR,
};
@@ -174,6 +241,7 @@ enum ptp_clock_events {
* @type: One of the ptp_clock_events enumeration values.
* @index: Identifies the source of the event.
* @timestamp: When the event occurred (%PTP_CLOCK_EXTTS only).
+ * @offset: When the event occurred (%PTP_CLOCK_EXTOFF only).
* @pps_times: When the event occurred (%PTP_CLOCK_PPSUSR only).
*/
@@ -182,11 +250,84 @@ struct ptp_clock_event {
int index;
union {
u64 timestamp;
+ s64 offset;
struct pps_event_time pps_times;
};
};
-#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+/**
+ * scaled_ppm_to_ppb() - convert scaled ppm to ppb
+ *
+ * @ppm: Parts per million, but with a 16 bit binary fractional field
+ */
+static inline long scaled_ppm_to_ppb(long ppm)
+{
+ /*
+ * The 'freq' field in the 'struct timex' is in parts per
+ * million, but with a 16 bit binary fractional field.
+ *
+ * We want to calculate
+ *
+ * ppb = scaled_ppm * 1000 / 2^16
+ *
+ * which simplifies to
+ *
+ * ppb = scaled_ppm * 125 / 2^13
+ */
+ s64 ppb = 1 + ppm;
+
+ ppb *= 125;
+ ppb >>= 13;
+ return (long)ppb;
+}
+
+/**
+ * diff_by_scaled_ppm - Calculate difference using scaled ppm
+ * @base: the base increment value to adjust
+ * @scaled_ppm: scaled parts per million to adjust by
+ * @diff: on return, the absolute value of calculated diff
+ *
+ * Calculate the difference to adjust the base increment using scaled parts
+ * per million.
+ *
+ * Use mul_u64_u64_div_u64() to perform the difference calculation to avoid
+ * possible overflow.
+ *
+ * Returns: true if scaled_ppm is negative, false otherwise
+ */
+static inline bool diff_by_scaled_ppm(u64 base, long scaled_ppm, u64 *diff)
+{
+ bool negative = false;
+
+ if (scaled_ppm < 0) {
+ negative = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ *diff = mul_u64_u64_div_u64(base, (u64)scaled_ppm, 1000000ULL << 16);
+
+ return negative;
+}
+
+/**
+ * adjust_by_scaled_ppm - Adjust a base increment by scaled parts per million
+ * @base: the base increment value to adjust
+ * @scaled_ppm: scaled parts per million frequency adjustment
+ *
+ * Helper function which calculates a new increment value based on the
+ * requested scaled parts per million adjustment.
+ */
+static inline u64 adjust_by_scaled_ppm(u64 base, long scaled_ppm)
+{
+ u64 diff;
+
+ if (diff_by_scaled_ppm(base, scaled_ppm, &diff))
+ return base - diff;
+
+ return base + diff;
+}
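
A hypothetical adjfine() implementation built on the helper above; the
0x80000000 nominal increment and example_write_incval() are stand-ins, not
any real driver's values:

	static int example_adjfine(struct ptp_clock_info *info, long scaled_ppm)
	{
		/* nominal per-tick increment; purely illustrative */
		u64 incval = adjust_by_scaled_ppm(0x80000000ULL, scaled_ppm);

		/* hypothetical register write for the new increment */
		return example_write_incval(info, incval);
	}
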
+
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
/**
* ptp_clock_register() - register a PTP hardware clock driver
@@ -194,7 +335,7 @@ struct ptp_clock_event {
* @info: Structure describing the new clock.
* @parent: Pointer to the parent device of the new clock.
*
- * Returns a valid pointer on success or PTR_ERR on failure. If PHC
+ * Returns: a valid pointer on success or PTR_ERR on failure. If PHC
* support is missing at the configuration level, this function
* returns NULL, and drivers are expected to gracefully handle that
* case separately.
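
A sketch of the embedding pattern and the NULL-versus-ERR_PTR handling
described above (all names are illustrative, not from any real driver):

	struct example_phc {
		struct ptp_clock_info caps;	/* callbacks use container_of() on this */
		struct ptp_clock *clock;
	};

	static int example_phc_register(struct example_phc *phc, struct device *dev)
	{
		phc->caps.owner = THIS_MODULE;
		strscpy(phc->caps.name, "example", sizeof(phc->caps.name));
		phc->caps.max_adj = 1000000;

		phc->clock = ptp_clock_register(&phc->caps, dev);
		if (IS_ERR(phc->clock))
			return PTR_ERR(phc->clock);
		if (!phc->clock)	/* PHC support not configured: not an error */
			dev_info(dev, "PTP clock support disabled\n");
		return 0;
	}
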
@@ -230,12 +371,22 @@ extern void ptp_clock_event(struct ptp_clock *ptp,
extern int ptp_clock_index(struct ptp_clock *ptp);
/**
- * scaled_ppm_to_ppb() - convert scaled ppm to ppb
+ * ptp_clock_index_by_of_node() - obtain the device index of
+ * a PTP clock based on the PTP device of_node
*
- * @ppm: Parts per million, but with a 16 bit binary fractional field
+ * @np: The device of_node pointer of the PTP device.
+ * Return: The PHC index on success or -1 on failure.
*/
+int ptp_clock_index_by_of_node(struct device_node *np);
-extern s32 scaled_ppm_to_ppb(long ppm);
+/**
+ * ptp_clock_index_by_dev() - obtain the device index of
+ * a PTP clock based on the PTP device.
+ *
+ * @parent: The parent device (PTP device) pointer of the PTP clock.
+ * Return: The PHC index on success or -1 on failure.
+ */
+int ptp_clock_index_by_dev(struct device *parent);
/**
* ptp_find_pin() - obtain the pin index of a given auxiliary function
@@ -264,6 +415,11 @@ int ptp_find_pin(struct ptp_clock *ptp,
* should most likely call ptp_find_pin() directly from their
* ptp_clock_info::enable() method.
*
+ * @ptp: The clock obtained from ptp_clock_register().
+ * @func: One of the ptp_pin_function enumerated values.
+ * @chan: The particular functional channel to find.
+ * Return: Pin index in the range of zero to ptp_clock_caps.n_pins - 1,
+ * or -1 if the auxiliary function cannot be found.
*/
int ptp_find_pin_unlocked(struct ptp_clock *ptp,
@@ -297,27 +453,70 @@ static inline void ptp_clock_event(struct ptp_clock *ptp,
{ }
static inline int ptp_clock_index(struct ptp_clock *ptp)
{ return -1; }
+static inline int ptp_clock_index_by_of_node(struct device_node *np)
+{ return -1; }
+static inline int ptp_clock_index_by_dev(struct device *parent)
+{ return -1; }
static inline int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan)
{ return -1; }
+static inline int ptp_find_pin_unlocked(struct ptp_clock *ptp,
+ enum ptp_pin_function func,
+ unsigned int chan)
+{ return -1; }
static inline int ptp_schedule_worker(struct ptp_clock *ptp,
unsigned long delay)
{ return -EOPNOTSUPP; }
static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{ }
+#endif
+
+#if IS_BUILTIN(CONFIG_PTP_1588_CLOCK)
+/*
+ * These are called by the network core, and don't work if PTP is in
+ * a loadable module.
+ */
+
+/**
+ * ptp_get_vclocks_index() - get the indices of all vclocks on a pclock;
+ *                           the caller is responsible for freeing the
+ *                           memory of vclock_index
+ *
+ * @pclock_index: phc index of ptp pclock.
+ * @vclock_index: pointer to pointer of vclock index.
+ *
+ * Returns: number of vclocks.
+ */
+int ptp_get_vclocks_index(int pclock_index, int **vclock_index);
+
+/**
+ * ptp_convert_timestamp() - convert timestamp to a ptp vclock time
+ *
+ * @hwtstamp: hardware timestamp to convert.
+ * @vclock_index: phc index of ptp vclock.
+ *
+ * Returns: converted timestamp, or 0 on error.
+ */
+ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index);
+#else
+static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index)
+{ return 0; }
+static inline ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp,
+ int vclock_index)
+{ return 0; }
#endif
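
A consumer-side sketch of the vclock pair above (a hypothetical RX-path
helper; the fallback policy is an assumption, not mandated by the API):

	static ktime_t example_rx_tstamp(ktime_t hwtstamp, int vclock_index)
	{
		ktime_t ns = ptp_convert_timestamp(&hwtstamp, vclock_index);

		/* 0 means the conversion failed; fall back to the raw stamp */
		return ns ?: hwtstamp;
	}
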
static inline void ptp_read_system_prets(struct ptp_system_timestamp *sts)
{
if (sts)
- ktime_get_real_ts64(&sts->pre_ts);
+ ktime_get_clock_ts64(sts->clockid, &sts->pre_ts);
}
static inline void ptp_read_system_postts(struct ptp_system_timestamp *sts)
{
if (sts)
- ktime_get_real_ts64(&sts->post_ts);
+ ktime_get_clock_ts64(sts->clockid, &sts->post_ts);
}
#endif
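
The pre/post helpers above are meant to bracket the device read inside a
driver's gettimex64() callback; a sketch, with example_read_ns() standing
in for the real register access:

	static int example_gettimex64(struct ptp_clock_info *info,
				      struct timespec64 *ts,
				      struct ptp_system_timestamp *sts)
	{
		u64 ns;

		ptp_read_system_prets(sts);
		ns = example_read_ns(info);	/* hypothetical device read */
		ptp_read_system_postts(sts);

		*ts = ns_to_timespec64(ns);
		return 0;
	}
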
diff --git a/include/linux/ptp_kvm.h b/include/linux/ptp_kvm.h
index f960a719f0d5..e8c74fa3f455 100644
--- a/include/linux/ptp_kvm.h
+++ b/include/linux/ptp_kvm.h
@@ -8,12 +8,15 @@
#ifndef _PTP_KVM_H_
#define _PTP_KVM_H_
+#include <linux/clocksource_ids.h>
+#include <linux/types.h>
+
struct timespec64;
-struct clocksource;
int kvm_arch_ptp_init(void);
+void kvm_arch_ptp_exit(void);
int kvm_arch_ptp_get_clock(struct timespec64 *ts);
int kvm_arch_ptp_get_crosststamp(u64 *cycle,
- struct timespec64 *tspec, struct clocksource **cs);
+ struct timespec64 *tspec, enum clocksource_ids *cs_id);
#endif /* _PTP_KVM_H_ */
diff --git a/include/linux/ptp_mock.h b/include/linux/ptp_mock.h
new file mode 100644
index 000000000000..72eb401034d9
--- /dev/null
+++ b/include/linux/ptp_mock.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Mock-up PTP Hardware Clock driver for virtual network devices
+ *
+ * Copyright 2023 NXP
+ */
+
+#ifndef _PTP_MOCK_H_
+#define _PTP_MOCK_H_
+
+struct device;
+struct mock_phc;
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK_MOCK)
+
+struct mock_phc *mock_phc_create(struct device *dev);
+void mock_phc_destroy(struct mock_phc *phc);
+int mock_phc_index(struct mock_phc *phc);
+
+#else
+
+static inline struct mock_phc *mock_phc_create(struct device *dev)
+{
+ return NULL;
+}
+
+static inline void mock_phc_destroy(struct mock_phc *phc)
+{
+}
+
+static inline int mock_phc_index(struct mock_phc *phc)
+{
+ return -1;
+}
+
+#endif
+
+#endif /* _PTP_MOCK_H_ */
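
A usage sketch for a virtual network device driver (error handling and the
surrounding netdev wiring are assumed, not shown):

	static int example_attach_phc(struct device *dev)
	{
		struct mock_phc *phc = mock_phc_create(dev);

		if (IS_ERR(phc))
			return PTR_ERR(phc);

		/* expose the index, e.g. via .get_ts_info, so user space finds the clock */
		dev_info(dev, "mock PHC index: %d\n", mock_phc_index(phc));

		/* keep phc somewhere; call mock_phc_destroy(phc) on teardown */
		return 0;
	}
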
diff --git a/include/linux/ptp_pch.h b/include/linux/ptp_pch.h
index 51818198c292..7ba643b62c15 100644
--- a/include/linux/ptp_pch.h
+++ b/include/linux/ptp_pch.h
@@ -10,6 +10,10 @@
#ifndef _PTP_PCH_H_
#define _PTP_PCH_H_
+#include <linux/types.h>
+
+struct pci_dev;
+
void pch_ch_control_write(struct pci_dev *pdev, u32 val);
u32 pch_ch_event_read(struct pci_dev *pdev);
void pch_ch_event_write(struct pci_dev *pdev, u32 val);
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 808f9d3ee546..534531807d95 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -243,6 +243,24 @@ static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
return ret;
}
+/* Zero entries from tail to specified head.
+ * NB: if consumer_head can be >= r->size, the caller needs to fix up the
+ * tail later.
+ */
+static inline void __ptr_ring_zero_tail(struct ptr_ring *r, int consumer_head)
+{
+ int head = consumer_head;
+
+ /* Zero out entries in the reverse order: this way we touch the
+ * cache line that producer might currently be reading the last;
+ * producer won't make progress and touch other cache lines
+ * besides the first one until we write out all entries.
+ */
+ while (likely(head > r->consumer_tail))
+ r->queue[--head] = NULL;
+
+ r->consumer_tail = consumer_head;
+}
+
/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
@@ -261,8 +279,7 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
/* Note: we must keep consumer_head valid at all times for __ptr_ring_empty
* to work correctly.
*/
- int consumer_head = r->consumer_head;
- int head = consumer_head++;
+ int consumer_head = r->consumer_head + 1;
/* Once we have processed enough entries invalidate them in
* the ring all at once so producer can reuse their space in the ring.
@@ -270,16 +287,9 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
* but helps keep the implementation simple.
*/
if (unlikely(consumer_head - r->consumer_tail >= r->batch ||
- consumer_head >= r->size)) {
- /* Zero out entries in the reverse order: this way we touch the
- * cache line that producer might currently be reading the last;
- * producer won't make progress and touch other cache lines
- * besides the first one until we write out all entries.
- */
- while (likely(head >= r->consumer_tail))
- r->queue[head--] = NULL;
- r->consumer_tail = consumer_head;
- }
+ consumer_head >= r->size))
+ __ptr_ring_zero_tail(r, consumer_head);
+
if (unlikely(consumer_head >= r->size)) {
consumer_head = 0;
r->consumer_tail = 0;
@@ -464,11 +474,11 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
/* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See
* documentation for vmalloc for which of them are legal.
*/
-static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
+static inline void **__ptr_ring_init_queue_alloc_noprof(unsigned int size, gfp_t gfp)
{
if (size > KMALLOC_MAX_SIZE / sizeof(void *))
return NULL;
- return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
+ return kvmalloc_array_noprof(size, sizeof(void *), gfp | __GFP_ZERO);
}
static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
@@ -484,9 +494,9 @@ static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
r->batch = 1;
}
-static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
+static inline int ptr_ring_init_noprof(struct ptr_ring *r, int size, gfp_t gfp)
{
- r->queue = __ptr_ring_init_queue_alloc(size, gfp);
+ r->queue = __ptr_ring_init_queue_alloc_noprof(size, gfp);
if (!r->queue)
return -ENOMEM;
@@ -497,6 +507,7 @@ static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
return 0;
}
+#define ptr_ring_init(...) alloc_hooks(ptr_ring_init_noprof(__VA_ARGS__))
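
A minimal single-producer/single-consumer sketch of the API (some_ptr is a
placeholder; the locked produce/consume variants are elided):

	static int example_ring_demo(void *some_ptr)
	{
		struct ptr_ring ring;
		void *item;

		if (ptr_ring_init(&ring, 64, GFP_KERNEL))
			return -ENOMEM;

		if (ptr_ring_produce(&ring, some_ptr))
			pr_warn("ring full\n");	/* produce returns -ENOSPC when full */

		item = ptr_ring_consume(&ring);	/* NULL when the ring is empty */

		ptr_ring_cleanup(&ring, NULL);	/* NULL: entries need no destructor */
		return item == some_ptr ? 0 : -EIO;
	}
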
/*
* Return entries into ring. Destroy entries that don't fit.
@@ -512,7 +523,6 @@ static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
void (*destroy)(void *))
{
unsigned long flags;
- int head;
spin_lock_irqsave(&r->consumer_lock, flags);
spin_lock(&r->producer_lock);
@@ -524,17 +534,14 @@ static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
* Clean out buffered entries (for simplicity). This way following code
* can test entries for NULL and if not assume they are valid.
*/
- head = r->consumer_head - 1;
- while (likely(head >= r->consumer_tail))
- r->queue[head--] = NULL;
- r->consumer_tail = r->consumer_head;
+ __ptr_ring_zero_tail(r, r->consumer_head);
/*
* Go over entries in batch, start moving head back and copy entries.
* Stop when we run into previously unconsumed entries.
*/
while (n) {
- head = r->consumer_head - 1;
+ int head = r->consumer_head - 1;
if (head < 0)
head = r->size - 1;
if (r->queue[head]) {
@@ -587,11 +594,11 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
* In particular if you consume ring in interrupt or BH context, you must
* disable interrupts/BH when doing so.
*/
-static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
+static inline int ptr_ring_resize_noprof(struct ptr_ring *r, int size, gfp_t gfp,
void (*destroy)(void *))
{
unsigned long flags;
- void **queue = __ptr_ring_init_queue_alloc(size, gfp);
+ void **queue = __ptr_ring_init_queue_alloc_noprof(size, gfp);
void **old;
if (!queue)
@@ -609,39 +616,39 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
return 0;
}
+#define ptr_ring_resize(...) alloc_hooks(ptr_ring_resize_noprof(__VA_ARGS__))
/*
* Note: producer lock is nested within consumer lock, so if you
* resize you must make sure all uses nest correctly.
- * In particular if you consume ring in interrupt or BH context, you must
- * disable interrupts/BH when doing so.
+ * In particular if you consume ring in BH context, you must
+ * disable BH when doing so.
*/
-static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
- unsigned int nrings,
- int size,
- gfp_t gfp, void (*destroy)(void *))
+static inline int ptr_ring_resize_multiple_bh_noprof(struct ptr_ring **rings,
+ unsigned int nrings,
+ int size, gfp_t gfp,
+ void (*destroy)(void *))
{
- unsigned long flags;
void ***queues;
int i;
- queues = kmalloc_array(nrings, sizeof(*queues), gfp);
+ queues = kmalloc_array_noprof(nrings, sizeof(*queues), gfp);
if (!queues)
goto noqueues;
for (i = 0; i < nrings; ++i) {
- queues[i] = __ptr_ring_init_queue_alloc(size, gfp);
+ queues[i] = __ptr_ring_init_queue_alloc_noprof(size, gfp);
if (!queues[i])
goto nomem;
}
for (i = 0; i < nrings; ++i) {
- spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
+ spin_lock_bh(&(rings[i])->consumer_lock);
spin_lock(&(rings[i])->producer_lock);
queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
size, gfp, destroy);
spin_unlock(&(rings[i])->producer_lock);
- spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
+ spin_unlock_bh(&(rings[i])->consumer_lock);
}
for (i = 0; i < nrings; ++i)
@@ -660,6 +667,8 @@ nomem:
noqueues:
return -ENOMEM;
}
+#define ptr_ring_resize_multiple_bh(...) \
+ alloc_hooks(ptr_ring_resize_multiple_bh_noprof(__VA_ARGS__))
static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
{
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index b5ebf6c01292..90507d4afcd6 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -30,7 +30,6 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
#define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
#define PT_PTRACED 0x00000001
-#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
#define PT_OPT_FLAG_SHIFT 3
/* PT_TRACE_* event enable flags */
@@ -47,12 +46,6 @@ extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
#define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
#define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT)
-/* single stepping state bits (used on ARM and PA-RISC) */
-#define PT_SINGLESTEP_BIT 31
-#define PT_SINGLESTEP (1<<PT_SINGLESTEP_BIT)
-#define PT_BLOCKSTEP_BIT 30
-#define PT_BLOCKSTEP (1<<PT_BLOCKSTEP_BIT)
-
extern long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
@@ -60,7 +53,7 @@ extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned
extern void ptrace_disable(struct task_struct *);
extern int ptrace_request(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
-extern void ptrace_notify(int exit_code);
+extern int ptrace_notify(int exit_code, unsigned long message);
extern void __ptrace_link(struct task_struct *child,
struct task_struct *new_parent,
const struct cred *ptracer_cred);
@@ -155,8 +148,7 @@ static inline bool ptrace_event_enabled(struct task_struct *task, int event)
static inline void ptrace_event(int event, unsigned long message)
{
if (unlikely(ptrace_event_enabled(current, event))) {
- current->ptrace_message = message;
- ptrace_notify((event << 8) | SIGTRAP);
+ ptrace_notify((event << 8) | SIGTRAP, message);
} else if (event == PTRACE_EVENT_EXEC) {
/* legacy EXEC report via SIGTRAP */
if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
@@ -362,29 +354,25 @@ static inline void user_single_step_report(struct pt_regs *regs)
#ifndef arch_ptrace_stop_needed
/**
* arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
- * @code: current->exit_code value ptrace will stop with
- * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
*
* This is called with the siglock held, to decide whether or not it's
- * necessary to release the siglock and call arch_ptrace_stop() with the
- * same @code and @info arguments. It can be defined to a constant if
- * arch_ptrace_stop() is never required, or always is. On machines where
- * this makes sense, it should be defined to a quick test to optimize out
- * calling arch_ptrace_stop() when it would be superfluous. For example,
- * if the thread has not been back to user mode since the last stop, the
- * thread state might indicate that nothing needs to be done.
+ * necessary to release the siglock and call arch_ptrace_stop(). It can be
+ * defined to a constant if arch_ptrace_stop() is never required, or always
+ * is. On machines where this makes sense, it should be defined to a quick
+ * test to optimize out calling arch_ptrace_stop() when it would be
+ * superfluous. For example, if the thread has not been back to user mode
+ * since the last stop, the thread state might indicate that nothing needs
+ * to be done.
*
* This is guaranteed to be invoked once before a task stops for ptrace and
* may include arch-specific operations necessary prior to a ptrace stop.
*/
-#define arch_ptrace_stop_needed(code, info) (0)
+#define arch_ptrace_stop_needed() (0)
#endif
#ifndef arch_ptrace_stop
/**
* arch_ptrace_stop - Do machine-specific work before stopping for ptrace
- * @code: current->exit_code value ptrace will stop with
- * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
*
* This is called with no locks held when arch_ptrace_stop_needed() has
* just returned nonzero. It is allowed to block, e.g. for user memory
@@ -394,27 +382,98 @@ static inline void user_single_step_report(struct pt_regs *regs)
* we only do it when the arch requires it for this particular stop, as
* indicated by arch_ptrace_stop_needed().
*/
-#define arch_ptrace_stop(code, info) do { } while (0)
+#define arch_ptrace_stop() do { } while (0)
#endif
#ifndef current_pt_regs
#define current_pt_regs() task_pt_regs(current)
#endif
-/*
- * unlike current_pt_regs(), this one is equal to task_pt_regs(current)
- * on *all* architectures; the only reason to have a per-arch definition
- * is optimisation.
- */
-#ifndef signal_pt_regs
-#define signal_pt_regs() task_pt_regs(current)
-#endif
-
#ifndef current_user_stack_pointer
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif
+#ifndef exception_ip
+#define exception_ip(x) instruction_pointer(x)
+#endif
+
extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
+
+/*
+ * ptrace report for syscall entry and exit looks identical.
+ */
+static inline int ptrace_report_syscall(unsigned long message)
+{
+ int ptrace = current->ptrace;
+ int signr;
+
+ if (!(ptrace & PT_PTRACED))
+ return 0;
+
+ signr = ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0),
+ message);
+
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (signr)
+ send_sig(signr, current, 1);
+
+ return fatal_signal_pending(current);
+}
+
+/**
+ * ptrace_report_syscall_entry - task is about to attempt a system call
+ * @regs: user register state of current task
+ *
+ * This will be called if %SYSCALL_WORK_SYSCALL_TRACE or
+ * %SYSCALL_WORK_SYSCALL_EMU have been set, when the current task has just
+ * entered the kernel for a system call. Full user register state is
+ * available here. Changing the values in @regs can affect the system
+ * call number and arguments to be tried. It is safe to block here,
+ * preventing the system call from beginning.
+ *
+ * Returns zero normally, or nonzero if the calling arch code should abort
+ * the system call. That must prevent normal entry so no system call is
+ * made. If the task ever returns to user mode after this, its register state
+ * is unspecified, but should be something harmless like an %ENOSYS error
+ * return. It should preserve enough information so that syscall_rollback()
+ * can work (see asm-generic/syscall.h).
+ *
+ * Called without locks, just after entering kernel mode.
+ */
+static inline __must_check int ptrace_report_syscall_entry(
+ struct pt_regs *regs)
+{
+ return ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_ENTRY);
+}
+
+/**
+ * ptrace_report_syscall_exit - task has just finished a system call
+ * @regs: user register state of current task
+ * @step: nonzero if simulating single-step or block-step
+ *
+ * This will be called if %SYSCALL_WORK_SYSCALL_TRACE has been set, when
+ * the current task has just finished an attempted system call. Full
+ * user register state is available here. It is safe to block here,
+ * preventing signals from being processed.
+ *
+ * If @step is nonzero, this report is also in lieu of the normal
+ * trap that would follow the system call instruction because
+ * user_enable_block_step() or user_enable_single_step() was used.
+ * In this case, %SYSCALL_WORK_SYSCALL_TRACE might not be set.
+ *
+ * Called without locks, just before checking for pending signals.
+ */
+static inline void ptrace_report_syscall_exit(struct pt_regs *regs, int step)
+{
+ if (step)
+ user_single_step_report(regs);
+ else
+ ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_EXIT);
+}
#endif
diff --git a/include/linux/ptrace_api.h b/include/linux/ptrace_api.h
new file mode 100644
index 000000000000..26e7d275ad8d
--- /dev/null
+++ b/include/linux/ptrace_api.h
@@ -0,0 +1 @@
+#include <linux/ptrace.h>
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 5bb90af4997e..b11ae91723f8 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -2,12 +2,15 @@
#ifndef __LINUX_PWM_H
#define __LINUX_PWM_H
+#include <linux/cdev.h>
+#include <linux/device.h>
#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-struct pwm_capture;
-struct seq_file;
+MODULE_IMPORT_NS("PWM");
struct pwm_chip;
@@ -44,8 +47,33 @@ struct pwm_args {
};
enum {
- PWMF_REQUESTED = 1 << 0,
- PWMF_EXPORTED = 1 << 1,
+ PWMF_REQUESTED = 0,
+ PWMF_EXPORTED = 1,
+};
+
+/**
+ * struct pwm_waveform - description of a PWM waveform
+ * @period_length_ns: PWM period
+ * @duty_length_ns: PWM duty cycle
+ * @duty_offset_ns: offset of the rising edge from the period's start
+ *
+ * This is a representation of a PWM waveform alternative to struct pwm_state
+ * below. It's more expressive than struct pwm_state as it contains a
+ * duty_offset_ns and so can represent offsets other than zero (with .polarity =
+ * PWM_POLARITY_NORMAL) and period - duty_cycle (.polarity =
+ * PWM_POLARITY_INVERSED).
+ *
+ * Note there is no explicit bool for enabled. A "disabled" PWM is represented
+ * by .period_length_ns = 0. Note further that the behaviour of a "disabled" PWM
+ * is undefined. Depending on the hardware's capabilities it might drive the
+ * active or inactive level, go high-z or even continue to toggle.
+ *
+ * The unit for all three members is nanoseconds.
+ */
+struct pwm_waveform {
+ u64 period_length_ns;
+ u64 duty_length_ns;
+ u64 duty_offset_ns;
};
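
A consumer-side sketch using the waveform API declared further below (the
timing values are arbitrary):

	static int example_apply_waveform(struct pwm_device *pwm)
	{
		struct pwm_waveform wf = {
			.period_length_ns = 1000000,	/* 1 ms period */
			.duty_length_ns = 250000,	/* 25% duty cycle */
			.duty_offset_ns = 100000,	/* rising edge 100 us in */
		};
		int err;

		/* see what the hardware can do without programming it yet */
		err = pwm_round_waveform_might_sleep(pwm, &wf);
		if (err)	/* negative errno, or positive if nothing close is achievable */
			return err;

		/* false: accept the rounded result, don't demand an exact match */
		return pwm_set_waveform_might_sleep(pwm, &wf, false);
	}
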
/*
@@ -54,12 +82,17 @@ enum {
* @duty_cycle: PWM duty cycle (in nanoseconds)
* @polarity: PWM polarity
* @enabled: PWM enabled status
+ * @usage_power: If set, the PWM driver is only required to maintain the power
+ * output but has more freedom regarding signal form.
+ * If supported, the signal can be optimized, for example to
+ * improve EMI by phase shifting individual channels.
*/
struct pwm_state {
u64 period;
u64 duty_cycle;
enum pwm_polarity polarity;
bool enabled;
+ bool usage_power;
};
/**
@@ -67,9 +100,7 @@ struct pwm_state {
* @label: name of the PWM device
* @flags: flags associated with the PWM device
* @hwpwm: per-chip relative index of the PWM device
- * @pwm: global index of the PWM device
* @chip: PWM chip providing this PWM device
- * @chip_data: chip-private data associated with the PWM device
* @args: PWM arguments
* @state: last applied state
* @last: last implemented state (for PWM_DEBUG)
@@ -78,9 +109,7 @@ struct pwm_device {
const char *label;
unsigned long flags;
unsigned int hwpwm;
- unsigned int pwm;
struct pwm_chip *chip;
- void *chip_data;
struct pwm_args args;
struct pwm_state state;
@@ -93,8 +122,8 @@ struct pwm_device {
* @state: state to fill with the current PWM state
*
* The returned PWM state represents the state that was applied by a previous call to
- * pwm_apply_state(). Drivers may have to slightly tweak that state before programming it to
- * hardware. If pwm_apply_state() was never called, this returns either the current hardware
+ * pwm_apply_might_sleep(). Drivers may have to slightly tweak that state before programming it to
+ * hardware. If pwm_apply_might_sleep() was never called, this returns either the current hardware
* state (if supported) or the default settings.
*/
static inline void pwm_get_state(const struct pwm_device *pwm,
@@ -112,12 +141,6 @@ static inline bool pwm_is_enabled(const struct pwm_device *pwm)
return state.enabled;
}
-static inline void pwm_set_period(struct pwm_device *pwm, u64 period)
-{
- if (pwm)
- pwm->state.period = period;
-}
-
static inline u64 pwm_get_period(const struct pwm_device *pwm)
{
struct pwm_state state;
@@ -127,12 +150,6 @@ static inline u64 pwm_get_period(const struct pwm_device *pwm)
return state.period;
}
-static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
-{
- if (pwm)
- pwm->state.duty_cycle = duty;
-}
-
static inline u64 pwm_get_duty_cycle(const struct pwm_device *pwm)
{
struct pwm_state state;
@@ -158,20 +175,20 @@ static inline void pwm_get_args(const struct pwm_device *pwm,
}
/**
- * pwm_init_state() - prepare a new state to be applied with pwm_apply_state()
+ * pwm_init_state() - prepare a new state to be applied with pwm_apply_might_sleep()
* @pwm: PWM device
* @state: state to fill with the prepared PWM state
*
* This function prepares a state that can later be tweaked and applied
- * to the PWM device with pwm_apply_state(). This is a convenient function
+ * to the PWM device with pwm_apply_might_sleep(). This is a convenient function
* that first retrieves the current PWM state and then replaces the period
* and polarity fields with the reference values defined in pwm->args.
* Once the function returns, you can adjust the ->enabled and ->duty_cycle
- * fields according to your needs before calling pwm_apply_state().
+ * fields according to your needs before calling pwm_apply_might_sleep().
*
* ->duty_cycle is initially set to zero to avoid cases where the current
* ->duty_cycle value exceeds the pwm_args->period one, which would trigger
- * an error if the user calls pwm_apply_state() without adjusting ->duty_cycle
+ * an error if the user calls pwm_apply_might_sleep() without adjusting ->duty_cycle
* first.
*/
static inline void pwm_init_state(const struct pwm_device *pwm,
@@ -188,6 +205,7 @@ static inline void pwm_init_state(const struct pwm_device *pwm,
state->period = args.period;
state->polarity = args.polarity;
state->duty_cycle = 0;
+ state->usage_power = false;
}
/**
@@ -202,6 +220,8 @@ static inline void pwm_init_state(const struct pwm_device *pwm,
*
* pwm_get_state(pwm, &state);
* duty = pwm_get_relative_duty_cycle(&state, 100);
+ *
+ * Returns: rounded relative duty cycle multiplied by @scale
*/
static inline unsigned int
pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale)
@@ -226,10 +246,10 @@ pwm_get_relative_duty_cycle(const struct pwm_state *state, unsigned int scale)
*
* pwm_init_state(pwm, &state);
* pwm_set_relative_duty_cycle(&state, 50, 100);
- * pwm_apply_state(pwm, &state);
+ * pwm_apply_might_sleep(pwm, &state);
*
- * This functions returns -EINVAL if @duty_cycle and/or @scale are
- * inconsistent (@scale == 0 or @duty_cycle > @scale).
+ * Returns: 0 on success or ``-EINVAL`` if @duty_cycle and/or @scale are
+ * inconsistent (@scale == 0 or @duty_cycle > @scale)
*/
static inline int
pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle,
@@ -246,81 +266,138 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle,
}
/**
+ * struct pwm_capture - PWM capture data
+ * @period: period of the PWM signal (in nanoseconds)
+ * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
+ */
+struct pwm_capture {
+ unsigned int period;
+ unsigned int duty_cycle;
+};
+
+#define PWM_WFHWSIZE 20
+
+/**
* struct pwm_ops - PWM controller operations
* @request: optional hook for requesting a PWM
* @free: optional hook for freeing a PWM
* @capture: capture and report PWM signal
+ * @sizeof_wfhw: size (in bytes) of the driver-specific waveform representation
+ * @round_waveform_tohw: convert a struct pwm_waveform to the driver-specific representation
+ * @round_waveform_fromhw: convert a driver-specific waveform representation to struct pwm_waveform
+ * @read_waveform: read the driver-specific waveform representation from hardware
+ * @write_waveform: write the driver-specific waveform representation to hardware
* @apply: atomically apply a new PWM config
- * @get_state: get the current PWM state. This function is only
- * called once per PWM device when the PWM chip is
- * registered.
- * @owner: helps prevent removal of modules exporting active PWMs
- * @config: configure duty cycles and period length for this PWM
- * @set_polarity: configure the polarity of this PWM
- * @enable: enable PWM output toggling
- * @disable: disable PWM output toggling
+ * @get_state: get the current PWM state.
*/
struct pwm_ops {
int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
void (*free)(struct pwm_chip *chip, struct pwm_device *pwm);
int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm,
struct pwm_capture *result, unsigned long timeout);
+
+ size_t sizeof_wfhw;
+ int (*round_waveform_tohw)(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_waveform *wf, void *wfhw);
+ int (*round_waveform_fromhw)(struct pwm_chip *chip, struct pwm_device *pwm,
+ const void *wfhw, struct pwm_waveform *wf);
+ int (*read_waveform)(struct pwm_chip *chip, struct pwm_device *pwm,
+ void *wfhw);
+ int (*write_waveform)(struct pwm_chip *chip, struct pwm_device *pwm,
+ const void *wfhw);
+
int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state);
- void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
- struct pwm_state *state);
- struct module *owner;
-
- /* Only used by legacy drivers */
- int (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns);
- int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
- enum pwm_polarity polarity);
- int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
- void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
+ int (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state);
};
/**
* struct pwm_chip - abstract a PWM controller
* @dev: device providing the PWMs
+ * @cdev: &struct cdev for this device
* @ops: callbacks for this PWM controller
- * @base: number of first PWM controlled by this chip
+ * @owner: module providing this chip
+ * @id: unique number of this PWM chip
* @npwm: number of PWMs controlled by this chip
* @of_xlate: request a PWM device given a device tree PWM specifier
- * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier
- * @list: list node for internal use
+ * @atomic: can the driver's ->apply() be called in atomic context
+ * @gpio: &struct gpio_chip to operate this PWM chip's lines as GPO
+ * @uses_pwmchip_alloc: signals if pwmchip_alloc was used to allocate this chip
+ * @operational: signals if the chip can be used (or is already deregistered)
+ * @nonatomic_lock: mutex for nonatomic chips
+ * @atomic_lock: mutex for atomic chips
* @pwms: array of PWM devices allocated by the framework
*/
struct pwm_chip {
- struct device *dev;
+ struct device dev;
+ struct cdev cdev;
const struct pwm_ops *ops;
- int base;
+ struct module *owner;
+ unsigned int id;
unsigned int npwm;
- struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
+ struct pwm_device * (*of_xlate)(struct pwm_chip *chip,
const struct of_phandle_args *args);
- unsigned int of_pwm_n_cells;
+ bool atomic;
/* only used internally by the PWM framework */
- struct list_head list;
- struct pwm_device *pwms;
+ struct gpio_chip gpio;
+ bool uses_pwmchip_alloc;
+ bool operational;
+ union {
+ /*
+ * depending on the chip being atomic or not either the mutex or
+ * the spinlock is used. It protects .operational and
+ * synchronizes the callbacks in .ops
+ */
+ struct mutex nonatomic_lock;
+ spinlock_t atomic_lock;
+ };
+ struct pwm_device pwms[] __counted_by(npwm);
};
/**
- * struct pwm_capture - PWM capture data
- * @period: period of the PWM signal (in nanoseconds)
- * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
+ * pwmchip_supports_waveform() - checks if the given chip supports waveform callbacks
+ * @chip: The pwm_chip to test
+ *
+ * Returns: true iff the pwm chip supports the waveform functions like
+ * pwm_set_waveform_might_sleep() and pwm_round_waveform_might_sleep()
*/
-struct pwm_capture {
- unsigned int period;
- unsigned int duty_cycle;
-};
+static inline bool pwmchip_supports_waveform(struct pwm_chip *chip)
+{
+ /*
+	 * Only check for .write_waveform(). If that is available,
+	 * .round_waveform_tohw() and .round_waveform_fromhw() are asserted
+	 * to be available, too, in pwmchip_add().
+ */
+ return chip->ops->write_waveform != NULL;
+}
+
+static inline struct device *pwmchip_parent(const struct pwm_chip *chip)
+{
+ return chip->dev.parent;
+}
+
+static inline void *pwmchip_get_drvdata(const struct pwm_chip *chip)
+{
+ return dev_get_drvdata(&chip->dev);
+}
+
+static inline void pwmchip_set_drvdata(struct pwm_chip *chip, void *data)
+{
+ dev_set_drvdata(&chip->dev, data);
+}
+
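
A provider-side probe sketch for the allocation scheme above, assuming, as
the sizeof_priv parameter suggests, that drvdata points at the private area
(the ops table and channel count are illustrative):

	struct example_pwm {
		void __iomem *base;	/* hypothetical register window */
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct example_pwm *priv;
		struct pwm_chip *chip;

		chip = devm_pwmchip_alloc(&pdev->dev, 4, sizeof(*priv));
		if (IS_ERR(chip))
			return PTR_ERR(chip);

		priv = pwmchip_get_drvdata(chip);	/* the private area */
		chip->ops = &example_pwm_ops;		/* assumed defined elsewhere */

		return devm_pwmchip_add(&pdev->dev, chip);
	}
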
+#if IS_REACHABLE(CONFIG_PWM)
-#if IS_ENABLED(CONFIG_PWM)
-/* PWM user APIs */
-struct pwm_device *pwm_request(int pwm_id, const char *label);
-void pwm_free(struct pwm_device *pwm);
-int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state);
+/* PWM consumer APIs */
+int pwm_round_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *wf);
+int pwm_get_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *wf);
+int pwm_set_waveform_might_sleep(struct pwm_device *pwm, const struct pwm_waveform *wf, bool exact);
+int pwm_apply_might_sleep(struct pwm_device *pwm, const struct pwm_state *state);
+int pwm_apply_atomic(struct pwm_device *pwm, const struct pwm_state *state);
+int pwm_get_state_hw(struct pwm_device *pwm, struct pwm_state *state);
int pwm_adjust_config(struct pwm_device *pwm);
/**
@@ -348,7 +425,7 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
state.duty_cycle = duty_ns;
state.period = period_ns;
- return pwm_apply_state(pwm, &state);
+ return pwm_apply_might_sleep(pwm, &state);
}
/**
@@ -369,7 +446,7 @@ static inline int pwm_enable(struct pwm_device *pwm)
return 0;
state.enabled = true;
- return pwm_apply_state(pwm, &state);
+ return pwm_apply_might_sleep(pwm, &state);
}
/**
@@ -388,87 +465,113 @@ static inline void pwm_disable(struct pwm_device *pwm)
return;
state.enabled = false;
- pwm_apply_state(pwm, &state);
+ pwm_apply_might_sleep(pwm, &state);
+}
+
+/**
+ * pwm_might_sleep() - is pwm_apply_atomic() supported?
+ * @pwm: PWM device
+ *
+ * Returns: false if pwm_apply_atomic() can be called from atomic context.
+ */
+static inline bool pwm_might_sleep(struct pwm_device *pwm)
+{
+ return !pwm->chip->atomic;
}
/* PWM provider APIs */
-int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
- unsigned long timeout);
-int pwm_set_chip_data(struct pwm_device *pwm, void *data);
-void *pwm_get_chip_data(struct pwm_device *pwm);
-
-int pwmchip_add(struct pwm_chip *chip);
-int pwmchip_remove(struct pwm_chip *chip);
-struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
- unsigned int index,
- const char *label);
-
-struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc,
+void pwmchip_put(struct pwm_chip *chip);
+struct pwm_chip *pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv);
+struct pwm_chip *devm_pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv);
+
+int __pwmchip_add(struct pwm_chip *chip, struct module *owner);
+#define pwmchip_add(chip) __pwmchip_add(chip, THIS_MODULE)
+void pwmchip_remove(struct pwm_chip *chip);
+
+/*
+ * For FFI wrapper use only:
+ * The Rust PWM abstraction needs this to properly free the pwm_chip.
+ */
+void pwmchip_release(struct device *dev);
+
+int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner);
+#define devm_pwmchip_add(dev, chip) __devm_pwmchip_add(dev, chip, THIS_MODULE)
+
+struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *chip,
const struct of_phandle_args *args);
+struct pwm_device *of_pwm_single_xlate(struct pwm_chip *chip,
+ const struct of_phandle_args *args);
struct pwm_device *pwm_get(struct device *dev, const char *con_id);
-struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np,
- const char *con_id);
void pwm_put(struct pwm_device *pwm);
struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id);
-struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
- const char *con_id);
struct pwm_device *devm_fwnode_pwm_get(struct device *dev,
struct fwnode_handle *fwnode,
const char *con_id);
-void devm_pwm_put(struct device *dev, struct pwm_device *pwm);
#else
-static inline struct pwm_device *pwm_request(int pwm_id, const char *label)
+static inline bool pwm_might_sleep(struct pwm_device *pwm)
{
- return ERR_PTR(-ENODEV);
+ return true;
+}
+
+static inline int pwm_apply_might_sleep(struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ might_sleep();
+ return -EOPNOTSUPP;
}
-static inline void pwm_free(struct pwm_device *pwm)
+static inline int pwm_apply_atomic(struct pwm_device *pwm,
+ const struct pwm_state *state)
{
+ return -EOPNOTSUPP;
}
-static inline int pwm_apply_state(struct pwm_device *pwm,
- const struct pwm_state *state)
+static inline int pwm_get_state_hw(struct pwm_device *pwm, struct pwm_state *state)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int pwm_adjust_config(struct pwm_device *pwm)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
int period_ns)
{
+ might_sleep();
return -EINVAL;
}
-static inline int pwm_capture(struct pwm_device *pwm,
- struct pwm_capture *result,
- unsigned long timeout)
+static inline int pwm_enable(struct pwm_device *pwm)
{
+ might_sleep();
return -EINVAL;
}
-static inline int pwm_enable(struct pwm_device *pwm)
+static inline void pwm_disable(struct pwm_device *pwm)
{
- return -EINVAL;
+ might_sleep();
}
-static inline void pwm_disable(struct pwm_device *pwm)
+static inline void pwmchip_put(struct pwm_chip *chip)
{
}
-static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
+static inline struct pwm_chip *pwmchip_alloc(struct device *parent,
+ unsigned int npwm,
+ size_t sizeof_priv)
{
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
-static inline void *pwm_get_chip_data(struct pwm_device *pwm)
+static inline struct pwm_chip *devm_pwmchip_alloc(struct device *parent,
+ unsigned int npwm,
+ size_t sizeof_priv)
{
- return NULL;
+ return pwmchip_alloc(parent, npwm, sizeof_priv);
}
static inline int pwmchip_add(struct pwm_chip *chip)
@@ -481,40 +584,27 @@ static inline int pwmchip_remove(struct pwm_chip *chip)
return -EINVAL;
}
-static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
- unsigned int index,
- const char *label)
+static inline int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip)
{
- return ERR_PTR(-ENODEV);
+ return -EINVAL;
}
static inline struct pwm_device *pwm_get(struct device *dev,
const char *consumer)
{
- return ERR_PTR(-ENODEV);
-}
-
-static inline struct pwm_device *of_pwm_get(struct device *dev,
- struct device_node *np,
- const char *con_id)
-{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
static inline void pwm_put(struct pwm_device *pwm)
{
+ might_sleep();
}
static inline struct pwm_device *devm_pwm_get(struct device *dev,
const char *consumer)
{
- return ERR_PTR(-ENODEV);
-}
-
-static inline struct pwm_device *devm_of_pwm_get(struct device *dev,
- struct device_node *np,
- const char *con_id)
-{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
@@ -522,46 +612,11 @@ static inline struct pwm_device *
devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode,
const char *con_id)
{
+ might_sleep();
return ERR_PTR(-ENODEV);
}
-
-static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
-{
-}
#endif
-static inline void pwm_apply_args(struct pwm_device *pwm)
-{
- struct pwm_state state = { };
-
- /*
- * PWM users calling pwm_apply_args() expect to have a fresh config
- * where the polarity and period are set according to pwm_args info.
- * The problem is, polarity can only be changed when the PWM is
- * disabled.
- *
- * PWM drivers supporting hardware readout may declare the PWM device
- * as enabled, and prevent polarity setting, which changes from the
- * existing behavior, where all PWM devices are declared as disabled
- * at startup (even if they are actually enabled), thus authorizing
- * polarity setting.
- *
- * To fulfill this requirement, we apply a new state which disables
- * the PWM device and set the reference period and polarity config.
- *
- * Note that PWM users requiring a smooth handover between the
- * bootloader and the kernel (like critical regulators controlled by
- * PWM devices) will have to switch to the atomic API and avoid calling
- * pwm_apply_args().
- */
-
- state.enabled = false;
- state.polarity = pwm->args.polarity;
- state.period = pwm->args.period;
-
- pwm_apply_state(pwm, &state);
-}
-
struct pwm_lookup {
struct list_head list;
const char *provider;
@@ -589,7 +644,7 @@ struct pwm_lookup {
PWM_LOOKUP_WITH_MODULE(_provider, _index, _dev_id, _con_id, _period, \
_polarity, NULL)
-#if IS_ENABLED(CONFIG_PWM)
+#if IS_REACHABLE(CONFIG_PWM)
void pwm_add_table(struct pwm_lookup *table, size_t num);
void pwm_remove_table(struct pwm_lookup *table, size_t num);
#else
@@ -602,17 +657,4 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
}
#endif
-#ifdef CONFIG_PWM_SYSFS
-void pwmchip_sysfs_export(struct pwm_chip *chip);
-void pwmchip_sysfs_unexport(struct pwm_chip *chip);
-#else
-static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
-{
-}
-
-static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
-{
-}
-#endif /* CONFIG_PWM_SYSFS */
-
#endif /* __LINUX_PWM_H */
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 06086cb93b6f..0bf80e98d5b4 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -8,7 +8,6 @@
#include <linux/backlight.h>
struct platform_pwm_backlight_data {
- int pwm_id;
unsigned int max_brightness;
unsigned int dft_brightness;
unsigned int lth_brightness;
@@ -20,7 +19,6 @@ struct platform_pwm_backlight_data {
int (*notify)(struct device *dev, int brightness);
void (*notify_after)(struct device *dev, int brightness);
void (*exit)(struct device *dev);
- int (*check_fb)(struct device *dev, struct fb_info *info);
};
#endif
diff --git a/include/linux/pwrseq/consumer.h b/include/linux/pwrseq/consumer.h
new file mode 100644
index 000000000000..7d583b4f266e
--- /dev/null
+++ b/include/linux/pwrseq/consumer.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#ifndef __POWER_SEQUENCING_CONSUMER_H__
+#define __POWER_SEQUENCING_CONSUMER_H__
+
+#include <linux/err.h>
+
+struct device;
+struct pwrseq_desc;
+
+#if IS_ENABLED(CONFIG_POWER_SEQUENCING)
+
+struct pwrseq_desc * __must_check
+pwrseq_get(struct device *dev, const char *target);
+void pwrseq_put(struct pwrseq_desc *desc);
+
+struct pwrseq_desc * __must_check
+devm_pwrseq_get(struct device *dev, const char *target);
+
+int pwrseq_power_on(struct pwrseq_desc *desc);
+int pwrseq_power_off(struct pwrseq_desc *desc);
+
+#else /* CONFIG_POWER_SEQUENCING */
+
+static inline struct pwrseq_desc * __must_check
+pwrseq_get(struct device *dev, const char *target)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void pwrseq_put(struct pwrseq_desc *desc)
+{
+}
+
+static inline struct pwrseq_desc * __must_check
+devm_pwrseq_get(struct device *dev, const char *target)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int pwrseq_power_on(struct pwrseq_desc *desc)
+{
+ return -ENOSYS;
+}
+
+static inline int pwrseq_power_off(struct pwrseq_desc *desc)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_POWER_SEQUENCING */
+
+#endif /* __POWER_SEQUENCING_CONSUMER_H__ */
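
A consumer sketch under the usual devm lifetime assumptions (the "wlan"
target name is illustrative):

	static int example_power_up(struct device *dev)
	{
		struct pwrseq_desc *pwrseq;

		pwrseq = devm_pwrseq_get(dev, "wlan");
		if (IS_ERR(pwrseq))
			return PTR_ERR(pwrseq);

		/* on teardown, pwrseq_power_off() undoes this */
		return pwrseq_power_on(pwrseq);
	}
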
diff --git a/include/linux/pwrseq/provider.h b/include/linux/pwrseq/provider.h
new file mode 100644
index 000000000000..33b3d2c2e39d
--- /dev/null
+++ b/include/linux/pwrseq/provider.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#ifndef __POWER_SEQUENCING_PROVIDER_H__
+#define __POWER_SEQUENCING_PROVIDER_H__
+
+struct device;
+struct module;
+struct pwrseq_device;
+
+typedef int (*pwrseq_power_state_func)(struct pwrseq_device *);
+typedef int (*pwrseq_match_func)(struct pwrseq_device *, struct device *);
+
+#define PWRSEQ_NO_MATCH 0
+#define PWRSEQ_MATCH_OK 1
+
+/**
+ * struct pwrseq_unit_data - Configuration of a single power sequencing
+ * unit.
+ * @name: Name of the unit.
+ * @deps: Units that must be enabled before this one and disabled after it
+ * in the order they come in this array. Must be NULL-terminated.
+ * @enable: Callback running the part of the power-on sequence provided by
+ * this unit.
+ * @disable: Callback running the part of the power-off sequence provided
+ * by this unit.
+ */
+struct pwrseq_unit_data {
+ const char *name;
+ const struct pwrseq_unit_data **deps;
+ pwrseq_power_state_func enable;
+ pwrseq_power_state_func disable;
+};
+
+/**
+ * struct pwrseq_target_data - Configuration of a power sequencing target.
+ * @name: Name of the target.
+ * @unit: Final unit that this target must reach in order to be considered
+ * enabled.
+ * @post_enable: Callback run after the target unit has been enabled, *after*
+ * the state lock has been released. It's useful for implementing
+ * boot-up delays without blocking other users from powering up
+ * using the same power sequencer.
+ */
+struct pwrseq_target_data {
+ const char *name;
+ const struct pwrseq_unit_data *unit;
+ pwrseq_power_state_func post_enable;
+};
+
+/**
+ * struct pwrseq_config - Configuration used for registering a new provider.
+ * @parent: Parent device for the sequencer. Must be set.
+ * @owner: Module providing this device.
+ * @drvdata: Private driver data.
+ * @match: Provider callback used to match the consumer device to the sequencer.
+ * @targets: Array of targets for this power sequencer. Must be NULL-terminated.
+ */
+struct pwrseq_config {
+ struct device *parent;
+ struct module *owner;
+ void *drvdata;
+ pwrseq_match_func match;
+ const struct pwrseq_target_data **targets;
+};
+
+struct pwrseq_device *
+pwrseq_device_register(const struct pwrseq_config *config);
+void pwrseq_device_unregister(struct pwrseq_device *pwrseq);
+struct pwrseq_device *
+devm_pwrseq_device_register(struct device *dev,
+ const struct pwrseq_config *config);
+
+void *pwrseq_device_get_drvdata(struct pwrseq_device *pwrseq);
+
+#endif /* __POWER_SEQUENCING_PROVIDER_H__ */
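
A provider sketch wiring a single unit into a single target (the enable,
disable and match callbacks are placeholders):

	static const struct pwrseq_unit_data example_unit = {
		.name = "example-enable",
		.enable = example_unit_enable,		/* hypothetical */
		.disable = example_unit_disable,	/* hypothetical */
	};

	static const struct pwrseq_target_data example_target = {
		.name = "example",
		.unit = &example_unit,
	};

	/* NULL-terminated, as struct pwrseq_config above requires */
	static const struct pwrseq_target_data *example_targets[] = {
		&example_target,
		NULL,
	};

	static int example_pwrseq_probe(struct platform_device *pdev)
	{
		const struct pwrseq_config config = {
			.parent = &pdev->dev,
			.owner = THIS_MODULE,
			.match = example_match,		/* hypothetical */
			.targets = example_targets,
		};
		struct pwrseq_device *pwrseq;

		pwrseq = devm_pwrseq_device_register(&pdev->dev, &config);

		return PTR_ERR_OR_ZERO(pwrseq);
	}
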
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 7f73b26ed22e..844a2743ca94 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
*
* This driver supports the following PXA CPU/SSP ports:-
*
@@ -11,8 +11,8 @@
* PXA3xx SSP1, SSP2, SSP3, SSP4
*/
-#ifndef __LINUX_SSP_H
-#define __LINUX_SSP_H
+#ifndef __LINUX_PXA2XX_SSP_H
+#define __LINUX_PXA2XX_SSP_H
#include <linux/bits.h>
#include <linux/compiler_types.h>
@@ -38,7 +38,6 @@ struct device_node;
#define SSDR (0x10) /* SSP Data Write/Data Read Register */
#define SSTO (0x28) /* SSP Time Out Register */
-#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */
#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */
#define SSTSA (0x30) /* SSP Tx Timeslot Active */
#define SSRSA (0x34) /* SSP Rx Timeslot Active */
@@ -60,7 +59,7 @@ struct device_node;
/* PXA27x, PXA3xx */
#define SSCR0_EDSS BIT(20) /* Extended data size select */
#define SSCR0_NCS BIT(21) /* Network clock select */
-#define SSCR0_RIM BIT(22) /* Receive FIFO overrrun interrupt mask */
+#define SSCR0_RIM BIT(22) /* Receive FIFO overrun interrupt mask */
#define SSCR0_TUM BIT(23) /* Transmit FIFO underrun interrupt mask */
#define SSCR0_FRDC GENMASK(26, 24) /* Frame rate divider control (mask) */
#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */
@@ -105,6 +104,9 @@ struct device_node;
#define CE4100_SSCR1_RFT GENMASK(11, 10) /* Receive FIFO Threshold (mask) */
#define CE4100_SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
+/* Intel Quark X1000 */
+#define DDS_RATE 0x28 /* SSP DDS Clock Rate Register */
+
/* QUARK_X1000 SSCR0 bit definition */
#define QUARK_X1000_SSCR0_DSS GENMASK(4, 0) /* Data Size Select (mask) */
#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */
@@ -124,7 +126,7 @@ struct device_node;
#define QUARK_X1000_SSCR1_EFWR BIT(16) /* Enable FIFO Write/Read */
#define QUARK_X1000_SSCR1_STRF BIT(17) /* Select FIFO or EFWR */
-/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
+/* Extra bits in PXA255, PXA26x and PXA27x SSP ports */
#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */
@@ -181,6 +183,21 @@ struct device_node;
#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
#define SSACD_SCDX8 BIT(7) /* SYSCLK division ratio select */
+/* Intel Merrifield SSP */
+#define SFIFOL 0x68 /* FIFO level */
+#define SFIFOTT 0x6c /* FIFO trigger threshold */
+
+#define RX_THRESH_MRFLD_DFLT 16
+#define TX_THRESH_MRFLD_DFLT 16
+
+#define SFIFOL_TFL_MASK GENMASK(15, 0) /* Transmit FIFO Level mask */
+#define SFIFOL_RFL_MASK GENMASK(31, 16) /* Receive FIFO Level mask */
+
+#define SFIFOTT_TFT GENMASK(15, 0) /* Transmit FIFO Threshold (mask) */
+#define SFIFOTT_TxThresh(x) (((x) - 1) << 0) /* TX FIFO trigger threshold / level */
+#define SFIFOTT_RFT GENMASK(31, 16) /* Receive FIFO Threshold (mask) */
+#define SFIFOTT_RxThresh(x) (((x) - 1) << 16) /* RX FIFO trigger threshold / level */
+
/* LPSS SSP */
#define SSITF 0x44 /* TX FIFO trigger level */
#define SSITF_TxHiThresh(x) (((x) - 1) << 0)
@@ -200,16 +217,19 @@ enum pxa_ssp_type {
PXA27x_SSP,
PXA3xx_SSP,
PXA168_SSP,
- MMP2_SSP,
PXA910_SSP,
CE4100_SSP,
+ MMP2_SSP,
+ MRFLD_SSP,
QUARK_X1000_SSP,
- LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
+ /* Keep LPSS types sorted with lpss_platforms[] */
+ LPSS_LPT_SSP,
LPSS_BYT_SSP,
LPSS_BSW_SSP,
LPSS_SPT_SSP,
LPSS_BXT_SSP,
LPSS_CNL_SSP,
+ SSP_MAX
};
struct ssp_device {
@@ -252,6 +272,22 @@ static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg)
return __raw_readl(dev->mmio_base + reg);
}
+static inline void pxa_ssp_enable(struct ssp_device *ssp)
+{
+ u32 sscr0;
+
+ sscr0 = pxa_ssp_read_reg(ssp, SSCR0) | SSCR0_SSE;
+ pxa_ssp_write_reg(ssp, SSCR0, sscr0);
+}
+
+static inline void pxa_ssp_disable(struct ssp_device *ssp)
+{
+ u32 sscr0;
+
+ sscr0 = pxa_ssp_read_reg(ssp, SSCR0) & ~SSCR0_SSE;
+ pxa_ssp_write_reg(ssp, SSCR0, sscr0);
+}
+
#if IS_ENABLED(CONFIG_PXA_SSP)
struct ssp_device *pxa_ssp_request(int port, const char *label);
void pxa_ssp_free(struct ssp_device *);
@@ -270,4 +306,4 @@ static inline struct ssp_device *pxa_ssp_request_of(const struct device_node *n,
static inline void pxa_ssp_free(struct ssp_device *ssp) {}
#endif
-#endif
+#endif /* __LINUX_PXA2XX_SSP_H */
diff --git a/include/linux/qat/qat_mig_dev.h b/include/linux/qat/qat_mig_dev.h
new file mode 100644
index 000000000000..dbbb6a063dd2
--- /dev/null
+++ b/include/linux/qat/qat_mig_dev.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2024 Intel Corporation */
+#ifndef QAT_MIG_DEV_H_
+#define QAT_MIG_DEV_H_
+
+struct pci_dev;
+
+struct qat_mig_dev {
+ void *parent_accel_dev;
+ u8 *state;
+ u32 setup_size;
+ u32 remote_setup_size;
+ u32 state_size;
+ s32 vf_id;
+};
+
+struct qat_mig_dev *qat_vfmig_create(struct pci_dev *pdev, int vf_id);
+int qat_vfmig_init(struct qat_mig_dev *mdev);
+void qat_vfmig_cleanup(struct qat_mig_dev *mdev);
+void qat_vfmig_reset(struct qat_mig_dev *mdev);
+int qat_vfmig_open(struct qat_mig_dev *mdev);
+void qat_vfmig_close(struct qat_mig_dev *mdev);
+int qat_vfmig_suspend(struct qat_mig_dev *mdev);
+int qat_vfmig_resume(struct qat_mig_dev *mdev);
+int qat_vfmig_save_state(struct qat_mig_dev *mdev);
+int qat_vfmig_save_setup(struct qat_mig_dev *mdev);
+int qat_vfmig_load_state(struct qat_mig_dev *mdev);
+int qat_vfmig_load_setup(struct qat_mig_dev *mdev, int size);
+void qat_vfmig_destroy(struct qat_mig_dev *mdev);
+
+#endif /*QAT_MIG_DEV_H_*/
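
The header does not spell out call ordering; a hedged sketch of a plausible
source-side save flow (the ordering is an assumption, and the per-call error
handling is elided):

	static int example_save_source_state(struct pci_dev *pdev, int vf_id)
	{
		struct qat_mig_dev *mdev = qat_vfmig_create(pdev, vf_id);

		if (IS_ERR(mdev))
			return PTR_ERR(mdev);

		/* each call returns int; checks elided for brevity */
		qat_vfmig_init(mdev);
		qat_vfmig_open(mdev);
		qat_vfmig_suspend(mdev);	/* quiesce the VF */
		qat_vfmig_save_setup(mdev);	/* then snapshot into mdev->state */
		qat_vfmig_save_state(mdev);

		return 0;
	}
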
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
deleted file mode 100644
index 0165824c5128..000000000000
--- a/include/linux/qcom_scm.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2010-2015, 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (C) 2015 Linaro Ltd.
- */
-#ifndef __QCOM_SCM_H
-#define __QCOM_SCM_H
-
-#include <linux/err.h>
-#include <linux/types.h>
-#include <linux/cpumask.h>
-
-#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
-#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
-#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
-#define QCOM_SCM_HDCP_MAX_REQ_CNT 5
-
-struct qcom_scm_hdcp_req {
- u32 addr;
- u32 val;
-};
-
-struct qcom_scm_vmperm {
- int vmid;
- int perm;
-};
-
-enum qcom_scm_ocmem_client {
- QCOM_SCM_OCMEM_UNUSED_ID = 0x0,
- QCOM_SCM_OCMEM_GRAPHICS_ID,
- QCOM_SCM_OCMEM_VIDEO_ID,
- QCOM_SCM_OCMEM_LP_AUDIO_ID,
- QCOM_SCM_OCMEM_SENSORS_ID,
- QCOM_SCM_OCMEM_OTHER_OS_ID,
- QCOM_SCM_OCMEM_DEBUG_ID,
-};
-
-enum qcom_scm_sec_dev_id {
- QCOM_SCM_MDSS_DEV_ID = 1,
- QCOM_SCM_OCMEM_DEV_ID = 5,
- QCOM_SCM_PCIE0_DEV_ID = 11,
- QCOM_SCM_PCIE1_DEV_ID = 12,
- QCOM_SCM_GFX_DEV_ID = 18,
- QCOM_SCM_UFS_DEV_ID = 19,
- QCOM_SCM_ICE_DEV_ID = 20,
-};
-
-enum qcom_scm_ice_cipher {
- QCOM_SCM_ICE_CIPHER_AES_128_XTS = 0,
- QCOM_SCM_ICE_CIPHER_AES_128_CBC = 1,
- QCOM_SCM_ICE_CIPHER_AES_256_XTS = 3,
- QCOM_SCM_ICE_CIPHER_AES_256_CBC = 4,
-};
-
-#define QCOM_SCM_VMID_HLOS 0x3
-#define QCOM_SCM_VMID_MSS_MSA 0xF
-#define QCOM_SCM_VMID_WLAN 0x18
-#define QCOM_SCM_VMID_WLAN_CE 0x19
-#define QCOM_SCM_PERM_READ 0x4
-#define QCOM_SCM_PERM_WRITE 0x2
-#define QCOM_SCM_PERM_EXEC 0x1
-#define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE)
-#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
-
-#if IS_ENABLED(CONFIG_QCOM_SCM)
-extern bool qcom_scm_is_available(void);
-
-extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
-extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
-extern void qcom_scm_cpu_power_down(u32 flags);
-extern int qcom_scm_set_remote_state(u32 state, u32 id);
-
-extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
- size_t size);
-extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
- phys_addr_t size);
-extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
-extern int qcom_scm_pas_shutdown(u32 peripheral);
-extern bool qcom_scm_pas_supported(u32 peripheral);
-
-extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
-extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
-
-extern bool qcom_scm_restore_sec_cfg_available(void);
-extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
-extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
-extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
-extern int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
- u32 cp_nonpixel_start,
- u32 cp_nonpixel_size);
-extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
- unsigned int *src,
- const struct qcom_scm_vmperm *newvm,
- unsigned int dest_cnt);
-
-extern bool qcom_scm_ocmem_lock_available(void);
-extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
- u32 size, u32 mode);
-extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset,
- u32 size);
-
-extern bool qcom_scm_ice_available(void);
-extern int qcom_scm_ice_invalidate_key(u32 index);
-extern int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
- enum qcom_scm_ice_cipher cipher,
- u32 data_unit_size);
-
-extern bool qcom_scm_hdcp_available(void);
-extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
- u32 *resp);
-
-extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
-#else
-
-#include <linux/errno.h>
-
-static inline bool qcom_scm_is_available(void) { return false; }
-
-static inline int qcom_scm_set_cold_boot_addr(void *entry,
- const cpumask_t *cpus) { return -ENODEV; }
-static inline int qcom_scm_set_warm_boot_addr(void *entry,
- const cpumask_t *cpus) { return -ENODEV; }
-static inline void qcom_scm_cpu_power_down(u32 flags) {}
-static inline u32 qcom_scm_set_remote_state(u32 state,u32 id)
- { return -ENODEV; }
-
-static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
- size_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
- phys_addr_t size) { return -ENODEV; }
-static inline int qcom_scm_pas_auth_and_reset(u32 peripheral)
- { return -ENODEV; }
-static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
-static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; }
-
-static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
- { return -ENODEV; }
-static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
- { return -ENODEV; }
-
-static inline bool qcom_scm_restore_sec_cfg_available(void) { return false; }
-static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
- { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
- { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
- { return -ENODEV; }
-extern inline int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
- u32 cp_nonpixel_start,
- u32 cp_nonpixel_size)
- { return -ENODEV; }
-static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
- unsigned int *src, const struct qcom_scm_vmperm *newvm,
- unsigned int dest_cnt) { return -ENODEV; }
-
-static inline bool qcom_scm_ocmem_lock_available(void) { return false; }
-static inline int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
- u32 size, u32 mode) { return -ENODEV; }
-static inline int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id,
- u32 offset, u32 size) { return -ENODEV; }
-
-static inline bool qcom_scm_ice_available(void) { return false; }
-static inline int qcom_scm_ice_invalidate_key(u32 index) { return -ENODEV; }
-static inline int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
- enum qcom_scm_ice_cipher cipher,
- u32 data_unit_size) { return -ENODEV; }
-
-static inline bool qcom_scm_hdcp_available(void) { return false; }
-static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
- u32 *resp) { return -ENODEV; }
-
-static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
- { return -ENODEV; }
-#endif
-#endif
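The removal above drops the public SCM interface wholesale. For context, here is a minimal sketch of how the memory-assignment API this header declared was typically consumed before the deletion (the function and buffer names are hypothetical and error handling is trimmed):

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/qcom_scm.h>

/* Reassign a physically contiguous buffer from HLOS to the modem (MSS MSA)
 * with read/write permission, using only declarations from the header
 * deleted above. The src bitmask names the current owner VM(s).
 */
static int example_share_with_modem(phys_addr_t addr, size_t size)
{
	unsigned int src = BIT(QCOM_SCM_VMID_HLOS);
	struct qcom_scm_vmperm newvm[] = {
		{ QCOM_SCM_VMID_MSS_MSA, QCOM_SCM_PERM_RW },
	};

	if (!qcom_scm_is_available())
		return -ENODEV;

	return qcom_scm_assign_mem(addr, size, &src, newvm, ARRAY_SIZE(newvm));
}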
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 977807e1be53..827624840ee2 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2016 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#ifndef _COMMON_HSI_H
@@ -47,10 +47,10 @@
#define ISCSI_CDU_TASK_SEG_TYPE 0
#define FCOE_CDU_TASK_SEG_TYPE 0
#define RDMA_CDU_TASK_SEG_TYPE 1
+#define ETH_CDU_TASK_SEG_TYPE 2
#define FW_ASSERT_GENERAL_ATTN_IDX 32
-
/* Queue Zone sizes in bytes */
#define TSTORM_QZONE_SIZE 8
#define MSTORM_QZONE_SIZE 16
@@ -60,9 +60,12 @@
#define PSTORM_QZONE_SIZE 0
#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
-#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
+#define ETH_MAX_RXQ_VF_DEFAULT 16
+#define ETH_MAX_RXQ_VF_DOUBLE 48
+#define ETH_MAX_RXQ_VF_QUAD 112
+
+#define ETH_RGSRC_CTX_SIZE 6
+#define ETH_TGSRC_CTX_SIZE 6
/********************************/
/* CORE (LIGHT L2) FW CONSTANTS */
@@ -89,8 +92,8 @@
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 42
-#define FW_REVISION_VERSION 2
+#define FW_MINOR_VERSION 59
+#define FW_REVISION_VERSION 1
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -112,6 +115,7 @@
#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
@@ -133,7 +137,7 @@
#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
/* CIDs */
-#define NUM_OF_CONNECTION_TYPES_E4 (8)
+#define NUM_OF_CONNECTION_TYPES (8)
#define NUM_OF_LCIDS (320)
#define NUM_OF_LTIDS (320)
@@ -144,7 +148,7 @@
#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS)
/* Tools Version */
-#define TOOLS_VERSION 10
+#define TOOLS_VERSION 11
/*****************/
/* CDU CONSTANTS */
@@ -162,6 +166,7 @@
#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3)
#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4)
#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5)
+#define CDU_CONTEXT_VALIDATION_DEFAULT_CFG (0x3d)
/*****************/
/* DQ CONSTANTS */
@@ -302,6 +307,9 @@
/* PWM address mapping */
#define DQ_PWM_OFFSET_DPM_BASE 0x0
#define DQ_PWM_OFFSET_DPM_END 0x27
+#define DQ_PWM_OFFSET_XCM32_24ICID_BASE 0x28
+#define DQ_PWM_OFFSET_UCM32_24ICID_BASE 0x30
+#define DQ_PWM_OFFSET_TCM32_24ICID_BASE 0x38
#define DQ_PWM_OFFSET_XCM16_BASE 0x40
#define DQ_PWM_OFFSET_XCM32_BASE 0x44
#define DQ_PWM_OFFSET_UCM16_BASE 0x48
@@ -325,6 +333,13 @@
#define DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE \
(DQ_PWM_OFFSET_TCM32_BASE + DQ_TCM_AGG_VAL_SEL_REG9 - 4)
+#define DQ_PWM_OFFSET_XCM_RDMA_24B_ICID_SQ_PROD \
+ (DQ_PWM_OFFSET_XCM32_24ICID_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_24B_ICID_CQ_CONS_32BIT \
+ (DQ_PWM_OFFSET_UCM32_24ICID_BASE + 4)
+#define DQ_PWM_OFFSET_TCM_ROCE_24B_ICID_RQ_PROD \
+ (DQ_PWM_OFFSET_TCM32_24ICID_BASE + 1)
+
#define DQ_REGION_SHIFT (12)
/* DPM */
@@ -360,6 +375,7 @@
/* Number of global Vport/QCN rate limiters */
#define MAX_QM_GLOBAL_RLS 256
+#define COMMON_MAX_QM_GLOBAL_RLS MAX_QM_GLOBAL_RLS
/* QM registers data */
#define QM_LINE_CRD_REG_WIDTH 16
@@ -379,7 +395,7 @@
#define CAU_FSM_ETH_TX 1
/* Number of Protocol Indices per Status Block */
-#define PIS_PER_SB_E4 12
+#define PIS_PER_SB 12
#define MAX_PIS_PER_SB PIS_PER_SB
#define CAU_HC_STOPPED_STATE 3
@@ -700,9 +716,16 @@ enum mf_mode {
MAX_MF_MODE
};
+/* Per-protocol packet duplication enable bit vector. If set, duplicate
+ * offloaded traffic to the LL2 debug queue.
+ */
+struct offload_pkt_dup_enable {
+ __le16 enable_vector;
+};
+
/* Per-protocol connection types */
enum protocol_type {
- PROTOCOLID_ISCSI,
+ PROTOCOLID_TCP_ULP,
PROTOCOLID_FCOE,
PROTOCOLID_ROCE,
PROTOCOLID_CORE,
@@ -717,6 +740,12 @@ enum protocol_type {
MAX_PROTOCOL_TYPE
};
+/* Pstorm packet duplication config */
+struct pstorm_pkt_dup_cfg {
+ struct offload_pkt_dup_enable enable;
+ __le16 reserved[3];
+};
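A note on the two duplication-config structs just added: enable_vector is a per-protocol bit mask, so (assuming the bit index tracks the enum protocol_type values defined above) arming duplication for a single protocol is a one-line write. A minimal sketch:

/* Hypothetical: duplicate offloaded RoCE traffic to the LL2 debug queue.
 * Assumes enable_vector bit N corresponds to enum protocol_type value N.
 */
struct pstorm_pkt_dup_cfg dup_cfg = {};

dup_cfg.enable.enable_vector = cpu_to_le16(BIT(PROTOCOLID_ROCE));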
+
struct regpair {
__le32 lo;
__le32 hi;
@@ -728,10 +757,24 @@ struct rdma_eqe_destroy_qp {
u8 reserved[4];
};
+/* RoCE Suspend Event Data */
+struct rdma_eqe_suspend_qp {
+ __le32 cid;
+ u8 reserved[4];
+};
+
/* RDMA Event Data Union */
union rdma_eqe_data {
struct regpair async_handle;
struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+ struct rdma_eqe_suspend_qp rdma_suspend_qp_data;
+};
+
+/* Tstorm packet duplication config */
+struct tstorm_pkt_dup_cfg {
+ struct offload_pkt_dup_enable enable;
+ __le16 reserved;
+ __le32 cid;
};
struct tstorm_queue_zone {
@@ -891,6 +934,15 @@ struct db_legacy_addr {
#define DB_LEGACY_ADDR_ICID_SHIFT 5
};
+/* Structure for doorbell address, in legacy mode, without DEMS */
+struct db_legacy_wo_dems_addr {
+ __le32 addr;
+#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_MASK 0x3
+#define DB_LEGACY_WO_DEMS_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_WO_DEMS_ADDR_ICID_MASK 0x3FFFFFFF
+#define DB_LEGACY_WO_DEMS_ADDR_ICID_SHIFT 2
+};
+
/* Structure for doorbell address, in PWM mode */
struct db_pwm_addr {
__le32 addr;
@@ -907,6 +959,31 @@ struct db_pwm_addr {
};
/* Parameters to RDMA firmware, passed in EDPM doorbell */
+struct db_rdma_24b_icid_dpm_params {
+ __le32 params;
+#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_24B_ICID_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_24B_ICID_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_24B_ICID_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_MASK 0xFF
+#define DB_RDMA_24B_ICID_DPM_PARAMS_ICID_EXT_SHIFT 16
+#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_MASK 0x7
+#define DB_RDMA_24B_ICID_DPM_PARAMS_INV_BYTE_CNT_SHIFT 24
+#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_EXT_ICID_MODE_EN_SHIFT 27
+#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
+#define DB_RDMA_24B_ICID_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
+};
+
+/* Parameters to RDMA firmware, passed in EDPM doorbell */
struct db_rdma_dpm_params {
__le32 params;
#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
@@ -1220,21 +1297,41 @@ struct rdif_task_context {
__le32 reserved2;
};
+/* Searcher Table struct */
+struct src_entry_header {
+ __le32 flags;
+#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_MASK 0x1
+#define SRC_ENTRY_HEADER_NEXT_PTR_TYPE_SHIFT 0
+#define SRC_ENTRY_HEADER_EMPTY_MASK 0x1
+#define SRC_ENTRY_HEADER_EMPTY_SHIFT 1
+#define SRC_ENTRY_HEADER_RESERVED_MASK 0x3FFFFFFF
+#define SRC_ENTRY_HEADER_RESERVED_SHIFT 2
+ __le32 magic_number;
+ struct regpair next_ptr;
+};
+
+/* Enumeration for address type */
+enum src_header_next_ptr_type_enum {
+ e_physical_addr,
+ e_logical_addr,
+ MAX_SRC_HEADER_NEXT_PTR_TYPE_ENUM
+};
+
/* Status block structure */
-struct status_block_e4 {
- __le16 pi_array[PIS_PER_SB_E4];
+struct status_block {
+ __le16 pi_array[PIS_PER_SB];
__le32 sb_num;
-#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF
-#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0
-#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F
-#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9
-#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF
-#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16
+#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
__le32 prod_index;
-#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF
-#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0
-#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF
-#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24
+#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
};
/* Tdif context */
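Since nearly every hunk in this file boils down to dropping the E4_ prefix, callers that unpack these registers keep working once the macro names are updated; the name##_MASK / name##_SHIFT pairs still feed qed's generic field accessors. A sketch of reading the renamed status block's producer index (GET_FIELD() comes from <linux/qed/qed_if.h>; the function name is hypothetical):

#include <linux/qed/qed_if.h>

static u32 example_sb_prod(const struct status_block *sb)
{
	/* was: GET_FIELD(..., STATUS_BLOCK_E4_PROD_INDEX) */
	return GET_FIELD(le32_to_cpu(sb->prod_index), STATUS_BLOCK_PROD_INDEX);
}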
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index cd1207ad4ada..c84e08bc6802 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -67,6 +67,7 @@
/* Ethernet vport update constants */
#define ETH_FILTER_RULES_COUNT 10
#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
+#define ETH_RSS_IND_TABLE_MASK_SIZE_REGS (ETH_RSS_IND_TABLE_ENTRIES_NUM / 32)
#define ETH_RSS_KEY_SIZE_REGS 10
#define ETH_RSS_ENGINE_NUM_K2 207
#define ETH_RSS_ENGINE_NUM_BB 127
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 68eda1c21cde..7ba0abc867f1 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -150,49 +150,49 @@ struct ystorm_fcoe_task_st_ctx {
u8 reserved2[8];
};
-struct e4_ystorm_fcoe_task_ag_ctx {
+struct ystorm_fcoe_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 reg0;
u8 byte3;
@@ -206,73 +206,73 @@ struct e4_ystorm_fcoe_task_ag_ctx {
__le32 reg2;
};
-struct e4_tstorm_fcoe_task_ag_ctx {
+struct tstorm_fcoe_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
-#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7
u8 flags1;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6
u8 flags3;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 cleanup_state;
__le16 last_sent_tid;
__le32 rec_rr_tov_exp_timeout;
@@ -352,49 +352,49 @@ struct tstorm_fcoe_task_st_ctx {
struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only;
};
-struct e4_mstorm_fcoe_task_ag_ctx {
+struct mstorm_fcoe_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 cleanup_state;
__le32 received_bytes;
u8 byte3;
@@ -440,56 +440,56 @@ struct mstorm_fcoe_task_st_ctx {
struct scsi_cached_sges data_desc;
};
-struct e4_ustorm_fcoe_task_ag_ctx {
+struct ustorm_fcoe_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
+#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 global_cq_num;
@@ -499,18 +499,18 @@ struct e4_ustorm_fcoe_task_ag_ctx {
};
/* FCoE task context */
-struct e4_fcoe_task_context {
+struct fcoe_task_context {
struct ystorm_fcoe_task_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct tdif_task_context tdif_context;
- struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context;
- struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context;
+ struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
+ struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
struct tstorm_fcoe_task_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
- struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context;
+ struct mstorm_fcoe_task_ag_ctx mstorm_ag_context;
struct mstorm_fcoe_task_st_ctx mstorm_st_context;
- struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context;
+ struct ustorm_fcoe_task_ag_ctx ustorm_ag_context;
struct rdif_task_context rdif_context;
};
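The FCoE rename is equally mechanical: the e4_/E4_ prefixes disappear from the struct and macro names while the layouts stay bit-identical, so the usual SET_FIELD() packing idiom (also from <linux/qed/qed_if.h>) changes only in spelling. A before/after sketch with a hypothetical flag write:

struct fcoe_task_context ctx = {};	/* was: struct e4_fcoe_task_context */

/* was: SET_FIELD(ctx.ystorm_ag_context.flags1, E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN, 1); */
SET_FIELD(ctx.ystorm_ag_context.flags1, YSTORM_FCOE_TASK_AG_CTX_CF0EN, 1);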
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 157019f716f1..1a60285a01e3 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -714,49 +714,49 @@ struct ystorm_iscsi_task_st_ctx {
union iscsi_task_hdr pdu_hdr;
};
-struct e4_ystorm_iscsi_task_ag_ctx {
+struct ystorm_iscsi_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_MASK 0x1 /* bit3 */
+#define YSTORM_ISCSI_TASK_AG_CTX_TTT_VALID_SHIFT 7
u8 flags1;
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 TTT;
u8 byte3;
@@ -764,49 +764,49 @@ struct e4_ystorm_iscsi_task_ag_ctx {
__le16 word1;
};
-struct e4_mstorm_iscsi_task_ag_ctx {
+struct mstorm_iscsi_task_ag_ctx {
u8 cdu_validation;
u8 byte1;
__le16 task_cid;
u8 flags0;
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
u8 flags1;
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 byte2;
__le32 reg0;
u8 byte3;
@@ -814,56 +814,56 @@ struct e4_mstorm_iscsi_task_ag_ctx {
__le16 word1;
};
-struct e4_ustorm_iscsi_task_ag_ctx {
+struct ustorm_iscsi_task_ag_ctx {
u8 reserved;
u8 state;
__le16 icid;
u8 flags0;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
u8 flags1;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 rcv_cont_len;
@@ -952,14 +952,14 @@ struct ustorm_iscsi_task_st_ctx {
};
/* iscsi task context */
-struct e4_iscsi_task_context {
+struct iscsi_task_context {
struct ystorm_iscsi_task_st_ctx ystorm_st_context;
- struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context;
+ struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
struct regpair ystorm_ag_padding[2];
struct tdif_task_context tdif_context;
- struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context;
+ struct mstorm_iscsi_task_ag_ctx mstorm_ag_context;
struct regpair mstorm_ag_padding[2];
- struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context;
+ struct ustorm_iscsi_task_ag_ctx ustorm_ag_context;
struct mstorm_iscsi_task_st_ctx mstorm_st_context;
struct ustorm_iscsi_task_st_ctx ustorm_st_context;
struct rdif_task_context rdif_context;
@@ -1431,73 +1431,73 @@ struct ystorm_iscsi_stats_drv {
struct regpair iscsi_tx_tcp_pkt_cnt;
};
-struct e4_tstorm_iscsi_task_ag_ctx {
+struct tstorm_iscsi_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 byte2;
__le16 word1;
__le32 reg0;
diff --git a/include/linux/qed/nvmetcp_common.h b/include/linux/qed/nvmetcp_common.h
new file mode 100644
index 000000000000..cc7c7481a0e0
--- /dev/null
+++ b/include/linux/qed/nvmetcp_common.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef __NVMETCP_COMMON__
+#define __NVMETCP_COMMON__
+
+#include "tcp_common.h"
+#include <linux/nvme-tcp.h>
+
+#define NVMETCP_SLOW_PATH_LAYER_CODE (6)
+#define NVMETCP_WQE_NUM_SGES_SLOWIO (0xf)
+
+/* NVMeTCP firmware function init parameters */
+struct nvmetcp_spe_func_init {
+ __le16 half_way_close_timeout;
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 ll2_rx_queue_id;
+ u8 flags;
+#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1
+#define NVMETCP_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0
+#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_MASK 0x1
+#define NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE_SHIFT 1
+#define NVMETCP_SPE_FUNC_INIT_RESERVED0_MASK 0x3F
+#define NVMETCP_SPE_FUNC_INIT_RESERVED0_SHIFT 2
+ u8 debug_flags;
+ __le16 reserved1;
+ u8 params;
+#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_MASK 0xF
+#define NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT 0
+#define NVMETCP_SPE_FUNC_INIT_RESERVED1_MASK 0xF
+#define NVMETCP_SPE_FUNC_INIT_RESERVED1_SHIFT 4
+ u8 reserved2[5];
+ struct scsi_init_func_params func_params;
+ struct scsi_init_func_queues q_params;
+};
+
+/* NVMeTCP init params passed by driver to FW in NVMeTCP init ramrod. */
+struct nvmetcp_init_ramrod_params {
+ struct nvmetcp_spe_func_init nvmetcp_init_spe;
+ struct tcp_init_params tcp_init;
+};
+
+/* NVMeTCP Ramrod Command IDs */
+enum nvmetcp_ramrod_cmd_id {
+ NVMETCP_RAMROD_CMD_ID_UNUSED = 0,
+ NVMETCP_RAMROD_CMD_ID_INIT_FUNC = 1,
+ NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC = 2,
+ NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN = 3,
+ NVMETCP_RAMROD_CMD_ID_UPDATE_CONN = 4,
+ NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN = 5,
+ NVMETCP_RAMROD_CMD_ID_CLEAR_SQ = 6,
+ MAX_NVMETCP_RAMROD_CMD_ID
+};
+
+struct nvmetcp_glbl_queue_entry {
+ struct regpair cq_pbl_addr;
+ struct regpair reserved;
+};
+
+/* NVMeTCP conn level EQEs */
+enum nvmetcp_eqe_opcode {
+ NVMETCP_EVENT_TYPE_INIT_FUNC = 0, /* Response after init Ramrod */
+ NVMETCP_EVENT_TYPE_DESTROY_FUNC, /* Response after destroy Ramrod */
+ NVMETCP_EVENT_TYPE_OFFLOAD_CONN, /* Response after option 2 offload Ramrod */
+ NVMETCP_EVENT_TYPE_UPDATE_CONN, /* Response after update Ramrod */
+ NVMETCP_EVENT_TYPE_CLEAR_SQ, /* Response after clear sq Ramrod */
+ NVMETCP_EVENT_TYPE_TERMINATE_CONN, /* Response after termination Ramrod */
+ NVMETCP_EVENT_TYPE_RESERVED0,
+ NVMETCP_EVENT_TYPE_RESERVED1,
+ NVMETCP_EVENT_TYPE_ASYN_CONNECT_COMPLETE, /* Connect completed (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_TERMINATE_DONE, /* Termination completed (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_START_OF_ERROR_TYPES = 10, /* Separate EQs from err EQs */
+ NVMETCP_EVENT_TYPE_ASYN_ABORT_RCVD, /* TCP RST packet receive (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_CLOSE_RCVD, /* TCP FIN packet receive (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_SYN_RCVD, /* TCP SYN+ACK packet receive (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_MAX_RT_TIME, /* TCP max retransmit time (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_MAX_RT_CNT, /* TCP max retransmit count (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT, /* TCP ka probes count (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_ASYN_FIN_WAIT2, /* TCP fin wait 2 (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_NVMETCP_CONN_ERROR, /* NVMeTCP error response (A-syn EQE) */
+ NVMETCP_EVENT_TYPE_TCP_CONN_ERROR, /* NVMeTCP error - tcp error (A-syn EQE) */
+ MAX_NVMETCP_EQE_OPCODE
+};
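The comment markers inside this enum encode a convention worth making explicit: opcodes below START_OF_ERROR_TYPES (10) are ramrod responses, while everything at or above it is an asynchronous error event. A sketch of a classifier that leans on that split (function name hypothetical):

static bool example_nvmetcp_eqe_is_error(u8 opcode)
{
	/* Opcodes >= 10 are the A-syn error events listed above. */
	return opcode >= NVMETCP_EVENT_TYPE_START_OF_ERROR_TYPES;
}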
+
+struct nvmetcp_conn_offload_section {
+ struct regpair cccid_itid_table_addr; /* CCCID to iTID table address */
+ __le16 cccid_max_range; /* CCCID max value - used for validation */
+ __le16 reserved[3];
+};
+
+/* NVMeTCP connection offload params passed by driver to FW in NVMeTCP offload ramrod */
+struct nvmetcp_conn_offload_params {
+ struct regpair sq_pbl_addr;
+ struct regpair r2tq_pbl_addr;
+ struct regpair xhq_pbl_addr;
+ struct regpair uhq_pbl_addr;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 flags;
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
+#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_MASK 0x1
+#define NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE_SHIFT 3
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0xF
+#define NVMETCP_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 4
+ u8 default_cq;
+ __le16 reserved0;
+ __le32 reserved1;
+ __le32 initial_ack;
+
+ struct nvmetcp_conn_offload_section nvmetcp; /* NVMe/TCP section */
+};
+
+/* NVMe TCP and TCP connection offload params passed by driver to FW in NVMeTCP offload ramrod. */
+struct nvmetcp_spe_conn_offload {
+ __le16 reserved;
+ __le16 conn_id;
+ __le32 fw_cid;
+ struct nvmetcp_conn_offload_params nvmetcp;
+ struct tcp_offload_params_opt2 tcp;
+};
+
+/* NVMeTCP connection update params passed by driver to FW in NVMETCP update ramrod. */
+struct nvmetcp_conn_update_ramrod_params {
+ __le16 reserved0;
+ __le16 conn_id;
+ __le32 reserved1;
+ u8 flags;
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED0_SHIFT 2
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 3
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED2_SHIFT 4
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED3_SHIFT 5
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED4_SHIFT 6
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_MASK 0x1
+#define NVMETCP_CONN_UPDATE_RAMROD_PARAMS_RESERVED5_SHIFT 7
+ u8 reserved3[3];
+ __le32 max_seq_size;
+ __le32 max_send_pdu_length;
+ __le32 max_recv_pdu_length;
+ __le32 first_seq_length;
+ __le32 reserved4[5];
+};
+
+/* NVMeTCP connection termination request */
+struct nvmetcp_spe_conn_termination {
+ __le16 reserved0;
+ __le16 conn_id;
+ __le32 reserved1;
+ u8 abortive;
+ u8 reserved2[7];
+ struct regpair reserved3;
+ struct regpair reserved4;
+};
+
+struct nvmetcp_dif_flags {
+ u8 flags;
+};
+
+enum nvmetcp_wqe_type {
+ NVMETCP_WQE_TYPE_NORMAL,
+ NVMETCP_WQE_TYPE_TASK_CLEANUP,
+ NVMETCP_WQE_TYPE_MIDDLE_PATH,
+ NVMETCP_WQE_TYPE_IC,
+ MAX_NVMETCP_WQE_TYPE
+};
+
+struct nvmetcp_wqe {
+ __le16 task_id;
+ u8 flags;
+#define NVMETCP_WQE_WQE_TYPE_MASK 0x7 /* [use nvmetcp_wqe_type] */
+#define NVMETCP_WQE_WQE_TYPE_SHIFT 0
+#define NVMETCP_WQE_NUM_SGES_MASK 0xF
+#define NVMETCP_WQE_NUM_SGES_SHIFT 3
+#define NVMETCP_WQE_RESPONSE_MASK 0x1
+#define NVMETCP_WQE_RESPONSE_SHIFT 7
+ struct nvmetcp_dif_flags prot_flags;
+ __le32 contlen_cdbsize;
+#define NVMETCP_WQE_CONT_LEN_MASK 0xFFFFFF
+#define NVMETCP_WQE_CONT_LEN_SHIFT 0
+#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_MASK 0xFF
+#define NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD_SHIFT 24
+};
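
A sketch of filling one SQ entry for a normal I/O using SET_FIELD() from qed_if.h; everything except the HSI names is hypothetical. Note that contlen_cdbsize packs the 24-bit continuation length and the 8-bit command field into one dword:

static void example_fill_wqe(struct nvmetcp_wqe *wqe, u16 task_id,
			     u8 num_sges, u32 cont_len)
{
	u8 flags = 0;
	u32 dword = 0;

	wqe->task_id = cpu_to_le16(task_id);
	SET_FIELD(flags, NVMETCP_WQE_WQE_TYPE, NVMETCP_WQE_TYPE_NORMAL);
	SET_FIELD(flags, NVMETCP_WQE_NUM_SGES, num_sges);
	wqe->flags = flags;

	SET_FIELD(dword, NVMETCP_WQE_CONT_LEN, cont_len);
	wqe->contlen_cdbsize = cpu_to_le32(dword);
}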
+
+struct nvmetcp_host_cccid_itid_entry {
+ __le16 itid;
+};
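
The offload section above points the FW at a host-resident CCCID-to-iTID lookup table built from these two-byte entries. A minimal allocation sketch, assuming a coherent DMA mapping and the DMA_REGPAIR_LE() helper from qed_if.h; the function itself is hypothetical:

static int example_alloc_cccid_itid_table(struct device *dev,
					  struct nvmetcp_conn_offload_section *sec,
					  u16 num_cccids)
{
	struct nvmetcp_host_cccid_itid_entry *tbl;
	dma_addr_t phys;

	tbl = dma_alloc_coherent(dev, num_cccids * sizeof(*tbl), &phys,
				 GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;

	DMA_REGPAIR_LE(sec->cccid_itid_table_addr, phys);
	sec->cccid_max_range = cpu_to_le16(num_cccids - 1);
	return 0;
}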
+
+struct nvmetcp_connect_done_results {
+ __le16 icid;
+ __le16 conn_id;
+ struct tcp_ulp_connect_done_params params;
+};
+
+struct nvmetcp_eqe_data {
+ __le16 icid;
+ __le16 conn_id;
+ __le16 reserved;
+ u8 error_code;
+ u8 error_pdu_opcode_reserved;
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1
+#define NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6
+#define NVMETCP_EQE_DATA_RESERVED0_MASK 0x1
+#define NVMETCP_EQE_DATA_RESERVED0_SHIFT 7
+};
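
Decoding the packed error-PDU opcode from an error EQE is a pair of GET_FIELD() extractions (GET_FIELD() is the qed_if.h counterpart of SET_FIELD()); the wrapper function is hypothetical:

static u8 example_eqe_pdu_opcode(const struct nvmetcp_eqe_data *eqe,
				 bool *valid)
{
	u8 raw = eqe->error_pdu_opcode_reserved;

	*valid = GET_FIELD(raw, NVMETCP_EQE_DATA_ERROR_PDU_OPCODE_VALID);
	return GET_FIELD(raw, NVMETCP_EQE_DATA_ERROR_PDU_OPCODE);
}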
+
+enum nvmetcp_task_type {
+ NVMETCP_TASK_TYPE_HOST_WRITE,
+ NVMETCP_TASK_TYPE_HOST_READ,
+ NVMETCP_TASK_TYPE_INIT_CONN_REQUEST,
+ NVMETCP_TASK_TYPE_RESERVED0,
+ NVMETCP_TASK_TYPE_CLEANUP,
+ NVMETCP_TASK_TYPE_HOST_READ_NO_CQE,
+ MAX_NVMETCP_TASK_TYPE
+};
+
+struct nvmetcp_db_data {
+ u8 params;
+#define NVMETCP_DB_DATA_DEST_MASK 0x3 /* destination of doorbell (use enum db_dest) */
+#define NVMETCP_DB_DATA_DEST_SHIFT 0
+#define NVMETCP_DB_DATA_AGG_CMD_MASK 0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */
+#define NVMETCP_DB_DATA_AGG_CMD_SHIFT 2
+#define NVMETCP_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */
+#define NVMETCP_DB_DATA_BYPASS_EN_SHIFT 4
+#define NVMETCP_DB_DATA_RESERVED_MASK 0x1
+#define NVMETCP_DB_DATA_RESERVED_SHIFT 5
+#define NVMETCP_DB_DATA_AGG_VAL_SEL_MASK 0x3 /* aggregative value selection */
+#define NVMETCP_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags; /* A bit for every DQ counter in the CM context that the DQ can increment */
+ __le16 sq_prod;
+};
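
The doorbell image is exactly four bytes (params, agg_flags, sq_prod), so it can be prepared and written in a single 32-bit store. A sketch, assuming DB_DEST_XCM and DB_AGG_CMD_SET from the common qed HSI enums (db_dest / db_agg_cmd_sel); the function is hypothetical:

static void example_ring_sq_doorbell(void __iomem *db_addr,
				     struct nvmetcp_db_data *db,
				     u16 new_sq_prod)
{
	u8 params = 0;

	SET_FIELD(params, NVMETCP_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(params, NVMETCP_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	db->params = params;
	db->sq_prod = cpu_to_le16(new_sq_prod);

	writel(*(u32 *)db, db_addr);
}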
+
+struct nvmetcp_fw_nvmf_cqe {
+ __le32 reserved[4];
+};
+
+struct nvmetcp_icresp_mdata {
+ u8 digest;
+ u8 cpda;
+ __le16 pfv;
+ __le32 maxdata;
+ __le16 rsvd[4];
+};
+
+union nvmetcp_fw_cqe_data {
+ struct nvmetcp_fw_nvmf_cqe nvme_cqe;
+ struct nvmetcp_icresp_mdata icresp_mdata;
+};
+
+struct nvmetcp_fw_cqe {
+ __le16 conn_id;
+ u8 cqe_type;
+ u8 cqe_error_status_bits;
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7
+#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1
+#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1
+#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4
+ __le16 itid;
+ u8 task_type;
+ u8 fw_dbg_field;
+ u8 caused_conn_err;
+ u8 reserved0[3];
+ __le32 reserved1;
+ union nvmetcp_fw_cqe_data cqe_data;
+ struct regpair task_opaque;
+ __le32 reserved[6];
+};
+
+enum nvmetcp_fw_cqes_type {
+ NVMETCP_FW_CQE_TYPE_NORMAL = 1,
+ NVMETCP_FW_CQE_TYPE_RESERVED0,
+ NVMETCP_FW_CQE_TYPE_RESERVED1,
+ NVMETCP_FW_CQE_TYPE_CLEANUP,
+ NVMETCP_FW_CQE_TYPE_DUMMY,
+ MAX_NVMETCP_FW_CQES_TYPE
+};
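
A sketch of the completion path over these two definitions: check the error bitmap first, then dispatch on cqe_type. The example_* handlers are hypothetical:

static void example_handle_cqe(struct nvmetcp_fw_cqe *cqe)
{
	u8 err = cqe->cqe_error_status_bits;

	if (GET_FIELD(err, CQE_ERROR_BITMAP_DATA_DIGEST_ERR)) {
		example_digest_error(le16_to_cpu(cqe->itid));
		return;
	}

	if (cqe->cqe_type == NVMETCP_FW_CQE_TYPE_NORMAL)
		example_complete_task(le16_to_cpu(cqe->itid),
				      &cqe->cqe_data.nvme_cqe);
}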
+
+struct ystorm_nvmetcp_task_state {
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 reserved0;
+ __le32 buffer_offset;
+ __le16 cccid;
+ struct nvmetcp_dif_flags dif_flags;
+ u8 flags;
+#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP_SHIFT 0
+#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_SLOW_IO_SHIFT 1
+#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_SET_DIF_OFFSET_SHIFT 2
+#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_MASK 0x1
+#define YSTORM_NVMETCP_TASK_STATE_SEND_W_RSP_SHIFT 3
+};
+
+struct ystorm_nvmetcp_task_rxmit_opt {
+ __le32 reserved[4];
+};
+
+struct nvmetcp_task_hdr {
+ __le32 reg[18];
+};
+
+struct nvmetcp_task_hdr_aligned {
+ struct nvmetcp_task_hdr task_hdr;
+ __le32 reserved[2]; /* HSI_COMMENT: Align to QREG */
+};
+
+struct e5_tdif_task_context {
+ __le32 reserved[16];
+};
+
+struct e5_rdif_task_context {
+ __le32 reserved[12];
+};
+
+struct ystorm_nvmetcp_task_st_ctx {
+ struct ystorm_nvmetcp_task_state state;
+ struct ystorm_nvmetcp_task_rxmit_opt rxmit_opt;
+ struct nvmetcp_task_hdr_aligned pdu_hdr;
+};
+
+struct mstorm_nvmetcp_task_st_ctx {
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 rem_task_size;
+ __le32 data_buffer_offset;
+ u8 task_type;
+ struct nvmetcp_dif_flags dif_flags;
+ __le16 dif_task_icid;
+ struct regpair reserved0;
+ __le32 expected_itt;
+ __le32 reserved1;
+};
+
+struct ustorm_nvmetcp_task_st_ctx {
+ __le32 rem_rcv_len;
+ __le32 exp_data_transfer_len;
+ __le32 exp_data_sn;
+ struct regpair reserved0;
+ __le32 reg1_map;
+#define REG1_NUM_SGES_MASK 0xF
+#define REG1_NUM_SGES_SHIFT 0
+#define REG1_RESERVED1_MASK 0xFFFFFFF
+#define REG1_RESERVED1_SHIFT 4
+ u8 flags2;
+#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_AHS_EXIST_SHIFT 0
+#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_MASK 0x7F
+#define USTORM_NVMETCP_TASK_ST_CTX_RESERVED1_SHIFT 1
+ struct nvmetcp_dif_flags dif_flags;
+ __le16 reserved3;
+ __le16 tqe_opaque[2];
+ __le32 reserved5;
+ __le32 nvme_tcp_opaque_lo;
+ __le32 nvme_tcp_opaque_hi;
+ u8 task_type;
+ u8 error_flags;
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1
+#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2
+#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP_SHIFT 3
+ u8 flags;
+#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_MASK 0x3
+#define USTORM_NVMETCP_TASK_ST_CTX_CQE_WRITE_SHIFT 0
+#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
+#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
+#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
+#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
+#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
+#define USTORM_NVMETCP_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
+ u8 cq_rss_number;
+};
+
+struct e5_ystorm_nvmetcp_task_ag_ctx {
+ u8 reserved /* cdu_validation */;
+ u8 byte1 /* state_and_core_id */;
+ __le16 word0 /* icid */;
+ u8 flags0;
+ u8 flags1;
+ u8 flags2;
+ u8 flags3;
+ __le32 TTT;
+ u8 byte2;
+ u8 byte3;
+ u8 byte4;
+ u8 reserved7;
+};
+
+struct e5_mstorm_nvmetcp_task_ag_ctx {
+ u8 cdu_validation;
+ u8 byte1;
+ __le16 task_cid;
+ u8 flags0;
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_VALID_SHIFT 6
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7
+ u8 flags1;
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_MASK 0x3
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1_SHIFT 2
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_MASK 0x3
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF2_SHIFT 4
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E5_MSTORM_NVMETCP_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+ u8 flags3;
+ __le32 reg0;
+ u8 byte2;
+ u8 byte3;
+ u8 byte4;
+ u8 reserved7;
+};
+
+struct e5_ustorm_nvmetcp_task_ag_ctx {
+ u8 reserved;
+ u8 state_and_core_id;
+ __le16 icid;
+ u8 flags0;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
+ u8 flags1;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED1_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3_SHIFT 4
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CF3EN_SHIFT 3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7
+ u8 flags3;
+ u8 flags4;
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_MASK 0x3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED5_SHIFT 0
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED6_SHIFT 2
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_MASK 0x1
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_RESERVED7_SHIFT 3
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ u8 byte2;
+ u8 byte3;
+ u8 reserved8;
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 rcv_cont_len;
+ __le32 exp_cont_len;
+ __le32 total_data_acked;
+ __le32 exp_data_acked;
+ __le16 word1;
+ __le16 next_tid;
+ __le32 hdr_residual_count;
+ __le32 exp_r2t_sn;
+};
+
+struct e5_nvmetcp_task_context {
+ struct ystorm_nvmetcp_task_st_ctx ystorm_st_context;
+ struct e5_ystorm_nvmetcp_task_ag_ctx ystorm_ag_context;
+ struct regpair ystorm_ag_padding[2];
+ struct e5_tdif_task_context tdif_context;
+ struct e5_mstorm_nvmetcp_task_ag_ctx mstorm_ag_context;
+ struct regpair mstorm_ag_padding[2];
+ struct e5_ustorm_nvmetcp_task_ag_ctx ustorm_ag_context;
+ struct regpair ustorm_ag_padding[2];
+ struct mstorm_nvmetcp_task_st_ctx mstorm_st_context;
+ struct regpair mstorm_st_padding[2];
+ struct ustorm_nvmetcp_task_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+ struct e5_rdif_task_context rdif_context;
+};
+
+#endif /* __NVMETCP_COMMON__*/
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index f34dbd0db795..a84063492c71 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -268,14 +268,15 @@ static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain)
}
/**
- * @brief qed_chain_advance_page -
+ * qed_chain_advance_page(): Advance the next element across pages for a
+ * linked chain.
*
- * Advance the next element across pages for a linked chain
+ * @p_chain: Chain.
+ * @p_next_elem: Pointer to the next element to advance.
+ * @idx_to_inc: Element index to increment.
+ * @page_to_inc: Page index to increment.
*
- * @param p_chain
- * @param p_next_elem
- * @param idx_to_inc
- * @param page_to_inc
+ * Return: Void.
*/
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
@@ -336,12 +337,14 @@ qed_chain_advance_page(struct qed_chain *p_chain,
} while (0)
/**
- * @brief qed_chain_return_produced -
+ * qed_chain_return_produced(): A chain in which the driver "Produces"
+ * elements should use this API
+ * to indicate previously produced elements
+ * are now consumed.
*
- * A chain in which the driver "Produces" elements should use this API
- * to indicate previous produced elements are now consumed.
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
@@ -353,15 +356,15 @@ static inline void qed_chain_return_produced(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_produce -
+ * qed_chain_produce(): A chain in which the driver "Produces"
+ * elements should use this to get a pointer to
+ * the next element which can be "Produced". It's the driver's
+ * responsibility to validate that the chain has room for a
+ * new element.
*
- * A chain in which the driver "Produces" elements should use this to get
- * a pointer to the next element which can be "Produced". It's driver
- * responsibility to validate that the chain has room for new element.
+ * @p_chain: Chain.
*
- * @param p_chain
- *
- * @return void*, a pointer to next element
+ * Return: void*, a pointer to next element.
*/
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
@@ -395,14 +398,11 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
}
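
Combined with qed_chain_get_elem_left() from the same header, the producer-side flow reduces to the sketch below; the room check is the driver's responsibility, as the comment above says. example_post_elem() is hypothetical:

static int example_post_elem(struct qed_chain *chain,
			     const void *elem, size_t elem_size)
{
	void *slot;

	if (!qed_chain_get_elem_left(chain))
		return -EBUSY;

	slot = qed_chain_produce(chain);
	memcpy(slot, elem, elem_size);
	return 0;
}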
/**
- * @brief qed_chain_get_capacity -
- *
- * Get the maximum number of BDs in chain
+ * qed_chain_get_capacity(): Get the maximum number of BDs in chain.
*
- * @param p_chain
- * @param num
+ * @p_chain: Chain.
*
- * @return number of unusable BDs
+ * Return: number of usable BDs.
*/
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
@@ -410,12 +410,14 @@ static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_recycle_consumed -
+ * qed_chain_recycle_consumed(): Returns an element which was
+ * previously consumed;
+ * Increments producers so they could
+ * be written to FW.
*
- * Returns an element which was previously consumed;
- * Increments producers so they could be written to FW.
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
@@ -427,14 +429,13 @@ static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_consume -
+ * qed_chain_consume(): A Chain in which the driver utilizes data written
+ * by a different source (i.e., FW) should use this to
+ * access passed buffers.
*
- * A Chain in which the driver utilizes data written by a different source
- * (i.e., FW) should use this to access passed buffers.
+ * @p_chain: Chain.
*
- * @param p_chain
- *
- * @return void*, a pointer to the next buffer written
+ * Return: void*, a pointer to the next buffer written.
*/
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
@@ -468,9 +469,11 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
}
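
The consumer-side counterpart: read FW-written elements with qed_chain_consume(), then recycle them so the producer index can be re-armed. A hypothetical drain loop (example_process() stands in for the driver's handler):

static void example_drain(struct qed_chain *chain, u32 num_elems)
{
	while (num_elems--) {
		example_process(qed_chain_consume(chain));
		qed_chain_recycle_consumed(chain);
	}
}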
/**
- * @brief qed_chain_reset - Resets the chain to its start state
+ * qed_chain_reset(): Resets the chain to its start state.
+ *
+ * @p_chain: pointer to a previously allocated chain.
*
- * @param p_chain pointer to a previously allocated chain
+ * Return: Void.
*/
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
@@ -519,13 +522,12 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
}
/**
- * @brief qed_chain_get_last_elem -
+ * qed_chain_get_last_elem(): Returns a pointer to the last element of the
+ * chain.
*
- * Returns a pointer to the last element of the chain
+ * @p_chain: Chain.
*
- * @param p_chain
- *
- * @return void*
+ * Return: void*.
*/
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
@@ -563,10 +565,13 @@ out:
}
/**
- * @brief qed_chain_set_prod - sets the prod to the given value
+ * qed_chain_set_prod(): sets the prod to the given value.
+ *
+ * @p_chain: Chain.
+ * @prod_idx: Producer index to set.
+ * @p_prod_elem: Pointer to the producer element.
*
- * @param prod_idx
- * @param p_prod_elem
+ * Return: Void.
*/
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
u32 prod_idx, void *p_prod_elem)
@@ -610,9 +615,11 @@ static inline void qed_chain_set_prod(struct qed_chain *p_chain,
}
/**
- * @brief qed_chain_pbl_zero_mem - set chain memory to 0
+ * qed_chain_pbl_zero_mem(): set chain memory to 0.
+ *
+ * @p_chain: Chain.
*
- * @param p_chain
+ * Return: Void.
*/
static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
{
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 812a4d751163..e1bf3219b4e6 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -145,12 +145,6 @@ struct qed_filter_mcast_params {
unsigned char mac[64][ETH_ALEN];
};
-union qed_filter_type_params {
- enum qed_filter_rx_mode_type accept_flags;
- struct qed_filter_ucast_params ucast;
- struct qed_filter_mcast_params mcast;
-};
-
enum qed_filter_type {
QED_FILTER_TYPE_UCAST,
QED_FILTER_TYPE_MCAST,
@@ -158,11 +152,6 @@ enum qed_filter_type {
QED_MAX_FILTER_TYPES,
};
-struct qed_filter_params {
- enum qed_filter_type type;
- union qed_filter_type_params filter;
-};
-
struct qed_tunn_params {
u16 vxlan_port;
u8 update_vxlan_port;
@@ -314,8 +303,14 @@ struct qed_eth_ops {
int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);
- int (*filter_config)(struct qed_dev *cdev,
- struct qed_filter_params *params);
+ int (*filter_config_rx_mode)(struct qed_dev *cdev,
+ enum qed_filter_rx_mode_type type);
+
+ int (*filter_config_ucast)(struct qed_dev *cdev,
+ struct qed_filter_ucast_params *params);
+
+ int (*filter_config_mcast)(struct qed_dev *cdev,
+ struct qed_filter_mcast_params *params);
int (*fastpath_stop)(struct qed_dev *cdev);
@@ -336,7 +331,7 @@ struct qed_eth_ops {
int (*configure_arfs_searcher)(struct qed_dev *cdev,
enum qed_filter_config_mode mode);
int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
- int (*req_bulletin_update_mac)(struct qed_dev *cdev, u8 *mac);
+ int (*req_bulletin_update_mac)(struct qed_dev *cdev, const u8 *mac);
};
const struct qed_eth_ops *qed_get_eth_ops(void);
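
With qed_filter_params and its tagged union gone, callers invoke the per-type op directly instead of packing a union and calling filter_config(). A hypothetical qede-like call site for the rx-mode case (QED_FILTER_RX_MODE_TYPE_PROMISC is the pre-existing enum value in this header); ucast/mcast callers pass their params structs straight to filter_config_ucast()/filter_config_mcast() the same way:

int rc = edev->ops->filter_config_rx_mode(cdev,
					  QED_FILTER_RX_MODE_TYPE_PROMISC);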
diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h
index 16752eca5cbd..0d3b6ed21628 100644
--- a/include/linux/qed/qed_fcoe_if.h
+++ b/include/linux/qed/qed_fcoe_if.h
@@ -67,16 +67,13 @@ struct qed_fcoe_cb_ops {
u32 (*get_login_failures)(void *cookie);
};
-void qed_fcoe_set_pf_params(struct qed_dev *cdev,
- struct qed_fcoe_pf_params *params);
-
/**
* struct qed_fcoe_ops - qed FCoE operations.
* @common: common operations pointer
* @fill_dev_info: fills FCoE specific information
* @param cdev
* @param info
- * @return 0 on sucesss, otherwise error value.
+ * @return 0 on success, otherwise error value.
* @register_ops: register FCoE operations
* @param cdev
* @param ops - specified using qed_iscsi_cb_ops
@@ -96,7 +93,7 @@ void qed_fcoe_set_pf_params(struct qed_dev *cdev,
* connection.
* @param p_doorbell - qed will fill the address of the
* doorbell.
- * return 0 on sucesss, otherwise error value.
+ * return 0 on success, otherwise error value.
* @release_conn: release a previously acquired fcoe connection
* @param cdev
* @param handle - the connection handle.
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 68d17a4fbf20..6dc4943d8aec 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -24,6 +24,9 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <net/devlink.h>
+#define QED_TX_SWS_TIMER_DFLT 500
+#define QED_TWO_MSL_TIMER_DFLT 4000
+
enum dcbx_protocol_type {
DCBX_PROTOCOL_ISCSI,
DCBX_PROTOCOL_FCOE,
@@ -542,6 +545,22 @@ struct qed_iscsi_pf_params {
u8 bdq_pbl_num_entries[3];
};
+struct qed_nvmetcp_pf_params {
+ u64 glbl_q_params_addr;
+ u16 cq_num_entries;
+ u16 num_cons;
+ u16 num_tasks;
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 num_queues;
+ u8 gl_rq_pi;
+ u8 gl_cmd_pi;
+ u8 debug_mode;
+ u8 ll2_ooo_queue_id;
+ u16 min_rto;
+};
+
struct qed_rdma_pf_params {
/* Supplied to QED during resource allocation (may affect the ILT and
* the doorbell BAR).
@@ -560,6 +579,7 @@ struct qed_pf_params {
struct qed_eth_pf_params eth_pf_params;
struct qed_fcoe_pf_params fcoe_pf_params;
struct qed_iscsi_pf_params iscsi_pf_params;
+ struct qed_nvmetcp_pf_params nvmetcp_pf_params;
struct qed_rdma_pf_params rdma_pf_params;
};
@@ -571,7 +591,7 @@ enum qed_int_mode {
};
struct qed_sb_info {
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
u32 sb_ack; /* Last given ack */
u16 igu_sb_id;
@@ -596,7 +616,6 @@ enum qed_hw_err_type {
enum qed_dev_type {
QED_DEV_TYPE_BB,
QED_DEV_TYPE_AH,
- QED_DEV_TYPE_E5,
};
struct qed_dev_info {
@@ -633,6 +652,7 @@ struct qed_dev_info {
bool wol_support;
bool smart_an;
+ bool esl;
/* MBI version */
u32 mbi_version;
@@ -662,6 +682,7 @@ enum qed_sb_type {
enum qed_protocol {
QED_PROTOCOL_ETH,
QED_PROTOCOL_ISCSI,
+ QED_PROTOCOL_NVMETCP = QED_PROTOCOL_ISCSI,
QED_PROTOCOL_FCOE,
};
@@ -787,6 +808,12 @@ struct qed_devlink {
struct devlink_health_reporter *fw_reporter;
};
+struct qed_sb_info_dbg {
+ u32 igu_prod;
+ u32 igu_cons;
+ u16 pi[PIS_PER_SB];
+};
+
struct qed_common_cb_ops {
void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
void (*link_update)(void *dev, struct qed_link_output *link);
@@ -801,47 +828,47 @@ struct qed_common_cb_ops {
struct qed_selftest_ops {
/**
- * @brief selftest_interrupt - Perform interrupt test
+ * selftest_interrupt(): Perform interrupt test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_interrupt)(struct qed_dev *cdev);
/**
- * @brief selftest_memory - Perform memory test
+ * selftest_memory(): Perform memory test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_memory)(struct qed_dev *cdev);
/**
- * @brief selftest_register - Perform register test
+ * selftest_register(): Perform register test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_register)(struct qed_dev *cdev);
/**
- * @brief selftest_clock - Perform clock test
+ * selftest_clock(): Perform clock test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_clock)(struct qed_dev *cdev);
/**
- * @brief selftest_nvram - Perform nvram test
+ * selftest_nvram(): Perform nvram test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*selftest_nvram) (struct qed_dev *cdev);
};
@@ -909,47 +936,53 @@ struct qed_common_ops {
enum qed_hw_err_type err_type);
/**
- * @brief can_link_change - can the instance change the link or not
+ * can_link_change(): can the instance change the link or not.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return true if link-change is allowed, false otherwise.
+ * Return: true if link-change is allowed, false otherwise.
*/
bool (*can_link_change)(struct qed_dev *cdev);
/**
- * @brief set_link - set links according to params
+ * set_link(): set links according to params.
*
- * @param cdev
- * @param params - values used to override the default link configuration
+ * @cdev: Qed dev pointer.
+ * @params: values used to override the default link configuration.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_link)(struct qed_dev *cdev,
struct qed_link_params *params);
/**
- * @brief get_link - returns the current link state.
+ * get_link(): returns the current link state.
*
- * @param cdev
- * @param if_link - structure to be filled with current link configuration.
+ * @cdev: Qed dev pointer.
+ * @if_link: structure to be filled with current link configuration.
+ *
+ * Return: Void.
*/
void (*get_link)(struct qed_dev *cdev,
struct qed_link_output *if_link);
/**
- * @brief - drains chip in case Tx completions fail to arrive due to pause.
+ * drain(): drains chip in case Tx completions fail to arrive due to pause.
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Int.
*/
int (*drain)(struct qed_dev *cdev);
/**
- * @brief update_msglvl - update module debug level
+ * update_msglvl(): update module debug level.
*
- * @param cdev
- * @param dp_module
- * @param dp_level
+ * @cdev: Qed dev pointer.
+ * @dp_module: Debug module.
+ * @dp_level: Debug level.
+ *
+ * Return: Void.
*/
void (*update_msglvl)(struct qed_dev *cdev,
u32 dp_module,
@@ -963,70 +996,73 @@ struct qed_common_ops {
struct qed_chain *p_chain);
/**
- * @brief nvm_flash - Flash nvm data.
+ * nvm_flash(): Flash nvm data.
*
- * @param cdev
- * @param name - file containing the data
+ * @cdev: Qed dev pointer.
+ * @name: file containing the data.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*nvm_flash)(struct qed_dev *cdev, const char *name);
/**
- * @brief nvm_get_image - reads an entire image from nvram
+ * nvm_get_image(): reads an entire image from nvram.
*
- * @param cdev
- * @param type - type of the request nvram image
- * @param buf - preallocated buffer to fill with the image
- * @param len - length of the allocated buffer
+ * @cdev: Qed dev pointer.
+ * @type: type of the request nvram image.
+ * @buf: preallocated buffer to fill with the image.
+ * @len: length of the allocated buffer.
*
- * @return 0 on success, error otherwise
+ * Return: 0 on success, error otherwise.
*/
int (*nvm_get_image)(struct qed_dev *cdev,
enum qed_nvm_images type, u8 *buf, u16 len);
/**
- * @brief set_coalesce - Configure Rx coalesce value in usec
+ * set_coalesce(): Configure Rx coalesce value in usec.
*
- * @param cdev
- * @param rx_coal - Rx coalesce value in usec
- * @param tx_coal - Tx coalesce value in usec
- * @param qid - Queue index
- * @param sb_id - Status Block Id
+ * @cdev: Qed dev pointer.
+ * @rx_coal: Rx coalesce value in usec.
+ * @tx_coal: Tx coalesce value in usec.
+ * @handle: Queue handle.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_coalesce)(struct qed_dev *cdev,
u16 rx_coal, u16 tx_coal, void *handle);
/**
- * @brief set_led - Configure LED mode
+ * set_led() - Configure LED mode.
*
- * @param cdev
- * @param mode - LED mode
+ * @cdev: Qed dev pointer.
+ * @mode: LED mode.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*set_led)(struct qed_dev *cdev,
enum qed_led_mode mode);
/**
- * @brief attn_clr_enable - Prevent attentions from being reasserted
+ * attn_clr_enable(): Prevent attentions from being reasserted.
+ *
+ * @cdev: Qed dev pointer.
+ * @clr_enable: Clear enable.
*
- * @param cdev
- * @param clr_enable
+ * Return: Void.
*/
void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable);
/**
- * @brief db_recovery_add - add doorbell information to the doorbell
- * recovery mechanism.
+ * db_recovery_add(): add doorbell information to the doorbell
+ * recovery mechanism.
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address of where db_data is stored
- * @param db_is_32b - doorbell is 32b pr 64b
- * @param db_is_user - doorbell recovery addresses are user or kernel space
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address of where db_data is stored.
+ * @db_width: Doorbell is 32b or 64b.
+ * @db_space: Doorbell recovery addresses are user or kernel space.
+ *
+ * Return: Int.
*/
int (*db_recovery_add)(struct qed_dev *cdev,
void __iomem *db_addr,
@@ -1035,120 +1071,143 @@ struct qed_common_ops {
enum qed_db_rec_space db_space);
/**
- * @brief db_recovery_del - remove doorbell information from the doorbell
+ * db_recovery_del(): remove doorbell information from the doorbell
* recovery mechanism. db_data serves as key (db_addr is not unique).
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address where db_data is stored. Serves as key for the
- * entry to delete.
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address where db_data is stored. Serves as key for the
+ * entry to delete.
+ *
+ * Return: Int.
*/
int (*db_recovery_del)(struct qed_dev *cdev,
void __iomem *db_addr, void *db_data);
/**
- * @brief recovery_process - Trigger a recovery process
+ * recovery_process(): Trigger a recovery process.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*recovery_process)(struct qed_dev *cdev);
/**
- * @brief recovery_prolog - Execute the prolog operations of a recovery process
+ * recovery_prolog(): Execute the prolog operations of a recovery process.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*recovery_prolog)(struct qed_dev *cdev);
/**
- * @brief update_drv_state - API to inform the change in the driver state.
+ * update_drv_state(): API to inform the change in the driver state.
*
- * @param cdev
- * @param active
+ * @cdev: Qed dev pointer.
+ * @active: Whether the driver is active.
*
+ * Return: Int.
*/
int (*update_drv_state)(struct qed_dev *cdev, bool active);
/**
- * @brief update_mac - API to inform the change in the mac address
+ * update_mac(): API to inform the change in the mac address.
*
- * @param cdev
- * @param mac
+ * @cdev: Qed dev pointer.
+ * @mac: MAC address.
*
+ * Return: Int.
*/
- int (*update_mac)(struct qed_dev *cdev, u8 *mac);
+ int (*update_mac)(struct qed_dev *cdev, const u8 *mac);
/**
- * @brief update_mtu - API to inform the change in the mtu
+ * update_mtu(): API to inform the change in the mtu.
*
- * @param cdev
- * @param mtu
+ * @cdev: Qed dev pointer.
+ * @mtu: MTU.
*
+ * Return: Int.
*/
int (*update_mtu)(struct qed_dev *cdev, u16 mtu);
/**
- * @brief update_wol - update of changes in the WoL configuration
+ * update_wol(): Update of changes in the WoL configuration.
*
- * @param cdev
- * @param enabled - true iff WoL should be enabled.
+ * @cdev: Qed dev pointer.
+ * @enabled: true iff WoL should be enabled.
+ *
+ * Return: Int.
*/
int (*update_wol) (struct qed_dev *cdev, bool enabled);
/**
- * @brief read_module_eeprom
+ * read_module_eeprom(): Read EEPROM.
+ *
+ * @cdev: Qed dev pointer.
+ * @buf: buffer.
+ * @dev_addr: PHY device memory region.
+ * @offset: offset into eeprom contents to be read.
+ * @len: buffer length, i.e., max bytes to be read.
*
- * @param cdev
- * @param buf - buffer
- * @param dev_addr - PHY device memory region
- * @param offset - offset into eeprom contents to be read
- * @param len - buffer length, i.e., max bytes to be read
+ * Return: Int.
*/
int (*read_module_eeprom)(struct qed_dev *cdev,
char *buf, u8 dev_addr, u32 offset, u32 len);
/**
- * @brief get_affin_hwfn_idx
+ * get_affin_hwfn_idx(): Get affine HW function.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
+ *
+ * Return: u8.
*/
u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev);
/**
- * @brief read_nvm_cfg - Read NVM config attribute value.
- * @param cdev
- * @param buf - buffer
- * @param cmd - NVM CFG command id
- * @param entity_id - Entity id
+ * read_nvm_cfg(): Read NVM config attribute value.
+ *
+ * @cdev: Qed dev pointer.
+ * @buf: Buffer.
+ * @cmd: NVM CFG command id.
+ * @entity_id: Entity id.
*
+ * Return: Int.
*/
int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd,
u32 entity_id);
/**
- * @brief read_nvm_cfg - Read NVM config attribute value.
- * @param cdev
- * @param cmd - NVM CFG command id
+ * read_nvm_cfg_len(): Read NVM config attribute value.
*
- * @return config id length, 0 on error.
+ * @cdev: Qed dev pointer.
+ * @cmd: NVM CFG command id.
+ *
+ * Return: config id length, 0 on error.
*/
int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd);
/**
- * @brief set_grc_config - Configure value for grc config id.
- * @param cdev
- * @param cfg_id - grc config id
- * @param val - grc config value
+ * set_grc_config(): Configure value for grc config id.
+ *
+ * @cdev: Qed dev pointer.
+ * @cfg_id: grc config id
+ * @val: grc config value
*
+ * Return: Int.
*/
int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val);
struct devlink* (*devlink_register)(struct qed_dev *cdev);
void (*devlink_unregister)(struct devlink *devlink);
+
+ __printf(2, 3) void (*mfw_report)(struct qed_dev *cdev, char *fmt, ...);
+
+ int (*get_sb_info)(struct qed_dev *cdev, struct qed_sb_info *sb,
+ u16 qid, struct qed_sb_info_dbg *sb_dbg);
+
+ int (*get_esl_status)(struct qed_dev *cdev, bool *esl_active);
};
#define MASK_FIELD(_name, _value) \
@@ -1368,7 +1427,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
u16 rc = 0;
prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
- STATUS_BLOCK_E4_PROD_INDEX_MASK;
+ STATUS_BLOCK_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod;
rc |= QED_SB_IDX;
@@ -1379,18 +1438,16 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
}
/**
+ * qed_sb_ack(): This function creates an update command for interrupts
+ * that is written to the IGU.
*
- * @brief This function creates an update command for interrupts that is
- * written to the IGU.
- *
- * @param sb_info - This is the structure allocated and
- * initialized per status block. Assumption is
- * that it was initialized using qed_sb_init
- * @param int_cmd - Enable/Disable/Nop
- * @param upd_flg - whether igu consumer should be
- * updated.
+ * @sb_info: This is the structure allocated and
+ * initialized per status block. Assumption is
+ * that it was initialized using qed_sb_init.
+ * @int_cmd: Enable/Disable/Nop.
+ * @upd_flg: Whether igu consumer should be updated.
*
- * @return inline void
+ * Return: Void.
*/
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
enum igu_int_cmd int_cmd,
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index 04180d9af560..fbf7973ae9ba 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -133,7 +133,7 @@ struct qed_iscsi_cb_ops {
* @fill_dev_info: fills iSCSI specific information
* @param cdev
* @param info
- * @return 0 on sucesss, otherwise error value.
+ * @return 0 on success, otherwise error value.
* @register_ops: register iscsi operations
* @param cdev
* @param ops - specified using qed_iscsi_cb_ops
@@ -152,7 +152,7 @@ struct qed_iscsi_cb_ops {
* connection.
* @param p_doorbell - qed will fill the address of the
* doorbell.
- * @return 0 on sucesss, otherwise error value.
+ * @return 0 on success, otherwise error value.
* @release_conn: release a previously acquired iscsi connection
* @param cdev
* @param handle - the connection handle.
@@ -182,7 +182,7 @@ struct qed_iscsi_cb_ops {
* @param stats - pointer to struck that would be filled
* we stats
* @return 0 on success, error otherwise.
- * @change_mac Change MAC of interface
+ * @change_mac: Change MAC of interface
* @param cdev
* @param handle - the connection handle.
* @param mac - new MAC to configure.
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index ea273ba1c991..aa29ac53b833 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -18,7 +18,7 @@
enum qed_ll2_conn_type {
QED_LL2_TYPE_FCOE,
- QED_LL2_TYPE_ISCSI,
+ QED_LL2_TYPE_TCP_ULP,
QED_LL2_TYPE_TEST,
QED_LL2_TYPE_OOO,
QED_LL2_TYPE_RESERVED2,
@@ -208,57 +208,57 @@ enum qed_ll2_xmit_flags {
struct qed_ll2_ops {
/**
- * @brief start - initializes ll2
+ * start(): Initializes ll2.
*
- * @param cdev
- * @param params - protocol driver configuration for the ll2.
+ * @cdev: Qed dev pointer.
+ * @params: Protocol driver configuration for the ll2.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);
/**
- * @brief stop - stops the ll2
+ * stop(): Stops the ll2
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*stop)(struct qed_dev *cdev);
/**
- * @brief start_xmit - transmits an skb over the ll2 interface
+ * start_xmit(): Transmits an skb over the ll2 interface
*
- * @param cdev
- * @param skb
- * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags.
+ * @cdev: Qed dev pointer.
+ * @skb: SKB.
+ * @xmit_flags: Transmit options defined by the enum qed_ll2_xmit_flags.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb,
unsigned long xmit_flags);
/**
- * @brief register_cb_ops - protocol driver register the callback for Rx/Tx
+ * register_cb_ops(): Protocol driver register the callback for Rx/Tx
* packets. Should be called before `start'.
*
- * @param cdev
- * @param cookie - to be passed to the callback functions.
- * @param ops - the callback functions to register for Rx / Tx.
+ * @cdev: Qed dev pointer.
+ * @cookie: to be passed to the callback functions.
+ * @ops: the callback functions to register for Rx / Tx.
*
- * @return 0 on success, otherwise error value.
+ * Return: 0 on success, otherwise error value.
*/
void (*register_cb_ops)(struct qed_dev *cdev,
const struct qed_ll2_cb_ops *ops,
void *cookie);
/**
- * @brief get LL2 related statistics
+ * get_stats(): Get LL2 related statistics.
*
- * @param cdev
- * @param stats - pointer to struct that would be filled with stats
+ * @cdev: Qed dev pointer.
+ * @stats: Pointer to struct that would be filled with stats.
*
- * @return 0 on success, error otherwise.
+ * Return: 0 on success, error otherwise.
*/
int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
};
@@ -267,7 +267,7 @@ struct qed_ll2_ops {
int qed_ll2_alloc_if(struct qed_dev *);
void qed_ll2_dealloc_if(struct qed_dev *);
#else
-static const struct qed_ll2_ops qed_ll2_ops_pass = {
+static __maybe_unused const struct qed_ll2_ops qed_ll2_ops_pass = {
.start = NULL,
.stop = NULL,
.start_xmit = NULL,
diff --git a/include/linux/qed/qed_nvmetcp_if.h b/include/linux/qed/qed_nvmetcp_if.h
new file mode 100644
index 000000000000..bbfbfba51f37
--- /dev/null
+++ b/include/linux/qed/qed_nvmetcp_if.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef _QED_NVMETCP_IF_H
+#define _QED_NVMETCP_IF_H
+#include <linux/types.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/nvmetcp_common.h>
+
+#define QED_NVMETCP_MAX_IO_SIZE 0x800000
+#define QED_NVMETCP_CMN_HDR_SIZE (sizeof(struct nvme_tcp_hdr))
+#define QED_NVMETCP_CMD_HDR_SIZE (sizeof(struct nvme_tcp_cmd_pdu))
+#define QED_NVMETCP_NON_IO_HDR_SIZE ((QED_NVMETCP_CMN_HDR_SIZE + 16))
+
+typedef int (*nvmetcp_event_cb_t) (void *context,
+ u8 fw_event_code, void *fw_handle);
+
+struct qed_dev_nvmetcp_info {
+ struct qed_dev_info common;
+ u8 port_id; /* Physical port */
+ u8 num_cqs;
+};
+
+#define MAX_TID_BLOCKS_NVMETCP (512)
+struct qed_nvmetcp_tid {
+ u32 size; /* In bytes per task */
+ u32 num_tids_per_block;
+ u8 *blocks[MAX_TID_BLOCKS_NVMETCP];
+};
+
+struct qed_nvmetcp_id_params {
+ u8 mac[ETH_ALEN];
+ u32 ip[4];
+ u16 port;
+};
+
+struct qed_nvmetcp_params_offload {
+ /* FW initializations */
+ dma_addr_t sq_pbl_addr;
+ dma_addr_t nvmetcp_cccid_itid_table_addr;
+ u16 nvmetcp_cccid_max_range;
+ u8 default_cq;
+
+ /* Networking and TCP stack initializations */
+ struct qed_nvmetcp_id_params src;
+ struct qed_nvmetcp_id_params dst;
+ u32 ka_timeout;
+ u32 ka_interval;
+ u32 max_rt_time;
+ u32 cwnd;
+ u16 mss;
+ u16 vlan_id;
+ bool timestamp_en;
+ bool delayed_ack_en;
+ bool tcp_keep_alive_en;
+ bool ecn_en;
+ u8 ip_version;
+ u8 ka_max_probe_cnt;
+ u8 ttl;
+ u8 tos_or_tc;
+ u8 rcv_wnd_scale;
+};
+
+struct qed_nvmetcp_params_update {
+ u32 max_io_size;
+ u32 max_recv_pdu_length;
+ u32 max_send_pdu_length;
+
+ /* Placeholder: pfv, cpda, hpda */
+
+ bool hdr_digest_en;
+ bool data_digest_en;
+};
+
+struct qed_nvmetcp_cb_ops {
+ struct qed_common_cb_ops common;
+};
+
+struct nvmetcp_sge {
+ struct regpair sge_addr; /* SGE address */
+ __le32 sge_len; /* SGE length */
+ __le32 reserved;
+};
+
+/* IO path HSI function SGL params */
+struct storage_sgl_task_params {
+ struct nvmetcp_sge *sgl;
+ struct regpair sgl_phys_addr;
+ u32 total_buffer_size;
+ u16 num_sges;
+ bool small_mid_sge;
+};
+
+/* IO path HSI function FW task context params */
+struct nvmetcp_task_params {
+ void *context; /* Output parameter - set/filled by the HSI function */
+ struct nvmetcp_wqe *sqe;
+ u32 tx_io_size; /* in bytes (Without DIF, if exists) */
+ u32 rx_io_size; /* in bytes (Without DIF, if exists) */
+ u16 conn_icid;
+ u16 itid;
+ struct regpair opq; /* qedn_task_ctx address */
+ u16 host_cccid;
+ u8 cq_rss_number;
+ bool send_write_incapsule;
+};
+
+/**
+ * struct qed_nvmetcp_ops - qed NVMeTCP operations.
+ * @common: common operations pointer
+ * @ll2: light L2 operations pointer
+ * @fill_dev_info: fills NVMeTCP specific information
+ * @param cdev
+ * @param info
+ * @return 0 on success, otherwise error value.
+ * @register_ops: register nvmetcp operations
+ * @param cdev
+ * @param ops - specified using qed_nvmetcp_cb_ops
+ * @param cookie - driver private
+ * @start: nvmetcp in FW
+ * @param cdev
+ * @param tasks - qed will fill information about tasks
+ * @return 0 on success, otherwise error value.
+ * @stop: nvmetcp in FW
+ * @param cdev
+ * @return 0 on success, otherwise error value.
+ * @acquire_conn: acquire a new nvmetcp connection
+ * @param cdev
+ * @param handle - qed will fill handle that should be
+ * used henceforth as identifier of the
+ * connection.
+ * @param p_doorbell - qed will fill the address of the
+ * doorbell.
+ * @return 0 on success, otherwise error value.
+ * @release_conn: release a previously acquired nvmetcp connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @offload_conn: configures an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * @return 0 on success, otherwise error value.
+ * @update_conn: updates an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param conn_info - the configuration to use for the
+ * offload.
+ * @return 0 on success, otherwise error value.
+ * @destroy_conn: stops an offloaded connection
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @clear_sq: clear all task in sq
+ * @param cdev
+ * @param handle - the connection handle.
+ * @return 0 on success, otherwise error value.
+ * @add_src_tcp_port_filter: Add source tcp port filter
+ * @param cdev
+ * @param src_port
+ * @remove_src_tcp_port_filter: Remove source tcp port filter
+ * @param cdev
+ * @param src_port
+ * @add_dst_tcp_port_filter: Add destination tcp port filter
+ * @param cdev
+ * @param dest_port
+ * @remove_dst_tcp_port_filter: Remove destination tcp port filter
+ * @param cdev
+ * @param dest_port
+ * @clear_all_filters: Clear all filters.
+ * @param cdev
+ * @init_read_io: Init read IO.
+ * @task_params
+ * @cmd_pdu_header
+ * @nvme_cmd
+ * @sgl_task_params
+ * @init_write_io: Init write IO.
+ * @task_params
+ * @cmd_pdu_header
+ * @nvme_cmd
+ * @sgl_task_params
+ * @init_icreq_exchange: Exchange ICReq.
+ * @task_params
+ * @init_conn_req_pdu_hdr
+ * @tx_sgl_task_params
+ * @rx_sgl_task_params
+ * @init_task_cleanup: Init task cleanup.
+ * @task_params
+ */
+struct qed_nvmetcp_ops {
+ const struct qed_common_ops *common;
+
+ const struct qed_ll2_ops *ll2;
+
+ int (*fill_dev_info)(struct qed_dev *cdev,
+ struct qed_dev_nvmetcp_info *info);
+
+ void (*register_ops)(struct qed_dev *cdev,
+ struct qed_nvmetcp_cb_ops *ops, void *cookie);
+
+ int (*start)(struct qed_dev *cdev,
+ struct qed_nvmetcp_tid *tasks,
+ void *event_context, nvmetcp_event_cb_t async_event_cb);
+
+ int (*stop)(struct qed_dev *cdev);
+
+ int (*acquire_conn)(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell);
+
+ int (*release_conn)(struct qed_dev *cdev, u32 handle);
+
+ int (*offload_conn)(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_nvmetcp_params_offload *conn_info);
+
+ int (*update_conn)(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_nvmetcp_params_update *conn_info);
+
+ int (*destroy_conn)(struct qed_dev *cdev, u32 handle, u8 abrt_conn);
+
+ int (*clear_sq)(struct qed_dev *cdev, u32 handle);
+
+ int (*add_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port);
+
+ void (*remove_src_tcp_port_filter)(struct qed_dev *cdev, u16 src_port);
+
+ int (*add_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port);
+
+ void (*remove_dst_tcp_port_filter)(struct qed_dev *cdev, u16 dest_port);
+
+ void (*clear_all_filters)(struct qed_dev *cdev);
+
+ void (*init_read_io)(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+
+ void (*init_write_io)(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+
+ void (*init_icreq_exchange)(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params);
+
+ void (*init_task_cleanup)(struct nvmetcp_task_params *task_params);
+};
+
+const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void);
+void qed_put_nvmetcp_ops(void);
+#endif
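
A sketch of how an NVMe/TCP offload driver (qedn-style) would bind to this interface; error unwinding is trimmed and all qedn_* names are hypothetical:

static const struct qed_nvmetcp_ops *qed_ops;

static int qedn_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
	return 0; /* dispatch nvmetcp_eqe_opcode events here */
}

static int qedn_probe(struct qed_dev *cdev)
{
	struct qed_dev_nvmetcp_info dev_info;
	struct qed_nvmetcp_tid tasks;
	int rc;

	qed_ops = qed_get_nvmetcp_ops();
	if (!qed_ops)
		return -EINVAL;

	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		return rc;

	/* Balanced by qed_put_nvmetcp_ops() on teardown. */
	return qed_ops->start(cdev, &tasks, NULL, qedn_event_cb);
}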
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index aeb242cefebf..3b76c07fbcf8 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -662,7 +662,8 @@ struct qed_rdma_ops {
u8 connection_handle,
struct qed_ll2_stats *p_stats);
int (*ll2_set_mac_filter)(struct qed_dev *cdev,
- u8 *old_mac_address, u8 *new_mac_address);
+ u8 *old_mac_address,
+ const u8 *new_mac_address);
int (*iwarp_set_engine_affin)(struct qed_dev *cdev, bool b_reset);
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index bab078b25834..6dfed163ab6c 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -27,6 +27,7 @@
#define RDMA_MAX_PDS (64 * 1024)
#define RDMA_MAX_XRC_SRQS (1024)
#define RDMA_MAX_SRQS (32 * 1024)
+#define RDMA_MAX_IRQ_ELEMS_IN_PAGE (128)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 18ebd39c9487..89a0d83ddad0 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -91,7 +91,7 @@ extern bool qid_valid(struct kqid qid);
*
* When there is no mapping defined for the user-namespace, type,
* qid tuple an invalid kqid is returned. Callers are expected to
- * test for and handle handle invalid kqids being returned.
+ * test for and handle invalid kqids being returned.
* Invalid kqids may be tested for using qid_valid().
*/
static inline struct kqid make_kqid(struct user_namespace *from,
@@ -285,7 +285,9 @@ static inline void dqstats_dec(unsigned int type)
#define DQ_FAKE_B 3 /* no limits only usage */
#define DQ_READ_B 4 /* dquot was read into memory */
#define DQ_ACTIVE_B 5 /* dquot is active (dquot_release not called) */
-#define DQ_LASTSET_B 6 /* Following 6 bits (see QIF_) are reserved\
+#define DQ_RELEASING_B 6 /* dquot is in releasing_dquots list waiting
+ * to be cleaned up */
+#define DQ_LASTSET_B 7 /* Following 6 bits (see QIF_) are reserved\
* for the mask of entries set via SETQUOTA\
* quotactl. They are set under dq_data_lock\
* and the quota format handling dquot can\
@@ -524,7 +526,7 @@ struct quota_info {
const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
};
-int register_quota_format(struct quota_format_type *fmt);
+void register_quota_format(struct quota_format_type *fmt);
void unregister_quota_format(struct quota_format_type *fmt);
struct quota_module_name {
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index a0f6668924d3..c334f82ed385 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -19,12 +19,13 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
return &sb->s_dquot;
}
-/* i_mutex must being held */
-static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
+/* i_rwsem must being held */
+static inline bool is_quota_modification(struct mnt_idmap *idmap,
+ struct inode *inode, struct iattr *ia)
{
- return (ia->ia_valid & ATTR_SIZE) ||
- (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) ||
- (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
+ return ((ia->ia_valid & ATTR_SIZE) ||
+ i_uid_needs_update(idmap, ia, inode) ||
+ i_gid_needs_update(idmap, ia, inode));
}
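
A typical caller now threads the idmap through from ->setattr so the UID/GID comparisons are mount-idmapping aware; a sketch following the generic pattern (error handling trimmed, function name hypothetical):

static int example_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			   struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int rc;

	if (is_quota_modification(idmap, inode, attr)) {
		rc = dquot_initialize(inode);
		if (rc)
			return rc;
	}

	/* ... setattr_prepare(), dquot_transfer(idmap, inode, attr), etc. */
	return 0;
}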
#if defined(CONFIG_QUOTA)
@@ -56,7 +57,7 @@ static inline bool dquot_is_busy(struct dquot *dquot)
{
if (test_bit(DQ_MOD_B, &dquot->dq_flags))
return true;
- if (atomic_read(&dquot->dq_count) > 1)
+ if (atomic_read(&dquot->dq_count) > 0)
return true;
return false;
}
@@ -73,7 +74,7 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags);
int dquot_alloc_inode(struct inode *inode);
-int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
+void dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
void dquot_free_inode(struct inode *inode);
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number);
@@ -115,7 +116,8 @@ int dquot_set_dqblk(struct super_block *sb, struct kqid id,
struct qc_dqblk *di);
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
-int dquot_transfer(struct inode *inode, struct iattr *iattr);
+int dquot_transfer(struct mnt_idmap *idmap, struct inode *inode,
+ struct iattr *iattr);
static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type)
{
@@ -234,7 +236,8 @@ static inline void dquot_free_inode(struct inode *inode)
{
}
-static inline int dquot_transfer(struct inode *inode, struct iattr *iattr)
+static inline int dquot_transfer(struct mnt_idmap *idmap,
+ struct inode *inode, struct iattr *iattr)
{
return 0;
}
@@ -254,10 +257,9 @@ static inline void __dquot_free_space(struct inode *inode, qsize_t number,
inode_sub_bytes(inode, number);
}
-static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
+static inline void dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
inode_add_bytes(inode, number);
- return 0;
}
static inline int dquot_reclaim_space_nodirty(struct inode *inode,
@@ -355,14 +357,10 @@ static inline int dquot_reserve_block(struct inode *inode, qsize_t nr)
DQUOT_SPACE_WARN|DQUOT_SPACE_RESERVE);
}
-static inline int dquot_claim_block(struct inode *inode, qsize_t nr)
+static inline void dquot_claim_block(struct inode *inode, qsize_t nr)
{
- int ret;
-
- ret = dquot_claim_space_nodirty(inode, nr << inode->i_blkbits);
- if (!ret)
- mark_inode_dirty_sync(inode);
- return ret;
+ dquot_claim_space_nodirty(inode, nr << inode->i_blkbits);
+ mark_inode_dirty_sync(inode);
}
static inline void dquot_reclaim_block(struct inode *inode, qsize_t nr)
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 64ad900ac742..eae67015ce51 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -9,8 +9,10 @@
#define _LINUX_RADIX_TREE_H
#include <linux/bitops.h>
-#include <linux/kernel.h>
+#include <linux/gfp_types.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/math.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 154e954b711d..2467b3be15c9 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -10,17 +10,14 @@
#ifdef __KERNEL__
-/* Set to 1 to use kernel-wide empty_zero_page */
-#define RAID6_USE_EMPTY_ZERO_PAGE 0
#include <linux/blkdev.h>
+#include <linux/mm.h>
-/* We need a pre-zeroed page... if we don't want to use the kernel-provided
- one define it here */
-#if RAID6_USE_EMPTY_ZERO_PAGE
-# define raid6_empty_zero_page empty_zero_page
-#else
-extern const char raid6_empty_zero_page[PAGE_SIZE];
-#endif
+/* This should be const but the raid6 code is too convoluted for that. */
+static inline void *raid6_get_zero_page(void)
+{
+ return page_address(ZERO_PAGE(0));
+}
#else /* ! __KERNEL__ */
/* Used for testing in user space */
@@ -81,7 +78,7 @@ struct raid6_calls {
void (*xor_syndrome)(int, int, int, size_t, void **);
int (*valid)(void); /* Returns 1 if this routine set is usable */
const char *name; /* Name of this routine set */
- int prefer; /* Has special performance attribute */
+ int priority; /* Relative priority ranking if non-zero */
};
/* Selected algorithm */
@@ -92,8 +89,6 @@ extern const struct raid6_calls raid6_intx1;
extern const struct raid6_calls raid6_intx2;
extern const struct raid6_calls raid6_intx4;
extern const struct raid6_calls raid6_intx8;
-extern const struct raid6_calls raid6_intx16;
-extern const struct raid6_calls raid6_intx32;
extern const struct raid6_calls raid6_mmxx1;
extern const struct raid6_calls raid6_mmxx2;
extern const struct raid6_calls raid6_sse1x1;
@@ -116,6 +111,12 @@ extern const struct raid6_calls raid6_vpermxor1;
extern const struct raid6_calls raid6_vpermxor2;
extern const struct raid6_calls raid6_vpermxor4;
extern const struct raid6_calls raid6_vpermxor8;
+extern const struct raid6_calls raid6_lsx;
+extern const struct raid6_calls raid6_lasx;
+extern const struct raid6_calls raid6_rvvx1;
+extern const struct raid6_calls raid6_rvvx2;
+extern const struct raid6_calls raid6_rvvx4;
+extern const struct raid6_calls raid6_rvvx8;
struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **);
@@ -131,6 +132,9 @@ extern const struct raid6_recov_calls raid6_recov_avx2;
extern const struct raid6_recov_calls raid6_recov_avx512;
extern const struct raid6_recov_calls raid6_recov_s390xc;
extern const struct raid6_recov_calls raid6_recov_neon;
+extern const struct raid6_recov_calls raid6_recov_lsx;
+extern const struct raid6_recov_calls raid6_recov_lasx;
+extern const struct raid6_recov_calls raid6_recov_rvv;
extern const struct raid6_calls raid6_neonx1;
extern const struct raid6_calls raid6_neonx2;
@@ -192,6 +196,11 @@ static inline uint32_t raid6_jiffies(void)
return tv.tv_sec*1000 + tv.tv_usec/1000;
}
+static inline void *raid6_get_zero_page(void)
+{
+ return raid6_empty_zero_page;
+}
+
#endif /* ! __KERNEL__ */
#endif /* LINUX_RAID_RAID6_H */
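[Editor's note] The prefer -> priority rename above turns a boolean hint into a relative ranking. The sketch below only illustrates that intent, under an assumed NULL-terminated candidate table; the kernel's real selection in lib/raid6/algos.c additionally benchmarks the candidates.

/* Hypothetical helper: pick the usable routine set with the highest
 * non-zero priority, falling back to the first usable one. */
static const struct raid6_calls *
pick_raid6_algo_sketch(const struct raid6_calls *const *candidates)
{
	const struct raid6_calls *best = NULL;

	for (; *candidates; candidates++) {
		const struct raid6_calls *c = *candidates;

		if (c->valid && !c->valid())
			continue;	/* routine set unusable on this CPU */
		if (!best || c->priority > best->priority)
			best = c;
	}
	return best;
}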
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
index 2a9fee8ddae3..51b811b62322 100644
--- a/include/linux/raid/xor.h
+++ b/include/linux/raid/xor.h
@@ -11,13 +11,20 @@ struct xor_block_template {
struct xor_block_template *next;
const char *name;
int speed;
- void (*do_2)(unsigned long, unsigned long *, unsigned long *);
- void (*do_3)(unsigned long, unsigned long *, unsigned long *,
- unsigned long *);
- void (*do_4)(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *);
- void (*do_5)(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *, unsigned long *);
+ void (*do_2)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict);
+ void (*do_3)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict);
+ void (*do_4)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict);
+ void (*do_5)(unsigned long, unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict,
+ const unsigned long * __restrict);
};
#endif
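[Editor's note] The __restrict and const qualifiers added above promise the compiler that the source and destination blocks never alias, which permits more aggressive vectorization of the XOR loops. A minimal do_2-shaped sketch matching the new signature (illustrative only, not one of the kernel's tuned templates):

/* XOR one source block into a destination block, word at a time.
 * 'bytes' is assumed to be a multiple of sizeof(unsigned long). */
static void xor_sketch_2(unsigned long bytes,
			 unsigned long * __restrict p1,
			 const unsigned long * __restrict p2)
{
	long lines = bytes / sizeof(unsigned long);

	while (lines--)
		*p1++ ^= *p2++;
}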
diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h
index 5cdfcb873a8f..e50416ba9cd9 100644
--- a/include/linux/raid_class.h
+++ b/include/linux/raid_class.h
@@ -11,7 +11,7 @@ struct raid_template {
};
struct raid_function_template {
- void *cookie;
+ const void *cookie;
int (*is_raid)(struct device *);
void (*get_resync)(struct device *);
void (*get_state)(struct device *);
@@ -77,7 +77,3 @@ DEFINE_RAID_ATTRIBUTE(enum raid_state, state)
struct raid_template *raid_class_attach(struct raid_function_template *);
void raid_class_release(struct raid_template *);
-
-int __must_check raid_component_add(struct raid_template *, struct device *,
- struct device *);
-
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index 917528d102c4..d506dc63dd47 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -7,6 +7,7 @@
struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir,
umode_t mode, dev_t dev);
extern int ramfs_init_fs_context(struct fs_context *fc);
+extern void ramfs_kill_sb(struct super_block *sb);
#ifdef CONFIG_MMU
static inline int
diff --git a/include/linux/random.h b/include/linux/random.h
index f45b8be3e3c4..8a8064dc3970 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -1,60 +1,46 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * include/linux/random.h
- *
- * Include file for the random number generator.
- */
+
#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/once.h>
#include <uapi/linux/random.h>
-struct random_ready_callback {
- struct list_head list;
- void (*func)(struct random_ready_callback *rdy);
- struct module *owner;
-};
+struct notifier_block;
-extern void add_device_randomness(const void *, unsigned int);
-extern void add_bootloader_randomness(const void *, unsigned int);
+void add_device_randomness(const void *buf, size_t len);
+void __init add_bootloader_randomness(const void *buf, size_t len);
+void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value) __latent_entropy;
+void add_interrupt_randomness(int irq) __latent_entropy;
+void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after);
-#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
static inline void add_latent_entropy(void)
{
- add_device_randomness((const void *)&latent_entropy,
- sizeof(latent_entropy));
-}
+#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
#else
-static inline void add_latent_entropy(void) {}
+ add_device_randomness(NULL, 0);
#endif
+}
-extern void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value) __latent_entropy;
-extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
-
-extern void get_random_bytes(void *buf, int nbytes);
-extern int wait_for_random_bytes(void);
-extern int __init rand_initialize(void);
-extern bool rng_is_initialized(void);
-extern int add_random_ready_callback(struct random_ready_callback *rdy);
-extern void del_random_ready_callback(struct random_ready_callback *rdy);
-extern int __must_check get_random_bytes_arch(void *buf, int nbytes);
-
-#ifndef MODULE
-extern const struct file_operations random_fops, urandom_fops;
+#if IS_ENABLED(CONFIG_VMGENID)
+void add_vmfork_randomness(const void *unique_vm_id, size_t len);
+int register_random_vmfork_notifier(struct notifier_block *nb);
+int unregister_random_vmfork_notifier(struct notifier_block *nb);
+#else
+static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
+static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
#endif
+void get_random_bytes(void *buf, size_t len);
+u8 get_random_u8(void);
+u16 get_random_u16(void);
u32 get_random_u32(void);
u64 get_random_u64(void);
-static inline unsigned int get_random_int(void)
-{
- return get_random_u32();
-}
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 64
@@ -64,98 +50,93 @@ static inline unsigned long get_random_long(void)
#endif
}
+u32 __get_random_u32_below(u32 ceil);
+
/*
- * On 64-bit architectures, protect against non-terminated C string overflows
- * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
+ * Returns a random integer in the interval [0, ceil), with uniform
+ * distribution, suitable for all uses. Fastest when ceil is a constant, but
+ * still fast for variable ceil as well.
*/
-#ifdef CONFIG_64BIT
-# ifdef __LITTLE_ENDIAN
-# define CANARY_MASK 0xffffffffffffff00UL
-# else /* big endian, 64 bits: */
-# define CANARY_MASK 0x00ffffffffffffffUL
-# endif
-#else /* 32 bits: */
-# define CANARY_MASK 0xffffffffUL
-#endif
-
-static inline unsigned long get_random_canary(void)
+static inline u32 get_random_u32_below(u32 ceil)
{
- unsigned long val = get_random_long();
-
- return val & CANARY_MASK;
-}
-
-/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
- * Returns the result of the call to wait_for_random_bytes. */
-static inline int get_random_bytes_wait(void *buf, int nbytes)
-{
- int ret = wait_for_random_bytes();
- get_random_bytes(buf, nbytes);
- return ret;
-}
-
-#define declare_get_random_var_wait(var) \
- static inline int get_random_ ## var ## _wait(var *out) { \
- int ret = wait_for_random_bytes(); \
- if (unlikely(ret)) \
- return ret; \
- *out = get_random_ ## var(); \
- return 0; \
+ if (!__builtin_constant_p(ceil))
+ return __get_random_u32_below(ceil);
+
+ /*
+ * For the fast path, below, all operations on ceil are precomputed by
+ * the compiler, so this incurs no overhead for checking pow2, doing
+ * divisions, or branching based on integer size. The resultant
+ * algorithm does traditional reciprocal multiplication (typically
+ * optimized by the compiler into shifts and adds), rejecting samples
+ * whose lower half would indicate a range indivisible by ceil.
+ */
+ BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0");
+ if (ceil <= 1)
+ return 0;
+ for (;;) {
+ if (ceil <= 1U << 8) {
+ u32 mult = ceil * get_random_u8();
+ if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil))
+ return mult >> 8;
+ } else if (ceil <= 1U << 16) {
+ u32 mult = ceil * get_random_u16();
+ if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil))
+ return mult >> 16;
+ } else {
+ u64 mult = (u64)ceil * get_random_u32();
+ if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil))
+ return mult >> 32;
+ }
}
-declare_get_random_var_wait(u32)
-declare_get_random_var_wait(u64)
-declare_get_random_var_wait(int)
-declare_get_random_var_wait(long)
-#undef declare_get_random_var
-
-unsigned long randomize_page(unsigned long start, unsigned long range);
+}
/*
- * This is designed to be standalone for just prandom
- * users, but for now we include it from <linux/random.h>
- * for legacy reasons.
+ * Returns a random integer in the interval (floor, U32_MAX], with uniform
+ * distribution, suitable for all uses. Fastest when floor is a constant, but
+ * still fast for variable floor as well.
*/
-#include <linux/prandom.h>
-
-#ifdef CONFIG_ARCH_RANDOM
-# include <asm/archrandom.h>
-#else
-static inline bool __must_check arch_get_random_long(unsigned long *v)
-{
- return false;
-}
-static inline bool __must_check arch_get_random_int(unsigned int *v)
-{
- return false;
-}
-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
-{
- return false;
-}
-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
+static inline u32 get_random_u32_above(u32 floor)
{
- return false;
+ BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && floor == U32_MAX,
+ "get_random_u32_above() must take floor < U32_MAX");
+ return floor + 1 + get_random_u32_below(U32_MAX - floor);
}
-#endif
/*
- * Called from the boot CPU during startup; not valid to call once
- * secondary CPUs are up and preemption is possible.
+ * Returns a random integer in the interval [floor, ceil], with uniform
+ * distribution, suitable for all uses. Fastest when floor and ceil are
+ * constant, but still fast for variable floor and ceil as well.
*/
-#ifndef arch_get_random_seed_long_early
-static inline bool __init arch_get_random_seed_long_early(unsigned long *v)
+static inline u32 get_random_u32_inclusive(u32 floor, u32 ceil)
{
- WARN_ON(system_state != SYSTEM_BOOTING);
- return arch_get_random_seed_long(v);
+ BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && __builtin_constant_p(ceil) &&
+ (floor > ceil || ceil - floor == U32_MAX),
+ "get_random_u32_inclusive() must take floor <= ceil");
+ return floor + get_random_u32_below(ceil - floor + 1);
}
-#endif
-#ifndef arch_get_random_long_early
-static inline bool __init arch_get_random_long_early(unsigned long *v)
+void __init random_init_early(const char *command_line);
+void __init random_init(void);
+bool rng_is_initialized(void);
+int wait_for_random_bytes(void);
+int execute_with_initialized_rng(struct notifier_block *nb);
+
+/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
+ * Returns the result of the call to wait_for_random_bytes. */
+static inline int get_random_bytes_wait(void *buf, size_t nbytes)
{
- WARN_ON(system_state != SYSTEM_BOOTING);
- return arch_get_random_long(v);
+ int ret = wait_for_random_bytes();
+ get_random_bytes(buf, nbytes);
+ return ret;
}
+
+#ifdef CONFIG_SMP
+int random_prepare_cpu(unsigned int cpu);
+int random_online_cpu(unsigned int cpu);
+#endif
+
+#ifndef MODULE
+extern const struct file_operations random_fops, urandom_fops;
#endif
#endif /* _LINUX_RANDOM_H */
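[Editor's note] The bounded helpers above implement Lemire-style rejection sampling. To make the rejection test concrete with small numbers (my worked example, not from the patch): for ceil = 6 the 8-bit path computes mult = 6 * get_random_u8() in [0, 1530]; since 256 % 6 = 4, samples whose low byte is below 4 are rejected, leaving 252 accepted values that split evenly into 42 per bucket mult >> 8 in [0, 5]. A hedged usage sketch of the new API (hypothetical call site):

#include <linux/random.h>

static void bounded_random_sketch(void)
{
	u32 die  = get_random_u32_inclusive(1, 6); /* uniform over [1, 6] */
	u32 slot = get_random_u32_below(10);       /* uniform over [0, 9] */
	u32 big  = get_random_u32_above(100);      /* uniform over (100, U32_MAX] */

	(void)die; (void)slot; (void)big;
}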
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
index bebc911161b6..1d982dbdd0d0 100644
--- a/include/linux/randomize_kstack.h
+++ b/include/linux/randomize_kstack.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_RANDOMIZE_KSTACK_H
#define _LINUX_RANDOMIZE_KSTACK_H
+#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <linux/percpu-defs.h>
@@ -16,39 +17,82 @@ DECLARE_PER_CPU(u32, kstack_offset);
* alignment. Also, since this use is being explicitly masked to a max of
* 10 bits, stack-clash style attacks are unlikely. For more details see
* "VLAs" in Documentation/process/deprecated.rst
+ *
+ * The normal __builtin_alloca() is initialized with INIT_STACK_ALL (currently
+ * only with Clang and not GCC). Initializing the unused area on each syscall
+ * entry is expensive, and generating an implicit call to memset() may also be
+ * problematic (such as in noinstr functions). Therefore, if the compiler
+ * supports it (which it should if it initializes allocas), always use the
+ * "uninitialized" variant of the builtin.
*/
-void *__builtin_alloca(size_t size);
+#if __has_builtin(__builtin_alloca_uninitialized)
+#define __kstack_alloca __builtin_alloca_uninitialized
+#else
+#define __kstack_alloca __builtin_alloca
+#endif
+
/*
- * Use, at most, 10 bits of entropy. We explicitly cap this to keep the
- * "VLA" from being unbounded (see above). 10 bits leaves enough room for
- * per-arch offset masks to reduce entropy (by removing higher bits, since
- * high entropy may overly constrain usable stack space), and for
- * compiler/arch-specific stack alignment to remove the lower bits.
+ * Use, at most, 6 bits of entropy (on 64-bit; 8 on 32-bit). This cap is
+ * to keep the "VLA" from being unbounded (see above). Additionally clear
+ * the bottom 4 bits (on 64-bit systems, 2 for 32-bit), since stack
+ * alignment will always be at least word size. This makes the compiler
+ * code gen better when it is applying the actual per-arch alignment to
+ * the final offset. The resulting randomness is reasonable without overly
+ * constraining usable stack space.
*/
-#define KSTACK_OFFSET_MAX(x) ((x) & 0x3FF)
+#ifdef CONFIG_64BIT
+#define KSTACK_OFFSET_MAX(x) ((x) & 0b1111110000)
+#else
+#define KSTACK_OFFSET_MAX(x) ((x) & 0b1111111100)
+#endif
-/*
- * These macros must be used during syscall entry when interrupts and
+/**
+ * add_random_kstack_offset - Increase stack utilization by previously
+ * chosen random offset
+ *
+ * This should be used in the syscall entry path when interrupts and
* preempt are disabled, and after user registers have been stored to
- * the stack.
+ * the stack. For testing the resulting entropy, please see:
+ * tools/testing/selftests/lkdtm/stack-entropy.sh
*/
#define add_random_kstack_offset() do { \
if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
&randomize_kstack_offset)) { \
u32 offset = raw_cpu_read(kstack_offset); \
- u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset)); \
+ u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset)); \
/* Keep allocation even after "ptr" loses scope. */ \
asm volatile("" :: "r"(ptr) : "memory"); \
} \
} while (0)
+/**
+ * choose_random_kstack_offset - Choose the random offset for the next
+ * add_random_kstack_offset()
+ *
+ * This should only be used during syscall exit when interrupts and
+ * preempt are disabled. This position in the syscall flow is done to
+ * frustrate attacks from userspace attempting to learn the next offset:
+ * - Maximize the timing uncertainty visible from userspace: if the
+ * offset is chosen at syscall entry, userspace has much more control
+ * over the timing between choosing offsets. "How long will we be in
+ * kernel mode?" tends to be more difficult to predict than "how long
+ * will we be in user mode?"
+ * - Reduce the lifetime of the new offset sitting in memory during
+ * kernel mode execution. Exposure of "thread-local" memory content
+ * (e.g. current, percpu, etc) tends to be easier than arbitrary
+ * location memory exposure.
+ */
#define choose_random_kstack_offset(rand) do { \
if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
&randomize_kstack_offset)) { \
u32 offset = raw_cpu_read(kstack_offset); \
- offset ^= (rand); \
+ offset = ror32(offset, 5) ^ (rand); \
raw_cpu_write(kstack_offset, offset); \
} \
} while (0)
+#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
+#define add_random_kstack_offset() do { } while (0)
+#define choose_random_kstack_offset(rand) do { } while (0)
+#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
#endif
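[Editor's note] The two macros are documented above as an entry/exit pair: the offset consumed on this syscall was chosen on the previous exit. A hedged sketch of how an architecture might wire them up; the handler name is hypothetical, real callers live in each arch's entry code, and the entropy source varies by architecture.

#include <linux/randomize_kstack.h>
#include <linux/random.h>

/* Hypothetical syscall glue, for illustration only. */
static void syscall_entry_exit_sketch(void)
{
	/* Entry (IRQs/preempt off, user regs saved): consume the offset. */
	add_random_kstack_offset();

	/* ... dispatch and run the syscall ... */

	/* Exit: fold fresh bits into the per-CPU offset for next time. */
	choose_random_kstack_offset(get_random_u16());
}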
diff --git a/include/linux/range.h b/include/linux/range.h
index 274681cc3154..d7f98e1285d7 100644
--- a/include/linux/range.h
+++ b/include/linux/range.h
@@ -13,6 +13,20 @@ static inline u64 range_len(const struct range *range)
return range->end - range->start + 1;
}
+/* True if r1 completely contains r2 */
+static inline bool range_contains(const struct range *r1,
+ const struct range *r2)
+{
+ return r1->start <= r2->start && r1->end >= r2->end;
+}
+
+/* True if any part of r1 overlaps r2 */
+static inline bool range_overlaps(const struct range *r1,
+ const struct range *r2)
+{
+ return r1->start <= r2->end && r1->end >= r2->start;
+}
+
int add_range(struct range *range, int az, int nr_range,
u64 start, u64 end);
@@ -26,12 +40,10 @@ int clean_sort_range(struct range *range, int az);
void sort_range(struct range *range, int nr_range);
-#define MAX_RESOURCE ((resource_size_t)~0)
-static inline resource_size_t cap_resource(u64 val)
-{
- if (val > MAX_RESOURCE)
- return MAX_RESOURCE;
+#define DEFINE_RANGE(_start, _end) \
+(struct range) { \
+ .start = (_start), \
+ .end = (_end), \
+ }
- return val;
-}
#endif
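[Editor's note] Since DEFINE_RANGE() expands to a compound literal, the new predicates compose directly with it. A small illustrative sketch:

#include <linux/range.h>

static bool range_sketch(void)
{
	struct range a = DEFINE_RANGE(0x1000, 0x1fff);
	struct range b = DEFINE_RANGE(0x1800, 0x2fff);

	/* a and b share [0x1800, 0x1fff]; neither contains the other. */
	return range_overlaps(&a, &b) && !range_contains(&a, &b);
}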
diff --git a/include/linux/ras.h b/include/linux/ras.h
index 1f4048bf2674..468941bfe855 100644
--- a/include/linux/ras.h
+++ b/include/linux/ras.h
@@ -24,7 +24,7 @@ int __init parse_cec_param(char *str);
void log_non_standard_event(const guid_t *sec_type,
const guid_t *fru_id, const char *fru_text,
const u8 sev, const u8 *err, const u32 len);
-void log_arm_hw_error(struct cper_sec_proc_arm *err);
+void log_arm_hw_error(struct cper_sec_proc_arm *err, const u8 sev);
#else
static inline void
log_non_standard_event(const guid_t *sec_type,
@@ -32,7 +32,35 @@ log_non_standard_event(const guid_t *sec_type,
const u8 sev, const u8 *err, const u32 len)
{ return; }
static inline void
-log_arm_hw_error(struct cper_sec_proc_arm *err) { return; }
+log_arm_hw_error(struct cper_sec_proc_arm *err, const u8 sev) { return; }
#endif
+struct atl_err {
+ u64 addr;
+ u64 ipid;
+ u32 cpu;
+};
+
+#if IS_ENABLED(CONFIG_AMD_ATL)
+void amd_atl_register_decoder(unsigned long (*f)(struct atl_err *));
+void amd_atl_unregister_decoder(void);
+void amd_retire_dram_row(struct atl_err *err);
+unsigned long amd_convert_umc_mca_addr_to_sys_addr(struct atl_err *err);
+#else
+static inline void amd_retire_dram_row(struct atl_err *err) { }
+static inline unsigned long
+amd_convert_umc_mca_addr_to_sys_addr(struct atl_err *err) { return -EINVAL; }
+#endif /* CONFIG_AMD_ATL */
+
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+#include <asm/smp_plat.h>
+/*
+ * Include ARM-specific SMP header which provides a function mapping mpidr to
+ * CPU logical index.
+ */
+#define GET_LOGICAL_INDEX(mpidr) get_logical_index(mpidr & MPIDR_HWID_BITMASK)
+#else
+#define GET_LOGICAL_INDEX(mpidr) -EINVAL
+#endif /* CONFIG_ARM || CONFIG_ARM64 */
+
#endif /* __RAS_H__ */
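[Editor's note] A hedged sketch of a consumer of the new AMD ATL hooks; the caller and the error-value convention here are assumptions for illustration, not the EDAC code's actual flow.

#include <linux/err.h>
#include <linux/ras.h>

static void umc_error_sketch(u64 norm_addr, u64 ipid, u32 cpu)
{
	struct atl_err a_err = {
		.addr = norm_addr,
		.ipid = ipid,
		.cpu  = cpu,
	};
	unsigned long sys_addr;

	sys_addr = amd_convert_umc_mca_addr_to_sys_addr(&a_err);
	if (IS_ERR_VALUE(sys_addr))
		return;	/* no decoder registered, or translation failed */

	/* Ask the platform to retire the affected DRAM row. */
	amd_retire_dram_row(&a_err);
}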
diff --git a/include/linux/raspberrypi/vchiq.h b/include/linux/raspberrypi/vchiq.h
new file mode 100644
index 000000000000..ee4469f4fc51
--- /dev/null
+++ b/include/linux/raspberrypi/vchiq.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
+
+#ifndef VCHIQ_H
+#define VCHIQ_H
+
+#define VCHIQ_MAKE_FOURCC(x0, x1, x2, x3) \
+ (((x0) << 24) | ((x1) << 16) | ((x2) << 8) | (x3))
+
+enum vchiq_reason {
+ VCHIQ_SERVICE_OPENED, /* service, -, - */
+ VCHIQ_SERVICE_CLOSED, /* service, -, - */
+ VCHIQ_MESSAGE_AVAILABLE, /* service, header, - */
+ VCHIQ_BULK_TRANSMIT_DONE, /* service, -, bulk_userdata */
+ VCHIQ_BULK_RECEIVE_DONE, /* service, -, bulk_userdata */
+ VCHIQ_BULK_TRANSMIT_ABORTED, /* service, -, bulk_userdata */
+ VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
+};
+
+enum vchiq_bulk_mode {
+ VCHIQ_BULK_MODE_CALLBACK,
+ VCHIQ_BULK_MODE_BLOCKING,
+ VCHIQ_BULK_MODE_NOCALLBACK,
+ VCHIQ_BULK_MODE_WAITING /* Reserved for internal use */
+};
+
+enum vchiq_service_option {
+ VCHIQ_SERVICE_OPTION_AUTOCLOSE,
+ VCHIQ_SERVICE_OPTION_SLOT_QUOTA,
+ VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA,
+ VCHIQ_SERVICE_OPTION_SYNCHRONOUS,
+ VCHIQ_SERVICE_OPTION_TRACE
+};
+
+struct vchiq_header {
+ /* The message identifier - opaque to applications. */
+ int msgid;
+
+ /* Size of message data. */
+ unsigned int size;
+
+ char data[]; /* message */
+};
+
+struct vchiq_element {
+ const void __user *data;
+ unsigned int size;
+};
+
+struct vchiq_instance;
+struct vchiq_state;
+
+struct vchiq_service_base {
+ int fourcc;
+ int (*callback)(struct vchiq_instance *instance,
+ enum vchiq_reason reason,
+ struct vchiq_header *header,
+ unsigned int handle,
+ void *cb_data, void __user *cb_userdata);
+ void *userdata;
+};
+
+struct vchiq_completion_data_kernel {
+ enum vchiq_reason reason;
+ struct vchiq_header *header;
+ void *service_userdata;
+ void *cb_data;
+ void __user *cb_userdata;
+};
+
+struct vchiq_service_params_kernel {
+ int fourcc;
+ int (*callback)(struct vchiq_instance *instance,
+ enum vchiq_reason reason,
+ struct vchiq_header *header,
+ unsigned int handle,
+ void *cb_data, void __user *cb_userdata);
+ void *userdata;
+ short version; /* Increment for non-trivial changes */
+ short version_min; /* Update for incompatible changes */
+};
+
+extern int vchiq_initialise(struct vchiq_state *state,
+ struct vchiq_instance **pinstance);
+extern int vchiq_shutdown(struct vchiq_instance *instance);
+extern int vchiq_connect(struct vchiq_instance *instance);
+extern int vchiq_open_service(struct vchiq_instance *instance,
+ const struct vchiq_service_params_kernel *params,
+ unsigned int *pservice);
+extern int vchiq_close_service(struct vchiq_instance *instance,
+ unsigned int service);
+extern int vchiq_use_service(struct vchiq_instance *instance, unsigned int service);
+extern int vchiq_release_service(struct vchiq_instance *instance,
+ unsigned int service);
+extern void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle,
+ struct vchiq_header *header);
+extern void vchiq_release_message(struct vchiq_instance *instance, unsigned int service,
+ struct vchiq_header *header);
+extern int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle,
+ void *data, unsigned int size);
+extern int vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int service,
+ const void *data, unsigned int size, void *userdata,
+ enum vchiq_bulk_mode mode);
+extern int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int service,
+ void *data, unsigned int size, void *userdata,
+ enum vchiq_bulk_mode mode);
+extern void *vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int service);
+extern int vchiq_get_peer_version(struct vchiq_instance *instance, unsigned int handle,
+ short *peer_version);
+extern struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle);
+
+#endif /* VCHIQ_H */
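[Editor's note] Taken together, the declarations above form a connect/open/message lifecycle. A hedged sketch of a minimal kernel client; the FOURCC, callback body, and error handling are invented for illustration.

#include <linux/raspberrypi/vchiq.h>

static int echo_callback(struct vchiq_instance *instance,
			 enum vchiq_reason reason,
			 struct vchiq_header *header,
			 unsigned int handle,
			 void *cb_data, void __user *cb_userdata)
{
	/* Return messages to the slot as soon as they are consumed. */
	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		vchiq_release_message(instance, handle, header);
	return 0;
}

static int open_echo_service_sketch(struct vchiq_state *state)
{
	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('e', 'c', 'h', 'o'),
		.callback    = echo_callback,
		.version     = 1,
		.version_min = 1,
	};
	struct vchiq_instance *instance;
	unsigned int handle;
	int ret;

	ret = vchiq_initialise(state, &instance);
	if (ret)
		return ret;
	ret = vchiq_connect(instance);
	if (ret)
		goto shutdown;
	ret = vchiq_open_service(instance, &params, &handle);
	if (ret)
		goto shutdown;
	return 0;

shutdown:
	vchiq_shutdown(instance);
	return ret;
}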
diff --git a/include/linux/raspberrypi/vchiq_arm.h b/include/linux/raspberrypi/vchiq_arm.h
new file mode 100644
index 000000000000..e32b02f99024
--- /dev/null
+++ b/include/linux/raspberrypi/vchiq_arm.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
+ * Copyright (c) 2010-2012 Broadcom. All rights reserved.
+ */
+
+#ifndef VCHIQ_ARM_H
+#define VCHIQ_ARM_H
+
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+#include <linux/atomic.h>
+#include "vchiq_core.h"
+#include "vchiq_debugfs.h"
+
+/* Some per-instance constants */
+#define MAX_COMPLETIONS 128
+#define MAX_SERVICES 64
+#define MAX_ELEMENTS 8
+#define MSG_QUEUE_SIZE 128
+
+#define VCHIQ_DRV_MAX_CALLBACKS 10
+
+struct rpi_firmware;
+struct vchiq_device;
+
+enum USE_TYPE_E {
+ USE_TYPE_SERVICE,
+ USE_TYPE_VCHIQ
+};
+
+struct vchiq_platform_info {
+ unsigned int cache_line_size;
+};
+
+struct vchiq_drv_mgmt {
+ struct rpi_firmware *fw;
+ const struct vchiq_platform_info *info;
+
+ bool connected;
+ int num_deferred_callbacks;
+ /* Protects connected and num_deferred_callbacks */
+ struct mutex connected_mutex;
+
+ void (*deferred_callback[VCHIQ_DRV_MAX_CALLBACKS])(void);
+
+ struct semaphore free_fragments_sema;
+ struct semaphore free_fragments_mutex;
+ char *fragments_base;
+ char *free_fragments;
+ unsigned int fragments_size;
+
+ void __iomem *regs;
+
+ struct vchiq_state state;
+};
+
+struct user_service {
+ struct vchiq_service *service;
+ void __user *userdata;
+ struct vchiq_instance *instance;
+ char is_vchi;
+ char dequeue_pending;
+ char close_pending;
+ int message_available_pos;
+ int msg_insert;
+ int msg_remove;
+ struct completion insert_event;
+ struct completion remove_event;
+ struct completion close_event;
+ struct vchiq_header *msg_queue[MSG_QUEUE_SIZE];
+};
+
+struct bulk_waiter_node {
+ struct bulk_waiter bulk_waiter;
+ int pid;
+ struct list_head list;
+};
+
+struct vchiq_instance {
+ struct vchiq_state *state;
+ struct vchiq_completion_data_kernel completions[MAX_COMPLETIONS];
+ int completion_insert;
+ int completion_remove;
+ struct completion insert_event;
+ struct completion remove_event;
+ struct mutex completion_mutex;
+
+ int connected;
+ int closing;
+ int pid;
+ int mark;
+ int use_close_delivered;
+ int trace;
+
+ struct list_head bulk_waiter_list;
+ struct mutex bulk_waiter_list_mutex;
+
+ struct vchiq_debugfs_node debugfs_node;
+};
+
+int
+vchiq_use_service(struct vchiq_instance *instance, unsigned int handle);
+
+extern int
+vchiq_release_service(struct vchiq_instance *instance, unsigned int handle);
+
+extern int
+vchiq_check_service(struct vchiq_service *service);
+
+extern void
+vchiq_dump_service_use_state(struct vchiq_state *state);
+
+extern int
+vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
+ enum USE_TYPE_E use_type);
+extern int
+vchiq_release_internal(struct vchiq_state *state,
+ struct vchiq_service *service);
+
+extern struct vchiq_debugfs_node *
+vchiq_instance_get_debugfs_node(struct vchiq_instance *instance);
+
+extern int
+vchiq_instance_get_use_count(struct vchiq_instance *instance);
+
+extern int
+vchiq_instance_get_pid(struct vchiq_instance *instance);
+
+extern int
+vchiq_instance_get_trace(struct vchiq_instance *instance);
+
+extern void
+vchiq_instance_set_trace(struct vchiq_instance *instance, int trace);
+
+extern void
+vchiq_add_connected_callback(struct vchiq_device *device,
+ void (*callback)(void));
+
+#if IS_ENABLED(CONFIG_VCHIQ_CDEV)
+
+extern void
+vchiq_deregister_chrdev(void);
+
+extern int
+vchiq_register_chrdev(struct device *parent);
+
+#else
+
+static inline void vchiq_deregister_chrdev(void) { }
+static inline int vchiq_register_chrdev(struct device *parent) { return 0; }
+
+#endif /* IS_ENABLED(CONFIG_VCHIQ_CDEV) */
+
+extern int
+service_callback(struct vchiq_instance *vchiq_instance, enum vchiq_reason reason,
+ struct vchiq_header *header, unsigned int handle,
+ void *cb_data, void __user *cb_userdata);
+
+extern void
+free_bulk_waiter(struct vchiq_instance *instance);
+
+#endif /* VCHIQ_ARM_H */
diff --git a/include/linux/raspberrypi/vchiq_bus.h b/include/linux/raspberrypi/vchiq_bus.h
new file mode 100644
index 000000000000..9de179b39f85
--- /dev/null
+++ b/include/linux/raspberrypi/vchiq_bus.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Ideas On Board Oy
+ */
+
+#ifndef _VCHIQ_DEVICE_H
+#define _VCHIQ_DEVICE_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct vchiq_drv_mgmt;
+
+struct vchiq_device {
+ struct device dev;
+ struct vchiq_drv_mgmt *drv_mgmt;
+};
+
+struct vchiq_driver {
+ int (*probe)(struct vchiq_device *device);
+ void (*remove)(struct vchiq_device *device);
+ int (*resume)(struct vchiq_device *device);
+ int (*suspend)(struct vchiq_device *device,
+ pm_message_t state);
+
+ const struct vchiq_device_id *id_table;
+ struct device_driver driver;
+};
+
+static inline struct vchiq_device *to_vchiq_device(struct device *d)
+{
+ return container_of(d, struct vchiq_device, dev);
+}
+
+static inline struct vchiq_driver *to_vchiq_driver(struct device_driver *d)
+{
+ return container_of(d, struct vchiq_driver, driver);
+}
+
+extern const struct bus_type vchiq_bus_type;
+
+struct vchiq_device *
+vchiq_device_register(struct device *parent, const char *name);
+void vchiq_device_unregister(struct vchiq_device *dev);
+
+int vchiq_driver_register(struct vchiq_driver *vchiq_drv);
+void vchiq_driver_unregister(struct vchiq_driver *vchiq_drv);
+
+/**
+ * module_vchiq_driver() - Helper macro for registering a vchiq driver
+ * @__vchiq_driver: vchiq driver struct
+ *
+ * Helper macro for vchiq drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_vchiq_driver(__vchiq_driver) \
+ module_driver(__vchiq_driver, vchiq_driver_register, vchiq_driver_unregister)
+
+#endif /* _VCHIQ_DEVICE_H */
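[Editor's note] The bus glue above follows the usual kernel driver-model shape, so a client reduces to a probe/remove pair plus module_vchiq_driver(). A hedged sketch; the driver name is invented, and a real driver would also supply an id_table for matching.

#include <linux/module.h>
#include <linux/raspberrypi/vchiq_bus.h>

static int demo_probe(struct vchiq_device *device)
{
	/* Claim resources, register with a subsystem, etc. */
	return 0;
}

static void demo_remove(struct vchiq_device *device)
{
}

static struct vchiq_driver demo_driver = {
	.probe  = demo_probe,
	.remove = demo_remove,
	.driver = {
		.name = "vchiq-demo",
	},
};
module_vchiq_driver(demo_driver);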
diff --git a/include/linux/raspberrypi/vchiq_cfg.h b/include/linux/raspberrypi/vchiq_cfg.h
new file mode 100644
index 000000000000..a16d0299996c
--- /dev/null
+++ b/include/linux/raspberrypi/vchiq_cfg.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2014 Broadcom. All rights reserved. */
+
+#ifndef VCHIQ_CFG_H
+#define VCHIQ_CFG_H
+
+#define VCHIQ_MAGIC VCHIQ_MAKE_FOURCC('V', 'C', 'H', 'I')
+/* The version of VCHIQ - change with any non-trivial change */
+#define VCHIQ_VERSION 8
+/*
+ * The minimum compatible version - update to match VCHIQ_VERSION with any
+ * incompatible change
+ */
+#define VCHIQ_VERSION_MIN 3
+
+/* The version that introduced the VCHIQ_IOC_LIB_VERSION ioctl */
+#define VCHIQ_VERSION_LIB_VERSION 7
+
+/* The version that introduced the VCHIQ_IOC_CLOSE_DELIVERED ioctl */
+#define VCHIQ_VERSION_CLOSE_DELIVERED 7
+
+/* The version that made it safe to use SYNCHRONOUS mode */
+#define VCHIQ_VERSION_SYNCHRONOUS_MODE 8
+
+#define VCHIQ_MAX_STATES 1
+#define VCHIQ_MAX_SERVICES 4096
+#define VCHIQ_MAX_SLOTS 128
+#define VCHIQ_MAX_SLOTS_PER_SIDE 64
+
+#define VCHIQ_NUM_CURRENT_BULKS 32
+#define VCHIQ_NUM_SERVICE_BULKS 4
+
+#ifndef VCHIQ_ENABLE_DEBUG
+#define VCHIQ_ENABLE_DEBUG 1
+#endif
+
+#ifndef VCHIQ_ENABLE_STATS
+#define VCHIQ_ENABLE_STATS 1
+#endif
+
+#endif /* VCHIQ_CFG_H */
diff --git a/include/linux/raspberrypi/vchiq_core.h b/include/linux/raspberrypi/vchiq_core.h
new file mode 100644
index 000000000000..e7bf7a114985
--- /dev/null
+++ b/include/linux/raspberrypi/vchiq_core.h
@@ -0,0 +1,646 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
+
+#ifndef VCHIQ_CORE_H
+#define VCHIQ_CORE_H
+
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/dev_printk.h>
+#include <linux/kthread.h>
+#include <linux/kref.h>
+#include <linux/rcupdate.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock_types.h>
+#include <linux/wait.h>
+
+#include "vchiq.h"
+#include "vchiq_cfg.h"
+
+/* Do this so that we can test-build the code on non-rpi systems */
+#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
+
+#else
+
+#ifndef dsb
+#define dsb(a)
+#endif
+
+#endif /* IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE) */
+
+#define VCHIQ_SERVICE_HANDLE_INVALID 0
+
+#define VCHIQ_SLOT_SIZE 4096
+#define VCHIQ_MAX_MSG_SIZE (VCHIQ_SLOT_SIZE - sizeof(struct vchiq_header))
+
+#define VCHIQ_SLOT_MASK (VCHIQ_SLOT_SIZE - 1)
+#define VCHIQ_SLOT_QUEUE_MASK (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
+#define VCHIQ_SLOT_ZERO_SLOTS DIV_ROUND_UP(sizeof(struct vchiq_slot_zero), \
+ VCHIQ_SLOT_SIZE)
+
+#define BITSET_SIZE(b) ((b + 31) >> 5)
+#define BITSET_WORD(b) (b >> 5)
+#define BITSET_BIT(b) (1 << (b & 31))
+#define BITSET_IS_SET(bs, b) (bs[BITSET_WORD(b)] & BITSET_BIT(b))
+#define BITSET_SET(bs, b) (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
+
+enum {
+ DEBUG_ENTRIES,
+#if VCHIQ_ENABLE_DEBUG
+ DEBUG_SLOT_HANDLER_COUNT,
+ DEBUG_SLOT_HANDLER_LINE,
+ DEBUG_PARSE_LINE,
+ DEBUG_PARSE_HEADER,
+ DEBUG_PARSE_MSGID,
+ DEBUG_AWAIT_COMPLETION_LINE,
+ DEBUG_DEQUEUE_MESSAGE_LINE,
+ DEBUG_SERVICE_CALLBACK_LINE,
+ DEBUG_MSG_QUEUE_FULL_COUNT,
+ DEBUG_COMPLETION_QUEUE_FULL_COUNT,
+#endif
+ DEBUG_MAX
+};
+
+#if VCHIQ_ENABLE_DEBUG
+
+#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug
+#define DEBUG_TRACE(d) \
+ do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(sy); } while (0)
+#define DEBUG_VALUE(d, v) \
+ do { debug_ptr[DEBUG_ ## d] = (v); dsb(sy); } while (0)
+#define DEBUG_COUNT(d) \
+ do { debug_ptr[DEBUG_ ## d]++; dsb(sy); } while (0)
+
+#else /* VCHIQ_ENABLE_DEBUG */
+
+#define DEBUG_INITIALISE(local)
+#define DEBUG_TRACE(d)
+#define DEBUG_VALUE(d, v)
+#define DEBUG_COUNT(d)
+
+#endif /* VCHIQ_ENABLE_DEBUG */
+
+enum vchiq_connstate {
+ VCHIQ_CONNSTATE_DISCONNECTED,
+ VCHIQ_CONNSTATE_CONNECTING,
+ VCHIQ_CONNSTATE_CONNECTED,
+ VCHIQ_CONNSTATE_PAUSING,
+ VCHIQ_CONNSTATE_PAUSE_SENT,
+ VCHIQ_CONNSTATE_PAUSED,
+ VCHIQ_CONNSTATE_RESUMING,
+ VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
+ VCHIQ_CONNSTATE_RESUME_TIMEOUT
+};
+
+enum {
+ VCHIQ_SRVSTATE_FREE,
+ VCHIQ_SRVSTATE_HIDDEN,
+ VCHIQ_SRVSTATE_LISTENING,
+ VCHIQ_SRVSTATE_OPENING,
+ VCHIQ_SRVSTATE_OPEN,
+ VCHIQ_SRVSTATE_OPENSYNC,
+ VCHIQ_SRVSTATE_CLOSESENT,
+ VCHIQ_SRVSTATE_CLOSERECVD,
+ VCHIQ_SRVSTATE_CLOSEWAIT,
+ VCHIQ_SRVSTATE_CLOSED
+};
+
+enum vchiq_bulk_dir {
+ VCHIQ_BULK_TRANSMIT,
+ VCHIQ_BULK_RECEIVE
+};
+
+struct vchiq_bulk {
+ short mode;
+ short dir;
+ void *cb_data;
+ void __user *cb_userdata;
+ struct bulk_waiter *waiter;
+ dma_addr_t dma_addr;
+ int size;
+ void *remote_data;
+ int remote_size;
+ int actual;
+ void *offset;
+ void __user *uoffset;
+};
+
+struct vchiq_bulk_queue {
+ int local_insert; /* Where to insert the next local bulk */
+ int remote_insert; /* Where to insert the next remote bulk (master) */
+ int process; /* Bulk to transfer next */
+ int remote_notify; /* Bulk to notify the remote client of next (mstr) */
+ int remove; /* Bulk to notify the local client of, and remove, next */
+ struct vchiq_bulk bulks[VCHIQ_NUM_SERVICE_BULKS];
+};
+
+/*
+ * Remote events provide a way of presenting several virtual doorbells to a
+ * peer (ARM host to VPU) using only one physical doorbell. They can be thought
+ * of as a way for the peer to signal a semaphore, in this case implemented as
+ * a workqueue.
+ *
+ * Remote events remain signalled until acknowledged by the receiver, and they
+ * are non-counting. They are designed in such a way as to minimise the number
+ * of interrupts and avoid unnecessary waiting.
+ *
+ * A remote_event is a small data structure that lives in shared memory. It
+ * comprises two booleans - armed and fired:
+ *
+ * The sender sets fired when they signal the receiver.
+ * If fired is set, the receiver has been signalled and need not wait.
+ * The receiver sets the armed field before they begin to wait.
+ * If armed is set, the receiver is waiting and wishes to be woken by interrupt.
+ */
+struct remote_event {
+ int armed;
+ int fired;
+ u32 __unused;
+};
+
+struct opaque_platform_state;
+
+struct vchiq_slot {
+ char data[VCHIQ_SLOT_SIZE];
+};
+
+struct vchiq_slot_info {
+ /* Use two counters rather than one to avoid the need for a mutex. */
+ short use_count;
+ short release_count;
+};
+
+/*
+ * VCHIQ is a reliable connection-oriented datagram protocol.
+ *
+ * A VCHIQ service is equivalent to a TCP connection, except:
+ * + FOURCCs are used for the rendezvous, and port numbers are assigned at the
+ * time the connection is established.
+ * + There is less of a distinction between server and client sockets, the only
+ * difference being which end makes the first move.
+ * + For a multi-client server, the server creates new "listening" services as
+ * the existing one becomes connected - there is no need to specify the
+ * maximum number of clients up front.
+ * + Data transfer is reliable but packetized (messages have defined ends).
+ * + Messages can be either short (capable of fitting in a slot) and in-band,
+ * or copied between external buffers (bulk transfers).
+ */
+struct vchiq_service {
+ struct vchiq_service_base base;
+ unsigned int handle;
+ struct kref ref_count;
+ struct rcu_head rcu;
+ int srvstate;
+ void (*userdata_term)(void *userdata);
+ unsigned int localport;
+ unsigned int remoteport;
+ int public_fourcc;
+ int client_id;
+ char auto_close;
+ char sync;
+ char closing;
+ char trace;
+ atomic_t poll_flags;
+ short version;
+ short version_min;
+ short peer_version;
+
+ struct vchiq_state *state;
+ struct vchiq_instance *instance;
+
+ int service_use_count;
+
+ struct vchiq_bulk_queue bulk_tx;
+ struct vchiq_bulk_queue bulk_rx;
+
+ struct completion remove_event;
+ struct completion bulk_remove_event;
+ struct mutex bulk_mutex;
+
+ struct service_stats_struct {
+ int quota_stalls;
+ int slot_stalls;
+ int bulk_stalls;
+ int error_count;
+ int ctrl_tx_count;
+ int ctrl_rx_count;
+ int bulk_tx_count;
+ int bulk_rx_count;
+ int bulk_aborted_count;
+ u64 ctrl_tx_bytes;
+ u64 ctrl_rx_bytes;
+ u64 bulk_tx_bytes;
+ u64 bulk_rx_bytes;
+ } stats;
+
+ int msg_queue_read;
+ int msg_queue_write;
+ struct completion msg_queue_pop;
+ struct completion msg_queue_push;
+ struct vchiq_header *msg_queue[VCHIQ_MAX_SLOTS];
+};
+
+/*
+ * The quota information is outside struct vchiq_service so that it can
+ * be statically allocated, since for accounting reasons a service's slot
+ * usage is carried over between users of the same port number.
+ */
+struct vchiq_service_quota {
+ unsigned short slot_quota;
+ unsigned short slot_use_count;
+ unsigned short message_quota;
+ unsigned short message_use_count;
+ struct completion quota_event;
+ int previous_tx_index;
+};
+
+struct vchiq_shared_state {
+ /* A non-zero value here indicates that the content is valid. */
+ int initialised;
+
+ /* The first and last (inclusive) slots allocated to the owner. */
+ int slot_first;
+ int slot_last;
+
+ /* The slot allocated to synchronous messages from the owner. */
+ int slot_sync;
+
+ /*
+ * Signalling this event indicates that owner's slot handler thread
+ * should run.
+ */
+ struct remote_event trigger;
+
+ /*
+ * Indicates the byte position within the stream where the next message
+ * will be written. The least significant bits are an index into the
+ * slot. The next bits are the index of the slot in slot_queue.
+ */
+ int tx_pos;
+
+ /* This event should be signalled when a slot is recycled. */
+ struct remote_event recycle;
+
+ /* The slot_queue index where the next recycled slot will be written. */
+ int slot_queue_recycle;
+
+ /* This event should be signalled when a synchronous message is sent. */
+ struct remote_event sync_trigger;
+
+ /*
+ * This event should be signalled when a synchronous message has been
+ * released.
+ */
+ struct remote_event sync_release;
+
+ /* A circular buffer of slot indexes. */
+ int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];
+
+ /* Debugging state */
+ int debug[DEBUG_MAX];
+};
+
+/*
+ * vchiq_slot_zero describes the memory shared between the ARM host and the
+ * VideoCore VPU. The "master" and "slave" states are owned by the respective
+ * sides but visible to the other; the slots are shared, and the remaining
+ * fields are read-only.
+ *
+ * In the configuration used by this implementation, the memory is allocated
+ * by the host, the VPU is the master (the side which controls the DMA for bulk
+ * transfers), and the host is the slave.
+ *
+ * The ownership of slots changes with use:
+ * + When empty they are owned by the sender.
+ * + When partially filled they are shared with the receiver.
+ * + When completely full they are owned by the receiver.
+ * + When the receiver has finished processing the contents, they are recycled
+ * back to the sender.
+ */
+struct vchiq_slot_zero {
+ int magic;
+ short version;
+ short version_min;
+ int slot_zero_size;
+ int slot_size;
+ int max_slots;
+ int max_slots_per_side;
+ int platform_data[2];
+ struct vchiq_shared_state master;
+ struct vchiq_shared_state slave;
+ struct vchiq_slot_info slots[VCHIQ_MAX_SLOTS];
+};
+
+/*
+ * This is the private runtime state used by each side. The same structure was
+ * originally used by both sides, but implementations have since diverged.
+ */
+struct vchiq_state {
+ struct device *dev;
+ int id;
+ int initialised;
+ enum vchiq_connstate conn_state;
+ short version_common;
+
+ struct vchiq_shared_state *local;
+ struct vchiq_shared_state *remote;
+ struct vchiq_slot *slot_data;
+
+ unsigned short default_slot_quota;
+ unsigned short default_message_quota;
+
+ /* Event indicating connect message received */
+ struct completion connect;
+
+ /* Mutex protecting services */
+ struct mutex mutex;
+ struct vchiq_instance **instance;
+
+ /* Processes all incoming messages which aren't synchronous */
+ struct task_struct *slot_handler_thread;
+
+ /*
+ * Slots which have been fully processed and released by the (peer)
+ * receiver are added to the receiver queue, which is asynchronously
+ * processed by the recycle thread.
+ */
+ struct task_struct *recycle_thread;
+
+ /*
+ * Processes incoming synchronous messages
+ *
+ * The synchronous message channel is shared between all synchronous
+ * services, and provides a way for urgent messages to bypass
+ * potentially long queues of asynchronous messages in the normal slots.
+ *
+ * There can be only one outstanding synchronous message in
+ * each direction, and as a precious shared resource synchronous
+ * services should be used sparingly.
+ */
+ struct task_struct *sync_thread;
+
+ /* Local implementation of the trigger remote event */
+ wait_queue_head_t trigger_event;
+
+ /* Local implementation of the recycle remote event */
+ wait_queue_head_t recycle_event;
+
+ /* Local implementation of the sync trigger remote event */
+ wait_queue_head_t sync_trigger_event;
+
+ /* Local implementation of the sync release remote event */
+ wait_queue_head_t sync_release_event;
+
+ char *tx_data;
+ char *rx_data;
+ struct vchiq_slot_info *rx_info;
+
+ struct mutex slot_mutex;
+
+ struct mutex recycle_mutex;
+
+ struct mutex sync_mutex;
+
+ spinlock_t msg_queue_spinlock;
+
+ spinlock_t bulk_waiter_spinlock;
+
+ spinlock_t quota_spinlock;
+
+ /*
+ * Indicates the byte position within the stream from where the next
+ * message will be read. The least significant bits are an index into
+ * the slot. The next bits are the index of the slot in
+ * remote->slot_queue.
+ */
+ int rx_pos;
+
+ /*
+ * A cached copy of local->tx_pos. Only write to local->tx_pos, and read
+ * from remote->tx_pos.
+ */
+ int local_tx_pos;
+
+ /* The slot_queue index of the slot to become available next. */
+ int slot_queue_available;
+
+ /* A flag to indicate if any poll has been requested */
+ int poll_needed;
+
+ /* The index of the previous slot used for data messages. */
+ int previous_data_index;
+
+ /* The number of slots occupied by data messages. */
+ unsigned short data_use_count;
+
+ /* The maximum number of slots to be occupied by data messages. */
+ unsigned short data_quota;
+
+ /* An array of bit sets indicating which services must be polled. */
+ atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];
+
+ /* The number of the first unused service */
+ int unused_service;
+
+ /* Signalled when a free slot becomes available. */
+ struct completion slot_available_event;
+
+ /* Signalled when a free data slot becomes available. */
+ struct completion data_quota_event;
+
+ struct state_stats_struct {
+ int slot_stalls;
+ int data_stalls;
+ int ctrl_tx_count;
+ int ctrl_rx_count;
+ int error_count;
+ } stats;
+
+ struct vchiq_service __rcu *services[VCHIQ_MAX_SERVICES];
+ struct vchiq_service_quota service_quotas[VCHIQ_MAX_SERVICES];
+ struct vchiq_slot_info slot_info[VCHIQ_MAX_SLOTS];
+
+ struct opaque_platform_state *platform_state;
+};
+
+struct pagelist {
+ u32 length;
+ u16 type;
+ u16 offset;
+ u32 addrs[1]; /* N.B. 12 LSBs hold the number
+ * of following pages at consecutive
+ * addresses.
+ */
+};
+
+struct vchiq_pagelist_info {
+ struct pagelist *pagelist;
+ size_t pagelist_buffer_size;
+ dma_addr_t dma_addr;
+ enum dma_data_direction dma_dir;
+ unsigned int num_pages;
+ unsigned int pages_need_release;
+ struct page **pages;
+ struct scatterlist *scatterlist;
+ unsigned int scatterlist_mapped;
+};
+
+static inline bool vchiq_remote_initialised(const struct vchiq_state *state)
+{
+ return state->remote && state->remote->initialised;
+}
+
+struct bulk_waiter {
+ struct vchiq_bulk *bulk;
+ struct completion event;
+ int actual;
+};
+
+struct vchiq_config {
+ unsigned int max_msg_size;
+ unsigned int bulk_threshold; /* The message size above which it
+ * is better to use a bulk transfer
+ * (<= max_msg_size)
+ */
+ unsigned int max_outstanding_bulks;
+ unsigned int max_services;
+ short version; /* The version of VCHIQ */
+ short version_min; /* The minimum compatible version of VCHIQ */
+};
+
+extern spinlock_t bulk_waiter_spinlock;
+
+extern const char *
+get_conn_state_name(enum vchiq_connstate conn_state);
+
+extern struct vchiq_slot_zero *
+vchiq_init_slots(struct device *dev, void *mem_base, int mem_size);
+
+extern int
+vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev);
+
+extern int
+vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance);
+
+struct vchiq_service *
+vchiq_add_service_internal(struct vchiq_state *state,
+ const struct vchiq_service_params_kernel *params,
+ int srvstate, struct vchiq_instance *instance,
+ void (*userdata_term)(void *userdata));
+
+extern int
+vchiq_open_service_internal(struct vchiq_service *service, int client_id);
+
+extern int
+vchiq_close_service_internal(struct vchiq_service *service, int close_recvd);
+
+extern void
+vchiq_terminate_service_internal(struct vchiq_service *service);
+
+extern void
+vchiq_free_service_internal(struct vchiq_service *service);
+
+extern void
+vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance);
+
+extern void
+remote_event_pollall(struct vchiq_state *state);
+
+extern int
+vchiq_bulk_xfer_waiting(struct vchiq_instance *instance, unsigned int handle,
+ struct bulk_waiter *userdata);
+
+extern int
+vchiq_bulk_xfer_blocking(struct vchiq_instance *instance, unsigned int handle,
+ struct vchiq_bulk *bulk);
+
+extern int
+vchiq_bulk_xfer_callback(struct vchiq_instance *instance, unsigned int handle,
+ struct vchiq_bulk *bulk);
+
+extern void
+vchiq_dump_state(struct seq_file *f, struct vchiq_state *state);
+
+extern void
+request_poll(struct vchiq_state *state, struct vchiq_service *service,
+ int poll_type);
+
+struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle);
+
+extern struct vchiq_service *
+find_service_by_handle(struct vchiq_instance *instance, unsigned int handle);
+
+extern struct vchiq_service *
+find_service_by_port(struct vchiq_state *state, unsigned int localport);
+
+extern struct vchiq_service *
+find_service_for_instance(struct vchiq_instance *instance, unsigned int handle);
+
+extern struct vchiq_service *
+find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle);
+
+extern struct vchiq_service *
+__next_service_by_instance(struct vchiq_state *state,
+ struct vchiq_instance *instance,
+ int *pidx);
+
+extern struct vchiq_service *
+next_service_by_instance(struct vchiq_state *state,
+ struct vchiq_instance *instance,
+ int *pidx);
+
+extern void
+vchiq_service_get(struct vchiq_service *service);
+
+extern void
+vchiq_service_put(struct vchiq_service *service);
+
+extern int
+vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
+ ssize_t (*copy_callback)(void *context, void *dest,
+ size_t offset, size_t maxsize),
+ void *context,
+ size_t size);
+
+void vchiq_dump_platform_state(struct seq_file *f);
+
+void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f);
+
+void vchiq_dump_platform_service_state(struct seq_file *f, struct vchiq_service *service);
+
+int vchiq_use_service_internal(struct vchiq_service *service);
+
+int vchiq_release_service_internal(struct vchiq_service *service);
+
+void vchiq_on_remote_use(struct vchiq_state *state);
+
+void vchiq_on_remote_release(struct vchiq_state *state);
+
+int vchiq_platform_init_state(struct vchiq_state *state);
+
+int vchiq_check_service(struct vchiq_service *service);
+
+int vchiq_send_remote_use(struct vchiq_state *state);
+
+int vchiq_send_remote_use_active(struct vchiq_state *state);
+
+void vchiq_platform_conn_state_changed(struct vchiq_state *state,
+ enum vchiq_connstate oldstate,
+ enum vchiq_connstate newstate);
+
+void vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate);
+
+void vchiq_log_dump_mem(struct device *dev, const char *label, u32 addr,
+ const void *void_mem, size_t num_bytes);
+
+int vchiq_remove_service(struct vchiq_instance *instance, unsigned int service);
+
+int vchiq_get_client_id(struct vchiq_instance *instance, unsigned int service);
+
+void vchiq_get_config(struct vchiq_config *config);
+
+int vchiq_set_service_option(struct vchiq_instance *instance, unsigned int service,
+ enum vchiq_service_option option, int value);
+
+#endif
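[Editor's note] The remote_event description above maps naturally onto a signal/wait pair. The sketch below is illustrative only: it shows the armed/fired handshake with the doorbell functions left as hypothetical comments and the required memory barriers elided (the real implementation lives in the vchiq core, not in this header).

/* Signalling side: set 'fired', and ring the single physical doorbell
 * only if the receiver has declared itself waiting. */
static void remote_event_signal_sketch(struct remote_event *event)
{
	event->fired = 1;
	if (event->armed)
		; /* ring_doorbell(); -- hypothetical */
}

/* Receiving side: skip the wait entirely if already signalled. */
static void remote_event_wait_sketch(struct remote_event *event)
{
	if (!event->fired) {
		event->armed = 1;
		/* wait_for_doorbell_interrupt(); -- hypothetical */
		event->armed = 0;
	}
	event->fired = 0;	/* acknowledge; events are non-counting */
}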
diff --git a/include/linux/raspberrypi/vchiq_debugfs.h b/include/linux/raspberrypi/vchiq_debugfs.h
new file mode 100644
index 000000000000..b29e6693c949
--- /dev/null
+++ b/include/linux/raspberrypi/vchiq_debugfs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. */
+
+#ifndef VCHIQ_DEBUGFS_H
+#define VCHIQ_DEBUGFS_H
+
+struct vchiq_state;
+struct vchiq_instance;
+
+struct vchiq_debugfs_node {
+ struct dentry *dentry;
+};
+
+void vchiq_debugfs_init(struct vchiq_state *state);
+
+void vchiq_debugfs_deinit(void);
+
+void vchiq_debugfs_add_instance(struct vchiq_instance *instance);
+
+void vchiq_debugfs_remove_instance(struct vchiq_instance *instance);
+
+#endif /* VCHIQ_DEBUGFS_H */
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index b17e0cd0a30c..7aaad158ee37 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -22,16 +22,43 @@ static inline void ratelimit_default_init(struct ratelimit_state *rs)
DEFAULT_RATELIMIT_BURST);
}
+static inline void ratelimit_state_inc_miss(struct ratelimit_state *rs)
+{
+ atomic_inc(&rs->missed);
+}
+
+static inline int ratelimit_state_get_miss(struct ratelimit_state *rs)
+{
+ return atomic_read(&rs->missed);
+}
+
+static inline int ratelimit_state_reset_miss(struct ratelimit_state *rs)
+{
+ return atomic_xchg_relaxed(&rs->missed, 0);
+}
+
+static inline void ratelimit_state_reset_interval(struct ratelimit_state *rs, int interval_init)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&rs->lock, flags);
+ rs->interval = interval_init;
+ rs->flags &= ~RATELIMIT_INITIALIZED;
+ atomic_set(&rs->rs_n_left, rs->burst);
+ ratelimit_state_reset_miss(rs);
+ raw_spin_unlock_irqrestore(&rs->lock, flags);
+}
+
static inline void ratelimit_state_exit(struct ratelimit_state *rs)
{
+ int m;
+
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE))
return;
- if (rs->missed) {
- pr_warn("%s: %d output lines suppressed due to ratelimiting\n",
- current->comm, rs->missed);
- rs->missed = 0;
- }
+ m = ratelimit_state_reset_miss(rs);
+ if (m)
+ pr_warn("%s: %d output lines suppressed due to ratelimiting\n", current->comm, m);
}
static inline void
diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h
index b676aa419eef..b19c4354540a 100644
--- a/include/linux/ratelimit_types.h
+++ b/include/linux/ratelimit_types.h
@@ -4,31 +4,36 @@
#include <linux/bits.h>
#include <linux/param.h>
-#include <linux/spinlock_types.h>
+#include <linux/spinlock_types_raw.h>
#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
#define DEFAULT_RATELIMIT_BURST 10
/* issue num suppressed message on exit */
#define RATELIMIT_MSG_ON_RELEASE BIT(0)
+#define RATELIMIT_INITIALIZED BIT(1)
struct ratelimit_state {
raw_spinlock_t lock; /* protect the state */
int interval;
int burst;
- int printed;
- int missed;
+ atomic_t rs_n_left;
+ atomic_t missed;
+ unsigned int flags;
unsigned long begin;
- unsigned long flags;
};
-#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \
- .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
- .interval = interval_init, \
- .burst = burst_init, \
+#define RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, flags_init) { \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .interval = interval_init, \
+ .burst = burst_init, \
+ .flags = flags_init, \
}
+#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
+ RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, 0)
+
#define RATELIMIT_STATE_INIT_DISABLED \
RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST)
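[Editor's note] With missed now an atomic_t, the miss count can be read-and-cleared without taking the state lock. A hedged usage sketch (hypothetical call site; DEFINE_RATELIMIT_STATE() and __ratelimit() are the pre-existing API):

#include <linux/printk.h>
#include <linux/ratelimit.h>

static void ratelimited_event_sketch(void)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (__ratelimit(&rs))
		pr_info("event (%d suppressed since last report)\n",
			ratelimit_state_reset_miss(&rs));
}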
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index d31ecaf4fdd3..4091e978aef2 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -17,24 +17,14 @@
#ifndef _LINUX_RBTREE_H
#define _LINUX_RBTREE_H
-#include <linux/kernel.h>
+#include <linux/container_of.h>
+#include <linux/rbtree_types.h>
+
#include <linux/stddef.h>
#include <linux/rcupdate.h>
-struct rb_node {
- unsigned long __rb_parent_color;
- struct rb_node *rb_right;
- struct rb_node *rb_left;
-} __attribute__((aligned(sizeof(long))));
- /* The alignment might seem pointless, but allegedly CRIS needs it */
-
-struct rb_root {
- struct rb_node *rb_node;
-};
-
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
-#define RB_ROOT (struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
@@ -53,8 +43,36 @@ extern void rb_erase(struct rb_node *, struct rb_root *);
/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
-extern struct rb_node *rb_first(const struct rb_root *);
-extern struct rb_node *rb_last(const struct rb_root *);
+
+/*
+ * This function returns the first node (in sort order) of the tree.
+ */
+static inline struct rb_node *rb_first(const struct rb_root *root)
+{
+ struct rb_node *n;
+
+ n = root->rb_node;
+ if (!n)
+ return NULL;
+ while (n->rb_left)
+ n = n->rb_left;
+ return n;
+}
+
+/*
+ * This function returns the last node (in sort order) of the tree.
+ */
+static inline struct rb_node *rb_last(const struct rb_root *root)
+{
+ struct rb_node *n;
+
+ n = root->rb_node;
+ if (!n)
+ return NULL;
+ while (n->rb_right)
+ n = n->rb_right;
+ return n;
+}
/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
@@ -112,23 +130,6 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent
typeof(*pos), field); 1; }); \
pos = n)
-/*
- * Leftmost-cached rbtrees.
- *
- * We do not cache the rightmost node based on footprint
- * size vs number of potential users that could benefit
- * from O(1) rb_last(). Just not worth it, users that want
- * this feature can always implement the logic explicitly.
- * Furthermore, users that want to cache both pointers may
- * find it a bit asymmetric, but that's ok.
- */
-struct rb_root_cached {
- struct rb_root rb_root;
- struct rb_node *rb_leftmost;
-};
-
-#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
-
/* Same as rb_first(), but O(1) */
#define rb_first_cached(root) (root)->rb_leftmost
@@ -238,6 +239,43 @@ rb_add(struct rb_node *node, struct rb_root *tree,
}
/**
+ * rb_find_add_cached() - find equivalent @node in @tree, or add @node
+ * @node: node to look-for / insert
+ * @tree: tree to search / modify
+ * @cmp: operator defining the node order
+ *
+ * Returns the rb_node matching @node, or NULL when no match is found and @node
+ * is inserted.
+ */
+static __always_inline struct rb_node *
+rb_find_add_cached(struct rb_node *node, struct rb_root_cached *tree,
+ int (*cmp)(const struct rb_node *new, const struct rb_node *exist))
+{
+ bool leftmost = true;
+ struct rb_node **link = &tree->rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ int c;
+
+ while (*link) {
+ parent = *link;
+ c = cmp(node, parent);
+
+ if (c < 0) {
+ link = &parent->rb_left;
+ } else if (c > 0) {
+ link = &parent->rb_right;
+ leftmost = false;
+ } else {
+ return parent;
+ }
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color_cached(node, tree, leftmost);
+ return NULL;
+}
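A sketch of a matching comparator and call site, reusing the hypothetical 'struct mynode' from the earlier example:

	static int mynode_cmp(const struct rb_node *new, const struct rb_node *exist)
	{
		int a = rb_entry(new, struct mynode, node)->key;
		int b = rb_entry(exist, struct mynode, node)->key;

		return a < b ? -1 : (a > b ? 1 : 0);
	}

	/* One descent: either inserts 'n' or returns the existing duplicate. */
	struct rb_node *dup = rb_find_add_cached(&n->node, &cached_tree, mynode_cmp);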
+
+/**
* rb_find_add() - find equivalent @node in @tree, or add @node
* @node: node to look-for / insert
* @tree: tree to search / modify
@@ -272,6 +310,42 @@ rb_find_add(struct rb_node *node, struct rb_root *tree,
}
/**
+ * rb_find_add_rcu() - find equivalent @node in @tree, or add @node
+ * @node: node to look-for / insert
+ * @tree: tree to search / modify
+ * @cmp: operator defining the node order
+ *
+ * Adds a Store-Release for link_node.
+ *
+ * Returns the rb_node matching @node, or NULL when no match is found and @node
+ * is inserted.
+ */
+static __always_inline struct rb_node *
+rb_find_add_rcu(struct rb_node *node, struct rb_root *tree,
+ int (*cmp)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_node;
+ struct rb_node *parent = NULL;
+ int c;
+
+ while (*link) {
+ parent = *link;
+ c = cmp(node, parent);
+
+ if (c < 0)
+ link = &parent->rb_left;
+ else if (c > 0)
+ link = &parent->rb_right;
+ else
+ return parent;
+ }
+
+ rb_link_node_rcu(node, parent, link);
+ rb_insert_color(node, tree);
+ return NULL;
+}
+
+/**
* rb_find() - find @key in tree @tree
* @key: key to match
* @tree: tree to search
@@ -300,6 +374,37 @@ rb_find(const void *key, const struct rb_root *tree,
}
/**
+ * rb_find_rcu() - find @key in tree @tree
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining the node order
+ *
+ * Notably, tree descent vs concurrent tree rotations is unsound and can result
+ * in false-negatives.
+ *
+ * Returns the rb_node matching @key or NULL.
+ */
+static __always_inline struct rb_node *
+rb_find_rcu(const void *key, const struct rb_root *tree,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ struct rb_node *node = tree->rb_node;
+
+ while (node) {
+ int c = cmp(key, node);
+
+ if (c < 0)
+ node = rcu_dereference_raw(node->rb_left);
+ else if (c > 0)
+ node = rcu_dereference_raw(node->rb_right);
+ else
+ return node;
+ }
+
+ return NULL;
+}
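A lookup sketch; 'key_cmp' is a hypothetical comparator, and because of the false-negative caveat above a miss is typically rechecked under the update-side lock:

	struct rb_node *n;

	rcu_read_lock();
	n = rb_find_rcu(&key, &tree, key_cmp);
	if (n)
		use_object(rb_entry(n, struct mynode, node));
	rcu_read_unlock();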
+
+/**
* rb_find_first() - find the first @key in @tree
* @key: key to match
* @tree: tree to search
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index d1c53e9d8c75..6dbc5a1bf6a8 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -60,6 +60,32 @@ rb_insert_augmented_cached(struct rb_node *node,
rb_insert_augmented(node, &root->rb_root, augment);
}
+static __always_inline struct rb_node *
+rb_add_augmented_cached(struct rb_node *node, struct rb_root_cached *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *),
+ const struct rb_augment_callbacks *augment)
+{
+ struct rb_node **link = &tree->rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ bool leftmost = true;
+
+ while (*link) {
+ parent = *link;
+ if (less(node, parent)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+ leftmost = false;
+ }
+ }
+
+ rb_link_node(node, parent, link);
+ augment->propagate(parent, NULL); /* suboptimal */
+ rb_insert_augmented_cached(node, tree, leftmost, augment);
+
+ return leftmost ? node : NULL;
+}
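The return value tells the caller whether @node became the cached leftmost; 'my_less' and 'my_cbs' are hypothetical:

	if (rb_add_augmented_cached(&n->node, &tree, my_less, &my_cbs))
		;	/* 'n' is now tree.rb_leftmost */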
+
/*
* Template for declaring augmented rbtree callbacks (generic case)
*
@@ -156,13 +182,13 @@ RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME, \
static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
{
- rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
+ rb->__rb_parent_color = rb_color(rb) + (unsigned long)p;
}
static inline void rb_set_parent_color(struct rb_node *rb,
struct rb_node *p, int color)
{
- rb->__rb_parent_color = (unsigned long)p | color;
+ rb->__rb_parent_color = (unsigned long)p + color;
}
static inline void
diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h
index 3d1a9e716b80..2f630eb8307e 100644
--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
@@ -14,7 +14,7 @@
*
* If we need to allow unconditional lookups (say as required for NMI context
* usage) we need a more complex setup; this data structure provides this by
- * employing the latch technique -- see @raw_write_seqcount_latch -- to
+ * employing the latch technique -- see @write_seqcount_latch_begin -- to
* implement a latched RB-tree which does allow for unconditional lookups by
* virtue of always having (at least) one stable copy of the tree.
*
@@ -132,7 +132,7 @@ __lt_find(void *key, struct latch_tree_root *ltr, int idx,
* @ops: operators defining the node order
*
* It inserts @node into @root in an ordered fashion such that we can always
- * observe one complete tree. See the comment for raw_write_seqcount_latch().
+ * observe one complete tree. See the comment for write_seqcount_latch_begin().
*
* The inserts use rcu_assign_pointer() to publish the element such that the
* tree structure is stored before we can observe the new @node.
@@ -145,10 +145,11 @@ latch_tree_insert(struct latch_tree_node *node,
struct latch_tree_root *root,
const struct latch_tree_ops *ops)
{
- raw_write_seqcount_latch(&root->seq);
+ write_seqcount_latch_begin(&root->seq);
__lt_insert(node, root, 0, ops->less);
- raw_write_seqcount_latch(&root->seq);
+ write_seqcount_latch(&root->seq);
__lt_insert(node, root, 1, ops->less);
+ write_seqcount_latch_end(&root->seq);
}
/**
@@ -159,7 +160,7 @@ latch_tree_insert(struct latch_tree_node *node,
*
* Removes @node from the trees @root in an ordered fashion such that we can
* always observe one complete tree. See the comment for
- * raw_write_seqcount_latch().
+ * write_seqcount_latch_begin().
*
* It is assumed that @node will observe one RCU quiescent state before being
 * reused or freed.
@@ -172,10 +173,11 @@ latch_tree_erase(struct latch_tree_node *node,
struct latch_tree_root *root,
const struct latch_tree_ops *ops)
{
- raw_write_seqcount_latch(&root->seq);
+ write_seqcount_latch_begin(&root->seq);
__lt_erase(node, root, 0);
- raw_write_seqcount_latch(&root->seq);
+ write_seqcount_latch(&root->seq);
__lt_erase(node, root, 1);
+ write_seqcount_latch_end(&root->seq);
}
/**
@@ -204,7 +206,7 @@ latch_tree_find(void *key, struct latch_tree_root *root,
unsigned int seq;
do {
- seq = raw_read_seqcount_latch(&root->seq);
+ seq = read_seqcount_latch(&root->seq);
node = __lt_find(key, root, seq & 1, ops->comp);
} while (read_seqcount_latch_retry(&root->seq, seq));
diff --git a/include/linux/rbtree_types.h b/include/linux/rbtree_types.h
new file mode 100644
index 000000000000..45b6ecde3665
--- /dev/null
+++ b/include/linux/rbtree_types.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_RBTREE_TYPES_H
+#define _LINUX_RBTREE_TYPES_H
+
+struct rb_node {
+ unsigned long __rb_parent_color;
+ struct rb_node *rb_right;
+ struct rb_node *rb_left;
+} __attribute__((aligned(sizeof(long))));
+/* The alignment might seem pointless, but allegedly CRIS needs it */
+
+struct rb_root {
+ struct rb_node *rb_node;
+};
+
+/*
+ * Leftmost-cached rbtrees.
+ *
+ * We do not cache the rightmost node based on footprint
+ * size vs number of potential users that could benefit
+ * from O(1) rb_last(). Just not worth it, users that want
+ * this feature can always implement the logic explicitly.
+ * Furthermore, users that want to cache both pointers may
+ * find it a bit asymmetric, but that's ok.
+ */
+struct rb_root_cached {
+ struct rb_root rb_root;
+ struct rb_node *rb_leftmost;
+};
+
+#define RB_ROOT (struct rb_root) { NULL, }
+#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
+
+#endif
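Static initialization is unchanged for users; the initializers simply live in rbtree_types.h now:

	static struct rb_root my_tree = RB_ROOT;
	static struct rb_root_cached my_cached_tree = RB_ROOT_CACHED;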
diff --git a/include/linux/rcu_notifier.h b/include/linux/rcu_notifier.h
new file mode 100644
index 000000000000..5640f024773b
--- /dev/null
+++ b/include/linux/rcu_notifier.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Read-Copy Update notifiers, initially RCU CPU stall notifier.
+ * Separate from rcupdate.h to avoid #include loops.
+ *
+ * Copyright (C) 2023 Paul E. McKenney.
+ */
+
+#ifndef __LINUX_RCU_NOTIFIER_H
+#define __LINUX_RCU_NOTIFIER_H
+
+// Actions for RCU CPU stall notifier calls.
+#define RCU_STALL_NOTIFY_NORM 1
+#define RCU_STALL_NOTIFY_EXP 2
+
+#if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+int rcu_stall_chain_notifier_register(struct notifier_block *n);
+int rcu_stall_chain_notifier_unregister(struct notifier_block *n);
+
+#else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
+
+// No RCU CPU stall warnings in Tiny RCU.
+static inline int rcu_stall_chain_notifier_register(struct notifier_block *n) { return -EEXIST; }
+static inline int rcu_stall_chain_notifier_unregister(struct notifier_block *n) { return -ENOENT; }
+
+#endif // #else // #if defined(CONFIG_RCU_STALL_COMMON) && defined(CONFIG_RCU_CPU_STALL_NOTIFIER)
+
+#endif /* __LINUX_RCU_NOTIFIER_H */
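A registration sketch; the notifier block and handler are hypothetical, and @action is one of the RCU_STALL_NOTIFY_* values above:

	static int my_stall_cb(struct notifier_block *nb, unsigned long action,
			       void *data)
	{
		pr_warn("RCU stall notified, action=%lu\n", action);
		return NOTIFY_OK;
	}

	static struct notifier_block my_stall_nb = {
		.notifier_call = my_stall_cb,
	};

	/* Typically from module init: */
	rcu_stall_chain_notifier_register(&my_stall_nb);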
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index 3db96c4f45fd..2fdc2208f1ca 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -69,7 +69,7 @@ struct rcu_cblist {
*
*
* ----------------------------------------------------------------------------
- * | SEGCBLIST_SOFTIRQ_ONLY |
+ * | SEGCBLIST_RCU_CORE |
* | |
* | Callbacks processed by rcu_core() from softirqs or local |
* | rcuc kthread, without holding nocb_lock. |
@@ -77,36 +77,38 @@ struct rcu_cblist {
* |
* v
* ----------------------------------------------------------------------------
- * | SEGCBLIST_OFFLOADED |
+ * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
* | |
* | Callbacks processed by rcu_core() from softirqs or local |
- * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads, |
- * | allowing nocb_timer to be armed. |
+ * | rcuc kthread, while holding nocb_lock. Waking up CB and GP kthreads. |
* ----------------------------------------------------------------------------
* |
* v
- * -----------------------------------
- * | |
- * v v
- * --------------------------------------- ----------------------------------|
- * | SEGCBLIST_OFFLOADED | | | SEGCBLIST_OFFLOADED | |
- * | SEGCBLIST_KTHREAD_CB | | SEGCBLIST_KTHREAD_GP |
- * | | | |
- * | | | |
- * | CB kthread woke up and | | GP kthread woke up and |
- * | acknowledged SEGCBLIST_OFFLOADED. | | acknowledged SEGCBLIST_OFFLOADED|
- * | Processes callbacks concurrently | | |
- * | with rcu_core(), holding | | |
- * | nocb_lock. | | |
- * --------------------------------------- -----------------------------------
- * | |
- * -----------------------------------
+ * ----------------------------------------------------------------------------
+ * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING | SEGCBLIST_OFFLOADED |
+ * | + unparked CB kthread |
+ * | |
+ * | CB kthread got unparked and processes callbacks concurrently with |
+ * | rcu_core(), holding nocb_lock. |
+ * ---------------------------------------------------------------------------
* |
* v
- * |--------------------------------------------------------------------------|
+ * ---------------------------------------------------------------------------|
+ * | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | |
* | SEGCBLIST_OFFLOADED | |
- * | SEGCBLIST_KTHREAD_CB | |
* | SEGCBLIST_KTHREAD_GP |
+ * | + unparked CB kthread |
+ * | |
+ * | GP kthread woke up and acknowledged nocb_lock. |
+ * ---------------------------------------- -----------------------------------
+ * |
+ * v
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
+ * | SEGCBLIST_KTHREAD_GP | |
+ * | + unparked CB kthread |
* | |
* | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops |
* | handling callbacks. Enable bypass queueing. |
@@ -120,9 +122,10 @@ struct rcu_cblist {
*
*
* |--------------------------------------------------------------------------|
- * | SEGCBLIST_OFFLOADED | |
- * | SEGCBLIST_KTHREAD_CB | |
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
* | SEGCBLIST_KTHREAD_GP |
+ * | + unparked CB kthread |
* | |
* | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
* | ignores callbacks. Bypass enqueue is enabled. |
@@ -130,55 +133,59 @@ struct rcu_cblist {
* |
* v
* |--------------------------------------------------------------------------|
- * | SEGCBLIST_KTHREAD_CB | |
+ * | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_OFFLOADED | |
* | SEGCBLIST_KTHREAD_GP |
+ * | + unparked CB kthread |
+ * | |
+ * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core() |
+ * | handles callbacks concurrently. Bypass enqueue is disabled. |
+ * | Invoke RCU core so we make sure not to preempt it in the middle and      |
+ * | leave some urgent work unattended within a jiffy.                        |
+ * ----------------------------------------------------------------------------
+ * |
+ * v
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | |
+ * | SEGCBLIST_KTHREAD_GP |
+ * | + unparked CB kthread |
* | |
* | CB/GP kthreads and local rcu_core() handle callbacks concurrently |
- * | holding nocb_lock. Wake up CB and GP kthreads if necessary. Disable |
- * | bypass enqueue. |
+ * | holding nocb_lock. Wake up GP kthread if necessary. |
* ----------------------------------------------------------------------------
* |
* v
- * -----------------------------------
- * | |
- * v v
- * ---------------------------------------------------------------------------|
+ * |--------------------------------------------------------------------------|
+ * | SEGCBLIST_RCU_CORE | |
+ * | SEGCBLIST_LOCKING | |
+ * | + unparked CB kthread |
* | |
- * | SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP |
- * | | |
- * | GP kthread woke up and | CB kthread woke up and |
- * | acknowledged the fact that | acknowledged the fact that |
- * | SEGCBLIST_OFFLOADED got cleared. | SEGCBLIST_OFFLOADED got cleared. |
- * | | The CB kthread goes to sleep |
- * | The callbacks from the target CPU | until it ever gets re-offloaded. |
- * | will be ignored from the GP kthread | |
- * | loop. | |
+ * | GP kthread woke up and acknowledged the fact that SEGCBLIST_OFFLOADED   |
+ * | got cleared. The callbacks from the target CPU will be ignored by the   |
+ * | GP kthread loop.                                                        |
* ----------------------------------------------------------------------------
- * | |
- * -----------------------------------
* |
* v
* ----------------------------------------------------------------------------
- * | 0 |
+ * | SEGCBLIST_RCU_CORE | SEGCBLIST_LOCKING |
+ * | + parked CB kthread |
* | |
- * | Callbacks processed by rcu_core() from softirqs or local |
- * | rcuc kthread, while holding nocb_lock. Forbid nocb_timer to be armed. |
- * | Flush pending nocb_timer. Flush nocb bypass callbacks. |
+ * | CB kthread is parked. Callbacks processed by rcu_core() from softirqs or |
+ * | local rcuc kthread, while holding nocb_lock. |
* ----------------------------------------------------------------------------
* |
* v
* ----------------------------------------------------------------------------
- * | SEGCBLIST_SOFTIRQ_ONLY |
+ * | SEGCBLIST_RCU_CORE |
* | |
* | Callbacks processed by rcu_core() from softirqs or local |
* | rcuc kthread, without holding nocb_lock. |
* ----------------------------------------------------------------------------
*/
#define SEGCBLIST_ENABLED BIT(0)
-#define SEGCBLIST_SOFTIRQ_ONLY BIT(1)
-#define SEGCBLIST_KTHREAD_CB BIT(2)
-#define SEGCBLIST_KTHREAD_GP BIT(3)
-#define SEGCBLIST_OFFLOADED BIT(4)
+#define SEGCBLIST_OFFLOADED BIT(1)
struct rcu_segcblist {
struct rcu_head *head;
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
index 0027d4c8087c..3860dbb9107a 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -37,7 +37,6 @@ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
}
extern void rcu_sync_init(struct rcu_sync *);
-extern void rcu_sync_enter_start(struct rcu_sync *);
extern void rcu_sync_enter(struct rcu_sync *);
extern void rcu_sync_exit(struct rcu_sync *);
extern void rcu_sync_dtor(struct rcu_sync *);
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index f8633d37e358..2abba7552605 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -11,15 +11,6 @@
#include <linux/rcupdate.h>
/*
- * Why is there no list_empty_rcu()? Because list_empty() serves this
- * purpose. The list_empty() function fetches the RCU-protected pointer
- * and compares it to the address of the list head, but neither dereferences
- * this pointer itself nor provides this pointer to the caller. Therefore,
- * it is not necessary to use rcu_dereference(), so that list_empty() can
- * be used anywhere you would want to use a list_empty_rcu().
- */
-
-/*
* INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
* @list: list to be initialized
*
@@ -39,6 +30,27 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
* way, we must not access it directly
*/
#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next)))
+/*
+ * Return the ->prev pointer of a list_head in an RCU-safe way. Don't
+ * access it directly.
+ *
+ * Any list traversed with list_bidir_prev_rcu() must never use
+ * list_del_rcu(). Doing so will poison the ->prev pointer that
+ * list_bidir_prev_rcu() relies on, which will result in segfaults.
+ * To prevent these segfaults, use list_bidir_del_rcu() instead
+ * of list_del_rcu().
+ */
+#define list_bidir_prev_rcu(list) (*((struct list_head __rcu **)(&(list)->prev)))
+
+/**
+ * list_for_each_rcu - Iterate over a list in an RCU-safe fashion
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each_rcu(pos, head) \
+ for (pos = rcu_dereference((head)->next); \
+ !list_is_head(pos, (head)); \
+ pos = rcu_dereference(pos->next))
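Usage sketch, assuming a hypothetical 'struct myitem' that embeds a list_head named 'link':

	struct list_head *pos;

	rcu_read_lock();
	list_for_each_rcu(pos, &my_list)
		handle(list_entry(pos, struct myitem, link));
	rcu_read_unlock();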
/**
* list_tail_rcu - returns the prev pointer of the head of the list
@@ -168,6 +180,39 @@ static inline void list_del_rcu(struct list_head *entry)
}
/**
+ * list_bidir_del_rcu - deletes entry from list without re-initialization
+ * @entry: the element to delete from the list.
+ *
+ * In contrast to list_del_rcu() doesn't poison the prev pointer thus
+ * allowing backwards traversal via list_bidir_prev_rcu().
+ *
+ * Note: list_empty() on entry does not return true after this because
+ * the entry is in a special undefined state that permits RCU-based
+ * lockfree reverse traversal. In particular this means that we can not
+ * poison the forward and backwards pointers that may still be used for
+ * walking the list.
+ *
+ * The caller must take whatever precautions are necessary (such as
+ * holding appropriate locks) to avoid racing with another list-mutation
+ * primitive, such as list_bidir_del_rcu() or list_add_rcu(), running on
+ * this same list. However, it is perfectly legal to run concurrently
+ * with the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ *
+ * Note that list_del_rcu() and list_bidir_del_rcu() must not be used on
+ * the same list.
+ *
+ * Note that the caller is not permitted to immediately free
+ * the newly deleted entry. Instead, either synchronize_rcu()
+ * or call_rcu() must be used to defer freeing until an RCU
+ * grace period has elapsed.
+ */
+static inline void list_bidir_del_rcu(struct list_head *entry)
+{
+ __list_del_entry(entry);
+}
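A deletion sketch pairing list_bidir_del_rcu() with deferred freeing; 'obj', its 'link' list_head, 'rcu' rcu_head, and the callback are hypothetical:

	/* Updater side, under the list's update-side lock: */
	list_bidir_del_rcu(&obj->link);
	call_rcu(&obj->rcu, free_obj_cb);	/* never kfree(obj) here directly */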
+
+/**
* hlist_del_init_rcu - deletes entry from hash list with re-initialization
* @n: the element to delete from the hash list.
*
@@ -200,7 +245,10 @@ static inline void hlist_del_init_rcu(struct hlist_node *n)
* @old : the element to be replaced
* @new : the new element to insert
*
- * The @old entry will be replaced with the @new entry atomically.
+ * The @old entry will be replaced with the @new entry atomically from
+ * the perspective of concurrent readers. It is the caller's responsibility
+ * to synchronize with concurrent updaters, if any.
+ *
* Note: @old should not be empty.
*/
static inline void list_replace_rcu(struct list_head *old,
@@ -318,21 +366,29 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
/*
* Where are list_empty_rcu() and list_first_entry_rcu()?
*
- * Implementing those functions following their counterparts list_empty() and
- * list_first_entry() is not advisable because they lead to subtle race
- * conditions as the following snippet shows:
+ * They do not exist because they would lead to subtle race conditions:
*
* if (!list_empty_rcu(mylist)) {
* struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
* do_something(bar);
* }
*
- * The list may not be empty when list_empty_rcu checks it, but it may be when
- * list_first_entry_rcu rereads the ->next pointer.
+ * The list might be non-empty when list_empty_rcu() checks it, but it
+ * might have become empty by the time that list_first_entry_rcu() rereads
+ * the ->next pointer, which would result in a SEGV.
+ *
+ * When not using RCU, it is OK for list_first_entry() to re-read that
+ * pointer because both functions should be protected by some lock that
+ * blocks writers.
*
- * Rereading the ->next pointer is not a problem for list_empty() and
- * list_first_entry() because they would be protected by a lock that blocks
- * writers.
+ * When using RCU, list_empty() uses READ_ONCE() to fetch the
+ * RCU-protected ->next pointer and then compares it to the address of the
+ * list head. However, it neither dereferences this pointer nor provides
+ * this pointer to its caller. Thus, READ_ONCE() suffices (that is,
+ * rcu_dereference() is not needed), which means that list_empty() can be
+ * used anywhere you would want to use list_empty_rcu(). Just don't
+ * expect anything useful to happen if you do a subsequent lockless
+ * call to list_first_entry_rcu()!!!
*
* See list_first_or_null_rcu for an alternative.
*/
@@ -356,7 +412,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
})
/**
- * list_next_or_null_rcu - get the first element from a list
+ * list_next_or_null_rcu - get the next element from a list
* @head: the head for the list.
* @ptr: the list head to take the next element from.
* @type: the type of the struct this is embedded in.
@@ -520,7 +576,9 @@ static inline void hlist_del_rcu(struct hlist_node *n)
* @old : the element to be replaced
* @new : the new element to insert
*
- * The @old entry will be replaced with the @new entry atomically.
+ * The @old entry will be replaced with the @new entry atomically from
+ * the perspective of concurrent readers. It is the caller's responsibility
+ * to synchronize with concurrent updaters, if any.
*/
static inline void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *new)
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index d8afdb8784c1..a97c3bcb1656 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -53,6 +53,13 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
(*((struct hlist_nulls_node __rcu __force **)&(node)->next))
/**
+ * hlist_nulls_pprev_rcu - returns the dereferenced pprev of @node.
+ * @node: element of the list.
+ */
+#define hlist_nulls_pprev_rcu(node) \
+ (*((struct hlist_nulls_node __rcu __force **)(node)->pprev))
+
+/**
* hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
* @n: the element to delete from the hash list.
*
@@ -101,7 +108,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
{
struct hlist_nulls_node *first = h->first;
- n->next = first;
+ WRITE_ONCE(n->next, first);
WRITE_ONCE(n->pprev, &h->first);
rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
if (!is_a_nulls(first))
@@ -137,9 +144,9 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
last = i;
if (last) {
- n->next = last->next;
- n->pprev = &last->next;
- rcu_assign_pointer(hlist_next_rcu(last), n);
+ WRITE_ONCE(n->next, last->next);
+ WRITE_ONCE(n->pprev, &last->next);
+ rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
} else {
hlist_nulls_add_head_rcu(n, h);
}
@@ -148,8 +155,60 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
/* after that hlist_nulls_del will work */
static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n)
{
- n->pprev = &n->next;
- n->next = (struct hlist_nulls_node *)NULLS_MARKER(NULL);
+ WRITE_ONCE(n->pprev, &n->next);
+ WRITE_ONCE(n->next, (struct hlist_nulls_node *)NULLS_MARKER(NULL));
+}
+
+/**
+ * hlist_nulls_replace_rcu - replace an old entry by a new one
+ * @old: the element to be replaced
+ * @new: the new element to insert
+ *
+ * Description:
+ * Replace the old entry with the new one in an RCU-protected hlist_nulls, while
+ * permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary (such as holding
+ * appropriate locks) to avoid racing with another list-mutation primitive, such
+ * as hlist_nulls_add_head_rcu() or hlist_nulls_del_rcu(), running on this same
+ * list. However, it is perfectly legal to run concurrently with the _rcu
+ * list-traversal primitives, such as hlist_nulls_for_each_entry_rcu().
+ */
+static inline void hlist_nulls_replace_rcu(struct hlist_nulls_node *old,
+ struct hlist_nulls_node *new)
+{
+ struct hlist_nulls_node *next = old->next;
+
+ WRITE_ONCE(new->next, next);
+ WRITE_ONCE(new->pprev, old->pprev);
+ rcu_assign_pointer(hlist_nulls_pprev_rcu(new), new);
+ if (!is_a_nulls(next))
+ WRITE_ONCE(next->pprev, &new->next);
+}
+
+/**
+ * hlist_nulls_replace_init_rcu - replace an old entry by a new one and
+ * initialize the old
+ * @old: the element to be replaced
+ * @new: the new element to insert
+ *
+ * Description:
+ * Replace the old entry with the new one in an RCU-protected hlist_nulls, while
+ * permitting racing traversals, and reinitialize the old entry.
+ *
+ * Note: @old must be hashed.
+ *
+ * The caller must take whatever precautions are necessary (such as holding
+ * appropriate locks) to avoid racing with another list-mutation primitive, such
+ * as hlist_nulls_add_head_rcu() or hlist_nulls_del_rcu(), running on this same
+ * list. However, it is perfectly legal to run concurrently with the _rcu
+ * list-traversal primitives, such as hlist_nulls_for_each_entry_rcu().
+ */
+static inline void hlist_nulls_replace_init_rcu(struct hlist_nulls_node *old,
+ struct hlist_nulls_node *new)
+{
+ hlist_nulls_replace_rcu(old, new);
+ WRITE_ONCE(old->pprev, NULL);
}
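A replacement sketch; the bucket lock, the 'hnode' and 'rcu' members, and the callback are hypothetical:

	spin_lock(&bucket->lock);
	hlist_nulls_replace_init_rcu(&old->hnode, &new->hnode);
	spin_unlock(&bucket->lock);
	call_rcu(&old->rcu, free_old_cb);	/* readers may still hold 'old' */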
/**
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 9455476c5ba2..c5b30054cd01 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -24,24 +24,49 @@
#include <linux/compiler.h>
#include <linux/atomic.h>
#include <linux/irqflags.h>
-#include <linux/preempt.h>
+#include <linux/sched.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
+#include <linux/cleanup.h>
#include <asm/processor.h>
-#include <linux/cpumask.h>
+#include <linux/context_tracking_irq.h>
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
-#define ulong2long(a) (*(long *)(&(a)))
-#define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b)))
-#define USHORT_CMP_LT(a, b) (USHRT_MAX / 2 < (unsigned short)((a) - (b)))
+
+#define RCU_SEQ_CTR_SHIFT 2
+#define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)
/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
-void rcu_barrier_tasks_rude(void);
void synchronize_rcu(void);
+struct rcu_gp_oldstate;
+unsigned long get_completed_synchronize_rcu(void);
+void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
+
+// Maximum number of unsigned long values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_OLDSTATE 2
+
+/**
+ * same_state_synchronize_rcu - Are two old-state values identical?
+ * @oldstate1: First old-state value.
+ * @oldstate2: Second old-state value.
+ *
+ * The two old-state values must have been obtained from either
+ * get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or
+ * get_completed_synchronize_rcu(). Returns @true if the two values are
+ * identical and @false otherwise. This allows structures whose lifetimes
+ * are tracked by old-state values to push these values to a list header,
+ * allowing those structures to be slightly smaller.
+ */
+static inline bool same_state_synchronize_rcu(unsigned long oldstate1, unsigned long oldstate2)
+{
+ return oldstate1 == oldstate2;
+}
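These helpers support the polled grace-period pattern sketched below; the object handling is illustrative:

	unsigned long os = start_poll_synchronize_rcu();

	/* ... later ... */
	if (poll_state_synchronize_rcu(os))
		kfree(obj);		/* a full grace period has elapsed */
	else
		requeue(obj, os);	/* stored 'os' values can later be
					 * compared with
					 * same_state_synchronize_rcu() */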
+
#ifdef CONFIG_PREEMPT_RCU
void __rcu_read_lock(void);
@@ -53,7 +78,7 @@ void __rcu_read_unlock(void);
* nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
-#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+#define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting)
#else /* #ifdef CONFIG_PREEMPT_RCU */
@@ -70,8 +95,9 @@ static inline void __rcu_read_lock(void)
static inline void __rcu_read_unlock(void)
{
+ if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
+ rcu_read_unlock_strict();
preempt_enable();
- rcu_read_unlock_strict();
}
static inline int rcu_preempt_depth(void)
@@ -81,18 +107,19 @@ static inline int rcu_preempt_depth(void)
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_RCU_LAZY
+void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func);
+#else
+static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
+{
+ call_rcu(head, func);
+}
+#endif
+
/* Internal to kernel */
void rcu_init(void);
-extern int rcu_scheduler_active __read_mostly;
+extern int rcu_scheduler_active;
void rcu_sched_clock_irq(int user);
-void rcu_report_dead(unsigned int cpu);
-void rcutree_migrate_callbacks(int cpu);
-
-#ifdef CONFIG_TASKS_RCU_GENERIC
-void rcu_init_tasks_generic(void);
-#else
-static inline void rcu_init_tasks_generic(void) { }
-#endif
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
@@ -102,50 +129,30 @@ static inline void rcu_sysrq_start(void) { }
static inline void rcu_sysrq_end(void) { }
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
-#ifdef CONFIG_NO_HZ_FULL
-void rcu_user_enter(void);
-void rcu_user_exit(void);
+#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_VIRT_XFER_TO_GUEST_WORK))
+void rcu_irq_work_resched(void);
#else
-static inline void rcu_user_enter(void) { }
-static inline void rcu_user_exit(void) { }
-#endif /* CONFIG_NO_HZ_FULL */
+static __always_inline void rcu_irq_work_resched(void) { }
+#endif
#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
int rcu_nocb_cpu_offload(int cpu);
int rcu_nocb_cpu_deoffload(int cpu);
void rcu_nocb_flush_deferred_wakeup(void);
+
+#define RCU_NOCB_LOCKDEP_WARN(c, s) RCU_LOCKDEP_WARN(c, s)
+
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+
static inline void rcu_init_nohz(void) { }
static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; }
static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
static inline void rcu_nocb_flush_deferred_wakeup(void) { }
-#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
-/**
- * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
- * @a: Code that RCU needs to pay attention to.
- *
- * RCU read-side critical sections are forbidden in the inner idle loop,
- * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU
- * will happily ignore any such read-side critical sections. However,
- * things like powertop need tracepoints in the inner idle loop.
- *
- * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
- * will tell RCU that it needs to pay attention, invoke its argument
- * (in this example, calling the do_something_with_RCU() function),
- * and then tell RCU to go back to ignoring this CPU. It is permissible
- * to nest RCU_NONIDLE() wrappers, but not indefinitely (but the limit is
- * on the order of a million or so, even on 32-bit systems). It is
- * not legal to block within RCU_NONIDLE(), nor is it permissible to
- * transfer control either into or out of RCU_NONIDLE()'s statement.
- */
-#define RCU_NONIDLE(a) \
- do { \
- rcu_irq_enter_irqson(); \
- do { a; } while (0); \
- rcu_irq_exit_irqson(); \
- } while (0)
+#define RCU_NOCB_LOCKDEP_WARN(c, s)
+
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/*
* Note a quasi-voluntary context switch for RCU-tasks's benefit.
@@ -161,21 +168,34 @@ static inline void rcu_nocb_flush_deferred_wakeup(void) { }
} while (0)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
+void rcu_tasks_torture_stats_print(char *tt, char *tf);
# else
# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
# define call_rcu_tasks call_rcu
# define synchronize_rcu_tasks synchronize_rcu
# endif
-# ifdef CONFIG_TASKS_RCU_TRACE
-# define rcu_tasks_trace_qs(t) \
- do { \
- if (!likely(READ_ONCE((t)->trc_reader_checked)) && \
- !unlikely(READ_ONCE((t)->trc_reader_nesting))) { \
- smp_store_release(&(t)->trc_reader_checked, true); \
- smp_mb(); /* Readers partitioned by store. */ \
- } \
+# ifdef CONFIG_TASKS_TRACE_RCU
+// Bits for ->trc_reader_special.b.need_qs field.
+#define TRC_NEED_QS 0x1 // Task needs a quiescent state.
+#define TRC_NEED_QS_CHECKED 0x2 // Task has been checked for needing quiescent state.
+
+u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new);
+void rcu_tasks_trace_qs_blkd(struct task_struct *t);
+
+# define rcu_tasks_trace_qs(t) \
+ do { \
+ int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \
+ \
+ if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \
+ likely(!___rttq_nesting)) { \
+ rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \
+ } else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \
+ !READ_ONCE((t)->trc_reader_special.b.blocked)) { \
+ rcu_tasks_trace_qs_blkd(t); \
+ } \
} while (0)
+void rcu_tasks_trace_torture_stats_print(char *tt, char *tf);
# else
# define rcu_tasks_trace_qs(t) do { } while (0)
# endif
@@ -183,18 +203,19 @@ void synchronize_rcu_tasks(void);
#define rcu_tasks_qs(t, preempt) \
do { \
rcu_tasks_classic_qs((t), (preempt)); \
- rcu_tasks_trace_qs((t)); \
+ rcu_tasks_trace_qs(t); \
} while (0)
# ifdef CONFIG_TASKS_RUDE_RCU
-void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks_rude(void);
+void rcu_tasks_rude_torture_stats_print(char *tt, char *tf);
# endif
#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
+#define rcu_tasks_classic_qs(t, preempt) do { } while (0)
#define rcu_tasks_qs(t, preempt) do { } while (0)
#define rcu_note_voluntary_context_switch(t) do { } while (0)
#define call_rcu_tasks call_rcu
@@ -204,6 +225,18 @@ static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
/**
+ * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period?
+ *
+ * As an accident of implementation, an RCU Tasks Trace grace period also
+ * acts as an RCU grace period. However, this could change at any time.
+ * Code relying on this accident must call this function to verify that
+ * this accident is still happening.
+ *
+ * You have been warned!
+ */
+static inline bool rcu_trace_implies_rcu_gp(void) { return true; }
+
+/**
* cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
*
* This macro resembles cond_resched(), except that it is defined to
@@ -216,6 +249,37 @@ do { \
cond_resched(); \
} while (0)
+/**
+ * rcu_softirq_qs_periodic - Report RCU and RCU-Tasks quiescent states
+ * @old_ts: jiffies at start of processing.
+ *
+ * This helper is for long-running softirq handlers, such as NAPI threads in
+ * networking. The caller should initialize the variable passed in as @old_ts
+ * at the beginning of the softirq handler. When invoked frequently, this macro
+ * will invoke rcu_softirq_qs() every 100 milliseconds thereafter, which will
+ * provide both RCU and RCU-Tasks quiescent states. Note that this macro
+ * modifies its old_ts argument.
+ *
+ * Because regions of code that have disabled softirq act as RCU read-side
+ * critical sections, this macro should be invoked with softirq (and
+ * preemption) enabled.
+ *
+ * The macro is not needed when CONFIG_PREEMPT_RT is defined. RT kernels would
+ * have more chances to invoke schedule() and thus provide the necessary
+ * quiescent states. By contrast, calling cond_resched() alone does not
+ * achieve the same effect because cond_resched() does not provide RCU-Tasks
+ * quiescent states.
+ */
+#define rcu_softirq_qs_periodic(old_ts) \
+do { \
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && \
+ time_after(jiffies, (old_ts) + HZ / 10)) { \
+ preempt_disable(); \
+ rcu_softirq_qs(); \
+ preempt_enable(); \
+ (old_ts) = jiffies; \
+ } \
+} while (0)
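Usage sketch for a long-running softirq-context loop; the work-producing functions are hypothetical:

	unsigned long last_qs = jiffies;

	while (have_more_work()) {
		process_one_unit();
		rcu_softirq_qs_periodic(last_qs);
	}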
+
/*
* Infrastructure to implement the synchronize_() primitives in
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -267,6 +331,11 @@ static inline void rcu_lock_acquire(struct lockdep_map *map)
lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}
+static inline void rcu_try_lock_acquire(struct lockdep_map *map)
+{
+ lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_);
+}
+
static inline void rcu_lock_release(struct lockdep_map *map)
{
lock_release(map, _THIS_IP_);
@@ -281,6 +350,7 @@ int rcu_read_lock_any_held(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
# define rcu_lock_acquire(a) do { } while (0)
+# define rcu_try_lock_acquire(a) do { } while (0)
# define rcu_lock_release(a) do { } while (0)
static inline int rcu_read_lock_held(void)
@@ -303,6 +373,11 @@ static inline int rcu_read_lock_any_held(void)
return !preemptible();
}
+static inline int debug_lockdep_rcu_enabled(void)
+{
+ return 0;
+}
+
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
#ifdef CONFIG_PROVE_RCU
@@ -311,25 +386,32 @@ static inline int rcu_read_lock_any_held(void)
* RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
* @c: condition to check
* @s: informative message
+ *
+ * This checks debug_lockdep_rcu_enabled() before checking (c) to
+ * prevent early boot splats due to lockdep not yet being initialized,
+ * and rechecks it after checking (c) to prevent false-positive splats
+ * due to races with lockdep being disabled. See commit 3066820034b5dd
+ * ("rcu: Reject RCU_LOCKDEP_WARN() false positives") for more detail.
*/
#define RCU_LOCKDEP_WARN(c, s) \
do { \
- static bool __section(".data.unlikely") __warned; \
- if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
+ static bool __section(".data..unlikely") __warned; \
+ if (debug_lockdep_rcu_enabled() && (c) && \
+ debug_lockdep_rcu_enabled() && !__warned) { \
__warned = true; \
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \
} while (0)
-#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
+#ifndef CONFIG_PREEMPT_RCU
static inline void rcu_preempt_sleep_check(void)
{
RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
"Illegal context switch in RCU read-side critical section");
}
-#else /* #ifdef CONFIG_PROVE_RCU */
+#else // #ifndef CONFIG_PREEMPT_RCU
static inline void rcu_preempt_sleep_check(void) { }
-#endif /* #else #ifdef CONFIG_PROVE_RCU */
+#endif // #else // #ifndef CONFIG_PREEMPT_RCU
#define rcu_sleep_check() \
do { \
@@ -341,11 +423,71 @@ static inline void rcu_preempt_sleep_check(void) { }
"Illegal context switch in RCU-sched read-side critical section"); \
} while (0)
+// See RCU_LOCKDEP_WARN() for an explanation of the double call to
+// debug_lockdep_rcu_enabled().
+static inline bool lockdep_assert_rcu_helper(bool c)
+{
+ return debug_lockdep_rcu_enabled() &&
+ (c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) &&
+ debug_lockdep_rcu_enabled();
+}
+
+/**
+ * lockdep_assert_in_rcu_read_lock - WARN if not protected by rcu_read_lock()
+ *
+ * Splats if lockdep is enabled and there is no rcu_read_lock() in effect.
+ */
+#define lockdep_assert_in_rcu_read_lock() \
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map)))
+
+/**
+ * lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh()
+ *
+ * Splats if lockdep is enabled and there is no rcu_read_lock_bh() in effect.
+ * Note that local_bh_disable() and friends do not suffice here, instead an
+ * actual rcu_read_lock_bh() is required.
+ */
+#define lockdep_assert_in_rcu_read_lock_bh() \
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map)))
+
+/**
+ * lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched()
+ *
+ * Splats if lockdep is enabled and there is no rcu_read_lock_sched()
+ * in effect. Note that preempt_disable() and friends do not suffice here,
+ * instead an actual rcu_read_lock_sched() is required.
+ */
+#define lockdep_assert_in_rcu_read_lock_sched() \
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map)))
+
+/**
+ * lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader
+ *
+ * Splats if lockdep is enabled and there is no RCU reader of any
+ * type in effect. Note that regions of code protected by things like
+ * preempt_disable, local_bh_disable(), and local_irq_disable() all qualify
+ * as RCU readers.
+ *
+ * Note that this will never trigger in PREEMPT_NONE or PREEMPT_VOLUNTARY
+ * kernels that are not also built with PREEMPT_COUNT. But if you have
+ * lockdep enabled, you might as well also enable PREEMPT_COUNT.
+ */
+#define lockdep_assert_in_rcu_reader() \
+ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) && \
+ !lock_is_held(&rcu_bh_lock_map) && \
+ !lock_is_held(&rcu_sched_lock_map) && \
+ preemptible()))
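These asserts let a function document its RCU requirement at its entry point; a sketch with hypothetical types:

	static struct foo *first_foo(struct list_head *head)
	{
		lockdep_assert_in_rcu_read_lock();
		return list_first_or_null_rcu(head, struct foo, link);
	}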
+
#else /* #ifdef CONFIG_PROVE_RCU */
#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
#define rcu_sleep_check() do { } while (0)
+#define lockdep_assert_in_rcu_read_lock() do { } while (0)
+#define lockdep_assert_in_rcu_read_lock_bh() do { } while (0)
+#define lockdep_assert_in_rcu_read_lock_sched() do { } while (0)
+#define lockdep_assert_in_rcu_reader() do { } while (0)
+
#endif /* #else #ifdef CONFIG_PROVE_RCU */
/*
@@ -363,32 +505,48 @@ static inline void rcu_preempt_sleep_check(void) { }
#define rcu_check_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */
-#define __rcu_access_pointer(p, space) \
+#define __unrcu_pointer(p, local) \
+({ \
+ typeof(*p) *local = (typeof(*p) *__force)(p); \
+ rcu_check_sparse(p, __rcu); \
+ ((typeof(*p) __force __kernel *)(local)); \
+})
+/**
+ * unrcu_pointer - mark a pointer as not being RCU protected
+ * @p: pointer needing to lose its __rcu property
+ *
+ * Converts @p from an __rcu pointer to a __kernel pointer.
+ * This allows an __rcu pointer to be used with xchg() and friends.
+ */
+#define unrcu_pointer(p) __unrcu_pointer(p, __UNIQUE_ID(rcu))
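The typical use is handing an __rcu pointer to an atomic operation; 'gp' and 'struct foo' are hypothetical:

	struct foo __rcu *gp;

	/* Atomically take ownership of the current object, if any: */
	struct foo *old = unrcu_pointer(xchg(&gp, NULL));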
+
+#define __rcu_access_pointer(p, local, space) \
({ \
- typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
+ typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
rcu_check_sparse(p, space); \
- ((typeof(*p) __force __kernel *)(_________p1)); \
+ ((typeof(*p) __force __kernel *)(local)); \
})
-#define __rcu_dereference_check(p, c, space) \
+#define __rcu_dereference_check(p, local, c, space) \
({ \
/* Dependency order vs. p above. */ \
- typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
+ typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
rcu_check_sparse(p, space); \
- ((typeof(*p) __force __kernel *)(________p1)); \
+ ((typeof(*p) __force __kernel *)(local)); \
})
-#define __rcu_dereference_protected(p, c, space) \
+#define __rcu_dereference_protected(p, local, c, space) \
({ \
RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
rcu_check_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \
})
-#define rcu_dereference_raw(p) \
+#define __rcu_dereference_raw(p, local) \
({ \
/* Dependency order vs. p above. */ \
- typeof(p) ________p1 = READ_ONCE(p); \
- ((typeof(*p) __force __kernel *)(________p1)); \
+ typeof(p) local = READ_ONCE(p); \
+ ((typeof(*p) __force __kernel *)(local)); \
})
+#define rcu_dereference_raw(p) __rcu_dereference_raw(p, __UNIQUE_ID(rcu))
/**
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable
@@ -467,15 +625,23 @@ do { \
* against NULL. Although rcu_access_pointer() may also be used in cases
* where update-side locks prevent the value of the pointer from changing,
* you should instead use rcu_dereference_protected() for this use case.
+ * Within an RCU read-side critical section, there is little reason to
+ * use rcu_access_pointer().
+ *
+ * It is usually best to test the rcu_access_pointer() return value
+ * directly in order to avoid accidental dereferences being introduced
+ * by later inattentive changes. In other words, assigning the
+ * rcu_access_pointer() return value to a local variable results in an
+ * accident waiting to happen.
*
* It is also permissible to use rcu_access_pointer() when read-side
- * access to the pointer was removed at least one grace period ago, as
- * is the case in the context of the RCU callback that is freeing up
- * the data, or after a synchronize_rcu() returns. This can be useful
- * when tearing down multi-linked structures after a grace period
- * has elapsed.
+ * access to the pointer was removed at least one grace period ago, as is
+ * the case in the context of the RCU callback that is freeing up the data,
+ * or after a synchronize_rcu() returns. This can be useful when tearing
+ * down multi-linked structures after a grace period has elapsed. However,
+ * rcu_dereference_protected() is normally preferred for this use case.
*/
-#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
+#define rcu_access_pointer(p) __rcu_access_pointer((p), __UNIQUE_ID(rcu), __rcu)
/**
* rcu_dereference_check() - rcu_dereference with debug checking
@@ -511,17 +677,24 @@ do { \
* annotated as __rcu.
*/
#define rcu_dereference_check(p, c) \
- __rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || rcu_read_lock_held(), __rcu)
/**
* rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
- * This is the RCU-bh counterpart to rcu_dereference_check().
+ * This is the RCU-bh counterpart to rcu_dereference_check(). However,
+ * please note that starting in v5.0 kernels, vanilla RCU grace periods
+ * wait for local_bh_disable() regions of code in addition to regions of
+ * code demarked by rcu_read_lock() and rcu_read_unlock(). This means
+ * that synchronize_rcu(), call_rcu(), and friends all take not only
+ * rcu_read_lock() but also rcu_read_lock_bh() into account.
*/
#define rcu_dereference_bh_check(p, c) \
- __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || rcu_read_lock_bh_held(), __rcu)
/**
* rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
@@ -529,9 +702,33 @@ do { \
* @c: The conditions under which the dereference will take place
*
* This is the RCU-sched counterpart to rcu_dereference_check().
+ * However, please note that starting in v5.0 kernels, vanilla RCU grace
+ * periods wait for preempt_disable() regions of code in addition to
+ * regions of code demarked by rcu_read_lock() and rcu_read_unlock().
+ * This means that synchronize_rcu(), call_rcu(), and friends all take not
+ * only rcu_read_lock() but also rcu_read_lock_sched() into account.
*/
#define rcu_dereference_sched_check(p, c) \
- __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || rcu_read_lock_sched_held(), \
+ __rcu)
+
+/**
+ * rcu_dereference_all_check() - rcu_dereference_all with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is similar to rcu_dereference_check(), but allows protection
+ * by all forms of vanilla RCU readers, including preemption disabled,
+ * bh-disabled, and interrupt-disabled regions of code. Note that "vanilla
+ * RCU" excludes SRCU and the various Tasks RCU flavors. Please note
+ * that this macro should not be backported to any Linux-kernel version
+ * preceding v5.0 due to changes in synchronize_rcu() semantics prior
+ * to that version.
+ */
+#define rcu_dereference_all_check(p, c) \
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || rcu_read_lock_any_held(), \
__rcu)
/*
@@ -541,7 +738,8 @@ do { \
* The no-tracing version of rcu_dereference_raw() must not call
* rcu_read_lock_held().
*/
-#define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu)
+#define rcu_dereference_raw_check(p) \
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), 1, __rcu)
/**
* rcu_dereference_protected() - fetch RCU pointer when updates prevented
@@ -560,7 +758,7 @@ do { \
* but very ugly failures.
*/
#define rcu_dereference_protected(p, c) \
- __rcu_dereference_protected((p), (c), __rcu)
+ __rcu_dereference_protected((p), __UNIQUE_ID(rcu), (c), __rcu)
/**
@@ -588,6 +786,14 @@ do { \
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
/**
+ * rcu_dereference_all() - fetch RCU-all-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference_all(p) rcu_dereference_all_check(p, 0)
+
+/**
* rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
* @p: The pointer to hand off
*
@@ -620,6 +826,10 @@ do { \
* sections, invocation of the corresponding RCU callback is deferred
* until after the all the other CPUs exit their critical sections.
*
+ * Both synchronize_rcu() and call_rcu() also wait for regions of code
+ * with preemption disabled, including regions of code with interrupts or
+ * softirqs disabled.
+ *
* Note, however, that RCU callbacks are permitted to run concurrently
* with new RCU read-side critical sections. One way that this can happen
* is via the following sequence of events: (1) CPU 0 enters an RCU
@@ -672,33 +882,11 @@ static __always_inline void rcu_read_lock(void)
/**
* rcu_read_unlock() - marks the end of an RCU read-side critical section.
*
- * In most situations, rcu_read_unlock() is immune from deadlock.
- * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
- * is responsible for deboosting, which it does via rt_mutex_unlock().
- * Unfortunately, this function acquires the scheduler's runqueue and
- * priority-inheritance spinlocks. This means that deadlock could result
- * if the caller of rcu_read_unlock() already holds one of these locks or
- * any lock that is ever acquired while holding them.
- *
- * That said, RCU readers are never priority boosted unless they were
- * preempted. Therefore, one way to avoid deadlock is to make sure
- * that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with one of
- * rt_mutex_unlock()'s locks held. Such preemption can be avoided in
- * a number of ways, for example, by invoking preempt_disable() before
- * critical section's outermost rcu_read_lock().
- *
- * Given that the set of locks acquired by rt_mutex_unlock() might change
- * at any time, a somewhat more future-proofed approach is to make sure
- * that that preemption never happens within any RCU read-side critical
- * section whose outermost rcu_read_unlock() is called with irqs disabled.
- * This approach relies on the fact that rt_mutex_unlock() currently only
- * acquires irq-disabled locks.
- *
- * The second of these two approaches is best in most situations,
- * however, the first approach can also be useful, at least to those
- * developers willing to keep abreast of the set of locks acquired by
- * rt_mutex_unlock().
+ * In almost all situations, rcu_read_unlock() is immune from deadlock.
+ * This deadlock immunity also extends to the scheduler's runqueue
+ * and priority-inheritance spinlocks, courtesy of the quiescent-state
+ * deferral that is carried out when rcu_read_unlock() is invoked with
+ * interrupts disabled.
*
* See rcu_read_lock() for more information.
*/
@@ -706,17 +894,19 @@ static inline void rcu_read_unlock(void)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
+ rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
__release(RCU);
__rcu_read_unlock();
- rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}
/**
* rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
*
- * This is equivalent of rcu_read_lock(), but also disables softirqs.
- * Note that anything else that disables softirqs can also serve as
- * an RCU read-side critical section.
+ * This is equivalent to rcu_read_lock(), but also disables softirqs.
+ * Note that anything else that disables softirqs can also serve as an RCU
+ * read-side critical section. However, please note that this equivalence
+ * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and
+ * rcu_read_lock_bh() were unrelated.
*
* Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
* must occur in the same context, for example, it is illegal to invoke
@@ -749,9 +939,12 @@ static inline void rcu_read_unlock_bh(void)
/**
* rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
*
- * This is equivalent of rcu_read_lock(), but disables preemption.
- * Read-side critical sections can also be introduced by anything else
- * that disables preemption, including local_irq_disable() and friends.
+ * This is equivalent to rcu_read_lock(), but also disables preemption.
+ * Read-side critical sections can also be introduced by anything else that
+ * disables preemption, including local_irq_disable() and friends. However,
+ * please note that the equivalence to rcu_read_lock() applies only to
+ * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched()
+ * were unrelated.
*
* Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
* must occur in the same context, for example, it is illegal to invoke
@@ -795,6 +988,20 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
preempt_enable_notrace();
}
+static __always_inline void rcu_read_lock_dont_migrate(void)
+{
+ if (IS_ENABLED(CONFIG_PREEMPT_RCU))
+ migrate_disable();
+ rcu_read_lock();
+}
+
+static inline void rcu_read_unlock_migrate(void)
+{
+ rcu_read_unlock();
+ if (IS_ENABLED(CONFIG_PREEMPT_RCU))
+ migrate_enable();
+}
+
/**
* RCU_INIT_POINTER() - initialize an RCU protected pointer
* @p: The pointer to be initialized.
@@ -849,80 +1056,74 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define RCU_POINTER_INITIALIZER(p, v) \
.p = RCU_INITIALIZER(v)
-/*
- * Does the specified offset indicate that the corresponding rcu_head
- * structure can be handled by kvfree_rcu()?
- */
-#define __is_kvfree_rcu_offset(offset) ((offset) < 4096)
-
/**
* kfree_rcu() - kfree an object after a grace period.
- * @ptr: pointer to kfree for both single- and double-argument invocations.
- * @rhf: the name of the struct rcu_head within the type of @ptr,
- * but only for double-argument invocations.
+ * @ptr: pointer to kfree for double-argument invocations.
+ * @rhf: the name of the struct rcu_head within the type of @ptr.
*
 * Many RCU callback functions just call kfree() on the base structure.
* These functions are trivial, but their size adds up, and furthermore
* when they are used in a kernel module, that module must invoke the
* high-latency rcu_barrier() function at module-unload time.
*
- * The kfree_rcu() function handles this issue. Rather than encoding a
- * function address in the embedded rcu_head structure, kfree_rcu() instead
- * encodes the offset of the rcu_head structure within the base structure.
- * Because the functions are not allowed in the low-order 4096 bytes of
- * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
+ * The kfree_rcu() function handles this issue. In order to have a universal
+ * callback function handling different offsets of rcu_head, the callback needs
+ * to determine the starting address of the freed object, which can be a large
+ * kmalloc or vmalloc allocation. To allow simply aligning the pointer down to
+ * a page boundary for those, only offsets up to 4095 bytes can be accommodated.
* If the offset is larger than 4095 bytes, a compile-time error will
* be generated in kvfree_rcu_arg_2(). If this error is triggered, you can
* either fall back to use of call_rcu() or rearrange the structure to
* position the rcu_head structure into the first 4096 bytes.
*
- * Note that the allowable offset might decrease in the future, for example,
- * to allow something like kmem_cache_free_rcu().
+ * The object to be freed can be allocated either by kmalloc() or
+ * kmem_cache_alloc().
+ *
+ * Note that the allowable offset might decrease in the future.
*
* The BUILD_BUG_ON check must not involve any function calls, hence the
* checks are done in macros here.
*/
-#define kfree_rcu(ptr, rhf...) kvfree_rcu(ptr, ## rhf)
+#define kfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf)
+#define kvfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf)
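For illustration, a minimal sketch of the two-argument form described above (struct and function names are hypothetical):

struct demo_node {
	int key;
	struct rcu_head rcu;	/* must sit within the first 4096 bytes */
};

static void demo_release(struct demo_node *dn)
{
	kfree_rcu(dn, rcu);	/* frees dn after a full grace period */
}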
/**
- * kvfree_rcu() - kvfree an object after a grace period.
- *
- * This macro consists of one or two arguments and it is
- * based on whether an object is head-less or not. If it
- * has a head then a semantic stays the same as it used
- * to be before:
- *
- * kvfree_rcu(ptr, rhf);
- *
- * where @ptr is a pointer to kvfree(), @rhf is the name
- * of the rcu_head structure within the type of @ptr.
+ * kfree_rcu_mightsleep() - kfree an object after a grace period.
+ * @ptr: pointer to kfree for single-argument invocations.
*
 * With the head-less variant, only one argument is passed,
 * namely a pointer that has to be freed after a grace
 * period. Therefore the semantics are
*
- * kvfree_rcu(ptr);
+ * kfree_rcu_mightsleep(ptr);
*
- * where @ptr is a pointer to kvfree().
+ * where @ptr is the pointer to be freed by kvfree().
*
 * Please note that the head-less way of freeing may be used
 * only from a context that is allowed to sleep, that is, one
 * that could legitimately carry a might_sleep() annotation.
 * Otherwise, please switch to embedding the rcu_head
 * structure within the type of @ptr.
*/
-#define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \
- kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__)
+#define kfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
+#define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
+
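A sketch of the head-less form, which trades the embedded rcu_head for the right to sleep:

static void demo_release_headless(void *p)
{
	might_sleep();
	kvfree_rcu_mightsleep(p);	/* may block waiting for a grace period */
}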
+/*
+ * Defined in mm/slab_common.c; there is no suitable header to include here.
+ */
+void kvfree_call_rcu(struct rcu_head *head, void *ptr);
-#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME
+/*
+ * The BUILD_BUG_ON() makes sure the rcu_head offset can be handled. See the
+ * comment of kfree_rcu() for details.
+ */
#define kvfree_rcu_arg_2(ptr, rhf) \
do { \
typeof (ptr) ___p = (ptr); \
\
- if (___p) { \
- BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \
- kvfree_call_rcu(&((___p)->rhf), (rcu_callback_t)(unsigned long) \
- (offsetof(typeof(*(ptr)), rhf))); \
- } \
+ if (___p) { \
+ BUILD_BUG_ON(offsetof(typeof(*(ptr)), rhf) >= 4096); \
+ kvfree_call_rcu(&((___p)->rhf), (void *) (___p)); \
+ } \
} while (0)
#define kvfree_rcu_arg_1(ptr) \
@@ -930,7 +1131,7 @@ do { \
typeof(ptr) ___p = (ptr); \
\
if (___p) \
- kvfree_call_rcu(NULL, (rcu_callback_t) (___p)); \
+ kvfree_call_rcu(NULL, (void *) (___p)); \
} while (0)
/*
@@ -991,4 +1192,18 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
extern int rcu_expedited;
extern int rcu_normal;
+DEFINE_LOCK_GUARD_0(rcu,
+ do {
+ rcu_read_lock();
+ /*
+ * sparse doesn't call the cleanup function,
+ * so just release immediately and don't track
+ * the context. We don't need to anyway, since
+ * the whole point of the guard is to not need
+ * the explicit unlock.
+ */
+ __release(RCU);
+ } while (0),
+ rcu_read_unlock())
+
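With the guard defined above, a read-side critical section can be scoped lexically via <linux/cleanup.h>; a minimal sketch with hypothetical names:

static int demo_read_val(struct demo_item __rcu **slot)
{
	guard(rcu)();	/* rcu_read_lock() now, rcu_read_unlock() at scope exit */

	struct demo_item *it = rcu_dereference(*slot);

	return it ? it->val : -ENOENT;
}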
#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index 86c8f6c98412..e6c44eb428ab 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -10,6 +10,7 @@
#include <linux/sched.h>
#include <linux/rcupdate.h>
+#include <linux/cleanup.h>
extern struct lockdep_map rcu_trace_lock_map;
@@ -31,7 +32,7 @@ static inline int rcu_read_lock_trace_held(void)
#ifdef CONFIG_TASKS_TRACE_RCU
-void rcu_read_unlock_trace_special(struct task_struct *t, int nesting);
+void rcu_read_unlock_trace_special(struct task_struct *t);
/**
* rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
@@ -75,17 +76,19 @@ static inline void rcu_read_unlock_trace(void)
nesting = READ_ONCE(t->trc_reader_nesting) - 1;
barrier(); // Critical section before disabling.
// Disable IPI-based setting of .need_qs.
- WRITE_ONCE(t->trc_reader_nesting, INT_MIN);
+ WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
WRITE_ONCE(t->trc_reader_nesting, nesting);
return; // We assume shallow reader nesting.
}
- rcu_read_unlock_trace_special(t, nesting);
+ WARN_ON_ONCE(nesting != 0);
+ rcu_read_unlock_trace_special(t);
}
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
void synchronize_rcu_tasks_trace(void);
void rcu_barrier_tasks_trace(void);
+struct task_struct *get_rcu_tasks_trace_gp_kthread(void);
#else
/*
* The BPF JIT forms these addresses even when it doesn't call these
@@ -96,4 +99,8 @@ static inline void rcu_read_lock_trace(void) { BUG(); }
static inline void rcu_read_unlock_trace(void) { BUG(); }
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
+DEFINE_LOCK_GUARD_0(rcu_tasks_trace,
+ rcu_read_lock_trace(),
+ rcu_read_unlock_trace())
+
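The same lexical-scoping pattern applies to the RCU-tasks-trace flavor, for example:

static void demo_trace_reader(void)
{
	guard(rcu_tasks_trace)();	/* unlocked automatically at scope exit */
	/* ... inspect state protected by RCU-tasks-trace ... */
}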
#endif /* __LINUX_RCUPDATE_TRACE_H */
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index 699b938358bf..4c92d4291cce 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -8,6 +8,7 @@
#include <linux/rcupdate.h>
#include <linux/completion.h>
+#include <linux/sched.h>
/*
* Structure allowing asynchronous waiting on RCU.
@@ -15,21 +16,24 @@
struct rcu_synchronize {
struct rcu_head head;
struct completion completion;
+
+ /* This is for debugging. */
+ struct rcu_gp_oldstate oldstate;
};
void wakeme_after_rcu(struct rcu_head *head);
-void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
+void __wait_rcu_gp(bool checktiny, unsigned int state, int n, call_rcu_func_t *crcu_array,
struct rcu_synchronize *rs_array);
-#define _wait_rcu_gp(checktiny, ...) \
-do { \
- call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
- struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \
- __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \
- __crcu_array, __rs_array); \
+#define _wait_rcu_gp(checktiny, state, ...) \
+do { \
+ call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
+ struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \
+ __wait_rcu_gp(checktiny, state, ARRAY_SIZE(__crcu_array), __crcu_array, __rs_array); \
} while (0)
-#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
+#define wait_rcu_gp(...) _wait_rcu_gp(false, TASK_UNINTERRUPTIBLE, __VA_ARGS__)
+#define wait_rcu_gp_state(state, ...) _wait_rcu_gp(false, state, __VA_ARGS__)
/**
* synchronize_rcu_mult - Wait concurrently for multiple grace periods
@@ -42,12 +46,37 @@ do { \
* call_srcu() function, with this wrapper supplying the pointer to the
* corresponding srcu_struct.
*
+ * Note that call_rcu_hurry() should be used instead of call_rcu()
+ * because in kernels built with CONFIG_RCU_LAZY=y the delay between the
+ * invocation of call_rcu() and that of the corresponding RCU callback
+ * can be multiple seconds.
+ *
* The first argument tells Tiny RCU's _wait_rcu_gp() not to
* bother waiting for RCU. The reason for this is because anywhere
* synchronize_rcu_mult() can be called is automatically already a full
* grace period.
*/
#define synchronize_rcu_mult(...) \
- _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
+ _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), TASK_UNINTERRUPTIBLE, __VA_ARGS__)
+
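As a concrete example of the note above, waiting for a normal-RCU and an RCU-tasks grace period concurrently rather than back to back:

/* Wait for normal-RCU and RCU-tasks grace periods in parallel. */
synchronize_rcu_mult(call_rcu_hurry, call_rcu_tasks);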
+static inline void cond_resched_rcu(void)
+{
+#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
+ rcu_read_unlock();
+ cond_resched();
+ rcu_read_lock();
+#endif
+}
+
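A sketch of the intended usage of cond_resched_rcu(): a long scan that re-derives its pointers on every pass, so that momentarily exiting the read-side critical section is safe (table and helper names are hypothetical):

static void demo_scan(void)
{
	int i;

	rcu_read_lock();
	for (i = 0; i < DEMO_TABLE_SIZE; i++) {
		struct demo_entry *e = rcu_dereference(demo_table[i]);

		if (e)
			demo_inspect(e);
		cond_resched_rcu();	/* may briefly drop rcu_read_lock() */
	}
	rcu_read_unlock();
}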
+// Has the current task blocked within its current RCU read-side
+// critical section?
+static inline bool has_rcu_reader_blocked(void)
+{
+#ifdef CONFIG_PREEMPT_RCU
+ return !list_empty(&current->rcu_node_entry);
+#else
+ return false;
+#endif
+}
#endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */
diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h
new file mode 100644
index 000000000000..2fb2af6d9824
--- /dev/null
+++ b/include/linux/rcuref.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_RCUREF_H
+#define _LINUX_RCUREF_H
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/limits.h>
+#include <linux/lockdep.h>
+#include <linux/preempt.h>
+#include <linux/rcupdate.h>
+
+#define RCUREF_ONEREF 0x00000000U
+#define RCUREF_MAXREF 0x7FFFFFFFU
+#define RCUREF_SATURATED 0xA0000000U
+#define RCUREF_RELEASED 0xC0000000U
+#define RCUREF_DEAD 0xE0000000U
+#define RCUREF_NOREF 0xFFFFFFFFU
+
+/**
+ * rcuref_init - Initialize a rcuref reference count with the given reference count
+ * @ref: Pointer to the reference count
+ * @cnt: The initial reference count, typically '1'
+ */
+static inline void rcuref_init(rcuref_t *ref, unsigned int cnt)
+{
+ atomic_set(&ref->refcnt, cnt - 1);
+}
+
+/**
+ * rcuref_read - Read the number of held reference counts of a rcuref
+ * @ref: Pointer to the reference count
+ *
+ * Return: The number of held references (0 ... N). The value 0 does not
+ * indicate that it is safe to schedule the object, protected by this reference
+ * counter, for deconstruction.
+ * If you want to know if the reference counter has been marked DEAD (as
+ * signaled by rcuref_put()) please use rcuref_is_dead().
+ */
+static inline unsigned int rcuref_read(rcuref_t *ref)
+{
+ unsigned int c = atomic_read(&ref->refcnt);
+
+ /* Return 0 if within the DEAD zone. */
+ return c >= RCUREF_RELEASED ? 0 : c + 1;
+}
+
+/**
+ * rcuref_is_dead - Check if the rcuref has already been marked dead
+ * @ref: Pointer to the reference count
+ *
+ * Return: True if the object has been marked DEAD. This signals that a previous
+ * invocation of rcuref_put() returned true on this reference counter meaning
+ * the protected object can safely be scheduled for deconstruction.
+ * Otherwise, returns false.
+ */
+static inline bool rcuref_is_dead(rcuref_t *ref)
+{
+ unsigned int c = atomic_read(&ref->refcnt);
+
+ return (c >= RCUREF_RELEASED) && (c < RCUREF_NOREF);
+}
+
+extern __must_check bool rcuref_get_slowpath(rcuref_t *ref);
+
+/**
+ * rcuref_get - Acquire one reference on a rcuref reference count
+ * @ref: Pointer to the reference count
+ *
+ * Similar to atomic_inc_not_zero() but saturates at RCUREF_MAXREF.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See documentation in lib/rcuref.c
+ *
+ * Return:
+ * False if the attempt to acquire a reference failed. This happens
+ * when the last reference has been put already
+ *
+ * True if a reference was successfully acquired
+ */
+static inline __must_check bool rcuref_get(rcuref_t *ref)
+{
+ /*
+ * Unconditionally increase the reference count. The saturation and
+ * dead zones provide enough tolerance for this.
+ */
+ if (likely(!atomic_add_negative_relaxed(1, &ref->refcnt)))
+ return true;
+
+ /* Handle the cases inside the saturation and dead zones */
+ return rcuref_get_slowpath(ref);
+}
+
+extern __must_check bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt);
+
+/*
+ * Internal helper. Do not invoke directly.
+ */
+static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)
+{
+ int cnt;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
+ "suspicious rcuref_put_rcusafe() usage");
+ /*
+ * Unconditionally decrease the reference count. The saturation and
+ * dead zones provide enough tolerance for this.
+ */
+ cnt = atomic_sub_return_release(1, &ref->refcnt);
+ if (likely(cnt >= 0))
+ return false;
+
+ /*
+ * Handle the last reference drop and cases inside the saturation
+ * and dead zones.
+ */
+ return rcuref_put_slowpath(ref, cnt);
+}
+
+/**
+ * rcuref_put_rcusafe -- Release one reference for a rcuref reference count RCU safe
+ * @ref: Pointer to the reference count
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
+ *
+ * Can be invoked from contexts which guarantee that no grace period can
+ * happen which would free the object concurrently if the decrement drops
+ * the last reference and the slowpath races against a concurrent get() and
+ * put() pair. rcu_read_lock()'ed and atomic contexts qualify.
+ *
+ * Return:
+ * True if this was the last reference with no future references
+ * possible. This signals the caller that it can safely release the
+ * object which is protected by the reference counter.
+ *
+ * False if there are still active references or the put() raced
+ * with a concurrent get()/put() pair. Caller is not allowed to
+ * release the protected object.
+ */
+static inline __must_check bool rcuref_put_rcusafe(rcuref_t *ref)
+{
+ return __rcuref_put(ref);
+}
+
+/**
+ * rcuref_put -- Release one reference for a rcuref reference count
+ * @ref: Pointer to the reference count
+ *
+ * Can be invoked from any context.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
+ *
+ * Return:
+ *
+ * True if this was the last reference with no future references
+ * possible. This signals the caller that it can safely schedule the
+ * object, which is protected by the reference counter, for
+ * deconstruction.
+ *
+ * False if there are still active references or the put() raced
+ * with a concurrent get()/put() pair. Caller is not allowed to
+ * deconstruct the protected object.
+ */
+static inline __must_check bool rcuref_put(rcuref_t *ref)
+{
+ bool released;
+
+ preempt_disable();
+ released = __rcuref_put(ref);
+ preempt_enable();
+ return released;
+}
+
+#endif
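Pulling the pieces of this new header together, a minimal lifecycle sketch (the object and its users are hypothetical); note that rcuref_get() expects the object memory to be RCU-stable while the reference is acquired:

struct demo_conn {
	rcuref_t ref;
	struct rcu_head rcu;
};

static struct demo_conn *demo_conn_new(void)
{
	struct demo_conn *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (c)
		rcuref_init(&c->ref, 1);	/* one initial reference */
	return c;
}

static void demo_conn_put(struct demo_conn *c)
{
	if (rcuref_put(&c->ref))	/* true: last reference is gone */
		kfree_rcu(c, rcu);
}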
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 35e0be326ffc..f519cd680228 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -14,44 +14,80 @@
#include <asm/param.h> /* for HZ */
-/* Never flag non-existent other CPUs! */
-static inline bool rcu_eqs_special_set(int cpu) { return false; }
+struct rcu_gp_oldstate {
+ unsigned long rgos_norm;
+};
+
+// Maximum number of rcu_gp_oldstate values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 2
+
+/*
+ * Are the two oldstate values the same? See the Tree RCU version for
+ * docbook header.
+ */
+static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
+ struct rcu_gp_oldstate *rgosp2)
+{
+ return rgosp1->rgos_norm == rgosp2->rgos_norm;
+}
unsigned long get_state_synchronize_rcu(void);
+
+static inline void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = get_state_synchronize_rcu();
+}
+
unsigned long start_poll_synchronize_rcu(void);
+
+static inline void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = start_poll_synchronize_rcu();
+}
+
bool poll_state_synchronize_rcu(unsigned long oldstate);
+static inline bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ return poll_state_synchronize_rcu(rgosp->rgos_norm);
+}
+
static inline void cond_synchronize_rcu(unsigned long oldstate)
{
might_sleep();
}
-extern void rcu_barrier(void);
+static inline void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
+{
+ cond_synchronize_rcu(rgosp->rgos_norm);
+}
-static inline void synchronize_rcu_expedited(void)
+static inline unsigned long start_poll_synchronize_rcu_expedited(void)
{
- synchronize_rcu();
+ return start_poll_synchronize_rcu();
}
-/*
- * Add one more declaration of kvfree() here. It is
- * not so straight forward to just include <linux/mm.h>
- * where it is defined due to getting many compile
- * errors caused by that include.
- */
-extern void kvfree(const void *addr);
+static inline void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
+{
+ rgosp->rgos_norm = start_poll_synchronize_rcu_expedited();
+}
+
+static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
+{
+ cond_synchronize_rcu(oldstate);
+}
-static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
+static inline void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
- if (head) {
- call_rcu(head, func);
- return;
- }
+ cond_synchronize_rcu_expedited(rgosp->rgos_norm);
+}
- // kvfree_rcu(one_arg) call.
- might_sleep();
+extern void rcu_barrier(void);
+
+static inline void synchronize_rcu_expedited(void)
+{
synchronize_rcu();
- kvfree((void *) func);
}
void rcu_qs(void);
@@ -67,46 +103,32 @@ static inline void rcu_softirq_qs(void)
rcu_tasks_qs(current, (preempt)); \
} while (0)
-static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
+static inline int rcu_needs_cpu(void)
{
- *nextevt = KTIME_MAX;
return 0;
}
+static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
+
/*
* Take advantage of the fact that there is only one CPU, which
* allows us to ignore virtualization-based context switches.
*/
-static inline void rcu_virt_note_context_switch(int cpu) { }
+static inline void rcu_virt_note_context_switch(void) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
-static inline void rcu_idle_enter(void) { }
-static inline void rcu_idle_exit(void) { }
-static inline void rcu_irq_enter(void) { }
-static inline void rcu_irq_exit_irqson(void) { }
-static inline void rcu_irq_enter_irqson(void) { }
-static inline void rcu_irq_exit(void) { }
-static inline void rcu_irq_exit_preempt(void) { }
static inline void rcu_irq_exit_check_preempt(void) { }
-#define rcu_is_idle_cpu(cpu) \
- (is_idle_task(current) && !in_nmi() && !in_irq() && !in_serving_softirq())
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
-#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
-#else /* #ifndef CONFIG_SRCU */
-static inline void rcu_scheduler_starting(void) { }
-#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
-static inline void rcu_momentary_dyntick_idle(void) { }
-static inline void kfree_rcu_scheduler_running(void) { }
-static inline bool rcu_gp_might_be_stalled(void) { return false; }
+static inline void rcu_momentary_eqs(void) { }
/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }
@@ -117,6 +139,6 @@ static inline void rcu_all_qs(void) { barrier(); }
#define rcutree_offline_cpu NULL
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL
-static inline void rcu_cpu_starting(unsigned int cpu) { }
+static inline void rcutree_report_cpu_starting(unsigned int cpu) { }
#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index b89b54130f49..9d2d7bd251d4 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -19,40 +19,70 @@
void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
-int rcu_needs_cpu(u64 basem, u64 *nextevt);
+int rcu_needs_cpu(void);
void rcu_cpu_stall_reset(void);
+void rcu_request_urgent_qs_task(struct task_struct *t);
/*
* Note a virtualization-based context switch. This is simply a
* wrapper around rcu_note_context_switch(), which allows TINY_RCU
* to save a few bytes. The caller must have disabled interrupts.
*/
-static inline void rcu_virt_note_context_switch(int cpu)
+static inline void rcu_virt_note_context_switch(void)
{
rcu_note_context_switch(false);
}
void synchronize_rcu_expedited(void);
-void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier(void);
-bool rcu_eqs_special_set(int cpu);
-void rcu_momentary_dyntick_idle(void);
-void kfree_rcu_scheduler_running(void);
-bool rcu_gp_might_be_stalled(void);
+void rcu_momentary_eqs(void);
+
+struct rcu_gp_oldstate {
+ unsigned long rgos_norm;
+ unsigned long rgos_exp;
+};
+
+// Maximum number of rcu_gp_oldstate values corresponding to
+// not-yet-completed RCU grace periods.
+#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 4
+
+/**
+ * same_state_synchronize_rcu_full - Are two old-state values identical?
+ * @rgosp1: First old-state value.
+ * @rgosp2: Second old-state value.
+ *
+ * The two old-state values must have been obtained from either
+ * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
+ * or get_completed_synchronize_rcu_full(). Returns @true if the two
+ * values are identical and @false otherwise. This allows structures
+ * whose lifetimes are tracked by old-state values to push these values
+ * to a list header, allowing those structures to be slightly smaller.
+ *
+ * Note that equality is judged on a bitwise basis, so that an
+ * @rcu_gp_oldstate structure with an already-completed state in one field
+ * will compare not-equal to a structure with an already-completed state
+ * in the other field. After all, the @rcu_gp_oldstate structure is opaque
+ * so how did such a situation come to pass in the first place?
+ */
+static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
+ struct rcu_gp_oldstate *rgosp2)
+{
+ return rgosp1->rgos_norm == rgosp2->rgos_norm && rgosp1->rgos_exp == rgosp2->rgos_exp;
+}
+
+unsigned long start_poll_synchronize_rcu_expedited(void);
+void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
+void cond_synchronize_rcu_expedited(unsigned long oldstate);
+void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
unsigned long get_state_synchronize_rcu(void);
+void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
unsigned long start_poll_synchronize_rcu(void);
+void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
bool poll_state_synchronize_rcu(unsigned long oldstate);
+bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu(unsigned long oldstate);
-
-void rcu_idle_enter(void);
-void rcu_idle_exit(void);
-void rcu_irq_enter(void);
-void rcu_irq_exit(void);
-void rcu_irq_exit_preempt(void);
-void rcu_irq_enter_irqson(void);
-void rcu_irq_exit_irqson(void);
-bool rcu_is_idle_cpu(int cpu);
+void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
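A brief polling sketch using the full-state API declared above: take the snapshot first, do other work, then wait only if the grace period has not already elapsed.

static void demo_wait_if_needed(void)
{
	struct rcu_gp_oldstate gos;

	get_state_synchronize_rcu_full(&gos);
	/* ... other work while the grace period progresses ... */
	if (!poll_state_synchronize_rcu_full(&gos))
		cond_synchronize_rcu_full(&gos);	/* blocks until the GP completes */
}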
#ifdef CONFIG_PROVE_RCU
void rcu_irq_exit_check_preempt(void);
@@ -60,23 +90,38 @@ void rcu_irq_exit_check_preempt(void);
static inline void rcu_irq_exit_check_preempt(void) { }
#endif
+struct task_struct;
+void rcu_preempt_deferred_qs(struct task_struct *t);
+
void exit_rcu(void);
void rcu_scheduler_starting(void);
-extern int rcu_scheduler_active __read_mostly;
+extern int rcu_scheduler_active;
void rcu_end_inkernel_boot(void);
bool rcu_inkernel_boot_has_ended(void);
bool rcu_is_watching(void);
-#ifndef CONFIG_PREEMPTION
+#ifndef CONFIG_PREEMPT_RCU
void rcu_all_qs(void);
#endif
/* RCUtree hotplug events */
int rcutree_prepare_cpu(unsigned int cpu);
int rcutree_online_cpu(unsigned int cpu);
-int rcutree_offline_cpu(unsigned int cpu);
+void rcutree_report_cpu_starting(unsigned int cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
-void rcu_cpu_starting(unsigned int cpu);
+int rcutree_offline_cpu(unsigned int cpu);
+#else
+#define rcutree_dead_cpu NULL
+#define rcutree_dying_cpu NULL
+#define rcutree_offline_cpu NULL
+#endif
+
+void rcutree_migrate_callbacks(int cpu);
+
+/* Called from hotplug and also arm64 early secondary boot failure */
+void rcutree_report_cpu_dead(void);
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
index 61c56cca95c4..9ad134a04b41 100644
--- a/include/linux/rcuwait.h
+++ b/include/linux/rcuwait.h
@@ -4,18 +4,7 @@
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>
-
-/*
- * rcuwait provides a way of blocking and waking up a single
- * task in an rcu-safe manner.
- *
- * The only time @task is non-nil is when a user is blocked (or
- * checking if it needs to) on a condition, and reset as soon as we
- * know that the condition has succeeded and are awoken.
- */
-struct rcuwait {
- struct task_struct __rcu *task;
-};
+#include <linux/types.h>
#define __RCUWAIT_INITIALIZER(name) \
{ .task = NULL, }
@@ -47,15 +36,11 @@ static inline void prepare_to_rcuwait(struct rcuwait *w)
rcu_assign_pointer(w->task, current);
}
-static inline void finish_rcuwait(struct rcuwait *w)
-{
- rcu_assign_pointer(w->task, NULL);
- __set_current_state(TASK_RUNNING);
-}
+extern void finish_rcuwait(struct rcuwait *w);
-#define rcuwait_wait_event(w, condition, state) \
+#define ___rcuwait_wait_event(w, condition, state, ret, cmd) \
({ \
- int __ret = 0; \
+ long __ret = ret; \
prepare_to_rcuwait(w); \
for (;;) { \
/* \
@@ -71,10 +56,27 @@ static inline void finish_rcuwait(struct rcuwait *w)
break; \
} \
\
- schedule(); \
+ cmd; \
} \
finish_rcuwait(w); \
__ret; \
})
+#define rcuwait_wait_event(w, condition, state) \
+ ___rcuwait_wait_event(w, condition, state, 0, schedule())
+
+#define __rcuwait_wait_event_timeout(w, condition, state, timeout) \
+ ___rcuwait_wait_event(w, ___wait_cond_timeout(condition), \
+ state, timeout, \
+ __ret = schedule_timeout(__ret))
+
+#define rcuwait_wait_event_timeout(w, condition, state, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __rcuwait_wait_event_timeout(w, condition, \
+ state, timeout); \
+ __ret; \
+})
+
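A usage sketch for the new timeout variant (the completion flag is hypothetical, and rcuwait_init() must have run on the rcuwait beforehand):

static struct rcuwait demo_wait;
static bool demo_done;

/* Waits up to one second; returns remaining jiffies, or 0 on timeout. */
static long demo_wait_for_done(void)
{
	return rcuwait_wait_event_timeout(&demo_wait, READ_ONCE(demo_done),
					  TASK_UNINTERRUPTIBLE, HZ);
}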
#endif /* _LINUX_RCUWAIT_H_ */
diff --git a/include/linux/rcuwait_api.h b/include/linux/rcuwait_api.h
new file mode 100644
index 000000000000..f962e28544dd
--- /dev/null
+++ b/include/linux/rcuwait_api.h
@@ -0,0 +1 @@
+#include <linux/rcuwait.h>
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 3734cd8f38a8..aa08c3bbbf59 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -7,6 +7,7 @@
#include <uapi/linux/reboot.h>
struct device;
+struct sys_off_handler;
#define SYS_DOWN 0x0001 /* Notify of system down */
#define SYS_RESTART SYS_DOWN
@@ -62,6 +63,106 @@ extern void machine_shutdown(void);
struct pt_regs;
extern void machine_crash_shutdown(struct pt_regs *);
+void do_kernel_power_off(void);
+
+/*
+ * sys-off handler API.
+ */
+
+/*
+ * Standard sys-off priority levels. Users are expected to set priorities
+ * relative to the standard levels.
+ *
+ * SYS_OFF_PRIO_PLATFORM: Use this for platform-level handlers.
+ *
+ * SYS_OFF_PRIO_LOW: Use this for handler of last resort.
+ *
+ * SYS_OFF_PRIO_DEFAULT: Use this for normal handlers.
+ *
+ * SYS_OFF_PRIO_HIGH: Use this for higher priority handlers.
+ *
+ * SYS_OFF_PRIO_FIRMWARE: Use this if handler uses firmware call.
+ */
+#define SYS_OFF_PRIO_PLATFORM -256
+#define SYS_OFF_PRIO_LOW -128
+#define SYS_OFF_PRIO_DEFAULT 0
+#define SYS_OFF_PRIO_HIGH 192
+#define SYS_OFF_PRIO_FIRMWARE 224
+
+enum sys_off_mode {
+ /**
+ * @SYS_OFF_MODE_POWER_OFF_PREPARE:
+ *
+ * Handlers prepare system to be powered off. Handlers are
+ * allowed to sleep.
+ */
+ SYS_OFF_MODE_POWER_OFF_PREPARE,
+
+ /**
+ * @SYS_OFF_MODE_POWER_OFF:
+ *
+ * Handlers power-off system. Handlers are disallowed to sleep.
+ */
+ SYS_OFF_MODE_POWER_OFF,
+
+ /**
+ * @SYS_OFF_MODE_RESTART_PREPARE:
+ *
+ * Handlers prepare system to be restarted. Handlers are
+ * allowed to sleep.
+ */
+ SYS_OFF_MODE_RESTART_PREPARE,
+
+ /**
+ * @SYS_OFF_MODE_RESTART:
+ *
+ * Handlers restart system. Handlers are disallowed to sleep.
+ */
+ SYS_OFF_MODE_RESTART,
+};
+
+/**
+ * struct sys_off_data - sys-off callback argument
+ *
+ * @mode: Mode ID. Currently used only by the sys-off restart mode,
+ * see enum reboot_mode for the available modes.
+ * @cb_data: User's callback data.
+ * @cmd: Command string. Currently used only by the sys-off restart mode,
+ * NULL otherwise.
+ * @dev: Device of the sys-off handler. Only if known (devm_register_*),
+ * NULL otherwise.
+ */
+struct sys_off_data {
+ int mode;
+ void *cb_data;
+ const char *cmd;
+ struct device *dev;
+};
+
+struct sys_off_handler *
+register_sys_off_handler(enum sys_off_mode mode,
+ int priority,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+void unregister_sys_off_handler(struct sys_off_handler *handler);
+
+int devm_register_sys_off_handler(struct device *dev,
+ enum sys_off_mode mode,
+ int priority,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+
+int devm_register_power_off_handler(struct device *dev,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+
+int devm_register_restart_handler(struct device *dev,
+ int (*callback)(struct sys_off_data *data),
+ void *cb_data);
+
+int register_platform_power_off(void (*power_off)(void));
+void unregister_platform_power_off(void (*power_off)(void));
+
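A registration sketch for the new sys-off API, assuming a hypothetical firmware call; sys-off callbacks conventionally return notifier-style values such as NOTIFY_DONE:

static int demo_power_off(struct sys_off_data *data)
{
	demo_firmware_power_off(data->cb_data);	/* hypothetical firmware call */
	return NOTIFY_DONE;
}

static int demo_probe(struct device *dev)
{
	return devm_register_sys_off_handler(dev, SYS_OFF_MODE_POWER_OFF,
					     SYS_OFF_PRIO_FIRMWARE,
					     demo_power_off, NULL);
}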
/*
 * Architecture-independent implementations of sys_reboot commands.
*/
@@ -70,16 +171,46 @@ extern void kernel_restart_prepare(char *cmd);
extern void kernel_restart(char *cmd);
extern void kernel_halt(void);
extern void kernel_power_off(void);
+extern bool kernel_can_power_off(void);
-extern int C_A_D; /* for sysctl */
void ctrl_alt_del(void);
-#define POWEROFF_CMD_PATH_LEN 256
-extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN];
-
extern void orderly_poweroff(bool force);
extern void orderly_reboot(void);
+/**
+ * enum hw_protection_action - Hardware protection action
+ *
+ * @HWPROT_ACT_DEFAULT:
+ * The default action should be taken. This is HWPROT_ACT_SHUTDOWN
+ * by default, but can be overridden.
+ * @HWPROT_ACT_SHUTDOWN:
+ * The system should be shut down (powered off) for HW protection.
+ * @HWPROT_ACT_REBOOT:
+ * The system should be rebooted for HW protection.
+ */
+enum hw_protection_action { HWPROT_ACT_DEFAULT, HWPROT_ACT_SHUTDOWN, HWPROT_ACT_REBOOT };
+
+void __hw_protection_trigger(const char *reason, int ms_until_forced,
+ enum hw_protection_action action);
+
+/**
+ * hw_protection_trigger - Trigger default emergency system hardware protection action
+ *
+ * @reason: Reason of emergency shutdown or reboot to be printed.
+ * @ms_until_forced: Time to wait for orderly shutdown or reboot before
+ * triggering it. Negative value disables the forced
+ * shutdown or reboot.
+ *
+ * Initiate an emergency system shutdown or reboot in order to protect
+ * hardware from further damage. The exact action taken is controllable at
+ * runtime and defaults to shutdown.
+ */
+static inline void hw_protection_trigger(const char *reason, int ms_until_forced)
+{
+ __hw_protection_trigger(reason, ms_until_forced, HWPROT_ACT_DEFAULT);
+}
+
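For example, a thermal driver might request the default protection action, allowing 100 ms for an orderly attempt before it is forced (threshold variables are hypothetical):

if (temp_mC > crit_temp_mC)
	hw_protection_trigger("critical temperature reached", 100);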
/*
* Emergency restart, callable from an interrupt handler.
*/
diff --git a/include/linux/ref_tracker.h b/include/linux/ref_tracker.h
new file mode 100644
index 000000000000..d10563afd91c
--- /dev/null
+++ b/include/linux/ref_tracker.h
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#ifndef _LINUX_REF_TRACKER_H
+#define _LINUX_REF_TRACKER_H
+#include <linux/refcount.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/stackdepot.h>
+
+#define __ostream_printf __printf(2, 3)
+
+struct ref_tracker;
+
+struct ref_tracker_dir {
+#ifdef CONFIG_REF_TRACKER
+ spinlock_t lock;
+ unsigned int quarantine_avail;
+ refcount_t untracked;
+ refcount_t no_tracker;
+ bool dead;
+ struct list_head list; /* List of active trackers */
+ struct list_head quarantine; /* List of dead trackers */
+ const char *class; /* object classname */
+#endif
+};
+
+#ifdef CONFIG_REF_TRACKER
+
+#ifdef CONFIG_DEBUG_FS
+
+void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir);
+void ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...);
+
+#else /* CONFIG_DEBUG_FS */
+
+static inline void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir)
+{
+}
+
+static inline __ostream_printf
+void ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * ref_tracker_dir_init - initialize a ref_tracker dir
+ * @dir: ref_tracker_dir to be initialized
+ * @quarantine_count: max number of entries to be tracked
+ * @class: pointer to static string that describes object type
+ *
+ * Initialize a ref_tracker_dir. If debugfs is configured, then a file
+ * will also be created for it under the top-level ref_tracker debugfs
+ * directory.
+ *
+ * Note that @class must point to a static string.
+ */
+static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
+ unsigned int quarantine_count,
+ const char *class)
+{
+ INIT_LIST_HEAD(&dir->list);
+ INIT_LIST_HEAD(&dir->quarantine);
+ spin_lock_init(&dir->lock);
+ dir->quarantine_avail = quarantine_count;
+ dir->dead = false;
+ refcount_set(&dir->untracked, 1);
+ refcount_set(&dir->no_tracker, 1);
+ dir->class = class;
+ ref_tracker_dir_debugfs(dir);
+ stack_depot_init();
+}
+
+void ref_tracker_dir_exit(struct ref_tracker_dir *dir);
+
+void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
+ unsigned int display_limit);
+
+void ref_tracker_dir_print(struct ref_tracker_dir *dir,
+ unsigned int display_limit);
+
+int ref_tracker_dir_snprint(struct ref_tracker_dir *dir, char *buf, size_t size);
+
+int ref_tracker_alloc(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp, gfp_t gfp);
+
+int ref_tracker_free(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp);
+
+#else /* CONFIG_REF_TRACKER */
+
+static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
+ unsigned int quarantine_count,
+ const char *class)
+{
+}
+
+static inline void ref_tracker_dir_debugfs(struct ref_tracker_dir *dir)
+{
+}
+
+static inline __ostream_printf
+void ref_tracker_dir_symlink(struct ref_tracker_dir *dir, const char *fmt, ...)
+{
+}
+
+static inline void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
+{
+}
+
+static inline void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir,
+ unsigned int display_limit)
+{
+}
+
+static inline void ref_tracker_dir_print(struct ref_tracker_dir *dir,
+ unsigned int display_limit)
+{
+}
+
+static inline int ref_tracker_dir_snprint(struct ref_tracker_dir *dir,
+ char *buf, size_t size)
+{
+ return 0;
+}
+
+static inline int ref_tracker_alloc(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp,
+ gfp_t gfp)
+{
+ return 0;
+}
+
+static inline int ref_tracker_free(struct ref_tracker_dir *dir,
+ struct ref_tracker **trackerp)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _LINUX_REF_TRACKER_H */
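Tying this header together, a minimal tracking sketch: each acquired reference carries a tracker that must be handed back on release, and exiting the dir reports any leaks.

static struct ref_tracker_dir demo_dir;

static void demo_refs(void)
{
	struct ref_tracker *t = NULL;

	ref_tracker_dir_init(&demo_dir, 16, "demo");
	if (!ref_tracker_alloc(&demo_dir, &t, GFP_KERNEL)) {
		/* ... use the tracked reference ... */
		ref_tracker_free(&demo_dir, &t);
	}
	ref_tracker_dir_exit(&demo_dir);	/* reports any leaked trackers */
}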
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index b8a6e387f8f9..80dc023ac2bf 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -87,6 +87,15 @@
* The decrements dec_and_test() and sub_and_test() also provide acquire
* ordering on success.
*
+ * refcount_{add|inc}_not_zero_acquire() and refcount_set_release() provide
+ * acquire and release ordering for cases when the memory occupied by the
+ * object might be reused to store another object. This is important for the
+ * cases where secondary validation is required to detect such reuse, e.g.
+ * SLAB_TYPESAFE_BY_RCU. The secondary validation checks have to happen after
+ * the refcount is taken, hence acquire order is necessary. Similarly, when the
+ * object is initialized, all stores to its attributes should be visible before
+ * the refcount is set, otherwise a stale attribute value might be used by
+ * another task which succeeds in taking a refcount to the new object.
*/
#ifndef _LINUX_REFCOUNT_H
@@ -96,22 +105,11 @@
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/limits.h>
+#include <linux/refcount_types.h>
#include <linux/spinlock_types.h>
struct mutex;
-/**
- * typedef refcount_t - variant of atomic_t specialized for reference counts
- * @refs: atomic_t counter field
- *
- * The counter saturates at REFCOUNT_SATURATED and will not move once
- * there. This avoids wrapping the counter and causing 'spurious'
- * use-after-free bugs.
- */
-typedef struct refcount_struct {
- atomic_t refs;
-} refcount_t;
-
#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
#define REFCOUNT_MAX INT_MAX
#define REFCOUNT_SATURATED (INT_MIN / 2)
@@ -137,6 +135,31 @@ static inline void refcount_set(refcount_t *r, int n)
}
/**
+ * refcount_set_release - set a refcount's value with release ordering
+ * @r: the refcount
+ * @n: value to which the refcount will be set
+ *
+ * This function should be used when memory occupied by the object might be
+ * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU.
+ *
+ * Provides release memory ordering which will order previous memory operations
+ * against this store. This ensures all updates to this object are visible
+ * once the refcount is set and stale values from the object previously
+ * occupying this memory are overwritten with new ones.
+ *
+ * This function should be called only after new object is fully initialized.
+ * After this call the object should be considered visible to other tasks even
+ * if it was not yet added into an object collection normally used to discover
+ * it. This is because other tasks might have discovered the object previously
+ * occupying the same memory and, after memory reuse, they can succeed in
+ * taking a refcount to the new object and start using it.
+ */
+static inline void refcount_set_release(refcount_t *r, int n)
+{
+ atomic_set_release(&r->refs, n);
+}
+
+/**
* refcount_read - get a refcount's value
* @r: the refcount
*
@@ -147,7 +170,8 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs);
}
-static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
+static inline __must_check __signed_wrap
+bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
{
int old = refcount_read(r);
@@ -188,7 +212,73 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
return __refcount_add_not_zero(i, r, NULL);
}
-static inline void __refcount_add(int i, refcount_t *r, int *oldp)
+static inline __must_check __signed_wrap
+bool __refcount_add_not_zero_limited_acquire(int i, refcount_t *r, int *oldp,
+ int limit)
+{
+ int old = refcount_read(r);
+
+ do {
+ if (!old)
+ break;
+
+ if (i > limit - old) {
+ if (oldp)
+ *oldp = old;
+ return false;
+ }
+ } while (!atomic_try_cmpxchg_acquire(&r->refs, &old, old + i));
+
+ if (oldp)
+ *oldp = old;
+
+ if (unlikely(old < 0 || old + i < 0))
+ refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
+
+ return old;
+}
+
+static inline __must_check bool
+__refcount_inc_not_zero_limited_acquire(refcount_t *r, int *oldp, int limit)
+{
+ return __refcount_add_not_zero_limited_acquire(1, r, oldp, limit);
+}
+
+static inline __must_check __signed_wrap
+bool __refcount_add_not_zero_acquire(int i, refcount_t *r, int *oldp)
+{
+ return __refcount_add_not_zero_limited_acquire(i, r, oldp, INT_MAX);
+}
+
+/**
+ * refcount_add_not_zero_acquire - add a value to a refcount with acquire ordering unless it is 0
+ *
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * This function should be used when memory occupied by the object might be
+ * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU.
+ *
+ * Provides acquire memory ordering on success, it is assumed the caller has
+ * guaranteed the object memory to be stable (RCU, etc.). It does provide a
+ * control dependency and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc_not_zero_acquire() should instead be used to increment a
+ * reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
+ */
+static inline __must_check bool refcount_add_not_zero_acquire(int i, refcount_t *r)
+{
+ return __refcount_add_not_zero_acquire(i, r, NULL);
+}
+
+static inline __signed_wrap
+void __refcount_add(int i, refcount_t *r, int *oldp)
{
int old = atomic_fetch_add_relaxed(i, &r->refs);
@@ -245,6 +335,32 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
return __refcount_inc_not_zero(r, NULL);
}
+static inline __must_check bool __refcount_inc_not_zero_acquire(refcount_t *r, int *oldp)
+{
+ return __refcount_add_not_zero_acquire(1, r, oldp);
+}
+
+/**
+ * refcount_inc_not_zero_acquire - increment a refcount with acquire ordering unless it is 0
+ * @r: the refcount to increment
+ *
+ * Similar to refcount_inc_not_zero(), but provides acquire memory ordering on
+ * success.
+ *
+ * This function should be used when memory occupied by the object might be
+ * reused to store another object -- consider SLAB_TYPESAFE_BY_RCU.
+ *
+ * Provides acquire memory ordering on success, it is assumed the caller has
+ * guaranteed the object memory to be stable (RCU, etc.). It does provide a
+ * control dependency and thereby orders future stores. See the comment on top.
+ *
+ * Return: true if the increment was successful, false otherwise
+ */
+static inline __must_check bool refcount_inc_not_zero_acquire(refcount_t *r)
+{
+ return __refcount_inc_not_zero_acquire(r, NULL);
+}
+
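A lookup sketch for the SLAB_TYPESAFE_BY_RCU case motivating these helpers (the object, find, and put helpers are hypothetical); the acquire ordering is what makes the re-validation after the increment meaningful, and the matching initialization path would publish the object's fields with refcount_set_release():

struct demo_obj {
	refcount_t refs;
	unsigned long key;
};

static struct demo_obj *demo_lookup(unsigned long key)
{
	struct demo_obj *o;

	rcu_read_lock();
	o = demo_find(key);	/* hypothetical RCU-protected search */
	if (o && refcount_inc_not_zero_acquire(&o->refs)) {
		if (o->key != key) {	/* memory was reused for another object */
			demo_put(o);
			o = NULL;
		}
	} else {
		o = NULL;
	}
	rcu_read_unlock();
	return o;
}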
static inline void __refcount_inc(refcount_t *r, int *oldp)
{
__refcount_add(1, r, oldp);
@@ -267,19 +383,20 @@ static inline void refcount_inc(refcount_t *r)
__refcount_inc(r, NULL);
}
-static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
+static inline __must_check __signed_wrap
+bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
{
int old = atomic_fetch_sub_release(i, &r->refs);
if (oldp)
*oldp = old;
- if (old == i) {
+ if (old > 0 && old == i) {
smp_acquire__after_ctrl_dep();
return true;
}
- if (unlikely(old < 0 || old - i < 0))
+ if (unlikely(old <= 0 || old - i < 0))
refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
return false;
@@ -361,9 +478,9 @@ static inline void refcount_dec(refcount_t *r)
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
-extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
-extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
+extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
spinlock_t *lock,
- unsigned long *flags);
+ unsigned long *flags) __cond_acquires(lock);
#endif /* _LINUX_REFCOUNT_H */
diff --git a/include/linux/refcount_api.h b/include/linux/refcount_api.h
new file mode 100644
index 000000000000..5f032589f568
--- /dev/null
+++ b/include/linux/refcount_api.h
@@ -0,0 +1 @@
+#include <linux/refcount.h>
diff --git a/include/linux/refcount_types.h b/include/linux/refcount_types.h
new file mode 100644
index 000000000000..162004f06edf
--- /dev/null
+++ b/include/linux/refcount_types.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_REFCOUNT_TYPES_H
+#define _LINUX_REFCOUNT_TYPES_H
+
+#include <linux/types.h>
+
+/**
+ * typedef refcount_t - variant of atomic_t specialized for reference counts
+ * @refs: atomic_t counter field
+ *
+ * The counter saturates at REFCOUNT_SATURATED and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free bugs.
+ */
+typedef struct refcount_struct {
+ atomic_t refs;
+} refcount_t;
+
+#endif /* _LINUX_REFCOUNT_TYPES_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index f87a11a5cc4a..b0b9be750d93 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -24,9 +24,11 @@ struct module;
struct clk;
struct device;
struct device_node;
+struct fsi_device;
struct i2c_client;
struct i3c_device;
struct irq_domain;
+struct mdio_device;
struct slim_device;
struct spi_device;
struct spmi_device;
@@ -36,12 +38,40 @@ struct regmap_field;
struct snd_ac97;
struct sdw_slave;
-/* An enum of all the supported cache types */
+/*
+ * regmap_mdio address encoding. IEEE 802.3ae clause 45 addresses consist of a
+ * device address and a register address.
+ */
+#define REGMAP_MDIO_C45_DEVAD_SHIFT 16
+#define REGMAP_MDIO_C45_DEVAD_MASK GENMASK(20, 16)
+#define REGMAP_MDIO_C45_REGNUM_MASK GENMASK(15, 0)
+
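For example, a clause-45 register address is formed by combining the device address and register number per the encoding above (here device 1, PMA/PMD, register 0; 'map' is assumed to come from a regmap_mdio setup):

unsigned int addr = (1 << REGMAP_MDIO_C45_DEVAD_SHIFT) |
		    (0x0000 & REGMAP_MDIO_C45_REGNUM_MASK);
unsigned int val;
int ret = regmap_read(map, addr, &val);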
+/*
+ * regmap.reg_shift indicates by how much we must shift registers prior to
+ * performing any operation. It's a signed value, positive numbers means
+ * downshifting the register's address, while negative numbers means upshifting.
+ */
+#define REGMAP_UPSHIFT(s) (-(s))
+#define REGMAP_DOWNSHIFT(s) (s)
+
+/*
+ * The supported cache types; the default is no cache. Any new cache user should
+ * usually use the maple tree cache unless it specifically requires that there
+ * are never any allocations at runtime, in which case it should use the sparse
+ * flat cache. The rbtree cache *may* have some performance advantage for very
+ * low-end systems that make heavy use of cache syncs, but is mainly legacy.
+ * These caches are sparse and entries will be initialized from hardware if no
+ * default has been provided.
+ * The non-sparse flat cache is provided for compatibility with existing users
+ * and will zero-initialize cache entries for which no defaults are provided.
+ * New users should use the sparse flat cache.
+ */
enum regcache_type {
REGCACHE_NONE,
REGCACHE_RBTREE,
- REGCACHE_COMPRESSED,
REGCACHE_FLAT,
+ REGCACHE_MAPLE,
+ REGCACHE_FLAT_S,
};
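A configuration sketch combining the new address-translation fields and the cache selection introduced here; the values are illustrative only:

static const struct regmap_config demo_regmap_cfg = {
	.reg_bits	= 8,
	.val_bits	= 16,
	.reg_base	= 0x100,		/* added to every register address */
	.reg_shift	= REGMAP_DOWNSHIFT(1),	/* halve incoming addresses */
	.max_register	= 0xff,
	.cache_type	= REGCACHE_MAPLE,	/* preferred for new users */
};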
/**
@@ -88,17 +118,17 @@ struct reg_sequence {
* @addr: Address to poll
* @val: Unsigned integer variable to read the value into
* @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0
- * tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.rst).
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ * read usleep_range() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
+ * error return value in case of a read error. In the two former cases,
* the last read value at @addr is stored in @val. Must not be called
* from atomic context if sleep_us or timeout_us are used.
- *
- * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
*/
#define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \
({ \
@@ -115,20 +145,20 @@ struct reg_sequence {
* @addr: Address to poll
* @val: Unsigned integer variable to read the value into
* @cond: Break condition (usually involving @val)
- * @delay_us: Time to udelay between reads in us (0 tight-loops).
- * Should be less than ~10us since udelay is used
- * (see Documentation/timers/timers-howto.rst).
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Please
+ * read udelay() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
- * error return value in case of a error read. In the two former cases,
- * the last read value at @addr is stored in @val.
- *
* This is modelled after the readx_poll_timeout_atomic macros in linux/iopoll.h.
*
* Note: In general regmap cannot be used in atomic context. If you want to use
* this macro then first setup your regmap for atomic use (flat or no cache
* and MMIO regmap).
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
+ * error return value in case of a read error. In the two former cases,
+ * the last read value at @addr is stored in @val.
*/
#define regmap_read_poll_timeout_atomic(map, addr, val, cond, delay_us, timeout_us) \
({ \
@@ -159,17 +189,17 @@ struct reg_sequence {
* @field: Regmap field to read from
* @val: Unsigned integer variable to read the value into
* @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0
- * tight-loops). Should be less than ~20ms since usleep_range
- * is used (see Documentation/timers/timers-howto.rst).
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ * read usleep_range() function description for details and
+ * limitations.
* @timeout_us: Timeout in us, 0 means never timeout
*
- * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read
+ * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read
+ * error return value in case of a read error. In the two former cases,
* the last read value at @addr is stored in @val. Must not be called
* from atomic context if sleep_us or timeout_us are used.
- *
- * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
*/
#define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \
({ \
@@ -236,6 +266,11 @@ typedef void (*regmap_unlock)(void *);
* @reg_stride: The register address stride. Valid register addresses are a
* multiple of this value. If set to 0, a value of 1 will be
* used.
+ * @reg_shift: The number of bits to shift the register before performing any
+ * operations. A positive value downshifts the register address,
+ * while a negative value upshifts it.
+ * @reg_base: Value to be added to every register address before performing any
+ * operation.
* @pad_bits: Number of bits of padding between register and value.
* @val_bits: Number of bits in a register value, mandatory.
*
@@ -274,28 +309,46 @@ typedef void (*regmap_unlock)(void *);
* performed on such table (a register is no increment
* readable if it belongs to one of the ranges specified
* by rd_noinc_table).
- * @disable_locking: This regmap is either protected by external means or
- * is guaranteed not to be accessed from multiple threads.
- * Don't use any locking mechanisms.
- * @lock: Optional lock callback (overrides regmap's default lock
- * function, based on spinlock or mutex).
- * @unlock: As above for unlocking.
- * @lock_arg: this field is passed as the only argument of lock/unlock
- * functions (ignored in case regular lock/unlock functions
- * are not overridden).
* @reg_read: Optional callback that if filled will be used to perform
* all the reads from the registers. Should only be provided for
* devices whose read operation cannot be represented as a simple
* read operation on a bus such as SPI, I2C, etc. Most of the
* devices do not need this.
* @reg_write: Same as above for writing.
+ * @reg_update_bits: Optional callback that if filled will be used to perform
+ * all the update_bits (rmw) operations. Should only be provided
+ * if the device requires special lock and register
+ * handling and the operation cannot be represented as a simple
+ * update_bits operation on a bus such as SPI, I2C, etc.
+ * @read: Optional callback that if filled will be used to perform all the
+ * bulk reads from the registers. Data is returned in the buffer used
+ * to transmit data.
+ * @write: Same as above for writing.
+ * @max_raw_read: Max raw read size that can be used on the device.
+ * @max_raw_write: Max raw write size that can be used on the device.
+ * @can_sleep: Optional, specifies whether regmap operations can sleep.
* @fast_io: Register IO is fast. Use a spinlock instead of a mutex
* to perform locking. This field is ignored if custom lock/unlock
* functions are used (see fields lock/unlock of struct regmap_config).
 * This field is a duplicate of a similar field in
* 'struct regmap_bus' and serves exact same purpose.
* Use it only for "no-bus" cases.
+ * @io_port: Support IO port accessors. Makes sense only when MMIO vs. IO port
+ * access can be distinguished.
+ * @disable_locking: This regmap is either protected by external means or
+ * is guaranteed not to be accessed from multiple threads.
+ * Don't use any locking mechanisms.
+ * @lock: Optional lock callback (overrides regmap's default lock
+ * function, based on spinlock or mutex).
+ * @unlock: As above for unlocking.
+ * @lock_arg: This field is passed as the only argument of lock/unlock
+ * functions (ignored in case regular lock/unlock functions
+ * are not overridden).
* @max_register: Optional, specifies the maximum valid register address.
+ * @max_register_is_0: Optional, specifies that a zero value in @max_register
+ * should be taken into account. This is a workaround to
+ * apply @max_register handling to a regmap that contains
+ * only one register.
* @wr_table: Optional, points to a struct regmap_access_table specifying
* valid ranges for write access.
* @rd_table: As above, for read access.
@@ -333,26 +386,28 @@ typedef void (*regmap_unlock)(void *);
* @reg_defaults_raw: Power on reset values for registers (for use with
* register cache support).
* @num_reg_defaults_raw: Number of elements in reg_defaults_raw.
+ * @use_hwlock: Indicate if a hardware spinlock should be used.
+ * @use_raw_spinlock: Indicate if a raw spinlock should be used.
+ * @hwlock_id: Specify the hardware spinlock id.
+ * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
+ * HWLOCK_IRQ or 0.
* @reg_format_endian: Endianness for formatted register addresses. If this is
- * DEFAULT, the @reg_format_endian_default value from the
- * regmap bus is used.
+ * DEFAULT, the @reg_format_endian_default value from the
+ * regmap bus is used.
* @val_format_endian: Endianness for formatted register values. If this is
- * DEFAULT, the @reg_format_endian_default value from the
- * regmap bus is used.
+ * DEFAULT, the @reg_format_endian_default value from the
+ * regmap bus is used.
*
* @ranges: Array of configuration entries for virtual address ranges.
* @num_ranges: Number of range configuration entries.
- * @use_hwlock: Indicate if a hardware spinlock should be used.
- * @hwlock_id: Specify the hardware spinlock id.
- * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE,
- * HWLOCK_IRQ or 0.
- * @can_sleep: Optional, specifies whether regmap operations can sleep.
*/
struct regmap_config {
const char *name;
int reg_bits;
int reg_stride;
+ int reg_shift;
+ unsigned int reg_base;
int pad_bits;
int val_bits;
@@ -363,17 +418,29 @@ struct regmap_config {
bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg);
bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
- bool disable_locking;
- regmap_lock lock;
- regmap_unlock unlock;
- void *lock_arg;
-
int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+ int (*reg_update_bits)(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val);
+ /* Bulk read/write */
+ int (*read)(void *context, const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size);
+ int (*write)(void *context, const void *data, size_t count);
+ size_t max_raw_read;
+ size_t max_raw_write;
+
+ bool can_sleep;
bool fast_io;
+ bool io_port;
+
+ bool disable_locking;
+ regmap_lock lock;
+ regmap_unlock unlock;
+ void *lock_arg;
unsigned int max_register;
+ bool max_register_is_0;
const struct regmap_access_table *wr_table;
const struct regmap_access_table *rd_table;
const struct regmap_access_table *volatile_table;
@@ -395,17 +462,16 @@ struct regmap_config {
bool use_relaxed_mmio;
bool can_multi_write;
+ bool use_hwlock;
+ bool use_raw_spinlock;
+ unsigned int hwlock_id;
+ unsigned int hwlock_mode;
+
enum regmap_endian reg_format_endian;
enum regmap_endian val_format_endian;
const struct regmap_range_cfg *ranges;
unsigned int num_ranges;
-
- bool use_hwlock;
- unsigned int hwlock_id;
- unsigned int hwlock_mode;
-
- bool can_sleep;
};
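
For illustration only (not part of the patch): a minimal sketch of a config
that uses the reg_read/reg_write callbacks together with the hardware
spinlock fields. All foo_* names and the register layout are hypothetical.

struct foo_chip {
	void __iomem *base;
};

/* Any regmap with reg_read/reg_write set bypasses the bus and goes
 * through these callbacks instead. */
static int foo_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	struct foo_chip *chip = context;

	*val = readl(chip->base + reg);
	return 0;
}

static int foo_reg_write(void *context, unsigned int reg, unsigned int val)
{
	struct foo_chip *chip = context;

	writel(val, chip->base + reg);
	return 0;
}

static const struct regmap_config foo_regmap_config = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
	.max_register	= 0xfc,
	.reg_read	= foo_reg_read,
	.reg_write	= foo_reg_write,
	.use_hwlock	= true,
	.hwlock_id	= 0,
	.hwlock_mode	= HWLOCK_IRQSTATE,
};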
/**
@@ -445,6 +511,32 @@ struct regmap_range_cfg {
unsigned int window_len;
};
+/**
+ * struct regmap_sdw_mbq_cfg - Configuration for Multi-Byte Quantities
+ *
+ * @mbq_size: Callback returning the actual size of the given register.
+ * @deferrable: Callback returning true if the hardware can defer
+ * transactions to the given register. Deferral should
+ * only be used by SDCA parts; typically, which controls
+ * are deferrable will be specified either as a hard-coded
+ * list or by the DisCo tables in the platform
+ * firmware.
+ *
+ * @timeout_us: The time in microseconds after which waiting for a deferred
+ * transaction should time out.
+ * @retry_us: The time in microseconds between polls of the function busy
+ * status whilst waiting for an opportunity to retry a deferred
+ * transaction.
+ *
+ * Provides additional configuration required for SoundWire MBQ register maps.
+ */
+struct regmap_sdw_mbq_cfg {
+ int (*mbq_size)(struct device *dev, unsigned int reg);
+ bool (*deferrable)(struct device *dev, unsigned int reg);
+ unsigned long timeout_us;
+ unsigned long retry_us;
+};
+
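
For illustration (not part of the patch), a sketch of how an SDCA codec
driver might fill this in; the FOO_* constants and foo_* callbacks are
hypothetical:

static int foo_mbq_size(struct device *dev, unsigned int reg)
{
	/* Hypothetical encoding: one address bit selects 16-bit MBQs */
	return (reg & FOO_MBQ_16BIT) ? 2 : 1;
}

static bool foo_deferrable(struct device *dev, unsigned int reg)
{
	/* Only the hypothetical volume control range may defer */
	return reg >= FOO_DEFER_BASE && reg < FOO_DEFER_END;
}

static const struct regmap_sdw_mbq_cfg foo_mbq_cfg = {
	.mbq_size	= foo_mbq_size,
	.deferrable	= foo_deferrable,
	.timeout_us	= 100000,	/* give up after 100 ms */
	.retry_us	= 5000,		/* poll busy status every 5 ms */
};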
struct regmap_async;
typedef int (*regmap_hw_write)(void *context, const void *data,
@@ -461,8 +553,12 @@ typedef int (*regmap_hw_read)(void *context,
void *val_buf, size_t val_size);
typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg,
unsigned int *val);
+typedef int (*regmap_hw_reg_noinc_read)(void *context, unsigned int reg,
+ void *val, size_t val_count);
typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg,
unsigned int val);
+typedef int (*regmap_hw_reg_noinc_write)(void *context, unsigned int reg,
+ const void *val, size_t val_count);
typedef int (*regmap_hw_reg_update_bits)(void *context, unsigned int reg,
unsigned int mask, unsigned int val);
typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
@@ -476,6 +572,7 @@ typedef void (*regmap_hw_free_context)(void *context);
* to perform locking. This field is ignored if custom lock/unlock
* functions are used (see fields lock/unlock of
* struct regmap_config).
+ * @free_on_exit: kfree this bus struct when the regmap exits.
* @write: Write operation.
* @gather_write: Write operation with split register/value, return -ENOTSUPP
* if not implemented on a given device.
@@ -483,6 +580,8 @@ typedef void (*regmap_hw_free_context)(void *context);
* must serialise with respect to non-async I/O.
* @reg_write: Write a single register value to the given register address. This
* write operation has to complete when returning from the function.
+ * @reg_write_noinc: Write multiple register values to the same register. This
+ * write operation has to complete when returning from the function.
* @reg_update_bits: Update bits operation to be used against volatile
* registers, intended for devices supporting some mechanism
 * for setting/clearing bits without having to
@@ -505,13 +604,16 @@ typedef void (*regmap_hw_free_context)(void *context);
*/
struct regmap_bus {
bool fast_io;
+ bool free_on_exit;
regmap_hw_write write;
regmap_hw_gather_write gather_write;
regmap_hw_async_write async_write;
regmap_hw_reg_write reg_write;
+ regmap_hw_reg_noinc_write reg_noinc_write;
regmap_hw_reg_update_bits reg_update_bits;
regmap_hw_read read;
regmap_hw_reg_read reg_read;
+ regmap_hw_reg_noinc_read reg_noinc_read;
regmap_hw_free_context free_context;
regmap_hw_async_alloc async_alloc;
u8 read_flag_mask;
@@ -538,6 +640,10 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -575,14 +681,19 @@ struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
-struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
+struct regmap *__regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name);
struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_fsi(struct fsi_device *fsi_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init(struct device *dev,
const struct regmap_bus *bus,
@@ -594,6 +705,10 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -628,8 +743,9 @@ struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
-struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
+struct regmap *__devm_regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
const struct regmap_config *config,
+ const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
const char *lock_name);
struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
@@ -644,6 +760,11 @@ struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_fsi(struct fsi_device *fsi_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
+
/*
* Wrapper for regmap_init macros to include a unique lockdep key and name
* for each call. No-op if CONFIG_LOCKDEP is not set.
@@ -698,6 +819,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
i2c, config)
/**
+ * regmap_init_mdio() - Initialise register map
+ *
+ * @mdio_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_mdio(mdio_dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_mdio, #config, \
+ mdio_dev, config)
+
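
Usage sketch for the new MDIO support (driver code, not part of the
header). Clause 22 devices have 5-bit register addresses and 16-bit
values, so a minimal config could look like:

	static const struct regmap_config foo_mdio_config = {
		.reg_bits = 5,
		.val_bits = 16,
	};
	struct regmap *map;

	map = regmap_init_mdio(mdio_dev, &foo_mdio_config);
	if (IS_ERR(map))
		return PTR_ERR(map);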
+/**
* regmap_init_sccb() - Initialise register map
*
* @i2c: Device that will be interacted with
@@ -784,7 +918,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
+ * a struct regmap. Implies 'fast_io'.
*/
#define regmap_init_mmio_clk(dev, clk_id, regs, config) \
__regmap_lockdep_wrapper(__regmap_init_mmio_clk, #config, \
@@ -798,7 +932,7 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
* @config: Configuration for register map
*
* The return value will be an ERR_PTR() on error or a valid pointer to
- * a struct regmap.
+ * a struct regmap. Implies 'fast_io'.
*/
#define regmap_init_mmio(dev, regs, config) \
regmap_init_mmio_clk(dev, NULL, regs, config)
@@ -841,7 +975,22 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
*/
#define regmap_init_sdw_mbq(sdw, config) \
__regmap_lockdep_wrapper(__regmap_init_sdw_mbq, #config, \
- sdw, config)
+ &sdw->dev, sdw, config, NULL)
+
+/**
+ * regmap_init_sdw_mbq_cfg() - Initialise MBQ SDW register map with config
+ *
+ * @dev: Device that will be interacted with
+ * @sdw: SoundWire device that will be interacted with
+ * @config: Configuration for register map
+ * @mbq_config: Properties for the MBQ registers
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.
+ */
+#define regmap_init_sdw_mbq_cfg(dev, sdw, config, mbq_config) \
+ __regmap_lockdep_wrapper(__regmap_init_sdw_mbq, #config, \
+ dev, sdw, config, mbq_config)
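
Usage sketch (driver code, not part of the header): @dev lets the regmap
be tied to e.g. an SDCA function device instead of the SoundWire slave
itself; the config and the foo_mbq_cfg sketched earlier are hypothetical.

	map = regmap_init_sdw_mbq_cfg(dev, sdw, &foo_config, &foo_mbq_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);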
/**
* regmap_init_spi_avmm() - Initialize register map for Intel SPI Slave
@@ -858,6 +1007,19 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
spi, config)
/**
+ * regmap_init_fsi() - Initialise register map
+ *
+ * @fsi_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_fsi(fsi_dev, config) \
+ __regmap_lockdep_wrapper(__regmap_init_fsi, #config, fsi_dev, \
+ config)
+
+/**
* devm_regmap_init() - Initialise managed register map
*
* @dev: Device that will be interacted with
@@ -889,6 +1051,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
i2c, config)
/**
+ * devm_regmap_init_mdio() - Initialise managed register map
+ *
+ * @mdio_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_mdio(mdio_dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_mdio, #config, \
+ mdio_dev, config)
+
+/**
* devm_regmap_init_sccb() - Initialise managed register map
*
* @i2c: Device that will be interacted with
@@ -967,7 +1143,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
*
* The return value will be an ERR_PTR() on error or a valid pointer
* to a struct regmap. The regmap will be automatically freed by the
- * device management code.
+ * device management code. Implies 'fast_io'.
*/
#define devm_regmap_init_mmio_clk(dev, clk_id, regs, config) \
__regmap_lockdep_wrapper(__devm_regmap_init_mmio_clk, #config, \
@@ -982,7 +1158,7 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
*
* The return value will be an ERR_PTR() on error or a valid pointer
* to a struct regmap. The regmap will be automatically freed by the
- * device management code.
+ * device management code. Implies 'fast_io'.
*/
#define devm_regmap_init_mmio(dev, regs, config) \
devm_regmap_init_mmio_clk(dev, NULL, regs, config)
@@ -1027,7 +1203,23 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
*/
#define devm_regmap_init_sdw_mbq(sdw, config) \
__regmap_lockdep_wrapper(__devm_regmap_init_sdw_mbq, #config, \
- sdw, config)
+ &sdw->dev, sdw, config, NULL)
+
+/**
+ * devm_regmap_init_sdw_mbq_cfg() - Initialise managed MBQ SDW register map with config
+ *
+ * @dev: Device that will be interacted with
+ * @sdw: SoundWire Device that will be interacted with
+ * @config: Configuration for register map
+ * @mbq_config: Properties for the MBQ registers
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sdw_mbq_cfg(dev, sdw, config, mbq_config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sdw_mbq, \
+ #config, dev, sdw, config, mbq_config)
/**
* devm_regmap_init_slimbus() - Initialise managed register map
@@ -1072,6 +1264,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
__regmap_lockdep_wrapper(__devm_regmap_init_spi_avmm, #config, \
spi, config)
+/**
+ * devm_regmap_init_fsi() - Initialise managed register map
+ *
+ * @fsi_dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_fsi(fsi_dev, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_fsi, #config, \
+ fsi_dev, config)
+
int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk);
void regmap_mmio_detach_clk(struct regmap *map);
void regmap_exit(struct regmap *map);
@@ -1095,12 +1301,15 @@ int regmap_multi_reg_write_bypassed(struct regmap *map,
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
const void *val, size_t val_len);
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
+int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val);
int regmap_raw_read(struct regmap *map, unsigned int reg,
void *val, size_t val_len);
int regmap_noinc_read(struct regmap *map, unsigned int reg,
void *val, size_t val_len);
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count);
+int regmap_multi_reg_read(struct regmap *map, const unsigned int *reg, void *val,
+ size_t val_count);
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force);
@@ -1143,11 +1352,13 @@ static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
int regmap_get_val_bytes(struct regmap *map);
int regmap_get_max_register(struct regmap *map);
int regmap_get_reg_stride(struct regmap *map);
+bool regmap_might_sleep(struct regmap *map);
int regmap_async_complete(struct regmap *map);
bool regmap_can_raw_write(struct regmap *map);
size_t regmap_get_raw_read_max(struct regmap *map);
size_t regmap_get_raw_write_max(struct regmap *map);
+void regcache_sort_defaults(struct reg_default *defaults, unsigned int ndefaults);
int regcache_sync(struct regmap *map);
int regcache_sync_region(struct regmap *map, unsigned int min,
unsigned int max);
@@ -1156,6 +1367,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
void regcache_cache_only(struct regmap *map, bool enable);
void regcache_cache_bypass(struct regmap *map, bool enable);
void regcache_mark_dirty(struct regmap *map);
+bool regcache_reg_cached(struct regmap *map, unsigned int reg);
bool regmap_check_range_table(struct regmap *map, unsigned int reg,
const struct regmap_access_table *table);
@@ -1188,6 +1400,15 @@ static inline int regmap_clear_bits(struct regmap *map,
return regmap_update_bits_base(map, reg, bits, 0, NULL, false, false);
}
+static inline int regmap_assign_bits(struct regmap *map, unsigned int reg,
+ unsigned int bits, bool value)
+{
+ if (value)
+ return regmap_set_bits(map, reg, bits);
+ else
+ return regmap_clear_bits(map, reg, bits);
+}
+
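
Usage sketch: regmap_assign_bits() replaces the open-coded if/else at
call sites. The FOO_* constants are hypothetical.

	ret = regmap_assign_bits(map, FOO_REG_CTRL, FOO_CTRL_EN, enable);
	if (ret)
		return ret;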
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits);
/**
@@ -1231,12 +1452,13 @@ void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
int regmap_field_bulk_alloc(struct regmap *regmap,
struct regmap_field **rm_field,
- struct reg_field *reg_field,
+ const struct reg_field *reg_field,
int num_fields);
void regmap_field_bulk_free(struct regmap_field *field);
int devm_regmap_field_bulk_alloc(struct device *dev, struct regmap *regmap,
struct regmap_field **field,
- struct reg_field *reg_field, int num_fields);
+ const struct reg_field *reg_field,
+ int num_fields);
void devm_regmap_field_bulk_free(struct device *dev,
struct regmap_field *field);
@@ -1270,6 +1492,22 @@ static inline int regmap_field_update_bits(struct regmap_field *field,
NULL, false, false);
}
+static inline int regmap_field_set_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ return regmap_field_update_bits_base(field, bits, bits, NULL, false,
+ false);
+}
+
+static inline int regmap_field_clear_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ return regmap_field_update_bits_base(field, bits, 0, NULL, false,
+ false);
+}
+
+int regmap_field_test_bits(struct regmap_field *field, unsigned int bits);
+
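
Usage sketch: these mirror regmap_set_bits()/regmap_clear_bits() for a
regmap_field (priv->ctrl_field and FOO_GO are hypothetical).

	ret = regmap_field_set_bits(priv->ctrl_field, FOO_GO);
	if (ret)
		return ret;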
static inline int
regmap_field_force_update_bits(struct regmap_field *field,
unsigned int mask, unsigned int val)
@@ -1358,10 +1596,15 @@ struct regmap_irq_sub_irq_map {
unsigned int *offset;
};
+struct regmap_irq_chip_data;
+
/**
* struct regmap_irq_chip - Description of a generic regmap irq_chip.
*
* @name: Descriptive name for IRQ controller.
+ * @domain_suffix: Name suffix to be appended to end of IRQ domain name. Needed
+ * when multiple regmap-IRQ controllers are created from same
+ * device.
*
* @main_status: Base main status register address. For chips which have
* interrupts arranged in separate sub-irq blocks with own IRQ
@@ -1378,56 +1621,68 @@ struct regmap_irq_sub_irq_map {
* status_base. Should contain num_regs arrays.
* Can be provided for chips with more complex mapping than
 * 1st bit to 1st sub-reg, 2nd bit to 2nd sub-reg, ...
- * When used with not_fixed_stride, each one-element array
- * member contains offset calculated as address from each
- * peripheral to first peripheral.
* @num_main_regs: Number of 'main status' irq registers for chips which have
* main_status set.
*
* @status_base: Base status register address.
- * @mask_base: Base mask register address.
- * @mask_writeonly: Base mask register is write only.
- * @unmask_base: Base unmask register address. for chips who have
- * separate mask and unmask registers
+ * @mask_base: Base mask register address. Mask bits are set to 1 when an
+ * interrupt is masked, 0 when unmasked.
+ * @unmask_base: Base unmask register address. Unmask bits are set to 1 when
+ * an interrupt is unmasked and 0 when masked.
* @ack_base: Base ack address. If zero then the chip is clear on read.
* Using zero value is possible with @use_ack bit.
* @wake_base: Base address for wake enables. If zero unsupported.
- * @type_base: Base address for irq type. If zero unsupported.
- * @virt_reg_base: Base addresses for extra config regs.
+ * @config_base: Base address for IRQ type config regs. If null unsupported.
* @irq_reg_stride: Stride to use for chips where registers are not contiguous.
 * @init_ack_masked: Ack all masked interrupts once during initialization.
- * @mask_invert: Inverted mask register: cleared bits are masked out.
+ * @mask_unmask_non_inverted: Controls mask bit inversion for chips that set
+ * both @mask_base and @unmask_base. If false, mask and unmask bits are
+ * inverted (which is deprecated behavior); if true, bits will not be
+ * inverted and the registers keep their normal behavior. Note that if
+ * you use only one of @mask_base or @unmask_base, this flag has no
+ * effect and is unnecessary. Any new drivers that set both @mask_base
+ * and @unmask_base should set this to true to avoid relying on the
+ * deprecated behavior.
* @use_ack: Use @ack register even if it is zero.
* @ack_invert: Inverted ack register: cleared bits for ack.
* @clear_ack: Use this to set 1 and 0 or vice-versa to clear interrupts.
- * @wake_invert: Inverted wake register: cleared bits are wake enabled.
- * @type_invert: Invert the type flags.
- * @type_in_mask: Use the mask registers for controlling irq type. For
- * interrupts defining type_rising/falling_mask use mask_base
- * for edge configuration and never update bits in type_base.
+ * @status_invert: Inverted status register: cleared bits are active interrupts.
+ * @status_is_level: Status register is actually the signal level: XOR the
+ * status register with the previous value to get the active interrupts.
+ * @wake_invert: Inverted wake register: cleared bits are wake disabled.
+ * @type_in_mask: Use the mask registers for controlling irq type. Use this if
+ * the hardware provides separate bits for rising/falling edge
+ * or low/high level interrupts and they should be combined into
+ * a single logical interrupt. Use &struct regmap_irq_type data
+ * to define the mask bit for each irq type.
* @clear_on_unmask: For chips with interrupts cleared on read: read the status
* registers before unmasking interrupts to clear any bits
* set when they were masked.
- * @not_fixed_stride: Used when chip peripherals are not laid out with fixed
- * stride. Must be used with sub_reg_offsets containing the
- * offsets to each peripheral.
* @runtime_pm: Hold a runtime PM lock on the device when accessing it.
+ * @no_status: No status register: all interrupts assumed generated by device.
*
* @num_regs: Number of registers in each control bank.
+ *
* @irqs: Descriptors for individual IRQs. Interrupt numbers are
* assigned based on the index in the array of the interrupt.
* @num_irqs: Number of descriptors.
- * @num_type_reg: Number of type registers.
- * @num_virt_regs: Number of non-standard irq configuration registers.
- * If zero unsupported.
- * @type_reg_stride: Stride to use for chips where type registers are not
- * contiguous.
+ * @num_config_bases: Number of config base registers.
+ * @num_config_regs: Number of config registers for each config base register.
+ *
* @handle_pre_irq: Driver specific callback to handle interrupt from device
* before regmap_irq_handler process the interrupts.
* @handle_post_irq: Driver specific callback to handle interrupt from device
* after handling the interrupts in regmap_irq_handler().
- * @set_type_virt: Driver specific callback to extend regmap_irq_set_type()
- * and configure virt regs.
+ * @handle_mask_sync: Callback used to handle IRQ mask syncs. The index will be
+ * in the range [0, num_regs)
+ * @set_type_config: Callback used for configuring irq types.
+ * @get_irq_reg: Callback for mapping (base register, index) pairs to register
+ * addresses. The base register will be one of @status_base,
+ * @mask_base, etc., @main_status, or any of @config_base.
+ * The index will be in the range [0, num_main_regs) for the
+ * main status base, [0, num_config_regs) for any config
+ * register base, and [0, num_regs) for any other base.
+ * If unspecified then regmap_irq_get_irq_reg_linear() is used.
* @irq_drv_data: Driver specific IRQ data which is passed as parameter when
* driver specific pre/post interrupt handler is called.
*
@@ -1437,10 +1692,11 @@ struct regmap_irq_sub_irq_map {
*/
struct regmap_irq_chip {
const char *name;
+ const char *domain_suffix;
unsigned int main_status;
unsigned int num_main_status_bits;
- struct regmap_irq_sub_irq_map *sub_reg_offsets;
+ const struct regmap_irq_sub_irq_map *sub_reg_offsets;
int num_main_regs;
unsigned int status_base;
@@ -1448,39 +1704,46 @@ struct regmap_irq_chip {
unsigned int unmask_base;
unsigned int ack_base;
unsigned int wake_base;
- unsigned int type_base;
- unsigned int *virt_reg_base;
+ const unsigned int *config_base;
unsigned int irq_reg_stride;
- bool mask_writeonly:1;
- bool init_ack_masked:1;
- bool mask_invert:1;
- bool use_ack:1;
- bool ack_invert:1;
- bool clear_ack:1;
- bool wake_invert:1;
- bool runtime_pm:1;
- bool type_invert:1;
- bool type_in_mask:1;
- bool clear_on_unmask:1;
- bool not_fixed_stride:1;
+ unsigned int init_ack_masked:1;
+ unsigned int mask_unmask_non_inverted:1;
+ unsigned int use_ack:1;
+ unsigned int ack_invert:1;
+ unsigned int clear_ack:1;
+ unsigned int status_invert:1;
+ unsigned int status_is_level:1;
+ unsigned int wake_invert:1;
+ unsigned int type_in_mask:1;
+ unsigned int clear_on_unmask:1;
+ unsigned int runtime_pm:1;
+ unsigned int no_status:1;
int num_regs;
const struct regmap_irq *irqs;
int num_irqs;
- int num_type_reg;
- int num_virt_regs;
- unsigned int type_reg_stride;
+ int num_config_bases;
+ int num_config_regs;
int (*handle_pre_irq)(void *irq_drv_data);
int (*handle_post_irq)(void *irq_drv_data);
- int (*set_type_virt)(unsigned int **buf, unsigned int type,
- unsigned long hwirq, int reg);
+ int (*handle_mask_sync)(int index, unsigned int mask_buf_def,
+ unsigned int mask_buf, void *irq_drv_data);
+ int (*set_type_config)(unsigned int **buf, unsigned int type,
+ const struct regmap_irq *irq_data, int idx,
+ void *irq_drv_data);
+ unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
+ unsigned int base, int index);
void *irq_drv_data;
};
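
For illustration (not part of the patch), a minimal chip description
using the reworked mask/unmask semantics; all FOO_* register constants
are hypothetical, and new drivers setting both bases should use
mask_unmask_non_inverted as shown:

enum { FOO_IRQ_ALERT, FOO_IRQ_DONE };

static const struct regmap_irq foo_irqs[] = {
	REGMAP_IRQ_REG(FOO_IRQ_ALERT, 0, BIT(0)),
	REGMAP_IRQ_REG(FOO_IRQ_DONE,  0, BIT(1)),
};

static const struct regmap_irq_chip foo_irq_chip = {
	.name			= "foo",
	.status_base		= FOO_REG_INT_STATUS,
	.mask_base		= FOO_REG_INT_MASK,
	.unmask_base		= FOO_REG_INT_UNMASK,
	.mask_unmask_non_inverted = true,
	.ack_base		= FOO_REG_INT_ACK,
	.num_regs		= 1,
	.irqs			= foo_irqs,
	.num_irqs		= ARRAY_SIZE(foo_irqs),
};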
-struct regmap_irq_chip_data;
+unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
+ unsigned int base, int index);
+int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
+ const struct regmap_irq *irq_data,
+ int idx, void *irq_drv_data);
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
int irq_base, const struct regmap_irq_chip *chip,
@@ -1567,6 +1830,13 @@ static inline int regmap_read(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_read_bypassed(struct regmap *map, unsigned int reg,
+ unsigned int *val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_raw_read(struct regmap *map, unsigned int reg,
void *val, size_t val_len)
{
@@ -1610,6 +1880,13 @@ static inline int regmap_clear_bits(struct regmap *map,
return -EINVAL;
}
+static inline int regmap_assign_bits(struct regmap *map, unsigned int reg,
+ unsigned int bits, bool value)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_test_bits(struct regmap *map,
unsigned int reg, unsigned int bits)
{
@@ -1701,6 +1978,27 @@ regmap_field_force_update_bits(struct regmap_field *field,
return -EINVAL;
}
+static inline int regmap_field_set_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_clear_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_field_test_bits(struct regmap_field *field,
+ unsigned int bits)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_fields_write(struct regmap_field *field,
unsigned int id, unsigned int val)
{
@@ -1749,6 +2047,18 @@ static inline int regmap_get_reg_stride(struct regmap *map)
return -EINVAL;
}
+static inline bool regmap_might_sleep(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return true;
+}
+
+static inline void regcache_sort_defaults(struct reg_default *defaults,
+ unsigned int ndefaults)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+}
+
static inline int regcache_sync(struct regmap *map)
{
WARN_ONCE(1, "regmap API is disabled");
diff --git a/include/linux/regset.h b/include/linux/regset.h
index a00765f0e8cf..ad1ca6fe04f4 100644
--- a/include/linux/regset.h
+++ b/include/linux/regset.h
@@ -151,7 +151,8 @@ typedef int user_regset_writeback_fn(struct task_struct *target,
* @align: Required alignment, in bytes.
* @bias: Bias from natural indexing.
* @core_note_type: ELF note @n_type value used in core dumps.
- * @get: Function to fetch values.
+ * @core_note_name: ELF note name to qualify the note type.
+ * @regset_get: Function to fetch values.
* @set: Function to store values.
* @active: Function to report if regset is active, or %NULL.
* @writeback: Function to write data back to user memory, or %NULL.
@@ -190,6 +191,10 @@ typedef int user_regset_writeback_fn(struct task_struct *target,
*
* If nonzero, @core_note_type gives the n_type field (NT_* value)
* of the core file note in which this regset's data appears.
+ * @core_note_name specifies the note name. The preferred way to
+ * specify these two fields is to use the @USER_REGSET_NOTE_TYPE()
+ * macro.
+ *
* NT_PRSTATUS is a special case in that the regset data starts at
* offsetof(struct elf_prstatus, pr_reg) into the note data; that is
* part of the per-machine ELF formats userland knows about. In
@@ -207,8 +212,13 @@ struct user_regset {
unsigned int align;
unsigned int bias;
unsigned int core_note_type;
+ const char *core_note_name;
};
+#define USER_REGSET_NOTE_TYPE(type) \
+ .core_note_type = (NT_ ## type), \
+ .core_note_name = (NN_ ## type)
+
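
Usage sketch (arch code, not part of the header): the macro fills both
fields from the matching NT_ and NN_ definitions so they cannot drift
apart. ELF_NGREG, elf_greg_t and the genregs_* helpers stand in for the
usual arch-specific names and are assumptions here.

static const struct user_regset foo_regsets[] = {
	[0] = {
		USER_REGSET_NOTE_TYPE(PRSTATUS),
		.n		= ELF_NGREG,
		.size		= sizeof(elf_greg_t),
		.align		= sizeof(elf_greg_t),
		.regset_get	= genregs_get,
		.set		= genregs_set,
	},
};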
/**
* struct user_regset_view - available regsets
* @name: Identifier, e.g. UTS_MACHINE string.
@@ -275,15 +285,15 @@ static inline int user_regset_copyin(unsigned int *pos, unsigned int *count,
return 0;
}
-static inline int user_regset_copyin_ignore(unsigned int *pos,
- unsigned int *count,
- const void **kbuf,
- const void __user **ubuf,
- const int start_pos,
- const int end_pos)
+static inline void user_regset_copyin_ignore(unsigned int *pos,
+ unsigned int *count,
+ const void **kbuf,
+ const void __user **ubuf,
+ const int start_pos,
+ const int end_pos)
{
if (*count == 0)
- return 0;
+ return;
BUG_ON(*pos < start_pos);
if (end_pos < 0 || *pos < end_pos) {
unsigned int copy = (end_pos < 0 ? *count
@@ -295,7 +305,6 @@ static inline int user_regset_copyin_ignore(unsigned int *pos,
*pos += copy;
*count -= copy;
}
- return 0;
}
extern int regset_get(struct task_struct *target,
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 20e84a84fb77..56fe2693d9b2 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -33,6 +33,7 @@
#include <linux/err.h>
#include <linux/suspend.h>
+#include <regulator/regulator.h>
struct device;
struct notifier_block;
@@ -85,42 +86,6 @@ struct regulator_dev;
#define REGULATOR_MODE_STANDBY 0x8
/*
- * Regulator notifier events.
- *
- * UNDER_VOLTAGE Regulator output is under voltage.
- * OVER_CURRENT Regulator output current is too high.
- * REGULATION_OUT Regulator output is out of regulation.
- * FAIL Regulator output has failed.
- * OVER_TEMP Regulator over temp.
- * FORCE_DISABLE Regulator forcibly shut down by software.
- * VOLTAGE_CHANGE Regulator voltage changed.
- * Data passed is old voltage cast to (void *).
- * DISABLE Regulator was disabled.
- * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed.
- * Data passed is "struct pre_voltage_change_data"
- * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
- * Data passed is old voltage cast to (void *).
- * PRE_DISABLE Regulator is about to be disabled
- * ABORT_DISABLE Regulator disable failed for some reason
- *
- * NOTE: These events can be OR'ed together when passed into handler.
- */
-
-#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01
-#define REGULATOR_EVENT_OVER_CURRENT 0x02
-#define REGULATOR_EVENT_REGULATION_OUT 0x04
-#define REGULATOR_EVENT_FAIL 0x08
-#define REGULATOR_EVENT_OVER_TEMP 0x10
-#define REGULATOR_EVENT_FORCE_DISABLE 0x20
-#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
-#define REGULATOR_EVENT_DISABLE 0x80
-#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
-#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
-#define REGULATOR_EVENT_PRE_DISABLE 0x400
-#define REGULATOR_EVENT_ABORT_DISABLE 0x800
-#define REGULATOR_EVENT_ENABLE 0x1000
-
-/*
* Regulator errors that can be queried using regulator_get_error_flags
*
* UNDER_VOLTAGE Regulator output is under voltage.
@@ -138,6 +103,10 @@ struct regulator_dev;
#define REGULATOR_ERROR_FAIL BIT(4)
#define REGULATOR_ERROR_OVER_TEMP BIT(5)
+#define REGULATOR_ERROR_UNDER_VOLTAGE_WARN BIT(6)
+#define REGULATOR_ERROR_OVER_CURRENT_WARN BIT(7)
+#define REGULATOR_ERROR_OVER_VOLTAGE_WARN BIT(8)
+#define REGULATOR_ERROR_OVER_TEMP_WARN BIT(9)
/**
* struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
@@ -157,10 +126,13 @@ struct regulator;
/**
* struct regulator_bulk_data - Data used for bulk regulator operations.
*
- * @supply: The name of the supply. Initialised by the user before
- * using the bulk regulator APIs.
- * @consumer: The regulator consumer for the supply. This will be managed
- * by the bulk API.
+ * @supply: The name of the supply. Initialised by the user before
+ * using the bulk regulator APIs.
+ * @consumer: The regulator consumer for the supply. This will be managed
+ * by the bulk API.
+ * @init_load_uA: After getting the regulator, regulator_set_load() will be
+ * called with this load. Initialised by the user before
+ * using the bulk regulator APIs.
*
* The regulator APIs provide a series of regulator_bulk_() API calls as
* a convenience to consumers which require multiple supplies. This
@@ -169,6 +141,7 @@ struct regulator;
struct regulator_bulk_data {
const char *supply;
struct regulator *consumer;
+ int init_load_uA;
/* private: Internal use */
int ret;
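
Usage sketch (consumer code, not part of the header): with @init_load_uA
set, the load is requested for the supply as part of the bulk get. The
supply names and the 100 mA figure are illustrative.

	struct regulator_bulk_data supplies[] = {
		{ .supply = "vdd", .init_load_uA = 100000 },
		{ .supply = "vio" },
	};
	int ret;

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supplies), supplies);
	if (ret)
		return ret;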
@@ -189,6 +162,9 @@ struct regulator *__must_check regulator_get_optional(struct device *dev,
const char *id);
struct regulator *__must_check devm_regulator_get_optional(struct device *dev,
const char *id);
+int devm_regulator_get_enable(struct device *dev, const char *id);
+int devm_regulator_get_enable_optional(struct device *dev, const char *id);
+int devm_regulator_get_enable_read_voltage(struct device *dev, const char *id);
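
Usage sketch: the get-and-enable helpers return no struct regulator at
all, so the consumer cannot accidentally unbalance the enable count; the
supply stays on until the device is unbound.

	ret = devm_regulator_get_enable(dev, "vdd");
	if (ret)
		return dev_err_probe(dev, ret, "failed to enable vdd\n");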
void regulator_put(struct regulator *regulator);
void devm_regulator_put(struct regulator *regulator);
@@ -208,17 +184,12 @@ void regulator_bulk_unregister_supply_alias(struct device *dev,
int devm_regulator_register_supply_alias(struct device *dev, const char *id,
struct device *alias_dev,
const char *alias_id);
-void devm_regulator_unregister_supply_alias(struct device *dev,
- const char *id);
int devm_regulator_bulk_register_supply_alias(struct device *dev,
const char *const *id,
struct device *alias_dev,
const char *const *alias_id,
int num_id);
-void devm_regulator_bulk_unregister_supply_alias(struct device *dev,
- const char *const *id,
- int num_id);
/* regulator output control and status */
int __must_check regulator_enable(struct regulator *regulator);
@@ -231,8 +202,17 @@ int __must_check regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers);
int __must_check devm_regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers);
+void devm_regulator_bulk_put(struct regulator_bulk_data *consumers);
+int __must_check devm_regulator_bulk_get_exclusive(struct device *dev, int num_consumers,
+ struct regulator_bulk_data *consumers);
+int __must_check devm_regulator_bulk_get_const(
+ struct device *dev, int num_consumers,
+ const struct regulator_bulk_data *in_consumers,
+ struct regulator_bulk_data **out_consumers);
int __must_check regulator_bulk_enable(int num_consumers,
struct regulator_bulk_data *consumers);
+int devm_regulator_bulk_get_enable(struct device *dev, int num_consumers,
+ const char * const *id);
int regulator_bulk_disable(int num_consumers,
struct regulator_bulk_data *consumers);
int regulator_bulk_force_disable(int num_consumers,
@@ -253,6 +233,11 @@ int regulator_sync_voltage(struct regulator *regulator);
int regulator_set_current_limit(struct regulator *regulator,
int min_uA, int max_uA);
int regulator_get_current_limit(struct regulator *regulator);
+int regulator_get_unclaimed_power_budget(struct regulator *regulator);
+int regulator_request_power_budget(struct regulator *regulator,
+ unsigned int pw_req);
+void regulator_free_power_budget(struct regulator *regulator,
+ unsigned int pw);
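
Usage sketch (consumer code): judging by the pw_requested_mW field added
to struct regulator_dev later in this diff, the budget appears to be
accounted in mW; foo_enable_block() is hypothetical.

	ret = regulator_request_power_budget(regulator, 1500);
	if (ret)
		return ret;

	ret = foo_enable_block(priv);
	if (ret)
		regulator_free_power_budget(regulator, 1500);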
int regulator_set_mode(struct regulator *regulator, unsigned int mode);
unsigned int regulator_get_mode(struct regulator *regulator);
@@ -268,6 +253,7 @@ int regulator_get_hardware_vsel_register(struct regulator *regulator,
unsigned *vsel_mask);
int regulator_list_hardware_vsel(struct regulator *regulator,
unsigned selector);
+int regulator_hardware_enable(struct regulator *regulator, bool enable);
/* regulator notifier block */
int regulator_register_notifier(struct regulator *regulator,
@@ -337,6 +323,23 @@ devm_regulator_get_exclusive(struct device *dev, const char *id)
return ERR_PTR(-ENODEV);
}
+static inline int devm_regulator_get_enable(struct device *dev, const char *id)
+{
+ return 0;
+}
+
+static inline int devm_regulator_get_enable_optional(struct device *dev,
+ const char *id)
+{
+ return 0;
+}
+
+static inline int devm_regulator_get_enable_read_voltage(struct device *dev,
+ const char *id)
+{
+ return -ENODEV;
+}
+
static inline struct regulator *__must_check
regulator_get_optional(struct device *dev, const char *id)
{
@@ -358,6 +361,10 @@ static inline void devm_regulator_put(struct regulator *regulator)
{
}
+static inline void devm_regulator_bulk_put(struct regulator_bulk_data *consumers)
+{
+}
+
static inline int regulator_register_supply_alias(struct device *dev,
const char *id,
struct device *alias_dev,
@@ -394,11 +401,6 @@ static inline int devm_regulator_register_supply_alias(struct device *dev,
return 0;
}
-static inline void devm_regulator_unregister_supply_alias(struct device *dev,
- const char *id)
-{
-}
-
static inline int devm_regulator_bulk_register_supply_alias(struct device *dev,
const char *const *id,
struct device *alias_dev,
@@ -408,11 +410,6 @@ static inline int devm_regulator_bulk_register_supply_alias(struct device *dev,
return 0;
}
-static inline void devm_regulator_bulk_unregister_supply_alias(
- struct device *dev, const char *const *id, int num_id)
-{
-}
-
static inline int regulator_enable(struct regulator *regulator)
{
return 0;
@@ -452,12 +449,27 @@ static inline int devm_regulator_bulk_get(struct device *dev, int num_consumers,
return 0;
}
+static inline int devm_regulator_bulk_get_const(
+ struct device *dev, int num_consumers,
+ const struct regulator_bulk_data *in_consumers,
+ struct regulator_bulk_data **out_consumers)
+{
+ return 0;
+}
+
static inline int regulator_bulk_enable(int num_consumers,
struct regulator_bulk_data *consumers)
{
return 0;
}
+static inline int devm_regulator_bulk_get_enable(struct device *dev,
+ int num_consumers,
+ const char * const *id)
+{
+ return 0;
+}
+
static inline int regulator_bulk_disable(int num_consumers,
struct regulator_bulk_data *consumers)
{
@@ -519,6 +531,22 @@ static inline int regulator_get_current_limit(struct regulator *regulator)
return 0;
}
+static inline int regulator_get_unclaimed_power_budget(struct regulator *regulator)
+{
+ return INT_MAX;
+}
+
+static inline int regulator_request_power_budget(struct regulator *regulator,
+ unsigned int pw_req)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void regulator_free_power_budget(struct regulator *regulator,
+ unsigned int pw)
+{
+}
+
static inline int regulator_set_mode(struct regulator *regulator,
unsigned int mode)
{
@@ -565,6 +593,12 @@ static inline int regulator_list_hardware_vsel(struct regulator *regulator,
return -EOPNOTSUPP;
}
+static inline int regulator_hardware_enable(struct regulator *regulator,
+ bool enable)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int regulator_register_notifier(struct regulator *regulator,
struct notifier_block *nb)
{
@@ -642,6 +676,44 @@ regulator_is_equal(struct regulator *reg1, struct regulator *reg2)
}
#endif
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_REGULATOR)
+struct regulator *__must_check of_regulator_get(struct device *dev,
+ struct device_node *node,
+ const char *id);
+struct regulator *__must_check devm_of_regulator_get(struct device *dev,
+ struct device_node *node,
+ const char *id);
+struct regulator *__must_check of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id);
+struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id);
+int __must_check of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
+ struct regulator_bulk_data **consumers);
+#else
+static inline struct regulator *__must_check of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev,
+ struct device_node *node,
+ const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int of_regulator_bulk_get_all(struct device *dev, struct device_node *np,
+ struct regulator_bulk_data **consumers)
+{
+ return 0;
+}
+
+#endif
+
static inline int regulator_set_voltage_triplet(struct regulator *regulator,
int min_uV, int target_uV,
int max_uV)
diff --git a/include/linux/regulator/coupler.h b/include/linux/regulator/coupler.h
index 5f86824bd117..5e314a4294fb 100644
--- a/include/linux/regulator/coupler.h
+++ b/include/linux/regulator/coupler.h
@@ -8,7 +8,8 @@
#ifndef __LINUX_REGULATOR_COUPLER_H_
#define __LINUX_REGULATOR_COUPLER_H_
-#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
#include <linux/suspend.h>
struct regulator_coupler;
@@ -52,7 +53,6 @@ struct regulator_coupler {
#ifdef CONFIG_REGULATOR
int regulator_coupler_register(struct regulator_coupler *coupler);
-const char *rdev_get_name(struct regulator_dev *rdev);
int regulator_check_consumers(struct regulator_dev *rdev,
int *min_uV, int *max_uV,
suspend_state_t state);
@@ -69,10 +69,6 @@ static inline int regulator_coupler_register(struct regulator_coupler *coupler)
{
return 0;
}
-static inline const char *rdev_get_name(struct regulator_dev *rdev)
-{
- return NULL;
-}
static inline int regulator_check_consumers(struct regulator_dev *rdev,
int *min_uV, int *max_uV,
suspend_state_t state)
diff --git a/include/linux/regulator/db8500-prcmu.h b/include/linux/regulator/db8500-prcmu.h
index f90df9ee703e..d58ff273157e 100644
--- a/include/linux/regulator/db8500-prcmu.h
+++ b/include/linux/regulator/db8500-prcmu.h
@@ -35,10 +35,4 @@ enum db8500_regulator_id {
DB8500_NUM_REGULATORS
};
-/*
- * Exported interface for CPUIdle only. This function is called with all
- * interrupts turned off.
- */
-int power_state_active_is_enabled(void);
-
#endif
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4ea520c248e9..978cf593b662 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -40,14 +40,18 @@ enum regulator_status {
REGULATOR_STATUS_UNDEFINED,
};
+enum regulator_detection_severity {
+ /* Hardware shut down voltage outputs if condition is detected */
+ REGULATOR_SEVERITY_PROT,
+ /* Hardware is probably damaged/inoperable */
+ REGULATOR_SEVERITY_ERR,
+ /* Hardware is still recoverable but recovery action must be taken */
+ REGULATOR_SEVERITY_WARN,
+};
+
/* Initialize struct linear_range for regulators */
#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
-{ \
- .min = _min_uV, \
- .min_sel = _min_sel, \
- .max_sel = _max_sel, \
- .step = _step_uV, \
-}
+ LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV)
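
The macro is now a thin wrapper around the generic LINEAR_RANGE()
initializer, so usage is unchanged; an illustrative table (values made
up):

static const struct linear_range foo_ranges[] = {
	/* 800 mV..1600 mV in 50 mV steps */
	REGULATOR_LINEAR_RANGE(800000, 0x00, 0x10, 50000),
	/* 1800 mV..3300 mV in 100 mV steps */
	REGULATOR_LINEAR_RANGE(1800000, 0x11, 0x20, 100000),
};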
/**
* struct regulator_ops - regulator operations.
@@ -78,8 +82,31 @@ enum regulator_status {
* @get_current_limit: Get the configured limit for a current-limited regulator.
* @set_input_current_limit: Configure an input limit.
*
- * @set_over_current_protection: Support capability of automatically shutting
- * down when detecting an over current event.
+ * @set_over_current_protection: Support enabling of and setting limits for over
+ * current situation detection. Detection can be configured for three
+ * levels of severity.
+ *
+ * - REGULATOR_SEVERITY_PROT should automatically shut down the regulator(s).
+ *
+ * - REGULATOR_SEVERITY_ERR should indicate that over-current situation is
+ * caused by an unrecoverable error but HW does not perform
+ * automatic shut down.
+ *
+ * - REGULATOR_SEVERITY_WARN should indicate situation where hardware is
+ * still believed not to be damaged but that a board specific
+ * recovery action is needed. If lim_uA is 0 the limit should not
+ * be changed but the detection should just be enabled/disabled as
+ * is requested.
+ *
+ * @set_over_voltage_protection: Support enabling of and setting limits for over
+ * voltage situation detection. Detection can be configured for same
+ * severities as over current protection. Units of uV.
+ * @set_under_voltage_protection: Support enabling of and setting limits for
+ * under voltage situation detection. Detection can be configured for same
+ * severities as over current protection. Units of uV.
+ * @set_thermal_protection: Support enabling of and setting limits for over
+ * temperature situation detection. Detection can be configured for the
+ * same severities as over current protection. Units of degrees Kelvin.
*
* @set_active_discharge: Set active discharge enable/disable of regulators.
*
@@ -143,8 +170,15 @@ struct regulator_ops {
int (*get_current_limit) (struct regulator_dev *);
int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
- int (*set_over_current_protection) (struct regulator_dev *);
- int (*set_active_discharge) (struct regulator_dev *, bool enable);
+ int (*set_over_current_protection)(struct regulator_dev *, int lim_uA,
+ int severity, bool enable);
+ int (*set_over_voltage_protection)(struct regulator_dev *, int lim_uV,
+ int severity, bool enable);
+ int (*set_under_voltage_protection)(struct regulator_dev *, int lim_uV,
+ int severity, bool enable);
+ int (*set_thermal_protection)(struct regulator_dev *, int lim,
+ int severity, bool enable);
+ int (*set_active_discharge)(struct regulator_dev *, bool enable);
/* enable/disable regulator */
int (*enable) (struct regulator_dev *);
@@ -235,6 +269,11 @@ enum regulator_type {
* config but it cannot store it for later usage.
* Callback should return 0 on success or negative ERRNO
* indicating failure.
+ * @init_cb: Optional callback called after the parsing of init_data.
+ * Allows the regulator to perform runtime init if necessary,
+ * such as syncing the regulator and the parsed constraints.
+ * Callback should return 0 on success or negative ERRNO
+ * indicating failure.
* @id: Numerical identifier for the regulator.
* @ops: Regulator operations table.
* @irq: Interrupt number for the regulator.
@@ -253,17 +292,20 @@ enum regulator_type {
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
* @min_dropout_uV: The minimum dropout voltage this regulator can handle
* @linear_ranges: A constant table of possible voltage ranges.
- * @linear_range_selectors: A constant table of voltage range selectors.
- * If pickable ranges are used each range must
- * have corresponding selector here.
+ * @linear_range_selectors_bitfield: A constant table of voltage range
+ * selectors as bitfield values. If
+ * pickable ranges are used each range
+ * must have corresponding selector here.
* @n_linear_ranges: Number of entries in the @linear_ranges (and in
- * linear_range_selectors if used) table(s).
+ * linear_range_selectors_bitfield if used) table(s).
* @volt_table: Voltage mapping table (if table based mapping)
* @curr_table: Current limit mapping table (if table based mapping)
*
* @vsel_range_reg: Register for range selector when using pickable ranges
* and ``regulator_map_*_voltage_*_pickable`` functions.
* @vsel_range_mask: Mask for register bitfield used for range selector
+ * @range_applied_by_vsel: A flag to indicate that changes to vsel_range_reg
+ * are only effective after vsel_reg is written
* @vsel_reg: Register for selector when using ``regulator_map_*_voltage_*``
* @vsel_mask: Mask for register bitfield used for selector
* @vsel_step: Specify the resolution of selector stepping when setting
@@ -304,6 +346,13 @@ enum regulator_type {
* @pull_down_val_on: Enabling value for control when using regmap
* set_pull_down
*
+ * @ramp_reg: Register for controlling the regulator ramp-rate.
+ * @ramp_mask: Bitmask for the ramp-rate control register.
+ * @ramp_delay_table: Table for mapping the regulator ramp-rate values. Values
+ * should be given in units of V/S (uV/uS). See
+ * regulator_set_ramp_delay_regmap().
+ * @n_ramp_values: Number of elements in @ramp_delay_table.
+ *
* @enable_time: Time taken for initial enable of regulator (in uS).
* @off_on_delay: guard time (in uS), before re-enabling a regulator
*
@@ -321,6 +370,8 @@ struct regulator_desc {
int (*of_parse_cb)(struct device_node *,
const struct regulator_desc *,
struct regulator_config *);
+ int (*init_cb)(struct regulator_dev *,
+ struct regulator_config *);
int id;
unsigned int continuous_voltage_range:1;
unsigned n_voltages;
@@ -338,7 +389,7 @@ struct regulator_desc {
int min_dropout_uV;
const struct linear_range *linear_ranges;
- const unsigned int *linear_range_selectors;
+ const unsigned int *linear_range_selectors_bitfield;
int n_linear_ranges;
@@ -347,6 +398,7 @@ struct regulator_desc {
unsigned int vsel_range_reg;
unsigned int vsel_range_mask;
+ bool range_applied_by_vsel;
unsigned int vsel_reg;
unsigned int vsel_mask;
unsigned int vsel_step;
@@ -413,6 +465,130 @@ struct regulator_config {
struct gpio_desc *ena_gpiod;
};
+/**
+ * struct regulator_err_state - regulator error/notification status
+ *
+ * @rdev: Regulator which status the struct indicates.
+ * @notifs: Events which have occurred on the regulator.
+ * @errors: Errors which are active on the regulator.
+ * @possible_errs: Errors which can be signaled (by given IRQ).
+ */
+struct regulator_err_state {
+ struct regulator_dev *rdev;
+ unsigned long notifs;
+ unsigned long errors;
+ int possible_errs;
+};
+
+/**
+ * struct regulator_irq_data - regulator error/notification status data
+ *
+ * @states: Status structs for each of the associated regulators.
+ * @num_states: Amount of associated regulators.
+ * @data: Driver data pointer given at regulator_irq_desc.
+ * @opaque: Value storage for IC driver. Core does not update this. ICs
+ * may want to store the status register value here at map_event and
+ * compare the contents at the 'renable' callback to see if new problems
+ * have been added to the status. If that is the case it may be
+ * desirable to return REGULATOR_ERROR_CLEARED and not
+ * REGULATOR_ERROR_ON to allow the IRQ to fire again and to generate
+ * notifications also for the new issues.
+ *
+ * This structure is passed to 'map_event' and 'renable' callbacks for
+ * reporting regulator status to core.
+ */
+struct regulator_irq_data {
+ struct regulator_err_state *states;
+ int num_states;
+ void *data;
+ long opaque;
+};
+
+/**
+ * struct regulator_irq_desc - notification sender for IRQ based events.
+ *
+ * @name: The visible name for the IRQ
+ * @fatal_cnt: If this IRQ is used to signal a HW damaging condition it may
+ * be best to shut down the regulator(s) or reboot the SoC if error
+ * handling keeps failing. If fatal_cnt is given, IRQ handling is
+ * aborted after it fails fatal_cnt times and the die() callback
+ * (if populated) is called. If die() is not populated, a system
+ * poweroff is attempted in order to prevent any further damage.
+ * @reread_ms: The time which is waited before attempting to re-read status
+ * at the worker if IC reading fails. Immediate re-read is done
+ * if time is not specified.
+ * @irq_off_ms: The time which IRQ is kept disabled before re-evaluating the
+ * status for devices which keep IRQ disabled for duration of the
+ * error. If this is not given the IRQ is left enabled and renable
+ * is not called.
+ * @skip_off: If set to true the IRQ handler will attempt to check if any of
+ * the associated regulators are enabled prior to taking other
+ * actions. If no regulators are enabled and this is set to true
+ * a spurious IRQ is assumed and IRQ_NONE is returned.
+ * @high_prio: Boolean to indicate that high priority WQ should be used.
+ * @data: Driver private data pointer which will be passed as such to
+ * the renable, map_event and die callbacks in regulator_irq_data.
+ * @die: Protection callback. If IC status reading or recovery actions
+ * fail fatal_cnt times this callback is called or system is
+ * powered off. This callback should implement a final protection
+ * attempt like disabling the regulator. If protection succeeded
+ * die() may return 0. If anything else is returned the core
+ * assumes final protection failed and attempts to perform a
+ * poweroff as a last resort.
+ * @map_event: Driver callback to map IRQ status into regulator devices with
+ * events / errors. NOTE: callback MUST initialize both the
+ * errors and notifs for all rdevs which it signals having
+ * active events as core does not clean the map data.
+ * REGULATOR_FAILED_RETRY can be returned to indicate that the
+ * status reading from IC failed. If this is repeated for
+ * fatal_cnt times the core will call die() callback or power-off
+ * the system as a last resort to protect the HW.
+ * @renable: Optional callback to check status (if HW supports that) before
+ * re-enabling IRQ. If implemented this should clear the error
+ * flags so that errors fetched by regulator_get_error_flags()
+ * are updated. If callback is not implemented then errors are
+ * assumed to be cleared and IRQ is re-enabled.
+ * REGULATOR_FAILED_RETRY can be returned to
+ * indicate that the status reading from IC failed. If this is
+ * repeated for 'fatal_cnt' times the core will call die()
+ * callback or if die() is not populated then attempt to power-off
+ * the system as a last resort to protect the HW.
+ * Returning zero indicates that the problem in HW has been solved
+ * and IRQ will be re-enabled. Returning REGULATOR_ERROR_ON
+ * indicates the error condition is still active and keeps IRQ
+ * disabled. Please note that returning REGULATOR_ERROR_ON does
+ * not retrigger evaluating what events are active or resending
+ * notifications. If this is needed you probably want to return
+ * zero and allow IRQ to retrigger causing events to be
+ * re-evaluated and re-sent.
+ *
+ * This structure is used for registering regulator IRQ notification helper.
+ */
+struct regulator_irq_desc {
+ const char *name;
+ int fatal_cnt;
+ int reread_ms;
+ int irq_off_ms;
+ bool skip_off;
+ bool high_prio;
+ void *data;
+
+ int (*die)(struct regulator_irq_data *rid);
+ int (*map_event)(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask);
+ int (*renable)(struct regulator_irq_data *rid);
+};
+
+/*
+ * Return values for regulator IRQ helpers.
+ */
+enum {
+ REGULATOR_ERROR_CLEARED,
+ REGULATOR_FAILED_RETRY,
+ REGULATOR_ERROR_ON,
+};
+
/*
* struct coupling_desc
*
@@ -477,20 +653,74 @@ struct regulator_dev {
/* time when this regulator was disabled last time */
ktime_t last_off;
+ int cached_err;
+ bool use_cached_err;
+ spinlock_t err_lock;
+
+ int pw_requested_mW;
+
+ /* regulator notification forwarding */
+ struct notifier_block supply_fwd_nb;
};
+/*
+ * Convert error flags to corresponding notifications.
+ *
+ * Can be used by drivers which use the notification helpers to
+ * find out the correct notification flags based on the error flags. Drivers
+ * can avoid storing both supported notification and error flags, which
+ * may save a few bytes.
+ */
+static inline int regulator_err2notif(int err)
+{
+ switch (err) {
+ case REGULATOR_ERROR_UNDER_VOLTAGE:
+ return REGULATOR_EVENT_UNDER_VOLTAGE;
+ case REGULATOR_ERROR_OVER_CURRENT:
+ return REGULATOR_EVENT_OVER_CURRENT;
+ case REGULATOR_ERROR_REGULATION_OUT:
+ return REGULATOR_EVENT_REGULATION_OUT;
+ case REGULATOR_ERROR_FAIL:
+ return REGULATOR_EVENT_FAIL;
+ case REGULATOR_ERROR_OVER_TEMP:
+ return REGULATOR_EVENT_OVER_TEMP;
+ case REGULATOR_ERROR_UNDER_VOLTAGE_WARN:
+ return REGULATOR_EVENT_UNDER_VOLTAGE_WARN;
+ case REGULATOR_ERROR_OVER_CURRENT_WARN:
+ return REGULATOR_EVENT_OVER_CURRENT_WARN;
+ case REGULATOR_ERROR_OVER_VOLTAGE_WARN:
+ return REGULATOR_EVENT_OVER_VOLTAGE_WARN;
+ case REGULATOR_ERROR_OVER_TEMP_WARN:
+ return REGULATOR_EVENT_OVER_TEMP_WARN;
+ }
+ return 0;
+}
+
+
struct regulator_dev *
-regulator_register(const struct regulator_desc *regulator_desc,
+regulator_register(struct device *dev,
+ const struct regulator_desc *regulator_desc,
const struct regulator_config *config);
struct regulator_dev *
devm_regulator_register(struct device *dev,
const struct regulator_desc *regulator_desc,
const struct regulator_config *config);
void regulator_unregister(struct regulator_dev *rdev);
-void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev);
int regulator_notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
+void *devm_regulator_irq_helper(struct device *dev,
+ const struct regulator_irq_desc *d, int irq,
+ int irq_flags, int common_errs,
+ int *per_rdev_errs, struct regulator_dev **rdev,
+ int rdev_amount);
+void *regulator_irq_helper(struct device *dev,
+ const struct regulator_irq_desc *d, int irq,
+ int irq_flags, int common_errs, int *per_rdev_errs,
+ struct regulator_dev **rdev, int rdev_amount);
+void regulator_irq_helper_cancel(void **handle);
+int regulator_irq_map_event_simple(int irq, struct regulator_irq_data *rid,
+ unsigned long *dev_mask);
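
Usage sketch (driver code): one IRQ line signalling over-current for two
rdevs, with the helper taking care of notifications and IRQ re-enabling.
foo_irq_desc and rdevs are hypothetical.

	void *helper;

	helper = devm_regulator_irq_helper(dev, &foo_irq_desc, irq, 0,
					   REGULATOR_ERROR_OVER_CURRENT,
					   NULL, rdevs, 2);
	if (IS_ERR(helper))
		return PTR_ERR(helper);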
void *rdev_get_drvdata(struct regulator_dev *rdev);
struct device *rdev_get_dev(struct regulator_dev *rdev);
@@ -539,7 +769,10 @@ int regulator_set_current_limit_regmap(struct regulator_dev *rdev,
int min_uA, int max_uA);
int regulator_get_current_limit_regmap(struct regulator_dev *rdev);
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
+int regulator_find_closest_bigger(unsigned int target, const unsigned int *table,
+ unsigned int num_sel, unsigned int *sel);
int regulator_set_ramp_delay_regmap(struct regulator_dev *rdev, int ramp_delay);
+int regulator_sync_voltage_rdev(struct regulator_dev *rdev);
/*
* Helper functions intended to be used by regulator drivers prior registering
@@ -550,4 +783,14 @@ int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc,
int regulator_desc_list_voltage_linear(const struct regulator_desc *desc,
unsigned int selector);
+
+#ifdef CONFIG_REGULATOR
+const char *rdev_get_name(struct regulator_dev *rdev);
+#else
+static inline const char *rdev_get_name(struct regulator_dev *rdev)
+{
+ return NULL;
+}
+#endif
+
#endif
diff --git a/include/linux/regulator/gpio-regulator.h b/include/linux/regulator/gpio-regulator.h
index fdeb312cdabd..c223e50ff9f7 100644
--- a/include/linux/regulator/gpio-regulator.h
+++ b/include/linux/regulator/gpio-regulator.h
@@ -42,6 +42,7 @@ struct gpio_regulator_state {
/**
* struct gpio_regulator_config - config structure
* @supply_name: Name of the regulator supply
+ * @input_supply: Name of the input regulator supply
* @enabled_at_boot: Whether regulator has been enabled at
* boot or not. 1 = Yes, 0 = No
* This is used to keep the regulator at
@@ -62,6 +63,7 @@ struct gpio_regulator_state {
*/
struct gpio_regulator_config {
const char *supply_name;
+ const char *input_supply;
unsigned enabled_at_boot:1;
unsigned startup_delay;
diff --git a/include/linux/regulator/lp872x.h b/include/linux/regulator/lp872x.h
index d780dbb8b423..b62e45aa1dd3 100644
--- a/include/linux/regulator/lp872x.h
+++ b/include/linux/regulator/lp872x.h
@@ -10,7 +10,7 @@
#include <linux/regulator/machine.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#define LP872X_MAX_REGULATORS 9
@@ -40,11 +40,6 @@ enum lp872x_regulator_id {
LP872X_ID_MAX,
};
-enum lp872x_dvs_state {
- DVS_LOW = GPIOF_OUT_INIT_LOW,
- DVS_HIGH = GPIOF_OUT_INIT_HIGH,
-};
-
enum lp872x_dvs_sel {
SEL_V1,
SEL_V2,
@@ -52,14 +47,14 @@ enum lp872x_dvs_sel {
/**
* lp872x_dvs
- * @gpio : gpio pin number for dvs control
+ * @gpio : gpio descriptor for dvs control
* @vsel : dvs selector for buck v1 or buck v2 register
* @init_state : initial dvs pin state
*/
struct lp872x_dvs {
- int gpio;
+ struct gpio_desc *gpio;
enum lp872x_dvs_sel vsel;
- enum lp872x_dvs_state init_state;
+ enum gpiod_flags init_state;
};
/**
@@ -78,14 +73,14 @@ struct lp872x_regulator_data {
* @update_config : if LP872X_GENERAL_CFG register is updated, set true
* @regulator_data : platform regulator id and init data
* @dvs : dvs data for buck voltage control
- * @enable_gpio : gpio pin number for enable control
+ * @enable_gpio : gpio descriptor for enable control
*/
struct lp872x_platform_data {
u8 general_config;
bool update_config;
struct lp872x_regulator_data regulator_data[LP872X_MAX_REGULATORS];
struct lp872x_dvs *dvs;
- int enable_gpio;
+ struct gpio_desc *enable_gpio;
};
#endif
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 8a56f033b6cd..1fc440c5c4c7 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -49,6 +49,13 @@ struct regulator;
#define DISABLE_IN_SUSPEND 1
#define ENABLE_IN_SUSPEND 2
+/*
+ * Default time window (in milliseconds) following a critical under-voltage
+ * event during which less critical actions can be safely carried out by the
+ * system.
+ */
+#define REGULATOR_DEF_UV_LESS_CRITICAL_WINDOW_MS 10
+
/* Regulator active discharge flags */
enum regulator_active_discharge {
REGULATOR_ACTIVE_DISCHARGE_DEFAULT,
@@ -83,6 +90,14 @@ struct regulator_state {
bool changeable;
};
+#define REGULATOR_NOTIF_LIMIT_DISABLE -1
+#define REGULATOR_NOTIF_LIMIT_ENABLE -2
+struct notification_limit {
+ int prot;
+ int err;
+ int warn;
+};
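A sketch of how these limits might be filled in machine constraints: the negative REGULATOR_NOTIF_LIMIT_* values disable a severity level or enable it at its hardware default, while a plain value sets a threshold. The threshold value and its uV unit are made-up assumptions.

/* Illustrative only: no protection-level action, error level at its
 * hardware default, warn at an assumed 3.0 V (in uV).
 */
static const struct notification_limit my_uv_limits = {
	.prot = REGULATOR_NOTIF_LIMIT_DISABLE,
	.err  = REGULATOR_NOTIF_LIMIT_ENABLE,
	.warn = 3000000,
};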
+
/**
* struct regulation_constraints - regulator operating constraints.
*
@@ -98,8 +113,14 @@ struct regulator_state {
* @min_uA: Smallest current consumers may set.
* @max_uA: Largest current consumers may set.
* @ilim_uA: Maximum input current.
+ * @pw_budget_mW: Power budget for the regulator in mW.
* @system_load: Load that isn't captured by any consumer requests.
*
+ * @over_curr_limits: Limits for acting on over current.
+ * @over_voltage_limits: Limits for acting on over voltage.
+ * @under_voltage_limits: Limits for acting on under voltage.
+ * @temp_limits: Limits for acting on over temperature.
+ *
* @max_spread: Max possible spread between coupled regulators
* @max_uV_step: Max possible step change in voltage
* @valid_modes_mask: Mask of modes which may be configured by consumers.
@@ -114,8 +135,15 @@ struct regulator_state {
* @ramp_disable: Disable ramp delay when initialising or when setting voltage.
* @soft_start: Enable soft start so that voltage ramps slowly.
* @pull_down: Enable pull down when regulator is disabled.
+ * @system_critical: Set if the regulator is critical to system stability or
+ * functionality.
* @over_current_protection: Auto disable on over current event.
*
+ * @over_current_detection: Configure over current limits.
+ * @over_voltage_detection: Configure over voltage limits.
+ * @under_voltage_detection: Configure under voltage limits.
+ * @over_temp_detection: Configure over temperature limits.
+ *
* @input_uV: Input voltage for regulator when supplied by another regulator.
*
* @state_disk: State for regulator when system is suspended in disk mode.
@@ -135,6 +163,13 @@ struct regulator_state {
* regulator_active_discharge values are used for
* initialisation.
* @enable_time: Turn-on time of the rails (unit: microseconds)
+ * @uv_less_critical_window_ms: Specifies the time window (in milliseconds)
+ * following a critical under-voltage (UV) event
+ * during which less critical actions can be
+ * safely carried out by the system (for example
+ * logging). After this window, more critical
+ * actions should be taken (for example,
+ * preventing HW damage).
*/
struct regulation_constraints {
@@ -151,6 +186,7 @@ struct regulation_constraints {
int max_uA;
int ilim_uA;
+ int pw_budget_mW;
int system_load;
/* used for coupled regulators */
@@ -172,6 +208,10 @@ struct regulation_constraints {
struct regulator_state state_disk;
struct regulator_state state_mem;
struct regulator_state state_standby;
+ struct notification_limit over_curr_limits;
+ struct notification_limit over_voltage_limits;
+ struct notification_limit under_voltage_limits;
+ struct notification_limit temp_limits;
suspend_state_t initial_state; /* suspend state to set at init */
/* mode to set on startup */
@@ -182,6 +222,7 @@ struct regulation_constraints {
unsigned int settling_time_up;
unsigned int settling_time_down;
unsigned int enable_time;
+ unsigned int uv_less_critical_window_ms;
unsigned int active_discharge;
@@ -192,7 +233,12 @@ struct regulation_constraints {
unsigned ramp_disable:1; /* disable ramp delay */
unsigned soft_start:1; /* ramp voltage slowly */
unsigned pull_down:1; /* pull down resistor when regulator off */
+ unsigned system_critical:1; /* critical to system stability */
unsigned over_current_protection:1; /* auto disable on over current */
+ unsigned over_current_detection:1; /* notify on over current */
+ unsigned over_voltage_detection:1; /* notify on over voltage */
+ unsigned under_voltage_detection:1; /* notify on under voltage */
+ unsigned over_temp_detection:1; /* notify on over temperature */
};
/**
@@ -229,8 +275,6 @@ struct regulator_consumer_supply {
* be usable.
* @num_consumer_supplies: Number of consumer device supplies.
* @consumer_supplies: Consumer device supply configuration.
- *
- * @regulator_init: Callback invoked when the regulator has been registered.
- * @driver_data: Data passed to regulator_init.
+ * @driver_data: Opaque machine-specific data (the core does not touch it).
*/
struct regulator_init_data {
@@ -241,8 +285,7 @@ struct regulator_init_data {
int num_consumer_supplies;
struct regulator_consumer_supply *consumer_supplies;
- /* optional regulator machine specific init */
- int (*regulator_init)(void *driver_data);
+ /* optional regulator machine specific data */
void *driver_data; /* core does not touch this */
};
diff --git a/include/linux/regulator/max8952.h b/include/linux/regulator/max8952.h
index 8712c091abf0..61dcd8e00a2f 100644
--- a/include/linux/regulator/max8952.h
+++ b/include/linux/regulator/max8952.h
@@ -2,7 +2,7 @@
/*
* max8952.h - Voltage regulation for the Maxim 8952
*
- * Copyright (C) 2010 Samsung Electrnoics
+ * Copyright (C) 2010 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*/
diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h
index 8313e7ed6aec..a225e9eeb30d 100644
--- a/include/linux/regulator/max8973-regulator.h
+++ b/include/linux/regulator/max8973-regulator.h
@@ -48,10 +48,6 @@
* control signal from EN input pin. If it is false then
* voltage output will be enabled/disabled through EN bit of
* device register.
- * @enable_gpio: Enable GPIO. If EN pin is controlled through GPIO from host
- * then GPIO number can be provided. If no GPIO controlled then
- * it should be -1.
- * @dvs_gpio: GPIO for dvs. It should be -1 if this is tied with fixed logic.
* @dvs_def_state: Default state of dvs. 1 if it is high else 0.
*/
struct max8973_regulator_platform_data {
@@ -59,8 +55,6 @@ struct max8973_regulator_platform_data {
unsigned long control_flags;
unsigned long junction_temp_warning;
bool enable_ext_control;
- int enable_gpio;
- int dvs_gpio;
unsigned dvs_def_state:1;
};
diff --git a/include/linux/regulator/mt6331-regulator.h b/include/linux/regulator/mt6331-regulator.h
new file mode 100644
index 000000000000..2801a9879c14
--- /dev/null
+++ b/include/linux/regulator/mt6331-regulator.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __LINUX_REGULATOR_MT6331_H
+#define __LINUX_REGULATOR_MT6331_H
+
+enum {
+ /* BUCK */
+ MT6331_ID_VDVFS11 = 0,
+ MT6331_ID_VDVFS12,
+ MT6331_ID_VDVFS13,
+ MT6331_ID_VDVFS14,
+ MT6331_ID_VCORE2,
+ MT6331_ID_VIO18,
+ /* LDO */
+ MT6331_ID_VTCXO1,
+ MT6331_ID_VTCXO2,
+ MT6331_ID_AVDD32_AUD,
+ MT6331_ID_VAUXA32,
+ MT6331_ID_VCAMA,
+ MT6331_ID_VIO28,
+ MT6331_ID_VCAM_AF,
+ MT6331_ID_VMC,
+ MT6331_ID_VMCH,
+ MT6331_ID_VEMC33,
+ MT6331_ID_VGP1,
+ MT6331_ID_VSIM1,
+ MT6331_ID_VSIM2,
+ MT6331_ID_VMIPI,
+ MT6331_ID_VIBR,
+ MT6331_ID_VGP4,
+ MT6331_ID_VCAMD,
+ MT6331_ID_VUSB10,
+ MT6331_ID_VCAM_IO,
+ MT6331_ID_VSRAM_DVFS1,
+ MT6331_ID_VGP2,
+ MT6331_ID_VGP3,
+ MT6331_ID_VRTC,
+ MT6331_ID_VDIG18,
+ MT6331_ID_VREG_MAX
+};
+
+#endif /* __LINUX_REGULATOR_MT6331_H */
diff --git a/include/linux/regulator/mt6332-regulator.h b/include/linux/regulator/mt6332-regulator.h
new file mode 100644
index 000000000000..af5e3ed31029
--- /dev/null
+++ b/include/linux/regulator/mt6332-regulator.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Collabora Ltd.
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __LINUX_REGULATOR_MT6332_H
+#define __LINUX_REGULATOR_MT6332_H
+
+enum {
+ /* BUCK */
+ MT6332_ID_VDRAM = 0,
+ MT6332_ID_VDVFS2,
+ MT6332_ID_VPA,
+ MT6332_ID_VRF1,
+ MT6332_ID_VRF2,
+ MT6332_ID_VSBST,
+ /* LDO */
+ MT6332_ID_VAUXB32,
+ MT6332_ID_VBIF28,
+ MT6332_ID_VDIG18,
+ MT6332_ID_VSRAM_DVFS2,
+ MT6332_ID_VUSB33,
+ MT6332_ID_VREG_MAX
+};
+
+#endif /* __LINUX_REGULATOR_MT6332_H */
diff --git a/include/linux/regulator/mt6357-regulator.h b/include/linux/regulator/mt6357-regulator.h
new file mode 100644
index 000000000000..238b1ee77ea6
--- /dev/null
+++ b/include/linux/regulator/mt6357-regulator.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6357_H
+#define __LINUX_REGULATOR_MT6357_H
+
+enum {
+ /* Bucks */
+ MT6357_ID_VCORE,
+ MT6357_ID_VMODEM,
+ MT6357_ID_VPA,
+ MT6357_ID_VPROC,
+ MT6357_ID_VS1,
+
+ /* LDOs */
+ MT6357_ID_VAUX18,
+ MT6357_ID_VAUD28,
+ MT6357_ID_VCAMA,
+ MT6357_ID_VCAMD,
+ MT6357_ID_VCAMIO,
+ MT6357_ID_VCN18,
+ MT6357_ID_VCN28,
+ MT6357_ID_VCN33_BT,
+ MT6357_ID_VCN33_WIFI,
+ MT6357_ID_VDRAM,
+ MT6357_ID_VEFUSE,
+ MT6357_ID_VEMC,
+ MT6357_ID_VFE28,
+ MT6357_ID_VIBR,
+ MT6357_ID_VIO18,
+ MT6357_ID_VIO28,
+ MT6357_ID_VLDO28,
+ MT6357_ID_VMC,
+ MT6357_ID_VMCH,
+ MT6357_ID_VRF12,
+ MT6357_ID_VRF18,
+ MT6357_ID_VSIM1,
+ MT6357_ID_VSIM2,
+ MT6357_ID_VSRAM_OTHERS,
+ MT6357_ID_VSRAM_PROC,
+ MT6357_ID_VUSB33,
+ MT6357_ID_VXO22,
+
+ MT6357_ID_RG_MAX,
+};
+
+#define MT6357_MAX_REGULATOR MT6357_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_MT6357_H */
diff --git a/include/linux/regulator/mt6358-regulator.h b/include/linux/regulator/mt6358-regulator.h
index 1cc304946d09..562386f9b80e 100644
--- a/include/linux/regulator/mt6358-regulator.h
+++ b/include/linux/regulator/mt6358-regulator.h
@@ -41,8 +41,7 @@ enum {
MT6358_ID_VIO28,
MT6358_ID_VA12,
MT6358_ID_VRF18,
- MT6358_ID_VCN33_BT,
- MT6358_ID_VCN33_WIFI,
+ MT6358_ID_VCN33,
MT6358_ID_VCAMA2,
MT6358_ID_VMC,
MT6358_ID_VLDO28,
@@ -51,6 +50,49 @@ enum {
MT6358_ID_RG_MAX,
};
+enum {
+ MT6366_ID_VDRAM1 = 0,
+ MT6366_ID_VCORE,
+ MT6366_ID_VPA,
+ MT6366_ID_VPROC11,
+ MT6366_ID_VPROC12,
+ MT6366_ID_VGPU,
+ MT6366_ID_VS2,
+ MT6366_ID_VMODEM,
+ MT6366_ID_VS1,
+ MT6366_ID_VDRAM2,
+ MT6366_ID_VSIM1,
+ MT6366_ID_VIBR,
+ MT6366_ID_VRF12,
+ MT6366_ID_VIO18,
+ MT6366_ID_VUSB,
+ MT6366_ID_VCN18,
+ MT6366_ID_VFE28,
+ MT6366_ID_VSRAM_PROC11,
+ MT6366_ID_VCN28,
+ MT6366_ID_VSRAM_OTHERS,
+ MT6366_ID_VSRAM_GPU,
+ MT6366_ID_VXO22,
+ MT6366_ID_VEFUSE,
+ MT6366_ID_VAUX18,
+ MT6366_ID_VMCH,
+ MT6366_ID_VBIF28,
+ MT6366_ID_VSRAM_PROC12,
+ MT6366_ID_VEMC,
+ MT6366_ID_VIO28,
+ MT6366_ID_VA12,
+ MT6366_ID_VRF18,
+ MT6366_ID_VCN33,
+ MT6366_ID_VMC,
+ MT6366_ID_VAUD28,
+ MT6366_ID_VSIM2,
+ MT6366_ID_VM18,
+ MT6366_ID_VMDDR,
+ MT6366_ID_VSRAM_CORE,
+ MT6366_ID_RG_MAX,
+};
+
#define MT6358_MAX_REGULATOR MT6358_ID_RG_MAX
+#define MT6366_MAX_REGULATOR MT6366_ID_RG_MAX
#endif /* __LINUX_REGULATOR_MT6358_H */
diff --git a/include/linux/regulator/mt6359-regulator.h b/include/linux/regulator/mt6359-regulator.h
new file mode 100644
index 000000000000..6d6e5a58f482
--- /dev/null
+++ b/include/linux/regulator/mt6359-regulator.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6359_H
+#define __LINUX_REGULATOR_MT6359_H
+
+enum {
+ MT6359_ID_VS1 = 0,
+ MT6359_ID_VGPU11,
+ MT6359_ID_VMODEM,
+ MT6359_ID_VPU,
+ MT6359_ID_VCORE,
+ MT6359_ID_VS2,
+ MT6359_ID_VPA,
+ MT6359_ID_VPROC2,
+ MT6359_ID_VPROC1,
+ MT6359_ID_VCORE_SSHUB,
+ MT6359_ID_VGPU11_SSHUB = MT6359_ID_VCORE_SSHUB,
+ MT6359_ID_VAUD18 = 10,
+ MT6359_ID_VSIM1,
+ MT6359_ID_VIBR,
+ MT6359_ID_VRF12,
+ MT6359_ID_VUSB,
+ MT6359_ID_VSRAM_PROC2,
+ MT6359_ID_VIO18,
+ MT6359_ID_VCAMIO,
+ MT6359_ID_VCN18,
+ MT6359_ID_VFE28,
+ MT6359_ID_VCN13,
+ MT6359_ID_VCN33_1_BT,
+ MT6359_ID_VCN33_1_WIFI,
+ MT6359_ID_VAUX18,
+ MT6359_ID_VSRAM_OTHERS,
+ MT6359_ID_VEFUSE,
+ MT6359_ID_VXO22,
+ MT6359_ID_VRFCK,
+ MT6359_ID_VBIF28,
+ MT6359_ID_VIO28,
+ MT6359_ID_VEMC,
+ MT6359_ID_VCN33_2_BT,
+ MT6359_ID_VCN33_2_WIFI,
+ MT6359_ID_VA12,
+ MT6359_ID_VA09,
+ MT6359_ID_VRF18,
+ MT6359_ID_VSRAM_MD,
+ MT6359_ID_VUFS,
+ MT6359_ID_VM18,
+ MT6359_ID_VBBCK,
+ MT6359_ID_VSRAM_PROC1,
+ MT6359_ID_VSIM2,
+ MT6359_ID_VSRAM_OTHERS_SSHUB,
+ MT6359_ID_RG_MAX,
+};
+
+#define MT6359_MAX_REGULATOR MT6359_ID_RG_MAX
+
+#endif /* __LINUX_REGULATOR_MT6359_H */
diff --git a/include/linux/regulator/mt6363-regulator.h b/include/linux/regulator/mt6363-regulator.h
new file mode 100644
index 000000000000..60761f01d3ad
--- /dev/null
+++ b/include/linux/regulator/mt6363-regulator.h
@@ -0,0 +1,330 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 MediaTek Inc.
+ * Copyright (c) 2025 Collabora Ltd
+ */
+
+#include <linux/bits.h>
+
+#ifndef __LINUX_REGULATOR_MT6363_H
+#define __LINUX_REGULATOR_MT6363_H
+
+/* Register */
+#define MT6363_TOP_TRAP 0x6
+#define MT6363_TOP_TMA_KEY_L 0x36e
+#define MT6363_RG_BUCK0_EN_ADDR 0x210
+#define MT6363_RG_BUCK_VS2_EN_BIT 0
+#define MT6363_RG_BUCK_VBUCK1_EN_BIT 1
+#define MT6363_RG_BUCK_VBUCK2_EN_BIT 2
+#define MT6363_RG_BUCK_VBUCK3_EN_BIT 3
+#define MT6363_RG_BUCK_VBUCK4_EN_BIT 4
+#define MT6363_RG_BUCK_VBUCK5_EN_BIT 5
+#define MT6363_RG_BUCK_VBUCK6_EN_BIT 6
+#define MT6363_RG_BUCK_VBUCK7_EN_BIT 7
+#define MT6363_RG_BUCK1_EN_ADDR 0x213
+#define MT6363_RG_BUCK_VS1_EN_BIT 0
+#define MT6363_RG_BUCK_VS3_EN_BIT 1
+#define MT6363_RG_LDO_VSRAM_DIGRF_EN_BIT 4
+#define MT6363_RG_LDO_VSRAM_MDFE_EN_BIT 5
+#define MT6363_RG_LDO_VSRAM_MODEM_EN_BIT 6
+#define MT6363_RG_BUCK0_LP_ADDR 0x216
+#define MT6363_RG_BUCK_VS2_LP_BIT 0
+#define MT6363_RG_BUCK_VBUCK1_LP_BIT 1
+#define MT6363_RG_BUCK_VBUCK2_LP_BIT 2
+#define MT6363_RG_BUCK_VBUCK3_LP_BIT 3
+#define MT6363_RG_BUCK_VBUCK4_LP_BIT 4
+#define MT6363_RG_BUCK_VBUCK5_LP_BIT 5
+#define MT6363_RG_BUCK_VBUCK6_LP_BIT 6
+#define MT6363_RG_BUCK_VBUCK7_LP_BIT 7
+#define MT6363_RG_BUCK1_LP_ADDR 0x219
+#define MT6363_RG_BUCK_VS1_LP_BIT 0
+#define MT6363_RG_BUCK_VS3_LP_BIT 1
+#define MT6363_RG_LDO_VSRAM_DIGRF_LP_BIT 4
+#define MT6363_RG_LDO_VSRAM_MDFE_LP_BIT 5
+#define MT6363_RG_LDO_VSRAM_MODEM_LP_BIT 6
+#define MT6363_RG_BUCK_VS2_VOSEL_ADDR 0x21c
+#define MT6363_RG_BUCK_VS2_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VBUCK1_VOSEL_ADDR 0x21d
+#define MT6363_RG_BUCK_VBUCK1_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VBUCK2_VOSEL_ADDR 0x21e
+#define MT6363_RG_BUCK_VBUCK2_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VBUCK3_VOSEL_ADDR 0x21f
+#define MT6363_RG_BUCK_VBUCK3_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VBUCK4_VOSEL_ADDR 0x220
+#define MT6363_RG_BUCK_VBUCK4_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VBUCK5_VOSEL_ADDR 0x221
+#define MT6363_RG_BUCK_VBUCK5_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VBUCK6_VOSEL_ADDR 0x222
+#define MT6363_RG_BUCK_VBUCK6_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VBUCK7_VOSEL_ADDR 0x223
+#define MT6363_RG_BUCK_VBUCK7_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VS1_VOSEL_ADDR 0x224
+#define MT6363_RG_BUCK_VS1_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_BUCK_VS3_VOSEL_ADDR 0x225
+#define MT6363_RG_BUCK_VS3_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_RG_LDO_VSRAM_DIGRF_VOSEL_ADDR 0x228
+#define MT6363_RG_LDO_VSRAM_DIGRF_VOSEL_MASK GENMASK(6, 0)
+#define MT6363_RG_LDO_VSRAM_MDFE_VOSEL_ADDR 0x229
+#define MT6363_RG_LDO_VSRAM_MDFE_VOSEL_MASK GENMASK(6, 0)
+#define MT6363_RG_LDO_VSRAM_MODEM_VOSEL_ADDR 0x22a
+#define MT6363_RG_LDO_VSRAM_MODEM_VOSEL_MASK GENMASK(6, 0)
+#define MT6363_BUCK_TOP_KEY_PROT_LO 0x13fa
+#define MT6363_BUCK_VS2_WDTDBG_VOSEL_ADDR 0x13fc
+#define MT6363_BUCK_VBUCK1_WDTDBG_VOSEL_ADDR 0x13fd
+#define MT6363_BUCK_VBUCK2_WDTDBG_VOSEL_ADDR 0x13fe
+#define MT6363_BUCK_VBUCK3_WDTDBG_VOSEL_ADDR 0x13ff
+#define MT6363_BUCK_VBUCK4_WDTDBG_VOSEL_ADDR 0x1400
+#define MT6363_BUCK_VBUCK5_WDTDBG_VOSEL_ADDR 0x1401
+#define MT6363_BUCK_VBUCK6_WDTDBG_VOSEL_ADDR 0x1402
+#define MT6363_BUCK_VBUCK7_WDTDBG_VOSEL_ADDR 0x1403
+#define MT6363_BUCK_VS1_WDTDBG_VOSEL_ADDR 0x1404
+#define MT6363_BUCK_VS3_WDTDBG_VOSEL_ADDR 0x1405
+#define MT6363_RG_BUCK_EFUSE_RSV1 0x1417
+#define MT6363_RG_BUCK_EFUSE_RSV1_MASK GENMASK(7, 4)
+#define MT6363_BUCK_VS2_OP_EN_0 0x145d
+#define MT6363_BUCK_VS2_HW_LP_MODE 0x1468
+#define MT6363_BUCK_VBUCK1_OP_EN_0 0x14dd
+#define MT6363_BUCK_VBUCK1_HW_LP_MODE 0x14e8
+#define MT6363_RG_BUCK_VBUCK1_SSHUB_EN_ADDR 0x14ea
+#define MT6363_RG_BUCK_VBUCK1_SSHUB_VOSEL_ADDR 0x14eb
+#define MT6363_RG_BUCK_VBUCK1_SSHUB_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_BUCK_VBUCK2_OP_EN_0 0x155d
+#define MT6363_BUCK_VBUCK2_HW_LP_MODE 0x1568
+#define MT6363_RG_BUCK_VBUCK2_SSHUB_EN_ADDR 0x156a
+#define MT6363_RG_BUCK_VBUCK2_SSHUB_VOSEL_ADDR 0x156b
+#define MT6363_RG_BUCK_VBUCK2_SSHUB_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_BUCK_VBUCK3_OP_EN_0 0x15dd
+#define MT6363_BUCK_VBUCK3_HW_LP_MODE 0x15e8
+#define MT6363_BUCK_VBUCK4_OP_EN_0 0x165d
+#define MT6363_BUCK_VBUCK4_HW_LP_MODE 0x1668
+#define MT6363_RG_BUCK_VBUCK4_SSHUB_EN_ADDR 0x166a
+#define MT6363_RG_BUCK_VBUCK4_SSHUB_VOSEL_ADDR 0x166b
+#define MT6363_RG_BUCK_VBUCK4_SSHUB_VOSEL_MASK GENMASK(7, 0)
+#define MT6363_BUCK_VBUCK5_OP_EN_0 0x16dd
+#define MT6363_BUCK_VBUCK5_HW_LP_MODE 0x16e8
+#define MT6363_BUCK_VBUCK6_OP_EN_0 0x175d
+#define MT6363_BUCK_VBUCK6_HW_LP_MODE 0x1768
+#define MT6363_BUCK_VBUCK7_OP_EN_0 0x17dd
+#define MT6363_BUCK_VBUCK7_HW_LP_MODE 0x17e8
+#define MT6363_BUCK_VS1_OP_EN_0 0x185d
+#define MT6363_BUCK_VS1_HW_LP_MODE 0x1868
+#define MT6363_BUCK_VS3_OP_EN_0 0x18dd
+#define MT6363_BUCK_VS3_HW_LP_MODE 0x18e8
+#define MT6363_RG_VS1_FCCM_ADDR 0x1964
+#define MT6363_RG_VS1_FCCM_BIT 0
+#define MT6363_RG_VS3_FCCM_ADDR 0x1973
+#define MT6363_RG_VS3_FCCM_BIT 0
+#define MT6363_RG_BUCK0_FCCM_ADDR 0x1a02
+#define MT6363_RG_VBUCK1_FCCM_BIT 0
+#define MT6363_RG_VBUCK2_FCCM_BIT 1
+#define MT6363_RG_VBUCK3_FCCM_BIT 2
+#define MT6363_RG_VS2_FCCM_BIT 3
+#define MT6363_RG_BUCK0_1_FCCM_ADDR 0x1a82
+#define MT6363_RG_VBUCK4_FCCM_BIT 0
+#define MT6363_RG_VBUCK5_FCCM_BIT 1
+#define MT6363_RG_VBUCK6_FCCM_BIT 2
+#define MT6363_RG_VBUCK7_FCCM_BIT 3
+#define MT6363_RG_VCN13_VOSEL_ADDR 0x1b0f
+#define MT6363_RG_VCN13_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VEMC_VOSEL_ADDR 0x1b10
+#define MT6363_RG_VEMC_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VEMC_VOSEL_1_MASK GENMASK(7, 4)
+#define MT6363_RG_LDO_VSRAM_CPUB_VOSEL_ADDR 0x1b14
+#define MT6363_RG_LDO_VSRAM_CPUB_VOSEL_MASK GENMASK(6, 0)
+#define MT6363_RG_LDO_VSRAM_CPUM_VOSEL_ADDR 0x1b15
+#define MT6363_RG_LDO_VSRAM_CPUM_VOSEL_MASK GENMASK(6, 0)
+#define MT6363_RG_LDO_VSRAM_CPUL_VOSEL_ADDR 0x1b16
+#define MT6363_RG_LDO_VSRAM_CPUL_VOSEL_MASK GENMASK(6, 0)
+#define MT6363_RG_LDO_VSRAM_APU_VOSEL_ADDR 0x1b17
+#define MT6363_RG_LDO_VSRAM_APU_VOSEL_MASK GENMASK(6, 0)
+#define MT6363_RG_VEMC_VOCAL_ADDR 0x1b1b
+#define MT6363_RG_VEMC_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_LDO_VCN15_ADDR 0x1b57
+#define MT6363_RG_LDO_VCN15_EN_BIT 0
+#define MT6363_RG_LDO_VCN15_LP_BIT 1
+#define MT6363_LDO_VCN15_HW_LP_MODE 0x1b5b
+#define MT6363_LDO_VCN15_OP_EN0 0x1b5c
+#define MT6363_RG_LDO_VRF09_ADDR 0x1b65
+#define MT6363_RG_LDO_VRF09_EN_BIT 0
+#define MT6363_RG_LDO_VRF09_LP_BIT 1
+#define MT6363_LDO_VRF09_HW_LP_MODE 0x1b69
+#define MT6363_LDO_VRF09_OP_EN0 0x1b6a
+#define MT6363_RG_LDO_VRF12_ADDR 0x1b73
+#define MT6363_RG_LDO_VRF12_EN_BIT 0
+#define MT6363_RG_LDO_VRF12_LP_BIT 1
+#define MT6363_LDO_VRF12_HW_LP_MODE 0x1b77
+#define MT6363_LDO_VRF12_OP_EN0 0x1b78
+#define MT6363_RG_LDO_VRF13_ADDR 0x1b81
+#define MT6363_RG_LDO_VRF13_EN_BIT 0
+#define MT6363_RG_LDO_VRF13_LP_BIT 1
+#define MT6363_LDO_VRF13_HW_LP_MODE 0x1b85
+#define MT6363_LDO_VRF13_OP_EN0 0x1b86
+#define MT6363_RG_LDO_VRF18_ADDR 0x1b8f
+#define MT6363_RG_LDO_VRF18_EN_BIT 0
+#define MT6363_RG_LDO_VRF18_LP_BIT 1
+#define MT6363_LDO_VRF18_HW_LP_MODE 0x1b93
+#define MT6363_LDO_VRF18_OP_EN0 0x1b94
+#define MT6363_RG_LDO_VRFIO18_ADDR 0x1b9d
+#define MT6363_RG_LDO_VRFIO18_EN_BIT 0
+#define MT6363_RG_LDO_VRFIO18_LP_BIT 1
+#define MT6363_LDO_VRFIO18_HW_LP_MODE 0x1ba1
+#define MT6363_LDO_VRFIO18_OP_EN0 0x1ba2
+#define MT6363_RG_LDO_VTREF18_ADDR 0x1bd7
+#define MT6363_RG_LDO_VTREF18_EN_BIT 0
+#define MT6363_RG_LDO_VTREF18_LP_BIT 1
+#define MT6363_LDO_VTREF18_HW_LP_MODE 0x1bdb
+#define MT6363_LDO_VTREF18_OP_EN0 0x1bdc
+#define MT6363_RG_LDO_VAUX18_ADDR 0x1be5
+#define MT6363_RG_LDO_VAUX18_EN_BIT 0
+#define MT6363_RG_LDO_VAUX18_LP_BIT 1
+#define MT6363_LDO_VAUX18_HW_LP_MODE 0x1be9
+#define MT6363_LDO_VAUX18_OP_EN0 0x1bea
+#define MT6363_RG_LDO_VEMC_ADDR 0x1bf3
+#define MT6363_RG_LDO_VEMC_EN_BIT 0
+#define MT6363_RG_LDO_VEMC_LP_BIT 1
+#define MT6363_LDO_VEMC_HW_LP_MODE 0x1bf7
+#define MT6363_LDO_VEMC_OP_EN0 0x1bf8
+#define MT6363_RG_LDO_VUFS12_ADDR 0x1c01
+#define MT6363_RG_LDO_VUFS12_EN_BIT 0
+#define MT6363_RG_LDO_VUFS12_LP_BIT 1
+#define MT6363_LDO_VUFS12_HW_LP_MODE 0x1c05
+#define MT6363_LDO_VUFS12_OP_EN0 0x1c06
+#define MT6363_RG_LDO_VUFS18_ADDR 0x1c0f
+#define MT6363_RG_LDO_VUFS18_EN_BIT 0
+#define MT6363_RG_LDO_VUFS18_LP_BIT 1
+#define MT6363_LDO_VUFS18_HW_LP_MODE 0x1c13
+#define MT6363_LDO_VUFS18_OP_EN0 0x1c14
+#define MT6363_RG_LDO_VIO18_ADDR 0x1c1d
+#define MT6363_RG_LDO_VIO18_EN_BIT 0
+#define MT6363_RG_LDO_VIO18_LP_BIT 1
+#define MT6363_LDO_VIO18_HW_LP_MODE 0x1c21
+#define MT6363_LDO_VIO18_OP_EN0 0x1c22
+#define MT6363_RG_LDO_VIO075_ADDR 0x1c57
+#define MT6363_RG_LDO_VIO075_EN_BIT 0
+#define MT6363_RG_LDO_VIO075_LP_BIT 1
+#define MT6363_LDO_VIO075_HW_LP_MODE 0x1c5b
+#define MT6363_LDO_VIO075_OP_EN0 0x1c5c
+#define MT6363_RG_LDO_VA12_1_ADDR 0x1c65
+#define MT6363_RG_LDO_VA12_1_EN_BIT 0
+#define MT6363_RG_LDO_VA12_1_LP_BIT 1
+#define MT6363_LDO_VA12_1_HW_LP_MODE 0x1c69
+#define MT6363_LDO_VA12_1_OP_EN0 0x1c6a
+#define MT6363_RG_LDO_VA12_2_ADDR 0x1c73
+#define MT6363_RG_LDO_VA12_2_EN_BIT 0
+#define MT6363_RG_LDO_VA12_2_LP_BIT 1
+#define MT6363_LDO_VA12_2_HW_LP_MODE 0x1c77
+#define MT6363_LDO_VA12_2_OP_EN0 0x1c78
+#define MT6363_RG_LDO_VA15_ADDR 0x1c81
+#define MT6363_RG_LDO_VA15_EN_BIT 0
+#define MT6363_RG_LDO_VA15_LP_BIT 1
+#define MT6363_LDO_VA15_HW_LP_MODE 0x1c85
+#define MT6363_LDO_VA15_OP_EN0 0x1c86
+#define MT6363_RG_LDO_VM18_ADDR 0x1c8f
+#define MT6363_RG_LDO_VM18_EN_BIT 0
+#define MT6363_RG_LDO_VM18_LP_BIT 1
+#define MT6363_LDO_VM18_HW_LP_MODE 0x1c93
+#define MT6363_LDO_VM18_OP_EN0 0x1c94
+#define MT6363_RG_LDO_VCN13_ADDR 0x1cd7
+#define MT6363_RG_LDO_VCN13_EN_BIT 0
+#define MT6363_RG_LDO_VCN13_LP_BIT 1
+#define MT6363_LDO_VCN13_HW_LP_MODE 0x1cdb
+#define MT6363_LDO_VCN13_OP_EN0 0x1ce4
+#define MT6363_LDO_VSRAM_DIGRF_HW_LP_MODE 0x1cf1
+#define MT6363_LDO_VSRAM_DIGRF_OP_EN0 0x1cfa
+#define MT6363_LDO_VSRAM_MDFE_HW_LP_MODE 0x1d5b
+#define MT6363_LDO_VSRAM_MDFE_OP_EN0 0x1d64
+#define MT6363_LDO_VSRAM_MODEM_HW_LP_MODE 0x1d76
+#define MT6363_LDO_VSRAM_MODEM_OP_EN0 0x1d7f
+#define MT6363_RG_LDO_VSRAM_CPUB_ADDR 0x1dd7
+#define MT6363_RG_LDO_VSRAM_CPUB_EN_BIT 0
+#define MT6363_RG_LDO_VSRAM_CPUB_LP_BIT 1
+#define MT6363_LDO_VSRAM_CPUB_HW_LP_MODE 0x1ddb
+#define MT6363_LDO_VSRAM_CPUB_OP_EN0 0x1de4
+#define MT6363_RG_LDO_VSRAM_CPUM_ADDR 0x1ded
+#define MT6363_RG_LDO_VSRAM_CPUM_EN_BIT 0
+#define MT6363_RG_LDO_VSRAM_CPUM_LP_BIT 1
+#define MT6363_LDO_VSRAM_CPUM_HW_LP_MODE 0x1df1
+#define MT6363_LDO_VSRAM_CPUM_OP_EN0 0x1dfa
+#define MT6363_RG_LDO_VSRAM_CPUL_ADDR 0x1e57
+#define MT6363_RG_LDO_VSRAM_CPUL_EN_BIT 0
+#define MT6363_RG_LDO_VSRAM_CPUL_LP_BIT 1
+#define MT6363_LDO_VSRAM_CPUL_HW_LP_MODE 0x1e5b
+#define MT6363_LDO_VSRAM_CPUL_OP_EN0 0x1e64
+#define MT6363_RG_LDO_VSRAM_APU_ADDR 0x1e6d
+#define MT6363_RG_LDO_VSRAM_APU_EN_BIT 0
+#define MT6363_RG_LDO_VSRAM_APU_LP_BIT 1
+#define MT6363_LDO_VSRAM_APU_HW_LP_MODE 0x1e71
+#define MT6363_LDO_VSRAM_APU_OP_EN0 0x1e7a
+#define MT6363_RG_VTREF18_VOCAL_ADDR 0x1ed8
+#define MT6363_RG_VTREF18_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VTREF18_VOSEL_ADDR 0x1ed9
+#define MT6363_RG_VTREF18_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VAUX18_VOCAL_ADDR 0x1edc
+#define MT6363_RG_VAUX18_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VAUX18_VOSEL_ADDR 0x1edd
+#define MT6363_RG_VAUX18_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VCN15_VOCAL_ADDR 0x1ee3
+#define MT6363_RG_VCN15_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VCN15_VOSEL_ADDR 0x1ee4
+#define MT6363_RG_VCN15_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VUFS18_VOCAL_ADDR 0x1ee7
+#define MT6363_RG_VUFS18_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VUFS18_VOSEL_ADDR 0x1ee8
+#define MT6363_RG_VUFS18_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VIO18_VOCAL_ADDR 0x1eeb
+#define MT6363_RG_VIO18_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VIO18_VOSEL_ADDR 0x1eec
+#define MT6363_RG_VIO18_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VM18_VOCAL_ADDR 0x1eef
+#define MT6363_RG_VM18_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VM18_VOSEL_ADDR 0x1ef0
+#define MT6363_RG_VM18_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VA15_VOCAL_ADDR 0x1ef3
+#define MT6363_RG_VA15_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VA15_VOSEL_ADDR 0x1ef4
+#define MT6363_RG_VA15_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRF18_VOCAL_ADDR 0x1ef7
+#define MT6363_RG_VRF18_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRF18_VOSEL_ADDR 0x1ef8
+#define MT6363_RG_VRF18_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRFIO18_VOCAL_ADDR 0x1efb
+#define MT6363_RG_VRFIO18_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRFIO18_VOSEL_ADDR 0x1efc
+#define MT6363_RG_VRFIO18_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VIO075_VOCFG_ADDR 0x1f01
+#define MT6363_RG_VIO075_VOCAL_ADDR MT6363_RG_VIO075_VOCFG_ADDR
+#define MT6363_RG_VIO075_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VIO075_VOSEL_ADDR MT6363_RG_VIO075_VOCFG_ADDR
+#define MT6363_RG_VIO075_VOSEL_MASK GENMASK(6, 4)
+#define MT6363_RG_VCN13_VOCAL_ADDR 0x1f58
+#define MT6363_RG_VCN13_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VUFS12_VOCAL_ADDR 0x1f61
+#define MT6363_RG_VUFS12_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VUFS12_VOSEL_ADDR 0x1f62
+#define MT6363_RG_VUFS12_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VA12_1_VOCAL_ADDR 0x1f65
+#define MT6363_RG_VA12_1_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VA12_1_VOSEL_ADDR 0x1f66
+#define MT6363_RG_VA12_1_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VA12_2_VOCAL_ADDR 0x1f69
+#define MT6363_RG_VA12_2_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VA12_2_VOSEL_ADDR 0x1f6a
+#define MT6363_RG_VA12_2_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRF12_VOCAL_ADDR 0x1f6d
+#define MT6363_RG_VRF12_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRF12_VOSEL_ADDR 0x1f6e
+#define MT6363_RG_VRF12_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRF13_VOCAL_ADDR 0x1f71
+#define MT6363_RG_VRF13_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRF13_VOSEL_ADDR 0x1f72
+#define MT6363_RG_VRF13_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRF09_VOCAL_ADDR 0x1f78
+#define MT6363_RG_VRF09_VOCAL_MASK GENMASK(3, 0)
+#define MT6363_RG_VRF09_VOSEL_ADDR 0x1f79
+#define MT6363_RG_VRF09_VOSEL_MASK GENMASK(3, 0)
+#define MT6363_ISINK_EN_CTRL0 0x21db
+#define MT6363_ISINK_CTRL0_MASK GENMASK(7, 0)
+#define MT6363_ISINK_EN_CTRL1 0x21dc
+#define MT6363_ISINK_CTRL1_MASK GENMASK(7, 4)
+
+#endif /* __LINUX_REGULATOR_MT6363_H */
diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h
index 71902f41c919..0df8b3c48082 100644
--- a/include/linux/regulator/pca9450.h
+++ b/include/linux/regulator/pca9450.h
@@ -9,6 +9,8 @@
enum pca9450_chip_type {
PCA9450_TYPE_PCA9450A = 0,
PCA9450_TYPE_PCA9450BC,
+ PCA9450_TYPE_PCA9451A,
+ PCA9450_TYPE_PCA9452,
PCA9450_TYPE_AMOUNT,
};
@@ -33,6 +35,8 @@ enum {
PCA9450_DVS_LEVEL_MAX,
};
+#define PCA9450_RESTART_HANDLER_PRIORITY 130
+
#define PCA9450_BUCK1_VOLTAGE_NUM 0x80
#define PCA9450_BUCK2_VOLTAGE_NUM 0x80
#define PCA9450_BUCK3_VOLTAGE_NUM 0x80
@@ -196,11 +200,11 @@ enum {
/* PCA9450_REG_LDO3_VOLT bits */
#define LDO3_EN_MASK 0xC0
-#define LDO3OUT_MASK 0x0F
+#define LDO3OUT_MASK 0x1F
/* PCA9450_REG_LDO4_VOLT bits */
#define LDO4_EN_MASK 0xC0
-#define LDO4OUT_MASK 0x0F
+#define LDO4OUT_MASK 0x1F
/* PCA9450_REG_LDO5_VOLT bits */
#define LDO5L_EN_MASK 0xC0
@@ -219,11 +223,53 @@ enum {
#define IRQ_THERM_105 0x02
#define IRQ_THERM_125 0x01
+/* PCA9450_REG_PWRCTRL bits */
+#define T_ON_DEB_MASK 0xC0
+#define T_ON_DEB_120US (0 << 6)
+#define T_ON_DEB_20MS (1 << 6)
+#define T_ON_DEB_100MS (2 << 6)
+#define T_ON_DEB_750MS (3 << 6)
+#define T_OFF_DEB_MASK 0x20
+#define T_OFF_DEB_120US (0 << 5)
+#define T_OFF_DEB_2MS (1 << 5)
+#define T_ON_STEP_MASK 0x18
+#define T_ON_STEP_1MS (0 << 3)
+#define T_ON_STEP_2MS (1 << 3)
+#define T_ON_STEP_4MS (2 << 3)
+#define T_ON_STEP_8MS (3 << 3)
+#define T_OFF_STEP_MASK 0x06
+#define T_OFF_STEP_2MS (0 << 1)
+#define T_OFF_STEP_4MS (1 << 1)
+#define T_OFF_STEP_8MS (2 << 1)
+#define T_OFF_STEP_16MS (3 << 1)
+#define T_RESTART_MASK 0x01
+#define T_RESTART_250MS 0
+#define T_RESTART_500MS 1
+
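These PWRCTRL fields compose with an ordinary read-modify-write. A sketch follows; the pca9450->regmap handle is hypothetical, while PCA9450_REG_PWRCTRL is the register these bits document.

/* Sketch: 20 ms power-on debounce, 4 ms ON step, 500 ms restart time. */
unsigned int val = T_ON_DEB_20MS | T_ON_STEP_4MS | T_RESTART_500MS;
unsigned int mask = T_ON_DEB_MASK | T_ON_STEP_MASK | T_RESTART_MASK;

regmap_update_bits(pca9450->regmap, PCA9450_REG_PWRCTRL, mask, val);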
/* PCA9450_REG_RESET_CTRL bits */
#define WDOG_B_CFG_MASK 0xC0
#define WDOG_B_CFG_NONE 0x00
#define WDOG_B_CFG_WARM 0x40
#define WDOG_B_CFG_COLD_LDO12 0x80
#define WDOG_B_CFG_COLD 0xC0
+#define T_PMIC_RST_DEB_MASK 0x07
+#define T_PMIC_RST_DEB_10MS 0x00
+#define T_PMIC_RST_DEB_50MS 0x01
+#define T_PMIC_RST_DEB_100MS 0x02
+#define T_PMIC_RST_DEB_500MS 0x03
+#define T_PMIC_RST_DEB_1S 0x04
+#define T_PMIC_RST_DEB_2S 0x05
+#define T_PMIC_RST_DEB_4S 0x06
+#define T_PMIC_RST_DEB_8S 0x07
+
+/* PCA9450_REG_CONFIG2 bits */
+#define I2C_LT_MASK 0x03
+#define I2C_LT_FORCE_DISABLE 0x00
+#define I2C_LT_ON_STANDBY_RUN 0x01
+#define I2C_LT_ON_RUN 0x02
+#define I2C_LT_FORCE_ENABLE 0x03
+
+/* PCA9450_REG_SW_RST command */
+#define SW_RST_COMMAND 0x14
#endif /* __LINUX_REG_PCA9450_H__ */
diff --git a/include/linux/regulator/s2dos05.h b/include/linux/regulator/s2dos05.h
new file mode 100644
index 000000000000..2e89fcbce769
--- /dev/null
+++ b/include/linux/regulator/s2dos05.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// s2dos05.h
+//
+// Copyright (c) 2016 Samsung Electronics Co., Ltd
+// http://www.samsung.com
+// Copyright (C) 2024 Dzmitry Sankouski <dsankouski@gmail.com>
+
+#ifndef __LINUX_S2DOS05_H
+#define __LINUX_S2DOS05_H
+
+// S2DOS05 registers
+// Slave Addr : 0xC0
+enum S2DOS05_reg {
+ S2DOS05_REG_DEV_ID,
+ S2DOS05_REG_TOPSYS_STAT,
+ S2DOS05_REG_STAT,
+ S2DOS05_REG_EN,
+ S2DOS05_REG_LDO1_CFG,
+ S2DOS05_REG_LDO2_CFG,
+ S2DOS05_REG_LDO3_CFG,
+ S2DOS05_REG_LDO4_CFG,
+ S2DOS05_REG_BUCK_CFG,
+ S2DOS05_REG_BUCK_VOUT,
+ S2DOS05_REG_IRQ_MASK = 0x0D,
+ S2DOS05_REG_SSD_TSD = 0x0E,
+ S2DOS05_REG_OCL = 0x10,
+ S2DOS05_REG_IRQ = 0x11
+};
+
+// S2DOS05 regulator ids
+enum S2DOS05_regulators {
+ S2DOS05_LDO1,
+ S2DOS05_LDO2,
+ S2DOS05_LDO3,
+ S2DOS05_LDO4,
+ S2DOS05_BUCK1,
+ S2DOS05_REG_MAX,
+};
+
+#define S2DOS05_IRQ_PWRMT_MASK BIT(5)
+#define S2DOS05_IRQ_TSD_MASK BIT(4)
+#define S2DOS05_IRQ_SSD_MASK BIT(3)
+#define S2DOS05_IRQ_SCP_MASK BIT(2)
+#define S2DOS05_IRQ_UVLO_MASK BIT(1)
+#define S2DOS05_IRQ_OCD_MASK BIT(0)
+
+#define S2DOS05_BUCK_MIN1 506250
+#define S2DOS05_LDO_MIN1 1500000
+#define S2DOS05_LDO_MIN2 2700000
+#define S2DOS05_BUCK_STEP1 6250
+#define S2DOS05_LDO_STEP1 25000
+#define S2DOS05_LDO_VSEL_MASK 0x7F
+#define S2DOS05_LDO_FD_MASK 0x80
+#define S2DOS05_BUCK_VSEL_MASK 0xFF
+#define S2DOS05_BUCK_FD_MASK 0x08
+
+#define S2DOS05_ENABLE_MASK_L1 BIT(0)
+#define S2DOS05_ENABLE_MASK_L2 BIT(1)
+#define S2DOS05_ENABLE_MASK_L3 BIT(2)
+#define S2DOS05_ENABLE_MASK_L4 BIT(3)
+#define S2DOS05_ENABLE_MASK_B1 BIT(4)
+
+#define S2DOS05_RAMP_DELAY 12000
+
+#define S2DOS05_ENABLE_TIME_LDO 50
+#define S2DOS05_ENABLE_TIME_BUCK 350
+
+#define S2DOS05_LDO_N_VOLTAGES (S2DOS05_LDO_VSEL_MASK + 1)
+#define S2DOS05_BUCK_N_VOLTAGES (S2DOS05_BUCK_VSEL_MASK + 1)
+
+#define S2DOS05_REGULATOR_MAX (S2DOS05_REG_MAX)
+
+#endif // __LINUX_S2DOS05_H
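The MIN/STEP pairs above imply the usual linear selector-to-voltage mapping. A sketch for the buck, with a hypothetical helper name and values in uV:

/* Hypothetical conversion helper for the linear buck range. */
static inline int s2dos05_buck_sel_to_uV(unsigned int sel)
{
	return S2DOS05_BUCK_MIN1 + sel * S2DOS05_BUCK_STEP1;
}

/* e.g. sel = 16: 506250 + 16 * 6250 = 606250 uV */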
diff --git a/include/linux/regulator/tps62360.h b/include/linux/regulator/tps62360.h
index 94a90c06f1e5..398e74a1d941 100644
--- a/include/linux/regulator/tps62360.h
+++ b/include/linux/regulator/tps62360.h
@@ -19,10 +19,6 @@
* @en_discharge: Enable discharge the output capacitor via internal
* register.
* @en_internal_pulldn: internal pull down enable or not.
- * @vsel0_gpio: Gpio number for vsel0. It should be -1 if this is tied with
- * fixed logic.
- * @vsel1_gpio: Gpio number for vsel1. It should be -1 if this is tied with
- * fixed logic.
* @vsel0_def_state: Default state of vsel0. 1 if it is high else 0.
* @vsel1_def_state: Default state of vsel1. 1 if it is high else 0.
*/
@@ -30,8 +26,6 @@ struct tps62360_regulator_platform_data {
struct regulator_init_data *reg_init_data;
bool en_discharge;
bool en_internal_pulldn;
- int vsel0_gpio;
- int vsel1_gpio;
int vsel0_def_state;
int vsel1_def_state;
};
diff --git a/include/linux/regulator/userspace-consumer.h b/include/linux/regulator/userspace-consumer.h
index b5dba0628951..2249ee697f8b 100644
--- a/include/linux/regulator/userspace-consumer.h
+++ b/include/linux/regulator/userspace-consumer.h
@@ -21,6 +21,7 @@ struct regulator_userspace_consumer_data {
struct regulator_bulk_data *supplies;
bool init_on;
+ bool no_autoswitch;
};
#endif /* __REGULATOR_PLATFORM_CONSUMER_H_ */
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 72b876dd5cb8..6772a7075840 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -29,6 +29,22 @@
#define RELAYFS_CHANNEL_VERSION 7
/*
+ * Relay buffer statistics
+ */
+enum {
+ RELAY_STATS_BUF_FULL = (1 << 0),
+ RELAY_STATS_WRT_BIG = (1 << 1),
+
+ RELAY_STATS_LAST = RELAY_STATS_WRT_BIG,
+};
+
+struct rchan_buf_stats
+{
+ unsigned int full_count; /* counter for buffer full */
+ unsigned int big_count; /* counter for too big to write */
+};
+
+/*
* Per-cpu relay channel buffer
*/
struct rchan_buf
@@ -43,11 +59,11 @@ struct rchan_buf
struct irq_work wakeup_work; /* reader wakeup */
struct dentry *dentry; /* channel file dentry */
struct kref kref; /* channel buffer refcount */
+ struct rchan_buf_stats stats; /* buffer stats */
struct page **page_array; /* array of current buffer pages */
unsigned int page_count; /* number of current buffer pages */
unsigned int finalized; /* buffer has been finalized */
size_t *padding; /* padding counts per sub-buffer */
- size_t prev_padding; /* temporary variable */
size_t bytes_consumed; /* bytes consumed in cur read subbuf */
size_t early_bytes; /* bytes consumed before VFS inited */
unsigned int cpu; /* this buf's cpu */
@@ -65,7 +81,6 @@ struct rchan
const struct rchan_callbacks *cb; /* client callbacks */
struct kref kref; /* channel refcount */
void *private_data; /* for user-defined data */
- size_t last_toobig; /* tried to log event > subbuf size */
struct rchan_buf * __percpu *buf; /* per-cpu channel buffers */
int is_global; /* One global buffer ? */
struct list_head list; /* for channel list */
@@ -84,7 +99,6 @@ struct rchan_callbacks
* @buf: the channel buffer containing the new sub-buffer
* @subbuf: the start of the new sub-buffer
* @prev_subbuf: the start of the previous sub-buffer
- * @prev_padding: unused space at the end of previous sub-buffer
*
* The client should return 1 to continue logging, 0 to stop
* logging.
@@ -100,8 +114,7 @@ struct rchan_callbacks
*/
int (*subbuf_start) (struct rchan_buf *buf,
void *subbuf,
- void *prev_subbuf,
- size_t prev_padding);
+ void *prev_subbuf);
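With @prev_padding dropped, a minimal subbuf_start implementation reduces to the classic non-overwrite check. A sketch with a hypothetical name:

/* Sketch: stop logging when the buffer fills, otherwise continue. */
static int my_subbuf_start(struct rchan_buf *buf, void *subbuf,
			   void *prev_subbuf)
{
	if (relay_buf_full(buf))
		return 0;	/* stop logging */

	return 1;		/* continue logging */
}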
/*
* create_buf_file - create file to represent a relay channel buffer
@@ -159,11 +172,9 @@ struct rchan *relay_open(const char *base_filename,
size_t n_subbufs,
const struct rchan_callbacks *cb,
void *private_data);
-extern int relay_late_setup_files(struct rchan *chan,
- const char *base_filename,
- struct dentry *parent);
extern void relay_close(struct rchan *chan);
extern void relay_flush(struct rchan *chan);
+size_t relay_stats(struct rchan *chan, int flags);
extern void relay_subbufs_consumed(struct rchan *chan,
unsigned int cpu,
size_t consumed);
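A sketch of consuming the new statistics interface: per the prototype, @flags takes the RELAY_STATS_* bits defined above; how the returned count aggregates multiple flags is an assumption here.

/* Sketch: report how often 'chan' hit its limits. */
size_t full = relay_stats(chan, RELAY_STATS_BUF_FULL);
size_t big  = relay_stats(chan, RELAY_STATS_WRT_BIG);

pr_info("relay: %zu buffer-full events, %zu oversized writes\n", full, big);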
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 8b795b544f75..b4795698d8c2 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -243,7 +243,7 @@ struct fw_rsc_trace {
* @da: device address
* @align: the alignment between the consumer and producer parts of the vring
* @num: num of buffers supported by this vring (must be power of two)
- * @notifyid is a unique rproc-wide notify index for this vring. This notify
+ * @notifyid: a unique rproc-wide notify index for this vring. This notify
* index is used when kicking a remote processor, to let it know that this
* vring is triggered.
* @pa: physical address
@@ -266,18 +266,18 @@ struct fw_rsc_vdev_vring {
/**
* struct fw_rsc_vdev - virtio device header
* @id: virtio device id (as in virtio_ids.h)
- * @notifyid is a unique rproc-wide notify index for this vdev. This notify
+ * @notifyid: a unique rproc-wide notify index for this vdev. This notify
* index is used when kicking a remote processor, to let it know that the
* status/features of this vdev have changed.
- * @dfeatures specifies the virtio device features supported by the firmware
- * @gfeatures is a place holder used by the host to write back the
+ * @dfeatures: specifies the virtio device features supported by the firmware
+ * @gfeatures: a place holder used by the host to write back the
* negotiated features that are supported by both sides.
- * @config_len is the size of the virtio config space of this vdev. The config
+ * @config_len: the size of the virtio config space of this vdev. The config
* space lies in the resource table immediate after this vdev header.
- * @status is a place holder where the host will indicate its virtio progress.
- * @num_of_vrings indicates how many vrings are described in this vdev header
+ * @status: a place holder where the host will indicate its virtio progress.
+ * @num_of_vrings: indicates how many vrings are described in this vdev header
* @reserved: reserved (must be zero)
- * @vring is an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'.
+ * @vring: an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'.
*
* This resource is a virtio device header: it provides information about
* the vdev, and is then used by the host and its peer remote processors
@@ -287,16 +287,17 @@ struct fw_rsc_vdev_vring {
* to statically allocate a vdev upon registration of the rproc (dynamic vdev
* allocation is not yet supported).
*
- * Note: unlike virtualization systems, the term 'host' here means
- * the Linux side which is running remoteproc to control the remote
- * processors. We use the name 'gfeatures' to comply with virtio's terms,
- * though there isn't really any virtualized guest OS here: it's the host
- * which is responsible for negotiating the final features.
- * Yeah, it's a bit confusing.
- *
- * Note: immediately following this structure is the virtio config space for
- * this vdev (which is specific to the vdev; for more info, read the virtio
- * spec). the size of the config space is specified by @config_len.
+ * Note:
+ * 1. unlike virtualization systems, the term 'host' here means
+ * the Linux side which is running remoteproc to control the remote
+ * processors. We use the name 'gfeatures' to comply with virtio's terms,
+ * though there isn't really any virtualized guest OS here: it's the host
+ * which is responsible for negotiating the final features.
+ * Yeah, it's a bit confusing.
+ *
+ * 2. immediately following this structure is the virtio config space for
+ * this vdev (which is specific to the vdev; for more info, read the virtio
+ * spec). The size of the config space is specified by @config_len.
*/
struct fw_rsc_vdev {
u32 id;
@@ -368,9 +369,8 @@ enum rsc_handling_status {
* @da_to_va: optional platform hook to perform address translations
* @parse_fw: parse firmware to extract information (e.g. resource table)
* @handle_rsc: optional platform hook to handle vendor resources. Should return
- * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled and a
- * negative value on error
- * @load_rsc_table: load resource table from firmware image
+ * RSC_HANDLED if the resource was handled, RSC_IGNORED if not handled,
+ * and a negative value on error
* @find_loaded_rsc_table: find the loaded resource table from firmware image
* @get_loaded_rsc_table: get resource table installed in memory
* by external entity
@@ -440,7 +440,7 @@ enum rproc_state {
* enum rproc_crash_type - remote processor crash types
* @RPROC_MMUFAULT: iommu fault
* @RPROC_WATCHDOG: watchdog bite
- * @RPROC_FATAL_ERROR fatal error
+ * @RPROC_FATAL_ERROR: fatal error
*
* Each element of the enum is used as an array index, so the value of
* the elements should always be something sane.
@@ -457,9 +457,9 @@ enum rproc_crash_type {
* enum rproc_dump_mechanism - Coredump options for core
* @RPROC_COREDUMP_DISABLED: Don't perform any dump
* @RPROC_COREDUMP_ENABLED: Copy dump to separate buffer and carry on with
- recovery
+ * recovery
* @RPROC_COREDUMP_INLINE: Read segments directly from device memory. Stall
- recovery until all segments are read
+ * recovery until all segments are read
*/
enum rproc_dump_mechanism {
RPROC_COREDUMP_DISABLED,
@@ -475,6 +475,7 @@ enum rproc_dump_mechanism {
* @priv: private data associated with the dump_segment
* @dump: custom dump function to fill device memory segment associated
* with coredump
+ * @offset: offset of the segment
*/
struct rproc_dump_segment {
struct list_head node;
@@ -489,6 +490,20 @@ struct rproc_dump_segment {
};
/**
+ * enum rproc_features - features supported
+ *
+ * @RPROC_FEAT_ATTACH_ON_RECOVERY: The remote processor does not need help
+ * from Linux to recover, such as firmware
+ * loading. Linux just needs to attach after
+ * recovery.
+ */
+enum rproc_features {
+ RPROC_FEAT_ATTACH_ON_RECOVERY,
+ RPROC_MAX_FEATURES,
+};
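Presumably a platform driver advertises a feature by setting its bit in the rproc's feature bitmap (added to struct rproc below) before registration; whether the core provides a dedicated setter is not visible in this hunk, so a plain set_bit() sketch:

/* Sketch: mark the remote processor as recoverable without a reload. */
set_bit(RPROC_FEAT_ATTACH_ON_RECOVERY, rproc->features);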
+
+/**
* struct rproc - represents a physical remote processor device
* @node: list node of this rproc object
* @domain: iommu domain
@@ -522,10 +537,14 @@ struct rproc_dump_segment {
* @table_sz: size of @cached_table
* @has_iommu: flag to indicate if remote processor is behind an MMU
* @auto_boot: flag to indicate if remote processor should be auto-started
+ * @sysfs_read_only: flag to make remoteproc sysfs files read only
* @dump_segments: list of segments in the firmware
* @nb_vdev: number of vdev currently handled by rproc
- * @char_dev: character device of the rproc
+ * @elf_class: firmware ELF class
+ * @elf_machine: firmware ELF machine
+ * @cdev: character device of the rproc
+ * @cdev_put_on_release: flag to indicate if remoteproc should be shut down on @cdev release
+ * @features: indicate remoteproc features
*/
struct rproc {
struct list_head node;
@@ -559,12 +578,14 @@ struct rproc {
size_t table_sz;
bool has_iommu;
bool auto_boot;
+ bool sysfs_read_only;
struct list_head dump_segments;
int nb_vdev;
u8 elf_class;
u16 elf_machine;
struct cdev cdev;
bool cdev_put_on_release;
+ DECLARE_BITMAP(features, RPROC_MAX_FEATURES);
};
/**
@@ -592,7 +613,7 @@ struct rproc_subdev {
/**
* struct rproc_vring - remoteproc vring state
* @va: virtual address
- * @len: length, in bytes
+ * @num: vring size
* @da: device address
* @align: vring alignment
* @notifyid: rproc-specific unique vring index
@@ -601,7 +622,7 @@ struct rproc_subdev {
*/
struct rproc_vring {
void *va;
- int len;
+ int num;
u32 da;
u32 align;
int notifyid;
@@ -611,21 +632,19 @@ struct rproc_vring {
/**
* struct rproc_vdev - remoteproc state for a supported virtio device
- * @refcount: reference counter for the vdev and vring allocations
* @subdev: handle for registering the vdev as a rproc subdevice
+ * @pdev: remoteproc virtio platform device
* @id: virtio device id (as in virtio_ids.h)
* @node: list node
* @rproc: the rproc handle
- * @vdev: the virio device
* @vring: the vrings for this vdev
* @rsc_offset: offset of the vdev's resource entry
* @index: vdev position versus other vdev declared in resource table
*/
struct rproc_vdev {
- struct kref refcount;
struct rproc_subdev subdev;
- struct device dev;
+ struct platform_device *pdev;
unsigned int id;
struct list_head node;
@@ -666,10 +685,15 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
u32 da, const char *name, ...);
int rproc_boot(struct rproc *rproc);
-void rproc_shutdown(struct rproc *rproc);
+int rproc_shutdown(struct rproc *rproc);
int rproc_detach(struct rproc *rproc);
int rproc_set_firmware(struct rproc *rproc, const char *fw_name);
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
+void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem);
+
+/* from remoteproc_coredump.c */
+void rproc_coredump_cleanup(struct rproc *rproc);
+void rproc_coredump(struct rproc *rproc);
void rproc_coredump_using_sections(struct rproc *rproc);
int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size);
int rproc_coredump_add_custom_segment(struct rproc *rproc,
@@ -681,18 +705,6 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc,
void *priv);
int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine);
-static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
-{
- return container_of(vdev->dev.parent, struct rproc_vdev, dev);
-}
-
-static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
-{
- struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
-
- return rvdev->rproc;
-}
-
void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
diff --git a/include/linux/remoteproc/mtk_scp.h b/include/linux/remoteproc/mtk_scp.h
index b47416f7aeb8..344ff41c22c7 100644
--- a/include/linux/remoteproc/mtk_scp.h
+++ b/include/linux/remoteproc/mtk_scp.h
@@ -41,6 +41,9 @@ enum scp_ipi_id {
SCP_IPI_ISP_FRAME,
SCP_IPI_FD_CMD,
SCP_IPI_CROS_HOST_CMD,
+ SCP_IPI_VDEC_LAT,
+ SCP_IPI_VDEC_CORE,
+ SCP_IPI_IMGSYS_CMD,
SCP_IPI_NS_SERVICE = 0xFF,
SCP_IPI_MAX = 0x100,
};
diff --git a/include/linux/remoteproc/pruss.h b/include/linux/remoteproc/pruss.h
new file mode 100644
index 000000000000..039b50d58df2
--- /dev/null
+++ b/include/linux/remoteproc/pruss.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * PRU-ICSS Subsystem user interfaces
+ *
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - http://www.ti.com
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#ifndef __LINUX_PRUSS_H
+#define __LINUX_PRUSS_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+#define PRU_RPROC_DRVNAME "pru-rproc"
+
+/**
+ * enum pruss_pru_id - PRU core identifiers
+ * @PRUSS_PRU0: PRU Core 0.
+ * @PRUSS_PRU1: PRU Core 1.
+ * @PRUSS_NUM_PRUS: Total number of PRU Cores available.
+ */
+enum pruss_pru_id {
+ PRUSS_PRU0 = 0,
+ PRUSS_PRU1,
+ PRUSS_NUM_PRUS,
+};
+
+/*
+ * enum pru_ctable_idx - Configurable Constant table index identifiers
+ */
+enum pru_ctable_idx {
+ PRU_C24 = 0,
+ PRU_C25,
+ PRU_C26,
+ PRU_C27,
+ PRU_C28,
+ PRU_C29,
+ PRU_C30,
+ PRU_C31,
+};
+
+struct device_node;
+struct rproc;
+
+#if IS_ENABLED(CONFIG_PRU_REMOTEPROC)
+
+struct rproc *pru_rproc_get(struct device_node *np, int index,
+ enum pruss_pru_id *pru_id);
+void pru_rproc_put(struct rproc *rproc);
+int pru_rproc_set_ctable(struct rproc *rproc, enum pru_ctable_idx c, u32 addr);
+
+#else
+
+static inline struct rproc *
+pru_rproc_get(struct device_node *np, int index, enum pruss_pru_id *pru_id)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void pru_rproc_put(struct rproc *rproc) { }
+
+static inline int pru_rproc_set_ctable(struct rproc *rproc,
+ enum pru_ctable_idx c, u32 addr)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_PRU_REMOTEPROC */
+
+static inline bool is_pru_rproc(struct device *dev)
+{
+ const char *drv_name = dev_driver_string(dev);
+
+ if (strncmp(drv_name, PRU_RPROC_DRVNAME, sizeof(PRU_RPROC_DRVNAME)))
+ return false;
+
+ return true;
+}
+
+#endif /* __LINUX_PRUSS_H */
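A consumer sketch for the API above; the function name, phandle index, and the ctable address are made up.

/* Sketch: acquire a PRU core and program one ctable entry. */
static int my_pru_setup(struct device_node *np)
{
	enum pruss_pru_id id;
	struct rproc *pru;
	int ret;

	pru = pru_rproc_get(np, 0, &id);	/* index 0: first phandle */
	if (IS_ERR(pru))
		return PTR_ERR(pru);

	ret = pru_rproc_set_ctable(pru, PRU_C28, 0x100);	/* made-up addr */
	if (ret)
		pru_rproc_put(pru);

	return ret;
}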
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 9b05af9b3e28..54701668b3df 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -2,7 +2,21 @@
#ifndef _RESCTRL_H
#define _RESCTRL_H
+#include <linux/cacheinfo.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/pid.h>
+#include <linux/resctrl_types.h>
+
+#ifdef CONFIG_ARCH_HAS_CPU_RESCTRL
+#include <asm/resctrl.h>
+#endif
+
+/* CLOSID, RMID value used by the default control group */
+#define RESCTRL_RESERVED_CLOSID 0
+#define RESCTRL_RESERVED_RMID 0
+
+#define RESCTRL_PICK_ANY_CPU -1
#ifdef CONFIG_PROC_CPU_RESCTRL
@@ -13,4 +27,674 @@ int proc_resctrl_show(struct seq_file *m,
#endif
+/* max value for struct rdt_ctrl_domain's mbps_val */
+#define MBA_MAX_MBPS U32_MAX
+
+/* Walk all possible resources, with variants for only controls or monitors. */
+#define for_each_rdt_resource(_r) \
+ for ((_r) = resctrl_arch_get_resource(0); \
+ (_r) && (_r)->rid < RDT_NUM_RESOURCES; \
+ (_r) = resctrl_arch_get_resource((_r)->rid + 1))
+
+#define for_each_capable_rdt_resource(r) \
+ for_each_rdt_resource((r)) \
+ if ((r)->alloc_capable || (r)->mon_capable)
+
+#define for_each_alloc_capable_rdt_resource(r) \
+ for_each_rdt_resource((r)) \
+ if ((r)->alloc_capable)
+
+#define for_each_mon_capable_rdt_resource(r) \
+ for_each_rdt_resource((r)) \
+ if ((r)->mon_capable)
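Usage sketch: the walkers expand to plain for/if statements, so counting, say, monitor-capable resources is just:

/* Sketch: count resources that support monitoring. */
struct rdt_resource *r;
int nr_mon = 0;

for_each_mon_capable_rdt_resource(r)
	nr_mon++;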
+
+enum resctrl_res_level {
+ RDT_RESOURCE_L3,
+ RDT_RESOURCE_L2,
+ RDT_RESOURCE_MBA,
+ RDT_RESOURCE_SMBA,
+
+ /* Must be the last */
+ RDT_NUM_RESOURCES,
+};
+
+/**
+ * enum resctrl_conf_type - The type of configuration.
+ * @CDP_NONE: No prioritisation, both code and data are controlled or monitored.
+ * @CDP_CODE: Configuration applies to instruction fetches.
+ * @CDP_DATA: Configuration applies to reads and writes.
+ */
+enum resctrl_conf_type {
+ CDP_NONE,
+ CDP_CODE,
+ CDP_DATA,
+};
+
+#define CDP_NUM_TYPES (CDP_DATA + 1)
+
+/**
+ * struct pseudo_lock_region - pseudo-lock region information
+ * @s: Resctrl schema for the resource to which this
+ * pseudo-locked region belongs
+ * @closid: The closid that this pseudo-locked region uses
+ * @d: RDT domain to which this pseudo-locked region
+ * belongs
+ * @cbm: bitmask of the pseudo-locked region
+ * @lock_thread_wq: waitqueue used to wait on the pseudo-locking thread
+ * completion
+ * @thread_done: variable used by waitqueue to test if pseudo-locking
+ * thread completed
+ * @cpu: core associated with the cache on which the setup code
+ * will be run
+ * @line_size: size of the cache lines
+ * @size: size of pseudo-locked region in bytes
+ * @kmem: the kernel memory associated with pseudo-locked region
+ * @minor: minor number of character device associated with this
+ * region
+ * @debugfs_dir: pointer to this region's directory in the debugfs
+ * filesystem
+ * @pm_reqs: Power management QoS requests related to this region
+ */
+struct pseudo_lock_region {
+ struct resctrl_schema *s;
+ u32 closid;
+ struct rdt_ctrl_domain *d;
+ u32 cbm;
+ wait_queue_head_t lock_thread_wq;
+ int thread_done;
+ int cpu;
+ unsigned int line_size;
+ unsigned int size;
+ void *kmem;
+ unsigned int minor;
+ struct dentry *debugfs_dir;
+ struct list_head pm_reqs;
+};
+
+/**
+ * struct resctrl_staged_config - parsed configuration to be applied
+ * @new_ctrl: new ctrl value to be loaded
+ * @have_new_ctrl: whether the user provided new_ctrl is valid
+ */
+struct resctrl_staged_config {
+ u32 new_ctrl;
+ bool have_new_ctrl;
+};
+
+enum resctrl_domain_type {
+ RESCTRL_CTRL_DOMAIN,
+ RESCTRL_MON_DOMAIN,
+};
+
+/**
+ * struct rdt_domain_hdr - common header for different domain types
+ * @list: all instances of this resource
+ * @id: unique id for this instance
+ * @type: type of this instance
+ * @cpu_mask: which CPUs share this resource
+ */
+struct rdt_domain_hdr {
+ struct list_head list;
+ int id;
+ enum resctrl_domain_type type;
+ struct cpumask cpu_mask;
+};
+
+/**
+ * struct rdt_ctrl_domain - group of CPUs sharing a resctrl control resource
+ * @hdr: common header for different domain types
+ * @plr: pseudo-locked region (if any) associated with domain
+ * @staged_config: parsed configuration to be applied
+ * @mbps_val: When mba_sc is enabled, this holds the array of user
+ * specified control values for mba_sc in MBps, indexed
+ * by closid
+ */
+struct rdt_ctrl_domain {
+ struct rdt_domain_hdr hdr;
+ struct pseudo_lock_region *plr;
+ struct resctrl_staged_config staged_config[CDP_NUM_TYPES];
+ u32 *mbps_val;
+};
+
+/**
+ * struct mbm_cntr_cfg - Assignable counter configuration.
+ * @evtid: MBM event to which the counter is assigned. Only valid
+ * if @rdtgroup is not NULL.
+ * @rdtgrp: resctrl group assigned to the counter. NULL if the
+ * counter is free.
+ */
+struct mbm_cntr_cfg {
+ enum resctrl_event_id evtid;
+ struct rdtgroup *rdtgrp;
+};
+
+/**
+ * struct rdt_mon_domain - group of CPUs sharing a resctrl monitor resource
+ * @hdr: common header for different domain types
+ * @ci_id: cache info id for this domain
+ * @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold
+ * @mbm_states: Per-event pointer to the MBM event's saved state.
+ * An MBM event's state is an array of struct mbm_state
+ * indexed by RMID on x86 or combined CLOSID, RMID on Arm.
+ * @mbm_over: worker to periodically read MBM h/w counters
+ * @cqm_limbo: worker to periodically read CQM h/w counters
+ * @mbm_work_cpu: worker CPU for MBM h/w counters
+ * @cqm_work_cpu: worker CPU for CQM h/w counters
+ * @cntr_cfg: array of assignable counters' configuration (indexed
+ * by counter ID)
+ */
+struct rdt_mon_domain {
+ struct rdt_domain_hdr hdr;
+ unsigned int ci_id;
+ unsigned long *rmid_busy_llc;
+ struct mbm_state *mbm_states[QOS_NUM_L3_MBM_EVENTS];
+ struct delayed_work mbm_over;
+ struct delayed_work cqm_limbo;
+ int mbm_work_cpu;
+ int cqm_work_cpu;
+ struct mbm_cntr_cfg *cntr_cfg;
+};
+
+/**
+ * struct resctrl_cache - Cache allocation related data
+ * @cbm_len: Length of the cache bit mask
+ * @min_cbm_bits: Minimum number of consecutive bits to be set.
+ * The value 0 means the architecture can support
+ * zero CBM.
+ * @shareable_bits: Bitmask of shareable resource with other
+ * executing entities
+ * @arch_has_sparse_bitmasks: True if a bitmask like f00f is valid.
+ * @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache
+ * level has CPU scope.
+ * @io_alloc_capable: True if portion of the cache can be configured
+ * for I/O traffic.
+ */
+struct resctrl_cache {
+ unsigned int cbm_len;
+ unsigned int min_cbm_bits;
+ unsigned int shareable_bits;
+ bool arch_has_sparse_bitmasks;
+ bool arch_has_per_cpu_cfg;
+ bool io_alloc_capable;
+};
+
+/**
+ * enum membw_throttle_mode - System's memory bandwidth throttling mode
+ * @THREAD_THROTTLE_UNDEFINED: Not relevant to the system
+ * @THREAD_THROTTLE_MAX: Memory bandwidth is throttled at the core
+ * always using the smallest bandwidth percentage
+ * assigned to threads, aka "max throttling"
+ * @THREAD_THROTTLE_PER_THREAD: Memory bandwidth is throttled at the thread
+ */
+enum membw_throttle_mode {
+ THREAD_THROTTLE_UNDEFINED = 0,
+ THREAD_THROTTLE_MAX,
+ THREAD_THROTTLE_PER_THREAD,
+};
+
+/**
+ * struct resctrl_membw - Memory bandwidth allocation related data
+ * @min_bw: Minimum memory bandwidth percentage user can request
+ * @max_bw: Maximum memory bandwidth value, used as the reset value
+ * @bw_gran: Granularity at which the memory bandwidth is allocated
+ * @delay_linear: True if memory B/W delay is in linear scale
+ * @arch_needs_linear: True if we can't configure non-linear resources
+ * @throttle_mode: Bandwidth throttling mode when threads request
+ * different memory bandwidths
+ * @mba_sc: True if the MBA software controller (mba_sc) is enabled
+ * @mb_map: Mapping of memory B/W percentage to memory B/W delay
+ */
+struct resctrl_membw {
+ u32 min_bw;
+ u32 max_bw;
+ u32 bw_gran;
+ u32 delay_linear;
+ bool arch_needs_linear;
+ enum membw_throttle_mode throttle_mode;
+ bool mba_sc;
+ u32 *mb_map;
+};
+
+struct resctrl_schema;
+
+enum resctrl_scope {
+ RESCTRL_L2_CACHE = 2,
+ RESCTRL_L3_CACHE = 3,
+ RESCTRL_L3_NODE,
+};
+
+/**
+ * enum resctrl_schema_fmt - The format user-space provides for a schema.
+ * @RESCTRL_SCHEMA_BITMAP: The schema is a bitmap in hex.
+ * @RESCTRL_SCHEMA_RANGE: The schema is a decimal number.
+ */
+enum resctrl_schema_fmt {
+ RESCTRL_SCHEMA_BITMAP,
+ RESCTRL_SCHEMA_RANGE,
+};
+
+/**
+ * struct resctrl_mon - Monitoring related data of a resctrl resource.
+ * @num_rmid: Number of RMIDs available.
+ * @mbm_cfg_mask: Memory transactions that bandwidth monitoring events
+ * can be configured to track.
+ * @num_mbm_cntrs: Number of assignable counters.
+ * @mbm_cntr_assignable: Is the system capable of supporting counter assignment?
+ * @mbm_assign_on_mkdir: True if counters should automatically be assigned to MBM
+ * events of monitor groups created via mkdir.
+ */
+struct resctrl_mon {
+ int num_rmid;
+ unsigned int mbm_cfg_mask;
+ int num_mbm_cntrs;
+ bool mbm_cntr_assignable;
+ bool mbm_assign_on_mkdir;
+};
+
+/**
+ * struct rdt_resource - attributes of a resctrl resource
+ * @rid: The index of the resource
+ * @alloc_capable: Is allocation available on this machine
+ * @mon_capable: Is monitor feature available on this machine
+ * @ctrl_scope: Scope of this resource for control functions
+ * @mon_scope: Scope of this resource for monitor functions
+ * @cache: Cache allocation related data
+ * @membw: If the component has bandwidth controls, their properties.
+ * @mon: Monitoring related data.
+ * @ctrl_domains: RCU list of all control domains for this resource
+ * @mon_domains: RCU list of all monitor domains for this resource
+ * @name: Name to use in "schemata" file.
+ * @schema_fmt: Which format string and parser is used for this schema.
+ * @cdp_capable: Is the CDP feature available on this resource
+ */
+struct rdt_resource {
+ int rid;
+ bool alloc_capable;
+ bool mon_capable;
+ enum resctrl_scope ctrl_scope;
+ enum resctrl_scope mon_scope;
+ struct resctrl_cache cache;
+ struct resctrl_membw membw;
+ struct resctrl_mon mon;
+ struct list_head ctrl_domains;
+ struct list_head mon_domains;
+ char *name;
+ enum resctrl_schema_fmt schema_fmt;
+ bool cdp_capable;
+};
+
+/*
+ * Get the resource that exists at this level. If the level is not supported,
+ * a dummy/not-capable resource can be returned. Levels >= RDT_NUM_RESOURCES
+ * will return NULL.
+ */
+struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l);
+
+/**
+ * struct resctrl_schema - configuration abilities of a resource presented to
+ * user-space
+ * @list: Member of resctrl_schema_all.
+ * @name: The name to use in the "schemata" file.
+ * @fmt_str: Format string to show domain value.
+ * @conf_type: Whether this schema is specific to code/data.
+ * @res: The resource structure exported by the architecture to describe
+ * the hardware that is configured by this schema.
+ * @num_closid: The number of closids that can be used with this schema. When
+ * features like CDP are enabled, this will be lower than what
+ * the hardware supports for the resource.
+ */
+struct resctrl_schema {
+ struct list_head list;
+ char name[8];
+ const char *fmt_str;
+ enum resctrl_conf_type conf_type;
+ struct rdt_resource *res;
+ u32 num_closid;
+};
+
+struct resctrl_cpu_defaults {
+ u32 closid;
+ u32 rmid;
+};
+
+struct resctrl_mon_config_info {
+ struct rdt_resource *r;
+ struct rdt_mon_domain *d;
+ u32 evtid;
+ u32 mon_config;
+};
+
+/**
+ * resctrl_arch_sync_cpu_closid_rmid() - Refresh this CPU's CLOSID and RMID.
+ * Call via IPI.
+ * @info: If non-NULL, a pointer to a struct resctrl_cpu_defaults
+ * specifying the new CLOSID and RMID for tasks in the default
+ * resctrl ctrl and mon group when running on this CPU. If NULL,
+ * this CPU is not re-assigned to a different default group.
+ *
+ * Propagates reassignment of CPUs and/or tasks to different resctrl groups
+ * when requested by the resctrl core code.
+ *
+ * This function records the per-cpu defaults specified by @info (if any),
+ * and then reconfigures the CPU's hardware CLOSID and RMID for subsequent
+ * execution based on @current, in the same way as during a task switch.
+ */
+void resctrl_arch_sync_cpu_closid_rmid(void *info);
+
+/**
+ * resctrl_get_default_ctrl() - Return the default control value for this
+ * resource.
+ * @r: The resource whose default control type is queried.
+ */
+static inline u32 resctrl_get_default_ctrl(struct rdt_resource *r)
+{
+ switch (r->schema_fmt) {
+ case RESCTRL_SCHEMA_BITMAP:
+ return BIT_MASK(r->cache.cbm_len) - 1;
+ case RESCTRL_SCHEMA_RANGE:
+ return r->membw.max_bw;
+ }
+
+ return WARN_ON_ONCE(1);
+}
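As a worked check of the helper above (numbers are illustrative, not from the patch): for a bitmap schema with cache.cbm_len = 11, BIT_MASK(11) - 1 evaluates to 0x7ff, i.e. all eleven CBM bits set; for a range schema the default is simply membw.max_bw. The WARN_ON_ONCE(1) fallthrough only triggers if a new schema_fmt is added without extending the switch.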
+
+/* The number of closid supported by this resource regardless of CDP */
+u32 resctrl_arch_get_num_closid(struct rdt_resource *r);
+u32 resctrl_arch_system_num_rmid_idx(void);
+int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid);
+
+void resctrl_enable_mon_event(enum resctrl_event_id eventid);
+
+bool resctrl_is_mon_event_enabled(enum resctrl_event_id eventid);
+
+bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt);
+
+static inline bool resctrl_is_mbm_event(enum resctrl_event_id eventid)
+{
+ return (eventid >= QOS_L3_MBM_TOTAL_EVENT_ID &&
+ eventid <= QOS_L3_MBM_LOCAL_EVENT_ID);
+}
+
+u32 resctrl_get_mon_evt_cfg(enum resctrl_event_id eventid);
+
+/* Iterate over all memory bandwidth events */
+#define for_each_mbm_event_id(eventid) \
+ for (eventid = QOS_L3_MBM_TOTAL_EVENT_ID; \
+ eventid <= QOS_L3_MBM_LOCAL_EVENT_ID; eventid++)
+
+/* Iterate over memory bandwidth arrays in domain structures */
+#define for_each_mbm_idx(idx) \
+ for (idx = 0; idx < QOS_NUM_L3_MBM_EVENTS; idx++)
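A short sketch of how the two iterators pair up, assuming kernel logging only:

        enum resctrl_event_id eventid;
        int idx;

        /* Walk the MBM event ID range: 0x02 (total) and 0x03 (local). */
        for_each_mbm_event_id(eventid)
                pr_debug("MBM event 0x%x enabled: %d\n", eventid,
                         resctrl_is_mon_event_enabled(eventid));

        /* Walk the matching array slots: 0 .. QOS_NUM_L3_MBM_EVENTS - 1. */
        for_each_mbm_idx(idx)
                pr_debug("MBM state slot %d\n", idx);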
+
+/**
+ * resctrl_arch_mon_event_config_write() - Write the config for an event.
+ * @config_info: struct resctrl_mon_config_info describing the resource, domain
+ * and event.
+ *
+ * Reads resource, domain and eventid from @config_info and writes the
+ * event config_info->mon_config into hardware.
+ *
+ * Called via IPI to reach a CPU that is a member of the specified domain.
+ */
+void resctrl_arch_mon_event_config_write(void *config_info);
+
+/**
+ * resctrl_arch_mon_event_config_read() - Read the config for an event.
+ * @config_info: struct resctrl_mon_config_info describing the resource, domain
+ * and event.
+ *
+ * Reads resource, domain and eventid from @config_info and reads the
+ * hardware config value into config_info->mon_config.
+ *
+ * Called via IPI to reach a CPU that is a member of the specified domain.
+ */
+void resctrl_arch_mon_event_config_read(void *config_info);
+
+/* For use by arch code to remap resctrl's smaller CDP CLOSID range */
+static inline u32 resctrl_get_config_index(u32 closid,
+ enum resctrl_conf_type type)
+{
+ switch (type) {
+ default:
+ case CDP_NONE:
+ return closid;
+ case CDP_CODE:
+ return closid * 2 + 1;
+ case CDP_DATA:
+ return closid * 2;
+ }
+}
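A concrete mapping as a sanity check (values follow directly from the switch above): with CDP enabled, each CLOSID owns an even/odd pair of hardware slots, data in the even one and code in the odd one.

        resctrl_get_config_index(3, CDP_NONE);  /* -> 3 */
        resctrl_get_config_index(3, CDP_DATA);  /* -> 6 */
        resctrl_get_config_index(3, CDP_CODE);  /* -> 7 */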
+
+bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l);
+int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable);
+
+/**
+ * resctrl_arch_mbm_cntr_assign_enabled() - Check if MBM counter assignment
+ * mode is enabled.
+ * @r: Pointer to the resource structure.
+ *
+ * Return:
+ * true if the assignment mode is enabled, false otherwise.
+ */
+bool resctrl_arch_mbm_cntr_assign_enabled(struct rdt_resource *r);
+
+/**
+ * resctrl_arch_mbm_cntr_assign_set() - Configure the MBM counter assignment mode.
+ * @r: Pointer to the resource structure.
+ * @enable: Set to true to enable, false to disable the assignment mode.
+ *
+ * Return:
+ * 0 on success, < 0 on error.
+ */
+int resctrl_arch_mbm_cntr_assign_set(struct rdt_resource *r, bool enable);
+
+/*
+ * Update the ctrl_val and apply this config right now.
+ * Must be called on one of the domain's CPUs.
+ */
+int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_ctrl_domain *d,
+ u32 closid, enum resctrl_conf_type t, u32 cfg_val);
+
+u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_ctrl_domain *d,
+ u32 closid, enum resctrl_conf_type type);
+int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
+int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
+void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d);
+void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d);
+void resctrl_online_cpu(unsigned int cpu);
+void resctrl_offline_cpu(unsigned int cpu);
+
+/**
+ * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid
+ * for this resource and domain.
+ * @r: resource that the counter should be read from.
+ * @d: domain that the counter should be read from.
+ * @closid: closid that matches the rmid. Depending on the architecture, the
+ * counter may match traffic of both @closid and @rmid, or @rmid
+ * only.
+ * @rmid: rmid of the counter to read.
+ * @eventid: eventid to read, e.g. L3 occupancy.
+ * @val: result of the counter read in bytes.
+ * @arch_mon_ctx: An architecture specific value from
+ * resctrl_arch_mon_ctx_alloc(), for MPAM this identifies
+ * the hardware monitor allocated for this read request.
+ *
+ * Some architectures need to sleep when first programming some of the counters.
+ * (specifically: arm64's MPAM cache occupancy counters can return 'not ready'
+ * for a short period of time). Call from a non-migratable process context on
+ * a CPU that belongs to domain @d. e.g. use smp_call_on_cpu() or
+ * schedule_work_on(). This function can be called with interrupts masked,
+ * e.g. using smp_call_function_any(), but may consistently return an error.
+ *
+ * Return:
+ * 0 on success, or -EIO, -EINVAL etc on error.
+ */
+int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+ u32 closid, u32 rmid, enum resctrl_event_id eventid,
+ u64 *val, void *arch_mon_ctx);
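A hedged sketch of the calling convention described above; the wrapper names are invented and the arch_mon_ctx allocation is elided (NULL) for brevity:

        struct rmid_read_arg {
                struct rdt_resource *r;
                struct rdt_mon_domain *d;
                u32 closid, rmid;
                u64 val;
        };

        static int __rmid_read_fn(void *info)
        {
                struct rmid_read_arg *a = info;

                return resctrl_arch_rmid_read(a->r, a->d, a->closid, a->rmid,
                                              QOS_L3_OCCUP_EVENT_ID, &a->val, NULL);
        }

        /* Process context: run the read on a CPU inside the domain; may sleep. */
        struct rmid_read_arg arg = { .r = r, .d = d, .closid = closid, .rmid = rmid };
        int err;

        err = smp_call_on_cpu(cpumask_any(&d->hdr.cpu_mask),
                              __rmid_read_fn, &arg, false);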
+
+/**
+ * resctrl_arch_rmid_read_context_check() - warn about invalid contexts
+ *
+ * When built with CONFIG_DEBUG_ATOMIC_SLEEP generate a warning when
+ * resctrl_arch_rmid_read() is called with preemption disabled.
+ *
+ * The contract with resctrl_arch_rmid_read() is that if interrupts
+ * are unmasked, it can sleep. This allows NOHZ_FULL systems to use an
+ * IPI, (and fail if the call needed to sleep), while most of the time
+ * the work is scheduled, allowing the call to sleep.
+ */
+static inline void resctrl_arch_rmid_read_context_check(void)
+{
+ if (!irqs_disabled())
+ might_sleep();
+}
+
+/**
+ * resctrl_find_domain() - Search for a domain id in a resource domain list.
+ * @h: The domain list to search.
+ * @id: The domain id to search for.
+ * @pos: A pointer to the position in the list where @id should be inserted.
+ *
+ * Search the domain list for the domain with id @id. If the domain is
+ * found, return it; return NULL otherwise. If the domain id is not found
+ * (and NULL returned), the first domain with an id bigger than the input
+ * id is passed back to the caller via @pos.
+ */
+struct rdt_domain_hdr *resctrl_find_domain(struct list_head *h, int id,
+ struct list_head **pos);
+
+/**
+ * resctrl_arch_reset_rmid() - Reset any private state associated with rmid
+ * and eventid.
+ * @r: The domain's resource.
+ * @d: The rmid's domain.
+ * @closid: closid that matches the rmid. Depending on the architecture, the
+ * counter may match traffic of both @closid and @rmid, or @rmid only.
+ * @rmid: The rmid whose counter values should be reset.
+ * @eventid: The eventid whose counter values should be reset.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
+ u32 closid, u32 rmid,
+ enum resctrl_event_id eventid);
+
+/**
+ * resctrl_arch_reset_rmid_all() - Reset all private state associated with
+ * all rmids and eventids.
+ * @r: The resctrl resource.
+ * @d: The domain for which all architectural counter state will
+ * be cleared.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_mon_domain *d);
+
+/**
+ * resctrl_arch_reset_all_ctrls() - Reset the control for each CLOSID to its
+ * default.
+ * @r: The resctrl resource to reset.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_reset_all_ctrls(struct rdt_resource *r);
+
+/**
+ * resctrl_arch_config_cntr() - Configure the counter with its new RMID
+ * and event details.
+ * @r: Resource structure.
+ * @d: The domain in which counter with ID @cntr_id should be configured.
+ * @evtid: Monitoring event type (e.g., QOS_L3_MBM_TOTAL_EVENT_ID
+ * or QOS_L3_MBM_LOCAL_EVENT_ID).
+ * @rmid: RMID.
+ * @closid: CLOSID.
+ * @cntr_id: Counter ID to configure.
+ * @assign: True to assign the counter or update an existing assignment,
+ * false to unassign the counter.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+ enum resctrl_event_id evtid, u32 rmid, u32 closid,
+ u32 cntr_id, bool assign);
+
+/**
+ * resctrl_arch_cntr_read() - Read the event data corresponding to the counter ID
+ * assigned to the RMID, event pair for this resource
+ * and domain.
+ * @r: Resource that the counter should be read from.
+ * @d: Domain that the counter should be read from.
+ * @closid: CLOSID that matches the RMID.
+ * @rmid: The RMID to which @cntr_id is assigned.
+ * @cntr_id: The counter to read.
+ * @eventid: The MBM event to which @cntr_id is assigned.
+ * @val: Result of the counter read in bytes.
+ *
+ * Called on a CPU that belongs to domain @d when "mbm_event" mode is enabled.
+ * Called from a non-migratable process context via smp_call_on_cpu() unless all
+ * CPUs are nohz_full, in which case it is called via IPI (smp_call_function_any()).
+ *
+ * Return:
+ * 0 on success, or -EIO, -EINVAL etc on error.
+ */
+int resctrl_arch_cntr_read(struct rdt_resource *r, struct rdt_mon_domain *d,
+ u32 closid, u32 rmid, int cntr_id,
+ enum resctrl_event_id eventid, u64 *val);
+
+/**
+ * resctrl_arch_reset_cntr() - Reset any private state associated with counter ID.
+ * @r: The domain's resource.
+ * @d: The counter ID's domain.
+ * @closid: CLOSID that matches the RMID.
+ * @rmid: The RMID to which @cntr_id is assigned.
+ * @cntr_id: The counter to reset.
+ * @eventid: The MBM event to which @cntr_id is assigned.
+ *
+ * This can be called from any CPU.
+ */
+void resctrl_arch_reset_cntr(struct rdt_resource *r, struct rdt_mon_domain *d,
+ u32 closid, u32 rmid, int cntr_id,
+ enum resctrl_event_id eventid);
+
+/**
+ * resctrl_arch_io_alloc_enable() - Enable/disable io_alloc feature.
+ * @r: The resctrl resource.
+ * @enable: Enable (true) or disable (false) io_alloc on resource @r.
+ *
+ * This can be called from any CPU.
+ *
+ * Return:
+ * 0 on success, <0 on error.
+ */
+int resctrl_arch_io_alloc_enable(struct rdt_resource *r, bool enable);
+
+/**
+ * resctrl_arch_get_io_alloc_enabled() - Get io_alloc feature state.
+ * @r: The resctrl resource.
+ *
+ * Return:
+ * true if io_alloc is enabled or false if disabled.
+ */
+bool resctrl_arch_get_io_alloc_enabled(struct rdt_resource *r);
+
+extern unsigned int resctrl_rmid_realloc_threshold;
+extern unsigned int resctrl_rmid_realloc_limit;
+
+int resctrl_init(void);
+void resctrl_exit(void);
+
+#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK
+u64 resctrl_arch_get_prefetch_disable_bits(void);
+int resctrl_arch_pseudo_lock_fn(void *_plr);
+int resctrl_arch_measure_cycles_lat_fn(void *_plr);
+int resctrl_arch_measure_l2_residency(void *_plr);
+int resctrl_arch_measure_l3_residency(void *_plr);
+#else
+static inline u64 resctrl_arch_get_prefetch_disable_bits(void) { return 0; }
+static inline int resctrl_arch_pseudo_lock_fn(void *_plr) { return 0; }
+static inline int resctrl_arch_measure_cycles_lat_fn(void *_plr) { return 0; }
+static inline int resctrl_arch_measure_l2_residency(void *_plr) { return 0; }
+static inline int resctrl_arch_measure_l3_residency(void *_plr) { return 0; }
+#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */
#endif /* _RESCTRL_H */
diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h
new file mode 100644
index 000000000000..acfe07860b34
--- /dev/null
+++ b/include/linux/resctrl_types.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Arm Ltd.
+ * Based on arch/x86/kernel/cpu/resctrl/internal.h
+ */
+
+#ifndef __LINUX_RESCTRL_TYPES_H
+#define __LINUX_RESCTRL_TYPES_H
+
+#define MAX_MBA_BW 100u
+#define MBM_OVERFLOW_INTERVAL 1000
+
+/* Reads to Local DRAM Memory */
+#define READS_TO_LOCAL_MEM BIT(0)
+
+/* Reads to Remote DRAM Memory */
+#define READS_TO_REMOTE_MEM BIT(1)
+
+/* Non-Temporal Writes to Local Memory */
+#define NON_TEMP_WRITE_TO_LOCAL_MEM BIT(2)
+
+/* Non-Temporal Writes to Remote Memory */
+#define NON_TEMP_WRITE_TO_REMOTE_MEM BIT(3)
+
+/* Reads to Local Memory the system identifies as "Slow Memory" */
+#define READS_TO_LOCAL_S_MEM BIT(4)
+
+/* Reads to Remote Memory the system identifies as "Slow Memory" */
+#define READS_TO_REMOTE_S_MEM BIT(5)
+
+/* Dirty Victims to All Types of Memory */
+#define DIRTY_VICTIMS_TO_ALL_MEM BIT(6)
+
+/* Max event bits supported */
+#define MAX_EVT_CONFIG_BITS GENMASK(6, 0)
+
+/* Number of memory transactions that an MBM event can be configured with */
+#define NUM_MBM_TRANSACTIONS 7
+
+/* Event IDs */
+enum resctrl_event_id {
+ /* Must match value of first event below */
+ QOS_FIRST_EVENT = 0x01,
+
+ /*
+ * These values match those used to program IA32_QM_EVTSEL before
+ * reading IA32_QM_CTR on RDT systems.
+ */
+ QOS_L3_OCCUP_EVENT_ID = 0x01,
+ QOS_L3_MBM_TOTAL_EVENT_ID = 0x02,
+ QOS_L3_MBM_LOCAL_EVENT_ID = 0x03,
+
+ /* Must be the last */
+ QOS_NUM_EVENTS,
+};
+
+#define QOS_NUM_L3_MBM_EVENTS (QOS_L3_MBM_LOCAL_EVENT_ID - QOS_L3_MBM_TOTAL_EVENT_ID + 1)
+#define MBM_STATE_IDX(evt) ((evt) - QOS_L3_MBM_TOTAL_EVENT_ID)
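With the event IDs above, both constants can be checked by hand: QOS_NUM_L3_MBM_EVENTS is (0x03 - 0x02 + 1) = 2, and the index macro maps the two MBM events onto that range:

        MBM_STATE_IDX(QOS_L3_MBM_TOTAL_EVENT_ID);  /* 0x02 - 0x02 = 0 */
        MBM_STATE_IDX(QOS_L3_MBM_LOCAL_EVENT_ID);  /* 0x03 - 0x02 = 1 */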
+
+#endif /* __LINUX_RESCTRL_TYPES_H */
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index ec35814e0bbb..46514cb1b9e0 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -27,31 +27,6 @@ struct device_node;
struct of_phandle_args;
/**
- * struct reset_control_lookup - represents a single lookup entry
- *
- * @list: internal list of all reset lookup entries
- * @provider: name of the reset controller device controlling this reset line
- * @index: ID of the reset controller in the reset controller device
- * @dev_id: name of the device associated with this reset line
- * @con_id: name of the reset line (can be NULL)
- */
-struct reset_control_lookup {
- struct list_head list;
- const char *provider;
- unsigned int index;
- const char *dev_id;
- const char *con_id;
-};
-
-#define RESET_LOOKUP(_provider, _index, _dev_id, _con_id) \
- { \
- .provider = _provider, \
- .index = _index, \
- .dev_id = _dev_id, \
- .con_id = _con_id, \
- }
-
-/**
* struct reset_controller_dev - reset controller entity that might
* provide multiple reset controls
* @ops: a pointer to device specific struct reset_control_ops
@@ -60,6 +35,9 @@ struct reset_control_lookup {
* @reset_control_head: head of internal list of requested reset controls
* @dev: corresponding driver model device struct
* @of_node: corresponding device tree node as phandle target
+ * @of_args: for reset-gpios controllers: corresponding phandle args with
+ * of_node and GPIO number complementing of_node; either this or
+ * of_node should be present
* @of_reset_n_cells: number of cells in reset line specifiers
* @of_xlate: translation function to translate from specifier as found in the
* device tree to id as given to the reset control ops, defaults
@@ -73,20 +51,35 @@ struct reset_controller_dev {
struct list_head reset_control_head;
struct device *dev;
struct device_node *of_node;
+ const struct of_phandle_args *of_args;
int of_reset_n_cells;
int (*of_xlate)(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec);
unsigned int nr_resets;
};
+#if IS_ENABLED(CONFIG_RESET_CONTROLLER)
int reset_controller_register(struct reset_controller_dev *rcdev);
void reset_controller_unregister(struct reset_controller_dev *rcdev);
struct device;
int devm_reset_controller_register(struct device *dev,
struct reset_controller_dev *rcdev);
+#else
+static inline int reset_controller_register(struct reset_controller_dev *rcdev)
+{
+ return 0;
+}
-void reset_controller_add_lookup(struct reset_control_lookup *lookup,
- unsigned int num_entries);
+static inline void reset_controller_unregister(struct reset_controller_dev *rcdev)
+{
+}
+
+static inline int devm_reset_controller_register(struct device *dev,
+ struct reset_controller_dev *rcdev)
+{
+ return 0;
+}
+#endif
#endif
diff --git a/include/linux/reset.h b/include/linux/reset.h
index db0e6115a2f6..44f9e3415f92 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_RESET_H_
#define _LINUX_RESET_H_
+#include <linux/bits.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -25,6 +26,48 @@ struct reset_control_bulk_data {
struct reset_control *rstc;
};
+#define RESET_CONTROL_FLAGS_BIT_SHARED BIT(0) /* not exclusive */
+#define RESET_CONTROL_FLAGS_BIT_OPTIONAL BIT(1)
+#define RESET_CONTROL_FLAGS_BIT_ACQUIRED BIT(2) /* iff exclusive, not released */
+#define RESET_CONTROL_FLAGS_BIT_DEASSERTED BIT(3)
+
+/**
+ * enum reset_control_flags - Flags that can be passed to the reset_control_get functions
+ * to determine the type of reset control.
+ * These values cannot be OR'd.
+ *
+ * @RESET_CONTROL_EXCLUSIVE: exclusive, acquired
+ * @RESET_CONTROL_EXCLUSIVE_DEASSERTED: exclusive, acquired, deasserted
+ * @RESET_CONTROL_EXCLUSIVE_RELEASED: exclusive, released
+ * @RESET_CONTROL_SHARED: shared
+ * @RESET_CONTROL_SHARED_DEASSERTED: shared, deasserted
+ * @RESET_CONTROL_OPTIONAL_EXCLUSIVE: optional, exclusive, acquired
+ * @RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED: optional, exclusive, acquired, deasserted
+ * @RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED: optional, exclusive, released
+ * @RESET_CONTROL_OPTIONAL_SHARED: optional, shared
+ * @RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED: optional, shared, deasserted
+ */
+enum reset_control_flags {
+ RESET_CONTROL_EXCLUSIVE = RESET_CONTROL_FLAGS_BIT_ACQUIRED,
+ RESET_CONTROL_EXCLUSIVE_DEASSERTED = RESET_CONTROL_FLAGS_BIT_ACQUIRED |
+ RESET_CONTROL_FLAGS_BIT_DEASSERTED,
+ RESET_CONTROL_EXCLUSIVE_RELEASED = 0,
+ RESET_CONTROL_SHARED = RESET_CONTROL_FLAGS_BIT_SHARED,
+ RESET_CONTROL_SHARED_DEASSERTED = RESET_CONTROL_FLAGS_BIT_SHARED |
+ RESET_CONTROL_FLAGS_BIT_DEASSERTED,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE = RESET_CONTROL_FLAGS_BIT_OPTIONAL |
+ RESET_CONTROL_FLAGS_BIT_ACQUIRED,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED = RESET_CONTROL_FLAGS_BIT_OPTIONAL |
+ RESET_CONTROL_FLAGS_BIT_ACQUIRED |
+ RESET_CONTROL_FLAGS_BIT_DEASSERTED,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED = RESET_CONTROL_FLAGS_BIT_OPTIONAL,
+ RESET_CONTROL_OPTIONAL_SHARED = RESET_CONTROL_FLAGS_BIT_OPTIONAL |
+ RESET_CONTROL_FLAGS_BIT_SHARED,
+ RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED = RESET_CONTROL_FLAGS_BIT_OPTIONAL |
+ RESET_CONTROL_FLAGS_BIT_SHARED |
+ RESET_CONTROL_FLAGS_BIT_DEASSERTED,
+};
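Although callers must not OR these values themselves, each enumerator decomposes into the flag bits defined above, which is exactly how the !CONFIG_RESET_CONTROLLER stubs later in this patch recover the "optional" property. For example:

        /* RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED
         *   == BIT(1) | BIT(0) | BIT(3) == 0xb
         */
        bool optional = RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED &
                        RESET_CONTROL_FLAGS_BIT_OPTIONAL;       /* true */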
+
#ifdef CONFIG_RESET_CONTROLLER
int reset_control_reset(struct reset_control *rstc);
@@ -42,30 +85,25 @@ int reset_control_bulk_acquire(int num_rstcs, struct reset_control_bulk_data *rs
void reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs);
struct reset_control *__of_reset_control_get(struct device_node *node,
- const char *id, int index, bool shared,
- bool optional, bool acquired);
+ const char *id, int index, enum reset_control_flags flags);
struct reset_control *__reset_control_get(struct device *dev, const char *id,
- int index, bool shared,
- bool optional, bool acquired);
+ int index, enum reset_control_flags flags);
void reset_control_put(struct reset_control *rstc);
int __reset_control_bulk_get(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs,
- bool shared, bool optional, bool acquired);
+ enum reset_control_flags flags);
void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs);
int __device_reset(struct device *dev, bool optional);
struct reset_control *__devm_reset_control_get(struct device *dev,
- const char *id, int index, bool shared,
- bool optional, bool acquired);
+ const char *id, int index, enum reset_control_flags flags);
int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs,
- bool shared, bool optional, bool acquired);
+ enum reset_control_flags flags);
struct reset_control *devm_reset_control_array_get(struct device *dev,
- bool shared, bool optional);
-struct reset_control *of_reset_control_array_get(struct device_node *np,
- bool shared, bool optional,
- bool acquired);
+ enum reset_control_flags flags);
+struct reset_control *of_reset_control_array_get(struct device_node *np, enum reset_control_flags flags);
int reset_control_get_count(struct device *dev);
@@ -116,17 +154,19 @@ static inline int __device_reset(struct device *dev, bool optional)
static inline struct reset_control *__of_reset_control_get(
struct device_node *node,
- const char *id, int index, bool shared,
- bool optional, bool acquired)
+ const char *id, int index, enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
static inline struct reset_control *__reset_control_get(
struct device *dev, const char *id,
- int index, bool shared, bool optional,
- bool acquired)
+ int index, enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
@@ -162,8 +202,10 @@ reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs)
static inline int
__reset_control_bulk_get(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs,
- bool shared, bool optional, bool acquired)
+ enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? 0 : -EOPNOTSUPP;
}
@@ -174,30 +216,36 @@ reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
static inline struct reset_control *__devm_reset_control_get(
struct device *dev, const char *id,
- int index, bool shared, bool optional,
- bool acquired)
+ int index, enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
static inline int
__devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs,
- bool shared, bool optional, bool acquired)
+ enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? 0 : -EOPNOTSUPP;
}
static inline struct reset_control *
-devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
+devm_reset_control_array_get(struct device *dev, enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
static inline struct reset_control *
-of_reset_control_array_get(struct device_node *np, bool shared, bool optional,
- bool acquired)
+of_reset_control_array_get(struct device_node *np, enum reset_control_flags flags)
{
+ bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL;
+
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
@@ -236,7 +284,7 @@ static inline int device_reset_optional(struct device *dev)
static inline struct reset_control *
__must_check reset_control_get_exclusive(struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, false, false, true);
+ return __reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -253,7 +301,7 @@ static inline int __must_check
reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -274,7 +322,7 @@ static inline struct reset_control *
__must_check reset_control_get_exclusive_released(struct device *dev,
const char *id)
{
- return __reset_control_get(dev, id, 0, false, false, false);
+ return __reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE_RELEASED);
}
/**
@@ -295,7 +343,7 @@ static inline int __must_check
reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_EXCLUSIVE_RELEASED);
}
/**
@@ -316,7 +364,8 @@ static inline int __must_check
reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED);
}
/**
@@ -344,7 +393,7 @@ reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_r
static inline struct reset_control *reset_control_get_shared(
struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, true, false, false);
+ return __reset_control_get(dev, id, 0, RESET_CONTROL_SHARED);
}
/**
@@ -361,7 +410,7 @@ static inline int __must_check
reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_SHARED);
}
/**
@@ -378,7 +427,7 @@ reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
static inline struct reset_control *reset_control_get_optional_exclusive(
struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, false, true, true);
+ return __reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
/**
@@ -398,7 +447,7 @@ static inline int __must_check
reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
/**
@@ -415,7 +464,7 @@ reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
static inline struct reset_control *reset_control_get_optional_shared(
struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, true, true, false);
+ return __reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_SHARED);
}
/**
@@ -435,7 +484,7 @@ static inline int __must_check
reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false);
+ return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_OPTIONAL_SHARED);
}
/**
@@ -451,7 +500,27 @@ reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
static inline struct reset_control *of_reset_control_get_exclusive(
struct device_node *node, const char *id)
{
- return __of_reset_control_get(node, id, 0, false, false, true);
+ return __of_reset_control_get(node, id, 0, RESET_CONTROL_EXCLUSIVE);
+}
+
+/**
+ * of_reset_control_get_optional_exclusive - Lookup and obtain an optional exclusive
+ * reference to a reset controller.
+ * @node: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Optional variant of of_reset_control_get_exclusive(). If the requested reset
+ * is not specified in the device tree, this function returns NULL instead of
+ * an error.
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *of_reset_control_get_optional_exclusive(
+ struct device_node *node, const char *id)
+{
+ return __of_reset_control_get(node, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
/**
@@ -476,7 +545,7 @@ static inline struct reset_control *of_reset_control_get_exclusive(
static inline struct reset_control *of_reset_control_get_shared(
struct device_node *node, const char *id)
{
- return __of_reset_control_get(node, id, 0, true, false, false);
+ return __of_reset_control_get(node, id, 0, RESET_CONTROL_SHARED);
}
/**
@@ -493,7 +562,7 @@ static inline struct reset_control *of_reset_control_get_shared(
static inline struct reset_control *of_reset_control_get_exclusive_by_index(
struct device_node *node, int index)
{
- return __of_reset_control_get(node, NULL, index, false, false, true);
+ return __of_reset_control_get(node, NULL, index, RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -521,7 +590,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index(
static inline struct reset_control *of_reset_control_get_shared_by_index(
struct device_node *node, int index)
{
- return __of_reset_control_get(node, NULL, index, true, false, false);
+ return __of_reset_control_get(node, NULL, index, RESET_CONTROL_SHARED);
}
/**
@@ -540,7 +609,26 @@ static inline struct reset_control *
__must_check devm_reset_control_get_exclusive(struct device *dev,
const char *id)
{
- return __devm_reset_control_get(dev, id, 0, false, false, true);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE);
+}
+
+/**
+ * devm_reset_control_get_exclusive_deasserted - resource managed
+ * reset_control_get_exclusive() +
+ * reset_control_deassert()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_exclusive() + reset_control_deassert(). For reset
+ * controllers returned from this function, reset_control_assert() +
+ * reset_control_put() are called automatically on driver detach.
+ *
+ * See reset_control_get_exclusive() for more information.
+ */
+static inline struct reset_control * __must_check
+devm_reset_control_get_exclusive_deasserted(struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE_DEASSERTED);
}
/**
@@ -560,7 +648,8 @@ static inline int __must_check
devm_reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -579,7 +668,7 @@ static inline struct reset_control *
__must_check devm_reset_control_get_exclusive_released(struct device *dev,
const char *id)
{
- return __devm_reset_control_get(dev, id, 0, false, false, false);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE_RELEASED);
}
/**
@@ -599,7 +688,8 @@ static inline int __must_check
devm_reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_EXCLUSIVE_RELEASED);
}
/**
@@ -618,7 +708,7 @@ static inline struct reset_control *
__must_check devm_reset_control_get_optional_exclusive_released(struct device *dev,
const char *id)
{
- return __devm_reset_control_get(dev, id, 0, false, true, false);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED);
}
/**
@@ -638,7 +728,8 @@ static inline int __must_check
devm_reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED);
}
/**
@@ -653,7 +744,26 @@ devm_reset_control_bulk_get_optional_exclusive_released(struct device *dev, int
static inline struct reset_control *devm_reset_control_get_shared(
struct device *dev, const char *id)
{
- return __devm_reset_control_get(dev, id, 0, true, false, false);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_SHARED);
+}
+
+/**
+ * devm_reset_control_get_shared_deasserted - resource managed
+ * reset_control_get_shared() +
+ * reset_control_deassert()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_shared() + reset_control_deassert(). For reset
+ * controllers returned from this function, reset_control_assert() +
+ * reset_control_put() are called automatically on driver detach.
+ *
+ * See devm_reset_control_get_shared() for more information.
+ */
+static inline struct reset_control * __must_check
+devm_reset_control_get_shared_deasserted(struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_SHARED_DEASSERTED);
}
/**
@@ -673,7 +783,29 @@ static inline int __must_check
devm_reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_SHARED);
+}
+
+/**
+ * devm_reset_control_bulk_get_shared_deasserted - resource managed
+ * reset_control_bulk_get_shared() +
+ * reset_control_bulk_deassert()
+ * @dev: device to be reset by the controller
+ * @num_rstcs: number of entries in rstcs array
+ * @rstcs: array of struct reset_control_bulk_data with reset line names set
+ *
+ * Managed reset_control_bulk_get_shared() + reset_control_bulk_deassert(). For
+ * reset controllers returned from this function, reset_control_bulk_assert() +
+ * reset_control_bulk_put() are called automatically on driver detach.
+ *
+ * See devm_reset_control_bulk_get_shared() for more information.
+ */
+static inline int __must_check
+devm_reset_control_bulk_get_shared_deasserted(struct device *dev, int num_rstcs,
+ struct reset_control_bulk_data *rstcs)
+{
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_SHARED_DEASSERTED);
}
/**
@@ -691,7 +823,26 @@ devm_reset_control_bulk_get_shared(struct device *dev, int num_rstcs,
static inline struct reset_control *devm_reset_control_get_optional_exclusive(
struct device *dev, const char *id)
{
- return __devm_reset_control_get(dev, id, 0, false, true, true);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
+}
+
+/**
+ * devm_reset_control_get_optional_exclusive_deasserted - resource managed
+ * reset_control_get_optional_exclusive() +
+ * reset_control_deassert()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_optional_exclusive() + reset_control_deassert().
+ * For reset controllers returned from this function, reset_control_assert() +
+ * reset_control_put() are called automatically on driver detach.
+ *
+ * See devm_reset_control_get_optional_exclusive() for more information.
+ */
+static inline struct reset_control *
+devm_reset_control_get_optional_exclusive_deasserted(struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED);
}
/**
@@ -711,7 +862,8 @@ static inline int __must_check
devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, true);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs,
+ RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
/**
@@ -729,7 +881,26 @@ devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs
static inline struct reset_control *devm_reset_control_get_optional_shared(
struct device *dev, const char *id)
{
- return __devm_reset_control_get(dev, id, 0, true, true, false);
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_SHARED);
+}
+
+/**
+ * devm_reset_control_get_optional_shared_deasserted - resource managed
+ * reset_control_get_optional_shared() +
+ * reset_control_deassert()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_optional_shared() + reset_control_deassert(). For
+ * reset controllers returned from this function, reset_control_assert() +
+ * reset_control_put() are called automatically on driver detach.
+ *
+ * See devm_reset_control_get_optional_shared() for more information.
+ */
+static inline struct reset_control *
+devm_reset_control_get_optional_shared_deasserted(struct device *dev, const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED);
}
/**
@@ -749,7 +920,7 @@ static inline int __must_check
devm_reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
struct reset_control_bulk_data *rstcs)
{
- return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false);
+ return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_OPTIONAL_SHARED);
}
/**
@@ -767,7 +938,7 @@ devm_reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs,
static inline struct reset_control *
devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
{
- return __devm_reset_control_get(dev, NULL, index, false, false, true);
+ return __devm_reset_control_get(dev, NULL, index, RESET_CONTROL_EXCLUSIVE);
}
/**
@@ -783,7 +954,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
static inline struct reset_control *
devm_reset_control_get_shared_by_index(struct device *dev, int index)
{
- return __devm_reset_control_get(dev, NULL, index, true, false, false);
+ return __devm_reset_control_get(dev, NULL, index, RESET_CONTROL_SHARED);
}
/*
@@ -831,54 +1002,60 @@ static inline struct reset_control *devm_reset_control_get_by_index(
static inline struct reset_control *
devm_reset_control_array_get_exclusive(struct device *dev)
{
- return devm_reset_control_array_get(dev, false, false);
+ return devm_reset_control_array_get(dev, RESET_CONTROL_EXCLUSIVE);
+}
+
+static inline struct reset_control *
+devm_reset_control_array_get_exclusive_released(struct device *dev)
+{
+ return devm_reset_control_array_get(dev, RESET_CONTROL_EXCLUSIVE_RELEASED);
}
static inline struct reset_control *
devm_reset_control_array_get_shared(struct device *dev)
{
- return devm_reset_control_array_get(dev, true, false);
+ return devm_reset_control_array_get(dev, RESET_CONTROL_SHARED);
}
static inline struct reset_control *
devm_reset_control_array_get_optional_exclusive(struct device *dev)
{
- return devm_reset_control_array_get(dev, false, true);
+ return devm_reset_control_array_get(dev, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
static inline struct reset_control *
devm_reset_control_array_get_optional_shared(struct device *dev)
{
- return devm_reset_control_array_get(dev, true, true);
+ return devm_reset_control_array_get(dev, RESET_CONTROL_OPTIONAL_SHARED);
}
static inline struct reset_control *
of_reset_control_array_get_exclusive(struct device_node *node)
{
- return of_reset_control_array_get(node, false, false, true);
+ return of_reset_control_array_get(node, RESET_CONTROL_EXCLUSIVE);
}
static inline struct reset_control *
of_reset_control_array_get_exclusive_released(struct device_node *node)
{
- return of_reset_control_array_get(node, false, false, false);
+ return of_reset_control_array_get(node, RESET_CONTROL_EXCLUSIVE_RELEASED);
}
static inline struct reset_control *
of_reset_control_array_get_shared(struct device_node *node)
{
- return of_reset_control_array_get(node, true, false, true);
+ return of_reset_control_array_get(node, RESET_CONTROL_SHARED);
}
static inline struct reset_control *
of_reset_control_array_get_optional_exclusive(struct device_node *node)
{
- return of_reset_control_array_get(node, false, true, true);
+ return of_reset_control_array_get(node, RESET_CONTROL_OPTIONAL_EXCLUSIVE);
}
static inline struct reset_control *
of_reset_control_array_get_optional_shared(struct device_node *node)
{
- return of_reset_control_array_get(node, true, true, true);
+ return of_reset_control_array_get(node, RESET_CONTROL_OPTIONAL_SHARED);
}
#endif
diff --git a/include/linux/reset/bcm63xx_pmb.h b/include/linux/reset/bcm63xx_pmb.h
index bb4af7b5eb36..c77b6999518a 100644
--- a/include/linux/reset/bcm63xx_pmb.h
+++ b/include/linux/reset/bcm63xx_pmb.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Broadcom BCM63xx Processor Monitor Bus shared routines (SMP and reset)
*
* Copyright (C) 2015, Broadcom Corporation
* Author: Florian Fainelli <f.fainelli@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __BCM63XX_PMB_H
#define __BCM63XX_PMB_H
diff --git a/include/linux/resource.h b/include/linux/resource.h
index bdf491cbcab7..4fdbc0c3f315 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -8,7 +8,5 @@
struct task_struct;
void getrusage(struct task_struct *p, int who, struct rusage *ru);
-int do_prlimit(struct task_struct *tsk, unsigned int resource,
- struct rlimit *new_rlim, struct rlimit *old_rlim);
#endif
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index 980a65594412..36ddfa1ec301 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -7,8 +7,8 @@
#include <linux/compiler.h>
#include <linux/types.h>
-#include <linux/time64.h>
+struct __kernel_timespec;
struct timespec;
struct old_timespec32;
struct pollfd;
@@ -26,7 +26,7 @@ struct restart_block {
unsigned long arch_data;
long (*fn)(struct restart_block *);
union {
- /* For futex_wait and futex_wait_requeue_pi */
+ /* For futex_wait() */
struct {
u32 __user *uaddr;
u32 val;
@@ -43,7 +43,7 @@ struct restart_block {
struct __kernel_timespec __user *rmtp;
struct old_timespec32 __user *compat_rmtp;
};
- u64 expires;
+ ktime_t expires;
} nanosleep;
/* For poll */
struct {
diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h
new file mode 100644
index 000000000000..bf92227c78d0
--- /dev/null
+++ b/include/linux/resume_user_mode.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef LINUX_RESUME_USER_MODE_H
+#define LINUX_RESUME_USER_MODE_H
+
+#include <linux/sched.h>
+#include <linux/task_work.h>
+#include <linux/memcontrol.h>
+#include <linux/rseq.h>
+#include <linux/blk-cgroup.h>
+
+/**
+ * set_notify_resume - cause resume_user_mode_work() to be called
+ * @task: task that will call resume_user_mode_work()
+ *
+ * Calling this arranges that @task will call resume_user_mode_work()
+ * before returning to user mode. If it's already running in user mode,
+ * it will enter the kernel and call resume_user_mode_work() soon.
+ * If it's blocked, it will not be woken.
+ */
+static inline void set_notify_resume(struct task_struct *task)
+{
+ if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
+ kick_process(task);
+}
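Most callers reach this indirectly: task_work_add() with TWA_RESUME queues a callback and uses set_notify_resume() as its notification method, so the callback runs from resume_user_mode_work() below. A minimal sketch, assuming task is a valid task_struct pointer and we are in process context:

        static void my_cb(struct callback_head *head)
        {
                /* Runs in task context, just before the return to user mode. */
        }

        static struct callback_head my_work;

        init_task_work(&my_work, my_cb);
        if (task_work_add(task, &my_work, TWA_RESUME))
                /* -ESRCH: the task is already exiting */;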
+
+
+/**
+ * resume_user_mode_work - Perform work before returning to user mode
+ * @regs: user-mode registers of @current task
+ *
+ * This is called when %TIF_NOTIFY_RESUME has been set. Now we are
+ * about to return to user mode, and the user state in @regs can be
+ * inspected or adjusted. The caller in arch code has cleared
+ * %TIF_NOTIFY_RESUME before the call. If the flag gets set again
+ * asynchronously, this will be called again before we return to
+ * user mode.
+ *
+ * Called without locks.
+ */
+static inline void resume_user_mode_work(struct pt_regs *regs)
+{
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ /*
+ * This barrier pairs with task_work_add()->set_notify_resume() after
+ * hlist_add_head(task->task_works);
+ */
+ smp_mb__after_atomic();
+ if (unlikely(task_work_pending(current)))
+ task_work_run();
+
+#ifdef CONFIG_KEYS_REQUEST_CACHE
+ if (unlikely(current->cached_requested_key)) {
+ key_put(current->cached_requested_key);
+ current->cached_requested_key = NULL;
+ }
+#endif
+
+ mem_cgroup_handle_over_high(GFP_KERNEL);
+ blkcg_maybe_throttle_current();
+
+ rseq_handle_slowpath(regs);
+}
+
+#endif /* LINUX_RESUME_USER_MODE_H */
diff --git a/include/linux/rethook.h b/include/linux/rethook.h
new file mode 100644
index 000000000000..ba60962805f6
--- /dev/null
+++ b/include/linux/rethook.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Return hooking with list-based shadow stack.
+ */
+#ifndef _LINUX_RETHOOK_H
+#define _LINUX_RETHOOK_H
+
+#include <linux/compiler.h>
+#include <linux/objpool.h>
+#include <linux/kallsyms.h>
+#include <linux/llist.h>
+#include <linux/rcupdate.h>
+
+struct rethook_node;
+
+typedef void (*rethook_handler_t) (struct rethook_node *, void *, unsigned long, struct pt_regs *);
+
+/**
+ * struct rethook - The rethook management data structure.
+ * @data: The user-defined data storage.
+ * @handler: The user-defined return hook handler.
+ * @pool: The pool of struct rethook_node.
+ * @ref: The reference counter.
+ * @rcu: The rcu_head for deferred freeing.
+ *
+ * Don't embed this in another data structure, because it is a self-destructive
+ * data structure that frees itself once all rethook_node entries are freed.
+ */
+struct rethook {
+ void *data;
+ /*
+ * To avoid sparse warnings, this uses a raw function pointer with
+ * __rcu, instead of rethook_handler_t. But this must be same as
+ * rethook_handler_t.
+ */
+ void (__rcu *handler) (struct rethook_node *, void *, unsigned long, struct pt_regs *);
+ struct objpool_head pool;
+ struct rcu_head rcu;
+};
+
+/**
+ * struct rethook_node - The rethook shadow-stack entry node.
+ * @rcu: The rcu_head for deferred freeing.
+ * @llist: The llist, linked to a struct task_struct::rethooks.
+ * @rethook: The pointer to the struct rethook.
+ * @ret_addr: The storage for the real return address.
+ * @frame: The storage for the frame pointer.
+ *
+ * You can embed this in your extended data structure to store any data
+ * on each entry of the shadow stack.
+ */
+struct rethook_node {
+ struct rcu_head rcu;
+ struct llist_node llist;
+ struct rethook *rethook;
+ unsigned long ret_addr;
+ unsigned long frame;
+};
+
+struct rethook *rethook_alloc(void *data, rethook_handler_t handler, int size, int num);
+void rethook_stop(struct rethook *rh);
+void rethook_free(struct rethook *rh);
+struct rethook_node *rethook_try_get(struct rethook *rh);
+void rethook_recycle(struct rethook_node *node);
+void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount);
+unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame,
+ struct llist_node **cur);
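A sketch of the embedding that the struct rethook_node comment above describes; the structure and handler names are invented:

        struct my_instance {
                struct rethook_node node;       /* embedded, as documented above */
                ktime_t entry_time;
        };

        static void my_ret_handler(struct rethook_node *n, void *data,
                                   unsigned long ret_addr, struct pt_regs *regs)
        {
                struct my_instance *ri = container_of(n, struct my_instance, node);
                /* ri->entry_time is valid here; the node is recycled afterwards */
        }

        struct rethook *rh;

        /* Pool of 32 entries, each sized for the extended structure. */
        rh = rethook_alloc(NULL, my_ret_handler, sizeof(struct my_instance), 32);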
+
+/* Arch dependent code must implement arch_* and trampoline code */
+void arch_rethook_prepare(struct rethook_node *node, struct pt_regs *regs, bool mcount);
+void arch_rethook_trampoline(void);
+
+/**
+ * is_rethook_trampoline() - Check whether the address is rethook trampoline
+ * @addr: The address to be checked
+ *
+ * Return true if @addr is the rethook trampoline address.
+ */
+static inline bool is_rethook_trampoline(unsigned long addr)
+{
+ return addr == (unsigned long)dereference_symbol_descriptor(arch_rethook_trampoline);
+}
+
+/* If the architecture needs to fixup the return address, implement it. */
+void arch_rethook_fixup_return(struct pt_regs *regs,
+ unsigned long correct_ret_addr);
+
+/* Generic trampoline handler, arch code must prepare asm stub */
+unsigned long rethook_trampoline_handler(struct pt_regs *regs,
+ unsigned long frame);
+
+#ifdef CONFIG_RETHOOK
+void rethook_flush_task(struct task_struct *tk);
+#else
+#define rethook_flush_task(tsk) do { } while (0)
+#endif
+
+#endif
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 231e06b74b50..6816e4c5f3f0 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -147,7 +147,8 @@ void rfkill_destroy(struct rfkill *rfkill);
* Prefer to use rfkill_set_hw_state if you don't need any special reason.
*/
bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
- bool blocked, unsigned long reason);
+ bool blocked,
+ enum rfkill_hard_block_reasons reason);
/**
* rfkill_set_hw_state - Set the internal rfkill hardware block state
* @rfkill: pointer to the rfkill class to modify.
@@ -230,10 +231,17 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw);
bool rfkill_blocked(struct rfkill *rfkill);
/**
+ * rfkill_soft_blocked - Query soft rfkill block state
+ *
+ * @rfkill: rfkill struct to query
+ */
+bool rfkill_soft_blocked(struct rfkill *rfkill);
+
+/**
* rfkill_find_type - Helper for finding rfkill type by name
* @name: the name of the type
*
- * Returns enum rfkill_type that corresponds to the name.
+ * Returns: enum rfkill_type that corresponds to the name.
*/
enum rfkill_type rfkill_find_type(const char *name);
@@ -273,7 +281,7 @@ static inline void rfkill_destroy(struct rfkill *rfkill)
static inline bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
bool blocked,
- unsigned long reason)
+ enum rfkill_hard_block_reasons reason)
{
return blocked;
}
@@ -301,6 +309,11 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
return false;
}
+static inline bool rfkill_soft_blocked(struct rfkill *rfkill)
+{
+ return false;
+}
+
static inline enum rfkill_type rfkill_find_type(const char *name)
{
return RFKILL_TYPE_ALL;
diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h
index 57467cbf4c5b..015c8298bebc 100644
--- a/include/linux/rhashtable-types.h
+++ b/include/linux/rhashtable-types.h
@@ -9,10 +9,11 @@
#ifndef _LINUX_RHASHTABLE_TYPES_H
#define _LINUX_RHASHTABLE_TYPES_H
+#include <linux/alloc_tag.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
+#include <linux/workqueue_types.h>
struct rhash_head {
struct rhash_head __rcu *next;
@@ -88,6 +89,9 @@ struct rhashtable {
struct mutex mutex;
spinlock_t lock;
atomic_t nelems;
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+ struct alloc_tag *alloc_tag;
+#endif
};
/**
@@ -127,9 +131,12 @@ struct rhashtable_iter {
bool end_of_table;
};
-int rhashtable_init(struct rhashtable *ht,
+int rhashtable_init_noprof(struct rhashtable *ht,
const struct rhashtable_params *params);
-int rhltable_init(struct rhltable *hlt,
+#define rhashtable_init(...) alloc_hooks(rhashtable_init_noprof(__VA_ARGS__))
+
+int rhltable_init_noprof(struct rhltable *hlt,
const struct rhashtable_params *params);
+#define rhltable_init(...) alloc_hooks(rhltable_init_noprof(__VA_ARGS__))
#endif /* _LINUX_RHASHTABLE_TYPES_H */
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 68dab3e08aad..08e664b21f5a 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -122,7 +122,7 @@ static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
return hash & (tbl->size - 1);
}
-static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
+static __always_inline unsigned int rht_key_get_hash(struct rhashtable *ht,
const void *key, const struct rhashtable_params params,
unsigned int hash_rnd)
{
@@ -152,7 +152,7 @@ static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
return hash;
}
-static inline unsigned int rht_key_hashfn(
+static __always_inline unsigned int rht_key_hashfn(
struct rhashtable *ht, const struct bucket_table *tbl,
const void *key, const struct rhashtable_params params)
{
@@ -161,7 +161,7 @@ static inline unsigned int rht_key_hashfn(
return rht_bucket_index(tbl, hash);
}
-static inline unsigned int rht_head_hashfn(
+static __always_inline unsigned int rht_head_hashfn(
struct rhashtable *ht, const struct bucket_table *tbl,
const struct rhash_head *he, const struct rhashtable_params params)
{
@@ -272,13 +272,13 @@ struct rhash_lock_head __rcu **rht_bucket_nested_insert(
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
#define rht_dereference_rcu(p, ht) \
- rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
+ rcu_dereference_all_check(p, lockdep_rht_mutex_is_held(ht))
#define rht_dereference_bucket(p, tbl, hash) \
rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
#define rht_dereference_bucket_rcu(p, tbl, hash) \
- rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
+ rcu_dereference_all_check(p, lockdep_rht_bucket_is_held(tbl, hash))
#define rht_entry(tpos, pos, member) \
({ tpos = container_of(pos, typeof(*tpos), member); 1; })
@@ -323,37 +323,57 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
* When we write to a bucket without unlocking, we use rht_assign_locked().
*/
-static inline void rht_lock(struct bucket_table *tbl,
- struct rhash_lock_head __rcu **bkt)
+static inline unsigned long rht_lock(struct bucket_table *tbl,
+ struct rhash_lock_head __rcu **bkt)
{
- local_bh_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
bit_spin_lock(0, (unsigned long *)bkt);
lock_map_acquire(&tbl->dep_map);
+ return flags;
}
-static inline void rht_lock_nested(struct bucket_table *tbl,
- struct rhash_lock_head __rcu **bucket,
- unsigned int subclass)
+static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
+ struct rhash_lock_head __rcu **bucket,
+ unsigned int subclass)
{
- local_bh_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
bit_spin_lock(0, (unsigned long *)bucket);
lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
+ return flags;
}
static inline void rht_unlock(struct bucket_table *tbl,
- struct rhash_lock_head __rcu **bkt)
+ struct rhash_lock_head __rcu **bkt,
+ unsigned long flags)
{
lock_map_release(&tbl->dep_map);
bit_spin_unlock(0, (unsigned long *)bkt);
- local_bh_enable();
+ local_irq_restore(flags);
}
-static inline struct rhash_head *__rht_ptr(
- struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
+enum rht_lookup_freq {
+ RHT_LOOKUP_NORMAL,
+ RHT_LOOKUP_LIKELY,
+};
+
+static __always_inline struct rhash_head *__rht_ptr(
+ struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt,
+ const enum rht_lookup_freq freq)
{
- return (struct rhash_head *)
- ((unsigned long)p & ~BIT(0) ?:
- (unsigned long)RHT_NULLS_MARKER(bkt));
+ unsigned long p_val = (unsigned long)p & ~BIT(0);
+
+ BUILD_BUG_ON(!__builtin_constant_p(freq));
+
+ if (freq == RHT_LOOKUP_LIKELY)
+ return (struct rhash_head *)
+ (likely(p_val) ? p_val : (unsigned long)RHT_NULLS_MARKER(bkt));
+ else
+ return (struct rhash_head *)
+ (p_val ?: (unsigned long)RHT_NULLS_MARKER(bkt));
}
/*
@@ -363,10 +383,17 @@ static inline struct rhash_head *__rht_ptr(
* rht_ptr_exclusive() dereferences in a context where exclusive
* access is guaranteed, such as when destroying the table.
*/
+static __always_inline struct rhash_head *__rht_ptr_rcu(
+ struct rhash_lock_head __rcu *const *bkt,
+ const enum rht_lookup_freq freq)
+{
+ return __rht_ptr(rcu_dereference_all(*bkt), bkt, freq);
+}
+
static inline struct rhash_head *rht_ptr_rcu(
struct rhash_lock_head __rcu *const *bkt)
{
- return __rht_ptr(rcu_dereference(*bkt), bkt);
+ return __rht_ptr_rcu(bkt, RHT_LOOKUP_NORMAL);
}
static inline struct rhash_head *rht_ptr(
@@ -374,13 +401,15 @@ static inline struct rhash_head *rht_ptr(
struct bucket_table *tbl,
unsigned int hash)
{
- return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
+ return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt,
+ RHT_LOOKUP_NORMAL);
}
static inline struct rhash_head *rht_ptr_exclusive(
struct rhash_lock_head __rcu *const *bkt)
{
- return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
+ return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt,
+ RHT_LOOKUP_NORMAL);
}
static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
@@ -393,7 +422,8 @@ static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
static inline void rht_assign_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
- struct rhash_head *obj)
+ struct rhash_head *obj,
+ unsigned long flags)
{
if (rht_is_a_nulls(obj))
obj = NULL;
@@ -401,7 +431,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
rcu_assign_pointer(*bkt, (void *)obj);
preempt_enable();
__release(bitlock);
- local_bh_enable();
+ local_irq_restore(flags);
}
/**
@@ -489,7 +519,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
for (({barrier(); }), \
pos = head; \
!rht_is_a_nulls(pos); \
- pos = rcu_dereference_raw(pos->next))
+ pos = rcu_dereference_all(pos->next))
/**
* rht_for_each_rcu - iterate over rcu hash chain
@@ -505,7 +535,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
for (({barrier(); }), \
pos = rht_ptr_rcu(rht_bucket(tbl, hash)); \
!rht_is_a_nulls(pos); \
- pos = rcu_dereference_raw(pos->next))
+ pos = rcu_dereference_all(pos->next))
/**
* rht_for_each_entry_rcu_from - iterated over rcu hash chain from given head
@@ -552,7 +582,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
* list returned by rhltable_lookup.
*/
#define rhl_for_each_rcu(pos, list) \
- for (pos = list; pos; pos = rcu_dereference_raw(pos->next))
+ for (pos = list; pos; pos = rcu_dereference_all(pos->next))
/**
* rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
@@ -566,7 +596,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
*/
#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
for (pos = list; pos && rht_entry(tpos, pos, member); \
- pos = rcu_dereference_raw(pos->next))
+ pos = rcu_dereference_all(pos->next))
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
const void *obj)
@@ -578,9 +608,10 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
}
/* Internal function, do not use. */
-static inline struct rhash_head *__rhashtable_lookup(
+static __always_inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
- const struct rhashtable_params params)
+ const struct rhashtable_params params,
+ const enum rht_lookup_freq freq)
{
struct rhashtable_compare_arg arg = {
.ht = ht,
@@ -591,12 +622,13 @@ static inline struct rhash_head *__rhashtable_lookup(
struct rhash_head *he;
unsigned int hash;
+ BUILD_BUG_ON(!__builtin_constant_p(freq));
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
bkt = rht_bucket(tbl, hash);
do {
- rht_for_each_rcu_from(he, rht_ptr_rcu(bkt), tbl, hash) {
+ rht_for_each_rcu_from(he, __rht_ptr_rcu(bkt, freq), tbl, hash) {
if (params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, he)) :
rhashtable_compare(&arg, rht_obj(ht, he)))
@@ -625,21 +657,32 @@ restart:
* @params: hash table parameters
*
* Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
+ * for an entry with an identical key. The first matching entry is returned.
*
* This must only be called under the RCU read lock.
*
* Returns the first entry on which the compare function returned true.
*/
-static inline void *rhashtable_lookup(
+static __always_inline void *rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
- struct rhash_head *he = __rhashtable_lookup(ht, key, params);
+ struct rhash_head *he = __rhashtable_lookup(ht, key, params,
+ RHT_LOOKUP_NORMAL);
return he ? rht_obj(ht, he) : NULL;
}
+static __always_inline void *rhashtable_lookup_likely(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(ht, key, params,
+ RHT_LOOKUP_LIKELY);
+
+ return likely(he) ? rht_obj(ht, he) : NULL;
+}
+
/**
* rhashtable_lookup_fast - search hash table, without RCU read lock
* @ht: hash table
@@ -647,14 +690,14 @@ static inline void *rhashtable_lookup(
* @params: hash table parameters
*
* Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
+ * for an entry with an identical key. The first matching entry is returned.
*
* Only use this function when you have other mechanisms guaranteeing
* that the object won't go away after the RCU read lock is released.
*
* Returns the first entry on which the compare function returned true.
*/
-static inline void *rhashtable_lookup_fast(
+static __always_inline void *rhashtable_lookup_fast(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
@@ -674,27 +717,38 @@ static inline void *rhashtable_lookup_fast(
* @params: hash table parameters
*
* Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. All matching entries are returned
+ * for an entry with an identical key. All matching entries are returned
* in a list.
*
* This must only be called under the RCU read lock.
*
* Returns the list of entries that match the given key.
*/
-static inline struct rhlist_head *rhltable_lookup(
+static __always_inline struct rhlist_head *rhltable_lookup(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
{
- struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
+ struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
+ RHT_LOOKUP_NORMAL);
return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}
+static __always_inline struct rhlist_head *rhltable_lookup_likely(
+ struct rhltable *hlt, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
+ RHT_LOOKUP_LIKELY);
+
+ return likely(he) ? container_of(he, struct rhlist_head, rhead) : NULL;
+}
+
/* Internal function, please use rhashtable_insert_fast() instead. This
- * function returns the existing element already in hashes in there is a clash,
+ * function returns the existing element already in hashes if there is a clash,
* otherwise it returns an error via ERR_PTR().
*/
-static inline void *__rhashtable_insert_fast(
+static __always_inline void *__rhashtable_insert_fast(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params, bool rhlist)
{
@@ -706,6 +760,7 @@ static inline void *__rhashtable_insert_fast(
struct rhash_head __rcu **pprev;
struct bucket_table *tbl;
struct rhash_head *head;
+ unsigned long flags;
unsigned int hash;
int elasticity;
void *data;
@@ -720,11 +775,11 @@ static inline void *__rhashtable_insert_fast(
if (!bkt)
goto out;
pprev = NULL;
- rht_lock(tbl, bkt);
+ flags = rht_lock(tbl, bkt);
if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
rcu_read_unlock();
return rhashtable_insert_slow(ht, key, obj);
}
@@ -756,9 +811,9 @@ slow_path:
RCU_INIT_POINTER(list->rhead.next, head);
if (pprev) {
rcu_assign_pointer(*pprev, obj);
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
} else
- rht_assign_unlock(tbl, bkt, obj);
+ rht_assign_unlock(tbl, bkt, obj, flags);
data = NULL;
goto out;
}
@@ -785,7 +840,7 @@ slow_path:
}
atomic_inc(&ht->nelems);
- rht_assign_unlock(tbl, bkt, obj);
+ rht_assign_unlock(tbl, bkt, obj, flags);
if (rht_grow_above_75(ht, tbl))
schedule_work(&ht->run_work);
@@ -797,7 +852,7 @@ out:
return data;
out_unlock:
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
goto out;
}
@@ -816,7 +871,7 @@ out_unlock:
* Will trigger an automatic deferred table resizing if residency in the
* table grows beyond 70%.
*/
-static inline int rhashtable_insert_fast(
+static __always_inline int rhashtable_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
@@ -845,7 +900,7 @@ static inline int rhashtable_insert_fast(
* Will trigger an automatic deferred table resizing if residency in the
* table grows beyond 70%.
*/
-static inline int rhltable_insert_key(
+static __always_inline int rhltable_insert_key(
struct rhltable *hlt, const void *key, struct rhlist_head *list,
const struct rhashtable_params params)
{
@@ -868,7 +923,7 @@ static inline int rhltable_insert_key(
* Will trigger an automatic deferred table resizing if residency in the
* table grows beyond 70%.
*/
-static inline int rhltable_insert(
+static __always_inline int rhltable_insert(
struct rhltable *hlt, struct rhlist_head *list,
const struct rhashtable_params params)
{
@@ -893,7 +948,7 @@ static inline int rhltable_insert(
* Will trigger an automatic deferred table resizing if residency in the
* table grows beyond 70%.
*/
-static inline int rhashtable_lookup_insert_fast(
+static __always_inline int rhashtable_lookup_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
@@ -920,7 +975,7 @@ static inline int rhashtable_lookup_insert_fast(
* object if it exists, NULL if it did not and the insertion was successful,
* and an ERR_PTR otherwise.
*/
-static inline void *rhashtable_lookup_get_insert_fast(
+static __always_inline void *rhashtable_lookup_get_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
@@ -947,7 +1002,7 @@ static inline void *rhashtable_lookup_get_insert_fast(
*
* Returns zero on success.
*/
-static inline int rhashtable_lookup_insert_key(
+static __always_inline int rhashtable_lookup_insert_key(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params)
{
@@ -973,7 +1028,7 @@ static inline int rhashtable_lookup_insert_key(
* object if it exists, NULL if it does not and the insertion was successful,
* and an ERR_PTR otherwise.
*/
-static inline void *rhashtable_lookup_get_insert_key(
+static __always_inline void *rhashtable_lookup_get_insert_key(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params)
{
@@ -983,7 +1038,7 @@ static inline void *rhashtable_lookup_get_insert_key(
}
/* Internal function, please use rhashtable_remove_fast() instead */
-static inline int __rhashtable_remove_fast_one(
+static __always_inline int __rhashtable_remove_fast_one(
struct rhashtable *ht, struct bucket_table *tbl,
struct rhash_head *obj, const struct rhashtable_params params,
bool rhlist)
@@ -991,6 +1046,7 @@ static inline int __rhashtable_remove_fast_one(
struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
+ unsigned long flags;
unsigned int hash;
int err = -ENOENT;
@@ -999,7 +1055,7 @@ static inline int __rhashtable_remove_fast_one(
if (!bkt)
return -ENOENT;
pprev = NULL;
- rht_lock(tbl, bkt);
+ flags = rht_lock(tbl, bkt);
rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
struct rhlist_head *list;
@@ -1043,14 +1099,14 @@ static inline int __rhashtable_remove_fast_one(
if (pprev) {
rcu_assign_pointer(*pprev, obj);
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
} else {
- rht_assign_unlock(tbl, bkt, obj);
+ rht_assign_unlock(tbl, bkt, obj, flags);
}
goto unlocked;
}
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
unlocked:
if (err > 0) {
atomic_dec(&ht->nelems);
@@ -1064,7 +1120,7 @@ unlocked:
}
/* Internal function, please use rhashtable_remove_fast() instead */
-static inline int __rhashtable_remove_fast(
+static __always_inline int __rhashtable_remove_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params, bool rhlist)
{
@@ -1105,7 +1161,7 @@ static inline int __rhashtable_remove_fast(
*
* Returns zero on success, -ENOENT if the entry could not be found.
*/
-static inline int rhashtable_remove_fast(
+static __always_inline int rhashtable_remove_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
@@ -1120,14 +1176,14 @@ static inline int rhashtable_remove_fast(
*
 * Since the hash chain is singly linked, the removal operation needs to
* walk the bucket chain upon removal. The removal operation is thus
- * considerable slow if the hash table is not correctly sized.
+ * considerably slower if the hash table is not correctly sized.
*
* Will automatically shrink the table if permitted when residency drops
* below 30%
*
* Returns zero on success, -ENOENT if the entry could not be found.
*/
-static inline int rhltable_remove(
+static __always_inline int rhltable_remove(
struct rhltable *hlt, struct rhlist_head *list,
const struct rhashtable_params params)
{
@@ -1135,7 +1191,7 @@ static inline int rhltable_remove(
}
/* Internal function, please use rhashtable_replace_fast() instead */
-static inline int __rhashtable_replace_fast(
+static __always_inline int __rhashtable_replace_fast(
struct rhashtable *ht, struct bucket_table *tbl,
struct rhash_head *obj_old, struct rhash_head *obj_new,
const struct rhashtable_params params)
@@ -1143,6 +1199,7 @@ static inline int __rhashtable_replace_fast(
struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
+ unsigned long flags;
unsigned int hash;
int err = -ENOENT;
@@ -1158,7 +1215,7 @@ static inline int __rhashtable_replace_fast(
return -ENOENT;
pprev = NULL;
- rht_lock(tbl, bkt);
+ flags = rht_lock(tbl, bkt);
rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
if (he != obj_old) {
@@ -1169,15 +1226,15 @@ static inline int __rhashtable_replace_fast(
rcu_assign_pointer(obj_new->next, obj_old->next);
if (pprev) {
rcu_assign_pointer(*pprev, obj_new);
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
} else {
- rht_assign_unlock(tbl, bkt, obj_new);
+ rht_assign_unlock(tbl, bkt, obj_new, flags);
}
err = 0;
goto unlocked;
}
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
unlocked:
return err;
@@ -1197,7 +1254,7 @@ unlocked:
* Returns zero on success, -ENOENT if the entry could not be found,
* -EINVAL if hash is not the same for the old and new objects.
*/
-static inline int rhashtable_replace_fast(
+static __always_inline int rhashtable_replace_fast(
struct rhashtable *ht, struct rhash_head *obj_old,
struct rhash_head *obj_new,
const struct rhashtable_params params)
@@ -1248,7 +1305,7 @@ static inline int rhashtable_replace_fast(
static inline void rhltable_walk_enter(struct rhltable *hlt,
struct rhashtable_iter *iter)
{
- return rhashtable_walk_enter(&hlt->ht, iter);
+ rhashtable_walk_enter(&hlt->ht, iter);
}
/**
@@ -1264,12 +1321,12 @@ static inline void rhltable_free_and_destroy(struct rhltable *hlt,
void *arg),
void *arg)
{
- return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
+ rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}
static inline void rhltable_destroy(struct rhltable *hlt)
{
- return rhltable_free_and_destroy(hlt, NULL, NULL);
+ rhltable_free_and_destroy(hlt, NULL, NULL);
}
#endif /* _LINUX_RHASHTABLE_H */
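Two caller-visible changes in this file deserve a sketch: rht_lock() now
returns the saved IRQ flags that rht_unlock()/rht_assign_unlock() consume
(this header's own callers are updated in the hunks above), and
rhashtable_lookup_likely() compiles the found-entry path as likely() for hot
lookups. A hedged example reusing the hypothetical example_obj/example_params
from the rhashtable-types sketch:

static void example_hot_lookup(struct rhashtable *ht, u32 key)
{
	struct example_obj *obj;

	rcu_read_lock();
	/* Same semantics as rhashtable_lookup(), biased for the hit case. */
	obj = rhashtable_lookup_likely(ht, &key, example_params);
	if (obj)
		pr_debug("found key %u\n", obj->key);
	rcu_read_unlock();
}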
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index dac53fd3afea..876358cfe1b1 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -6,6 +6,8 @@
#include <linux/seq_file.h>
#include <linux/poll.h>
+#include <uapi/linux/trace_mmap.h>
+
struct trace_buffer;
struct ring_buffer_iter;
@@ -87,6 +89,14 @@ void ring_buffer_discard_commit(struct trace_buffer *buffer,
struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
+struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags,
+ int order, unsigned long start,
+ unsigned long range_size,
+ unsigned long scratch_size,
+ struct lock_class_key *key);
+
+void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size);
+
/*
* Because the ring buffer is generic, if other users of the ring buffer get
* traced by ftrace, it can produce lockdep warnings. We need to keep each
@@ -98,10 +108,24 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
__ring_buffer_alloc((size), (flags), &__key); \
})
-int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
-__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
- struct file *filp, poll_table *poll_table);
+/*
+ * Because the ring buffer is generic, if other users of the ring buffer get
+ * traced by ftrace, it can produce lockdep warnings. We need to keep each
+ * ring buffer's lock class separate.
+ */
+#define ring_buffer_alloc_range(size, flags, order, start, range_size, s_size) \
+({ \
+ static struct lock_class_key __key; \
+ __ring_buffer_alloc_range((size), (flags), (order), (start), \
+ (range_size), (s_size), &__key); \
+})
+typedef bool (*ring_buffer_cond_fn)(void *data);
+int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
+ ring_buffer_cond_fn cond, void *data);
+__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
+ struct file *filp, poll_table *poll_table, int full);
+void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);
#define RING_BUFFER_ALL_CPUS -1
@@ -113,14 +137,16 @@ void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);
struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
unsigned long length);
-int ring_buffer_unlock_commit(struct trace_buffer *buffer,
- struct ring_buffer_event *event);
+int ring_buffer_unlock_commit(struct trace_buffer *buffer);
int ring_buffer_write(struct trace_buffer *buffer,
unsigned long length, void *data);
void ring_buffer_nest_start(struct trace_buffer *buffer);
void ring_buffer_nest_end(struct trace_buffer *buffer);
+DEFINE_GUARD(ring_buffer_nest, struct trace_buffer *,
+ ring_buffer_nest_start(_T), ring_buffer_nest_end(_T))
+
struct ring_buffer_event *
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
@@ -129,9 +155,7 @@ ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
struct ring_buffer_iter *
-ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
-void ring_buffer_read_prepare_sync(void);
-void ring_buffer_read_start(struct ring_buffer_iter *iter);
+ring_buffer_read_start(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);
struct ring_buffer_event *
@@ -142,6 +166,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);
unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer);
void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
@@ -168,6 +193,7 @@ void ring_buffer_record_off(struct trace_buffer *buffer);
void ring_buffer_record_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
+bool ring_buffer_record_is_on_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);
@@ -189,18 +215,26 @@ void ring_buffer_set_clock(struct trace_buffer *buffer,
void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);
-size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);
-void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
-void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
-int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
+struct buffer_data_read_page;
+struct buffer_data_read_page *
+ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
+ struct buffer_data_read_page *page);
+int ring_buffer_read_page(struct trace_buffer *buffer,
+ struct buffer_data_read_page *data_page,
size_t len, int cpu, int full);
+void *ring_buffer_read_page_data(struct buffer_data_read_page *page);
struct trace_seq;
int ring_buffer_print_entry_header(struct trace_seq *s);
-int ring_buffer_print_page_header(struct trace_seq *s);
+int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s);
+
+int ring_buffer_subbuf_order_get(struct trace_buffer *buffer);
+int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order);
+int ring_buffer_subbuf_size_get(struct trace_buffer *buffer);
enum ring_buffer_flags {
RB_FL_OVERWRITE = 1 << 0,
@@ -212,4 +246,8 @@ int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#define trace_rb_cpu_prepare NULL
#endif
+int ring_buffer_map(struct trace_buffer *buffer, int cpu,
+ struct vm_area_struct *vma);
+int ring_buffer_unmap(struct trace_buffer *buffer, int cpu);
+int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu);
#endif /* _LINUX_RING_BUFFER_H */
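The DEFINE_GUARD() added above makes ring buffer nesting scope-bound via
<linux/cleanup.h>: the guard calls ring_buffer_nest_start() on entry and
ring_buffer_nest_end() on every exit path. A minimal sketch with a
hypothetical caller:

#include <linux/cleanup.h>
#include <linux/ring_buffer.h>

/* Hypothetical: write an event from a context that may already be writing
 * to the same buffer; the guard ends the nesting even on early return. */
static void example_nested_event(struct trace_buffer *buffer)
{
	guard(ring_buffer_nest)(buffer);

	/* ... ring_buffer_lock_reserve() / ring_buffer_unlock_commit() ... */
}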
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 2cd637268b4f..2c29f21ba9e5 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -78,7 +78,7 @@
#define RIO_CTAG_RESRVD 0xfffe0000 /* Reserved */
#define RIO_CTAG_UDEVID 0x0001ffff /* Unique device identifier */
-extern struct bus_type rio_bus_type;
+extern const struct bus_type rio_bus_type;
extern struct class rio_mport_class;
struct rio_mport;
@@ -465,7 +465,7 @@ struct rio_driver {
struct device_driver driver;
};
-#define to_rio_driver(drv) container_of(drv,struct rio_driver, driver)
+#define to_rio_driver(drv) container_of_const(drv, struct rio_driver, driver)
union rio_pw_msg {
struct {
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index e49c32b0f394..dd8afe511242 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -391,13 +391,8 @@ struct rio_dev *rio_dev_get(struct rio_dev *);
void rio_dev_put(struct rio_dev *);
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
-extern struct dma_chan *rio_request_dma(struct rio_dev *rdev);
extern struct dma_chan *rio_request_mport_dma(struct rio_mport *mport);
extern void rio_release_dma(struct dma_chan *dchan);
-extern struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(
- struct rio_dev *rdev, struct dma_chan *dchan,
- struct rio_dma_data *data,
- enum dma_transfer_direction direction, unsigned long flags);
extern struct dma_async_tx_descriptor *rio_dma_prep_xfer(
struct dma_chan *dchan, u16 destid,
struct rio_dma_data *data,
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
index 4846f72759b2..c7e2f21dd5c1 100644
--- a/include/linux/rio_ids.h
+++ b/include/linux/rio_ids.h
@@ -9,18 +9,6 @@
#ifndef LINUX_RIO_IDS_H
#define LINUX_RIO_IDS_H
-#define RIO_VID_FREESCALE 0x0002
-#define RIO_DID_MPC8560 0x0003
-
-#define RIO_VID_TUNDRA 0x000d
-#define RIO_DID_TSI500 0x0500
-#define RIO_DID_TSI568 0x0568
-#define RIO_DID_TSI572 0x0572
-#define RIO_DID_TSI574 0x0574
-#define RIO_DID_TSI576 0x0578 /* Same ID as Tsi578 */
-#define RIO_DID_TSI577 0x0577
-#define RIO_DID_TSI578 0x0578
-
#define RIO_VID_IDT 0x0038
#define RIO_DID_IDT70K200 0x0310
#define RIO_DID_IDTCPS8 0x035c
@@ -33,7 +21,6 @@
#define RIO_DID_IDTCPS1616 0x0379
#define RIO_DID_IDTVPS1616 0x0377
#define RIO_DID_IDTSPS1616 0x0378
-#define RIO_DID_TSI721 0x80ab
#define RIO_DID_IDTRXS1632 0x80e5
#define RIO_DID_IDTRXS2448 0x80e6
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index def5c62c93b3..daa92a58585d 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -11,6 +11,9 @@
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/memremap.h>
+#include <linux/bit_spinlock.h>
/*
* The anon_vma heads a list of private "related" vmas, to scan if
@@ -39,12 +42,15 @@ struct anon_vma {
atomic_t refcount;
/*
- * Count of child anon_vmas and VMAs which points to this anon_vma.
+ * Count of child anon_vmas. Equals the count of all anon_vmas that
+ * have ->parent pointing to this one, including itself.
*
 * This counter is used for making decisions about reusing an anon_vma
 * instead of forking a new one. See comments in function anon_vma_clone.
*/
- unsigned degree;
+ unsigned long num_children;
+ /* Count of VMAs whose ->anon_vma pointer points to this object. */
+ unsigned long num_active_vmas;
struct anon_vma *parent; /* Parent of this anon_vma */
@@ -86,18 +92,15 @@ struct anon_vma_chain {
};
enum ttu_flags {
- TTU_MIGRATION = 0x1, /* migration mode */
- TTU_MUNLOCK = 0x2, /* munlock mode */
-
TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
- TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */
+ TTU_SYNC = 0x10, /* avoid racy checks with PVMW_SYNC */
+ TTU_HWPOISON = 0x20, /* do convert pte to hwpoison entry */
TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible
* and caller guarantees they will
* do a final flush if necessary */
TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock:
* caller holds it */
- TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */
};
#ifdef CONFIG_MMU
@@ -119,6 +122,11 @@ static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
down_write(&anon_vma->root->rwsem);
}
+static inline int anon_vma_trylock_write(struct anon_vma *anon_vma)
+{
+ return down_write_trylock(&anon_vma->root->rwsem);
+}
+
static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
up_write(&anon_vma->root->rwsem);
@@ -129,6 +137,11 @@ static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
down_read(&anon_vma->root->rwsem);
}
+static inline int anon_vma_trylock_read(struct anon_vma *anon_vma)
+{
+ return down_read_trylock(&anon_vma->root->rwsem);
+}
+
static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
up_read(&anon_vma->root->rwsem);
@@ -159,50 +172,765 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
unlink_anon_vmas(next);
}
-struct anon_vma *page_get_anon_vma(struct page *page);
+struct anon_vma *folio_get_anon_vma(const struct folio *folio);
-/* bitflags for do_page_add_anon_rmap() */
-#define RMAP_EXCLUSIVE 0x01
-#define RMAP_COMPOUND 0x02
+#ifdef CONFIG_MM_ID
+static __always_inline void folio_lock_large_mapcount(struct folio *folio)
+{
+ bit_spin_lock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids);
+}
+
+static __always_inline void folio_unlock_large_mapcount(struct folio *folio)
+{
+ __bit_spin_unlock(FOLIO_MM_IDS_LOCK_BITNUM, &folio->_mm_ids);
+}
+
+static inline unsigned int folio_mm_id(const struct folio *folio, int idx)
+{
+ VM_WARN_ON_ONCE(idx != 0 && idx != 1);
+ return folio->_mm_id[idx] & MM_ID_MASK;
+}
+
+static inline void folio_set_mm_id(struct folio *folio, int idx, mm_id_t id)
+{
+ VM_WARN_ON_ONCE(idx != 0 && idx != 1);
+ folio->_mm_id[idx] &= ~MM_ID_MASK;
+ folio->_mm_id[idx] |= id;
+}
+
+static inline void __folio_large_mapcount_sanity_checks(const struct folio *folio,
+ int diff, mm_id_t mm_id)
+{
+ VM_WARN_ON_ONCE(!folio_test_large(folio) || folio_test_hugetlb(folio));
+ VM_WARN_ON_ONCE(diff <= 0);
+ VM_WARN_ON_ONCE(mm_id < MM_ID_MIN || mm_id > MM_ID_MAX);
+
+ /*
+ * Make sure we can detect at least one complete PTE mapping of the
+ * folio in a single MM as "exclusively mapped". This is primarily
+ * a check on 32bit, where we currently reduce the size of the per-MM
+ * mapcount to a short.
+ */
+ VM_WARN_ON_ONCE(diff > folio_large_nr_pages(folio));
+ VM_WARN_ON_ONCE(folio_large_nr_pages(folio) - 1 > MM_ID_MAPCOUNT_MAX);
+
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 0) == MM_ID_DUMMY &&
+ folio->_mm_id_mapcount[0] != -1);
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != MM_ID_DUMMY &&
+ folio->_mm_id_mapcount[0] < 0);
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 1) == MM_ID_DUMMY &&
+ folio->_mm_id_mapcount[1] != -1);
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 1) != MM_ID_DUMMY &&
+ folio->_mm_id_mapcount[1] < 0);
+ VM_WARN_ON_ONCE(!folio_mapped(folio) &&
+ test_bit(FOLIO_MM_IDS_SHARED_BITNUM, &folio->_mm_ids));
+}
+
+static __always_inline void folio_set_large_mapcount(struct folio *folio,
+ int mapcount, struct vm_area_struct *vma)
+{
+ __folio_large_mapcount_sanity_checks(folio, mapcount, vma->vm_mm->mm_id);
+
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 0) != MM_ID_DUMMY);
+ VM_WARN_ON_ONCE(folio_mm_id(folio, 1) != MM_ID_DUMMY);
+
+ /* Note: mapcounts start at -1. */
+ atomic_set(&folio->_large_mapcount, mapcount - 1);
+ folio->_mm_id_mapcount[0] = mapcount - 1;
+ folio_set_mm_id(folio, 0, vma->vm_mm->mm_id);
+}
+
+static __always_inline int folio_add_return_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ const mm_id_t mm_id = vma->vm_mm->mm_id;
+ int new_mapcount_val;
+
+ folio_lock_large_mapcount(folio);
+ __folio_large_mapcount_sanity_checks(folio, diff, mm_id);
+
+ new_mapcount_val = atomic_read(&folio->_large_mapcount) + diff;
+ atomic_set(&folio->_large_mapcount, new_mapcount_val);
+
+ /*
+ * If a folio is mapped more than once into an MM on 32bit, we
+ * can in theory overflow the per-MM mapcount (although only for
+ * fairly large folios), turning it negative. In that case, just
+ * free up the slot and mark the folio "mapped shared", otherwise
+ * we might be in trouble when unmapping pages later.
+ */
+ if (folio_mm_id(folio, 0) == mm_id) {
+ folio->_mm_id_mapcount[0] += diff;
+ if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio->_mm_id_mapcount[0] < 0)) {
+ folio->_mm_id_mapcount[0] = -1;
+ folio_set_mm_id(folio, 0, MM_ID_DUMMY);
+ folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT;
+ }
+ } else if (folio_mm_id(folio, 1) == mm_id) {
+ folio->_mm_id_mapcount[1] += diff;
+ if (!IS_ENABLED(CONFIG_64BIT) && unlikely(folio->_mm_id_mapcount[1] < 0)) {
+ folio->_mm_id_mapcount[1] = -1;
+ folio_set_mm_id(folio, 1, MM_ID_DUMMY);
+ folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT;
+ }
+ } else if (folio_mm_id(folio, 0) == MM_ID_DUMMY) {
+ folio_set_mm_id(folio, 0, mm_id);
+ folio->_mm_id_mapcount[0] = diff - 1;
+ /* We might have other mappings already. */
+ if (new_mapcount_val != diff - 1)
+ folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT;
+ } else if (folio_mm_id(folio, 1) == MM_ID_DUMMY) {
+ folio_set_mm_id(folio, 1, mm_id);
+ folio->_mm_id_mapcount[1] = diff - 1;
+ /* Slot 0 certainly has mappings as well. */
+ folio->_mm_ids |= FOLIO_MM_IDS_SHARED_BIT;
+ }
+ folio_unlock_large_mapcount(folio);
+ return new_mapcount_val + 1;
+}
+#define folio_add_large_mapcount folio_add_return_large_mapcount
+
+static __always_inline int folio_sub_return_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ const mm_id_t mm_id = vma->vm_mm->mm_id;
+ int new_mapcount_val;
+
+ folio_lock_large_mapcount(folio);
+ __folio_large_mapcount_sanity_checks(folio, diff, mm_id);
+
+ new_mapcount_val = atomic_read(&folio->_large_mapcount) - diff;
+ atomic_set(&folio->_large_mapcount, new_mapcount_val);
+
+ /*
+ * There are valid corner cases where we might underflow a per-MM
+ * mapcount (some mappings added when no slot was free, some mappings
+ * added once a slot was free), so we always set it to -1 once we go
+ * negative.
+ */
+ if (folio_mm_id(folio, 0) == mm_id) {
+ folio->_mm_id_mapcount[0] -= diff;
+ if (folio->_mm_id_mapcount[0] >= 0)
+ goto out;
+ folio->_mm_id_mapcount[0] = -1;
+ folio_set_mm_id(folio, 0, MM_ID_DUMMY);
+ } else if (folio_mm_id(folio, 1) == mm_id) {
+ folio->_mm_id_mapcount[1] -= diff;
+ if (folio->_mm_id_mapcount[1] >= 0)
+ goto out;
+ folio->_mm_id_mapcount[1] = -1;
+ folio_set_mm_id(folio, 1, MM_ID_DUMMY);
+ }
+
+ /*
+ * If one MM slot owns all mappings, the folio is mapped exclusively.
+ * Note that if the folio is now unmapped (new_mapcount_val == -1), both
+ * slots must be free (mapcount == -1), and we'll also mark it as
+ * exclusive.
+ */
+ if (folio->_mm_id_mapcount[0] == new_mapcount_val ||
+ folio->_mm_id_mapcount[1] == new_mapcount_val)
+ folio->_mm_ids &= ~FOLIO_MM_IDS_SHARED_BIT;
+out:
+ folio_unlock_large_mapcount(folio);
+ return new_mapcount_val + 1;
+}
+#define folio_sub_large_mapcount folio_sub_return_large_mapcount
+#else /* !CONFIG_MM_ID */
+/*
+ * See __folio_rmap_sanity_checks(), we might map large folios even without
+ * CONFIG_TRANSPARENT_HUGEPAGE. We'll keep that working for now.
+ */
+static inline void folio_set_large_mapcount(struct folio *folio, int mapcount,
+ struct vm_area_struct *vma)
+{
+ /* Note: mapcounts start at -1. */
+ atomic_set(&folio->_large_mapcount, mapcount - 1);
+}
+
+static inline void folio_add_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ atomic_add(diff, &folio->_large_mapcount);
+}
+
+static inline int folio_add_return_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ BUILD_BUG();
+}
+
+static inline void folio_sub_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ atomic_sub(diff, &folio->_large_mapcount);
+}
+
+static inline int folio_sub_return_large_mapcount(struct folio *folio,
+ int diff, struct vm_area_struct *vma)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_MM_ID */
+
+#define folio_inc_large_mapcount(folio, vma) \
+ folio_add_large_mapcount(folio, 1, vma)
+#define folio_inc_return_large_mapcount(folio, vma) \
+ folio_add_return_large_mapcount(folio, 1, vma)
+#define folio_dec_large_mapcount(folio, vma) \
+ folio_sub_large_mapcount(folio, 1, vma)
+#define folio_dec_return_large_mapcount(folio, vma) \
+ folio_sub_return_large_mapcount(folio, 1, vma)
+
+/* RMAP flags, currently only relevant for some anon rmap operations. */
+typedef int __bitwise rmap_t;
+
+/*
+ * No special request: A mapped anonymous (sub)page is possibly shared between
+ * processes.
+ */
+#define RMAP_NONE ((__force rmap_t)0)
+
+/* The anonymous (sub)page is exclusive to a single process. */
+#define RMAP_EXCLUSIVE ((__force rmap_t)BIT(0))
+
+static __always_inline void __folio_rmap_sanity_checks(const struct folio *folio,
+ const struct page *page, int nr_pages, enum pgtable_level level)
+{
+ /* hugetlb folios are handled separately. */
+ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
+
+ /* When (un)mapping zeropages, we should never touch ref+mapcount. */
+ VM_WARN_ON_FOLIO(is_zero_folio(folio), folio);
+
+ /*
+ * TODO: we get driver-allocated folios that have nothing to do with
+ * the rmap using vm_insert_page(); therefore, we cannot assume that
+ * folio_test_large_rmappable() holds for large folios. We should
+ * handle any desired mapcount+stats accounting for these folios in
+ * VM_MIXEDMAP VMAs separately, and then sanity-check here that
+ * we really only get rmappable folios.
+ */
+
+ VM_WARN_ON_ONCE(nr_pages <= 0);
+ VM_WARN_ON_FOLIO(page_folio(page) != folio, folio);
+ VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);
+
+ switch (level) {
+ case PGTABLE_LEVEL_PTE:
+ break;
+ case PGTABLE_LEVEL_PMD:
+ /*
+ * We don't support folios larger than a single PMD yet. So
+ * when PGTABLE_LEVEL_PMD is set, we assume that we are creating
+ * a single "entire" mapping of the folio.
+ */
+ VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
+ VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
+ break;
+ case PGTABLE_LEVEL_PUD:
+ /*
+ * Assume that we are creating a single "entire" mapping of the
+ * folio.
+ */
+ VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PUD_NR, folio);
+ VM_WARN_ON_FOLIO(nr_pages != HPAGE_PUD_NR, folio);
+ break;
+ default:
+ BUILD_BUG();
+ }
+
+ /*
+ * Anon folios must have an associated live anon_vma as long as they're
+ * mapped into userspace.
+ * Note that the atomic_read() mainly does two things:
+ *
+ * 1. In KASAN builds with CONFIG_SLUB_RCU_DEBUG, it causes KASAN to
+ * check that the associated anon_vma has not yet been freed (subject
+ * to KASAN's usual limitations). This check will pass if the
+ * anon_vma's refcount has already dropped to 0 but an RCU grace
+ * period hasn't passed since then.
+ * 2. If the anon_vma has not yet been freed, it checks that the
+ * anon_vma still has a nonzero refcount (as opposed to being in the
+ * middle of an RCU delay for getting freed).
+ */
+ if (folio_test_anon(folio) && !folio_test_ksm(folio)) {
+ unsigned long mapping = (unsigned long)folio->mapping;
+ struct anon_vma *anon_vma;
+
+ anon_vma = (void *)(mapping - FOLIO_MAPPING_ANON);
+ VM_WARN_ON_FOLIO(atomic_read(&anon_vma->refcount) == 0, folio);
+ }
+}
/*
* rmap interfaces called when adding or removing pte of page
*/
-void page_move_anon_rmap(struct page *, struct vm_area_struct *);
-void page_add_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long, bool);
-void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long, int);
-void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long, bool);
-void page_add_file_rmap(struct page *, bool);
-void page_remove_rmap(struct page *, bool);
+void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
+void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *, unsigned long address, rmap_t flags);
+#define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \
+ folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
+void folio_add_anon_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *, unsigned long address, rmap_t flags);
+void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
+ unsigned long address, rmap_t flags);
+void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *);
+#define folio_add_file_rmap_pte(folio, page, vma) \
+ folio_add_file_rmap_ptes(folio, page, 1, vma)
+void folio_add_file_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *);
+void folio_add_file_rmap_pud(struct folio *, struct page *,
+ struct vm_area_struct *);
+void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages,
+ struct vm_area_struct *);
+#define folio_remove_rmap_pte(folio, page, vma) \
+ folio_remove_rmap_ptes(folio, page, 1, vma)
+void folio_remove_rmap_pmd(struct folio *, struct page *,
+ struct vm_area_struct *);
+void folio_remove_rmap_pud(struct folio *, struct page *,
+ struct vm_area_struct *);
-void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long);
-void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
- unsigned long);
+void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *,
+ unsigned long address, rmap_t flags);
+void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
+ unsigned long address);
-static inline void page_dup_rmap(struct page *page, bool compound)
+/* See folio_try_dup_anon_rmap_*() */
+static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
+ struct vm_area_struct *vma)
{
- atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+
+ if (PageAnonExclusive(&folio->page)) {
+ if (unlikely(folio_needs_cow_for_dma(vma, folio)))
+ return -EBUSY;
+ ClearPageAnonExclusive(&folio->page);
+ }
+ atomic_inc(&folio->_entire_mapcount);
+ atomic_inc(&folio->_large_mapcount);
+ return 0;
+}
+
+/* See folio_try_share_anon_rmap_*() */
+static inline int hugetlb_try_share_anon_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio);
+
+ /* Paired with the memory barrier in try_grab_folio(). */
+ if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
+ smp_mb();
+
+ if (unlikely(folio_maybe_dma_pinned(folio)))
+ return -EBUSY;
+ ClearPageAnonExclusive(&folio->page);
+
+ /*
+ * This is conceptually a smp_wmb() paired with the smp_rmb() in
+ * gup_must_unshare().
+ */
+ if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
+ smp_mb__after_atomic();
+ return 0;
+}
+
+static inline void hugetlb_add_file_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+ VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
+
+ atomic_inc(&folio->_entire_mapcount);
+ atomic_inc(&folio->_large_mapcount);
+}
+
+static inline void hugetlb_remove_rmap(struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+
+ atomic_dec(&folio->_entire_mapcount);
+ atomic_dec(&folio->_large_mapcount);
+}
+
+static __always_inline void __folio_dup_file_rmap(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
+ enum pgtable_level level)
+{
+ const int orig_nr_pages = nr_pages;
+
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+ switch (level) {
+ case PGTABLE_LEVEL_PTE:
+ if (!folio_test_large(folio)) {
+ atomic_inc(&folio->_mapcount);
+ break;
+ }
+
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) {
+ do {
+ atomic_inc(&page->_mapcount);
+ } while (page++, --nr_pages > 0);
+ }
+ folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
+ break;
+ case PGTABLE_LEVEL_PMD:
+ case PGTABLE_LEVEL_PUD:
+ atomic_inc(&folio->_entire_mapcount);
+ folio_inc_large_mapcount(folio, dst_vma);
+ break;
+ default:
+ BUILD_BUG();
+ }
+}
+
+/**
+ * folio_dup_file_rmap_ptes - duplicate PTE mappings of a page range of a folio
+ * @folio: The folio to duplicate the mappings of
+ * @page: The first page to duplicate the mappings of
+ * @nr_pages: The number of pages of which the mapping will be duplicated
+ * @dst_vma: The destination vm area
+ *
+ * The page range of the folio is defined by [page, page + nr_pages)
+ *
+ * The caller needs to hold the page table lock.
+ */
+static inline void folio_dup_file_rmap_ptes(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *dst_vma)
+{
+ __folio_dup_file_rmap(folio, page, nr_pages, dst_vma, PGTABLE_LEVEL_PTE);
+}
+
+static __always_inline void folio_dup_file_rmap_pte(struct folio *folio,
+ struct page *page, struct vm_area_struct *dst_vma)
+{
+ __folio_dup_file_rmap(folio, page, 1, dst_vma, PGTABLE_LEVEL_PTE);
+}
+
+/**
+ * folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio
+ * @folio: The folio to duplicate the mapping of
+ * @page: The first page to duplicate the mapping of
+ * @dst_vma: The destination vm area
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock.
+ */
+static inline void folio_dup_file_rmap_pmd(struct folio *folio,
+ struct page *page, struct vm_area_struct *dst_vma)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, PGTABLE_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+#endif
+}
+
+static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma, enum pgtable_level level)
+{
+ const int orig_nr_pages = nr_pages;
+ bool maybe_pinned;
+ int i;
+
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+ /*
+ * If this folio may have been pinned by the parent process,
+ * don't allow duplicating the mappings but instead require, e.g.,
+ * copying the subpage immediately for the child so that we always
+ * guarantee the pinned folio won't be randomly replaced in the
+ * future on write faults.
+ */
+ maybe_pinned = likely(!folio_is_device_private(folio)) &&
+ unlikely(folio_needs_cow_for_dma(src_vma, folio));
+
+ /*
+ * No need to check+clear for already shared PTEs/PMDs of the
+ * folio. But if any page is PageAnonExclusive, we must fall back to
+ * copying if the folio may be pinned.
+ */
+ switch (level) {
+ case PGTABLE_LEVEL_PTE:
+ if (unlikely(maybe_pinned)) {
+ for (i = 0; i < nr_pages; i++)
+ if (PageAnonExclusive(page + i))
+ return -EBUSY;
+ }
+
+ if (!folio_test_large(folio)) {
+ if (PageAnonExclusive(page))
+ ClearPageAnonExclusive(page);
+ atomic_inc(&folio->_mapcount);
+ break;
+ }
+
+ do {
+ if (PageAnonExclusive(page))
+ ClearPageAnonExclusive(page);
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
+ atomic_inc(&page->_mapcount);
+ } while (page++, --nr_pages > 0);
+ folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
+ break;
+ case PGTABLE_LEVEL_PMD:
+ case PGTABLE_LEVEL_PUD:
+ if (PageAnonExclusive(page)) {
+ if (unlikely(maybe_pinned))
+ return -EBUSY;
+ ClearPageAnonExclusive(page);
+ }
+ atomic_inc(&folio->_entire_mapcount);
+ folio_inc_large_mapcount(folio, dst_vma);
+ break;
+ default:
+ BUILD_BUG();
+ }
+ return 0;
+}
+
+/**
+ * folio_try_dup_anon_rmap_ptes - try duplicating PTE mappings of a page range
+ * of a folio
+ * @folio: The folio to duplicate the mappings of
+ * @page: The first page to duplicate the mappings of
+ * @nr_pages: The number of pages of which the mapping will be duplicated
+ * @dst_vma: The destination vm area
+ * @src_vma: The vm area from which the mappings are duplicated
+ *
+ * The page range of the folio is defined by [page, page + nr_pages)
+ *
+ * The caller needs to hold the page table lock and the
+ * vma->vma_mm->write_protect_seq.
+ *
+ * Duplicating the mappings can only fail if the folio may be pinned; device
+ * private folios cannot get pinned and consequently this function cannot fail
+ * for them.
+ *
+ * If duplicating the mappings succeeds, the duplicated PTEs have to be R/O in
+ * the parent and the child. They must *not* be writable after this call
+ * returns.
+ *
+ * Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise.
+ */
+static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
+ struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma)
+{
+ return __folio_try_dup_anon_rmap(folio, page, nr_pages, dst_vma,
+ src_vma, PGTABLE_LEVEL_PTE);
+}
+
+static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
+ struct page *page, struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma)
+{
+ return __folio_try_dup_anon_rmap(folio, page, 1, dst_vma, src_vma,
+ PGTABLE_LEVEL_PTE);
+}
+
+/**
+ * folio_try_dup_anon_rmap_pmd - try duplicating a PMD mapping of a page range
+ * of a folio
+ * @folio: The folio to duplicate the mapping of
+ * @page: The first page to duplicate the mapping of
+ * @dst_vma: The destination vm area
+ * @src_vma: The vm area from which the mapping is duplicated
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock and the
+ * vma->vma_mm->write_protect_seq.
+ *
+ * Duplicating the mapping can only fail if the folio may be pinned; device
+ * private folios cannot get pinned and consequently this function cannot fail
+ * for them.
+ *
+ * If duplicating the mapping succeeds, the duplicated PMD has to be R/O in
+ * the parent and the child. It must *not* be writable after this call
+ * returns.
+ *
+ * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise.
+ */
+static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
+ struct page *page, struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, dst_vma,
+ src_vma, PGTABLE_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+ return -EBUSY;
+#endif
+}
+
+static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
+ struct page *page, int nr_pages, enum pgtable_level level)
+{
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
+ VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
+ __folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+ /* device private folios cannot get pinned via GUP. */
+ if (unlikely(folio_is_device_private(folio))) {
+ ClearPageAnonExclusive(page);
+ return 0;
+ }
+
+ /*
+ * We have to make sure that when we clear PageAnonExclusive, that
+ * the page is not pinned and that concurrent GUP-fast won't succeed in
+ * concurrently pinning the page.
+ *
+ * Conceptually, PageAnonExclusive clearing consists of:
+ * (A1) Clear PTE
+ * (A2) Check if the page is pinned; back off if so.
+ * (A3) Clear PageAnonExclusive
+ * (A4) Restore PTE (optional, but certainly not writable)
+ *
+ * When clearing PageAnonExclusive, we cannot possibly map the page
+ * writable again, because anon pages that may be shared must never
+ * be writable. So in any case, if the PTE was writable it cannot
+ * be writable anymore afterwards and there would be a PTE change. Only
+ * if the PTE wasn't writable, there might not be a PTE change.
+ *
+ * Conceptually, GUP-fast pinning of an anon page consists of:
+ * (B1) Read the PTE
+ * (B2) FOLL_WRITE: check if the PTE is not writable; back off if so.
+ * (B3) Pin the mapped page
+ * (B4) Check if the PTE changed by re-reading it; back off if so.
+ * (B5) If the original PTE is not writable, check if
+ * PageAnonExclusive is not set; back off if so.
+ *
+ * If the PTE was writable, we only have to make sure that GUP-fast
+ * observes a PTE change and properly backs off.
+ *
+ * If the PTE was not writable, we have to make sure that GUP-fast either
+ * detects a (temporary) PTE change or that PageAnonExclusive is cleared
+ * and properly backs off.
+ *
+ * Consequently, when clearing PageAnonExclusive(), we have to make
+ * sure that (A1), (A2)/(A3) and (A4) happen in the right memory
+ * order. In GUP-fast pinning code, we have to make sure that (B3),(B4)
+ * and (B5) happen in the right memory order.
+ *
+ * We assume that there might not be a memory barrier after
+ * clearing/invalidating the PTE (A1) and before restoring the PTE (A4),
+ * so we use explicit ones here.
+ */
+
+ /* Paired with the memory barrier in try_grab_folio(). */
+ if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
+ smp_mb();
+
+ if (unlikely(folio_maybe_dma_pinned(folio)))
+ return -EBUSY;
+ ClearPageAnonExclusive(page);
+
+ /*
+ * This is conceptually a smp_wmb() paired with the smp_rmb() in
+ * gup_must_unshare().
+ */
+ if (IS_ENABLED(CONFIG_HAVE_GUP_FAST))
+ smp_mb__after_atomic();
+ return 0;
+}
+
+/**
+ * folio_try_share_anon_rmap_pte - try marking an exclusive anonymous page
+ * mapped by a PTE possibly shared to prepare
+ * for KSM or temporary unmapping
+ * @folio: The folio to share a mapping of
+ * @page: The mapped exclusive page
+ *
+ * The caller needs to hold the page table lock and has to have the page table
+ * entries cleared/invalidated.
+ *
+ * This is similar to folio_try_dup_anon_rmap_pte(), however, not used during
+ * fork() to duplicate mappings, but instead to prepare for KSM or temporarily
+ * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pte().
+ *
+ * Marking the mapped page shared can only fail if the folio may be pinned;
+ * device private folios cannot get pinned and consequently this function cannot
+ * fail.
+ *
+ * Returns 0 if marking the mapped page possibly shared succeeded. Returns
+ * -EBUSY otherwise.
+ */
+static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
+ struct page *page)
+{
+ return __folio_try_share_anon_rmap(folio, page, 1, PGTABLE_LEVEL_PTE);
+}
+
+/**
+ * folio_try_share_anon_rmap_pmd - try marking an exclusive anonymous page
+ * range mapped by a PMD possibly shared to
+ * prepare for temporary unmapping
+ * @folio: The folio to share the mapping of
+ * @page: The first page to share the mapping of
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock and has to have the page table
+ * entries cleared/invalidated.
+ *
+ * This is similar to folio_try_dup_anon_rmap_pmd(), however, not used during
+ * fork() to duplicate a mapping, but instead to prepare for temporarily
+ * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pmd().
+ *
+ * Marking the mapped pages shared can only fail if the folio may be pinned;
+ * device private folios cannot get pinned and consequently this function cannot
+ * fail.
+ *
+ * Returns 0 if marking the mapped pages possibly shared succeeded. Returns
+ * -EBUSY otherwise.
+ */
+static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
+ struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR,
+ PGTABLE_LEVEL_PMD);
+#else
+ WARN_ON_ONCE(true);
+ return -EBUSY;
+#endif
}
/*
* Called from mm/vmscan.c to handle paging out
*/
-int page_referenced(struct page *, int is_locked,
- struct mem_cgroup *memcg, unsigned long *vm_flags);
+int folio_referenced(struct folio *, int is_locked,
+ struct mem_cgroup *memcg, vm_flags_t *vm_flags);
+
+void try_to_migrate(struct folio *folio, enum ttu_flags flags);
+void try_to_unmap(struct folio *, enum ttu_flags flags);
-bool try_to_unmap(struct page *, enum ttu_flags flags);
+struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr,
+ void *owner, struct folio **foliop);
/* Avoid racy checks */
#define PVMW_SYNC (1 << 0)
-/* Look for migarion entries rather than present PTEs */
+/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION (1 << 1)
+/* Result flags */
+
+/* The page is mapped across page table boundary */
+#define PVMW_PGTABLE_CROSSED (1 << 16)
+
struct page_vma_mapped_walk {
- struct page *page;
+ unsigned long pfn;
+ unsigned long nr_pages;
+ pgoff_t pgoff;
struct vm_area_struct *vma;
unsigned long address;
pmd_t *pmd;
@@ -211,21 +939,52 @@ struct page_vma_mapped_walk {
unsigned int flags;
};
+#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags) \
+ struct page_vma_mapped_walk name = { \
+ .pfn = folio_pfn(_folio), \
+ .nr_pages = folio_nr_pages(_folio), \
+ .pgoff = folio_pgoff(_folio), \
+ .vma = _vma, \
+ .address = _address, \
+ .flags = _flags, \
+ }
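
/*
 * [Editor's illustrative sketch.] The usual consumer pattern for the macro
 * above, e.g. from an rmap_one() callback; the loop visits every PTE/PMD
 * mapping the folio within the VMA, with the page table lock held:
 */
static bool example_rmap_one(struct folio *folio, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		/* pvmw.pte or pvmw.pmd points at one mapping here */
	}
	return true;
}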
+
static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
/* HugeTLB pte is set to the relevant page table entry without pte_mapped. */
- if (pvmw->pte && !PageHuge(pvmw->page))
+ if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma))
pte_unmap(pvmw->pte);
if (pvmw->ptl)
spin_unlock(pvmw->ptl);
}
-bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
-
-/*
- * Used by swapoff to help locate where page is expected in vma.
+/**
+ * page_vma_mapped_walk_restart - Restart the page table walk.
+ * @pvmw: Pointer to struct page_vma_mapped_walk.
+ *
+ * It restarts the page table walk when changes occur in the page
+ * table, such as splitting a PMD. Ensures that the PTL held during
+ * the previous walk is released and resets the state to allow for
+ * a new walk starting at the current address stored in pvmw->address.
*/
-unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
+static inline void
+page_vma_mapped_walk_restart(struct page_vma_mapped_walk *pvmw)
+{
+ WARN_ON_ONCE(!pvmw->pmd && !pvmw->pte);
+
+ if (likely(pvmw->ptl))
+ spin_unlock(pvmw->ptl);
+ else
+ WARN_ON_ONCE(1);
+
+ pvmw->ptl = NULL;
+ pvmw->pmd = NULL;
+ pvmw->pte = NULL;
+}
+
+bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
+unsigned long page_address_in_vma(const struct folio *folio,
+ const struct page *, const struct vm_area_struct *);
/*
* Cleans the PTEs of shared mappings.
@@ -233,27 +992,27 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
*
* returns the number of cleaned PTEs.
*/
-int page_mkclean(struct page *);
+int folio_mkclean(struct folio *);
-/*
- * called in munlock()/munmap() path to check for other vmas holding
- * the page mlocked.
- */
-void try_to_munlock(struct page *);
+int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff,
+ unsigned long pfn, unsigned long nr_pages);
-void remove_migration_ptes(struct page *old, struct page *new, bool locked);
+int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
+ struct vm_area_struct *vma);
-/*
- * Called by memory-failure.c to kill processes.
- */
-struct anon_vma *page_lock_anon_vma_read(struct page *page);
-void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
-int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+enum rmp_flags {
+ RMP_LOCKED = 1 << 0,
+ RMP_USE_SHARED_ZEROPAGE = 1 << 1,
+};
+
+void remove_migration_ptes(struct folio *src, struct folio *dst, int flags);
/*
* rmap_walk_control: To control rmap traversing for specific needs
*
* arg: passed to rmap_one() and invalid_vma()
+ * try_lock: bail out if the rmap lock is contended
+ * contended: indicate the rmap traversal bailed out due to lock contention
* rmap_one: executed on each vma where page is mapped
* done: for checking traversing termination condition
* anon_lock: for getting anon_lock by optimized way rather than default
@@ -261,42 +1020,46 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
*/
struct rmap_walk_control {
void *arg;
+ bool try_lock;
+ bool contended;
/*
* Return false if page table scanning in rmap_walk should be stopped.
* Otherwise, return true.
*/
- bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+ bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg);
- int (*done)(struct page *page);
- struct anon_vma *(*anon_lock)(struct page *page);
+ int (*done)(struct folio *folio);
+ struct anon_vma *(*anon_lock)(const struct folio *folio,
+ struct rmap_walk_control *rwc);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
-void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
-void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
+void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
+struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
+ struct rmap_walk_control *rwc);
#else /* !CONFIG_MMU */
#define anon_vma_init() do {} while (0)
#define anon_vma_prepare(vma) (0)
-#define anon_vma_link(vma) do {} while (0)
-static inline int page_referenced(struct page *page, int is_locked,
+static inline int folio_referenced(struct folio *folio, int is_locked,
struct mem_cgroup *memcg,
- unsigned long *vm_flags)
+ vm_flags_t *vm_flags)
{
*vm_flags = 0;
return 0;
}
-#define try_to_unmap(page, refs) false
+static inline void try_to_unmap(struct folio *folio, enum ttu_flags flags)
+{
+}
-static inline int page_mkclean(struct page *page)
+static inline int folio_mkclean(struct folio *folio)
{
return 0;
}
-
-
#endif /* CONFIG_MMU */
#endif /* _LINUX_RMAP_H */
diff --git a/include/linux/rolling_buffer.h b/include/linux/rolling_buffer.h
new file mode 100644
index 000000000000..ac15b1ffdd83
--- /dev/null
+++ b/include/linux/rolling_buffer.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Rolling buffer of folios
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _ROLLING_BUFFER_H
+#define _ROLLING_BUFFER_H
+
+#include <linux/folio_queue.h>
+#include <linux/uio.h>
+
+/*
+ * Rolling buffer. Whilst the buffer is live and in use, folios and folio
+ * queue segments can be added to one end by one thread and removed from the
+ * other end by another thread. The buffer isn't allowed to be empty; it must
+ * always have at least one folio_queue in it so that neither side has to
+ * modify both queue pointers.
+ *
+ * The iterator in the buffer is extended as buffers are inserted. It can be
+ * snapshotted to use a segment of the buffer.
+ */
+struct rolling_buffer {
+ struct folio_queue *head; /* Producer's insertion point */
+ struct folio_queue *tail; /* Consumer's removal point */
+ struct iov_iter iter; /* Iterator tracking what's left in the buffer */
+ u8 next_head_slot; /* Next slot in ->head */
+ u8 first_tail_slot; /* First slot in ->tail */
+};
+
+/*
+ * Snapshot of a rolling buffer.
+ */
+struct rolling_buffer_snapshot {
+ struct folio_queue *curr_folioq; /* Queue segment in which current folio resides */
+ unsigned char curr_slot; /* Folio currently being read */
+ unsigned char curr_order; /* Order of folio */
+};
+
+/* Marks to store per-folio in the internal folio_queue structs. */
+#define ROLLBUF_MARK_1 BIT(0)
+#define ROLLBUF_MARK_2 BIT(1)
+
+int rolling_buffer_init(struct rolling_buffer *roll, unsigned int rreq_id,
+ unsigned int direction);
+int rolling_buffer_make_space(struct rolling_buffer *roll);
+ssize_t rolling_buffer_load_from_ra(struct rolling_buffer *roll,
+ struct readahead_control *ractl,
+ struct folio_batch *put_batch);
+ssize_t rolling_buffer_append(struct rolling_buffer *roll, struct folio *folio,
+ unsigned int flags);
+struct folio_queue *rolling_buffer_delete_spent(struct rolling_buffer *roll);
+void rolling_buffer_clear(struct rolling_buffer *roll);
+
+static inline void rolling_buffer_advance(struct rolling_buffer *roll, size_t amount)
+{
+ iov_iter_advance(&roll->iter, amount);
+}
+
+#endif /* _ROLLING_BUFFER_H */
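
/*
 * [Editor's illustrative sketch.] A producer/consumer round trip inferred
 * from the declarations above; the ITER_SOURCE direction and the zero
 * rreq_id/flags values are assumptions, not taken from the patch.
 */
static int example_roll(struct rolling_buffer *roll, struct folio *folio)
{
	ssize_t n;
	int ret = rolling_buffer_init(roll, 0, ITER_SOURCE);

	if (ret < 0)
		return ret;
	/* Producer end: append one folio at the head */
	n = rolling_buffer_append(roll, folio, 0);
	if (n < 0)
		return n;
	/* Consumer end: consume the data, then tear everything down */
	rolling_buffer_advance(roll, folio_size(folio));
	rolling_buffer_clear(roll);
	return 0;
}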
diff --git a/include/linux/root_dev.h b/include/linux/root_dev.h
index 4e78651371ba..847c9a06101b 100644
--- a/include/linux/root_dev.h
+++ b/include/linux/root_dev.h
@@ -9,15 +9,8 @@
enum {
Root_NFS = MKDEV(UNNAMED_MAJOR, 255),
Root_CIFS = MKDEV(UNNAMED_MAJOR, 254),
+ Root_Generic = MKDEV(UNNAMED_MAJOR, 253),
Root_RAM0 = MKDEV(RAMDISK_MAJOR, 0),
- Root_RAM1 = MKDEV(RAMDISK_MAJOR, 1),
- Root_FD0 = MKDEV(FLOPPY_MAJOR, 0),
- Root_HDA1 = MKDEV(IDE0_MAJOR, 1),
- Root_HDA2 = MKDEV(IDE0_MAJOR, 2),
- Root_SDA1 = MKDEV(SCSI_DISK0_MAJOR, 1),
- Root_SDA2 = MKDEV(SCSI_DISK0_MAJOR, 2),
- Root_HDC1 = MKDEV(IDE1_MAJOR, 1),
- Root_SR0 = MKDEV(SCSI_CDROM_MAJOR, 0),
};
extern dev_t ROOT_DEV;
diff --git a/include/linux/rpmb.h b/include/linux/rpmb.h
new file mode 100644
index 000000000000..ed3f8e431eff
--- /dev/null
+++ b/include/linux/rpmb.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2015-2019 Intel Corp. All rights reserved
+ * Copyright (C) 2021-2022 Linaro Ltd
+ */
+#ifndef __RPMB_H__
+#define __RPMB_H__
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+/**
+ * enum rpmb_type - type of underlying storage technology
+ *
+ * @RPMB_TYPE_EMMC : emmc (JESD84-B50.1)
+ * @RPMB_TYPE_UFS : UFS (JESD220)
+ * @RPMB_TYPE_NVME : NVM Express
+ */
+enum rpmb_type {
+ RPMB_TYPE_EMMC,
+ RPMB_TYPE_UFS,
+ RPMB_TYPE_NVME,
+};
+
+/**
+ * struct rpmb_descr - RPMB description provided by the underlying block device
+ *
+ * @type : block device type
+ * @route_frames : routes frames to and from the RPMB device
+ * @dev_id : unique device identifier read from the hardware
+ * @dev_id_len : length of unique device identifier
+ * @reliable_wr_count: number of sectors that can be written in one access
+ * @capacity : capacity of the device in units of 128K
+ *
+ * @dev_id is intended to be used as input when deriving the authentication key.
+ */
+struct rpmb_descr {
+ enum rpmb_type type;
+ int (*route_frames)(struct device *dev, u8 *req, unsigned int req_len,
+ u8 *resp, unsigned int resp_len);
+ u8 *dev_id;
+ size_t dev_id_len;
+ u16 reliable_wr_count;
+ u16 capacity;
+};
+
+/**
+ * struct rpmb_dev - device which can support RPMB partition
+ *
+ * @dev : device
+ * @id : device_id
+ * @list_node : linked list node
+ * @descr : RPMB description
+ */
+struct rpmb_dev {
+ struct device dev;
+ int id;
+ struct list_head list_node;
+ struct rpmb_descr descr;
+};
+
+#define to_rpmb_dev(x) container_of((x), struct rpmb_dev, dev)
+
+/**
+ * struct rpmb_frame - RPMB frame structure for authenticated access
+ *
+ * @stuff : stuff bytes, a padding/reserved area of 196 bytes at the
+ * beginning of the RPMB frame. They don’t carry meaningful
+ * data but are required to make the frame exactly 512 bytes.
+ * @key_mac : The authentication key or the message authentication
+ * code (MAC) depending on the request/response type.
+ * The MAC will be delivered in the last (or the only)
+ * block of data.
+ * @data : Data to be written or read by signed access.
+ * @nonce : Random number generated by the host for the requests
+ * and copied to the response by the RPMB engine.
+ * @write_counter: Counter value for the total amount of the successful
+ * authenticated data write requests made by the host.
+ * @addr : Address of the data to be programmed to or read
+ * from the RPMB. Address is the serial number of
+ * the accessed block (half sector 256B).
+ * @block_count : Number of blocks (half sectors, 256B) requested to be
+ * read/programmed.
+ * @result : Includes information about the status of the write counter
+ * (valid, expired) and result of the access made to the RPMB.
+ * @req_resp : Defines the type of request and response to/from the memory.
+ *
+ * The stuff bytes and big-endian properties are modeled to fit the spec.
+ */
+struct rpmb_frame {
+ u8 stuff[196];
+ u8 key_mac[32];
+ u8 data[256];
+ u8 nonce[16];
+ __be32 write_counter;
+ __be16 addr;
+ __be16 block_count;
+ __be16 result;
+ __be16 req_resp;
+};
+
+#define RPMB_PROGRAM_KEY 0x1 /* Program RPMB Authentication Key */
+#define RPMB_GET_WRITE_COUNTER 0x2 /* Read RPMB write counter */
+#define RPMB_WRITE_DATA 0x3 /* Write data to RPMB partition */
+#define RPMB_READ_DATA 0x4 /* Read data from RPMB partition */
+#define RPMB_RESULT_READ 0x5 /* Read result request (Internal) */
+
+#if IS_ENABLED(CONFIG_RPMB)
+struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev);
+void rpmb_dev_put(struct rpmb_dev *rdev);
+struct rpmb_dev *rpmb_dev_find_device(const void *data,
+ const struct rpmb_dev *start,
+ int (*match)(struct device *dev,
+ const void *data));
+int rpmb_interface_register(struct class_interface *intf);
+void rpmb_interface_unregister(struct class_interface *intf);
+struct rpmb_dev *rpmb_dev_register(struct device *dev,
+ struct rpmb_descr *descr);
+int rpmb_dev_unregister(struct rpmb_dev *rdev);
+
+int rpmb_route_frames(struct rpmb_dev *rdev, u8 *req,
+ unsigned int req_len, u8 *resp, unsigned int resp_len);
+
+#else
+static inline struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev)
+{
+ return NULL;
+}
+
+static inline void rpmb_dev_put(struct rpmb_dev *rdev) { }
+
+static inline struct rpmb_dev *
+rpmb_dev_find_device(const void *data, const struct rpmb_dev *start,
+ int (*match)(struct device *dev, const void *data))
+{
+ return NULL;
+}
+
+static inline int rpmb_interface_register(struct class_interface *intf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void rpmb_interface_unregister(struct class_interface *intf)
+{
+}
+
+static inline struct rpmb_dev *
+rpmb_dev_register(struct device *dev, struct rpmb_descr *descr)
+{
+ return NULL;
+}
+
+static inline int rpmb_dev_unregister(struct rpmb_dev *dev)
+{
+ return 0;
+}
+
+static inline int rpmb_route_frames(struct rpmb_dev *rdev, u8 *req,
+ unsigned int req_len, u8 *resp,
+ unsigned int resp_len)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_RPMB */
+
+#endif /* __RPMB_H__ */
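
/*
 * [Editor's illustrative sketch.] Reading the write counter through the API
 * above. The one-request/one-response frame flow follows the JEDEC scheme
 * but is an assumption here; MAC and nonce verification are left to the
 * caller, and get_random_bytes() comes from <linux/random.h>.
 */
static int example_read_write_counter(struct rpmb_dev *rdev, u32 *counter)
{
	struct rpmb_frame req = {}, resp = {};
	int ret;

	req.req_resp = cpu_to_be16(RPMB_GET_WRITE_COUNTER);
	get_random_bytes(req.nonce, sizeof(req.nonce));

	ret = rpmb_route_frames(rdev, (u8 *)&req, sizeof(req),
				(u8 *)&resp, sizeof(resp));
	if (ret)
		return ret;
	*counter = be32_to_cpu(resp.write_counter);
	return 0;
}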
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index d97dcd049f18..fb7ab9165645 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -41,7 +41,9 @@ struct rpmsg_channel_info {
 * rpmsg_device - device that belongs to the rpmsg bus
* @dev: the device struct
* @id: device id (used to match between rpmsg drivers and devices)
- * @driver_override: driver name to force a match
+ * @driver_override: driver name to force a match; do not set directly,
+ * because core frees it; use driver_set_override() to
+ * set or clear it.
* @src: local address
* @dst: destination address
* @ept: the rpmsg endpoint of this channel
@@ -51,7 +53,7 @@ struct rpmsg_channel_info {
struct rpmsg_device {
struct device dev;
struct rpmsg_device_id id;
- char *driver_override;
+ const char *driver_override;
u32 src;
u32 dst;
struct rpmsg_endpoint *ept;
@@ -62,12 +64,14 @@ struct rpmsg_device {
};
typedef int (*rpmsg_rx_cb_t)(struct rpmsg_device *, void *, int, void *, u32);
+typedef int (*rpmsg_flowcontrol_cb_t)(struct rpmsg_device *, void *, bool);
/**
* struct rpmsg_endpoint - binds a local rpmsg address to its user
* @rpdev: rpmsg channel device
* @refcount: when this drops to zero, the ept is deallocated
* @cb: rx callback handler
+ * @flow_cb: remote flow control callback handler
* @cb_lock: must be taken before accessing/changing @cb
* @addr: local rpmsg address
* @priv: private data for the driver's use
@@ -90,6 +94,7 @@ struct rpmsg_endpoint {
struct rpmsg_device *rpdev;
struct kref refcount;
rpmsg_rx_cb_t cb;
+ rpmsg_flowcontrol_cb_t flow_cb;
struct mutex cb_lock;
u32 addr;
void *priv;
@@ -104,6 +109,7 @@ struct rpmsg_endpoint {
* @probe: invoked when a matching rpmsg channel (i.e. device) is found
* @remove: invoked when the rpmsg channel is removed
* @callback: invoked when an inbound message is received on the channel
+ * @flowcontrol: invoked when remote side flow control request is received
*/
struct rpmsg_driver {
struct device_driver drv;
@@ -111,6 +117,7 @@ struct rpmsg_driver {
int (*probe)(struct rpmsg_device *dev);
void (*remove)(struct rpmsg_device *dev);
int (*callback)(struct rpmsg_device *, void *, int, void *, u32);
+ int (*flowcontrol)(struct rpmsg_device *, void *, bool);
};
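
/*
 * [Editor's illustrative sketch.] A driver wiring up the new flow-control
 * hook; everything prefixed "example_" is hypothetical. The remote side
 * would trigger this via rpmsg_set_flow_control() on its endpoint.
 */
static int example_flowcontrol(struct rpmsg_device *rpdev, void *priv,
			       bool pause)
{
	dev_dbg(&rpdev->dev, "remote requests %s\n", pause ? "pause" : "resume");
	return 0;
}

static struct rpmsg_driver example_rpmsg_driver = {
	.drv.name	= "example-rpmsg",
	.flowcontrol	= example_flowcontrol,
	/* .probe, .remove and .callback as in any rpmsg driver */
};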
static inline u16 rpmsg16_to_cpu(struct rpmsg_device *rpdev, __rpmsg16 val)
@@ -163,6 +170,8 @@ static inline __rpmsg64 cpu_to_rpmsg64(struct rpmsg_device *rpdev, u64 val)
#if IS_ENABLED(CONFIG_RPMSG)
+int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+ const char *driver_override);
int rpmsg_register_device(struct rpmsg_device *rpdev);
int rpmsg_unregister_device(struct device *parent,
struct rpmsg_channel_info *chinfo);
@@ -175,19 +184,25 @@ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *,
int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len);
int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
-int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len);
int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len);
int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst);
-int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
- void *data, int len);
__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
+ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept);
+
+int rpmsg_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst);
+
#else
+static inline int rpmsg_register_device_override(struct rpmsg_device *rpdev,
+ const char *driver_override)
+{
+ return -ENXIO;
+}
+
static inline int rpmsg_register_device(struct rpmsg_device *rpdev)
{
return -ENXIO;
@@ -231,7 +246,7 @@ static inline struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev
/* This shouldn't be possible */
WARN_ON(1);
- return ERR_PTR(-ENXIO);
+ return NULL;
}
static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
@@ -252,8 +267,7 @@ static inline int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len,
}
-static inline int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src,
- u32 dst, void *data, int len)
+static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
/* This shouldn't be possible */
WARN_ON(1);
@@ -261,7 +275,8 @@ static inline int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src,
return -ENXIO;
}
-static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
+static inline int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
+ int len, u32 dst)
{
/* This shouldn't be possible */
WARN_ON(1);
@@ -269,17 +284,16 @@ static inline int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
return -ENXIO;
}
-static inline int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data,
- int len, u32 dst)
+static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept,
+ struct file *filp, poll_table *wait)
{
/* This shouldn't be possible */
WARN_ON(1);
- return -ENXIO;
+ return 0;
}
-static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
- u32 dst, void *data, int len)
+static inline ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept)
{
/* This shouldn't be possible */
WARN_ON(1);
@@ -287,13 +301,12 @@ static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src,
return -ENXIO;
}
-static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept,
- struct file *filp, poll_table *wait)
+static inline int rpmsg_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst)
{
/* This shouldn't be possible */
WARN_ON(1);
- return 0;
+ return -ENXIO;
}
#endif /* IS_ENABLED(CONFIG_RPMSG) */
diff --git a/include/linux/rpmsg/qcom_glink.h b/include/linux/rpmsg/qcom_glink.h
index 22fc3a69b683..bfbd48f435fa 100644
--- a/include/linux/rpmsg/qcom_glink.h
+++ b/include/linux/rpmsg/qcom_glink.h
@@ -5,7 +5,7 @@
#include <linux/device.h>
-struct qcom_glink;
+struct qcom_glink_smem;
#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK)
void qcom_glink_ssr_notify(const char *ssr_name);
@@ -15,20 +15,20 @@ static inline void qcom_glink_ssr_notify(const char *ssr_name) {}
#if IS_ENABLED(CONFIG_RPMSG_QCOM_GLINK_SMEM)
-struct qcom_glink *qcom_glink_smem_register(struct device *parent,
- struct device_node *node);
-void qcom_glink_smem_unregister(struct qcom_glink *glink);
+struct qcom_glink_smem *qcom_glink_smem_register(struct device *parent,
+ struct device_node *node);
+void qcom_glink_smem_unregister(struct qcom_glink_smem *glink);
#else
-static inline struct qcom_glink *
+static inline struct qcom_glink_smem *
qcom_glink_smem_register(struct device *parent,
struct device_node *node)
{
return NULL;
}
-static inline void qcom_glink_smem_unregister(struct qcom_glink *glink) {}
+static inline void qcom_glink_smem_unregister(struct qcom_glink_smem *glink) {}
#endif
#endif
diff --git a/include/linux/rpmsg/qcom_smd.h b/include/linux/rpmsg/qcom_smd.h
index 2e92d7407a85..3379bf4e1cb1 100644
--- a/include/linux/rpmsg/qcom_smd.h
+++ b/include/linux/rpmsg/qcom_smd.h
@@ -11,7 +11,7 @@ struct qcom_smd_edge;
struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
struct device_node *node);
-int qcom_smd_unregister_edge(struct qcom_smd_edge *edge);
+void qcom_smd_unregister_edge(struct qcom_smd_edge *edge);
#else
@@ -22,9 +22,8 @@ qcom_smd_register_edge(struct device *parent,
return NULL;
}
-static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
+static inline void qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
{
- return 0;
}
#endif
diff --git a/include/linux/rseq.h b/include/linux/rseq.h
new file mode 100644
index 000000000000..2266f4dc77b6
--- /dev/null
+++ b/include/linux/rseq.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _LINUX_RSEQ_H
+#define _LINUX_RSEQ_H
+
+#ifdef CONFIG_RSEQ
+#include <linux/sched.h>
+
+#include <uapi/linux/rseq.h>
+
+void __rseq_handle_slowpath(struct pt_regs *regs);
+
+/* Invoked from resume_user_mode_work() */
+static inline void rseq_handle_slowpath(struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
+ if (current->rseq.event.slowpath)
+ __rseq_handle_slowpath(regs);
+ } else {
+ /* '&' is intentional to spare one conditional branch */
+ if (current->rseq.event.sched_switch & current->rseq.event.has_rseq)
+ __rseq_handle_slowpath(regs);
+ }
+}
+
+void __rseq_signal_deliver(int sig, struct pt_regs *regs);
+
+/*
+ * Invoked from signal delivery to fixup based on the register context before
+ * switching to the signal delivery context.
+ */
+static inline void rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *regs)
+{
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY)) {
+ /* '&' is intentional to spare one conditional branch */
+ if (current->rseq.event.has_rseq & current->rseq.event.user_irq)
+ __rseq_signal_deliver(ksig->sig, regs);
+ } else {
+ if (current->rseq.event.has_rseq)
+ __rseq_signal_deliver(ksig->sig, regs);
+ }
+}
+
+static inline void rseq_raise_notify_resume(struct task_struct *t)
+{
+ set_tsk_thread_flag(t, TIF_RSEQ);
+}
+
+/* Invoked from context switch to force evaluation on exit to user */
+static __always_inline void rseq_sched_switch_event(struct task_struct *t)
+{
+ struct rseq_event *ev = &t->rseq.event;
+
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY)) {
+ /*
+ * Avoid a boat load of conditionals by using simple logic
+ * to determine whether NOTIFY_RESUME needs to be raised.
+ *
+ * It's required when the CPU or MM CID has changed or
+ * the entry was from user space.
+ */
+ bool raise = (ev->user_irq | ev->ids_changed) & ev->has_rseq;
+
+ if (raise) {
+ ev->sched_switch = true;
+ rseq_raise_notify_resume(t);
+ }
+ } else {
+ if (ev->has_rseq) {
+ t->rseq.event.sched_switch = true;
+ rseq_raise_notify_resume(t);
+ }
+ }
+}
+
+/*
+ * Invoked from __set_task_cpu() when a task migrates or from
+ * mm_cid_schedin() when the CID changes to enforce an IDs update.
+ *
+ * This does not raise TIF_NOTIFY_RESUME as that happens in
+ * rseq_sched_switch_event().
+ */
+static __always_inline void rseq_sched_set_ids_changed(struct task_struct *t)
+{
+ t->rseq.event.ids_changed = true;
+}
+
+/* Enforce a full update after RSEQ registration and when execve() failed */
+static inline void rseq_force_update(void)
+{
+ if (current->rseq.event.has_rseq) {
+ current->rseq.event.ids_changed = true;
+ current->rseq.event.sched_switch = true;
+ rseq_raise_notify_resume(current);
+ }
+}
+
+/*
+ * KVM/HYPERV invoke resume_user_mode_work() before entering guest mode,
+ * which clears TIF_NOTIFY_RESUME on architectures that don't use the
+ * generic TIF bits and therefore can't provide a separate TIF_RSEQ flag.
+ *
+ * To avoid updating user space RSEQ in that case just to do it again
+ * before returning to user space, __rseq_handle_slowpath() does nothing
+ * when invoked with NULL register state.
+ *
+ * After returning from guest mode, before exiting to userspace, hypervisors
+ * must invoke this function to re-raise TIF_NOTIFY_RESUME if necessary.
+ */
+static inline void rseq_virt_userspace_exit(void)
+{
+ /*
+ * The generic optimization for deferring RSEQ updates until the next
+ * exit relies on having a dedicated TIF_RSEQ.
+ */
+ if (!IS_ENABLED(CONFIG_HAVE_GENERIC_TIF_BITS) &&
+ current->rseq.event.sched_switch)
+ rseq_raise_notify_resume(current);
+}
+
+static inline void rseq_reset(struct task_struct *t)
+{
+ memset(&t->rseq, 0, sizeof(t->rseq));
+ t->rseq.ids.cpu_id = RSEQ_CPU_ID_UNINITIALIZED;
+}
+
+static inline void rseq_execve(struct task_struct *t)
+{
+ rseq_reset(t);
+}
+
+/*
+ * If the parent process has a registered restartable sequences area, the
+ * child inherits it. Unregister rseq for a clone with CLONE_VM set.
+ *
+ * On fork, keep the IDs (CPU, MMCID) of the parent, which avoids a fault
+ * on the COW page on exit to user space, when the child stays on the same
+ * CPU as the parent. That's obviously not guaranteed, but in overcommit
+ * scenarios it is more likely and optimizes for the fork/exec case without
+ * taking the fault.
+ */
+static inline void rseq_fork(struct task_struct *t, u64 clone_flags)
+{
+ if (clone_flags & CLONE_VM)
+ rseq_reset(t);
+ else
+ t->rseq = current->rseq;
+}
+
+#else /* CONFIG_RSEQ */
+static inline void rseq_handle_slowpath(struct pt_regs *regs) { }
+static inline void rseq_signal_deliver(struct ksignal *ksig, struct pt_regs *regs) { }
+static inline void rseq_sched_switch_event(struct task_struct *t) { }
+static inline void rseq_sched_set_ids_changed(struct task_struct *t) { }
+static inline void rseq_force_update(void) { }
+static inline void rseq_virt_userspace_exit(void) { }
+static inline void rseq_fork(struct task_struct *t, u64 clone_flags) { }
+static inline void rseq_execve(struct task_struct *t) { }
+#endif /* !CONFIG_RSEQ */
+
+#ifdef CONFIG_DEBUG_RSEQ
+void rseq_syscall(struct pt_regs *regs);
+#else /* CONFIG_DEBUG_RSEQ */
+static inline void rseq_syscall(struct pt_regs *regs) { }
+#endif /* !CONFIG_DEBUG_RSEQ */
+
+#endif /* _LINUX_RSEQ_H */
diff --git a/include/linux/rseq_entry.h b/include/linux/rseq_entry.h
new file mode 100644
index 000000000000..c92167ff8a7f
--- /dev/null
+++ b/include/linux/rseq_entry.h
@@ -0,0 +1,616 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_RSEQ_ENTRY_H
+#define _LINUX_RSEQ_ENTRY_H
+
+/* Must be outside the CONFIG_RSEQ guard to resolve the stubs */
+#ifdef CONFIG_RSEQ_STATS
+#include <linux/percpu.h>
+
+struct rseq_stats {
+ unsigned long exit;
+ unsigned long signal;
+ unsigned long slowpath;
+ unsigned long fastpath;
+ unsigned long ids;
+ unsigned long cs;
+ unsigned long clear;
+ unsigned long fixup;
+};
+
+DECLARE_PER_CPU(struct rseq_stats, rseq_stats);
+
+/*
+ * Slow path has interrupts and preemption enabled, but the fast path
+ * runs with interrupts disabled so there is no point in having the
+ * preemption checks implied in __this_cpu_inc() for every operation.
+ */
+#ifdef RSEQ_BUILD_SLOW_PATH
+#define rseq_stat_inc(which) this_cpu_inc((which))
+#else
+#define rseq_stat_inc(which) raw_cpu_inc((which))
+#endif
+
+#else /* CONFIG_RSEQ_STATS */
+#define rseq_stat_inc(x) do { } while (0)
+#endif /* !CONFIG_RSEQ_STATS */
+
+#ifdef CONFIG_RSEQ
+#include <linux/jump_label.h>
+#include <linux/rseq.h>
+#include <linux/uaccess.h>
+
+#include <linux/tracepoint-defs.h>
+
+#ifdef CONFIG_TRACEPOINTS
+DECLARE_TRACEPOINT(rseq_update);
+DECLARE_TRACEPOINT(rseq_ip_fixup);
+void __rseq_trace_update(struct task_struct *t);
+void __rseq_trace_ip_fixup(unsigned long ip, unsigned long start_ip,
+ unsigned long offset, unsigned long abort_ip);
+
+static inline void rseq_trace_update(struct task_struct *t, struct rseq_ids *ids)
+{
+ if (tracepoint_enabled(rseq_update) && ids)
+ __rseq_trace_update(t);
+}
+
+static inline void rseq_trace_ip_fixup(unsigned long ip, unsigned long start_ip,
+ unsigned long offset, unsigned long abort_ip)
+{
+ if (tracepoint_enabled(rseq_ip_fixup))
+ __rseq_trace_ip_fixup(ip, start_ip, offset, abort_ip);
+}
+
+#else /* CONFIG_TRACEPOINT */
+static inline void rseq_trace_update(struct task_struct *t, struct rseq_ids *ids) { }
+static inline void rseq_trace_ip_fixup(unsigned long ip, unsigned long start_ip,
+ unsigned long offset, unsigned long abort_ip) { }
+#endif /* !CONFIG_TRACEPOINT */
+
+DECLARE_STATIC_KEY_MAYBE(CONFIG_RSEQ_DEBUG_DEFAULT_ENABLE, rseq_debug_enabled);
+
+#ifdef RSEQ_BUILD_SLOW_PATH
+#define rseq_inline
+#else
+#define rseq_inline __always_inline
+#endif
+
+bool rseq_debug_update_user_cs(struct task_struct *t, struct pt_regs *regs, unsigned long csaddr);
+bool rseq_debug_validate_ids(struct task_struct *t);
+
+static __always_inline void rseq_note_user_irq_entry(void)
+{
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY))
+ current->rseq.event.user_irq = true;
+}
+
+/*
+ * Check whether there is a valid critical section and whether the
+ * instruction pointer in @regs is inside the critical section.
+ *
+ * - If the critical section is invalid, terminate the task.
+ *
+ * - If valid and the instruction pointer is inside, set it to the abort IP.
+ *
+ * - If valid and the instruction pointer is outside, clear the critical
+ * section address.
+ *
+ * Returns true, if the section was valid and either fixup or clear was
+ * done, false otherwise.
+ *
+ * In the failure case task::rseq_event::fatal is set when an invalid
+ * section was found. It's clear when the failure was an unresolved page
+ * fault.
+ *
+ * If inlined into the exit to user path with interrupts disabled, the
+ * caller has to protect against page faults with pagefault_disable().
+ *
+ * In preemptible task context this would be counterproductive as the page
+ * faults could not be fully resolved. As a consequence unresolved page
+ * faults in task context are fatal too.
+ */
+
+#ifdef RSEQ_BUILD_SLOW_PATH
+/*
+ * The debug version is put out of line, but kept here so the code stays
+ * together.
+ *
+ * @csaddr has already been checked by the caller to be in user space
+ */
+bool rseq_debug_update_user_cs(struct task_struct *t, struct pt_regs *regs,
+ unsigned long csaddr)
+{
+ struct rseq_cs __user *ucs = (struct rseq_cs __user *)(unsigned long)csaddr;
+ u64 start_ip, abort_ip, offset, cs_end, head, tasksize = TASK_SIZE;
+ unsigned long ip = instruction_pointer(regs);
+ u64 __user *uc_head = (u64 __user *) ucs;
+ u32 usig, __user *uc_sig;
+
+ scoped_user_rw_access(ucs, efault) {
+ /*
+ * Evaluate the user pile and exit if one of the conditions
+ * is not fulfilled.
+ */
+ unsafe_get_user(start_ip, &ucs->start_ip, efault);
+ if (unlikely(start_ip >= tasksize))
+ goto die;
+ /* If outside, just clear the critical section. */
+ if (ip < start_ip)
+ goto clear;
+
+ unsafe_get_user(offset, &ucs->post_commit_offset, efault);
+ cs_end = start_ip + offset;
+ /* Check for overflow and wraparound */
+ if (unlikely(cs_end >= tasksize || cs_end < start_ip))
+ goto die;
+
+ /* If not inside, clear it. */
+ if (ip >= cs_end)
+ goto clear;
+
+ unsafe_get_user(abort_ip, &ucs->abort_ip, efault);
+ /* Ensure it's "valid" */
+ if (unlikely(abort_ip >= tasksize || abort_ip < sizeof(*uc_sig)))
+ goto die;
+ /* Validate that the abort IP is not in the critical section */
+ if (unlikely(abort_ip - start_ip < offset))
+ goto die;
+
+ /*
+ * Check version and flags for 0. No point in emitting
+ * deprecated warnings before dying. That could be done in
+ * the slow path eventually, but *shrug*.
+ */
+ unsafe_get_user(head, uc_head, efault);
+ if (unlikely(head))
+ goto die;
+
+ /* abort_ip - 4 is >= 0. See abort_ip check above */
+ uc_sig = (u32 __user *)(unsigned long)(abort_ip - sizeof(*uc_sig));
+ unsafe_get_user(usig, uc_sig, efault);
+ if (unlikely(usig != t->rseq.sig))
+ goto die;
+
+ /* rseq_event.user_irq is only valid if CONFIG_GENERIC_IRQ_ENTRY=y */
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY)) {
+ /* If not in interrupt from user context, let it die */
+ if (unlikely(!t->rseq.event.user_irq))
+ goto die;
+ }
+ unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
+ instruction_pointer_set(regs, (unsigned long)abort_ip);
+ rseq_stat_inc(rseq_stats.fixup);
+ break;
+ clear:
+ unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
+ rseq_stat_inc(rseq_stats.clear);
+ abort_ip = 0ULL;
+ }
+
+ if (unlikely(abort_ip))
+ rseq_trace_ip_fixup(ip, start_ip, offset, abort_ip);
+ return true;
+die:
+ t->rseq.event.fatal = true;
+efault:
+ return false;
+}
+
+/*
+ * On debug kernels validate that user space did not mess with it if the
+ * debug branch is enabled.
+ */
+bool rseq_debug_validate_ids(struct task_struct *t)
+{
+ struct rseq __user *rseq = t->rseq.usrptr;
+ u32 cpu_id, uval, node_id;
+
+ /*
+ * On the first exit after registering the rseq region CPU ID is
+ * RSEQ_CPU_ID_UNINITIALIZED and node_id in user space is 0!
+ */
+ node_id = t->rseq.ids.cpu_id != RSEQ_CPU_ID_UNINITIALIZED ?
+ cpu_to_node(t->rseq.ids.cpu_id) : 0;
+
+ scoped_user_read_access(rseq, efault) {
+ unsafe_get_user(cpu_id, &rseq->cpu_id_start, efault);
+ if (cpu_id != t->rseq.ids.cpu_id)
+ goto die;
+ unsafe_get_user(uval, &rseq->cpu_id, efault);
+ if (uval != cpu_id)
+ goto die;
+ unsafe_get_user(uval, &rseq->node_id, efault);
+ if (uval != node_id)
+ goto die;
+ unsafe_get_user(uval, &rseq->mm_cid, efault);
+ if (uval != t->rseq.ids.mm_cid)
+ goto die;
+ }
+ return true;
+die:
+ t->rseq.event.fatal = true;
+efault:
+ return false;
+}
+
+#endif /* RSEQ_BUILD_SLOW_PATH */
+
+/*
+ * This only ensures that abort_ip is in the user address space and
+ * validates that it is preceded by the signature.
+ *
+ * No other sanity checks are done here, that's what the debug code is for.
+ */
+static rseq_inline bool
+rseq_update_user_cs(struct task_struct *t, struct pt_regs *regs, unsigned long csaddr)
+{
+ struct rseq_cs __user *ucs = (struct rseq_cs __user *)(unsigned long)csaddr;
+ unsigned long ip = instruction_pointer(regs);
+ unsigned long tasksize = TASK_SIZE;
+ u64 start_ip, abort_ip, offset;
+ u32 usig, __user *uc_sig;
+
+ rseq_stat_inc(rseq_stats.cs);
+
+ if (unlikely(csaddr >= tasksize)) {
+ t->rseq.event.fatal = true;
+ return false;
+ }
+
+ if (static_branch_unlikely(&rseq_debug_enabled))
+ return rseq_debug_update_user_cs(t, regs, csaddr);
+
+ scoped_user_rw_access(ucs, efault) {
+ unsafe_get_user(start_ip, &ucs->start_ip, efault);
+ unsafe_get_user(offset, &ucs->post_commit_offset, efault);
+ unsafe_get_user(abort_ip, &ucs->abort_ip, efault);
+
+ /*
+ * No sanity checks. If user space screwed it up, it can
+ * keep the pieces. That's what debug code is for.
+ *
+ * If outside, just clear the critical section.
+ */
+ if (ip - start_ip >= offset)
+ goto clear;
+
+ /*
+ * Two requirements for @abort_ip:
+ * - Must be in user space as x86 IRET would happily return to
+ * the kernel.
+ * - The four bytes preceding the instruction at @abort_ip must
+ * contain the signature.
+ *
+ * The latter protects against the following attack vector:
+ *
+ * An attacker with limited abilities to write, creates a critical
+ * section descriptor, sets the abort IP to a library function or
+ * some other ROP gadget and stores the address of the descriptor
+ * in TLS::rseq::rseq_cs. An RSEQ abort would then evade ROP
+ * protection.
+ */
+ if (unlikely(abort_ip >= tasksize || abort_ip < sizeof(*uc_sig)))
+ goto die;
+
+ /* The address is guaranteed to be >= 0 and < TASK_SIZE */
+ uc_sig = (u32 __user *)(unsigned long)(abort_ip - sizeof(*uc_sig));
+ unsafe_get_user(usig, uc_sig, efault);
+ if (unlikely(usig != t->rseq.sig))
+ goto die;
+
+ /* Invalidate the critical section */
+ unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
+ /* Update the instruction pointer */
+ instruction_pointer_set(regs, (unsigned long)abort_ip);
+ rseq_stat_inc(rseq_stats.fixup);
+ break;
+ clear:
+ unsafe_put_user(0ULL, &t->rseq.usrptr->rseq_cs, efault);
+ rseq_stat_inc(rseq_stats.clear);
+ abort_ip = 0ULL;
+ }
+
+ if (unlikely(abort_ip))
+ rseq_trace_ip_fixup(ip, start_ip, offset, abort_ip);
+ return true;
+die:
+ t->rseq.event.fatal = true;
+efault:
+ return false;
+}
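
/*
 * [Editor's note, illustrative.] The user-space shape being validated
 * above: TLS rseq->rseq_cs points at a struct rseq_cs whose abort_ip
 * refers to a handler preceded by the registered signature word, roughly:
 *
 *	.long	RSEQ_SIG	// the 4 bytes checked via uc_sig
 * abort_handler:
 *	// abort_ip points here; the IP lands here after a fixup
 *
 * The signature value itself is chosen at rseq registration time and is
 * per-architecture; RSEQ_SIG is the conventional user-space name for it.
 */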
+
+/*
+ * Updates CPU ID, Node ID and MM CID and reads the critical section
+ * address, when @csaddr != NULL. This allows putting the ID update and the
+ * read under the same uaccess region to spare a separate begin/end.
+ *
+ * As this is either invoked from a C wrapper with @csaddr = NULL or from
+ * the fast path code with a valid pointer, a clever compiler should be
+ * able to optimize the read out. Spares a duplicate implementation.
+ *
+ * Returns true, if the operation was successful, false otherwise.
+ *
+ * In the failure case task::rseq_event::fatal is set when invalid data
+ * was found on debug kernels. It's clear when the failure was an
+ * unresolved page fault.
+ *
+ * If inlined into the exit to user path with interrupts disabled, the
+ * caller has to protect against page faults with pagefault_disable().
+ *
+ * In preemptible task context this would be counterproductive as the page
+ * faults could not be fully resolved. As a consequence unresolved page
+ * faults in task context are fatal too.
+ */
+static rseq_inline
+bool rseq_set_ids_get_csaddr(struct task_struct *t, struct rseq_ids *ids,
+ u32 node_id, u64 *csaddr)
+{
+ struct rseq __user *rseq = t->rseq.usrptr;
+
+ if (static_branch_unlikely(&rseq_debug_enabled)) {
+ if (!rseq_debug_validate_ids(t))
+ return false;
+ }
+
+ scoped_user_rw_access(rseq, efault) {
+ unsafe_put_user(ids->cpu_id, &rseq->cpu_id_start, efault);
+ unsafe_put_user(ids->cpu_id, &rseq->cpu_id, efault);
+ unsafe_put_user(node_id, &rseq->node_id, efault);
+ unsafe_put_user(ids->mm_cid, &rseq->mm_cid, efault);
+ if (csaddr)
+ unsafe_get_user(*csaddr, &rseq->rseq_cs, efault);
+ }
+
+ /* Cache the new values */
+ t->rseq.ids.cpu_cid = ids->cpu_cid;
+ rseq_stat_inc(rseq_stats.ids);
+ rseq_trace_update(t, ids);
+ return true;
+efault:
+ return false;
+}
+
+/*
+ * Update user space with new IDs and conditionally check whether the task
+ * is in a critical section.
+ */
+static rseq_inline bool rseq_update_usr(struct task_struct *t, struct pt_regs *regs,
+ struct rseq_ids *ids, u32 node_id)
+{
+ u64 csaddr;
+
+ if (!rseq_set_ids_get_csaddr(t, ids, node_id, &csaddr))
+ return false;
+
+ /*
+ * On architectures which utilize the generic entry code this allows
+ * skipping the critical section check when the entry was not from a
+ * user space interrupt, unless debug mode is enabled.
+ */
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_ENTRY)) {
+ if (!static_branch_unlikely(&rseq_debug_enabled)) {
+ if (likely(!t->rseq.event.user_irq))
+ return true;
+ }
+ }
+ if (likely(!csaddr))
+ return true;
+ /* Sigh, this really needs to do work */
+ return rseq_update_user_cs(t, regs, csaddr);
+}
+
+/*
+ * If you want to use this then convert your architecture to the generic
+ * entry code. I'm tired of building workarounds for people who can't be
+ * bothered to make the maintenance of generic infrastructure less
+ * burdensome. Just sucking everything into the architecture code and
+ * thereby making others chase the horrible hacks and keep them working is
+ * neither acceptable nor sustainable.
+ */
+#ifdef CONFIG_GENERIC_ENTRY
+
+/*
+ * This is inlined into the exit path because:
+ *
+ * 1) It's a one-time comparison in the fast path when there is no event to
+ * handle
+ *
+ * 2) The access to the user space rseq memory (TLS) is unlikely to fault
+ * so the straight inline operation is:
+ *
+ * - Four 32-bit stores only if CPU ID/ MM CID need to be updated
+ * - One 64-bit load to retrieve the critical section address
+ *
+ * 3) In the unlikely case that the critical section address is != NULL:
+ *
+ * - One 64-bit load to retrieve the start IP
+ * - One 64-bit load to retrieve the offset for calculating the end
+ * - One 64-bit load to retrieve the abort IP
+ * - One 64-bit load to retrieve the signature
+ * - One store to clear the critical section address
+ *
+ * The non-debug case implements only the minimal required checking. It
+ * provides protection against a rogue abort IP in kernel space, which
+ * would be exploitable at least on x86, and also against a rogue CS
+ * descriptor by checking the signature at the abort IP. Any fallout from
+ * invalid critical section descriptors is a user space problem. The debug
+ * case provides the full set of checks and terminates the task if a
+ * condition is not met.
+ *
+ * In case of a fault or an invalid value, this sets TIF_NOTIFY_RESUME and
+ * tells the caller to loop back into exit_to_user_mode_loop(). The rseq
+ * slow path there will handle the failure.
+ */
+static __always_inline bool rseq_exit_user_update(struct pt_regs *regs, struct task_struct *t)
+{
+ /*
+ * Page faults need to be disabled as this is called with
+ * interrupts disabled
+ */
+ guard(pagefault)();
+ if (likely(!t->rseq.event.ids_changed)) {
+ struct rseq __user *rseq = t->rseq.usrptr;
+ /*
+ * If IDs have not changed rseq_event::user_irq must be true
+ * See rseq_sched_switch_event().
+ */
+ u64 csaddr;
+
+ if (unlikely(get_user_inline(csaddr, &rseq->rseq_cs)))
+ return false;
+
+ if (static_branch_unlikely(&rseq_debug_enabled) || unlikely(csaddr)) {
+ if (unlikely(!rseq_update_user_cs(t, regs, csaddr)))
+ return false;
+ }
+ return true;
+ }
+
+ struct rseq_ids ids = {
+ .cpu_id = task_cpu(t),
+ .mm_cid = task_mm_cid(t),
+ };
+ u32 node_id = cpu_to_node(ids.cpu_id);
+
+ return rseq_update_usr(t, regs, &ids, node_id);
+}
+
+static __always_inline bool __rseq_exit_to_user_mode_restart(struct pt_regs *regs)
+{
+ struct task_struct *t = current;
+
+ /*
+ * If the task did not go through schedule or got the flag enforced
+ * by the rseq syscall or execve, then nothing to do here.
+ *
+ * CPU ID and MM CID can only change when going through a context
+ * switch.
+ *
+ * rseq_sched_switch_event() sets the rseq_event::sched_switch bit
+ * only when rseq_event::has_rseq is true. That conditional is
+ * required to avoid setting the TIF bit if RSEQ is not registered
+ * for a task. rseq_event::sched_switch is cleared when RSEQ is
+ * unregistered by a task so it's sufficient to check for the
+ * sched_switch bit alone.
+ *
+ * A sane compiler requires three instructions for the nothing to do
+ * case including clearing the events, but your mileage might vary.
+ */
+ if (unlikely((t->rseq.event.sched_switch))) {
+ rseq_stat_inc(rseq_stats.fastpath);
+
+ if (unlikely(!rseq_exit_user_update(regs, t)))
+ return true;
+ }
+ /* Clear state so next entry starts from a clean slate */
+ t->rseq.event.events = 0;
+ return false;
+}
+
+/* Required to allow conversion to GENERIC_ENTRY w/o GENERIC_TIF_BITS */
+#ifdef CONFIG_HAVE_GENERIC_TIF_BITS
+static __always_inline bool test_tif_rseq(unsigned long ti_work)
+{
+ return ti_work & _TIF_RSEQ;
+}
+
+static __always_inline void clear_tif_rseq(void)
+{
+ static_assert(TIF_RSEQ != TIF_NOTIFY_RESUME);
+ clear_thread_flag(TIF_RSEQ);
+}
+#else
+static __always_inline bool test_tif_rseq(unsigned long ti_work) { return true; }
+static __always_inline void clear_tif_rseq(void) { }
+#endif
+
+static __always_inline bool
+rseq_exit_to_user_mode_restart(struct pt_regs *regs, unsigned long ti_work)
+{
+ if (likely(!test_tif_rseq(ti_work)))
+ return false;
+
+ if (unlikely(__rseq_exit_to_user_mode_restart(regs))) {
+ current->rseq.event.slowpath = true;
+ set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+ return true;
+ }
+
+ clear_tif_rseq();
+ return false;
+}
+
+#else /* CONFIG_GENERIC_ENTRY */
+static inline bool rseq_exit_to_user_mode_restart(struct pt_regs *regs, unsigned long ti_work)
+{
+ return false;
+}
+#endif /* !CONFIG_GENERIC_ENTRY */
+
+static __always_inline void rseq_syscall_exit_to_user_mode(void)
+{
+ struct rseq_event *ev = &current->rseq.event;
+
+ rseq_stat_inc(rseq_stats.exit);
+
+ /* Needed to remove the store for the !lockdep case */
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ WARN_ON_ONCE(ev->sched_switch);
+ ev->events = 0;
+ }
+}
+
+static __always_inline void rseq_irqentry_exit_to_user_mode(void)
+{
+ struct rseq_event *ev = &current->rseq.event;
+
+ rseq_stat_inc(rseq_stats.exit);
+
+ lockdep_assert_once(!ev->sched_switch);
+
+ /*
+ * Ensure that event (especially user_irq) is cleared when the
+ * interrupt did not result in a schedule and therefore the
+ * rseq processing could not clear it.
+ */
+ ev->events = 0;
+}
+
+/* Required to keep ARM64 working */
+static __always_inline void rseq_exit_to_user_mode_legacy(void)
+{
+ struct rseq_event *ev = &current->rseq.event;
+
+ rseq_stat_inc(rseq_stats.exit);
+
+ if (static_branch_unlikely(&rseq_debug_enabled))
+ WARN_ON_ONCE(ev->sched_switch);
+
+ /*
+ * Ensure that event (especially user_irq) is cleared when the
+ * interrupt did not result in a schedule and therefore the
+ * rseq processing did not clear it.
+ */
+ ev->events = 0;
+}
+
+void __rseq_debug_syscall_return(struct pt_regs *regs);
+
+static inline void rseq_debug_syscall_return(struct pt_regs *regs)
+{
+ if (static_branch_unlikely(&rseq_debug_enabled))
+ __rseq_debug_syscall_return(regs);
+}
+#else /* CONFIG_RSEQ */
+static inline void rseq_note_user_irq_entry(void) { }
+static inline bool rseq_exit_to_user_mode_restart(struct pt_regs *regs, unsigned long ti_work)
+{
+ return false;
+}
+static inline void rseq_syscall_exit_to_user_mode(void) { }
+static inline void rseq_irqentry_exit_to_user_mode(void) { }
+static inline void rseq_exit_to_user_mode_legacy(void) { }
+static inline void rseq_debug_syscall_return(struct pt_regs *regs) { }
+#endif /* !CONFIG_RSEQ */
+
+#endif /* _LINUX_RSEQ_ENTRY_H */
diff --git a/include/linux/rseq_types.h b/include/linux/rseq_types.h
new file mode 100644
index 000000000000..332dc14b81c9
--- /dev/null
+++ b/include/linux/rseq_types.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_RSEQ_TYPES_H
+#define _LINUX_RSEQ_TYPES_H
+
+#include <linux/irq_work_types.h>
+#include <linux/types.h>
+#include <linux/workqueue_types.h>
+
+#ifdef CONFIG_RSEQ
+struct rseq;
+
+/**
+ * struct rseq_event - Storage for rseq related event management
+ * @all: Compound to initialize and clear the data efficiently
+ * @events: Compound to access events with a single load/store
+ * @sched_switch: True if the task was scheduled and needs update on
+ * exit to user
+ * @ids_changed: Indicator that IDs need to be updated
+ * @user_irq: True on interrupt entry from user mode
+ * @has_rseq: True if the task has a rseq pointer installed
+ * @error: Compound error code for the slow path to analyze
+ * @fatal: User space data corrupted or invalid
+ * @slowpath: Indicator that slow path processing via TIF_NOTIFY_RESUME
+ * is required
+ *
+ * @sched_switch and @ids_changed must be adjacent and the combo must be
+ * 16-bit aligned to allow a single store, when both are set at the same
+ * time in the scheduler.
+ */
+struct rseq_event {
+ union {
+ u64 all;
+ struct {
+ union {
+ u32 events;
+ struct {
+ u8 sched_switch;
+ u8 ids_changed;
+ u8 user_irq;
+ };
+ };
+
+ u8 has_rseq;
+ u8 __pad;
+ union {
+ u16 error;
+ struct {
+ u8 fatal;
+ u8 slowpath;
+ };
+ };
+ };
+ };
+};
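
/*
 * [Editor's illustrative sketch.] The adjacency and alignment contract
 * documented above can be pinned down at compile time, e.g.:
 */
static_assert(offsetof(struct rseq_event, ids_changed) ==
	      offsetof(struct rseq_event, sched_switch) + 1,
	      "sched_switch and ids_changed must be adjacent");
static_assert(offsetof(struct rseq_event, sched_switch) % 2 == 0,
	      "the pair must be 16-bit aligned for a single store");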
+
+/**
+ * struct rseq_ids - Cache for ids, which need to be updated
+ * @cpu_cid: Compound of @cpu_id and @mm_cid to make the
+ * compiler emit a single compare on 64-bit
+ * @cpu_id: The CPU ID which was written last to user space
+ * @mm_cid: The MM CID which was written last to user space
+ *
+ * @cpu_id and @mm_cid are updated when the data is written to user space.
+ */
+struct rseq_ids {
+ union {
+ u64 cpu_cid;
+ struct {
+ u32 cpu_id;
+ u32 mm_cid;
+ };
+ };
+};
+
+/**
+ * struct rseq_data - Storage for all rseq related data
+ * @usrptr: Pointer to the registered user space RSEQ memory
+ * @len: Length of the RSEQ region
+ * @sig:		Signature of critical section abort IPs
+ * @event: Storage for event management
+ * @ids: Storage for cached CPU ID and MM CID
+ */
+struct rseq_data {
+ struct rseq __user *usrptr;
+ u32 len;
+ u32 sig;
+ struct rseq_event event;
+ struct rseq_ids ids;
+};
+
+#else /* CONFIG_RSEQ */
+struct rseq_data { };
+#endif /* !CONFIG_RSEQ */
+
+#ifdef CONFIG_SCHED_MM_CID
+
+#define MM_CID_UNSET BIT(31)
+#define MM_CID_ONCPU BIT(30)
+#define MM_CID_TRANSIT BIT(29)
+
+/**
+ * struct sched_mm_cid - Storage for per task MM CID data
+ * @active: MM CID is active for the task
+ * @cid: The CID associated to the task either permanently or
+ * borrowed from the CPU
+ */
+struct sched_mm_cid {
+ unsigned int active;
+ unsigned int cid;
+};
+
+/**
+ * struct mm_cid_pcpu - Storage for per CPU MM_CID data
+ * @cid: The CID associated to the CPU either permanently or
+ * while a task with a CID is running
+ */
+struct mm_cid_pcpu {
+ unsigned int cid;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct mm_mm_cid - Storage for per MM CID data
+ * @pcpu: Per CPU storage for CIDs associated to a CPU
+ * @percpu: Set, when CIDs are in per CPU mode
+ * @transit: Set to MM_CID_TRANSIT during a mode change transition phase
+ * @max_cids: The exclusive maximum CID value for allocation and convergence
+ * @irq_work: irq_work to handle the affinity mode change case
+ * @work: Regular work to handle the affinity mode change case
+ * @lock: Spinlock to protect against affinity setting which can't take @mutex
+ * @mutex: Mutex to serialize forks and exits related to this mm
+ * @nr_cpus_allowed: The number of CPUs in the per MM allowed CPUs map. The map
+ * only ever grows.
+ * @users: The number of tasks sharing this MM. Separate from mm::mm_users
+ * as that is modified by mmget()/mmput() by other entities which
+ * do not actually share the MM.
+ * @pcpu_thrs: Threshold for switching back from per CPU mode
+ * @update_deferred: A deferred switch back to per task mode is pending.
+ */
+struct mm_mm_cid {
+ /* Hotpath read mostly members */
+ struct mm_cid_pcpu __percpu *pcpu;
+ unsigned int percpu;
+ unsigned int transit;
+ unsigned int max_cids;
+
+ /* Rarely used. Moves @lock and @mutex into the second cacheline */
+ struct irq_work irq_work;
+ struct work_struct work;
+
+ raw_spinlock_t lock;
+ struct mutex mutex;
+
+ /* Low frequency modified */
+ unsigned int nr_cpus_allowed;
+ unsigned int users;
+ unsigned int pcpu_thrs;
+ unsigned int update_deferred;
+} ____cacheline_aligned_in_smp;
+#else /* CONFIG_SCHED_MM_CID */
+struct mm_mm_cid { };
+struct sched_mm_cid { };
+#endif /* !CONFIG_SCHED_MM_CID */
+
+#endif
diff --git a/include/linux/rslib.h b/include/linux/rslib.h
index 238bb85243d3..a04dacbdc8ae 100644
--- a/include/linux/rslib.h
+++ b/include/linux/rslib.h
@@ -10,7 +10,6 @@
#ifndef _RSLIB_H_
#define _RSLIB_H_
-#include <linux/list.h>
#include <linux/types.h> /* for gfp_t */
#include <linux/gfp.h> /* for GFP_KERNEL */
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index bd611e26291d..95da051fb155 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -42,7 +42,7 @@ static inline time64_t rtc_tm_sub(struct rtc_time *lhs, struct rtc_time *rhs)
#include <linux/timerqueue.h>
#include <linux/workqueue.h>
-extern struct class *rtc_class;
+extern const struct class rtc_class;
/*
* For these RTC methods the device parameter is the physical device
@@ -66,6 +66,8 @@ struct rtc_class_ops {
int (*alarm_irq_enable)(struct device *, unsigned int enabled);
int (*read_offset)(struct device *, long *offset);
int (*set_offset)(struct device *, long offset);
+ int (*param_get)(struct device *, struct rtc_param *param);
+ int (*param_set)(struct device *, struct rtc_param *param);
};
struct rtc_device;
@@ -80,6 +82,7 @@ struct rtc_timer {
/* flags */
#define RTC_DEV_BUSY 0
+#define RTC_NO_CDEV 1
struct rtc_device {
struct device dev;
@@ -107,8 +110,6 @@ struct rtc_device {
struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
int pie_enabled;
struct work_struct irqwork;
- /* Some hardware can't support UIE mode */
- int uie_unsupported;
/*
* This offset specifies the update timing of the RTC.
@@ -145,6 +146,7 @@ struct rtc_device {
time64_t range_min;
timeu64_t range_max;
+ timeu64_t alarm_offset_max;
time64_t start_secs;
time64_t offset_secs;
bool set_start_time;
@@ -168,6 +170,7 @@ struct rtc_device {
/* useful timestamps */
#define RTC_TIMESTAMP_BEGIN_0000 -62167219200ULL /* 0000-01-01 00:00:00 */
#define RTC_TIMESTAMP_BEGIN_1900 -2208988800LL /* 1900-01-01 00:00:00 */
+#define RTC_TIMESTAMP_EPOCH_GPS 315964800LL /* 1980-01-06 00:00:00 */
#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */
#define RTC_TIMESTAMP_END_2063 2966371199LL /* 2063-12-31 23:59:59 */
#define RTC_TIMESTAMP_END_2079 3471292799LL /* 2079-12-31 23:59:59 */
@@ -223,6 +226,23 @@ static inline bool is_leap_year(unsigned int year)
return (!(year % 4) && (year % 100)) || !(year % 400);
}
+/**
+ * rtc_bound_alarmtime() - Return alarm time bound by rtc limit
+ * @rtc: Pointer to rtc device structure
+ * @requested: Requested alarm timeout
+ *
+ * Return: Alarm timeout bound by maximum alarm time supported by rtc.
+ */
+static inline ktime_t rtc_bound_alarmtime(struct rtc_device *rtc,
+ ktime_t requested)
+{
+ if (rtc->alarm_offset_max &&
+ rtc->alarm_offset_max * MSEC_PER_SEC < ktime_to_ms(requested))
+ return ms_to_ktime(rtc->alarm_offset_max * MSEC_PER_SEC);
+
+ return requested;
+}
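
/*
 * [Editor's illustrative sketch.] A caller clamping a requested alarm,
 * e.g. an alarmtimer-style user asking for a one-hour wakeup:
 */
static ktime_t example_alarm(struct rtc_device *rtc)
{
	return rtc_bound_alarmtime(rtc, ktime_set(3600, 0));
}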
+
#define devm_rtc_register_device(device) \
__devm_rtc_register_device(THIS_MODULE, device)
diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h
index 67ee9d20cc5a..01da4582db6d 100644
--- a/include/linux/rtc/ds1685.h
+++ b/include/linux/rtc/ds1685.h
@@ -8,7 +8,7 @@
* include larger, battery-backed NV-SRAM, burst-mode access, and an RTC
* write counter.
*
- * Copyright (C) 2011-2014 Joshua Kinard <kumba@gentoo.org>.
+ * Copyright (C) 2011-2014 Joshua Kinard <linux@kumba.dev>.
* Copyright (C) 2009 Matthias Fuchs <matthias.fuchs@esd-electronics.com>.
*
* References:
@@ -46,7 +46,6 @@ struct ds1685_priv {
u32 regstep;
int irq_num;
bool bcd_mode;
- bool no_irq;
u8 (*read)(struct ds1685_priv *, int);
void (*write)(struct ds1685_priv *, int, u8);
void (*prepare_poweroff)(void);
diff --git a/include/linux/rtc/m48t59.h b/include/linux/rtc/m48t59.h
index 9465d5405fe2..373ba77071c6 100644
--- a/include/linux/rtc/m48t59.h
+++ b/include/linux/rtc/m48t59.h
@@ -56,6 +56,9 @@ struct m48t59_plat_data {
void __iomem *ioaddr;
/* offset to RTC registers, automatically set according to the type */
unsigned int offset;
+
+ /* YY digits (in RTC) are offset, i.e. year is 1900 + yy_offset + YY */
+ int yy_offset;
};
#endif /* _LINUX_RTC_M48T59_H_ */
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index d1672de9ca89..ede4c6bf6f22 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -13,11 +13,48 @@
#ifndef __LINUX_RT_MUTEX_H
#define __LINUX_RT_MUTEX_H
+#include <linux/compiler.h>
#include <linux/linkage.h>
-#include <linux/rbtree.h>
-#include <linux/spinlock_types.h>
+#include <linux/rbtree_types.h>
+#include <linux/spinlock_types_raw.h>
-extern int max_lock_depth; /* for sysctl */
+extern int max_lock_depth;
+
+struct rt_mutex_base {
+ raw_spinlock_t wait_lock;
+ struct rb_root_cached waiters;
+ struct task_struct *owner;
+};
+
+#define __RT_MUTEX_BASE_INITIALIZER(rtbasename) \
+{ \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(rtbasename.wait_lock), \
+ .waiters = RB_ROOT_CACHED, \
+ .owner = NULL \
+}
+
+/**
+ * rt_mutex_base_is_locked - is the rtmutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns true if the mutex is locked, false if unlocked.
+ */
+static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
+{
+ return READ_ONCE(lock->owner) != NULL;
+}
+
+#ifdef CONFIG_RT_MUTEXES
+#define RT_MUTEX_HAS_WAITERS 1UL
+
+static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
+{
+ unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
+
+ return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
+}
+#endif
+extern void rt_mutex_base_init(struct rt_mutex_base *rtb);
/**
* The rt_mutex structure
@@ -28,9 +65,7 @@ extern int max_lock_depth; /* for sysctl */
* @owner: the mutex owner
*/
struct rt_mutex {
- raw_spinlock_t wait_lock;
- struct rb_root_cached waiters;
- struct task_struct *owner;
+ struct rt_mutex_base rtmutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
@@ -52,43 +87,44 @@ do { \
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
- , .dep_map = { .name = #mutexname }
+#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
+ .dep_map = { \
+ .name = #mutexname, \
+ .wait_type_inner = LD_WAIT_SLEEP, \
+ }
#else
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
#endif
-#define __RT_MUTEX_INITIALIZER(mutexname) \
- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .waiters = RB_ROOT_CACHED \
- , .owner = NULL \
- __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
+#define __RT_MUTEX_INITIALIZER(mutexname) \
+{ \
+ .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex), \
+ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
+}
#define DEFINE_RT_MUTEX(mutexname) \
struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
-/**
- * rt_mutex_is_locked - is the mutex locked
- * @lock: the mutex to be queried
- *
- * Returns 1 if the mutex is locked, 0 if unlocked.
- */
-static inline int rt_mutex_is_locked(struct rt_mutex *lock)
-{
- return lock->owner != NULL;
-}
-
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
+extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock);
#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
+#define rt_mutex_lock_nest_lock(lock, nest_lock) \
+ do { \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+ _rt_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ } while (0)
+
#else
extern void rt_mutex_lock(struct rt_mutex *lock);
#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
+#define rt_mutex_lock_nest_lock(lock, nest_lock) rt_mutex_lock(lock)
#endif
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
+extern int rt_mutex_lock_killable(struct rt_mutex *lock);
extern int rt_mutex_trylock(struct rt_mutex *lock);
extern void rt_mutex_unlock(struct rt_mutex *lock);
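
Two details worth illustrating from the hunk above: bit 0 of ->owner doubles as RT_MUTEX_HAS_WAITERS, which is why rt_mutex_owner() masks it off before returning the task pointer, and rt_mutex_lock_killable() is new alongside the interruptible variant. A hedged sketch of the latter (demo_lock and demo_op() are hypothetical):

	static DEFINE_RT_MUTEX(demo_lock);

	static int demo_op(void)
	{
		int ret = rt_mutex_lock_killable(&demo_lock);

		if (ret)	/* a fatal signal arrived while blocked */
			return ret;
		/* ... critical section ... */
		rt_mutex_unlock(&demo_lock);
		return 0;
	}
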
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index bb9cb84114c1..ea39dd23a197 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -10,23 +10,32 @@
#include <uapi/linux/rtnetlink.h>
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
+
+static inline int rtnetlink_maybe_send(struct sk_buff *skb, struct net *net,
+ u32 pid, u32 group, int echo)
+{
+ return !skb ? 0 : rtnetlink_send(skb, net, pid, group, echo);
+}
+
extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
- u32 group, struct nlmsghdr *nlh, gfp_t flags);
+ u32 group, const struct nlmsghdr *nlh, gfp_t flags);
extern void rtnl_set_sk_err(struct net *net, u32 group, int error);
extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
u32 id, long expires, u32 error);
-void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, gfp_t flags,
+ u32 portid, const struct nlmsghdr *nlh);
void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change,
gfp_t flags, int *new_nsid, int new_ifindex);
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
unsigned change, u32 event,
gfp_t flags, int *new_nsid,
- int new_ifindex);
+ int new_ifindex, u32 portid,
+ const struct nlmsghdr *nlh);
void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
- gfp_t flags);
+ gfp_t flags, u32 portid, const struct nlmsghdr *nlh);
/* RTNL is used as a global lock for all changes to network configuration */
@@ -34,13 +43,19 @@ extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
+extern int rtnl_lock_interruptible(void);
extern int rtnl_lock_killable(void);
extern bool refcount_dec_and_rtnl_lock(refcount_t *r);
extern wait_queue_head_t netdev_unregistering_wq;
+extern atomic_t dev_unreg_count;
extern struct rw_semaphore pernet_ops_rwsem;
extern struct rw_semaphore net_rwsem;
+#define ASSERT_RTNL() \
+ WARN_ONCE(!rtnl_is_locked(), \
+ "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__)
+
#ifdef CONFIG_PROVE_LOCKING
extern bool lockdep_rtnl_is_held(void);
#else
@@ -61,25 +76,88 @@ static inline bool lockdep_rtnl_is_held(void)
rcu_dereference_check(p, lockdep_rtnl_is_held())
/**
- * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking
- * @p: The pointer to read, prior to dereference
- *
- * Do an rcu_dereference_bh(p), but check caller either holds rcu_read_lock_bh()
- * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference_bh()
- */
-#define rcu_dereference_bh_rtnl(p) \
- rcu_dereference_bh_check(p, lockdep_rtnl_is_held())
-
-/**
* rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
* @p: The pointer to read, prior to dereferencing
*
- * Return the value of the specified RCU-protected pointer, but omit
+ * Return: the value of the specified RCU-protected pointer, but omit
* the READ_ONCE(), because caller holds RTNL.
*/
#define rtnl_dereference(p) \
rcu_dereference_protected(p, lockdep_rtnl_is_held())
+/**
+ * rcu_replace_pointer_rtnl - replace an RCU pointer under rtnl_lock, returning
+ * its old value
+ * @rp: RCU pointer, whose value is returned
+ * @p: regular pointer
+ *
+ * Perform a replacement under rtnl_lock, where @rp is an RCU-annotated
+ * pointer. The old value of @rp is returned, and @rp is set to @p
+ */
+#define rcu_replace_pointer_rtnl(rp, p) \
+ rcu_replace_pointer(rp, p, lockdep_rtnl_is_held())
+
+#ifdef CONFIG_DEBUG_NET_SMALL_RTNL
+void __rtnl_net_lock(struct net *net);
+void __rtnl_net_unlock(struct net *net);
+void rtnl_net_lock(struct net *net);
+void rtnl_net_unlock(struct net *net);
+int rtnl_net_trylock(struct net *net);
+int rtnl_net_lock_killable(struct net *net);
+int rtnl_net_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b);
+
+bool rtnl_net_is_locked(struct net *net);
+
+#define ASSERT_RTNL_NET(net) \
+ WARN_ONCE(!rtnl_net_is_locked(net), \
+ "RTNL_NET: assertion failed at %s (%d)\n", \
+ __FILE__, __LINE__)
+
+bool lockdep_rtnl_net_is_held(struct net *net);
+
+#define rcu_dereference_rtnl_net(net, p) \
+ rcu_dereference_check(p, lockdep_rtnl_net_is_held(net))
+#define rtnl_net_dereference(net, p) \
+ rcu_dereference_protected(p, lockdep_rtnl_net_is_held(net))
+#define rcu_replace_pointer_rtnl_net(net, rp, p) \
+ rcu_replace_pointer(rp, p, lockdep_rtnl_net_is_held(net))
+#else
+static inline void __rtnl_net_lock(struct net *net) {}
+static inline void __rtnl_net_unlock(struct net *net) {}
+
+static inline void rtnl_net_lock(struct net *net)
+{
+ rtnl_lock();
+}
+
+static inline void rtnl_net_unlock(struct net *net)
+{
+ rtnl_unlock();
+}
+
+static inline int rtnl_net_trylock(struct net *net)
+{
+ return rtnl_trylock();
+}
+
+static inline int rtnl_net_lock_killable(struct net *net)
+{
+ return rtnl_lock_killable();
+}
+
+static inline void ASSERT_RTNL_NET(struct net *net)
+{
+ ASSERT_RTNL();
+}
+
+#define rcu_dereference_rtnl_net(net, p) \
+ rcu_dereference_rtnl(p)
+#define rtnl_net_dereference(net, p) \
+ rtnl_dereference(p)
+#define rcu_replace_pointer_rtnl_net(net, rp, p) \
+ rcu_replace_pointer_rtnl(rp, p)
+#endif
+
static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
{
return rtnl_dereference(dev->ingress_queue);
@@ -100,15 +178,18 @@ void net_dec_ingress_queue(void);
#ifdef CONFIG_NET_EGRESS
void net_inc_egress_queue(void);
void net_dec_egress_queue(void);
+void netdev_xmit_skip_txqueue(bool skip);
#endif
void rtnetlink_init(void);
void __rtnl_unlock(void);
void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail);
-#define ASSERT_RTNL() \
- WARN_ONCE(!rtnl_is_locked(), \
- "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__)
+/* Shared by rtnl_fdb_dump() and various ndo_fdb_dump() helpers. */
+struct ndo_fdb_dump_context {
+ unsigned long ifindex;
+ unsigned long fdb_idx;
+};
extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
@@ -134,4 +215,31 @@ extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
int (*vlan_fill)(struct sk_buff *skb,
struct net_device *dev,
u32 filter_mask));
+
+extern void rtnl_offload_xstats_notify(struct net_device *dev);
+
+static inline int rtnl_has_listeners(const struct net *net, u32 group)
+{
+ struct sock *rtnl = net->rtnl;
+
+ return netlink_has_listeners(rtnl, group);
+}
+
+/**
+ * rtnl_notify_needed - check if notification is needed
+ * @net: Pointer to the net namespace
+ * @nlflags: netlink ingress message flags
+ * @group: rtnl group
+ *
+ * Based on the ingress message flags and rtnl group, returns true
+ * if a notification is needed, false otherwise.
+ */
+static inline bool
+rtnl_notify_needed(const struct net *net, u16 nlflags, u32 group)
+{
+ return (nlflags & NLM_F_ECHO) || rtnl_has_listeners(net, group);
+}
+
+void netif_set_operstate(struct net_device *dev, int newstate);
+
#endif /* __LINUX_RTNETLINK_H */
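
A hedged sketch of how rtnl_notify_needed() is meant to pay off: event producers can skip allocating and filling a notification skb entirely when nobody is listening and the sender did not request an echo. maybe_notify() is hypothetical.

	static void maybe_notify(struct net *net, u16 nlflags)
	{
		if (!rtnl_notify_needed(net, nlflags, RTNLGRP_LINK))
			return;	/* no listeners, no NLM_F_ECHO: skip the skb */

		/* ... allocate the skb, fill the message, rtnl_notify() ... */
	}
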
diff --git a/include/linux/rtsx_common.h b/include/linux/rtsx_common.h
index bf290ad14c57..da9c8c6b5d50 100644
--- a/include/linux/rtsx_common.h
+++ b/include/linux/rtsx_common.h
@@ -12,7 +12,6 @@
#define DRV_NAME_RTSX_PCI "rtsx_pci"
#define DRV_NAME_RTSX_PCI_SDMMC "rtsx_pci_sdmmc"
-#define DRV_NAME_RTSX_PCI_MS "rtsx_pci_ms"
#define RTSX_REG_PAIR(addr, val) (((u32)(addr) << 16) | (u8)(val))
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 6f155f99aa16..3c5689356004 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -60,6 +60,7 @@
#define SD_EXIST (1 << 16)
#define DELINK_INT GPIO0_INT
#define MS_OC_INT (1 << 23)
+#define SD_OVP_INT (1 << 23)
#define SD_OC_INT (1 << 22)
#define CARD_INT (XD_INT | MS_INT | SD_INT)
@@ -80,6 +81,7 @@
#define OC_INT_EN (1 << 23)
#define DELINK_INT_EN GPIO0_INT_EN
#define MS_OC_INT_EN (1 << 23)
+#define SD_OVP_INT_EN (1 << 23)
#define SD_OC_INT_EN (1 << 22)
#define RTSX_DUM_REG 0x1C
@@ -583,6 +585,7 @@
#define OBFF_DISABLE 0x00
#define CDRESUMECTL 0xFE52
+#define CDGW 0xFE53
#define WAKE_SEL_CTL 0xFE54
#define PCLK_CTL 0xFE55
#define PCLK_MODE_SEL 0x20
@@ -764,6 +767,9 @@
#define SD_VIO_LDO_1V8 0x40
#define SD_VIO_LDO_3V3 0x70
+#define RTS5264_AUTOLOAD_CFG2 0xFF7D
+#define RTS5264_CHIP_RST_N_SEL (1 << 6)
+
#define RTS5260_AUTOLOAD_CFG4 0xFF7F
#define RTS5260_MIMO_DISABLE 0x8A
/*RTS5261*/
@@ -1067,6 +1073,9 @@
#define PCR_SETTING_REG1 0x724
#define PCR_SETTING_REG2 0x814
#define PCR_SETTING_REG3 0x747
+#define PCR_SETTING_REG4 0x818
+#define PCR_SETTING_REG5 0x81C
+
#define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0)
@@ -1095,7 +1104,7 @@ struct pcr_ops {
unsigned int (*cd_deglitch)(struct rtsx_pcr *pcr);
int (*conv_clk_and_div_n)(int clk, int dir);
void (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
- void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
+ void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state, bool runtime);
void (*stop_cmd)(struct rtsx_pcr *pcr);
void (*set_aspm)(struct rtsx_pcr *pcr, bool enable);
@@ -1109,6 +1118,7 @@ struct pcr_ops {
};
enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
+enum ASPM_MODE {ASPM_MODE_CFG, ASPM_MODE_REG};
#define ASPM_L1_1_EN BIT(0)
#define ASPM_L1_2_EN BIT(1)
@@ -1150,6 +1160,8 @@ struct rtsx_cr_option {
bool ocp_en;
u8 sd_400mA_ocp_thd;
u8 sd_800mA_ocp_thd;
+ u8 sd_cd_reverse_en;
+ u8 sd_wp_reverse_en;
};
/*
@@ -1200,8 +1212,6 @@ struct rtsx_pcr {
unsigned int card_exist;
struct delayed_work carddet_work;
- struct delayed_work idle_work;
- struct delayed_work rtd3_work;
spinlock_t lock;
struct mutex pcr_mutex;
@@ -1211,7 +1221,6 @@ struct rtsx_pcr {
unsigned int cur_clock;
bool remove_pci;
bool msi_en;
- bool is_runtime_suspended;
#define EXTRA_CAPS_SD_SDR50 (1 << 0)
#define EXTRA_CAPS_SD_SDR104 (1 << 1)
@@ -1234,6 +1243,7 @@ struct rtsx_pcr {
u8 card_drive_sel;
#define ASPM_L1_EN 0x02
u8 aspm_en;
+ enum ASPM_MODE aspm_mode;
bool aspm_enabled;
#define PCR_MS_PMOS (1 << 0)
@@ -1259,6 +1269,7 @@ struct rtsx_pcr {
u8 dma_error_count;
u8 ocp_stat;
u8 ocp_stat2;
+ u8 ovp_stat;
u8 rtd3_en;
};
@@ -1269,6 +1280,7 @@ struct rtsx_pcr {
#define PID_5260 0x5260
#define PID_5261 0x5261
#define PID_5228 0x5228
+#define PID_5264 0x5264
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
@@ -1302,8 +1314,6 @@ void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
u8 cmd_type, u16 reg_addr, u8 mask, u8 data);
void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr);
int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout);
-int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
- int num_sg, bool read, int timeout);
int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
int num_sg, bool read);
void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
diff --git a/include/linux/rtsx_usb.h b/include/linux/rtsx_usb.h
index 159729cffd8e..276b509c03e3 100644
--- a/include/linux/rtsx_usb.h
+++ b/include/linux/rtsx_usb.h
@@ -12,6 +12,10 @@
#include <linux/usb.h>
+#define DRV_NAME_RTSX_USB "rtsx_usb"
+#define DRV_NAME_RTSX_USB_SDMMC "rtsx_usb_sdmmc"
+#define DRV_NAME_RTSX_USB_MS "rtsx_usb_ms"
+
/* related module names */
#define RTSX_USB_SD_CARD 0
#define RTSX_USB_MS_CARD 1
@@ -54,8 +58,6 @@ struct rtsx_ucr {
struct usb_device *pusb_dev;
struct usb_interface *pusb_intf;
struct usb_sg_request current_sg;
- unsigned char *iobuf;
- dma_addr_t iobuf_dma;
struct timer_list sg_timer;
struct mutex dev_mutex;
@@ -97,6 +99,17 @@ extern int rtsx_usb_card_exclusive_check(struct rtsx_ucr *ucr, int card);
#define CD_MASK (SD_CD | MS_CD | XD_CD)
#define SD_WP 0x08
+/* OCPCTL */
+#define MS_OCP_DETECT_EN 0x08
+#define MS_OCP_INT_EN 0x04
+#define MS_OCP_INT_CLR 0x02
+#define MS_OCP_CLEAR 0x01
+
+/* OCPSTAT */
+#define MS_OCP_DETECT 0x80
+#define MS_OCP_NOW 0x02
+#define MS_OCP_EVER 0x01
+
/* reader command field offset & parameters */
#define READ_REG_CMD 0
#define WRITE_REG_CMD 1
diff --git a/include/linux/rv.h b/include/linux/rv.h
new file mode 100644
index 000000000000..92fd467547e7
--- /dev/null
+++ b/include/linux/rv.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Runtime Verification.
+ *
+ * For further information, see: kernel/trace/rv/rv.c.
+ */
+#ifndef _LINUX_RV_H
+#define _LINUX_RV_H
+
+#define MAX_DA_NAME_LEN 32
+#define MAX_DA_RETRY_RACING_EVENTS 3
+
+#ifdef CONFIG_RV
+#include <linux/array_size.h>
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+/*
+ * Deterministic automaton per-object variables.
+ */
+struct da_monitor {
+ bool monitoring;
+ unsigned int curr_state;
+};
+
+#ifdef CONFIG_RV_LTL_MONITOR
+
+/*
+ * In the future, if the number of atomic propositions or the size of the
+ * Buchi automaton grows larger, we can switch to dynamic allocation. For
+ * now, the code is simpler this way.
+ */
+#define RV_MAX_LTL_ATOM 32
+#define RV_MAX_BA_STATES 32
+
+/**
+ * struct ltl_monitor - A linear temporal logic runtime verification monitor
+ * @states: States in the Buchi automaton. As a Buchi automaton is a
+ * non-deterministic state machine, the monitor can be in multiple
+ * states simultaneously. This is a bitmask of all possible states.
+ * If this is zero, that means either:
+ * - The monitor has not started yet (e.g. because not all
+ * atomic propositions are known).
+ * - There is no possible state to be in. In other words, a
+ * violation of the LTL property is detected.
+ * @atoms: The values of atomic propositions.
+ * @unknown_atoms: Atomic propositions which are still unknown.
+ */
+struct ltl_monitor {
+ DECLARE_BITMAP(states, RV_MAX_BA_STATES);
+ DECLARE_BITMAP(atoms, RV_MAX_LTL_ATOM);
+ DECLARE_BITMAP(unknown_atoms, RV_MAX_LTL_ATOM);
+};
+
+static inline bool rv_ltl_valid_state(struct ltl_monitor *mon)
+{
+ for (int i = 0; i < ARRAY_SIZE(mon->states); ++i) {
+ if (mon->states[i])
+ return true;
+ }
+ return false;
+}
+
+static inline bool rv_ltl_all_atoms_known(struct ltl_monitor *mon)
+{
+ for (int i = 0; i < ARRAY_SIZE(mon->unknown_atoms); ++i) {
+ if (mon->unknown_atoms[i])
+ return false;
+ }
+ return true;
+}
+
+#else
+
+struct ltl_monitor {};
+
+#endif /* CONFIG_RV_LTL_MONITOR */
+
+#define RV_PER_TASK_MONITOR_INIT (CONFIG_RV_PER_TASK_MONITORS)
+
+union rv_task_monitor {
+ struct da_monitor da_mon;
+ struct ltl_monitor ltl_mon;
+};
+
+#ifdef CONFIG_RV_REACTORS
+struct rv_reactor {
+ const char *name;
+ const char *description;
+ __printf(1, 0) void (*react)(const char *msg, va_list args);
+ struct list_head list;
+};
+#endif
+
+struct rv_monitor {
+ const char *name;
+ const char *description;
+ bool enabled;
+ int (*enable)(void);
+ void (*disable)(void);
+ void (*reset)(void);
+#ifdef CONFIG_RV_REACTORS
+ struct rv_reactor *reactor;
+ __printf(1, 0) void (*react)(const char *msg, va_list args);
+#endif
+ struct list_head list;
+ struct rv_monitor *parent;
+ struct dentry *root_d;
+};
+
+bool rv_monitoring_on(void);
+int rv_unregister_monitor(struct rv_monitor *monitor);
+int rv_register_monitor(struct rv_monitor *monitor, struct rv_monitor *parent);
+int rv_get_task_monitor_slot(void);
+void rv_put_task_monitor_slot(int slot);
+
+#ifdef CONFIG_RV_REACTORS
+int rv_unregister_reactor(struct rv_reactor *reactor);
+int rv_register_reactor(struct rv_reactor *reactor);
+__printf(2, 3)
+void rv_react(struct rv_monitor *monitor, const char *msg, ...);
+#else
+__printf(2, 3)
+static inline void rv_react(struct rv_monitor *monitor, const char *msg, ...)
+{
+}
+#endif /* CONFIG_RV_REACTORS */
+
+#endif /* CONFIG_RV */
+#endif /* _LINUX_RV_H */
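
A hedged reading of the @states documentation above: an empty state mask means either "monitor not started" or "property violated", and once every atomic proposition is known only the violation reading remains. ltl_violation_detected() below is a hypothetical wrapper over this header's helpers.

	static bool ltl_violation_detected(struct ltl_monitor *mon)
	{
		return rv_ltl_all_atoms_known(mon) && !rv_ltl_valid_state(mon);
	}
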
diff --git a/include/linux/rw_hint.h b/include/linux/rw_hint.h
new file mode 100644
index 000000000000..adcc43042c90
--- /dev/null
+++ b/include/linux/rw_hint.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_RW_HINT_H
+#define _LINUX_RW_HINT_H
+
+#include <linux/build_bug.h>
+#include <linux/compiler_attributes.h>
+#include <uapi/linux/fcntl.h>
+
+/* Block storage write lifetime hint values. */
+enum rw_hint {
+ WRITE_LIFE_NOT_SET = RWH_WRITE_LIFE_NOT_SET,
+ WRITE_LIFE_NONE = RWH_WRITE_LIFE_NONE,
+ WRITE_LIFE_SHORT = RWH_WRITE_LIFE_SHORT,
+ WRITE_LIFE_MEDIUM = RWH_WRITE_LIFE_MEDIUM,
+ WRITE_LIFE_LONG = RWH_WRITE_LIFE_LONG,
+ WRITE_LIFE_EXTREME = RWH_WRITE_LIFE_EXTREME,
+ WRITE_LIFE_HINT_NR,
+} __packed;
+
+/* Sparse ignores __packed annotations on enums, hence the #ifndef below. */
+#ifndef __CHECKER__
+static_assert(sizeof(enum rw_hint) == 1);
+#endif
+
+#endif /* _LINUX_RW_HINT_H */
diff --git a/include/linux/rwbase_rt.h b/include/linux/rwbase_rt.h
new file mode 100644
index 000000000000..f2394a409c9d
--- /dev/null
+++ b/include/linux/rwbase_rt.h
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef _LINUX_RWBASE_RT_H
+#define _LINUX_RWBASE_RT_H
+
+#include <linux/rtmutex.h>
+#include <linux/atomic.h>
+
+#define READER_BIAS (1U << 31)
+#define WRITER_BIAS (1U << 30)
+
+struct rwbase_rt {
+ atomic_t readers;
+ struct rt_mutex_base rtmutex;
+};
+
+#define __RWBASE_INITIALIZER(name) \
+{ \
+ .readers = ATOMIC_INIT(READER_BIAS), \
+ .rtmutex = __RT_MUTEX_BASE_INITIALIZER(name.rtmutex), \
+}
+
+#define init_rwbase_rt(rwbase) \
+ do { \
+ rt_mutex_base_init(&(rwbase)->rtmutex); \
+ atomic_set(&(rwbase)->readers, READER_BIAS); \
+ } while (0)
+
+
+static __always_inline bool rw_base_is_locked(const struct rwbase_rt *rwb)
+{
+ return atomic_read(&rwb->readers) != READER_BIAS;
+}
+
+static __always_inline bool rw_base_is_write_locked(const struct rwbase_rt *rwb)
+{
+ return atomic_read(&rwb->readers) == WRITER_BIAS;
+}
+
+static __always_inline bool rw_base_is_contended(const struct rwbase_rt *rwb)
+{
+ return atomic_read(&rwb->readers) > 0;
+}
+
+#endif /* _LINUX_RWBASE_RT_H */
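
A hedged sketch of the counter encoding the predicates above imply; the actual state transitions live in kernel/locking/rwbase_rt.c, not in this header.

	static const char *rwbase_state_sketch(const struct rwbase_rt *rwb)
	{
		if (!rw_base_is_locked(rwb))		/* readers == READER_BIAS */
			return "unlocked";
		if (rw_base_is_write_locked(rwb))	/* readers == WRITER_BIAS */
			return "write locked";
		return rw_base_is_contended(rwb) ? "read locked, contended"
						 : "read locked";
	}
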
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 7ce9a51ae5c0..5b87c6f4a243 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -1,8 +1,8 @@
#ifndef __LINUX_RWLOCK_H
#define __LINUX_RWLOCK_H
-#ifndef __LINUX_SPINLOCK_H
-# error "please don't include this file directly"
+#ifndef __LINUX_INSIDE_SPINLOCK_H
+# error "Please do not include this file directly."
#endif
/*
@@ -30,31 +30,16 @@ do { \
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
extern int do_raw_read_trylock(rwlock_t *lock);
extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
-#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
#else
-
-#ifndef arch_read_lock_flags
-# define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
-#endif
-
-#ifndef arch_write_lock_flags
-# define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#endif
-
# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_read_lock_flags(lock, flags) \
- do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_write_lock_flags(lock, flags) \
- do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
#endif
@@ -70,6 +55,12 @@ do { \
#define write_lock(lock) _raw_write_lock(lock)
#define read_lock(lock) _raw_read_lock(lock)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define write_lock_nested(lock, subclass) _raw_write_lock_nested(lock, subclass)
+#else
+#define write_lock_nested(lock, subclass) _raw_write_lock(lock)
+#endif
+
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define read_lock_irqsave(lock, flags) \
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index abfb53ab11be..31d3d1116323 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -2,7 +2,7 @@
#define __LINUX_RWLOCK_API_SMP_H
#ifndef __LINUX_SPINLOCK_API_SMP_H
-# error "please don't include this file directly"
+# error "Please do not include this file directly."
#endif
/*
@@ -17,6 +17,7 @@
void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
@@ -157,8 +158,7 @@ static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
- do_raw_read_lock_flags, &flags);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
return flags;
}
@@ -184,8 +184,7 @@ static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
local_irq_save(flags);
preempt_disable();
rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
- do_raw_write_lock_flags, &flags);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
return flags;
}
@@ -211,6 +210,13 @@ static inline void __raw_write_lock(rwlock_t *lock)
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
+static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass)
+{
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
static inline void __raw_write_unlock(rwlock_t *lock)
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
new file mode 100644
index 000000000000..7d81fc6918ee
--- /dev/null
+++ b/include/linux/rwlock_rt.h
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_RT_H
+#error Do not #include directly. Use <linux/spinlock.h>.
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
+ struct lock_class_key *key);
+#else
+static inline void __rt_rwlock_init(rwlock_t *rwlock, char *name,
+ struct lock_class_key *key)
+{
+}
+#endif
+
+#define rwlock_init(rwl) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ init_rwbase_rt(&(rwl)->rwbase); \
+ __rt_rwlock_init(rwl, #rwl, &__key); \
+} while (0)
+
+extern void rt_read_lock(rwlock_t *rwlock) __acquires(rwlock);
+extern int rt_read_trylock(rwlock_t *rwlock);
+extern void rt_read_unlock(rwlock_t *rwlock) __releases(rwlock);
+extern void rt_write_lock(rwlock_t *rwlock) __acquires(rwlock);
+extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(rwlock);
+extern int rt_write_trylock(rwlock_t *rwlock);
+extern void rt_write_unlock(rwlock_t *rwlock) __releases(rwlock);
+
+static __always_inline void read_lock(rwlock_t *rwlock)
+{
+ rt_read_lock(rwlock);
+}
+
+static __always_inline void read_lock_bh(rwlock_t *rwlock)
+{
+ local_bh_disable();
+ rt_read_lock(rwlock);
+}
+
+static __always_inline void read_lock_irq(rwlock_t *rwlock)
+{
+ rt_read_lock(rwlock);
+}
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ rt_read_lock(lock); \
+ flags = 0; \
+ } while (0)
+
+#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
+
+static __always_inline void read_unlock(rwlock_t *rwlock)
+{
+ rt_read_unlock(rwlock);
+}
+
+static __always_inline void read_unlock_bh(rwlock_t *rwlock)
+{
+ rt_read_unlock(rwlock);
+ local_bh_enable();
+}
+
+static __always_inline void read_unlock_irq(rwlock_t *rwlock)
+{
+ rt_read_unlock(rwlock);
+}
+
+static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock,
+ unsigned long flags)
+{
+ rt_read_unlock(rwlock);
+}
+
+static __always_inline void write_lock(rwlock_t *rwlock)
+{
+ rt_write_lock(rwlock);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass)
+{
+ rt_write_lock_nested(rwlock, subclass);
+}
+#else
+#define write_lock_nested(lock, subclass) rt_write_lock(((void)(subclass), (lock)))
+#endif
+
+static __always_inline void write_lock_bh(rwlock_t *rwlock)
+{
+ local_bh_disable();
+ rt_write_lock(rwlock);
+}
+
+static __always_inline void write_lock_irq(rwlock_t *rwlock)
+{
+ rt_write_lock(rwlock);
+}
+
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ rt_write_lock(lock); \
+ flags = 0; \
+ } while (0)
+
+#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
+
+#define write_trylock_irqsave(lock, flags) \
+({ \
+ int __locked; \
+ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __locked = write_trylock(lock); \
+ __locked; \
+})
+
+static __always_inline void write_unlock(rwlock_t *rwlock)
+{
+ rt_write_unlock(rwlock);
+}
+
+static __always_inline void write_unlock_bh(rwlock_t *rwlock)
+{
+ rt_write_unlock(rwlock);
+ local_bh_enable();
+}
+
+static __always_inline void write_unlock_irq(rwlock_t *rwlock)
+{
+ rt_write_unlock(rwlock);
+}
+
+static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock,
+ unsigned long flags)
+{
+ rt_write_unlock(rwlock);
+}
+
+#define rwlock_is_contended(lock) (((void)(lock), 0))
+
+#endif /* __LINUX_RWLOCK_RT_H */
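
One behavioural point the definitions above encode: on PREEMPT_RT the irqsave variants do not disable interrupts, so @flags is forced to 0 and ignored on restore. A hedged sketch showing that callers nevertheless stay source-compatible (demo_lock and demo_value are hypothetical):

	static DEFINE_RWLOCK(demo_lock);
	static int demo_value;

	static int demo_read(void)
	{
		unsigned long flags;
		int val;

		/* On RT this neither disables IRQs nor saves real flags. */
		read_lock_irqsave(&demo_lock, flags);
		val = demo_value;
		read_unlock_irqrestore(&demo_lock, flags);
		return val;
	}
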
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index 3bd03e18061c..1948442e7750 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -1,9 +1,23 @@
#ifndef __LINUX_RWLOCK_TYPES_H
#define __LINUX_RWLOCK_TYPES_H
+#if !defined(__LINUX_SPINLOCK_TYPES_H)
+# error "Do not include directly, include spinlock_types.h"
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+ }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
/*
- * include/linux/rwlock_types.h - generic rwlock type definitions
- * and initializers
+ * generic rwlock type definitions and initializers
*
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
@@ -21,16 +35,6 @@ typedef struct {
#define RWLOCK_MAGIC 0xdeaf1eed
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) \
- .dep_map = { \
- .name = #lockname, \
- .wait_type_inner = LD_WAIT_CONFIG, \
- }
-#else
-# define RW_DEP_MAP_INIT(lockname)
-#endif
-
#ifdef CONFIG_DEBUG_SPINLOCK
#define __RW_LOCK_UNLOCKED(lockname) \
(rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
@@ -46,4 +50,29 @@ typedef struct {
#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+#else /* !CONFIG_PREEMPT_RT */
+
+#include <linux/rwbase_rt.h>
+
+typedef struct {
+ struct rwbase_rt rwbase;
+ atomic_t readers;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#define __RWLOCK_RT_INITIALIZER(name) \
+{ \
+ .rwbase = __RWBASE_INITIALIZER(name), \
+ RW_DEP_MAP_INIT(name) \
+}
+
+#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name)
+
+#define DEFINE_RWLOCK(name) \
+ rwlock_t name = __RW_LOCK_UNLOCKED(name)
+
+#endif /* CONFIG_PREEMPT_RT */
+
#endif /* __LINUX_RWLOCK_TYPES_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index a66038d88878..f1aaf676a874 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -11,11 +11,24 @@
#include <linux/linkage.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
+#include <linux/cleanup.h>
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_SLEEP, \
+ },
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
+
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif
@@ -53,26 +66,26 @@ struct rw_semaphore {
#endif
};
-/* In all implementations count != 0 means locked */
+#define RWSEM_UNLOCKED_VALUE 0UL
+#define RWSEM_WRITER_LOCKED (1UL << 0)
+#define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
+
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
- return atomic_long_read(&sem->count) != 0;
+ return atomic_long_read(&sem->count) != RWSEM_UNLOCKED_VALUE;
}
-#define RWSEM_UNLOCKED_VALUE 0L
-#define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
+static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+{
+ WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
+}
-/* Common initializer macros and functions */
+static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+{
+ WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
+}
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) \
- .dep_map = { \
- .name = #lockname, \
- .wait_type_inner = LD_WAIT_SLEEP, \
- },
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
+/* Common initializer macros and functions */
#ifdef CONFIG_DEBUG_RWSEMS
# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
@@ -119,6 +132,91 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
return !list_empty(&sem->wait_list);
}
+#if defined(CONFIG_DEBUG_RWSEMS) || defined(CONFIG_DETECT_HUNG_TASK_BLOCKER)
+/*
+ * Return just the real task structure pointer of the owner
+ */
+extern struct task_struct *rwsem_owner(struct rw_semaphore *sem);
+
+/*
+ * Return true if the rwsem is owned by a reader.
+ */
+extern bool is_rwsem_reader_owned(struct rw_semaphore *sem);
+#endif
+
+#else /* !CONFIG_PREEMPT_RT */
+
+#include <linux/rwbase_rt.h>
+
+struct rw_semaphore {
+ struct rwbase_rt rwbase;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#define __RWSEM_INITIALIZER(name) \
+ { \
+ .rwbase = __RWBASE_INITIALIZER(name), \
+ __RWSEM_DEP_MAP_INIT(name) \
+ }
+
+#define DECLARE_RWSEM(lockname) \
+ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
+
+extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name,
+ struct lock_class_key *key);
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
+
+static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
+{
+ return rw_base_is_locked(&sem->rwbase);
+}
+
+static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+{
+ WARN_ON(!rwsem_is_locked(sem));
+}
+
+static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+{
+ WARN_ON(!rw_base_is_write_locked(&sem->rwbase));
+}
+
+static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
+{
+ return rw_base_is_contended(&sem->rwbase);
+}
+
+#endif /* CONFIG_PREEMPT_RT */
+
+/*
+ * The functions below are the same for all rwsem implementations including
+ * the RT specific variant.
+ */
+
+static inline void rwsem_assert_held(const struct rw_semaphore *sem)
+{
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ lockdep_assert_held(sem);
+ else
+ rwsem_assert_held_nolockdep(sem);
+}
+
+static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
+{
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ lockdep_assert_held_write(sem);
+ else
+ rwsem_assert_held_write_nolockdep(sem);
+}
+
/*
* lock for reading
*/
@@ -152,6 +250,14 @@ extern void up_read(struct rw_semaphore *sem);
*/
extern void up_write(struct rw_semaphore *sem);
+DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
+DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
+DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T), _RET == 0)
+
+DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
+DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
+DEFINE_GUARD_COND(rwsem_write, _kill, down_write_killable(_T), _RET == 0)
+
/*
* downgrade write lock to read lock
*/
@@ -181,7 +287,7 @@ extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *
do { \
typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
_down_write_nest_lock(sem, &(nest_lock)->dep_map); \
-} while (0);
+} while (0)
/*
* Take/release a lock when not the owner will release it.
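
The DEFINE_GUARD() wrappers added above hook rwsems into the <linux/cleanup.h> scope-based release machinery. A hedged usage sketch (demo_sem and demo_count are hypothetical):

	static DECLARE_RWSEM(demo_sem);
	static int demo_count;

	static int demo_get(void)
	{
		guard(rwsem_read)(&demo_sem);	/* up_read() on scope exit */
		return demo_count;
	}

	static void demo_set(int v)
	{
		scoped_guard(rwsem_write, &demo_sem)	/* up_write() on exit */
			demo_count = v;
	}
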
diff --git a/include/linux/s3c_adc_battery.h b/include/linux/s3c_adc_battery.h
deleted file mode 100644
index 57f982c375f8..000000000000
--- a/include/linux/s3c_adc_battery.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _S3C_ADC_BATTERY_H
-#define _S3C_ADC_BATTERY_H
-
-struct s3c_adc_bat_thresh {
- int volt; /* mV */
- int cur; /* mA */
- int level; /* percent */
-};
-
-struct s3c_adc_bat_pdata {
- int (*init)(void);
- void (*exit)(void);
- void (*enable_charger)(void);
- void (*disable_charger)(void);
-
- const struct s3c_adc_bat_thresh *lut_noac;
- unsigned int lut_noac_cnt;
- const struct s3c_adc_bat_thresh *lut_acin;
- unsigned int lut_acin_cnt;
-
- const unsigned int volt_channel;
- const unsigned int current_channel;
- const unsigned int backup_volt_channel;
-
- const unsigned int volt_samples;
- const unsigned int current_samples;
- const unsigned int backup_volt_samples;
-
- const unsigned int volt_mult;
- const unsigned int current_mult;
- const unsigned int backup_volt_mult;
- const unsigned int internal_impedance;
-
- const unsigned int backup_volt_max;
- const unsigned int backup_volt_min;
-};
-
-#endif
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 2713e689ad66..cc7ad189caa5 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -9,8 +9,17 @@
#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H
-#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/cache.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/minmax.h>
+#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/wait.h>
struct seq_file;
@@ -19,19 +28,19 @@ struct seq_file;
*/
struct sbitmap_word {
/**
- * @depth: Number of bits being used in @word/@cleared
- */
- unsigned long depth;
-
- /**
* @word: word holding free bits
*/
- unsigned long word ____cacheline_aligned_in_smp;
+ unsigned long word;
/**
* @cleared: word holding cleared bits
*/
unsigned long cleared ____cacheline_aligned_in_smp;
+
+ /**
+ * @swap_lock: serializes simultaneous updates of ->word and ->cleared
+ */
+ raw_spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;
/**
@@ -66,7 +75,7 @@ struct sbitmap {
*/
struct sbitmap_word *map;
- /*
+ /**
* @alloc_hint: Cache of last successfully allocated or freed bit.
*
* This is per-cpu, which allows multiple users to stick to different
@@ -83,11 +92,6 @@ struct sbitmap {
*/
struct sbq_wait_state {
/**
- * @wait_cnt: Number of frees remaining before we wake up.
- */
- atomic_t wait_cnt;
-
- /**
* @wait: Wait queue.
*/
wait_queue_head_t wait;
@@ -124,16 +128,27 @@ struct sbitmap_queue {
*/
struct sbq_wait_state *ws;
- /*
+ /**
* @ws_active: count of currently active ws waitqueues
*/
atomic_t ws_active;
/**
* @min_shallow_depth: The minimum shallow depth which may be passed to
- * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
+ * sbitmap_queue_get_shallow()
*/
unsigned int min_shallow_depth;
+
+ /**
+ * @completion_cnt: Number of bits cleared passed to the
+ * wakeup function.
+ */
+ atomic_t completion_cnt;
+
+ /**
+ * @wakeup_cnt: Number of thread wake ups issued.
+ */
+ atomic_t wakeup_cnt;
};
/**
@@ -155,6 +170,14 @@ struct sbitmap_queue {
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
gfp_t flags, int node, bool round_robin, bool alloc_hint);
+/* sbitmap internal helper */
+static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
+{
+ if (index == sb->map_nr - 1)
+ return sb->depth - (index << sb->shift);
+ return 1U << sb->shift;
+}
+
/**
* sbitmap_free() - Free memory used by a &struct sbitmap.
* @sb: Bitmap to free.
@@ -162,7 +185,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
static inline void sbitmap_free(struct sbitmap *sb)
{
free_percpu(sb->alloc_hint);
- kfree(sb->map);
+ kvfree(sb->map);
sb->map = NULL;
}
@@ -187,23 +210,6 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
int sbitmap_get(struct sbitmap *sb);
/**
- * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
- * limiting the depth used from each word.
- * @sb: Bitmap to allocate from.
- * @shallow_depth: The maximum number of bits to allocate from a single word.
- *
- * This rather specific operation allows for having multiple users with
- * different allocation limits. E.g., there can be a high-priority class that
- * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
- * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
- * class can only allocate half of the total bits in the bitmap, preventing it
- * from starving out the high-priority class.
- *
- * Return: Non-negative allocated bit number if successful, -1 otherwise.
- */
-int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth);
-
-/**
* sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
* @sb: Bitmap to check.
*
@@ -242,7 +248,7 @@ static inline void __sbitmap_for_each_set(struct sbitmap *sb,
while (scanned < sb->depth) {
unsigned long word;
unsigned int depth = min_t(unsigned int,
- sb->map[index].depth - nr,
+ __map_depth(sb, index) - nr,
sb->depth - scanned);
scanned += depth;
@@ -407,6 +413,17 @@ static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
}
/**
+ * sbitmap_queue_recalculate_wake_batch() - Recalculate wake batch
+ * @sbq: Bitmap queue to recalculate wake batch.
+ * @users: Number of shares.
+ *
+ * Like sbitmap_queue_update_wake_batch(), this recalculates the wake batch
+ * based on the queue depth. Meant for HCTX shared tags or queue shared tags.
+ */
+void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
+ unsigned int users);
+
+/**
* sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
* @sbq: Bitmap queue to resize.
* @depth: New number of bits to resize to.
@@ -427,11 +444,24 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
int __sbitmap_queue_get(struct sbitmap_queue *sbq);
/**
- * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
+ * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits
+ * @sbq: Bitmap queue to allocate from.
+ * @nr_tags: number of tags requested
+ * @offset: offset to add to returned bits
+ *
+ * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is
+ * a bit in the mask returned, and the caller must add @offset to the value to
+ * get the absolute tag value.
+ */
+unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+ unsigned int *offset);
+
+/**
+ * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
* sbitmap_queue, limiting the depth used from each word, with preemption
* already disabled.
* @sbq: Bitmap queue to allocate from.
- * @shallow_depth: The maximum number of bits to allocate from a single word.
+ * @shallow_depth: The maximum number of bits to allocate from the queue.
* See sbitmap_get_shallow().
*
* If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
@@ -439,8 +469,8 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq);
*
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
-int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
- unsigned int shallow_depth);
+int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+ unsigned int shallow_depth);
/**
* sbitmap_queue_get() - Try to allocate a free bit from a &struct
@@ -463,32 +493,6 @@ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
}
/**
- * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
- * sbitmap_queue, limiting the depth used from each word.
- * @sbq: Bitmap queue to allocate from.
- * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
- * sbitmap_queue_clear()).
- * @shallow_depth: The maximum number of bits to allocate from a single word.
- * See sbitmap_get_shallow().
- *
- * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
- * initializing @sbq.
- *
- * Return: Non-negative allocated bit number if successful, -1 otherwise.
- */
-static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
- unsigned int *cpu,
- unsigned int shallow_depth)
-{
- int nr;
-
- *cpu = get_cpu();
- nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
- put_cpu();
- return nr;
-}
-
-/**
* sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
* minimum shallow depth that will be used.
* @sbq: Bitmap queue in question.
@@ -515,6 +519,17 @@ void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu);
+/**
+ * sbitmap_queue_clear_batch() - Free a batch of allocated bits
+ * from a &struct sbitmap_queue.
+ * @sbq: Bitmap to free from.
+ * @offset: offset for each tag in array
+ * @tags: array of tags
+ * @nr_tags: number of tags in array
+ */
+void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
+ int *tags, int nr_tags);
+
static inline int sbq_index_inc(int index)
{
return (index + 1) & (SBQ_WAIT_QUEUES - 1);
@@ -532,6 +547,8 @@ static inline void sbq_index_atomic_inc(atomic_t *index)
* sbitmap_queue.
* @sbq: Bitmap queue to wait on.
* @wait_index: A counter per "user" of @sbq.
+ *
+ * Return: Next wait queue to be used
*/
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
atomic_t *wait_index)
@@ -554,8 +571,9 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
* sbitmap_queue_wake_up() - Wake up some of waiters in one waitqueue
* on a &struct sbitmap_queue.
* @sbq: Bitmap queue to wake up.
+ * @nr: Number of bits cleared.
*/
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);
/**
* sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
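
A hedged sketch of the shallow-depth split described above: a low-priority class is capped below the full depth so it cannot starve high-priority allocations. demo_get_tag() and the fifty-percent policy are hypothetical; callers must also have set sbitmap_queue_min_shallow_depth() at init time.

	static int demo_get_tag(struct sbitmap_queue *sbq, bool high_prio)
	{
		if (high_prio)
			return __sbitmap_queue_get(sbq);

		/* Low priority: only ever use half of the total depth. */
		return sbitmap_queue_get_shallow(sbq, sbq->sb.depth / 2);
	}
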
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 6f70572b2938..29f6ceb98d74 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -16,6 +16,9 @@ struct scatterlist {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
unsigned int dma_length;
#endif
+#ifdef CONFIG_NEED_SG_DMA_FLAGS
+ unsigned int dma_flags;
+#endif
};
/*
@@ -39,6 +42,12 @@ struct sg_table {
unsigned int orig_nents; /* original size of list */
};
+struct sg_append_table {
+ struct sg_table sgt; /* The scatter list table */
+ struct scatterlist *prv; /* last populated sge in the table */
+ unsigned int total_nents; /* Total entries in the table */
+};
+
/*
* Notes on SG table design.
*
@@ -63,10 +72,49 @@ struct sg_table {
* a valid sg entry, or whether it points to the start of a new scatterlist.
* Those low bits are there for everyone! (thanks mason :-)
*/
-#define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN)
-#define sg_is_last(sg) ((sg)->page_link & SG_END)
-#define sg_chain_ptr(sg) \
- ((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END)))
+#define SG_PAGE_LINK_MASK (SG_CHAIN | SG_END)
+
+static inline unsigned int __sg_flags(struct scatterlist *sg)
+{
+ return sg->page_link & SG_PAGE_LINK_MASK;
+}
+
+static inline struct scatterlist *sg_chain_ptr(struct scatterlist *sg)
+{
+ return (struct scatterlist *)(sg->page_link & ~SG_PAGE_LINK_MASK);
+}
+
+static inline bool sg_is_chain(struct scatterlist *sg)
+{
+ return __sg_flags(sg) & SG_CHAIN;
+}
+
+static inline bool sg_is_last(struct scatterlist *sg)
+{
+ return __sg_flags(sg) & SG_END;
+}
+
+/**
+ * sg_next - return the next scatterlist entry in a list
+ * @sg: The current sg entry
+ *
+ * Description:
+ * Usually the next entry will be @sg + 1, but if this sg element is part
+ * of a chained scatterlist, it could jump to the start of a new
+ * scatterlist array.
+ *
+ **/
+static inline struct scatterlist *sg_next(struct scatterlist *sg)
+{
+ if (sg_is_last(sg))
+ return NULL;
+
+ sg++;
+ if (unlikely(sg_is_chain(sg)))
+ sg = sg_chain_ptr(sg);
+
+ return sg;
+}
/**
* sg_assign_page - Assign a given page to an SG entry
@@ -86,7 +134,7 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
* In order for the low bit stealing approach to work, pages
* must be aligned at a 32-bit boundary as a minimum.
*/
- BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
+ BUG_ON((unsigned long)page & SG_PAGE_LINK_MASK);
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg_is_chain(sg));
#endif
@@ -110,17 +158,42 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
static inline void sg_set_page(struct scatterlist *sg, struct page *page,
unsigned int len, unsigned int offset)
{
+ VM_WARN_ON_ONCE(!page_range_contiguous(page, ALIGN(len + offset, PAGE_SIZE) / PAGE_SIZE));
sg_assign_page(sg, page);
sg->offset = offset;
sg->length = len;
}
+/**
+ * sg_set_folio - Set sg entry to point at given folio
+ * @sg: SG entry
+ * @folio: The folio
+ * @len: Length of data
+ * @offset: Offset into folio
+ *
+ * Description:
+ * Use this function to set an sg entry pointing at a folio, never assign
+ * the folio directly. We encode sg table information in the lower bits
+ * of the folio pointer. See sg_page() for looking up the page belonging
+ * to an sg entry.
+ *
+ **/
+static inline void sg_set_folio(struct scatterlist *sg, struct folio *folio,
+ size_t len, size_t offset)
+{
+ WARN_ON_ONCE(len > UINT_MAX);
+ WARN_ON_ONCE(offset > UINT_MAX);
+ sg_assign_page(sg, &folio->page);
+ sg->offset = offset;
+ sg->length = len;
+}
+
static inline struct page *sg_page(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg_is_chain(sg));
#endif
- return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
+ return (struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK);
}
/**
@@ -182,7 +255,7 @@ static inline void __sg_chain(struct scatterlist *chain_sg,
* @sgl: Second scatterlist
*
* Description:
- * Links @prv@ and @sgl@ together, to form a longer scatterlist.
+ * Links @prv and @sgl together, to form a longer scatterlist.
*
**/
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
@@ -222,6 +295,108 @@ static inline void sg_unmark_end(struct scatterlist *sg)
sg->page_link &= ~SG_END;
}
+/*
+ * On 64-bit architectures there is a 4-byte padding in struct scatterlist
+ * (assuming also CONFIG_NEED_SG_DMA_LENGTH is set). Use this padding for DMA
+ * flag bits to indicate when a specific dma address is a bus address or the
+ * buffer may have been bounced via SWIOTLB.
+ */
+#ifdef CONFIG_NEED_SG_DMA_FLAGS
+
+#define SG_DMA_BUS_ADDRESS (1 << 0)
+#define SG_DMA_SWIOTLB (1 << 1)
+
+/**
+ * sg_dma_is_bus_address - Return whether a given segment was marked
+ * as a bus address
+ * @sg: SG entry
+ *
+ * Description:
+ * Returns true if sg_dma_mark_bus_address() has been called on
+ * this segment.
+ **/
+static inline bool sg_dma_is_bus_address(struct scatterlist *sg)
+{
+ return sg->dma_flags & SG_DMA_BUS_ADDRESS;
+}
+
+/**
+ * sg_dma_mark_bus_address - Mark the scatterlist entry as a bus address
+ * @sg: SG entry
+ *
+ * Description:
+ * Marks the passed in sg entry to indicate that the dma_address is
+ * a bus address and doesn't need to be unmapped. This should only be
+ * used by dma_map_sg() implementations to mark bus addresses
+ * so they can be properly cleaned up in dma_unmap_sg().
+ **/
+static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
+{
+ sg->dma_flags |= SG_DMA_BUS_ADDRESS;
+}
+
+/**
+ * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address
+ * @sg: SG entry
+ *
+ * Description:
+ * Clears the bus address mark.
+ **/
+static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
+{
+ sg->dma_flags &= ~SG_DMA_BUS_ADDRESS;
+}
+
+/**
+ * sg_dma_is_swiotlb - Return whether the scatterlist was marked for SWIOTLB
+ * bouncing
+ * @sg: SG entry
+ *
+ * Description:
+ * Returns true if the scatterlist was marked for SWIOTLB bouncing. Not all
+ * elements may have been bounced, so the caller would have to check
+ * individual SG entries with swiotlb_find_pool().
+ */
+static inline bool sg_dma_is_swiotlb(struct scatterlist *sg)
+{
+ return sg->dma_flags & SG_DMA_SWIOTLB;
+}
+
+/**
+ * sg_dma_mark_swiotlb - Mark the scatterlist for SWIOTLB bouncing
+ * @sg: SG entry
+ *
+ * Description:
+ * Marks a scatterlist for SWIOTLB bouncing. Not all SG entries may be
+ * bounced.
+ */
+static inline void sg_dma_mark_swiotlb(struct scatterlist *sg)
+{
+ sg->dma_flags |= SG_DMA_SWIOTLB;
+}
+
+#else
+
+static inline bool sg_dma_is_bus_address(struct scatterlist *sg)
+{
+ return false;
+}
+static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
+{
+}
+static inline void sg_dma_unmark_bus_address(struct scatterlist *sg)
+{
+}
+static inline bool sg_dma_is_swiotlb(struct scatterlist *sg)
+{
+ return false;
+}
+static inline void sg_dma_mark_swiotlb(struct scatterlist *sg)
+{
+}
+
+#endif /* CONFIG_NEED_SG_DMA_FLAGS */
+
/**
* sg_phys - Return physical address of an sg entry
* @sg: SG entry
@@ -266,7 +441,6 @@ static inline void sg_init_marker(struct scatterlist *sgl,
int sg_nents(struct scatterlist *sg);
int sg_nents_for_len(struct scatterlist *sg, u64 len);
-struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);
@@ -280,19 +454,51 @@ typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
void __sg_free_table(struct sg_table *, unsigned int, unsigned int,
- sg_free_fn *);
+ sg_free_fn *, unsigned int);
void sg_free_table(struct sg_table *);
+void sg_free_append_table(struct sg_append_table *sgt);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
-struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
- struct page **pages, unsigned int n_pages, unsigned int offset,
- unsigned long size, unsigned int max_segment,
- struct scatterlist *prv, unsigned int left_pages,
- gfp_t gfp_mask);
-int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
- unsigned int n_pages, unsigned int offset,
- unsigned long size, gfp_t gfp_mask);
+int sg_alloc_append_table_from_pages(struct sg_append_table *sgt,
+ struct page **pages, unsigned int n_pages,
+ unsigned int offset, unsigned long size,
+ unsigned int max_segment,
+ unsigned int left_pages, gfp_t gfp_mask);
+int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
+ unsigned int n_pages, unsigned int offset,
+ unsigned long size,
+ unsigned int max_segment, gfp_t gfp_mask);
+
+/**
+ * sg_alloc_table_from_pages - Allocate and initialize an sg table from
+ * an array of pages
+ * @sgt: The sg table header to use
+ * @pages: Pointer to an array of page pointers
+ * @n_pages: Number of pages in the pages array
+ * @offset: Offset from start of the first page to the start of a buffer
+ * @size: Number of valid bytes in the buffer (after offset)
+ * @gfp_mask: GFP allocation mask
+ *
+ * Description:
+ * Allocate and initialize an sg table from a list of pages. Contiguous
+ * ranges of the pages are squashed into a single scatterlist node. A user
+ * may provide an offset at the start and a size of valid data in the buffer
+ * specified by the page array. The returned sg table is released by
+ * sg_free_table.
+ *
+ * Returns:
+ * 0 on success, negative error on failure
+ */
+static inline int sg_alloc_table_from_pages(struct sg_table *sgt,
+ struct page **pages,
+ unsigned int n_pages,
+ unsigned int offset,
+ unsigned long size, gfp_t gfp_mask)
+{
+ return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset,
+ size, UINT_MAX, gfp_mask);
+}
#ifdef CONFIG_SGL_ALLOC
struct scatterlist *sgl_alloc_order(unsigned long long length,
@@ -395,7 +601,7 @@ void __sg_page_iter_start(struct sg_page_iter *piter,
*/
static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
{
- return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
+ return sg_page(piter->sg) + piter->sg_pgoffset;
}
/**
@@ -474,7 +680,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
* Iterates over sg entries mapping page-by-page. On each successful
* iteration, @miter->page points to the mapped page and
* @miter->length bytes of data can be accessed at @miter->addr. As
- * long as an interation is enclosed between start and stop, the user
+ * long as an iteration is enclosed between start and stop, the user
* is free to choose control structure and when to stop.
*
* @miter->consumed is set to @miter->length on each iteration. It
@@ -487,6 +693,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
#define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */
#define SG_MITER_FROM_SG (1 << 2) /* nop */
+#define SG_MITER_LOCAL (1 << 3) /* use kmap_local */
struct sg_mapping_iter {
/* the following three fields can be accessed directly */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d2c881384517..d395f2810fac 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -10,38 +10,56 @@
#include <uapi/linux/sched.h>
#include <asm/current.h>
-
-#include <linux/pid.h>
-#include <linux/sem.h>
+#include <asm/processor.h>
+#include <linux/thread_info.h>
+#include <linux/preempt.h>
+#include <linux/cpumask_types.h>
+
+#include <linux/cache.h>
+#include <linux/irqflags_types.h>
+#include <linux/smp_types.h>
+#include <linux/pid_types.h>
+#include <linux/sem_types.h>
#include <linux/shm.h>
-#include <linux/mutex.h>
-#include <linux/plist.h>
-#include <linux/hrtimer.h>
-#include <linux/irqflags.h>
-#include <linux/seccomp.h>
-#include <linux/nodemask.h>
-#include <linux/rcupdate.h>
-#include <linux/refcount.h>
+#include <linux/kmsan_types.h>
+#include <linux/mutex_types.h>
+#include <linux/plist_types.h>
+#include <linux/hrtimer_types.h>
+#include <linux/timer_types.h>
+#include <linux/seccomp_types.h>
+#include <linux/nodemask_types.h>
+#include <linux/refcount_types.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
-#include <linux/syscall_user_dispatch.h>
+#include <linux/spinlock.h>
+#include <linux/syscall_user_dispatch_types.h>
#include <linux/mm_types_task.h>
+#include <linux/netdevice_xmit.h>
#include <linux/task_io_accounting.h>
-#include <linux/posix-timers.h>
-#include <linux/rseq.h>
-#include <linux/seqlock.h>
+#include <linux/posix-timers_types.h>
+#include <linux/restart_block.h>
+#include <linux/rseq_types.h>
+#include <linux/seqlock_types.h>
#include <linux/kcsan.h>
+#include <linux/rv.h>
+#include <linux/uidgid_types.h>
+#include <linux/tracepoint-defs.h>
+#include <linux/unwind_deferred_types.h>
#include <asm/kmap_size.h>
+#ifndef COMPILE_OFFSETS
+#include <generated/rq-offsets.h>
+#endif
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
-struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct bpf_local_storage;
+struct bpf_run_ctx;
+struct bpf_net_context;
struct capture_control;
struct cfs_rq;
struct fs_struct;
@@ -52,6 +70,7 @@ struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
+struct perf_ctx_data;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
@@ -60,47 +79,62 @@ struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
-struct sched_param;
+struct sched_dl_entity;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
+struct task_struct;
+struct user_event_mm;
+
+#include <linux/sched/ext.h>
/*
* Task state bitmask. NOTE! These bits are also
* encoded in fs/proc/array.c: get_task_state().
*
- * We have two separate sets of flags: task->state
+ * We have two separate sets of flags: task->__state
* is about runnability, while task->exit_state are
* about the task exiting. Confusing, but this way
* modifying one set can't modify the other one by
* mistake.
*/
-/* Used in tsk->state: */
-#define TASK_RUNNING 0x0000
-#define TASK_INTERRUPTIBLE 0x0001
-#define TASK_UNINTERRUPTIBLE 0x0002
-#define __TASK_STOPPED 0x0004
-#define __TASK_TRACED 0x0008
+/* Used in tsk->__state: */
+#define TASK_RUNNING 0x00000000
+#define TASK_INTERRUPTIBLE 0x00000001
+#define TASK_UNINTERRUPTIBLE 0x00000002
+#define __TASK_STOPPED 0x00000004
+#define __TASK_TRACED 0x00000008
/* Used in tsk->exit_state: */
-#define EXIT_DEAD 0x0010
-#define EXIT_ZOMBIE 0x0020
+#define EXIT_DEAD 0x00000010
+#define EXIT_ZOMBIE 0x00000020
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
-/* Used in tsk->state again: */
-#define TASK_PARKED 0x0040
-#define TASK_DEAD 0x0080
-#define TASK_WAKEKILL 0x0100
-#define TASK_WAKING 0x0200
-#define TASK_NOLOAD 0x0400
-#define TASK_NEW 0x0800
-#define TASK_STATE_MAX 0x1000
+/* Used in tsk->__state again: */
+#define TASK_PARKED 0x00000040
+#define TASK_DEAD 0x00000080
+#define TASK_WAKEKILL 0x00000100
+#define TASK_WAKING 0x00000200
+#define TASK_NOLOAD 0x00000400
+#define TASK_NEW 0x00000800
+#define TASK_RTLOCK_WAIT 0x00001000
+#define TASK_FREEZABLE 0x00002000
+#define __TASK_FREEZABLE_UNSAFE (0x00004000 * IS_ENABLED(CONFIG_LOCKDEP))
+#define TASK_FROZEN 0x00008000
+#define TASK_STATE_MAX 0x00010000
+
+#define TASK_ANY (TASK_STATE_MAX-1)
+
+/*
+ * DO NOT ADD ANY NEW USERS !
+ */
+#define TASK_FREEZABLE_UNSAFE (TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE)
/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
-#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
+#define TASK_TRACED __TASK_TRACED
#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
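
A sketch of how the widened state word composes (CONDITION is illustrative): TASK_FREEZABLE marks a sleep the freezer may convert to TASK_FROZEN, while the convenience macros above bundle the common pairings.

	/* Sleep interruptibly and allow the freezer to freeze us here. */
	set_current_state(TASK_INTERRUPTIBLE | TASK_FREEZABLE);
	if (!CONDITION)
		schedule();
	__set_current_state(TASK_RUNNING);
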
@@ -113,47 +147,59 @@ struct task_group;
__TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
TASK_PARKED)
-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
-
-#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
+#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
-#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
-
-#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+#define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
+#define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
+#define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
/*
* Special states are those that do not use the normal wait-loop pattern. See
* the comment with set_special_state().
*/
-#define is_special_task_state(state) \
- ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
-
-#define __set_current_state(state_value) \
- do { \
- WARN_ON_ONCE(is_special_task_state(state_value));\
- current->task_state_change = _THIS_IP_; \
- current->state = (state_value); \
- } while (0)
+#define is_special_task_state(state) \
+ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | \
+ TASK_DEAD | TASK_FROZEN))
-#define set_current_state(state_value) \
- do { \
- WARN_ON_ONCE(is_special_task_state(state_value));\
- current->task_state_change = _THIS_IP_; \
- smp_store_mb(current->state, (state_value)); \
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+# define debug_normal_state_change(state_value) \
+ do { \
+ WARN_ON_ONCE(is_special_task_state(state_value)); \
+ current->task_state_change = _THIS_IP_; \
} while (0)
-#define set_special_state(state_value) \
+# define debug_special_state_change(state_value) \
do { \
- unsigned long flags; /* may shadow */ \
WARN_ON_ONCE(!is_special_task_state(state_value)); \
- raw_spin_lock_irqsave(&current->pi_lock, flags); \
current->task_state_change = _THIS_IP_; \
- current->state = (state_value); \
- raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
} while (0)
+
+# define debug_rtlock_wait_set_state() \
+ do { \
+ current->saved_state_change = current->task_state_change;\
+ current->task_state_change = _THIS_IP_; \
+ } while (0)
+
+# define debug_rtlock_wait_restore_state() \
+ do { \
+ current->task_state_change = current->saved_state_change;\
+ } while (0)
+
#else
+# define debug_normal_state_change(cond) do { } while (0)
+# define debug_special_state_change(cond) do { } while (0)
+# define debug_rtlock_wait_set_state() do { } while (0)
+# define debug_rtlock_wait_restore_state() do { } while (0)
+#endif
+
+#define trace_set_current_state(state_value) \
+ do { \
+ if (tracepoint_enabled(sched_set_state_tp)) \
+ __trace_set_current_state(state_value); \
+ } while (0)
+
/*
- * set_current_state() includes a barrier so that the write of current->state
+ * set_current_state() includes a barrier so that the write of current->__state
* is correctly serialised wrt the caller's subsequent test of whether to
* actually sleep:
*
@@ -176,9 +222,9 @@ struct task_group;
* wake_up_state(p, TASK_UNINTERRUPTIBLE);
*
* where wake_up_state()/try_to_wake_up() executes a full memory barrier before
- * accessing p->state.
+ * accessing p->__state.
*
- * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
+ * Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is,
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
* TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
*
@@ -190,31 +236,94 @@ struct task_group;
* Also see the comments of try_to_wake_up().
*/
#define __set_current_state(state_value) \
- current->state = (state_value)
+ do { \
+ debug_normal_state_change((state_value)); \
+ trace_set_current_state(state_value); \
+ WRITE_ONCE(current->__state, (state_value)); \
+ } while (0)
#define set_current_state(state_value) \
- smp_store_mb(current->state, (state_value))
+ do { \
+ debug_normal_state_change((state_value)); \
+ trace_set_current_state(state_value); \
+ smp_store_mb(current->__state, (state_value)); \
+ } while (0)
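
Spelled out, the waiter/waker pairing the comment above describes (CONDITION and p are illustrative):

	/* Waiter: */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(CONDITION))	/* ordered after the state store */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	/* Waker: */
	WRITE_ONCE(CONDITION, 1);
	wake_up_state(p, TASK_UNINTERRUPTIBLE);	/* full barrier before reading p->__state */
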
/*
* set_special_state() should be used for those states when the blocking task
* can not use the regular condition based wait-loop. In that case we must
- * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
- * will not collide with our state change.
+ * serialize against wakeups such that any possible in-flight TASK_RUNNING
+ * stores will not collide with our state change.
*/
#define set_special_state(state_value) \
do { \
unsigned long flags; /* may shadow */ \
+ \
raw_spin_lock_irqsave(&current->pi_lock, flags); \
- current->state = (state_value); \
+ debug_special_state_change((state_value)); \
+ trace_set_current_state(state_value); \
+ WRITE_ONCE(current->__state, (state_value)); \
raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
} while (0)
-#endif
+/*
+ * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
+ *
+ * RT's spin/rwlock substitutions are state preserving. The state of the
+ * task when blocking on the lock is saved in task_struct::saved_state and
+ * restored after the lock has been acquired. These operations are
+ * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
+ * lock related wakeups while the task is blocked on the lock are
+ * redirected to operate on task_struct::saved_state to ensure that these
+ * are not dropped. On restore task_struct::saved_state is set to
+ * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
+ *
+ * The lock operation looks like this:
+ *
+ * current_save_and_set_rtlock_wait_state();
+ * for (;;) {
+ * if (try_lock())
+ * break;
+ * raw_spin_unlock_irq(&lock->wait_lock);
+ * schedule_rtlock();
+ * raw_spin_lock_irq(&lock->wait_lock);
+ * set_current_state(TASK_RTLOCK_WAIT);
+ * }
+ * current_restore_rtlock_saved_state();
+ */
+#define current_save_and_set_rtlock_wait_state() \
+ do { \
+ lockdep_assert_irqs_disabled(); \
+ raw_spin_lock(&current->pi_lock); \
+ current->saved_state = current->__state; \
+ debug_rtlock_wait_set_state(); \
+ trace_set_current_state(TASK_RTLOCK_WAIT); \
+ WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
+ raw_spin_unlock(&current->pi_lock); \
+ } while (0);
+
+#define current_restore_rtlock_saved_state() \
+ do { \
+ lockdep_assert_irqs_disabled(); \
+ raw_spin_lock(&current->pi_lock); \
+ debug_rtlock_wait_restore_state(); \
+ trace_set_current_state(current->saved_state); \
+ WRITE_ONCE(current->__state, current->saved_state); \
+ current->saved_state = TASK_RUNNING; \
+ raw_spin_unlock(&current->pi_lock); \
+ } while (0);
-/* Task command name length: */
-#define TASK_COMM_LEN 16
+#define get_current_state() READ_ONCE(current->__state)
-extern void scheduler_tick(void);
+/*
+ * Define the task command name length as enum, then it can be visible to
+ * BPF programs.
+ */
+enum {
+ TASK_COMM_LEN = 16,
+};
+
+extern void sched_tick(void);
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
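
get_current_state() is the matching read-side accessor; a debug-style sketch:

	/* Never read ->__state directly; the accessor wraps it in READ_ONCE(). */
	if (get_current_state() == TASK_RUNNING)
		pr_debug("%s still runnable\n", current->comm);
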
@@ -226,12 +335,21 @@ extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);
+#ifdef CONFIG_PREEMPT_RT
+ extern void schedule_rtlock(void);
+#endif
extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
+/* wrapper functions to trace from this header file */
+DECLARE_TRACEPOINT(sched_set_state_tp);
+extern void __trace_set_current_state(int state_value);
+DECLARE_TRACEPOINT(sched_set_need_resched_tp);
+extern void __trace_set_need_resched(struct task_struct *curr, int tif);
+
/**
* struct prev_cputime - snapshot of system and user cputime
* @utime: time spent in user mode
@@ -284,10 +402,14 @@ enum uclamp_id {
UCLAMP_CNT
};
-#ifdef CONFIG_SMP
extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
-#endif
+extern void sched_domains_mutex_lock(void);
+extern void sched_domains_mutex_unlock(void);
+
+struct sched_param {
+ int sched_priority;
+};
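
With struct sched_param now defined here, an in-kernel caller sketch (priority value illustrative, return check elided); note that sched_set_fifo(), declared further down, is preferred over raw priorities in new code:

	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
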
struct sched_info {
#ifdef CONFIG_SCHED_INFO
@@ -299,6 +421,12 @@ struct sched_info {
/* Time spent waiting on a runqueue: */
unsigned long long run_delay;
+ /* Max time spent waiting on a runqueue: */
+ unsigned long long max_run_delay;
+
+ /* Min time spent waiting on a runqueue: */
+ unsigned long long min_run_delay;
+
/* Timestamps: */
/* When did we last run on a CPU? */
@@ -329,34 +457,6 @@ struct load_weight {
u32 inv_weight;
};
-/**
- * struct util_est - Estimation utilization of FAIR tasks
- * @enqueued: instantaneous estimated utilization of a task/cpu
- * @ewma: the Exponential Weighted Moving Average (EWMA)
- * utilization of a task
- *
- * Support data structure to track an Exponential Weighted Moving Average
- * (EWMA) of a FAIR task's utilization. New samples are added to the moving
- * average each time a task completes an activation. Sample's weight is chosen
- * so that the EWMA will be relatively insensitive to transient changes to the
- * task's workload.
- *
- * The enqueued attribute has a slightly different meaning for tasks and cpus:
- * - task: the task's util_avg at last task dequeue time
- * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
- * Thus, the util_est.enqueued of a task represents the contribution on the
- * estimated utilization of the CPU where that task is currently enqueued.
- *
- * Only for tasks we track a moving average of the past instantaneous
- * estimated utilization. This allows to absorb sporadic drops in utilization
- * of an otherwise almost periodic task.
- */
-struct util_est {
- unsigned int enqueued;
- unsigned int ewma;
-#define UTIL_EST_WEIGHT_SHIFT 2
-} __attribute__((__aligned__(sizeof(u64))));
-
/*
* The load/runnable/util_avg accumulates an infinite geometric series
* (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
@@ -411,9 +511,20 @@ struct sched_avg {
unsigned long load_avg;
unsigned long runnable_avg;
unsigned long util_avg;
- struct util_est util_est;
+ unsigned int util_est;
} ____cacheline_aligned;
+/*
+ * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
+ * updates. When a task is dequeued, its util_est should not be updated if its
+ * util_avg has not been updated in the meantime.
+ * This information is mapped into the MSB bit of util_est at dequeue time.
+ * Since max value of util_est for a task is 1024 (PELT util_avg for a task)
+ * it is safe to use MSB.
+ */
+#define UTIL_EST_WEIGHT_SHIFT 2
+#define UTIL_AVG_UNCHANGED 0x80000000
+
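
The packing this implies, as a sketch (helper name hypothetical):

static inline unsigned int example_util_est_value(unsigned int util_est)
{
	/* Low bits hold the estimate (<= 1024); the MSB is the sync flag. */
	return util_est & ~UTIL_AVG_UNCHANGED;
}
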
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
@@ -429,7 +540,9 @@ struct sched_statistics {
u64 block_start;
u64 block_max;
- u64 exec_max;
+ s64 sum_block_runtime;
+
+ s64 exec_max;
u64 slice_max;
u64 nr_migrations_cold;
@@ -447,25 +560,45 @@ struct sched_statistics {
u64 nr_wakeups_affine_attempts;
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
+
+#ifdef CONFIG_SCHED_CORE
+ u64 core_forceidle_sum;
#endif
-};
+#endif /* CONFIG_SCHEDSTATS */
+} ____cacheline_aligned;
struct sched_entity {
/* For load-balancing: */
struct load_weight load;
struct rb_node run_node;
+ u64 deadline;
+ u64 min_vruntime;
+ u64 min_slice;
+
struct list_head group_node;
- unsigned int on_rq;
+ unsigned char on_rq;
+ unsigned char sched_delayed;
+ unsigned char rel_deadline;
+ unsigned char custom_slice;
+ /* hole */
u64 exec_start;
u64 sum_exec_runtime;
- u64 vruntime;
u64 prev_sum_exec_runtime;
+ u64 vruntime;
+ union {
+ /*
+ * When !@on_rq this field is vlag.
+ * When cfs_rq->curr == se (which implies @on_rq)
+ * this field is vprot. See protect_slice().
+ */
+ s64 vlag;
+ u64 vprot;
+ };
+ u64 slice;
u64 nr_migrations;
- struct sched_statistics statistics;
-
#ifdef CONFIG_FAIR_GROUP_SCHED
int depth;
struct sched_entity *parent;
@@ -477,7 +610,6 @@ struct sched_entity {
unsigned long runnable_weight;
#endif
-#ifdef CONFIG_SMP
/*
* Per entity load average tracking.
*
@@ -485,7 +617,6 @@ struct sched_entity {
* collide with read-mostly values above.
*/
struct sched_avg avg;
-#endif
};
struct sched_rt_entity {
@@ -506,6 +637,9 @@ struct sched_rt_entity {
#endif
} __randomize_layout;
+struct rq_flags;
+typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *, struct rq_flags *rf);
+
struct sched_dl_entity {
struct rb_node rb_node;
@@ -536,10 +670,6 @@ struct sched_dl_entity {
* task has to wait for a replenishment to be performed at the
* next firing of dl_timer.
*
- * @dl_boosted tells if we are boosted due to DI. If so we are
- * outside bandwidth enforcement mechanism (but only until we
- * exit the critical section);
- *
* @dl_yielded tells if task gave up the CPU before consuming
* all its available runtime during the last job.
*
@@ -552,11 +682,36 @@ struct sched_dl_entity {
*
* @dl_overrun tells if the task asked to be informed about runtime
* overruns.
+ *
+ * @dl_server tells if this is a server entity.
+ *
+ * @dl_server_active tells if the dl server is active (started).
+ * The dl server is started on the first cfs enqueue on an idle
+ * runqueue and stopped when a dequeue leaves no cfs tasks on the
+ * runqueue. In other words, the dl server is active only when the
+ * CPU's runqueue has at least one cfs task.
+ *
+ * @dl_defer tells if this is a deferred or regular server. For
+ * now only the deferred server exists.
+ *
+ * @dl_defer_armed tells if the deferrable server is waiting
+ * for the replenishment timer to activate it.
+ *
+ * @dl_defer_running tells if the deferrable server is actually
+ * running, skipping the defer phase.
+ *
+ * @dl_defer_idle tracks idle state
*/
unsigned int dl_throttled : 1;
unsigned int dl_yielded : 1;
unsigned int dl_non_contending : 1;
unsigned int dl_overrun : 1;
+ unsigned int dl_server : 1;
+ unsigned int dl_server_active : 1;
+ unsigned int dl_defer : 1;
+ unsigned int dl_defer_armed : 1;
+ unsigned int dl_defer_running : 1;
+ unsigned int dl_defer_idle : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
@@ -571,7 +726,16 @@ struct sched_dl_entity {
* timer is needed to decrease the active utilization at the correct
* time.
*/
- struct hrtimer inactive_timer;
+ struct hrtimer inactive_timer;
+
+ /*
+ * Bits for DL-server functionality. Also see the comment near
+ * dl_server_update().
+ *
+ * @rq the runqueue this server is for
+ */
+ struct rq *rq;
+ dl_server_pick_f server_pick_task;
#ifdef CONFIG_RT_MUTEXES
/*
@@ -635,6 +799,12 @@ enum perf_event_task_context {
perf_nr_task_contexts,
};
+/*
+ * Number of contexts where an event can trigger:
+ * task, softirq, hardirq, nmi.
+ */
+#define PERF_NR_CONTEXTS 4
+
struct wake_q_node {
struct wake_q_node *next;
};
@@ -654,8 +824,10 @@ struct task_struct {
*/
struct thread_info thread_info;
#endif
- /* -1 unrunnable, 0 runnable, >0 stopped: */
- volatile long state;
+ unsigned int __state;
+
+ /* saved state for "spinlock sleepers" */
+ unsigned int saved_state;
/*
* This begins the randomizable portion of task_struct. Only
@@ -669,13 +841,12 @@ struct task_struct {
unsigned int flags;
unsigned int ptrace;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+ struct alloc_tag *alloc_tag;
+#endif
+
int on_cpu;
struct __call_single_node wake_entry;
-#ifdef CONFIG_THREAD_INFO_IN_TASK
- /* Current CPU: */
- unsigned int cpu;
-#endif
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
@@ -689,7 +860,6 @@ struct task_struct {
*/
int recent_used_cpu;
int wake_cpu;
-#endif
int on_rq;
int prio;
@@ -697,13 +867,30 @@ struct task_struct {
int normal_prio;
unsigned int rt_priority;
- const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+ struct sched_dl_entity dl;
+ struct sched_dl_entity *dl_server;
+#ifdef CONFIG_SCHED_CLASS_EXT
+ struct sched_ext_entity scx;
+#endif
+ const struct sched_class *sched_class;
+
+#ifdef CONFIG_SCHED_CORE
+ struct rb_node core_node;
+ unsigned long core_cookie;
+ unsigned int core_occupation;
+#endif
+
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
+#ifdef CONFIG_CFS_BANDWIDTH
+ struct callback_head sched_throttle_work;
+ struct list_head throttle_node;
+ bool throttled;
+#endif
#endif
- struct sched_dl_entity dl;
+
#ifdef CONFIG_UCLAMP_TASK
/*
@@ -718,6 +905,8 @@ struct task_struct {
struct uclamp_se uclamp[UCLAMP_CNT];
#endif
+ struct sched_statistics stats;
+
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* List of struct preempt_notifier: */
struct hlist_head preempt_notifiers;
@@ -728,13 +917,13 @@ struct task_struct {
#endif
unsigned int policy;
+ unsigned long max_allowed_capacity;
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
+ cpumask_t *user_cpus_ptr;
cpumask_t cpus_mask;
void *migration_pending;
-#ifdef CONFIG_SMP
unsigned short migration_disabled;
-#endif
unsigned short migration_flags;
#ifdef CONFIG_PREEMPT_RCU
@@ -750,33 +939,29 @@ struct task_struct {
u8 rcu_tasks_idx;
int rcu_tasks_idle_cpu;
struct list_head rcu_tasks_holdout_list;
+ int rcu_tasks_exit_cpu;
+ struct list_head rcu_tasks_exit_list;
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
int trc_reader_nesting;
int trc_ipi_to_cpu;
union rcu_special trc_reader_special;
- bool trc_reader_checked;
struct list_head trc_holdout_list;
+ struct list_head trc_blkd_node;
+ int trc_blkd_cpu;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
struct sched_info sched_info;
struct list_head tasks;
-#ifdef CONFIG_SMP
struct plist_node pushable_tasks;
struct rb_node pushable_dl_tasks;
-#endif
struct mm_struct *mm;
struct mm_struct *active_mm;
+ struct address_space *faults_disabled_mapping;
- /* Per-thread vma caching: */
- struct vmacache vmacache;
-
-#ifdef SPLIT_RSS_COUNTING
- struct task_rss_stat rss_stat;
-#endif
int exit_state;
int exit_code;
int exit_signal;
@@ -792,9 +977,7 @@ struct task_struct {
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;
-#ifdef CONFIG_PSI
- unsigned sched_psi_wake_requeue:1;
-#endif
+ unsigned sched_task_hot:1;
/* Force alignment to the next boundary: */
unsigned :0;
@@ -815,16 +998,23 @@ struct task_struct {
* ->sched_remote_wakeup gets used, so it can be in this word.
*/
unsigned sched_remote_wakeup:1;
+#ifdef CONFIG_RT_MUTEXES
+ unsigned sched_rt_mutex:1;
+#endif
- /* Bit to tell LSMs we're in execve(): */
+ /* Bit to tell TOMOYO we're in execve(): */
unsigned in_execve:1;
unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
unsigned restore_sigmask:1;
#endif
-#ifdef CONFIG_MEMCG
+#ifdef CONFIG_MEMCG_V1
unsigned in_user_fault:1;
#endif
+#ifdef CONFIG_LRU_GEN
+ /* whether the LRU algorithm may apply to this access */
+ unsigned in_lru_fault:1;
+#endif
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
@@ -845,7 +1035,24 @@ struct task_struct {
/* Used by page_owner=on to detect recursion in page tracking. */
unsigned in_page_owner:1;
#endif
-
+#ifdef CONFIG_EVENTFD
+ /* Recursion prevention for eventfd_signal() */
+ unsigned in_eventfd:1;
+#endif
+#ifdef CONFIG_ARCH_HAS_CPU_PASID
+ unsigned pasid_activated:1;
+#endif
+#ifdef CONFIG_X86_BUS_LOCK_DETECT
+ unsigned reported_split_lock:1;
+#endif
+#ifdef CONFIG_TASK_DELAY_ACCT
+ /* delay due to memory thrashing */
+ unsigned in_thrashing:1;
+#endif
+ unsigned in_nf_duplicate:1;
+#ifdef CONFIG_PREEMPT_RT
+ struct netdev_xmit net_xmit;
+#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
struct restart_block restart_block;
@@ -888,7 +1095,6 @@ struct task_struct {
/* PID/PID hash table linkage. */
struct pid *thread_pid;
struct hlist_node pid_links[PIDTYPE_MAX];
- struct list_head thread_group;
struct list_head thread_node;
struct completion *vfork_done;
@@ -899,8 +1105,8 @@ struct task_struct {
/* CLONE_CHILD_CLEARTID: */
int __user *clear_child_tid;
- /* PF_IO_WORKER */
- void *pf_io_worker;
+ /* PF_KTHREAD | PF_IO_WORKER */
+ void *worker_private;
u64 utime;
u64 stime;
@@ -957,9 +1163,12 @@ struct task_struct {
/*
* executable name, excluding path.
*
- * - normally initialized setup_new_exec()
- * - access it with [gs]et_task_comm()
- * - lock it with task_lock()
+ * - normally initialized in begin_new_exec()
+ * - set it with set_task_comm()
+ * - strscpy_pad() to ensure it is always NUL-terminated and
+ * zero-padded
+ * - task_lock() to ensure the operation is atomic and the name is
+ * fully updated.
*/
char comm[TASK_COMM_LEN];
@@ -989,7 +1198,6 @@ struct task_struct {
/* Signal handlers: */
struct signal_struct *signal;
struct sighand_struct __rcu *sighand;
- struct sigqueue *sigqueue_cache;
sigset_t blocked;
sigset_t real_blocked;
/* Restored if set_restore_sigmask() was used: */
@@ -1032,9 +1240,14 @@ struct task_struct {
struct rt_mutex_waiter *pi_blocked_on;
#endif
-#ifdef CONFIG_DEBUG_MUTEXES
- /* Mutex deadlock detection: */
- struct mutex_waiter *blocked_on;
+ struct mutex *blocked_on; /* lock we're blocked on */
+
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+ /*
+ * Encoded lock address causing task block (lower 2 bits = type from
+ * <linux/hung_task.h>). Accessed via hung_task_*() helpers.
+ */
+ unsigned long blocker;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
@@ -1071,16 +1284,12 @@ struct task_struct {
/* Stacked block device info: */
struct bio_list *bio_list;
-#ifdef CONFIG_BLOCK
/* Stack plugging: */
struct blk_plug *plug;
-#endif
/* VM state: */
struct reclaim_state *reclaim_state;
- struct backing_dev_info *backing_dev_info;
-
struct io_context *io_context;
#ifdef CONFIG_COMPACTION
@@ -1109,14 +1318,16 @@ struct task_struct {
/* Sequence number to catch updates: */
seqcount_spinlock_t mems_allowed_seq;
int cpuset_mem_spread_rotor;
- int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
/* Control Group info protected by css_set_lock: */
struct css_set __rcu *cgroups;
/* cg_list protected by css_set_lock and tsk->alloc_lock: */
struct list_head cg_list;
-#endif
+#ifdef CONFIG_PREEMPT_RT
+ struct llist_node cg_dead_lnode;
+#endif /* CONFIG_PREEMPT_RT */
+#endif /* CONFIG_CGROUPS */
#ifdef CONFIG_X86_CPU_RESCTRL
u32 closid;
u32 rmid;
@@ -1132,9 +1343,11 @@ struct task_struct {
unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
- struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
+ u8 perf_recursion[PERF_NR_CONTEXTS];
+ struct perf_event_context *perf_event_ctxp;
struct mutex perf_event_mutex;
struct list_head perf_event_list;
+ struct perf_ctx_data __rcu *perf_ctx_data;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
unsigned long preempt_disable_ip;
@@ -1143,6 +1356,7 @@ struct task_struct {
/* Protected by alloc_lock: */
struct mempolicy *mempolicy;
short il_prev;
+ u8 il_weight;
short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
@@ -1195,23 +1409,11 @@ struct task_struct {
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
-#ifdef CONFIG_RSEQ
- struct rseq __user *rseq;
- u32 rseq_sig;
- /*
- * RmW on rseq_event_mask must be performed atomically
- * with respect to preemption.
- */
- unsigned long rseq_event_mask;
-#endif
+ struct rseq_data rseq;
+ struct sched_mm_cid mm_cid;
struct tlbflush_unmap_batch tlb_ubc;
- union {
- refcount_t rcu_users;
- struct rcu_head rcu;
- };
-
/* Cache last used pipe for splice(): */
struct pipe_inode_info *splice_pipe;
@@ -1254,6 +1456,13 @@ struct task_struct {
#ifdef CONFIG_TRACE_IRQFLAGS
struct irqtrace_events kcsan_save_irqtrace;
#endif
+#ifdef CONFIG_KCSAN_WEAK_MEMORY
+ int kcsan_stack_depth;
+#endif
+#endif
+
+#ifdef CONFIG_KMSAN
+ struct kmsan_ctx kmsan_ctx;
#endif
#if IS_ENABLED(CONFIG_KUNIT)
@@ -1266,10 +1475,11 @@ struct task_struct {
int curr_ret_depth;
/* Stack of return addresses for return function tracing: */
- struct ftrace_ret_stack *ret_stack;
+ unsigned long *ret_stack;
/* Timestamp for last schedule: */
unsigned long long ftrace_timestamp;
+ unsigned long long ftrace_sleeptime;
/*
* Number of functions that haven't been traced
@@ -1282,9 +1492,6 @@ struct task_struct {
#endif
#ifdef CONFIG_TRACING
- /* State flags for use by tracers: */
- unsigned long trace;
-
/* Bitmask and counter of trace recursion: */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
@@ -1314,20 +1521,23 @@ struct task_struct {
unsigned int kcov_softirq;
#endif
-#ifdef CONFIG_MEMCG
+#ifdef CONFIG_MEMCG_V1
struct mem_cgroup *memcg_in_oom;
- gfp_t memcg_oom_gfp_mask;
- int memcg_oom_order;
+#endif
+#ifdef CONFIG_MEMCG
/* Number of pages to reclaim on returning to userland: */
unsigned int memcg_nr_pages_over_high;
/* Used by memcontrol for targeted memcg charge: */
struct mem_cgroup *active_memcg;
+
+ /* Cache for current->cgroups->memcg->objcg lookups: */
+ struct obj_cgroup *objcg;
#endif
#ifdef CONFIG_BLK_CGROUP
- struct request_queue *throttle_queue;
+ struct gendisk *throttle_disk;
#endif
#ifdef CONFIG_UPROBES
@@ -1340,10 +1550,16 @@ struct task_struct {
struct kmap_ctrl kmap_ctrl;
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
+# ifdef CONFIG_PREEMPT_RT
+ unsigned long saved_state_change;
+# endif
#endif
+ struct rcu_head rcu;
+ refcount_t rcu_users;
int pagefault_disabled;
#ifdef CONFIG_MMU
struct task_struct *oom_reaper_list;
+ struct timer_list oom_reaper_timer;
#endif
#ifdef CONFIG_VMAP_STACK
struct vm_struct *stack_vm_area;
@@ -1362,10 +1578,16 @@ struct task_struct {
#ifdef CONFIG_BPF_SYSCALL
/* Used by BPF task local storage */
struct bpf_local_storage __rcu *bpf_storage;
+ /* Used for BPF run context */
+ struct bpf_run_ctx *bpf_ctx;
#endif
+ /* Used by BPF for per-TASK xdp storage */
+ struct bpf_net_context *bpf_net_context;
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+#ifdef CONFIG_KSTACK_ERASE
unsigned long lowest_stack;
+#endif
+#ifdef CONFIG_KSTACK_ERASE_METRICS
unsigned long prev_lowest_stack;
#endif
@@ -1377,158 +1599,100 @@ struct task_struct {
mce_whole_page : 1,
__mce_reserved : 62;
struct callback_head mce_kill_me;
+ int mce_count;
#endif
#ifdef CONFIG_KRETPROBES
struct llist_head kretprobe_instances;
#endif
+#ifdef CONFIG_RETHOOK
+ struct llist_head rethooks;
+#endif
+#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
/*
- * New fields for task_struct should be added above here, so that
- * they are included in the randomized portion of task_struct.
+	 * If L1D flush is supported on mm context switch, this callback
+	 * head is used to queue kill work that kills tasks which are not
+	 * running on SMT-disabled cores.
*/
- randomized_struct_fields_end
-
- /* CPU-specific state of this task: */
- struct thread_struct thread;
+ struct callback_head l1d_flush_kill;
+#endif
+#ifdef CONFIG_RV
/*
- * WARNING: on x86, 'thread_struct' contains a variable-sized
- * structure. It *MUST* be at the end of 'task_struct'.
- *
- * Do not put anything below here!
+	 * Per-task RV monitors; their number is fixed by
+	 * CONFIG_RV_PER_TASK_MONITORS. If memory becomes a concern, a
+	 * dynamic method can be considered.
*/
-};
-
-static inline struct pid *task_pid(struct task_struct *task)
-{
- return task->thread_pid;
-}
-
-/*
- * the helpers to get the task's different pids as they are seen
- * from various namespaces
- *
- * task_xid_nr() : global id, i.e. the id seen from the init namespace;
- * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
- * current.
- * task_xid_nr_ns() : id seen from the ns specified;
- *
- * see also pid_nr() etc in include/linux/pid.h
- */
-pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
-
-static inline pid_t task_pid_nr(struct task_struct *tsk)
-{
- return tsk->pid;
-}
-
-static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
-}
-
-static inline pid_t task_pid_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
-}
-
-
-static inline pid_t task_tgid_nr(struct task_struct *tsk)
-{
- return tsk->tgid;
-}
-
-/**
- * pid_alive - check that a task structure is not stale
- * @p: Task structure to be checked.
- *
- * Test if a process is not yet dead (at most zombie state)
- * If pid_alive fails, then pointers within the task structure
- * can be stale and must not be dereferenced.
- *
- * Return: 1 if the process is alive. 0 otherwise.
- */
-static inline int pid_alive(const struct task_struct *p)
-{
- return p->thread_pid != NULL;
-}
-
-static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
-}
-
-static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
-}
-
-
-static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
-}
-
-static inline pid_t task_session_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
-}
-
-static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
-}
+ union rv_task_monitor rv[CONFIG_RV_PER_TASK_MONITORS];
+#endif
-static inline pid_t task_tgid_vnr(struct task_struct *tsk)
-{
- return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
-}
+#ifdef CONFIG_USER_EVENTS
+ struct user_event_mm *user_event_mm;
+#endif
-static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
-{
- pid_t pid = 0;
+#ifdef CONFIG_UNWIND_USER
+ struct unwind_task_info unwind_info;
+#endif
- rcu_read_lock();
- if (pid_alive(tsk))
- pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
- rcu_read_unlock();
+ /* CPU-specific state of this task: */
+ struct thread_struct thread;
- return pid;
-}
+ /*
+ * New fields for task_struct should be added above here, so that
+ * they are included in the randomized portion of task_struct.
+ */
+ randomized_struct_fields_end
+} __attribute__ ((aligned (64)));
-static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+#ifdef CONFIG_SCHED_PROXY_EXEC
+DECLARE_STATIC_KEY_TRUE(__sched_proxy_exec);
+static inline bool sched_proxy_exec(void)
{
- return task_ppid_nr_ns(tsk, &init_pid_ns);
+ return static_branch_likely(&__sched_proxy_exec);
}
-
-/* Obsolete, do not use: */
-static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+#else
+static inline bool sched_proxy_exec(void)
{
- return task_pgrp_nr_ns(tsk, &init_pid_ns);
+ return false;
}
+#endif
#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
-static inline unsigned int task_state_index(struct task_struct *tsk)
+static inline unsigned int __task_state_index(unsigned int tsk_state,
+ unsigned int tsk_exit_state)
{
- unsigned int tsk_state = READ_ONCE(tsk->state);
- unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
+ unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT;
BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
- if (tsk_state == TASK_IDLE)
+ if ((tsk_state & TASK_IDLE) == TASK_IDLE)
state = TASK_REPORT_IDLE;
+ /*
+	 * We're lying here, but rather than expose a completely new task state
+	 * to userspace, we make a task waiting on an RT lock appear as if it
+	 * had gone through a regular rt_mutex_lock() call.
+	 * Frozen tasks are likewise reported as uninterruptible.
+ */
+ if ((tsk_state & TASK_RTLOCK_WAIT) || (tsk_state & TASK_FROZEN))
+ state = TASK_UNINTERRUPTIBLE;
+
return fls(state);
}
+static inline unsigned int task_state_index(struct task_struct *tsk)
+{
+ return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
+}
+
static inline char task_index_to_char(unsigned int state)
{
static const char state_char[] = "RSDTtXZPI";
- BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
+ BUILD_BUG_ON(TASK_REPORT_MAX * 2 != 1 << (sizeof(state_char) - 1));
return state_char[state];
}
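
For reference, the resulting mapping (per the bit layout above): fls() of the masked state indexes "RSDTtXZPI", so e.g. TASK_UNINTERRUPTIBLE (0x2) yields index 2, 'D'. A caller sketch:

	char c = task_index_to_char(task_state_index(tsk));	/* e.g. 'R' */
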
@@ -1538,20 +1702,6 @@ static inline char task_state_to_char(struct task_struct *tsk)
return task_index_to_char(task_state_index(tsk));
}
-/**
- * is_global_init - check if a task structure is init. Since init
- * is free to have sub-threads we need to check tgid.
- * @tsk: Task structure to be checked.
- *
- * Check if a task structure is the first user space task the kernel created.
- *
- * Return: 1 if the task structure is init. 0 otherwise.
- */
-static inline int is_global_init(struct task_struct *tsk)
-{
- return task_tgid_nr(tsk) == 1;
-}
-
extern struct pid *cad_pid;
/*
@@ -1560,6 +1710,7 @@ extern struct pid *cad_pid;
#define PF_VCPU 0x00000001 /* I'm a virtual CPU */
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
+#define PF_POSTCOREDUMP 0x00000008 /* Coredumps should ignore this task */
#define PF_IO_WORKER 0x00000010 /* Task is an IO worker */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
@@ -1567,24 +1718,28 @@ extern struct pid *cad_pid;
#define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
#define PF_DUMPCORE 0x00000200 /* Dumped core */
#define PF_SIGNALED 0x00000400 /* Killed by a signal */
-#define PF_MEMALLOC 0x00000800 /* Allocating memory */
+#define PF_MEMALLOC 0x00000800 /* Allocating memory to free memory. See memalloc_noreclaim_save() */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
-#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */
+#define PF_USER_WORKER 0x00004000 /* Kernel thread cloned from userspace thread */
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
-#define PF_FROZEN 0x00010000 /* Frozen for system suspend */
+#define PF_KCOMPACTD 0x00010000 /* I am kcompactd */
#define PF_KSWAPD 0x00020000 /* I am kswapd */
-#define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */
-#define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */
+#define PF_MEMALLOC_NOFS 0x00040000 /* All allocations inherit GFP_NOFS. See memalloc_nfs_save() */
+#define PF_MEMALLOC_NOIO 0x00080000 /* All allocations inherit GFP_NOIO. See memalloc_noio_save() */
#define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to,
* I am cleaning dirty pages from some other bdi. */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
-#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
+#define PF__HOLE__00800000 0x00800000
+#define PF__HOLE__01000000 0x01000000
+#define PF__HOLE__02000000 0x02000000
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
-#define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long term pinning. */
-#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
+#define PF_MEMALLOC_PIN 0x10000000 /* Allocations constrained to zones which allow long term pinning.
+ * See memalloc_pin_save() */
+#define PF_BLOCK_TS 0x20000000 /* plug has ts that needs updating */
+#define PF__HOLE__40000000 0x40000000
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
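
A test sketch for these flags (helper hypothetical):

static inline bool example_in_reclaim_context(struct task_struct *p)
{
	/* Reclaim-related contexts: direct reclaim, kswapd, kcompactd. */
	return !!(p->flags & (PF_MEMALLOC | PF_KSWAPD | PF_KCOMPACTD));
}
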
/*
@@ -1615,14 +1770,10 @@ extern struct pid *cad_pid;
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
-static inline bool is_percpu_thread(void)
+static __always_inline bool is_percpu_thread(void)
{
-#ifdef CONFIG_SMP
return (current->flags & PF_NO_SETAFFINITY) &&
(current->nr_cpus_allowed == 1);
-#else
- return true;
-#endif
}
/* Per-process atomic flags. */
@@ -1684,21 +1835,26 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
-#ifdef CONFIG_SMP
-extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
+extern int task_can_attach(struct task_struct *p);
+extern int dl_bw_alloc(int cpu, u64 dl_bw);
+extern void dl_bw_free(int cpu, u64 dl_bw);
+
+/* set_cpus_allowed_force() - consider using set_cpus_allowed_ptr() instead */
+extern void set_cpus_allowed_force(struct task_struct *p, const struct cpumask *new_mask);
+
+/**
+ * set_cpus_allowed_ptr - set CPU affinity mask of a task
+ * @p: the task
+ * @new_mask: CPU affinity mask
+ *
+ * Return: zero if successful, or a negative error code
+ */
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
-#else
-static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
-{
-}
-static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
-{
- if (!cpumask_test_cpu(0, new_mask))
- return -EINVAL;
- return 0;
-}
-#endif
+extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
+extern void release_user_cpus_ptr(struct task_struct *p);
+extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
+extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
+extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
@@ -1723,6 +1879,7 @@ extern int sched_setscheduler(struct task_struct *, int, const struct sched_para
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern void sched_set_fifo(struct task_struct *p);
extern void sched_set_fifo_low(struct task_struct *p);
+extern void sched_set_fifo_secondary(struct task_struct *p);
extern void sched_set_normal(struct task_struct *p, int nice);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
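
A caller-side sketch for the affinity kernel-doc above (worker and cpu are illustrative):

static int example_pin_worker(struct task_struct *worker, int cpu)
{
	/* Preferred over set_cpus_allowed_force(), per the comment above. */
	return set_cpus_allowed_ptr(worker, cpumask_of(cpu));
}
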
@@ -1745,9 +1902,7 @@ extern void ia64_set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
-#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
struct task_struct task;
-#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
#endif
@@ -1761,11 +1916,8 @@ extern struct thread_info init_thread_info;
extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
#ifdef CONFIG_THREAD_INFO_IN_TASK
-static inline struct thread_info *task_thread_info(struct task_struct *task)
-{
- return &task->thread_info;
-}
-#elif !defined(__HAVE_THREAD_FUNCTIONS)
+# define task_thread_info(task) (&(task)->thread_info)
+#else
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif
@@ -1792,26 +1944,33 @@ extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
-#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
-#else
-static inline void kick_process(struct task_struct *tsk) { }
-#endif
extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
+#define set_task_comm(tsk, from) ({ \
+ BUILD_BUG_ON(sizeof(from) != TASK_COMM_LEN); \
+ __set_task_comm(tsk, from, false); \
+})
-static inline void set_task_comm(struct task_struct *tsk, const char *from)
-{
- __set_task_comm(tsk, from, false);
-}
-
-extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
+/*
+ * - Why not use task_lock()?
+ * User space can change a task's name at any time, so locking for readers
+ * doesn't buy anything. For writers, locking is probably necessary, as a
+ * race could otherwise leave the name as a long-lived mix of two updates.
+ * The strscpy_pad() in __set_task_comm() ensures that the task comm is
+ * always NUL-terminated and zero-padded, so the race between reader and
+ * writer is not an issue.
+ *
+ * - BUILD_BUG_ON() can help prevent the buf from being truncated.
+ * Since the callers don't perform any return value checks, this safeguard is
+ * necessary.
+ */
#define get_task_comm(buf, tsk) ({ \
- BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
- __get_task_comm(buf, sizeof(buf), tsk); \
+ BUILD_BUG_ON(sizeof(buf) < TASK_COMM_LEN); \
+ strscpy_pad(buf, (tsk)->comm); \
+ buf; \
})
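
Usage sketch for the macro above; the destination must be a real array so that sizeof() sees at least TASK_COMM_LEN:

	char comm[TASK_COMM_LEN];

	pr_info("woken by %s\n", get_task_comm(comm, current));
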
-#ifdef CONFIG_SMP
static __always_inline void scheduler_ipi(void)
{
/*
@@ -1821,14 +1980,8 @@ static __always_inline void scheduler_ipi(void)
*/
preempt_fold_need_resched();
}
-extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
-#else
-static inline void scheduler_ipi(void) { }
-static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
-{
- return 1;
-}
-#endif
+
+extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
/*
* Set thread flags in other task's structures.
@@ -1867,12 +2020,16 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
+ if (tracepoint_enabled(sched_set_need_resched_tp) &&
+ !test_tsk_thread_flag(tsk, TIF_NEED_RESCHED))
+ __trace_set_need_resched(tsk, TIF_NEED_RESCHED);
set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
- clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+ atomic_long_andnot(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY,
+ (atomic_long_t *)&task_thread_info(tsk)->flags);
}
static inline int test_tsk_need_resched(struct task_struct *tsk)
@@ -1880,6 +2037,13 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
+static inline void set_need_resched_current(void)
+{
+ lockdep_assert_irqs_disabled();
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
+}
+
/*
* cond_resched() and cond_resched_lock(): latency reduction via
* explicit rescheduling in places that are safe. The return
@@ -1889,7 +2053,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
extern int __cond_resched(void);
-#ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
DECLARE_STATIC_CALL(cond_resched, __cond_resched);
@@ -1898,23 +2062,35 @@ static __always_inline int _cond_resched(void)
return static_call_mod(cond_resched)();
}
-#else
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+extern int dynamic_cond_resched(void);
+
+static __always_inline int _cond_resched(void)
+{
+ return dynamic_cond_resched();
+}
+
+#else /* !CONFIG_PREEMPTION */
static inline int _cond_resched(void)
{
return __cond_resched();
}
-#endif /* CONFIG_PREEMPT_DYNAMIC */
+#endif /* PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
-#else
+#else /* CONFIG_PREEMPTION && !CONFIG_PREEMPT_DYNAMIC */
-static inline int _cond_resched(void) { return 0; }
+static inline int _cond_resched(void)
+{
+ return 0;
+}
-#endif /* !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) */
+#endif /* !CONFIG_PREEMPTION || CONFIG_PREEMPT_DYNAMIC */
#define cond_resched() ({ \
- ___might_sleep(__FILE__, __LINE__, 0); \
+ __might_resched(__FILE__, __LINE__, 0); \
_cond_resched(); \
})
@@ -1922,60 +2098,105 @@ extern int __cond_resched_lock(spinlock_t *lock);
extern int __cond_resched_rwlock_read(rwlock_t *lock);
extern int __cond_resched_rwlock_write(rwlock_t *lock);
-#define cond_resched_lock(lock) ({ \
- ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
- __cond_resched_lock(lock); \
+#define MIGHT_RESCHED_RCU_SHIFT 8
+#define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
+
+#ifndef CONFIG_PREEMPT_RT
+/*
+ * Non RT kernels have an elevated preempt count due to the held lock,
+ * but are not allowed to be inside a RCU read side critical section
+ */
+# define PREEMPT_LOCK_RESCHED_OFFSETS PREEMPT_LOCK_OFFSET
+#else
+/*
+ * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
+ * cond_resched*lock() has to take that into account because it checks for
+ * preempt_count() and rcu_preempt_depth().
+ */
+# define PREEMPT_LOCK_RESCHED_OFFSETS \
+ (PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
+#endif
+
+#define cond_resched_lock(lock) ({ \
+ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
+ __cond_resched_lock(lock); \
})
-#define cond_resched_rwlock_read(lock) ({ \
- __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
- __cond_resched_rwlock_read(lock); \
+#define cond_resched_rwlock_read(lock) ({ \
+ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
+ __cond_resched_rwlock_read(lock); \
})
-#define cond_resched_rwlock_write(lock) ({ \
- __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
- __cond_resched_rwlock_write(lock); \
+#define cond_resched_rwlock_write(lock) ({ \
+ __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
+ __cond_resched_rwlock_write(lock); \
})
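
The canonical pattern these helpers serve (loop body illustrative): sprinkle explicit rescheduling points into long-running work, with the *_lock() variants dropping and retaking the lock around the reschedule.

	for (i = 0; i < nr_items; i++) {
		process_item(&items[i]);	/* hypothetical work item */
		cond_resched();			/* voluntary preemption point */
	}
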
-static inline void cond_resched_rcu(void)
+#ifndef CONFIG_PREEMPT_RT
+static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
{
-#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
- rcu_read_unlock();
- cond_resched();
- rcu_read_lock();
-#endif
+ struct mutex *m = p->blocked_on;
+
+ if (m)
+ lockdep_assert_held_once(&m->wait_lock);
+ return m;
}
-/*
- * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
- * but a general need for low latency)
- */
-static inline int spin_needbreak(spinlock_t *lock)
+static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
-#ifdef CONFIG_PREEMPTION
- return spin_is_contended(lock);
-#else
- return 0;
-#endif
+ struct mutex *blocked_on = READ_ONCE(p->blocked_on);
+
+ WARN_ON_ONCE(!m);
+ /* The task should only be setting itself as blocked */
+ WARN_ON_ONCE(p != current);
+ /* Currently we serialize blocked_on under the mutex::wait_lock */
+ lockdep_assert_held_once(&m->wait_lock);
+ /*
+	 * Check that we don't overwrite an existing mutex value
+	 * with a different mutex. Note, setting it to the same
+	 * lock repeatedly is ok.
+ */
+ WARN_ON_ONCE(blocked_on && blocked_on != m);
+ WRITE_ONCE(p->blocked_on, m);
}
-/*
- * Check if a rwlock is contended.
- * Returns non-zero if there is another task waiting on the rwlock.
- * Returns zero if the lock is not contended or the system / underlying
- * rwlock implementation does not support contention detection.
- * Technically does not depend on CONFIG_PREEMPTION, but a general need
- * for low latency.
- */
-static inline int rwlock_needbreak(rwlock_t *lock)
+static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
-#ifdef CONFIG_PREEMPTION
- return rwlock_is_contended(lock);
+ guard(raw_spinlock_irqsave)(&m->wait_lock);
+ __set_task_blocked_on(p, m);
+}
+
+static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m)
+{
+ if (m) {
+ struct mutex *blocked_on = READ_ONCE(p->blocked_on);
+
+ /* Currently we serialize blocked_on under the mutex::wait_lock */
+ lockdep_assert_held_once(&m->wait_lock);
+ /*
+ * There may be cases where we re-clear already cleared
+ * blocked_on relationships, but make sure we are not
+ * clearing the relationship with a different lock.
+ */
+ WARN_ON_ONCE(blocked_on && blocked_on != m);
+ }
+ WRITE_ONCE(p->blocked_on, NULL);
+}
+
+static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m)
+{
+ guard(raw_spinlock_irqsave)(&m->wait_lock);
+ __clear_task_blocked_on(p, m);
+}
#else
- return 0;
-#endif
+static inline void __clear_task_blocked_on(struct task_struct *p, struct rt_mutex *m)
+{
+}
+
+static inline void clear_task_blocked_on(struct task_struct *p, struct rt_mutex *m)
+{
}
+#endif /* !CONFIG_PREEMPT_RT */
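
For readers unfamiliar with guard(), each locked wrapper above is equivalent to the open-coded form (sketched for the clear side):

	unsigned long flags;

	raw_spin_lock_irqsave(&m->wait_lock, flags);
	__clear_task_blocked_on(p, m);
	raw_spin_unlock_irqrestore(&m->wait_lock, flags);
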
static __always_inline bool need_resched(void)
{
@@ -1989,11 +2210,7 @@ static __always_inline bool need_resched(void)
static inline unsigned int task_cpu(const struct task_struct *p)
{
-#ifdef CONFIG_THREAD_INFO_IN_TASK
- return READ_ONCE(p->cpu);
-#else
return READ_ONCE(task_thread_info(p)->cpu);
-#endif
}
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
@@ -2011,6 +2228,15 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
+static inline bool task_is_runnable(struct task_struct *p)
+{
+ return p->on_rq && !p->se.sched_delayed;
+}
+
+extern bool sched_task_on_rq(struct task_struct *p);
+extern unsigned long get_wchan(struct task_struct *p);
+extern struct task_struct *cpu_curr_snapshot(int cpu);
+
/*
* In order to reduce various lock holder preemption latencies provide an
* interface to see if a vCPU is currently running or not.
@@ -2033,143 +2259,185 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
-#ifdef CONFIG_SMP
-/* Returns effective CPU energy utilization, as seen by the scheduler */
-unsigned long sched_cpu_util(int cpu, unsigned long max);
-#endif /* CONFIG_SMP */
-
-#ifdef CONFIG_RSEQ
-
-/*
- * Map the event mask on the user-space ABI enum rseq_cs_flags
- * for direct mask checks.
- */
-enum rseq_event_mask_bits {
- RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
- RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
- RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
-};
-
-enum rseq_event_mask {
- RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
- RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
- RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
-};
-
-static inline void rseq_set_notify_resume(struct task_struct *t)
+static inline bool owner_on_cpu(struct task_struct *owner)
{
- if (t->rseq)
- set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+ /*
+ * As lock holder preemption issue, we both skip spinning if
+ * task is not on cpu or its cpu is preempted
+ */
+ return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
}
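
Sketch of the optimistic-spin loop this predicate serves (trylock name hypothetical):

	/* Spin only while the owner is genuinely running on some CPU. */
	while (owner_on_cpu(owner)) {
		if (example_trylock(lock))	/* hypothetical trylock */
			return true;
		cpu_relax();
	}
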
-void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
+/* Returns effective CPU energy utilization, as seen by the scheduler */
+unsigned long sched_cpu_util(int cpu);
+
+#ifdef CONFIG_SCHED_CORE
+extern void sched_core_free(struct task_struct *tsk);
+extern void sched_core_fork(struct task_struct *p);
+extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
+ unsigned long uaddr);
+extern int sched_core_idle_cpu(int cpu);
+#else
+static inline void sched_core_free(struct task_struct *tsk) { }
+static inline void sched_core_fork(struct task_struct *p) { }
+static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
+#endif
-static inline void rseq_handle_notify_resume(struct ksignal *ksig,
- struct pt_regs *regs)
-{
- if (current->rseq)
- __rseq_handle_notify_resume(ksig, regs);
-}
+extern void sched_set_stop_task(int cpu, struct task_struct *stop);
-static inline void rseq_signal_deliver(struct ksignal *ksig,
- struct pt_regs *regs)
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+static __always_inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag)
{
- preempt_disable();
- __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
- preempt_enable();
- rseq_handle_notify_resume(ksig, regs);
+ swap(current->alloc_tag, tag);
+ return tag;
}
-/* rseq_preempt() requires preemption to be disabled. */
-static inline void rseq_preempt(struct task_struct *t)
+static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old)
{
- __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
- rseq_set_notify_resume(t);
+#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
+ WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n");
+#endif
+ current->alloc_tag = old;
}
+#else
+#define alloc_tag_save(_tag) NULL
+#define alloc_tag_restore(_tag, _old) do {} while (0)
+#endif
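
The intended save/restore pairing, as a sketch (scope illustrative):

	struct alloc_tag *old = alloc_tag_save(tag);

	/* Allocations in this window are accounted to @tag. */

	alloc_tag_restore(tag, old);
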
-/* rseq_migrate() requires preemption to be disabled. */
-static inline void rseq_migrate(struct task_struct *t)
+/* Avoids recursive inclusion hell */
+#ifdef CONFIG_SCHED_MM_CID
+void sched_mm_cid_before_execve(struct task_struct *t);
+void sched_mm_cid_after_execve(struct task_struct *t);
+void sched_mm_cid_fork(struct task_struct *t);
+void sched_mm_cid_exit(struct task_struct *t);
+static __always_inline int task_mm_cid(struct task_struct *t)
{
- __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
- rseq_set_notify_resume(t);
+ return t->mm_cid.cid & ~(MM_CID_ONCPU | MM_CID_TRANSIT);
}
-
-/*
- * If parent process has a registered restartable sequences area, the
- * child inherits. Unregister rseq for a clone with CLONE_VM set.
- */
-static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
+#else
+static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
+static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
+static inline void sched_mm_cid_fork(struct task_struct *t) { }
+static inline void sched_mm_cid_exit(struct task_struct *t) { }
+static __always_inline int task_mm_cid(struct task_struct *t)
{
- if (clone_flags & CLONE_VM) {
- t->rseq = NULL;
- t->rseq_sig = 0;
- t->rseq_event_mask = 0;
- } else {
- t->rseq = current->rseq;
- t->rseq_sig = current->rseq_sig;
- t->rseq_event_mask = current->rseq_event_mask;
- }
+ /*
+ * Use the processor id as a fall-back when the mm cid feature is
+ * disabled. This provides functional per-cpu data structure accesses
+ * in user-space, although it won't provide the memory usage benefits.
+ */
+ return task_cpu(t);
}
+#endif
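For context, a user-space sketch of what the compact ID enables: per-thread
data can be indexed by mm_cid, bounded by the number of concurrent threads
rather than by the number of possible CPUs (assumes a registered struct rseq
exposing the mm_cid field; names are illustrative):

	struct slot { unsigned long count; };
	extern struct rseq *rs;		/* this thread's registered rseq area */
	extern struct slot *slots;	/* sized by max concurrency, not nr_cpus */

	static void bump(void)
	{
		int cid = __atomic_load_n(&rs->mm_cid, __ATOMIC_RELAXED);

		__atomic_fetch_add(&slots[cid].count, 1, __ATOMIC_RELAXED);
	}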
-static inline void rseq_execve(struct task_struct *t)
-{
- t->rseq = NULL;
- t->rseq_sig = 0;
- t->rseq_event_mask = 0;
-}
+#ifndef MODULE
+#ifndef COMPILE_OFFSETS
+extern void ___migrate_enable(void);
+
+struct rq;
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+
+/*
+ * The "struct rq" is not available here, so we can't access the
+ * "runqueues" with this_cpu_ptr(), as the compilation will fail in
+ * this_cpu_ptr() -> raw_cpu_ptr() -> __verify_pcpu_ptr():
+ * typeof((ptr) + 0)
+ *
+ * So use arch_raw_cpu_ptr()/PERCPU_PTR() directly here.
+ */
+#ifdef CONFIG_SMP
+#define this_rq_raw() arch_raw_cpu_ptr(&runqueues)
#else
+#define this_rq_raw() PERCPU_PTR(&runqueues)
+#endif
+#define this_rq_pinned() (*(unsigned int *)((void *)this_rq_raw() + RQ_nr_pinned))
-static inline void rseq_set_notify_resume(struct task_struct *t)
-{
-}
-static inline void rseq_handle_notify_resume(struct ksignal *ksig,
- struct pt_regs *regs)
-{
-}
-static inline void rseq_signal_deliver(struct ksignal *ksig,
- struct pt_regs *regs)
-{
-}
-static inline void rseq_preempt(struct task_struct *t)
-{
-}
-static inline void rseq_migrate(struct task_struct *t)
-{
-}
-static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
+static inline void __migrate_enable(void)
{
-}
-static inline void rseq_execve(struct task_struct *t)
-{
-}
+ struct task_struct *p = current;
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+ * Check both overflow from migrate_disable() and superfluous
+ * migrate_enable().
+ */
+ if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
+ return;
#endif
-#ifdef CONFIG_DEBUG_RSEQ
-
-void rseq_syscall(struct pt_regs *regs);
+ if (p->migration_disabled > 1) {
+ p->migration_disabled--;
+ return;
+ }
-#else
+ /*
+ * Ensure stop_task runs either before or after this, and that
+ * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
+ */
+ guard(preempt)();
+ if (unlikely(p->cpus_ptr != &p->cpus_mask))
+ ___migrate_enable();
+ /*
+ * Mustn't clear migration_disabled() until cpus_ptr points back at the
+ * regular cpus_mask, otherwise things that race (e.g.
+ * select_fallback_rq) get confused.
+ */
+ barrier();
+ p->migration_disabled = 0;
+ this_rq_pinned()--;
+}
-static inline void rseq_syscall(struct pt_regs *regs)
+static inline void __migrate_disable(void)
{
-}
+ struct task_struct *p = current;
+ if (p->migration_disabled) {
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+ * Warn about overflow half-way through the range.
+ */
+ WARN_ON_ONCE((s16)p->migration_disabled < 0);
#endif
+ p->migration_disabled++;
+ return;
+ }
+
+ guard(preempt)();
+ this_rq_pinned()++;
+ p->migration_disabled = 1;
+}
+#else /* COMPILE_OFFSETS */
+static inline void __migrate_disable(void) { }
+static inline void __migrate_enable(void) { }
+#endif /* COMPILE_OFFSETS */
-const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
-char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
-int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
+/*
+ * To make it possible not to export the "runqueues" variable, define and
+ * export migrate_enable()/migrate_disable() in kernel/sched/core.c as well,
+ * and use those for modules. The macro "INSTANTIATE_EXPORTED_MIGRATE_DISABLE"
+ * will be defined in kernel/sched/core.c.
+ */
+#ifndef INSTANTIATE_EXPORTED_MIGRATE_DISABLE
+static __always_inline void migrate_disable(void)
+{
+ __migrate_disable();
+}
-const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
-const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
-const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
+static __always_inline void migrate_enable(void)
+{
+ __migrate_enable();
+}
+#else /* INSTANTIATE_EXPORTED_MIGRATE_DISABLE */
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+#endif /* INSTANTIATE_EXPORTED_MIGRATE_DISABLE */
-int sched_trace_rq_cpu(struct rq *rq);
-int sched_trace_rq_cpu_capacity(struct rq *rq);
-int sched_trace_rq_nr_running(struct rq *rq);
+#else /* MODULE */
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+#endif /* MODULE */
-const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
+DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
#endif
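A usage sketch for the new guard; demo_counter and demo_read are illustrative:

	static DEFINE_PER_CPU(int, demo_counter);

	static int demo_read(void)
	{
		guard(migrate)();	/* migrate_disable() now, migrate_enable() on scope exit */
		return __this_cpu_read(demo_counter);	/* safe: we cannot change CPUs here */
	}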
diff --git a/include/linux/sched/affinity.h b/include/linux/sched/affinity.h
new file mode 100644
index 000000000000..227f5be81bcd
--- /dev/null
+++ b/include/linux/sched/affinity.h
@@ -0,0 +1 @@
+#include <linux/sched.h>
diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h
index 867d588314e0..196f0ca351a2 100644
--- a/include/linux/sched/clock.h
+++ b/include/linux/sched/clock.h
@@ -12,7 +12,16 @@
*
* Please use one of the three interfaces below.
*/
-extern unsigned long long notrace sched_clock(void);
+extern u64 sched_clock(void);
+
+#if defined(CONFIG_ARCH_WANTS_NO_INSTR) || defined(CONFIG_GENERIC_SCHED_CLOCK)
+extern u64 sched_clock_noinstr(void);
+#else
+static __always_inline u64 sched_clock_noinstr(void)
+{
+ return sched_clock();
+}
+#endif
/*
* See the comment in kernel/sched/clock.c
@@ -45,7 +54,12 @@ static inline u64 cpu_clock(int cpu)
return sched_clock();
}
-static inline u64 local_clock(void)
+static __always_inline u64 local_clock_noinstr(void)
+{
+ return sched_clock_noinstr();
+}
+
+static __always_inline u64 local_clock(void)
{
return sched_clock();
}
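A timing sketch using the plain interface; udelay() stands in for real work:

	static u64 demo_time_section(void)
	{
		u64 t0 = local_clock();		/* nanoseconds, per-CPU monotonic */

		udelay(10);			/* stand-in workload */
		return local_clock() - t0;	/* elapsed ns, roughly 10000 here */
	}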
@@ -79,10 +93,9 @@ static inline u64 cpu_clock(int cpu)
return sched_clock_cpu(cpu);
}
-static inline u64 local_clock(void)
-{
- return sched_clock_cpu(raw_smp_processor_id());
-}
+extern u64 local_clock_noinstr(void);
+extern u64 local_clock(void);
+
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
diff --git a/include/linux/sched/cond_resched.h b/include/linux/sched/cond_resched.h
new file mode 100644
index 000000000000..227f5be81bcd
--- /dev/null
+++ b/include/linux/sched/cond_resched.h
@@ -0,0 +1 @@
+#include <linux/sched.h>
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index dfd82eab2902..624fda17a785 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -8,11 +8,19 @@
#define SUID_DUMP_USER 1 /* Dump as user of process */
#define SUID_DUMP_ROOT 2 /* Dump as root */
-/* mm flags */
+static inline unsigned long __mm_flags_get_dumpable(const struct mm_struct *mm)
+{
+ /*
+ * By convention, dumpable bits are contained in first 32 bits of the
+ * bitmap, so we can simply access this first unsigned long directly.
+ */
+ return __mm_flags_get_word(mm);
+}
-/* for SUID_DUMP_* above */
-#define MMF_DUMPABLE_BITS 2
-#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
+static inline void __mm_flags_set_mask_dumpable(struct mm_struct *mm, int value)
+{
+ __mm_flags_set_mask_bits_word(mm, MMF_DUMPABLE_MASK, value);
+}
extern void set_dumpable(struct mm_struct *mm, int value);
/*
@@ -28,54 +36,9 @@ static inline int __get_dumpable(unsigned long mm_flags)
static inline int get_dumpable(struct mm_struct *mm)
{
- return __get_dumpable(mm->flags);
-}
-
-/* coredump filter bits */
-#define MMF_DUMP_ANON_PRIVATE 2
-#define MMF_DUMP_ANON_SHARED 3
-#define MMF_DUMP_MAPPED_PRIVATE 4
-#define MMF_DUMP_MAPPED_SHARED 5
-#define MMF_DUMP_ELF_HEADERS 6
-#define MMF_DUMP_HUGETLB_PRIVATE 7
-#define MMF_DUMP_HUGETLB_SHARED 8
-#define MMF_DUMP_DAX_PRIVATE 9
-#define MMF_DUMP_DAX_SHARED 10
+ unsigned long flags = __mm_flags_get_dumpable(mm);
-#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
-#define MMF_DUMP_FILTER_BITS 9
-#define MMF_DUMP_FILTER_MASK \
- (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
-#define MMF_DUMP_FILTER_DEFAULT \
- ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
- (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
-
-#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
-# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
-#else
-# define MMF_DUMP_MASK_DEFAULT_ELF 0
-#endif
- /* leave room for more dump flags */
-#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
-#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */
-/*
- * This one-shot flag is dropped due to necessity of changing exe once again
- * on NFS restore
- */
-//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */
-
-#define MMF_HAS_UPROBES 19 /* has uprobes */
-#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
-#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */
-#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
-#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
-#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
-#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
-#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
-#define MMF_MULTIPROCESS 27 /* mm is shared between processes */
-#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
-
-#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
- MMF_DISABLE_THP_MASK)
+ return __get_dumpable(flags);
+}
#endif /* _LINUX_SCHED_COREDUMP_H */
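A call-site sketch of the accessor; the access-control check mirrors a common
pattern, and demo_may_access is illustrative:

	static int demo_may_access(struct mm_struct *mm)
	{
		if (get_dumpable(mm) != SUID_DUMP_USER)
			return -EACCES;		/* don't expose a non-dumpable mm */
		return 0;
	}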
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index 6205578ab6ee..bdd31ab93bc5 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -26,7 +26,7 @@ bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy);
static inline unsigned long map_util_freq(unsigned long util,
unsigned long freq, unsigned long cap)
{
- return (freq + (freq >> 2)) * util / cap;
+ return freq * util / cap;
}
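A worked example of the new mapping, with illustrative numbers; the 25%
headroom the old body added is expected to come from the separate
map_util_perf() helper instead:

	/* util = 512 out of cap = 1024 on a 2 GHz (2000000 kHz) policy: */
	unsigned long f = map_util_freq(512, 2000000, 1024);	/* 1000000 kHz */
	/* the old body returned (2000000 + 500000) * 512 / 1024 = 1250000 kHz */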
static inline unsigned long map_util_perf(unsigned long util)
diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h
index 6c9f19a33865..5f8fd5b24a2e 100644
--- a/include/linux/sched/cputime.h
+++ b/include/linux/sched/cputime.h
@@ -8,25 +8,17 @@
* cputime accounting APIs:
*/
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-#include <asm/cputime.h>
-
-#ifndef cputime_to_nsecs
-# define cputime_to_nsecs(__ct) \
- (cputime_to_usecs(__ct) * NSEC_PER_USEC)
-#endif
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern void task_cputime(struct task_struct *t,
+extern bool task_cputime(struct task_struct *t,
u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
-static inline void task_cputime(struct task_struct *t,
+static inline bool task_cputime(struct task_struct *t,
u64 *utime, u64 *stime)
{
*utime = t->utime;
*stime = t->stime;
+ return false;
}
static inline u64 task_gtime(struct task_struct *t)
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index 1aff00b65f3c..c40115d4e34d 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -1,4 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_DEADLINE_H
+#define _LINUX_SCHED_DEADLINE_H
/*
* SCHED_DEADLINE tasks have negative priorities, reflecting
@@ -6,16 +8,18 @@
* NORMAL/BATCH tasks.
*/
-#define MAX_DL_PRIO 0
+#include <linux/sched.h>
-static inline int dl_prio(int prio)
+static inline bool dl_prio(int prio)
{
- if (unlikely(prio < MAX_DL_PRIO))
- return 1;
- return 0;
+ return unlikely(prio < MAX_DL_PRIO);
}
-static inline int dl_task(struct task_struct *p)
+/*
+ * Returns true if a task has a priority that belongs to DL class. PI-boosted
+ * tasks will return true. Use dl_policy() to ignore PI-boosted tasks.
+ */
+static inline bool dl_task(struct task_struct *p)
{
return dl_prio(p->prio);
}
@@ -25,10 +29,12 @@ static inline bool dl_time_before(u64 a, u64 b)
return (s64)(a - b) < 0;
}
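The signed subtraction makes the comparison wrap-safe; two worked cases:

	dl_time_before(10, 20);		/* true: (s64)(10 - 20) = -10 < 0 */
	dl_time_before(U64_MAX, 0);	/* true: (s64)(U64_MAX - 0) = -1, so 0
					 * counts as "after" U64_MAX across the wrap */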
-#ifdef CONFIG_SMP
-
struct root_domain;
extern void dl_add_task_root_domain(struct task_struct *p);
extern void dl_clear_root_domain(struct root_domain *rd);
+extern void dl_clear_root_domain_cpu(int cpu);
+
+extern u64 dl_cookie;
+extern bool dl_bw_visited(int cpu, u64 cookie);
-#endif /* CONFIG_SMP */
+#endif /* _LINUX_SCHED_DEADLINE_H */
diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h
index ae51f4529fc9..35ed4577a6cc 100644
--- a/include/linux/sched/debug.h
+++ b/include/linux/sched/debug.h
@@ -14,7 +14,7 @@ extern void dump_cpu_task(int cpu);
/*
* Only dump TASK_* tasks. (0 for all tasks)
*/
-extern void show_state_filter(unsigned long state_filter);
+extern void show_state_filter(unsigned int state_filter);
static inline void show_state(void)
{
@@ -35,12 +35,10 @@ extern void show_stack(struct task_struct *task, unsigned long *sp,
extern void sched_show_task(struct task_struct *p);
-#ifdef CONFIG_SCHED_DEBUG
struct seq_file;
extern void proc_sched_show_task(struct task_struct *p,
struct pid_namespace *ns, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
-#endif
/* Attach to any functions which should be ignored in wchan output. */
#define __sched __section(".sched.text")
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
new file mode 100644
index 000000000000..bcb962d5ee7d
--- /dev/null
+++ b/include/linux/sched/ext.h
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst
+ *
+ * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
+ * Copyright (c) 2022 David Vernet <dvernet@meta.com>
+ */
+#ifndef _LINUX_SCHED_EXT_H
+#define _LINUX_SCHED_EXT_H
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+
+#include <linux/llist.h>
+#include <linux/rhashtable-types.h>
+
+enum scx_public_consts {
+ SCX_OPS_NAME_LEN = 128,
+
+ /*
+ * %SCX_SLICE_DFL is used to refill slices when the BPF scheduler fails
+ * to set the slice for a task that is selected for execution.
+ * %SCX_EV_REFILL_SLICE_DFL counts the number of times the default slice
+ * refill has been triggered.
+ *
+ * %SCX_SLICE_BYPASS is used as the slice for all tasks in the bypass
+ * mode. As making forward progress for all tasks is the main goal of
+ * the bypass mode, a shorter slice is used.
+ */
+ SCX_SLICE_DFL = 20 * 1000000, /* 20ms */
+ SCX_SLICE_BYPASS = 5 * 1000000, /* 5ms */
+ SCX_SLICE_INF = U64_MAX, /* infinite, implies nohz */
+};
+
+/*
+ * DSQ (dispatch queue) IDs are 64bit of the format:
+ *
+ * Bits: [63] [62 .. 0]
+ * [ B] [ ID ]
+ *
+ * B: 1 for IDs for built-in DSQs, 0 for ops-created user DSQs
+ * ID: 63 bit ID
+ *
+ * Built-in IDs:
+ *
+ * Bits: [63] [62] [61..32] [31 .. 0]
+ * [ 1] [ L] [ R ] [ V ]
+ *
+ * 1: 1 for built-in DSQs.
+ * L: 1 for LOCAL_ON DSQ IDs, 0 for others
+ * V: For LOCAL_ON DSQ IDs, a CPU number. For others, a pre-defined value.
+ */
+enum scx_dsq_id_flags {
+ SCX_DSQ_FLAG_BUILTIN = 1LLU << 63,
+ SCX_DSQ_FLAG_LOCAL_ON = 1LLU << 62,
+
+ SCX_DSQ_INVALID = SCX_DSQ_FLAG_BUILTIN | 0,
+ SCX_DSQ_GLOBAL = SCX_DSQ_FLAG_BUILTIN | 1,
+ SCX_DSQ_LOCAL = SCX_DSQ_FLAG_BUILTIN | 2,
+ SCX_DSQ_BYPASS = SCX_DSQ_FLAG_BUILTIN | 3,
+ SCX_DSQ_LOCAL_ON = SCX_DSQ_FLAG_BUILTIN | SCX_DSQ_FLAG_LOCAL_ON,
+ SCX_DSQ_LOCAL_CPU_MASK = 0xffffffffLLU,
+};
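For example, a BPF scheduler targets the local DSQ of a particular CPU by
encoding the CPU number into the V field; a sketch, assuming an ops.enqueue()
context where the scx_bpf_dsq_insert() kfunc (referenced below) may be called:

	/* dispatch p to CPU 3's local DSQ with the default slice */
	u64 dsq_id = SCX_DSQ_LOCAL_ON | 3;	/* B = 1, L = 1, V = 3 */

	scx_bpf_dsq_insert(p, dsq_id, SCX_SLICE_DFL, 0);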
+
+/*
+ * A dispatch queue (DSQ) can be either a FIFO or p->scx.dsq_vtime ordered
+ * queue. A built-in DSQ is always a FIFO. The built-in local DSQs are used to
+ * buffer between the scheduler core and the BPF scheduler. See the
+ * documentation for more details.
+ */
+struct scx_dispatch_q {
+ raw_spinlock_t lock;
+ struct task_struct __rcu *first_task; /* lockless peek at head */
+ struct list_head list; /* tasks in dispatch order */
+ struct rb_root priq; /* used to order by p->scx.dsq_vtime */
+ u32 nr;
+ u32 seq; /* used by BPF iter */
+ u64 id;
+ struct rhash_head hash_node;
+ struct llist_node free_node;
+ struct rcu_head rcu;
+};
+
+/* scx_entity.flags */
+enum scx_ent_flags {
+ SCX_TASK_QUEUED = 1 << 0, /* on ext runqueue */
+ SCX_TASK_RESET_RUNNABLE_AT = 1 << 2, /* runnable_at should be reset */
+ SCX_TASK_DEQD_FOR_SLEEP = 1 << 3, /* last dequeue was for SLEEP */
+
+ SCX_TASK_STATE_SHIFT = 8, /* bits 8 and 9 carry scx_task_state */
+ SCX_TASK_STATE_BITS = 2,
+ SCX_TASK_STATE_MASK = ((1 << SCX_TASK_STATE_BITS) - 1) << SCX_TASK_STATE_SHIFT,
+
+ SCX_TASK_CURSOR = 1 << 31, /* iteration cursor, not a task */
+};
+
+/* scx_entity.flags & SCX_TASK_STATE_MASK */
+enum scx_task_state {
+ SCX_TASK_NONE, /* ops.init_task() not called yet */
+ SCX_TASK_INIT, /* ops.init_task() succeeded, but task can be cancelled */
+ SCX_TASK_READY, /* fully initialized, but not in sched_ext */
+ SCX_TASK_ENABLED, /* fully initialized and in sched_ext */
+
+ SCX_TASK_NR_STATES,
+};
+
+/* scx_entity.dsq_flags */
+enum scx_ent_dsq_flags {
+ SCX_TASK_DSQ_ON_PRIQ = 1 << 0, /* task is queued on the priority queue of a dsq */
+};
+
+/*
+ * Mask bits for scx_entity.kf_mask. Not all kfuncs can be called from
+ * everywhere and the following bits track which kfunc sets are currently
+ * allowed for %current. This simple per-task tracking works because SCX ops
+ * nest in a limited way. BPF will likely implement a way to allow and disallow
+ * kfuncs depending on the calling context, which will replace this manual
+ * mechanism. See scx_kf_allow().
+ */
+enum scx_kf_mask {
+ SCX_KF_UNLOCKED = 0, /* sleepable and not rq locked */
+ /* ENQUEUE and DISPATCH may be nested inside CPU_RELEASE */
+ SCX_KF_CPU_RELEASE = 1 << 0, /* ops.cpu_release() */
+ /*
+ * ops.dispatch() may release rq lock temporarily and thus ENQUEUE and
+ * SELECT_CPU may be nested inside. ops.dequeue (in REST) may also be
+ * nested inside DISPATCH.
+ */
+ SCX_KF_DISPATCH = 1 << 1, /* ops.dispatch() */
+ SCX_KF_ENQUEUE = 1 << 2, /* ops.enqueue() and ops.select_cpu() */
+ SCX_KF_SELECT_CPU = 1 << 3, /* ops.select_cpu() */
+ SCX_KF_REST = 1 << 4, /* other rq-locked operations */
+
+ __SCX_KF_RQ_LOCKED = SCX_KF_CPU_RELEASE | SCX_KF_DISPATCH |
+ SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST,
+ __SCX_KF_TERMINAL = SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST,
+};
+
+enum scx_dsq_lnode_flags {
+ SCX_DSQ_LNODE_ITER_CURSOR = 1 << 0,
+
+ /* high 16 bits can be for iter cursor flags */
+ __SCX_DSQ_LNODE_PRIV_SHIFT = 16,
+};
+
+struct scx_dsq_list_node {
+ struct list_head node;
+ u32 flags;
+ u32 priv; /* can be used by iter cursor */
+};
+
+#define INIT_DSQ_LIST_CURSOR(__node, __flags, __priv) \
+ (struct scx_dsq_list_node) { \
+ .node = LIST_HEAD_INIT((__node).node), \
+ .flags = SCX_DSQ_LNODE_ITER_CURSOR | (__flags), \
+ .priv = (__priv), \
+ }
+
+/*
+ * The following is embedded in task_struct and contains all fields necessary
+ * for a task to be scheduled by SCX.
+ */
+struct sched_ext_entity {
+ struct scx_dispatch_q *dsq;
+ struct scx_dsq_list_node dsq_list; /* dispatch order */
+ struct rb_node dsq_priq; /* p->scx.dsq_vtime order */
+ u32 dsq_seq;
+ u32 dsq_flags; /* protected by DSQ lock */
+ u32 flags; /* protected by rq lock */
+ u32 weight;
+ s32 sticky_cpu;
+ s32 holding_cpu;
+ s32 selected_cpu;
+ u32 kf_mask; /* see scx_kf_mask above */
+ struct task_struct *kf_tasks[2]; /* see SCX_CALL_OP_TASK() */
+ atomic_long_t ops_state;
+
+ struct list_head runnable_node; /* rq->scx.runnable_list */
+ unsigned long runnable_at;
+
+#ifdef CONFIG_SCHED_CORE
+ u64 core_sched_at; /* see scx_prio_less() */
+#endif
+ u64 ddsp_dsq_id;
+ u64 ddsp_enq_flags;
+
+ /* BPF scheduler modifiable fields */
+
+ /*
+ * Runtime budget in nsecs. This is usually set through
+ * scx_bpf_dsq_insert() but can also be modified directly by the BPF
+ * scheduler. Automatically decreased by SCX as the task executes. On
+ * depletion, a scheduling event is triggered.
+ *
+ * This value is cleared to zero if the task is preempted by
+ * %SCX_KICK_PREEMPT and shouldn't be used to determine how long the
+ * task ran. Use p->se.sum_exec_runtime instead.
+ */
+ u64 slice;
+
+ /*
+ * Used to order tasks when dispatching to the vtime-ordered priority
+ * queue of a dsq. This is usually set through
+ * scx_bpf_dsq_insert_vtime() but can also be modified directly by the
+ * BPF scheduler. Modifying it while a task is queued on a dsq may
+ * mangle the ordering and is not recommended.
+ */
+ u64 dsq_vtime;
+
+ /*
+ * If set, reject future sched_setscheduler(2) calls updating the policy
+ * to %SCHED_EXT with -%EACCES.
+ *
+ * Can be set from ops.init_task() while the BPF scheduler is being
+ * loaded (!scx_init_task_args->fork). If set and the task's policy is
+ * already %SCHED_EXT, the task's policy is rejected and forcefully
+ * reverted to %SCHED_NORMAL. The number of such events is reported
+ * through /sys/kernel/debug/sched_ext::nr_rejected. Setting this flag
+ * during fork is not allowed.
+ */
+ bool disallow; /* reject switching into SCX */
+
+ /* cold fields */
+#ifdef CONFIG_EXT_GROUP_SCHED
+ struct cgroup *cgrp_moving_from;
+#endif
+ struct list_head tasks_node;
+};
+
+void sched_ext_dead(struct task_struct *p);
+void print_scx_info(const char *log_lvl, struct task_struct *p);
+void scx_softlockup(u32 dur_s);
+bool scx_hardlockup(int cpu);
+bool scx_rcu_cpu_stall(void);
+
+#else /* !CONFIG_SCHED_CLASS_EXT */
+
+static inline void sched_ext_dead(struct task_struct *p) {}
+static inline void print_scx_info(const char *log_lvl, struct task_struct *p) {}
+static inline void scx_softlockup(u32 dur_s) {}
+static inline bool scx_hardlockup(int cpu) { return false; }
+static inline bool scx_rcu_cpu_stall(void) { return false; }
+
+#endif /* CONFIG_SCHED_CLASS_EXT */
+
+struct scx_task_group {
+#ifdef CONFIG_EXT_GROUP_SCHED
+ u32 flags; /* SCX_TG_* */
+ u32 weight;
+ u64 bw_period_us;
+ u64 bw_quota_us;
+ u64 bw_burst_us;
+ bool idle;
+#endif
+};
+
+#endif /* _LINUX_SCHED_EXT_H */
diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h
index 412cdaba33eb..17e04859b9a4 100644
--- a/include/linux/sched/hotplug.h
+++ b/include/linux/sched/hotplug.h
@@ -18,10 +18,6 @@ extern int sched_cpu_dying(unsigned int cpu);
# define sched_cpu_dying NULL
#endif
-#ifdef CONFIG_HOTPLUG_CPU
-extern void idle_task_exit(void);
-#else
static inline void idle_task_exit(void) {}
-#endif
#endif /* _LINUX_SCHED_HOTPLUG_H */
diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h
index 22873d276be6..8465ff1f20d1 100644
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
@@ -5,8 +5,8 @@
#include <linux/sched.h>
enum cpu_idle_type {
+ __CPU_NOT_IDLE = 0,
CPU_IDLE,
- CPU_NOT_IDLE,
CPU_NEWLY_IDLE,
CPU_MAX_IDLE_TYPES
};
@@ -19,12 +19,37 @@ extern void wake_up_if_idle(int cpu);
*/
#ifdef TIF_POLLING_NRFLAG
-static inline void __current_set_polling(void)
+#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
+
+static __always_inline void __current_set_polling(void)
{
- set_thread_flag(TIF_POLLING_NRFLAG);
+ arch_set_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
}
-static inline bool __must_check current_set_polling_and_test(void)
+static __always_inline void __current_clr_polling(void)
+{
+ arch_clear_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+#else
+
+static __always_inline void __current_set_polling(void)
+{
+ set_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+static __always_inline void __current_clr_polling(void)
+{
+ clear_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */
+
+static __always_inline bool __must_check current_set_polling_and_test(void)
{
__current_set_polling();
@@ -37,12 +62,7 @@ static inline bool __must_check current_set_polling_and_test(void)
return unlikely(tif_need_resched());
}
-static inline void __current_clr_polling(void)
-{
- clear_thread_flag(TIF_POLLING_NRFLAG);
-}
-
-static inline bool __must_check current_clr_polling_and_test(void)
+static __always_inline bool __must_check current_clr_polling_and_test(void)
{
__current_clr_polling();
@@ -55,6 +75,21 @@ static inline bool __must_check current_clr_polling_and_test(void)
return unlikely(tif_need_resched());
}
+static __always_inline void current_clr_polling(void)
+{
+ __current_clr_polling();
+
+ /*
+ * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
+ * Once the bit is cleared, we'll get IPIs with every new
+ * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
+ * fold.
+ */
+ smp_mb__after_atomic(); /* paired with resched_curr() */
+
+ preempt_fold_need_resched();
+}
+
#else
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }
@@ -67,21 +102,15 @@ static inline bool __must_check current_clr_polling_and_test(void)
{
return unlikely(tif_need_resched());
}
-#endif
-static inline void current_clr_polling(void)
+static __always_inline void current_clr_polling(void)
{
__current_clr_polling();
- /*
- * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
- * Once the bit is cleared, we'll get IPIs with every new
- * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
- * fold.
- */
smp_mb(); /* paired with resched_curr() */
preempt_fold_need_resched();
}
+#endif
#endif /* _LINUX_SCHED_IDLE_H */
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index cc9f393e2a70..d8501f4709b5 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -2,59 +2,79 @@
#define _LINUX_SCHED_ISOLATION_H
#include <linux/cpumask.h>
+#include <linux/cpuset.h>
#include <linux/init.h>
#include <linux/tick.h>
-enum hk_flags {
- HK_FLAG_TIMER = 1,
- HK_FLAG_RCU = (1 << 1),
- HK_FLAG_MISC = (1 << 2),
- HK_FLAG_SCHED = (1 << 3),
- HK_FLAG_TICK = (1 << 4),
- HK_FLAG_DOMAIN = (1 << 5),
- HK_FLAG_WQ = (1 << 6),
- HK_FLAG_MANAGED_IRQ = (1 << 7),
- HK_FLAG_KTHREAD = (1 << 8),
+enum hk_type {
+ HK_TYPE_DOMAIN,
+ HK_TYPE_MANAGED_IRQ,
+ HK_TYPE_KERNEL_NOISE,
+ HK_TYPE_MAX,
+
+ /*
+ * The following housekeeping types are only set by the nohz_full
+ * boot command-line option, so they can share the same value.
+ */
+ HK_TYPE_TICK = HK_TYPE_KERNEL_NOISE,
+ HK_TYPE_TIMER = HK_TYPE_KERNEL_NOISE,
+ HK_TYPE_RCU = HK_TYPE_KERNEL_NOISE,
+ HK_TYPE_MISC = HK_TYPE_KERNEL_NOISE,
+ HK_TYPE_WQ = HK_TYPE_KERNEL_NOISE,
+ HK_TYPE_KTHREAD = HK_TYPE_KERNEL_NOISE
};
#ifdef CONFIG_CPU_ISOLATION
DECLARE_STATIC_KEY_FALSE(housekeeping_overridden);
-extern int housekeeping_any_cpu(enum hk_flags flags);
-extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags);
-extern bool housekeeping_enabled(enum hk_flags flags);
-extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags);
-extern bool housekeeping_test_cpu(int cpu, enum hk_flags flags);
+extern int housekeeping_any_cpu(enum hk_type type);
+extern const struct cpumask *housekeeping_cpumask(enum hk_type type);
+extern bool housekeeping_enabled(enum hk_type type);
+extern void housekeeping_affine(struct task_struct *t, enum hk_type type);
+extern bool housekeeping_test_cpu(int cpu, enum hk_type type);
extern void __init housekeeping_init(void);
#else
-static inline int housekeeping_any_cpu(enum hk_flags flags)
+static inline int housekeeping_any_cpu(enum hk_type type)
{
return smp_processor_id();
}
-static inline const struct cpumask *housekeeping_cpumask(enum hk_flags flags)
+static inline const struct cpumask *housekeeping_cpumask(enum hk_type type)
{
return cpu_possible_mask;
}
-static inline bool housekeeping_enabled(enum hk_flags flags)
+static inline bool housekeeping_enabled(enum hk_type type)
{
return false;
}
static inline void housekeeping_affine(struct task_struct *t,
- enum hk_flags flags) { }
+ enum hk_type type) { }
+
+static inline bool housekeeping_test_cpu(int cpu, enum hk_type type)
+{
+ return true;
+}
+
static inline void housekeeping_init(void) { }
#endif /* CONFIG_CPU_ISOLATION */
-static inline bool housekeeping_cpu(int cpu, enum hk_flags flags)
+static inline bool housekeeping_cpu(int cpu, enum hk_type type)
{
#ifdef CONFIG_CPU_ISOLATION
if (static_branch_unlikely(&housekeeping_overridden))
- return housekeeping_test_cpu(cpu, flags);
+ return housekeeping_test_cpu(cpu, type);
#endif
return true;
}
+static inline bool cpu_is_isolated(int cpu)
+{
+ return !housekeeping_test_cpu(cpu, HK_TYPE_DOMAIN) ||
+ !housekeeping_test_cpu(cpu, HK_TYPE_TICK) ||
+ cpuset_cpu_is_isolated(cpu);
+}
+
#endif /* _LINUX_SCHED_ISOLATION_H */
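A usage sketch: keep a maintenance kthread off isolated CPUs by binding it to
the housekeeping mask before its first wakeup (demo_bind is illustrative):

	static void demo_bind(struct task_struct *worker)
	{
		kthread_bind_mask(worker, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE));
	}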
diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h
index fa067de9f1a9..68876d0a7ef9 100644
--- a/include/linux/sched/jobctl.h
+++ b/include/linux/sched/jobctl.h
@@ -19,6 +19,10 @@ struct task_struct;
#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
#define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */
+#define JOBCTL_PTRACE_FROZEN_BIT 24 /* frozen for ptrace */
+
+#define JOBCTL_STOPPED_BIT 26 /* do_signal_stop() */
+#define JOBCTL_TRACED_BIT 27 /* ptrace_stop() */
#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
@@ -28,6 +32,10 @@ struct task_struct;
#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
#define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT)
+#define JOBCTL_PTRACE_FROZEN (1UL << JOBCTL_PTRACE_FROZEN_BIT)
+
+#define JOBCTL_STOPPED (1UL << JOBCTL_STOPPED_BIT)
+#define JOBCTL_TRACED (1UL << JOBCTL_TRACED_BIT)
#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index e24b1fe348e3..0e1d73955fa5 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -8,6 +8,7 @@
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>
+#include <linux/sched/coredump.h>
/*
* Routines for handling mm_structs
@@ -28,7 +29,7 @@ extern struct mm_struct *mm_alloc(void);
*
* Use mmdrop() to release the reference acquired by mmgrab().
*
- * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
+ * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
* of &mm_struct.mm_count vs &mm_struct.mm_users.
*/
static inline void mmgrab(struct mm_struct *mm)
@@ -36,6 +37,11 @@ static inline void mmgrab(struct mm_struct *mm)
atomic_inc(&mm->mm_count);
}
+static inline void smp_mb__after_mmgrab(void)
+{
+ smp_mb__after_atomic();
+}
+
extern void __mmdrop(struct mm_struct *mm);
static inline void mmdrop(struct mm_struct *mm)
@@ -49,6 +55,63 @@ static inline void mmdrop(struct mm_struct *mm)
__mmdrop(mm);
}
+#ifdef CONFIG_PREEMPT_RT
+/*
+ * RCU callback for delayed mm drop. Not strictly RCU, but call_rcu() is
+ * by far the least expensive way to do that.
+ */
+static inline void __mmdrop_delayed(struct rcu_head *rhp)
+{
+ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
+
+ __mmdrop(mm);
+}
+
+/*
+ * Invoked from finish_task_switch(). Delegates the heavy lifting on RT
+ * kernels via RCU.
+ */
+static inline void mmdrop_sched(struct mm_struct *mm)
+{
+ /* Provides a full memory barrier. See mmdrop() */
+ if (atomic_dec_and_test(&mm->mm_count))
+ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
+}
+#else
+static inline void mmdrop_sched(struct mm_struct *mm)
+{
+ mmdrop(mm);
+}
+#endif
+
+/* Helpers for lazy TLB mm refcounting */
+static inline void mmgrab_lazy_tlb(struct mm_struct *mm)
+{
+ if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
+ mmgrab(mm);
+}
+
+static inline void mmdrop_lazy_tlb(struct mm_struct *mm)
+{
+ if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT)) {
+ mmdrop(mm);
+ } else {
+ /*
+ * mmdrop_lazy_tlb must provide a full memory barrier; see the
+ * membarrier comment in finish_task_switch(), which relies on this.
+ */
+ smp_mb();
+ }
+}
+
+static inline void mmdrop_lazy_tlb_sched(struct mm_struct *mm)
+{
+ if (IS_ENABLED(CONFIG_MMU_LAZY_TLB_REFCOUNT))
+ mmdrop_sched(mm);
+ else
+ smp_mb(); /* see mmdrop_lazy_tlb() above */
+}
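A pairing sketch for the lazy-TLB helpers; the context-switch plumbing is
elided and demo_borrow_mm is illustrative:

	static void demo_borrow_mm(struct mm_struct *mm)
	{
		mmgrab_lazy_tlb(mm);	/* refcounted only with CONFIG_MMU_LAZY_TLB_REFCOUNT */
		/* ... run with mm as a kernel thread's active_mm ... */
		mmdrop_lazy_tlb(mm);	/* pairs with the grab; a full barrier either way */
	}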
+
/**
* mmget() - Pin the address space associated with a &struct mm_struct.
* @mm: The address space to pin.
@@ -62,7 +125,7 @@ static inline void mmdrop(struct mm_struct *mm)
*
* Use mmput() to release the reference acquired by mmget().
*
- * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
+ * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
* of &mm_struct.mm_count vs &mm_struct.mm_users.
*/
static inline void mmget(struct mm_struct *mm)
@@ -77,7 +140,7 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) || defined(CONFIG_FUTEX_PRIVATE_HASH)
/* same as above but performs the slow path from the async context. Can
* be called from the atomic context as well
*/
@@ -106,18 +169,48 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
#endif /* CONFIG_MEMCG */
#ifdef CONFIG_MMU
+#ifndef arch_get_mmap_end
+#define arch_get_mmap_end(addr, len, flags) (TASK_SIZE)
+#endif
+
+#ifndef arch_get_mmap_base
+#define arch_get_mmap_base(addr, base) (base)
+#endif
+
extern void arch_pick_mmap_layout(struct mm_struct *mm,
- struct rlimit *rlim_stack);
-extern unsigned long
-arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
- unsigned long, unsigned long);
-extern unsigned long
+ const struct rlimit *rlim_stack);
+
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags);
+unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t);
+
+unsigned long mm_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+
+unsigned long mm_get_unmapped_area_vmflags(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags,
+ vm_flags_t vm_flags);
+
+unsigned long
+generic_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
- unsigned long flags);
+ unsigned long flags, vm_flags_t vm_flags);
+unsigned long
+generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
- struct rlimit *rlim_stack) {}
+ const struct rlimit *rlim_stack) {}
#endif
static inline bool in_vfork(struct task_struct *tsk)
@@ -174,17 +267,43 @@ static inline gfp_t current_gfp_context(gfp_t flags)
}
#ifdef CONFIG_LOCKDEP
-extern void __fs_reclaim_acquire(void);
-extern void __fs_reclaim_release(void);
+extern void __fs_reclaim_acquire(unsigned long ip);
+extern void __fs_reclaim_release(unsigned long ip);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
-static inline void __fs_reclaim_acquire(void) { }
-static inline void __fs_reclaim_release(void) { }
+static inline void __fs_reclaim_acquire(unsigned long ip) { }
+static inline void __fs_reclaim_release(unsigned long ip) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
+/* Any memory-allocation retry loop should use
+ * memalloc_retry_wait(), and pass the flags for the most
+ * constrained allocation attempt that might have failed.
+ * This provides useful documentation of where loops are,
+ * and a central place to fine tune the waiting as the MM
+ * implementation changes.
+ */
+static inline void memalloc_retry_wait(gfp_t gfp_flags)
+{
+ /* We use io_schedule_timeout because waiting for memory
+ * typically includes waiting for dirty pages to be
+ * written out, which requires IO.
+ */
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ gfp_flags = current_gfp_context(gfp_flags);
+ if (gfpflags_allow_blocking(gfp_flags) &&
+ !(gfp_flags & __GFP_NORETRY))
+ /* Probably waited already, no need for much more */
+ io_schedule_timeout(1);
+ else
+ /* Probably didn't wait, and has now released a lock,
+ * so now is a good time to wait
+ */
+ io_schedule_timeout(HZ/50);
+}
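The intended call-site shape, as a sketch:

	static struct page *demo_alloc_persistently(void)
	{
		struct page *page;

		while (!(page = alloc_page(GFP_NOFS)))
			memalloc_retry_wait(GFP_NOFS);	/* flags of the failed attempt */
		return page;
	}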
+
/**
* might_alloc - Mark possible allocation sites
* @gfp_mask: gfp_t flags that would be used to allocate
@@ -198,10 +317,31 @@ static inline void might_alloc(gfp_t gfp_mask)
fs_reclaim_acquire(gfp_mask);
fs_reclaim_release(gfp_mask);
+ if (current->flags & PF_MEMALLOC)
+ return;
+
might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}
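A sketch of the kind of fast path this annotation is for: even when the
cached object is returned and nothing is allocated, the checks still fire
(demo_pool is illustrative):

	struct demo_pool { void *cached; size_t size; };

	static void *demo_pool_get(struct demo_pool *pool, gfp_t gfp)
	{
		void *obj = pool->cached;

		might_alloc(gfp);	/* validate gfp even on the cache-hit path */
		if (obj) {
			pool->cached = NULL;
			return obj;
		}
		return kmalloc(pool->size, gfp);
	}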
/**
+ * memalloc_flags_save - Add a PF_* flag to current->flags, save old value
+ *
+ * This allows PF_* flags to be conveniently added, irrespective of current
+ * value, and then the old version restored with memalloc_flags_restore().
+ */
+static inline unsigned memalloc_flags_save(unsigned flags)
+{
+ unsigned oldflags = ~current->flags & flags;
+ current->flags |= flags;
+ return oldflags;
+}
+
+static inline void memalloc_flags_restore(unsigned flags)
+{
+ current->flags &= ~flags;
+}
+
+/**
* memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
*
* This functions marks the beginning of the GFP_NOIO allocation scope.
@@ -210,13 +350,12 @@ static inline void might_alloc(gfp_t gfp_mask)
* point of view. Use memalloc_noio_restore to end the scope with flags
* returned by this function.
*
- * This function is safe to be used from any context.
+ * Context: This function is safe to be used from any context.
+ * Return: The saved flags to be passed to memalloc_noio_restore.
*/
static inline unsigned int memalloc_noio_save(void)
{
- unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
- current->flags |= PF_MEMALLOC_NOIO;
- return flags;
+ return memalloc_flags_save(PF_MEMALLOC_NOIO);
}
/**
@@ -229,7 +368,7 @@ static inline unsigned int memalloc_noio_save(void)
*/
static inline void memalloc_noio_restore(unsigned int flags)
{
- current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
+ memalloc_flags_restore(flags);
}
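The scope pattern looks like this; the resume-path framing is illustrative:

	static void *demo_resume_alloc(size_t len)
	{
		unsigned int noio = memalloc_noio_save();
		void *buf;

		/* any allocation in here implicitly behaves as GFP_NOIO */
		buf = kmalloc(len, GFP_KERNEL);
		memalloc_noio_restore(noio);
		return buf;
	}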
/**
@@ -241,13 +380,12 @@ static inline void memalloc_noio_restore(unsigned int flags)
* point of view. Use memalloc_nofs_restore to end the scope with flags
* returned by this function.
*
- * This function is safe to be used from any context.
+ * Context: This function is safe to be used from any context.
+ * Return: The saved flags to be passed to memalloc_nofs_restore.
*/
static inline unsigned int memalloc_nofs_save(void)
{
- unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
- current->flags |= PF_MEMALLOC_NOFS;
- return flags;
+ return memalloc_flags_save(PF_MEMALLOC_NOFS);
}
/**
@@ -260,32 +398,76 @@ static inline unsigned int memalloc_nofs_save(void)
*/
static inline void memalloc_nofs_restore(unsigned int flags)
{
- current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
+ memalloc_flags_restore(flags);
}
+/**
+ * memalloc_noreclaim_save - Marks implicit __GFP_MEMALLOC scope.
+ *
+ * This function marks the beginning of the __GFP_MEMALLOC allocation scope.
+ * All further allocations will implicitly add the __GFP_MEMALLOC flag, which
+ * prevents entering reclaim and allows access to all memory reserves. This
+ * should only be used when the caller guarantees the allocation will allow more
+ * memory to be freed very shortly, i.e. it needs to allocate some memory in
+ * the process of freeing memory, and cannot reclaim due to potential recursion.
+ *
+ * Users of this scope have to be extremely careful to not deplete the reserves
+ * completely and implement a throttling mechanism which controls the
+ * consumption of the reserve based on the amount of freed memory. Usage of a
+ * pre-allocated pool (e.g. mempool) should be always considered before using
+ * this scope.
+ *
+ * Individual allocations under the scope can opt out using __GFP_NOMEMALLOC
+ *
+ * Context: This function should not be used in interrupt context, because
+ * interrupt context does not get PF_MEMALLOC access to reserves.
+ * See __gfp_pfmemalloc_flags().
+ * Return: The saved flags to be passed to memalloc_noreclaim_restore.
+ */
static inline unsigned int memalloc_noreclaim_save(void)
{
- unsigned int flags = current->flags & PF_MEMALLOC;
- current->flags |= PF_MEMALLOC;
- return flags;
+ return memalloc_flags_save(PF_MEMALLOC);
}
+/**
+ * memalloc_noreclaim_restore - Ends the implicit __GFP_MEMALLOC scope.
+ * @flags: Flags to restore.
+ *
+ * Ends the implicit __GFP_MEMALLOC scope started by memalloc_noreclaim_save
+ * function. Always make sure that the flags passed in are the return value of
+ * the pairing memalloc_noreclaim_save() call.
+ */
static inline void memalloc_noreclaim_restore(unsigned int flags)
{
- current->flags = (current->flags & ~PF_MEMALLOC) | flags;
+ memalloc_flags_restore(flags);
}
+/**
+ * memalloc_pin_save - Marks implicit ~__GFP_MOVABLE scope.
+ *
+ * This function marks the beginning of the ~__GFP_MOVABLE allocation scope.
+ * All further allocations will implicitly remove the __GFP_MOVABLE flag, which
+ * will constrain the allocations to zones that allow long-term pinning, i.e.
+ * not ZONE_MOVABLE zones.
+ *
+ * Return: The saved flags to be passed to memalloc_pin_restore.
+ */
static inline unsigned int memalloc_pin_save(void)
{
- unsigned int flags = current->flags & PF_MEMALLOC_PIN;
-
- current->flags |= PF_MEMALLOC_PIN;
- return flags;
+ return memalloc_flags_save(PF_MEMALLOC_PIN);
}
+/**
+ * memalloc_pin_restore - Ends the implicit ~__GFP_MOVABLE scope.
+ * @flags: Flags to restore.
+ *
+ * Ends the implicit ~__GFP_MOVABLE scope started by the memalloc_pin_save()
+ * function. Always make sure that the flags passed in are the return value of
+ * the pairing memalloc_pin_save() call.
+ */
static inline void memalloc_pin_restore(unsigned int flags)
{
- current->flags = (current->flags & ~PF_MEMALLOC_PIN) | flags;
+ memalloc_flags_restore(flags);
}
#ifdef CONFIG_MEMCG
@@ -298,6 +480,10 @@ DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
* __GFP_ACCOUNT allocations till the end of the scope will be charged to the
* given memcg.
*
+ * Please make sure that the caller holds a reference to the passed memcg
+ * structure, so its lifetime is guaranteed to exceed the scope between the
+ * two set_active_memcg() calls.
+ *
* NOTE: This function can nest. Users must save the return value and
* reset the previous value after their own charging scope is over.
*/
@@ -306,7 +492,7 @@ set_active_memcg(struct mem_cgroup *memcg)
{
struct mem_cgroup *old;
- if (in_interrupt()) {
+ if (!in_task()) {
old = this_cpu_read(int_active_memcg);
this_cpu_write(int_active_memcg, memcg);
} else {
@@ -347,6 +533,13 @@ enum {
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
+ /*
+ * The atomic_read() below prevents CSE. The following should
+ * help the compiler generate more efficient code on architectures
+ * where sync_core_before_usermode() is a no-op.
+ */
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE))
+ return;
if (current->mm != mm)
return;
if (likely(!(atomic_read(&mm->membarrier_state) &
diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
index 6d67e9a5af6b..0db7f67935fe 100644
--- a/include/linux/sched/nohz.h
+++ b/include/linux/sched/nohz.h
@@ -6,7 +6,7 @@
* This is the interface between the scheduler and nohz/dynticks:
*/
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#ifdef CONFIG_NO_HZ_COMMON
extern void nohz_balance_enter_idle(int cpu);
extern int get_nohz_timer_target(void);
#else
@@ -23,7 +23,7 @@ static inline void calc_load_nohz_remote(struct rq *rq) { }
static inline void calc_load_nohz_stop(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
-#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
+#ifdef CONFIG_NO_HZ_COMMON
extern void wake_up_nohz_cpu(int cpu);
#else
static inline void wake_up_nohz_cpu(int cpu) { }
diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h
index 3988762efe15..52b22c5c396d 100644
--- a/include/linux/sched/numa_balancing.h
+++ b/include/linux/sched/numa_balancing.h
@@ -15,13 +15,23 @@
#define TNF_FAULT_LOCAL 0x08
#define TNF_MIGRATE_FAIL 0x10
+enum numa_vmaskip_reason {
+ NUMAB_SKIP_UNSUITABLE,
+ NUMAB_SKIP_SHARED_RO,
+ NUMAB_SKIP_INACCESSIBLE,
+ NUMAB_SKIP_SCAN_DELAY,
+ NUMAB_SKIP_PID_INACTIVE,
+ NUMAB_SKIP_IGNORE_PID,
+ NUMAB_SKIP_SEQ_COMPLETED,
+};
+
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p, bool final);
-extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
- int src_nid, int dst_cpu);
+bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio,
+ int src_nid, int dst_cpu);
#else
static inline void task_numa_fault(int last_node, int node, int pages,
int flags)
@@ -38,7 +48,7 @@ static inline void task_numa_free(struct task_struct *p, bool final)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
- struct page *page, int src_nid, int dst_cpu)
+ struct folio *folio, int src_nid, int dst_cpu)
{
return true;
}
diff --git a/include/linux/sched/posix-timers.h b/include/linux/sched/posix-timers.h
new file mode 100644
index 000000000000..523a381d6c88
--- /dev/null
+++ b/include/linux/sched/posix-timers.h
@@ -0,0 +1 @@
+#include <linux/posix-timers.h>
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
index ab83d85e1183..6ab43b4f72f9 100644
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
@@ -14,6 +14,7 @@
*/
#define MAX_RT_PRIO 100
+#define MAX_DL_PRIO 0
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
diff --git a/include/linux/sched/rseq_api.h b/include/linux/sched/rseq_api.h
new file mode 100644
index 000000000000..cf2af72693e1
--- /dev/null
+++ b/include/linux/sched/rseq_api.h
@@ -0,0 +1 @@
+#include <linux/rseq.h>
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index e5af028c08b4..4e3338103654 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -6,19 +6,40 @@
struct task_struct;
-static inline int rt_prio(int prio)
+static inline bool rt_prio(int prio)
{
- if (unlikely(prio < MAX_RT_PRIO))
- return 1;
- return 0;
+ return unlikely(prio < MAX_RT_PRIO && prio >= MAX_DL_PRIO);
}
-static inline int rt_task(struct task_struct *p)
+static inline bool rt_or_dl_prio(int prio)
+{
+ return unlikely(prio < MAX_RT_PRIO);
+}
+
+/*
+ * Returns true if a task has a priority that belongs to RT class. PI-boosted
+ * tasks will return true. Use rt_policy() to ignore PI-boosted tasks.
+ */
+static inline bool rt_task(struct task_struct *p)
{
return rt_prio(p->prio);
}
-static inline bool task_is_realtime(struct task_struct *tsk)
+/*
+ * Returns true if a task has a priority that belongs to RT or DL classes.
+ * PI-boosted tasks will return true. Use rt_or_dl_task_policy() to ignore
+ * PI-boosted tasks.
+ */
+static inline bool rt_or_dl_task(struct task_struct *p)
+{
+ return rt_or_dl_prio(p->prio);
+}
+
+/*
+ * Returns true if a task has a policy that belongs to RT or DL classes.
+ * PI-boosted tasks will return false.
+ */
+static inline bool rt_or_dl_task_policy(struct task_struct *tsk)
{
int policy = tsk->policy;
@@ -30,6 +51,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
}
#ifdef CONFIG_RT_MUTEXES
+extern void rt_mutex_pre_schedule(void);
+extern void rt_mutex_schedule(void);
+extern void rt_mutex_post_schedule(void);
+
/*
* Must hold either p->pi_lock or task_rq(p)->lock.
*/
@@ -39,20 +64,12 @@ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
}
extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
- return tsk->pi_blocked_on != NULL;
-}
#else
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
return NULL;
}
# define rt_mutex_adjust_pi(p) do { } while (0)
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
- return false;
-}
#endif
extern void normalize_rt_tasks(void);
diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h
index 34b21e971d77..42839cfa2778 100644
--- a/include/linux/sched/sd_flags.h
+++ b/include/linux/sched/sd_flags.h
@@ -91,6 +91,16 @@ SD_FLAG(SD_WAKE_AFFINE, SDF_SHARED_CHILD)
SD_FLAG(SD_ASYM_CPUCAPACITY, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
/*
+ * Domain members have different CPU capacities spanning all unique CPU
+ * capacity values.
+ *
+ * SHARED_PARENT: Set from the topmost domain down to the first domain where
+ * all available CPU capacities are visible
+ * NEEDS_GROUPS: Per-CPU capacity is asymmetric between groups.
+ */
+SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
+
+/*
* Domain members share CPU capacity (i.e. SMT)
*
* SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
@@ -100,13 +110,20 @@ SD_FLAG(SD_ASYM_CPUCAPACITY, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
/*
- * Domain members share CPU package resources (i.e. caches)
+ * Domain members share CPU cluster (LLC tags or L2 cache)
+ *
+ * NEEDS_GROUPS: Clusters are shared between groups.
+ */
+SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS)
+
+/*
+ * Domain members share CPU Last Level Caches
*
* SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
* the same cache(s).
* NEEDS_GROUPS: Caches are shared between groups.
*/
-SD_FLAG(SD_SHARE_PKG_RESOURCES, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+SD_FLAG(SD_SHARE_LLC, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
/*
* Only a single load balancing instance
@@ -122,12 +139,9 @@ SD_FLAG(SD_SERIALIZE, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
/*
* Place busy tasks earlier in the domain
*
- * SHARED_CHILD: Usually set on the SMT level. Technically could be set further
- * up, but currently assumed to be set from the base domain
- * upwards (see update_top_cache_domain()).
* NEEDS_GROUPS: Load balancing flag.
*/
-SD_FLAG(SD_ASYM_PACKING, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
+SD_FLAG(SD_ASYM_PACKING, SDF_NEEDS_GROUPS)
/*
* Prefer to place tasks in a sibling domain
@@ -140,14 +154,6 @@ SD_FLAG(SD_ASYM_PACKING, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
SD_FLAG(SD_PREFER_SIBLING, SDF_NEEDS_GROUPS)
/*
- * sched_groups of this level overlap
- *
- * SHARED_PARENT: Set for all NUMA levels above NODE.
- * NEEDS_GROUPS: Overlaps can only exist with more than one group.
- */
-SD_FLAG(SD_OVERLAP, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
-
-/*
* Cross-node balancing
*
* SHARED_PARENT: Set for all NUMA levels above NODE.
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 3f6a0fcaa10c..7d6449982822 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -9,6 +9,7 @@
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
+#include <linux/pid.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>
@@ -72,6 +73,17 @@ struct multiprocess_signals {
struct hlist_node node;
};
+struct core_thread {
+ struct task_struct *task;
+ struct core_thread *next;
+};
+
+struct core_state {
+ atomic_t nr_threads;
+ struct core_thread dumper;
+ struct completion startup;
+};
+
/*
* NOTE! "signal_struct" does not have its own
* locking, because a shared signal_struct always
@@ -83,6 +95,7 @@ struct signal_struct {
refcount_t sigcnt;
atomic_t live;
int nr_threads;
+ int quick_threads;
struct list_head thread_head;
wait_queue_head_t wait_chldexit; /* for wait4() */
@@ -98,18 +111,16 @@ struct signal_struct {
/* thread group exit support */
int group_exit_code;
- /* overloaded:
- * - notify group_exit_task when ->count is equal to notify_count
- * - everyone except group_exit_task is stopped during signal delivery
- * of fatal signals, group_exit_task processes the signal.
- */
+ /* notify group_exec_task when notify_count is less than or equal to 0 */
int notify_count;
- struct task_struct *group_exit_task;
+ struct task_struct *group_exec_task;
/* thread group stop support, overloads group_exit_code too */
int group_stop_count;
unsigned int flags; /* see SIGNAL_* flags below */
+ struct core_state *core_state; /* coredumping support */
+
/*
* PR_SET_CHILD_SUBREAPER marks a process, like a service
* manager, to re-parent orphan (double-forking) child processes
@@ -125,8 +136,10 @@ struct signal_struct {
#ifdef CONFIG_POSIX_TIMERS
/* POSIX.1b Interval Timers */
- int posix_timer_id;
- struct list_head posix_timers;
+ unsigned int timer_create_restore_ids:1;
+ atomic_t next_posix_timer_id;
+ struct hlist_head posix_timers;
+ struct hlist_head ignored_posix_timers;
/* ITIMER_REAL timer for the process */
struct hrtimer real_timer;
@@ -213,6 +226,10 @@ struct signal_struct {
struct tty_audit_buf *tty_audit_buf;
#endif
+#ifdef CONFIG_CGROUPS
+ struct rw_semaphore cgroup_threadgroup_rwsem;
+#endif
+
/*
* Thread is the potential origin of an oom condition; kill first on
* oom
@@ -243,7 +260,6 @@ struct signal_struct {
#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
-#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */
/*
* Pending notifications to parent.
*/
@@ -259,31 +275,24 @@ struct signal_struct {
static inline void signal_set_stop_flags(struct signal_struct *sig,
unsigned int flags)
{
- WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+ WARN_ON(sig->flags & SIGNAL_GROUP_EXIT);
sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}
-/* If true, all threads except ->group_exit_task have pending SIGKILL */
-static inline int signal_group_exit(const struct signal_struct *sig)
-{
- return (sig->flags & SIGNAL_GROUP_EXIT) ||
- (sig->group_exit_task != NULL);
-}
-
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
-extern int dequeue_signal(struct task_struct *task,
- sigset_t *mask, kernel_siginfo_t *info);
+extern int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type);
static inline int kernel_dequeue_signal(void)
{
struct task_struct *task = current;
kernel_siginfo_t __info;
+ enum pid_type __type;
int ret;
spin_lock_irq(&task->sighand->siglock);
- ret = dequeue_signal(task, &task->blocked, &__info);
+ ret = dequeue_signal(&task->blocked, &__info, &__type);
spin_unlock_irq(&task->sighand->siglock);
return ret;
@@ -292,42 +301,32 @@ static inline int kernel_dequeue_signal(void)
static inline void kernel_signal_stop(void)
{
spin_lock_irq(&current->sighand->siglock);
- if (current->jobctl & JOBCTL_STOP_DEQUEUED)
+ if (current->jobctl & JOBCTL_STOP_DEQUEUED) {
+ current->jobctl |= JOBCTL_STOPPED;
set_special_state(TASK_STOPPED);
+ }
spin_unlock_irq(&current->sighand->siglock);
schedule();
}
-#ifdef __ARCH_SI_TRAPNO
-# define ___ARCH_SI_TRAPNO(_a1) , _a1
-#else
-# define ___ARCH_SI_TRAPNO(_a1)
-#endif
-#ifdef __ia64__
-# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
-#else
-# define ___ARCH_SI_IA64(_a1, _a2, _a3)
-#endif
-int force_sig_fault_to_task(int sig, int code, void __user *addr
- ___ARCH_SI_TRAPNO(int trapno)
- ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
- , struct task_struct *t);
-int force_sig_fault(int sig, int code, void __user *addr
- ___ARCH_SI_TRAPNO(int trapno)
- ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr));
-int send_sig_fault(int sig, int code, void __user *addr
- ___ARCH_SI_TRAPNO(int trapno)
- ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
- , struct task_struct *t);
+int force_sig_fault_to_task(int sig, int code, void __user *addr,
+ struct task_struct *t);
+int force_sig_fault(int sig, int code, void __user *addr);
+int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t);
int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);
+int send_sig_perf(void __user *addr, u32 type, u64 sig_data);
int force_sig_ptrace_errno_trap(int errno, void __user *addr);
+int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno);
+int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
+ struct task_struct *t);
+int force_sig_seccomp(int syscall, int reason, bool force_coredump);
extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
@@ -341,13 +340,38 @@ extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
+extern void force_fatal_sig(int);
+extern void force_exit_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
-extern struct sigqueue *sigqueue_alloc(void);
-extern void sigqueue_free(struct sigqueue *);
-extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
+static inline void clear_notify_signal(void)
+{
+ clear_thread_flag(TIF_NOTIFY_SIGNAL);
+ smp_mb__after_atomic();
+}
+
+/*
+ * Returns 'true' if kick_process() is needed to force a transition from
+ * user -> kernel to guarantee expedient run of TWA_SIGNAL based task_work.
+ */
+static inline bool __set_notify_signal(struct task_struct *task)
+{
+ return !test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) &&
+ !wake_up_state(task, TASK_INTERRUPTIBLE);
+}
+
+/*
+ * Called to break out of interruptible wait loops, and enter the
+ * exit_to_user_mode_loop().
+ */
+static inline void set_notify_signal(struct task_struct *task)
+{
+ if (__set_notify_signal(task))
+ kick_process(task);
+}
+
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
@@ -381,7 +405,7 @@ static inline int fatal_signal_pending(struct task_struct *p)
return task_sigpending(p) && __fatal_signal_pending(p);
}
-static inline int signal_pending_state(long state, struct task_struct *p)
+static inline int signal_pending_state(unsigned int state, struct task_struct *p)
{
if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
return 0;
@@ -411,19 +435,28 @@ static inline bool fault_signal_pending(vm_fault_t fault_flags,
* This is required every time the blocked sigset_t changes.
* callers must hold sighand->siglock.
*/
-extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);
extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
-static inline void signal_wake_up(struct task_struct *t, bool resume)
+static inline void signal_wake_up(struct task_struct *t, bool fatal)
{
- signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
+ unsigned int state = 0;
+ if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) {
+ t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED);
+ state = TASK_WAKEKILL | __TASK_TRACED;
+ }
+ signal_wake_up_state(t, state);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
- signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
+ unsigned int state = 0;
+ if (resume) {
+ t->jobctl &= ~JOBCTL_TRACED;
+ state = __TASK_TRACED;
+ }
+ signal_wake_up_state(t, state);
}
void task_join_group_stop(struct task_struct *task);
@@ -537,6 +570,17 @@ static inline int kill_cad_pid(int sig, int priv)
#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV ((struct kernel_siginfo *) 1)
+static inline int __on_sig_stack(unsigned long sp)
+{
+#ifdef CONFIG_STACK_GROWSUP
+ return sp >= current->sas_ss_sp &&
+ sp - current->sas_ss_sp < current->sas_ss_size;
+#else
+ return sp > current->sas_ss_sp &&
+ sp - current->sas_ss_sp <= current->sas_ss_size;
+#endif
+}
+
/*
* True if we are on the alternate signal stack.
*/
@@ -554,13 +598,7 @@ static inline int on_sig_stack(unsigned long sp)
if (current->sas_ss_flags & SS_AUTODISARM)
return 0;
-#ifdef CONFIG_STACK_GROWSUP
- return sp >= current->sas_ss_sp &&
- sp - current->sas_ss_sp < current->sas_ss_size;
-#else
- return sp > current->sas_ss_sp &&
- sp - current->sas_ss_sp <= current->sas_ss_size;
-#endif
+ return __on_sig_stack(sp);
}
static inline int sas_ss_flags(unsigned long sp)
@@ -604,17 +642,18 @@ extern void flush_itimer_signals(void);
extern bool current_is_single_threaded(void);
/*
- * Careful: do_each_thread/while_each_thread is a double loop so
- * 'break' will not work as expected - use goto instead.
+ * Without tasklist/siglock it is only rcu-safe if g can't exit/exec,
+ * otherwise next_thread(t) will never reach g after list_del_rcu(g).
*/
-#define do_each_thread(g, t) \
- for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
-
#define while_each_thread(g, t) \
while ((t = next_thread(t)) != g)
+#define for_other_threads(p, t) \
+ for (t = p; (t = next_thread(t)) != p; )
+
#define __for_each_thread(signal, t) \
- list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
+ list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
+ lockdep_is_held(&tasklist_lock))
#define for_each_thread(p, t) \
__for_each_thread((p)->signal, t)
@@ -673,22 +712,31 @@ bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
return p1->signal == p2->signal;
}
-static inline struct task_struct *next_thread(const struct task_struct *p)
+/*
+ * returns NULL if p is the last thread in the thread group
+ */
+static inline struct task_struct *__next_thread(struct task_struct *p)
+{
+ return list_next_or_null_rcu(&p->signal->thread_head,
+ &p->thread_node,
+ struct task_struct,
+ thread_node);
+}
+
+static inline struct task_struct *next_thread(struct task_struct *p)
{
- return list_entry_rcu(p->thread_group.next,
- struct task_struct, thread_group);
+ return __next_thread(p) ?: p->group_leader;
}
static inline int thread_group_empty(struct task_struct *p)
{
- return list_empty(&p->thread_group);
+ return thread_group_leader(p) &&
+ list_is_last(&p->thread_node, &p->signal->thread_head);
}
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
-extern bool thread_group_exited(struct pid *pid);
-
extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
unsigned long *flags);
@@ -708,6 +756,12 @@ static inline void unlock_task_sighand(struct task_struct *task,
spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
+#ifdef CONFIG_LOCKDEP
+extern void lockdep_assert_task_sighand_held(struct task_struct *task);
+#else
+static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { }
+#endif
+
static inline unsigned long task_rlimit(const struct task_struct *task,
unsigned int limit)
{
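
The TIF_NOTIFY_SIGNAL helpers added above deliberately split the flag-set from the IPI so a caller only kicks the CPU when strictly needed. A minimal sketch of a caller, assuming the kernel's task_work conventions (the function below is illustrative and not part of this patch; real users normally go through task_work_add() with TWA_SIGNAL):

	/*
	 * Sketch only: notify @task using the helpers declared above.
	 * __set_notify_signal() reports true only when the flag was newly
	 * set and the task was not woken from an interruptible sleep; only
	 * then must kick_process() force the user -> kernel transition so
	 * the queued task_work runs promptly.
	 */
	static void example_notify(struct task_struct *task)
	{
		if (__set_notify_signal(task))
			kick_process(task);
	}
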
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
index 59d3736c454c..166b19af956f 100644
--- a/include/linux/sched/smt.h
+++ b/include/linux/sched/smt.h
@@ -12,9 +12,9 @@ static __always_inline bool sched_smt_active(void)
return static_branch_likely(&sched_smt_present);
}
#else
-static inline bool sched_smt_active(void) { return false; }
+static __always_inline bool sched_smt_active(void) { return false; }
#endif
void arch_smt_update(void);
-#endif
+#endif /* _LINUX_SCHED_SMT_H */
diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h
index 568286411b43..0108a38bb64d 100644
--- a/include/linux/sched/stat.h
+++ b/include/linux/sched/stat.h
@@ -3,6 +3,7 @@
#define _LINUX_SCHED_STAT_H
#include <linux/percpu.h>
+#include <linux/kconfig.h>
/*
* Various counters maintained by the scheduler and fork(),
@@ -16,21 +17,14 @@ extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
-extern unsigned long nr_running(void);
+extern unsigned int nr_running(void);
extern bool single_task_running(void);
-extern unsigned long nr_iowait(void);
-extern unsigned long nr_iowait_cpu(int cpu);
+extern unsigned int nr_iowait(void);
+extern unsigned int nr_iowait_cpu(int cpu);
static inline int sched_info_on(void)
{
-#ifdef CONFIG_SCHEDSTATS
- return 1;
-#elif defined(CONFIG_TASK_DELAY_ACCT)
- extern int delayacct_on;
- return delayacct_on;
-#else
- return 0;
-#endif
+ return IS_ENABLED(CONFIG_SCHED_INFO);
}
#ifdef CONFIG_SCHEDSTATS
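
The new sched_info_on() above collapses a three-way #ifdef ladder into a single IS_ENABLED() test that the compiler folds to a constant. A hedged sketch of the same idiom, using a made-up Kconfig symbol:

	#include <linux/kconfig.h>

	/*
	 * Illustrative only: IS_ENABLED(CONFIG_FOO) evaluates to 1 when the
	 * symbol is =y or =m and to 0 otherwise, so dead branches compile
	 * out. CONFIG_EXAMPLE_FEATURE is a hypothetical symbol.
	 */
	static inline int example_feature_on(void)
	{
		return IS_ENABLED(CONFIG_EXAMPLE_FEATURE);
	}
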
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index db2c0f34aaaf..5a64582b086b 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -4,99 +4,29 @@
#include <linux/types.h>
-struct ctl_table;
-
#ifdef CONFIG_DETECT_HUNG_TASK
-
-#ifdef CONFIG_SMP
-extern unsigned int sysctl_hung_task_all_cpu_backtrace;
-#else
-#define sysctl_hung_task_all_cpu_backtrace 0
-#endif /* CONFIG_SMP */
-
-extern int sysctl_hung_task_check_count;
-extern unsigned int sysctl_hung_task_panic;
+/* used for hung_task and block/ */
extern unsigned long sysctl_hung_task_timeout_secs;
-extern unsigned long sysctl_hung_task_check_interval_secs;
-extern int sysctl_hung_task_warnings;
-int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif
-extern unsigned int sysctl_sched_child_runs_first;
-
-extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_min_granularity;
-extern unsigned int sysctl_sched_wakeup_granularity;
-
enum sched_tunable_scaling {
SCHED_TUNABLESCALING_NONE,
SCHED_TUNABLESCALING_LOG,
SCHED_TUNABLESCALING_LINEAR,
SCHED_TUNABLESCALING_END,
};
-extern unsigned int sysctl_sched_tunable_scaling;
-
-extern unsigned int sysctl_numa_balancing_scan_delay;
-extern unsigned int sysctl_numa_balancing_scan_period_min;
-extern unsigned int sysctl_numa_balancing_scan_period_max;
-extern unsigned int sysctl_numa_balancing_scan_size;
-
-#ifdef CONFIG_SCHED_DEBUG
-extern __read_mostly unsigned int sysctl_sched_migration_cost;
-extern __read_mostly unsigned int sysctl_sched_nr_migrate;
-
-extern int sysctl_resched_latency_warn_ms;
-extern int sysctl_resched_latency_warn_once;
-#endif
-/*
- * control realtime throttling:
- *
- * /proc/sys/kernel/sched_rt_period_us
- * /proc/sys/kernel/sched_rt_runtime_us
- */
-extern unsigned int sysctl_sched_rt_period;
-extern int sysctl_sched_rt_runtime;
+#define NUMA_BALANCING_DISABLED 0x0
+#define NUMA_BALANCING_NORMAL 0x1
+#define NUMA_BALANCING_MEMORY_TIERING 0x2
-extern unsigned int sysctl_sched_dl_period_max;
-extern unsigned int sysctl_sched_dl_period_min;
-
-#ifdef CONFIG_UCLAMP_TASK
-extern unsigned int sysctl_sched_uclamp_util_min;
-extern unsigned int sysctl_sched_uclamp_util_max;
-extern unsigned int sysctl_sched_uclamp_util_min_rt_default;
-#endif
-
-#ifdef CONFIG_CFS_BANDWIDTH
-extern unsigned int sysctl_sched_cfs_bandwidth_slice;
-#endif
-
-#ifdef CONFIG_SCHED_AUTOGROUP
-extern unsigned int sysctl_sched_autogroup_enabled;
-#endif
-
-extern int sysctl_sched_rr_timeslice;
-extern int sched_rr_timeslice;
-
-int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos);
-int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos);
-int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int sysctl_numa_balancing(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos);
-int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos);
-
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-extern unsigned int sysctl_sched_energy_aware;
-int sched_energy_aware_handler(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
+#ifdef CONFIG_NUMA_BALANCING
+extern int sysctl_numa_balancing_mode;
+#else
+#define sysctl_numa_balancing_mode 0
#endif
#endif /* _LINUX_SCHED_SYSCTL_H */
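
The NUMA_BALANCING_* defines above are mode bits rather than an enumeration, so callers mask sysctl_numa_balancing_mode instead of comparing it for equality. A small sketch, not taken from the patch:

	/*
	 * Sketch: NUMA_BALANCING_MEMORY_TIERING may be set together with
	 * NUMA_BALANCING_NORMAL, hence the bitwise test.
	 */
	static inline bool example_numa_tiering_active(void)
	{
		return sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING;
	}
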
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index ef02be869cf2..525aa2a632b2 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -7,6 +7,8 @@
* functionality:
*/
+#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
@@ -23,7 +25,12 @@ struct kernel_clone_args {
int __user *pidfd;
int __user *child_tid;
int __user *parent_tid;
+ const char *name;
int exit_signal;
+ u32 kthread:1;
+ u32 io_thread:1;
+ u32 user_worker:1;
+ u32 no_files:1;
unsigned long stack;
unsigned long stack_size;
unsigned long tls;
@@ -31,9 +38,12 @@ struct kernel_clone_args {
/* Number of elements in *set_tid */
size_t set_tid_size;
int cgroup;
- int io_thread;
+ int idle;
+ int (*fn)(void *);
+ void *fn_arg;
struct cgroup *cgrp;
struct css_set *cset;
+ unsigned int kill_seq;
};
/*
@@ -53,20 +63,23 @@ extern int lockdep_tasklist_lock_is_held(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
-extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
+extern int sched_fork(u64 clone_flags, struct task_struct *p);
+extern int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern void sched_cancel_fork(struct task_struct *p);
extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);
void __noreturn do_task_dead(void);
+void __noreturn make_task_dead(int signr);
+extern void mm_cache_init(void);
extern void proc_caches_init(void);
extern void fork_init(void);
extern void release_task(struct task_struct * p);
-extern int copy_thread(unsigned long, unsigned long, unsigned long,
- struct task_struct *, unsigned long);
+extern int copy_thread(struct task_struct *, const struct kernel_clone_args *);
extern void flush_thread(void);
@@ -77,27 +90,26 @@ static inline void exit_thread(struct task_struct *tsk)
{
}
#endif
-extern void do_group_exit(int);
+extern __noreturn void do_group_exit(int);
extern void exit_files(struct task_struct *);
-extern void exit_itimers(struct signal_struct *);
+extern void exit_itimers(struct task_struct *);
extern pid_t kernel_clone(struct kernel_clone_args *kargs);
+struct task_struct *copy_process(struct pid *pid, int trace, int node,
+ struct kernel_clone_args *args);
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
struct task_struct *fork_idle(int);
-struct mm_struct *copy_init_mm(void);
-extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+extern pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
+ unsigned long flags);
+extern pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
int kernel_wait(pid_t pid, int *stat);
extern void free_task(struct task_struct *tsk);
/* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
extern void sched_exec(void);
-#else
-#define sched_exec() {}
-#endif
static inline struct task_struct *get_task_struct(struct task_struct *t)
{
@@ -105,14 +117,50 @@ static inline struct task_struct *get_task_struct(struct task_struct *t)
return t;
}
+static inline struct task_struct *tryget_task_struct(struct task_struct *t)
+{
+ return refcount_inc_not_zero(&t->usage) ? t : NULL;
+}
+
extern void __put_task_struct(struct task_struct *t);
+extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);
static inline void put_task_struct(struct task_struct *t)
{
- if (refcount_dec_and_test(&t->usage))
- __put_task_struct(t);
+ if (!refcount_dec_and_test(&t->usage))
+ return;
+
+ /*
+ * Under PREEMPT_RT, we can't call __put_task_struct
+ * in atomic context because it will indirectly
+ * acquire sleeping locks. The same is true if the
+ * current process has a mutex enqueued (blocked on
+ * a PI chain).
+ *
+ * In !RT, it is always safe to call __put_task_struct().
+ * Though, in order to simplify the code, resort to the
+ * deferred call too.
+ *
+ * call_rcu() will schedule __put_task_struct_rcu_cb()
+ * to be called in process context.
+ *
+ * __put_task_struct() is called when
+ * refcount_dec_and_test(&t->usage) succeeds.
+ *
+ * This means that it can't "conflict" with
+ * put_task_struct_rcu_user() which abuses ->rcu the same
+ * way; rcu_users has a reference so task->usage can't be
+ * zero after rcu_users 1 -> 0 transition.
+ *
+ * delayed_free_task() also uses ->rcu, but it is only called
+ * when it fails to fork a process. Therefore, there is no
+ * way it can conflict with __put_task_struct().
+ */
+ call_rcu(&t->rcu, __put_task_struct_rcu_cb);
}
+DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
+
static inline void put_task_struct_many(struct task_struct *t, int nr)
{
if (refcount_sub_and_test(nr, &t->usage))
@@ -121,6 +169,9 @@ static inline void put_task_struct_many(struct task_struct *t, int nr)
void put_task_struct_rcu_user(struct task_struct *task);
+/* Free all architecture-specific resources held by a thread. */
+void release_thread(struct task_struct *dead_task);
+
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
@@ -157,11 +208,10 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
* Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
* pins the final release of task.io_context. Also protects ->cpuset and
- * ->cgroup.subsys[]. And ->vfork_done.
+ * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
*
- * Nests both inside and outside of read_lock(&tasklist_lock).
- * It must not be nested with write_lock_irq(&tasklist_lock),
- * neither inside nor outside.
+ * Nests inside of read_lock(&tasklist_lock). It must not be nested with
+ * write_lock_irq(&tasklist_lock), neither inside nor outside.
*/
static inline void task_lock(struct task_struct *p)
{
@@ -173,4 +223,6 @@ static inline void task_unlock(struct task_struct *p)
spin_unlock(&p->alloc_lock);
}
+DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
+
#endif /* _LINUX_SCHED_TASK_H */
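
The DEFINE_FREE(put_task, ...) and DEFINE_GUARD(task_lock, ...) definitions above plug task refcounting and task_lock() into the scope-based cleanup machinery from <linux/cleanup.h>. A hedged sketch of what a consumer gains (find_task_example() is a hypothetical lookup returning a referenced task or NULL):

	/*
	 * Sketch only. The __free(put_task) annotation drops the reference
	 * on every exit path, and guard(task_lock)(t) releases
	 * t->alloc_lock when the scope ends, so no unlock/put labels are
	 * needed.
	 */
	static int example_inspect_task(struct task_struct *(*find_task_example)(void))
	{
		struct task_struct *t __free(put_task) = find_task_example();

		if (!t)
			return -ESRCH;

		guard(task_lock)(t);
		/* ->fs, ->files, ->mm, ->comm are stable under task_lock */
		return 0;
	}
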
diff --git a/include/linux/sched/task_flags.h b/include/linux/sched/task_flags.h
new file mode 100644
index 000000000000..227f5be81bcd
--- /dev/null
+++ b/include/linux/sched/task_flags.h
@@ -0,0 +1 @@
+#include <linux/sched.h>
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
index 2413427e439c..1fab7e9043a3 100644
--- a/include/linux/sched/task_stack.h
+++ b/include/linux/sched/task_stack.h
@@ -8,6 +8,8 @@
#include <linux/sched.h>
#include <linux/magic.h>
+#include <linux/refcount.h>
+#include <linux/kasan.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
@@ -16,19 +18,23 @@
* try_get_task_stack() instead. task_stack_page will return a pointer
* that could get freed out from under you.
*/
-static inline void *task_stack_page(const struct task_struct *task)
+static __always_inline void *task_stack_page(const struct task_struct *task)
{
return task->stack;
}
#define setup_thread_stack(new,old) do { } while(0)
-static inline unsigned long *end_of_stack(const struct task_struct *task)
+static __always_inline unsigned long *end_of_stack(const struct task_struct *task)
{
+#ifdef CONFIG_STACK_GROWSUP
+ return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;
+#else
return task->stack;
+#endif
}
-#elif !defined(__HAVE_THREAD_FUNCTIONS)
+#else
#define task_stack_page(task) ((void *)(task)->stack)
@@ -47,7 +53,7 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
* When the stack grows up, this is the highest address.
* Beyond that position, we corrupt data on the next page.
*/
-static inline unsigned long *end_of_stack(struct task_struct *p)
+static inline unsigned long *end_of_stack(const struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
@@ -75,6 +81,8 @@ static inline void *try_get_task_stack(struct task_struct *tsk)
static inline void put_task_stack(struct task_struct *tsk) {}
#endif
+void exit_task_stack_account(struct task_struct *tsk);
+
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
@@ -82,34 +90,22 @@ static inline int object_is_on_stack(const void *obj)
{
void *stack = task_stack_page(current);
+ obj = kasan_reset_tag(obj);
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
extern void thread_stack_cache_init(void);
#ifdef CONFIG_DEBUG_STACK_USAGE
+unsigned long stack_not_used(struct task_struct *p);
+#else
static inline unsigned long stack_not_used(struct task_struct *p)
{
- unsigned long *n = end_of_stack(p);
-
- do { /* Skip over canary */
-# ifdef CONFIG_STACK_GROWSUP
- n--;
-# else
- n++;
-# endif
- } while (!*n);
-
-# ifdef CONFIG_STACK_GROWSUP
- return (unsigned long)end_of_stack(p) - (unsigned long)n;
-# else
- return (unsigned long)n - (unsigned long)end_of_stack(p);
-# endif
+ return 0;
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);
-#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
/* Reliable end of stack detection:
@@ -117,6 +113,5 @@ static inline int kstack_end(void *addr)
*/
return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
-#endif
#endif /* _LINUX_SCHED_TASK_STACK_H */
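
With CONFIG_THREAD_INFO_IN_TASK, end_of_stack() above now honours CONFIG_STACK_GROWSUP, and stack_not_used() moves out of line when CONFIG_DEBUG_STACK_USAGE is set. A short sketch of the canary check these helpers feed (the caller is illustrative only):

	/*
	 * Sketch: set_task_stack_end_magic() seeds STACK_END_MAGIC at
	 * end_of_stack(); task_stack_end_corrupted() then flags any
	 * overwrite of that word.
	 */
	static void example_check_stack(struct task_struct *tsk)
	{
		if (task_stack_end_corrupted(tsk))
			pr_err("stack overrun in %s\n", tsk->comm);
	}
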
diff --git a/include/linux/sched/thread_info_api.h b/include/linux/sched/thread_info_api.h
new file mode 100644
index 000000000000..2c60fbc16c08
--- /dev/null
+++ b/include/linux/sched/thread_info_api.h
@@ -0,0 +1 @@
+#include <linux/thread_info.h>
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 8f0f778b7c91..45c0022b91ce 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -9,7 +9,6 @@
/*
* sched-domains (multiprocessor balancing) declarations:
*/
-#ifdef CONFIG_SMP
/* Generate SD flag indexes */
#define SD_FLAG(name, mflags) __##name,
@@ -25,37 +24,31 @@ enum {
};
#undef SD_FLAG
-#ifdef CONFIG_SCHED_DEBUG
-
struct sd_flag_debug {
unsigned int meta_flags;
char *name;
};
extern const struct sd_flag_debug sd_flag_debug[];
-#endif
+struct sched_domain_topology_level;
#ifdef CONFIG_SCHED_SMT
-static inline int cpu_smt_flags(void)
-{
- return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
-}
+extern int cpu_smt_flags(void);
+extern const struct cpumask *tl_smt_mask(struct sched_domain_topology_level *tl, int cpu);
#endif
-#ifdef CONFIG_SCHED_MC
-static inline int cpu_core_flags(void)
-{
- return SD_SHARE_PKG_RESOURCES;
-}
+#ifdef CONFIG_SCHED_CLUSTER
+extern int cpu_cluster_flags(void);
+extern const struct cpumask *tl_cls_mask(struct sched_domain_topology_level *tl, int cpu);
#endif
-#ifdef CONFIG_NUMA
-static inline int cpu_numa_flags(void)
-{
- return SD_NUMA;
-}
+#ifdef CONFIG_SCHED_MC
+extern int cpu_core_flags(void);
+extern const struct cpumask *tl_mc_mask(struct sched_domain_topology_level *tl, int cpu);
#endif
+extern const struct cpumask *tl_pkg_mask(struct sched_domain_topology_level *tl, int cpu);
+
extern int arch_asym_cpu_priority(int cpu);
struct sched_domain_attr {
@@ -74,6 +67,7 @@ struct sched_domain_shared {
atomic_t ref;
atomic_t nr_busy_cpus;
int has_idle_cores;
+ int nr_idle_scan;
};
struct sched_domain {
@@ -86,6 +80,7 @@ struct sched_domain {
unsigned int busy_factor; /* less balancing by factor if busy */
unsigned int imbalance_pct; /* No balance until over watermark */
unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
+ unsigned int imb_numa_nr; /* Nr running tasks that allows a NUMA imbalance */
int nohz_idle; /* NOHZ IDLE status */
int flags; /* See SD_* */
@@ -97,17 +92,21 @@ struct sched_domain {
unsigned int nr_balance_failed; /* initialise to 0 */
/* idle_balance() stats */
+ unsigned int newidle_call;
+ unsigned int newidle_success;
+ unsigned int newidle_ratio;
u64 max_newidle_lb_cost;
- unsigned long next_decay_max_lb_cost;
-
- u64 avg_scan_cost; /* select_idle_sibling */
+ unsigned long last_decay_max_lb_cost;
#ifdef CONFIG_SCHEDSTATS
- /* load_balance() stats */
+ /* sched_balance_rq() stats */
unsigned int lb_count[CPU_MAX_IDLE_TYPES];
unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
- unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_imbalance_load[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_imbalance_util[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_imbalance_task[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_imbalance_misfit[CPU_MAX_IDLE_TYPES];
unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
@@ -133,9 +132,7 @@ struct sched_domain {
unsigned int ttwu_move_affine;
unsigned int ttwu_move_balance;
#endif
-#ifdef CONFIG_SCHED_DEBUG
char *name;
-#endif
union {
void *private; /* used during construction */
struct rcu_head rcu; /* used during destruction */
@@ -158,10 +155,6 @@ static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
return to_cpumask(sd->span);
}
-extern void partition_sched_domains_locked(int ndoms_new,
- cpumask_var_t doms_new[],
- struct sched_domain_attr *dattr_new);
-
extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new);
@@ -169,13 +162,13 @@ extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
+bool cpus_equal_capacity(int this_cpu, int that_cpu);
bool cpus_share_cache(int this_cpu, int that_cpu);
+bool cpus_share_resources(int this_cpu, int that_cpu);
-typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
+typedef const struct cpumask *(*sched_domain_mask_f)(struct sched_domain_topology_level *tl, int cpu);
typedef int (*sched_domain_flags_f)(void);
-#define SDTL_OVERLAP 0x01
-
struct sd_data {
struct sched_domain *__percpu *sd;
struct sched_domain_shared *__percpu *sds;
@@ -186,44 +179,16 @@ struct sd_data {
struct sched_domain_topology_level {
sched_domain_mask_f mask;
sched_domain_flags_f sd_flags;
- int flags;
int numa_level;
struct sd_data data;
-#ifdef CONFIG_SCHED_DEBUG
char *name;
-#endif
};
-extern void set_sched_topology(struct sched_domain_topology_level *tl);
-
-#ifdef CONFIG_SCHED_DEBUG
-# define SD_INIT_NAME(type) .name = #type
-#else
-# define SD_INIT_NAME(type)
-#endif
-
-#else /* CONFIG_SMP */
-
-struct sched_domain_attr;
-
-static inline void
-partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
- struct sched_domain_attr *dattr_new)
-{
-}
-
-static inline void
-partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
- struct sched_domain_attr *dattr_new)
-{
-}
-
-static inline bool cpus_share_cache(int this_cpu, int that_cpu)
-{
- return true;
-}
+extern void __init set_sched_topology(struct sched_domain_topology_level *tl);
+extern void sched_update_asym_prefer_cpu(int cpu, int old_prio, int new_prio);
-#endif /* !CONFIG_SMP */
+#define SDTL_INIT(maskfn, flagsfn, dname) ((struct sched_domain_topology_level) \
+ { .mask = maskfn, .sd_flags = flagsfn, .name = #dname })
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
extern void rebuild_sched_domains_energy(void);
@@ -251,21 +216,29 @@ unsigned long arch_scale_cpu_capacity(int cpu)
}
#endif
-#ifndef arch_scale_thermal_pressure
+#ifndef arch_scale_hw_pressure
static __always_inline
-unsigned long arch_scale_thermal_pressure(int cpu)
+unsigned long arch_scale_hw_pressure(int cpu)
{
return 0;
}
#endif
-#ifndef arch_set_thermal_pressure
+#ifndef arch_update_hw_pressure
static __always_inline
-void arch_set_thermal_pressure(const struct cpumask *cpus,
- unsigned long th_pressure)
+void arch_update_hw_pressure(const struct cpumask *cpus,
+ unsigned long capped_frequency)
{ }
#endif
+#ifndef arch_scale_freq_ref
+static __always_inline
+unsigned int arch_scale_freq_ref(int cpu)
+{
+ return 0;
+}
+#endif
+
static inline int task_node(const struct task_struct *p)
{
return cpu_to_node(task_cpu(p));
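
SDTL_INIT() above replaces the old per-field initialisers and SD_INIT_NAME(), and the mask callbacks now take the topology level itself. A hedged sketch of an architecture topology table built with it (the layout is assumed from the declarations above, not taken verbatim from this patch):

	static struct sched_domain_topology_level example_topology[] = {
	#ifdef CONFIG_SCHED_SMT
		SDTL_INIT(tl_smt_mask, cpu_smt_flags, SMT),
	#endif
	#ifdef CONFIG_SCHED_MC
		SDTL_INIT(tl_mc_mask, cpu_core_flags, MC),
	#endif
		SDTL_INIT(tl_pkg_mask, NULL, PKG),
		{ NULL, },	/* table terminator */
	};

A call to set_sched_topology(example_topology) at init time would then install the table, which is why the declaration above gains the __init annotation.
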
diff --git a/include/linux/sched/types.h b/include/linux/sched/types.h
index 3c3e049224ae..969aaf5ef9d6 100644
--- a/include/linux/sched/types.h
+++ b/include/linux/sched/types.h
@@ -20,4 +20,4 @@ struct task_cputime {
unsigned long long sum_exec_runtime;
};
-#endif
+#endif /* _LINUX_SCHED_TYPES_H */
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 3632c5d6ec55..4cc52698e214 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -4,6 +4,7 @@
#include <linux/uidgid.h>
#include <linux/atomic.h>
+#include <linux/percpu_counter.h>
#include <linux/refcount.h>
#include <linux/ratelimit.h>
@@ -12,16 +13,9 @@
*/
struct user_struct {
refcount_t __count; /* reference count */
- atomic_t processes; /* How many processes does this user have? */
- atomic_t sigpending; /* How many pending signals does this user have? */
#ifdef CONFIG_EPOLL
- atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
+ struct percpu_counter epoll_watches; /* The number of file descriptors currently watched */
#endif
-#ifdef CONFIG_POSIX_MQUEUE
- /* protected by mq_lock */
- unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
-#endif
- unsigned long locked_shm; /* How many pages of mlocked shm ? */
unsigned long unix_inflight; /* How many files in flight in unix sockets */
atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
@@ -30,7 +24,8 @@ struct user_struct {
kuid_t uid;
#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) || \
- defined(CONFIG_NET) || defined(CONFIG_IO_URING)
+ defined(CONFIG_NET) || defined(CONFIG_IO_URING) || \
+ defined(CONFIG_VFIO_PCI_ZDEV_KVM) || IS_ENABLED(CONFIG_IOMMUFD)
atomic_long_t locked_vm;
#endif
#ifdef CONFIG_WATCH_QUEUE
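
Converting epoll_watches to a percpu_counter above trades an atomic on every watch add/remove for a cheap per-CPU add, paying the fold cost only on reads. A sketch of the resulting accounting pattern, following <linux/percpu_counter.h> (the 1024 limit is invented for illustration):

	/*
	 * Sketch only: charge a watch against @user, refusing past a
	 * hypothetical limit.
	 */
	static int example_charge_watch(struct user_struct *user)
	{
		if (percpu_counter_read_positive(&user->epoll_watches) >= 1024)
			return -ENOSPC;
		percpu_counter_inc(&user->epoll_watches);
		return 0;
	}
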
diff --git a/include/linux/sched/vhost_task.h b/include/linux/sched/vhost_task.h
new file mode 100644
index 000000000000..25446c5d3508
--- /dev/null
+++ b/include/linux/sched/vhost_task.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_VHOST_TASK_H
+#define _LINUX_SCHED_VHOST_TASK_H
+
+struct vhost_task;
+
+struct vhost_task *vhost_task_create(bool (*fn)(void *),
+ void (*handle_kill)(void *), void *arg,
+ const char *name);
+void vhost_task_start(struct vhost_task *vtsk);
+void vhost_task_stop(struct vhost_task *vtsk);
+void vhost_task_wake(struct vhost_task *vtsk);
+
+#endif /* _LINUX_SCHED_VHOST_TASK_H */
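
The vhost_task interface above gives vhost a worker thread tied to the owning process. A hedged lifecycle sketch; the worker, its argument, and the exact fn()/handle_kill() semantics are assumptions based on the declarations, not spelled out by this header:

	static bool example_worker(void *arg)
	{
		/* process queued work; return value semantics assumed */
		return false;
	}

	static void example_on_kill(void *arg)
	{
		/* flush or abandon outstanding work on fatal signal */
	}

	static struct vhost_task *example_spawn(void *arg)
	{
		struct vhost_task *vtsk;

		vtsk = vhost_task_create(example_worker, example_on_kill,
					 arg, "example-vhost");
		if (!vtsk)
			return NULL;
		vhost_task_start(vtsk);
		/* later: vhost_task_wake() to poke it, vhost_task_stop() */
		return vtsk;
	}
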
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 26a2013ac39c..0f28b4623ad4 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -42,8 +42,11 @@ struct wake_q_head {
#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
-#define DEFINE_WAKE_Q(name) \
- struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+#define WAKE_Q_HEAD_INITIALIZER(name) \
+ { WAKE_Q_TAIL, &name.first }
+
+#define DEFINE_WAKE_Q(name) \
+ struct wake_q_head name = WAKE_Q_HEAD_INITIALIZER(name)
static inline void wake_q_init(struct wake_q_head *head)
{
@@ -60,4 +63,38 @@ extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
extern void wake_up_q(struct wake_q_head *head);
+/* Spin unlock helpers to unlock and call wake_up_q with preempt disabled */
+static inline
+void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+{
+ guard(preempt)();
+ raw_spin_unlock(lock);
+ if (wake_q) {
+ wake_up_q(wake_q);
+ wake_q_init(wake_q);
+ }
+}
+
+static inline
+void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+{
+ guard(preempt)();
+ raw_spin_unlock_irq(lock);
+ if (wake_q) {
+ wake_up_q(wake_q);
+ wake_q_init(wake_q);
+ }
+}
+
+static inline
+void raw_spin_unlock_irqrestore_wake(raw_spinlock_t *lock, unsigned long flags,
+ struct wake_q_head *wake_q)
+{
+ guard(preempt)();
+ raw_spin_unlock_irqrestore(lock, flags);
+ if (wake_q) {
+ wake_up_q(wake_q);
+ wake_q_init(wake_q);
+ }
+}
#endif /* _LINUX_SCHED_WAKE_Q_H */
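
The raw_spin_unlock*_wake() helpers above codify the unlock-then-wake pattern with preemption held off across the wakeups, so a freshly woken, higher-priority task cannot preempt the waker before the release completes. A sketch of a caller (the lock and waiter are illustrative):

	static void example_release(raw_spinlock_t *lock,
				    struct task_struct *waiter)
	{
		DEFINE_WAKE_Q(wake_q);

		raw_spin_lock(lock);
		wake_q_add(&wake_q, waiter);	/* collected under the lock */
		raw_spin_unlock_wake(lock, &wake_q);
	}
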
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index 528718e4ed52..cb41c5edb4d4 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -5,6 +5,8 @@
#ifndef LINUX_SCHED_CLOCK
#define LINUX_SCHED_CLOCK
+#include <linux/types.h>
+
#ifdef CONFIG_GENERIC_SCHED_CLOCK
/**
* struct clock_read_data - data required to read from sched_clock()
@@ -14,7 +16,7 @@
* @sched_clock_mask: Bitmask for two's complement subtraction of non 64bit
* clocks.
* @read_sched_clock: Current clock source (or dummy source when suspended).
- * @mult: Multipler for scaled math conversion.
+ * @mult: Multiplier for scaled math conversion.
* @shift: Shift value for scaled math conversion.
*
* Care must be taken when updating this structure; it is read by
diff --git a/include/linux/scmi_imx_protocol.h b/include/linux/scmi_imx_protocol.h
new file mode 100644
index 000000000000..27bd372cbfb1
--- /dev/null
+++ b/include/linux/scmi_imx_protocol.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * SCMI Message Protocol driver NXP extension header
+ *
+ * Copyright 2024 NXP.
+ */
+
+#ifndef _LINUX_SCMI_NXP_PROTOCOL_H
+#define _LINUX_SCMI_NXP_PROTOCOL_H
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/scmi_protocol.h>
+#include <linux/types.h>
+
+#define SCMI_PROTOCOL_IMX_LMM 0x80
+#define SCMI_PROTOCOL_IMX_BBM 0x81
+#define SCMI_PROTOCOL_IMX_CPU 0x82
+#define SCMI_PROTOCOL_IMX_MISC 0x84
+
+#define SCMI_IMX_VENDOR "NXP"
+#define SCMI_IMX_SUBVENDOR "IMX"
+
+struct scmi_imx_bbm_proto_ops {
+ int (*rtc_time_set)(const struct scmi_protocol_handle *ph, u32 id,
+ uint64_t sec);
+ int (*rtc_time_get)(const struct scmi_protocol_handle *ph, u32 id,
+ u64 *val);
+ int (*rtc_alarm_set)(const struct scmi_protocol_handle *ph, u32 id,
+ bool enable, u64 sec);
+ int (*button_get)(const struct scmi_protocol_handle *ph, u32 *state);
+};
+
+enum scmi_nxp_notification_events {
+ SCMI_EVENT_IMX_BBM_RTC = 0x0,
+ SCMI_EVENT_IMX_BBM_BUTTON = 0x1,
+ SCMI_EVENT_IMX_MISC_CONTROL = 0x0,
+};
+
+struct scmi_imx_bbm_notif_report {
+ bool is_rtc;
+ bool is_button;
+ ktime_t timestamp;
+ unsigned int rtc_id;
+ unsigned int rtc_evt;
+};
+
+struct scmi_imx_misc_ctrl_notify_report {
+ ktime_t timestamp;
+ unsigned int ctrl_id;
+ unsigned int flags;
+};
+
+struct scmi_imx_misc_proto_ops {
+ int (*misc_ctrl_set)(const struct scmi_protocol_handle *ph, u32 id,
+ u32 num, u32 *val);
+ int (*misc_ctrl_get)(const struct scmi_protocol_handle *ph, u32 id,
+ u32 *num, u32 *val);
+ int (*misc_ctrl_req_notify)(const struct scmi_protocol_handle *ph,
+ u32 ctrl_id, u32 evt_id, u32 flags);
+};
+
+/* See LMM_ATTRIBUTES in imx95.rst */
+#define LMM_ID_DISCOVER 0xFFFFFFFFU
+#define LMM_MAX_NAME 16
+
+enum scmi_imx_lmm_state {
+ LMM_STATE_LM_OFF,
+ LMM_STATE_LM_ON,
+ LMM_STATE_LM_SUSPEND,
+ LMM_STATE_LM_POWERED,
+};
+
+struct scmi_imx_lmm_info {
+ u32 lmid;
+ enum scmi_imx_lmm_state state;
+ u32 errstatus;
+ u8 name[LMM_MAX_NAME];
+};
+
+struct scmi_imx_lmm_proto_ops {
+ int (*lmm_power_boot)(const struct scmi_protocol_handle *ph, u32 lmid,
+ bool boot);
+ int (*lmm_info)(const struct scmi_protocol_handle *ph, u32 lmid,
+ struct scmi_imx_lmm_info *info);
+ int (*lmm_reset_vector_set)(const struct scmi_protocol_handle *ph,
+ u32 lmid, u32 cpuid, u32 flags, u64 vector);
+ int (*lmm_shutdown)(const struct scmi_protocol_handle *ph, u32 lmid,
+ u32 flags);
+};
+
+struct scmi_imx_cpu_proto_ops {
+ int (*cpu_reset_vector_set)(const struct scmi_protocol_handle *ph,
+ u32 cpuid, u64 vector, bool start,
+ bool boot, bool resume);
+ int (*cpu_start)(const struct scmi_protocol_handle *ph, u32 cpuid,
+ bool start);
+ int (*cpu_started)(const struct scmi_protocol_handle *ph, u32 cpuid,
+ bool *started);
+};
+#endif
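
The NXP extension header above only declares vendor protocol numbers and ops tables; a driver still binds through the regular SCMI core. A hedged sketch of fetching the BBM ops and reading the RTC (error handling trimmed; RTC id 0 is assumed):

	static int example_imx_bbm_read(struct scmi_device *sdev)
	{
		const struct scmi_imx_bbm_proto_ops *ops;
		struct scmi_protocol_handle *ph;
		u64 seconds;

		ops = sdev->handle->devm_protocol_get(sdev,
						      SCMI_PROTOCOL_IMX_BBM,
						      &ph);
		if (IS_ERR(ops))
			return PTR_ERR(ops);

		return ops->rtc_time_get(ph, 0, &seconds);
	}
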
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 79d0a1237e6c..aafaac1496b0 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -13,8 +13,9 @@
#include <linux/notifier.h>
#include <linux/types.h>
-#define SCMI_MAX_STR_SIZE 16
-#define SCMI_MAX_NUM_RATES 16
+#define SCMI_MAX_STR_SIZE 64
+#define SCMI_SHORT_NAME_MAX_SIZE 16
+#define SCMI_MAX_NUM_RATES 16
/**
* struct scmi_revision_info - version information structure
@@ -36,13 +37,20 @@ struct scmi_revision_info {
u8 num_protocols;
u8 num_agents;
u32 impl_ver;
- char vendor_id[SCMI_MAX_STR_SIZE];
- char sub_vendor_id[SCMI_MAX_STR_SIZE];
+ char vendor_id[SCMI_SHORT_NAME_MAX_SIZE];
+ char sub_vendor_id[SCMI_SHORT_NAME_MAX_SIZE];
};
struct scmi_clock_info {
char name[SCMI_MAX_STR_SIZE];
+ unsigned int enable_latency;
bool rate_discrete;
+ bool rate_changed_notifications;
+ bool rate_change_requested_notifications;
+ bool state_ctrl_forbidden;
+ bool rate_ctrl_forbidden;
+ bool parent_ctrl_forbidden;
+ bool extended_config;
union {
struct {
int num_rates;
@@ -54,12 +62,27 @@ struct scmi_clock_info {
u64 step_size;
} range;
};
+ int num_parents;
+ u32 *parents;
+};
+
+enum scmi_power_scale {
+ SCMI_POWER_BOGOWATTS,
+ SCMI_POWER_MILLIWATTS,
+ SCMI_POWER_MICROWATTS
};
struct scmi_handle;
struct scmi_device;
struct scmi_protocol_handle;
+enum scmi_clock_oem_config {
+ SCMI_CLOCK_CFG_DUTY_CYCLE = 0x1,
+ SCMI_CLOCK_CFG_PHASE,
+ SCMI_CLOCK_CFG_OEM_START = 0x80,
+ SCMI_CLOCK_CFG_OEM_END = 0xFF,
+};
+
/**
* struct scmi_clk_proto_ops - represents the various operations provided
* by SCMI Clock Protocol
@@ -70,30 +93,55 @@ struct scmi_protocol_handle;
* @rate_set: set the clock rate of a clock
* @enable: enables the specified clock
* @disable: disables the specified clock
+ * @state_get: get the status of the specified clock
+ * @config_oem_get: get the value of an OEM specific clock config
+ * @config_oem_set: set the value of an OEM specific clock config
+ * @parent_get: get the parent id of a clk
+ * @parent_set: set the parent of a clock
*/
struct scmi_clk_proto_ops {
int (*count_get)(const struct scmi_protocol_handle *ph);
- const struct scmi_clock_info *(*info_get)
+ const struct scmi_clock_info __must_check *(*info_get)
(const struct scmi_protocol_handle *ph, u32 clk_id);
int (*rate_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
u64 *rate);
int (*rate_set)(const struct scmi_protocol_handle *ph, u32 clk_id,
u64 rate);
- int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id);
- int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id);
+ int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ bool atomic);
+ int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ bool atomic);
+ int (*state_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ bool *enabled, bool atomic);
+ int (*config_oem_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ enum scmi_clock_oem_config oem_type,
+ u32 *oem_val, u32 *attributes, bool atomic);
+ int (*config_oem_set)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ enum scmi_clock_oem_config oem_type,
+ u32 oem_val, bool atomic);
+ int (*parent_get)(const struct scmi_protocol_handle *ph, u32 clk_id, u32 *parent_id);
+ int (*parent_set)(const struct scmi_protocol_handle *ph, u32 clk_id, u32 parent_id);
+};
+
+struct scmi_perf_domain_info {
+ char name[SCMI_MAX_STR_SIZE];
+ bool set_perf;
};
/**
* struct scmi_perf_proto_ops - represents the various operations provided
* by SCMI Performance Protocol
*
+ * @num_domains_get: gets the number of supported performance domains
+ * @info_get: get the information of a performance domain
* @limits_set: sets limits on the performance level of a domain
* @limits_get: gets limits on the performance level of a domain
* @level_set: sets the performance level of a domain
* @level_get: gets the performance level of a domain
- * @device_domain_id: gets the scmi domain id for a given device
* @transition_latency_get: gets the DVFS transition latency for a given device
+ * @rate_limit_get: gets the minimum time (us) required between successive
+ * requests
* @device_opps_add: adds all the OPPs for a given device
* @freq_set: sets the frequency for a given device using sustained frequency
* to sustained performance level mapping
@@ -101,8 +149,17 @@ struct scmi_clk_proto_ops {
* to sustained performance level mapping
* @est_power_get: gets the estimated power cost for a given performance domain
* at a given frequency
+ * @fast_switch_possible: indicates if fast DVFS switching is possible or not
+ * for a given device
+ * @fast_switch_rate_limit: gets the minimum time (us) required between
+ * successive fast_switching requests
+ * @power_scale_get: indicates if the power values provided are in milliwatts
+ * or in some other (abstract) scale
*/
struct scmi_perf_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const struct scmi_perf_domain_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 domain);
int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain,
u32 max_perf, u32 min_perf);
int (*limits_get)(const struct scmi_protocol_handle *ph, u32 domain,
@@ -111,11 +168,12 @@ struct scmi_perf_proto_ops {
u32 level, bool poll);
int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain,
u32 *level, bool poll);
- int (*device_domain_id)(struct device *dev);
int (*transition_latency_get)(const struct scmi_protocol_handle *ph,
- struct device *dev);
+ u32 domain);
+ int (*rate_limit_get)(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *rate_limit);
int (*device_opps_add)(const struct scmi_protocol_handle *ph,
- struct device *dev);
+ struct device *dev, u32 domain);
int (*freq_set)(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long rate, bool poll);
int (*freq_get)(const struct scmi_protocol_handle *ph, u32 domain,
@@ -123,8 +181,10 @@ struct scmi_perf_proto_ops {
int (*est_power_get)(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long *rate, unsigned long *power);
bool (*fast_switch_possible)(const struct scmi_protocol_handle *ph,
- struct device *dev);
- bool (*power_scale_mw_get)(const struct scmi_protocol_handle *ph);
+ u32 domain);
+ int (*fast_switch_rate_limit)(const struct scmi_protocol_handle *ph,
+ u32 domain, u32 *rate_limit);
+ enum scmi_power_scale (*power_scale_get)(const struct scmi_protocol_handle *ph);
};
/**
@@ -138,7 +198,8 @@ struct scmi_perf_proto_ops {
*/
struct scmi_power_proto_ops {
int (*num_domains_get)(const struct scmi_protocol_handle *ph);
- char *(*name_get)(const struct scmi_protocol_handle *ph, u32 domain);
+ const char *(*name_get)(const struct scmi_protocol_handle *ph,
+ u32 domain);
#define SCMI_POWER_STATE_TYPE_SHIFT 30
#define SCMI_POWER_STATE_ID_MASK (BIT(28) - 1)
#define SCMI_POWER_STATE_PARAM(type, id) \
@@ -153,7 +214,7 @@ struct scmi_power_proto_ops {
};
/**
- * scmi_sensor_reading - represent a timestamped read
+ * struct scmi_sensor_reading - represent a timestamped read
*
* Used by @reading_get_timestamped method.
*
@@ -167,7 +228,7 @@ struct scmi_sensor_reading {
};
/**
- * scmi_range_attrs - specifies a sensor or axis values' range
+ * struct scmi_range_attrs - specifies a sensor or axis values' range
* @min_range: The minimum value which can be represented by the sensor/axis.
* @max_range: The maximum value which can be represented by the sensor/axis.
*/
@@ -177,7 +238,7 @@ struct scmi_range_attrs {
};
/**
- * scmi_sensor_axis_info - describes one sensor axes
+ * struct scmi_sensor_axis_info - describes one sensor axis
* @id: The axes ID.
* @type: Axes type. Chosen amongst one of @enum scmi_sensor_class.
* @scale: Power-of-10 multiplier applied to the axis unit.
@@ -205,8 +266,8 @@ struct scmi_sensor_axis_info {
};
/**
- * scmi_sensor_intervals_info - describes number and type of available update
- * intervals
+ * struct scmi_sensor_intervals_info - describes number and type of available
+ * update intervals
* @segmented: Flag for segmented intervals' representation. When True there
* will be exactly 3 intervals in @desc, with each entry
* representing a member of a segment in this order:
@@ -448,7 +509,7 @@ enum scmi_sensor_class {
*/
struct scmi_sensor_proto_ops {
int (*count_get)(const struct scmi_protocol_handle *ph);
- const struct scmi_sensor_info *(*info_get)
+ const struct scmi_sensor_info __must_check *(*info_get)
(const struct scmi_protocol_handle *ph, u32 sensor_id);
int (*trip_point_config)(const struct scmi_protocol_handle *ph,
u32 sensor_id, u8 trip_id, u64 trip_value);
@@ -476,13 +537,19 @@ struct scmi_sensor_proto_ops {
*/
struct scmi_reset_proto_ops {
int (*num_domains_get)(const struct scmi_protocol_handle *ph);
- char *(*name_get)(const struct scmi_protocol_handle *ph, u32 domain);
+ const char *(*name_get)(const struct scmi_protocol_handle *ph,
+ u32 domain);
int (*latency_get)(const struct scmi_protocol_handle *ph, u32 domain);
int (*reset)(const struct scmi_protocol_handle *ph, u32 domain);
int (*assert)(const struct scmi_protocol_handle *ph, u32 domain);
int (*deassert)(const struct scmi_protocol_handle *ph, u32 domain);
};
+enum scmi_voltage_level_mode {
+ SCMI_VOLTAGE_LEVEL_SET_AUTO,
+ SCMI_VOLTAGE_LEVEL_SET_SYNC,
+};
+
/**
* struct scmi_voltage_info - describe one available SCMI Voltage Domain
*
@@ -495,7 +562,8 @@ struct scmi_reset_proto_ops {
* supported voltage level
* @negative_volts_allowed: True if any of the entries of @levels_uv represent
* a negative voltage.
- * @attributes: represents Voltage Domain advertised attributes
+ * @async_level_set: True when the voltage domain supports asynchronous level
+ * set commands.
* @name: name assigned to the Voltage Domain by platform
* @num_levels: number of total entries in @levels_uv.
* @levels_uv: array of entries describing the available voltage levels for
@@ -505,7 +573,7 @@ struct scmi_voltage_info {
unsigned int id;
bool segmented;
bool negative_volts_allowed;
- unsigned int attributes;
+ bool async_level_set;
char name[SCMI_MAX_STR_SIZE];
unsigned int num_levels;
#define SCMI_VOLTAGE_SEGMENT_LOW 0
@@ -536,12 +604,223 @@ struct scmi_voltage_proto_ops {
int (*config_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
u32 *config);
int (*level_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
- u32 flags, s32 volt_uV);
+ enum scmi_voltage_level_mode mode, s32 volt_uV);
int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
s32 *volt_uV);
};
/**
+ * struct scmi_powercap_info - Describe one available Powercap domain
+ *
+ * @id: Domain ID as advertised by the platform.
+ * @notify_powercap_cap_change: CAP change notification support.
+ * @notify_powercap_measurement_change: MEASUREMENTS change notifications
+ * support.
+ * @async_powercap_cap_set: Asynchronous CAP set support.
+ * @powercap_cap_config: CAP configuration support.
+ * @powercap_monitoring: Monitoring (measurements) support.
+ * @powercap_pai_config: PAI configuration support.
+ * @powercap_scale_mw: Domain reports power data in milliwatt units.
+ * @powercap_scale_uw: Domain reports power data in microwatt units.
+ * Note that, when both @powercap_scale_mw and
+ * @powercap_scale_uw are set to false, the domain
+ * reports power data on an abstract linear scale.
+ * @name: name assigned to the Powercap Domain by platform.
+ * @min_pai: Minimum configurable PAI.
+ * @max_pai: Maximum configurable PAI.
+ * @pai_step: Step size between two consecutive PAI values.
+ * @min_power_cap: Minimum configurable CAP.
+ * @max_power_cap: Maximum configurable CAP.
+ * @power_cap_step: Step size between two consecutive CAP values.
+ * @sustainable_power: Maximum sustainable power consumption for this domain
+ * under normal conditions.
+ * @accuracy: The accuracy with which the power is measured and reported in
+ * integral multiples of 0.001 percent.
+ * @parent_id: Identifier of the containing parent power capping domain, or the
+ * value 0xFFFFFFFF if this powercap domain is a root domain not
+ * contained in any other domain.
+ */
+struct scmi_powercap_info {
+ unsigned int id;
+ bool notify_powercap_cap_change;
+ bool notify_powercap_measurement_change;
+ bool async_powercap_cap_set;
+ bool powercap_cap_config;
+ bool powercap_monitoring;
+ bool powercap_pai_config;
+ bool powercap_scale_mw;
+ bool powercap_scale_uw;
+ bool fastchannels;
+ char name[SCMI_MAX_STR_SIZE];
+ unsigned int min_pai;
+ unsigned int max_pai;
+ unsigned int pai_step;
+ unsigned int min_power_cap;
+ unsigned int max_power_cap;
+ unsigned int power_cap_step;
+ unsigned int sustainable_power;
+ unsigned int accuracy;
+#define SCMI_POWERCAP_ROOT_ZONE_ID 0xFFFFFFFFUL
+ unsigned int parent_id;
+ struct scmi_fc_info *fc_info;
+};
+
+/**
+ * struct scmi_powercap_proto_ops - represents the various operations provided
+ * by SCMI Powercap Protocol
+ *
+ * @num_domains_get: get the count of powercap domains provided by SCMI.
+ * @info_get: get the information for the specified domain.
+ * @cap_get: get the current CAP value for the specified domain.
+ * On SCMI platforms supporting powercap zone disabling, this could
+ * report a zero value for a zone where powercapping is disabled.
+ * @cap_set: set the CAP value for the specified domain to the provided value;
+ * if the domain supports setting the CAP with an asynchronous command
+ * this request will finally trigger an asynchronous transfer, but, if
+ *	    @ignore_dresp here is set to true, this call will still return
+ *	    immediately, without waiting for the related delayed response.
+ * Note that the powercap requested value must NOT be zero, even if
+ * the platform supports disabling a powercap by setting its cap to
+ * zero (since SCMI v3.2): there are dedicated operations that should
+ *	    be used for that (@cap_enable_set/@cap_enable_get).
+ * @cap_enable_set: enable or disable the powercapping on the specified domain,
+ * if supported by the SCMI platform implementation.
+ * Note that, by the SCMI specification, the platform can
+ * silently ignore our disable request and decide to enforce
+ * anyway some other powercap value requested by another agent
+ * on the system: for this reason @cap_get and @cap_enable_get
+ * will always report the final platform view of the powercaps.
+ * @cap_enable_get: get the current CAP enable status for the specified domain.
+ * @pai_get: get the current PAI value for the specified domain.
+ * @pai_set: set the PAI value for the specified domain to the provided value.
+ * @measurements_get: retrieve the current average power measurements for the
+ * specified domain and the related PAI upon which is
+ * calculated.
+ * @measurements_threshold_set: set the desired low and high power thresholds
+ * to be used when registering for notification
+ * of type POWERCAP_MEASUREMENTS_NOTIFY with this
+ * powercap domain.
+ * Note that this must be called at least once
+ * before registering any callback with the usual
+ * @scmi_notify_ops; moreover, in case this method
+ * is called with measurement notifications already
+ * enabled it will also trigger, transparently, a
+ * proper update of the power thresholds configured
+ * in the SCMI backend server.
+ * @measurements_threshold_get: get the currently configured low and high power
+ * thresholds used when registering callbacks for
+ * notification POWERCAP_MEASUREMENTS_NOTIFY.
+ */
+struct scmi_powercap_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const struct scmi_powercap_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 domain_id);
+ int (*cap_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 *power_cap);
+ int (*cap_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 power_cap, bool ignore_dresp);
+ int (*cap_enable_set)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, bool enable);
+ int (*cap_enable_get)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, bool *enable);
+ int (*pai_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 *pai);
+ int (*pai_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
+ u32 pai);
+ int (*measurements_get)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *average_power, u32 *pai);
+ int (*measurements_threshold_set)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 power_thresh_low,
+ u32 power_thresh_high);
+ int (*measurements_threshold_get)(const struct scmi_protocol_handle *ph,
+ u32 domain_id, u32 *power_thresh_low,
+ u32 *power_thresh_high);
+};
+
+enum scmi_pinctrl_selector_type {
+ PIN_TYPE = 0,
+ GROUP_TYPE,
+ FUNCTION_TYPE,
+};
+
+enum scmi_pinctrl_conf_type {
+ SCMI_PIN_DEFAULT = 0,
+ SCMI_PIN_BIAS_BUS_HOLD = 1,
+ SCMI_PIN_BIAS_DISABLE = 2,
+ SCMI_PIN_BIAS_HIGH_IMPEDANCE = 3,
+ SCMI_PIN_BIAS_PULL_UP = 4,
+ SCMI_PIN_BIAS_PULL_DEFAULT = 5,
+ SCMI_PIN_BIAS_PULL_DOWN = 6,
+ SCMI_PIN_DRIVE_OPEN_DRAIN = 7,
+ SCMI_PIN_DRIVE_OPEN_SOURCE = 8,
+ SCMI_PIN_DRIVE_PUSH_PULL = 9,
+ SCMI_PIN_DRIVE_STRENGTH = 10,
+ SCMI_PIN_INPUT_DEBOUNCE = 11,
+ SCMI_PIN_INPUT_MODE = 12,
+ SCMI_PIN_PULL_MODE = 13,
+ SCMI_PIN_INPUT_VALUE = 14,
+ SCMI_PIN_INPUT_SCHMITT = 15,
+ SCMI_PIN_LOW_POWER_MODE = 16,
+ SCMI_PIN_OUTPUT_MODE = 17,
+ SCMI_PIN_OUTPUT_VALUE = 18,
+ SCMI_PIN_POWER_SOURCE = 19,
+ SCMI_PIN_SLEW_RATE = 20,
+ SCMI_PIN_OEM_START = 192,
+ SCMI_PIN_OEM_END = 255,
+};
+
+/**
+ * struct scmi_pinctrl_proto_ops - represents the various operations provided
+ * by SCMI Pinctrl Protocol
+ *
+ * @count_get: returns the count of registered elements of the given type
+ * @name_get: returns the name of an element, looked up by index and type
+ * @group_pins_get: returns the set of pins assigned to the specified group
+ * @function_groups_get: returns the set of groups assigned to the specified
+ *	function
+ * @mux_set: set muxing function for groups of pins
+ * @settings_get_one: returns one configuration parameter for pin or group
+ * specified by config_type
+ * @settings_get_all: returns all configuration parameters for pin or group
+ * @settings_conf: sets the configuration parameter for pin or group
+ * @pin_request: acquire pin before selecting mux setting
+ * @pin_free: frees pin, acquired by the pin_request call
+ */
+struct scmi_pinctrl_proto_ops {
+ int (*count_get)(const struct scmi_protocol_handle *ph,
+ enum scmi_pinctrl_selector_type type);
+ int (*name_get)(const struct scmi_protocol_handle *ph, u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ const char **name);
+ int (*group_pins_get)(const struct scmi_protocol_handle *ph,
+ u32 selector, const unsigned int **pins,
+ unsigned int *nr_pins);
+ int (*function_groups_get)(const struct scmi_protocol_handle *ph,
+ u32 selector, unsigned int *nr_groups,
+ const unsigned int **groups);
+ int (*mux_set)(const struct scmi_protocol_handle *ph, u32 selector,
+ u32 group);
+ int (*settings_get_one)(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ enum scmi_pinctrl_conf_type config_type,
+ u32 *config_value);
+ int (*settings_get_all)(const struct scmi_protocol_handle *ph,
+ u32 selector,
+ enum scmi_pinctrl_selector_type type,
+ unsigned int *nr_configs,
+ enum scmi_pinctrl_conf_type *config_types,
+ u32 *config_values);
+ int (*settings_conf)(const struct scmi_protocol_handle *ph,
+ u32 selector, enum scmi_pinctrl_selector_type type,
+ unsigned int nr_configs,
+ enum scmi_pinctrl_conf_type *config_type,
+ u32 *config_value);
+ int (*pin_request)(const struct scmi_protocol_handle *ph, u32 pin);
+ int (*pin_free)(const struct scmi_protocol_handle *ph, u32 pin);
+};
+
+/**
* struct scmi_notify_ops - represents notifications' operations provided by
* SCMI core
* @devm_event_notifier_register: Managed registration of a notifier_block for
@@ -587,8 +866,6 @@ struct scmi_notify_ops {
const u32 *src_id,
struct notifier_block *nb);
int (*devm_event_notifier_unregister)(struct scmi_device *sdev,
- u8 proto_id, u8 evt_id,
- const u32 *src_id,
struct notifier_block *nb);
int (*event_notifier_register)(const struct scmi_handle *handle,
u8 proto_id, u8 evt_id,
@@ -605,19 +882,35 @@ struct scmi_notify_ops {
*
* @dev: pointer to the SCMI device
* @version: pointer to the structure containing SCMI version information
+ * @devm_protocol_acquire: devres managed method to get hold of a protocol,
+ * causing its initialization and related resource
+ * accounting
* @devm_protocol_get: devres managed method to acquire a protocol and get specific
* operations and a dedicated protocol handler
* @devm_protocol_put: devres managed method to release a protocol
+ * @is_transport_atomic: method to check if the underlying transport for this
+ * instance handle is configured to support atomic
+ * transactions for commands.
+ * Upper-layer users of the SCMI stack may want to know
+ * whether they can assume that SCMI command transactions
+ * associated with this handle never sleep, and act
+ * accordingly.
+ * An optional atomic threshold value is also returned
+ * where configured.
* @notify_ops: pointer to set of notifications related operations
*/
struct scmi_handle {
struct device *dev;
struct scmi_revision_info *version;
+ int __must_check (*devm_protocol_acquire)(struct scmi_device *sdev,
+ u8 proto);
const void __must_check *
(*devm_protocol_get)(struct scmi_device *sdev, u8 proto,
struct scmi_protocol_handle **ph);
void (*devm_protocol_put)(struct scmi_device *sdev, u8 proto);
+ bool (*is_transport_atomic)(const struct scmi_handle *handle,
+ unsigned int *atomic_threshold);
const struct scmi_notify_ops *notify_ops;
};
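For context, a bare-bones probe path using these handle methods might look like the sketch below. This is illustrative only; example_probe() is a hypothetical driver function, and the pinctrl protocol is just one possible choice:

    static int example_probe(struct scmi_device *sdev)
    {
            const struct scmi_handle *handle = sdev->handle;
            const struct scmi_pinctrl_proto_ops *pinctrl_ops;
            struct scmi_protocol_handle *ph;

            if (!handle)
                    return -ENODEV;

            /* Resource-managed: released automatically on driver detach. */
            pinctrl_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PINCTRL, &ph);
            if (IS_ERR(pinctrl_ops))
                    return PTR_ERR(pinctrl_ops);

            /* Users that must not sleep can check the transport up front. */
            if (handle->is_transport_atomic(handle, NULL))
                    dev_dbg(&sdev->dev, "SCMI transport supports atomic commands\n");

            return 0;
    }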
@@ -631,6 +924,8 @@ enum scmi_std_protocol {
SCMI_PROTOCOL_SENSOR = 0x15,
SCMI_PROTOCOL_RESET = 0x16,
SCMI_PROTOCOL_VOLTAGE = 0x17,
+ SCMI_PROTOCOL_POWERCAP = 0x18,
+ SCMI_PROTOCOL_PINCTRL = 0x19,
};
enum scmi_system_events {
@@ -650,12 +945,7 @@ struct scmi_device {
struct scmi_handle *handle;
};
-#define to_scmi_dev(d) container_of(d, struct scmi_device, dev)
-
-struct scmi_device *
-scmi_device_create(struct device_node *np, struct device *parent, int protocol,
- const char *name);
-void scmi_device_destroy(struct scmi_device *scmi_dev);
+#define to_scmi_dev(d) container_of_const(d, struct scmi_device, dev)
struct scmi_device_id {
u8 protocol_id;
@@ -723,6 +1013,8 @@ void scmi_protocol_unregister(const struct scmi_protocol *proto);
/* SCMI Notification API - Custom Event Reports */
enum scmi_notification_events {
SCMI_EVENT_POWER_STATE_CHANGED = 0x0,
+ SCMI_EVENT_CLOCK_RATE_CHANGED = 0x0,
+ SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED = 0x1,
SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED = 0x0,
SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED = 0x1,
SCMI_EVENT_SENSOR_TRIP_POINT_EVENT = 0x0,
@@ -730,6 +1022,8 @@ enum scmi_notification_events {
SCMI_EVENT_RESET_ISSUED = 0x0,
SCMI_EVENT_BASE_ERROR_EVENT = 0x0,
SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER = 0x0,
+ SCMI_EVENT_POWERCAP_CAP_CHANGED = 0x0,
+ SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED = 0x1,
};
struct scmi_power_state_changed_report {
@@ -739,11 +1033,20 @@ struct scmi_power_state_changed_report {
unsigned int power_state;
};
+struct scmi_clock_rate_notif_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int clock_id;
+ unsigned long long rate;
+};
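A sketch of a notifier consuming this report, registered through the notify_ops shown earlier, could look as follows; example_clk_rate_cb() and the chosen source id are hypothetical:

    static int example_clk_rate_cb(struct notifier_block *nb,
                                   unsigned long event, void *data)
    {
            const struct scmi_clock_rate_notif_report *r = data;

            pr_debug("clock %u: rate now %llu Hz (agent %u)\n",
                     r->clock_id, r->rate, r->agent_id);
            return NOTIFY_OK;
    }

    /*
     * Registration, e.g. from probe, using the notify_ops above:
     *
     *	static struct notifier_block nb = { .notifier_call = example_clk_rate_cb };
     *	u32 clk_id = 0;
     *
     *	handle->notify_ops->devm_event_notifier_register(sdev,
     *			SCMI_PROTOCOL_CLOCK, SCMI_EVENT_CLOCK_RATE_CHANGED,
     *			&clk_id, &nb);
     */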
+
struct scmi_system_power_state_notifier_report {
ktime_t timestamp;
unsigned int agent_id;
+#define SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(flags) ((flags) & BIT(0))
unsigned int flags;
unsigned int system_state;
+ unsigned int timeout;
};
struct scmi_perf_limits_report {
@@ -752,6 +1055,8 @@ struct scmi_perf_limits_report {
unsigned int domain_id;
unsigned int range_max;
unsigned int range_min;
+ unsigned long range_max_freq;
+ unsigned long range_min_freq;
};
struct scmi_perf_level_report {
@@ -759,6 +1064,7 @@ struct scmi_perf_level_report {
unsigned int agent_id;
unsigned int domain_id;
unsigned int performance_level;
+ unsigned long performance_level_freq;
};
struct scmi_sensor_trip_point_report {
@@ -791,4 +1097,18 @@ struct scmi_base_error_report {
unsigned long long reports[];
};
+struct scmi_powercap_cap_changed_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int power_cap;
+ unsigned int pai;
+};
+
+struct scmi_powercap_meas_changed_report {
+ ktime_t timestamp;
+ unsigned int agent_id;
+ unsigned int domain_id;
+ unsigned int power;
+};
#endif /* _LINUX_SCMI_PROTOCOL_H */
diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h
index afbf8037d8db..d2176a56828a 100644
--- a/include/linux/scpi_protocol.h
+++ b/include/linux/scpi_protocol.h
@@ -51,6 +51,14 @@ struct scpi_sensor_info {
* OPP is an index into the list returned by @dvfs_get_info
* @dvfs_get_info: returns the DVFS capabilities of the given power
* domain. It includes the OPP list and the latency information
+ * @device_domain_id: gets the scpi domain id for a given device
+ * @get_transition_latency: gets the DVFS transition latency for a given device
+ * @add_opps_to_device: adds all the OPPs for a given device
+ * @sensor_get_capability: gets the list of capabilities for the sensors
+ * @sensor_get_info: gets the information about the specified sensor
+ * @sensor_get_value: gets the current value of the sensor
+ * @device_get_power_state: gets the power state of a power domain
+ * @device_set_power_state: sets the power state of a power domain
*/
struct scpi_ops {
u32 (*get_version)(void);
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index eab7081392d5..1690706206e8 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -4,6 +4,153 @@
#include <uapi/linux/screen_info.h>
+#include <linux/bits.h>
+
+/**
+ * SCREEN_INFO_MAX_RESOURCES - maximum number of resources per screen_info
+ */
+#define SCREEN_INFO_MAX_RESOURCES 3
+
+struct pci_dev;
+struct pixel_format;
+struct resource;
+
+static inline bool __screen_info_has_lfb(unsigned int type)
+{
+ return (type == VIDEO_TYPE_VLFB) || (type == VIDEO_TYPE_EFI);
+}
+
+static inline u64 __screen_info_lfb_base(const struct screen_info *si)
+{
+ u64 lfb_base = si->lfb_base;
+
+ if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ lfb_base |= (u64)si->ext_lfb_base << 32;
+
+ return lfb_base;
+}
+
+static inline void __screen_info_set_lfb_base(struct screen_info *si, u64 lfb_base)
+{
+ si->lfb_base = lfb_base & GENMASK_ULL(31, 0);
+ si->ext_lfb_base = (lfb_base & GENMASK_ULL(63, 32)) >> 32;
+
+ if (si->ext_lfb_base)
+ si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+ else
+ si->capabilities &= ~VIDEO_CAPABILITY_64BIT_BASE;
+}
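A worked round trip, with illustrative values, shows how the two helpers split and reassemble a base above the 32-bit range:

    static void example_lfb_base_roundtrip(void)
    {
            struct screen_info si = {};
            u64 base = 0x400000000ULL;	/* 16 GiB: needs ext_lfb_base */

            __screen_info_set_lfb_base(&si, base);
            /* Now: si.lfb_base == 0, si.ext_lfb_base == 0x4, and
             * VIDEO_CAPABILITY_64BIT_BASE is set in si.capabilities.
             */
            WARN_ON(__screen_info_lfb_base(&si) != base);
    }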
+
+static inline u64 __screen_info_lfb_size(const struct screen_info *si, unsigned int type)
+{
+ u64 lfb_size = si->lfb_size;
+
+ if (type == VIDEO_TYPE_VLFB)
+ lfb_size <<= 16;
+ return lfb_size;
+}
+
+static inline bool __screen_info_vbe_mode_nonvga(const struct screen_info *si)
+{
+ /*
+ * VESA modes typically run on VGA hardware. A set bit 5 signals that
+ * this is not the case, in which case drivers cannot make use of VGA
+ * resources. See Sec 4.4 of the VBE 2.0 spec.
+ */
+ return si->vesa_attributes & BIT(5);
+}
+
+static inline unsigned int __screen_info_video_type(unsigned int type)
+{
+ switch (type) {
+ case VIDEO_TYPE_MDA:
+ case VIDEO_TYPE_CGA:
+ case VIDEO_TYPE_EGAM:
+ case VIDEO_TYPE_EGAC:
+ case VIDEO_TYPE_VGAC:
+ case VIDEO_TYPE_VLFB:
+ case VIDEO_TYPE_PICA_S3:
+ case VIDEO_TYPE_MIPS_G364:
+ case VIDEO_TYPE_SGI:
+ case VIDEO_TYPE_TGAC:
+ case VIDEO_TYPE_SUN:
+ case VIDEO_TYPE_SUNPCI:
+ case VIDEO_TYPE_PMAC:
+ case VIDEO_TYPE_EFI:
+ return type;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * screen_info_video_type() - Decodes the video type from struct screen_info
+ * @si: an instance of struct screen_info
+ *
+ * Returns:
+ * A VIDEO_TYPE_ constant representing si's type of video display, or 0 otherwise.
+ */
+static inline unsigned int screen_info_video_type(const struct screen_info *si)
+{
+ unsigned int type;
+
+ // check if display output is on
+ if (!si->orig_video_isVGA)
+ return 0;
+
+ // check for a known VIDEO_TYPE_ constant
+ type = __screen_info_video_type(si->orig_video_isVGA);
+ if (type)
+ return si->orig_video_isVGA;
+
+ // check if text mode has been initialized
+ if (!si->orig_video_lines || !si->orig_video_cols)
+ return 0;
+
+ // 80x25 text, mono
+ if (si->orig_video_mode == 0x07) {
+ if ((si->orig_video_ega_bx & 0xff) != 0x10)
+ return VIDEO_TYPE_EGAM;
+ else
+ return VIDEO_TYPE_MDA;
+ }
+
+ // EGA/VGA, 16 colors
+ if ((si->orig_video_ega_bx & 0xff) != 0x10) {
+ if (si->orig_video_isVGA)
+ return VIDEO_TYPE_VGAC;
+ else
+ return VIDEO_TYPE_EGAC;
+ }
+
+ // the rest...
+ return VIDEO_TYPE_CGA;
+}
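A typical consumer combines the decoder with the lfb predicate above; a minimal sketch, where example_have_linear_fb() is a hypothetical name:

    static bool example_have_linear_fb(const struct screen_info *si)
    {
            unsigned int type = screen_info_video_type(si);

            if (!type)
                    return false;	/* display off or nothing recognizable */

            /* Only the VLFB and EFI types advertise a linear framebuffer. */
            return __screen_info_has_lfb(type);
    }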
+
+static inline u32 __screen_info_vesapm_info_base(const struct screen_info *si)
+{
+ if (si->vesapm_seg < 0xc000)
+ return 0;
+ return (si->vesapm_seg << 4) + si->vesapm_off;
+}
+
+ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, size_t num);
+
+u32 __screen_info_lfb_bits_per_pixel(const struct screen_info *si);
+int screen_info_pixel_format(const struct screen_info *si, struct pixel_format *f);
+
+#if defined(CONFIG_PCI)
+void screen_info_apply_fixups(void);
+struct pci_dev *screen_info_pci_dev(const struct screen_info *si);
+#else
+static inline void screen_info_apply_fixups(void)
+{ }
+static inline struct pci_dev *screen_info_pci_dev(const struct screen_info *si)
+{
+ return NULL;
+}
+#endif
+
extern struct screen_info screen_info;
#endif /* _SCREEN_INFO_H */
diff --git a/include/linux/scs.h b/include/linux/scs.h
index 18122d9e17ff..4ab5bdc898cf 100644
--- a/include/linux/scs.h
+++ b/include/linux/scs.h
@@ -53,6 +53,22 @@ static inline bool task_scs_end_corrupted(struct task_struct *tsk)
return sz >= SCS_SIZE - 1 || READ_ONCE_NOCHECK(*magic) != SCS_END_MAGIC;
}
+DECLARE_STATIC_KEY_FALSE(dynamic_scs_enabled);
+
+static inline bool scs_is_dynamic(void)
+{
+ if (!IS_ENABLED(CONFIG_DYNAMIC_SCS))
+ return false;
+ return static_branch_likely(&dynamic_scs_enabled);
+}
+
+static inline bool scs_is_enabled(void)
+{
+ if (!IS_ENABLED(CONFIG_DYNAMIC_SCS))
+ return true;
+ return scs_is_dynamic();
+}
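A sketch of the intended guard pattern follows; the check is redundant with what scs_prepare() does internally, and example_thread_setup() is a hypothetical caller, but it shows how the two predicates relate:

    static int example_thread_setup(struct task_struct *tsk, int node)
    {
            if (!scs_is_enabled())
                    return 0;	/* no shadow call stack in use */

            /*
             * When CONFIG_DYNAMIC_SCS is set, scs_is_dynamic() reports
             * whether the boot-time decision actually enabled SCS.
             */
            return scs_prepare(tsk, node);
    }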
+
#else /* CONFIG_SHADOW_CALL_STACK */
static inline void *scs_alloc(int node) { return NULL; }
@@ -62,6 +78,8 @@ static inline void scs_task_reset(struct task_struct *tsk) {}
static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
static inline void scs_release(struct task_struct *tsk) {}
static inline bool task_scs_end_corrupted(struct task_struct *tsk) { return false; }
+static inline bool scs_is_enabled(void) { return false; }
+static inline bool scs_is_dynamic(void) { return false; }
#endif /* CONFIG_SHADOW_CALL_STACK */
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index bb1926589693..6719949135c9 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -98,6 +98,7 @@ enum sctp_cid {
SCTP_CID_I_FWD_TSN = 0xC2,
SCTP_CID_ASCONF_ACK = 0x80,
SCTP_CID_RECONF = 0x82,
+ SCTP_CID_PAD = 0x84,
}; /* enum */
@@ -221,7 +222,6 @@ struct sctp_datahdr {
__be16 stream;
__be16 ssn;
__u32 ppid;
- __u8 payload[];
};
struct sctp_data_chunk {
@@ -238,7 +238,6 @@ struct sctp_idatahdr {
__u32 ppid;
__be32 fsn;
};
- __u8 payload[0];
};
struct sctp_idata_chunk {
@@ -269,7 +268,7 @@ struct sctp_inithdr {
__be16 num_outbound_streams;
__be16 num_inbound_streams;
__be32 initial_tsn;
- __u8 params[];
+ /* __u8 params[]; */
};
struct sctp_init_chunk {
@@ -384,7 +383,7 @@ struct sctp_sackhdr {
__be32 a_rwnd;
__be16 num_gap_ack_blocks;
__be16 num_dup_tsns;
- union sctp_sack_variable variable[];
+ /* union sctp_sack_variable variable[]; */
};
struct sctp_sack_chunk {
@@ -410,6 +409,12 @@ struct sctp_heartbeat_chunk {
};
+/* A PAD chunk can be bundled with a HEARTBEAT chunk to probe the PMTU */
+struct sctp_pad_chunk {
+ struct sctp_chunkhdr uh;
+};
+
/* For the abort and shutdown ACK we must carry the init tag in the
* common header. Just the common header is all that is needed with a
* chunk descriptor.
@@ -436,7 +441,7 @@ struct sctp_shutdown_chunk {
struct sctp_errhdr {
__be16 cause;
__be16 length;
- __u8 variable[];
+ /* __u8 variable[]; */
};
struct sctp_operr_chunk {
@@ -596,7 +601,7 @@ struct sctp_fwdtsn_skip {
struct sctp_fwdtsn_hdr {
__be32 new_cum_tsn;
- struct sctp_fwdtsn_skip skip[];
+ /* struct sctp_fwdtsn_skip skip[]; */
};
struct sctp_fwdtsn_chunk {
@@ -613,7 +618,7 @@ struct sctp_ifwdtsn_skip {
struct sctp_ifwdtsn_hdr {
__be32 new_cum_tsn;
- struct sctp_ifwdtsn_skip skip[];
+ /* struct sctp_ifwdtsn_skip skip[]; */
};
struct sctp_ifwdtsn_chunk {
@@ -660,7 +665,7 @@ struct sctp_addip_param {
struct sctp_addiphdr {
__be32 serial;
- __u8 params[];
+ /* __u8 params[]; */
};
struct sctp_addip_chunk {
@@ -720,7 +725,7 @@ struct sctp_addip_chunk {
struct sctp_authhdr {
__be16 shkey_id;
__be16 hmac_id;
- __u8 hmac[];
+ /* __u8 hmac[]; */
};
struct sctp_auth_chunk {
@@ -735,7 +740,7 @@ struct sctp_infox {
struct sctp_reconf_chunk {
struct sctp_chunkhdr chunk_hdr;
- __u8 params[];
+ /* __u8 params[]; */
};
struct sctp_strreset_outreq {
@@ -813,4 +818,9 @@ struct sctp_new_encap_port_hdr {
__be16 new_port;
};
+/* Round an int up to the next multiple of 4. */
+#define SCTP_PAD4(s) (((s)+3)&~3)
+/* Truncate to the previous multiple of 4. */
+#define SCTP_TRUNC4(s) ((s)&~3)
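A few worked values, for reference:

	SCTP_PAD4(5)   == 8	((5 + 3) & ~3 == 8)
	SCTP_PAD4(8)   == 8	(already a multiple of 4)
	SCTP_TRUNC4(7) == 4	(7 & ~3 == 4)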
+
#endif /* __LINUX_SCTP_H__ */
diff --git a/include/linux/sdb.h b/include/linux/sdb.h
deleted file mode 100644
index a2404a2bbd10..000000000000
--- a/include/linux/sdb.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This is the official version 1.1 of sdb.h
- */
-#ifndef __SDB_H__
-#define __SDB_H__
-#ifdef __KERNEL__
-#include <linux/types.h>
-#else
-#include <stdint.h>
-#endif
-
-/*
- * All structures are 64 bytes long and are expected
- * to live in an array, one for each interconnect.
- * Most fields of the structures are shared among the
- * various types, and most-specific fields are at the
- * beginning (for alignment reasons, and to keep the
- * magic number at the head of the interconnect record
- */
-
-/* Product, 40 bytes at offset 24, 8-byte aligned
- *
- * device_id is vendor-assigned; version is device-specific,
- * date is hex (e.g. 0x20120501), name is UTF-8, blank-filled
- * and not terminated with a 0 byte.
- */
-struct sdb_product {
- uint64_t vendor_id; /* 0x18..0x1f */
- uint32_t device_id; /* 0x20..0x23 */
- uint32_t version; /* 0x24..0x27 */
- uint32_t date; /* 0x28..0x2b */
- uint8_t name[19]; /* 0x2c..0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/*
- * Component, 56 bytes at offset 8, 8-byte aligned
- *
- * The address range is first to last, inclusive
- * (for example 0x100000 - 0x10ffff)
- */
-struct sdb_component {
- uint64_t addr_first; /* 0x08..0x0f */
- uint64_t addr_last; /* 0x10..0x17 */
- struct sdb_product product; /* 0x18..0x3f */
-};
-
-/* Type of the SDB record */
-enum sdb_record_type {
- sdb_type_interconnect = 0x00,
- sdb_type_device = 0x01,
- sdb_type_bridge = 0x02,
- sdb_type_integration = 0x80,
- sdb_type_repo_url = 0x81,
- sdb_type_synthesis = 0x82,
- sdb_type_empty = 0xFF,
-};
-
-/* Type 0: interconnect (first of the array)
- *
- * sdb_records is the length of the table including this first
- * record, version is 1. The bus type is enumerated later.
- */
-#define SDB_MAGIC 0x5344422d /* "SDB-" */
-struct sdb_interconnect {
- uint32_t sdb_magic; /* 0x00-0x03 */
- uint16_t sdb_records; /* 0x04-0x05 */
- uint8_t sdb_version; /* 0x06 */
- uint8_t sdb_bus_type; /* 0x07 */
- struct sdb_component sdb_component; /* 0x08-0x3f */
-};
-
-/* Type 1: device
- *
- * class is 0 for "custom device", other values are
- * to be standardized; ABI version is for the driver,
- * bus-specific bits are defined by each bus (see below)
- */
-struct sdb_device {
- uint16_t abi_class; /* 0x00-0x01 */
- uint8_t abi_ver_major; /* 0x02 */
- uint8_t abi_ver_minor; /* 0x03 */
- uint32_t bus_specific; /* 0x04-0x07 */
- struct sdb_component sdb_component; /* 0x08-0x3f */
-};
-
-/* Type 2: bridge
- *
- * child is the address of the nested SDB table
- */
-struct sdb_bridge {
- uint64_t sdb_child; /* 0x00-0x07 */
- struct sdb_component sdb_component; /* 0x08-0x3f */
-};
-
-/* Type 0x80: integration
- *
- * all types with bit 7 set are meta-information, so
- * software can ignore the types it doesn't know. Here we
- * just provide product information for an aggregate device
- */
-struct sdb_integration {
- uint8_t reserved[24]; /* 0x00-0x17 */
- struct sdb_product product; /* 0x18-0x3f */
-};
-
-/* Type 0x81: Top module repository url
- *
- * again, an informative field that software can ignore
- */
-struct sdb_repo_url {
- uint8_t repo_url[63]; /* 0x00-0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/* Type 0x82: Synthesis tool information
- *
- * this is another informative record that software can ignore
- */
-struct sdb_synthesis {
- uint8_t syn_name[16]; /* 0x00-0x0f */
- uint8_t commit_id[16]; /* 0x10-0x1f */
- uint8_t tool_name[8]; /* 0x20-0x27 */
- uint32_t tool_version; /* 0x28-0x2b */
- uint32_t date; /* 0x2c-0x2f */
- uint8_t user_name[15]; /* 0x30-0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/* Type 0xff: empty
- *
- * this allows keeping empty slots during development,
- * so they can be filled later with minimal efforts and
- * no misleading description is ever shipped -- hopefully.
- * It can also be used to pad a table to a desired length.
- */
-struct sdb_empty {
- uint8_t reserved[63]; /* 0x00-0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/* The type of bus, for bus-specific flags */
-enum sdb_bus_type {
- sdb_wishbone = 0x00,
- sdb_data = 0x01,
-};
-
-#define SDB_WB_WIDTH_MASK 0x0f
-#define SDB_WB_ACCESS8 0x01
-#define SDB_WB_ACCESS16 0x02
-#define SDB_WB_ACCESS32 0x04
-#define SDB_WB_ACCESS64 0x08
-#define SDB_WB_LITTLE_ENDIAN 0x80
-
-#define SDB_DATA_READ 0x04
-#define SDB_DATA_WRITE 0x02
-#define SDB_DATA_EXEC 0x01
-
-#endif /* __SDB_H__ */
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 0c564e5d40ff..9b959972bf4a 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -3,12 +3,14 @@
#define _LINUX_SECCOMP_H
#include <uapi/linux/seccomp.h>
+#include <linux/seccomp_types.h>
#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
SECCOMP_FILTER_FLAG_LOG | \
SECCOMP_FILTER_FLAG_SPEC_ALLOW | \
SECCOMP_FILTER_FLAG_NEW_LISTENER | \
- SECCOMP_FILTER_FLAG_TSYNC_ESRCH)
+ SECCOMP_FILTER_FLAG_TSYNC_ESRCH | \
+ SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV)
/* sizeof() the first published struct seccomp_notif_addfd */
#define SECCOMP_NOTIFY_ADDFD_SIZE_VER0 24
@@ -20,30 +22,13 @@
#include <linux/atomic.h>
#include <asm/seccomp.h>
-struct seccomp_filter;
-/**
- * struct seccomp - the state of a seccomp'ed process
- *
- * @mode: indicates one of the valid values above for controlled
- * system calls available to a process.
- * @filter: must always point to a valid seccomp-filter or NULL as it is
- * accessed without locking during system call entry.
- *
- * @filter must only be accessed from the context of current as there
- * is no read locking.
- */
-struct seccomp {
- int mode;
- atomic_t filter_count;
- struct seccomp_filter *filter;
-};
+extern int __secure_computing(void);
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
-extern int __secure_computing(const struct seccomp_data *sd);
static inline int secure_computing(void)
{
if (unlikely(test_syscall_work(SECCOMP)))
- return __secure_computing(NULL);
+ return __secure_computing();
return 0;
}
#else
@@ -62,16 +47,14 @@ static inline int seccomp_mode(struct seccomp *s)
#include <linux/errno.h>
-struct seccomp { };
-struct seccomp_filter { };
struct seccomp_data;
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
static inline int secure_computing(void) { return 0; }
-static inline int __secure_computing(const struct seccomp_data *sd) { return 0; }
#else
static inline void secure_computing_strict(int this_syscall) { return; }
#endif
+static inline int __secure_computing(void) { return 0; }
static inline long prctl_get_seccomp(void)
{
@@ -124,6 +107,8 @@ static inline long seccomp_get_metadata(struct task_struct *task,
#ifdef CONFIG_SECCOMP_CACHE_DEBUG
struct seq_file;
+struct pid_namespace;
+struct pid;
int proc_pid_seccomp_cache(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task);
diff --git a/include/linux/seccomp_types.h b/include/linux/seccomp_types.h
new file mode 100644
index 000000000000..cf0a0355024f
--- /dev/null
+++ b/include/linux/seccomp_types.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SECCOMP_TYPES_H
+#define _LINUX_SECCOMP_TYPES_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_SECCOMP
+
+struct seccomp_filter;
+/**
+ * struct seccomp - the state of a seccomp'ed process
+ *
+ * @mode: indicates one of the valid seccomp modes (SECCOMP_MODE_*)
+ *        controlling the system calls available to a process.
+ * @filter_count: number of seccomp filters
+ * @filter: must always point to a valid seccomp-filter or NULL as it is
+ * accessed without locking during system call entry.
+ *
+ * @filter must only be accessed from the context of current as there
+ * is no read locking.
+ */
+struct seccomp {
+ int mode;
+ atomic_t filter_count;
+ struct seccomp_filter *filter;
+};
+
+#else
+
+struct seccomp { };
+struct seccomp_filter { };
+
+#endif
+
+#endif /* _LINUX_SECCOMP_TYPES_H */
diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h
new file mode 100644
index 000000000000..e918f96881f5
--- /dev/null
+++ b/include/linux/secretmem.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_SECRETMEM_H
+#define _LINUX_SECRETMEM_H
+
+#ifdef CONFIG_SECRETMEM
+
+extern const struct address_space_operations secretmem_aops;
+
+static inline bool secretmem_mapping(struct address_space *mapping)
+{
+ return mapping->a_ops == &secretmem_aops;
+}
+
+bool vma_is_secretmem(struct vm_area_struct *vma);
+bool secretmem_active(void);
+
+#else
+
+static inline bool vma_is_secretmem(struct vm_area_struct *vma)
+{
+ return false;
+}
+
+static inline bool secretmem_mapping(struct address_space *mapping)
+{
+ return false;
+}
+
+static inline bool secretmem_active(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_SECRETMEM */
+
+#endif /* _LINUX_SECRETMEM_H */
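The predicates are meant as cheap guards in core code; the sketch below mirrors the kind of check done on the pinning path (names hypothetical, not the actual GUP code):

    static int example_may_kmap_or_pin(struct vm_area_struct *vma,
                                       struct address_space *mapping)
    {
            /*
             * memfd_secret() memory is removed from the kernel direct map
             * and must not be pinned or mapped by the kernel. Both
             * predicates compile to constant 'false' when CONFIG_SECRETMEM
             * is off, so the branch disappears entirely.
             */
            if (vma_is_secretmem(vma) || secretmem_mapping(mapping))
                    return -EPERM;

            return 0;
    }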
diff --git a/include/linux/security.h b/include/linux/security.h
index 06f7c50ce77f..83a646d72f6f 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -31,6 +31,13 @@
#include <linux/err.h>
#include <linux/string.h>
#include <linux/mm.h>
+#include <linux/sockptr.h>
+#include <linux/bpf.h>
+#include <uapi/linux/lsm.h>
+#include <linux/lsm/selinux.h>
+#include <linux/lsm/smack.h>
+#include <linux/lsm/apparmor.h>
+#include <linux/lsm/bpf.h>
struct linux_binprm;
struct cred;
@@ -59,6 +66,7 @@ struct fs_parameter;
enum fs_value_type;
struct watch;
struct watch_notification;
+struct lsm_ctx;
/* Default (no) options for the capable function */
#define CAP_OPT_NONE 0x0
@@ -67,7 +75,7 @@ struct watch_notification;
/* If capable is being called by a setid function */
#define CAP_OPT_INSETID BIT(2)
-/* LSM Agnostic defines for fs_context::lsm_flags */
+/* LSM Agnostic defines for security_sb_set_mnt_opts() flags */
#define SECURITY_LSM_NATIVE_LABELS 1
struct ctl_table;
@@ -77,6 +85,19 @@ struct timezone;
enum lsm_event {
LSM_POLICY_CHANGE,
+ LSM_STARTED_ALL,
+};
+
+struct dm_verity_digest {
+ const char *alg;
+ const u8 *digest;
+ size_t digest_len;
+};
+
+enum lsm_integrity_type {
+ LSM_INT_DMVERITY_SIG_VALID,
+ LSM_INT_DMVERITY_ROOTHASH,
+ LSM_INT_FSVERITY_BUILTINSIG_VALID,
};
/*
@@ -114,16 +135,21 @@ enum lockdown_reason {
LOCKDOWN_IOPORT,
LOCKDOWN_MSR,
LOCKDOWN_ACPI_TABLES,
+ LOCKDOWN_DEVICE_TREE,
LOCKDOWN_PCMCIA_CIS,
LOCKDOWN_TIOCSSERIAL,
LOCKDOWN_MODULE_PARAMETERS,
LOCKDOWN_MMIOTRACE,
LOCKDOWN_DEBUGFS,
LOCKDOWN_XMON_WR,
+ LOCKDOWN_BPF_WRITE_USER,
+ LOCKDOWN_DBG_WRITE_KERNEL,
+ LOCKDOWN_RTAS_ERROR_INJECTION,
LOCKDOWN_INTEGRITY_MAX,
LOCKDOWN_KCORE,
LOCKDOWN_KPROBES,
- LOCKDOWN_BPF_READ,
+ LOCKDOWN_BPF_READ_KERNEL,
+ LOCKDOWN_DBG_READ_KERNEL,
LOCKDOWN_PERF,
LOCKDOWN_TRACEFS,
LOCKDOWN_XMON_RW,
@@ -131,6 +157,16 @@ enum lockdown_reason {
LOCKDOWN_CONFIDENTIALITY_MAX,
};
+/*
+ * Data exported by the security modules
+ */
+struct lsm_prop {
+ struct lsm_prop_selinux selinux;
+ struct lsm_prop_smack smack;
+ struct lsm_prop_apparmor apparmor;
+ struct lsm_prop_bpf bpf;
+};
+
extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1];
/* These functions are in security/commoncap.c */
@@ -139,25 +175,23 @@ extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz);
extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
extern int cap_ptrace_traceme(struct task_struct *parent);
-extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
+extern int cap_capget(const struct task_struct *target, kernel_cap_t *effective,
+ kernel_cap_t *inheritable, kernel_cap_t *permitted);
extern int cap_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
-extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
+extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file);
int cap_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
-int cap_inode_removexattr(struct user_namespace *mnt_userns,
+int cap_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name);
int cap_inode_need_killpriv(struct dentry *dentry);
-int cap_inode_killpriv(struct user_namespace *mnt_userns,
- struct dentry *dentry);
-int cap_inode_getsecurity(struct user_namespace *mnt_userns,
+int cap_inode_killpriv(struct mnt_idmap *idmap, struct dentry *dentry);
+int cap_inode_getsecurity(struct mnt_idmap *idmap,
struct inode *inode, const char *name, void **buffer,
bool alloc);
extern int cap_mmap_addr(unsigned long addr);
-extern int cap_mmap_file(struct file *file, unsigned long reqprot,
- unsigned long prot, unsigned long flags);
extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
@@ -178,7 +212,7 @@ struct xfrm_policy;
struct xfrm_state;
struct xfrm_user_sec_ctx;
struct seq_file;
-struct sctp_endpoint;
+struct sctp_association;
#ifdef CONFIG_MMU
extern unsigned long mmap_min_addr;
@@ -189,6 +223,18 @@ extern unsigned long dac_mmap_min_addr;
#endif
/*
+ * A "security context" is the text representation of
+ * the information used by LSMs.
+ * This structure contains the string, its length, and which LSM
+ * it is useful for.
+ */
+struct lsm_context {
+ char *context; /* Provided by the module */
+ u32 len;
+ int id; /* Identifies the module */
+};
+
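The structure supports the usual alloc/use/release pattern; a minimal sketch, assuming a valid secid and using the security_secid_to_secctx()/security_release_secctx() pair declared further below (example_log_secctx() is a hypothetical name):

    static void example_log_secctx(u32 secid)
    {
            struct lsm_context cp;

            if (security_secid_to_secctx(secid, &cp) < 0)
                    return;

            pr_info("context=%.*s\n", cp.len, cp.context);
            security_release_secctx(&cp);	/* frees cp.context */
    }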
+/*
* Values used in the task_security_ops calls
*/
/* setuid or setgid, id0 == uid or gid */
@@ -217,7 +263,7 @@ struct request_sock;
#define LSM_UNSAFE_NO_NEW_PRIVS 4
#ifdef CONFIG_MMU
-extern int mmap_min_addr_handler(struct ctl_table *table, int write,
+extern int mmap_min_addr_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
#endif
@@ -246,8 +292,32 @@ static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id)
return kernel_load_data_str[id];
}
+/**
+ * lsmprop_init - initialize a lsm_prop structure
+ * @prop: Pointer to the data to initialize
+ *
+ * Set all secid for all modules to the specified value.
+ */
+static inline void lsmprop_init(struct lsm_prop *prop)
+{
+ memset(prop, 0, sizeof(*prop));
+}
+
#ifdef CONFIG_SECURITY
+/**
+ * lsmprop_is_set - report if there is a value in the lsm_prop
+ * @prop: Pointer to the exported LSM data
+ *
+ * Returns true if there is a value set, false otherwise
+ */
+static inline bool lsmprop_is_set(struct lsm_prop *prop)
+{
+ const struct lsm_prop empty = {};
+
+ return !!memcmp(prop, &empty, sizeof(*prop));
+}
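Together the two helpers support a simple cache-on-demand pattern; a sketch, assuming the subject label of the current task is wanted (example_cache_subject() is hypothetical):

    static void example_cache_subject(struct lsm_prop *prop)
    {
            if (lsmprop_is_set(prop))
                    return;			/* already populated */

            lsmprop_init(prop);		/* zero every per-LSM slot */
            security_current_getlsmprop_subj(prop);
    }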
+
int call_blocking_lsm_notifier(enum lsm_event event, void *data);
int register_blocking_lsm_notifier(struct notifier_block *nb);
int unregister_blocking_lsm_notifier(struct notifier_block *nb);
@@ -255,18 +325,19 @@ int unregister_blocking_lsm_notifier(struct notifier_block *nb);
/* prototypes */
extern int security_init(void);
extern int early_security_init(void);
+extern u64 lsm_name_to_attr(const char *name);
/* Security operations */
-int security_binder_set_context_mgr(struct task_struct *mgr);
-int security_binder_transaction(struct task_struct *from,
- struct task_struct *to);
-int security_binder_transfer_binder(struct task_struct *from,
- struct task_struct *to);
-int security_binder_transfer_file(struct task_struct *from,
- struct task_struct *to, struct file *file);
+int security_binder_set_context_mgr(const struct cred *mgr);
+int security_binder_transaction(const struct cred *from,
+ const struct cred *to);
+int security_binder_transfer_binder(const struct cred *from,
+ const struct cred *to);
+int security_binder_transfer_file(const struct cred *from,
+ const struct cred *to, const struct file *file);
int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
int security_ptrace_traceme(struct task_struct *parent);
-int security_capget(struct task_struct *target,
+int security_capget(const struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted);
@@ -278,16 +349,17 @@ int security_capable(const struct cred *cred,
struct user_namespace *ns,
int cap,
unsigned int opts);
-int security_quotactl(int cmds, int type, int id, struct super_block *sb);
+int security_quotactl(int cmds, int type, int id, const struct super_block *sb);
int security_quota_on(struct dentry *dentry);
int security_syslog(int type);
int security_settime64(const struct timespec64 *ts, const struct timezone *tz);
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
int security_bprm_creds_for_exec(struct linux_binprm *bprm);
-int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
+int security_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file);
int security_bprm_check(struct linux_binprm *bprm);
-void security_bprm_committing_creds(struct linux_binprm *bprm);
-void security_bprm_committed_creds(struct linux_binprm *bprm);
+void security_bprm_committing_creds(const struct linux_binprm *bprm);
+void security_bprm_committed_creds(const struct linux_binprm *bprm);
+int security_fs_context_submount(struct fs_context *fc, struct super_block *reference);
int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc);
int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param);
int security_sb_alloc(struct super_block *sb);
@@ -297,7 +369,7 @@ void security_free_mnt_opts(void **mnt_opts);
int security_sb_eat_lsm_opts(char *options, void **mnt_opts);
int security_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts);
int security_sb_remount(struct super_block *sb, void *mnt_opts);
-int security_sb_kern_mount(struct super_block *sb);
+int security_sb_kern_mount(const struct super_block *sb);
int security_sb_show_options(struct seq_file *m, struct super_block *sb);
int security_sb_statfs(struct dentry *dentry);
int security_sb_mount(const char *dev_name, const struct path *path,
@@ -312,19 +384,18 @@ int security_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb,
unsigned long kern_flags,
unsigned long *set_kern_flags);
-int security_add_mnt_opt(const char *option, const char *val,
- int len, void **mnt_opts);
int security_move_mount(const struct path *from_path, const struct path *to_path);
int security_dentry_init_security(struct dentry *dentry, int mode,
- const struct qstr *name, void **ctx,
- u32 *ctxlen);
+ const struct qstr *name,
+ const char **xattr_name,
+ struct lsm_context *lsmcxt);
int security_dentry_create_files_as(struct dentry *dentry, int mode,
- struct qstr *name,
+ const struct qstr *name,
const struct cred *old,
struct cred *new);
int security_path_notify(const struct path *path, u64 mask,
unsigned int obj_type);
-int security_inode_alloc(struct inode *inode);
+int security_inode_alloc(struct inode *inode, gfp_t gfp);
void security_inode_free(struct inode *inode);
int security_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr,
@@ -332,10 +403,9 @@ int security_inode_init_security(struct inode *inode, struct inode *dir,
int security_inode_init_security_anon(struct inode *inode,
const struct qstr *name,
const struct inode *context_inode);
-int security_old_inode_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, const char **name,
- void **value, size_t *len);
int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
+void security_inode_post_create_tmpfile(struct mnt_idmap *idmap,
+ struct inode *inode);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int security_inode_unlink(struct inode *dir, struct dentry *dentry);
@@ -351,34 +421,59 @@ int security_inode_readlink(struct dentry *dentry);
int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
bool rcu);
int security_inode_permission(struct inode *inode, int mask);
-int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
+int security_inode_setattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct iattr *attr);
+void security_inode_post_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ int ia_valid);
int security_inode_getattr(const struct path *path);
-int security_inode_setxattr(struct user_namespace *mnt_userns,
+int security_inode_setxattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
+int security_inode_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name,
+ struct posix_acl *kacl);
+void security_inode_post_set_acl(struct dentry *dentry, const char *acl_name,
+ struct posix_acl *kacl);
+int security_inode_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name);
+int security_inode_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, const char *acl_name);
+void security_inode_post_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name);
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
int security_inode_getxattr(struct dentry *dentry, const char *name);
int security_inode_listxattr(struct dentry *dentry);
-int security_inode_removexattr(struct user_namespace *mnt_userns,
+int security_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name);
+void security_inode_post_removexattr(struct dentry *dentry, const char *name);
+int security_inode_file_setattr(struct dentry *dentry,
+ struct file_kattr *fa);
+int security_inode_file_getattr(struct dentry *dentry,
+ struct file_kattr *fa);
int security_inode_need_killpriv(struct dentry *dentry);
-int security_inode_killpriv(struct user_namespace *mnt_userns,
- struct dentry *dentry);
-int security_inode_getsecurity(struct user_namespace *mnt_userns,
+int security_inode_killpriv(struct mnt_idmap *idmap, struct dentry *dentry);
+int security_inode_getsecurity(struct mnt_idmap *idmap,
struct inode *inode, const char *name,
void **buffer, bool alloc);
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags);
int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size);
-void security_inode_getsecid(struct inode *inode, u32 *secid);
+void security_inode_getlsmprop(struct inode *inode, struct lsm_prop *prop);
int security_inode_copy_up(struct dentry *src, struct cred **new);
-int security_inode_copy_up_xattr(const char *name);
+int security_inode_copy_up_xattr(struct dentry *src, const char *name);
+int security_inode_setintegrity(const struct inode *inode,
+ enum lsm_integrity_type type, const void *value,
+ size_t size);
int security_kernfs_init_security(struct kernfs_node *kn_dir,
struct kernfs_node *kn);
int security_file_permission(struct file *file, int mask);
int security_file_alloc(struct file *file);
+void security_file_release(struct file *file);
void security_file_free(struct file *file);
int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int security_file_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg);
int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags);
int security_mmap_addr(unsigned long addr);
@@ -391,13 +486,16 @@ int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
int security_file_open(struct file *file);
-int security_task_alloc(struct task_struct *task, unsigned long clone_flags);
+int security_file_post_open(struct file *file, int mask);
+int security_file_truncate(struct file *file);
+int security_task_alloc(struct task_struct *task, u64 clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
void security_cred_free(struct cred *cred);
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
void security_transfer_creds(struct cred *new, const struct cred *old);
void security_cred_getsecid(const struct cred *c, u32 *secid);
+void security_cred_getlsmprop(const struct cred *c, struct lsm_prop *prop);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_kernel_module_request(char *kmod_name);
@@ -413,11 +511,12 @@ int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags);
int security_task_fix_setgid(struct cred *new, const struct cred *old,
int flags);
+int security_task_fix_setgroups(struct cred *new, const struct cred *old);
int security_task_setpgid(struct task_struct *p, pid_t pgid);
int security_task_getpgid(struct task_struct *p);
int security_task_getsid(struct task_struct *p);
-void security_task_getsecid_subj(struct task_struct *p, u32 *secid);
-void security_task_getsecid_obj(struct task_struct *p, u32 *secid);
+void security_current_getlsmprop_subj(struct lsm_prop *prop);
+void security_task_getlsmprop_obj(struct task_struct *p, struct lsm_prop *prop);
int security_task_setnice(struct task_struct *p, int nice);
int security_task_setioprio(struct task_struct *p, int ioprio);
int security_task_getioprio(struct task_struct *p);
@@ -433,8 +532,9 @@ int security_task_kill(struct task_struct *p, struct kernel_siginfo *info,
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
void security_task_to_inode(struct task_struct *p, struct inode *inode);
+int security_create_user_ns(const struct cred *cred);
int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
-void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid);
+void security_ipc_getlsmprop(struct kern_ipc_perm *ipcp, struct lsm_prop *prop);
int security_msg_msg_alloc(struct msg_msg *msg);
void security_msg_msg_free(struct msg_msg *msg);
int security_msg_queue_alloc(struct kern_ipc_perm *msq);
@@ -457,22 +557,44 @@ int security_sem_semctl(struct kern_ipc_perm *sma, int cmd);
int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops,
unsigned nsops, int alter);
void security_d_instantiate(struct dentry *dentry, struct inode *inode);
-int security_getprocattr(struct task_struct *p, const char *lsm, char *name,
+int security_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
+ u32 __user *size, u32 flags);
+int security_setselfattr(unsigned int attr, struct lsm_ctx __user *ctx,
+ u32 size, u32 flags);
+int security_getprocattr(struct task_struct *p, int lsmid, const char *name,
char **value);
-int security_setprocattr(const char *lsm, const char *name, void *value,
- size_t size);
-int security_netlink_send(struct sock *sk, struct sk_buff *skb);
+int security_setprocattr(int lsmid, const char *name, void *value, size_t size);
int security_ismaclabel(const char *name);
-int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
+int security_secid_to_secctx(u32 secid, struct lsm_context *cp);
+int security_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp,
+ int lsmid);
int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
-void security_release_secctx(char *secdata, u32 seclen);
+void security_release_secctx(struct lsm_context *cp);
void security_inode_invalidate_secctx(struct inode *inode);
int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
-int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
+int security_inode_getsecctx(struct inode *inode, struct lsm_context *cp);
int security_locked_down(enum lockdown_reason what);
+int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, u32 *uctx_len,
+ void *val, size_t val_len, u64 id, u64 flags);
+int security_bdev_alloc(struct block_device *bdev);
+void security_bdev_free(struct block_device *bdev);
+int security_bdev_setintegrity(struct block_device *bdev,
+ enum lsm_integrity_type type, const void *value,
+ size_t size);
#else /* CONFIG_SECURITY */
+/**
+ * lsmprop_is_set - report if there is a value in the lsm_prop
+ * @prop: Pointer to the exported LSM data
+ *
+ * Returns true if there is a value set, false otherwise
+ */
+static inline bool lsmprop_is_set(struct lsm_prop *prop)
+{
+ return false;
+}
+
static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data)
{
return 0;
@@ -488,6 +610,11 @@ static inline int unregister_blocking_lsm_notifier(struct notifier_block *nb)
return 0;
}
+static inline u64 lsm_name_to_attr(const char *name)
+{
+ return LSM_ATTR_UNDEF;
+}
+
static inline void security_free_mnt_opts(void **mnt_opts)
{
}
@@ -507,26 +634,26 @@ static inline int early_security_init(void)
return 0;
}
-static inline int security_binder_set_context_mgr(struct task_struct *mgr)
+static inline int security_binder_set_context_mgr(const struct cred *mgr)
{
return 0;
}
-static inline int security_binder_transaction(struct task_struct *from,
- struct task_struct *to)
+static inline int security_binder_transaction(const struct cred *from,
+ const struct cred *to)
{
return 0;
}
-static inline int security_binder_transfer_binder(struct task_struct *from,
- struct task_struct *to)
+static inline int security_binder_transfer_binder(const struct cred *from,
+ const struct cred *to)
{
return 0;
}
-static inline int security_binder_transfer_file(struct task_struct *from,
- struct task_struct *to,
- struct file *file)
+static inline int security_binder_transfer_file(const struct cred *from,
+ const struct cred *to,
+ const struct file *file)
{
return 0;
}
@@ -542,7 +669,7 @@ static inline int security_ptrace_traceme(struct task_struct *parent)
return cap_ptrace_traceme(parent);
}
-static inline int security_capget(struct task_struct *target,
+static inline int security_capget(const struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
@@ -568,7 +695,7 @@ static inline int security_capable(const struct cred *cred,
}
static inline int security_quotactl(int cmds, int type, int id,
- struct super_block *sb)
+ const struct super_block *sb)
{
return 0;
}
@@ -591,7 +718,7 @@ static inline int security_settime64(const struct timespec64 *ts,
static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
- return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages));
+ return __vm_enough_memory(mm, pages, !cap_vm_enough_memory(mm, pages));
}
static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm)
@@ -600,7 +727,7 @@ static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm)
}
static inline int security_bprm_creds_from_file(struct linux_binprm *bprm,
- struct file *file)
+ const struct file *file)
{
return cap_bprm_creds_from_file(bprm, file);
}
@@ -610,14 +737,19 @@ static inline int security_bprm_check(struct linux_binprm *bprm)
return 0;
}
-static inline void security_bprm_committing_creds(struct linux_binprm *bprm)
+static inline void security_bprm_committing_creds(const struct linux_binprm *bprm)
{
}
-static inline void security_bprm_committed_creds(struct linux_binprm *bprm)
+static inline void security_bprm_committed_creds(const struct linux_binprm *bprm)
{
}
+static inline int security_fs_context_submount(struct fs_context *fc,
+ struct super_block *reference)
+{
+ return 0;
+}
static inline int security_fs_context_dup(struct fs_context *fc,
struct fs_context *src_fc)
{
@@ -709,12 +841,6 @@ static inline int security_sb_clone_mnt_opts(const struct super_block *oldsb,
return 0;
}
-static inline int security_add_mnt_opt(const char *option, const char *val,
- int len, void **mnt_opts)
-{
- return 0;
-}
-
static inline int security_move_mount(const struct path *from_path,
const struct path *to_path)
{
@@ -727,7 +853,7 @@ static inline int security_path_notify(const struct path *path, u64 mask,
return 0;
}
-static inline int security_inode_alloc(struct inode *inode)
+static inline int security_inode_alloc(struct inode *inode, gfp_t gfp)
{
return 0;
}
@@ -738,14 +864,14 @@ static inline void security_inode_free(struct inode *inode)
static inline int security_dentry_init_security(struct dentry *dentry,
int mode,
const struct qstr *name,
- void **ctx,
- u32 *ctxlen)
+ const char **xattr_name,
+ struct lsm_context *lsmcxt)
{
return -EOPNOTSUPP;
}
static inline int security_dentry_create_files_as(struct dentry *dentry,
- int mode, struct qstr *name,
+ int mode, const struct qstr *name,
const struct cred *old,
struct cred *new)
{
@@ -769,15 +895,6 @@ static inline int security_inode_init_security_anon(struct inode *inode,
return 0;
}
-static inline int security_old_inode_init_security(struct inode *inode,
- struct inode *dir,
- const struct qstr *qstr,
- const char **name,
- void **value, size_t *len)
-{
- return -EOPNOTSUPP;
-}
-
static inline int security_inode_create(struct inode *dir,
struct dentry *dentry,
umode_t mode)
@@ -785,6 +902,10 @@ static inline int security_inode_create(struct inode *dir,
return 0;
}
+static inline void
+security_inode_post_create_tmpfile(struct mnt_idmap *idmap, struct inode *inode)
+{ }
+
static inline int security_inode_link(struct dentry *old_dentry,
struct inode *dir,
struct dentry *new_dentry)
@@ -851,24 +972,62 @@ static inline int security_inode_permission(struct inode *inode, int mask)
return 0;
}
-static inline int security_inode_setattr(struct dentry *dentry,
- struct iattr *attr)
+static inline int security_inode_setattr(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ struct iattr *attr)
{
return 0;
}
+static inline void
+security_inode_post_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ int ia_valid)
+{ }
+
static inline int security_inode_getattr(const struct path *path)
{
return 0;
}
-static inline int security_inode_setxattr(struct user_namespace *mnt_userns,
+static inline int security_inode_setxattr(struct mnt_idmap *idmap,
struct dentry *dentry, const char *name, const void *value,
size_t size, int flags)
{
return cap_inode_setxattr(dentry, name, value, size, flags);
}
+static inline int security_inode_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name,
+ struct posix_acl *kacl)
+{
+ return 0;
+}
+
+static inline void security_inode_post_set_acl(struct dentry *dentry,
+ const char *acl_name,
+ struct posix_acl *kacl)
+{ }
+
+static inline int security_inode_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return 0;
+}
+
+static inline int security_inode_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{
+ return 0;
+}
+
+static inline void security_inode_post_remove_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ const char *acl_name)
+{ }
+
static inline void security_inode_post_setxattr(struct dentry *dentry,
const char *name, const void *value, size_t size, int flags)
{ }
@@ -884,11 +1043,27 @@ static inline int security_inode_listxattr(struct dentry *dentry)
return 0;
}
-static inline int security_inode_removexattr(struct user_namespace *mnt_userns,
+static inline int security_inode_removexattr(struct mnt_idmap *idmap,
struct dentry *dentry,
const char *name)
{
- return cap_inode_removexattr(mnt_userns, dentry, name);
+ return cap_inode_removexattr(idmap, dentry, name);
+}
+
+static inline void security_inode_post_removexattr(struct dentry *dentry,
+ const char *name)
+{ }
+
+static inline int security_inode_file_setattr(struct dentry *dentry,
+ struct file_kattr *fa)
+{
+ return 0;
+}
+
+static inline int security_inode_file_getattr(struct dentry *dentry,
+ struct file_kattr *fa)
+{
+ return 0;
}
static inline int security_inode_need_killpriv(struct dentry *dentry)
@@ -896,18 +1071,18 @@ static inline int security_inode_need_killpriv(struct dentry *dentry)
return cap_inode_need_killpriv(dentry);
}
-static inline int security_inode_killpriv(struct user_namespace *mnt_userns,
+static inline int security_inode_killpriv(struct mnt_idmap *idmap,
struct dentry *dentry)
{
- return cap_inode_killpriv(mnt_userns, dentry);
+ return cap_inode_killpriv(idmap, dentry);
}
-static inline int security_inode_getsecurity(struct user_namespace *mnt_userns,
+static inline int security_inode_getsecurity(struct mnt_idmap *idmap,
struct inode *inode,
const char *name, void **buffer,
bool alloc)
{
- return cap_inode_getsecurity(mnt_userns, inode, name, buffer, alloc);
+ return cap_inode_getsecurity(idmap, inode, name, buffer, alloc);
}
static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
@@ -920,9 +1095,10 @@ static inline int security_inode_listsecurity(struct inode *inode, char *buffer,
return 0;
}
-static inline void security_inode_getsecid(struct inode *inode, u32 *secid)
+static inline void security_inode_getlsmprop(struct inode *inode,
+ struct lsm_prop *prop)
{
- *secid = 0;
+ lsmprop_init(prop);
}
static inline int security_inode_copy_up(struct dentry *src, struct cred **new)
@@ -930,13 +1106,20 @@ static inline int security_inode_copy_up(struct dentry *src, struct cred **new)
return 0;
}
+static inline int security_inode_setintegrity(const struct inode *inode,
+ enum lsm_integrity_type type,
+ const void *value, size_t size)
+{
+ return 0;
+}
+
static inline int security_kernfs_init_security(struct kernfs_node *kn_dir,
struct kernfs_node *kn)
{
return 0;
}
-static inline int security_inode_copy_up_xattr(const char *name)
+static inline int security_inode_copy_up_xattr(struct dentry *src, const char *name)
{
return -EOPNOTSUPP;
}
@@ -951,6 +1134,9 @@ static inline int security_file_alloc(struct file *file)
return 0;
}
+static inline void security_file_release(struct file *file)
+{ }
+
static inline void security_file_free(struct file *file)
{ }
@@ -960,6 +1146,13 @@ static inline int security_file_ioctl(struct file *file, unsigned int cmd,
return 0;
}
+static inline int security_file_ioctl_compat(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ return 0;
+}
+
static inline int security_mmap_file(struct file *file, unsigned long prot,
unsigned long flags)
{
@@ -1011,8 +1204,18 @@ static inline int security_file_open(struct file *file)
return 0;
}
+static inline int security_file_post_open(struct file *file, int mask)
+{
+ return 0;
+}
+
+static inline int security_file_truncate(struct file *file)
+{
+ return 0;
+}
+
static inline int security_task_alloc(struct task_struct *task,
- unsigned long clone_flags)
+ u64 clone_flags)
{
return 0;
}
@@ -1040,6 +1243,15 @@ static inline void security_transfer_creds(struct cred *new,
{
}
+static inline void security_cred_getsecid(const struct cred *c, u32 *secid)
+{
+ *secid = 0;
+}
+
+static inline void security_cred_getlsmprop(const struct cred *c,
+ struct lsm_prop *prop)
+{ }
+
static inline int security_kernel_act_as(struct cred *cred, u32 secid)
{
return 0;
@@ -1096,6 +1308,12 @@ static inline int security_task_fix_setgid(struct cred *new,
return 0;
}
+static inline int security_task_fix_setgroups(struct cred *new,
+ const struct cred *old)
+{
+ return 0;
+}
+
static inline int security_task_setpgid(struct task_struct *p, pid_t pgid)
{
return 0;
@@ -1111,14 +1329,15 @@ static inline int security_task_getsid(struct task_struct *p)
return 0;
}
-static inline void security_task_getsecid_subj(struct task_struct *p, u32 *secid)
+static inline void security_current_getlsmprop_subj(struct lsm_prop *prop)
{
- *secid = 0;
+ lsmprop_init(prop);
}
-static inline void security_task_getsecid_obj(struct task_struct *p, u32 *secid)
+static inline void security_task_getlsmprop_obj(struct task_struct *p,
+ struct lsm_prop *prop)
{
- *secid = 0;
+ lsmprop_init(prop);
}
static inline int security_task_setnice(struct task_struct *p, int nice)
@@ -1183,15 +1402,21 @@ static inline int security_task_prctl(int option, unsigned long arg2,
static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
{ }
+static inline int security_create_user_ns(const struct cred *cred)
+{
+ return 0;
+}
+
static inline int security_ipc_permission(struct kern_ipc_perm *ipcp,
short flag)
{
return 0;
}
-static inline void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
+static inline void security_ipc_getlsmprop(struct kern_ipc_perm *ipcp,
+ struct lsm_prop *prop)
{
- *secid = 0;
+ lsmprop_init(prop);
}
static inline int security_msg_msg_alloc(struct msg_msg *msg)
@@ -1289,21 +1514,30 @@ static inline void security_d_instantiate(struct dentry *dentry,
struct inode *inode)
{ }
-static inline int security_getprocattr(struct task_struct *p, const char *lsm,
- char *name, char **value)
+static inline int security_getselfattr(unsigned int attr,
+ struct lsm_ctx __user *ctx,
+ u32 __user *size, u32 flags)
{
- return -EINVAL;
+ return -EOPNOTSUPP;
+}
+
+static inline int security_setselfattr(unsigned int attr,
+ struct lsm_ctx __user *ctx,
+ u32 size, u32 flags)
+{
+ return -EOPNOTSUPP;
}
-static inline int security_setprocattr(const char *lsm, char *name,
- void *value, size_t size)
+static inline int security_getprocattr(struct task_struct *p, int lsmid,
+ const char *name, char **value)
{
return -EINVAL;
}
-static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb)
+static inline int security_setprocattr(int lsmid, const char *name, void *value,
+ size_t size)
{
- return 0;
+ return -EINVAL;
}
static inline int security_ismaclabel(const char *name)
@@ -1311,7 +1545,14 @@ static inline int security_ismaclabel(const char *name)
return 0;
}
-static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+static inline int security_secid_to_secctx(u32 secid, struct lsm_context *cp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_lsmprop_to_secctx(struct lsm_prop *prop,
+ struct lsm_context *cp,
+ int lsmid)
{
return -EOPNOTSUPP;
}
@@ -1323,7 +1564,7 @@ static inline int security_secctx_to_secid(const char *secdata,
return -EOPNOTSUPP;
}
-static inline void security_release_secctx(char *secdata, u32 seclen)
+static inline void security_release_secctx(struct lsm_context *cp)
{
}
@@ -1339,7 +1580,8 @@ static inline int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32
{
return -EOPNOTSUPP;
}
-static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+static inline int security_inode_getsecctx(struct inode *inode,
+ struct lsm_context *cp)
{
return -EOPNOTSUPP;
}
@@ -1347,6 +1589,29 @@ static inline int security_locked_down(enum lockdown_reason what)
{
return 0;
}
+static inline int lsm_fill_user_ctx(struct lsm_ctx __user *uctx,
+ u32 *uctx_len, void *val, size_t val_len,
+ u64 id, u64 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_bdev_alloc(struct block_device *bdev)
+{
+ return 0;
+}
+
+static inline void security_bdev_free(struct block_device *bdev)
+{
+}
+
+static inline int security_bdev_setintegrity(struct block_device *bdev,
+ enum lsm_integrity_type type,
+ const void *value, size_t size)
+{
+ return 0;
+}
+
#endif /* CONFIG_SECURITY */
#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
@@ -1373,6 +1638,7 @@ static inline int security_watch_key(struct key *key)
#ifdef CONFIG_SECURITY_NETWORK
+int security_netlink_send(struct sock *sk, struct sk_buff *skb);
int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk);
int security_unix_may_send(struct socket *sock, struct socket *other);
int security_socket_create(int family, int type, int protocol, int kern);
@@ -1392,13 +1658,14 @@ int security_socket_getsockopt(struct socket *sock, int level, int optname);
int security_socket_setsockopt(struct socket *sock, int level, int optname);
int security_socket_shutdown(struct socket *sock, int how);
int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb);
-int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
- int __user *optlen, unsigned len);
+int security_socket_getpeersec_stream(struct socket *sock, sockptr_t optval,
+ sockptr_t optlen, unsigned int len);
int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid);
int security_sk_alloc(struct sock *sk, int family, gfp_t priority);
void security_sk_free(struct sock *sk);
void security_sk_clone(const struct sock *sk, struct sock *newsk);
-void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic);
+void security_sk_classify_flow(const struct sock *sk,
+ struct flowi_common *flic);
void security_req_classify_flow(const struct request_sock *req,
struct flowi_common *flic);
void security_sock_graft(struct sock *sk, struct socket *parent);
@@ -1417,13 +1684,21 @@ int security_tun_dev_create(void);
int security_tun_dev_attach_queue(void *security);
int security_tun_dev_attach(struct sock *sk, void *security);
int security_tun_dev_open(void *security);
-int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb);
+int security_sctp_assoc_request(struct sctp_association *asoc, struct sk_buff *skb);
int security_sctp_bind_connect(struct sock *sk, int optname,
struct sockaddr *address, int addrlen);
-void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
+void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk,
struct sock *newsk);
+int security_sctp_assoc_established(struct sctp_association *asoc,
+ struct sk_buff *skb);
+int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk);
#else /* CONFIG_SECURITY_NETWORK */
+static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb)
+{
+ return 0;
+}
+
static inline int security_unix_stream_connect(struct sock *sock,
struct sock *other,
struct sock *newsk)
@@ -1527,8 +1802,10 @@ static inline int security_sock_rcv_skb(struct sock *sk,
return 0;
}
-static inline int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
- int __user *optlen, unsigned len)
+static inline int security_socket_getpeersec_stream(struct socket *sock,
+ sockptr_t optval,
+ sockptr_t optlen,
+ unsigned int len)
{
return -ENOPROTOOPT;
}
@@ -1551,7 +1828,7 @@ static inline void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
}
-static inline void security_sk_classify_flow(struct sock *sk,
+static inline void security_sk_classify_flow(const struct sock *sk,
struct flowi_common *flic)
{
}
@@ -1623,7 +1900,7 @@ static inline int security_tun_dev_open(void *security)
return 0;
}
-static inline int security_sctp_assoc_request(struct sctp_endpoint *ep,
+static inline int security_sctp_assoc_request(struct sctp_association *asoc,
struct sk_buff *skb)
{
return 0;
@@ -1636,11 +1913,22 @@ static inline int security_sctp_bind_connect(struct sock *sk, int optname,
return 0;
}
-static inline void security_sctp_sk_clone(struct sctp_endpoint *ep,
+static inline void security_sctp_sk_clone(struct sctp_association *asoc,
struct sock *sk,
struct sock *newsk)
{
}
+
+static inline int security_sctp_assoc_established(struct sctp_association *asoc,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_INFINIBAND
@@ -1681,7 +1969,7 @@ int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid);
int security_xfrm_state_delete(struct xfrm_state *x);
void security_xfrm_state_free(struct xfrm_state *x);
-int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
+int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid);
int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
const struct flowi_common *flic);
@@ -1732,7 +2020,7 @@ static inline int security_xfrm_state_delete(struct xfrm_state *x)
return 0;
}
-static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid)
{
return 0;
}
@@ -1762,6 +2050,7 @@ int security_path_mkdir(const struct path *dir, struct dentry *dentry, umode_t m
int security_path_rmdir(const struct path *dir, struct dentry *dentry);
int security_path_mknod(const struct path *dir, struct dentry *dentry, umode_t mode,
unsigned int dev);
+void security_path_post_mknod(struct mnt_idmap *idmap, struct dentry *dentry);
int security_path_truncate(const struct path *path);
int security_path_symlink(const struct path *dir, struct dentry *dentry,
const char *old_name);
@@ -1796,6 +2085,10 @@ static inline int security_path_mknod(const struct path *dir, struct dentry *den
return 0;
}
+static inline void security_path_post_mknod(struct mnt_idmap *idmap,
+ struct dentry *dentry)
+{ }
+
static inline int security_path_truncate(const struct path *path)
{
return 0;
@@ -1847,6 +2140,9 @@ void security_key_free(struct key *key);
int security_key_permission(key_ref_t key_ref, const struct cred *cred,
enum key_need_perm need_perm);
int security_key_getsecurity(struct key *key, char **_buffer);
+void security_key_post_create_or_update(struct key *keyring, struct key *key,
+ const void *payload, size_t payload_len,
+ unsigned long flags, bool create);
#else
@@ -1874,20 +2170,30 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer)
return 0;
}
+static inline void security_key_post_create_or_update(struct key *keyring,
+ struct key *key,
+ const void *payload,
+ size_t payload_len,
+ unsigned long flags,
+ bool create)
+{ }
+
#endif
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
#ifdef CONFIG_SECURITY
-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule);
+int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
+ gfp_t gfp);
int security_audit_rule_known(struct audit_krule *krule);
-int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule);
+int security_audit_rule_match(struct lsm_prop *prop, u32 field, u32 op,
+ void *lsmrule);
void security_audit_rule_free(void *lsmrule);
#else
static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr,
- void **lsmrule)
+ void **lsmrule, gfp_t gfp)
{
return 0;
}
@@ -1897,8 +2203,8 @@ static inline int security_audit_rule_known(struct audit_krule *krule)
return 0;
}
-static inline int security_audit_rule_match(u32 secid, u32 field, u32 op,
- void *lsmrule)
+static inline int security_audit_rule_match(struct lsm_prop *prop, u32 field,
+ u32 op, void *lsmrule)
{
return 0;
}
@@ -1955,18 +2261,25 @@ static inline void securityfs_remove(struct dentry *dentry)
union bpf_attr;
struct bpf_map;
struct bpf_prog;
-struct bpf_prog_aux;
+struct bpf_token;
#ifdef CONFIG_SECURITY
-extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size);
+extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size, bool kernel);
extern int security_bpf_map(struct bpf_map *map, fmode_t fmode);
extern int security_bpf_prog(struct bpf_prog *prog);
-extern int security_bpf_map_alloc(struct bpf_map *map);
+extern int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token, bool kernel);
extern void security_bpf_map_free(struct bpf_map *map);
-extern int security_bpf_prog_alloc(struct bpf_prog_aux *aux);
-extern void security_bpf_prog_free(struct bpf_prog_aux *aux);
+extern int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token, bool kernel);
+extern void security_bpf_prog_free(struct bpf_prog *prog);
+extern int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
+ const struct path *path);
+extern void security_bpf_token_free(struct bpf_token *token);
+extern int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
+extern int security_bpf_token_capable(const struct bpf_token *token, int cap);
#else
static inline int security_bpf(int cmd, union bpf_attr *attr,
- unsigned int size)
+ unsigned int size, bool kernel)
{
return 0;
}
@@ -1981,7 +2294,8 @@ static inline int security_bpf_prog(struct bpf_prog *prog)
return 0;
}
-static inline int security_bpf_map_alloc(struct bpf_map *map)
+static inline int security_bpf_map_create(struct bpf_map *map, union bpf_attr *attr,
+ struct bpf_token *token, bool kernel)
{
return 0;
}
@@ -1989,13 +2303,33 @@ static inline int security_bpf_map_alloc(struct bpf_map *map)
static inline void security_bpf_map_free(struct bpf_map *map)
{ }
-static inline int security_bpf_prog_alloc(struct bpf_prog_aux *aux)
+static inline int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr,
+ struct bpf_token *token, bool kernel)
{
return 0;
}
-static inline void security_bpf_prog_free(struct bpf_prog_aux *aux)
+static inline void security_bpf_prog_free(struct bpf_prog *prog)
{ }
+
+static inline int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr,
+ const struct path *path)
+{
+ return 0;
+}
+
+static inline void security_bpf_token_free(struct bpf_token *token)
+{ }
+
+static inline int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd)
+{
+ return 0;
+}
+
+static inline int security_bpf_token_capable(const struct bpf_token *token, int cap)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_BPF_SYSCALL */
@@ -2004,14 +2338,13 @@ struct perf_event_attr;
struct perf_event;
#ifdef CONFIG_SECURITY
-extern int security_perf_event_open(struct perf_event_attr *attr, int type);
+extern int security_perf_event_open(int type);
extern int security_perf_event_alloc(struct perf_event *event);
extern void security_perf_event_free(struct perf_event *event);
extern int security_perf_event_read(struct perf_event *event);
extern int security_perf_event_write(struct perf_event *event);
#else
-static inline int security_perf_event_open(struct perf_event_attr *attr,
- int type)
+static inline int security_perf_event_open(int type)
{
return 0;
}
@@ -2037,4 +2370,38 @@ static inline int security_perf_event_write(struct perf_event *event)
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_PERF_EVENTS */
+#ifdef CONFIG_IO_URING
+#ifdef CONFIG_SECURITY
+extern int security_uring_override_creds(const struct cred *new);
+extern int security_uring_sqpoll(void);
+extern int security_uring_cmd(struct io_uring_cmd *ioucmd);
+extern int security_uring_allowed(void);
+#else
+static inline int security_uring_override_creds(const struct cred *new)
+{
+ return 0;
+}
+static inline int security_uring_sqpoll(void)
+{
+ return 0;
+}
+static inline int security_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ return 0;
+}
+static inline int security_uring_allowed(void)
+{
+ return 0;
+}
+#endif /* CONFIG_SECURITY */
+#endif /* CONFIG_IO_URING */
+
+#ifdef CONFIG_SECURITY
+extern void security_initramfs_populated(void);
+#else
+static inline void security_initramfs_populated(void)
+{
+}
+#endif /* CONFIG_SECURITY */
+
#endif /* ! __LINUX_SECURITY_H */
diff --git a/include/linux/sed-opal-key.h b/include/linux/sed-opal-key.h
new file mode 100644
index 000000000000..0ca03054e8f6
--- /dev/null
+++ b/include/linux/sed-opal-key.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SED key operations.
+ *
+ * Copyright (C) 2023 IBM Corporation
+ *
+ * These are the accessor functions (read/write) for SED Opal
+ * keys. Specific keystores can provide overrides.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#ifdef CONFIG_PSERIES_PLPKS_SED
+int sed_read_key(char *keyname, char *key, u_int *keylen);
+int sed_write_key(char *keyname, char *key, u_int keylen);
+#else
+static inline
+int sed_read_key(char *keyname, char *key, u_int *keylen)
+{
+	return -EOPNOTSUPP;
+}
+static inline
+int sed_write_key(char *keyname, char *key, u_int keylen)
+{
+	return -EOPNOTSUPP;
+}
+#endif
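A usage sketch, assuming @keylen is an in/out parameter and a keystore backend (e.g. PLPKS) is enabled; the key name, buffer size, and helper below are illustrative, not part of the API:

	/* Hypothetical caller: fetch a SED Opal key from the platform keystore. */
	static int example_load_sed_key(char *buf, u_int bufsize)
	{
		u_int len = bufsize;	/* in: buffer size, out: key length (assumed) */
		int ret;

		ret = sed_read_key("example-sed-key", buf, &len);
		if (ret)	/* -EOPNOTSUPP when no keystore backend is built in */
			return ret;
		return len;
	}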
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
index 1ac0d712a9c3..80f33a93f944 100644
--- a/include/linux/sed-opal.h
+++ b/include/linux/sed-opal.h
@@ -11,7 +11,8 @@
#define LINUX_OPAL_H
#include <uapi/linux/sed-opal.h>
-#include <linux/kernel.h>
+#include <linux/compiler_types.h>
+#include <linux/types.h>
struct opal_dev;
@@ -24,6 +25,9 @@ bool opal_unlock_from_suspend(struct opal_dev *dev);
struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv);
int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *ioctl_ptr);
+#define OPAL_AUTH_KEY "opal-boot-pin"
+#define OPAL_AUTH_KEY_PREV "opal-boot-pin-prev"
+
static inline bool is_sed_ioctl(unsigned int cmd)
{
switch (cmd) {
@@ -43,6 +47,12 @@ static inline bool is_sed_ioctl(unsigned int cmd)
case IOC_OPAL_MBR_DONE:
case IOC_OPAL_WRITE_SHADOW_MBR:
case IOC_OPAL_GENERIC_TABLE_RW:
+ case IOC_OPAL_GET_STATUS:
+ case IOC_OPAL_GET_LR_STATUS:
+ case IOC_OPAL_GET_GEOMETRY:
+ case IOC_OPAL_DISCOVERY:
+ case IOC_OPAL_REVERT_LSP:
+ case IOC_OPAL_SET_SID_PW:
return true;
}
return false;
diff --git a/include/linux/selection.h b/include/linux/selection.h
index 170ef28ff26b..bab7d30d3446 100644
--- a/include/linux/selection.h
+++ b/include/linux/selection.h
@@ -14,17 +14,16 @@
struct tty_struct;
struct vc_data;
-extern void clear_selection(void);
-extern int set_selection_user(const struct tiocl_selection __user *sel,
- struct tty_struct *tty);
-extern int set_selection_kernel(struct tiocl_selection *v,
- struct tty_struct *tty);
-extern int paste_selection(struct tty_struct *tty);
-extern int sel_loadlut(char __user *p);
-extern int mouse_reporting(void);
-extern void mouse_report(struct tty_struct * tty, int butt, int mrx, int mry);
-
-bool vc_is_sel(struct vc_data *vc);
+void clear_selection(void);
+int set_selection_user(const struct tiocl_selection __user *sel,
+ struct tty_struct *tty);
+int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty);
+int paste_selection(struct tty_struct *tty);
+int sel_loadlut(u32 __user *lut);
+int mouse_reporting(void);
+void mouse_report(struct tty_struct *tty, int butt, int mrx, int mry);
+
+bool vc_is_sel(const struct vc_data *vc);
extern int console_blanked;
@@ -33,24 +32,21 @@ extern unsigned char default_red[];
extern unsigned char default_grn[];
extern unsigned char default_blu[];
-extern unsigned short *screen_pos(const struct vc_data *vc, int w_offset,
- bool viewed);
-extern u16 screen_glyph(const struct vc_data *vc, int offset);
-extern u32 screen_glyph_unicode(const struct vc_data *vc, int offset);
-extern void complement_pos(struct vc_data *vc, int offset);
-extern void invert_screen(struct vc_data *vc, int offset, int count, bool viewed);
-
-extern void getconsxy(const struct vc_data *vc, unsigned char xy[static 2]);
-extern void putconsxy(struct vc_data *vc, unsigned char xy[static const 2]);
-
-extern u16 vcs_scr_readw(const struct vc_data *vc, const u16 *org);
-extern void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org);
-extern void vcs_scr_updated(struct vc_data *vc);
-
-extern int vc_uniscr_check(struct vc_data *vc);
-extern void vc_uniscr_copy_line(const struct vc_data *vc, void *dest,
- bool viewed,
- unsigned int row, unsigned int col,
- unsigned int nr);
+unsigned short *screen_pos(const struct vc_data *vc, int w_offset, bool viewed);
+u16 screen_glyph(const struct vc_data *vc, int offset);
+u32 screen_glyph_unicode(const struct vc_data *vc, int offset);
+void complement_pos(struct vc_data *vc, int offset);
+void invert_screen(struct vc_data *vc, int offset, int count, bool viewed);
+
+void getconsxy(const struct vc_data *vc, unsigned char xy[static 2]);
+void putconsxy(struct vc_data *vc, unsigned char xy[static const 2]);
+
+u16 vcs_scr_readw(const struct vc_data *vc, const u16 *org);
+void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org);
+void vcs_scr_updated(struct vc_data *vc);
+
+int vc_uniscr_check(struct vc_data *vc);
+void vc_uniscr_copy_line(const struct vc_data *vc, void *dest, bool viewed,
+ unsigned int row, unsigned int col, unsigned int nr);
#endif
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 5608a500c43e..275269ce2ec8 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -3,26 +3,18 @@
#define _LINUX_SEM_H
#include <uapi/linux/sem.h>
+#include <linux/sem_types.h>
struct task_struct;
-struct sem_undo_list;
#ifdef CONFIG_SYSVIPC
-struct sysv_sem {
- struct sem_undo_list *undo_list;
-};
-
-extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
+extern int copy_semundo(u64 clone_flags, struct task_struct *tsk);
extern void exit_sem(struct task_struct *tsk);
#else
-struct sysv_sem {
- /* empty */
-};
-
-static inline int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
+static inline int copy_semundo(u64 clone_flags, struct task_struct *tsk)
{
return 0;
}
diff --git a/include/linux/sem_types.h b/include/linux/sem_types.h
new file mode 100644
index 000000000000..73df1971a7ae
--- /dev/null
+++ b/include/linux/sem_types.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SEM_TYPES_H
+#define _LINUX_SEM_TYPES_H
+
+struct sem_undo_list;
+
+struct sysv_sem {
+#ifdef CONFIG_SYSVIPC
+ struct sem_undo_list *undo_list;
+#endif
+};
+
+#endif /* _LINUX_SEM_TYPES_H */
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 6694d0019a68..89706157e622 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -16,17 +16,35 @@ struct semaphore {
raw_spinlock_t lock;
unsigned int count;
struct list_head wait_list;
+
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+ unsigned long last_holder;
+#endif
};
+#ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER
+#define __LAST_HOLDER_SEMAPHORE_INITIALIZER \
+ , .last_holder = 0UL
+#else
+#define __LAST_HOLDER_SEMAPHORE_INITIALIZER
+#endif
+
#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
.lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \
.count = n, \
- .wait_list = LIST_HEAD_INIT((name).wait_list), \
+ .wait_list = LIST_HEAD_INIT((name).wait_list) \
+ __LAST_HOLDER_SEMAPHORE_INITIALIZER \
}
-#define DEFINE_SEMAPHORE(name) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+/*
+ * Unlike mutexes, binary semaphores do not have an owner, so up() can
+ * be called in a different thread from the one which called down().
+ * It is also safe to call down_trylock() and up() from interrupt
+ * context.
+ */
+#define DEFINE_SEMAPHORE(_name, _n) \
+ struct semaphore _name = __SEMAPHORE_INITIALIZER(_name, _n)
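A minimal sketch of the new two-argument form; callers of the old DEFINE_SEMAPHORE(name) become DEFINE_SEMAPHORE(name, 1). The names below are hypothetical:

	static DEFINE_SEMAPHORE(foo_sem, 1);	/* binary semaphore */

	/* Process context: may sleep waiting for the semaphore. */
	static void foo_begin(void)
	{
		down(&foo_sem);
	}

	/* Per the comment above, this may run in another thread or in IRQ context. */
	static void foo_end(void)
	{
		up(&foo_sem);
	}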
static inline void sema_init(struct semaphore *sem, int val)
{
@@ -41,5 +59,6 @@ extern int __must_check down_killable(struct semaphore *sem);
extern int __must_check down_trylock(struct semaphore *sem);
extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
extern void up(struct semaphore *sem);
+extern unsigned long sem_last_holder(struct semaphore *sem);
#endif /* __LINUX_SEMAPHORE_H */
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index 5b31c5147969..9f2839e73f8a 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -2,7 +2,10 @@
#ifndef _LINUX_SEQ_BUF_H
#define _LINUX_SEQ_BUF_H
-#include <linux/fs.h>
+#include <linux/bug.h>
+#include <linux/minmax.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
/*
* Trace sequences are used to allow a function to call several other functions
@@ -10,23 +13,28 @@
*/
/**
- * seq_buf - seq buffer structure
+ * struct seq_buf - seq buffer structure
* @buffer: pointer to the buffer
* @size: size of the buffer
* @len: the amount of data inside the buffer
- * @readpos: The next position to read in the buffer.
*/
struct seq_buf {
char *buffer;
size_t size;
size_t len;
- loff_t readpos;
};
+#define DECLARE_SEQ_BUF(NAME, SIZE) \
+ struct seq_buf NAME = { \
+ .buffer = (char[SIZE]) { 0 }, \
+ .size = SIZE, \
+ }
+
static inline void seq_buf_clear(struct seq_buf *s)
{
s->len = 0;
- s->readpos = 0;
+ if (s->size)
+ s->buffer[0] = '\0';
}
static inline void
@@ -39,7 +47,7 @@ seq_buf_init(struct seq_buf *s, char *buf, unsigned int size)
/*
* seq_buf have a buffer that might overflow. When this happens
- * the len and size are set to be equal.
+ * len is set to be greater than size.
*/
static inline bool
seq_buf_has_overflowed(struct seq_buf *s)
@@ -72,10 +80,10 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
}
/**
- * seq_buf_terminate - Make sure buffer is nul terminated
- * @s: the seq_buf descriptor to terminate.
+ * seq_buf_str - get NUL-terminated C string from seq_buf
+ * @s: the seq_buf handle
*
- * This makes sure that the buffer in @s is nul terminated and
+ * This makes sure that the buffer in @s is NUL-terminated and
* safe to read as a string.
*
* Note, if this is called when the buffer has overflowed, then
@@ -84,16 +92,20 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
*
* After this function is called, s->buffer is safe to use
* in string operations.
+ *
+ * Returns: @s->buffer after making sure it is terminated.
*/
-static inline void seq_buf_terminate(struct seq_buf *s)
+static inline const char *seq_buf_str(struct seq_buf *s)
{
if (WARN_ON(s->size == 0))
- return;
+ return "";
if (seq_buf_buffer_left(s))
s->buffer[s->len] = 0;
else
s->buffer[s->size - 1] = 0;
+
+ return s->buffer;
}
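For illustration, a caller can hand the terminated buffer straight to a printk-style consumer (a sketch; pr_info() and the helper name are assumptions, not from this header):

	static void example_dump(struct seq_buf *s)
	{
		pr_info("state: %s\n", seq_buf_str(s));	/* safe even after overflow */
	}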
/**
@@ -101,7 +113,7 @@ static inline void seq_buf_terminate(struct seq_buf *s)
* @s: the seq_buf handle
* @bufp: the beginning of the buffer is stored here
*
- * Return the number of bytes available in the buffer, or zero if
+ * Returns: the number of bytes available in the buffer, or zero if
* there's no space.
*/
static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp)
@@ -123,7 +135,7 @@ static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp)
* @num: the number of bytes to commit
*
* Commit @num bytes of data written to a buffer previously acquired
- * by seq_buf_get. To signal an error condition, or that the data
+ * by seq_buf_get_buf(). To signal an error condition, or that the data
* didn't fit in the available space, pass a negative @num value.
*/
static inline void seq_buf_commit(struct seq_buf *s, int num)
@@ -137,13 +149,30 @@ static inline void seq_buf_commit(struct seq_buf *s, int num)
}
}
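A sketch of the acquire/commit protocol described above (hypothetical helper; assumes <linux/string.h> for memcpy()):

	static void example_fill(struct seq_buf *s, const void *data, size_t len)
	{
		char *buf;
		size_t avail = seq_buf_get_buf(s, &buf);

		if (len <= avail) {
			memcpy(buf, data, len);
			seq_buf_commit(s, len);		/* record @len bytes as written */
		} else {
			seq_buf_commit(s, -1);		/* data didn't fit: signal error */
		}
	}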
+/**
+ * seq_buf_pop - pop off the last written character
+ * @s: the seq_buf handle
+ *
+ * Removes the last character written to the seq_buf @s.
+ *
+ * Returns: the removed character, or -1 if the buffer is empty.
+ */
+static inline int seq_buf_pop(struct seq_buf *s)
+{
+ if (!s->len)
+ return -1;
+
+ s->len--;
+	return (unsigned char)s->buffer[s->len];
+}
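One plausible use is trimming a trailing separator (a sketch; seq_buf_putc(), declared below, restores a non-matching character):

	static void example_trim_comma(struct seq_buf *s)
	{
		int c = seq_buf_pop(s);	/* -1 when the buffer is empty */

		if (c != -1 && c != ',')
			seq_buf_putc(s, c);	/* not a comma: put it back */
	}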
+
extern __printf(2, 3)
int seq_buf_printf(struct seq_buf *s, const char *fmt, ...);
extern __printf(2, 0)
int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args);
extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s);
extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf,
- int cnt);
+ size_t start, int cnt);
extern int seq_buf_puts(struct seq_buf *s, const char *str);
extern int seq_buf_putc(struct seq_buf *s, unsigned char c);
extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len);
@@ -155,8 +184,10 @@ extern int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str,
const void *buf, size_t len, bool ascii);
#ifdef CONFIG_BINARY_PRINTF
-extern int
-seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
+__printf(2, 0)
+int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
#endif
+void seq_buf_do_printk(struct seq_buf *s, const char *lvl);
+
#endif /* _LINUX_SEQ_BUF_H */
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 723b1fa1177e..d6ebf0596510 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -4,9 +4,9 @@
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/string_helpers.h>
#include <linux/bug.h>
#include <linux/mutex.h>
-#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/fs.h>
#include <linux/cred.h>
@@ -117,7 +117,18 @@ void seq_vprintf(struct seq_file *m, const char *fmt, va_list args);
__printf(2, 3)
void seq_printf(struct seq_file *m, const char *fmt, ...);
void seq_putc(struct seq_file *m, char c);
-void seq_puts(struct seq_file *m, const char *s);
+void __seq_puts(struct seq_file *m, const char *s);
+
+static __always_inline void seq_puts(struct seq_file *m, const char *s)
+{
+ if (!__builtin_constant_p(*s))
+ __seq_puts(m, s);
+ else if (s[0] && !s[1])
+ seq_putc(m, s[0]);
+ else
+ seq_write(m, s, __builtin_strlen(s));
+}
+
void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter,
unsigned long long num, unsigned int width);
void seq_put_decimal_ull(struct seq_file *m, const char *delimiter,
@@ -126,8 +137,30 @@ void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num
void seq_put_hex_ll(struct seq_file *m, const char *delimiter,
unsigned long long v, unsigned int width);
-void seq_escape(struct seq_file *m, const char *s, const char *esc);
-void seq_escape_mem_ascii(struct seq_file *m, const char *src, size_t isz);
+void seq_escape_mem(struct seq_file *m, const char *src, size_t len,
+ unsigned int flags, const char *esc);
+
+static inline void seq_escape_str(struct seq_file *m, const char *src,
+ unsigned int flags, const char *esc)
+{
+ seq_escape_mem(m, src, strlen(src), flags, esc);
+}
+
+/**
+ * seq_escape - print string into buffer, escaping some characters
+ * @m: target buffer
+ * @s: NUL-terminated string
+ * @esc: set of characters that need escaping
+ *
+ * Puts the string into the buffer, replacing each occurrence of a
+ * character from @esc with its octal escape.
+ *
+ * Use seq_has_overflowed() to check for errors.
+ */
+static inline void seq_escape(struct seq_file *m, const char *s, const char *esc)
+{
+ seq_escape_str(m, s, ESCAPE_OCTAL, esc);
+}
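Typical use from a show() method (a sketch; the names are hypothetical):

	static int example_show(struct seq_file *m, void *v)
	{
		const char *label = v;

		seq_puts(m, "label=");
		seq_escape(m, label, "\t\n\\");	/* octal-escape tab, newline, backslash */
		seq_putc(m, '\n');
		return 0;
	}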
void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
int rowsize, int groupsize, const void *buf, size_t len,
@@ -139,6 +172,7 @@ int seq_dentry(struct seq_file *, struct dentry *, const char *);
int seq_path_root(struct seq_file *m, const struct path *path,
const struct path *root, const char *esc);
+void *single_start(struct seq_file *, loff_t *);
int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
int single_release(struct inode *, struct file *);
@@ -147,6 +181,7 @@ int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
#ifdef CONFIG_BINARY_PRINTF
+__printf(2, 0)
void seq_bprintf(struct seq_file *m, const char *f, const u32 *binary);
#endif
@@ -183,12 +218,27 @@ static const struct file_operations __name ## _fops = { \
.release = single_release, \
}
-#define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \
+#define DEFINE_SHOW_STORE_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
return single_open(file, __name ## _show, inode->i_private); \
} \
\
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .write = __name ## _write, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
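The intent, as a sketch: the caller supplies both <name>_show() and <name>_write(), and the macro emits the open helper and file_operations. Everything below is hypothetical:

	static int foo_value;	/* hypothetical state exposed via the attribute */

	static int foo_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "%d\n", foo_value);
		return 0;
	}

	static ssize_t foo_write(struct file *file, const char __user *ubuf,
				 size_t count, loff_t *ppos)
	{
		/* parse @ubuf into foo_value here */
		return count;
	}

	DEFINE_SHOW_STORE_ATTRIBUTE(foo);	/* emits foo_open() and foo_fops */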
+
+#define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, pde_data(inode)); \
+} \
+ \
static const struct proc_ops __name ## _proc_ops = { \
.proc_open = __name ## _open, \
.proc_read = seq_read, \
@@ -225,18 +275,19 @@ static inline void seq_show_option(struct seq_file *m, const char *name,
/**
* seq_show_option_n - display mount options with appropriate escapes
- * where @value must be a specific length.
+ * where @value must be a specific length (i.e.
+ * not NUL-terminated).
* @m: the seq_file handle
* @name: the mount option name
* @value: the mount option name's value, cannot be NULL
- * @length: the length of @value to display
+ * @length: the exact length of @value to display, must be a constant expression
*
* This is a macro since this uses "length" to define the size of the
* stack buffer.
*/
#define seq_show_option_n(m, name, value, length) { \
char val_buf[length + 1]; \
- strncpy(val_buf, value, length); \
+ memcpy(val_buf, value, length); \
val_buf[length] = '\0'; \
seq_show_option(m, name, val_buf); \
}
@@ -253,6 +304,10 @@ extern struct list_head *seq_list_start_head(struct list_head *head,
extern struct list_head *seq_list_next(void *v, struct list_head *head,
loff_t *ppos);
+extern struct list_head *seq_list_start_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_start_head_rcu(struct list_head *head, loff_t pos);
+extern struct list_head *seq_list_next_rcu(void *v, struct list_head *head, loff_t *ppos);
+
/*
* Helpers for iteration over hlist_head-s in seq_files
*/
diff --git a/include/linux/seq_file_net.h b/include/linux/seq_file_net.h
index 0fdbe1ddd8d1..79638395bc32 100644
--- a/include/linux/seq_file_net.h
+++ b/include/linux/seq_file_net.h
@@ -3,13 +3,15 @@
#define __SEQ_FILE_NET_H__
#include <linux/seq_file.h>
+#include <net/net_trackers.h>
struct net;
extern struct net init_net;
struct seq_net_private {
#ifdef CONFIG_NET_NS
- struct net *net;
+ struct net *net;
+ netns_tracker ns_tracker;
#endif
};
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index f61e34fbaaea..221123660e71 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -17,8 +17,8 @@
#include <linux/kcsan-checks.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
-#include <linux/ww_mutex.h>
#include <linux/preempt.h>
+#include <linux/seqlock_types.h>
#include <linux/spinlock.h>
#include <asm/processor.h>
@@ -38,37 +38,6 @@
*/
#define KCSAN_SEQLOCK_REGION_MAX 1000
-/*
- * Sequence counters (seqcount_t)
- *
- * This is the raw counting mechanism, without any writer protection.
- *
- * Write side critical sections must be serialized and non-preemptible.
- *
- * If readers can be invoked from hardirq or softirq contexts,
- * interrupts or bottom halves must also be respectively disabled before
- * entering the write section.
- *
- * This mechanism can't be used if the protected data contains pointers,
- * as the writer can invalidate a pointer that a reader is following.
- *
- * If the write serialization mechanism is one of the common kernel
- * locking primitives, use a sequence counter with associated lock
- * (seqcount_LOCKNAME_t) instead.
- *
- * If it's desired to automatically handle the sequence counter writer
- * serialization and non-preemptibility requirements, use a sequential
- * lock (seqlock_t) instead.
- *
- * See Documentation/locking/seqlock.rst
- */
-typedef struct seqcount {
- unsigned sequence;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} seqcount_t;
-
static inline void __seqcount_init(seqcount_t *s, const char *name,
struct lock_class_key *key)
{
@@ -133,28 +102,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
*/
/*
- * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
- * disable preemption. It can lead to higher latencies, and the write side
- * sections will not be able to acquire locks which become sleeping locks
- * (e.g. spinlock_t).
- *
- * To remain preemptible while avoiding a possible livelock caused by the
- * reader preempting the writer, use a different technique: let the reader
- * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
- * case, acquire then release the associated LOCKNAME writer serialization
- * lock. This will allow any possibly-preempted writer to make progress
- * until the end of its writer serialization lock critical section.
- *
- * This lock-unlock technique must be implemented for all of PREEMPT_RT
- * sleeping locks. See Documentation/locking/locktypes.rst
- */
-#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
-#define __SEQ_LOCK(expr) expr
-#else
-#define __SEQ_LOCK(expr)
-#endif
-
-/*
* typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
* @seqcount: The real sequence counter
* @lock: Pointer to the associated lock
@@ -164,7 +111,7 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
* static initializer or init function. This enables lockdep to validate
* that the write side critical section is properly serialized.
*
- * LOCKNAME: raw_spinlock, spinlock, rwlock, mutex, or ww_mutex.
+ * LOCKNAME: raw_spinlock, spinlock, rwlock or mutex
*/
/*
@@ -182,9 +129,8 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
#define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
#define seqcount_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, spinlock)
-#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock);
-#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex);
-#define seqcount_ww_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, ww_mutex);
+#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock)
+#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex)
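For instance, associating a seqcount with its spinlock so lockdep can validate the write side (a sketch):

	struct foo {
		spinlock_t lock;
		seqcount_spinlock_t seq;	/* validated against @lock by lockdep */
	};

	static void foo_init(struct foo *f)
	{
		spin_lock_init(&f->lock);
		seqcount_spinlock_init(&f->seq, &f->lock);
	}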
/*
* SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
@@ -193,39 +139,38 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
* @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
* @locktype: LOCKNAME canonical C data type
* @preemptible: preemptibility of above locktype
- * @lockmember: argument for lockdep_assert_held()
- * @lockbase: associated lock release function (prefix only)
- * @lock_acquire: associated lock acquisition function (full call)
- */
-#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
-typedef struct seqcount_##lockname { \
- seqcount_t seqcount; \
- __SEQ_LOCK(locktype *lock); \
-} seqcount_##lockname##_t; \
- \
+ * @lockbase: prefix for associated lock/unlock
+ */
+#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \
static __always_inline seqcount_t * \
__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
{ \
return &s->seqcount; \
} \
\
+static __always_inline const seqcount_t * \
+__seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \
+{ \
+ return &s->seqcount; \
+} \
+ \
static __always_inline unsigned \
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
{ \
- unsigned seq = READ_ONCE(s->seqcount.sequence); \
+ unsigned seq = smp_load_acquire(&s->seqcount.sequence); \
\
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
return seq; \
\
if (preemptible && unlikely(seq & 1)) { \
- __SEQ_LOCK(lock_acquire); \
+ __SEQ_LOCK(lockbase##_lock(s->lock)); \
__SEQ_LOCK(lockbase##_unlock(s->lock)); \
\
/* \
* Re-read the sequence counter since the (possibly \
* preempted) writer made progress. \
*/ \
- seq = READ_ONCE(s->seqcount.sequence); \
+ seq = smp_load_acquire(&s->seqcount.sequence); \
} \
\
return seq; \
@@ -244,7 +189,7 @@ __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
static __always_inline void \
__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
{ \
- __SEQ_LOCK(lockdep_assert_held(lockmember)); \
+ __SEQ_LOCK(lockdep_assert_held(s->lock)); \
}
/*
@@ -256,9 +201,14 @@ static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
return s;
}
+static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
+{
+ return s;
+}
+
static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
- return READ_ONCE(s->sequence);
+ return smp_load_acquire(&s->sequence);
}
static inline bool __seqprop_preemptible(const seqcount_t *s)
@@ -273,11 +223,11 @@ static inline void __seqprop_assert(const seqcount_t *s)
#define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT)
-SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->lock))
-SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock))
-SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock))
-SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock))
-SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mutex, ww_mutex_lock(s->lock, NULL))
+SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
+SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
+SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
+SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
+#undef SEQCOUNT_LOCKNAME
/*
* SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
@@ -297,40 +247,32 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define __seqprop_case(s, lockname, prop) \
- seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))
+ seqcount_##lockname##_t: __seqprop_##lockname##_##prop
#define __seqprop(s, prop) _Generic(*(s), \
- seqcount_t: __seqprop_##prop((void *)(s)), \
+ seqcount_t: __seqprop_##prop, \
__seqprop_case((s), raw_spinlock, prop), \
__seqprop_case((s), spinlock, prop), \
__seqprop_case((s), rwlock, prop), \
- __seqprop_case((s), mutex, prop), \
- __seqprop_case((s), ww_mutex, prop))
+ __seqprop_case((s), mutex, prop))
-#define seqprop_ptr(s) __seqprop(s, ptr)
-#define seqprop_sequence(s) __seqprop(s, sequence)
-#define seqprop_preemptible(s) __seqprop(s, preemptible)
-#define seqprop_assert(s) __seqprop(s, assert)
+#define seqprop_ptr(s) __seqprop(s, ptr)(s)
+#define seqprop_const_ptr(s) __seqprop(s, const_ptr)(s)
+#define seqprop_sequence(s) __seqprop(s, sequence)(s)
+#define seqprop_preemptible(s) __seqprop(s, preemptible)(s)
+#define seqprop_assert(s) __seqprop(s, assert)(s)
/**
- * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
+ * __read_seqcount_begin() - begin a seqcount_t read section
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
- * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
- * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
- * provided before actually loading any of the variables that are to be
- * protected in this critical section.
- *
- * Use carefully, only in critical code, and comment how the barrier is
- * provided.
- *
* Return: count to be passed to read_seqcount_retry()
*/
#define __read_seqcount_begin(s) \
({ \
unsigned __seq; \
\
- while ((__seq = seqprop_sequence(s)) & 1) \
+ while (unlikely((__seq = seqprop_sequence(s)) & 1)) \
cpu_relax(); \
\
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
@@ -343,13 +285,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
*
* Return: count to be passed to read_seqcount_retry()
*/
-#define raw_read_seqcount_begin(s) \
-({ \
- unsigned _seq = __read_seqcount_begin(s); \
- \
- smp_rmb(); \
- _seq; \
-})
+#define raw_read_seqcount_begin(s) __read_seqcount_begin(s)
/**
* read_seqcount_begin() - begin a seqcount_t read critical section
@@ -359,7 +295,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
*/
#define read_seqcount_begin(s) \
({ \
- seqcount_lockdep_reader_access(seqprop_ptr(s)); \
+ seqcount_lockdep_reader_access(seqprop_const_ptr(s)); \
raw_read_seqcount_begin(s); \
})
@@ -378,12 +314,34 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
({ \
unsigned __seq = seqprop_sequence(s); \
\
- smp_rmb(); \
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
__seq; \
})
/**
+ * raw_seqcount_try_begin() - begin a seqcount_t read critical section
+ * w/o lockdep and w/o counter stabilization
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ * @start: count to be passed to read_seqcount_retry()
+ *
+ * Similar to raw_seqcount_begin(), except it enables eliding the critical
+ * section entirely if odd, instead of doing the speculation knowing it will
+ * fail.
+ *
+ * Useful when counter stabilization is more or less equivalent to taking
+ * the lock and there is a slowpath that does that.
+ *
+ * If true, start will be set to the (even) sequence count read.
+ *
+ * Return: true when a read critical section is started.
+ */
+#define raw_seqcount_try_begin(s, start) \
+({ \
+ start = raw_read_seqcount(s); \
+ !(start & 1); \
+})
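The intended pattern, sketched below; the data, lock, and slowpath label are hypothetical:

	unsigned int seq;

	if (!raw_seqcount_try_begin(&f->seq, seq))
		goto slowpath;	/* writer active: don't bother speculating */

	/* ... lockless reads of the protected data ... */

	if (read_seqcount_retry(&f->seq, seq))
		goto slowpath;	/* raced with a writer: stabilize under the lock */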
+
+/**
* raw_seqcount_begin() - begin a seqcount_t read critical section w/o
* lockdep and w/o counter stabilization
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
@@ -425,7 +383,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
* Return: true if a read section retry is required, else false
*/
#define __read_seqcount_retry(s, start) \
- do___read_seqcount_retry(seqprop_ptr(s), start)
+ do___read_seqcount_retry(seqprop_const_ptr(s), start)
static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
@@ -445,7 +403,7 @@ static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
* Return: true if a read section retry is required, else false
*/
#define read_seqcount_retry(s, start) \
- do_read_seqcount_retry(seqprop_ptr(s), start)
+ do_read_seqcount_retry(seqprop_const_ptr(s), start)
static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
@@ -516,8 +474,8 @@ do { \
static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
- do_raw_write_seqcount_begin(s);
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+ do_raw_write_seqcount_begin(s);
}
/**
@@ -578,7 +536,7 @@ static inline void do_write_seqcount_end(seqcount_t *s)
* via WRITE_ONCE): a) to ensure the writes become visible to other threads
* atomically, avoiding compiler optimizations; b) to document which writes are
* meant to propagate to the reader critical section. This is necessary because
- * neither writes before and after the barrier are enclosed in a seq-writer
+ * neither writes before nor after the barrier are enclosed in a seq-writer
* critical section that would ensure readers are aware of ongoing writes::
*
* seqcount_t seq;
@@ -675,9 +633,9 @@ typedef struct {
*
* Return: sequence counter raw value. Use the lowest bit as an index for
* picking which data copy to read. The full counter must then be checked
- * with read_seqcount_latch_retry().
+ * with raw_read_seqcount_latch_retry().
*/
-static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
+static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
{
/*
* Pairs with the first smp_wmb() in raw_write_seqcount_latch().
@@ -687,21 +645,64 @@ static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s)
}
/**
- * read_seqcount_latch_retry() - end a seqcount_latch_t read section
+ * read_seqcount_latch() - pick even/odd latch data copy
+ * @s: Pointer to seqcount_latch_t
+ *
+ * See write_seqcount_latch() for details and a full reader/writer usage
+ * example.
+ *
+ * Return: sequence counter raw value. Use the lowest bit as an index for
+ * picking which data copy to read. The full counter must then be checked
+ * with read_seqcount_latch_retry().
+ */
+static __always_inline unsigned read_seqcount_latch(const seqcount_latch_t *s)
+{
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
+ return raw_read_seqcount_latch(s);
+}
+
+/**
+ * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section
* @s: Pointer to seqcount_latch_t
* @start: count, from raw_read_seqcount_latch()
*
* Return: true if a read section retry is required, else false
*/
-static inline int
+static __always_inline int
+raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
+{
+ smp_rmb();
+ return unlikely(READ_ONCE(s->seqcount.sequence) != start);
+}
+
+/**
+ * read_seqcount_latch_retry() - end a seqcount_latch_t read section
+ * @s: Pointer to seqcount_latch_t
+ * @start: count, from read_seqcount_latch()
+ *
+ * Return: true if a read section retry is required, else false
+ */
+static __always_inline int
read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
{
- return read_seqcount_retry(&s->seqcount, start);
+ kcsan_atomic_next(0);
+ return raw_read_seqcount_latch_retry(s, start);
}
/**
* raw_write_seqcount_latch() - redirect latch readers to even/odd copy
* @s: Pointer to seqcount_latch_t
+ */
+static __always_inline void raw_write_seqcount_latch(seqcount_latch_t *s)
+{
+ smp_wmb(); /* prior stores before incrementing "sequence" */
+ s->seqcount.sequence++;
+ smp_wmb(); /* increment "sequence" before following stores */
+}
+
+/**
+ * write_seqcount_latch_begin() - redirect latch readers to odd copy
+ * @s: Pointer to seqcount_latch_t
*
* The latch technique is a multiversion concurrency control method that allows
* queries during non-atomic modifications. If you can guarantee queries never
@@ -729,17 +730,11 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
*
* void latch_modify(struct latch_struct *latch, ...)
* {
- * smp_wmb(); // Ensure that the last data[1] update is visible
- * latch->seq.sequence++;
- * smp_wmb(); // Ensure that the seqcount update is visible
- *
+ * write_seqcount_latch_begin(&latch->seq);
* modify(latch->data[0], ...);
- *
- * smp_wmb(); // Ensure that the data[0] update is visible
- * latch->seq.sequence++;
- * smp_wmb(); // Ensure that the seqcount update is visible
- *
+ * write_seqcount_latch(&latch->seq);
* modify(latch->data[1], ...);
+ * write_seqcount_latch_end(&latch->seq);
* }
*
* The query will have a form like::
@@ -750,7 +745,7 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
* unsigned seq, idx;
*
* do {
- * seq = raw_read_seqcount_latch(&latch->seq);
+ * seq = read_seqcount_latch(&latch->seq);
*
* idx = seq & 0x01;
* entry = data_query(latch->data[idx], ...);
@@ -780,31 +775,32 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start)
* When data is a dynamic data structure; one should use regular RCU
* patterns to manage the lifetimes of the objects within.
*/
-static inline void raw_write_seqcount_latch(seqcount_latch_t *s)
+static __always_inline void write_seqcount_latch_begin(seqcount_latch_t *s)
{
- smp_wmb(); /* prior stores before incrementing "sequence" */
- s->seqcount.sequence++;
- smp_wmb(); /* increment "sequence" before following stores */
+ kcsan_nestable_atomic_begin();
+ raw_write_seqcount_latch(s);
}
-/*
- * Sequential locks (seqlock_t)
- *
- * Sequence counters with an embedded spinlock for writer serialization
- * and non-preemptibility.
+/**
+ * write_seqcount_latch() - redirect latch readers to even copy
+ * @s: Pointer to seqcount_latch_t
+ */
+static __always_inline void write_seqcount_latch(seqcount_latch_t *s)
+{
+ raw_write_seqcount_latch(s);
+}
+
+/**
+ * write_seqcount_latch_end() - end a seqcount_latch_t write section
+ * @s: Pointer to seqcount_latch_t
*
- * For more info, see:
- * - Comments on top of seqcount_t
- * - Documentation/locking/seqlock.rst
+ * Marks the end of a seqcount_latch_t writer section, after all copies of the
+ * latch-protected data have been updated.
*/
-typedef struct {
- /*
- * Make sure that readers don't starve writers on PREEMPT_RT: use
- * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
- */
- seqcount_spinlock_t seqcount;
- spinlock_t lock;
-} seqlock_t;
+static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
+{
+ kcsan_nestable_atomic_end();
+}
#define __SEQLOCK_UNLOCKED(lockname) \
{ \
@@ -837,11 +833,7 @@ typedef struct {
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
- unsigned ret = read_seqcount_begin(&sl->seqcount);
-
- kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
- kcsan_flat_atomic_begin();
- return ret;
+ return read_seqcount_begin(&sl->seqcount);
}
/**
@@ -857,17 +849,11 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
*/
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
- /*
- * Assume not nested: read_seqretry() may be called multiple times when
- * completing read critical section.
- */
- kcsan_flat_atomic_end();
-
return read_seqcount_retry(&sl->seqcount, start);
}
/*
- * For all seqlock_t write side functions, use the the internal
+ * For all seqlock_t write side functions, use the internal
* do_write_seqcount_begin() instead of generic write_seqcount_begin().
* This way, no redundant lockdep_assert_held() checks are added.
*/
@@ -1223,4 +1209,118 @@ done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
if (seq & 1)
read_sequnlock_excl_irqrestore(lock, flags);
}
+
+enum ss_state {
+ ss_done = 0,
+ ss_lock,
+ ss_lock_irqsave,
+ ss_lockless,
+};
+
+struct ss_tmp {
+ enum ss_state state;
+ unsigned long data;
+ spinlock_t *lock;
+ spinlock_t *lock_irqsave;
+};
+
+static __always_inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
+{
+ if (sst->lock)
+ spin_unlock(sst->lock);
+ if (sst->lock_irqsave)
+ spin_unlock_irqrestore(sst->lock_irqsave, sst->data);
+}
+
+extern void __scoped_seqlock_invalid_target(void);
+
+#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 90000) || defined(CONFIG_KASAN)
+/*
+ * For some reason some GCC-8 architectures (nios2, alpha) have trouble
+ * determining that the ss_done state is impossible in __scoped_seqlock_next()
+ * below.
+ *
+ * Similarly KASAN is known to confuse compilers enough to break this. But we
+ * don't care about code quality for KASAN builds anyway.
+ */
+static inline void __scoped_seqlock_bug(void) { }
+#else
+/*
+ * Canary for compiler optimization -- if the compiler doesn't realize this is
+ * an impossible state, it very likely generates sub-optimal code here.
+ */
+extern void __scoped_seqlock_bug(void);
+#endif
+
+static __always_inline void
+__scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
+{
+ switch (sst->state) {
+ case ss_done:
+ __scoped_seqlock_bug();
+ return;
+
+ case ss_lock:
+ case ss_lock_irqsave:
+ sst->state = ss_done;
+ return;
+
+ case ss_lockless:
+ if (!read_seqretry(lock, sst->data)) {
+ sst->state = ss_done;
+ return;
+ }
+ break;
+ }
+
+ switch (target) {
+ case ss_done:
+ __scoped_seqlock_invalid_target();
+ return;
+
+ case ss_lock:
+ sst->lock = &lock->lock;
+ spin_lock(sst->lock);
+ sst->state = ss_lock;
+ return;
+
+ case ss_lock_irqsave:
+ sst->lock_irqsave = &lock->lock;
+ spin_lock_irqsave(sst->lock_irqsave, sst->data);
+ sst->state = ss_lock_irqsave;
+ return;
+
+ case ss_lockless:
+ sst->data = read_seqbegin(lock);
+ return;
+ }
+}
+
+#define __scoped_seqlock_read(_seqlock, _target, _s) \
+ for (struct ss_tmp _s __cleanup(__scoped_seqlock_cleanup) = \
+ { .state = ss_lockless, .data = read_seqbegin(_seqlock) }; \
+ _s.state != ss_done; \
+ __scoped_seqlock_next(&_s, _seqlock, _target))
+
+/**
+ * scoped_seqlock_read (lock, ss_state) - execute the read side critical
+ * section without manual sequence
+ * counter handling or calls to other
+ * helpers
+ * @lock: pointer to seqlock_t protecting the data
+ * @ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless} indicating
+ * the type of critical read section
+ *
+ * Example:
+ *
+ * scoped_seqlock_read (&lock, ss_lock) {
+ * // read-side critical section
+ * }
+ *
+ * Starts with a lockless pass. If it fails, restarts the critical
+ * section with the lock held.
+ */
+#define scoped_seqlock_read(_seqlock, _target) \
+ __scoped_seqlock_read(_seqlock, _target, __UNIQUE_ID(seqlock))
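And the ss_lockless variant as a sketch: the body may execute more than once, so it should only snapshot data, never publish side effects. All names below are hypothetical:

	static DEFINE_SEQLOCK(foo_seqlock);
	static struct foo_state foo_cur;	/* protected by foo_seqlock */

	static struct foo_state read_foo(void)
	{
		struct foo_state snap;

		scoped_seqlock_read (&foo_seqlock, ss_lockless) {
			/* may run repeatedly until the snapshot is consistent */
			snap = foo_cur;
		}
		return snap;
	}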
+
#endif /* __LINUX_SEQLOCK_H */
diff --git a/include/linux/seqlock_api.h b/include/linux/seqlock_api.h
new file mode 100644
index 000000000000..be91e7d3b826
--- /dev/null
+++ b/include/linux/seqlock_api.h
@@ -0,0 +1 @@
+#include <linux/seqlock.h>
diff --git a/include/linux/seqlock_types.h b/include/linux/seqlock_types.h
new file mode 100644
index 000000000000..dfdf43e3fa3d
--- /dev/null
+++ b/include/linux/seqlock_types.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_SEQLOCK_TYPES_H
+#define __LINUX_SEQLOCK_TYPES_H
+
+#include <linux/lockdep_types.h>
+#include <linux/mutex_types.h>
+#include <linux/spinlock_types.h>
+
+/*
+ * Sequence counters (seqcount_t)
+ *
+ * This is the raw counting mechanism, without any writer protection.
+ *
+ * Write side critical sections must be serialized and non-preemptible.
+ *
+ * If readers can be invoked from hardirq or softirq contexts,
+ * interrupts or bottom halves must also be respectively disabled before
+ * entering the write section.
+ *
+ * This mechanism can't be used if the protected data contains pointers,
+ * as the writer can invalidate a pointer that a reader is following.
+ *
+ * If the write serialization mechanism is one of the common kernel
+ * locking primitives, use a sequence counter with associated lock
+ * (seqcount_LOCKNAME_t) instead.
+ *
+ * If it's desired to automatically handle the sequence counter writer
+ * serialization and non-preemptibility requirements, use a sequential
+ * lock (seqlock_t) instead.
+ *
+ * See Documentation/locking/seqlock.rst
+ */
+typedef struct seqcount {
+ unsigned sequence;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} seqcount_t;
+
+/*
+ * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
+ * disable preemption. It can lead to higher latencies, and the write side
+ * sections will not be able to acquire locks which become sleeping locks
+ * (e.g. spinlock_t).
+ *
+ * To remain preemptible while avoiding a possible livelock caused by the
+ * reader preempting the writer, use a different technique: let the reader
+ * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
+ * case, acquire then release the associated LOCKNAME writer serialization
+ * lock. This will allow any possibly-preempted writer to make progress
+ * until the end of its writer serialization lock critical section.
+ *
+ * This lock-unlock technique must be implemented for all of PREEMPT_RT
+ * sleeping locks. See Documentation/locking/locktypes.rst
+ */
+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
+#define __SEQ_LOCK(expr) expr
+#else
+#define __SEQ_LOCK(expr)
+#endif
+
+#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \
+typedef struct seqcount_##lockname { \
+ seqcount_t seqcount; \
+ __SEQ_LOCK(locktype *lock); \
+} seqcount_##lockname##_t;
+
+SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
+SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
+SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
+SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
+#undef SEQCOUNT_LOCKNAME
+
+/*
+ * Sequential locks (seqlock_t)
+ *
+ * Sequence counters with an embedded spinlock for writer serialization
+ * and non-preemptibility.
+ *
+ * For more info, see:
+ * - Comments on top of seqcount_t
+ * - Documentation/locking/seqlock.rst
+ */
+typedef struct {
+ /*
+ * Make sure that readers don't starve writers on PREEMPT_RT: use
+ * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
+ */
+ seqcount_spinlock_t seqcount;
+ spinlock_t lock;
+} seqlock_t;
+
+#endif /* __LINUX_SEQLOCK_TYPES_H */
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
deleted file mode 100644
index 3cca2b8fac43..000000000000
--- a/include/linux/seqno-fence.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * seqno-fence, using a dma-buf to synchronize fencing
- *
- * Copyright (C) 2012 Texas Instruments
- * Copyright (C) 2012 Canonical Ltd
- * Authors:
- * Rob Clark <robdclark@gmail.com>
- * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- */
-
-#ifndef __LINUX_SEQNO_FENCE_H
-#define __LINUX_SEQNO_FENCE_H
-
-#include <linux/dma-fence.h>
-#include <linux/dma-buf.h>
-
-enum seqno_fence_condition {
- SEQNO_FENCE_WAIT_GEQUAL,
- SEQNO_FENCE_WAIT_NONZERO
-};
-
-struct seqno_fence {
- struct dma_fence base;
-
- const struct dma_fence_ops *ops;
- struct dma_buf *sync_buf;
- uint32_t seqno_ofs;
- enum seqno_fence_condition condition;
-};
-
-extern const struct dma_fence_ops seqno_fence_ops;
-
-/**
- * to_seqno_fence - cast a fence to a seqno_fence
- * @fence: fence to cast to a seqno_fence
- *
- * Returns NULL if the fence is not a seqno_fence,
- * or the seqno_fence otherwise.
- */
-static inline struct seqno_fence *
-to_seqno_fence(struct dma_fence *fence)
-{
- if (fence->ops != &seqno_fence_ops)
- return NULL;
- return container_of(fence, struct seqno_fence, base);
-}
-
-/**
- * seqno_fence_init - initialize a seqno fence
- * @fence: seqno_fence to initialize
- * @lock: pointer to spinlock to use for fence
- * @sync_buf: buffer containing the memory location to signal on
- * @context: the execution context this fence is a part of
- * @seqno_ofs: the offset within @sync_buf
- * @seqno: the sequence # to signal on
- * @cond: fence wait condition
- * @ops: the fence_ops for operations on this seqno fence
- *
- * This function initializes a struct seqno_fence with the passed parameters,
- * and takes a reference on sync_buf which is released on fence destruction.
- *
- * A seqno_fence is a dma_fence which can complete in software when
- * enable_signaling is called, but it also completes when
- * (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true
- *
- * The seqno_fence will take a refcount on the sync_buf until it is
- * destroyed, but the actual lifetime of sync_buf may be longer if one of
- * the callers takes a reference to it.
- *
- * Certain hardware has instructions to insert this type of wait condition
- * in the command stream, so no software intervention is needed.
- * This type of fence can be destroyed before it has completed; however, a
- * reference on the sync_buf dma-buf can be taken. It is encouraged to
- * re-use the same dma-buf for sync_buf, since mapping or unmapping the
- * sync_buf to the device's vm can be expensive.
- *
- * It is recommended for creators of seqno_fence to call dma_fence_signal()
- * before destruction. This will prevent possible issues from wraparound at
- * time of issue vs time of check, since users can check dma_fence_is_signaled()
- * before submitting instructions for the hardware to wait on the fence.
- * However, when ops.enable_signaling is not called, signaling does not have
- * to happen immediately, only before there is any real danger of seqno
- * wraparound.
- */
-static inline void
-seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
- struct dma_buf *sync_buf, uint32_t context,
- uint32_t seqno_ofs, uint32_t seqno,
- enum seqno_fence_condition cond,
- const struct dma_fence_ops *ops)
-{
- BUG_ON(!fence || !sync_buf || !ops);
- BUG_ON(!ops->wait || !ops->enable_signaling ||
- !ops->get_driver_name || !ops->get_timeline_name);
-
- /*
- * ops is used in dma_fence_init for get_driver_name, so needs to be
- * initialized first
- */
- fence->ops = ops;
- dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
- get_dma_buf(sync_buf);
- fence->sync_buf = sync_buf;
- fence->seqno_ofs = seqno_ofs;
- fence->condition = cond;
-}
-
-#endif /* __LINUX_SEQNO_FENCE_H */
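For reference, the completion test the removed seqno-fence relied on is the standard wraparound-safe sequence comparison. A standalone sketch (hypothetical helper, not a kernel API):

  /* True once @current_seqno has reached @target_seqno; the u32
   * subtraction followed by a signed compare tolerates counter
   * wraparound while the two values stay within 2^31 of each other. */
  static inline bool seqno_reached(u32 current_seqno, u32 target_seqno)
  {
      return (s32)(current_seqno - target_seqno) >= 0;
  }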
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index 9f14f9c12ec4..34562eb99931 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -7,6 +7,8 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/uaccess.h>
#include <linux/termios.h>
#include <linux/delay.h>
@@ -25,7 +27,7 @@ struct serdev_device;
* not sleep.
*/
struct serdev_device_ops {
- int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t);
+ size_t (*receive_buf)(struct serdev_device *, const u8 *, size_t);
void (*write_wakeup)(struct serdev_device *);
};
@@ -80,9 +82,8 @@ enum serdev_parity {
* serdev controller structures
*/
struct serdev_controller_ops {
- int (*write_buf)(struct serdev_controller *, const unsigned char *, size_t);
+ ssize_t (*write_buf)(struct serdev_controller *, const u8 *, size_t);
void (*write_flush)(struct serdev_controller *);
- int (*write_room)(struct serdev_controller *);
int (*open)(struct serdev_controller *);
void (*close)(struct serdev_controller *);
void (*set_flow_control)(struct serdev_controller *, bool);
@@ -91,17 +92,20 @@ struct serdev_controller_ops {
void (*wait_until_sent)(struct serdev_controller *, long);
int (*get_tiocm)(struct serdev_controller *);
int (*set_tiocm)(struct serdev_controller *, unsigned int, unsigned int);
+ int (*break_ctl)(struct serdev_controller *ctrl, unsigned int break_state);
};
/**
* struct serdev_controller - interface to the serdev controller
* @dev: Driver model representation of the device.
+ * @host: Serial port hardware controller device
* @nr: number identifier for this controller/bus.
* @serdev: Pointer to slave device for this controller.
* @ops: Controller operations.
*/
struct serdev_controller {
struct device dev;
+ struct device *host;
unsigned int nr;
struct serdev_device *serdev;
const struct serdev_controller_ops *ops;
@@ -164,7 +168,9 @@ struct serdev_device *serdev_device_alloc(struct serdev_controller *);
int serdev_device_add(struct serdev_device *);
void serdev_device_remove(struct serdev_device *);
-struct serdev_controller *serdev_controller_alloc(struct device *, size_t);
+struct serdev_controller *serdev_controller_alloc(struct device *host,
+ struct device *parent,
+ size_t size);
int serdev_controller_add(struct serdev_controller *);
void serdev_controller_remove(struct serdev_controller *);
@@ -178,9 +184,9 @@ static inline void serdev_controller_write_wakeup(struct serdev_controller *ctrl
serdev->ops->write_wakeup(serdev);
}
-static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
- const unsigned char *data,
- size_t count)
+static inline size_t serdev_controller_receive_buf(struct serdev_controller *ctrl,
+ const u8 *data,
+ size_t count)
{
struct serdev_device *serdev = ctrl->serdev;
@@ -197,14 +203,14 @@ void serdev_device_close(struct serdev_device *);
int devm_serdev_device_open(struct device *, struct serdev_device *);
unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
void serdev_device_set_flow_control(struct serdev_device *, bool);
-int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
+int serdev_device_write_buf(struct serdev_device *, const u8 *, size_t);
void serdev_device_wait_until_sent(struct serdev_device *, long);
int serdev_device_get_tiocm(struct serdev_device *);
int serdev_device_set_tiocm(struct serdev_device *, int, int);
+int serdev_device_break_ctl(struct serdev_device *serdev, int break_state);
void serdev_device_write_wakeup(struct serdev_device *);
-int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, long);
+ssize_t serdev_device_write(struct serdev_device *, const u8 *, size_t, long);
void serdev_device_write_flush(struct serdev_device *);
-int serdev_device_write_room(struct serdev_device *);
/*
* serdev device driver functions
@@ -240,7 +246,7 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev
}
static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {}
static inline int serdev_device_write_buf(struct serdev_device *serdev,
- const unsigned char *buf,
+ const u8 *buf,
size_t count)
{
return -ENODEV;
@@ -248,22 +254,23 @@ static inline int serdev_device_write_buf(struct serdev_device *serdev,
static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {}
static inline int serdev_device_get_tiocm(struct serdev_device *serdev)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int serdev_device_set_tiocm(struct serdev_device *serdev, int set, int clear)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
-static inline int serdev_device_write(struct serdev_device *sdev, const unsigned char *buf,
- size_t count, unsigned long timeout)
+static inline int serdev_device_break_ctl(struct serdev_device *serdev, int break_state)
{
- return -ENODEV;
+ return -EOPNOTSUPP;
}
-static inline void serdev_device_write_flush(struct serdev_device *sdev) {}
-static inline int serdev_device_write_room(struct serdev_device *sdev)
+static inline ssize_t serdev_device_write(struct serdev_device *sdev,
+ const u8 *buf, size_t count,
+ unsigned long timeout)
{
- return 0;
+ return -ENODEV;
}
+static inline void serdev_device_write_flush(struct serdev_device *sdev) {}
#define serdev_device_driver_register(x)
#define serdev_device_driver_unregister(x)
@@ -278,18 +285,10 @@ static inline bool serdev_device_get_cts(struct serdev_device *serdev)
static inline int serdev_device_wait_for_cts(struct serdev_device *serdev, bool state, int timeout_ms)
{
- unsigned long timeout;
bool signal;
- timeout = jiffies + msecs_to_jiffies(timeout_ms);
- while (time_is_after_jiffies(timeout)) {
- signal = serdev_device_get_cts(serdev);
- if (signal == state)
- return 0;
- usleep_range(1000, 2000);
- }
-
- return -ETIMEDOUT;
+ return readx_poll_timeout(serdev_device_get_cts, serdev, signal, signal == state,
+ 2000, timeout_ms * 1000);
}
static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enable)
@@ -311,11 +310,13 @@ struct tty_driver;
#ifdef CONFIG_SERIAL_DEV_CTRL_TTYPORT
struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *host,
struct device *parent,
struct tty_driver *drv, int idx);
int serdev_tty_port_unregister(struct tty_port *port);
#else
static inline struct device *serdev_tty_port_register(struct tty_port *port,
+ struct device *host,
struct device *parent,
struct tty_driver *drv, int idx)
{
@@ -327,4 +328,18 @@ static inline int serdev_tty_port_unregister(struct tty_port *port)
}
#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */
+struct acpi_resource;
+struct acpi_resource_uart_serialbus;
+
+#ifdef CONFIG_ACPI
+bool serdev_acpi_get_uart_resource(struct acpi_resource *ares,
+ struct acpi_resource_uart_serialbus **uart);
+#else
+static inline bool serdev_acpi_get_uart_resource(struct acpi_resource *ares,
+ struct acpi_resource_uart_serialbus **uart)
+{
+ return false;
+}
+#endif /* CONFIG_ACPI */
+
#endif /*_LINUX_SERDEV_H */
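A sketch of a client driver adopting the new size_t-based receive callback above (driver names are hypothetical; serdev_device_write_wakeup() is the helper declared in this header):

  /* Must not sleep; returns how many bytes were consumed. */
  static size_t my_receive_buf(struct serdev_device *serdev,
                               const u8 *data, size_t count)
  {
      /* A real driver would parse @data here and may consume less. */
      return count;
  }

  static const struct serdev_device_ops my_serdev_ops = {
      .receive_buf  = my_receive_buf,
      .write_wakeup = serdev_device_write_wakeup,
  };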
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 0916107c77f9..bfda927dde15 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -9,9 +9,29 @@
#ifndef _LINUX_SERIAL_H
#define _LINUX_SERIAL_H
-#include <asm/page.h>
#include <uapi/linux/serial.h>
+#include <uapi/linux/serial_reg.h>
+#define UART_IER_ALL_INTR (UART_IER_MSI | \
+ UART_IER_RLSI | \
+ UART_IER_THRI | \
+ UART_IER_RDI)
+
+/* Helper for dealing with UART_LCR_WLEN* defines */
+#define UART_LCR_WLEN(x) ((x) - 5)
+
+/* FIFO and shifting register empty */
+#define UART_LSR_BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+static inline bool uart_lsr_tx_empty(u16 lsr)
+{
+ return (lsr & UART_LSR_BOTH_EMPTY) == UART_LSR_BOTH_EMPTY;
+}
+
+#define UART_MSR_STATUS_BITS (UART_MSR_DCD | \
+ UART_MSR_RI | \
+ UART_MSR_DSR | \
+ UART_MSR_CTS)
/*
* Counters of the input lines (CTS, DSR, RI, CD) interrupts
@@ -23,11 +43,6 @@ struct async_icount {
__u32 buf_overrun;
};
-/*
- * The size of the serial xmit buffer is 1 page, or 4096 bytes
- */
-#define SERIAL_XMIT_SIZE PAGE_SIZE
-
#include <linux/compiler.h>
#endif /* _LINUX_SERIAL_H */
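A hypothetical polling sketch using the helpers added above: uart_lsr_tx_empty() tests that both the FIFO and the shift register have drained, and UART_LCR_WLEN(8) likewise evaluates to the UART_LCR_WLEN8 encoding (8 data bits):

  /* Busy-wait until the transmitter is fully idle, e.g. before a
   * baud-rate change. Sketch only; a real driver would bound this. */
  static void wait_tx_drained(struct uart_port *port)
  {
      while (!uart_lsr_tx_empty(serial_port_in(port, UART_LSR)))
          cpu_relax();
  }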
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 9e655055112d..01efdce0fda0 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -7,32 +7,52 @@
#ifndef _LINUX_SERIAL_8250_H
#define _LINUX_SERIAL_8250_H
+#include <linux/errno.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/platform_device.h>
+struct uart_8250_port;
+
/*
* This is the platform device platform_data structure
+ *
+ * @mapsize: Port size for ioremap()
+ * @bugs: Port bugs
+ *
+ * @dl_read: ``u32 ()(struct uart_8250_port *up)``
+ *
+ * UART divisor latch read.
+ *
+ * @dl_write: ``void ()(struct uart_8250_port *up, u32 value)``
+ *
+ * Write @value into UART divisor latch.
+ *
+ * Locking: Caller holds port's lock.
*/
struct plat_serial8250_port {
unsigned long iobase; /* io base address */
void __iomem *membase; /* ioremap cookie or NULL */
resource_size_t mapbase; /* resource base */
+ resource_size_t mapsize;
+ unsigned int uartclk; /* UART clock rate */
unsigned int irq; /* interrupt number */
unsigned long irqflags; /* request_irq flags */
- unsigned int uartclk; /* UART clock rate */
void *private_data;
unsigned char regshift; /* register shift */
unsigned char iotype; /* UPIO_* */
unsigned char hub6;
unsigned char has_sysrq; /* supports magic SysRq */
- upf_t flags; /* UPF_* flags */
unsigned int type; /* If UPF_FIXED_TYPE */
- unsigned int (*serial_in)(struct uart_port *, int);
- void (*serial_out)(struct uart_port *, int, int);
+ upf_t flags; /* UPF_* flags */
+ u16 bugs; /* port bugs */
+ u32 (*serial_in)(struct uart_port *, unsigned int offset);
+ void (*serial_out)(struct uart_port *, unsigned int offset, u32 val);
+ u32 (*dl_read)(struct uart_8250_port *up);
+ void (*dl_write)(struct uart_8250_port *up, u32 value);
void (*set_termios)(struct uart_port *,
struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
void (*set_ldisc)(struct uart_port *,
struct ktermios *);
unsigned int (*get_mctrl)(struct uart_port *);
@@ -74,6 +94,7 @@ struct uart_8250_port;
struct uart_8250_ops {
int (*setup_irq)(struct uart_8250_port *);
void (*release_irq)(struct uart_8250_port *);
+ void (*setup_timer)(struct uart_8250_port *);
};
struct uart_8250_em485 {
@@ -89,23 +110,29 @@ struct uart_8250_em485 {
* their own 8250 ports without registering their own
* platform device. Using these will make your driver
* dependent on the 8250 driver.
+ *
+ * @dl_read: ``u32 ()(struct uart_8250_port *port)``
+ *
+ * UART divisor latch read.
+ *
+ * @dl_write: ``void ()(struct uart_8250_port *port, u32 value)``
+ *
+ * Write @value into UART divisor latch.
+ *
+ * Locking: Caller holds port's lock.
*/
-
struct uart_8250_port {
struct uart_port port;
struct timer_list timer; /* "no irq" timer */
struct list_head list; /* ports on this IRQ */
u32 capabilities; /* port capabilities */
- unsigned short bugs; /* port bugs */
- bool fifo_bug; /* min RX trigger if enabled */
+ u16 bugs; /* port bugs */
unsigned int tx_loadsz; /* transmit fifo load size */
unsigned char acr;
unsigned char fcr;
unsigned char ier;
unsigned char lcr;
unsigned char mcr;
- unsigned char mcr_mask; /* mask of user bits */
- unsigned char mcr_force; /* mask of forced bits */
unsigned char cur_iotype; /* Running I/O type */
unsigned int rpm_tx_active;
unsigned char canary; /* non-zero during system sleep
@@ -121,7 +148,8 @@ struct uart_8250_port {
* be immediately processed.
*/
#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS
- unsigned char lsr_saved_flags;
+ u16 lsr_saved_flags;
+ u16 lsr_save_mask;
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
unsigned char msr_saved_flags;
@@ -129,12 +157,12 @@ struct uart_8250_port {
const struct uart_8250_ops *ops;
/* 8250 specific callbacks */
- int (*dl_read)(struct uart_8250_port *);
- void (*dl_write)(struct uart_8250_port *, int);
+ u32 (*dl_read)(struct uart_8250_port *up);
+ void (*dl_write)(struct uart_8250_port *up, u32 value);
struct uart_8250_em485 *em485;
- void (*rs485_start_tx)(struct uart_8250_port *);
- void (*rs485_stop_tx)(struct uart_8250_port *);
+ void (*rs485_start_tx)(struct uart_8250_port *up, bool toggle_ier);
+ void (*rs485_stop_tx)(struct uart_8250_port *up, bool toggle_ier);
/* Serial port overrun backoff */
struct delayed_work overrun_backoff;
@@ -146,34 +174,30 @@ static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up)
return container_of(up, struct uart_8250_port, port);
}
-int serial8250_register_8250_port(struct uart_8250_port *);
+int serial8250_register_8250_port(const struct uart_8250_port *);
void serial8250_unregister_port(int line);
void serial8250_suspend_port(int line);
void serial8250_resume_port(int line);
-extern int early_serial_setup(struct uart_port *port);
-
-extern int early_serial8250_setup(struct earlycon_device *device,
- const char *options);
-extern void serial8250_update_uartclk(struct uart_port *port,
- unsigned int uartclk);
-extern void serial8250_do_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old);
-extern void serial8250_do_set_ldisc(struct uart_port *port,
- struct ktermios *termios);
-extern unsigned int serial8250_do_get_mctrl(struct uart_port *port);
-extern int serial8250_do_startup(struct uart_port *port);
-extern void serial8250_do_shutdown(struct uart_port *port);
-extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
- unsigned int oldstate);
-extern void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
-extern void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
- unsigned int quot,
- unsigned int quot_frac);
-extern int fsl8250_handle_irq(struct uart_port *port);
+int early_serial_setup(struct uart_port *port);
+int early_serial8250_setup(struct earlycon_device *device, const char *options);
+
+void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk);
+void serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+ const struct ktermios *old);
+void serial8250_do_set_ldisc(struct uart_port *port, struct ktermios *termios);
+unsigned int serial8250_do_get_mctrl(struct uart_port *port);
+int serial8250_do_startup(struct uart_port *port);
+void serial8250_do_shutdown(struct uart_port *port);
+void serial8250_do_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate);
+void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
+void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
+ unsigned int quot);
+int fsl8250_handle_irq(struct uart_port *port);
int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
-unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
-void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr);
+u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr);
+void serial8250_read_char(struct uart_8250_port *up, u16 lsr);
void serial8250_tx_chars(struct uart_8250_port *up);
unsigned int serial8250_modem_status(struct uart_8250_port *up);
void serial8250_init_port(struct uart_8250_port *up);
@@ -183,13 +207,21 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
int serial8250_console_exit(struct uart_port *port);
-extern void serial8250_set_isa_configurator(void (*v)
- (int port, struct uart_port *up,
- u32 *capabilities));
+void serial8250_set_isa_configurator(void (*v)(int port, struct uart_port *up,
+ u32 *capabilities));
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+extern int hp300_setup_serial_console(void) __init;
+#else
+static inline int hp300_setup_serial_console(void) { return 0; }
+#endif
#ifdef CONFIG_SERIAL_8250_RT288X
-unsigned int au_serial_in(struct uart_port *p, int offset);
-void au_serial_out(struct uart_port *p, int offset, int value);
+int rt288x_setup(struct uart_port *p);
+int au_platform_setup(struct plat_serial8250_port *p);
+#else
+static inline int rt288x_setup(struct uart_port *p) { return -ENODEV; }
+static inline int au_platform_setup(struct plat_serial8250_port *p) { return -ENODEV; }
#endif
#endif
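A sketch of driver-provided divisor latch accessors matching the new u32-based @dl_read/@dl_write hooks (hypothetical driver code; assumes the caller has already set UART_LCR_DLAB, as on a standard 8250):

  static u32 my_dl_read(struct uart_8250_port *up)
  {
      return serial_port_in(&up->port, UART_DLL) |
             serial_port_in(&up->port, UART_DLM) << 8;
  }

  static void my_dl_write(struct uart_8250_port *up, u32 value)
  {
      serial_port_out(&up->port, UART_DLL, value & 0xff);
      serial_port_out(&up->port, UART_DLM, (value >> 8) & 0xff);
  }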
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index d7ed00f1594e..666430b47899 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -11,7 +11,8 @@
#include <linux/compiler.h>
#include <linux/console.h>
#include <linux/interrupt.h>
-#include <linux/circ_buf.h>
+#include <linux/lockdep.h>
+#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/tty.h>
@@ -28,12 +29,348 @@
struct uart_port;
struct serial_struct;
+struct serial_port_device;
struct device;
struct gpio_desc;
-/*
+/**
+ * struct uart_ops -- interface between serial_core and the driver
+ *
* This structure describes all the operations that can be done on the
- * physical hardware. See Documentation/driver-api/serial/driver.rst for details.
+ * physical hardware.
+ *
+ * @tx_empty: ``unsigned int ()(struct uart_port *port)``
+ *
+ * This function tests whether the transmitter fifo and shifter for the
+ * @port is empty. If it is empty, this function should return
+ * %TIOCSER_TEMT, otherwise return 0. If the port does not support this
+ * operation, then it should return %TIOCSER_TEMT.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ * This call must not sleep
+ *
+ * @set_mctrl: ``void ()(struct uart_port *port, unsigned int mctrl)``
+ *
+ * This function sets the modem control lines for @port to the state
+ * described by @mctrl. The relevant bits of @mctrl are:
+ *
+ * - %TIOCM_RTS RTS signal.
+ * - %TIOCM_DTR DTR signal.
+ * - %TIOCM_OUT1 OUT1 signal.
+ * - %TIOCM_OUT2 OUT2 signal.
+ * - %TIOCM_LOOP Set the port into loopback mode.
+ *
+ * If the appropriate bit is set, the signal should be driven
+ * active. If the bit is clear, the signal should be driven
+ * inactive.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @get_mctrl: ``unsigned int ()(struct uart_port *port)``
+ *
+ * Returns the current state of modem control inputs of @port. The state
+ * of the outputs should not be returned, since the core keeps track of
+ * their state. The state information should include:
+ *
+ * - %TIOCM_CAR state of DCD signal
+ * - %TIOCM_CTS state of CTS signal
+ * - %TIOCM_DSR state of DSR signal
+ * - %TIOCM_RI state of RI signal
+ *
+ * The bit is set if the signal is currently driven active. If
+ * the port does not support CTS, DCD or DSR, the driver should
+ * indicate that the signal is permanently active. If RI is
+ * not available, the signal should not be indicated as active.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @stop_tx: ``void ()(struct uart_port *port)``
+ *
+ * Stop transmitting characters. This might be due to the CTS line
+ * becoming inactive or the tty layer indicating we want to stop
+ * transmission due to an %XOFF character.
+ *
+ * The driver should stop transmitting characters as soon as possible.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @start_tx: ``void ()(struct uart_port *port)``
+ *
+ * Start transmitting characters.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @throttle: ``void ()(struct uart_port *port)``
+ *
+ * Notify the serial driver that input buffers for the line discipline are
+ * close to full, and it should somehow signal that no more characters
+ * should be sent to the serial port.
+ * This will be called only if hardware assisted flow control is enabled.
+ *
+ * Locking: serialized with @unthrottle() and termios modification by the
+ * tty layer.
+ *
+ * @unthrottle: ``void ()(struct uart_port *port)``
+ *
+ * Notify the serial driver that characters can now be sent to the serial
+ * port without fear of overrunning the input buffers of the line
+ * disciplines.
+ *
+ * This will be called only if hardware assisted flow control is enabled.
+ *
+ * Locking: serialized with @throttle() and termios modification by the
+ * tty layer.
+ *
+ * @send_xchar: ``void ()(struct uart_port *port, char ch)``
+ *
+ * Transmit a high priority character, even if the port is stopped. This
+ * is used to implement XON/XOFF flow control and tcflow(). If the serial
+ * driver does not implement this function, the tty core will append the
+ * character to the circular buffer and then call start_tx() / stop_tx()
+ * to flush the data out.
+ *
+ * Do not transmit if @ch == '\0' (%__DISABLED_CHAR).
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @start_rx: ``void ()(struct uart_port *port)``
+ *
+ * Start receiving characters.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @stop_rx: ``void ()(struct uart_port *port)``
+ *
+ * Stop receiving characters; the @port is in the process of being closed.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @enable_ms: ``void ()(struct uart_port *port)``
+ *
+ * Enable the modem status interrupts.
+ *
+ * This method may be called multiple times. Modem status interrupts
+ * should be disabled when the @shutdown() method is called.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @break_ctl: ``void ()(struct uart_port *port, int ctl)``
+ *
+ * Control the transmission of a break signal. If @ctl is nonzero, the
+ * break signal should be transmitted. The signal should be terminated
+ * when another call is made with a zero @ctl.
+ *
+ * Locking: caller holds tty_port->mutex
+ *
+ * @startup: ``int ()(struct uart_port *port)``
+ *
+ * Grab any interrupt resources and initialise any low level driver state.
+ * Enable the port for reception. It should not activate RTS nor DTR;
+ * this will be done via a separate call to @set_mctrl().
+ *
+ * This method will only be called when the port is initially opened.
+ *
+ * Locking: port_sem taken.
+ * Interrupts: globally disabled.
+ *
+ * @shutdown: ``void ()(struct uart_port *port)``
+ *
+ * Disable the @port, disable any break condition that may be in effect,
+ * and free any interrupt resources. It should not disable RTS nor DTR;
+ * this will have already been done via a separate call to @set_mctrl().
+ *
+ * Drivers must not access @port->state once this call has completed.
+ *
+ * This method will only be called when there are no more users of this
+ * @port.
+ *
+ * Locking: port_sem taken.
+ * Interrupts: caller dependent.
+ *
+ * @flush_buffer: ``void ()(struct uart_port *port)``
+ *
+ * Flush any write buffers, reset any DMA state and stop any ongoing DMA
+ * transfers.
+ *
+ * This will be called whenever the @port->state->xmit circular buffer is
+ * cleared.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
+ * @set_termios: ``void ()(struct uart_port *port, struct ktermios *new,
+ * struct ktermios *old)``
+ *
+ * Change the @port parameters, including word length, parity, stop bits.
+ * Update @port->read_status_mask and @port->ignore_status_mask to
+ * indicate the types of events we are interested in receiving. Relevant
+ * ktermios::c_cflag bits are:
+ *
+ * - %CSIZE - word size
+ * - %CSTOPB - 2 stop bits
+ * - %PARENB - parity enable
+ * - %PARODD - odd parity (when %PARENB is in force)
+ * - %ADDRB - address bit (changed through uart_port::rs485_config()).
+ * - %CREAD - enable reception of characters (if not set, still receive
+ * characters from the port, but throw them away).
+ * - %CRTSCTS - if set, enable CTS status change reporting.
+ * - %CLOCAL - if not set, enable modem status change reporting.
+ *
+ * Relevant ktermios::c_iflag bits are:
+ *
+ * - %INPCK - enable frame and parity error events to be passed to the TTY
+ * layer.
+ * - %BRKINT / %PARMRK - both of these enable break events to be passed to
+ * the TTY layer.
+ * - %IGNPAR - ignore parity and framing errors.
+ * - %IGNBRK - ignore break errors. If %IGNPAR is also set, ignore overrun
+ * errors as well.
+ *
+ * The interaction of the ktermios::c_iflag bits is as follows (parity
+ * error given as an example):
+ *
+ * ============ ======= ======= =========================================
+ * Parity error INPCK IGNPAR
+ * ============ ======= ======= =========================================
+ * n/a 0 n/a character received, marked as %TTY_NORMAL
+ * None 1 n/a character received, marked as %TTY_NORMAL
+ * Yes 1 0 character received, marked as %TTY_PARITY
+ * Yes 1 1 character discarded
+ * ============ ======= ======= =========================================
+ *
+ * Other flags may be used (eg, xon/xoff characters) if your hardware
+ * supports hardware "soft" flow control.
+ *
+ * Locking: caller holds tty_port->mutex
+ * Interrupts: caller dependent.
+ * This call must not sleep
+ *
+ * @set_ldisc: ``void ()(struct uart_port *port, struct ktermios *termios)``
+ *
+ * Notifier for discipline change. See
+ * Documentation/driver-api/tty/tty_ldisc.rst.
+ *
+ * Locking: caller holds tty_port->mutex
+ *
+ * @pm: ``void ()(struct uart_port *port, unsigned int state,
+ * unsigned int oldstate)``
+ *
+ * Perform any power management related activities on the specified @port.
+ * @state indicates the new state (defined by enum uart_pm_state),
+ * @oldstate indicates the previous state.
+ *
+ * This function should not be used to grab any resources.
+ *
+ * This will be called when the @port is initially opened and finally
+ * closed, except when the @port is also the system console. This will
+ * occur even if %CONFIG_PM is not set.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @type: ``const char *()(struct uart_port *port)``
+ *
+ * Return a pointer to a string constant describing the specified @port,
+ * or return %NULL, in which case the string 'unknown' is substituted.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @release_port: ``void ()(struct uart_port *port)``
+ *
+ * Release any memory and IO region resources currently in use by the
+ * @port.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @request_port: ``int ()(struct uart_port *port)``
+ *
+ * Request any memory and IO region resources required by the port. If any
+ * fail, no resources should be registered when this function returns, and
+ * it should return -%EBUSY on failure.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @config_port: ``void ()(struct uart_port *port, int type)``
+ *
+ * Perform any autoconfiguration steps required for the @port. @type
+ * contains a bit mask of the required configuration. %UART_CONFIG_TYPE
+ * indicates that the port requires detection and identification.
+ * @port->type should be set to the type found, or %PORT_UNKNOWN if no
+ * port was detected.
+ *
+ * %UART_CONFIG_IRQ indicates autoconfiguration of the interrupt signal,
+ * which should be probed using standard kernel autoprobing techniques.
+ * This is not necessary on platforms where ports have interrupts
+ * internally hard wired (eg, system on a chip implementations).
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @verify_port: ``int ()(struct uart_port *port,
+ * struct serial_struct *serinfo)``
+ *
+ * Verify the new serial port information contained within @serinfo is
+ * suitable for this port type.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @ioctl: ``int ()(struct uart_port *port, unsigned int cmd,
+ * unsigned long arg)``
+ *
+ * Perform any port specific IOCTLs. IOCTL commands must be defined using
+ * the standard numbering system found in <asm/ioctl.h>.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ *
+ * @poll_init: ``int ()(struct uart_port *port)``
+ *
+ * Called by kgdb to perform the minimal hardware initialization needed to
+ * support @poll_put_char() and @poll_get_char(). Unlike @startup(), this
+ * should not request interrupts.
+ *
+ * Locking: %tty_mutex and tty_port->mutex taken.
+ * Interrupts: n/a.
+ *
+ * @poll_put_char: ``void ()(struct uart_port *port, unsigned char ch)``
+ *
+ * Called by kgdb to write a single character @ch directly to the serial
+ * @port. It can and should block until there is space in the TX FIFO.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ * This call must not sleep
+ *
+ * @poll_get_char: ``int ()(struct uart_port *port)``
+ *
+ * Called by kgdb to read a single character directly from the serial
+ * port. If data is available, it should be returned; otherwise the
+ * function should return %NO_POLL_CHAR immediately.
+ *
+ * Locking: none.
+ * Interrupts: caller dependent.
+ * This call must not sleep
*/
struct uart_ops {
unsigned int (*tx_empty)(struct uart_port *);
@@ -45,32 +382,19 @@ struct uart_ops {
void (*unthrottle)(struct uart_port *);
void (*send_xchar)(struct uart_port *, char ch);
void (*stop_rx)(struct uart_port *);
+ void (*start_rx)(struct uart_port *);
void (*enable_ms)(struct uart_port *);
void (*break_ctl)(struct uart_port *, int ctl);
int (*startup)(struct uart_port *);
void (*shutdown)(struct uart_port *);
void (*flush_buffer)(struct uart_port *);
void (*set_termios)(struct uart_port *, struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
void (*set_ldisc)(struct uart_port *, struct ktermios *);
void (*pm)(struct uart_port *, unsigned int state,
unsigned int oldstate);
-
- /*
- * Return a string describing the type of the port
- */
const char *(*type)(struct uart_port *);
-
- /*
- * Release IO and memory resources used by the port.
- * This includes iounmap if necessary.
- */
void (*release_port)(struct uart_port *);
-
- /*
- * Request IO and memory resources used by the port.
- * This includes iomapping the port if necessary.
- */
int (*request_port)(struct uart_port *);
void (*config_port)(struct uart_port *, int);
int (*verify_port)(struct uart_port *, struct serial_struct *);
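Tying the @tx_empty contract documented above to code, a hypothetical 8250-style hook (sketch, not from this patch):

  static unsigned int my_tx_empty(struct uart_port *port)
  {
      /* Report TIOCSER_TEMT only when FIFO and shifter are both empty. */
      return uart_lsr_tx_empty(serial_port_in(port, UART_LSR)) ?
             TIOCSER_TEMT : 0;
  }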
@@ -100,18 +424,30 @@ struct uart_icount {
__u32 buf_overrun;
};
-typedef unsigned int __bitwise upf_t;
+typedef u64 __bitwise upf_t;
typedef unsigned int __bitwise upstat_t;
+enum uart_iotype {
+ UPIO_UNKNOWN = -1,
+ UPIO_PORT = SERIAL_IO_PORT, /* 8b I/O port access */
+ UPIO_HUB6 = SERIAL_IO_HUB6, /* Hub6 ISA card */
+ UPIO_MEM = SERIAL_IO_MEM, /* driver-specific */
+ UPIO_MEM32 = SERIAL_IO_MEM32, /* 32b little endian */
+ UPIO_AU = SERIAL_IO_AU, /* Au1x00 and RT288x type IO */
+ UPIO_TSI = SERIAL_IO_TSI, /* Tsi108/109 type IO */
+ UPIO_MEM32BE = SERIAL_IO_MEM32BE, /* 32b big endian */
+ UPIO_MEM16 = SERIAL_IO_MEM16, /* 16b little endian */
+};
+
struct uart_port {
spinlock_t lock; /* port lock */
unsigned long iobase; /* in/out[bwl] */
unsigned char __iomem *membase; /* read/write[bwl] */
- unsigned int (*serial_in)(struct uart_port *, int);
- void (*serial_out)(struct uart_port *, int, int);
+ u32 (*serial_in)(struct uart_port *, unsigned int offset);
+ void (*serial_out)(struct uart_port *, unsigned int offset, u32 val);
void (*set_termios)(struct uart_port *,
struct ktermios *new,
- struct ktermios *old);
+ const struct ktermios *old);
void (*set_ldisc)(struct uart_port *,
struct ktermios *);
unsigned int (*get_mctrl)(struct uart_port *);
@@ -132,30 +468,26 @@ struct uart_port {
unsigned int old);
void (*handle_break)(struct uart_port *);
int (*rs485_config)(struct uart_port *,
+ struct ktermios *termios,
struct serial_rs485 *rs485);
int (*iso7816_config)(struct uart_port *,
struct serial_iso7816 *iso7816);
+ unsigned int ctrl_id; /* optional serial core controller id */
+ unsigned int port_id; /* optional serial core port id */
unsigned int irq; /* irq number */
unsigned long irqflags; /* irq flags */
unsigned int uartclk; /* base uart clock */
unsigned int fifosize; /* tx fifo size */
unsigned char x_char; /* xon/xoff char */
unsigned char regshift; /* reg offset shift */
- unsigned char iotype; /* io access style */
- unsigned char quirks; /* internal quirks */
-#define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */
-#define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */
-#define UPIO_MEM (SERIAL_IO_MEM) /* driver-specific */
-#define UPIO_MEM32 (SERIAL_IO_MEM32) /* 32b little endian */
-#define UPIO_AU (SERIAL_IO_AU) /* Au1x00 and RT288x type IO */
-#define UPIO_TSI (SERIAL_IO_TSI) /* Tsi108/109 type IO */
-#define UPIO_MEM32BE (SERIAL_IO_MEM32BE) /* 32b big endian */
-#define UPIO_MEM16 (SERIAL_IO_MEM16) /* 16b little endian */
+ unsigned char quirks; /* internal quirks */
- /* quirks must be updated while holding port mutex */
+ /* internal quirks must be updated while holding port mutex */
#define UPQ_NO_TXEN_TEST BIT(0)
+ enum uart_iotype iotype; /* io access style */
+
unsigned int read_status_mask; /* driver specific */
unsigned int ignore_status_mask; /* driver specific */
struct uart_state *state; /* pointer to parent state */
@@ -171,11 +503,15 @@ struct uart_port {
* assigned from the serial_struct flags in uart_set_info()
* [for bit definitions in the UPF_CHANGE_MASK]
*
- * Bits [0..UPF_LAST_USER] are userspace defined/visible/changeable
+ * Bits [0..ASYNCB_LAST_USER] are userspace defined/visible/changeable
* The remaining bits are serial-core specific and not modifiable by
* userspace.
*/
+#ifdef CONFIG_HAS_IOPORT
#define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ )
+#else
+#define UPF_FOURPORT 0
+#endif
#define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ )
#define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ )
#define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ )
@@ -190,23 +526,24 @@ struct uart_port {
#define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ )
#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ )
-#define UPF_NO_THRE_TEST ((__force upf_t) (1 << 19))
+#define UPF_NO_THRE_TEST ((__force upf_t) BIT_ULL(19))
/* Port has hardware-assisted h/w flow control */
-#define UPF_AUTO_CTS ((__force upf_t) (1 << 20))
-#define UPF_AUTO_RTS ((__force upf_t) (1 << 21))
+#define UPF_AUTO_CTS ((__force upf_t) BIT_ULL(20))
+#define UPF_AUTO_RTS ((__force upf_t) BIT_ULL(21))
#define UPF_HARD_FLOW ((__force upf_t) (UPF_AUTO_CTS | UPF_AUTO_RTS))
/* Port has hardware-assisted s/w flow control */
-#define UPF_SOFT_FLOW ((__force upf_t) (1 << 22))
-#define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
-#define UPF_SHARE_IRQ ((__force upf_t) (1 << 24))
-#define UPF_EXAR_EFR ((__force upf_t) (1 << 25))
-#define UPF_BUG_THRE ((__force upf_t) (1 << 26))
+#define UPF_SOFT_FLOW ((__force upf_t) BIT_ULL(22))
+#define UPF_CONS_FLOW ((__force upf_t) BIT_ULL(23))
+#define UPF_SHARE_IRQ ((__force upf_t) BIT_ULL(24))
+#define UPF_EXAR_EFR ((__force upf_t) BIT_ULL(25))
+#define UPF_BUG_THRE ((__force upf_t) BIT_ULL(26))
/* The exact UART type is known and should not be probed. */
-#define UPF_FIXED_TYPE ((__force upf_t) (1 << 27))
-#define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28))
-#define UPF_FIXED_PORT ((__force upf_t) (1 << 29))
-#define UPF_DEAD ((__force upf_t) (1 << 30))
-#define UPF_IOREMAP ((__force upf_t) (1 << 31))
+#define UPF_FIXED_TYPE ((__force upf_t) BIT_ULL(27))
+#define UPF_BOOT_AUTOCONF ((__force upf_t) BIT_ULL(28))
+#define UPF_FIXED_PORT ((__force upf_t) BIT_ULL(29))
+#define UPF_DEAD ((__force upf_t) BIT_ULL(30))
+#define UPF_IOREMAP ((__force upf_t) BIT_ULL(31))
+#define UPF_FULL_PROBE ((__force upf_t) BIT_ULL(32))
#define __UPF_CHANGE_MASK 0x17fff
#define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK)
@@ -229,9 +566,9 @@ struct uart_port {
#define UPSTAT_AUTOXOFF ((__force upstat_t) (1 << 4))
#define UPSTAT_SYNC_FIFO ((__force upstat_t) (1 << 5))
- int hw_stopped; /* sw-assisted CTS flow state */
+ bool hw_stopped; /* sw-assisted CTS flow state */
unsigned int mctrl; /* current modem ctrl settings */
- unsigned int timeout; /* character-based timeout */
+ unsigned int frame_time; /* frame timing in ns */
unsigned int type; /* port type */
const struct uart_ops *ops;
unsigned int custom_divisor;
@@ -239,10 +576,11 @@ struct uart_port {
unsigned int minor;
resource_size_t mapbase; /* for ioremap */
resource_size_t mapsize;
- struct device *dev; /* parent device */
+ struct device *dev; /* serial port physical parent device */
+ struct serial_port_device *port_dev; /* serial core port device */
unsigned long sysrq; /* sysrq timeout */
- unsigned int sysrq_ch; /* char for sysrq */
+ u8 sysrq_ch; /* char for sysrq */
unsigned char has_sysrq;
unsigned char sysrq_seq; /* index in sysrq_toggle_seq */
@@ -253,11 +591,216 @@ struct uart_port {
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
struct serial_rs485 rs485;
+ struct serial_rs485 rs485_supported; /* Supported mask for serial_rs485 */
struct gpio_desc *rs485_term_gpio; /* enable RS485 bus termination */
+ struct gpio_desc *rs485_rx_during_tx_gpio; /* Output GPIO that sets the state of RS485 RX during TX */
struct serial_iso7816 iso7816;
void *private_data; /* generic platform data pointer */
};
+/*
+ * Only for console->device_lock()/_unlock() callbacks and internal
+ * port lock wrapper synchronization.
+ */
+static inline void __uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+ spin_lock_irqsave(&up->lock, *flags);
+}
+
+/*
+ * Only for console->device_lock()/_unlock() callbacks and internal
+ * port lock wrapper synchronization.
+ */
+static inline void __uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
+{
+ spin_unlock_irqrestore(&up->lock, flags);
+}
+
+/**
+ * uart_port_set_cons - Safely set the @cons field for a uart
+ * @up: The uart port to set
+ * @con: The new console to set to
+ *
+ * This function must be used to set @up->cons. It uses the port lock to
+ * synchronize with the port lock wrappers in order to ensure that the console
+ * cannot change or disappear while another context is holding the port lock.
+ */
+static inline void uart_port_set_cons(struct uart_port *up, struct console *con)
+{
+ unsigned long flags;
+
+ __uart_port_lock_irqsave(up, &flags);
+ up->cons = con;
+ __uart_port_unlock_irqrestore(up, flags);
+}
+
+/* Only for internal port lock wrapper usage. */
+static inline bool __uart_port_using_nbcon(struct uart_port *up)
+{
+ lockdep_assert_held_once(&up->lock);
+
+ if (likely(!uart_console(up)))
+ return false;
+
+ /*
+ * @up->cons is only modified under the port lock. Therefore it is
+ * certain that it cannot disappear here.
+ *
+ * @up->cons->node is added/removed from the console list under the
+ * port lock. Therefore it is certain that the registration status
+ * cannot change here, thus @up->cons->flags can be read directly.
+ */
+ if (hlist_unhashed_lockless(&up->cons->node) ||
+ !(up->cons->flags & CON_NBCON) ||
+ !up->cons->write_atomic) {
+ return false;
+ }
+
+ return true;
+}
+
+/* Only for internal port lock wrapper usage. */
+static inline bool __uart_port_nbcon_try_acquire(struct uart_port *up)
+{
+ if (!__uart_port_using_nbcon(up))
+ return true;
+
+ return nbcon_device_try_acquire(up->cons);
+}
+
+/* Only for internal port lock wrapper usage. */
+static inline void __uart_port_nbcon_acquire(struct uart_port *up)
+{
+ if (!__uart_port_using_nbcon(up))
+ return;
+
+ while (!nbcon_device_try_acquire(up->cons))
+ cpu_relax();
+}
+
+/* Only for internal port lock wrapper usage. */
+static inline void __uart_port_nbcon_release(struct uart_port *up)
+{
+ if (!__uart_port_using_nbcon(up))
+ return;
+
+ nbcon_device_release(up->cons);
+}
+
+/**
+ * uart_port_lock - Lock the UART port
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_lock(struct uart_port *up)
+{
+ spin_lock(&up->lock);
+ __uart_port_nbcon_acquire(up);
+}
+
+/**
+ * uart_port_lock_irq - Lock the UART port and disable interrupts
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_lock_irq(struct uart_port *up)
+{
+ spin_lock_irq(&up->lock);
+ __uart_port_nbcon_acquire(up);
+}
+
+/**
+ * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
+ * @up: Pointer to UART port structure
+ * @flags: Pointer to interrupt flags storage
+ */
+static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+ spin_lock_irqsave(&up->lock, *flags);
+ __uart_port_nbcon_acquire(up);
+}
+
+/**
+ * uart_port_trylock - Try to lock the UART port
+ * @up: Pointer to UART port structure
+ *
+ * Returns: True if lock was acquired, false otherwise
+ */
+static inline bool uart_port_trylock(struct uart_port *up)
+{
+ if (!spin_trylock(&up->lock))
+ return false;
+
+ if (!__uart_port_nbcon_try_acquire(up)) {
+ spin_unlock(&up->lock);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
+ * @up: Pointer to UART port structure
+ * @flags: Pointer to interrupt flags storage
+ *
+ * Returns: True if lock was acquired, false otherwise
+ */
+static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+ if (!spin_trylock_irqsave(&up->lock, *flags))
+ return false;
+
+ if (!__uart_port_nbcon_try_acquire(up)) {
+ spin_unlock_irqrestore(&up->lock, *flags);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * uart_port_unlock - Unlock the UART port
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_unlock(struct uart_port *up)
+{
+ __uart_port_nbcon_release(up);
+ spin_unlock(&up->lock);
+}
+
+/**
+ * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_unlock_irq(struct uart_port *up)
+{
+ __uart_port_nbcon_release(up);
+ spin_unlock_irq(&up->lock);
+}
+
+/**
+ * uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts
+ * @up: Pointer to UART port structure
+ * @flags: The saved interrupt flags for restore
+ */
+static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
+{
+ __uart_port_nbcon_release(up);
+ spin_unlock_irqrestore(&up->lock, flags);
+}
+
+DEFINE_GUARD(uart_port_lock, struct uart_port *, uart_port_lock(_T), uart_port_unlock(_T));
+DEFINE_GUARD_COND(uart_port_lock, _try, uart_port_trylock(_T));
+
+DEFINE_GUARD(uart_port_lock_irq, struct uart_port *, uart_port_lock_irq(_T),
+ uart_port_unlock_irq(_T));
+
+DEFINE_LOCK_GUARD_1(uart_port_lock_irqsave, struct uart_port,
+ uart_port_lock_irqsave(_T->lock, &_T->flags),
+ uart_port_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags);
+DEFINE_LOCK_GUARD_1_COND(uart_port_lock_irqsave, _try,
+ uart_port_trylock_irqsave(_T->lock, &_T->flags));
+
static inline int serial_port_in(struct uart_port *up, int offset)
{
return up->serial_in(up, offset);
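The wrappers above replace open-coded spin_lock_*() calls on @port->lock so that nbcon console ownership is handled transparently. A hypothetical caller using the scope-based guard (requires linux/cleanup.h):

  static void my_update_mctrl(struct uart_port *port, unsigned int mctrl)
  {
      guard(uart_port_lock_irqsave)(port);
      port->ops->set_mctrl(port, mctrl);
  }   /* lock dropped and IRQ flags restored at scope exit */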
@@ -287,7 +830,6 @@ struct uart_state {
struct tty_port port;
enum uart_pm_state pm_state;
- struct circ_buf xmit;
atomic_t refcount;
wait_queue_head_t remove_wait;
@@ -300,6 +842,46 @@ struct uart_state {
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
+/**
+ * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars
+ * @up: uart_port structure describing the port
+ * @chars: number of characters sent
+ *
+ * This function advances the tail of circular xmit buffer by the number of
+ * @chars transmitted and handles accounting of transmitted bytes (into
+ * @up's icount.tx).
+ */
+static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
+{
+ struct tty_port *tport = &up->state->port;
+
+ kfifo_skip_count(&tport->xmit_fifo, chars);
+ up->icount.tx += chars;
+}
+
+static inline unsigned int uart_fifo_out(struct uart_port *up,
+ unsigned char *buf, unsigned int chars)
+{
+ struct tty_port *tport = &up->state->port;
+
+ chars = kfifo_out(&tport->xmit_fifo, buf, chars);
+ up->icount.tx += chars;
+
+ return chars;
+}
+
+static inline unsigned int uart_fifo_get(struct uart_port *up,
+ unsigned char *ch)
+{
+ struct tty_port *tport = &up->state->port;
+ unsigned int chars;
+
+ chars = kfifo_get(&tport->xmit_fifo, ch);
+ up->icount.tx += chars;
+
+ return chars;
+}
+
struct module;
struct tty_driver;
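A hypothetical TX path built on the kfifo helpers above, which replace the removed circ_buf-based xmit buffer (sketch; register access is 8250-style):

  static void my_tx_chars(struct uart_port *port)
  {
      unsigned char ch;

      if (port->x_char) {                     /* high-priority XON/XOFF */
          serial_port_out(port, UART_TX, port->x_char);
          port->icount.tx++;
          port->x_char = 0;
          return;
      }

      /* uart_fifo_get() pops one byte and accounts icount.tx itself. */
      while ((serial_port_in(port, UART_LSR) & UART_LSR_THRE) &&
             uart_fifo_get(port, &ch))
          serial_port_out(port, UART_TX, ch);
  }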
@@ -322,20 +904,154 @@ struct uart_driver {
void uart_write_wakeup(struct uart_port *port);
+/**
+ * enum UART_TX_FLAGS -- flags for uart_port_tx_flags()
+ *
+ * @UART_TX_NOSTOP: don't call port->ops->stop_tx() on empty buffer
+ */
+enum UART_TX_FLAGS {
+ UART_TX_NOSTOP = BIT(0),
+};
+
+#define __uart_port_tx(uport, ch, flags, tx_ready, put_char, tx_done, \
+ for_test, for_post) \
+({ \
+ struct uart_port *__port = (uport); \
+ struct tty_port *__tport = &__port->state->port; \
+ unsigned int pending; \
+ \
+ for (; (for_test) && (tx_ready); (for_post), __port->icount.tx++) { \
+ if (__port->x_char) { \
+ (ch) = __port->x_char; \
+ (put_char); \
+ __port->x_char = 0; \
+ continue; \
+ } \
+ \
+ if (uart_tx_stopped(__port)) \
+ break; \
+ \
+ if (!kfifo_get(&__tport->xmit_fifo, &(ch))) \
+ break; \
+ \
+ (put_char); \
+ } \
+ \
+ (tx_done); \
+ \
+ pending = kfifo_len(&__tport->xmit_fifo); \
+ if (pending < WAKEUP_CHARS) { \
+ uart_write_wakeup(__port); \
+ \
+ if (!((flags) & UART_TX_NOSTOP) && pending == 0) \
+ __port->ops->stop_tx(__port); \
+ } \
+ \
+ pending; \
+})
+
+/**
+ * uart_port_tx_limited -- transmit helper for uart_port with count limiting
+ * @port: uart port
+ * @ch: variable to store a character to be written to the HW
+ * @count: a limit of characters to send
+ * @tx_ready: can HW accept more data function
+ * @put_char: function to write a character
+ * @tx_done: function to call after the loop is done
+ *
+ * This helper transmits characters from the xmit buffer to the hardware using
+ * @put_char(). It does so until @count characters are sent and while @tx_ready
+ * evaluates to true.
+ *
+ * Returns: the number of characters in the xmit buffer when done.
+ *
+ * The expression in macro parameters shall be designed as follows:
+ * * **tx_ready:** should evaluate to true if the HW can accept more data to
+ * be sent. This parameter can be %true, which means the HW is always ready.
+ * * **put_char:** shall write @ch to the device of @port.
+ * * **tx_done:** when the write loop is done, this can perform arbitrary
+ * action before potential invocation of ops->stop_tx() happens. If the
+ * driver does not need to do anything, use e.g. ({}).
+ *
+ * For all of them, @port->lock is held, interrupts are locally disabled and
+ * the expressions must not sleep.
+ */
+#define uart_port_tx_limited(port, ch, count, tx_ready, put_char, tx_done) ({ \
+ unsigned int __count = (count); \
+ __uart_port_tx(port, ch, 0, tx_ready, put_char, tx_done, __count, \
+ __count--); \
+})
+
+/**
+ * uart_port_tx_limited_flags -- transmit helper for uart_port with count limiting with flags
+ * @port: uart port
+ * @ch: variable to store a character to be written to the HW
+ * @flags: %UART_TX_NOSTOP or similar
+ * @count: a limit of characters to send
+ * @tx_ready: can HW accept more data function
+ * @put_char: function to write a character
+ * @tx_done: function to call after the loop is done
+ *
+ * See uart_port_tx_limited() for more details.
+ */
+#define uart_port_tx_limited_flags(port, ch, flags, count, tx_ready, put_char, tx_done) ({ \
+ unsigned int __count = (count); \
+ __uart_port_tx(port, ch, flags, tx_ready, put_char, tx_done, __count, \
+ __count--); \
+})
+
+/**
+ * uart_port_tx -- transmit helper for uart_port
+ * @port: uart port
+ * @ch: variable to store a character to be written to the HW
+ * @tx_ready: can HW accept more data function
+ * @put_char: function to write a character
+ *
+ * See uart_port_tx_limited() for more details.
+ */
+#define uart_port_tx(port, ch, tx_ready, put_char) \
+ __uart_port_tx(port, ch, 0, tx_ready, put_char, ({}), true, ({}))
+
+
+/**
+ * uart_port_tx_flags -- transmit helper for uart_port with flags
+ * @port: uart port
+ * @ch: variable to store a character to be written to the HW
+ * @flags: %UART_TX_NOSTOP or similar
+ * @tx_ready: can HW accept more data function
+ * @put_char: function to write a character
+ *
+ * See uart_port_tx_limited() for more details.
+ */
+#define uart_port_tx_flags(port, ch, flags, tx_ready, put_char) \
+ __uart_port_tx(port, ch, flags, tx_ready, put_char, ({}), true, ({}))
/*
* Baud rate helpers.
*/
void uart_update_timeout(struct uart_port *port, unsigned int cflag,
unsigned int baud);
unsigned int uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old, unsigned int min,
+ const struct ktermios *old, unsigned int min,
unsigned int max);
unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud);
+/*
+ * Calculates FIFO drain time.
+ */
+static inline unsigned long uart_fifo_timeout(struct uart_port *port)
+{
+ u64 fifo_timeout = (u64)READ_ONCE(port->frame_time) * port->fifosize;
+
+ /* Add .02 seconds of slop */
+ fifo_timeout += 20 * NSEC_PER_MSEC;
+
+ return max(nsecs_to_jiffies(fifo_timeout), 1UL);
+}
+
/* Base timer interval for polling */
-static inline int uart_poll_timeout(struct uart_port *port)
+static inline unsigned long uart_poll_timeout(struct uart_port *port)
{
- int timeout = port->timeout;
+ unsigned long timeout = uart_fifo_timeout(port);
return timeout > 6 ? (timeout / 2 - 2) : 1;
}
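The same TX loop expressed with the helper macro documented above; uart_port_tx() handles @x_char, the tx-stopped check, the kfifo pop, TX accounting and the write wakeup (hypothetical 8250-style driver code):

  static void my_handle_tx(struct uart_port *port)
  {
      u8 ch;

      uart_port_tx(port, ch,
          serial_port_in(port, UART_LSR) & UART_LSR_THRE,  /* tx_ready */
          serial_port_out(port, UART_TX, ch));             /* put_char */
  }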
@@ -346,7 +1062,7 @@ static inline int uart_poll_timeout(struct uart_port *port)
struct earlycon_device {
struct console *con;
struct uart_port port;
- char options[16]; /* e.g., 115200n8 */
+ char options[32]; /* e.g., 115200n8 */
unsigned int baud;
};
@@ -376,9 +1092,8 @@ extern const struct earlycon_id __earlycon_table_end[];
#define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
-extern int of_setup_earlycon(const struct earlycon_id *match,
- unsigned long node,
- const char *options);
+int of_setup_earlycon(const struct earlycon_id *match, unsigned long node,
+ const char *options);
#ifdef CONFIG_SERIAL_EARLYCON
extern bool earlycon_acpi_spcr_enable __initdata;
@@ -388,10 +1103,19 @@ static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED;
static inline int setup_earlycon(char *buf) { return 0; }
#endif
-struct uart_port *uart_get_console(struct uart_port *ports, int nr,
- struct console *c);
-int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr,
- char **options);
+/* Variant of uart_console_registered() when the console_list_lock is held. */
+static inline bool uart_console_registered_locked(struct uart_port *port)
+{
+ return uart_console(port) && console_is_registered_locked(port->cons);
+}
+
+static inline bool uart_console_registered(struct uart_port *port)
+{
+ return uart_console(port) && console_is_registered(port->cons);
+}
+
+int uart_parse_earlycon(char *p, enum uart_iotype *iotype,
+ resource_size_t *addr, char **options);
void uart_parse_options(const char *options, int *baud, int *parity, int *bits,
int *flow);
int uart_set_options(struct uart_port *port, struct console *co, int baud,
@@ -399,7 +1123,7 @@ int uart_set_options(struct uart_port *port, struct console *co, int baud,
struct tty_driver *uart_console_device(struct console *co, int *index);
void uart_console_write(struct uart_port *port, const char *s,
unsigned int count,
- void (*putchar)(struct uart_port *, int));
+ void (*putchar)(struct uart_port *, unsigned char));
/*
* Port/driver registration/removal
@@ -407,8 +1131,11 @@ void uart_console_write(struct uart_port *port, const char *s,
int uart_register_driver(struct uart_driver *uart);
void uart_unregister_driver(struct uart_driver *uart);
int uart_add_one_port(struct uart_driver *reg, struct uart_port *port);
-int uart_remove_one_port(struct uart_driver *reg, struct uart_port *port);
-int uart_match_port(struct uart_port *port1, struct uart_port *port2);
+void uart_remove_one_port(struct uart_driver *reg, struct uart_port *port);
+int uart_read_port_properties(struct uart_port *port);
+int uart_read_and_validate_port_properties(struct uart_port *port);
+bool uart_match_port(const struct uart_port *port1,
+ const struct uart_port *port2);
/*
* Power Management
@@ -416,19 +1143,10 @@ int uart_match_port(struct uart_port *port1, struct uart_port *port2);
int uart_suspend_port(struct uart_driver *reg, struct uart_port *port);
int uart_resume_port(struct uart_driver *reg, struct uart_port *port);
-#define uart_circ_empty(circ) ((circ)->head == (circ)->tail)
-#define uart_circ_clear(circ) ((circ)->head = (circ)->tail = 0)
-
-#define uart_circ_chars_pending(circ) \
- (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE))
-
-#define uart_circ_chars_free(circ) \
- (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE))
-
static inline int uart_tx_stopped(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
- if ((tty && tty->stopped) || port->hw_stopped)
+ if ((tty && tty->flow.stopped) || port->hw_stopped)
return 1;
return 0;
}
@@ -449,20 +1167,20 @@ static inline bool uart_softcts_mode(struct uart_port *uport)
* The following are helper functions for the low level drivers.
*/
-extern void uart_handle_dcd_change(struct uart_port *uport,
- unsigned int status);
-extern void uart_handle_cts_change(struct uart_port *uport,
- unsigned int status);
+void uart_handle_dcd_change(struct uart_port *uport, bool active);
+void uart_handle_cts_change(struct uart_port *uport, bool active);
+
+void uart_insert_char(struct uart_port *port, unsigned int status,
+ unsigned int overrun, u8 ch, u8 flag);
-extern void uart_insert_char(struct uart_port *port, unsigned int status,
- unsigned int overrun, unsigned int ch, unsigned int flag);
+void uart_xchar_out(struct uart_port *uport, int offset);
#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
#define SYSRQ_TIMEOUT (HZ * 5)
-bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch);
+bool uart_try_toggle_sysrq(struct uart_port *port, u8 ch);
-static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_handle_sysrq_char(struct uart_port *port, u8 ch)
{
if (!port->sysrq)
return 0;
@@ -481,7 +1199,7 @@ static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch
return 0;
}
-static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
{
if (!port->sysrq)
return 0;
@@ -502,33 +1220,57 @@ static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int c
static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
{
- int sysrq_ch;
+ u8 sysrq_ch;
+
+ if (!port->has_sysrq) {
+ uart_port_unlock(port);
+ return;
+ }
+
+ sysrq_ch = port->sysrq_ch;
+ port->sysrq_ch = 0;
+
+ uart_port_unlock(port);
+
+ if (sysrq_ch)
+ handle_sysrq(sysrq_ch);
+}
+
+static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+ unsigned long flags)
+{
+ u8 sysrq_ch;
if (!port->has_sysrq) {
- spin_unlock(&port->lock);
+ uart_port_unlock_irqrestore(port, flags);
return;
}
sysrq_ch = port->sysrq_ch;
port->sysrq_ch = 0;
- spin_unlock(&port->lock);
+ uart_port_unlock_irqrestore(port, flags);
if (sysrq_ch)
handle_sysrq(sysrq_ch);
}
#else /* CONFIG_MAGIC_SYSRQ_SERIAL */
-static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_handle_sysrq_char(struct uart_port *port, u8 ch)
{
return 0;
}
-static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
{
return 0;
}
static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
{
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
+}
+static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
+ unsigned long flags)
+{
+ uart_port_unlock_irqrestore(port, flags);
}
#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
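The sysrq helpers above are meant to be called from a driver's RX interrupt
path. A minimal sketch of the expected pairing, assuming a hypothetical
driver with my_rx_ready()/my_read_rx() as placeholder hardware accessors:

	/* Hypothetical RX handler; only the uart_* helper calls are real API. */
	static irqreturn_t my_uart_irq(int irq, void *dev_id)
	{
		struct uart_port *port = dev_id;
		u8 ch;

		uart_port_lock(port);
		while (my_rx_ready(port)) {
			ch = my_read_rx(port);
			if (uart_prepare_sysrq_char(port, ch))
				continue;	/* consumed by a sysrq sequence */
			uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
		}
		tty_flip_buffer_push(&port->state->port);
		/* drops the port lock, then runs any recorded sysrq handler */
		uart_unlock_and_check_sysrq(port);
		return IRQ_HANDLED;
	}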
diff --git a/include/linux/serial_max3100.h b/include/linux/serial_max3100.h
deleted file mode 100644
index befd55c08a7c..000000000000
--- a/include/linux/serial_max3100.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- *
- * Copyright (C) 2007 Christian Pellegrin
- */
-
-
-#ifndef _LINUX_SERIAL_MAX3100_H
-#define _LINUX_SERIAL_MAX3100_H 1
-
-
-/**
- * struct plat_max3100 - MAX3100 SPI UART platform data
- * @loopback: force MAX3100 in loopback
- * @crystal: 1 for 3.6864 Mhz, 0 for 1.8432
- * @max3100_hw_suspend: MAX3100 has a shutdown pin. This is a hook
- * called on suspend and resume to activate it.
- * @poll_time: poll time for CTS signal in ms, 0 disables (so no hw
- * flow ctrl is possible but you have less CPU usage)
- *
- * You should use this structure in your machine description to specify
- * how the MAX3100 is connected. Example:
- *
- * static struct plat_max3100 max3100_plat_data = {
- * .loopback = 0,
- * .crystal = 0,
- * .poll_time = 100,
- * };
- *
- * static struct spi_board_info spi_board_info[] = {
- * {
- * .modalias = "max3100",
- * .platform_data = &max3100_plat_data,
- * .irq = IRQ_EINT12,
- * .max_speed_hz = 5*1000*1000,
- * .chip_select = 0,
- * },
- * };
- *
- **/
-struct plat_max3100 {
- int loopback;
- int crystal;
- void (*max3100_hw_suspend) (int suspend);
- int poll_time;
-};
-
-#endif
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index f6c3323fc4c5..102aa33d956c 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -83,7 +83,7 @@
#define S3C2410_UCON_RXIRQMODE (1<<0)
#define S3C2410_UCON_RXFIFO_TOI (1<<7)
#define S3C2443_UCON_RXERR_IRQEN (1<<6)
-#define S3C2443_UCON_LOOPBACK (1<<5)
+#define S3C2410_UCON_LOOPBACK (1<<5)
#define S3C2410_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
@@ -246,21 +246,28 @@
S5PV210_UFCON_TXTRIG4 | \
S5PV210_UFCON_RXTRIG4)
-#define APPLE_S5L_UCON_RXTO_ENA 9
-#define APPLE_S5L_UCON_RXTHRESH_ENA 12
-#define APPLE_S5L_UCON_TXTHRESH_ENA 13
-#define APPLE_S5L_UCON_RXTO_ENA_MSK (1 << APPLE_S5L_UCON_RXTO_ENA)
-#define APPLE_S5L_UCON_RXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_RXTHRESH_ENA)
-#define APPLE_S5L_UCON_TXTHRESH_ENA_MSK (1 << APPLE_S5L_UCON_TXTHRESH_ENA)
+#define APPLE_S5L_UCON_RXTO_ENA 9
+#define APPLE_S5L_UCON_RXTO_LEGACY_ENA 11
+#define APPLE_S5L_UCON_RXTHRESH_ENA 12
+#define APPLE_S5L_UCON_TXTHRESH_ENA 13
+#define APPLE_S5L_UCON_RXTO_ENA_MSK BIT(APPLE_S5L_UCON_RXTO_ENA)
+#define APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK BIT(APPLE_S5L_UCON_RXTO_LEGACY_ENA)
+#define APPLE_S5L_UCON_RXTHRESH_ENA_MSK BIT(APPLE_S5L_UCON_RXTHRESH_ENA)
+#define APPLE_S5L_UCON_TXTHRESH_ENA_MSK BIT(APPLE_S5L_UCON_TXTHRESH_ENA)
#define APPLE_S5L_UCON_DEFAULT (S3C2410_UCON_TXIRQMODE | \
S3C2410_UCON_RXIRQMODE | \
S3C2410_UCON_RXFIFO_TOI)
-
-#define APPLE_S5L_UTRSTAT_RXTHRESH (1<<4)
-#define APPLE_S5L_UTRSTAT_TXTHRESH (1<<5)
-#define APPLE_S5L_UTRSTAT_RXTO (1<<9)
-#define APPLE_S5L_UTRSTAT_ALL_FLAGS (0x3f0)
+#define APPLE_S5L_UCON_MASK (APPLE_S5L_UCON_RXTO_ENA_MSK | \
+ APPLE_S5L_UCON_RXTO_LEGACY_ENA_MSK | \
+ APPLE_S5L_UCON_RXTHRESH_ENA_MSK | \
+ APPLE_S5L_UCON_TXTHRESH_ENA_MSK)
+
+#define APPLE_S5L_UTRSTAT_RXTO_LEGACY BIT(3)
+#define APPLE_S5L_UTRSTAT_RXTHRESH BIT(4)
+#define APPLE_S5L_UTRSTAT_TXTHRESH BIT(5)
+#define APPLE_S5L_UTRSTAT_RXTO BIT(9)
+#define APPLE_S5L_UTRSTAT_ALL_FLAGS GENMASK(9, 3)
#ifndef __ASSEMBLY__
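A detail of the BIT()/GENMASK() conversion above that is easy to miss:
GENMASK(9, 3) covers bits 3..9 inclusive, so the rewritten ALL_FLAGS mask
also picks up the new legacy RX-timeout bit, unlike the old literal 0x3f0
(bits 4..9 only). An illustrative compile-time check:

	/* Illustration only: new mask == old literal plus the legacy RXTO bit */
	static_assert(GENMASK(9, 3) == (0x3f0 | BIT(3)));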
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 1c89611e0e06..0f2f50b8d28e 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -37,6 +37,7 @@ enum {
SCIx_SH7705_SCIF_REGTYPE,
SCIx_HSCIF_REGTYPE,
SCIx_RZ_SCIFA_REGTYPE,
+ SCIx_RZV2H_SCIF_REGTYPE,
SCIx_NR_REGTYPES,
};
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 6c27d413da92..69a47674af65 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -6,6 +6,7 @@
#define _SERIO_H
+#include <linux/cleanup.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
@@ -15,7 +16,7 @@
#include <linux/mod_devicetable.h>
#include <uapi/linux/serio.h>
-extern struct bus_type serio_bus;
+extern const struct bus_type serio_bus;
struct serio {
void *port_data;
@@ -80,7 +81,7 @@ struct serio_driver {
struct device_driver driver;
};
-#define to_serio_driver(d) container_of(d, struct serio_driver, driver)
+#define to_serio_driver(d) container_of_const(d, struct serio_driver, driver)
int serio_open(struct serio *serio, struct serio_driver *drv);
void serio_close(struct serio *serio);
@@ -161,4 +162,6 @@ static inline void serio_continue_rx(struct serio *serio)
spin_unlock_irq(&serio->lock);
}
+DEFINE_GUARD(serio_pause_rx, struct serio *, serio_pause_rx(_T), serio_continue_rx(_T))
+
#endif
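The DEFINE_GUARD() above hooks serio_pause_rx()/serio_continue_rx() into the
<linux/cleanup.h> scope-guard machinery. A minimal usage sketch, where the
reset command and the surrounding driver are hypothetical:

	static void my_device_reset(struct serio *serio)
	{
		guard(serio_pause_rx)(serio);	/* RX paused for this scope */

		serio_write(serio, MY_RESET_CMD);
		/* serio_continue_rx() runs automatically at scope exit */
	}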
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index fe1aa4e54680..3030d9245f5a 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -8,10 +8,20 @@
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
#include <asm/set_memory.h>
#else
-static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
-static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_ro(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_rw(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_x(unsigned long addr, int numpages) { return 0; }
+static inline int __must_check set_memory_nx(unsigned long addr, int numpages) { return 0; }
+#endif
+
+#ifndef set_memory_rox
+static inline int set_memory_rox(unsigned long addr, int numpages)
+{
+ int ret = set_memory_ro(addr, numpages);
+ if (ret)
+ return ret;
+ return set_memory_x(addr, numpages);
+}
#endif
#ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
@@ -24,20 +34,38 @@ static inline int set_direct_map_default_noflush(struct page *page)
return 0;
}
+static inline int set_direct_map_valid_noflush(struct page *page,
+ unsigned nr, bool valid)
+{
+ return 0;
+}
+
static inline bool kernel_page_present(struct page *page)
{
return true;
}
+#else /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
+/*
+ * Some architectures, e.g. ARM64, can disable direct map modifications at
+ * boot time. Let them override this query.
+ */
+#ifndef can_set_direct_map
+static inline bool can_set_direct_map(void)
+{
+ return true;
+}
+#define can_set_direct_map can_set_direct_map
#endif
+#endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
-#ifndef set_mce_nospec
-static inline int set_mce_nospec(unsigned long pfn, bool unmap)
+#ifdef CONFIG_X86_64
+int set_mce_nospec(unsigned long pfn);
+int clear_mce_nospec(unsigned long pfn);
+#else
+static inline int set_mce_nospec(unsigned long pfn)
{
return 0;
}
-#endif
-
-#ifndef clear_mce_nospec
static inline int clear_mce_nospec(unsigned long pfn)
{
return 0;
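The generic set_memory_rox() fallback earlier in this file simply composes
the two primitives, short-circuiting on the first error. A hedged sketch of
the typical caller pattern (the trampoline naming is illustrative):

	static int my_seal_trampoline(unsigned long addr, int npages)
	{
		int err;

		err = set_memory_rox(addr, npages);	/* RO first, then exec */
		if (err)
			pr_warn("sealing trampoline failed: %d\n", err);
		return err;
	}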
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index 302094b855fb..5c71945a5e4d 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -284,6 +284,12 @@ enum {
SFF8024_ID_QSFP_8438 = 0x0c,
SFF8024_ID_QSFP_8436_8636 = 0x0d,
SFF8024_ID_QSFP28_8636 = 0x11,
+ SFF8024_ID_QSFP_DD = 0x18,
+ SFF8024_ID_OSFP = 0x19,
+ SFF8024_ID_DSFP = 0x1B,
+ SFF8024_ID_QSFP_PLUS_CMIS = 0x1E,
+ SFF8024_ID_SFP_DD_CMIS = 0x1F,
+ SFF8024_ID_SFP_PLUS_CMIS = 0x20,
SFF8024_ENCODING_UNSPEC = 0x00,
SFF8024_ENCODING_8B10B = 0x01,
@@ -332,39 +338,39 @@ enum {
/* SFP EEPROM registers */
enum {
- SFP_PHYS_ID = 0x00,
- SFP_PHYS_EXT_ID = 0x01,
- SFP_CONNECTOR = 0x02,
- SFP_COMPLIANCE = 0x03,
- SFP_ENCODING = 0x0b,
- SFP_BR_NOMINAL = 0x0c,
- SFP_RATE_ID = 0x0d,
- SFP_LINK_LEN_SM_KM = 0x0e,
- SFP_LINK_LEN_SM_100M = 0x0f,
- SFP_LINK_LEN_50UM_OM2_10M = 0x10,
- SFP_LINK_LEN_62_5UM_OM1_10M = 0x11,
- SFP_LINK_LEN_COPPER_1M = 0x12,
- SFP_LINK_LEN_50UM_OM4_10M = 0x12,
- SFP_LINK_LEN_50UM_OM3_10M = 0x13,
- SFP_VENDOR_NAME = 0x14,
- SFP_VENDOR_OUI = 0x25,
- SFP_VENDOR_PN = 0x28,
- SFP_VENDOR_REV = 0x38,
- SFP_OPTICAL_WAVELENGTH_MSB = 0x3c,
- SFP_OPTICAL_WAVELENGTH_LSB = 0x3d,
- SFP_CABLE_SPEC = 0x3c,
- SFP_CC_BASE = 0x3f,
- SFP_OPTIONS = 0x40, /* 2 bytes, MSB, LSB */
- SFP_BR_MAX = 0x42,
- SFP_BR_MIN = 0x43,
- SFP_VENDOR_SN = 0x44,
- SFP_DATECODE = 0x54,
- SFP_DIAGMON = 0x5c,
- SFP_ENHOPTS = 0x5d,
- SFP_SFF8472_COMPLIANCE = 0x5e,
- SFP_CC_EXT = 0x5f,
+ SFP_PHYS_ID = 0,
+ SFP_PHYS_EXT_ID = 1,
SFP_PHYS_EXT_ID_SFP = 0x04,
+
+ SFP_CONNECTOR = 2,
+ SFP_COMPLIANCE = 3,
+ SFP_ENCODING = 11,
+ SFP_BR_NOMINAL = 12,
+ SFP_RATE_ID = 13,
+ SFF_RID_8079 = 0x01,
+ SFF_RID_8431_RX_ONLY = 0x02,
+ SFF_RID_8431_TX_ONLY = 0x04,
+ SFF_RID_8431 = 0x06,
+ SFF_RID_10G8G = 0x0e,
+
+ SFP_LINK_LEN_SM_KM = 14,
+ SFP_LINK_LEN_SM_100M = 15,
+ SFP_LINK_LEN_50UM_OM2_10M = 16,
+ SFP_LINK_LEN_62_5UM_OM1_10M = 17,
+ SFP_LINK_LEN_COPPER_1M = 18,
+ SFP_LINK_LEN_50UM_OM4_10M = 18,
+ SFP_LINK_LEN_50UM_OM3_10M = 19,
+ SFP_VENDOR_NAME = 20,
+ SFP_VENDOR_OUI = 37,
+ SFP_VENDOR_PN = 40,
+ SFP_VENDOR_REV = 56,
+ SFP_OPTICAL_WAVELENGTH_MSB = 60,
+ SFP_OPTICAL_WAVELENGTH_LSB = 61,
+ SFP_CABLE_SPEC = 60,
+ SFP_CC_BASE = 63,
+
+ SFP_OPTIONS = 64, /* 2 bytes, MSB, LSB */
SFP_OPTIONS_HIGH_POWER_LEVEL = BIT(13),
SFP_OPTIONS_PAGING_A2 = BIT(12),
SFP_OPTIONS_RETIMER = BIT(11),
@@ -378,11 +384,20 @@ enum {
SFP_OPTIONS_TX_FAULT = BIT(3),
SFP_OPTIONS_LOS_INVERTED = BIT(2),
SFP_OPTIONS_LOS_NORMAL = BIT(1),
+
+ SFP_BR_MAX = 66,
+ SFP_BR_MIN = 67,
+ SFP_VENDOR_SN = 68,
+ SFP_DATECODE = 84,
+
+ SFP_DIAGMON = 92,
SFP_DIAGMON_DDM = BIT(6),
SFP_DIAGMON_INT_CAL = BIT(5),
SFP_DIAGMON_EXT_CAL = BIT(4),
SFP_DIAGMON_RXPWR_AVG = BIT(3),
SFP_DIAGMON_ADDRMODE = BIT(2),
+
+ SFP_ENHOPTS = 93,
SFP_ENHOPTS_ALARMWARN = BIT(7),
SFP_ENHOPTS_SOFT_TX_DISABLE = BIT(6),
SFP_ENHOPTS_SOFT_TX_FAULT = BIT(5),
@@ -390,6 +405,8 @@ enum {
SFP_ENHOPTS_SOFT_RATE_SELECT = BIT(3),
SFP_ENHOPTS_APP_SELECT_SFF8079 = BIT(2),
SFP_ENHOPTS_SOFT_RATE_SFF8431 = BIT(1),
+
+ SFP_SFF8472_COMPLIANCE = 94,
SFP_SFF8472_COMPLIANCE_NONE = 0x00,
SFP_SFF8472_COMPLIANCE_REV9_3 = 0x01,
SFP_SFF8472_COMPLIANCE_REV9_5 = 0x02,
@@ -399,68 +416,71 @@ enum {
SFP_SFF8472_COMPLIANCE_REV11_3 = 0x06,
SFP_SFF8472_COMPLIANCE_REV11_4 = 0x07,
SFP_SFF8472_COMPLIANCE_REV12_0 = 0x08,
+
+ SFP_CC_EXT = 95,
};
/* SFP Diagnostics */
enum {
/* Alarm and warnings stored MSB at lower address then LSB */
- SFP_TEMP_HIGH_ALARM = 0x00,
- SFP_TEMP_LOW_ALARM = 0x02,
- SFP_TEMP_HIGH_WARN = 0x04,
- SFP_TEMP_LOW_WARN = 0x06,
- SFP_VOLT_HIGH_ALARM = 0x08,
- SFP_VOLT_LOW_ALARM = 0x0a,
- SFP_VOLT_HIGH_WARN = 0x0c,
- SFP_VOLT_LOW_WARN = 0x0e,
- SFP_BIAS_HIGH_ALARM = 0x10,
- SFP_BIAS_LOW_ALARM = 0x12,
- SFP_BIAS_HIGH_WARN = 0x14,
- SFP_BIAS_LOW_WARN = 0x16,
- SFP_TXPWR_HIGH_ALARM = 0x18,
- SFP_TXPWR_LOW_ALARM = 0x1a,
- SFP_TXPWR_HIGH_WARN = 0x1c,
- SFP_TXPWR_LOW_WARN = 0x1e,
- SFP_RXPWR_HIGH_ALARM = 0x20,
- SFP_RXPWR_LOW_ALARM = 0x22,
- SFP_RXPWR_HIGH_WARN = 0x24,
- SFP_RXPWR_LOW_WARN = 0x26,
- SFP_LASER_TEMP_HIGH_ALARM = 0x28,
- SFP_LASER_TEMP_LOW_ALARM = 0x2a,
- SFP_LASER_TEMP_HIGH_WARN = 0x2c,
- SFP_LASER_TEMP_LOW_WARN = 0x2e,
- SFP_TEC_CUR_HIGH_ALARM = 0x30,
- SFP_TEC_CUR_LOW_ALARM = 0x32,
- SFP_TEC_CUR_HIGH_WARN = 0x34,
- SFP_TEC_CUR_LOW_WARN = 0x36,
- SFP_CAL_RXPWR4 = 0x38,
- SFP_CAL_RXPWR3 = 0x3c,
- SFP_CAL_RXPWR2 = 0x40,
- SFP_CAL_RXPWR1 = 0x44,
- SFP_CAL_RXPWR0 = 0x48,
- SFP_CAL_TXI_SLOPE = 0x4c,
- SFP_CAL_TXI_OFFSET = 0x4e,
- SFP_CAL_TXPWR_SLOPE = 0x50,
- SFP_CAL_TXPWR_OFFSET = 0x52,
- SFP_CAL_T_SLOPE = 0x54,
- SFP_CAL_T_OFFSET = 0x56,
- SFP_CAL_V_SLOPE = 0x58,
- SFP_CAL_V_OFFSET = 0x5a,
- SFP_CHKSUM = 0x5f,
-
- SFP_TEMP = 0x60,
- SFP_VCC = 0x62,
- SFP_TX_BIAS = 0x64,
- SFP_TX_POWER = 0x66,
- SFP_RX_POWER = 0x68,
- SFP_LASER_TEMP = 0x6a,
- SFP_TEC_CUR = 0x6c,
-
- SFP_STATUS = 0x6e,
+ SFP_TEMP_HIGH_ALARM = 0,
+ SFP_TEMP_LOW_ALARM = 2,
+ SFP_TEMP_HIGH_WARN = 4,
+ SFP_TEMP_LOW_WARN = 6,
+ SFP_VOLT_HIGH_ALARM = 8,
+ SFP_VOLT_LOW_ALARM = 10,
+ SFP_VOLT_HIGH_WARN = 12,
+ SFP_VOLT_LOW_WARN = 14,
+ SFP_BIAS_HIGH_ALARM = 16,
+ SFP_BIAS_LOW_ALARM = 18,
+ SFP_BIAS_HIGH_WARN = 20,
+ SFP_BIAS_LOW_WARN = 22,
+ SFP_TXPWR_HIGH_ALARM = 24,
+ SFP_TXPWR_LOW_ALARM = 26,
+ SFP_TXPWR_HIGH_WARN = 28,
+ SFP_TXPWR_LOW_WARN = 30,
+ SFP_RXPWR_HIGH_ALARM = 32,
+ SFP_RXPWR_LOW_ALARM = 34,
+ SFP_RXPWR_HIGH_WARN = 36,
+ SFP_RXPWR_LOW_WARN = 38,
+ SFP_LASER_TEMP_HIGH_ALARM = 40,
+ SFP_LASER_TEMP_LOW_ALARM = 42,
+ SFP_LASER_TEMP_HIGH_WARN = 44,
+ SFP_LASER_TEMP_LOW_WARN = 46,
+ SFP_TEC_CUR_HIGH_ALARM = 48,
+ SFP_TEC_CUR_LOW_ALARM = 50,
+ SFP_TEC_CUR_HIGH_WARN = 52,
+ SFP_TEC_CUR_LOW_WARN = 54,
+ SFP_CAL_RXPWR4 = 56,
+ SFP_CAL_RXPWR3 = 60,
+ SFP_CAL_RXPWR2 = 64,
+ SFP_CAL_RXPWR1 = 68,
+ SFP_CAL_RXPWR0 = 72,
+ SFP_CAL_TXI_SLOPE = 76,
+ SFP_CAL_TXI_OFFSET = 78,
+ SFP_CAL_TXPWR_SLOPE = 80,
+ SFP_CAL_TXPWR_OFFSET = 82,
+ SFP_CAL_T_SLOPE = 84,
+ SFP_CAL_T_OFFSET = 86,
+ SFP_CAL_V_SLOPE = 88,
+ SFP_CAL_V_OFFSET = 90,
+ SFP_CHKSUM = 95,
+
+ SFP_TEMP = 96,
+ SFP_VCC = 98,
+ SFP_TX_BIAS = 100,
+ SFP_TX_POWER = 102,
+ SFP_RX_POWER = 104,
+ SFP_LASER_TEMP = 106,
+ SFP_TEC_CUR = 108,
+
+ SFP_STATUS = 110,
SFP_STATUS_TX_DISABLE = BIT(7),
SFP_STATUS_TX_DISABLE_FORCE = BIT(6),
+ SFP_STATUS_RS0_SELECT = BIT(3),
SFP_STATUS_TX_FAULT = BIT(2),
SFP_STATUS_RX_LOS = BIT(1),
- SFP_ALARM0 = 0x70,
+ SFP_ALARM0 = 112,
SFP_ALARM0_TEMP_HIGH = BIT(7),
SFP_ALARM0_TEMP_LOW = BIT(6),
SFP_ALARM0_VCC_HIGH = BIT(5),
@@ -470,11 +490,11 @@ enum {
SFP_ALARM0_TXPWR_HIGH = BIT(1),
SFP_ALARM0_TXPWR_LOW = BIT(0),
- SFP_ALARM1 = 0x71,
+ SFP_ALARM1 = 113,
SFP_ALARM1_RXPWR_HIGH = BIT(7),
SFP_ALARM1_RXPWR_LOW = BIT(6),
- SFP_WARN0 = 0x74,
+ SFP_WARN0 = 116,
SFP_WARN0_TEMP_HIGH = BIT(7),
SFP_WARN0_TEMP_LOW = BIT(6),
SFP_WARN0_VCC_HIGH = BIT(5),
@@ -484,13 +504,16 @@ enum {
SFP_WARN0_TXPWR_HIGH = BIT(1),
SFP_WARN0_TXPWR_LOW = BIT(0),
- SFP_WARN1 = 0x75,
+ SFP_WARN1 = 117,
SFP_WARN1_RXPWR_HIGH = BIT(7),
SFP_WARN1_RXPWR_LOW = BIT(6),
- SFP_EXT_STATUS = 0x76,
- SFP_VSL = 0x78,
- SFP_PAGE = 0x7f,
+ SFP_EXT_STATUS = 118,
+ SFP_EXT_STATUS_RS1_SELECT = BIT(3),
+ SFP_EXT_STATUS_PWRLVL_SELECT = BIT(0),
+
+ SFP_VSL = 120,
+ SFP_PAGE = 127,
};
struct fwnode_handle;
@@ -499,6 +522,28 @@ struct ethtool_modinfo;
struct sfp_bus;
/**
+ * struct sfp_module_caps - sfp module capabilities
+ * @interfaces: bitmap of interfaces that the module may support
+ * @link_modes: bitmap of ethtool link modes that the module may support
+ */
+struct sfp_module_caps {
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(link_modes);
+ /**
+ * @may_have_phy: indicates whether the module may have an Ethernet PHY.
+ * There is no way to be sure that a module has a PHY, as the EEPROM
+ * doesn't contain this information. When set, this does not mean that
+ * the module definitely has a PHY.
+ */
+ bool may_have_phy;
+ /**
+ * @port: one of ethtool %PORT_* definitions, parsed from the module
+ * EEPROM, or %PORT_OTHER if the port type is not known.
+ */
+ u8 port;
+};
+
+/**
* struct sfp_upstream_ops - upstream operations structure
* @attach: called when the sfp socket driver is bound to the upstream
* (mandatory).
@@ -527,17 +572,13 @@ struct sfp_upstream_ops {
void (*link_down)(void *priv);
void (*link_up)(void *priv);
int (*connect_phy)(void *priv, struct phy_device *);
- void (*disconnect_phy)(void *priv);
+ void (*disconnect_phy)(void *priv, struct phy_device *);
};
#if IS_ENABLED(CONFIG_SFP)
-int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support);
-bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
-void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support);
+const struct sfp_module_caps *sfp_get_module_caps(struct sfp_bus *bus);
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
- unsigned long *link_modes);
+ const unsigned long *link_modes);
int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo);
int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
@@ -547,33 +588,22 @@ int sfp_get_module_eeprom_by_page(struct sfp_bus *bus,
struct netlink_ext_ack *extack);
void sfp_upstream_start(struct sfp_bus *bus);
void sfp_upstream_stop(struct sfp_bus *bus);
+void sfp_upstream_set_signal_rate(struct sfp_bus *bus, unsigned int rate_kbd);
void sfp_bus_put(struct sfp_bus *bus);
-struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode);
+struct sfp_bus *sfp_bus_find_fwnode(const struct fwnode_handle *fwnode);
int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
const struct sfp_upstream_ops *ops);
void sfp_bus_del_upstream(struct sfp_bus *bus);
+const char *sfp_get_name(struct sfp_bus *bus);
#else
-static inline int sfp_parse_port(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id,
- unsigned long *support)
-{
- return PORT_OTHER;
-}
-
-static inline bool sfp_may_have_phy(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id)
-{
- return false;
-}
-
-static inline void sfp_parse_support(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id,
- unsigned long *support)
+static inline const struct sfp_module_caps *
+sfp_get_module_caps(struct sfp_bus *bus)
{
+ return NULL;
}
static inline phy_interface_t sfp_select_interface(struct sfp_bus *bus,
- unsigned long *link_modes)
+ const unsigned long *link_modes)
{
return PHY_INTERFACE_MODE_NA;
}
@@ -605,11 +635,17 @@ static inline void sfp_upstream_stop(struct sfp_bus *bus)
{
}
+static inline void sfp_upstream_set_signal_rate(struct sfp_bus *bus,
+ unsigned int rate_kbd)
+{
+}
+
static inline void sfp_bus_put(struct sfp_bus *bus)
{
}
-static inline struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
+static inline struct sfp_bus *
+sfp_bus_find_fwnode(const struct fwnode_handle *fwnode)
{
return NULL;
}
@@ -623,6 +659,11 @@ static inline int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
static inline void sfp_bus_del_upstream(struct sfp_bus *bus)
{
}
+
+static inline const char *sfp_get_name(struct sfp_bus *bus)
+{
+ return NULL;
+}
#endif
#endif
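With the three parse helpers folded into sfp_get_module_caps(), a consumer
selects an interface from the returned capability bitmaps. A hedged sketch
of the consuming side (the wrapper itself is hypothetical):

	static int my_sfp_configure(struct sfp_bus *bus)
	{
		const struct sfp_module_caps *caps = sfp_get_module_caps(bus);
		phy_interface_t iface;

		if (!caps)		/* the CONFIG_SFP=n stub returns NULL */
			return -ENODEV;

		iface = sfp_select_interface(bus, caps->link_modes);
		if (iface == PHY_INTERFACE_MODE_NA)
			return -EINVAL;

		/* caps->may_have_phy only hints that probing a PHY is worthwhile */
		return 0;
	}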
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
index c255273b0281..27ae79191bdc 100644
--- a/include/linux/sh_intc.h
+++ b/include/linux/sh_intc.h
@@ -13,9 +13,9 @@
/*
* Convert back and forth between INTEVT and IRQ values.
*/
-#ifdef CONFIG_CPU_HAS_INTEVT
-#define evt2irq(evt) (((evt) >> 5) - 16)
-#define irq2evt(irq) (((irq) + 16) << 5)
+#ifdef CONFIG_CPU_HAS_INTEVT /* Avoid IRQ0 (invalid for platform devices) */
+#define evt2irq(evt) ((evt) >> 5)
+#define irq2evt(irq) ((irq) << 5)
#else
#define evt2irq(evt) (evt)
#define irq2evt(irq) (irq)
@@ -97,7 +97,10 @@ struct intc_hw_desc {
unsigned int nr_subgroups;
};
-#define _INTC_ARRAY(a) a, __same_type(a, NULL) ? 0 : sizeof(a)/sizeof(*a)
+#define _INTC_SIZEOF_OR_ZERO(a) (_Generic(a, \
+ typeof(NULL): 0, \
+ default: sizeof(a)))
+#define _INTC_ARRAY(a) a, _INTC_SIZEOF_OR_ZERO(a)/sizeof(*a)
#define INTC_HW_DESC(vectors, groups, mask_regs, \
prio_regs, sense_regs, ack_regs) \
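The _Generic selector above evaluates to 0 for a null pointer constant, so
the sizeof division is never applied to NULL's pointee. A standalone
illustration of the same idiom (GNU C, outside the kernel):

	#include <stdio.h>

	#define SIZEOF_OR_ZERO(a) _Generic((a),		\
			typeof(NULL): 0,		\
			default: sizeof(a))

	static int vec[4];

	int main(void)
	{
		printf("%zu\n", (size_t)SIZEOF_OR_ZERO(vec));	/* 16 */
		printf("%d\n", SIZEOF_OR_ZERO(NULL));		/* 0 */
		return 0;
	}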
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index 6dfd05ef5c2d..03ba4dab2ef7 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -96,7 +96,7 @@ struct shdma_ops {
int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
dma_addr_t, dma_addr_t, size_t *);
int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool);
- void (*setup_xfer)(struct shdma_chan *, int);
+ int (*setup_xfer)(struct shdma_chan *, int);
void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
struct shdma_desc *(*embedded_desc)(void *, int);
bool (*chan_irq)(struct shdma_chan *, int);
diff --git a/include/linux/shm.h b/include/linux/shm.h
index d8e69aed3d32..1d3d3ae958fb 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -2,12 +2,12 @@
#ifndef _LINUX_SHM_H_
#define _LINUX_SHM_H_
-#include <linux/list.h>
+#include <linux/types.h>
#include <asm/page.h>
-#include <uapi/linux/shm.h>
#include <asm/shmparam.h>
struct file;
+struct task_struct;
#ifdef CONFIG_SYSVIPC
struct sysv_shm {
@@ -16,7 +16,6 @@ struct sysv_shm {
long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr,
unsigned long shmlba);
-bool is_file_shm_hugepages(struct file *file);
void exit_shm(struct task_struct *task);
#define shm_init_task(task) INIT_LIST_HEAD(&(task)->sysvshm.shm_clist)
#else
@@ -30,10 +29,6 @@ static inline long do_shmat(int shmid, char __user *shmaddr,
{
return -ENOSYS;
}
-static inline bool is_file_shm_hugepages(struct file *file)
-{
- return false;
-}
static inline void exit_shm(struct task_struct *task)
{
}
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index d82b6f396588..e2069b3179c4 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -9,40 +9,86 @@
#include <linux/percpu_counter.h>
#include <linux/xattr.h>
#include <linux/fs_parser.h>
+#include <linux/userfaultfd_k.h>
+#include <linux/bits.h>
+
+struct swap_iocb;
/* inode in-kernel data */
+#ifdef CONFIG_TMPFS_QUOTA
+#define SHMEM_MAXQUOTAS 2
+#endif
+
+/* Suppress pre-accounting of the entire object size. */
+#define SHMEM_F_NORESERVE BIT(0)
+/* Disallow swapping. */
+#define SHMEM_F_LOCKED BIT(1)
+/*
+ * Disallow growing, shrinking, or hole punching in the inode. Combined with
+ * folio pinning, makes sure the inode's mapping stays fixed.
+ *
+ * In some ways similar to F_SEAL_GROW | F_SEAL_SHRINK, but can be removed and
+ * isn't directly visible to userspace.
+ */
+#define SHMEM_F_MAPPING_FROZEN BIT(2)
+
struct shmem_inode_info {
spinlock_t lock;
unsigned int seals; /* shmem seals */
unsigned long flags;
unsigned long alloced; /* data pages alloced to file */
unsigned long swapped; /* subtotal assigned to swap */
- struct list_head shrinklist; /* shrinkable hpage inodes */
- struct list_head swaplist; /* chain of maybes on swap */
+ union {
+ struct offset_ctx dir_offsets; /* stable directory offsets */
+ struct {
+ struct list_head shrinklist; /* shrinkable hpage inodes */
+ struct list_head swaplist; /* chain of maybes on swap */
+ };
+ };
+ struct timespec64 i_crtime; /* file creation time */
struct shared_policy policy; /* NUMA memory alloc policy */
struct simple_xattrs xattrs; /* list of xattrs */
+ pgoff_t fallocend; /* highest fallocate endindex */
+ unsigned int fsflags; /* for FS_IOC_[SG]ETFLAGS */
atomic_t stop_eviction; /* hold when working on inode */
+#ifdef CONFIG_TMPFS_QUOTA
+ struct dquot __rcu *i_dquot[MAXQUOTAS];
+#endif
struct inode vfs_inode;
};
+#define SHMEM_FL_USER_VISIBLE (FS_FL_USER_VISIBLE | FS_CASEFOLD_FL)
+#define SHMEM_FL_USER_MODIFIABLE \
+ (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL | FS_CASEFOLD_FL)
+#define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL | FS_CASEFOLD_FL)
+
+struct shmem_quota_limits {
+ qsize_t usrquota_bhardlimit; /* Default user quota block hard limit */
+ qsize_t usrquota_ihardlimit; /* Default user quota inode hard limit */
+ qsize_t grpquota_bhardlimit; /* Default group quota block hard limit */
+ qsize_t grpquota_ihardlimit; /* Default group quota inode hard limit */
+};
+
struct shmem_sb_info {
unsigned long max_blocks; /* How many blocks are allowed */
struct percpu_counter used_blocks; /* How many are allocated */
unsigned long max_inodes; /* How many inodes are allowed */
- unsigned long free_inodes; /* How many are left for allocation */
- spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
+ unsigned long free_ispace; /* How much ispace left for allocation */
+ raw_spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
umode_t mode; /* Mount mode for root directory */
unsigned char huge; /* Whether to try for hugepages */
kuid_t uid; /* Mount uid for root directory */
kgid_t gid; /* Mount gid for root directory */
bool full_inums; /* If i_ino should be uint or ino_t */
+ bool noswap; /* ignores VM reclaim / swap requests */
ino_t next_ino; /* The next per-sb inode number to use */
ino_t __percpu *ino_batch; /* The next per-cpu inode number to use */
struct mempolicy *mpol; /* default memory policy for mappings */
spinlock_t shrinklist_lock; /* Protects shrinklist */
struct list_head shrinklist; /* List of shrinkable inodes */
unsigned long shrinklist_len; /* Length of shrinklist */
+ struct shmem_quota_limits qlimits; /* Default quota limits */
};
static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
@@ -54,7 +100,7 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
* Functions in mm/shmem.c called directly from elsewhere:
*/
extern const struct fs_parameter_spec shmem_fs_parameters[];
-extern int shmem_init(void);
+extern void shmem_init(void);
extern int shmem_init_fs_context(struct fs_context *fc);
extern struct file *shmem_file_setup(const char *name,
loff_t size, unsigned long flags);
@@ -62,46 +108,81 @@ extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
unsigned long flags);
extern struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt,
const char *name, loff_t size, unsigned long flags);
-extern int shmem_zero_setup(struct vm_area_struct *);
+int shmem_zero_setup(struct vm_area_struct *vma);
+int shmem_zero_setup_desc(struct vm_area_desc *desc);
extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
-extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts);
#ifdef CONFIG_SHMEM
-extern const struct address_space_operations shmem_aops;
-static inline bool shmem_mapping(struct address_space *mapping)
-{
- return mapping->a_ops == &shmem_aops;
-}
+bool shmem_mapping(const struct address_space *mapping);
#else
-static inline bool shmem_mapping(struct address_space *mapping)
+static inline bool shmem_mapping(const struct address_space *mapping)
{
return false;
}
#endif /* CONFIG_SHMEM */
-extern void shmem_unlock_mapping(struct address_space *mapping);
-extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+void shmem_unlock_mapping(struct address_space *mapping);
+struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
-extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
-extern int shmem_unuse(unsigned int type, bool frontswap,
- unsigned long *fs_pages_to_unuse);
+int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
+ struct list_head *folio_list);
+void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+int shmem_unuse(unsigned int type);
-extern bool shmem_huge_enabled(struct vm_area_struct *vma);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+unsigned long shmem_allowable_huge_orders(struct inode *inode,
+ struct vm_area_struct *vma, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force);
+bool shmem_hpage_pmd_enabled(void);
+#else
+static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
+ struct vm_area_struct *vma, pgoff_t index,
+ loff_t write_end, bool shmem_huge_force)
+{
+ return 0;
+}
+
+static inline bool shmem_hpage_pmd_enabled(void)
+{
+ return false;
+}
+#endif
+
+#ifdef CONFIG_SHMEM
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
+extern void shmem_uncharge(struct inode *inode, long pages);
+#else
+static inline unsigned long shmem_swap_usage(struct vm_area_struct *vma)
+{
+ return 0;
+}
+
+static inline void shmem_uncharge(struct inode *inode, long pages)
+{
+}
+#endif
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
pgoff_t start, pgoff_t end);
-/* Flag allocation requirements to shmem_getpage */
+/* Flag allocation requirements to shmem_get_folio */
enum sgp_type {
SGP_READ, /* don't exceed i_size, don't allocate page */
+ SGP_NOALLOC, /* similar, but fail on hole or use fallocated page */
SGP_CACHE, /* don't exceed i_size, may allocate page */
- SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */
- SGP_HUGE, /* like SGP_CACHE, huge pages preferred */
SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */
SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
};
-extern int shmem_getpage(struct inode *inode, pgoff_t index,
- struct page **pagep, enum sgp_type sgp);
+int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end,
+ struct folio **foliop, enum sgp_type sgp);
+struct folio *shmem_read_folio_gfp(struct address_space *mapping,
+ pgoff_t index, gfp_t gfp);
+
+static inline struct folio *shmem_read_folio(struct address_space *mapping,
+ pgoff_t index)
+{
+ return shmem_read_folio_gfp(mapping, index, mapping_gfp_mask(mapping));
+}
static inline struct page *shmem_read_mapping_page(
struct address_space *mapping, pgoff_t index)
@@ -119,24 +200,54 @@ static inline bool shmem_file(struct file *file)
return shmem_mapping(file->f_mapping);
}
+/* Must be called with inode lock taken exclusive. */
+static inline void shmem_freeze(struct inode *inode, bool freeze)
+{
+ if (freeze)
+ SHMEM_I(inode)->flags |= SHMEM_F_MAPPING_FROZEN;
+ else
+ SHMEM_I(inode)->flags &= ~SHMEM_F_MAPPING_FROZEN;
+}
+
+/*
+ * If fallocate(FALLOC_FL_KEEP_SIZE) has been used, there may be pages
+ * beyond i_size's notion of EOF, which fallocate has committed to reserving:
+ * which split_huge_page() must therefore not delete. This use of a single
+ * "fallocend" per inode errs on the side of not deleting a reservation when
+ * in doubt: there are plenty of cases when it preserves unreserved pages.
+ */
+static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof)
+{
+ return max(eof, SHMEM_I(inode)->fallocend);
+}
+
extern bool shmem_charge(struct inode *inode, long pages);
-extern void shmem_uncharge(struct inode *inode, long pages);
+#ifdef CONFIG_USERFAULTFD
#ifdef CONFIG_SHMEM
-extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
- struct page **pagep);
-extern int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm,
- pmd_t *dst_pmd,
- struct vm_area_struct *dst_vma,
- unsigned long dst_addr);
-#else
-#define shmem_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
- src_addr, pagep) ({ BUG(); 0; })
-#define shmem_mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma, \
- dst_addr) ({ BUG(); 0; })
-#endif
+ uffd_flags_t flags,
+ struct folio **foliop);
+#else /* !CONFIG_SHMEM */
+#define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \
+ src_addr, flags, foliop) ({ BUG(); 0; })
+#endif /* CONFIG_SHMEM */
+#endif /* CONFIG_USERFAULTFD */
+
+/*
+ * Used space is stored as unsigned 64-bit value in bytes but
+ * quota core supports only signed 64-bit values so use that
+ * as a limit
+ */
+#define SHMEM_QUOTA_MAX_SPC_LIMIT 0x7fffffffffffffffLL /* 2^63-1 */
+#define SHMEM_QUOTA_MAX_INO_LIMIT 0x7fffffffffffffffLL
+
+#ifdef CONFIG_TMPFS_QUOTA
+extern const struct dquot_operations shmem_quota_operations;
+extern struct quota_format_type shmem_quota_format;
+#endif /* CONFIG_TMPFS_QUOTA */
#endif
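For in-kernel users the main entry points remain the shmem_*file_setup()
constructors plus shmem_read_folio(). A minimal sketch of creating an
unlinked tmpfs-backed file (naming and sizing are illustrative):

	static struct file *my_shmem_buffer(void)
	{
		struct file *file;
		struct folio *folio;

		file = shmem_kernel_file_setup("my-buf", SZ_1M, VM_NORESERVE);
		if (IS_ERR(file))
			return file;

		/* pages are allocated lazily; fault the first one in */
		folio = shmem_read_folio(file->f_mapping, 0);
		if (IS_ERR(folio)) {
			fput(file);
			return ERR_CAST(folio);
		}
		folio_put(folio);
		return file;
	}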
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 1eac79ce57d4..1a00be90d93a 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -2,9 +2,31 @@
#ifndef _LINUX_SHRINKER_H
#define _LINUX_SHRINKER_H
+#include <linux/atomic.h>
+#include <linux/types.h>
+#include <linux/refcount.h>
+#include <linux/completion.h>
+
+#define SHRINKER_UNIT_BITS BITS_PER_LONG
+
+/*
+ * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
+ * shrinkers, which have elements charged to the memcg.
+ */
+struct shrinker_info_unit {
+ atomic_long_t nr_deferred[SHRINKER_UNIT_BITS];
+ DECLARE_BITMAP(map, SHRINKER_UNIT_BITS);
+};
+
+struct shrinker_info {
+ struct rcu_head rcu;
+ int map_nr_max;
+ struct shrinker_info_unit *unit[];
+};
+
/*
* This struct is used to pass information from page reclaim to the shrinkers.
- * We consolidate the values for easier extention later.
+ * We consolidate the values for easier extension later.
*
* The 'gfpmask' refers to the allocation we are currently trying to
* fulfil.
@@ -67,30 +89,72 @@ struct shrinker {
int seeks; /* seeks to recreate an obj */
unsigned flags;
+ /*
+ * The reference count of this shrinker. A registered shrinker has an
+ * initial refcount of 1; lookup operations are then allowed to take
+ * references via shrinker_try_get(). During unregistration the initial
+ * refcount is discarded, and the shrinker is freed asynchronously via
+ * RCU once its refcount reaches 0.
+ */
+ refcount_t refcount;
+ struct completion done; /* use to wait for refcount to reach 0 */
+ struct rcu_head rcu;
+
+ void *private_data;
+
/* These are for internal use */
struct list_head list;
#ifdef CONFIG_MEMCG
/* ID in shrinker_idr */
int id;
#endif
+#ifdef CONFIG_SHRINKER_DEBUG
+ int debugfs_id;
+ const char *name;
+ struct dentry *debugfs_entry;
+#endif
/* objs pending delete, per node */
atomic_long_t *nr_deferred;
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
-/* Flags */
-#define SHRINKER_REGISTERED (1 << 0)
-#define SHRINKER_NUMA_AWARE (1 << 1)
-#define SHRINKER_MEMCG_AWARE (1 << 2)
+/* Internal flags */
+#define SHRINKER_REGISTERED BIT(0)
+#define SHRINKER_ALLOCATED BIT(1)
+
+/* Flags for users to use */
+#define SHRINKER_NUMA_AWARE BIT(2)
+#define SHRINKER_MEMCG_AWARE BIT(3)
/*
* It just makes sense when the shrinker is also MEMCG_AWARE for now,
* non-MEMCG_AWARE shrinker should not have this flag set.
*/
-#define SHRINKER_NONSLAB (1 << 3)
+#define SHRINKER_NONSLAB BIT(4)
-extern int prealloc_shrinker(struct shrinker *shrinker);
-extern void register_shrinker_prepared(struct shrinker *shrinker);
-extern int register_shrinker(struct shrinker *shrinker);
-extern void unregister_shrinker(struct shrinker *shrinker);
-extern void free_prealloced_shrinker(struct shrinker *shrinker);
-#endif
+__printf(2, 3)
+struct shrinker *shrinker_alloc(unsigned int flags, const char *fmt, ...);
+void shrinker_register(struct shrinker *shrinker);
+void shrinker_free(struct shrinker *shrinker);
+
+static inline bool shrinker_try_get(struct shrinker *shrinker)
+{
+ return refcount_inc_not_zero(&shrinker->refcount);
+}
+
+static inline void shrinker_put(struct shrinker *shrinker)
+{
+ if (refcount_dec_and_test(&shrinker->refcount))
+ complete(&shrinker->done);
+}
+
+#ifdef CONFIG_SHRINKER_DEBUG
+extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
+ const char *fmt, ...);
+#else /* CONFIG_SHRINKER_DEBUG */
+static inline __printf(2, 3)
+int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
+{
+ return 0;
+}
+#endif /* CONFIG_SHRINKER_DEBUG */
+#endif /* _LINUX_SHRINKER_H */
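The old register/unregister pair becomes an alloc/register/free lifecycle
built around the embedded refcount. A hedged sketch, where my_count/my_scan
and my_cache are hypothetical callbacks and state:

	static struct shrinker *my_shrinker;

	static int __init my_cache_init(void)
	{
		my_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "my-cache");
		if (!my_shrinker)
			return -ENOMEM;

		my_shrinker->count_objects = my_count;
		my_shrinker->scan_objects = my_scan;
		my_shrinker->private_data = &my_cache;

		shrinker_register(my_shrinker);	/* published, refcount == 1 */
		return 0;
	}

	static void __exit my_cache_exit(void)
	{
		/* drops the initial ref; freeing happens via RCU once idle */
		shrinker_free(my_shrinker);
	}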
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 0dbfda8d99d0..f19816832f05 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -3,6 +3,7 @@
#define _LINUX_SIGNAL_H
#include <linux/bug.h>
+#include <linux/list.h>
#include <linux/signal_types.h>
#include <linux/string.h>
@@ -40,10 +41,11 @@ enum siginfo_layout {
SIL_TIMER,
SIL_POLL,
SIL_FAULT,
+ SIL_FAULT_TRAPNO,
SIL_FAULT_MCEERR,
SIL_FAULT_BNDERR,
SIL_FAULT_PKUERR,
- SIL_PERF_EVENT,
+ SIL_FAULT_PERF_EVENT,
SIL_CHLD,
SIL_RT,
SIL_SYS,
@@ -125,7 +127,6 @@ static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2)
#define sigmask(sig) (1UL << ((sig) - 1))
#ifndef __HAVE_ARCH_SIG_SETOPS
-#include <linux/string.h>
#define _SIG_SET_BINOP(name, op) \
static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
@@ -266,7 +267,6 @@ static inline void init_sigpending(struct sigpending *sig)
}
extern void flush_sigqueue(struct sigpending *queue);
-extern void exit_task_sigqueue_cache(struct task_struct *tsk);
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
@@ -283,7 +283,8 @@ extern int do_send_sig_info(int sig, struct kernel_siginfo *info,
struct task_struct *p, enum pid_type type);
extern int group_send_sig_info(int sig, struct kernel_siginfo *info,
struct task_struct *p, enum pid_type type);
-extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
+extern int send_signal_locked(int sig, struct kernel_siginfo *info,
+ struct task_struct *p, enum pid_type type);
extern int sigprocmask(int, sigset_t *, sigset_t *);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
@@ -462,10 +463,14 @@ int __save_altstack(stack_t __user *, unsigned long);
unsafe_put_user((void __user *)t->sas_ss_sp, &__uss->ss_sp, label); \
unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \
unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \
- if (t->sas_ss_flags & SS_AUTODISARM) \
- sas_ss_reset(t); \
} while (0);
+#ifdef CONFIG_DYNAMIC_SIGFRAME
+bool sigaltstack_size_valid(size_t ss_size);
+#else
+static inline bool sigaltstack_size_valid(size_t size) { return true; }
+#endif /* !CONFIG_DYNAMIC_SIGFRAME */
+
#ifdef CONFIG_PROC_FS
struct seq_file;
extern void render_sigset_t(struct seq_file *, const char *, sigset_t *);
diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h
index 68e06c75c5b2..caf4f7a59ab9 100644
--- a/include/linux/signal_types.h
+++ b/include/linux/signal_types.h
@@ -6,13 +6,15 @@
* Basic signal handling related data type definitions:
*/
-#include <linux/list.h>
+#include <linux/types.h>
#include <uapi/linux/signal.h>
typedef struct kernel_siginfo {
__SIGINFO;
} kernel_siginfo_t;
+struct ucounts;
+
/*
* Real Time signals may be queued.
*/
@@ -21,7 +23,7 @@ struct sigqueue {
struct list_head list;
int flags;
kernel_siginfo_t info;
- struct user_struct *user;
+ struct ucounts *ucounts;
};
/* flags values. */
@@ -68,6 +70,9 @@ struct ksignal {
int sig;
};
+/* Used to kill the race between sigaction and forced signals */
+#define SA_IMMUTABLE 0x00800000
+
#ifndef __ARCH_UAPI_SA_FLAGS
#ifdef SA_RESTORER
#define __ARCH_UAPI_SA_FLAGS SA_RESTORER
diff --git a/include/linux/siphash.h b/include/linux/siphash.h
index bf21591a9e5e..9153e77382e1 100644
--- a/include/linux/siphash.h
+++ b/include/linux/siphash.h
@@ -1,6 +1,5 @@
-/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
- *
- * This file is provided under a dual BSD/GPLv2 license.
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright (C) 2016-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* SipHash: a fast short-input PRF
* https://131002.net/siphash/
@@ -21,15 +20,15 @@ typedef struct {
u64 key[2];
} siphash_key_t;
+#define siphash_aligned_key_t siphash_key_t __aligned(16)
+
static inline bool siphash_key_is_zero(const siphash_key_t *key)
{
return !(key->key[0] | key->key[1]);
}
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
-#endif
u64 siphash_1u64(const u64 a, const siphash_key_t *key);
u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
@@ -82,10 +81,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
static inline u64 siphash(const void *data, size_t len,
const siphash_key_t *key)
{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
return __siphash_unaligned(data, len, key);
-#endif
return ___siphash_aligned(data, len, key);
}
@@ -96,10 +94,8 @@ typedef struct {
u32 __hsiphash_aligned(const void *data, size_t len,
const hsiphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key);
-#endif
u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
@@ -135,11 +131,38 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
static inline u32 hsiphash(const void *data, size_t len,
const hsiphash_key_t *key)
{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
return __hsiphash_unaligned(data, len, key);
-#endif
return ___hsiphash_aligned(data, len, key);
}
+/*
+ * These macros expose the raw SipHash and HalfSipHash permutations.
+ * Do not use them directly! If you think you have a use for them,
+ * be sure to CC the maintainer of this file explaining why.
+ */
+
+#define SIPHASH_PERMUTATION(a, b, c, d) ( \
+ (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \
+ (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \
+ (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \
+ (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32))
+
+#define SIPHASH_CONST_0 0x736f6d6570736575ULL
+#define SIPHASH_CONST_1 0x646f72616e646f6dULL
+#define SIPHASH_CONST_2 0x6c7967656e657261ULL
+#define SIPHASH_CONST_3 0x7465646279746573ULL
+
+#define HSIPHASH_PERMUTATION(a, b, c, d) ( \
+ (a) += (b), (b) = rol32((b), 5), (b) ^= (a), (a) = rol32((a), 16), \
+ (c) += (d), (d) = rol32((d), 8), (d) ^= (c), \
+ (a) += (d), (d) = rol32((d), 7), (d) ^= (a), \
+ (c) += (b), (b) = rol32((b), 13), (b) ^= (c), (c) = rol32((c), 16))
+
+#define HSIPHASH_CONST_0 0U
+#define HSIPHASH_CONST_1 0U
+#define HSIPHASH_CONST_2 0x6c796765U
+#define HSIPHASH_CONST_3 0x74656462U
+
#endif /* _LINUX_SIPHASH_H */
diff --git a/include/linux/sizes.h b/include/linux/sizes.h
index 1ac79bcee2bb..f1f1a055b047 100644
--- a/include/linux/sizes.h
+++ b/include/linux/sizes.h
@@ -23,17 +23,25 @@
#define SZ_4K 0x00001000
#define SZ_8K 0x00002000
#define SZ_16K 0x00004000
+#define SZ_24K 0x00006000
#define SZ_32K 0x00008000
#define SZ_64K 0x00010000
#define SZ_128K 0x00020000
+#define SZ_192K 0x00030000
#define SZ_256K 0x00040000
+#define SZ_384K 0x00060000
#define SZ_512K 0x00080000
#define SZ_1M 0x00100000
#define SZ_2M 0x00200000
+#define SZ_3M 0x00300000
#define SZ_4M 0x00400000
+#define SZ_6M 0x00600000
#define SZ_8M 0x00800000
+#define SZ_12M 0x00c00000
#define SZ_16M 0x01000000
+#define SZ_18M 0x01200000
+#define SZ_24M 0x01800000
#define SZ_32M 0x02000000
#define SZ_64M 0x04000000
#define SZ_128M 0x08000000
@@ -47,6 +55,18 @@
#define SZ_8G _AC(0x200000000, ULL)
#define SZ_16G _AC(0x400000000, ULL)
#define SZ_32G _AC(0x800000000, ULL)
+#define SZ_64G _AC(0x1000000000, ULL)
+#define SZ_128G _AC(0x2000000000, ULL)
+#define SZ_256G _AC(0x4000000000, ULL)
+#define SZ_512G _AC(0x8000000000, ULL)
+
+#define SZ_1T _AC(0x10000000000, ULL)
+#define SZ_2T _AC(0x20000000000, ULL)
+#define SZ_4T _AC(0x40000000000, ULL)
+#define SZ_8T _AC(0x80000000000, ULL)
+#define SZ_16T _AC(0x100000000000, ULL)
+#define SZ_32T _AC(0x200000000000, ULL)
#define SZ_64T _AC(0x400000000000, ULL)
+#define SZ_128T _AC(0x800000000000, ULL)
#endif /* __LINUX_SIZES_H__ */
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index e2d45b7cb619..bf178238a308 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -177,10 +177,11 @@ static inline int skb_array_peek_len_any(struct skb_array *a)
return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
}
-static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
+static inline int skb_array_init_noprof(struct skb_array *a, int size, gfp_t gfp)
{
- return ptr_ring_init(&a->ring, size, gfp);
+ return ptr_ring_init_noprof(&a->ring, size, gfp);
}
+#define skb_array_init(...) alloc_hooks(skb_array_init_noprof(__VA_ARGS__))
static void __skb_array_destroy_skb(void *ptr)
{
@@ -198,15 +199,18 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
}
-static inline int skb_array_resize_multiple(struct skb_array **rings,
- int nrings, unsigned int size,
- gfp_t gfp)
+static inline int skb_array_resize_multiple_bh_noprof(struct skb_array **rings,
+ int nrings,
+ unsigned int size,
+ gfp_t gfp)
{
BUILD_BUG_ON(offsetof(struct skb_array, ring));
- return ptr_ring_resize_multiple((struct ptr_ring **)rings,
- nrings, size, gfp,
- __skb_array_destroy_skb);
+ return ptr_ring_resize_multiple_bh_noprof((struct ptr_ring **)rings,
+ nrings, size, gfp,
+ __skb_array_destroy_skb);
}
+#define skb_array_resize_multiple_bh(...) \
+ alloc_hooks(skb_array_resize_multiple_bh_noprof(__VA_ARGS__))
static inline void skb_array_cleanup(struct skb_array *a)
{
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index dbf820a50a39..86737076101d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -23,116 +23,129 @@
#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
-#include <linux/net.h>
-#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
-#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
-#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
+#include <linux/llist.h>
+#include <linux/page_frag_cache.h>
#include <net/flow.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
#endif
+#include <net/net_debug.h>
+#include <net/dropreason-core.h>
+#include <net/netmem.h>
-/* The interface for checksum offload between the stack and networking drivers
+/**
+ * DOC: skb checksums
+ *
+ * The interface for checksum offload between the stack and networking drivers
* is as follows...
*
- * A. IP checksum related features
+ * IP checksum related features
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* Drivers advertise checksum offload capabilities in the features of a device.
* From the stack's point of view these are capabilities offered by the driver.
* A driver typically only advertises features that it is capable of offloading
* to its device.
*
- * The checksum related features are:
- *
- * NETIF_F_HW_CSUM - The driver (or its device) is able to compute one
- * IP (one's complement) checksum for any combination
- * of protocols or protocol layering. The checksum is
- * computed and set in a packet per the CHECKSUM_PARTIAL
- * interface (see below).
- *
- * NETIF_F_IP_CSUM - Driver (device) is only able to checksum plain
- * TCP or UDP packets over IPv4. These are specifically
- * unencapsulated packets of the form IPv4|TCP or
- * IPv4|UDP where the Protocol field in the IPv4 header
- * is TCP or UDP. The IPv4 header may contain IP options.
- * This feature cannot be set in features for a device
- * with NETIF_F_HW_CSUM also set. This feature is being
- * DEPRECATED (see below).
- *
- * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
- * TCP or UDP packets over IPv6. These are specifically
- * unencapsulated packets of the form IPv6|TCP or
- * IPv6|UDP where the Next Header field in the IPv6
- * header is either TCP or UDP. IPv6 extension headers
- * are not supported with this feature. This feature
- * cannot be set in features for a device with
- * NETIF_F_HW_CSUM also set. This feature is being
- * DEPRECATED (see below).
- *
- * NETIF_F_RXCSUM - Driver (device) performs receive checksum offload.
- * This flag is only used to disable the RX checksum
- * feature for a device. The stack will accept receive
- * checksum indication in packets received on a device
- * regardless of whether NETIF_F_RXCSUM is set.
- *
- * B. Checksumming of received packets by device. Indication of checksum
- * verification is set in skb->ip_summed. Possible values are:
- *
- * CHECKSUM_NONE:
+ * .. flat-table:: Checksum related device features
+ * :widths: 1 10
+ *
+ * * - %NETIF_F_HW_CSUM
+ * - The driver (or its device) is able to compute one
+ * IP (one's complement) checksum for any combination
+ * of protocols or protocol layering. The checksum is
+ * computed and set in a packet per the CHECKSUM_PARTIAL
+ * interface (see below).
+ *
+ * * - %NETIF_F_IP_CSUM
+ * - Driver (device) is only able to checksum plain
+ * TCP or UDP packets over IPv4. These are specifically
+ * unencapsulated packets of the form IPv4|TCP or
+ * IPv4|UDP where the Protocol field in the IPv4 header
+ * is TCP or UDP. The IPv4 header may contain IP options.
+ * This feature cannot be set in features for a device
+ * with NETIF_F_HW_CSUM also set. This feature is being
+ * DEPRECATED (see below).
+ *
+ * * - %NETIF_F_IPV6_CSUM
+ * - Driver (device) is only able to checksum plain
+ * TCP or UDP packets over IPv6. These are specifically
+ * unencapsulated packets of the form IPv6|TCP or
+ * IPv6|UDP where the Next Header field in the IPv6
+ * header is either TCP or UDP. IPv6 extension headers
+ * are not supported with this feature. This feature
+ * cannot be set in features for a device with
+ * NETIF_F_HW_CSUM also set. This feature is being
+ * DEPRECATED (see below).
+ *
+ * * - %NETIF_F_RXCSUM
+ * - Driver (device) performs receive checksum offload.
+ * This flag is only used to disable the RX checksum
+ * feature for a device. The stack will accept receive
+ * checksum indication in packets received on a device
+ * regardless of whether NETIF_F_RXCSUM is set.
+ *
+ * Checksumming of received packets by device
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Indication of checksum verification is set in &sk_buff.ip_summed.
+ * Possible values are:
+ *
+ * - %CHECKSUM_NONE
*
* Device did not checksum this packet e.g. due to lack of capabilities.
* The packet contains full (though not verified) checksum in packet but
* not in skb->csum. Thus, skb->csum is undefined in this case.
*
- * CHECKSUM_UNNECESSARY:
+ * - %CHECKSUM_UNNECESSARY
*
* The hardware you're dealing with doesn't calculate the full checksum
- * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
- * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
- * if their checksums are okay. skb->csum is still undefined in this case
+ * (as in %CHECKSUM_COMPLETE), but it does parse headers and verify checksums
+ * for specific protocols. For such packets it will set %CHECKSUM_UNNECESSARY
+ * if their checksums are okay. &sk_buff.csum is still undefined in this case
* though. A driver or device must never modify the checksum field in the
* packet even if checksum is verified.
*
- * CHECKSUM_UNNECESSARY is applicable to following protocols:
- * TCP: IPv6 and IPv4.
- * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
+ * %CHECKSUM_UNNECESSARY is applicable to following protocols:
+ *
+ * - TCP: IPv6 and IPv4.
+ * - UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
* zero UDP checksum for either IPv4 or IPv6, the networking stack
* may perform further validation in this case.
- * GRE: only if the checksum is present in the header.
- * SCTP: indicates the CRC in SCTP header has been validated.
- * FCOE: indicates the CRC in FC frame has been validated.
+ * - GRE: only if the checksum is present in the header.
+ * - SCTP: indicates the CRC in SCTP header has been validated.
+ * - FCOE: indicates the CRC in FC frame has been validated.
*
- * skb->csum_level indicates the number of consecutive checksums found in
- * the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
+ * &sk_buff.csum_level indicates the number of consecutive checksums found in
+ * the packet minus one that have been verified as %CHECKSUM_UNNECESSARY.
* For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
* and a device is able to verify the checksums for UDP (possibly zero),
- * GRE (checksum flag is set) and TCP, skb->csum_level would be set to
+ * GRE (checksum flag is set) and TCP, &sk_buff.csum_level would be set to
* two. If the device were only able to verify the UDP checksum and not
* GRE, either because it doesn't support GRE checksum or because GRE
* checksum is bad, skb->csum_level would be set to zero (TCP checksum is
* not considered in this case).
*
- * CHECKSUM_COMPLETE:
+ * - %CHECKSUM_COMPLETE
*
* This is the most generic way. The device supplied checksum of the _whole_
- * packet as seen by netif_rx() and fills in skb->csum. This means the
+ * packet as seen by netif_rx() and fills in &sk_buff.csum. This means the
* hardware doesn't need to parse L3/L4 headers to implement this.
*
* Notes:
+ *
* - Even if device supports only some protocols, but is able to produce
* skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
* - CHECKSUM_COMPLETE is not applicable to SCTP and FCoE protocols.
*
- * CHECKSUM_PARTIAL:
+ * - %CHECKSUM_PARTIAL
*
* A checksum is set up to be offloaded to a device as described in the
* output description for CHECKSUM_PARTIAL. This may occur on a packet
@@ -144,14 +157,18 @@
* packet that are after the checksum being offloaded are not considered to
* be verified.
*
- * C. Checksumming on transmit for non-GSO. The stack requests checksum offload
- * in the skb->ip_summed for a packet. Values are:
+ * Checksumming on transmit for non-GSO
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The stack requests checksum offload in the &sk_buff.ip_summed for a packet.
+ * Values are:
*
- * CHECKSUM_PARTIAL:
+ * - %CHECKSUM_PARTIAL
*
* The driver is required to checksum the packet as seen by hard_start_xmit()
- * from skb->csum_start up to the end, and to record/write the checksum at
- * offset skb->csum_start + skb->csum_offset. A driver may verify that the
+ * from &sk_buff.csum_start up to the end, and to record/write the checksum at
+ * offset &sk_buff.csum_start + &sk_buff.csum_offset.
+ * A driver may verify that the
* csum_start and csum_offset values are valid values given the length and
* offset of the packet, but it should not attempt to validate that the
* checksum refers to a legitimate transport layer checksum -- it is the
@@ -163,55 +180,66 @@
* checksum calculation to the device, or call skb_checksum_help (in the case
* that the device does not support offload for a particular checksum).
*
- * NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM are being deprecated in favor of
- * NETIF_F_HW_CSUM. New devices should use NETIF_F_HW_CSUM to indicate
+ * %NETIF_F_IP_CSUM and %NETIF_F_IPV6_CSUM are being deprecated in favor of
+ * %NETIF_F_HW_CSUM. New devices should use %NETIF_F_HW_CSUM to indicate
* checksum offload capability.
- * skb_csum_hwoffload_help() can be called to resolve CHECKSUM_PARTIAL based
+ * skb_csum_hwoffload_help() can be called to resolve %CHECKSUM_PARTIAL based
* on network device checksumming capabilities: if a packet does not match
- * them, skb_checksum_help or skb_crc32c_help (depending on the value of
- * csum_not_inet, see item D.) is called to resolve the checksum.
+ * them, skb_checksum_help() or skb_crc32c_help() (depending on the value of
+ * &sk_buff.csum_not_inet, see :ref:`crc`)
+ * is called to resolve the checksum.
*
- * CHECKSUM_NONE:
+ * - %CHECKSUM_NONE
*
* The skb was already checksummed by the protocol, or a checksum is not
* required.
*
- * CHECKSUM_UNNECESSARY:
+ * - %CHECKSUM_UNNECESSARY
*
* This has the same meaning as CHECKSUM_NONE for checksum offload on
* output.
*
- * CHECKSUM_COMPLETE:
+ * - %CHECKSUM_COMPLETE
+ *
* Not used in checksum output. If a driver observes a packet with this value
- * set in skbuff, it should treat the packet as if CHECKSUM_NONE were set.
- *
- * D. Non-IP checksum (CRC) offloads
- *
- * NETIF_F_SCTP_CRC - This feature indicates that a device is capable of
- * offloading the SCTP CRC in a packet. To perform this offload the stack
- * will set csum_start and csum_offset accordingly, set ip_summed to
- * CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication in
- * the skbuff that the CHECKSUM_PARTIAL refers to CRC32c.
- * A driver that supports both IP checksum offload and SCTP CRC32c offload
- * must verify which offload is configured for a packet by testing the
- * value of skb->csum_not_inet; skb_crc32c_csum_help is provided to resolve
- * CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1.
- *
- * NETIF_F_FCOE_CRC - This feature indicates that a device is capable of
- * offloading the FCOE CRC in a packet. To perform this offload the stack
- * will set ip_summed to CHECKSUM_PARTIAL and set csum_start and csum_offset
- * accordingly. Note that there is no indication in the skbuff that the
- * CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports
- * both IP checksum offload and FCOE CRC offload must verify which offload
- * is configured for a packet, presumably by inspecting packet headers.
- *
- * E. Checksumming on output with GSO.
- *
- * In the case of a GSO packet (skb_is_gso(skb) is true), checksum offload
+ * set in skbuff, it should treat the packet as if %CHECKSUM_NONE were set.
+ *
+ * .. _crc:
+ *
+ * Non-IP checksum (CRC) offloads
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * .. flat-table::
+ * :widths: 1 10
+ *
+ * * - %NETIF_F_SCTP_CRC
+ * - This feature indicates that a device is capable of
+ * offloading the SCTP CRC in a packet. To perform this offload the stack
+ * will set csum_start and csum_offset accordingly, set ip_summed to
+ * %CHECKSUM_PARTIAL and set csum_not_inet to 1, to provide an indication
+ * in the skbuff that the %CHECKSUM_PARTIAL refers to CRC32c.
+ * A driver that supports both IP checksum offload and SCTP CRC32c offload
+ * must verify which offload is configured for a packet by testing the
+ * value of &sk_buff.csum_not_inet; skb_crc32c_csum_help() is provided to
+ * resolve %CHECKSUM_PARTIAL on skbs where csum_not_inet is set to 1
+ * (a fallback sketch follows this table).
+ *
+ * * - %NETIF_F_FCOE_CRC
+ * - This feature indicates that a device is capable of offloading the FCOE
+ * CRC in a packet. To perform this offload the stack will set ip_summed
+ * to %CHECKSUM_PARTIAL and set csum_start and csum_offset
+ * accordingly. Note that there is no indication in the skbuff that the
+ * %CHECKSUM_PARTIAL refers to an FCOE checksum, so a driver that supports
+ * both IP checksum offload and FCOE CRC offload must verify which offload
+ * is configured for a packet, presumably by inspecting packet headers.
+ *
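+ * A hedged fallback sketch for when the device cannot handle a given
+ * packet, using the two helpers named in the rows above::
+ *
+ *	if (skb->csum_not_inet)
+ *		err = skb_crc32c_csum_help(skb);	// CRC32c, e.g. SCTP
+ *	else
+ *		err = skb_checksum_help(skb);		// internet checksum
+ *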
+ * Checksumming on output with GSO
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * In the case of a GSO packet (skb_is_gso() is true), checksum offload
* is implied by the SKB_GSO_* flags in gso_type. Most obviously, if the
- * gso_type is SKB_GSO_TCPV4 or SKB_GSO_TCPV6, TCP checksum offload as
+ * gso_type is %SKB_GSO_TCPV4 or %SKB_GSO_TCPV6, TCP checksum offload as
* part of the GSO operation is implied. If a checksum is being offloaded
- * with GSO then ip_summed is CHECKSUM_PARTIAL, and both csum_start and
+ * with GSO then ip_summed is %CHECKSUM_PARTIAL, and both csum_start and
* csum_offset are set to refer to the outermost checksum being offloaded
* (two offloaded checksums are possible with UDP encapsulation).
*/
@@ -228,6 +256,14 @@
#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X) \
((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+/* For X bytes available in skb->head, what is the minimal
+ * allocation needed, knowing struct skb_shared_info needs
+ * to be aligned.
+ */
+#define SKB_HEAD_ALIGN(X) (SKB_DATA_ALIGN(X) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
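+
+/* A hedged usage sketch: a hand-rolled head allocation reserves room for
+ * both the data and the aligned shared info in one go, e.g.
+ *
+ *	data = kmalloc(SKB_HEAD_ALIGN(size), GFP_ATOMIC);
+ */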
+
#define SKB_MAX_ORDER(X, ORDER) \
SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
@@ -238,7 +274,6 @@
SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
-struct ahash_request;
struct net_device;
struct scatterlist;
struct pipe_inode_info;
@@ -247,6 +282,7 @@ struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;
+struct ts_config;
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
@@ -258,8 +294,9 @@ struct nf_bridge_info {
u8 pkt_otherhost:1;
u8 in_prerouting:1;
u8 bridged_dnat:1;
+ u8 sabotage_in_done:1;
__u16 frag_max_size;
- struct net_device *physindev;
+ int physinif;
/* always valid & non-NULL from FORWARD on, for physdev match */
struct net_device *physoutdev;
@@ -283,16 +320,26 @@ struct nf_bridge_info {
* and read by ovs to recirc_id.
*/
struct tc_skb_ext {
- __u32 chain;
+ union {
+ u64 act_miss_cookie;
+ __u32 chain;
+ };
__u16 mru;
- bool post_ct;
+ __u16 zone;
+ u8 post_ct:1;
+ u8 post_ct_snat:1;
+ u8 post_ct_dnat:1;
+ u8 act_miss:1; /* Set if act_miss_cookie is used */
+ u8 l2_miss:1; /* Set by bridge upon FDB or MDB miss */
};
#endif
struct sk_buff_head {
- /* These two members must be first. */
- struct sk_buff *next;
- struct sk_buff *prev;
+ /* These two members must be first to match sk_buff. */
+ struct_group_tagged(sk_buff_list, list,
+ struct sk_buff *next;
+ struct sk_buff *prev;
+ );
__u32 qlen;
spinlock_t lock;
@@ -300,26 +347,22 @@ struct sk_buff_head {
struct sk_buff;
-/* To allow 64K frame to be packed as single skb without frag_list we
- * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
- * buffers which do not start on a page boundary.
- *
- * Since GRO uses frags we allocate at least 16 regardless of page
- * size.
- */
-#if (65536/PAGE_SIZE + 1) < 16
-#define MAX_SKB_FRAGS 16UL
-#else
-#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
+#ifndef CONFIG_MAX_SKB_FRAGS
+# define CONFIG_MAX_SKB_FRAGS 17
#endif
-extern int sysctl_max_skb_frags;
+
+#define MAX_SKB_FRAGS CONFIG_MAX_SKB_FRAGS
/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
* segment using its current segmentation instead.
*/
#define GSO_BY_FRAGS 0xFFFF
-typedef struct bio_vec skb_frag_t;
+typedef struct skb_frag {
+ netmem_ref netmem;
+ unsigned int len;
+ unsigned int offset;
+} skb_frag_t;
/**
* skb_frag_size() - Returns the size of a skb fragment
@@ -327,7 +370,7 @@ typedef struct bio_vec skb_frag_t;
*/
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
- return frag->bv_len;
+ return frag->len;
}
/**
@@ -337,7 +380,7 @@ static inline unsigned int skb_frag_size(const skb_frag_t *frag)
*/
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
- frag->bv_len = size;
+ frag->len = size;
}
/**
@@ -347,7 +390,7 @@ static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
*/
static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
- frag->bv_len += delta;
+ frag->len += delta;
}
/**
@@ -357,7 +400,7 @@ static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
*/
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
- frag->bv_len -= delta;
+ frag->len -= delta;
}
/**
@@ -377,7 +420,7 @@ static inline bool skb_frag_must_loop(struct page *p)
* skb_frag_foreach_page - loop over pages in a fragment
*
* @f: skb frag to operate on
- * @f_off: offset from start of f->bv_page
+ * @f_off: offset from start of f->netmem
* @f_len: length from f_off to loop over
* @p: (temp var) current page
* @p_off: (temp var) offset from start of current page,
@@ -400,12 +443,12 @@ static inline bool skb_frag_must_loop(struct page *p)
copied += p_len, p++, p_off = 0, \
p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \
-#define HAVE_HW_TIME_STAMP
-
/**
* struct skb_shared_hwtstamps - hardware time stamps
- * @hwtstamp: hardware time stamp transformed into duration
- * since arbitrary point in time
+ * @hwtstamp: hardware time stamp transformed into duration
+ * since arbitrary point in time
+ * @netdev_data: address/cookie of network device driver used as
+ * reference to actual hardware time stamp
*
* Software time stamps generated by ktime_get_real() are stored in
* skb->tstamp.
@@ -417,13 +460,16 @@ static inline bool skb_frag_must_loop(struct page *p)
* &skb_shared_info. Use skb_hwtstamps() to get a pointer.
*/
struct skb_shared_hwtstamps {
- ktime_t hwtstamp;
+ union {
+ ktime_t hwtstamp;
+ void *netdev_data;
+ };
};
/* Definitions for tx_flags in struct skb_shared_info */
enum {
/* generate hardware time stamp */
- SKBTX_HW_TSTAMP = 1 << 0,
+ SKBTX_HW_TSTAMP_NOBPF = 1 << 0,
/* generate software time stamp when queueing packet to NIC */
SKBTX_SW_TSTAMP = 1 << 1,
@@ -431,16 +477,27 @@ enum {
/* device driver is going to provide hardware time stamp */
SKBTX_IN_PROGRESS = 1 << 2,
- /* generate wifi status information (where possible) */
- SKBTX_WIFI_STATUS = 1 << 4,
+ /* generate software time stamp on packet tx completion */
+ SKBTX_COMPLETION_TSTAMP = 1 << 3,
+
+ /* determine hardware time stamp based on time or cycles */
+ SKBTX_HW_TSTAMP_NETDEV = 1 << 5,
/* generate software time stamp when entering packet scheduling */
SKBTX_SCHED_TSTAMP = 1 << 6,
+
+ /* used for bpf extension when a bpf program is loaded */
+ SKBTX_BPF = 1 << 7,
};
+#define SKBTX_HW_TSTAMP (SKBTX_HW_TSTAMP_NOBPF | SKBTX_BPF)
+
#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
- SKBTX_SCHED_TSTAMP)
-#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
+ SKBTX_SCHED_TSTAMP | \
+ SKBTX_BPF | \
+ SKBTX_COMPLETION_TSTAMP)
+#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | \
+ SKBTX_ANY_SW_TSTAMP)
/* Definitions for flags in struct skb_shared_info */
enum {
@@ -453,9 +510,30 @@ enum {
* all frags to avoid possible bad checksum
*/
SKBFL_SHARED_FRAG = BIT(1),
+
+ /* segment contains only zerocopy data and should not be
+ * charged to the kernel memory.
+ */
+ SKBFL_PURE_ZEROCOPY = BIT(2),
+
+ SKBFL_DONT_ORPHAN = BIT(3),
+
+ /* page references are managed by the ubuf_info, so it's safe to
+ * use frags only up until ubuf_info is released
+ */
+ SKBFL_MANAGED_FRAG_REFS = BIT(4),
};
#define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
+#define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY | \
+ SKBFL_DONT_ORPHAN | SKBFL_MANAGED_FRAG_REFS)
+
+struct ubuf_info_ops {
+ void (*complete)(struct sk_buff *, struct ubuf_info *,
+ bool zerocopy_success);
+ /* has to be compatible with skb_zcopy_set() */
+ int (*link_skb)(struct sk_buff *skb, struct ubuf_info *uarg);
+};
/*
* The callback notifies userspace to release buffers when skb DMA is done in
@@ -466,8 +544,14 @@ enum {
* The desc field is used to track userspace buffer index.
*/
struct ubuf_info {
- void (*callback)(struct sk_buff *, struct ubuf_info *,
- bool zerocopy_success);
+ const struct ubuf_info_ops *ops;
+ refcount_t refcnt;
+ u8 flags;
+};
+
+struct ubuf_info_msgzc {
+ struct ubuf_info ubuf;
+
union {
struct {
unsigned long desc;
@@ -480,8 +564,6 @@ struct ubuf_info {
u32 bytelen;
};
};
- refcount_t refcnt;
- u8 flags;
struct mmpin {
struct user_struct *user;
@@ -490,23 +572,20 @@ struct ubuf_info {
};
#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
+#define uarg_to_msgzc(ubuf_ptr) container_of((ubuf_ptr), struct ubuf_info_msgzc, \
+ ubuf)
int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);
-struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size);
-struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
- struct ubuf_info *uarg);
-
-void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
-
-void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
- bool success);
-
-int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
-int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
- struct msghdr *msg, int len,
- struct ubuf_info *uarg);
+/* Preserve some data across TX submission and completion.
+ *
+ * Note, this state is stored in the driver. Extending the layout
+ * might need some special care.
+ */
+struct xsk_tx_metadata_compl {
+ __u64 *tx_timestamp;
+};
/* This data is invariant across clones and lives at
* the end of the header data, ie. at skb->end.
@@ -520,7 +599,10 @@ struct skb_shared_info {
/* Warning: this field is not always filled in (UFO)! */
unsigned short gso_segs;
struct sk_buff *frag_list;
- struct skb_shared_hwtstamps hwtstamps;
+ union {
+ struct skb_shared_hwtstamps hwtstamps;
+ struct xsk_tx_metadata_compl xsk_meta;
+ };
unsigned int gso_type;
u32 tskey;
@@ -529,24 +611,49 @@ struct skb_shared_info {
*/
atomic_t dataref;
- /* Intermediate layers must ensure that destructor_arg
- * remains valid until skb destructor */
- void * destructor_arg;
+ union {
+ struct {
+ u32 xdp_frags_size;
+ u32 xdp_frags_truesize;
+ };
+
+ /*
+ * Intermediate layers must ensure that destructor_arg
+ * remains valid until skb destructor.
+ */
+ void *destructor_arg;
+ };
/* must be last field, see pskb_expand_head() */
skb_frag_t frags[MAX_SKB_FRAGS];
};
-/* We divide dataref into two halves. The higher 16 bits hold references
- * to the payload part of skb->data. The lower 16 bits hold references to
- * the entire skb->data. A clone of a headerless skb holds the length of
- * the header in skb->hdr_len.
- *
- * All users must obey the rule that the skb->data reference count must be
- * greater than or equal to the payload reference count.
- *
- * Holding a reference to the payload part means that the user does not
- * care about modifications to the header part of skb->data.
+/**
+ * DOC: dataref and headerless skbs
+ *
+ * Transport layers send out clones of payload skbs they hold for
+ * retransmissions. To allow lower layers of the stack to prepend their headers
+ * we split &skb_shared_info.dataref into two halves.
+ * The lower 16 bits count the overall number of references.
+ * The higher 16 bits indicate how many of the references are payload-only.
+ * skb_header_cloned() checks if skb is allowed to add / write the headers.
+ *
+ * The creator of the skb (e.g. TCP) marks its skb as &sk_buff.nohdr
+ * (via __skb_header_release()). Any clone created from marked skb will get
+ * &sk_buff.hdr_len populated with the available headroom.
+ * If there is only one clone in existence it's able to modify the headroom
+ * at will. The sequence of calls inside the transport layer is::
+ *
+ * <alloc skb>
+ * skb_reserve()
+ * __skb_header_release()
+ * skb_clone()
+ * // send the clone down the stack
+ *
+ * This is not a very generic construct and it depends on the transport layers
+ * doing the right thing. In practice there's usually only one payload-only skb.
+ * Having multiple payload-only skbs with different lengths of hdr_len is not
+ * possible. The payload-only skbs should never leave their owner.
*/
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
@@ -567,7 +674,7 @@ enum {
/* This indicates the tcp segment has CWR set. */
SKB_GSO_TCP_ECN = 1 << 2,
- SKB_GSO_TCP_FIXEDID = 1 << 3,
+ __SKB_GSO_TCP_FIXEDID = 1 << 3,
SKB_GSO_TCPV6 = 1 << 4,
@@ -598,6 +705,14 @@ enum {
SKB_GSO_UDP_L4 = 1 << 17,
SKB_GSO_FRAGLIST = 1 << 18,
+
+ SKB_GSO_TCP_ACCECN = 1 << 19,
+
+ /* These indirectly map onto the same netdev feature.
+ * If NETIF_F_TSO_MANGLEID is set it may mangle both inner and outer IDs.
+ */
+ SKB_GSO_TCP_FIXEDID = 1 << 30,
+ SKB_GSO_TCP_FIXEDID_INNER = 1 << 31,
};
#if BITS_PER_LONG > 32
@@ -610,6 +725,53 @@ typedef unsigned int sk_buff_data_t;
typedef unsigned char *sk_buff_data_t;
#endif
+enum skb_tstamp_type {
+ SKB_CLOCK_REALTIME,
+ SKB_CLOCK_MONOTONIC,
+ SKB_CLOCK_TAI,
+ __SKB_CLOCK_MAX = SKB_CLOCK_TAI,
+};
+
+/**
+ * DOC: Basic sk_buff geometry
+ *
+ * struct sk_buff itself is a metadata structure and does not hold any packet
+ * data. All the data is held in associated buffers.
+ *
+ * &sk_buff.head points to the main "head" buffer. The head buffer is divided
+ * into two parts:
+ *
+ * - data buffer, containing headers and sometimes payload;
+ * this is the part of the skb operated on by the common helpers
+ * such as skb_put() or skb_pull();
+ * - shared info (struct skb_shared_info) which holds an array of pointers
+ * to read-only data in the (page, offset, length) format.
+ *
+ * Optionally &skb_shared_info.frag_list may point to another skb.
+ *
+ * Basic diagram may look like this::
+ *
+ * ---------------
+ * | sk_buff |
+ * ---------------
+ * ,--------------------------- + head
+ * / ,----------------- + data
+ * / / ,----------- + tail
+ * | | | , + end
+ * | | | |
+ * v v v v
+ * -----------------------------------------------
+ * | headroom | data | tailroom | skb_shared_info |
+ * -----------------------------------------------
+ * + [page frag]
+ * + [page frag]
+ * + [page frag]
+ * + [page frag] ---------
+ * + frag_list --> | sk_buff |
+ * ---------
+ *
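+ * A hedged construction sketch using the helpers mentioned above::
+ *
+ *	skb = alloc_skb(size, GFP_KERNEL);	// head == data == tail
+ *	skb_reserve(skb, headroom);		// data and tail move forward
+ *	skb_put(skb, len);			// tail moves on, len grows
+ *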
+ */
+
/**
* struct sk_buff - socket buffer
* @next: Next buffer in list
@@ -619,14 +781,12 @@ typedef unsigned char *sk_buff_data_t;
* for retransmit timer
* @rbnode: RB tree node, alternative to next/prev for netem/tcp
* @list: queue head
+ * @ll_node: anchor in an llist (eg socket defer_list)
* @sk: Socket we are owned by
- * @ip_defrag_offset: (aka @sk) alternate use of @sk, used in
- * fragmentation management
* @dev: Device we arrived on/are leaving by
* @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
* @cb: Control buffer. Free for use by every layer. Put private vars here
* @_skb_refdst: destination entry (with norefcount bit)
- * @sp: the security path, used for xfrm
* @len: Length of actual data
* @data_len: Data length
* @mac_len: Length of link layer header
@@ -651,6 +811,7 @@ typedef unsigned char *sk_buff_data_t;
* @tc_at_ingress: used within tc_classify to distinguish in/egress
* @redirected: packet was redirected by packet classifier
* @from_ingress: packet was redirected from the ingress path
+ * @nf_skip_egress: packet shall skip nf egress - see netfilter_netdev.h
* @peeked: this packet has been seen already, so stats have been
* done for it, don't do them again
* @nf_trace: netfilter packet trace flag
@@ -659,7 +820,6 @@ typedef unsigned char *sk_buff_data_t;
* @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
* @_sk_redir: socket redirection information for skmsg
* @_nfct: Associated connection, if any (with nfctinfo bits)
- * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @skb_iif: ifindex of device we arrived on
* @tc_index: Traffic control index
* @hash: the packet hash
@@ -667,6 +827,8 @@ typedef unsigned char *sk_buff_data_t;
* @head_frag: skb was allocated from page fragments,
* not allocated by kmalloc() or vmalloc().
* @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
+ * @pp_recycle: mark the packet for recycling instead of freeing (implies
+ * page_pool support in the driver)
* @active_extensions: active extensions (skb_ext_id types)
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
@@ -684,15 +846,21 @@ typedef unsigned char *sk_buff_data_t;
* @csum_level: indicates the number of consecutive checksums found in
* the packet minus one that have been verified as
* CHECKSUM_UNNECESSARY (max 3)
+ * @unreadable: indicates that at least one of the fragments in this skb
+ * is unreadable.
* @dst_pending_confirm: need to confirm neighbour
* @decrypted: Decrypted SKB
+ * @slow_gro: state present at GRO time, slower prepare step required
+ * @tstamp_type: when set, skb->tstamp is a delivery time and this field
+ * names the clock base of skb->tstamp (see enum skb_tstamp_type).
* @napi_id: id of the NAPI struct this skb came from
* @sender_cpu: (aka @napi_id) source CPU in XPS
+ * @alloc_cpu: CPU which did the skb allocation.
* @secmark: security marking
* @mark: Generic packet mark
* @reserved_tailroom: (aka @mark) number of bytes of free space available
* at the tail of an sk_buff
- * @vlan_present: VLAN tag is present
+ * @vlan_all: vlan fields (proto & tci)
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
* @inner_protocol: Protocol (encapsulation)
@@ -717,7 +885,7 @@ typedef unsigned char *sk_buff_data_t;
struct sk_buff {
union {
struct {
- /* These two members must be first. */
+ /* These two members must be first to match sk_buff_head. */
struct sk_buff *next;
struct sk_buff *prev;
@@ -732,12 +900,10 @@ struct sk_buff {
};
struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
struct list_head list;
+ struct llist_node ll_node;
};
- union {
- struct sock *sk;
- int ip_defrag_offset;
- };
+ struct sock *sk;
union {
ktime_t tstamp;
@@ -781,7 +947,7 @@ struct sk_buff {
#else
#define CLONED_MASK 1
#endif
-#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
+#define CLONED_OFFSET offsetof(struct sk_buff, __cloned_offset)
/* private: */
__u8 __cloned_offset[0];
@@ -791,85 +957,85 @@ struct sk_buff {
fclone:2,
peeked:1,
head_frag:1,
- pfmemalloc:1;
+ pfmemalloc:1,
+ pp_recycle:1; /* page_pool recycle indicator */
#ifdef CONFIG_SKB_EXTENSIONS
__u8 active_extensions;
#endif
- /* fields enclosed in headers_start/headers_end are copied
+
+ /* Fields enclosed in headers group are copied
* using a single memcpy() in __copy_skb_header()
*/
- /* private: */
- __u32 headers_start[0];
- /* public: */
-
-/* if you move pkt_type around you also must adapt those constants */
-#ifdef __BIG_ENDIAN_BITFIELD
-#define PKT_TYPE_MAX (7 << 5)
-#else
-#define PKT_TYPE_MAX 7
-#endif
-#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
+ struct_group(headers,
/* private: */
__u8 __pkt_type_offset[0];
/* public: */
- __u8 pkt_type:3;
+ __u8 pkt_type:3; /* see PKT_TYPE_MAX */
__u8 ignore_df:1;
- __u8 nf_trace:1;
+ __u8 dst_pending_confirm:1;
__u8 ip_summed:2;
__u8 ooo_okay:1;
+ /* private: */
+ __u8 __mono_tc_offset[0];
+ /* public: */
+ __u8 tstamp_type:2; /* See skb_tstamp_type */
+#ifdef CONFIG_NET_XGRESS
+ __u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */
+ __u8 tc_skip_classify:1;
+#endif
+ __u8 remcsum_offload:1;
+ __u8 csum_complete_sw:1;
+ __u8 csum_level:2;
+ __u8 inner_protocol_type:1;
+
__u8 l4_hash:1;
__u8 sw_hash:1;
+#ifdef CONFIG_WIRELESS
__u8 wifi_acked_valid:1;
__u8 wifi_acked:1;
+#endif
__u8 no_fcs:1;
/* Indicates the inner headers are valid in the skbuff. */
__u8 encapsulation:1;
__u8 encap_hdr_csum:1;
__u8 csum_valid:1;
-
-#ifdef __BIG_ENDIAN_BITFIELD
-#define PKT_VLAN_PRESENT_BIT 7
-#else
-#define PKT_VLAN_PRESENT_BIT 0
-#endif
-#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset)
- /* private: */
- __u8 __pkt_vlan_present_offset[0];
- /* public: */
- __u8 vlan_present:1;
- __u8 csum_complete_sw:1;
- __u8 csum_level:2;
- __u8 csum_not_inet:1;
- __u8 dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
+#if IS_ENABLED(CONFIG_IP_VS)
__u8 ipvs_property:1;
- __u8 inner_protocol_type:1;
- __u8 remcsum_offload:1;
+#endif
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
+ __u8 nf_trace:1;
+#endif
#ifdef CONFIG_NET_SWITCHDEV
__u8 offload_fwd_mark:1;
__u8 offload_l3_fwd_mark:1;
#endif
-#ifdef CONFIG_NET_CLS_ACT
- __u8 tc_skip_classify:1;
- __u8 tc_at_ingress:1;
-#endif
-#ifdef CONFIG_NET_REDIRECT
__u8 redirected:1;
+#ifdef CONFIG_NET_REDIRECT
__u8 from_ingress:1;
#endif
-#ifdef CONFIG_TLS_DEVICE
+#ifdef CONFIG_NETFILTER_EGRESS
+ __u8 nf_skip_egress:1;
+#endif
+#ifdef CONFIG_SKB_DECRYPTED
__u8 decrypted:1;
#endif
-
-#ifdef CONFIG_NET_SCHED
+ __u8 slow_gro:1;
+#if IS_ENABLED(CONFIG_IP_SCTP)
+ __u8 csum_not_inet:1;
+#endif
+ __u8 unreadable:1;
+#if defined(CONFIG_NET_SCHED) || defined(CONFIG_NET_XGRESS)
__u16 tc_index; /* traffic control index */
#endif
+ u16 alloc_cpu;
+
union {
__wsum csum;
struct {
@@ -880,8 +1046,13 @@ struct sk_buff {
__u32 priority;
int skb_iif;
__u32 hash;
- __be16 vlan_proto;
- __u16 vlan_tci;
+ union {
+ u32 vlan_all;
+ struct {
+ __be16 vlan_proto;
+ __u16 vlan_tci;
+ };
+ };
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
union {
unsigned int napi_id;
@@ -915,9 +1086,7 @@ struct sk_buff {
u64 kcov_handle;
#endif
- /* private: */
- __u32 headers_end[0];
- /* public: */
+ ); /* end headers group */
/* These elements must be at the end, see alloc_skb() for details. */
sk_buff_data_t tail;
@@ -928,11 +1097,32 @@ struct sk_buff {
refcount_t users;
#ifdef CONFIG_SKB_EXTENSIONS
- /* only useable after checking ->active_extensions != 0 */
+ /* only usable after checking ->active_extensions != 0 */
struct skb_ext *extensions;
#endif
};
+/* if you move pkt_type around you also must adapt those constants */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_TYPE_MAX (7 << 5)
+#else
+#define PKT_TYPE_MAX 7
+#endif
+#define PKT_TYPE_OFFSET offsetof(struct sk_buff, __pkt_type_offset)
+
+/* if you move tc_at_ingress or tstamp_type
+ * around, you also must adapt these constants.
+ */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define SKB_TSTAMP_TYPE_MASK (3 << 6)
+#define SKB_TSTAMP_TYPE_RSHIFT (6)
+#define TC_AT_INGRESS_MASK (1 << 5)
+#else
+#define SKB_TSTAMP_TYPE_MASK (3)
+#define TC_AT_INGRESS_MASK (1 << 2)
+#endif
+#define SKB_BF_MONO_TC_OFFSET offsetof(struct sk_buff, __mono_tc_offset)
+
#ifdef __KERNEL__
/*
* Handling routines are only of interest to the kernel
@@ -962,7 +1152,7 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb)
* skb_dst - returns skb dst_entry
* @skb: buffer
*
- * Returns skb dst_entry, regardless of reference taken or not.
+ * Returns: skb dst_entry, regardless of reference taken or not.
*/
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
@@ -975,6 +1165,45 @@ static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}
+static inline void skb_dst_check_unset(struct sk_buff *skb)
+{
+ DEBUG_NET_WARN_ON_ONCE((skb->_skb_refdst & SKB_DST_PTRMASK) &&
+ !(skb->_skb_refdst & SKB_DST_NOREF));
+}
+
+/**
+ * skb_dstref_steal() - return current dst_entry value and clear it
+ * @skb: buffer
+ *
+ * Resets skb dst_entry without adjusting its reference count. Useful in
+ * cases where dst_entry needs to be temporarily reset and restored.
+ * Note that the returned value cannot be used directly because it
+ * might contain the SKB_DST_NOREF bit.
+ *
+ * When in doubt, prefer skb_dst_drop() over skb_dstref_steal() to correctly
+ * handle dst_entry reference counting.
+ *
+ * Returns: original skb dst_entry.
+ */
+static inline unsigned long skb_dstref_steal(struct sk_buff *skb)
+{
+ unsigned long refdst = skb->_skb_refdst;
+
+ skb->_skb_refdst = 0;
+ return refdst;
+}
+
+/**
+ * skb_dstref_restore() - restore skb dst_entry removed via skb_dstref_steal()
+ * @skb: buffer
+ * @refdst: dst entry from a call to skb_dstref_steal()
+ */
+static inline void skb_dstref_restore(struct sk_buff *skb, unsigned long refdst)
+{
+ skb_dst_check_unset(skb);
+ skb->_skb_refdst = refdst;
+}
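+
+/* A hedged usage sketch, e.g. around a call that must not observe the
+ * dst (the callee name is purely illustrative):
+ *
+ *	unsigned long refdst = skb_dstref_steal(skb);
+ *
+ *	helper_that_must_not_see_dst(skb);
+ *	skb_dstref_restore(skb, refdst);
+ */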
+
/**
* skb_dst_set - sets skb dst
* @skb: buffer
@@ -985,6 +1214,8 @@ static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
*/
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
+ skb_dst_check_unset(skb);
+ skb->slow_gro |= !!dst;
skb->_skb_refdst = (unsigned long)dst;
}
@@ -1000,7 +1231,9 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
*/
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
+ skb_dst_check_unset(skb);
WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
+ skb->slow_gro |= !!dst;
skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}
@@ -1013,15 +1246,6 @@ static inline bool skb_dst_is_noref(const struct sk_buff *skb)
return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}
-/**
- * skb_rtable - Returns the skb &rtable
- * @skb: buffer
- */
-static inline struct rtable *skb_rtable(const struct sk_buff *skb)
-{
- return (struct rtable *)skb_dst(skb);
-}
-
/* For mangling skb->pkt_type from user space side from applications
* such as nft, tc, etc, we only allow a conservative subset of
* possible pkt_types to be set.
@@ -1044,17 +1268,26 @@ static inline unsigned int skb_napi_id(const struct sk_buff *skb)
#endif
}
+static inline bool skb_wifi_acked_valid(const struct sk_buff *skb)
+{
+#ifdef CONFIG_WIRELESS
+ return skb->wifi_acked_valid;
+#else
+ return false;
+#endif
+}
+
/**
* skb_unref - decrement the skb's reference count
* @skb: buffer
*
- * Returns true if we can free the skb.
+ * Returns: true if we can free the skb.
*/
static inline bool skb_unref(struct sk_buff *skb)
{
if (unlikely(!skb))
return false;
- if (likely(refcount_read(&skb->users) == 1))
+ if (!IS_ENABLED(CONFIG_DEBUG_NET) && likely(refcount_read(&skb->users) == 1))
smp_rmb();
else if (likely(!refcount_dec_and_test(&skb->users)))
return false;
@@ -1062,12 +1295,53 @@ static inline bool skb_unref(struct sk_buff *skb)
return true;
}
+static inline bool skb_data_unref(const struct sk_buff *skb,
+ struct skb_shared_info *shinfo)
+{
+ int bias;
+
+ if (!skb->cloned)
+ return true;
+
+ bias = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
+
+ if (atomic_read(&shinfo->dataref) == bias)
+ smp_rmb();
+ else if (atomic_sub_return(bias, &shinfo->dataref))
+ return false;
+
+ return true;
+}
+
+void __fix_address sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb,
+ enum skb_drop_reason reason);
+
+static inline void
+kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
+{
+ sk_skb_reason_drop(NULL, skb, reason);
+}
+
+/**
+ * kfree_skb - free an sk_buff with 'NOT_SPECIFIED' reason
+ * @skb: buffer to free
+ */
+static inline void kfree_skb(struct sk_buff *skb)
+{
+ kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
+}
+
void skb_release_head_state(struct sk_buff *skb);
-void kfree_skb(struct sk_buff *skb);
-void kfree_skb_list(struct sk_buff *segs);
+void kfree_skb_list_reason(struct sk_buff *segs,
+ enum skb_drop_reason reason);
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);
+static inline void kfree_skb_list(struct sk_buff *segs)
+{
+ kfree_skb_list_reason(segs, SKB_DROP_REASON_NOT_SPECIFIED);
+}
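+
+/* A hedged usage sketch: prefer an explicit reason over plain kfree_skb()
+ * so drop-monitoring tools can attribute the loss, e.g.
+ *
+ *	kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
+ */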
+
#ifdef CONFIG_TRACEPOINTS
void consume_skb(struct sk_buff *skb);
#else
@@ -1079,7 +1353,6 @@ static inline void consume_skb(struct sk_buff *skb)
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
-extern struct kmem_cache *skbuff_head_cache;
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
@@ -1091,8 +1364,11 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
void *data, unsigned int frag_size);
+void skb_attempt_defer_free(struct sk_buff *skb);
+u32 napi_skb_cache_get_bulk(void **skbs, u32 n);
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);
+struct sk_buff *slab_build_skb(void *data);
/**
* alloc_skb - allocate a network buffer
@@ -1128,9 +1404,9 @@ struct sk_buff_fclones {
* @sk: socket
* @skb: buffer
*
- * Returns true if skb is a fast clone, and its clone is not freed.
+ * Returns: true if skb is a fast clone, and its clone is not freed.
* Some drivers call skb_orphan() in their ndo_start_xmit(),
- * so we also check that this didnt happen.
+ * so we also check that this didn't happen.
*/
static inline bool skb_fclone_busy(const struct sock *sk,
const struct sk_buff *skb)
@@ -1174,6 +1450,7 @@ static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
unsigned int headroom);
+struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
int newtailroom, gfp_t priority);
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
@@ -1201,7 +1478,7 @@ static inline int skb_pad(struct sk_buff *skb, int pad)
#define dev_kfree_skb(a) consume_skb(a)
int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
- int offset, size_t size);
+ int offset, size_t size, size_t max_frags);
struct skb_seq_state {
__u32 lower_offset;
@@ -1219,6 +1496,7 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);
+int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len);
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
unsigned int to, struct ts_config *config);
@@ -1290,27 +1568,27 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
__skb_set_hash(skb, hash, true, is_l4);
}
-void __skb_get_hash(struct sk_buff *skb);
-u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
-u32 skb_get_poff(const struct sk_buff *skb);
-u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
- const struct flow_keys_basic *keys, int hlen);
-__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
- const void *data, int hlen_proto);
+u32 __skb_get_hash_symmetric_net(const struct net *net, const struct sk_buff *skb);
-static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
- int thoff, u8 ip_proto)
+static inline u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
{
- return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+ return __skb_get_hash_symmetric_net(NULL, skb);
}
+void __skb_get_hash_net(const struct net *net, struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
+ const struct flow_keys_basic *keys, int hlen);
+__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+ const void *data, int hlen_proto);
+
void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
const struct flow_dissector_key *key,
unsigned int key_count);
struct bpf_flow_dissector;
-bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
- __be16 proto, int nhoff, int hlen, unsigned int flags);
+u32 bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
+ __be16 proto, int nhoff, int hlen, unsigned int flags);
bool __skb_flow_dissect(const struct net *net,
const struct sk_buff *skb,
@@ -1360,7 +1638,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
u16 *ctinfo_map, size_t mapsize,
- bool post_ct);
+ bool post_ct, u16 zone);
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
@@ -1370,10 +1648,18 @@ void skb_flow_dissect_hash(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container);
+static inline __u32 skb_get_hash_net(const struct net *net, struct sk_buff *skb)
+{
+ if (!skb->l4_hash && !skb->sw_hash)
+ __skb_get_hash_net(net, skb);
+
+ return skb->hash;
+}
+
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
if (!skb->l4_hash && !skb->sw_hash)
- __skb_get_hash(skb);
+ __skb_get_hash_net(NULL, skb);
return skb->hash;
}
@@ -1405,10 +1691,29 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
to->l4_hash = from->l4_hash;
};
+static inline int skb_cmp_decrypted(const struct sk_buff *skb1,
+ const struct sk_buff *skb2)
+{
+#ifdef CONFIG_SKB_DECRYPTED
+ return skb2->decrypted - skb1->decrypted;
+#else
+ return 0;
+#endif
+}
+
+static inline bool skb_is_decrypted(const struct sk_buff *skb)
+{
+#ifdef CONFIG_SKB_DECRYPTED
+ return skb->decrypted;
+#else
+ return false;
+#endif
+}
+
static inline void skb_copy_decrypted(struct sk_buff *to,
const struct sk_buff *from)
{
-#ifdef CONFIG_TLS_DEVICE
+#ifdef CONFIG_SKB_DECRYPTED
to->decrypted = from->decrypted;
#endif
}
@@ -1423,6 +1728,11 @@ static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
return skb->end;
}
+
+static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
+{
+ skb->end = offset;
+}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
@@ -1433,8 +1743,42 @@ static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
return skb->end - skb->head;
}
+
+static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
+{
+ skb->end = skb->head + offset;
+}
#endif
+extern const struct ubuf_info_ops msg_zerocopy_ubuf_ops;
+
+struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
+ struct ubuf_info *uarg, bool devmem);
+
+void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
+
+struct net_devmem_dmabuf_binding;
+
+int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb, struct iov_iter *from,
+ size_t length,
+ struct net_devmem_dmabuf_binding *binding);
+
+int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
+
+static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb,
+ struct msghdr *msg, int len)
+{
+ return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len,
+ NULL);
+}
+
+int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
+ struct msghdr *msg, int len,
+ struct ubuf_info *uarg,
+ struct net_devmem_dmabuf_binding *binding);
+
/* Internal */
#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
@@ -1450,6 +1794,22 @@ static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
return is_zcopy ? skb_uarg(skb) : NULL;
}
+static inline bool skb_zcopy_pure(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY;
+}
+
+static inline bool skb_zcopy_managed(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->flags & SKBFL_MANAGED_FRAG_REFS;
+}
+
+static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1,
+ const struct sk_buff *skb2)
+{
+ return skb_zcopy_pure(skb1) == skb_zcopy_pure(skb2);
+}
+
static inline void net_zcopy_get(struct ubuf_info *uarg)
{
refcount_inc(&uarg->refcnt);
@@ -1492,13 +1852,13 @@ static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
static inline void net_zcopy_put(struct ubuf_info *uarg)
{
if (uarg)
- uarg->callback(NULL, uarg, true);
+ uarg->ops->complete(NULL, uarg, true);
}
static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
if (uarg) {
- if (uarg->callback == msg_zerocopy_callback)
+ if (uarg->ops == &msg_zerocopy_ubuf_ops)
msg_zerocopy_put_abort(uarg, have_uref);
else if (have_uref)
net_zcopy_put(uarg);
@@ -1512,17 +1872,38 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
if (uarg) {
if (!skb_zcopy_is_nouarg(skb))
- uarg->callback(skb, uarg, zerocopy_success);
+ uarg->ops->complete(skb, uarg, zerocopy_success);
- skb_shinfo(skb)->flags &= ~SKBFL_ZEROCOPY_FRAG;
+ skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY;
}
}
+void __skb_zcopy_downgrade_managed(struct sk_buff *skb);
+
+static inline void skb_zcopy_downgrade_managed(struct sk_buff *skb)
+{
+ if (unlikely(skb_zcopy_managed(skb)))
+ __skb_zcopy_downgrade_managed(skb);
+}
+
+/* Return true if frags in this skb are readable by the host. */
+static inline bool skb_frags_readable(const struct sk_buff *skb)
+{
+ return !skb->unreadable;
+}
+
static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
skb->next = NULL;
}
+static inline void skb_poison_list(struct sk_buff *skb)
+{
+#ifdef CONFIG_DEBUG_NET
+ skb->next = SKB_LIST_POISON_NEXT;
+#endif
+}
+
/* Iterate through singly-linked GSO fragments of an skb. */
#define skb_list_walk_safe(first, skb, next_skb) \
for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
@@ -1661,6 +2042,22 @@ static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
return 0;
}
+/* This variant of skb_unclone() makes sure skb->truesize
+ * and skb_end_offset() are not changed, whenever a new skb->head is needed.
+ *
+ * Indeed there is no guarantee that two kmalloc(X) calls return chunks
+ * with the same ksize() when various debugging features are in place.
+ */
+int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri);
+static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
+{
+ might_sleep_if(gfpflags_allow_blocking(pri));
+
+ if (skb_cloned(skb))
+ return __skb_unclone_keeptruesize(skb, pri);
+ return 0;
+}
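+
+/* A hedged usage sketch before editing header bytes in place:
+ *
+ *	err = skb_unclone_keeptruesize(skb, GFP_ATOMIC);
+ *	if (err)
+ *		return err;
+ */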
+
/**
* skb_header_cloned - is the header a clone
* @skb: buffer to check
@@ -1691,8 +2088,10 @@ static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
}
/**
- * __skb_header_release - release reference to header
- * @skb: buffer to operate on
+ * __skb_header_release() - allow clones to use the headroom
+ * @skb: buffer to operate on
+ *
+ * See "DOC: dataref and headerless skbs".
*/
static inline void __skb_header_release(struct sk_buff *skb)
{
@@ -1745,7 +2144,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
* Copy shared buffers into a new sk_buff. We effectively do COW on
* packets to handle cases where we have a local reader and forward
* and a couple of other messy ones. The normal one is tcpdumping
- * a packet thats being forwarded.
+ * a packet that's being forwarded.
*/
/**
@@ -1928,9 +2327,9 @@ static inline void __skb_insert(struct sk_buff *newsk,
*/
WRITE_ONCE(newsk->next, next);
WRITE_ONCE(newsk->prev, prev);
- WRITE_ONCE(next->prev, newsk);
- WRITE_ONCE(prev->next, newsk);
- list->qlen++;
+ WRITE_ONCE(((struct sk_buff_list *)next)->prev, newsk);
+ WRITE_ONCE(((struct sk_buff_list *)prev)->next, newsk);
+ WRITE_ONCE(list->qlen, list->qlen + 1);
}
static inline void __skb_queue_splice(const struct sk_buff_head *list,
@@ -2025,7 +2424,7 @@ static inline void __skb_queue_after(struct sk_buff_head *list,
struct sk_buff *prev,
struct sk_buff *newsk)
{
- __skb_insert(newsk, prev, prev->next, list);
+ __skb_insert(newsk, prev, ((struct sk_buff_list *)prev)->next, list);
}
void skb_append(struct sk_buff *old, struct sk_buff *newsk,
@@ -2035,7 +2434,7 @@ static inline void __skb_queue_before(struct sk_buff_head *list,
struct sk_buff *next,
struct sk_buff *newsk)
{
- __skb_insert(newsk, next->prev, next, list);
+ __skb_insert(newsk, ((struct sk_buff_list *)next)->prev, next, list);
}
/**
@@ -2148,11 +2547,56 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
return skb_headlen(skb) + __skb_pagelen(skb);
}
+static inline void skb_frag_fill_netmem_desc(skb_frag_t *frag,
+ netmem_ref netmem, int off,
+ int size)
+{
+ frag->netmem = netmem;
+ frag->offset = off;
+ skb_frag_size_set(frag, size);
+}
+
+static inline void skb_frag_fill_page_desc(skb_frag_t *frag,
+ struct page *page,
+ int off, int size)
+{
+ skb_frag_fill_netmem_desc(frag, page_to_netmem(page), off, size);
+}
+
+static inline void __skb_fill_netmem_desc_noacc(struct skb_shared_info *shinfo,
+ int i, netmem_ref netmem,
+ int off, int size)
+{
+ skb_frag_t *frag = &shinfo->frags[i];
+
+ skb_frag_fill_netmem_desc(frag, netmem, off, size);
+}
+
+static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,
+ int i, struct page *page,
+ int off, int size)
+{
+ __skb_fill_netmem_desc_noacc(shinfo, i, page_to_netmem(page), off,
+ size);
+}
+
+/**
+ * skb_len_add - adds a number to len fields of skb
+ * @skb: buffer to add len to
+ * @delta: number of bytes to add
+ */
+static inline void skb_len_add(struct sk_buff *skb, int delta)
+{
+ skb->len += delta;
+ skb->data_len += delta;
+ skb->truesize += delta;
+}
+
/**
- * __skb_fill_page_desc - initialise a paged fragment in an skb
+ * __skb_fill_netmem_desc - initialise a fragment in an skb
* @skb: buffer containing fragment to be initialised
- * @i: paged fragment index to initialise
- * @page: the page to use for this fragment
+ * @i: fragment index to initialise
+ * @netmem: the netmem to use for this fragment
* @off: the offset to the data within @page
* @size: the length of the data
*
@@ -2161,23 +2605,40 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
*
* Does not take any additional reference on the fragment.
*/
-static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
- struct page *page, int off, int size)
+static inline void __skb_fill_netmem_desc(struct sk_buff *skb, int i,
+ netmem_ref netmem, int off, int size)
{
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct page *page;
- /*
- * Propagate page pfmemalloc to the skb if we can. The problem is
+ __skb_fill_netmem_desc_noacc(skb_shinfo(skb), i, netmem, off, size);
+
+ if (netmem_is_net_iov(netmem)) {
+ skb->unreadable = true;
+ return;
+ }
+
+ page = netmem_to_page(netmem);
+
+ /* Propagate page pfmemalloc to the skb if we can. The problem is
* that not all callers have unique ownership of the page but rely
* on page_is_pfmemalloc doing the right thing(tm).
*/
- frag->bv_page = page;
- frag->bv_offset = off;
- skb_frag_size_set(frag, size);
-
page = compound_head(page);
if (page_is_pfmemalloc(page))
- skb->pfmemalloc = true;
+ skb->pfmemalloc = true;
+}
+
+static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
+ struct page *page, int off, int size)
+{
+ __skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size);
+}
+
+static inline void skb_fill_netmem_desc(struct sk_buff *skb, int i,
+ netmem_ref netmem, int off, int size)
+{
+ __skb_fill_netmem_desc(skb, i, netmem, off, size);
+ skb_shinfo(skb)->nr_frags = i + 1;
}
/**
@@ -2197,12 +2658,40 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
struct page *page, int off, int size)
{
- __skb_fill_page_desc(skb, i, page, off, size);
- skb_shinfo(skb)->nr_frags = i + 1;
+ skb_fill_netmem_desc(skb, i, page_to_netmem(page), off, size);
+}
+
+/**
+ * skb_fill_page_desc_noacc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+ * @i: paged fragment index to initialise
+ * @page: the page to use for this fragment
+ * @off: the offset to the data within @page
+ * @size: the length of the data
+ *
+ * Variant of skb_fill_page_desc() which does not deal with
+ * pfmemalloc, if page is not owned by us.
+ */
+static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i,
+ struct page *page, int off,
+ int size)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ __skb_fill_page_desc_noacc(shinfo, i, page, off, size);
+ shinfo->nr_frags = i + 1;
}
-void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
- int size, unsigned int truesize);
+void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
+ int off, int size, unsigned int truesize);
+
+static inline void skb_add_rx_frag(struct sk_buff *skb, int i,
+ struct page *page, int off, int size,
+ unsigned int truesize)
+{
+ skb_add_rx_frag_netmem(skb, i, page_to_netmem(page), off, size,
+ truesize);
+}
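+
+/* A hedged RX sketch: attach a freshly filled page as the next fragment,
+ * assuming an order-0 page so the truesize is PAGE_SIZE:
+ *
+ *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, len,
+ *			PAGE_SIZE);
+ */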
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
unsigned int truesize);
@@ -2244,6 +2733,20 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+static inline void skb_assert_len(struct sk_buff *skb)
+{
+#ifdef CONFIG_DEBUG_NET
+ if (WARN_ONCE(!skb->len, "%s\n", __func__))
+ DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
+#endif /* CONFIG_DEBUG_NET */
+}
+
+#if defined(CONFIG_FAIL_SKB_REALLOC)
+void skb_might_realloc(struct sk_buff *skb);
+#else
+static inline void skb_might_realloc(struct sk_buff *skb) {}
+#endif
+
/*
* Add data to an sk_buff
*/
@@ -2307,6 +2810,8 @@ static inline void skb_put_u8(struct sk_buff *skb, u8 val)
void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
skb->data -= len;
skb->len += len;
return skb->data;
@@ -2315,8 +2820,17 @@ static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
skb->len -= len;
- BUG_ON(skb->len < skb->data_len);
+ if (unlikely(skb->len < skb->data_len)) {
+#if defined(CONFIG_DEBUG_NET)
+ skb->len += len;
+ pr_err("__skb_pull(len=%u)\n", len);
+ skb_dump(KERN_ERR, skb, false);
+#endif
+ BUG();
+ }
return skb->data += len;
}
@@ -2325,29 +2839,40 @@ static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
+void *skb_pull_data(struct sk_buff *skb, size_t len);
+
void *__pskb_pull_tail(struct sk_buff *skb, int delta);
-static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
+static inline enum skb_drop_reason
+pskb_may_pull_reason(struct sk_buff *skb, unsigned int len)
{
- if (len > skb_headlen(skb) &&
- !__pskb_pull_tail(skb, len - skb_headlen(skb)))
- return NULL;
- skb->len -= len;
- return skb->data += len;
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+ skb_might_realloc(skb);
+
+ if (likely(len <= skb_headlen(skb)))
+ return SKB_NOT_DROPPED_YET;
+
+ if (unlikely(len > skb->len))
+ return SKB_DROP_REASON_PKT_TOO_SMALL;
+
+ if (unlikely(!__pskb_pull_tail(skb, len - skb_headlen(skb))))
+ return SKB_DROP_REASON_NOMEM;
+
+ return SKB_NOT_DROPPED_YET;
}
-static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
+static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
- return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
+ return pskb_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET;
}
-static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
+static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
- if (likely(len <= skb_headlen(skb)))
- return true;
- if (unlikely(len > skb->len))
- return false;
- return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
+ if (!pskb_may_pull(skb, len))
+ return NULL;
+
+ skb->len -= len;
+ return skb->data += len;
}
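+
+/* A hedged parsing sketch: linearize the IPv4 header and surface the
+ * precise drop reason on failure:
+ *
+ *	enum skb_drop_reason reason;
+ *
+ *	reason = pskb_may_pull_reason(skb, sizeof(struct iphdr));
+ *	if (reason != SKB_NOT_DROPPED_YET) {
+ *		kfree_skb_reason(skb, reason);
+ *		return NET_RX_DROP;
+ *	}
+ */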
void skb_condense(struct sk_buff *skb);
@@ -2451,9 +2976,19 @@ static inline void skb_reset_inner_headers(struct sk_buff *skb)
skb->inner_transport_header = skb->transport_header;
}
+static inline int skb_mac_header_was_set(const struct sk_buff *skb)
+{
+ return skb->mac_header != (typeof(skb->mac_header))~0U;
+}
+
static inline void skb_reset_mac_len(struct sk_buff *skb)
{
- skb->mac_len = skb->network_header - skb->mac_header;
+ if (!skb_mac_header_was_set(skb)) {
+ DEBUG_NET_WARN_ON_ONCE(1);
+ skb->mac_len = 0;
+ } else {
+ skb->mac_len = skb->network_header - skb->mac_header;
+ }
}
static inline unsigned char *skb_inner_transport_header(const struct sk_buff
@@ -2469,7 +3004,10 @@ static inline int skb_inner_transport_offset(const struct sk_buff *skb)
static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
- skb->inner_transport_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_transport_header))offset);
+ skb->inner_transport_header = offset;
}
static inline void skb_set_inner_transport_header(struct sk_buff *skb,
@@ -2486,7 +3024,10 @@ static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
- skb->inner_network_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_network_header))offset);
+ skb->inner_network_header = offset;
}
static inline void skb_set_inner_network_header(struct sk_buff *skb,
@@ -2496,6 +3037,11 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb,
skb->inner_network_header += offset;
}
+static inline bool skb_inner_network_header_was_set(const struct sk_buff *skb)
+{
+ return skb->inner_network_header > 0;
+}
+
static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
return skb->head + skb->inner_mac_header;
@@ -2503,7 +3049,10 @@ static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
- skb->inner_mac_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_mac_header))offset);
+ skb->inner_mac_header = offset;
}
static inline void skb_set_inner_mac_header(struct sk_buff *skb,
@@ -2519,12 +3068,39 @@ static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
+ DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb));
return skb->head + skb->transport_header;
}
static inline void skb_reset_transport_header(struct sk_buff *skb)
{
- skb->transport_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->transport_header))offset);
+ skb->transport_header = offset;
+}
+
+/**
+ * skb_reset_transport_header_careful - conditionally reset transport header
+ * @skb: buffer to alter
+ *
+ * Hardened version of skb_reset_transport_header().
+ *
+ * Returns: true if the operation was a success.
+ */
+static inline bool __must_check
+skb_reset_transport_header_careful(struct sk_buff *skb)
+{
+ long offset = skb->data - skb->head;
+
+ if (unlikely(offset != (typeof(skb->transport_header))offset))
+ return false;
+
+ if (unlikely(offset == (typeof(skb->transport_header))~0U))
+ return false;
+
+ skb->transport_header = offset;
+ return true;
}
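+
+/* A hedged usage sketch for paths where the data offset might not fit
+ * the header field:
+ *
+ *	if (unlikely(!skb_reset_transport_header_careful(skb)))
+ *		goto drop;
+ */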
static inline void skb_set_transport_header(struct sk_buff *skb,
@@ -2541,7 +3117,10 @@ static inline unsigned char *skb_network_header(const struct sk_buff *skb)
static inline void skb_reset_network_header(struct sk_buff *skb)
{
- skb->network_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->network_header))offset);
+ skb->network_header = offset;
}
static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
@@ -2552,6 +3131,7 @@ static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
+ DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb));
return skb->head + skb->mac_header;
}
@@ -2562,14 +3142,10 @@ static inline int skb_mac_offset(const struct sk_buff *skb)
static inline u32 skb_mac_header_len(const struct sk_buff *skb)
{
+ DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb));
return skb->network_header - skb->mac_header;
}
-static inline int skb_mac_header_was_set(const struct sk_buff *skb)
-{
- return skb->mac_header != (typeof(skb->mac_header))~0U;
-}
-
static inline void skb_unset_mac_header(struct sk_buff *skb)
{
skb->mac_header = (typeof(skb->mac_header))~0U;
@@ -2577,7 +3153,10 @@ static inline void skb_unset_mac_header(struct sk_buff *skb)
static inline void skb_reset_mac_header(struct sk_buff *skb)
{
- skb->mac_header = skb->data - skb->head;
+ long offset = skb->data - skb->head;
+
+ DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->mac_header))offset);
+ skb->mac_header = offset;
}
static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
@@ -2613,6 +3192,21 @@ static inline void skb_mac_header_rebuild(struct sk_buff *skb)
}
}
+/* Move the full mac header up to current network_header.
+ * Leaves skb->data pointing at offset skb->mac_len into the mac_header.
+ * Must be provided the complete mac header length.
+ */
+static inline void skb_mac_header_rebuild_full(struct sk_buff *skb, u32 full_mac_len)
+{
+ if (skb_mac_header_was_set(skb)) {
+ const unsigned char *old_mac = skb_mac_header(skb);
+
+ skb_set_mac_header(skb, -full_mac_len);
+ memmove(skb_mac_header(skb), old_mac, full_mac_len);
+ __skb_push(skb, full_mac_len - skb->mac_len);
+ }
+}
+
static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
return skb->csum_start - skb_headroom(skb);
@@ -2630,6 +3224,7 @@ static inline int skb_transport_offset(const struct sk_buff *skb)
static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
+ DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb));
return skb->transport_header - skb->network_header;
}
@@ -2648,9 +3243,15 @@ static inline int skb_inner_network_offset(const struct sk_buff *skb)
return skb_inner_network_header(skb) - skb->data;
}
+static inline enum skb_drop_reason
+pskb_network_may_pull_reason(struct sk_buff *skb, unsigned int len)
+{
+ return pskb_may_pull_reason(skb, skb_network_offset(skb) + len);
+}
+
static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
- return pskb_may_pull(skb, skb_network_offset(skb) + len);
+ return pskb_network_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET;
}
/*
@@ -2728,6 +3329,7 @@ static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
+ skb_might_realloc(skb);
return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}
@@ -2792,8 +3394,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
if (likely(!skb_zcopy(skb)))
return 0;
- if (!skb_zcopy_is_nouarg(skb) &&
- skb_uarg(skb)->callback == msg_zerocopy_callback)
+ if (skb_shinfo(skb)->flags & SKBFL_DONT_ORPHAN)
return 0;
return skb_copy_ubufs(skb, gfp_mask);
}
@@ -2807,22 +3408,38 @@ static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
}
/**
- * __skb_queue_purge - empty a list
+ * __skb_queue_purge_reason - empty a list
* @list: list to empty
+ * @reason: drop reason
*
* Delete all buffers on an &sk_buff list. Each buffer is removed from
* the list and one reference dropped. This function does not take the
* list lock and the caller must hold the relevant locks to use it.
*/
-static inline void __skb_queue_purge(struct sk_buff_head *list)
+static inline void __skb_queue_purge_reason(struct sk_buff_head *list,
+ enum skb_drop_reason reason)
{
struct sk_buff *skb;
+
while ((skb = __skb_dequeue(list)) != NULL)
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
+}
+
+static inline void __skb_queue_purge(struct sk_buff_head *list)
+{
+ __skb_queue_purge_reason(list, SKB_DROP_REASON_QUEUE_PURGE);
+}
+
+void skb_queue_purge_reason(struct sk_buff_head *list,
+ enum skb_drop_reason reason);
+
+static inline void skb_queue_purge(struct sk_buff_head *list)
+{
+ skb_queue_purge_reason(list, SKB_DROP_REASON_QUEUE_PURGE);
}
-void skb_queue_purge(struct sk_buff_head *list);
unsigned int skb_rbtree_purge(struct rb_root *root);
+void skb_errqueue_purge(struct sk_buff_head *list);
void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
@@ -2916,17 +3533,11 @@ static inline void *napi_alloc_frag_align(unsigned int fragsz,
return __napi_alloc_frag_align(fragsz, -align);
}
-struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
- unsigned int length, gfp_t gfp_mask);
-static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
- unsigned int length)
-{
- return __napi_alloc_skb(napi, length, GFP_ATOMIC);
-}
+struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int length);
void napi_consume_skb(struct sk_buff *skb, int budget);
void napi_skb_free_stolen_head(struct sk_buff *skb);
-void __kfree_skb_defer(struct sk_buff *skb);
+void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason);
/**
* __dev_alloc_pages - allocate page for network Rx
@@ -2937,11 +3548,11 @@ void __kfree_skb_defer(struct sk_buff *skb);
*
* %NULL is returned if there is no free memory.
*/
-static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
+static inline struct page *__dev_alloc_pages_noprof(gfp_t gfp_mask,
unsigned int order)
{
/* This piece of code contains several assumptions.
- * 1. This is for device Rx, therefor a cold page is preferred.
+ * 1. This is for device Rx, therefore a cold page is preferred.
* 2. The expectation is the user wants a compound page.
* 3. If requesting a order 0 page it will not be compound
* due to the check to see if order has a value in prep_new_page
@@ -2950,13 +3561,15 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
*/
gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
- return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
+ return alloc_pages_node_noprof(NUMA_NO_NODE, gfp_mask, order);
}
+#define __dev_alloc_pages(...) alloc_hooks(__dev_alloc_pages_noprof(__VA_ARGS__))
-static inline struct page *dev_alloc_pages(unsigned int order)
-{
- return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
-}
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag).
+ */
+#define dev_alloc_pages(_order) __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, _order)
/**
* __dev_alloc_page - allocate a page for network Rx
@@ -2966,15 +3579,17 @@ static inline struct page *dev_alloc_pages(unsigned int order)
*
* %NULL is returned if there is no free memory.
*/
-static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
+static inline struct page *__dev_alloc_page_noprof(gfp_t gfp_mask)
{
- return __dev_alloc_pages(gfp_mask, 0);
+ return __dev_alloc_pages_noprof(gfp_mask, 0);
}
+#define __dev_alloc_page(...) alloc_hooks(__dev_alloc_page_noprof(__VA_ARGS__))
-static inline struct page *dev_alloc_page(void)
-{
- return dev_alloc_pages(0);
-}
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag).
+ */
+#define dev_alloc_page() dev_alloc_pages(0)
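/* Illustrative sketch (not part of the patch): because dev_alloc_pages() and
 * dev_alloc_page() are now alloc_hooks() macros, every call site gets its own
 * alloc_tag under CONFIG_MEM_ALLOC_PROFILING. The refill helper below is
 * hypothetical.
 */
static struct page *example_rx_refill(void)
{
	/* GFP_ATOMIC | __GFP_NOWARN plus __GFP_COMP | __GFP_MEMALLOC are
	 * implied; NULL simply means the ring entry stays empty for now.
	 */
	return dev_alloc_pages(0);
}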
/**
* dev_page_is_reusable - check whether a page can be reused for network Rx
@@ -2983,7 +3598,7 @@ static inline struct page *dev_alloc_page(void)
* A page shouldn't be considered for reusing/recycling if it was allocated
* under memory pressure or at a distant memory node.
*
- * Returns false if this page should be returned to page allocator, true
+ * Returns: false if this page should be returned to page allocator, true
* otherwise.
*/
static inline bool dev_page_is_reusable(const struct page *page)
@@ -3010,7 +3625,7 @@ static inline void skb_propagate_pfmemalloc(const struct page *page,
*/
static inline unsigned int skb_frag_off(const skb_frag_t *frag)
{
- return frag->bv_offset;
+ return frag->offset;
}
/**
@@ -3020,7 +3635,7 @@ static inline unsigned int skb_frag_off(const skb_frag_t *frag)
*/
static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
{
- frag->bv_offset += delta;
+ frag->offset += delta;
}
/**
@@ -3030,7 +3645,7 @@ static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
*/
static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
{
- frag->bv_offset = offset;
+ frag->offset = offset;
}
/**
@@ -3041,75 +3656,73 @@ static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
static inline void skb_frag_off_copy(skb_frag_t *fragto,
const skb_frag_t *fragfrom)
{
- fragto->bv_offset = fragfrom->bv_offset;
+ fragto->offset = fragfrom->offset;
}
-/**
- * skb_frag_page - retrieve the page referred to by a paged fragment
- * @frag: the paged fragment
- *
- * Returns the &struct page associated with @frag.
- */
-static inline struct page *skb_frag_page(const skb_frag_t *frag)
+/* Return: true if the skb_frag contains a net_iov. */
+static inline bool skb_frag_is_net_iov(const skb_frag_t *frag)
{
- return frag->bv_page;
+ return netmem_is_net_iov(frag->netmem);
}
/**
- * __skb_frag_ref - take an addition reference on a paged fragment.
- * @frag: the paged fragment
+ * skb_frag_net_iov - retrieve the net_iov referred to by fragment
+ * @frag: the fragment
*
- * Takes an additional reference on the paged fragment @frag.
+ * Return: the &struct net_iov associated with @frag. Returns NULL if this
+ * frag has no associated net_iov.
*/
-static inline void __skb_frag_ref(skb_frag_t *frag)
+static inline struct net_iov *skb_frag_net_iov(const skb_frag_t *frag)
{
- get_page(skb_frag_page(frag));
-}
+ if (!skb_frag_is_net_iov(frag))
+ return NULL;
-/**
- * skb_frag_ref - take an addition reference on a paged fragment of an skb.
- * @skb: the buffer
- * @f: the fragment offset.
- *
- * Takes an additional reference on the @f'th paged fragment of @skb.
- */
-static inline void skb_frag_ref(struct sk_buff *skb, int f)
-{
- __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
+ return netmem_to_net_iov(frag->netmem);
}
/**
- * __skb_frag_unref - release a reference on a paged fragment.
+ * skb_frag_page - retrieve the page referred to by a paged fragment
* @frag: the paged fragment
*
- * Releases a reference on the paged fragment @frag.
+ * Return: the &struct page associated with @frag. Returns NULL if this frag
+ * has no associated page.
*/
-static inline void __skb_frag_unref(skb_frag_t *frag)
+static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
- put_page(skb_frag_page(frag));
+ if (skb_frag_is_net_iov(frag))
+ return NULL;
+
+ return netmem_to_page(frag->netmem);
}
/**
- * skb_frag_unref - release a reference on a paged fragment of an skb.
- * @skb: the buffer
- * @f: the fragment offset
+ * skb_frag_netmem - retrieve the netmem referred to by a fragment
+ * @frag: the fragment
*
- * Releases a reference on the @f'th paged fragment of @skb.
+ * Return: the &netmem_ref associated with @frag.
*/
-static inline void skb_frag_unref(struct sk_buff *skb, int f)
+static inline netmem_ref skb_frag_netmem(const skb_frag_t *frag)
{
- __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
+ return frag->netmem;
}
+int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
+ unsigned int headroom);
+int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
+ const struct bpf_prog *prog);
+
/**
* skb_frag_address - gets the address of the data contained in a paged fragment
* @frag: the paged fragment buffer
*
- * Returns the address of the data within @frag. The page must already
+ * Returns: the address of the data within @frag. The page must already
* be mapped.
*/
static inline void *skb_frag_address(const skb_frag_t *frag)
{
+ if (!skb_frag_page(frag))
+ return NULL;
+
return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
}
@@ -3117,12 +3730,18 @@ static inline void *skb_frag_address(const skb_frag_t *frag)
* skb_frag_address_safe - gets the address of the data contained in a paged fragment
* @frag: the paged fragment buffer
*
- * Returns the address of the data within @frag. Checks that the page
+ * Returns: the address of the data within @frag. Checks that the page
* is mapped and returns %NULL otherwise.
*/
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
- void *ptr = page_address(skb_frag_page(frag));
+ struct page *page = skb_frag_page(frag);
+ void *ptr;
+
+ if (!page)
+ return NULL;
+
+ ptr = page_address(page);
if (unlikely(!ptr))
return NULL;
@@ -3137,39 +3756,13 @@ static inline void *skb_frag_address_safe(const skb_frag_t *frag)
static inline void skb_frag_page_copy(skb_frag_t *fragto,
const skb_frag_t *fragfrom)
{
- fragto->bv_page = fragfrom->bv_page;
-}
-
-/**
- * __skb_frag_set_page - sets the page contained in a paged fragment
- * @frag: the paged fragment
- * @page: the page to set
- *
- * Sets the fragment @frag to contain @page.
- */
-static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
-{
- frag->bv_page = page;
-}
-
-/**
- * skb_frag_set_page - sets the page contained in a paged fragment of an skb
- * @skb: the buffer
- * @f: the fragment offset
- * @page: the page to set
- *
- * Sets the @f'th fragment of @skb to contain @page.
- */
-static inline void skb_frag_set_page(struct sk_buff *skb, int f,
- struct page *page)
-{
- __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
+ fragto->netmem = fragfrom->netmem;
}
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
/**
- * skb_frag_dma_map - maps a paged fragment via the DMA API
+ * __skb_frag_dma_map - maps a paged fragment via the DMA API
* @dev: the device to map the fragment to
* @frag: the paged fragment to map
* @offset: the offset within the fragment (starting at the
@@ -3179,15 +3772,40 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
*
* Maps the page associated with @frag to @dev.
*/
-static inline dma_addr_t skb_frag_dma_map(struct device *dev,
- const skb_frag_t *frag,
- size_t offset, size_t size,
- enum dma_data_direction dir)
+static inline dma_addr_t __skb_frag_dma_map(struct device *dev,
+ const skb_frag_t *frag,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
{
+ if (skb_frag_is_net_iov(frag)) {
+ return netmem_to_net_iov(frag->netmem)->desc.dma_addr +
+ offset + frag->offset;
+ }
return dma_map_page(dev, skb_frag_page(frag),
skb_frag_off(frag) + offset, size, dir);
}
+#define skb_frag_dma_map(dev, frag, ...) \
+ CONCATENATE(_skb_frag_dma_map, \
+ COUNT_ARGS(__VA_ARGS__))(dev, frag, ##__VA_ARGS__)
+
+#define __skb_frag_dma_map1(dev, frag, offset, uf, uo) ({ \
+ const skb_frag_t *uf = (frag); \
+ size_t uo = (offset); \
+ \
+ __skb_frag_dma_map(dev, uf, uo, skb_frag_size(uf) - uo, \
+ DMA_TO_DEVICE); \
+})
+#define _skb_frag_dma_map1(dev, frag, offset) \
+ __skb_frag_dma_map1(dev, frag, offset, __UNIQUE_ID(frag_), \
+ __UNIQUE_ID(offset_))
+#define _skb_frag_dma_map0(dev, frag) \
+ _skb_frag_dma_map1(dev, frag, 0)
+#define _skb_frag_dma_map2(dev, frag, offset, size) \
+ __skb_frag_dma_map(dev, frag, offset, size, DMA_TO_DEVICE)
+#define _skb_frag_dma_map3(dev, frag, offset, size, dir) \
+ __skb_frag_dma_map(dev, frag, offset, size, dir)
+
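/* Illustrative sketch (not part of the patch): the COUNT_ARGS()/CONCATENATE()
 * dispatch gives one name to several arities; each form maps for TX unless a
 * direction is given explicitly:
 *
 *	skb_frag_dma_map(dev, frag);			// off 0, whole frag
 *	skb_frag_dma_map(dev, frag, off);		// size - off bytes
 *	skb_frag_dma_map(dev, frag, off, sz, dir);	// fully explicit
 */
static dma_addr_t example_map_for_tx(struct device *dev, const skb_frag_t *frag)
{
	return skb_frag_dma_map(dev, frag);
}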
static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
gfp_t gfp_mask)
{
@@ -3328,39 +3946,29 @@ static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int l
return __skb_put_padto(skb, len, true);
}
-static inline int skb_add_data(struct sk_buff *skb,
- struct iov_iter *from, int copy)
-{
- const int off = skb->len;
-
- if (skb->ip_summed == CHECKSUM_NONE) {
- __wsum csum = 0;
- if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
- &csum, from)) {
- skb->csum = csum_block_add(skb->csum, csum, off);
- return 0;
- }
- } else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
- return 0;
-
- __skb_trim(skb, off);
- return -EFAULT;
-}
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i)
+ __must_check;
-static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
- const struct page *page, int off)
+static inline bool skb_can_coalesce_netmem(struct sk_buff *skb, int i,
+ netmem_ref netmem, int off)
{
if (skb_zcopy(skb))
return false;
if (i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
- return page == skb_frag_page(frag) &&
+ return netmem == skb_frag_netmem(frag) &&
off == skb_frag_off(frag) + skb_frag_size(frag);
}
return false;
}
+static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
+ const struct page *page, int off)
+{
+ return skb_can_coalesce_netmem(skb, i, page_to_netmem(page), off);
+}
+
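/* Illustrative sketch (not part of the patch): a zero-copy send path can grow
 * the previous fragment instead of consuming a new slot when the new chunk is
 * contiguous with it. skb->len/truesize accounting is omitted here.
 */
static void example_append_page(struct sk_buff *skb, struct page *page,
				int off, int len)
{
	int i = skb_shinfo(skb)->nr_frags;

	if (skb_can_coalesce(skb, i, page, off))
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], len);
	else
		skb_fill_page_desc(skb, i, page, off, len);
}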
static inline int __skb_linearize(struct sk_buff *skb)
{
return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
@@ -3382,7 +3990,7 @@ static inline int skb_linearize(struct sk_buff *skb)
* skb_has_shared_frag - can any frag be overwritten
* @skb: buffer to test
*
- * Return true if the skb has at least one frag that might be modified
+ * Return: true if the skb has at least one frag that might be modified
* by an external entity (as in vmsplice()/sendfile())
*/
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
@@ -3429,7 +4037,12 @@ __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
static inline void skb_postpull_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
- __skb_postpull_rcsum(skb, start, len, 0);
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = wsum_negate(csum_partial(start, len,
+ wsum_negate(skb->csum)));
+ else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_start_offset(skb) < 0)
+ skb->ip_summed = CHECKSUM_NONE;
}
static __always_inline void
@@ -3489,6 +4102,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
+ skb_might_realloc(skb);
if (likely(len >= skb->len))
return 0;
return pskb_trim_rcsum_slow(skb, len);
@@ -3578,8 +4192,7 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
int *err, long *timeo_p,
const struct sk_buff *skb);
-struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
- struct sk_buff_head *queue,
+struct sk_buff *__skb_try_recv_from_queue(struct sk_buff_head *queue,
unsigned int flags,
int *off, int *err,
struct sk_buff **last);
@@ -3590,8 +4203,10 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
struct sk_buff *__skb_recv_datagram(struct sock *sk,
struct sk_buff_head *sk_queue,
unsigned int flags, int *off, int *err);
-struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
- int *err);
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int *err);
+__poll_t datagram_poll_queue(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait,
+ struct sk_buff_head *rcv_queue);
__poll_t datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
@@ -3603,19 +4218,14 @@ static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
struct msghdr *msg);
-int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
- struct iov_iter *to, int len,
- struct ahash_request *hash);
+int skb_copy_and_crc32c_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len, u32 *crcp);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
struct iov_iter *from, int len);
+int skb_copy_datagram_from_iter_full(struct sk_buff *skb, int offset,
+ struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
-void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
-static inline void skb_free_datagram_locked(struct sock *sk,
- struct sk_buff *skb)
-{
- __skb_free_datagram_locked(sk, skb, 0);
-}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
@@ -3626,6 +4236,8 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
int len);
+int skb_send_sock_locked_with_flags(struct sock *sk, struct sk_buff *skb,
+ int offset, int len, int flags);
int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
@@ -3634,13 +4246,12 @@ int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
-bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
-bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
-int skb_ensure_writable(struct sk_buff *skb, int write_len);
+int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len);
+int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
@@ -3666,17 +4277,9 @@ static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}
-struct skb_checksum_ops {
- __wsum (*update)(const void *mem, int len, __wsum wsum);
- __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
-};
-
-extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
-
-__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
- __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum);
+u32 skb_crc32c(const struct sk_buff *skb, int offset, int len, u32 crc);
static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset, int len,
@@ -3698,6 +4301,14 @@ skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
skb_headlen(skb), buffer);
}
+static inline void * __must_check
+skb_pointer_if_linear(const struct sk_buff *skb, int offset, int len)
+{
+ if (likely(skb_headlen(skb) - offset >= len))
+ return skb->data + offset;
+ return NULL;
+}
+
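/* Illustrative sketch (not part of the patch): unlike skb_header_pointer(),
 * this helper never copies into a scratch buffer, so it suits callers that
 * must read bytes in place and can take a slow path when the data is paged.
 */
static bool example_peek_byte(const struct sk_buff *skb, int offset, u8 *val)
{
	const u8 *p = skb_pointer_if_linear(skb, offset, sizeof(*val));

	if (!p)
		return false;	/* non-linear: caller must copy or linearize */
	*val = *p;
	return true;
}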
/**
* skb_needs_linearize - check if we need to linearize a given skb
* depending on the given device features.
@@ -3797,6 +4408,7 @@ static inline void skb_get_new_timestampns(const struct sk_buff *skb,
static inline void __net_timestamp(struct sk_buff *skb)
{
skb->tstamp = ktime_get_real();
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
}
static inline ktime_t net_timedelta(ktime_t t)
@@ -3804,8 +4416,79 @@ static inline ktime_t net_timedelta(ktime_t t)
return ktime_sub(ktime_get_real(), t);
}
-static inline ktime_t net_invalid_timestamp(void)
+static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt,
+ u8 tstamp_type)
{
+ skb->tstamp = kt;
+
+ if (kt)
+ skb->tstamp_type = tstamp_type;
+ else
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
+}
+
+static inline void skb_set_delivery_type_by_clockid(struct sk_buff *skb,
+ ktime_t kt, clockid_t clockid)
+{
+ u8 tstamp_type = SKB_CLOCK_REALTIME;
+
+ switch (clockid) {
+ case CLOCK_REALTIME:
+ break;
+ case CLOCK_MONOTONIC:
+ tstamp_type = SKB_CLOCK_MONOTONIC;
+ break;
+ case CLOCK_TAI:
+ tstamp_type = SKB_CLOCK_TAI;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ kt = 0;
+ }
+
+ skb_set_delivery_time(skb, kt, tstamp_type);
+}
+
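/* Illustrative sketch (not part of the patch): tagging a monotonic TX
 * timestamp so later code knows skb->tstamp is not wall-clock time.
 */
static void example_stamp_mono(struct sk_buff *skb)
{
	skb_set_delivery_type_by_clockid(skb, ktime_get(), CLOCK_MONOTONIC);
	/* skb->tstamp_type is now SKB_CLOCK_MONOTONIC */
}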
+DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
+
+/* It is used in the ingress path to clear the delivery_time.
+ * If needed, set the skb->tstamp to the (rcv) timestamp.
+ */
+static inline void skb_clear_delivery_time(struct sk_buff *skb)
+{
+ if (skb->tstamp_type) {
+ skb->tstamp_type = SKB_CLOCK_REALTIME;
+ if (static_branch_unlikely(&netstamp_needed_key))
+ skb->tstamp = ktime_get_real();
+ else
+ skb->tstamp = 0;
+ }
+}
+
+static inline void skb_clear_tstamp(struct sk_buff *skb)
+{
+ if (skb->tstamp_type)
+ return;
+
+ skb->tstamp = 0;
+}
+
+static inline ktime_t skb_tstamp(const struct sk_buff *skb)
+{
+ if (skb->tstamp_type)
+ return 0;
+
+ return skb->tstamp;
+}
+
+static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond)
+{
+ if (skb->tstamp_type != SKB_CLOCK_MONOTONIC && skb->tstamp)
+ return skb->tstamp;
+
+ if (static_branch_unlikely(&netstamp_needed_key) || cond)
+ return ktime_get_real();
+
return 0;
}
@@ -3825,10 +4508,13 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
{
const void *a = skb_metadata_end(skb_a);
const void *b = skb_metadata_end(skb_b);
- /* Using more efficient varaiant than plain call to memcmp(). */
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
u64 diffs = 0;
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ BITS_PER_LONG != 64)
+ goto slow;
+
+ /* Using more efficient variant than plain call to memcmp(). */
switch (meta_len) {
#define __it(x, op) (x -= sizeof(u##op))
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
@@ -3848,11 +4534,11 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
fallthrough;
case 4: diffs |= __it_diff(a, b, 32);
break;
+ default:
+slow:
+ return memcmp(a - meta_len, b - meta_len, meta_len);
}
return diffs;
-#else
- return memcmp(a - meta_len, b - meta_len, meta_len);
-#endif
}
static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
@@ -3878,6 +4564,81 @@ static inline void skb_metadata_clear(struct sk_buff *skb)
skb_metadata_set(skb, 0);
}
+/**
+ * skb_data_move - Move packet data and metadata after skb_push() or skb_pull().
+ * @skb: packet to operate on
+ * @len: number of bytes pushed or pulled from &sk_buff->data
+ * @n: number of bytes to memmove() from pre-push/pull &sk_buff->data
+ *
+ * Moves @n bytes of packet data, can be zero, and all bytes of skb metadata.
+ *
+ * Assumes metadata is located immediately before &sk_buff->data prior to the
+ * push/pull, and that sufficient headroom exists to hold it after an
+ * skb_push(). Otherwise, metadata is cleared and a one-time warning is issued.
+ *
+ * Prefer skb_postpull_data_move() or skb_postpush_data_move() to calling this
+ * helper directly.
+ */
+static inline void skb_data_move(struct sk_buff *skb, const int len,
+ const unsigned int n)
+{
+ const u8 meta_len = skb_metadata_len(skb);
+ u8 *meta, *meta_end;
+
+ if (!len || (!n && !meta_len))
+ return;
+
+ if (!meta_len)
+ goto no_metadata;
+
+ meta_end = skb_metadata_end(skb);
+ meta = meta_end - meta_len;
+
+ if (WARN_ON_ONCE(meta_end + len != skb->data ||
+ meta_len > skb_headroom(skb))) {
+ skb_metadata_clear(skb);
+ goto no_metadata;
+ }
+
+ memmove(meta + len, meta, meta_len + n);
+ return;
+
+no_metadata:
+ memmove(skb->data, skb->data - len, n);
+}
+
+/**
+ * skb_postpull_data_move - Move packet data and metadata after skb_pull().
+ * @skb: packet to operate on
+ * @len: number of bytes pulled from &sk_buff->data
+ * @n: number of bytes to memmove() from pre-pull &sk_buff->data
+ *
+ * See skb_data_move() for details.
+ */
+static inline void skb_postpull_data_move(struct sk_buff *skb,
+ const unsigned int len,
+ const unsigned int n)
+{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+ skb_data_move(skb, len, n);
+}
+
+/**
+ * skb_postpush_data_move - Move packet data and metadata after skb_push().
+ * @skb: packet to operate on
+ * @len: number of bytes pushed onto &sk_buff->data
+ * @n: number of bytes to memmove() from pre-push &sk_buff->data
+ *
+ * See skb_data_move() for details.
+ */
+static inline void skb_postpush_data_move(struct sk_buff *skb,
+ const unsigned int len,
+ const unsigned int n)
+{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+ skb_data_move(skb, -len, n);
+}
+
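/* Illustrative sketch (not part of the patch): a VLAN-untag-style decap that
 * pulls VLAN_HLEN and then moves the 2 * ETH_ALEN address bytes that preceded
 * the tag, together with any skb metadata, in a single memmove(). Assumes
 * skb->data pointed at the mac header before the pull.
 */
static void example_untag(struct sk_buff *skb)
{
	skb_pull(skb, VLAN_HLEN);
	skb_postpull_data_move(skb, VLAN_HLEN, 2 * ETH_ALEN);
}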
struct sk_buff *skb_clone_sk(struct sk_buff *skb);
#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
@@ -3946,7 +4707,7 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
skb_clone_tx_timestamp(skb);
- if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
+ if (skb_shinfo(skb)->tx_flags & (SKBTX_SW_TSTAMP | SKBTX_BPF))
skb_tstamp_tx(skb, NULL);
}
@@ -4023,7 +4784,7 @@ static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
/* Check if we need to perform checksum complete validation.
*
- * Returns true if checksum complete is needed, false otherwise
+ * Returns: true if checksum complete is needed, false otherwise
* (either checksum is unnecessary or zero checksum is allowed).
*/
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
@@ -4171,7 +4932,7 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
return;
}
- if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
+ if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
__skb_checksum_complete(skb);
skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
}
@@ -4203,6 +4964,7 @@ static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ skb->slow_gro |= !!nfct;
skb->_nfct = nfct;
#endif
}
@@ -4221,6 +4983,12 @@ enum skb_ext_id {
#if IS_ENABLED(CONFIG_MPTCP)
SKB_EXT_MPTCP,
#endif
+#if IS_ENABLED(CONFIG_MCTP_FLOWS)
+ SKB_EXT_MCTP,
+#endif
+#if IS_ENABLED(CONFIG_INET_PSP)
+ SKB_EXT_PSP,
+#endif
SKB_EXT_NUM, /* must be last */
};
@@ -4331,7 +5099,7 @@ static inline void nf_reset_ct(struct sk_buff *skb)
static inline void nf_reset_trace(struct sk_buff *skb)
{
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
skb->nf_trace = 0;
#endif
}
@@ -4351,7 +5119,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
dst->_nfct = src->_nfct;
nf_conntrack_get(skb_nfct(src));
#endif
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || IS_ENABLED(CONFIG_NF_TABLES)
if (copy)
dst->nf_trace = src->nf_trace;
#endif
@@ -4362,6 +5130,7 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(skb_nfct(dst));
#endif
+ dst->slow_gro = src->slow_gro;
__nf_copy(dst, src, true);
}
@@ -4450,75 +5219,6 @@ static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
#endif
}
-/* Keeps track of mac header offset relative to skb->head.
- * It is useful for TSO of Tunneling protocol. e.g. GRE.
- * For non-tunnel skb it points to skb_mac_header() and for
- * tunnel skb it points to outer mac header.
- * Keeps track of level of encapsulation of network headers.
- */
-struct skb_gso_cb {
- union {
- int mac_offset;
- int data_offset;
- };
- int encap_level;
- __wsum csum;
- __u16 csum_start;
-};
-#define SKB_GSO_CB_OFFSET 32
-#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
-
-static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
-{
- return (skb_mac_header(inner_skb) - inner_skb->head) -
- SKB_GSO_CB(inner_skb)->mac_offset;
-}
-
-static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
-{
- int new_headroom, headroom;
- int ret;
-
- headroom = skb_headroom(skb);
- ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
- if (ret)
- return ret;
-
- new_headroom = skb_headroom(skb);
- SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
- return 0;
-}
-
-static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
-{
- /* Do not update partial checksums if remote checksum is enabled. */
- if (skb->remcsum_offload)
- return;
-
- SKB_GSO_CB(skb)->csum = res;
- SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
-}
-
-/* Compute the checksum for a gso segment. First compute the checksum value
- * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
- * then add in skb->csum (checksum from csum_start to end of packet).
- * skb->csum and csum_start are then updated to reflect the checksum of the
- * resultant packet starting from the transport header-- the resultant checksum
- * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo
- * header.
- */
-static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
-{
- unsigned char *csum_start = skb_transport_header(skb);
- int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
- __wsum partial = SKB_GSO_CB(skb)->csum;
-
- SKB_GSO_CB(skb)->csum = res;
- SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
-
- return csum_fold(csum_partial(csum_start, plen, partial));
-}
-
static inline bool skb_is_gso(const struct sk_buff *skb)
{
return skb_shinfo(skb)->gso_size;
@@ -4598,9 +5298,7 @@ static inline void skb_forward_csum(struct sk_buff *skb)
*/
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
-#ifdef DEBUG
- BUG_ON(skb->ip_summed != CHECKSUM_NONE);
-#endif
+ DEBUG_NET_WARN_ON_ONCE(skb->ip_summed != CHECKSUM_NONE);
}
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
@@ -4651,33 +5349,48 @@ static inline __wsum lco_csum(struct sk_buff *skb)
static inline bool skb_is_redirected(const struct sk_buff *skb)
{
-#ifdef CONFIG_NET_REDIRECT
return skb->redirected;
-#else
- return false;
-#endif
}
static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
{
-#ifdef CONFIG_NET_REDIRECT
skb->redirected = 1;
+#ifdef CONFIG_NET_REDIRECT
skb->from_ingress = from_ingress;
if (skb->from_ingress)
- skb->tstamp = 0;
+ skb_clear_tstamp(skb);
#endif
}
static inline void skb_reset_redirect(struct sk_buff *skb)
{
-#ifdef CONFIG_NET_REDIRECT
skb->redirected = 0;
+}
+
+static inline void skb_set_redirected_noclear(struct sk_buff *skb,
+ bool from_ingress)
+{
+ skb->redirected = 1;
+#ifdef CONFIG_NET_REDIRECT
+ skb->from_ingress = from_ingress;
#endif
}
static inline bool skb_csum_is_sctp(struct sk_buff *skb)
{
+#if IS_ENABLED(CONFIG_IP_SCTP)
return skb->csum_not_inet;
+#else
+ return 0;
+#endif
+}
+
+static inline void skb_reset_csum_not_inet(struct sk_buff *skb)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+#if IS_ENABLED(CONFIG_IP_SCTP)
+ skb->csum_not_inet = 0;
+#endif
}
static inline void skb_set_kcov_handle(struct sk_buff *skb,
@@ -4697,5 +5410,15 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
#endif
}
+static inline void skb_mark_for_recycle(struct sk_buff *skb)
+{
+#ifdef CONFIG_PAGE_POOL
+ skb->pp_recycle = 1;
+#endif
+}
+
+ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
+ ssize_t maxsize);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
diff --git a/include/linux/skbuff_ref.h b/include/linux/skbuff_ref.h
new file mode 100644
index 000000000000..9e49372ef1a0
--- /dev/null
+++ b/include/linux/skbuff_ref.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Skb ref helpers.
+ *
+ */
+
+#ifndef _LINUX_SKBUFF_REF_H
+#define _LINUX_SKBUFF_REF_H
+
+#include <linux/skbuff.h>
+
+/**
+ * __skb_frag_ref - take an additional reference on a paged fragment.
+ * @frag: the paged fragment
+ *
+ * Takes an additional reference on the paged fragment @frag.
+ */
+static inline void __skb_frag_ref(skb_frag_t *frag)
+{
+ get_netmem(skb_frag_netmem(frag));
+}
+
+/**
+ * skb_frag_ref - take an additional reference on a paged fragment of an skb.
+ * @skb: the buffer
+ * @f: the fragment offset.
+ *
+ * Takes an additional reference on the @f'th paged fragment of @skb.
+ */
+static inline void skb_frag_ref(struct sk_buff *skb, int f)
+{
+ __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
+}
+
+bool napi_pp_put_page(netmem_ref netmem);
+
+static inline void skb_page_unref(netmem_ref netmem, bool recycle)
+{
+#ifdef CONFIG_PAGE_POOL
+ if (recycle && napi_pp_put_page(netmem))
+ return;
+#endif
+ put_netmem(netmem);
+}
+
+/**
+ * __skb_frag_unref - release a reference on a paged fragment.
+ * @frag: the paged fragment
+ * @recycle: recycle the page if allocated via page_pool
+ *
+ * Releases a reference on the paged fragment @frag
+ * or recycles the page via the page_pool API.
+ */
+static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
+{
+ skb_page_unref(skb_frag_netmem(frag), recycle);
+}
+
+/**
+ * skb_frag_unref - release a reference on a paged fragment of an skb.
+ * @skb: the buffer
+ * @f: the fragment offset
+ *
+ * Releases a reference on the @f'th paged fragment of @skb.
+ */
+static inline void skb_frag_unref(struct sk_buff *skb, int f)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ if (!skb_zcopy_managed(skb))
+ __skb_frag_unref(&shinfo->frags[f], skb->pp_recycle);
+}
+
+#endif /* _LINUX_SKBUFF_REF_H */
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index aba0f0f429be..49847888c287 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -29,7 +29,7 @@ struct sk_msg_sg {
u32 end;
u32 size;
u32 copybreak;
- unsigned long copy;
+ DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
/* The extra two elements:
* 1) used for chaining the front and sections when the list becomes
* partitioned (e.g. end < start). The crypto APIs require the
@@ -38,7 +38,6 @@ struct sk_msg_sg {
*/
struct scatterlist data[MAX_MSG_FRAGS + 2];
};
-static_assert(BITS_PER_LONG >= NR_MSG_FRAG_IDS);
/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
@@ -59,10 +58,15 @@ struct sk_psock_progs {
struct bpf_prog *stream_parser;
struct bpf_prog *stream_verdict;
struct bpf_prog *skb_verdict;
+ struct bpf_link *msg_parser_link;
+ struct bpf_link *stream_parser_link;
+ struct bpf_link *stream_verdict_link;
+ struct bpf_link *skb_verdict_link;
};
enum sk_psock_state_bits {
SK_PSOCK_TX_ENABLED,
+ SK_PSOCK_RX_STRP_ENABLED,
};
struct sk_psock_link {
@@ -72,7 +76,6 @@ struct sk_psock_link {
};
struct sk_psock_work_state {
- struct sk_buff *skb;
u32 len;
u32 off;
};
@@ -83,10 +86,13 @@ struct sk_psock {
u32 apply_bytes;
u32 cork_bytes;
u32 eval;
+ bool redir_ingress; /* undefined if sk_redir is null */
struct sk_msg *cork;
struct sk_psock_progs progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
struct strparser strp;
+ u32 copied_seq;
+ u32 ingress_bytes;
#endif
struct sk_buff_head ingress_skb;
struct list_head ingress_msg;
@@ -96,15 +102,22 @@ struct sk_psock {
spinlock_t link_lock;
refcount_t refcnt;
void (*saved_unhash)(struct sock *sk);
+ void (*saved_destroy)(struct sock *sk);
void (*saved_close)(struct sock *sk, long timeout);
void (*saved_write_space)(struct sock *sk);
void (*saved_data_ready)(struct sock *sk);
+ /* psock_update_sk_prot may be called with restore=false many times
+ * so the handler must be safe for this case. It will be called
+ * exactly once with restore=true when the psock is being destroyed
+ * and psock refcnt is zero, but before an RCU grace period.
+ */
int (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
bool restore);
struct proto *sk_proto;
struct mutex work_mutex;
struct sk_psock_work_state work_state;
- struct work_struct work;
+ struct delayed_work work;
+ struct sock *sk_pair;
struct rcu_work rwork;
};
@@ -126,10 +139,9 @@ int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes);
-int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, int flags,
- long timeo, int *err);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
int len, int flags);
+bool sk_msg_is_readable(struct sock *sk);
static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
@@ -172,11 +184,6 @@ static inline u32 sk_msg_iter_dist(u32 start, u32 end)
#define sk_msg_iter_next(msg, which) \
sk_msg_iter_var_next(msg->sg.which)
-static inline void sk_msg_clear_meta(struct sk_msg *msg)
-{
- memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy));
-}
-
static inline void sk_msg_init(struct sk_msg *msg)
{
BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
@@ -235,7 +242,7 @@ static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);
- if (test_bit(msg->sg.start, &msg->sg.copy)) {
+ if (test_bit(msg->sg.start, msg->sg.copy)) {
msg->data = NULL;
msg->data_end = NULL;
} else {
@@ -254,7 +261,7 @@ static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
sg_set_page(sge, page, len, offset);
sg_unmark_end(sge);
- __set_bit(msg->sg.end, &msg->sg.copy);
+ __set_bit(msg->sg.end, msg->sg.copy);
msg->sg.size += len;
sk_msg_iter_next(msg, end);
}
@@ -263,9 +270,9 @@ static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
do {
if (copy_state)
- __set_bit(i, &msg->sg.copy);
+ __set_bit(i, msg->sg.copy);
else
- __clear_bit(i, &msg->sg.copy);
+ __clear_bit(i, msg->sg.copy);
sk_msg_iter_var_next(i);
if (i == msg->sg.end)
break;
@@ -284,15 +291,50 @@ static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
static inline struct sk_psock *sk_psock(const struct sock *sk)
{
- return rcu_dereference_sk_user_data(sk);
+ return __rcu_dereference_sk_user_data_with_flags(sk,
+ SK_USER_DATA_PSOCK);
+}
+
+static inline void sk_psock_set_state(struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ set_bit(bit, &psock->state);
+}
+
+static inline void sk_psock_clear_state(struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ clear_bit(bit, &psock->state);
+}
+
+static inline bool sk_psock_test_state(const struct sk_psock *psock,
+ enum sk_psock_state_bits bit)
+{
+ return test_bit(bit, &psock->state);
}
-static inline void sk_psock_queue_msg(struct sk_psock *psock,
+static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
+{
+ sk_drops_skbadd(sk, skb);
+ kfree_skb(skb);
+}
+
+static inline bool sk_psock_queue_msg(struct sk_psock *psock,
struct sk_msg *msg)
{
+ bool ret;
+
spin_lock_bh(&psock->ingress_lock);
- list_add_tail(&msg->list, &psock->ingress_msg);
+ if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
+ list_add_tail(&msg->list, &psock->ingress_msg);
+ ret = true;
+ } else {
+ sk_msg_free(psock->sk, msg);
+ kfree(msg);
+ ret = false;
+ }
spin_unlock_bh(&psock->ingress_lock);
+ return ret;
}
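/* Illustrative sketch (not part of the patch): with the bool return, callers
 * must not touch @msg after a failed enqueue, because it was already freed
 * under the ingress lock.
 */
static int example_enqueue(struct sk_psock *psock, struct sk_msg *msg)
{
	if (!sk_psock_queue_msg(psock, msg))
		return -EPIPE;	/* msg freed: TX no longer enabled */
	sk_psock_data_ready(psock->sk, psock);
	return 0;
}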
static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
@@ -348,11 +390,11 @@ static inline void sk_psock_report_error(struct sk_psock *psock, int err)
struct sock *sk = psock->sk;
sk->sk_err = err;
- sk->sk_error_report(sk);
+ sk_error_report(sk);
}
struct sk_psock *sk_psock_init(struct sock *sk, int node);
-void sk_psock_stop(struct sk_psock *psock, bool wait);
+void sk_psock_stop(struct sk_psock *psock);
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
@@ -379,11 +421,14 @@ void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
struct sk_msg *msg);
-static inline struct sk_psock_link *sk_psock_init_link(void)
-{
- return kzalloc(sizeof(struct sk_psock_link),
- GFP_ATOMIC | __GFP_NOWARN);
-}
+/*
+ * This specialized allocator has to be a macro for its allocations to be
+ * accounted separately (to have a separate alloc_tag). The typecast is
+ * intentional to enforce typesafety.
+ */
+#define sk_psock_init_link() \
+ ((struct sk_psock_link *)kzalloc(sizeof(struct sk_psock_link), \
+ GFP_ATOMIC | __GFP_NOWARN))
static inline void sk_psock_free_link(struct sk_psock_link *link)
{
@@ -408,24 +453,6 @@ static inline void sk_psock_restore_proto(struct sock *sk,
psock->psock_update_sk_prot(sk, psock, true);
}
-static inline void sk_psock_set_state(struct sk_psock *psock,
- enum sk_psock_state_bits bit)
-{
- set_bit(bit, &psock->state);
-}
-
-static inline void sk_psock_clear_state(struct sk_psock *psock,
- enum sk_psock_state_bits bit)
-{
- clear_bit(bit, &psock->state);
-}
-
-static inline bool sk_psock_test_state(const struct sk_psock *psock,
- enum sk_psock_state_bits bit)
-{
- return test_bit(bit, &psock->state);
-}
-
static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
struct sk_psock *psock;
@@ -448,10 +475,12 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
+ read_lock_bh(&sk->sk_callback_lock);
if (psock->saved_data_ready)
psock->saved_data_ready(sk);
else
sk->sk_data_ready(sk);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static inline void psock_set_prog(struct bpf_prog **pprog,
@@ -494,8 +523,22 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
#if IS_ENABLED(CONFIG_NET_SOCK_MSG)
-/* We only have one bit so far. */
-#define BPF_F_PTR_MASK ~(BPF_F_INGRESS)
+#define BPF_F_STRPARSER (1UL << 1)
+
+/* We only have two bits so far. */
+#define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER)
+
+static inline bool skb_bpf_strparser(const struct sk_buff *skb)
+{
+ unsigned long sk_redir = skb->_sk_redir;
+
+ return sk_redir & BPF_F_STRPARSER;
+}
+
+static inline void skb_bpf_set_strparser(struct sk_buff *skb)
+{
+ skb->_sk_redir |= BPF_F_STRPARSER;
+}
static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 0c97d788762c..cf443f064a66 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -12,35 +12,94 @@
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H
+#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
+#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
+#include <linux/cleanup.h>
+#include <linux/hash.h>
+
+enum _slab_flag_bits {
+ _SLAB_CONSISTENCY_CHECKS,
+ _SLAB_RED_ZONE,
+ _SLAB_POISON,
+ _SLAB_KMALLOC,
+ _SLAB_HWCACHE_ALIGN,
+ _SLAB_CACHE_DMA,
+ _SLAB_CACHE_DMA32,
+ _SLAB_STORE_USER,
+ _SLAB_PANIC,
+ _SLAB_TYPESAFE_BY_RCU,
+ _SLAB_TRACE,
+#ifdef CONFIG_DEBUG_OBJECTS
+ _SLAB_DEBUG_OBJECTS,
+#endif
+ _SLAB_NOLEAKTRACE,
+ _SLAB_NO_MERGE,
+#ifdef CONFIG_FAILSLAB
+ _SLAB_FAILSLAB,
+#endif
+#ifdef CONFIG_MEMCG
+ _SLAB_ACCOUNT,
+#endif
+#ifdef CONFIG_KASAN_GENERIC
+ _SLAB_KASAN,
+#endif
+ _SLAB_NO_USER_FLAGS,
+#ifdef CONFIG_KFENCE
+ _SLAB_SKIP_KFENCE,
+#endif
+#ifndef CONFIG_SLUB_TINY
+ _SLAB_RECLAIM_ACCOUNT,
+#endif
+ _SLAB_OBJECT_POISON,
+ _SLAB_CMPXCHG_DOUBLE,
+#ifdef CONFIG_SLAB_OBJ_EXT
+ _SLAB_NO_OBJ_EXT,
+#endif
+ _SLAB_FLAGS_LAST_BIT
+};
+#define __SLAB_FLAG_BIT(nr) ((slab_flags_t __force)(1U << (nr)))
+#define __SLAB_FLAG_UNUSED ((slab_flags_t __force)(0U))
/*
* Flags to pass to kmem_cache_create().
- * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
+ * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise are no-op
*/
/* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
+#define SLAB_CONSISTENCY_CHECKS __SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS)
/* DEBUG: Red zone objs in a cache */
-#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U)
+#define SLAB_RED_ZONE __SLAB_FLAG_BIT(_SLAB_RED_ZONE)
/* DEBUG: Poison objects */
-#define SLAB_POISON ((slab_flags_t __force)0x00000800U)
-/* Align objs on cache lines */
-#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
+#define SLAB_POISON __SLAB_FLAG_BIT(_SLAB_POISON)
+/* Indicate a kmalloc slab */
+#define SLAB_KMALLOC __SLAB_FLAG_BIT(_SLAB_KMALLOC)
+/**
+ * define SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries.
+ *
+ * Sufficiently large objects are aligned on cache line boundary. For object
+ * size smaller than a half of cache line size, the alignment is on the half of
+ * cache line size. In general, if object size is smaller than 1/2^n of cache
+ * line size, the alignment is adjusted to 1/2^n.
+ *
+ * If explicit alignment is also requested by the respective
+ * &struct kmem_cache_args field, the greater of the two alignments is applied.
+ */
+#define SLAB_HWCACHE_ALIGN __SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
/* Use GFP_DMA memory */
-#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
+#define SLAB_CACHE_DMA __SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
/* Use GFP_DMA32 memory */
-#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)
+#define SLAB_CACHE_DMA32 __SLAB_FLAG_BIT(_SLAB_CACHE_DMA32)
/* DEBUG: Store the last owner for bug hunting */
-#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
+#define SLAB_STORE_USER __SLAB_FLAG_BIT(_SLAB_STORE_USER)
/* Panic if kmem_cache_create() fails */
-#define SLAB_PANIC ((slab_flags_t __force)0x00040000U)
-/*
- * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
+#define SLAB_PANIC __SLAB_FLAG_BIT(_SLAB_PANIC)
+/**
+ * define SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
*
* This delays freeing the SLAB page by a grace period, it does _NOT_
* delay object freeing. This means that if you do kmem_cache_free()
@@ -51,18 +110,22 @@
* stays valid, the trick to using this is relying on an independent
* object validation pass. Something like:
*
- * rcu_read_lock()
- * again:
- * obj = lockless_lookup(key);
- * if (obj) {
- * if (!try_get_ref(obj)) // might fail for free objects
- * goto again;
- *
- * if (obj->key != key) { // not the object we expected
- * put_ref(obj);
- * goto again;
- * }
- * }
+ * ::
+ *
+ *  begin:
+ *   rcu_read_lock();
+ *   obj = lockless_lookup(key);
+ *   if (obj) {
+ *     if (!try_get_ref(obj)) { // might fail for free objects
+ *       rcu_read_unlock();
+ *       goto begin;
+ *     }
+ *
+ *     if (obj->key != key) { // not the object we expected
+ *       put_ref(obj);
+ *       rcu_read_unlock();
+ *       goto begin;
+ *     }
+ *   }
* rcu_read_unlock();
*
* This is useful if we need to approach a kernel structure obliquely,
@@ -74,51 +137,112 @@
* rcu_read_lock before reading the address, then rcu_read_unlock after
* taking the spinlock within the structure expected at that address.
*
+ * Note that object identity check has to be done *after* acquiring a
+ * reference, therefore user has to ensure proper ordering for loads.
+ * Similarly, when initializing objects allocated with SLAB_TYPESAFE_BY_RCU,
+ * the newly allocated object has to be fully initialized *before* its
+ * refcount gets initialized and proper ordering for stores is required.
+ * refcount_{add|inc}_not_zero_acquire() and refcount_set_release() are
+ * designed with the proper fences required for reference counting objects
+ * allocated with SLAB_TYPESAFE_BY_RCU.
+ *
+ * Note that it is not possible to acquire a lock within a structure
+ * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
+ * as described above. The reason is that SLAB_TYPESAFE_BY_RCU pages
+ * are not zeroed before being given to the caller, which means that any
+ * locks must be initialized after each and every kmem_cache_alloc().
+ * Alternatively, make the ctor passed to kmem_cache_create() initialize
+ * the locks at page-allocation time, as is done in __i915_request_ctor(),
+ * sighand_ctor(), and anon_vma_ctor(). Such a ctor permits readers
+ * to safely acquire those ctor-initialized locks under rcu_read_lock()
+ * protection.
+ *
* Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
*/
-/* Defer freeing slabs to RCU */
-#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000U)
-/* Spread some memory over cpuset */
-#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000U)
+#define SLAB_TYPESAFE_BY_RCU __SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
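/* Illustrative sketch (not part of the patch): a ctor-initialized lock as
 * recommended in the comment above, so readers may take it under
 * rcu_read_lock() even across object reuse. All names are hypothetical.
 */
struct example_obj {
	spinlock_t lock;	/* initialized once per slab page, via ctor */
	unsigned long key;
	refcount_t ref;
};

static void example_ctor(void *addr)
{
	struct example_obj *obj = addr;

	spin_lock_init(&obj->lock);
}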
/* Trace allocations and frees */
-#define SLAB_TRACE ((slab_flags_t __force)0x00200000U)
+#define SLAB_TRACE __SLAB_FLAG_BIT(_SLAB_TRACE)
/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U)
+# define SLAB_DEBUG_OBJECTS __SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS)
#else
-# define SLAB_DEBUG_OBJECTS 0
+# define SLAB_DEBUG_OBJECTS __SLAB_FLAG_UNUSED
#endif
/* Avoid kmemleak tracing */
-#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U)
+#define SLAB_NOLEAKTRACE __SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE)
+
+/*
+ * Prevent merging with compatible kmem caches. This flag should be used
+ * cautiously. Valid use cases:
+ *
+ * - caches created for self-tests (e.g. kunit)
+ * - general caches created and used by a subsystem, only when a
+ * (subsystem-specific) debug option is enabled
+ * - performance critical caches, should be very rare and consulted with slab
+ * maintainers, and not used together with CONFIG_SLUB_TINY
+ */
+#define SLAB_NO_MERGE __SLAB_FLAG_BIT(_SLAB_NO_MERGE)
/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U)
+# define SLAB_FAILSLAB __SLAB_FLAG_BIT(_SLAB_FAILSLAB)
#else
-# define SLAB_FAILSLAB 0
+# define SLAB_FAILSLAB __SLAB_FLAG_UNUSED
#endif
-/* Account to memcg */
-#ifdef CONFIG_MEMCG_KMEM
-# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U)
+/**
+ * define SLAB_ACCOUNT - Account allocations to memcg.
+ *
+ * All object allocations from this cache will be memcg accounted, regardless of
+ * __GFP_ACCOUNT being or not being passed to individual allocations.
+ */
+#ifdef CONFIG_MEMCG
+# define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT)
#else
-# define SLAB_ACCOUNT 0
+# define SLAB_ACCOUNT __SLAB_FLAG_UNUSED
#endif
-#ifdef CONFIG_KASAN
-#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
+#ifdef CONFIG_KASAN_GENERIC
+#define SLAB_KASAN __SLAB_FLAG_BIT(_SLAB_KASAN)
#else
-#define SLAB_KASAN 0
+#define SLAB_KASAN __SLAB_FLAG_UNUSED
+#endif
+
+/*
+ * Ignore user specified debugging flags.
+ * Intended for caches created for self-tests so they have only flags
+ * specified in the code and other flags are ignored.
+ */
+#define SLAB_NO_USER_FLAGS __SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS)
+
+#ifdef CONFIG_KFENCE
+#define SLAB_SKIP_KFENCE __SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE)
+#else
+#define SLAB_SKIP_KFENCE __SLAB_FLAG_UNUSED
#endif
/* The following flags affect the page allocator grouping pages by mobility */
-/* Objects are reclaimable */
-#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
+/**
+ * define SLAB_RECLAIM_ACCOUNT - Objects are reclaimable.
+ *
+ * Use this flag for caches that have an associated shrinker. As a result, slab
+ * pages are allocated with __GFP_RECLAIMABLE, which affects grouping pages by
+ * mobility, and are accounted in SReclaimable counter in /proc/meminfo
+ */
+#ifndef CONFIG_SLUB_TINY
+#define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
+#else
+#define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_UNUSED
+#endif
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
-/* Slab deactivation flag */
-#define SLAB_DEACTIVATED ((slab_flags_t __force)0x10000000U)
+/* Slab created using create_boot_cache */
+#ifdef CONFIG_SLAB_OBJ_EXT
+#define SLAB_NO_OBJ_EXT __SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT)
+#else
+#define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED
+#endif
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
@@ -135,25 +259,213 @@
#include <linux/kasan.h>
+struct list_lru;
struct mem_cgroup;
/*
* struct kmem_cache related prototypes
*/
-void __init kmem_cache_init(void);
bool slab_is_available(void);
-extern bool usercopy_fallback;
+/**
+ * struct kmem_cache_args - Less common arguments for kmem_cache_create()
+ *
+ * Any uninitialized fields of the structure are interpreted as unused. The
+ * exception is @freeptr_offset where %0 is a valid value, so
+ * @use_freeptr_offset must also be set to %true in order to interpret the field
+ * as used. For @useroffset %0 is also valid, but only with non-%0
+ * @usersize.
+ *
+ * When %NULL args is passed to kmem_cache_create(), it is equivalent to all
+ * fields unused.
+ */
+struct kmem_cache_args {
+ /**
+ * @align: The required alignment for the objects.
+ *
+ * %0 means no specific alignment is requested.
+ */
+ unsigned int align;
+ /**
+ * @useroffset: Usercopy region offset.
+ *
+ * %0 is a valid offset, when @usersize is non-%0
+ */
+ unsigned int useroffset;
+ /**
+ * @usersize: Usercopy region size.
+ *
+ * %0 means no usercopy region is specified.
+ */
+ unsigned int usersize;
+ /**
+ * @freeptr_offset: Custom offset for the free pointer
+ * in &SLAB_TYPESAFE_BY_RCU caches
+ *
+ * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer
+ * outside of the object. This might cause the object to grow in size.
+ * Cache creators that have a reason to avoid this can specify a custom
+ * free pointer offset in their struct where the free pointer will be
+ * placed.
+ *
+ * Note that placing the free pointer inside the object requires the
+ * caller to ensure that no fields are invalidated that are required to
+ * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for
+ * details).
+ *
+ * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
+ * is specified, %use_freeptr_offset must be set %true.
+ *
+ * Note that @ctor currently isn't supported with custom free pointers
+ * as a @ctor requires an external free pointer.
+ */
+ unsigned int freeptr_offset;
+ /**
+ * @use_freeptr_offset: Whether a @freeptr_offset is used.
+ */
+ bool use_freeptr_offset;
+ /**
+ * @ctor: A constructor for the objects.
+ *
+ * The constructor is invoked for each object in a newly allocated slab
+ * page. It is the cache user's responsibility to free the object in the
+ * same state as after calling the constructor, or deal appropriately
+ * with any differences between a freshly constructed and a reallocated
+ * object.
+ *
+ * %NULL means no constructor.
+ */
+ void (*ctor)(void *);
+ /**
+ * @sheaf_capacity: Enable sheaves of given capacity for the cache.
+ *
+ * With a non-zero value, allocations from the cache go through caching
+ * arrays called sheaves. Each cpu has a main sheaf that's always
+ * present, and a spare sheaf that may not be present. When both become
+ * empty, there's an attempt to replace an empty sheaf with a full sheaf
+ * from the per-node barn.
+ *
+ * When no full sheaf is available, and gfp flags allow blocking, a
+ * sheaf is allocated and filled from slab(s) using bulk allocation.
+ * Otherwise the allocation falls back to the normal operation
+ * allocating a single object from a slab.
+ *
+ * Analogously, when freeing and both percpu sheaves are full, the barn
+ * may replace a full sheaf with an empty one, unless it's over capacity. In
+ * that case a sheaf is bulk-freed to slab pages.
+ *
+ * The sheaves do not enforce NUMA placement of objects, so allocations
+ * via kmem_cache_alloc_node() with a node specified other than
+ * NUMA_NO_NODE will bypass them.
+ *
+ * Bulk allocation and free operations also try to use the cpu sheaves
+ * and barn, but fallback to using slab pages directly.
+ *
+ * When slub_debug is enabled for the cache, the sheaf_capacity argument
+ * is ignored.
+ *
+ * %0 means no sheaves will be created.
+ */
+ unsigned int sheaf_capacity;
+};
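+
+/*
+ * Illustrative sketch (not part of this patch): a SLAB_TYPESAFE_BY_RCU cache
+ * that keeps its free pointer inside the object. "struct foo" and its
+ * "freeptr" field are hypothetical names used only for this example.
+ *
+ *	struct kmem_cache_args args = {
+ *		.align			= __alignof__(struct foo),
+ *		.freeptr_offset		= offsetof(struct foo, freeptr),
+ *		.use_freeptr_offset	= true,
+ *	};
+ *	struct kmem_cache *foo_cache =
+ *		kmem_cache_create("foo", sizeof(struct foo), &args,
+ *				  SLAB_TYPESAFE_BY_RCU);
+ */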
+
+struct kmem_cache *__kmem_cache_create_args(const char *name,
+ unsigned int object_size,
+ struct kmem_cache_args *args,
+ slab_flags_t flags);
+static inline struct kmem_cache *
+__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
+ slab_flags_t flags, void (*ctor)(void *))
+{
+ struct kmem_cache_args kmem_args = {
+ .align = align,
+ .ctor = ctor,
+ };
+
+ return __kmem_cache_create_args(name, size, &kmem_args, flags);
+}
+
+/**
+ * kmem_cache_create_usercopy - Create a kmem cache with a region suitable
+ * for copying to userspace.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @useroffset: Usercopy region offset
+ * @usersize: Usercopy region size
+ * @ctor: A constructor for the objects, or %NULL.
+ *
+ * This is a legacy wrapper; new code should use either KMEM_CACHE_USERCOPY()
+ * if whitelisting a single field is sufficient, or kmem_cache_create() with
+ * the necessary parameters passed via the args parameter (see
+ * &struct kmem_cache_args).
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+static inline struct kmem_cache *
+kmem_cache_create_usercopy(const char *name, unsigned int size,
+ unsigned int align, slab_flags_t flags,
+ unsigned int useroffset, unsigned int usersize,
+ void (*ctor)(void *))
+{
+ struct kmem_cache_args kmem_args = {
+ .align = align,
+ .ctor = ctor,
+ .useroffset = useroffset,
+ .usersize = usersize,
+ };
+
+ return __kmem_cache_create_args(name, size, &kmem_args, flags);
+}
+
+/* If NULL is passed for @args, use this variant with default arguments. */
+static inline struct kmem_cache *
+__kmem_cache_default_args(const char *name, unsigned int size,
+ struct kmem_cache_args *args,
+ slab_flags_t flags)
+{
+ struct kmem_cache_args kmem_default_args = {};
+
+ /* Make sure we don't get passed garbage. */
+ if (WARN_ON_ONCE(args))
+ return ERR_PTR(-EINVAL);
+
+ return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
+}
+
+/**
+ * kmem_cache_create - Create a kmem cache.
+ * @__name: A string which is used in /proc/slabinfo to identify this cache.
+ * @__object_size: The size of objects to be created in this cache.
+ * @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL
+ * means defaults will be used for all the arguments.
+ *
+ * This is currently implemented as a macro using ``_Generic()`` to call
+ * either the new variant of the function, or a legacy one.
+ *
+ * The new variant has 4 parameters:
+ * ``kmem_cache_create(name, object_size, args, flags)``
+ *
+ * See __kmem_cache_create_args() which implements this.
+ *
+ * The legacy variant has 5 parameters:
+ * ``kmem_cache_create(name, object_size, align, flags, ctor)``
+ *
+ * The align and ctor parameters map to the respective fields of
+ * &struct kmem_cache_args.
+ *
+ * Context: Cannot be called within an interrupt, but can be interrupted.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+#define kmem_cache_create(__name, __object_size, __args, ...) \
+ _Generic((__args), \
+ struct kmem_cache_args *: __kmem_cache_create_args, \
+ void *: __kmem_cache_default_args, \
+ default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)
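+
+/*
+ * Illustrative sketch (not part of this patch): both forms dispatched by the
+ * _Generic() above, creating equivalent caches for a hypothetical
+ * "struct foo" with constructor foo_ctor().
+ *
+ *	struct kmem_cache_args args = {
+ *		.align	= __alignof__(struct foo),
+ *		.ctor	= foo_ctor,
+ *	};
+ *	struct kmem_cache *c1 =
+ *		kmem_cache_create("foo", sizeof(struct foo), &args, 0);
+ *
+ *	struct kmem_cache *c2 =
+ *		kmem_cache_create("foo", sizeof(struct foo),
+ *				  __alignof__(struct foo), 0, foo_ctor);
+ */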
-struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
- unsigned int align, slab_flags_t flags,
- void (*ctor)(void *));
-struct kmem_cache *kmem_cache_create_usercopy(const char *name,
- unsigned int size, unsigned int align,
- slab_flags_t flags,
- unsigned int useroffset, unsigned int usersize,
- void (*ctor)(void *));
-void kmem_cache_destroy(struct kmem_cache *);
-int kmem_cache_shrink(struct kmem_cache *);
+void kmem_cache_destroy(struct kmem_cache *s);
+int kmem_cache_shrink(struct kmem_cache *s);
/*
* Please use this macro to create slab caches. Simply specify the
@@ -163,53 +475,79 @@ int kmem_cache_shrink(struct kmem_cache *);
* f.e. add ____cacheline_aligned_in_smp to the struct declaration
* then the objects will be properly aligned in SMP configurations.
*/
-#define KMEM_CACHE(__struct, __flags) \
- kmem_cache_create(#__struct, sizeof(struct __struct), \
- __alignof__(struct __struct), (__flags), NULL)
+#define KMEM_CACHE(__struct, __flags) \
+ __kmem_cache_create_args(#__struct, sizeof(struct __struct), \
+ &(struct kmem_cache_args) { \
+ .align = __alignof__(struct __struct), \
+ }, (__flags))
/*
* To whitelist a single field for copying to/from usercopy, use this
* macro instead for KMEM_CACHE() above.
*/
-#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \
- kmem_cache_create_usercopy(#__struct, \
- sizeof(struct __struct), \
- __alignof__(struct __struct), (__flags), \
- offsetof(struct __struct, __field), \
- sizeof_field(struct __struct, __field), NULL)
+#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \
+ __kmem_cache_create_args(#__struct, sizeof(struct __struct), \
+ &(struct kmem_cache_args) { \
+ .align = __alignof__(struct __struct), \
+ .useroffset = offsetof(struct __struct, __field), \
+ .usersize = sizeof_field(struct __struct, __field), \
+ }, (__flags))
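+
+/*
+ * Illustrative sketch (not part of this patch): whitelisting the single
+ * "data" field of a hypothetical "struct foo" for usercopy; this expands to
+ * the __kmem_cache_create_args() call above.
+ *
+ *	struct kmem_cache *foo_cache =
+ *		KMEM_CACHE_USERCOPY(foo, SLAB_HWCACHE_ALIGN, data);
+ */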
/*
* Common kmalloc functions provided by all allocators
*/
-void * __must_check krealloc(const void *, size_t, gfp_t);
-void kfree(const void *);
-void kfree_sensitive(const void *);
-size_t __ksize(const void *);
-size_t ksize(const void *);
-#ifdef CONFIG_PRINTK
-bool kmem_valid_obj(void *object);
-void kmem_dump_obj(void *object);
-#endif
+void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size,
+ unsigned long align,
+ gfp_t flags, int nid) __realloc_size(2);
+#define krealloc_noprof(_o, _s, _f) krealloc_node_align_noprof(_o, _s, 1, _f, NUMA_NO_NODE)
+#define krealloc_node_align(...) alloc_hooks(krealloc_node_align_noprof(__VA_ARGS__))
+#define krealloc_node(_o, _s, _f, _n) krealloc_node_align(_o, _s, 1, _f, _n)
+#define krealloc(...) krealloc_node(__VA_ARGS__, NUMA_NO_NODE)
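+
+/*
+ * Illustrative sketch (not part of this patch): on failure krealloc()
+ * returns %NULL and leaves the original allocation intact, so the result
+ * must go through a temporary.
+ *
+ *	new = krealloc(buf, new_size, GFP_KERNEL);
+ *	if (!new)
+ *		return -ENOMEM;	(buf is still valid here)
+ *	buf = new;
+ */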
+
+void kfree(const void *objp);
+void kfree_nolock(const void *objp);
+void kfree_sensitive(const void *objp);
+size_t __ksize(const void *objp);
+
+DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
+DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))
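+
+/*
+ * Illustrative sketch (not part of this patch): the classes defined above
+ * enable scope-based freeing via __free() from <linux/cleanup.h>; the buffer
+ * is freed automatically on every return path unless ownership is handed
+ * out with return_ptr() or no_free_ptr().
+ *
+ *	void *buf __free(kfree) = kmalloc(size, GFP_KERNEL);
+ *
+ *	if (!buf)
+ *		return -ENOMEM;
+ */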
-#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
-void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
- bool to_user);
+/**
+ * ksize - Report actual allocation size of associated object
+ *
+ * @objp: Pointer returned from a prior kmalloc()-family allocation.
+ *
+ * This should not be used for writing beyond the originally requested
+ * allocation size. Either use krealloc() or round up the allocation size
+ * with kmalloc_size_roundup() prior to allocation. If this is used to
+ * access beyond the originally requested allocation size, UBSAN_BOUNDS
+ * and/or FORTIFY_SOURCE may trip, since they only know about the
+ * originally allocated size via the __alloc_size attribute.
+ */
+size_t ksize(const void *objp);
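+
+/*
+ * Illustrative sketch (not part of this patch): kmalloc(60, GFP_KERNEL) may
+ * return a 64-byte slab object, so ksize() reports 64 while
+ * FORTIFY_SOURCE/UBSAN_BOUNDS still track the requested 60 bytes; a
+ * memset(p, 0, ksize(p)) can therefore trip them. Round up beforehand
+ * instead:
+ *
+ *	size = kmalloc_size_roundup(size);
+ *	p = kmalloc(size, GFP_KERNEL);
+ */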
+
+#ifdef CONFIG_PRINTK
+bool kmem_dump_obj(void *object);
#else
-static inline void __check_heap_object(const void *ptr, unsigned long n,
- struct page *page, bool to_user) { }
+static inline bool kmem_dump_obj(void *object) { return false; }
#endif
/*
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than the alignment of a 64-bit integer.
- * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
+ * Setting ARCH_DMA_MINALIGN in arch headers allows that.
*/
-#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
+#ifdef ARCH_HAS_DMA_MINALIGN
+#if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN)
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
-#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
-#else
+#endif
+#endif
+
+#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#elif ARCH_KMALLOC_MINALIGN > 8
+#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
+#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
#endif
/*
@@ -222,9 +560,21 @@ static inline void __check_heap_object(const void *ptr, unsigned long n,
#endif
/*
- * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
- * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
- * aligned pointers.
+ * Arches can define this function if they want to decide the minimum slab
+ * alignment at runtime. The value returned by the function must be a power
+ * of two and >= ARCH_SLAB_MINALIGN.
+ */
+#ifndef arch_slab_minalign
+static inline unsigned int arch_slab_minalign(void)
+{
+ return ARCH_SLAB_MINALIGN;
+}
+#endif
+
+/*
+ * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
+ * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
+ * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
*/
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
@@ -234,48 +584,15 @@ static inline void __check_heap_object(const void *ptr, unsigned long n,
* Kmalloc array related definitions
*/
-#ifdef CONFIG_SLAB
-/*
- * The largest kmalloc size supported by the SLAB allocators is
- * 32 megabyte (2^25) or the maximum allocatable page order if that is
- * less than 32 MB.
- *
- * WARNING: Its not easy to increase this value since the allocators have
- * to do various tricks to work around compiler limitations in order to
- * ensure proper constant folding.
- */
-#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
- (MAX_ORDER + PAGE_SHIFT - 1) : 25)
-#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
-#ifndef KMALLOC_SHIFT_LOW
-#define KMALLOC_SHIFT_LOW 5
-#endif
-#endif
-
-#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
* (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
+#define KMALLOC_SHIFT_MAX (MAX_PAGE_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
-#endif
-
-#ifdef CONFIG_SLOB
-/*
- * SLOB passes all requests larger than one page to the page allocator.
- * No kmalloc array is necessary since objects of different sizes can
- * be allocated from the same page.
- */
-#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
-#ifndef KMALLOC_SHIFT_LOW
-#define KMALLOC_SHIFT_LOW 3
-#endif
-#endif
/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
@@ -302,41 +619,86 @@ static inline void __check_heap_object(const void *ptr, unsigned long n,
#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
(KMALLOC_MIN_SIZE) : 16)
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+#define RANDOM_KMALLOC_CACHES_NR 15 // # of cache copies
+#else
+#define RANDOM_KMALLOC_CACHES_NR 0
+#endif
+
/*
 * Whenever changing this, take care that kmalloc_type() and
* create_kmalloc_caches() still work as intended.
+ *
+ * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
+ * is for accounted but unreclaimable and non-dma objects. All the other
+ * kmem caches can have both accounted and unaccounted objects.
*/
enum kmalloc_cache_type {
KMALLOC_NORMAL = 0,
+#ifndef CONFIG_ZONE_DMA
+ KMALLOC_DMA = KMALLOC_NORMAL,
+#endif
+#ifndef CONFIG_MEMCG
+ KMALLOC_CGROUP = KMALLOC_NORMAL,
+#endif
+ KMALLOC_RANDOM_START = KMALLOC_NORMAL,
+ KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
+#ifdef CONFIG_SLUB_TINY
+ KMALLOC_RECLAIM = KMALLOC_NORMAL,
+#else
KMALLOC_RECLAIM,
+#endif
#ifdef CONFIG_ZONE_DMA
KMALLOC_DMA,
#endif
+#ifdef CONFIG_MEMCG
+ KMALLOC_CGROUP,
+#endif
NR_KMALLOC_TYPES
};
-#ifndef CONFIG_SLOB
-extern struct kmem_cache *
-kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
+typedef struct kmem_cache * kmem_buckets[KMALLOC_SHIFT_HIGH + 1];
+
+extern kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES];
+
+/*
+ * Define gfp bits that should not be set for KMALLOC_NORMAL.
+ */
+#define KMALLOC_NOT_NORMAL_BITS \
+ (__GFP_RECLAIMABLE | \
+ (IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) | \
+ (IS_ENABLED(CONFIG_MEMCG) ? __GFP_ACCOUNT : 0))
-static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
+extern unsigned long random_kmalloc_seed;
+
+static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
{
-#ifdef CONFIG_ZONE_DMA
/*
* The most common case is KMALLOC_NORMAL, so test for it
- * with a single branch for both flags.
+ * with a single branch for all the relevant flags.
*/
- if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
+ if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+ /* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
+ return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
+ ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
+#else
return KMALLOC_NORMAL;
+#endif
/*
- * At least one of the flags has to be set. If both are, __GFP_DMA
- * is more important.
+ * At least one of the flags has to be set. Their priorities in
+ * decreasing order are:
+ * 1) __GFP_DMA
+ * 2) __GFP_RECLAIMABLE
+ * 3) __GFP_ACCOUNT
*/
- return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
-#else
- return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
-#endif
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
+ return KMALLOC_DMA;
+ if (!IS_ENABLED(CONFIG_MEMCG) || (flags & __GFP_RECLAIMABLE))
+ return KMALLOC_RECLAIM;
+ else
+ return KMALLOC_CGROUP;
}
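+
+/*
+ * Illustrative sketch (not part of this patch), assuming CONFIG_ZONE_DMA=y
+ * and CONFIG_MEMCG=y: GFP_KERNEL resolves to KMALLOC_NORMAL (or one of its
+ * random copies), GFP_KERNEL | __GFP_DMA to KMALLOC_DMA,
+ * GFP_KERNEL | __GFP_RECLAIMABLE | __GFP_ACCOUNT to KMALLOC_RECLAIM, and
+ * GFP_KERNEL | __GFP_ACCOUNT alone to KMALLOC_CGROUP.
+ */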
/*
@@ -346,8 +708,14 @@ static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
* 1 = 65 .. 96 bytes
* 2 = 129 .. 192 bytes
* n = 2^(n-1)+1 .. 2^n
+ *
+ * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
+ * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
+ * Callers where !size_is_constant should only be test modules, where runtime
+ * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab().
*/
-static __always_inline unsigned int kmalloc_index(size_t size)
+static __always_inline unsigned int __kmalloc_index(size_t size,
+ bool size_is_constant)
{
if (!size)
return 0;
@@ -378,21 +746,72 @@ static __always_inline unsigned int kmalloc_index(size_t size)
if (size <= 512 * 1024) return 19;
if (size <= 1024 * 1024) return 20;
if (size <= 2 * 1024 * 1024) return 21;
- if (size <= 4 * 1024 * 1024) return 22;
- if (size <= 8 * 1024 * 1024) return 23;
- if (size <= 16 * 1024 * 1024) return 24;
- if (size <= 32 * 1024 * 1024) return 25;
- if (size <= 64 * 1024 * 1024) return 26;
- BUG();
+
+ if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
+ BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
+ else
+ BUG();
/* Will never be reached. Needed because the compiler may complain */
return -1;
}
-#endif /* !CONFIG_SLOB */
+static_assert(PAGE_SHIFT <= 20);
+#define kmalloc_index(s) __kmalloc_index(s, true)
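+
+/*
+ * Illustrative sketch (not part of this patch), assuming the default
+ * KMALLOC_SHIFT_LOW of 3: kmalloc_index(7) folds to 3 (the 8-byte cache),
+ * kmalloc_index(96) to 1 (the 96-byte cache) and kmalloc_index(100) to 7
+ * (the 128-byte cache), all evaluated at compile time.
+ */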
+
+#include <linux/alloc_tag.h>
-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
-void kmem_cache_free(struct kmem_cache *, void *);
+/**
+ * kmem_cache_alloc - Allocate an object
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ *
+ * Allocate an object from this cache.
+ * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
+ *
+ * Return: pointer to the new object or %NULL in case of error
+ */
+void *kmem_cache_alloc_noprof(struct kmem_cache *cachep,
+ gfp_t flags) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc(...) alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))
+
+void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
+ gfp_t gfpflags) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc_lru(...) alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))
+
+/**
+ * kmem_cache_charge - memcg charge an already allocated slab memory
+ * @objp: address of the slab object to memcg charge
+ * @gfpflags: describe the allocation context
+ *
+ * kmem_cache_charge allows charging a slab object to the current memcg,
+ * primarily in cases where charging at allocation time might not be possible
+ * because the target memcg is not known (e.g. softirq context).
+ *
+ * The @objp should be a pointer returned by the slab allocator functions like
+ * kmalloc() (with __GFP_ACCOUNT in flags) or kmem_cache_alloc(). The memcg
+ * charge behavior can be controlled through the @gfpflags parameter, which
+ * affects how the necessary internal metadata is allocated. Including
+ * __GFP_NOFAIL means that overcharging is requested instead of failure, but
+ * it is not applied to the internal metadata allocation.
+ *
+ * There are several cases where it will return %true even if the charging
+ * was not done. More specifically:
+ *
+ * 1. For !CONFIG_MEMCG or cgroup_disable=memory systems.
+ * 2. Already charged slab objects.
+ * 3. For slab objects from KMALLOC_NORMAL caches, i.e. allocated by kmalloc()
+ *    without __GFP_ACCOUNT.
+ * 4. When allocating the internal metadata has failed.
+ *
+ * Return: %true if the charge was successful, otherwise %false.
+ */
+bool kmem_cache_charge(void *objp, gfp_t gfpflags);
+void kmem_cache_free(struct kmem_cache *s, void *objp);
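+
+/*
+ * Illustrative sketch (not part of this patch): deferring the memcg charge
+ * of an object allocated in softirq context; "cache" and "obj" are
+ * hypothetical.
+ *
+ *	obj = kmem_cache_alloc(cache, GFP_ATOMIC);  (softirq: memcg unknown)
+ *	...
+ *	if (!kmem_cache_charge(obj, GFP_KERNEL))    (later, in task context)
+ *		pr_debug("object left uncharged\n");
+ */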
+
+kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
+ unsigned int useroffset, unsigned int usersize,
+ void (*ctor)(void *));
/*
* Bulk allocation and freeing operations. These are accelerated in an
@@ -401,104 +820,91 @@ void kmem_cache_free(struct kmem_cache *, void *);
*
* Note that interrupts must be enabled when calling these functions.
*/
-void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
-int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
+
+int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
+#define kmem_cache_alloc_bulk(...) alloc_hooks(kmem_cache_alloc_bulk_noprof(__VA_ARGS__))
-/*
- * Caller must not use kfree_bulk() on memory not originally allocated
- * by kmalloc(), because the SLOB allocator cannot handle this.
- */
static __always_inline void kfree_bulk(size_t size, void **p)
{
kmem_cache_free_bulk(NULL, size, p);
}
-#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
-#else
-static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return __kmalloc(size, flags);
-}
+void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
+ int node) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
-static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
-{
- return kmem_cache_alloc(s, flags);
-}
-#endif
+struct slab_sheaf *
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size);
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf **sheafp, unsigned int size);
-#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size) __assume_slab_alignment __malloc;
+void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf);
+
+void *kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *cachep, gfp_t gfp,
+ struct slab_sheaf *sheaf) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc_from_sheaf(...) \
+ alloc_hooks(kmem_cache_alloc_from_sheaf_noprof(__VA_ARGS__))
+
+unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf);
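+
+/*
+ * Illustrative sketch (not part of this patch): prefilling a sheaf with 16
+ * objects so a later atomic section can allocate without blocking; error
+ * handling is elided and "cache" is hypothetical.
+ *
+ *	struct slab_sheaf *sheaf;
+ *
+ *	sheaf = kmem_cache_prefill_sheaf(cache, GFP_KERNEL, 16);
+ *	...
+ *	obj = kmem_cache_alloc_from_sheaf(cache, GFP_NOWAIT, sheaf);
+ *	...
+ *	kmem_cache_return_sheaf(cache, GFP_KERNEL, sheaf);
+ */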
+
+/*
+ * These macros allow declaring a kmem_buckets * parameter alongside size, which
+ * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
+ * sites don't have to pass NULL.
+ */
+#ifdef CONFIG_SLAB_BUCKETS
+#define DECL_BUCKET_PARAMS(_size, _b) size_t (_size), kmem_buckets *(_b)
+#define PASS_BUCKET_PARAMS(_size, _b) (_size), (_b)
+#define PASS_BUCKET_PARAM(_b) (_b)
#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
-{
- return kmem_cache_alloc_trace(s, gfpflags, size);
-}
-#endif /* CONFIG_NUMA */
+#define DECL_BUCKET_PARAMS(_size, _b) size_t (_size)
+#define PASS_BUCKET_PARAMS(_size, _b) (_size)
+#define PASS_BUCKET_PARAM(_b) NULL
+#endif
-#else /* CONFIG_TRACING */
-static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
- gfp_t flags, size_t size)
-{
- void *ret = kmem_cache_alloc(s, flags);
+/*
+ * The following functions are not to be used directly and are intended only
+ * for internal use from kmalloc() and kmalloc_node(),
+ * with the exception of kunit tests.
+ */
- ret = kasan_kmalloc(s, ret, size, flags);
- return ret;
-}
+void *__kmalloc_noprof(size_t size, gfp_t flags)
+ __assume_kmalloc_alignment __alloc_size(1);
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
-{
- void *ret = kmem_cache_alloc_node(s, gfpflags, node);
+void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
+ __assume_kmalloc_alignment __alloc_size(1);
- ret = kasan_kmalloc(s, ret, size, gfpflags);
- return ret;
-}
-#endif /* CONFIG_TRACING */
+void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
+ __assume_kmalloc_alignment __alloc_size(3);
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
+void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
+ int node, size_t size)
+ __assume_kmalloc_alignment __alloc_size(4);
-#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
-#else
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
- return kmalloc_order(size, flags, order);
-}
-#endif
+void *__kmalloc_large_noprof(size_t size, gfp_t flags)
+ __assume_page_alignment __alloc_size(1);
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
- unsigned int order = get_order(size);
- return kmalloc_order_trace(size, flags, order);
-}
+void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
+ __assume_page_alignment __alloc_size(1);
/**
- * kmalloc - allocate memory
+ * kmalloc - allocate kernel memory
* @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
+ * @flags: describe the allocation context
*
* kmalloc is the normal method of allocating memory
* for objects smaller than page size in the kernel.
*
* The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
* bytes. For @size of power of two bytes, the alignment is also guaranteed
- * to be at least to the size.
+ * to be at least to the size. For other sizes, the alignment is guaranteed to
+ * be at least the largest power-of-two divisor of @size.
*
* The @flags argument may be one of the GFP flags defined at
- * include/linux/gfp.h and described at
+ * include/linux/gfp_types.h and described at
* :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
*
* The recommended usage of the @flags is described at
@@ -515,12 +921,12 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* %GFP_ATOMIC
* Allocation will not sleep. May use emergency pools.
*
- * %GFP_HIGHUSER
- * Allocate memory from high memory on behalf of user.
- *
* Also it is possible to set different flags by OR'ing
* in one or more of the following additional @flags:
*
+ * %__GFP_ZERO
+ * Zero the allocated memory before returning. Also see kzalloc().
+ *
* %__GFP_HIGH
* This allocation has high priority and may use emergency pools.
*
@@ -539,45 +945,48 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
* Try really hard to succeed the allocation but fail
* eventually.
*/
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags)
{
- if (__builtin_constant_p(size)) {
-#ifndef CONFIG_SLOB
+ if (__builtin_constant_p(size) && size) {
unsigned int index;
-#endif
- if (size > KMALLOC_MAX_CACHE_SIZE)
- return kmalloc_large(size, flags);
-#ifndef CONFIG_SLOB
- index = kmalloc_index(size);
- if (!index)
- return ZERO_SIZE_PTR;
+ if (size > KMALLOC_MAX_CACHE_SIZE)
+ return __kmalloc_large_noprof(size, flags);
- return kmem_cache_alloc_trace(
- kmalloc_caches[kmalloc_type(flags)][index],
+ index = kmalloc_index(size);
+ return __kmalloc_cache_noprof(
+ kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
flags, size);
-#endif
}
- return __kmalloc(size, flags);
+ return __kmalloc_noprof(size, flags);
}
+#define kmalloc(...) alloc_hooks(kmalloc_noprof(__VA_ARGS__))
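+
+/*
+ * Illustrative sketch (not part of this patch): a compile-time constant
+ * size such as sizeof(*p) lets kmalloc_noprof() above pick the kmalloc
+ * cache at compile time; "struct foo" is hypothetical.
+ *
+ *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
+ *
+ *	if (!p)
+ *		return -ENOMEM;
+ */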
+
+void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node);
+#define kmalloc_nolock(...) alloc_hooks(kmalloc_nolock_noprof(__VA_ARGS__))
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+#define kmem_buckets_alloc(_b, _size, _flags) \
+ alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
+
+#define kmem_buckets_alloc_track_caller(_b, _size, _flags) \
+ alloc_hooks(__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE, _RET_IP_))
+
+static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
{
-#ifndef CONFIG_SLOB
- if (__builtin_constant_p(size) &&
- size <= KMALLOC_MAX_CACHE_SIZE) {
- unsigned int i = kmalloc_index(size);
+ if (__builtin_constant_p(size) && size) {
+ unsigned int index;
- if (!i)
- return ZERO_SIZE_PTR;
+ if (size > KMALLOC_MAX_CACHE_SIZE)
+ return __kmalloc_large_node_noprof(size, flags, node);
- return kmem_cache_alloc_node_trace(
- kmalloc_caches[kmalloc_type(flags)][i],
- flags, node, size);
+ index = kmalloc_index(size);
+ return __kmalloc_cache_node_noprof(
+ kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
+ flags, node, size);
}
-#endif
- return __kmalloc_node(size, flags, node);
+ return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
}
+#define kmalloc_node(...) alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
/**
* kmalloc_array - allocate memory for an array.
@@ -585,16 +994,15 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
-static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
size_t bytes;
if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
- if (__builtin_constant_p(n) && __builtin_constant_p(size))
- return kmalloc(bytes, flags);
- return __kmalloc(bytes, flags);
+ return kmalloc_noprof(bytes, flags);
}
+#define kmalloc_array(...) alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))
/**
* krealloc_array - reallocate memory for an array.
@@ -602,17 +1010,30 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
* @new_n: new number of elements to alloc
* @new_size: new size of a single member of the array
* @flags: the type of memory to allocate (see kmalloc)
+ *
+ * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
+ * initial memory allocation, every subsequent call to this API for the same
+ * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
+ * __GFP_ZERO is not fully honored by this API.
+ *
+ * See krealloc_noprof() for further details.
+ *
+ * In any case, the contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.
*/
-static __must_check inline void *
-krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
+static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
+ size_t new_n,
+ size_t new_size,
+ gfp_t flags)
{
size_t bytes;
if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
return NULL;
- return krealloc(p, bytes, flags);
+ return krealloc_noprof(p, bytes, flags);
}
+#define krealloc_array(...) alloc_hooks(krealloc_array_noprof(__VA_ARGS__))
/**
* kcalloc - allocate memory for an array. The memory is set to zero.
@@ -620,10 +1041,14 @@ krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
* @size: element size.
* @flags: the type of memory to allocate (see kmalloc).
*/
-static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
-{
- return kmalloc_array(n, size, flags | __GFP_ZERO);
-}
+#define kcalloc(n, size, flags) kmalloc_array(n, size, (flags) | __GFP_ZERO)
+
+void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node,
+ unsigned long caller) __alloc_size(1);
+#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
+ __kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
+#define kmalloc_node_track_caller(...) \
+ alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))
/*
* kmalloc_track_caller is a special version of kmalloc that records the
@@ -633,79 +1058,122 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
-#define kmalloc_track_caller(size, flags) \
- __kmalloc_track_caller(size, flags, _RET_IP_)
+#define kmalloc_track_caller(...) kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)
-static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
- int node)
+#define kmalloc_track_caller_noprof(...) \
+ kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_)
+
+static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags,
+ int node)
{
size_t bytes;
if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
if (__builtin_constant_p(n) && __builtin_constant_p(size))
- return kmalloc_node(bytes, flags, node);
- return __kmalloc_node(bytes, flags, node);
+ return kmalloc_node_noprof(bytes, flags, node);
+ return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node);
}
+#define kmalloc_array_node(...) alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))
-static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
-{
- return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
-}
-
-
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
-#define kmalloc_node_track_caller(size, flags, node) \
- __kmalloc_node_track_caller(size, flags, node, \
- _RET_IP_)
-
-#else /* CONFIG_NUMA */
-
-#define kmalloc_node_track_caller(size, flags, node) \
- kmalloc_track_caller(size, flags)
-
-#endif /* CONFIG_NUMA */
+#define kcalloc_node(_n, _size, _flags, _node) \
+ kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)
/*
* Shortcuts
*/
-static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
-{
- return kmem_cache_alloc(k, flags | __GFP_ZERO);
-}
+#define kmem_cache_zalloc(_k, _flags) kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)
/**
* kzalloc - allocate memory. The memory is set to zero.
* @size: how many bytes of memory are required.
* @flags: the type of memory to allocate (see kmalloc).
*/
-static inline void *kzalloc(size_t size, gfp_t flags)
+static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
{
- return kmalloc(size, flags | __GFP_ZERO);
+ return kmalloc_noprof(size, flags | __GFP_ZERO);
}
-
-/**
- * kzalloc_node - allocate zeroed memory from a particular memory node.
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- * @node: memory node from which to allocate
- */
-static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
+#define kzalloc(...) alloc_hooks(kzalloc_noprof(__VA_ARGS__))
+#define kzalloc_node(_size, _flags, _node) kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
+
+void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
+ gfp_t flags, int node) __alloc_size(1);
+#define kvmalloc_node_align_noprof(_size, _align, _flags, _node) \
+ __kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, NULL), _align, _flags, _node)
+#define kvmalloc_node_align(...) \
+ alloc_hooks(kvmalloc_node_align_noprof(__VA_ARGS__))
+#define kvmalloc_node(_s, _f, _n) kvmalloc_node_align(_s, 1, _f, _n)
+#define kvmalloc(...) kvmalloc_node(__VA_ARGS__, NUMA_NO_NODE)
+#define kvzalloc(_size, _flags) kvmalloc(_size, (_flags)|__GFP_ZERO)
+
+#define kvzalloc_node(_size, _flags, _node) kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
+
+#define kmem_buckets_valloc(_b, _size, _flags) \
+ alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), 1, _flags, NUMA_NO_NODE))
+
+static inline __alloc_size(1, 2) void *
+kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
{
- return kmalloc_node(size, flags | __GFP_ZERO, node);
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, size, &bytes)))
+ return NULL;
+
+ return kvmalloc_node_align_noprof(bytes, 1, flags, node);
}
+#define kvmalloc_array_noprof(...) kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
+#define kvcalloc_node_noprof(_n,_s,_f,_node) kvmalloc_array_node_noprof(_n,_s,(_f)|__GFP_ZERO,_node)
+#define kvcalloc_noprof(...) kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
+
+#define kvmalloc_array(...) alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__))
+#define kvcalloc_node(...) alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
+#define kvcalloc(...) alloc_hooks(kvcalloc_noprof(__VA_ARGS__))
+
+void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
+ gfp_t flags, int nid) __realloc_size(2);
+#define kvrealloc_node_align(...) \
+ alloc_hooks(kvrealloc_node_align_noprof(__VA_ARGS__))
+#define kvrealloc_node(_p, _s, _f, _n) kvrealloc_node_align(_p, _s, 1, _f, _n)
+#define kvrealloc(...) kvrealloc_node(__VA_ARGS__, NUMA_NO_NODE)
+
+extern void kvfree(const void *addr);
+DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))
+
+extern void kvfree_sensitive(const void *addr, size_t len);
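+
+/*
+ * Illustrative sketch (not part of this patch): kvmalloc() and friends try
+ * kmalloc() first and fall back to vmalloc() for larger sizes, so the
+ * result must be freed with kvfree(), never plain kfree().
+ *
+ *	table = kvzalloc(array_size(nr, sizeof(*table)), GFP_KERNEL);
+ *	...
+ *	kvfree(table);
+ */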
+
unsigned int kmem_cache_size(struct kmem_cache *s);
-void __init kmem_cache_init_late(void);
-#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
-int slab_prepare_cpu(unsigned int cpu);
-int slab_dead_cpu(unsigned int cpu);
+#ifndef CONFIG_KVFREE_RCU_BATCHED
+static inline void kvfree_rcu_barrier(void)
+{
+ rcu_barrier();
+}
+
+static inline void kfree_rcu_scheduler_running(void) { }
#else
-#define slab_prepare_cpu NULL
-#define slab_dead_cpu NULL
+void kvfree_rcu_barrier(void);
+
+void kfree_rcu_scheduler_running(void);
#endif
+/**
+ * kmalloc_size_roundup - Report allocation bucket size for the given size
+ *
+ * @size: Number of bytes to round up from.
+ *
+ * This returns the number of bytes that would be available in a kmalloc()
+ * allocation of @size bytes. For example, a 126 byte request would be
+ * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
+ * for the general-purpose kmalloc()-based allocations, and is not for the
+ * pre-sized kmem_cache_alloc()-based allocations.)
+ *
+ * Use this to kmalloc() the full bucket size ahead of time instead of using
+ * ksize() to query the size after an allocation.
+ */
+size_t kmalloc_size_roundup(size_t size);
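+
+/*
+ * Illustrative sketch (not part of this patch): allocating the full bucket
+ * up front so the object can later grow into it without a reallocation.
+ *
+ *	alloc = kmalloc_size_roundup(needed);	(e.g. 126 becomes 128)
+ *	buf = kmalloc(alloc, GFP_KERNEL);
+ */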
+
+void __init kmem_cache_init_late(void);
+void __init kvfree_rcu_init(void);
+
#endif /* _LINUX_SLAB_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
deleted file mode 100644
index 3aa5e1e73ab6..000000000000
--- a/include/linux/slab_def.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SLAB_DEF_H
-#define _LINUX_SLAB_DEF_H
-
-#include <linux/kfence.h>
-#include <linux/reciprocal_div.h>
-
-/*
- * Definitions unique to the original Linux SLAB allocator.
- */
-
-struct kmem_cache {
- struct array_cache __percpu *cpu_cache;
-
-/* 1) Cache tunables. Protected by slab_mutex */
- unsigned int batchcount;
- unsigned int limit;
- unsigned int shared;
-
- unsigned int size;
- struct reciprocal_value reciprocal_buffer_size;
-/* 2) touched by every alloc & free from the backend */
-
- slab_flags_t flags; /* constant flags */
- unsigned int num; /* # of objs per slab */
-
-/* 3) cache_grow/shrink */
- /* order of pgs per slab (2^n) */
- unsigned int gfporder;
-
- /* force GFP flags, e.g. GFP_DMA */
- gfp_t allocflags;
-
- size_t colour; /* cache colouring range */
- unsigned int colour_off; /* colour offset */
- struct kmem_cache *freelist_cache;
- unsigned int freelist_size;
-
- /* constructor func */
- void (*ctor)(void *obj);
-
-/* 4) cache creation/removal */
- const char *name;
- struct list_head list;
- int refcount;
- int object_size;
- int align;
-
-/* 5) statistics */
-#ifdef CONFIG_DEBUG_SLAB
- unsigned long num_active;
- unsigned long num_allocations;
- unsigned long high_mark;
- unsigned long grown;
- unsigned long reaped;
- unsigned long errors;
- unsigned long max_freeable;
- unsigned long node_allocs;
- unsigned long node_frees;
- unsigned long node_overflow;
- atomic_t allochit;
- atomic_t allocmiss;
- atomic_t freehit;
- atomic_t freemiss;
-
- /*
- * If debugging is enabled, then the allocator can add additional
- * fields and/or padding to every object. 'size' contains the total
- * object size including these internal fields, while 'obj_offset'
- * and 'object_size' contain the offset to the user object and its
- * size.
- */
- int obj_offset;
-#endif /* CONFIG_DEBUG_SLAB */
-
-#ifdef CONFIG_KASAN
- struct kasan_cache kasan_info;
-#endif
-
-#ifdef CONFIG_SLAB_FREELIST_RANDOM
- unsigned int *random_seq;
-#endif
-
- unsigned int useroffset; /* Usercopy region offset */
- unsigned int usersize; /* Usercopy region size */
-
- struct kmem_cache_node *node[MAX_NUMNODES];
-};
-
-static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
- void *x)
-{
- void *object = x - (x - page->s_mem) % cache->size;
- void *last_object = page->s_mem + (cache->num - 1) * cache->size;
-
- if (unlikely(object > last_object))
- return last_object;
- else
- return object;
-}
-
-/*
- * We want to avoid an expensive divide : (offset / cache->size)
- * Using the fact that size is a constant for a particular cache,
- * we can replace (offset / cache->size) by
- * reciprocal_divide(offset, cache->reciprocal_buffer_size)
- */
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct page *page, void *obj)
-{
- u32 offset = (obj - page->s_mem);
- return reciprocal_divide(offset, cache->reciprocal_buffer_size);
-}
-
-static inline int objs_per_slab_page(const struct kmem_cache *cache,
- const struct page *page)
-{
- if (is_kfence_address(page_address(page)))
- return 1;
- return cache->num;
-}
-
-#endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slimbus.h b/include/linux/slimbus.h
index 12c9719b2a55..a4608d9a9684 100644
--- a/include/linux/slimbus.h
+++ b/include/linux/slimbus.h
@@ -10,7 +10,7 @@
#include <linux/completion.h>
#include <linux/mod_devicetable.h>
-extern struct bus_type slimbus_bus;
+extern const struct bus_type slimbus_bus;
/**
* struct slim_eaddr - Enumeration address for a SLIMbus device
@@ -91,7 +91,7 @@ struct slim_driver {
struct device_driver driver;
const struct slim_device_id *id_table;
};
-#define to_slim_driver(d) container_of(d, struct slim_driver, driver)
+#define to_slim_driver(d) container_of_const(d, struct slim_driver, driver)
/**
* struct slim_val_inf - Slimbus value or information element
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
deleted file mode 100644
index dcde82a4434c..000000000000
--- a/include/linux/slub_def.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SLUB_DEF_H
-#define _LINUX_SLUB_DEF_H
-
-/*
- * SLUB : A Slab allocator without object queues.
- *
- * (C) 2007 SGI, Christoph Lameter
- */
-#include <linux/kfence.h>
-#include <linux/kobject.h>
-#include <linux/reciprocal_div.h>
-
-enum stat_item {
- ALLOC_FASTPATH, /* Allocation from cpu slab */
- ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
- FREE_FASTPATH, /* Free to cpu slab */
- FREE_SLOWPATH, /* Freeing not to cpu slab */
- FREE_FROZEN, /* Freeing to frozen slab */
- FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
- FREE_REMOVE_PARTIAL, /* Freeing removes last object */
- ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */
- ALLOC_SLAB, /* Cpu slab acquired from page allocator */
- ALLOC_REFILL, /* Refill cpu slab from slab freelist */
- ALLOC_NODE_MISMATCH, /* Switching cpu slab */
- FREE_SLAB, /* Slab freed to the page allocator */
- CPUSLAB_FLUSH, /* Abandoning of the cpu slab */
- DEACTIVATE_FULL, /* Cpu slab was full when deactivated */
- DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */
- DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */
- DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
- DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
- DEACTIVATE_BYPASS, /* Implicit deactivation */
- ORDER_FALLBACK, /* Number of times fallback was necessary */
- CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
- CMPXCHG_DOUBLE_FAIL, /* Number of times that cmpxchg double did not match */
- CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */
- CPU_PARTIAL_FREE, /* Refill cpu partial on free */
- CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */
- CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */
- NR_SLUB_STAT_ITEMS };
-
-struct kmem_cache_cpu {
- void **freelist; /* Pointer to next available object */
- unsigned long tid; /* Globally unique transaction id */
- struct page *page; /* The slab from which we are allocating */
-#ifdef CONFIG_SLUB_CPU_PARTIAL
- struct page *partial; /* Partially allocated frozen slabs */
-#endif
-#ifdef CONFIG_SLUB_STATS
- unsigned stat[NR_SLUB_STAT_ITEMS];
-#endif
-};
-
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-#define slub_percpu_partial(c) ((c)->partial)
-
-#define slub_set_percpu_partial(c, p) \
-({ \
- slub_percpu_partial(c) = (p)->next; \
-})
-
-#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c))
-#else
-#define slub_percpu_partial(c) NULL
-
-#define slub_set_percpu_partial(c, p)
-
-#define slub_percpu_partial_read_once(c) NULL
-#endif // CONFIG_SLUB_CPU_PARTIAL
-
-/*
- * Word size structure that can be atomically updated or read and that
- * contains both the order and the number of objects that a slab of the
- * given order would contain.
- */
-struct kmem_cache_order_objects {
- unsigned int x;
-};
-
-/*
- * Slab cache management.
- */
-struct kmem_cache {
- struct kmem_cache_cpu __percpu *cpu_slab;
- /* Used for retrieving partial slabs, etc. */
- slab_flags_t flags;
- unsigned long min_partial;
- unsigned int size; /* The size of an object including metadata */
- unsigned int object_size;/* The size of an object without metadata */
- struct reciprocal_value reciprocal_size;
- unsigned int offset; /* Free pointer offset */
-#ifdef CONFIG_SLUB_CPU_PARTIAL
- /* Number of per cpu partial objects to keep around */
- unsigned int cpu_partial;
-#endif
- struct kmem_cache_order_objects oo;
-
- /* Allocation and freeing of slabs */
- struct kmem_cache_order_objects max;
- struct kmem_cache_order_objects min;
- gfp_t allocflags; /* gfp flags to use on each alloc */
- int refcount; /* Refcount for slab cache destroy */
- void (*ctor)(void *);
- unsigned int inuse; /* Offset to metadata */
- unsigned int align; /* Alignment */
- unsigned int red_left_pad; /* Left redzone padding size */
- const char *name; /* Name (only for display!) */
- struct list_head list; /* List of slab caches */
-#ifdef CONFIG_SYSFS
- struct kobject kobj; /* For sysfs */
-#endif
-#ifdef CONFIG_SLAB_FREELIST_HARDENED
- unsigned long random;
-#endif
-
-#ifdef CONFIG_NUMA
- /*
- * Defragmentation by allocating from a remote node.
- */
- unsigned int remote_node_defrag_ratio;
-#endif
-
-#ifdef CONFIG_SLAB_FREELIST_RANDOM
- unsigned int *random_seq;
-#endif
-
-#ifdef CONFIG_KASAN
- struct kasan_cache kasan_info;
-#endif
-
- unsigned int useroffset; /* Usercopy region offset */
- unsigned int usersize; /* Usercopy region size */
-
- struct kmem_cache_node *node[MAX_NUMNODES];
-};
-
-#ifdef CONFIG_SLUB_CPU_PARTIAL
-#define slub_cpu_partial(s) ((s)->cpu_partial)
-#define slub_set_cpu_partial(s, n) \
-({ \
- slub_cpu_partial(s) = (n); \
-})
-#else
-#define slub_cpu_partial(s) (0)
-#define slub_set_cpu_partial(s, n)
-#endif /* CONFIG_SLUB_CPU_PARTIAL */
-
-#ifdef CONFIG_SYSFS
-#define SLAB_SUPPORTS_SYSFS
-void sysfs_slab_unlink(struct kmem_cache *);
-void sysfs_slab_release(struct kmem_cache *);
-#else
-static inline void sysfs_slab_unlink(struct kmem_cache *s)
-{
-}
-static inline void sysfs_slab_release(struct kmem_cache *s)
-{
-}
-#endif
-
-void object_err(struct kmem_cache *s, struct page *page,
- u8 *object, char *reason);
-
-void *fixup_red_left(struct kmem_cache *s, void *p);
-
-static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
- void *x) {
- void *object = x - (x - page_address(page)) % cache->size;
- void *last_object = page_address(page) +
- (page->objects - 1) * cache->size;
- void *result = (unlikely(object > last_object)) ? last_object : object;
-
- result = fixup_red_left(cache, result);
- return result;
-}
-
-/* Determine object index from a given position */
-static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
- void *addr, void *obj)
-{
- return reciprocal_divide(kasan_reset_tag(obj) - addr,
- cache->reciprocal_size);
-}
-
-static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct page *page, void *obj)
-{
- if (is_kfence_address(obj))
- return 0;
- return __obj_to_index(cache, page_address(page), obj);
-}
-
-static inline int objs_per_slab_page(const struct kmem_cache *cache,
- const struct page *page)
-{
- return page->objects;
-}
-#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/sm501.h b/include/linux/sm501.h
index 2f3488b2875d..bcda27a46e7a 100644
--- a/include/linux/sm501.h
+++ b/include/linux/sm501.h
@@ -12,9 +12,6 @@ extern int sm501_unit_power(struct device *dev,
extern unsigned long sm501_set_clock(struct device *dev,
int clksrc, unsigned long freq);
-extern unsigned long sm501_find_clock(struct device *dev,
- int clksrc, unsigned long req_freq);
-
/* sm501_misc_control
*
* Modify the SM501's MISC_CONTROL register
diff --git a/include/linux/smc911x.h b/include/linux/smc911x.h
deleted file mode 100644
index 8cace8189e74..000000000000
--- a/include/linux/smc911x.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __SMC911X_H__
-#define __SMC911X_H__
-
-#define SMC911X_USE_16BIT (1 << 0)
-#define SMC911X_USE_32BIT (1 << 1)
-
-struct smc911x_platdata {
- unsigned long flags;
- unsigned long irq_flags; /* IRQF_... */
- int irq_polarity;
-};
-
-#endif /* __SMC911X_H__ */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 510519e8a1eb..91d0ecf3b8d3 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -53,14 +53,14 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
void *info, bool wait, const struct cpumask *mask);
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);
/*
* Cpus stopping functions in panic. All have default weak definitions.
* Architecture-dependent code may override them.
*/
-void panic_smp_self_stop(void);
-void nmi_panic_self_stop(struct pt_regs *regs);
+void __noreturn panic_smp_self_stop(void);
+void __noreturn nmi_panic_self_stop(struct pt_regs *regs);
void crash_smp_send_stop(void);
/*
@@ -105,10 +105,15 @@ static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
+/*
+ * Architecture specific boot CPU setup. Defined as empty weak function in
+ * init/main.c. Architectures can override it.
+ */
+void __init smp_prepare_boot_cpu(void);
+
#ifdef CONFIG_SMP
#include <linux/preempt.h>
-#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>
@@ -126,8 +131,15 @@ extern void smp_send_stop(void);
/*
* sends a 'reschedule' event to another CPU:
*/
-extern void smp_send_reschedule(int cpu);
-
+extern void arch_smp_send_reschedule(int cpu);
+/*
+ * scheduler_ipi() is inline so can't be passed as callback reason, but the
+ * callsite IP should be sufficient for root-causing IPIs sent from here.
+ */
+#define smp_send_reschedule(cpu) ({ \
+ trace_ipi_send_cpu(cpu, _RET_IP_, NULL); \
+ arch_smp_send_reschedule(cpu); \
+})
/*
* Prepare machine for booting other CPUs.
@@ -156,6 +168,7 @@ int smp_call_function_any(const struct cpumask *mask,
void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);
+bool cpus_peek_for_pending_ipi(const struct cpumask *mask);
/*
* Generic and arch helpers
@@ -165,12 +178,6 @@ void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
generic_smp_call_function_single_interrupt
-/*
- * Mark the boot cpu "online" so that it can call console drivers in
- * printk() and can access its per-cpu storage.
- */
-void smp_prepare_boot_cpu(void);
-
extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);
@@ -197,7 +204,6 @@ static inline void up_smp_call_function(smp_call_func_t func, void *info)
(up_smp_call_function(func, info))
static inline void smp_send_reschedule(int cpu) { }
-#define smp_prepare_boot_cpu() do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
(up_smp_call_function(func, info))
static inline void call_function_init(void) { }
@@ -211,10 +217,16 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }
+static inline bool cpus_peek_for_pending_ipi(const struct cpumask *mask)
+{
+ return false;
+}
+
+#define setup_max_cpus 0
#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
-static inline void smp_init(void) { up_late_init(); }
+static __always_inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif
@@ -227,7 +239,7 @@ static inline int get_boot_cpu_id(void)
#endif /* !SMP */
/**
- * raw_processor_id() - get the current (unstable) CPU id
+ * raw_smp_processor_id() - get the current (unstable) CPU id
*
 * For when you know what you are doing and need an unstable
* CPU id.
@@ -255,7 +267,7 @@ static inline int get_boot_cpu_id(void)
* regular asm read for the stable.
*/
#ifndef __smp_processor_id
-#define __smp_processor_id(x) raw_smp_processor_id(x)
+#define __smp_processor_id() raw_smp_processor_id()
#endif
#ifdef CONFIG_DEBUG_PREEMPT
@@ -287,4 +299,10 @@ int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);
+#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
+bool csd_lock_is_stuck(void);
+#else
+static inline bool csd_lock_is_stuck(void) { return false; }
+#endif
+
#endif /* __LINUX_SMP_H */
diff --git a/include/linux/smscphy.h b/include/linux/smscphy.h
index 1a136271ba6a..1a6a851d2cf8 100644
--- a/include/linux/smscphy.h
+++ b/include/linux/smscphy.h
@@ -28,4 +28,48 @@
#define MII_LAN83C185_MODE_POWERDOWN 0xC0 /* Power Down mode */
#define MII_LAN83C185_MODE_ALL 0xE0 /* All capable mode */
+int smsc_phy_config_intr(struct phy_device *phydev);
+irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev);
+int smsc_phy_config_init(struct phy_device *phydev);
+int lan87xx_read_status(struct phy_device *phydev);
+int smsc_phy_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data);
+int smsc_phy_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data);
+int smsc_phy_probe(struct phy_device *phydev);
+
+#define MII_LAN874X_PHY_MMD_WOL_WUCSR 0x8010
+#define MII_LAN874X_PHY_MMD_WOL_WUF_CFGA 0x8011
+#define MII_LAN874X_PHY_MMD_WOL_WUF_CFGB 0x8012
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK0 0x8021
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK1 0x8022
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK2 0x8023
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK3 0x8024
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK4 0x8025
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK5 0x8026
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK6 0x8027
+#define MII_LAN874X_PHY_MMD_WOL_WUF_MASK7 0x8028
+#define MII_LAN874X_PHY_MMD_WOL_RX_ADDRA 0x8061
+#define MII_LAN874X_PHY_MMD_WOL_RX_ADDRB 0x8062
+#define MII_LAN874X_PHY_MMD_WOL_RX_ADDRC 0x8063
+#define MII_LAN874X_PHY_MMD_MCFGR 0x8064
+
+#define MII_LAN874X_PHY_PME1_SET (2 << 13)
+#define MII_LAN874X_PHY_PME2_SET (2 << 11)
+#define MII_LAN874X_PHY_PME_SELF_CLEAR BIT(9)
+#define MII_LAN874X_PHY_WOL_PFDA_FR BIT(7)
+#define MII_LAN874X_PHY_WOL_WUFR BIT(6)
+#define MII_LAN874X_PHY_WOL_MPR BIT(5)
+#define MII_LAN874X_PHY_WOL_BCAST_FR BIT(4)
+#define MII_LAN874X_PHY_WOL_PFDAEN BIT(3)
+#define MII_LAN874X_PHY_WOL_WUEN BIT(2)
+#define MII_LAN874X_PHY_WOL_MPEN BIT(1)
+#define MII_LAN874X_PHY_WOL_BCSTEN BIT(0)
+
+#define MII_LAN874X_PHY_WOL_FILTER_EN BIT(15)
+#define MII_LAN874X_PHY_WOL_FILTER_MCASTTEN BIT(9)
+#define MII_LAN874X_PHY_WOL_FILTER_BCSTEN BIT(8)
+
+#define MII_LAN874X_PHY_PME_SELF_CLEAR_DELAY 0x1000 /* 81 milliseconds */
+
#endif /* __LINUX_SMSCPHY_H__ */
diff --git a/include/linux/soc/airoha/airoha_offload.h b/include/linux/soc/airoha/airoha_offload.h
new file mode 100644
index 000000000000..4d23cbb7d407
--- /dev/null
+++ b/include/linux/soc/airoha/airoha_offload.h
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+#ifndef AIROHA_OFFLOAD_H
+#define AIROHA_OFFLOAD_H
+
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+enum {
+ PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
+};
+
+struct airoha_ppe_dev {
+ struct {
+ int (*setup_tc_block_cb)(struct airoha_ppe_dev *dev,
+ void *type_data);
+ void (*check_skb)(struct airoha_ppe_dev *dev,
+ struct sk_buff *skb, u16 hash,
+ bool rx_wlan);
+ } ops;
+
+ void *priv;
+};
+
+#if (IS_BUILTIN(CONFIG_NET_AIROHA) || IS_MODULE(CONFIG_NET_AIROHA))
+struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev);
+void airoha_ppe_put_dev(struct airoha_ppe_dev *dev);
+
+static inline int airoha_ppe_dev_setup_tc_block_cb(struct airoha_ppe_dev *dev,
+ void *type_data)
+{
+ return dev->ops.setup_tc_block_cb(dev, type_data);
+}
+
+static inline void airoha_ppe_dev_check_skb(struct airoha_ppe_dev *dev,
+ struct sk_buff *skb,
+ u16 hash, bool rx_wlan)
+{
+ dev->ops.check_skb(dev, skb, hash, rx_wlan);
+}
+#else
+static inline struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
+{
+ return NULL;
+}
+
+static inline void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
+{
+}
+
+static inline int airoha_ppe_dev_setup_tc_block_cb(struct airoha_ppe_dev *dev,
+						   void *type_data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void airoha_ppe_dev_check_skb(struct airoha_ppe_dev *dev,
+ struct sk_buff *skb, u16 hash,
+ bool rx_wlan)
+{
+}
+#endif
+
+#define NPU_NUM_CORES 8
+#define NPU_NUM_IRQ 6
+#define NPU_RX0_DESC_NUM 512
+#define NPU_RX1_DESC_NUM 512
+
+/* CTRL */
+#define NPU_RX_DMA_DESC_LAST_MASK BIT(29)
+#define NPU_RX_DMA_DESC_LEN_MASK GENMASK(28, 15)
+#define NPU_RX_DMA_DESC_CUR_LEN_MASK GENMASK(14, 1)
+#define NPU_RX_DMA_DESC_DONE_MASK BIT(0)
+/* INFO */
+#define NPU_RX_DMA_PKT_COUNT_MASK GENMASK(31, 28)
+#define NPU_RX_DMA_PKT_ID_MASK GENMASK(28, 26)
+#define NPU_RX_DMA_SRC_PORT_MASK GENMASK(25, 21)
+#define NPU_RX_DMA_CRSN_MASK GENMASK(20, 16)
+#define NPU_RX_DMA_FOE_ID_MASK GENMASK(15, 0)
+/* DATA */
+#define NPU_RX_DMA_SID_MASK GENMASK(31, 16)
+#define NPU_RX_DMA_FRAG_TYPE_MASK GENMASK(15, 14)
+#define NPU_RX_DMA_PRIORITY_MASK GENMASK(13, 10)
+#define NPU_RX_DMA_RADIO_ID_MASK GENMASK(9, 6)
+#define NPU_RX_DMA_VAP_ID_MASK GENMASK(5, 2)
+#define NPU_RX_DMA_FRAME_TYPE_MASK GENMASK(1, 0)
+
+struct airoha_npu_rx_dma_desc {
+ u32 ctrl;
+ u32 info;
+ u32 data;
+ u32 addr;
+ u64 rsv;
+} __packed;
+
+/* CTRL */
+#define NPU_TX_DMA_DESC_SCHED_MASK BIT(31)
+#define NPU_TX_DMA_DESC_LEN_MASK GENMASK(30, 18)
+#define NPU_TX_DMA_DESC_VEND_LEN_MASK GENMASK(17, 1)
+#define NPU_TX_DMA_DESC_DONE_MASK BIT(0)
+
+#define NPU_TXWI_LEN 192
+
+struct airoha_npu_tx_dma_desc {
+ u32 ctrl;
+ u32 addr;
+ u64 rsv;
+ u8 txwi[NPU_TXWI_LEN];
+} __packed;
+
+enum airoha_npu_wlan_set_cmd {
+ WLAN_FUNC_SET_WAIT_PCIE_ADDR,
+ WLAN_FUNC_SET_WAIT_DESC,
+ WLAN_FUNC_SET_WAIT_NPU_INIT_DONE,
+ WLAN_FUNC_SET_WAIT_TRAN_TO_CPU,
+ WLAN_FUNC_SET_WAIT_BA_WIN_SIZE,
+ WLAN_FUNC_SET_WAIT_DRIVER_MODEL,
+ WLAN_FUNC_SET_WAIT_DEL_STA,
+ WLAN_FUNC_SET_WAIT_DRAM_BA_NODE_ADDR,
+ WLAN_FUNC_SET_WAIT_PKT_BUF_ADDR,
+ WLAN_FUNC_SET_WAIT_IS_TEST_NOBA,
+ WLAN_FUNC_SET_WAIT_FLUSHONE_TIMEOUT,
+ WLAN_FUNC_SET_WAIT_FLUSHALL_TIMEOUT,
+ WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU,
+ WLAN_FUNC_SET_WAIT_PCIE_STATE,
+ WLAN_FUNC_SET_WAIT_PCIE_PORT_TYPE,
+ WLAN_FUNC_SET_WAIT_ERROR_RETRY_TIMES,
+ WLAN_FUNC_SET_WAIT_BAR_INFO,
+ WLAN_FUNC_SET_WAIT_FAST_FLAG,
+ WLAN_FUNC_SET_WAIT_NPU_BAND0_ONCPU,
+ WLAN_FUNC_SET_WAIT_TX_RING_PCIE_ADDR,
+ WLAN_FUNC_SET_WAIT_TX_DESC_HW_BASE,
+ WLAN_FUNC_SET_WAIT_TX_BUF_SPACE_HW_BASE,
+ WLAN_FUNC_SET_WAIT_RX_RING_FOR_TXDONE_HW_BASE,
+ WLAN_FUNC_SET_WAIT_TX_PKT_BUF_ADDR,
+ WLAN_FUNC_SET_WAIT_INODE_TXRX_REG_ADDR,
+ WLAN_FUNC_SET_WAIT_INODE_DEBUG_FLAG,
+ WLAN_FUNC_SET_WAIT_INODE_HW_CFG_INFO,
+ WLAN_FUNC_SET_WAIT_INODE_STOP_ACTION,
+ WLAN_FUNC_SET_WAIT_INODE_PCIE_SWAP,
+ WLAN_FUNC_SET_WAIT_RATELIMIT_CTRL,
+ WLAN_FUNC_SET_WAIT_HWNAT_INIT,
+ WLAN_FUNC_SET_WAIT_ARHT_CHIP_INFO,
+ WLAN_FUNC_SET_WAIT_TX_BUF_CHECK_ADDR,
+ WLAN_FUNC_SET_WAIT_TOKEN_ID_SIZE,
+};
+
+enum airoha_npu_wlan_get_cmd {
+ WLAN_FUNC_GET_WAIT_NPU_INFO,
+ WLAN_FUNC_GET_WAIT_LAST_RATE,
+ WLAN_FUNC_GET_WAIT_COUNTER,
+ WLAN_FUNC_GET_WAIT_DBG_COUNTER,
+ WLAN_FUNC_GET_WAIT_RXDESC_BASE,
+ WLAN_FUNC_GET_WAIT_WCID_DBG_COUNTER,
+ WLAN_FUNC_GET_WAIT_DMA_ADDR,
+ WLAN_FUNC_GET_WAIT_RING_SIZE,
+ WLAN_FUNC_GET_WAIT_NPU_SUPPORT_MAP,
+ WLAN_FUNC_GET_WAIT_MDC_LOCK_ADDRESS,
+ WLAN_FUNC_GET_WAIT_NPU_VERSION,
+};
+
+struct airoha_npu {
+#if (IS_BUILTIN(CONFIG_NET_AIROHA_NPU) || IS_MODULE(CONFIG_NET_AIROHA_NPU))
+ struct device *dev;
+ struct regmap *regmap;
+
+ struct airoha_npu_core {
+ struct airoha_npu *npu;
+ /* protect concurrent npu memory accesses */
+ spinlock_t lock;
+ struct work_struct wdt_work;
+ } cores[NPU_NUM_CORES];
+
+ int irqs[NPU_NUM_IRQ];
+
+ struct airoha_foe_stats __iomem *stats;
+
+ struct {
+ int (*ppe_init)(struct airoha_npu *npu);
+ int (*ppe_deinit)(struct airoha_npu *npu);
+ int (*ppe_init_stats)(struct airoha_npu *npu,
+ dma_addr_t addr, u32 num_stats_entries);
+ int (*ppe_flush_sram_entries)(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ int sram_num_entries);
+ int (*ppe_foe_commit_entry)(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ u32 entry_size, u32 hash,
+ bool ppe2);
+ int (*wlan_init_reserved_memory)(struct airoha_npu *npu);
+ int (*wlan_send_msg)(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_set_cmd func_id,
+ void *data, int data_len, gfp_t gfp);
+ int (*wlan_get_msg)(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_get_cmd func_id,
+ void *data, int data_len, gfp_t gfp);
+ u32 (*wlan_get_queue_addr)(struct airoha_npu *npu, int qid,
+ bool xmit);
+ void (*wlan_set_irq_status)(struct airoha_npu *npu, u32 val);
+ u32 (*wlan_get_irq_status)(struct airoha_npu *npu, int q);
+ void (*wlan_enable_irq)(struct airoha_npu *npu, int q);
+ void (*wlan_disable_irq)(struct airoha_npu *npu, int q);
+ } ops;
+#endif
+};
+
+#if (IS_BUILTIN(CONFIG_NET_AIROHA_NPU) || IS_MODULE(CONFIG_NET_AIROHA_NPU))
+struct airoha_npu *airoha_npu_get(struct device *dev);
+void airoha_npu_put(struct airoha_npu *npu);
+
+static inline int airoha_npu_wlan_init_reserved_memory(struct airoha_npu *npu)
+{
+ return npu->ops.wlan_init_reserved_memory(npu);
+}
+
+static inline int airoha_npu_wlan_send_msg(struct airoha_npu *npu,
+ int ifindex,
+ enum airoha_npu_wlan_set_cmd cmd,
+ void *data, int data_len, gfp_t gfp)
+{
+ return npu->ops.wlan_send_msg(npu, ifindex, cmd, data, data_len, gfp);
+}
+
+static inline int airoha_npu_wlan_get_msg(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_get_cmd cmd,
+ void *data, int data_len, gfp_t gfp)
+{
+ return npu->ops.wlan_get_msg(npu, ifindex, cmd, data, data_len, gfp);
+}
+
+static inline u32 airoha_npu_wlan_get_queue_addr(struct airoha_npu *npu,
+ int qid, bool xmit)
+{
+ return npu->ops.wlan_get_queue_addr(npu, qid, xmit);
+}
+
+static inline void airoha_npu_wlan_set_irq_status(struct airoha_npu *npu,
+ u32 val)
+{
+ npu->ops.wlan_set_irq_status(npu, val);
+}
+
+static inline u32 airoha_npu_wlan_get_irq_status(struct airoha_npu *npu, int q)
+{
+ return npu->ops.wlan_get_irq_status(npu, q);
+}
+
+static inline void airoha_npu_wlan_enable_irq(struct airoha_npu *npu, int q)
+{
+ npu->ops.wlan_enable_irq(npu, q);
+}
+
+static inline void airoha_npu_wlan_disable_irq(struct airoha_npu *npu, int q)
+{
+ npu->ops.wlan_disable_irq(npu, q);
+}
+#else
+static inline struct airoha_npu *airoha_npu_get(struct device *dev)
+{
+ return NULL;
+}
+
+static inline void airoha_npu_put(struct airoha_npu *npu)
+{
+}
+
+static inline int airoha_npu_wlan_init_reserved_memory(struct airoha_npu *npu)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int airoha_npu_wlan_send_msg(struct airoha_npu *npu,
+ int ifindex,
+ enum airoha_npu_wlan_set_cmd cmd,
+ void *data, int data_len, gfp_t gfp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int airoha_npu_wlan_get_msg(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_get_cmd cmd,
+ void *data, int data_len, gfp_t gfp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline u32 airoha_npu_wlan_get_queue_addr(struct airoha_npu *npu,
+ int qid, bool xmit)
+{
+ return 0;
+}
+
+static inline void airoha_npu_wlan_set_irq_status(struct airoha_npu *npu,
+ u32 val)
+{
+}
+
+static inline u32 airoha_npu_wlan_get_irq_status(struct airoha_npu *npu,
+ int q)
+{
+ return 0;
+}
+
+static inline void airoha_npu_wlan_enable_irq(struct airoha_npu *npu, int q)
+{
+}
+
+static inline void airoha_npu_wlan_disable_irq(struct airoha_npu *npu, int q)
+{
+}
+#endif
+
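A sketch of a wlan driver pushing one configuration value through the message wrappers above; the choice of command and the u32 payload layout are illustrative only:

static int airoha_npu_set_ba_win_sketch(struct airoha_npu *npu, int ifindex,
					u32 ba_win_size)
{
	/* GFP_KERNEL: callable from process context only */
	return airoha_npu_wlan_send_msg(npu, ifindex,
					WLAN_FUNC_SET_WAIT_BA_WIN_SIZE,
					&ba_win_size, sizeof(ba_win_size),
					GFP_KERNEL);
}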
+#endif /* AIROHA_OFFLOAD_H */
diff --git a/include/linux/soc/amd/isp4_misc.h b/include/linux/soc/amd/isp4_misc.h
new file mode 100644
index 000000000000..6738796986a7
--- /dev/null
+++ b/include/linux/soc/amd/isp4_misc.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#ifndef __SOC_ISP4_MISC_H
+#define __SOC_ISP4_MISC_H
+
+#define AMDISP_I2C_ADAP_NAME "AMDISP DesignWare I2C adapter"
+
+#endif
diff --git a/include/linux/soc/andes/irq.h b/include/linux/soc/andes/irq.h
new file mode 100644
index 000000000000..edc3182d6e66
--- /dev/null
+++ b/include/linux/soc/andes/irq.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 Andes Technology Corporation
+ */
+#ifndef __ANDES_IRQ_H
+#define __ANDES_IRQ_H
+
+/* Andes PMU irq number */
+#define ANDES_RV_IRQ_PMOVI 18
+#define ANDES_RV_IRQ_LAST ANDES_RV_IRQ_PMOVI
+#define ANDES_SLI_CAUSE_BASE 256
+
+/* Andes PMU related registers */
+#define ANDES_CSR_SLIE 0x9c4
+#define ANDES_CSR_SLIP 0x9c5
+#define ANDES_CSR_SCOUNTEROF 0x9d4
+
+#endif /* __ANDES_IRQ_H */
diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h
new file mode 100644
index 000000000000..736f53018017
--- /dev/null
+++ b/include/linux/soc/apple/rtkit.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple RTKit IPC Library
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Apple's SoCs come with various co-processors running their RTKit operating
+ * system. This protocol library is used by client drivers to access the
+ * features provided by these co-processors.
+ */
+#ifndef _LINUX_APPLE_RTKIT_H_
+#define _LINUX_APPLE_RTKIT_H_
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/mailbox_client.h>
+
+/*
+ * Struct to represent a shared memory buffer used by an RTKit co-processor.
+ *
+ * @buffer: Shared memory buffer allocated inside normal RAM.
+ * @iomem: Shared memory buffer controlled by the co-processors.
+ * @size: Size of the shared memory buffer.
+ * @iova: Device VA of shared memory buffer.
+ * @is_mapped: Shared memory buffer is managed by the co-processor.
+ * @private: Private data pointer for the parent driver.
+ */
+
+struct apple_rtkit_shmem {
+ void *buffer;
+ void __iomem *iomem;
+ size_t size;
+ dma_addr_t iova;
+ bool is_mapped;
+ void *private;
+};
+
+/*
+ * Struct to represent implementation-specific RTKit operations.
+ *
+ * @crashed: Called when the co-processor has crashed. Runs in process
+ * context.
+ * @recv_message: Function called when a message from RTKit is received
+ * on a non-system endpoint. Called from a worker thread.
+ * @recv_message_early:
+ * Like recv_message, but called from atomic context. It
+ * should return true if it handled the message. If it
+ * returns false, the message will be passed on to the
+ * worker thread.
+ * @shmem_setup:	Setup shared memory buffer. If bfr.is_mapped is true the
+ * buffer is managed by the co-processor and needs to be mapped.
+ * Otherwise the buffer is managed by Linux and needs to be
+ * allocated. If not specified dma_alloc_coherent is used.
+ * Called in process context.
+ * @shmem_destroy: Undo the shared memory buffer setup in shmem_setup. If not
+ * specified dma_free_coherent is used. Called in process
+ * context.
+ */
+struct apple_rtkit_ops {
+ void (*crashed)(void *cookie, const void *crashlog, size_t crashlog_size);
+ void (*recv_message)(void *cookie, u8 endpoint, u64 message);
+ bool (*recv_message_early)(void *cookie, u8 endpoint, u64 message);
+ int (*shmem_setup)(void *cookie, struct apple_rtkit_shmem *bfr);
+ void (*shmem_destroy)(void *cookie, struct apple_rtkit_shmem *bfr);
+};
+
+struct apple_rtkit;
+
+/*
+ * Initializes the internal state required to handle RTKit. This
+ * should usually be called within _probe.
+ *
+ * @dev: Pointer to the device node this coprocessor is associated with
+ * @cookie: opaque cookie passed to all functions defined in rtkit_ops
+ * @mbox_name: mailbox name used to communicate with the co-processor
+ * @mbox_idx: mailbox index to be used if mbox_name is NULL
+ * @ops: pointer to rtkit_ops to be used for this co-processor
+ */
+struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
+ const char *mbox_name, int mbox_idx,
+ const struct apple_rtkit_ops *ops);
+
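A probe-time sketch wiring up the ops table and booting the co-processor, assuming the usual ERR_PTR() convention for the init function; the mailbox name "mbox" and the driver-private struct are made up for illustration:

struct example_coproc {
	struct apple_rtkit *rtk;
};

static void example_recv_message(void *cookie, u8 endpoint, u64 message)
{
	/* worker-thread context: dispatch per-endpoint messages here */
}

static const struct apple_rtkit_ops example_rtkit_ops = {
	.recv_message = example_recv_message,
};

static int example_coproc_probe(struct device *dev,
				struct example_coproc *coproc)
{
	coproc->rtk = devm_apple_rtkit_init(dev, coproc, "mbox", 0,
					    &example_rtkit_ops);
	if (IS_ERR(coproc->rtk))
		return PTR_ERR(coproc->rtk);

	/* start the co-processor's boot handshake */
	return apple_rtkit_boot(coproc->rtk);
}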
+/*
+ * Non-devm version of devm_apple_rtkit_init. Must be freed with
+ * apple_rtkit_free.
+ *
+ * @dev: Pointer to the device node this coprocessor is associated with
+ * @cookie: opaque cookie passed to all functions defined in rtkit_ops
+ * @mbox_name: mailbox name used to communicate with the co-processor
+ * @mbox_idx: mailbox index to be used if mbox_name is NULL
+ * @ops: pointer to rtkit_ops to be used for this co-processor
+ */
+struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
+ const char *mbox_name, int mbox_idx,
+ const struct apple_rtkit_ops *ops);
+
+/*
+ * Free an instance of apple_rtkit.
+ */
+void apple_rtkit_free(struct apple_rtkit *rtk);
+
+/*
+ * Reinitialize internal structures. Must only be called while the
+ * co-processor is held in reset.
+ */
+int apple_rtkit_reinit(struct apple_rtkit *rtk);
+
+/*
+ * Handle RTKit's boot process. Should be called after the CPU of the
+ * co-processor has been started.
+ */
+int apple_rtkit_boot(struct apple_rtkit *rtk);
+
+/*
+ * Quiesce the co-processor.
+ */
+int apple_rtkit_quiesce(struct apple_rtkit *rtk);
+
+/*
+ * Wake the co-processor up from hibernation mode.
+ */
+int apple_rtkit_wake(struct apple_rtkit *rtk);
+
+/*
+ * Shutdown the co-processor
+ */
+int apple_rtkit_shutdown(struct apple_rtkit *rtk);
+
+/*
+ * Put the co-processor into idle mode
+ */
+int apple_rtkit_idle(struct apple_rtkit *rtk);
+
+/*
+ * Checks if RTKit is running and ready to handle messages.
+ */
+bool apple_rtkit_is_running(struct apple_rtkit *rtk);
+
+/*
+ * Checks if RTKit has crashed.
+ */
+bool apple_rtkit_is_crashed(struct apple_rtkit *rtk);
+
+/*
+ * Starts an endpoint. Must be called after boot but before any messages can be
+ * sent or received from that endpoint.
+ */
+int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint);
+
+/*
+ * Send a message to the given endpoint.
+ *
+ * @rtk: RTKit reference
+ * @ep: target endpoint
+ * @message: message to be sent
+ * @completion:	will be completed once the message has been submitted
+ * to the hardware FIFO. Can be NULL.
+ * @atomic: if set to true this function can be called from atomic
+ * context.
+ */
+int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
+ struct completion *completion, bool atomic);
+
+/*
+ * Process incoming messages in atomic context.
+ * This only guarantees that messages arrive as far as the recv_message_early
+ * callback; drivers that expect to handle incoming messages synchronously
+ * through this function must do so in that callback.
+ * Will return 1 if some data was processed, 0 if none was, or a
+ * negative error code on failure.
+ *
+ * @rtk: RTKit reference
+ */
+int apple_rtkit_poll(struct apple_rtkit *rtk);
+
+#endif /* _LINUX_APPLE_RTKIT_H_ */
diff --git a/include/linux/soc/apple/sart.h b/include/linux/soc/apple/sart.h
new file mode 100644
index 000000000000..2249bf6cde09
--- /dev/null
+++ b/include/linux/soc/apple/sart.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple SART device driver
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Apple SART is a simple address filter for DMA transactions.
+ * Regions of physical memory must be added to the SART's allow
+ * list before any DMA can target them. Unlike a proper
+ * IOMMU, no remapping can be done.
+ */
+
+#ifndef _LINUX_SOC_APPLE_SART_H_
+#define _LINUX_SOC_APPLE_SART_H_
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct apple_sart;
+
+/*
+ * Get a reference to the SART attached to dev.
+ *
+ * Looks for the phandle reference in apple,sart and returns a pointer
+ * to the corresponding apple_sart struct to be used with
+ * apple_sart_add_allowed_region and apple_sart_remove_allowed_region.
+ */
+struct apple_sart *devm_apple_sart_get(struct device *dev);
+
+/*
+ * Adds the region [paddr, paddr+size] to the DMA allow list.
+ *
+ * @sart: SART reference
+ * @paddr: Start address of the region to be used for DMA
+ * @size: Size of the region to be used for DMA.
+ */
+int apple_sart_add_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+ size_t size);
+
+/*
+ * Removes the region [paddr, paddr+size] from the DMA allow list.
+ *
+ * Note that the exact same paddr and size that were passed to
+ * apple_sart_add_allowed_region() have to be passed here.
+ *
+ * @sart: SART reference
+ * @paddr: Start address of the region no longer used for DMA
+ * @size: Size of the region no longer used for DMA.
+ */
+int apple_sart_remove_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+ size_t size);
+
+#endif /* _LINUX_SOC_APPLE_SART_H_ */
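A sketch of pairing a coherent allocation with the allow list. It assumes the DMA address equals the physical address, which holds for the identity-mapped setups SART is used with; error unwinding is kept minimal:

#include <linux/dma-mapping.h>

static void *example_sart_alloc(struct device *dev, struct apple_sart *sart,
				size_t size, dma_addr_t *iova)
{
	void *buf = dma_alloc_coherent(dev, size, iova, GFP_KERNEL);

	if (!buf)
		return NULL;

	/* allow the co-processor to DMA into this region */
	if (apple_sart_add_allowed_region(sart, *iova, size)) {
		dma_free_coherent(dev, size, buf, *iova);
		return NULL;
	}

	return buf;
}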
diff --git a/include/linux/soc/cirrus/ep93xx.h b/include/linux/soc/cirrus/ep93xx.h
index 56fbe2dc59b1..3e6cf2b25a97 100644
--- a/include/linux/soc/cirrus/ep93xx.h
+++ b/include/linux/soc/cirrus/ep93xx.h
@@ -2,7 +2,18 @@
#ifndef _SOC_EP93XX_H
#define _SOC_EP93XX_H
-struct platform_device;
+struct regmap;
+
+enum ep93xx_soc_model {
+ EP93XX_9301_SOC,
+ EP93XX_9307_SOC,
+ EP93XX_9312_SOC,
+};
+
+#include <linux/auxiliary_bus.h>
+#include <linux/compiler_types.h>
+#include <linux/container_of.h>
+#include <linux/spinlock.h>
#define EP93XX_CHIP_REV_D0 3
#define EP93XX_CHIP_REV_D1 4
@@ -10,28 +21,18 @@ struct platform_device;
#define EP93XX_CHIP_REV_E1 6
#define EP93XX_CHIP_REV_E2 7
-#ifdef CONFIG_ARCH_EP93XX
-int ep93xx_pwm_acquire_gpio(struct platform_device *pdev);
-void ep93xx_pwm_release_gpio(struct platform_device *pdev);
-int ep93xx_ide_acquire_gpio(struct platform_device *pdev);
-void ep93xx_ide_release_gpio(struct platform_device *pdev);
-int ep93xx_keypad_acquire_gpio(struct platform_device *pdev);
-void ep93xx_keypad_release_gpio(struct platform_device *pdev);
-int ep93xx_i2s_acquire(void);
-void ep93xx_i2s_release(void);
-unsigned int ep93xx_chip_revision(void);
+struct ep93xx_regmap_adev {
+ struct auxiliary_device adev;
+ struct regmap *map;
+ void __iomem *base;
+ spinlock_t *lock;
+ void (*write)(struct regmap *map, spinlock_t *lock, unsigned int reg,
+ unsigned int val);
+ void (*update_bits)(struct regmap *map, spinlock_t *lock,
+ unsigned int reg, unsigned int mask, unsigned int val);
+};
-#else
-static inline int ep93xx_pwm_acquire_gpio(struct platform_device *pdev) { return 0; }
-static inline void ep93xx_pwm_release_gpio(struct platform_device *pdev) {}
-static inline int ep93xx_ide_acquire_gpio(struct platform_device *pdev) { return 0; }
-static inline void ep93xx_ide_release_gpio(struct platform_device *pdev) {}
-static inline int ep93xx_keypad_acquire_gpio(struct platform_device *pdev) { return 0; }
-static inline void ep93xx_keypad_release_gpio(struct platform_device *pdev) {}
-static inline int ep93xx_i2s_acquire(void) { return 0; }
-static inline void ep93xx_i2s_release(void) {}
-static inline unsigned int ep93xx_chip_revision(void) { return 0; }
-
-#endif
+#define to_ep93xx_regmap_adev(_adev) \
+ container_of((_adev), struct ep93xx_regmap_adev, adev)
#endif
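A sketch of an auxiliary driver consuming the shared regmap above; the register offset and the probe shape are hypothetical:

#define EXAMPLE_SYSCON_REG	0x9c	/* hypothetical offset in the syscon block */

static int example_ep93xx_aux_probe(struct auxiliary_device *adev,
				    const struct auxiliary_device_id *id)
{
	struct ep93xx_regmap_adev *rdev = to_ep93xx_regmap_adev(adev);

	/* serialized write via the parent-provided helper (takes the shared lock) */
	rdev->write(rdev->map, rdev->lock, EXAMPLE_SYSCON_REG, 0x1);

	return 0;
}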
diff --git a/include/linux/soc/ixp4xx/cpu.h b/include/linux/soc/ixp4xx/cpu.h
new file mode 100644
index 000000000000..f526ac33afea
--- /dev/null
+++ b/include/linux/soc/ixp4xx/cpu.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * IXP4XX cpu type detection
+ *
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ */
+
+#ifndef __SOC_IXP4XX_CPU_H__
+#define __SOC_IXP4XX_CPU_H__
+
+#include <linux/io.h>
+#include <linux/regmap.h>
+#ifdef CONFIG_ARM
+#include <asm/cputype.h>
+#endif
+
+/* Processor id value in CP15 Register 0 */
+#define IXP42X_PROCESSOR_ID_VALUE 0x690541c0 /* including unused 0x690541Ex */
+#define IXP42X_PROCESSOR_ID_MASK 0xffffffc0
+
+#define IXP43X_PROCESSOR_ID_VALUE 0x69054040
+#define IXP43X_PROCESSOR_ID_MASK 0xfffffff0
+
+#define IXP46X_PROCESSOR_ID_VALUE 0x69054200 /* including IXP455 */
+#define IXP46X_PROCESSOR_ID_MASK 0xfffffff0
+
+/* Feature register in the expansion bus controller */
+#define IXP4XX_EXP_CNFG2 0x2c
+
+/* "fuse" bits of IXP_EXP_CFG2 */
+/* All IXP4xx CPUs */
+#define IXP4XX_FEATURE_RCOMP (1 << 0)
+#define IXP4XX_FEATURE_USB_DEVICE (1 << 1)
+#define IXP4XX_FEATURE_HASH (1 << 2)
+#define IXP4XX_FEATURE_AES (1 << 3)
+#define IXP4XX_FEATURE_DES (1 << 4)
+#define IXP4XX_FEATURE_HDLC (1 << 5)
+#define IXP4XX_FEATURE_AAL (1 << 6)
+#define IXP4XX_FEATURE_HSS (1 << 7)
+#define IXP4XX_FEATURE_UTOPIA (1 << 8)
+#define IXP4XX_FEATURE_NPEB_ETH0 (1 << 9)
+#define IXP4XX_FEATURE_NPEC_ETH (1 << 10)
+#define IXP4XX_FEATURE_RESET_NPEA (1 << 11)
+#define IXP4XX_FEATURE_RESET_NPEB (1 << 12)
+#define IXP4XX_FEATURE_RESET_NPEC (1 << 13)
+#define IXP4XX_FEATURE_PCI (1 << 14)
+#define IXP4XX_FEATURE_UTOPIA_PHY_LIMIT (3 << 16)
+#define IXP4XX_FEATURE_XSCALE_MAX_FREQ (3 << 22)
+#define IXP42X_FEATURE_MASK (IXP4XX_FEATURE_RCOMP | \
+ IXP4XX_FEATURE_USB_DEVICE | \
+ IXP4XX_FEATURE_HASH | \
+ IXP4XX_FEATURE_AES | \
+ IXP4XX_FEATURE_DES | \
+ IXP4XX_FEATURE_HDLC | \
+ IXP4XX_FEATURE_AAL | \
+ IXP4XX_FEATURE_HSS | \
+ IXP4XX_FEATURE_UTOPIA | \
+ IXP4XX_FEATURE_NPEB_ETH0 | \
+ IXP4XX_FEATURE_NPEC_ETH | \
+ IXP4XX_FEATURE_RESET_NPEA | \
+ IXP4XX_FEATURE_RESET_NPEB | \
+ IXP4XX_FEATURE_RESET_NPEC | \
+ IXP4XX_FEATURE_PCI | \
+ IXP4XX_FEATURE_UTOPIA_PHY_LIMIT | \
+ IXP4XX_FEATURE_XSCALE_MAX_FREQ)
+
+
+/* IXP43x/46x CPUs */
+#define IXP4XX_FEATURE_ECC_TIMESYNC (1 << 15)
+#define IXP4XX_FEATURE_USB_HOST (1 << 18)
+#define IXP4XX_FEATURE_NPEA_ETH (1 << 19)
+#define IXP43X_FEATURE_MASK (IXP42X_FEATURE_MASK | \
+ IXP4XX_FEATURE_ECC_TIMESYNC | \
+ IXP4XX_FEATURE_USB_HOST | \
+ IXP4XX_FEATURE_NPEA_ETH)
+
+/* IXP46x CPU (including IXP455) only */
+#define IXP4XX_FEATURE_NPEB_ETH_1_TO_3 (1 << 20)
+#define IXP4XX_FEATURE_RSA (1 << 21)
+#define IXP46X_FEATURE_MASK (IXP43X_FEATURE_MASK | \
+ IXP4XX_FEATURE_NPEB_ETH_1_TO_3 | \
+ IXP4XX_FEATURE_RSA)
+
+#ifdef CONFIG_ARCH_IXP4XX
+#define cpu_is_ixp42x_rev_a0() ((read_cpuid_id() & (IXP42X_PROCESSOR_ID_MASK | 0xF)) == \
+ IXP42X_PROCESSOR_ID_VALUE)
+#define cpu_is_ixp42x() ((read_cpuid_id() & IXP42X_PROCESSOR_ID_MASK) == \
+ IXP42X_PROCESSOR_ID_VALUE)
+#define cpu_is_ixp43x() ((read_cpuid_id() & IXP43X_PROCESSOR_ID_MASK) == \
+ IXP43X_PROCESSOR_ID_VALUE)
+#define cpu_is_ixp46x() ((read_cpuid_id() & IXP46X_PROCESSOR_ID_MASK) == \
+ IXP46X_PROCESSOR_ID_VALUE)
+static inline u32 cpu_ixp4xx_features(struct regmap *rmap)
+{
+ u32 val;
+
+ regmap_read(rmap, IXP4XX_EXP_CNFG2, &val);
+ /* For some reason this register is inverted */
+ val = ~val;
+ if (cpu_is_ixp42x_rev_a0())
+ return IXP42X_FEATURE_MASK & ~(IXP4XX_FEATURE_RCOMP |
+ IXP4XX_FEATURE_AES);
+ if (cpu_is_ixp42x())
+ return val & IXP42X_FEATURE_MASK;
+ if (cpu_is_ixp43x())
+ return val & IXP43X_FEATURE_MASK;
+ return val & IXP46X_FEATURE_MASK;
+}
+#else
+#define cpu_is_ixp42x_rev_a0() 0
+#define cpu_is_ixp42x() 0
+#define cpu_is_ixp43x() 0
+#define cpu_is_ixp46x() 0
+static inline u32 cpu_ixp4xx_features(struct regmap *rmap)
+{
+ return 0;
+}
+#endif
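A usage sketch gating driver setup on the fuse bits, with the regmap assumed to cover the expansion bus controller registers:

static bool example_has_npec_eth(struct regmap *exp_rmap)
{
	/* the feature word is already un-inverted by cpu_ixp4xx_features() */
	return cpu_ixp4xx_features(exp_rmap) & IXP4XX_FEATURE_NPEC_ETH;
}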
+
+#endif /* __SOC_IXP4XX_CPU_H__ */
diff --git a/include/linux/soc/ixp4xx/npe.h b/include/linux/soc/ixp4xx/npe.h
index 2a91f465d456..9efeac777da1 100644
--- a/include/linux/soc/ixp4xx/npe.h
+++ b/include/linux/soc/ixp4xx/npe.h
@@ -3,6 +3,7 @@
#define __IXP4XX_NPE_H
#include <linux/kernel.h>
+#include <linux/regmap.h>
extern const char *npe_names[];
@@ -17,6 +18,7 @@ struct npe_regs {
struct npe {
struct npe_regs __iomem *regs;
+ struct regmap *rmap;
int id;
int valid;
};
diff --git a/include/linux/soc/marvell/octeontx2/asm.h b/include/linux/soc/marvell/octeontx2/asm.h
index 28c04d918f0f..d683251a0b40 100644
--- a/include/linux/soc/marvell/octeontx2/asm.h
+++ b/include/linux/soc/marvell/octeontx2/asm.h
@@ -5,6 +5,7 @@
#ifndef __SOC_OTX2_ASM_H
#define __SOC_OTX2_ASM_H
+#include <linux/types.h>
#if defined(CONFIG_ARM64)
/*
* otx2_lmt_flush is used for LMT store operation.
@@ -22,16 +23,35 @@
: [rs]"r" (ioaddr)); \
(result); \
})
+/*
+ * STEORL is a store to memory with release semantics, which
+ * avoids the need for a DMB barrier after each LMTST
+ * operation.
+ */
#define cn10k_lmt_flush(val, addr) \
({ \
__asm__ volatile(".cpu generic+lse\n" \
- "steor %x[rf],[%[rs]]" \
- : [rf]"+r"(val) \
- : [rs]"r"(addr)); \
+ "steorl %x[rf],[%[rs]]" \
+ : [rf] "+r"(val) \
+ : [rs] "r"(addr)); \
})
+
+static inline u64 otx2_atomic64_fetch_add(u64 incr, u64 *ptr)
+{
+ u64 result;
+
+ asm volatile (".cpu generic+lse\n"
+ "ldadda %x[i], %x[r], [%[b]]"
+ : [r] "=r" (result), "+m" (*ptr)
+ : [i] "r" (incr), [b] "r" (ptr)
+ : "memory");
+ return result;
+}
+
#else
#define otx2_lmt_flush(ioaddr) ({ 0; })
#define cn10k_lmt_flush(val, addr) ({ addr = val; })
+#define otx2_atomic64_fetch_add(incr, ptr) ({ incr; })
#endif
#endif /* __SOC_OTX2_ASM_H */
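A sketch of the LSE fetch-add helper above; the queue-head counter is an invented example, and on non-arm64 builds the fallback macro simply evaluates to incr:

static u64 example_claim_sqe_slot(u64 *sq_head_counter)
{
	/* returns the value before the add, i.e. the claimed slot */
	return otx2_atomic64_fetch_add(1, sq_head_counter);
}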
diff --git a/include/linux/soc/marvell/silicons.h b/include/linux/soc/marvell/silicons.h
new file mode 100644
index 000000000000..66bb9bfaf17d
--- /dev/null
+++ b/include/linux/soc/marvell/silicons.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (C) 2024 Marvell.
+ */
+
+#ifndef __SOC_SILICON_H
+#define __SOC_SILICON_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#if defined(CONFIG_ARM64)
+
+#define CN20K_CHIPID 0x20
+/*
+ * Silicon check for CN20K family
+ */
+static inline bool is_cn20k(struct pci_dev *pdev)
+{
+ return (pdev->subsystem_device & 0xFF) == CN20K_CHIPID;
+}
+#else
+#define is_cn20k(pdev) ((void)(pdev), 0)
+#endif
+
+#endif /* __SOC_SILICON_H */
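A usage sketch during PCI probe, purely to illustrate the silicon check:

static void example_log_silicon(struct pci_dev *pdev)
{
	if (is_cn20k(pdev))
		pci_info(pdev, "CN20K family silicon\n");
	else
		pci_info(pdev, "pre-CN20K silicon\n");
}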
diff --git a/include/linux/soc/mediatek/dvfsrc.h b/include/linux/soc/mediatek/dvfsrc.h
new file mode 100644
index 000000000000..1498b3ed396b
--- /dev/null
+++ b/include/linux/soc/mediatek/dvfsrc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2021 MediaTek Inc.
+ * Copyright (c) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef __MEDIATEK_DVFSRC_H
+#define __MEDIATEK_DVFSRC_H
+
+#include <linux/types.h>
+
+struct device;
+
+enum mtk_dvfsrc_cmd {
+ MTK_DVFSRC_CMD_BW,
+ MTK_DVFSRC_CMD_HRT_BW,
+ MTK_DVFSRC_CMD_PEAK_BW,
+ MTK_DVFSRC_CMD_OPP,
+ MTK_DVFSRC_CMD_VCORE_LEVEL,
+ MTK_DVFSRC_CMD_VSCP_LEVEL,
+ MTK_DVFSRC_CMD_MAX,
+};
+
+#if IS_ENABLED(CONFIG_MTK_DVFSRC)
+
+int mtk_dvfsrc_send_request(const struct device *dev, u32 cmd, u64 data);
+int mtk_dvfsrc_query_info(const struct device *dev, u32 cmd, int *data);
+
+#else
+
+static inline int mtk_dvfsrc_send_request(const struct device *dev, u32 cmd, u64 data)
+{ return -ENODEV; }
+
+static inline int mtk_dvfsrc_query_info(const struct device *dev, u32 cmd, int *data)
+{ return -ENODEV; }
+
+#endif /* CONFIG_MTK_DVFSRC */
+
+#endif
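A consumer sketch raising and reading back the VCORE level; how the DVFSRC struct device is resolved (typically via a phandle) and the level value 1 are assumptions:

static int example_bump_vcore(const struct device *dvfsrc_dev)
{
	int level, ret;

	ret = mtk_dvfsrc_send_request(dvfsrc_dev, MTK_DVFSRC_CMD_VCORE_LEVEL, 1);
	if (ret)
		return ret;

	ret = mtk_dvfsrc_query_info(dvfsrc_dev, MTK_DVFSRC_CMD_VCORE_LEVEL,
				    &level);
	return ret ? ret : level;
}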
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
index 4615a228da51..9956e18c5ffa 100644
--- a/include/linux/soc/mediatek/infracfg.h
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -2,6 +2,129 @@
#ifndef __SOC_MEDIATEK_INFRACFG_H
#define __SOC_MEDIATEK_INFRACFG_H
+#define MT8365_INFRA_TOPAXI_PROTECTEN_STA1 0x228
+#define MT8365_INFRA_TOPAXI_PROTECTEN_SET 0x2a0
+#define MT8365_INFRA_TOPAXI_PROTECTEN_CLR 0x2a4
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MM_M0 BIT(1)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MDMCU_M1 BIT(2)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MMAPB_S BIT(6)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MM2INFRA_AXI_GALS_SLV_0 BIT(10)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MM2INFRA_AXI_GALS_SLV_1 BIT(11)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_AP2CONN_AHB BIT(13)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_CONN2INFRA_AHB BIT(14)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MFG_M0 BIT(21)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_INFRA2MFG BIT(22)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_STA1 0x258
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_SET 0x2a8
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CLR 0x2ac
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_APU2AP BIT(2)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_MM2INFRA_AXI_GALS_MST_0 BIT(16)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_MM2INFRA_AXI_GALS_MST_1 BIT(17)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CONN2INFRA_AXI_GALS_MST BIT(18)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CAM2MM_AXI_GALS_MST BIT(19)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_APU_CBIP_GALS_MST BIT(20)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_INFRA2CONN_AHB_GALS_SLV BIT(21)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_PWRDNREQ_INFRA_GALS_ADB BIT(24)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_PWRDNREQ_MP1_L2C_AFIFO BIT(27)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_AUDIO_M BIT(28)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_DSP_M BIT(30)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_DSP_S BIT(31)
+
+#define MT8365_INFRA_NAO_TOPAXI_SI0_STA 0x0
+#define MT8365_INFRA_NAO_TOPAXI_SI0_CTRL_UPDATED BIT(24)
+#define MT8365_INFRA_NAO_TOPAXI_SI2_STA 0x28
+#define MT8365_INFRA_NAO_TOPAXI_SI2_CTRL_UPDATED BIT(14)
+#define MT8365_INFRA_TOPAXI_SI0_CTL 0x200
+#define MT8365_INFRA_TOPAXI_SI0_WAY_EN_MMAPB_S BIT(6)
+#define MT8365_INFRA_TOPAXI_SI2_CTL 0x234
+#define MT8365_INFRA_TOPAXI_SI2_WAY_EN_PERI_M1 BIT(5)
+
+#define MT8365_SMI_COMMON_CLAMP_EN 0x3c0
+#define MT8365_SMI_COMMON_CLAMP_EN_SET 0x3c4
+#define MT8365_SMI_COMMON_CLAMP_EN_CLR 0x3c8
+
+#define MT8195_TOP_AXI_PROT_EN_STA1 0x228
+#define MT8195_TOP_AXI_PROT_EN_1_STA1 0x258
+#define MT8195_TOP_AXI_PROT_EN_SET 0x2a0
+#define MT8195_TOP_AXI_PROT_EN_CLR 0x2a4
+#define MT8195_TOP_AXI_PROT_EN_1_SET 0x2a8
+#define MT8195_TOP_AXI_PROT_EN_1_CLR 0x2ac
+#define MT8195_TOP_AXI_PROT_EN_MM_SET 0x2d4
+#define MT8195_TOP_AXI_PROT_EN_MM_CLR 0x2d8
+#define MT8195_TOP_AXI_PROT_EN_MM_STA1 0x2ec
+#define MT8195_TOP_AXI_PROT_EN_2_SET 0x714
+#define MT8195_TOP_AXI_PROT_EN_2_CLR 0x718
+#define MT8195_TOP_AXI_PROT_EN_2_STA1 0x724
+#define MT8195_TOP_AXI_PROT_EN_VDNR_SET 0xb84
+#define MT8195_TOP_AXI_PROT_EN_VDNR_CLR 0xb88
+#define MT8195_TOP_AXI_PROT_EN_VDNR_STA1 0xb90
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_SET 0xba4
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR 0xba8
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1 0xbb0
+#define MT8195_TOP_AXI_PROT_EN_VDNR_2_SET 0xbb8
+#define MT8195_TOP_AXI_PROT_EN_VDNR_2_CLR 0xbbc
+#define MT8195_TOP_AXI_PROT_EN_VDNR_2_STA1 0xbc4
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET 0xbcc
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR 0xbd0
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1 0xbd8
+#define MT8195_TOP_AXI_PROT_EN_MM_2_SET 0xdcc
+#define MT8195_TOP_AXI_PROT_EN_MM_2_CLR 0xdd0
+#define MT8195_TOP_AXI_PROT_EN_MM_2_STA1 0xdd8
+
+#define MT8195_TOP_AXI_PROT_EN_VDOSYS0 BIT(6)
+#define MT8195_TOP_AXI_PROT_EN_VPPSYS0 BIT(10)
+#define MT8195_TOP_AXI_PROT_EN_MFG1 BIT(11)
+#define MT8195_TOP_AXI_PROT_EN_MFG1_2ND GENMASK(22, 21)
+#define MT8195_TOP_AXI_PROT_EN_VPPSYS0_2ND BIT(23)
+#define MT8195_TOP_AXI_PROT_EN_1_MFG1 GENMASK(20, 19)
+#define MT8195_TOP_AXI_PROT_EN_1_CAM BIT(22)
+#define MT8195_TOP_AXI_PROT_EN_2_CAM BIT(0)
+#define MT8195_TOP_AXI_PROT_EN_2_MFG1_2ND GENMASK(6, 5)
+#define MT8195_TOP_AXI_PROT_EN_2_MFG1 BIT(7)
+#define MT8195_TOP_AXI_PROT_EN_2_AUDIO (BIT(9) | BIT(11))
+#define MT8195_TOP_AXI_PROT_EN_2_ADSP (BIT(12) | GENMASK(16, 14))
+#define MT8195_TOP_AXI_PROT_EN_MM_CAM (BIT(0) | BIT(2) | BIT(4))
+#define MT8195_TOP_AXI_PROT_EN_MM_IPE BIT(1)
+#define MT8195_TOP_AXI_PROT_EN_MM_IMG BIT(3)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS0 GENMASK(21, 17)
+#define MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1 GENMASK(8, 5)
+#define MT8195_TOP_AXI_PROT_EN_MM_VENC (BIT(9) | BIT(11))
+#define MT8195_TOP_AXI_PROT_EN_MM_VENC_CORE1 (BIT(10) | BIT(12))
+#define MT8195_TOP_AXI_PROT_EN_MM_VDEC0 BIT(13)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDEC1 BIT(14)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1_2ND BIT(22)
+#define MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1_2ND BIT(23)
+#define MT8195_TOP_AXI_PROT_EN_MM_CAM_2ND BIT(24)
+#define MT8195_TOP_AXI_PROT_EN_MM_IMG_2ND BIT(25)
+#define MT8195_TOP_AXI_PROT_EN_MM_VENC_2ND BIT(26)
+#define MT8195_TOP_AXI_PROT_EN_MM_WPESYS BIT(27)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDEC0_2ND BIT(28)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDEC1_2ND BIT(29)
+#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1 GENMASK(31, 30)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0_2ND (GENMASK(1, 0) | BIT(4) | BIT(11))
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VENC BIT(2)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VENC_CORE1 (BIT(3) | BIT(15))
+#define MT8195_TOP_AXI_PROT_EN_MM_2_CAM (BIT(5) | BIT(17))
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS1 (GENMASK(7, 6) | BIT(18))
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0 GENMASK(9, 8)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VDOSYS1 BIT(10)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2_2ND BIT(12)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0_2ND BIT(13)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS_2ND BIT(14)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_IPE BIT(16)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2 BIT(21)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0 BIT(22)
+#define MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS GENMASK(24, 23)
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_EPD_TX BIT(1)
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_DP_TX BIT(2)
+#define MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P0 (BIT(11) | BIT(28))
+#define MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P1 (BIT(12) | BIT(29))
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P0 BIT(13)
+#define MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P1 BIT(14)
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1 (BIT(17) | BIT(19))
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VPPSYS0 BIT(20)
+#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VDOSYS0 BIT(21)
+
#define MT8192_TOP_AXI_PROT_EN_STA1 0x228
#define MT8192_TOP_AXI_PROT_EN_1_STA1 0x258
#define MT8192_TOP_AXI_PROT_EN_SET 0x2a0
@@ -58,6 +181,175 @@
#define MT8192_TOP_AXI_PROT_EN_MM_2_MDP_2ND BIT(13)
#define MT8192_TOP_AXI_PROT_EN_VDNR_CAM BIT(21)
+#define MT8188_TOP_AXI_PROT_EN_SET 0x2A0
+#define MT8188_TOP_AXI_PROT_EN_CLR 0x2A4
+#define MT8188_TOP_AXI_PROT_EN_STA 0x228
+#define MT8188_TOP_AXI_PROT_EN_1_SET 0x2A8
+#define MT8188_TOP_AXI_PROT_EN_1_CLR 0x2AC
+#define MT8188_TOP_AXI_PROT_EN_1_STA 0x258
+#define MT8188_TOP_AXI_PROT_EN_2_SET 0x714
+#define MT8188_TOP_AXI_PROT_EN_2_CLR 0x718
+#define MT8188_TOP_AXI_PROT_EN_2_STA 0x724
+
+#define MT8188_TOP_AXI_PROT_EN_MM_SET 0x2D4
+#define MT8188_TOP_AXI_PROT_EN_MM_CLR 0x2D8
+#define MT8188_TOP_AXI_PROT_EN_MM_STA 0x2EC
+#define MT8188_TOP_AXI_PROT_EN_MM_2_SET 0xDCC
+#define MT8188_TOP_AXI_PROT_EN_MM_2_CLR 0xDD0
+#define MT8188_TOP_AXI_PROT_EN_MM_2_STA 0xDD8
+
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_SET 0xB84
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_CLR 0xB88
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_STA 0xB90
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET 0xBCC
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR 0xBD0
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA 0xBD8
+
+#define MT8188_TOP_AXI_PROT_EN_MFG1_STEP1 BIT(11)
+#define MT8188_TOP_AXI_PROT_EN_2_MFG1_STEP2 BIT(7)
+#define MT8188_TOP_AXI_PROT_EN_1_MFG1_STEP3 BIT(19)
+#define MT8188_TOP_AXI_PROT_EN_2_MFG1_STEP4 BIT(5)
+#define MT8188_TOP_AXI_PROT_EN_MFG1_STEP5 GENMASK(22, 21)
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1_STEP6 BIT(17)
+
+#define MT8188_TOP_AXI_PROT_EN_PEXTP_MAC_P0_STEP1 BIT(2)
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_PEXTP_MAC_P0_STEP2 (BIT(8) | BIT(18) | BIT(30))
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_ETHER_STEP1 BIT(24)
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_HDMI_TX_STEP1 BIT(20)
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_AO_STEP1 GENMASK(31, 29)
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_AO_STEP2 (GENMASK(4, 3) | BIT(28))
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_INFRA_STEP1 (GENMASK(16, 14) | BIT(23) | \
+ BIT(27))
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_INFRA_STEP2 (GENMASK(19, 17) | GENMASK(26, 25))
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_STEP1 GENMASK(11, 8)
+#define MT8188_TOP_AXI_PROT_EN_2_ADSP_STEP2 GENMASK(22, 21)
+#define MT8188_TOP_AXI_PROT_EN_2_AUDIO_STEP1 BIT(20)
+#define MT8188_TOP_AXI_PROT_EN_2_AUDIO_STEP2 BIT(12)
+#define MT8188_TOP_AXI_PROT_EN_2_AUDIO_ASRC_STEP1 BIT(24)
+#define MT8188_TOP_AXI_PROT_EN_2_AUDIO_ASRC_STEP2 BIT(13)
+
+#define MT8188_TOP_AXI_PROT_EN_VPPSYS0_STEP1 BIT(10)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS0_STEP2 GENMASK(9, 8)
+#define MT8188_TOP_AXI_PROT_EN_VPPSYS0_STEP3 BIT(23)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS0_STEP4 (BIT(1) | BIT(4) | BIT(11))
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VPPSYS0_STEP5 (BIT(20))
+#define MT8188_TOP_AXI_PROT_EN_MM_VDOSYS0_STEP1 (GENMASK(18, 17) | GENMASK(21, 20))
+#define MT8188_TOP_AXI_PROT_EN_VDOSYS0_STEP2 BIT(6)
+#define MT8188_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VDOSYS0_STEP3 BIT(21)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDOSYS1_STEP1 GENMASK(31, 30)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDOSYS1_STEP2 BIT(22)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VDOSYS1_STEP3 BIT(10)
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_DP_TX_STEP1 BIT(23)
+#define MT8188_TOP_AXI_PROT_EN_INFRA_VDNR_EDP_TX_STEP1 BIT(22)
+
+#define MT8188_TOP_AXI_PROT_EN_MM_VPPSYS1_STEP1 GENMASK(6, 5)
+#define MT8188_TOP_AXI_PROT_EN_MM_VPPSYS1_STEP2 BIT(23)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VPPSYS1_STEP3 BIT(18)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_WPE_STEP1 BIT(23)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_WPE_STEP2 BIT(21)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDEC0_STEP1 BIT(13)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VDEC0_STEP2 BIT(13)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDEC1_STEP1 BIT(14)
+#define MT8188_TOP_AXI_PROT_EN_MM_VDEC1_STEP2 BIT(29)
+#define MT8188_TOP_AXI_PROT_EN_MM_VENC_STEP1 (BIT(9) | BIT(11))
+#define MT8188_TOP_AXI_PROT_EN_MM_VENC_STEP2 BIT(26)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_VENC_STEP3 BIT(2)
+#define MT8188_TOP_AXI_PROT_EN_MM_IMG_VCORE_STEP1 (BIT(1) | BIT(3))
+#define MT8188_TOP_AXI_PROT_EN_MM_IMG_VCORE_STEP2 BIT(25)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_IMG_VCORE_STEP3 BIT(16)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_IMG_MAIN_STEP1 GENMASK(27, 26)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_IMG_MAIN_STEP2 GENMASK(25, 24)
+#define MT8188_TOP_AXI_PROT_EN_MM_CAM_VCORE_STEP1 (BIT(2) | BIT(4))
+#define MT8188_TOP_AXI_PROT_EN_2_CAM_VCORE_STEP2 BIT(0)
+#define MT8188_TOP_AXI_PROT_EN_1_CAM_VCORE_STEP3 BIT(22)
+#define MT8188_TOP_AXI_PROT_EN_MM_CAM_VCORE_STEP4 BIT(24)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_CAM_VCORE_STEP5 BIT(17)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_CAM_MAIN_STEP1 GENMASK(31, 30)
+#define MT8188_TOP_AXI_PROT_EN_2_CAM_MAIN_STEP2 BIT(2)
+#define MT8188_TOP_AXI_PROT_EN_MM_2_CAM_MAIN_STEP3 GENMASK(29, 28)
+#define MT8188_TOP_AXI_PROT_EN_2_CAM_MAIN_STEP4 BIT(1)
+
+#define MT8188_SMI_COMMON_CLAMP_EN_STA 0x3C0
+#define MT8188_SMI_COMMON_CLAMP_EN_SET 0x3C4
+#define MT8188_SMI_COMMON_CLAMP_EN_CLR 0x3C8
+
+#define MT8188_SMI_COMMON_SMI_CLAMP_DIP_TO_VDO0 GENMASK(3, 1)
+#define MT8188_SMI_COMMON_SMI_CLAMP_DIP_TO_VPP1 GENMASK(2, 1)
+#define MT8188_SMI_COMMON_SMI_CLAMP_IPE_TO_VPP1 BIT(0)
+
+#define MT8188_SMI_COMMON_SMI_CLAMP_CAM_SUBA_TO_VPP0 GENMASK(3, 2)
+#define MT8188_SMI_COMMON_SMI_CLAMP_CAM_SUBB_TO_VDO0 GENMASK(3, 2)
+
+#define MT8188_SMI_LARB10_RESET_ADDR 0xC
+#define MT8188_SMI_LARB11A_RESET_ADDR 0xC
+#define MT8188_SMI_LARB11C_RESET_ADDR 0xC
+#define MT8188_SMI_LARB12_RESET_ADDR 0xC
+#define MT8188_SMI_LARB11B_RESET_ADDR 0xC
+#define MT8188_SMI_LARB15_RESET_ADDR 0xC
+#define MT8188_SMI_LARB16B_RESET_ADDR 0xA0
+#define MT8188_SMI_LARB17B_RESET_ADDR 0xA0
+#define MT8188_SMI_LARB16A_RESET_ADDR 0xA0
+#define MT8188_SMI_LARB17A_RESET_ADDR 0xA0
+
+#define MT8188_SMI_LARB10_RESET BIT(0)
+#define MT8188_SMI_LARB11A_RESET BIT(0)
+#define MT8188_SMI_LARB11C_RESET BIT(0)
+#define MT8188_SMI_LARB12_RESET BIT(8)
+#define MT8188_SMI_LARB11B_RESET BIT(0)
+#define MT8188_SMI_LARB15_RESET BIT(0)
+#define MT8188_SMI_LARB16B_RESET BIT(4)
+#define MT8188_SMI_LARB17B_RESET BIT(4)
+#define MT8188_SMI_LARB16A_RESET BIT(4)
+#define MT8188_SMI_LARB17A_RESET BIT(4)
+
+#define MT8186_TOP_AXI_PROT_EN_SET (0x2A0)
+#define MT8186_TOP_AXI_PROT_EN_CLR (0x2A4)
+#define MT8186_TOP_AXI_PROT_EN_STA (0x228)
+#define MT8186_TOP_AXI_PROT_EN_1_SET (0x2A8)
+#define MT8186_TOP_AXI_PROT_EN_1_CLR (0x2AC)
+#define MT8186_TOP_AXI_PROT_EN_1_STA (0x258)
+#define MT8186_TOP_AXI_PROT_EN_2_SET (0x2B0)
+#define MT8186_TOP_AXI_PROT_EN_2_CLR (0x2B4)
+#define MT8186_TOP_AXI_PROT_EN_2_STA (0x26C)
+#define MT8186_TOP_AXI_PROT_EN_3_SET (0x2B8)
+#define MT8186_TOP_AXI_PROT_EN_3_CLR (0x2BC)
+#define MT8186_TOP_AXI_PROT_EN_3_STA (0x2C8)
+
+/* MFG1 */
+#define MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP1 (GENMASK(28, 27))
+#define MT8186_TOP_AXI_PROT_EN_MFG1_STEP2 (GENMASK(22, 21))
+#define MT8186_TOP_AXI_PROT_EN_MFG1_STEP3 (BIT(25))
+#define MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP4 (BIT(29))
+/* DIS */
+#define MT8186_TOP_AXI_PROT_EN_1_DIS_STEP1 (GENMASK(12, 11))
+#define MT8186_TOP_AXI_PROT_EN_DIS_STEP2 (GENMASK(2, 1) | GENMASK(11, 10))
+/* IMG */
+#define MT8186_TOP_AXI_PROT_EN_1_IMG_STEP1 (BIT(23))
+#define MT8186_TOP_AXI_PROT_EN_1_IMG_STEP2 (BIT(15))
+/* IPE */
+#define MT8186_TOP_AXI_PROT_EN_1_IPE_STEP1 (BIT(24))
+#define MT8186_TOP_AXI_PROT_EN_1_IPE_STEP2 (BIT(16))
+/* CAM */
+#define MT8186_TOP_AXI_PROT_EN_1_CAM_STEP1 (GENMASK(22, 21))
+#define MT8186_TOP_AXI_PROT_EN_1_CAM_STEP2 (GENMASK(14, 13))
+/* VENC */
+#define MT8186_TOP_AXI_PROT_EN_1_VENC_STEP1 (BIT(31))
+#define MT8186_TOP_AXI_PROT_EN_1_VENC_STEP2 (BIT(19))
+/* VDEC */
+#define MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP1 (BIT(30))
+#define MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP2 (BIT(17))
+/* WPE */
+#define MT8186_TOP_AXI_PROT_EN_2_WPE_STEP1 (BIT(17))
+#define MT8186_TOP_AXI_PROT_EN_2_WPE_STEP2 (BIT(16))
+/* CONN_ON */
+#define MT8186_TOP_AXI_PROT_EN_1_CONN_ON_STEP1 (BIT(18))
+#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP2 (BIT(14))
+#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP3 (BIT(13))
+#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP4 (BIT(16))
+/* ADSP_TOP */
+#define MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP1 (GENMASK(12, 11))
+#define MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP2 (GENMASK(1, 0))
+
#define MT8183_TOP_AXI_PROT_EN_STA1 0x228
#define MT8183_TOP_AXI_PROT_EN_STA1_1 0x258
#define MT8183_TOP_AXI_PROT_EN_SET 0x2a0
@@ -142,11 +434,19 @@
#define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \
BIT(7) | BIT(8))
+#define MT6735_TOP_AXI_PROT_EN_CONN (BIT(2) | BIT(8))
+#define MT6735_TOP_AXI_PROT_EN_MD1 (BIT(24) | BIT(25) | \
+ BIT(26) | BIT(27) | \
+ BIT(28))
+
#define INFRA_TOPAXI_PROTECTEN 0x0220
#define INFRA_TOPAXI_PROTECTSTA1 0x0228
#define INFRA_TOPAXI_PROTECTEN_SET 0x0260
#define INFRA_TOPAXI_PROTECTEN_CLR 0x0264
+#define MT8192_INFRA_CTRL 0x290
+#define MT8192_INFRA_CTRL_DISABLE_MFG2ACP BIT(9)
+
#define REG_INFRA_MISC 0xf00
#define F_DDR_4GB_SUPPORT_EN BIT(13)
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index ac6b5f3cba95..0c3906e8ad19 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -14,8 +14,42 @@
#define CMDQ_ADDR_HIGH(addr) ((u32)(((addr) >> 16) & GENMASK(31, 0)))
#define CMDQ_ADDR_LOW(addr) ((u16)(addr) | BIT(1))
+/*
+ * Every cmdq thread has its own SPRs (Specific Purpose Registers),
+ * so there are 4 * N (threads) SPRs in GCE that share the same indexes below.
+ */
+#define CMDQ_THR_SPR_IDX0 (0)
+#define CMDQ_THR_SPR_IDX1 (1)
+#define CMDQ_THR_SPR_IDX2 (2)
+#define CMDQ_THR_SPR_IDX3 (3)
+
struct cmdq_pkt;
+enum cmdq_logic_op {
+ CMDQ_LOGIC_ASSIGN = 0,
+ CMDQ_LOGIC_ADD = 1,
+ CMDQ_LOGIC_SUBTRACT = 2,
+ CMDQ_LOGIC_MULTIPLY = 3,
+ CMDQ_LOGIC_XOR = 8,
+ CMDQ_LOGIC_NOT = 9,
+ CMDQ_LOGIC_OR = 10,
+ CMDQ_LOGIC_AND = 11,
+ CMDQ_LOGIC_LEFT_SHIFT = 12,
+ CMDQ_LOGIC_RIGHT_SHIFT = 13,
+ CMDQ_LOGIC_MAX,
+};
+
+struct cmdq_operand {
+ /* register type */
+ bool reg;
+ union {
+ /* index */
+ u16 idx;
+ /* value */
+ u16 value;
+ };
+};
+
struct cmdq_client_reg {
u8 subsys;
u16 offset;
@@ -27,6 +61,8 @@ struct cmdq_client {
struct mbox_chan *chan;
};
+#if IS_ENABLED(CONFIG_MTK_CMDQ)
+
/**
* cmdq_dev_get_client_reg() - parse cmdq client reg from the device
* node of CMDQ client
@@ -60,17 +96,19 @@ void cmdq_mbox_destroy(struct cmdq_client *client);
/**
* cmdq_pkt_create() - create a CMDQ packet
* @client: the CMDQ mailbox client
+ * @pkt: the CMDQ packet
* @size: required CMDQ buffer size
*
- * Return: CMDQ packet pointer
+ * Return: 0 for success; else the error code is returned
*/
-struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size);
+int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size);
/**
* cmdq_pkt_destroy() - destroy the CMDQ packet
+ * @client: the CMDQ mailbox client
* @pkt: the CMDQ packet
*/
-void cmdq_pkt_destroy(struct cmdq_pkt *pkt);
+void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt);
/**
* cmdq_pkt_write() - append write command to the CMDQ packet
@@ -172,6 +210,18 @@ int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
u16 addr_low, u32 value, u32 mask);
/**
+ * cmdq_pkt_mem_move() - append memory move command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @src_addr: source address
+ * @dst_addr: destination address
+ *
+ * Appends a CMDQ command to copy the value found in `src_addr` to `dst_addr`.
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_mem_move(struct cmdq_pkt *pkt, dma_addr_t src_addr, dma_addr_t dst_addr);
+
+/**
* cmdq_pkt_wfe() - append wait for event command to the CMDQ packet
* @pkt: the CMDQ packet
* @event: the desired event type to wait
@@ -182,6 +232,21 @@ int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear);
/**
+ * cmdq_pkt_acquire_event() - append acquire event command to the CMDQ packet
+ * @pkt: the CMDQ packet
+ * @event: the desired event to be acquired
+ *
+ * Users can treat cmdq_pkt_acquire_event() as `mutex_lock` and
+ * cmdq_pkt_clear_event() as `mutex_unlock` to protect the critical-section
+ * instructions between them.
+ * cmdq_pkt_acquire_event() waits for the event to be cleared.
+ * Once the event is cleared by cmdq_pkt_clear_event() in another GCE thread,
+ * cmdq_pkt_acquire_event() sets the event and keeps executing the next
+ * instruction.
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_acquire_event(struct cmdq_pkt *pkt, u16 event);
+
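A sketch of the mutex-like pattern described above, serializing one register write across GCE threads; the event id is chosen by the caller:

static int example_locked_write(struct cmdq_pkt *pkt, u16 lock_event,
				u8 subsys, u16 offset, u32 value)
{
	int err;

	err = cmdq_pkt_acquire_event(pkt, lock_event);	/* "mutex_lock" */
	if (err)
		return err;

	err = cmdq_pkt_write(pkt, subsys, offset, value);
	if (err)
		return err;

	return cmdq_pkt_clear_event(pkt, lock_event);	/* "mutex_unlock" */
}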
+/**
* cmdq_pkt_clear_event() - append clear event command to the CMDQ packet
* @pkt: the CMDQ packet
* @event: the desired event to be cleared
@@ -233,6 +298,23 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
u16 offset, u32 value, u32 mask);
/**
+ * cmdq_pkt_logic_command() - Append logic command to the CMDQ packet, ask GCE to
+ *		      execute an instruction that stores the result of the logic
+ *		      operation on the left and right operands into result_reg_idx.
+ * @pkt:	the CMDQ packet
+ * @result_reg_idx:	SPR index that stores the operation result of left_operand and right_operand
+ * @left_operand: left operand
+ * @s_op: the logic operator enum
+ * @right_operand: right operand
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_logic_command(struct cmdq_pkt *pkt, u16 result_reg_idx,
+ struct cmdq_operand *left_operand,
+ enum cmdq_logic_op s_op,
+ struct cmdq_operand *right_operand);
+
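A sketch computing SPR1 = SPR0 + 16 with the operand helpers above; it assumes the packet was created beforehand with cmdq_pkt_create():

static int example_spr_add(struct cmdq_pkt *pkt)
{
	struct cmdq_operand lhs = { .reg = true, .idx = CMDQ_THR_SPR_IDX0 };
	struct cmdq_operand rhs = { .reg = false, .value = 16 };

	/* GCE computes SPR1 = SPR0 + 16 at execution time */
	return cmdq_pkt_logic_command(pkt, CMDQ_THR_SPR_IDX1, &lhs,
				      CMDQ_LOGIC_ADD, &rhs);
}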
+/**
* cmdq_pkt_assign() - Append logic assign command to the CMDQ packet, ask GCE
* to execute an instruction that set a constant value into
* internal register and use as value, mask or address in
@@ -246,38 +328,189 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value);
/**
- * cmdq_pkt_jump() - Append jump command to the CMDQ packet, ask GCE
- * to execute an instruction that change current thread PC to
- * a physical address which should contains more instruction.
+ * cmdq_pkt_poll_addr() - Append blocking POLL command to CMDQ packet
+ * @pkt: the CMDQ packet
+ * @addr: the hardware register address
+ * @value: the specified target register value
+ * @mask: the specified target register mask
+ *
+ * Appends a polling (POLL) command to the CMDQ packet and asks the GCE
+ * to execute an instruction that checks for the specified `value` (with
+ * or without `mask`) to appear in the specified hardware register `addr`.
+ * All GCE threads will be blocked by this instruction.
+ *
+ * Return: 0 for success or negative error code
+ */
+int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask);
+
+/**
+ * cmdq_pkt_jump_abs() - Append jump command to the CMDQ packet, ask GCE
+ *			 to execute an instruction that changes the current
+ *			 thread PC to an absolute physical address, which
+ *			 should contain more instructions.
* @pkt: the CMDQ packet
- * @addr: physical address of target instruction buffer
+ * @addr: absolute physical address of target instruction buffer
+ * @shift_pa:	shift bits of physical address in CMDQ instruction. This value
+ *		is obtained from cmdq_get_shift_pa().
*
* Return: 0 for success; else the error code is returned
*/
-int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr);
+int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa);
+
+/* This wrapper has to be removed after all users have migrated to jump_abs */
+static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa)
+{
+ return cmdq_pkt_jump_abs(pkt, addr, shift_pa);
+}
/**
- * cmdq_pkt_finalize() - Append EOC and jump command to pkt.
+ * cmdq_pkt_jump_rel() - Append jump command to the CMDQ packet, ask GCE
+ *			 to execute an instruction that changes the current
+ *			 thread PC to a physical address with a relative
+ *			 offset. The target address should contain more
+ *			 instructions.
* @pkt: the CMDQ packet
+ * @offset: relative offset of target instruction buffer from current PC.
+ * @shift_pa:	shift bits of physical address in CMDQ instruction. This value
+ *		is obtained from cmdq_get_shift_pa().
*
* Return: 0 for success; else the error code is returned
*/
-int cmdq_pkt_finalize(struct cmdq_pkt *pkt);
+int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa);
/**
- * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ
- * packet and call back at the end of done packet
- * @pkt: the CMDQ packet
- * @cb: called at the end of done packet
- * @data: this data will pass back to cb
+ * cmdq_pkt_eoc() - Append EOC and ask GCE to generate an IRQ at end of execution
+ * @pkt: The CMDQ packet
*
- * Return: 0 for success; else the error code is returned
+ * Appends an End Of Code (EOC) command to the CMDQ packet and asks the GCE
+ * to generate an interrupt at the end of the execution of all commands in
+ * the pipeline.
+ * The EOC command is usually appended to the end of the pipeline to notify
+ * that all commands are done.
*
- * Trigger CMDQ to asynchronously execute the CMDQ packet and call back
- * at the end of done packet. Note that this is an ASYNC function. When the
- * function returned, it may or may not be finished.
+ * Return: 0 for success or negative error number
*/
-int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
- void *data);
+int cmdq_pkt_eoc(struct cmdq_pkt *pkt);
+
+#else /* IS_ENABLED(CONFIG_MTK_CMDQ) */
+
+static inline int cmdq_dev_get_client_reg(struct device *dev,
+ struct cmdq_client_reg *client_reg, int idx)
+{
+ return -ENODEV;
+}
+
+static inline struct cmdq_client *cmdq_mbox_create(struct device *dev, int index)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void cmdq_mbox_destroy(struct cmdq_client *client) { }
+
+static inline int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size)
+{
+ return -EINVAL;
+}
+
+static inline void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt) { }
+
+static inline int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value, u32 mask)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
+ u16 addr_low, u16 reg_idx)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
+ u16 addr_low, u16 src_reg_idx)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
+ u16 addr_low, u16 src_reg_idx, u32 mask)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
+ u16 addr_low, u32 value)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
+ u16 addr_low, u32 value, u32 mask)
+{
+ return -ENOENT;
+}
+
+static inline int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value, u32 mask)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa)
+{
+ return -EINVAL;
+}
+
+static inline int cmdq_pkt_eoc(struct cmdq_pkt *pkt)
+{
+ return -EINVAL;
+}
+
+#endif /* IS_ENABLED(CONFIG_MTK_CMDQ) */
#endif /* __MTK_CMDQ_H__ */
diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h
index 2228bf6133da..4885b065b849 100644
--- a/include/linux/soc/mediatek/mtk-mmsys.h
+++ b/include/linux/soc/mediatek/mtk-mmsys.h
@@ -6,9 +6,20 @@
#ifndef __MTK_MMSYS_H
#define __MTK_MMSYS_H
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/mtk-cmdq-mailbox.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
enum mtk_ddp_comp_id;
struct device;
+enum mtk_dpi_out_format_con {
+ MTK_DPI_RGB888_SDR_CON,
+ MTK_DPI_RGB888_DDR_CON,
+ MTK_DPI_RGB565_SDR_CON,
+ MTK_DPI_RGB565_DDR_CON
+};
+
enum mtk_ddp_comp_id {
DDP_COMPONENT_AAL0,
DDP_COMPONENT_AAL1,
@@ -16,26 +27,57 @@ enum mtk_ddp_comp_id {
DDP_COMPONENT_CCORR,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_COLOR1,
- DDP_COMPONENT_DITHER,
+ DDP_COMPONENT_DITHER0,
+ DDP_COMPONENT_DITHER1,
+ DDP_COMPONENT_DP_INTF0,
+ DDP_COMPONENT_DP_INTF1,
DDP_COMPONENT_DPI0,
DDP_COMPONENT_DPI1,
+ DDP_COMPONENT_DSC0,
+ DDP_COMPONENT_DSC1,
DDP_COMPONENT_DSI0,
DDP_COMPONENT_DSI1,
DDP_COMPONENT_DSI2,
DDP_COMPONENT_DSI3,
+ DDP_COMPONENT_ETHDR_MIXER,
DDP_COMPONENT_GAMMA,
+ DDP_COMPONENT_MDP_RDMA0,
+ DDP_COMPONENT_MDP_RDMA1,
+ DDP_COMPONENT_MDP_RDMA2,
+ DDP_COMPONENT_MDP_RDMA3,
+ DDP_COMPONENT_MDP_RDMA4,
+ DDP_COMPONENT_MDP_RDMA5,
+ DDP_COMPONENT_MDP_RDMA6,
+ DDP_COMPONENT_MDP_RDMA7,
+ DDP_COMPONENT_MERGE0,
+ DDP_COMPONENT_MERGE1,
+ DDP_COMPONENT_MERGE2,
+ DDP_COMPONENT_MERGE3,
+ DDP_COMPONENT_MERGE4,
+ DDP_COMPONENT_MERGE5,
DDP_COMPONENT_OD0,
DDP_COMPONENT_OD1,
DDP_COMPONENT_OVL0,
DDP_COMPONENT_OVL_2L0,
DDP_COMPONENT_OVL_2L1,
+ DDP_COMPONENT_OVL_2L2,
DDP_COMPONENT_OVL1,
+ DDP_COMPONENT_PADDING0,
+ DDP_COMPONENT_PADDING1,
+ DDP_COMPONENT_PADDING2,
+ DDP_COMPONENT_PADDING3,
+ DDP_COMPONENT_PADDING4,
+ DDP_COMPONENT_PADDING5,
+ DDP_COMPONENT_PADDING6,
+ DDP_COMPONENT_PADDING7,
+ DDP_COMPONENT_POSTMASK0,
DDP_COMPONENT_PWM0,
DDP_COMPONENT_PWM1,
DDP_COMPONENT_PWM2,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_RDMA1,
DDP_COMPONENT_RDMA2,
+ DDP_COMPONENT_RDMA4,
DDP_COMPONENT_UFOE,
DDP_COMPONENT_WDMA0,
DDP_COMPONENT_WDMA1,
@@ -50,4 +92,24 @@ void mtk_mmsys_ddp_disconnect(struct device *dev,
enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next);
+void mtk_mmsys_ddp_dpi_fmt_config(struct device *dev, u32 val);
+
+void mtk_mmsys_merge_async_config(struct device *dev, int idx, int width,
+ int height, struct cmdq_pkt *cmdq_pkt);
+
+void mtk_mmsys_hdr_config(struct device *dev, int be_width, int be_height,
+ struct cmdq_pkt *cmdq_pkt);
+
+void mtk_mmsys_mixer_in_config(struct device *dev, int idx, bool alpha_sel, u16 alpha,
+ u8 mode, u32 biwidth, struct cmdq_pkt *cmdq_pkt);
+
+void mtk_mmsys_mixer_in_channel_swap(struct device *dev, int idx, bool channel_swap,
+ struct cmdq_pkt *cmdq_pkt);
+
+void mtk_mmsys_vpp_rsz_merge_config(struct device *dev, u32 id, bool enable,
+ struct cmdq_pkt *cmdq_pkt);
+
+void mtk_mmsys_vpp_rsz_dcm_config(struct device *dev, bool enable,
+ struct cmdq_pkt *cmdq_pkt);
+
#endif /* __MTK_MMSYS_H */
diff --git a/include/linux/soc/mediatek/mtk-mutex.h b/include/linux/soc/mediatek/mtk-mutex.h
index 6fe4ffbde290..635218e3ac68 100644
--- a/include/linux/soc/mediatek/mtk-mutex.h
+++ b/include/linux/soc/mediatek/mtk-mutex.h
@@ -10,11 +10,70 @@ struct regmap;
struct device;
struct mtk_mutex;
+enum mtk_mutex_mod_index {
+ /* MDP table index */
+ MUTEX_MOD_IDX_MDP_RDMA0,
+ MUTEX_MOD_IDX_MDP_RSZ0,
+ MUTEX_MOD_IDX_MDP_RSZ1,
+ MUTEX_MOD_IDX_MDP_TDSHP0,
+ MUTEX_MOD_IDX_MDP_WROT0,
+ MUTEX_MOD_IDX_MDP_WDMA,
+ MUTEX_MOD_IDX_MDP_AAL0,
+ MUTEX_MOD_IDX_MDP_CCORR0,
+ MUTEX_MOD_IDX_MDP_HDR0,
+ MUTEX_MOD_IDX_MDP_COLOR0,
+ MUTEX_MOD_IDX_MDP_RDMA1,
+ MUTEX_MOD_IDX_MDP_RDMA2,
+ MUTEX_MOD_IDX_MDP_RDMA3,
+ MUTEX_MOD_IDX_MDP_STITCH0,
+ MUTEX_MOD_IDX_MDP_FG0,
+ MUTEX_MOD_IDX_MDP_FG1,
+ MUTEX_MOD_IDX_MDP_FG2,
+ MUTEX_MOD_IDX_MDP_FG3,
+ MUTEX_MOD_IDX_MDP_HDR1,
+ MUTEX_MOD_IDX_MDP_HDR2,
+ MUTEX_MOD_IDX_MDP_HDR3,
+ MUTEX_MOD_IDX_MDP_AAL1,
+ MUTEX_MOD_IDX_MDP_AAL2,
+ MUTEX_MOD_IDX_MDP_AAL3,
+ MUTEX_MOD_IDX_MDP_RSZ2,
+ MUTEX_MOD_IDX_MDP_RSZ3,
+ MUTEX_MOD_IDX_MDP_MERGE2,
+ MUTEX_MOD_IDX_MDP_MERGE3,
+ MUTEX_MOD_IDX_MDP_TDSHP1,
+ MUTEX_MOD_IDX_MDP_TDSHP2,
+ MUTEX_MOD_IDX_MDP_TDSHP3,
+ MUTEX_MOD_IDX_MDP_COLOR1,
+ MUTEX_MOD_IDX_MDP_COLOR2,
+ MUTEX_MOD_IDX_MDP_COLOR3,
+ MUTEX_MOD_IDX_MDP_OVL0,
+ MUTEX_MOD_IDX_MDP_OVL1,
+ MUTEX_MOD_IDX_MDP_PAD0,
+ MUTEX_MOD_IDX_MDP_PAD1,
+ MUTEX_MOD_IDX_MDP_PAD2,
+ MUTEX_MOD_IDX_MDP_PAD3,
+ MUTEX_MOD_IDX_MDP_TCC0,
+ MUTEX_MOD_IDX_MDP_TCC1,
+ MUTEX_MOD_IDX_MDP_WROT1,
+ MUTEX_MOD_IDX_MDP_WROT2,
+ MUTEX_MOD_IDX_MDP_WROT3,
+
+ MUTEX_MOD_IDX_MAX /* ALWAYS keep at the end */
+};
+
+enum mtk_mutex_sof_index {
+ MUTEX_SOF_IDX_SINGLE_MODE,
+
+ MUTEX_SOF_IDX_MAX /* ALWAYS keep at the end */
+};
+
struct mtk_mutex *mtk_mutex_get(struct device *dev);
int mtk_mutex_prepare(struct mtk_mutex *mutex);
void mtk_mutex_add_comp(struct mtk_mutex *mutex,
enum mtk_ddp_comp_id id);
void mtk_mutex_enable(struct mtk_mutex *mutex);
+int mtk_mutex_enable_by_cmdq(struct mtk_mutex *mutex,
+ void *pkt);
void mtk_mutex_disable(struct mtk_mutex *mutex);
void mtk_mutex_remove_comp(struct mtk_mutex *mutex,
enum mtk_ddp_comp_id id);
@@ -22,5 +81,10 @@ void mtk_mutex_unprepare(struct mtk_mutex *mutex);
void mtk_mutex_put(struct mtk_mutex *mutex);
void mtk_mutex_acquire(struct mtk_mutex *mutex);
void mtk_mutex_release(struct mtk_mutex *mutex);
+int mtk_mutex_write_mod(struct mtk_mutex *mutex,
+ enum mtk_mutex_mod_index idx,
+ bool clear);
+int mtk_mutex_write_sof(struct mtk_mutex *mutex,
+ enum mtk_mutex_sof_index idx);
#endif /* MTK_MUTEX_H */
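The mutex API above follows a get/prepare/add/enable lifecycle; a hedged sketch for a hypothetical OVL0 -> RDMA0 path (the example_* names are illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/mediatek/mtk-mutex.h>

/* Illustrative caller: serialize OVL0 -> RDMA0 under one mutex. */
static int example_enable_path(struct device *dev)
{
	struct mtk_mutex *mutex = mtk_mutex_get(dev);
	int ret;

	if (IS_ERR(mutex))
		return PTR_ERR(mutex);

	ret = mtk_mutex_prepare(mutex);
	if (ret)
		goto out_put;

	mtk_mutex_add_comp(mutex, DDP_COMPONENT_OVL0);
	mtk_mutex_add_comp(mutex, DDP_COMPONENT_RDMA0);
	mtk_mutex_enable(mutex);
	return 0;

out_put:
	mtk_mutex_put(mutex);
	return ret;
}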
diff --git a/include/linux/soc/mediatek/mtk_sip_svc.h b/include/linux/soc/mediatek/mtk_sip_svc.h
index 082398e0cfb1..abe24a73ee19 100644
--- a/include/linux/soc/mediatek/mtk_sip_svc.h
+++ b/include/linux/soc/mediatek/mtk_sip_svc.h
@@ -22,4 +22,10 @@
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, MTK_SIP_SMC_CONVENTION, \
ARM_SMCCC_OWNER_SIP, fn_id)
+/* DVFSRC SMC calls */
+#define MTK_SIP_DVFSRC_VCOREFS_CONTROL MTK_SIP_SMC_CMD(0x506)
+
+/* IOMMU related SMC call */
+#define MTK_SIP_KERNEL_IOMMU_CONTROL MTK_SIP_SMC_CMD(0x514)
+
#endif
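Both new IDs are ordinary SiP fast calls; a hedged sketch of invoking the DVFSRC control call, where the sub-command 0 and argument layout are placeholders rather than documented ABI:

#include <linux/arm-smccc.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

/* Illustrative SMC invocation; sub-command and argument are made up. */
static long example_dvfsrc_smc(unsigned long arg)
{
	struct arm_smccc_res res;

	arm_smccc_smc(MTK_SIP_DVFSRC_VCOREFS_CONTROL, 0, arg, 0, 0, 0, 0, 0,
		      &res);
	return res.a0;
}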
diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
new file mode 100644
index 000000000000..3fa93bd65004
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -0,0 +1,333 @@
+#ifndef __MTK_WED_H
+#define __MTK_WED_H
+
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/regmap.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#define MTK_WED_TX_QUEUES 2
+#define MTK_WED_RX_QUEUES 2
+#define MTK_WED_RX_PAGE_QUEUES 3
+
+#define WED_WO_STA_REC 0x6
+
+struct mtk_wed_hw;
+struct mtk_wdma_desc;
+
+enum mtk_wed_wo_cmd {
+ MTK_WED_WO_CMD_WED_CFG,
+ MTK_WED_WO_CMD_WED_RX_STAT,
+ MTK_WED_WO_CMD_RRO_SER,
+ MTK_WED_WO_CMD_DBG_INFO,
+ MTK_WED_WO_CMD_DEV_INFO,
+ MTK_WED_WO_CMD_BSS_INFO,
+ MTK_WED_WO_CMD_STA_REC,
+ MTK_WED_WO_CMD_DEV_INFO_DUMP,
+ MTK_WED_WO_CMD_BSS_INFO_DUMP,
+ MTK_WED_WO_CMD_STA_REC_DUMP,
+ MTK_WED_WO_CMD_BA_INFO_DUMP,
+ MTK_WED_WO_CMD_FBCMD_Q_DUMP,
+ MTK_WED_WO_CMD_FW_LOG_CTRL,
+ MTK_WED_WO_CMD_LOG_FLUSH,
+ MTK_WED_WO_CMD_CHANGE_STATE,
+ MTK_WED_WO_CMD_CPU_STATS_ENABLE,
+ MTK_WED_WO_CMD_CPU_STATS_DUMP,
+ MTK_WED_WO_CMD_EXCEPTION_INIT,
+ MTK_WED_WO_CMD_PROF_CTRL,
+ MTK_WED_WO_CMD_STA_BA_DUMP,
+ MTK_WED_WO_CMD_BA_CTRL_DUMP,
+ MTK_WED_WO_CMD_RXCNT_CTRL,
+ MTK_WED_WO_CMD_RXCNT_INFO,
+ MTK_WED_WO_CMD_SET_CAP,
+ MTK_WED_WO_CMD_CCIF_RING_DUMP,
+ MTK_WED_WO_CMD_WED_END
+};
+
+struct mtk_wed_bm_desc {
+ __le32 buf0;
+ __le32 token;
+} __packed __aligned(4);
+
+enum mtk_wed_bus_type {
+ MTK_WED_BUS_PCIE,
+ MTK_WED_BUS_AXI,
+};
+
+#define MTK_WED_RING_CONFIGURED BIT(0)
+struct mtk_wed_ring {
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+ u32 desc_size;
+ int size;
+ u32 flags;
+
+ u32 reg_base;
+ void __iomem *wpdma;
+};
+
+struct mtk_wed_wo_rx_stats {
+ __le16 wlan_idx;
+ __le16 tid;
+ __le32 rx_pkt_cnt;
+ __le32 rx_byte_cnt;
+ __le32 rx_err_cnt;
+ __le32 rx_drop_cnt;
+};
+
+struct mtk_wed_buf {
+ void *p;
+ dma_addr_t phy_addr;
+};
+
+struct mtk_wed_device {
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ const struct mtk_wed_ops *ops;
+ struct device *dev;
+ struct mtk_wed_hw *hw;
+ bool init_done, running;
+ int wdma_idx;
+ int irq;
+ u8 version;
+
+ /* used by wlan driver */
+ u32 rev_id;
+
+ struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
+ struct mtk_wed_ring rx_ring[MTK_WED_RX_QUEUES];
+ struct mtk_wed_ring txfree_ring;
+ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
+ struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
+ struct mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES];
+ struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES];
+ struct mtk_wed_ring ind_cmd_ring;
+
+ struct {
+ int size;
+ struct mtk_wed_buf *pages;
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+ } tx_buf_ring;
+
+ struct {
+ int size;
+ struct mtk_wed_bm_desc *desc;
+ dma_addr_t desc_phys;
+ } rx_buf_ring;
+
+ struct {
+ struct mtk_wed_ring ring;
+ dma_addr_t miod_phys;
+ dma_addr_t fdbk_phys;
+ } rro;
+
+ struct {
+ int size;
+ struct mtk_wed_buf *pages;
+ struct mtk_wed_bm_desc *desc;
+ dma_addr_t desc_phys;
+ } hw_rro;
+
+ /* filled by driver: */
+ struct {
+ union {
+ struct platform_device *platform_dev;
+ struct pci_dev *pci_dev;
+ };
+		enum mtk_wed_bus_type bus_type;
+ void __iomem *base;
+ u32 phy_base;
+ u32 id;
+
+ u32 wpdma_phys;
+ u32 wpdma_int;
+ u32 wpdma_mask;
+ u32 wpdma_tx;
+ u32 wpdma_txfree;
+ u32 wpdma_rx_glo;
+ u32 wpdma_rx[MTK_WED_RX_QUEUES];
+ u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
+ u32 wpdma_rx_pg;
+
+ bool wcid_512;
+ bool hw_rro;
+ bool msi;
+ bool hif2;
+
+ u16 token_start;
+ unsigned int nbuf;
+ unsigned int rx_nbuf;
+ unsigned int rx_npkt;
+ unsigned int rx_size;
+ unsigned int amsdu_max_len;
+
+ u8 tx_tbit[MTK_WED_TX_QUEUES];
+ u8 rx_tbit[MTK_WED_RX_QUEUES];
+ u8 rro_rx_tbit[MTK_WED_RX_QUEUES];
+ u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES];
+ u8 txfree_tbit;
+ u8 amsdu_max_subframes;
+
+ struct {
+ u8 se_group_nums;
+ u16 win_size;
+ u16 particular_sid;
+ u32 ack_sn_addr;
+ dma_addr_t particular_se_phys;
+ dma_addr_t addr_elem_phys[1024];
+ } ind_cmd;
+
+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
+ int (*offload_enable)(struct mtk_wed_device *wed);
+ void (*offload_disable)(struct mtk_wed_device *wed);
+ u32 (*init_rx_buf)(struct mtk_wed_device *wed, int size);
+ void (*release_rx_buf)(struct mtk_wed_device *wed);
+ void (*update_wo_rx_stats)(struct mtk_wed_device *wed,
+ struct mtk_wed_wo_rx_stats *stats);
+ int (*reset)(struct mtk_wed_device *wed);
+ void (*reset_complete)(struct mtk_wed_device *wed);
+ } wlan;
+#endif
+};
+
+struct mtk_wed_ops {
+ int (*attach)(struct mtk_wed_device *dev) __releases(RCU);
+ int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ void __iomem *regs, bool reset);
+ int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ void __iomem *regs, bool reset);
+ int (*txfree_ring_setup)(struct mtk_wed_device *dev,
+ void __iomem *regs);
+ int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
+ void *data, int len);
+ void (*detach)(struct mtk_wed_device *dev);
+ void (*ppe_check)(struct mtk_wed_device *dev, struct sk_buff *skb,
+ u32 reason, u32 hash);
+
+ void (*stop)(struct mtk_wed_device *dev);
+ void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
+ void (*reset_dma)(struct mtk_wed_device *dev);
+
+ u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg);
+ void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val);
+
+ u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
+ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
+ int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
+ enum tc_setup_type type, void *type_data);
+ void (*start_hw_rro)(struct mtk_wed_device *dev, u32 irq_mask,
+ bool reset);
+ void (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ void __iomem *regs);
+ void (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ void __iomem *regs);
+ int (*ind_rx_ring_setup)(struct mtk_wed_device *dev,
+ void __iomem *regs);
+};
+
+extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+
+static inline int
+mtk_wed_device_attach(struct mtk_wed_device *dev)
+{
+ int ret = -ENODEV;
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ rcu_read_lock();
+ dev->ops = rcu_dereference(mtk_soc_wed_ops);
+ if (dev->ops)
+ ret = dev->ops->attach(dev);
+ else
+ rcu_read_unlock();
+
+ if (ret)
+ dev->ops = NULL;
+#endif
+
+ return ret;
+}
+
+static inline bool mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if (dev->version == 3)
+ return dev->wlan.hw_rro;
+
+ return dev->version != 1;
+#else
+ return false;
+#endif
+}
+
+static inline bool mtk_wed_is_amsdu_supported(struct mtk_wed_device *dev)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ return dev->version == 3;
+#else
+ return false;
+#endif
+}
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+#define mtk_wed_device_active(_dev) !!(_dev)->ops
+#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
+#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) \
+ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs, _reset)
+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
+ (_dev)->ops->txfree_ring_setup(_dev, _regs)
+#define mtk_wed_device_reg_read(_dev, _reg) \
+ (_dev)->ops->reg_read(_dev, _reg)
+#define mtk_wed_device_reg_write(_dev, _reg, _val) \
+ (_dev)->ops->reg_write(_dev, _reg, _val)
+#define mtk_wed_device_irq_get(_dev, _mask) \
+ (_dev)->ops->irq_get(_dev, _mask)
+#define mtk_wed_device_irq_set_mask(_dev, _mask) \
+ (_dev)->ops->irq_set_mask(_dev, _mask)
+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) \
+ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs, _reset)
+#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
+ (_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
+ (_dev)->ops->msg_update(_dev, _id, _msg, _len)
+#define mtk_wed_device_stop(_dev) (_dev)->ops->stop(_dev)
+#define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
+#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
+ (_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
+#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) \
+ (_dev)->ops->start_hw_rro(_dev, _mask, _reset)
+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \
+ (_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \
+ (_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \
+ (_dev)->ops->ind_rx_ring_setup(_dev, _regs)
+
+#else
+static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
+{
+ return false;
+}
+#define mtk_wed_device_detach(_dev) do {} while (0)
+#define mtk_wed_device_start(_dev, _mask) do {} while (0)
+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) -ENODEV
+#define mtk_wed_device_reg_read(_dev, _reg) 0
+#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
+#define mtk_wed_device_irq_get(_dev, _mask) 0
+#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
+#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) do {} while (0)
+#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
+#define mtk_wed_device_stop(_dev) do {} while (0)
+#define mtk_wed_device_dma_reset(_dev) do {} while (0)
+#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP
+#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) do {} while (0)
+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV
+#endif
+
+#endif
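A hedged sketch of the attach handshake above from the WLAN side. It assumes CONFIG_NET_MEDIATEK_SOC_WED (dev->wlan only exists then); the buffer count and all-ones irq mask are illustrative, and a real driver would also fill the init_buf/offload callbacks before attaching:

#include <linux/pci.h>
#include <linux/soc/mediatek/mtk_wed.h>

/* Illustrative WLAN-driver fragment; -ENODEV from attach simply means
 * no WED block is present and the driver runs without offload. */
static void example_wed_init(struct mtk_wed_device *wed,
			     struct pci_dev *pdev)
{
	wed->wlan.bus_type = MTK_WED_BUS_PCIE;
	wed->wlan.pci_dev = pdev;
	wed->wlan.nbuf = 4096;		/* illustrative sizing */
	wed->wlan.token_start = 0;

	if (mtk_wed_device_attach(wed))
		return;			/* run without WED offload */

	mtk_wed_device_start(wed, ~0);
}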
diff --git a/include/linux/soc/mmp/cputype.h b/include/linux/soc/mmp/cputype.h
index 221790761e8e..f13d127fadc4 100644
--- a/include/linux/soc/mmp/cputype.h
+++ b/include/linux/soc/mmp/cputype.h
@@ -26,29 +26,7 @@
extern unsigned int mmp_chip_id;
-#ifdef CONFIG_CPU_PXA168
-static inline int cpu_is_pxa168(void)
-{
- return (((read_cpuid_id() >> 8) & 0xff) == 0x84) &&
- ((mmp_chip_id & 0xfff) == 0x168);
-}
-#else
-#define cpu_is_pxa168() (0)
-#endif
-
-/* cpu_is_pxa910() is shared on both pxa910 and pxa920 */
-#ifdef CONFIG_CPU_PXA910
-static inline int cpu_is_pxa910(void)
-{
- return (((read_cpuid_id() >> 8) & 0xff) == 0x84) &&
- (((mmp_chip_id & 0xfff) == 0x910) ||
- ((mmp_chip_id & 0xfff) == 0x920));
-}
-#else
-#define cpu_is_pxa910() (0)
-#endif
-
-#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT)
+#if defined(CONFIG_MACH_MMP2_DT)
static inline int cpu_is_mmp2(void)
{
return (((read_cpuid_id() >> 8) & 0xff) == 0x58) &&
diff --git a/include/linux/soc/pxa/cpu.h b/include/linux/soc/pxa/cpu.h
new file mode 100644
index 000000000000..5782450ee45c
--- /dev/null
+++ b/include/linux/soc/pxa/cpu.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Author: Nicolas Pitre
+ * Created: Jun 15, 2001
+ * Copyright: MontaVista Software Inc.
+ */
+
+#ifndef __SOC_PXA_CPU_H
+#define __SOC_PXA_CPU_H
+
+#ifdef CONFIG_ARM
+#include <asm/cputype.h>
+#endif
+
+/*
+ * CPU Stepping CPU_ID JTAG_ID
+ *
+ * PXA210 B0 0x69052922 0x2926C013
+ * PXA210 B1 0x69052923 0x3926C013
+ * PXA210 B2 0x69052924 0x4926C013
+ * PXA210 C0 0x69052D25 0x5926C013
+ *
+ * PXA250 A0 0x69052100 0x09264013
+ * PXA250 A1 0x69052101 0x19264013
+ * PXA250 B0 0x69052902 0x29264013
+ * PXA250 B1 0x69052903 0x39264013
+ * PXA250 B2 0x69052904 0x49264013
+ * PXA250 C0 0x69052D05 0x59264013
+ *
+ * PXA255 A0 0x69052D06 0x69264013
+ *
+ * PXA26x A0 0x69052903 0x39264013
+ * PXA26x B0 0x69052D05 0x59264013
+ *
+ * PXA27x A0 0x69054110 0x09265013
+ * PXA27x A1 0x69054111 0x19265013
+ * PXA27x B0 0x69054112 0x29265013
+ * PXA27x B1 0x69054113 0x39265013
+ * PXA27x C0 0x69054114 0x49265013
+ * PXA27x C5 0x69054117 0x79265013
+ *
+ * PXA30x A0 0x69056880 0x0E648013
+ * PXA30x A1 0x69056881 0x1E648013
+ * PXA31x A0 0x69056890 0x0E649013
+ * PXA31x A1 0x69056891 0x1E649013
+ * PXA31x A2 0x69056892 0x2E649013
+ * PXA32x B1 0x69056825 0x5E642013
+ * PXA32x B2 0x69056826 0x6E642013
+ *
+ * PXA930 B0 0x69056835 0x5E643013
+ * PXA930 B1 0x69056837 0x7E643013
+ * PXA930 B2 0x69056838 0x8E643013
+ *
+ * PXA935 A0 0x56056931 0x1E653013
+ * PXA935 B0 0x56056936 0x6E653013
+ * PXA935 B1 0x56056938 0x8E653013
+ */
+#ifdef CONFIG_PXA25x
+#define __cpu_is_pxa210(id) \
+ ({ \
+ unsigned int _id = (id) & 0xf3f0; \
+ _id == 0x2120; \
+ })
+
+#define __cpu_is_pxa250(id) \
+ ({ \
+ unsigned int _id = (id) & 0xf3ff; \
+ _id <= 0x2105; \
+ })
+
+#define __cpu_is_pxa255(id) \
+ ({ \
+ unsigned int _id = (id) & 0xffff; \
+ _id == 0x2d06; \
+ })
+
+#define __cpu_is_pxa25x(id) \
+ ({ \
+ unsigned int _id = (id) & 0xf300; \
+ _id == 0x2100; \
+ })
+#else
+#define __cpu_is_pxa210(id) (0)
+#define __cpu_is_pxa250(id) (0)
+#define __cpu_is_pxa255(id) (0)
+#define __cpu_is_pxa25x(id) (0)
+#endif
+
+#ifdef CONFIG_PXA27x
+#define __cpu_is_pxa27x(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x411; \
+ })
+#else
+#define __cpu_is_pxa27x(id) (0)
+#endif
+
+#ifdef CONFIG_CPU_PXA300
+#define __cpu_is_pxa300(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x688; \
+ })
+#else
+#define __cpu_is_pxa300(id) (0)
+#endif
+
+#ifdef CONFIG_CPU_PXA310
+#define __cpu_is_pxa310(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x689; \
+ })
+#else
+#define __cpu_is_pxa310(id) (0)
+#endif
+
+#ifdef CONFIG_CPU_PXA320
+#define __cpu_is_pxa320(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x603 || _id == 0x682; \
+ })
+#else
+#define __cpu_is_pxa320(id) (0)
+#endif
+
+#ifdef CONFIG_CPU_PXA930
+#define __cpu_is_pxa930(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x683; \
+ })
+#else
+#define __cpu_is_pxa930(id) (0)
+#endif
+
+#ifdef CONFIG_CPU_PXA935
+#define __cpu_is_pxa935(id) \
+ ({ \
+ unsigned int _id = (id) >> 4 & 0xfff; \
+ _id == 0x693; \
+ })
+#else
+#define __cpu_is_pxa935(id) (0)
+#endif
+
+#define cpu_is_pxa210() \
+ ({ \
+ __cpu_is_pxa210(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa250() \
+ ({ \
+ __cpu_is_pxa250(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa255() \
+ ({ \
+ __cpu_is_pxa255(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa25x() \
+ ({ \
+ __cpu_is_pxa25x(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa27x() \
+ ({ \
+ __cpu_is_pxa27x(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa300() \
+ ({ \
+ __cpu_is_pxa300(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa310() \
+ ({ \
+ __cpu_is_pxa310(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa320() \
+ ({ \
+ __cpu_is_pxa320(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa930() \
+ ({ \
+ __cpu_is_pxa930(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa935() \
+ ({ \
+ __cpu_is_pxa935(read_cpuid_id()); \
+ })
+
+
+/*
+ * CPUID Core Generation Bit
+ * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
+ */
+#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
+#define __cpu_is_pxa2xx(id) \
+ ({ \
+ unsigned int _id = (id) >> 13 & 0x7; \
+ _id <= 0x2; \
+ })
+#else
+#define __cpu_is_pxa2xx(id) (0)
+#endif
+
+#ifdef CONFIG_PXA3xx
+#define __cpu_is_pxa3xx(id) \
+ ({ \
+ __cpu_is_pxa300(id) \
+ || __cpu_is_pxa310(id) \
+ || __cpu_is_pxa320(id) \
+ || __cpu_is_pxa93x(id); \
+ })
+#else
+#define __cpu_is_pxa3xx(id) (0)
+#endif
+
+#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
+#define __cpu_is_pxa93x(id) \
+ ({ \
+ __cpu_is_pxa930(id) \
+ || __cpu_is_pxa935(id); \
+ })
+#else
+#define __cpu_is_pxa93x(id) (0)
+#endif
+
+#define cpu_is_pxa2xx() \
+ ({ \
+ __cpu_is_pxa2xx(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa3xx() \
+ ({ \
+ __cpu_is_pxa3xx(read_cpuid_id()); \
+ })
+
+#define cpu_is_pxa93x() \
+ ({ \
+ __cpu_is_pxa93x(read_cpuid_id()); \
+ })
+
+#endif
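Because each cpu_is_*() predicate compiles to constant 0 when its CONFIG option is off, dead branches drop out at build time; an illustrative dispatch (the sizes are made up for the example):

#include <linux/soc/pxa/cpu.h>

/* Illustrative run-time dispatch on the CPU family. */
static unsigned int example_fifo_size(void)
{
	if (cpu_is_pxa25x())
		return 16;
	if (cpu_is_pxa27x())
		return 32;
	return cpu_is_pxa3xx() ? 64 : 0;
}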
diff --git a/include/linux/soc/pxa/mfp.h b/include/linux/soc/pxa/mfp.h
new file mode 100644
index 000000000000..39779cbed0c0
--- /dev/null
+++ b/include/linux/soc/pxa/mfp.h
@@ -0,0 +1,470 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Common Multi-Function Pin Definitions
+ *
+ * Copyright (C) 2007 Marvell International Ltd.
+ *
+ * 2007-8-21: eric miao <eric.miao@marvell.com>
+ * initial version
+ */
+
+#ifndef __ASM_PLAT_MFP_H
+#define __ASM_PLAT_MFP_H
+
+#define mfp_to_gpio(m) ((m) % 256)
+
+/* list of all the configurable MFP pins */
+enum {
+ MFP_PIN_INVALID = -1,
+
+ MFP_PIN_GPIO0 = 0,
+ MFP_PIN_GPIO1,
+ MFP_PIN_GPIO2,
+ MFP_PIN_GPIO3,
+ MFP_PIN_GPIO4,
+ MFP_PIN_GPIO5,
+ MFP_PIN_GPIO6,
+ MFP_PIN_GPIO7,
+ MFP_PIN_GPIO8,
+ MFP_PIN_GPIO9,
+ MFP_PIN_GPIO10,
+ MFP_PIN_GPIO11,
+ MFP_PIN_GPIO12,
+ MFP_PIN_GPIO13,
+ MFP_PIN_GPIO14,
+ MFP_PIN_GPIO15,
+ MFP_PIN_GPIO16,
+ MFP_PIN_GPIO17,
+ MFP_PIN_GPIO18,
+ MFP_PIN_GPIO19,
+ MFP_PIN_GPIO20,
+ MFP_PIN_GPIO21,
+ MFP_PIN_GPIO22,
+ MFP_PIN_GPIO23,
+ MFP_PIN_GPIO24,
+ MFP_PIN_GPIO25,
+ MFP_PIN_GPIO26,
+ MFP_PIN_GPIO27,
+ MFP_PIN_GPIO28,
+ MFP_PIN_GPIO29,
+ MFP_PIN_GPIO30,
+ MFP_PIN_GPIO31,
+ MFP_PIN_GPIO32,
+ MFP_PIN_GPIO33,
+ MFP_PIN_GPIO34,
+ MFP_PIN_GPIO35,
+ MFP_PIN_GPIO36,
+ MFP_PIN_GPIO37,
+ MFP_PIN_GPIO38,
+ MFP_PIN_GPIO39,
+ MFP_PIN_GPIO40,
+ MFP_PIN_GPIO41,
+ MFP_PIN_GPIO42,
+ MFP_PIN_GPIO43,
+ MFP_PIN_GPIO44,
+ MFP_PIN_GPIO45,
+ MFP_PIN_GPIO46,
+ MFP_PIN_GPIO47,
+ MFP_PIN_GPIO48,
+ MFP_PIN_GPIO49,
+ MFP_PIN_GPIO50,
+ MFP_PIN_GPIO51,
+ MFP_PIN_GPIO52,
+ MFP_PIN_GPIO53,
+ MFP_PIN_GPIO54,
+ MFP_PIN_GPIO55,
+ MFP_PIN_GPIO56,
+ MFP_PIN_GPIO57,
+ MFP_PIN_GPIO58,
+ MFP_PIN_GPIO59,
+ MFP_PIN_GPIO60,
+ MFP_PIN_GPIO61,
+ MFP_PIN_GPIO62,
+ MFP_PIN_GPIO63,
+ MFP_PIN_GPIO64,
+ MFP_PIN_GPIO65,
+ MFP_PIN_GPIO66,
+ MFP_PIN_GPIO67,
+ MFP_PIN_GPIO68,
+ MFP_PIN_GPIO69,
+ MFP_PIN_GPIO70,
+ MFP_PIN_GPIO71,
+ MFP_PIN_GPIO72,
+ MFP_PIN_GPIO73,
+ MFP_PIN_GPIO74,
+ MFP_PIN_GPIO75,
+ MFP_PIN_GPIO76,
+ MFP_PIN_GPIO77,
+ MFP_PIN_GPIO78,
+ MFP_PIN_GPIO79,
+ MFP_PIN_GPIO80,
+ MFP_PIN_GPIO81,
+ MFP_PIN_GPIO82,
+ MFP_PIN_GPIO83,
+ MFP_PIN_GPIO84,
+ MFP_PIN_GPIO85,
+ MFP_PIN_GPIO86,
+ MFP_PIN_GPIO87,
+ MFP_PIN_GPIO88,
+ MFP_PIN_GPIO89,
+ MFP_PIN_GPIO90,
+ MFP_PIN_GPIO91,
+ MFP_PIN_GPIO92,
+ MFP_PIN_GPIO93,
+ MFP_PIN_GPIO94,
+ MFP_PIN_GPIO95,
+ MFP_PIN_GPIO96,
+ MFP_PIN_GPIO97,
+ MFP_PIN_GPIO98,
+ MFP_PIN_GPIO99,
+ MFP_PIN_GPIO100,
+ MFP_PIN_GPIO101,
+ MFP_PIN_GPIO102,
+ MFP_PIN_GPIO103,
+ MFP_PIN_GPIO104,
+ MFP_PIN_GPIO105,
+ MFP_PIN_GPIO106,
+ MFP_PIN_GPIO107,
+ MFP_PIN_GPIO108,
+ MFP_PIN_GPIO109,
+ MFP_PIN_GPIO110,
+ MFP_PIN_GPIO111,
+ MFP_PIN_GPIO112,
+ MFP_PIN_GPIO113,
+ MFP_PIN_GPIO114,
+ MFP_PIN_GPIO115,
+ MFP_PIN_GPIO116,
+ MFP_PIN_GPIO117,
+ MFP_PIN_GPIO118,
+ MFP_PIN_GPIO119,
+ MFP_PIN_GPIO120,
+ MFP_PIN_GPIO121,
+ MFP_PIN_GPIO122,
+ MFP_PIN_GPIO123,
+ MFP_PIN_GPIO124,
+ MFP_PIN_GPIO125,
+ MFP_PIN_GPIO126,
+ MFP_PIN_GPIO127,
+
+ MFP_PIN_GPIO128,
+ MFP_PIN_GPIO129,
+ MFP_PIN_GPIO130,
+ MFP_PIN_GPIO131,
+ MFP_PIN_GPIO132,
+ MFP_PIN_GPIO133,
+ MFP_PIN_GPIO134,
+ MFP_PIN_GPIO135,
+ MFP_PIN_GPIO136,
+ MFP_PIN_GPIO137,
+ MFP_PIN_GPIO138,
+ MFP_PIN_GPIO139,
+ MFP_PIN_GPIO140,
+ MFP_PIN_GPIO141,
+ MFP_PIN_GPIO142,
+ MFP_PIN_GPIO143,
+ MFP_PIN_GPIO144,
+ MFP_PIN_GPIO145,
+ MFP_PIN_GPIO146,
+ MFP_PIN_GPIO147,
+ MFP_PIN_GPIO148,
+ MFP_PIN_GPIO149,
+ MFP_PIN_GPIO150,
+ MFP_PIN_GPIO151,
+ MFP_PIN_GPIO152,
+ MFP_PIN_GPIO153,
+ MFP_PIN_GPIO154,
+ MFP_PIN_GPIO155,
+ MFP_PIN_GPIO156,
+ MFP_PIN_GPIO157,
+ MFP_PIN_GPIO158,
+ MFP_PIN_GPIO159,
+ MFP_PIN_GPIO160,
+ MFP_PIN_GPIO161,
+ MFP_PIN_GPIO162,
+ MFP_PIN_GPIO163,
+ MFP_PIN_GPIO164,
+ MFP_PIN_GPIO165,
+ MFP_PIN_GPIO166,
+ MFP_PIN_GPIO167,
+ MFP_PIN_GPIO168,
+ MFP_PIN_GPIO169,
+ MFP_PIN_GPIO170,
+ MFP_PIN_GPIO171,
+ MFP_PIN_GPIO172,
+ MFP_PIN_GPIO173,
+ MFP_PIN_GPIO174,
+ MFP_PIN_GPIO175,
+ MFP_PIN_GPIO176,
+ MFP_PIN_GPIO177,
+ MFP_PIN_GPIO178,
+ MFP_PIN_GPIO179,
+ MFP_PIN_GPIO180,
+ MFP_PIN_GPIO181,
+ MFP_PIN_GPIO182,
+ MFP_PIN_GPIO183,
+ MFP_PIN_GPIO184,
+ MFP_PIN_GPIO185,
+ MFP_PIN_GPIO186,
+ MFP_PIN_GPIO187,
+ MFP_PIN_GPIO188,
+ MFP_PIN_GPIO189,
+ MFP_PIN_GPIO190,
+ MFP_PIN_GPIO191,
+
+ MFP_PIN_GPIO255 = 255,
+
+ MFP_PIN_GPIO0_2,
+ MFP_PIN_GPIO1_2,
+ MFP_PIN_GPIO2_2,
+ MFP_PIN_GPIO3_2,
+ MFP_PIN_GPIO4_2,
+ MFP_PIN_GPIO5_2,
+ MFP_PIN_GPIO6_2,
+ MFP_PIN_GPIO7_2,
+ MFP_PIN_GPIO8_2,
+ MFP_PIN_GPIO9_2,
+ MFP_PIN_GPIO10_2,
+ MFP_PIN_GPIO11_2,
+ MFP_PIN_GPIO12_2,
+ MFP_PIN_GPIO13_2,
+ MFP_PIN_GPIO14_2,
+ MFP_PIN_GPIO15_2,
+ MFP_PIN_GPIO16_2,
+ MFP_PIN_GPIO17_2,
+
+ MFP_PIN_ULPI_STP,
+ MFP_PIN_ULPI_NXT,
+ MFP_PIN_ULPI_DIR,
+
+ MFP_PIN_nXCVREN,
+ MFP_PIN_DF_CLE_nOE,
+ MFP_PIN_DF_nADV1_ALE,
+ MFP_PIN_DF_SCLK_E,
+ MFP_PIN_DF_SCLK_S,
+ MFP_PIN_nBE0,
+ MFP_PIN_nBE1,
+ MFP_PIN_DF_nADV2_ALE,
+ MFP_PIN_DF_INT_RnB,
+ MFP_PIN_DF_nCS0,
+ MFP_PIN_DF_nCS1,
+ MFP_PIN_nLUA,
+ MFP_PIN_nLLA,
+ MFP_PIN_DF_nWE,
+ MFP_PIN_DF_ALE_nWE,
+ MFP_PIN_DF_nRE_nOE,
+ MFP_PIN_DF_ADDR0,
+ MFP_PIN_DF_ADDR1,
+ MFP_PIN_DF_ADDR2,
+ MFP_PIN_DF_ADDR3,
+ MFP_PIN_DF_IO0,
+ MFP_PIN_DF_IO1,
+ MFP_PIN_DF_IO2,
+ MFP_PIN_DF_IO3,
+ MFP_PIN_DF_IO4,
+ MFP_PIN_DF_IO5,
+ MFP_PIN_DF_IO6,
+ MFP_PIN_DF_IO7,
+ MFP_PIN_DF_IO8,
+ MFP_PIN_DF_IO9,
+ MFP_PIN_DF_IO10,
+ MFP_PIN_DF_IO11,
+ MFP_PIN_DF_IO12,
+ MFP_PIN_DF_IO13,
+ MFP_PIN_DF_IO14,
+ MFP_PIN_DF_IO15,
+ MFP_PIN_DF_nCS0_SM_nCS2,
+ MFP_PIN_DF_nCS1_SM_nCS3,
+ MFP_PIN_SM_nCS0,
+ MFP_PIN_SM_nCS1,
+ MFP_PIN_DF_WEn,
+ MFP_PIN_DF_REn,
+ MFP_PIN_DF_CLE_SM_OEn,
+ MFP_PIN_DF_ALE_SM_WEn,
+ MFP_PIN_DF_RDY0,
+ MFP_PIN_DF_RDY1,
+
+ MFP_PIN_SM_SCLK,
+ MFP_PIN_SM_BE0,
+ MFP_PIN_SM_BE1,
+ MFP_PIN_SM_ADV,
+ MFP_PIN_SM_ADVMUX,
+ MFP_PIN_SM_RDY,
+
+ MFP_PIN_MMC1_DAT7,
+ MFP_PIN_MMC1_DAT6,
+ MFP_PIN_MMC1_DAT5,
+ MFP_PIN_MMC1_DAT4,
+ MFP_PIN_MMC1_DAT3,
+ MFP_PIN_MMC1_DAT2,
+ MFP_PIN_MMC1_DAT1,
+ MFP_PIN_MMC1_DAT0,
+ MFP_PIN_MMC1_CMD,
+ MFP_PIN_MMC1_CLK,
+ MFP_PIN_MMC1_CD,
+ MFP_PIN_MMC1_WP,
+
+ /* additional pins on PXA930 */
+ MFP_PIN_GSIM_UIO,
+ MFP_PIN_GSIM_UCLK,
+ MFP_PIN_GSIM_UDET,
+ MFP_PIN_GSIM_nURST,
+ MFP_PIN_PMIC_INT,
+ MFP_PIN_RDY,
+
+ /* additional pins on MMP2 */
+ MFP_PIN_TWSI1_SCL,
+ MFP_PIN_TWSI1_SDA,
+ MFP_PIN_TWSI4_SCL,
+ MFP_PIN_TWSI4_SDA,
+ MFP_PIN_CLK_REQ,
+
+ MFP_PIN_MAX,
+};
+
+/*
+ * a possible MFP configuration is represented by a 32-bit integer
+ *
+ * bit 0.. 9 - MFP Pin Number (1024 Pins Maximum)
+ * bit 10..12 - Alternate Function Selection
+ * bit 13..15 - Drive Strength
+ * bit 16..18 - Low Power Mode State
+ * bit 19..20 - Low Power Mode Edge Detection
+ * bit 21..23 - Run Mode Pull State
+ *
+ * to facilitate the definition, the following macros are provided
+ *
+ * MFP_CFG_DEFAULT - default MFP configuration value, with
+ * alternate function = 0,
+ * drive strength = fast 3mA (MFP_DS03X)
+ * low power mode = default
+ * edge detection = none
+ *
+ * MFP_CFG - default MFPR value with alternate function
+ * MFP_CFG_DRV - default MFPR value with alternate function and
+ * pin drive strength
+ * MFP_CFG_LPM - default MFPR value with alternate function and
+ * low power mode
+ * MFP_CFG_X - default MFPR value with alternate function,
+ * pin drive strength and low power mode
+ */
+
+typedef unsigned long mfp_cfg_t;
+
+#define MFP_PIN(x) ((x) & 0x3ff)
+
+#define MFP_AF0 (0x0 << 10)
+#define MFP_AF1 (0x1 << 10)
+#define MFP_AF2 (0x2 << 10)
+#define MFP_AF3 (0x3 << 10)
+#define MFP_AF4 (0x4 << 10)
+#define MFP_AF5 (0x5 << 10)
+#define MFP_AF6 (0x6 << 10)
+#define MFP_AF7 (0x7 << 10)
+#define MFP_AF_MASK (0x7 << 10)
+#define MFP_AF(x) (((x) >> 10) & 0x7)
+
+#define MFP_DS01X (0x0 << 13)
+#define MFP_DS02X (0x1 << 13)
+#define MFP_DS03X (0x2 << 13)
+#define MFP_DS04X (0x3 << 13)
+#define MFP_DS06X (0x4 << 13)
+#define MFP_DS08X (0x5 << 13)
+#define MFP_DS10X (0x6 << 13)
+#define MFP_DS13X (0x7 << 13)
+#define MFP_DS_MASK (0x7 << 13)
+#define MFP_DS(x) (((x) >> 13) & 0x7)
+
+#define MFP_LPM_DEFAULT (0x0 << 16)
+#define MFP_LPM_DRIVE_LOW (0x1 << 16)
+#define MFP_LPM_DRIVE_HIGH (0x2 << 16)
+#define MFP_LPM_PULL_LOW (0x3 << 16)
+#define MFP_LPM_PULL_HIGH (0x4 << 16)
+#define MFP_LPM_FLOAT (0x5 << 16)
+#define MFP_LPM_INPUT (0x6 << 16)
+#define MFP_LPM_STATE_MASK (0x7 << 16)
+#define MFP_LPM_STATE(x) (((x) >> 16) & 0x7)
+
+#define MFP_LPM_EDGE_NONE (0x0 << 19)
+#define MFP_LPM_EDGE_RISE (0x1 << 19)
+#define MFP_LPM_EDGE_FALL (0x2 << 19)
+#define MFP_LPM_EDGE_BOTH (0x3 << 19)
+#define MFP_LPM_EDGE_MASK (0x3 << 19)
+#define MFP_LPM_EDGE(x) (((x) >> 19) & 0x3)
+
+#define MFP_PULL_NONE (0x0 << 21)
+#define MFP_PULL_LOW (0x1 << 21)
+#define MFP_PULL_HIGH (0x2 << 21)
+#define MFP_PULL_BOTH (0x3 << 21)
+#define MFP_PULL_FLOAT (0x4 << 21)
+#define MFP_PULL_MASK (0x7 << 21)
+#define MFP_PULL(x) (((x) >> 21) & 0x7)
+
+#define MFP_CFG_DEFAULT (MFP_AF0 | MFP_DS03X | MFP_LPM_DEFAULT |\
+ MFP_LPM_EDGE_NONE | MFP_PULL_NONE)
+
+#define MFP_CFG(pin, af) \
+ ((MFP_CFG_DEFAULT & ~MFP_AF_MASK) |\
+ (MFP_PIN(MFP_PIN_##pin) | MFP_##af))
+
+#define MFP_CFG_DRV(pin, af, drv) \
+ ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_DS_MASK)) |\
+ (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_##drv))
+
+#define MFP_CFG_LPM(pin, af, lpm) \
+ ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_LPM_STATE_MASK)) |\
+ (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_LPM_##lpm))
+
+#define MFP_CFG_X(pin, af, drv, lpm) \
+ ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_DS_MASK | MFP_LPM_STATE_MASK)) |\
+ (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_##drv | MFP_LPM_##lpm))
+
+#if defined(CONFIG_PXA3xx) || defined(CONFIG_ARCH_MMP)
+/*
+ * Each MFP pin has an MFPR register. Since the offset of the
+ * register varies between processors, processor-specific code
+ * should initialize the pin offsets via mfp_init_base() and
+ * mfp_init_addr().
+ *
+ * mfp_init_base() - accepts a virtual base for all MFPR registers and
+ * initializes the MFP table to a default state
+ *
+ * mfp_init_addr() - accepts a table of "mfp_addr_map" structures, each
+ * representing a range of MFP pins from "start" to "end", with the offset
+ * beginning at "offset"; to define a single pin, let "end" = -1.
+ *
+ * use
+ *
+ * MFP_ADDR_X() to define a range of pins
+ * MFP_ADDR() to define a single pin
+ * MFP_ADDR_END to signal the end of pin offset definitions
+ */
+struct mfp_addr_map {
+ unsigned int start;
+ unsigned int end;
+ unsigned long offset;
+};
+
+#define MFP_ADDR_X(start, end, offset) \
+ { MFP_PIN_##start, MFP_PIN_##end, offset }
+
+#define MFP_ADDR(pin, offset) \
+ { MFP_PIN_##pin, -1, offset }
+
+#define MFP_ADDR_END { MFP_PIN_INVALID, 0 }
+
+void mfp_init_base(void __iomem *mfpr_base);
+void mfp_init_addr(struct mfp_addr_map *map);
+
+/*
+ * mfp_{read, write}() - for direct read/write access to the MFPR register
+ * mfp_config() - for configuring a group of MFPR registers
+ * mfp_config_lpm() - configuring all low power MFPR registers for suspend
+ * mfp_config_run() - configuring all run time MFPR registers after resume
+ */
+unsigned long mfp_read(int mfp);
+void mfp_write(int mfp, unsigned long mfpr_val);
+void mfp_config(unsigned long *mfp_cfgs, int num);
+void mfp_config_run(void);
+void mfp_config_lpm(void);
+#endif /* CONFIG_PXA3xx || CONFIG_ARCH_MMP */
+
+#endif /* __ASM_PLAT_MFP_H */
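Tying the bit layout and MFP_CFG* macros above together, a board file might describe and apply its pins as below. Pin and function choices are illustrative, and mfp_config() is only available under CONFIG_PXA3xx or CONFIG_ARCH_MMP:

#include <linux/kernel.h>
#include <linux/soc/pxa/mfp.h>

/* Illustrative pin table: GPIO9 as plain GPIO (AF0), GPIO10 on
 * alternate function 2 with 8 mA drive, GPIO11 pulled high in
 * low-power mode. */
static unsigned long example_pin_config[] = {
	MFP_CFG(GPIO9, AF0),
	MFP_CFG_DRV(GPIO10, AF2, DS08X),
	MFP_CFG_LPM(GPIO11, AF0, PULL_HIGH),
};

static void example_apply_pins(void)
{
	mfp_config(example_pin_config, ARRAY_SIZE(example_pin_config));
}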
diff --git a/include/linux/soc/pxa/smemc.h b/include/linux/soc/pxa/smemc.h
new file mode 100644
index 000000000000..4feb1dded3ec
--- /dev/null
+++ b/include/linux/soc/pxa/smemc.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __PXA_REGS_H
+#define __PXA_REGS_H
+
+#include <linux/types.h>
+
+void pxa_smemc_set_pcmcia_timing(int sock, u32 mcmem, u32 mcatt, u32 mcio);
+void pxa_smemc_set_pcmcia_socket(int nr);
+int pxa2xx_smemc_get_sdram_rows(void);
+unsigned int pxa3xx_smemc_get_memclkdiv(void);
+void __iomem *pxa_smemc_get_mdrefr(void);
+
+/*
+ * Once fully converted to the clock framework, all these functions should be
+ * removed, and replaced with a clk_get(NULL, "core").
+ */
+#ifdef CONFIG_PXA25x
+extern unsigned pxa25x_get_clk_frequency_khz(int);
+#else
+#define pxa25x_get_clk_frequency_khz(x) (0)
+#endif
+
+#ifdef CONFIG_PXA27x
+extern unsigned pxa27x_get_clk_frequency_khz(int);
+#else
+#define pxa27x_get_clk_frequency_khz(x) (0)
+#endif
+
+#endif
diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h
index 137f9f2ac4c3..a532d1e4b1f4 100644
--- a/include/linux/soc/qcom/apr.h
+++ b/include/linux/soc/qcom/apr.h
@@ -7,8 +7,9 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <dt-bindings/soc/qcom,apr.h>
+#include <dt-bindings/soc/qcom,gpr.h>
-extern struct bus_type aprbus;
+extern const struct bus_type aprbus;
#define APR_HDR_LEN(hdr_len) ((hdr_len)/4)
@@ -75,10 +76,65 @@ struct apr_resp_pkt {
int payload_size;
};
+struct gpr_hdr {
+ uint32_t version:4;
+ uint32_t hdr_size:4;
+ uint32_t pkt_size:24;
+ uint32_t dest_domain:8;
+ uint32_t src_domain:8;
+ uint32_t reserved:16;
+ uint32_t src_port;
+ uint32_t dest_port;
+ uint32_t token;
+ uint32_t opcode;
+} __packed;
+
+struct gpr_pkt {
+ struct gpr_hdr hdr;
+ uint32_t payload[];
+};
+
+struct gpr_resp_pkt {
+ struct gpr_hdr hdr;
+ void *payload;
+ int payload_size;
+};
+
+#define GPR_HDR_SIZE sizeof(struct gpr_hdr)
+#define GPR_PKT_VER 0x0
+#define GPR_PKT_HEADER_WORD_SIZE ((sizeof(struct gpr_pkt) + 3) >> 2)
+#define GPR_PKT_HEADER_BYTE_SIZE (GPR_PKT_HEADER_WORD_SIZE << 2)
+
+#define GPR_BASIC_RSP_RESULT 0x02001005
+
+struct gpr_ibasic_rsp_result_t {
+ uint32_t opcode;
+ uint32_t status;
+};
+
+#define GPR_BASIC_EVT_ACCEPTED 0x02001006
+
+struct gpr_ibasic_rsp_accepted_t {
+ uint32_t opcode;
+};
+
/* Bits 0 to 15 -- Minor version, Bits 16 to 31 -- Major version */
#define APR_SVC_MAJOR_VERSION(v)	(((v) >> 16) & 0xFFFF)
#define APR_SVC_MINOR_VERSION(v)	((v) & 0xFFFF)
+typedef int (*gpr_port_cb) (struct gpr_resp_pkt *d, void *priv, int op);
+struct packet_router;
+struct pkt_router_svc {
+ struct device *dev;
+ gpr_port_cb callback;
+ struct packet_router *pr;
+ spinlock_t lock;
+ int id;
+ void *priv;
+};
+
+typedef struct pkt_router_svc gpr_port_t;
+
struct apr_device {
struct device dev;
uint16_t svc_id;
@@ -86,22 +142,27 @@ struct apr_device {
uint32_t version;
char name[APR_NAME_SIZE];
const char *service_path;
- spinlock_t lock;
+ struct pkt_router_svc svc;
struct list_head node;
};
+typedef struct apr_device gpr_device_t;
+
#define to_apr_device(d) container_of(d, struct apr_device, dev)
+#define svc_to_apr_device(d) container_of(d, struct apr_device, svc)
struct apr_driver {
int (*probe)(struct apr_device *sl);
- int (*remove)(struct apr_device *sl);
+ void (*remove)(struct apr_device *sl);
int (*callback)(struct apr_device *a,
struct apr_resp_pkt *d);
+ int (*gpr_callback)(struct gpr_resp_pkt *d, void *data, int op);
struct device_driver driver;
const struct apr_device_id *id_table;
};
-#define to_apr_driver(d) container_of(d, struct apr_driver, driver)
+typedef struct apr_driver gpr_driver_t;
+#define to_apr_driver(d) container_of_const(d, struct apr_driver, driver)
/*
* use a macro to avoid include chaining to get THIS_MODULE
@@ -123,7 +184,14 @@ void apr_driver_unregister(struct apr_driver *drv);
#define module_apr_driver(__apr_driver) \
module_driver(__apr_driver, apr_driver_register, \
apr_driver_unregister)
+#define module_gpr_driver(__gpr_driver) module_apr_driver(__gpr_driver)
int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt);
+gpr_port_t *gpr_alloc_port(gpr_device_t *gdev, struct device *dev,
+ gpr_port_cb cb, void *priv);
+void gpr_free_port(gpr_port_t *port);
+int gpr_send_port_pkt(gpr_port_t *port, struct gpr_pkt *pkt);
+int gpr_send_pkt(gpr_device_t *gdev, struct gpr_pkt *pkt);
+
#endif /* __QCOM_APR_H_ */
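A hedged sketch of the new GPR port API above: open a port on a gpr_device_t and decode the basic-response opcode. The callback logic and names are illustrative:

#include <linux/soc/qcom/apr.h>

/* Illustrative GPR client: acknowledge basic responses only. */
static int example_port_cb(struct gpr_resp_pkt *resp, void *priv, int op)
{
	if (op == GPR_BASIC_RSP_RESULT) {
		struct gpr_ibasic_rsp_result_t *result = resp->payload;

		return result->status;
	}
	return 0;
}

static gpr_port_t *example_open_port(gpr_device_t *gdev)
{
	return gpr_alloc_port(gdev, &gdev->dev, example_port_cb, NULL);
}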
diff --git a/include/linux/qcom-geni-se.h b/include/linux/soc/qcom/geni-se.h
index 7c811eebcaab..0a984e2579fe 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/soc/qcom/geni-se.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef _LINUX_QCOM_GENI_SE
@@ -8,11 +9,24 @@
#include <linux/interconnect.h>
-/* Transfer mode supported by GENI Serial Engines */
+/**
+ * enum geni_se_xfer_mode - Transfer modes supported by Serial Engines
+ *
+ * @GENI_SE_INVALID: Invalid mode
+ * @GENI_SE_FIFO: FIFO mode. Data is transferred with SE FIFO
+ * by programmed IO method
+ * @GENI_SE_DMA: Serial Engine DMA mode. Data is transferred
+ * with SE by DMAengine internal to SE
+ * @GENI_GPI_DMA: GPI DMA mode. Data is transferred using a DMAengine
+ * configured by firmware residing on a GSI engine; the names GSI and
+ * GPI are used interchangeably for this DMA engine.
+ */
enum geni_se_xfer_mode {
GENI_SE_INVALID,
GENI_SE_FIFO,
GENI_SE_DMA,
+ GENI_GPI_DMA,
};
/* Protocols supported by GENI Serial Engines */
@@ -22,6 +36,8 @@ enum geni_se_protocol_type {
GENI_SE_UART,
GENI_SE_I2C,
GENI_SE_I3C,
+ GENI_SE_SPI_SLAVE,
+ GENI_SE_INVALID_PROTO = 255,
};
struct geni_wrapper;
@@ -60,22 +76,29 @@ struct geni_se {
/* Common SE registers */
#define GENI_FORCE_DEFAULT_REG 0x20
+#define GENI_OUTPUT_CTRL 0x24
#define SE_GENI_STATUS 0x40
#define GENI_SER_M_CLK_CFG 0x48
#define GENI_SER_S_CLK_CFG 0x4c
+#define GENI_IF_DISABLE_RO 0x64
#define GENI_FW_REVISION_RO 0x68
#define SE_GENI_CLK_SEL 0x7c
+#define SE_GENI_CFG_SEQ_START 0x84
#define SE_GENI_DMA_MODE_EN 0x258
#define SE_GENI_M_CMD0 0x600
#define SE_GENI_M_CMD_CTRL_REG 0x604
#define SE_GENI_M_IRQ_STATUS 0x610
#define SE_GENI_M_IRQ_EN 0x614
#define SE_GENI_M_IRQ_CLEAR 0x618
+#define SE_GENI_M_IRQ_EN_SET 0x61c
+#define SE_GENI_M_IRQ_EN_CLEAR 0x620
#define SE_GENI_S_CMD0 0x630
#define SE_GENI_S_CMD_CTRL_REG 0x634
#define SE_GENI_S_IRQ_STATUS 0x640
#define SE_GENI_S_IRQ_EN 0x644
#define SE_GENI_S_IRQ_CLEAR 0x648
+#define SE_GENI_S_IRQ_EN_SET 0x64c
+#define SE_GENI_S_IRQ_EN_CLEAR 0x650
#define SE_GENI_TX_FIFOn 0x700
#define SE_GENI_RX_FIFOn 0x780
#define SE_GENI_TX_FIFO_STATUS 0x800
@@ -84,11 +107,14 @@ struct geni_se {
#define SE_GENI_RX_WATERMARK_REG 0x810
#define SE_GENI_RX_RFR_WATERMARK_REG 0x814
#define SE_GENI_IOS 0x908
+#define SE_GENI_M_GP_LENGTH 0x910
+#define SE_GENI_S_GP_LENGTH 0x914
#define SE_DMA_TX_IRQ_STAT 0xc40
#define SE_DMA_TX_IRQ_CLR 0xc44
#define SE_DMA_TX_FSM_RST 0xc58
#define SE_DMA_RX_IRQ_STAT 0xd40
#define SE_DMA_RX_IRQ_CLR 0xd44
+#define SE_DMA_RX_LEN_IN 0xd54
#define SE_DMA_RX_FSM_RST 0xd58
#define SE_HW_PARAM_0 0xe24
#define SE_HW_PARAM_1 0xe28
@@ -96,6 +122,9 @@ struct geni_se {
/* GENI_FORCE_DEFAULT_REG fields */
#define FORCE_DEFAULT BIT(0)
+/* GENI_OUTPUT_CTRL fields */
+#define GENI_IO_MUX_0_EN BIT(0)
+
/* GENI_STATUS fields */
#define M_GENI_CMD_ACTIVE BIT(0)
#define S_GENI_CMD_ACTIVE BIT(12)
@@ -105,6 +134,9 @@ struct geni_se {
#define CLK_DIV_MSK GENMASK(15, 4)
#define CLK_DIV_SHFT 4
+/* GENI_IF_DISABLE_RO fields */
+#define FIFO_IF_DISABLE (BIT(0))
+
/* GENI_FW_REVISION_RO fields */
#define FW_REV_PROTOCOL_MSK GENMASK(15, 8)
#define FW_REV_PROTOCOL_SHFT 8
@@ -112,6 +144,9 @@ struct geni_se {
/* GENI_CLK_SEL fields */
#define CLK_SEL_MSK GENMASK(2, 0)
+/* SE_GENI_CFG_SEQ_START fields */
+#define START_TRIGGER BIT(0)
+
/* SE_GENI_DMA_MODE_EN */
#define GENI_DMA_MODE_EN BIT(0)
@@ -151,6 +186,7 @@ struct geni_se {
#define M_GP_IRQ_3_EN BIT(12)
#define M_GP_IRQ_4_EN BIT(13)
#define M_GP_IRQ_5_EN BIT(14)
+#define M_TX_FIFO_NOT_EMPTY_EN BIT(21)
#define M_IO_DATA_DEASSERT_EN BIT(22)
#define M_IO_DATA_ASSERT_EN BIT(23)
#define M_RX_FIFO_RD_ERR_EN BIT(24)
@@ -206,6 +242,9 @@ struct geni_se {
#define IO2_DATA_IN BIT(1)
#define RX_DATA_IN BIT(0)
+/* SE_GENI_M_GP_LENGTH and SE_GENI_S_GP_LENGTH fields */
+#define GP_LENGTH GENMASK(31, 0)
+
/* SE_DMA_TX_IRQ_STAT Register fields */
#define TX_DMA_DONE BIT(0)
#define TX_EOT BIT(1)
@@ -218,19 +257,31 @@ struct geni_se {
#define RX_SBE BIT(2)
#define RX_RESET_DONE BIT(3)
#define RX_FLUSH_DONE BIT(4)
+#define RX_DMA_PARITY_ERR BIT(5)
+#define RX_DMA_BREAK GENMASK(8, 7)
#define RX_GENI_GP_IRQ GENMASK(10, 5)
-#define RX_GENI_CANCEL_IRQ BIT(11)
#define RX_GENI_GP_IRQ_EXT GENMASK(13, 12)
+#define RX_GENI_CANCEL_IRQ BIT(14)
/* SE_HW_PARAM_0 fields */
#define TX_FIFO_WIDTH_MSK GENMASK(29, 24)
#define TX_FIFO_WIDTH_SHFT 24
+/*
+ * For QUP HW version >= 3.10, the TX FIFO depth is extended to
+ * 256 bytes and the field widens to bits 16 to 23
+ */
+#define TX_FIFO_DEPTH_MSK_256_BYTES GENMASK(23, 16)
#define TX_FIFO_DEPTH_MSK GENMASK(21, 16)
#define TX_FIFO_DEPTH_SHFT 16
/* SE_HW_PARAM_1 fields */
#define RX_FIFO_WIDTH_MSK GENMASK(29, 24)
#define RX_FIFO_WIDTH_SHFT 24
+/*
+ * For QUP HW version >= 3.10, the RX FIFO depth is extended to
+ * 256 bytes and the field widens to bits 16 to 23
+ */
+#define RX_FIFO_DEPTH_MSK_256_BYTES GENMASK(23, 16)
#define RX_FIFO_DEPTH_MSK GENMASK(21, 16)
#define RX_FIFO_DEPTH_SHFT 16
@@ -371,7 +422,8 @@ static inline void geni_se_abort_s_cmd(struct geni_se *se)
/**
* geni_se_get_tx_fifo_depth() - Get the TX fifo depth of the serial engine
- * @se: Pointer to the concerned serial engine.
+ * based on QUP HW version
+ * @se: Pointer to the concerned serial engine.
*
* This function is used to get the depth i.e. number of elements in the
* TX fifo of the serial engine.
@@ -380,11 +432,20 @@ static inline void geni_se_abort_s_cmd(struct geni_se *se)
*/
static inline u32 geni_se_get_tx_fifo_depth(struct geni_se *se)
{
- u32 val;
+ u32 val, hw_version, hw_major, hw_minor, tx_fifo_depth_mask;
+
+ hw_version = geni_se_get_qup_hw_version(se);
+ hw_major = GENI_SE_VERSION_MAJOR(hw_version);
+ hw_minor = GENI_SE_VERSION_MINOR(hw_version);
+
+ if ((hw_major == 3 && hw_minor >= 10) || hw_major > 3)
+ tx_fifo_depth_mask = TX_FIFO_DEPTH_MSK_256_BYTES;
+ else
+ tx_fifo_depth_mask = TX_FIFO_DEPTH_MSK;
val = readl_relaxed(se->base + SE_HW_PARAM_0);
- return (val & TX_FIFO_DEPTH_MSK) >> TX_FIFO_DEPTH_SHFT;
+ return (val & tx_fifo_depth_mask) >> TX_FIFO_DEPTH_SHFT;
}
/**
@@ -407,7 +468,8 @@ static inline u32 geni_se_get_tx_fifo_width(struct geni_se *se)
/**
* geni_se_get_rx_fifo_depth() - Get the RX fifo depth of the serial engine
- * @se: Pointer to the concerned serial engine.
+ * based on QUP HW version
+ * @se: Pointer to the concerned serial engine.
*
* This function is used to get the depth i.e. number of elements in the
* RX fifo of the serial engine.
@@ -416,11 +478,20 @@ static inline u32 geni_se_get_tx_fifo_width(struct geni_se *se)
*/
static inline u32 geni_se_get_rx_fifo_depth(struct geni_se *se)
{
- u32 val;
+ u32 val, hw_version, hw_major, hw_minor, rx_fifo_depth_mask;
+
+ hw_version = geni_se_get_qup_hw_version(se);
+ hw_major = GENI_SE_VERSION_MAJOR(hw_version);
+ hw_minor = GENI_SE_VERSION_MINOR(hw_version);
+
+ if ((hw_major == 3 && hw_minor >= 10) || hw_major > 3)
+ rx_fifo_depth_mask = RX_FIFO_DEPTH_MSK_256_BYTES;
+ else
+ rx_fifo_depth_mask = RX_FIFO_DEPTH_MSK;
val = readl_relaxed(se->base + SE_HW_PARAM_1);
- return (val & RX_FIFO_DEPTH_MSK) >> RX_FIFO_DEPTH_SHFT;
+ return (val & rx_fifo_depth_mask) >> RX_FIFO_DEPTH_SHFT;
}
void geni_se_init(struct geni_se *se, u32 rx_wm, u32 rx_rfr);
@@ -440,9 +511,13 @@ int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
unsigned int *index, unsigned long *res_freq,
bool exact);
+void geni_se_tx_init_dma(struct geni_se *se, dma_addr_t iova, size_t len);
+
int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
dma_addr_t *iova);
+void geni_se_rx_init_dma(struct geni_se *se, dma_addr_t iova, size_t len);
+
int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
dma_addr_t *iova);
@@ -458,5 +533,7 @@ void geni_icc_set_tag(struct geni_se *se, u32 tag);
int geni_icc_enable(struct geni_se *se);
int geni_icc_disable(struct geni_se *se);
+
+int geni_load_se_firmware(struct geni_se *se, enum geni_se_protocol_type protocol);
#endif
#endif
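Since the depth helpers above now pick the mask by QUP HW version, a protocol driver can size its buffers without caring which field applies; an illustrative fragment (the example_* name is not part of the header):

#include <linux/soc/qcom/geni-se.h>

/* Illustrative: derive a bounce-buffer size from the FIFO geometry. */
static size_t example_tx_buf_bytes(struct geni_se *se)
{
	u32 depth = geni_se_get_tx_fifo_depth(se);	/* words */
	u32 width = geni_se_get_tx_fifo_width(se);	/* bits per word */

	return depth * (width / 8);
}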
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 437c9df13229..0287f9182c4d 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -24,15 +24,64 @@
#define LLCC_CMPTDMA 15
#define LLCC_DISP 16
#define LLCC_VIDFW 17
+#define LLCC_CAMFW 18
#define LLCC_MDMHPFX 20
#define LLCC_MDMPNG 21
#define LLCC_AUDHW 22
#define LLCC_NPU 23
#define LLCC_WLHW 24
+#define LLCC_PIMEM 25
+#define LLCC_ECC 26
#define LLCC_CVP 28
#define LLCC_MODPE 29
#define LLCC_APTCM 30
#define LLCC_WRCACHE 31
+#define LLCC_CVPFW 32
+#define LLCC_CPUSS1 33
+#define LLCC_CAMEXP0 34
+#define LLCC_CPUMTE 35
+#define LLCC_CPUHWT 36
+#define LLCC_MDMCLAD2 37
+#define LLCC_CAMEXP1 38
+#define LLCC_CMPTHCP 39
+#define LLCC_LCPDARE 40
+#define LLCC_AENPU 45
+#define LLCC_ISLAND1 46
+#define LLCC_ISLAND2 47
+#define LLCC_ISLAND3 48
+#define LLCC_ISLAND4 49
+#define LLCC_CAMEXP2 50
+#define LLCC_CAMEXP3 51
+#define LLCC_CAMEXP4 52
+#define LLCC_DISP_WB 53
+#define LLCC_DISP_1 54
+#define LLCC_VIEYE 57
+#define LLCC_VIDPTH 58
+#define LLCC_GPUMV 59
+#define LLCC_EVA_LEFT 60
+#define LLCC_EVA_RIGHT 61
+#define LLCC_EVAGAIN 62
+#define LLCC_VIPTH 63
+#define LLCC_VIDVSP 64
+#define LLCC_DISP_LEFT 65
+#define LLCC_DISP_RIGHT 66
+#define LLCC_EVCS_LEFT 67
+#define LLCC_EVCS_RIGHT 68
+#define LLCC_SPAD 69
+#define LLCC_VIDDEC 70
+#define LLCC_CAMOFE 71
+#define LLCC_CAMRTIP 72
+#define LLCC_CAMSRTIP 73
+#define LLCC_CAMRTRF 74
+#define LLCC_CAMSRTRF 75
+#define LLCC_VIDEO_APV 83
+#define LLCC_COMPUTE1 87
+#define LLCC_CPUSS_OPP 88
+#define LLCC_CPUSSMPAM 89
+#define LLCC_CAM_IPE_STROV 92
+#define LLCC_CAM_OFE_STROV 93
+#define LLCC_CPUSS_HEU 94
+#define LLCC_MDM_PNG_FIXED 100
/**
* struct llcc_slice_desc - Cache slice descriptor
@@ -47,9 +96,6 @@ struct llcc_slice_desc {
/**
* struct llcc_edac_reg_data - llcc edac registers data for each error type
* @name: Name of the error
- * @synd_reg: Syndrome register address
- * @count_status_reg: Status register address to read the error count
- * @ways_status_reg: Status register address to read the error ways
* @reg_cnt: Number of registers
* @count_mask: Mask value to get the error count
* @ways_mask: Mask value to get the error ways
@@ -58,9 +104,6 @@ struct llcc_slice_desc {
*/
struct llcc_edac_reg_data {
char *name;
- u64 synd_reg;
- u64 count_status_reg;
- u64 ways_status_reg;
u32 reg_cnt;
u32 count_mask;
u32 ways_mask;
@@ -68,32 +111,64 @@ struct llcc_edac_reg_data {
u8 ways_shift;
};
+struct llcc_edac_reg_offset {
+ /* LLCC TRP registers */
+ u32 trp_ecc_error_status0;
+ u32 trp_ecc_error_status1;
+ u32 trp_ecc_sb_err_syn0;
+ u32 trp_ecc_db_err_syn0;
+ u32 trp_ecc_error_cntr_clear;
+ u32 trp_interrupt_0_status;
+ u32 trp_interrupt_0_clear;
+ u32 trp_interrupt_0_enable;
+
+ /* LLCC Common registers */
+ u32 cmn_status0;
+ u32 cmn_interrupt_0_enable;
+ u32 cmn_interrupt_2_enable;
+
+ /* LLCC DRP registers */
+ u32 drp_ecc_error_cfg;
+ u32 drp_ecc_error_cntr_clear;
+ u32 drp_interrupt_status;
+ u32 drp_interrupt_clear;
+ u32 drp_interrupt_enable;
+ u32 drp_ecc_error_status0;
+ u32 drp_ecc_error_status1;
+ u32 drp_ecc_sb_err_syn0;
+ u32 drp_ecc_db_err_syn0;
+};
+
/**
* struct llcc_drv_data - Data associated with the llcc driver
- * @regmap: regmap associated with the llcc device
- * @bcast_regmap: regmap associated with llcc broadcast offset
+ * @regmaps: regmaps associated with the llcc device
+ * @bcast_regmap: regmap associated with llcc broadcast OR offset
+ * @bcast_and_regmap: regmap associated with llcc broadcast AND offset
* @cfg: pointer to the data structure for slice configuration
+ * @edac_reg_offset: Offset of the LLCC EDAC registers
* @lock: mutex associated with each slice
* @cfg_size: size of the config data table
* @max_slices: max slices as read from device tree
* @num_banks: Number of llcc banks
* @bitmap: Bit map to track the active slice ids
- * @offsets: Pointer to the bank offsets array
* @ecc_irq: interrupt for llcc cache error detection and reporting
- * @major_version: Indicates the LLCC major version
+ * @ecc_irq_configured: 'True' if firmware has already configured the irq propagation
+ * @version: Indicates the LLCC version
*/
struct llcc_drv_data {
- struct regmap *regmap;
+ struct regmap **regmaps;
struct regmap *bcast_regmap;
+ struct regmap *bcast_and_regmap;
const struct llcc_slice_config *cfg;
+ const struct llcc_edac_reg_offset *edac_reg_offset;
struct mutex lock;
u32 cfg_size;
u32 max_slices;
u32 num_banks;
unsigned long *bitmap;
- u32 *offsets;
int ecc_irq;
- u32 major_version;
+ bool ecc_irq_configured;
+ u32 version;
};
#if IS_ENABLED(CONFIG_QCOM_LLCC)
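A hedged sketch of claiming one of the slice IDs above; llcc_slice_getd() and llcc_slice_activate() are declared further down this header, outside the hunk shown:

#include <linux/err.h>
#include <linux/soc/qcom/llcc-qcom.h>

/* Illustrative client of the audio-hardware slice. */
static struct llcc_slice_desc *example_get_audio_slice(void)
{
	struct llcc_slice_desc *desc = llcc_slice_getd(LLCC_AUDHW);

	if (!IS_ERR(desc))
		llcc_slice_activate(desc);
	return desc;
}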
diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h
index afd47217996b..8ea8230579a2 100644
--- a/include/linux/soc/qcom/mdt_loader.h
+++ b/include/linux/soc/qcom/mdt_loader.h
@@ -10,20 +10,25 @@
struct device;
struct firmware;
+struct qcom_scm_pas_metadata;
#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
ssize_t qcom_mdt_get_size(const struct firmware *fw);
+int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
+ const char *fw_name, int pas_id, phys_addr_t mem_phys,
+ struct qcom_scm_pas_metadata *pas_metadata_ctx);
int qcom_mdt_load(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base);
int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
- const char *fw_name, int pas_id, void *mem_region,
+ const char *fw_name, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base);
-void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len);
+void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len,
+ const char *fw_name, struct device *dev);
#else /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */
@@ -32,6 +37,13 @@ static inline ssize_t qcom_mdt_get_size(const struct firmware *fw)
return -ENODEV;
}
+static inline int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw,
+ const char *fw_name, int pas_id, phys_addr_t mem_phys,
+ struct qcom_scm_pas_metadata *pas_metadata_ctx)
+{
+ return -ENODEV;
+}
+
static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw,
const char *fw_name, int pas_id,
void *mem_region, phys_addr_t mem_phys,
@@ -42,16 +54,16 @@ static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw,
static inline int qcom_mdt_load_no_init(struct device *dev,
const struct firmware *fw,
- const char *fw_name, int pas_id,
- void *mem_region, phys_addr_t mem_phys,
- size_t mem_size,
+ const char *fw_name, void *mem_region,
+ phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base)
{
return -ENODEV;
}
static inline void *qcom_mdt_read_metadata(const struct firmware *fw,
- size_t *data_len)
+ size_t *data_len, const char *fw_name,
+ struct device *dev)
{
return ERR_PTR(-ENODEV);
}
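A hedged sketch of the reworked loader API in a remoteproc-style boot path; the firmware name, pas_id and carveout parameters are placeholders:

#include <linux/firmware.h>
#include <linux/soc/qcom/mdt_loader.h>

/* Illustrative boot path: fetch and load one MDT image. */
static int example_mdt_boot(struct device *dev, void *mem_region,
			    phys_addr_t mem_phys, size_t mem_size)
{
	const struct firmware *fw;
	phys_addr_t reloc_base;
	int ret;

	ret = request_firmware(&fw, "example.mdt", dev);
	if (ret)
		return ret;

	ret = qcom_mdt_load(dev, fw, "example.mdt", 1 /* pas_id */,
			    mem_region, mem_phys, mem_size, &reloc_base);
	release_firmware(fw);
	return ret;
}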
diff --git a/include/linux/soc/qcom/pmic_glink.h b/include/linux/soc/qcom/pmic_glink.h
new file mode 100644
index 000000000000..7cddf1027752
--- /dev/null
+++ b/include/linux/soc/qcom/pmic_glink.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Linaro Ltd
+ */
+#ifndef __SOC_QCOM_PMIC_GLINK_H__
+#define __SOC_QCOM_PMIC_GLINK_H__
+
+struct pmic_glink;
+struct pmic_glink_client;
+
+#define PMIC_GLINK_OWNER_BATTMGR 32778
+#define PMIC_GLINK_OWNER_USBC 32779
+#define PMIC_GLINK_OWNER_USBC_PAN 32780
+
+#define PMIC_GLINK_REQ_RESP 1
+#define PMIC_GLINK_NOTIFY 2
+
+struct pmic_glink_hdr {
+ __le32 owner;
+ __le32 type;
+ __le32 opcode;
+};
+
+int pmic_glink_send(struct pmic_glink_client *client, void *data, size_t len);
+
+struct pmic_glink_client *devm_pmic_glink_client_alloc(struct device *dev,
+ unsigned int id,
+ void (*cb)(const void *, size_t, void *),
+ void (*pdr)(void *, int),
+ void *priv);
+void pmic_glink_client_register(struct pmic_glink_client *client);
+
+#endif
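The alloc/register split above lets the callbacks be wired up before any message can arrive; a hedged sketch of a battery-manager client (the empty callbacks are illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/qcom/pmic_glink.h>

static void example_cb(const void *data, size_t len, void *priv)
{
	/* decode a pmic_glink_hdr-prefixed message here */
}

static void example_pdr(void *priv, int state)
{
	/* react to protection-domain restart here */
}

static int example_register(struct device *dev)
{
	struct pmic_glink_client *client;

	client = devm_pmic_glink_client_alloc(dev, PMIC_GLINK_OWNER_BATTMGR,
					      example_cb, example_pdr, NULL);
	if (IS_ERR(client))
		return PTR_ERR(client);

	pmic_glink_client_register(client);
	return 0;
}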
diff --git a/include/linux/soc/qcom/qcom-pbs.h b/include/linux/soc/qcom/qcom-pbs.h
new file mode 100644
index 000000000000..8a46209ccf13
--- /dev/null
+++ b/include/linux/soc/qcom/qcom-pbs.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _QCOM_PBS_H
+#define _QCOM_PBS_H
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct device_node;
+struct pbs_dev;
+
+#if IS_ENABLED(CONFIG_QCOM_PBS)
+int qcom_pbs_trigger_event(struct pbs_dev *pbs, u8 bitmap);
+struct pbs_dev *get_pbs_client_device(struct device *client_dev);
+#else
+static inline int qcom_pbs_trigger_event(struct pbs_dev *pbs, u8 bitmap)
+{
+ return -ENODEV;
+}
+
+static inline struct pbs_dev *get_pbs_client_device(struct device *client_dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+#endif
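A hedged sketch of a client triggering a PBS sequence; the bitmap value is illustrative:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/qcom/qcom-pbs.h>

/* Illustrative: trigger PBS sequence bit 0 for a client device. */
static int example_pbs_kick(struct device *client_dev)
{
	struct pbs_dev *pbs = get_pbs_client_device(client_dev);

	if (IS_ERR(pbs))
		return PTR_ERR(pbs);

	return qcom_pbs_trigger_event(pbs, BIT(0));
}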
diff --git a/include/linux/soc/qcom/qcom_aoss.h b/include/linux/soc/qcom/qcom_aoss.h
new file mode 100644
index 000000000000..7361ca028752
--- /dev/null
+++ b/include/linux/soc/qcom/qcom_aoss.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __QCOM_AOSS_H__
+#define __QCOM_AOSS_H__
+
+#include <linux/err.h>
+#include <linux/device.h>
+
+struct qmp;
+
+#if IS_ENABLED(CONFIG_QCOM_AOSS_QMP)
+
+int qmp_send(struct qmp *qmp, const char *fmt, ...);
+struct qmp *qmp_get(struct device *dev);
+void qmp_put(struct qmp *qmp);
+
+#else
+
+static inline int qmp_send(struct qmp *qmp, const char *fmt, ...)
+{
+ return -ENODEV;
+}
+
+static inline struct qmp *qmp_get(struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void qmp_put(struct qmp *qmp)
+{
+}
+
+#endif
+
+#endif
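A hedged sketch of a QMP user; qmp_send() takes a printf-style message, and the payload below only mimics the usual "{class: ..., res: ..., val: ...}" shape without claiming any specific AOSS resource:

#include <linux/soc/qcom/qcom_aoss.h>

static int example_qmp_ping(struct device *dev)
{
	struct qmp *qmp = qmp_get(dev);
	int ret;

	if (IS_ERR(qmp))
		return PTR_ERR(qmp);

	ret = qmp_send(qmp, "{class: ddr, res: fixed, val: %d}", 1);
	qmp_put(qmp);
	return ret;
}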
diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h
index b1f80e756d2a..291cdc7ef49c 100644
--- a/include/linux/soc/qcom/qmi.h
+++ b/include/linux/soc/qcom/qmi.h
@@ -24,9 +24,9 @@ struct socket;
*/
struct qmi_header {
u8 type;
- u16 txn_id;
- u16 msg_id;
- u16 msg_len;
+ __le16 txn_id;
+ __le16 msg_id;
+ __le16 msg_len;
} __packed;
#define QMI_REQUEST 0
@@ -75,7 +75,7 @@ struct qmi_elem_info {
enum qmi_array_type array_type;
u8 tlv_type;
u32 offset;
- struct qmi_elem_info *ei_array;
+ const struct qmi_elem_info *ei_array;
};
#define QMI_RESULT_SUCCESS_V01 0
@@ -102,7 +102,7 @@ struct qmi_response_type_v01 {
u16 error;
};
-extern struct qmi_elem_info qmi_response_type_v01_ei[];
+extern const struct qmi_elem_info qmi_response_type_v01_ei[];
/**
* struct qmi_service - context to track lookup-results
@@ -173,7 +173,7 @@ struct qmi_txn {
struct completion completion;
int result;
- struct qmi_elem_info *ei;
+ const struct qmi_elem_info *ei;
void *dest;
};
@@ -189,7 +189,7 @@ struct qmi_msg_handler {
unsigned int type;
unsigned int msg_id;
- struct qmi_elem_info *ei;
+ const struct qmi_elem_info *ei;
size_t decoded_size;
void (*fn)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
@@ -249,23 +249,23 @@ void qmi_handle_release(struct qmi_handle *qmi);
ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
struct qmi_txn *txn, int msg_id, size_t len,
- struct qmi_elem_info *ei, const void *c_struct);
+ const struct qmi_elem_info *ei, const void *c_struct);
ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
struct qmi_txn *txn, int msg_id, size_t len,
- struct qmi_elem_info *ei, const void *c_struct);
+ const struct qmi_elem_info *ei, const void *c_struct);
ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
- int msg_id, size_t len, struct qmi_elem_info *ei,
+ int msg_id, size_t len, const struct qmi_elem_info *ei,
const void *c_struct);
void *qmi_encode_message(int type, unsigned int msg_id, size_t *len,
- unsigned int txn_id, struct qmi_elem_info *ei,
+ unsigned int txn_id, const struct qmi_elem_info *ei,
const void *c_struct);
int qmi_decode_message(const void *buf, size_t len,
- struct qmi_elem_info *ei, void *c_struct);
+ const struct qmi_elem_info *ei, void *c_struct);
int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn,
- struct qmi_elem_info *ei, void *c_struct);
+ const struct qmi_elem_info *ei, void *c_struct);
int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout);
void qmi_txn_cancel(struct qmi_txn *txn);
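The constified ei tables above thread through the whole transaction API; a hedged round-trip sketch where the message ID, timeout and ei tables are supplied by the caller and chosen arbitrarily here:

#include <linux/jiffies.h>
#include <linux/soc/qcom/qmi.h>

/* Illustrative request/response round trip. */
static int example_qmi_request(struct qmi_handle *qmi,
			       struct sockaddr_qrtr *sq,
			       const struct qmi_elem_info *req_ei,
			       const void *req, size_t req_len,
			       const struct qmi_elem_info *resp_ei,
			       void *resp)
{
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(qmi, &txn, resp_ei, resp);
	if (ret < 0)
		return ret;

	ret = qmi_send_request(qmi, sq, &txn, 0x21 /* msg_id */, req_len,
			       req_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	return qmi_txn_wait(&txn, 5 * HZ);
}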
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
index f2645ec52520..8190878645f9 100644
--- a/include/linux/soc/qcom/smd-rpm.h
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -2,10 +2,13 @@
#ifndef __QCOM_SMD_RPM_H__
#define __QCOM_SMD_RPM_H__
+#include <linux/types.h>
+
struct qcom_smd_rpm;
-#define QCOM_SMD_RPM_ACTIVE_STATE 0
-#define QCOM_SMD_RPM_SLEEP_STATE 1
+#define QCOM_SMD_RPM_ACTIVE_STATE 0
+#define QCOM_SMD_RPM_SLEEP_STATE 1
+#define QCOM_SMD_RPM_STATE_NUM 2
/*
* Constants used for addressing resources in the RPM.
@@ -19,6 +22,7 @@ struct qcom_smd_rpm;
#define QCOM_SMD_RPM_CLK_BUF_A 0x616B6C63
#define QCOM_SMD_RPM_LDOA 0x616f646c
#define QCOM_SMD_RPM_LDOB 0x626F646C
+#define QCOM_SMD_RPM_LDOE 0x656f646c
#define QCOM_SMD_RPM_RWCX 0x78637772
#define QCOM_SMD_RPM_RWMX 0x786d7772
#define QCOM_SMD_RPM_RWLC 0x636c7772
@@ -29,14 +33,32 @@ struct qcom_smd_rpm;
#define QCOM_SMD_RPM_NCPB 0x6270636E
#define QCOM_SMD_RPM_OCMEM_PWR 0x706d636f
#define QCOM_SMD_RPM_QPIC_CLK 0x63697071
+#define QCOM_SMD_RPM_QUP_CLK 0x707571
#define QCOM_SMD_RPM_SMPA 0x61706d73
#define QCOM_SMD_RPM_SMPB 0x62706d73
+#define QCOM_SMD_RPM_SMPE 0x65706d73
#define QCOM_SMD_RPM_SPDM 0x63707362
#define QCOM_SMD_RPM_VSA 0x00617376
#define QCOM_SMD_RPM_MMAXI_CLK 0x69786d6d
#define QCOM_SMD_RPM_IPA_CLK 0x617069
#define QCOM_SMD_RPM_CE_CLK 0x6563
#define QCOM_SMD_RPM_AGGR_CLK 0x72676761
+#define QCOM_SMD_RPM_HWKM_CLK 0x6d6b7768
+#define QCOM_SMD_RPM_PKA_CLK 0x616b70
+#define QCOM_SMD_RPM_MCFG_CLK 0x6766636d
+
+#define QCOM_RPM_KEY_SOFTWARE_ENABLE 0x6e657773
+#define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370
+#define QCOM_RPM_SMD_KEY_RATE 0x007a484b
+#define QCOM_RPM_SMD_KEY_ENABLE 0x62616e45
+#define QCOM_RPM_SMD_KEY_STATE 0x54415453
+#define QCOM_RPM_SCALING_ENABLE_ID 0x2
+
+struct clk_smd_rpm_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
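A hedged sketch of voting a clock rate with the request layout just defined, via qcom_rpm_smd_write() as declared below; the QUP clock resource and rate units (kHz, per the 'KHz' key) are assumptions drawn from this header:

#include <linux/kernel.h>
#include <asm/byteorder.h>
#include <linux/soc/qcom/smd-rpm.h>

static int example_vote_rate(struct qcom_smd_rpm *rpm, u32 clk_id, u32 khz)
{
	struct clk_smd_rpm_req req = {
		.key = cpu_to_le32(QCOM_RPM_SMD_KEY_RATE),
		.nbytes = cpu_to_le32(sizeof(u32)),
		.value = cpu_to_le32(khz),
	};

	return qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
				  QCOM_SMD_RPM_QUP_CLK, clk_id,
				  &req, sizeof(req));
}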
int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
int state,
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
index 86e1b358688a..f946e3beca21 100644
--- a/include/linux/soc/qcom/smem.h
+++ b/include/linux/soc/qcom/smem.h
@@ -4,6 +4,7 @@
#define QCOM_SMEM_HOST_ANY -1
+bool qcom_smem_is_available(void);
int qcom_smem_alloc(unsigned host, unsigned item, size_t size);
void *qcom_smem_get(unsigned host, unsigned item, size_t *size);
@@ -11,4 +12,9 @@ int qcom_smem_get_free_space(unsigned host);
phys_addr_t qcom_smem_virt_to_phys(void *p);
+int qcom_smem_get_soc_id(u32 *id);
+int qcom_smem_get_feature_code(u32 *code);
+
+int qcom_smem_bust_hwspin_lock_by_host(unsigned int host);
+
#endif
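A minimal sketch of the availability check added above, guarding a lookup and deferring probe until SMEM is up; item 137 matches the SMEM_HW_SW_BUILD_ID socinfo entry used further below:

#include <linux/err.h>
#include <linux/soc/qcom/smem.h>

static void *example_get_item(size_t *size)
{
	if (!qcom_smem_is_available())
		return ERR_PTR(-EPROBE_DEFER);

	return qcom_smem_get(QCOM_SMEM_HOST_ANY, 137, size);
}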
diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h
index 63ad8cddad14..652c0158baac 100644
--- a/include/linux/soc/qcom/smem_state.h
+++ b/include/linux/soc/qcom/smem_state.h
@@ -14,6 +14,7 @@ struct qcom_smem_state_ops {
#ifdef CONFIG_QCOM_SMEM_STATE
struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit);
+struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit);
void qcom_smem_state_put(struct qcom_smem_state *);
int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 value);
@@ -29,6 +30,13 @@ static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
return ERR_PTR(-EINVAL);
}
+static inline struct qcom_smem_state *devm_qcom_smem_state_get(struct device *dev,
+ const char *con_id,
+ unsigned *bit)
+{
+ return ERR_PTR(-EINVAL);
+}
+
static inline void qcom_smem_state_put(struct qcom_smem_state *state)
{
}
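A hedged sketch of the new devm getter; "stop" is an illustrative con_id:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/qcom/smem_state.h>

static int example_signal_stop(struct device *dev)
{
	struct qcom_smem_state *state;
	unsigned bit;

	state = devm_qcom_smem_state_get(dev, "stop", &bit);
	if (IS_ERR(state))
		return PTR_ERR(state);

	return qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));
}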
diff --git a/include/linux/soc/qcom/socinfo.h b/include/linux/soc/qcom/socinfo.h
new file mode 100644
index 000000000000..ba823a0013c5
--- /dev/null
+++ b/include/linux/soc/qcom/socinfo.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __QCOM_SOCINFO_H__
+#define __QCOM_SOCINFO_H__
+
+#include <linux/types.h>
+
+/*
+ * SMEM item id, used to acquire a handle to the
+ * respective SMEM region.
+ */
+#define SMEM_HW_SW_BUILD_ID 137
+
+#define SMEM_SOCINFO_BUILD_ID_LENGTH 32
+#define SMEM_SOCINFO_CHIP_ID_LENGTH 32
+
+/*
+ * SoC version type with major number in the upper 16 bits and minor
+ * number in the lower 16 bits.
+ */
+#define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff)
+#define SOCINFO_MINOR(ver) ((ver) & 0xffff)
+#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff))
+
+/* Socinfo SMEM item structure */
+struct socinfo {
+ __le32 fmt;
+ __le32 id;
+ __le32 ver;
+ char build_id[SMEM_SOCINFO_BUILD_ID_LENGTH];
+ /* Version 2 */
+ __le32 raw_id;
+ __le32 raw_ver;
+ /* Version 3 */
+ __le32 hw_plat;
+ /* Version 4 */
+ __le32 plat_ver;
+ /* Version 5 */
+ __le32 accessory_chip;
+ /* Version 6 */
+ __le32 hw_plat_subtype;
+ /* Version 7 */
+ __le32 pmic_model;
+ __le32 pmic_die_rev;
+ /* Version 8 */
+ __le32 pmic_model_1;
+ __le32 pmic_die_rev_1;
+ __le32 pmic_model_2;
+ __le32 pmic_die_rev_2;
+ /* Version 9 */
+ __le32 foundry_id;
+ /* Version 10 */
+ __le32 serial_num;
+ /* Version 11 */
+ __le32 num_pmics;
+ __le32 pmic_array_offset;
+ /* Version 12 */
+ __le32 chip_family;
+ __le32 raw_device_family;
+ __le32 raw_device_num;
+ /* Version 13 */
+ __le32 nproduct_id;
+ char chip_id[SMEM_SOCINFO_CHIP_ID_LENGTH];
+ /* Version 14 */
+ __le32 num_clusters;
+ __le32 ncluster_array_offset;
+ __le32 num_subset_parts;
+ __le32 nsubset_parts_array_offset;
+ /* Version 15 */
+ __le32 nmodem_supported;
+ /* Version 16 */
+ __le32 feature_code;
+ __le32 pcode;
+ __le32 npartnamemap_offset;
+ __le32 nnum_partname_mapping;
+ /* Version 17 */
+ __le32 oem_variant;
+ /* Version 18 */
+ __le32 num_kvps;
+ __le32 kvps_offset;
+ /* Version 19 */
+ __le32 num_func_clusters;
+ __le32 boot_cluster;
+ __le32 boot_core;
+ /* Version 20 */
+ __le32 raw_package_type;
+ /* Version 21, 22, 23 */
+ __le32 reserve1[4];
+};
+
+/* Internal feature codes */
+enum qcom_socinfo_feature_code {
+ /* External feature codes */
+ SOCINFO_FC_UNKNOWN = 0x0,
+ SOCINFO_FC_AA,
+ SOCINFO_FC_AB,
+ SOCINFO_FC_AC,
+ SOCINFO_FC_AD,
+ SOCINFO_FC_AE,
+ SOCINFO_FC_AF,
+ SOCINFO_FC_AG,
+ SOCINFO_FC_AH,
+};
+
+/* Internal feature codes */
+/* Valid values: 0 <= n <= 0xf */
+#define SOCINFO_FC_Yn(n) (0xf1 + (n))
+#define SOCINFO_FC_INT_MAX SOCINFO_FC_Yn(0xf)
+
+/* Product codes */
+#define SOCINFO_PC_UNKNOWN 0
+#define SOCINFO_PCn(n) ((n) + 1)
+#define SOCINFO_PC_RESERVE (BIT(31) - 1)
+
+#endif
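
A sketch of decoding the item with the macros above, assuming the caller validates the returned length against the format version before touching later fields (that check is elided here):

static void example_print_soc_version(void)
{
	struct socinfo *info;
	size_t len;
	u32 ver;

	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, &len);
	if (IS_ERR(info))
		return;

	ver = le32_to_cpu(info->ver);
	pr_info("SoC id %u, v%u.%u\n", le32_to_cpu(info->id),
		SOCINFO_MAJOR(ver), SOCINFO_MINOR(ver));
}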
diff --git a/include/linux/soc/qcom/ubwc.h b/include/linux/soc/qcom/ubwc.h
new file mode 100644
index 000000000000..0a4edfe3d96d
--- /dev/null
+++ b/include/linux/soc/qcom/ubwc.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018, The Linux Foundation
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef __QCOM_UBWC_H__
+#define __QCOM_UBWC_H__
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct qcom_ubwc_cfg_data {
+ u32 ubwc_enc_version;
+ /* Can be read from MDSS_BASE + 0x58 */
+ u32 ubwc_dec_version;
+
+ /**
+ * @ubwc_swizzle: Whether to enable level 1, 2 & 3 bank swizzling.
+ *
+ * UBWC 1.0 always enables all three levels.
+ * UBWC 2.0 removes level 1 bank swizzling, leaving levels 2 & 3.
+ * UBWC 4.0 adds the optional ability to disable levels 2 & 3.
+ */
+ u32 ubwc_swizzle;
+#define UBWC_SWIZZLE_ENABLE_LVL1 BIT(0)
+#define UBWC_SWIZZLE_ENABLE_LVL2 BIT(1)
+#define UBWC_SWIZZLE_ENABLE_LVL3 BIT(2)
+
+ /**
+ * @highest_bank_bit: Highest Bank Bit
+ *
+ * The Highest Bank Bit value represents the bit of the highest
+ * DDR bank. This should ideally use DRAM type detection.
+ */
+ int highest_bank_bit;
+ bool ubwc_bank_spread;
+
+ /**
+ * @macrotile_mode: Macrotile Mode
+ *
+ * Whether to use 4-channel macrotiling mode or the newer
+ * 8-channel macrotiling mode introduced in UBWC 3.1. 0 is
+ * 4-channel and 1 is 8-channel.
+ */
+ bool macrotile_mode;
+};
+
+#define UBWC_1_0 0x10000000
+#define UBWC_2_0 0x20000000
+#define UBWC_3_0 0x30000000
+#define UBWC_4_0 0x40000000
+#define UBWC_4_3 0x40030000
+#define UBWC_5_0 0x50000000
+#define UBWC_6_0 0x60000000
+
+#if IS_ENABLED(CONFIG_QCOM_UBWC_CONFIG)
+const struct qcom_ubwc_cfg_data *qcom_ubwc_config_get_data(void);
+#else
+static inline const struct qcom_ubwc_cfg_data *qcom_ubwc_config_get_data(void)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif
+
+static inline bool qcom_ubwc_get_ubwc_mode(const struct qcom_ubwc_cfg_data *cfg)
+{
+ bool ret = cfg->ubwc_enc_version == UBWC_1_0;
+
+ if (ret && !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL1))
+ pr_err("UBWC config discrepancy - level 1 swizzling disabled on UBWC 1.0\n");
+
+ return ret;
+}
+
+#endif /* __QCOM_UBWC_H__ */
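
A sketch of a GPU or display consumer querying the shared UBWC config; the ERR_PTR handling mirrors the stub above, and the print only illustrates which fields matter:

static int example_apply_ubwc(struct device *dev)
{
	const struct qcom_ubwc_cfg_data *cfg = qcom_ubwc_config_get_data();

	if (IS_ERR(cfg))
		return PTR_ERR(cfg);	/* no table for this SoC, or support disabled */

	dev_info(dev, "UBWC dec %#x, HBB %d, %s macrotile\n",
		 cfg->ubwc_dec_version, cfg->highest_bank_bit,
		 cfg->macrotile_mode ? "8-channel" : "4-channel");
	return 0;
}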
diff --git a/include/linux/soc/renesas/r9a06g032-sysctrl.h b/include/linux/soc/renesas/r9a06g032-sysctrl.h
new file mode 100644
index 000000000000..066dfb15cbdd
--- /dev/null
+++ b/include/linux/soc/renesas/r9a06g032-sysctrl.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__
+#define __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__
+
+#ifdef CONFIG_CLK_R9A06G032
+int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val);
+#else
+static inline int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val) { return -ENODEV; }
+#endif
+
+#endif /* __LINUX_SOC_RENESAS_R9A06G032_SYSCTRL_H__ */
diff --git a/include/linux/soc/renesas/rcar-rst.h b/include/linux/soc/renesas/rcar-rst.h
index 7899a5b8c247..1f1fe8bfaa76 100644
--- a/include/linux/soc/renesas/rcar-rst.h
+++ b/include/linux/soc/renesas/rcar-rst.h
@@ -4,8 +4,10 @@
#ifdef CONFIG_RST_RCAR
int rcar_rst_read_mode_pins(u32 *mode);
+int rcar_rst_set_rproc_boot_addr(u64 boot_addr);
#else
static inline int rcar_rst_read_mode_pins(u32 *mode) { return -ENODEV; }
+static inline int rcar_rst_set_rproc_boot_addr(u64 boot_addr) { return -ENODEV; }
#endif
#endif /* __LINUX_SOC_RENESAS_RCAR_RST_H__ */
diff --git a/include/linux/soc/samsung/exynos-chipid.h b/include/linux/soc/samsung/exynos-chipid.h
index 8bca6763f99c..62f0e2531068 100644
--- a/include/linux/soc/samsung/exynos-chipid.h
+++ b/include/linux/soc/samsung/exynos-chipid.h
@@ -9,10 +9,8 @@
#define __LINUX_SOC_EXYNOS_CHIPID_H
#define EXYNOS_CHIPID_REG_PRO_ID 0x00
-#define EXYNOS_SUBREV_MASK (0xf << 4)
-#define EXYNOS_MAINREV_MASK (0xf << 0)
-#define EXYNOS_REV_MASK (EXYNOS_SUBREV_MASK | \
- EXYNOS_MAINREV_MASK)
+#define EXYNOS_REV_PART_MASK 0xf
+#define EXYNOS_REV_PART_SHIFT 4
#define EXYNOS_MASK 0xfffff000
#define EXYNOS_CHIPID_REG_PKG_ID 0x04
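
With the rework, the main revision sits in bits [3:0] and the sub revision in bits [7:4] of the PRO_ID register; a sketch of the decode:

static void example_decode_rev(u32 pro_id)
{
	u32 main_rev = pro_id & EXYNOS_REV_PART_MASK;
	u32 sub_rev = (pro_id >> EXYNOS_REV_PART_SHIFT) & EXYNOS_REV_PART_MASK;

	pr_info("Exynos revision %u.%u\n", main_rev, sub_rev);
}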
diff --git a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h
index a4f5516cc956..2bd9d12d9a52 100644
--- a/include/linux/soc/samsung/exynos-pmu.h
+++ b/include/linux/soc/samsung/exynos-pmu.h
@@ -10,6 +10,7 @@
#define __LINUX_SOC_EXYNOS_PMU_H
struct regmap;
+struct device_node;
enum sys_powerdown {
SYS_AFTR,
@@ -20,12 +21,20 @@ enum sys_powerdown {
extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
#ifdef CONFIG_EXYNOS_PMU
-extern struct regmap *exynos_get_pmu_regmap(void);
+struct regmap *exynos_get_pmu_regmap(void);
+struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
+ const char *propname);
#else
static inline struct regmap *exynos_get_pmu_regmap(void)
{
return ERR_PTR(-ENODEV);
}
+
+static inline struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np,
+ const char *propname)
+{
+ return ERR_PTR(-ENODEV);
+}
#endif
#endif /* __LINUX_SOC_EXYNOS_PMU_H */
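
A sketch of the new phandle-based lookup; the property name is hypothetical, and the register offset and bit are purely illustrative:

static int example_enable_usb_phy(struct device_node *np)
{
	struct regmap *pmu;

	/* "samsung,pmu-syscon" is an illustrative property name. */
	pmu = exynos_get_pmu_regmap_by_phandle(np, "samsung,pmu-syscon");
	if (IS_ERR(pmu))
		return PTR_ERR(pmu);

	/* Flip an (illustrative) PHY enable bit via the regmap. */
	return regmap_update_bits(pmu, EXYNOS2200_PHY_CTRL_USB20,
				  BIT(0), BIT(0));
}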
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index fc9250fb3133..532c6c2d1195 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -55,6 +55,8 @@
#define EXYNOS4_MIPI_PHY_SRESETN (1 << 1)
#define EXYNOS4_MIPI_PHY_MRESETN (1 << 2)
#define EXYNOS4_MIPI_PHY_RESET_MASK (3 << 1)
+/* USB PHY enable bit, valid for Exynos7870 */
+#define EXYNOS7870_USB2PHY_ENABLE (1 << 1)
#define S5P_INFORM0 0x0800
#define S5P_INFORM1 0x0804
@@ -185,6 +187,9 @@
/* Only for S5Pv210 */
#define S5PV210_EINT_WAKEUP_MASK 0xC004
+/* Only for Exynos2200 */
+#define EXYNOS2200_PHY_CTRL_USB20 0x72C
+
/* Only for Exynos4210 */
#define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154
#define S5P_CMU_RESET_LCD1_LOWPWR 0x1174
@@ -611,12 +616,6 @@
#define EXYNOS5420_FSYS2_OPTION 0x4168
#define EXYNOS5420_PSGEN_OPTION 0x4188
-/* For EXYNOS_CENTRAL_SEQ_OPTION */
-#define EXYNOS5_USE_STANDBYWFI_ARM_CORE0 BIT(16)
-#define EXYNOS5_USE_STANDBYWFI_ARM_CORE1	BIT(17)
-#define EXYNOS5_USE_STANDBYWFE_ARM_CORE0 BIT(24)
-#define EXYNOS5_USE_STANDBYWFE_ARM_CORE1 BIT(25)
-
#define EXYNOS5420_ARM_USE_STANDBY_WFI0 BIT(4)
#define EXYNOS5420_ARM_USE_STANDBY_WFI1 BIT(5)
#define EXYNOS5420_ARM_USE_STANDBY_WFI2 BIT(6)
@@ -663,4 +662,357 @@
#define EXYNOS5433_PAD_RETENTION_UFS_OPTION (0x3268)
#define EXYNOS5433_PAD_RETENTION_FSYSGENIO_OPTION (0x32A8)
+/* For Exynos990 */
+#define EXYNOS990_PHY_CTRL_USB20 (0x72C)
+
+/* For Exynos7870 */
+#define EXYNOS7870_MIPI_PHY_CONTROL0 (0x070c)
+#define EXYNOS7870_MIPI_PHY_CONTROL1 (0x0714)
+#define EXYNOS7870_MIPI_PHY_CONTROL2 (0x0734)
+
+/* For Tensor GS101 */
+/* PMU ALIVE */
+#define GS101_OM_STAT 0x0000
+#define GS101_VERSION 0x0004
+#define GS101_PORESET_CHECK 0x0008
+#define GS101_OTP_STATUS 0x000c
+#define GS101_SYSTEM_INFO 0x0010
+#define GS101_IDLE_IP(n) (0x03e0 + ((n) & 3) * 4)
+#define GS101_IDLE_IP_MASK(n) (0x03f0 + ((n) & 3) * 4)
+#define GS101_SLC_CH_OFFSET(ch) (0x0400 + ((ch) & 3) * 0x10)
+#define GS101_DATARAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x00)
+#define GS101_TAGRAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x04)
+#define GS101_LRURAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x08)
+#define GS101_PPMPURAM_STATE_SLC_CH(ch) (GS101_SLC_CH_OFFSET(ch) + 0x0c)
+#define GS101_DATARAM_INFORM_SLC_CH(ch)		(GS101_SLC_CH_OFFSET(ch) + 0x40)
+#define GS101_TAGRAM_INFORM_SLC_CH(ch)		(GS101_SLC_CH_OFFSET(ch) + 0x44)
+#define GS101_LRURAM_INFORM_SLC_CH(ch)		(GS101_SLC_CH_OFFSET(ch) + 0x48)
+#define GS101_PPMPURAM_INFORM_SLC_CH(ch)	(GS101_SLC_CH_OFFSET(ch) + 0x4c)
+#define GS101_INFORM0 0x0800
+#define GS101_INFORM1 0x0804
+#define GS101_INFORM2 0x0808
+#define GS101_INFORM3 0x080c
+#define GS101_SYSIP_DAT(n) (0x0810 + ((n) & 3) * 4)
+#define GS101_PWR_HOLD_HW_TRIP 0x0820
+#define GS101_PWR_HOLD_SW_TRIP 0x0824
+#define GS101_GSA_INFORM(n) (0x0830 + ((n) & 1) * 4)
+#define GS101_INFORM4 0x0840
+#define GS101_INFORM5 0x0844
+#define GS101_INFORM6 0x0848
+#define GS101_INFORM7 0x084c
+#define GS101_INFORM8 0x0850
+#define GS101_INFORM9 0x0854
+#define GS101_INFORM10 0x0858
+#define GS101_INFORM11 0x085c
+#define GS101_CPU_INFORM(cpu) (0x0860 + ((cpu) & 7) * 4)
+#define GS101_IROM_INFORM 0x0880
+#define GS101_IROM_CPU_INFORM(cpu) (0x0890 + ((cpu) & 7) * 4)
+#define GS101_PMU_SPARE(n) (0x0900 + ((n) & 3) * 4)
+#define GS101_IROM_DATA_REG(n) (0x0980 + ((n) & 3) * 4)
+#define GS101_IROM_PWRMODE 0x0990
+#define GS101_DREX_CALIBRATION(n) (0x09a0 + ((n) & 7) * 4)
+
+#define GS101_CLUSTER0_OFFSET 0x1000
+#define GS101_CLUSTER1_OFFSET 0x1300
+#define GS101_CLUSTER2_OFFSET 0x1500
+#define GS101_CLUSTER_CPU_OFFSET(cl, cpu) ((cl) + ((cpu) * 0x80))
+#define GS101_CLUSTER_CPU_CONFIGURATION(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x00)
+#define GS101_CLUSTER_CPU_STATUS(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x04)
+#define GS101_CLUSTER_CPU_STATES(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x08)
+#define GS101_CLUSTER_CPU_OPTION(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x0c)
+#define GS101_CLUSTER_CPU_OUT(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x20)
+#define GS101_CLUSTER_CPU_IN(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x24)
+#define GS101_CLUSTER_CPU_INT_IN(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x40)
+#define GS101_CLUSTER_CPU_INT_EN(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x44)
+#define GS101_CLUSTER_CPU_INT_TYPE(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x48)
+#define GS101_CLUSTER_CPU_INT_DIR(cl, cpu) \
+ (GS101_CLUSTER_CPU_OFFSET(cl, cpu) + 0x4c)
+
+#define GS101_CLUSTER_NONCPU_OFFSET(cl) (0x1200 + ((cl) * 0x200))
+#define GS101_CLUSTER_NONCPU_CONFIGURATION(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x00)
+#define GS101_CLUSTER_NONCPU_STATUS(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x04)
+#define GS101_CLUSTER_NONCPU_STATES(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x08)
+#define GS101_CLUSTER_NONCPU_OPTION(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x0c)
+#define GS101_CLUSTER_NONCPU_OUT(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x20)
+#define GS101_CLUSTER_NONCPU_IN(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x24)
+#define GS101_CLUSTER_NONCPU_INT_IN(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x40)
+#define GS101_CLUSTER_NONCPU_INT_EN(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x44)
+#define GS101_CLUSTER_NONCPU_INT_TYPE(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x48)
+#define GS101_CLUSTER_NONCPU_INT_DIR(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x4c)
+#define GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_OUT(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x60)
+#define GS101_CLUSTER_NONCPU_DUALRAIL_POS_OUT(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x64)
+#define GS101_CLUSTER_NONCPU_DUALRAIL_CTRL_IN(cl) \
+ (GS101_CLUSTER_NONCPU_OFFSET(cl) + 0x6c)
+#define GS101_CLUSTER0_NONCPU_DSU_PCH \
+ (GS101_CLUSTER_NONCPU_OFFSET(0) + 0x80)
+
+#define GS101_SUBBBLK_OFFSET_ALIVE 0x1800
+#define GS101_SUBBBLK_OFFSET_AOC 0x1880
+#define GS101_SUBBBLK_OFFSET_APM 0x1900
+#define GS101_SUBBBLK_OFFSET_CMU 0x1980
+#define GS101_SUBBBLK_OFFSET_BUS0 0x1a00
+#define GS101_SUBBBLK_OFFSET_BUS1 0x1a80
+#define GS101_SUBBBLK_OFFSET_BUS2 0x1b00
+#define GS101_SUBBBLK_OFFSET_CORE 0x1b80
+#define GS101_SUBBBLK_OFFSET_EH 0x1c00
+#define GS101_SUBBBLK_OFFSET_CPUCL0 0x1c80
+#define GS101_SUBBBLK_OFFSET_CPUCL1 0x1d00
+#define GS101_SUBBBLK_OFFSET_CPUCL2 0x1d80
+#define GS101_SUBBBLK_OFFSET_G3D 0x1e00
+#define GS101_SUBBBLK_OFFSET_EMBEDDED_CPUCL0 0x1e80
+#define GS101_SUBBBLK_OFFSET_EMBEDDED_G3D 0x2000
+#define GS101_SUBBBLK_OFFSET_HSI0 0x2080
+#define GS101_SUBBBLK_OFFSET_HSI1 0x2100
+#define GS101_SUBBBLK_OFFSET_HSI2 0x2180
+#define GS101_SUBBBLK_OFFSET_DPU 0x2200
+#define GS101_SUBBBLK_OFFSET_DISP 0x2280
+#define GS101_SUBBBLK_OFFSET_G2D 0x2300
+#define GS101_SUBBBLK_OFFSET_MFC 0x2380
+#define GS101_SUBBBLK_OFFSET_CSIS 0x2400
+#define GS101_SUBBBLK_OFFSET_PDP 0x2480
+#define GS101_SUBBBLK_OFFSET_DNS 0x2500
+#define GS101_SUBBBLK_OFFSET_G3AA 0x2580
+#define GS101_SUBBBLK_OFFSET_IPP 0x2600
+#define GS101_SUBBBLK_OFFSET_ITP 0x2680
+#define GS101_SUBBBLK_OFFSET_MCSC 0x2700
+#define GS101_SUBBBLK_OFFSET_GDC 0x2780
+#define GS101_SUBBBLK_OFFSET_TNR 0x2800
+#define GS101_SUBBBLK_OFFSET_BO 0x2880
+#define GS101_SUBBBLK_OFFSET_TPU 0x2900
+#define GS101_SUBBBLK_OFFSET_MIF0 0x2980
+#define GS101_SUBBBLK_OFFSET_MIF1 0x2a00
+#define GS101_SUBBBLK_OFFSET_MIF2 0x2a80
+#define GS101_SUBBBLK_OFFSET_MIF3 0x2b00
+#define GS101_SUBBBLK_OFFSET_MISC 0x2b80
+#define GS101_SUBBBLK_OFFSET_PERIC0 0x2c00
+#define GS101_SUBBBLK_OFFSET_PERIC1 0x2c80
+#define GS101_SUBBBLK_OFFSET_S2D 0x2d00
+#define GS101_SUBBLK_CONFIGURATION(blk) ((blk) + 0x00)
+#define GS101_SUBBLK_STATUS(blk) ((blk) + 0x04)
+#define GS101_SUBBLK_STATES(blk) ((blk) + 0x08)
+#define GS101_SUBBLK_OPTION(blk) ((blk) + 0x0c)
+#define GS101_SUBBLK_CTRL(blk) ((blk) + 0x10)
+#define GS101_SUBBLK_OUT(blk) ((blk) + 0x20)
+#define GS101_SUBBLK_IN(blk) ((blk) + 0x24)
+#define GS101_SUBBLK_INT_IN(blk) ((blk) + 0x40)
+#define GS101_SUBBLK_INT_EN(blk) ((blk) + 0x44)
+#define GS101_SUBBLK_INT_TYPE(blk) ((blk) + 0x48)
+#define GS101_SUBBLK_INT_DIR(blk) ((blk) + 0x4c)
+#define GS101_SUBBLK_MEMORY_OUT(blk) ((blk) + 0x60)
+#define GS101_SUBBLK_MEMORY_IN(blk) ((blk) + 0x64)
+
+#define GS101_SUBBBLK_CPU_OFFSET_APM 0x3000
+#define GS101_SUBBBLK_CPU_OFFSET_DBGCORE 0x3080
+#define GS101_SUBBBLK_CPU_OFFSET_SSS 0x3100
+#define GS101_SUBBLK_CPU_CONFIGURATION(blk) ((blk) + 0x00)
+#define GS101_SUBBLK_CPU_STATUS(blk) ((blk) + 0x04)
+#define GS101_SUBBLK_CPU_STATES(blk) ((blk) + 0x08)
+#define GS101_SUBBLK_CPU_OPTION(blk) ((blk) + 0x0c)
+#define GS101_SUBBLK_CPU_OUT(blk) ((blk) + 0x20)
+#define GS101_SUBBLK_CPU_IN(blk) ((blk) + 0x24)
+#define GS101_SUBBLK_CPU_INT_IN(blk) ((blk) + 0x40)
+#define GS101_SUBBLK_CPU_INT_EN(blk) ((blk) + 0x44)
+#define GS101_SUBBLK_CPU_INT_TYPE(blk) ((blk) + 0x48)
+#define GS101_SUBBLK_CPU_INT_DIR(blk) ((blk) + 0x4c)
+
+#define GS101_MIF_CONFIGURATION 0x3800
+#define GS101_MIF_STATUS 0x3804
+#define GS101_MIF_STATES 0x3808
+#define GS101_MIF_OPTION 0x380c
+#define GS101_MIF_CTRL 0x3810
+#define GS101_MIF_OUT 0x3820
+#define GS101_MIF_IN 0x3824
+#define GS101_MIF_INT_IN 0x3840
+#define GS101_MIF_INT_EN 0x3844
+#define GS101_MIF_INT_TYPE 0x3848
+#define GS101_MIF_INT_DIR 0x384c
+#define GS101_TOP_CONFIGURATION 0x3900
+#define GS101_TOP_STATUS 0x3904
+#define GS101_TOP_STATES 0x3908
+#define GS101_TOP_OPTION 0x390c
+#define GS101_TOP_OUT 0x3920
+#define GS101_TOP_IN 0x3924
+#define GS101_TOP_INT_IN 0x3940
+#define GS101_TOP_INT_EN 0x3944
+#define GS101_TOP_INT_TYPE 0x3948
+#define GS101_TOP_INT_DIR 0x394c
+#define GS101_WAKEUP_STAT 0x3950
+#define GS101_WAKEUP2_STAT 0x3954
+#define GS101_WAKEUP2_INT_IN 0x3960
+#define GS101_WAKEUP2_INT_EN 0x3964
+#define GS101_WAKEUP2_INT_TYPE 0x3968
+#define GS101_WAKEUP2_INT_DIR 0x396c
+#define GS101_SYSTEM_CONFIGURATION 0x3a00
+#define GS101_SYSTEM_STATUS 0x3a04
+#define GS101_SYSTEM_STATES 0x3a08
+#define GS101_SYSTEM_OPTION 0x3a0c
+#define GS101_SYSTEM_CTRL 0x3a10
+#define GS101_SPARE_CTRL 0x3a14
+#define GS101_USER_DEFINED_OUT 0x3a18
+#define GS101_SYSTEM_OUT 0x3a20
+#define GS101_SYSTEM_IN 0x3a24
+#define GS101_SYSTEM_INT_IN 0x3a40
+#define GS101_SYSTEM_INT_EN 0x3a44
+#define GS101_SYSTEM_INT_TYPE 0x3a48
+#define GS101_SYSTEM_INT_DIR 0x3a4c
+#define GS101_EINT_INT_IN 0x3a50
+#define GS101_EINT_INT_EN 0x3a54
+#define GS101_EINT_INT_TYPE 0x3a58
+#define GS101_EINT_INT_DIR 0x3a5c
+#define GS101_EINT2_INT_IN 0x3a60
+#define GS101_EINT2_INT_EN 0x3a64
+#define GS101_EINT2_INT_TYPE 0x3a68
+#define GS101_EINT2_INT_DIR 0x3a6c
+#define GS101_EINT3_INT_IN 0x3a70
+#define GS101_EINT3_INT_EN 0x3a74
+#define GS101_EINT3_INT_TYPE 0x3a78
+#define GS101_EINT3_INT_DIR 0x3a7c
+#define GS101_EINT_WAKEUP_MASK 0x3a80
+#define GS101_EINT_WAKEUP_MASK2 0x3a84
+#define GS101_EINT_WAKEUP_MASK3 0x3a88
+#define GS101_USER_DEFINED_INT_IN 0x3a90
+#define GS101_USER_DEFINED_INT_EN 0x3a94
+#define GS101_USER_DEFINED_INT_TYPE 0x3a98
+#define GS101_USER_DEFINED_INT_DIR 0x3a9c
+#define GS101_SCAN2DRAM_INT_IN 0x3aa0
+#define GS101_SCAN2DRAM_INT_EN 0x3aa4
+#define GS101_SCAN2DRAM_INT_TYPE 0x3aa8
+#define GS101_SCAN2DRAM_INT_DIR 0x3aac
+#define GS101_HCU_START 0x3ab0
+#define GS101_CUSTOM_OUT 0x3ac0
+#define GS101_CUSTOM_IN 0x3ac4
+#define GS101_CUSTOM_INT_IN 0x3ad0
+#define GS101_CUSTOM_INT_EN 0x3ad4
+#define GS101_CUSTOM_INT_TYPE 0x3ad8
+#define GS101_CUSTOM_INT_DIR 0x3adc
+#define GS101_ACK_LAST_CPU 0x3afc
+#define GS101_HCU_R(n) (0x3b00 + ((n) & 3) * 4)
+#define GS101_HCU_SP 0x3b14
+#define GS101_HCU_PC 0x3b18
+#define GS101_PMU_RAM_CTRL 0x3b20
+#define GS101_APM_HCU_CTRL 0x3b24
+#define GS101_APM_NMI_ENABLE 0x3b30
+#define GS101_DBGCORE_NMI_ENABLE 0x3b34
+#define GS101_HCU_NMI_ENABLE 0x3b38
+#define GS101_PWR_HOLD_WDT_ENABLE 0x3b3c
+#define GS101_NMI_SRC_IN 0x3b40
+#define GS101_RST_STAT 0x3b44
+#define GS101_RST_STAT_PMU 0x3b48
+#define GS101_HPM_INT_IN 0x3b60
+#define GS101_HPM_INT_EN 0x3b64
+#define GS101_HPM_INT_TYPE 0x3b68
+#define GS101_HPM_INT_DIR 0x3b6c
+#define GS101_S2D_AUTH 0x3b70
+#define GS101_BOOT_STAT 0x3b74
+#define GS101_PMLINK_OUT 0x3c00
+#define GS101_PMLINK_AOC_OUT 0x3c04
+#define GS101_PMLINK_AOC_CTRL 0x3c08
+#define GS101_TCXO_BUF_CTRL 0x3c10
+#define GS101_ADD_CTRL 0x3c14
+#define GS101_HCU_TIMEOUT_RESET 0x3c20
+#define GS101_HCU_TIMEOUT_SCAN2DRAM 0x3c24
+#define GS101_TIMER(n) (0x3c80 + ((n) & 3) * 4)
+#define GS101_PPC_MIF(n) (0x3c90 + ((n) & 3) * 4)
+#define GS101_PPC_CORE 0x3ca0
+#define GS101_PPC_EH 0x3ca4
+#define GS101_PPC_CPUCL1_0 0x3ca8
+#define GS101_PPC_CPUCL1_1 0x3cac
+#define GS101_EXT_REGULATOR_MIF_DURATION 0x3cb0
+#define GS101_EXT_REGULATOR_TOP_DURATION 0x3cb4
+#define GS101_EXT_REGULATOR_CPUCL2_DURATION 0x3cb8
+#define GS101_EXT_REGULATOR_CPUCL1_DURATION 0x3cbc
+#define GS101_EXT_REGULATOR_G3D_DURATION 0x3cc0
+#define GS101_EXT_REGULATOR_TPU_DURATION 0x3cc4
+#define GS101_TCXO_DURATION 0x3cc8
+#define GS101_BURNIN_CTRL 0x3cd0
+#define GS101_JTAG_DBG_DET 0x3cd4
+#define GS101_MMC_CONWKUP_CTRL 0x3cd8
+#define GS101_USBDPPHY0_USBDP_WAKEUP 0x3cdc
+#define GS101_TMU_TOP_TRIP 0x3ce0
+#define GS101_TMU_SUB_TRIP 0x3ce4
+#define GS101_MEMORY_CEN 0x3d00
+#define GS101_MEMORY_PGEN 0x3d04
+#define GS101_MEMORY_RET 0x3d08
+#define GS101_MEMORY_PGEN_FEEDBACK 0x3d0c
+#define GS101_MEMORY_SMX 0x3d10
+#define GS101_MEMORY_SMX_FEEDBACK 0x3d14
+#define GS101_SLC_PCH_CHANNEL 0x3d20
+#define GS101_SLC_PCH_CB 0x3d24
+#define GS101_FORCE_NOMC 0x3d3c
+#define GS101_FORCE_BOOST 0x3d4c
+#define GS101_PMLINK_SLC_REQ 0x3d50
+#define GS101_PMLINK_SLC_ACK 0x3d54
+#define GS101_PMLINK_SLC_BUSY 0x3d58
+#define GS101_BOOTSYNC_OUT 0x3d80
+#define GS101_BOOTSYNC_IN 0x3d84
+#define GS101_SCAN_READY_OUT 0x3d88
+#define GS101_SCAN_READY_IN 0x3d8c
+#define GS101_GSA_RESTORE 0x3d90
+#define GS101_ALIVE_OTP_LATCH 0x3d94
+#define GS101_DEBUG_OVERRIDE 0x3d98
+#define GS101_WDT_OPTION 0x3d9c
+#define GS101_AOC_WDT_CFG 0x3da0
+#define GS101_CTRL_SECJTAG_ALIVE 0x3da4
+#define GS101_CTRL_DIV_PLL_ALV_DIVLOW 0x3e00
+#define GS101_CTRL_MUX_CLK_APM_REFSRC_AUTORESTORE 0x3e04
+#define GS101_CTRL_MUX_CLK_APM_REFSRC 0x3e08
+#define GS101_CTRL_MUX_CLK_APM_REF 0x3e0c
+#define GS101_CTRL_MUX_PLL_ALV_DIV4 0x3e10
+#define GS101_CTRL_PLL_ALV_DIV4 0x3e14
+#define GS101_CTRL_OSCCLK_APMGSA 0x3e18
+#define GS101_CTRL_BLK_AOC_CLKS 0x3e1c
+#define GS101_CTRL_PLL_ALV_LOCK 0x3e20
+#define GS101_CTRL_CLKDIV__CLKRTC 0x3e24
+#define GS101_CTRL_SOC32K 0x3e30
+#define GS101_CTRL_STM_PMU 0x3e34
+#define GS101_CTRL_PMU_DEBUG 0x3e38
+#define GS101_CTRL_DEBUG_UART 0x3e3c
+#define GS101_CTRL_TCK 0x3e40
+#define GS101_CTRL_SBU_SW_EN 0x3e44
+#define GS101_PAD_CTRL_CLKOUT0 0x3e80
+#define GS101_PAD_CTRL_CLKOUT1 0x3e84
+#define GS101_PAD_CTRL_APM_24MOUT_0 0x3e88
+#define GS101_PAD_CTRL_APM_24MOUT_1 0x3e8c
+#define GS101_PAD_CTRL_IO_FORCE_RETENTION 0x3e90
+#define GS101_PAD_CTRL_APACTIVE_n 0x3e94
+#define GS101_PAD_CTRL_TCXO_ON 0x3e98
+#define GS101_PAD_CTRL_PWR_HOLD 0x3e9c
+#define GS101_PAD_CTRL_RESETO_n 0x3ea0
+#define GS101_PAD_CTRL_WRESETO_n 0x3ea4
+#define GS101_PHY_CTRL_USB20 0x3eb0
+#define GS101_PHY_CTRL_USBDP 0x3eb4
+#define GS101_PHY_CTRL_MIPI_DCPHY_M4M4 0x3eb8
+#define GS101_PHY_CTRL_MIPI_DCPHY_S4S4S4S4 0x3ebc
+#define GS101_PHY_CTRL_PCIE_GEN4_0 0x3ec0
+#define GS101_PHY_CTRL_PCIE_GEN4_1 0x3ec4
+#define GS101_PHY_CTRL_UFS 0x3ec8
+
+/* PMU INTR GEN */
+#define GS101_GRP1_INTR_BID_UPEND (0x0108)
+#define GS101_GRP1_INTR_BID_CLEAR (0x010c)
+#define GS101_GRP2_INTR_BID_ENABLE (0x0200)
+#define GS101_GRP2_INTR_BID_UPEND (0x0208)
+#define GS101_GRP2_INTR_BID_CLEAR (0x020c)
+
#endif /* __LINUX_SOC_EXYNOS_REGS_PMU_H */
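
The per-cluster macros compose offsets rather than enumerating every register. For example, the CPU_STATUS register of core 2 in cluster 1 resolves as below:

/* 0x1300 (cluster 1 base) + 2 * 0x80 (per-core stride) + 0x04 = 0x1404 */
#define EXAMPLE_CL1_CPU2_STATUS \
	GS101_CLUSTER_CPU_STATUS(GS101_CLUSTER1_OFFSET, 2)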
diff --git a/include/linux/soc/samsung/s3c-adc.h b/include/linux/soc/samsung/s3c-adc.h
deleted file mode 100644
index 591c94ef957d..000000000000
--- a/include/linux/soc/samsung/s3c-adc.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C ADC driver information
- */
-
-#ifndef __LINUX_SOC_SAMSUNG_S3C_ADC_H
-#define __LINUX_SOC_SAMSUNG_S3C_ADC_H __FILE__
-
-struct s3c_adc_client;
-struct platform_device;
-
-extern int s3c_adc_start(struct s3c_adc_client *client,
- unsigned int channel, unsigned int nr_samples);
-
-extern int s3c_adc_read(struct s3c_adc_client *client, unsigned int ch);
-
-extern struct s3c_adc_client *
- s3c_adc_register(struct platform_device *pdev,
- void (*select)(struct s3c_adc_client *client,
- unsigned selected),
- void (*conv)(struct s3c_adc_client *client,
- unsigned d0, unsigned d1,
- unsigned *samples_left),
- unsigned int is_ts);
-
-extern void s3c_adc_release(struct s3c_adc_client *client);
-
-#endif /* __LINUX_SOC_SAMSUNG_S3C_ADC_H */
diff --git a/include/linux/soc/samsung/s3c-cpu-freq.h b/include/linux/soc/samsung/s3c-cpu-freq.h
deleted file mode 100644
index 63e88fd5dea2..000000000000
--- a/include/linux/soc/samsung/s3c-cpu-freq.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2006-2007 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C CPU frequency scaling support - driver and board
- */
-#ifndef __LINUX_SOC_SAMSUNG_S3C_CPU_FREQ_H
-#define __LINUX_SOC_SAMSUNG_S3C_CPU_FREQ_H
-
-#include <linux/cpufreq.h>
-
-struct s3c_cpufreq_info;
-struct s3c_cpufreq_board;
-struct s3c_iotimings;
-
-/**
- * struct s3c_freq - frequency information (mainly for core drivers)
- * @fclk: The FCLK frequency in Hz.
- * @armclk: The ARMCLK frequency in Hz.
- * @hclk_tns: HCLK cycle time in 10ths of nano-seconds.
- * @hclk: The HCLK frequency in Hz.
- * @pclk: The PCLK frequency in Hz.
- *
- * This contains the frequency information about the current configuration
- * mainly for the core drivers to ensure we do not end up passing about
- * a large number of parameters.
- *
- * The @hclk_tns field is a useful cache for the parts of the drivers that
- * need to calculate IO timings and suchlike.
- */
-struct s3c_freq {
- unsigned long fclk;
- unsigned long armclk;
- unsigned long hclk_tns; /* in 10ths of ns */
- unsigned long hclk;
- unsigned long pclk;
-};
-
-/**
- * struct s3c_cpufreq_freqs - s3c cpufreq notification information.
- * @freqs: The cpufreq setting information.
- * @old: The old clock settings.
- * @new: The new clock settings.
- * @pll_changing: Set if the PLL is changing.
- *
- * Wrapper around 'struct cpufreq_freqs' so that any drivers receiving the
- * notification can use this information that is not provided by just
- * having the core frequency alone.
- *
- * The pll_changing flag is used to indicate if the PLL itself is
- * being set during this change. This is important as the clocks
- * will temporarily be set to the XTAL clock during this time, so
- * drivers may want to close down their output during this time.
- *
- * Note, this is not being used by any current drivers and therefore
- * may be removed in the future.
- */
-struct s3c_cpufreq_freqs {
- struct cpufreq_freqs freqs;
- struct s3c_freq old;
- struct s3c_freq new;
-
- unsigned int pll_changing:1;
-};
-
-#define to_s3c_cpufreq(_cf) container_of(_cf, struct s3c_cpufreq_freqs, freqs)
-
-/**
- * struct s3c_clkdivs - clock divisor information
- * @p_divisor: Divisor from FCLK to PCLK.
- * @h_divisor: Divisor from FCLK to HCLK.
- * @arm_divisor: Divisor from FCLK to ARMCLK (not all CPUs).
- * @dvs: Non-zero if using DVS mode for ARMCLK.
- *
- * Divisor settings for the core clocks.
- */
-struct s3c_clkdivs {
- int p_divisor;
- int h_divisor;
- int arm_divisor;
- unsigned char dvs;
-};
-
-#define PLLVAL(_m, _p, _s) (((_m) << 12) | ((_p) << 4) | (_s))
-
-/**
- * struct s3c_pllval - PLL value entry.
- * @freq: The frequency for this entry in Hz.
- * @pll_reg: The PLL register setting for this PLL value.
- */
-struct s3c_pllval {
- unsigned long freq;
- unsigned long pll_reg;
-};
-
-/**
- * struct s3c_cpufreq_board - per-board cpu frequency information
- * @refresh: The SDRAM refresh period in nanoseconds.
- * @auto_io: Set if the IO timing settings should be generated from the
- * initialisation time hardware registers.
- * @need_io: Set if the board has external IO on any of the chipselect
- * lines that will require the hardware timing registers to be
- * updated on a clock change.
- * @max:	The maximum frequency limits for the system. Any field that
- * is left at zero will use the CPU's settings.
- *
- * This contains the board specific settings that affect how the CPU
- * drivers chose settings. These include the memory refresh and IO
- * timing information.
- *
- * Registration depends on the driver being used, the ARMCLK only
- * implementation does not currently need this but the older style
- * driver requires this to be available.
- */
-struct s3c_cpufreq_board {
- unsigned int refresh;
- unsigned int auto_io:1; /* automatically init io timings. */
- unsigned int need_io:1; /* set if needs io timing support. */
-
- /* any non-zero field in here is taken as an upper limit. */
- struct s3c_freq max; /* frequency limits */
-};
-
-/* Things depending on frequency scaling. */
-#ifdef CONFIG_ARM_S3C_CPUFREQ
-#define __init_or_cpufreq
-#else
-#define __init_or_cpufreq __init
-#endif
-
-/* Board functions */
-
-#ifdef CONFIG_ARM_S3C_CPUFREQ
-extern int s3c_cpufreq_setboard(struct s3c_cpufreq_board *board);
-#else
-
-static inline int s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
-{
- return 0;
-}
-#endif /* CONFIG_ARM_S3C_CPUFREQ */
-
-#endif
diff --git a/include/linux/soc/samsung/s3c-cpufreq-core.h b/include/linux/soc/samsung/s3c-cpufreq-core.h
deleted file mode 100644
index 3b278afb769b..000000000000
--- a/include/linux/soc/samsung/s3c-cpufreq-core.h
+++ /dev/null
@@ -1,299 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2006-2009 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C CPU frequency scaling support - core support
- */
-#ifndef __LINUX_SOC_SAMSUNG_S3C_CPUFREQ_CORE_H
-#define __LINUX_SOC_SAMSUNG_S3C_CPUFREQ_CORE_H
-
-#include <linux/soc/samsung/s3c-cpu-freq.h>
-
-struct seq_file;
-
-#define MAX_BANKS (8)
-#define S3C2412_MAX_IO (8)
-
-/**
- * struct s3c2410_iobank_timing - IO bank timings for S3C2410 style timings
- * @bankcon: The cached version of settings in this structure.
- * @tacp:
- * @tacs: Time from address valid to nCS asserted.
- * @tcos: Time from nCS asserted to nOE or nWE asserted.
- * @tacc: Time that nOE or nWE is asserted.
- * @tcoh: Time nCS is held after nOE or nWE are released.
- * @tcah: Time the address is held after nCS is released.
- * @nwait_en: Whether nWAIT is enabled for this bank.
- *
- * This structure represents the IO timings for a S3C2410 style IO bank
- * used by the CPU frequency support if it needs to change the settings
- * of the IO.
- */
-struct s3c2410_iobank_timing {
- unsigned long bankcon;
- unsigned int tacp;
- unsigned int tacs;
- unsigned int tcos;
- unsigned int tacc;
- unsigned int tcoh; /* nCS hold after nOE/nWE */
- unsigned int tcah; /* Address hold after nCS */
- unsigned char nwait_en; /* nWait enabled for bank. */
-};
-
-/**
- * struct s3c2412_iobank_timing - io timings for PL092 (S3C2412) style IO
- * @idcy: The idle cycle time between transactions.
- * @wstrd: nCS release to end of read cycle.
- * @wstwr: nCS release to end of write cycle.
- * @wstoen: nCS assertion to nOE assertion time.
- * @wstwen: nCS assertion to nWE assertion time.
- * @wstbrd: Burst ready delay.
- * @smbidcyr: Register cache for smbidcyr value.
- * @smbwstrd: Register cache for smbwstrd value.
- * @smbwstwr: Register cache for smbwstwr value.
- * @smbwstoen: Register cache for smbwstoen value.
- * @smbwstwen: Register cache for smbwstwen value.
- * @smbwstbrd: Register cache for smbwstbrd value.
- *
- * Timing information for an IO bank on an S3C2412 or similar system which
- * uses a PL093 block.
- */
-struct s3c2412_iobank_timing {
- unsigned int idcy;
- unsigned int wstrd;
- unsigned int wstwr;
- unsigned int wstoen;
- unsigned int wstwen;
- unsigned int wstbrd;
-
- /* register cache */
- unsigned char smbidcyr;
- unsigned char smbwstrd;
- unsigned char smbwstwr;
- unsigned char smbwstoen;
- unsigned char smbwstwen;
- unsigned char smbwstbrd;
-};
-
-union s3c_iobank {
- struct s3c2410_iobank_timing *io_2410;
- struct s3c2412_iobank_timing *io_2412;
-};
-
-/**
- * struct s3c_iotimings - Chip IO timings holder
- * @bank: The timings for each IO bank.
- */
-struct s3c_iotimings {
- union s3c_iobank bank[MAX_BANKS];
-};
-
-/**
- * struct s3c_plltab - PLL table information.
- * @vals: List of PLL values.
- * @size: Size of the PLL table @vals.
- */
-struct s3c_plltab {
- struct s3c_pllval *vals;
- int size;
-};
-
-/**
- * struct s3c_cpufreq_config - current cpu frequency configuration
- * @freq: The current settings for the core clocks.
- * @max: Maximum settings, derived from core, board and user settings.
- * @pll: The PLL table entry for the current PLL settings.
- * @divs: The divisor settings for the core clocks.
- * @info: The current core driver information.
- * @board: The information for the board we are running on.
- * @lock_pll: Set if the PLL settings cannot be changed.
- *
- * This is for the core drivers that need to know information about
- * the current settings and values. It should not be needed by any
- * device drivers.
-*/
-struct s3c_cpufreq_config {
- struct s3c_freq freq;
- struct s3c_freq max;
- struct clk *mpll;
- struct cpufreq_frequency_table pll;
- struct s3c_clkdivs divs;
- struct s3c_cpufreq_info *info; /* for core, not drivers */
- struct s3c_cpufreq_board *board;
-
- unsigned int lock_pll:1;
-};
-
-/**
- * struct s3c_cpufreq_info - Information for the CPU frequency driver.
- * @name: The name of this implementation.
- * @max: The maximum frequencies for the system.
- * @latency: Transition latency to give to cpufreq.
- * @locktime_m: The lock-time in uS for the MPLL.
- * @locktime_u: The lock-time in uS for the UPLL.
- * @locktime_bits: The number of bits in each LOCKTIME field.
- * @need_pll: Set if this driver needs to change the PLL values to achieve
- * any frequency changes. This is really only needed by devices like the
- * S3C2410 where there is no or limited divider between the PLL and the
- * ARMCLK.
- * @get_iotiming: Get the current IO timing data, mainly for use at start.
- * @set_iotiming: Update the IO timings from the cached copies calculated
- * from the @calc_iotiming entry when changing the frequency.
- * @calc_iotiming: Calculate and update the cached copies of the IO timings
- * from the newly calculated frequencies.
- * @calc_freqtable: Calculate (fill in) the given frequency table from the
- * current frequency configuration. If the table passed in is NULL,
- * then the return is the number of elements to be filled for allocation
- * of the table.
- * @set_refresh: Set the memory refresh configuration.
- * @set_fvco: Set the PLL frequencies.
- * @set_divs: Update the clock divisors.
- * @calc_divs: Calculate the clock divisors.
- */
-struct s3c_cpufreq_info {
- const char *name;
- struct s3c_freq max;
-
- unsigned int latency;
-
- unsigned int locktime_m;
- unsigned int locktime_u;
- unsigned char locktime_bits;
-
- unsigned int need_pll:1;
-
- /* driver routines */
-
- int (*get_iotiming)(struct s3c_cpufreq_config *cfg,
- struct s3c_iotimings *timings);
-
- void (*set_iotiming)(struct s3c_cpufreq_config *cfg,
- struct s3c_iotimings *timings);
-
- int (*calc_iotiming)(struct s3c_cpufreq_config *cfg,
- struct s3c_iotimings *timings);
-
- int (*calc_freqtable)(struct s3c_cpufreq_config *cfg,
- struct cpufreq_frequency_table *t,
- size_t table_size);
-
- void (*debug_io_show)(struct seq_file *seq,
- struct s3c_cpufreq_config *cfg,
- union s3c_iobank *iob);
-
- void (*set_refresh)(struct s3c_cpufreq_config *cfg);
- void (*set_fvco)(struct s3c_cpufreq_config *cfg);
- void (*set_divs)(struct s3c_cpufreq_config *cfg);
- int (*calc_divs)(struct s3c_cpufreq_config *cfg);
-};
-
-extern int s3c_cpufreq_register(struct s3c_cpufreq_info *info);
-
-extern int s3c_plltab_register(struct cpufreq_frequency_table *plls,
- unsigned int plls_no);
-
-/* exports and utilities for debugfs */
-extern struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void);
-extern struct s3c_iotimings *s3c_cpufreq_getiotimings(void);
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS
-#define s3c_cpufreq_debugfs_call(x) x
-#else
-#define s3c_cpufreq_debugfs_call(x) NULL
-#endif
-
-/* Useful utility functions. */
-
-extern struct clk *s3c_cpufreq_clk_get(struct device *, const char *);
-
-/* S3C2410 and compatible exported functions */
-
-extern void s3c2410_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg);
-extern void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg);
-
-#ifdef CONFIG_S3C2410_IOTIMING
-extern void s3c2410_iotiming_debugfs(struct seq_file *seq,
- struct s3c_cpufreq_config *cfg,
- union s3c_iobank *iob);
-
-extern int s3c2410_iotiming_calc(struct s3c_cpufreq_config *cfg,
- struct s3c_iotimings *iot);
-
-extern int s3c2410_iotiming_get(struct s3c_cpufreq_config *cfg,
- struct s3c_iotimings *timings);
-
-extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg,
- struct s3c_iotimings *iot);
-#else
-#define s3c2410_iotiming_debugfs NULL
-#define s3c2410_iotiming_calc NULL
-#define s3c2410_iotiming_get NULL
-#define s3c2410_iotiming_set NULL
-#endif /* CONFIG_S3C2410_IOTIMING */
-
-/* S3C2412 compatible routines */
-
-#ifdef CONFIG_S3C2412_IOTIMING
-extern void s3c2412_iotiming_debugfs(struct seq_file *seq,
- struct s3c_cpufreq_config *cfg,
- union s3c_iobank *iob);
-
-extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg,
- struct s3c_iotimings *timings);
-
-extern int s3c2412_iotiming_calc(struct s3c_cpufreq_config *cfg,
- struct s3c_iotimings *iot);
-
-extern void s3c2412_iotiming_set(struct s3c_cpufreq_config *cfg,
- struct s3c_iotimings *iot);
-extern void s3c2412_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg);
-#else
-#define s3c2412_iotiming_debugfs NULL
-#define s3c2412_iotiming_calc NULL
-#define s3c2412_iotiming_get NULL
-#define s3c2412_iotiming_set NULL
-#endif /* CONFIG_S3C2412_IOTIMING */
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ_DEBUG
-#define s3c_freq_dbg(x...) printk(KERN_INFO x)
-#else
-#define s3c_freq_dbg(x...) do { if (0) printk(x); } while (0)
-#endif /* CONFIG_ARM_S3C24XX_CPUFREQ_DEBUG */
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ_IODEBUG
-#define s3c_freq_iodbg(x...) printk(KERN_INFO x)
-#else
-#define s3c_freq_iodbg(x...) do { if (0) printk(x); } while (0)
-#endif /* CONFIG_ARM_S3C24XX_CPUFREQ_IODEBUG */
-
-static inline int s3c_cpufreq_addfreq(struct cpufreq_frequency_table *table,
- int index, size_t table_size,
- unsigned int freq)
-{
- if (index < 0)
- return index;
-
- if (table) {
- if (index >= table_size)
- return -ENOMEM;
-
- s3c_freq_dbg("%s: { %d = %u kHz }\n",
- __func__, index, freq);
-
- table[index].driver_data = index;
- table[index].frequency = freq;
- }
-
- return index + 1;
-}
-
-u32 s3c2440_read_camdivn(void);
-void s3c2440_write_camdivn(u32 camdiv);
-u32 s3c24xx_read_clkdivn(void);
-void s3c24xx_write_clkdivn(u32 clkdiv);
-u32 s3c24xx_read_mpllcon(void);
-void s3c24xx_write_locktime(u32 locktime);
-
-#endif
diff --git a/include/linux/soc/samsung/s3c-pm.h b/include/linux/soc/samsung/s3c-pm.h
index f9164559c99f..5b23d85d20ab 100644
--- a/include/linux/soc/samsung/s3c-pm.h
+++ b/include/linux/soc/samsung/s3c-pm.h
@@ -14,58 +14,10 @@
/* PM debug functions */
-/**
- * struct pm_uart_save - save block for core UART
- * @ulcon: Save value for S3C2410_ULCON
- * @ucon: Save value for S3C2410_UCON
- * @ufcon: Save value for S3C2410_UFCON
- * @umcon: Save value for S3C2410_UMCON
- * @ubrdiv: Save value for S3C2410_UBRDIV
- *
- * Save block for UART registers to be held over sleep and restored if they
- * are needed (say by debug).
-*/
-struct pm_uart_save {
- u32 ulcon;
- u32 ucon;
- u32 ufcon;
- u32 umcon;
- u32 ubrdiv;
- u32 udivslot;
-};
-
-#ifdef CONFIG_SAMSUNG_PM_DEBUG
-/**
- * s3c_pm_dbg() - low level debug function for use in suspend/resume.
- * @msg: The message to print.
- *
- * This function is used mainly to debug the resume process before the system
- * can rely on printk/console output. It uses the low-level debugging output
- * routine printascii() to do its work.
- */
-extern void s3c_pm_dbg(const char *msg, ...);
-
-#define S3C_PMDBG(fmt...) s3c_pm_dbg(fmt)
-
-extern void s3c_pm_save_uarts(bool is_s3c24xx);
-extern void s3c_pm_restore_uarts(bool is_s3c24xx);
-
-#ifdef CONFIG_ARCH_S3C64XX
-extern void s3c_pm_arch_update_uart(void __iomem *regs,
- struct pm_uart_save *save);
-#else
-static inline void
-s3c_pm_arch_update_uart(void __iomem *regs, struct pm_uart_save *save)
-{
-}
-#endif
-
-#else
#define S3C_PMDBG(fmt...) pr_debug(fmt)
static inline void s3c_pm_save_uarts(bool is_s3c24xx) { }
static inline void s3c_pm_restore_uarts(bool is_s3c24xx) { }
-#endif
/* suspend memory checking */
@@ -81,14 +33,4 @@ extern void s3c_pm_check_store(void);
#define s3c_pm_check_store() do { } while (0)
#endif
-/* system device subsystems */
-
-extern struct bus_type s3c2410_subsys;
-extern struct bus_type s3c2410a_subsys;
-extern struct bus_type s3c2412_subsys;
-extern struct bus_type s3c2416_subsys;
-extern struct bus_type s3c2440_subsys;
-extern struct bus_type s3c2442_subsys;
-extern struct bus_type s3c2443_subsys;
-
#endif
diff --git a/include/linux/soc/sunxi/sunxi_sram.h b/include/linux/soc/sunxi/sunxi_sram.h
index c5f663bba9c2..60e274d1b821 100644
--- a/include/linux/soc/sunxi/sunxi_sram.h
+++ b/include/linux/soc/sunxi/sunxi_sram.h
@@ -14,6 +14,6 @@
#define _SUNXI_SRAM_H_
int sunxi_sram_claim(struct device *dev);
-int sunxi_sram_release(struct device *dev);
+void sunxi_sram_release(struct device *dev);
#endif /* _SUNXI_SRAM_H_ */
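
With release now returning void, teardown paths cannot fail on it; a sketch of the claim/use/release pattern:

static int example_use_sram(struct device *dev)
{
	int ret = sunxi_sram_claim(dev);

	if (ret)
		return ret;

	/* ... the device owns its SRAM section here ... */

	sunxi_sram_release(dev);	/* void: nothing to check on teardown */
	return 0;
}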
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
index 7127ec301537..18d806a8e52c 100644
--- a/include/linux/soc/ti/knav_dma.h
+++ b/include/linux/soc/ti/knav_dma.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2014 Texas Instruments Incorporated
* Authors:	Sandeep Nair <sandeep_n@ti.com>
*		Cyril Chemparathy <cyril@ti.com>
*		Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h
index c75ef99c99ca..175f466ebcc3 100644
--- a/include/linux/soc/ti/knav_qmss.h
+++ b/include/linux/soc/ti/knav_qmss.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Keystone Navigator Queue Management Sub-System header
*
@@ -5,15 +6,6 @@
* Author: Sandeep Nair <sandeep_n@ti.com>
* Cyril Chemparathy <cyril@ti.com>
* Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __SOC_TI_KNAV_QMSS_H__
diff --git a/include/linux/soc/ti/omap1-io.h b/include/linux/soc/ti/omap1-io.h
new file mode 100644
index 000000000000..9a60f45899d3
--- /dev/null
+++ b/include/linux/soc/ti/omap1-io.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_ARCH_OMAP_IO_H
+#define __ASM_ARCH_OMAP_IO_H
+
+#ifndef __ASSEMBLER__
+#include <linux/types.h>
+
+#ifdef CONFIG_ARCH_OMAP1
+/*
+ * NOTE: Please use ioremap + __raw_read/write where possible instead of these
+ */
+extern u8 omap_readb(u32 pa);
+extern u16 omap_readw(u32 pa);
+extern u32 omap_readl(u32 pa);
+extern void omap_writeb(u8 v, u32 pa);
+extern void omap_writew(u16 v, u32 pa);
+extern void omap_writel(u32 v, u32 pa);
+#elif defined(CONFIG_COMPILE_TEST)
+static inline u8 omap_readb(u32 pa) { return 0; }
+static inline u16 omap_readw(u32 pa) { return 0; }
+static inline u32 omap_readl(u32 pa) { return 0; }
+static inline void omap_writeb(u8 v, u32 pa) { }
+static inline void omap_writew(u16 v, u32 pa) { }
+static inline void omap_writel(u32 v, u32 pa) { }
+#endif
+#endif
+
+/*
+ * ----------------------------------------------------------------------------
+ * System control registers
+ * ----------------------------------------------------------------------------
+ */
+#define MOD_CONF_CTRL_0 0xfffe1080
+#define MOD_CONF_CTRL_1 0xfffe1110
+
+/*
+ * ---------------------------------------------------------------------------
+ * ULPD
+ * ---------------------------------------------------------------------------
+ */
+#define ULPD_REG_BASE (0xfffe0800)
+#define ULPD_IT_STATUS (ULPD_REG_BASE + 0x14)
+#define ULPD_SETUP_ANALOG_CELL_3 (ULPD_REG_BASE + 0x24)
+#define ULPD_CLOCK_CTRL (ULPD_REG_BASE + 0x30)
+# define DIS_USB_PVCI_CLK (1 << 5) /* no USB/FAC synch */
+# define USB_MCLK_EN (1 << 4) /* enable W4_USB_CLKO */
+#define ULPD_SOFT_REQ (ULPD_REG_BASE + 0x34)
+# define SOFT_UDC_REQ (1 << 4)
+# define SOFT_USB_CLK_REQ (1 << 3)
+# define SOFT_DPLL_REQ (1 << 0)
+#define ULPD_DPLL_CTRL (ULPD_REG_BASE + 0x3c)
+#define ULPD_STATUS_REQ (ULPD_REG_BASE + 0x40)
+#define ULPD_APLL_CTRL (ULPD_REG_BASE + 0x4c)
+#define ULPD_POWER_CTRL (ULPD_REG_BASE + 0x50)
+#define ULPD_SOFT_DISABLE_REQ_REG (ULPD_REG_BASE + 0x68)
+# define DIS_MMC2_DPLL_REQ (1 << 11)
+# define DIS_MMC1_DPLL_REQ (1 << 10)
+# define DIS_UART3_DPLL_REQ (1 << 9)
+# define DIS_UART2_DPLL_REQ (1 << 8)
+# define DIS_UART1_DPLL_REQ (1 << 7)
+# define DIS_USB_HOST_DPLL_REQ (1 << 6)
+#define ULPD_SDW_CLK_DIV_CTRL_SEL (ULPD_REG_BASE + 0x74)
+#define ULPD_CAM_CLK_CTRL (ULPD_REG_BASE + 0x7c)
+
+/*
+ * ----------------------------------------------------------------------------
+ * Clocks
+ * ----------------------------------------------------------------------------
+ */
+#define CLKGEN_REG_BASE (0xfffece00)
+#define ARM_CKCTL (CLKGEN_REG_BASE + 0x0)
+#define ARM_IDLECT1 (CLKGEN_REG_BASE + 0x4)
+#define ARM_IDLECT2 (CLKGEN_REG_BASE + 0x8)
+#define ARM_EWUPCT (CLKGEN_REG_BASE + 0xC)
+#define ARM_RSTCT1 (CLKGEN_REG_BASE + 0x10)
+#define ARM_RSTCT2 (CLKGEN_REG_BASE + 0x14)
+#define ARM_SYSST (CLKGEN_REG_BASE + 0x18)
+#define ARM_IDLECT3 (CLKGEN_REG_BASE + 0x24)
+
+#define CK_RATEF 1
+#define CK_IDLEF 2
+#define CK_ENABLEF 4
+#define CK_SELECTF 8
+#define SETARM_IDLE_SHIFT
+
+/* DPLL control registers */
+#define DPLL_CTL (0xfffecf00)
+
+/* DSP clock control. Must use __raw_readw() and __raw_writew() with these */
+#define DSP_CONFIG_REG_BASE IOMEM(0xe1008000)
+#define DSP_CKCTL (DSP_CONFIG_REG_BASE + 0x0)
+#define DSP_IDLECT1 (DSP_CONFIG_REG_BASE + 0x4)
+#define DSP_IDLECT2 (DSP_CONFIG_REG_BASE + 0x8)
+#define DSP_RSTCT2 (DSP_CONFIG_REG_BASE + 0x14)
+
+/*
+ * ----------------------------------------------------------------------------
+ * Pulse-Width Light
+ * ----------------------------------------------------------------------------
+ */
+#define OMAP_PWL_BASE 0xfffb5800
+#define OMAP_PWL_ENABLE (OMAP_PWL_BASE + 0x00)
+#define OMAP_PWL_CLK_ENABLE (OMAP_PWL_BASE + 0x04)
+
+/*
+ * ----------------------------------------------------------------------------
+ * Pin multiplexing registers
+ * ----------------------------------------------------------------------------
+ */
+#define FUNC_MUX_CTRL_0 0xfffe1000
+#define FUNC_MUX_CTRL_1 0xfffe1004
+#define FUNC_MUX_CTRL_2 0xfffe1008
+#define COMP_MODE_CTRL_0 0xfffe100c
+#define FUNC_MUX_CTRL_3 0xfffe1010
+#define FUNC_MUX_CTRL_4 0xfffe1014
+#define FUNC_MUX_CTRL_5 0xfffe1018
+#define FUNC_MUX_CTRL_6 0xfffe101C
+#define FUNC_MUX_CTRL_7 0xfffe1020
+#define FUNC_MUX_CTRL_8 0xfffe1024
+#define FUNC_MUX_CTRL_9 0xfffe1028
+#define FUNC_MUX_CTRL_A 0xfffe102C
+#define FUNC_MUX_CTRL_B 0xfffe1030
+#define FUNC_MUX_CTRL_C 0xfffe1034
+#define FUNC_MUX_CTRL_D 0xfffe1038
+#define PULL_DWN_CTRL_0 0xfffe1040
+#define PULL_DWN_CTRL_1 0xfffe1044
+#define PULL_DWN_CTRL_2 0xfffe1048
+#define PULL_DWN_CTRL_3 0xfffe104c
+#define PULL_DWN_CTRL_4 0xfffe10ac
+
+/* OMAP-1610 specific multiplexing registers */
+#define FUNC_MUX_CTRL_E 0xfffe1090
+#define FUNC_MUX_CTRL_F 0xfffe1094
+#define FUNC_MUX_CTRL_10 0xfffe1098
+#define FUNC_MUX_CTRL_11 0xfffe109c
+#define FUNC_MUX_CTRL_12 0xfffe10a0
+#define PU_PD_SEL_0 0xfffe10b4
+#define PU_PD_SEL_1 0xfffe10b8
+#define PU_PD_SEL_2 0xfffe10bc
+#define PU_PD_SEL_3 0xfffe10c0
+#define PU_PD_SEL_4 0xfffe10c4
+
+#endif
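
A sketch of the accessor pattern the header itself discourages (ioremap plus __raw_* is preferred); it turns on the USB clock output via ULPD_CLOCK_CTRL:

static void example_enable_usb_mclk(void)
{
	u32 v = omap_readl(ULPD_CLOCK_CTRL);

	omap_writel(v | USB_MCLK_EN, ULPD_CLOCK_CTRL);	/* gate W4_USB_CLKO on */
}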
diff --git a/include/linux/soc/ti/omap1-mux.h b/include/linux/soc/ti/omap1-mux.h
new file mode 100644
index 000000000000..59c239b5569c
--- /dev/null
+++ b/include/linux/soc/ti/omap1-mux.h
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef __SOC_TI_OMAP1_MUX_H
+#define __SOC_TI_OMAP1_MUX_H
+/*
+ * This should not really be a global header; it reflects the
+ * traditional way that omap1 does pin muxing without the
+ * pinctrl subsystem.
+ */
+
+enum omap7xx_index {
+ /* OMAP 730 keyboard */
+ E2_7XX_KBR0,
+ J7_7XX_KBR1,
+ E1_7XX_KBR2,
+ F3_7XX_KBR3,
+ D2_7XX_KBR4,
+ C2_7XX_KBC0,
+ D3_7XX_KBC1,
+ E4_7XX_KBC2,
+ F4_7XX_KBC3,
+ E3_7XX_KBC4,
+
+ /* USB */
+ AA17_7XX_USB_DM,
+ W16_7XX_USB_PU_EN,
+ W17_7XX_USB_VBUSI,
+ W18_7XX_USB_DMCK_OUT,
+ W19_7XX_USB_DCRST,
+
+ /* MMC */
+ MMC_7XX_CMD,
+ MMC_7XX_CLK,
+ MMC_7XX_DAT0,
+
+ /* I2C */
+ I2C_7XX_SCL,
+ I2C_7XX_SDA,
+
+ /* SPI */
+ SPI_7XX_1,
+ SPI_7XX_2,
+ SPI_7XX_3,
+ SPI_7XX_4,
+ SPI_7XX_5,
+ SPI_7XX_6,
+
+ /* UART */
+ UART_7XX_1,
+ UART_7XX_2,
+};
+
+enum omap1xxx_index {
+ /* UART1 (BT_UART_GATING)*/
+ UART1_TX = 0,
+ UART1_RTS,
+
+ /* UART2 (COM_UART_GATING)*/
+ UART2_TX,
+ UART2_RX,
+ UART2_CTS,
+ UART2_RTS,
+
+ /* UART3 (GIGA_UART_GATING) */
+ UART3_TX,
+ UART3_RX,
+ UART3_CTS,
+ UART3_RTS,
+ UART3_CLKREQ,
+ UART3_BCLK, /* 12MHz clock out */
+ Y15_1610_UART3_RTS,
+
+ /* PWT & PWL */
+ PWT,
+ PWL,
+
+ /* USB master generic */
+ R18_USB_VBUS,
+ R18_1510_USB_GPIO0,
+ W4_USB_PUEN,
+ W4_USB_CLKO,
+ W4_USB_HIGHZ,
+ W4_GPIO58,
+
+ /* USB1 master */
+ USB1_SUSP,
+ USB1_SEO,
+ W13_1610_USB1_SE0,
+ USB1_TXEN,
+ USB1_TXD,
+ USB1_VP,
+ USB1_VM,
+ USB1_RCV,
+ USB1_SPEED,
+ R13_1610_USB1_SPEED,
+ R13_1710_USB1_SE0,
+
+ /* USB2 master */
+ USB2_SUSP,
+ USB2_VP,
+ USB2_TXEN,
+ USB2_VM,
+ USB2_RCV,
+ USB2_SEO,
+ USB2_TXD,
+
+ /* OMAP-1510 GPIO */
+ R18_1510_GPIO0,
+ R19_1510_GPIO1,
+ M14_1510_GPIO2,
+
+ /* OMAP1610 GPIO */
+ P18_1610_GPIO3,
+ Y15_1610_GPIO17,
+
+ /* OMAP-1710 GPIO */
+ R18_1710_GPIO0,
+ V2_1710_GPIO10,
+ N21_1710_GPIO14,
+ W15_1710_GPIO40,
+
+ /* MPUIO */
+ MPUIO2,
+ N15_1610_MPUIO2,
+ MPUIO4,
+ MPUIO5,
+ T20_1610_MPUIO5,
+ W11_1610_MPUIO6,
+ V10_1610_MPUIO7,
+ W11_1610_MPUIO9,
+ V10_1610_MPUIO10,
+ W10_1610_MPUIO11,
+ E20_1610_MPUIO13,
+ U20_1610_MPUIO14,
+ E19_1610_MPUIO15,
+
+ /* MCBSP2 */
+ MCBSP2_CLKR,
+ MCBSP2_CLKX,
+ MCBSP2_DR,
+ MCBSP2_DX,
+ MCBSP2_FSR,
+ MCBSP2_FSX,
+
+ /* MCBSP3 */
+ MCBSP3_CLKX,
+
+ /* Misc ballouts */
+ BALLOUT_V8_ARMIO3,
+ N20_HDQ,
+
+ /* OMAP-1610 MMC2 */
+ W8_1610_MMC2_DAT0,
+ V8_1610_MMC2_DAT1,
+ W15_1610_MMC2_DAT2,
+ R10_1610_MMC2_DAT3,
+ Y10_1610_MMC2_CLK,
+ Y8_1610_MMC2_CMD,
+ V9_1610_MMC2_CMDDIR,
+ V5_1610_MMC2_DATDIR0,
+ W19_1610_MMC2_DATDIR1,
+ R18_1610_MMC2_CLKIN,
+
+ /* OMAP-1610 External Trace Interface */
+ M19_1610_ETM_PSTAT0,
+ L15_1610_ETM_PSTAT1,
+ L18_1610_ETM_PSTAT2,
+ L19_1610_ETM_D0,
+ J19_1610_ETM_D6,
+ J18_1610_ETM_D7,
+
+ /* OMAP16XX GPIO */
+ P20_1610_GPIO4,
+ V9_1610_GPIO7,
+ W8_1610_GPIO9,
+ N20_1610_GPIO11,
+ N19_1610_GPIO13,
+ P10_1610_GPIO22,
+ V5_1610_GPIO24,
+ AA20_1610_GPIO_41,
+ W19_1610_GPIO48,
+ M7_1610_GPIO62,
+ V14_16XX_GPIO37,
+ R9_16XX_GPIO18,
+ L14_16XX_GPIO49,
+
+ /* OMAP-1610 uWire */
+ V19_1610_UWIRE_SCLK,
+ U18_1610_UWIRE_SDI,
+ W21_1610_UWIRE_SDO,
+ N14_1610_UWIRE_CS0,
+ P15_1610_UWIRE_CS3,
+ N15_1610_UWIRE_CS1,
+
+ /* OMAP-1610 SPI */
+ U19_1610_SPIF_SCK,
+ U18_1610_SPIF_DIN,
+ P20_1610_SPIF_DIN,
+ W21_1610_SPIF_DOUT,
+ R18_1610_SPIF_DOUT,
+ N14_1610_SPIF_CS0,
+ N15_1610_SPIF_CS1,
+ T19_1610_SPIF_CS2,
+ P15_1610_SPIF_CS3,
+
+ /* OMAP-1610 Flash */
+ L3_1610_FLASH_CS2B_OE,
+ M8_1610_FLASH_CS2B_WE,
+
+ /* First MMC */
+ MMC_CMD,
+ MMC_DAT1,
+ MMC_DAT2,
+ MMC_DAT0,
+ MMC_CLK,
+ MMC_DAT3,
+
+ /* OMAP-1710 MMC CMDDIR and DATDIR0 */
+ M15_1710_MMC_CLKI,
+ P19_1710_MMC_CMDDIR,
+ P20_1710_MMC_DATDIR0,
+
+ /* OMAP-1610 USB0 alternate pin configuration */
+ W9_USB0_TXEN,
+ AA9_USB0_VP,
+ Y5_USB0_RCV,
+ R9_USB0_VM,
+ V6_USB0_TXD,
+ W5_USB0_SE0,
+ V9_USB0_SPEED,
+ V9_USB0_SUSP,
+
+ /* USB2 */
+ W9_USB2_TXEN,
+ AA9_USB2_VP,
+ Y5_USB2_RCV,
+ R9_USB2_VM,
+ V6_USB2_TXD,
+ W5_USB2_SE0,
+
+ /* 16XX UART */
+ R13_1610_UART1_TX,
+ V14_16XX_UART1_RX,
+ R14_1610_UART1_CTS,
+ AA15_1610_UART1_RTS,
+ R9_16XX_UART2_RX,
+ L14_16XX_UART3_RX,
+
+ /* I2C OMAP-1610 */
+ I2C_SCL,
+ I2C_SDA,
+
+ /* Keypad */
+ F18_1610_KBC0,
+ D20_1610_KBC1,
+ D19_1610_KBC2,
+ E18_1610_KBC3,
+ C21_1610_KBC4,
+ G18_1610_KBR0,
+ F19_1610_KBR1,
+ H14_1610_KBR2,
+ E20_1610_KBR3,
+ E19_1610_KBR4,
+ N19_1610_KBR5,
+
+ /* Power management */
+ T20_1610_LOW_PWR,
+
+ /* MCLK Settings */
+ V5_1710_MCLK_ON,
+ V5_1710_MCLK_OFF,
+ R10_1610_MCLK_ON,
+ R10_1610_MCLK_OFF,
+
+ /* CompactFlash controller */
+ P11_1610_CF_CD2,
+ R11_1610_CF_IOIS16,
+ V10_1610_CF_IREQ,
+ W10_1610_CF_RESET,
+ W11_1610_CF_CD1,
+
+ /* parallel camera */
+ J15_1610_CAM_LCLK,
+ J18_1610_CAM_D7,
+ J19_1610_CAM_D6,
+ J14_1610_CAM_D5,
+ K18_1610_CAM_D4,
+ K19_1610_CAM_D3,
+ K15_1610_CAM_D2,
+ K14_1610_CAM_D1,
+ L19_1610_CAM_D0,
+ L18_1610_CAM_VS,
+ L15_1610_CAM_HS,
+ M19_1610_CAM_RSTZ,
+ Y15_1610_CAM_OUTCLK,
+
+ /* serial camera */
+ H19_1610_CAM_EXCLK,
+ Y12_1610_CCP_CLKP,
+ W13_1610_CCP_CLKM,
+ W14_1610_CCP_DATAP,
+ Y14_1610_CCP_DATAM,
+
+};
+
+#ifdef CONFIG_OMAP_MUX
+extern int omap_cfg_reg(unsigned long reg_cfg);
+#else
+static inline int omap_cfg_reg(unsigned long reg_cfg) { return 0; }
+#endif
+
+#endif
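
Board files consume the enum through omap_cfg_reg(), one pin per call; a sketch:

static void __init example_board_mux_init(void)
{
	/* Route the UART1 pair before registering the serial device. */
	omap_cfg_reg(UART1_TX);
	omap_cfg_reg(UART1_RTS);
}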
diff --git a/include/linux/soc/ti/omap1-soc.h b/include/linux/soc/ti/omap1-soc.h
new file mode 100644
index 000000000000..a42d9aa68648
--- /dev/null
+++ b/include/linux/soc/ti/omap1-soc.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * OMAP cpu type detection
+ *
+ * Copyright (C) 2004, 2008 Nokia Corporation
+ *
+ * Copyright (C) 2009-11 Texas Instruments.
+ *
+ * Written by Tony Lindgren <tony.lindgren@nokia.com>
+ *
+ * Added OMAP4/5 specific defines - Santosh Shilimkar<santosh.shilimkar@ti.com>
+ */
+
+#ifndef __ASM_ARCH_OMAP_CPU_H
+#define __ASM_ARCH_OMAP_CPU_H
+
+/*
+ * Test if multicore OMAP support is needed
+ */
+#undef MULTI_OMAP1
+#undef OMAP_NAME
+
+#ifdef CONFIG_ARCH_OMAP15XX
+# ifdef OMAP_NAME
+# undef MULTI_OMAP1
+# define MULTI_OMAP1
+# else
+# define OMAP_NAME omap1510
+# endif
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+# ifdef OMAP_NAME
+# undef MULTI_OMAP1
+# define MULTI_OMAP1
+# else
+# define OMAP_NAME omap16xx
+# endif
+#endif
+
+/*
+ * omap_rev bits:
+ * CPU id bits (0730, 1510, 1710, 2422...) [31:16]
+ * CPU revision (See _REV_ defined in cpu.h) [15:08]
+ * CPU class bits (15xx, 16xx, 24xx, 34xx...) [07:00]
+ */
+unsigned int omap_rev(void);
+
+/*
+ * Get the CPU revision for OMAP devices
+ */
+#define GET_OMAP_REVISION() ((omap_rev() >> 8) & 0xff)
+
+/*
+ * Macros to group OMAP into cpu classes.
+ * These can be used in most places.
+ * cpu_is_omap15xx(): True for OMAP1510, OMAP5910 and OMAP310
+ * cpu_is_omap16xx(): True for OMAP1610, OMAP5912 and OMAP1710
+ */
+#define GET_OMAP_CLASS (omap_rev() & 0xff)
+
+#define IS_OMAP_CLASS(class, id) \
+static inline int is_omap ##class (void) \
+{ \
+ return (GET_OMAP_CLASS == (id)) ? 1 : 0; \
+}
+
+#define GET_OMAP_SUBCLASS ((omap_rev() >> 20) & 0x0fff)
+
+#define IS_OMAP_SUBCLASS(subclass, id) \
+static inline int is_omap ##subclass (void) \
+{ \
+ return (GET_OMAP_SUBCLASS == (id)) ? 1 : 0; \
+}
+
+IS_OMAP_CLASS(15xx, 0x15)
+IS_OMAP_CLASS(16xx, 0x16)
+
+#define cpu_is_omap15xx() 0
+#define cpu_is_omap16xx() 0
+
+#if defined(MULTI_OMAP1)
+# if defined(CONFIG_ARCH_OMAP15XX)
+# undef cpu_is_omap15xx
+# define cpu_is_omap15xx() is_omap15xx()
+# endif
+# if defined(CONFIG_ARCH_OMAP16XX)
+# undef cpu_is_omap16xx
+# define cpu_is_omap16xx() is_omap16xx()
+# endif
+#else
+# if defined(CONFIG_ARCH_OMAP15XX)
+# undef cpu_is_omap15xx
+# define cpu_is_omap15xx() 1
+# endif
+# if defined(CONFIG_ARCH_OMAP16XX)
+# undef cpu_is_omap16xx
+# define cpu_is_omap16xx() 1
+# endif
+#endif
+
+/*
+ * Macros to detect individual cpu types.
+ * These are only rarely needed.
+ * cpu_is_omap310(): True for OMAP310
+ * cpu_is_omap1510(): True for OMAP1510
+ * cpu_is_omap1610(): True for OMAP1610
+ * cpu_is_omap1611(): True for OMAP1611
+ * cpu_is_omap5912(): True for OMAP5912
+ * cpu_is_omap1621(): True for OMAP1621
+ * cpu_is_omap1710(): True for OMAP1710
+ */
+#define GET_OMAP_TYPE ((omap_rev() >> 16) & 0xffff)
+
+#define IS_OMAP_TYPE(type, id) \
+static inline int is_omap ##type (void) \
+{ \
+ return (GET_OMAP_TYPE == (id)) ? 1 : 0; \
+}
+
+IS_OMAP_TYPE(310, 0x0310)
+IS_OMAP_TYPE(1510, 0x1510)
+IS_OMAP_TYPE(1610, 0x1610)
+IS_OMAP_TYPE(1611, 0x1611)
+IS_OMAP_TYPE(5912, 0x1611)
+IS_OMAP_TYPE(1621, 0x1621)
+IS_OMAP_TYPE(1710, 0x1710)
+
+#define cpu_is_omap310() 0
+#define cpu_is_omap1510() 0
+#define cpu_is_omap1610() 0
+#define cpu_is_omap5912() 0
+#define cpu_is_omap1611() 0
+#define cpu_is_omap1621() 0
+#define cpu_is_omap1710() 0
+
+#define cpu_class_is_omap1() 1
+
+/*
+ * Whether we have MULTI_OMAP1 or not, we still need to distinguish
+ * between 310 vs. 1510 and 1611B/5912 vs. 1710.
+ */
+
+#if defined(CONFIG_ARCH_OMAP15XX)
+# undef cpu_is_omap310
+# undef cpu_is_omap1510
+# define cpu_is_omap310() is_omap310()
+# define cpu_is_omap1510() is_omap1510()
+#endif
+
+#if defined(CONFIG_ARCH_OMAP16XX)
+# undef cpu_is_omap1610
+# undef cpu_is_omap1611
+# undef cpu_is_omap5912
+# undef cpu_is_omap1621
+# undef cpu_is_omap1710
+# define cpu_is_omap1610() is_omap1610()
+# define cpu_is_omap1611() is_omap1611()
+# define cpu_is_omap5912() is_omap5912()
+# define cpu_is_omap1621() is_omap1621()
+# define cpu_is_omap1710() is_omap1710()
+#endif
+
+#endif
diff --git a/include/linux/soc/ti/omap1-usb.h b/include/linux/soc/ti/omap1-usb.h
new file mode 100644
index 000000000000..67488698601a
--- /dev/null
+++ b/include/linux/soc/ti/omap1-usb.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SOC_TI_OMAP1_USB
+#define __SOC_TI_OMAP1_USB
+/*
+ * Constants in this file are used all over the place, in platform
+ * code, as well as the udc, phy and ohci drivers.
+ * This is not a great design, but unlikely to get fixed after
+ * such a long time. Don't do this elsewhere.
+ */
+
+#define OMAP1_OTG_BASE 0xfffb0400
+#define OMAP1_UDC_BASE 0xfffb4000
+
+#define OMAP2_UDC_BASE 0x4805e200
+#define OMAP2_OTG_BASE 0x4805e300
+#define OTG_BASE OMAP1_OTG_BASE
+#define UDC_BASE OMAP1_UDC_BASE
+
+/*
+ * OTG and transceiver registers, for OMAPs starting with ARM926
+ */
+#define OTG_REV (OTG_BASE + 0x00)
+#define OTG_SYSCON_1 (OTG_BASE + 0x04)
+# define USB2_TRX_MODE(w) (((w)>>24)&0x07)
+# define USB1_TRX_MODE(w) (((w)>>20)&0x07)
+# define USB0_TRX_MODE(w) (((w)>>16)&0x07)
+# define OTG_IDLE_EN (1 << 15)
+# define HST_IDLE_EN (1 << 14)
+# define DEV_IDLE_EN (1 << 13)
+# define OTG_RESET_DONE (1 << 2)
+# define OTG_SOFT_RESET (1 << 1)
+#define OTG_SYSCON_2 (OTG_BASE + 0x08)
+# define OTG_EN (1 << 31)
+# define USBX_SYNCHRO (1 << 30)
+# define OTG_MST16 (1 << 29)
+# define SRP_GPDATA (1 << 28)
+# define SRP_GPDVBUS (1 << 27)
+# define SRP_GPUVBUS(w) (((w)>>24)&0x07)
+# define A_WAIT_VRISE(w) (((w)>>20)&0x07)
+# define B_ASE_BRST(w) (((w)>>16)&0x07)
+# define SRP_DPW (1 << 14)
+# define SRP_DATA (1 << 13)
+# define SRP_VBUS (1 << 12)
+# define OTG_PADEN (1 << 10)
+# define HMC_PADEN (1 << 9)
+# define UHOST_EN (1 << 8)
+# define HMC_TLLSPEED (1 << 7)
+# define HMC_TLLATTACH (1 << 6)
+# define OTG_HMC(w) (((w)>>0)&0x3f)
+#define OTG_CTRL (OTG_BASE + 0x0c)
+# define OTG_USB2_EN (1 << 29)
+# define OTG_USB2_DP (1 << 28)
+# define OTG_USB2_DM (1 << 27)
+# define OTG_USB1_EN (1 << 26)
+# define OTG_USB1_DP (1 << 25)
+# define OTG_USB1_DM (1 << 24)
+# define OTG_USB0_EN (1 << 23)
+# define OTG_USB0_DP (1 << 22)
+# define OTG_USB0_DM (1 << 21)
+# define OTG_ASESSVLD (1 << 20)
+# define OTG_BSESSEND (1 << 19)
+# define OTG_BSESSVLD (1 << 18)
+# define OTG_VBUSVLD (1 << 17)
+# define OTG_ID (1 << 16)
+# define OTG_DRIVER_SEL (1 << 15)
+# define OTG_A_SETB_HNPEN (1 << 12)
+# define OTG_A_BUSREQ (1 << 11)
+# define OTG_B_HNPEN (1 << 9)
+# define OTG_B_BUSREQ (1 << 8)
+# define OTG_BUSDROP (1 << 7)
+# define OTG_PULLDOWN (1 << 5)
+# define OTG_PULLUP (1 << 4)
+# define OTG_DRV_VBUS (1 << 3)
+# define OTG_PD_VBUS (1 << 2)
+# define OTG_PU_VBUS (1 << 1)
+# define OTG_PU_ID (1 << 0)
+#define OTG_IRQ_EN (OTG_BASE + 0x10) /* 16-bit */
+# define DRIVER_SWITCH (1 << 15)
+# define A_VBUS_ERR (1 << 13)
+# define A_REQ_TMROUT (1 << 12)
+# define A_SRP_DETECT (1 << 11)
+# define B_HNP_FAIL (1 << 10)
+# define B_SRP_TMROUT (1 << 9)
+# define B_SRP_DONE (1 << 8)
+# define B_SRP_STARTED (1 << 7)
+# define OPRT_CHG (1 << 0)
+#define OTG_IRQ_SRC (OTG_BASE + 0x14) /* 16-bit */
+	/* same bits as in OTG_IRQ_EN */
+#define OTG_OUTCTRL (OTG_BASE + 0x18) /* 16-bit */
+# define OTGVPD (1 << 14)
+# define OTGVPU (1 << 13)
+# define OTGPUID (1 << 12)
+# define USB2VDR (1 << 10)
+# define USB2PDEN (1 << 9)
+# define USB2PUEN (1 << 8)
+# define USB1VDR (1 << 6)
+# define USB1PDEN (1 << 5)
+# define USB1PUEN (1 << 4)
+# define USB0VDR (1 << 2)
+# define USB0PDEN (1 << 1)
+# define USB0PUEN (1 << 0)
+#define OTG_TEST (OTG_BASE + 0x20) /* 16-bit */
+#define OTG_VENDOR_CODE (OTG_BASE + 0xfc) /* 16-bit */
+
+/*-------------------------------------------------------------------------*/
+
+/* OMAP1 */
+#define USB_TRANSCEIVER_CTRL (0xfffe1000 + 0x0064)
+# define CONF_USB2_UNI_R (1 << 8)
+# define CONF_USB1_UNI_R (1 << 7)
+# define CONF_USB_PORT0_R(x) (((x)>>4)&0x7)
+# define CONF_USB0_ISOLATE_R (1 << 3)
+# define CONF_USB_PWRDN_DM_R (1 << 2)
+# define CONF_USB_PWRDN_DP_R (1 << 1)
+
+#endif
diff --git a/include/linux/soc/ti/ti-msgmgr.h b/include/linux/soc/ti/ti-msgmgr.h
index 1f6e76d423cf..543da257a5f2 100644
--- a/include/linux/soc/ti/ti-msgmgr.h
+++ b/include/linux/soc/ti/ti-msgmgr.h
@@ -1,26 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Texas Instruments' Message Manager
*
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/
* Nishanth Menon
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef TI_MSGMGR_H
#define TI_MSGMGR_H
+struct mbox_chan;
+
/**
* struct ti_msgmgr_message - Message Manager structure
* @len: Length of data in the Buffer
* @buf: Buffer pointer
+ * @chan_rx: Expected channel for response, must be provided to use polled rx
+ * @timeout_rx_ms: Timeout value to use if polling for response
*
* This is the structure for data used in mbox_send_message
* the length of data buffer used depends on the SoC integration
@@ -30,6 +26,8 @@
struct ti_msgmgr_message {
size_t len;
u8 *buf;
+ struct mbox_chan *chan_rx;
+ int timeout_rx_ms;
};
#endif /* TI_MSGMGR_H */
diff --git a/include/linux/soc/ti/ti_sci_inta_msi.h b/include/linux/soc/ti/ti_sci_inta_msi.h
index e3aa8b14612e..4dba2f2aff6f 100644
--- a/include/linux/soc/ti/ti_sci_inta_msi.h
+++ b/include/linux/soc/ti/ti_sci_inta_msi.h
@@ -18,6 +18,4 @@ struct irq_domain
struct irq_domain *parent);
int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
struct ti_sci_resource *res);
-unsigned int ti_sci_inta_msi_get_virq(struct device *dev, u32 index);
-void ti_sci_inta_msi_domain_free_irqs(struct device *dev);
#endif /* __INCLUDE_LINUX_IRQCHIP_TI_SCI_INTA_H */
diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h
index 0aad7009b50e..fd104b666836 100644
--- a/include/linux/soc/ti/ti_sci_protocol.h
+++ b/include/linux/soc/ti/ti_sci_protocol.h
@@ -195,6 +195,35 @@ struct ti_sci_clk_ops {
u64 *current_freq);
};
+/* TISCI LPM IO isolation control values */
+#define TISCI_MSG_VALUE_IO_ENABLE 1
+#define TISCI_MSG_VALUE_IO_DISABLE 0
+
+/* TISCI LPM constraint state values */
+#define TISCI_MSG_CONSTRAINT_SET 1
+#define TISCI_MSG_CONSTRAINT_CLR 0
+
+/**
+ * struct ti_sci_pm_ops - Low Power Mode (LPM) control operations
+ * @lpm_wake_reason: Get the wake up source that woke the SoC from LPM
+ * - source: The wake up source that woke the SoC from LPM.
+ * - timestamp: Timestamp at which the SoC woke up.
+ * - pin: The pin that triggered the wake up.
+ * - mode: The last low power mode entered.
+ * @set_device_constraint: Set LPM constraint on behalf of a device
+ * - id: Device Identifier
+ * - state: The desired state of device constraint: set or clear.
+ * @set_latency_constraint: Set LPM resume latency constraint
+ * - latency: maximum acceptable latency to wake up from low power mode
+ * - state: The desired state of latency constraint: set or clear.
+ */
+struct ti_sci_pm_ops {
+ int (*lpm_wake_reason)(const struct ti_sci_handle *handle,
+ u32 *source, u64 *timestamp, u8 *pin, u8 *mode);
+ int (*set_device_constraint)(const struct ti_sci_handle *handle,
+ u32 id, u8 state);
+ int (*set_latency_constraint)(const struct ti_sci_handle *handle,
+ u16 latency, u8 state);
+};
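+
+/*
+ * Example (illustrative): a client driver holding a const struct
+ * ti_sci_handle keeps its device (TISCI id 42, hypothetical) powered
+ * across low power mode entry:
+ *
+ *	ret = handle->ops.pm_ops.set_device_constraint(handle, 42,
+ *						       TISCI_MSG_CONSTRAINT_SET);
+ */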
+
/**
* struct ti_sci_resource_desc - Description of TI SCI resource instance range.
* @start: Start index of the first resource range.
@@ -539,6 +568,7 @@ struct ti_sci_ops {
struct ti_sci_core_ops core_ops;
struct ti_sci_dev_ops dev_ops;
struct ti_sci_clk_ops clk_ops;
+ struct ti_sci_pm_ops pm_ops;
struct ti_sci_rm_core_ops rm_core_ops;
struct ti_sci_rm_irq_ops rm_irq_ops;
struct ti_sci_rm_ringacc_ops rm_ring_ops;
@@ -645,7 +675,7 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
static inline struct ti_sci_resource *
devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
- u32 dev_id, u32 sub_type);
+ u32 dev_id, u32 sub_type)
{
return ERR_PTR(-EINVAL);
}
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 0b9ecd8cf979..110978dc9af1 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -13,6 +13,7 @@ struct nlmsghdr;
struct sock;
struct sock_diag_handler {
+ struct module *owner;
__u8 family;
int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
int (*get_info)(struct sk_buff *skb, struct sock *sk);
@@ -22,8 +23,13 @@ struct sock_diag_handler {
int sock_diag_register(const struct sock_diag_handler *h);
void sock_diag_unregister(const struct sock_diag_handler *h);
-void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
-void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+struct sock_diag_inet_compat {
+ struct module *owner;
+ int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh);
+};
+
+void sock_diag_register_inet_compat(const struct sock_diag_inet_compat *ptr);
+void sock_diag_unregister_inet_compat(const struct sock_diag_inet_compat *ptr);
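+
+/*
+ * Example (illustrative): a module registers its inet compat handler,
+ * with @owner set so the core can pin the module while dispatching
+ * (my_dump_compat is hypothetical):
+ *
+ *	static const struct sock_diag_inet_compat my_compat = {
+ *		.owner = THIS_MODULE,
+ *		.fn = my_dump_compat,
+ *	};
+ *
+ *	sock_diag_register_inet_compat(&my_compat);
+ */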
u64 __sock_gen_cookie(struct sock *sk);
diff --git a/include/linux/socket.h b/include/linux/socket.h
index b8fc5c53ba6f..ec715ad4bf25 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -14,6 +14,9 @@ struct file;
struct pid;
struct cred;
struct socket;
+struct sock;
+struct sk_buff;
+struct proto_accept_arg;
#define __sockaddr_check_size(size) \
BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage)))
@@ -29,11 +32,29 @@ typedef __kernel_sa_family_t sa_family_t;
* 1003.1g requires sa_family_t and that sa_data is char.
*/
+/* Deprecated for in-kernel use. Use struct sockaddr_unsized instead. */
struct sockaddr {
sa_family_t sa_family; /* address family, AF_xxx */
char sa_data[14]; /* 14 bytes of protocol address */
};
+/**
+ * struct sockaddr_unsized - Unspecified size sockaddr for callbacks
+ * @sa_family: Address family (AF_UNIX, AF_INET, AF_INET6, etc.)
+ * @sa_data: Flexible array for address data
+ *
+ * This structure is designed for callback interfaces where the
+ * total size is known via the sockaddr_len parameter. Unlike struct
+ * sockaddr, which has a fixed 14-byte sa_data limit, or struct
+ * sockaddr_storage, which has a fixed 128-byte total size, this
+ * structure can accommodate addresses of any size, but must be used
+ * carefully.
+ */
+struct sockaddr_unsized {
+ __kernel_sa_family_t sa_family; /* address family, AF_xxx */
+ char sa_data[]; /* flexible address data */
+};
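+
+/*
+ * Illustrative callback shape (names hypothetical): the paired length
+ * argument, not sizeof(*addr), must bound all accesses to sa_data:
+ *
+ *	static int my_bind(struct socket *sock,
+ *			   struct sockaddr_unsized *addr, int addr_len)
+ *	{
+ *		if (addr_len < (int)sizeof(addr->sa_family))
+ *			return -EINVAL;
+ *		...
+ *	}
+ */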
+
struct linger {
int l_onoff; /* Linger active */
int l_linger; /* How long to linger for */
@@ -50,6 +71,9 @@ struct linger {
struct msghdr {
void *msg_name; /* ptr to socket address structure */
int msg_namelen; /* size of socket address structure */
+
+ int msg_inq; /* output, data left in socket */
+
struct iov_iter msg_iter; /* data */
/*
@@ -62,9 +86,13 @@ struct msghdr {
void __user *msg_control_user;
};
bool msg_control_is_user : 1;
- __kernel_size_t msg_controllen; /* ancillary data buffer length */
+ bool msg_get_inq : 1;/* return INQ after receive */
unsigned int msg_flags; /* flags on received message */
+ __kernel_size_t msg_controllen; /* ancillary data buffer length */
struct kiocb *msg_iocb; /* ptr to iocb for async requests */
+ struct ubuf_info *msg_ubuf;
+ int (*sg_from_iter)(struct sk_buff *skb,
+ struct iov_iter *from, size_t length);
};
struct user_msghdr {
@@ -155,7 +183,7 @@ static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr
return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
}
-static inline size_t msg_data_left(struct msghdr *msg)
+static inline size_t msg_data_left(const struct msghdr *msg)
{
return iov_iter_count(&msg->msg_iter);
}
@@ -165,6 +193,7 @@ static inline size_t msg_data_left(struct msghdr *msg)
#define SCM_RIGHTS 0x01 /* rw: access rights (array of int) */
#define SCM_CREDENTIALS 0x02 /* rw: struct ucred */
#define SCM_SECURITY 0x03 /* rw: security label */
+#define SCM_PIDFD 0x04 /* ro: pidfd (int) */
struct ucred {
__u32 pid;
@@ -223,8 +252,11 @@ struct ucred {
* reuses AF_INET address family
*/
#define AF_XDP 44 /* XDP sockets */
+#define AF_MCTP 45 /* Management component
+ * transport protocol
+ */
-#define AF_MAX 45 /* For now.. */
+#define AF_MAX 46 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -274,6 +306,7 @@ struct ucred {
#define PF_QIPCRTR AF_QIPCRTR
#define PF_SMC AF_SMC
#define PF_XDP AF_XDP
+#define PF_MCTP AF_MCTP
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
@@ -302,7 +335,6 @@ struct ucred {
#define MSG_MORE 0x8000 /* Sender will send more */
#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
#define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do no apply policy */
-#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF MSG_FIN
#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
@@ -310,7 +342,9 @@ struct ucred {
* plain text and require encryption
*/
+#define MSG_SOCK_DEVMEM 0x2000000 /* Receive devmem skbs as cmsg */
#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
+#define MSG_SPLICE_PAGES 0x8000000 /* Splice the pages from the iterator in sendmsg() */
#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file
descriptor received through
@@ -321,6 +355,9 @@ struct ucred {
#define MSG_CMSG_COMPAT 0 /* We never have 32 bit fixups */
#endif
+/* Flags to be cleared on entry by sendmsg and sendmmsg syscalls */
+#define MSG_INTERNAL_SENDMSG_FLAGS \
+ (MSG_SPLICE_PAGES | MSG_SENDPAGE_NOPOLICY | MSG_SENDPAGE_DECRYPTED)
/* Setsockoptions(2) level. Thanks to BSD these must match IPPROTO_xxx */
#define SOL_IP 0
@@ -360,12 +397,18 @@ struct ucred {
#define SOL_KCM 281
#define SOL_TLS 282
#define SOL_XDP 283
+#define SOL_MPTCP 284
+#define SOL_MCTP 285
+#define SOL_SMC 286
+#define SOL_VSOCK 287
/* IPX options */
#define IPX_TYPE 1
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
+extern int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len,
+ void *data);
struct timespec64;
struct __kernel_timespec;
@@ -398,17 +441,9 @@ extern long __sys_recvmsg_sock(struct socket *sock, struct msghdr *msg,
struct user_msghdr __user *umsg,
struct sockaddr __user *uaddr,
unsigned int flags);
-extern int sendmsg_copy_msghdr(struct msghdr *msg,
- struct user_msghdr __user *umsg, unsigned flags,
- struct iovec **iov);
-extern int recvmsg_copy_msghdr(struct msghdr *msg,
- struct user_msghdr __user *umsg, unsigned flags,
- struct sockaddr __user **uaddr,
- struct iovec **iov);
-extern int __copy_msghdr_from_user(struct msghdr *kmsg,
- struct user_msghdr __user *umsg,
- struct sockaddr __user **save_addr,
- struct iovec __user **uiov, size_t *nsegs);
+extern int __copy_msghdr(struct msghdr *kmsg,
+ struct user_msghdr *umsg,
+ struct sockaddr __user **save_addr);
/* helpers which do the actual work for syscalls */
extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
@@ -417,27 +452,28 @@ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
extern int __sys_sendto(int fd, void __user *buff, size_t len,
unsigned int flags, struct sockaddr __user *addr,
int addr_len);
-extern int __sys_accept4_file(struct file *file, unsigned file_flags,
- struct sockaddr __user *upeer_sockaddr,
- int __user *upeer_addrlen, int flags,
- unsigned long nofile);
+extern struct file *do_accept(struct file *file, struct proto_accept_arg *arg,
+ struct sockaddr __user *upeer_sockaddr,
+ int __user *upeer_addrlen, int flags);
extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags);
extern int __sys_socket(int family, int type, int protocol);
+extern struct file *__sys_socket_file(int family, int type, int protocol);
extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen);
+extern int __sys_bind_socket(struct socket *sock, struct sockaddr_storage *address,
+ int addrlen);
extern int __sys_connect_file(struct file *file, struct sockaddr_storage *addr,
int addrlen, int file_flags);
extern int __sys_connect(int fd, struct sockaddr __user *uservaddr,
int addrlen);
extern int __sys_listen(int fd, int backlog);
+extern int __sys_listen_socket(struct socket *sock, int backlog);
+extern int do_getsockname(struct socket *sock, int peer,
+ struct sockaddr __user *usockaddr, int __user *usockaddr_len);
extern int __sys_getsockname(int fd, struct sockaddr __user *usockaddr,
- int __user *usockaddr_len);
-extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr,
- int __user *usockaddr_len);
+ int __user *usockaddr_len, int peer);
extern int __sys_socketpair(int family, int type, int protocol,
int __user *usockvec);
extern int __sys_shutdown_sock(struct socket *sock, int how);
extern int __sys_shutdown(int fd, int how);
-
-extern struct ns_common *get_net_ns(struct ns_common *ns);
#endif /* _LINUX_SOCKET_H */
diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h
index ea193414298b..3e6c8e9d67ae 100644
--- a/include/linux/sockptr.h
+++ b/include/linux/sockptr.h
@@ -50,11 +50,63 @@ static inline int copy_from_sockptr_offset(void *dst, sockptr_t src,
return 0;
}
+/* Deprecated.
+ * This is unsafe, unless caller checked user provided optlen.
+ * Prefer copy_safe_from_sockptr() instead.
+ *
+ * Returns 0 for success, or number of bytes not copied on error.
+ */
static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size)
{
return copy_from_sockptr_offset(dst, src, 0, size);
}
+/**
+ * copy_safe_from_sockptr - copy a struct from sockptr
+ * @dst: Destination address, in kernel space. This buffer must be @ksize
+ * bytes long.
+ * @ksize: Size of @dst struct.
+ * @optval: Source address (in user or kernel space).
+ * @optlen: Size of @optval data.
+ *
+ * Returns:
+ * * -EINVAL: @optlen < @ksize
+ * * -EFAULT: access to userspace failed.
+ * * 0 : @ksize bytes were copied
+ */
+static inline int copy_safe_from_sockptr(void *dst, size_t ksize,
+ sockptr_t optval, unsigned int optlen)
+{
+ if (optlen < ksize)
+ return -EINVAL;
+ if (copy_from_sockptr(dst, optval, ksize))
+ return -EFAULT;
+ return 0;
+}
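+
+/*
+ * Typical setsockopt-style use (illustrative, struct name hypothetical):
+ *
+ *	struct my_opts opts;
+ *	int err = copy_safe_from_sockptr(&opts, sizeof(opts), optval, optlen);
+ *
+ *	if (err)
+ *		return err;
+ */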
+
+static inline int copy_struct_from_sockptr(void *dst, size_t ksize,
+ sockptr_t src, size_t usize)
+{
+ size_t size = min(ksize, usize);
+ size_t rest = max(ksize, usize) - size;
+
+	if (!sockptr_is_kernel(src))
+		return copy_struct_from_user(dst, ksize, src.user, usize);
+
+	if (usize < ksize) {
+		memset(dst + size, 0, rest);
+	} else if (usize > ksize) {
+		char *p = src.kernel + size;
+
+ while (rest--) {
+ if (*p++)
+ return -E2BIG;
+ }
+ }
+ memcpy(dst, src.kernel, size);
+ return 0;
+}
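+
+/*
+ * copy_struct_from_sockptr() mirrors copy_struct_from_user(): a shorter
+ * source is zero-extended into @dst, and a longer source is accepted
+ * only if its trailing bytes are all zero (-E2BIG otherwise), keeping
+ * extensible structs backward and forward compatible.
+ */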
+
static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset,
const void *src, size_t size)
{
@@ -64,9 +116,14 @@ static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset,
return 0;
}
-static inline void *memdup_sockptr(sockptr_t src, size_t len)
+static inline int copy_to_sockptr(sockptr_t dst, const void *src, size_t size)
+{
+ return copy_to_sockptr_offset(dst, 0, src, size);
+}
+
+static inline void *memdup_sockptr_noprof(sockptr_t src, size_t len)
{
- void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
+ void *p = kmalloc_track_caller_noprof(len, GFP_USER | __GFP_NOWARN);
if (!p)
return ERR_PTR(-ENOMEM);
@@ -76,10 +133,11 @@ static inline void *memdup_sockptr(sockptr_t src, size_t len)
}
return p;
}
+#define memdup_sockptr(...) alloc_hooks(memdup_sockptr_noprof(__VA_ARGS__))
-static inline void *memdup_sockptr_nul(sockptr_t src, size_t len)
+static inline void *memdup_sockptr_nul_noprof(sockptr_t src, size_t len)
{
- char *p = kmalloc_track_caller(len + 1, GFP_KERNEL);
+ char *p = kmalloc_track_caller_noprof(len + 1, GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
@@ -90,6 +148,7 @@ static inline void *memdup_sockptr_nul(sockptr_t src, size_t len)
p[len] = '\0';
return p;
}
+#define memdup_sockptr_nul(...) alloc_hooks(memdup_sockptr_nul_noprof(__VA_ARGS__))
static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count)
{
@@ -102,4 +161,12 @@ static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count)
return strncpy_from_user(dst, src.user, count);
}
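+
+/*
+ * Return 1 if the @size bytes at @offset in @src are all zero, 0 if any
+ * byte is non-zero, or a negative error code if the userspace read fails.
+ */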
+static inline int check_zeroed_sockptr(sockptr_t src, size_t offset,
+ size_t size)
+{
+ if (!sockptr_is_kernel(src))
+ return check_zeroed_user(src.user + offset, size);
+ return memchr_inv(src.kernel + offset, 0, size) == NULL;
+}
+
#endif /* _LINUX_SOCKPTR_H */
diff --git a/include/linux/softirq.h b/include/linux/softirq.h
new file mode 100644
index 000000000000..c73d7dcb4cb5
--- /dev/null
+++ b/include/linux/softirq.h
@@ -0,0 +1 @@
+#include <linux/interrupt.h>
diff --git a/include/linux/sony-laptop.h b/include/linux/sony-laptop.h
deleted file mode 100644
index 1e3c92feea6e..000000000000
--- a/include/linux/sony-laptop.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SONYLAPTOP_H_
-#define _SONYLAPTOP_H_
-
-#include <linux/types.h>
-
-#ifdef __KERNEL__
-
-/* used only for communication between v4l and sony-laptop */
-
-#define SONY_PIC_COMMAND_GETCAMERA 1 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERA 2
-#define SONY_PIC_COMMAND_GETCAMERABRIGHTNESS 3 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERABRIGHTNESS 4
-#define SONY_PIC_COMMAND_GETCAMERACONTRAST 5 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERACONTRAST 6
-#define SONY_PIC_COMMAND_GETCAMERAHUE 7 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERAHUE 8
-#define SONY_PIC_COMMAND_GETCAMERACOLOR 9 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERACOLOR 10
-#define SONY_PIC_COMMAND_GETCAMERASHARPNESS 11 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERASHARPNESS 12
-#define SONY_PIC_COMMAND_GETCAMERAPICTURE 13 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERAPICTURE 14
-#define SONY_PIC_COMMAND_GETCAMERAAGC 15 /* obsolete */
-#define SONY_PIC_COMMAND_SETCAMERAAGC 16
-#define SONY_PIC_COMMAND_GETCAMERADIRECTION 17 /* obsolete */
-#define SONY_PIC_COMMAND_GETCAMERAROMVERSION 18 /* obsolete */
-#define SONY_PIC_COMMAND_GETCAMERAREVISION 19 /* obsolete */
-
-#if IS_ENABLED(CONFIG_SONY_LAPTOP)
-int sony_pic_camera_command(int command, u8 value);
-#else
-static inline int sony_pic_camera_command(int command, u8 value) { return 0; }
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* _SONYLAPTOP_H_ */
diff --git a/include/linux/sort.h b/include/linux/sort.h
index b5898725fe9d..c01ef804a0eb 100644
--- a/include/linux/sort.h
+++ b/include/linux/sort.h
@@ -4,13 +4,34 @@
#include <linux/types.h>
+/**
+ * cmp_int - perform a three-way comparison of the arguments
+ * @l: the left argument
+ * @r: the right argument
+ *
+ * Return: 1 if the left argument is greater than the right one; 0 if the
+ * arguments are equal; -1 if the left argument is less than the right one.
+ */
+#define cmp_int(l, r) (((l) > (r)) - ((l) < (r)))
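+
+/*
+ * Example (illustrative): a cmp_func_t for sort(), built on cmp_int()
+ * to order integers ascending:
+ *
+ *	static int cmp_ints(const void *a, const void *b)
+ *	{
+ *		return cmp_int(*(const int *)a, *(const int *)b);
+ *	}
+ */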
+
void sort_r(void *base, size_t num, size_t size,
cmp_r_func_t cmp_func,
- swap_func_t swap_func,
+ swap_r_func_t swap_func,
const void *priv);
void sort(void *base, size_t num, size_t size,
cmp_func_t cmp_func,
swap_func_t swap_func);
+/* Versions that periodically call cond_resched(): */
+
+void sort_r_nonatomic(void *base, size_t num, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv);
+
+void sort_nonatomic(void *base, size_t num, size_t size,
+ cmp_func_t cmp_func,
+ swap_func_t swap_func);
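+
+/*
+ * Example (illustrative): sorting a large table from process context,
+ * where rescheduling is allowed, with the hypothetical cmp_ints() above:
+ *
+ *	sort_nonatomic(table, nr_entries, sizeof(*table), cmp_ints, NULL);
+ */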
+
#endif
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index ced07f8fde87..e6a3476bcef1 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -4,8 +4,22 @@
#ifndef __SOUNDWIRE_H
#define __SOUNDWIRE_H
-#include <linux/mod_devicetable.h>
#include <linux/bitfield.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/lockdep_types.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <sound/sdca.h>
+
+struct dentry;
+struct fwnode_handle;
+struct device_node;
struct sdw_bus;
struct sdw_slave;
@@ -38,10 +52,13 @@ struct sdw_slave;
#define SDW_FRAME_CTRL_BITS 48
#define SDW_MAX_DEVICES 11
+#define SDW_FW_MAX_DEVICES 16
#define SDW_MAX_PORTS 15
#define SDW_VALID_PORT_RANGE(n) ((n) < SDW_MAX_PORTS && (n) >= 1)
+#define SDW_MAX_LANES 8
+
enum {
SDW_PORT_DIRN_SINK = 0,
SDW_PORT_DIRN_SOURCE,
@@ -136,12 +153,14 @@ enum sdw_dpn_pkg_mode {
*
* @SDW_STREAM_PCM: PCM data stream
* @SDW_STREAM_PDM: PDM data stream
+ * @SDW_STREAM_BPT: BPT data stream
*
* spec doesn't define this, but is used in implementation
*/
enum sdw_stream_type {
SDW_STREAM_PCM = 0,
SDW_STREAM_PDM = 1,
+ SDW_STREAM_BPT = 2,
};
/**
@@ -222,62 +241,36 @@ enum sdw_clk_stop_mode {
/**
* struct sdw_dp0_prop - DP0 properties
+ * @words: wordlengths supported
* @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64
* (inclusive)
* @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64
* (inclusive)
* @num_words: number of wordlengths supported
- * @words: wordlengths supported
+ * @ch_prep_timeout: Port-specific timeout value, in milliseconds
* @BRA_flow_controlled: Slave implementation results in an OK_NotReady
* response
* @simple_ch_prep_sm: If channel prepare sequence is required
* @imp_def_interrupts: If set, each bit corresponds to support for
* implementation-defined interrupts
+ * @num_lanes: array size of @lane_list
+ * @lane_list: indicates which Lanes can be used by DP0
*
* The wordlengths are specified by Spec as max, min AND number of
* discrete values, implementation can define based on the wordlengths they
* support
*/
struct sdw_dp0_prop {
+ u32 *words;
u32 max_word;
u32 min_word;
u32 num_words;
- u32 *words;
+ u32 ch_prep_timeout;
bool BRA_flow_controlled;
bool simple_ch_prep_sm;
bool imp_def_interrupts;
-};
-
-/**
- * struct sdw_dpn_audio_mode - Audio mode properties for DPn
- * @bus_min_freq: Minimum bus frequency, in Hz
- * @bus_max_freq: Maximum bus frequency, in Hz
- * @bus_num_freq: Number of discrete frequencies supported
- * @bus_freq: Discrete bus frequencies, in Hz
- * @min_freq: Minimum sampling frequency, in Hz
- * @max_freq: Maximum sampling bus frequency, in Hz
- * @num_freq: Number of discrete sampling frequency supported
- * @freq: Discrete sampling frequencies, in Hz
- * @prep_ch_behave: Specifies the dependencies between Channel Prepare
- * sequence and bus clock configuration
- * If 0, Channel Prepare can happen at any Bus clock rate
- * If 1, Channel Prepare sequence shall happen only after Bus clock is
- * changed to a frequency supported by this mode or compatible modes
- * described by the next field
- * @glitchless: Bitmap describing possible glitchless transitions from this
- * Audio Mode to other Audio Modes
- */
-struct sdw_dpn_audio_mode {
- u32 bus_min_freq;
- u32 bus_max_freq;
- u32 bus_num_freq;
- u32 *bus_freq;
- u32 max_freq;
- u32 min_freq;
- u32 num_freq;
- u32 *freq;
- u32 prep_ch_behave;
- u32 glitchless;
+ int num_lanes;
+ u32 *lane_list;
};
/**
@@ -292,24 +285,25 @@ struct sdw_dpn_audio_mode {
* @type: Data port type. Full, Simplified or Reduced
* @max_grouping: Maximum number of samples that can be grouped together for
* a full data port
- * @simple_ch_prep_sm: If the port supports simplified channel prepare state
- * machine
* @ch_prep_timeout: Port-specific timeout value, in milliseconds
* @imp_def_interrupts: If set, each bit corresponds to support for
* implementation-defined interrupts
* @max_ch: Maximum channels supported
* @min_ch: Minimum channels supported
* @num_channels: Number of discrete channels supported
- * @channels: Discrete channels supported
* @num_ch_combinations: Number of channel combinations supported
+ * @channels: Discrete channels supported
* @ch_combinations: Channel combinations supported
+ * @lane_list: indicates which Lanes can be used by DPn
+ * @num_lanes: array size of @lane_list
* @modes: SDW mode supported
* @max_async_buffer: Number of samples that this port can buffer in
* asynchronous modes
+ * @port_encoding: Payload Channel Sample encoding schemes supported
* @block_pack_mode: Type of block port mode supported
* @read_only_wordlength: Read Only wordlength field in DPN_BlockCtrl1 register
- * @port_encoding: Payload Channel Sample encoding schemes supported
- * @audio_modes: Audio modes supported
+ * @simple_ch_prep_sm: If the port supports simplified channel prepare state
+ * machine
*/
struct sdw_dpn_prop {
u32 num;
@@ -319,25 +313,29 @@ struct sdw_dpn_prop {
u32 *words;
enum sdw_dpn_type type;
u32 max_grouping;
- bool simple_ch_prep_sm;
u32 ch_prep_timeout;
u32 imp_def_interrupts;
u32 max_ch;
u32 min_ch;
u32 num_channels;
- u32 *channels;
u32 num_ch_combinations;
+ u32 *channels;
u32 *ch_combinations;
+ u32 *lane_list;
+ int num_lanes;
u32 modes;
u32 max_async_buffer;
+ u32 port_encoding;
bool block_pack_mode;
bool read_only_wordlength;
- u32 port_encoding;
- struct sdw_dpn_audio_mode *audio_modes;
+ bool simple_ch_prep_sm;
};
/**
* struct sdw_slave_prop - SoundWire Slave properties
+ * @dp0_prop: Data Port 0 properties
+ * @src_dpn_prop: Source Data Port N properties
+ * @sink_dpn_prop: Sink Data Port N properties
* @mipi_revision: Spec version of the implementation
* @wake_capable: Wake-up events are supported
* @test_mode_capable: If test mode is supported
@@ -354,20 +352,27 @@ struct sdw_dpn_prop {
* SCP_AddrPage2
* @bank_delay_support: Slave implements bank delay/bridge support registers
* SCP_BankDelay and SCP_NextFrame
+ * @lane_control_support: Slave supports lane control
* @p15_behave: Slave behavior when the Master attempts a read to the Port15
* alias
- * @lane_control_support: Slave supports lane control
* @master_count: Number of Masters present on this Slave
* @source_ports: Bitmap identifying source ports
* @sink_ports: Bitmap identifying sink ports
- * @dp0_prop: Data Port 0 properties
- * @src_dpn_prop: Source Data Port N properties
- * @sink_dpn_prop: Sink Data Port N properties
- * @scp_int1_mask: SCP_INT1_MASK desired settings
* @quirks: bitmask identifying deltas from the MIPI specification
- * @is_sdca: the Slave supports the SDCA specification
+ * @sdca_interrupt_register_list: indicates which sets of SDCA interrupt status
+ * and masks are supported
+ * @commit_register_supported: is PCP_Commit register supported
+ * @scp_int1_mask: SCP_INT1_MASK desired settings
+ * @lane_maps: Lane mapping for the slave, only valid if lane_control_support is set
+ * @clock_reg_supported: the Peripheral implements the clock base and scale
+ * registers introduced with the SoundWire 1.2 specification. SDCA devices
+ * do not need to set this boolean property as the registers are required.
+ * @use_domain_irq: call actual IRQ handler on slave, as well as callback
*/
struct sdw_slave_prop {
+ struct sdw_dp0_prop *dp0_prop;
+ struct sdw_dpn_prop *src_dpn_prop;
+ struct sdw_dpn_prop *sink_dpn_prop;
u32 mipi_revision;
bool wake_capable;
bool test_mode_capable;
@@ -379,30 +384,32 @@ struct sdw_slave_prop {
bool high_PHY_capable;
bool paging_support;
bool bank_delay_support;
- enum sdw_p15_behave p15_behave;
bool lane_control_support;
+ enum sdw_p15_behave p15_behave;
u32 master_count;
u32 source_ports;
u32 sink_ports;
- struct sdw_dp0_prop *dp0_prop;
- struct sdw_dpn_prop *src_dpn_prop;
- struct sdw_dpn_prop *sink_dpn_prop;
- u8 scp_int1_mask;
u32 quirks;
- bool is_sdca;
+ u32 sdca_interrupt_register_list;
+ u8 commit_register_supported;
+ u8 scp_int1_mask;
+ u8 lane_maps[SDW_MAX_LANES];
+ bool clock_reg_supported;
+ bool use_domain_irq;
};
#define SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY BIT(0)
/**
* struct sdw_master_prop - Master properties
+ * @clk_gears: Clock gears supported
+ * @clk_freq: Clock frequencies supported, in Hz
+ * @quirks: bitmask identifying optional behavior beyond the scope of the MIPI specification
* @revision: MIPI spec version of the implementation
* @clk_stop_modes: Bitmap, bit N set when clock-stop-modeN supported
* @max_clk_freq: Maximum Bus clock frequency, in Hz
* @num_clk_gears: Number of clock gears supported
- * @clk_gears: Clock gears supported
* @num_clk_freq: Number of clock frequencies supported, in Hz
- * @clk_freq: Clock frequencies supported, in Hz
* @default_frame_rate: Controller default Frame rate, in Hz
* @default_row: Number of rows
* @default_col: Number of columns
@@ -411,24 +418,23 @@ struct sdw_slave_prop {
* command
* @mclk_freq: clock reference passed to SoundWire Master, in Hz.
* @hw_disabled: if true, the Master is not functional, typically due to pin-mux
- * @quirks: bitmask identifying optional behavior beyond the scope of the MIPI specification
*/
struct sdw_master_prop {
+ u32 *clk_gears;
+ u32 *clk_freq;
+ u64 quirks;
u32 revision;
u32 clk_stop_modes;
u32 max_clk_freq;
u32 num_clk_gears;
- u32 *clk_gears;
u32 num_clk_freq;
- u32 *clk_freq;
u32 default_frame_rate;
u32 default_row;
u32 default_col;
- bool dynamic_frame;
u32 err_threshold;
u32 mclk_freq;
+ bool dynamic_frame;
bool hw_disabled;
- u64 quirks;
};
/* Definitions for Master quirks */
@@ -453,6 +459,7 @@ struct sdw_master_prop {
int sdw_master_read_prop(struct sdw_bus *bus);
int sdw_slave_read_prop(struct sdw_slave *slave);
+int sdw_slave_read_lane_mapping(struct sdw_slave *slave);
/*
* SDW Slave Structures and APIs
@@ -478,6 +485,11 @@ struct sdw_slave_id {
__u8 sdw_version:4;
};
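+
+/**
+ * struct sdw_peripherals - Peripherals exposed across enabled links
+ * @num_peripherals: number of entries in @array
+ * @array: flexible array of Peripheral device pointers
+ */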
+struct sdw_peripherals {
+ int num_peripherals;
+ struct sdw_slave *array[];
+};
+
/*
* Helper macros to extract the MIPI-defined IDs
*
@@ -530,21 +542,6 @@ enum sdw_reg_bank {
};
/**
- * struct sdw_bus_conf: Bus configuration
- *
- * @clk_freq: Clock frequency, in Hz
- * @num_rows: Number of rows in frame
- * @num_cols: Number of columns in frame
- * @bank: Next register bank
- */
-struct sdw_bus_conf {
- unsigned int clk_freq;
- unsigned int num_rows;
- unsigned int num_cols;
- unsigned int bank;
-};
-
-/**
* struct sdw_prepare_ch: Prepare/De-prepare Data Port channel
*
* @num: Port number
@@ -566,13 +563,15 @@ struct sdw_prepare_ch {
* enum sdw_port_prep_ops: Prepare operations for Data Port
*
* @SDW_OPS_PORT_PRE_PREP: Pre prepare operation for the Port
- * @SDW_OPS_PORT_PREP: Prepare operation for the Port
+ * @SDW_OPS_PORT_PRE_DEPREP: Pre deprepare operation for the Port
* @SDW_OPS_PORT_POST_PREP: Post prepare operation for the Port
+ * @SDW_OPS_PORT_POST_DEPREP: Post deprepare operation for the Port
*/
enum sdw_port_prep_ops {
SDW_OPS_PORT_PRE_PREP = 0,
- SDW_OPS_PORT_PREP = 1,
- SDW_OPS_PORT_POST_PREP = 2,
+ SDW_OPS_PORT_PRE_DEPREP,
+ SDW_OPS_PORT_POST_PREP,
+ SDW_OPS_PORT_POST_DEPREP,
};
/**
@@ -612,6 +611,7 @@ struct sdw_bus_params {
* @update_status: Update Slave status
* @bus_config: Update the bus config for Slave
* @port_prep: Prepare the port with parameters
+ * @clk_stop: handle imp-def sequences before and after prepare and de-prepare
*/
struct sdw_slave_ops {
int (*read_prop)(struct sdw_slave *sdw);
@@ -624,20 +624,19 @@ struct sdw_slave_ops {
int (*port_prep)(struct sdw_slave *slave,
struct sdw_prepare_ch *prepare_ch,
enum sdw_port_prep_ops pre_ops);
- int (*get_clk_stop_mode)(struct sdw_slave *slave);
int (*clk_stop)(struct sdw_slave *slave,
enum sdw_clk_stop_mode mode,
enum sdw_clk_stop_type type);
-
};
/**
* struct sdw_slave - SoundWire Slave
* @id: MIPI device ID
* @dev: Linux device
+ * @index: internal ID for this slave
+ * @irq: IRQ number
* @status: Status reported by the Slave
* @bus: Bus handle
- * @ops: Slave callback ops
* @prop: Slave properties
* @debugfs: Slave debugfs
* @node: node for bus list
@@ -646,9 +645,6 @@ struct sdw_slave_ops {
* @dev_num: Current Device Number, values can be 0 or dev_num_sticky
* @dev_num_sticky: one-time static Device Number assigned by Bus
* @probed: boolean tracking driver state
- * @probe_complete: completion utility to control potential races
- * on startup between driver probe/initialization and SoundWire
- * Slave state changes/implementation-defined interrupts
* @enumeration_complete: completion utility to control potential races
* on startup between device enumeration and read/write access to the
* Slave device
@@ -661,13 +657,18 @@ struct sdw_slave_ops {
* initialized
* @first_interrupt_done: status flag tracking if the interrupt handling
* for a Slave happens for the first time after enumeration
+ * @is_mockup_device: status flag used to squelch errors in the command/control
+ * protocol for SoundWire mockup devices
+ * @sdw_dev_lock: mutex used to protect callbacks/remove races
+ * @sdca_data: structure containing all device data for SDCA helpers
*/
struct sdw_slave {
struct sdw_slave_id id;
struct device dev;
+ int index;
+ int irq;
enum sdw_slave_status status;
struct sdw_bus *bus;
- const struct sdw_slave_ops *ops;
struct sdw_slave_prop prop;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
@@ -675,15 +676,16 @@ struct sdw_slave {
struct list_head node;
struct completion port_ready[SDW_MAX_PORTS];
unsigned int m_port_map[SDW_MAX_PORTS];
- enum sdw_clk_stop_mode curr_clk_stop_mode;
u16 dev_num;
u16 dev_num_sticky;
bool probed;
- struct completion probe_complete;
struct completion enumeration_complete;
struct completion initialization_complete;
u32 unattach_request;
bool first_interrupt_done;
+ bool is_mockup_device;
+ struct mutex sdw_dev_lock; /* protect callbacks/remove races */
+ struct sdca_device_data sdca_data;
};
#define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev)
@@ -702,10 +704,7 @@ struct sdw_master_device {
container_of(d, struct sdw_master_device, dev)
struct sdw_driver {
- const char *name;
-
- int (*probe)(struct sdw_slave *sdw,
- const struct sdw_device_id *id);
+ int (*probe)(struct sdw_slave *sdw, const struct sdw_device_id *id);
int (*remove)(struct sdw_slave *sdw);
void (*shutdown)(struct sdw_slave *sdw);
@@ -724,7 +723,7 @@ struct sdw_driver {
SDW_SLAVE_ENTRY_EXT((_mfg_id), (_part_id), 0, 0, (_drv_data))
int sdw_handle_slave_status(struct sdw_bus *bus,
- enum sdw_slave_status status[]);
+ enum sdw_slave_status status[]);
/*
* SDW master structures and APIs
@@ -806,121 +805,85 @@ struct sdw_enable_ch {
*/
struct sdw_master_port_ops {
int (*dpn_set_port_params)(struct sdw_bus *bus,
- struct sdw_port_params *port_params,
- unsigned int bank);
+ struct sdw_port_params *port_params,
+ unsigned int bank);
int (*dpn_set_port_transport_params)(struct sdw_bus *bus,
- struct sdw_transport_params *transport_params,
- enum sdw_reg_bank bank);
- int (*dpn_port_prep)(struct sdw_bus *bus,
- struct sdw_prepare_ch *prepare_ch);
+ struct sdw_transport_params *transport_params,
+ enum sdw_reg_bank bank);
+ int (*dpn_port_prep)(struct sdw_bus *bus, struct sdw_prepare_ch *prepare_ch);
int (*dpn_port_enable_ch)(struct sdw_bus *bus,
- struct sdw_enable_ch *enable_ch, unsigned int bank);
+ struct sdw_enable_ch *enable_ch, unsigned int bank);
};
struct sdw_msg;
/**
- * struct sdw_defer - SDW deffered message
- * @length: message length
+ * struct sdw_defer - SDW deferred message
* @complete: message completion
* @msg: SDW message
+ * @length: message length
*/
struct sdw_defer {
+ struct sdw_msg *msg;
int length;
struct completion complete;
- struct sdw_msg *msg;
};
+/*
+ * Add a practical limit to BPT transfer sizes. BPT is typically used
+ * to transfer firmware, and larger firmware transfers will increase
+ * the cold latency beyond typical OS or user requirements.
+ */
+#define SDW_BPT_MSG_MAX_BYTES (1024 * 1024)
+
+struct sdw_bpt_msg;
+
/**
* struct sdw_master_ops - Master driver ops
* @read_prop: Read Master properties
* @override_adr: Override value read from firmware (quirk for buggy firmware)
* @xfer_msg: Transfer message callback
- * @xfer_msg_defer: Defer version of transfer message callback
- * @reset_page_addr: Reset the SCP page address registers
+ * @xfer_msg_defer: Defer version of transfer message callback. The message is handled with the
+ * bus struct @sdw_defer
* @set_bus_conf: Set the bus configuration
* @pre_bank_switch: Callback for pre bank switch
* @post_bank_switch: Callback for post bank switch
+ * @read_ping_status: Read status from PING frames, reported with two bits per Device.
+ * Bits 31:24 are reserved.
+ * @get_device_num: Callback for vendor-specific device_number allocation
+ * @put_device_num: Callback for vendor-specific device_number release
+ * @new_peripheral_assigned: Callback to handle enumeration of new peripheral.
+ * @bpt_send_async: reserve resources for BPT stream and send message
+ * using the BPT protocol
+ * @bpt_wait: wait for message completion using the BPT protocol
+ * and release resources
*/
struct sdw_master_ops {
int (*read_prop)(struct sdw_bus *bus);
- u64 (*override_adr)
- (struct sdw_bus *bus, u64 addr);
- enum sdw_command_response (*xfer_msg)
- (struct sdw_bus *bus, struct sdw_msg *msg);
- enum sdw_command_response (*xfer_msg_defer)
- (struct sdw_bus *bus, struct sdw_msg *msg,
- struct sdw_defer *defer);
- enum sdw_command_response (*reset_page_addr)
- (struct sdw_bus *bus, unsigned int dev_num);
+ u64 (*override_adr)(struct sdw_bus *bus, u64 addr);
+ enum sdw_command_response (*xfer_msg)(struct sdw_bus *bus, struct sdw_msg *msg);
+ enum sdw_command_response (*xfer_msg_defer)(struct sdw_bus *bus);
int (*set_bus_conf)(struct sdw_bus *bus,
- struct sdw_bus_params *params);
+ struct sdw_bus_params *params);
int (*pre_bank_switch)(struct sdw_bus *bus);
int (*post_bank_switch)(struct sdw_bus *bus);
-
-};
-
-/**
- * struct sdw_bus - SoundWire bus
- * @dev: Shortcut to &bus->md->dev to avoid changing the entire code.
- * @md: Master device
- * @link_id: Link id number, can be 0 to N, unique for each Master
- * @id: bus system-wide unique id
- * @slaves: list of Slaves on this bus
- * @assigned: Bitmap for Slave device numbers.
- * Bit set implies used number, bit clear implies unused number.
- * @bus_lock: bus lock
- * @msg_lock: message lock
- * @compute_params: points to Bus resource management implementation
- * @ops: Master callback ops
- * @port_ops: Master port callback ops
- * @params: Current bus parameters
- * @prop: Master properties
- * @m_rt_list: List of Master instance of all stream(s) running on Bus. This
- * is used to compute and program bus bandwidth, clock, frame shape,
- * transport and port parameters
- * @debugfs: Bus debugfs
- * @defer_msg: Defer message
- * @clk_stop_timeout: Clock stop timeout computed
- * @bank_switch_timeout: Bank switch timeout computed
- * @multi_link: Store bus property that indicates if multi links
- * are supported. This flag is populated by drivers after reading
- * appropriate firmware (ACPI/DT).
- * @hw_sync_min_links: Number of links used by a stream above which
- * hardware-based synchronization is required. This value is only
- * meaningful if multi_link is set. If set to 1, hardware-based
- * synchronization will be used even if a stream only uses a single
- * SoundWire segment.
- */
-struct sdw_bus {
- struct device *dev;
- struct sdw_master_device *md;
- unsigned int link_id;
- int id;
- struct list_head slaves;
- DECLARE_BITMAP(assigned, SDW_MAX_DEVICES);
- struct mutex bus_lock;
- struct mutex msg_lock;
- int (*compute_params)(struct sdw_bus *bus);
- const struct sdw_master_ops *ops;
- const struct sdw_master_port_ops *port_ops;
- struct sdw_bus_params params;
- struct sdw_master_prop prop;
- struct list_head m_rt_list;
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs;
-#endif
- struct sdw_defer defer_msg;
- unsigned int clk_stop_timeout;
- u32 bank_switch_timeout;
- bool multi_link;
- int hw_sync_min_links;
+ u32 (*read_ping_status)(struct sdw_bus *bus);
+ int (*get_device_num)(struct sdw_bus *bus, struct sdw_slave *slave);
+ void (*put_device_num)(struct sdw_bus *bus, struct sdw_slave *slave);
+ void (*new_peripheral_assigned)(struct sdw_bus *bus,
+ struct sdw_slave *slave,
+ int dev_num);
+ int (*bpt_send_async)(struct sdw_bus *bus, struct sdw_slave *slave,
+ struct sdw_bpt_msg *msg);
+ int (*bpt_wait)(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg);
};
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
struct fwnode_handle *fwnode);
void sdw_bus_master_delete(struct sdw_bus *bus);
+void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay);
+
/**
* sdw_port_config: Master or Slave Port configuration
*
@@ -939,7 +902,7 @@ struct sdw_port_config {
* @ch_count: Channel count of the stream
* @bps: Number of bits per audio sample
* @direction: Data direction
- * @type: Stream type PCM or PDM
+ * @type: Stream type PCM, PDM or BPT
*/
struct sdw_stream_config {
unsigned int frame_rate;
@@ -989,40 +952,113 @@ struct sdw_stream_params {
* @name: SoundWire stream name
* @params: Stream parameters
* @state: Current state of the stream
- * @type: Stream type PCM or PDM
+ * @type: Stream type PCM, PDM or BPT
+ * @m_rt_count: Count of Master runtime(s) in this stream
* @master_list: List of Master runtime(s) in this stream.
* master_list can contain only one m_rt per Master instance
* for a stream
- * @m_rt_count: Count of Master runtime(s) in this stream
*/
struct sdw_stream_runtime {
const char *name;
struct sdw_stream_params params;
enum sdw_stream_state state;
enum sdw_stream_type type;
- struct list_head master_list;
int m_rt_count;
+ struct list_head master_list;
};
-struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name);
+/**
+ * struct sdw_bus - SoundWire bus
+ * @dev: Shortcut to &bus->md->dev to avoid changing the entire code.
+ * @md: Master device
+ * @bus_lock_key: bus lock key associated to @bus_lock
+ * @bus_lock: bus lock
+ * @slave_ida: IDA for allocating internal slave IDs
+ * @slaves: list of Slaves on this bus
+ * @msg_lock_key: message lock key associated to @msg_lock
+ * @msg_lock: message lock
+ * @m_rt_list: List of Master instance of all stream(s) running on Bus. This
+ * is used to compute and program bus bandwidth, clock, frame shape,
+ * transport and port parameters
+ * @defer_msg: Defer message
+ * @params: Current bus parameters
+ * @stream_refcount: number of streams currently using this bus
+ * @bpt_stream_refcount: number of BPT streams currently using this bus (should
+ * be zero or one, multiple streams per link are not supported).
+ * @bpt_stream: pointer stored to handle BPT streams.
+ * @ops: Master callback ops
+ * @port_ops: Master port callback ops
+ * @prop: Master properties
+ * @vendor_specific_prop: pointer to non-standard properties
+ * @hw_sync_min_links: Number of links used by a stream above which
+ * hardware-based synchronization is required. This value is only
+ * meaningful if multi_link is set. If set to 1, hardware-based
+ * synchronization will be used even if a stream only uses a single
+ * SoundWire segment.
+ * @controller_id: system-unique controller ID. If set to -1, the bus @id will be used.
+ * @link_id: Link id number, can be 0 to N, unique for each Controller
+ * @id: bus system-wide unique id
+ * @compute_params: points to Bus resource management implementation
+ * @assigned: Bitmap for Slave device numbers.
+ * Bit set implies used number, bit clear implies unused number.
+ * @clk_stop_timeout: Clock stop timeout computed
+ * @bank_switch_timeout: Bank switch timeout computed
+ * @domain: IRQ domain
+ * @irq_chip: IRQ chip
+ * @debugfs: Bus debugfs (optional)
+ * @multi_link: Store bus property that indicates if multi links
+ * are supported. This flag is populated by drivers after reading
+ * appropriate firmware (ACPI/DT).
+ * @lane_used_bandwidth: how much bandwidth in bits per second is used by each lane
+ */
+struct sdw_bus {
+ struct device *dev;
+ struct sdw_master_device *md;
+ struct lock_class_key bus_lock_key;
+ struct mutex bus_lock;
+ struct ida slave_ida;
+ struct list_head slaves;
+ struct lock_class_key msg_lock_key;
+ struct mutex msg_lock;
+ struct list_head m_rt_list;
+ struct sdw_defer defer_msg;
+ struct sdw_bus_params params;
+ int stream_refcount;
+ int bpt_stream_refcount;
+ struct sdw_stream_runtime *bpt_stream;
+ const struct sdw_master_ops *ops;
+ const struct sdw_master_port_ops *port_ops;
+ struct sdw_master_prop prop;
+ void *vendor_specific_prop;
+ int hw_sync_min_links;
+ int controller_id;
+ unsigned int link_id;
+ int id;
+ int (*compute_params)(struct sdw_bus *bus, struct sdw_stream_runtime *stream);
+ DECLARE_BITMAP(assigned, SDW_MAX_DEVICES);
+ unsigned int clk_stop_timeout;
+ u32 bank_switch_timeout;
+ struct irq_chip irq_chip;
+ struct irq_domain *domain;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
+ bool multi_link;
+ unsigned int lane_used_bandwidth[SDW_MAX_LANES];
+};
+
+struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name, enum sdw_stream_type type);
void sdw_release_stream(struct sdw_stream_runtime *stream);
-int sdw_compute_params(struct sdw_bus *bus);
+int sdw_compute_params(struct sdw_bus *bus, struct sdw_stream_runtime *stream);
int sdw_stream_add_master(struct sdw_bus *bus,
- struct sdw_stream_config *stream_config,
- struct sdw_port_config *port_config,
- unsigned int num_ports,
- struct sdw_stream_runtime *stream);
-int sdw_stream_add_slave(struct sdw_slave *slave,
- struct sdw_stream_config *stream_config,
- struct sdw_port_config *port_config,
- unsigned int num_ports,
- struct sdw_stream_runtime *stream);
+ struct sdw_stream_config *stream_config,
+ const struct sdw_port_config *port_config,
+ unsigned int num_ports,
+ struct sdw_stream_runtime *stream);
int sdw_stream_remove_master(struct sdw_bus *bus,
- struct sdw_stream_runtime *stream);
-int sdw_stream_remove_slave(struct sdw_slave *slave,
- struct sdw_stream_runtime *stream);
+ struct sdw_stream_runtime *stream);
int sdw_startup_stream(void *sdw_substream);
int sdw_prepare_stream(struct sdw_stream_runtime *stream);
int sdw_enable_stream(struct sdw_stream_runtime *stream);
@@ -1033,15 +1069,134 @@ int sdw_bus_prep_clk_stop(struct sdw_bus *bus);
int sdw_bus_clk_stop(struct sdw_bus *bus);
int sdw_bus_exit_clk_stop(struct sdw_bus *bus);
-/* messaging and data APIs */
+int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id);
+void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id);
+bool is_clock_scaling_supported_by_slave(struct sdw_slave *slave);
+int sdw_bpt_send_async(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg);
+int sdw_bpt_wait(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg);
+int sdw_bpt_send_sync(struct sdw_bus *bus, struct sdw_slave *slave, struct sdw_bpt_msg *msg);
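+
+/*
+ * Illustrative note: sdw_bpt_send_sync() is the blocking convenience
+ * wrapper, conceptually sdw_bpt_send_async() followed by sdw_bpt_wait()
+ * on the same message; see the implementation for error handling.
+ */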
+
+#if IS_ENABLED(CONFIG_SOUNDWIRE)
+
+int sdw_stream_add_slave(struct sdw_slave *slave,
+ struct sdw_stream_config *stream_config,
+ const struct sdw_port_config *port_config,
+ unsigned int num_ports,
+ struct sdw_stream_runtime *stream);
+int sdw_stream_remove_slave(struct sdw_slave *slave,
+ struct sdw_stream_runtime *stream);
+
+struct device *of_sdw_find_device_by_node(struct device_node *np);
+
+int sdw_slave_get_current_bank(struct sdw_slave *sdev);
+
+int sdw_slave_get_scale_index(struct sdw_slave *slave, u8 *base);
+
+/* messaging and data APIs */
int sdw_read(struct sdw_slave *slave, u32 addr);
int sdw_write(struct sdw_slave *slave, u32 addr, u8 value);
int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value);
int sdw_read_no_pm(struct sdw_slave *slave, u32 addr);
int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
-int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
-int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id);
-void sdw_extract_slave_id(struct sdw_bus *bus, u64 addr, struct sdw_slave_id *id);
+int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val);
+int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val);
+int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val);
+int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val);
+int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val);
+
+#else
+
+static inline int sdw_stream_add_slave(struct sdw_slave *slave,
+ struct sdw_stream_config *stream_config,
+ const struct sdw_port_config *port_config,
+ unsigned int num_ports,
+ struct sdw_stream_runtime *stream)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_stream_remove_slave(struct sdw_slave *slave,
+ struct sdw_stream_runtime *stream)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline struct device *of_sdw_find_device_by_node(struct device_node *np)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return NULL;
+}
+
+static inline int sdw_slave_get_current_bank(struct sdw_slave *sdev)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+/* messaging and data APIs */
+static inline int sdw_read(struct sdw_slave *slave, u32 addr)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+static inline int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
+#endif /* CONFIG_SOUNDWIRE */
#endif /* __SOUNDWIRE_H */
diff --git a/include/linux/soundwire/sdw_amd.h b/include/linux/soundwire/sdw_amd.h
new file mode 100644
index 000000000000..fe31773d5210
--- /dev/null
+++ b/include/linux/soundwire/sdw_amd.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright (C) 2023-24 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef __SDW_AMD_H
+#define __SDW_AMD_H
+
+#include <linux/acpi.h>
+#include <linux/soundwire/sdw.h>
+
+/* AMD pm_runtime quirk definitions */
+
+/*
+ * Force the clock to stop(ClockStopMode0) when suspend callback
+ * is invoked.
+ */
+#define AMD_SDW_CLK_STOP_MODE 1
+
+/*
+ * Stop the bus when runtime suspend/system level suspend callback
+ * is invoked. If set, a complete bus reset and re-enumeration will
+ * be performed when the bus restarts. In-band wake interrupts are
+ * not supported in this mode.
+ */
+#define AMD_SDW_POWER_OFF_MODE 2
+#define ACP_SDW0 0
+#define ACP_SDW1 1
+#define AMD_SDW_MAX_MANAGER_COUNT 2
+#define ACP63_PCI_REV_ID 0x63
+#define ACP70_PCI_REV_ID 0x70
+#define ACP71_PCI_REV_ID 0x71
+#define ACP72_PCI_REV_ID 0x72
+
+struct acp_sdw_pdata {
+ u16 instance;
+ u32 acp_rev;
+ /* mutex to protect acp common register access */
+ struct mutex *acp_sdw_lock;
+};
+
+/**
+ * struct sdw_amd_dai_runtime - AMD SoundWire DAI runtime data
+ *
+ * @name: SoundWire stream name
+ * @stream: stream runtime
+ * @bus: Bus handle
+ * @stream_type: Stream type
+ */
+struct sdw_amd_dai_runtime {
+ char *name;
+ struct sdw_stream_runtime *stream;
+ struct sdw_bus *bus;
+ enum sdw_stream_type stream_type;
+};
+
+/**
+ * struct amd_sdw_manager - amd manager driver context
+ * @bus: bus handle
+ * @dev: linux device
+ * @mmio: SoundWire registers mmio base
+ * @acp_mmio: acp registers mmio base
+ * @amd_sdw_irq_thread: SoundWire manager interrupt handling work item
+ * @amd_sdw_work: peripheral status handling work item
+ * @acp_sdw_lock: mutex to protect ACP shared register access
+ * @status: peripheral devices status array
+ * @num_din_ports: number of input ports
+ * @num_dout_ports: number of output ports
+ * @cols_index: Column index in frame shape
+ * @rows_index: Row index in frame shape
+ * @instance: SoundWire manager instance
+ * @quirks: SoundWire manager quirks
+ * @wake_en_mask: wake enable mask per SoundWire manager
+ * @acp_rev: acp pci device revision id
+ * @clk_stopped: flag set to true when clock is stopped
+ * @power_mode_mask: mask describing the AMD SoundWire manager power mode
+ * @dai_runtime_array: dai runtime array
+ */
+struct amd_sdw_manager {
+ struct sdw_bus bus;
+ struct device *dev;
+
+ void __iomem *mmio;
+ void __iomem *acp_mmio;
+
+ struct work_struct amd_sdw_irq_thread;
+ struct work_struct amd_sdw_work;
+ /* mutex to protect acp common register access */
+ struct mutex *acp_sdw_lock;
+
+ enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
+
+ int num_din_ports;
+ int num_dout_ports;
+
+ int cols_index;
+ int rows_index;
+
+ u32 instance;
+ u32 quirks;
+ u32 wake_en_mask;
+ u32 power_mode_mask;
+ u32 acp_rev;
+ bool clk_stopped;
+
+ struct sdw_amd_dai_runtime **dai_runtime_array;
+};
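
A suspend path would typically consume @power_mode_mask against the AMD_SDW_* quirks defined above; a minimal sketch, where amd_sdw_clock_stop() and amd_sdw_power_off() are hypothetical helpers:

static int example_amd_manager_suspend(struct amd_sdw_manager *amd_manager)
{
	if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE)
		return amd_sdw_clock_stop(amd_manager);	/* hypothetical */
	if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE)
		return amd_sdw_power_off(amd_manager);	/* hypothetical */
	return 0;
}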
+
+/**
+ * struct sdw_amd_acpi_info - SoundWire AMD information found in ACPI tables
+ * @handle: ACPI controller handle
+ * @count: maximum number of SoundWire manager links supported on the AMD platform
+ * @link_mask: bit-wise mask listing links enabled by BIOS menu
+ */
+struct sdw_amd_acpi_info {
+ acpi_handle handle;
+ int count;
+ u32 link_mask;
+};
+
+/**
+ * struct sdw_amd_ctx - context allocated by the controller driver probe
+ *
+ * @count: link count
+ * @link_mask: bit-wise mask listing SoundWire links reported by the
+ * Controller
+ * @pdev: platform device structure
+ * @peripherals: array representing Peripherals exposed across all enabled links
+ */
+struct sdw_amd_ctx {
+ int count;
+ u32 link_mask;
+ struct platform_device *pdev[AMD_SDW_MAX_MANAGER_COUNT];
+ struct sdw_peripherals *peripherals;
+};
+
+/**
+ * struct sdw_amd_res - SoundWire AMD global resource structure,
+ * typically populated by the DSP driver or the legacy driver
+ *
+ * @acp_rev: acp pci device revision id
+ * @addr: acp pci device resource start address
+ * @reg_range: ACP register range
+ * @link_mask: bit-wise mask listing links selected by the DSP driver/
+ * legacy driver
+ * @count: link count
+ * @mmio_base: mmio base of SoundWire registers
+ * @handle: ACPI parent handle
+ * @parent: parent device
+ * @dev: device implementing hwparams and free callbacks
+ * @acp_lock: mutex protecting ACP common register access
+ */
+struct sdw_amd_res {
+ u32 acp_rev;
+ u32 addr;
+ u32 reg_range;
+ u32 link_mask;
+ int count;
+ void __iomem *mmio_base;
+ acpi_handle handle;
+ struct device *parent;
+ struct device *dev;
+ /* used to protect ACP common register access */
+ struct mutex *acp_lock;
+};
+
+int sdw_amd_probe(struct sdw_amd_res *res, struct sdw_amd_ctx **ctx);
+
+void sdw_amd_exit(struct sdw_amd_ctx *ctx);
+
+int sdw_amd_get_slave_info(struct sdw_amd_ctx *ctx);
+
+int amd_sdw_scan_controller(struct sdw_amd_acpi_info *info);
+#endif
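
The expected call flow is scan first, then probe with a populated resource structure; a sketch for a hypothetical PCI parent driver (example_parent_probe and its parameters are assumptions, and <linux/pci.h> would be needed):

static int example_parent_probe(struct pci_dev *pci, void __iomem *acp_base,
				struct mutex *acp_lock)
{
	struct sdw_amd_acpi_info info = { .handle = ACPI_HANDLE(&pci->dev) };
	struct sdw_amd_ctx *ctx;
	struct sdw_amd_res res = {};
	int ret;

	ret = amd_sdw_scan_controller(&info);
	if (ret)
		return ret;

	res.acp_rev = pci->revision;
	res.addr = pci_resource_start(pci, 0);	/* assumption: ACP in BAR 0 */
	res.mmio_base = acp_base;
	res.link_mask = info.link_mask;
	res.count = info.count;
	res.handle = info.handle;
	res.parent = &pci->dev;
	res.acp_lock = acp_lock;

	return sdw_amd_probe(&res, &ctx);	/* a real driver stores ctx */
}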
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 3a5446ac014a..9c9435009537 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -4,9 +4,194 @@
#ifndef __SDW_INTEL_H
#define __SDW_INTEL_H
+#include <linux/acpi.h>
#include <linux/irqreturn.h>
#include <linux/soundwire/sdw.h>
+/*********************************************************************
+ * cAVS and ACE1.x definitions
+ *********************************************************************/
+
+#define SDW_SHIM_BASE 0x2C000
+#define SDW_ALH_BASE 0x2C800
+#define SDW_SHIM_BASE_ACE 0x38000
+#define SDW_ALH_BASE_ACE 0x24000
+#define SDW_LINK_BASE 0x30000
+#define SDW_LINK_SIZE 0x10000
+
+/* Intel SHIM Registers Definition */
+/* LCAP */
+#define SDW_SHIM_LCAP 0x0
+#define SDW_SHIM_LCAP_LCOUNT_MASK GENMASK(2, 0)
+#define SDW_SHIM_LCAP_MLCS_MASK BIT(8)
+
+/* LCTL */
+#define SDW_SHIM_LCTL 0x4
+
+#define SDW_SHIM_LCTL_SPA BIT(0)
+#define SDW_SHIM_LCTL_SPA_MASK GENMASK(3, 0)
+#define SDW_SHIM_LCTL_CPA BIT(8)
+#define SDW_SHIM_LCTL_CPA_MASK GENMASK(11, 8)
+#define SDW_SHIM_LCTL_MLCS_MASK GENMASK(29, 27)
+#define SDW_SHIM_MLCS_XTAL_CLK 0x0
+#define SDW_SHIM_MLCS_CARDINAL_CLK 0x1
+#define SDW_SHIM_MLCS_AUDIO_PLL_CLK 0x2
+
+/* SYNC */
+#define SDW_SHIM_SYNC 0xC
+
+#define SDW_SHIM_SYNC_SYNCPRD_VAL_24 (24000 / SDW_CADENCE_GSYNC_KHZ - 1)
+#define SDW_SHIM_SYNC_SYNCPRD_VAL_24_576 (24576 / SDW_CADENCE_GSYNC_KHZ - 1)
+#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 (38400 / SDW_CADENCE_GSYNC_KHZ - 1)
+#define SDW_SHIM_SYNC_SYNCPRD_VAL_96 (96000 / SDW_CADENCE_GSYNC_KHZ - 1)
+#define SDW_SHIM_SYNC_SYNCPRD GENMASK(14, 0)
+#define SDW_SHIM_SYNC_SYNCCPU BIT(15)
+#define SDW_SHIM_SYNC_CMDSYNC_MASK GENMASK(19, 16)
+#define SDW_SHIM_SYNC_CMDSYNC BIT(16)
+#define SDW_SHIM_SYNC_SYNCGO BIT(24)
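
The SYNCPRD values are pre-divided sync periods for each clock source; arming the sync logic is then a simple field write, sketched here for a 38.4 MHz source (FIELD_PREP from <linux/bitfield.h>; accessor and sequencing are simplified assumptions):

static void example_shim_sync_arm(void __iomem *shim)
{
	u32 sync_reg = FIELD_PREP(SDW_SHIM_SYNC_SYNCPRD,
				  SDW_SHIM_SYNC_SYNCPRD_VAL_38_4);

	sync_reg |= SDW_SHIM_SYNC_SYNCCPU;	/* CPU-driven sync period */
	writel(sync_reg, shim + SDW_SHIM_SYNC);
}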
+
+/* Control stream capabilities and channel mask */
+#define SDW_SHIM_CTLSCAP(x) (0x010 + 0x60 * (x))
+#define SDW_SHIM_CTLS0CM(x) (0x012 + 0x60 * (x))
+#define SDW_SHIM_CTLS1CM(x) (0x014 + 0x60 * (x))
+#define SDW_SHIM_CTLS2CM(x) (0x016 + 0x60 * (x))
+#define SDW_SHIM_CTLS3CM(x) (0x018 + 0x60 * (x))
+
+/* PCM Stream capabilities */
+#define SDW_SHIM_PCMSCAP(x) (0x020 + 0x60 * (x))
+
+#define SDW_SHIM_PCMSCAP_ISS GENMASK(3, 0)
+#define SDW_SHIM_PCMSCAP_OSS GENMASK(7, 4)
+#define SDW_SHIM_PCMSCAP_BSS GENMASK(12, 8)
+
+/* PCM Stream Channel Map */
+#define SDW_SHIM_PCMSYCHM(x, y) (0x022 + (0x60 * (x)) + (0x2 * (y)))
+
+/* PCM Stream Channel Count */
+#define SDW_SHIM_PCMSYCHC(x, y) (0x042 + (0x60 * (x)) + (0x2 * (y)))
+
+#define SDW_SHIM_PCMSYCM_LCHN GENMASK(3, 0)
+#define SDW_SHIM_PCMSYCM_HCHN GENMASK(7, 4)
+#define SDW_SHIM_PCMSYCM_STREAM GENMASK(13, 8)
+#define SDW_SHIM_PCMSYCM_DIR BIT(15)
+
+/* IO control */
+#define SDW_SHIM_IOCTL(x) (0x06C + 0x60 * (x))
+
+#define SDW_SHIM_IOCTL_MIF BIT(0)
+#define SDW_SHIM_IOCTL_CO BIT(1)
+#define SDW_SHIM_IOCTL_COE BIT(2)
+#define SDW_SHIM_IOCTL_DO BIT(3)
+#define SDW_SHIM_IOCTL_DOE BIT(4)
+#define SDW_SHIM_IOCTL_BKE BIT(5)
+#define SDW_SHIM_IOCTL_WPDD BIT(6)
+#define SDW_SHIM_IOCTL_CIBD BIT(8)
+#define SDW_SHIM_IOCTL_DIBD BIT(9)
+
+/* Wake Enable */
+#define SDW_SHIM_WAKEEN 0x190
+
+#define SDW_SHIM_WAKEEN_ENABLE BIT(0)
+
+/* Wake Status */
+#define SDW_SHIM_WAKESTS 0x192
+
+#define SDW_SHIM_WAKESTS_STATUS BIT(0)
+
+/* AC Timing control */
+#define SDW_SHIM_CTMCTL(x) (0x06E + 0x60 * (x))
+
+#define SDW_SHIM_CTMCTL_DACTQE BIT(0)
+#define SDW_SHIM_CTMCTL_DODS BIT(1)
+#define SDW_SHIM_CTMCTL_DOAIS GENMASK(4, 3)
+
+/* Intel ALH Register definitions */
+#define SDW_ALH_STRMZCFG(x) (0x000 + (0x4 * (x)))
+#define SDW_ALH_NUM_STREAMS 64
+
+#define SDW_ALH_STRMZCFG_DMAT_VAL 0x3
+#define SDW_ALH_STRMZCFG_DMAT GENMASK(7, 0)
+#define SDW_ALH_STRMZCFG_CHN GENMASK(19, 16)
+
+/*********************************************************************
+ * ACE2.x definitions for SHIM registers - only accessible when the
+ * HDAudio extended link LCTL.SPA/CPA = 1.
+ *********************************************************************/
+/* x variable is link index */
+#define SDW_SHIM2_GENERIC_BASE(x) (0x00030000 + 0x8000 * (x))
+#define SDW_IP_BASE(x) (0x00030100 + 0x8000 * (x))
+#define SDW_SHIM2_VS_BASE(x) (0x00036000 + 0x8000 * (x))
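
The per-link stride is 0x8000, so the three blocks for a given link are easy to compute by hand; for link 1:

/*
 * SDW_SHIM2_GENERIC_BASE(1) == 0x38000
 * SDW_IP_BASE(1)            == 0x38100
 * SDW_SHIM2_VS_BASE(1)      == 0x3E000
 */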
+
+/* SHIM2 Generic Registers */
+/* Read-only capabilities */
+#define SDW_SHIM2_LECAP 0x00
+#define SDW_SHIM2_LECAP_HDS BIT(0) /* unset -> Host mode */
+#define SDW_SHIM2_LECAP_MLC GENMASK(3, 1) /* Number of Lanes */
+
+/* PCM Stream capabilities */
+#define SDW_SHIM2_PCMSCAP 0x10
+#define SDW_SHIM2_PCMSCAP_ISS GENMASK(3, 0) /* Input-only streams */
+#define SDW_SHIM2_PCMSCAP_OSS GENMASK(7, 4) /* Output-only streams */
+#define SDW_SHIM2_PCMSCAP_BSS GENMASK(12, 8) /* Bidirectional streams */
+
+/* Read-only PCM Stream Channel Count, y variable is stream */
+#define SDW_SHIM2_PCMSYCHC(y) (0x14 + (0x4 * (y)))
+#define SDW_SHIM2_PCMSYCHC_CS GENMASK(3, 0) /* Channels Supported */
+
+/* PCM Stream Channel Map */
+#define SDW_SHIM2_PCMSYCHM(y) (0x16 + (0x4 * (y)))
+#define SDW_SHIM2_PCMSYCHM_LCHAN GENMASK(3, 0) /* Lowest channel used by the FIFO port */
+#define SDW_SHIM2_PCMSYCHM_HCHAN GENMASK(7, 4) /* Highest channel used by the FIFO port */
+#define SDW_SHIM2_PCMSYCHM_STRM GENMASK(13, 8) /* HDaudio stream tag */
+#define SDW_SHIM2_PCMSYCHM_DIR BIT(15) /* HDaudio stream direction */
+
+/* SHIM2 vendor-specific registers */
+#define SDW_SHIM2_INTEL_VS_LVSCTL 0x04
+#define SDW_SHIM2_INTEL_VS_LVSCTL_FCG BIT(26)
+#define SDW_SHIM2_INTEL_VS_LVSCTL_MLCS GENMASK(29, 27)
+#define SDW_SHIM2_INTEL_VS_LVSCTL_DCGD BIT(30)
+#define SDW_SHIM2_INTEL_VS_LVSCTL_ICGD BIT(31)
+
+#define SDW_SHIM2_MLCS_XTAL_CLK 0x0
+#define SDW_SHIM2_MLCS_CARDINAL_CLK 0x1
+#define SDW_SHIM2_MLCS_AUDIO_PLL_CLK 0x2
+#define SDW_SHIM2_MLCS_MCLK_INPUT_CLK 0x3
+#define SDW_SHIM2_MLCS_WOV_RING_OSC_CLK 0x4
+
+#define SDW_SHIM2_INTEL_VS_WAKEEN 0x08
+#define SDW_SHIM2_INTEL_VS_WAKEEN_PWE BIT(0)
+
+#define SDW_SHIM2_INTEL_VS_WAKESTS 0x0A
+#define SDW_SHIM2_INTEL_VS_WAKEEN_PWS BIT(0)
+
+#define SDW_SHIM2_INTEL_VS_IOCTL 0x0C
+#define SDW_SHIM2_INTEL_VS_IOCTL_MIF BIT(0)
+#define SDW_SHIM2_INTEL_VS_IOCTL_CO BIT(1)
+#define SDW_SHIM2_INTEL_VS_IOCTL_COE BIT(2)
+#define SDW_SHIM2_INTEL_VS_IOCTL_DO BIT(3)
+#define SDW_SHIM2_INTEL_VS_IOCTL_DOE BIT(4)
+#define SDW_SHIM2_INTEL_VS_IOCTL_BKE BIT(5)
+#define SDW_SHIM2_INTEL_VS_IOCTL_WPDD BIT(6)
+#define SDW_SHIM2_INTEL_VS_IOCTL_ODC BIT(7)
+#define SDW_SHIM2_INTEL_VS_IOCTL_CIBD BIT(8)
+#define SDW_SHIM2_INTEL_VS_IOCTL_DIBD BIT(9)
+#define SDW_SHIM2_INTEL_VS_IOCTL_HAMIFD BIT(10)
+
+#define SDW_SHIM2_INTEL_VS_ACTMCTL 0x0E
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DACTQE BIT(0)
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DODS BIT(1)
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DODSE BIT(2)
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DOAIS GENMASK(4, 3)
+#define SDW_SHIM2_INTEL_VS_ACTMCTL_DOAISE BIT(5)
+#define SDW_SHIM3_INTEL_VS_ACTMCTL_CLSS BIT(6)
+#define SDW_SHIM3_INTEL_VS_ACTMCTL_CLDS GENMASK(11, 7)
+#define SDW_SHIM3_INTEL_VS_ACTMCTL_DODSE2 GENMASK(13, 12)
+#define SDW_SHIM3_INTEL_VS_ACTMCTL_DOAISE2 BIT(14)
+#define SDW_SHIM3_INTEL_VS_ACTMCTL_CLDE BIT(15)
+
+/* ACE3+ Mic privacy control and status register */
+#define SDW_SHIM2_INTEL_VS_PVCCS 0x10
+
/**
* struct sdw_intel_stream_params_data: configuration passed during
* the @params_stream callback, e.g. for interaction with DSP
@@ -40,12 +225,13 @@ struct sdw_intel_ops {
struct sdw_intel_stream_params_data *params_data);
int (*free_stream)(struct device *dev,
struct sdw_intel_stream_free_data *free_data);
+ int (*trigger)(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai);
};
/**
* struct sdw_intel_acpi_info - SoundWire Intel information found in ACPI tables
* @handle: ACPI controller handle
- * @count: link count found with "sdw-master-count" property
+ * @count: link count found with "sdw-master-count" or "sdw-manager-list" property
* @link_mask: bit-wise mask listing links enabled by BIOS menu
*
* this structure could be expanded to e.g. provide all the _ADR
@@ -58,7 +244,7 @@ struct sdw_intel_acpi_info {
u32 link_mask;
};
-struct sdw_intel_link_res;
+struct sdw_intel_link_dev;
/* Intel clock-stop/pm_runtime quirk definitions */
@@ -94,10 +280,7 @@ struct sdw_intel_link_res;
*/
#define SDW_INTEL_CLK_STOP_BUS_RESET BIT(3)
-struct sdw_intel_slave_id {
- int link_id;
- struct sdw_slave_id id;
-};
+struct hdac_bus;
/**
* struct sdw_intel_ctx - context allocated by the controller
@@ -107,33 +290,35 @@ struct sdw_intel_slave_id {
* hardware capabilities after all power dependencies are settled.
* @link_mask: bit-wise mask listing SoundWire links reported by the
* Controller
- * @num_slaves: total number of devices exposed across all enabled links
* @handle: ACPI parent handle
- * @links: information for each link (controller-specific and kept
+ * @ldev: information for each link (controller-specific and kept
* opaque here)
- * @ids: array of slave_id, representing Slaves exposed across all enabled
- * links
* @link_list: list to handle interrupts across all links
* @shim_lock: mutex to handle concurrent rmw access to shared SHIM registers.
* @shim_mask: flags to track initialization of SHIM shared registers
+ * @shim_base: sdw shim base.
+ * @alh_base: sdw alh base.
+ * @peripherals: array representing Peripherals exposed across all enabled links
*/
struct sdw_intel_ctx {
int count;
void __iomem *mmio_base;
u32 link_mask;
- int num_slaves;
acpi_handle handle;
- struct sdw_intel_link_res *links;
- struct sdw_intel_slave_id *ids;
+ struct sdw_intel_link_dev **ldev;
struct list_head link_list;
struct mutex shim_lock; /* lock for access to shared SHIM registers */
u32 shim_mask;
+ u32 shim_base;
+ u32 alh_base;
+ struct sdw_peripherals *peripherals;
};
/**
* struct sdw_intel_res - Soundwire Intel global resource structure,
* typically populated by the DSP driver
*
+ * @hw_ops: abstraction for platform ops
* @count: link count
* @mmio_base: mmio base of SoundWire registers
* @irq: interrupt number
@@ -146,8 +331,16 @@ struct sdw_intel_ctx {
* machine-specific quirks are handled in the DSP driver.
* @clock_stop_quirks: mask array of possible behaviors requested by the
* DSP driver. The quirks are common for all links for now.
+ * @shim_base: sdw shim base.
+ * @alh_base: sdw alh base.
+ * @ext: extended HDaudio link support
+ * @mic_privacy: ACE version supports microphone privacy
+ * @hbus: hdac_bus pointer, needed for power management
+ * @eml_lock: mutex protecting shared registers in the HDaudio multi-link
+ * space
*/
struct sdw_intel_res {
+ const struct sdw_intel_hw_ops *hw_ops;
int count;
void __iomem *mmio_base;
int irq;
@@ -157,6 +350,12 @@ struct sdw_intel_res {
struct device *dev;
u32 link_mask;
u32 clock_stop_quirks;
+ u32 shim_base;
+ u32 alh_base;
+ bool ext;
+ bool mic_privacy;
+ struct hdac_bus *hbus;
+ struct mutex *eml_lock;
};
/*
@@ -171,7 +370,7 @@ struct sdw_intel_res {
* on e.g. which machine driver to select (I2S mode, HDaudio or
* SoundWire).
*/
-int sdw_intel_acpi_scan(acpi_handle *parent_handle,
+int sdw_intel_acpi_scan(acpi_handle parent_handle,
struct sdw_intel_acpi_info *info);
void sdw_intel_process_wakeen_event(struct sdw_intel_ctx *ctx);
@@ -183,10 +382,84 @@ int sdw_intel_startup(struct sdw_intel_ctx *ctx);
void sdw_intel_exit(struct sdw_intel_ctx *ctx);
-void sdw_intel_enable_irq(void __iomem *mmio_base, bool enable);
-
irqreturn_t sdw_intel_thread(int irq, void *dev_id);
#define SDW_INTEL_QUIRK_MASK_BUS_DISABLE BIT(1)
+struct sdw_intel;
+
+/* struct sdw_intel_hw_ops - SoundWire ops for Intel platforms.
+ * @debugfs_init: initialize all debugfs capabilities
+ * @debugfs_exit: close and cleanup debugfs capabilities
+ * @get_link_count: fetch link count from hardware registers
+ * @register_dai: read all PDI information and register DAIs
+ * @check_clock_stop: log an error if the clock is not stopped.
+ * @start_bus: normal start
+ * @start_bus_after_reset: start after reset
+ * @start_bus_after_clock_stop: start after mode0 clock stop
+ * @stop_bus: stop the bus
+ * @link_power_up: power-up using chip-specific helpers
+ * @link_power_down: power-down with chip-specific helpers
+ * @shim_check_wake: check if a wake was received
+ * @shim_wake: enable/disable in-band wake management
+ * @pre_bank_switch: helper for bus management
+ * @post_bank_switch: helper for bus management
+ * @sync_arm: helper for multi-link synchronization
+ * @sync_go_unlocked: helper for multi-link synchronization -
+ * shim_lock is assumed to be locked at higher level
+ * @sync_go: helper for multi-link synchronization
+ * @sync_check_cmdsync_unlocked: helper for multi-link synchronization
+ * and bank switch - shim_lock is assumed to be locked at higher level
+ * @program_sdi: helper for codec command/control based on dev_num
+ * @bpt_send_async: send a BPT message asynchronously
+ * @bpt_wait: wait for completion of a BPT message
+ */
+struct sdw_intel_hw_ops {
+ void (*debugfs_init)(struct sdw_intel *sdw);
+ void (*debugfs_exit)(struct sdw_intel *sdw);
+
+ int (*get_link_count)(struct sdw_intel *sdw);
+
+ int (*register_dai)(struct sdw_intel *sdw);
+
+ void (*check_clock_stop)(struct sdw_intel *sdw);
+ int (*start_bus)(struct sdw_intel *sdw);
+ int (*start_bus_after_reset)(struct sdw_intel *sdw);
+ int (*start_bus_after_clock_stop)(struct sdw_intel *sdw);
+ int (*stop_bus)(struct sdw_intel *sdw, bool clock_stop);
+
+ int (*link_power_up)(struct sdw_intel *sdw);
+ int (*link_power_down)(struct sdw_intel *sdw);
+
+ int (*shim_check_wake)(struct sdw_intel *sdw);
+ void (*shim_wake)(struct sdw_intel *sdw, bool wake_enable);
+
+ int (*pre_bank_switch)(struct sdw_intel *sdw);
+ int (*post_bank_switch)(struct sdw_intel *sdw);
+
+ void (*sync_arm)(struct sdw_intel *sdw);
+ int (*sync_go_unlocked)(struct sdw_intel *sdw);
+ int (*sync_go)(struct sdw_intel *sdw);
+ bool (*sync_check_cmdsync_unlocked)(struct sdw_intel *sdw);
+
+ void (*program_sdi)(struct sdw_intel *sdw, int dev_num);
+
+ int (*bpt_send_async)(struct sdw_intel *sdw, struct sdw_slave *slave,
+ struct sdw_bpt_msg *msg);
+ int (*bpt_wait)(struct sdw_intel *sdw, struct sdw_slave *slave, struct sdw_bpt_msg *msg);
+};
+
+extern const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops;
+extern const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops;
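
A DSP driver is expected to pick the ops table matching its platform generation before handing resources to the common code; a sketch (the is_ace2x flag is an assumed platform-detection input):

static void example_select_hw_ops(struct sdw_intel_res *res, bool is_ace2x)
{
	if (is_ace2x)
		res->hw_ops = &sdw_intel_lnl_hw_ops;	/* ACE2.x and later */
	else
		res->hw_ops = &sdw_intel_cnl_hw_ops;	/* cAVS/ACE1.x */
}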
+
+/*
+ * IDA min selected to allow for 5 unconstrained devices per link,
+ * and 6 system-unique Device Numbers for wake-capable devices.
+ */
+
+#define SDW_INTEL_DEV_NUM_IDA_MIN 6
+
+/*
+ * Max number of links supported in hardware
+ */
+#define SDW_INTEL_MAX_LINKS 5
+
#endif
diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h
index 138bec908c40..cae8a0a5a9b0 100644
--- a/include/linux/soundwire/sdw_registers.h
+++ b/include/linux/soundwire/sdw_registers.h
@@ -4,6 +4,9 @@
#ifndef __SDW_REGISTERS_H
#define __SDW_REGISTERS_H
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
/*
* SDW registers as defined by MIPI 1.2 Spec
*/
@@ -13,7 +16,7 @@
#define SDW_REG_NO_PAGE 0x00008000
#define SDW_REG_OPTIONAL_PAGE 0x00010000
-#define SDW_REG_MAX 0x80000000
+#define SDW_REG_MAX 0x48000000
#define SDW_DPN_SIZE 0x100
#define SDW_BANK1_OFFSET 0x10
@@ -329,16 +332,29 @@
* 2:0 Control Number[2:0]
*/
-#define SDW_SDCA_CTL(fun, ent, ctl, ch) (BIT(30) | \
- (((fun) & 0x7) << 22) | \
- (((ent) & 0x40) << 15) | \
- (((ent) & 0x3f) << 7) | \
- (((ctl) & 0x30) << 15) | \
- (((ctl) & 0x0f) << 3) | \
- (((ch) & 0x38) << 12) | \
- ((ch) & 0x07))
+#define SDW_SDCA_CTL(fun, ent, ctl, ch) (BIT(30) | \
+ (((fun) & GENMASK(2, 0)) << 22) | \
+ (((ent) & BIT(6)) << 15) | \
+ (((ent) & GENMASK(5, 0)) << 7) | \
+ (((ctl) & GENMASK(5, 4)) << 15) | \
+ (((ctl) & GENMASK(3, 0)) << 3) | \
+ (((ch) & GENMASK(5, 3)) << 12) | \
+ ((ch) & GENMASK(2, 0)))
+
+#define SDW_SDCA_CTL_FUNC(reg) FIELD_GET(GENMASK(24, 22), (reg))
+#define SDW_SDCA_CTL_ENT(reg) ((FIELD_GET(BIT(21), (reg)) << 6) | \
+ FIELD_GET(GENMASK(12, 7), (reg)))
+#define SDW_SDCA_CTL_CSEL(reg) ((FIELD_GET(GENMASK(20, 19), (reg)) << 4) | \
+ FIELD_GET(GENMASK(6, 3), (reg)))
+#define SDW_SDCA_CTL_CNUM(reg) ((FIELD_GET(GENMASK(17, 15), (reg)) << 3) | \
+ FIELD_GET(GENMASK(2, 0), (reg)))
#define SDW_SDCA_MBQ_CTL(reg) ((reg) | BIT(13))
#define SDW_SDCA_NEXT_CTL(reg) ((reg) | BIT(14))
+/* Check the reserved and fixed bits in the address */
+#define SDW_SDCA_VALID_CTL(reg) (((reg) & (GENMASK(31, 25) | BIT(18) | BIT(13))) == BIT(30))
+
+#define SDW_SDCA_MAX_REGISTER 0x47FFFFFF
+
#endif /* __SDW_REGISTERS_H */
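
A worked example of the encode/decode macros above, for function 1, entity 2, control selector 3, channel 0:

static const u32 example_sdca_reg = SDW_SDCA_CTL(1, 2, 3, 0);	/* == 0x40400118 */
/*
 * SDW_SDCA_VALID_CTL(example_sdca_reg) is true, and the decode macros
 * recover the inputs: FUNC == 1, ENT == 2, CSEL == 3, CNUM == 0.
 */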
diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h
index 52eb66cd11bc..d405935a45fe 100644
--- a/include/linux/soundwire/sdw_type.h
+++ b/include/linux/soundwire/sdw_type.h
@@ -4,16 +4,16 @@
#ifndef __SOUNDWIRE_TYPES_H
#define __SOUNDWIRE_TYPES_H
-extern struct bus_type sdw_bus_type;
-extern struct device_type sdw_slave_type;
-extern struct device_type sdw_master_type;
+extern const struct bus_type sdw_bus_type;
+extern const struct device_type sdw_slave_type;
+extern const struct device_type sdw_master_type;
static inline int is_sdw_slave(const struct device *dev)
{
return dev->type == &sdw_slave_type;
}
-#define drv_to_sdw_driver(_drv) container_of(_drv, struct sdw_driver, driver)
+#define drv_to_sdw_driver(_drv) container_of_const(_drv, struct sdw_driver, driver)
#define sdw_register_driver(drv) \
__sdw_register_driver(drv, THIS_MODULE)
@@ -21,7 +21,7 @@ static inline int is_sdw_slave(const struct device *dev)
int __sdw_register_driver(struct sdw_driver *drv, struct module *owner);
void sdw_unregister_driver(struct sdw_driver *drv);
-int sdw_slave_uevent(struct device *dev, struct kobj_uevent_env *env);
+int sdw_slave_uevent(const struct device *dev, struct kobj_uevent_env *env);
/**
* module_sdw_driver() - Helper macro for registering a Soundwire driver
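
A minimal registration sketch built on the helpers above; the probe callback and ID table are placeholders:

static struct sdw_driver example_sdw_driver = {
	.driver = {
		.name = "example-sdw-codec",
	},
	.probe = example_probe,		/* hypothetical */
	.id_table = example_id_table,	/* hypothetical */
};
module_sdw_driver(example_sdw_driver);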
diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h
index 1a5eaef3b7f2..a04c1c34c344 100644
--- a/include/linux/spi/ads7846.h
+++ b/include/linux/spi/ads7846.h
@@ -1,17 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* linux/spi/ads7846.h */
-/* Touchscreen characteristics vary between boards and models. The
- * platform_data for the device's "struct device" holds this information.
- *
- * It's OK if the min/max values are zero.
- */
-enum ads7846_filter {
- ADS7846_FILTER_OK,
- ADS7846_FILTER_REPEAT,
- ADS7846_FILTER_IGNORE,
-};
-
struct ads7846_platform_data {
u16 model; /* 7843, 7845, 7846, 7873. */
u16 vref_delay_usecs; /* 0 for external vref; etc */
@@ -46,15 +35,9 @@ struct ads7846_platform_data {
u16 debounce_tol; /* tolerance used for filtering */
u16 debounce_rep; /* additional consecutive good readings
* required after the first two */
- int gpio_pendown; /* the GPIO used to decide the pendown
- * state if get_pendown_state == NULL */
int gpio_pendown_debounce; /* platform specific debounce time for
* the gpio_pendown */
int (*get_pendown_state)(void);
- int (*filter_init) (const struct ads7846_platform_data *pdata,
- void **filter_data);
- int (*filter) (void *filter_data, int data_idx, int *val);
- void (*filter_cleanup)(void *filter_data);
void (*wait_for_sync)(void);
bool wakeup;
unsigned long irq_flags;
diff --git a/include/linux/spi/altera.h b/include/linux/spi/altera.h
index 2e2a622e56da..3b74c3750caf 100644
--- a/include/linux/spi/altera.h
+++ b/include/linux/spi/altera.h
@@ -14,7 +14,7 @@
/**
* struct altera_spi_platform_data - Platform data of the Altera SPI driver
- * @mode_bits: Mode bits of SPI master.
+ * @mode_bits: Mode bits of SPI host.
* @num_chipselect: Number of chipselects.
* @bits_per_word_mask: bitmask of supported bits_per_word for transfers.
* @num_devices: Number of devices that shall be added when the driver
@@ -46,5 +46,5 @@ struct altera_spi {
};
extern irqreturn_t altera_spi_irq(int irq, void *dev);
-extern void altera_spi_init_master(struct spi_master *master);
+extern void altera_spi_init_host(struct spi_controller *host);
#endif /* __LINUX_SPI_ALTERA_H */
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h
deleted file mode 100644
index d278576ab692..000000000000
--- a/include/linux/spi/at86rf230.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * AT86RF230/RF231 driver
- *
- * Copyright (C) 2009-2012 Siemens AG
- *
- * Written by:
- * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
- */
-#ifndef AT86RF230_H
-#define AT86RF230_H
-
-struct at86rf230_platform_data {
- int rstn;
- int slp_tr;
- int dig2;
- u8 xtal_trim;
-};
-
-#endif
diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h
deleted file mode 100644
index 449bacf10700..000000000000
--- a/include/linux/spi/cc2520.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/* Header file for cc2520 radio driver
- *
- * Copyright (C) 2014 Varka Bhadram <varkab@cdac.in>
- * Md.Jamal Mohiuddin <mjmohiuddin@cdac.in>
- * P Sowjanya <sowjanyap@cdac.in>
- */
-
-#ifndef __CC2520_H
-#define __CC2520_H
-
-struct cc2520_platform_data {
- int fifo;
- int fifop;
- int cca;
- int sfd;
- int reset;
- int vreg;
-};
-
-#endif
diff --git a/include/linux/spi/corgi_lcd.h b/include/linux/spi/corgi_lcd.h
index 0b857616919c..fc6c1515dc54 100644
--- a/include/linux/spi/corgi_lcd.h
+++ b/include/linux/spi/corgi_lcd.h
@@ -15,4 +15,6 @@ struct corgi_lcd_platform_data {
void (*kick_battery)(void);
};
+void corgi_lcd_limit_intensity(int limit);
+
#endif /* __LINUX_SPI_CORGI_LCD_H */
diff --git a/include/linux/spi/max7301.h b/include/linux/spi/max7301.h
index 433c20e2f46e..e392c53758bc 100644
--- a/include/linux/spi/max7301.h
+++ b/include/linux/spi/max7301.h
@@ -2,7 +2,7 @@
#ifndef LINUX_SPI_MAX7301_H
#define LINUX_SPI_MAX7301_H
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
/*
* Some registers must be read back to modify.
@@ -31,6 +31,6 @@ struct max7301_platform_data {
u32 input_pullup_active;
};
-extern int __max730x_remove(struct device *dev);
+extern void __max730x_remove(struct device *dev);
extern int __max730x_probe(struct max7301 *ts);
#endif
diff --git a/include/linux/spi/offload/consumer.h b/include/linux/spi/offload/consumer.h
new file mode 100644
index 000000000000..cd7d5daa21e6
--- /dev/null
+++ b/include/linux/spi/offload/consumer.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Analog Devices Inc.
+ * Copyright (C) 2024 BayLibre, SAS
+ */
+
+#ifndef __LINUX_SPI_OFFLOAD_CONSUMER_H
+#define __LINUX_SPI_OFFLOAD_CONSUMER_H
+
+#include <linux/module.h>
+#include <linux/spi/offload/types.h>
+#include <linux/types.h>
+
+MODULE_IMPORT_NS("SPI_OFFLOAD");
+
+struct device;
+struct spi_device;
+
+struct spi_offload *devm_spi_offload_get(struct device *dev, struct spi_device *spi,
+ const struct spi_offload_config *config);
+
+struct spi_offload_trigger
+*devm_spi_offload_trigger_get(struct device *dev,
+ struct spi_offload *offload,
+ enum spi_offload_trigger_type type);
+int spi_offload_trigger_validate(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config);
+int spi_offload_trigger_enable(struct spi_offload *offload,
+ struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config);
+void spi_offload_trigger_disable(struct spi_offload *offload,
+ struct spi_offload_trigger *trigger);
+
+struct dma_chan *devm_spi_offload_tx_stream_request_dma_chan(struct device *dev,
+ struct spi_offload *offload);
+struct dma_chan *devm_spi_offload_rx_stream_request_dma_chan(struct device *dev,
+ struct spi_offload *offload);
+
+#endif /* __LINUX_SPI_OFFLOAD_CONSUMER_H */
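
A sketch of consumer usage in a peripheral driver probe path; the requested capability set is illustrative:

static int example_offload_probe(struct device *dev, struct spi_device *spi)
{
	static const struct spi_offload_config example_offload_config = {
		.capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
				    SPI_OFFLOAD_CAP_RX_STREAM_DMA,
	};
	struct spi_offload_trigger *trigger;
	struct spi_offload *offload;

	offload = devm_spi_offload_get(dev, spi, &example_offload_config);
	if (IS_ERR(offload))
		return PTR_ERR(offload);

	trigger = devm_spi_offload_trigger_get(dev, offload,
					       SPI_OFFLOAD_TRIGGER_PERIODIC);
	return PTR_ERR_OR_ZERO(trigger);
}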
diff --git a/include/linux/spi/offload/provider.h b/include/linux/spi/offload/provider.h
new file mode 100644
index 000000000000..76c7cf651092
--- /dev/null
+++ b/include/linux/spi/offload/provider.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Analog Devices Inc.
+ * Copyright (C) 2024 BayLibre, SAS
+ */
+
+#ifndef __LINUX_SPI_OFFLOAD_PROVIDER_H
+#define __LINUX_SPI_OFFLOAD_PROVIDER_H
+
+#include <linux/module.h>
+#include <linux/spi/offload/types.h>
+#include <linux/types.h>
+
+MODULE_IMPORT_NS("SPI_OFFLOAD");
+
+struct device;
+struct spi_offload_trigger;
+
+struct spi_offload *devm_spi_offload_alloc(struct device *dev, size_t priv_size);
+
+struct spi_offload_trigger_ops {
+ bool (*match)(struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type, u64 *args, u32 nargs);
+ int (*request)(struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type, u64 *args, u32 nargs);
+ void (*release)(struct spi_offload_trigger *trigger);
+ int (*validate)(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config);
+ int (*enable)(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config);
+ void (*disable)(struct spi_offload_trigger *trigger);
+};
+
+struct spi_offload_trigger_info {
+ /** @fwnode: Provider fwnode, used to match to consumer. */
+ struct fwnode_handle *fwnode;
+ /** @ops: Provider-specific callbacks. */
+ const struct spi_offload_trigger_ops *ops;
+ /** @priv: Provider-specific state to be used in callbacks. */
+ void *priv;
+};
+
+int devm_spi_offload_trigger_register(struct device *dev,
+ struct spi_offload_trigger_info *info);
+void *spi_offload_trigger_get_priv(struct spi_offload_trigger *trigger);
+
+#endif /* __LINUX_SPI_OFFLOAD_PROVIDER_H */
diff --git a/include/linux/spi/offload/types.h b/include/linux/spi/offload/types.h
new file mode 100644
index 000000000000..cd61f8adb7a5
--- /dev/null
+++ b/include/linux/spi/offload/types.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Analog Devices Inc.
+ * Copyright (C) 2024 BayLibre, SAS
+ */
+
+#ifndef __LINUX_SPI_OFFLOAD_TYPES_H
+#define __LINUX_SPI_OFFLOAD_TYPES_H
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct device;
+
+/* This is a write xfer, but TX uses an external data stream rather than tx_buf. */
+#define SPI_OFFLOAD_XFER_TX_STREAM BIT(0)
+/* This is a read xfer, but RX uses an external data stream rather than rx_buf. */
+#define SPI_OFFLOAD_XFER_RX_STREAM BIT(1)
+
+/* Offload can be triggered by external hardware event. */
+#define SPI_OFFLOAD_CAP_TRIGGER BIT(0)
+/* Offload can record and then play back TX data when triggered. */
+#define SPI_OFFLOAD_CAP_TX_STATIC_DATA BIT(1)
+/* Offload can get TX data from an external stream source. */
+#define SPI_OFFLOAD_CAP_TX_STREAM_DMA BIT(2)
+/* Offload can send RX data to an external stream sink. */
+#define SPI_OFFLOAD_CAP_RX_STREAM_DMA BIT(3)
+
+/**
+ * struct spi_offload_config - offload configuration
+ *
+ * This is used to request an offload with specific configuration.
+ */
+struct spi_offload_config {
+ /** @capability_flags: required capabilities. See %SPI_OFFLOAD_CAP_* */
+ u32 capability_flags;
+};
+
+/**
+ * struct spi_offload - offload instance
+ */
+struct spi_offload {
+ /** @provider_dev: for get/put reference counting */
+ struct device *provider_dev;
+ /** @priv: provider driver private data */
+ void *priv;
+ /** @ops: callbacks for offload support */
+ const struct spi_offload_ops *ops;
+ /** @xfer_flags: %SPI_OFFLOAD_XFER_* flags supported by provider */
+ u32 xfer_flags;
+};
+
+enum spi_offload_trigger_type {
+ /* Indication from SPI peripheral that data is ready to read. */
+ SPI_OFFLOAD_TRIGGER_DATA_READY,
+ /* Trigger comes from a periodic source such as a clock. */
+ SPI_OFFLOAD_TRIGGER_PERIODIC,
+};
+
+/**
+ * struct spi_offload_trigger_periodic - configuration parameters for periodic triggers
+ * @frequency_hz: The rate that the trigger should fire in Hz.
+ * @offset_ns: A delay in nanoseconds between when this trigger fires
+ * compared to another trigger. This requires specialized hardware
+ * that supports such synchronization with a delay between two or
+ * more triggers. Set to 0 when not needed.
+ */
+struct spi_offload_trigger_periodic {
+ u64 frequency_hz;
+ u64 offset_ns;
+};
+
+struct spi_offload_trigger_config {
+ /** @type: type discriminator for union */
+ enum spi_offload_trigger_type type;
+ union {
+ struct spi_offload_trigger_periodic periodic;
+ };
+};
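
Configuring and arming a periodic trigger with the types above, as a sketch (the 1 MHz rate is illustrative, and validate may adjust the config to what the hardware supports):

static int example_trigger_start(struct spi_offload *offload,
				 struct spi_offload_trigger *trigger)
{
	struct spi_offload_trigger_config config = {
		.type = SPI_OFFLOAD_TRIGGER_PERIODIC,
		.periodic = {
			.frequency_hz = 1000000,
		},
	};
	int ret;

	ret = spi_offload_trigger_validate(trigger, &config);
	if (ret)
		return ret;

	return spi_offload_trigger_enable(offload, trigger, &config);
}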
+
+/**
+ * struct spi_offload_ops - callbacks implemented by offload providers
+ */
+struct spi_offload_ops {
+ /**
+ * @trigger_enable: Optional callback to enable the trigger for the
+ * given offload instance.
+ */
+ int (*trigger_enable)(struct spi_offload *offload);
+ /**
+ * @trigger_disable: Optional callback to disable the trigger for the
+ * given offload instance.
+ */
+ void (*trigger_disable)(struct spi_offload *offload);
+ /**
+ * @tx_stream_request_dma_chan: Optional callback for controllers that
+ * have an offload where the TX data stream is connected directly to a
+ * DMA channel.
+ */
+ struct dma_chan *(*tx_stream_request_dma_chan)(struct spi_offload *offload);
+ /**
+ * @rx_stream_request_dma_chan: Optional callback for controllers that
+ * have an offload where the RX data stream is connected directly to a
+ * DMA channel.
+ */
+ struct dma_chan *(*rx_stream_request_dma_chan)(struct spi_offload *offload);
+};
+
+#endif /* __LINUX_SPI_OFFLOAD_TYPES_H */
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
deleted file mode 100644
index 31f00c7f4f59..000000000000
--- a/include/linux/spi/pxa2xx_spi.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
- */
-#ifndef __linux_pxa2xx_spi_h
-#define __linux_pxa2xx_spi_h
-
-#include <linux/pxa2xx_ssp.h>
-
-#define PXA2XX_CS_ASSERT (0x01)
-#define PXA2XX_CS_DEASSERT (0x02)
-
-struct dma_chan;
-
-/* device.platform_data for SSP controller devices */
-struct pxa2xx_spi_controller {
- u16 num_chipselect;
- u8 enable_dma;
- u8 dma_burst_size;
- bool is_slave;
-
- /* DMA engine specific config */
- bool (*dma_filter)(struct dma_chan *chan, void *param);
- void *tx_param;
- void *rx_param;
-
- /* For non-PXA arches */
- struct ssp_device ssp;
-};
-
-/* spi_board_info.controller_data for SPI slave devices,
- * copied to spi_device.platform_data ... mostly for dma tuning
- */
-struct pxa2xx_spi_chip {
- u8 tx_threshold;
- u8 tx_hi_threshold;
- u8 rx_threshold;
- u8 dma_burst_size;
- u32 timeout;
- u8 enable_loopback;
- int gpio_cs;
- void (*cs_control)(u32 command);
-};
-
-#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
-
-#include <linux/clk.h>
-
-extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info);
-
-#endif
-#endif
diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h
deleted file mode 100644
index dbdfcc7a3db2..000000000000
--- a/include/linux/spi/rspi.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Renesas SPI driver
- *
- * Copyright (C) 2012 Renesas Solutions Corp.
- */
-
-#ifndef __LINUX_SPI_RENESAS_SPI_H__
-#define __LINUX_SPI_RENESAS_SPI_H__
-
-struct rspi_plat_data {
- unsigned int dma_tx_id;
- unsigned int dma_rx_id;
-
- u16 num_chipselect;
-};
-
-#endif
diff --git a/include/linux/spi/s3c24xx-fiq.h b/include/linux/spi/s3c24xx-fiq.h
deleted file mode 100644
index d2842ac1de27..000000000000
--- a/include/linux/spi/s3c24xx-fiq.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* linux/drivers/spi/spi_s3c24xx_fiq.h
- *
- * Copyright 2009 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX SPI - FIQ pseudo-DMA transfer support
-*/
-
-#ifndef __LINUX_SPI_S3C24XX_FIQ_H
-#define __LINUX_SPI_S3C24XX_FIQ_H __FILE__
-
-/* We have R8 through R13 to play with */
-
-#ifdef __ASSEMBLY__
-#define __REG_NR(x) r##x
-#else
-
-extern struct spi_fiq_code s3c24xx_spi_fiq_txrx;
-extern struct spi_fiq_code s3c24xx_spi_fiq_tx;
-extern struct spi_fiq_code s3c24xx_spi_fiq_rx;
-
-#define __REG_NR(x) (x)
-#endif
-
-#define fiq_rspi __REG_NR(8)
-#define fiq_rtmp __REG_NR(9)
-#define fiq_rrx __REG_NR(10)
-#define fiq_rtx __REG_NR(11)
-#define fiq_rcount __REG_NR(12)
-#define fiq_rirq __REG_NR(13)
-
-#endif /* __LINUX_SPI_S3C24XX_FIQ_H */
diff --git a/include/linux/spi/s3c24xx.h b/include/linux/spi/s3c24xx.h
deleted file mode 100644
index 440a71593162..000000000000
--- a/include/linux/spi/s3c24xx.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2006 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 - SPI Controller platform_device info
-*/
-
-#ifndef __LINUX_SPI_S3C24XX_H
-#define __LINUX_SPI_S3C24XX_H __FILE__
-
-struct s3c2410_spi_info {
- int pin_cs; /* simple gpio cs */
- unsigned int num_cs; /* total chipselects */
- int bus_num; /* bus number to use. */
-
- unsigned int use_fiq:1; /* use fiq */
-
- void (*gpio_setup)(struct s3c2410_spi_info *spi, int enable);
- void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol);
-};
-
-extern int s3c24xx_set_fiq(unsigned int irq, u32 *ack_ptr, bool on);
-
-#endif /* __LINUX_SPI_S3C24XX_H */
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
index dc2a0cbd210d..9fbef3fd4056 100644
--- a/include/linux/spi/sh_msiof.h
+++ b/include/linux/spi/sh_msiof.h
@@ -2,9 +2,134 @@
#ifndef __SPI_SH_MSIOF_H__
#define __SPI_SH_MSIOF_H__
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+#define SITMDR1 0x00 /* Transmit Mode Register 1 */
+#define SITMDR2 0x04 /* Transmit Mode Register 2 */
+#define SITMDR3 0x08 /* Transmit Mode Register 3 */
+#define SIRMDR1 0x10 /* Receive Mode Register 1 */
+#define SIRMDR2 0x14 /* Receive Mode Register 2 */
+#define SIRMDR3 0x18 /* Receive Mode Register 3 */
+#define SITSCR 0x20 /* Transmit Clock Select Register */
+#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
+#define SICTR 0x28 /* Control Register */
+#define SIFCTR 0x30 /* FIFO Control Register */
+#define SISTR 0x40 /* Status Register */
+#define SIIER 0x44 /* Interrupt Enable Register */
+#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
+#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
+#define SITFDR 0x50 /* Transmit FIFO Data Register */
+#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
+#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
+#define SIRFDR 0x60 /* Receive FIFO Data Register */
+
+/* SITMDR1 and SIRMDR1 */
+#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */
+#define SIMDR1_SYNCMD GENMASK(29, 28) /* SYNC Mode */
+#define SIMDR1_SYNCMD_PULSE 0U /* Frame start sync pulse */
+#define SIMDR1_SYNCMD_SPI 2U /* Level mode/SPI */
+#define SIMDR1_SYNCMD_LR 3U /* L/R mode */
+#define SIMDR1_SYNCAC BIT(25) /* Sync Polarity (1 = Active-low) */
+#define SIMDR1_BITLSB BIT(24) /* MSB/LSB First (1 = LSB first) */
+#define SIMDR1_DTDL GENMASK(22, 20) /* Data Pin Bit Delay for MSIOF_SYNC */
+#define SIMDR1_SYNCDL GENMASK(18, 16) /* Frame Sync Signal Timing Delay */
+#define SIMDR1_FLD GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */
+#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */
+/* SITMDR1 */
+#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */
+#define SITMDR1_SYNCCH GENMASK(27, 26) /* Sync Signal Channel Select */
+ /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */
+
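
A sketch of composing SITMDR1 for host-clocked SPI with the field macros above (FIELD_PREP comes from <linux/bitfield.h>, included by this header):

static u32 example_msiof_tmdr1(void)
{
	/* host mode, SPI-level frame sync, active-low sync polarity */
	return SIMDR1_TRMD |
	       FIELD_PREP(SIMDR1_SYNCMD, SIMDR1_SYNCMD_SPI) |
	       SIMDR1_SYNCAC;
}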
+/* SITMDR2 and SIRMDR2 */
+#define SIMDR2_GRP GENMASK(31, 30) /* Group Count */
+#define SIMDR2_BITLEN1 GENMASK(28, 24) /* Data Size (8-32 bits) */
+#define SIMDR2_WDLEN1 GENMASK(23, 16) /* Word Count (1-64/256 (SH, A1)) */
+#define SIMDR2_GRPMASK GENMASK(3, 0) /* Group Output Mask 1-4 (SH, A1) */
+
+/* SITMDR3 and SIRMDR3 */
+#define SIMDR3_BITLEN2 GENMASK(28, 24) /* Data Size (8-32 bits) */
+#define SIMDR3_WDLEN2 GENMASK(23, 16) /* Word Count (1-64/256 (SH, A1)) */
+
+/* SITSCR and SIRSCR */
+#define SISCR_BRPS GENMASK(12, 8) /* Prescaler Setting (1-32) */
+#define SISCR_BRDV GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */
+
+/* SICTR */
+#define SICTR_TSCKIZ GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */
+#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */
+#define SICTR_TSCKIZ_POL BIT(30) /* Transmit Clock Polarity */
+#define SICTR_RSCKIZ GENMASK(29, 28) /* Receive Clock Polarity Select */
+#define SICTR_RSCKIZ_SCK BIT(29) /* Must match SICTR_TSCKIZ_SCK */
+#define SICTR_RSCKIZ_POL BIT(28) /* Receive Clock Polarity */
+#define SICTR_TEDG BIT(27) /* Transmit Timing (1 = falling edge) */
+#define SICTR_REDG BIT(26) /* Receive Timing (1 = falling edge) */
+#define SICTR_TXDIZ GENMASK(23, 22) /* Pin Output When TX is Disabled */
+#define SICTR_TXDIZ_LOW 0U /* 0 */
+#define SICTR_TXDIZ_HIGH 1U /* 1 */
+#define SICTR_TXDIZ_HIZ 2U /* High-impedance */
+#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */
+#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */
+#define SICTR_TXE BIT(9) /* Transmit Enable */
+#define SICTR_RXE BIT(8) /* Receive Enable */
+#define SICTR_TXRST BIT(1) /* Transmit Reset */
+#define SICTR_RXRST BIT(0) /* Receive Reset */
+
+/* SIFCTR */
+#define SIFCTR_TFWM GENMASK(31, 29) /* Transmit FIFO Watermark */
+#define SIFCTR_TFWM_64 0U /* Transfer Request when 64 empty stages */
+#define SIFCTR_TFWM_32 1U /* Transfer Request when 32 empty stages */
+#define SIFCTR_TFWM_24 2U /* Transfer Request when 24 empty stages */
+#define SIFCTR_TFWM_16 3U /* Transfer Request when 16 empty stages */
+#define SIFCTR_TFWM_12 4U /* Transfer Request when 12 empty stages */
+#define SIFCTR_TFWM_8 5U /* Transfer Request when 8 empty stages */
+#define SIFCTR_TFWM_4 6U /* Transfer Request when 4 empty stages */
+#define SIFCTR_TFWM_1 7U /* Transfer Request when 1 empty stage */
+#define SIFCTR_TFUA GENMASK(28, 20) /* Transmit FIFO Usable Area */
+#define SIFCTR_RFWM GENMASK(15, 13) /* Receive FIFO Watermark */
+#define SIFCTR_RFWM_1 0U /* Transfer Request when 1 valid stage */
+#define SIFCTR_RFWM_4 1U /* Transfer Request when 4 valid stages */
+#define SIFCTR_RFWM_8 2U /* Transfer Request when 8 valid stages */
+#define SIFCTR_RFWM_16 3U /* Transfer Request when 16 valid stages */
+#define SIFCTR_RFWM_32 4U /* Transfer Request when 32 valid stages */
+#define SIFCTR_RFWM_64 5U /* Transfer Request when 64 valid stages */
+#define SIFCTR_RFWM_128 6U /* Transfer Request when 128 valid stages */
+#define SIFCTR_RFWM_256 7U /* Transfer Request when 256 valid stages */
+#define SIFCTR_RFUA GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */
+
+/* SISTR */
+#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */
+#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */
+#define SISTR_TEOF BIT(23) /* Frame Transmission End */
+#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */
+#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */
+#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */
+#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */
+#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */
+#define SISTR_REOF BIT(7) /* Frame Reception End */
+#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */
+#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */
+#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */
+
+/* SIIER */
+#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */
+#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */
+#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */
+#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */
+#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */
+#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */
+#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */
+#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */
+#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */
+#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */
+#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */
+#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */
+#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */
+#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */
+
enum {
- MSIOF_SPI_MASTER,
- MSIOF_SPI_SLAVE,
+ MSIOF_SPI_HOST,
+ MSIOF_SPI_TARGET,
};
struct sh_msiof_spi_info {
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 2b65c9edc34e..82390712794c 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -15,16 +15,32 @@
#define SPI_MEM_OP_CMD(__opcode, __buswidth) \
{ \
+ .nbytes = 1, \
.buswidth = __buswidth, \
.opcode = __opcode, \
+ }
+
+#define SPI_MEM_DTR_OP_CMD(__opcode, __buswidth) \
+ { \
.nbytes = 1, \
+ .opcode = __opcode, \
+ .buswidth = __buswidth, \
+ .dtr = true, \
}
#define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth) \
{ \
.nbytes = __nbytes, \
+ .buswidth = __buswidth, \
+ .val = __val, \
+ }
+
+#define SPI_MEM_DTR_OP_ADDR(__nbytes, __val, __buswidth) \
+ { \
+ .nbytes = __nbytes, \
.val = __val, \
.buswidth = __buswidth, \
+ .dtr = true, \
}
#define SPI_MEM_OP_NO_ADDR { }
@@ -35,22 +51,47 @@
.buswidth = __buswidth, \
}
+#define SPI_MEM_DTR_OP_DUMMY(__nbytes, __buswidth) \
+ { \
+ .nbytes = __nbytes, \
+ .buswidth = __buswidth, \
+ .dtr = true, \
+ }
+
#define SPI_MEM_OP_NO_DUMMY { }
#define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth) \
{ \
+ .buswidth = __buswidth, \
+ .dir = SPI_MEM_DATA_IN, \
+ .nbytes = __nbytes, \
+ .buf.in = __buf, \
+ }
+
+#define SPI_MEM_DTR_OP_DATA_IN(__nbytes, __buf, __buswidth) \
+ { \
.dir = SPI_MEM_DATA_IN, \
.nbytes = __nbytes, \
.buf.in = __buf, \
.buswidth = __buswidth, \
+ .dtr = true, \
}
#define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, __buswidth) \
{ \
+ .buswidth = __buswidth, \
+ .dir = SPI_MEM_DATA_OUT, \
+ .nbytes = __nbytes, \
+ .buf.out = __buf, \
+ }
+
+#define SPI_MEM_DTR_OP_DATA_OUT(__nbytes, __buf, __buswidth) \
+ { \
.dir = SPI_MEM_DATA_OUT, \
.nbytes = __nbytes, \
.buf.out = __buf, \
.buswidth = __buswidth, \
+ .dtr = true, \
}
#define SPI_MEM_OP_NO_DATA { }
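
The DTR variants compose exactly like the single-rate ones; a sketch of a hypothetical octal-DTR read (opcode, address width and dummy length are assumptions, not a specific flash's command set):

static int example_octal_dtr_read(struct spi_mem *mem, u64 addr,
				  void *buf, unsigned int len)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_DTR_OP_CMD(0xee, 8),
			   SPI_MEM_DTR_OP_ADDR(4, addr, 8),
			   SPI_MEM_DTR_OP_DUMMY(16, 8),
			   SPI_MEM_DTR_OP_DATA_IN(len, buf, 8));

	return spi_mem_exec_op(mem, &op);
}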
@@ -68,6 +109,9 @@ enum spi_mem_data_dir {
SPI_MEM_DATA_OUT,
};
+#define SPI_MEM_OP_MAX_FREQ(__freq) \
+ .max_freq = __freq
+
/**
* struct spi_mem_op - describes a SPI memory operation
* @cmd.nbytes: number of opcode bytes (only 1 or 2 are valid). The opcode is
@@ -89,17 +133,24 @@ enum spi_mem_data_dir {
* @dummy.dtr: whether the dummy bytes should be sent in DTR mode or not
* @data.buswidth: number of IO lanes used to send/receive the data
* @data.dtr: whether the data should be sent in DTR mode or not
+ * @data.ecc: whether error correction is required or not
+ * @data.swap16: whether the byte order of 16-bit words is swapped when read
+ * or written in Octal DTR mode compared to STR mode.
* @data.dir: direction of the transfer
* @data.nbytes: number of data bytes to send/receive. Can be zero if the
* operation does not involve transferring data
* @data.buf.in: input buffer (must be DMA-able)
* @data.buf.out: output buffer (must be DMA-able)
+ * @max_freq: frequency limitation with respect to this operation. 0 means there is no
+ * specific constraint and the highest achievable frequency can be
+ * attempted.
*/
struct spi_mem_op {
struct {
u8 nbytes;
u8 buswidth;
u8 dtr : 1;
+ u8 __pad : 7;
u16 opcode;
} cmd;
@@ -107,6 +158,7 @@ struct spi_mem_op {
u8 nbytes;
u8 buswidth;
u8 dtr : 1;
+ u8 __pad : 7;
u64 val;
} addr;
@@ -114,11 +166,15 @@ struct spi_mem_op {
u8 nbytes;
u8 buswidth;
u8 dtr : 1;
+ u8 __pad : 7;
} dummy;
struct {
u8 buswidth;
u8 dtr : 1;
+ u8 ecc : 1;
+ u8 swap16 : 1;
+ u8 __pad : 5;
enum spi_mem_data_dir dir;
unsigned int nbytes;
union {
@@ -126,14 +182,17 @@ struct spi_mem_op {
const void *out;
} buf;
} data;
+
+ unsigned int max_freq;
};
-#define SPI_MEM_OP(__cmd, __addr, __dummy, __data) \
+#define SPI_MEM_OP(__cmd, __addr, __dummy, __data, ...) \
{ \
.cmd = __cmd, \
.addr = __addr, \
.dummy = __dummy, \
.data = __data, \
+ __VA_ARGS__ \
}
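
With the variadic form, an op can carry a per-operation frequency cap; a sketch of a JEDEC ID read (opcode 0x9f) limited to 1 MHz:

static int example_read_id(struct spi_mem *mem, u8 *id)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
			   SPI_MEM_OP_NO_ADDR,
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(3, id, 1),
			   SPI_MEM_OP_MAX_FREQ(1000000));

	return spi_mem_exec_op(mem, &op);
}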
/**
@@ -223,10 +282,12 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
/**
* struct spi_controller_mem_ops - SPI memory operations
* @adjust_op_size: shrink the data xfer of an operation to match controller's
- * limitations (can be alignment of max RX/TX size
+ * limitations (can be alignment or max RX/TX size
* limitations)
* @supports_op: check if an operation is supported by the controller
* @exec_op: execute a SPI memory operation
+ * not all drivers provide supports_op(), so exec_op() can return
+ * -EOPNOTSUPP if the op is not supported by the driver/controller
* @get_name: get a custom name for the SPI mem device from the controller.
* This might be needed if the controller driver has been ported
* to use the SPI mem layer and a custom name is used to keep
@@ -250,6 +311,9 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
* the currently mapped area), and the caller of
* spi_mem_dirmap_write() is responsible for calling it again in
* this case.
+ * @poll_status: poll memory device status until (status & mask) == match or
+ * when the timeout has expired. It fills the data buffer with
+ * the last status value.
*
* This interface should be implemented by SPI controllers providing a
* high-level interface to execute SPI memory operations, which is usually the
@@ -274,9 +338,33 @@ struct spi_controller_mem_ops {
u64 offs, size_t len, void *buf);
ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, const void *buf);
+ int (*poll_status)(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_rate_us,
+ unsigned long timeout_ms);
};
/**
+ * struct spi_controller_mem_caps - SPI memory controller capabilities
+ * @dtr: Supports DTR operations
+ * @ecc: Supports operations with error correction
+ * @swap16: Supports swapping bytes on a 16 bit boundary when configured in
+ * Octal DTR
+ * @per_op_freq: Supports per operation frequency switching
+ */
+struct spi_controller_mem_caps {
+ bool dtr;
+ bool ecc;
+ bool swap16;
+ bool per_op_freq;
+};
+
+#define spi_mem_controller_is_capable(ctlr, cap) \
+ ((ctlr)->mem_caps && (ctlr)->mem_caps->cap)
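
A sketch of how a controller driver advertises capabilities and how a supports_op() path would use the helper macro (the surrounding driver code is assumed):

static const struct spi_controller_mem_caps example_mem_caps = {
	.dtr = true,
	.per_op_freq = true,
};

static bool example_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (op->cmd.dtr && !spi_mem_controller_is_capable(ctlr, dtr))
		return false;

	return spi_mem_default_supports_op(mem, op);
}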
+
+/**
* struct spi_mem_driver - SPI memory driver
* @spidrv: inherit from a SPI driver
* @probe: probe a SPI memory. Usually where detection/initialization takes
@@ -310,10 +398,6 @@ void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
bool spi_mem_default_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op);
-
-bool spi_mem_dtr_supports_op(struct spi_mem *mem,
- const struct spi_mem_op *op);
-
#else
static inline int
spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
@@ -336,16 +420,11 @@ bool spi_mem_default_supports_op(struct spi_mem *mem,
{
return false;
}
-
-static inline
-bool spi_mem_dtr_supports_op(struct spi_mem *mem,
- const struct spi_mem_op *op)
-{
- return false;
-}
#endif /* CONFIG_SPI_MEM */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
+void spi_mem_adjust_op_freq(struct spi_mem *mem, struct spi_mem_op *op);
+u64 spi_mem_calc_op_duration(struct spi_mem *mem, struct spi_mem_op *op);
bool spi_mem_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op);
@@ -369,6 +448,13 @@ devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
void devm_spi_mem_dirmap_destroy(struct device *dev,
struct spi_mem_dirmap_desc *desc);
+int spi_mem_poll_status(struct spi_mem *mem,
+ const struct spi_mem_op *op,
+ u16 mask, u16 match,
+ unsigned long initial_delay_us,
+ unsigned long polling_delay_us,
+ u16 timeout_ms);
+
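
A sketch of polling a flash write-in-progress bit with the declaration above (opcode 0x05 is the common read-status command; real callers must pass a DMA-able buffer):

static int example_wait_wip(struct spi_mem *mem, u8 *sr)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x05, 1),
			   SPI_MEM_OP_NO_ADDR,
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(1, sr, 1));

	/* mask BIT(0) (WIP), match 0; no initial delay, 10 us poll, 100 ms timeout */
	return spi_mem_poll_status(mem, &op, BIT(0), 0, 0, 10, 100);
}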
int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
struct module *owner);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 360a3bc767ca..cb2c2df31089 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -6,33 +6,44 @@
#ifndef __LINUX_SPI_H
#define __LINUX_SPI_H
+#include <linux/acpi.h>
#include <linux/bits.h>
+#include <linux/completion.h>
#include <linux/device.h>
-#include <linux/mod_devicetable.h>
-#include <linux/slab.h>
+#include <linux/gpio/consumer.h>
#include <linux/kthread.h>
-#include <linux/completion.h>
+#include <linux/mod_devicetable.h>
+#include <linux/overflow.h>
#include <linux/scatterlist.h>
-#include <linux/gpio/consumer.h>
-#include <linux/ptp_clock_kernel.h>
+#include <linux/slab.h>
+#include <linux/u64_stats_sync.h>
#include <uapi/linux/spi/spi.h>
+/* Maximum number of chip selects supported per SPI device */
+#define SPI_DEVICE_CS_CNT_MAX 4
+
struct dma_chan;
struct software_node;
+struct ptp_system_timestamp;
struct spi_controller;
struct spi_transfer;
struct spi_controller_mem_ops;
+struct spi_controller_mem_caps;
+struct spi_message;
+struct spi_offload;
+struct spi_offload_config;
/*
- * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
+ * INTERFACES between SPI controller-side drivers and SPI target protocol handlers,
* and SPI infrastructure.
*/
-extern struct bus_type spi_bus_type;
+extern const struct bus_type spi_bus_type;
/**
* struct spi_statistics - statistics for spi transfers
- * @lock: lock protecting this structure
+ * @syncp: seqcount to protect members in this struct for per-cpu update
+ * on 32-bit systems
*
* @messages: number of spi-messages handled
* @transfers: number of spi_transfers handled
@@ -50,48 +61,55 @@ extern struct bus_type spi_bus_type;
* @bytes_rx: number of bytes received from device
*
* @transfer_bytes_histo:
- * transfer bytes histogramm
+ * transfer bytes histogram
*
* @transfers_split_maxsize:
* number of transfers that have been split because of
* maxsize limit
*/
struct spi_statistics {
- spinlock_t lock; /* lock for the whole structure */
+ struct u64_stats_sync syncp;
- unsigned long messages;
- unsigned long transfers;
- unsigned long errors;
- unsigned long timedout;
+ u64_stats_t messages;
+ u64_stats_t transfers;
+ u64_stats_t errors;
+ u64_stats_t timedout;
- unsigned long spi_sync;
- unsigned long spi_sync_immediate;
- unsigned long spi_async;
+ u64_stats_t spi_sync;
+ u64_stats_t spi_sync_immediate;
+ u64_stats_t spi_async;
- unsigned long long bytes;
- unsigned long long bytes_rx;
- unsigned long long bytes_tx;
+ u64_stats_t bytes;
+ u64_stats_t bytes_rx;
+ u64_stats_t bytes_tx;
#define SPI_STATISTICS_HISTO_SIZE 17
- unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
+ u64_stats_t transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
- unsigned long transfers_split_maxsize;
+ u64_stats_t transfers_split_maxsize;
};
-void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
- struct spi_transfer *xfer,
- struct spi_controller *ctlr);
-
-#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count) \
- do { \
- unsigned long flags; \
- spin_lock_irqsave(&(stats)->lock, flags); \
- (stats)->field += count; \
- spin_unlock_irqrestore(&(stats)->lock, flags); \
+#define SPI_STATISTICS_ADD_TO_FIELD(pcpu_stats, field, count) \
+ do { \
+ struct spi_statistics *__lstats; \
+ get_cpu(); \
+ __lstats = this_cpu_ptr(pcpu_stats); \
+ u64_stats_update_begin(&__lstats->syncp); \
+ u64_stats_add(&__lstats->field, count); \
+ u64_stats_update_end(&__lstats->syncp); \
+ put_cpu(); \
} while (0)
-#define SPI_STATISTICS_INCREMENT_FIELD(stats, field) \
- SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
+#define SPI_STATISTICS_INCREMENT_FIELD(pcpu_stats, field) \
+ do { \
+ struct spi_statistics *__lstats; \
+ get_cpu(); \
+ __lstats = this_cpu_ptr(pcpu_stats); \
+ u64_stats_update_begin(&__lstats->syncp); \
+ u64_stats_inc(&__lstats->field); \
+ u64_stats_update_end(&__lstats->syncp); \
+ put_cpu(); \
+ } while (0)
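
A sketch of how the per-cpu statistics macros are consumed, assuming the controller's pcpu_statistics member as documented later in this header:

static void example_account_transfer(struct spi_controller *ctlr,
				     struct spi_transfer *xfer)
{
	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, transfers);
	SPI_STATISTICS_ADD_TO_FIELD(ctlr->pcpu_statistics, bytes, xfer->len);
}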
/**
* struct spi_delay - SPI delay information
@@ -108,21 +126,16 @@ struct spi_delay {
extern int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer);
extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
+extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
+ struct spi_transfer *xfer);
/**
- * struct spi_device - Controller side proxy for an SPI slave device
+ * struct spi_device - Controller side proxy for an SPI target device
* @dev: Driver model representation of the device.
* @controller: SPI controller used with the device.
- * @master: Copy of controller, for backwards compatibility.
* @max_speed_hz: Maximum clock rate to be used with this chip
* (on this board); may be changed by the device's driver.
* The spi_transfer.speed_hz can override this for each transfer.
- * @chip_select: Chipselect, distinguishing chips handled by @controller.
- * @mode: The spi mode defines how data is clocked out and in.
- * This may be changed by the device's driver.
- * The "active low" default for chipselect mode can be overridden
- * (by specifying SPI_CS_HIGH) as can the "MSB first" default for
- * each word in a transfer (by specifying SPI_LSB_FIRST).
* @bits_per_word: Data transfers involve one or more words; word sizes
* like eight or 12 bits are common. In-memory wordsizes are
* powers of two bytes (e.g. 20 bit samples use 32 bits).
@@ -130,6 +143,11 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
* default (0) indicating protocol words are eight bit bytes.
* The spi_transfer.bits_per_word can override this for each transfer.
* @rt: Make the pump thread real time priority.
+ * @mode: The spi mode defines how data is clocked out and in.
+ * This may be changed by the device's driver.
+ * The "active low" default for chipselect mode can be overridden
+ * (by specifying SPI_CS_HIGH) as can the "MSB first" default for
+ * each word in a transfer (by specifying SPI_LSB_FIRST).
* @irq: Negative, or the number passed to request_irq() to receive
* interrupts from this device.
* @controller_state: Controller's runtime state
@@ -140,17 +158,24 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
* for driver coldplugging, and in uevents used for hotplugging
* @driver_override: If the name of a driver is written to this attribute, then
* the device will bind to the named driver and only the named driver.
- * @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when
- * not using a GPIO line) use cs_gpiod in new drivers by opting in on
- * the spi_master.
- * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when
- * not using a GPIO line)
+ * Do not set directly, because core frees it; use driver_set_override() to
+ * set or clear it.
+ * @pcpu_statistics: statistics for the spi_device
* @word_delay: delay to be inserted between consecutive
* words of a transfer
+ * @cs_setup: delay to be introduced by the controller after CS is asserted
+ * @cs_hold: delay to be introduced by the controller before CS is deasserted
+ * @cs_inactive: delay to be introduced by the controller after CS is
+ * deasserted. If @cs_change_delay is used from @spi_transfer, then the
+ * two delays will be added up.
+ * @chip_select: Array of physical chipselects; spi->chip_select[i] gives
+ * the corresponding physical CS for logical CS i.
+ * @num_chipselect: Number of physical chipselects used.
+ * @cs_index_mask: Bit mask of the active chipselect(s) in the chipselect array
+ * @cs_gpiod: Array of GPIO descriptors of the corresponding chipselect lines
+ * (optional, NULL when not using a GPIO line)
*
- * @statistics: statistics for the spi_device
- *
- * A @spi_device is used to interchange data between an SPI slave
+ * A @spi_device is used to interchange data between an SPI target device
* (usually a discrete chip) and CPU memory.
*
* In @dev, the platform_data is used to hold information about this
@@ -162,13 +187,21 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
struct spi_device {
struct device dev;
struct spi_controller *controller;
- struct spi_controller *master; /* compatibility layer */
u32 max_speed_hz;
- u8 chip_select;
u8 bits_per_word;
bool rt;
-#define SPI_NO_TX BIT(31) /* no transmit wire */
-#define SPI_NO_RX BIT(30) /* no receive wire */
+#define SPI_NO_TX BIT(31) /* No transmit wire */
+#define SPI_NO_RX BIT(30) /* No receive wire */
+ /*
+ * The TPM specification defines flow control over SPI. The client
+ * device can insert a wait state on MISO while the address is
+ * transmitted by the controller on MOSI. Detecting the wait state
+ * in software is only possible for full-duplex controllers. For
+ * controllers that support only half-duplex, the wait state
+ * detection needs to be implemented in hardware. TPM devices set
+ * this flag when hardware flow control is expected from the SPI
+ * controller.
+ */
+#define SPI_TPM_HW_FLOW BIT(29) /* TPM HW flow control */
/*
* All bits defined above should be covered by SPI_MODE_KERNEL_MASK.
* The SPI_MODE_KERNEL_MASK has the SPI_MODE_USER_MASK counterpart,
@@ -178,22 +211,39 @@ struct spi_device {
* These bits must not overlap. A static assert check should make sure of that.
* If adding extra bits, make sure to decrease the bit index below as well.
*/
-#define SPI_MODE_KERNEL_MASK (~(BIT(30) - 1))
+#define SPI_MODE_KERNEL_MASK (~(BIT(29) - 1))
u32 mode;
int irq;
void *controller_state;
void *controller_data;
char modalias[SPI_NAME_SIZE];
const char *driver_override;
- int cs_gpio; /* LEGACY: chip select gpio */
- struct gpio_desc *cs_gpiod; /* chip select gpio desc */
- struct spi_delay word_delay; /* inter-word delay */
- /* the statistics */
- struct spi_statistics statistics;
+ /* The statistics */
+ struct spi_statistics __percpu *pcpu_statistics;
+
+ struct spi_delay word_delay; /* Inter-word delay */
+
+ /* CS delays */
+ struct spi_delay cs_setup;
+ struct spi_delay cs_hold;
+ struct spi_delay cs_inactive;
+
+ u8 chip_select[SPI_DEVICE_CS_CNT_MAX];
+ u8 num_chipselect;
/*
- * likely need more hooks for more protocol options affecting how
+ * Bit mask of the chipselect(s) that the driver needs to use from
+ * the chipselect array. When the controller is capable of handling
+ * multiple chip selects and memories are connected in parallel,
+ * more than one bit needs to be set in cs_index_mask.
+ */
+ u32 cs_index_mask : SPI_DEVICE_CS_CNT_MAX;
+
+ struct gpio_desc *cs_gpiod[SPI_DEVICE_CS_CNT_MAX]; /* Chip select gpio desc */
+
+ /*
+ * Likely need more hooks for more protocol options affecting how
* the controller talks to each chip, like:
* - memory packing (12 bit samples into low bits, others zeroed)
* - priority
@@ -206,12 +256,9 @@ struct spi_device {
static_assert((SPI_MODE_KERNEL_MASK & SPI_MODE_USER_MASK) == 0,
"SPI_MODE_USER_MASK & SPI_MODE_KERNEL_MASK must not overlap");
-static inline struct spi_device *to_spi_device(struct device *dev)
-{
- return dev ? container_of(dev, struct spi_device, dev) : NULL;
-}
+#define to_spi_device(__dev) container_of_const(__dev, struct spi_device, dev)
-/* most drivers won't need to care about device refcounting */
+/* Most drivers won't need to care about device refcounting */
static inline struct spi_device *spi_dev_get(struct spi_device *spi)
{
return (spi && get_device(&spi->dev)) ? spi : NULL;
@@ -224,7 +271,7 @@ static inline void spi_dev_put(struct spi_device *spi)
}
/* ctldata is for the bus_controller driver's runtime state */
-static inline void *spi_get_ctldata(struct spi_device *spi)
+static inline void *spi_get_ctldata(const struct spi_device *spi)
{
return spi->controller_state;
}
@@ -234,28 +281,57 @@ static inline void spi_set_ctldata(struct spi_device *spi, void *state)
spi->controller_state = state;
}
-/* device driver data */
+/* Device driver data */
static inline void spi_set_drvdata(struct spi_device *spi, void *data)
{
dev_set_drvdata(&spi->dev, data);
}
-static inline void *spi_get_drvdata(struct spi_device *spi)
+static inline void *spi_get_drvdata(const struct spi_device *spi)
{
return dev_get_drvdata(&spi->dev);
}
-struct spi_message;
+static inline u8 spi_get_chipselect(const struct spi_device *spi, u8 idx)
+{
+ return spi->chip_select[idx];
+}
+
+static inline void spi_set_chipselect(struct spi_device *spi, u8 idx, u8 chipselect)
+{
+ spi->chip_select[idx] = chipselect;
+}
+
+static inline struct gpio_desc *spi_get_csgpiod(const struct spi_device *spi, u8 idx)
+{
+ return spi->cs_gpiod[idx];
+}
+
+static inline void spi_set_csgpiod(struct spi_device *spi, u8 idx, struct gpio_desc *csgpiod)
+{
+ spi->cs_gpiod[idx] = csgpiod;
+}
+
+static inline bool spi_is_csgpiod(struct spi_device *spi)
+{
+ u8 idx;
+
+ for (idx = 0; idx < spi->num_chipselect; idx++) {
+ if (spi_get_csgpiod(spi, idx))
+ return true;
+ }
+ return false;
+}
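
With the chip selects behind accessors, drivers no longer poke spi->cs_gpiod directly. A hedged sketch of a set_cs-style helper built on the new API, not part of this patch (the function is illustrative; active-state polarity is assumed to come from the gpiod flags):

static void example_gpio_set_cs(struct spi_device *spi, bool enable)
{
	u8 idx;

	if (!spi_is_csgpiod(spi))
		return;		/* native CS is handled by the controller */

	for (idx = 0; idx < spi->num_chipselect; idx++) {
		struct gpio_desc *cs = spi_get_csgpiod(spi, idx);

		/* Drive only the CS lines selected by cs_index_mask */
		if (cs && (spi->cs_index_mask & BIT(idx)))
			gpiod_set_value_cansleep(cs, enable);
	}
}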
/**
* struct spi_driver - Host side "protocol" driver
* @id_table: List of SPI devices supported by this driver
- * @probe: Binds this driver to the spi device. Drivers can verify
+ * @probe: Binds this driver to the SPI device. Drivers can verify
* that the device is actually present, and may need to configure
* characteristics (such as bits_per_word) which weren't needed for
* the initial configuration done during system setup.
- * @remove: Unbinds this driver from the spi device
+ * @remove: Unbinds this driver from the SPI device
* @shutdown: Standard shutdown callback used during system state
* transitions such as powerdown/halt and kexec
* @driver: SPI device drivers should initialize the name and owner
@@ -276,15 +352,13 @@ struct spi_message;
struct spi_driver {
const struct spi_device_id *id_table;
int (*probe)(struct spi_device *spi);
- int (*remove)(struct spi_device *spi);
+ void (*remove)(struct spi_device *spi);
void (*shutdown)(struct spi_device *spi);
struct device_driver driver;
};
-static inline struct spi_driver *to_spi_driver(struct device_driver *drv)
-{
- return drv ? container_of(drv, struct spi_driver, driver) : NULL;
-}
+#define to_spi_driver(__drv) \
+ ( __drv ? container_of_const(__drv, struct spi_driver, driver) : NULL )
extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv);
@@ -299,7 +373,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
driver_unregister(&sdrv->driver);
}
-/* use a define to avoid include chaining to get THIS_MODULE */
+extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 chip_select);
+
+/* Use a define to avoid include chaining to get THIS_MODULE */
#define spi_register_driver(driver) \
__spi_register_driver(THIS_MODULE, driver)
@@ -316,15 +392,15 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
spi_unregister_driver)
/**
- * struct spi_controller - interface to SPI master or slave controller
+ * struct spi_controller - interface to SPI host or target controller
* @dev: device interface to this driver
* @list: link with the global spi_controller list
* @bus_num: board-specific (and often SOC-specific) identifier for a
* given SPI controller.
* @num_chipselect: chipselects are used to distinguish individual
- * SPI slaves, and are numbered from zero to num_chipselects.
- * each slave has a chipselect signal, but it's common that not
- * every chipselect is connected to a slave.
+ * SPI targets, and are numbered from zero to num_chipselects.
+ * Each target has a chipselect signal, but it's common that not
+ * every chipselect is connected to a target.
* @dma_alignment: SPI controller constraint on DMA buffers alignment.
* @mode_bits: flags understood by this controller driver
* @buswidth_override_bits: flags to override for this controller driver
@@ -337,11 +413,14 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @max_speed_hz: Highest supported transfer speed
* @flags: other constraints relevant to this driver
* @slave: indicates that this is an SPI slave controller
+ * @target: indicates that this is an SPI target controller
+ * @devm_allocated: whether the allocation of this struct is devres-managed
* @max_transfer_size: function that returns the max transfer size for
* a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
* @max_message_size: function that returns the max message size for
* a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
* @io_mutex: mutex for physical bus access
+ * @add_lock: mutex to avoid adding devices to the same chipselect
* @bus_lock_spinlock: spinlock for SPI bus locking
* @bus_lock_mutex: mutex for exclusion of multiple callers
* @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
@@ -350,24 +429,34 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* must fail if an unrecognized or unsupported mode is requested.
* It's always safe to call this unless transfers are pending on
* the device whose settings are being modified.
- * @set_cs_timing: optional hook for SPI devices to request SPI master
+ * @set_cs_timing: optional hook for SPI devices to request SPI
* controller for configuring specific CS setup time, hold time and inactive
- * delay interms of clock counts
+ * delay in terms of clock counts
* @transfer: adds a message to the controller's transfer queue.
* @cleanup: frees controller-specific state
* @can_dma: determine whether this controller supports DMA
+ * @dma_map_dev: device which can be used for DMA mapping
+ * @cur_rx_dma_dev: device which is currently used for RX DMA mapping
+ * @cur_tx_dma_dev: device which is currently used for TX DMA mapping
* @queued: whether this controller is providing an internal message queue
* @kworker: pointer to thread struct for message pump
* @pump_messages: work struct for scheduling work to the message pump
- * @queue_lock: spinlock to syncronise access to message queue
+ * @queue_lock: spinlock to synchronise access to message queue
* @queue: message queue
- * @idling: the device is entering idle state
* @cur_msg: the currently in-flight message
- * @cur_msg_prepared: spi_prepare_message was called for the currently
- * in-flight message
- * @cur_msg_mapped: message has been mapped for DMA
- * @last_cs_enable: was enable true on the last call to set_cs.
+ * @cur_msg_completion: a completion for the current in-flight message
+ * @cur_msg_incomplete: Flag used internally to opportunistically skip
+ * the @cur_msg_completion. This flag is used to check if the driver has
+ * already called spi_finalize_current_message().
+ * @cur_msg_need_completion: Flag used internally to opportunistically skip
+ * the @cur_msg_completion. This flag is used to signal the context that
+ * is running spi_finalize_current_message() that it needs to complete()
+ * @fallback: fallback to PIO if a DMA transfer returns failure with
+ * SPI_TRANS_FAIL_NO_START.
* @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs.
+ * @last_cs: the last chip_select recorded by set_cs; -1 when no chip
+ * is selected
+ * @last_cs_index_mask: bit mask of the last chip selects that were used
* @xfer_completion: used by core transfer_one_message()
* @busy: message pump is busy
* @running: message pump is running
@@ -390,6 +479,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
*
* @set_cs: set the logic level of the chip select line. May be called
* from interrupt context.
+ * @optimize_message: optimize the message for reuse
+ * @unoptimize_message: release resources allocated by optimize_message
* @prepare_message: set up the controller to transfer a single message,
* for example doing DMA mapping. Called from threaded
* context.
@@ -399,40 +490,40 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* - return 1 if the transfer is still in progress. When
* the driver is finished with this transfer it must
* call spi_finalize_current_transfer() so the subsystem
- * can issue the next transfer. Note: transfer_one and
- * transfer_one_message are mutually exclusive; when both
- * are set, the generic subsystem does not call your
- * transfer_one callback.
+ * can issue the next transfer. If the transfer fails, the
+ * driver must set the SPI_TRANS_FAIL_IO flag in
+ * spi_transfer->error before calling
+ * spi_finalize_current_transfer().
+ * Note: transfer_one and transfer_one_message are mutually
+ * exclusive; when both are set, the generic subsystem does
+ * not call your transfer_one callback.
* @handle_err: the subsystem calls the driver to handle an error that occurs
* in the generic implementation of transfer_one_message().
* @mem_ops: optimized/dedicated operations for interactions with SPI memory.
* This field is optional and should only be implemented if the
* controller has native support for memory like operations.
+ * @get_offload: callback for controllers with offload support to get matching
+ * offload instance. Implementations should return -ENODEV if no match is
+ * found.
+ * @put_offload: release the offload instance acquired by @get_offload.
+ * @mem_caps: controller capabilities for the handling of memory operations.
+ * @dtr_caps: true if the controller has DTR (single/dual transfer rate)
+ * capability. QSPI-based controllers should fill this based on the
+ * controller's capability.
* @unprepare_message: undo any work done by prepare_message().
- * @slave_abort: abort the ongoing transfer request on an SPI slave controller
- * @cs_setup: delay to be introduced by the controller after CS is asserted
- * @cs_hold: delay to be introduced by the controller before CS is deasserted
- * @cs_inactive: delay to be introduced by the controller after CS is
- * deasserted. If @cs_change_delay is used from @spi_transfer, then the
- * two delays will be added up.
- * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per
- * CS number. Any individual value may be -ENOENT for CS lines that
- * are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods
- * in new drivers.
- * @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS
+ * @target_abort: abort the ongoing transfer request on an SPI target controller
+ * @cs_gpiods: Array of GPIO descriptors to use as chip select lines; one per CS
* number. Any individual value may be NULL for CS lines that
* are not GPIOs (driven by the SPI controller itself).
* @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab
- * GPIO descriptors rather than using global GPIO numbers grabbed by the
- * driver. This will fill in @cs_gpiods and @cs_gpios should not be used,
- * and SPI devices will have the cs_gpiod assigned rather than cs_gpio.
+ * GPIO descriptors. This will fill in @cs_gpiods and SPI devices will have
+ * the cs_gpiod assigned if a GPIO line is found for the chipselect.
* @unused_native_cs: When cs_gpiods is used, spi_register_controller() will
* fill in this field with the first unused native CS, to be used by SPI
* controller drivers that need to drive a native CS when using GPIO CS.
* @max_native_cs: When cs_gpiods is used, and this field is filled in,
* spi_register_controller() will validate all native CS (including the
* unused native CS) against this value.
- * @statistics: statistics for the spi_controller
+ * @pcpu_statistics: statistics for the spi_controller
* @dma_tx: DMA transmit channel
* @dma_rx: DMA receive channel
* @dummy_rx: dummy receive buffer for full-duplex devices
@@ -447,8 +538,12 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* If the driver does not set this, the SPI core takes the snapshot as
* close to the driver hand-over as possible.
* @irq_flags: Interrupt enable state during PTP system timestamping
- * @fallback: fallback to pio if dma transfer return failure with
- * SPI_TRANS_FAIL_NO_START.
+ * @queue_empty: signal green light for opportunistically skipping the queue
+ * for spi_sync transfers.
+ * @must_async: disable all fast paths in the core
+ * @defer_optimize_message: set to true if controller cannot pre-optimize messages
+ * and needs to defer the optimization step until the message is actually
+ * being transferred
*
* Each SPI controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -458,7 +553,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
*
* The driver for an SPI controller manages access to those devices through
* a queue of spi_message transactions, copying data between CPU memory and
- * an SPI slave device. For each such message it queues, it calls the
+ * an SPI target device. For each such message it queues, it calls the
* message's completion function when the transaction completes.
*/
struct spi_controller {
@@ -466,20 +561,22 @@ struct spi_controller {
struct list_head list;
- /* other than negative (== assign one dynamically), bus_num is fully
- * board-specific. usually that simplifies to being SOC-specific.
- * example: one SOC has three SPI controllers, numbered 0..2,
- * and one board's schematics might show it using SPI-2. software
+ /*
+ * Other than negative (== assign one dynamically), bus_num is fully
+ * board-specific. Usually that simplifies to being SoC-specific.
+ * example: one SoC has three SPI controllers, numbered 0..2,
+ * and one board's schematics might show it using SPI-2. Software
* would normally use bus_num=2 for that controller.
*/
s16 bus_num;
- /* chipselects will be integral to many controllers; some others
+ /*
+ * Chipselects will be integral to many controllers; some others
* might use board-specific GPIOs.
*/
u16 num_chipselect;
- /* some SPI controllers pose alignment requirements on DMAable
+ /* Some SPI controllers pose alignment requirements on DMAable
* buffers; let protocol drivers know about these requirements.
*/
u16 dma_alignment;
@@ -490,34 +587,43 @@ struct spi_controller {
/* spi_device.mode flags override flags for this controller */
u32 buswidth_override_bits;
- /* bitmask of supported bits_per_word for transfers */
+ /* Bitmask of supported bits_per_word for transfers */
u32 bits_per_word_mask;
#define SPI_BPW_MASK(bits) BIT((bits) - 1)
#define SPI_BPW_RANGE_MASK(min, max) GENMASK((max) - 1, (min) - 1)
- /* limits on transfer speed */
+ /* Limits on transfer speed */
u32 min_speed_hz;
u32 max_speed_hz;
- /* other constraints relevant to this driver */
+ /* Other constraints relevant to this driver */
u16 flags;
-#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* can't do full duplex */
-#define SPI_CONTROLLER_NO_RX BIT(1) /* can't do buffer read */
-#define SPI_CONTROLLER_NO_TX BIT(2) /* can't do buffer write */
-#define SPI_CONTROLLER_MUST_RX BIT(3) /* requires rx */
-#define SPI_CONTROLLER_MUST_TX BIT(4) /* requires tx */
-
-#define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
+#define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* Can't do full duplex */
+#define SPI_CONTROLLER_NO_RX BIT(1) /* Can't do buffer read */
+#define SPI_CONTROLLER_NO_TX BIT(2) /* Can't do buffer write */
+#define SPI_CONTROLLER_MUST_RX BIT(3) /* Requires rx */
+#define SPI_CONTROLLER_MUST_TX BIT(4) /* Requires tx */
+#define SPI_CONTROLLER_GPIO_SS BIT(5) /* GPIO CS must select target device */
+#define SPI_CONTROLLER_SUSPENDED BIT(6) /* Currently suspended */
+ /*
+ * The spi-controller has multi chip select capability and can
+ * assert/de-assert more than one chip select at once.
+ */
+#define SPI_CONTROLLER_MULTI_CS BIT(7)
- /* flag indicating this is a non-devres managed controller */
+ /* Flag indicating if the allocation of this struct is devres-managed */
bool devm_allocated;
- /* flag indicating this is an SPI slave controller */
- bool slave;
+ union {
+ /* Flag indicating this is an SPI slave controller */
+ bool slave;
+ /* Flag indicating this is an SPI target controller */
+ bool target;
+ };
/*
- * on some hardware transfer / message size may be constrained
- * the limit may depend on device transfer settings
+ * On some hardware, transfer/message size may be constrained;
+ * the limit may depend on device transfer settings.
*/
size_t (*max_transfer_size)(struct spi_device *spi);
size_t (*max_message_size)(struct spi_device *spi);
@@ -525,14 +631,18 @@ struct spi_controller {
/* I/O mutex */
struct mutex io_mutex;
- /* lock and mutex for SPI bus locking */
+ /* Used to avoid adding the same CS twice */
+ struct mutex add_lock;
+
+ /* Lock and mutex for SPI bus locking */
spinlock_t bus_lock_spinlock;
struct mutex bus_lock_mutex;
- /* flag indicating that the SPI bus is locked for exclusive use */
+ /* Flag indicating that the SPI bus is locked for exclusive use */
bool bus_lock_flag;
- /* Setup mode and clock, etc (spi driver may call many times).
+ /*
+ * Setup mode and clock, etc (SPI driver may call many times).
*
* IMPORTANT: this may be called when transfers to another
* device are active. DO NOT UPDATE SHARED REGISTERS in ways
@@ -548,21 +658,21 @@ struct spi_controller {
* to configure specific CS timing through spi_set_cs_timing() after
* spi_setup().
*/
- int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup,
- struct spi_delay *hold, struct spi_delay *inactive);
+ int (*set_cs_timing)(struct spi_device *spi);
- /* bidirectional bulk transfers
+ /*
+ * Bidirectional bulk transfers
*
* + The transfer() method may not sleep; its main role is
* just to add the message to the queue.
* + For now there's no remove-from-queue operation, or
* any other request management
- * + To a given spi_device, message queueing is pure fifo
+ * + To a given spi_device, message queueing is pure FIFO
*
* + The controller's main job is to process its message queue,
- * selecting a chip (for masters), then transferring data
+ * selecting a chip (for controllers), then transferring data
* + If there are multiple spi_device children, the i/o queue
- * arbitration algorithm is unspecified (round robin, fifo,
+ * arbitration algorithm is unspecified (round robin, FIFO,
* priority, reservations, preemption, etc)
*
* + Chipselect stays active during the entire message
@@ -573,7 +683,7 @@ struct spi_controller {
int (*transfer)(struct spi_device *spi,
struct spi_message *mesg);
- /* called on release() to free memory provided by spi_controller */
+ /* Called on release() to free memory provided by spi_controller */
void (*cleanup)(struct spi_device *spi);
/*
@@ -586,6 +696,9 @@ struct spi_controller {
bool (*can_dma)(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer);
+ struct device *dma_map_dev;
+ struct device *cur_rx_dma_dev;
+ struct device *cur_tx_dma_dev;
/*
* These hooks are for drivers that want to use the generic
@@ -599,19 +712,22 @@ struct spi_controller {
spinlock_t queue_lock;
struct list_head queue;
struct spi_message *cur_msg;
- bool idling;
+ struct completion cur_msg_completion;
+ bool cur_msg_incomplete;
+ bool cur_msg_need_completion;
bool busy;
bool running;
bool rt;
bool auto_runtime_pm;
- bool cur_msg_prepared;
- bool cur_msg_mapped;
- bool last_cs_enable;
- bool last_cs_mode_high;
bool fallback;
+ bool last_cs_mode_high;
+ s8 last_cs[SPI_DEVICE_CS_CNT_MAX];
+ u32 last_cs_index_mask : SPI_DEVICE_CS_CNT_MAX;
struct completion xfer_completion;
size_t max_dma_len;
+ int (*optimize_message)(struct spi_message *msg);
+ int (*unoptimize_message)(struct spi_message *msg);
int (*prepare_transfer_hardware)(struct spi_controller *ctlr);
int (*transfer_one_message)(struct spi_controller *ctlr,
struct spi_message *mesg);
@@ -620,7 +736,7 @@ struct spi_controller {
struct spi_message *message);
int (*unprepare_message)(struct spi_controller *ctlr,
struct spi_message *message);
- int (*slave_abort)(struct spi_controller *ctlr);
+ int (*target_abort)(struct spi_controller *ctlr);
/*
* These hooks are for drivers that use a generic implementation
@@ -634,27 +750,29 @@ struct spi_controller {
/* Optimized handlers for SPI memory-like operations. */
const struct spi_controller_mem_ops *mem_ops;
+ const struct spi_controller_mem_caps *mem_caps;
- /* CS delays */
- struct spi_delay cs_setup;
- struct spi_delay cs_hold;
- struct spi_delay cs_inactive;
+ /* SPI or QSPI controller can set to true if supports SDR/DDR transfer rate */
+ bool dtr_caps;
- /* gpio chip select */
- int *cs_gpios;
+ struct spi_offload *(*get_offload)(struct spi_device *spi,
+ const struct spi_offload_config *config);
+ void (*put_offload)(struct spi_offload *offload);
+
+ /* GPIO chip select */
struct gpio_desc **cs_gpiods;
bool use_gpio_descriptors;
- u8 unused_native_cs;
- u8 max_native_cs;
+ s8 unused_native_cs;
+ s8 max_native_cs;
- /* statistics */
- struct spi_statistics statistics;
+ /* Statistics */
+ struct spi_statistics __percpu *pcpu_statistics;
/* DMA channels for use with core dmaengine helpers */
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
- /* dummy data for full duplex devices */
+ /* Dummy data for full duplex devices */
void *dummy_rx;
void *dummy_tx;
@@ -668,6 +786,11 @@ struct spi_controller {
/* Interrupt enable state during PTP system timestamping */
unsigned long irq_flags;
+
+ /* Flag for enabling opportunistic skipping of the queue in spi_sync */
+ bool queue_empty;
+ bool must_async;
+ bool defer_optimize_message;
};
static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
@@ -694,9 +817,9 @@ static inline void spi_controller_put(struct spi_controller *ctlr)
put_device(&ctlr->dev);
}
-static inline bool spi_controller_is_slave(struct spi_controller *ctlr)
+static inline bool spi_controller_is_target(struct spi_controller *ctlr)
{
- return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave;
+ return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->target;
}
/* PM calls that need to be issued by the driver */
@@ -716,37 +839,37 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
struct spi_transfer *xfer,
size_t progress, bool irqs_off);
-/* the spi driver core manages memory for the spi_controller classdev */
+/* The SPI driver core manages memory for the spi_controller classdev */
extern struct spi_controller *__spi_alloc_controller(struct device *host,
- unsigned int size, bool slave);
+ unsigned int size, bool target);
-static inline struct spi_controller *spi_alloc_master(struct device *host,
- unsigned int size)
+static inline struct spi_controller *spi_alloc_host(struct device *dev,
+ unsigned int size)
{
- return __spi_alloc_controller(host, size, false);
+ return __spi_alloc_controller(dev, size, false);
}
-static inline struct spi_controller *spi_alloc_slave(struct device *host,
- unsigned int size)
+static inline struct spi_controller *spi_alloc_target(struct device *dev,
+ unsigned int size)
{
if (!IS_ENABLED(CONFIG_SPI_SLAVE))
return NULL;
- return __spi_alloc_controller(host, size, true);
+ return __spi_alloc_controller(dev, size, true);
}
struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
unsigned int size,
- bool slave);
+ bool target);
-static inline struct spi_controller *devm_spi_alloc_master(struct device *dev,
- unsigned int size)
+static inline struct spi_controller *devm_spi_alloc_host(struct device *dev,
+ unsigned int size)
{
return __devm_spi_alloc_controller(dev, size, false);
}
-static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev,
- unsigned int size)
+static inline struct spi_controller *devm_spi_alloc_target(struct device *dev,
+ unsigned int size)
{
if (!IS_ENABLED(CONFIG_SPI_SLAVE))
return NULL;
@@ -759,7 +882,30 @@ extern int devm_spi_register_controller(struct device *dev,
struct spi_controller *ctlr);
extern void spi_unregister_controller(struct spi_controller *ctlr);
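
For context, a hedged sketch of how a controller driver typically pairs the devres allocator with devres registration; this is not from the patch and the probe function is hypothetical:

static int example_ctlr_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;

	/* Allocation is devres-managed; no explicit spi_controller_put() */
	ctlr = devm_spi_alloc_host(&pdev->dev, 0);
	if (!ctlr)
		return -ENOMEM;

	ctlr->bus_num = -1;		/* assign a bus number dynamically */
	ctlr->num_chipselect = 1;

	return devm_spi_register_controller(&pdev->dev, ctlr);
}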
-extern struct spi_controller *spi_busnum_to_master(u16 busnum);
+#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SPI_MASTER)
+extern struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
+extern struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
+ struct acpi_device *adev,
+ int index);
+int acpi_spi_count_resources(struct acpi_device *adev);
+#else
+static inline struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
+{
+ return NULL;
+}
+
+static inline struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
+ struct acpi_device *adev,
+ int index)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int acpi_spi_count_resources(struct acpi_device *adev)
+{
+ return 0;
+}
+#endif
/*
* SPI resource management while processing a SPI message
@@ -770,29 +916,20 @@ typedef void (*spi_res_release_t)(struct spi_controller *ctlr,
void *res);
/**
- * struct spi_res - spi resource management structure
+ * struct spi_res - SPI resource management structure
* @entry: list entry
* @release: release code called prior to freeing this resource
* @data: extra data allocated for the specific use-case
*
- * this is based on ideas from devres, but focused on life-cycle
- * management during spi_message processing
+ * This is based on ideas from devres, but focused on life-cycle
+ * management during spi_message processing.
*/
struct spi_res {
struct list_head entry;
spi_res_release_t release;
- unsigned long long data[]; /* guarantee ull alignment */
+ unsigned long long data[]; /* Guarantee ull alignment */
};
-extern void *spi_res_alloc(struct spi_device *spi,
- spi_res_release_t release,
- size_t size, gfp_t gfp);
-extern void spi_res_add(struct spi_message *message, void *res);
-extern void spi_res_free(void *res);
-
-extern void spi_res_release(struct spi_controller *ctlr,
- struct spi_message *message);
-
/*---------------------------------------------------------------------------*/
/*
@@ -803,7 +940,7 @@ extern void spi_res_release(struct spi_controller *ctlr,
*
* The spi_messages themselves consist of a series of read+write transfer
* segments. Those segments always read the same number of bits as they
- * write; but one or the other is easily ignored by passing a null buffer
+ * write; but one or the other is easily ignored by passing a NULL buffer
* pointer. (This is unlike most types of I/O API, because SPI hardware
* is full duplex.)
*
@@ -814,10 +951,10 @@ extern void spi_res_release(struct spi_controller *ctlr,
/**
* struct spi_transfer - a read/write buffer pair
- * @tx_buf: data to be written (dma-safe memory), or NULL
- * @rx_buf: data to be read (dma-safe memory), or NULL
- * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
- * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
+ * @tx_buf: data to be written (DMA-safe memory), or NULL
+ * @rx_buf: data to be read (DMA-safe memory), or NULL
+ * @tx_dma: DMA address of tx_buf, currently not for client use
+ * @rx_dma: DMA address of rx_buf, currently not for client use
* @tx_nbits: number of bits used for writing. If 0 the default
* (SPI_NBITS_SINGLE) is used.
* @rx_nbits: number of bits used for reading. If 0 the default
@@ -828,6 +965,7 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @bits_per_word: select a bits_per_word other than the device default
* for this transfer. If 0 the default (from @spi_device) is used.
* @dummy_data: indicates transfer is dummy bytes transfer.
+ * @cs_off: performs the transfer with chipselect off.
* @cs_change: affects chipselect after this transfer completes
* @cs_change_delay: delay between cs deassert and assert when
* @cs_change is set and @spi_transfer is not the last in @spi_message
@@ -837,11 +975,15 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @word_delay: inter word delay to be introduced after each word size
* (set by bits_per_word) transmission.
* @effective_speed_hz: the effective SCK-speed that was used to
- * transfer this transfer. Set to 0 if the spi bus driver does
+ * transfer this transfer. Set to 0 if the SPI bus driver does
* not support it.
* @transfer_list: transfers are sequenced through @spi_message.transfers
+ * @tx_sg_mapped: If true, the @tx_sg is mapped for DMA
+ * @rx_sg_mapped: If true, the @rx_sg is mapped for DMA
* @tx_sg: Scatterlist for transmit, currently not for client use
* @rx_sg: Scatterlist for receive, currently not for client use
+ * @offload_flags: Flags that are only applicable to specialized SPI offload
+ * transfers. See %SPI_OFFLOAD_XFER_* in spi-offload.h.
* @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset
* within @tx_buf for which the SPI device is requesting that the time
* snapshot for this transfer begins. Upon completing the SPI transfer,
@@ -856,26 +998,27 @@ extern void spi_res_release(struct spi_controller *ctlr,
* purposefully (instead of setting to spi_transfer->len - 1) to denote
* that a transfer-level snapshot taken from within the driver may still
* be of higher quality.
- * @ptp_sts: Pointer to a memory location held by the SPI slave device where a
+ * @ptp_sts: Pointer to a memory location held by the SPI target device where a
* PTP system timestamp structure may lie. If drivers use PIO or their
* hardware has some sort of assist for retrieving exact transfer timing,
* they can (and should) assert @ptp_sts_supported and populate this
* structure using the ptp_read_system_*ts helper functions.
- * The timestamp must represent the time at which the SPI slave device has
+ * The timestamp must represent the time at which the SPI target device has
* processed the word, i.e. the "pre" timestamp should be taken before
* transmitting the "pre" word, and the "post" timestamp after receiving
* transmit confirmation from the controller for the "post" word.
+ * @dtr_mode: true if the transfer uses double transfer rate.
* @timestamped: true if the transfer has been timestamped
- * @error: Error status logged by spi controller driver.
+ * @error: Error status logged by SPI controller driver.
*
* SPI transfers always write the same number of bytes as they read.
* Protocol drivers should always provide @rx_buf and/or @tx_buf.
* In some cases, they may also want to provide DMA addresses for
* the data being transferred; that may reduce overhead, when the
- * underlying driver uses dma.
+ * underlying driver uses DMA.
*
- * If the transmit buffer is null, zeroes will be shifted out
- * while filling @rx_buf. If the receive buffer is null, the data
+ * If the transmit buffer is NULL, zeroes will be shifted out
+ * while filling @rx_buf. If the receive buffer is NULL, the data
* shifted in will be discarded. Only "len" bytes shift out (or in).
* It's an error to try to shift out a partial word. (For example, by
* shifting out three bytes with word size of sixteen or twenty bits;
@@ -909,13 +1052,16 @@ extern void spi_res_release(struct spi_controller *ctlr,
* Some devices need protocol transactions to be built from a series of
* spi_message submissions, where the content of one message is determined
* by the results of previous messages and where the whole transaction
- * ends when the chipselect goes intactive.
+ * ends when the chipselect goes inactive.
*
* SPI can transfer in 1x, 2x or 4x. It gets this transfer information from
* the device through @tx_nbits and @rx_nbits. For bidirectional transfers,
* both should be set. The user can select the transfer mode with
* SPI_NBITS_SINGLE (1x), SPI_NBITS_DUAL (2x) and SPI_NBITS_QUAD (4x).
*
+ * The user may also set dtr_mode to true to use dual transfer rate mode if
+ * desired; if not, single transfer rate mode is used by default.
+ *
* The code that submits an spi_message (and its spi_transfers)
* to the lower layers is responsible for managing its memory.
* Zero-initialize every field you don't set up explicitly, to
@@ -923,27 +1069,38 @@ extern void spi_res_release(struct spi_controller *ctlr,
* and its transfers, ignore them until its completion callback.
*/
struct spi_transfer {
- /* it's ok if tx_buf == rx_buf (right?)
- * for MicroWire, one buffer must be null
- * buffers must work with dma_*map_single() calls, unless
- * spi_message.is_dma_mapped reports a pre-existing mapping
+ /*
+ * It's okay if tx_buf == rx_buf (right?).
+ * For MicroWire, one buffer must be NULL.
+ * Buffers must work with dma_*map_single() calls.
*/
const void *tx_buf;
void *rx_buf;
unsigned len;
- dma_addr_t tx_dma;
- dma_addr_t rx_dma;
+#define SPI_TRANS_FAIL_NO_START BIT(0)
+#define SPI_TRANS_FAIL_IO BIT(1)
+ u16 error;
+
+ bool tx_sg_mapped;
+ bool rx_sg_mapped;
+
struct sg_table tx_sg;
struct sg_table rx_sg;
+ dma_addr_t tx_dma;
+ dma_addr_t rx_dma;
unsigned dummy_data:1;
+ unsigned cs_off:1;
unsigned cs_change:1;
- unsigned tx_nbits:3;
- unsigned rx_nbits:3;
-#define SPI_NBITS_SINGLE 0x01 /* 1bit transfer */
-#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */
-#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */
+ unsigned tx_nbits:4;
+ unsigned rx_nbits:4;
+ unsigned timestamped:1;
+ bool dtr_mode;
+#define SPI_NBITS_SINGLE 0x01 /* 1-bit transfer */
+#define SPI_NBITS_DUAL 0x02 /* 2-bit transfer */
+#define SPI_NBITS_QUAD 0x04 /* 4-bit transfer */
+#define SPI_NBITS_OCTAL 0x08 /* 8-bit transfer */
u8 bits_per_word;
struct spi_delay delay;
struct spi_delay cs_change_delay;
@@ -952,34 +1109,35 @@ struct spi_transfer {
u32 effective_speed_hz;
+ /* Use %SPI_OFFLOAD_XFER_* from spi-offload.h */
+ unsigned int offload_flags;
+
unsigned int ptp_sts_word_pre;
unsigned int ptp_sts_word_post;
struct ptp_system_timestamp *ptp_sts;
- bool timestamped;
-
struct list_head transfer_list;
-
-#define SPI_TRANS_FAIL_NO_START BIT(0)
- u16 error;
};
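
The @error flags tie back to the transfer_one() contract documented above: a controller driver records the failure before finalizing the transfer. A hedged sketch, not part of this patch (example_start_dma() is a hypothetical helper):

static int example_transfer_one(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	if (example_start_dma(ctlr, xfer) < 0) {	/* hypothetical helper */
		xfer->error |= SPI_TRANS_FAIL_IO;	/* record before finalizing */
		spi_finalize_current_transfer(ctlr);
		return 1;
	}

	return 1;	/* completion is signalled from the IRQ handler */
}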
/**
* struct spi_message - one multi-segment SPI transaction
* @transfers: list of transfer segments in this transaction
* @spi: SPI device to which the transaction is queued
- * @is_dma_mapped: if true, the caller provided both dma and cpu virtual
- * addresses for each transfer buffer
+ * @pre_optimized: peripheral driver pre-optimized the message
+ * @optimized: the message is in the optimized state
+ * @prepared: spi_prepare_message was called for this message
+ * @status: zero for success, else negative errno
* @complete: called to report transaction completions
* @context: the argument to complete() when it's called
* @frame_length: the total number of bytes in the message
* @actual_length: the total number of bytes that were transferred in all
* successful segments
- * @status: zero for success, else negative errno
* @queue: for use by whichever driver currently owns the message
* @state: for use by whichever driver currently owns the message
- * @resources: for resource management when the spi message is processed
+ * @opt_state: for use by whichever driver currently owns the message
+ * @resources: for resource management when the SPI message is processed
+ * @offload: (optional) offload instance used by this message
*
* A @spi_message is used to execute an atomic sequence of data transfers,
* each represented by a struct spi_transfer. The sequence is "atomic"
@@ -1000,9 +1158,16 @@ struct spi_message {
struct spi_device *spi;
- unsigned is_dma_mapped:1;
+ /* spi_optimize_message() was called for this message */
+ bool pre_optimized;
+ /* __spi_optimize_message() was called for this message */
+ bool optimized;
+
+ /* spi_prepare_message() was called for this message */
+ bool prepared;
- /* REVISIT: we might want a flag affecting the behavior of the
+ /*
+ * REVISIT: we might want a flag affecting the behavior of the
* last transfer ... allowing things like "read 16 bit length L"
* immediately followed by "read L bytes". Basically imposing
* a specific message scheduling algorithm.
@@ -1013,21 +1178,33 @@ struct spi_message {
* tell them about such special cases.
*/
- /* completion is reported through a callback */
+ /* Completion is reported through a callback */
+ int status;
void (*complete)(void *context);
void *context;
unsigned frame_length;
unsigned actual_length;
- int status;
- /* for optional use by whatever driver currently owns the
+ /*
+ * For optional use by whatever driver currently owns the
* spi_message ... between calls to spi_async and then later
* complete(), that's the spi_controller controller driver.
*/
struct list_head queue;
void *state;
+ /*
+ * Optional state for use by controller driver between calls to
+ * __spi_optimize_message() and __spi_unoptimize_message().
+ */
+ void *opt_state;
+
+ /*
+ * Optional offload instance used by this message. This must be set
+ * by the peripheral driver before calling spi_optimize_message().
+ */
+ struct spi_offload *offload;
- /* list of spi_res reources when the spi message is processed */
+ /* List of spi_res resources when the SPI message is processed */
struct list_head resources;
};
@@ -1064,7 +1241,7 @@ spi_transfer_delay_exec(struct spi_transfer *t)
/**
* spi_message_init_with_transfers - Initialize spi_message and append transfers
* @m: spi_message to be initialized
- * @xfers: An array of spi transfers
+ * @xfers: An array of SPI transfers
* @num_xfers: Number of items in the xfer array
*
* This function initializes the given spi_message and adds each spi_transfer in
@@ -1081,26 +1258,27 @@ struct spi_transfer *xfers, unsigned int num_xfers)
spi_message_add_tail(&xfers[i], m);
}
-/* It's fine to embed message and transaction structures in other data
+/*
+ * It's fine to embed message and transaction structures in other data
* structures so long as you don't free them while they're in use.
*/
-
static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags)
{
- struct spi_message *m;
-
- m = kzalloc(sizeof(struct spi_message)
- + ntrans * sizeof(struct spi_transfer),
- flags);
- if (m) {
- unsigned i;
- struct spi_transfer *t = (struct spi_transfer *)(m + 1);
-
- spi_message_init_no_memset(m);
- for (i = 0; i < ntrans; i++, t++)
- spi_message_add_tail(t, m);
- }
- return m;
+ struct spi_message_with_transfers {
+ struct spi_message m;
+ struct spi_transfer t[];
+ } *mwt;
+ unsigned i;
+
+ mwt = kzalloc(struct_size(mwt, t, ntrans), flags);
+ if (!mwt)
+ return NULL;
+
+ spi_message_init_no_memset(&mwt->m);
+ for (i = 0; i < ntrans; i++)
+ spi_message_add_tail(&mwt->t[i], &mwt->m);
+
+ return &mwt->m;
}
static inline void spi_message_free(struct spi_message *m)
@@ -1108,16 +1286,14 @@ static inline void spi_message_free(struct spi_message *m)
kfree(m);
}
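
A hedged usage sketch for the allocator above: the transfers are already chained into @transfers, so callers fill them in through the list (the function and buffers here are illustrative, not from this patch):

static int example_two_part_write(struct spi_device *spi,
				  const void *hdr, size_t hlen,
				  const void *payload, size_t plen)
{
	struct spi_message *m = spi_message_alloc(2, GFP_KERNEL);
	struct spi_transfer *t;
	int ret;

	if (!m)
		return -ENOMEM;

	t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
	t->tx_buf = hdr;
	t->len = hlen;

	t = list_next_entry(t, transfer_list);
	t->tx_buf = payload;
	t->len = plen;

	ret = spi_sync(spi, m);
	spi_message_free(m);
	return ret;
}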
-extern int spi_set_cs_timing(struct spi_device *spi,
- struct spi_delay *setup,
- struct spi_delay *hold,
- struct spi_delay *inactive);
+extern int spi_optimize_message(struct spi_device *spi, struct spi_message *msg);
+extern void spi_unoptimize_message(struct spi_message *msg);
+extern int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
+ struct spi_message *msg);
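
These helpers let a peripheral driver pay the validation cost once for a message it submits repeatedly. A hedged sketch, not part of this patch:

static int example_repeat_sync(struct spi_device *spi,
			       struct spi_message *msg, int n)
{
	int ret, i;

	ret = spi_optimize_message(spi, msg);	/* pre-validate once */
	if (ret)
		return ret;

	for (i = 0; i < n; i++) {
		ret = spi_sync(spi, msg);	/* fast path on each submit */
		if (ret)
			break;
	}

	spi_unoptimize_message(msg);		/* undo spi_optimize_message() */
	return ret;
}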
extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
-extern int spi_async_locked(struct spi_device *spi,
- struct spi_message *message);
-extern int spi_slave_abort(struct spi_device *spi);
+extern int spi_target_abort(struct spi_device *spi);
static inline size_t
spi_max_message_size(struct spi_device *spi)
@@ -1139,7 +1315,7 @@ spi_max_transfer_size(struct spi_device *spi)
if (ctlr->max_transfer_size)
tr_max = ctlr->max_transfer_size(spi);
- /* transfer size limit must not be greater than messsage size limit */
+ /* Transfer size limit must not be greater than message size limit */
return min(tr_max, msg_max);
}
@@ -1155,7 +1331,7 @@ spi_max_transfer_size(struct spi_device *spi)
*/
static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw)
{
- u32 bpw_mask = spi->master->bits_per_word_mask;
+ u32 bpw_mask = spi->controller->bits_per_word_mask;
if (bpw == 8 || (bpw <= 32 && bpw_mask & SPI_BPW_MASK(bpw)))
return true;
@@ -1163,6 +1339,49 @@ static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw)
return false;
}
+/**
+ * spi_bpw_to_bytes - Convert bits per word to bytes
+ * @bpw: Bits per word
+ *
+ * This function converts the given @bpw to bytes. The result is always
+ * power-of-two, e.g.,
+ *
+ * =============== =================
+ * Input (in bits) Output (in bytes)
+ * =============== =================
+ * 5 1
+ * 9 2
+ * 21 4
+ * 37 8
+ * =============== =================
+ *
+ * It returns 0 when @bpw is 0.
+ *
+ * Returns:
+ * Bytes for the given @bpw.
+ */
+static inline u32 spi_bpw_to_bytes(u32 bpw)
+{
+ return roundup_pow_of_two(BITS_TO_BYTES(bpw));
+}
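
A hedged usage note, not from the patch: buffer sizing for a run of words follows directly from the helper, e.g.

	/* Bytes needed to hold nwords words of bpw bits each (illustrative) */
	size_t buf_len = nwords * spi_bpw_to_bytes(bpw);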
+
+/**
+ * spi_controller_xfer_timeout - Compute a suitable timeout value
+ * @ctlr: SPI controller
+ * @xfer: Transfer descriptor
+ *
+ * Compute a relevant timeout value for the given transfer. We derive the time
+ * that it would take on a single data line and take twice this amount of time
+ * with a minimum of 500ms to avoid false positives on loaded systems.
+ *
+ * Returns: Transfer timeout value in milliseconds.
+ */
+static inline unsigned int spi_controller_xfer_timeout(struct spi_controller *ctlr,
+ struct spi_transfer *xfer)
+{
+ return max(xfer->len * 8 * 2 / (xfer->speed_hz / 1000), 500U);
+}
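
A worked example of the formula: a 4096-byte transfer at 1 MHz gives 4096 * 8 * 2 / (1000000 / 1000) = 65 ms of doubled wire time, so the 500 ms floor applies; a 1 MiB transfer at 10 MHz gives 1048576 * 16 / 10000 = 1677 ms, which becomes the timeout.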
+
/*---------------------------------------------------------------------------*/
/* SPI transfer replacement methods which make use of spi_res */
@@ -1176,7 +1395,7 @@ typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr,
* replacements that have occurred
* so that they can get reverted
* @release: some extra release code to get executed prior to
- * relasing this structure
+ * releasing this structure
* @extradata: pointer to some extra data if requested or NULL
* @replaced_transfers: transfers that have been replaced and which need
* to get restored
@@ -1186,9 +1405,9 @@ typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr,
* @inserted_transfers: array of spi_transfers of array-size @inserted,
* that have been replacing replaced_transfers
*
- * note: that @extradata will point to @inserted_transfers[@inserted]
+ * Note: that @extradata will point to @inserted_transfers[@inserted]
* if some extra allocation is requested, so alignment will be the same
- * as for spi_transfers
+ * as for spi_transfers.
*/
struct spi_replaced_transfers {
spi_replaced_release_t release;
@@ -1199,27 +1418,21 @@ struct spi_replaced_transfers {
struct spi_transfer inserted_transfers[];
};
-extern struct spi_replaced_transfers *spi_replace_transfers(
- struct spi_message *msg,
- struct spi_transfer *xfer_first,
- size_t remove,
- size_t insert,
- spi_replaced_release_t release,
- size_t extradatasize,
- gfp_t gfp);
-
/*---------------------------------------------------------------------------*/
/* SPI transfer transformation methods */
extern int spi_split_transfers_maxsize(struct spi_controller *ctlr,
struct spi_message *msg,
- size_t maxsize,
- gfp_t gfp);
+ size_t maxsize);
+extern int spi_split_transfers_maxwords(struct spi_controller *ctlr,
+ struct spi_message *msg,
+ size_t maxwords);
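
Controller drivers with FIFO or DMA length limits typically call these from their prepare path. A hedged sketch, not part of this patch (the 64 KiB limit is illustrative; SZ_64K comes from linux/sizes.h):

static int example_prepare_message(struct spi_controller *ctlr,
				   struct spi_message *msg)
{
	/* Split any transfer longer than the controller's 64 KiB limit */
	return spi_split_transfers_maxsize(ctlr, msg, SZ_64K);
}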
/*---------------------------------------------------------------------------*/
-/* All these synchronous SPI transfer routines are utilities layered
+/*
+ * All these synchronous SPI transfer routines are utilities layered
* over the core async transfer primitive. Here, "synchronous" means
* they will sleep uninterruptibly until the async transfer completes.
*/
@@ -1299,7 +1512,7 @@ spi_read(struct spi_device *spi, void *buf, size_t len)
return spi_sync_transfer(spi, &t, 1);
}
-/* this copies txbuf and rxbuf data; for small transfers only! */
+/* This copies txbuf and rxbuf data; for small transfers only! */
extern int spi_write_then_read(struct spi_device *spi,
const void *txbuf, unsigned n_tx,
void *rxbuf, unsigned n_rx);
@@ -1322,7 +1535,7 @@ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
status = spi_write_then_read(spi, &cmd, 1, &result, 1);
- /* return negative errno or unsigned value */
+ /* Return negative errno or unsigned value */
return (status < 0) ? status : result;
}
@@ -1347,7 +1560,7 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
status = spi_write_then_read(spi, &cmd, 1, &result, 2);
- /* return negative errno or unsigned value */
+ /* Return negative errno or unsigned value */
return (status < 0) ? status : result;
}
@@ -1362,7 +1575,7 @@ static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
*
* Callable only from contexts that can sleep.
*
- * Return: the (unsigned) sixteen bit number returned by the device in cpu
+ * Return: the (unsigned) sixteen bit number returned by the device in CPU
* endianness, or else a negative error code.
*/
static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
@@ -1390,7 +1603,7 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
* As a rule, SPI devices can't be probed. Instead, board init code
* provides a table listing the devices which are present, with enough
* information to bind and set up the device's driver. There's basic
- * support for nonstatic configurations too; enough to handle adding
+ * support for non-static configurations too; enough to handle adding
* parport adapters, or microcontrollers acting as USB-to-SPI bridges.
*/
@@ -1427,12 +1640,13 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
* are active in some dynamic board configuration models.
*/
struct spi_board_info {
- /* the device name and module name are coupled, like platform_bus;
+ /*
+ * The device name and module name are coupled, like platform_bus;
* "modalias" is normally the driver name.
*
* platform_data goes to spi_device.dev.platform_data,
* controller_data goes to spi_device.controller_data,
- * irq is copied too
+ * IRQ is copied too.
*/
char modalias[SPI_NAME_SIZE];
const void *platform_data;
@@ -1440,25 +1654,28 @@ struct spi_board_info {
void *controller_data;
int irq;
- /* slower signaling on noisy or low voltage boards */
+ /* Slower signaling on noisy or low voltage boards */
u32 max_speed_hz;
- /* bus_num is board specific and matches the bus_num of some
+ /*
+ * bus_num is board specific and matches the bus_num of some
* spi_controller that will probably be registered later.
*
- * chip_select reflects how this chip is wired to that master;
+ * chip_select reflects how this chip is wired to that controller;
* it's less than num_chipselect.
*/
u16 bus_num;
u16 chip_select;
- /* mode becomes spi_device.mode, and is essential for chips
+ /*
+ * mode becomes spi_device.mode, and is essential for chips
* where the default of SPI_CS_HIGH = 0 is wrong.
*/
u32 mode;
- /* ... may need additional spi_device chip config data here.
+ /*
+ * ... may need additional spi_device chip config data here.
* avoid stuff protocol drivers can set; but include stuff
* needed to behave without being bound to a driver:
* - quirks like clock rate mattering when not selected
@@ -1469,19 +1686,20 @@ struct spi_board_info {
extern int
spi_register_board_info(struct spi_board_info const *info, unsigned n);
#else
-/* board init code may ignore whether SPI is configured or not */
+/* Board init code may ignore whether SPI is configured or not */
static inline int
spi_register_board_info(struct spi_board_info const *info, unsigned n)
{ return 0; }
#endif
-/* If you're hotplugging an adapter with devices (parport, usb, etc)
+/*
+ * If you're hotplugging an adapter with devices (parport, USB, etc)
* use spi_new_device() to describe each device. You can also call
* spi_unregister_device() to start making that device vanish, but
* normally that would be handled by spi_unregister_controller().
*
* You can also use spi_alloc_device() and spi_add_device() to use a two
- * stage registration sequence for each spi_device. This gives the caller
+ * stage registration sequence for each spi_device. This gives the caller
* some more control over the spi_device structure before it is registered,
* but requires that caller to initialize fields that would otherwise
* be defined using the board info.
@@ -1500,49 +1718,13 @@ extern void spi_unregister_device(struct spi_device *spi);
extern const struct spi_device_id *
spi_get_device_id(const struct spi_device *sdev);
+extern const void *
+spi_get_device_match_data(const struct spi_device *sdev);
+
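These resolve match data regardless of whether the device was bound through OF, ACPI or the spi_device_id table. A hedged probe sketch, not from the patch (struct example_chip_info is hypothetical):

static int example_probe(struct spi_device *spi)
{
	const struct example_chip_info *info;

	info = spi_get_device_match_data(spi);	/* OF/ACPI/id-table agnostic */
	if (!info)
		return -ENODEV;

	return 0;
}
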
static inline bool
spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
{
return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers);
}
-/* OF support code */
-#if IS_ENABLED(CONFIG_OF)
-
-/* must call put_device() when done with returned spi_device device */
-extern struct spi_device *
-of_find_spi_device_by_node(struct device_node *node);
-
-#else
-
-static inline struct spi_device *
-of_find_spi_device_by_node(struct device_node *node)
-{
- return NULL;
-}
-
-#endif /* IS_ENABLED(CONFIG_OF) */
-
-/* Compatibility layer */
-#define spi_master spi_controller
-
-#define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX
-#define SPI_MASTER_NO_RX SPI_CONTROLLER_NO_RX
-#define SPI_MASTER_NO_TX SPI_CONTROLLER_NO_TX
-#define SPI_MASTER_MUST_RX SPI_CONTROLLER_MUST_RX
-#define SPI_MASTER_MUST_TX SPI_CONTROLLER_MUST_TX
-
-#define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr)
-#define spi_master_set_devdata(_ctlr, _data) \
- spi_controller_set_devdata(_ctlr, _data)
-#define spi_master_get(_ctlr) spi_controller_get(_ctlr)
-#define spi_master_put(_ctlr) spi_controller_put(_ctlr)
-#define spi_master_suspend(_ctlr) spi_controller_suspend(_ctlr)
-#define spi_master_resume(_ctlr) spi_controller_resume(_ctlr)
-
-#define spi_register_master(_ctlr) spi_register_controller(_ctlr)
-#define devm_spi_register_master(_dev, _ctlr) \
- devm_spi_register_controller(_dev, _ctlr)
-#define spi_unregister_master(_ctlr) spi_unregister_controller(_ctlr)
-
#endif /* __LINUX_SPI_H */
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index 4444c2a992cb..c92cd43a47f4 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -4,13 +4,15 @@
#include <linux/workqueue.h>
+typedef u32 (*spi_bb_txrx_word_fn)(struct spi_device *, unsigned int, u32, u8, unsigned int);
+
struct spi_bitbang {
struct mutex lock;
u8 busy;
u8 use_dma;
u16 flags; /* extra spi->mode support */
- struct spi_master *master;
+ struct spi_controller *ctlr;
/* setup_transfer() changes clock and/or wordsize to match settings
* for this transfer; zeroes restore defaults from spi_device.
@@ -22,15 +24,15 @@ struct spi_bitbang {
#define BITBANG_CS_ACTIVE 1 /* normally nCS, active low */
#define BITBANG_CS_INACTIVE 0
+ void (*set_mosi_idle)(struct spi_device *spi);
/* txrx_bufs() may handle dma mapping for transfers that don't
* already have one (transfer.{tx,rx}_dma is zero), or use PIO
*/
int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
/* txrx_word[SPI_MODE_*]() just looks like a shift register */
- u32 (*txrx_word[4])(struct spi_device *spi,
- unsigned nsecs,
- u32 word, u8 bits, unsigned flags);
+ spi_bb_txrx_word_fn txrx_word[SPI_MODE_X_MASK + 1];
+
int (*set_line_direction)(struct spi_device *spi, bool output);
};
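For illustration, one txrx_word[] slot for SPI mode 0 might look like the sketch below; it matches the spi_bb_txrx_word_fn signature above, and the setsck()/setmosi()/getmiso() GPIO helpers are hypothetical per-board callbacks:

#include <linux/delay.h>
#include <linux/spi/spi.h>

static void setsck(struct spi_device *spi, int is_on);		/* hypothetical */
static void setmosi(struct spi_device *spi, int is_on);		/* hypothetical */
static int getmiso(struct spi_device *spi);			/* hypothetical */

static u32 my_txrx_word_mode0(struct spi_device *spi, unsigned int nsecs,
			      u32 word, u8 bits, unsigned int flags)
{
	/* Classic shift-register loop: MSB out first, sample on rising edge. */
	for (word <<= (32 - bits); likely(bits); bits--) {
		setmosi(spi, word & (1 << 31));
		ndelay(nsecs);			/* half a clock period */
		setsck(spi, 1);
		word <<= 1;
		word |= getmiso(spi);		/* sample the incoming bit */
		ndelay(nsecs);
		setsck(spi, 0);
	}
	return word;
}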
diff --git a/include/linux/spi/spi_gpio.h b/include/linux/spi/spi_gpio.h
index 9e7e83d8645b..5f0e1407917a 100644
--- a/include/linux/spi/spi_gpio.h
+++ b/include/linux/spi/spi_gpio.h
@@ -15,8 +15,8 @@
*/
/**
- * struct spi_gpio_platform_data - parameter for bitbanged SPI master
- * @num_chipselect: how many slaves to allow
+ * struct spi_gpio_platform_data - parameter for bitbanged SPI host controller
+ * @num_chipselect: how many target devices to allow
*/
struct spi_gpio_platform_data {
u16 num_chipselect;
diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h
index c15d69d28e68..1b8d984668b6 100644
--- a/include/linux/spi/xilinx_spi.h
+++ b/include/linux/spi/xilinx_spi.h
@@ -2,19 +2,24 @@
#ifndef __LINUX_SPI_XILINX_SPI_H
#define __LINUX_SPI_XILINX_SPI_H
+#include <linux/types.h>
+
+struct spi_board_info;
+
/**
* struct xspi_platform_data - Platform data of the Xilinx SPI driver
- * @num_chipselect: Number of chip select by the IP.
- * @little_endian: If registers should be accessed little endian or not.
- * @bits_per_word: Number of bits per word.
* @devices: Devices to add when the driver is probed.
* @num_devices: Number of devices in the devices array.
+ * @num_chipselect: Number of chip selects supported by the IP.
+ * @bits_per_word: Number of bits per word.
+ * @force_irq: If set, forces QSPI transaction requirements.
*/
struct xspi_platform_data {
- u16 num_chipselect;
- u8 bits_per_word;
struct spi_board_info *devices;
u8 num_devices;
+ u8 num_chipselect;
+ u8 bits_per_word;
+ bool force_irq;
};
#endif /* __LINUX_SPI_XILINX_SPI_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 79897841a2cc..d3561c4a080e 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
+#define __LINUX_INSIDE_SPINLOCK_H
/*
* include/linux/spinlock.h - generic spinlock/rwlock declarations
@@ -12,6 +13,8 @@
* asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
* initializers
*
+ * linux/spinlock_types_raw:
+ * The raw types and initializers
* linux/spinlock_types.h:
* defines the generic type and initializers
*
@@ -31,6 +34,8 @@
* contains the generic, simplified UP spinlock type.
* (which is an empty structure on non-debug builds)
*
+ * linux/spinlock_types_raw:
+ * The raw RT types and initializers
* linux/spinlock_types.h:
* defines the generic type and initializers
*
@@ -53,10 +58,10 @@
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
-#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
+#include <linux/cleanup.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>
@@ -168,12 +173,11 @@ do { \
* Architectures that can implement ACQUIRE better need to take care.
*/
#ifndef smp_mb__after_spinlock
-#define smp_mb__after_spinlock() do { } while (0)
+#define smp_mb__after_spinlock() kcsan_mb()
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
-#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
extern int do_raw_spin_trylock(raw_spinlock_t *lock);
extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
@@ -184,18 +188,6 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
mmiowb_spin_lock();
}
-#ifndef arch_spin_lock_flags
-#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#endif
-
-static inline void
-do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
-{
- __acquire(lock);
- arch_spin_lock_flags(&lock->raw_lock, *flags);
- mmiowb_spin_lock();
-}
-
static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
int ret = arch_spin_trylock(&(lock)->raw_lock);
@@ -308,8 +300,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
1 : ({ local_irq_restore(flags); 0; }); \
})
-/* Include rwlock functions */
+#ifndef CONFIG_PREEMPT_RT
+/* Include rwlock functions for !RT */
#include <linux/rwlock.h>
+#endif
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@@ -320,6 +314,9 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
# include <linux/spinlock_api_up.h>
#endif
+/* Non PREEMPT_RT kernel, map to raw spinlocks: */
+#ifndef CONFIG_PREEMPT_RT
+
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
@@ -454,6 +451,39 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
+#else /* !CONFIG_PREEMPT_RT */
+# include <linux/spinlock_rt.h>
+#endif /* CONFIG_PREEMPT_RT */
+
+/*
+ * Does a critical section need to be broken due to another
+ * task waiting? (Technically this does not depend on CONFIG_PREEMPTION,
+ * but reflects a general need for low latency.)
+ */
+static inline int spin_needbreak(spinlock_t *lock)
+{
+ if (!preempt_model_preemptible())
+ return 0;
+
+ return spin_is_contended(lock);
+}
+
+/*
+ * Check if an rwlock is contended.
+ * Returns non-zero if there is another task waiting on the rwlock.
+ * Returns zero if the lock is not contended or the system / underlying
+ * rwlock implementation does not support contention detection.
+ * Technically this does not depend on CONFIG_PREEMPTION, but reflects
+ * a general need for low latency.
+ */
+static inline int rwlock_needbreak(rwlock_t *lock)
+{
+ if (!preempt_model_preemptible())
+ return 0;
+
+ return rwlock_is_contended(lock);
+}
+
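A minimal sketch of how spin_needbreak() is typically consumed: a long lock-holding loop voluntarily drops the lock when another task is spinning on it (drain_budget() and its budget counter are hypothetical):

#include <linux/spinlock.h>

static void drain_budget(spinlock_t *lock, unsigned int *budget)
{
	spin_lock(lock);
	while (*budget) {
		(*budget)--;			/* one unit of work under the lock */
		if (spin_needbreak(lock)) {
			spin_unlock(lock);	/* give the waiter a chance */
			cpu_relax();
			spin_lock(lock);
		}
	}
	spin_unlock(lock);
}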
/*
* Pull the atomic_t declaration:
* (asm-mips/atomic.h needs above definitions)
@@ -476,6 +506,15 @@ extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
+#define atomic_dec_and_raw_lock(atomic, lock) \
+ __cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))
+
+extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
+ unsigned long *flags);
+#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
+ __cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))
+
int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
size_t max_size, unsigned int cpu_mult,
gfp_t gfp, const char *name,
@@ -493,4 +532,89 @@ int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
void free_bucket_spinlocks(spinlock_t *locks);
+DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
+ raw_spin_lock(_T->lock),
+ raw_spin_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
+ raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
+ raw_spin_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
+ raw_spin_lock_irq(_T->lock),
+ raw_spin_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t,
+ raw_spin_lock_bh(_T->lock),
+ raw_spin_unlock_bh(_T->lock))
+
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock))
+
+DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
+ raw_spin_lock_irqsave(_T->lock, _T->flags),
+ raw_spin_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
+ raw_spin_trylock_irqsave(_T->lock, _T->flags))
+
+DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
+ spin_lock(_T->lock),
+ spin_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
+ spin_lock_irq(_T->lock),
+ spin_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
+ spin_trylock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t,
+ spin_lock_bh(_T->lock),
+ spin_unlock_bh(_T->lock))
+
+DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try,
+ spin_trylock_bh(_T->lock))
+
+DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
+ spin_lock_irqsave(_T->lock, _T->flags),
+ spin_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
+ spin_trylock_irqsave(_T->lock, _T->flags))
+
+DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
+ read_lock(_T->lock),
+ read_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
+ read_lock_irq(_T->lock),
+ read_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
+ read_lock_irqsave(_T->lock, _T->flags),
+ read_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
+ write_lock(_T->lock),
+ write_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
+ write_lock_irq(_T->lock),
+ write_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
+ write_lock_irqsave(_T->lock, _T->flags),
+ write_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
+#undef __LINUX_INSIDE_SPINLOCK_H
#endif /* __LINUX_SPINLOCK_H */
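Usage sketch for the scoped-guard definitions above (built on <linux/cleanup.h>): the lock is released automatically when the guard goes out of scope. my_lock and my_count are hypothetical:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static unsigned long my_count;

static void bump_count(void)
{
	/* Takes spin_lock_irqsave(); spin_unlock_irqrestore() runs at scope exit. */
	guard(spinlock_irqsave)(&my_lock);
	my_count++;
}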
diff --git a/include/linux/spinlock_api.h b/include/linux/spinlock_api.h
new file mode 100644
index 000000000000..6338b27f98df
--- /dev/null
+++ b/include/linux/spinlock_api.h
@@ -0,0 +1 @@
+#include <linux/spinlock.h>
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 19a9be9d97ee..9ecb0ab504e3 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -1,8 +1,8 @@
#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H
-#ifndef __LINUX_SPINLOCK_H
-# error "please don't include this file directly"
+#ifndef __LINUX_INSIDE_SPINLOCK_H
+# error "Please do not include this file directly."
#endif
/*
@@ -108,16 +108,7 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
local_irq_save(flags);
preempt_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- /*
- * On lockdep we dont want the hand-coded irq-enable of
- * do_raw_spin_lock_flags() code, because lockdep assumes
- * that interrupts are not re-enabled during lock-acquire:
- */
-#ifdef CONFIG_LOCKDEP
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
-#else
- do_raw_spin_lock_flags(lock, &flags);
-#endif
return flags;
}
@@ -187,6 +178,9 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
return 0;
}
+/* PREEMPT_RT has its own rwlock implementation */
+#ifndef CONFIG_PREEMPT_RT
#include <linux/rwlock_api_smp.h>
+#endif
#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index d0d188861ad6..819aeba1c87e 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_SPINLOCK_API_UP_H
#define __LINUX_SPINLOCK_API_UP_H
-#ifndef __LINUX_SPINLOCK_H
+#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif
@@ -59,6 +59,7 @@
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
#define _raw_read_lock(lock) __LOCK(lock)
#define _raw_write_lock(lock) __LOCK(lock)
+#define _raw_write_lock_nested(lock, subclass) __LOCK(lock)
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
new file mode 100644
index 000000000000..f6499c37157d
--- /dev/null
+++ b/include/linux/spinlock_rt.h
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
+
+#ifndef __LINUX_INSIDE_SPINLOCK_H
+#error Do not include directly. Use spinlock.h
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key, bool percpu);
+#else
+static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key, bool percpu)
+{
+}
+#endif
+
+#define __spin_lock_init(slock, name, key, percpu) \
+do { \
+ rt_mutex_base_init(&(slock)->lock); \
+ __rt_spin_lock_init(slock, name, key, percpu); \
+} while (0)
+
+#define _spin_lock_init(slock, percpu) \
+do { \
+ static struct lock_class_key __key; \
+ __spin_lock_init(slock, #slock, &__key, percpu); \
+} while (0)
+
+#define spin_lock_init(slock) _spin_lock_init(slock, false)
+#define local_spin_lock_init(slock) _spin_lock_init(slock, true)
+
+extern void rt_spin_lock(spinlock_t *lock) __acquires(lock);
+extern void rt_spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock);
+extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock);
+extern void rt_spin_unlock(spinlock_t *lock) __releases(lock);
+extern void rt_spin_lock_unlock(spinlock_t *lock);
+extern int rt_spin_trylock_bh(spinlock_t *lock);
+extern int rt_spin_trylock(spinlock_t *lock);
+
+static __always_inline void spin_lock(spinlock_t *lock)
+{
+ rt_spin_lock(lock);
+}
+
+#ifdef CONFIG_LOCKDEP
+# define __spin_lock_nested(lock, subclass) \
+ rt_spin_lock_nested(lock, subclass)
+
+# define __spin_lock_nest_lock(lock, nest_lock) \
+ do { \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+ rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ } while (0)
+# define __spin_lock_irqsave_nested(lock, flags, subclass) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __spin_lock_nested(lock, subclass); \
+ } while (0)
+
+#else
+ /*
+ * Always evaluate the 'subclass' argument to avoid that the compiler
+ * warns about set-but-not-used variables when building with
+ * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
+ */
+# define __spin_lock_nested(lock, subclass) spin_lock(((void)(subclass), (lock)))
+# define __spin_lock_nest_lock(lock, subclass) spin_lock(((void)(subclass), (lock)))
+# define __spin_lock_irqsave_nested(lock, flags, subclass) \
+ spin_lock_irqsave(((void)(subclass), (lock)), flags)
+#endif
+
+#define spin_lock_nested(lock, subclass) \
+ __spin_lock_nested(lock, subclass)
+
+#define spin_lock_nest_lock(lock, nest_lock) \
+ __spin_lock_nest_lock(lock, nest_lock)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass) \
+ __spin_lock_irqsave_nested(lock, flags, subclass)
+
+static __always_inline void spin_lock_bh(spinlock_t *lock)
+{
+ /* Investigate: Drop bh when blocking ? */
+ local_bh_disable();
+ rt_spin_lock(lock);
+}
+
+static __always_inline void spin_lock_irq(spinlock_t *lock)
+{
+ rt_spin_lock(lock);
+}
+
+#define spin_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ spin_lock(lock); \
+ } while (0)
+
+static __always_inline void spin_unlock(spinlock_t *lock)
+{
+ rt_spin_unlock(lock);
+}
+
+static __always_inline void spin_unlock_bh(spinlock_t *lock)
+{
+ rt_spin_unlock(lock);
+ local_bh_enable();
+}
+
+static __always_inline void spin_unlock_irq(spinlock_t *lock)
+{
+ rt_spin_unlock(lock);
+}
+
+static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
+ unsigned long flags)
+{
+ rt_spin_unlock(lock);
+}
+
+#define spin_trylock(lock) \
+ __cond_lock(lock, rt_spin_trylock(lock))
+
+#define spin_trylock_bh(lock) \
+ __cond_lock(lock, rt_spin_trylock_bh(lock))
+
+#define spin_trylock_irq(lock) \
+ __cond_lock(lock, rt_spin_trylock(lock))
+
+#define spin_trylock_irqsave(lock, flags) \
+({ \
+ int __locked; \
+ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __locked = spin_trylock(lock); \
+ __locked; \
+})
+
+#define spin_is_contended(lock) (((void)(lock), 0))
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+ return rt_mutex_base_is_locked(&lock->lock);
+}
+
+#define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock))
+
+#include <linux/rwlock_rt.h>
+
+#endif
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index b981caafe8bf..2dfa35ffec76 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,65 +9,11 @@
* Released under the General Public License (GPL).
*/
-#if defined(CONFIG_SMP)
-# include <asm/spinlock_types.h>
-#else
-# include <linux/spinlock_types_up.h>
-#endif
-
-#include <linux/lockdep_types.h>
+#include <linux/spinlock_types_raw.h>
-typedef struct raw_spinlock {
- arch_spinlock_t raw_lock;
-#ifdef CONFIG_DEBUG_SPINLOCK
- unsigned int magic, owner_cpu;
- void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-} raw_spinlock_t;
-
-#define SPINLOCK_MAGIC 0xdead4ead
-
-#define SPINLOCK_OWNER_INIT ((void *)-1L)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RAW_SPIN_DEP_MAP_INIT(lockname) \
- .dep_map = { \
- .name = #lockname, \
- .wait_type_inner = LD_WAIT_SPIN, \
- }
-# define SPIN_DEP_MAP_INIT(lockname) \
- .dep_map = { \
- .name = #lockname, \
- .wait_type_inner = LD_WAIT_CONFIG, \
- }
-#else
-# define RAW_SPIN_DEP_MAP_INIT(lockname)
-# define SPIN_DEP_MAP_INIT(lockname)
-#endif
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_DEBUG_INIT(lockname) \
- .magic = SPINLOCK_MAGIC, \
- .owner_cpu = -1, \
- .owner = SPINLOCK_OWNER_INIT,
-#else
-# define SPIN_DEBUG_INIT(lockname)
-#endif
-
-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
- { \
- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
- SPIN_DEBUG_INIT(lockname) \
- RAW_SPIN_DEP_MAP_INIT(lockname) }
-
-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+#ifndef CONFIG_PREEMPT_RT
+/* Non PREEMPT_RT kernels map spinlock to raw_spinlock */
typedef struct spinlock {
union {
struct raw_spinlock rlock;
@@ -96,6 +42,35 @@ typedef struct spinlock {
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+#else /* !CONFIG_PREEMPT_RT */
+
+/* PREEMPT_RT kernels map spinlock to rt_mutex */
+#include <linux/rtmutex.h>
+
+typedef struct spinlock {
+ struct rt_mutex_base lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} spinlock_t;
+
+#define __SPIN_LOCK_UNLOCKED(name) \
+ { \
+ .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \
+ SPIN_DEP_MAP_INIT(name) \
+ }
+
+#define __LOCAL_SPIN_LOCK_UNLOCKED(name) \
+ { \
+ .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \
+ LOCAL_SPIN_DEP_MAP_INIT(name) \
+ }
+
+#define DEFINE_SPINLOCK(name) \
+ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
+
+#endif /* CONFIG_PREEMPT_RT */
+
#include <linux/rwlock_types.h>
#endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
new file mode 100644
index 000000000000..91cb36b65a17
--- /dev/null
+++ b/include/linux/spinlock_types_raw.h
@@ -0,0 +1,73 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+#define __LINUX_SPINLOCK_TYPES_RAW_H
+
+#include <linux/types.h>
+
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+#include <linux/lockdep_types.h>
+
+typedef struct raw_spinlock {
+ arch_spinlock_t raw_lock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} raw_spinlock_t;
+
+#define SPINLOCK_MAGIC 0xdead4ead
+
+#define SPINLOCK_OWNER_INIT ((void *)-1L)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RAW_SPIN_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_SPIN, \
+ }
+# define SPIN_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+ }
+
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+ .lock_type = LD_LOCK_PERCPU, \
+ }
+#else
+# define RAW_SPIN_DEP_MAP_INIT(lockname)
+# define SPIN_DEP_MAP_INIT(lockname)
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname) \
+ .magic = SPINLOCK_MAGIC, \
+ .owner_cpu = -1, \
+ .owner = SPINLOCK_OWNER_INIT,
+#else
+# define SPIN_DEBUG_INIT(lockname)
+#endif
+
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+{ \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ RAW_SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+#endif /* __LINUX_SPINLOCK_TYPES_RAW_H */
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index c09b6407ae1b..fc4e2d017c20 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -1,8 +1,8 @@
#ifndef __LINUX_SPINLOCK_TYPES_UP_H
#define __LINUX_SPINLOCK_TYPES_UP_H
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+# error "Please do not include this file directly."
#endif
/*
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 0ac9112c1bbe..1e84e71ca495 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -1,8 +1,8 @@
#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H
-#ifndef __LINUX_SPINLOCK_H
-# error "please don't include this file directly"
+#ifndef __LINUX_INSIDE_SPINLOCK_H
+# error "Please do not include this file directly."
#endif
#include <asm/processor.h> /* for cpu_relax() */
@@ -62,7 +62,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
#define arch_spin_is_locked(lock) ((void)(lock), 0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
-# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0)
# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
diff --git a/include/linux/splice.h b/include/linux/splice.h
index a55179fd60fc..9dec4861d09f 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -38,6 +38,7 @@ struct splice_desc {
struct file *file; /* file to read/write */
void *data; /* cookie */
} u;
+ void (*splice_eof)(struct splice_desc *sd); /* Unexpected EOF handler */
loff_t pos; /* file position */
loff_t *opos; /* sendfile: output position */
size_t num_spliced; /* number of bytes already spliced */
@@ -67,23 +68,37 @@ typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
typedef int (splice_direct_actor)(struct pipe_inode_info *,
struct splice_desc *);
-extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
- loff_t *, size_t, unsigned int,
- splice_actor *);
-extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
- struct splice_desc *, splice_actor *);
-extern ssize_t splice_to_pipe(struct pipe_inode_info *,
- struct splice_pipe_desc *);
-extern ssize_t add_to_pipe(struct pipe_inode_info *,
- struct pipe_buffer *);
-extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
- splice_direct_actor *);
-extern long do_splice(struct file *in, loff_t *off_in,
- struct file *out, loff_t *off_out,
- size_t len, unsigned int flags);
+ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags,
+ splice_actor *actor);
+ssize_t __splice_from_pipe(struct pipe_inode_info *pipe,
+ struct splice_desc *sd, splice_actor *actor);
+ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+ struct splice_pipe_desc *spd);
+ssize_t add_to_pipe(struct pipe_inode_info *pipe, struct pipe_buffer *buf);
+ssize_t vfs_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+ssize_t splice_direct_to_actor(struct file *file, struct splice_desc *sd,
+ splice_direct_actor *actor);
+ssize_t do_splice(struct file *in, loff_t *off_in, struct file *out,
+ loff_t *off_out, size_t len, unsigned int flags);
+ssize_t do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
+ loff_t *opos, size_t len, unsigned int flags);
+ssize_t splice_file_range(struct file *in, loff_t *ppos, struct file *out,
+ loff_t *opos, size_t len);
-extern long do_tee(struct file *in, struct file *out, size_t len,
- unsigned int flags);
+static inline long splice_copy_file_range(struct file *in, loff_t pos_in,
+ struct file *out, loff_t pos_out,
+ size_t len)
+{
+ return splice_file_range(in, &pos_in, out, &pos_out, len);
+}
+
+ssize_t do_tee(struct file *in, struct file *out, size_t len,
+ unsigned int flags);
+ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags);
/*
* for dynamic pipe sizing
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index 729bcbf9f5ad..28e8c8bd3944 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -120,6 +120,9 @@ static inline void spmi_controller_put(struct spmi_controller *ctrl)
int spmi_controller_add(struct spmi_controller *ctrl);
void spmi_controller_remove(struct spmi_controller *ctrl);
+struct spmi_controller *devm_spmi_controller_alloc(struct device *parent, size_t size);
+int devm_spmi_controller_add(struct device *parent, struct spmi_controller *ctrl);
+
/**
* struct spmi_driver - SPMI slave device driver
* @driver: SPMI device drivers should initialize name and owner field of
@@ -164,6 +167,9 @@ static inline void spmi_driver_unregister(struct spmi_driver *sdrv)
module_driver(__spmi_driver, spmi_driver_register, \
spmi_driver_unregister)
+struct device_node;
+
+struct spmi_device *spmi_find_device_by_of_node(struct device_node *np);
int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf);
int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf,
size_t len);
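A probe sketch using the devm helpers declared above; the platform driver, struct my_spmi_ctrl, and the transfer callbacks are hypothetical, and devm_spmi_controller_alloc() is assumed to return an ERR_PTR() on failure:

#include <linux/platform_device.h>
#include <linux/spmi.h>

struct my_spmi_ctrl {
	void __iomem *base;			/* hypothetical driver state */
};

/* Hypothetical transfer callbacks matching the spmi_controller ops. */
static int my_read_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid,
		       u16 addr, u8 *buf, size_t len);
static int my_write_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid,
			u16 addr, const u8 *buf, size_t len);

static int my_spmi_probe(struct platform_device *pdev)
{
	struct spmi_controller *ctrl;

	ctrl = devm_spmi_controller_alloc(&pdev->dev, sizeof(struct my_spmi_ctrl));
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	ctrl->read_cmd = my_read_cmd;
	ctrl->write_cmd = my_write_cmd;

	/* Teardown is now tied to pdev's device lifetime. */
	return devm_spmi_controller_add(&pdev->dev, ctrl);
}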
diff --git a/include/linux/sprintf.h b/include/linux/sprintf.h
new file mode 100644
index 000000000000..f06f7b785091
--- /dev/null
+++ b/include/linux/sprintf.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KERNEL_SPRINTF_H_
+#define _LINUX_KERNEL_SPRINTF_H_
+
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+#include <linux/stdarg.h>
+
+int num_to_str(char *buf, int size, unsigned long long num, unsigned int width);
+
+__printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
+__printf(2, 0) int vsprintf(char *buf, const char *, va_list);
+__printf(3, 4) int snprintf(char *buf, size_t size, const char *fmt, ...);
+__printf(3, 0) int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+__printf(3, 4) int scnprintf(char *buf, size_t size, const char *fmt, ...);
+__printf(3, 0) int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
+__printf(2, 3) __malloc char *kasprintf(gfp_t gfp, const char *fmt, ...);
+__printf(2, 0) __malloc char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
+__printf(2, 0) const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
+
+__scanf(2, 3) int sscanf(const char *, const char *, ...);
+__scanf(2, 0) int vsscanf(const char *, const char *, va_list);
+
+/* These are for specific cases, do not use without real need */
+extern bool no_hash_pointers;
+void hash_pointers_finalize(bool slub_debug);
+
+/* Used for Rust formatting ('%pA') */
+char *rust_fmt_argument(char *buf, char *end, const void *ptr);
+
+#endif /* _LINUX_KERNEL_SPRINTF_H_ */
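One reason scnprintf() exists alongside snprintf(): its return value is the number of bytes actually stored (never more than the space given), so calls can be chained safely when appending into a fixed buffer. A small sketch (fill_status() is hypothetical):

#include <linux/sprintf.h>

static size_t fill_status(char *buf, size_t size, int users, int refs)
{
	size_t len = 0;

	len += scnprintf(buf + len, size - len, "users: %d\n", users);
	len += scnprintf(buf + len, size - len, "refs: %d\n", refs);
	return len;				/* always less than size */
}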
diff --git a/include/linux/sram.h b/include/linux/sram.h
index 4fb405fb0480..d7dee19505c6 100644
--- a/include/linux/sram.h
+++ b/include/linux/sram.h
@@ -1,15 +1,5 @@
-/*
- * Generic SRAM Driver Interface
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Generic SRAM Driver Interface */
#ifndef __LINUX_SRAM_H__
#define __LINUX_SRAM_H__
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a0895bbf71ce..344ad51c8f6c 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -25,8 +25,12 @@ struct srcu_struct;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
- struct lock_class_key *key);
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name, struct lock_class_key *key);
+#ifndef CONFIG_TINY_SRCU
+int __init_srcu_struct_fast(struct srcu_struct *ssp, const char *name, struct lock_class_key *key);
+int __init_srcu_struct_fast_updown(struct srcu_struct *ssp, const char *name,
+ struct lock_class_key *key);
+#endif // #ifndef CONFIG_TINY_SRCU
#define init_srcu_struct(ssp) \
({ \
@@ -35,35 +39,113 @@ int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
__init_srcu_struct((ssp), #ssp, &__srcu_key); \
})
+#define init_srcu_struct_fast(ssp) \
+({ \
+ static struct lock_class_key __srcu_key; \
+ \
+ __init_srcu_struct_fast((ssp), #ssp, &__srcu_key); \
+})
+
+#define init_srcu_struct_fast_updown(ssp) \
+({ \
+ static struct lock_class_key __srcu_key; \
+ \
+ __init_srcu_struct_fast_updown((ssp), #ssp, &__srcu_key); \
+})
+
#define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
int init_srcu_struct(struct srcu_struct *ssp);
+#ifndef CONFIG_TINY_SRCU
+int init_srcu_struct_fast(struct srcu_struct *ssp);
+int init_srcu_struct_fast_updown(struct srcu_struct *ssp);
+#endif // #ifndef CONFIG_TINY_SRCU
#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+/* Values for SRCU Tree srcu_data ->srcu_reader_flavor, but also used by rcutorture. */
+#define SRCU_READ_FLAVOR_NORMAL 0x1 // srcu_read_lock().
+#define SRCU_READ_FLAVOR_NMI 0x2 // srcu_read_lock_nmisafe().
+// 0x4 was SRCU-lite, which is no longer with us; the value is reused below.
+#define SRCU_READ_FLAVOR_FAST 0x4 // srcu_read_lock_fast().
+#define SRCU_READ_FLAVOR_FAST_UPDOWN 0x8 // srcu_read_lock_fast_updown().
+#define SRCU_READ_FLAVOR_ALL (SRCU_READ_FLAVOR_NORMAL | SRCU_READ_FLAVOR_NMI | \
+ SRCU_READ_FLAVOR_FAST | SRCU_READ_FLAVOR_FAST_UPDOWN)
+ // All of the above.
+#define SRCU_READ_FLAVOR_SLOWGP (SRCU_READ_FLAVOR_FAST | SRCU_READ_FLAVOR_FAST_UPDOWN)
+ // Flavors requiring synchronize_rcu()
+ // instead of smp_mb().
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
+
#ifdef CONFIG_TINY_SRCU
#include <linux/srcutiny.h>
#elif defined(CONFIG_TREE_SRCU)
#include <linux/srcutree.h>
-#elif defined(CONFIG_SRCU)
-#error "Unknown SRCU implementation specified to kernel configuration"
#else
-/* Dummy definition for things like notifiers. Actual use gets link error. */
-struct srcu_struct { };
+#error "Unknown SRCU implementation specified to kernel configuration"
#endif
void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
void (*func)(struct rcu_head *head));
void cleanup_srcu_struct(struct srcu_struct *ssp);
-int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
-void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
void synchronize_srcu(struct srcu_struct *ssp);
+
+#define SRCU_GET_STATE_COMPLETED 0x1
+
+/**
+ * get_completed_synchronize_srcu - Return a pre-completed polled state cookie
+ *
+ * Returns a value that poll_state_synchronize_srcu() will always treat
+ * as a cookie whose grace period has already completed.
+ */
+static inline unsigned long get_completed_synchronize_srcu(void)
+{
+ return SRCU_GET_STATE_COMPLETED;
+}
+
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp);
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
+// Maximum number of unsigned long values corresponding to
+// not-yet-completed SRCU grace periods.
+#define NUM_ACTIVE_SRCU_POLL_OLDSTATE 2
+
+/**
+ * same_state_synchronize_srcu - Are two old-state values identical?
+ * @oldstate1: First old-state value.
+ * @oldstate2: Second old-state value.
+ *
+ * The two old-state values must have been obtained from either
+ * get_state_synchronize_srcu(), start_poll_synchronize_srcu(), or
+ * get_completed_synchronize_srcu(). Returns @true if the two values are
+ * identical and @false otherwise. This allows structures whose lifetimes
+ * are tracked by old-state values to push these values to a list header,
+ * allowing those structures to be slightly smaller.
+ */
+static inline bool same_state_synchronize_srcu(unsigned long oldstate1, unsigned long oldstate2)
+{
+ return oldstate1 == oldstate2;
+}
+
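Sketch of the polled grace-period API above: take a cookie, do other work while the grace period runs, and block only if it has not yet completed (retire_object() and struct my_obj are hypothetical; p is assumed to already be unreachable to new readers):

#include <linux/slab.h>
#include <linux/srcu.h>

struct my_obj {
	int payload;
};

static void retire_object(struct srcu_struct *ssp, struct my_obj *p)
{
	unsigned long cookie = start_poll_synchronize_srcu(ssp);

	/* ... unrelated work while existing readers drain ... */

	if (!poll_state_synchronize_srcu(ssp, cookie))
		synchronize_srcu(ssp);		/* grace period not over: wait */
	kfree(p);				/* no reader can still see p */
}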
+#ifdef CONFIG_NEED_SRCU_NMI_SAFE
+int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
+void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
+#else
+static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
+{
+ return __srcu_read_lock(ssp);
+}
+static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
+{
+ __srcu_read_unlock(ssp, idx);
+}
+#endif /* CONFIG_NEED_SRCU_NMI_SAFE */
+
+void srcu_init(void);
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
@@ -89,6 +171,32 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
return lock_is_held(&ssp->dep_map);
}
+/*
+ * Annotations provide deadlock detection for SRCU.
+ *
+ * Similar to other lockdep annotations, except there is an additional
+ * srcu_lock_sync(), which is basically an empty *write*-side critical section,
+ * see lock_sync() for more information.
+ */
+
+/* Annotates a srcu_read_lock() */
+static inline void srcu_lock_acquire(struct lockdep_map *map)
+{
+ lock_map_acquire_read(map);
+}
+
+/* Annotates a srcu_read_lock() */
+static inline void srcu_lock_release(struct lockdep_map *map)
+{
+ lock_map_release(map);
+}
+
+/* Annotates a synchronize_srcu() */
+static inline void srcu_lock_sync(struct lockdep_map *map)
+{
+ lock_map_sync(map);
+}
+
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
@@ -96,8 +204,13 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
return 1;
}
+#define srcu_lock_acquire(m) do { } while (0)
+#define srcu_lock_release(m) do { } while (0)
+#define srcu_lock_sync(m) do { } while (0)
+
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
/**
* srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
* @p: the pointer to fetch and protect for later dereferencing
@@ -111,7 +224,8 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
* lockdep_is_held() calls.
*/
#define srcu_dereference_check(p, ssp, c) \
- __rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu)
+ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \
+ (c) || srcu_read_lock_held(ssp), __rcu)
/**
* srcu_dereference - fetch SRCU-protected pointer for later dereferencing
@@ -145,17 +259,154 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
* a mutex that is held elsewhere while calling synchronize_srcu() or
* synchronize_srcu_expedited().
*
- * Note that srcu_read_lock() and the matching srcu_read_unlock() must
- * occur in the same context, for example, it is illegal to invoke
- * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
- * was invoked in process context.
+ * The return value from srcu_read_lock() is guaranteed to be
+ * non-negative. This value must be passed unaltered to the matching
+ * srcu_read_unlock(). Note that srcu_read_lock() and the matching
+ * srcu_read_unlock() must occur in the same context, for example, it is
+ * illegal to invoke srcu_read_unlock() in an irq handler if the matching
+ * srcu_read_lock() was invoked in process context. Or, for that matter, to
+ * invoke srcu_read_unlock() from one task and the matching srcu_read_lock()
+ * from another.
*/
static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
retval = __srcu_read_lock(ssp);
- rcu_lock_acquire(&(ssp)->dep_map);
+ srcu_lock_acquire(&ssp->dep_map);
+ return retval;
+}
+
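The canonical reader pattern for the above, with the index passed unaltered to srcu_read_unlock(); struct my_data and the gp pointer are hypothetical:

#include <linux/srcu.h>

struct my_data {
	int val;
};

static int read_value(struct srcu_struct *ssp, struct my_data __rcu **gp)
{
	struct my_data *p;
	int idx, val;

	idx = srcu_read_lock(ssp);		/* begin read-side section */
	p = srcu_dereference(*gp, ssp);		/* safe only inside the section */
	val = p ? p->val : -1;
	srcu_read_unlock(ssp, idx);		/* same idx, same context */
	return val;
}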
+/**
+ * srcu_read_lock_fast - register a new reader for an SRCU-protected structure.
+ * @ssp: srcu_struct in which to register the new reader.
+ *
+ * Enter an SRCU read-side critical section, but for a light-weight
+ * smp_mb()-free reader. See srcu_read_lock() for more information. This
+ * function is NMI-safe, in a manner similar to srcu_read_lock_nmisafe().
+ *
+ * For srcu_read_lock_fast() to be used on an srcu_struct structure,
+ * that structure must have been defined using either DEFINE_SRCU_FAST()
+ * or DEFINE_STATIC_SRCU_FAST() on the one hand or initialized with
+ * init_srcu_struct_fast() on the other. Such an srcu_struct structure
+ * cannot be passed to any non-fast variant of srcu_read_{,un}lock() or
+ * srcu_{down,up}_read(). In kernels built with CONFIG_PROVE_RCU=y,
+ * __srcu_check_read_flavor() will complain bitterly if you ignore this
+ * restriction.
+ *
+ * Grace-period auto-expediting is disabled for SRCU-fast srcu_struct
+ * structures because SRCU-fast expedited grace periods invoke
+ * synchronize_rcu_expedited(), IPIs and all. If you need expedited
+ * SRCU-fast grace periods, use synchronize_srcu_expedited().
+ *
+ * The srcu_read_lock_fast() function can be invoked only from those
+ * contexts where RCU is watching, that is, from contexts where it would
+ * be legal to invoke rcu_read_lock(). Otherwise, lockdep will complain.
+ */
+static inline struct srcu_ctr __percpu *srcu_read_lock_fast(struct srcu_struct *ssp) __acquires(ssp)
+{
+ struct srcu_ctr __percpu *retval;
+
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_fast().");
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
+ retval = __srcu_read_lock_fast(ssp);
+ rcu_try_lock_acquire(&ssp->dep_map);
+ return retval;
+}
+
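A reader sketch for the fast flavor: the cookie is a struct srcu_ctr __percpu pointer rather than an int, and the srcu_struct must have been set up with one of the _FAST variants (my_fast_srcu is hypothetical):

#include <linux/srcu.h>

DEFINE_STATIC_SRCU_FAST(my_fast_srcu);

static void fast_reader(void)
{
	struct srcu_ctr __percpu *scp;

	scp = srcu_read_lock_fast(&my_fast_srcu);
	/* ... light-weight, smp_mb()-free read-side critical section ... */
	srcu_read_unlock_fast(&my_fast_srcu, scp);
}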
+/**
+ * srcu_read_lock_fast_updown - register a new reader for an SRCU-fast-updown structure.
+ * @ssp: srcu_struct in which to register the new reader.
+ *
+ * Enter an SRCU read-side critical section, but for a light-weight
+ * smp_mb()-free reader. See srcu_read_lock() for more information.
+ * This function is compatible with srcu_down_read_fast(), but is not
+ * NMI-safe.
+ *
+ * For srcu_read_lock_fast_updown() to be used on an srcu_struct
+ * structure, that structure must have been defined using either
+ * DEFINE_SRCU_FAST_UPDOWN() or DEFINE_STATIC_SRCU_FAST_UPDOWN() on the one
+ * hand or initialized with init_srcu_struct_fast_updown() on the other.
+ * Such an srcu_struct structure cannot be passed to any non-fast-updown
+ * variant of srcu_read_{,un}lock() or srcu_{down,up}_read(). In kernels
+ * built with CONFIG_PROVE_RCU=y, __srcu_check_read_flavor() will complain
+ * bitterly if you ignore this restriction.
+ *
+ * Grace-period auto-expediting is disabled for SRCU-fast-updown
+ * srcu_struct structures because SRCU-fast-updown expedited grace periods
+ * invoke synchronize_rcu_expedited(), IPIs and all. If you need expedited
+ * SRCU-fast-updown grace periods, use synchronize_srcu_expedited().
+ *
+ * The srcu_read_lock_fast_updown() function can be invoked only from
+ * those contexts where RCU is watching, that is, from contexts where
+ * it would be legal to invoke rcu_read_lock(). Otherwise, lockdep will
+ * complain.
+ */
+static inline struct srcu_ctr __percpu *srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+__acquires(ssp)
+{
+ struct srcu_ctr __percpu *retval;
+
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_fast_updown().");
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
+ retval = __srcu_read_lock_fast_updown(ssp);
+ rcu_try_lock_acquire(&ssp->dep_map);
+ return retval;
+}
+
+/*
+ * Used by tracing, cannot be traced and cannot call lockdep.
+ * See srcu_read_lock_fast() for more information.
+ */
+static inline struct srcu_ctr __percpu *srcu_read_lock_fast_notrace(struct srcu_struct *ssp)
+ __acquires(ssp)
+{
+ struct srcu_ctr __percpu *retval;
+
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
+ retval = __srcu_read_lock_fast(ssp);
+ return retval;
+}
+
+/**
+ * srcu_down_read_fast - register a new reader for an SRCU-protected structure.
+ * @ssp: srcu_struct in which to register the new reader.
+ *
+ * Enter a semaphore-like SRCU read-side critical section, but for
+ * a light-weight smp_mb()-free reader. See srcu_read_lock_fast() and
+ * srcu_down_read() for more information.
+ *
+ * The same srcu_struct may be used concurrently by srcu_down_read_fast()
+ * and srcu_read_lock_fast(). However, the same definition/initialization
+ * requirements called out for srcu_read_lock_fast() apply.
+ */
+static inline struct srcu_ctr __percpu *srcu_down_read_fast(struct srcu_struct *ssp) __acquires(ssp)
+{
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_down_read_fast().");
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
+ return __srcu_read_lock_fast_updown(ssp);
+}
+
+/**
+ * srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure.
+ * @ssp: srcu_struct in which to register the new reader.
+ *
+ * Enter an SRCU read-side critical section, but in an NMI-safe manner.
+ * See srcu_read_lock() for more information.
+ *
+ * If srcu_read_lock_nmisafe() is ever used on an srcu_struct structure,
+ * then none of the other flavors may be used, whether before, during,
+ * or after.
+ */
+static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp)
+{
+ int retval;
+
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
+ retval = __srcu_read_lock_nmisafe(ssp);
+ rcu_try_lock_acquire(&ssp->dep_map);
return retval;
}
@@ -165,11 +416,41 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
retval = __srcu_read_lock(ssp);
return retval;
}
/**
+ * srcu_down_read - register a new reader for an SRCU-protected structure.
+ * @ssp: srcu_struct in which to register the new reader.
+ *
+ * Enter a semaphore-like SRCU read-side critical section. Note that
+ * SRCU read-side critical sections may be nested. However, it is
+ * illegal to call anything that waits on an SRCU grace period for the
+ * same srcu_struct, whether directly or indirectly. Please note that
+ * one way to indirectly wait on an SRCU grace period is to acquire
+ * a mutex that is held elsewhere while calling synchronize_srcu() or
+ * synchronize_srcu_expedited(). But if you want lockdep to help you
+ * keep this stuff straight, you should instead use srcu_read_lock().
+ *
+ * The semaphore-like nature of srcu_down_read() means that the matching
+ * srcu_up_read() can be invoked from some other context, for example,
+ * from some other task or from an irq handler. However, neither
+ * srcu_down_read() nor srcu_up_read() may be invoked from an NMI handler.
+ *
+ * Calls to srcu_down_read() may be nested, similar to the manner in
+ * which calls to down_read() may be nested. The same srcu_struct may be
+ * used concurrently by srcu_down_read() and srcu_read_lock().
+ */
+static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp)
+{
+ WARN_ON_ONCE(in_nmi());
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
+ return __srcu_read_lock(ssp);
+}
+
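Sketch of the semaphore-like usage: srcu_down_read() in the submitting task, srcu_up_read() in a work handler. struct my_req is hypothetical, and its work field is assumed to have been set up elsewhere with INIT_WORK(..., my_req_done):

#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/workqueue.h>

struct my_req {
	struct work_struct work;
	struct srcu_struct *ssp;
	int idx;
};

static void my_req_done(struct work_struct *work)
{
	struct my_req *req = container_of(work, struct my_req, work);

	/* ... consume the SRCU-protected state ... */
	srcu_up_read(req->ssp, req->idx);	/* possibly another task */
	kfree(req);
}

static void my_req_submit(struct my_req *req)
{
	req->idx = srcu_down_read(req->ssp);	/* begin read-side section */
	queue_work(system_wq, &req->work);
}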
+/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
* @ssp: srcu_struct in which to unregister the old reader.
* @idx: return value from corresponding srcu_read_lock().
@@ -180,14 +461,110 @@ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
__releases(ssp)
{
WARN_ON_ONCE(idx & ~0x1);
- rcu_lock_release(&(ssp)->dep_map);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
+ srcu_lock_release(&ssp->dep_map);
__srcu_read_unlock(ssp, idx);
}
+/**
+ * srcu_read_unlock_fast - unregister an old reader from an SRCU-protected structure.
+ * @ssp: srcu_struct in which to unregister the old reader.
+ * @scp: return value from corresponding srcu_read_lock_fast().
+ *
+ * Exit a light-weight SRCU read-side critical section.
+ */
+static inline void srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases(ssp)
+{
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
+ srcu_lock_release(&ssp->dep_map);
+ __srcu_read_unlock_fast(ssp, scp);
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_unlock_fast().");
+}
+
+/**
+ * srcu_read_unlock_fast_updown - unregister an old reader from an SRCU-fast-updown structure.
+ * @ssp: srcu_struct in which to unregister the old reader.
+ * @scp: return value from corresponding srcu_read_lock_fast_updown().
+ *
+ * Exit an SRCU-fast-updown read-side critical section.
+ */
+static inline void
+srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp) __releases(ssp)
+{
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
+ srcu_lock_release(&ssp->dep_map);
+ __srcu_read_unlock_fast_updown(ssp, scp);
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "RCU must be watching srcu_read_unlock_fast_updown().");
+}
+
+/*
+ * Used by tracing, cannot be traced and cannot call lockdep.
+ * See srcu_read_unlock_fast() for more information.
+ */
+static inline void srcu_read_unlock_fast_notrace(struct srcu_struct *ssp,
+ struct srcu_ctr __percpu *scp) __releases(ssp)
+{
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST);
+ __srcu_read_unlock_fast(ssp, scp);
+}
+
+/**
+ * srcu_up_read_fast - unregister an old reader from an SRCU-protected structure.
+ * @ssp: srcu_struct in which to unregister the old reader.
+ * @scp: return value from corresponding srcu_down_read_fast().
+ *
+ * Exit an SRCU read-side critical section, but not necessarily from
+ * the same context as the matching srcu_down_read_fast().
+ */
+static inline void srcu_up_read_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+ __releases(ssp)
+{
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) && in_nmi());
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_FAST_UPDOWN);
+ __srcu_read_unlock_fast_updown(ssp, scp);
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_up_read_fast().");
+}
+
+/**
+ * srcu_read_unlock_nmisafe - unregister an old reader from an SRCU-protected structure.
+ * @ssp: srcu_struct in which to unregister the old reader.
+ * @idx: return value from corresponding srcu_read_lock_nmisafe().
+ *
+ * Exit an SRCU read-side critical section, but in an NMI-safe manner.
+ */
+static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
+ __releases(ssp)
+{
+ WARN_ON_ONCE(idx & ~0x1);
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI);
+ rcu_lock_release(&ssp->dep_map);
+ __srcu_read_unlock_nmisafe(ssp, idx);
+}
+
/* Used by tracing, cannot be traced and cannot call lockdep. */
static inline notrace void
srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
{
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
+ __srcu_read_unlock(ssp, idx);
+}
+
+/**
+ * srcu_up_read - unregister an old reader from an SRCU-protected structure.
+ * @ssp: srcu_struct in which to unregister the old reader.
+ * @idx: return value from corresponding srcu_down_read().
+ *
+ * Exit an SRCU read-side critical section, but not necessarily from
+ * the same context as the matching srcu_down_read().
+ */
+static inline void srcu_up_read(struct srcu_struct *ssp, int idx)
+ __releases(ssp)
+{
+ WARN_ON_ONCE(idx & ~0x1);
+ WARN_ON_ONCE(in_nmi());
+ srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL);
__srcu_read_unlock(ssp, idx);
}
@@ -205,4 +582,33 @@ static inline void smp_mb__after_srcu_read_unlock(void)
/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
}
+/**
+ * smp_mb__after_srcu_read_lock - ensure full ordering after srcu_read_lock
+ *
+ * Converts the preceding srcu_read_lock into a two-way memory barrier.
+ *
+ * Call this after srcu_read_lock, to guarantee that all memory operations
+ * that occur after smp_mb__after_srcu_read_lock will appear to happen after
+ * the preceding srcu_read_lock.
+ */
+static inline void smp_mb__after_srcu_read_lock(void)
+{
+ /* __srcu_read_lock has smp_mb() internally so nothing to do here. */
+}
+
+DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
+ _T->idx = srcu_read_lock(_T->lock),
+ srcu_read_unlock(_T->lock, _T->idx),
+ int idx)
+
+DEFINE_LOCK_GUARD_1(srcu_fast, struct srcu_struct,
+ _T->scp = srcu_read_lock_fast(_T->lock),
+ srcu_read_unlock_fast(_T->lock, _T->scp),
+ struct srcu_ctr __percpu *scp)
+
+DEFINE_LOCK_GUARD_1(srcu_fast_notrace, struct srcu_struct,
+ _T->scp = srcu_read_lock_fast_notrace(_T->lock),
+ srcu_read_unlock_fast_notrace(_T->lock, _T->scp),
+ struct srcu_ctr __percpu *scp)
+
#endif
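Usage sketch for the srcu guard defined above: the read-side section ends automatically at scope exit (struct my_data and gp are hypothetical, as in the earlier reader sketch):

#include <linux/cleanup.h>
#include <linux/srcu.h>

static int peek_value(struct srcu_struct *ssp, struct my_data __rcu **gp)
{
	struct my_data *p;

	guard(srcu)(ssp);			/* srcu_read_lock()/unlock() pair */
	p = srcu_dereference(*gp, ssp);
	return p ? p->val : -1;
}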
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index 0e0cf4d6a72a..e0698024667a 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -15,10 +15,10 @@
struct srcu_struct {
short srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */
- unsigned short srcu_idx; /* Current reader array element in bit 0x2. */
- unsigned short srcu_idx_max; /* Furthest future srcu_idx request. */
u8 srcu_gp_running; /* GP workqueue running? */
u8 srcu_gp_waiting; /* GP waiting for readers? */
+ unsigned long srcu_idx; /* Current reader array element in bit 0x2. */
+ unsigned long srcu_idx_max; /* Furthest future srcu_idx request. */
struct swait_queue_head srcu_wq;
/* Last srcu_read_unlock() wakes GP. */
struct rcu_head *srcu_cb_head; /* Pending callbacks: Head. */
@@ -31,7 +31,7 @@ struct srcu_struct {
void srcu_drive_gp(struct work_struct *wp);
-#define __SRCU_STRUCT_INIT(name, __ignored) \
+#define __SRCU_STRUCT_INIT(name, __ignored, ___ignored, ____ignored) \
{ \
.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq), \
.srcu_cb_tail = &name.srcu_cb_head, \
@@ -44,9 +44,25 @@ void srcu_drive_gp(struct work_struct *wp);
* Tree SRCU, which needs some per-CPU data.
*/
#define DEFINE_SRCU(name) \
- struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
+ struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
#define DEFINE_STATIC_SRCU(name) \
- static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
+#define DEFINE_SRCU_FAST(name) DEFINE_SRCU(name)
+#define DEFINE_STATIC_SRCU_FAST(name) \
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
+#define DEFINE_SRCU_FAST_UPDOWN(name) DEFINE_SRCU(name)
+#define DEFINE_STATIC_SRCU_FAST_UPDOWN(name) \
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name, name)
+
+// Dummy structure for srcu_notifier_head.
+struct srcu_usage { };
+#define __SRCU_USAGE_INIT(name) { }
+#define __init_srcu_struct_fast __init_srcu_struct
+#define __init_srcu_struct_fast_updown __init_srcu_struct
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
+#define init_srcu_struct_fast init_srcu_struct
+#define init_srcu_struct_fast_updown init_srcu_struct
+#endif // #ifndef CONFIG_DEBUG_LOCK_ALLOC
void synchronize_srcu(struct srcu_struct *ssp);
@@ -60,11 +76,46 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
int idx;
+ preempt_disable(); // Needed for PREEMPT_LAZY
idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
- WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
+ WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
+ preempt_enable();
return idx;
}
+struct srcu_ctr;
+
+static inline int __srcu_ptr_to_ctr(struct srcu_struct *ssp, struct srcu_ctr __percpu *scpp)
+{
+ return (int)(intptr_t)(struct srcu_ctr __force __kernel *)scpp;
+}
+
+static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ssp, int idx)
+{
+ return (struct srcu_ctr __percpu *)(intptr_t)idx;
+}
+
+static inline struct srcu_ctr __percpu *__srcu_read_lock_fast(struct srcu_struct *ssp)
+{
+ return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
+}
+
+static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+{
+ __srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
+}
+
+static inline struct srcu_ctr __percpu *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+{
+ return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
+}
+
+static inline
+void __srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+{
+ __srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
+}
+
static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
synchronize_srcu(ssp);
@@ -75,17 +126,22 @@ static inline void srcu_barrier(struct srcu_struct *ssp)
synchronize_srcu(ssp);
}
+static inline void srcu_expedite_current(struct srcu_struct *ssp) { }
+#define srcu_check_read_flavor(ssp, read_flavor) do { } while (0)
+
/* Defined here to avoid size increase for non-torture kernels. */
static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
char *tt, char *tf)
{
int idx;
- idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
- pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
+ idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;
+ pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd) gp: %lu->%lu\n",
tt, tf, idx,
- READ_ONCE(ssp->srcu_lock_nesting[!idx]),
- READ_ONCE(ssp->srcu_lock_nesting[idx]));
+ data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),
+ data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])),
+ data_race(READ_ONCE(ssp->srcu_idx)),
+ data_race(READ_ONCE(ssp->srcu_idx_max)));
}
#endif
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 9cfcc8a756ae..d6f978b50472 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -17,14 +17,21 @@
struct srcu_node;
struct srcu_struct;
+/* One element of the srcu_data srcu_ctrs array. */
+struct srcu_ctr {
+ atomic_long_t srcu_locks; /* Locks per CPU. */
+ atomic_long_t srcu_unlocks; /* Unlocks per CPU. */
+};
+
/*
* Per-CPU structure feeding into leaf srcu_node, similar in function
* to rcu_node.
*/
struct srcu_data {
/* Read-side state. */
- unsigned long srcu_lock_count[2]; /* Locks per CPU. */
- unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */
+ struct srcu_ctr srcu_ctrs[2]; /* Locks and unlocks per CPU. */
+ int srcu_reader_flavor; /* Reader flavor for srcu_struct structure? */
+ /* Values: SRCU_READ_FLAVOR_.* */
/* Update-side state. */
spinlock_t __private lock ____cacheline_internodealigned_in_smp;
@@ -35,6 +42,8 @@ struct srcu_data {
struct timer_list delay_work; /* Delay for CB invoking */
struct work_struct work; /* Context for CB invoking. */
struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */
+ struct rcu_head srcu_ec_head; /* For srcu_expedite_current() use. */
+ int srcu_ec_state; /* State for srcu_expedite_current(). */
struct srcu_node *mynode; /* Leaf srcu_node. */
unsigned long grpmask; /* Mask for leaf srcu_node */
/* ->srcu_data_have_cbs[]. */
@@ -47,11 +56,9 @@ struct srcu_data {
*/
struct srcu_node {
spinlock_t __private lock;
- unsigned long srcu_have_cbs[4]; /* GP seq for children */
- /* having CBs, but only */
- /* is > ->srcu_gq_seq. */
- unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs */
- /* have CBs for given GP? */
+ unsigned long srcu_have_cbs[4]; /* GP seq for children having CBs, but only */
+ /* if greater than ->srcu_gp_seq. */
+ unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs have CBs for given GP? */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
struct srcu_node *srcu_parent; /* Next up in tree. */
int grplo; /* Least CPU for node. */
@@ -59,21 +66,25 @@ struct srcu_node {
};
/*
- * Per-SRCU-domain structure, similar in function to rcu_state.
+ * Per-SRCU-domain structure, update-side data linked from srcu_struct.
*/
-struct srcu_struct {
- struct srcu_node node[NUM_RCU_NODES]; /* Combining tree. */
+struct srcu_usage {
+ struct srcu_node *node; /* Combining tree. */
struct srcu_node *level[RCU_NUM_LVLS + 1];
/* First node at each level. */
+ int srcu_size_state; /* Small-to-big transition state. */
struct mutex srcu_cb_mutex; /* Serialize CB preparation. */
- spinlock_t __private lock; /* Protect counters */
+ spinlock_t __private lock; /* Protect counters and size state. */
struct mutex srcu_gp_mutex; /* Serialize GP work. */
- unsigned int srcu_idx; /* Current rdr array element. */
unsigned long srcu_gp_seq; /* Grace-period seq #. */
unsigned long srcu_gp_seq_needed; /* Latest gp_seq needed. */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
+ unsigned long srcu_gp_start; /* Last GP start timestamp (jiffies) */
unsigned long srcu_last_gp_end; /* Last GP end timestamp (ns) */
- struct srcu_data __percpu *sda; /* Per-CPU srcu_data array. */
+ unsigned long srcu_size_jiffies; /* Current contention-measurement interval. */
+ unsigned long srcu_n_lock_retries; /* Contention events in current interval. */
+ unsigned long srcu_n_exp_nodelay; /* # expedited no-delays in current GP phase. */
+ bool sda_is_static; /* May ->sda be passed to free_percpu()? */
unsigned long srcu_barrier_seq; /* srcu_barrier seq #. */
struct mutex srcu_barrier_mutex; /* Serialize barrier ops. */
struct completion srcu_barrier_completion;
@@ -81,24 +92,92 @@ struct srcu_struct {
atomic_t srcu_barrier_cpu_cnt; /* # CPUs not yet posting a */
/* callback for the barrier */
/* operation. */
+ unsigned long reschedule_jiffies;
+ unsigned long reschedule_count;
struct delayed_work work;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct srcu_struct *srcu_ssp;
+};
+
+/*
+ * Per-SRCU-domain structure, similar in function to rcu_state.
+ */
+struct srcu_struct {
+ struct srcu_ctr __percpu *srcu_ctrp;
+ struct srcu_data __percpu *sda; /* Per-CPU srcu_data array. */
+ u8 srcu_reader_flavor;
struct lockdep_map dep_map;
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+ struct srcu_usage *srcu_sup; /* Update-side data. */
};
+// Values for size state variable (->srcu_size_state). Once the state
+// has been set to SRCU_SIZE_ALLOC, the grace-period code advances through
+// this state machine one step per grace period until the SRCU_SIZE_BIG state
+// is reached. Otherwise, the state machine remains in the SRCU_SIZE_SMALL
+// state indefinitely.
+#define SRCU_SIZE_SMALL 0 // No srcu_node combining tree, ->node == NULL
+#define SRCU_SIZE_ALLOC 1 // An srcu_node tree is being allocated, initialized,
+ // and then referenced by ->node. It will not be used.
+#define SRCU_SIZE_WAIT_BARRIER 2 // The srcu_node tree starts being used by everything
+ // except call_srcu(), especially by srcu_barrier().
+ // By the end of this state, all CPUs and threads
+ // are aware of this tree's existence.
+#define SRCU_SIZE_WAIT_CALL 3 // The srcu_node tree starts being used by call_srcu().
+ // By the end of this state, all of the call_srcu()
+ // invocations that were running on a non-boot CPU
+ // and using the boot CPU's callback queue will have
+ // completed.
+#define SRCU_SIZE_WAIT_CBS1 4 // Don't trust the ->srcu_have_cbs[] grace-period
+#define SRCU_SIZE_WAIT_CBS2 5 // sequence elements or the ->srcu_data_have_cbs[]
+#define SRCU_SIZE_WAIT_CBS3 6 // CPU-bitmask elements until all four elements of
+#define SRCU_SIZE_WAIT_CBS4 7 // each array have been initialized.
+#define SRCU_SIZE_BIG 8 // The srcu_node combining tree is fully initialized
+ // and all aspects of it are being put to use.
+
/* Values for state variable (bottom bits of ->srcu_gp_seq). */
#define SRCU_STATE_IDLE 0
#define SRCU_STATE_SCAN1 1
#define SRCU_STATE_SCAN2 2
-#define __SRCU_STRUCT_INIT(name, pcpu_name) \
-{ \
- .sda = &pcpu_name, \
- .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
- .srcu_gp_seq_needed = -1UL, \
- .work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \
- __SRCU_DEP_MAP_INIT(name) \
+/* Values for srcu_expedite_current() state (->srcu_ec_state). */
+#define SRCU_EC_IDLE 0
+#define SRCU_EC_PENDING 1
+#define SRCU_EC_REPOST 2
+
+/*
+ * Values for initializing gp sequence fields. Higher values allow wraparounds to
+ * occur earlier.
+ * The second value with state is useful in the case of static initialization of
+ * srcu_usage where srcu_gp_seq_needed is expected to have some state value in its
+ * lower bits (or else it will appear to be already initialized within
+ * the call to check_init_srcu_struct()).
+ */
+#define SRCU_GP_SEQ_INITIAL_VAL ((0UL - 100UL) << RCU_SEQ_CTR_SHIFT)
+#define SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE (SRCU_GP_SEQ_INITIAL_VAL - 1)
+
+#define __SRCU_USAGE_INIT(name) \
+{ \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+ .srcu_gp_seq = SRCU_GP_SEQ_INITIAL_VAL, \
+ .srcu_gp_seq_needed = SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE, \
+ .srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL, \
+ .work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \
+}
+
+#define __SRCU_STRUCT_INIT_COMMON(name, usage_name, fast) \
+ .srcu_sup = &usage_name, \
+ .srcu_reader_flavor = fast, \
+ __SRCU_DEP_MAP_INIT(name)
+
+#define __SRCU_STRUCT_INIT_MODULE(name, usage_name, fast) \
+{ \
+ __SRCU_STRUCT_INIT_COMMON(name, usage_name, fast) \
+}
+
+#define __SRCU_STRUCT_INIT(name, usage_name, pcpu_name, fast) \
+{ \
+ .sda = &pcpu_name, \
+ .srcu_ctrp = &pcpu_name.srcu_ctrs[0], \
+ __SRCU_STRUCT_INIT_COMMON(name, usage_name, fast) \
}
/*
@@ -119,23 +198,171 @@ struct srcu_struct {
* init_srcu_struct(&my_srcu);
*
* See include/linux/percpu-defs.h for the rules on per-CPU variables.
+ *
+ * DEFINE_SRCU_FAST() and DEFINE_STATIC_SRCU_FAST() create an srcu_struct
+ * and associated structures whose readers must be of the SRCU-fast variety.
+ * DEFINE_SRCU_FAST_UPDOWN() and DEFINE_STATIC_SRCU_FAST_UPDOWN() create
+ * an srcu_struct and associated structures whose readers must be of the
+ * SRCU-fast-updown variety. The key point (aside from error checking) with
+ * both varieties is that the grace periods must use synchronize_rcu()
+ * instead of smp_mb(), and given that the first (for example)
+ * srcu_read_lock_fast() might race with the first synchronize_srcu(),
+ * this difference must be specified at initialization time.
*/
#ifdef MODULE
-# define __DEFINE_SRCU(name, is_static) \
- is_static struct srcu_struct name; \
- struct srcu_struct * const __srcu_struct_##name \
+# define __DEFINE_SRCU(name, fast, is_static) \
+ static struct srcu_usage name##_srcu_usage = __SRCU_USAGE_INIT(name##_srcu_usage); \
+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT_MODULE(name, name##_srcu_usage, \
+ fast); \
+ extern struct srcu_struct * const __srcu_struct_##name; \
+ struct srcu_struct * const __srcu_struct_##name \
__section("___srcu_struct_ptrs") = &name
#else
-# define __DEFINE_SRCU(name, is_static) \
- static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data); \
- is_static struct srcu_struct name = \
- __SRCU_STRUCT_INIT(name, name##_srcu_data)
+# define __DEFINE_SRCU(name, fast, is_static) \
+ static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data); \
+ static struct srcu_usage name##_srcu_usage = __SRCU_USAGE_INIT(name##_srcu_usage); \
+ is_static struct srcu_struct name = \
+ __SRCU_STRUCT_INIT(name, name##_srcu_usage, name##_srcu_data, fast)
#endif
-#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
-#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
+#define DEFINE_SRCU(name) __DEFINE_SRCU(name, 0, /* not static */)
+#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, 0, static)
+#define DEFINE_SRCU_FAST(name) __DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST, /* not static */)
+#define DEFINE_STATIC_SRCU_FAST(name) __DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST, static)
+#define DEFINE_SRCU_FAST_UPDOWN(name) __DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST_UPDOWN, \
+ /* not static */)
+#define DEFINE_STATIC_SRCU_FAST_UPDOWN(name) \
+ __DEFINE_SRCU(name, SRCU_READ_FLAVOR_FAST_UPDOWN, static)
+int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void synchronize_srcu_expedited(struct srcu_struct *ssp);
void srcu_barrier(struct srcu_struct *ssp);
+void srcu_expedite_current(struct srcu_struct *ssp);
void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf);
+// Converts a per-CPU pointer to an ->srcu_ctrs[] array element to that
+// element's index.
+static inline int __srcu_ptr_to_ctr(struct srcu_struct *ssp, struct srcu_ctr __percpu *scpp)
+{
+ return scpp - &ssp->sda->srcu_ctrs[0];
+}
+
+// Converts an integer to a per-CPU pointer to the corresponding
+// ->srcu_ctrs[] array element.
+static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ssp, int idx)
+{
+ return &ssp->sda->srcu_ctrs[idx];
+}
+
+/*
+ * Counts the new reader in the appropriate per-CPU element of the
+ * srcu_struct. Returns a pointer that must be passed to the matching
+ * srcu_read_unlock_fast().
+ *
+ * Note that both this_cpu_inc() and atomic_long_inc() are RCU read-side
+ * critical sections either because they disables interrupts, because
+ * they are a single instruction, or because they are read-modify-write
+ * atomic operations, depending on the whims of the architecture.
+ * This matters because the SRCU-fast grace-period mechanism uses either
+ * synchronize_rcu() or synchronize_rcu_expedited(), that is, RCU,
+ * *not* SRCU, in order to eliminate the need for the read-side smp_mb()
+ * invocations that are used by srcu_read_lock() and srcu_read_unlock().
+ * The __srcu_read_unlock_fast() function also relies on this same RCU
+ * (again, *not* SRCU) trick to eliminate the need for smp_mb().
+ *
+ * The key point behind this RCU trick is that if any part of a given
+ * RCU reader precedes the beginning of a given RCU grace period, then
+ * the entirety of that RCU reader and everything preceding it happens
+ * before the end of that same RCU grace period. Similarly, if any part
+ * of a given RCU reader follows the end of a given RCU grace period,
+ * then the entirety of that RCU reader and everything following it
+ * happens after the beginning of that same RCU grace period. Therefore,
+ * the operations labeled Y in __srcu_read_lock_fast() and those labeled Z
+ * in __srcu_read_unlock_fast() are ordered against the corresponding SRCU
+ * read-side critical section from the viewpoint of the SRCU grace period.
+ * This is all the ordering that is required, hence no calls to smp_mb().
+ *
+ * This means that __srcu_read_lock_fast() is not all that fast
+ * on architectures that support NMIs but do not supply NMI-safe
+ * implementations of this_cpu_inc().
+ */
+static inline struct srcu_ctr __percpu notrace *__srcu_read_lock_fast(struct srcu_struct *ssp)
+{
+ struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
+
+ if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
+ this_cpu_inc(scp->srcu_locks.counter); // Y, and implicit RCU reader.
+ else
+ atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
+ barrier(); /* Avoid leaking the critical section. */
+ return scp;
+}
+
+/*
+ * Removes the count for the old reader from the appropriate
+ * per-CPU element of the srcu_struct. Note that this may well be a
+ * different CPU than that which was incremented by the corresponding
+ * srcu_read_lock_fast(), but it must be within the same task.
+ *
+ * Please see the __srcu_read_lock_fast() function's header comment for
+ * information on implicit RCU readers and NMI safety.
+ */
+static inline void notrace
+__srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+{
+ barrier(); /* Avoid leaking the critical section. */
+ if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
+ this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
+ else
+ atomic_long_inc(raw_cpu_ptr(&scp->srcu_unlocks)); // Z, and implicit RCU reader.
+}
+
+/*
+ * Counts the new reader in the appropriate per-CPU element of the
+ * srcu_struct. Returns a pointer that must be passed to the matching
+ * srcu_read_unlock_fast_updown(). This type of reader is compatible
+ * with srcu_down_read_fast() and srcu_up_read_fast().
+ *
+ * See the __srcu_read_lock_fast() comment for more details.
+ */
+static inline
+struct srcu_ctr __percpu notrace *__srcu_read_lock_fast_updown(struct srcu_struct *ssp)
+{
+ struct srcu_ctr __percpu *scp = READ_ONCE(ssp->srcu_ctrp);
+
+ if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
+ this_cpu_inc(scp->srcu_locks.counter); // Y, and implicit RCU reader.
+ else
+ atomic_long_inc(raw_cpu_ptr(&scp->srcu_locks)); // Y, and implicit RCU reader.
+ barrier(); /* Avoid leaking the critical section. */
+ return scp;
+}
+
+/*
+ * Removes the count for the old reader from the appropriate
+ * per-CPU element of the srcu_struct. Note that this may well be a
+ * different CPU than that which was incremented by the corresponding
+ * srcu_read_lock_fast_updown(), but it must be within the same task.
+ *
+ * Please see the __srcu_read_lock_fast() function's header comment for
+ * information on implicit RCU readers and NMI safety.
+ */
+static inline void notrace
+__srcu_read_unlock_fast_updown(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
+{
+ barrier(); /* Avoid leaking the critical section. */
+ if (!IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
+ this_cpu_inc(scp->srcu_unlocks.counter); // Z, and implicit RCU reader.
+ else
+ atomic_long_inc(raw_cpu_ptr(&scp->srcu_unlocks)); // Z, and implicit RCU reader.
+}
+
+void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor);
+
+// Record SRCU-reader usage type only for CONFIG_PROVE_RCU=y kernels.
+static inline void srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor)
+{
+ if (IS_ENABLED(CONFIG_PROVE_RCU))
+ __srcu_check_read_flavor(ssp, read_flavor);
+}
+
#endif
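
A minimal caller-side sketch of the SRCU-fast API above, assuming the srcu.h
wrappers srcu_read_lock_fast()/srcu_read_unlock_fast() that pair with the
__-prefixed primitives; the lock side now returns a struct srcu_ctr cookie
rather than an integer index, and that cookie must be handed back at unlock:

/* Sketch only; "struct foo" and global_foo are hypothetical. */
#include <linux/srcu.h>

DEFINE_STATIC_SRCU_FAST(my_srcu);	/* Readers must be SRCU-fast. */

struct foo {
	int val;
};
static struct foo __rcu *global_foo;

static int read_foo_val(void)
{
	struct srcu_ctr __percpu *cookie;
	struct foo *p;
	int ret = 0;

	cookie = srcu_read_lock_fast(&my_srcu);	/* No smp_mb() on this path. */
	p = srcu_dereference(global_foo, &my_srcu);
	if (p)
		ret = READ_ONCE(p->val);
	srcu_read_unlock_fast(&my_srcu, cookie);	/* Pass the cookie back. */
	return ret;
}
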
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 0d5a2691e7e9..e1fb11e0f12c 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -7,7 +7,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
@@ -285,7 +285,7 @@ struct ssb_device {
/* Go from struct device to struct ssb_device. */
static inline
-struct ssb_device * dev_to_ssb_dev(struct device *dev)
+struct ssb_device * dev_to_ssb_dev(const struct device *dev)
{
struct __ssb_dev_wrapper *wrap;
wrap = container_of(dev, struct __ssb_dev_wrapper, dev);
@@ -325,7 +325,7 @@ struct ssb_driver {
struct device_driver drv;
};
-#define drv_to_ssb_drv(_drv) container_of(_drv, struct ssb_driver, drv)
+#define drv_to_ssb_drv(_drv) container_of_const(_drv, struct ssb_driver, drv)
extern int __ssb_driver_register(struct ssb_driver *drv, struct module *owner);
#define ssb_driver_register(drv) \
@@ -621,14 +621,6 @@ extern u32 ssb_dma_translation(struct ssb_device *dev);
#define SSB_DMA_TRANSLATION_MASK 0xC0000000
#define SSB_DMA_TRANSLATION_SHIFT 30
-static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev)
-{
-#ifdef CONFIG_SSB_DEBUG
- printk(KERN_ERR "SSB: BUG! Calling DMA API for "
- "unsupported bustype %d\n", dev->bus->bustype);
-#endif /* DEBUG */
-}
-
#ifdef CONFIG_SSB_PCIHOST
/* PCI-host wrapper driver */
extern int ssb_pcihost_register(struct pci_driver *driver);
diff --git a/include/linux/ssb/ssb_driver_extif.h b/include/linux/ssb/ssb_driver_extif.h
index 3f8bc973d67d..19253bfacd1a 100644
--- a/include/linux/ssb/ssb_driver_extif.h
+++ b/include/linux/ssb/ssb_driver_extif.h
@@ -197,7 +197,7 @@ struct ssb_extif {
static inline bool ssb_extif_available(struct ssb_extif *extif)
{
- return 0;
+ return false;
}
static inline
diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h
index 15ba0df1ee0d..28c145a51e57 100644
--- a/include/linux/ssb/ssb_driver_gige.h
+++ b/include/linux/ssb/ssb_driver_gige.h
@@ -95,7 +95,7 @@ static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev)
struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
if (dev)
return (dev->dev->bus->chip_id == 0x4785);
- return 0;
+ return false;
}
/* Get the device MAC address */
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 6bb4bc1a5f54..2cc21ffcdaf9 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -1,33 +1,257 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * A generic stack depot implementation
+ * Stack depot - a stack trace storage that avoids duplication.
+ *
+ * Stack depot is intended to be used by subsystems that need to store and
+ * later retrieve many potentially duplicated stack traces without wasting
+ * memory.
+ *
+ * For example, KASAN needs to save allocation and free stack traces for each
+ * object. Storing two stack traces per object requires a lot of memory (e.g.
+ * SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
+ * stack traces often repeat, using stack depot allows saving about 100x space.
*
* Author: Alexander Potapenko <glider@google.com>
* Copyright (C) 2016 Google, Inc.
*
- * Based on code by Dmitry Chernenkov.
+ * Based on the code by Dmitry Chernenkov.
*/
#ifndef _LINUX_STACKDEPOT_H
#define _LINUX_STACKDEPOT_H
+#include <linux/gfp.h>
+
typedef u32 depot_stack_handle_t;
-depot_stack_handle_t stack_depot_save(unsigned long *entries,
- unsigned int nr_entries, gfp_t gfp_flags);
+/*
+ * Number of bits in the handle that stack depot doesn't use. Users may store
+ * information in them via stack_depot_set/get_extra_bits.
+ */
+#define STACK_DEPOT_EXTRA_BITS 5
-unsigned int stack_depot_fetch(depot_stack_handle_t handle,
- unsigned long **entries);
+#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
-unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries);
+#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
+#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
+#define DEPOT_STACK_ALIGN 4
+#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
+#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
+ STACK_DEPOT_EXTRA_BITS)
+
+#ifdef CONFIG_STACKDEPOT
+/* Compact structure that stores a reference to a stack. */
+union handle_parts {
+ depot_stack_handle_t handle;
+ struct {
+ u32 pool_index_plus_1 : DEPOT_POOL_INDEX_BITS;
+ u32 offset : DEPOT_OFFSET_BITS;
+ u32 extra : STACK_DEPOT_EXTRA_BITS;
+ };
+};
+struct stack_record {
+ struct list_head hash_list; /* Links in the hash table */
+ u32 hash; /* Hash in hash table */
+ u32 size; /* Number of stored frames */
+ union handle_parts handle; /* Constant after initialization */
+ refcount_t count;
+ union {
+ unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */
+ struct {
+ /*
+ * An important invariant of the implementation is to
+ * only place a stack record onto the freelist iff its
+ * refcount is zero. Because stack records with a zero
+ * refcount are never considered as valid, it is safe to
+ * union @entries and freelist management state below.
+ * Conversely, as soon as an entry is off the freelist
+ * and its refcount becomes non-zero, the below must not
+ * be accessed until being placed back on the freelist.
+ */
+ struct list_head free_list; /* Links in the freelist */
+ unsigned long rcu_state; /* RCU cookie */
+ };
+ };
+};
+#endif
+
+typedef u32 depot_flags_t;
+
+/*
+ * Flags that can be passed to stack_depot_save_flags(); see the comment next
+ * to its declaration for more details.
+ */
+#define STACK_DEPOT_FLAG_CAN_ALLOC ((depot_flags_t)0x0001)
+#define STACK_DEPOT_FLAG_GET ((depot_flags_t)0x0002)
+
+#define STACK_DEPOT_FLAGS_NUM 2
+#define STACK_DEPOT_FLAGS_MASK ((depot_flags_t)((1 << STACK_DEPOT_FLAGS_NUM) - 1))
+
+/*
+ * Using stack depot requires its initialization, which can be done in 3 ways:
+ *
+ * 1. Selecting CONFIG_STACKDEPOT_ALWAYS_INIT. This option is suitable in
+ * scenarios where it's known at compile time that stack depot will be used.
+ * Enabling this config makes the kernel initialize stack depot in mm_init().
+ *
+ * 2. Calling stack_depot_request_early_init() during early boot, before
+ * stack_depot_early_init() in mm_init() completes. For example, this can
+ * be done when evaluating kernel boot parameters.
+ *
+ * 3. Calling stack_depot_init(). Possible after boot is complete. This option
+ * is recommended for modules initialized later in the boot process, after
+ * mm_init() completes.
+ *
+ * stack_depot_init() and stack_depot_request_early_init() can be called
+ * regardless of whether CONFIG_STACKDEPOT is enabled and are no-op when this
+ * config is disabled. The save/fetch/print stack depot functions can only be
+ * called from the code that makes sure CONFIG_STACKDEPOT is enabled _and_
+ * initializes stack depot via one of the ways listed above.
+ */
#ifdef CONFIG_STACKDEPOT
int stack_depot_init(void);
+
+void __init stack_depot_request_early_init(void);
+
+/* Must be only called from mm_init(). */
+int __init stack_depot_early_init(void);
#else
-static inline int stack_depot_init(void)
-{
- return 0;
-}
-#endif /* CONFIG_STACKDEPOT */
+static inline int stack_depot_init(void) { return 0; }
+
+static inline void stack_depot_request_early_init(void) { }
+
+static inline int stack_depot_early_init(void) { return 0; }
+#endif
+
+/**
+ * stack_depot_save_flags - Save a stack trace to stack depot
+ *
+ * @entries: Pointer to the stack trace
+ * @nr_entries: Number of frames in the stack
+ * @alloc_flags: Allocation GFP flags
+ * @depot_flags: Stack depot flags
+ *
+ * Saves a stack trace from @entries array of size @nr_entries.
+ *
+ * If STACK_DEPOT_FLAG_CAN_ALLOC is set in @depot_flags, stack depot can
+ * replenish the stack pools in case no space is left (allocates using GFP
+ * flags of @alloc_flags). Otherwise, stack depot avoids any allocations and
+ * fails if no space is left to store the stack trace.
+ *
+ * If STACK_DEPOT_FLAG_GET is set in @depot_flags, stack depot will increment
+ * the refcount on the saved stack trace if it already exists in stack depot.
+ * Users of this flag must also call stack_depot_put() when keeping the stack
+ * trace is no longer required to avoid overflowing the refcount.
+ *
+ * If the provided stack trace comes from the interrupt context, only the part
+ * up to the interrupt entry is saved.
+ *
+ * Context: Any context, but unsetting STACK_DEPOT_FLAG_CAN_ALLOC is required if
+ * alloc_pages() cannot be used from the current context. Currently
+ * this is the case for contexts where neither %GFP_ATOMIC nor
+ * %GFP_NOWAIT can be used (NMI, raw_spin_lock).
+ *
+ * Return: Handle of the stack struct stored in depot, 0 on failure
+ */
+depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t alloc_flags,
+ depot_flags_t depot_flags);
+
+/**
+ * stack_depot_save - Save a stack trace to stack depot
+ *
+ * @entries: Pointer to the stack trace
+ * @nr_entries: Number of frames in the stack
+ * @alloc_flags: Allocation GFP flags
+ *
+ * Does not increment the refcount on the saved stack trace; see
+ * stack_depot_save_flags() for more details.
+ *
+ * Context: Contexts where allocations via alloc_pages() are allowed;
+ * see stack_depot_save_flags() for more details.
+ *
+ * Return: Handle of the stack trace stored in depot, 0 on failure
+ */
+depot_stack_handle_t stack_depot_save(unsigned long *entries,
+ unsigned int nr_entries, gfp_t alloc_flags);
+
+/**
+ * __stack_depot_get_stack_record - Get a pointer to a stack_record struct
+ *
+ * @handle: Stack depot handle
+ *
+ * This function is only for internal purposes.
+ *
+ * Return: Returns a pointer to a stack_record struct
+ */
+struct stack_record *__stack_depot_get_stack_record(depot_stack_handle_t handle);
+
+/**
+ * stack_depot_fetch - Fetch a stack trace from stack depot
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ * @entries: Pointer to store the address of the stack trace
+ *
+ * Return: Number of frames for the fetched stack
+ */
+unsigned int stack_depot_fetch(depot_stack_handle_t handle,
+ unsigned long **entries);
+
+/**
+ * stack_depot_print - Print a stack trace from stack depot
+ *
+ * @stack: Stack depot handle returned from stack_depot_save()
+ */
+void stack_depot_print(depot_stack_handle_t stack);
+
+/**
+ * stack_depot_snprint - Print a stack trace from stack depot into a buffer
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ * @buf: Pointer to the print buffer
+ * @size: Size of the print buffer
+ * @spaces: Number of leading spaces to print
+ *
+ * Return: Number of bytes printed
+ */
+int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
+ int spaces);
+
+/**
+ * stack_depot_put - Drop a reference to a stack trace from stack depot
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ *
+ * The stack trace is evicted from stack depot once all references to it have
+ * been dropped (once the number of stack_depot_put() calls matches the
+ * number of stack_depot_save_flags() calls with STACK_DEPOT_FLAG_GET set for
+ * this stack trace).
+ */
+void stack_depot_put(depot_stack_handle_t handle);
+
+/**
+ * stack_depot_set_extra_bits - Set extra bits in a stack depot handle
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ * @extra_bits: Value to set the extra bits
+ *
+ * Return: Stack depot handle with extra bits set
+ *
+ * Stack depot handles have a few unused bits, which can be used for storing
+ * user-specific information. These bits are transparent to the stack depot.
+ */
+depot_stack_handle_t __must_check stack_depot_set_extra_bits(
+ depot_stack_handle_t handle, unsigned int extra_bits);
+
+/**
+ * stack_depot_get_extra_bits - Retrieve extra bits from a stack depot handle
+ *
+ * @handle: Stack depot handle with extra bits saved
+ *
+ * Return: Extra bits retrieved from the stack depot handle
+ */
+unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle);
#endif
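
A usage sketch for the API documented above (names outside this header, such
as stack_trace_save(), come from <linux/stacktrace.h>): save the current
stack with a reference held, then fetch, print, and release it later.

/* Sketch only: pairs STACK_DEPOT_FLAG_GET with stack_depot_put(). */
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t record_current_stack(void)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0 /* skipnr */);
	/* CAN_ALLOC: may replenish pools; GET: take a reference. */
	return stack_depot_save_flags(entries, nr, GFP_KERNEL,
				      STACK_DEPOT_FLAG_CAN_ALLOC |
				      STACK_DEPOT_FLAG_GET);
}

static void report_and_release(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr;

	if (!handle)
		return;	/* Save failed, nothing stored. */
	nr = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr, 2 /* leading spaces */);
	stack_depot_put(handle);	/* Drop the reference from GET. */
}
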
diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h
deleted file mode 100644
index a59db2f08e76..000000000000
--- a/include/linux/stackleak.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_STACKLEAK_H
-#define _LINUX_STACKLEAK_H
-
-#include <linux/sched.h>
-#include <linux/sched/task_stack.h>
-
-/*
- * Check that the poison value points to the unused hole in the
- * virtual memory map for your platform.
- */
-#define STACKLEAK_POISON -0xBEEF
-#define STACKLEAK_SEARCH_DEPTH 128
-
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
-#include <asm/stacktrace.h>
-
-static inline void stackleak_task_init(struct task_struct *t)
-{
- t->lowest_stack = (unsigned long)end_of_stack(t) + sizeof(unsigned long);
-# ifdef CONFIG_STACKLEAK_METRICS
- t->prev_lowest_stack = t->lowest_stack;
-# endif
-}
-
-#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
-int stack_erasing_sysctl(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-#endif
-
-#else /* !CONFIG_GCC_PLUGIN_STACKLEAK */
-static inline void stackleak_task_init(struct task_struct *t) { }
-#endif
-
-#endif
diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h
index 4c678c4fec58..9c88707d9a0f 100644
--- a/include/linux/stackprotector.h
+++ b/include/linux/stackprotector.h
@@ -6,6 +6,25 @@
#include <linux/sched.h>
#include <linux/random.h>
+/*
+ * On 64-bit architectures, protect against non-terminated C string overflows
+ * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
+ */
+#ifdef CONFIG_64BIT
+# ifdef __LITTLE_ENDIAN
+# define CANARY_MASK 0xffffffffffffff00UL
+# else /* big endian, 64 bits: */
+# define CANARY_MASK 0x00ffffffffffffffUL
+# endif
+#else /* 32 bits: */
+# define CANARY_MASK 0xffffffffUL
+#endif
+
+static inline unsigned long get_random_canary(void)
+{
+ return get_random_long() & CANARY_MASK;
+}
+
#if defined(CONFIG_STACKPROTECTOR) || defined(CONFIG_ARM64_PTR_AUTH)
# include <asm/stackprotector.h>
#else
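
To make the masking concrete, a sketch of a consumer in the style of the
architectures' boot_init_stack_canary() helpers (the helper name here is
hypothetical; ->stack_canary assumes CONFIG_STACKPROTECTOR):

#include <linux/sched.h>
#include <linux/stackprotector.h>

static inline void init_task_canary(struct task_struct *tsk)
{
	/* On 64-bit little-endian, CANARY_MASK clears the low byte, which
	 * is the first byte of the canary in memory, so any C-string copy
	 * overflowing into it stops at the embedded NUL. */
	tsk->stack_canary = get_random_canary();
}
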
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 9edecb494e9e..97455880ac41 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -8,21 +8,6 @@
struct task_struct;
struct pt_regs;
-#ifdef CONFIG_STACKTRACE
-void stack_trace_print(const unsigned long *trace, unsigned int nr_entries,
- int spaces);
-int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,
- unsigned int nr_entries, int spaces);
-unsigned int stack_trace_save(unsigned long *store, unsigned int size,
- unsigned int skipnr);
-unsigned int stack_trace_save_tsk(struct task_struct *task,
- unsigned long *store, unsigned int size,
- unsigned int skipnr);
-unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
- unsigned int size, unsigned int skipnr);
-unsigned int stack_trace_save_user(unsigned long *store, unsigned int size);
-
-/* Internal interfaces. Do not use in generic code */
#ifdef CONFIG_ARCH_STACKWALK
/**
@@ -75,8 +60,25 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie,
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
const struct pt_regs *regs);
+#endif /* CONFIG_ARCH_STACKWALK */
-#else /* CONFIG_ARCH_STACKWALK */
+#ifdef CONFIG_STACKTRACE
+void stack_trace_print(const unsigned long *trace, unsigned int nr_entries,
+ int spaces);
+int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,
+ unsigned int nr_entries, int spaces);
+unsigned int stack_trace_save(unsigned long *store, unsigned int size,
+ unsigned int skipnr);
+unsigned int stack_trace_save_tsk(struct task_struct *task,
+ unsigned long *store, unsigned int size,
+ unsigned int skipnr);
+unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
+ unsigned int size, unsigned int skipnr);
+unsigned int stack_trace_save_user(unsigned long *store, unsigned int size);
+unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries);
+
+#ifndef CONFIG_ARCH_STACKWALK
+/* Internal interfaces. Do not use in generic code */
struct stack_trace {
unsigned int nr_entries, max_entries;
unsigned long *entries;
diff --git a/include/linux/start_kernel.h b/include/linux/start_kernel.h
index 8b369a41c03c..09f994ac87df 100644
--- a/include/linux/start_kernel.h
+++ b/include/linux/start_kernel.h
@@ -8,8 +8,6 @@
/* Define the prototype for start_kernel here, rather than cluttering
up something else. */
-extern asmlinkage void __init start_kernel(void);
-extern void __init arch_call_rest_init(void);
-extern void __ref rest_init(void);
+extern asmlinkage void __init __noreturn start_kernel(void);
#endif /* _LINUX_START_KERNEL_H */
diff --git a/include/linux/stat.h b/include/linux/stat.h
index fff27e603814..e3d00e7bb26d 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -34,6 +34,10 @@ struct kstat {
STATX_ATTR_ENCRYPTED | \
STATX_ATTR_VERITY \
)/* Attrs corresponding to FS_*_FL flags */
+#define KSTAT_ATTR_VFS_FLAGS \
+ (STATX_ATTR_IMMUTABLE | \
+ STATX_ATTR_APPEND \
+ ) /* Attrs corresponding to S_* flags that are enforced by the VFS */
u64 ino;
dev_t dev;
dev_t rdev;
@@ -46,6 +50,23 @@ struct kstat {
struct timespec64 btime; /* File creation time */
u64 blocks;
u64 mnt_id;
+ u64 change_cookie;
+ u64 subvol;
+ u32 dio_mem_align;
+ u32 dio_offset_align;
+ u32 dio_read_offset_align;
+ u32 atomic_write_unit_min;
+ u32 atomic_write_unit_max;
+ u32 atomic_write_unit_max_opt;
+ u32 atomic_write_segments_max;
};
+/* These definitions are internal to the kernel for now. Mainly used by nfsd. */
+
+/* mask values */
+#define STATX_CHANGE_COOKIE 0x40000000U /* Want/got stx_change_attr */
+
+/* file attribute values */
+#define STATX_ATTR_CHANGE_MONOTONIC 0x8000000000000000ULL /* version monotonically increases */
+
#endif
diff --git a/include/linux/static_call.h b/include/linux/static_call.h
index fc94faa53b5b..78a77a4ae0ea 100644
--- a/include/linux/static_call.h
+++ b/include/linux/static_call.h
@@ -17,11 +17,17 @@
* DECLARE_STATIC_CALL(name, func);
* DEFINE_STATIC_CALL(name, func);
* DEFINE_STATIC_CALL_NULL(name, typename);
+ * DEFINE_STATIC_CALL_RET0(name, typename);
+ *
+ * __static_call_return0;
+ *
* static_call(name)(args...);
* static_call_cond(name)(args...);
* static_call_update(name, func);
* static_call_query(name);
*
+ * EXPORT_STATIC_CALL{,_TRAMP}{,_GPL}()
+ *
* Usage example:
*
* # Start with the following functions (with identical prototypes):
@@ -96,6 +102,33 @@
* To query which function is currently set to be called, use:
*
* func = static_call_query(name);
+ *
+ *
+ * DEFINE_STATIC_CALL_RET0 / __static_call_return0:
+ *
+ * Just like how DEFINE_STATIC_CALL_NULL() / static_call_cond() optimize the
+ * conditional void function call, DEFINE_STATIC_CALL_RET0 /
+ * __static_call_return0 optimize the do-nothing return-0 function.
+ *
+ * This feature is strictly UB per the C standard (since it casts a function
+ * pointer to a different signature) and relies on the architecture ABI to
+ * make things work. In particular it relies on Caller Stack-cleanup and the
+ * whole return register being clobbered for short return values. All normal
+ * CDECL style ABIs conform.
+ *
+ * In particular the x86_64 implementation replaces the 5 byte CALL
+ * instruction at the callsite with a 5 byte clear of the RAX register,
+ * completely eliding any function call overhead.
+ *
+ * Notably argument setup is unconditional.
+ *
+ *
+ * EXPORT_STATIC_CALL() vs EXPORT_STATIC_CALL_TRAMP():
+ *
+ * The difference is that the _TRAMP variant tries to only export the
+ * trampoline with the result that a module can use static_call{,_cond}() but
+ * not static_call_update().
+ *
*/
#include <linux/types.h>
@@ -127,8 +160,12 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool
#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+extern int static_call_initialized;
+
extern int __init static_call_init(void);
+extern void static_call_force_reinit(void);
+
struct static_call_mod {
struct static_call_mod *next;
struct module *mod; /* for vmlinux, mod == NULL */
@@ -147,13 +184,13 @@ extern int static_call_text_reserved(void *start, void *end);
extern long __static_call_return0(void);
-#define __DEFINE_STATIC_CALL(name, _func, _func_init) \
+#define DEFINE_STATIC_CALL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
- .func = _func_init, \
+ .func = _func, \
.type = 1, \
}; \
- ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)
+ ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
#define DEFINE_STATIC_CALL_NULL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
@@ -163,6 +200,14 @@ extern long __static_call_return0(void);
}; \
ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
+#define DEFINE_STATIC_CALL_RET0(name, _func) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = __static_call_return0, \
+ .type = 1, \
+ }; \
+ ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
+
#define static_call_cond(name) (void)__static_call(name)
#define EXPORT_STATIC_CALL(name) \
@@ -182,14 +227,16 @@ extern long __static_call_return0(void);
#elif defined(CONFIG_HAVE_STATIC_CALL)
+#define static_call_initialized 0
+
static inline int static_call_init(void) { return 0; }
-#define __DEFINE_STATIC_CALL(name, _func, _func_init) \
+#define DEFINE_STATIC_CALL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
- .func = _func_init, \
+ .func = _func, \
}; \
- ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)
+ ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
#define DEFINE_STATIC_CALL_NULL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
@@ -198,6 +245,12 @@ static inline int static_call_init(void) { return 0; }
}; \
ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
+#define DEFINE_STATIC_CALL_RET0(name, _func) \
+ DECLARE_STATIC_CALL(name, _func); \
+ struct static_call_key STATIC_CALL_KEY(name) = { \
+ .func = __static_call_return0, \
+ }; \
+ ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
#define static_call_cond(name) (void)__static_call(name)
@@ -215,10 +268,7 @@ static inline int static_call_text_reserved(void *start, void *end)
return 0;
}
-static inline long __static_call_return0(void)
-{
- return 0;
-}
+extern long __static_call_return0(void);
#define EXPORT_STATIC_CALL(name) \
EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \
@@ -235,6 +285,8 @@ static inline long __static_call_return0(void)
#else /* Generic implementation */
+#define static_call_initialized 0
+
static inline int static_call_init(void) { return 0; }
static inline long __static_call_return0(void)
@@ -248,11 +300,14 @@ static inline long __static_call_return0(void)
.func = _func_init, \
}
+#define DEFINE_STATIC_CALL(name, _func) \
+ __DEFINE_STATIC_CALL(name, _func, _func)
+
#define DEFINE_STATIC_CALL_NULL(name, _func) \
- DECLARE_STATIC_CALL(name, _func); \
- struct static_call_key STATIC_CALL_KEY(name) = { \
- .func = NULL, \
- }
+ __DEFINE_STATIC_CALL(name, _func, NULL)
+
+#define DEFINE_STATIC_CALL_RET0(name, _func) \
+ __DEFINE_STATIC_CALL(name, _func, __static_call_return0)
static inline void __static_call_nop(void) { }
@@ -294,10 +349,4 @@ static inline int static_call_text_reserved(void *start, void *end)
#endif /* CONFIG_HAVE_STATIC_CALL */
-#define DEFINE_STATIC_CALL(name, _func) \
- __DEFINE_STATIC_CALL(name, _func, _func)
-
-#define DEFINE_STATIC_CALL_RET0(name, _func) \
- __DEFINE_STATIC_CALL(name, _func, __static_call_return0)
-
#endif /* _LINUX_STATIC_CALL_H */
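
A brief sketch of the RET0 variant described above (hook names are
illustrative): the call site returns 0 with no function-call overhead until
static_call_update() installs a real implementation.

#include <linux/static_call.h>

static int my_hook_fn(int arg);		/* Prototype fixes the call type. */

DEFINE_STATIC_CALL_RET0(my_hook, my_hook_fn);

static int my_hook_fn(int arg)
{
	return arg * 2;
}

static int run_hook(int arg)
{
	/* Until updated, this is __static_call_return0: yields 0. */
	return static_call(my_hook)(arg);
}

static void enable_hook(void)
{
	static_call_update(my_hook, my_hook_fn);	/* Patch call sites. */
}
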
diff --git a/include/linux/static_call_types.h b/include/linux/static_call_types.h
index 5a00b8b2cf9f..cfb6ddeb292b 100644
--- a/include/linux/static_call_types.h
+++ b/include/linux/static_call_types.h
@@ -25,6 +25,8 @@
#define STATIC_CALL_SITE_INIT 2UL /* init section */
#define STATIC_CALL_SITE_FLAGS 3UL
+#ifndef __ASSEMBLY__
+
/*
* The static call site table needs to be created by external tooling (objtool
* or a compiler plugin).
@@ -100,4 +102,6 @@ struct static_call_key {
#endif /* CONFIG_HAVE_STATIC_CALL */
+#endif /* __ASSEMBLY__ */
+
#endif /* _STATIC_CALL_TYPES_H */
diff --git a/include/linux/stdarg.h b/include/linux/stdarg.h
new file mode 100644
index 000000000000..c8dc7f4f390c
--- /dev/null
+++ b/include/linux/stdarg.h
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#ifndef _LINUX_STDARG_H
+#define _LINUX_STDARG_H
+
+typedef __builtin_va_list va_list;
+#define va_start(v, l) __builtin_va_start(v, l)
+#define va_end(v) __builtin_va_end(v)
+#define va_arg(v, T) __builtin_va_arg(v, T)
+#define va_copy(d, s) __builtin_va_copy(d, s)
+
+#endif
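
The new header only maps the va_* interface onto compiler builtins, so a
consumer looks exactly like standard C (sketch):

#include <linux/stdarg.h>

static int sum_ints(int count, ...)
{
	va_list ap;
	int i, sum = 0;

	va_start(ap, count);
	for (i = 0; i < count; i++)
		sum += va_arg(ap, int);
	va_end(ap);
	return sum;			/* sum_ints(3, 1, 2, 3) == 6 */
}
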
diff --git a/include/linux/stddef.h b/include/linux/stddef.h
index 998a4ba28eba..80b6bfb944f0 100644
--- a/include/linux/stddef.h
+++ b/include/linux/stddef.h
@@ -13,14 +13,10 @@ enum {
};
#undef offsetof
-#ifdef __compiler_offsetof
-#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER)
-#else
-#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
-#endif
+#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER)
/**
- * sizeof_field(TYPE, MEMBER)
+ * sizeof_field() - Report the size of a struct field in bytes
*
* @TYPE: The structure containing the field of interest
* @MEMBER: The field to return the size of
@@ -28,7 +24,7 @@ enum {
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
/**
- * offsetofend(TYPE, MEMBER)
+ * offsetofend() - Report the offset of the end of a struct field within the struct
*
* @TYPE: The type of the structure
* @MEMBER: The member within the structure to get the end offset of
@@ -36,4 +32,101 @@ enum {
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
+/**
+ * struct_group() - Wrap a set of declarations in a mirrored struct
+ *
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members.
+ */
+#define struct_group(NAME, MEMBERS...) \
+ __struct_group(/* no tag */, NAME, /* no attrs */, MEMBERS)
+
+/**
+ * struct_group_attr() - Create a struct_group() with trailing attributes
+ *
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @ATTRS: Any struct attributes to apply
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members. Includes structure attributes argument.
+ */
+#define struct_group_attr(NAME, ATTRS, MEMBERS...) \
+ __struct_group(/* no tag */, NAME, ATTRS, MEMBERS)
+
+/**
+ * struct_group_tagged() - Create a struct_group with a reusable tag
+ *
+ * @TAG: The tag name for the named sub-struct
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members. Includes struct tag argument for the named copy,
+ * so the specified layout can be reused later.
+ */
+#define struct_group_tagged(TAG, NAME, MEMBERS...) \
+ __struct_group(TAG, NAME, /* no attrs */, MEMBERS)
+
+/**
+ * DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
+ *
+ * @TYPE: The type of each flexible array element
+ * @NAME: The name of the flexible array member
+ *
+ * In order to have a flexible array member in a union or alone in a
+ * struct, it needs to be wrapped in an anonymous struct with at least 1
+ * named member, but that member can be empty.
+ */
+#define DECLARE_FLEX_ARRAY(TYPE, NAME) \
+ __DECLARE_FLEX_ARRAY(TYPE, NAME)
+
+/**
+ * __TRAILING_OVERLAP() - Overlap a flexible-array member with trailing
+ * members.
+ *
+ * Creates a union between a flexible-array member (FAM) in a struct and a set
+ * of additional members that would otherwise follow it.
+ *
+ * @TYPE: Flexible structure type name, including "struct" keyword.
+ * @NAME: Name for a variable to define.
+ * @FAM: The flexible-array member within @TYPE
+ * @ATTRS: Any struct attributes (usually empty)
+ * @MEMBERS: Trailing overlapping members.
+ */
+#define __TRAILING_OVERLAP(TYPE, NAME, FAM, ATTRS, MEMBERS) \
+ union { \
+ TYPE NAME; \
+ struct { \
+ unsigned char __offset_to_FAM[offsetof(TYPE, FAM)]; \
+ MEMBERS \
+ } ATTRS; \
+ }
+
+/**
+ * TRAILING_OVERLAP() - Overlap a flexible-array member with trailing members.
+ *
+ * Creates a union between a flexible-array member (FAM) in a struct and a set
+ * of additional members that would otherwise follow it.
+ *
+ * @TYPE: Flexible structure type name, including "struct" keyword.
+ * @NAME: Name for a variable to define.
+ * @FAM: The flexible-array member within @TYPE
+ * @MEMBERS: Trailing overlapping members.
+ */
+#define TRAILING_OVERLAP(TYPE, NAME, FAM, MEMBERS) \
+ __TRAILING_OVERLAP(TYPE, NAME, FAM, /* no attrs */, MEMBERS)
+
#endif
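
A sketch of struct_group() in use (type and field names are illustrative):
the named mirror gives memcpy() a single sized object covering the grouped
fields, which keeps FORTIFY_SOURCE bounds checking happy.

#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct packet {
	u32 seq;
	struct_group(hdr,		/* Anonymous + named "hdr" mirror. */
		u8 type;
		u8 flags;
		u16 len;
	);
	u8 payload[];
};

static void copy_header(struct packet *dst, const struct packet *src)
{
	/* One object, one size: no cross-field copy warnings. */
	memcpy(&dst->hdr, &src->hdr, sizeof(dst->hdr));
}
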
diff --git a/include/linux/stm.h b/include/linux/stm.h
index c6f577ab6f21..2fcbef9608f6 100644
--- a/include/linux/stm.h
+++ b/include/linux/stm.h
@@ -30,6 +30,16 @@ enum stp_packet_flags {
STP_PACKET_TIMESTAMPED = 0x2,
};
+/**
+ * enum stm_source_type - STM source driver
+ * @STM_USER: any STM trace source
+ * @STM_FTRACE: ftrace STM source
+ */
+enum stm_source_type {
+ STM_USER,
+ STM_FTRACE,
+};
+
struct stp_policy;
struct stm_device;
@@ -57,7 +67,7 @@ struct stm_device;
*
* Normally, an STM device will have a range of masters available to software
* and the rest being statically assigned to various hardware trace sources.
- * The former is defined by the the range [@sw_start..@sw_end] of the device
+ * The former is defined by the range [@sw_start..@sw_end] of the device
* description. That is, the lowest master that can be allocated to software
* writers is @sw_start and data from this writer will appear as @sw_start
* master in the STP stream.
@@ -106,6 +116,7 @@ struct stm_source_device;
* @name: device name, will be used for policy lookup
* @src: internal structure, only used by stm class code
* @nr_chans: number of channels to allocate
+ * @type: type of STM source driver represented by stm_source_type
* @link: called when this source gets linked to an STM device
* @unlink: called when this source is about to get unlinked from its STM
*
@@ -117,6 +128,7 @@ struct stm_source_data {
struct stm_source_device *src;
unsigned int percpu;
unsigned int nr_chans;
+ unsigned int type;
int (*link)(struct stm_source_data *data);
void (*unlink)(struct stm_source_data *data);
};
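
With the new @type field, a source driver's static data would now carry its
flavor alongside the existing fields (sketch; the "dummy" driver and its
callbacks are illustrative):

#include <linux/stm.h>

static int dummy_link(struct stm_source_data *data) { return 0; }
static void dummy_unlink(struct stm_source_data *data) { }

static struct stm_source_data dummy_stm_data = {
	.name		= "dummy",
	.nr_chans	= 1,
	.type		= STM_USER,	/* New: declares the source flavor. */
	.link		= dummy_link,
	.unlink		= dummy_unlink,
};

/* Registered as usual via stm_source_register_device(NULL, &dummy_stm_data). */
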
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 0db36360ef21..f1054b9c2d8a 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -13,7 +13,7 @@
#define __STMMAC_PLATFORM_DATA
#include <linux/platform_device.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#define MTL_MAX_RX_QUEUES 8
#define MTL_MAX_TX_QUEUES 8
@@ -33,7 +33,9 @@
#define STMMAC_CSR_20_35M 0x2 /* MDC = clk_scr_i/16 */
#define STMMAC_CSR_35_60M 0x3 /* MDC = clk_scr_i/26 */
#define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */
-#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */
+#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/124 */
+#define STMMAC_CSR_300_500M 0x6 /* MDC = clk_scr_i/204 */
+#define STMMAC_CSR_500_800M 0x7 /* MDC = clk_scr_i/324 */
/* MTL algorithms identifiers */
#define MTL_TX_ALGORITHM_WRR 0x0
@@ -76,12 +78,15 @@
| DMA_AXI_BLEN_32 | DMA_AXI_BLEN_64 \
| DMA_AXI_BLEN_128 | DMA_AXI_BLEN_256)
+struct clk;
+struct stmmac_priv;
+
/* Platform data for platform device structure's platform_data field */
struct stmmac_mdio_bus_data {
unsigned int phy_mask;
- unsigned int has_xpcs;
- unsigned int xpcs_an_inband;
+ unsigned int pcs_mask;
+ unsigned int default_an_inband;
int *irqs;
int probed_phy_irq;
bool needs_reset;
@@ -98,6 +103,7 @@ struct stmmac_dma_cfg {
bool eame;
bool multi_msi_en;
bool dche;
+ bool atds;
};
#define AXI_BLEN 7
@@ -107,24 +113,12 @@ struct stmmac_axi {
u32 axi_wr_osr_lmt;
u32 axi_rd_osr_lmt;
bool axi_kbbe;
- u32 axi_blen[AXI_BLEN];
+ u32 axi_blen_regval;
bool axi_fb;
bool axi_mb;
bool axi_rb;
};
-#define EST_GCL 1024
-struct stmmac_est {
- int enable;
- u32 btr_offset[2];
- u32 btr[2];
- u32 ctr[2];
- u32 ter;
- u32 gcl_unaligned[EST_GCL];
- u32 gcl[EST_GCL];
- u32 gcl_size;
-};
-
struct stmmac_rxq_cfg {
u8 mode_to_use;
u32 chan;
@@ -135,6 +129,7 @@ struct stmmac_rxq_cfg {
struct stmmac_txq_cfg {
u32 weight;
+ bool coe_unsupported;
u8 mode_to_use;
/* Credit Base Shaper parameters */
u32 send_slope;
@@ -146,46 +141,94 @@ struct stmmac_txq_cfg {
int tbs_en;
};
-/* FPE link state */
-enum stmmac_fpe_state {
- FPE_STATE_OFF = 0,
- FPE_STATE_CAPABLE = 1,
- FPE_STATE_ENTERING_ON = 2,
- FPE_STATE_ON = 3,
+struct stmmac_safety_feature_cfg {
+ u32 tsoee;
+ u32 mrxpee;
+ u32 mestee;
+ u32 mrxee;
+ u32 mtxee;
+ u32 epsi;
+ u32 edpp;
+ u32 prtyen;
+ u32 tmouten;
};
-/* FPE link-partner hand-shaking mPacket type */
-enum stmmac_mpacket_type {
- MPACKET_VERIFY = 0,
- MPACKET_RESPONSE = 1,
+/* Addresses that may be customized by a platform */
+struct dwmac4_addrs {
+ u32 dma_chan;
+ u32 dma_chan_offset;
+ u32 mtl_chan;
+ u32 mtl_chan_offset;
+ u32 mtl_ets_ctrl;
+ u32 mtl_ets_ctrl_offset;
+ u32 mtl_txq_weight;
+ u32 mtl_txq_weight_offset;
+ u32 mtl_send_slp_cred;
+ u32 mtl_send_slp_cred_offset;
+ u32 mtl_high_cred;
+ u32 mtl_high_cred_offset;
+ u32 mtl_low_cred;
+ u32 mtl_low_cred_offset;
};
-enum stmmac_fpe_task_state_t {
- __FPE_REMOVING,
- __FPE_TASK_SCHED,
+enum dwmac_core_type {
+ DWMAC_CORE_MAC100,
+ DWMAC_CORE_GMAC,
+ DWMAC_CORE_GMAC4,
+ DWMAC_CORE_XGMAC,
};
-struct stmmac_fpe_cfg {
- bool enable; /* FPE enable */
- bool hs_enable; /* FPE handshake enable */
- enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */
- enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */
-};
+#define STMMAC_FLAG_SPH_DISABLE BIT(1)
+#define STMMAC_FLAG_USE_PHY_WOL BIT(2)
+#define STMMAC_FLAG_HAS_SUN8I BIT(3)
+#define STMMAC_FLAG_TSO_EN BIT(4)
+#define STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP BIT(5)
+#define STMMAC_FLAG_VLAN_FAIL_Q_EN BIT(6)
+#define STMMAC_FLAG_MULTI_MSI_EN BIT(7)
+#define STMMAC_FLAG_EXT_SNAPSHOT_EN BIT(8)
+#define STMMAC_FLAG_INT_SNAPSHOT_EN BIT(9)
+#define STMMAC_FLAG_RX_CLK_RUNS_IN_LPI BIT(10)
+#define STMMAC_FLAG_EN_TX_LPI_CLOCKGATING BIT(11)
+#define STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP BIT(12)
+#define STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY BIT(13)
+
+struct mac_device_info;
struct plat_stmmacenet_data {
+ enum dwmac_core_type core_type;
int bus_id;
int phy_addr;
- int interface;
+ /* MAC ----- optional PCS ----- SerDes ----- optional PHY ----- Media
+ * ^
+ * phy_interface
+ *
+ * The Synopsys dwmac core only covers the MAC and an optional
+ * integrated PCS. Where the integrated PCS is used with a SerDes,
+ * e.g. for 1000base-X or Cisco SGMII, the connection between the
+ * PCS and SerDes will be TBI.
+ *
+ * Where the Synopsys dwmac core has been instantiated with multiple
+ * interface modes, these are selected via core-external configuration
+ * which is sampled when the dwmac core is reset. How this is done is
+ * platform glue specific, but this defines the interface used from
+ * the Synopsys dwmac core to the rest of the SoC.
+ *
+ * Where PCS other than the optional integrated Synopsys dwmac PCS
+ * is used, this counts as "the rest of the SoC" in the above
+ * paragraph.
+ *
+ * phy_interface is the PHY-side interface - the interface used by
+ * an attached PHY or SFP etc. This is equivalent to the interface
+ * that phylink uses.
+ */
phy_interface_t phy_interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
struct device_node *phy_node;
- struct device_node *phylink_node;
+ struct fwnode_handle *port_node;
struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
- struct stmmac_est *est;
- struct stmmac_fpe_cfg *fpe_cfg;
+ struct stmmac_safety_feature_cfg *safety_feat_cfg;
int clk_csr;
- int has_gmac;
int enh_desc;
int tx_coe;
int rx_coe;
@@ -200,48 +243,60 @@ struct plat_stmmacenet_data {
int unicast_filter_entries;
int tx_fifo_size;
int rx_fifo_size;
- u32 addr64;
+ u32 host_dma_width;
u32 rx_queues_to_use;
u32 tx_queues_to_use;
u8 rx_sched_algorithm;
u8 tx_sched_algorithm;
struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
- void (*fix_mac_speed)(void *priv, unsigned int speed);
+ void (*get_interfaces)(struct stmmac_priv *priv, void *bsp_priv,
+ unsigned long *interfaces);
+ int (*set_phy_intf_sel)(void *priv, u8 phy_intf_sel);
+ int (*set_clk_tx_rate)(void *priv, struct clk *clk_tx_i,
+ phy_interface_t interface, int speed);
+ void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
+ int (*fix_soc_reset)(struct stmmac_priv *priv, void __iomem *ioaddr);
int (*serdes_powerup)(struct net_device *ndev, void *priv);
void (*serdes_powerdown)(struct net_device *ndev, void *priv);
- void (*ptp_clk_freq_config)(void *priv);
- int (*init)(struct platform_device *pdev, void *priv);
- void (*exit)(struct platform_device *pdev, void *priv);
- struct mac_device_info *(*setup)(void *priv);
+ int (*mac_finish)(struct net_device *ndev,
+ void *priv,
+ unsigned int mode,
+ phy_interface_t interface);
+ void (*ptp_clk_freq_config)(struct stmmac_priv *priv);
+ int (*init)(struct device *dev, void *priv);
+ void (*exit)(struct device *dev, void *priv);
+ int (*suspend)(struct device *dev, void *priv);
+ int (*resume)(struct device *dev, void *priv);
+ int (*mac_setup)(void *priv, struct mac_device_info *mac);
int (*clks_config)(void *priv, bool enabled);
int (*crosststamp)(ktime_t *device, struct system_counterval_t *system,
void *ctx);
+ void (*dump_debug_regs)(void *priv);
+ int (*pcs_init)(struct stmmac_priv *priv);
+ void (*pcs_exit)(struct stmmac_priv *priv);
+ struct phylink_pcs *(*select_pcs)(struct stmmac_priv *priv,
+ phy_interface_t interface);
void *bsp_priv;
struct clk *stmmac_clk;
struct clk *pclk;
struct clk *clk_ptp_ref;
- unsigned int clk_ptp_rate;
- unsigned int clk_ref_rate;
+ struct clk *clk_tx_i; /* clk_tx_i to MAC core */
+ unsigned long clk_ptp_rate;
+ unsigned long clk_ref_rate;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ unsigned int mult_fact_100ns;
s32 ptp_max_adj;
+ u32 cdc_error_adj;
struct reset_control *stmmac_rst;
+ struct reset_control *stmmac_ahb_rst;
struct stmmac_axi *axi;
- int has_gmac4;
- bool has_sun8i;
- bool tso_en;
int rss_en;
int mac_port_sel_speed;
- bool en_tx_lpi_clockgating;
- int has_xgmac;
- bool vlan_fail_q_en;
u8 vlan_fail_q;
- unsigned int eee_usecs_rate;
struct pci_dev *pdev;
- bool has_crossts;
int int_snapshot_num;
- int ext_snapshot_num;
- bool ext_snapshot_en;
- bool multi_msi_en;
int msi_mac_vec;
int msi_wol_vec;
int msi_lpi_vec;
@@ -249,5 +304,7 @@ struct plat_stmmacenet_data {
int msi_sfty_ue_vec;
int msi_rx_base_vec;
int msi_tx_base_vec;
+ const struct dwmac4_addrs *dwmac4_addrs;
+ unsigned int flags;
};
#endif
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 46fb3ebdd16e..72820503514c 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -3,7 +3,7 @@
#define _LINUX_STOP_MACHINE
#include <linux/cpu.h>
-#include <linux/cpumask.h>
+#include <linux/cpumask_types.h>
#include <linux/smp.h>
#include <linux/list.h>
@@ -88,42 +88,76 @@ static inline void print_stop_info(const char *log_lvl, struct task_struct *task
#endif /* CONFIG_SMP */
/*
- * stop_machine "Bogolock": stop the entire machine, disable
- * interrupts. This is a very heavy lock, which is equivalent to
- * grabbing every spinlock (and more). So the "read" side to such a
- * lock is anything which disables preemption.
+ * stop_machine "Bogolock": stop the entire machine, disable interrupts.
+ * This is a very heavy lock, which is equivalent to grabbing every raw
+ * spinlock (and more). So the "read" side to such a lock is anything
+ * which disables preemption.
*/
#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/**
* stop_machine: freeze the machine on all CPUs and run this function
* @fn: the function to run
- * @data: the data ptr for the @fn()
- * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ * @data: the data ptr to pass to @fn()
+ * @cpus: the cpus to run @fn() on (NULL = run on each online CPU)
*
- * Description: This causes a thread to be scheduled on every cpu,
- * each of which disables interrupts. The result is that no one is
- * holding a spinlock or inside any other preempt-disabled region when
- * @fn() runs.
+ * Description: This causes a thread to be scheduled on every CPU, which
+ * will run with interrupts disabled. Each CPU specified by @cpus will
+ * run @fn. While @fn is executing, there will be no other CPUs holding
+ * a raw spinlock or running within any other type of preempt-disabled
+ * region of code.
*
- * This can be thought of as a very heavy write lock, equivalent to
- * grabbing every spinlock in the kernel.
+ * When @cpus specifies only a single CPU, this can be thought of as
+ * a reader-writer lock where readers disable preemption (for example,
+ * by holding a raw spinlock) and where the insanely heavy writers run
+ * @fn while also preventing any other CPU from doing any useful work.
+ * These writers can also be thought of as having implicitly grabbed every
+ * raw spinlock in the kernel.
*
- * Protects against CPU hotplug.
+ * When @fn is a no-op, this can be thought of as an RCU implementation
+ * where readers again disable preemption and writers use stop_machine()
+ * in place of synchronize_rcu(), albeit with orders of magnitude more
+ * disruption than even that of synchronize_rcu_expedited().
+ *
+ * Although only one stop_machine() operation can proceed at a time,
+ * the possibility of blocking in cpus_read_lock() means that the caller
+ * cannot usefully rely on this serialization.
+ *
+ * Return: 0 if all invocations of @fn return zero. Otherwise, the
+ * value returned by an arbitrarily chosen member of the set of calls to
+ * @fn that returned non-zero.
*/
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
/**
* stop_machine_cpuslocked: freeze the machine on all CPUs and run this function
* @fn: the function to run
- * @data: the data ptr for the @fn()
- * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ * @data: the data ptr to pass to @fn()
+ * @cpus: the cpus to run @fn() on (NULL = run on each online CPU)
+ *
+ * Same as above. Avoids nested calls to cpus_read_lock().
*
- * Same as above. Must be called from with in a cpus_read_lock() protected
- * region. Avoids nested calls to cpus_read_lock().
+ * Context: Must be called from within a cpus_read_lock() protected region.
*/
int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
+/**
+ * stop_core_cpuslocked: stop all threads on just one core
+ * @cpu: any cpu in the targeted core
+ * @fn: the function to run on each CPU in the core containing @cpu
+ * @data: the data ptr to pass to @fn()
+ *
+ * Same as above, but instead of every CPU, only the logical CPUs of the
+ * single core containing @cpu are affected.
+ *
+ * Context: Must be called from within a cpus_read_lock() protected region.
+ *
+ * Return: 0 if all invocations of @fn return zero. Otherwise, the
+ * value returned by an arbitrarily chosen member of the set of calls to
+ * @fn that returned non-zero.
+ */
+int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data);
+
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus);
#else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
diff --git a/include/linux/string.h b/include/linux/string.h
index 9521d8cab18e..1b564c36d721 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -2,19 +2,63 @@
#ifndef _LINUX_STRING_H_
#define _LINUX_STRING_H_
-
+#include <linux/args.h>
+#include <linux/array_size.h>
+#include <linux/cleanup.h> /* for DEFINE_FREE() */
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */
+#include <linux/err.h> /* for ERR_PTR() */
#include <linux/errno.h> /* for E2BIG */
-#include <stdarg.h>
+#include <linux/overflow.h> /* for check_mul_overflow() */
+#include <linux/stdarg.h>
#include <uapi/linux/string.h>
extern char *strndup_user(const char __user *, long);
-extern void *memdup_user(const void __user *, size_t);
-extern void *vmemdup_user(const void __user *, size_t);
+extern void *memdup_user(const void __user *, size_t) __realloc_size(2);
+extern void *vmemdup_user(const void __user *, size_t) __realloc_size(2);
extern void *memdup_user_nul(const void __user *, size_t);
+/**
+ * memdup_array_user - duplicate array from user space
+ * @src: source address in user space
+ * @n: number of array members to copy
+ * @size: size of one array member
+ *
+ * Return: Pointer to the duplicated memory on success, or an ERR_PTR() on
+ * failure. The result is physically contiguous; free with kfree().
+ */
+static inline __realloc_size(2, 3)
+void *memdup_array_user(const void __user *src, size_t n, size_t size)
+{
+ size_t nbytes;
+
+ if (check_mul_overflow(n, size, &nbytes))
+ return ERR_PTR(-EOVERFLOW);
+
+ return memdup_user(src, nbytes);
+}
+
+/**
+ * vmemdup_array_user - duplicate array from user space
+ * @src: source address in user space
+ * @n: number of array members to copy
+ * @size: size of one array member
+ *
+ * Return: Pointer to the duplicated memory on success, or an ERR_PTR() on
+ * failure. The result may not be physically contiguous; free with kvfree().
+ */
+static inline __realloc_size(2, 3)
+void *vmemdup_array_user(const void __user *src, size_t n, size_t size)
+{
+ size_t nbytes;
+
+ if (check_mul_overflow(n, size, &nbytes))
+ return ERR_PTR(-EOVERFLOW);
+
+ return vmemdup_user(src, nbytes);
+}
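As a sketch of typical use from, say, an ioctl handler (struct sample and copy_samples() are hypothetical names), the multiplication n * sizeof() never needs to be open-coded:

#include <linux/string.h>
#include <linux/types.h>

struct sample {
	u32 a;
	u32 b;
};

/* Duplicate @n records from user space; the size computation is
 * overflow-checked, returning ERR_PTR(-EOVERFLOW) if it would wrap.
 */
static struct sample *copy_samples(const void __user *uptr, size_t n)
{
	return memdup_array_user(uptr, n, sizeof(struct sample));
}

The caller tests the result with IS_ERR() and frees it with kfree(); vmemdup_array_user() behaves the same except the result must be freed with kvfree().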
+
/*
* Include machine specific inline routines
*/
@@ -26,15 +70,83 @@ extern char * strcpy(char *,const char *);
#ifndef __HAVE_ARCH_STRNCPY
extern char * strncpy(char *,const char *, __kernel_size_t);
#endif
-#ifndef __HAVE_ARCH_STRLCPY
-size_t strlcpy(char *, const char *, size_t);
-#endif
-#ifndef __HAVE_ARCH_STRSCPY
-ssize_t strscpy(char *, const char *, size_t);
-#endif
+ssize_t sized_strscpy(char *, const char *, size_t);
+
+/*
+ * The 2 argument style can only be used when dst is an array with a
+ * known size.
+ */
+#define __strscpy0(dst, src, ...) \
+ sized_strscpy(dst, src, sizeof(dst) + __must_be_array(dst) + \
+ __must_be_cstr(dst) + __must_be_cstr(src))
+#define __strscpy1(dst, src, size) \
+ sized_strscpy(dst, src, size + __must_be_cstr(dst) + __must_be_cstr(src))
+
+#define __strscpy_pad0(dst, src, ...) \
+ sized_strscpy_pad(dst, src, sizeof(dst) + __must_be_array(dst) + \
+ __must_be_cstr(dst) + __must_be_cstr(src))
+#define __strscpy_pad1(dst, src, size) \
+ sized_strscpy_pad(dst, src, size + __must_be_cstr(dst) + __must_be_cstr(src))
+
+/**
+ * strscpy - Copy a C-string into a sized buffer
+ * @dst: Where to copy the string to
+ * @src: Where to copy the string from
+ * @...: Size of destination buffer (optional)
+ *
+ * Copy the source string @src, or as much of it as fits, into the
+ * destination @dst buffer. The behavior is undefined if the string
+ * buffers overlap. The destination @dst buffer is always NUL terminated,
+ * unless it's zero-sized.
+ *
+ * The size argument @... is only required when @dst is not an array, or
+ * when the copy needs to be smaller than sizeof(@dst).
+ *
+ * Preferred to strncpy() since it always returns a valid string, and
+ * doesn't unnecessarily force the tail of the destination buffer to be
+ * zero padded. If padding is desired please use strscpy_pad().
+ *
+ * Returns the number of characters copied in @dst (not including the
+ * trailing %NUL) or -E2BIG if the destination buffer size is 0 or the
+ * copy from @src was truncated.
+ */
+#define strscpy(dst, src, ...) \
+ CONCATENATE(__strscpy, COUNT_ARGS(__VA_ARGS__))(dst, src, __VA_ARGS__)
+
+#define sized_strscpy_pad(dest, src, count) ({ \
+ char *__dst = (dest); \
+ const char *__src = (src); \
+ const size_t __count = (count); \
+ ssize_t __wrote; \
+ \
+ __wrote = sized_strscpy(__dst, __src, __count); \
+ if (__wrote >= 0 && __wrote < __count) \
+ memset(__dst + __wrote + 1, 0, __count - __wrote - 1); \
+ __wrote; \
+})
-/* Wraps calls to strscpy()/memset(), no arch specific code required */
-ssize_t strscpy_pad(char *dest, const char *src, size_t count);
+/**
+ * strscpy_pad() - Copy a C-string into a sized buffer
+ * @dst: Where to copy the string to
+ * @src: Where to copy the string from
+ * @...: Size of destination buffer
+ *
+ * Copy the string, or as much of it as fits, into the dest buffer. The
+ * behavior is undefined if the string buffers overlap. The destination
+ * buffer is always %NUL terminated, unless it's zero-sized.
+ *
+ * If the source string is shorter than the destination buffer, the
+ * remaining bytes in the buffer will be filled with %NUL bytes.
+ *
+ * For full explanation of why you may want to consider using the
+ * 'strscpy' functions please see the function docstring for strscpy().
+ *
+ * Returns:
+ * * The number of characters copied (not including the trailing %NULs)
+ * * -E2BIG if count is 0 or @src was truncated.
+ */
+#define strscpy_pad(dst, src, ...) \
+ CONCATENATE(__strscpy_pad, COUNT_ARGS(__VA_ARGS__))(dst, src, __VA_ARGS__)
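A minimal sketch of both calling forms (demo_copy() and the buffer name are invented for illustration):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

static void demo_copy(const char *src)
{
	char name[16];
	ssize_t n;

	/* Two-argument form: the size is taken from the array itself. */
	n = strscpy(name, src);
	if (n == -E2BIG)
		pr_debug("truncated, but name is still NUL-terminated\n");

	/* Explicit size, with the unused tail zero-filled as well. */
	strscpy_pad(name, src, sizeof(name));
}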
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
@@ -170,26 +282,47 @@ static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
#endif
void *memchr_inv(const void *s, int c, size_t n);
-char *strreplace(char *s, char old, char new);
+char *strreplace(char *str, char old, char new);
+
+/**
+ * mem_is_zero - Check if an area of memory is all 0's.
+ * @s: The memory area
+ * @n: The size of the area
+ *
+ * Return: True if the area of memory is all 0's.
+ */
+static inline bool mem_is_zero(const void *s, size_t n)
+{
+ return !memchr_inv(s, 0, n);
+}
extern void kfree_const(const void *x);
extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
extern const char *kstrdup_const(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
-extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
+extern void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
+#define kmemdup(...) alloc_hooks(kmemdup_noprof(__VA_ARGS__))
+
+extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
+extern void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
+ __realloc_size(2, 3);
+/* lib/argv_split.c */
extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
-extern bool sysfs_streq(const char *s1, const char *s2);
-extern int kstrtobool(const char *s, bool *res);
-static inline int strtobool(const char *s, bool *res)
-{
- return kstrtobool(s, res);
-}
+DEFINE_FREE(argv_free, char **, if (!IS_ERR_OR_NULL(_T)) argv_free(_T))
+/* lib/cmdline.c */
+extern int get_option(char **str, int *pint);
+extern char *get_options(const char *str, int nints, int *ints);
+extern unsigned long long memparse(const char *ptr, char **retptr);
+extern bool parse_option_str(const char *str, const char *option);
+extern char *next_arg(char *args, char **param, char **val);
+
+extern bool sysfs_streq(const char *s1, const char *s2);
int match_string(const char * const *array, size_t n, const char *string);
int __sysfs_match_string(const char * const *array, size_t n, const char *s);
@@ -203,9 +336,8 @@ int __sysfs_match_string(const char * const *array, size_t n, const char *s);
#define sysfs_match_string(_a, _s) __sysfs_match_string(_a, ARRAY_SIZE(_a), _s)
#ifdef CONFIG_BINARY_PRINTF
-int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
-int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
-int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
+__printf(3, 0) int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
+__printf(3, 0) int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
#endif
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
@@ -213,16 +345,6 @@ extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
int ptr_to_hashval(const void *ptr, unsigned long *hashval_out);
-/**
- * strstarts - does @str start with @prefix?
- * @str: string to examine
- * @prefix: prefix to look for.
- */
-static inline bool strstarts(const char *str, const char *prefix)
-{
- return strncmp(str, prefix, strlen(prefix)) == 0;
-}
-
size_t memweight(const void *ptr, size_t bytes);
/**
@@ -249,6 +371,10 @@ static inline void memzero_explicit(void *s, size_t count)
* kbasename - return the last part of a pathname.
*
* @path: path to extract the filename from.
+ *
+ * Returns:
+ * Pointer to the filename portion inside @path. If no '/' exists,
+ * returns @path unchanged.
*/
static inline const char *kbasename(const char *path)
{
@@ -256,36 +382,158 @@ static inline const char *kbasename(const char *path)
return tail ? tail + 1 : path;
}
-#define __FORTIFY_INLINE extern __always_inline __attribute__((gnu_inline))
-#define __RENAME(x) __asm__(#x)
-
-void fortify_panic(const char *name) __noreturn __cold;
-void __read_overflow(void) __compiletime_error("detected read beyond size of object passed as 1st parameter");
-void __read_overflow2(void) __compiletime_error("detected read beyond size of object passed as 2nd parameter");
-void __read_overflow3(void) __compiletime_error("detected read beyond size of object passed as 3rd parameter");
-void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
-
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
#include <linux/fortify-string.h>
#endif
+#ifndef unsafe_memcpy
+#define unsafe_memcpy(dst, src, bytes, justification) \
+ memcpy(dst, src, bytes)
+#endif
+
+void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+ int pad);
/**
- * memcpy_and_pad - Copy one buffer to another with padding
- * @dest: Where to copy to
- * @dest_len: The destination buffer size
- * @src: Where to copy from
- * @count: The number of bytes to copy
- * @pad: Character to use for padding if space is left in destination.
+ * strtomem_pad - Copy NUL-terminated string to non-NUL-terminated buffer
+ *
+ * @dest: Pointer of destination character array (marked as __nonstring)
+ * @src: Pointer to NUL-terminated string
+ * @pad: Padding character to fill any remaining bytes of @dest after copy
+ *
+ * This is a replacement for strncpy() uses where the destination is not
+ * a NUL-terminated string, but with bounds checking on the source size, and
+ * an explicit padding character. If padding is not required, use strtomem().
+ *
+ * Note that the size of @dest is not an argument, as the length of @dest
+ * must be discoverable by the compiler.
*/
-static inline void memcpy_and_pad(void *dest, size_t dest_len,
- const void *src, size_t count, int pad)
-{
- if (dest_len > count) {
- memcpy(dest, src, count);
- memset(dest + count, pad, dest_len - count);
- } else
- memcpy(dest, src, dest_len);
-}
+#define strtomem_pad(dest, src, pad) do { \
+ const size_t _dest_len = __must_be_byte_array(dest) + \
+ __must_be_noncstr(dest) + \
+ ARRAY_SIZE(dest); \
+ const size_t _src_len = __must_be_cstr(src) + \
+ __builtin_object_size(src, 1); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ _dest_len == (size_t)-1); \
+ memcpy_and_pad(dest, _dest_len, src, \
+ strnlen(src, min(_src_len, _dest_len)), pad); \
+} while (0)
+
+/**
+ * strtomem - Copy NUL-terminated string to non-NUL-terminated buffer
+ *
+ * @dest: Pointer of destination character array (marked as __nonstring)
+ * @src: Pointer to NUL-terminated string
+ *
+ * This is a replacement for strncpy() uses where the destination is not
+ * a NUL-terminated string, but with bounds checking on the source size, and
+ * without trailing padding. If padding is required, use strtomem_pad().
+ *
+ * Note that the size of @dest is not an argument, as the length of @dest
+ * must be discoverable by the compiler.
+ */
+#define strtomem(dest, src) do { \
+ const size_t _dest_len = __must_be_byte_array(dest) + \
+ __must_be_noncstr(dest) + \
+ ARRAY_SIZE(dest); \
+ const size_t _src_len = __must_be_cstr(src) + \
+ __builtin_object_size(src, 1); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ _dest_len == (size_t)-1); \
+ memcpy(dest, src, strnlen(src, min(_src_len, _dest_len))); \
+} while (0)
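For example, filling a fixed-width, non-NUL-terminated field (struct wire_hdr and fill_tag() are hypothetical):

#include <linux/string.h>
#include <linux/types.h>

struct wire_hdr {
	u8 version;
	char tag[8] __nonstring;	/* fixed width, no terminator */
};

static void fill_tag(struct wire_hdr *h, const char *name)
{
	/* Copy at most 8 bytes of @name; remaining bytes become spaces. */
	strtomem_pad(h->tag, name, ' ');
}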
+
+/**
+ * memtostr - Copy a possibly non-NUL-term string to a NUL-term string
+ * @dest: Pointer to destination NUL-terminated string
+ * @src: Pointer to character array (likely marked as __nonstring)
+ *
+ * This is a replacement for strncpy() uses where the source is not
+ * a NUL-terminated string.
+ *
+ * Note that sizes of @dest and @src must be known at compile-time.
+ */
+#define memtostr(dest, src) do { \
+ const size_t _dest_len = __must_be_byte_array(dest) + \
+ __must_be_cstr(dest) + \
+ ARRAY_SIZE(dest); \
+ const size_t _src_len = __must_be_noncstr(src) + \
+ __builtin_object_size(src, 1); \
+ const size_t _src_chars = strnlen(src, _src_len); \
+ const size_t _copy_len = min(_dest_len - 1, _src_chars); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ !__builtin_constant_p(_src_len) || \
+ _dest_len == 0 || _dest_len == (size_t)-1 || \
+ _src_len == 0 || _src_len == (size_t)-1); \
+ memcpy(dest, src, _copy_len); \
+ dest[_copy_len] = '\0'; \
+} while (0)
+
+/**
+ * memtostr_pad - Copy a possibly non-NUL-term string to a NUL-term string
+ * with NUL padding in the destination
+ * @dest: Pointer to destination NUL-terminated string
+ * @src: Pointer to character array (likely marked as __nonstring)
+ *
+ * This is a replacement for strncpy() uses where the source is not
+ * a NUL-terminated string.
+ *
+ * Note that sizes of @dest and @src must be known at compile-time.
+ */
+#define memtostr_pad(dest, src) do { \
+ const size_t _dest_len = __must_be_byte_array(dest) + \
+ __must_be_cstr(dest) + \
+ ARRAY_SIZE(dest); \
+ const size_t _src_len = __must_be_noncstr(src) + \
+ __builtin_object_size(src, 1); \
+ const size_t _src_chars = strnlen(src, _src_len); \
+ const size_t _copy_len = min(_dest_len - 1, _src_chars); \
+ \
+ BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
+ !__builtin_constant_p(_src_len) || \
+ _dest_len == 0 || _dest_len == (size_t)-1 || \
+ _src_len == 0 || _src_len == (size_t)-1); \
+ memcpy(dest, src, _copy_len); \
+ memset(&dest[_copy_len], 0, _dest_len - _copy_len); \
+} while (0)
+
+/**
+ * memset_after - Set a value after a struct member to the end of a struct
+ *
+ * @obj: Address of target struct instance
+ * @v: Byte value to repeatedly write
+ * @member: after which struct member to start writing bytes
+ *
+ * This is good for clearing padding following the given member.
+ */
+#define memset_after(obj, v, member) \
+({ \
+ u8 *__ptr = (u8 *)(obj); \
+ typeof(v) __val = (v); \
+ memset(__ptr + offsetofend(typeof(*(obj)), member), __val, \
+ sizeof(*(obj)) - offsetofend(typeof(*(obj)), member)); \
+})
+
+/**
+ * memset_startat - Set a value starting at a member to the end of a struct
+ *
+ * @obj: Address of target struct instance
+ * @v: Byte value to repeatedly write
+ * @member: struct member to start writing at
+ *
+ * Note that if there is padding between the prior member and the target
+ * member, memset_after() should be used to clear the prior padding.
+ */
+#define memset_startat(obj, v, member) \
+({ \
+ u8 *__ptr = (u8 *)(obj); \
+ typeof(v) __val = (v); \
+ memset(__ptr + offsetof(typeof(*(obj)), member), __val, \
+ sizeof(*(obj)) - offsetof(typeof(*(obj)), member)); \
+})
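A sketch of the intended pattern (struct reply and scrub_reply() are invented): zero from a member to the end of a struct before copying it out, so padding never leaks stack or heap contents:

#include <linux/string.h>
#include <linux/types.h>

struct reply {
	u16 type;
	u16 len;
	u64 payload[4];		/* implicit padding may precede this */
};

static void scrub_reply(struct reply *r)
{
	/* Zero every byte after 'len': the padding and all of payload. */
	memset_after(r, 0, len);

	/* Or: zero from 'payload' onward, leaving the padding untouched. */
	memset_startat(r, 0, payload);
}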
/**
* str_has_prefix - Test if a string has a given prefix
@@ -308,4 +556,36 @@ static __always_inline size_t str_has_prefix(const char *str, const char *prefix
return strncmp(str, prefix, len) == 0 ? len : 0;
}
+/**
+ * strstarts - does @str start with @prefix?
+ * @str: string to examine
+ * @prefix: prefix to look for.
+ *
+ * Returns:
+ * True if @str begins with @prefix. False in all other cases.
+ */
+static inline bool strstarts(const char *str, const char *prefix)
+{
+ return strncmp(str, prefix, strlen(prefix)) == 0;
+}
+
+/**
+ * strends - Check if a string ends with another string.
+ * @str: NUL-terminated string to check against @suffix
+ * @suffix: NUL-terminated string defining the suffix to look for in @str
+ *
+ * Returns:
+ * True if @str ends with @suffix. False in all other cases.
+ */
+static inline bool __attribute__((nonnull(1, 2)))
+strends(const char *str, const char *suffix)
+{
+	size_t str_len = strlen(str), suffix_len = strlen(suffix);
+
+ if (str_len < suffix_len)
+ return false;
+
+ return !(strcmp(str + str_len - suffix_len, suffix));
+}
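Typical use of the two helpers above is prefix/suffix testing; a tiny hypothetical example:

#include <linux/string.h>

static bool is_module_name(const char *name)
{
	/* True for "snd.ko", false for "snd.ko.xz" or "snd". */
	return strends(name, ".ko");
}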
+
#endif /* _LINUX_STRING_H_ */
diff --git a/include/linux/string_choices.h b/include/linux/string_choices.h
new file mode 100644
index 000000000000..ee84087d4b26
--- /dev/null
+++ b/include/linux/string_choices.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_STRING_CHOICES_H_
+#define _LINUX_STRING_CHOICES_H_
+
+/*
+ * This header provides a series of helpers in the str_$TRUE_$FALSE format
+ * (more can be added as needed), where $TRUE and $FALSE are their
+ * corresponding literal strings. These helpers can be used in printing
+ * and in other places where constant strings are required. Using these
+ * helpers offers the following benefits:
+ * 1) Reducing the hardcoding of strings, which makes the code more elegant
+ * through these simple literal-meaning helpers.
+ * 2) Unifying the output, which prevents the same string from being printed
+ * in various forms, such as enable/disable, enabled/disabled, en/dis.
+ * 3) Deduping by the linker, which results in a smaller binary file.
+ */
+
+#include <linux/types.h>
+
+static inline const char *str_assert_deassert(bool v)
+{
+ return v ? "assert" : "deassert";
+}
+#define str_deassert_assert(v) str_assert_deassert(!(v))
+
+static inline const char *str_enable_disable(bool v)
+{
+ return v ? "enable" : "disable";
+}
+#define str_disable_enable(v) str_enable_disable(!(v))
+
+static inline const char *str_enabled_disabled(bool v)
+{
+ return v ? "enabled" : "disabled";
+}
+#define str_disabled_enabled(v) str_enabled_disabled(!(v))
+
+static inline const char *str_hi_lo(bool v)
+{
+ return v ? "hi" : "lo";
+}
+#define str_lo_hi(v) str_hi_lo(!(v))
+
+static inline const char *str_high_low(bool v)
+{
+ return v ? "high" : "low";
+}
+#define str_low_high(v) str_high_low(!(v))
+
+static inline const char *str_input_output(bool v)
+{
+ return v ? "input" : "output";
+}
+#define str_output_input(v) str_input_output(!(v))
+
+static inline const char *str_on_off(bool v)
+{
+ return v ? "on" : "off";
+}
+#define str_off_on(v) str_on_off(!(v))
+
+static inline const char *str_read_write(bool v)
+{
+ return v ? "read" : "write";
+}
+#define str_write_read(v) str_read_write(!(v))
+
+static inline const char *str_true_false(bool v)
+{
+ return v ? "true" : "false";
+}
+#define str_false_true(v) str_true_false(!(v))
+
+static inline const char *str_up_down(bool v)
+{
+ return v ? "up" : "down";
+}
+#define str_down_up(v) str_up_down(!(v))
+
+static inline const char *str_yes_no(bool v)
+{
+ return v ? "yes" : "no";
+}
+#define str_no_yes(v) str_yes_no(!(v))
+
+/**
+ * str_plural - Return the simple pluralization based on English counts
+ * @num: Number used for deciding pluralization
+ *
+ * If @num is 1, returns an empty string, otherwise returns "s".
+ */
+static inline const char *str_plural(size_t num)
+{
+ return num == 1 ? "" : "s";
+}
+
+#endif
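Usage is a one-liner inside format strings; a small hypothetical example:

#include <linux/printk.h>
#include <linux/string_choices.h>

static void report(bool link_up, size_t nerrors)
{
	pr_info("link is %s\n", str_up_down(link_up));
	pr_info("%zu error%s logged\n", nerrors, str_plural(nerrors));
}

Because every caller shares the same string literals, the linker can deduplicate them, which is benefit 3) above.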
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index fa06dcdc481e..3fb88a1e9898 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -2,29 +2,47 @@
#ifndef _LINUX_STRING_HELPERS_H_
#define _LINUX_STRING_HELPERS_H_
+#include <linux/bits.h>
#include <linux/ctype.h>
+#include <linux/string_choices.h>
+#include <linux/string.h>
#include <linux/types.h>
+struct device;
struct file;
struct task_struct;
-/* Descriptions of the types of units to
- * print in */
+static inline bool string_is_terminated(const char *s, int len)
+{
+ return memchr(s, '\0', len) ? true : false;
+}
+
+/* Descriptions of the types of units to print in */
enum string_size_units {
STRING_UNITS_10, /* use powers of 10^3 (standard SI) */
STRING_UNITS_2, /* use binary powers of 2^10 */
+ STRING_UNITS_MASK = BIT(0),
+
+ /* Modifiers */
+ STRING_UNITS_NO_SPACE = BIT(30),
+ STRING_UNITS_NO_BYTES = BIT(31),
};
-void string_get_size(u64 size, u64 blk_size, enum string_size_units units,
- char *buf, int len);
+int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
+ char *buf, int len);
+
+int parse_int_array(const char *buf, size_t count, int **array);
+int parse_int_array_user(const char __user *from, size_t count, int **array);
-#define UNESCAPE_SPACE 0x01
-#define UNESCAPE_OCTAL 0x02
-#define UNESCAPE_HEX 0x04
-#define UNESCAPE_SPECIAL 0x08
+#define UNESCAPE_SPACE BIT(0)
+#define UNESCAPE_OCTAL BIT(1)
+#define UNESCAPE_HEX BIT(2)
+#define UNESCAPE_SPECIAL BIT(3)
#define UNESCAPE_ANY \
(UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_HEX | UNESCAPE_SPECIAL)
+#define UNESCAPE_ALL_MASK GENMASK(3, 0)
+
int string_unescape(char *src, char *dst, size_t size, unsigned int flags);
static inline int string_unescape_inplace(char *buf, unsigned int flags)
@@ -42,22 +60,24 @@ static inline int string_unescape_any_inplace(char *buf)
return string_unescape_any(buf, buf, 0);
}
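As a sketch, unescaping a C-style-escaped buffer in place (the buffer contents are invented):

#include <linux/string_helpers.h>

static void demo_unescape(void)
{
	char buf[] = "line1\\nline2\\t!";

	/* Expand \n, \t, octal and hex escapes; the result never grows. */
	string_unescape_any_inplace(buf);
}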
-#define ESCAPE_SPACE 0x01
-#define ESCAPE_SPECIAL 0x02
-#define ESCAPE_NULL 0x04
-#define ESCAPE_OCTAL 0x08
+#define ESCAPE_SPACE BIT(0)
+#define ESCAPE_SPECIAL BIT(1)
+#define ESCAPE_NULL BIT(2)
+#define ESCAPE_OCTAL BIT(3)
#define ESCAPE_ANY \
(ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_SPECIAL | ESCAPE_NULL)
-#define ESCAPE_NP 0x10
+#define ESCAPE_NP BIT(4)
#define ESCAPE_ANY_NP (ESCAPE_ANY | ESCAPE_NP)
-#define ESCAPE_HEX 0x20
+#define ESCAPE_HEX BIT(5)
+#define ESCAPE_NA BIT(6)
+#define ESCAPE_NAP BIT(7)
+#define ESCAPE_APPEND BIT(8)
+
+#define ESCAPE_ALL_MASK GENMASK(8, 0)
int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
unsigned int flags, const char *only);
-int string_escape_mem_ascii(const char *src, size_t isz, char *dst,
- size_t osz);
-
static inline int string_escape_mem_any_np(const char *src, size_t isz,
char *dst, size_t osz, const char *only)
{
@@ -94,6 +114,11 @@ char *kstrdup_quotable(const char *src, gfp_t gfp);
char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp);
char *kstrdup_quotable_file(struct file *file, gfp_t gfp);
+char *kstrdup_and_replace(const char *src, char old, char new, gfp_t gfp);
+
+char **kasprintf_strarray(gfp_t gfp, const char *prefix, size_t n);
void kfree_strarray(char **array, size_t n);
+char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n);
+
#endif
diff --git a/include/linux/stringify.h b/include/linux/stringify.h
index 841cec8ed525..0e84cbe65270 100644
--- a/include/linux/stringify.h
+++ b/include/linux/stringify.h
@@ -9,4 +9,6 @@
#define __stringify_1(x...) #x
#define __stringify(x...) __stringify_1(x)
+#define FILE_LINE __FILE__ ":" __stringify(__LINE__)
+
#endif /* !__LINUX_STRINGIFY_H */
diff --git a/include/linux/sungem_phy.h b/include/linux/sungem_phy.h
index 3a11fa41a131..eecc7eb63bfb 100644
--- a/include/linux/sungem_phy.h
+++ b/include/linux/sungem_phy.h
@@ -2,6 +2,8 @@
#ifndef __SUNGEM_PHY_H__
#define __SUNGEM_PHY_H__
+#include <linux/types.h>
+
struct mii_phy;
/* Operations supported by any kind of PHY */
@@ -38,7 +40,7 @@ enum {
/* An instance of a PHY, partially borrowed from mii_if_info */
struct mii_phy
{
- struct mii_phy_def* def;
+ const struct mii_phy_def *def;
u32 advertising;
int mii_id;
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 98da816b5fc2..61e58327b1aa 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -99,6 +99,7 @@ struct rpc_auth_create_args {
/* Flags for rpcauth_lookupcred() */
#define RPCAUTH_LOOKUP_NEW 0x01 /* Accept an uninitialised cred */
+#define RPCAUTH_LOOKUP_ASYNC 0x02 /* Don't block waiting for memory */
/*
* Client authentication ops
@@ -119,6 +120,7 @@ struct rpc_authops {
struct rpcsec_gss_info *);
int (*key_timeout)(struct rpc_auth *,
struct rpc_cred *);
+ int (*ping)(struct rpc_clnt *clnt);
};
struct rpc_credops {
@@ -143,6 +145,7 @@ struct rpc_credops {
extern const struct rpc_authops authunix_ops;
extern const struct rpc_authops authnull_ops;
+extern const struct rpc_authops authtls_ops;
int __init rpc_init_authunix(void);
int __init rpcauth_init_module(void);
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index f07c334c599f..f22bf915dcf6 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -1,22 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/******************************************************************************
(c) 2008 NetApp. All Rights Reserved.
-NetApp provides this source code under the GPL v2 License.
-The GPL v2 license is available at
-https://opensource.org/licenses/gpl-license.php.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
@@ -34,7 +20,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef CONFIG_SUNRPC_BACKCHANNEL
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid);
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
-void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task);
+void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task,
+ const struct rpc_timeout *to);
void xprt_free_bc_request(struct rpc_rqst *req);
int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
@@ -83,4 +70,3 @@ static inline void xprt_free_bc_request(struct rpc_rqst *req)
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
#endif /* _LINUX_SUNRPC_BC_XPRT_H */
-
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index d0965e2997b0..e783132e481f 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -14,6 +14,7 @@
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/atomic.h>
+#include <linux/kstrtox.h>
#include <linux/proc_fs.h>
/*
@@ -55,10 +56,14 @@ struct cache_head {
struct kref ref;
unsigned long flags;
};
-#define CACHE_VALID 0 /* Entry contains valid data */
-#define CACHE_NEGATIVE 1 /* Negative entry - there is no match for the key */
-#define CACHE_PENDING 2 /* An upcall has been sent but no reply received yet*/
-#define CACHE_CLEANED 3 /* Entry has been cleaned from cache */
+
+/* cache_head.flags */
+enum {
+ CACHE_VALID, /* Entry contains valid data */
+ CACHE_NEGATIVE, /* Negative entry - there is no match for the key */
+	CACHE_PENDING,	/* An upcall has been sent but no reply received yet */
+ CACHE_CLEANED, /* Entry has been cleaned from cache */
+};
#define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */
@@ -120,17 +125,17 @@ struct cache_detail {
struct net *net;
};
-
/* this must be embedded in any request structure that
* identifies an object that will want a callback on
* a cache fill
*/
struct cache_req {
struct cache_deferred_req *(*defer)(struct cache_req *req);
- int thread_wait; /* How long (jiffies) we can block the
- * current thread to wait for updates.
- */
+ unsigned long thread_wait; /* How long (jiffies) we can block the
+ * current thread to wait for updates.
+ */
};
+
/* this must be embedded in a deferred_request that is being
* delayed awaiting cache-fill
*/
@@ -217,6 +222,8 @@ static inline bool cache_is_expired(struct cache_detail *detail, struct cache_he
return detail->flush_time >= h->last_refresh;
}
+extern int cache_check_rcu(struct cache_detail *detail,
+ struct cache_head *h, struct cache_req *rqstp);
extern int cache_check(struct cache_detail *detail,
struct cache_head *h, struct cache_req *rqstp);
extern void cache_flush(void);
@@ -299,17 +306,18 @@ static inline int get_time(char **bpp, time64_t *time)
return 0;
}
-static inline time64_t get_expiry(char **bpp)
+static inline int get_expiry(char **bpp, time64_t *rvp)
{
- time64_t rv;
+ int error;
struct timespec64 boot;
- if (get_time(bpp, &rv))
- return 0;
- if (rv < 0)
- return 0;
+ error = get_time(bpp, rvp);
+ if (error)
+ return error;
+
getboottime64(&boot);
- return rv - boot.tv_sec;
+ (*rvp) -= boot.tv_sec;
+ return 0;
}
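Callers must now check the return value rather than treating an expiry of 0 as failure; a hedged sketch of the updated pattern (demo_parse() is an invented name):

#include <linux/sunrpc/cache.h>

static int demo_parse(char **bpp)
{
	time64_t expiry;
	int err;

	/* On success, *bpp is advanced and expiry is boot-relative. */
	err = get_expiry(bpp, &expiry);
	if (err)
		return err;
	return 0;
}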
#endif /* _LINUX_SUNRPC_CACHE_H_ */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 02e7a5863d28..f8b406b0a1af 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -14,6 +14,7 @@
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
+#include <linux/refcount.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/sched.h>
@@ -29,15 +30,23 @@
#include <linux/sunrpc/xprtmultipath.h>
struct rpc_inode;
+struct rpc_sysfs_client {
+ struct kobject kobject;
+ struct net *net;
+ struct rpc_clnt *clnt;
+ struct rpc_xprt_switch *xprt_switch;
+};
+
/*
* The high-level client handle
*/
struct rpc_clnt {
- atomic_t cl_count; /* Number of references */
+ refcount_t cl_count; /* Number of references */
unsigned int cl_clid; /* client id */
struct list_head cl_clients; /* Global list of clients */
struct list_head cl_tasks; /* List of tasks */
+ atomic_t cl_pid; /* task PID counter */
spinlock_t cl_lock; /* spinlock */
struct rpc_xprt __rcu * cl_xprt; /* transport */
const struct rpc_procinfo *cl_procinfo; /* procedure info */
@@ -54,7 +63,11 @@ struct rpc_clnt {
cl_discrtry : 1,/* disconnect before retry */
cl_noretranstimeo: 1,/* No retransmit timeouts */
cl_autobind : 1,/* use getport() */
- cl_chatty : 1;/* be verbose */
+ cl_chatty : 1,/* be verbose */
+ cl_shutdown : 1,/* rpc immediate -EIO */
+ cl_netunreach_fatal : 1;
+ /* Treat ENETUNREACH errors as fatal */
+ struct xprtsec_parms cl_xprtsec; /* transport security policy */
struct rpc_rtt * cl_rtt; /* RTO estimator data */
const struct rpc_timeout *cl_timeout; /* Timeout strategy */
@@ -71,6 +84,7 @@ struct rpc_clnt {
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct dentry *cl_debugfs; /* debugfs directory */
#endif
+ struct rpc_sysfs_client *cl_sysfs; /* sysfs directory */
/* cl_work is only needed after cl_xpi is no longer used,
* and that are of similar size
*/
@@ -79,6 +93,9 @@ struct rpc_clnt {
struct work_struct cl_work;
};
const struct cred *cl_cred;
+ unsigned int cl_max_connect; /* max number of transports not to the same IP */
+ struct super_block *pipefs_sb;
+ atomic_t cl_task_count;
};
/*
@@ -125,6 +142,7 @@ struct rpc_create_args {
const char *servername;
const char *nodename;
const struct rpc_program *program;
+ struct rpc_stat *stats;
u32 prognumber; /* overrides program->number */
u32 version;
rpc_authflavor_t authflavor;
@@ -133,6 +151,10 @@ struct rpc_create_args {
char *client_name;
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
const struct cred *cred;
+ unsigned int max_connect;
+ struct xprtsec_parms xprtsec;
+ unsigned long connect_timeout;
+ unsigned long reconnect_timeout;
};
struct rpc_add_xprt_test {
@@ -154,6 +176,8 @@ struct rpc_add_xprt_test {
#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
#define RPC_CLNT_CREATE_SOFTERR (1UL << 10)
#define RPC_CLNT_CREATE_REUSEPORT (1UL << 11)
+#define RPC_CLNT_CREATE_CONNECTED (1UL << 12)
+#define RPC_CLNT_CREATE_NETUNREACH_FATAL (1UL << 13)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
@@ -227,13 +251,18 @@ int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *,
struct rpc_xprt_switch *,
struct rpc_xprt *,
void *);
+void rpc_clnt_manage_trunked_xprts(struct rpc_clnt *);
+void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *,
+ struct rpc_add_xprt_test *);
const char *rpc_proc_name(const struct rpc_task *task);
-void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
+void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *, struct rpc_xprt *);
bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
const struct sockaddr *sap);
+void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt);
+void rpc_clnt_disconnect(struct rpc_clnt *clnt);
void rpc_cleanup_clids(void);
static inline int rpc_reply_expected(struct rpc_task *task)
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index f6aeed07fe04..891f6173c951 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -23,43 +23,30 @@ extern unsigned int nlm_debug;
#define dprintk(fmt, ...) \
dfprintk(FACILITY, fmt, ##__VA_ARGS__)
-#define dprintk_cont(fmt, ...) \
- dfprintk_cont(FACILITY, fmt, ##__VA_ARGS__)
#define dprintk_rcu(fmt, ...) \
dfprintk_rcu(FACILITY, fmt, ##__VA_ARGS__)
-#define dprintk_rcu_cont(fmt, ...) \
- dfprintk_rcu_cont(FACILITY, fmt, ##__VA_ARGS__)
#undef ifdebug
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac))
-# define dfprintk(fac, fmt, ...) \
-do { \
- ifdebug(fac) \
- printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \
-} while (0)
+# if IS_ENABLED(CONFIG_SUNRPC_DEBUG_TRACE)
+# define __sunrpc_printk(fmt, ...) trace_printk(fmt, ##__VA_ARGS__)
+# else
+# define __sunrpc_printk(fmt, ...) printk(KERN_DEFAULT fmt, ##__VA_ARGS__)
+# endif
-# define dfprintk_cont(fac, fmt, ...) \
+# define dfprintk(fac, fmt, ...) \
do { \
ifdebug(fac) \
- printk(KERN_CONT fmt, ##__VA_ARGS__); \
+ __sunrpc_printk(fmt, ##__VA_ARGS__); \
} while (0)
# define dfprintk_rcu(fac, fmt, ...) \
do { \
ifdebug(fac) { \
rcu_read_lock(); \
- printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \
- rcu_read_unlock(); \
- } \
-} while (0)
-
-# define dfprintk_rcu_cont(fac, fmt, ...) \
-do { \
- ifdebug(fac) { \
- rcu_read_lock(); \
- printk(KERN_CONT fmt, ##__VA_ARGS__); \
+ __sunrpc_printk(fmt, ##__VA_ARGS__); \
rcu_read_unlock(); \
} \
} while (0)
@@ -68,7 +55,6 @@ do { \
#else
# define ifdebug(fac) if (0)
# define dfprintk(fac, fmt, ...) do {} while (0)
-# define dfprintk_cont(fac, fmt, ...) do {} while (0)
# define dfprintk_rcu(fac, fmt, ...) do {} while (0)
# define RPC_IFDEBUG(x)
#endif
diff --git a/include/linux/sunrpc/gss_asn1.h b/include/linux/sunrpc/gss_asn1.h
deleted file mode 100644
index 3ccecd0ad229..000000000000
--- a/include/linux/sunrpc/gss_asn1.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * linux/include/linux/sunrpc/gss_asn1.h
- *
- * minimal asn1 for generic encoding/decoding of gss tokens
- *
- * Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h,
- * lib/gssapi/krb5/gssapiP_krb5.h, and others
- *
- * Copyright (c) 2000 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Andy Adamson <andros@umich.edu>
- */
-
-/*
- * Copyright 1995 by the Massachusetts Institute of Technology.
- * All Rights Reserved.
- *
- * Export of this software from the United States of America may
- * require a specific license from the United States Government.
- * It is the responsibility of any person or organization contemplating
- * export to obtain such a license before exporting.
- *
- * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
- * distribute this software and its documentation for any purpose and
- * without fee is hereby granted, provided that the above copyright
- * notice appear in all copies and that both that copyright notice and
- * this permission notice appear in supporting documentation, and that
- * the name of M.I.T. not be used in advertising or publicity pertaining
- * to distribution of the software without specific, written prior
- * permission. Furthermore if you modify this software you must label
- * your software as modified software and not distribute it in such a
- * fashion that it might be confused with the original M.I.T. software.
- * M.I.T. makes no representations about the suitability of
- * this software for any purpose. It is provided "as is" without express
- * or implied warranty.
- *
- */
-
-
-#include <linux/sunrpc/gss_api.h>
-
-#define SIZEOF_INT 4
-
-/* from gssapi_err_generic.h */
-#define G_BAD_SERVICE_NAME (-2045022976L)
-#define G_BAD_STRING_UID (-2045022975L)
-#define G_NOUSER (-2045022974L)
-#define G_VALIDATE_FAILED (-2045022973L)
-#define G_BUFFER_ALLOC (-2045022972L)
-#define G_BAD_MSG_CTX (-2045022971L)
-#define G_WRONG_SIZE (-2045022970L)
-#define G_BAD_USAGE (-2045022969L)
-#define G_UNKNOWN_QOP (-2045022968L)
-#define G_NO_HOSTNAME (-2045022967L)
-#define G_BAD_HOSTNAME (-2045022966L)
-#define G_WRONG_MECH (-2045022965L)
-#define G_BAD_TOK_HEADER (-2045022964L)
-#define G_BAD_DIRECTION (-2045022963L)
-#define G_TOK_TRUNC (-2045022962L)
-#define G_REFLECT (-2045022961L)
-#define G_WRONG_TOKID (-2045022960L)
-
-#define g_OID_equal(o1,o2) \
- (((o1)->len == (o2)->len) && \
- (memcmp((o1)->data,(o2)->data,(int) (o1)->len) == 0))
-
-u32 g_verify_token_header(
- struct xdr_netobj *mech,
- int *body_size,
- unsigned char **buf_in,
- int toksize);
-
-int g_token_size(
- struct xdr_netobj *mech,
- unsigned int body_size);
-
-void g_make_token_header(
- struct xdr_netobj *mech,
- int body_size,
- unsigned char **buf);
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index 91f43d86879d..43950b5237c8 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -1,6 +1,4 @@
/*
- * linux/include/linux/sunrpc/gss_krb5_types.h
- *
* Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h,
* lib/gssapi/krb5/gssapiP_krb5.h, and others
*
@@ -36,88 +34,25 @@
*
*/
+#ifndef _LINUX_SUNRPC_GSS_KRB5_H
+#define _LINUX_SUNRPC_GSS_KRB5_H
+
#include <crypto/skcipher.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/gss_err.h>
-#include <linux/sunrpc/gss_asn1.h>
/* Length of constant used in key derivation */
#define GSS_KRB5_K5CLENGTH (5)
-/* Maximum key length (in bytes) for the supported crypto algorithms*/
+/* Maximum key length (in bytes) for the supported crypto algorithms */
#define GSS_KRB5_MAX_KEYLEN (32)
-/* Maximum checksum function output for the supported crypto algorithms */
-#define GSS_KRB5_MAX_CKSUM_LEN (20)
+/* Maximum checksum function output for the supported enctypes */
+#define GSS_KRB5_MAX_CKSUM_LEN (24)
/* Maximum blocksize for the supported crypto algorithms */
#define GSS_KRB5_MAX_BLOCKSIZE (16)
-struct krb5_ctx;
-
-struct gss_krb5_enctype {
- const u32 etype; /* encryption (key) type */
- const u32 ctype; /* checksum type */
- const char *name; /* "friendly" name */
- const char *encrypt_name; /* crypto encrypt name */
- const char *cksum_name; /* crypto checksum name */
- const u16 signalg; /* signing algorithm */
- const u16 sealalg; /* sealing algorithm */
- const u32 blocksize; /* encryption blocksize */
- const u32 conflen; /* confounder length
- (normally the same as
- the blocksize) */
- const u32 cksumlength; /* checksum length */
- const u32 keyed_cksum; /* is it a keyed cksum? */
- const u32 keybytes; /* raw key len, in bytes */
- const u32 keylength; /* final key len, in bytes */
- u32 (*encrypt) (struct crypto_sync_skcipher *tfm,
- void *iv, void *in, void *out,
- int length); /* encryption function */
- u32 (*decrypt) (struct crypto_sync_skcipher *tfm,
- void *iv, void *in, void *out,
- int length); /* decryption function */
- u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
- struct xdr_netobj *in,
- struct xdr_netobj *out); /* complete key generation */
- u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
- struct xdr_buf *buf,
- struct page **pages); /* v2 encryption function */
- u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len,
- struct xdr_buf *buf, u32 *headskip,
- u32 *tailskip); /* v2 decryption function */
-};
-
-/* krb5_ctx flags definitions */
-#define KRB5_CTX_FLAG_INITIATOR 0x00000001
-#define KRB5_CTX_FLAG_CFX 0x00000002
-#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY 0x00000004
-
-struct krb5_ctx {
- int initiate; /* 1 = initiating, 0 = accepting */
- u32 enctype;
- u32 flags;
- const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
- struct crypto_sync_skcipher *enc;
- struct crypto_sync_skcipher *seq;
- struct crypto_sync_skcipher *acceptor_enc;
- struct crypto_sync_skcipher *initiator_enc;
- struct crypto_sync_skcipher *acceptor_enc_aux;
- struct crypto_sync_skcipher *initiator_enc_aux;
- u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
- u8 cksum[GSS_KRB5_MAX_KEYLEN];
- atomic_t seq_send;
- atomic64_t seq_send64;
- time64_t endtime;
- struct xdr_netobj mech_used;
- u8 initiator_sign[GSS_KRB5_MAX_KEYLEN];
- u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN];
- u8 initiator_seal[GSS_KRB5_MAX_KEYLEN];
- u8 acceptor_seal[GSS_KRB5_MAX_KEYLEN];
- u8 initiator_integ[GSS_KRB5_MAX_KEYLEN];
- u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN];
-};
-
/* The length of the Kerberos GSS token header */
#define GSS_KRB5_TOK_HDR_LEN (16)
@@ -150,6 +85,12 @@ enum seal_alg {
SEAL_ALG_DES3KD = 0x0002
};
+/*
+ * These values are assigned by IANA and published via the
+ * subregistry at the link below:
+ *
+ * https://www.iana.org/assignments/kerberos-parameters/kerberos-parameters.xhtml#kerberos-parameters-2
+ */
#define CKSUMTYPE_CRC32 0x0001
#define CKSUMTYPE_RSA_MD4 0x0002
#define CKSUMTYPE_RSA_MD4_DES 0x0003
@@ -160,6 +101,10 @@ enum seal_alg {
#define CKSUMTYPE_HMAC_SHA1_DES3 0x000c
#define CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f
#define CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010
+#define CKSUMTYPE_CMAC_CAMELLIA128 0x0011
+#define CKSUMTYPE_CMAC_CAMELLIA256 0x0012
+#define CKSUMTYPE_HMAC_SHA256_128_AES128 0x0013
+#define CKSUMTYPE_HMAC_SHA384_192_AES256 0x0014
#define CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */
/* from gssapi_err_krb5.h */
@@ -180,6 +125,11 @@ enum seal_alg {
/* per Kerberos v5 protocol spec crypto types from the wire.
* these get mapped to linux kernel crypto routines.
+ *
+ * These values are assigned by IANA and published via the
+ * subregistry at the link below:
+ *
+ * https://www.iana.org/assignments/kerberos-parameters/kerberos-parameters.xhtml#kerberos-parameters-1
*/
#define ENCTYPE_NULL 0x0000
#define ENCTYPE_DES_CBC_CRC 0x0001 /* DES cbc mode with CRC-32 */
@@ -193,8 +143,12 @@ enum seal_alg {
#define ENCTYPE_DES3_CBC_SHA1 0x0010
#define ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011
#define ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012
+#define ENCTYPE_AES128_CTS_HMAC_SHA256_128 0x0013
+#define ENCTYPE_AES256_CTS_HMAC_SHA384_192 0x0014
#define ENCTYPE_ARCFOUR_HMAC 0x0017
#define ENCTYPE_ARCFOUR_HMAC_EXP 0x0018
+#define ENCTYPE_CAMELLIA128_CTS_CMAC 0x0019
+#define ENCTYPE_CAMELLIA256_CTS_CMAC 0x001A
#define ENCTYPE_UNKNOWN 0x01ff
/*
@@ -216,103 +170,4 @@ enum seal_alg {
#define KG_USAGE_INITIATOR_SEAL (24)
#define KG_USAGE_INITIATOR_SIGN (25)
-/*
- * This compile-time check verifies that we will not exceed the
- * slack space allotted by the client and server auth_gss code
- * before they call gss_wrap().
- */
-#define GSS_KRB5_MAX_SLACK_NEEDED \
- (GSS_KRB5_TOK_HDR_LEN /* gss token header */ \
- + GSS_KRB5_MAX_CKSUM_LEN /* gss token checksum */ \
- + GSS_KRB5_MAX_BLOCKSIZE /* confounder */ \
- + GSS_KRB5_MAX_BLOCKSIZE /* possible padding */ \
- + GSS_KRB5_TOK_HDR_LEN /* encrypted hdr in v2 token */\
- + GSS_KRB5_MAX_CKSUM_LEN /* encryption hmac */ \
- + 4 + 4 /* RPC verifier */ \
- + GSS_KRB5_TOK_HDR_LEN \
- + GSS_KRB5_MAX_CKSUM_LEN)
-
-u32
-make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
- struct xdr_buf *body, int body_offset, u8 *cksumkey,
- unsigned int usage, struct xdr_netobj *cksumout);
-
-u32
-make_checksum_v2(struct krb5_ctx *, char *header, int hdrlen,
- struct xdr_buf *body, int body_offset, u8 *key,
- unsigned int usage, struct xdr_netobj *cksum);
-
-u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
- struct xdr_netobj *);
-
-u32 gss_verify_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
- struct xdr_netobj *);
-
-u32
-gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
- struct xdr_buf *outbuf, struct page **pages);
-
-u32
-gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len,
- struct xdr_buf *buf);
-
-
-u32
-krb5_encrypt(struct crypto_sync_skcipher *key,
- void *iv, void *in, void *out, int length);
-
-u32
-krb5_decrypt(struct crypto_sync_skcipher *key,
- void *iv, void *in, void *out, int length);
-
-int
-gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *outbuf,
- int offset, struct page **pages);
-
-int
-gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *inbuf,
- int offset);
-
-s32
-krb5_make_seq_num(struct krb5_ctx *kctx,
- struct crypto_sync_skcipher *key,
- int direction,
- u32 seqnum, unsigned char *cksum, unsigned char *buf);
-
-s32
-krb5_get_seq_num(struct krb5_ctx *kctx,
- unsigned char *cksum,
- unsigned char *buf, int *direction, u32 *seqnum);
-
-int
-xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen);
-
-u32
-krb5_derive_key(const struct gss_krb5_enctype *gk5e,
- const struct xdr_netobj *inkey,
- struct xdr_netobj *outkey,
- const struct xdr_netobj *in_constant,
- gfp_t gfp_mask);
-
-u32
-gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e,
- struct xdr_netobj *randombits,
- struct xdr_netobj *key);
-
-u32
-gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e,
- struct xdr_netobj *randombits,
- struct xdr_netobj *key);
-
-u32
-gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
- struct xdr_buf *buf,
- struct page **pages);
-
-u32
-gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
- struct xdr_buf *buf, u32 *plainoffset,
- u32 *plainlen);
-
-void
-gss_krb5_make_confounder(char *p, u32 conflen);
+#endif /* _LINUX_SUNRPC_GSS_KRB5_H */
diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h
deleted file mode 100644
index 87eea679d750..000000000000
--- a/include/linux/sunrpc/gss_krb5_enctypes.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Define the string that exports the set of kernel-supported
- * Kerberos enctypes. This list is sent via upcall to gssd, and
- * is also exposed via the nfsd /proc API. The consumers generally
- * treat this as an ordered list, where the first item in the list
- * is the most preferred.
- */
-
-#ifndef _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H
-#define _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H
-
-#ifdef CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES
-
-/*
- * NB: This list includes DES3_CBC_SHA1, which was deprecated by RFC 8429.
- *
- * ENCTYPE_AES256_CTS_HMAC_SHA1_96
- * ENCTYPE_AES128_CTS_HMAC_SHA1_96
- * ENCTYPE_DES3_CBC_SHA1
- */
-#define KRB5_SUPPORTED_ENCTYPES "18,17,16"
-
-#else /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */
-
-/*
- * NB: This list includes encryption types that were deprecated
- * by RFC 8429 and RFC 6649.
- *
- * ENCTYPE_AES256_CTS_HMAC_SHA1_96
- * ENCTYPE_AES128_CTS_HMAC_SHA1_96
- * ENCTYPE_DES3_CBC_SHA1
- * ENCTYPE_DES_CBC_MD5
- * ENCTYPE_DES_CBC_CRC
- * ENCTYPE_DES_CBC_MD4
- */
-#define KRB5_SUPPORTED_ENCTYPES "18,17,16,3,1,2"
-
-#endif /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */
-
-#endif /* _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H */
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index 938c2bf29db8..ada17b57ca44 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -20,6 +20,7 @@ enum rpc_auth_flavors {
RPC_AUTH_DES = 3,
RPC_AUTH_KRB = 4,
RPC_AUTH_GSS = 6,
+ RPC_AUTH_TLS = 7,
RPC_AUTH_MAXFLAVOR = 8,
/* pseudoflavors: */
RPC_AUTH_GSS_KRB5 = 390003,
@@ -33,6 +34,11 @@ enum rpc_auth_flavors {
RPC_AUTH_GSS_SPKMP = 390011,
};
+/* Maximum size (in octets) of the machinename in an AUTH_UNIX
+ * credential (per RFC 5531 Appendix A)
+ */
+#define RPC_MAX_MACHINENAME (255)
+
/* Maximum size (in bytes) of an rpc credential or verifier */
#define RPC_MAX_AUTH_SIZE (400)
@@ -63,15 +69,17 @@ enum rpc_reject_stat {
};
enum rpc_auth_stat {
- RPC_AUTH_OK = 0,
- RPC_AUTH_BADCRED = 1,
- RPC_AUTH_REJECTEDCRED = 2,
- RPC_AUTH_BADVERF = 3,
- RPC_AUTH_REJECTEDVERF = 4,
- RPC_AUTH_TOOWEAK = 5,
+ RPC_AUTH_OK = 0, /* success */
+ RPC_AUTH_BADCRED = 1, /* bad credential (seal broken) */
+ RPC_AUTH_REJECTEDCRED = 2, /* client must begin new session */
+ RPC_AUTH_BADVERF = 3, /* bad verifier (seal broken) */
+ RPC_AUTH_REJECTEDVERF = 4, /* verifier expired or replayed */
+ RPC_AUTH_TOOWEAK = 5, /* rejected for security reasons */
+ RPC_AUTH_INVALIDRESP = 6, /* bogus response verifier */
+ RPC_AUTH_FAILED = 7, /* reason unknown */
/* RPCSEC_GSS errors */
- RPCSEC_GSS_CREDPROBLEM = 13,
- RPCSEC_GSS_CTXPROBLEM = 14
+ RPCSEC_GSS_CREDPROBLEM = 13, /* no credentials for user */
+ RPCSEC_GSS_CTXPROBLEM = 14 /* problem with context */
};
#define RPC_MAXNETNAMELEN 256
diff --git a/include/linux/sunrpc/rdma_rn.h b/include/linux/sunrpc/rdma_rn.h
new file mode 100644
index 000000000000..7d032ca057af
--- /dev/null
+++ b/include/linux/sunrpc/rdma_rn.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024, Oracle and/or its affiliates.
+ */
+
+#ifndef _LINUX_SUNRPC_RDMA_RN_H
+#define _LINUX_SUNRPC_RDMA_RN_H
+
+#include <rdma/ib_verbs.h>
+
+/**
+ * rpcrdma_notification - request removal notification
+ */
+struct rpcrdma_notification {
+ void (*rn_done)(struct rpcrdma_notification *rn);
+ u32 rn_index;
+};
+
+int rpcrdma_rn_register(struct ib_device *device,
+ struct rpcrdma_notification *rn,
+ void (*done)(struct rpcrdma_notification *rn));
+void rpcrdma_rn_unregister(struct ib_device *device,
+ struct rpcrdma_notification *rn);
+int rpcrdma_ib_client_register(void);
+void rpcrdma_ib_client_unregister(void);
+
+#endif /* _LINUX_SUNRPC_RDMA_RN_H */
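A minimal, hypothetical consumer of this notification API (my_done(), my_rn and watch_device() are invented names):

#include <linux/sunrpc/rdma_rn.h>

static void my_done(struct rpcrdma_notification *rn)
{
	/* The ib_device is about to be removed: release dependent state. */
}

static struct rpcrdma_notification my_rn;

static int watch_device(struct ib_device *device)
{
	/* Request a callback when @device is removed. */
	return rpcrdma_rn_register(device, &my_rn, my_done);
}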
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index cd188a527d16..2cb406f8ff4e 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -92,8 +92,13 @@ extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *,
char __user *, size_t);
extern int rpc_queue_upcall(struct rpc_pipe *, struct rpc_pipe_msg *);
+/* returns true if the msg is in-flight, i.e., already eaten by the peer */
+static inline bool rpc_msg_is_inflight(const struct rpc_pipe_msg *msg) {
+ return (msg->copied != 0 && list_empty(&msg->list));
+}
+
struct rpc_clnt;
-extern struct dentry *rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *);
+extern int rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *);
extern int rpc_remove_client_dir(struct rpc_clnt *);
extern void rpc_init_pipe_dir_head(struct rpc_pipe_dir_head *pdh);
@@ -122,9 +127,9 @@ extern void rpc_remove_cache_dir(struct dentry *);
struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags);
void rpc_destroy_pipe_data(struct rpc_pipe *pipe);
-extern struct dentry *rpc_mkpipe_dentry(struct dentry *, const char *, void *,
+extern int rpc_mkpipe_dentry(struct dentry *, const char *, void *,
struct rpc_pipe *);
-extern int rpc_unlink(struct dentry *);
+extern void rpc_unlink(struct rpc_pipe *);
extern int register_rpc_pipefs(void);
extern void unregister_rpc_pipefs(void);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index df696efdd675..ccba79ebf893 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -38,6 +38,17 @@ struct rpc_wait {
};
/*
+ * This describes a timeout strategy
+ */
+struct rpc_timeout {
+ unsigned long to_initval, /* initial timeout */
+ to_maxval, /* max timeout */
+ to_increment; /* if !exponential */
+ unsigned int to_retries; /* max # of retries */
+ unsigned char to_exponential;
+};
+
+/*
* This is the RPC task struct
*/
struct rpc_task {
@@ -61,8 +72,6 @@ struct rpc_task {
struct rpc_wait tk_wait; /* RPC wait */
} u;
- int tk_rpc_status; /* Result of last RPC operation */
-
/*
* RPC call state
*/
@@ -82,6 +91,8 @@ struct rpc_task {
ktime_t tk_start; /* RPC task init timestamp */
pid_t tk_owner; /* Process id for batching tasks */
+
+ int tk_rpc_status; /* Result of last RPC operation */
unsigned short tk_flags; /* misc flags */
unsigned short tk_timeouts; /* maj timeouts */
@@ -90,8 +101,7 @@ struct rpc_task {
#endif
unsigned char tk_priority : 2,/* Task priority */
tk_garb_retry : 2,
- tk_cred_retry : 2,
- tk_rebind_retry : 2;
+ tk_cred_retry : 2;
};
typedef void (*rpc_action)(struct rpc_task *);
@@ -121,9 +131,10 @@ struct rpc_task_setup {
*/
#define RPC_TASK_ASYNC 0x0001 /* is an async task */
#define RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */
+#define RPC_TASK_MOVEABLE 0x0004 /* nfs4.1+ rpc tasks */
#define RPC_TASK_NULLCREDS 0x0010 /* Use AUTH_NULL credential */
#define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */
-#define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */
+#define RPC_TASK_NETUNREACH_FATAL 0x0040 /* ENETUNREACH is fatal */
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
#define RPC_TASK_NO_ROUND_ROBIN 0x0100 /* send requests on "main" xprt */
#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
@@ -139,38 +150,28 @@ struct rpc_task_setup {
#define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT))
#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT)
+#define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE)
+
+enum {
+ RPC_TASK_RUNNING,
+ RPC_TASK_QUEUED,
+ RPC_TASK_ACTIVE,
+ RPC_TASK_NEED_XMIT,
+ RPC_TASK_NEED_RECV,
+ RPC_TASK_MSG_PIN_WAIT,
+};
-#define RPC_TASK_RUNNING 0
-#define RPC_TASK_QUEUED 1
-#define RPC_TASK_ACTIVE 2
-#define RPC_TASK_NEED_XMIT 3
-#define RPC_TASK_NEED_RECV 4
-#define RPC_TASK_MSG_PIN_WAIT 5
-#define RPC_TASK_SIGNALLED 6
-
-#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
-#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_test_and_set_running(t) \
test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
-#define rpc_clear_running(t) \
- do { \
- smp_mb__before_atomic(); \
- clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
- smp_mb__after_atomic(); \
- } while (0)
+#define rpc_clear_running(t) clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
-#define rpc_clear_queued(t) \
- do { \
- smp_mb__before_atomic(); \
- clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
- smp_mb__after_atomic(); \
- } while (0)
+#define rpc_clear_queued(t) clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
-#define RPC_SIGNALLED(t) test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate)
+#define RPC_SIGNALLED(t)	(READ_ONCE((t)->tk_rpc_status) == -ERESTARTSYS)
/*
* Task priorities.
@@ -198,7 +199,7 @@ struct rpc_wait_queue {
unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
unsigned char priority; /* current priority */
unsigned char nr; /* # tasks remaining for cookie */
- unsigned short qlen; /* total # tasks waiting in queue */
+ unsigned int qlen; /* total # tasks waiting in queue */
struct rpc_timer timer_list;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
const char * name;
@@ -217,14 +218,21 @@ struct rpc_wait_queue {
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
-struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
+ struct rpc_timeout *timeout);
void rpc_put_task(struct rpc_task *);
void rpc_put_task_async(struct rpc_task *);
+bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status);
+void rpc_task_try_cancel(struct rpc_task *task, int error);
void rpc_signal_task(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
void rpc_exit(struct rpc_task *, int);
void rpc_release_calldata(const struct rpc_call_ops *, void *);
void rpc_killall_tasks(struct rpc_clnt *);
+unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error,
+ bool (*fnmatch)(const struct rpc_task *,
+ const void *),
+ const void *data);
void rpc_execute(struct rpc_task *);
void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
void rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
@@ -263,7 +271,7 @@ int rpc_malloc(struct rpc_task *);
void rpc_free(struct rpc_task *);
int rpciod_up(void);
void rpciod_down(void);
-int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
+int rpc_wait_for_completion_task(struct rpc_task *task);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct net;
void rpc_show_tasks(struct net *);
@@ -273,11 +281,7 @@ void rpc_destroy_mempool(void);
extern struct workqueue_struct *rpciod_workqueue;
extern struct workqueue_struct *xprtiod_workqueue;
void rpc_prepare_task(struct rpc_task *task);
-
-static inline int rpc_wait_for_completion_task(struct rpc_task *task)
-{
- return __rpc_wait_for_completion_task(task, NULL);
-}
+gfp_t rpc_task_gfp_mask(void);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static inline const char * rpc_qname(const struct rpc_wait_queue *q)
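
The new rpc_cancel_tasks() generalises rpc_killall_tasks(): instead of
terminating every task on the client, it applies a caller-supplied predicate.
A hedged sketch follows; the predicate, the -EIO error, and the helper names
are illustrative only.

	/* Sketch: cancel only the asynchronous tasks on a client. The
	 * fnmatch callback sees each task plus the opaque @data cookie;
	 * the return value is the number of matching tasks that were
	 * cancelled.
	 */
	static bool example_match_async(const struct rpc_task *task,
					const void *data)
	{
		return task->tk_flags & RPC_TASK_ASYNC;
	}

	static unsigned long example_cancel_async(struct rpc_clnt *clnt)
	{
		return rpc_cancel_tasks(clnt, -EIO, example_match_async, NULL);
	}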
diff --git a/include/linux/sunrpc/stats.h b/include/linux/sunrpc/stats.h
index d94d4f410507..3ce1550d1beb 100644
--- a/include/linux/sunrpc/stats.h
+++ b/include/linux/sunrpc/stats.h
@@ -43,22 +43,6 @@ struct net;
#ifdef CONFIG_PROC_FS
int rpc_proc_init(struct net *);
void rpc_proc_exit(struct net *);
-#else
-static inline int rpc_proc_init(struct net *net)
-{
- return 0;
-}
-
-static inline void rpc_proc_exit(struct net *net)
-{
-}
-#endif
-
-#ifdef MODULE
-void rpc_modcount(struct inode *, int);
-#endif
-
-#ifdef CONFIG_PROC_FS
struct proc_dir_entry * rpc_proc_register(struct net *,struct rpc_stat *);
void rpc_proc_unregister(struct net *,const char *);
void rpc_proc_zero(const struct rpc_program *);
@@ -69,7 +53,14 @@ void svc_proc_unregister(struct net *, const char *);
void svc_seq_show(struct seq_file *,
const struct svc_stat *);
#else
+static inline int rpc_proc_init(struct net *net)
+{
+ return 0;
+}
+static inline void rpc_proc_exit(struct net *net)
+{
+}
static inline struct proc_dir_entry *rpc_proc_register(struct net *net, struct rpc_stat *s) { return NULL; }
static inline void rpc_proc_unregister(struct net *net, const char *p) {}
static inline void rpc_proc_zero(const struct rpc_program *p) {}
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index e91d51ea028b..5506d20857c3 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -17,16 +17,11 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
+#include <linux/lwq.h>
#include <linux/wait.h>
#include <linux/mm.h>
-
-/* statistics for svc_pool structures */
-struct svc_pool_stats {
- atomic_long_t packets;
- unsigned long sockets_queued;
- atomic_long_t threads_woken;
- atomic_long_t threads_timedout;
-};
+#include <linux/pagevec.h>
+#include <linux/kthread.h>
/*
*
@@ -39,37 +34,28 @@ struct svc_pool_stats {
* node traffic on multi-node NUMA NFS servers.
*/
struct svc_pool {
- unsigned int sp_id; /* pool id; also node id on NUMA */
- spinlock_t sp_lock; /* protects all fields */
- struct list_head sp_sockets; /* pending sockets */
+ unsigned int sp_id; /* pool id; also node id on NUMA */
+ struct lwq sp_xprts; /* pending transports */
unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
- struct svc_pool_stats sp_stats; /* statistics on pool operation */
-#define SP_TASK_PENDING (0) /* still work to do even if no
- * xprt is queued. */
-#define SP_CONGESTED (1)
- unsigned long sp_flags;
-} ____cacheline_aligned_in_smp;
-
-struct svc_serv;
-
-struct svc_serv_ops {
- /* Callback to use when last thread exits. */
- void (*svo_shutdown)(struct svc_serv *, struct net *);
-
- /* function for service threads to run */
- int (*svo_function)(void *);
+ struct llist_head sp_idle_threads; /* idle server threads */
- /* queue up a transport for servicing */
- void (*svo_enqueue_xprt)(struct svc_xprt *);
+ /* statistics on pool operation */
+ struct percpu_counter sp_messages_arrived;
+ struct percpu_counter sp_sockets_queued;
+ struct percpu_counter sp_threads_woken;
- /* set up thread (or whatever) execution context */
- int (*svo_setup)(struct svc_serv *, struct svc_pool *, int);
+ unsigned long sp_flags;
+} ____cacheline_aligned_in_smp;
- /* optional module to count when adding threads (pooled svcs only) */
- struct module *svo_module;
+/* bits for sp_flags */
+enum {
+ SP_TASK_PENDING, /* still work to do even if no xprt is queued */
+ SP_NEED_VICTIM, /* One thread needs to agree to exit */
+ SP_VICTIM_REMAINS, /* One thread needs to actually exit */
};
+
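Replacing the sp_sockets list and its spinlock with a struct lwq (the
light-weight queue from <linux/lwq.h>) takes the pool-wide lock out of the
transport hand-off path: enqueue is lock-free from any context. A sketch of
the producer/consumer pattern, assuming the lwq_enqueue()/lwq_dequeue() API
from that header:

	/* Producer: queue a transport that has pending work. */
	static void example_enqueue(struct svc_pool *pool, struct svc_xprt *xprt)
	{
		lwq_enqueue(&xprt->xpt_ready, &pool->sp_xprts);
	}

	/* Consumer: lwq_dequeue() returns the containing structure,
	 * or NULL when the queue is empty.
	 */
	static struct svc_xprt *example_dequeue(struct svc_pool *pool)
	{
		return lwq_dequeue(&pool->sp_xprts, struct svc_xprt, xpt_ready);
	}
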
/*
* RPC service.
*
@@ -81,48 +67,41 @@ struct svc_serv_ops {
* We currently do not support more than one RPC program per daemon.
*/
struct svc_serv {
- struct svc_program * sv_program; /* RPC program */
+ struct svc_program * sv_programs; /* RPC programs */
struct svc_stat * sv_stats; /* RPC statistics */
spinlock_t sv_lock;
+ unsigned int sv_nprogs; /* Number of sv_programs */
unsigned int sv_nrthreads; /* # of server threads */
- unsigned int sv_maxconn; /* max connections allowed or
- * '0' causing max to be based
- * on number of threads. */
-
unsigned int sv_max_payload; /* datagram payload size */
unsigned int sv_max_mesg; /* max_payload + 1 page for overheads */
unsigned int sv_xdrsize; /* XDR buffer size */
struct list_head sv_permsocks; /* all permanent sockets */
struct list_head sv_tempsocks; /* all temporary sockets */
- int sv_tmpcnt; /* count of temporary sockets */
+	int			sv_tmpcnt;	/* count of temporary sockets not yet "valid" */
struct timer_list sv_temptimer; /* timer for aging temporary sockets */
char * sv_name; /* service name */
unsigned int sv_nrpools; /* number of thread pools */
+ bool sv_is_pooled; /* is this a pooled service? */
struct svc_pool * sv_pools; /* array of thread pools */
- const struct svc_serv_ops *sv_ops; /* server operations */
+ int (*sv_threadfn)(void *data);
+
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
- struct list_head sv_cb_list; /* queue for callback requests
+ struct lwq sv_cb_list; /* queue for callback requests
* that arrive over the same
* connection */
- spinlock_t sv_cb_lock; /* protects the svc_cb_list */
- wait_queue_head_t sv_cb_waitq; /* sleep here if there are no
- * entries in the svc_cb_list */
bool sv_bc_enabled; /* service uses backchannel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
-/*
- * We use sv_nrthreads as a reference count. svc_destroy() drops
- * this refcount, so we need to bump it up around operations that
- * change the number of threads. Horrible, but there it is.
- * Should be called with the "service mutex" held.
- */
-static inline void svc_get(struct svc_serv *serv)
-{
- serv->sv_nrthreads++;
-}
+/* This is used by pool_stats to find and lock an svc */
+struct svc_info {
+ struct svc_serv *serv;
+ struct mutex *mutex;
+};
+
+void svc_destroy(struct svc_serv **svcp);
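
Passing a struct svc_serv ** lets svc_destroy() clear the caller's pointer as
part of teardown, closing the window for use-after-free. A minimal sketch,
assuming svc_destroy() NULLs *svcp (the wrapper itself is illustrative):

	static void example_shutdown(struct svc_serv **servp)
	{
		if (*servp) {
			svc_destroy(servp);
			/* *servp is expected to be NULL here, so a stale
			 * pointer can no longer be dereferenced.
			 */
		}
	}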
/*
* Maximum payload size supported by a kernel RPC server.
@@ -140,28 +119,27 @@ static inline void svc_get(struct svc_serv *serv)
* Linux limit; someone who cares more about NFS/UDP performance
* can test a larger number.
*
- * For TCP transports we have more freedom. A size of 1MB is
- * chosen to match the client limit. Other OSes are known to
- * have larger limits, but those numbers are probably beyond
- * the point of diminishing returns.
+ * For non-UDP transports we have more freedom. A size of 4MB is
+ * chosen to accommodate clients that support larger I/O sizes.
*/
-#define RPCSVC_MAXPAYLOAD (1*1024*1024u)
-#define RPCSVC_MAXPAYLOAD_TCP RPCSVC_MAXPAYLOAD
-#define RPCSVC_MAXPAYLOAD_UDP (32*1024u)
+enum {
+ RPCSVC_MAXPAYLOAD = 4 * 1024 * 1024,
+ RPCSVC_MAXPAYLOAD_TCP = RPCSVC_MAXPAYLOAD,
+ RPCSVC_MAXPAYLOAD_UDP = 32 * 1024,
+};
extern u32 svc_max_payload(const struct svc_rqst *rqstp);
/*
- * RPC Requsts and replies are stored in one or more pages.
+ * RPC Requests and replies are stored in one or more pages.
* We maintain an array of pages for each server thread.
* Requests are copied into these pages as they arrive. Remaining
* pages are available to write the reply into.
*
- * Pages are sent using ->sendpage so each server thread needs to
- * allocate more to replace those used in sending. To help keep track
- * of these pages we have a receive list where all pages initialy live,
- * and a send list where pages are moved to when there are to be part
- * of a reply.
+ * Pages are sent using ->sendmsg with MSG_SPLICE_PAGES so each server thread
+ * needs to allocate more to replace those used in sending. To help keep track
+ * needs to allocate more to replace those used in sending. To help keep track
+ * of these pages we have a receive list where all pages initially live, and a
+ * send list where pages are moved to when they are to be part of a reply.
*
* We use xdr_buf for holding responses as it fits well with NFS
* read responses (that have a header, and some data pages, and possibly
@@ -172,54 +150,23 @@ extern u32 svc_max_payload(const struct svc_rqst *rqstp);
* list. xdr_buf.tail points to the end of the first page.
* This assumes that the non-page part of an rpc reply will fit
* in a page - NFSd ensures this. lockd also has no trouble.
- *
- * Each request/reply pair can have at most one "payload", plus two pages,
- * one for the request, and one for the reply.
- * We using ->sendfile to return read data, we might need one extra page
- * if the request is not page-aligned. So add another '1'.
*/
-#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
- + 2 + 1)
-
-static inline u32 svc_getnl(struct kvec *iov)
-{
- __be32 val, *vp;
- vp = iov->iov_base;
- val = *vp++;
- iov->iov_base = (void*)vp;
- iov->iov_len -= sizeof(__be32);
- return ntohl(val);
-}
-static inline void svc_putnl(struct kvec *iov, u32 val)
-{
- __be32 *vp = iov->iov_base + iov->iov_len;
- *vp = htonl(val);
- iov->iov_len += sizeof(__be32);
-}
-
-static inline __be32 svc_getu32(struct kvec *iov)
-{
- __be32 val, *vp;
- vp = iov->iov_base;
- val = *vp++;
- iov->iov_base = (void*)vp;
- iov->iov_len -= sizeof(__be32);
- return val;
-}
-
-static inline void svc_ungetu32(struct kvec *iov)
-{
- __be32 *vp = (__be32 *)iov->iov_base;
- iov->iov_base = (void *)(vp - 1);
- iov->iov_len += sizeof(*vp);
-}
-
-static inline void svc_putu32(struct kvec *iov, __be32 val)
+/**
+ * svc_serv_maxpages - maximum count of pages needed for one RPC message
+ * @serv: RPC service context
+ *
+ * Returns a count of pages or vectors that can hold the maximum
+ * size RPC message for @serv.
+ *
+ * Each request/reply pair can have at most one "payload", plus two
+ * pages, one for the request, and one for the reply.
+ * nfsd_splice_actor() might need an extra page when a READ payload
+ * is not page-aligned.
+ */
+static inline unsigned long svc_serv_maxpages(const struct svc_serv *serv)
{
- __be32 *vp = iov->iov_base + iov->iov_len;
- *vp = val;
- iov->iov_len += sizeof(__be32);
+ return DIV_ROUND_UP(serv->sv_max_mesg, PAGE_SIZE) + 2 + 1;
}
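
With the fixed-size RPCSVC_MAXPAGES arrays gone, per-thread buffers are sized
at runtime from svc_serv_maxpages(). A hedged sketch of how rq_pages and
rq_bvec might be sized; the allocation details are illustrative, not the
actual svc_rqst setup code.

	static int example_alloc_rqst_arrays(struct svc_serv *serv,
					     struct svc_rqst *rqstp, int node)
	{
		unsigned long pages = svc_serv_maxpages(serv);

		rqstp->rq_maxpages = pages;
		rqstp->rq_pages = kcalloc_node(pages, sizeof(struct page *),
					       GFP_KERNEL, node);
		rqstp->rq_bvec = kcalloc_node(pages, sizeof(struct bio_vec),
					      GFP_KERNEL, node);
		if (!rqstp->rq_pages || !rqstp->rq_bvec)
			return -ENOMEM;	/* caller frees whatever succeeded */
		return 0;
	}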
/*
@@ -228,6 +175,7 @@ static inline void svc_putu32(struct kvec *iov, __be32 val)
*/
struct svc_rqst {
struct list_head rq_all; /* all threads list */
+ struct llist_node rq_idle; /* On the idle list */
struct rcu_head rq_rcu_head; /* for RCU deferred kfree */
struct svc_xprt * rq_xprt; /* transport ptr */
@@ -245,19 +193,19 @@ struct svc_rqst {
void * rq_xprt_ctxt; /* transport specific context ptr */
struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */
- size_t rq_xprt_hlen; /* xprt header len */
struct xdr_buf rq_arg;
struct xdr_stream rq_arg_stream;
struct xdr_stream rq_res_stream;
- struct page *rq_scratch_page;
+ struct folio *rq_scratch_folio;
struct xdr_buf rq_res;
- struct page *rq_pages[RPCSVC_MAXPAGES + 1];
+ unsigned long rq_maxpages; /* num of entries in rq_pages */
+ struct page * *rq_pages;
struct page * *rq_respages; /* points into rq_pages */
struct page * *rq_next_page; /* next reply page to use */
struct page * *rq_page_end; /* one past the last page */
- struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
- struct bio_vec rq_bvec[RPCSVC_MAXPAGES];
+ struct folio_batch rq_fbatch;
+ struct bio_vec *rq_bvec;
__be32 rq_xid; /* transmission id */
u32 rq_prog; /* program number */
@@ -265,23 +213,14 @@ struct svc_rqst {
u32 rq_proc; /* procedure number */
u32 rq_prot; /* IP protocol */
int rq_cachetype; /* catering to nfsd */
-#define RQ_SECURE (0) /* secure port */
-#define RQ_LOCAL (1) /* local request */
-#define RQ_USEDEFERRAL (2) /* use deferral */
-#define RQ_DROPME (3) /* drop current reply */
-#define RQ_SPLICE_OK (4) /* turned off in gss privacy
- * to prevent encrypting page
- * cache pages */
-#define RQ_VICTIM (5) /* about to be shut down */
-#define RQ_BUSY (6) /* request is busy */
-#define RQ_DATA (7) /* request has data */
-#define RQ_AUTHERR (8) /* Request status is auth error */
unsigned long rq_flags; /* flags field */
ktime_t rq_qtime; /* enqueue time */
void * rq_argp; /* decoded arguments */
void * rq_resp; /* xdr'd results */
+ __be32 *rq_accept_statp;
void * rq_auth_data; /* flavor-specific data */
+ __be32 rq_auth_stat; /* authentication status */
int rq_auth_slack; /* extra space xdr code
* should leave in head
* for krb5i, krb5p.
@@ -297,13 +236,29 @@ struct svc_rqst {
/* Catering to nfsd */
struct auth_domain * rq_client; /* RPC peer info */
struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
- struct svc_cacherep * rq_cacherep; /* cache info */
struct task_struct *rq_task; /* service thread */
- spinlock_t rq_lock; /* per-request lock */
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
*/
- void ** rq_lease_breaker; /* The v4 client breaking a lease */
+
+	int			rq_err;		/* Thread sets this to indicate
+ * initialisation success.
+ */
+
+ unsigned long bc_to_initval;
+ unsigned int bc_to_retries;
+ unsigned int rq_status_counter; /* RPC processing counter */
+ void **rq_lease_breaker; /* The v4 client breaking a lease */
+};
+
+/* bits for rq_flags */
+enum {
+ RQ_SECURE, /* secure port */
+ RQ_LOCAL, /* local request */
+ RQ_USEDEFERRAL, /* use deferral */
+ RQ_DROPME, /* drop current reply */
+ RQ_VICTIM, /* Have agreed to shut down */
+ RQ_DATA, /* request has data */
};
#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
@@ -341,38 +296,46 @@ static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
return (struct sockaddr *) &rqst->rq_daddr;
}
-/*
- * Check buffer bounds after decoding arguments
+/**
+ * svc_thread_should_stop - check if this thread should stop
+ * @rqstp: the thread that might need to stop
+ *
+ * To stop an svc thread, the pool flags SP_NEED_VICTIM and SP_VICTIM_REMAINS
+ * are set. The first thread which sees SP_NEED_VICTIM clears it, becoming
+ * the victim using this function. It should then promptly call
+ * svc_exit_thread() to complete the process, clearing SP_VICTIM_REMAINS
+ * so the task waiting for a thread to exit can wake and continue.
+ *
+ * Return values:
+ * %true: caller should invoke svc_exit_thread()
+ * %false: caller should do nothing
*/
-static inline int
-xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
+static inline bool svc_thread_should_stop(struct svc_rqst *rqstp)
{
- char *cp = (char *)p;
- struct kvec *vec = &rqstp->rq_arg.head[0];
- return cp >= (char*)vec->iov_base
- && cp <= (char*)vec->iov_base + vec->iov_len;
-}
+ if (test_and_clear_bit(SP_NEED_VICTIM, &rqstp->rq_pool->sp_flags))
+ set_bit(RQ_VICTIM, &rqstp->rq_flags);
-static inline int
-xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
-{
- struct kvec *vec = &rqstp->rq_res.head[0];
- char *cp = (char*)p;
-
- vec->iov_len = cp - (char*)vec->iov_base;
-
- return vec->iov_len <= PAGE_SIZE;
+ return test_bit(RQ_VICTIM, &rqstp->rq_flags);
}
-static inline void svc_free_res_pages(struct svc_rqst *rqstp)
+/**
+ * svc_thread_init_status - report whether thread has initialised successfully
+ * @rqstp: the thread in question
+ * @err: errno code
+ *
+ * After performing any initialisation that could fail, and before starting
+ * normal work, each sunrpc svc_thread must call svc_thread_init_status()
+ * with an appropriate error, or zero.
+ *
+ * If zero is passed, the thread is ready and must continue until
+ * svc_thread_should_stop() returns true. If a non-zero error is passed
+ * the call will not return - the thread will exit.
+ */
+static inline void svc_thread_init_status(struct svc_rqst *rqstp, int err)
{
- while (rqstp->rq_next_page != rqstp->rq_respages) {
- struct page **pp = --rqstp->rq_next_page;
- if (*pp) {
- put_page(*pp);
- *pp = NULL;
- }
- }
+ store_release_wake_up(&rqstp->rq_err, err);
+ if (err)
+ kthread_exit(1);
}
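
Taken together, svc_thread_init_status() and svc_thread_should_stop() define
the lifecycle every svc thread now follows. A hedged skeleton of a thread
function as passed to svc_create_pooled(); my_setup() stands in for
service-specific initialisation and is not a real API.

	static int example_svc_thread(void *data)
	{
		struct svc_rqst *rqstp = data;
		int err;

		err = my_setup(rqstp);			/* may fail */
		svc_thread_init_status(rqstp, err);	/* no return if err != 0 */

		while (!svc_thread_should_stop(rqstp))
			svc_recv(rqstp);	/* wait for and handle one RPC */

		svc_exit_thread(rqstp);		/* clears SP_VICTIM_REMAINS */
		return 0;
	}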
struct svc_deferred_req {
@@ -382,15 +345,15 @@ struct svc_deferred_req {
size_t addrlen;
struct sockaddr_storage daddr; /* where reply must come from */
size_t daddrlen;
+ void *xprt_ctxt;
struct cache_deferred_req handle;
- size_t xprt_hlen;
int argslen;
__be32 args[];
};
struct svc_process_info {
union {
- int (*dispatch)(struct svc_rqst *, __be32 *);
+ int (*dispatch)(struct svc_rqst *rqstp);
struct {
unsigned int lovers;
unsigned int hivers;
@@ -399,10 +362,9 @@ struct svc_process_info {
};
/*
- * List of RPC programs on the same transport endpoint
+ * RPC program - an array of these can use the same transport endpoint
*/
struct svc_program {
- struct svc_program * pg_next; /* other programs (same xprt) */
u32 pg_prog; /* program number */
unsigned int pg_lovers; /* lowest version */
unsigned int pg_hivers; /* highest version */
@@ -410,8 +372,7 @@ struct svc_program {
const struct svc_version **pg_vers; /* version array */
char * pg_name; /* service name */
char * pg_class; /* class name: services sharing authentication */
- struct svc_stat * pg_stats; /* rpc statistics */
- int (*pg_authenticate)(struct svc_rqst *);
+ enum svc_auth_status (*pg_authenticate)(struct svc_rqst *rqstp);
__be32 (*pg_init_request)(struct svc_rqst *,
const struct svc_program *,
struct svc_process_info *);
@@ -429,7 +390,7 @@ struct svc_version {
u32 vs_vers; /* version number */
u32 vs_nproc; /* number of procedures */
const struct svc_procedure *vs_proc; /* per-procedure info */
- unsigned int *vs_count; /* call counts */
+ unsigned long __percpu *vs_count; /* call counts */
u32 vs_xdrsize; /* xdrsize needed for this version */
/* Don't register with rpcbind */
@@ -441,11 +402,8 @@ struct svc_version {
/* Need xprt with congestion control */
bool vs_need_cong_ctrl;
- /* Override dispatch function (e.g. when caching replies).
- * A return value of 0 means drop the request.
- * vs_dispatch == NULL means use default dispatcher.
- */
- int (*vs_dispatch)(struct svc_rqst *, __be32 *);
+ /* Dispatch function */
+ int (*vs_dispatch)(struct svc_rqst *rqstp);
};
/*
@@ -455,12 +413,15 @@ struct svc_procedure {
/* process the request: */
__be32 (*pc_func)(struct svc_rqst *);
/* XDR decode args: */
- int (*pc_decode)(struct svc_rqst *, __be32 *data);
+ bool (*pc_decode)(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr);
/* XDR encode result: */
- int (*pc_encode)(struct svc_rqst *, __be32 *data);
+ bool (*pc_encode)(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr);
/* XDR free result: */
void (*pc_release)(struct svc_rqst *);
unsigned int pc_argsize; /* argument struct size */
+ unsigned int pc_argzero; /* how much of argument to clear */
unsigned int pc_ressize; /* result struct size */
unsigned int pc_cachetype; /* cache info (NFS) */
unsigned int pc_xdrressize; /* maximum size of XDR reply */
@@ -468,71 +429,42 @@ struct svc_procedure {
};
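
The new pc_argzero field lets a procedure clear only the prefix of its
argument struct that must start zeroed, rather than memset()ing the whole
pc_argsize buffer. A hedged sketch of a procedure table entry; every
"example_" identifier is illustrative.

	static const struct svc_procedure example_proc = {
		.pc_func	= example_handler,
		.pc_decode	= example_decode_args,	/* bool (rqstp, xdr) */
		.pc_encode	= example_encode_res,	/* bool (rqstp, xdr) */
		.pc_release	= example_release,
		.pc_argsize	= sizeof(struct example_args),
		.pc_argzero	= sizeof(struct example_args),	/* may be smaller */
		.pc_ressize	= sizeof(struct example_res),
		.pc_xdrressize	= 1,
	};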
/*
- * Mode for mapping cpus to pools.
- */
-enum {
- SVC_POOL_AUTO = -1, /* choose one of the others */
- SVC_POOL_GLOBAL, /* no mapping, just a single global pool
- * (legacy & UP mode) */
- SVC_POOL_PERCPU, /* one pool per cpu */
- SVC_POOL_PERNODE /* one pool per numa node */
-};
-
-struct svc_pool_map {
- int count; /* How many svc_servs use us */
- int mode; /* Note: int not enum to avoid
- * warnings about "enumeration value
- * not handled in switch" */
- unsigned int npools;
- unsigned int *pool_to; /* maps pool id to cpu or node */
- unsigned int *to_pool; /* maps cpu or node to pool id */
-};
-
-extern struct svc_pool_map svc_pool_map;
-
-/*
* Function prototypes.
*/
-int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
+int sunrpc_set_pool_mode(const char *val);
+int sunrpc_get_pool_mode(char *val, size_t size);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
- const struct svc_serv_ops *);
-struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
- struct svc_pool *pool, int node);
-struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
- struct svc_pool *pool, int node);
-void svc_rqst_free(struct svc_rqst *);
+ int (*threadfn)(void *data));
+bool svc_rqst_replace_page(struct svc_rqst *rqstp,
+ struct page *page);
+void svc_rqst_release_pages(struct svc_rqst *rqstp);
void svc_exit_thread(struct svc_rqst *);
-unsigned int svc_pool_map_get(void);
-void svc_pool_map_put(void);
-struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
- const struct svc_serv_ops *);
+struct svc_serv * svc_create_pooled(struct svc_program *prog,
+ unsigned int nprog,
+ struct svc_stat *stats,
+ unsigned int bufsize,
+ int (*threadfn)(void *data));
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
-int svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int);
-int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
-void svc_destroy(struct svc_serv *);
-void svc_shutdown_net(struct svc_serv *, struct net *);
-int svc_process(struct svc_rqst *);
-int bc_svc_process(struct svc_serv *, struct rpc_rqst *,
- struct svc_rqst *);
+int svc_pool_stats_open(struct svc_info *si, struct file *file);
+void svc_process(struct svc_rqst *rqstp);
+void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
int svc_register(const struct svc_serv *, struct net *, const int,
const unsigned short, const unsigned short);
void svc_wake_up(struct svc_serv *);
void svc_reserve(struct svc_rqst *rqstp, int space);
-struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu);
+void svc_pool_wake_idle_thread(struct svc_pool *pool);
+struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv);
char * svc_print_addr(struct svc_rqst *, char *, size_t);
+const char * svc_proc_name(const struct svc_rqst *rqstp);
int svc_encode_result_payload(struct svc_rqst *rqstp,
unsigned int offset,
unsigned int length);
-unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
- struct page **pages,
- struct kvec *first, size_t total);
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
struct kvec *first, void *p,
size_t total);
-__be32 svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err);
__be32 svc_generic_init_request(struct svc_rqst *rqstp,
const struct svc_program *progp,
struct svc_process_info *procinfo);
@@ -541,11 +473,6 @@ int svc_generic_rpcbind_set(struct net *net,
u32 version, int family,
unsigned short proto,
unsigned short port);
-int svc_rpcbind_set_version(struct net *net,
- const struct svc_program *progp,
- u32 version, int family,
- unsigned short proto,
- unsigned short port);
#define RPC_MAX_ADDRBUFLEN (63U)
@@ -562,17 +489,21 @@ static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
}
/**
- * svcxdr_init_decode - Prepare an xdr_stream for svc Call decoding
+ * svcxdr_init_decode - Prepare an xdr_stream for Call decoding
* @rqstp: controlling server RPC transaction context
*
*/
static inline void svcxdr_init_decode(struct svc_rqst *rqstp)
{
struct xdr_stream *xdr = &rqstp->rq_arg_stream;
- struct kvec *argv = rqstp->rq_arg.head;
+ struct xdr_buf *buf = &rqstp->rq_arg;
+ struct kvec *argv = buf->head;
+
+ WARN_ON(buf->len != buf->head->iov_len + buf->page_len + buf->tail->iov_len);
+ buf->len = buf->head->iov_len + buf->page_len + buf->tail->iov_len;
- xdr_init_decode(xdr, &rqstp->rq_arg, argv->iov_base, NULL);
- xdr_set_scratch_page(xdr, rqstp->rq_scratch_page);
+ xdr_init_decode(xdr, buf, argv->iov_base, NULL);
+ xdr_set_scratch_folio(xdr, rqstp->rq_scratch_folio);
}
/**
@@ -591,12 +522,74 @@ static inline void svcxdr_init_encode(struct svc_rqst *rqstp)
xdr->buf = buf;
xdr->iov = resv;
xdr->p = resv->iov_base + resv->iov_len;
- xdr->end = resv->iov_base + PAGE_SIZE - rqstp->rq_auth_slack;
+ xdr->end = resv->iov_base + PAGE_SIZE;
buf->len = resv->iov_len;
xdr->page_ptr = buf->pages - 1;
- buf->buflen = PAGE_SIZE * (1 + rqstp->rq_page_end - buf->pages);
- buf->buflen -= rqstp->rq_auth_slack;
+ buf->buflen = PAGE_SIZE * (rqstp->rq_page_end - buf->pages);
xdr->rqst = NULL;
}
+/**
+ * svcxdr_encode_opaque_pages - Insert pages into an xdr_stream
+ * @rqstp: RPC transaction context
+ * @xdr: xdr_stream to be updated
+ * @pages: array of pages to insert
+ * @base: starting offset of first data byte in @pages
+ * @len: number of data bytes in @pages to insert
+ *
+ * After the @pages are added, the tail iovec is instantiated pointing
+ * to end of the head buffer, and the stream is set up to encode
+ * subsequent items into the tail.
+ */
+static inline void svcxdr_encode_opaque_pages(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct page **pages,
+ unsigned int base,
+ unsigned int len)
+{
+ xdr_write_pages(xdr, pages, base, len);
+ xdr->page_ptr = rqstp->rq_next_page - 1;
+}
+
+/**
+ * svcxdr_set_auth_slack - Reserve response buffer space for the auth flavor
+ * @rqstp: RPC transaction
+ * @slack: buffer space to reserve for the transaction's security flavor
+ *
+ * Set the request's slack space requirement, and set aside that much
+ * space in the rqstp's rq_res.head for use when the auth wraps the Reply.
+ */
+static inline void svcxdr_set_auth_slack(struct svc_rqst *rqstp, int slack)
+{
+ struct xdr_stream *xdr = &rqstp->rq_res_stream;
+ struct xdr_buf *buf = &rqstp->rq_res;
+ struct kvec *resv = buf->head;
+
+ rqstp->rq_auth_slack = slack;
+
+ xdr->end -= XDR_QUADLEN(slack);
+ buf->buflen -= rqstp->rq_auth_slack;
+
+ WARN_ON(xdr->iov != resv);
+ WARN_ON(xdr->p > xdr->end);
+}
+
+/**
+ * svcxdr_set_accept_stat - Reserve space for the accept_stat field
+ * @rqstp: RPC transaction context
+ *
+ * Return values:
+ * %true: Success
+ * %false: No response buffer space was available
+ */
+static inline bool svcxdr_set_accept_stat(struct svc_rqst *rqstp)
+{
+ struct xdr_stream *xdr = &rqstp->rq_res_stream;
+
+ rqstp->rq_accept_statp = xdr_reserve_space(xdr, XDR_UNIT);
+ if (unlikely(!rqstp->rq_accept_statp))
+ return false;
+ *rqstp->rq_accept_statp = rpc_success;
+ return true;
+}
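+
+/* Editor's sketch, not part of the patch: svcxdr_set_accept_stat()
+ * reserves the Reply's accept_stat slot up front so the dispatcher can
+ * backfill it once the outcome is known. The helpers below illustrate
+ * that reserve-then-patch pattern.
+ *
+ *	static bool example_begin_reply(struct svc_rqst *rqstp)
+ *	{
+ *		if (!svcxdr_set_accept_stat(rqstp))
+ *			return false;	// no response buffer space
+ *		// ... decode arguments and run the procedure ...
+ *		return true;
+ *	}
+ *
+ *	static void example_garbage_args(struct svc_rqst *rqstp)
+ *	{
+ *		// Overwrite the field reserved above.
+ *		*rqstp->rq_accept_statp = rpc_garbage_args;
+ *	}
+ */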
+
#endif /* SUNRPC_SVC_H */
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 3184465de3a0..57f4fd94166a 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -48,6 +48,7 @@
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/sunrpc/svc_rdma_pcl.h>
+#include <linux/sunrpc/rdma_rn.h>
#include <linux/percpu_counter.h>
#include <rdma/ib_verbs.h>
@@ -65,6 +66,7 @@ extern unsigned int svcrdma_ord;
extern unsigned int svcrdma_max_requests;
extern unsigned int svcrdma_max_bc_requests;
extern unsigned int svcrdma_max_req_size;
+extern struct workqueue_struct *svcrdma_wq;
extern struct percpu_counter svcrdma_stat_read;
extern struct percpu_counter svcrdma_stat_recv;
@@ -75,6 +77,7 @@ struct svcxprt_rdma {
struct svc_xprt sc_xprt; /* SVC transport structure */
struct rdma_cm_id *sc_cm_id; /* RDMA connection id */
struct list_head sc_accept_q; /* Conn. waiting accept */
+ struct rpcrdma_notification sc_rn; /* removal notification */
int sc_ord; /* RDMA read limit */
int sc_max_send_sges;
bool sc_snd_w_inv; /* OK to use Send With Invalidate */
@@ -90,13 +93,14 @@ struct svcxprt_rdma {
struct ib_pd *sc_pd;
spinlock_t sc_send_lock;
- struct list_head sc_send_ctxts;
+ struct llist_head sc_send_ctxts;
spinlock_t sc_rw_ctxt_lock;
- struct list_head sc_rw_ctxts;
+ struct llist_head sc_rw_ctxts;
u32 sc_pending_recvs;
u32 sc_recv_batch;
struct list_head sc_rq_dto_q;
+ struct list_head sc_read_complete_q;
spinlock_t sc_rq_dto_lock;
struct ib_qp *sc_qp;
struct ib_cq *sc_rq_cq;
@@ -115,17 +119,61 @@ struct svcxprt_rdma {
/* sc_flags */
#define RDMAXPRT_CONN_PENDING 3
+static inline struct svcxprt_rdma *svc_rdma_rqst_rdma(struct svc_rqst *rqstp)
+{
+ struct svc_xprt *xprt = rqstp->rq_xprt;
+
+ return container_of(xprt, struct svcxprt_rdma, sc_xprt);
+}
+
/*
* Default connection parameters
*/
enum {
RPCRDMA_LISTEN_BACKLOG = 10,
- RPCRDMA_MAX_REQUESTS = 64,
+ RPCRDMA_MAX_REQUESTS = 128,
RPCRDMA_MAX_BC_REQUESTS = 2,
};
#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
+/**
+ * svc_rdma_recv_cid_init - Initialize a Receive Queue completion ID
+ * @rdma: controlling transport
+ * @cid: completion ID to initialize
+ */
+static inline void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
+ struct rpc_rdma_cid *cid)
+{
+ cid->ci_queue_id = rdma->sc_rq_cq->res.id;
+ cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
+/**
+ * svc_rdma_send_cid_init - Initialize a Send Queue completion ID
+ * @rdma: controlling transport
+ * @cid: completion ID to initialize
+ */
+static inline void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,
+ struct rpc_rdma_cid *cid)
+{
+ cid->ci_queue_id = rdma->sc_sq_cq->res.id;
+ cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
+/*
+ * A chunk context tracks all I/O for moving one Read or Write
+ * chunk. This is a set of rdma_rw's that handle data movement
+ * for all segments of one chunk.
+ */
+struct svc_rdma_chunk_ctxt {
+ struct rpc_rdma_cid cc_cid;
+ struct ib_cqe cc_cqe;
+ struct list_head cc_rwctxts;
+ ktime_t cc_posttime;
+ int cc_sqecount;
+};
+
struct svc_rdma_recv_ctxt {
struct llist_node rc_node;
struct list_head rc_list;
@@ -135,32 +183,70 @@ struct svc_rdma_recv_ctxt {
struct ib_sge rc_recv_sge;
void *rc_recv_buf;
struct xdr_stream rc_stream;
- bool rc_temp;
u32 rc_byte_len;
- unsigned int rc_page_count;
u32 rc_inv_rkey;
__be32 rc_msgtype;
+ /* State for pulling a Read chunk */
+ unsigned int rc_pageoff;
+ unsigned int rc_curpage;
+ unsigned int rc_readbytes;
+ struct xdr_buf rc_saved_arg;
+ struct svc_rdma_chunk_ctxt rc_cc;
+
struct svc_rdma_pcl rc_call_pcl;
struct svc_rdma_pcl rc_read_pcl;
struct svc_rdma_chunk *rc_cur_result_payload;
struct svc_rdma_pcl rc_write_pcl;
struct svc_rdma_pcl rc_reply_pcl;
+
+ unsigned int rc_page_count;
+ unsigned long rc_maxpages;
+ struct page *rc_pages[] __counted_by(rc_maxpages);
+};
+
+/*
+ * State for sending a Write chunk.
+ * - Tracks progress of writing one chunk over all its segments
+ * - Stores arguments for the SGL constructor functions
+ */
+struct svc_rdma_write_info {
+ struct svcxprt_rdma *wi_rdma;
+
+ const struct svc_rdma_chunk *wi_chunk;
+
+ /* write state of this chunk */
+ unsigned int wi_seg_off;
+ unsigned int wi_seg_no;
+
+ /* SGL constructor arguments */
+ const struct xdr_buf *wi_xdr;
+ unsigned char *wi_base;
+ unsigned int wi_next_off;
+
+ struct svc_rdma_chunk_ctxt wi_cc;
+ struct work_struct wi_work;
};
struct svc_rdma_send_ctxt {
- struct list_head sc_list;
+ struct llist_node sc_node;
struct rpc_rdma_cid sc_cid;
+ struct work_struct sc_work;
+ struct svcxprt_rdma *sc_rdma;
struct ib_send_wr sc_send_wr;
+ struct ib_send_wr *sc_wr_chain;
+ int sc_sqecount;
struct ib_cqe sc_cqe;
- struct completion sc_done;
struct xdr_buf sc_hdrbuf;
struct xdr_stream sc_stream;
+ struct svc_rdma_write_info sc_reply_info;
void *sc_xprt_buf;
+ int sc_page_count;
int sc_cur_sge_no;
-
+ unsigned long sc_maxpages;
+ struct page **sc_pages;
struct ib_sge sc_sges[];
};
@@ -176,17 +262,28 @@ extern struct svc_rdma_recv_ctxt *
extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
struct svc_rdma_recv_ctxt *ctxt);
extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
-extern void svc_rdma_release_rqst(struct svc_rqst *rqstp);
+extern void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *ctxt);
extern int svc_rdma_recvfrom(struct svc_rqst *);
/* svc_rdma_rw.c */
+extern void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
+ struct svc_rdma_chunk_ctxt *cc);
extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
-extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
- const struct svc_rdma_chunk *chunk,
- const struct xdr_buf *xdr);
-extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
- const struct svc_rdma_recv_ctxt *rctxt,
- const struct xdr_buf *xdr);
+extern void svc_rdma_cc_release(struct svcxprt_rdma *rdma,
+ struct svc_rdma_chunk_ctxt *cc,
+ enum dma_data_direction dir);
+extern void svc_rdma_reply_chunk_release(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *ctxt);
+extern int svc_rdma_send_write_list(struct svcxprt_rdma *rdma,
+ const struct svc_rdma_recv_ctxt *rctxt,
+ const struct xdr_buf *xdr);
+extern int svc_rdma_prepare_reply_chunk(struct svcxprt_rdma *rdma,
+ const struct svc_rdma_pcl *write_pcl,
+ const struct svc_rdma_pcl *reply_pcl,
+ struct svc_rdma_send_ctxt *sctxt,
+ const struct xdr_buf *xdr);
extern int svc_rdma_process_read_list(struct svcxprt_rdma *rdma,
struct svc_rqst *rqstp,
struct svc_rdma_recv_ctxt *head);
@@ -197,16 +294,18 @@ extern struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma);
extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *ctxt);
-extern int svc_rdma_send(struct svcxprt_rdma *rdma,
- struct svc_rdma_send_ctxt *ctxt);
+extern int svc_rdma_post_send(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *ctxt);
extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *sctxt,
- const struct svc_rdma_recv_ctxt *rctxt,
+ const struct svc_rdma_pcl *write_pcl,
+ const struct svc_rdma_pcl *reply_pcl,
const struct xdr_buf *xdr);
extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
struct svc_rdma_send_ctxt *sctxt,
struct svc_rdma_recv_ctxt *rctxt,
int status);
+extern void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail);
extern int svc_rdma_sendto(struct svc_rqst *);
extern int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset,
unsigned int length);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 571f605bc91e..da2a2531e110 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -23,11 +23,11 @@ struct svc_xprt_ops {
int (*xpo_sendto)(struct svc_rqst *);
int (*xpo_result_payload)(struct svc_rqst *, unsigned int,
unsigned int);
- void (*xpo_release_rqst)(struct svc_rqst *);
+ void (*xpo_release_ctxt)(struct svc_xprt *xprt, void *ctxt);
void (*xpo_detach)(struct svc_xprt *);
void (*xpo_free)(struct svc_xprt *);
- void (*xpo_secure_port)(struct svc_rqst *rqstp);
void (*xpo_kill_temp_xprt)(struct svc_xprt *);
+ void (*xpo_handshake)(struct svc_xprt *xprt);
};
struct svc_xprt_class {
@@ -53,23 +53,10 @@ struct svc_xprt {
struct svc_xprt_class *xpt_class;
const struct svc_xprt_ops *xpt_ops;
struct kref xpt_ref;
+ ktime_t xpt_qtime;
struct list_head xpt_list;
- struct list_head xpt_ready;
+ struct lwq_node xpt_ready;
unsigned long xpt_flags;
-#define XPT_BUSY 0 /* enqueued/receiving */
-#define XPT_CONN 1 /* conn pending */
-#define XPT_CLOSE 2 /* dead or dying */
-#define XPT_DATA 3 /* data pending */
-#define XPT_TEMP 4 /* connected transport */
-#define XPT_DEAD 6 /* transport closed */
-#define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */
-#define XPT_DEFERRED 8 /* deferred request pending */
-#define XPT_OLD 9 /* used for xprt aging mark+sweep */
-#define XPT_LISTENER 10 /* listening endpoint */
-#define XPT_CACHE_AUTH 11 /* cache auth info */
-#define XPT_LOCAL 12 /* connection from loopback interface */
-#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */
-#define XPT_CONG_CTRL 14 /* has congestion control */
struct svc_serv *xpt_server; /* service for transport */
atomic_t xpt_reserved; /* space on outq that is rsvd */
@@ -88,11 +75,58 @@ struct svc_xprt {
struct list_head xpt_users; /* callbacks on free */
struct net *xpt_net;
+ netns_tracker ns_tracker;
const struct cred *xpt_cred;
struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */
struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */
};
+/* flag bits for xpt_flags */
+enum {
+ XPT_BUSY, /* enqueued/receiving */
+ XPT_CONN, /* conn pending */
+ XPT_CLOSE, /* dead or dying */
+ XPT_DATA, /* data pending */
+ XPT_TEMP, /* connected transport */
+ XPT_DEAD, /* transport closed */
+ XPT_CHNGBUF, /* need to change snd/rcv buf sizes */
+ XPT_DEFERRED, /* deferred request pending */
+ XPT_OLD, /* used for xprt aging mark+sweep */
+ XPT_LISTENER, /* listening endpoint */
+ XPT_CACHE_AUTH, /* cache auth info */
+ XPT_LOCAL, /* connection from loopback interface */
+ XPT_KILL_TEMP, /* call xpo_kill_temp_xprt before closing */
+ XPT_CONG_CTRL, /* has congestion control */
+ XPT_HANDSHAKE, /* xprt requests a handshake */
+ XPT_TLS_SESSION, /* transport-layer security established */
+ XPT_PEER_AUTH, /* peer has been authenticated */
+ XPT_PEER_VALID, /* peer has presented a filehandle that
+ * it has access to. It is NOT counted
+ * in ->sv_tmpcnt.
+ */
+ XPT_RPCB_UNREG, /* transport that needs unregistering
+ * with rpcbind (TCP, UDP) on destroy
+ */
+};
+
+/*
+ * Maximum number of "tmp" connections - those without XPT_PEER_VALID -
+ * permitted on any service.
+ */
+#define XPT_MAX_TMP_CONN 64
+
+static inline void svc_xprt_set_valid(struct svc_xprt *xpt)
+{
+ if (test_bit(XPT_TEMP, &xpt->xpt_flags) &&
+ !test_and_set_bit(XPT_PEER_VALID, &xpt->xpt_flags)) {
+ struct svc_serv *serv = xpt->xpt_server;
+
+ spin_lock(&serv->sv_lock);
+ serv->sv_tmpcnt -= 1;
+ spin_unlock(&serv->sv_lock);
+ }
+}
+
static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
{
spin_lock(&xpt->xpt_lock);
@@ -127,17 +161,24 @@ int svc_reg_xprt_class(struct svc_xprt_class *);
void svc_unreg_xprt_class(struct svc_xprt_class *);
void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
struct svc_serv *);
-int svc_create_xprt(struct svc_serv *, const char *, struct net *,
- const int, const unsigned short, int,
- const struct cred *);
+int svc_xprt_create_from_sa(struct svc_serv *serv, const char *xprt_name,
+ struct net *net, struct sockaddr *sap,
+ int flags, const struct cred *cred);
+int svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
+ struct net *net, const int family,
+ const unsigned short port, int flags,
+ const struct cred *cred);
+void svc_xprt_destroy_all(struct svc_serv *serv, struct net *net,
+ bool unregister);
void svc_xprt_received(struct svc_xprt *xprt);
-void svc_xprt_do_enqueue(struct svc_xprt *xprt);
void svc_xprt_enqueue(struct svc_xprt *xprt);
void svc_xprt_put(struct svc_xprt *xprt);
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt);
-void svc_close_xprt(struct svc_xprt *xprt);
+void svc_xprt_close(struct svc_xprt *xprt);
int svc_port_is_privileged(struct sockaddr *sin);
int svc_print_xprts(char *buf, int maxlen);
+struct svc_xprt *svc_find_listener(struct svc_serv *serv, const char *xcl_name,
+ struct net *net, const struct sockaddr *sa);
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
struct net *net, const sa_family_t af,
const unsigned short port);
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index b0003866a249..4b92fec23a49 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -14,6 +14,7 @@
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/gss_api.h>
+#include <linux/sunrpc/clnt.h>
#include <linux/hash.h>
#include <linux/stringhash.h>
#include <linux/cred.h>
@@ -83,6 +84,18 @@ struct auth_domain {
struct rcu_head rcu_head;
};
+enum svc_auth_status {
+ SVC_GARBAGE = 1,
+ SVC_VALID,
+ SVC_NEGATIVE,
+ SVC_OK,
+ SVC_DROP,
+ SVC_CLOSE,
+ SVC_DENIED,
+ SVC_PENDING,
+ SVC_COMPLETE,
+};
+
/*
* Each authentication flavour registers an auth_ops
* structure.
@@ -98,6 +111,8 @@ struct auth_domain {
* is (probably) already in place. Certainly space is
* reserved for it.
* DROP - simply drop the request. It may have been deferred
+ * CLOSE - like SVC_DROP, but request is definitely lost.
+ * If there is a tcp connection, it should be closed.
* GARBAGE - rpc garbage_args error
* SYSERR - rpc system_err error
* DENIED - authp holds reason for denial.
@@ -111,60 +126,48 @@ struct auth_domain {
*
* release() is given a request after the procedure has been run.
* It should sign/encrypt the results if needed
- * It should return:
- * OK - the resbuf is ready to be sent
- * DROP - the reply should be quitely dropped
- * DENIED - authp holds a reason for MSG_DENIED
- * SYSERR - rpc system_err
*
* domain_release()
* This call releases a domain.
+ *
* set_client()
- * Givens a pending request (struct svc_rqst), finds and assigns
+ * Given a pending request (struct svc_rqst), finds and assigns
* an appropriate 'auth_domain' as the client.
+ *
+ * pseudoflavor()
+ * Returns RPC_AUTH pseudoflavor in use by @rqstp.
*/
struct auth_ops {
char * name;
struct module *owner;
int flavour;
- int (*accept)(struct svc_rqst *rq, __be32 *authp);
- int (*release)(struct svc_rqst *rq);
- void (*domain_release)(struct auth_domain *);
- int (*set_client)(struct svc_rqst *rq);
-};
-#define SVC_GARBAGE 1
-#define SVC_SYSERR 2
-#define SVC_VALID 3
-#define SVC_NEGATIVE 4
-#define SVC_OK 5
-#define SVC_DROP 6
-#define SVC_CLOSE 7 /* Like SVC_DROP, but request is definitely
- * lost so if there is a tcp connection, it
- * should be closed
- */
-#define SVC_DENIED 8
-#define SVC_PENDING 9
-#define SVC_COMPLETE 10
+ enum svc_auth_status (*accept)(struct svc_rqst *rqstp);
+ int (*release)(struct svc_rqst *rqstp);
+ void (*domain_release)(struct auth_domain *dom);
+ enum svc_auth_status (*set_client)(struct svc_rqst *rqstp);
+ rpc_authflavor_t (*pseudoflavor)(struct svc_rqst *rqstp);
+};
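
Because accept() and set_client() now return enum svc_auth_status, flavour
modules get compile-time checking of their status codes. A hedged skeleton of
a flavour registration; all "example_" identifiers are illustrative, and
svcauth_unix_set_client() is reused here only for the sketch.

	static enum svc_auth_status example_accept(struct svc_rqst *rqstp)
	{
		/* ... verify the credential and verifier ... */
		return SVC_OK;
	}

	static int example_release(struct svc_rqst *rqstp)
	{
		return 0;	/* nothing to sign or encrypt */
	}

	static struct auth_ops example_auth_ops = {
		.name		= "example",
		.owner		= THIS_MODULE,
		.flavour	= RPC_AUTH_NULL,	/* illustrative */
		.accept		= example_accept,
		.release	= example_release,
		.set_client	= svcauth_unix_set_client,
	};

	/* Typically from module init:
	 *	svc_auth_register(RPC_AUTH_NULL, &example_auth_ops);
	 */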
struct svc_xprt;
-extern int svc_authenticate(struct svc_rqst *rqstp, __be32 *authp);
+extern rpc_authflavor_t svc_auth_flavor(struct svc_rqst *rqstp);
extern int svc_authorise(struct svc_rqst *rqstp);
-extern int svc_set_client(struct svc_rqst *rqstp);
+extern enum svc_auth_status svc_set_client(struct svc_rqst *rqstp);
extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops);
extern void svc_auth_unregister(rpc_authflavor_t flavor);
+extern void svcauth_map_clnt_to_svc_cred_local(struct rpc_clnt *clnt,
+ const struct cred *,
+ struct svc_cred *);
+
extern struct auth_domain *unix_domain_find(char *name);
extern void auth_domain_put(struct auth_domain *item);
-extern int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom);
extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new);
extern struct auth_domain *auth_domain_find(char *name);
-extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr);
-extern int auth_unix_forget_old(struct auth_domain *dom);
extern void svcauth_unix_purge(struct net *net);
extern void svcauth_unix_info_release(struct svc_xprt *xpt);
-extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
+extern enum svc_auth_status svcauth_unix_set_client(struct svc_rqst *rqstp);
extern int unix_gid_cache_create(struct net *net);
extern void unix_gid_cache_destroy(struct net *net);
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index bcc555c7ae9c..de37069aba90 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -26,6 +26,9 @@ struct svc_sock {
void (*sk_odata)(struct sock *);
void (*sk_owspace)(struct sock *);
+ /* For sends (protected by xpt_mutex) */
+ struct bio_vec *sk_bvec;
+
/* private TCP part */
/* On-the-wire fragment header: */
__be32 sk_marker;
@@ -35,10 +38,14 @@ struct svc_sock {
/* Total length of the data (not including fragment headers)
* received so far in the fragments making up this rpc: */
u32 sk_datalen;
- /* Number of queued send requests */
- atomic_t sk_sendqlen;
- struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */
+ struct page_frag_cache sk_frag_cache;
+
+ struct completion sk_handshake_done;
+
+ /* received data */
+ unsigned long sk_maxpages;
+ struct page * sk_pages[] __counted_by(sk_maxpages);
};
static inline u32 svc_sock_reclen(struct svc_sock *svsk)
@@ -54,19 +61,13 @@ static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
/*
* Function prototypes.
*/
-void svc_close_net(struct svc_serv *, struct net *);
-int svc_recv(struct svc_rqst *, long);
-int svc_send(struct svc_rqst *);
-void svc_drop(struct svc_rqst *);
-void svc_sock_update_bufs(struct svc_serv *serv);
-bool svc_alien_sock(struct net *net, int fd);
-int svc_addsock(struct svc_serv *serv, const int fd,
- char *name_return, const size_t len,
- const struct cred *cred);
+void svc_recv(struct svc_rqst *rqstp);
+void svc_send(struct svc_rqst *rqstp);
+int svc_addsock(struct svc_serv *serv, struct net *net,
+ const int fd, char *name_return, const size_t len,
+ const struct cred *cred);
void svc_init_xprt_sock(void);
void svc_cleanup_xprt_sock(void);
-struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot);
-void svc_sock_destroy(struct svc_xprt *);
/*
* svc_makesock socket characteristics
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index a965cbc136ad..152597750f55 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -13,7 +13,7 @@
#include <linux/uio.h>
#include <asm/byteorder.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/scatterlist.h>
struct bio_vec;
@@ -95,6 +95,7 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
#define rpc_auth_unix cpu_to_be32(RPC_AUTH_UNIX)
#define rpc_auth_short cpu_to_be32(RPC_AUTH_SHORT)
#define rpc_auth_gss cpu_to_be32(RPC_AUTH_GSS)
+#define rpc_auth_tls cpu_to_be32(RPC_AUTH_TLS)
#define rpc_call cpu_to_be32(RPC_CALL)
#define rpc_reply cpu_to_be32(RPC_REPLY)
@@ -118,6 +119,8 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
#define rpc_autherr_badverf cpu_to_be32(RPC_AUTH_BADVERF)
#define rpc_autherr_rejectedverf cpu_to_be32(RPC_AUTH_REJECTEDVERF)
#define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK)
+#define rpc_autherr_invalidresp cpu_to_be32(RPC_AUTH_INVALIDRESP)
+#define rpc_autherr_failed cpu_to_be32(RPC_AUTH_FAILED)
#define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM)
#define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM)
@@ -127,10 +130,7 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int len);
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int len);
__be32 *xdr_encode_string(__be32 *p, const char *s);
-__be32 *xdr_decode_string_inplace(__be32 *p, char **sp, unsigned int *lenp,
- unsigned int maxlen);
__be32 *xdr_encode_netobj(__be32 *p, const struct xdr_netobj *);
-__be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *);
void xdr_inline_pages(struct xdr_buf *, unsigned int,
struct page **, unsigned int, unsigned int);
@@ -138,6 +138,8 @@ void xdr_terminate_string(const struct xdr_buf *, const u32);
size_t xdr_buf_pagecount(const struct xdr_buf *buf);
int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp);
void xdr_free_bvec(struct xdr_buf *buf);
+unsigned int xdr_buf_to_bvec(struct bio_vec *bvec, unsigned int bvec_size,
+ const struct xdr_buf *xdr);
static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len)
{
@@ -187,7 +189,6 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
/*
* XDR buffer helper functions
*/
-extern void xdr_shift_buf(struct xdr_buf *, size_t);
extern void xdr_buf_from_iov(const struct kvec *, struct xdr_buf *);
extern int xdr_buf_subsegment(const struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
@@ -224,6 +225,7 @@ struct xdr_stream {
struct kvec *iov; /* pointer to the current kvec */
struct kvec scratch; /* Scratch buffer */
struct page **page_ptr; /* pointer to the current page */
+ void *page_kaddr; /* kmapped address of the current page */
unsigned int nwords; /* Remaining decode buffer length */
struct rpc_rqst *rqst; /* For debugging */
@@ -239,11 +241,12 @@ typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf,
__be32 *p, struct rpc_rqst *rqst);
+void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf);
extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
-extern int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec,
- size_t nbytes);
-extern void xdr_commit_encode(struct xdr_stream *xdr);
+extern int xdr_reserve_space_vec(struct xdr_stream *xdr, size_t nbytes);
+extern void __xdr_commit_encode(struct xdr_stream *xdr);
extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len);
+extern void xdr_truncate_decode(struct xdr_stream *xdr, size_t len);
extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen);
extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
unsigned int base, unsigned int len);
@@ -253,14 +256,18 @@ extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf,
__be32 *p, struct rpc_rqst *rqst);
extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
struct page **pages, unsigned int len);
+extern void xdr_finish_decode(struct xdr_stream *xdr);
extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
extern int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data);
-extern unsigned int xdr_align_data(struct xdr_stream *, unsigned int offset, unsigned int length);
-extern unsigned int xdr_expand_hole(struct xdr_stream *, unsigned int offset, unsigned int length);
+extern void xdr_set_pagelen(struct xdr_stream *, unsigned int len);
extern bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf,
unsigned int len);
+extern unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset,
+ unsigned int target, unsigned int length);
+extern unsigned int xdr_stream_zero(struct xdr_stream *xdr, unsigned int offset,
+ unsigned int length);
/**
* xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
@@ -281,16 +288,16 @@ xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
}
/**
- * xdr_set_scratch_page - Attach a scratch buffer for decoding data
+ * xdr_set_scratch_folio - Attach a scratch buffer for decoding data
* @xdr: pointer to xdr_stream struct
- * @page: an anonymous page
+ * @folio: an anonymous folio
*
* See xdr_set_scratch_buffer().
*/
static inline void
-xdr_set_scratch_page(struct xdr_stream *xdr, struct page *page)
+xdr_set_scratch_folio(struct xdr_stream *xdr, struct folio *folio)
{
- xdr_set_scratch_buffer(xdr, page_address(page), PAGE_SIZE);
+ xdr_set_scratch_buffer(xdr, folio_address(folio), folio_size(folio));
}
/**
@@ -306,6 +313,20 @@ xdr_reset_scratch_buffer(struct xdr_stream *xdr)
}
/**
+ * xdr_commit_encode - Ensure all data is written to xdr->buf
+ * @xdr: pointer to xdr_stream
+ *
+ * Handle encoding across page boundaries by giving the caller a
+ * temporary location to write to, then later copying the data into
+ * place. __xdr_commit_encode() does that copying.
+ */
+static inline void xdr_commit_encode(struct xdr_stream *xdr)
+{
+ if (unlikely(xdr->scratch.iov_len))
+ __xdr_commit_encode(xdr);
+}
+
+/**
* xdr_stream_remaining - Return the number of bytes remaining in the stream
* @xdr: pointer to struct xdr_stream
*
@@ -318,14 +339,13 @@ xdr_stream_remaining(const struct xdr_stream *xdr)
return xdr->nwords << 2;
}
-ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr,
- size_t size);
-ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
- size_t maxlen, gfp_t gfp_flags);
-ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str,
- size_t size);
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
size_t maxlen, gfp_t gfp_flags);
+ssize_t xdr_stream_decode_opaque_auth(struct xdr_stream *xdr, u32 *flavor,
+ void **body, unsigned int *body_len);
+ssize_t xdr_stream_encode_opaque_auth(struct xdr_stream *xdr, u32 flavor,
+ void *body, unsigned int body_len);
+
/**
* xdr_align_size - Calculate padded size of an object
* @n: Size of an object being XDR encoded (in bytes)
@@ -404,8 +424,8 @@ static inline int xdr_stream_encode_item_absent(struct xdr_stream *xdr)
*/
static inline __be32 *xdr_encode_bool(__be32 *p, u32 n)
{
- *p = n ? xdr_one : xdr_zero;
- return p++;
+ *p++ = n ? xdr_one : xdr_zero;
+ return p;
}
/**
@@ -450,6 +470,27 @@ xdr_stream_encode_u32(struct xdr_stream *xdr, __u32 n)
}
/**
+ * xdr_stream_encode_be32 - Encode a big-endian 32-bit integer
+ * @xdr: pointer to xdr_stream
+ * @n: integer to encode
+ *
+ * Return values:
+ * On success, returns length in bytes of XDR buffer consumed
+ * %-EMSGSIZE on XDR buffer overflow
+ */
+static inline ssize_t
+xdr_stream_encode_be32(struct xdr_stream *xdr, __be32 n)
+{
+ const size_t len = sizeof(n);
+ __be32 *p = xdr_reserve_space(xdr, len);
+
+ if (unlikely(!p))
+ return -EMSGSIZE;
+ *p = n;
+ return len;
+}
+
+/**
* xdr_stream_encode_u64 - Encode a 64-bit integer
* @xdr: pointer to xdr_stream
* @n: 64-bit integer to encode
@@ -632,6 +673,27 @@ xdr_stream_decode_u32(struct xdr_stream *xdr, __u32 *ptr)
}
/**
+ * xdr_stream_decode_be32 - Decode a big-endian 32-bit integer
+ * @xdr: pointer to xdr_stream
+ * @ptr: location to store integer
+ *
+ * Return values:
+ * %0 on success
+ * %-EBADMSG on XDR buffer overflow
+ */
+static inline ssize_t
+xdr_stream_decode_be32(struct xdr_stream *xdr, __be32 *ptr)
+{
+ const size_t count = sizeof(*ptr);
+ __be32 *p = xdr_inline_decode(xdr, count);
+
+ if (unlikely(!p))
+ return -EBADMSG;
+ *ptr = *p;
+ return 0;
+}
+
+/**
* xdr_stream_decode_u64 - Decode a 64-bit integer
* @xdr: pointer to xdr_stream
* @ptr: location to store 64-bit integer
@@ -659,7 +721,7 @@ xdr_stream_decode_u64(struct xdr_stream *xdr, __u64 *ptr)
* @len: size of buffer pointed to by @ptr
*
* Return values:
- * On success, returns size of object stored in @ptr
+ * %0 on success
* %-EBADMSG on XDR buffer overflow
*/
static inline ssize_t
@@ -670,7 +732,7 @@ xdr_stream_decode_opaque_fixed(struct xdr_stream *xdr, void *ptr, size_t len)
if (unlikely(!p))
return -EBADMSG;
xdr_decode_opaque_fixed(p, ptr, len);
- return len;
+ return 0;
}
/**
@@ -730,6 +792,8 @@ xdr_stream_decode_uint32_array(struct xdr_stream *xdr,
if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
return -EBADMSG;
+ if (U32_MAX >= SIZE_MAX / sizeof(*p) && len > SIZE_MAX / sizeof(*p))
+ return -EBADMSG;
p = xdr_inline_decode(xdr, len * sizeof(*p));
if (unlikely(!p))
return -EBADMSG;
diff --git a/include/linux/sunrpc/xdrgen/_builtins.h b/include/linux/sunrpc/xdrgen/_builtins.h
new file mode 100644
index 000000000000..66ca3ece951a
--- /dev/null
+++ b/include/linux/sunrpc/xdrgen/_builtins.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Oracle and/or its affiliates.
+ *
+ * This header defines XDR data type primitives specified in
+ * Section 4 of RFC 4506, used by RPC programs implemented
+ * in the Linux kernel.
+ */
+
+#ifndef _SUNRPC_XDRGEN__BUILTINS_H_
+#define _SUNRPC_XDRGEN__BUILTINS_H_
+
+#include <linux/sunrpc/xdr.h>
+
+static inline bool
+xdrgen_decode_void(struct xdr_stream *xdr)
+{
+ return true;
+}
+
+static inline bool
+xdrgen_encode_void(struct xdr_stream *xdr)
+{
+ return true;
+}
+
+static inline bool
+xdrgen_decode_bool(struct xdr_stream *xdr, bool *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = (*p != xdr_zero);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_bool(struct xdr_stream *xdr, bool val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = val ? xdr_one : xdr_zero;
+ return true;
+}
+
+static inline bool
+xdrgen_decode_int(struct xdr_stream *xdr, s32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_int(struct xdr_stream *xdr, s32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_unsigned_int(struct xdr_stream *xdr, u32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_int(struct xdr_stream *xdr, u32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_long(struct xdr_stream *xdr, s32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_long(struct xdr_stream *xdr, s32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_unsigned_long(struct xdr_stream *xdr, u32 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = be32_to_cpup(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_long(struct xdr_stream *xdr, u32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(!p))
+ return false;
+ *p = cpu_to_be32(val);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_hyper(struct xdr_stream *xdr, s64 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = get_unaligned_be64(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_hyper(struct xdr_stream *xdr, s64 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ put_unaligned_be64(val, p);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_unsigned_hyper(struct xdr_stream *xdr, u64 *ptr)
+{
+ __be32 *p = xdr_inline_decode(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ *ptr = get_unaligned_be64(p);
+ return true;
+}
+
+static inline bool
+xdrgen_encode_unsigned_hyper(struct xdr_stream *xdr, u64 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT * 2);
+
+ if (unlikely(!p))
+ return false;
+ put_unaligned_be64(val, p);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_string(struct xdr_stream *xdr, string *ptr, u32 maxlen)
+{
+ __be32 *p;
+ u32 len;
+
+ if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
+ return false;
+ if (unlikely(maxlen && len > maxlen))
+ return false;
+ if (len != 0) {
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return false;
+ ptr->data = (unsigned char *)p;
+ }
+ ptr->len = len;
+ return true;
+}
+
+static inline bool
+xdrgen_encode_string(struct xdr_stream *xdr, string val, u32 maxlen)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT + xdr_align_size(val.len));
+
+ if (unlikely(!p))
+ return false;
+ xdr_encode_opaque(p, val.data, val.len);
+ return true;
+}
+
+static inline bool
+xdrgen_decode_opaque(struct xdr_stream *xdr, opaque *ptr, u32 maxlen)
+{
+ __be32 *p;
+ u32 len;
+
+ if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
+ return false;
+ if (unlikely(maxlen && len > maxlen))
+ return false;
+ if (len != 0) {
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ return false;
+ ptr->data = (u8 *)p;
+ }
+ ptr->len = len;
+ return true;
+}
+
+static inline bool
+xdrgen_encode_opaque(struct xdr_stream *xdr, opaque val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT + xdr_align_size(val.len));
+
+ if (unlikely(!p))
+ return false;
+ xdr_encode_opaque(p, val.data, val.len);
+ return true;
+}
+
+#endif /* _SUNRPC_XDRGEN__BUILTINS_H_ */
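
A hedged sketch of the calling convention these helpers establish: generated decoders chain them and stop at the first failure, which is why each returns bool rather than an errno. The struct and field names below are invented for illustration.

/* Illustrative only: decode { bool enabled; unsigned int count; opaque body<1024>; } */
struct example_args {
        bool enabled;
        u32 count;
        opaque body;
};

static bool xdrgen_decode_example_args(struct xdr_stream *xdr,
                                       struct example_args *p)
{
        return xdrgen_decode_bool(xdr, &p->enabled) &&
               xdrgen_decode_unsigned_int(xdr, &p->count) &&
               xdrgen_decode_opaque(xdr, &p->body, 1024);
}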
diff --git a/include/linux/sunrpc/xdrgen/_defs.h b/include/linux/sunrpc/xdrgen/_defs.h
new file mode 100644
index 000000000000..20c7270aa64d
--- /dev/null
+++ b/include/linux/sunrpc/xdrgen/_defs.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Oracle and/or its affiliates.
+ *
+ * This header defines XDR data type primitives specified in
+ * Section 4 of RFC 4506, used by RPC programs implemented
+ * in the Linux kernel.
+ */
+
+#ifndef _SUNRPC_XDRGEN__DEFS_H_
+#define _SUNRPC_XDRGEN__DEFS_H_
+
+#define TRUE (true)
+#define FALSE (false)
+
+typedef struct {
+ u32 len;
+ unsigned char *data;
+} string;
+
+typedef struct {
+ u32 len;
+ u8 *data;
+} opaque;
+
+#define XDR_void (0)
+#define XDR_bool (1)
+#define XDR_int (1)
+#define XDR_unsigned_int (1)
+#define XDR_long (1)
+#define XDR_unsigned_long (1)
+#define XDR_hyper (2)
+#define XDR_unsigned_hyper (2)
+
+#endif /* _SUNRPC_XDRGEN__DEFS_H_ */
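
The XDR_* size macros count 32-bit XDR units rather than bytes. A small sketch, assuming the usual XDR_UNIT (4 bytes) from <linux/sunrpc/xdr.h>; the message layout is invented:

/* Units for a hypothetical { hyper offset; unsigned int count; bool eof; } */
#define EXAMPLE_res_sz    (XDR_hyper + XDR_unsigned_int + XDR_bool) /* 4 units */
#define EXAMPLE_res_bytes (EXAMPLE_res_sz * XDR_UNIT)               /* 16 bytes */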
diff --git a/include/linux/sunrpc/xdrgen/nfs4_1.h b/include/linux/sunrpc/xdrgen/nfs4_1.h
new file mode 100644
index 000000000000..cf21a14aa885
--- /dev/null
+++ b/include/linux/sunrpc/xdrgen/nfs4_1.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Generated by xdrgen. Manual edits will be lost. */
+/* XDR specification file: ../../Documentation/sunrpc/xdr/nfs4_1.x */
+/* XDR specification modification time: Mon Oct 14 09:10:13 2024 */
+
+#ifndef _LINUX_XDRGEN_NFS4_1_DEF_H
+#define _LINUX_XDRGEN_NFS4_1_DEF_H
+
+#include <linux/types.h>
+#include <linux/sunrpc/xdrgen/_defs.h>
+
+typedef s64 int64_t;
+
+typedef u32 uint32_t;
+
+typedef struct {
+ u32 count;
+ uint32_t *element;
+} bitmap4;
+
+struct nfstime4 {
+ int64_t seconds;
+ uint32_t nseconds;
+};
+
+typedef bool fattr4_offline;
+
+enum { FATTR4_OFFLINE = 83 };
+
+struct open_arguments4 {
+ bitmap4 oa_share_access;
+ bitmap4 oa_share_deny;
+ bitmap4 oa_share_access_want;
+ bitmap4 oa_open_claim;
+ bitmap4 oa_create_mode;
+};
+
+enum open_args_share_access4 {
+ OPEN_ARGS_SHARE_ACCESS_READ = 1,
+ OPEN_ARGS_SHARE_ACCESS_WRITE = 2,
+ OPEN_ARGS_SHARE_ACCESS_BOTH = 3,
+};
+typedef enum open_args_share_access4 open_args_share_access4;
+
+enum open_args_share_deny4 {
+ OPEN_ARGS_SHARE_DENY_NONE = 0,
+ OPEN_ARGS_SHARE_DENY_READ = 1,
+ OPEN_ARGS_SHARE_DENY_WRITE = 2,
+ OPEN_ARGS_SHARE_DENY_BOTH = 3,
+};
+typedef enum open_args_share_deny4 open_args_share_deny4;
+
+enum open_args_share_access_want4 {
+ OPEN_ARGS_SHARE_ACCESS_WANT_ANY_DELEG = 3,
+ OPEN_ARGS_SHARE_ACCESS_WANT_NO_DELEG = 4,
+ OPEN_ARGS_SHARE_ACCESS_WANT_CANCEL = 5,
+ OPEN_ARGS_SHARE_ACCESS_WANT_SIGNAL_DELEG_WHEN_RESRC_AVAIL = 17,
+ OPEN_ARGS_SHARE_ACCESS_WANT_PUSH_DELEG_WHEN_UNCONTENDED = 18,
+ OPEN_ARGS_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS = 20,
+ OPEN_ARGS_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION = 21,
+};
+typedef enum open_args_share_access_want4 open_args_share_access_want4;
+
+enum open_args_open_claim4 {
+ OPEN_ARGS_OPEN_CLAIM_NULL = 0,
+ OPEN_ARGS_OPEN_CLAIM_PREVIOUS = 1,
+ OPEN_ARGS_OPEN_CLAIM_DELEGATE_CUR = 2,
+ OPEN_ARGS_OPEN_CLAIM_DELEGATE_PREV = 3,
+ OPEN_ARGS_OPEN_CLAIM_FH = 4,
+ OPEN_ARGS_OPEN_CLAIM_DELEG_CUR_FH = 5,
+ OPEN_ARGS_OPEN_CLAIM_DELEG_PREV_FH = 6,
+};
+typedef enum open_args_open_claim4 open_args_open_claim4;
+
+enum open_args_createmode4 {
+ OPEN_ARGS_CREATEMODE_UNCHECKED4 = 0,
+ OPEN_ARGS_CREATE_MODE_GUARDED = 1,
+ OPEN_ARGS_CREATEMODE_EXCLUSIVE4 = 2,
+ OPEN_ARGS_CREATE_MODE_EXCLUSIVE4_1 = 3,
+};
+typedef enum open_args_createmode4 open_args_createmode4;
+
+typedef struct open_arguments4 fattr4_open_arguments;
+
+enum { FATTR4_OPEN_ARGUMENTS = 86 };
+
+enum { OPEN4_RESULT_NO_OPEN_STATEID = 0x00000010 };
+
+typedef struct nfstime4 fattr4_time_deleg_access;
+
+typedef struct nfstime4 fattr4_time_deleg_modify;
+
+enum { FATTR4_TIME_DELEG_ACCESS = 84 };
+
+enum { FATTR4_TIME_DELEG_MODIFY = 85 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_DELEG_MASK = 0xFF00 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_NO_PREFERENCE = 0x0000 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_READ_DELEG = 0x0100 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG = 0x0200 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_ANY_DELEG = 0x0300 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_NO_DELEG = 0x0400 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_CANCEL = 0x0500 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_SIGNAL_DELEG_WHEN_RESRC_AVAIL = 0x10000 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_PUSH_DELEG_WHEN_UNCONTENDED = 0x20000 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS = 0x100000 };
+
+enum { OPEN4_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION = 0x200000 };
+
+enum open_delegation_type4 {
+ OPEN_DELEGATE_NONE = 0,
+ OPEN_DELEGATE_READ = 1,
+ OPEN_DELEGATE_WRITE = 2,
+ OPEN_DELEGATE_NONE_EXT = 3,
+ OPEN_DELEGATE_READ_ATTRS_DELEG = 4,
+ OPEN_DELEGATE_WRITE_ATTRS_DELEG = 5,
+};
+typedef enum open_delegation_type4 open_delegation_type4;
+
+#define NFS4_int64_t_sz \
+ (XDR_hyper)
+#define NFS4_uint32_t_sz \
+ (XDR_unsigned_int)
+#define NFS4_bitmap4_sz (XDR_unsigned_int)
+#define NFS4_nfstime4_sz \
+ (NFS4_int64_t_sz + NFS4_uint32_t_sz)
+#define NFS4_fattr4_offline_sz \
+ (XDR_bool)
+#define NFS4_open_arguments4_sz \
+ (NFS4_bitmap4_sz + NFS4_bitmap4_sz + NFS4_bitmap4_sz + NFS4_bitmap4_sz + NFS4_bitmap4_sz)
+#define NFS4_open_args_share_access4_sz (XDR_int)
+#define NFS4_open_args_share_deny4_sz (XDR_int)
+#define NFS4_open_args_share_access_want4_sz (XDR_int)
+#define NFS4_open_args_open_claim4_sz (XDR_int)
+#define NFS4_open_args_createmode4_sz (XDR_int)
+#define NFS4_fattr4_open_arguments_sz \
+ (NFS4_open_arguments4_sz)
+#define NFS4_fattr4_time_deleg_access_sz \
+ (NFS4_nfstime4_sz)
+#define NFS4_fattr4_time_deleg_modify_sz \
+ (NFS4_nfstime4_sz)
+#define NFS4_open_delegation_type4_sz (XDR_int)
+
+#endif /* _LINUX_XDRGEN_NFS4_1_DEF_H */
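
These generated *_sz macros compose structurally; NFS4_nfstime4_sz, for instance, is the sum of its members' unit counts. A sketch of reserving encode space with one of them, assuming the byte-oriented xdr_reserve_space() interface:

/* Sketch: reserve one fattr4_time_deleg_access (3 units = 12 bytes). */
__be32 *p = xdr_reserve_space(xdr, NFS4_fattr4_time_deleg_access_sz * XDR_UNIT);
if (!p)
        return -EMSGSIZE;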
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index d81fe8b364d0..f46d1fb8f71a 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -30,16 +30,7 @@
#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT)
#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
-/*
- * This describes a timeout strategy
- */
-struct rpc_timeout {
- unsigned long to_initval, /* initial timeout */
- to_maxval, /* max timeout */
- to_increment; /* if !exponential */
- unsigned int to_retries; /* max # of retries */
- unsigned char to_exponential;
-};
+#define RPC_GSS_SEQNO_ARRAY_SIZE 3U
enum rpc_display_format_t {
RPC_DISPLAY_ADDR = 0,
@@ -53,9 +44,11 @@ enum rpc_display_format_t {
struct rpc_task;
struct rpc_xprt;
+struct xprt_class;
struct seq_file;
struct svc_serv;
struct net;
+#include <linux/lwq.h>
/*
* This describes a complete RPC request
@@ -75,7 +68,8 @@ struct rpc_rqst {
struct rpc_cred * rq_cred; /* Bound cred */
__be32 rq_xid; /* request XID */
int rq_cong; /* has incremented xprt->cong */
- u32 rq_seqno; /* gss seq no. used on req. */
+ u32 rq_seqnos[RPC_GSS_SEQNO_ARRAY_SIZE]; /* past gss req seq nos. */
+ unsigned int rq_seqno_count; /* number of entries in rq_seqnos */
int rq_enc_pages_num;
struct page **rq_enc_pages; /* scratch pages for use by
gss privacy code */
@@ -120,7 +114,7 @@ struct rpc_rqst {
int rq_ntrans;
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
- struct list_head rq_bc_list; /* Callback service list */
+ struct lwq_node rq_bc_list; /* Callback service list */
unsigned long rq_bc_pa_state; /* Backchannel prealloc state */
struct list_head rq_bc_pa_list; /* Backchannel prealloc list */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
@@ -128,6 +122,33 @@ struct rpc_rqst {
#define rq_svec rq_snd_buf.head
#define rq_slen rq_snd_buf.len
+static inline int xprt_rqst_add_seqno(struct rpc_rqst *req, u32 seqno)
+{
+ if (likely(req->rq_seqno_count < RPC_GSS_SEQNO_ARRAY_SIZE))
+ req->rq_seqno_count++;
+
+ /* Shift array to make room for the newest element at the beginning */
+ memmove(&req->rq_seqnos[1], &req->rq_seqnos[0],
+ (RPC_GSS_SEQNO_ARRAY_SIZE - 1) * sizeof(req->rq_seqnos[0]));
+ req->rq_seqnos[0] = seqno;
+ return 0;
+}
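
A short sketch of the resulting behavior: the newest GSS sequence number always lands in slot 0, and the oldest is shifted off the end once the array holds three entries (the values below are illustrative):

/*
 * Assuming rq_seqno_count starts at 0:
 *   xprt_rqst_add_seqno(req, 7);   rq_seqnos = { 7, -, - }   count = 1
 *   xprt_rqst_add_seqno(req, 8);   rq_seqnos = { 8, 7, - }   count = 2
 *   xprt_rqst_add_seqno(req, 9);   rq_seqnos = { 9, 8, 7 }   count = 3
 *   xprt_rqst_add_seqno(req, 10);  rq_seqnos = { 10, 9, 8 }  count stays 3
 */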
+
+/* RPC transport layer security policies */
+enum xprtsec_policies {
+ RPC_XPRTSEC_NONE = 0,
+ RPC_XPRTSEC_TLS_ANON,
+ RPC_XPRTSEC_TLS_X509,
+};
+
+struct xprtsec_parms {
+ enum xprtsec_policies policy;
+
+ /* authentication material */
+ key_serial_t cert_serial;
+ key_serial_t privkey_serial;
+};
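
A minimal sketch of filling in this struct for an x.509-authenticated TLS transport. The key serials would come from the keyring holding the certificate material; the identifiers below are placeholders:

/* Sketch: request TLS with x.509 authentication (placeholder serials). */
struct xprtsec_parms xprtsec = {
        .policy = RPC_XPRTSEC_TLS_X509,
        .cert_serial = cert_key_serial,       /* hypothetical key serial */
        .privkey_serial = privkey_key_serial, /* hypothetical key serial */
};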
+
struct rpc_xprt_ops {
void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
@@ -138,10 +159,15 @@ struct rpc_xprt_ops {
void (*rpcbind)(struct rpc_task *task);
void (*set_port)(struct rpc_xprt *xprt, unsigned short port);
void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
+ int (*get_srcaddr)(struct rpc_xprt *xprt, char *buf,
+ size_t buflen);
+ unsigned short (*get_srcport)(struct rpc_xprt *xprt);
int (*buf_alloc)(struct rpc_task *task);
void (*buf_free)(struct rpc_task *task);
- void (*prepare_request)(struct rpc_rqst *req);
+ int (*prepare_request)(struct rpc_rqst *req,
+ struct xdr_buf *buf);
int (*send_request)(struct rpc_rqst *req);
+ void (*abort_send_request)(struct rpc_rqst *req);
void (*wait_for_reply_request)(struct rpc_task *task);
void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
void (*release_request)(struct rpc_task *task);
@@ -180,11 +206,14 @@ enum xprt_transports {
XPRT_TRANSPORT_RDMA = 256,
XPRT_TRANSPORT_BC_RDMA = XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC,
XPRT_TRANSPORT_LOCAL = 257,
+ XPRT_TRANSPORT_TCP_TLS = 258,
};
+struct rpc_sysfs_xprt;
struct rpc_xprt {
struct kref kref; /* Reference count */
const struct rpc_xprt_ops *ops; /* transport methods */
+ unsigned int id; /* transport id */
const struct rpc_timeout *timeout; /* timeout parms */
struct sockaddr_storage addr; /* server address */
@@ -222,6 +251,7 @@ struct rpc_xprt {
*/
unsigned long bind_timeout,
reestablish_timeout;
+ struct xprtsec_parms xprtsec;
unsigned int connect_cookie; /* A cookie that gets bumped
every time the transport
is reconnected */
@@ -281,13 +311,16 @@ struct rpc_xprt {
} stat;
struct net *xprt_net;
+ netns_tracker ns_tracker;
const char *servername;
const char *address_strings[RPC_DISPLAY_MAX];
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct dentry *debugfs; /* debugfs directory */
- atomic_t inject_disconnect;
#endif
struct rcu_head rcu;
+ const struct xprt_class *xprt_class;
+ struct rpc_sysfs_xprt *xprt_sysfs;
+ bool main; /* mark if this is the first transport */
};
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
@@ -323,6 +356,9 @@ struct xprt_create {
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
struct rpc_xprt_switch *bc_xps;
unsigned int flags;
+ struct xprtsec_parms xprtsec;
+ unsigned long connect_timeout;
+ unsigned long reconnect_timeout;
};
struct xprt_class {
@@ -349,10 +385,9 @@ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_free_slot(struct rpc_xprt *xprt,
struct rpc_rqst *req);
-void xprt_request_prepare(struct rpc_rqst *req);
bool xprt_prepare_transmit(struct rpc_task *task);
void xprt_request_enqueue_transmit(struct rpc_task *task);
-void xprt_request_enqueue_receive(struct rpc_task *task);
+int xprt_request_enqueue_receive(struct rpc_task *task);
void xprt_request_wait_receive(struct rpc_task *task);
void xprt_request_dequeue_xprt(struct rpc_task *task);
bool xprt_request_need_retransmit(struct rpc_task *task);
@@ -368,6 +403,9 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size,
unsigned int num_prealloc,
unsigned int max_req);
void xprt_free(struct rpc_xprt *);
+void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task);
+bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req);
+void xprt_cleanup_ids(void);
static inline int
xprt_enable_swap(struct rpc_xprt *xprt)
@@ -406,6 +444,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *);
void xprt_unlock_connect(struct rpc_xprt *, void *);
+void xprt_release_write(struct rpc_xprt *, struct rpc_task *);
/*
* Reserved bit positions in xprt->state
@@ -417,9 +456,12 @@ void xprt_unlock_connect(struct rpc_xprt *, void *);
#define XPRT_BOUND (4)
#define XPRT_BINDING (5)
#define XPRT_CLOSING (6)
+#define XPRT_OFFLINE (7)
+#define XPRT_REMOVE (8)
#define XPRT_CONGESTED (9)
#define XPRT_CWND_WAIT (10)
#define XPRT_WRITE_SPACE (11)
+#define XPRT_SND_IS_COOKIE (12)
static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
@@ -490,21 +532,7 @@ static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
return test_and_set_bit(XPRT_BINDING, &xprt->state);
}
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-extern unsigned int rpc_inject_disconnect;
-static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
-{
- if (!rpc_inject_disconnect)
- return;
- if (atomic_dec_return(&xprt->inject_disconnect))
- return;
- atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect);
- xprt->ops->inject_disconnect(xprt);
-}
-#else
-static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
-{
-}
-#endif
-
+void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
+void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
+void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps);
#endif /* _LINUX_SUNRPC_XPRT_H */
diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h
index c6cce3fbf29d..e4db5022fe92 100644
--- a/include/linux/sunrpc/xprtmultipath.h
+++ b/include/linux/sunrpc/xprtmultipath.h
@@ -10,12 +10,15 @@
#define _NET_SUNRPC_XPRTMULTIPATH_H
struct rpc_xprt_iter_ops;
+struct rpc_sysfs_xprt_switch;
struct rpc_xprt_switch {
spinlock_t xps_lock;
struct kref xps_kref;
+ unsigned int xps_id;
unsigned int xps_nxprts;
unsigned int xps_nactive;
+ unsigned int xps_nunique_destaddr_xprts;
atomic_long_t xps_queuelen;
struct list_head xps_xprt_list;
@@ -23,6 +26,7 @@ struct rpc_xprt_switch {
const struct rpc_xprt_iter_ops *xps_iter_ops;
+ struct rpc_sysfs_xprt_switch *xps_sysfs;
struct rcu_head xps_rcu;
};
@@ -51,7 +55,8 @@ extern void rpc_xprt_switch_set_roundrobin(struct rpc_xprt_switch *xps);
extern void rpc_xprt_switch_add_xprt(struct rpc_xprt_switch *xps,
struct rpc_xprt *xprt);
extern void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps,
- struct rpc_xprt *xprt);
+ struct rpc_xprt *xprt, bool offline);
+extern struct rpc_xprt *rpc_xprt_switch_get_main_xprt(struct rpc_xprt_switch *xps);
extern void xprt_iter_init(struct rpc_xprt_iter *xpi,
struct rpc_xprt_switch *xps);
@@ -59,16 +64,23 @@ extern void xprt_iter_init(struct rpc_xprt_iter *xpi,
extern void xprt_iter_init_listall(struct rpc_xprt_iter *xpi,
struct rpc_xprt_switch *xps);
+extern void xprt_iter_init_listoffline(struct rpc_xprt_iter *xpi,
+ struct rpc_xprt_switch *xps);
+
extern void xprt_iter_destroy(struct rpc_xprt_iter *xpi);
+extern void xprt_iter_rewind(struct rpc_xprt_iter *xpi);
+
extern struct rpc_xprt_switch *xprt_iter_xchg_switch(
struct rpc_xprt_iter *xpi,
struct rpc_xprt_switch *newswitch);
extern struct rpc_xprt *xprt_iter_xprt(struct rpc_xprt_iter *xpi);
-extern struct rpc_xprt *xprt_iter_get_xprt(struct rpc_xprt_iter *xpi);
extern struct rpc_xprt *xprt_iter_get_next(struct rpc_xprt_iter *xpi);
extern bool rpc_xprt_switch_has_addr(struct rpc_xprt_switch *xps,
const struct sockaddr *sap);
+
+extern void xprt_multipath_cleanup_ids(void);
+
#endif
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 3c1423ee74b4..700a1e6c047c 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -57,9 +57,11 @@ struct sock_xprt {
struct work_struct error_worker;
struct work_struct recv_worker;
struct mutex recv_mutex;
+ struct completion handshake_done;
struct sockaddr_storage srcaddr;
unsigned short srcport;
int xprt_err;
+ struct rpc_clnt *clnt;
/*
* UDP socket buffer size parameters
@@ -88,5 +90,8 @@ struct sock_xprt {
#define XPRT_SOCK_WAKE_WRITE (5)
#define XPRT_SOCK_WAKE_PENDING (6)
#define XPRT_SOCK_WAKE_DISCONNECT (7)
+#define XPRT_SOCK_CONNECT_SENT (8)
+#define XPRT_SOCK_NOSPACE (9)
+#define XPRT_SOCK_IGNORE_RECV (10)
#endif /* _LINUX_SUNRPC_XPRTSOCK_H */
diff --git a/include/linux/superhyway.h b/include/linux/superhyway.h
deleted file mode 100644
index 8d3376775813..000000000000
--- a/include/linux/superhyway.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * include/linux/superhyway.h
- *
- * SuperHyway Bus definitions
- *
- * Copyright (C) 2004, 2005 Paul Mundt <lethal@linux-sh.org>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __LINUX_SUPERHYWAY_H
-#define __LINUX_SUPERHYWAY_H
-
-#include <linux/device.h>
-
-/*
- * SuperHyway IDs
- */
-#define SUPERHYWAY_DEVICE_ID_SH5_DMAC 0x0183
-
-struct superhyway_vcr_info {
- u8 perr_flags; /* P-port Error flags */
- u8 merr_flags; /* Module Error flags */
- u16 mod_vers; /* Module Version */
- u16 mod_id; /* Module ID */
- u8 bot_mb; /* Bottom Memory block */
- u8 top_mb; /* Top Memory block */
-};
-
-struct superhyway_ops {
- int (*read_vcr)(unsigned long base, struct superhyway_vcr_info *vcr);
- int (*write_vcr)(unsigned long base, struct superhyway_vcr_info vcr);
-};
-
-struct superhyway_bus {
- struct superhyway_ops *ops;
-};
-
-extern struct superhyway_bus superhyway_channels[];
-
-struct superhyway_device_id {
- unsigned int id;
- unsigned long driver_data;
-};
-
-struct superhyway_device;
-extern struct bus_type superhyway_bus_type;
-
-struct superhyway_driver {
- char *name;
-
- const struct superhyway_device_id *id_table;
- struct device_driver drv;
-
- int (*probe)(struct superhyway_device *dev, const struct superhyway_device_id *id);
- void (*remove)(struct superhyway_device *dev);
-};
-
-#define to_superhyway_driver(d) container_of((d), struct superhyway_driver, drv)
-
-struct superhyway_device {
- char name[32];
-
- struct device dev;
-
- struct superhyway_device_id id;
- struct superhyway_driver *drv;
- struct superhyway_bus *bus;
-
- int num_resources;
- struct resource *resource;
- struct superhyway_vcr_info vcr;
-};
-
-#define to_superhyway_device(d) container_of((d), struct superhyway_device, dev)
-
-#define superhyway_get_drvdata(d) dev_get_drvdata(&(d)->dev)
-#define superhyway_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p))
-
-static inline int
-superhyway_read_vcr(struct superhyway_device *dev, unsigned long base,
- struct superhyway_vcr_info *vcr)
-{
- return dev->bus->ops->read_vcr(base, vcr);
-}
-
-static inline int
-superhyway_write_vcr(struct superhyway_device *dev, unsigned long base,
- struct superhyway_vcr_info vcr)
-{
- return dev->bus->ops->write_vcr(base, vcr);
-}
-
-extern int superhyway_scan_bus(struct superhyway_bus *);
-
-/* drivers/sh/superhyway/superhyway.c */
-int superhyway_register_driver(struct superhyway_driver *);
-void superhyway_unregister_driver(struct superhyway_driver *);
-int superhyway_add_device(unsigned long base, struct superhyway_device *, struct superhyway_bus *);
-int superhyway_add_devices(struct superhyway_bus *bus, struct superhyway_device **devices, int nr_devices);
-
-/* drivers/sh/superhyway/superhyway-sysfs.c */
-extern const struct attribute_group *superhyway_dev_groups[];
-
-#endif /* __LINUX_SUPERHYWAY_H */
-
diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h
index 0806796eabcb..5b67f0f47d80 100644
--- a/include/linux/surface_aggregator/controller.h
+++ b/include/linux/surface_aggregator/controller.h
@@ -6,7 +6,7 @@
* managing access and communication to and from the SSAM EC, as well as main
* communication structures and definitions.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H
@@ -44,7 +44,7 @@ struct ssam_event {
u8 command_id;
u8 instance_id;
u16 length;
- u8 data[];
+ u8 data[] __counted_by(length);
};
/**
@@ -207,17 +207,17 @@ static inline int ssam_request_sync_wait(struct ssam_request_sync *rqst)
return rqst->status;
}
-int ssam_request_sync(struct ssam_controller *ctrl,
- const struct ssam_request *spec,
- struct ssam_response *rsp);
+int ssam_request_do_sync(struct ssam_controller *ctrl,
+ const struct ssam_request *spec,
+ struct ssam_response *rsp);
-int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
- const struct ssam_request *spec,
- struct ssam_response *rsp,
- struct ssam_span *buf);
+int ssam_request_do_sync_with_buffer(struct ssam_controller *ctrl,
+ const struct ssam_request *spec,
+ struct ssam_response *rsp,
+ struct ssam_span *buf);
/**
- * ssam_request_sync_onstack - Execute a synchronous request on the stack.
+ * ssam_request_do_sync_onstack - Execute a synchronous request on the stack.
* @ctrl: The controller via which the request is submitted.
* @rqst: The request specification.
* @rsp: The response buffer.
@@ -227,7 +227,7 @@ int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
* fully initializes it via the provided request specification, submits it,
* and finally waits for its completion before returning its status. This
* helper macro essentially allocates the request message buffer on the stack
- * and then calls ssam_request_sync_with_buffer().
+ * and then calls ssam_request_do_sync_with_buffer().
*
* Note: The @payload_len parameter specifies the maximum payload length, used
* for buffer allocation. The actual payload length may be smaller.
@@ -235,12 +235,12 @@ int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
* Return: Returns the status of the request or any failure during setup, i.e.
* zero on success and a negative value on failure.
*/
-#define ssam_request_sync_onstack(ctrl, rqst, rsp, payload_len) \
+#define ssam_request_do_sync_onstack(ctrl, rqst, rsp, payload_len) \
({ \
u8 __data[SSH_COMMAND_MESSAGE_LENGTH(payload_len)]; \
struct ssam_span __buf = { &__data[0], ARRAY_SIZE(__data) }; \
\
- ssam_request_sync_with_buffer(ctrl, rqst, rsp, &__buf); \
+ ssam_request_do_sync_with_buffer(ctrl, rqst, rsp, &__buf); \
})
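
A hedged usage sketch for the renamed macro: the caller fills in the request and response descriptors, and the macro stack-allocates the transport buffer before delegating to ssam_request_do_sync_with_buffer(). The command ID below is invented and does not correspond to a real SAM command.

/* Illustrative only; 0x42 is not a real command ID. */
u8 data[4];
struct ssam_request rqst = {
        .target_category = SSAM_SSH_TC_SAM,
        .target_id = SSAM_SSH_TID_SAM,
        .command_id = 0x42,
        .instance_id = 0x00,
        .flags = SSAM_REQUEST_HAS_RESPONSE,
        .length = 0,
        .payload = NULL,
};
struct ssam_response rsp = {
        .capacity = sizeof(data),
        .length = 0,
        .pointer = data,
};
int status = ssam_request_do_sync_onstack(ctrl, &rqst, &rsp, 0);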
/**
@@ -349,7 +349,7 @@ struct ssam_request_spec_md {
* zero on success and negative on failure. The ``ctrl`` parameter is the
* controller via which the request is being sent.
*
- * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
* the generated function.
*/
#define SSAM_DEFINE_SYNC_REQUEST_N(name, spec...) \
@@ -366,7 +366,7 @@ struct ssam_request_spec_md {
rqst.length = 0; \
rqst.payload = NULL; \
\
- return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0); \
+ return ssam_request_do_sync_onstack(ctrl, &rqst, NULL, 0); \
}
/**
@@ -389,7 +389,7 @@ struct ssam_request_spec_md {
* parameter is the controller via which the request is sent. The request
* argument is specified via the ``arg`` pointer.
*
- * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
* the generated function.
*/
#define SSAM_DEFINE_SYNC_REQUEST_W(name, atype, spec...) \
@@ -406,8 +406,8 @@ struct ssam_request_spec_md {
rqst.length = sizeof(atype); \
rqst.payload = (u8 *)arg; \
\
- return ssam_request_sync_onstack(ctrl, &rqst, NULL, \
- sizeof(atype)); \
+ return ssam_request_do_sync_onstack(ctrl, &rqst, NULL, \
+ sizeof(atype)); \
}
/**
@@ -430,7 +430,7 @@ struct ssam_request_spec_md {
* the controller via which the request is sent. The request's return value is
* written to the memory pointed to by the ``ret`` parameter.
*
- * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
* the generated function.
*/
#define SSAM_DEFINE_SYNC_REQUEST_R(name, rtype, spec...) \
@@ -453,7 +453,68 @@ struct ssam_request_spec_md {
rsp.length = 0; \
rsp.pointer = (u8 *)ret; \
\
- status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \
+ status = ssam_request_do_sync_onstack(ctrl, &rqst, &rsp, 0); \
+ if (status) \
+ return status; \
+ \
+ if (rsp.length != sizeof(rtype)) { \
+ struct device *dev = ssam_controller_device(ctrl); \
+ dev_err(dev, \
+ "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
+ sizeof(rtype), rsp.length, rqst.target_category,\
+ rqst.command_id); \
+ return -EIO; \
+ } \
+ \
+ return 0; \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_WR() - Define synchronous SAM request function with
+ * both argument and return value.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by @spec,
+ * with the request taking an argument of type @atype and having a return value
+ * of type @rtype. The generated function takes care of setting up the request
+ * and response structs, buffer allocation, as well as execution of the request
+ * itself, returning once the request has been fully completed. The required
+ * transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, const atype *arg, rtype *ret)``, returning the status
+ * of the request, which is zero on success and negative on failure. The
+ * ``ctrl`` parameter is the controller via which the request is sent. The
+ * request argument is specified via the ``arg`` pointer. The request's return
+ * value is written to the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_WR(name, atype, rtype, spec...) \
+ static int name(struct ssam_controller *ctrl, const atype *arg, rtype *ret) \
+ { \
+ struct ssam_request_spec s = (struct ssam_request_spec)spec; \
+ struct ssam_request rqst; \
+ struct ssam_response rsp; \
+ int status; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = s.target_id; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = s.instance_id; \
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
+ rqst.length = sizeof(atype); \
+ rqst.payload = (u8 *)arg; \
+ \
+ rsp.capacity = sizeof(rtype); \
+ rsp.length = 0; \
+ rsp.pointer = (u8 *)ret; \
+ \
+ status = ssam_request_do_sync_onstack(ctrl, &rqst, &rsp, sizeof(atype)); \
if (status) \
return status; \
\
@@ -489,7 +550,7 @@ struct ssam_request_spec_md {
* parameter is the controller via which the request is sent, ``tid`` the
* target ID for the request, and ``iid`` the instance ID.
*
- * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
* the generated function.
*/
#define SSAM_DEFINE_SYNC_REQUEST_MD_N(name, spec...) \
@@ -506,7 +567,7 @@ struct ssam_request_spec_md {
rqst.length = 0; \
rqst.payload = NULL; \
\
- return ssam_request_sync_onstack(ctrl, &rqst, NULL, 0); \
+ return ssam_request_do_sync_onstack(ctrl, &rqst, NULL, 0); \
}
/**
@@ -531,7 +592,7 @@ struct ssam_request_spec_md {
* ``tid`` the target ID for the request, and ``iid`` the instance ID. The
* request argument is specified via the ``arg`` pointer.
*
- * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
* the generated function.
*/
#define SSAM_DEFINE_SYNC_REQUEST_MD_W(name, atype, spec...) \
@@ -548,7 +609,7 @@ struct ssam_request_spec_md {
rqst.length = sizeof(atype); \
rqst.payload = (u8 *)arg; \
\
- return ssam_request_sync_onstack(ctrl, &rqst, NULL, \
+ return ssam_request_do_sync_onstack(ctrl, &rqst, NULL, \
sizeof(atype)); \
}
@@ -574,7 +635,7 @@ struct ssam_request_spec_md {
* the target ID for the request, and ``iid`` the instance ID. The request's
* return value is written to the memory pointed to by the ``ret`` parameter.
*
- * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
* the generated function.
*/
#define SSAM_DEFINE_SYNC_REQUEST_MD_R(name, rtype, spec...) \
@@ -597,7 +658,71 @@ struct ssam_request_spec_md {
rsp.length = 0; \
rsp.pointer = (u8 *)ret; \
\
- status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, 0); \
+ status = ssam_request_do_sync_onstack(ctrl, &rqst, &rsp, 0); \
+ if (status) \
+ return status; \
+ \
+ if (rsp.length != sizeof(rtype)) { \
+ struct device *dev = ssam_controller_device(ctrl); \
+ dev_err(dev, \
+ "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \
+ sizeof(rtype), rsp.length, rqst.target_category,\
+ rqst.command_id); \
+ return -EIO; \
+ } \
+ \
+ return 0; \
+ }
+
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_MD_WR() - Define synchronous multi-device SAM
+ * request function with both argument and return value.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by @spec,
+ * with the request taking an argument of type @atype and having a return value
+ * of type @rtype. Device specifying parameters are not hard-coded, but instead
+ * must be provided to the function. The generated function takes care of
+ * setting up the request and response structs, buffer allocation, as well as
+ * execution of the request itself, returning once the request has been fully
+ * completed. The required transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct
+ * ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg, rtype *ret)``,
+ * returning the status of the request, which is zero on success and negative
+ * on failure. The ``ctrl`` parameter is the controller via which the request
+ * is sent, ``tid`` the target ID for the request, and ``iid`` the instance ID.
+ * The request argument is specified via the ``arg`` pointer. The request's
+ * return value is written to the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_MD_WR(name, atype, rtype, spec...) \
+ static int name(struct ssam_controller *ctrl, u8 tid, u8 iid, \
+ const atype *arg, rtype *ret) \
+ { \
+ struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \
+ struct ssam_request rqst; \
+ struct ssam_response rsp; \
+ int status; \
+ \
+ rqst.target_category = s.target_category; \
+ rqst.target_id = tid; \
+ rqst.command_id = s.command_id; \
+ rqst.instance_id = iid; \
+ rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \
+ rqst.length = sizeof(atype); \
+ rqst.payload = (u8 *)arg; \
+ \
+ rsp.capacity = sizeof(rtype); \
+ rsp.length = 0; \
+ rsp.pointer = (u8 *)ret; \
+ \
+ status = ssam_request_do_sync_onstack(ctrl, &rqst, &rsp, sizeof(atype)); \
if (status) \
return status; \
\
@@ -787,13 +912,27 @@ enum ssam_event_mask {
})
#define SSAM_EVENT_REGISTRY_SAM \
- SSAM_EVENT_REGISTRY(SSAM_SSH_TC_SAM, 0x01, 0x0b, 0x0c)
+ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_SAM, SSAM_SSH_TID_SAM, 0x0b, 0x0c)
#define SSAM_EVENT_REGISTRY_KIP \
- SSAM_EVENT_REGISTRY(SSAM_SSH_TC_KIP, 0x02, 0x27, 0x28)
+ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_KIP, SSAM_SSH_TID_KIP, 0x27, 0x28)
-#define SSAM_EVENT_REGISTRY_REG \
- SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, 0x02, 0x01, 0x02)
+#define SSAM_EVENT_REGISTRY_REG(tid)\
+ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, tid, 0x01, 0x02)
+
+/**
+ * enum ssam_event_notifier_flags - Flags for event notifiers.
+ * @SSAM_EVENT_NOTIFIER_OBSERVER:
+ * The corresponding notifier acts as observer. Registering a notifier
+ * with this flag set will not attempt to enable any event. Equally,
+ * unregistering will not attempt to disable any event. Note that a
+ * notifier with this flag may not even correspond to a certain event at
+ * all, only to a specific event target category. Event matching will not
+ * be influenced by this flag.
+ */
+enum ssam_event_notifier_flags {
+ SSAM_EVENT_NOTIFIER_OBSERVER = BIT(0),
+};
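
A sketch of an observer registration under the new flag: only the target category is populated, matching stays category-wide, and registration/unregistration will not touch event enable state. The callback name is invented.

/* Sketch: observe battery-category events without enabling any event. */
static struct ssam_event_notifier nf = {
        .base.fn = my_event_cb, /* hypothetical handler */
        .event.reg = SSAM_EVENT_REGISTRY_SAM,
        .event.id.target_category = SSAM_SSH_TC_BAT,
        .event.mask = SSAM_EVENT_MASK_NONE,
        .flags = SSAM_EVENT_NOTIFIER_OBSERVER,
};

status = ssam_notifier_register(ctrl, &nf);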
/**
* struct ssam_event_notifier - Notifier block for SSAM events.
@@ -803,6 +942,7 @@ enum ssam_event_mask {
* @event.id: ID specifying the event.
* @event.mask: Flags determining how events are matched to the notifier.
* @event.flags: Flags used for enabling the event.
+ * @flags: Notifier flags (see &enum ssam_event_notifier_flags).
*/
struct ssam_event_notifier {
struct ssam_notifier_block base;
@@ -813,12 +953,42 @@ struct ssam_event_notifier {
enum ssam_event_mask mask;
u8 flags;
} event;
+
+ unsigned long flags;
};
int ssam_notifier_register(struct ssam_controller *ctrl,
struct ssam_event_notifier *n);
-int ssam_notifier_unregister(struct ssam_controller *ctrl,
- struct ssam_event_notifier *n);
+int __ssam_notifier_unregister(struct ssam_controller *ctrl,
+ struct ssam_event_notifier *n, bool disable);
+
+/**
+ * ssam_notifier_unregister() - Unregister an event notifier.
+ * @ctrl: The controller the notifier has been registered on.
+ * @n: The event notifier to unregister.
+ *
+ * Unregister an event notifier. Decrement the usage counter of the associated
+ * SAM event if the notifier is not marked as an observer. If the usage counter
+ * reaches zero, the event will be disabled.
+ *
+ * Return: Returns zero on success, %-ENOENT if the given notifier block has
+ * not been registered on the controller. If the given notifier block was the
+ * last one associated with its specific event, returns the status of the
+ * event-disable EC-command.
+ */
+static inline int ssam_notifier_unregister(struct ssam_controller *ctrl,
+ struct ssam_event_notifier *n)
+{
+ return __ssam_notifier_unregister(ctrl, n, true);
+}
+
+int ssam_controller_event_enable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags);
+
+int ssam_controller_event_disable(struct ssam_controller *ctrl,
+ struct ssam_event_registry reg,
+ struct ssam_event_id id, u8 flags);
#endif /* _LINUX_SURFACE_AGGREGATOR_CONTROLLER_H */
diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h
index 4441ad667c3f..8cd8c38cf3f3 100644
--- a/include/linux/surface_aggregator/device.h
+++ b/include/linux/surface_aggregator/device.h
@@ -7,7 +7,7 @@
* Provides support for non-platform/non-ACPI SSAM clients via dedicated
* subsystem.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _LINUX_SURFACE_AGGREGATOR_DEVICE_H
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/types.h>
#include <linux/surface_aggregator/controller.h>
@@ -67,9 +68,9 @@ struct ssam_device_uid {
* match_flags member of the device ID structure. Do not use them directly
* with struct ssam_device_id or struct ssam_device_uid.
*/
-#define SSAM_ANY_TID 0xffff
-#define SSAM_ANY_IID 0xffff
-#define SSAM_ANY_FUN 0xffff
+#define SSAM_SSH_TID_ANY 0xffff
+#define SSAM_SSH_IID_ANY 0xffff
+#define SSAM_SSH_FUN_ANY 0xffff
/**
* SSAM_DEVICE() - Initialize a &struct ssam_device_id with the given
@@ -82,25 +83,25 @@ struct ssam_device_uid {
*
* Initializes a &struct ssam_device_id with the given parameters. See &struct
* ssam_device_uid for details regarding the parameters. The special values
- * %SSAM_ANY_TID, %SSAM_ANY_IID, and %SSAM_ANY_FUN can be used to specify that
+ * %SSAM_SSH_TID_ANY, %SSAM_SSH_IID_ANY, and %SSAM_SSH_FUN_ANY can be used to specify that
* matching should ignore target ID, instance ID, and/or sub-function,
* respectively. This macro initializes the ``match_flags`` field based on the
* given parameters.
*
* Note: The parameters @d and @cat must be valid &u8 values, the parameters
- * @tid, @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
- * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
+ * @tid, @iid, and @fun must be either valid &u8 values or %SSAM_SSH_TID_ANY,
+ * %SSAM_SSH_IID_ANY, or %SSAM_SSH_FUN_ANY, respectively. Other non-&u8 values are not
* allowed.
*/
#define SSAM_DEVICE(d, cat, tid, iid, fun) \
- .match_flags = (((tid) != SSAM_ANY_TID) ? SSAM_MATCH_TARGET : 0) \
- | (((iid) != SSAM_ANY_IID) ? SSAM_MATCH_INSTANCE : 0) \
- | (((fun) != SSAM_ANY_FUN) ? SSAM_MATCH_FUNCTION : 0), \
+ .match_flags = (((tid) != SSAM_SSH_TID_ANY) ? SSAM_MATCH_TARGET : 0) \
+ | (((iid) != SSAM_SSH_IID_ANY) ? SSAM_MATCH_INSTANCE : 0) \
+ | (((fun) != SSAM_SSH_FUN_ANY) ? SSAM_MATCH_FUNCTION : 0), \
.domain = d, \
.category = cat, \
- .target = ((tid) != SSAM_ANY_TID) ? (tid) : 0, \
- .instance = ((iid) != SSAM_ANY_IID) ? (iid) : 0, \
- .function = ((fun) != SSAM_ANY_FUN) ? (fun) : 0 \
+ .target = __builtin_choose_expr((tid) != SSAM_SSH_TID_ANY, (tid), 0), \
+ .instance = __builtin_choose_expr((iid) != SSAM_SSH_IID_ANY, (iid), 0), \
+ .function = __builtin_choose_expr((fun) != SSAM_SSH_FUN_ANY, (fun), 0)
/**
* SSAM_VDEV() - Initialize a &struct ssam_device_id as virtual device with
@@ -112,18 +113,18 @@ struct ssam_device_uid {
*
* Initializes a &struct ssam_device_id with the given parameters in the
* virtual domain. See &struct ssam_device_uid for details regarding the
- * parameters. The special values %SSAM_ANY_TID, %SSAM_ANY_IID, and
- * %SSAM_ANY_FUN can be used to specify that matching should ignore target ID,
+ * parameters. The special values %SSAM_SSH_TID_ANY, %SSAM_SSH_IID_ANY, and
+ * %SSAM_SSH_FUN_ANY can be used to specify that matching should ignore target ID,
* instance ID, and/or sub-function, respectively. This macro initializes the
* ``match_flags`` field based on the given parameters.
*
* Note: The parameter @cat must be a valid &u8 value, the parameters @tid,
- * @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
- * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
+ * @iid, and @fun must be either valid &u8 values or %SSAM_SSH_TID_ANY,
+ * %SSAM_SSH_IID_ANY, or %SSAM_SSH_FUN_ANY, respectively. Other non-&u8 values are not
* allowed.
*/
#define SSAM_VDEV(cat, tid, iid, fun) \
- SSAM_DEVICE(SSAM_DOMAIN_VIRTUAL, SSAM_VIRTUAL_TC_##cat, tid, iid, fun)
+ SSAM_DEVICE(SSAM_DOMAIN_VIRTUAL, SSAM_VIRTUAL_TC_##cat, SSAM_SSH_TID_##tid, iid, fun)
/**
* SSAM_SDEV() - Initialize a &struct ssam_device_id as physical SSH device
@@ -135,30 +136,43 @@ struct ssam_device_uid {
*
* Initializes a &struct ssam_device_id with the given parameters in the SSH
* domain. See &struct ssam_device_uid for details regarding the parameters.
- * The special values %SSAM_ANY_TID, %SSAM_ANY_IID, and %SSAM_ANY_FUN can be
- * used to specify that matching should ignore target ID, instance ID, and/or
- * sub-function, respectively. This macro initializes the ``match_flags``
- * field based on the given parameters.
+ * The special values %SSAM_SSH_TID_ANY, %SSAM_SSH_IID_ANY, and
+ * %SSAM_SSH_FUN_ANY can be used to specify that matching should ignore target
+ * ID, instance ID, and/or sub-function, respectively. This macro initializes
+ * the ``match_flags`` field based on the given parameters.
*
* Note: The parameter @cat must be a valid &u8 value, the parameters @tid,
- * @iid, and @fun must be either valid &u8 values or %SSAM_ANY_TID,
- * %SSAM_ANY_IID, or %SSAM_ANY_FUN, respectively. Other non-&u8 values are not
- * allowed.
+ * @iid, and @fun must be either valid &u8 values or %SSAM_SSH_TID_ANY,
+ * %SSAM_SSH_IID_ANY, or %SSAM_SSH_FUN_ANY, respectively. Other non-&u8 values
+ * are not allowed.
*/
#define SSAM_SDEV(cat, tid, iid, fun) \
- SSAM_DEVICE(SSAM_DOMAIN_SERIALHUB, SSAM_SSH_TC_##cat, tid, iid, fun)
+ SSAM_DEVICE(SSAM_DOMAIN_SERIALHUB, SSAM_SSH_TC_##cat, SSAM_SSH_TID_##tid, iid, fun)
+
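With the renamed constants, SSAM_SDEV() callers now name the target via the SSAM_SSH_TID_* suffix rather than a raw number. A sketch of a match-table entry under the new convention; the category/target choice is illustrative:

/* Sketch: match the battery subsystem at target KIP, any instance/function. */
static const struct ssam_device_id example_match[] = {
        { SSAM_SDEV(BAT, KIP, SSAM_SSH_IID_ANY, SSAM_SSH_FUN_ANY) },
        { },
};
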
+/**
+ * enum ssam_device_flags - Flags for SSAM client devices.
+ * @SSAM_DEVICE_HOT_REMOVED_BIT:
+ * The device has been hot-removed. Further communication with it may time
+ * out and should be avoided.
+ */
+enum ssam_device_flags {
+ SSAM_DEVICE_HOT_REMOVED_BIT = 0,
+};
/**
* struct ssam_device - SSAM client device.
- * @dev: Driver model representation of the device.
- * @ctrl: SSAM controller managing this device.
- * @uid: UID identifying the device.
+ * @dev: Driver model representation of the device.
+ * @ctrl: SSAM controller managing this device.
+ * @uid: UID identifying the device.
+ * @flags: Device state flags, see &enum ssam_device_flags.
*/
struct ssam_device {
struct device dev;
struct ssam_controller *ctrl;
struct ssam_device_uid uid;
+
+ unsigned long flags;
};
/**
@@ -177,7 +191,8 @@ struct ssam_device_driver {
void (*remove)(struct ssam_device *sdev);
};
-extern struct bus_type ssam_bus_type;
+#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
+
extern const struct device_type ssam_device_type;
/**
@@ -193,6 +208,15 @@ static inline bool is_ssam_device(struct device *d)
return d->type == &ssam_device_type;
}
+#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+static inline bool is_ssam_device(struct device *d)
+{
+ return false;
+}
+
+#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
/**
* to_ssam_device() - Casts the given device to a SSAM client device.
* @d: The device to cast.
@@ -204,10 +228,7 @@ static inline bool is_ssam_device(struct device *d)
* Return: Returns a pointer to the &struct ssam_device wrapping the given
* device @d.
*/
-static inline struct ssam_device *to_ssam_device(struct device *d)
-{
- return container_of(d, struct ssam_device, dev);
-}
+#define to_ssam_device(d) container_of_const(d, struct ssam_device, dev)
/**
* to_ssam_device_driver() - Casts the given device driver to a SSAM client
@@ -221,11 +242,7 @@ static inline struct ssam_device *to_ssam_device(struct device *d)
* Return: Returns the pointer to the &struct ssam_device_driver wrapping the
* given device driver @d.
*/
-static inline
-struct ssam_device_driver *to_ssam_device_driver(struct device_driver *d)
-{
- return container_of(d, struct ssam_device_driver, driver);
-}
+#define to_ssam_device_driver(d) container_of_const(d, struct ssam_device_driver, driver)
const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table,
const struct ssam_device_uid uid);
@@ -241,6 +258,35 @@ int ssam_device_add(struct ssam_device *sdev);
void ssam_device_remove(struct ssam_device *sdev);
/**
+ * ssam_device_mark_hot_removed() - Mark the given device as hot-removed.
+ * @sdev: The device to mark as hot-removed.
+ *
+ * Mark the device as having been hot-removed. This signals drivers using the
+ * device that communication with the device should be avoided and may lead to
+ * timeouts.
+ */
+static inline void ssam_device_mark_hot_removed(struct ssam_device *sdev)
+{
+ dev_dbg(&sdev->dev, "marking device as hot-removed\n");
+ set_bit(SSAM_DEVICE_HOT_REMOVED_BIT, &sdev->flags);
+}
+
+/**
+ * ssam_device_is_hot_removed() - Check if the given device has been
+ * hot-removed.
+ * @sdev: The device to check.
+ *
+ * Checks if the given device has been marked as hot-removed. See
+ * ssam_device_mark_hot_removed() for more details.
+ *
+ * Return: Returns ``true`` if the device has been marked as hot-removed.
+ */
+static inline bool ssam_device_is_hot_removed(struct ssam_device *sdev)
+{
+ return test_bit(SSAM_DEVICE_HOT_REMOVED_BIT, &sdev->flags);
+}
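
A sketch of the intended call-site pattern: request paths test the flag and fail fast with -ENODEV rather than waiting out an EC timeout. The query helper is invented.

/* Sketch: short-circuit a request path for a hot-removed device. */
static int example_get_state(struct ssam_device *sdev, u32 *state)
{
        if (ssam_device_is_hot_removed(sdev))
                return -ENODEV; /* avoid a guaranteed timeout */

        return example_query_state(sdev, state); /* hypothetical */
}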
+
+/**
* ssam_device_get() - Increment reference count of SSAM client device.
* @sdev: The device to increment the reference count of.
*
@@ -319,6 +365,66 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d);
ssam_device_driver_unregister)
+/* -- Helpers for controller and hub devices. ------------------------------- */
+
+#ifdef CONFIG_SURFACE_AGGREGATOR_BUS
+
+int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node);
+void ssam_remove_clients(struct device *dev);
+
+#else /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+static inline int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl,
+ struct fwnode_handle *node)
+{
+ return 0;
+}
+
+static inline void ssam_remove_clients(struct device *dev) {}
+
+#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */
+
+/**
+ * ssam_register_clients() - Register all client devices defined under the
+ * given parent device.
+ * @dev: The parent device under which clients should be registered.
+ * @ctrl: The controller with which clients should be registered.
+ *
+ * Register all clients that have been defined via firmware nodes as children
+ * of the given (parent) device. The respective child firmware nodes will be
+ * associated with the correspondingly created child devices.
+ *
+ * The given controller will be used to instantiate the new devices. See
+ * ssam_device_add() for details.
+ *
+ * Return: Returns zero on success, nonzero on failure.
+ */
+static inline int ssam_register_clients(struct device *dev, struct ssam_controller *ctrl)
+{
+ return __ssam_register_clients(dev, ctrl, dev_fwnode(dev));
+}
+
+/**
+ * ssam_device_register_clients() - Register all client devices defined under
+ * the given SSAM parent device.
+ * @sdev: The parent device under which clients should be registered.
+ *
+ * Register all clients that have been defined via firmware nodes as children
+ * of the given (parent) device. The respective child firmware nodes will be
+ * associated with the correspondingly created child devices.
+ *
+ * The controller used by the parent device will be used to instantiate the new
+ * devices. See ssam_device_add() for details.
+ *
+ * Return: Returns zero on success, nonzero on failure.
+ */
+static inline int ssam_device_register_clients(struct ssam_device *sdev)
+{
+ return ssam_register_clients(&sdev->dev, sdev->ctrl);
+}
+
+
/* -- Helpers for client-device requests. ----------------------------------- */
/**
@@ -342,7 +448,7 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d);
* device of the request and by association the controller via which the
* request is sent.
*
- * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
* the generated function.
*/
#define SSAM_DEFINE_SYNC_REQUEST_CL_N(name, spec...) \
@@ -376,7 +482,7 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d);
* which the request is sent. The request's argument is specified via the
* ``arg`` pointer.
*
- * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
* the generated function.
*/
#define SSAM_DEFINE_SYNC_REQUEST_CL_W(name, atype, spec...) \
@@ -410,7 +516,7 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d);
* the request is sent. The request's return value is written to the memory
* pointed to by the ``ret`` parameter.
*
- * Refer to ssam_request_sync_onstack() for more details on the behavior of
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
* the generated function.
*/
#define SSAM_DEFINE_SYNC_REQUEST_CL_R(name, rtype, spec...) \
@@ -421,4 +527,106 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d);
sdev->uid.instance, ret); \
}
+/**
+ * SSAM_DEFINE_SYNC_REQUEST_CL_WR() - Define synchronous client-device SAM
+ * request function with argument and return value.
+ * @name: Name of the generated function.
+ * @atype: Type of the request's argument.
+ * @rtype: Type of the request's return value.
+ * @spec: Specification (&struct ssam_request_spec_md) defining the request.
+ *
+ * Defines a function executing the synchronous SAM request specified by @spec,
+ * with the request taking an argument of type @atype and having a return value
+ * of type @rtype. Device specifying parameters are not hard-coded, but instead
+ * are provided via the client device, specifically its UID, supplied when
+ * calling this function. The generated function takes care of setting up the
+ * request struct, buffer allocation, as well as execution of the request
+ * itself, returning once the request has been fully completed. The required
+ * transport buffer will be allocated on the stack.
+ *
+ * The generated function is defined as ``static int name(struct ssam_device
+ * *sdev, const atype *arg, rtype *ret)``, returning the status of the request,
+ * which is zero on success and negative on failure. The ``sdev`` parameter
+ * specifies both the target device of the request and by association the
+ * controller via which the request is sent. The request's argument is
+ * specified via the ``arg`` pointer. The request's return value is written to
+ * the memory pointed to by the ``ret`` parameter.
+ *
+ * Refer to ssam_request_do_sync_onstack() for more details on the behavior of
+ * the generated function.
+ */
+#define SSAM_DEFINE_SYNC_REQUEST_CL_WR(name, atype, rtype, spec...) \
+ SSAM_DEFINE_SYNC_REQUEST_MD_WR(__raw_##name, atype, rtype, spec) \
+ static int name(struct ssam_device *sdev, const atype *arg, rtype *ret) \
+ { \
+ return __raw_##name(sdev->ctrl, sdev->uid.target, \
+ sdev->uid.instance, arg, ret); \
+ }
+
+
+/* -- Helpers for client-device notifiers. ---------------------------------- */
+
+/**
+ * ssam_device_notifier_register() - Register an event notifier for the
+ * specified client device.
+ * @sdev: The device the notifier should be registered on.
+ * @n: The event notifier to register.
+ *
+ * Register an event notifier. Increment the usage counter of the associated
+ * SAM event if the notifier is not marked as an observer. If the notifier is
+ * not marked as an observer and the event is currently not enabled, the event
+ * will be enabled during this call. If the notifier is marked as an observer,
+ * no attempt will be made at enabling any event and no reference count will
+ * be modified.
+ *
+ * Notifiers marked as observers do not need to be associated with one specific
+ * event, i.e. as long as no event matching is performed, only the event target
+ * category needs to be set.
+ *
+ * Return: Returns zero on success, %-ENOSPC if there have already been
+ * %INT_MAX notifiers for the event ID/type associated with the notifier block
+ * registered, %-ENOMEM if the corresponding event entry could not be
+ * allocated, %-ENODEV if the device is marked as hot-removed. If this is the
+ * first time that a notifier block is registered for the specific associated
+ * event, returns the status of the event-enable EC-command.
+ */
+static inline int ssam_device_notifier_register(struct ssam_device *sdev,
+ struct ssam_event_notifier *n)
+{
+ /*
+ * Note that this check does not provide any guarantees whatsoever as
+ * hot-removal could happen at any point and we can't protect against
+ * it. Nevertheless, if we can detect hot-removal, bail early to avoid
+ * communication timeouts.
+ */
+ if (ssam_device_is_hot_removed(sdev))
+ return -ENODEV;
+
+ return ssam_notifier_register(sdev->ctrl, n);
+}
+
+/**
+ * ssam_device_notifier_unregister() - Unregister an event notifier for the
+ * specified client device.
+ * @sdev: The device the notifier has been registered on.
+ * @n: The event notifier to unregister.
+ *
+ * Unregister an event notifier. Decrement the usage counter of the associated
+ * SAM event if the notifier is not marked as an observer. If the usage counter
+ * reaches zero, the event will be disabled.
+ *
+ * In case the device has been marked as hot-removed, the event will not be
+ * disabled on the EC, as in those cases any attempt at doing so may time out.
+ *
+ * Return: Returns zero on success, %-ENOENT if the given notifier block has
+ * not been registered on the controller. If the given notifier block was the
+ * last one associated with its specific event, returns the status of the
+ * event-disable EC-command.
+ */
+static inline int ssam_device_notifier_unregister(struct ssam_device *sdev,
+ struct ssam_event_notifier *n)
+{
+ return __ssam_notifier_unregister(sdev->ctrl, n,
+ !ssam_device_is_hot_removed(sdev));
+}
+
#endif /* _LINUX_SURFACE_AGGREGATOR_DEVICE_H */
diff --git a/include/linux/surface_aggregator/serial_hub.h b/include/linux/surface_aggregator/serial_hub.h
index 64276fbfa1d5..d8dbef6b7fc2 100644
--- a/include/linux/surface_aggregator/serial_hub.h
+++ b/include/linux/surface_aggregator/serial_hub.h
@@ -6,13 +6,13 @@
* Surface System Aggregator Module (SSAM). Provides the interface for basic
* packet- and request-based communication with the SSAM EC via SSH.
*
- * Copyright (C) 2019-2020 Maximilian Luz <luzmaximilian@gmail.com>
+ * Copyright (C) 2019-2021 Maximilian Luz <luzmaximilian@gmail.com>
*/
#ifndef _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
#define _LINUX_SURFACE_AGGREGATOR_SERIAL_HUB_H
-#include <linux/crc-ccitt.h>
+#include <linux/crc-itu-t.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/list.h>
@@ -83,23 +83,21 @@ enum ssh_payload_type {
/**
* struct ssh_command - Payload of a command-type frame.
- * @type: The type of the payload. See &enum ssh_payload_type. Should be
- * SSH_PLD_TYPE_CMD for this struct.
- * @tc: Command target category.
- * @tid_out: Output target ID. Should be zero if this an incoming (EC to host)
- * message.
- * @tid_in: Input target ID. Should be zero if this is an outgoing (host to
- * EC) message.
- * @iid: Instance ID.
- * @rqid: Request ID. Used to match requests with responses and differentiate
- * between responses and events.
- * @cid: Command ID.
+ * @type: The type of the payload. See &enum ssh_payload_type. Should be
+ * SSH_PLD_TYPE_CMD for this struct.
+ * @tc: Command target category.
+ * @tid: Target ID. Indicates the target of the message.
+ * @sid: Source ID. Indicates the source of the message.
+ * @iid: Instance ID.
+ * @rqid: Request ID. Used to match requests with responses and differentiate
+ * between responses and events.
+ * @cid: Command ID.
*/
struct ssh_command {
u8 type;
u8 tc;
- u8 tid_out;
- u8 tid_in;
+ u8 tid;
+ u8 sid;
u8 iid;
__le16 rqid;
u8 cid;
@@ -190,7 +188,7 @@ static_assert(sizeof(struct ssh_command) == 8);
*/
static inline u16 ssh_crc(const u8 *buf, size_t len)
{
- return crc_ccitt_false(0xffff, buf, len);
+ return crc_itu_t(0xffff, buf, len);
}
/*
@@ -201,7 +199,7 @@ static inline u16 ssh_crc(const u8 *buf, size_t len)
* exception of zero, which is not an event ID. Thus, this is also the
* absolute maximum number of event handlers that can be registered.
*/
-#define SSH_NUM_EVENTS 34
+#define SSH_NUM_EVENTS 38
/*
* SSH_NUM_TARGETS - The number of communication targets used in the protocol.
@@ -280,6 +278,22 @@ struct ssam_span {
size_t len;
};
+/**
+ * enum ssam_ssh_tid - Target/source IDs for Serial Hub messages.
+ * @SSAM_SSH_TID_HOST: The host, i.e. the kernel Serial Hub driver itself.
+ * @SSAM_SSH_TID_SAM: The Surface Aggregator EC.
+ * @SSAM_SSH_TID_KIP: Keyboard and peripheral controller.
+ * @SSAM_SSH_TID_DEBUG: Debug connector.
+ * @SSAM_SSH_TID_SURFLINK: SurfLink connector.
+ */
+enum ssam_ssh_tid {
+ SSAM_SSH_TID_HOST = 0x00,
+ SSAM_SSH_TID_SAM = 0x01,
+ SSAM_SSH_TID_KIP = 0x02,
+ SSAM_SSH_TID_DEBUG = 0x03,
+ SSAM_SSH_TID_SURFLINK = 0x04,
+};
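
For illustration (all field values made up), an outgoing host-to-EC command
carries the destination in @tid and the host in @sid:

	struct ssh_command cmd = {
		.type = SSH_PLD_TYPE_CMD,
		.tc   = SSAM_SSH_TC_SAM,
		.tid  = SSAM_SSH_TID_SAM,	/* destination */
		.sid  = SSAM_SSH_TID_HOST,	/* source: the kernel driver */
		.iid  = 0x00,
		.rqid = cpu_to_le16(rqid),	/* rqid allocated elsewhere */
		.cid  = 0x01,
	};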
+
/*
* Known SSH/EC target categories.
*
@@ -292,40 +306,45 @@ struct ssam_span {
* Windows driver.
*/
enum ssam_ssh_tc {
- /* Category 0x00 is invalid for EC use. */
- SSAM_SSH_TC_SAM = 0x01, /* Generic system functionality, real-time clock. */
- SSAM_SSH_TC_BAT = 0x02, /* Battery/power subsystem. */
- SSAM_SSH_TC_TMP = 0x03, /* Thermal subsystem. */
- SSAM_SSH_TC_PMC = 0x04,
- SSAM_SSH_TC_FAN = 0x05,
- SSAM_SSH_TC_PoM = 0x06,
- SSAM_SSH_TC_DBG = 0x07,
- SSAM_SSH_TC_KBD = 0x08, /* Legacy keyboard (Laptop 1/2). */
- SSAM_SSH_TC_FWU = 0x09,
- SSAM_SSH_TC_UNI = 0x0a,
- SSAM_SSH_TC_LPC = 0x0b,
- SSAM_SSH_TC_TCL = 0x0c,
- SSAM_SSH_TC_SFL = 0x0d,
- SSAM_SSH_TC_KIP = 0x0e,
- SSAM_SSH_TC_EXT = 0x0f,
- SSAM_SSH_TC_BLD = 0x10,
- SSAM_SSH_TC_BAS = 0x11, /* Detachment system (Surface Book 2/3). */
- SSAM_SSH_TC_SEN = 0x12,
- SSAM_SSH_TC_SRQ = 0x13,
- SSAM_SSH_TC_MCU = 0x14,
- SSAM_SSH_TC_HID = 0x15, /* Generic HID input subsystem. */
- SSAM_SSH_TC_TCH = 0x16,
- SSAM_SSH_TC_BKL = 0x17,
- SSAM_SSH_TC_TAM = 0x18,
- SSAM_SSH_TC_ACC = 0x19,
- SSAM_SSH_TC_UFI = 0x1a,
- SSAM_SSH_TC_USC = 0x1b,
- SSAM_SSH_TC_PEN = 0x1c,
- SSAM_SSH_TC_VID = 0x1d,
- SSAM_SSH_TC_AUD = 0x1e,
- SSAM_SSH_TC_SMC = 0x1f,
- SSAM_SSH_TC_KPD = 0x20,
- SSAM_SSH_TC_REG = 0x21, /* Extended event registry. */
+ /* Category 0x00 is invalid for EC use. */
+ SSAM_SSH_TC_SAM = 0x01, /* Generic system functionality, real-time clock. */
+ SSAM_SSH_TC_BAT = 0x02, /* Battery/power subsystem. */
+ SSAM_SSH_TC_TMP = 0x03, /* Thermal subsystem. */
+ SSAM_SSH_TC_PMC = 0x04,
+ SSAM_SSH_TC_FAN = 0x05,
+ SSAM_SSH_TC_PoM = 0x06,
+ SSAM_SSH_TC_DBG = 0x07,
+ SSAM_SSH_TC_KBD = 0x08, /* Legacy keyboard (Laptop 1/2). */
+ SSAM_SSH_TC_FWU = 0x09,
+ SSAM_SSH_TC_UNI = 0x0a,
+ SSAM_SSH_TC_LPC = 0x0b,
+ SSAM_SSH_TC_TCL = 0x0c,
+ SSAM_SSH_TC_SFL = 0x0d,
+	SSAM_SSH_TC_KIP  = 0x0e,	/* Manages detachable peripherals (Pro X/8 keyboard cover). */
+ SSAM_SSH_TC_EXT = 0x0f,
+ SSAM_SSH_TC_BLD = 0x10,
+ SSAM_SSH_TC_BAS = 0x11, /* Detachment system (Surface Book 2/3). */
+ SSAM_SSH_TC_SEN = 0x12,
+ SSAM_SSH_TC_SRQ = 0x13,
+ SSAM_SSH_TC_MCU = 0x14,
+ SSAM_SSH_TC_HID = 0x15, /* Generic HID input subsystem. */
+ SSAM_SSH_TC_TCH = 0x16,
+ SSAM_SSH_TC_BKL = 0x17,
+ SSAM_SSH_TC_TAM = 0x18,
+ SSAM_SSH_TC_ACC0 = 0x19,
+ SSAM_SSH_TC_UFI = 0x1a,
+ SSAM_SSH_TC_USC = 0x1b,
+ SSAM_SSH_TC_PEN = 0x1c,
+ SSAM_SSH_TC_VID = 0x1d,
+ SSAM_SSH_TC_AUD = 0x1e,
+ SSAM_SSH_TC_SMC = 0x1f,
+ SSAM_SSH_TC_KPD = 0x20,
+ SSAM_SSH_TC_REG = 0x21, /* Extended event registry. */
+ SSAM_SSH_TC_SPT = 0x22,
+ SSAM_SSH_TC_SYS = 0x23,
+ SSAM_SSH_TC_ACC1 = 0x24,
+ SSAM_SSH_TC_SHB = 0x25,
+ SSAM_SSH_TC_POS = 0x26, /* For obtaining Laptop Studio screen position. */
};
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 8af13ba60c7e..b02876f1ae38 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -40,62 +40,6 @@ typedef int __bitwise suspend_state_t;
#define PM_SUSPEND_MIN PM_SUSPEND_TO_IDLE
#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
-enum suspend_stat_step {
- SUSPEND_FREEZE = 1,
- SUSPEND_PREPARE,
- SUSPEND_SUSPEND,
- SUSPEND_SUSPEND_LATE,
- SUSPEND_SUSPEND_NOIRQ,
- SUSPEND_RESUME_NOIRQ,
- SUSPEND_RESUME_EARLY,
- SUSPEND_RESUME
-};
-
-struct suspend_stats {
- int success;
- int fail;
- int failed_freeze;
- int failed_prepare;
- int failed_suspend;
- int failed_suspend_late;
- int failed_suspend_noirq;
- int failed_resume;
- int failed_resume_early;
- int failed_resume_noirq;
-#define REC_FAILED_NUM 2
- int last_failed_dev;
- char failed_devs[REC_FAILED_NUM][40];
- int last_failed_errno;
- int errno[REC_FAILED_NUM];
- int last_failed_step;
- enum suspend_stat_step failed_steps[REC_FAILED_NUM];
-};
-
-extern struct suspend_stats suspend_stats;
-
-static inline void dpm_save_failed_dev(const char *name)
-{
- strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
- name,
- sizeof(suspend_stats.failed_devs[0]));
- suspend_stats.last_failed_dev++;
- suspend_stats.last_failed_dev %= REC_FAILED_NUM;
-}
-
-static inline void dpm_save_failed_errno(int err)
-{
- suspend_stats.errno[suspend_stats.last_failed_errno] = err;
- suspend_stats.last_failed_errno++;
- suspend_stats.last_failed_errno %= REC_FAILED_NUM;
-}
-
-static inline void dpm_save_failed_step(enum suspend_stat_step step)
-{
- suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
- suspend_stats.last_failed_step++;
- suspend_stats.last_failed_step %= REC_FAILED_NUM;
-}
-
/**
* struct platform_suspend_ops - Callbacks for managing platform dependent
* system sleep states.
@@ -191,6 +135,7 @@ struct platform_s2idle_ops {
int (*begin)(void);
int (*prepare)(void);
int (*prepare_late)(void);
+ void (*check)(void);
bool (*wake)(void);
void (*restore_early)(void);
void (*restore)(void);
@@ -198,6 +143,7 @@ struct platform_s2idle_ops {
};
#ifdef CONFIG_SUSPEND
+extern suspend_state_t pm_suspend_target_state;
extern suspend_state_t mem_sleep_current;
extern suspend_state_t mem_sleep_default;
@@ -333,6 +279,8 @@ extern bool sync_on_suspend_enabled;
#else /* !CONFIG_SUSPEND */
#define suspend_valid_only_mem NULL
+#define pm_suspend_target_state (PM_SUSPEND_ON)
+
static inline void pm_suspend_clear_flags(void) {}
static inline void pm_set_suspend_via_firmware(void) {}
static inline void pm_set_resume_via_firmware(void) {}
@@ -350,6 +298,11 @@ static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {}
static inline void s2idle_wake(void) {}
#endif /* !CONFIG_SUSPEND */
+static inline bool pm_suspend_in_progress(void)
+{
+ return pm_suspend_target_state != PM_SUSPEND_ON;
+}
+
/* struct pbe is used for creating lists of pages that should be restored
* atomically during the resume from disk, because the page frames they have
* occupied before the suspend are in use.
@@ -360,9 +313,6 @@ struct pbe {
struct pbe *next;
};
-/* mm/page_alloc.c */
-extern void mark_free_pages(struct zone *zone);
-
/**
* struct platform_hibernation_ops - hibernation platform support
*
@@ -430,15 +380,7 @@ struct platform_hibernation_ops {
#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
-extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
-static inline void __init register_nosave_region(unsigned long b, unsigned long e)
-{
- __register_nosave_region(b, e, 0);
-}
-static inline void __init register_nosave_region_late(unsigned long b, unsigned long e)
-{
- __register_nosave_region(b, e, 1);
-}
+extern void register_nosave_region(unsigned long b, unsigned long e);
extern int swsusp_page_is_forbidden(struct page *);
extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *);
@@ -446,6 +388,7 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);
+extern u32 swsusp_hardware_signature;
extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
extern int hibernate(void);
extern bool system_entering_hibernation(void);
@@ -455,9 +398,12 @@ extern struct pbe *restore_pblist;
int pfn_is_nosave(unsigned long pfn);
int hibernate_quiet_exec(int (*func)(void *data), void *data);
+int hibernate_resume_nonboot_cpu_disable(void);
+int arch_hibernation_header_save(void *addr, unsigned int max_size);
+int arch_hibernation_header_restore(void *addr);
+
#else /* CONFIG_HIBERNATION */
static inline void register_nosave_region(unsigned long b, unsigned long e) {}
-static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}
@@ -472,6 +418,14 @@ static inline int hibernate_quiet_exec(int (*func)(void *data), void *data) {
}
#endif /* CONFIG_HIBERNATION */
+#if defined(CONFIG_HIBERNATION) && defined(CONFIG_SUSPEND)
+bool pm_hibernation_mode_is_suspend(void);
+#else
+static inline bool pm_hibernation_mode_is_suspend(void) { return false; }
+#endif
+
+int arch_resume_nosmt(void);
+
#ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV
int is_hibernate_resume_dev(dev_t dev);
#else
@@ -496,6 +450,10 @@ void restore_processor_state(void);
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
extern void ksys_sync_helper(void);
+extern void pm_report_hw_sleep_time(u64 t);
+extern void pm_report_max_hw_sleep(u64 t);
+void pm_restrict_gfp_mask(void);
+void pm_restore_gfp_mask(void);
#define pm_notifier(fn, pri) { \
static struct notifier_block fn##_nb = \
@@ -505,21 +463,28 @@ extern void ksys_sync_helper(void);
/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;
-extern unsigned int pm_wakeup_irq;
-extern suspend_state_t pm_suspend_target_state;
+
+static inline bool pm_suspended_storage(void)
+{
+ return !gfp_has_io_fs(gfp_allowed_mask);
+}
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
extern void pm_system_cancel_wakeup(void);
-extern void pm_wakeup_clear(bool reset);
+extern void pm_wakeup_clear(unsigned int irq_number);
extern void pm_system_irq_wakeup(unsigned int irq_number);
+extern unsigned int pm_wakeup_irq(void);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
extern void pm_print_active_wakeup_sources(void);
-extern void lock_system_sleep(void);
-extern void unlock_system_sleep(void);
+extern unsigned int lock_system_sleep(void);
+extern void unlock_system_sleep(unsigned int);
+
+extern bool pm_sleep_transition_in_progress(void);
+bool pm_hibernate_is_recovering(void);
#else /* !CONFIG_PM_SLEEP */
@@ -533,39 +498,85 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
return 0;
}
+static inline void pm_report_hw_sleep_time(u64 t) {}
+static inline void pm_report_max_hw_sleep(u64 t) {}
+
+static inline void pm_restrict_gfp_mask(void) {}
+static inline void pm_restore_gfp_mask(void) {}
+
static inline void ksys_sync_helper(void) {}
#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
+static inline bool pm_suspended_storage(void) { return false; }
static inline bool pm_wakeup_pending(void) { return false; }
static inline void pm_system_wakeup(void) {}
static inline void pm_wakeup_clear(bool reset) {}
static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
-static inline void lock_system_sleep(void) {}
-static inline void unlock_system_sleep(void) {}
+static inline unsigned int lock_system_sleep(void) { return 0; }
+static inline void unlock_system_sleep(unsigned int flags) {}
+
+static inline bool pm_sleep_transition_in_progress(void) { return false; }
+static inline bool pm_hibernate_is_recovering(void) { return false; }
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_SLEEP_DEBUG
extern bool pm_print_times_enabled;
extern bool pm_debug_messages_on;
-extern __printf(2, 3) void __pm_pr_dbg(bool defer, const char *fmt, ...);
+extern bool pm_debug_messages_should_print(void);
+static inline int pm_dyn_debug_messages_on(void)
+{
+#ifdef CONFIG_DYNAMIC_DEBUG
+ return 1;
+#else
+ return 0;
+#endif
+}
+#ifndef pr_fmt
+#define pr_fmt(fmt) "PM: " fmt
+#endif
+#define __pm_pr_dbg(fmt, ...) \
+ do { \
+ if (pm_debug_messages_should_print()) \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+ else if (pm_dyn_debug_messages_on()) \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+#define __pm_deferred_pr_dbg(fmt, ...) \
+ do { \
+ if (pm_debug_messages_should_print()) \
+ printk_deferred(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+ } while (0)
#else
#define pm_print_times_enabled (false)
#define pm_debug_messages_on (false)
#include <linux/printk.h>
-#define __pm_pr_dbg(defer, fmt, ...) \
- no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+#define __pm_pr_dbg(fmt, ...) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#define __pm_deferred_pr_dbg(fmt, ...) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif
+/**
+ * pm_pr_dbg - print pm sleep debug messages
+ *
+ * If pm_debug_messages_on is enabled and the system is entering/leaving
+ * suspend, print the message.
+ * If pm_debug_messages_on is disabled and CONFIG_DYNAMIC_DEBUG is enabled,
+ * print the message only from call sites explicitly enabled via dynamic
+ * debug's control.
+ * If pm_debug_messages_on is disabled and CONFIG_DYNAMIC_DEBUG is disabled,
+ * don't print the message.
+ */
#define pm_pr_dbg(fmt, ...) \
- __pm_pr_dbg(false, fmt, ##__VA_ARGS__)
+ __pm_pr_dbg(fmt, ##__VA_ARGS__)
#define pm_deferred_pr_dbg(fmt, ...) \
- __pm_pr_dbg(true, fmt, ##__VA_ARGS__)
+ __pm_deferred_pr_dbg(fmt, ##__VA_ARGS__)
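
A typical call site might look like this (the device pointer and message are
invented for the example):

	if (pm_suspend_in_progress())
		pm_pr_dbg("%s: deferring work until resume\n", dev_name(dev));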
#ifdef CONFIG_PM_AUTOSLEEP
@@ -578,4 +589,19 @@ static inline void queue_up_suspend_work(void) {}
#endif /* !CONFIG_PM_AUTOSLEEP */
+enum suspend_stat_step {
+ SUSPEND_WORKING = 0,
+ SUSPEND_FREEZE,
+ SUSPEND_PREPARE,
+ SUSPEND_SUSPEND,
+ SUSPEND_SUSPEND_LATE,
+ SUSPEND_SUSPEND_NOIRQ,
+ SUSPEND_RESUME_NOIRQ,
+ SUSPEND_RESUME_EARLY,
+ SUSPEND_RESUME
+};
+
+void dpm_save_failed_dev(const char *name);
+void dpm_save_failed_step(enum suspend_stat_step step);
+
#endif /* _LINUX_SUSPEND_H */
diff --git a/include/linux/swab.h b/include/linux/swab.h
index bcff5149861a..9b804dbb0d79 100644
--- a/include/linux/swab.h
+++ b/include/linux/swab.h
@@ -20,4 +20,29 @@
# define swab64s __swab64s
# define swahw32s __swahw32s
# define swahb32s __swahb32s
+
+static inline void swab16_array(u16 *buf, unsigned int words)
+{
+ while (words--) {
+ swab16s(buf);
+ buf++;
+ }
+}
+
+static inline void swab32_array(u32 *buf, unsigned int words)
+{
+ while (words--) {
+ swab32s(buf);
+ buf++;
+ }
+}
+
+static inline void swab64_array(u64 *buf, unsigned int words)
+{
+ while (words--) {
+ swab64s(buf);
+ buf++;
+ }
+}
+
#endif /* _LINUX_SWAB_H */
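
A quick sketch of the new array helpers in action:

	u16 samples[] = { 0x1234, 0xabcd };

	swab16_array(samples, ARRAY_SIZE(samples));
	/* samples is now { 0x3412, 0xcdab } */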
diff --git a/include/linux/swait.h b/include/linux/swait.h
index 6a8c22b8c2a5..d324419482a0 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -146,7 +146,7 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq)
extern void swake_up_one(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
-extern void swake_up_locked(struct swait_queue_head *q);
+extern void swake_up_locked(struct swait_queue_head *q, int wake_flags);
extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
diff --git a/include/linux/swait_api.h b/include/linux/swait_api.h
new file mode 100644
index 000000000000..1eeaaaaa5ea7
--- /dev/null
+++ b/include/linux/swait_api.h
@@ -0,0 +1 @@
+#include <linux/swait.h>
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 144727041e78..38ca3df68716 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -24,7 +24,6 @@ struct pagevec;
#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
-#define SWAP_FLAG_PRIO_SHIFT 0
#define SWAP_FLAG_DISCARD 0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE 0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */
@@ -56,29 +55,49 @@ static inline int current_is_kswapd(void)
*/
/*
+ * PTE markers are used to persist information onto PTEs that would otherwise
+ * be none PTEs. As the name "PTE" hints, they should only be applied to the
+ * leaves of pgtables.
+ */
+#define SWP_PTE_MARKER_NUM 1
+#define SWP_PTE_MARKER (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
+ SWP_MIGRATION_NUM + SWP_DEVICE_NUM)
+
+/*
* Unaddressable device memory support. See include/linux/hmm.h and
- * Documentation/vm/hmm.rst. Short description is we need struct pages for
+ * Documentation/mm/hmm.rst. Short description is we need struct pages for
* device memory that is unaddressable (inaccessible) by CPU, so that we can
* migrate part of a process memory to device memory.
*
* When a page is migrated from CPU to device, we set the CPU page table entry
- * to a special SWP_DEVICE_* entry.
+ * to a special SWP_DEVICE_{READ|WRITE} entry.
+ *
+ * When a page is mapped by the device for exclusive access we set the CPU page
+ * table entries to a special SWP_DEVICE_EXCLUSIVE entry.
*/
#ifdef CONFIG_DEVICE_PRIVATE
-#define SWP_DEVICE_NUM 2
+#define SWP_DEVICE_NUM 3
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
+#define SWP_DEVICE_EXCLUSIVE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#else
#define SWP_DEVICE_NUM 0
#endif
/*
- * NUMA node memory migration support
+ * Page migration support.
+ *
+ * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
+ * indicates that the referenced (part of an) anonymous page is exclusive to
+ * a single process. For SWP_MIGRATION_WRITE, that information is implicit:
+ * (parts of) an anonymous page that are mapped writable are exclusive to a
+ * single process.
*/
#ifdef CONFIG_MIGRATION
-#define SWP_MIGRATION_NUM 2
-#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
-#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
+#define SWP_MIGRATION_NUM 3
+#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
+#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
+#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif
@@ -95,7 +114,8 @@ static inline int current_is_kswapd(void)
#define MAX_SWAPFILES \
((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
- SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
+ SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
+ SWP_PTE_MARKER_NUM)
/*
* Magic header for a swap area. The first part of the union is
@@ -131,9 +151,28 @@ union swap_header {
* memory reclaim
*/
struct reclaim_state {
- unsigned long reclaimed_slab;
+ /* pages reclaimed outside of LRU-based reclaim */
+ unsigned long reclaimed;
+#ifdef CONFIG_LRU_GEN
+ /* per-thread mm walk data */
+ struct lru_gen_mm_walk *mm_walk;
+#endif
};
+/*
+ * mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based
+ * reclaim
+ * @pages: number of pages reclaimed
+ *
+ * If the current process is undergoing a reclaim operation, increment the
+ * number of reclaimed pages by @pages.
+ */
+static inline void mm_account_reclaimed_pages(unsigned long pages)
+{
+ if (current->reclaim_state)
+ current->reclaim_state->reclaimed += pages;
+}
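
For example, a shrinker that frees pages outside of the LRU could account
them like so (the freeing helper is hypothetical):

	unsigned long freed = example_free_cached_objects();

	mm_account_reclaimed_pages(freed);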
+
#ifdef __KERNEL__
struct address_space;
@@ -143,8 +182,8 @@ struct zone;
/*
* A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
- * disk blocks. A list of swap extents maps the entire swapfile. (Where the
- * term `swapfile' refers to either a blockdevice or an IS_REG file. Apart
+ * disk blocks. An rbtree of swap extents maps the entire swapfile (where the
+ * term `swapfile' refers to either a blockdevice or an IS_REG file). Apart
* from setup, they're handled identically.
*
* We always assume that blocks are of size PAGE_SIZE.
@@ -177,12 +216,11 @@ enum {
SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */
SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */
SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
- SWP_VALID = (1 << 13), /* swap is valid to be operated on? */
/* add others here before... */
- SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL
+#define SWAP_CLUSTER_MAX_SKIPPED (SWAP_CLUSTER_MAX << 10)
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
/* Bit flag in swap_map */
@@ -198,79 +236,58 @@ enum {
#define SWAP_CONT_MAX 0x7f /* Max count */
/*
- * We use this to track usage of a cluster. A cluster is a block of swap disk
- * space with SWAPFILE_CLUSTER pages long and naturally aligns in disk. All
- * free clusters are organized into a list. We fetch an entry from the list to
- * get a free cluster.
- *
- * The data field stores next cluster if the cluster is free or cluster usage
- * counter otherwise. The flags field determines if a cluster is free. This is
- * protected by swap_info_struct.lock.
+ * The first page in the swap file is the swap header, which is always marked
+ * bad to prevent it from being allocated as an entry. This also prevents the
+ * cluster to which it belongs from being marked free. Therefore 0 is safe to
+ * use as a sentinel to indicate an entry is not valid.
*/
-struct swap_cluster_info {
- spinlock_t lock; /*
- * Protect swap_cluster_info fields
- * and swap_info_struct->swap_map
- * elements correspond to the swap
- * cluster
- */
- unsigned int data:24;
- unsigned int flags:8;
-};
-#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
-#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
-#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
+#define SWAP_ENTRY_INVALID 0
+
+#ifdef CONFIG_THP_SWAP
+#define SWAP_NR_ORDERS (PMD_ORDER + 1)
+#else
+#define SWAP_NR_ORDERS 1
+#endif
/*
- * We assign a cluster to each CPU, so each CPU can allocate swap entry from
- * its own cluster and swapout sequentially. The purpose is to optimize swapout
- * throughput.
+ * We keep using the same cluster on a rotational device so I/O will be
+ * sequential. The purpose is to optimize swap throughput on such devices.
*/
-struct percpu_cluster {
- struct swap_cluster_info index; /* Current cluster index */
- unsigned int next; /* Likely next allocation offset */
-};
-
-struct swap_cluster_list {
- struct swap_cluster_info head;
- struct swap_cluster_info tail;
+struct swap_sequential_cluster {
+ unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */
};
/*
* The in-memory structure used to track swap areas.
*/
struct swap_info_struct {
+ struct percpu_ref users; /* indicate and keep swap device valid. */
unsigned long flags; /* SWP_USED etc: see above */
signed short prio; /* swap priority of this type */
struct plist_node list; /* entry in swap_active_head */
signed char type; /* strange name for an index */
unsigned int max; /* extent of the swap_map */
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
+ unsigned long *zeromap; /* kvmalloc'ed bitmap to track zero pages */
struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
- struct swap_cluster_list free_clusters; /* free clusters list */
- unsigned int lowest_bit; /* index of first free in swap_map */
- unsigned int highest_bit; /* index of last free in swap_map */
+ struct list_head free_clusters; /* free clusters list */
+ struct list_head full_clusters; /* full clusters list */
+ struct list_head nonfull_clusters[SWAP_NR_ORDERS];
+	/* list of clusters that contain at least one free slot */
+ struct list_head frag_clusters[SWAP_NR_ORDERS];
+	/* list of clusters that are fragmented or contended */
unsigned int pages; /* total of usable pages of swap */
- unsigned int inuse_pages; /* number of those currently in use */
- unsigned int cluster_next; /* likely index for next allocation */
- unsigned int cluster_nr; /* countdown to next cluster search */
- unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */
- struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
+ atomic_long_t inuse_pages; /* number of those currently in use */
+ struct swap_sequential_cluster *global_cluster; /* Use one global cluster for rotating device */
+ spinlock_t global_cluster_lock; /* Serialize usage of global cluster */
struct rb_root swap_extent_root;/* root of the swap extent rbtree */
struct block_device *bdev; /* swap device or bdev of swap file */
struct file *swap_file; /* seldom referenced */
- unsigned int old_block_size; /* seldom referenced */
-#ifdef CONFIG_FRONTSWAP
- unsigned long *frontswap_map; /* frontswap in-use, one bit per page */
- atomic_t frontswap_pages; /* frontswap pages in-use counter */
-#endif
+ struct completion comp; /* seldom referenced */
spinlock_t lock; /*
* protect map scan related fields like
- * swap_map, lowest_bit, highest_bit,
- * inuse_pages, cluster_next,
- * cluster_nr, lowest_alloc,
- * highest_alloc, free/discard cluster
- * list. other fields are only changed
+ * swap_map, inuse_pages and all cluster
+ * lists. other fields are only changed
* at swapon/swapoff, so are protected
* by swap_lock. changing flags need
* hold this lock and swap_lock. If
@@ -282,65 +299,54 @@ struct swap_info_struct {
* list.
*/
struct work_struct discard_work; /* discard worker */
- struct swap_cluster_list discard_clusters; /* discard clusters list */
- struct plist_node avail_lists[]; /*
- * entries in swap_avail_heads, one
- * entry per node.
- * Must be last as the number of the
- * array is nr_node_ids, which is not
- * a fixed value so have to allocate
- * dynamically.
- * And it has to be an array so that
- * plist_for_each_* can work.
- */
+ struct work_struct reclaim_work; /* reclaim worker */
+ struct list_head discard_clusters; /* discard clusters list */
+ struct plist_node avail_list; /* entry in swap_avail_head */
};
-#ifdef CONFIG_64BIT
-#define SWAP_RA_ORDER_CEILING 5
-#else
-/* Avoid stack overflow, because we need to save part of page table */
-#define SWAP_RA_ORDER_CEILING 3
-#define SWAP_RA_PTE_CACHE_SIZE (1 << SWAP_RA_ORDER_CEILING)
-#endif
+static inline swp_entry_t page_swap_entry(struct page *page)
+{
+ struct folio *folio = page_folio(page);
+ swp_entry_t entry = folio->swap;
-struct vma_swap_readahead {
- unsigned short win;
- unsigned short offset;
- unsigned short nr_pte;
-#ifdef CONFIG_64BIT
- pte_t *ptes;
-#else
- pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
-#endif
-};
+ entry.val += folio_page_idx(folio, page);
+ return entry;
+}
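
Sketch: for any page of a (possibly large) swapcache folio, the helper yields
that page's own entry, i.e. the folio's entry offset by the page's index
within the folio:

	swp_entry_t entry = page_swap_entry(page);

	pr_debug("swap type %lu, offset %lu\n",
		 swp_type(entry), swp_offset(entry));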
/* linux/mm/workingset.c */
+bool workingset_test_recent(void *shadow, bool file, bool *workingset,
+ bool flush);
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
-void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
-void workingset_refault(struct page *page, void *shadow);
-void workingset_activation(struct page *page);
-
-/* Only track the nodes of mappings with shadow entries */
-void workingset_update_node(struct xa_node *node);
-#define mapping_set_update(xas, mapping) do { \
- if (!dax_mapping(mapping) && !shmem_mapping(mapping)) \
- xas_set_update(xas, workingset_update_node); \
-} while (0)
+void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
+void workingset_refault(struct folio *folio, void *shadow);
+void workingset_activation(struct folio *folio);
/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;
-extern unsigned long nr_free_buffer_pages(void);
/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)
/* linux/mm/swap.c */
-extern void lru_note_cost(struct lruvec *lruvec, bool file,
- unsigned int nr_pages);
-extern void lru_note_cost_page(struct page *);
-extern void lru_cache_add(struct page *);
-extern void mark_page_accessed(struct page *);
+void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
+ unsigned int nr_io, unsigned int nr_rotated)
+ __releases(lruvec->lru_lock);
+void lru_note_cost_refault(struct folio *);
+void folio_add_lru(struct folio *);
+void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
+void mark_page_accessed(struct page *);
+void folio_mark_accessed(struct folio *);
+
+static inline bool folio_may_be_lru_cached(struct folio *folio)
+{
+ /*
+ * Holding PMD-sized folios in per-CPU LRU cache unbalances accounting.
+ * Holding small numbers of low-order mTHP folios in per-CPU LRU cache
+ * would be sensible, but nobody has implemented and tested that yet.
+ */
+ return !folio_test_large(folio);
+}
extern atomic_t lru_disable_count;
@@ -359,114 +365,81 @@ extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
-extern void rotate_reclaimable_page(struct page *page);
-extern void deactivate_file_page(struct page *page);
-extern void deactivate_page(struct page *page);
-extern void mark_page_lazyfree(struct page *page);
+void folio_deactivate(struct folio *folio);
+void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);
-extern void lru_cache_add_inactive_or_unevictable(struct page *page,
- struct vm_area_struct *vma);
-
/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
-extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
+
+#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
+#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
+#define MIN_SWAPPINESS 0
+#define MAX_SWAPPINESS 200
+
+/* Just reclaim from anon folios in proactive memory reclaim */
+#define SWAPPINESS_ANON_ONLY (MAX_SWAPPINESS + 1)
+
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
- bool may_swap);
+ unsigned int reclaim_options,
+ int *swappiness);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap,
pg_data_t *pgdat,
unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
-extern int remove_mapping(struct address_space *mapping, struct page *page);
+long remove_mapping(struct address_space *mapping, struct folio *folio);
+
+#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
+extern int reclaim_register_node(struct node *node);
+extern void reclaim_unregister_node(struct node *node);
-extern unsigned long reclaim_pages(struct list_head *page_list);
-#ifdef CONFIG_NUMA
-extern int node_reclaim_mode;
-extern int sysctl_min_unmapped_ratio;
-extern int sysctl_min_slab_ratio;
#else
-#define node_reclaim_mode 0
-#endif
-static inline bool node_reclaim_enabled(void)
+static inline int reclaim_register_node(struct node *node)
{
- /* Is any node_reclaim_mode bit set? */
- return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
+ return 0;
}
-extern void check_move_unevictable_pages(struct pagevec *pvec);
+static inline void reclaim_unregister_node(struct node *node)
+{
+}
+#endif /* CONFIG_SYSFS && CONFIG_NUMA */
-extern int kswapd_run(int nid);
-extern void kswapd_stop(int nid);
+#ifdef CONFIG_NUMA
+extern int sysctl_min_unmapped_ratio;
+extern int sysctl_min_slab_ratio;
+#endif
-#ifdef CONFIG_SWAP
+void check_move_unevictable_folios(struct folio_batch *fbatch);
-#include <linux/blk_types.h> /* for bio_end_io_t */
+extern void __meminit kswapd_run(int nid);
+extern void __meminit kswapd_stop(int nid);
-/* linux/mm/page_io.c */
-extern int swap_readpage(struct page *page, bool do_poll);
-extern int swap_writepage(struct page *page, struct writeback_control *wbc);
-extern void end_swap_bio_write(struct bio *bio);
-extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
- bio_end_io_t end_write_func);
-extern int swap_set_page_dirty(struct page *page);
+#ifdef CONFIG_SWAP
int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
sector_t *);
-/* linux/mm/swap_state.c */
-/* One swap address space for each 64M swap space */
-#define SWAP_ADDRESS_SPACE_SHIFT 14
-#define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT)
-extern struct address_space *swapper_spaces[];
-#define swap_address_space(entry) \
- (&swapper_spaces[swp_type(entry)][swp_offset(entry) \
- >> SWAP_ADDRESS_SPACE_SHIFT])
static inline unsigned long total_swapcache_pages(void)
{
return global_node_page_state(NR_SWAPCACHE);
}
-extern void show_swap_cache_info(void);
-extern int add_to_swap(struct page *page);
-extern void *get_shadow_from_swap_cache(swp_entry_t entry);
-extern int add_to_swap_cache(struct page *page, swp_entry_t entry,
- gfp_t gfp, void **shadowp);
-extern void __delete_from_swap_cache(struct page *page,
- swp_entry_t entry, void *shadow);
-extern void delete_from_swap_cache(struct page *);
-extern void clear_shadow_from_swap_cache(int type, unsigned long begin,
- unsigned long end);
-extern void free_page_and_swap_cache(struct page *);
-extern void free_pages_and_swap_cache(struct page **, int);
-extern struct page *lookup_swap_cache(swp_entry_t entry,
- struct vm_area_struct *vma,
- unsigned long addr);
-struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index);
-extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
- struct vm_area_struct *vma, unsigned long addr,
- bool do_poll);
-extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
- struct vm_area_struct *vma, unsigned long addr,
- bool *new_page_allocated);
-extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
- struct vm_fault *vmf);
-extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
- struct vm_fault *vmf);
-
+void free_swap_cache(struct folio *folio);
+void free_folio_and_swap_cache(struct folio *folio);
+void free_pages_and_swap_cache(struct encoded_page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
-extern bool has_usable_swap(void);
/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
@@ -480,53 +453,42 @@ static inline long get_nr_swap_pages(void)
}
extern void si_swapinfo(struct sysinfo *);
-extern swp_entry_t get_swap_page(struct page *page);
-extern void put_swap_page(struct page *page, swp_entry_t entry);
+int folio_alloc_swap(struct folio *folio);
+bool folio_free_swap(struct folio *folio);
+void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
-extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
-extern void swap_shmem_alloc(swp_entry_t);
+extern void swap_shmem_alloc(swp_entry_t, int);
extern int swap_duplicate(swp_entry_t);
-extern int swapcache_prepare(swp_entry_t);
-extern void swap_free(swp_entry_t);
-extern void swapcache_free_entries(swp_entry_t *entries, int n);
-extern int free_swap_and_cache(swp_entry_t);
+extern int swapcache_prepare(swp_entry_t entry, int nr);
+extern void swap_free_nr(swp_entry_t entry, int nr_pages);
+extern void free_swap_and_cache_nr(swp_entry_t entry, int nr);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
-extern int page_swapcount(struct page *);
extern int __swap_count(swp_entry_t entry);
-extern int __swp_swapcount(swp_entry_t entry);
+extern bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
-extern struct swap_info_struct *page_swap_info(struct page *);
-extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
-extern bool reuse_swap_page(struct page *, int *);
-extern int try_to_free_swap(struct page *);
struct backing_dev_info;
-extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
-extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
-sector_t swap_page_sector(struct page *page);
+sector_t swap_folio_sector(struct folio *folio);
static inline void put_swap_device(struct swap_info_struct *si)
{
- rcu_read_unlock();
+ percpu_ref_put(&si->users);
}
#else /* CONFIG_SWAP */
-
-static inline int swap_readpage(struct page *page, bool do_poll)
+static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
- return 0;
+ return NULL;
}
-static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
+static inline void put_swap_device(struct swap_info_struct *si)
{
- return NULL;
}
-#define swap_address_space(entry) (NULL)
#define get_nr_swap_pages() 0L
#define total_swap_pages 0L
#define total_swapcache_pages() 0UL
@@ -534,26 +496,25 @@ static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
#define si_swapinfo(val) \
do { (val)->freeswap = (val)->totalswap = 0; } while (0)
-/* only sparc can not include linux/pagemap.h in this file
- * so leave put_page and release_pages undeclared... */
-#define free_page_and_swap_cache(page) \
- put_page(page)
+#define free_folio_and_swap_cache(folio) \
+ folio_put(folio)
#define free_pages_and_swap_cache(pages, nr) \
release_pages((pages), (nr));
-static inline void show_swap_cache_info(void)
+static inline void free_swap_and_cache_nr(swp_entry_t entry, int nr)
{
}
-#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
-#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
+static inline void free_swap_cache(struct folio *folio)
+{
+}
static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
return 0;
}
-static inline void swap_shmem_alloc(swp_entry_t swp)
+static inline void swap_shmem_alloc(swp_entry_t swp, int nr)
{
}
@@ -562,160 +523,118 @@ static inline int swap_duplicate(swp_entry_t swp)
return 0;
}
-static inline void swap_free(swp_entry_t swp)
-{
-}
-
-static inline void put_swap_page(struct page *page, swp_entry_t swp)
-{
-}
-
-static inline struct page *swap_cluster_readahead(swp_entry_t entry,
- gfp_t gfp_mask, struct vm_fault *vmf)
-{
- return NULL;
-}
-
-static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
- struct vm_fault *vmf)
-{
- return NULL;
-}
-
-static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
+static inline int swapcache_prepare(swp_entry_t swp, int nr)
{
return 0;
}
-static inline struct page *lookup_swap_cache(swp_entry_t swp,
- struct vm_area_struct *vma,
- unsigned long addr)
+static inline void swap_free_nr(swp_entry_t entry, int nr_pages)
{
- return NULL;
}
-static inline
-struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
+static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
- return find_get_page(mapping, index);
}
-static inline int add_to_swap(struct page *page)
+static inline int __swap_count(swp_entry_t entry)
{
return 0;
}
-static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
-{
- return NULL;
-}
-
-static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
- gfp_t gfp_mask, void **shadowp)
-{
- return -1;
-}
-
-static inline void __delete_from_swap_cache(struct page *page,
- swp_entry_t entry, void *shadow)
-{
-}
-
-static inline void delete_from_swap_cache(struct page *page)
-{
-}
-
-static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
- unsigned long end)
+static inline bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry)
{
+ return false;
}
-static inline int page_swapcount(struct page *page)
+static inline int swp_swapcount(swp_entry_t entry)
{
return 0;
}
-static inline int __swap_count(swp_entry_t entry)
+static inline int folio_alloc_swap(struct folio *folio)
{
- return 0;
+ return -EINVAL;
}
-static inline int __swp_swapcount(swp_entry_t entry)
+static inline bool folio_free_swap(struct folio *folio)
{
- return 0;
+ return false;
}
-static inline int swp_swapcount(swp_entry_t entry)
+static inline int add_swap_extent(struct swap_info_struct *sis,
+ unsigned long start_page,
+ unsigned long nr_pages, sector_t start_block)
{
- return 0;
+ return -EINVAL;
}
+#endif /* CONFIG_SWAP */
-#define reuse_swap_page(page, total_map_swapcount) \
- (page_trans_huge_mapcount(page, total_map_swapcount) == 1)
-
-static inline int try_to_free_swap(struct page *page)
+static inline void free_swap_and_cache(swp_entry_t entry)
{
- return 0;
+ free_swap_and_cache_nr(entry, 1);
}
-static inline swp_entry_t get_swap_page(struct page *page)
+static inline void swap_free(swp_entry_t entry)
{
- swp_entry_t entry;
- entry.val = 0;
- return entry;
+ swap_free_nr(entry, 1);
}
-#endif /* CONFIG_SWAP */
-
-#ifdef CONFIG_THP_SWAP
-extern int split_swap_cluster(swp_entry_t entry);
-#else
-static inline int split_swap_cluster(swp_entry_t entry)
-{
- return 0;
-}
-#endif
-
#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
/* Cgroup2 doesn't have per-cgroup swappiness */
if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
- return vm_swappiness;
+ return READ_ONCE(vm_swappiness);
/* root ? */
if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
- return vm_swappiness;
+ return READ_ONCE(vm_swappiness);
- return memcg->swappiness;
+ return READ_ONCE(memcg->swappiness);
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
- return vm_swappiness;
+ return READ_ONCE(vm_swappiness);
}
#endif
#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-extern void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
+void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp);
+static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __folio_throttle_swaprate(folio, gfp);
+}
#else
-static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
+static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
}
#endif
-#ifdef CONFIG_MEMCG_SWAP
-extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
-extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
-extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
-extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
-extern bool mem_cgroup_swap_full(struct page *page);
-#else
-static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
+int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
+static inline int mem_cgroup_try_charge_swap(struct folio *folio,
+ swp_entry_t entry)
{
+ if (mem_cgroup_disabled())
+ return 0;
+ return __mem_cgroup_try_charge_swap(folio, entry);
}
-static inline int mem_cgroup_try_charge_swap(struct page *page,
+extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
+static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __mem_cgroup_uncharge_swap(entry, nr_pages);
+}
+
+extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
+extern bool mem_cgroup_swap_full(struct folio *folio);
+#else
+static inline int mem_cgroup_try_charge_swap(struct folio *folio,
swp_entry_t entry)
{
return 0;
@@ -731,7 +650,7 @@ static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
return get_nr_swap_pages();
}
-static inline bool mem_cgroup_swap_full(struct page *page)
+static inline bool mem_cgroup_swap_full(struct folio *folio)
{
return vm_swap_full();
}
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h
index a12dd1c3966c..91cdf12190a0 100644
--- a/include/linux/swap_cgroup.h
+++ b/include/linux/swap_cgroup.h
@@ -4,12 +4,10 @@
#include <linux/swap.h>
-#ifdef CONFIG_MEMCG_SWAP
+#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
-extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
- unsigned short old, unsigned short new);
-extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
- unsigned int nr_ents);
+extern void swap_cgroup_record(struct folio *folio, unsigned short id, swp_entry_t ent);
+extern unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents);
extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);
@@ -17,8 +15,12 @@ extern void swap_cgroup_swapoff(int type);
#else
static inline
-unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
- unsigned int nr_ents)
+void swap_cgroup_record(struct folio *folio, unsigned short id, swp_entry_t ent)
+{
+}
+
+static inline
+unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents)
{
return 0;
}
@@ -40,6 +42,6 @@ static inline void swap_cgroup_swapoff(int type)
return;
}
-#endif /* CONFIG_MEMCG_SWAP */
+#endif
#endif /* __LINUX_SWAP_CGROUP_H */
diff --git a/include/linux/swap_slots.h b/include/linux/swap_slots.h
deleted file mode 100644
index 347f1a304190..000000000000
--- a/include/linux/swap_slots.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SWAP_SLOTS_H
-#define _LINUX_SWAP_SLOTS_H
-
-#include <linux/swap.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-
-#define SWAP_SLOTS_CACHE_SIZE SWAP_BATCH
-#define THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE (5*SWAP_SLOTS_CACHE_SIZE)
-#define THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE (2*SWAP_SLOTS_CACHE_SIZE)
-
-struct swap_slots_cache {
- bool lock_initialized;
- struct mutex alloc_lock; /* protects slots, nr, cur */
- swp_entry_t *slots;
- int nr;
- int cur;
- spinlock_t free_lock; /* protects slots_ret, n_ret */
- swp_entry_t *slots_ret;
- int n_ret;
-};
-
-void disable_swap_slots_cache_lock(void);
-void reenable_swap_slots_cache_unlock(void);
-void enable_swap_slots_cache(void);
-int free_swap_slot(swp_entry_t entry);
-
-extern bool swap_slot_cache_enabled;
-
-#endif /* _LINUX_SWAP_SLOTS_H */
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index e06febf62978..99e3ed469e88 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -2,15 +2,12 @@
#ifndef _LINUX_SWAPFILE_H
#define _LINUX_SWAPFILE_H
-/*
- * these were static in swapfile.c but frontswap.c needs them and we don't
- * want to expose them to the dozens of source files that include swap.h
- */
-extern spinlock_t swap_lock;
-extern struct plist_head swap_active_head;
-extern struct swap_info_struct *swap_info[];
-extern int try_to_unuse(unsigned int, bool, unsigned long);
extern unsigned long generic_max_swapfile_size(void);
-extern unsigned long max_swapfile_size(void);
+unsigned long arch_max_swapfile_size(void);
+
+/* Maximum swapfile size supported for the arch (not inclusive). */
+extern unsigned long swapfile_maximum_size;
+/* Whether swap migration entry supports storing A/D bits for the arch */
+extern bool swap_migration_ad_supported;
#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index d9b7c9132c2f..8cfc966eae48 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -8,15 +8,19 @@
#ifdef CONFIG_MMU
+#ifdef CONFIG_SWAP
+#include <linux/swapfile.h>
+#endif /* CONFIG_SWAP */
+
/*
* swapcache pages are stored in the swapper_space radix tree. We want to
* get good packing density in that tree, so the index should be dense in
* the low-order bits.
*
- * We arrange the `type' and `offset' fields so that `type' is at the seven
+ * We arrange the `type' and `offset' fields so that `type' is at the six
* high-order bits of the swp_entry_t and `offset' is right-aligned in the
* remaining bits. Although `type' itself needs only five bits, we allow for
- * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
+ * shmem/tmpfs to shift it all up a further one bit: see swp_to_radix_entry().
*
* swp_entry_t's are *never* stored anywhere in their arch-dependent format.
*/
@@ -24,6 +28,57 @@
#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
/*
+ * Definitions only for PFN swap entries (see is_pfn_swap_entry()). To
+ * store PFN, we only need SWP_PFN_BITS bits. Each of the pfn swap entries
+ * can use the extra bits to store other information besides PFN.
+ */
+#ifdef MAX_PHYSMEM_BITS
+#define SWP_PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
+#else /* MAX_PHYSMEM_BITS */
+#define SWP_PFN_BITS min_t(int, \
+ sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
+ SWP_TYPE_SHIFT)
+#endif /* MAX_PHYSMEM_BITS */
+#define SWP_PFN_MASK (BIT(SWP_PFN_BITS) - 1)
+
+/**
+ * Migration swap entry specific bitfield definitions. Layout:
+ *
+ * |----------+--------------------|
+ * | swp_type | swp_offset |
+ * |----------+--------+-+-+-------|
+ * | | resv |D|A| PFN |
+ * |----------+--------+-+-+-------|
+ *
+ * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
+ * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
+ *
+ * Note: A/D bits will be stored in migration entries iff there are enough
+ * free bits in the arch-specific swp offset. By default we'll ignore A/D bits
+ * when migrating a page. Please refer to migration_entry_supports_ad()
+ * for more information. If there are more bits besides the PFN and A/D bits,
+ * they should be reserved and always be zero.
+ */
+#define SWP_MIG_YOUNG_BIT (SWP_PFN_BITS)
+#define SWP_MIG_DIRTY_BIT (SWP_PFN_BITS + 1)
+#define SWP_MIG_TOTAL_BITS (SWP_PFN_BITS + 2)
+
+#define SWP_MIG_YOUNG BIT(SWP_MIG_YOUNG_BIT)
+#define SWP_MIG_DIRTY BIT(SWP_MIG_DIRTY_BIT)
+
+/* Clear all flags but only keep swp_entry_t related information */
+static inline pte_t pte_swp_clear_flags(pte_t pte)
+{
+ if (pte_swp_exclusive(pte))
+ pte = pte_swp_clear_exclusive(pte);
+ if (pte_swp_soft_dirty(pte))
+ pte = pte_swp_clear_soft_dirty(pte);
+ if (pte_swp_uffd_wp(pte))
+ pte = pte_swp_clear_uffd_wp(pte);
+ return pte;
+}
+
+/*
* Store a type+offset into a swp_entry_t in an arch-independent format
*/
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
@@ -52,28 +107,6 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
return entry.val & SWP_OFFSET_MASK;
}
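
A small worked example of the type/offset packing described above:

	swp_entry_t e = swp_entry(1, 0x1234);	/* type 1, offset 0x1234 */

	WARN_ON(swp_type(e) != 1 || swp_offset(e) != 0x1234);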
-/* check whether a pte points to a swap entry */
-static inline int is_swap_pte(pte_t pte)
-{
- return !pte_none(pte) && !pte_present(pte);
-}
-
-/*
- * Convert the arch-dependent pte representation of a swp_entry_t into an
- * arch-independent swp_entry_t.
- */
-static inline swp_entry_t pte_to_swp_entry(pte_t pte)
-{
- swp_entry_t arch_entry;
-
- if (pte_swp_soft_dirty(pte))
- pte = pte_swp_clear_soft_dirty(pte);
- if (pte_swp_uffd_wp(pte))
- pte = pte_swp_clear_uffd_wp(pte);
- arch_entry = __pte_to_swp_entry(pte);
- return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
-}
-
/*
* Convert the arch-independent representation of a swp_entry_t into the
* arch-dependent pte representation.
@@ -100,151 +133,196 @@ static inline void *swp_to_radix_entry(swp_entry_t entry)
}
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
-static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
+static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
- return swp_entry(write ? SWP_DEVICE_WRITE : SWP_DEVICE_READ,
- page_to_pfn(page));
+ return swp_entry(SWP_DEVICE_READ, offset);
}
-static inline bool is_device_private_entry(swp_entry_t entry)
+static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
- int type = swp_type(entry);
- return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
+ return swp_entry(SWP_DEVICE_WRITE, offset);
}
-static inline void make_device_private_entry_read(swp_entry_t *entry)
+static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
- *entry = swp_entry(SWP_DEVICE_READ, swp_offset(*entry));
+ return swp_entry(SWP_DEVICE_EXCLUSIVE, offset);
}
-static inline bool is_write_device_private_entry(swp_entry_t entry)
+#else /* CONFIG_DEVICE_PRIVATE */
+static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
- return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
+ return swp_entry(0, 0);
}
-static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
+static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
- return swp_offset(entry);
+ return swp_entry(0, 0);
}
-static inline struct page *device_private_entry_to_page(swp_entry_t entry)
+static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
- return pfn_to_page(swp_offset(entry));
+ return swp_entry(0, 0);
}
-#else /* CONFIG_DEVICE_PRIVATE */
-static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
+
+#endif /* CONFIG_DEVICE_PRIVATE */
+
+#ifdef CONFIG_MIGRATION
+
+static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
- return swp_entry(0, 0);
+ return swp_entry(SWP_MIGRATION_READ, offset);
}
-static inline void make_device_private_entry_read(swp_entry_t *entry)
+static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
+ return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}
-static inline bool is_device_private_entry(swp_entry_t entry)
+static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
- return false;
+ return swp_entry(SWP_MIGRATION_WRITE, offset);
}
-static inline bool is_write_device_private_entry(swp_entry_t entry)
+/*
+ * Returns whether the host has a large enough swap offset field to support
+ * carrying over pgtable A/D bits for page migrations. The result is
+ * pretty much arch-specific.
+ */
+static inline bool migration_entry_supports_ad(void)
{
+#ifdef CONFIG_SWAP
+ return swap_migration_ad_supported;
+#else /* CONFIG_SWAP */
return false;
+#endif /* CONFIG_SWAP */
}
-static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
+static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
- return 0;
+ if (migration_entry_supports_ad())
+ return swp_entry(swp_type(entry),
+ swp_offset(entry) | SWP_MIG_YOUNG);
+ return entry;
}
-static inline struct page *device_private_entry_to_page(swp_entry_t entry)
+static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
- return NULL;
+ if (migration_entry_supports_ad())
+ return swp_entry(swp_type(entry),
+ swp_offset(entry) | SWP_MIG_DIRTY);
+ return entry;
}
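
A sketch of how a caller preserves A/D bits when creating a migration entry
(simplified from the page-migration path; pte and page are assumed from the
surrounding context):

	swp_entry_t entry = make_readable_migration_entry(page_to_pfn(page));

	if (pte_young(pte))
		entry = make_migration_entry_young(entry);
	if (pte_dirty(pte))
		entry = make_migration_entry_dirty(entry);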
-#endif /* CONFIG_DEVICE_PRIVATE */
-#ifdef CONFIG_MIGRATION
-static inline swp_entry_t make_migration_entry(struct page *page, int write)
+extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address);
+extern void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *pte);
+#else /* CONFIG_MIGRATION */
+static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
- BUG_ON(!PageLocked(compound_head(page)));
+ return swp_entry(0, 0);
+}
- return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
- page_to_pfn(page));
+static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
+{
+ return swp_entry(0, 0);
}
-static inline int is_migration_entry(swp_entry_t entry)
+static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
- return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
- swp_type(entry) == SWP_MIGRATION_WRITE);
+ return swp_entry(0, 0);
}
-static inline int is_write_migration_entry(swp_entry_t entry)
+static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address) { }
+static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *pte) { }
+
+static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
- return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
+ return entry;
}
-static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
+static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
- return swp_offset(entry);
+ return entry;
}
-static inline struct page *migration_entry_to_page(swp_entry_t entry)
+#endif /* CONFIG_MIGRATION */
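For context, the consumer side looks roughly like this condensed, do_swap_page()-style sketch (vmf is assumed caller context; pte_to_swp_entry() and is_migration_entry() are defined elsewhere in this header):

        swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);

        if (is_migration_entry(entry)) {
                /* Page is mid-migration: block until done, then retry the fault. */
                migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
                return 0;
        }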
+
+#ifdef CONFIG_MEMORY_FAILURE
+
+/*
+ * Support for hardware-poisoned pages
+ */
+static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
- struct page *p = pfn_to_page(swp_offset(entry));
- /*
- * Any use of migration entries may only occur while the
- * corresponding page is locked
- */
- BUG_ON(!PageLocked(compound_head(p)));
- return p;
+ BUG_ON(!PageLocked(page));
+ return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}
-static inline void make_migration_entry_read(swp_entry_t *entry)
+static inline int is_hwpoison_entry(swp_entry_t entry)
{
- *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
+ return swp_type(entry) == SWP_HWPOISON;
}
-extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
- spinlock_t *ptl);
-extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
- unsigned long address);
-extern void migration_entry_wait_huge(struct vm_area_struct *vma,
- struct mm_struct *mm, pte_t *pte);
#else
-#define make_migration_entry(page, write) swp_entry(0, 0)
-static inline int is_migration_entry(swp_entry_t swp)
+static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
- return 0;
+ return swp_entry(0, 0);
}
-static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
+static inline int is_hwpoison_entry(swp_entry_t swp)
{
return 0;
}
+#endif
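As a simplified usage sketch (assumed, not part of this patch): memory_failure()-style code unmaps the bad page and installs the entry, and the fault path later turns any access into a hard error.

        /* Producer side: the page must already be locked. */
        set_pte_at(mm, addr, ptep, swp_entry_to_pte(make_hwpoison_entry(page)));

        /* Consumer side, in the fault handler: */
        if (is_hwpoison_entry(entry))
                return VM_FAULT_HWPOISON;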
+
+typedef unsigned long pte_marker;
+
+#define PTE_MARKER_UFFD_WP BIT(0)
+/*
+ * "Poisoned" here is meant in the very general sense of "future accesses are
+ * invalid", rather than referring specifically to hardware memory errors.
+ * This marker can represent any of several causes of such invalidation.
+ *
+ * Note that, when encountered by the faulting logic, PTEs with this marker
+ * result in VM_FAULT_HWPOISON and therefore always trigger the hardware
+ * memory error handling path, regardless of the underlying cause.
+ */
+#define PTE_MARKER_POISONED BIT(1)
+/*
+ * Indicates that, on fault, this PTE will cause a SIGSEGV signal to be
+ * sent. This means guard markers behave in effect as if the region were
+ * mapped PROT_NONE, rather than as if it were a memory hole or equivalent.
+ */
+#define PTE_MARKER_GUARD BIT(2)
+#define PTE_MARKER_MASK (BIT(3) - 1)
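The mask is what lets a marker be recovered from an entry's offset field; a helper along these lines (mirroring the pte_marker_get() style used in mm code, assumed here) is all that extraction takes:

        static inline pte_marker pte_marker_get(swp_entry_t entry)
        {
                return swp_offset(entry) & PTE_MARKER_MASK;
        }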
+
+static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
+{
+ return swp_entry(SWP_PTE_MARKER, marker);
+}
-static inline struct page *migration_entry_to_page(swp_entry_t entry)
+static inline pte_t make_pte_marker(pte_marker marker)
{
- return NULL;
+ return swp_entry_to_pte(make_pte_marker_entry(marker));
}
-static inline void make_migration_entry_read(swp_entry_t *entryp) { }
-static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
- spinlock_t *ptl) { }
-static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
- unsigned long address) { }
-static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
- struct mm_struct *mm, pte_t *pte) { }
-static inline int is_write_migration_entry(swp_entry_t entry)
+static inline swp_entry_t make_poisoned_swp_entry(void)
{
- return 0;
+ return make_pte_marker_entry(PTE_MARKER_POISONED);
}
-#endif
+static inline swp_entry_t make_guard_swp_entry(void)
+{
+ return make_pte_marker_entry(PTE_MARKER_GUARD);
+}
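A sketch of how a marker reaches a page table, e.g. for guard regions; mm, addr and ptep are assumed caller context:

        set_pte_at(mm, addr, ptep, make_pte_marker(PTE_MARKER_GUARD));
        /* A later fault on addr now raises SIGSEGV instead of allocating a page. */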
struct page_vma_mapped_walk;
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
-extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
struct page *page);
extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
@@ -252,16 +330,6 @@ extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
-static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
-{
- swp_entry_t arch_entry;
-
- if (pmd_swp_soft_dirty(pmd))
- pmd = pmd_swp_clear_soft_dirty(pmd);
- arch_entry = __pmd_to_swp_entry(pmd);
- return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
-}
-
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
swp_entry_t arch_entry;
@@ -270,12 +338,8 @@ static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
return __swp_entry_to_pmd(arch_entry);
}
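The PMD-level conversion follows the same pattern as the PTE case; roughly what set_pmd_migration_entry() boils down to, as a sketch assuming a locked THP under migration (the real helper also carries soft-dirty and uffd-wp state):

        swp_entry_t entry = make_readable_migration_entry(page_to_pfn(page));
        pmd_t pmdswp = swp_entry_to_pmd(entry);

        set_pmd_at(mm, address, pmdp, pmdswp);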
-static inline int is_pmd_migration_entry(pmd_t pmd)
-{
- return !pmd_present(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
-}
-#else
-static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
+#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
struct page *page)
{
BUILD_BUG();
@@ -289,79 +353,12 @@ static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }
-static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
-{
- return swp_entry(0, 0);
-}
-
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
return __pmd(0);
}
-static inline int is_pmd_migration_entry(pmd_t pmd)
-{
- return 0;
-}
-#endif